summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig8
-rw-r--r--drivers/Makefile5
-rw-r--r--drivers/acpi/ac.c2
-rw-r--r--drivers/acpi/acpica/acglobal.h6
-rw-r--r--drivers/acpi/acpica/aclocal.h1
-rw-r--r--drivers/acpi/acpica/acpredef.h1
-rw-r--r--drivers/acpi/acpica/nspredef.c19
-rw-r--r--drivers/acpi/acpica/nsrepair2.c15
-rw-r--r--drivers/acpi/acpica/tbinstal.c27
-rw-r--r--drivers/acpi/apei/Kconfig11
-rw-r--r--drivers/acpi/apei/apei-base.c35
-rw-r--r--drivers/acpi/apei/apei-internal.h15
-rw-r--r--drivers/acpi/apei/einj.c43
-rw-r--r--drivers/acpi/apei/erst-dbg.c6
-rw-r--r--drivers/acpi/apei/erst.c32
-rw-r--r--drivers/acpi/apei/ghes.c431
-rw-r--r--drivers/acpi/apei/hest.c29
-rw-r--r--drivers/acpi/battery.c88
-rw-r--r--drivers/acpi/bus.c14
-rw-r--r--drivers/acpi/dock.c4
-rw-r--r--drivers/acpi/ec_sys.c2
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/osl.c42
-rw-r--r--drivers/acpi/pci_irq.c58
-rw-r--r--drivers/acpi/pci_root.c3
-rw-r--r--drivers/acpi/processor_thermal.c2
-rw-r--r--drivers/acpi/sbs.c15
-rw-r--r--drivers/acpi/sleep.c16
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/acpi/thermal.c2
-rw-r--r--drivers/acpi/video.c9
-rw-r--r--drivers/ata/Kconfig10
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/acard-ahci.c27
-rw-r--r--drivers/ata/ahci.c122
-rw-r--r--drivers/ata/ahci.h1
-rw-r--r--drivers/ata/ata_generic.c5
-rw-r--r--drivers/ata/ata_piix.c44
-rw-r--r--drivers/ata/libahci.c152
-rw-r--r--drivers/ata/libata-acpi.c70
-rw-r--r--drivers/ata/libata-core.c406
-rw-r--r--drivers/ata/libata-eh.c154
-rw-r--r--drivers/ata/libata-pmp.c123
-rw-r--r--drivers/ata/libata-scsi.c28
-rw-r--r--drivers/ata/libata-sff.c53
-rw-r--r--drivers/ata/pata_acpi.c8
-rw-r--r--drivers/ata/pata_ali.c8
-rw-r--r--drivers/ata/pata_amd.c8
-rw-r--r--drivers/ata/pata_artop.c5
-rw-r--r--drivers/ata/pata_atp867x.c13
-rw-r--r--drivers/ata/pata_bf54x.c4
-rw-r--r--drivers/ata/pata_cs5520.c3
-rw-r--r--drivers/ata/pata_efar.c5
-rw-r--r--drivers/ata/pata_hpt3x3.c4
-rw-r--r--drivers/ata/pata_icside.c4
-rw-r--r--drivers/ata/pata_imx.c253
-rw-r--r--drivers/ata/pata_it8213.c5
-rw-r--r--drivers/ata/pata_it821x.c16
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c4
-rw-r--r--drivers/ata/pata_legacy.c2
-rw-r--r--drivers/ata/pata_macio.c10
-rw-r--r--drivers/ata/pata_marvell.c3
-rw-r--r--drivers/ata/pata_mpiix.c4
-rw-r--r--drivers/ata/pata_netcell.c5
-rw-r--r--drivers/ata/pata_ns87410.c2
-rw-r--r--drivers/ata/pata_ns87415.c5
-rw-r--r--drivers/ata/pata_octeon_cf.c8
-rw-r--r--drivers/ata/pata_oldpiix.c5
-rw-r--r--drivers/ata/pata_opti.c4
-rw-r--r--drivers/ata/pata_optidma.c4
-rw-r--r--drivers/ata/pata_pcmcia.c5
-rw-r--r--drivers/ata/pata_pdc2027x.c6
-rw-r--r--drivers/ata/pata_platform.c2
-rw-r--r--drivers/ata/pata_radisys.c5
-rw-r--r--drivers/ata/pata_rdc.c5
-rw-r--r--drivers/ata/pata_rz1000.c4
-rw-r--r--drivers/ata/pata_samsung_cf.c2
-rw-r--r--drivers/ata/pata_scc.c8
-rw-r--r--drivers/ata/pata_sch.c5
-rw-r--r--drivers/ata/pata_sil680.c4
-rw-r--r--drivers/ata/pata_sis.c5
-rw-r--r--drivers/ata/pata_sl82c105.c6
-rw-r--r--drivers/ata/pata_triflex.c4
-rw-r--r--drivers/ata/pata_via.c26
-rw-r--r--drivers/ata/pdc_adma.c10
-rw-r--r--drivers/ata/sata_dwc_460ex.c40
-rw-r--r--drivers/ata/sata_fsl.c43
-rw-r--r--drivers/ata/sata_inic162x.c31
-rw-r--r--drivers/ata/sata_mv.c94
-rw-r--r--drivers/ata/sata_nv.c79
-rw-r--r--drivers/ata/sata_promise.c6
-rw-r--r--drivers/ata/sata_qstor.c13
-rw-r--r--drivers/ata/sata_sil.c23
-rw-r--r--drivers/ata/sata_sil24.c43
-rw-r--r--drivers/ata/sata_sis.c26
-rw-r--r--drivers/ata/sata_svw.c4
-rw-r--r--drivers/ata/sata_sx4.c4
-rw-r--r--drivers/ata/sata_uli.c4
-rw-r--r--drivers/ata/sata_via.c42
-rw-r--r--drivers/ata/sata_vsc.c9
-rw-r--r--drivers/atm/ambassador.c13
-rw-r--r--drivers/atm/ambassador.h4
-rw-r--r--drivers/atm/atmtcp.c2
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/eni.h3
-rw-r--r--drivers/atm/firestream.c3
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/atm/horizon.c3
-rw-r--r--drivers/atm/idt77252.c3
-rw-r--r--drivers/atm/iphase.c3
-rw-r--r--drivers/atm/lanai.c9
-rw-r--r--drivers/atm/nicstar.c2
-rw-r--r--drivers/atm/suni.c2
-rw-r--r--drivers/atm/uPD98402.c2
-rw-r--r--drivers/atm/zatm.c3
-rw-r--r--drivers/base/Kconfig66
-rw-r--r--drivers/base/Makefile2
-rw-r--r--drivers/base/devtmpfs.c340
-rw-r--r--drivers/base/memory.c3
-rw-r--r--drivers/base/platform.c23
-rw-r--r--drivers/base/power/Makefile1
-rw-r--r--drivers/base/power/clock_ops.c238
-rw-r--r--drivers/base/power/domain.c1272
-rw-r--r--drivers/base/power/generic_ops.c98
-rw-r--r--drivers/base/power/main.c87
-rw-r--r--drivers/base/power/opp.c19
-rw-r--r--drivers/base/power/runtime.c99
-rw-r--r--drivers/base/power/sysfs.c8
-rw-r--r--drivers/base/power/trace.c2
-rw-r--r--drivers/base/regmap/Kconfig13
-rw-r--r--drivers/base/regmap/Makefile3
-rw-r--r--drivers/base/regmap/regmap-i2c.c116
-rw-r--r--drivers/base/regmap/regmap-spi.c75
-rw-r--r--drivers/base/regmap/regmap.c455
-rw-r--r--drivers/base/syscore.c8
-rw-r--r--drivers/bcma/Kconfig11
-rw-r--r--drivers/bcma/Makefile3
-rw-r--r--drivers/bcma/bcma_private.h11
-rw-r--r--drivers/bcma/core.c75
-rw-r--r--drivers/bcma/driver_chipcommon.c16
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c8
-rw-r--r--drivers/bcma/driver_pci.c66
-rw-r--r--drivers/bcma/driver_pci_host.c14
-rw-r--r--drivers/bcma/host_pci.c53
-rw-r--r--drivers/bcma/main.c14
-rw-r--r--drivers/bcma/sprom.c171
-rw-r--r--drivers/block/Kconfig17
-rw-r--r--drivers/block/cciss.h2
-rw-r--r--drivers/block/cciss_scsi.c2
-rw-r--r--drivers/block/drbd/drbd_actlog.c2
-rw-r--r--drivers/block/drbd/drbd_bitmap.c37
-rw-r--r--drivers/block/drbd/drbd_nl.c4
-rw-r--r--drivers/block/drbd/drbd_receiver.c6
-rw-r--r--drivers/block/drbd/drbd_worker.c7
-rw-r--r--drivers/block/hd.c2
-rw-r--r--drivers/block/loop.c297
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/rbd.c46
-rw-r--r--drivers/block/swim3.c1
-rw-r--r--drivers/block/xen-blkback/blkback.c37
-rw-r--r--drivers/block/xen-blkback/xenbus.c2
-rw-r--r--drivers/block/xen-blkfront.c6
-rw-r--r--drivers/block/xsysace.c98
-rw-r--r--drivers/bluetooth/ath3k.c5
-rw-r--r--drivers/bluetooth/btusb.c12
-rw-r--r--drivers/cdrom/cdrom.c8
-rw-r--r--drivers/char/Kconfig11
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/agp/intel-agp.h7
-rw-r--r--drivers/char/apm-emulation.c2
-rw-r--r--drivers/char/bsr.c2
-rw-r--r--drivers/char/generic_nvram.c4
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/hw_random/Kconfig12
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/core.c2
-rw-r--r--drivers/char/hw_random/n2-drv.c29
-rw-r--r--drivers/char/hw_random/n2rng.h2
-rw-r--r--drivers/char/hw_random/nomadik-rng.c3
-rw-r--r--drivers/char/hw_random/omap-rng.c6
-rw-r--r--drivers/char/hw_random/ppc4xx-rng.c156
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c3
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c2
-rw-r--r--drivers/char/mspec.c2
-rw-r--r--drivers/char/nvram.c2
-rw-r--r--drivers/char/ps3flash.c13
-rw-r--r--drivers/char/ramoops.c101
-rw-r--r--drivers/char/random.c334
-rw-r--r--drivers/char/tile-srom.c481
-rw-r--r--drivers/char/tpm/tpm.c102
-rw-r--r--drivers/char/tpm/tpm.h7
-rw-r--r--drivers/char/tpm/tpm_nsc.c14
-rw-r--r--drivers/char/tpm/tpm_tis.c179
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c2
-rw-r--r--drivers/clk/Kconfig3
-rw-r--r--drivers/clocksource/Kconfig12
-rw-r--r--drivers/clocksource/Makefile3
-rw-r--r--drivers/clocksource/dw_apb_timer.c401
-rw-r--r--drivers/clocksource/i8253.c114
-rw-r--r--drivers/connector/cn_proc.c45
-rw-r--r--drivers/connector/connector.c1
-rw-r--r--drivers/cpufreq/Kconfig10
-rw-r--r--drivers/cpufreq/Kconfig.arm32
-rw-r--r--drivers/cpufreq/Kconfig.powerpc7
-rw-r--r--drivers/cpufreq/Makefile12
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/cpufreq.c20
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c568
-rw-r--r--drivers/cpufreq/maple-cpufreq.c309
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c273
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c649
-rw-r--r--drivers/cpuidle/cpuidle.c50
-rw-r--r--drivers/cpuidle/cpuidle.h1
-rw-r--r--drivers/cpuidle/driver.c3
-rw-r--r--drivers/cpuidle/governor.c3
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c5
-rw-r--r--drivers/crypto/caam/caamalg.c1832
-rw-r--r--drivers/crypto/caam/compat.h1
-rw-r--r--drivers/crypto/caam/ctrl.c30
-rw-r--r--drivers/crypto/caam/desc_constr.h58
-rw-r--r--drivers/crypto/n2_core.c33
-rw-r--r--drivers/crypto/omap-sham.c180
-rw-r--r--drivers/crypto/talitos.c47
-rw-r--r--drivers/dma/Kconfig7
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/TODO1
-rw-r--r--drivers/dma/amba-pl08x.c247
-rw-r--r--drivers/dma/at_hdmac.c4
-rw-r--r--drivers/dma/coh901318.c20
-rw-r--r--drivers/dma/dmaengine.c9
-rw-r--r--drivers/dma/dmatest.c1
-rw-r--r--drivers/dma/ep93xx_dma.c1355
-rw-r--r--drivers/dma/imx-dma.c3
-rw-r--r--drivers/dma/imx-sdma.c100
-rw-r--r--drivers/dma/intel_mid_dma.c2
-rw-r--r--drivers/dma/ioat/dma_v3.c8
-rw-r--r--drivers/dma/ioat/pci.c11
-rw-r--r--drivers/dma/ipu/ipu_idmac.c7
-rw-r--r--drivers/dma/mv_xor.c4
-rw-r--r--drivers/dma/mxs-dma.c13
-rw-r--r--drivers/dma/pch_dma.c127
-rw-r--r--drivers/dma/pl330.c64
-rw-r--r--drivers/dma/shdma.c88
-rw-r--r--drivers/dma/shdma.h4
-rw-r--r--drivers/dma/ste_dma40.c271
-rw-r--r--drivers/dma/ste_dma40_ll.h3
-rw-r--r--drivers/edac/cell_edac.c2
-rw-r--r--drivers/edac/edac_stub.c2
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/edac/mpc85xx_edac.c16
-rw-r--r--drivers/eisa/pci_eisa.c2
-rw-r--r--drivers/firewire/core-card.c2
-rw-r--r--drivers/firewire/core-cdev.c44
-rw-r--r--drivers/firewire/core-device.c17
-rw-r--r--drivers/firewire/core-topology.c2
-rw-r--r--drivers/firewire/core.h2
-rw-r--r--drivers/firewire/net.c11
-rw-r--r--drivers/firewire/nosy.c2
-rw-r--r--drivers/firewire/ohci.c57
-rw-r--r--drivers/firmware/efivars.c243
-rw-r--r--drivers/firmware/google/Kconfig1
-rw-r--r--drivers/firmware/google/gsmi.c19
-rw-r--r--drivers/firmware/iscsi_ibft.c14
-rw-r--r--drivers/firmware/sigma.c3
-rw-r--r--drivers/gpio/Kconfig88
-rw-r--r--drivers/gpio/Makefile90
-rw-r--r--drivers/gpio/gpio-74x164.c (renamed from drivers/gpio/74x164.c)33
-rw-r--r--drivers/gpio/gpio-ab8500.c (renamed from drivers/gpio/ab8500-gpio.c)3
-rw-r--r--drivers/gpio/gpio-adp5520.c (renamed from drivers/gpio/adp5520-gpio.c)0
-rw-r--r--drivers/gpio/gpio-adp5588.c (renamed from drivers/gpio/adp5588-gpio.c)0
-rw-r--r--drivers/gpio/gpio-bt8xx.c (renamed from drivers/gpio/bt8xxgpio.c)2
-rw-r--r--drivers/gpio/gpio-cs5535.c (renamed from drivers/gpio/cs5535-gpio.c)0
-rw-r--r--drivers/gpio/gpio-da9052.c277
-rw-r--r--drivers/gpio/gpio-ep93xx.c405
-rw-r--r--drivers/gpio/gpio-exynos4.c5
-rw-r--r--drivers/gpio/gpio-generic.c (renamed from drivers/gpio/basic_mmio_gpio.c)6
-rw-r--r--drivers/gpio/gpio-it8761e.c (renamed from drivers/gpio/it8761e_gpio.c)2
-rw-r--r--drivers/gpio/gpio-janz-ttl.c (renamed from drivers/gpio/janz-ttl.c)0
-rw-r--r--drivers/gpio/gpio-langwell.c (renamed from drivers/gpio/langwell_gpio.c)6
-rw-r--r--drivers/gpio/gpio-max7300.c (renamed from drivers/gpio/max7300.c)2
-rw-r--r--drivers/gpio/gpio-max7301.c (renamed from drivers/gpio/max7301.c)2
-rw-r--r--drivers/gpio/gpio-max730x.c (renamed from drivers/gpio/max730x.c)2
-rw-r--r--drivers/gpio/gpio-max732x.c (renamed from drivers/gpio/max732x.c)2
-rw-r--r--drivers/gpio/gpio-mc33880.c (renamed from drivers/gpio/mc33880.c)2
-rw-r--r--drivers/gpio/gpio-mcp23s08.c (renamed from drivers/gpio/mcp23s08.c)291
-rw-r--r--drivers/gpio/gpio-ml-ioh.c (renamed from drivers/gpio/ml_ioh_gpio.c)2
-rw-r--r--drivers/gpio/gpio-mpc5200.c376
-rw-r--r--drivers/gpio/gpio-msm-v1.c636
-rw-r--r--drivers/gpio/gpio-msm-v2.c433
-rw-r--r--drivers/gpio/gpio-mxc.c460
-rw-r--r--drivers/gpio/gpio-mxs.c289
-rw-r--r--drivers/gpio/gpio-omap.c723
-rw-r--r--drivers/gpio/gpio-pca953x.c (renamed from drivers/gpio/pca953x.c)105
-rw-r--r--drivers/gpio/gpio-pcf857x.c (renamed from drivers/gpio/pcf857x.c)2
-rw-r--r--drivers/gpio/gpio-pch.c (renamed from drivers/gpio/pch_gpio.c)0
-rw-r--r--drivers/gpio/gpio-pl061.c (renamed from drivers/gpio/pl061.c)4
-rw-r--r--drivers/gpio/gpio-plat-samsung.c3
-rw-r--r--drivers/gpio/gpio-rdc321x.c (renamed from drivers/gpio/rdc321x-gpio.c)0
-rw-r--r--drivers/gpio/gpio-s5pc100.c5
-rw-r--r--drivers/gpio/gpio-s5pv210.c5
-rw-r--r--drivers/gpio/gpio-sch.c (renamed from drivers/gpio/sch_gpio.c)2
-rw-r--r--drivers/gpio/gpio-stmpe.c (renamed from drivers/gpio/stmpe-gpio.c)0
-rw-r--r--drivers/gpio/gpio-sx150x.c (renamed from drivers/gpio/sx150x.c)0
-rw-r--r--drivers/gpio/gpio-tc3589x.c (renamed from drivers/gpio/tc3589x-gpio.c)0
-rw-r--r--drivers/gpio/gpio-tegra.c441
-rw-r--r--drivers/gpio/gpio-timberdale.c (renamed from drivers/gpio/timbgpio.c)2
-rw-r--r--drivers/gpio/gpio-tps65910.c (renamed from drivers/gpio/tps65910-gpio.c)4
-rw-r--r--drivers/gpio/gpio-tps65912.c156
-rw-r--r--drivers/gpio/gpio-twl4030.c (renamed from drivers/gpio/twl4030-gpio.c)2
-rw-r--r--drivers/gpio/gpio-u300.c13
-rw-r--r--drivers/gpio/gpio-ucb1400.c (renamed from drivers/gpio/ucb1400_gpio.c)0
-rw-r--r--drivers/gpio/gpio-vr41xx.c (renamed from drivers/gpio/vr41xx_giu.c)2
-rw-r--r--drivers/gpio/gpio-vx855.c (renamed from drivers/gpio/vx855_gpio.c)0
-rw-r--r--drivers/gpio/gpio-wm831x.c (renamed from drivers/gpio/wm831x-gpio.c)3
-rw-r--r--drivers/gpio/gpio-wm8350.c (renamed from drivers/gpio/wm8350-gpiolib.c)2
-rw-r--r--drivers/gpio/gpio-wm8994.c (renamed from drivers/gpio/wm8994-gpio.c)2
-rw-r--r--drivers/gpio/gpio-xilinx.c (renamed from drivers/gpio/xilinx_gpio.c)0
-rw-r--r--drivers/gpu/drm/drm_crtc.c3
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c5
-rw-r--r--drivers/gpu/drm/drm_debugfs.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c37
-rw-r--r--drivers/gpu/drm/drm_gem.c48
-rw-r--r--drivers/gpu/drm/drm_irq.c26
-rw-r--r--drivers/gpu/drm/drm_modes.c87
-rw-r--r--drivers/gpu/drm/drm_platform.c5
-rw-r--r--drivers/gpu/drm/drm_scatter.c10
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c248
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c58
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c81
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h91
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c316
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c39
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c4
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c99
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h60
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c29
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c142
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1333
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c221
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h35
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c166
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c90
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c16
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c18
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c76
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c13
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h2
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c46
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c50
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c232
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c167
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c34
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h78
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c148
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c119
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c116
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c22
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c28
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c112
-rw-r--r--drivers/gpu/drm/nouveau/nv40_mpeg.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_mpeg.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c41
-rw-r--r--drivers/gpu/drm/nouveau/nv84_crypt.c2
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_copy.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fb.c68
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c270
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.fuc400
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.h24
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c239
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grgpc.fuc474
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h483
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grhub.fuc808
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h838
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c14
-rw-r--r--drivers/gpu/drm/radeon/Makefile1
-rw-r--r--drivers/gpu/drm/radeon/atom.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c5
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c34
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c180
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c9
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c59
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h6
-rw-r--r--drivers/gpu/drm/radeon/ni.c2
-rw-r--r--drivers/gpu/drm/radeon/nid.h2
-rw-r--r--drivers/gpu/drm/radeon/r600.c16
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c23
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c12
-rw-r--r--drivers/gpu/drm/radeon/r600d.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h28
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c36
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c99
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c51
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_mem.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c34
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c23
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/cayman2
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen3
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r6001
-rw-r--r--drivers/gpu/drm/radeon/rs600.c8
-rw-r--r--drivers/gpu/drm/radeon/rv770.c8
-rw-r--r--drivers/gpu/drm/sis/sis_drv.h4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c16
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_lock.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c18
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c8
-rw-r--r--drivers/gpu/vga/vgaarb.c3
-rw-r--r--drivers/hid/Kconfig44
-rw-r--r--drivers/hid/Makefile3
-rw-r--r--drivers/hid/hid-axff.c1
-rw-r--r--drivers/hid/hid-core.c14
-rw-r--r--drivers/hid/hid-emsff.c7
-rw-r--r--drivers/hid/hid-holtekff.c240
-rw-r--r--drivers/hid/hid-ids.h16
-rw-r--r--drivers/hid/hid-lg.c74
-rw-r--r--drivers/hid/hid-microsoft.c28
-rw-r--r--drivers/hid/hid-multitouch.c16
-rw-r--r--drivers/hid/hid-prodikeys.c17
-rw-r--r--drivers/hid/hid-roccat-arvo.c21
-rw-r--r--drivers/hid/hid-roccat-arvo.h13
-rw-r--r--drivers/hid/hid-roccat-common.c20
-rw-r--r--drivers/hid/hid-roccat-common.h4
-rw-r--r--drivers/hid/hid-roccat-kone.c56
-rw-r--r--drivers/hid/hid-roccat-kone.h2
-rw-r--r--drivers/hid/hid-roccat-koneplus.c49
-rw-r--r--drivers/hid/hid-roccat-koneplus.h23
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c25
-rw-r--r--drivers/hid/hid-roccat-kovaplus.h9
-rw-r--r--drivers/hid/hid-roccat-pyra.c25
-rw-r--r--drivers/hid/hid-roccat-pyra.h9
-rw-r--r--drivers/hid/hid-sony.c35
-rw-r--r--drivers/hid/hid-speedlink.c89
-rw-r--r--drivers/hid/hid-uclogic.c195
-rw-r--r--drivers/hid/hid-wiimote.c489
-rw-r--r--drivers/hid/usbhid/hid-core.c2
-rw-r--r--drivers/hwmon/Kconfig150
-rw-r--r--drivers/hwmon/Makefile15
-rw-r--r--drivers/hwmon/asus_atk0110.c1
-rw-r--r--drivers/hwmon/coretemp.c177
-rw-r--r--drivers/hwmon/emc6w201.c58
-rw-r--r--drivers/hwmon/f71882fg.c19
-rw-r--r--drivers/hwmon/gl520sm.c2
-rw-r--r--drivers/hwmon/hwmon-vid.c44
-rw-r--r--drivers/hwmon/ibmaem.c15
-rw-r--r--drivers/hwmon/it87.c31
-rw-r--r--drivers/hwmon/lm78.c305
-rw-r--r--drivers/hwmon/lm90.c65
-rw-r--r--drivers/hwmon/lm95241.c51
-rw-r--r--drivers/hwmon/lm95245.c543
-rw-r--r--drivers/hwmon/max1111.c38
-rw-r--r--drivers/hwmon/max1668.c502
-rw-r--r--drivers/hwmon/ntc_thermistor.c453
-rw-r--r--drivers/hwmon/pmbus/Kconfig100
-rw-r--r--drivers/hwmon/pmbus/Makefile13
-rw-r--r--drivers/hwmon/pmbus/adm1275.c (renamed from drivers/hwmon/adm1275.c)92
-rw-r--r--drivers/hwmon/pmbus/lm25066.c352
-rw-r--r--drivers/hwmon/pmbus/max16064.c (renamed from drivers/hwmon/max16064.c)57
-rw-r--r--drivers/hwmon/pmbus/max34440.c (renamed from drivers/hwmon/max34440.c)81
-rw-r--r--drivers/hwmon/pmbus/max8688.c (renamed from drivers/hwmon/max8688.c)69
-rw-r--r--drivers/hwmon/pmbus/pmbus.c (renamed from drivers/hwmon/pmbus.c)56
-rw-r--r--drivers/hwmon/pmbus/pmbus.h (renamed from drivers/hwmon/pmbus.h)49
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c (renamed from drivers/hwmon/pmbus_core.c)402
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c (renamed from drivers/hwmon/ucd9000.c)0
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c (renamed from drivers/hwmon/ucd9200.c)0
-rw-r--r--drivers/hwmon/sch5627.c336
-rw-r--r--drivers/hwmon/sch5636.c539
-rw-r--r--drivers/hwmon/sch56xx-common.c340
-rw-r--r--drivers/hwmon/sch56xx-common.h24
-rw-r--r--drivers/hwmon/sht15.c4
-rw-r--r--drivers/hwmon/via-cputemp.c44
-rw-r--r--drivers/i2c/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c61
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c8
-rw-r--r--drivers/i2c/busses/i2c-cpm.c7
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c41
-rw-r--r--drivers/i2c/busses/i2c-highlander.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c4
-rw-r--r--drivers/i2c/busses/i2c-pxa.c7
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c7
-rw-r--r--drivers/i2c/busses/i2c-s6000.c5
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c8
-rw-r--r--drivers/i2c/busses/i2c-tegra.c25
-rw-r--r--drivers/i2c/i2c-core.c5
-rw-r--r--drivers/i2c/muxes/pca954x.c7
-rw-r--r--drivers/ide/cy82c693.c2
-rw-r--r--drivers/ide/ide_platform.c6
-rw-r--r--drivers/ide/palm_bk3710.c2
-rw-r--r--drivers/ide/tx4939ide.c4
-rw-r--r--drivers/ieee802154/Makefile2
-rw-r--r--drivers/ieee802154/fakehard.c2
-rw-r--r--drivers/infiniband/core/addr.c7
-rw-r--r--drivers/infiniband/core/cache.c3
-rw-r--r--drivers/infiniband/core/cm.c3
-rw-r--r--drivers/infiniband/core/cma.c84
-rw-r--r--drivers/infiniband/core/device.c6
-rw-r--r--drivers/infiniband/core/netlink.c2
-rw-r--r--drivers/infiniband/core/uverbs_main.c3
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c1
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c9
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c12
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c88
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c9
-rw-r--r--drivers/infiniband/hw/ehca/ehca_tools.h2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c11
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mad.c198
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c68
-rw-r--r--drivers/infiniband/hw/mlx4/main.c21
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c276
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.h93
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c15
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c43
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c15
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c175
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mcg.c101
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c43
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c35
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c77
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c49
-rw-r--r--drivers/infiniband/hw/mthca/mthca_reset.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c33
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c19
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h4
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c50
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c11
-rw-r--r--drivers/infiniband/hw/qib/qib.h3
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c16
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c26
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c97
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c78
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.h143
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c14
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c43
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c20
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c10
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h3
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c4
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c4
-rw-r--r--drivers/input/gameport/gameport.c2
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c3
-rw-r--r--drivers/input/misc/Kconfig13
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/pcspkr.c9
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c3
-rw-r--r--drivers/input/misc/twl4030-vibra.c12
-rw-r--r--drivers/input/misc/twl6040-vibra.c423
-rw-r--r--drivers/input/serio/libps2.c2
-rw-r--r--drivers/input/serio/sa1111ps2.c6
-rw-r--r--drivers/input/serio/xilinx_ps2.c2
-rw-r--r--drivers/iommu/Kconfig110
-rw-r--r--drivers/iommu/Makefile5
-rw-r--r--drivers/iommu/amd_iommu.c2824
-rw-r--r--drivers/iommu/amd_iommu_init.c1574
-rw-r--r--drivers/iommu/amd_iommu_proto.h54
-rw-r--r--drivers/iommu/amd_iommu_types.h585
-rw-r--r--drivers/iommu/dmar.c (renamed from drivers/pci/dmar.c)0
-rw-r--r--drivers/iommu/intel-iommu.c (renamed from drivers/pci/intel-iommu.c)1
-rw-r--r--drivers/iommu/intr_remapping.c (renamed from drivers/pci/intr_remapping.c)1
-rw-r--r--drivers/iommu/intr_remapping.h (renamed from drivers/pci/intr_remapping.h)0
-rw-r--r--drivers/iommu/iommu.c (renamed from drivers/base/iommu.c)0
-rw-r--r--drivers/iommu/iova.c (renamed from drivers/pci/iova.c)0
-rw-r--r--drivers/iommu/msm_iommu.c731
-rw-r--r--drivers/iommu/msm_iommu_dev.c422
-rw-r--r--drivers/isdn/gigaset/gigaset.h2
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c1
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c1
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c1
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c1
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c1
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c1
-rw-r--r--drivers/isdn/hardware/mISDN/speedfax.c1
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c1
-rw-r--r--drivers/isdn/hisax/hisax.h1
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c1
-rw-r--r--drivers/isdn/i4l/isdn_bsdcomp.c2
-rw-r--r--drivers/isdn/i4l/isdn_net.c8
-rw-r--r--drivers/leds/Kconfig20
-rw-r--r--drivers/leds/leds-gpio.c6
-rw-r--r--drivers/leds/leds-lm3530.c67
-rw-r--r--drivers/leds/leds-lp5521.c8
-rw-r--r--drivers/leds/leds-lp5523.c4
-rw-r--r--drivers/leds/leds-pca9532.c2
-rw-r--r--drivers/leds/leds-sunfire.c12
-rw-r--r--drivers/lguest/core.c2
-rw-r--r--drivers/lguest/interrupts_and_traps.c10
-rw-r--r--drivers/lguest/lg.h2
-rw-r--r--drivers/lguest/lguest_device.c37
-rw-r--r--drivers/lguest/lguest_user.c17
-rw-r--r--drivers/lguest/page_tables.c282
-rw-r--r--drivers/lguest/x86/core.c107
-rw-r--r--drivers/macintosh/nvram.c4
-rw-r--r--drivers/md/Kconfig5
-rw-r--r--drivers/md/bitmap.c137
-rw-r--r--drivers/md/bitmap.h5
-rw-r--r--drivers/md/dm-crypt.c64
-rw-r--r--drivers/md/dm-flakey.c270
-rw-r--r--drivers/md/dm-io.c29
-rw-r--r--drivers/md/dm-ioctl.c89
-rw-r--r--drivers/md/dm-kcopyd.c44
-rw-r--r--drivers/md/dm-log-userspace-base.c3
-rw-r--r--drivers/md/dm-log.c32
-rw-r--r--drivers/md/dm-mpath.c149
-rw-r--r--drivers/md/dm-queue-length.c2
-rw-r--r--drivers/md/dm-raid.c621
-rw-r--r--drivers/md/dm-snap-persistent.c80
-rw-r--r--drivers/md/dm-snap.c84
-rw-r--r--drivers/md/dm-table.c157
-rw-r--r--drivers/md/dm.c75
-rw-r--r--drivers/md/dm.h2
-rw-r--r--drivers/md/linear.c8
-rw-r--r--drivers/md/md.c896
-rw-r--r--drivers/md/md.h110
-rw-r--r--drivers/md/raid1.c962
-rw-r--r--drivers/md/raid1.h26
-rw-r--r--drivers/md/raid10.c1183
-rw-r--r--drivers/md/raid10.h21
-rw-r--r--drivers/md/raid5.c1015
-rw-r--r--drivers/md/raid5.h99
-rw-r--r--drivers/media/Kconfig14
-rw-r--r--drivers/media/common/tuners/Kconfig10
-rw-r--r--drivers/media/common/tuners/Makefile1
-rw-r--r--drivers/media/common/tuners/tuner-types.c4
-rw-r--r--drivers/media/common/tuners/xc4000.c1691
-rw-r--r--drivers/media/common/tuners/xc4000.h67
-rw-r--r--drivers/media/dvb/Kconfig4
-rw-r--r--drivers/media/dvb/Makefile3
-rw-r--r--drivers/media/dvb/b2c2/flexcop-common.h1
-rw-r--r--drivers/media/dvb/bt8xx/dvb-bt8xx.c4
-rw-r--r--drivers/media/dvb/ddbridge/Kconfig18
-rw-r--r--drivers/media/dvb/ddbridge/Makefile14
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge-core.c1719
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge-regs.h151
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge.h187
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c1
-rw-r--r--drivers/media/dvb/dvb-core/Makefile4
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c11
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.h21
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig1
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c135
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.h1
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c69
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.h16
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c188
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h3
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.h3
-rw-r--r--drivers/media/dvb/dvb-usb/technisat-usb2.c4
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.h3
-rw-r--r--drivers/media/dvb/firewire/firedtv-avc.c2
-rw-r--r--drivers/media/dvb/firewire/firedtv-ci.c34
-rw-r--r--drivers/media/dvb/frontends/Kconfig21
-rw-r--r--drivers/media/dvb/frontends/Makefile3
-rw-r--r--drivers/media/dvb/frontends/au8522_decoder.c2
-rw-r--r--drivers/media/dvb/frontends/cx24113.c20
-rw-r--r--drivers/media/dvb/frontends/cx24116.c6
-rw-r--r--drivers/media/dvb/frontends/cxd2820r.h4
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_core.c22
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_priv.h4
-rw-r--r--drivers/media/dvb/frontends/dib7000p.c5
-rw-r--r--drivers/media/dvb/frontends/drxd_hard.c9
-rw-r--r--drivers/media/dvb/frontends/drxk.h47
-rw-r--r--drivers/media/dvb/frontends/drxk_hard.c6454
-rw-r--r--drivers/media/dvb/frontends/drxk_hard.h348
-rw-r--r--drivers/media/dvb/frontends/drxk_map.h449
-rw-r--r--drivers/media/dvb/frontends/itd1000.c25
-rw-r--r--drivers/media/dvb/frontends/nxt6000.c2
-rw-r--r--drivers/media/dvb/frontends/s5h1420.c12
-rw-r--r--drivers/media/dvb/frontends/tda18271c2dd.c1251
-rw-r--r--drivers/media/dvb/frontends/tda18271c2dd.h16
-rw-r--r--drivers/media/dvb/frontends/tda18271c2dd_maps.h814
-rw-r--r--drivers/media/dvb/mantis/mantis_ca.c1
-rw-r--r--drivers/media/dvb/mantis/mantis_common.h1
-rw-r--r--drivers/media/dvb/mantis/mantis_evm.c1
-rw-r--r--drivers/media/dvb/mantis/mantis_hif.c1
-rw-r--r--drivers/media/dvb/mantis/mantis_ioc.c1
-rw-r--r--drivers/media/dvb/mantis/mantis_pcmcia.c1
-rw-r--r--drivers/media/dvb/mantis/mantis_uart.c1
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1034.c1
-rw-r--r--drivers/media/dvb/ngene/Kconfig2
-rw-r--r--drivers/media/dvb/ngene/ngene-cards.c182
-rw-r--r--drivers/media/dvb/ngene/ngene-core.c26
-rw-r--r--drivers/media/dvb/ngene/ngene-dvb.c46
-rw-r--r--drivers/media/dvb/ngene/ngene.h7
-rw-r--r--drivers/media/dvb/pluto2/pluto2.c1
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c2
-rw-r--r--drivers/media/dvb/siano/smscoreapi.h1
-rw-r--r--drivers/media/radio/Kconfig4
-rw-r--r--drivers/media/radio/dsbr100.c7
-rw-r--r--drivers/media/radio/radio-aimslab.c5
-rw-r--r--drivers/media/radio/radio-aztech.c5
-rw-r--r--drivers/media/radio/radio-cadet.c5
-rw-r--r--drivers/media/radio/radio-gemtek.c7
-rw-r--r--drivers/media/radio/radio-maxiradio.c10
-rw-r--r--drivers/media/radio/radio-mr800.c6
-rw-r--r--drivers/media/radio/radio-rtrack2.c5
-rw-r--r--drivers/media/radio/radio-sf16fmi.c5
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c531
-rw-r--r--drivers/media/radio/radio-tea5764.c8
-rw-r--r--drivers/media/radio/radio-terratec.c5
-rw-r--r--drivers/media/radio/radio-timb.c3
-rw-r--r--drivers/media/radio/radio-trust.c5
-rw-r--r--drivers/media/radio/radio-typhoon.c9
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/radio-zoltrix.c5
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c4
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c6
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h1
-rw-r--r--drivers/media/radio/si4713-i2c.c4
-rw-r--r--drivers/media/radio/wl128x/fmdrv.h5
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c3
-rw-r--r--drivers/media/rc/Kconfig11
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/ene_ir.c4
-rw-r--r--drivers/media/rc/ene_ir.h2
-rw-r--r--drivers/media/rc/fintek-cir.c5
-rw-r--r--drivers/media/rc/imon.c19
-rw-r--r--drivers/media/rc/ir-lirc-codec.c15
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c449
-rw-r--r--drivers/media/rc/ir-raw.c5
-rw-r--r--drivers/media/rc/ite-cir.c18
-rw-r--r--drivers/media/rc/ite-cir.h3
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c58
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c3
-rw-r--r--drivers/media/rc/lirc_dev.c37
-rw-r--r--drivers/media/rc/mceusb.c99
-rw-r--r--drivers/media/rc/nuvoton-cir.c16
-rw-r--r--drivers/media/rc/nuvoton-cir.h1
-rw-r--r--drivers/media/rc/rc-core-priv.h18
-rw-r--r--drivers/media/rc/rc-loopback.c13
-rw-r--r--drivers/media/rc/rc-main.c52
-rw-r--r--drivers/media/rc/redrat3.c63
-rw-r--r--drivers/media/rc/winbond-cir.c28
-rw-r--r--drivers/media/video/Kconfig44
-rw-r--r--drivers/media/video/Makefile8
-rw-r--r--drivers/media/video/adp1653.c491
-rw-r--r--drivers/media/video/arv.c5
-rw-r--r--drivers/media/video/atmel-isi.c1048
-rw-r--r--drivers/media/video/au0828/au0828-core.c1
-rw-r--r--drivers/media/video/au0828/au0828-video.c5
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c7
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c16
-rw-r--r--drivers/media/video/bt8xx/bttvp.h3
-rw-r--r--drivers/media/video/bw-qcam.c4
-rw-r--r--drivers/media/video/c-qcam.c4
-rw-r--r--drivers/media/video/cafe_ccic-regs.h166
-rw-r--r--drivers/media/video/cafe_ccic.c2267
-rw-r--r--drivers/media/video/cpia2/cpia2.h5
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c12
-rw-r--r--drivers/media/video/cx18/cx18-alsa-main.c1
-rw-r--r--drivers/media/video/cx18/cx18-driver.h1
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c9
-rw-r--r--drivers/media/video/cx18/cx18-version.h8
-rw-r--r--drivers/media/video/cx231xx/cx231xx-avcore.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c78
-rw-r--r--drivers/media/video/cx231xx/cx231xx-core.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-video.c29
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h5
-rw-r--r--drivers/media/video/cx23885/altera-ci.c1
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c1
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c70
-rw-r--r--drivers/media/video/cx23885/cx23885-core.c22
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c23
-rw-r--r--drivers/media/video/cx23885/cx23885-input.c6
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c1
-rw-r--r--drivers/media/video/cx23885/cx23885.h4
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c19
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c20
-rw-r--r--drivers/media/video/cx88/cx88-cards.c150
-rw-r--r--drivers/media/video/cx88/cx88-core.c11
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c77
-rw-r--r--drivers/media/video/cx88/cx88-input.c4
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c35
-rw-r--r--drivers/media/video/cx88/cx88-video.c65
-rw-r--r--drivers/media/video/cx88/cx88.h7
-rw-r--r--drivers/media/video/davinci/Kconfig23
-rw-r--r--drivers/media/video/davinci/Makefile2
-rw-r--r--drivers/media/video/davinci/vpbe.c864
-rw-r--r--drivers/media/video/davinci/vpbe_display.c1860
-rw-r--r--drivers/media/video/davinci/vpbe_osd.c1231
-rw-r--r--drivers/media/video/davinci/vpbe_osd_regs.h364
-rw-r--r--drivers/media/video/davinci/vpbe_venc.c566
-rw-r--r--drivers/media/video/davinci/vpbe_venc_regs.h177
-rw-r--r--drivers/media/video/davinci/vpif.c2
-rw-r--r--drivers/media/video/davinci/vpif_capture.c9
-rw-r--r--drivers/media/video/davinci/vpif_capture.h7
-rw-r--r--drivers/media/video/davinci/vpif_display.c9
-rw-r--r--drivers/media/video/davinci/vpif_display.h8
-rw-r--r--drivers/media/video/em28xx/Kconfig12
-rw-r--r--drivers/media/video/em28xx/Makefile6
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c251
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c159
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c84
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c126
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c17
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c1
-rw-r--r--drivers/media/video/em28xx/em28xx-reg.h1
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c14
-rw-r--r--drivers/media/video/em28xx/em28xx.h24
-rw-r--r--drivers/media/video/et61x251/et61x251.h1
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c16
-rw-r--r--drivers/media/video/fsl-viu.c10
-rw-r--r--drivers/media/video/gspca/Kconfig10
-rw-r--r--drivers/media/video/gspca/Makefile2
-rw-r--r--drivers/media/video/gspca/gl860/gl860.h1
-rw-r--r--drivers/media/video/gspca/gspca.c23
-rw-r--r--drivers/media/video/gspca/ov519.c115
-rw-r--r--drivers/media/video/gspca/se401.c774
-rw-r--r--drivers/media/video/gspca/se401.h90
-rw-r--r--drivers/media/video/gspca/sunplus.c3
-rw-r--r--drivers/media/video/gspca/t613.c2
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c3
-rw-r--r--drivers/media/video/hdpvr/hdpvr-video.c2
-rw-r--r--drivers/media/video/hdpvr/hdpvr.h6
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h1
-rw-r--r--drivers/media/video/ivtv/ivtv-fileops.c19
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c13
-rw-r--r--drivers/media/video/ivtv/ivtv-version.h7
-rw-r--r--drivers/media/video/m5mols/m5mols.h57
-rw-r--r--drivers/media/video/m5mols/m5mols_capture.c24
-rw-r--r--drivers/media/video/m5mols/m5mols_controls.c6
-rw-r--r--drivers/media/video/m5mols/m5mols_core.c145
-rw-r--r--drivers/media/video/m5mols/m5mols_reg.h21
-rw-r--r--drivers/media/video/marvell-ccic/Kconfig23
-rw-r--r--drivers/media/video/marvell-ccic/Makefile6
-rw-r--r--drivers/media/video/marvell-ccic/cafe-driver.c654
-rw-r--r--drivers/media/video/marvell-ccic/mcam-core.c1843
-rw-r--r--drivers/media/video/marvell-ccic/mcam-core.h323
-rw-r--r--drivers/media/video/marvell-ccic/mmp-driver.c340
-rw-r--r--drivers/media/video/mem2mem_testdev.c4
-rw-r--r--drivers/media/video/msp3400-driver.c12
-rw-r--r--drivers/media/video/mt9m001.c14
-rw-r--r--drivers/media/video/mt9m111.c359
-rw-r--r--drivers/media/video/mt9t031.c3
-rw-r--r--drivers/media/video/mt9t112.c10
-rw-r--r--drivers/media/video/mt9v011.c85
-rw-r--r--drivers/media/video/mt9v022.c10
-rw-r--r--drivers/media/video/mt9v032.c20
-rw-r--r--drivers/media/video/mx1_camera.c57
-rw-r--r--drivers/media/video/mx2_camera.c66
-rw-r--r--drivers/media/video/mx3_camera.c71
-rw-r--r--drivers/media/video/omap/Kconfig7
-rw-r--r--drivers/media/video/omap/Makefile1
-rw-r--r--drivers/media/video/omap/omap_vout.c665
-rw-r--r--drivers/media/video/omap/omap_vout_vrfb.c390
-rw-r--r--drivers/media/video/omap/omap_vout_vrfb.h40
-rw-r--r--drivers/media/video/omap/omap_voutdef.h78
-rw-r--r--drivers/media/video/omap/omap_voutlib.c52
-rw-r--r--drivers/media/video/omap/omap_voutlib.h12
-rw-r--r--drivers/media/video/omap1_camera.c57
-rw-r--r--drivers/media/video/omap24xxcam.c14
-rw-r--r--drivers/media/video/omap3isp/isp.c3
-rw-r--r--drivers/media/video/omap3isp/isp.h6
-rw-r--r--drivers/media/video/omap3isp/ispccdc.c7
-rw-r--r--drivers/media/video/omap3isp/ispccp2.c27
-rw-r--r--drivers/media/video/omap3isp/ispccp2.h1
-rw-r--r--drivers/media/video/omap3isp/ispstat.c3
-rw-r--r--drivers/media/video/omap3isp/ispvideo.c1
-rw-r--r--drivers/media/video/omap3isp/ispvideo.h3
-rw-r--r--drivers/media/video/ov2640.c13
-rw-r--r--drivers/media/video/ov5642.c1012
-rw-r--r--drivers/media/video/ov7670.c3
-rw-r--r--drivers/media/video/ov7670.h20
-rw-r--r--drivers/media/video/ov772x.c10
-rw-r--r--drivers/media/video/ov9640.c13
-rw-r--r--drivers/media/video/ov9740.c556
-rw-r--r--drivers/media/video/pms.c4
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c4
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-main.c1
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c9
-rw-r--r--drivers/media/video/pwc/Kconfig1
-rw-r--r--drivers/media/video/pwc/pwc-ctrl.c805
-rw-r--r--drivers/media/video/pwc/pwc-dec1.c28
-rw-r--r--drivers/media/video/pwc/pwc-dec1.h8
-rw-r--r--drivers/media/video/pwc/pwc-dec23.c22
-rw-r--r--drivers/media/video/pwc/pwc-dec23.h10
-rw-r--r--drivers/media/video/pwc/pwc-if.c1399
-rw-r--r--drivers/media/video/pwc/pwc-ioctl.h323
-rw-r--r--drivers/media/video/pwc/pwc-kiara.c1
-rw-r--r--drivers/media/video/pwc/pwc-misc.c4
-rw-r--r--drivers/media/video/pwc/pwc-uncompress.c17
-rw-r--r--drivers/media/video/pwc/pwc-uncompress.h40
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c1257
-rw-r--r--drivers/media/video/pwc/pwc.h411
-rw-r--r--drivers/media/video/pxa_camera.c92
-rw-r--r--drivers/media/video/rj54n1cb0c.c7
-rw-r--r--drivers/media/video/s2255drv.c35
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c23
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c31
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.h29
-rw-r--r--drivers/media/video/s5p-mfc/Makefile5
-rw-r--r--drivers/media/video/s5p-mfc/regs-mfc.h413
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc.c1274
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_cmd.c120
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_cmd.h30
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_common.h572
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c343
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h29
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_debug.h48
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.c1036
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.h23
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c1829
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.h23
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_intr.c92
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_intr.h26
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_opr.c1397
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_opr.h91
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_pm.c117
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_pm.h24
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_shm.c47
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_shm.h91
-rw-r--r--drivers/media/video/s5p-tv/Kconfig76
-rw-r--r--drivers/media/video/s5p-tv/Makefile17
-rw-r--r--drivers/media/video/s5p-tv/hdmi_drv.c1042
-rw-r--r--drivers/media/video/s5p-tv/hdmiphy_drv.c188
-rw-r--r--drivers/media/video/s5p-tv/mixer.h354
-rw-r--r--drivers/media/video/s5p-tv/mixer_drv.c487
-rw-r--r--drivers/media/video/s5p-tv/mixer_grp_layer.c185
-rw-r--r--drivers/media/video/s5p-tv/mixer_reg.c541
-rw-r--r--drivers/media/video/s5p-tv/mixer_video.c1006
-rw-r--r--drivers/media/video/s5p-tv/mixer_vp_layer.c211
-rw-r--r--drivers/media/video/s5p-tv/regs-hdmi.h141
-rw-r--r--drivers/media/video/s5p-tv/regs-mixer.h121
-rw-r--r--drivers/media/video/s5p-tv/regs-sdo.h63
-rw-r--r--drivers/media/video/s5p-tv/regs-vp.h88
-rw-r--r--drivers/media/video/s5p-tv/sdo_drv.c479
-rw-r--r--drivers/media/video/saa7115.c4
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c13
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c12
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c25
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c1
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c2
-rw-r--r--drivers/media/video/saa7134/saa7134.h3
-rw-r--r--drivers/media/video/saa7164/saa7164-encoder.c6
-rw-r--r--drivers/media/video/saa7164/saa7164-vbi.c6
-rw-r--r--drivers/media/video/saa7164/saa7164.h1
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c207
-rw-r--r--drivers/media/video/sh_mobile_csi2.c135
-rw-r--r--drivers/media/video/sh_vou.c3
-rw-r--r--drivers/media/video/sn9c102/sn9c102.h1
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c16
-rw-r--r--drivers/media/video/soc_camera.c281
-rw-r--r--drivers/media/video/soc_camera_platform.c10
-rw-r--r--drivers/media/video/sr030pc30.c7
-rw-r--r--drivers/media/video/tda7432.c5
-rw-r--r--drivers/media/video/timblogiw.c1
-rw-r--r--drivers/media/video/tlg2300/pd-common.h1
-rw-r--r--drivers/media/video/tlg2300/pd-dvb.c2
-rw-r--r--drivers/media/video/tlg2300/pd-main.c1
-rw-r--r--drivers/media/video/tlg2300/pd-radio.c2
-rw-r--r--drivers/media/video/tuner-core.c259
-rw-r--r--drivers/media/video/tw9910.c21
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c12
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c6
-rw-r--r--drivers/media/video/uvc/uvc_driver.c12
-rw-r--r--drivers/media/video/uvc/uvc_entity.c34
-rw-r--r--drivers/media/video/uvc/uvc_queue.c4
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c6
-rw-r--r--drivers/media/video/uvc/uvc_video.c6
-rw-r--r--drivers/media/video/uvc/uvcvideo.h3
-rw-r--r--drivers/media/video/v4l2-common.c3
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c37
-rw-r--r--drivers/media/video/v4l2-ctrls.c826
-rw-r--r--drivers/media/video/v4l2-dev.c39
-rw-r--r--drivers/media/video/v4l2-device.c1
-rw-r--r--drivers/media/video/v4l2-event.c282
-rw-r--r--drivers/media/video/v4l2-fh.c23
-rw-r--r--drivers/media/video/v4l2-ioctl.c68
-rw-r--r--drivers/media/video/v4l2-subdev.c31
-rw-r--r--drivers/media/video/videobuf-dma-sg.c5
-rw-r--r--drivers/media/video/videobuf2-core.c14
-rw-r--r--drivers/media/video/videobuf2-dma-sg.c10
-rw-r--r--drivers/media/video/videobuf2-memops.c7
-rw-r--r--drivers/media/video/vino.c5
-rw-r--r--drivers/media/video/vivi.c91
-rw-r--r--drivers/media/video/w9966.c4
-rw-r--r--drivers/media/video/zoran/zoran.h4
-rw-r--r--drivers/media/video/zoran/zoran_card.c7
-rw-r--r--drivers/media/video/zoran/zoran_driver.c3
-rw-r--r--drivers/media/video/zr364xx.c6
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/message/i2o/i2o_scsi.c2
-rw-r--r--drivers/message/i2o/iop.c8
-rw-r--r--drivers/mfd/Kconfig58
-rw-r--r--drivers/mfd/Makefile12
-rw-r--r--drivers/mfd/aat2870-core.c535
-rw-r--r--drivers/mfd/ab3550-core.c41
-rw-r--r--drivers/mfd/ab8500-core.c231
-rw-r--r--drivers/mfd/ab8500-debugfs.c41
-rw-r--r--drivers/mfd/asic3.c1
-rw-r--r--drivers/mfd/htc-pasic3.c1
-rw-r--r--drivers/mfd/jz4740-adc.c90
-rw-r--r--drivers/mfd/lpc_sch.c49
-rw-r--r--drivers/mfd/max8997-irq.c2
-rw-r--r--drivers/mfd/max8998.c2
-rw-r--r--drivers/mfd/omap-usb-host.c135
-rw-r--r--drivers/mfd/stmpe.c2
-rw-r--r--drivers/mfd/stmpe.h1
-rw-r--r--drivers/mfd/tc6387xb.c2
-rw-r--r--drivers/mfd/timberdale.c8
-rw-r--r--drivers/mfd/tps65910.c13
-rw-r--r--drivers/mfd/tps65911-comparator.c4
-rw-r--r--drivers/mfd/tps65912-core.c177
-rw-r--r--drivers/mfd/tps65912-i2c.c139
-rw-r--r--drivers/mfd/tps65912-irq.c224
-rw-r--r--drivers/mfd/tps65912-spi.c142
-rw-r--r--drivers/mfd/twl-core.c15
-rw-r--r--drivers/mfd/twl4030-audio.c277
-rw-r--r--drivers/mfd/twl4030-codec.c277
-rw-r--r--drivers/mfd/twl4030-madc.c8
-rw-r--r--drivers/mfd/twl6030-pwm.c2
-rw-r--r--drivers/mfd/twl6040-core.c620
-rw-r--r--drivers/mfd/twl6040-irq.c191
-rw-r--r--drivers/mfd/wm831x-auxadc.c299
-rw-r--r--drivers/mfd/wm831x-core.c259
-rw-r--r--drivers/mfd/wm831x-irq.c77
-rw-r--r--drivers/mfd/wm8350-irq.c18
-rw-r--r--drivers/mfd/wm8994-core.c33
-rw-r--r--drivers/mfd/wm8994-irq.c12
-rw-r--r--drivers/misc/Kconfig12
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/atmel-ssc.c2
-rw-r--r--drivers/misc/atmel_pwm.c2
-rw-r--r--drivers/misc/cb710/core.c3
-rw-r--r--drivers/misc/cb710/sgbuf2.c2
-rw-r--r--drivers/misc/eeprom/Kconfig25
-rw-r--r--drivers/misc/eeprom/Makefile2
-rw-r--r--drivers/misc/eeprom/digsy_mtc_eeprom.c85
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c410
-rw-r--r--drivers/misc/fsa9480.c557
-rw-r--r--drivers/misc/ioc4.c2
-rw-r--r--drivers/misc/lkdtm.c8
-rw-r--r--drivers/misc/pch_phub.c9
-rw-r--r--drivers/misc/phantom.c2
-rw-r--r--drivers/misc/pti.c116
-rw-r--r--drivers/misc/ti-st/st_core.c2
-rw-r--r--drivers/misc/ti-st/st_kim.c8
-rw-r--r--drivers/misc/vmw_balloon.c31
-rw-r--r--drivers/mmc/card/block.c686
-rw-r--r--drivers/mmc/card/mmc_test.c532
-rw-r--r--drivers/mmc/card/queue.c230
-rw-r--r--drivers/mmc/card/queue.h36
-rw-r--r--drivers/mmc/core/core.c202
-rw-r--r--drivers/mmc/core/mmc.c79
-rw-r--r--drivers/mmc/core/mmc_ops.c2
-rw-r--r--drivers/mmc/core/sd.c68
-rw-r--r--drivers/mmc/core/sdio.c39
-rw-r--r--drivers/mmc/core/sdio_bus.c10
-rw-r--r--drivers/mmc/host/Kconfig84
-rw-r--r--drivers/mmc/host/Makefile25
-rw-r--r--drivers/mmc/host/at91_mci.c3
-rw-r--r--drivers/mmc/host/at91_mci.h115
-rw-r--r--drivers/mmc/host/atmel-mci.c63
-rw-r--r--drivers/mmc/host/dw_mmc.c454
-rw-r--r--drivers/mmc/host/dw_mmc.h17
-rw-r--r--drivers/mmc/host/mmci.c159
-rw-r--r--drivers/mmc/host/mmci.h13
-rw-r--r--drivers/mmc/host/mxcmmc.c8
-rw-r--r--drivers/mmc/host/mxs-mmc.c30
-rw-r--r--drivers/mmc/host/of_mmc_spi.c5
-rw-r--r--drivers/mmc/host/omap_hsmmc.c675
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c44
-rw-r--r--drivers/mmc/host/sdhci-dove.c43
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c372
-rw-r--r--drivers/mmc/host/sdhci-of-core.c253
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c86
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c67
-rw-r--r--drivers/mmc/host/sdhci-of.h42
-rw-r--r--drivers/mmc/host/sdhci-pci.c54
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c219
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h90
-rw-r--r--drivers/mmc/host/sdhci-pxa.c303
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c244
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c290
-rw-r--r--drivers/mmc/host/sdhci-s3c.c10
-rw-r--r--drivers/mmc/host/sdhci-tegra.c117
-rw-r--r--drivers/mmc/host/sdhci.c81
-rw-r--r--drivers/mmc/host/sh_mmcif.c27
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c41
-rw-r--r--drivers/mmc/host/tmio_mmc.c2
-rw-r--r--drivers/mmc/host/tmio_mmc.h54
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c8
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c209
-rw-r--r--drivers/mmc/host/vub300.c15
-rw-r--r--drivers/mtd/devices/sst25l.c4
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c2
-rw-r--r--drivers/mtd/maps/ixp2000.c11
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c2
-rw-r--r--drivers/mtd/mtdchar.c2
-rw-r--r--drivers/mtd/nand/atmel_nand.c5
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c2
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c6
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c2
-rw-r--r--drivers/mtd/ubi/build.c28
-rw-r--r--drivers/mtd/ubi/cdev.c10
-rw-r--r--drivers/mtd/ubi/debug.c269
-rw-r--r--drivers/mtd/ubi/debug.h113
-rw-r--r--drivers/mtd/ubi/io.c20
-rw-r--r--drivers/mtd/ubi/scan.c2
-rw-r--r--drivers/mtd/ubi/ubi.h8
-rw-r--r--drivers/mtd/ubi/vmt.c2
-rw-r--r--drivers/mtd/ubi/vtbl.c18
-rw-r--r--drivers/mtd/ubi/wl.c42
-rw-r--r--drivers/net/3c503.c1
-rw-r--r--drivers/net/7990.c9
-rw-r--r--drivers/net/8139cp.c84
-rw-r--r--drivers/net/8139too.c2
-rw-r--r--drivers/net/8390.h1
-rw-r--r--drivers/net/Kconfig46
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/a2065.c356
-rw-r--r--drivers/net/ac3200.c1
-rw-r--r--drivers/net/acenic.c105
-rw-r--r--drivers/net/acenic.h14
-rw-r--r--drivers/net/amd8111e.c37
-rw-r--r--drivers/net/amd8111e.h3
-rw-r--r--drivers/net/apne.c1
-rw-r--r--drivers/net/appletalk/ltpc.c8
-rw-r--r--drivers/net/arcnet/arc-rimi.c1
-rw-r--r--drivers/net/arcnet/com20020-isa.c1
-rw-r--r--drivers/net/arcnet/com20020-pci.c1
-rw-r--r--drivers/net/arcnet/com20020.c1
-rw-r--r--drivers/net/arcnet/com90io.c1
-rw-r--r--drivers/net/arcnet/com90xx.c1
-rw-r--r--drivers/net/ariadne.c1267
-rw-r--r--drivers/net/arm/am79c961a.c14
-rw-r--r--drivers/net/arm/at91_ether.c1
-rw-r--r--drivers/net/arm/ep93xx_eth.c1
-rw-r--r--drivers/net/arm/ks8695net.c2
-rw-r--r--drivers/net/atl1c/atl1c.h3
-rw-r--r--drivers/net/atl1c/atl1c_main.c74
-rw-r--r--drivers/net/atl1e/atl1e.h3
-rw-r--r--drivers/net/atl1e/atl1e_main.c84
-rw-r--r--drivers/net/atlx/atl1.c23
-rw-r--r--drivers/net/atlx/atl1.h1
-rw-r--r--drivers/net/atlx/atl2.c83
-rw-r--r--drivers/net/atlx/atl2.h5
-rw-r--r--drivers/net/atlx/atlx.c57
-rw-r--r--drivers/net/b44.c23
-rw-r--r--drivers/net/bcm63xx_enet.c9
-rw-r--r--drivers/net/benet/be.h16
-rw-r--r--drivers/net/benet/be_cmds.c142
-rw-r--r--drivers/net/benet/be_cmds.h29
-rw-r--r--drivers/net/benet/be_ethtool.c55
-rw-r--r--drivers/net/benet/be_main.c430
-rw-r--r--drivers/net/bmac.c9
-rw-r--r--drivers/net/bna/bfa_cee.c63
-rw-r--r--drivers/net/bna/bfa_cee.h3
-rw-r--r--drivers/net/bna/bfa_cs.h (renamed from drivers/net/bna/bfa_sm.h)78
-rw-r--r--drivers/net/bna/bfa_defs.h5
-rw-r--r--drivers/net/bna/bfa_defs_mfg_comm.h20
-rw-r--r--drivers/net/bna/bfa_defs_status.h134
-rw-r--r--drivers/net/bna/bfa_ioc.c157
-rw-r--r--drivers/net/bna/bfa_ioc.h51
-rw-r--r--drivers/net/bna/bfa_wc.h69
-rw-r--r--drivers/net/bna/bfi.h20
-rw-r--r--drivers/net/bna/bna.h18
-rw-r--r--drivers/net/bna/bna_ctrl.c45
-rw-r--r--drivers/net/bna/bna_hw.h92
-rw-r--r--drivers/net/bna/bna_txrx.c44
-rw-r--r--drivers/net/bna/bna_types.h58
-rw-r--r--drivers/net/bna/bnad.c131
-rw-r--r--drivers/net/bna/bnad.h30
-rw-r--r--drivers/net/bna/bnad_ethtool.c2
-rw-r--r--drivers/net/bna/cna.h5
-rw-r--r--drivers/net/bnx2.c97
-rw-r--r--drivers/net/bnx2.h15
-rw-r--r--drivers/net/bnx2x/Makefile2
-rw-r--r--drivers/net/bnx2x/bnx2x.h1105
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c1536
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h675
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c880
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.h30
-rw-r--r--drivers/net/bnx2x/bnx2x_dump.h1721
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c698
-rw-r--r--drivers/net/bnx2x/bnx2x_fw_defs.h519
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h5401
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h409
-rw-r--r--drivers/net/bnx2x/bnx2x_init_ops.h194
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c6457
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h184
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c6254
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h989
-rw-r--r--drivers/net/bnx2x/bnx2x_sp.c5692
-rw-r--r--drivers/net/bnx2x/bnx2x_sp.h1297
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c908
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.h228
-rw-r--r--drivers/net/bonding/bond_3ad.c53
-rw-r--r--drivers/net/bonding/bond_3ad.h8
-rw-r--r--drivers/net/bonding/bond_alb.c4
-rw-r--r--drivers/net/bonding/bond_ipv6.c8
-rw-r--r--drivers/net/bonding/bond_main.c159
-rw-r--r--drivers/net/bonding/bond_procfs.c1
-rw-r--r--drivers/net/bonding/bond_sysfs.c167
-rw-r--r--drivers/net/bonding/bonding.h8
-rw-r--r--drivers/net/bsd_comp.c2
-rw-r--r--drivers/net/caif/Kconfig9
-rw-r--r--drivers/net/caif/Makefile3
-rw-r--r--drivers/net/caif/caif_hsi.c1219
-rw-r--r--drivers/net/caif/caif_serial.c2
-rw-r--r--drivers/net/caif/caif_shm_u5500.c1
-rw-r--r--drivers/net/caif/caif_shmcore.c2
-rw-r--r--drivers/net/caif/caif_spi.c1
-rw-r--r--drivers/net/caif/caif_spi_slave.c1
-rw-r--r--drivers/net/can/Kconfig12
-rw-r--r--drivers/net/can/at91_can.c366
-rw-r--r--drivers/net/can/bfin_can.c137
-rw-r--r--drivers/net/can/c_can/c_can.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c2
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/janz-ican3.c1
-rw-r--r--drivers/net/can/sja1000/plx_pci.c4
-rw-r--r--drivers/net/can/sja1000/sja1000.h1
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c1
-rw-r--r--drivers/net/can/slcan.c65
-rw-r--r--drivers/net/can/softing/softing_fw.c1
-rw-r--r--drivers/net/can/softing/softing_main.c4
-rw-r--r--drivers/net/cassini.c3
-rw-r--r--drivers/net/chelsio/common.h2
-rw-r--r--drivers/net/chelsio/cxgb2.c39
-rw-r--r--drivers/net/chelsio/sge.c18
-rw-r--r--drivers/net/chelsio/sge.h2
-rw-r--r--drivers/net/cnic.c412
-rw-r--r--drivers/net/cnic.h52
-rw-r--r--drivers/net/cnic_defs.h5288
-rw-r--r--drivers/net/cnic_if.h15
-rw-r--r--drivers/net/cpmac.c3
-rw-r--r--drivers/net/cxgb3/adapter.h2
-rw-r--r--drivers/net/cxgb3/common.h1
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c51
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c21
-rw-r--r--drivers/net/cxgb3/l2t.h2
-rw-r--r--drivers/net/cxgb3/sge.c39
-rw-r--r--drivers/net/cxgb3/t3_hw.c11
-rw-r--r--drivers/net/cxgb3/t3cdev.h2
-rw-r--r--drivers/net/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/cxgb4/cxgb4_uld.h2
-rw-r--r--drivers/net/cxgb4/l2t.h2
-rw-r--r--drivers/net/cxgb4vf/adapter.h2
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c68
-rw-r--r--drivers/net/cxgb4vf/sge.c34
-rw-r--r--drivers/net/cxgb4vf/t4vf_hw.c1
-rw-r--r--drivers/net/davinci_cpdma.c2
-rw-r--r--drivers/net/davinci_emac.c13
-rw-r--r--drivers/net/declance.c47
-rw-r--r--drivers/net/depca.c31
-rw-r--r--drivers/net/dl2k.c4
-rw-r--r--drivers/net/dm9000.c27
-rw-r--r--drivers/net/dnet.c3
-rw-r--r--drivers/net/e100.c2
-rw-r--r--drivers/net/e1000/e1000.h2
-rw-r--r--drivers/net/e1000/e1000_ethtool.c75
-rw-r--r--drivers/net/e1000/e1000_hw.c10
-rw-r--r--drivers/net/e1000/e1000_main.c203
-rw-r--r--drivers/net/e1000e/82571.c11
-rw-r--r--drivers/net/e1000e/e1000.h56
-rw-r--r--drivers/net/e1000e/es2lan.c10
-rw-r--r--drivers/net/e1000e/ethtool.c13
-rw-r--r--drivers/net/e1000e/hw.h21
-rw-r--r--drivers/net/e1000e/ich8lan.c263
-rw-r--r--drivers/net/e1000e/lib.c12
-rw-r--r--drivers/net/e1000e/netdev.c258
-rw-r--r--drivers/net/e1000e/phy.c354
-rw-r--r--drivers/net/e2100.c1
-rw-r--r--drivers/net/eexpress.c2
-rw-r--r--drivers/net/ehea/ehea.h1
-rw-r--r--drivers/net/ehea/ehea_main.c54
-rw-r--r--drivers/net/ehea/ehea_qmr.c2
-rw-r--r--drivers/net/enic/enic.h4
-rw-r--r--drivers/net/enic/enic_dev.c11
-rw-r--r--drivers/net/enic/enic_dev.h1
-rw-r--r--drivers/net/enic/enic_main.c153
-rw-r--r--drivers/net/enic/enic_res.c30
-rw-r--r--drivers/net/enic/vnic_cq.c2
-rw-r--r--drivers/net/enic/vnic_cq.h1
-rw-r--r--drivers/net/enic/vnic_dev.c60
-rw-r--r--drivers/net/enic/vnic_dev.h5
-rw-r--r--drivers/net/enic/vnic_devcmd.h19
-rw-r--r--drivers/net/enic/vnic_enet.h11
-rw-r--r--drivers/net/enic/vnic_intr.c7
-rw-r--r--drivers/net/enic/vnic_intr.h6
-rw-r--r--drivers/net/epic100.c4
-rw-r--r--drivers/net/es3210.c1
-rw-r--r--drivers/net/ethoc.c5
-rw-r--r--drivers/net/ewrk3.c39
-rw-r--r--drivers/net/fealnx.c4
-rw-r--r--drivers/net/fec.c134
-rw-r--r--drivers/net/fec_mpc52xx.c13
-rw-r--r--drivers/net/forcedeth.c83
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/fs_enet/mii-bitbang.c4
-rw-r--r--drivers/net/fs_enet/mii-fec.c2
-rw-r--r--drivers/net/ftgmac100.c1365
-rw-r--r--drivers/net/ftgmac100.h246
-rw-r--r--drivers/net/gianfar.c209
-rw-r--r--drivers/net/gianfar.h60
-rw-r--r--drivers/net/gianfar_ethtool.c984
-rw-r--r--drivers/net/gianfar_ptp.c11
-rw-r--r--drivers/net/greth.c9
-rw-r--r--drivers/net/hamachi.c4
-rw-r--r--drivers/net/hamradio/6pack.c6
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c1
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c1
-rw-r--r--drivers/net/hamradio/dmascc.c2
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/hp-plus.c1
-rw-r--r--drivers/net/hp.c1
-rw-r--r--drivers/net/hp100.c18
-rw-r--r--drivers/net/ibm_newemac/core.c35
-rw-r--r--drivers/net/ibm_newemac/emac.h19
-rw-r--r--drivers/net/ibm_newemac/phy.c7
-rw-r--r--drivers/net/ibmveth.c3
-rw-r--r--drivers/net/ifb.c59
-rw-r--r--drivers/net/igb/Makefile2
-rw-r--r--drivers/net/igb/e1000_82575.c22
-rw-r--r--drivers/net/igb/e1000_82575.h4
-rw-r--r--drivers/net/igb/e1000_defines.h17
-rw-r--r--drivers/net/igb/e1000_hw.h2
-rw-r--r--drivers/net/igb/e1000_mac.c5
-rw-r--r--drivers/net/igb/e1000_mac.h2
-rw-r--r--drivers/net/igb/e1000_mbx.c2
-rw-r--r--drivers/net/igb/e1000_mbx.h2
-rw-r--r--drivers/net/igb/e1000_nvm.c3
-rw-r--r--drivers/net/igb/e1000_nvm.h2
-rw-r--r--drivers/net/igb/e1000_phy.c2
-rw-r--r--drivers/net/igb/e1000_phy.h2
-rw-r--r--drivers/net/igb/e1000_regs.h2
-rw-r--r--drivers/net/igb/igb.h6
-rw-r--r--drivers/net/igb/igb_ethtool.c107
-rw-r--r--drivers/net/igb/igb_main.c131
-rw-r--r--drivers/net/igbvf/igbvf.h4
-rw-r--r--drivers/net/igbvf/netdev.c48
-rw-r--r--drivers/net/ioc3-eth.c9
-rw-r--r--drivers/net/ipg.c1
-rw-r--r--drivers/net/irda/ali-ircc.c1
-rw-r--r--drivers/net/irda/donauboe.c1
-rw-r--r--drivers/net/irda/nsc-ircc.c1
-rw-r--r--drivers/net/irda/pxaficp_ir.c2
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c4
-rw-r--r--drivers/net/irda/sir_dev.c1
-rw-r--r--drivers/net/irda/smsc-ircc2.c21
-rw-r--r--drivers/net/irda/toim3232-sir.c2
-rw-r--r--drivers/net/irda/via-ircc.c1
-rw-r--r--drivers/net/irda/via-ircc.h2
-rw-r--r--drivers/net/irda/vlsi_ir.c1
-rw-r--r--drivers/net/irda/w83977af_ir.c1
-rw-r--r--drivers/net/iseries_veth.c14
-rw-r--r--drivers/net/ixgb/ixgb_ee.c9
-rw-r--r--drivers/net/ixgb/ixgb_hw.c2
-rw-r--r--drivers/net/ixgbe/ixgbe.h102
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c43
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c646
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c245
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h5
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c10
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h7
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c43
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c119
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.h14
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c129
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c594
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c279
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h12
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c1361
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c3
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c16
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h99
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c3
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h8
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c88
-rw-r--r--drivers/net/ixp2000/ixpdev.c1
-rw-r--r--drivers/net/jme.c73
-rw-r--r--drivers/net/jme.h7
-rw-r--r--drivers/net/korina.c7
-rw-r--r--drivers/net/ks8842.c1
-rw-r--r--drivers/net/ks8851.c1
-rw-r--r--drivers/net/ks8851_mll.c4
-rw-r--r--drivers/net/ksz884x.c3
-rw-r--r--drivers/net/lib8390.c290
-rw-r--r--drivers/net/ll_temac_main.c7
-rw-r--r--drivers/net/lne390.c1
-rw-r--r--drivers/net/macb.c8
-rw-r--r--drivers/net/mace.c1
-rw-r--r--drivers/net/macmace.c2
-rw-r--r--drivers/net/macvlan.c29
-rw-r--r--drivers/net/macvtap.c134
-rw-r--r--drivers/net/mipsnet.c1
-rw-r--r--drivers/net/mlx4/en_ethtool.c9
-rw-r--r--drivers/net/mlx4/en_main.c3
-rw-r--r--drivers/net/mlx4/en_netdev.c54
-rw-r--r--drivers/net/mlx4/en_port.c31
-rw-r--r--drivers/net/mlx4/en_rx.c26
-rw-r--r--drivers/net/mlx4/en_selftest.c3
-rw-r--r--drivers/net/mlx4/en_tx.c5
-rw-r--r--drivers/net/mlx4/fw.c39
-rw-r--r--drivers/net/mlx4/fw.h8
-rw-r--r--drivers/net/mlx4/main.c64
-rw-r--r--drivers/net/mlx4/mcg.c17
-rw-r--r--drivers/net/mlx4/mlx4.h5
-rw-r--r--drivers/net/mlx4/mlx4_en.h6
-rw-r--r--drivers/net/mlx4/port.c17
-rw-r--r--drivers/net/mlx4/reset.c2
-rw-r--r--drivers/net/mv643xx_eth.c10
-rw-r--r--drivers/net/myri10ge/myri10ge.c268
-rw-r--r--drivers/net/myri10ge/myri10ge_mcp_gen_header.h2
-rw-r--r--drivers/net/myri_sbus.c1187
-rw-r--r--drivers/net/myri_sbus.h311
-rw-r--r--drivers/net/natsemi.c13
-rw-r--r--drivers/net/netx-eth.c1
-rw-r--r--drivers/net/netxen/netxen_nic.h5
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c16
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c6
-rw-r--r--drivers/net/netxen/netxen_nic_init.c74
-rw-r--r--drivers/net/netxen/netxen_nic_main.c13
-rw-r--r--drivers/net/niu.c43
-rw-r--r--drivers/net/ns83820.c34
-rw-r--r--drivers/net/octeon/octeon_mgmt.c1
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c2
-rw-r--r--drivers/net/pcnet32.c11
-rw-r--r--drivers/net/phy/dp83640.c5
-rw-r--r--drivers/net/phy/icplus.c6
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/plip.c6
-rw-r--r--drivers/net/ppp_async.c1
-rw-r--r--drivers/net/ppp_deflate.c5
-rw-r--r--drivers/net/ppp_generic.c2
-rw-r--r--drivers/net/ppp_synctty.c1
-rw-r--r--drivers/net/pppoe.c3
-rw-r--r--drivers/net/pptp.c1
-rw-r--r--drivers/net/ps3_gelic_net.c44
-rw-r--r--drivers/net/ps3_gelic_net.h1
-rw-r--r--drivers/net/pxa168_eth.c7
-rw-r--r--drivers/net/qla3xxx.c5
-rw-r--r--drivers/net/qlcnic/qlcnic.h46
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c37
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c163
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c74
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c133
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c78
-rw-r--r--drivers/net/qlge/qlge.h7
-rw-r--r--drivers/net/qlge/qlge_ethtool.c2
-rw-r--r--drivers/net/qlge/qlge_main.c206
-rw-r--r--drivers/net/r6040.c11
-rw-r--r--drivers/net/r8169.c667
-rw-r--r--drivers/net/rionet.c30
-rw-r--r--drivers/net/s2io.c90
-rw-r--r--drivers/net/s2io.h5
-rw-r--r--drivers/net/sb1250-mac.c2
-rw-r--r--drivers/net/sc92031.c1
-rw-r--r--drivers/net/sfc/Kconfig7
-rw-r--r--drivers/net/sfc/efx.c60
-rw-r--r--drivers/net/sfc/enum.h3
-rw-r--r--drivers/net/sfc/ethtool.c27
-rw-r--r--drivers/net/sfc/falcon.c51
-rw-r--r--drivers/net/sfc/filter.c47
-rw-r--r--drivers/net/sfc/net_driver.h12
-rw-r--r--drivers/net/sfc/nic.c1
-rw-r--r--drivers/net/sfc/siena.c49
-rw-r--r--drivers/net/sgiseeq.c1
-rw-r--r--drivers/net/sh_eth.c19
-rw-r--r--drivers/net/sis190.c13
-rw-r--r--drivers/net/sis900.c4
-rw-r--r--drivers/net/skge.c103
-rw-r--r--drivers/net/skge.h1
-rw-r--r--drivers/net/sky2.c165
-rw-r--r--drivers/net/sky2.h16
-rw-r--r--drivers/net/slhc.c2
-rw-r--r--drivers/net/slip.c39
-rw-r--r--drivers/net/slip.h1
-rw-r--r--drivers/net/smc-mca.c1
-rw-r--r--drivers/net/smc-ultra.c1
-rw-r--r--drivers/net/smc911x.c5
-rw-r--r--drivers/net/smc9194.c5
-rw-r--r--drivers/net/smc91x.c5
-rw-r--r--drivers/net/smsc911x.c2
-rw-r--r--drivers/net/smsc9420.c3
-rw-r--r--drivers/net/spider_net.c5
-rw-r--r--drivers/net/starfire.c90
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c1
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c1
-rw-r--r--drivers/net/stmmac/dwmac100_core.c1
-rw-r--r--drivers/net/stmmac/dwmac100_dma.c1
-rw-r--r--drivers/net/stmmac/stmmac.h11
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c2
-rw-r--r--drivers/net/stmmac/stmmac_main.c192
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c84
-rw-r--r--drivers/net/sunbmac.c8
-rw-r--r--drivers/net/sungem.c898
-rw-r--r--drivers/net/sungem.h25
-rw-r--r--drivers/net/sunhme.c16
-rw-r--r--drivers/net/sunlance.c8
-rw-r--r--drivers/net/sunqe.c7
-rw-r--r--drivers/net/tehuti.c37
-rw-r--r--drivers/net/tehuti.h1
-rw-r--r--drivers/net/tg3.c1065
-rw-r--r--drivers/net/tg3.h39
-rw-r--r--drivers/net/tlan.c2
-rw-r--r--drivers/net/tokenring/3c359.c6
-rw-r--r--drivers/net/tokenring/ibmtr.c3
-rw-r--r--drivers/net/tokenring/madgemc.c2
-rw-r--r--drivers/net/tsi108_eth.c1
-rw-r--r--drivers/net/tulip/de2104x.c1
-rw-r--r--drivers/net/tulip/de4x5.c12
-rw-r--r--drivers/net/tulip/dmfe.c4
-rw-r--r--drivers/net/tulip/pnic.c1
-rw-r--r--drivers/net/tulip/tulip_core.c1
-rw-r--r--drivers/net/tun.c33
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/ucc_geth.c7
-rw-r--r--drivers/net/usb/asix.c11
-rw-r--r--drivers/net/usb/cdc-phonet.c1
-rw-r--r--drivers/net/usb/cdc_ncm.c192
-rw-r--r--drivers/net/usb/hso.c7
-rw-r--r--drivers/net/usb/ipheth.c15
-rw-r--r--drivers/net/usb/kalmia.c42
-rw-r--r--drivers/net/usb/rtl8150.c1
-rw-r--r--drivers/net/usb/zaurus.c10
-rw-r--r--drivers/net/veth.c75
-rw-r--r--drivers/net/via-velocity.c48
-rw-r--r--drivers/net/via-velocity.h2
-rw-r--r--drivers/net/virtio_net.c79
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c290
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c53
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h13
-rw-r--r--drivers/net/vxge/vxge-config.c57
-rw-r--r--drivers/net/vxge/vxge-config.h2
-rw-r--r--drivers/net/vxge/vxge-main.c159
-rw-r--r--drivers/net/vxge/vxge-main.h53
-rw-r--r--drivers/net/vxge/vxge-traffic.c6
-rw-r--r--drivers/net/wan/c101.c30
-rw-r--r--drivers/net/wan/cosa.c226
-rw-r--r--drivers/net/wan/cycx_drv.c81
-rw-r--r--drivers/net/wan/cycx_main.c32
-rw-r--r--drivers/net/wan/cycx_x25.c177
-rw-r--r--drivers/net/wan/dlci.c22
-rw-r--r--drivers/net/wan/dscc4.c81
-rw-r--r--drivers/net/wan/farsync.c8
-rw-r--r--drivers/net/wan/hd64570.c8
-rw-r--r--drivers/net/wan/hd64572.c8
-rw-r--r--drivers/net/wan/hdlc.c16
-rw-r--r--drivers/net/wan/hdlc_cisco.c21
-rw-r--r--drivers/net/wan/hdlc_fr.c101
-rw-r--r--drivers/net/wan/hdlc_ppp.c9
-rw-r--r--drivers/net/wan/hdlc_x25.c12
-rw-r--r--drivers/net/wan/hostess_sv11.c11
-rw-r--r--drivers/net/wan/ixp4xx_hss.c28
-rw-r--r--drivers/net/wan/lapbether.c18
-rw-r--r--drivers/net/wan/lmc/lmc_var.h2
-rw-r--r--drivers/net/wan/n2.c42
-rw-r--r--drivers/net/wan/pc300_drv.c4
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/pc300too.c36
-rw-r--r--drivers/net/wan/pci200syn.c29
-rw-r--r--drivers/net/wan/sbni.c72
-rw-r--r--drivers/net/wan/sdla.c52
-rw-r--r--drivers/net/wan/sealevel.c11
-rw-r--r--drivers/net/wan/wanxl.c93
-rw-r--r--drivers/net/wan/x25_asy.c47
-rw-r--r--drivers/net/wan/z85230.c34
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h2
-rw-r--r--drivers/net/wireless/adm8211.c1
-rw-r--r--drivers/net/wireless/airo.c1
-rw-r--r--drivers/net/wireless/ath/ath.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c50
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c89
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h438
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c55
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c1214
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h206
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c49
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c423
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h21
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c13
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c58
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h12
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c16
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c74
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c263
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c55
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c40
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c133
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h89
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c93
-rw-r--r--drivers/net/wireless/ath/ath5k/rfbuffer.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/rfgain.h8
-rw-r--r--drivers/net/wireless/ath/ath5k/rfkill.c65
-rw-r--r--drivers/net/wireless/ath/ath5k/sysfs.c45
-rw-r--r--drivers/net/wireless/ath/ath5k/trace.h21
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c45
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c34
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c57
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c188
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c131
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c41
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h1147
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h1080
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h18
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c81
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c229
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h17
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c41
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c179
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c56
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c200
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h44
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c353
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h13
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.h4
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h19
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h21
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h56
-rw-r--r--drivers/net/wireless/ath/carl9170/led.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c129
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c21
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.c12
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c290
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h6
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h25
-rw-r--r--drivers/net/wireless/ath/key.c7
-rw-r--r--drivers/net/wireless/atmel.c1
-rw-r--r--drivers/net/wireless/b43/Kconfig32
-rw-r--r--drivers/net/wireless/b43/Makefile5
-rw-r--r--drivers/net/wireless/b43/b43.h63
-rw-r--r--drivers/net/wireless/b43/bus.c255
-rw-r--r--drivers/net/wireless/b43/bus.h70
-rw-r--r--drivers/net/wireless/b43/debugfs.c2
-rw-r--r--drivers/net/wireless/b43/dma.c97
-rw-r--r--drivers/net/wireless/b43/dma.h4
-rw-r--r--drivers/net/wireless/b43/leds.c17
-rw-r--r--drivers/net/wireless/b43/lo.c8
-rw-r--r--drivers/net/wireless/b43/main.c677
-rw-r--r--drivers/net/wireless/b43/main.h4
-rw-r--r--drivers/net/wireless/b43/pcmcia.c2
-rw-r--r--drivers/net/wireless/b43/phy_a.c23
-rw-r--r--drivers/net/wireless/b43/phy_common.c22
-rw-r--r--drivers/net/wireless/b43/phy_common.h6
-rw-r--r--drivers/net/wireless/b43/phy_g.c94
-rw-r--r--drivers/net/wireless/b43/phy_ht.c413
-rw-r--r--drivers/net/wireless/b43/phy_ht.h46
-rw-r--r--drivers/net/wireless/b43/phy_lcn.c52
-rw-r--r--drivers/net/wireless/b43/phy_lcn.h14
-rw-r--r--drivers/net/wireless/b43/phy_lp.c137
-rw-r--r--drivers/net/wireless/b43/phy_n.c122
-rw-r--r--drivers/net/wireless/b43/pio.c12
-rw-r--r--drivers/net/wireless/b43/radio_2055.c2
-rw-r--r--drivers/net/wireless/b43/radio_2055.h5
-rw-r--r--drivers/net/wireless/b43/radio_2056.h5
-rw-r--r--drivers/net/wireless/b43/radio_2059.c174
-rw-r--r--drivers/net/wireless/b43/radio_2059.h54
-rw-r--r--drivers/net/wireless/b43/rfkill.c11
-rw-r--r--drivers/net/wireless/b43/sdio.c12
-rw-r--r--drivers/net/wireless/b43/sysfs.c6
-rw-r--r--drivers/net/wireless/b43/tables.c2
-rw-r--r--drivers/net/wireless/b43/tables_lpphy.c17
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c2
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h8
-rw-r--r--drivers/net/wireless/b43/tables_phy_ht.c750
-rw-r--r--drivers/net/wireless/b43/tables_phy_ht.h22
-rw-r--r--drivers/net/wireless/b43/tables_phy_lcn.c34
-rw-r--r--drivers/net/wireless/b43/tables_phy_lcn.h6
-rw-r--r--drivers/net/wireless/b43/wa.c26
-rw-r--r--drivers/net/wireless/b43/xmit.c7
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h4
-rw-r--r--drivers/net/wireless/b43legacy/debugfs.c2
-rw-r--r--drivers/net/wireless/b43legacy/dma.c19
-rw-r--r--drivers/net/wireless/b43legacy/dma.h2
-rw-r--r--drivers/net/wireless/b43legacy/ilt.c2
-rw-r--r--drivers/net/wireless/b43legacy/leds.c2
-rw-r--r--drivers/net/wireless/b43legacy/main.c12
-rw-r--r--drivers/net/wireless/b43legacy/main.h2
-rw-r--r--drivers/net/wireless/b43legacy/phy.c2
-rw-r--r--drivers/net/wireless/b43legacy/phy.h2
-rw-r--r--drivers/net/wireless/b43legacy/pio.c2
-rw-r--r--drivers/net/wireless/b43legacy/radio.c2
-rw-r--r--drivers/net/wireless/b43legacy/radio.h2
-rw-r--r--drivers/net/wireless/b43legacy/rfkill.c2
-rw-r--r--drivers/net/wireless/b43legacy/sysfs.c2
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_wlan.h1
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.h1
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c4
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_wx.c1
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c10
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c79
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rs.c3
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rx.c78
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-tx.c3
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c18
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-commands.h11
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c91
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h17
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debugfs.c192
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h61
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.c3
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.h60
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.c1
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-helpers.h9
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-rx.c21
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-scan.c92
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c301
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c465
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c124
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c124
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c74
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c328
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ict.c306
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c774
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c38
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c235
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c394
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.c38
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c614
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c224
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c2038
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h87
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-bus.h139
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h222
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c169
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h196
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c94
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h181
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c42
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c291
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c564
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c47
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h93
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c296
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sv-open.c194
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h251
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h82
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c979
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c (renamed from drivers/net/wireless/iwlwifi/iwl-tx.c)734
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.c1172
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h225
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.c2
-rw-r--r--drivers/net/wireless/libertas/cfg.c1
-rw-r--r--drivers/net/wireless/libertas/cmd.c47
-rw-r--r--drivers/net/wireless/libertas/cmd.h2
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c7
-rw-r--r--drivers/net/wireless/libertas/debugfs.c1
-rw-r--r--drivers/net/wireless/libertas/dev.h2
-rw-r--r--drivers/net/wireless/libertas/ethtool.c1
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c34
-rw-r--r--drivers/net/wireless/libertas/if_spi.c8
-rw-r--r--drivers/net/wireless/libertas/main.c15
-rw-r--r--drivers/net/wireless/libertas/mesh.c973
-rw-r--r--drivers/net/wireless/libertas/mesh.h31
-rw-r--r--drivers/net/wireless/libertas/rx.c1
-rw-r--r--drivers/net/wireless/libertas/tx.c3
-rw-r--r--drivers/net/wireless/libertas_tf/cmd.c1
-rw-r--r--drivers/net/wireless/libertas_tf/main.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c464
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h133
-rw-r--r--drivers/net/wireless/mwifiex/11n.c33
-rw-r--r--drivers/net/wireless/mwifiex/11n.h14
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c54
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c5
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.h5
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c56
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c28
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c35
-rw-r--r--drivers/net/wireless/mwifiex/decl.h4
-rw-r--r--drivers/net/wireless/mwifiex/fw.h11
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h1
-rw-r--r--drivers/net/wireless/mwifiex/join.c23
-rw-r--r--drivers/net/wireless/mwifiex/main.c36
-rw-r--r--drivers/net/wireless/mwifiex/main.h30
-rw-r--r--drivers/net/wireless/mwifiex/scan.c6
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c94
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h6
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c113
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c50
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c14
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c13
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c2
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c32
-rw-r--r--drivers/net/wireless/mwl8k.c7
-rw-r--r--drivers/net/wireless/orinoco/airport.c9
-rw-r--r--drivers/net/wireless/orinoco/cfg.c6
-rw-r--r--drivers/net/wireless/orinoco/fw.c7
-rw-r--r--drivers/net/wireless/orinoco/fw.h2
-rw-r--r--drivers/net/wireless/orinoco/hermes.c40
-rw-r--r--drivers/net/wireless/orinoco/hermes.h37
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.c8
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.h12
-rw-r--r--drivers/net/wireless/orinoco/hw.c48
-rw-r--r--drivers/net/wireless/orinoco/hw.h2
-rw-r--r--drivers/net/wireless/orinoco/main.c48
-rw-r--r--drivers/net/wireless/orinoco/mic.c8
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h16
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c11
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c3
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c4
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c6
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c23
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c10
-rw-r--r--drivers/net/wireless/orinoco/wext.c14
-rw-r--r--drivers/net/wireless/p54/p54pci.h1
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c1
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.h1
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c1
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig9
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h16
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c381
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c104
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c19
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c113
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c18
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c1
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig15
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile1
-rw-r--r--drivers/net/wireless/rtlwifi/base.c26
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c8
-rw-r--r--drivers/net/wireless/rtlwifi/core.c18
-rw-r--r--drivers/net/wireless/rtlwifi/core.h2
-rw-r--r--drivers/net/wireless/rtlwifi/debug.h5
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c14
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c282
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h12
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c97
-rw-r--r--drivers/net/wireless/rtlwifi/ps.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c2
-rw-r--r--drivers/net/wireless/rtlwifi/regd.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c23
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/led.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c77
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/led.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c32
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/Makefile14
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/def.h269
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c1355
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.h212
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.c790
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.h155
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c2329
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.h66
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/led.c159
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/led.h38
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c3831
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.h178
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/reg.h1313
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/rf.c628
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/rf.h44
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c423
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.h37
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/table.c1690
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/table.h57
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c959
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.h756
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/fw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c38
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/led.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c27
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/reg.h6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/rf.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c12
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c12
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h10
-rw-r--r--drivers/net/wireless/wl1251/acx.c6
-rw-r--r--drivers/net/wireless/wl1251/cmd.c2
-rw-r--r--drivers/net/wireless/wl1251/sdio.c1
-rw-r--r--drivers/net/wireless/wl1251/spi.c1
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig2
-rw-r--r--drivers/net/wireless/wl12xx/acx.c67
-rw-r--r--drivers/net/wireless/wl12xx/acx.h20
-rw-r--r--drivers/net/wireless/wl12xx/boot.c40
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c109
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h62
-rw-r--r--drivers/net/wireless/wl12xx/conf.h65
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c153
-rw-r--r--drivers/net/wireless/wl12xx/event.c101
-rw-r--r--drivers/net/wireless/wl12xx/event.h29
-rw-r--r--drivers/net/wireless/wl12xx/ini.h3
-rw-r--r--drivers/net/wireless/wl12xx/init.c27
-rw-r--r--drivers/net/wireless/wl12xx/io.c7
-rw-r--r--drivers/net/wireless/wl12xx/io.h15
-rw-r--r--drivers/net/wireless/wl12xx/main.c793
-rw-r--r--drivers/net/wireless/wl12xx/ps.c21
-rw-r--r--drivers/net/wireless/wl12xx/rx.c39
-rw-r--r--drivers/net/wireless/wl12xx/rx.h12
-rw-r--r--drivers/net/wireless/wl12xx/scan.c90
-rw-r--r--drivers/net/wireless/wl12xx/scan.h18
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c83
-rw-r--r--drivers/net/wireless/wl12xx/spi.c16
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c7
-rw-r--r--drivers/net/wireless/wl12xx/tx.c176
-rw-r--r--drivers/net/wireless/wl12xx/tx.h28
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h63
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_def.h6
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c118
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h1
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c129
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h5
-rw-r--r--drivers/net/xen-netback/netback.c1
-rw-r--r--drivers/net/xen-netfront.c69
-rw-r--r--drivers/net/xilinx_emaclite.c14
-rw-r--r--drivers/net/yellowfin.c6
-rw-r--r--drivers/net/znet.c2
-rw-r--r--drivers/net/zorro8390.c673
-rw-r--r--drivers/nfc/Kconfig24
-rw-r--r--drivers/nfc/Makefile3
-rw-r--r--drivers/nfc/pn533.c1632
-rw-r--r--drivers/of/Kconfig8
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/address.c20
-rw-r--r--drivers/of/base.c66
-rw-r--r--drivers/of/gpio.c16
-rw-r--r--drivers/of/of_net.c45
-rw-r--r--drivers/of/of_pci.c113
-rw-r--r--drivers/of/of_pci_irq.c92
-rw-r--r--drivers/of/platform.c200
-rw-r--r--drivers/oprofile/oprofile_perf.c4
-rw-r--r--drivers/oprofile/oprofile_stats.h2
-rw-r--r--drivers/parport/parport_ax88796.c2
-rw-r--r--drivers/parport/parport_pc.c54
-rw-r--r--drivers/parport/parport_serial.c229
-rw-r--r--drivers/pci/Makefile7
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c6
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c2
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c17
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c3
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c11
-rw-r--r--drivers/pci/hotplug/pcihp_slot.c45
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c2
-rw-r--r--drivers/pci/hotplug/shpchp_sysfs.c21
-rw-r--r--drivers/pci/of.c61
-rw-r--r--drivers/pci/pci-driver.c18
-rw-r--r--drivers/pci/pci-label.c2
-rw-r--r--drivers/pci/pci.c82
-rw-r--r--drivers/pci/pci.h8
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c76
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c3
-rw-r--r--drivers/pci/pcie/aspm.c2
-rw-r--r--drivers/pci/probe.c242
-rw-r--r--drivers/pci/quirks.c25
-rw-r--r--drivers/pci/setup-bus.c184
-rw-r--r--drivers/pci/setup-irq.c4
-rw-r--r--drivers/pci/setup-res.c155
-rw-r--r--drivers/pci/xen-pcifront.c2
-rw-r--r--drivers/pcmcia/at91_cf.c7
-rw-r--r--drivers/pcmcia/electra_cf.c4
-rw-r--r--drivers/pcmcia/pxa2xx_balloon3.c10
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x255.c11
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x270.c11
-rw-r--r--drivers/pcmcia/pxa2xx_colibri.c11
-rw-r--r--drivers/pcmcia/pxa2xx_mainstone.c10
-rw-r--r--drivers/pcmcia/pxa2xx_palmld.c11
-rw-r--r--drivers/pcmcia/pxa2xx_palmtc.c11
-rw-r--r--drivers/pcmcia/pxa2xx_palmtx.c11
-rw-r--r--drivers/pcmcia/pxa2xx_sharpsl.c3
-rw-r--r--drivers/pcmcia/pxa2xx_stargate2.c34
-rw-r--r--drivers/pcmcia/pxa2xx_trizeps4.c4
-rw-r--r--drivers/pcmcia/pxa2xx_viper.c10
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c4
-rw-r--r--drivers/pcmcia/rsrc_iodyn.c6
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c6
-rw-r--r--drivers/pcmcia/soc_common.c7
-rw-r--r--drivers/platform/x86/Kconfig8
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acer-wmi.c87
-rw-r--r--drivers/platform/x86/acerhdf.c13
-rw-r--r--drivers/platform/x86/asus-laptop.c9
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c27
-rw-r--r--drivers/platform/x86/asus-wmi.c242
-rw-r--r--drivers/platform/x86/asus-wmi.h7
-rw-r--r--drivers/platform/x86/compal-laptop.c4
-rw-r--r--drivers/platform/x86/dell-laptop.c31
-rw-r--r--drivers/platform/x86/dell-wmi.c10
-rw-r--r--drivers/platform/x86/eeepc-wmi.c27
-rw-r--r--drivers/platform/x86/hp-wmi.c11
-rw-r--r--drivers/platform/x86/ideapad-laptop.c195
-rw-r--r--drivers/platform/x86/intel_ips.c4
-rw-r--r--drivers/platform/x86/intel_menlow.c2
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c26
-rw-r--r--drivers/platform/x86/intel_oaktrail.c1
-rw-r--r--drivers/platform/x86/intel_rar_register.c4
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c2
-rw-r--r--drivers/platform/x86/msi-laptop.c10
-rw-r--r--drivers/platform/x86/msi-wmi.c1
-rw-r--r--drivers/platform/x86/samsung-laptop.c20
-rw-r--r--drivers/platform/x86/samsung-q10.c196
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c83
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c62
-rw-r--r--drivers/pnp/pnpbios/rsparser.c12
-rw-r--r--drivers/power/Kconfig14
-rw-r--r--drivers/power/Makefile2
-rw-r--r--drivers/power/apm_power.c8
-rw-r--r--drivers/power/bq20z75.c103
-rw-r--r--drivers/power/ds2782_battery.c4
-rw-r--r--drivers/power/gpio-charger.c2
-rw-r--r--drivers/power/max17042_battery.c175
-rw-r--r--drivers/power/max8903_charger.c16
-rw-r--r--drivers/power/max8997_charger.c207
-rw-r--r--drivers/power/max8998_charger.c219
-rw-r--r--drivers/power/s3c_adc_battery.c3
-rw-r--r--drivers/power/twl4030_charger.c10
-rw-r--r--drivers/power/wm831x_backup.c12
-rw-r--r--drivers/power/wm831x_power.c26
-rw-r--r--drivers/regulator/Kconfig14
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/aat2870-regulator.c232
-rw-r--r--drivers/regulator/core.c190
-rw-r--r--drivers/regulator/db8500-prcmu.c14
-rw-r--r--drivers/regulator/dummy.c32
-rw-r--r--drivers/regulator/max8952.c2
-rw-r--r--drivers/regulator/max8997.c55
-rw-r--r--drivers/regulator/tps65023-regulator.c97
-rw-r--r--drivers/regulator/tps65910-regulator.c63
-rw-r--r--drivers/regulator/tps65912-regulator.c800
-rw-r--r--drivers/regulator/twl-regulator.c66
-rw-r--r--drivers/regulator/wm831x-dcdc.c126
-rw-r--r--drivers/regulator/wm831x-ldo.c25
-rw-r--r--drivers/regulator/wm8994-regulator.c4
-rw-r--r--drivers/rtc/Kconfig20
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/class.c65
-rw-r--r--drivers/rtc/interface.c55
-rw-r--r--drivers/rtc/rtc-at32ap700x.c2
-rw-r--r--drivers/rtc/rtc-cmos.c6
-rw-r--r--drivers/rtc/rtc-ds1286.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c1
-rw-r--r--drivers/rtc/rtc-ds1511.c2
-rw-r--r--drivers/rtc/rtc-ds1742.c2
-rw-r--r--drivers/rtc/rtc-m48t35.c2
-rw-r--r--drivers/rtc/rtc-m48t59.c2
-rw-r--r--drivers/rtc/rtc-mpc5121.c81
-rw-r--r--drivers/rtc/rtc-mrst.c5
-rw-r--r--drivers/rtc/rtc-omap.c2
-rw-r--r--drivers/rtc/rtc-pm8xxx.c550
-rw-r--r--drivers/rtc/rtc-puv3.c5
-rw-r--r--drivers/rtc/rtc-s3c.c32
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c153
-rw-r--r--drivers/rtc/rtc-tegra.c2
-rw-r--r--drivers/rtc/rtc-twl.c2
-rw-r--r--drivers/rtc/rtc-vt8500.c51
-rw-r--r--drivers/s390/block/dasd.c577
-rw-r--r--drivers/s390/block/dasd_eckd.c9
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/block/dasd_int.h57
-rw-r--r--drivers/s390/block/dasd_ioctl.c38
-rw-r--r--drivers/s390/block/dasd_proc.c110
-rw-r--r--drivers/s390/char/Kconfig3
-rw-r--r--drivers/s390/char/sclp_async.c9
-rw-r--r--drivers/s390/char/sclp_quiesce.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c2
-rw-r--r--drivers/s390/char/vmwatchdog.c4
-rw-r--r--drivers/s390/cio/css.c8
-rw-r--r--drivers/s390/cio/device.h2
-rw-r--r--drivers/s390/cio/qdio.h2
-rw-r--r--drivers/s390/cio/qdio_debug.c12
-rw-r--r--drivers/s390/cio/qdio_main.c23
-rw-r--r--drivers/s390/cio/qdio_thinint.c17
-rw-r--r--drivers/s390/crypto/ap_bus.c98
-rw-r--r--drivers/s390/crypto/ap_bus.h22
-rw-r--r--drivers/s390/crypto/zcrypt_api.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c2
-rw-r--r--drivers/s390/crypto/zcrypt_mono.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c2
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c2
-rw-r--r--drivers/s390/net/ctcm_mpc.h1
-rw-r--r--drivers/s390/net/fsm.h2
-rw-r--r--drivers/s390/net/qeth_core.h2
-rw-r--r--drivers/s390/net/qeth_core_main.c1
-rw-r--r--drivers/s390/net/qeth_l3_main.c125
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c2
-rw-r--r--drivers/sbus/char/display7seg.c2
-rw-r--r--drivers/scsi/Kconfig13
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aha152x.c17
-rw-r--r--drivers/scsi/atari_NCR5380.c6
-rw-r--r--drivers/scsi/atari_scsi.c1
-rw-r--r--drivers/scsi/be2iscsi/be_main.c199
-rw-r--r--drivers/scsi/be2iscsi/be_main.h4
-rw-r--r--drivers/scsi/bfa/Makefile2
-rw-r--r--drivers/scsi/bfa/bfa.h187
-rw-r--r--drivers/scsi/bfa/bfa_core.c912
-rw-r--r--drivers/scsi/bfa/bfa_defs.h754
-rw-r--r--drivers/scsi/bfa/bfa_defs_fcs.h27
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h269
-rw-r--r--drivers/scsi/bfa/bfa_fc.h166
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c49
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.h16
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c1214
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h134
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c179
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h32
-rw-r--r--drivers/scsi/bfa/bfa_fcs_fcpim.c47
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c403
-rw-r--r--drivers/scsi/bfa/bfa_fcs_rport.c170
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c130
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c114
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c3551
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h655
-rw-r--r--drivers/scsi/bfa/bfa_ioc_cb.c69
-rw-r--r--drivers/scsi/bfa/bfa_ioc_ct.c516
-rw-r--r--drivers/scsi/bfa/bfa_modules.h30
-rw-r--r--drivers/scsi/bfa/bfa_port.c428
-rw-r--r--drivers/scsi/bfa/bfa_port.h62
-rw-r--r--drivers/scsi/bfa/bfa_svc.c1375
-rw-r--r--drivers/scsi/bfa/bfa_svc.h180
-rw-r--r--drivers/scsi/bfa/bfad.c303
-rw-r--r--drivers/scsi/bfa/bfad_attr.c53
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c3235
-rw-r--r--drivers/scsi/bfa/bfad_bsg.h746
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c14
-rw-r--r--drivers/scsi/bfa/bfad_drv.h30
-rw-r--r--drivers/scsi/bfa/bfad_im.c58
-rw-r--r--drivers/scsi/bfa/bfad_im.h25
-rw-r--r--drivers/scsi/bfa/bfi.h657
-rw-r--r--drivers/scsi/bfa/bfi_cbreg.h305
-rw-r--r--drivers/scsi/bfa/bfi_ctreg.h636
-rw-r--r--drivers/scsi/bfa/bfi_ms.h159
-rw-r--r--drivers/scsi/bfa/bfi_reg.h450
-rw-r--r--drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h1162
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h121
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_constants.h139
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.h16
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c441
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c765
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c841
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c219
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c75
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_constants.h4
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h14
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h35
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c215
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c174
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c40
-rw-r--r--drivers/scsi/bnx2i/bnx2i_sysfs.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c6
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c84
-rw-r--r--drivers/scsi/dpt/dpti_i2o.h2
-rw-r--r--drivers/scsi/fcoe/fcoe.c187
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_main.c21
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c2
-rw-r--r--drivers/scsi/hpsa.c22
-rw-r--r--drivers/scsi/hpsa.h2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c4
-rw-r--r--drivers/scsi/ipr.c12
-rw-r--r--drivers/scsi/isci/Makefile8
-rw-r--r--drivers/scsi/isci/firmware/Makefile19
-rw-r--r--drivers/scsi/isci/firmware/README36
-rw-r--r--drivers/scsi/isci/firmware/create_fw.c99
-rw-r--r--drivers/scsi/isci/firmware/create_fw.h77
-rw-r--r--drivers/scsi/isci/host.c2751
-rw-r--r--drivers/scsi/isci/host.h542
-rw-r--r--drivers/scsi/isci/init.c565
-rw-r--r--drivers/scsi/isci/isci.h538
-rw-r--r--drivers/scsi/isci/phy.c1312
-rw-r--r--drivers/scsi/isci/phy.h504
-rw-r--r--drivers/scsi/isci/port.c1757
-rw-r--r--drivers/scsi/isci/port.h306
-rw-r--r--drivers/scsi/isci/port_config.c754
-rw-r--r--drivers/scsi/isci/probe_roms.c243
-rw-r--r--drivers/scsi/isci/probe_roms.h249
-rw-r--r--drivers/scsi/isci/registers.h1934
-rw-r--r--drivers/scsi/isci/remote_device.c1501
-rw-r--r--drivers/scsi/isci/remote_device.h352
-rw-r--r--drivers/scsi/isci/remote_node_context.c627
-rw-r--r--drivers/scsi/isci/remote_node_context.h224
-rw-r--r--drivers/scsi/isci/remote_node_table.c598
-rw-r--r--drivers/scsi/isci/remote_node_table.h188
-rw-r--r--drivers/scsi/isci/request.c3391
-rw-r--r--drivers/scsi/isci/request.h448
-rw-r--r--drivers/scsi/isci/sas.h219
-rw-r--r--drivers/scsi/isci/scu_completion_codes.h283
-rw-r--r--drivers/scsi/isci/scu_event_codes.h336
-rw-r--r--drivers/scsi/isci/scu_remote_node_context.h229
-rw-r--r--drivers/scsi/isci/scu_task_context.h942
-rw-r--r--drivers/scsi/isci/task.c1676
-rw-r--r--drivers/scsi/isci/task.h367
-rw-r--r--drivers/scsi/isci/unsolicited_frame_control.c225
-rw-r--r--drivers/scsi/isci/unsolicited_frame_control.h278
-rw-r--r--drivers/scsi/iscsi_boot_sysfs.c31
-rw-r--r--drivers/scsi/iscsi_tcp.c61
-rw-r--r--drivers/scsi/libfc/fc_exch.c35
-rw-r--r--drivers/scsi/libfc/fc_fcp.c9
-rw-r--r--drivers/scsi/libfc/fc_lport.c3
-rw-r--r--drivers/scsi/libfc/fc_rport.c28
-rw-r--r--drivers/scsi/libiscsi.c36
-rw-r--r--drivers/scsi/libiscsi_tcp.c14
-rw-r--r--drivers/scsi/libsas/sas_expander.c3
-rw-r--r--drivers/scsi/lpfc/lpfc.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c161
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c89
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h12
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c1357
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h125
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c105
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c222
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h30
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c90
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c97
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c399
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h29
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac_scsi.c14
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c18
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c1
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h12
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h74
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_init.h6
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h4
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c84
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h77
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c12
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_debug.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c283
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c24
-rw-r--r--drivers/scsi/mvsas/Kconfig9
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c101
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c508
-rw-r--r--drivers/scsi/mvsas/mv_94xx.h99
-rw-r--r--drivers/scsi/mvsas/mv_chips.h17
-rw-r--r--drivers/scsi/mvsas/mv_defs.h11
-rw-r--r--drivers/scsi/mvsas/mv_init.c187
-rw-r--r--drivers/scsi/mvsas/mv_sas.c422
-rw-r--r--drivers/scsi/mvsas/mv_sas.h105
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h2
-rw-r--r--drivers/scsi/pmcraid.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c183
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c441
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c396
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h187
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c371
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c856
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c120
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c667
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c1091
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c160
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c556
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c747
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c275
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/scsi_pm.c8
-rw-r--r--drivers/scsi/scsi_transport_spi.c24
-rw-r--r--drivers/scsi/ses.c6
-rw-r--r--drivers/scsi/sr.c46
-rw-r--r--drivers/scsi/sr.h7
-rw-r--r--drivers/scsi/sun3_NCR5380.c98
-rw-r--r--drivers/scsi/sun3_scsi.c11
-rw-r--r--drivers/scsi/sun3_scsi_vme.c11
-rw-r--r--drivers/sh/clk/core.c36
-rw-r--r--drivers/spi/Kconfig48
-rw-r--r--drivers/spi/Makefile111
-rw-r--r--drivers/spi/atmel_spi.h167
-rw-r--r--drivers/spi/spi-altera.c (renamed from drivers/spi/spi_altera.c)0
-rw-r--r--drivers/spi/spi-ath79.c (renamed from drivers/spi/ath79_spi.c)2
-rw-r--r--drivers/spi/spi-atmel.c (renamed from drivers/spi/atmel_spi.c)155
-rw-r--r--drivers/spi/spi-au1550.c (renamed from drivers/spi/au1550_spi.c)2
-rw-r--r--drivers/spi/spi-bfin-sport.c (renamed from drivers/spi/spi_bfin_sport.c)0
-rw-r--r--drivers/spi/spi-bfin5xx.c (renamed from drivers/spi/spi_bfin5xx.c)218
-rw-r--r--drivers/spi/spi-bitbang-txrx.h (renamed from drivers/spi/spi_bitbang_txrx.h)0
-rw-r--r--drivers/spi/spi-bitbang.c (renamed from drivers/spi/spi_bitbang.c)8
-rw-r--r--drivers/spi/spi-butterfly.c (renamed from drivers/spi/spi_butterfly.c)4
-rw-r--r--drivers/spi/spi-coldfire-qspi.c (renamed from drivers/spi/coldfire_qspi.c)0
-rw-r--r--drivers/spi/spi-davinci.c (renamed from drivers/spi/davinci_spi.c)0
-rw-r--r--drivers/spi/spi-dw-mid.c (renamed from drivers/spi/dw_spi_mid.c)4
-rw-r--r--drivers/spi/spi-dw-mmio.c (renamed from drivers/spi/dw_spi_mmio.c)4
-rw-r--r--drivers/spi/spi-dw-pci.c (renamed from drivers/spi/dw_spi_pci.c)4
-rw-r--r--drivers/spi/spi-dw.c (renamed from drivers/spi/dw_spi.c)8
-rw-r--r--drivers/spi/spi-dw.h (renamed from drivers/spi/dw_spi.h)1
-rw-r--r--drivers/spi/spi-ep93xx.c (renamed from drivers/spi/ep93xx_spi.c)303
-rw-r--r--drivers/spi/spi-fsl-espi.c (renamed from drivers/spi/spi_fsl_espi.c)2
-rw-r--r--drivers/spi/spi-fsl-lib.c (renamed from drivers/spi/spi_fsl_lib.c)2
-rw-r--r--drivers/spi/spi-fsl-lib.h (renamed from drivers/spi/spi_fsl_lib.h)0
-rw-r--r--drivers/spi/spi-fsl-spi.c (renamed from drivers/spi/spi_fsl_spi.c)30
-rw-r--r--drivers/spi/spi-gpio.c (renamed from drivers/spi/spi_gpio.c)6
-rw-r--r--drivers/spi/spi-imx.c (renamed from drivers/spi/spi_imx.c)466
-rw-r--r--drivers/spi/spi-lm70llp.c (renamed from drivers/spi/spi_lm70llp.c)4
-rw-r--r--drivers/spi/spi-mpc512x-psc.c (renamed from drivers/spi/mpc512x_psc_spi.c)0
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c (renamed from drivers/spi/mpc52xx_psc_spi.c)0
-rw-r--r--drivers/spi/spi-mpc52xx.c (renamed from drivers/spi/mpc52xx_spi.c)0
-rw-r--r--drivers/spi/spi-nuc900.c (renamed from drivers/spi/spi_nuc900.c)5
-rw-r--r--drivers/spi/spi-oc-tiny.c (renamed from drivers/spi/spi_oc_tiny.c)0
-rw-r--r--drivers/spi/spi-omap-100k.c (renamed from drivers/spi/omap_spi_100k.c)0
-rw-r--r--drivers/spi/spi-omap-uwire.c (renamed from drivers/spi/omap_uwire.c)2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c (renamed from drivers/spi/omap2_mcspi.c)10
-rw-r--r--drivers/spi/spi-orion.c (renamed from drivers/spi/orion_spi.c)8
-rw-r--r--drivers/spi/spi-pl022.c (renamed from drivers/spi/amba-pl022.c)122
-rw-r--r--drivers/spi/spi-ppc4xx.c (renamed from drivers/spi/spi_ppc4xx.c)2
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c (renamed from drivers/spi/pxa2xx_spi_pci.c)0
-rw-r--r--drivers/spi/spi-pxa2xx.c (renamed from drivers/spi/pxa2xx_spi.c)0
-rw-r--r--drivers/spi/spi-s3c24xx-fiq.S (renamed from drivers/spi/spi_s3c24xx_fiq.S)2
-rw-r--r--drivers/spi/spi-s3c24xx-fiq.h (renamed from drivers/spi/spi_s3c24xx_fiq.h)0
-rw-r--r--drivers/spi/spi-s3c24xx.c (renamed from drivers/spi/spi_s3c24xx.c)5
-rw-r--r--drivers/spi/spi-s3c64xx.c (renamed from drivers/spi/spi_s3c64xx.c)7
-rw-r--r--drivers/spi/spi-sh-msiof.c (renamed from drivers/spi/spi_sh_msiof.c)0
-rw-r--r--drivers/spi/spi-sh-sci.c (renamed from drivers/spi/spi_sh_sci.c)2
-rw-r--r--drivers/spi/spi-sh.c (renamed from drivers/spi/spi_sh.c)0
-rw-r--r--drivers/spi/spi-stmp.c (renamed from drivers/spi/spi_stmp.c)0
-rw-r--r--drivers/spi/spi-tegra.c (renamed from drivers/spi/spi_tegra.c)20
-rw-r--r--drivers/spi/spi-ti-ssp.c (renamed from drivers/spi/ti-ssp-spi.c)0
-rw-r--r--drivers/spi/spi-tle62x0.c (renamed from drivers/spi/tle62x0.c)2
-rw-r--r--drivers/spi/spi-topcliff-pch.c (renamed from drivers/spi/spi_topcliff_pch.c)1158
-rw-r--r--drivers/spi/spi-txx9.c (renamed from drivers/spi/spi_txx9.c)2
-rw-r--r--drivers/spi/spi-xilinx.c (renamed from drivers/spi/xilinx_spi.c)0
-rw-r--r--drivers/spi/spi.c2
-rw-r--r--drivers/spi/spi_s3c24xx_gpio.c201
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/ssb/b43_pci_bridge.c2
-rw-r--r--drivers/ssb/driver_chipcommon.c2
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c6
-rw-r--r--drivers/ssb/driver_extif.c2
-rw-r--r--drivers/ssb/driver_gige.c15
-rw-r--r--drivers/ssb/driver_mipscore.c2
-rw-r--r--drivers/ssb/driver_pcicore.c30
-rw-r--r--drivers/ssb/embedded.c2
-rw-r--r--drivers/ssb/main.c40
-rw-r--r--drivers/ssb/pci.c11
-rw-r--r--drivers/ssb/pcihost_wrapper.c8
-rw-r--r--drivers/ssb/pcmcia.c2
-rw-r--r--drivers/ssb/scan.c5
-rw-r--r--drivers/ssb/sdio.c2
-rw-r--r--drivers/ssb/sprom.c2
-rw-r--r--drivers/staging/Kconfig10
-rw-r--r--drivers/staging/Makefile5
-rw-r--r--drivers/staging/altera-stapl/Kconfig2
-rw-r--r--drivers/staging/altera-stapl/Makefile2
-rw-r--r--drivers/staging/altera-stapl/altera.c33
-rw-r--r--drivers/staging/ath6kl/TODO2
-rw-r--r--drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c2
-rw-r--r--drivers/staging/ath6kl/os/linux/ar6000_drv.c57
-rw-r--r--drivers/staging/ath6kl/os/linux/cfg80211.c211
-rw-r--r--drivers/staging/ath6kl/os/linux/include/ar6000_drv.h20
-rw-r--r--drivers/staging/ath6kl/os/linux/include/cfg80211.h11
-rw-r--r--drivers/staging/ath6kl/os/linux/include/config_linux.h2
-rw-r--r--drivers/staging/ath6kl/os/linux/include/ieee80211_ioctl.h2
-rw-r--r--drivers/staging/ath6kl/os/linux/include/osapi_linux.h1
-rw-r--r--drivers/staging/ath6kl/wmi/wmi.c6
-rw-r--r--drivers/staging/bcm/Debug.h2
-rw-r--r--drivers/staging/bcm/InterfaceRx.c4
-rw-r--r--drivers/staging/bcm/Macros.h4
-rw-r--r--drivers/staging/bcm/Misc.c10
-rw-r--r--drivers/staging/bcm/PHSModule.c24
-rw-r--r--drivers/staging/bcm/PHSModule.h2
-rw-r--r--drivers/staging/bcm/headers.h8
-rw-r--r--drivers/staging/bcm/sort.c77
-rw-r--r--drivers/staging/brcm80211/Kconfig2
-rw-r--r--drivers/staging/brcm80211/Makefile4
-rw-r--r--drivers/staging/brcm80211/README65
-rw-r--r--drivers/staging/brcm80211/TODO12
-rw-r--r--drivers/staging/brcm80211/brcmfmac/Makefile27
-rw-r--r--drivers/staging/brcm80211/brcmfmac/README2
-rw-r--r--drivers/staging/brcm80211/brcmfmac/aiutils.c1
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmcdc.h98
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmchip.h9
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdbus.h113
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh.c631
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c386
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c925
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.h134
-rw-r--r--drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc_linux.c235
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd.h970
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_bus.h76
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_cdc.c348
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_common.c1606
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_custom_gpio.c158
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_dbg.h105
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_linux.c2238
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_linux_sched.c25
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_proto.h63
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_sdio.c4746
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhdioctl.h100
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dngl_stats.h32
-rw-r--r--drivers/staging/brcm80211/brcmfmac/hndrte_armtrap.h75
-rw-r--r--drivers/staging/brcm80211/brcmfmac/hndrte_cons.h62
-rw-r--r--drivers/staging/brcm80211/brcmfmac/msgtrace.h61
-rw-r--r--drivers/staging/brcm80211/brcmfmac/sdio_host.h347
-rw-r--r--drivers/staging/brcm80211/brcmfmac/sdioh.h63
-rw-r--r--drivers/staging/brcm80211/brcmfmac/sdiovar.h38
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c2480
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h208
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_iw.c3693
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_iw.h142
-rw-r--r--drivers/staging/brcm80211/brcmsmac/Makefile45
-rw-r--r--drivers/staging/brcm80211/brcmsmac/aiutils.c717
-rw-r--r--drivers/staging/brcm80211/brcmsmac/aiutils.h230
-rw-r--r--drivers/staging/brcm80211/brcmsmac/alloc.c (renamed from drivers/staging/brcm80211/brcmsmac/wlc_alloc.c)153
-rw-r--r--drivers/staging/brcm80211/brcmsmac/alloc.h (renamed from drivers/staging/brcm80211/brcmsmac/wlc_alloc.h)5
-rw-r--r--drivers/staging/brcm80211/brcmsmac/ampdu.c (renamed from drivers/staging/brcm80211/brcmsmac/wlc_ampdu.c)410
-rw-r--r--drivers/staging/brcm80211/brcmsmac/ampdu.h (renamed from drivers/staging/brcm80211/brcmsmac/wlc_types.h)31
-rw-r--r--drivers/staging/brcm80211/brcmsmac/antsel.c (renamed from drivers/staging/brcm80211/brcmsmac/wlc_antsel.c)113
-rw-r--r--drivers/staging/brcm80211/brcmsmac/antsel.h (renamed from drivers/staging/brcm80211/brcmsmac/wlc_antsel.h)16
-rw-r--r--drivers/staging/brcm80211/brcmsmac/bcmotp.c936
-rw-r--r--drivers/staging/brcm80211/brcmsmac/bcmsrom.c714
-rw-r--r--drivers/staging/brcm80211/brcmsmac/bmac.c (renamed from drivers/staging/brcm80211/brcmsmac/wlc_bmac.c)1112
-rw-r--r--drivers/staging/brcm80211/brcmsmac/bmac.h174
-rw-r--r--drivers/staging/brcm80211/brcmsmac/channel.c (renamed from drivers/staging/brcm80211/brcmsmac/wlc_channel.c)594
-rw-r--r--drivers/staging/brcm80211/brcmsmac/channel.h (renamed from drivers/staging/brcm80211/brcmsmac/wlc_channel.h)82
-rw-r--r--drivers/staging/brcm80211/brcmsmac/d11.h160
-rw-r--r--drivers/staging/brcm80211/brcmsmac/dma.c (renamed from drivers/staging/brcm80211/brcmsmac/hnddma.c)563
-rw-r--r--drivers/staging/brcm80211/brcmsmac/dma.h (renamed from drivers/staging/brcm80211/include/hnddma.h)146
-rw-r--r--drivers/staging/brcm80211/brcmsmac/mac80211_if.c (renamed from drivers/staging/brcm80211/brcmsmac/wl_mac80211.c)823
-rw-r--r--drivers/staging/brcm80211/brcmsmac/mac80211_if.h (renamed from drivers/staging/brcm80211/brcmsmac/wl_mac80211.h)75
-rw-r--r--drivers/staging/brcm80211/brcmsmac/main.c (renamed from drivers/staging/brcm80211/brcmsmac/wlc_main.c)4123
-rw-r--r--drivers/staging/brcm80211/brcmsmac/main.h (renamed from drivers/staging/brcm80211/brcmsmac/wlc_main.h)794
-rw-r--r--drivers/staging/brcm80211/brcmsmac/nicpci.c672
-rw-r--r--drivers/staging/brcm80211/brcmsmac/nicpci.h85
-rw-r--r--drivers/staging/brcm80211/brcmsmac/nvram.c215
-rw-r--r--drivers/staging/brcm80211/brcmsmac/otp.c544
-rw-r--r--drivers/staging/brcm80211/brcmsmac/otp.h (renamed from drivers/staging/brcm80211/include/bcmotp.h)13
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phy_cmn.c (renamed from drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_cmn.c)718
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phy_hal.h294
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phy_int.h (renamed from drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h)467
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phy_lcn.c (renamed from drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_lcn.c)584
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phy_lcn.h (renamed from drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_lcn.h)12
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phy_n.c (renamed from drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_n.c)1031
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phy_qmath.c (renamed from drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_qmath.c)4
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phy_qmath.h (renamed from drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_qmath.h)8
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phy_radio.h (renamed from drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_radio.h)6
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phy_version.h36
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phyreg_n.h (renamed from drivers/staging/brcm80211/brcmsmac/phy/wlc_phyreg_n.h)14
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phytbl_lcn.c (renamed from drivers/staging/brcm80211/brcmsmac/phy/wlc_phytbl_lcn.c)53
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phytbl_lcn.h (renamed from drivers/staging/brcm80211/brcmsmac/phy/wlc_phytbl_lcn.h)35
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phytbl_n.c (renamed from drivers/staging/brcm80211/brcmsmac/phy/wlc_phytbl_n.c)53
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/phytbl_n.h (renamed from drivers/staging/brcm80211/brcmsmac/phy/wlc_phytbl_n.h)21
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_hal.h256
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy_shim.c218
-rw-r--r--drivers/staging/brcm80211/brcmsmac/phy_shim.h164
-rw-r--r--drivers/staging/brcm80211/brcmsmac/pmu.c474
-rw-r--r--drivers/staging/brcm80211/brcmsmac/pmu.h (renamed from drivers/staging/brcm80211/brcmsmac/wlc_pmu.h)44
-rw-r--r--drivers/staging/brcm80211/brcmsmac/pub.h (renamed from drivers/staging/brcm80211/brcmsmac/wlc_pub.h)427
-rw-r--r--drivers/staging/brcm80211/brcmsmac/rate.c (renamed from drivers/staging/brcm80211/brcmsmac/wlc_rate.c)207
-rw-r--r--drivers/staging/brcm80211/brcmsmac/rate.h (renamed from drivers/staging/brcm80211/brcmsmac/wlc_rate.h)116
-rw-r--r--drivers/staging/brcm80211/brcmsmac/scb.h (renamed from drivers/staging/brcm80211/brcmsmac/wlc_scb.h)31
-rw-r--r--drivers/staging/brcm80211/brcmsmac/srom.c (renamed from drivers/staging/brcm80211/brcmsmac/bcmsrom_tbl.h)890
-rw-r--r--drivers/staging/brcm80211/brcmsmac/srom.h (renamed from drivers/staging/brcm80211/include/bcmsrom.h)12
-rw-r--r--drivers/staging/brcm80211/brcmsmac/stf.c (renamed from drivers/staging/brcm80211/brcmsmac/wlc_stf.c)204
-rw-r--r--drivers/staging/brcm80211/brcmsmac/stf.h42
-rw-r--r--drivers/staging/brcm80211/brcmsmac/types.h398
-rw-r--r--drivers/staging/brcm80211/brcmsmac/ucode_loader.c (renamed from drivers/staging/brcm80211/brcmsmac/wl_ucode_loader.c)80
-rw-r--r--drivers/staging/brcm80211/brcmsmac/ucode_loader.h (renamed from drivers/staging/brcm80211/brcmsmac/wl_ucode.h)15
-rw-r--r--drivers/staging/brcm80211/brcmsmac/wl_dbg.h92
-rw-r--r--drivers/staging/brcm80211/brcmsmac/wl_export.h47
-rw-r--r--drivers/staging/brcm80211/brcmsmac/wlc_ampdu.h29
-rw-r--r--drivers/staging/brcm80211/brcmsmac/wlc_bmac.h178
-rw-r--r--drivers/staging/brcm80211/brcmsmac/wlc_bsscfg.h135
-rw-r--r--drivers/staging/brcm80211/brcmsmac/wlc_cfg.h280
-rw-r--r--drivers/staging/brcm80211/brcmsmac/wlc_key.h140
-rw-r--r--drivers/staging/brcm80211/brcmsmac/wlc_phy_shim.c243
-rw-r--r--drivers/staging/brcm80211/brcmsmac/wlc_phy_shim.h112
-rw-r--r--drivers/staging/brcm80211/brcmsmac/wlc_pmu.c1929
-rw-r--r--drivers/staging/brcm80211/brcmsmac/wlc_stf.h38
-rw-r--r--drivers/staging/brcm80211/brcmutil/Makefile (renamed from drivers/staging/brcm80211/util/Makefile)6
-rw-r--r--drivers/staging/brcm80211/brcmutil/utils.c (renamed from drivers/staging/brcm80211/util/bcmutils.c)153
-rw-r--r--drivers/staging/brcm80211/brcmutil/wifi.c (renamed from drivers/staging/brcm80211/util/bcmwifi.c)20
-rw-r--r--drivers/staging/brcm80211/include/aidmp.h374
-rw-r--r--drivers/staging/brcm80211/include/bcmdefs.h150
-rw-r--r--drivers/staging/brcm80211/include/bcmdevs.h124
-rw-r--r--drivers/staging/brcm80211/include/bcmnvram.h153
-rw-r--r--drivers/staging/brcm80211/include/bcmsdh.h205
-rw-r--r--drivers/staging/brcm80211/include/bcmsdpcm.h208
-rw-r--r--drivers/staging/brcm80211/include/bcmsrom_fmt.h367
-rw-r--r--drivers/staging/brcm80211/include/bcmutils.h500
-rw-r--r--drivers/staging/brcm80211/include/brcm_hw_ids.h59
-rw-r--r--drivers/staging/brcm80211/include/brcmu_utils.h301
-rw-r--r--drivers/staging/brcm80211/include/brcmu_wifi.h (renamed from drivers/staging/brcm80211/include/bcmwifi.h)126
-rw-r--r--drivers/staging/brcm80211/include/chipcommon.h281
-rw-r--r--drivers/staging/brcm80211/include/defs.h112
-rw-r--r--drivers/staging/brcm80211/include/hndsoc.h199
-rw-r--r--drivers/staging/brcm80211/include/nicpci.h79
-rw-r--r--drivers/staging/brcm80211/include/pci_core.h122
-rw-r--r--drivers/staging/brcm80211/include/pcicfg.h50
-rw-r--r--drivers/staging/brcm80211/include/pcie_core.h299
-rw-r--r--drivers/staging/brcm80211/include/proto/802.11.h200
-rw-r--r--drivers/staging/brcm80211/include/proto/bcmeth.h44
-rw-r--r--drivers/staging/brcm80211/include/proto/bcmevent.h207
-rw-r--r--drivers/staging/brcm80211/include/sbchipc.h1588
-rw-r--r--drivers/staging/brcm80211/include/sbconfig.h272
-rw-r--r--drivers/staging/brcm80211/include/sbhnddma.h315
-rw-r--r--drivers/staging/brcm80211/include/sbsdio.h152
-rw-r--r--drivers/staging/brcm80211/include/sbsdpcmdev.h281
-rw-r--r--drivers/staging/brcm80211/include/sdio.h552
-rw-r--r--drivers/staging/brcm80211/include/soc.h95
-rw-r--r--drivers/staging/brcm80211/include/wlioctl.h1365
-rw-r--r--drivers/staging/comedi/Kconfig32
-rw-r--r--drivers/staging/comedi/comedi.h6
-rw-r--r--drivers/staging/comedi/comedi_fops.c18
-rw-r--r--drivers/staging/comedi/comedidev.h3
-rw-r--r--drivers/staging/comedi/drivers.c4
-rw-r--r--drivers/staging/comedi/drivers/Makefile1
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c1
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c2
-rw-r--r--drivers/staging/comedi/drivers/das1800.c28
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c1
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c462
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c10
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c37
-rw-r--r--drivers/staging/comedi/drivers/pcl812.c1
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c1
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c1
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c22
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c45
-rw-r--r--drivers/staging/comedi/drivers/unioxx5.c46
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c7
-rw-r--r--drivers/staging/cs5535_gpio/Kconfig11
-rw-r--r--drivers/staging/cs5535_gpio/Makefile1
-rw-r--r--drivers/staging/cs5535_gpio/TODO6
-rw-r--r--drivers/staging/cs5535_gpio/cs5535_gpio.c260
-rw-r--r--drivers/staging/cxd2099/Kconfig11
-rw-r--r--drivers/staging/cxd2099/cxd2099.c312
-rw-r--r--drivers/staging/cxd2099/cxd2099.h18
-rw-r--r--drivers/staging/cxt1e1/sbecom_inline_linux.h1
-rw-r--r--drivers/staging/dt3155v4l/Kconfig10
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.c487
-rw-r--r--drivers/staging/dt3155v4l/dt3155v4l.h22
-rw-r--r--drivers/staging/easycap/Kconfig32
-rw-r--r--drivers/staging/easycap/Makefile4
-rw-r--r--drivers/staging/easycap/README28
-rw-r--r--drivers/staging/easycap/easycap.h46
-rw-r--r--drivers/staging/easycap/easycap_ioctl.c17
-rw-r--r--drivers/staging/easycap/easycap_main.c156
-rw-r--r--drivers/staging/easycap/easycap_settings.c2
-rw-r--r--drivers/staging/easycap/easycap_sound.c35
-rw-r--r--drivers/staging/easycap/easycap_sound_oss.c954
-rw-r--r--drivers/staging/echo/echo.c11
-rw-r--r--drivers/staging/et131x/et1310_address_map.h323
-rw-r--r--drivers/staging/et131x/et1310_mac.c265
-rw-r--r--drivers/staging/et131x/et1310_phy.c33
-rw-r--r--drivers/staging/et131x/et1310_phy.h169
-rw-r--r--drivers/staging/et131x/et1310_pm.c4
-rw-r--r--drivers/staging/et131x/et1310_rx.c214
-rw-r--r--drivers/staging/et131x/et1310_tx.c12
-rw-r--r--drivers/staging/et131x/et131x.h4
-rw-r--r--drivers/staging/et131x/et131x_adapter.h21
-rw-r--r--drivers/staging/et131x/et131x_defs.h1
-rw-r--r--drivers/staging/et131x/et131x_initpci.c34
-rw-r--r--drivers/staging/et131x/et131x_isr.c20
-rw-r--r--drivers/staging/et131x/et131x_netdev.c140
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000.conf14
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000.h480
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c190
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_dev.h66
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c561
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c224
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c27
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c9
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h32
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.h413
-rw-r--r--drivers/staging/ft1000/ft1000.h252
-rw-r--r--drivers/staging/generic_serial/Kconfig45
-rw-r--r--drivers/staging/generic_serial/Makefile6
-rw-r--r--drivers/staging/generic_serial/TODO6
-rw-r--r--drivers/staging/generic_serial/generic_serial.c844
-rw-r--r--drivers/staging/generic_serial/rio/Makefile12
-rw-r--r--drivers/staging/generic_serial/rio/board.h132
-rw-r--r--drivers/staging/generic_serial/rio/cirrus.h210
-rw-r--r--drivers/staging/generic_serial/rio/cmdblk.h53
-rw-r--r--drivers/staging/generic_serial/rio/cmdpkt.h177
-rw-r--r--drivers/staging/generic_serial/rio/daemon.h307
-rw-r--r--drivers/staging/generic_serial/rio/errors.h98
-rw-r--r--drivers/staging/generic_serial/rio/func.h143
-rw-r--r--drivers/staging/generic_serial/rio/host.h123
-rw-r--r--drivers/staging/generic_serial/rio/link.h96
-rw-r--r--drivers/staging/generic_serial/rio/linux_compat.h77
-rw-r--r--drivers/staging/generic_serial/rio/map.h98
-rw-r--r--drivers/staging/generic_serial/rio/param.h55
-rw-r--r--drivers/staging/generic_serial/rio/parmmap.h81
-rw-r--r--drivers/staging/generic_serial/rio/pci.h72
-rw-r--r--drivers/staging/generic_serial/rio/phb.h142
-rw-r--r--drivers/staging/generic_serial/rio/pkt.h77
-rw-r--r--drivers/staging/generic_serial/rio/port.h179
-rw-r--r--drivers/staging/generic_serial/rio/protsts.h110
-rw-r--r--drivers/staging/generic_serial/rio/rio.h208
-rw-r--r--drivers/staging/generic_serial/rio/rio_linux.c1204
-rw-r--r--drivers/staging/generic_serial/rio/rio_linux.h197
-rw-r--r--drivers/staging/generic_serial/rio/rioboard.h275
-rw-r--r--drivers/staging/generic_serial/rio/rioboot.c1113
-rw-r--r--drivers/staging/generic_serial/rio/riocmd.c939
-rw-r--r--drivers/staging/generic_serial/rio/rioctrl.c1504
-rw-r--r--drivers/staging/generic_serial/rio/riodrvr.h138
-rw-r--r--drivers/staging/generic_serial/rio/rioinfo.h92
-rw-r--r--drivers/staging/generic_serial/rio/rioinit.c421
-rw-r--r--drivers/staging/generic_serial/rio/riointr.c645
-rw-r--r--drivers/staging/generic_serial/rio/rioioctl.h57
-rw-r--r--drivers/staging/generic_serial/rio/rioparam.c663
-rw-r--r--drivers/staging/generic_serial/rio/rioroute.c1039
-rw-r--r--drivers/staging/generic_serial/rio/riospace.h154
-rw-r--r--drivers/staging/generic_serial/rio/riotable.c941
-rw-r--r--drivers/staging/generic_serial/rio/riotty.c654
-rw-r--r--drivers/staging/generic_serial/rio/route.h101
-rw-r--r--drivers/staging/generic_serial/rio/rup.h69
-rw-r--r--drivers/staging/generic_serial/rio/unixrup.h51
-rw-r--r--drivers/staging/generic_serial/ser_a2232.c831
-rw-r--r--drivers/staging/generic_serial/ser_a2232.h202
-rw-r--r--drivers/staging/generic_serial/ser_a2232fw.ax529
-rw-r--r--drivers/staging/generic_serial/ser_a2232fw.h306
-rw-r--r--drivers/staging/generic_serial/sx.c2894
-rw-r--r--drivers/staging/generic_serial/sx.h201
-rw-r--r--drivers/staging/generic_serial/sxboards.h206
-rw-r--r--drivers/staging/generic_serial/sxwindow.h393
-rw-r--r--drivers/staging/generic_serial/vme_scc.c1145
-rw-r--r--drivers/staging/gma500/Kconfig27
-rw-r--r--drivers/staging/gma500/Makefile48
-rw-r--r--drivers/staging/gma500/TODO29
-rw-r--r--drivers/staging/gma500/accel_2d.c (renamed from drivers/staging/gma500/psb_2d.c)298
-rw-r--r--drivers/staging/gma500/backlight.c49
-rw-r--r--drivers/staging/gma500/cdv_device.c351
-rw-r--r--drivers/staging/gma500/cdv_device.h36
-rw-r--r--drivers/staging/gma500/cdv_intel_crt.c326
-rw-r--r--drivers/staging/gma500/cdv_intel_display.c1508
-rw-r--r--drivers/staging/gma500/cdv_intel_hdmi.c376
-rw-r--r--drivers/staging/gma500/cdv_intel_lvds.c721
-rw-r--r--drivers/staging/gma500/displays/hdmi.h33
-rw-r--r--drivers/staging/gma500/displays/pyr_cmd.h34
-rw-r--r--drivers/staging/gma500/displays/pyr_vid.h34
-rw-r--r--drivers/staging/gma500/displays/tmd_cmd.h34
-rw-r--r--drivers/staging/gma500/displays/tmd_vid.h34
-rw-r--r--drivers/staging/gma500/displays/tpo_cmd.h35
-rw-r--r--drivers/staging/gma500/displays/tpo_vid.h33
-rw-r--r--drivers/staging/gma500/framebuffer.c (renamed from drivers/staging/gma500/psb_fb.c)530
-rw-r--r--drivers/staging/gma500/framebuffer.h (renamed from drivers/staging/gma500/psb_fb.h)11
-rw-r--r--drivers/staging/gma500/gem.c (renamed from drivers/staging/gma500/psb_gem.c)165
-rw-r--r--drivers/staging/gma500/gem_glue.c89
-rw-r--r--drivers/staging/gma500/gem_glue.h2
-rw-r--r--drivers/staging/gma500/gtt.c (renamed from drivers/staging/gma500/psb_gtt.c)247
-rw-r--r--drivers/staging/gma500/gtt.h (renamed from drivers/staging/gma500/psb_gtt.h)14
-rw-r--r--drivers/staging/gma500/intel_bios.c (renamed from drivers/staging/gma500/psb_intel_bios.c)25
-rw-r--r--drivers/staging/gma500/intel_bios.h (renamed from drivers/staging/gma500/psb_intel_bios.h)0
-rw-r--r--drivers/staging/gma500/intel_i2c.c (renamed from drivers/staging/gma500/psb_intel_i2c.c)0
-rw-r--r--drivers/staging/gma500/intel_opregion.c (renamed from drivers/staging/gma500/psb_intel_opregion.c)28
-rw-r--r--drivers/staging/gma500/mdfld_device.c714
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi.c761
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi.h173
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi_dpu.c778
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi_dpu.h154
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dpi.c805
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dpi.h78
-rw-r--r--drivers/staging/gma500/mdfld_dsi_output.c1013
-rw-r--r--drivers/staging/gma500/mdfld_dsi_output.h138
-rw-r--r--drivers/staging/gma500/mdfld_dsi_pkg_sender.c1484
-rw-r--r--drivers/staging/gma500/mdfld_dsi_pkg_sender.h184
-rw-r--r--drivers/staging/gma500/mdfld_intel_display.c1402
-rw-r--r--drivers/staging/gma500/mdfld_msic.h31
-rw-r--r--drivers/staging/gma500/mdfld_output.c170
-rw-r--r--drivers/staging/gma500/mdfld_output.h41
-rw-r--r--drivers/staging/gma500/mdfld_pyr_cmd.c558
-rw-r--r--drivers/staging/gma500/mdfld_tmd_vid.c206
-rw-r--r--drivers/staging/gma500/mdfld_tpo_cmd.c509
-rw-r--r--drivers/staging/gma500/mdfld_tpo_vid.c140
-rw-r--r--drivers/staging/gma500/medfield.h268
-rw-r--r--drivers/staging/gma500/mid_bios.c269
-rw-r--r--drivers/staging/gma500/mid_bios.h21
-rw-r--r--drivers/staging/gma500/mmu.c (renamed from drivers/staging/gma500/psb_mmu.c)0
-rw-r--r--drivers/staging/gma500/mrst.h73
-rw-r--r--drivers/staging/gma500/mrst_crtc.c31
-rw-r--r--drivers/staging/gma500/mrst_device.c634
-rw-r--r--drivers/staging/gma500/mrst_hdmi.c852
-rw-r--r--drivers/staging/gma500/mrst_hdmi_i2c.c327
-rw-r--r--drivers/staging/gma500/mrst_lvds.c92
-rw-r--r--drivers/staging/gma500/power.c (renamed from drivers/staging/gma500/psb_powermgmt.c)237
-rw-r--r--drivers/staging/gma500/power.h (renamed from drivers/staging/gma500/psb_powermgmt.h)6
-rw-r--r--drivers/staging/gma500/psb_bl.c227
-rw-r--r--drivers/staging/gma500/psb_device.c353
-rw-r--r--drivers/staging/gma500/psb_drm.h152
-rw-r--r--drivers/staging/gma500/psb_drv.c456
-rw-r--r--drivers/staging/gma500/psb_drv.h493
-rw-r--r--drivers/staging/gma500/psb_intel_display.c173
-rw-r--r--drivers/staging/gma500/psb_intel_display.h3
-rw-r--r--drivers/staging/gma500/psb_intel_drv.h24
-rw-r--r--drivers/staging/gma500/psb_intel_lvds.c148
-rw-r--r--drivers/staging/gma500/psb_intel_reg.h1144
-rw-r--r--drivers/staging/gma500/psb_intel_sdvo.c35
-rw-r--r--drivers/staging/gma500/psb_intel_sdvo_regs.h14
-rw-r--r--drivers/staging/gma500/psb_irq.c99
-rw-r--r--drivers/staging/gma500/psb_irq.h8
-rw-r--r--drivers/staging/gma500/psb_lid.c6
-rw-r--r--drivers/staging/gma500/psb_reg.h842
-rw-r--r--drivers/staging/hv/blkvsc_drv.c46
-rw-r--r--drivers/staging/hv/channel.c113
-rw-r--r--drivers/staging/hv/channel_mgmt.c8
-rw-r--r--drivers/staging/hv/connection.c52
-rw-r--r--drivers/staging/hv/hv.c4
-rw-r--r--drivers/staging/hv/hv_mouse.c2
-rw-r--r--drivers/staging/hv/hv_timesource.c1
-rw-r--r--drivers/staging/hv/hyperv.h86
-rw-r--r--drivers/staging/hv/hyperv_net.h14
-rw-r--r--drivers/staging/hv/hyperv_vmbus.h2
-rw-r--r--drivers/staging/hv/netvsc.c173
-rw-r--r--drivers/staging/hv/netvsc_drv.c60
-rw-r--r--drivers/staging/hv/ring_buffer.c6
-rw-r--r--drivers/staging/hv/rndis_filter.c59
-rw-r--r--drivers/staging/hv/storvsc.c8
-rw-r--r--drivers/staging/hv/storvsc_drv.c88
-rw-r--r--drivers/staging/hv/tools/hv_kvp_daemon.c1
-rw-r--r--drivers/staging/hv/vmbus_drv.c136
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio-light8
-rw-r--r--drivers/staging/iio/Kconfig2
-rw-r--r--drivers/staging/iio/accel/adis16201.h12
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c133
-rw-r--r--drivers/staging/iio/accel/adis16201_ring.c8
-rw-r--r--drivers/staging/iio/accel/adis16201_trigger.c11
-rw-r--r--drivers/staging/iio/accel/adis16203.h12
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c93
-rw-r--r--drivers/staging/iio/accel/adis16203_ring.c8
-rw-r--r--drivers/staging/iio/accel/adis16203_trigger.c17
-rw-r--r--drivers/staging/iio/accel/adis16204.h14
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c92
-rw-r--r--drivers/staging/iio/accel/adis16204_ring.c8
-rw-r--r--drivers/staging/iio/accel/adis16204_trigger.c11
-rw-r--r--drivers/staging/iio/accel/adis16209.h16
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c90
-rw-r--r--drivers/staging/iio/accel/adis16209_ring.c8
-rw-r--r--drivers/staging/iio/accel/adis16209_trigger.c17
-rw-r--r--drivers/staging/iio/accel/adis16220.h14
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c105
-rw-r--r--drivers/staging/iio/accel/adis16240.h14
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c90
-rw-r--r--drivers/staging/iio/accel/adis16240_ring.c8
-rw-r--r--drivers/staging/iio/accel/adis16240_trigger.c17
-rw-r--r--drivers/staging/iio/accel/kxsd9.c102
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c2
-rw-r--r--drivers/staging/iio/accel/sca3000.h1
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c100
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c14
-rw-r--r--drivers/staging/iio/adc/Kconfig14
-rw-r--r--drivers/staging/iio/adc/Makefile1
-rw-r--r--drivers/staging/iio/adc/ad7150.c91
-rw-r--r--drivers/staging/iio/adc/ad7152.c73
-rw-r--r--drivers/staging/iio/adc/ad7291.c86
-rw-r--r--drivers/staging/iio/adc/ad7314.c48
-rw-r--r--drivers/staging/iio/adc/ad7476.h5
-rw-r--r--drivers/staging/iio/adc/ad7476_core.c84
-rw-r--r--drivers/staging/iio/adc/ad7476_ring.c10
-rw-r--r--drivers/staging/iio/adc/ad7745.c79
-rw-r--r--drivers/staging/iio/adc/ad7793.c987
-rw-r--r--drivers/staging/iio/adc/ad7793.h107
-rw-r--r--drivers/staging/iio/adc/ad7816.c74
-rw-r--r--drivers/staging/iio/adc/ad7887_core.c3
-rw-r--r--drivers/staging/iio/adc/ad7887_ring.c6
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c13
-rw-r--r--drivers/staging/iio/adc/ad799x_ring.c4
-rw-r--r--drivers/staging/iio/adc/adt7310.c94
-rw-r--r--drivers/staging/iio/adc/adt7410.c86
-rw-r--r--drivers/staging/iio/adc/adt75.c123
-rw-r--r--drivers/staging/iio/adc/max1363_core.c20
-rw-r--r--drivers/staging/iio/addac/adt7316.c224
-rw-r--r--drivers/staging/iio/dac/Kconfig11
-rw-r--r--drivers/staging/iio/dac/Makefile1
-rw-r--r--drivers/staging/iio/dac/ad5446.c82
-rw-r--r--drivers/staging/iio/dac/ad5446.h2
-rw-r--r--drivers/staging/iio/dac/ad5504.c89
-rw-r--r--drivers/staging/iio/dac/ad5504.h2
-rw-r--r--drivers/staging/iio/dac/ad5624r.h1
-rw-r--r--drivers/staging/iio/dac/ad5624r_spi.c84
-rw-r--r--drivers/staging/iio/dac/ad5686.c497
-rw-r--r--drivers/staging/iio/dac/ad5791.c112
-rw-r--r--drivers/staging/iio/dac/ad5791.h2
-rw-r--r--drivers/staging/iio/dac/max517.c40
-rw-r--r--drivers/staging/iio/dds/ad5930.c37
-rw-r--r--drivers/staging/iio/dds/ad9832.c69
-rw-r--r--drivers/staging/iio/dds/ad9832.h2
-rw-r--r--drivers/staging/iio/dds/ad9834.c81
-rw-r--r--drivers/staging/iio/dds/ad9834.h2
-rw-r--r--drivers/staging/iio/dds/ad9850.c37
-rw-r--r--drivers/staging/iio/dds/ad9852.c38
-rw-r--r--drivers/staging/iio/dds/ad9910.c36
-rw-r--r--drivers/staging/iio/dds/ad9951.c36
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c59
-rw-r--r--drivers/staging/iio/gyro/adis16080_core.c48
-rw-r--r--drivers/staging/iio/gyro/adis16130_core.c50
-rw-r--r--drivers/staging/iio/gyro/adis16260.h20
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c108
-rw-r--r--drivers/staging/iio/gyro/adis16260_ring.c9
-rw-r--r--drivers/staging/iio/gyro/adis16260_trigger.c15
-rw-r--r--drivers/staging/iio/gyro/adxrs450.h13
-rw-r--r--drivers/staging/iio/gyro/adxrs450_core.c71
-rw-r--r--drivers/staging/iio/iio.h16
-rw-r--r--drivers/staging/iio/imu/adis16400.h5
-rw-r--r--drivers/staging/iio/imu/adis16400_core.c50
-rw-r--r--drivers/staging/iio/industrialio-core.c47
-rw-r--r--drivers/staging/iio/industrialio-trigger.c20
-rw-r--r--drivers/staging/iio/kfifo_buf.c2
-rw-r--r--drivers/staging/iio/light/Kconfig2
-rw-r--r--drivers/staging/iio/light/isl29018.c197
-rw-r--r--drivers/staging/iio/light/tsl2563.c125
-rw-r--r--drivers/staging/iio/magnetometer/ak8975.c91
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c50
-rw-r--r--drivers/staging/iio/meter/ade7753.c83
-rw-r--r--drivers/staging/iio/meter/ade7753.h10
-rw-r--r--drivers/staging/iio/meter/ade7754.c80
-rw-r--r--drivers/staging/iio/meter/ade7754.h12
-rw-r--r--drivers/staging/iio/meter/ade7759.c80
-rw-r--r--drivers/staging/iio/meter/ade7759.h12
-rw-r--r--drivers/staging/iio/meter/ade7854-i2c.c39
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c40
-rw-r--r--drivers/staging/iio/meter/ade7854.c76
-rw-r--r--drivers/staging/iio/meter/ade7854.h36
-rw-r--r--drivers/staging/iio/resolver/Kconfig27
-rw-r--r--drivers/staging/iio/resolver/ad2s120x.c196
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c856
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.h17
-rw-r--r--drivers/staging/iio/resolver/ad2s90.c48
-rw-r--r--drivers/staging/iio/trigger.h52
-rw-r--r--drivers/staging/iio/trigger/iio-trig-gpio.c2
-rw-r--r--drivers/staging/intel_sst/intel_sst.c5
-rw-r--r--drivers/staging/intel_sst/intel_sst_app_interface.c4
-rw-r--r--drivers/staging/intel_sst/intel_sst_common.h2
-rw-r--r--drivers/staging/intel_sst/intel_sst_drv_interface.c10
-rw-r--r--drivers/staging/intel_sst/intelmid_v2_control.c2
-rw-r--r--drivers/staging/keucr/Kconfig6
-rw-r--r--drivers/staging/keucr/Makefile2
-rw-r--r--drivers/staging/keucr/init.c71
-rw-r--r--drivers/staging/keucr/init.h773
-rw-r--r--drivers/staging/keucr/ms.c1034
-rw-r--r--drivers/staging/keucr/ms.h401
-rw-r--r--drivers/staging/keucr/msscsi.c344
-rw-r--r--drivers/staging/keucr/transport.c4
-rw-r--r--drivers/staging/keucr/transport.h46
-rw-r--r--drivers/staging/keucr/usb.c5
-rw-r--r--drivers/staging/keucr/usb.h3
-rw-r--r--drivers/staging/lirc/lirc_bt829.c4
-rw-r--r--drivers/staging/lirc/lirc_imon.c10
-rw-r--r--drivers/staging/lirc/lirc_parallel.c6
-rw-r--r--drivers/staging/lirc/lirc_serial.c44
-rw-r--r--drivers/staging/lirc/lirc_sir.c11
-rw-r--r--drivers/staging/lirc/lirc_ttusbir.c1
-rw-r--r--drivers/staging/lirc/lirc_zilog.c6
-rw-r--r--drivers/staging/mei/init.c151
-rw-r--r--drivers/staging/mei/interface.c9
-rw-r--r--drivers/staging/mei/interrupt.c47
-rw-r--r--drivers/staging/mei/iorw.c32
-rw-r--r--drivers/staging/mei/main.c39
-rw-r--r--drivers/staging/mei/mei_dev.h123
-rw-r--r--drivers/staging/mei/wd.c31
-rw-r--r--drivers/staging/msm/Kconfig124
-rw-r--r--drivers/staging/msm/Makefile88
-rw-r--r--drivers/staging/msm/TODO3
-rw-r--r--drivers/staging/msm/ebi2_l2f.c569
-rw-r--r--drivers/staging/msm/ebi2_lcd.c250
-rw-r--r--drivers/staging/msm/ebi2_tmd20.c1122
-rw-r--r--drivers/staging/msm/hdmi_sii9022.c248
-rw-r--r--drivers/staging/msm/lcdc.c239
-rw-r--r--drivers/staging/msm/lcdc_external.c54
-rw-r--r--drivers/staging/msm/lcdc_gordon.c446
-rw-r--r--drivers/staging/msm/lcdc_panel.c88
-rw-r--r--drivers/staging/msm/lcdc_prism.c64
-rw-r--r--drivers/staging/msm/lcdc_sharp_wvga_pt.c290
-rw-r--r--drivers/staging/msm/lcdc_st15.c237
-rw-r--r--drivers/staging/msm/lcdc_toshiba_wvga_pt.c374
-rw-r--r--drivers/staging/msm/logo.c98
-rw-r--r--drivers/staging/msm/mddi.c375
-rw-r--r--drivers/staging/msm/mddi_ext.c320
-rw-r--r--drivers/staging/msm/mddi_ext_lcd.c91
-rw-r--r--drivers/staging/msm/mddi_prism.c114
-rw-r--r--drivers/staging/msm/mddi_sharp.c892
-rw-r--r--drivers/staging/msm/mddi_toshiba.c1741
-rw-r--r--drivers/staging/msm/mddi_toshiba.h36
-rw-r--r--drivers/staging/msm/mddi_toshiba_vga.c136
-rw-r--r--drivers/staging/msm/mddi_toshiba_wvga_pt.c64
-rw-r--r--drivers/staging/msm/mddihost.c377
-rw-r--r--drivers/staging/msm/mddihost.h207
-rw-r--r--drivers/staging/msm/mddihost_e.c63
-rw-r--r--drivers/staging/msm/mddihosti.c2239
-rw-r--r--drivers/staging/msm/mddihosti.h531
-rw-r--r--drivers/staging/msm/mdp.c1113
-rw-r--r--drivers/staging/msm/mdp.h679
-rw-r--r--drivers/staging/msm/mdp4.h336
-rw-r--r--drivers/staging/msm/mdp4_debugfs.c175
-rw-r--r--drivers/staging/msm/mdp4_overlay.c1259
-rw-r--r--drivers/staging/msm/mdp4_overlay_lcdc.c313
-rw-r--r--drivers/staging/msm/mdp4_overlay_mddi.c254
-rw-r--r--drivers/staging/msm/mdp4_util.c1686
-rw-r--r--drivers/staging/msm/mdp_cursor.c104
-rw-r--r--drivers/staging/msm/mdp_dma.c561
-rw-r--r--drivers/staging/msm/mdp_dma_lcdc.c379
-rw-r--r--drivers/staging/msm/mdp_dma_s.c139
-rw-r--r--drivers/staging/msm/mdp_dma_tv.c142
-rw-r--r--drivers/staging/msm/mdp_hw_init.c720
-rw-r--r--drivers/staging/msm/mdp_ppp.c1502
-rw-r--r--drivers/staging/msm/mdp_ppp_dq.c347
-rw-r--r--drivers/staging/msm/mdp_ppp_dq.h69
-rw-r--r--drivers/staging/msm/mdp_ppp_v20.c2486
-rw-r--r--drivers/staging/msm/mdp_ppp_v31.c828
-rw-r--r--drivers/staging/msm/mdp_vsync.c389
-rw-r--r--drivers/staging/msm/memory.c214
-rw-r--r--drivers/staging/msm/memory_ll.h61
-rw-r--r--drivers/staging/msm/msm_fb.c2354
-rw-r--r--drivers/staging/msm/msm_fb.h158
-rw-r--r--drivers/staging/msm/msm_fb_bl.c79
-rw-r--r--drivers/staging/msm/msm_fb_def.h181
-rw-r--r--drivers/staging/msm/msm_fb_panel.c136
-rw-r--r--drivers/staging/msm/msm_fb_panel.h129
-rw-r--r--drivers/staging/msm/msm_mdp.h245
-rw-r--r--drivers/staging/msm/staging-devices.c312
-rw-r--r--drivers/staging/msm/tv_ntsc.c163
-rw-r--r--drivers/staging/msm/tv_pal.c213
-rw-r--r--drivers/staging/msm/tvenc.c296
-rw-r--r--drivers/staging/msm/tvenc.h101
-rw-r--r--drivers/staging/nvec/TODO6
-rw-r--r--drivers/staging/nvec/nvec.c1
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c27
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c33
-rw-r--r--drivers/staging/octeon/ethernet-rx.c26
-rw-r--r--drivers/staging/octeon/ethernet-sgmii.c14
-rw-r--r--drivers/staging/octeon/ethernet-tx.c13
-rw-r--r--drivers/staging/octeon/ethernet-util.h4
-rw-r--r--drivers/staging/octeon/ethernet-xaui.c22
-rw-r--r--drivers/staging/panel/panel.c1
-rw-r--r--drivers/staging/pohmelfs/crypto.c1
-rw-r--r--drivers/staging/pohmelfs/dir.c2
-rw-r--r--drivers/staging/pohmelfs/inode.c11
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c1
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c1
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c1
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c1
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_module.c1
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c9
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/rtl8187se/r8180.h1
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c3
-rw-r--r--drivers/staging/rtl8192e/dot11d.h23
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c1
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c1
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c1
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c1
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_module.c3
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c1
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c3
-rw-r--r--drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c3
-rw-r--r--drivers/staging/rtl8192e/r8192E.h2
-rw-r--r--drivers/staging/rtl8192e/r8192E_core.c14
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c7
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U.h1
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c10
-rw-r--r--drivers/staging/rtl8192u/r819xU_firmware.c9
-rw-r--r--drivers/staging/rtl8712/drv_types.h1
-rw-r--r--drivers/staging/rtl8712/ieee80211.h31
-rw-r--r--drivers/staging/rtl8712/osdep_service.h7
-rw-r--r--drivers/staging/rtl8712/rtl8712_hal.h2
-rw-r--r--drivers/staging/rtl8712/wifi.h7
-rw-r--r--drivers/staging/rts_pstor/ms.c3
-rw-r--r--drivers/staging/rts_pstor/rtsx.c109
-rw-r--r--drivers/staging/rts_pstor/rtsx.h10
-rw-r--r--drivers/staging/rts_pstor/rtsx_chip.c6
-rw-r--r--drivers/staging/rts_pstor/sd.c64
-rw-r--r--drivers/staging/rts_pstor/sd.h5
-rw-r--r--drivers/staging/sep/sep_driver.c7
-rw-r--r--drivers/staging/sep/sep_driver_config.h2
-rw-r--r--drivers/staging/solo6x10/core.c1
-rw-r--r--drivers/staging/solo6x10/enc.c1
-rw-r--r--drivers/staging/solo6x10/g723.c1
-rw-r--r--drivers/staging/solo6x10/p2m.c1
-rw-r--r--drivers/staging/solo6x10/solo6x10.h3
-rw-r--r--drivers/staging/speakup/devsynth.c5
-rw-r--r--drivers/staging/speakup/main.c1
-rw-r--r--drivers/staging/speakup/speakup.h1
-rw-r--r--drivers/staging/speakup/speakup_soft.c9
-rw-r--r--drivers/staging/spectra/lld_nand.c18
-rw-r--r--drivers/staging/ste_rmi4/Makefile1
-rw-r--r--drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c32
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c43
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h2
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/host_os.h3
-rw-r--r--drivers/staging/tm6000/tm6000-alsa.c20
-rw-r--r--drivers/staging/tm6000/tm6000-cards.c7
-rw-r--r--drivers/staging/tm6000/tm6000-dvb.c10
-rw-r--r--drivers/staging/tm6000/tm6000-i2c.c2
-rw-r--r--drivers/staging/tm6000/tm6000-input.c3
-rw-r--r--drivers/staging/tm6000/tm6000-video.c43
-rw-r--r--drivers/staging/tm6000/tm6000.h3
-rw-r--r--drivers/staging/tty/Kconfig87
-rw-r--r--drivers/staging/tty/Makefile7
-rw-r--r--drivers/staging/tty/TODO6
-rw-r--r--drivers/staging/tty/cd1865.h263
-rw-r--r--drivers/staging/tty/digi1.h100
-rw-r--r--drivers/staging/tty/digiFep1.h136
-rw-r--r--drivers/staging/tty/digiPCI.h42
-rw-r--r--drivers/staging/tty/epca.c2784
-rw-r--r--drivers/staging/tty/epca.h158
-rw-r--r--drivers/staging/tty/epcaconfig.h7
-rw-r--r--drivers/staging/tty/ip2/Makefile8
-rw-r--r--drivers/staging/tty/ip2/i2cmd.c210
-rw-r--r--drivers/staging/tty/ip2/i2cmd.h630
-rw-r--r--drivers/staging/tty/ip2/i2ellis.c1403
-rw-r--r--drivers/staging/tty/ip2/i2ellis.h566
-rw-r--r--drivers/staging/tty/ip2/i2hw.h652
-rw-r--r--drivers/staging/tty/ip2/i2lib.c2214
-rw-r--r--drivers/staging/tty/ip2/i2lib.h351
-rw-r--r--drivers/staging/tty/ip2/i2pack.h364
-rw-r--r--drivers/staging/tty/ip2/ip2.h107
-rw-r--r--drivers/staging/tty/ip2/ip2ioctl.h35
-rw-r--r--drivers/staging/tty/ip2/ip2main.c3234
-rw-r--r--drivers/staging/tty/ip2/ip2trace.h42
-rw-r--r--drivers/staging/tty/ip2/ip2types.h57
-rw-r--r--drivers/staging/tty/istallion.c4507
-rw-r--r--drivers/staging/tty/riscom8.c1560
-rw-r--r--drivers/staging/tty/riscom8.h91
-rw-r--r--drivers/staging/tty/riscom8_reg.h254
-rw-r--r--drivers/staging/tty/serial167.c2489
-rw-r--r--drivers/staging/tty/specialix.c2368
-rw-r--r--drivers/staging/tty/specialix_io8.h140
-rw-r--r--drivers/staging/tty/stallion.c4651
-rw-r--r--drivers/staging/usbip/README1
-rw-r--r--drivers/staging/usbip/stub.h4
-rw-r--r--drivers/staging/usbip/stub_dev.c55
-rw-r--r--drivers/staging/usbip/stub_main.c193
-rw-r--r--drivers/staging/usbip/stub_rx.c6
-rw-r--r--drivers/staging/usbip/stub_tx.c15
-rw-r--r--drivers/staging/usbip/usbip_common.c35
-rw-r--r--drivers/staging/usbip/usbip_common.h238
-rw-r--r--drivers/staging/usbip/userspace/AUTHORS1
-rw-r--r--drivers/staging/usbip/userspace/Makefile.am9
-rw-r--r--drivers/staging/usbip/userspace/README298
-rwxr-xr-xdrivers/staging/usbip/userspace/cleanup.sh10
-rw-r--r--drivers/staging/usbip/userspace/configure.ac26
-rw-r--r--drivers/staging/usbip/userspace/libsrc/Makefile.am8
-rw-r--r--drivers/staging/usbip/userspace/libsrc/stub_driver.c391
-rw-r--r--drivers/staging/usbip/userspace/libsrc/stub_driver.h36
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip.h19
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_common.c28
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_common.h138
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c401
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h48
-rw-r--r--drivers/staging/usbip/userspace/libsrc/vhci_driver.c167
-rw-r--r--drivers/staging/usbip/userspace/libsrc/vhci_driver.h32
-rw-r--r--drivers/staging/usbip/userspace/src/Makefile.am17
-rw-r--r--drivers/staging/usbip/userspace/src/bind-driver.c643
-rw-r--r--drivers/staging/usbip/userspace/src/usbip.c817
-rw-r--r--drivers/staging/usbip/userspace/src/usbip.h39
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_attach.c228
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_bind.c277
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_detach.c103
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_list.c303
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_network.c198
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_network.h94
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_unbind.c186
-rw-r--r--drivers/staging/usbip/userspace/src/usbipd.c705
-rw-r--r--drivers/staging/usbip/userspace/src/utils.c283
-rw-r--r--drivers/staging/usbip/userspace/src/utils.h56
-rw-r--r--drivers/staging/usbip/userspace/usb.ids13209
-rw-r--r--drivers/staging/usbip/vhci.h11
-rw-r--r--drivers/staging/usbip/vhci_hcd.c59
-rw-r--r--drivers/staging/usbip/vhci_rx.c2
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c6
-rw-r--r--drivers/staging/vme/boards/vme_vmivme7805.c4
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.c19
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.c2
-rw-r--r--drivers/staging/vme/devices/vme_user.c53
-rw-r--r--drivers/staging/vme/devices/vme_user.h6
-rw-r--r--drivers/staging/vme/vme.h2
-rw-r--r--drivers/staging/westbridge/Kconfig53
-rw-r--r--drivers/staging/westbridge/TODO7
-rw-r--r--drivers/staging/westbridge/astoria/Kconfig9
-rw-r--r--drivers/staging/westbridge/astoria/Makefile11
-rw-r--r--drivers/staging/westbridge/astoria/api/Makefile14
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyasdma.c1107
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyasintr.c143
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyaslep2pep.c358
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyaslowlevel.c1264
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyasmisc.c3488
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyasmtp.c1136
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyasstorage.c4125
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyasusb.c3740
-rw-r--r--drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c2441
-rw-r--r--drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/cyashaldef.h55
-rw-r--r--drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h319
-rw-r--r--drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h558
-rw-r--r--drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h72
-rw-r--r--drivers/staging/westbridge/astoria/block/Kconfig9
-rw-r--r--drivers/staging/westbridge/astoria/block/Makefile11
-rw-r--r--drivers/staging/westbridge/astoria/block/cyasblkdev_block.c1631
-rw-r--r--drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c417
-rw-r--r--drivers/staging/westbridge/astoria/block/cyasblkdev_queue.h64
-rw-r--r--drivers/staging/westbridge/astoria/device/Kconfig9
-rw-r--r--drivers/staging/westbridge/astoria/device/Makefile23
-rw-r--r--drivers/staging/westbridge/astoria/device/cyasdevice.c409
-rw-r--r--drivers/staging/westbridge/astoria/gadget/Kconfig9
-rw-r--r--drivers/staging/westbridge/astoria/gadget/Makefile11
-rw-r--r--drivers/staging/westbridge/astoria/gadget/cyasgadget.c2177
-rw-r--r--drivers/staging/westbridge/astoria/gadget/cyasgadget.h193
-rw-r--r--drivers/staging/westbridge/astoria/gadget/cyasgadget_ioctl.h99
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyanerr.h418
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyanmedia.h59
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyanmisc.h614
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyanregs.h180
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyansdkversion.h30
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyanstorage.h419
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyantioch.h35
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyantypes.h31
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyanusb.h619
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyas_cplus_end.h11
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyas_cplus_start.h11
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyascast.h35
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasdevice.h1057
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasdma.h375
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyaserr.h1094
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyashal.h108
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyashalcb.h44
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyashaldoc.h800
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasintr.h104
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyaslep2pep.h36
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyaslowlevel.h366
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmedia.h54
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmisc.h1549
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmisc_dep.h53
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmtp.h646
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasprotocol.h3838
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasregs.h201
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasstorage.h2759
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasstorage_dep.h309
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyastoria.h36
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyastsdkversion.h30
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyastypes.h71
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasusb.h1862
-rw-r--r--drivers/staging/westbridge/astoria/include/linux/westbridge/cyasusb_dep.h224
-rw-r--r--drivers/staging/winbond/mds_s.h2
-rw-r--r--drivers/staging/winbond/phy_calibration.c462
-rw-r--r--drivers/staging/winbond/wb35reg_s.h2
-rw-r--r--drivers/staging/wlags49_h2/wl_internal.h8
-rw-r--r--drivers/staging/wlags49_h2/wl_version.h1
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c4
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c1
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c1
-rw-r--r--drivers/staging/xgifb/vb_ext.c1
-rw-r--r--drivers/staging/xgifb/vb_init.c1
-rw-r--r--drivers/staging/xgifb/vb_setmode.c1
-rw-r--r--drivers/staging/zcache/Makefile2
-rw-r--r--drivers/staging/zcache/tmem.c100
-rw-r--r--drivers/staging/zcache/tmem.h23
-rw-r--r--drivers/staging/zcache/zcache-main.c (renamed from drivers/staging/zcache/zcache.c)528
-rw-r--r--drivers/staging/zram/zram_drv.c457
-rw-r--r--drivers/staging/zram/zram_drv.h9
-rw-r--r--drivers/target/Kconfig1
-rw-r--r--drivers/target/Makefile2
-rw-r--r--drivers/target/iscsi/Kconfig9
-rw-r--r--drivers/target/iscsi/Makefile20
-rw-r--r--drivers/target/iscsi/iscsi_target.c4564
-rw-r--r--drivers/target/iscsi/iscsi_target.h42
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c490
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.h31
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c1882
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.h7
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h859
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.c531
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.h12
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c87
-rw-r--r--drivers/target/iscsi/iscsi_target_device.h9
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c1004
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.h15
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c1299
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.h26
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c474
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.h18
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c1232
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h12
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c1067
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.h17
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c263
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.h14
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c1905
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h269
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.c664
-rw-r--r--drivers/target/iscsi/iscsi_target_seq_pdu_list.h86
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c950
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.h64
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c849
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.h14
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c759
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h41
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.c551
-rw-r--r--drivers/target/iscsi/iscsi_target_tq.h88
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c1819
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h60
-rw-r--r--drivers/target/loopback/Kconfig6
-rw-r--r--drivers/target/loopback/tcm_loop.c220
-rw-r--r--drivers/target/loopback/tcm_loop.h6
-rw-r--r--drivers/target/target_core_alua.c426
-rw-r--r--drivers/target/target_core_cdb.c457
-rw-r--r--drivers/target/target_core_configfs.c677
-rw-r--r--drivers/target/target_core_device.c820
-rw-r--r--drivers/target/target_core_fabric_configfs.c122
-rw-r--r--drivers/target/target_core_fabric_lib.c27
-rw-r--r--drivers/target/target_core_file.c149
-rw-r--r--drivers/target/target_core_file.h4
-rw-r--r--drivers/target/target_core_hba.c37
-rw-r--r--drivers/target/target_core_iblock.c199
-rw-r--r--drivers/target/target_core_iblock.h9
-rw-r--r--drivers/target/target_core_pr.c868
-rw-r--r--drivers/target/target_core_pr.h2
-rw-r--r--drivers/target/target_core_pscsi.c316
-rw-r--r--drivers/target/target_core_pscsi.h4
-rw-r--r--drivers/target/target_core_rd.c483
-rw-r--r--drivers/target/target_core_rd.h4
-rw-r--r--drivers/target/target_core_scdb.c20
-rw-r--r--drivers/target/target_core_scdb.h10
-rw-r--r--drivers/target/target_core_stat.c112
-rw-r--r--drivers/target/target_core_tmr.c191
-rw-r--r--drivers/target/target_core_tpg.c206
-rw-r--r--drivers/target/target_core_transport.c3658
-rw-r--r--drivers/target/target_core_ua.c62
-rw-r--r--drivers/target/tcm_fc/Makefile17
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h32
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c198
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c48
-rw-r--r--drivers/target/tcm_fc/tfc_io.c250
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c24
-rw-r--r--drivers/thermal/Kconfig8
-rw-r--r--drivers/thermal/thermal_sys.c142
-rw-r--r--drivers/tty/bfin_jtag_comm.c2
-rw-r--r--drivers/tty/hvc/Kconfig5
-rw-r--r--drivers/tty/hvc/Makefile3
-rw-r--r--drivers/tty/hvc/hvc_console.c70
-rw-r--r--drivers/tty/hvc/hvc_console.h4
-rw-r--r--drivers/tty/hvc/hvc_vio.c408
-rw-r--r--drivers/tty/hvc/hvsi.c129
-rw-r--r--drivers/tty/hvc/hvsi_lib.c426
-rw-r--r--drivers/tty/moxa.c5
-rw-r--r--drivers/tty/mxser.c4
-rw-r--r--drivers/tty/n_gsm.c427
-rw-r--r--drivers/tty/n_tty.c3
-rw-r--r--drivers/tty/rocket.c2
-rw-r--r--drivers/tty/serial/8250.c5
-rw-r--r--drivers/tty/serial/8250_pci.c246
-rw-r--r--drivers/tty/serial/Kconfig19
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/amba-pl011.c123
-rw-r--r--drivers/tty/serial/atmel_serial.c3
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c18
-rw-r--r--drivers/tty/serial/bfin_5xx.c5
-rw-r--r--drivers/tty/serial/dz.c2
-rw-r--r--drivers/tty/serial/ifx6x60.c1
-rw-r--r--drivers/tty/serial/imx.c166
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c2
-rw-r--r--drivers/tty/serial/m32r_sio.c2
-rw-r--r--drivers/tty/serial/mrst_max3110.c5
-rw-r--r--drivers/tty/serial/of_serial.c43
-rw-r--r--drivers/tty/serial/omap-serial.c6
-rw-r--r--drivers/tty/serial/pch_uart.c11
-rw-r--r--drivers/tty/serial/pxa.c2
-rw-r--r--drivers/tty/serial/s3c2400.c105
-rw-r--r--drivers/tty/serial/s3c2410.c2
-rw-r--r--drivers/tty/serial/s3c2412.c2
-rw-r--r--drivers/tty/serial/s3c2440.c2
-rw-r--r--drivers/tty/serial/s3c24a0.c117
-rw-r--r--drivers/tty/serial/s3c6400.c2
-rw-r--r--drivers/tty/serial/s5pv210.c15
-rw-r--r--drivers/tty/serial/samsung.c36
-rw-r--r--drivers/tty/serial/samsung.h19
-rw-r--r--drivers/tty/serial/sb1250-duart.c2
-rw-r--r--drivers/tty/serial/sh-sci.c757
-rw-r--r--drivers/tty/serial/sh-sci.h434
-rw-r--r--drivers/tty/serial/sunsu.c2
-rw-r--r--drivers/tty/serial/vt8500_serial.c3
-rw-r--r--drivers/tty/serial/zs.c2
-rw-r--r--drivers/tty/tty_io.c4
-rw-r--r--drivers/tty/tty_ldisc.c4
-rw-r--r--drivers/uio/uio_pdrv.c2
-rw-r--r--drivers/uio/uio_pdrv_genirq.c45
-rw-r--r--drivers/usb/atm/ueagle-atm.c38
-rw-r--r--drivers/usb/atm/usbatm.c4
-rw-r--r--drivers/usb/class/usblp.c7
-rw-r--r--drivers/usb/class/usbtmc.c2
-rw-r--r--drivers/usb/core/config.c11
-rw-r--r--drivers/usb/core/driver.c17
-rw-r--r--drivers/usb/core/hcd.c17
-rw-r--r--drivers/usb/core/hub.c16
-rw-r--r--drivers/usb/core/message.c26
-rw-r--r--drivers/usb/gadget/Kconfig315
-rw-r--r--drivers/usb/gadget/Makefile2
-rw-r--r--drivers/usb/gadget/amd5536udc.c18
-rw-r--r--drivers/usb/gadget/at91_udc.c21
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c28
-rw-r--r--drivers/usb/gadget/audio.c1
-rw-r--r--drivers/usb/gadget/cdc2.c1
-rw-r--r--drivers/usb/gadget/ci13xxx_msm.c1
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c62
-rw-r--r--drivers/usb/gadget/composite.c365
-rw-r--r--drivers/usb/gadget/config.c25
-rw-r--r--drivers/usb/gadget/dbgp.c10
-rw-r--r--drivers/usb/gadget/dummy_hcd.c1162
-rw-r--r--drivers/usb/gadget/epautoconf.c132
-rw-r--r--drivers/usb/gadget/ether.c1
-rw-r--r--drivers/usb/gadget/f_acm.c50
-rw-r--r--drivers/usb/gadget/f_audio.c7
-rw-r--r--drivers/usb/gadget/f_ecm.c152
-rw-r--r--drivers/usb/gadget/f_eem.c90
-rw-r--r--drivers/usb/gadget/f_fs.c3
-rw-r--r--drivers/usb/gadget/f_hid.c29
-rw-r--r--drivers/usb/gadget/f_loopback.c72
-rw-r--r--drivers/usb/gadget/f_mass_storage.c33
-rw-r--r--drivers/usb/gadget/f_ncm.c58
-rw-r--r--drivers/usb/gadget/f_obex.c32
-rw-r--r--drivers/usb/gadget/f_phonet.c18
-rw-r--r--drivers/usb/gadget/f_rndis.c153
-rw-r--r--drivers/usb/gadget/f_serial.c32
-rw-r--r--drivers/usb/gadget/f_sourcesink.c71
-rw-r--r--drivers/usb/gadget/f_subset.c95
-rw-r--r--drivers/usb/gadget/f_uvc.c8
-rw-r--r--drivers/usb/gadget/file_storage.c15
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c20
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c62
-rw-r--r--drivers/usb/gadget/fusb300_udc.c212
-rw-r--r--drivers/usb/gadget/g_ffs.c1
-rw-r--r--drivers/usb/gadget/gadget_chips.h178
-rw-r--r--drivers/usb/gadget/gmidi.c9
-rw-r--r--drivers/usb/gadget/goku_udc.c19
-rw-r--r--drivers/usb/gadget/hid.c1
-rw-r--r--drivers/usb/gadget/imx_udc.c20
-rw-r--r--drivers/usb/gadget/inode.c11
-rw-r--r--drivers/usb/gadget/langwell_udc.c32
-rw-r--r--drivers/usb/gadget/m66592-udc.c62
-rw-r--r--drivers/usb/gadget/m66592-udc.h40
-rw-r--r--drivers/usb/gadget/mass_storage.c1
-rw-r--r--drivers/usb/gadget/multi.c1
-rw-r--r--drivers/usb/gadget/mv_udc_core.c21
-rw-r--r--drivers/usb/gadget/ncm.c1
-rw-r--r--drivers/usb/gadget/net2272.c2752
-rw-r--r--drivers/usb/gadget/net2272.h601
-rw-r--r--drivers/usb/gadget/net2280.c74
-rw-r--r--drivers/usb/gadget/nokia.c1
-rw-r--r--drivers/usb/gadget/omap_udc.c22
-rw-r--r--drivers/usb/gadget/pch_udc.c16
-rw-r--r--drivers/usb/gadget/printer.c45
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c19
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c23
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h4
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c71
-rw-r--r--drivers/usb/gadget/r8a66597-udc.h2
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c19
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c17
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c61
-rw-r--r--drivers/usb/gadget/serial.c1
-rw-r--r--drivers/usb/gadget/storage_common.c2
-rw-r--r--drivers/usb/gadget/u_ether.c24
-rw-r--r--drivers/usb/gadget/u_ether.h4
-rw-r--r--drivers/usb/gadget/u_serial.c4
-rw-r--r--drivers/usb/gadget/u_serial.h2
-rw-r--r--drivers/usb/gadget/udc-core.c484
-rw-r--r--drivers/usb/gadget/uvc_queue.c2
-rw-r--r--drivers/usb/gadget/uvc_v4l2.c22
-rw-r--r--drivers/usb/gadget/webcam.c1
-rw-r--r--drivers/usb/gadget/zero.c1
-rw-r--r--drivers/usb/host/ehci-ath79.c12
-rw-r--r--drivers/usb/host/ehci-cns3xxx.c2
-rw-r--r--drivers/usb/host/ehci-fsl.c2
-rw-r--r--drivers/usb/host/ehci-grlib.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c25
-rw-r--r--drivers/usb/host/ehci-hub.c90
-rw-r--r--drivers/usb/host/ehci-ixp4xx.c2
-rw-r--r--drivers/usb/host/ehci-msm.c20
-rw-r--r--drivers/usb/host/ehci-mxc.c1
-rw-r--r--drivers/usb/host/ehci-octeon.c2
-rw-r--r--drivers/usb/host/ehci-omap.c16
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c10
-rw-r--r--drivers/usb/host/ehci-ppc-of.c2
-rw-r--r--drivers/usb/host/ehci-q.c85
-rw-r--r--drivers/usb/host/ehci-s5p.c95
-rw-r--r--drivers/usb/host/ehci-sched.c17
-rw-r--r--drivers/usb/host/ehci-sysfs.c190
-rw-r--r--drivers/usb/host/ehci-w90x900.c2
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c2
-rw-r--r--drivers/usb/host/ehci.h6
-rw-r--r--drivers/usb/host/fhci-hcd.c2
-rw-r--r--drivers/usb/host/isp1760-hcd.c5
-rw-r--r--drivers/usb/host/ohci-ath79.c4
-rw-r--r--drivers/usb/host/ohci-cns3xxx.c2
-rw-r--r--drivers/usb/host/ohci-da8xx.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c4
-rw-r--r--drivers/usb/host/ohci-octeon.c2
-rw-r--r--drivers/usb/host/ohci-ppc-of.c2
-rw-r--r--drivers/usb/host/ohci-ppc-soc.c2
-rw-r--r--drivers/usb/host/ohci-sa1111.c2
-rw-r--r--drivers/usb/host/ohci-sh.c2
-rw-r--r--drivers/usb/host/ohci-sm501.c11
-rw-r--r--drivers/usb/host/ohci-ssb.c2
-rw-r--r--drivers/usb/host/ohci-tmio.c6
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c2
-rw-r--r--drivers/usb/host/pci-quirks.c58
-rw-r--r--drivers/usb/host/r8a66597-hcd.c7
-rw-r--r--drivers/usb/host/r8a66597.h38
-rw-r--r--drivers/usb/host/uhci-grlib.c2
-rw-r--r--drivers/usb/host/whci/init.c2
-rw-r--r--drivers/usb/host/xhci-dbg.c22
-rw-r--r--drivers/usb/host/xhci-mem.c28
-rw-r--r--drivers/usb/host/xhci-pci.c8
-rw-r--r--drivers/usb/host/xhci-ring.c72
-rw-r--r--drivers/usb/host/xhci.c68
-rw-r--r--drivers/usb/host/xhci.h10
-rw-r--r--drivers/usb/image/microtek.c2
-rw-r--r--drivers/usb/misc/appledisplay.c2
-rw-r--r--drivers/usb/misc/ftdi-elan.c28
-rw-r--r--drivers/usb/mon/mon_text.c9
-rw-r--r--drivers/usb/musb/Kconfig78
-rw-r--r--drivers/usb/musb/Makefile4
-rw-r--r--drivers/usb/musb/am35x.c4
-rw-r--r--drivers/usb/musb/blackfin.h2
-rw-r--r--drivers/usb/musb/da8xx.c12
-rw-r--r--drivers/usb/musb/davinci.c5
-rw-r--r--drivers/usb/musb/musb_core.c135
-rw-r--r--drivers/usb/musb/musb_core.h70
-rw-r--r--drivers/usb/musb/musb_gadget.c113
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c4
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/musb/musb_host.h4
-rw-r--r--drivers/usb/musb/musb_virthub.c6
-rw-r--r--drivers/usb/musb/omap2430.c15
-rw-r--r--drivers/usb/musb/tusb6010.c26
-rw-r--r--drivers/usb/musb/tusb6010_omap.c2
-rw-r--r--drivers/usb/otg/isp1301_omap.c4
-rw-r--r--drivers/usb/otg/otg_fsm.c1
-rw-r--r--drivers/usb/otg/twl4030-usb.c2
-rw-r--r--drivers/usb/otg/twl6030-usb.c30
-rw-r--r--drivers/usb/renesas_usbhs/Kconfig13
-rw-r--r--drivers/usb/renesas_usbhs/Makefile2
-rw-r--r--drivers/usb/renesas_usbhs/common.c34
-rw-r--r--drivers/usb/renesas_usbhs/common.h44
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c1016
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h104
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c843
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c298
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h47
-rw-r--r--drivers/usb/serial/ftdi_sio.c27
-rw-r--r--drivers/usb/serial/ftdi_sio.h3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/garmin_gps.c2
-rw-r--r--drivers/usb/serial/option.c12
-rw-r--r--drivers/usb/serial/pl2303.c27
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c1
-rw-r--r--drivers/usb/storage/Kconfig8
-rw-r--r--drivers/usb/storage/ene_ub6250.c1639
-rw-r--r--drivers/usb/storage/realtek_cr.c490
-rw-r--r--drivers/usb/storage/unusual_devs.h10
-rw-r--r--drivers/usb/wusbcore/cbaf.c4
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c2
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c7
-rw-r--r--drivers/uwb/uwbd.c2
-rw-r--r--drivers/uwb/whc-rc.c2
-rw-r--r--drivers/vhost/net.c91
-rw-r--r--drivers/vhost/test.c5
-rw-r--r--drivers/vhost/vhost.c213
-rw-r--r--drivers/vhost/vhost.h34
-rw-r--r--drivers/video/amba-clcd.c2
-rw-r--r--drivers/video/atmel_lcdfb.c4
-rw-r--r--drivers/video/aty/atyfb_base.c7
-rw-r--r--drivers/video/au1100fb.c2
-rw-r--r--drivers/video/backlight/Kconfig15
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/aat2870_bl.c246
-rw-r--r--drivers/video/backlight/adp8860_bl.c3
-rw-r--r--drivers/video/backlight/ams369fg06.c646
-rw-r--r--drivers/video/backlight/ld9040.c17
-rw-r--r--drivers/video/backlight/s6e63m0.c11
-rw-r--r--drivers/video/cobalt_lcdfb.c2
-rw-r--r--drivers/video/controlfb.c4
-rw-r--r--drivers/video/ep93xx-fb.c4
-rw-r--r--drivers/video/fb_defio.c11
-rw-r--r--drivers/video/fsl-diu-fb.c16
-rw-r--r--drivers/video/geode/gx1fb_core.c14
-rw-r--r--drivers/video/hecubafb.c3
-rw-r--r--drivers/video/i810/i810.h2
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c4
-rw-r--r--drivers/video/msm/mdp.c3
-rw-r--r--drivers/video/msm/msm_fb.c7
-rw-r--r--drivers/video/nuc900fb.c2
-rw-r--r--drivers/video/omap2/displays/panel-taal.c55
-rw-r--r--drivers/video/omap2/dss/Kconfig12
-rw-r--r--drivers/video/omap2/dss/core.c21
-rw-r--r--drivers/video/omap2/dss/dispc.c562
-rw-r--r--drivers/video/omap2/dss/display.c57
-rw-r--r--drivers/video/omap2/dss/dpi.c73
-rw-r--r--drivers/video/omap2/dss/dsi.c296
-rw-r--r--drivers/video/omap2/dss/dss.c583
-rw-r--r--drivers/video/omap2/dss/dss.h54
-rw-r--r--drivers/video/omap2/dss/dss_features.c36
-rw-r--r--drivers/video/omap2/dss/dss_features.h7
-rw-r--r--drivers/video/omap2/dss/hdmi.c162
-rw-r--r--drivers/video/omap2/dss/manager.c351
-rw-r--r--drivers/video/omap2/dss/overlay.c27
-rw-r--r--drivers/video/omap2/dss/rfbi.c114
-rw-r--r--drivers/video/omap2/dss/sdi.c40
-rw-r--r--drivers/video/omap2/dss/venc.c183
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c72
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c166
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c34
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h37
-rw-r--r--drivers/video/platinumfb.c5
-rw-r--r--drivers/video/pxa168fb.c2
-rw-r--r--drivers/video/savage/savagefb.h2
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c2
-rw-r--r--drivers/video/sh_mobile_meram.c2
-rw-r--r--drivers/video/sm501fb.c2
-rw-r--r--drivers/video/udlfb.c9
-rw-r--r--drivers/video/vermilion/vermilion.h2
-rw-r--r--drivers/video/vesafb.c1
-rw-r--r--drivers/video/via/viafbdev.c4
-rw-r--r--drivers/virt/Kconfig32
-rw-r--r--drivers/virt/Makefile5
-rw-r--r--drivers/virt/fsl_hypervisor.c938
-rw-r--r--drivers/virtio/Kconfig3
-rw-r--r--drivers/w1/masters/ds1wm.c5
-rw-r--r--drivers/w1/masters/matrox_w1.c2
-rw-r--r--drivers/w1/slaves/w1_therm.c9
-rw-r--r--drivers/w1/w1.c2
-rw-r--r--drivers/w1/w1_family.h3
-rw-r--r--drivers/watchdog/Kconfig39
-rw-r--r--drivers/watchdog/Makefile8
-rw-r--r--drivers/watchdog/at32ap700x_wdt.c2
-rw-r--r--drivers/watchdog/at91sam9_wdt.c21
-rw-r--r--drivers/watchdog/at91sam9_wdt.h37
-rw-r--r--drivers/watchdog/dw_wdt.c376
-rw-r--r--drivers/watchdog/gef_wdt.c2
-rw-r--r--drivers/watchdog/hpwdt.c104
-rw-r--r--drivers/watchdog/iTCO_wdt.c412
-rw-r--r--drivers/watchdog/imx2_wdt.c6
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c3
-rw-r--r--drivers/watchdog/it8712f_wdt.c63
-rw-r--r--drivers/watchdog/it87_wdt.c168
-rw-r--r--drivers/watchdog/mpcore_wdt.c23
-rw-r--r--drivers/watchdog/mtx-1_wdt.c33
-rw-r--r--drivers/watchdog/nv_tco.c8
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c433
-rw-r--r--drivers/watchdog/pc87413_wdt.c96
-rw-r--r--drivers/watchdog/s3c2410_wdt.c10
-rw-r--r--drivers/watchdog/sbc7240_wdt.c2
-rw-r--r--drivers/watchdog/sch311x_wdt.c5
-rw-r--r--drivers/watchdog/shwdt.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c5
-rw-r--r--drivers/watchdog/watchdog_core.c111
-rw-r--r--drivers/watchdog/watchdog_dev.c395
-rw-r--r--drivers/watchdog/watchdog_dev.h33
-rw-r--r--drivers/watchdog/wm831x_wdt.c5
-rw-r--r--drivers/xen/Kconfig76
-rw-r--r--drivers/xen/Makefile4
-rw-r--r--drivers/xen/balloon.c139
-rw-r--r--drivers/xen/events.c7
-rw-r--r--drivers/xen/grant-table.c2
-rw-r--r--drivers/xen/tmem.c170
-rw-r--r--drivers/xen/xen-balloon.c2
-rw-r--r--drivers/xen/xen-pciback/Makefile7
-rw-r--r--drivers/xen/xen-pciback/conf_space.c438
-rw-r--r--drivers/xen/xen-pciback/conf_space.h126
-rw-r--r--drivers/xen/xen-pciback/conf_space_capability.c207
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c386
-rw-r--r--drivers/xen/xen-pciback/conf_space_quirks.c140
-rw-r--r--drivers/xen/xen-pciback/conf_space_quirks.h33
-rw-r--r--drivers/xen/xen-pciback/passthrough.c194
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c1376
-rw-r--r--drivers/xen/xen-pciback/pciback.h183
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c384
-rw-r--r--drivers/xen/xen-pciback/vpci.c259
-rw-r--r--drivers/xen/xen-pciback/xenbus.c748
-rw-r--r--drivers/xen/xen-selfballoon.c486
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c44
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h2
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c9
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c6
3712 files changed, 335139 insertions, 268870 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 3bb154d8c8c..95b9e7eefad 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -94,8 +94,6 @@ source "drivers/memstick/Kconfig"
source "drivers/leds/Kconfig"
-source "drivers/nfc/Kconfig"
-
source "drivers/accessibility/Kconfig"
source "drivers/infiniband/Kconfig"
@@ -114,6 +112,8 @@ source "drivers/uio/Kconfig"
source "drivers/vlynq/Kconfig"
+source "drivers/virtio/Kconfig"
+
source "drivers/xen/Kconfig"
source "drivers/staging/Kconfig"
@@ -126,4 +126,8 @@ source "drivers/hwspinlock/Kconfig"
source "drivers/clocksource/Kconfig"
+source "drivers/iommu/Kconfig"
+
+source "drivers/virt/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 09f3232bcdc..7fa433a7030 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -122,3 +122,8 @@ obj-y += ieee802154/
obj-y += clk/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
+obj-$(CONFIG_NFC) += nfc/
+obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
+
+# Virtualization drivers
+obj-$(CONFIG_VIRT_DRIVERS) += virt/
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 58c3f74bd84..6512b20aecc 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -89,7 +89,7 @@ struct acpi_ac {
unsigned long long state;
};
-#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger);
+#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
#ifdef CONFIG_ACPI_PROCFS_POWER
static const struct file_operations acpi_ac_fops = {
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 73863d86f02..76dc02f1557 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -126,6 +126,12 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
*/
u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE);
+/*
+ * Disable runtime checking and repair of values returned by control methods.
+ * Use only if the repair is causing a problem on a particular machine.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_auto_repair, FALSE);
+
/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
struct acpi_table_fadt acpi_gbl_FADT;
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index c7f743ca395..5552125d834 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -357,6 +357,7 @@ struct acpi_predefined_data {
char *pathname;
const union acpi_predefined_info *predefined;
union acpi_operand_object *parent_package;
+ struct acpi_namespace_node *node;
u32 flags;
u8 node_flags;
};
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 94e73c97cf8..c445cca490e 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -468,6 +468,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_SWS", 0, ACPI_RTYPE_INTEGER}},
{{"_TC1", 0, ACPI_RTYPE_INTEGER}},
{{"_TC2", 0, ACPI_RTYPE_INTEGER}},
+ {{"_TDL", 0, ACPI_RTYPE_INTEGER}},
{{"_TIP", 1, ACPI_RTYPE_INTEGER}},
{{"_TIV", 1, ACPI_RTYPE_INTEGER}},
{{"_TMP", 0, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 9fb03fa8ffd..c845c8089f3 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -193,14 +193,20 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
}
/*
- * 1) We have a return value, but if one wasn't expected, just exit, this is
- * not a problem. For example, if the "Implicit Return" feature is
- * enabled, methods will always return a value.
+ * Return value validation and possible repair.
*
- * 2) If the return value can be of any type, then we cannot perform any
- * validation, exit.
+ * 1) Don't perform return value validation/repair if this feature
+ * has been disabled via a global option.
+ *
+ * 2) We have a return value, but if one wasn't expected, just exit,
+ * this is not a problem. For example, if the "Implicit Return"
+ * feature is enabled, methods will always return a value.
+ *
+ * 3) If the return value can be of any type, then we cannot perform
+ * any validation, just exit.
*/
- if ((!predefined->info.expected_btypes) ||
+ if (acpi_gbl_disable_auto_repair ||
+ (!predefined->info.expected_btypes) ||
(predefined->info.expected_btypes == ACPI_RTYPE_ALL)) {
goto cleanup;
}
@@ -212,6 +218,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
goto cleanup;
}
data->predefined = predefined;
+ data->node = node;
data->node_flags = node->flags;
data->pathname = pathname;
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 973883babee..024c4f263f8 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -503,6 +503,21 @@ acpi_ns_repair_TSS(struct acpi_predefined_data *data,
{
union acpi_operand_object *return_object = *return_object_ptr;
acpi_status status;
+ struct acpi_namespace_node *node;
+
+ /*
+ * We can only sort the _TSS return package if there is no _PSS in the
+ * same scope. This is because if _PSS is present, the ACPI specification
+ * dictates that the _TSS Power Dissipation field is to be ignored, and
+ * therefore some BIOSs leave garbage values in the _TSS Power field(s).
+ * In this case, it is best to just return the _TSS package as-is.
+ * (May, 2011)
+ */
+ status =
+ acpi_ns_get_node(data->node, "^_PSS", ACPI_NS_NO_UPSEARCH, &node);
+ if (ACPI_SUCCESS(status)) {
+ return (AE_OK);
+ }
status = acpi_ns_check_sorted_list(data, return_object, 5, 1,
ACPI_SORT_DESCENDING,
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 48db0944ce4..62365f6075d 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -126,12 +126,29 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
}
/*
- * Originally, we checked the table signature for "SSDT" or "PSDT" here.
- * Next, we added support for OEMx tables, signature "OEM".
- * Valid tables were encountered with a null signature, so we've just
- * given up on validating the signature, since it seems to be a waste
- * of code. The original code was removed (05/2008).
+ * Validate the incoming table signature.
+ *
+ * 1) Originally, we checked the table signature for "SSDT" or "PSDT".
+ * 2) We added support for OEMx tables, signature "OEM".
+ * 3) Valid tables were encountered with a null signature, so we just
+ * gave up on validating the signature, (05/2008).
+ * 4) We encountered non-AML tables such as the MADT, which caused
+ * interpreter errors and kernel faults. So now, we once again allow
+ * only "SSDT", "OEMx", and now, also a null signature. (05/2011).
*/
+ if ((table_desc->pointer->signature[0] != 0x00) &&
+ (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT))
+ && (ACPI_STRNCMP(table_desc->pointer->signature, "OEM", 3))) {
+ ACPI_ERROR((AE_INFO,
+ "Table has invalid signature [%4.4s] (0x%8.8X), must be SSDT or OEMx",
+ acpi_ut_valid_acpi_name(*(u32 *)table_desc->
+ pointer->
+ signature) ? table_desc->
+ pointer->signature : "????",
+ *(u32 *)table_desc->pointer->signature));
+
+ return_ACPI_STATUS(AE_BAD_SIGNATURE);
+ }
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
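The check added above only lets a dynamically loaded table through when its signature is null, "SSDT", or begins with "OEM"; anything else (MADT, FADT, ...) is rejected with AE_BAD_SIGNATURE before it can reach the AML interpreter. A minimal userspace sketch of that acceptance rule (an illustration only, not the ACPICA code, which uses the ACPI_COMPARE_NAME/ACPI_STRNCMP macros shown in the hunk):

    /* Sketch of the signature rule: null, "SSDT", or "OEMx" is loadable. */
    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    static bool signature_is_loadable(const char sig[4])
    {
            if (sig[0] == '\0')
                    return true;            /* null signature */
            if (!memcmp(sig, "SSDT", 4))
                    return true;            /* secondary AML table */
            if (!memcmp(sig, "OEM", 3))
                    return true;            /* OEMx tables */
            return false;                   /* MADT, FACP, ... */
    }

    int main(void)
    {
            const char *sigs[] = { "SSDT", "OEM1", "APIC", "\0\0\0" };
            for (int i = 0; i < 4; i++)
                    printf("%-4.4s -> %s\n", sigs[i],
                           signature_is_loadable(sigs[i]) ? "load" : "reject");
            return 0;
    }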
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index f739a70b1c7..c34aa51af4e 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -10,9 +10,11 @@ config ACPI_APEI
error injection.
config ACPI_APEI_GHES
- tristate "APEI Generic Hardware Error Source"
+ bool "APEI Generic Hardware Error Source"
depends on ACPI_APEI && X86
select ACPI_HED
+ select LLIST
+ select GENERIC_ALLOCATOR
help
Generic Hardware Error Source provides a way to report
platform hardware errors (such as that from chipset). It
@@ -30,6 +32,13 @@ config ACPI_APEI_PCIEAER
PCIe AER errors may be reported via APEI firmware first mode.
Turn on this option to enable the corresponding support.
+config ACPI_APEI_MEMORY_FAILURE
+ bool "APEI memory error recovering support"
+ depends on ACPI_APEI && MEMORY_FAILURE
+ help
+ Memory errors may be reported via APEI firmware first mode.
+ Turn on this option to enable the memory recovering support.
+
config ACPI_APEI_EINJ
tristate "APEI Error INJection (EINJ)"
depends on ACPI_APEI && DEBUG_FS
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 4a904a4bf05..8041248fce9 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -157,9 +157,10 @@ EXPORT_SYMBOL_GPL(apei_exec_noop);
* Interpret the specified action. Go through whole action table,
* execute all instructions belong to the action.
*/
-int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
+ bool optional)
{
- int rc;
+ int rc = -ENOENT;
u32 i, ip;
struct acpi_whea_header *entry;
apei_exec_ins_func_t run;
@@ -198,9 +199,9 @@ rewind:
goto rewind;
}
- return 0;
+ return !optional && rc < 0 ? rc : 0;
}
-EXPORT_SYMBOL_GPL(apei_exec_run);
+EXPORT_SYMBOL_GPL(__apei_exec_run);
typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
struct acpi_whea_header *entry,
@@ -603,3 +604,29 @@ struct dentry *apei_get_debugfs_dir(void)
return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
+
+int apei_osc_setup(void)
+{
+ static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
+ acpi_handle handle;
+ u32 capbuf[3];
+ struct acpi_osc_context context = {
+ .uuid_str = whea_uuid_str,
+ .rev = 1,
+ .cap.length = sizeof(capbuf),
+ .cap.pointer = capbuf,
+ };
+
+ capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
+ capbuf[OSC_SUPPORT_TYPE] = 0;
+ capbuf[OSC_CONTROL_TYPE] = 0;
+
+ if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
+ || ACPI_FAILURE(acpi_run_osc(handle, &context)))
+ return -EIO;
+ else {
+ kfree(context.ret.pointer);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(apei_osc_setup);
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index ef0581f2094..f57050e7a5e 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -50,7 +50,18 @@ static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
return ctx->value;
}
-int apei_exec_run(struct apei_exec_context *ctx, u8 action);
+int __apei_exec_run(struct apei_exec_context *ctx, u8 action, bool optional);
+
+static inline int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+{
+ return __apei_exec_run(ctx, action, 0);
+}
+
+/* It is optional whether the firmware provides the action */
+static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 action)
+{
+ return __apei_exec_run(ctx, action, 1);
+}
/* Common instruction implementation */
@@ -113,4 +124,6 @@ void apei_estatus_print(const char *pfx,
const struct acpi_hest_generic_status *estatus);
int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
+
+int apei_osc_setup(void);
#endif
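With this split, apei_exec_run() fails with -ENOENT when the firmware's action table has no entry for the requested action, while apei_exec_run_optional() treats a missing action as success; BEGIN/END style operations, which some firmware legitimately omits, are switched to the optional variant in the EINJ and ERST hunks below. A small userspace sketch of that rule (an illustration, not the kernel interpreter):

    #include <stdio.h>
    #include <errno.h>
    #include <stdbool.h>

    struct action_entry { int action; int (*run)(void); };

    static int do_begin(void) { puts("BEGIN"); return 0; }

    /* Walk the table; a missing action only counts as an error if required. */
    static int exec_run(const struct action_entry *tbl, int n,
                        int action, bool optional)
    {
            int rc = -ENOENT;

            for (int i = 0; i < n; i++)
                    if (tbl[i].action == action)
                            rc = tbl[i].run();
            return (!optional && rc < 0) ? rc : 0;
    }

    int main(void)
    {
            struct action_entry tbl[] = { { 1, do_begin } };

            printf("%d\n", exec_run(tbl, 1, 1, false)); /* runs BEGIN, 0 */
            printf("%d\n", exec_run(tbl, 1, 2, false)); /* -ENOENT: required */
            printf("%d\n", exec_run(tbl, 1, 2, true));  /* 0: firmware may omit */
            return 0;
    }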
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index f74b2ea11f2..589b96c3870 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -46,7 +46,8 @@
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
* EINJ table through an unpublished extension. Use with caution as
* most will ignore the parameter and make their own choice of address
- * for error injection.
+ * for error injection. This extension is used only if
+ * param_extension module parameter is specified.
*/
struct einj_parameter {
u64 type;
@@ -65,6 +66,9 @@ struct einj_parameter {
((struct acpi_whea_header *)((char *)(tab) + \
sizeof(struct acpi_table_einj)))
+static bool param_extension;
+module_param(param_extension, bool, 0);
+
static struct acpi_table_einj *einj_tab;
static struct apei_resources einj_resources;
@@ -285,7 +289,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
einj_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
+ rc = apei_exec_run_optional(&ctx, ACPI_EINJ_BEGIN_OPERATION);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, type);
@@ -323,7 +327,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
rc = __einj_error_trigger(trigger_paddr);
if (rc)
return rc;
- rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);
+ rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
return rc;
}
@@ -489,14 +493,6 @@ static int __init einj_init(void)
einj_debug_dir, NULL, &error_type_fops);
if (!fentry)
goto err_cleanup;
- fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param1);
- if (!fentry)
- goto err_cleanup;
- fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param2);
- if (!fentry)
- goto err_cleanup;
fentry = debugfs_create_file("error_inject", S_IWUSR,
einj_debug_dir, NULL, &error_inject_fops);
if (!fentry)
@@ -513,12 +509,23 @@ static int __init einj_init(void)
rc = apei_exec_pre_map_gars(&ctx);
if (rc)
goto err_release;
- param_paddr = einj_get_parameter_address();
- if (param_paddr) {
- einj_param = ioremap(param_paddr, sizeof(*einj_param));
- rc = -ENOMEM;
- if (!einj_param)
- goto err_unmap;
+ if (param_extension) {
+ param_paddr = einj_get_parameter_address();
+ if (param_paddr) {
+ einj_param = ioremap(param_paddr, sizeof(*einj_param));
+ rc = -ENOMEM;
+ if (!einj_param)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param1);
+ if (!fentry)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param2);
+ if (!fentry)
+ goto err_unmap;
+ } else
+ pr_warn(EINJ_PFX "Parameter extension is not supported.\n");
}
pr_info(EINJ_PFX "Error INJection is initialized.\n");
@@ -526,6 +533,8 @@ static int __init einj_init(void)
return 0;
err_unmap:
+ if (einj_param)
+ iounmap(einj_param);
apei_exec_post_unmap_gars(&ctx);
err_release:
apei_resources_release(&einj_resources);
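After this change the unpublished parameter extension, and the param1/param2 debugfs files that drive it, are only set up when explicitly requested, e.g. with "modprobe einj param_extension=1" (assuming EINJ is built as the einj module). Without the parameter the driver behaves as before, except that the two extra files no longer appear (typically under /sys/kernel/debug/apei/einj/).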
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index a4cfb64c86a..903549df809 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -33,7 +33,7 @@
#define ERST_DBG_PFX "ERST DBG: "
-#define ERST_DBG_RECORD_LEN_MAX 4096
+#define ERST_DBG_RECORD_LEN_MAX 0x4000
static void *erst_dbg_buf;
static unsigned int erst_dbg_buf_len;
@@ -213,6 +213,10 @@ static struct miscdevice erst_dbg_dev = {
static __init int erst_dbg_init(void)
{
+ if (erst_disable) {
+ pr_info(ERST_DBG_PFX "ERST support is disabled.\n");
+ return -ENODEV;
+ }
return misc_register(&erst_dbg_dev);
}
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index e6cef8e1b53..2ca59dc69f7 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -642,7 +642,7 @@ static int __erst_write_to_storage(u64 offset)
int rc;
erst_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_WRITE);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, offset);
@@ -666,7 +666,7 @@ static int __erst_write_to_storage(u64 offset)
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
if (rc)
return rc;
@@ -681,7 +681,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
int rc;
erst_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_READ);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, offset);
@@ -709,7 +709,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
if (rc)
return rc;
@@ -724,7 +724,7 @@ static int __erst_clear_from_storage(u64 record_id)
int rc;
erst_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_CLEAR);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, record_id);
@@ -748,7 +748,7 @@ static int __erst_clear_from_storage(u64 record_id)
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
if (rc)
return rc;
@@ -932,8 +932,11 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time);
-static u64 erst_writer(enum pstore_type_id type, size_t size);
+ struct timespec *time, struct pstore_info *psi);
+static u64 erst_writer(enum pstore_type_id type, unsigned int part,
+ size_t size, struct pstore_info *psi);
+static int erst_clearer(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi);
static struct pstore_info erst_info = {
.owner = THIS_MODULE,
@@ -942,7 +945,7 @@ static struct pstore_info erst_info = {
.close = erst_close_pstore,
.read = erst_reader,
.write = erst_writer,
- .erase = erst_clear
+ .erase = erst_clearer
};
#define CPER_CREATOR_PSTORE \
@@ -983,7 +986,7 @@ static int erst_close_pstore(struct pstore_info *psi)
}
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time)
+ struct timespec *time, struct pstore_info *psi)
{
int rc;
ssize_t len = 0;
@@ -1037,7 +1040,8 @@ out:
return (rc < 0) ? rc : (len - sizeof(*rcd));
}
-static u64 erst_writer(enum pstore_type_id type, size_t size)
+static u64 erst_writer(enum pstore_type_id type, unsigned int part,
+ size_t size, struct pstore_info *psi)
{
struct cper_pstore_record *rcd = (struct cper_pstore_record *)
(erst_info.buf - sizeof(*rcd));
@@ -1080,6 +1084,12 @@ static u64 erst_writer(enum pstore_type_id type, size_t size)
return rcd->hdr.record_id;
}
+static int erst_clearer(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi)
+{
+ return erst_clear(id);
+}
+
static int __init erst_init(void)
{
int rc = 0;
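erst_reader(), erst_writer() and the new erst_clearer() adapt ERST to a pstore callback API that now passes the owning struct pstore_info (and, for writes, a part index) to every hook; erst_clearer() is only a signature adapter around the existing erst_clear(). The shape of that adaptation, as a standalone sketch (hypothetical types, not the real struct pstore_info layout):

    #include <stdio.h>

    struct store_info;                      /* opaque context, as in pstore */
    struct store_ops {
            int (*erase)(unsigned long long id, struct store_info *psi);
    };

    /* Existing helper with the old, narrower signature. */
    static int store_clear(unsigned long long id)
    {
            printf("clearing record %llu\n", id);
            return 0;
    }

    /* Thin adapter, in the same spirit as erst_clearer() above. */
    static int store_clearer(unsigned long long id, struct store_info *psi)
    {
            (void)psi;                      /* context unused by the old helper */
            return store_clear(id);
    }

    static const struct store_ops ops = { .erase = store_clearer };

    int main(void)
    {
            return ops.erase(42, NULL);
    }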
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f703b288115..0784f99a466 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -12,7 +12,7 @@
* For more information about Generic Hardware Error Source, please
* refer to ACPI Specification version 4.0, section 17.3.2.6
*
- * Copyright 2010 Intel Corp.
+ * Copyright 2010,2011 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
@@ -42,6 +42,9 @@
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
+#include <linux/irq_work.h>
+#include <linux/llist.h>
+#include <linux/genalloc.h>
#include <acpi/apei.h>
#include <acpi/atomicio.h>
#include <acpi/hed.h>
@@ -53,6 +56,30 @@
#define GHES_PFX "GHES: "
#define GHES_ESTATUS_MAX_SIZE 65536
+#define GHES_ESOURCE_PREALLOC_MAX_SIZE 65536
+
+#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
+
+/* This is just an estimation for memory pool allocation */
+#define GHES_ESTATUS_CACHE_AVG_SIZE 512
+
+#define GHES_ESTATUS_CACHES_SIZE 4
+
+#define GHES_ESTATUS_IN_CACHE_MAX_NSEC 10000000000ULL
+/* Prevent too many caches from being allocated because of RCU */
+#define GHES_ESTATUS_CACHE_ALLOCED_MAX (GHES_ESTATUS_CACHES_SIZE * 3 / 2)
+
+#define GHES_ESTATUS_CACHE_LEN(estatus_len) \
+ (sizeof(struct ghes_estatus_cache) + (estatus_len))
+#define GHES_ESTATUS_FROM_CACHE(estatus_cache) \
+ ((struct acpi_hest_generic_status *) \
+ ((struct ghes_estatus_cache *)(estatus_cache) + 1))
+
+#define GHES_ESTATUS_NODE_LEN(estatus_len) \
+ (sizeof(struct ghes_estatus_node) + (estatus_len))
+#define GHES_ESTATUS_FROM_NODE(estatus_node) \
+ ((struct acpi_hest_generic_status *) \
+ ((struct ghes_estatus_node *)(estatus_node) + 1))
/*
* One struct ghes is created for each generic hardware error source.
@@ -77,6 +104,22 @@ struct ghes {
};
};
+struct ghes_estatus_node {
+ struct llist_node llnode;
+ struct acpi_hest_generic *generic;
+};
+
+struct ghes_estatus_cache {
+ u32 estatus_len;
+ atomic_t count;
+ struct acpi_hest_generic *generic;
+ unsigned long long time_in;
+ struct rcu_head rcu;
+};
+
+int ghes_disable;
+module_param_named(disable, ghes_disable, bool, 0);
+
static int ghes_panic_timeout __read_mostly = 30;
/*
@@ -121,6 +164,22 @@ static struct vm_struct *ghes_ioremap_area;
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
+/*
+ * printk is not safe in NMI context. So in NMI handler, we allocate
+ * required memory from lock-less memory allocator
+ * (ghes_estatus_pool), save estatus into it, put them into lock-less
+ * list (ghes_estatus_llist), then delay printk into IRQ context via
+ * irq_work (ghes_proc_irq_work). ghes_estatus_pool_size_request records
+ * the pool size required by all NMI error sources.
+ */
+static struct gen_pool *ghes_estatus_pool;
+static unsigned long ghes_estatus_pool_size_request;
+static struct llist_head ghes_estatus_llist;
+static struct irq_work ghes_proc_irq_work;
+
+struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
+static atomic_t ghes_estatus_cache_alloced;
+
static int ghes_ioremap_init(void)
{
ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
@@ -180,6 +239,55 @@ static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
__flush_tlb_one(vaddr);
}
+static int ghes_estatus_pool_init(void)
+{
+ ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
+ if (!ghes_estatus_pool)
+ return -ENOMEM;
+ return 0;
+}
+
+static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk,
+ void *data)
+{
+ free_page(chunk->start_addr);
+}
+
+static void ghes_estatus_pool_exit(void)
+{
+ gen_pool_for_each_chunk(ghes_estatus_pool,
+ ghes_estatus_pool_free_chunk_page, NULL);
+ gen_pool_destroy(ghes_estatus_pool);
+}
+
+static int ghes_estatus_pool_expand(unsigned long len)
+{
+ unsigned long i, pages, size, addr;
+ int ret;
+
+ ghes_estatus_pool_size_request += PAGE_ALIGN(len);
+ size = gen_pool_size(ghes_estatus_pool);
+ if (size >= ghes_estatus_pool_size_request)
+ return 0;
+ pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
+ for (i = 0; i < pages; i++) {
+ addr = __get_free_page(GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+ ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ghes_estatus_pool_shrink(unsigned long len)
+{
+ ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
+}
+
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
struct ghes *ghes;
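The pool helpers above grow a lock-less gen_pool, page by page, toward the sum of every NMI error source's preallocation hint, so estatus nodes and caches can be carved out in NMI context where the regular allocators are off limits. The same grow-to-requested-size idea in plain userspace C (the kernel uses gen_pool_create/gen_pool_add as shown above; the helper names here are made up for illustration):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static unsigned long pool_size;          /* what the pool currently holds */
    static unsigned long pool_size_request;  /* sum of all sources' needs */

    /* Grow the pool in whole pages until it covers the new request. */
    static int pool_expand(unsigned long len)
    {
            pool_size_request += PAGE_ALIGN(len);
            while (pool_size < pool_size_request)
                    pool_size += PAGE_SIZE;  /* kernel: __get_free_page + gen_pool_add */
            return 0;
    }

    /* A removed source gives its share back; the memory itself is kept. */
    static void pool_shrink(unsigned long len)
    {
            pool_size_request -= PAGE_ALIGN(len);
    }

    int main(void)
    {
            pool_expand(5000);               /* source A: rounded up to 2 pages */
            pool_expand(100);                /* source B: one more page */
            printf("request=%lu size=%lu\n", pool_size_request, pool_size);
            pool_shrink(100);
            printf("request=%lu size=%lu\n", pool_size_request, pool_size);
            return 0;
    }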
@@ -341,43 +449,196 @@ static void ghes_clear_estatus(struct ghes *ghes)
ghes->flags &= ~GHES_TO_CLEAR;
}
-static void ghes_do_proc(struct ghes *ghes)
+static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
{
- int sev, processed = 0;
+ int sev, sec_sev;
struct acpi_hest_generic_data *gdata;
- sev = ghes_severity(ghes->estatus->error_severity);
- apei_estatus_for_each_section(ghes->estatus, gdata) {
-#ifdef CONFIG_X86_MCE
+ sev = ghes_severity(estatus->error_severity);
+ apei_estatus_for_each_section(estatus, gdata) {
+ sec_sev = ghes_severity(gdata->error_severity);
if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
CPER_SEC_PLATFORM_MEM)) {
- apei_mce_report_mem_error(
- sev == GHES_SEV_CORRECTED,
- (struct cper_sec_mem_err *)(gdata+1));
- processed = 1;
- }
+ struct cper_sec_mem_err *mem_err;
+ mem_err = (struct cper_sec_mem_err *)(gdata+1);
+#ifdef CONFIG_X86_MCE
+ apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
+ mem_err);
#endif
+#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
+ if (sev == GHES_SEV_RECOVERABLE &&
+ sec_sev == GHES_SEV_RECOVERABLE &&
+ mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
+ unsigned long pfn;
+ pfn = mem_err->physical_addr >> PAGE_SHIFT;
+ memory_failure_queue(pfn, 0, 0);
+ }
+#endif
+ }
}
}
-static void ghes_print_estatus(const char *pfx, struct ghes *ghes)
+static void __ghes_print_estatus(const char *pfx,
+ const struct acpi_hest_generic *generic,
+ const struct acpi_hest_generic_status *estatus)
{
- /* Not more than 2 messages every 5 seconds */
- static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2);
-
if (pfx == NULL) {
- if (ghes_severity(ghes->estatus->error_severity) <=
+ if (ghes_severity(estatus->error_severity) <=
GHES_SEV_CORRECTED)
pfx = KERN_WARNING HW_ERR;
else
pfx = KERN_ERR HW_ERR;
}
- if (__ratelimit(&ratelimit)) {
- printk(
- "%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
- pfx, ghes->generic->header.source_id);
- apei_estatus_print(pfx, ghes->estatus);
+ printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
+ pfx, generic->header.source_id);
+ apei_estatus_print(pfx, estatus);
+}
+
+static int ghes_print_estatus(const char *pfx,
+ const struct acpi_hest_generic *generic,
+ const struct acpi_hest_generic_status *estatus)
+{
+ /* Not more than 2 messages every 5 seconds */
+ static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
+ static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
+ struct ratelimit_state *ratelimit;
+
+ if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
+ ratelimit = &ratelimit_corrected;
+ else
+ ratelimit = &ratelimit_uncorrected;
+ if (__ratelimit(ratelimit)) {
+ __ghes_print_estatus(pfx, generic, estatus);
+ return 1;
}
+ return 0;
+}
+
+/*
+ * GHES error status reporting throttle, to report more kinds of
+ * errors instead of just the most frequently occurring ones.
+ */
+static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
+{
+ u32 len;
+ int i, cached = 0;
+ unsigned long long now;
+ struct ghes_estatus_cache *cache;
+ struct acpi_hest_generic_status *cache_estatus;
+
+ len = apei_estatus_len(estatus);
+ rcu_read_lock();
+ for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
+ cache = rcu_dereference(ghes_estatus_caches[i]);
+ if (cache == NULL)
+ continue;
+ if (len != cache->estatus_len)
+ continue;
+ cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
+ if (memcmp(estatus, cache_estatus, len))
+ continue;
+ atomic_inc(&cache->count);
+ now = sched_clock();
+ if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
+ cached = 1;
+ break;
+ }
+ rcu_read_unlock();
+ return cached;
+}
+
+static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
+ struct acpi_hest_generic *generic,
+ struct acpi_hest_generic_status *estatus)
+{
+ int alloced;
+ u32 len, cache_len;
+ struct ghes_estatus_cache *cache;
+ struct acpi_hest_generic_status *cache_estatus;
+
+ alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
+ if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
+ atomic_dec(&ghes_estatus_cache_alloced);
+ return NULL;
+ }
+ len = apei_estatus_len(estatus);
+ cache_len = GHES_ESTATUS_CACHE_LEN(len);
+ cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
+ if (!cache) {
+ atomic_dec(&ghes_estatus_cache_alloced);
+ return NULL;
+ }
+ cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
+ memcpy(cache_estatus, estatus, len);
+ cache->estatus_len = len;
+ atomic_set(&cache->count, 0);
+ cache->generic = generic;
+ cache->time_in = sched_clock();
+ return cache;
+}
+
+static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
+{
+ u32 len;
+
+ len = apei_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
+ len = GHES_ESTATUS_CACHE_LEN(len);
+ gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
+ atomic_dec(&ghes_estatus_cache_alloced);
+}
+
+static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
+{
+ struct ghes_estatus_cache *cache;
+
+ cache = container_of(head, struct ghes_estatus_cache, rcu);
+ ghes_estatus_cache_free(cache);
+}
+
+static void ghes_estatus_cache_add(
+ struct acpi_hest_generic *generic,
+ struct acpi_hest_generic_status *estatus)
+{
+ int i, slot = -1, count;
+ unsigned long long now, duration, period, max_period = 0;
+ struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;
+
+ new_cache = ghes_estatus_cache_alloc(generic, estatus);
+ if (new_cache == NULL)
+ return;
+ rcu_read_lock();
+ now = sched_clock();
+ for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
+ cache = rcu_dereference(ghes_estatus_caches[i]);
+ if (cache == NULL) {
+ slot = i;
+ slot_cache = NULL;
+ break;
+ }
+ duration = now - cache->time_in;
+ if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
+ slot = i;
+ slot_cache = cache;
+ break;
+ }
+ count = atomic_read(&cache->count);
+ period = duration;
+ do_div(period, (count + 1));
+ if (period > max_period) {
+ max_period = period;
+ slot = i;
+ slot_cache = cache;
+ }
+ }
+ /* new_cache must be put into array after its contents are written */
+ smp_wmb();
+ if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
+ slot_cache, new_cache) == slot_cache) {
+ if (slot_cache)
+ call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
+ } else
+ ghes_estatus_cache_free(new_cache);
+ rcu_read_unlock();
}
static int ghes_proc(struct ghes *ghes)
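ghes_estatus_cache_add() above picks its victim slot by favouring empty slots, then expired entries, then the entry with the largest average interval between hits (duration / (count + 1)), so a rarely repeated error is displaced first and frequent duplicates stay suppressed. A standalone sketch of just that slot-selection policy (plain C, without the RCU/cmpxchg publication step shown in the hunk):

    #include <stdio.h>

    #define CACHES_SIZE 4
    #define IN_CACHE_MAX_NSEC 10000000000ULL      /* 10 s, as in the patch */

    struct cache_entry {
            int used;                    /* 0 = empty slot */
            unsigned long long time_in;  /* when the entry was cached */
            unsigned long long count;    /* hits since then */
    };

    /* Return the slot a new entry should replace. */
    static int pick_victim(const struct cache_entry *c, unsigned long long now)
    {
            int slot = -1;
            unsigned long long period, max_period = 0;

            for (int i = 0; i < CACHES_SIZE; i++) {
                    if (!c[i].used)
                            return i;                    /* empty slot wins */
                    unsigned long long duration = now - c[i].time_in;
                    if (duration >= IN_CACHE_MAX_NSEC)
                            return i;                    /* expired entry */
                    period = duration / (c[i].count + 1); /* ns per report */
                    if (period > max_period) {            /* least "busy" entry */
                            max_period = period;
                            slot = i;
                    }
            }
            return slot;
    }

    int main(void)
    {
            /* Slot 1 fired 100 times in 2 s, slot 0 once in 4 s: evict slot 0. */
            struct cache_entry c[CACHES_SIZE] = {
                    { 1, 1000000000ULL, 1 },
                    { 1, 3000000000ULL, 100 },
                    { 1, 4000000000ULL, 10 },
                    { 1, 4500000000ULL, 3 },
            };
            printf("victim slot: %d\n", pick_victim(c, 5000000000ULL));
            return 0;
    }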
@@ -387,9 +648,11 @@ static int ghes_proc(struct ghes *ghes)
rc = ghes_read_estatus(ghes, 0);
if (rc)
goto out;
- ghes_print_estatus(NULL, ghes);
- ghes_do_proc(ghes);
-
+ if (!ghes_estatus_cached(ghes->estatus)) {
+ if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
+ ghes_estatus_cache_add(ghes->generic, ghes->estatus);
+ }
+ ghes_do_proc(ghes->estatus);
out:
ghes_clear_estatus(ghes);
return 0;
@@ -447,6 +710,45 @@ static int ghes_notify_sci(struct notifier_block *this,
return ret;
}
+static void ghes_proc_in_irq(struct irq_work *irq_work)
+{
+ struct llist_node *llnode, *next, *tail = NULL;
+ struct ghes_estatus_node *estatus_node;
+ struct acpi_hest_generic *generic;
+ struct acpi_hest_generic_status *estatus;
+ u32 len, node_len;
+
+ /*
+ * Because the time order of estatus in list is reversed,
+ * revert it back to proper order.
+ */
+ llnode = llist_del_all(&ghes_estatus_llist);
+ while (llnode) {
+ next = llnode->next;
+ llnode->next = tail;
+ tail = llnode;
+ llnode = next;
+ }
+ llnode = tail;
+ while (llnode) {
+ next = llnode->next;
+ estatus_node = llist_entry(llnode, struct ghes_estatus_node,
+ llnode);
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ len = apei_estatus_len(estatus);
+ node_len = GHES_ESTATUS_NODE_LEN(len);
+ ghes_do_proc(estatus);
+ if (!ghes_estatus_cached(estatus)) {
+ generic = estatus_node->generic;
+ if (ghes_print_estatus(NULL, generic, estatus))
+ ghes_estatus_cache_add(generic, estatus);
+ }
+ gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
+ node_len);
+ llnode = next;
+ }
+}
+
static int ghes_notify_nmi(struct notifier_block *this,
unsigned long cmd, void *data)
{
@@ -476,7 +778,8 @@ static int ghes_notify_nmi(struct notifier_block *this,
if (sev_global >= GHES_SEV_PANIC) {
oops_begin();
- ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global);
+ __ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
+ ghes_global->estatus);
/* reboot to log the error! */
if (panic_timeout == 0)
panic_timeout = ghes_panic_timeout;
@@ -484,12 +787,34 @@ static int ghes_notify_nmi(struct notifier_block *this,
}
list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ u32 len, node_len;
+ struct ghes_estatus_node *estatus_node;
+ struct acpi_hest_generic_status *estatus;
+#endif
if (!(ghes->flags & GHES_TO_CLEAR))
continue;
- /* Do not print estatus because printk is not NMI safe */
- ghes_do_proc(ghes);
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ if (ghes_estatus_cached(ghes->estatus))
+ goto next;
+ /* Save estatus for further processing in IRQ context */
+ len = apei_estatus_len(ghes->estatus);
+ node_len = GHES_ESTATUS_NODE_LEN(len);
+ estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
+ node_len);
+ if (estatus_node) {
+ estatus_node->generic = ghes->generic;
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ memcpy(estatus, ghes->estatus, len);
+ llist_add(&estatus_node->llnode, &ghes_estatus_llist);
+ }
+next:
+#endif
ghes_clear_estatus(ghes);
}
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ irq_work_queue(&ghes_proc_irq_work);
+#endif
out:
raw_spin_unlock(&ghes_nmi_lock);
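In NMI context the handler above can neither printk nor use the regular allocators, so it copies each estatus into a node carved from the lock-less gen_pool, pushes the node onto a lock-less list, and queues an irq_work; ghes_proc_in_irq() later detaches the whole list, reverses it back into arrival order (llist_add builds a LIFO), and does the printing and memory_failure handling from IRQ context. A userspace sketch of the push-then-reverse-then-drain pattern (the kernel uses llist_add/llist_del_all and irq_work_queue; the list here is hand-rolled for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int seq; struct node *next; };

    static struct node *head;                   /* kernel: struct llist_head */

    /* "NMI side": in the kernel the push is a single atomic exchange. */
    static void push(int seq)
    {
            struct node *n = malloc(sizeof(*n)); /* kernel: gen_pool_alloc() */
            n->seq = seq;
            n->next = head;
            head = n;
    }

    /* "IRQ side": detach everything, restore arrival order, then process. */
    static void drain(void)
    {
            struct node *llnode = head, *next, *tail = NULL;

            head = NULL;                        /* kernel: llist_del_all() */
            while (llnode) {                    /* list is newest-first: reverse */
                    next = llnode->next;
                    llnode->next = tail;
                    tail = llnode;
                    llnode = next;
            }
            for (llnode = tail; llnode; llnode = next) {
                    next = llnode->next;
                    printf("processing estatus #%d\n", llnode->seq);
                    free(llnode);               /* kernel: gen_pool_free() */
            }
    }

    int main(void)
    {
            push(1); push(2); push(3);          /* three NMIs hit */
            drain();                            /* irq_work runs: prints 1, 2, 3 */
            return 0;
    }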
@@ -504,10 +829,26 @@ static struct notifier_block ghes_notifier_nmi = {
.notifier_call = ghes_notify_nmi,
};
+static unsigned long ghes_esource_prealloc_size(
+ const struct acpi_hest_generic *generic)
+{
+ unsigned long block_length, prealloc_records, prealloc_size;
+
+ block_length = min_t(unsigned long, generic->error_block_length,
+ GHES_ESTATUS_MAX_SIZE);
+ prealloc_records = max_t(unsigned long,
+ generic->records_to_preallocate, 1);
+ prealloc_size = min_t(unsigned long, block_length * prealloc_records,
+ GHES_ESOURCE_PREALLOC_MAX_SIZE);
+
+ return prealloc_size;
+}
+
static int __devinit ghes_probe(struct platform_device *ghes_dev)
{
struct acpi_hest_generic *generic;
struct ghes *ghes = NULL;
+ unsigned long len;
int rc = -EINVAL;
generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
@@ -573,6 +914,8 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
mutex_unlock(&ghes_list_mutex);
break;
case ACPI_HEST_NOTIFY_NMI:
+ len = ghes_esource_prealloc_size(generic);
+ ghes_estatus_pool_expand(len);
mutex_lock(&ghes_list_mutex);
if (list_empty(&ghes_nmi))
register_die_notifier(&ghes_notifier_nmi);
@@ -597,6 +940,7 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
{
struct ghes *ghes;
struct acpi_hest_generic *generic;
+ unsigned long len;
ghes = platform_get_drvdata(ghes_dev);
generic = ghes->generic;
@@ -627,6 +971,8 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
* freed after NMI handler finishes.
*/
synchronize_rcu();
+ len = ghes_esource_prealloc_size(generic);
+ ghes_estatus_pool_shrink(len);
break;
default:
BUG();
@@ -662,15 +1008,43 @@ static int __init ghes_init(void)
return -EINVAL;
}
+ if (ghes_disable) {
+ pr_info(GHES_PFX "GHES is not enabled!\n");
+ return -EINVAL;
+ }
+
+ init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
+
rc = ghes_ioremap_init();
if (rc)
goto err;
- rc = platform_driver_register(&ghes_platform_driver);
+ rc = ghes_estatus_pool_init();
if (rc)
goto err_ioremap_exit;
+ rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
+ GHES_ESTATUS_CACHE_ALLOCED_MAX);
+ if (rc)
+ goto err_pool_exit;
+
+ rc = platform_driver_register(&ghes_platform_driver);
+ if (rc)
+ goto err_pool_exit;
+
+ rc = apei_osc_setup();
+ if (rc == 0 && osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
+ else if (rc == 0 && !osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
+ else if (rc && osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
+ else
+ pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
+
return 0;
+err_pool_exit:
+ ghes_estatus_pool_exit();
err_ioremap_exit:
ghes_ioremap_exit();
err:
@@ -680,6 +1054,7 @@ err:
static void __exit ghes_exit(void)
{
platform_driver_unregister(&ghes_platform_driver);
+ ghes_estatus_pool_exit();
ghes_ioremap_exit();
}
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index abda3786a5d..05fee06f4d6 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -139,13 +139,23 @@ static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
{
struct platform_device *ghes_dev;
struct ghes_arr *ghes_arr = data;
- int rc;
+ int rc, i;
if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
return 0;
if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
return 0;
+ for (i = 0; i < ghes_arr->count; i++) {
+ struct acpi_hest_header *hdr;
+ ghes_dev = ghes_arr->ghes_devs[i];
+ hdr = *(struct acpi_hest_header **)ghes_dev->dev.platform_data;
+ if (hdr->source_id == hest_hdr->source_id) {
+ pr_warning(FW_WARN HEST_PFX "Duplicated hardware error source ID: %d.\n",
+ hdr->source_id);
+ return -EIO;
+ }
+ }
ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
if (!ghes_dev)
return -ENOMEM;
@@ -221,16 +231,17 @@ void __init acpi_hest_init(void)
goto err;
}
- rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
- if (rc)
- goto err;
-
- rc = hest_ghes_dev_register(ghes_count);
- if (!rc) {
- pr_info(HEST_PFX "Table parsing has been initialized.\n");
- return;
+ if (!ghes_disable) {
+ rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
+ if (rc)
+ goto err;
+ rc = hest_ghes_dev_register(ghes_count);
+ if (rc)
+ goto err;
}
+ pr_info(HEST_PFX "Table parsing has been initialized.\n");
+ return;
err:
hest_disable = 1;
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index fcc13ac0aa1..7711d94a040 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -55,6 +55,9 @@
#define ACPI_BATTERY_NOTIFY_INFO 0x81
#define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82
+/* Battery power unit: 0 means mW, 1 means mA */
+#define ACPI_BATTERY_POWER_UNIT_MA 1
+
#define _COMPONENT ACPI_BATTERY_COMPONENT
ACPI_MODULE_NAME("battery");
@@ -91,16 +94,12 @@ MODULE_DEVICE_TABLE(acpi, battery_device_ids);
enum {
ACPI_BATTERY_ALARM_PRESENT,
ACPI_BATTERY_XINFO_PRESENT,
- /* For buggy DSDTs that report negative 16-bit values for either
- * charging or discharging current and/or report 0 as 65536
- * due to bad math.
- */
- ACPI_BATTERY_QUIRK_SIGNED16_CURRENT,
ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY,
};
struct acpi_battery {
struct mutex lock;
+ struct mutex sysfs_lock;
struct power_supply bat;
struct acpi_device *device;
struct notifier_block pm_nb;
@@ -132,7 +131,7 @@ struct acpi_battery {
unsigned long flags;
};
-#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat);
+#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat)
inline int acpi_battery_present(struct acpi_battery *battery)
{
@@ -301,7 +300,8 @@ static enum power_supply_property energy_battery_props[] = {
#ifdef CONFIG_ACPI_PROCFS_POWER
inline char *acpi_battery_units(struct acpi_battery *battery)
{
- return (battery->power_unit)?"mA":"mW";
+ return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
+ "mA" : "mW";
}
#endif
@@ -461,9 +461,17 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
battery->update_time = jiffies;
kfree(buffer.pointer);
- if (test_bit(ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, &battery->flags) &&
- battery->rate_now != -1)
+ /* For buggy DSDTs that report negative 16-bit values for either
+ * charging or discharging current and/or report 0 as 65536
+ * due to bad math.
+ */
+ if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA &&
+ battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN &&
+ (s16)(battery->rate_now) < 0) {
battery->rate_now = abs((s16)battery->rate_now);
+ printk_once(KERN_WARNING FW_BUG "battery: (dis)charge rate"
+ " invalid.\n");
+ }
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
&& battery->capacity_now >= 0 && battery->capacity_now <= 100)
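The rewritten quirk above no longer depends on a DMI vendor match: whenever the battery reports in mA and the 16-bit rate is negative when reinterpreted as s16, the value is folded back to its absolute value and a one-time FW_BUG warning is printed. For instance, a buggy DSDT returning 65535 for a 1 mA discharge reads as (s16)65535 == -1 and is corrected to 1; a two-line userspace illustration of the cast:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t rate_now = 65535;              /* what buggy firmware reports */
            if ((int16_t)rate_now < 0)              /* negative when seen as s16? */
                    rate_now = -(int16_t)rate_now;  /* fold back: 65535 -> 1 */
            printf("%u\n", rate_now);               /* prints 1 */
            return 0;
    }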
@@ -544,7 +552,7 @@ static int sysfs_add_battery(struct acpi_battery *battery)
{
int result;
- if (battery->power_unit) {
+ if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) {
battery->bat.properties = charge_battery_props;
battery->bat.num_properties =
ARRAY_SIZE(charge_battery_props);
@@ -566,18 +574,16 @@ static int sysfs_add_battery(struct acpi_battery *battery)
static void sysfs_remove_battery(struct acpi_battery *battery)
{
- if (!battery->bat.dev)
+ mutex_lock(&battery->sysfs_lock);
+ if (!battery->bat.dev) {
+ mutex_unlock(&battery->sysfs_lock);
return;
+ }
+
device_remove_file(battery->bat.dev, &alarm_attr);
power_supply_unregister(&battery->bat);
battery->bat.dev = NULL;
-}
-
-static void acpi_battery_quirks(struct acpi_battery *battery)
-{
- if (dmi_name_in_vendors("Acer") && battery->power_unit) {
- set_bit(ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, &battery->flags);
- }
+ mutex_unlock(&battery->sysfs_lock);
}
/*
@@ -592,7 +598,7 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
*
* Handle this correctly so that they won't break userspace.
*/
-static void acpi_battery_quirks2(struct acpi_battery *battery)
+static void acpi_battery_quirks(struct acpi_battery *battery)
{
if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
return ;
@@ -623,13 +629,15 @@ static int acpi_battery_update(struct acpi_battery *battery)
result = acpi_battery_get_info(battery);
if (result)
return result;
- acpi_battery_quirks(battery);
acpi_battery_init_alarm(battery);
}
- if (!battery->bat.dev)
- sysfs_add_battery(battery);
+ if (!battery->bat.dev) {
+ result = sysfs_add_battery(battery);
+ if (result)
+ return result;
+ }
result = acpi_battery_get_state(battery);
- acpi_battery_quirks2(battery);
+ acpi_battery_quirks(battery);
return result;
}
@@ -863,7 +871,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
}, \
}
-static struct battery_file {
+static const struct battery_file {
struct file_operations ops;
mode_t mode;
const char *name;
@@ -948,9 +956,12 @@ static int battery_notify(struct notifier_block *nb,
struct acpi_battery *battery = container_of(nb, struct acpi_battery,
pm_nb);
switch (mode) {
+ case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
- sysfs_remove_battery(battery);
- sysfs_add_battery(battery);
+ if (battery->bat.dev) {
+ sysfs_remove_battery(battery);
+ sysfs_add_battery(battery);
+ }
break;
}
@@ -972,28 +983,38 @@ static int acpi_battery_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
device->driver_data = battery;
mutex_init(&battery->lock);
+ mutex_init(&battery->sysfs_lock);
if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle,
"_BIX", &handle)))
set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
- acpi_battery_update(battery);
+ result = acpi_battery_update(battery);
+ if (result)
+ goto fail;
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
#endif
- if (!result) {
- printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
- ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
- device->status.battery_present ? "present" : "absent");
- } else {
+ if (result) {
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
#endif
- kfree(battery);
+ goto fail;
}
+ printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
+ ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
+ device->status.battery_present ? "present" : "absent");
+
battery->pm_nb.notifier_call = battery_notify;
register_pm_notifier(&battery->pm_nb);
return result;
+
+fail:
+ sysfs_remove_battery(battery);
+ mutex_destroy(&battery->lock);
+ mutex_destroy(&battery->sysfs_lock);
+ kfree(battery);
+ return result;
}
static int acpi_battery_remove(struct acpi_device *device, int type)
@@ -1009,6 +1030,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
#endif
sysfs_remove_battery(battery);
mutex_destroy(&battery->lock);
+ mutex_destroy(&battery->sysfs_lock);
kfree(battery);
return 0;
}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d1e06c182cd..437ddbf0c49 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -39,6 +39,7 @@
#include <linux/pci.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#include <acpi/apei.h>
#include <linux/dmi.h>
#include <linux/suspend.h>
@@ -519,6 +520,7 @@ out_kfree:
}
EXPORT_SYMBOL(acpi_run_osc);
+bool osc_sb_apei_support_acked;
static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48";
static void acpi_bus_osc_support(void)
{
@@ -541,11 +543,19 @@ static void acpi_bus_osc_support(void)
#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
#endif
+
+ if (!ghes_disable)
+ capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
return;
- if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
+ if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
+ u32 *capbuf_ret = context.ret.pointer;
+ if (context.ret.length > OSC_SUPPORT_TYPE)
+ osc_sb_apei_support_acked =
+ capbuf_ret[OSC_SUPPORT_TYPE] & OSC_SB_APEI_SUPPORT;
kfree(context.ret.pointer);
- /* do we need to check the returned cap? Sounds no */
+ }
+ /* do we need to check other returned cap? Sounds no */
}
/* --------------------------------------------------------------------------
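acpi_bus_osc_support() and the new apei_osc_setup() in apei-base.c both hand _OSC a three-dword capability buffer (query word, support word, control word); the support word now carries OSC_SB_APEI_SUPPORT when GHES is enabled, and the returned support word sets osc_sb_apei_support_acked, which ghes_init() later uses to report whether firmware-first mode was granted. A minimal sketch of evaluating such a capability buffer on the OS side (plain C; the bit value below is illustrative, the real masks live in the ACPI headers):

    #include <stdio.h>
    #include <stdint.h>

    enum { OSC_QUERY_WORD, OSC_SUPPORT_WORD, OSC_CONTROL_WORD, OSC_NUM_WORDS };

    #define OSC_SB_APEI_SUPPORT_BIT (1u << 4)   /* illustrative bit, not the real mask */

    /* Stand-in for firmware: here it simply grants whatever was offered. */
    static void run_osc(const uint32_t in[OSC_NUM_WORDS],
                        uint32_t out[OSC_NUM_WORDS])
    {
            for (int i = 0; i < OSC_NUM_WORDS; i++)
                    out[i] = in[i];
    }

    int main(void)
    {
            uint32_t capbuf[OSC_NUM_WORDS] = { 0 }, ret[OSC_NUM_WORDS];
            int ghes_disable = 0;

            if (!ghes_disable)
                    capbuf[OSC_SUPPORT_WORD] |= OSC_SB_APEI_SUPPORT_BIT;
            run_osc(capbuf, ret);

            /* Mirrors the osc_sb_apei_support_acked check added to bus.c. */
            int apei_acked = !!(ret[OSC_SUPPORT_WORD] & OSC_SB_APEI_SUPPORT_BIT);
            printf("APEI support acknowledged: %d\n", apei_acked);
            return 0;
    }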
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 1864ad3cf89..19a61136d84 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -77,7 +77,7 @@ struct dock_dependent_device {
struct list_head list;
struct list_head hotplug_list;
acpi_handle handle;
- struct acpi_dock_ops *ops;
+ const struct acpi_dock_ops *ops;
void *context;
};
@@ -589,7 +589,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
* the dock driver after _DCK is executed.
*/
int
-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
void *context)
{
struct dock_dependent_device *dd;
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 05b44201a61..22f918bacd3 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -92,7 +92,7 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
return count;
}
-static struct file_operations acpi_ec_io_ops = {
+static const struct file_operations acpi_ec_io_ops = {
.owner = THIS_MODULE,
.open = acpi_ec_open_io,
.read = acpi_ec_read_io,
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 467479f07c1..0f0356ca1a9 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -110,7 +110,7 @@ fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
return result;
}
-static struct thermal_cooling_device_ops fan_cooling_ops = {
+static const struct thermal_cooling_device_ops fan_cooling_ops = {
.get_max_state = fan_get_max_state,
.get_cur_state = fan_get_cur_state,
.set_cur_state = fan_set_cur_state,
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 52ca9649d76..fa32f584229 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -155,7 +155,7 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
{
if (!strcmp("Linux", interface)) {
- printk(KERN_NOTICE FW_BUG PREFIX
+ printk_once(KERN_NOTICE FW_BUG PREFIX
"BIOS _OSI(Linux) query %s%s\n",
osi_linux.enable ? "honored" : "ignored",
osi_linux.cmdline ? " via cmdline" :
@@ -237,8 +237,23 @@ void acpi_os_vprintf(const char *fmt, va_list args)
#endif
}
+#ifdef CONFIG_KEXEC
+static unsigned long acpi_rsdp;
+static int __init setup_acpi_rsdp(char *arg)
+{
+ acpi_rsdp = simple_strtoul(arg, NULL, 16);
+ return 0;
+}
+early_param("acpi_rsdp", setup_acpi_rsdp);
+#endif
+
acpi_physical_address __init acpi_os_get_root_pointer(void)
{
+#ifdef CONFIG_KEXEC
+ if (acpi_rsdp)
+ return acpi_rsdp;
+#endif
+
if (efi_enabled) {
if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
return efi.acpi20;
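The new early parameter lets a kexec'd kernel (or one whose RSDP cannot be discovered the usual way) be told where the ACPI root pointer lives, e.g. acpi_rsdp=0x3fffd000 on the kernel command line (the address here is only an example); when set, acpi_os_get_root_pointer() returns it before falling back to the EFI tables or the legacy memory scan.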
@@ -1083,7 +1098,13 @@ struct osi_setup_entry {
bool enable;
};
-static struct osi_setup_entry __initdata osi_setup_entries[OSI_STRING_ENTRIES_MAX];
+static struct osi_setup_entry __initdata
+ osi_setup_entries[OSI_STRING_ENTRIES_MAX] = {
+ {"Module Device", true},
+ {"Processor Device", true},
+ {"3.0 _SCP Extensions", true},
+ {"Processor Aggregator Device", true},
+};
void __init acpi_osi_setup(char *str)
{
@@ -1333,23 +1354,6 @@ int acpi_resources_are_enforced(void)
EXPORT_SYMBOL(acpi_resources_are_enforced);
/*
- * Create and initialize a spinlock.
- */
-acpi_status
-acpi_os_create_lock(acpi_spinlock *out_handle)
-{
- spinlock_t *lock;
-
- lock = ACPI_ALLOCATE(sizeof(spinlock_t));
- if (!lock)
- return AE_NO_MEMORY;
- spin_lock_init(lock);
- *out_handle = lock;
-
- return AE_OK;
-}
-
-/*
* Deallocate the memory for a spinlock.
*/
void acpi_os_delete_lock(acpi_spinlock handle)
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index f907cfbfa13..7f9eba9a0b0 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -303,6 +303,61 @@ void acpi_pci_irq_del_prt(struct pci_bus *bus)
/* --------------------------------------------------------------------------
PCI Interrupt Routing Support
-------------------------------------------------------------------------- */
+#ifdef CONFIG_X86_IO_APIC
+extern int noioapicquirk;
+extern int noioapicreroute;
+
+static int bridge_has_boot_interrupt_variant(struct pci_bus *bus)
+{
+ struct pci_bus *bus_it;
+
+ for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) {
+ if (!bus_it->self)
+ return 0;
+ if (bus_it->self->irq_reroute_variant)
+ return bus_it->self->irq_reroute_variant;
+ }
+ return 0;
+}
+
+/*
+ * Some chipsets (e.g. Intel 6700PXH) generate a legacy INTx when the IRQ
+ * entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel does
+ * during interrupt handling). When this INTx generation cannot be disabled,
+ * we reroute these interrupts to their legacy equivalent to get rid of
+ * spurious interrupts.
+ */
+static int acpi_reroute_boot_interrupt(struct pci_dev *dev,
+ struct acpi_prt_entry *entry)
+{
+ if (noioapicquirk || noioapicreroute) {
+ return 0;
+ } else {
+ switch (bridge_has_boot_interrupt_variant(dev->bus)) {
+ case 0:
+ /* no rerouting necessary */
+ return 0;
+ case INTEL_IRQ_REROUTE_VARIANT:
+ /*
+ * Remap according to INTx routing table in 6700PXH
+ * specs, intel order number 302628-002, section
+ * 2.15.2. Other chipsets (80332, ...) have the same
+ * mapping and are handled here as well.
+ */
+ dev_info(&dev->dev, "PCI IRQ %d -> rerouted to legacy "
+ "IRQ %d\n", entry->index,
+ (entry->index % 4) + 16);
+ entry->index = (entry->index % 4) + 16;
+ return 1;
+ default:
+ dev_warn(&dev->dev, "Cannot reroute IRQ %d to legacy "
+ "IRQ: unknown mapping\n", entry->index);
+ return -1;
+ }
+ }
+}
+#endif /* CONFIG_X86_IO_APIC */
+
static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
{
struct acpi_prt_entry *entry;
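The remap in acpi_reroute_boot_interrupt() keeps only the INTx pin selected by entry->index modulo 4 and moves it into the 16-19 range of legacy-equivalent inputs, per the 6700PXH routing table cited in the comment. A small check of the arithmetic (userspace C):

    #include <stdio.h>

    int main(void)
    {
            /* entry->index -> rerouted legacy IRQ, as done in the hunk above */
            int idx[] = { 16, 17, 34, 39 };
            for (int i = 0; i < 4; i++)
                    printf("PCI IRQ %d -> legacy IRQ %d\n",
                           idx[i], (idx[i] % 4) + 16);
            return 0;
    }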
@@ -311,6 +366,9 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
entry = acpi_pci_irq_find_prt_entry(dev, pin);
if (entry) {
+#ifdef CONFIG_X86_IO_APIC
+ acpi_reroute_boot_interrupt(dev, entry);
+#endif /* CONFIG_X86_IO_APIC */
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n",
pci_name(dev), pin_name(pin)));
return entry;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d06078d660a..2672c798272 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -485,7 +485,8 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
root->secondary.end = 0xFF;
printk(KERN_WARNING FW_BUG PREFIX
"no secondary bus range in _CRS\n");
- status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
+ status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN,
+ NULL, &bus);
if (ACPI_SUCCESS(status))
root->secondary.start = bus;
else if (status == AE_NOT_FOUND)
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 79cb6533289..870550d6a4b 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -244,7 +244,7 @@ processor_set_cur_state(struct thermal_cooling_device *cdev,
return result;
}
-struct thermal_cooling_device_ops processor_cooling_ops = {
+const struct thermal_cooling_device_ops processor_cooling_ops = {
.get_max_state = processor_get_max_state,
.get_cur_state = processor_get_cur_state,
.set_cur_state = processor_set_cur_state,
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 51ae3794ec7..6e36d0c0057 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -112,7 +112,7 @@ struct acpi_battery {
u8 have_sysfs_alarm:1;
};
-#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat);
+#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat)
struct acpi_sbs {
struct power_supply charger;
@@ -130,6 +130,9 @@ struct acpi_sbs {
#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger)
+static int acpi_sbs_remove(struct acpi_device *device, int type);
+static int acpi_battery_get_state(struct acpi_battery *battery);
+
static inline int battery_scale(int log)
{
int scale = 1;
@@ -195,6 +198,8 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy,
if ((!battery->present) && psp != POWER_SUPPLY_PROP_PRESENT)
return -ENODEV;
+
+ acpi_battery_get_state(battery);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
if (battery->rate_now < 0)
@@ -225,11 +230,17 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_POWER_NOW:
val->intval = abs(battery->rate_now) *
acpi_battery_ipscale(battery) * 1000;
+ val->intval *= (acpi_battery_mode(battery)) ?
+ (battery->voltage_now *
+ acpi_battery_vscale(battery) / 1000) : 1;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_POWER_AVG:
val->intval = abs(battery->rate_avg) *
acpi_battery_ipscale(battery) * 1000;
+ val->intval *= (acpi_battery_mode(battery)) ?
+ (battery->voltage_now *
+ acpi_battery_vscale(battery) / 1000) : 1;
break;
case POWER_SUPPLY_PROP_CAPACITY:
val->intval = battery->state_of_charge;
@@ -903,8 +914,6 @@ static void acpi_sbs_callback(void *context)
}
}
-static int acpi_sbs_remove(struct acpi_device *device, int type);
-
static int acpi_sbs_add(struct acpi_device *device)
{
struct acpi_sbs *sbs;
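
Note on the to_acpi_battery() change above: dropping the trailing semicolon is not cosmetic. With the old definition the macro could not be used inside a larger expression, because the stray ';' would land in the middle of it. A short sketch of the failure mode, using the same helpers declared in this hunk (call site is hypothetical):

	/* Old macro expands this to
	 *   acpi_battery_get_state(container_of(psy, struct acpi_battery, bat););
	 * which does not compile; the fixed macro behaves like a normal expression. */
	acpi_battery_get_state(to_acpi_battery(psy));
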
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6c949602cbd..3ed80b2ca90 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -428,6 +428,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
},
},
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Asus A8N-SLI DELUXE",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
+ },
+ },
+ {
+ .callback = init_old_suspend_ordering,
+ .ident = "Asus A8N-SLI Premium",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
+ },
+ },
{},
};
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 77255f250db..c538d0ef10f 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -149,12 +149,12 @@ static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
return result;
}
-static struct kernel_param_ops param_ops_debug_layer = {
+static const struct kernel_param_ops param_ops_debug_layer = {
.set = param_set_uint,
.get = param_get_debug_layer,
};
-static struct kernel_param_ops param_ops_debug_level = {
+static const struct kernel_param_ops param_ops_debug_level = {
.set = param_set_uint,
.get = param_get_debug_level,
};
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 2607e17b520..48fbc647b17 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -812,7 +812,7 @@ acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
thermal_zone_unbind_cooling_device);
}
-static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
+static const struct thermal_zone_device_ops acpi_thermal_zone_ops = {
.bind = acpi_thermal_bind_cooling_device,
.unbind = acpi_thermal_unbind_cooling_device,
.get_temp = thermal_get_temp,
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index db39e9e607d..08a44b532f7 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -46,7 +46,6 @@
#define PREFIX "ACPI: "
-#define ACPI_VIDEO_CLASS "video"
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
#define ACPI_VIDEO_NOTIFY_SWITCH 0x80
@@ -308,7 +307,7 @@ video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long st
return acpi_video_device_lcd_set_level(video, level);
}
-static struct thermal_cooling_device_ops video_cooling_ops = {
+static const struct thermal_cooling_device_ops video_cooling_ops = {
.get_max_state = video_get_max_state,
.get_cur_state = video_get_cur_state,
.set_cur_state = video_set_cur_state,
@@ -1445,7 +1444,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
case ACPI_VIDEO_NOTIFY_SWITCH: /* User requested a switch,
* most likely via hotkey. */
acpi_bus_generate_proc_event(device, event, 0);
- keycode = KEY_SWITCHVIDEOMODE;
+ if (!acpi_notifier_call_chain(device, event, 0))
+ keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_PROBE: /* User plugged in or removed a video
@@ -1475,7 +1475,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
break;
}
- acpi_notifier_call_chain(device, event, 0);
+ if (event != ACPI_VIDEO_NOTIFY_SWITCH)
+ acpi_notifier_call_chain(device, event, 0);
if (keycode) {
input_report_key(input, keycode, 1);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 75afa75a515..5987e0ba8c2 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -313,6 +313,7 @@ config PATA_AMD
config PATA_ARASAN_CF
tristate "ARASAN CompactFlash PATA Controller Support"
+ depends on DMADEVICES
select DMA_ENGINE
help
Say Y here to support the ARASAN CompactFlash PATA controller
@@ -467,6 +468,15 @@ config PATA_ICSIDE
interface card. This is not required for ICS partition support.
If you are unsure, say N to this.
+config PATA_IMX
+ tristate "PATA support for Freescale iMX"
+ depends on ARCH_MXC
+ help
+ This option enables support for the PATA host available on Freescale
+ iMX SoCs.
+
+ If unsure, say N.
+
config PATA_IT8213
tristate "IT8213 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8ac64e1aa05..9550d691fd1 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
+obj-$(CONFIG_PATA_IMX) += pata_imx.o
obj-$(CONFIG_PATA_IT8213) += pata_it8213.o
obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index ae22be4157b..3bc8c79bf2c 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -135,8 +135,8 @@ static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg
if (mesg.event & PM_EVENT_SUSPEND &&
hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
- dev_printk(KERN_ERR, &pdev->dev,
- "BIOS update required for suspend/resume\n");
+ dev_err(&pdev->dev,
+ "BIOS update required for suspend/resume\n");
return -EIO;
}
@@ -187,7 +187,7 @@ static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
+ dev_err(&pdev->dev,
"64-bit DMA enable failed\n");
return rc;
}
@@ -195,14 +195,13 @@ static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "32-bit consistent DMA enable failed\n");
return rc;
}
}
@@ -343,14 +342,12 @@ static int acard_ahci_port_start(struct ata_port *ap)
if (cmd & PORT_CMD_FBSCP)
pp->fbs_supported = true;
else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
- dev_printk(KERN_INFO, dev,
- "port %d can do FBS, forcing FBSCP\n",
- ap->port_no);
+ dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
+ ap->port_no);
pp->fbs_supported = true;
} else
- dev_printk(KERN_WARNING, dev,
- "port %d is not capable of FBS\n",
- ap->port_no);
+ dev_warn(dev, "port %d is not capable of FBS\n",
+ ap->port_no);
}
if (pp->fbs_supported) {
@@ -406,7 +403,6 @@ static int acard_ahci_port_start(struct ata_port *ap)
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
unsigned int board_id = ent->driver_data;
struct ata_port_info pi = acard_ahci_port_info[board_id];
const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -419,8 +415,7 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id
WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* acquire resources */
rc = pcim_enable_device(pdev);
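
Note on the pattern in this and the following ATA files: most hunks are mechanical conversions from dev_printk(KERN_<LEVEL>, ...) to the dev_<level>() helpers, and from the per-driver printed_version counter to ata_print_version_once(). A condensed before/after sketch of the shape these hunks follow, assuming only the helpers used by the patch itself:

	/* Old style, as removed above */
	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
	dev_printk(KERN_ERR, &pdev->dev, "64-bit DMA enable failed\n");

	/* New style, as added above */
	ata_print_version_once(&pdev->dev, DRV_VERSION);
	dev_err(&pdev->dev, "64-bit DMA enable failed\n");
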
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 71afe037131..fb7b90b0592 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -79,8 +79,6 @@ enum board_ids {
};
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
@@ -104,12 +102,6 @@ static struct ata_port_operations ahci_p5wdh_ops = {
.hardreset = ahci_p5wdh_hardreset,
};
-static struct ata_port_operations ahci_sb600_ops = {
- .inherits = &ahci_ops,
- .softreset = ahci_sb600_softreset,
- .pmp_softreset = ahci_sb600_softreset,
-};
-
#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
static const struct ata_port_info ahci_port_info[] = {
@@ -188,7 +180,7 @@ static const struct ata_port_info ahci_port_info[] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_sb600_ops,
+ .port_ops = &ahci_pmp_retry_srst_ops,
},
[board_ahci_sb700] = /* for SB700 and SB800 */
{
@@ -196,7 +188,7 @@ static const struct ata_port_info ahci_port_info[] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_sb600_ops,
+ .port_ops = &ahci_pmp_retry_srst_ops,
},
[board_ahci_vt8251] =
{
@@ -267,6 +259,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -502,55 +495,6 @@ static void ahci_pci_init_controller(struct ata_host *host)
ahci_init_controller(host);
}
-static int ahci_sb600_check_ready(struct ata_link *link)
-{
- void __iomem *port_mmio = ahci_port_base(link->ap);
- u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
- u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
-
- /*
- * There is no need to check TFDATA if BAD PMP is found due to HW bug,
- * which can save timeout delay.
- */
- if (irq_status & PORT_IRQ_BAD_PMP)
- return -EIO;
-
- return ata_check_ready(status);
-}
-
-static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
-{
- struct ata_port *ap = link->ap;
- void __iomem *port_mmio = ahci_port_base(ap);
- int pmp = sata_srst_pmp(link);
- int rc;
- u32 irq_sts;
-
- DPRINTK("ENTER\n");
-
- rc = ahci_do_softreset(link, class, pmp, deadline,
- ahci_sb600_check_ready);
-
- /*
- * Soft reset fails on some ATI chips with IPMS set when PMP
- * is enabled but SATA HDD/ODD is connected to SATA port,
- * do soft reset again to port 0.
- */
- if (rc == -EIO) {
- irq_sts = readl(port_mmio + PORT_IRQ_STAT);
- if (irq_sts & PORT_IRQ_BAD_PMP) {
- ata_link_printk(link, KERN_WARNING,
- "applying SB600 PMP SRST workaround "
- "and retrying\n");
- rc = ahci_do_softreset(link, class, 0, deadline,
- ahci_check_ready);
- }
- }
-
- return rc;
-}
-
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
@@ -629,8 +573,8 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
if (mesg.event & PM_EVENT_SUSPEND &&
hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
- dev_printk(KERN_ERR, &pdev->dev,
- "BIOS update required for suspend/resume\n");
+ dev_err(&pdev->dev,
+ "BIOS update required for suspend/resume\n");
return -EIO;
}
@@ -681,22 +625,21 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "64-bit DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "64-bit DMA enable failed\n");
return rc;
}
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "32-bit consistent DMA enable failed\n");
return rc;
}
}
@@ -759,8 +702,8 @@ static void ahci_p5wdh_workaround(struct ata_host *host)
dmi_check_system(sysids)) {
struct ata_port *ap = host->ports[1];
- dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
- "Deluxe on-board SIMG4726 workaround\n");
+ dev_info(&pdev->dev,
+ "enabling ASUS P5W DH Deluxe on-board SIMG4726 workaround\n");
ap->ops = &ahci_p5wdh_ops;
ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
@@ -811,6 +754,18 @@ static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
},
},
+ /*
+ * All BIOS versions for the Asus M3A support 64bit DMA.
+ * (all release versions from 0301 to 1206 were tested)
+ */
+ {
+ .ident = "ASUS M3A",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR,
+ "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "M3A"),
+ },
+ },
{ }
};
const struct dmi_system_id *match;
@@ -831,14 +786,14 @@ static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
if (strcmp(buf, match->driver_data) >= 0)
goto enable_64bit;
else {
- dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
- "forcing 32bit DMA, update BIOS\n", match->ident);
+ dev_warn(&pdev->dev,
+ "%s: BIOS too old, forcing 32bit DMA, update BIOS\n",
+ match->ident);
return false;
}
enable_64bit:
- dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n",
- match->ident);
+ dev_warn(&pdev->dev, "%s: enabling 64bit DMA\n", match->ident);
return true;
}
@@ -1041,9 +996,8 @@ static void ahci_gtf_filter_workaround(struct ata_host *host)
return;
filter = (unsigned long)dmi->driver_data;
- dev_printk(KERN_INFO, host->dev,
- "applying extra ACPI _GTF filter 0x%x for %s\n",
- filter, dmi->ident);
+ dev_info(host->dev, "applying extra ACPI _GTF filter 0x%x for %s\n",
+ filter, dmi->ident);
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
@@ -1062,7 +1016,6 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
unsigned int board_id = ent->driver_data;
struct ata_port_info pi = ahci_port_info[board_id];
const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -1075,8 +1028,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* The AHCI driver can only drive the SATA ports, the PATA driver
can drive them all so if both drivers are selected make sure
@@ -1099,8 +1051,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* that for SAS drives they're out of luck.
*/
if (pdev->vendor == PCI_VENDOR_ID_PROMISE)
- dev_printk(KERN_INFO, &pdev->dev, "PDC42819 "
- "can only drive SATA devices with this driver\n");
+ dev_info(&pdev->dev,
+ "PDC42819 can only drive SATA devices with this driver\n");
/* acquire resources */
rc = pcim_enable_device(pdev);
@@ -1126,8 +1078,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
pci_read_config_byte(pdev, ICH_MAP, &map);
if (map & 0x3) {
- dev_printk(KERN_INFO, &pdev->dev, "controller is in "
- "combined mode, can't enable AHCI mode\n");
+ dev_info(&pdev->dev,
+ "controller is in combined mode, can't enable AHCI mode\n");
return -ENODEV;
}
}
@@ -1184,8 +1136,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_broken_suspend(pdev)) {
hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
- dev_printk(KERN_WARNING, &pdev->dev,
- "BIOS update required for suspend/resume\n");
+ dev_warn(&pdev->dev,
+ "BIOS update required for suspend/resume\n");
}
if (ahci_broken_online(pdev)) {
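
Note on the DMA-mask hunks above: only the error branches are touched, but the surrounding flow is the usual AHCI fallback — try a 64-bit streaming mask when the controller supports it, fall back to 32-bit consistent/streaming masks otherwise. A condensed restatement of that flow under the same pci_set_dma_mask()/pci_set_consistent_dma_mask() calls the patch uses; the function name is local to this sketch:

	static int ahci_dma_masks_sketch(struct pci_dev *pdev, int using_dac)
	{
		int rc;

		if (using_dac && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
			if (rc)
				rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_err(&pdev->dev, "64-bit DMA enable failed\n");
				return rc;
			}
		} else {
			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_err(&pdev->dev, "32-bit DMA enable failed\n");
				return rc;
			}
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n");
				return rc;
			}
		}
		return 0;
	}
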
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 12c5282e7fc..b1750007c8d 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -312,6 +312,7 @@ extern struct device_attribute *ahci_sdev_attrs[];
.sdev_attrs = ahci_sdev_attrs
extern struct ata_port_operations ahci_ops;
+extern struct ata_port_operations ahci_pmp_retry_srst_ops;
void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
u32 opts);
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 721d38bfa33..7df56ec3181 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -81,14 +81,13 @@ static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
xfer_mask |= ata_xfer_mode2mask(XFER_MW_DMA_0);
}
- ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
- name);
+ ata_dev_info(dev, "configured for %s\n", name);
dev->xfer_mode = ata_xfer_mask2mode(xfer_mask);
dev->xfer_shift = ata_xfer_mode2shift(dev->xfer_mode);
dev->flags &= ~ATA_DFLAG_PIO;
} else {
- ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ ata_dev_info(dev, "configured for PIO\n");
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 6f6e7718b05..43107e9415d 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1225,8 +1225,9 @@ static int piix_pci_device_resume(struct pci_dev *pdev)
*/
rc = pci_reenable_device(pdev);
if (rc)
- dev_printk(KERN_ERR, &pdev->dev, "failed to enable "
- "device after resume (%d)\n", rc);
+ dev_err(&pdev->dev,
+ "failed to enable device after resume (%d)\n",
+ rc);
} else
rc = ata_pci_device_do_resume(pdev);
@@ -1303,9 +1304,11 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
no_piix_dma = 2;
}
if (no_piix_dma)
- dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n");
- if (no_piix_dma == 2)
- dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n");
+ dev_warn(&ata_dev->dev,
+ "450NX errata present, disabling IDE DMA%s\n",
+ no_piix_dma == 2 ? " - a BIOS update may resolve this"
+ : "");
+
return no_piix_dma;
}
@@ -1338,37 +1341,36 @@ static const int *__devinit piix_init_sata_map(struct pci_dev *pdev,
map = map_db->map[map_value & map_db->mask];
- dev_printk(KERN_INFO, &pdev->dev, "MAP [");
+ dev_info(&pdev->dev, "MAP [");
for (i = 0; i < 4; i++) {
switch (map[i]) {
case RV:
invalid_map = 1;
- printk(" XX");
+ pr_cont(" XX");
break;
case NA:
- printk(" --");
+ pr_cont(" --");
break;
case IDE:
WARN_ON((i & 1) || map[i + 1] != IDE);
pinfo[i / 2] = piix_port_info[ich_pata_100];
i++;
- printk(" IDE IDE");
+ pr_cont(" IDE IDE");
break;
default:
- printk(" P%d", map[i]);
+ pr_cont(" P%d", map[i]);
if (i & 1)
pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
break;
}
}
- printk(" ]\n");
+ pr_cont(" ]\n");
if (invalid_map)
- dev_printk(KERN_ERR, &pdev->dev,
- "invalid MAP value %u\n", map_value);
+ dev_err(&pdev->dev, "invalid MAP value %u\n", map_value);
return map;
}
@@ -1398,8 +1400,8 @@ static bool piix_no_sidpr(struct ata_host *host)
if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2920 &&
pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
pdev->subsystem_device == 0xb049) {
- dev_printk(KERN_WARNING, host->dev,
- "Samsung DB-P70 detected, disabling SIDPR\n");
+ dev_warn(host->dev,
+ "Samsung DB-P70 detected, disabling SIDPR\n");
return true;
}
@@ -1451,8 +1453,8 @@ static int __devinit piix_init_sidpr(struct ata_host *host)
piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
if ((scontrol & 0xf00) != 0x300) {
- dev_printk(KERN_INFO, host->dev, "SCR access via "
- "SIDPR is available but doesn't work\n");
+ dev_info(host->dev,
+ "SCR access via SIDPR is available but doesn't work\n");
return 0;
}
}
@@ -1501,8 +1503,7 @@ static void piix_iocfg_bit18_quirk(struct ata_host *host)
* affected systems.
*/
if (hpriv->saved_iocfg & (1 << 18)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "applying IOCFG bit18 quirk\n");
+ dev_info(&pdev->dev, "applying IOCFG bit18 quirk\n");
pci_write_config_dword(pdev, PIIX_IOCFG,
hpriv->saved_iocfg & ~(1 << 18));
}
@@ -1561,7 +1562,6 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev)
static int __devinit piix_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
struct device *dev = &pdev->dev;
struct ata_port_info port_info[2];
const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
@@ -1571,9 +1571,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
struct piix_host_priv *hpriv;
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* no hotplugging support for later devices (FIXME) */
if (!in_module_init && ent->driver_data >= ich5_sata)
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index d38c40fe4dd..3c92dbd751e 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -82,6 +82,8 @@ static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
+static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void ahci_postreset(struct ata_link *link, unsigned int *class);
@@ -178,6 +180,12 @@ struct ata_port_operations ahci_ops = {
};
EXPORT_SYMBOL_GPL(ahci_ops);
+struct ata_port_operations ahci_pmp_retry_srst_ops = {
+ .inherits = &ahci_ops,
+ .softreset = ahci_pmp_retry_softreset,
+};
+EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
+
int ahci_em_messages = 1;
EXPORT_SYMBOL_GPL(ahci_em_messages);
module_param(ahci_em_messages, int, 0444);
@@ -286,10 +294,10 @@ static ssize_t ahci_read_em_buffer(struct device *dev,
/* the count should not be larger than PAGE_SIZE */
if (count > PAGE_SIZE) {
if (printk_ratelimit())
- ata_port_printk(ap, KERN_WARNING,
- "EM read buffer size too large: "
- "buffer size %u, page size %lu\n",
- hpriv->em_buf_sz, PAGE_SIZE);
+ ata_port_warn(ap,
+ "EM read buffer size too large: "
+ "buffer size %u, page size %lu\n",
+ hpriv->em_buf_sz, PAGE_SIZE);
count = PAGE_SIZE;
}
@@ -410,51 +418,46 @@ void ahci_save_initial_config(struct device *dev,
/* some chips have errata preventing 64bit use */
if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
- dev_printk(KERN_INFO, dev,
- "controller can't do 64bit DMA, forcing 32bit\n");
+ dev_info(dev, "controller can't do 64bit DMA, forcing 32bit\n");
cap &= ~HOST_CAP_64;
}
if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
- dev_printk(KERN_INFO, dev,
- "controller can't do NCQ, turning off CAP_NCQ\n");
+ dev_info(dev, "controller can't do NCQ, turning off CAP_NCQ\n");
cap &= ~HOST_CAP_NCQ;
}
if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
- dev_printk(KERN_INFO, dev,
- "controller can do NCQ, turning on CAP_NCQ\n");
+ dev_info(dev, "controller can do NCQ, turning on CAP_NCQ\n");
cap |= HOST_CAP_NCQ;
}
if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
- dev_printk(KERN_INFO, dev,
- "controller can't do PMP, turning off CAP_PMP\n");
+ dev_info(dev, "controller can't do PMP, turning off CAP_PMP\n");
cap &= ~HOST_CAP_PMP;
}
if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
- dev_printk(KERN_INFO, dev,
- "controller can't do SNTF, turning off CAP_SNTF\n");
+ dev_info(dev,
+ "controller can't do SNTF, turning off CAP_SNTF\n");
cap &= ~HOST_CAP_SNTF;
}
if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
- dev_printk(KERN_INFO, dev,
- "controller can do FBS, turning on CAP_FBS\n");
+ dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
cap |= HOST_CAP_FBS;
}
if (force_port_map && port_map != force_port_map) {
- dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
- port_map, force_port_map);
+ dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
+ port_map, force_port_map);
port_map = force_port_map;
}
if (mask_port_map) {
- dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
- port_map,
- port_map & mask_port_map);
+ dev_warn(dev, "masking port_map 0x%x -> 0x%x\n",
+ port_map,
+ port_map & mask_port_map);
port_map &= mask_port_map;
}
@@ -470,10 +473,9 @@ void ahci_save_initial_config(struct device *dev,
* port_map and let it be generated from n_ports.
*/
if (map_ports > ahci_nr_ports(cap)) {
- dev_printk(KERN_WARNING, dev,
- "implemented port map (0x%x) contains more "
- "ports than nr_ports (%u), using nr_ports\n",
- port_map, ahci_nr_ports(cap));
+ dev_warn(dev,
+ "implemented port map (0x%x) contains more ports than nr_ports (%u), using nr_ports\n",
+ port_map, ahci_nr_ports(cap));
port_map = 0;
}
}
@@ -481,8 +483,7 @@ void ahci_save_initial_config(struct device *dev,
/* fabricate port_map from cap.nr_ports */
if (!port_map) {
port_map = (1 << ahci_nr_ports(cap)) - 1;
- dev_printk(KERN_WARNING, dev,
- "forcing PORTS_IMPL to 0x%x\n", port_map);
+ dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
/* write the fixed up value to the PI register */
hpriv->saved_port_map = port_map;
@@ -822,8 +823,8 @@ int ahci_reset_controller(struct ata_host *host)
HOST_RESET, 10, 1000);
if (tmp & HOST_RESET) {
- dev_printk(KERN_ERR, host->dev,
- "controller reset failed (0x%x)\n", tmp);
+ dev_err(host->dev, "controller reset failed (0x%x)\n",
+ tmp);
return -EIO;
}
@@ -835,8 +836,7 @@ int ahci_reset_controller(struct ata_host *host)
*/
ahci_restore_initial_config(host);
} else
- dev_printk(KERN_INFO, host->dev,
- "skipping global host reset\n");
+ dev_info(host->dev, "skipping global host reset\n");
return 0;
}
@@ -1132,8 +1132,8 @@ static void ahci_dev_config(struct ata_device *dev)
if (hpriv->flags & AHCI_HFLAG_SECT255) {
dev->max_sectors = 255;
- ata_dev_printk(dev, KERN_INFO,
- "SB600 AHCI: limiting to 255 sectors per cmd\n");
+ ata_dev_info(dev,
+ "SB600 AHCI: limiting to 255 sectors per cmd\n");
}
}
@@ -1257,8 +1257,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
/* prepare for SRST (AHCI-1.1 10.4.1) */
rc = ahci_kick_engine(ap);
if (rc && rc != -EOPNOTSUPP)
- ata_link_printk(link, KERN_WARNING,
- "failed to reset engine (errno=%d)\n", rc);
+ ata_link_warn(link, "failed to reset engine (errno=%d)\n", rc);
ata_tf_init(link->device, &tf);
@@ -1291,8 +1290,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
* be trusted. Treat device readiness timeout as link
* offline.
*/
- ata_link_printk(link, KERN_INFO,
- "device not ready, treating as offline\n");
+ ata_link_info(link, "device not ready, treating as offline\n");
*class = ATA_DEV_NONE;
} else if (rc) {
/* link occupied, -ENODEV too is an error */
@@ -1305,7 +1303,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
return 0;
fail:
- ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
+ ata_link_err(link, "softreset failed (%s)\n", reason);
return rc;
}
@@ -1329,6 +1327,55 @@ static int ahci_softreset(struct ata_link *link, unsigned int *class,
}
EXPORT_SYMBOL_GPL(ahci_do_softreset);
+static int ahci_bad_pmp_check_ready(struct ata_link *link)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+ u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
+
+ /*
+ * There is no need to check TFDATA if BAD PMP is found due to HW bug,
+ * which can save timeout delay.
+ */
+ if (irq_status & PORT_IRQ_BAD_PMP)
+ return -EIO;
+
+ return ata_check_ready(status);
+}
+
+int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ int pmp = sata_srst_pmp(link);
+ int rc;
+ u32 irq_sts;
+
+ DPRINTK("ENTER\n");
+
+ rc = ahci_do_softreset(link, class, pmp, deadline,
+ ahci_bad_pmp_check_ready);
+
+ /*
+ * Soft reset fails with IPMS set when PMP is enabled but
+ * SATA HDD/ODD is connected to SATA port, do soft reset
+ * again to port 0.
+ */
+ if (rc == -EIO) {
+ irq_sts = readl(port_mmio + PORT_IRQ_STAT);
+ if (irq_sts & PORT_IRQ_BAD_PMP) {
+ ata_link_printk(link, KERN_WARNING,
+ "applying PMP SRST workaround "
+ "and retrying\n");
+ rc = ahci_do_softreset(link, class, 0, deadline,
+ ahci_check_ready);
+ }
+ }
+
+ return rc;
+}
+
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
@@ -1474,8 +1521,7 @@ static void ahci_fbs_dec_intr(struct ata_port *ap)
}
if (fbs & PORT_FBS_DEC)
- dev_printk(KERN_ERR, ap->host->dev,
- "failed to clear device error\n");
+ dev_err(ap->host->dev, "failed to clear device error\n");
}
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
@@ -1713,8 +1759,8 @@ irqreturn_t ahci_interrupt(int irq, void *dev_instance)
} else {
VPRINTK("port %u (no irq)\n", i);
if (ata_ratelimit())
- dev_printk(KERN_WARNING, host->dev,
- "interrupt on disabled port %u\n", i);
+ dev_warn(host->dev,
+ "interrupt on disabled port %u\n", i);
}
handled = 1;
@@ -1865,11 +1911,11 @@ static void ahci_enable_fbs(struct ata_port *ap)
writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
fbs = readl(port_mmio + PORT_FBS);
if (fbs & PORT_FBS_EN) {
- dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
+ dev_info(ap->host->dev, "FBS is enabled\n");
pp->fbs_enabled = true;
pp->fbs_last_dev = -1; /* initialization */
} else
- dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
+ dev_err(ap->host->dev, "Failed to enable FBS\n");
ahci_start_engine(ap);
}
@@ -1897,9 +1943,9 @@ static void ahci_disable_fbs(struct ata_port *ap)
writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
fbs = readl(port_mmio + PORT_FBS);
if (fbs & PORT_FBS_EN)
- dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
+ dev_err(ap->host->dev, "Failed to disable FBS\n");
else {
- dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
+ dev_info(ap->host->dev, "FBS is disabled\n");
pp->fbs_enabled = false;
}
@@ -1975,7 +2021,7 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
if (rc == 0)
ahci_power_down(ap);
else {
- ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
+ ata_port_err(ap, "%s (%d)\n", emsg, rc);
ahci_start_port(ap);
}
@@ -2003,14 +2049,12 @@ static int ahci_port_start(struct ata_port *ap)
if (cmd & PORT_CMD_FBSCP)
pp->fbs_supported = true;
else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
- dev_printk(KERN_INFO, dev,
- "port %d can do FBS, forcing FBSCP\n",
- ap->port_no);
+ dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
+ ap->port_no);
pp->fbs_supported = true;
} else
- dev_printk(KERN_WARNING, dev,
- "port %d is not capable of FBS\n",
- ap->port_no);
+ dev_warn(dev, "port %d is not capable of FBS\n",
+ ap->port_no);
}
if (pp->fbs_supported) {
@@ -2072,7 +2116,7 @@ static void ahci_port_stop(struct ata_port *ap)
/* de-initialize port */
rc = ahci_deinit_port(ap, &emsg);
if (rc)
- ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
+ ata_port_warn(ap, "%s (%d)\n", emsg, rc);
}
void ahci_print_info(struct ata_host *host, const char *scc_s)
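
Note on the generalization above: with the SB600-only softreset moved into libahci as ahci_pmp_retry_srst_ops, an AHCI low-level driver opts into the retry-on-BAD_PMP behaviour simply by pointing its port_ops at the exported structure, as ahci.c now does for the SB600/SB700 boards earlier in this diff. A minimal sketch of such a board entry, with a hypothetical name and only the fields shown in the ahci.c hunks:

	/* Hypothetical board entry wired to the shared PMP-SRST-retry ops. */
	static const struct ata_port_info example_board_info = {
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_pmp_retry_srst_ops,
	};
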
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index a791b8ce629..bb7c5f1085c 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -218,12 +218,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
ata_acpi_uevent(dev->link->ap, dev, event);
}
-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
.handler = ata_acpi_dev_notify_dock,
.uevent = ata_acpi_dev_uevent,
};
-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
.handler = ata_acpi_ap_notify_dock,
.uevent = ata_acpi_ap_uevent,
};
@@ -332,25 +332,22 @@ int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm)
rc = -EINVAL;
if (ACPI_FAILURE(status)) {
- ata_port_printk(ap, KERN_ERR,
- "ACPI get timing mode failed (AE 0x%x)\n",
- status);
+ ata_port_err(ap, "ACPI get timing mode failed (AE 0x%x)\n",
+ status);
goto out_free;
}
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER) {
- ata_port_printk(ap, KERN_WARNING,
- "_GTM returned unexpected object type 0x%x\n",
- out_obj->type);
+ ata_port_warn(ap, "_GTM returned unexpected object type 0x%x\n",
+ out_obj->type);
goto out_free;
}
if (out_obj->buffer.length != sizeof(struct ata_acpi_gtm)) {
- ata_port_printk(ap, KERN_ERR,
- "_GTM returned invalid length %d\n",
- out_obj->buffer.length);
+ ata_port_err(ap, "_GTM returned invalid length %d\n",
+ out_obj->buffer.length);
goto out_free;
}
@@ -402,8 +399,8 @@ int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm)
if (status == AE_NOT_FOUND)
return -ENOENT;
if (ACPI_FAILURE(status)) {
- ata_port_printk(ap, KERN_ERR,
- "ACPI set timing mode failed (status=0x%x)\n", status);
+ ata_port_err(ap, "ACPI set timing mode failed (status=0x%x)\n",
+ status);
return -EINVAL;
}
return 0;
@@ -450,8 +447,8 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER: port#: %d\n",
- __func__, ap->port_no);
+ ata_dev_dbg(dev, "%s: ENTER: port#: %d\n",
+ __func__, ap->port_no);
/* _GTF has no input parameters */
status = acpi_evaluate_object(dev->acpi_handle, "_GTF", NULL, &output);
@@ -459,9 +456,8 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND) {
- ata_dev_printk(dev, KERN_WARNING,
- "_GTF evaluation failed (AE 0x%x)\n",
- status);
+ ata_dev_warn(dev, "_GTF evaluation failed (AE 0x%x)\n",
+ status);
rc = -EINVAL;
}
goto out_free;
@@ -469,27 +465,24 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
if (!output.length || !output.pointer) {
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: Run _GTF: "
- "length or ptr is NULL (0x%llx, 0x%p)\n",
- __func__,
- (unsigned long long)output.length,
- output.pointer);
+ ata_dev_dbg(dev, "%s: Run _GTF: length or ptr is NULL (0x%llx, 0x%p)\n",
+ __func__,
+ (unsigned long long)output.length,
+ output.pointer);
rc = -EINVAL;
goto out_free;
}
if (out_obj->type != ACPI_TYPE_BUFFER) {
- ata_dev_printk(dev, KERN_WARNING,
- "_GTF unexpected object type 0x%x\n",
- out_obj->type);
+ ata_dev_warn(dev, "_GTF unexpected object type 0x%x\n",
+ out_obj->type);
rc = -EINVAL;
goto out_free;
}
if (out_obj->buffer.length % REGS_PER_GTF) {
- ata_dev_printk(dev, KERN_WARNING,
- "unexpected _GTF length (%d)\n",
- out_obj->buffer.length);
+ ata_dev_warn(dev, "unexpected _GTF length (%d)\n",
+ out_obj->buffer.length);
rc = -EINVAL;
goto out_free;
}
@@ -499,9 +492,8 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
if (gtf) {
*gtf = (void *)out_obj->buffer.pointer;
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG,
- "%s: returning gtf=%p, gtf_count=%d\n",
- __func__, *gtf, rc);
+ ata_dev_dbg(dev, "%s: returning gtf=%p, gtf_count=%d\n",
+ __func__, *gtf, rc);
}
return rc;
@@ -811,8 +803,8 @@ static int ata_acpi_push_id(struct ata_device *dev)
union acpi_object in_params[1];
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ix = %d, port#: %d\n",
- __func__, dev->devno, ap->port_no);
+ ata_dev_dbg(dev, "%s: ix = %d, port#: %d\n",
+ __func__, dev->devno, ap->port_no);
/* Give the drive Identify data to the drive via the _SDD method */
/* _SDD: set up input parameters */
@@ -832,8 +824,7 @@ static int ata_acpi_push_id(struct ata_device *dev)
return -ENOENT;
if (ACPI_FAILURE(status)) {
- ata_dev_printk(dev, KERN_WARNING,
- "ACPI _SDD failed (AE 0x%x)\n", status);
+ ata_dev_warn(dev, "ACPI _SDD failed (AE 0x%x)\n", status);
return -EIO;
}
@@ -983,8 +974,8 @@ int ata_acpi_on_devcfg(struct ata_device *dev)
if (nr_executed) {
rc = ata_dev_reread_id(dev, 0);
if (rc < 0) {
- ata_dev_printk(dev, KERN_ERR, "failed to IDENTIFY "
- "after ACPI commands\n");
+ ata_dev_err(dev,
+ "failed to IDENTIFY after ACPI commands\n");
return rc;
}
}
@@ -1002,8 +993,7 @@ int ata_acpi_on_devcfg(struct ata_device *dev)
return rc;
}
- ata_dev_printk(dev, KERN_WARNING,
- "ACPI: failed the second time, disabled\n");
+ ata_dev_warn(dev, "ACPI: failed the second time, disabled\n");
dev->acpi_handle = NULL;
/* We can safely continue if no _GTF command has been executed
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 736bee5dafe..4a3a5ae7bb4 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -335,8 +335,7 @@ void ata_force_cbl(struct ata_port *ap)
continue;
ap->cbl = fe->param.cbl;
- ata_port_printk(ap, KERN_NOTICE,
- "FORCE: cable set to %s\n", fe->param.name);
+ ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
return;
}
}
@@ -378,8 +377,7 @@ static void ata_force_link_limits(struct ata_link *link)
/* only honor the first spd limit */
if (!did_spd && fe->param.spd_limit) {
link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
- ata_link_printk(link, KERN_NOTICE,
- "FORCE: PHY spd limit set to %s\n",
+ ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
fe->param.name);
did_spd = true;
}
@@ -387,7 +385,7 @@ static void ata_force_link_limits(struct ata_link *link)
/* let lflags stack */
if (fe->param.lflags) {
link->flags |= fe->param.lflags;
- ata_link_printk(link, KERN_NOTICE,
+ ata_link_notice(link,
"FORCE: link flag 0x%x forced -> 0x%x\n",
fe->param.lflags, link->flags);
}
@@ -442,8 +440,8 @@ static void ata_force_xfermask(struct ata_device *dev)
dev->pio_mask = pio_mask;
}
- ata_dev_printk(dev, KERN_NOTICE,
- "FORCE: xfer_mask set to %s\n", fe->param.name);
+ ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
+ fe->param.name);
return;
}
}
@@ -486,8 +484,8 @@ static void ata_force_horkage(struct ata_device *dev)
dev->horkage |= fe->param.horkage_on;
dev->horkage &= ~fe->param.horkage_off;
- ata_dev_printk(dev, KERN_NOTICE,
- "FORCE: horkage modified (%s)\n", fe->param.name);
+ ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
+ fe->param.name);
}
}
@@ -711,8 +709,8 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
sect = tf->lbal;
if (!sect) {
- ata_dev_printk(dev, KERN_WARNING, "device reported "
- "invalid CHS sector 0\n");
+ ata_dev_warn(dev,
+ "device reported invalid CHS sector 0\n");
sect = 1; /* oh well */
}
@@ -1230,8 +1228,9 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (err_mask) {
- ata_dev_printk(dev, KERN_WARNING, "failed to read native "
- "max address (err_mask=0x%x)\n", err_mask);
+ ata_dev_warn(dev,
+ "failed to read native max address (err_mask=0x%x)\n",
+ err_mask);
if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
return -EACCES;
return -EIO;
@@ -1292,8 +1291,9 @@ static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (err_mask) {
- ata_dev_printk(dev, KERN_WARNING, "failed to set "
- "max address (err_mask=0x%x)\n", err_mask);
+ ata_dev_warn(dev,
+ "failed to set max address (err_mask=0x%x)\n",
+ err_mask);
if (err_mask == AC_ERR_DEV &&
(tf.feature & (ATA_ABORTED | ATA_IDNF)))
return -EACCES;
@@ -1336,8 +1336,8 @@ static int ata_hpa_resize(struct ata_device *dev)
* be unlocked, skip HPA resizing.
*/
if (rc == -EACCES || !unlock_hpa) {
- ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
- "broken, skipping HPA handling\n");
+ ata_dev_warn(dev,
+ "HPA support seems broken, skipping HPA handling\n");
dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
/* we can continue if device aborted the command */
@@ -1355,14 +1355,13 @@ static int ata_hpa_resize(struct ata_device *dev)
return 0;
if (native_sectors > sectors)
- ata_dev_printk(dev, KERN_INFO,
+ ata_dev_info(dev,
"HPA detected: current %llu, native %llu\n",
(unsigned long long)sectors,
(unsigned long long)native_sectors);
else if (native_sectors < sectors)
- ata_dev_printk(dev, KERN_WARNING,
- "native sectors (%llu) is smaller than "
- "sectors (%llu)\n",
+ ata_dev_warn(dev,
+ "native sectors (%llu) is smaller than sectors (%llu)\n",
(unsigned long long)native_sectors,
(unsigned long long)sectors);
return 0;
@@ -1372,10 +1371,10 @@ static int ata_hpa_resize(struct ata_device *dev)
rc = ata_set_max_sectors(dev, native_sectors);
if (rc == -EACCES) {
/* if device aborted the command, skip HPA resizing */
- ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
- "(%llu -> %llu), skipping HPA handling\n",
- (unsigned long long)sectors,
- (unsigned long long)native_sectors);
+ ata_dev_warn(dev,
+ "device aborted resize (%llu -> %llu), skipping HPA handling\n",
+ (unsigned long long)sectors,
+ (unsigned long long)native_sectors);
dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
return 0;
} else if (rc)
@@ -1384,14 +1383,14 @@ static int ata_hpa_resize(struct ata_device *dev)
/* re-read IDENTIFY data */
rc = ata_dev_reread_id(dev, 0);
if (rc) {
- ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
- "data after HPA resizing\n");
+ ata_dev_err(dev,
+ "failed to re-read IDENTIFY data after HPA resizing\n");
return rc;
}
if (print_info) {
u64 new_sectors = ata_id_n_sectors(dev->id);
- ata_dev_printk(dev, KERN_INFO,
+ ata_dev_info(dev,
"HPA unlocked: %llu -> %llu, native %llu\n",
(unsigned long long)sectors,
(unsigned long long)new_sectors,
@@ -1655,8 +1654,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
ata_qc_complete(qc);
if (ata_msg_warn(ap))
- ata_dev_printk(dev, KERN_WARNING,
- "qc timeout (cmd 0x%x)\n", command);
+ ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
+ command);
}
spin_unlock_irqrestore(ap->lock, flags);
@@ -1870,7 +1869,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
int rc;
if (ata_msg_ctl(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
+ ata_dev_dbg(dev, "%s: ENTER\n", __func__);
retry:
ata_tf_init(dev, &tf);
@@ -1909,14 +1908,13 @@ retry:
if (err_mask) {
if (err_mask & AC_ERR_NODEV_HINT) {
- ata_dev_printk(dev, KERN_DEBUG,
- "NODEV after polling detection\n");
+ ata_dev_dbg(dev, "NODEV after polling detection\n");
return -ENOENT;
}
if (is_semb) {
- ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on "
- "device w/ SEMB sig, disabled\n");
+ ata_dev_info(dev,
+ "IDENTIFY failed on device w/ SEMB sig, disabled\n");
/* SEMB is not supported yet */
*p_class = ATA_DEV_SEMB_UNSUP;
return 0;
@@ -1942,8 +1940,8 @@ retry:
* both flavors of IDENTIFYs which happens
* sometimes with phantom devices.
*/
- ata_dev_printk(dev, KERN_DEBUG,
- "both IDENTIFYs aborted, assuming NODEV\n");
+ ata_dev_dbg(dev,
+ "both IDENTIFYs aborted, assuming NODEV\n");
return -ENOENT;
}
@@ -1953,9 +1951,9 @@ retry:
}
if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
- ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, "
- "class=%d may_fallback=%d tried_spinup=%d\n",
- class, may_fallback, tried_spinup);
+ ata_dev_dbg(dev, "dumping IDENTIFY data, "
+ "class=%d may_fallback=%d tried_spinup=%d\n",
+ class, may_fallback, tried_spinup);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
}
@@ -2034,8 +2032,8 @@ retry:
err_out:
if (ata_msg_warn(ap))
- ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
- "(%s, err_mask=0x%x)\n", reason, err_mask);
+ ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
+ reason, err_mask);
return rc;
}
@@ -2065,9 +2063,8 @@ static int ata_do_link_spd_horkage(struct ata_device *dev)
* guaranteed by setting sata_spd_limit to target_limit above.
*/
if (plink->sata_spd > target) {
- ata_dev_printk(dev, KERN_INFO,
- "applying link speed limit horkage to %s\n",
- sata_spd_string(target));
+ ata_dev_info(dev, "applying link speed limit horkage to %s\n",
+ sata_spd_string(target));
return -EAGAIN;
}
return 0;
@@ -2110,8 +2107,9 @@ static int ata_dev_config_ncq(struct ata_device *dev,
err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
SATA_FPDMA_AA);
if (err_mask) {
- ata_dev_printk(dev, KERN_ERR, "failed to enable AA"
- "(error_mask=0x%x)\n", err_mask);
+ ata_dev_err(dev,
+ "failed to enable AA (error_mask=0x%x)\n",
+ err_mask);
if (err_mask != AC_ERR_DEV) {
dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
return -EIO;
@@ -2154,31 +2152,28 @@ int ata_dev_configure(struct ata_device *dev)
int rc;
if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
- ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
- __func__);
+ ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
return 0;
}
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
+ ata_dev_dbg(dev, "%s: ENTER\n", __func__);
/* set horkage */
dev->horkage |= ata_dev_blacklisted(dev);
ata_force_horkage(dev);
if (dev->horkage & ATA_HORKAGE_DISABLE) {
- ata_dev_printk(dev, KERN_INFO,
- "unsupported device, disabling\n");
+ ata_dev_info(dev, "unsupported device, disabling\n");
ata_dev_disable(dev);
return 0;
}
if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
dev->class == ATA_DEV_ATAPI) {
- ata_dev_printk(dev, KERN_WARNING,
- "WARNING: ATAPI is %s, device ignored.\n",
- atapi_enabled ? "not supported with this driver"
- : "disabled");
+ ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
+ atapi_enabled ? "not supported with this driver"
+ : "disabled");
ata_dev_disable(dev);
return 0;
}
@@ -2199,12 +2194,12 @@ int ata_dev_configure(struct ata_device *dev)
/* print device capabilities */
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG,
- "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
- "85:%04x 86:%04x 87:%04x 88:%04x\n",
- __func__,
- id[49], id[82], id[83], id[84],
- id[85], id[86], id[87], id[88]);
+ ata_dev_dbg(dev,
+ "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
+ "85:%04x 86:%04x 87:%04x 88:%04x\n",
+ __func__,
+ id[49], id[82], id[83], id[84],
+ id[85], id[86], id[87], id[88]);
/* initialize to-be-configured parameters */
dev->flags &= ~ATA_DFLAG_CFG_MASK;
@@ -2238,17 +2233,15 @@ int ata_dev_configure(struct ata_device *dev)
if (ata_id_is_cfa(id)) {
/* CPRM may make this media unusable */
if (id[ATA_ID_CFA_KEY_MGMT] & 1)
- ata_dev_printk(dev, KERN_WARNING,
- "supports DRM functions and may "
- "not be fully accessible.\n");
+ ata_dev_warn(dev,
+ "supports DRM functions and may not be fully accessible\n");
snprintf(revbuf, 7, "CFA");
} else {
snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
/* Warn the user if the device has TPM extensions */
if (ata_id_has_tpm(id))
- ata_dev_printk(dev, KERN_WARNING,
- "supports DRM functions and may "
- "not be fully accessible.\n");
+ ata_dev_warn(dev,
+ "supports DRM functions and may not be fully accessible\n");
}
dev->n_sectors = ata_id_n_sectors(id);
@@ -2285,12 +2278,11 @@ int ata_dev_configure(struct ata_device *dev)
/* print device info to dmesg */
if (ata_msg_drv(ap) && print_info) {
- ata_dev_printk(dev, KERN_INFO,
- "%s: %s, %s, max %s\n",
- revbuf, modelbuf, fwrevbuf,
- ata_mode_string(xfer_mask));
- ata_dev_printk(dev, KERN_INFO,
- "%Lu sectors, multi %u: %s %s\n",
+ ata_dev_info(dev, "%s: %s, %s, max %s\n",
+ revbuf, modelbuf, fwrevbuf,
+ ata_mode_string(xfer_mask));
+ ata_dev_info(dev,
+ "%llu sectors, multi %u: %s %s\n",
(unsigned long long)dev->n_sectors,
dev->multi_count, lba_desc, ncq_desc);
}
@@ -2311,15 +2303,14 @@ int ata_dev_configure(struct ata_device *dev)
/* print device info to dmesg */
if (ata_msg_drv(ap) && print_info) {
- ata_dev_printk(dev, KERN_INFO,
- "%s: %s, %s, max %s\n",
- revbuf, modelbuf, fwrevbuf,
- ata_mode_string(xfer_mask));
- ata_dev_printk(dev, KERN_INFO,
- "%Lu sectors, multi %u, CHS %u/%u/%u\n",
- (unsigned long long)dev->n_sectors,
- dev->multi_count, dev->cylinders,
- dev->heads, dev->sectors);
+ ata_dev_info(dev, "%s: %s, %s, max %s\n",
+ revbuf, modelbuf, fwrevbuf,
+ ata_mode_string(xfer_mask));
+ ata_dev_info(dev,
+ "%llu sectors, multi %u, CHS %u/%u/%u\n",
+ (unsigned long long)dev->n_sectors,
+ dev->multi_count, dev->cylinders,
+ dev->heads, dev->sectors);
}
}
@@ -2336,8 +2327,7 @@ int ata_dev_configure(struct ata_device *dev)
rc = atapi_cdb_len(id);
if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
if (ata_msg_warn(ap))
- ata_dev_printk(dev, KERN_WARNING,
- "unsupported CDB len\n");
+ ata_dev_warn(dev, "unsupported CDB len\n");
rc = -EINVAL;
goto err_out_nosup;
}
@@ -2358,9 +2348,9 @@ int ata_dev_configure(struct ata_device *dev)
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, SATA_AN);
if (err_mask)
- ata_dev_printk(dev, KERN_ERR,
- "failed to enable ATAPI AN "
- "(err_mask=0x%x)\n", err_mask);
+ ata_dev_err(dev,
+ "failed to enable ATAPI AN (err_mask=0x%x)\n",
+ err_mask);
else {
dev->flags |= ATA_DFLAG_AN;
atapi_an_string = ", ATAPI AN";
@@ -2379,12 +2369,12 @@ int ata_dev_configure(struct ata_device *dev)
/* print device info to dmesg */
if (ata_msg_drv(ap) && print_info)
- ata_dev_printk(dev, KERN_INFO,
- "ATAPI: %s, %s, max %s%s%s%s\n",
- modelbuf, fwrevbuf,
- ata_mode_string(xfer_mask),
- cdb_intr_string, atapi_an_string,
- dma_dir_string);
+ ata_dev_info(dev,
+ "ATAPI: %s, %s, max %s%s%s%s\n",
+ modelbuf, fwrevbuf,
+ ata_mode_string(xfer_mask),
+ cdb_intr_string, atapi_an_string,
+ dma_dir_string);
}
/* determine max_sectors */
@@ -2396,8 +2386,7 @@ int ata_dev_configure(struct ata_device *dev)
200 sectors */
if (ata_dev_knobble(dev)) {
if (ata_msg_drv(ap) && print_info)
- ata_dev_printk(dev, KERN_INFO,
- "applying bridge limits\n");
+ ata_dev_info(dev, "applying bridge limits\n");
dev->udma_mask &= ATA_UDMA5;
dev->max_sectors = ATA_MAX_SECTORS;
}
@@ -2423,26 +2412,23 @@ int ata_dev_configure(struct ata_device *dev)
bugs */
if (print_info) {
- ata_dev_printk(dev, KERN_WARNING,
+ ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
- ata_dev_printk(dev, KERN_WARNING,
+ ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
}
}
if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
- ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
- "firmware update to be fully functional.\n");
- ata_dev_printk(dev, KERN_WARNING, " contact the vendor "
- "or visit http://ata.wiki.kernel.org.\n");
+ ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
+ ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
}
return 0;
err_out_nosup:
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG,
- "%s: EXIT, err\n", __func__);
+ ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
return rc;
}
@@ -2663,13 +2649,11 @@ static void sata_print_link_status(struct ata_link *link)
if (ata_phys_link_online(link)) {
tmp = (sstatus >> 4) & 0xf;
- ata_link_printk(link, KERN_INFO,
- "SATA link up %s (SStatus %X SControl %X)\n",
- sata_spd_string(tmp), sstatus, scontrol);
+ ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
+ sata_spd_string(tmp), sstatus, scontrol);
} else {
- ata_link_printk(link, KERN_INFO,
- "SATA link down (SStatus %X SControl %X)\n",
- sstatus, scontrol);
+ ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
+ sstatus, scontrol);
}
}
@@ -2758,8 +2742,8 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
link->sata_spd_limit = mask;
- ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
- sata_spd_string(fls(mask)));
+ ata_link_warn(link, "limiting SATA link speed to %s\n",
+ sata_spd_string(fls(mask)));
return 0;
}
@@ -3136,8 +3120,7 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
snprintf(buf, sizeof(buf), "%s",
ata_mode_string(xfer_mask));
- ata_dev_printk(dev, KERN_WARNING,
- "limiting speed to %s\n", buf);
+ ata_dev_warn(dev, "limiting speed to %s\n", buf);
}
ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
@@ -3164,9 +3147,9 @@ static int ata_dev_set_mode(struct ata_device *dev)
dev_err_whine = " (SET_XFERMODE skipped)";
else {
if (nosetxfer)
- ata_dev_printk(dev, KERN_WARNING,
- "NOSETXFER but PATA detected - can't "
- "skip SETXFER, might malfunction\n");
+ ata_dev_warn(dev,
+ "NOSETXFER but PATA detected - can't "
+ "skip SETXFER, might malfunction\n");
err_mask = ata_dev_set_xfermode(dev);
}
@@ -3216,15 +3199,14 @@ static int ata_dev_set_mode(struct ata_device *dev)
DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
dev->xfer_shift, (int)dev->xfer_mode);
- ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
- ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
- dev_err_whine);
+ ata_dev_info(dev, "configured for %s%s\n",
+ ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
+ dev_err_whine);
return 0;
fail:
- ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
- "(err_mask=0x%x)\n", err_mask);
+ ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
return -EIO;
}
@@ -3286,7 +3268,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
/* step 2: always set host PIO timings */
ata_for_each_dev(dev, link, ENABLED) {
if (dev->pio_mode == 0xff) {
- ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
+ ata_dev_warn(dev, "no PIO support\n");
rc = -EINVAL;
goto out;
}
@@ -3404,7 +3386,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
if (!warned && time_after(now, start + 5 * HZ) &&
(deadline - now > 3 * HZ)) {
- ata_link_printk(link, KERN_WARNING,
+ ata_link_warn(link,
"link is slow to respond, please be patient "
"(ready=%d)\n", tmp);
warned = 1;
@@ -3552,16 +3534,14 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params,
} while ((scontrol & 0xf0f) != 0x300 && --tries);
if ((scontrol & 0xf0f) != 0x300) {
- ata_link_printk(link, KERN_ERR,
- "failed to resume link (SControl %X)\n",
- scontrol);
+ ata_link_warn(link, "failed to resume link (SControl %X)\n",
+ scontrol);
return 0;
}
if (tries < ATA_LINK_RESUME_TRIES)
- ata_link_printk(link, KERN_WARNING,
- "link resume succeeded after %d retries\n",
- ATA_LINK_RESUME_TRIES - tries);
+ ata_link_warn(link, "link resume succeeded after %d retries\n",
+ ATA_LINK_RESUME_TRIES - tries);
if ((rc = sata_link_debounce(link, params, deadline)))
return rc;
@@ -3678,8 +3658,9 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline)
rc = sata_link_resume(link, timing, deadline);
/* whine about phy resume failure but proceed */
if (rc && rc != -EOPNOTSUPP)
- ata_link_printk(link, KERN_WARNING, "failed to resume "
- "link for reset (errno=%d)\n", rc);
+ ata_link_warn(link,
+ "failed to resume link for reset (errno=%d)\n",
+ rc);
}
/* no point in trying softreset on offline link */
@@ -3795,8 +3776,7 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
/* online is set iff link is online && reset succeeded */
if (online)
*online = false;
- ata_link_printk(link, KERN_ERR,
- "COMRESET failed (errno=%d)\n", rc);
+ ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
}
DPRINTK("EXIT, rc=%d\n", rc);
return rc;
@@ -3880,8 +3860,8 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
if (dev->class != new_class) {
- ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
- dev->class, new_class);
+ ata_dev_info(dev, "class mismatch %d != %d\n",
+ dev->class, new_class);
return 0;
}
@@ -3891,14 +3871,14 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
if (strcmp(model[0], model[1])) {
- ata_dev_printk(dev, KERN_INFO, "model number mismatch "
- "'%s' != '%s'\n", model[0], model[1]);
+ ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
+ model[0], model[1]);
return 0;
}
if (strcmp(serial[0], serial[1])) {
- ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
- "'%s' != '%s'\n", serial[0], serial[1]);
+ ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
+ serial[0], serial[1]);
return 0;
}
@@ -3968,8 +3948,8 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
new_class != ATA_DEV_ATA &&
new_class != ATA_DEV_ATAPI &&
new_class != ATA_DEV_SEMB) {
- ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
- dev->class, new_class);
+ ata_dev_info(dev, "class mismatch %u != %u\n",
+ dev->class, new_class);
rc = -ENODEV;
goto fail;
}
@@ -3990,9 +3970,9 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
return 0;
/* n_sectors has changed */
- ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n",
- (unsigned long long)n_sectors,
- (unsigned long long)dev->n_sectors);
+ ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
+ (unsigned long long)n_sectors,
+ (unsigned long long)dev->n_sectors);
/*
* Something could have caused HPA to be unlocked
@@ -4001,9 +3981,9 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
*/
if (dev->n_native_sectors == n_native_sectors &&
dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
- ata_dev_printk(dev, KERN_WARNING,
- "new n_sectors matches native, probably "
- "late HPA unlock, n_sectors updated\n");
+ ata_dev_warn(dev,
+ "new n_sectors matches native, probably "
+ "late HPA unlock, n_sectors updated\n");
/* use the larger n_sectors */
return 0;
}
@@ -4017,9 +3997,9 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
if (dev->n_native_sectors == n_native_sectors &&
dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
!(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
- ata_dev_printk(dev, KERN_WARNING,
- "old n_sectors matches native, probably "
- "late HPA lock, will try to unlock HPA\n");
+ ata_dev_warn(dev,
+ "old n_sectors matches native, probably "
+ "late HPA lock, will try to unlock HPA\n");
/* try unlocking HPA */
dev->flags |= ATA_DFLAG_UNLOCK_HPA;
rc = -EIO;
@@ -4030,7 +4010,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
dev->n_native_sectors = n_native_sectors;
dev->n_sectors = n_sectors;
fail:
- ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
+ ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
return rc;
}
@@ -4143,9 +4123,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
* Devices which choke on SETXFER. Applies only if both the
* device and controller are SATA.
*/
- { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-212D", "1.28", ATA_HORKAGE_NOSETXFER },
- { "PIONEER DVD-RW DVR-216D", "1.08", ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
/* End Marker */
{ }
@@ -4358,15 +4338,15 @@ static void ata_dev_xfermask(struct ata_device *dev)
if (ata_dma_blacklisted(dev)) {
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
- ata_dev_printk(dev, KERN_WARNING,
- "device is on DMA blacklist, disabling DMA\n");
+ ata_dev_warn(dev,
+ "device is on DMA blacklist, disabling DMA\n");
}
if ((host->flags & ATA_HOST_SIMPLEX) &&
host->simplex_claimed && host->simplex_claimed != ap) {
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
- ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
- "other device, disabling DMA\n");
+ ata_dev_warn(dev,
+ "simplex DMA is claimed by other device, disabling DMA\n");
}
if (ap->flags & ATA_FLAG_NO_IORDY)
@@ -4386,8 +4366,8 @@ static void ata_dev_xfermask(struct ata_device *dev)
if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
/* UDMA/44 or higher would be available */
if (cable_is_40wire(ap)) {
- ata_dev_printk(dev, KERN_WARNING,
- "limited to UDMA/33 due to 40-wire cable\n");
+ ata_dev_warn(dev,
+ "limited to UDMA/33 due to 40-wire cable\n");
xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
}
@@ -4954,8 +4934,8 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
done_mask = ap->qc_active ^ qc_active;
if (unlikely(done_mask & qc_active)) {
- ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
- "(%08x->%08x)\n", ap->qc_active, qc_active);
+ ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
+ ap->qc_active, qc_active);
return -EINVAL;
}
@@ -5847,9 +5827,9 @@ int ata_host_start(struct ata_host *host)
rc = ap->ops->port_start(ap);
if (rc) {
if (rc != -ENODEV)
- dev_printk(KERN_ERR, host->dev,
- "failed to start port %d "
- "(errno=%d)\n", i, rc);
+ dev_err(host->dev,
+ "failed to start port %d (errno=%d)\n",
+ i, rc);
goto err_out;
}
}
@@ -5971,8 +5951,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
/* host must have been started */
if (!(host->flags & ATA_HOST_STARTED)) {
- dev_printk(KERN_ERR, host->dev,
- "BUG: trying to register unstarted host\n");
+ dev_err(host->dev, "BUG: trying to register unstarted host\n");
WARN_ON(1);
return -EINVAL;
}
@@ -6023,14 +6002,13 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
ap->udma_mask);
if (!ata_port_is_dummy(ap)) {
- ata_port_printk(ap, KERN_INFO,
- "%cATA max %s %s\n",
- (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
- ata_mode_string(xfer_mask),
- ap->link.eh_info.desc);
+ ata_port_info(ap, "%cATA max %s %s\n",
+ (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
+ ata_mode_string(xfer_mask),
+ ap->link.eh_info.desc);
ata_ehi_clear_desc(&ap->link.eh_info);
} else
- ata_port_printk(ap, KERN_INFO, "DUMMY\n");
+ ata_port_info(ap, "DUMMY\n");
}
/* perform each probe asynchronously */
@@ -6242,8 +6220,8 @@ int ata_pci_device_do_resume(struct pci_dev *pdev)
rc = pcim_enable_device(pdev);
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to enable device after resume (%d)\n", rc);
+ dev_err(&pdev->dev,
+ "failed to enable device after resume (%d)\n", rc);
return rc;
}
@@ -6600,6 +6578,82 @@ const struct ata_port_info ata_dummy_port_info = {
};
/*
+ * Utility print functions
+ */
+int ata_port_printk(const struct ata_port *ap, const char *level,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
+
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(ata_port_printk);
+
+int ata_link_printk(const struct ata_link *link, const char *level,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (sata_pmp_attached(link->ap) || link->ap->slave_link)
+ r = printk("%sata%u.%02u: %pV",
+ level, link->ap->print_id, link->pmp, &vaf);
+ else
+ r = printk("%sata%u: %pV",
+ level, link->ap->print_id, &vaf);
+
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(ata_link_printk);
+
+int ata_dev_printk(const struct ata_device *dev, const char *level,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ r = printk("%sata%u.%02u: %pV",
+ level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
+ &vaf);
+
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(ata_dev_printk);
+
+void ata_print_version(const struct device *dev, const char *version)
+{
+ dev_printk(KERN_DEBUG, dev, "version %s\n", version);
+}
+EXPORT_SYMBOL(ata_print_version);
+
+/*
* libata is essentially a library of internal helper functions for
* low-level ATA host controller drivers. As such, the API/ABI is
* likely to change as new drivers are added and updated.
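Note: the ata_dev_warn()/ata_dev_err()/ata_dev_info()/ata_dev_dbg() calls used
throughout the converted hunks are thin wrappers over the ata_port_printk(),
ata_link_printk() and ata_dev_printk() helpers added above. The wrapper macros
themselves live in include/linux/libata.h, which is outside the drivers/ diff
shown here, so the following is only an illustrative sketch of how such
wrappers are presumably defined (names and KERN_* mapping assumed, not taken
from this diff):

	/* illustrative sketch only -- assumed wrapper layer, not part of this diff */
	#define ata_dev_err(dev, fmt, ...) \
		ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__)
	#define ata_dev_warn(dev, fmt, ...) \
		ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__)
	#define ata_dev_info(dev, fmt, ...) \
		ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
	#define ata_dev_dbg(dev, fmt, ...) \
		ata_dev_printk(dev, KERN_DEBUG, fmt, ##__VA_ARGS__)

	/*
	 * ata_link_err()/ata_link_warn()/ata_link_info()/ata_link_dbg() and
	 * ata_port_err()/ata_port_warn()/ata_port_info()/ata_port_dbg() would
	 * follow the same pattern on top of ata_link_printk() and
	 * ata_port_printk(); ata_print_version_once() would likewise wrap
	 * ata_print_version() with print-once semantics.
	 */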
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7f099d6e4e0..ed16fbedaab 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -782,8 +782,9 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
goto repeat;
}
- ata_port_printk(ap, KERN_ERR, "EH pending after %d "
- "tries, giving up\n", ATA_EH_MAX_TRIES);
+ ata_port_err(ap,
+ "EH pending after %d tries, giving up\n",
+ ATA_EH_MAX_TRIES);
ap->pflags &= ~ATA_PFLAG_EH_PENDING;
}
@@ -816,7 +817,7 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
schedule_delayed_work(&ap->hotplug_task, 0);
if (ap->pflags & ATA_PFLAG_RECOVERED)
- ata_port_printk(ap, KERN_INFO, "EH complete\n");
+ ata_port_info(ap, "EH complete\n");
ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
@@ -1310,7 +1311,7 @@ void ata_dev_disable(struct ata_device *dev)
return;
if (ata_msg_drv(dev->link->ap))
- ata_dev_printk(dev, KERN_WARNING, "disabled\n");
+ ata_dev_warn(dev, "disabled\n");
ata_acpi_on_disable(dev);
ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
dev->class++;
@@ -1515,8 +1516,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
for (i = 0; i < ATA_SECT_SIZE; i++)
csum += buf[i];
if (csum)
- ata_dev_printk(dev, KERN_WARNING,
- "invalid checksum 0x%x on log page 10h\n", csum);
+ ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
+ csum);
if (buf[0] & 0x80)
return -ENOENT;
@@ -1716,14 +1717,14 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
memset(&tf, 0, sizeof(tf));
rc = ata_eh_read_log_10h(dev, &tag, &tf);
if (rc) {
- ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
- "(errno=%d)\n", rc);
+ ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
+ rc);
return;
}
if (!(link->sactive & (1 << tag))) {
- ata_link_printk(link, KERN_ERR, "log page 10h reported "
- "inactive tag %d\n", tag);
+ ata_link_err(link, "log page 10h reported inactive tag %d\n",
+ tag);
return;
}
@@ -1988,8 +1989,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev,
(dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
dev->flags |= ATA_DFLAG_NCQ_OFF;
- ata_dev_printk(dev, KERN_WARNING,
- "NCQ disabled due to excessive errors\n");
+ ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
goto done;
}
@@ -2374,24 +2374,24 @@ static void ata_eh_link_report(struct ata_link *link)
ap->eh_tries);
if (ehc->i.dev) {
- ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
- "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
- ehc->i.err_mask, link->sactive, ehc->i.serror,
- ehc->i.action, frozen, tries_buf);
+ ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
+ "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
+ ehc->i.err_mask, link->sactive, ehc->i.serror,
+ ehc->i.action, frozen, tries_buf);
if (desc)
- ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
+ ata_dev_err(ehc->i.dev, "%s\n", desc);
} else {
- ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
- "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
- ehc->i.err_mask, link->sactive, ehc->i.serror,
- ehc->i.action, frozen, tries_buf);
+ ata_link_err(link, "exception Emask 0x%x "
+ "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
+ ehc->i.err_mask, link->sactive, ehc->i.serror,
+ ehc->i.action, frozen, tries_buf);
if (desc)
- ata_link_printk(link, KERN_ERR, "%s\n", desc);
+ ata_link_err(link, "%s\n", desc);
}
#ifdef CONFIG_ATA_VERBOSE_ERROR
if (ehc->i.serror)
- ata_link_printk(link, KERN_ERR,
+ ata_link_err(link,
"SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
@@ -2456,11 +2456,11 @@ static void ata_eh_link_report(struct ata_link *link)
} else {
const char *descr = ata_get_cmd_descript(cmd->command);
if (descr)
- ata_dev_printk(qc->dev, KERN_ERR,
- "failed command: %s\n", descr);
+ ata_dev_err(qc->dev, "failed command: %s\n",
+ descr);
}
- ata_dev_printk(qc->dev, KERN_ERR,
+ ata_dev_err(qc->dev,
"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
"tag %d%s\n %s"
"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
@@ -2481,11 +2481,9 @@ static void ata_eh_link_report(struct ata_link *link)
if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
ATA_ERR)) {
if (res->command & ATA_BUSY)
- ata_dev_printk(qc->dev, KERN_ERR,
- "status: { Busy }\n");
+ ata_dev_err(qc->dev, "status: { Busy }\n");
else
- ata_dev_printk(qc->dev, KERN_ERR,
- "status: { %s%s%s%s}\n",
+ ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
res->command & ATA_DRDY ? "DRDY " : "",
res->command & ATA_DF ? "DF " : "",
res->command & ATA_DRQ ? "DRQ " : "",
@@ -2495,8 +2493,7 @@ static void ata_eh_link_report(struct ata_link *link)
if (cmd->command != ATA_CMD_PACKET &&
(res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
ATA_ABORTED)))
- ata_dev_printk(qc->dev, KERN_ERR,
- "error: { %s%s%s%s}\n",
+ ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
res->feature & ATA_ICRC ? "ICRC " : "",
res->feature & ATA_UNC ? "UNC " : "",
res->feature & ATA_IDNF ? "IDNF " : "",
@@ -2650,8 +2647,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
if (rc) {
if (rc == -ENOENT) {
- ata_link_printk(link, KERN_DEBUG,
- "port disabled. ignoring.\n");
+ ata_link_dbg(link, "port disabled--ignoring\n");
ehc->i.action &= ~ATA_EH_RESET;
ata_for_each_dev(dev, link, ALL)
@@ -2659,8 +2655,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
rc = 0;
} else
- ata_link_printk(link, KERN_ERR,
- "prereset failed (errno=%d)\n", rc);
+ ata_link_err(link,
+ "prereset failed (errno=%d)\n",
+ rc);
goto out;
}
@@ -2689,8 +2686,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
if (reset) {
if (verbose)
- ata_link_printk(link, KERN_INFO, "%s resetting link\n",
- reset == softreset ? "soft" : "hard");
+ ata_link_info(link, "%s resetting link\n",
+ reset == softreset ? "soft" : "hard");
/* mark that this EH session started with reset */
ehc->last_reset = jiffies;
@@ -2710,8 +2707,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
int tmp;
if (verbose)
- ata_link_printk(slave, KERN_INFO,
- "hard resetting link\n");
+ ata_link_info(slave, "hard resetting link\n");
ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
tmp = ata_do_reset(slave, reset, classes, deadline,
@@ -2734,9 +2730,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
reset = softreset;
if (!reset) {
- ata_link_printk(link, KERN_ERR,
- "follow-up softreset required "
- "but no softreset available\n");
+ ata_link_err(link,
+ "follow-up softreset required but no softreset available\n");
failed_link = link;
rc = -EINVAL;
goto fail;
@@ -2751,8 +2746,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
}
} else {
if (verbose)
- ata_link_printk(link, KERN_INFO, "no reset method "
- "available, skipping reset\n");
+ ata_link_info(link,
+ "no reset method available, skipping reset\n");
if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
lflags |= ATA_LFLAG_ASSUME_ATA;
}
@@ -2830,36 +2825,35 @@ int ata_eh_reset(struct ata_link *link, int classify,
ata_for_each_dev(dev, link, ALL) {
if (ata_phys_link_online(ata_dev_phys_link(dev))) {
if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
- ata_dev_printk(dev, KERN_DEBUG, "link online "
- "but device misclassifed\n");
+ ata_dev_dbg(dev, "link online but device misclassified\n");
classes[dev->devno] = ATA_DEV_NONE;
nr_unknown++;
}
} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
if (ata_class_enabled(classes[dev->devno]))
- ata_dev_printk(dev, KERN_DEBUG, "link offline, "
- "clearing class %d to NONE\n",
- classes[dev->devno]);
+ ata_dev_dbg(dev,
+ "link offline, clearing class %d to NONE\n",
+ classes[dev->devno]);
classes[dev->devno] = ATA_DEV_NONE;
} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
- ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
- "clearing UNKNOWN to NONE\n");
+ ata_dev_dbg(dev,
+ "link status unknown, clearing UNKNOWN to NONE\n");
classes[dev->devno] = ATA_DEV_NONE;
}
}
if (classify && nr_unknown) {
if (try < max_tries) {
- ata_link_printk(link, KERN_WARNING, "link online but "
- "%d devices misclassified, retrying\n",
- nr_unknown);
+ ata_link_warn(link,
+ "link online but %d devices misclassified, retrying\n",
+ nr_unknown);
failed_link = link;
rc = -EAGAIN;
goto fail;
}
- ata_link_printk(link, KERN_WARNING,
- "link online but %d devices misclassified, "
- "device detection might fail\n", nr_unknown);
+ ata_link_warn(link,
+ "link online but %d devices misclassified, "
+ "device detection might fail\n", nr_unknown);
}
/* reset successful, schedule revalidation */
@@ -2889,14 +2883,23 @@ int ata_eh_reset(struct ata_link *link, int classify,
sata_scr_read(link, SCR_STATUS, &sstatus))
rc = -ERESTART;
- if (rc == -ERESTART || try >= max_tries)
+ if (rc == -ERESTART || try >= max_tries) {
+ /*
+ * Thaw host port even if reset failed, so that the port
+ * can be retried on the next phy event. This risks
+ * repeated EH runs but seems to be a better tradeoff than
+ * shutting down a port after a botched hotplug attempt.
+ */
+ if (ata_is_host_link(link))
+ ata_eh_thaw_port(ap);
goto out;
+ }
now = jiffies;
if (time_before(now, deadline)) {
unsigned long delta = deadline - now;
- ata_link_printk(failed_link, KERN_WARNING,
+ ata_link_warn(failed_link,
"reset failed (errno=%d), retrying in %u secs\n",
rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
@@ -2987,7 +2990,7 @@ static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
tf.protocol |= ATA_PROT_NODATA;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (park && (err_mask || tf.lbal != 0xc4)) {
- ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
+ ata_dev_err(dev, "head unload failed!\n");
ehc->unloaded_mask &= ~(1 << dev->devno);
}
}
@@ -3198,8 +3201,9 @@ static int atapi_eh_clear_ua(struct ata_device *dev)
err_mask = atapi_eh_tur(dev, &sense_key);
if (err_mask != 0 && err_mask != AC_ERR_DEV) {
- ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
- "failed (err_mask=0x%x)\n", err_mask);
+ ata_dev_warn(dev,
+ "TEST_UNIT_READY failed (err_mask=0x%x)\n",
+ err_mask);
return -EIO;
}
@@ -3208,14 +3212,14 @@ static int atapi_eh_clear_ua(struct ata_device *dev)
err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
if (err_mask) {
- ata_dev_printk(dev, KERN_WARNING, "failed to clear "
+ ata_dev_warn(dev, "failed to clear "
"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
return -EIO;
}
}
- ata_dev_printk(dev, KERN_WARNING,
- "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
+ ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
+ ATA_EH_UA_TRIES);
return 0;
}
@@ -3266,7 +3270,7 @@ static int ata_eh_maybe_retry_flush(struct ata_device *dev)
tf.flags |= ATA_TFLAG_DEVICE;
tf.protocol = ATA_PROT_NODATA;
- ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
+ ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
tf.command, qc->err_mask);
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
@@ -3281,7 +3285,7 @@ static int ata_eh_maybe_retry_flush(struct ata_device *dev)
*/
qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
} else {
- ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
+ ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
err_mask);
rc = -EIO;
@@ -3355,9 +3359,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_DISABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
- ata_dev_printk(dev, KERN_WARNING,
- "failed to disable DIPM, Emask 0x%x\n",
- err_mask);
+ ata_dev_warn(dev,
+ "failed to disable DIPM, Emask 0x%x\n",
+ err_mask);
rc = -EIO;
goto fail;
}
@@ -3399,7 +3403,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
- ata_dev_printk(dev, KERN_WARNING,
+ ata_dev_warn(dev,
"failed to enable DIPM, Emask 0x%x\n",
err_mask);
rc = -EIO;
@@ -3418,8 +3422,7 @@ fail:
/* if no device or only one more chance is left, disable LPM */
if (!dev || ehc->tries[dev->devno] <= 2) {
- ata_link_printk(link, KERN_WARNING,
- "disabling LPM on the link\n");
+ ata_link_warn(link, "disabling LPM on the link\n");
link->flags |= ATA_LFLAG_NO_LPM;
}
if (r_failed_dev)
@@ -3690,8 +3693,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
rc = ata_eh_reset(link, ata_link_nr_vacant(link),
prereset, softreset, hardreset, postreset);
if (rc) {
- ata_link_printk(link, KERN_ERR,
- "reset failed, giving up\n");
+ ata_link_err(link, "reset failed, giving up\n");
goto out;
}
}
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index f06b7ea590d..3eb2b816eb2 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -147,8 +147,8 @@ int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *r_val)
err_mask = sata_pmp_read(link, reg, r_val);
if (err_mask) {
- ata_link_printk(link, KERN_WARNING, "failed to read SCR %d "
- "(Emask=0x%x)\n", reg, err_mask);
+ ata_link_warn(link, "failed to read SCR %d (Emask=0x%x)\n",
+ reg, err_mask);
return -EIO;
}
return 0;
@@ -178,8 +178,8 @@ int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val)
err_mask = sata_pmp_write(link, reg, val);
if (err_mask) {
- ata_link_printk(link, KERN_WARNING, "failed to write SCR %d "
- "(Emask=0x%x)\n", reg, err_mask);
+ ata_link_warn(link, "failed to write SCR %d (Emask=0x%x)\n",
+ reg, err_mask);
return -EIO;
}
return 0;
@@ -231,8 +231,8 @@ static int sata_pmp_read_gscr(struct ata_device *dev, u32 *gscr)
err_mask = sata_pmp_read(dev->link, reg, &gscr[reg]);
if (err_mask) {
- ata_dev_printk(dev, KERN_ERR, "failed to read PMP "
- "GSCR[%d] (Emask=0x%x)\n", reg, err_mask);
+ ata_dev_err(dev, "failed to read PMP GSCR[%d] (Emask=0x%x)\n",
+ reg, err_mask);
return -EIO;
}
}
@@ -311,26 +311,25 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
}
if (print_info) {
- ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, "
- "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
- sata_pmp_spec_rev_str(gscr), vendor, devid,
- sata_pmp_gscr_rev(gscr),
- nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
- gscr[SATA_PMP_GSCR_FEAT]);
+ ata_dev_info(dev, "Port Multiplier %s, "
+ "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
+ sata_pmp_spec_rev_str(gscr), vendor, devid,
+ sata_pmp_gscr_rev(gscr),
+ nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
+ gscr[SATA_PMP_GSCR_FEAT]);
if (!(dev->flags & ATA_DFLAG_AN))
- ata_dev_printk(dev, KERN_INFO,
+ ata_dev_info(dev,
"Asynchronous notification not supported, "
- "hotplug won't\n work on fan-out "
- "ports. Use warm-plug instead.\n");
+ "hotplug won't work on fan-out ports. Use warm-plug instead.\n");
}
return 0;
fail:
- ata_dev_printk(dev, KERN_ERR,
- "failed to configure Port Multiplier (%s, Emask=0x%x)\n",
- reason, err_mask);
+ ata_dev_err(dev,
+ "failed to configure Port Multiplier (%s, Emask=0x%x)\n",
+ reason, err_mask);
return rc;
}
@@ -485,20 +484,17 @@ int sata_pmp_attach(struct ata_device *dev)
/* is it hanging off the right place? */
if (!sata_pmp_supported(ap)) {
- ata_dev_printk(dev, KERN_ERR,
- "host does not support Port Multiplier\n");
+ ata_dev_err(dev, "host does not support Port Multiplier\n");
return -EINVAL;
}
if (!ata_is_host_link(link)) {
- ata_dev_printk(dev, KERN_ERR,
- "Port Multipliers cannot be nested\n");
+ ata_dev_err(dev, "Port Multipliers cannot be nested\n");
return -EINVAL;
}
if (dev->devno) {
- ata_dev_printk(dev, KERN_ERR,
- "Port Multiplier must be the first device\n");
+ ata_dev_err(dev, "Port Multiplier must be the first device\n");
return -EINVAL;
}
@@ -517,8 +513,7 @@ int sata_pmp_attach(struct ata_device *dev)
rc = sata_pmp_init_links(ap, sata_pmp_gscr_ports(dev->gscr));
if (rc) {
- ata_dev_printk(dev, KERN_INFO,
- "failed to initialize PMP links\n");
+ ata_dev_info(dev, "failed to initialize PMP links\n");
goto fail;
}
@@ -562,7 +557,7 @@ static void sata_pmp_detach(struct ata_device *dev)
struct ata_link *tlink;
unsigned long flags;
- ata_dev_printk(dev, KERN_INFO, "Port Multiplier detaching\n");
+ ata_dev_info(dev, "Port Multiplier detaching\n");
WARN_ON(!ata_is_host_link(link) || dev->devno ||
link->pmp != SATA_PMP_CTRL_PORT);
@@ -609,23 +604,23 @@ static int sata_pmp_same_pmp(struct ata_device *dev, const u32 *new_gscr)
new_nr_ports = sata_pmp_gscr_ports(new_gscr);
if (old_vendor != new_vendor) {
- ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
- "vendor mismatch '0x%x' != '0x%x'\n",
- old_vendor, new_vendor);
+ ata_dev_info(dev,
+ "Port Multiplier vendor mismatch '0x%x' != '0x%x'\n",
+ old_vendor, new_vendor);
return 0;
}
if (old_devid != new_devid) {
- ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
- "device ID mismatch '0x%x' != '0x%x'\n",
- old_devid, new_devid);
+ ata_dev_info(dev,
+ "Port Multiplier device ID mismatch '0x%x' != '0x%x'\n",
+ old_devid, new_devid);
return 0;
}
if (old_nr_ports != new_nr_ports) {
- ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
- "nr_ports mismatch '0x%x' != '0x%x'\n",
- old_nr_ports, new_nr_ports);
+ ata_dev_info(dev,
+ "Port Multiplier nr_ports mismatch '0x%x' != '0x%x'\n",
+ old_nr_ports, new_nr_ports);
return 0;
}
@@ -691,8 +686,7 @@ static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
return 0;
fail:
- ata_dev_printk(dev, KERN_ERR,
- "PMP revalidation failed (errno=%d)\n", rc);
+ ata_dev_err(dev, "PMP revalidation failed (errno=%d)\n", rc);
DPRINTK("EXIT, rc=%d\n", rc);
return rc;
}
@@ -716,13 +710,14 @@ static int sata_pmp_revalidate_quick(struct ata_device *dev)
err_mask = sata_pmp_read(dev->link, SATA_PMP_GSCR_PROD_ID, &prod_id);
if (err_mask) {
- ata_dev_printk(dev, KERN_ERR, "failed to read PMP product ID "
- "(Emask=0x%x)\n", err_mask);
+ ata_dev_err(dev,
+ "failed to read PMP product ID (Emask=0x%x)\n",
+ err_mask);
return -EIO;
}
if (prod_id != dev->gscr[SATA_PMP_GSCR_PROD_ID]) {
- ata_dev_printk(dev, KERN_ERR, "PMP product ID mismatch\n");
+ ata_dev_err(dev, "PMP product ID mismatch\n");
/* something weird is going on, request full PMP recovery */
return -EIO;
}
@@ -777,8 +772,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
rc = ata_eh_reset(link, 0, prereset, softreset, hardreset,
postreset);
if (rc) {
- ata_link_printk(link, KERN_ERR,
- "failed to reset PMP, giving up\n");
+ ata_link_err(link, "failed to reset PMP, giving up\n");
goto fail;
}
@@ -819,9 +813,9 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
ehc->i.action |= ATA_EH_RESET;
goto retry;
} else {
- ata_dev_printk(dev, KERN_ERR, "failed to recover PMP "
- "after %d tries, giving up\n",
- ATA_EH_PMP_TRIES);
+ ata_dev_err(dev,
+ "failed to recover PMP after %d tries, giving up\n",
+ ATA_EH_PMP_TRIES);
goto fail;
}
}
@@ -867,8 +861,9 @@ static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap)
/* unconditionally clear SError.N */
rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
if (rc) {
- ata_link_printk(link, KERN_ERR, "failed to clear "
- "SError.N (errno=%d)\n", rc);
+ ata_link_err(link,
+ "failed to clear SError.N (errno=%d)\n",
+ rc);
return rc;
}
@@ -890,7 +885,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
/* disable this link */
if (!(link->flags & ATA_LFLAG_DISABLED)) {
- ata_link_printk(link, KERN_WARNING,
+ ata_link_warn(link,
"failed to recover link after %d tries, disabling\n",
ATA_EH_PMP_LINK_TRIES);
@@ -974,7 +969,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN,
gscr[SATA_PMP_GSCR_FEAT_EN]);
if (err_mask) {
- ata_link_printk(pmp_link, KERN_WARNING,
+ ata_link_warn(pmp_link,
"failed to disable NOTIFY (err_mask=0x%x)\n",
err_mask);
goto pmp_fail;
@@ -1018,8 +1013,9 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN,
gscr[SATA_PMP_GSCR_FEAT_EN]);
if (err_mask) {
- ata_dev_printk(pmp_dev, KERN_ERR, "failed to write "
- "PMP_FEAT_EN (Emask=0x%x)\n", err_mask);
+ ata_dev_err(pmp_dev,
+ "failed to write PMP_FEAT_EN (Emask=0x%x)\n",
+ err_mask);
rc = -EIO;
goto pmp_fail;
}
@@ -1028,8 +1024,9 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
/* check GSCR_ERROR */
err_mask = sata_pmp_read(pmp_link, SATA_PMP_GSCR_ERROR, &gscr_error);
if (err_mask) {
- ata_dev_printk(pmp_dev, KERN_ERR, "failed to read "
- "PMP_GSCR_ERROR (Emask=0x%x)\n", err_mask);
+ ata_dev_err(pmp_dev,
+ "failed to read PMP_GSCR_ERROR (Emask=0x%x)\n",
+ err_mask);
rc = -EIO;
goto pmp_fail;
}
@@ -1043,17 +1040,16 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
ata_ehi_hotplugged(&link->eh_context.i);
cnt++;
} else {
- ata_link_printk(link, KERN_WARNING,
- "PHY status changed but maxed out on retries, "
- "giving up\n");
- ata_link_printk(link, KERN_WARNING,
- "Manully issue scan to resume this link\n");
+ ata_link_warn(link,
+ "PHY status changed but maxed out on retries, giving up\n");
+ ata_link_warn(link,
+ "Manually issue scan to resume this link\n");
}
}
if (cnt) {
- ata_port_printk(ap, KERN_INFO, "PMP SError.N set for some "
- "ports, repeating recovery\n");
+ ata_port_info(ap,
+ "PMP SError.N set for some ports, repeating recovery\n");
goto retry;
}
@@ -1081,9 +1077,8 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
goto retry;
}
- ata_port_printk(ap, KERN_ERR,
- "failed to recover PMP after %d tries, giving up\n",
- ATA_EH_PMP_TRIES);
+ ata_port_err(ap, "failed to recover PMP after %d tries, giving up\n",
+ ATA_EH_PMP_TRIES);
sata_pmp_detach(pmp_dev);
ata_dev_disable(pmp_dev);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d51f9795c06..46d087f0860 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1108,8 +1108,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
/* configure draining */
buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
if (!buf) {
- ata_dev_printk(dev, KERN_ERR,
- "drain buffer allocation failed\n");
+ ata_dev_err(dev, "drain buffer allocation failed\n");
return -ENOMEM;
}
@@ -1127,7 +1126,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
* IDENTIFY_PACKET is executed as ATA_PROT_PIO.
*/
if (sdev->sector_size > PAGE_SIZE)
- ata_dev_printk(dev, KERN_WARNING,
+ ata_dev_warn(dev,
"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
sdev->sector_size);
@@ -1784,8 +1783,7 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
cmd->sc_data_direction == DMA_TO_DEVICE) {
if (unlikely(scsi_bufflen(cmd) < 1)) {
- ata_dev_printk(dev, KERN_WARNING,
- "WARNING: zero len r/w req\n");
+ ata_dev_warn(dev, "WARNING: zero len r/w req\n");
goto err_did;
}
@@ -2969,9 +2967,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
* with the cached multi_count of libata
*/
if (multi_count != dev->multi_count)
- ata_dev_printk(dev, KERN_WARNING,
- "invalid multi_count %u ignored\n",
- multi_count);
+ ata_dev_warn(dev, "invalid multi_count %u ignored\n",
+ multi_count);
}
/*
@@ -3466,9 +3463,8 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
goto repeat;
}
- ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan "
- "failed without making any progress,\n"
- " switching to async\n");
+ ata_port_err(ap,
+ "WARNING: synchronous SCSI scan failed without making any progress, switching to async\n");
}
queue_delayed_work(system_long_wq, &ap->hotplug_task,
@@ -3550,8 +3546,8 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
mutex_unlock(&ap->scsi_host->scan_mutex);
if (sdev) {
- ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
- dev_name(&sdev->sdev_gendev));
+ ata_dev_info(dev, "detaching (SCSI %s)\n",
+ dev_name(&sdev->sdev_gendev));
scsi_remove_device(sdev);
scsi_device_put(sdev);
@@ -3797,6 +3793,12 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
*/
int ata_sas_port_start(struct ata_port *ap)
{
+ /*
+ * the port is marked as frozen at allocation time, but if we don't
+ * have new eh, we won't thaw it
+ */
+ if (!ap->ops->error_handler)
+ ap->pflags &= ~ATA_PFLAG_FROZEN;
return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_port_start);
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index b1b926c55a7..c24127dd6ef 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -227,9 +227,9 @@ int ata_sff_busy_sleep(struct ata_port *ap,
}
if (status != 0xff && (status & ATA_BUSY))
- ata_port_printk(ap, KERN_WARNING,
- "port is slow to respond, please be patient "
- "(Status 0x%x)\n", status);
+ ata_port_warn(ap,
+ "port is slow to respond, please be patient (Status 0x%x)\n",
+ status);
timeout = ata_deadline(timer_start, tmout);
while (status != 0xff && (status & ATA_BUSY) &&
@@ -242,9 +242,9 @@ int ata_sff_busy_sleep(struct ata_port *ap,
return -ENODEV;
if (status & ATA_BUSY) {
- ata_port_printk(ap, KERN_ERR, "port failed to respond "
- "(%lu secs, Status 0x%x)\n",
- DIV_ROUND_UP(tmout, 1000), status);
+ ata_port_err(ap,
+ "port failed to respond (%lu secs, Status 0x%x)\n",
+ DIV_ROUND_UP(tmout, 1000), status);
return -EBUSY;
}
@@ -350,8 +350,8 @@ static void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep)
{
if (ata_msg_probe(ap))
- ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
- "device %u, wait %u\n", device, wait);
+ ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n",
+ device, wait);
if (wait)
ata_wait_idle(ap);
@@ -1333,9 +1333,10 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
cancel_delayed_work_sync(&ap->sff_pio_task);
ap->hsm_task_state = HSM_ST_IDLE;
+ ap->sff_pio_task_link = NULL;
if (ata_msg_ctl(ap))
- ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
+ ata_port_dbg(ap, "%s: EXIT\n", __func__);
}
static void ata_sff_pio_task(struct work_struct *work)
@@ -1513,7 +1514,7 @@ static unsigned int ata_sff_idle_irq(struct ata_port *ap)
ap->ops->sff_check_status(ap);
if (ap->ops->sff_irq_clear)
ap->ops->sff_irq_clear(ap);
- ata_port_printk(ap, KERN_WARNING, "irq trap\n");
+ ata_port_warn(ap, "irq trap\n");
return 1;
}
#endif
@@ -1711,7 +1712,7 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
/* There was a command running, we are no longer busy and we have
no interrupt. */
- ata_port_printk(ap, KERN_WARNING, "lost interrupt (Status 0x%x)\n",
+ ata_port_warn(ap, "lost interrupt (Status 0x%x)\n",
status);
/* Run the host interrupt logic as if the interrupt had not been
lost */
@@ -1798,8 +1799,9 @@ int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
if (!ata_link_offline(link)) {
rc = ata_sff_wait_ready(link, deadline);
if (rc && rc != -ENODEV) {
- ata_link_printk(link, KERN_WARNING, "device not ready "
- "(errno=%d), forcing hardreset\n", rc);
+ ata_link_warn(link,
+ "device not ready (errno=%d), forcing hardreset\n",
+ rc);
ehc->i.action |= ATA_EH_HARDRESET;
}
}
@@ -2056,7 +2058,7 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
rc = ata_bus_softreset(ap, devmask, deadline);
/* if link is occupied, -ENODEV too is an error */
if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
- ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
+ ata_link_err(link, "SRST failed (errno=%d)\n", rc);
return rc;
}
@@ -2170,8 +2172,7 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
/* Can become DEBUG later */
if (count)
- ata_port_printk(ap, KERN_DEBUG,
- "drained %d bytes to clear DRQ.\n", count);
+ ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
}
EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
@@ -2316,9 +2317,9 @@ int ata_pci_sff_init_host(struct ata_host *host)
rc = pcim_iomap_regions(pdev, 0x3 << base,
dev_driver_string(gdev));
if (rc) {
- dev_printk(KERN_WARNING, gdev,
- "failed to request/iomap BARs for port %d "
- "(errno=%d)\n", i, rc);
+ dev_warn(gdev,
+ "failed to request/iomap BARs for port %d (errno=%d)\n",
+ i, rc);
if (rc == -EBUSY)
pcim_pin_device(pdev);
ap->ops = &ata_dummy_port_ops;
@@ -2340,7 +2341,7 @@ int ata_pci_sff_init_host(struct ata_host *host)
}
if (!mask) {
- dev_printk(KERN_ERR, gdev, "no available native port\n");
+ dev_err(gdev, "no available native port\n");
return -ENODEV;
}
@@ -2375,8 +2376,7 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev,
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to allocate ATA host\n");
+ dev_err(&pdev->dev, "failed to allocate ATA host\n");
rc = -ENOMEM;
goto err_out;
}
@@ -2542,8 +2542,7 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
pi = ata_sff_find_valid_pi(ppi);
if (!pi) {
- dev_printk(KERN_ERR, &pdev->dev,
- "no valid port_info specified\n");
+ dev_err(&pdev->dev, "no valid port_info specified\n");
return -EINVAL;
}
@@ -3164,8 +3163,7 @@ static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
{
int i;
- dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n",
- reason);
+ dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);
for (i = 0; i < 2; i++) {
host->ports[i]->mwdma_mask = 0;
@@ -3297,8 +3295,7 @@ int ata_pci_bmdma_init_one(struct pci_dev *pdev,
pi = ata_sff_find_valid_pi(ppi);
if (!pi) {
- dev_printk(KERN_ERR, &pdev->dev,
- "no valid port_info specified\n");
+ dev_err(&pdev->dev, "no valid port_info specified\n");
return -EINVAL;
}
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 91949d99755..54145edf50e 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -195,8 +195,6 @@ static int pacpi_port_start(struct ata_port *ap)
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct pata_acpi *acpi;
- int ret;
-
if (ap->acpi_handle == NULL)
return -ENODEV;
@@ -205,11 +203,7 @@ static int pacpi_port_start(struct ata_port *ap)
return -ENOMEM;
acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]);
acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]);
- ret = ata_bmdma_port_start(ap);
- if (ret < 0)
- return ret;
-
- return ret;
+ return ata_bmdma_port_start(ap);
}
static struct scsi_host_template pacpi_sht = {
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 794ec6e3275..cadd67998ba 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -287,10 +287,10 @@ static void ali_warn_atapi_dma(struct ata_device *adev)
int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
if (print_info && adev->class == ATA_DEV_ATAPI && !ali_atapi_dma) {
- ata_dev_printk(adev, KERN_WARNING,
- "WARNING: ATAPI DMA disabled for reliability issues. It can be enabled\n");
- ata_dev_printk(adev, KERN_WARNING,
- "WARNING: via pata_ali.atapi_dma modparam or corresponding sysfs node.\n");
+ ata_dev_warn(adev,
+ "WARNING: ATAPI DMA disabled for reliability issues. It can be enabled\n");
+ ata_dev_warn(adev,
+ "WARNING: via pata_ali.atapi_dma modparam or corresponding sysfs node.\n");
}
}
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index b0975a5ad8c..dc6b5dae046 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -60,7 +60,7 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse
UT = T / 2;
if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
- dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
+ dev_err(&pdev->dev, "unknown mode %d\n", speed);
return;
}
@@ -311,7 +311,7 @@ static unsigned long nv_mode_filter(struct ata_device *dev,
cable detection result */
limit |= ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA2);
- ata_port_printk(ap, KERN_DEBUG, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
+ ata_port_dbg(ap, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
"BIOS=0x%lx (0x%x) ACPI=0x%lx%s\n",
xfer_mask, limit, xfer_mask & limit, bios_limit,
saved_udma, acpi_limit, acpi_str);
@@ -530,14 +530,12 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
};
const struct ata_port_info *ppi[] = { NULL, NULL };
- static int printed_version;
int type = id->driver_data;
void *hpriv = NULL;
u8 fifo;
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 2215632e4b3..78a93b69095 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -346,7 +346,6 @@ static struct ata_port_operations artop6260_ops = {
static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
- static int printed_version;
static const struct ata_port_info info_6210 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -378,9 +377,7 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
const struct ata_port_info *ppi[] = { NULL, NULL };
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index 95295935dd9..3cfabb262af 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -470,7 +470,7 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
}
if (!mask) {
- dev_printk(KERN_ERR, gdev, "no available native port\n");
+ dev_err(gdev, "no available native port\n");
return -ENODEV;
}
@@ -487,7 +487,6 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
static int atp867x_init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- static int printed_version;
static const struct ata_port_info info_867x = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -499,8 +498,7 @@ static int atp867x_init_one(struct pci_dev *pdev,
const struct ata_port_info *ppi[] = { &info_867x, NULL };
int rc;
- if (!printed_version++)
- dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
@@ -511,15 +509,14 @@ static int atp867x_init_one(struct pci_dev *pdev,
host = ata_host_alloc_pinfo(&pdev->dev, ppi, ATP867X_NUM_PORTS);
if (!host) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to allocate ATA host\n");
+ dev_err(&pdev->dev, "failed to allocate ATA host\n");
rc = -ENOMEM;
goto err_out;
}
rc = atp867x_ata_pci_sff_init_host(host);
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev, "failed to init host\n");
+ dev_err(&pdev->dev, "failed to init host\n");
goto err_out;
}
@@ -528,7 +525,7 @@ static int atp867x_init_one(struct pci_dev *pdev,
rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &atp867x_sht);
if (rc)
- dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n");
+ dev_err(&pdev->dev, "failed to activate host\n");
err_out:
return rc;
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index ea64967000f..bd987bb082e 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1129,7 +1129,7 @@ static int bfin_softreset(struct ata_link *link, unsigned int *classes,
/* issue bus reset */
err_mask = bfin_bus_softreset(ap, devmask);
if (err_mask) {
- ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
+ ata_port_err(ap, "SRST failed (err_mask=0x%x)\n",
err_mask);
return -EIO;
}
@@ -1382,7 +1382,7 @@ idle_irq:
#ifdef ATA_IRQ_TRAP
if ((ap->stats.idle_irq % 1000) == 0) {
ap->ops->irq_ack(ap, 0); /* debug trap */
- ata_port_printk(ap, KERN_WARNING, "irq trap\n");
+ ata_port_warn(ap, "irq trap\n");
return 1;
}
#endif
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index e3254fcff0f..9ddcddc66a2 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -149,8 +149,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
ppi[1] = &pi;
if ((pcicfg & 0x40) == 0) {
- dev_printk(KERN_WARNING, &pdev->dev,
- "DMA mode disabled. Enabling.\n");
+ dev_warn(&pdev->dev, "DMA mode disabled. Enabling.\n");
pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
}
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index a08834758ea..aca47e4e29e 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -263,7 +263,6 @@ static struct ata_port_operations efar_ops = {
static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -273,9 +272,7 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
};
const struct ata_port_info *ppi[] = { &info, &info };
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 24d7df81546..b3042dab08b 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -185,7 +185,6 @@ static void hpt3x3_init_chipset(struct pci_dev *dev)
static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -206,8 +205,7 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
hpt3x3_init_chipset(pdev);
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host)
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 9f2889fe43b..52e7e7b8c74 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -210,8 +210,8 @@ static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev
else
iomd_type = 'A', cycle = 562;
- ata_dev_printk(adev, KERN_INFO, "timings: act %dns rec %dns cyc %dns (%c)\n",
- t.active, t.recover, t.cycle, iomd_type);
+ ata_dev_info(adev, "timings: act %dns rec %dns cyc %dns (%c)\n",
+ t.active, t.recover, t.cycle, iomd_type);
state->port[ap->port_no].speed[adev->devno] = cycle;
}
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
new file mode 100644
index 00000000000..ca9d9caedfa
--- /dev/null
+++ b/drivers/ata/pata_imx.c
@@ -0,0 +1,253 @@
+/*
+ * Freescale iMX PATA driver
+ *
+ * Copyright (C) 2011 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * Based on pata_platform - Copyright (C) 2006 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * TODO:
+ * - dmaengine support
+ * - check if timing stuff needed
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <scsi/scsi_host.h>
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#define DRV_NAME "pata_imx"
+
+#define PATA_IMX_ATA_CONTROL 0x24
+#define PATA_IMX_ATA_CTRL_FIFO_RST_B (1<<7)
+#define PATA_IMX_ATA_CTRL_ATA_RST_B (1<<6)
+#define PATA_IMX_ATA_CTRL_IORDY_EN (1<<0)
+#define PATA_IMX_ATA_INT_EN 0x2C
+#define PATA_IMX_ATA_INTR_ATA_INTRQ2 (1<<3)
+#define PATA_IMX_DRIVE_DATA 0xA0
+#define PATA_IMX_DRIVE_CONTROL 0xD8
+
+struct pata_imx_priv {
+ struct clk *clk;
+ /* timings/interrupt/control regs */
+ u8 *host_regs;
+ u32 ata_ctl;
+};
+
+static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused)
+{
+ struct ata_device *dev;
+ struct ata_port *ap = link->ap;
+ struct pata_imx_priv *priv = ap->host->private_data;
+ u32 val;
+
+ ata_for_each_dev(dev, link, ENABLED) {
+ dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
+ dev->xfer_shift = ATA_SHIFT_PIO;
+ dev->flags |= ATA_DFLAG_PIO;
+
+ val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
+ if (ata_pio_need_iordy(dev))
+ val |= PATA_IMX_ATA_CTRL_IORDY_EN;
+ else
+ val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
+ __raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
+
+ ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ }
+ return 0;
+}
+
+static struct scsi_host_template pata_imx_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static struct ata_port_operations pata_imx_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_data_xfer = ata_sff_data_xfer_noirq,
+ .cable_detect = ata_cable_unknown,
+ .set_mode = pata_imx_set_mode,
+};
+
+static void pata_imx_setup_port(struct ata_ioports *ioaddr)
+{
+ /* Fixup the port shift for platforms that need it */
+ ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2);
+ ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2);
+ ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2);
+ ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2);
+ ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << 2);
+ ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2);
+ ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2);
+ ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2);
+ ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2);
+ ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
+}
+
+static int __devinit pata_imx_probe(struct platform_device *pdev)
+{
+ struct ata_host *host;
+ struct ata_port *ap;
+ struct pata_imx_priv *priv;
+ int irq = 0;
+ struct resource *io_res;
+
+ io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (io_res == NULL)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof(struct pata_imx_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ clk_enable(priv->clk);
+
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host)
+ goto free_priv;
+
+ host->private_data = priv;
+ ap = host->ports[0];
+
+ ap->ops = &pata_imx_port_ops;
+ ap->pio_mask = ATA_PIO0;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
+
+ priv->host_regs = devm_ioremap(&pdev->dev, io_res->start,
+ resource_size(io_res));
+ if (!priv->host_regs) {
+ dev_err(&pdev->dev, "failed to map IO/CTL base\n");
+ goto free_priv;
+ }
+
+ ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA;
+ ap->ioaddr.ctl_addr = priv->host_regs + PATA_IMX_DRIVE_CONTROL;
+
+ ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
+
+ pata_imx_setup_port(&ap->ioaddr);
+
+ ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
+ (unsigned long long)io_res->start + PATA_IMX_DRIVE_DATA,
+ (unsigned long long)io_res->start + PATA_IMX_DRIVE_CONTROL);
+
+ /* deassert resets */
+ __raw_writel(PATA_IMX_ATA_CTRL_FIFO_RST_B |
+ PATA_IMX_ATA_CTRL_ATA_RST_B,
+ priv->host_regs + PATA_IMX_ATA_CONTROL);
+ /* enable interrupts */
+ __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
+ priv->host_regs + PATA_IMX_ATA_INT_EN);
+
+ /* activate */
+ return ata_host_activate(host, irq, ata_sff_interrupt, 0,
+ &pata_imx_sht);
+
+free_priv:
+ clk_disable(priv->clk);
+ clk_put(priv->clk);
+ return -ENOMEM;
+}
+
+static int __devexit pata_imx_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct pata_imx_priv *priv = host->private_data;
+
+ ata_host_detach(host);
+
+ __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
+
+ clk_disable(priv->clk);
+ clk_put(priv->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int pata_imx_suspend(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct pata_imx_priv *priv = host->private_data;
+ int ret;
+
+ ret = ata_host_suspend(host, PMSG_SUSPEND);
+ if (!ret) {
+ __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
+ priv->ata_ctl =
+ __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
+ clk_disable(priv->clk);
+ }
+
+ return ret;
+}
+
+static int pata_imx_resume(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct pata_imx_priv *priv = host->private_data;
+
+ clk_enable(priv->clk);
+
+ __raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
+
+ __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2,
+ priv->host_regs + PATA_IMX_ATA_INT_EN);
+
+ ata_host_resume(host);
+
+ return 0;
+}
+
+static const struct dev_pm_ops pata_imx_pm_ops = {
+ .suspend = pata_imx_suspend,
+ .resume = pata_imx_resume,
+};
+#endif
+
+static struct platform_driver pata_imx_driver = {
+ .probe = pata_imx_probe,
+ .remove = __devexit_p(pata_imx_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &pata_imx_pm_ops,
+#endif
+ },
+};
+
+static int __init pata_imx_init(void)
+{
+ return platform_driver_register(&pata_imx_driver);
+}
+
+static void __exit pata_imx_exit(void)
+{
+ platform_driver_unregister(&pata_imx_driver);
+}
+module_init(pata_imx_init);
+module_exit(pata_imx_exit);
+
+MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
+MODULE_DESCRIPTION("low-level driver for iMX PATA");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
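Note: pata_imx binds to a platform device named "pata_imx"; the register
window and interrupt come from the platform resources fetched in
pata_imx_probe(), and the driver also expects clk_get(&pdev->dev, NULL) to
succeed. The board/SoC support code that provides those resources is not part
of this diff. A hypothetical registration sketch (base address, size and IRQ
number are placeholders, not taken from this patch) could look like:

	/* hypothetical board-support sketch -- not part of this diff */
	static struct resource imx_pata_resources[] = {
		{
			.start	= 0x83fe0000,	/* placeholder register base */
			.end	= 0x83fe0000 + 0x1000 - 1,
			.flags	= IORESOURCE_MEM,
		},
		{
			.start	= 70,		/* placeholder IRQ number */
			.end	= 70,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device imx_pata_device = {
		.name		= "pata_imx",	/* must match DRV_NAME above */
		.id		= -1,
		.resource	= imx_pata_resources,
		.num_resources	= ARRAY_SIZE(imx_pata_resources),
	};

	/* board init would then register the device (and a matching clkdev
	 * lookup for the ATA clock) before the driver probes: */
	/*	platform_device_register(&imx_pata_device); */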
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index 4d142a2ab8f..998af0e629b 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -258,7 +258,6 @@ static struct ata_port_operations it8213_ops = {
static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -269,9 +268,7 @@ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *en
/* Current IT8213 stuff is single port */
const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &it8213_sht, NULL, 0);
}
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 2d15f2548a1..62c5d00abd2 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -473,12 +473,12 @@ static int it821x_smart_set_mode(struct ata_link *link, struct ata_device **unus
/* We do need the right mode information for DMA or PIO
and this comes from the current configuration flags */
if (ata_id_has_dma(dev->id)) {
- ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
+ ata_dev_info(dev, "configured for DMA\n");
dev->xfer_mode = XFER_MW_DMA_0;
dev->xfer_shift = ATA_SHIFT_MWDMA;
dev->flags &= ~ATA_DFLAG_PIO;
} else {
- ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ ata_dev_info(dev, "configured for PIO\n");
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
@@ -508,12 +508,12 @@ static void it821x_dev_config(struct ata_device *adev)
if (strstr(model_num, "Integrated Technology Express")) {
/* RAID mode */
- ata_dev_printk(adev, KERN_INFO, "%sRAID%d volume",
- adev->id[147]?"Bootable ":"",
- adev->id[129]);
+ ata_dev_info(adev, "%sRAID%d volume",
+ adev->id[147] ? "Bootable " : "",
+ adev->id[129]);
if (adev->id[129] != 1)
- printk("(%dK stripe)", adev->id[146]);
- printk(".\n");
+ pr_cont("(%dK stripe)", adev->id[146]);
+ pr_cont("\n");
}
/* This is a controller firmware triggered funny, don't
report the drive faulty! */
@@ -610,7 +610,7 @@ static void it821x_display_disk(int n, u8 *buf)
char *cbl = "(40 wire cable)";
static const char *types[5] = {
- "RAID0", "RAID1" "RAID 0+1", "JBOD", "DISK"
+ "RAID0", "RAID1", "RAID 0+1", "JBOD", "DISK"
};
if (buf[52] > 4) /* No Disk */
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index f6b3f995f58..15b64311fe0 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -31,7 +31,7 @@ static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error)
struct ata_device *dev;
ata_for_each_dev(dev, link, ENABLED) {
- ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n");
+ ata_dev_info(dev, "configured for PIO0\n");
dev->pio_mode = XFER_PIO_0;
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
@@ -181,7 +181,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
ixp4xx_setup_port(ap, data, cs0->start, cs1->start);
- dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* activate host */
return ata_host_activate(host, irq, ata_sff_interrupt, 0, &ixp4xx_sht);
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 6bd9425ba5a..d960f8e9e8b 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -213,7 +213,7 @@ static int legacy_set_mode(struct ata_link *link, struct ata_device **unused)
struct ata_device *dev;
ata_for_each_dev(dev, link, ENABLED) {
- ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ ata_dev_info(dev, "configured for PIO\n");
dev->pio_mode = XFER_PIO_0;
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 46f589edccd..b057e3fa44b 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -772,8 +772,9 @@ static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
pci_restore_state(priv->pdev);
rc = pcim_enable_device(priv->pdev);
if (rc)
- dev_printk(KERN_ERR, &priv->pdev->dev,
- "Failed to enable device after resume (%d)\n", rc);
+ dev_err(&priv->pdev->dev,
+ "Failed to enable device after resume (%d)\n",
+ rc);
else
pci_set_master(priv->pdev);
}
@@ -812,7 +813,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
blk_queue_update_dma_pad(sdev->request_queue, 31);
/* Tell the world about it */
- ata_dev_printk(dev, KERN_INFO, "OHare alignment limits applied\n");
+ ata_dev_info(dev, "OHare alignment limits applied\n");
return 0;
}
@@ -838,8 +839,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
cmd | PCI_COMMAND_INVALIDATE);
/* Tell the world about it */
- ata_dev_printk(dev, KERN_INFO,
- "K2/Shasta alignment limits applied\n");
+ ata_dev_info(dev, "K2/Shasta alignment limits applied\n");
}
return 0;
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 75a6a0c0094..5d7f58a7e34 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -161,6 +161,9 @@ static const struct pci_device_id marvell_pci_tbl[] = {
{ PCI_DEVICE(0x11AB, 0x6121), },
{ PCI_DEVICE(0x11AB, 0x6123), },
{ PCI_DEVICE(0x11AB, 0x6145), },
+ { PCI_DEVICE(0x1B4B, 0x91A0), },
+ { PCI_DEVICE(0x1B4B, 0x91A4), },
+
{ } /* terminate list */
};
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index d8d9c580774..9dc16df8419 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -152,15 +152,13 @@ static struct ata_port_operations mpiix_port_ops = {
static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
/* Single threaded by the PCI probe logic */
- static int printed_version;
struct ata_host *host;
struct ata_port *ap;
void __iomem *cmd_addr, *ctl_addr;
u16 idetim;
int cmd, ctl, irq;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&dev->dev, DRV_VERSION);
host = ata_host_alloc(&dev->dev, 1);
if (!host)
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 3eb921c746a..9979a43bc59 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -57,7 +57,6 @@ static struct ata_port_operations netcell_ops = {
static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
/* Actually we don't really care about these as the
@@ -70,9 +69,7 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e
const struct ata_port_info *port_info[] = { &info, NULL };
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 2110863bb3d..31d5986537a 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -86,7 +86,7 @@ static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev)
idefr &= ~0x04;
if (ata_timing_compute(adev, adev->pio_mode, &at, 30303, 1) < 0) {
- dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", adev->pio_mode);
+ dev_err(&pdev->dev, "unknown mode %d\n", adev->pio_mode);
return;
}
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index 605f198f958..f1d517bc5b4 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -350,7 +350,6 @@ static void ns87415_fixup(struct pci_dev *pdev)
static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -370,9 +369,7 @@ static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *e
if (PCI_SLOT(pdev->devfn) == 0x0E)
ppi[0] = &info87560;
#endif
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 220ddc90608..1d61d5d278f 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -405,7 +405,7 @@ static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
rc = ata_sff_wait_after_reset(link, 1, deadline);
if (rc) {
- ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
+ ata_link_err(link, "SRST failed (errno=%d)\n", rc);
return rc;
}
@@ -807,6 +807,7 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
irq_handler_t irq_handler = NULL;
void __iomem *base;
struct octeon_cf_port *cf_port;
+ char version[32];
res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -905,10 +906,11 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
- dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n",
+ snprintf(version, sizeof(version), "%s %d bit%s",
+ DRV_VERSION,
(ocd->is16bit) ? 16 : 8,
(cs1) ? ", True IDE" : "");
-
+ ata_print_version_once(&pdev->dev, version);
return ata_host_activate(host, irq, irq_handler, 0, &octeon_cf_sht);
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index b811c163620..98cdf50e406 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -235,7 +235,6 @@ static struct ata_port_operations oldpiix_pata_ops = {
static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -244,9 +243,7 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e
};
const struct ata_port_info *ppi[] = { &info, NULL };
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0);
}
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index 00c5a02a94f..accc033faf7 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -167,10 +167,8 @@ static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
.port_ops = &opti_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
- static int printed_version;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&dev->dev, DRV_VERSION);
return ata_pci_sff_init_one(dev, ppi, &opti_sht, NULL, 0);
}
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 0852cd07de0..77cb9140863 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -411,11 +411,9 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
.port_ops = &optiplus_port_ops
};
const struct ata_port_info *ppi[] = { &info_82c700, NULL };
- static int printed_version;
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&dev->dev, DRV_VERSION);
rc = pcim_enable_device(dev);
if (rc)
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 021abe6d852..a808ba03bd7 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -68,7 +68,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
the same vendor - check serial */
if (memcmp(master->id + ATA_ID_SERNO, slave->id + ATA_ID_SERNO,
ATA_ID_SERNO_LEN) == 0 && master->id[ATA_ID_SERNO] >> 8) {
- ata_dev_printk(slave, KERN_WARNING, "is a ghost device, ignoring.\n");
+ ata_dev_warn(slave, "is a ghost device, ignoring\n");
ata_dev_disable(slave);
}
}
@@ -142,8 +142,7 @@ static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc)
ioread8(ap->ioaddr.data_addr);
if (count)
- ata_port_printk(ap, KERN_WARNING, "drained %d bytes to clear DRQ.\n",
- count);
+ ata_port_warn(ap, "drained %d bytes to clear DRQ\n", count);
}
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 9765ace1692..b1511f38b0e 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -655,7 +655,7 @@ static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
*/
pll_clock = pdc_detect_pll_input_clock(host);
- dev_printk(KERN_INFO, host->dev, "PLL input clock %ld kHz\n", pll_clock/1000);
+ dev_info(host->dev, "PLL input clock %ld kHz\n", pll_clock/1000);
/* Adjust PLL control register */
pdc_adjust_pll(host, pll_clock, board_idx);
@@ -697,7 +697,6 @@ static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
*/
static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const unsigned long cmd_offset[] = { 0x17c0, 0x15c0 };
static const unsigned long bmdma_offset[] = { 0x1000, 0x1008 };
unsigned int board_idx = (unsigned int) ent->driver_data;
@@ -707,8 +706,7 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
void __iomem *mmio_base;
int i, rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* alloc host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 50400fa120f..2067308f683 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -39,7 +39,7 @@ static int pata_platform_set_mode(struct ata_link *link, struct ata_device **unu
dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
- ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ ata_dev_info(dev, "configured for PIO\n");
}
return 0;
}
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 8574b31f177..b2d3a2bb4e6 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -213,7 +213,6 @@ static struct ata_port_operations radisys_pata_ops = {
static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -223,9 +222,7 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e
};
const struct ata_port_info *ppi[] = { &info, NULL };
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0);
}
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 5fbe9b166c6..4d318f86ae8 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -312,7 +312,6 @@ static struct scsi_host_template rdc_sht = {
static int __devinit rdc_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
struct device *dev = &pdev->dev;
struct ata_port_info port_info[2];
const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
@@ -321,9 +320,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
struct rdc_host_priv *hpriv;
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
port_info[0] = rdc_port_info;
port_info[1] = rdc_port_info;
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index 4d04471794b..aca321e1e6a 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -44,7 +44,7 @@ static int rz1000_set_mode(struct ata_link *link, struct ata_device **unused)
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
- ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ ata_dev_info(dev, "configured for PIO\n");
}
return 0;
}
@@ -92,7 +92,7 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en
};
const struct ata_port_info *ppi[] = { &info, NULL };
- printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
if (rz1000_fifo_disable(pdev) == 0)
return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL, 0);
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index c446ae6055a..1b372c29719 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -376,7 +376,7 @@ static int pata_s3c_softreset(struct ata_link *link, unsigned int *classes,
rc = pata_s3c_bus_softreset(ap, deadline);
/* if link is occupied, -ENODEV too is an error */
if (rc && rc != -ENODEV) {
- ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
+ ata_link_err(link, "SRST failed (errno=%d)\n", rc);
return rc;
}
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 88ea9b677b4..eb748e32714 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -637,8 +637,7 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
DPRINTK("about to softreset, devmask=%x\n", devmask);
err_mask = scc_bus_softreset(ap, devmask, deadline);
if (err_mask) {
- ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
- err_mask);
+ ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask);
return -EIO;
}
@@ -1072,15 +1071,12 @@ static int scc_host_init(struct ata_host *host)
static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
unsigned int board_idx = (unsigned int) ent->driver_data;
const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL };
struct ata_host *host;
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
if (!host)
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index e97b32f03a6..7c78b999362 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -172,12 +172,9 @@ static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev)
static int __devinit sch_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0);
}
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 118787caa93..31f759b0ab7 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -327,13 +327,11 @@ static int __devinit sil680_init_one(struct pci_dev *pdev,
.port_ops = &sil680_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
- static int printed_version;
struct ata_host *host;
void __iomem *mmio_base;
int rc, try_mmio;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index be08ff92db1..533f2aefab8 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -681,7 +681,6 @@ static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis)
static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *ppi[] = { NULL, NULL };
struct pci_dev *host = NULL;
struct sis_chipset *chipset = NULL;
@@ -735,9 +734,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
0x0, &sis_info100
};
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 7f5d020ed56..c06ce8ced56 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -317,9 +317,11 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id
rev = sl82c105_bridge_revision(dev);
if (rev == -1)
- dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Unable to find bridge, disabling DMA.\n");
+ dev_warn(&dev->dev,
+ "pata_sl82c105: Unable to find bridge, disabling DMA\n");
else if (rev <= 5)
- dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Early bridge revision, no DMA available.\n");
+ dev_warn(&dev->dev,
+ "pata_sl82c105: Early bridge revision, no DMA available\n");
else
ppi[0] = &info_dma;
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index b3e0c943228..28da1c6becf 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -196,10 +196,8 @@ static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
.port_ops = &triflex_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
- static int printed_version;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&dev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0);
}
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index ac8d7d97e40..8e9f5048a10 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -124,6 +124,17 @@ static const struct via_isa_bridge {
{ NULL }
};
+static const struct dmi_system_id no_atapi_dma_dmi_table[] = {
+ {
+ .ident = "AVERATEC 3200",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AVERATEC"),
+ DMI_MATCH(DMI_BOARD_NAME, "3200"),
+ },
+ },
+ { }
+};
+
struct via_port {
u8 cached_device;
};
@@ -350,11 +361,18 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
if (config->id == PCI_DEVICE_ID_VIA_82C586_0) {
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
if (strcmp(model_num, "TS64GSSD25-M") == 0) {
- ata_dev_printk(dev, KERN_WARNING,
- "disabling UDMA mode due to reported lockups with this device.\n");
+ ata_dev_warn(dev,
+ "disabling UDMA mode due to reported lockups with this device\n");
mask &= ~ ATA_MASK_UDMA;
}
}
+
+ if (dev->class == ATA_DEV_ATAPI &&
+ dmi_check_system(no_atapi_dma_dmi_table)) {
+ ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
+ mask &= ATA_MASK_PIO;
+ }
+
return mask;
}
@@ -551,14 +569,12 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
const struct ata_port_info *ppi[] = { NULL, NULL };
struct pci_dev *isa;
const struct via_isa_bridge *config;
- static int printed_version;
u8 enable;
u32 timing;
unsigned long flags = id->driver_data;
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 1111712b3d7..04911d52f59 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -596,14 +596,12 @@ static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n");
return rc;
}
return 0;
@@ -612,15 +610,13 @@ static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
static int adma_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
unsigned int board_idx = (unsigned int) ent->driver_data;
const struct ata_port_info *ppi[] = { &adma_port_info[board_idx], NULL };
struct ata_host *host;
void __iomem *mmio_base;
int rc, port_no;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* alloc host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, ADMA_PORTS);
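/*
 * Illustrative sketch, not part of the patch above: the adma_set_dma_masks()
 * messages converted here report failures of the usual 32-bit DMA mask
 * setup, which with the PCI API of this era looks roughly like:
 */
static int example_set_dma_masks(struct pci_dev *pdev)
{
	int rc;

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));	/* streaming mappings */
	if (rc)
		return rc;

	return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); /* coherent */
}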
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 1c4b3aa4c7c..5c4237452f5 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -389,7 +389,7 @@ static void sata_dwc_tf_dump(struct ata_taskfile *tf)
/*
* Function: get_burst_length_encode
* arguments: datalength: length in bytes of data
- * returns value to be programmed in register corrresponding to data length
+ * returns value to be programmed in register corresponding to data length
* This value is effectively the log(base 2) of the length
*/
static int get_burst_length_encode(int datalength)
@@ -766,11 +766,15 @@ static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
static void dma_dwc_exit(struct sata_dwc_device *hsdev)
{
dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__);
- if (host_pvt.sata_dma_regs)
+ if (host_pvt.sata_dma_regs) {
iounmap(host_pvt.sata_dma_regs);
+ host_pvt.sata_dma_regs = NULL;
+ }
- if (hsdev->irq_dma)
+ if (hsdev->irq_dma) {
free_irq(hsdev->irq_dma, hsdev);
+ hsdev->irq_dma = 0;
+ }
}
/*
@@ -1325,7 +1329,7 @@ static int sata_dwc_port_start(struct ata_port *ap)
dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
__func__);
err = -ENOMEM;
- goto CLEANUP;
+ goto CLEANUP_ALLOC;
}
}
@@ -1345,15 +1349,13 @@ static int sata_dwc_port_start(struct ata_port *ap)
/* Clear any error bits before libata starts issuing commands */
clear_serror();
ap->private_data = hsdevp;
+ dev_dbg(ap->dev, "%s: done\n", __func__);
+ return 0;
+CLEANUP_ALLOC:
+ kfree(hsdevp);
CLEANUP:
- if (err) {
- sata_dwc_port_stop(ap);
- dev_dbg(ap->dev, "%s: fail\n", __func__);
- } else {
- dev_dbg(ap->dev, "%s: done\n", __func__);
- }
-
+ dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
return err;
}
@@ -1638,13 +1640,12 @@ static int sata_dwc_probe(struct platform_device *ofdev)
const struct ata_port_info *ppi[] = { &pi, NULL };
/* Allocate DWC SATA device */
- hsdev = kmalloc(sizeof(*hsdev), GFP_KERNEL);
+ hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
if (hsdev == NULL) {
dev_err(&ofdev->dev, "kmalloc failed for hsdev\n");
err = -ENOMEM;
- goto error_out;
+ goto error;
}
- memset(hsdev, 0, sizeof(*hsdev));
/* Ioremap SATA registers */
base = of_iomap(ofdev->dev.of_node, 0);
@@ -1652,7 +1653,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
dev_err(&ofdev->dev, "ioremap failed for SATA register"
" address\n");
err = -ENODEV;
- goto error_out;
+ goto error_kmalloc;
}
hsdev->reg_base = base;
dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
@@ -1665,7 +1666,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
if (!host) {
dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
err = -ENOMEM;
- goto error_out;
+ goto error_iomap;
}
host->private_data = hsdev;
@@ -1733,8 +1734,11 @@ error_out:
/* Free SATA DMA resources */
dma_dwc_exit(hsdev);
- if (base)
- iounmap(base);
+error_iomap:
+ iounmap(base);
+error_kmalloc:
+ kfree(hsdev);
+error:
return err;
}
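/*
 * Illustrative sketch, not part of the patch above: the sata_dwc_probe()
 * rework follows the common goto-unwind idiom, where each label releases
 * only what was acquired before the failure, in reverse order of
 * acquisition.  do_final_step() below is a hypothetical placeholder.
 */
static int example_probe_unwind(struct device *dev)
{
	void *priv;
	void __iomem *regs;
	int err;

	priv = kzalloc(sizeof(u32), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	regs = of_iomap(dev->of_node, 0);
	if (!regs) {
		err = -ENODEV;
		goto err_free;
	}

	err = do_final_step(regs);		/* hypothetical final step */
	if (err)
		goto err_unmap;

	return 0;

err_unmap:
	iounmap(regs);
err_free:
	kfree(priv);
	return err;
}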
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 35a71d875d0..78ae7b67b09 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -346,12 +346,11 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
/* warn if each s/g element is not dword aligned */
if (sg_addr & 0x03)
- ata_port_printk(qc->ap, KERN_ERR,
- "s/g addr unaligned : 0x%llx\n",
- (unsigned long long)sg_addr);
+ ata_port_err(qc->ap, "s/g addr unaligned : 0x%llx\n",
+ (unsigned long long)sg_addr);
if (sg_len & 0x03)
- ata_port_printk(qc->ap, KERN_ERR,
- "s/g len unaligned : 0x%x\n", sg_len);
+ ata_port_err(qc->ap, "s/g len unaligned : 0x%x\n",
+ sg_len);
if (num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1) &&
sg_next(sg) != NULL) {
@@ -661,8 +660,7 @@ static int sata_fsl_port_start(struct ata_port *ap)
sata_fsl_scr_write(&ap->link, SCR_CONTROL, temp);
sata_fsl_scr_read(&ap->link, SCR_CONTROL, &temp);
- dev_printk(KERN_WARNING, dev, "scr_control, speed limited to %x\n",
- temp);
+ dev_warn(dev, "scr_control, speed limited to %x\n", temp);
#endif
return 0;
@@ -740,8 +738,7 @@ try_offline_again:
1, 500);
if (temp & ONLINE) {
- ata_port_printk(ap, KERN_ERR,
- "Hardreset failed, not off-lined %d\n", i);
+ ata_port_err(ap, "Hardreset failed, not off-lined %d\n", i);
/*
* Try to offline controller atleast twice
@@ -777,8 +774,7 @@ try_offline_again:
temp = ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, 0, 1, 500);
if (!(temp & ONLINE)) {
- ata_port_printk(ap, KERN_ERR,
- "Hardreset failed, not on-lined\n");
+ ata_port_err(ap, "Hardreset failed, not on-lined\n");
goto err;
}
@@ -794,9 +790,8 @@ try_offline_again:
temp = ata_wait_register(ap, hcr_base + HSTATUS, 0xFF, 0, 1, 500);
if ((!(temp & 0x10)) || ata_link_offline(link)) {
- ata_port_printk(ap, KERN_WARNING,
- "No Device OR PHYRDY change,Hstatus = 0x%x\n",
- ioread32(hcr_base + HSTATUS));
+ ata_port_warn(ap, "No Device OR PHYRDY change,Hstatus = 0x%x\n",
+ ioread32(hcr_base + HSTATUS));
*class = ATA_DEV_NONE;
return 0;
}
@@ -809,13 +804,12 @@ try_offline_again:
500, jiffies_to_msecs(deadline - start_jiffies));
if ((temp & 0xFF) != 0x18) {
- ata_port_printk(ap, KERN_WARNING, "No Signature Update\n");
+ ata_port_warn(ap, "No Signature Update\n");
*class = ATA_DEV_NONE;
goto do_followup_srst;
} else {
- ata_port_printk(ap, KERN_INFO,
- "Signature Update detected @ %d msecs\n",
- jiffies_to_msecs(jiffies - start_jiffies));
+ ata_port_info(ap, "Signature Update detected @ %d msecs\n",
+ jiffies_to_msecs(jiffies - start_jiffies));
*class = sata_fsl_dev_classify(ap);
return 0;
}
@@ -890,7 +884,7 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
temp = ata_wait_register(ap, CQ + hcr_base, 0x1, 0x1, 1, 5000);
if (temp & 0x1) {
- ata_port_printk(ap, KERN_WARNING, "ATA_SRST issue failed\n");
+ ata_port_warn(ap, "ATA_SRST issue failed\n");
DPRINTK("Softreset@5000,CQ=0x%x,CA=0x%x,CC=0x%x\n",
ioread32(CQ + hcr_base),
@@ -1202,8 +1196,7 @@ static irqreturn_t sata_fsl_interrupt(int irq, void *dev_instance)
if (ap) {
sata_fsl_host_intr(ap);
} else {
- dev_printk(KERN_WARNING, host->dev,
- "interrupt on disabled port 0\n");
+ dev_warn(host->dev, "interrupt on disabled port 0\n");
}
iowrite32(interrupt_enables, hcr_base + HSTATUS);
@@ -1317,8 +1310,7 @@ static int sata_fsl_probe(struct platform_device *ofdev)
struct ata_port_info pi = sata_fsl_port_info[0];
const struct ata_port_info *ppi[] = { &pi, NULL };
- dev_printk(KERN_INFO, &ofdev->dev,
- "Sata FSL Platform/CSB Driver init\n");
+ dev_info(&ofdev->dev, "Sata FSL Platform/CSB Driver init\n");
hcr_base = of_iomap(ofdev->dev.of_node, 0);
if (!hcr_base)
@@ -1347,7 +1339,7 @@ static int sata_fsl_probe(struct platform_device *ofdev)
irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (irq < 0) {
- dev_printk(KERN_ERR, &ofdev->dev, "invalid irq from platform\n");
+ dev_err(&ofdev->dev, "invalid irq from platform\n");
goto error_exit_with_cleanup;
}
host_priv->irq = irq;
@@ -1422,8 +1414,7 @@ static int sata_fsl_resume(struct platform_device *op)
ret = sata_fsl_init_controller(host);
if (ret) {
- dev_printk(KERN_ERR, &op->dev,
- "Error initialize hardware\n");
+ dev_err(&op->dev, "Error initializing hardware\n");
return ret;
}
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 83a44471b18..5c7d70c03bf 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -396,9 +396,8 @@ static void inic_host_intr(struct ata_port *ap)
}
spurious:
- ata_port_printk(ap, KERN_WARNING, "unhandled interrupt: "
- "cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
- qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
+ ata_port_warn(ap, "unhandled interrupt: cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
+ qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
}
static irqreturn_t inic_interrupt(int irq, void *dev_instance)
@@ -619,8 +618,9 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
rc = sata_link_resume(link, timing, deadline);
if (rc) {
- ata_link_printk(link, KERN_WARNING, "failed to resume "
- "link after reset (errno=%d)\n", rc);
+ ata_link_warn(link,
+ "failed to resume link after reset (errno=%d)\n",
+ rc);
return rc;
}
@@ -632,8 +632,9 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
rc = ata_wait_after_reset(link, deadline, inic_check_ready);
/* link occupied, -ENODEV too is an error */
if (rc) {
- ata_link_printk(link, KERN_WARNING, "device not ready "
- "after hardreset (errno=%d)\n", rc);
+ ata_link_warn(link,
+ "device not ready after hardreset (errno=%d)\n",
+ rc);
return rc;
}
@@ -799,7 +800,6 @@ static int inic_pci_device_resume(struct pci_dev *pdev)
static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
struct ata_host *host;
struct inic_host_priv *hpriv;
@@ -807,8 +807,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int mmio_bar;
int i, rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* alloc host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
@@ -847,15 +846,13 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set dma_mask. This devices doesn't support 64bit addressing. */
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n");
return rc;
}
@@ -866,15 +863,13 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to set the maximum segment size.\n");
+ dev_err(&pdev->dev, "failed to set the maximum segment size\n");
return rc;
}
rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to initialize controller\n");
+ dev_err(&pdev->dev, "failed to initialize controller\n");
return rc;
}
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index b52c0519ad0..4b6b2090784 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1190,7 +1190,7 @@ static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
break;
udelay(per_loop);
}
- /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
+ /* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
}
/**
@@ -1228,7 +1228,7 @@ static int mv_stop_edma(struct ata_port *ap)
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
mv_wait_for_edma_empty_idle(ap);
if (mv_stop_edma_engine(port_mmio)) {
- ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
+ ata_port_err(ap, "Unable to stop eDMA\n");
err = -EIO;
}
mv_edma_cfg(ap, 0, 0);
@@ -1382,7 +1382,7 @@ static void mv6_dev_config(struct ata_device *adev)
if (adev->flags & ATA_DFLAG_NCQ) {
if (sata_pmp_attached(adev->link->ap)) {
adev->flags &= ~ATA_DFLAG_NCQ;
- ata_dev_printk(adev, KERN_INFO,
+ ata_dev_info(adev,
"NCQ disabled for command-based switching\n");
}
}
@@ -2225,9 +2225,8 @@ static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
/* See if it worked */
if ((ifstat & 0x3000) != 0x1000) {
- ata_port_printk(ap, KERN_WARNING,
- "%s transmission error, ifstat=%08x\n",
- __func__, ifstat);
+ ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
+ __func__, ifstat);
return AC_ERR_OTHER;
}
return 0;
@@ -2342,9 +2341,9 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
*/
if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
--limit_warnings;
- ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
- ": attempting PIO w/multiple DRQ: "
- "this may fail due to h/w errata\n");
+ ata_link_warn(qc->dev->link, DRV_NAME
+ ": attempting PIO w/multiple DRQ: "
+ "this may fail due to h/w errata\n");
}
/* drop through */
case ATA_PROT_NODATA:
@@ -2499,20 +2498,20 @@ static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
}
failed_links = hweight16(new_map);
- ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
- "failed_links=%d nr_active_links=%d\n",
- __func__, pp->delayed_eh_pmp_map,
- ap->qc_active, failed_links,
- ap->nr_active_links);
+ ata_port_info(ap,
+ "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n",
+ __func__, pp->delayed_eh_pmp_map,
+ ap->qc_active, failed_links,
+ ap->nr_active_links);
if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
mv_process_crpb_entries(ap, pp);
mv_stop_edma(ap);
mv_eh_freeze(ap);
- ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
+ ata_port_info(ap, "%s: done\n", __func__);
return 1; /* handled */
}
- ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
+ ata_port_info(ap, "%s: waiting\n", __func__);
return 1; /* handled */
}
@@ -2554,9 +2553,8 @@ static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
* and we cannot handle it here.
*/
if (edma_err_cause & EDMA_ERR_SELF_DIS) {
- ata_port_printk(ap, KERN_WARNING,
- "%s: err_cause=0x%x pp_flags=0x%x\n",
- __func__, edma_err_cause, pp->pp_flags);
+ ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
+ __func__, edma_err_cause, pp->pp_flags);
return 0; /* not handled */
}
return mv_handle_fbs_ncq_dev_err(ap);
@@ -2567,9 +2565,8 @@ static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
* and we cannot handle it here.
*/
if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
- ata_port_printk(ap, KERN_WARNING,
- "%s: err_cause=0x%x pp_flags=0x%x\n",
- __func__, edma_err_cause, pp->pp_flags);
+ ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
+ __func__, edma_err_cause, pp->pp_flags);
return 0; /* not handled */
}
return mv_handle_fbs_non_ncq_dev_err(ap);
@@ -2930,8 +2927,7 @@ static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
err_cause = readl(mmio + hpriv->irq_cause_offset);
- dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
- err_cause);
+ dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
DPRINTK("All regs @ PCI error\n");
mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
@@ -3760,8 +3756,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
default:
- dev_printk(KERN_WARNING, &pdev->dev,
- "Applying 50XXB2 workarounds to unknown rev\n");
+ dev_warn(&pdev->dev,
+ "Applying 50XXB2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
}
@@ -3780,8 +3776,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
default:
- dev_printk(KERN_WARNING, &pdev->dev,
- "Applying B2 workarounds to unknown rev\n");
+ dev_warn(&pdev->dev,
+ "Applying B2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
}
@@ -3801,8 +3797,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
default:
- dev_printk(KERN_WARNING, &pdev->dev,
- "Applying B2 workarounds to unknown rev\n");
+ dev_warn(&pdev->dev,
+ "Applying B2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_60X1B2;
break;
}
@@ -3851,8 +3847,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
default:
- dev_printk(KERN_WARNING, &pdev->dev,
- "Applying 60X1C0 workarounds to unknown rev\n");
+ dev_warn(&pdev->dev,
+ "Applying 60X1C0 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
}
@@ -3867,8 +3863,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
break;
default:
- dev_printk(KERN_ERR, host->dev,
- "BUG: invalid board index %u\n", board_idx);
+ dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
return 1;
}
@@ -4023,7 +4018,6 @@ static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
*/
static int mv_platform_probe(struct platform_device *pdev)
{
- static int printed_version;
const struct mv_sata_platform_data *mv_platform_data;
const struct ata_port_info *ppi[] =
{ &mv_port_info[chip_soc], NULL };
@@ -4032,8 +4026,7 @@ static int mv_platform_probe(struct platform_device *pdev)
struct resource *res;
int n_ports, rc;
- if (!printed_version++)
- dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/*
* Simple resource validation ..
@@ -4091,9 +4084,8 @@ static int mv_platform_probe(struct platform_device *pdev)
if (rc)
goto err;
- dev_printk(KERN_INFO, &pdev->dev,
- "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
- host->n_ports);
+ dev_info(&pdev->dev, "slots %u ports %d\n",
+ (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
IRQF_SHARED, &mv6_sht);
@@ -4217,22 +4209,21 @@ static int pci_go_64(struct pci_dev *pdev)
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "64-bit DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "64-bit DMA enable failed\n");
return rc;
}
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "32-bit consistent DMA enable failed\n");
return rc;
}
}
@@ -4276,10 +4267,9 @@ static void mv_print_info(struct ata_host *host)
else
gen = "?";
- dev_printk(KERN_INFO, &pdev->dev,
- "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
- gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
- scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
+ dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
+ gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
+ scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
@@ -4293,15 +4283,13 @@ static void mv_print_info(struct ata_host *host)
static int mv_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
unsigned int board_idx = (unsigned int)ent->driver_data;
const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
struct ata_host *host;
struct mv_host_priv *hpriv;
int n_ports, port, rc;
- if (!printed_version++)
- dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index f173ef3bfc1..e0bc9646a38 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -620,9 +620,8 @@ static void nv_adma_register_mode(struct ata_port *ap)
count++;
}
if (count == 20)
- ata_port_printk(ap, KERN_WARNING,
- "timeout waiting for ADMA IDLE, stat=0x%hx\n",
- status);
+ ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
+ status);
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
@@ -635,9 +634,9 @@ static void nv_adma_register_mode(struct ata_port *ap)
count++;
}
if (count == 20)
- ata_port_printk(ap, KERN_WARNING,
- "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
- status);
+ ata_port_warn(ap,
+ "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
+ status);
pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
@@ -665,7 +664,7 @@ static void nv_adma_mode(struct ata_port *ap)
count++;
}
if (count == 20)
- ata_port_printk(ap, KERN_WARNING,
+ ata_port_warn(ap,
"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
status);
@@ -772,10 +771,10 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
blk_queue_max_segments(sdev->request_queue, sg_tablesize);
- ata_port_printk(ap, KERN_INFO,
- "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
- (unsigned long long)*ap->host->dev->dma_mask,
- segment_boundary, sg_tablesize);
+ ata_port_info(ap,
+ "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
+ (unsigned long long)*ap->host->dev->dma_mask,
+ segment_boundary, sg_tablesize);
spin_unlock_irqrestore(ap->lock, flags);
@@ -1443,8 +1442,7 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
existing commands. */
if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
(qc->flags & ATA_QCFLAG_RESULT_TF))) {
- ata_dev_printk(qc->dev, KERN_ERR,
- "NCQ w/ RESULT_TF not allowed\n");
+ ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
return AC_ERR_SYSTEM;
}
@@ -1581,15 +1579,15 @@ static int nv_hardreset(struct ata_link *link, unsigned int *class,
int rc;
if (!(ehc->i.flags & ATA_EHI_QUIET))
- ata_link_printk(link, KERN_INFO, "nv: skipping "
- "hardreset on occupied port\n");
+ ata_link_info(link,
+ "nv: skipping hardreset on occupied port\n");
/* make sure the link is online */
rc = sata_link_resume(link, timing, deadline);
/* whine about phy resume failure but proceed */
if (rc && rc != -EOPNOTSUPP)
- ata_link_printk(link, KERN_WARNING, "failed to resume "
- "link (errno=%d)\n", rc);
+ ata_link_warn(link, "failed to resume link (errno=%d)\n",
+ rc);
}
/* device signature acquisition is unreliable */
@@ -1686,7 +1684,7 @@ static void nv_adma_error_handler(struct ata_port *ap)
u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
- ata_port_printk(ap, KERN_ERR,
+ ata_port_err(ap,
"EH in ADMA mode, notifier 0x%X "
"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
"next cpb count 0x%X next cpb idx 0x%x\n",
@@ -1697,7 +1695,7 @@ static void nv_adma_error_handler(struct ata_port *ap)
struct nv_adma_cpb *cpb = &pp->cpb[i];
if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
ap->link.sactive & (1 << i))
- ata_port_printk(ap, KERN_ERR,
+ ata_port_err(ap,
"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
i, cpb->ctl_flags, cpb->resp_flags);
}
@@ -1799,23 +1797,22 @@ static void nv_swncq_ncq_stop(struct ata_port *ap)
u32 sactive;
u32 done_mask;
- ata_port_printk(ap, KERN_ERR,
- "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
- ap->qc_active, ap->link.sactive);
- ata_port_printk(ap, KERN_ERR,
+ ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
+ ap->qc_active, ap->link.sactive);
+ ata_port_err(ap,
"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
- ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
- ap->ops->sff_check_status(ap),
- ioread8(ap->ioaddr.error_addr));
+ ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
+ ap->ops->sff_check_status(ap),
+ ioread8(ap->ioaddr.error_addr));
sactive = readl(pp->sactive_block);
done_mask = pp->qc_active ^ sactive;
- ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
+ ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
for (i = 0; i < ATA_MAX_QUEUE; i++) {
u8 err = 0;
if (pp->qc_active & (1 << i))
@@ -1825,13 +1822,13 @@ static void nv_swncq_ncq_stop(struct ata_port *ap)
else
continue;
- ata_port_printk(ap, KERN_ERR,
- "tag 0x%x: %01x %01x %01x %01x %s\n", i,
- (pp->dhfis_bits >> i) & 0x1,
- (pp->dmafis_bits >> i) & 0x1,
- (pp->sdbfis_bits >> i) & 0x1,
- (sactive >> i) & 0x1,
- (err ? "error! tag doesn't exit" : " "));
+ ata_port_err(ap,
+ "tag 0x%x: %01x %01x %01x %01x %s\n", i,
+ (pp->dhfis_bits >> i) & 0x1,
+ (pp->dmafis_bits >> i) & 0x1,
+ (pp->sdbfis_bits >> i) & 0x1,
+ (sactive >> i) & 0x1,
+ (err ? "error! tag doesn't exit" : " "));
}
nv_swncq_pp_reinit(ap);
@@ -1956,8 +1953,8 @@ static int nv_swncq_slave_config(struct scsi_device *sdev)
if (strncmp(model_num, "Maxtor", 6) == 0) {
ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT);
- ata_dev_printk(dev, KERN_NOTICE,
- "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
+ ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
+ sdev->queue_depth);
}
return rc;
@@ -2356,7 +2353,6 @@ static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *ppi[] = { NULL, NULL };
struct nv_pi_priv *ipriv;
struct ata_host *host;
@@ -2373,8 +2369,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_resource_start(pdev, bar) == 0)
return -ENODEV;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
@@ -2382,10 +2377,10 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* determine type and allocate host */
if (type == CK804 && adma_enabled) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
+ dev_notice(&pdev->dev, "Using ADMA mode\n");
type = ADMA;
} else if (type == MCP5x && swncq_enabled) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
+ dev_notice(&pdev->dev, "Using SWNCQ mode\n");
type = SWNCQ;
}
@@ -2429,7 +2424,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
nv_swncq_host_init(host);
if (msi_enabled) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n");
+ dev_notice(&pdev->dev, "Using MSI\n");
pci_enable_msi(pdev);
}
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index a004b1e0ea6..000fcc99e01 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -1179,7 +1179,6 @@ static void pdc_host_init(struct ata_host *host)
static int pdc_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
const struct ata_port_info *ppi[PDC_MAX_PORTS];
struct ata_host *host;
@@ -1187,8 +1186,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
int n_ports, i, rc;
int is_sataii_tx4;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* enable and acquire resources */
rc = pcim_enable_device(pdev);
@@ -1217,7 +1215,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host) {
- dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
+ dev_err(&pdev->dev, "failed to allocate host\n");
return -ENOMEM;
}
host->iomap = pcim_iomap_table(pdev);
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index c5603265fa5..9d1a47bb21b 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -563,21 +563,20 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "64-bit DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "64-bit DMA enable failed\n");
return rc;
}
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
+ dev_err(&pdev->dev,
"32-bit consistent DMA enable failed\n");
return rc;
}
@@ -588,14 +587,12 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
static int qs_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
unsigned int board_idx = (unsigned int) ent->driver_data;
const struct ata_port_info *ppi[] = { &qs_port_info[board_idx], NULL };
struct ata_host *host;
int rc, port_no;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* alloc host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS);
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index b42edaaf3a5..9dfb40b8c2c 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -438,7 +438,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
u8 status;
if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
- u32 serror;
+ u32 serror = 0xffffffff;
/* SIEN doesn't mask SATA IRQs on some 3112s. Those
* controllers continue to assert IRQ as long as
@@ -643,8 +643,8 @@ static void sil_dev_config(struct ata_device *dev)
((ap->flags & SIL_FLAG_MOD15WRITE) &&
(quirks & SIL_QUIRK_MOD15WRITE))) {
if (print_info)
- ata_dev_printk(dev, KERN_INFO, "applying Seagate "
- "errata fix (mod15write workaround)\n");
+ ata_dev_info(dev,
+ "applying Seagate errata fix (mod15write workaround)\n");
dev->max_sectors = 15;
return;
}
@@ -652,8 +652,8 @@ static void sil_dev_config(struct ata_device *dev)
/* limit to udma5 */
if (quirks & SIL_QUIRK_UDMA5MAX) {
if (print_info)
- ata_dev_printk(dev, KERN_INFO, "applying Maxtor "
- "errata fix %s\n", model_num);
+ ata_dev_info(dev, "applying Maxtor errata fix %s\n",
+ model_num);
dev->udma_mask &= ATA_UDMA5;
return;
}
@@ -676,8 +676,8 @@ static void sil_init_controller(struct ata_host *host)
writew(cls << 8 | cls,
mmio_base + sil_port[i].fifo_cfg);
} else
- dev_printk(KERN_WARNING, &pdev->dev,
- "cache line size not set. Driver may not function\n");
+ dev_warn(&pdev->dev,
+ "cache line size not set. Driver may not function\n");
/* Apply R_ERR on DMA activate FIS errata workaround */
if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
@@ -688,9 +688,8 @@ static void sil_init_controller(struct ata_host *host)
if ((tmp & 0x3) != 0x01)
continue;
if (!cnt)
- dev_printk(KERN_INFO, &pdev->dev,
- "Applying R_ERR on DMA activate "
- "FIS errata fix\n");
+ dev_info(&pdev->dev,
+ "Applying R_ERR on DMA activate FIS errata fix\n");
writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
cnt++;
}
@@ -733,7 +732,6 @@ static bool sil_broken_system_poweroff(struct pci_dev *pdev)
static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
int board_id = ent->driver_data;
struct ata_port_info pi = sil_port_info[board_id];
const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -742,8 +740,7 @@ static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int n_ports, rc;
unsigned int i;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
n_ports = 2;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 06c564e5505..55470f337e5 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -694,7 +694,7 @@ static int sil24_softreset(struct ata_link *link, unsigned int *class,
return 0;
err:
- ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
+ ata_link_err(link, "softreset failed (%s)\n", reason);
return -EIO;
}
@@ -714,8 +714,8 @@ static int sil24_hardreset(struct ata_link *link, unsigned int *class,
* This happens often after PM DMA CS errata.
*/
if (pp->do_port_rst) {
- ata_port_printk(ap, KERN_WARNING, "controller in dubious "
- "state, performing PORT_RST\n");
+ ata_port_warn(ap,
+ "controller in dubious state, performing PORT_RST\n");
writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT);
ata_msleep(ap, 10);
@@ -773,7 +773,7 @@ static int sil24_hardreset(struct ata_link *link, unsigned int *class,
goto retry;
}
- ata_link_printk(link, KERN_ERR, "hardreset failed (%s)\n", reason);
+ ata_link_err(link, "hardreset failed (%s)\n", reason);
return -EIO;
}
@@ -925,7 +925,7 @@ static void sil24_pmp_attach(struct ata_port *ap)
if (sata_pmp_gscr_vendor(gscr) == 0x11ab &&
sata_pmp_gscr_devid(gscr) == 0x4140) {
- ata_port_printk(ap, KERN_INFO,
+ ata_port_info(ap,
"disabling NCQ support due to sil24-mv4140 quirk\n");
ap->flags &= ~ATA_FLAG_NCQ;
}
@@ -946,8 +946,7 @@ static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
rc = sil24_init_port(link->ap);
if (rc) {
- ata_link_printk(link, KERN_ERR,
- "hardreset failed (port not ready)\n");
+ ata_link_err(link, "hardreset failed (port not ready)\n");
return rc;
}
@@ -1141,8 +1140,8 @@ static inline void sil24_host_intr(struct ata_port *ap)
/* spurious interrupts are expected if PCIX_IRQ_WOC */
if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
- ata_port_printk(ap, KERN_INFO, "spurious interrupt "
- "(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
+ ata_port_info(ap,
+ "spurious interrupt (slot_stat 0x%x active_tag %d sactive 0x%x)\n",
slot_stat, ap->link.active_tag, ap->link.sactive);
}
@@ -1256,8 +1255,8 @@ static void sil24_init_controller(struct ata_host *host)
PORT_CS_PORT_RST,
PORT_CS_PORT_RST, 10, 100);
if (tmp & PORT_CS_PORT_RST)
- dev_printk(KERN_ERR, host->dev,
- "failed to clear port RST\n");
+ dev_err(host->dev,
+ "failed to clear port RST\n");
}
/* configure port */
@@ -1271,7 +1270,6 @@ static void sil24_init_controller(struct ata_host *host)
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
extern int __MARKER__sil24_cmd_block_is_sized_wrongly;
- static int printed_version;
struct ata_port_info pi = sil24_port_info[ent->driver_data];
const struct ata_port_info *ppi[] = { &pi, NULL };
void __iomem * const *iomap;
@@ -1283,8 +1281,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (sizeof(union sil24_cmd_block) != PAGE_SIZE)
__MARKER__sil24_cmd_block_is_sized_wrongly = 1;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* acquire resources */
rc = pcim_enable_device(pdev);
@@ -1302,9 +1299,8 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pi.flags & SIL24_FLAG_PCIX_IRQ_WOC) {
tmp = readl(iomap[SIL24_HOST_BAR] + HOST_CTRL);
if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
- dev_printk(KERN_INFO, &pdev->dev,
- "Applying completion IRQ loss on PCI-X "
- "errata fix\n");
+ dev_info(&pdev->dev,
+ "Applying completion IRQ loss on PCI-X errata fix\n");
else
pi.flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
}
@@ -1322,22 +1318,21 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "64-bit DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "64-bit DMA enable failed\n");
return rc;
}
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "32-bit consistent DMA enable failed\n");
return rc;
}
}
@@ -1350,7 +1345,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
sil24_init_controller(host);
if (sata_sil24_msi && !pci_enable_msi(pdev)) {
- dev_printk(KERN_INFO, &pdev->dev, "Using MSI\n");
+ dev_info(&pdev->dev, "Using MSI\n");
pci_intx(pdev, 0);
}
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index cdcc13e9cf5..447d9c05fb5 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -193,7 +193,6 @@ static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
struct ata_port_info pi = sis_port_info;
const struct ata_port_info *ppi[] = { &pi, &pi };
struct ata_host *host;
@@ -202,8 +201,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u8 port2_start = 0x20;
int i, rc;
- if (!printed_version++)
- dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
@@ -241,12 +239,12 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
if ((pmr & SIS_PMR_COMBINED) == 0) {
- dev_printk(KERN_INFO, &pdev->dev,
- "Detected SiS 180/181/964 chipset in SATA mode\n");
+ dev_info(&pdev->dev,
+ "Detected SiS 180/181/964 chipset in SATA mode\n");
port2_start = 64;
} else {
- dev_printk(KERN_INFO, &pdev->dev,
- "Detected SiS 180/181 chipset in combined mode\n");
+ dev_info(&pdev->dev,
+ "Detected SiS 180/181 chipset in combined mode\n");
port2_start = 0;
pi.flags |= ATA_FLAG_SLAVE_POSS;
}
@@ -256,24 +254,22 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
case 0x0183:
pci_read_config_dword(pdev, 0x6C, &val);
if (val & (1L << 31)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "Detected SiS 182/965 chipset\n");
+ dev_info(&pdev->dev, "Detected SiS 182/965 chipset\n");
pi.flags |= ATA_FLAG_SLAVE_POSS;
} else {
- dev_printk(KERN_INFO, &pdev->dev,
- "Detected SiS 182/965L chipset\n");
+ dev_info(&pdev->dev, "Detected SiS 182/965L chipset\n");
}
break;
case 0x1182:
- dev_printk(KERN_INFO, &pdev->dev,
- "Detected SiS 1182/966/680 SATA controller\n");
+ dev_info(&pdev->dev,
+ "Detected SiS 1182/966/680 SATA controller\n");
pi.flags |= ATA_FLAG_SLAVE_POSS;
break;
case 0x1183:
- dev_printk(KERN_INFO, &pdev->dev,
- "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
+ dev_info(&pdev->dev,
+ "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
ppi[0] = &sis_info133_for_sata;
ppi[1] = &sis_info133_for_sata;
break;
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 35eabcf3456..c646118943f 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -414,15 +414,13 @@ static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base)
static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *ppi[] =
{ &k2_port_info[ent->driver_data], NULL };
struct ata_host *host;
void __iomem *mmio_base;
int n_ports, i, rc, bar_pos;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
n_ports = 4;
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 8fd3b7252bd..cdaebbe3d18 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1440,15 +1440,13 @@ static void pdc_20621_init(struct ata_host *host)
static int pdc_sata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *ppi[] =
{ &pdc_port_info[ent->driver_data], NULL };
struct ata_host *host;
struct pdc_host_priv *hpriv;
int i, rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 235be717a71..b54ebfcdda3 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -145,7 +145,6 @@ static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *ppi[] = { &uli_port_info, NULL };
unsigned int board_idx = (unsigned int) ent->driver_data;
struct ata_host *host;
@@ -154,8 +153,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ata_ioports *ioaddr;
int n_ports, rc;
- if (!printed_version++)
- dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 54434db15b1..f93e43b0ccd 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -360,9 +360,9 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
online = (sstatus & 0xf) == 0x3;
- ata_port_printk(ap, KERN_INFO,
- "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
- online ? "up" : "down", sstatus, scontrol);
+ ata_port_info(ap,
+ "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
+ online ? "up" : "down", sstatus, scontrol);
/* SStatus is read one more time */
svia_scr_read(link, SCR_STATUS, &sstatus);
@@ -469,7 +469,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
+ dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
return rc;
}
@@ -488,14 +488,14 @@ static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
if (!host) {
- dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
+ dev_err(&pdev->dev, "failed to allocate host\n");
return -ENOMEM;
}
rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap "
- "PCI BARs (errno=%d)\n", rc);
+ dev_err(&pdev->dev, "failed to request/iomap PCI BARs (errno=%d)\n",
+ rc);
return rc;
}
host->iomap = pcim_iomap_table(pdev);
@@ -526,7 +526,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
+ dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
return rc;
}
@@ -542,15 +542,14 @@ static void svia_configure(struct pci_dev *pdev, int board_id)
u8 tmp8;
pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
- dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
- (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);
+ dev_info(&pdev->dev, "routed to hard irq line %d\n",
+ (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);
/* make sure SATA channels are enabled */
pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
- dev_printk(KERN_DEBUG, &pdev->dev,
- "enabling SATA channels (0x%x)\n",
- (int) tmp8);
+ dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n",
+ (int)tmp8);
tmp8 |= ALL_PORTS;
pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
}
@@ -558,9 +557,8 @@ static void svia_configure(struct pci_dev *pdev, int board_id)
/* make sure interrupts for each channel sent to us */
pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
- dev_printk(KERN_DEBUG, &pdev->dev,
- "enabling SATA channel interrupts (0x%x)\n",
- (int) tmp8);
+ dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n",
+ (int) tmp8);
tmp8 |= ALL_PORTS;
pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
}
@@ -568,9 +566,9 @@ static void svia_configure(struct pci_dev *pdev, int board_id)
/* make sure native mode is enabled */
pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
- dev_printk(KERN_DEBUG, &pdev->dev,
- "enabling SATA channel native mode (0x%x)\n",
- (int) tmp8);
+ dev_dbg(&pdev->dev,
+ "enabling SATA channel native mode (0x%x)\n",
+ (int) tmp8);
tmp8 |= NATIVE_MODE_ALL;
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
@@ -606,15 +604,13 @@ static void svia_configure(struct pci_dev *pdev, int board_id)
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
unsigned int i;
int rc;
struct ata_host *host = NULL;
int board_id = (int) ent->driver_data;
const unsigned *bar_sizes;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
@@ -628,7 +624,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
if ((pci_resource_start(pdev, i) == 0) ||
(pci_resource_len(pdev, i) < bar_sizes[i])) {
- dev_printk(KERN_ERR, &pdev->dev,
+ dev_err(&pdev->dev,
"invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
i,
(unsigned long long)pci_resource_start(pdev, i),
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 7c987371136..6135a528869 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -273,9 +273,8 @@ static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
if (unlikely(status == 0xffffffff || status == 0)) {
if (status)
- dev_printk(KERN_ERR, host->dev,
- ": IRQ status == 0xffffffff, "
- "PCI fault or device removal?\n");
+ dev_err(host->dev,
+ ": IRQ status == 0xffffffff, PCI fault or device removal?\n");
goto out;
}
@@ -347,14 +346,12 @@ static int __devinit vsc_sata_init_one(struct pci_dev *pdev,
.port_ops = &vsc_sata_ops,
};
const struct ata_port_info *ppi[] = { &pi, NULL };
- static int printed_version;
struct ata_host *host;
void __iomem *mmio_base;
int i, rc;
u8 cls;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index a5fcb1eb862..f8f41e0e8a8 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -38,7 +38,7 @@
#include <linux/ihex.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/byteorder.h>
@@ -813,7 +813,7 @@ static void fill_rx_pool (amb_dev * dev, unsigned char pool,
return;
}
-// top up all RX pools (can also be called as a bottom half)
+// top up all RX pools
static void fill_rx_pools (amb_dev * dev) {
unsigned char pool;
@@ -872,11 +872,7 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id) {
++irq_work;
if (irq_work) {
-#ifdef FILL_RX_POOLS_IN_BH
- schedule_work (&dev->bh);
-#else
fill_rx_pools (dev);
-#endif
PRINTD (DBG_IRQ, "work done: %u", irq_work);
} else {
@@ -2154,11 +2150,6 @@ static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev)
dev->tx_avail = ATM_OC3_PCR;
dev->rx_avail = ATM_OC3_PCR;
-#ifdef FILL_RX_POOLS_IN_BH
- // initialise bottom half
- INIT_WORK(&dev->bh, (void (*)(void *)) fill_rx_pools, dev);
-#endif
-
// semaphore for txer/rxer modifications - we cannot use a
// spinlock as the critical region needs to switch processes
mutex_init(&dev->vcc_sf);
diff --git a/drivers/atm/ambassador.h b/drivers/atm/ambassador.h
index bd1c46a7ef4..aa9710556bd 100644
--- a/drivers/atm/ambassador.h
+++ b/drivers/atm/ambassador.h
@@ -630,10 +630,6 @@ struct amb_dev {
u32 iobase;
u32 * membase;
-#ifdef FILL_RX_POOLS_IN_BH
- struct work_struct bh;
-#endif
-
amb_cq cq;
amb_txq txq;
amb_rxq rxq[NUM_RX_POOLS];
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index 0b0625054a8..b22d71cac54 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
extern int atm_init_aal5(struct atm_vcc *vcc); /* "raw" AAL5 transport */
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 3230ea0df83..93071417315 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
diff --git a/drivers/atm/eni.h b/drivers/atm/eni.h
index e4c9525e60b..dc9a62cc260 100644
--- a/drivers/atm/eni.h
+++ b/drivers/atm/eni.h
@@ -8,12 +8,13 @@
#include <linux/atm.h>
#include <linux/atmdev.h>
+#include <linux/interrupt.h>
#include <linux/sonet.h>
#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "midway.h"
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index ef7a658312a..5072f8ac16f 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -44,6 +44,7 @@
#include <linux/ioport.h> /* for request_region */
#include <linux/uio.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/slab.h>
@@ -51,7 +52,7 @@
#include <asm/system.h>
#include <asm/string.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <linux/wait.h>
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index bc9e702186d..361f5aee3be 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -44,7 +44,7 @@
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#ifdef CONFIG_SBUS
#include <linux/of.h>
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index d58e3fcb9db..b81210330ac 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -38,13 +38,14 @@
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 1f8d724a18b..db06f34419c 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -37,6 +37,7 @@
#include <linux/atm.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
@@ -45,7 +46,7 @@
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#ifdef CONFIG_ATM_IDT77252_USE_SUNI
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index dee4f01a64d..cb90f7a3e07 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -53,11 +53,12 @@
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 4e8ba56f75d..e828c548749 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1947,7 +1947,6 @@ static int __devinit lanai_pci_start(struct lanai_dev *lanai)
{
struct pci_dev *pci = lanai->pci;
int result;
- u16 w;
if (pci_enable_device(pci) != 0) {
printk(KERN_ERR DEV_LABEL "(itf %d): can't enable "
@@ -1965,13 +1964,7 @@ static int __devinit lanai_pci_start(struct lanai_dev *lanai)
"(itf %d): No suitable DMA available.\n", lanai->number);
return -EBUSY;
}
- result = pci_read_config_word(pci, PCI_SUBSYSTEM_ID, &w);
- if (result != PCIBIOS_SUCCESSFUL) {
- printk(KERN_ERR DEV_LABEL "(itf %d): can't read "
- "PCI_SUBSYSTEM_ID: %d\n", lanai->number, result);
- return -EINVAL;
- }
- result = check_board_id_and_rev("PCI", w, NULL);
+ result = check_board_id_and_rev("PCI", pci->subsystem_device, NULL);
if (result != 0)
return result;
/* Set latency timer to zero as per lanai docs */
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 6b313ee9231..1c70c45fa04 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -51,7 +51,7 @@
#include <linux/idr.h>
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "nicstar.h"
#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
#include "suni.h"
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index 41c56eae4c8..90f1ccca9e5 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -25,7 +25,7 @@
#include <asm/system.h>
#include <asm/param.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "suni.h"
diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
index c45ae0573bb..5120a96b3a8 100644
--- a/drivers/atm/uPD98402.c
+++ b/drivers/atm/uPD98402.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "uPD98402.h"
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 624917902b6..d889f56e8d8 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/atm_zatm.h>
#include <linux/capability.h>
@@ -26,7 +27,7 @@
#include <asm/system.h>
#include <asm/string.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "uPD98401.h"
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index d57e8d0fb82..21cf46f4524 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -16,6 +16,11 @@ config UEVENT_HELPER_PATH
that it creates a high system load, or on smaller systems
it is known to create out-of-memory situations during bootup.
+ To disable user space helper program execution at early boot
+ time, specify an empty string here. This setting can be altered
+ via /proc/sys/kernel/hotplug or via /sys/kernel/uevent_helper
+ later at runtime.
+
config DEVTMPFS
bool "Maintain a devtmpfs filesystem to mount at /dev"
depends on HOTPLUG
@@ -65,17 +70,17 @@ config PREVENT_FIRMWARE_BUILD
default y
help
Say yes to avoid building firmware. Firmware is usually shipped
- with the driver, and only when updating the firmware a rebuild
- should be made.
- If unsure say Y here.
+ with the driver and only when updating the firmware should a
+ rebuild be made.
+ If unsure, say Y here.
config FW_LOADER
tristate "Userspace firmware loading support" if EXPERT
default y
---help---
- This option is provided for the case where no in-kernel-tree modules
- require userspace firmware loading support, but a module built outside
- the kernel tree does.
+ This option is provided for the case where none of the in-tree modules
+ require userspace firmware loading support, but a module built
+ out-of-tree does.
config FIRMWARE_IN_KERNEL
bool "Include in-kernel firmware blobs in kernel binary"
@@ -83,22 +88,22 @@ config FIRMWARE_IN_KERNEL
default y
help
The kernel source tree includes a number of firmware 'blobs'
- which are used by various drivers. The recommended way to
- use these is to run "make firmware_install" and to copy the
- resulting binary files created in usr/lib/firmware directory
- of the kernel tree to the /lib/firmware on your system so
+ that are used by various drivers. The recommended way to
+ use these is to run "make firmware_install", which, after
+ converting ihex files to binary, copies all of the needed
+ binary files in firmware/ to /lib/firmware/ on your system so
that they can be loaded by userspace helpers on request.
Enabling this option will build each required firmware blob
into the kernel directly, where request_firmware() will find
them without having to call out to userspace. This may be
- useful if your root file system requires a device which uses
- such firmware, and do not wish to use an initrd.
+ useful if your root file system requires a device that uses
+ such firmware and you do not wish to use an initrd.
This single option controls the inclusion of firmware for
- every driver which uses request_firmware() and ships its
- firmware in the kernel source tree, to avoid a proliferation
- of 'Include firmware for xxx device' options.
+ every driver that uses request_firmware() and ships its
+ firmware in the kernel source tree, which avoids a
+ proliferation of 'Include firmware for xxx device' options.
Say 'N' and let firmware be loaded from userspace.
@@ -106,27 +111,27 @@ config EXTRA_FIRMWARE
string "External firmware blobs to build into the kernel binary"
depends on FW_LOADER
help
- This option allows firmware to be built into the kernel, for the
- cases where the user either cannot or doesn't want to provide it from
+ This option allows firmware to be built into the kernel for the case
+ where the user either cannot or doesn't want to provide it from
userspace at runtime (for example, when the firmware in question is
required for accessing the boot device, and the user doesn't want to
use an initrd).
- This option is a string, and takes the (space-separated) names of the
- firmware files -- the same names which appear in MODULE_FIRMWARE()
+ This option is a string and takes the (space-separated) names of the
+ firmware files -- the same names that appear in MODULE_FIRMWARE()
and request_firmware() in the source. These files should exist under
the directory specified by the EXTRA_FIRMWARE_DIR option, which is
- by default the firmware/ subdirectory of the kernel source tree.
+ by default the firmware subdirectory of the kernel source tree.
- So, for example, you might set CONFIG_EXTRA_FIRMWARE="usb8388.bin",
- copy the usb8388.bin file into the firmware/ directory, and build the
- kernel. Then any request_firmware("usb8388.bin") will be
- satisfied internally without needing to call out to userspace.
+ For example, you might set CONFIG_EXTRA_FIRMWARE="usb8388.bin", copy
+ the usb8388.bin file into the firmware directory, and build the kernel.
+ Then any request_firmware("usb8388.bin") will be satisfied internally
+ without needing to call out to userspace.
WARNING: If you include additional firmware files into your binary
- kernel image which are not available under the terms of the GPL,
+ kernel image that are not available under the terms of the GPL,
then it may be a violation of the GPL to distribute the resulting
- image -- since it combines both GPL and non-GPL work. You should
+ image since it combines both GPL and non-GPL work. You should
consult a lawyer of your own before distributing such an image.
config EXTRA_FIRMWARE_DIR
@@ -136,10 +141,9 @@ config EXTRA_FIRMWARE_DIR
help
This option controls the directory in which the kernel build system
looks for the firmware files listed in the EXTRA_FIRMWARE option.
- The default is the firmware/ directory in the kernel source tree,
- but by changing this option you can point it elsewhere, such as
- the /lib/firmware/ directory or another separate directory
- containing firmware files.
+ The default is firmware/ in the kernel source tree, but by changing
+ this option you can point it elsewhere, such as /lib/firmware/ or
+ some other directory containing the firmware files.
config DEBUG_DRIVER
bool "Driver Core verbose debug messages"
@@ -168,4 +172,6 @@ config SYS_HYPERVISOR
bool
default n
+source "drivers/base/regmap/Kconfig"
+
endmenu
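For reference, the request_firmware() call that FIRMWARE_IN_KERNEL and EXTRA_FIRMWARE feed looks the same from a driver's point of view whether the blob is built into the kernel image or fetched by the userspace helper; a minimal sketch, assuming a hypothetical driver with a struct device *dev in scope:

#include <linux/firmware.h>

static int foo_load_firmware(struct device *dev)
{
        const struct firmware *fw;
        int err;

        err = request_firmware(&fw, "usb8388.bin", dev);
        if (err)
                return err;     /* not built in and the helper could not find it */

        /* fw->data / fw->size describe the blob, regardless of its origin */
        release_firmware(fw);
        return 0;
}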
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 4c5701c15f5..99a375ad2cc 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -13,11 +13,11 @@ obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
obj-$(CONFIG_SMP) += topology.o
-obj-$(CONFIG_IOMMU_API) += iommu.o
ifeq ($(CONFIG_SYSFS),y)
obj-$(CONFIG_MODULES) += module.o
endif
obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
+obj-$(CONFIG_REGMAP) += regmap/
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 82bbb5967aa..33e1bed68fd 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -21,12 +21,11 @@
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/ramfs.h>
-#include <linux/cred.h>
#include <linux/sched.h>
-#include <linux/init_task.h>
#include <linux/slab.h>
+#include <linux/kthread.h>
-static struct vfsmount *dev_mnt;
+static struct task_struct *thread;
#if defined CONFIG_DEVTMPFS_MOUNT
static int mount_dev = 1;
@@ -34,7 +33,16 @@ static int mount_dev = 1;
static int mount_dev;
#endif
-static DEFINE_MUTEX(dirlock);
+static DEFINE_SPINLOCK(req_lock);
+
+static struct req {
+ struct req *next;
+ struct completion done;
+ int err;
+ const char *name;
+ mode_t mode; /* 0 => delete */
+ struct device *dev;
+} *requests;
static int __init mount_param(char *str)
{
@@ -68,131 +76,152 @@ static inline int is_blockdev(struct device *dev)
static inline int is_blockdev(struct device *dev) { return 0; }
#endif
-static int dev_mkdir(const char *name, mode_t mode)
+int devtmpfs_create_node(struct device *dev)
{
- struct nameidata nd;
- struct dentry *dentry;
- int err;
+ const char *tmp = NULL;
+ struct req req;
- err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
- name, LOOKUP_PARENT, &nd);
- if (err)
- return err;
+ if (!thread)
+ return 0;
- dentry = lookup_create(&nd, 1);
- if (!IS_ERR(dentry)) {
- err = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
- if (!err)
- /* mark as kernel-created inode */
- dentry->d_inode->i_private = &dev_mnt;
- dput(dentry);
- } else {
- err = PTR_ERR(dentry);
- }
+ req.mode = 0;
+ req.name = device_get_devnode(dev, &req.mode, &tmp);
+ if (!req.name)
+ return -ENOMEM;
- mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
- path_put(&nd.path);
- return err;
+ if (req.mode == 0)
+ req.mode = 0600;
+ if (is_blockdev(dev))
+ req.mode |= S_IFBLK;
+ else
+ req.mode |= S_IFCHR;
+
+ req.dev = dev;
+
+ init_completion(&req.done);
+
+ spin_lock(&req_lock);
+ req.next = requests;
+ requests = &req;
+ spin_unlock(&req_lock);
+
+ wake_up_process(thread);
+ wait_for_completion(&req.done);
+
+ kfree(tmp);
+
+ return req.err;
}
-static int create_path(const char *nodepath)
+int devtmpfs_delete_node(struct device *dev)
{
- int err;
+ const char *tmp = NULL;
+ struct req req;
- mutex_lock(&dirlock);
- err = dev_mkdir(nodepath, 0755);
- if (err == -ENOENT) {
- char *path;
- char *s;
-
- /* parent directories do not exist, create them */
- path = kstrdup(nodepath, GFP_KERNEL);
- if (!path) {
- err = -ENOMEM;
- goto out;
- }
- s = path;
- for (;;) {
- s = strchr(s, '/');
- if (!s)
- break;
- s[0] = '\0';
- err = dev_mkdir(path, 0755);
- if (err && err != -EEXIST)
- break;
- s[0] = '/';
- s++;
- }
- kfree(path);
- }
-out:
- mutex_unlock(&dirlock);
- return err;
+ if (!thread)
+ return 0;
+
+ req.name = device_get_devnode(dev, NULL, &tmp);
+ if (!req.name)
+ return -ENOMEM;
+
+ req.mode = 0;
+ req.dev = dev;
+
+ init_completion(&req.done);
+
+ spin_lock(&req_lock);
+ req.next = requests;
+ requests = &req;
+ spin_unlock(&req_lock);
+
+ wake_up_process(thread);
+ wait_for_completion(&req.done);
+
+ kfree(tmp);
+ return req.err;
}
-int devtmpfs_create_node(struct device *dev)
+static int dev_mkdir(const char *name, mode_t mode)
{
- const char *tmp = NULL;
- const char *nodename;
- const struct cred *curr_cred;
- mode_t mode = 0;
- struct nameidata nd;
struct dentry *dentry;
+ struct path path;
int err;
- if (!dev_mnt)
- return 0;
+ dentry = kern_path_create(AT_FDCWD, name, &path, 1);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ err = vfs_mkdir(path.dentry->d_inode, dentry, mode);
+ if (!err)
+ /* mark as kernel-created inode */
+ dentry->d_inode->i_private = &thread;
+ dput(dentry);
+ mutex_unlock(&path.dentry->d_inode->i_mutex);
+ path_put(&path);
+ return err;
+}
- nodename = device_get_devnode(dev, &mode, &tmp);
- if (!nodename)
+static int create_path(const char *nodepath)
+{
+ char *path;
+ char *s;
+ int err = 0;
+
+ /* parent directories do not exist, create them */
+ path = kstrdup(nodepath, GFP_KERNEL);
+ if (!path)
return -ENOMEM;
- if (mode == 0)
- mode = 0600;
- if (is_blockdev(dev))
- mode |= S_IFBLK;
- else
- mode |= S_IFCHR;
+ s = path;
+ for (;;) {
+ s = strchr(s, '/');
+ if (!s)
+ break;
+ s[0] = '\0';
+ err = dev_mkdir(path, 0755);
+ if (err && err != -EEXIST)
+ break;
+ s[0] = '/';
+ s++;
+ }
+ kfree(path);
+ return err;
+}
- curr_cred = override_creds(&init_cred);
+static int handle_create(const char *nodename, mode_t mode, struct device *dev)
+{
+ struct dentry *dentry;
+ struct path path;
+ int err;
- err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
- nodename, LOOKUP_PARENT, &nd);
- if (err == -ENOENT) {
+ dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ if (dentry == ERR_PTR(-ENOENT)) {
create_path(nodename);
- err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
- nodename, LOOKUP_PARENT, &nd);
+ dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
}
- if (err)
- goto out;
-
- dentry = lookup_create(&nd, 0);
- if (!IS_ERR(dentry)) {
- err = vfs_mknod(nd.path.dentry->d_inode,
- dentry, mode, dev->devt);
- if (!err) {
- struct iattr newattrs;
-
- /* fixup possibly umasked mode */
- newattrs.ia_mode = mode;
- newattrs.ia_valid = ATTR_MODE;
- mutex_lock(&dentry->d_inode->i_mutex);
- notify_change(dentry, &newattrs);
- mutex_unlock(&dentry->d_inode->i_mutex);
-
- /* mark as kernel-created inode */
- dentry->d_inode->i_private = &dev_mnt;
- }
- dput(dentry);
- } else {
- err = PTR_ERR(dentry);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ err = vfs_mknod(path.dentry->d_inode,
+ dentry, mode, dev->devt);
+ if (!err) {
+ struct iattr newattrs;
+
+ /* fixup possibly umasked mode */
+ newattrs.ia_mode = mode;
+ newattrs.ia_valid = ATTR_MODE;
+ mutex_lock(&dentry->d_inode->i_mutex);
+ notify_change(dentry, &newattrs);
+ mutex_unlock(&dentry->d_inode->i_mutex);
+
+ /* mark as kernel-created inode */
+ dentry->d_inode->i_private = &thread;
}
+ dput(dentry);
- mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
- path_put(&nd.path);
-out:
- kfree(tmp);
- revert_creds(curr_cred);
+ mutex_unlock(&path.dentry->d_inode->i_mutex);
+ path_put(&path);
return err;
}
@@ -202,8 +231,7 @@ static int dev_rmdir(const char *name)
struct dentry *dentry;
int err;
- err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
- name, LOOKUP_PARENT, &nd);
+ err = kern_path_parent(name, &nd);
if (err)
return err;
@@ -211,7 +239,7 @@ static int dev_rmdir(const char *name)
dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
if (!IS_ERR(dentry)) {
if (dentry->d_inode) {
- if (dentry->d_inode->i_private == &dev_mnt)
+ if (dentry->d_inode->i_private == &thread)
err = vfs_rmdir(nd.path.dentry->d_inode,
dentry);
else
@@ -238,7 +266,6 @@ static int delete_path(const char *nodepath)
if (!path)
return -ENOMEM;
- mutex_lock(&dirlock);
for (;;) {
char *base;
@@ -250,7 +277,6 @@ static int delete_path(const char *nodepath)
if (err)
break;
}
- mutex_unlock(&dirlock);
kfree(path);
return err;
@@ -259,7 +285,7 @@ static int delete_path(const char *nodepath)
static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
{
/* did we create it */
- if (inode->i_private != &dev_mnt)
+ if (inode->i_private != &thread)
return 0;
/* does the dev_t match */
@@ -277,29 +303,17 @@ static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *sta
return 1;
}
-int devtmpfs_delete_node(struct device *dev)
+static int handle_remove(const char *nodename, struct device *dev)
{
- const char *tmp = NULL;
- const char *nodename;
- const struct cred *curr_cred;
struct nameidata nd;
struct dentry *dentry;
struct kstat stat;
int deleted = 1;
int err;
- if (!dev_mnt)
- return 0;
-
- nodename = device_get_devnode(dev, NULL, &tmp);
- if (!nodename)
- return -ENOMEM;
-
- curr_cred = override_creds(&init_cred);
- err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
- nodename, LOOKUP_PARENT, &nd);
+ err = kern_path_parent(nodename, &nd);
if (err)
- goto out;
+ return err;
mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
@@ -337,9 +351,6 @@ int devtmpfs_delete_node(struct device *dev)
path_put(&nd.path);
if (deleted && strchr(nodename, '/'))
delete_path(nodename);
-out:
- kfree(tmp);
- revert_creds(curr_cred);
return err;
}
@@ -354,7 +365,7 @@ int devtmpfs_mount(const char *mntdir)
if (!mount_dev)
return 0;
- if (!dev_mnt)
+ if (!thread)
return 0;
err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
@@ -365,31 +376,80 @@ int devtmpfs_mount(const char *mntdir)
return err;
}
+static __initdata DECLARE_COMPLETION(setup_done);
+
+static int handle(const char *name, mode_t mode, struct device *dev)
+{
+ if (mode)
+ return handle_create(name, mode, dev);
+ else
+ return handle_remove(name, dev);
+}
+
+static int devtmpfsd(void *p)
+{
+ char options[] = "mode=0755";
+ int *err = p;
+ *err = sys_unshare(CLONE_NEWNS);
+ if (*err)
+ goto out;
+ *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
+ if (*err)
+ goto out;
+ sys_chdir("/.."); /* will traverse into overmounted root */
+ sys_chroot(".");
+ complete(&setup_done);
+ while (1) {
+ spin_lock(&req_lock);
+ while (requests) {
+ struct req *req = requests;
+ requests = NULL;
+ spin_unlock(&req_lock);
+ while (req) {
+ struct req *next = req->next;
+ req->err = handle(req->name, req->mode, req->dev);
+ complete(&req->done);
+ req = next;
+ }
+ spin_lock(&req_lock);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock(&req_lock);
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ }
+ return 0;
+out:
+ complete(&setup_done);
+ return *err;
+}
+
/*
* Create devtmpfs instance, driver-core devices will add their device
* nodes here.
*/
int __init devtmpfs_init(void)
{
- int err;
- struct vfsmount *mnt;
- char options[] = "mode=0755";
-
- err = register_filesystem(&dev_fs_type);
+ int err = register_filesystem(&dev_fs_type);
if (err) {
printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
"type %i\n", err);
return err;
}
- mnt = kern_mount_data(&dev_fs_type, options);
- if (IS_ERR(mnt)) {
- err = PTR_ERR(mnt);
+ thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
+ if (!IS_ERR(thread)) {
+ wait_for_completion(&setup_done);
+ } else {
+ err = PTR_ERR(thread);
+ thread = NULL;
+ }
+
+ if (err) {
printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
unregister_filesystem(&dev_fs_type);
return err;
}
- dev_mnt = mnt;
printk(KERN_INFO "devtmpfs: initialized\n");
return 0;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9f9b2359f71..2840ed4668c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -24,13 +24,12 @@
#include <linux/stat.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
static DEFINE_MUTEX(mem_sysfs_mutex);
#define MEMORY_CLASS_NAME "memory"
-#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
static int sections_per_block;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 1c291af637b..0cad9c7f6bb 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -32,6 +32,25 @@ struct device platform_bus = {
EXPORT_SYMBOL_GPL(platform_bus);
/**
+ * arch_setup_pdev_archdata - Allow manipulation of archdata before it's used
+ * @dev: platform device
+ *
+ * This is called before platform_device_add() such that any pdev_archdata may
+ * be set up before the platform_notifier is called. So if a user needs to
+ * manipulate any relevant information in the pdev_archdata they can do:
+ *
+ * platform_device_alloc()
+ * ... manipulate ...
+ * platform_device_add()
+ *
+ * And if they don't care they can just call platform_device_register() and
+ * everything will just work out.
+ */
+void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
+{
+}
+
+/**
* platform_get_resource - get a resource for a device
* @dev: platform device
* @type: resource type
@@ -173,6 +192,7 @@ struct platform_device *platform_device_alloc(const char *name, int id)
pa->pdev.id = id;
device_initialize(&pa->pdev.dev);
pa->pdev.dev.release = platform_device_release;
+ arch_setup_pdev_archdata(&pa->pdev);
}
return pa ? &pa->pdev : NULL;
@@ -334,6 +354,7 @@ EXPORT_SYMBOL_GPL(platform_device_del);
int platform_device_register(struct platform_device *pdev)
{
device_initialize(&pdev->dev);
+ arch_setup_pdev_archdata(pdev);
return platform_device_add(pdev);
}
EXPORT_SYMBOL_GPL(platform_device_register);
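A rough illustration of the pattern the arch_setup_pdev_archdata() comment above describes, with a hypothetical "foo" device (the archdata member itself is architecture specific, so it is only hinted at in a comment):

static int foo_register(void)
{
        struct platform_device *pdev;
        int ret;

        pdev = platform_device_alloc("foo", -1);
        if (!pdev)
                return -ENOMEM;

        /* pdev->archdata was prepared by arch_setup_pdev_archdata() inside
         * platform_device_alloc(); adjust it here, before platform_device_add()
         * runs the bus notifiers. */
        /* pdev->archdata.some_arch_field = ...; */

        ret = platform_device_add(pdev);
        if (ret)
                platform_device_put(pdev);
        return ret;
}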
@@ -367,7 +388,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
*
* Returns &struct platform_device pointer on success, or ERR_PTR() on error.
*/
-struct platform_device *__init_or_module platform_device_register_resndata(
+struct platform_device *platform_device_register_resndata(
struct device *parent,
const char *name, int id,
const struct resource *res, unsigned int num,
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 3647e114d0e..2639ae79a37 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
\ No newline at end of file
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index eaa8a854af0..a846b2f95cf 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -15,9 +15,9 @@
#include <linux/slab.h>
#include <linux/err.h>
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
-struct pm_runtime_clk_data {
+struct pm_clk_data {
struct list_head clock_list;
struct mutex lock;
};
@@ -36,25 +36,25 @@ struct pm_clock_entry {
enum pce_status status;
};
-static struct pm_runtime_clk_data *__to_prd(struct device *dev)
+static struct pm_clk_data *__to_pcd(struct device *dev)
{
return dev ? dev->power.subsys_data : NULL;
}
/**
- * pm_runtime_clk_add - Start using a device clock for runtime PM.
- * @dev: Device whose clock is going to be used for runtime PM.
+ * pm_clk_add - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
* @con_id: Connection ID of the clock.
*
* Add the clock represented by @con_id to the list of clocks used for
- * the runtime PM of @dev.
+ * the power management of @dev.
*/
-int pm_runtime_clk_add(struct device *dev, const char *con_id)
+int pm_clk_add(struct device *dev, const char *con_id)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
- if (!prd)
+ if (!pcd)
return -EINVAL;
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -73,20 +73,20 @@ int pm_runtime_clk_add(struct device *dev, const char *con_id)
}
}
- mutex_lock(&prd->lock);
- list_add_tail(&ce->node, &prd->clock_list);
- mutex_unlock(&prd->lock);
+ mutex_lock(&pcd->lock);
+ list_add_tail(&ce->node, &pcd->clock_list);
+ mutex_unlock(&pcd->lock);
return 0;
}
/**
- * __pm_runtime_clk_remove - Destroy runtime PM clock entry.
- * @ce: Runtime PM clock entry to destroy.
+ * __pm_clk_remove - Destroy PM clock entry.
+ * @ce: PM clock entry to destroy.
*
- * This routine must be called under the mutex protecting the runtime PM list
- * of clocks corresponding the the @ce's device.
+ * This routine must be called under the mutex protecting the PM list of clocks
+ * corresponding to the @ce's device.
*/
-static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
+static void __pm_clk_remove(struct pm_clock_entry *ce)
{
if (!ce)
return;
@@ -108,95 +108,99 @@ static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
}
/**
- * pm_runtime_clk_remove - Stop using a device clock for runtime PM.
- * @dev: Device whose clock should not be used for runtime PM any more.
+ * pm_clk_remove - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
* @con_id: Connection ID of the clock.
*
* Remove the clock represented by @con_id from the list of clocks used for
- * the runtime PM of @dev.
+ * the power management of @dev.
*/
-void pm_runtime_clk_remove(struct device *dev, const char *con_id)
+void pm_clk_remove(struct device *dev, const char *con_id)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
- if (!prd)
+ if (!pcd)
return;
- mutex_lock(&prd->lock);
+ mutex_lock(&pcd->lock);
- list_for_each_entry(ce, &prd->clock_list, node) {
+ list_for_each_entry(ce, &pcd->clock_list, node) {
if (!con_id && !ce->con_id) {
- __pm_runtime_clk_remove(ce);
+ __pm_clk_remove(ce);
break;
} else if (!con_id || !ce->con_id) {
continue;
} else if (!strcmp(con_id, ce->con_id)) {
- __pm_runtime_clk_remove(ce);
+ __pm_clk_remove(ce);
break;
}
}
- mutex_unlock(&prd->lock);
+ mutex_unlock(&pcd->lock);
}
/**
- * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks.
- * @dev: Device to initialize the list of runtime PM clocks for.
+ * pm_clk_init - Initialize a device's list of power management clocks.
+ * @dev: Device to initialize the list of PM clocks for.
*
- * Allocate a struct pm_runtime_clk_data object, initialize its lock member and
+ * Allocate a struct pm_clk_data object, initialize its lock member and
* make the @dev's power.subsys_data field point to it.
*/
-int pm_runtime_clk_init(struct device *dev)
+int pm_clk_init(struct device *dev)
{
- struct pm_runtime_clk_data *prd;
+ struct pm_clk_data *pcd;
- prd = kzalloc(sizeof(*prd), GFP_KERNEL);
- if (!prd) {
- dev_err(dev, "Not enough memory fo runtime PM data.\n");
+ pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
+ if (!pcd) {
+ dev_err(dev, "Not enough memory for PM clock data.\n");
return -ENOMEM;
}
- INIT_LIST_HEAD(&prd->clock_list);
- mutex_init(&prd->lock);
- dev->power.subsys_data = prd;
+ INIT_LIST_HEAD(&pcd->clock_list);
+ mutex_init(&pcd->lock);
+ dev->power.subsys_data = pcd;
return 0;
}
/**
- * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks.
- * @dev: Device to destroy the list of runtime PM clocks for.
+ * pm_clk_destroy - Destroy a device's list of power management clocks.
+ * @dev: Device to destroy the list of PM clocks for.
*
* Clear the @dev's power.subsys_data field, remove the list of clock entries
- * from the struct pm_runtime_clk_data object pointed to by it before and free
+ * from the struct pm_clk_data object pointed to by it before and free
* that object.
*/
-void pm_runtime_clk_destroy(struct device *dev)
+void pm_clk_destroy(struct device *dev)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce, *c;
- if (!prd)
+ if (!pcd)
return;
dev->power.subsys_data = NULL;
- mutex_lock(&prd->lock);
+ mutex_lock(&pcd->lock);
- list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node)
- __pm_runtime_clk_remove(ce);
+ list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
+ __pm_clk_remove(ce);
- mutex_unlock(&prd->lock);
+ mutex_unlock(&pcd->lock);
- kfree(prd);
+ kfree(pcd);
}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_RUNTIME
+
/**
- * pm_runtime_clk_acquire - Acquire a device clock.
+ * pm_clk_acquire - Acquire a device clock.
* @dev: Device whose clock is to be acquired.
* @con_id: Connection ID of the clock.
*/
-static void pm_runtime_clk_acquire(struct device *dev,
+static void pm_clk_acquire(struct device *dev,
struct pm_clock_entry *ce)
{
ce->clk = clk_get(dev, ce->con_id);
@@ -209,24 +213,24 @@ static void pm_runtime_clk_acquire(struct device *dev,
}
/**
- * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list.
+ * pm_clk_suspend - Disable clocks in a device's PM clock list.
* @dev: Device to disable the clocks for.
*/
-int pm_runtime_clk_suspend(struct device *dev)
+int pm_clk_suspend(struct device *dev)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
dev_dbg(dev, "%s()\n", __func__);
- if (!prd)
+ if (!pcd)
return 0;
- mutex_lock(&prd->lock);
+ mutex_lock(&pcd->lock);
- list_for_each_entry_reverse(ce, &prd->clock_list, node) {
+ list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
if (ce->status == PCE_STATUS_NONE)
- pm_runtime_clk_acquire(dev, ce);
+ pm_clk_acquire(dev, ce);
if (ce->status < PCE_STATUS_ERROR) {
clk_disable(ce->clk);
@@ -234,30 +238,30 @@ int pm_runtime_clk_suspend(struct device *dev)
}
}
- mutex_unlock(&prd->lock);
+ mutex_unlock(&pcd->lock);
return 0;
}
/**
- * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list.
+ * pm_clk_resume - Enable clocks in a device's PM clock list.
* @dev: Device to enable the clocks for.
*/
-int pm_runtime_clk_resume(struct device *dev)
+int pm_clk_resume(struct device *dev)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
dev_dbg(dev, "%s()\n", __func__);
- if (!prd)
+ if (!pcd)
return 0;
- mutex_lock(&prd->lock);
+ mutex_lock(&pcd->lock);
- list_for_each_entry(ce, &prd->clock_list, node) {
+ list_for_each_entry(ce, &pcd->clock_list, node) {
if (ce->status == PCE_STATUS_NONE)
- pm_runtime_clk_acquire(dev, ce);
+ pm_clk_acquire(dev, ce);
if (ce->status < PCE_STATUS_ERROR) {
clk_enable(ce->clk);
@@ -265,28 +269,28 @@ int pm_runtime_clk_resume(struct device *dev)
}
}
- mutex_unlock(&prd->lock);
+ mutex_unlock(&pcd->lock);
return 0;
}
/**
- * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * pm_clk_notify - Notify routine for device addition and removal.
* @nb: Notifier block object this function is a member of.
* @action: Operation being carried out by the caller.
* @data: Device the routine is being run for.
*
* For this function to work, @nb must be a member of an object of type
* struct pm_clk_notifier_block containing all of the requisite data.
- * Specifically, the pwr_domain member of that object is copied to the device's
- * pwr_domain field and its con_ids member is used to populate the device's list
- * of runtime PM clocks, depending on @action.
+ * Specifically, the pm_domain member of that object is copied to the device's
+ * pm_domain field and its con_ids member is used to populate the device's list
+ * of PM clocks, depending on @action.
*
- * If the device's pwr_domain field is already populated with a value different
+ * If the device's pm_domain field is already populated with a value different
* from the one stored in the struct pm_clk_notifier_block object, the function
* does nothing.
*/
-static int pm_runtime_clk_notify(struct notifier_block *nb,
+static int pm_clk_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct pm_clk_notifier_block *clknb;
@@ -300,28 +304,28 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
- if (dev->pwr_domain)
+ if (dev->pm_domain)
break;
- error = pm_runtime_clk_init(dev);
+ error = pm_clk_init(dev);
if (error)
break;
- dev->pwr_domain = clknb->pwr_domain;
+ dev->pm_domain = clknb->pm_domain;
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
- pm_runtime_clk_add(dev, *con_id);
+ pm_clk_add(dev, *con_id);
} else {
- pm_runtime_clk_add(dev, NULL);
+ pm_clk_add(dev, NULL);
}
break;
case BUS_NOTIFY_DEL_DEVICE:
- if (dev->pwr_domain != clknb->pwr_domain)
+ if (dev->pm_domain != clknb->pm_domain)
break;
- dev->pwr_domain = NULL;
- pm_runtime_clk_destroy(dev);
+ dev->pm_domain = NULL;
+ pm_clk_destroy(dev);
break;
}
@@ -330,6 +334,60 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
#else /* !CONFIG_PM_RUNTIME */
+#ifdef CONFIG_PM
+
+/**
+ * pm_clk_suspend - Disable clocks in a device's PM clock list.
+ * @dev: Device to disable the clocks for.
+ */
+int pm_clk_suspend(struct device *dev)
+{
+ struct pm_clk_data *pcd = __to_pcd(dev);
+ struct pm_clock_entry *ce;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ /* If there is no driver, the clocks are already disabled. */
+ if (!pcd || !dev->driver)
+ return 0;
+
+ mutex_lock(&pcd->lock);
+
+ list_for_each_entry_reverse(ce, &pcd->clock_list, node)
+ clk_disable(ce->clk);
+
+ mutex_unlock(&pcd->lock);
+
+ return 0;
+}
+
+/**
+ * pm_clk_resume - Enable clocks in a device's PM clock list.
+ * @dev: Device to enable the clocks for.
+ */
+int pm_clk_resume(struct device *dev)
+{
+ struct pm_clk_data *pcd = __to_pcd(dev);
+ struct pm_clock_entry *ce;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ /* If there is no driver, the clocks should remain disabled. */
+ if (!pcd || !dev->driver)
+ return 0;
+
+ mutex_lock(&pcd->lock);
+
+ list_for_each_entry(ce, &pcd->clock_list, node)
+ clk_enable(ce->clk);
+
+ mutex_unlock(&pcd->lock);
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
/**
* enable_clock - Enable a device clock.
* @dev: Device whose clock is to be enabled.
@@ -365,7 +423,7 @@ static void disable_clock(struct device *dev, const char *con_id)
}
/**
- * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * pm_clk_notify - Notify routine for device addition and removal.
* @nb: Notifier block object this function is a member of.
* @action: Operation being carried out by the caller.
* @data: Device the routine is being run for.
@@ -375,7 +433,7 @@ static void disable_clock(struct device *dev, const char *con_id)
* Specifically, the con_ids member of that object is used to enable or disable
* the device's clocks, depending on @action.
*/
-static int pm_runtime_clk_notify(struct notifier_block *nb,
+static int pm_clk_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct pm_clk_notifier_block *clknb;
@@ -387,7 +445,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
clknb = container_of(nb, struct pm_clk_notifier_block, nb);
switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
+ case BUS_NOTIFY_BIND_DRIVER:
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
enable_clock(dev, *con_id);
@@ -395,7 +453,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
enable_clock(dev, NULL);
}
break;
- case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_UNBOUND_DRIVER:
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
disable_clock(dev, *con_id);
@@ -411,21 +469,21 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
#endif /* !CONFIG_PM_RUNTIME */
/**
- * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks.
+ * pm_clk_add_notifier - Add bus type notifier for power management clocks.
* @bus: Bus type to add the notifier to.
* @clknb: Notifier to be added to the given bus type.
*
* The nb member of @clknb is not expected to be initialized and its
- * notifier_call member will be replaced with pm_runtime_clk_notify(). However,
+ * notifier_call member will be replaced with pm_clk_notify(). However,
* the remaining members of @clknb should be populated prior to calling this
* routine.
*/
-void pm_runtime_clk_add_notifier(struct bus_type *bus,
+void pm_clk_add_notifier(struct bus_type *bus,
struct pm_clk_notifier_block *clknb)
{
if (!bus || !clknb)
return;
- clknb->nb.notifier_call = pm_runtime_clk_notify;
+ clknb->nb.notifier_call = pm_clk_notify;
bus_register_notifier(bus, &clknb->nb);
}
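A bus type would typically hook into the renamed notifier API along these lines (hypothetical bus, domain, and clock connection IDs; as the comment above notes, the nb.notifier_call member is filled in by pm_clk_add_notifier() itself):

static struct pm_clk_notifier_block foo_bus_clk_notifier = {
        .pm_domain = &foo_bus_pm_domain,        /* copied to dev->pm_domain on add */
        .con_ids = { "ick", "fck", NULL },      /* passed to pm_clk_add() per device */
};

static int __init foo_bus_pm_init(void)
{
        pm_clk_add_notifier(&foo_bus_type, &foo_bus_clk_notifier);
        return 0;
}
core_initcall(foo_bus_pm_init);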
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
new file mode 100644
index 00000000000..1c374579407
--- /dev/null
+++ b/drivers/base/power/domain.c
@@ -0,0 +1,1272 @@
+/*
+ * drivers/base/power/domain.c - Common code related to device power domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+
+static LIST_HEAD(gpd_list);
+static DEFINE_MUTEX(gpd_list_lock);
+
+#ifdef CONFIG_PM
+
+static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+{
+ if (IS_ERR_OR_NULL(dev->pm_domain))
+ return ERR_PTR(-EINVAL);
+
+ return pd_to_genpd(dev->pm_domain);
+}
+
+static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+{
+ if (!WARN_ON(genpd->sd_count == 0))
+ genpd->sd_count--;
+}
+
+static void genpd_acquire_lock(struct generic_pm_domain *genpd)
+{
+ DEFINE_WAIT(wait);
+
+ mutex_lock(&genpd->lock);
+ /*
+ * Wait for the domain to transition into either the active,
+ * or the power off state.
+ */
+ for (;;) {
+ prepare_to_wait(&genpd->status_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (genpd->status == GPD_STATE_ACTIVE
+ || genpd->status == GPD_STATE_POWER_OFF)
+ break;
+ mutex_unlock(&genpd->lock);
+
+ schedule();
+
+ mutex_lock(&genpd->lock);
+ }
+ finish_wait(&genpd->status_wait_queue, &wait);
+}
+
+static void genpd_release_lock(struct generic_pm_domain *genpd)
+{
+ mutex_unlock(&genpd->lock);
+}
+
+static void genpd_set_active(struct generic_pm_domain *genpd)
+{
+ if (genpd->resume_count == 0)
+ genpd->status = GPD_STATE_ACTIVE;
+}
+
+/**
+ * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+ * @genpd: PM domain to power up.
+ *
+ * Restore power to @genpd and all of its parents so that it is possible to
+ * resume a device belonging to it.
+ */
+int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+ struct generic_pm_domain *parent = genpd->parent;
+ int ret = 0;
+
+ start:
+ if (parent) {
+ genpd_acquire_lock(parent);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ mutex_lock(&genpd->lock);
+ }
+
+ if (genpd->status == GPD_STATE_ACTIVE
+ || (genpd->prepared_count > 0 && genpd->suspend_power_off))
+ goto out;
+
+ if (genpd->status != GPD_STATE_POWER_OFF) {
+ genpd_set_active(genpd);
+ goto out;
+ }
+
+ if (parent && parent->status != GPD_STATE_ACTIVE) {
+ mutex_unlock(&genpd->lock);
+ genpd_release_lock(parent);
+
+ ret = pm_genpd_poweron(parent);
+ if (ret)
+ return ret;
+
+ goto start;
+ }
+
+ if (genpd->power_on) {
+ ret = genpd->power_on(genpd);
+ if (ret)
+ goto out;
+ }
+
+ genpd_set_active(genpd);
+ if (parent)
+ parent->sd_count++;
+
+ out:
+ mutex_unlock(&genpd->lock);
+ if (parent)
+ genpd_release_lock(parent);
+
+ return ret;
+}
+
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_RUNTIME
+
+/**
+ * __pm_genpd_save_device - Save the pre-suspend state of a device.
+ * @dle: Device list entry of the device to save the state of.
+ * @genpd: PM domain the device belongs to.
+ */
+static int __pm_genpd_save_device(struct dev_list_entry *dle,
+ struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
+{
+ struct device *dev = dle->dev;
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (dle->need_restore)
+ return 0;
+
+ mutex_unlock(&genpd->lock);
+
+ if (drv && drv->pm && drv->pm->runtime_suspend) {
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ ret = drv->pm->runtime_suspend(dev);
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+ }
+
+ mutex_lock(&genpd->lock);
+
+ if (!ret)
+ dle->need_restore = true;
+
+ return ret;
+}
+
+/**
+ * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
+ * @dle: Device list entry of the device to restore the state of.
+ * @genpd: PM domain the device belongs to.
+ */
+static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+ struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
+{
+ struct device *dev = dle->dev;
+ struct device_driver *drv = dev->driver;
+
+ if (!dle->need_restore)
+ return;
+
+ mutex_unlock(&genpd->lock);
+
+ if (drv && drv->pm && drv->pm->runtime_resume) {
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ drv->pm->runtime_resume(dev);
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+ }
+
+ mutex_lock(&genpd->lock);
+
+ dle->need_restore = false;
+}
+
+/**
+ * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
+ * @genpd: PM domain to check.
+ *
+ * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
+ * a "power off" operation, which means that a "power on" has occured in the
+ * meantime, or if its resume_count field is different from zero, which means
+ * that one of its devices has been resumed in the meantime.
+ */
+static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
+{
+ return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+}
+
+/**
+ * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
+ * @genpd: PM domain to power off.
+ *
+ * Queue up the execution of pm_genpd_poweroff() unless it's already been done
+ * before.
+ */
+void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+ if (!work_pending(&genpd->power_off_work))
+ queue_work(pm_wq, &genpd->power_off_work);
+}
+
+/**
+ * pm_genpd_poweroff - Remove power from a given PM domain.
+ * @genpd: PM domain to power down.
+ *
+ * If all of the @genpd's devices have been suspended and all of its subdomains
+ * have been powered down, run the runtime suspend callbacks provided by all of
+ * the @genpd's devices' drivers and remove power from @genpd.
+ */
+static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
+{
+ struct generic_pm_domain *parent;
+ struct dev_list_entry *dle;
+ unsigned int not_suspended;
+ int ret = 0;
+
+ start:
+ /*
+ * Do not try to power off the domain in the following situations:
+ * (1) The domain is already in the "power off" state.
+ * (2) System suspend is in progress.
+ * (3) One of the domain's devices is being resumed right now.
+ */
+ if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
+ || genpd->resume_count > 0)
+ return 0;
+
+ if (genpd->sd_count > 0)
+ return -EBUSY;
+
+ not_suspended = 0;
+ list_for_each_entry(dle, &genpd->dev_list, node)
+ if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
+ not_suspended++;
+
+ if (not_suspended > genpd->in_progress)
+ return -EBUSY;
+
+ if (genpd->poweroff_task) {
+ /*
+ * Another instance of pm_genpd_poweroff() is executing
+ * callbacks, so tell it to start over and return.
+ */
+ genpd->status = GPD_STATE_REPEAT;
+ return 0;
+ }
+
+ if (genpd->gov && genpd->gov->power_down_ok) {
+ if (!genpd->gov->power_down_ok(&genpd->domain))
+ return -EAGAIN;
+ }
+
+ genpd->status = GPD_STATE_BUSY;
+ genpd->poweroff_task = current;
+
+ list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
+ ret = __pm_genpd_save_device(dle, genpd);
+ if (ret) {
+ genpd_set_active(genpd);
+ goto out;
+ }
+
+ if (genpd_abort_poweroff(genpd))
+ goto out;
+
+ if (genpd->status == GPD_STATE_REPEAT) {
+ genpd->poweroff_task = NULL;
+ goto start;
+ }
+ }
+
+ parent = genpd->parent;
+ if (parent) {
+ mutex_unlock(&genpd->lock);
+
+ genpd_acquire_lock(parent);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+
+ if (genpd_abort_poweroff(genpd)) {
+ genpd_release_lock(parent);
+ goto out;
+ }
+ }
+
+ if (genpd->power_off) {
+ ret = genpd->power_off(genpd);
+ if (ret == -EBUSY) {
+ genpd_set_active(genpd);
+ if (parent)
+ genpd_release_lock(parent);
+
+ goto out;
+ }
+ }
+
+ genpd->status = GPD_STATE_POWER_OFF;
+
+ if (parent) {
+ genpd_sd_counter_dec(parent);
+ if (parent->sd_count == 0)
+ genpd_queue_power_off_work(parent);
+
+ genpd_release_lock(parent);
+ }
+
+ out:
+ genpd->poweroff_task = NULL;
+ wake_up_all(&genpd->status_wait_queue);
+ return ret;
+}
+
+/**
+ * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
+ * @work: Work structure used for scheduling the execution of this function.
+ */
+static void genpd_power_off_work_fn(struct work_struct *work)
+{
+ struct generic_pm_domain *genpd;
+
+ genpd = container_of(work, struct generic_pm_domain, power_off_work);
+
+ genpd_acquire_lock(genpd);
+ pm_genpd_poweroff(genpd);
+ genpd_release_lock(genpd);
+}
+
+/**
+ * pm_genpd_runtime_suspend - Suspend a device belonging to an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a runtime suspend of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_runtime_suspend(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->stop_device) {
+ int ret = genpd->stop_device(dev);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&genpd->lock);
+ genpd->in_progress++;
+ pm_genpd_poweroff(genpd);
+ genpd->in_progress--;
+ mutex_unlock(&genpd->lock);
+
+ return 0;
+}
+
+/**
+ * __pm_genpd_runtime_resume - Resume a device belonging to an I/O PM domain.
+ * @dev: Device to resume.
+ * @genpd: PM domain the device belongs to.
+ */
+static void __pm_genpd_runtime_resume(struct device *dev,
+ struct generic_pm_domain *genpd)
+{
+ struct dev_list_entry *dle;
+
+ list_for_each_entry(dle, &genpd->dev_list, node) {
+ if (dle->dev == dev) {
+ __pm_genpd_restore_device(dle, genpd);
+ break;
+ }
+ }
+}
+
+/**
+ * pm_genpd_runtime_resume - Resume a device belonging to an I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Carry out a runtime resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_runtime_resume(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ DEFINE_WAIT(wait);
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ ret = pm_genpd_poweron(genpd);
+ if (ret)
+ return ret;
+
+ mutex_lock(&genpd->lock);
+ genpd->status = GPD_STATE_BUSY;
+ genpd->resume_count++;
+ for (;;) {
+ prepare_to_wait(&genpd->status_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ /*
+ * If current is the powering off task, we have been called
+ * reentrantly from one of the device callbacks, so we should
+ * not wait.
+ */
+ if (!genpd->poweroff_task || genpd->poweroff_task == current)
+ break;
+ mutex_unlock(&genpd->lock);
+
+ schedule();
+
+ mutex_lock(&genpd->lock);
+ }
+ finish_wait(&genpd->status_wait_queue, &wait);
+ __pm_genpd_runtime_resume(dev, genpd);
+ genpd->resume_count--;
+ genpd_set_active(genpd);
+ wake_up_all(&genpd->status_wait_queue);
+ mutex_unlock(&genpd->lock);
+
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
+ */
+void pm_genpd_poweroff_unused(void)
+{
+ struct generic_pm_domain *genpd;
+
+ mutex_lock(&gpd_list_lock);
+
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node)
+ genpd_queue_power_off_work(genpd);
+
+ mutex_unlock(&gpd_list_lock);
+}
+
+#else
+
+static inline void genpd_power_off_work_fn(struct work_struct *work) {}
+static inline void __pm_genpd_runtime_resume(struct device *dev,
+ struct generic_pm_domain *genpd) {}
+
+#define pm_genpd_runtime_suspend NULL
+#define pm_genpd_runtime_resume NULL
+
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+
+/**
+ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
+ * @genpd: PM domain to power off, if possible.
+ *
+ * Check if the given PM domain can be powered off (during system suspend or
+ * hibernation) and do that if so. Also, in that case propagate to its parent.
+ *
+ * This function is only called in "noirq" stages of system power transitions,
+ * so it need not acquire locks (all of the "noirq" callbacks are executed
+ * sequentially, so it is guaranteed that it will never run twice in parallel).
+ */
+static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+{
+ struct generic_pm_domain *parent = genpd->parent;
+
+ if (genpd->status == GPD_STATE_POWER_OFF)
+ return;
+
+ if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
+ return;
+
+ if (genpd->power_off)
+ genpd->power_off(genpd);
+
+ genpd->status = GPD_STATE_POWER_OFF;
+ if (parent) {
+ genpd_sd_counter_dec(parent);
+ pm_genpd_sync_poweroff(parent);
+ }
+}
+
+/**
+ * resume_needed - Check whether to resume a device before system suspend.
+ * @dev: Device to check.
+ * @genpd: PM domain the device belongs to.
+ *
+ * There are two cases in which a device that can wake up the system from sleep
+ * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
+ * to wake up the system and it has to remain active for this purpose while the
+ * system is in the sleep state and (2) if the device is not enabled to wake up
+ * the system from sleep states and it generally doesn't generate wakeup signals
+ * by itself (those signals are generated on its behalf by other parts of the
+ * system). In the latter case it may be necessary to reconfigure the device's
+ * wakeup settings during system suspend, because it may have been set up to
+ * signal remote wakeup from the system's working state as needed by runtime PM.
+ * Return 'true' in either of the above cases.
+ */
+static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
+{
+ bool active_wakeup;
+
+ if (!device_can_wakeup(dev))
+ return false;
+
+ active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
+ return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
+}
+
+/**
+ * pm_genpd_prepare - Start power transition of a device in a PM domain.
+ * @dev: Device to start the transition of.
+ *
+ * Start a power transition of a device (during a system-wide power transition)
+ * under the assumption that its pm_domain field points to the domain member of
+ * an object of type struct generic_pm_domain representing a PM domain
+ * consisting of I/O devices.
+ */
+static int pm_genpd_prepare(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ /*
+ * If a wakeup request is pending for the device, it should be woken up
+ * at this point and a system wakeup event should be reported if it's
+ * set up to wake up the system from sleep states.
+ */
+ pm_runtime_get_noresume(dev);
+ if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+ pm_wakeup_event(dev, 0);
+
+ if (pm_wakeup_pending()) {
+ pm_runtime_put_sync(dev);
+ return -EBUSY;
+ }
+
+ if (resume_needed(dev, genpd))
+ pm_runtime_resume(dev);
+
+ genpd_acquire_lock(genpd);
+
+ if (genpd->prepared_count++ == 0)
+ genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
+
+ genpd_release_lock(genpd);
+
+ if (genpd->suspend_power_off) {
+ pm_runtime_put_noidle(dev);
+ return 0;
+ }
+
+ /*
+ * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
+ * so pm_genpd_poweron() will return immediately, but if the device
+ * is suspended (e.g. it's been stopped by .stop_device()), we need
+ * to make it operational.
+ */
+ pm_runtime_resume(dev);
+ __pm_runtime_disable(dev, false);
+
+ ret = pm_generic_prepare(dev);
+ if (ret) {
+ mutex_lock(&genpd->lock);
+
+ if (--genpd->prepared_count == 0)
+ genpd->suspend_power_off = false;
+
+ mutex_unlock(&genpd->lock);
+ pm_runtime_enable(dev);
+ }
+
+ pm_runtime_put_sync(dev);
+ return ret;
+}
+
+/**
+ * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Suspend a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_suspend(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
+}
+
+/**
+ * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a late suspend of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_suspend_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ ret = pm_generic_suspend_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (device_may_wakeup(dev)
+ && genpd->active_wakeup && genpd->active_wakeup(dev))
+ return 0;
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+
+ /*
+ * Since all of the "noirq" callbacks are executed sequentially, it is
+ * guaranteed that this function will never run twice in parallel for
+ * the same PM domain, so it is not necessary to use locking here.
+ */
+ genpd->suspended_count++;
+ pm_genpd_sync_poweroff(genpd);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Carry out an early resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_resume_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ /*
+ * Since all of the "noirq" callbacks are executed sequentially, it is
+ * guaranteed that this function will never run twice in parallel for
+ * the same PM domain, so it is not necessary to use locking here.
+ */
+ pm_genpd_poweron(genpd);
+ genpd->suspended_count--;
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ return pm_generic_resume_noirq(dev);
+}
+
+/**
+ * pm_genpd_resume - Resume a device belonging to an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Resume a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_resume(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
+}
+
+/**
+ * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
+ * @dev: Device to freeze.
+ *
+ * Freeze a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_freeze(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
+}
+
+/**
+ * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
+ * @dev: Device to freeze.
+ *
+ * Carry out a late freeze of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_freeze_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ ret = pm_generic_freeze_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
+ * @dev: Device to thaw.
+ *
+ * Carry out an early thaw of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_thaw_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ return pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
+ * @dev: Device to thaw.
+ *
+ * Thaw a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_thaw(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
+}
+
+/**
+ * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Power off a device under the assumption that its pm_domain field points to
+ * the domain member of an object of type struct generic_pm_domain representing
+ * a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_dev_poweroff(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
+}
+
+/**
+ * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a late powering off of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_dev_poweroff_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ ret = pm_generic_poweroff_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (device_may_wakeup(dev)
+ && genpd->active_wakeup && genpd->active_wakeup(dev))
+ return 0;
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+
+ /*
+ * Since all of the "noirq" callbacks are executed sequentially, it is
+ * guaranteed that this function will never run twice in parallel for
+ * the same PM domain, so it is not necessary to use locking here.
+ */
+ genpd->suspended_count++;
+ pm_genpd_sync_poweroff(genpd);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Carry out an early restore of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_restore_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ /*
+ * Since all of the "noirq" callbacks are executed sequentially, it is
+ * guaranteed that this function will never run twice in parallel for
+ * the same PM domain, so it is not necessary to use locking here.
+ */
+ genpd->status = GPD_STATE_POWER_OFF;
+ if (genpd->suspend_power_off) {
+ /*
+ * The boot kernel might put the domain into the power on state,
+ * so make sure it really is powered off.
+ */
+ if (genpd->power_off)
+ genpd->power_off(genpd);
+ return 0;
+ }
+
+ pm_genpd_poweron(genpd);
+ genpd->suspended_count--;
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ return pm_generic_restore_noirq(dev);
+}
+
+/**
+ * pm_genpd_restore - Restore a device belonging to an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Restore a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_restore(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
+}
+
+/**
+ * pm_genpd_complete - Complete power transition of a device in a power domain.
+ * @dev: Device to complete the transition of.
+ *
+ * Complete a power transition of a device (during a system-wide power
+ * transition) under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static void pm_genpd_complete(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ bool run_complete;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return;
+
+ mutex_lock(&genpd->lock);
+
+ run_complete = !genpd->suspend_power_off;
+ if (--genpd->prepared_count == 0)
+ genpd->suspend_power_off = false;
+
+ mutex_unlock(&genpd->lock);
+
+ if (run_complete) {
+ pm_generic_complete(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
+ }
+}
+
+#else
+
+#define pm_genpd_prepare NULL
+#define pm_genpd_suspend NULL
+#define pm_genpd_suspend_noirq NULL
+#define pm_genpd_resume_noirq NULL
+#define pm_genpd_resume NULL
+#define pm_genpd_freeze NULL
+#define pm_genpd_freeze_noirq NULL
+#define pm_genpd_thaw_noirq NULL
+#define pm_genpd_thaw NULL
+#define pm_genpd_dev_poweroff_noirq NULL
+#define pm_genpd_dev_poweroff NULL
+#define pm_genpd_restore_noirq NULL
+#define pm_genpd_restore NULL
+#define pm_genpd_complete NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * pm_genpd_add_device - Add a device to an I/O PM domain.
+ * @genpd: PM domain to add the device to.
+ * @dev: Device to be added.
+ */
+int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+{
+ struct dev_list_entry *dle;
+ int ret = 0;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+ return -EINVAL;
+
+ genpd_acquire_lock(genpd);
+
+ if (genpd->status == GPD_STATE_POWER_OFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (genpd->prepared_count > 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ list_for_each_entry(dle, &genpd->dev_list, node)
+ if (dle->dev == dev) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dle = kzalloc(sizeof(*dle), GFP_KERNEL);
+ if (!dle) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dle->dev = dev;
+ dle->need_restore = false;
+ list_add_tail(&dle->node, &genpd->dev_list);
+ genpd->device_count++;
+
+ spin_lock_irq(&dev->power.lock);
+ dev->pm_domain = &genpd->domain;
+ spin_unlock_irq(&dev->power.lock);
+
+ out:
+ genpd_release_lock(genpd);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_remove_device - Remove a device from an I/O PM domain.
+ * @genpd: PM domain to remove the device from.
+ * @dev: Device to be removed.
+ */
+int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ struct dev_list_entry *dle;
+ int ret = -EINVAL;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+ return -EINVAL;
+
+ genpd_acquire_lock(genpd);
+
+ if (genpd->prepared_count > 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ list_for_each_entry(dle, &genpd->dev_list, node) {
+ if (dle->dev != dev)
+ continue;
+
+ spin_lock_irq(&dev->power.lock);
+ dev->pm_domain = NULL;
+ spin_unlock_irq(&dev->power.lock);
+
+ genpd->device_count--;
+ list_del(&dle->node);
+ kfree(dle);
+
+ ret = 0;
+ break;
+ }
+
+ out:
+ genpd_release_lock(genpd);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
+ * @genpd: Master PM domain to add the subdomain to.
+ * @new_subdomain: Subdomain to be added.
+ */
+int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *new_subdomain)
+{
+ struct generic_pm_domain *subdomain;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
+ return -EINVAL;
+
+ start:
+ genpd_acquire_lock(genpd);
+ mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+
+ if (new_subdomain->status != GPD_STATE_POWER_OFF
+ && new_subdomain->status != GPD_STATE_ACTIVE) {
+ mutex_unlock(&new_subdomain->lock);
+ genpd_release_lock(genpd);
+ goto start;
+ }
+
+ if (genpd->status == GPD_STATE_POWER_OFF
+ && new_subdomain->status != GPD_STATE_POWER_OFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
+ if (subdomain == new_subdomain) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
+ new_subdomain->parent = genpd;
+ if (new_subdomain->status != GPD_STATE_POWER_OFF)
+ genpd->sd_count++;
+
+ out:
+ mutex_unlock(&new_subdomain->lock);
+ genpd_release_lock(genpd);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
+ * @genpd: Master PM domain to remove the subdomain from.
+ * @target: Subdomain to be removed.
+ */
+int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *target)
+{
+ struct generic_pm_domain *subdomain;
+ int ret = -EINVAL;
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
+ return -EINVAL;
+
+ start:
+ genpd_acquire_lock(genpd);
+
+ list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
+ if (subdomain != target)
+ continue;
+
+ mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+
+ if (subdomain->status != GPD_STATE_POWER_OFF
+ && subdomain->status != GPD_STATE_ACTIVE) {
+ mutex_unlock(&subdomain->lock);
+ genpd_release_lock(genpd);
+ goto start;
+ }
+
+ list_del(&subdomain->sd_node);
+ subdomain->parent = NULL;
+ if (subdomain->status != GPD_STATE_POWER_OFF)
+ genpd_sd_counter_dec(genpd);
+
+ mutex_unlock(&subdomain->lock);
+
+ ret = 0;
+ break;
+ }
+
+ genpd_release_lock(genpd);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_init - Initialize a generic I/O PM domain object.
+ * @genpd: PM domain object to initialize.
+ * @gov: PM domain governor to associate with the domain (may be NULL).
+ * @is_off: Initial state of the domain: true if it starts powered off.
+ */
+void pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off)
+{
+ if (IS_ERR_OR_NULL(genpd))
+ return;
+
+ INIT_LIST_HEAD(&genpd->sd_node);
+ genpd->parent = NULL;
+ INIT_LIST_HEAD(&genpd->dev_list);
+ INIT_LIST_HEAD(&genpd->sd_list);
+ mutex_init(&genpd->lock);
+ genpd->gov = gov;
+ INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
+ genpd->in_progress = 0;
+ genpd->sd_count = 0;
+ genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
+ init_waitqueue_head(&genpd->status_wait_queue);
+ genpd->poweroff_task = NULL;
+ genpd->resume_count = 0;
+ genpd->device_count = 0;
+ genpd->suspended_count = 0;
+ genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
+ genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
+ genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
+ genpd->domain.ops.prepare = pm_genpd_prepare;
+ genpd->domain.ops.suspend = pm_genpd_suspend;
+ genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
+ genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
+ genpd->domain.ops.resume = pm_genpd_resume;
+ genpd->domain.ops.freeze = pm_genpd_freeze;
+ genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
+ genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
+ genpd->domain.ops.thaw = pm_genpd_thaw;
+ genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
+ genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
+ genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
+ genpd->domain.ops.restore = pm_genpd_restore;
+ genpd->domain.ops.complete = pm_genpd_complete;
+ mutex_lock(&gpd_list_lock);
+ list_add(&genpd->gpd_list_node, &gpd_list);
+ mutex_unlock(&gpd_list_lock);
+}
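
To illustrate how the new interface is meant to be consumed, here is a minimal, hypothetical platform sketch; the foo_* names and the callback bodies are invented, and only pm_genpd_init(), pm_genpd_add_device() and the power_on/power_off callbacks come from the code above.

/* Hypothetical platform code showing how a generic PM domain is set up. */
static int foo_pd_power_off(struct generic_pm_domain *genpd)
{
	/* Platform-specific: gate clocks and cut power to the island. */
	return 0;
}

static int foo_pd_power_on(struct generic_pm_domain *genpd)
{
	/* Platform-specific: restore power to the island. */
	return 0;
}

static struct generic_pm_domain foo_pd = {
	.power_off = foo_pd_power_off,
	.power_on = foo_pd_power_on,
};

static void foo_platform_pm_setup(struct device *dev)
{
	/* Register the domain, initially powered on, with no governor. */
	pm_genpd_init(&foo_pd, NULL, false);

	/* Runtime PM of @dev is now routed through the domain's callbacks. */
	pm_genpd_add_device(&foo_pd, dev);
}
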
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index cb3bb368681..9508df71274 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -94,12 +94,13 @@ int pm_generic_prepare(struct device *dev)
* __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
* @dev: Device to handle.
* @event: PM transition of the system under way.
+ * @noirq: Whether or not this is the "noirq" stage.
*
* If the device has not been suspended at run time, execute the
* suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
* return its error code. Otherwise, return zero.
*/
-static int __pm_generic_call(struct device *dev, int event)
+static int __pm_generic_call(struct device *dev, int event, bool noirq)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int (*callback)(struct device *);
@@ -109,16 +110,16 @@ static int __pm_generic_call(struct device *dev, int event)
switch (event) {
case PM_EVENT_SUSPEND:
- callback = pm->suspend;
+ callback = noirq ? pm->suspend_noirq : pm->suspend;
break;
case PM_EVENT_FREEZE:
- callback = pm->freeze;
+ callback = noirq ? pm->freeze_noirq : pm->freeze;
break;
case PM_EVENT_HIBERNATE:
- callback = pm->poweroff;
+ callback = noirq ? pm->poweroff_noirq : pm->poweroff;
break;
case PM_EVENT_THAW:
- callback = pm->thaw;
+ callback = noirq ? pm->thaw_noirq : pm->thaw;
break;
default:
callback = NULL;
@@ -129,42 +130,82 @@ static int __pm_generic_call(struct device *dev, int event)
}
/**
+ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend_noirq(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
+
+/**
* pm_generic_suspend - Generic suspend callback for subsystems.
* @dev: Device to suspend.
*/
int pm_generic_suspend(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_SUSPEND);
+ return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
/**
+ * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_noirq(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
+
+/**
* pm_generic_freeze - Generic freeze callback for subsystems.
* @dev: Device to freeze.
*/
int pm_generic_freeze(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_FREEZE);
+ return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);
/**
+ * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_noirq(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
+
+/**
* pm_generic_poweroff - Generic poweroff callback for subsystems.
* @dev: Device to handle.
*/
int pm_generic_poweroff(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_HIBERNATE);
+ return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
/**
+ * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_noirq(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_THAW, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
+
+/**
* pm_generic_thaw - Generic thaw callback for subsystems.
* @dev: Device to thaw.
*/
int pm_generic_thaw(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_THAW);
+ return __pm_generic_call(dev, PM_EVENT_THAW, false);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);
@@ -172,12 +213,13 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
* __pm_generic_resume - Generic resume/restore callback for subsystems.
* @dev: Device to handle.
* @event: PM transition of the system under way.
+ * @noirq: Whether or not this is the "noirq" stage.
*
* Execute the resume/restore callback provided by the @dev's driver, if
* defined. If it returns 0, change the device's runtime PM status to 'active'.
* Return the callback's error code.
*/
-static int __pm_generic_resume(struct device *dev, int event)
+static int __pm_generic_resume(struct device *dev, int event, bool noirq)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int (*callback)(struct device *);
@@ -188,10 +230,10 @@ static int __pm_generic_resume(struct device *dev, int event)
switch (event) {
case PM_EVENT_RESUME:
- callback = pm->resume;
+ callback = noirq ? pm->resume_noirq : pm->resume;
break;
case PM_EVENT_RESTORE:
- callback = pm->restore;
+ callback = noirq ? pm->restore_noirq : pm->restore;
break;
default:
callback = NULL;
@@ -202,7 +244,7 @@ static int __pm_generic_resume(struct device *dev, int event)
return 0;
ret = callback(dev);
- if (!ret && pm_runtime_enabled(dev)) {
+ if (!ret && !noirq && pm_runtime_enabled(dev)) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -212,22 +254,42 @@ static int __pm_generic_resume(struct device *dev, int event)
}
/**
+ * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_noirq(struct device *dev)
+{
+ return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
+
+/**
* pm_generic_resume - Generic resume callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESUME);
+ return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
/**
+ * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore_noirq(struct device *dev)
+{
+ return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
+
+/**
* pm_generic_restore - Generic restore callback for subsystems.
* @dev: Device to restore.
*/
int pm_generic_restore(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESTORE);
+ return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
@@ -256,11 +318,17 @@ struct dev_pm_ops generic_subsys_pm_ops = {
#ifdef CONFIG_PM_SLEEP
.prepare = pm_generic_prepare,
.suspend = pm_generic_suspend,
+ .suspend_noirq = pm_generic_suspend_noirq,
.resume = pm_generic_resume,
+ .resume_noirq = pm_generic_resume_noirq,
.freeze = pm_generic_freeze,
+ .freeze_noirq = pm_generic_freeze_noirq,
.thaw = pm_generic_thaw,
+ .thaw_noirq = pm_generic_thaw_noirq,
.poweroff = pm_generic_poweroff,
+ .poweroff_noirq = pm_generic_poweroff_noirq,
.restore = pm_generic_restore,
+ .restore_noirq = pm_generic_restore_noirq,
.complete = pm_generic_complete,
#endif
#ifdef CONFIG_PM_RUNTIME
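
As a usage sketch, a subsystem with no special requirements could now route every sleep callback through the generic helpers; the foo bus type below is hypothetical, and only the pm_generic_*() functions themselves are introduced by this hunk.

/* Hypothetical bus type wiring up the generic callbacks, including noirq. */
static const struct dev_pm_ops foo_bus_pm_ops = {
	.suspend	= pm_generic_suspend,
	.suspend_noirq	= pm_generic_suspend_noirq,
	.resume		= pm_generic_resume,
	.resume_noirq	= pm_generic_resume_noirq,
	.freeze		= pm_generic_freeze,
	.freeze_noirq	= pm_generic_freeze_noirq,
	.thaw		= pm_generic_thaw,
	.thaw_noirq	= pm_generic_thaw_noirq,
	.poweroff	= pm_generic_poweroff,
	.poweroff_noirq	= pm_generic_poweroff_noirq,
	.restore	= pm_generic_restore,
	.restore_noirq	= pm_generic_restore_noirq,
};

static struct bus_type foo_bus_type = {
	.name	= "foo",
	.pm	= &foo_bus_pm_ops,
};
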
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index aa632020774..a85459126bc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -57,7 +57,8 @@ static int async_error;
*/
void device_pm_init(struct device *dev)
{
- dev->power.in_suspend = false;
+ dev->power.is_prepared = false;
+ dev->power.is_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
@@ -91,7 +92,7 @@ void device_pm_add(struct device *dev)
pr_debug("PM: Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
mutex_lock(&dpm_list_mtx);
- if (dev->parent && dev->parent->power.in_suspend)
+ if (dev->parent && dev->parent->power.is_prepared)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
@@ -424,9 +425,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (dev->pwr_domain) {
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "EARLY power domain ");
- error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+ error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "EARLY type ");
error = pm_noirq_op(dev, dev->type->pm, state);
@@ -504,6 +505,7 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
int error = 0;
+ bool put = false;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
@@ -511,11 +513,21 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
dpm_wait(dev->parent, async);
device_lock(dev);
- dev->power.in_suspend = false;
+ /*
+ * This is a fib. But we'll allow new children to be added below
+ * a resumed device, even if the device hasn't been completed yet.
+ */
+ dev->power.is_prepared = false;
- if (dev->pwr_domain) {
+ if (!dev->power.is_suspended)
+ goto Unlock;
+
+ pm_runtime_enable(dev);
+ put = true;
+
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "power domain ");
- error = pm_op(dev, &dev->pwr_domain->ops, state);
+ error = pm_op(dev, &dev->pm_domain->ops, state);
goto End;
}
@@ -548,10 +560,17 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
}
End:
+ dev->power.is_suspended = false;
+
+ Unlock:
device_unlock(dev);
complete_all(&dev->power.completion);
TRACE_RESUME(error);
+
+ if (put)
+ pm_runtime_put_sync(dev);
+
return error;
}
@@ -630,10 +649,10 @@ static void device_complete(struct device *dev, pm_message_t state)
{
device_lock(dev);
- if (dev->pwr_domain) {
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "completing power domain ");
- if (dev->pwr_domain->ops.complete)
- dev->pwr_domain->ops.complete(dev);
+ if (dev->pm_domain->ops.complete)
+ dev->pm_domain->ops.complete(dev);
} else if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "completing type ");
if (dev->type->pm->complete)
@@ -670,7 +689,7 @@ void dpm_complete(pm_message_t state)
struct device *dev = to_device(dpm_prepared_list.prev);
get_device(dev);
- dev->power.in_suspend = false;
+ dev->power.is_prepared = false;
list_move(&dev->power.entry, &list);
mutex_unlock(&dpm_list_mtx);
@@ -733,9 +752,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
int error;
- if (dev->pwr_domain) {
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "LATE power domain ");
- error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+ error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
if (error)
return error;
} else if (dev->type && dev->type->pm) {
@@ -832,19 +851,25 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
int error = 0;
dpm_wait_for_children(dev, async);
- device_lock(dev);
if (async_error)
- goto End;
+ return 0;
+
+ pm_runtime_get_noresume(dev);
+ if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+ pm_wakeup_event(dev, 0);
if (pm_wakeup_pending()) {
+ pm_runtime_put_sync(dev);
async_error = -EBUSY;
- goto End;
+ return 0;
}
- if (dev->pwr_domain) {
+ device_lock(dev);
+
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "power domain ");
- error = pm_op(dev, &dev->pwr_domain->ops, state);
+ error = pm_op(dev, &dev->pm_domain->ops, state);
goto End;
}
@@ -877,11 +902,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
End:
+ dev->power.is_suspended = !error;
+
device_unlock(dev);
complete_all(&dev->power.completion);
- if (error)
+ if (error) {
+ pm_runtime_put_sync(dev);
async_error = error;
+ } else if (dev->power.is_suspended) {
+ __pm_runtime_disable(dev, false);
+ }
return error;
}
@@ -968,11 +999,11 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
- if (dev->pwr_domain) {
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "preparing power domain ");
- if (dev->pwr_domain->ops.prepare)
- error = dev->pwr_domain->ops.prepare(dev);
- suspend_report_result(dev->pwr_domain->ops.prepare, error);
+ if (dev->pm_domain->ops.prepare)
+ error = dev->pm_domain->ops.prepare(dev);
+ suspend_report_result(dev->pm_domain->ops.prepare, error);
if (error)
goto End;
} else if (dev->type && dev->type->pm) {
@@ -1021,13 +1052,7 @@ int dpm_prepare(pm_message_t state)
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- pm_runtime_get_noresume(dev);
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
- pm_wakeup_event(dev, 0);
-
- pm_runtime_put_sync(dev);
- error = pm_wakeup_pending() ?
- -EBUSY : device_prepare(dev, state);
+ error = device_prepare(dev, state);
mutex_lock(&dpm_list_mtx);
if (error) {
@@ -1042,7 +1067,7 @@ int dpm_prepare(pm_message_t state)
put_device(dev);
break;
}
- dev->power.in_suspend = true;
+ dev->power.is_prepared = true;
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
put_device(dev);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 56a6899f5e9..b23de185cb0 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -453,7 +453,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
static int opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
- struct device_opp *tmp_dev_opp, *dev_opp = NULL;
+ struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
int r = 0;
@@ -625,4 +625,21 @@ int opp_init_cpufreq_table(struct device *dev,
return 0;
}
+
+/**
+ * opp_free_cpufreq_table() - free the cpufreq table
+ * @dev: device for which we do this operation
+ * @table: table to free
+ *
+ * Free up the table allocated by opp_init_cpufreq_table
+ */
+void opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ if (!table)
+ return;
+
+ kfree(*table);
+ *table = NULL;
+}
#endif /* CONFIG_CPU_FREQ */
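
A short sketch of the intended pairing with the existing opp_init_cpufreq_table(); the driver context is hypothetical.

/* Hypothetical cpufreq driver using the OPP table helpers. */
static struct cpufreq_frequency_table *foo_freq_table;

static int foo_cpufreq_setup(struct device *cpu_dev)
{
	/* Build a cpufreq table from the OPPs registered for cpu_dev. */
	return opp_init_cpufreq_table(cpu_dev, &foo_freq_table);
}

static void foo_cpufreq_teardown(struct device *cpu_dev)
{
	/* Free the table and reset the pointer to NULL. */
	opp_free_cpufreq_table(cpu_dev, &foo_freq_table);
}
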
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0d4587b15c5..acb3f83b807 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1,5 +1,5 @@
/*
- * drivers/base/power/runtime.c - Helper functions for device run-time PM
+ * drivers/base/power/runtime.c - Helper functions for device runtime PM
*
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
* Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
@@ -135,8 +135,9 @@ static int rpm_check_suspend_allowed(struct device *dev)
if (dev->power.runtime_error)
retval = -EINVAL;
- else if (atomic_read(&dev->power.usage_count) > 0
- || dev->power.disable_depth > 0)
+ else if (dev->power.disable_depth > 0)
+ retval = -EACCES;
+ else if (atomic_read(&dev->power.usage_count) > 0)
retval = -EAGAIN;
else if (!pm_children_suspended(dev))
retval = -EBUSY;
@@ -158,7 +159,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
* @dev: Device to notify the bus type about.
* @rpmflags: Flag bits.
*
- * Check if the device's run-time PM status allows it to be suspended. If
+ * Check if the device's runtime PM status allows it to be suspended. If
* another idle notification has been started earlier, return immediately. If
* the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
* run the ->runtime_idle() callback directly.
@@ -213,8 +214,8 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.idle_notification = true;
- if (dev->pwr_domain)
- callback = dev->pwr_domain->ops.runtime_idle;
+ if (dev->pm_domain)
+ callback = dev->pm_domain->ops.runtime_idle;
else if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_idle;
else if (dev->class && dev->class->pm)
@@ -225,11 +226,17 @@ static int rpm_idle(struct device *dev, int rpmflags)
callback = NULL;
if (callback) {
- spin_unlock_irq(&dev->power.lock);
+ if (dev->power.irq_safe)
+ spin_unlock(&dev->power.lock);
+ else
+ spin_unlock_irq(&dev->power.lock);
callback(dev);
- spin_lock_irq(&dev->power.lock);
+ if (dev->power.irq_safe)
+ spin_lock(&dev->power.lock);
+ else
+ spin_lock_irq(&dev->power.lock);
}
dev->power.idle_notification = false;
@@ -262,15 +269,15 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
spin_lock_irq(&dev->power.lock);
}
dev->power.runtime_error = retval;
- return retval;
+ return retval != -EACCES ? retval : -EIO;
}
/**
- * rpm_suspend - Carry out run-time suspend of given device.
+ * rpm_suspend - Carry out runtime suspend of given device.
* @dev: Device to suspend.
* @rpmflags: Flag bits.
*
- * Check if the device's run-time PM status allows it to be suspended. If
+ * Check if the device's runtime PM status allows it to be suspended. If
* another suspend has been started earlier, either return immediately or wait
* for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
* pending idle notification. If the RPM_ASYNC flag is set then queue a
@@ -374,8 +381,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_SUSPENDING);
- if (dev->pwr_domain)
- callback = dev->pwr_domain->ops.runtime_suspend;
+ if (dev->pm_domain)
+ callback = dev->pm_domain->ops.runtime_suspend;
else if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_suspend;
else if (dev->class && dev->class->pm)
@@ -388,7 +395,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_ACTIVE);
- dev->power.deferred_resume = 0;
+ dev->power.deferred_resume = false;
if (retval == -EAGAIN || retval == -EBUSY)
dev->power.runtime_error = 0;
else
@@ -429,11 +436,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
}
/**
- * rpm_resume - Carry out run-time resume of given device.
+ * rpm_resume - Carry out runtime resume of given device.
* @dev: Device to resume.
* @rpmflags: Flag bits.
*
- * Check if the device's run-time PM status allows it to be resumed. Cancel
+ * Check if the device's runtime PM status allows it to be resumed. Cancel
* any scheduled or pending requests. If another resume has been started
* earlier, either return immediately or wait for it to finish, depending on the
* RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
@@ -458,7 +465,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.runtime_error)
retval = -EINVAL;
else if (dev->power.disable_depth > 0)
- retval = -EAGAIN;
+ retval = -EACCES;
if (retval)
goto out;
@@ -550,7 +557,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock(&parent->power.lock);
/*
- * We can resume if the parent's run-time PM is disabled or it
+ * We can resume if the parent's runtime PM is disabled or it
* is set to ignore children.
*/
if (!parent->power.disable_depth
@@ -573,8 +580,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_RESUMING);
- if (dev->pwr_domain)
- callback = dev->pwr_domain->ops.runtime_resume;
+ if (dev->pm_domain)
+ callback = dev->pm_domain->ops.runtime_resume;
else if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_resume;
else if (dev->class && dev->class->pm)
@@ -614,11 +621,11 @@ static int rpm_resume(struct device *dev, int rpmflags)
}
/**
- * pm_runtime_work - Universal run-time PM work function.
+ * pm_runtime_work - Universal runtime PM work function.
* @work: Work structure used for scheduling the execution of this function.
*
* Use @work to get the device object the work is to be done for, determine what
- * is to be done and execute the appropriate run-time PM function.
+ * is to be done and execute the appropriate runtime PM function.
*/
static void pm_runtime_work(struct work_struct *work)
{
@@ -717,7 +724,7 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
/**
- * __pm_runtime_idle - Entry point for run-time idle operations.
+ * __pm_runtime_idle - Entry point for runtime idle operations.
* @dev: Device to send idle notification for.
* @rpmflags: Flag bits.
*
@@ -746,7 +753,7 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
/**
- * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
+ * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
* @dev: Device to suspend.
* @rpmflags: Flag bits.
*
@@ -775,7 +782,7 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
/**
- * __pm_runtime_resume - Entry point for run-time resume operations.
+ * __pm_runtime_resume - Entry point for runtime resume operations.
* @dev: Device to resume.
* @rpmflags: Flag bits.
*
@@ -801,11 +808,11 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
- * __pm_runtime_set_status - Set run-time PM status of a device.
+ * __pm_runtime_set_status - Set runtime PM status of a device.
* @dev: Device to handle.
- * @status: New run-time PM status of the device.
+ * @status: New runtime PM status of the device.
*
- * If run-time PM of the device is disabled or its power.runtime_error field is
+ * If runtime PM of the device is disabled or its power.runtime_error field is
* different from zero, the status may be changed either to RPM_ACTIVE, or to
* RPM_SUSPENDED, as long as that reflects the actual state of the device.
* However, if the device has a parent and the parent is not active, and the
@@ -851,7 +858,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
/*
* It is invalid to put an active child under a parent that is
- * not active, has run-time PM enabled and the
+ * not active, has runtime PM enabled and the
* 'power.ignore_children' flag unset.
*/
if (!parent->power.disable_depth
@@ -885,7 +892,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
* @dev: Device to handle.
*
* Flush all pending requests for the device from pm_wq and wait for all
- * run-time PM operations involving the device in progress to complete.
+ * runtime PM operations involving the device in progress to complete.
*
* Should be called under dev->power.lock with interrupts disabled.
*/
@@ -933,7 +940,7 @@ static void __pm_runtime_barrier(struct device *dev)
* Prevent the device from being suspended by incrementing its usage counter and
* if there's a pending resume request for the device, wake the device up.
* Next, make sure that all pending requests for the device have been flushed
- * from pm_wq and wait for all run-time PM operations involving the device in
+ * from pm_wq and wait for all runtime PM operations involving the device in
* progress to complete.
*
* Return value:
@@ -963,18 +970,18 @@ int pm_runtime_barrier(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
/**
- * __pm_runtime_disable - Disable run-time PM of a device.
+ * __pm_runtime_disable - Disable runtime PM of a device.
* @dev: Device to handle.
* @check_resume: If set, check if there's a resume request for the device.
*
* Increment power.disable_depth for the device and if it was zero previously,
- * cancel all pending run-time PM requests for the device and wait for all
+ * cancel all pending runtime PM requests for the device and wait for all
* operations in progress to complete. The device can be either active or
- * suspended after its run-time PM has been disabled.
+ * suspended after its runtime PM has been disabled.
*
* If @check_resume is set and there's a resume request pending when
* __pm_runtime_disable() is called and power.disable_depth is zero, the
- * function will wake up the device before disabling its run-time PM.
+ * function will wake up the device before disabling its runtime PM.
*/
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
@@ -987,7 +994,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
/*
* Wake up the device if there's a resume request pending, because that
- * means there probably is some I/O to process and disabling run-time PM
+ * means there probably is some I/O to process and disabling runtime PM
* shouldn't prevent the device from processing the I/O.
*/
if (check_resume && dev->power.request_pending
@@ -1012,7 +1019,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
/**
- * pm_runtime_enable - Enable run-time PM of a device.
+ * pm_runtime_enable - Enable runtime PM of a device.
* @dev: Device to handle.
*/
void pm_runtime_enable(struct device *dev)
@@ -1031,7 +1038,7 @@ void pm_runtime_enable(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_enable);
/**
- * pm_runtime_forbid - Block run-time PM of a device.
+ * pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
*
* Increase the device's usage count and clear its power.runtime_auto flag,
@@ -1054,7 +1061,7 @@ void pm_runtime_forbid(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_forbid);
/**
- * pm_runtime_allow - Unblock run-time PM of a device.
+ * pm_runtime_allow - Unblock runtime PM of a device.
* @dev: Device to handle.
*
* Decrease the device's usage count and set its power.runtime_auto flag.
@@ -1075,12 +1082,12 @@ void pm_runtime_allow(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_allow);
/**
- * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
+ * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
* @dev: Device to handle.
*
* Set the power.no_callbacks flag, which tells the PM core that this
- * device is power-managed through its parent and has no run-time PM
- * callbacks of its own. The run-time sysfs attributes will be removed.
+ * device is power-managed through its parent and has no runtime PM
+ * callbacks of its own. The runtime sysfs attributes will be removed.
*/
void pm_runtime_no_callbacks(struct device *dev)
{
@@ -1156,8 +1163,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
* @delay: Value of the new delay in milliseconds.
*
* Set the device's power.autosuspend_delay value. If it changes to negative
- * and the power.use_autosuspend flag is set, prevent run-time suspends. If it
- * changes the other way, allow run-time suspends.
+ * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
+ * changes the other way, allow runtime suspends.
*/
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
@@ -1177,7 +1184,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
* @dev: Device to handle.
* @use: New value for use_autosuspend.
*
- * Set the device's power.use_autosuspend flag, and allow or prevent run-time
+ * Set the device's power.use_autosuspend flag, and allow or prevent runtime
* suspends as needed.
*/
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
@@ -1194,7 +1201,7 @@ void __pm_runtime_use_autosuspend(struct device *dev, bool use)
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
/**
- * pm_runtime_init - Initialize run-time PM fields in given device object.
+ * pm_runtime_init - Initialize runtime PM fields in given device object.
* @dev: Device object to initialize.
*/
void pm_runtime_init(struct device *dev)
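
For reference, the usual driver-side pattern that these helpers back; foo_do_io() and its device are hypothetical, and the pm_runtime_*() calls shown are existing public entry points rather than anything new in this hunk.

/* Hypothetical driver I/O path holding a runtime PM reference. */
static int foo_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* resume the device if needed */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... perform the I/O while the device is guaranteed active ... */

	pm_runtime_put(dev);		/* drop the reference, allow suspend */
	return 0;
}
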
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a9f5b897961..17b7934f31c 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -5,7 +5,7 @@
#include <linux/device.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"
@@ -116,12 +116,14 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
cp = memchr(buf, '\n', n);
if (cp)
len = cp - buf;
+ device_lock(dev);
if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
pm_runtime_allow(dev);
else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
pm_runtime_forbid(dev);
else
- return -EINVAL;
+ n = -EINVAL;
+ device_unlock(dev);
return n;
}
@@ -205,7 +207,9 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay)
return -EINVAL;
+ device_lock(dev);
pm_runtime_set_autosuspend_delay(dev, delay);
+ device_unlock(dev);
return n;
}
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index c80e138b62f..af10abecb99 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void)
unsigned int val;
get_rtc_time(&time);
- pr_info("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n",
+ pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
time.tm_hour, time.tm_min, time.tm_sec,
time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
val = time.tm_year; /* 100 years */
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
new file mode 100644
index 00000000000..fabbf6cc536
--- /dev/null
+++ b/drivers/base/regmap/Kconfig
@@ -0,0 +1,13 @@
+# Generic register map support. There are no user serviceable options here;
+# this is an API intended to be used by other kernel subsystems. These
+# subsystems should select the appropriate symbols.
+
+config REGMAP
+ default y if (REGMAP_I2C || REGMAP_SPI)
+ bool
+
+config REGMAP_I2C
+ tristate
+
+config REGMAP_SPI
+ tristate
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
new file mode 100644
index 00000000000..f476f457129
--- /dev/null
+++ b/drivers/base/regmap/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_REGMAP) += regmap.o
+obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
+obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
new file mode 100644
index 00000000000..c4f7a45cd2c
--- /dev/null
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -0,0 +1,116 @@
+/*
+ * Register map access API - I2C support
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static int regmap_i2c_write(struct device *dev, const void *data, size_t count)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+
+ ret = i2c_master_send(i2c, data, count);
+ if (ret == count)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static int regmap_i2c_gather_write(struct device *dev,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct i2c_msg xfer[2];
+ int ret;
+
+ /* If the I2C controller can't do a gather, tell the core; it
+ * will substitute a linear write for us.
+ */
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_PROTOCOL_MANGLING))
+ return -ENOTSUPP;
+
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = reg_size;
+ xfer[0].buf = (void *)reg;
+
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_NOSTART;
+ xfer[1].len = val_size;
+ xfer[1].buf = (void *)val;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret == 2)
+ return 0;
+ if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static int regmap_i2c_read(struct device *dev,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct i2c_msg xfer[2];
+ int ret;
+
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = reg_size;
+ xfer[0].buf = (void *)reg;
+
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = val_size;
+ xfer[1].buf = val;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret == 2)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static struct regmap_bus regmap_i2c = {
+ .type = &i2c_bus_type,
+ .write = regmap_i2c_write,
+ .gather_write = regmap_i2c_gather_write,
+ .read = regmap_i2c_read,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * regmap_init_i2c(): Initialise register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config)
+{
+ return regmap_init(&i2c->dev, &regmap_i2c, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_i2c);
+
+MODULE_LICENSE("GPL");
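
A minimal usage sketch (illustrative, not part of the patch): a hypothetical I2C chip driver creating a register map from its probe() callback. The foo_* names, the register widths and the addresses are assumptions made up for this example.

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

/* Hypothetical chip with 8-bit register addresses and 8-bit values. */
static const struct regmap_config foo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int foo_i2c_probe(struct i2c_client *i2c,
			 const struct i2c_device_id *id)
{
	struct regmap *map;

	map = regmap_init_i2c(i2c, &foo_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	i2c_set_clientdata(i2c, map);
	/* ... regmap_read()/regmap_write() against map; regmap_exit() on remove ... */
	return 0;
}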
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
new file mode 100644
index 00000000000..f8396945d6e
--- /dev/null
+++ b/drivers/base/regmap/regmap-spi.c
@@ -0,0 +1,75 @@
+/*
+ * Register map access API - SPI support
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+static int regmap_spi_write(struct device *dev, const void *data, size_t count)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ return spi_write(spi, data, count);
+}
+
+static int regmap_spi_gather_write(struct device *dev,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_message m;
+ struct spi_transfer t[2] = { { .tx_buf = reg, .len = reg_len, },
+ { .tx_buf = val, .len = val_len, }, };
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ return spi_sync(spi, &m);
+}
+
+static int regmap_spi_read(struct device *dev,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ return spi_write_then_read(spi, reg, reg_size, val, val_size);
+}
+
+static struct regmap_bus regmap_spi = {
+ .type = &spi_bus_type,
+ .write = regmap_spi_write,
+ .gather_write = regmap_spi_gather_write,
+ .read = regmap_spi_read,
+ .owner = THIS_MODULE,
+ .read_flag_mask = 0x80,
+};
+
+/**
+ * regmap_init_spi(): Initialise register map
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_spi(struct spi_device *spi,
+ const struct regmap_config *config)
+{
+ return regmap_init(&spi->dev, &regmap_spi, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_spi);
+
+MODULE_LICENSE("GPL");
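
A similar sketch for SPI (illustrative, not part of the patch); the bar_* names are assumptions. With 7-bit registers and 9-bit values only the combined 7_9 write format exists in the core at this point, so such a map is effectively write-only; the bus's read_flag_mask of 0x80 sets the top bit of the register byte for devices that flag reads that way.

#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>

/* Hypothetical codec-style chip: 7-bit register addresses, 9-bit values. */
static const struct regmap_config bar_regmap_config = {
	.reg_bits = 7,
	.val_bits = 9,
};

static int bar_spi_probe(struct spi_device *spi)
{
	struct regmap *map;

	map = regmap_init_spi(spi, &bar_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	spi_set_drvdata(spi, map);
	return 0;
}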
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
new file mode 100644
index 00000000000..0eef4da1ac6
--- /dev/null
+++ b/drivers/base/regmap/regmap.c
@@ -0,0 +1,455 @@
+/*
+ * Register map access API
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+
+#include <linux/regmap.h>
+
+struct regmap;
+
+struct regmap_format {
+ size_t buf_size;
+ size_t reg_bytes;
+ size_t val_bytes;
+ void (*format_write)(struct regmap *map,
+ unsigned int reg, unsigned int val);
+ void (*format_reg)(void *buf, unsigned int reg);
+ void (*format_val)(void *buf, unsigned int val);
+ unsigned int (*parse_val)(void *buf);
+};
+
+struct regmap {
+ struct mutex lock;
+
+ struct device *dev; /* Device we do I/O on */
+ void *work_buf; /* Scratch buffer used to format I/O */
+ struct regmap_format format; /* Buffer format */
+ const struct regmap_bus *bus;
+};
+
+static void regmap_format_4_12_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ __be16 *out = map->work_buf;
+ *out = cpu_to_be16((reg << 12) | val);
+}
+
+static void regmap_format_7_9_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ __be16 *out = map->work_buf;
+ *out = cpu_to_be16((reg << 9) | val);
+}
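+
+/*
+ * Worked example (illustrative, not part of the patch): with the 4_12
+ * format, reg 0x3 and val 0x456 are packed as (0x3 << 12) | 0x456 = 0x3456
+ * and sent big-endian; with the 7_9 format, reg 0x1A and val 0x123 become
+ * (0x1A << 9) | 0x123 = 0x3523.
+ */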
+
+static void regmap_format_8(void *buf, unsigned int val)
+{
+ u8 *b = buf;
+
+ b[0] = val;
+}
+
+static void regmap_format_16(void *buf, unsigned int val)
+{
+ __be16 *b = buf;
+
+ b[0] = cpu_to_be16(val);
+}
+
+static unsigned int regmap_parse_8(void *buf)
+{
+ u8 *b = buf;
+
+ return b[0];
+}
+
+static unsigned int regmap_parse_16(void *buf)
+{
+ __be16 *b = buf;
+
+ b[0] = be16_to_cpu(b[0]);
+
+ return b[0];
+}
+
+/**
+ * regmap_init(): Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap. This function should generally not be called
+ * directly; it should be called by bus-specific init functions.
+ */
+struct regmap *regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ const struct regmap_config *config)
+{
+ struct regmap *map;
+ int ret = -EINVAL;
+
+ if (!bus || !config)
+ return ERR_PTR(-EINVAL);
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (map == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mutex_init(&map->lock);
+ map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
+ map->format.reg_bytes = config->reg_bits / 8;
+ map->format.val_bytes = config->val_bits / 8;
+ map->dev = dev;
+ map->bus = bus;
+
+ switch (config->reg_bits) {
+ case 4:
+ switch (config->val_bits) {
+ case 12:
+ map->format.format_write = regmap_format_4_12_write;
+ break;
+ default:
+ goto err_map;
+ }
+ break;
+
+ case 7:
+ switch (config->val_bits) {
+ case 9:
+ map->format.format_write = regmap_format_7_9_write;
+ break;
+ default:
+ goto err_map;
+ }
+ break;
+
+ case 8:
+ map->format.format_reg = regmap_format_8;
+ break;
+
+ case 16:
+ map->format.format_reg = regmap_format_16;
+ break;
+
+ default:
+ goto err_map;
+ }
+
+ switch (config->val_bits) {
+ case 8:
+ map->format.format_val = regmap_format_8;
+ map->format.parse_val = regmap_parse_8;
+ break;
+ case 16:
+ map->format.format_val = regmap_format_16;
+ map->format.parse_val = regmap_parse_16;
+ break;
+ }
+
+ if (!map->format.format_write &&
+ !(map->format.format_reg && map->format.format_val))
+ goto err_map;
+
+ map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
+ if (map->work_buf == NULL) {
+ ret = -ENOMEM;
+ goto err_bus;
+ }
+
+ return map;
+
+err_bus:
+ module_put(map->bus->owner);
+err_map:
+ kfree(map);
+err:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(regmap_init);
+
+/**
+ * regmap_exit(): Free a previously allocated register map
+ */
+void regmap_exit(struct regmap *map)
+{
+ kfree(map->work_buf);
+ module_put(map->bus->owner);
+ kfree(map);
+}
+EXPORT_SYMBOL_GPL(regmap_exit);
+
+static int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ void *buf;
+ int ret = -ENOTSUPP;
+ size_t len;
+
+ map->format.format_reg(map->work_buf, reg);
+
+ /* Try to do a gather write if we can */
+ if (map->bus->gather_write)
+ ret = map->bus->gather_write(map->dev, map->work_buf,
+ map->format.reg_bytes,
+ val, val_len);
+
+ /* Otherwise fall back on linearising by hand. */
+ if (ret == -ENOTSUPP) {
+ len = map->format.reg_bytes + val_len;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, map->work_buf, map->format.reg_bytes);
+ memcpy(buf + map->format.reg_bytes, val, val_len);
+ ret = map->bus->write(map->dev, buf, len);
+
+ kfree(buf);
+ }
+
+ return ret;
+}
+
+static int _regmap_write(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ BUG_ON(!map->format.format_write && !map->format.format_val);
+
+ if (map->format.format_write) {
+ map->format.format_write(map, reg, val);
+
+ return map->bus->write(map->dev, map->work_buf,
+ map->format.buf_size);
+ } else {
+ map->format.format_val(map->work_buf + map->format.reg_bytes,
+ val);
+ return _regmap_raw_write(map, reg,
+ map->work_buf + map->format.reg_bytes,
+ map->format.val_bytes);
+ }
+}
+
+/**
+ * regmap_write(): Write a value to a single register
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ mutex_lock(&map->lock);
+
+ ret = _regmap_write(map, reg, val);
+
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write);
+
+/**
+ * regmap_raw_write(): Write raw values to one or more registers
+ *
+ * @map: Register map to write to
+ * @reg: Initial register to write to
+ * @val: Block of data to be written, laid out for direct transmission to the
+ * device
+ * @val_len: Length of data pointed to by val.
+ *
+ * This function is intended to be used for things like firmware
+ * download where a large block of data needs to be transferred to the
+ * device. No formatting will be done on the data provided.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ int ret;
+
+ mutex_lock(&map->lock);
+
+ ret = _regmap_raw_write(map, reg, val, val_len);
+
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_raw_write);
+
+static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+ unsigned int val_len)
+{
+ u8 *u8 = map->work_buf;
+ int ret;
+
+ map->format.format_reg(map->work_buf, reg);
+
+ /*
+ * Some buses flag reads by setting the high bits in the
+ * register address; since it's always the high bits for all
+ * current formats we can do this here rather than in
+ * formatting. This may break if we get interesting formats.
+ */
+ if (map->bus->read_flag_mask)
+ u8[0] |= map->bus->read_flag_mask;
+
+ ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes,
+ val, val_len);
+ if (ret != 0)
+ return ret;
+
+ return 0;
+}
+
+static int _regmap_read(struct regmap *map, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+ ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+ if (ret == 0)
+ *val = map->format.parse_val(map->work_buf);
+
+ return ret;
+}
+
+/**
+ * regmap_read(): Read a value from a single register
+ *
+ * @map: Register map to read from
+ * @reg: Register to be read from
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
+{
+ int ret;
+
+ mutex_lock(&map->lock);
+
+ ret = _regmap_read(map, reg, val);
+
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_read);
+
+/**
+ * regmap_raw_read(): Read raw data from the device
+ *
+ * @map: Register map to read from
+ * @reg: First register to be read from
+ * @val: Pointer to store read value
+ * @val_len: Size of data to read
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+ size_t val_len)
+{
+ int ret;
+
+ mutex_lock(&map->lock);
+
+ ret = _regmap_raw_read(map, reg, val, val_len);
+
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_raw_read);
+
+/**
+ * regmap_bulk_read(): Read multiple registers from the device
+ *
+ * @map: Register map to read from
+ * @reg: First register to be read from
+ * @val: Pointer to store read value, in native register size for device
+ * @val_count: Number of registers to read
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
+ size_t val_count)
+{
+ int ret, i;
+ size_t val_bytes = map->format.val_bytes;
+
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+ ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
+ if (ret != 0)
+ return ret;
+
+ for (i = 0; i < val_count * val_bytes; i += val_bytes)
+ map->format.parse_val(val + i);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_bulk_read);
+
+/**
+ * regmap_update_bits(): Perform a read-modify-write cycle on the register map
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int ret;
+ unsigned int tmp;
+
+ mutex_lock(&map->lock);
+
+ ret = _regmap_read(map, reg, &tmp);
+ if (ret != 0)
+ goto out;
+
+ tmp &= ~mask;
+ tmp |= val & mask;
+
+ ret = _regmap_write(map, reg, tmp);
+
+out:
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits);
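
A brief sketch of the resulting API (illustrative, not part of the patch; it assumes a map created as in the earlier I2C example, and the register addresses and masks are made up):

static int foo_enable_output(struct regmap *map)
{
	unsigned int rev;
	int ret;

	/* Read a (made-up) revision register. */
	ret = regmap_read(map, 0x00, &rev);
	if (ret)
		return ret;

	/* Write a whole register... */
	ret = regmap_write(map, 0x01, 0x80);
	if (ret)
		return ret;

	/* ...or read-modify-write a single bit field under the map's lock. */
	return regmap_update_bits(map, 0x02, 0x03, 0x01);
}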
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index c126db3cb7d..e8d11b6630e 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -9,6 +9,7 @@
#include <linux/syscore_ops.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/interrupt.h>
static LIST_HEAD(syscore_ops_list);
static DEFINE_MUTEX(syscore_ops_lock);
@@ -48,6 +49,13 @@ int syscore_suspend(void)
struct syscore_ops *ops;
int ret = 0;
+ pr_debug("Checking wakeup interrupts\n");
+
+ /* Return error code if there are any wakeup interrupts pending. */
+ ret = check_wakeup_irqs();
+ if (ret)
+ return ret;
+
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core suspend.\n");
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 353781b5b78..ae0a02e1b80 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -13,6 +13,11 @@ config BCMA
Bus driver for Broadcom specific Advanced Microcontroller Bus
Architecture.
+# Support for Block-I/O. SELECT this from the driver that needs it.
+config BCMA_BLOCKIO
+ bool
+ depends on BCMA
+
config BCMA_HOST_PCI_POSSIBLE
bool
depends on BCMA && PCI = y
@@ -22,6 +27,12 @@ config BCMA_HOST_PCI
bool "Support for BCMA on PCI-host bus"
depends on BCMA_HOST_PCI_POSSIBLE
+config BCMA_DRIVER_PCI_HOSTMODE
+ bool "Driver for PCI core working in hostmode"
+ depends on BCMA && MIPS
+ help
+ PCI core hostmode operation (external PCI bus).
+
config BCMA_DEBUG
bool "BCMA debugging"
depends on BCMA
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
index 0d56245bcb7..a2161cceafb 100644
--- a/drivers/bcma/Makefile
+++ b/drivers/bcma/Makefile
@@ -1,6 +1,7 @@
-bcma-y += main.o scan.o core.o
+bcma-y += main.o scan.o core.o sprom.o
bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o
bcma-y += driver_pci.o
+bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o
bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o
obj-$(CONFIG_BCMA) += bcma.o
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 2f72e9c585f..e02ff21835c 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -13,16 +13,23 @@
struct bcma_bus;
/* main.c */
-extern int bcma_bus_register(struct bcma_bus *bus);
-extern void bcma_bus_unregister(struct bcma_bus *bus);
+int bcma_bus_register(struct bcma_bus *bus);
+void bcma_bus_unregister(struct bcma_bus *bus);
/* scan.c */
int bcma_bus_scan(struct bcma_bus *bus);
+/* sprom.c */
+int bcma_sprom_get(struct bcma_bus *bus);
+
#ifdef CONFIG_BCMA_HOST_PCI
/* host_pci.c */
extern int __init bcma_host_pci_init(void);
extern void __exit bcma_host_pci_exit(void);
#endif /* CONFIG_BCMA_HOST_PCI */
+#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
+void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
+#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
+
#endif
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index ced379f7b37..4a04a49cc06 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -19,7 +19,7 @@ bool bcma_core_is_enabled(struct bcma_device *core)
}
EXPORT_SYMBOL_GPL(bcma_core_is_enabled);
-static void bcma_core_disable(struct bcma_device *core, u32 flags)
+void bcma_core_disable(struct bcma_device *core, u32 flags)
{
if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
return;
@@ -31,6 +31,7 @@ static void bcma_core_disable(struct bcma_device *core, u32 flags)
bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
udelay(1);
}
+EXPORT_SYMBOL_GPL(bcma_core_disable);
int bcma_core_enable(struct bcma_device *core, u32 flags)
{
@@ -49,3 +50,75 @@ int bcma_core_enable(struct bcma_device *core, u32 flags)
return 0;
}
EXPORT_SYMBOL_GPL(bcma_core_enable);
+
+void bcma_core_set_clockmode(struct bcma_device *core,
+ enum bcma_clkmode clkmode)
+{
+ u16 i;
+
+ WARN_ON(core->id.id != BCMA_CORE_CHIPCOMMON &&
+ core->id.id != BCMA_CORE_PCIE &&
+ core->id.id != BCMA_CORE_80211);
+
+ switch (clkmode) {
+ case BCMA_CLKMODE_FAST:
+ bcma_set32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
+ udelay(64);
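+ /* Poll up to ~15 ms for the HT clock; i is cleared on success,
+ * so a non-zero i below means the wait timed out. */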
+ for (i = 0; i < 1500; i++) {
+ if (bcma_read32(core, BCMA_CLKCTLST) &
+ BCMA_CLKCTLST_HAVEHT) {
+ i = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (i)
+ pr_err("HT force timeout\n");
+ break;
+ case BCMA_CLKMODE_DYNAMIC:
+ pr_warn("Dynamic clockmode not supported yet!\n");
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(bcma_core_set_clockmode);
+
+void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
+{
+ u16 i;
+
+ WARN_ON(req & ~BCMA_CLKCTLST_EXTRESREQ);
+ WARN_ON(status & ~BCMA_CLKCTLST_EXTRESST);
+
+ if (on) {
+ bcma_set32(core, BCMA_CLKCTLST, req);
+ for (i = 0; i < 10000; i++) {
+ if ((bcma_read32(core, BCMA_CLKCTLST) & status) ==
+ status) {
+ i = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (i)
+ pr_err("PLL enable timeout\n");
+ } else {
+ pr_warn("Disabling PLL not supported yet!\n");
+ }
+}
+EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
+
+u32 bcma_core_dma_translation(struct bcma_device *core)
+{
+ switch (core->bus->hosttype) {
+ case BCMA_HOSTTYPE_PCI:
+ if (bcma_aread32(core, BCMA_IOST) & BCMA_IOST_DMA64)
+ return BCMA_DMA_TRANSLATION_DMA64_CMT;
+ else
+ return BCMA_DMA_TRANSLATION_DMA32_CMT;
+ default:
+ pr_err("DMA translation unknown for host %d\n",
+ core->bus->hosttype);
+ }
+ return BCMA_DMA_TRANSLATION_NONE;
+}
+EXPORT_SYMBOL(bcma_core_dma_translation);
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index 606102256b4..851e05bc948 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -3,7 +3,7 @@
* ChipCommon core driver
*
* Copyright 2005, Broadcom Corporation
- * Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
@@ -23,6 +23,9 @@ static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
{
+ u32 leddc_on = 10;
+ u32 leddc_off = 90;
+
if (cc->core->id.rev >= 11)
cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);
@@ -38,6 +41,17 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
bcma_pmu_init(cc);
if (cc->capabilities & BCMA_CC_CAP_PCTL)
pr_err("Power control not implemented!\n");
+
+ if (cc->core->id.rev >= 16) {
+ if (cc->core->bus->sprom.leddc_on_time &&
+ cc->core->bus->sprom.leddc_off_time) {
+ leddc_on = cc->core->bus->sprom.leddc_on_time;
+ leddc_off = cc->core->bus->sprom.leddc_off_time;
+ }
+ bcma_cc_write32(cc, BCMA_CC_GPIOTIMER,
+ ((leddc_on << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
+ (leddc_off << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT)));
+ }
}
/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index f44177a644c..fcc63db0ce7 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -2,7 +2,7 @@
* Broadcom specific AMBA
* ChipCommon Power Management Unit driver
*
- * Copyright 2009, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2009, Michael Buesch <m@bues.ch>
* Copyright 2007, Broadcom Corporation
*
* Licensed under the GNU/GPL. See COPYING for details.
@@ -53,6 +53,7 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
max_msk = 0xFFFF;
break;
case 43224:
+ case 43225:
break;
default:
pr_err("PMU resource config unknown for device 0x%04X\n",
@@ -74,6 +75,7 @@ void bcma_pmu_swreg_init(struct bcma_drv_cc *cc)
case 0x4313:
case 0x4331:
case 43224:
+ case 43225:
break;
default:
pr_err("PMU switch/regulators init unknown for device "
@@ -96,11 +98,13 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
if (bus->chipinfo.rev == 0) {
pr_err("Workarounds for 43224 rev 0 not fully "
"implemented\n");
- bcma_chipco_chipctl_maskset(cc, 0, ~0, 0xF0);
+ bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x00F000F0);
} else {
bcma_chipco_chipctl_maskset(cc, 0, ~0, 0xF0);
}
break;
+ case 43225:
+ break;
default:
pr_err("Workarounds unknown for device 0x%04X\n",
bus->chipinfo.id);
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index e757e4e3c7e..25f3ddf3382 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -3,7 +3,7 @@
* PCI Core
*
* Copyright 2005, Broadcom Corporation
- * Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
@@ -157,7 +157,69 @@ static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
* Init.
**************************************************/
-void bcma_core_pci_init(struct bcma_drv_pci *pc)
+static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
bcma_pcicore_serdes_workaround(pc);
}
+
+static bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
+{
+ struct bcma_bus *bus = pc->core->bus;
+ u16 chipid_top;
+
+ chipid_top = (bus->chipinfo.id & 0xFF00);
+ if (chipid_top != 0x4700 &&
+ chipid_top != 0x5300)
+ return false;
+
+#ifdef CONFIG_SSB_DRIVER_PCICORE
+ if (bus->sprom.boardflags_lo & SSB_PCICORE_BFL_NOPCI)
+ return false;
+#endif /* CONFIG_SSB_DRIVER_PCICORE */
+
+#if 0
+ /* TODO: on BCMA we use address from EROM instead of magic formula */
+ u32 tmp;
+ return !mips_busprobe32(tmp, (bus->mmio +
+ (pc->core->core_index * BCMA_CORE_SIZE)));
+#endif
+
+ return true;
+}
+
+void bcma_core_pci_init(struct bcma_drv_pci *pc)
+{
+ if (bcma_core_pci_is_in_hostmode(pc)) {
+#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
+ bcma_core_pci_hostmode_init(pc);
+#else
+ pr_err("Driver compiled without support for hostmode PCI\n");
+#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
+ } else {
+ bcma_core_pci_clientmode_init(pc);
+ }
+}
+
+int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
+ bool enable)
+{
+ struct pci_dev *pdev = pc->core->bus->host_pci;
+ u32 coremask, tmp;
+ int err;
+
+ err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
+ if (err)
+ goto out;
+
+ coremask = BIT(core->core_index) << 8;
+ if (enable)
+ tmp |= coremask;
+ else
+ tmp &= ~coremask;
+
+ err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
+
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
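
A hedged usage sketch (not part of the patch): a hypothetical client-core driver, say for the 802.11 core on a PCI host, routing its interrupt through the PCI core before requesting it. The foo80211_* names are assumptions.

#include <linux/bcma/bcma.h>
#include <linux/interrupt.h>

static irqreturn_t foo80211_isr(int irq, void *dev_id)
{
	/* Real interrupt handling would go here. */
	return IRQ_HANDLED;
}

static int foo80211_attach(struct bcma_device *core)
{
	int err;

	/* Unmask this core's interrupt line in the PCI core bridge. */
	err = bcma_core_pci_irq_ctl(&core->bus->drv_pci, core, true);
	if (err)
		return err;

	return request_irq(core->irq, foo80211_isr, IRQF_SHARED,
			   "foo80211", core);
}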
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
new file mode 100644
index 00000000000..eb332b75ce8
--- /dev/null
+++ b/drivers/bcma/driver_pci_host.c
@@ -0,0 +1,14 @@
+/*
+ * Broadcom specific AMBA
+ * PCI Core in hostmode
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/bcma/bcma.h>
+
+void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
+{
+ pr_err("No support for PCI core in hostmode yet\n");
+}
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 471a04013fe..ac4bc626c14 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -65,6 +65,54 @@ static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
iowrite32(value, core->bus->mmio + offset);
}
+#ifdef CONFIG_BCMA_BLOCKIO
+void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
+ size_t count, u16 offset, u8 reg_width)
+{
+ void __iomem *addr = core->bus->mmio + offset;
+ if (core->bus->mapped_core != core)
+ bcma_host_pci_switch_core(core);
+ switch (reg_width) {
+ case sizeof(u8):
+ ioread8_rep(addr, buffer, count);
+ break;
+ case sizeof(u16):
+ WARN_ON(count & 1);
+ ioread16_rep(addr, buffer, count >> 1);
+ break;
+ case sizeof(u32):
+ WARN_ON(count & 3);
+ ioread32_rep(addr, buffer, count >> 2);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer,
+ size_t count, u16 offset, u8 reg_width)
+{
+ void __iomem *addr = core->bus->mmio + offset;
+ if (core->bus->mapped_core != core)
+ bcma_host_pci_switch_core(core);
+ switch (reg_width) {
+ case sizeof(u8):
+ iowrite8_rep(addr, buffer, count);
+ break;
+ case sizeof(u16):
+ WARN_ON(count & 1);
+ iowrite16_rep(addr, buffer, count >> 1);
+ break;
+ case sizeof(u32):
+ WARN_ON(count & 3);
+ iowrite32_rep(addr, buffer, count >> 2);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+#endif
+
static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
{
if (core->bus->mapped_core != core)
@@ -87,6 +135,10 @@ const struct bcma_host_ops bcma_host_pci_ops = {
.write8 = bcma_host_pci_write8,
.write16 = bcma_host_pci_write16,
.write32 = bcma_host_pci_write32,
+#ifdef CONFIG_BCMA_BLOCKIO
+ .block_read = bcma_host_pci_block_read,
+ .block_write = bcma_host_pci_block_write,
+#endif
.aread32 = bcma_host_pci_aread32,
.awrite32 = bcma_host_pci_awrite32,
};
@@ -175,6 +227,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
{ 0, },
};
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index be52344ed19..873e2e4ac55 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -7,6 +7,7 @@
#include "bcma_private.h"
#include <linux/bcma/bcma.h>
+#include <linux/slab.h>
MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");
@@ -89,6 +90,8 @@ static int bcma_register_cores(struct bcma_bus *bus)
switch (bus->hosttype) {
case BCMA_HOSTTYPE_PCI:
core->dev.parent = &bus->host_pci->dev;
+ core->dma_dev = &bus->host_pci->dev;
+ core->irq = bus->host_pci->irq;
break;
case BCMA_HOSTTYPE_NONE:
case BCMA_HOSTTYPE_SDIO:
@@ -144,6 +147,15 @@ int bcma_bus_register(struct bcma_bus *bus)
bcma_core_pci_init(&bus->drv_pci);
}
+ /* Try to get SPROM */
+ err = bcma_sprom_get(bus);
+ if (err == -ENOENT) {
+ pr_err("No SPROM available\n");
+ } else if (err) {
+ pr_err("Failed to get SPROM: %d\n", err);
+ return -ENOENT;
+ }
+
/* Register found cores */
bcma_register_cores(bus);
@@ -151,13 +163,11 @@ int bcma_bus_register(struct bcma_bus *bus)
return 0;
}
-EXPORT_SYMBOL_GPL(bcma_bus_register);
void bcma_bus_unregister(struct bcma_bus *bus)
{
bcma_unregister_cores(bus);
}
-EXPORT_SYMBOL_GPL(bcma_bus_unregister);
int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
new file mode 100644
index 00000000000..8b5b7856abe
--- /dev/null
+++ b/drivers/bcma/sprom.c
@@ -0,0 +1,171 @@
+/*
+ * Broadcom specific AMBA
+ * SPROM reading
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#define SPOFF(offset) ((offset) / sizeof(u16))
+
+/**************************************************
+ * R/W ops.
+ **************************************************/
+
+static void bcma_sprom_read(struct bcma_bus *bus, u16 offset, u16 *sprom)
+{
+ int i;
+ for (i = 0; i < SSB_SPROMSIZE_WORDS_R4; i++)
+ sprom[i] = bcma_read16(bus->drv_cc.core,
+ offset + (i * 2));
+}
+
+/**************************************************
+ * Validation.
+ **************************************************/
+
+static inline u8 bcma_crc8(u8 crc, u8 data)
+{
+ /* Polynomial: x^8 + x^7 + x^6 + x^4 + x^2 + 1 */
+ static const u8 t[] = {
+ 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+ 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+ 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+ 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+ 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+ 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+ 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+ 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+ 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+ 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+ 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+ 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+ 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+ 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+ 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+ 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+ 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+ 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+ 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+ 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+ 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+ 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+ 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+ 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+ 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+ 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+ 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+ 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+ 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+ 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+ 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+ 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F,
+ };
+ return t[crc ^ data];
+}
+
+static u8 bcma_sprom_crc(const u16 *sprom)
+{
+ int word;
+ u8 crc = 0xFF;
+
+ for (word = 0; word < SSB_SPROMSIZE_WORDS_R4 - 1; word++) {
+ crc = bcma_crc8(crc, sprom[word] & 0x00FF);
+ crc = bcma_crc8(crc, (sprom[word] & 0xFF00) >> 8);
+ }
+ crc = bcma_crc8(crc, sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & 0x00FF);
+ crc ^= 0xFF;
+
+ return crc;
+}
+
+static int bcma_sprom_check_crc(const u16 *sprom)
+{
+ u8 crc;
+ u8 expected_crc;
+ u16 tmp;
+
+ crc = bcma_sprom_crc(sprom);
+ tmp = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & SSB_SPROM_REVISION_CRC;
+ expected_crc = tmp >> SSB_SPROM_REVISION_CRC_SHIFT;
+ if (crc != expected_crc)
+ return -EPROTO;
+
+ return 0;
+}
+
+static int bcma_sprom_valid(const u16 *sprom)
+{
+ u16 revision;
+ int err;
+
+ err = bcma_sprom_check_crc(sprom);
+ if (err)
+ return err;
+
+ revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & SSB_SPROM_REVISION_REV;
+ if (revision != 8 && revision != 9) {
+ pr_err("Unsupported SPROM revision: %d\n", revision);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+/**************************************************
+ * SPROM extraction.
+ **************************************************/
+
+static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
+{
+ u16 v;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i];
+ *(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
+ }
+}
+
+int bcma_sprom_get(struct bcma_bus *bus)
+{
+ u16 offset;
+ u16 *sprom;
+ int err = 0;
+
+ if (!bus->drv_cc.core)
+ return -EOPNOTSUPP;
+
+ if (!(bus->drv_cc.capabilities & BCMA_CC_CAP_SPROM))
+ return -ENOENT;
+
+ sprom = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16),
+ GFP_KERNEL);
+ if (!sprom)
+ return -ENOMEM;
+
+ /* Most cards have the SPROM moved by an additional offset of 0x30 (48 bytes).
+ * According to brcm80211 this applies to cards with PCIe rev >= 6.
+ * TODO: understand this condition and use it */
+ offset = (bus->chipinfo.id == 0x4331) ? BCMA_CC_SPROM :
+ BCMA_CC_SPROM_PCIE6;
+ bcma_sprom_read(bus, offset, sprom);
+
+ err = bcma_sprom_valid(sprom);
+ if (err)
+ goto out;
+
+ bcma_sprom_extract_r8(bus, sprom);
+
+out:
+ kfree(sprom);
+ return err;
+}
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 717d6e4e18d..6f07ec1c2f5 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -256,6 +256,21 @@ config BLK_DEV_LOOP
Most users will answer N here.
+config BLK_DEV_LOOP_MIN_COUNT
+ int "Number of loop devices to pre-create at init time"
+ depends on BLK_DEV_LOOP
+ default 8
+ help
+ Static number of loop devices to be unconditionally pre-created
+ at init time.
+
+ This default value can be overridden on the kernel command
+ line or with the loop.max_loop module parameter.
+
+ The historic default is 8. If a late 2011 version of losetup(8)
+ is used, it can be set to 0, since needed loop devices can be
+ dynamically allocated with the /dev/loop-control interface.
+
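
An illustrative override of the pre-created count (assuming the standard module name, loop): boot with loop.max_loop=16 on the kernel command line, or load the driver with "modprobe loop max_loop=16".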
config BLK_DEV_CRYPTOLOOP
tristate "Cryptoloop Support"
select CRYPTO
@@ -471,7 +486,7 @@ config XEN_BLKDEV_FRONTEND
in another domain which drives the actual block device.
config XEN_BLKDEV_BACKEND
- tristate "Block-device backend driver"
+ tristate "Xen block-device backend driver"
depends on XEN_BACKEND
help
The block-device backend driver allows the kernel to export its
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 16b4d58d84d..c049548e68b 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -223,7 +223,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
h->ctlr, c->busaddr);
#endif /* CCISS_DEBUG */
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
- readl(h->vaddr + SA5_REQUEST_PORT_OFFSET);
+ readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
h->commands_outstanding++;
if ( h->commands_outstanding > h->max_outstanding)
h->max_outstanding = h->commands_outstanding;
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 696100241a6..951a4e33b92 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -33,7 +33,7 @@
#include <linux/slab.h>
#include <linux/string.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 09ef9a878ef..cf0e63dd97d 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -79,7 +79,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
md_io.error = 0;
if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
- rw |= REQ_FUA;
+ rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
bio = bio_alloc(GFP_NOIO, 1);
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index f440a02dfdb..7b976296b56 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -112,9 +112,6 @@ struct drbd_bitmap {
struct task_struct *bm_task;
};
-static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
- unsigned long e, int val, const enum km_type km);
-
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
@@ -994,6 +991,9 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bio_endio(bio, -EIO);
} else {
submit_bio(rw, bio);
+ /* this should not count as user activity and cause the
+ * resync to throttle -- see drbd_rs_should_slow_down(). */
+ atomic_add(len >> 9, &mdev->rs_sect_ev);
}
}
@@ -1256,7 +1256,7 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_f
* expected to be called for only a few bits (e - s about BITS_PER_LONG).
* Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
- unsigned long e, int val, const enum km_type km)
+ unsigned long e, int val)
{
struct drbd_bitmap *b = mdev->bitmap;
unsigned long *p_addr = NULL;
@@ -1274,14 +1274,14 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
if (page_nr != last_page_nr) {
if (p_addr)
- __bm_unmap(p_addr, km);
+ __bm_unmap(p_addr, KM_IRQ1);
if (c < 0)
bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
else if (c > 0)
bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
changed_total += c;
c = 0;
- p_addr = __bm_map_pidx(b, page_nr, km);
+ p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1);
last_page_nr = page_nr;
}
if (val)
@@ -1290,7 +1290,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
}
if (p_addr)
- __bm_unmap(p_addr, km);
+ __bm_unmap(p_addr, KM_IRQ1);
if (c < 0)
bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
else if (c > 0)
@@ -1318,7 +1318,7 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
bm_print_lock_info(mdev);
- c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);
+ c = __bm_change_bits_to(mdev, s, e, val);
spin_unlock_irqrestore(&b->bm_lock, flags);
return c;
@@ -1343,16 +1343,17 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
{
int i;
int bits;
- unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
+ unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1);
for (i = first_word; i < last_word; i++) {
bits = hweight_long(paddr[i]);
paddr[i] = ~0UL;
b->bm_set += BITS_PER_LONG - bits;
}
- kunmap_atomic(paddr, KM_USER0);
+ kunmap_atomic(paddr, KM_IRQ1);
}
-/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
+/* Same thing as drbd_bm_set_bits,
+ * but more efficient for a large bit range.
* You must first drbd_bm_lock().
* Can be called to set the whole bitmap in one go.
* Sets bits from s to e _inclusive_. */
@@ -1366,6 +1367,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* Do not use memset, because we must account for changes,
* so we need to loop over the words with hweight() anyways.
*/
+ struct drbd_bitmap *b = mdev->bitmap;
unsigned long sl = ALIGN(s,BITS_PER_LONG);
unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
int first_page;
@@ -1376,15 +1378,19 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
if (e - s <= 3*BITS_PER_LONG) {
/* don't bother; el and sl may even be wrong. */
- __bm_change_bits_to(mdev, s, e, 1, KM_USER0);
+ spin_lock_irq(&b->bm_lock);
+ __bm_change_bits_to(mdev, s, e, 1);
+ spin_unlock_irq(&b->bm_lock);
return;
}
/* difference is large enough that we can trust sl and el */
+ spin_lock_irq(&b->bm_lock);
+
/* bits filling the current long */
if (sl)
- __bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);
+ __bm_change_bits_to(mdev, s, sl-1, 1);
first_page = sl >> (3 + PAGE_SHIFT);
last_page = el >> (3 + PAGE_SHIFT);
@@ -1397,8 +1403,10 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
/* first and full pages, unless first page == last page */
for (page_nr = first_page; page_nr < last_page; page_nr++) {
bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
+ spin_unlock_irq(&b->bm_lock);
cond_resched();
first_word = 0;
+ spin_lock_irq(&b->bm_lock);
}
/* last page (respectively only page, for first page == last page) */
@@ -1411,7 +1419,8 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* it would trigger an assert in __bm_change_bits_to()
*/
if (el <= e)
- __bm_change_bits_to(mdev, el, e, 1, KM_USER0);
+ __bm_change_bits_to(mdev, el, e, 1);
+ spin_unlock_irq(&b->bm_lock);
}
/* returns bit state
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 515bcd948a4..0feab261e29 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1829,10 +1829,10 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
/* silently ignore cpu mask on UP kernel */
if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
- err = __bitmap_parse(sc.cpu_mask, 32, 0,
+ err = bitmap_parse(sc.cpu_mask, 32,
cpumask_bits(new_cpu_mask), nr_cpu_ids);
if (err) {
- dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
+ dev_warn(DEV, "bitmap_parse() failed with %d\n", err);
retcode = ERR_CPU_MASK_PARSE;
goto fail;
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 25d32c5aa50..43beaca5317 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -4602,6 +4602,11 @@ int drbd_asender(struct drbd_thread *thi)
dev_err(DEV, "meta connection shut down by peer.\n");
goto reconnect;
} else if (rv == -EAGAIN) {
+ /* If the data socket received something meanwhile,
+ * that is good enough: peer is still alive. */
+ if (time_after(mdev->last_received,
+ jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
+ continue;
if (ping_timeout_active) {
dev_err(DEV, "PingAck did not arrive in time.\n");
goto reconnect;
@@ -4637,6 +4642,7 @@ int drbd_asender(struct drbd_thread *thi)
goto reconnect;
}
if (received == expect) {
+ mdev->last_received = jiffies;
D_ASSERT(cmd != NULL);
if (!cmd->process(mdev, h))
goto reconnect;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 4d76b06b6b2..4d3e6f6213b 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -536,12 +536,7 @@ static int w_make_resync_request(struct drbd_conf *mdev,
return 1;
}
- /* starting with drbd 8.3.8, we can handle multi-bio EEs,
- * if it should be necessary */
- max_bio_size =
- mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 :
- mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE;
-
+ max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
number = drbd_rs_number_requests(mdev);
if (number == 0)
goto requeue;
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index 007c630904c..b52c9ca146f 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -155,7 +155,7 @@ else \
#if (HD_DELAY > 0)
-#include <asm/i8253.h>
+#include <linux/i8253.h>
unsigned long last_req;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 76c8da78212..4720c7ade0a 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -75,11 +75,11 @@
#include <linux/kthread.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
-
+#include <linux/miscdevice.h>
#include <asm/uaccess.h>
-static LIST_HEAD(loop_devices);
-static DEFINE_MUTEX(loop_devices_mutex);
+static DEFINE_IDR(loop_index_idr);
+static DEFINE_MUTEX(loop_index_mutex);
static int max_part;
static int part_shift;
@@ -722,17 +722,10 @@ static inline int is_loop_device(struct file *file)
static ssize_t loop_attr_show(struct device *dev, char *page,
ssize_t (*callback)(struct loop_device *, char *))
{
- struct loop_device *l, *lo = NULL;
-
- mutex_lock(&loop_devices_mutex);
- list_for_each_entry(l, &loop_devices, lo_list)
- if (disk_to_dev(l->lo_disk) == dev) {
- lo = l;
- break;
- }
- mutex_unlock(&loop_devices_mutex);
+ struct gendisk *disk = dev_to_disk(dev);
+ struct loop_device *lo = disk->private_data;
- return lo ? callback(lo, page) : -EIO;
+ return callback(lo, page);
}
#define LOOP_ATTR_RO(_name) \
@@ -750,10 +743,10 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
ssize_t ret;
char *p = NULL;
- mutex_lock(&lo->lo_ctl_mutex);
+ spin_lock_irq(&lo->lo_lock);
if (lo->lo_backing_file)
p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1);
- mutex_unlock(&lo->lo_ctl_mutex);
+ spin_unlock_irq(&lo->lo_lock);
if (IS_ERR_OR_NULL(p))
ret = PTR_ERR(p);
@@ -1007,7 +1000,9 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
kthread_stop(lo->lo_thread);
+ spin_lock_irq(&lo->lo_lock);
lo->lo_backing_file = NULL;
+ spin_unlock_irq(&lo->lo_lock);
loop_release_xfer(lo);
lo->transfer = NULL;
@@ -1485,13 +1480,22 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
static int lo_open(struct block_device *bdev, fmode_t mode)
{
- struct loop_device *lo = bdev->bd_disk->private_data;
+ struct loop_device *lo;
+ int err = 0;
+
+ mutex_lock(&loop_index_mutex);
+ lo = bdev->bd_disk->private_data;
+ if (!lo) {
+ err = -ENXIO;
+ goto out;
+ }
mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
mutex_unlock(&lo->lo_ctl_mutex);
-
- return 0;
+out:
+ mutex_unlock(&loop_index_mutex);
+ return err;
}
static int lo_release(struct gendisk *disk, fmode_t mode)
@@ -1557,40 +1561,71 @@ int loop_register_transfer(struct loop_func_table *funcs)
return 0;
}
+static int unregister_transfer_cb(int id, void *ptr, void *data)
+{
+ struct loop_device *lo = ptr;
+ struct loop_func_table *xfer = data;
+
+ mutex_lock(&lo->lo_ctl_mutex);
+ if (lo->lo_encryption == xfer)
+ loop_release_xfer(lo);
+ mutex_unlock(&lo->lo_ctl_mutex);
+ return 0;
+}
+
int loop_unregister_transfer(int number)
{
unsigned int n = number;
- struct loop_device *lo;
struct loop_func_table *xfer;
if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
return -EINVAL;
xfer_funcs[n] = NULL;
-
- list_for_each_entry(lo, &loop_devices, lo_list) {
- mutex_lock(&lo->lo_ctl_mutex);
-
- if (lo->lo_encryption == xfer)
- loop_release_xfer(lo);
-
- mutex_unlock(&lo->lo_ctl_mutex);
- }
-
+ idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
return 0;
}
EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);
-static struct loop_device *loop_alloc(int i)
+static int loop_add(struct loop_device **l, int i)
{
struct loop_device *lo;
struct gendisk *disk;
+ int err;
lo = kzalloc(sizeof(*lo), GFP_KERNEL);
- if (!lo)
+ if (!lo) {
+ err = -ENOMEM;
goto out;
+ }
+
+ err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
+ if (err < 0)
+ goto out_free_dev;
+
+ if (i >= 0) {
+ int m;
+
+ /* create specific i in the index */
+ err = idr_get_new_above(&loop_index_idr, lo, i, &m);
+ if (err >= 0 && i != m) {
+ idr_remove(&loop_index_idr, m);
+ err = -EEXIST;
+ }
+ } else if (i == -1) {
+ int m;
+
+ /* get next free nr */
+ err = idr_get_new(&loop_index_idr, lo, &m);
+ if (err >= 0)
+ i = m;
+ } else {
+ err = -EINVAL;
+ }
+ if (err < 0)
+ goto out_free_dev;
lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
if (!lo->lo_queue)
@@ -1611,81 +1646,158 @@ static struct loop_device *loop_alloc(int i)
disk->private_data = lo;
disk->queue = lo->lo_queue;
sprintf(disk->disk_name, "loop%d", i);
- return lo;
+ add_disk(disk);
+ *l = lo;
+ return lo->lo_number;
out_free_queue:
blk_cleanup_queue(lo->lo_queue);
out_free_dev:
kfree(lo);
out:
- return NULL;
+ return err;
}
-static void loop_free(struct loop_device *lo)
+static void loop_remove(struct loop_device *lo)
{
+ del_gendisk(lo->lo_disk);
blk_cleanup_queue(lo->lo_queue);
put_disk(lo->lo_disk);
- list_del(&lo->lo_list);
kfree(lo);
}
-static struct loop_device *loop_init_one(int i)
+static int find_free_cb(int id, void *ptr, void *data)
+{
+ struct loop_device *lo = ptr;
+ struct loop_device **l = data;
+
+ if (lo->lo_state == Lo_unbound) {
+ *l = lo;
+ return 1;
+ }
+ return 0;
+}
+
+static int loop_lookup(struct loop_device **l, int i)
{
struct loop_device *lo;
+ int ret = -ENODEV;
- list_for_each_entry(lo, &loop_devices, lo_list) {
- if (lo->lo_number == i)
- return lo;
+ if (i < 0) {
+ int err;
+
+ err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
+ if (err == 1) {
+ *l = lo;
+ ret = lo->lo_number;
+ }
+ goto out;
}
- lo = loop_alloc(i);
+ /* lookup and return a specific i */
+ lo = idr_find(&loop_index_idr, i);
if (lo) {
- add_disk(lo->lo_disk);
- list_add_tail(&lo->lo_list, &loop_devices);
+ *l = lo;
+ ret = lo->lo_number;
}
- return lo;
-}
-
-static void loop_del_one(struct loop_device *lo)
-{
- del_gendisk(lo->lo_disk);
- loop_free(lo);
+out:
+ return ret;
}
static struct kobject *loop_probe(dev_t dev, int *part, void *data)
{
struct loop_device *lo;
struct kobject *kobj;
+ int err;
- mutex_lock(&loop_devices_mutex);
- lo = loop_init_one(MINOR(dev) >> part_shift);
- kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM);
- mutex_unlock(&loop_devices_mutex);
+ mutex_lock(&loop_index_mutex);
+ err = loop_lookup(&lo, MINOR(dev) >> part_shift);
+ if (err < 0)
+ err = loop_add(&lo, MINOR(dev) >> part_shift);
+ if (err < 0)
+ kobj = ERR_PTR(err);
+ else
+ kobj = get_disk(lo->lo_disk);
+ mutex_unlock(&loop_index_mutex);
*part = 0;
return kobj;
}
+static long loop_control_ioctl(struct file *file, unsigned int cmd,
+ unsigned long parm)
+{
+ struct loop_device *lo;
+ int ret = -ENOSYS;
+
+ mutex_lock(&loop_index_mutex);
+ switch (cmd) {
+ case LOOP_CTL_ADD:
+ ret = loop_lookup(&lo, parm);
+ if (ret >= 0) {
+ ret = -EEXIST;
+ break;
+ }
+ ret = loop_add(&lo, parm);
+ break;
+ case LOOP_CTL_REMOVE:
+ ret = loop_lookup(&lo, parm);
+ if (ret < 0)
+ break;
+ mutex_lock(&lo->lo_ctl_mutex);
+ if (lo->lo_state != Lo_unbound) {
+ ret = -EBUSY;
+ mutex_unlock(&lo->lo_ctl_mutex);
+ break;
+ }
+ if (lo->lo_refcnt > 0) {
+ ret = -EBUSY;
+ mutex_unlock(&lo->lo_ctl_mutex);
+ break;
+ }
+ lo->lo_disk->private_data = NULL;
+ mutex_unlock(&lo->lo_ctl_mutex);
+ idr_remove(&loop_index_idr, lo->lo_number);
+ loop_remove(lo);
+ break;
+ case LOOP_CTL_GET_FREE:
+ ret = loop_lookup(&lo, -1);
+ if (ret >= 0)
+ break;
+ ret = loop_add(&lo, -1);
+ }
+ mutex_unlock(&loop_index_mutex);
+
+ return ret;
+}
+
+static const struct file_operations loop_ctl_fops = {
+ .open = nonseekable_open,
+ .unlocked_ioctl = loop_control_ioctl,
+ .compat_ioctl = loop_control_ioctl,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice loop_misc = {
+ .minor = LOOP_CTRL_MINOR,
+ .name = "loop-control",
+ .fops = &loop_ctl_fops,
+};
+
+MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
+MODULE_ALIAS("devname:loop-control");
+
static int __init loop_init(void)
{
int i, nr;
unsigned long range;
- struct loop_device *lo, *next;
+ struct loop_device *lo;
+ int err;
- /*
- * loop module now has a feature to instantiate underlying device
- * structure on-demand, provided that there is an access dev node.
- * However, this will not work well with user space tool that doesn't
- * know about such "feature". In order to not break any existing
- * tool, we do the following:
- *
- * (1) if max_loop is specified, create that many upfront, and this
- * also becomes a hard limit.
- * (2) if max_loop is not specified, create 8 loop device on module
- * load, user can further extend loop device by create dev node
- * themselves and have kernel automatically instantiate actual
- * device on-demand.
- */
+ err = misc_register(&loop_misc);
+ if (err < 0)
+ return err;
part_shift = 0;
if (max_part > 0) {
@@ -1708,57 +1820,60 @@ static int __init loop_init(void)
if (max_loop > 1UL << (MINORBITS - part_shift))
return -EINVAL;
+ /*
+ * If max_loop is specified, create that many devices upfront.
+ * This also becomes a hard limit. If max_loop is not specified,
+ * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
+ * init time. Loop devices can be requested on-demand with the
+ * /dev/loop-control interface, or be instantiated by accessing
+ * a 'dead' device node.
+ */
if (max_loop) {
nr = max_loop;
range = max_loop << part_shift;
} else {
- nr = 8;
+ nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
range = 1UL << MINORBITS;
}
if (register_blkdev(LOOP_MAJOR, "loop"))
return -EIO;
- for (i = 0; i < nr; i++) {
- lo = loop_alloc(i);
- if (!lo)
- goto Enomem;
- list_add_tail(&lo->lo_list, &loop_devices);
- }
-
- /* point of no return */
-
- list_for_each_entry(lo, &loop_devices, lo_list)
- add_disk(lo->lo_disk);
-
blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
THIS_MODULE, loop_probe, NULL, NULL);
+ /* pre-create number of devices given by config or max_loop */
+ mutex_lock(&loop_index_mutex);
+ for (i = 0; i < nr; i++)
+ loop_add(&lo, i);
+ mutex_unlock(&loop_index_mutex);
+
printk(KERN_INFO "loop: module loaded\n");
return 0;
+}
-Enomem:
- printk(KERN_INFO "loop: out of memory\n");
-
- list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
- loop_free(lo);
+static int loop_exit_cb(int id, void *ptr, void *data)
+{
+ struct loop_device *lo = ptr;
- unregister_blkdev(LOOP_MAJOR, "loop");
- return -ENOMEM;
+ loop_remove(lo);
+ return 0;
}
static void __exit loop_exit(void)
{
unsigned long range;
- struct loop_device *lo, *next;
range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
- list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
- loop_del_one(lo);
+ idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
+ idr_remove_all(&loop_index_idr);
+ idr_destroy(&loop_index_idr);
blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
unregister_blkdev(LOOP_MAJOR, "loop");
+
+ misc_deregister(&loop_misc);
}
module_init(loop_init);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 07a382eaf0a..e133f094ab0 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1206,7 +1206,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
if (!sb)
return 0;
- if (!sb->s_op || !sb->s_op->relocate_blocks)
+ if (!sb->s_op->relocate_blocks)
goto out;
old_block = pkt->sector / (CD_FRAMESIZE >> 9);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 1278098624e..15f65b5f3fc 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -630,6 +630,14 @@ static int rbd_get_num_segments(struct rbd_image_header *header,
}
/*
+ * returns the size of an object in the image
+ */
+static u64 rbd_obj_bytes(struct rbd_image_header *header)
+{
+ return 1 << header->obj_order;
+}
+
+/*
* bio helpers
*/
@@ -1253,6 +1261,35 @@ fail:
return ret;
}
+/*
+ * Request sync osd unwatch
+ */
+static int rbd_req_sync_unwatch(struct rbd_device *dev,
+ const char *obj)
+{
+ struct ceph_osd_req_op *ops;
+
+ int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0);
+ if (ret < 0)
+ return ret;
+
+ ops[0].watch.ver = 0;
+ ops[0].watch.cookie = cpu_to_le64(dev->watch_event->cookie);
+ ops[0].watch.flag = 0;
+
+ ret = rbd_req_sync_op(dev, NULL,
+ CEPH_NOSNAP,
+ 0,
+ CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
+ ops,
+ 1, obj, 0, 0, NULL, NULL, NULL);
+
+ rbd_destroy_ops(ops);
+ ceph_osdc_cancel_event(dev->watch_event);
+ dev->watch_event = NULL;
+ return ret;
+}
+
struct rbd_notify_info {
struct rbd_device *dev;
};
@@ -1736,6 +1773,13 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock);
if (!q)
goto out_disk;
+
+ /* set io sizes to object size */
+ blk_queue_max_hw_sectors(q, rbd_obj_bytes(&rbd_dev->header) / 512ULL);
+ blk_queue_max_segment_size(q, rbd_obj_bytes(&rbd_dev->header));
+ blk_queue_io_min(q, rbd_obj_bytes(&rbd_dev->header));
+ blk_queue_io_opt(q, rbd_obj_bytes(&rbd_dev->header));
+
blk_queue_merge_bvec(q, rbd_merge_bvec);
disk->queue = q;
@@ -2290,7 +2334,7 @@ static void rbd_dev_release(struct device *dev)
ceph_osdc_unregister_linger_request(&rbd_dev->client->osdc,
rbd_dev->watch_request);
if (rbd_dev->watch_event)
- ceph_osdc_cancel_event(rbd_dev->watch_event);
+ rbd_req_sync_unwatch(rbd_dev, rbd_dev->obj_md_name);
rbd_put_client(rbd_dev);
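
All four queue limits added in rbd_init_disk() derive from the same quantity, rbd_obj_bytes() = 1 << obj_order; dividing by 512 converts bytes to 512-byte sectors for blk_queue_max_hw_sectors(). A worked sketch, assuming the common RBD object order of 22 (the function name and numbers are illustrative, not from this patch):

/* illustrative only: limits for an order-22 (4 MiB) object */
static void rbd_limits_example(void)
{
	u64 obj_bytes   = 1ULL << 22;		/* rbd_obj_bytes() for obj_order = 22 */
	u32 max_sectors = obj_bytes / 512ULL;	/* 8192 512-byte sectors              */

	/* the queue is then told: max_hw_sectors = 8192, while segment size,
	 * io_min and io_opt are all 4 MiB, so a single request never exceeds
	 * one object's worth of data */
	(void)max_sectors;
}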
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 773bfa79277..ae3e167e17a 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1184,6 +1184,7 @@ static struct of_device_id swim3_match[] =
{
.compatible = "swim3"
},
+ { /* end of list */ }
};
static struct macio_driver swim3_driver =
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 5cf2993a833..2330a9ad5e9 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -458,7 +458,8 @@ static void end_block_io_op(struct bio *bio, int error)
* (which has the sectors we want, number of them, grant references, etc),
* and transmute it to the block API to hand it over to the proper block disk.
*/
-static int do_block_io_op(struct xen_blkif *blkif)
+static int
+__do_block_io_op(struct xen_blkif *blkif)
{
union blkif_back_rings *blk_rings = &blkif->blk_rings;
struct blkif_request req;
@@ -515,6 +516,23 @@ static int do_block_io_op(struct xen_blkif *blkif)
return more_to_do;
}
+static int
+do_block_io_op(struct xen_blkif *blkif)
+{
+ union blkif_back_rings *blk_rings = &blkif->blk_rings;
+ int more_to_do;
+
+ do {
+ more_to_do = __do_block_io_op(blkif);
+ if (more_to_do)
+ break;
+
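+ /* Nothing more is visible in the ring: re-arm the request event so
+ * the frontend will notify us again, then make one final check in
+ * case a request slipped in while notifications were still off. */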
+ RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
+ } while (more_to_do);
+
+ return more_to_do;
+}
+
/*
* Transmutation of the 'struct blkif_request' to a proper 'struct bio'
* and call the 'submit_bio' to pass it to the underlying storage.
@@ -700,7 +718,6 @@ static void make_response(struct xen_blkif *blkif, u64 id,
struct blkif_response resp;
unsigned long flags;
union blkif_back_rings *blk_rings = &blkif->blk_rings;
- int more_to_do = 0;
int notify;
resp.id = id;
@@ -727,22 +744,7 @@ static void make_response(struct xen_blkif *blkif, u64 id,
}
blk_rings->common.rsp_prod_pvt++;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
- if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
- /*
- * Tail check for pending requests. Allows frontend to avoid
- * notifications if requests are already in flight (lower
- * overheads and promotes batching).
- */
- RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
-
- } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
- more_to_do = 1;
- }
-
spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-
- if (more_to_do)
- blkif_notify_work(blkif);
if (notify)
notify_remote_via_irq(blkif->irq);
}
@@ -824,3 +826,4 @@ static int __init xen_blkif_init(void)
module_init(xen_blkif_init);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("xen-backend:vbd");
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 6cc0db1bf52..3f129b45451 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -684,7 +684,7 @@ again:
err = xenbus_switch_state(dev, XenbusStateConnected);
if (err)
- xenbus_dev_fatal(dev, err, "switching to Connected state",
+ xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
dev->nodename);
return;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b536a9cef91..9ea8c2576c7 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -123,8 +123,8 @@ static DEFINE_SPINLOCK(minor_lock);
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
-#define EMULATED_SD_DISK_MINOR_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET + (4 * 16))
-#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_HD_DISK_NAME_OFFSET + 4)
+#define EMULATED_SD_DISK_MINOR_OFFSET (0)
+#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)
#define DEV_NAME "xvd" /* name in /dev */
@@ -529,7 +529,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
minor = BLKIF_MINOR_EXT(info->vdevice);
nr_parts = PARTS_PER_EXT_DISK;
offset = minor / nr_parts;
- if (xen_hvm_domain() && offset <= EMULATED_HD_DISK_NAME_OFFSET + 4)
+ if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
"emulated IDE disks,\n\t choose an xvd device name"
"from xvde on\n", info->vdevice);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 6c7fd7db6df..fb1975d82a7 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1155,12 +1155,19 @@ static int __devinit ace_probe(struct platform_device *dev)
{
resource_size_t physaddr = 0;
int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
- int id = dev->id;
+ u32 id = dev->id;
int irq = NO_IRQ;
int i;
dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
+ /* device id and bus width */
+ of_property_read_u32(dev->dev.of_node, "port-number", &id);
+ if ((int)id < 0)
+ id = 0;
+ if (of_find_property(dev->dev.of_node, "8-bit", NULL))
+ bus_width = ACE_BUS_WIDTH_8;
+
for (i = 0; i < dev->num_resources; i++) {
if (dev->resource[i].flags & IORESOURCE_MEM)
physaddr = dev->resource[i].start;
@@ -1181,57 +1188,7 @@ static int __devexit ace_remove(struct platform_device *dev)
return 0;
}
-static struct platform_driver ace_platform_driver = {
- .probe = ace_probe,
- .remove = __devexit_p(ace_remove),
- .driver = {
- .owner = THIS_MODULE,
- .name = "xsysace",
- },
-};
-
-/* ---------------------------------------------------------------------
- * OF_Platform Bus Support
- */
-
#if defined(CONFIG_OF)
-static int __devinit ace_of_probe(struct platform_device *op)
-{
- struct resource res;
- resource_size_t physaddr;
- const u32 *id;
- int irq, bus_width, rc;
-
- /* device id */
- id = of_get_property(op->dev.of_node, "port-number", NULL);
-
- /* physaddr */
- rc = of_address_to_resource(op->dev.of_node, 0, &res);
- if (rc) {
- dev_err(&op->dev, "invalid address\n");
- return rc;
- }
- physaddr = res.start;
-
- /* irq */
- irq = irq_of_parse_and_map(op->dev.of_node, 0);
-
- /* bus width */
- bus_width = ACE_BUS_WIDTH_16;
- if (of_find_property(op->dev.of_node, "8-bit", NULL))
- bus_width = ACE_BUS_WIDTH_8;
-
- /* Call the bus-independent setup code */
- return ace_alloc(&op->dev, id ? be32_to_cpup(id) : 0,
- physaddr, irq, bus_width);
-}
-
-static int __devexit ace_of_remove(struct platform_device *op)
-{
- ace_free(&op->dev);
- return 0;
-}
-
/* Match table for of_platform binding */
static const struct of_device_id ace_of_match[] __devinitconst = {
{ .compatible = "xlnx,opb-sysace-1.00.b", },
@@ -1241,34 +1198,20 @@ static const struct of_device_id ace_of_match[] __devinitconst = {
{},
};
MODULE_DEVICE_TABLE(of, ace_of_match);
+#else /* CONFIG_OF */
+#define ace_of_match NULL
+#endif /* CONFIG_OF */
-static struct platform_driver ace_of_driver = {
- .probe = ace_of_probe,
- .remove = __devexit_p(ace_of_remove),
+static struct platform_driver ace_platform_driver = {
+ .probe = ace_probe,
+ .remove = __devexit_p(ace_remove),
.driver = {
- .name = "xsysace",
.owner = THIS_MODULE,
+ .name = "xsysace",
.of_match_table = ace_of_match,
},
};
-/* Registration helpers to keep the number of #ifdefs to a minimum */
-static inline int __init ace_of_register(void)
-{
- pr_debug("xsysace: registering OF binding\n");
- return platform_driver_register(&ace_of_driver);
-}
-
-static inline void __exit ace_of_unregister(void)
-{
- platform_driver_unregister(&ace_of_driver);
-}
-#else /* CONFIG_OF */
-/* CONFIG_OF not enabled; do nothing helpers */
-static inline int __init ace_of_register(void) { return 0; }
-static inline void __exit ace_of_unregister(void) { }
-#endif /* CONFIG_OF */
-
/* ---------------------------------------------------------------------
* Module init/exit routines
*/
@@ -1282,11 +1225,6 @@ static int __init ace_init(void)
goto err_blk;
}
- rc = ace_of_register();
- if (rc)
- goto err_of;
-
- pr_debug("xsysace: registering platform binding\n");
rc = platform_driver_register(&ace_platform_driver);
if (rc)
goto err_plat;
@@ -1295,21 +1233,17 @@ static int __init ace_init(void)
return 0;
err_plat:
- ace_of_unregister();
-err_of:
unregister_blkdev(ace_major, "xsysace");
err_blk:
printk(KERN_ERR "xsysace: registration failed; err=%i\n", rc);
return rc;
}
+module_init(ace_init);
static void __exit ace_exit(void)
{
pr_debug("Unregistering Xilinx SystemACE driver\n");
platform_driver_unregister(&ace_platform_driver);
- ace_of_unregister();
unregister_blkdev(ace_major, "xsysace");
}
-
-module_init(ace_init);
module_exit(ace_exit);
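
The net effect of the xsysace change is a single platform_driver whose .of_match_table serves both the legacy platform binding and the OF binding, with ace_probe() reading "port-number" and "8-bit" itself. The general shape of such a combined driver is roughly the following (driver name and compatible string are placeholders, not taken from this patch):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,my-device", },	/* placeholder compatible */
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, my_of_match);

static int my_probe(struct platform_device *pdev)
{
	/* pdev->dev.of_node is set when the device came from the device
	 * tree and NULL for a legacy platform device; probe handles both */
	return 0;
}

static struct platform_driver my_driver = {
	.probe	= my_probe,
	.driver	= {
		.name		= "my-device",
		.owner		= THIS_MODULE,
		.of_match_table	= my_of_match,	/* defined to NULL when !CONFIG_OF, as above */
	},
};
/* registered from module init with platform_driver_register(&my_driver) */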
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 6bacef368fa..a5854735bb2 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -375,6 +375,11 @@ static int ath3k_probe(struct usb_interface *intf,
/* load patch and sysconfig files for AR3012 */
if (id->driver_info & BTUSB_ATH3012) {
+
+ /* New firmware with patch and sysconfig files already loaded */
+ if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x0001)
+ return -ENODEV;
+
ret = ath3k_load_patch(udev);
if (ret < 0) {
BT_ERR("Loading patch file failed");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c2de8951e3f..91d13a9e8c6 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -54,6 +54,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_BCM92035 0x10
#define BTUSB_BROKEN_ISOC 0x20
#define BTUSB_WRONG_SCO_MTU 0x40
+#define BTUSB_ATH3012 0x80
static struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -110,7 +111,7 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
/* Atheros 3012 with sflash firmware */
- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -914,6 +915,15 @@ static int btusb_probe(struct usb_interface *intf,
if (ignore_sniffer && id->driver_info & BTUSB_SNIFFER)
return -ENODEV;
+ if (id->driver_info & BTUSB_ATH3012) {
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ /* Old firmware would otherwise let ath3k driver load
+ * patch and sysconfig files */
+ if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001)
+ return -ENODEV;
+ }
+
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
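
Taken together, the ath3k and btusb hunks split ownership of the AR3012 by USB revision: ath3k keeps devices whose bcdDevice is 0x0001 (ROM-only parts that still need the patch and sysconfig upload), btusb takes anything newer, and each returns -ENODEV for the other's case. A small sketch of that test, with a hypothetical helper name:

#include <linux/usb.h>

/* hypothetical helper: true if the AR3012 already carries its firmware */
static bool ar3012_has_builtin_firmware(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	/* bcdDevice 0x0001: bare ROM, ath3k must upload patch/sysconfig;
	 * anything newer ships with them, so btusb can bind directly */
	return le16_to_cpu(udev->descriptor.bcdDevice) > 0x0001;
}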
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 75fb965b8f7..f997c27d79e 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1929,11 +1929,17 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
goto out;
s->manufact.len = buf[0] << 8 | buf[1];
- if (s->manufact.len < 0 || s->manufact.len > 2048) {
+ if (s->manufact.len < 0) {
cdinfo(CD_WARNING, "Received invalid manufacture info length"
" (%d)\n", s->manufact.len);
ret = -EIO;
} else {
+ if (s->manufact.len > 2048) {
+ cdinfo(CD_WARNING, "Received invalid manufacture info "
+ "length (%d): truncating to 2048\n",
+ s->manufact.len);
+ s->manufact.len = 2048;
+ }
memcpy(s->manufact.value, &buf[4], s->manufact.len);
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 49502bc5360..423fd56bf61 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -616,5 +616,16 @@ config MSM_SMD_PKT
Enables userspace clients to read and write to some packet SMD
ports via device interface for MSM chipset.
+config TILE_SROM
+ bool "Character-device access via hypervisor to the Tilera SPI ROM"
+ depends on TILE
+ default y
+ ---help---
+ This device provides character-level read-write access
+ to the SROM, typically via the "0", "1", and "2" devices
+ in /dev/srom/. The Tilera hypervisor makes the flash
+ device appear much like a simple EEPROM, and knows
+ how to partition a single ROM for multiple purposes.
+
endmenu
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7a00672bd85..32762ba769c 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -63,3 +63,5 @@ obj-$(CONFIG_RAMOOPS) += ramoops.o
obj-$(CONFIG_JS_RTC) += js-rtc.o
js-rtc-y = rtc.o
+
+obj-$(CONFIG_TILE_SROM) += tile-srom.o
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 999803ce10d..5da67f165af 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -90,9 +90,10 @@
#define G4x_GMCH_SIZE_MASK (0xf << 8)
#define G4x_GMCH_SIZE_1M (0x1 << 8)
#define G4x_GMCH_SIZE_2M (0x3 << 8)
-#define G4x_GMCH_SIZE_VT_1M (0x9 << 8)
-#define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
-#define G4x_GMCH_SIZE_VT_2M (0xc << 8)
+#define G4x_GMCH_SIZE_VT_EN (0x8 << 8)
+#define G4x_GMCH_SIZE_VT_1M (G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_1_5M ((0x2 << 8) | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 548708c4b2b..a7346ab97a3 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -606,7 +606,7 @@ static int apm_suspend_notifier(struct notifier_block *nb,
return NOTIFY_OK;
/* interrupted by signal */
- return NOTIFY_BAD;
+ return notifier_from_errno(err);
case PM_POST_SUSPEND:
/*
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index cf39bc08ce0..0c688232aab 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -212,7 +212,7 @@ static int bsr_add_node(struct device_node *bn)
cur->bsr_minor = i + total_bsr_devs;
cur->bsr_addr = res.start;
- cur->bsr_len = res.end - res.start + 1;
+ cur->bsr_len = resource_size(&res);
cur->bsr_bytes = bsr_bytes[i];
cur->bsr_stride = bsr_stride[i];
cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs);
diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c
index 0e941b57482..6c4f4b5a9dd 100644
--- a/drivers/char/generic_nvram.c
+++ b/drivers/char/generic_nvram.c
@@ -34,12 +34,16 @@ static ssize_t nvram_len;
static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
{
switch (origin) {
+ case 0:
+ break;
case 1:
offset += file->f_pos;
break;
case 2:
offset += nvram_len;
break;
+ default:
+ offset = -1;
}
if (offset < 0)
return -EINVAL;
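
The numeric origin values here map to SEEK_SET (0), SEEK_CUR (1) and SEEK_END (2); the new default arm drives the offset negative so the existing range check rejects an unknown whence with -EINVAL. Written with the symbolic constants, the same pattern reads roughly as follows (my_llseek is an illustrative name, and the sketch assumes it lives in this same file so nvram_len and the fs headers are in scope):

static loff_t my_llseek(struct file *file, loff_t offset, int origin)
{
	switch (origin) {
	case SEEK_SET:			/* offset is absolute           */
		break;
	case SEEK_CUR:			/* relative to current position */
		offset += file->f_pos;
		break;
	case SEEK_END:			/* relative to the device size  */
		offset += nvram_len;
		break;
	default:
		return -EINVAL;
	}
	if (offset < 0)
		return -EINVAL;
	file->f_pos = offset;
	return offset;
}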
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 34d6a1cab8d..0833896cf6f 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -952,7 +952,7 @@ int hpet_alloc(struct hpet_data *hdp)
#ifdef CONFIG_IA64
if (!hpet_clocksource) {
hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
- CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr);
+ clocksource_hpet.archdata.fsys_mmio = hpet_mctr;
clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq);
hpetp->hp_clocksource = &clocksource_hpet;
hpet_clocksource = &clocksource_hpet;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index a60043b3e40..1d2ebc7a494 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -210,3 +210,15 @@ config HW_RANDOM_PICOXCELL
module will be called picoxcell-rng.
If unsure, say Y.
+
+config HW_RANDOM_PPC4XX
+ tristate "PowerPC 4xx generic true random number generator support"
+ depends on HW_RANDOM && PPC && 4xx
+ ---help---
+ This driver provides the kernel-side support for the TRNG hardware
+ found in the security function of some PowerPC 4xx SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ppc4xx-rng.
+
+ If unsure, say N.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 3db4eb8b19c..c88f244c8a7 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
+obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 2016aad8520..1bafb40ec8a 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -19,7 +19,7 @@
Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
Added generic RNG API
- Copyright 2006 Michael Buesch <mbuesch@freenet.de>
+ Copyright 2006 Michael Buesch <m@bues.ch>
Copyright 2005 (c) MontaVista Software, Inc.
Please read Documentation/hw_random.txt for details on use.
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index ac6739e085e..c3de70de00d 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -1,6 +1,6 @@
/* n2-drv.c: Niagara-2 RNG driver.
*
- * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2008, 2011 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
@@ -22,8 +22,8 @@
#define DRV_MODULE_NAME "n2rng"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "0.1"
-#define DRV_MODULE_RELDATE "May 15, 2008"
+#define DRV_MODULE_VERSION "0.2"
+#define DRV_MODULE_RELDATE "July 27, 2011"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -623,14 +623,14 @@ static const struct of_device_id n2rng_match[];
static int __devinit n2rng_probe(struct platform_device *op)
{
const struct of_device_id *match;
- int victoria_falls;
+ int multi_capable;
int err = -ENOMEM;
struct n2rng *np;
match = of_match_device(n2rng_match, &op->dev);
if (!match)
return -EINVAL;
- victoria_falls = (match->data != NULL);
+ multi_capable = (match->data != NULL);
n2rng_driver_version();
np = kzalloc(sizeof(*np), GFP_KERNEL);
@@ -640,8 +640,8 @@ static int __devinit n2rng_probe(struct platform_device *op)
INIT_DELAYED_WORK(&np->work, n2rng_work);
- if (victoria_falls)
- np->flags |= N2RNG_FLAG_VF;
+ if (multi_capable)
+ np->flags |= N2RNG_FLAG_MULTI;
err = -ENODEV;
np->hvapi_major = 2;
@@ -658,10 +658,10 @@ static int __devinit n2rng_probe(struct platform_device *op)
}
}
- if (np->flags & N2RNG_FLAG_VF) {
+ if (np->flags & N2RNG_FLAG_MULTI) {
if (np->hvapi_major < 2) {
- dev_err(&op->dev, "VF RNG requires HVAPI major "
- "version 2 or later, got %lu\n",
+ dev_err(&op->dev, "multi-unit-capable RNG requires "
+ "HVAPI major version 2 or later, got %lu\n",
np->hvapi_major);
goto out_hvapi_unregister;
}
@@ -688,8 +688,8 @@ static int __devinit n2rng_probe(struct platform_device *op)
goto out_free_units;
dev_info(&op->dev, "Found %s RNG, units: %d\n",
- ((np->flags & N2RNG_FLAG_VF) ?
- "Victoria Falls" : "Niagara2"),
+ ((np->flags & N2RNG_FLAG_MULTI) ?
+ "multi-unit-capable" : "single-unit"),
np->num_units);
np->hwrng.name = "n2rng";
@@ -751,6 +751,11 @@ static const struct of_device_id n2rng_match[] = {
.compatible = "SUNW,vf-rng",
.data = (void *) 1,
},
+ {
+ .name = "random-number-generator",
+ .compatible = "SUNW,kt-rng",
+ .data = (void *) 1,
+ },
{},
};
MODULE_DEVICE_TABLE(of, n2rng_match);
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
index 4bea07f3097..f244ac89087 100644
--- a/drivers/char/hw_random/n2rng.h
+++ b/drivers/char/hw_random/n2rng.h
@@ -68,7 +68,7 @@ struct n2rng {
struct platform_device *op;
unsigned long flags;
-#define N2RNG_FLAG_VF 0x00000001 /* Victoria Falls RNG, else N2 */
+#define N2RNG_FLAG_MULTI 0x00000001 /* Multi-unit capable RNG */
#define N2RNG_FLAG_CONTROL 0x00000002 /* Operating in control domain */
#define N2RNG_FLAG_READY 0x00000008 /* Ready for hw-rng layer */
#define N2RNG_FLAG_SHUTDOWN 0x00000010 /* Driver unregistering */
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index dd1d143eb8e..52e08ca3ccd 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -55,7 +55,7 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
ret = amba_request_regions(dev, dev->dev.init_name);
if (ret)
- return ret;
+ goto out_clk;
ret = -ENOMEM;
base = ioremap(dev->res.start, resource_size(&dev->res));
if (!base)
@@ -70,6 +70,7 @@ out_unmap:
iounmap(base);
out_release:
amba_release_regions(dev);
+out_clk:
clk_disable(rng_clk);
clk_put(rng_clk);
return ret;
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 2cc755a6430..b757fac3cd1 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -113,8 +113,10 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
+ if (!res) {
+ ret = -ENOENT;
+ goto err_region;
+ }
if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
ret = -EBUSY;
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
new file mode 100644
index 00000000000..b8afa6a4ff6
--- /dev/null
+++ b/drivers/char/hw_random/ppc4xx-rng.c
@@ -0,0 +1,156 @@
+/*
+ * Generic PowerPC 44x RNG driver
+ *
+ * Copyright 2011 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/of_platform.h>
+#include <asm/io.h>
+
+#define PPC4XX_TRNG_DEV_CTRL 0x60080
+
+#define PPC4XX_TRNGE 0x00020000
+#define PPC4XX_TRNG_CTRL 0x0008
+#define PPC4XX_TRNG_CTRL_DALM 0x20
+#define PPC4XX_TRNG_STAT 0x0004
+#define PPC4XX_TRNG_STAT_B 0x1
+#define PPC4XX_TRNG_DATA 0x0000
+
+#define MODULE_NAME "ppc4xx_rng"
+
+static int ppc4xx_rng_data_present(struct hwrng *rng, int wait)
+{
+ void __iomem *rng_regs = (void __iomem *) rng->priv;
+ int busy, i, present = 0;
+
+ for (i = 0; i < 20; i++) {
+ busy = (in_le32(rng_regs + PPC4XX_TRNG_STAT) & PPC4XX_TRNG_STAT_B);
+ if (!busy || !wait) {
+ present = 1;
+ break;
+ }
+ udelay(10);
+ }
+ return present;
+}
+
+static int ppc4xx_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ void __iomem *rng_regs = (void __iomem *) rng->priv;
+ *data = in_le32(rng_regs + PPC4XX_TRNG_DATA);
+ return 4;
+}
+
+static int ppc4xx_rng_enable(int enable)
+{
+ struct device_node *ctrl;
+ void __iomem *ctrl_reg;
+ int err = 0;
+ u32 val;
+
+ /* Find the main crypto device node and map it to turn the TRNG on */
+ ctrl = of_find_compatible_node(NULL, NULL, "amcc,ppc4xx-crypto");
+ if (!ctrl)
+ return -ENODEV;
+
+ ctrl_reg = of_iomap(ctrl, 0);
+ if (!ctrl_reg) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ val = in_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL);
+
+ if (enable)
+ val |= PPC4XX_TRNGE;
+ else
+ val = val & ~PPC4XX_TRNGE;
+
+ out_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL, val);
+ iounmap(ctrl_reg);
+
+out:
+ of_node_put(ctrl);
+
+ return err;
+}
+
+static struct hwrng ppc4xx_rng = {
+ .name = MODULE_NAME,
+ .data_present = ppc4xx_rng_data_present,
+ .data_read = ppc4xx_rng_data_read,
+};
+
+static int __devinit ppc4xx_rng_probe(struct platform_device *dev)
+{
+ void __iomem *rng_regs;
+ int err = 0;
+
+ rng_regs = of_iomap(dev->dev.of_node, 0);
+ if (!rng_regs)
+ return -ENODEV;
+
+ err = ppc4xx_rng_enable(1);
+ if (err) {
+ iounmap(rng_regs);
+ return err;
+ }
+
+ out_le32(rng_regs + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM);
+ ppc4xx_rng.priv = (unsigned long) rng_regs;
+
+ err = hwrng_register(&ppc4xx_rng);
+ if (err) {
+ ppc4xx_rng_enable(0);
+ iounmap(rng_regs);
+ }
+
+ return err;
+}
+
+static int __devexit ppc4xx_rng_remove(struct platform_device *dev)
+{
+ void __iomem *rng_regs = (void __iomem *) ppc4xx_rng.priv;
+
+ hwrng_unregister(&ppc4xx_rng);
+ ppc4xx_rng_enable(0);
+ iounmap(rng_regs);
+
+ return 0;
+}
+
+static struct of_device_id ppc4xx_rng_match[] = {
+ { .compatible = "ppc4xx-rng", },
+ { .compatible = "amcc,ppc460ex-rng", },
+ { .compatible = "amcc,ppc440epx-rng", },
+ {},
+};
+
+static struct platform_driver ppc4xx_rng_driver = {
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = ppc4xx_rng_match,
+ },
+ .probe = ppc4xx_rng_probe,
+ .remove = ppc4xx_rng_remove,
+};
+
+static int __init ppc4xx_rng_init(void)
+{
+ return platform_driver_register(&ppc4xx_rng_driver);
+}
+module_init(ppc4xx_rng_init);
+
+static void __exit ppc4xx_rng_exit(void)
+{
+ platform_driver_unregister(&ppc4xx_rng_driver);
+}
+module_exit(ppc4xx_rng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Josh Boyer <jwboyer@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("HW RNG driver for PPC 4xx processors");
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index a94e930575f..a8428e6f64a 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -100,8 +100,7 @@ static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
timeriomem_rng_data = pdev->dev.platform_data;
- timeriomem_rng_data->address = ioremap(res->start,
- res->end - res->start + 1);
+ timeriomem_rng_data->address = ioremap(res->start, resource_size(res));
if (!timeriomem_rng_data->address)
return -EIO;
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 320668f4c3a..3302586655c 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -52,7 +52,7 @@
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#ifdef CONFIG_X86
/*
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 25d139c9dbe..5c0d96a820f 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -46,7 +46,7 @@
#include <asm/page.h>
#include <asm/system.h>
#include <asm/pgtable.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/uncached.h>
#include <asm/sn/addrs.h>
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 166f1e7aaa7..da3cfee782d 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -224,6 +224,8 @@ static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
case 2:
offset += NVRAM_BYTES;
break;
+ default:
+ return -EINVAL;
}
return (offset >= 0) ? (file->f_pos = offset) : -EINVAL;
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index 85c004a518e..d0c57c2e290 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -101,12 +101,16 @@ static loff_t ps3flash_llseek(struct file *file, loff_t offset, int origin)
mutex_lock(&file->f_mapping->host->i_mutex);
switch (origin) {
+ case 0:
+ break;
case 1:
offset += file->f_pos;
break;
case 2:
offset += dev->regions[dev->region_idx].size*dev->blk_size;
break;
+ default:
+ offset = -1;
}
if (offset < 0) {
res = -EINVAL;
@@ -305,9 +309,14 @@ static int ps3flash_flush(struct file *file, fl_owner_t id)
return ps3flash_writeback(ps3flash_dev);
}
-static int ps3flash_fsync(struct file *file, int datasync)
+static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- return ps3flash_writeback(ps3flash_dev);
+ struct inode *inode = file->f_path.dentry->d_inode;
+ int err;
+ mutex_lock(&inode->i_mutex);
+ err = ps3flash_writeback(ps3flash_dev);
+ mutex_unlock(&inode->i_mutex);
+ return err;
}
static irqreturn_t ps3flash_interrupt(int irq, void *data)
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index 1a9f5f6d6ac..810aff9e750 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -19,18 +19,26 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/kmsg_dump.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/ramoops.h>
#define RAMOOPS_KERNMSG_HDR "===="
+#define MIN_MEM_SIZE 4096UL
-#define RECORD_SIZE 4096UL
+static ulong record_size = MIN_MEM_SIZE;
+module_param(record_size, ulong, 0400);
+MODULE_PARM_DESC(record_size,
+ "size of each dump done on oops/panic");
static ulong mem_address;
module_param(mem_address, ulong, 0400);
@@ -52,10 +60,15 @@ static struct ramoops_context {
void *virt_addr;
phys_addr_t phys_addr;
unsigned long size;
+ unsigned long record_size;
+ int dump_oops;
int count;
int max_count;
} oops_cxt;
+static struct platform_device *dummy;
+static struct ramoops_platform_data *dummy_data;
+
static void ramoops_do_dump(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
const char *s2, unsigned long l2)
@@ -74,13 +87,13 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
return;
/* Only dump oopses if dump_oops is set */
- if (reason == KMSG_DUMP_OOPS && !dump_oops)
+ if (reason == KMSG_DUMP_OOPS && !cxt->dump_oops)
return;
- buf = cxt->virt_addr + (cxt->count * RECORD_SIZE);
+ buf = cxt->virt_addr + (cxt->count * cxt->record_size);
buf_orig = buf;
- memset(buf, '\0', RECORD_SIZE);
+ memset(buf, '\0', cxt->record_size);
res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
buf += res;
do_gettimeofday(&timestamp);
@@ -88,8 +101,8 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
buf += res;
hdr_size = buf - buf_orig;
- l2_cpy = min(l2, RECORD_SIZE - hdr_size);
- l1_cpy = min(l1, RECORD_SIZE - hdr_size - l2_cpy);
+ l2_cpy = min(l2, cxt->record_size - hdr_size);
+ l1_cpy = min(l1, cxt->record_size - hdr_size - l2_cpy);
s2_start = l2 - l2_cpy;
s1_start = l1 - l1_cpy;
@@ -106,44 +119,59 @@ static int __init ramoops_probe(struct platform_device *pdev)
struct ramoops_context *cxt = &oops_cxt;
int err = -EINVAL;
- if (pdata) {
- mem_size = pdata->mem_size;
- mem_address = pdata->mem_address;
+ if (!pdata->mem_size || !pdata->record_size) {
+ pr_err("The memory size and the record size must be "
+ "non-zero\n");
+ goto fail3;
}
- if (!mem_size) {
- printk(KERN_ERR "ramoops: invalid size specification");
+ pdata->mem_size = rounddown_pow_of_two(pdata->mem_size);
+ pdata->record_size = rounddown_pow_of_two(pdata->record_size);
+
+ /* Check for the minimum memory size */
+ if (pdata->mem_size < MIN_MEM_SIZE &&
+ pdata->record_size < MIN_MEM_SIZE) {
+ pr_err("memory size too small, minium is %lu\n", MIN_MEM_SIZE);
goto fail3;
}
- rounddown_pow_of_two(mem_size);
-
- if (mem_size < RECORD_SIZE) {
- printk(KERN_ERR "ramoops: size too small");
+ if (pdata->mem_size < pdata->record_size) {
+ pr_err("The memory size must be larger than the "
+ "records size\n");
goto fail3;
}
- cxt->max_count = mem_size / RECORD_SIZE;
+ cxt->max_count = pdata->mem_size / pdata->record_size;
cxt->count = 0;
- cxt->size = mem_size;
- cxt->phys_addr = mem_address;
+ cxt->size = pdata->mem_size;
+ cxt->phys_addr = pdata->mem_address;
+ cxt->record_size = pdata->record_size;
+ cxt->dump_oops = pdata->dump_oops;
+ /*
+ * Update the module parameter variables as well so they are visible
+ * through /sys/module/ramoops/parameters/
+ */
+ mem_size = pdata->mem_size;
+ mem_address = pdata->mem_address;
+ record_size = pdata->record_size;
+ dump_oops = pdata->dump_oops;
if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
- printk(KERN_ERR "ramoops: request mem region failed");
+ pr_err("request mem region failed\n");
err = -EINVAL;
goto fail3;
}
cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size);
if (!cxt->virt_addr) {
- printk(KERN_ERR "ramoops: ioremap failed");
+ pr_err("ioremap failed\n");
goto fail2;
}
cxt->dump.dump = ramoops_do_dump;
err = kmsg_dump_register(&cxt->dump);
if (err) {
- printk(KERN_ERR "ramoops: registering kmsg dumper failed");
+ pr_err("registering kmsg dumper failed\n");
goto fail1;
}
@@ -162,7 +190,7 @@ static int __exit ramoops_remove(struct platform_device *pdev)
struct ramoops_context *cxt = &oops_cxt;
if (kmsg_dump_unregister(&cxt->dump) < 0)
- printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper");
+ pr_warn("could not unregister kmsg_dumper\n");
iounmap(cxt->virt_addr);
release_mem_region(cxt->phys_addr, cxt->size);
@@ -179,12 +207,39 @@ static struct platform_driver ramoops_driver = {
static int __init ramoops_init(void)
{
- return platform_driver_probe(&ramoops_driver, ramoops_probe);
+ int ret;
+ ret = platform_driver_probe(&ramoops_driver, ramoops_probe);
+ if (ret == -ENODEV) {
+ /*
+ * If we didn't find a platform device, we use module parameters
+ * building platform data on the fly.
+ */
+ pr_info("platform device not found, using module parameters\n");
+ dummy_data = kzalloc(sizeof(struct ramoops_platform_data),
+ GFP_KERNEL);
+ if (!dummy_data)
+ return -ENOMEM;
+ dummy_data->mem_size = mem_size;
+ dummy_data->mem_address = mem_address;
+ dummy_data->record_size = record_size;
+ dummy_data->dump_oops = dump_oops;
+ dummy = platform_create_bundle(&ramoops_driver, ramoops_probe,
+ NULL, 0, dummy_data,
+ sizeof(struct ramoops_platform_data));
+
+ if (IS_ERR(dummy))
+ ret = PTR_ERR(dummy);
+ else
+ ret = 0;
+ }
+
+ return ret;
}
static void __exit ramoops_exit(void)
{
platform_driver_unregister(&ramoops_driver);
+ kfree(dummy_data);
}
module_init(ramoops_init);
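
Once mem_size and record_size are rounded down to powers of two, the reserved region is treated as max_count equal slots and each dump is written at count * record_size, with count wrapping back to the first slot once all of them are used (handled elsewhere in the driver). A small worked sketch of that layout (the function name and the 64 KiB figure are illustrative only):

/* illustrative: offset of dump number n inside the ramoops region */
static unsigned long ramoops_slot_offset(unsigned long mem_size,
					 unsigned long record_size,
					 int n)
{
	int max_count = mem_size / record_size;	/* e.g. 64 KiB / 4 KiB = 16 slots */

	return (n % max_count) * record_size;
}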
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d4ddeba5668..c35a785005b 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1300,330 +1300,14 @@ ctl_table random_table[] = {
};
#endif /* CONFIG_SYSCTL */
-/********************************************************************
- *
- * Random functions for networking
- *
- ********************************************************************/
-
-/*
- * TCP initial sequence number picking. This uses the random number
- * generator to pick an initial secret value. This value is hashed
- * along with the TCP endpoint information to provide a unique
- * starting point for each pair of TCP endpoints. This defeats
- * attacks which rely on guessing the initial TCP sequence number.
- * This algorithm was suggested by Steve Bellovin.
- *
- * Using a very strong hash was taking an appreciable amount of the total
- * TCP connection establishment time, so this is a weaker hash,
- * compensated for by changing the secret periodically.
- */
-
-/* F, G and H are basic MD4 functions: selection, majority, parity */
-#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
-#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
-#define H(x, y, z) ((x) ^ (y) ^ (z))
-
-/*
- * The generic round function. The application is so specific that
- * we don't bother protecting all the arguments with parens, as is generally
- * good macro practice, in favor of extra legibility.
- * Rotation is separate from addition to prevent recomputation
- */
-#define ROUND(f, a, b, c, d, x, s) \
- (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
-#define K1 0
-#define K2 013240474631UL
-#define K3 015666365641UL
+static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-
-static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
+static int __init random_int_secret_init(void)
{
- __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
-
- /* Round 1 */
- ROUND(F, a, b, c, d, in[ 0] + K1, 3);
- ROUND(F, d, a, b, c, in[ 1] + K1, 7);
- ROUND(F, c, d, a, b, in[ 2] + K1, 11);
- ROUND(F, b, c, d, a, in[ 3] + K1, 19);
- ROUND(F, a, b, c, d, in[ 4] + K1, 3);
- ROUND(F, d, a, b, c, in[ 5] + K1, 7);
- ROUND(F, c, d, a, b, in[ 6] + K1, 11);
- ROUND(F, b, c, d, a, in[ 7] + K1, 19);
- ROUND(F, a, b, c, d, in[ 8] + K1, 3);
- ROUND(F, d, a, b, c, in[ 9] + K1, 7);
- ROUND(F, c, d, a, b, in[10] + K1, 11);
- ROUND(F, b, c, d, a, in[11] + K1, 19);
-
- /* Round 2 */
- ROUND(G, a, b, c, d, in[ 1] + K2, 3);
- ROUND(G, d, a, b, c, in[ 3] + K2, 5);
- ROUND(G, c, d, a, b, in[ 5] + K2, 9);
- ROUND(G, b, c, d, a, in[ 7] + K2, 13);
- ROUND(G, a, b, c, d, in[ 9] + K2, 3);
- ROUND(G, d, a, b, c, in[11] + K2, 5);
- ROUND(G, c, d, a, b, in[ 0] + K2, 9);
- ROUND(G, b, c, d, a, in[ 2] + K2, 13);
- ROUND(G, a, b, c, d, in[ 4] + K2, 3);
- ROUND(G, d, a, b, c, in[ 6] + K2, 5);
- ROUND(G, c, d, a, b, in[ 8] + K2, 9);
- ROUND(G, b, c, d, a, in[10] + K2, 13);
-
- /* Round 3 */
- ROUND(H, a, b, c, d, in[ 3] + K3, 3);
- ROUND(H, d, a, b, c, in[ 7] + K3, 9);
- ROUND(H, c, d, a, b, in[11] + K3, 11);
- ROUND(H, b, c, d, a, in[ 2] + K3, 15);
- ROUND(H, a, b, c, d, in[ 6] + K3, 3);
- ROUND(H, d, a, b, c, in[10] + K3, 9);
- ROUND(H, c, d, a, b, in[ 1] + K3, 11);
- ROUND(H, b, c, d, a, in[ 5] + K3, 15);
- ROUND(H, a, b, c, d, in[ 9] + K3, 3);
- ROUND(H, d, a, b, c, in[ 0] + K3, 9);
- ROUND(H, c, d, a, b, in[ 4] + K3, 11);
- ROUND(H, b, c, d, a, in[ 8] + K3, 15);
-
- return buf[1] + b; /* "most hashed" word */
- /* Alternative: return sum of all words? */
-}
-#endif
-
-#undef ROUND
-#undef F
-#undef G
-#undef H
-#undef K1
-#undef K2
-#undef K3
-
-/* This should not be decreased so low that ISNs wrap too fast. */
-#define REKEY_INTERVAL (300 * HZ)
-/*
- * Bit layout of the tcp sequence numbers (before adding current time):
- * bit 24-31: increased after every key exchange
- * bit 0-23: hash(source,dest)
- *
- * The implementation is similar to the algorithm described
- * in the Appendix of RFC 1185, except that
- * - it uses a 1 MHz clock instead of a 250 kHz clock
- * - it performs a rekey every 5 minutes, which is equivalent
- * to a (source,dest) tulple dependent forward jump of the
- * clock by 0..2^(HASH_BITS+1)
- *
- * Thus the average ISN wraparound time is 68 minutes instead of
- * 4.55 hours.
- *
- * SMP cleanup and lock avoidance with poor man's RCU.
- * Manfred Spraul <manfred@colorfullife.com>
- *
- */
-#define COUNT_BITS 8
-#define COUNT_MASK ((1 << COUNT_BITS) - 1)
-#define HASH_BITS 24
-#define HASH_MASK ((1 << HASH_BITS) - 1)
-
-static struct keydata {
- __u32 count; /* already shifted to the final position */
- __u32 secret[12];
-} ____cacheline_aligned ip_keydata[2];
-
-static unsigned int ip_cnt;
-
-static void rekey_seq_generator(struct work_struct *work);
-
-static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
-
-/*
- * Lock avoidance:
- * The ISN generation runs lockless - it's just a hash over random data.
- * State changes happen every 5 minutes when the random key is replaced.
- * Synchronization is performed by having two copies of the hash function
- * state and rekey_seq_generator always updates the inactive copy.
- * The copy is then activated by updating ip_cnt.
- * The implementation breaks down if someone blocks the thread
- * that processes SYN requests for more than 5 minutes. Should never
- * happen, and even if that happens only a not perfectly compliant
- * ISN is generated, nothing fatal.
- */
-static void rekey_seq_generator(struct work_struct *work)
-{
- struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
-
- get_random_bytes(keyptr->secret, sizeof(keyptr->secret));
- keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS;
- smp_wmb();
- ip_cnt++;
- schedule_delayed_work(&rekey_work,
- round_jiffies_relative(REKEY_INTERVAL));
-}
-
-static inline struct keydata *get_keyptr(void)
-{
- struct keydata *keyptr = &ip_keydata[ip_cnt & 1];
-
- smp_rmb();
-
- return keyptr;
-}
-
-static __init int seqgen_init(void)
-{
- rekey_seq_generator(NULL);
+ get_random_bytes(random_int_secret, sizeof(random_int_secret));
return 0;
}
-late_initcall(seqgen_init);
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
- __be16 sport, __be16 dport)
-{
- __u32 seq;
- __u32 hash[12];
- struct keydata *keyptr = get_keyptr();
-
- /* The procedure is the same as for IPv4, but addresses are longer.
- * Thus we must use twothirdsMD4Transform.
- */
-
- memcpy(hash, saddr, 16);
- hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
- memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
-
- seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
- seq += keyptr->count;
-
- seq += ktime_to_ns(ktime_get_real());
-
- return seq;
-}
-EXPORT_SYMBOL(secure_tcpv6_sequence_number);
-#endif
-
-/* The code below is shamelessly stolen from secure_tcp_sequence_number().
- * All blames to Andrey V. Savochkin <saw@msu.ru>.
- */
-__u32 secure_ip_id(__be32 daddr)
-{
- struct keydata *keyptr;
- __u32 hash[4];
-
- keyptr = get_keyptr();
-
- /*
- * Pick a unique starting offset for each IP destination.
- * The dest ip address is placed in the starting vector,
- * which is then hashed with random data.
- */
- hash[0] = (__force __u32)daddr;
- hash[1] = keyptr->secret[9];
- hash[2] = keyptr->secret[10];
- hash[3] = keyptr->secret[11];
-
- return half_md4_transform(hash, keyptr->secret);
-}
-
-#ifdef CONFIG_INET
-
-__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport)
-{
- __u32 seq;
- __u32 hash[4];
- struct keydata *keyptr = get_keyptr();
-
- /*
- * Pick a unique starting offset for each TCP connection endpoints
- * (saddr, daddr, sport, dport).
- * Note that the words are placed into the starting vector, which is
- * then mixed with a partial MD4 over random data.
- */
- hash[0] = (__force u32)saddr;
- hash[1] = (__force u32)daddr;
- hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
- hash[3] = keyptr->secret[11];
-
- seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
- seq += keyptr->count;
- /*
- * As close as possible to RFC 793, which
- * suggests using a 250 kHz clock.
- * Further reading shows this assumes 2 Mb/s networks.
- * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
- * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
- * we also need to limit the resolution so that the u32 seq
- * overlaps less than one time per MSL (2 minutes).
- * Choosing a clock of 64 ns period is OK. (period of 274 s)
- */
- seq += ktime_to_ns(ktime_get_real()) >> 6;
-
- return seq;
-}
-
-/* Generate secure starting point for ephemeral IPV4 transport port search */
-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
-{
- struct keydata *keyptr = get_keyptr();
- u32 hash[4];
-
- /*
- * Pick a unique starting offset for each ephemeral port search
- * (saddr, daddr, dport) and 48bits of random data.
- */
- hash[0] = (__force u32)saddr;
- hash[1] = (__force u32)daddr;
- hash[2] = (__force u32)dport ^ keyptr->secret[10];
- hash[3] = keyptr->secret[11];
-
- return half_md4_transform(hash, keyptr->secret);
-}
-EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
- __be16 dport)
-{
- struct keydata *keyptr = get_keyptr();
- u32 hash[12];
-
- memcpy(hash, saddr, 16);
- hash[4] = (__force u32)dport;
- memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
-
- return twothirdsMD4Transform((const __u32 *)daddr, hash);
-}
-#endif
-
-#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
-/* Similar to secure_tcp_sequence_number but generate a 48 bit value
- * bit's 32-47 increase every key exchange
- * 0-31 hash(source, dest)
- */
-u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport)
-{
- u64 seq;
- __u32 hash[4];
- struct keydata *keyptr = get_keyptr();
-
- hash[0] = (__force u32)saddr;
- hash[1] = (__force u32)daddr;
- hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
- hash[3] = keyptr->secret[11];
-
- seq = half_md4_transform(hash, keyptr->secret);
- seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
-
- seq += ktime_to_ns(ktime_get_real());
- seq &= (1ull << 48) - 1;
-
- return seq;
-}
-EXPORT_SYMBOL(secure_dccp_sequence_number);
-#endif
-
-#endif /* CONFIG_INET */
-
+late_initcall(random_int_secret_init);
/*
* Get a random word for internal kernel use only. Similar to urandom but
@@ -1631,17 +1315,15 @@ EXPORT_SYMBOL(secure_dccp_sequence_number);
* value is not cryptographically secure but for several uses the cost of
* depleting entropy is too high
*/
-DEFINE_PER_CPU(__u32 [4], get_random_int_hash);
+DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
unsigned int get_random_int(void)
{
- struct keydata *keyptr;
__u32 *hash = get_cpu_var(get_random_int_hash);
- int ret;
+ unsigned int ret;
- keyptr = get_keyptr();
hash[0] += current->pid + jiffies + get_cycles();
-
- ret = half_md4_transform(hash, keyptr->secret);
+ md5_transform(hash, random_int_secret);
+ ret = hash[0];
put_cpu_var(get_random_int_hash);
return ret;
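
Reassembled from the hunk above, the new helper keeps a 4-word (MD5_DIGEST_WORDS) state per CPU, stirs in pid, jiffies and the cycle counter, and runs a single md5_transform() keyed by the 16-word (MD5_MESSAGE_BYTES / 4) boot-time secret; the result is roughly:

static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;

DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);

unsigned int get_random_int(void)
{
	__u32 *hash = get_cpu_var(get_random_int_hash);
	unsigned int ret;

	hash[0] += current->pid + jiffies + get_cycles();
	md5_transform(hash, random_int_secret);	/* one MD5 block, keyed by the secret */
	ret = hash[0];
	put_cpu_var(get_random_int_hash);

	return ret;
}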
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
new file mode 100644
index 00000000000..cf3ee008dca
--- /dev/null
+++ b/drivers/char/tile-srom.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * SPI Flash ROM driver
+ *
+ * This source code is derived from code provided in "Linux Device
+ * Drivers, Third Edition", by Jonathan Corbet, Alessandro Rubini, and
+ * Greg Kroah-Hartman, published by O'Reilly Media, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/proc_fs.h>
+#include <linux/fcntl.h> /* O_ACCMODE */
+#include <linux/aio.h>
+#include <linux/pagemap.h>
+#include <linux/hugetlb.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <hv/hypervisor.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <hv/drv_srom_intf.h>
+
+/*
+ * Size of our hypervisor I/O requests. We break up large transfers
+ * so that we don't spend large uninterrupted spans of time in the
+ * hypervisor. Erasing an SROM sector takes a significant fraction of
+ * a second, so if we allowed the user to, say, do one I/O to write the
+ * entire ROM, we'd get soft lockup timeouts, or worse.
+ */
+#define SROM_CHUNK_SIZE ((size_t)4096)
+
+/*
+ * When hypervisor is busy (e.g. erasing), poll the status periodically.
+ */
+
+/*
+ * Interval to poll the state in msec
+ */
+#define SROM_WAIT_TRY_INTERVAL 20
+
+/*
+ * Maximum times to poll the state
+ */
+#define SROM_MAX_WAIT_TRY_TIMES 1000
+
+struct srom_dev {
+ int hv_devhdl; /* Handle for hypervisor device */
+ u32 total_size; /* Size of this device */
+ u32 sector_size; /* Size of a sector */
+ u32 page_size; /* Size of a page */
+ struct mutex lock; /* Allow only one accessor at a time */
+};
+
+static int srom_major; /* Dynamic major by default */
+module_param(srom_major, int, 0);
+MODULE_AUTHOR("Tilera Corporation");
+MODULE_LICENSE("GPL");
+
+static int srom_devs; /* Number of SROM partitions */
+static struct cdev srom_cdev;
+static struct class *srom_class;
+static struct srom_dev *srom_devices;
+
+/*
+ * Handle calling the hypervisor and managing EAGAIN/EBUSY.
+ */
+
+static ssize_t _srom_read(int hv_devhdl, void *buf,
+ loff_t off, size_t count)
+{
+ int retval, retries = SROM_MAX_WAIT_TRY_TIMES;
+ for (;;) {
+ retval = hv_dev_pread(hv_devhdl, 0, (HV_VirtAddr)buf,
+ count, off);
+ if (retval >= 0)
+ return retval;
+ if (retval == HV_EAGAIN)
+ continue;
+ if (retval == HV_EBUSY && --retries > 0) {
+ msleep(SROM_WAIT_TRY_INTERVAL);
+ continue;
+ }
+ pr_err("_srom_read: error %d\n", retval);
+ return -EIO;
+ }
+}
+
+static ssize_t _srom_write(int hv_devhdl, const void *buf,
+ loff_t off, size_t count)
+{
+ int retval, retries = SROM_MAX_WAIT_TRY_TIMES;
+ for (;;) {
+ retval = hv_dev_pwrite(hv_devhdl, 0, (HV_VirtAddr)buf,
+ count, off);
+ if (retval >= 0)
+ return retval;
+ if (retval == HV_EAGAIN)
+ continue;
+ if (retval == HV_EBUSY && --retries > 0) {
+ msleep(SROM_WAIT_TRY_INTERVAL);
+ continue;
+ }
+ pr_err("_srom_write: error %d\n", retval);
+ return -EIO;
+ }
+}
+
+/**
+ * srom_open() - Device open routine.
+ * @inode: Inode for this device.
+ * @filp: File for this specific open of the device.
+ *
+ * Returns zero, or an error code.
+ */
+static int srom_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = &srom_devices[iminor(inode)];
+ return 0;
+}
+
+
+/**
+ * srom_release() - Device release routine.
+ * @inode: Inode for this device.
+ * @filp: File for this specific open of the device.
+ *
+ * Returns zero, or an error code.
+ */
+static int srom_release(struct inode *inode, struct file *filp)
+{
+ struct srom_dev *srom = filp->private_data;
+ char dummy;
+
+ /* Make sure we've flushed anything written to the ROM. */
+ mutex_lock(&srom->lock);
+ if (srom->hv_devhdl >= 0)
+ _srom_write(srom->hv_devhdl, &dummy, SROM_FLUSH_OFF, 1);
+ mutex_unlock(&srom->lock);
+
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+
+/**
+ * srom_read() - Read data from the device.
+ * @filp: File for this specific open of the device.
+ * @buf: User's data buffer.
+ * @count: Number of bytes requested.
+ * @f_pos: File position.
+ *
+ * Returns number of bytes read, or an error code.
+ */
+static ssize_t srom_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ int retval = 0;
+ void *kernbuf;
+ struct srom_dev *srom = filp->private_data;
+
+ kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL);
+ if (!kernbuf)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&srom->lock)) {
+ retval = -ERESTARTSYS;
+ kfree(kernbuf);
+ return retval;
+ }
+
+ while (count) {
+ int hv_retval;
+ int bytes_this_pass = min(count, SROM_CHUNK_SIZE);
+
+ hv_retval = _srom_read(srom->hv_devhdl, kernbuf,
+ *f_pos, bytes_this_pass);
+ if (hv_retval > 0) {
+ if (copy_to_user(buf, kernbuf, hv_retval) != 0) {
+ retval = -EFAULT;
+ break;
+ }
+ } else if (hv_retval <= 0) {
+ if (retval == 0)
+ retval = hv_retval;
+ break;
+ }
+
+ retval += hv_retval;
+ *f_pos += hv_retval;
+ buf += hv_retval;
+ count -= hv_retval;
+ }
+
+ mutex_unlock(&srom->lock);
+ kfree(kernbuf);
+
+ return retval;
+}
+
+/**
+ * srom_write() - Write data to the device.
+ * @filp: File for this specific open of the device.
+ * @buf: User's data buffer.
+ * @count: Number of bytes requested.
+ * @f_pos: File position.
+ *
+ * Returns number of bytes written, or an error code.
+ */
+static ssize_t srom_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ int retval = 0;
+ void *kernbuf;
+ struct srom_dev *srom = filp->private_data;
+
+ kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL);
+ if (!kernbuf)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&srom->lock)) {
+ retval = -ERESTARTSYS;
+ kfree(kernbuf);
+ return retval;
+ }
+
+ while (count) {
+ int hv_retval;
+ int bytes_this_pass = min(count, SROM_CHUNK_SIZE);
+
+ if (copy_from_user(kernbuf, buf, bytes_this_pass) != 0) {
+ retval = -EFAULT;
+ break;
+ }
+
+ hv_retval = _srom_write(srom->hv_devhdl, kernbuf,
+ *f_pos, bytes_this_pass);
+ if (hv_retval <= 0) {
+ if (retval == 0)
+ retval = hv_retval;
+ break;
+ }
+
+ retval += hv_retval;
+ *f_pos += hv_retval;
+ buf += hv_retval;
+ count -= hv_retval;
+ }
+
+ mutex_unlock(&srom->lock);
+ kfree(kernbuf);
+
+ return retval;
+}
+
+/* Provide our own implementation so we can use srom->total_size. */
+loff_t srom_llseek(struct file *filp, loff_t offset, int origin)
+{
+ struct srom_dev *srom = filp->private_data;
+
+ if (mutex_lock_interruptible(&srom->lock))
+ return -ERESTARTSYS;
+
+ switch (origin) {
+ case SEEK_END:
+ offset += srom->total_size;
+ break;
+ case SEEK_CUR:
+ offset += filp->f_pos;
+ break;
+ }
+
+ if (offset < 0 || offset > srom->total_size) {
+ offset = -EINVAL;
+ } else {
+ filp->f_pos = offset;
+ filp->f_version = 0;
+ }
+
+ mutex_unlock(&srom->lock);
+
+ return offset;
+}
+
+static ssize_t total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srom_dev *srom = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", srom->total_size);
+}
+
+static ssize_t sector_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srom_dev *srom = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", srom->sector_size);
+}
+
+static ssize_t page_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct srom_dev *srom = dev_get_drvdata(dev);
+ return sprintf(buf, "%u\n", srom->page_size);
+}
+
+static struct device_attribute srom_dev_attrs[] = {
+ __ATTR(total_size, S_IRUGO, total_show, NULL),
+ __ATTR(sector_size, S_IRUGO, sector_show, NULL),
+ __ATTR(page_size, S_IRUGO, page_show, NULL),
+ __ATTR_NULL
+};
+
+static char *srom_devnode(struct device *dev, mode_t *mode)
+{
+ *mode = S_IRUGO | S_IWUSR;
+ return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev));
+}
+
+/*
+ * The fops
+ */
+static const struct file_operations srom_fops = {
+ .owner = THIS_MODULE,
+ .llseek = srom_llseek,
+ .read = srom_read,
+ .write = srom_write,
+ .open = srom_open,
+ .release = srom_release,
+};
+
+/**
+ * srom_setup_minor() - Initialize per-minor information.
+ * @srom: Per-device SROM state.
+ * @index: Device to set up.
+ */
+static int srom_setup_minor(struct srom_dev *srom, int index)
+{
+ struct device *dev;
+ int devhdl = srom->hv_devhdl;
+
+ mutex_init(&srom->lock);
+
+ if (_srom_read(devhdl, &srom->total_size,
+ SROM_TOTAL_SIZE_OFF, sizeof(srom->total_size)) < 0)
+ return -EIO;
+ if (_srom_read(devhdl, &srom->sector_size,
+ SROM_SECTOR_SIZE_OFF, sizeof(srom->sector_size)) < 0)
+ return -EIO;
+ if (_srom_read(devhdl, &srom->page_size,
+ SROM_PAGE_SIZE_OFF, sizeof(srom->page_size)) < 0)
+ return -EIO;
+
+ dev = device_create(srom_class, &platform_bus,
+ MKDEV(srom_major, index), srom, "%d", index);
+ return IS_ERR(dev) ? PTR_ERR(dev) : 0;
+}
+
+/** srom_init() - Initialize the driver's module. */
+static int srom_init(void)
+{
+ int result, i;
+ dev_t dev = MKDEV(srom_major, 0);
+
+ /*
+ * Start with a plausible number of partitions; the krealloc() call
+ * below will yield about log(srom_devs) additional allocations.
+ */
+ srom_devices = kzalloc(4 * sizeof(struct srom_dev), GFP_KERNEL);
+
+ /* Discover the number of srom partitions. */
+ for (i = 0; ; i++) {
+ int devhdl;
+ char buf[20];
+ struct srom_dev *new_srom_devices =
+ krealloc(srom_devices, (i+1) * sizeof(struct srom_dev),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!new_srom_devices) {
+ result = -ENOMEM;
+ goto fail_mem;
+ }
+ srom_devices = new_srom_devices;
+ sprintf(buf, "srom/0/%d", i);
+ devhdl = hv_dev_open((HV_VirtAddr)buf, 0);
+ if (devhdl < 0) {
+ if (devhdl != HV_ENODEV)
+ pr_notice("srom/%d: hv_dev_open failed: %d.\n",
+ i, devhdl);
+ break;
+ }
+ srom_devices[i].hv_devhdl = devhdl;
+ }
+ srom_devs = i;
+
+ /* Bail out early if we have no partitions at all. */
+ if (srom_devs == 0) {
+ result = -ENODEV;
+ goto fail_mem;
+ }
+
+ /* Register our major, and accept a dynamic number. */
+ if (srom_major)
+ result = register_chrdev_region(dev, srom_devs, "srom");
+ else {
+ result = alloc_chrdev_region(&dev, 0, srom_devs, "srom");
+ srom_major = MAJOR(dev);
+ }
+ if (result < 0)
+ goto fail_mem;
+
+ /* Register a character device. */
+ cdev_init(&srom_cdev, &srom_fops);
+ srom_cdev.owner = THIS_MODULE;
+ srom_cdev.ops = &srom_fops;
+ result = cdev_add(&srom_cdev, dev, srom_devs);
+ if (result < 0)
+ goto fail_chrdev;
+
+ /* Create a sysfs class. */
+ srom_class = class_create(THIS_MODULE, "srom");
+ if (IS_ERR(srom_class)) {
+ result = PTR_ERR(srom_class);
+ goto fail_cdev;
+ }
+ srom_class->dev_attrs = srom_dev_attrs;
+ srom_class->devnode = srom_devnode;
+
+ /* Do per-partition initialization */
+ for (i = 0; i < srom_devs; i++) {
+ result = srom_setup_minor(srom_devices + i, i);
+ if (result < 0)
+ goto fail_class;
+ }
+
+ return 0;
+
+fail_class:
+ for (i = 0; i < srom_devs; i++)
+ device_destroy(srom_class, MKDEV(srom_major, i));
+ class_destroy(srom_class);
+fail_cdev:
+ cdev_del(&srom_cdev);
+fail_chrdev:
+ unregister_chrdev_region(dev, srom_devs);
+fail_mem:
+ kfree(srom_devices);
+ return result;
+}
+
+/** srom_cleanup() - Clean up the driver's module. */
+static void srom_cleanup(void)
+{
+ int i;
+ for (i = 0; i < srom_devs; i++)
+ device_destroy(srom_class, MKDEV(srom_major, i));
+ class_destroy(srom_class);
+ cdev_del(&srom_cdev);
+ unregister_chrdev_region(MKDEV(srom_major, 0), srom_devs);
+ kfree(srom_devices);
+}
+
+module_init(srom_init);
+module_exit(srom_cleanup);
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 7beb0e25f1e..caf8012ef47 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -534,6 +534,7 @@ void tpm_get_timeouts(struct tpm_chip *chip)
struct duration_t *duration_cap;
ssize_t rc;
u32 timeout;
+ unsigned int scale = 1;
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
@@ -545,24 +546,30 @@ void tpm_get_timeouts(struct tpm_chip *chip)
if (rc)
goto duration;
- if (be32_to_cpu(tpm_cmd.header.out.length)
- != 4 * sizeof(u32))
- goto duration;
+ if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
+ be32_to_cpu(tpm_cmd.header.out.length)
+ != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
+ return;
timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
/* Don't overwrite default if value is 0 */
timeout = be32_to_cpu(timeout_cap->a);
+ if (timeout && timeout < 1000) {
+ /* timeouts in msec rather than usec */
+ scale = 1000;
+ chip->vendor.timeout_adjusted = true;
+ }
if (timeout)
- chip->vendor.timeout_a = usecs_to_jiffies(timeout);
+ chip->vendor.timeout_a = usecs_to_jiffies(timeout * scale);
timeout = be32_to_cpu(timeout_cap->b);
if (timeout)
- chip->vendor.timeout_b = usecs_to_jiffies(timeout);
+ chip->vendor.timeout_b = usecs_to_jiffies(timeout * scale);
timeout = be32_to_cpu(timeout_cap->c);
if (timeout)
- chip->vendor.timeout_c = usecs_to_jiffies(timeout);
+ chip->vendor.timeout_c = usecs_to_jiffies(timeout * scale);
timeout = be32_to_cpu(timeout_cap->d);
if (timeout)
- chip->vendor.timeout_d = usecs_to_jiffies(timeout);
+ chip->vendor.timeout_d = usecs_to_jiffies(timeout * scale);
duration:
tpm_cmd.header.in = tpm_getcap_header;
@@ -575,23 +582,31 @@ duration:
if (rc)
return;
- if (be32_to_cpu(tpm_cmd.header.out.return_code)
- != 3 * sizeof(u32))
+ if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
+ be32_to_cpu(tpm_cmd.header.out.length)
+ != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
return;
+
duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->vendor.duration[TPM_SHORT] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
+ chip->vendor.duration[TPM_MEDIUM] =
+ usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
+ chip->vendor.duration[TPM_LONG] =
+ usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
+
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
* fix up the resulting too-small TPM_SHORT value to make things work.
+ * We also scale the TPM_MEDIUM and TPM_LONG values by 1000.
*/
- if (chip->vendor.duration[TPM_SHORT] < (HZ/100))
+ if (chip->vendor.duration[TPM_SHORT] < (HZ / 100)) {
chip->vendor.duration[TPM_SHORT] = HZ;
-
- chip->vendor.duration[TPM_MEDIUM] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
- chip->vendor.duration[TPM_LONG] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
+ chip->vendor.duration[TPM_MEDIUM] *= 1000;
+ chip->vendor.duration[TPM_LONG] *= 1000;
+ chip->vendor.duration_adjusted = true;
+ dev_info(chip->dev, "Adjusting TPM timeout parameters.");
+ }
}
EXPORT_SYMBOL_GPL(tpm_get_timeouts);
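The timeout hunk above applies a simple heuristic before converting to jiffies: a non-zero value below 1000 is assumed to have been reported in milliseconds rather than microseconds, so it is scaled by 1000 and the adjustment is flagged for the new sysfs attribute. A stand-alone sketch of that heuristic (the helper name is hypothetical, not part of the patch):

/*
 * Sketch only: mirror the msec-vs-usec heuristic used in tpm_get_timeouts().
 * Values below 1000 are treated as milliseconds and scaled up before the
 * conversion to jiffies; *adjusted lets callers report the fix-up.
 */
#include <linux/jiffies.h>
#include <linux/types.h>

static unsigned long tpm_timeout_to_jiffies(u32 reported, bool *adjusted)
{
	unsigned int scale = 1;

	if (reported && reported < 1000) {
		scale = 1000;		/* firmware reported msecs, not usecs */
		*adjusted = true;
	}
	return usecs_to_jiffies(reported * scale);
}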
@@ -600,7 +615,7 @@ void tpm_continue_selftest(struct tpm_chip *chip)
u8 data[] = {
0, 193, /* TPM_TAG_RQU_COMMAND */
0, 0, 0, 10, /* length */
- 0, 0, 0, 83, /* TPM_ORD_GetCapability */
+ 0, 0, 0, 83, /* TPM_ORD_ContinueSelfTest */
};
tpm_transmit(chip, data, sizeof(data));
@@ -863,18 +878,24 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
data = tpm_cmd.params.readpubek_out_buffer;
str +=
sprintf(str,
- "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n"
- "Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X"
- " %02X %02X %02X %02X %02X %02X %02X %02X\n"
- "Modulus length: %d\nModulus: \n",
- data[10], data[11], data[12], data[13], data[14],
- data[15], data[16], data[17], data[22], data[23],
- data[24], data[25], data[26], data[27], data[28],
- data[29], data[30], data[31], data[32], data[33],
- be32_to_cpu(*((__be32 *) (data + 34))));
+ "Algorithm: %02X %02X %02X %02X\n"
+ "Encscheme: %02X %02X\n"
+ "Sigscheme: %02X %02X\n"
+ "Parameters: %02X %02X %02X %02X "
+ "%02X %02X %02X %02X "
+ "%02X %02X %02X %02X\n"
+ "Modulus length: %d\n"
+ "Modulus:\n",
+ data[0], data[1], data[2], data[3],
+ data[4], data[5],
+ data[6], data[7],
+ data[12], data[13], data[14], data[15],
+ data[16], data[17], data[18], data[19],
+ data[20], data[21], data[22], data[23],
+ be32_to_cpu(*((__be32 *) (data + 24))));
for (i = 0; i < 256; i++) {
- str += sprintf(str, "%02X ", data[i + 38]);
+ str += sprintf(str, "%02X ", data[i + 28]);
if ((i + 1) % 16 == 0)
str += sprintf(str, "\n");
}
@@ -937,6 +958,35 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
}
EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
+ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d %d %d [%s]\n",
+ jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
+ jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
+ jiffies_to_usecs(chip->vendor.duration[TPM_LONG]),
+ chip->vendor.duration_adjusted
+ ? "adjusted" : "original");
+}
+EXPORT_SYMBOL_GPL(tpm_show_durations);
+
+ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d %d %d %d [%s]\n",
+ jiffies_to_usecs(chip->vendor.timeout_a),
+ jiffies_to_usecs(chip->vendor.timeout_b),
+ jiffies_to_usecs(chip->vendor.timeout_c),
+ jiffies_to_usecs(chip->vendor.timeout_d),
+ chip->vendor.timeout_adjusted
+ ? "adjusted" : "original");
+}
+EXPORT_SYMBOL_GPL(tpm_show_timeouts);
+
ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 72ddb031b69..9c4163cfa3c 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -56,6 +56,10 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
char *);
extern ssize_t tpm_show_temp_deactivated(struct device *,
struct device_attribute *attr, char *);
+extern ssize_t tpm_show_durations(struct device *,
+ struct device_attribute *attr, char *);
+extern ssize_t tpm_show_timeouts(struct device *,
+ struct device_attribute *attr, char *);
struct tpm_chip;
@@ -67,6 +71,7 @@ struct tpm_vendor_specific {
unsigned long base; /* TPM base address */
int irq;
+ int probed_irq;
int region_size;
int have_region;
@@ -81,7 +86,9 @@ struct tpm_vendor_specific {
struct list_head list;
int locality;
unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */
+ bool timeout_adjusted;
unsigned long duration[3]; /* jiffies */
+ bool duration_adjusted;
wait_queue_head_t read_queue;
wait_queue_head_t int_queue;
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index a605cb7dd89..82facc9104c 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -330,12 +330,12 @@ static int __init init_nsc(void)
pdev->dev.driver = &nsc_drv.driver;
pdev->dev.release = tpm_nsc_remove;
- if ((rc = platform_device_register(pdev)) < 0)
- goto err_free_dev;
+ if ((rc = platform_device_add(pdev)) < 0)
+ goto err_put_dev;
if (request_region(base, 2, "tpm_nsc0") == NULL ) {
rc = -EBUSY;
- goto err_unreg_dev;
+ goto err_del_dev;
}
if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_nsc))) {
@@ -382,10 +382,10 @@ static int __init init_nsc(void)
err_rel_reg:
release_region(base, 2);
-err_unreg_dev:
- platform_device_unregister(pdev);
-err_free_dev:
- kfree(pdev);
+err_del_dev:
+ platform_device_del(pdev);
+err_put_dev:
+ platform_device_put(pdev);
err_unreg_drv:
platform_driver_unregister(&nsc_drv);
return rc;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index dd21df55689..3f4051a7c5a 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
+#include <linux/freezer.h>
#include "tpm.h"
#define TPM_HEADER_SIZE 10
@@ -79,7 +80,7 @@ enum tis_defaults {
static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);
-#ifdef CONFIG_ACPI
+#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
{
struct acpi_device *acpi = pnp_acpi_device(dev);
@@ -93,7 +94,7 @@ static int is_itpm(struct pnp_dev *dev)
return 0;
}
#else
-static int is_itpm(struct pnp_dev *dev)
+static inline int is_itpm(struct pnp_dev *dev)
{
return 0;
}
@@ -120,7 +121,7 @@ static void release_locality(struct tpm_chip *chip, int l, int force)
static int request_locality(struct tpm_chip *chip, int l)
{
- unsigned long stop;
+ unsigned long stop, timeout;
long rc;
if (check_locality(chip, l) >= 0)
@@ -129,17 +130,25 @@ static int request_locality(struct tpm_chip *chip, int l)
iowrite8(TPM_ACCESS_REQUEST_USE,
chip->vendor.iobase + TPM_ACCESS(l));
+ stop = jiffies + chip->vendor.timeout_a;
+
if (chip->vendor.irq) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -1;
rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
(check_locality
(chip, l) >= 0),
- chip->vendor.timeout_a);
+ timeout);
if (rc > 0)
return l;
-
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
} else {
/* wait for burstcount */
- stop = jiffies + chip->vendor.timeout_a;
do {
if (check_locality(chip, l) >= 0)
return l;
@@ -196,15 +205,24 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
if ((status & mask) == mask)
return 0;
+ stop = jiffies + timeout;
+
if (chip->vendor.irq) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
rc = wait_event_interruptible_timeout(*queue,
((tpm_tis_status
(chip) & mask) ==
mask), timeout);
if (rc > 0)
return 0;
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
} else {
- stop = jiffies + timeout;
do {
msleep(TPM_TIMEOUT);
status = tpm_tis_status(chip);
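Both request_locality() and wait_for_stat() now keep an absolute deadline and retry the interruptible wait when it returns -ERESTARTSYS only because the task is being frozen for suspend. The pattern, extracted into a stand-alone sketch (wait_done() and the helper name are hypothetical placeholders, not part of the patch):

/*
 * Sketch only: wait for a condition with an absolute deadline, ignoring
 * wakeups that come solely from the freezer during suspend.
 */
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait.h>

static int wait_with_deadline(wait_queue_head_t *queue,
			      bool (*wait_done)(void),
			      unsigned long timeout)
{
	unsigned long stop = jiffies + timeout;
	long remaining, rc;

again:
	remaining = stop - jiffies;
	if (remaining <= 0)
		return -ETIME;

	rc = wait_event_interruptible_timeout(*queue, wait_done(), remaining);
	if (rc > 0)
		return 0;		/* condition became true */
	if (rc == -ERESTARTSYS && freezing(current)) {
		clear_thread_flag(TIF_SIGPENDING);
		goto again;		/* spurious wakeup from the freezer */
	}
	return rc ? rc : -ETIME;	/* real signal or timeout */
}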
@@ -288,11 +306,10 @@ MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
{
int rc, status, burstcnt;
size_t count = 0;
- u32 ordinal;
if (request_locality(chip, 0) < 0)
return -EBUSY;
@@ -327,8 +344,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
/* write last byte */
iowrite8(buf[count],
- chip->vendor.iobase +
- TPM_DATA_FIFO(chip->vendor.locality));
+ chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
&chip->vendor.int_queue);
status = tpm_tis_status(chip);
@@ -337,6 +353,28 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
goto out_err;
}
+ return 0;
+
+out_err:
+ tpm_tis_ready(chip);
+ release_locality(chip, chip->vendor.locality, 0);
+ return rc;
+}
+
+/*
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ int rc;
+ u32 ordinal;
+
+ rc = tpm_tis_send_data(chip, buf, len);
+ if (rc < 0)
+ return rc;
+
/* go and do it */
iowrite8(TPM_STS_GO,
chip->vendor.iobase + TPM_STS(chip->vendor.locality));
@@ -358,6 +396,47 @@ out_err:
return rc;
}
+/*
+ * Early probing for iTPM with STS_DATA_EXPECT flaw.
+ * Try sending a command without the itpm flag set and, if that
+ * fails, repeat with the itpm flag set.
+ */
+static int probe_itpm(struct tpm_chip *chip)
+{
+ int rc = 0;
+ u8 cmd_getticks[] = {
+ 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
+ 0x00, 0x00, 0x00, 0xf1
+ };
+ size_t len = sizeof(cmd_getticks);
+ int rem_itpm = itpm;
+
+ itpm = 0;
+
+ rc = tpm_tis_send_data(chip, cmd_getticks, len);
+ if (rc == 0)
+ goto out;
+
+ tpm_tis_ready(chip);
+ release_locality(chip, chip->vendor.locality, 0);
+
+ itpm = 1;
+
+ rc = tpm_tis_send_data(chip, cmd_getticks, len);
+ if (rc == 0) {
+ dev_info(chip->dev, "Detected an iTPM.\n");
+ rc = 1;
+ } else
+ rc = -EFAULT;
+
+out:
+ itpm = rem_itpm;
+ tpm_tis_ready(chip);
+ release_locality(chip, chip->vendor.locality, 0);
+
+ return rc;
+}
+
static const struct file_operations tis_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -376,6 +455,8 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
static struct attribute *tis_attrs[] = {
&dev_attr_pubek.attr,
@@ -385,7 +466,9 @@ static struct attribute *tis_attrs[] = {
&dev_attr_owned.attr,
&dev_attr_temp_deactivated.attr,
&dev_attr_caps.attr,
- &dev_attr_cancel.attr, NULL,
+ &dev_attr_cancel.attr,
+ &dev_attr_durations.attr,
+ &dev_attr_timeouts.attr, NULL,
};
static struct attribute_group tis_attr_grp = {
@@ -416,7 +499,7 @@ static irqreturn_t tis_int_probe(int irq, void *dev_id)
if (interrupt == 0)
return IRQ_NONE;
- chip->vendor.irq = irq;
+ chip->vendor.probed_irq = irq;
/* Clear interrupts handled with TPM_EOI */
iowrite32(interrupt,
@@ -464,7 +547,7 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
resource_size_t len, unsigned int irq)
{
u32 vendor, intfcaps, intmask;
- int rc, i;
+ int rc, i, irq_s, irq_e;
struct tpm_chip *chip;
if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
@@ -493,6 +576,14 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
"1.2 TPM (device-id 0x%X, rev-id %d)\n",
vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
+ if (!itpm) {
+ itpm = probe_itpm(chip);
+ if (itpm < 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+ }
+
if (itpm)
dev_info(dev, "Intel iTPM workaround enabled\n");
@@ -522,6 +613,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
dev_dbg(dev, "\tData Avail Int Support\n");
+ /* get the timeouts before testing for irqs */
+ tpm_get_timeouts(chip);
+
/* INTERRUPT Setup */
init_waitqueue_head(&chip->vendor.read_queue);
init_waitqueue_head(&chip->vendor.int_queue);
@@ -540,13 +634,19 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
if (interrupts)
chip->vendor.irq = irq;
if (interrupts && !chip->vendor.irq) {
- chip->vendor.irq =
+ irq_s =
ioread8(chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
+ if (irq_s) {
+ irq_e = irq_s;
+ } else {
+ irq_s = 3;
+ irq_e = 15;
+ }
- for (i = 3; i < 16 && chip->vendor.irq == 0; i++) {
+ for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
iowrite8(i, chip->vendor.iobase +
- TPM_INT_VECTOR(chip->vendor.locality));
+ TPM_INT_VECTOR(chip->vendor.locality));
if (request_irq
(i, tis_int_probe, IRQF_SHARED,
chip->vendor.miscdev.name, chip) != 0) {
@@ -568,9 +668,22 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
+ chip->vendor.probed_irq = 0;
+
/* Generate Interrupts */
tpm_gen_interrupt(chip);
+ chip->vendor.irq = chip->vendor.probed_irq;
+
+ /* free_irq will call into tis_int_probe;
+ clear all irqs we haven't seen while doing
+ tpm_gen_interrupt */
+ iowrite32(ioread32
+ (chip->vendor.iobase +
+ TPM_INT_STATUS(chip->vendor.locality)),
+ chip->vendor.iobase +
+ TPM_INT_STATUS(chip->vendor.locality));
+
/* Turn off */
iowrite32(intmask,
chip->vendor.iobase +
@@ -609,7 +722,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
list_add(&chip->vendor.list, &tis_chips);
spin_unlock(&tis_lock);
- tpm_get_timeouts(chip);
tpm_continue_selftest(chip);
return 0;
@@ -619,6 +731,29 @@ out_err:
tpm_remove_hardware(chip->dev);
return rc;
}
+
+static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
+{
+ u32 intmask;
+
+ /* re-enable interrupts that the device may have lost or that the
+ BIOS/firmware may have disabled */
+ iowrite8(chip->vendor.irq, chip->vendor.iobase +
+ TPM_INT_VECTOR(chip->vendor.locality));
+
+ intmask =
+ ioread32(chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.locality));
+
+ intmask |= TPM_INTF_CMD_READY_INT
+ | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
+ | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
+
+ iowrite32(intmask,
+ chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
+}
+
+
#ifdef CONFIG_PNP
static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
const struct pnp_device_id *pnp_id)
@@ -650,6 +785,9 @@ static int tpm_tis_pnp_resume(struct pnp_dev *dev)
struct tpm_chip *chip = pnp_get_drvdata(dev);
int ret;
+ if (chip->vendor.irq)
+ tpm_tis_reenable_interrupts(chip);
+
ret = tpm_pm_resume(&dev->dev);
if (!ret)
tpm_continue_selftest(chip);
@@ -702,6 +840,11 @@ static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
static int tpm_tis_resume(struct platform_device *dev)
{
+ struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
+
+ if (chip->vendor.irq)
+ tpm_tis_reenable_interrupts(chip);
+
return tpm_pm_resume(&dev->dev);
}
static struct platform_driver tis_drv = {
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 39ccdeada79..e90e1c74fd4 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -621,7 +621,7 @@ static int __devinit hwicap_setup(struct device *dev, int id,
drvdata->mem_start = regs_res->start;
drvdata->mem_end = regs_res->end;
- drvdata->mem_size = regs_res->end - regs_res->start + 1;
+ drvdata->mem_size = resource_size(regs_res);
if (!request_mem_region(drvdata->mem_start,
drvdata->mem_size, DRIVER_NAME)) {
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 4168c8896e1..35309274ad6 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -2,3 +2,6 @@
config CLKDEV_LOOKUP
bool
select HAVE_CLK
+
+config HAVE_MACH_CLKDEV
+ bool
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 96c92191046..34e9c4f8892 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,5 +1,17 @@
config CLKSRC_I8253
bool
+config CLKEVT_I8253
+ bool
+
+config I8253_LOCK
+ bool
+
+config CLKBLD_I8253
+ def_bool y if CLKSRC_I8253 || CLKEVT_I8253 || I8253_LOCK
+
config CLKSRC_MMIO
bool
+
+config DW_APB_TIMER
+ bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index b995942a506..85ad1646a7b 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -6,5 +6,6 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
-obj-$(CONFIG_CLKSRC_I8253) += i8253.o
+obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
+obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
new file mode 100644
index 00000000000..580f870541a
--- /dev/null
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -0,0 +1,401 @@
+/*
+ * (C) Copyright 2009 Intel Corporation
+ * Author: Jacob Pan (jacob.jun.pan@intel.com)
+ *
+ * Shared with ARM platforms, Jamie Iles, Picochip 2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Support for the Synopsys DesignWare APB Timers.
+ */
+#include <linux/dw_apb_timer.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#define APBT_MIN_PERIOD 4
+#define APBT_MIN_DELTA_USEC 200
+
+#define APBTMR_N_LOAD_COUNT 0x00
+#define APBTMR_N_CURRENT_VALUE 0x04
+#define APBTMR_N_CONTROL 0x08
+#define APBTMR_N_EOI 0x0c
+#define APBTMR_N_INT_STATUS 0x10
+
+#define APBTMRS_INT_STATUS 0xa0
+#define APBTMRS_EOI 0xa4
+#define APBTMRS_RAW_INT_STATUS 0xa8
+#define APBTMRS_COMP_VERSION 0xac
+
+#define APBTMR_CONTROL_ENABLE (1 << 0)
+/* 1: periodic, 0:free running. */
+#define APBTMR_CONTROL_MODE_PERIODIC (1 << 1)
+#define APBTMR_CONTROL_INT (1 << 2)
+
+static inline struct dw_apb_clock_event_device *
+ced_to_dw_apb_ced(struct clock_event_device *evt)
+{
+ return container_of(evt, struct dw_apb_clock_event_device, ced);
+}
+
+static inline struct dw_apb_clocksource *
+clocksource_to_dw_apb_clocksource(struct clocksource *cs)
+{
+ return container_of(cs, struct dw_apb_clocksource, cs);
+}
+
+static unsigned long apbt_readl(struct dw_apb_timer *timer, unsigned long offs)
+{
+ return readl(timer->base + offs);
+}
+
+static void apbt_writel(struct dw_apb_timer *timer, unsigned long val,
+ unsigned long offs)
+{
+ writel(val, timer->base + offs);
+}
+
+static void apbt_disable_int(struct dw_apb_timer *timer)
+{
+ unsigned long ctrl = apbt_readl(timer, APBTMR_N_CONTROL);
+
+ ctrl |= APBTMR_CONTROL_INT;
+ apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
+}
+
+/**
+ * dw_apb_clockevent_pause() - stop the clock_event_device from running
+ *
+ * @dw_ced: The APB clock to stop generating events.
+ */
+void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced)
+{
+ disable_irq(dw_ced->timer.irq);
+ apbt_disable_int(&dw_ced->timer);
+}
+
+static void apbt_eoi(struct dw_apb_timer *timer)
+{
+ apbt_readl(timer, APBTMR_N_EOI);
+}
+
+static irqreturn_t dw_apb_clockevent_irq(int irq, void *data)
+{
+ struct clock_event_device *evt = data;
+ struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
+
+ if (!evt->event_handler) {
+ pr_info("Spurious APBT timer interrupt %d", irq);
+ return IRQ_NONE;
+ }
+
+ if (dw_ced->eoi)
+ dw_ced->eoi(&dw_ced->timer);
+
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static void apbt_enable_int(struct dw_apb_timer *timer)
+{
+ unsigned long ctrl = apbt_readl(timer, APBTMR_N_CONTROL);
+ /* clear pending intr */
+ apbt_readl(timer, APBTMR_N_EOI);
+ ctrl &= ~APBTMR_CONTROL_INT;
+ apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
+}
+
+static void apbt_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned long ctrl;
+ unsigned long period;
+ struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
+
+ pr_debug("%s CPU %d mode=%d\n", __func__, first_cpu(*evt->cpumask),
+ mode);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ period = DIV_ROUND_UP(dw_ced->timer.freq, HZ);
+ ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
+ ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+ /*
+ * Per the DW APB databook (p. 46), the timer must be disabled
+ * before loading the counter, or synchronization problems can occur.
+ */
+ ctrl &= ~APBTMR_CONTROL_ENABLE;
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+ udelay(1);
+ pr_debug("Setting clock period %lu for HZ %d\n", period, HZ);
+ apbt_writel(&dw_ced->timer, period, APBTMR_N_LOAD_COUNT);
+ ctrl |= APBTMR_CONTROL_ENABLE;
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
+ /*
+ * Set free-running mode. The timer reloads the maximum count, which
+ * leaves plenty of time (about 3 minutes at a 25MHz clock) to program
+ * the next event, thereby emulating one-shot mode.
+ */
+ ctrl &= ~APBTMR_CONTROL_ENABLE;
+ ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
+
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+ /* write again to set free running mode */
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+
+ /*
+ * DW APB p. 46, load counter with all 1s before starting free
+ * running mode.
+ */
+ apbt_writel(&dw_ced->timer, ~0, APBTMR_N_LOAD_COUNT);
+ ctrl &= ~APBTMR_CONTROL_INT;
+ ctrl |= APBTMR_CONTROL_ENABLE;
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
+ ctrl &= ~APBTMR_CONTROL_ENABLE;
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+ break;
+
+ case CLOCK_EVT_MODE_RESUME:
+ apbt_enable_int(&dw_ced->timer);
+ break;
+ }
+}
+
+static int apbt_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned long ctrl;
+ struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
+
+ /* Disable timer */
+ ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
+ ctrl &= ~APBTMR_CONTROL_ENABLE;
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+ /* write new count */
+ apbt_writel(&dw_ced->timer, delta, APBTMR_N_LOAD_COUNT);
+ ctrl |= APBTMR_CONTROL_ENABLE;
+ apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+
+ return 0;
+}
+
+/**
+ * dw_apb_clockevent_init() - use an APB timer as a clock_event_device
+ *
+ * @cpu: The CPU the events will be targeted at.
+ * @name: The name used for the timer and the IRQ for it.
+ * @rating: The rating to give the timer.
+ * @base: I/O base for the timer registers.
+ * @irq: The interrupt number to use for the timer.
+ * @freq: The frequency that the timer counts at.
+ *
+ * This creates a clock_event_device for using with the generic clock layer
+ * but does not start and register it. This should be done with
+ * dw_apb_clockevent_register() as the next step. If this is the first time
+ * it has been called for a timer, the IRQ will be requested; if not, it
+ * will simply be re-enabled, allowing CPU hotplug to avoid repeatedly
+ * requesting and releasing the IRQ.
+ */
+struct dw_apb_clock_event_device *
+dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
+ void __iomem *base, int irq, unsigned long freq)
+{
+ struct dw_apb_clock_event_device *dw_ced =
+ kzalloc(sizeof(*dw_ced), GFP_KERNEL);
+ int err;
+
+ if (!dw_ced)
+ return NULL;
+
+ dw_ced->timer.base = base;
+ dw_ced->timer.irq = irq;
+ dw_ced->timer.freq = freq;
+
+ clockevents_calc_mult_shift(&dw_ced->ced, freq, APBT_MIN_PERIOD);
+ dw_ced->ced.max_delta_ns = clockevent_delta2ns(0x7fffffff,
+ &dw_ced->ced);
+ dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced);
+ dw_ced->ced.cpumask = cpumask_of(cpu);
+ dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ dw_ced->ced.set_mode = apbt_set_mode;
+ dw_ced->ced.set_next_event = apbt_next_event;
+ dw_ced->ced.irq = dw_ced->timer.irq;
+ dw_ced->ced.rating = rating;
+ dw_ced->ced.name = name;
+
+ dw_ced->irqaction.name = dw_ced->ced.name;
+ dw_ced->irqaction.handler = dw_apb_clockevent_irq;
+ dw_ced->irqaction.dev_id = &dw_ced->ced;
+ dw_ced->irqaction.irq = irq;
+ dw_ced->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL |
+ IRQF_NOBALANCING |
+ IRQF_DISABLED;
+
+ dw_ced->eoi = apbt_eoi;
+ err = setup_irq(irq, &dw_ced->irqaction);
+ if (err) {
+ pr_err("failed to request timer irq\n");
+ kfree(dw_ced);
+ dw_ced = NULL;
+ }
+
+ return dw_ced;
+}
+
+/**
+ * dw_apb_clockevent_resume() - resume a clock that has been paused.
+ *
+ * @dw_ced: The APB clock to resume.
+ */
+void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced)
+{
+ enable_irq(dw_ced->timer.irq);
+}
+
+/**
+ * dw_apb_clockevent_stop() - stop the clock_event_device and release the IRQ.
+ *
+ * @dw_ced: The APB clock to stop generating the events.
+ */
+void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced)
+{
+ free_irq(dw_ced->timer.irq, &dw_ced->ced);
+}
+
+/**
+ * dw_apb_clockevent_register() - register the clock with the generic layer
+ *
+ * @dw_ced: The APB clock to register as a clock_event_device.
+ */
+void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced)
+{
+ apbt_writel(&dw_ced->timer, 0, APBTMR_N_CONTROL);
+ clockevents_register_device(&dw_ced->ced);
+ apbt_enable_int(&dw_ced->timer);
+}
+
+/**
+ * dw_apb_clocksource_start() - start the clocksource counting.
+ *
+ * @dw_cs: The clocksource to start.
+ *
+ * This is used to start the clocksource before registration and can be used
+ * to enable calibration of timers.
+ */
+void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs)
+{
+ /*
+ * Start counting down from 0xffff_ffff. This is done by toggling the
+ * enable bit and then loading the initial count of ~0.
+ */
+ unsigned long ctrl = apbt_readl(&dw_cs->timer, APBTMR_N_CONTROL);
+
+ ctrl &= ~APBTMR_CONTROL_ENABLE;
+ apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL);
+ apbt_writel(&dw_cs->timer, ~0, APBTMR_N_LOAD_COUNT);
+ /* enable, mask interrupt */
+ ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
+ ctrl |= (APBTMR_CONTROL_ENABLE | APBTMR_CONTROL_INT);
+ apbt_writel(&dw_cs->timer, ctrl, APBTMR_N_CONTROL);
+ /* read it once to get cached counter value initialized */
+ dw_apb_clocksource_read(dw_cs);
+}
+
+static cycle_t __apbt_read_clocksource(struct clocksource *cs)
+{
+ unsigned long current_count;
+ struct dw_apb_clocksource *dw_cs =
+ clocksource_to_dw_apb_clocksource(cs);
+
+ current_count = apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
+
+ return (cycle_t)~current_count;
+}
+
+static void apbt_restart_clocksource(struct clocksource *cs)
+{
+ struct dw_apb_clocksource *dw_cs =
+ clocksource_to_dw_apb_clocksource(cs);
+
+ dw_apb_clocksource_start(dw_cs);
+}
+
+/**
+ * dw_apb_clocksource_init() - use an APB timer as a clocksource.
+ *
+ * @rating: The rating to give the clocksource.
+ * @name: The name for the clocksource.
+ * @base: The I/O base for the timer registers.
+ * @freq: The frequency that the timer counts at.
+ *
+ * This creates a clocksource using an APB timer but does not yet register it
+ * with the clocksource system. This should be done with
+ * dw_apb_clocksource_register() as the next step.
+ */
+struct dw_apb_clocksource *
+dw_apb_clocksource_init(unsigned rating, char *name, void __iomem *base,
+ unsigned long freq)
+{
+ struct dw_apb_clocksource *dw_cs = kzalloc(sizeof(*dw_cs), GFP_KERNEL);
+
+ if (!dw_cs)
+ return NULL;
+
+ dw_cs->timer.base = base;
+ dw_cs->timer.freq = freq;
+ dw_cs->cs.name = name;
+ dw_cs->cs.rating = rating;
+ dw_cs->cs.read = __apbt_read_clocksource;
+ dw_cs->cs.mask = CLOCKSOURCE_MASK(32);
+ dw_cs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ dw_cs->cs.resume = apbt_restart_clocksource;
+
+ return dw_cs;
+}
+
+/**
+ * dw_apb_clocksource_register() - register the APB clocksource.
+ *
+ * @dw_cs: The clocksource to register.
+ */
+void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs)
+{
+ clocksource_register_hz(&dw_cs->cs, dw_cs->timer.freq);
+}
+
+/**
+ * dw_apb_clocksource_read() - read the current value of a clocksource.
+ *
+ * @dw_cs: The clocksource to read.
+ */
+cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs)
+{
+ return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE);
+}
+
+/**
+ * dw_apb_clocksource_unregister() - unregister and free a clocksource.
+ *
+ * @dw_cs: The clocksource to unregister/free.
+ */
+void dw_apb_clocksource_unregister(struct dw_apb_clocksource *dw_cs)
+{
+ clocksource_unregister(&dw_cs->cs);
+
+ kfree(dw_cs);
+}
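The new library deliberately separates allocation from registration: dw_apb_clockevent_init() and dw_apb_clocksource_init() only build the structures, and the matching *_register() calls hand them to the generic clockevents/clocksource layers. A hedged sketch of how platform code might consume the API follows; the base addresses, IRQ number, rating of 300 and the 50 MHz rate are made-up example values, not taken from the patch.

/*
 * Sketch only: one timer as the tick device, another as the clocksource.
 * All constants here are hypothetical example values.
 */
#include <linux/dw_apb_timer.h>
#include <linux/init.h>
#include <linux/io.h>

static void __init example_apb_timer_setup(void __iomem *timer0_base,
					   void __iomem *timer1_base,
					   int timer_irq)
{
	static char cs_name[] = "timer1";
	struct dw_apb_clock_event_device *ced;
	struct dw_apb_clocksource *cs;

	/* Timer 0 drives the tick on CPU 0: allocate, then register. */
	ced = dw_apb_clockevent_init(0, "timer0", 300, timer0_base,
				     timer_irq, 50000000);
	if (ced)
		dw_apb_clockevent_register(ced);

	/* Timer 1 free-runs as the clocksource: allocate, start, register. */
	cs = dw_apb_clocksource_init(300, cs_name, timer1_base, 50000000);
	if (cs) {
		dw_apb_clocksource_start(cs);
		dw_apb_clocksource_register(cs);
	}
}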
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 225c1761b37..27c49e60b7d 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -1,14 +1,25 @@
/*
* i8253 PIT clocksource
*/
-#include <linux/clocksource.h>
+#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/timex.h>
+#include <linux/module.h>
+#include <linux/i8253.h>
+#include <linux/smp.h>
-#include <asm/i8253.h>
+/*
+ * Protects access to I/O ports
+ *
+ * 0040-0043 : timer0, i8253 / i8254
+ * 0061-0061 : NMI Control Register which contains two speaker control bits.
+ */
+DEFINE_RAW_SPINLOCK(i8253_lock);
+EXPORT_SYMBOL(i8253_lock);
+#ifdef CONFIG_CLKSRC_I8253
/*
* Since the PIT overflows every tick, its not very useful
* to just read by itself. So use jiffies to emulate a free
@@ -37,15 +48,15 @@ static cycle_t i8253_read(struct clocksource *cs)
* count), it cannot be newer.
*/
jifs = jiffies;
- outb_pit(0x00, PIT_MODE); /* latch the count ASAP */
- count = inb_pit(PIT_CH0); /* read the latched count */
- count |= inb_pit(PIT_CH0) << 8;
+ outb_p(0x00, PIT_MODE); /* latch the count ASAP */
+ count = inb_p(PIT_CH0); /* read the latched count */
+ count |= inb_p(PIT_CH0) << 8;
/* VIA686a test code... reset the latch if count > max + 1 */
if (count > LATCH) {
- outb_pit(0x34, PIT_MODE);
- outb_pit(PIT_LATCH & 0xff, PIT_CH0);
- outb_pit(PIT_LATCH >> 8, PIT_CH0);
+ outb_p(0x34, PIT_MODE);
+ outb_p(PIT_LATCH & 0xff, PIT_CH0);
+ outb_p(PIT_LATCH >> 8, PIT_CH0);
count = PIT_LATCH - 1;
}
@@ -86,3 +97,90 @@ int __init clocksource_i8253_init(void)
{
return clocksource_register_hz(&i8253_cs, PIT_TICK_RATE);
}
+#endif
+
+#ifdef CONFIG_CLKEVT_I8253
+/*
+ * Initialize the PIT timer.
+ *
+ * This is also called after resume to bring the PIT into operation again.
+ */
+static void init_pit_timer(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ raw_spin_lock(&i8253_lock);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* binary, mode 2, LSB/MSB, ch 0 */
+ outb_p(0x34, PIT_MODE);
+ outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
+ outb_p(LATCH >> 8 , PIT_CH0); /* MSB */
+ break;
+
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
+ evt->mode == CLOCK_EVT_MODE_ONESHOT) {
+ outb_p(0x30, PIT_MODE);
+ outb_p(0, PIT_CH0);
+ outb_p(0, PIT_CH0);
+ }
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* One shot setup */
+ outb_p(0x38, PIT_MODE);
+ break;
+
+ case CLOCK_EVT_MODE_RESUME:
+ /* Nothing to do here */
+ break;
+ }
+ raw_spin_unlock(&i8253_lock);
+}
+
+/*
+ * Program the next event in oneshot mode
+ *
+ * Delta is given in PIT ticks
+ */
+static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ raw_spin_lock(&i8253_lock);
+ outb_p(delta & 0xff , PIT_CH0); /* LSB */
+ outb_p(delta >> 8 , PIT_CH0); /* MSB */
+ raw_spin_unlock(&i8253_lock);
+
+ return 0;
+}
+
+/*
+ * On UP the PIT can serve all of the possible timer functions. On SMP systems
+ * it can be solely used for the global tick.
+ */
+struct clock_event_device i8253_clockevent = {
+ .name = "pit",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .set_mode = init_pit_timer,
+ .set_next_event = pit_next_event,
+};
+
+/*
+ * Initialize the conversion factor and the min/max deltas of the clock event
+ * structure and register the clock event source with the framework.
+ */
+void __init clockevent_i8253_init(bool oneshot)
+{
+ if (oneshot)
+ i8253_clockevent.features |= CLOCK_EVT_FEAT_ONESHOT;
+ /*
+ * Start the PIT with the boot CPU's mask. x86 might make it global
+ * later when it is used as a broadcast device.
+ */
+ i8253_clockevent.cpumask = cpumask_of(smp_processor_id());
+
+ clockevents_config_and_register(&i8253_clockevent, PIT_TICK_RATE,
+ 0xF, 0x7FFF);
+}
+#endif
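pit_next_event() programs channel 0 directly in PIT ticks; the nanosecond-to-tick conversion is done by the clockevents core using the PIT_TICK_RATE (1193182 Hz) and the min/max deltas passed to clockevents_config_and_register(). The rough arithmetic, shown as an illustrative helper (the real core uses precomputed mult/shift factors rather than a divide):

/* Sketch only: approximate ns -> PIT-tick conversion. */
#include <linux/i8253.h>	/* PIT_TICK_RATE */
#include <linux/math64.h>
#include <linux/time.h>		/* NSEC_PER_SEC */

static unsigned long ns_to_pit_ticks(u64 ns)
{
	/* e.g. 10 ms = 10,000,000 ns -> 11931 ticks at 1193182 Hz */
	return div_u64(ns * PIT_TICK_RATE, NSEC_PER_SEC);
}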
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 2b46a7efa0a..e55814bc0d0 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -28,7 +28,9 @@
#include <linux/init.h>
#include <linux/connector.h>
#include <linux/gfp.h>
-#include <asm/atomic.h>
+#include <linux/ptrace.h>
+#include <linux/atomic.h>
+
#include <asm/unaligned.h>
#include <linux/cn_proc.h>
@@ -55,6 +57,7 @@ void proc_fork_connector(struct task_struct *task)
struct proc_event *ev;
__u8 buffer[CN_PROC_MSG_SIZE];
struct timespec ts;
+ struct task_struct *parent;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
@@ -65,8 +68,11 @@ void proc_fork_connector(struct task_struct *task)
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
ev->what = PROC_EVENT_FORK;
- ev->event_data.fork.parent_pid = task->real_parent->pid;
- ev->event_data.fork.parent_tgid = task->real_parent->tgid;
+ rcu_read_lock();
+ parent = rcu_dereference(task->real_parent);
+ ev->event_data.fork.parent_pid = parent->pid;
+ ev->event_data.fork.parent_tgid = parent->tgid;
+ rcu_read_unlock();
ev->event_data.fork.child_pid = task->pid;
ev->event_data.fork.child_tgid = task->tgid;
@@ -166,6 +172,39 @@ void proc_sid_connector(struct task_struct *task)
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
+void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
+{
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+ __u8 buffer[CN_PROC_MSG_SIZE];
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->what = PROC_EVENT_PTRACE;
+ ev->event_data.ptrace.process_pid = task->pid;
+ ev->event_data.ptrace.process_tgid = task->tgid;
+ if (ptrace_id == PTRACE_ATTACH) {
+ ev->event_data.ptrace.tracer_pid = current->pid;
+ ev->event_data.ptrace.tracer_tgid = current->tgid;
+ } else if (ptrace_id == PTRACE_DETACH) {
+ ev->event_data.ptrace.tracer_pid = 0;
+ ev->event_data.ptrace.tracer_tgid = 0;
+ } else
+ return;
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+}
+
void proc_exit_connector(struct task_struct *task)
{
struct cn_msg *msg;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 219d88a0eea..dde6a0fad40 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -139,6 +139,7 @@ static int cn_call_callback(struct sk_buff *skb)
spin_unlock_bh(&dev->cbdev->queue_lock);
if (cbq != NULL) {
+ err = 0;
cbq->callback(msg, nsp);
kfree_skb(skb);
cn_queue_release_callback(cbq);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 9fb84853d8e..e24a2a1b666 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -184,5 +184,15 @@ depends on X86
source "drivers/cpufreq/Kconfig.x86"
endmenu
+menu "ARM CPU frequency scaling drivers"
+depends on ARM
+source "drivers/cpufreq/Kconfig.arm"
+endmenu
+
+menu "PowerPC CPU frequency scaling drivers"
+depends on PPC32 || PPC64
+source "drivers/cpufreq/Kconfig.powerpc"
+endmenu
+
endif
endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
new file mode 100644
index 00000000000..72a0044c1ba
--- /dev/null
+++ b/drivers/cpufreq/Kconfig.arm
@@ -0,0 +1,32 @@
+#
+# ARM CPU Frequency scaling drivers
+#
+
+config ARM_S3C64XX_CPUFREQ
+ bool "Samsung S3C64XX"
+ depends on CPU_S3C6410
+ default y
+ help
+ This adds the CPUFreq driver for Samsung S3C6410 SoC.
+
+ If in doubt, say N.
+
+config ARM_S5PV210_CPUFREQ
+ bool "Samsung S5PV210 and S5PC110"
+ depends on CPU_S5PV210
+ default y
+ help
+ This adds the CPUFreq driver for Samsung S5PV210 and
+ S5PC110 SoCs.
+
+ If in doubt, say N.
+
+config ARM_EXYNOS4210_CPUFREQ
+ bool "Samsung EXYNOS4210"
+ depends on CPU_EXYNOS4210
+ default y
+ help
+ This adds the CPUFreq driver for Samsung EXYNOS4210
+ SoC (S5PV310 or S5PC210).
+
+ If in doubt, say N.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
new file mode 100644
index 00000000000..e76992f7968
--- /dev/null
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -0,0 +1,7 @@
+config CPU_FREQ_MAPLE
+ bool "Support for Maple 970FX Evaluation Board"
+ depends on PPC_MAPLE
+ select CPU_FREQ_TABLE
+ help
+ This adds support for frequency switching on Maple 970FX
+ Evaluation Board and compatible boards (IBM JS2x blades).
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index e2fc2d21fa6..a48bc02cd76 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
-##################################################################################d
+##################################################################################
# x86 drivers.
# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
# K8 systems. ACPI is preferred to all other hardware-specific drivers.
@@ -37,7 +37,13 @@ obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
-##################################################################################d
-
+##################################################################################
# ARM SoC drivers
obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
+obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
+obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
+obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
+
+##################################################################################
+# PowerPC platform drivers
+obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 4e04e127438..56c6c6b4eb4 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -655,7 +655,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
acpi_processor_notify_smm(THIS_MODULE);
/* Check for APERF/MPERF support in hardware */
- if (cpu_has(c, X86_FEATURE_APERFMPERF))
+ if (boot_cpu_has(X86_FEATURE_APERFMPERF))
acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
@@ -759,7 +759,7 @@ static void __exit acpi_cpufreq_exit(void)
cpufreq_unregister_driver(&acpi_cpufreq_driver);
- free_percpu(acpi_perf_data);
+ free_acpi_perf_data();
}
module_param(acpi_pstate_strict, uint, 0644);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0a5bea9e358..987a165ede2 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1199,6 +1199,26 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_quick_get);
+/**
+ * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
+ * @cpu: CPU number
+ *
+ * Just return the max possible frequency for a given CPU.
+ */
+unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned int ret_freq = 0;
+
+ if (policy) {
+ ret_freq = policy->max;
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_quick_get_max);
+
static unsigned int __cpufreq_get(unsigned int cpu)
{
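cpufreq_quick_get_max() mirrors cpufreq_quick_get() but returns the policy's maximum rather than the current frequency, so code outside cpufreq can read it without holding a policy reference itself. A minimal usage sketch, assuming the matching declaration is added to <linux/cpufreq.h> elsewhere in the series:

/* Sketch only: query the maximum policy frequency of CPU 0. */
#include <linux/cpufreq.h>
#include <linux/kernel.h>

static void example_report_cpu0_max(void)
{
	unsigned int khz = cpufreq_quick_get_max(0);

	if (khz)
		pr_info("cpu0 max frequency: %u kHz\n", khz);
	else
		pr_info("cpu0 has no active cpufreq policy\n");
}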
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
new file mode 100644
index 00000000000..b7c3a84c4cf
--- /dev/null
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS4 - CPU frequency scaling support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/cpufreq.h>
+
+#include <mach/map.h>
+#include <mach/regs-clock.h>
+#include <mach/regs-mem.h>
+
+#include <plat/clock.h>
+#include <plat/pm.h>
+
+static struct clk *cpu_clk;
+static struct clk *moutcore;
+static struct clk *mout_mpll;
+static struct clk *mout_apll;
+
+static struct regulator *arm_regulator;
+static struct regulator *int_regulator;
+
+static struct cpufreq_freqs freqs;
+static unsigned int memtype;
+
+enum exynos4_memory_type {
+ DDR2 = 4,
+ LPDDR2,
+ DDR3,
+};
+
+enum cpufreq_level_index {
+ L0, L1, L2, L3, CPUFREQ_LEVEL_END,
+};
+
+static struct cpufreq_frequency_table exynos4_freq_table[] = {
+ {L0, 1000*1000},
+ {L1, 800*1000},
+ {L2, 400*1000},
+ {L3, 100*1000},
+ {0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
+ /*
+ * Clock divider value for following
+ * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
+ * DIVATB, DIVPCLK_DBG, DIVAPLL }
+ */
+
+ /* ARM L0: 1000MHz */
+ { 0, 3, 7, 3, 3, 0, 1 },
+
+ /* ARM L1: 800MHz */
+ { 0, 3, 7, 3, 3, 0, 1 },
+
+ /* ARM L2: 400MHz */
+ { 0, 1, 3, 1, 3, 0, 1 },
+
+ /* ARM L3: 100MHz */
+ { 0, 0, 1, 0, 3, 1, 1 },
+};
+
+static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
+ /*
+ * Clock divider value for following
+ * { DIVCOPY, DIVHPM }
+ */
+
+ /* ARM L0: 1000MHz */
+ { 3, 0 },
+
+ /* ARM L1: 800MHz */
+ { 3, 0 },
+
+ /* ARM L2: 400MHz */
+ { 3, 0 },
+
+ /* ARM L3: 100MHz */
+ { 3, 0 },
+};
+
+static unsigned int clkdiv_dmc0[CPUFREQ_LEVEL_END][8] = {
+ /*
+ * Clock divider value for following
+ * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+ * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
+ */
+
+ /* DMC L0: 400MHz */
+ { 3, 1, 1, 1, 1, 1, 3, 1 },
+
+ /* DMC L1: 400MHz */
+ { 3, 1, 1, 1, 1, 1, 3, 1 },
+
+ /* DMC L2: 266.7MHz */
+ { 7, 1, 1, 2, 1, 1, 3, 1 },
+
+ /* DMC L3: 200MHz */
+ { 7, 1, 1, 3, 1, 1, 3, 1 },
+};
+
+static unsigned int clkdiv_top[CPUFREQ_LEVEL_END][5] = {
+ /*
+ * Clock divider value for following
+ * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
+ */
+
+ /* ACLK200 L0: 200MHz */
+ { 3, 7, 4, 5, 1 },
+
+ /* ACLK200 L1: 200MHz */
+ { 3, 7, 4, 5, 1 },
+
+ /* ACLK200 L2: 160MHz */
+ { 4, 7, 5, 7, 1 },
+
+ /* ACLK200 L3: 133.3MHz */
+ { 5, 7, 7, 7, 1 },
+};
+
+static unsigned int clkdiv_lr_bus[CPUFREQ_LEVEL_END][2] = {
+ /*
+ * Clock divider value for following
+ * { DIVGDL/R, DIVGPL/R }
+ */
+
+ /* ACLK_GDL/R L0: 200MHz */
+ { 3, 1 },
+
+ /* ACLK_GDL/R L1: 200MHz */
+ { 3, 1 },
+
+ /* ACLK_GDL/R L2: 160MHz */
+ { 4, 1 },
+
+ /* ACLK_GDL/R L3: 133.3MHz */
+ { 5, 1 },
+};
+
+struct cpufreq_voltage_table {
+ unsigned int index; /* any */
+ unsigned int arm_volt; /* uV */
+ unsigned int int_volt;
+};
+
+static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = {
+ {
+ .index = L0,
+ .arm_volt = 1200000,
+ .int_volt = 1100000,
+ }, {
+ .index = L1,
+ .arm_volt = 1100000,
+ .int_volt = 1100000,
+ }, {
+ .index = L2,
+ .arm_volt = 1000000,
+ .int_volt = 1000000,
+ }, {
+ .index = L3,
+ .arm_volt = 900000,
+ .int_volt = 1000000,
+ },
+};
+
+static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = {
+ /* APLL FOUT L0: 1000MHz */
+ ((250 << 16) | (6 << 8) | 1),
+
+ /* APLL FOUT L1: 800MHz */
+ ((200 << 16) | (6 << 8) | 1),
+
+ /* APLL FOUT L2 : 400MHz */
+ ((200 << 16) | (6 << 8) | 2),
+
+ /* APLL FOUT L3: 100MHz */
+ ((200 << 16) | (6 << 8) | 4),
+};
+
+static int exynos4_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, exynos4_freq_table);
+}
+
+static unsigned int exynos4_getspeed(unsigned int cpu)
+{
+ return clk_get_rate(cpu_clk) / 1000;
+}
+
+static void exynos4_set_clkdiv(unsigned int div_index)
+{
+ unsigned int tmp;
+
+ /* Change Divider - CPU0 */
+
+ tmp = __raw_readl(S5P_CLKDIV_CPU);
+
+ tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | S5P_CLKDIV_CPU0_COREM0_MASK |
+ S5P_CLKDIV_CPU0_COREM1_MASK | S5P_CLKDIV_CPU0_PERIPH_MASK |
+ S5P_CLKDIV_CPU0_ATB_MASK | S5P_CLKDIV_CPU0_PCLKDBG_MASK |
+ S5P_CLKDIV_CPU0_APLL_MASK);
+
+ tmp |= ((clkdiv_cpu0[div_index][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
+ (clkdiv_cpu0[div_index][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
+ (clkdiv_cpu0[div_index][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
+ (clkdiv_cpu0[div_index][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
+ (clkdiv_cpu0[div_index][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
+ (clkdiv_cpu0[div_index][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
+ (clkdiv_cpu0[div_index][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_CPU);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STATCPU);
+ } while (tmp & 0x1111111);
+
+ /* Change Divider - CPU1 */
+
+ tmp = __raw_readl(S5P_CLKDIV_CPU1);
+
+ tmp &= ~((0x7 << 4) | 0x7);
+
+ tmp |= ((clkdiv_cpu1[div_index][0] << 4) |
+ (clkdiv_cpu1[div_index][1] << 0));
+
+ __raw_writel(tmp, S5P_CLKDIV_CPU1);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STATCPU1);
+ } while (tmp & 0x11);
+
+ /* Change Divider - DMC0 */
+
+ tmp = __raw_readl(S5P_CLKDIV_DMC0);
+
+ tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+ S5P_CLKDIV_DMC0_DPHY_MASK | S5P_CLKDIV_DMC0_DMC_MASK |
+ S5P_CLKDIV_DMC0_DMCD_MASK | S5P_CLKDIV_DMC0_DMCP_MASK |
+ S5P_CLKDIV_DMC0_COPY2_MASK | S5P_CLKDIV_DMC0_CORETI_MASK);
+
+ tmp |= ((clkdiv_dmc0[div_index][0] << S5P_CLKDIV_DMC0_ACP_SHIFT) |
+ (clkdiv_dmc0[div_index][1] << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+ (clkdiv_dmc0[div_index][2] << S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+ (clkdiv_dmc0[div_index][3] << S5P_CLKDIV_DMC0_DMC_SHIFT) |
+ (clkdiv_dmc0[div_index][4] << S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+ (clkdiv_dmc0[div_index][5] << S5P_CLKDIV_DMC0_DMCP_SHIFT) |
+ (clkdiv_dmc0[div_index][6] << S5P_CLKDIV_DMC0_COPY2_SHIFT) |
+ (clkdiv_dmc0[div_index][7] << S5P_CLKDIV_DMC0_CORETI_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+ } while (tmp & 0x11111111);
+
+ /* Change Divider - TOP */
+
+ tmp = __raw_readl(S5P_CLKDIV_TOP);
+
+ tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK | S5P_CLKDIV_TOP_ACLK100_MASK |
+ S5P_CLKDIV_TOP_ACLK160_MASK | S5P_CLKDIV_TOP_ACLK133_MASK |
+ S5P_CLKDIV_TOP_ONENAND_MASK);
+
+ tmp |= ((clkdiv_top[div_index][0] << S5P_CLKDIV_TOP_ACLK200_SHIFT) |
+ (clkdiv_top[div_index][1] << S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+ (clkdiv_top[div_index][2] << S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+ (clkdiv_top[div_index][3] << S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+ (clkdiv_top[div_index][4] << S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_TOP);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+ } while (tmp & 0x11111);
+
+ /* Change Divider - LEFTBUS */
+
+ tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - RIGHTBUS */
+
+ tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+ } while (tmp & 0x11);
+}
+
+static void exynos4_set_apll(unsigned int index)
+{
+ unsigned int tmp;
+
+ /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+ clk_set_parent(moutcore, mout_mpll);
+
+ do {
+ tmp = (__raw_readl(S5P_CLKMUX_STATCPU)
+ >> S5P_CLKSRC_CPU_MUXCORE_SHIFT);
+ tmp &= 0x7;
+ } while (tmp != 0x2);
+
+ /* 2. Set APLL Lock time */
+ __raw_writel(S5P_APLL_LOCKTIME, S5P_APLL_LOCK);
+
+ /* 3. Change PLL PMS values */
+ tmp = __raw_readl(S5P_APLL_CON0);
+ tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
+ tmp |= exynos4_apll_pms_table[index];
+ __raw_writel(tmp, S5P_APLL_CON0);
+
+ /* 4. wait_lock_time */
+ do {
+ tmp = __raw_readl(S5P_APLL_CON0);
+ } while (!(tmp & (0x1 << S5P_APLLCON0_LOCKED_SHIFT)));
+
+ /* 5. MUX_CORE_SEL = APLL */
+ clk_set_parent(moutcore, mout_apll);
+
+ do {
+ tmp = __raw_readl(S5P_CLKMUX_STATCPU);
+ tmp &= S5P_CLKMUX_STATCPU_MUXCORE_MASK;
+ } while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT));
+}
+
+static void exynos4_set_frequency(unsigned int old_index, unsigned int new_index)
+{
+ unsigned int tmp;
+
+ if (old_index > new_index) {
+ /* Changing the frequency up to L0 requires reprogramming the APLL */
+ if (freqs.new == exynos4_freq_table[L0].frequency) {
+ /* 1. Change the system clock divider values */
+ exynos4_set_clkdiv(new_index);
+
+ /* 2. Change the apll m,p,s value */
+ exynos4_set_apll(new_index);
+ } else {
+ /* 1. Change the system clock divider values */
+ exynos4_set_clkdiv(new_index);
+
+ /* 2. Change just s value in apll m,p,s value */
+ tmp = __raw_readl(S5P_APLL_CON0);
+ tmp &= ~(0x7 << 0);
+ tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
+ __raw_writel(tmp, S5P_APLL_CON0);
+ }
+ }
+
+ else if (old_index < new_index) {
+ /* The frequency changing from L0 needs to change apll */
+ if (freqs.old == exynos4_freq_table[L0].frequency) {
+ /* 1. Change the apll m,p,s value */
+ exynos4_set_apll(new_index);
+
+ /* 2. Change the system clock divider values */
+ exynos4_set_clkdiv(new_index);
+ } else {
+ /* 1. Change just s value in apll m,p,s value */
+ tmp = __raw_readl(S5P_APLL_CON0);
+ tmp &= ~(0x7 << 0);
+ tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
+ __raw_writel(tmp, S5P_APLL_CON0);
+
+ /* 2. Change the system clock divider values */
+ exynos4_set_clkdiv(new_index);
+ }
+ }
+}
+
+static int exynos4_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int index, old_index;
+ unsigned int arm_volt, int_volt;
+
+ freqs.old = exynos4_getspeed(policy->cpu);
+
+ if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
+ freqs.old, relation, &old_index))
+ return -EINVAL;
+
+ if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
+ target_freq, relation, &index))
+ return -EINVAL;
+
+ freqs.new = exynos4_freq_table[index].frequency;
+ freqs.cpu = policy->cpu;
+
+ if (freqs.new == freqs.old)
+ return 0;
+
+ /* get the voltage value */
+ arm_volt = exynos4_volt_table[index].arm_volt;
+ int_volt = exynos4_volt_table[index].int_volt;
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ /* control regulator */
+ if (freqs.new > freqs.old) {
+ /* Voltage up */
+ regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
+ regulator_set_voltage(int_regulator, int_volt, int_volt);
+ }
+
+ /* Clock Configuration Procedure */
+ exynos4_set_frequency(old_index, index);
+
+ /* control regulator */
+ if (freqs.new < freqs.old) {
+ /* Voltage down */
+ regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
+ regulator_set_voltage(int_regulator, int_volt, int_volt);
+ }
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static int exynos4_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+#endif
+
+static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu);
+
+ cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
+
+ /* set the transition latency value */
+ policy->cpuinfo.transition_latency = 100000;
+
+ /*
+ * EXYNOS4 multi-core processors have two cores whose frequencies
+ * cannot be set independently. Both CPUs always run at the same
+ * speed, so all CPUs are affected by a frequency change.
+ */
+ cpumask_setall(policy->cpus);
+
+ return cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table);
+}
+
+static struct cpufreq_driver exynos4_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = exynos4_verify_speed,
+ .target = exynos4_target,
+ .get = exynos4_getspeed,
+ .init = exynos4_cpufreq_cpu_init,
+ .name = "exynos4_cpufreq",
+#ifdef CONFIG_PM
+ .suspend = exynos4_cpufreq_suspend,
+ .resume = exynos4_cpufreq_resume,
+#endif
+};
+
+static int __init exynos4_cpufreq_init(void)
+{
+ cpu_clk = clk_get(NULL, "armclk");
+ if (IS_ERR(cpu_clk))
+ return PTR_ERR(cpu_clk);
+
+ moutcore = clk_get(NULL, "moutcore");
+ if (IS_ERR(moutcore))
+ goto out;
+
+ mout_mpll = clk_get(NULL, "mout_mpll");
+ if (IS_ERR(mout_mpll))
+ goto out;
+
+ mout_apll = clk_get(NULL, "mout_apll");
+ if (IS_ERR(mout_apll))
+ goto out;
+
+ arm_regulator = regulator_get(NULL, "vdd_arm");
+ if (IS_ERR(arm_regulator)) {
+ printk(KERN_ERR "failed to get resource %s\n", "vdd_arm");
+ goto out;
+ }
+
+ int_regulator = regulator_get(NULL, "vdd_int");
+ if (IS_ERR(int_regulator)) {
+ printk(KERN_ERR "failed to get resource %s\n", "vdd_int");
+ goto out;
+ }
+
+ /*
+ * Check the DRAM type, because the available DVFS levels differ
+ * depending on the DRAM type.
+ */
+ memtype = __raw_readl(S5P_VA_DMC0 + S5P_DMC0_MEMCON_OFFSET);
+ memtype = (memtype >> S5P_DMC0_MEMTYPE_SHIFT);
+ memtype &= S5P_DMC0_MEMTYPE_MASK;
+
+ if ((memtype < DDR2) && (memtype > DDR3)) {
+ printk(KERN_ERR "%s: wrong memtype= 0x%x\n", __func__, memtype);
+ goto out;
+ } else {
+ printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype);
+ }
+
+ return cpufreq_register_driver(&exynos4_driver);
+
+out:
+ if (!IS_ERR(cpu_clk))
+ clk_put(cpu_clk);
+
+ if (!IS_ERR(moutcore))
+ clk_put(moutcore);
+
+ if (!IS_ERR(mout_mpll))
+ clk_put(mout_mpll);
+
+ if (!IS_ERR(mout_apll))
+ clk_put(mout_apll);
+
+ if (!IS_ERR(arm_regulator))
+ regulator_put(arm_regulator);
+
+ if (!IS_ERR(int_regulator))
+ regulator_put(int_regulator);
+
+ printk(KERN_ERR "%s: failed initialization\n", __func__);
+
+ return -EINVAL;
+}
+late_initcall(exynos4_cpufreq_init);
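Each exynos4_apll_pms_table entry packs the APLL's M, P and S fields as (M << 16) | (P << 8) | S. Assuming the usual 24 MHz reference crystal and a PLL45xx-style relation FOUT = (M * FIN) / (P * 2^(S-1)) — both assumptions, since neither appears in the patch — the table reproduces the commented target rates, e.g. L0: 250 * 24 MHz / (6 * 1) = 1000 MHz. A small decoding sketch:

/*
 * Sketch only: decode a PMS word and compute the APLL rate in kHz,
 * assuming a 24 MHz reference and FOUT = M * FIN / (P * 2^(S-1)).
 */
static unsigned long example_apll_rate_khz(unsigned int pms)
{
	unsigned int m = (pms >> 16) & 0x3ff;
	unsigned int p = (pms >> 8) & 0x3f;
	unsigned int s = pms & 0x7;

	/* L0: m=250, p=6, s=1 -> 250 * 24000 / (6 << 0) = 1000000 kHz */
	return (24000UL * m) / (p << (s - 1));
}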
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
new file mode 100644
index 00000000000..89b178a3f84
--- /dev/null
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2011 Dmitry Eremin-Solenikov
+ * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
+ * that is, the iMac G5 and the latest single-CPU desktops.
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/time.h>
+#include <linux/of.h>
+
+#define DBG(fmt...) pr_debug(fmt)
+
+/* see 970FX user manual */
+
+#define SCOM_PCR 0x0aa001 /* PCR scom addr */
+
+#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
+#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
+#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
+#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
+#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
+#define PCR_SPEED_SHIFT 17
+#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
+#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
+#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
+#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
+#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
+#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
+
+#define SCOM_PSR 0x408001 /* PSR scom addr */
+/* warning: PSR is a 64-bit register */
+#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
+#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
+#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
+#define PSR_CUR_SPEED_SHIFT (56)
+
+/*
+ * The G5 only supports two frequencies (Quarter speed is not supported)
+ */
+#define CPUFREQ_HIGH 0
+#define CPUFREQ_LOW 1
+
+static struct cpufreq_frequency_table maple_cpu_freqs[] = {
+ {CPUFREQ_HIGH, 0},
+ {CPUFREQ_LOW, 0},
+ {0, CPUFREQ_TABLE_END},
+};
+
+static struct freq_attr *maple_cpu_freqs_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+/* Power mode data is an array of the 32-bit PCR values to use for
+ * the various frequencies, retrieved from the device tree.
+ */
+static int maple_pmode_cur;
+
+static DEFINE_MUTEX(maple_switch_mutex);
+
+static const u32 *maple_pmode_data;
+static int maple_pmode_max;
+
+/*
+ * SCOM based frequency switching for 970FX rev3
+ */
+static int maple_scom_switch_freq(int speed_mode)
+{
+ unsigned long flags;
+ int to;
+
+ local_irq_save(flags);
+
+ /* Clear PCR high */
+ scom970_write(SCOM_PCR, 0);
+ /* Clear PCR low */
+ scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
+ /* Set PCR low */
+ scom970_write(SCOM_PCR, PCR_HILO_SELECT |
+ maple_pmode_data[speed_mode]);
+
+ /* Wait for completion */
+ for (to = 0; to < 10; to++) {
+ unsigned long psr = scom970_read(SCOM_PSR);
+
+ if ((psr & PSR_CMD_RECEIVED) == 0 &&
+ (((psr >> PSR_CUR_SPEED_SHIFT) ^
+ (maple_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
+ == 0)
+ break;
+ if (psr & PSR_CMD_COMPLETED)
+ break;
+ udelay(100);
+ }
+
+ local_irq_restore(flags);
+
+ maple_pmode_cur = speed_mode;
+ ppc_proc_freq = maple_cpu_freqs[speed_mode].frequency * 1000ul;
+
+ return 0;
+}
+
+static int maple_scom_query_freq(void)
+{
+ unsigned long psr = scom970_read(SCOM_PSR);
+ int i;
+
+ for (i = 0; i <= maple_pmode_max; i++)
+ if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
+ (maple_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
+ break;
+ return i;
+}
+
+/*
+ * Common interface to the cpufreq core
+ */
+
+static int maple_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, maple_cpu_freqs);
+}
+
+static int maple_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ unsigned int newstate = 0;
+ struct cpufreq_freqs freqs;
+ int rc;
+
+ if (cpufreq_frequency_table_target(policy, maple_cpu_freqs,
+ target_freq, relation, &newstate))
+ return -EINVAL;
+
+ if (maple_pmode_cur == newstate)
+ return 0;
+
+ mutex_lock(&maple_switch_mutex);
+
+ freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency;
+ freqs.new = maple_cpu_freqs[newstate].frequency;
+ freqs.cpu = 0;
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ rc = maple_scom_switch_freq(newstate);
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ mutex_unlock(&maple_switch_mutex);
+
+ return rc;
+}
+
+static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
+{
+ return maple_cpu_freqs[maple_pmode_cur].frequency;
+}
+
+static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ policy->cpuinfo.transition_latency = 12000;
+ policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency;
+ /* secondary CPUs are tied to the primary one by the cpufreq
+ * core; we tell it that all online CPUs must share a single
+ * policy, so they are handled together. */
+ cpumask_copy(policy->cpus, cpu_online_mask);
+ cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
+
+ return cpufreq_frequency_table_cpuinfo(policy,
+ maple_cpu_freqs);
+}
+
+
+static struct cpufreq_driver maple_cpufreq_driver = {
+ .name = "maple",
+ .owner = THIS_MODULE,
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = maple_cpufreq_cpu_init,
+ .verify = maple_cpufreq_verify,
+ .target = maple_cpufreq_target,
+ .get = maple_cpufreq_get_speed,
+ .attr = maple_cpu_freqs_attr,
+};
+
+static int __init maple_cpufreq_init(void)
+{
+ struct device_node *cpus;
+ struct device_node *cpunode;
+ unsigned int psize;
+ unsigned long max_freq;
+ const u32 *valp;
+ u32 pvr_hi;
+ int rc = -ENODEV;
+
+ /*
+ * Behave here like powermac driver which checks machine compatibility
+ * to ease merging of two drivers in future.
+ */
+ if (!of_machine_is_compatible("Momentum,Maple") &&
+ !of_machine_is_compatible("Momentum,Apache"))
+ return 0;
+
+ cpus = of_find_node_by_path("/cpus");
+ if (cpus == NULL) {
+ DBG("No /cpus node !\n");
+ return -ENODEV;
+ }
+
+ /* Get first CPU node */
+ for (cpunode = NULL;
+ (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
+ const u32 *reg = of_get_property(cpunode, "reg", NULL);
+ if (reg == NULL || (*reg) != 0)
+ continue;
+ if (!strcmp(cpunode->type, "cpu"))
+ break;
+ }
+ if (cpunode == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
+ goto bail_cpus;
+ }
+
+ /* Check 970FX for now */
+ /* we actually don't care on which CPU to access PVR */
+ pvr_hi = PVR_VER(mfspr(SPRN_PVR));
+ if (pvr_hi != 0x3c && pvr_hi != 0x44) {
+ printk(KERN_ERR "cpufreq: Unsupported CPU version (%x)\n",
+ pvr_hi);
+ goto bail_noprops;
+ }
+
+ /* Look for the powertune data in the device-tree */
+ /*
+ * On Maple this property is provided by PIBS in dual-processor config,
+ * not provided by PIBS in CPU0 config and also not provided by SLOF,
+ * so YMMV
+ */
+ maple_pmode_data = of_get_property(cpunode, "power-mode-data", &psize);
+ if (!maple_pmode_data) {
+ DBG("No power-mode-data !\n");
+ goto bail_noprops;
+ }
+ maple_pmode_max = psize / sizeof(u32) - 1;
+
+ /*
+ * From what I see, clock-frequency is always the maximal frequency.
+ * The current driver can not slew sysclk yet, so we really only deal
+ * with powertune steps for now. We also only implement full freq and
+ * half freq in this version. So far, I haven't yet seen a machine
+ * supporting anything else.
+ */
+ valp = of_get_property(cpunode, "clock-frequency", NULL);
+ if (!valp)
+ return -ENODEV;
+ max_freq = (*valp)/1000;
+ maple_cpu_freqs[0].frequency = max_freq;
+ maple_cpu_freqs[1].frequency = max_freq/2;
+
+ /* Force apply current frequency to make sure everything is in
+ * sync (voltage is right for example). Firmware may leave us with
+ * a strange setting ...
+ */
+ msleep(10);
+ maple_pmode_cur = -1;
+ maple_scom_switch_freq(maple_scom_query_freq());
+
+ printk(KERN_INFO "Registering Maple CPU frequency driver\n");
+ printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+ maple_cpu_freqs[1].frequency/1000,
+ maple_cpu_freqs[0].frequency/1000,
+ maple_cpu_freqs[maple_pmode_cur].frequency/1000);
+
+ rc = cpufreq_register_driver(&maple_cpufreq_driver);
+
+ of_node_put(cpunode);
+ of_node_put(cpus);
+
+ return rc;
+
+bail_noprops:
+ of_node_put(cpunode);
+bail_cpus:
+ of_node_put(cpus);
+
+ return rc;
+}
+
+module_init(maple_cpufreq_init);
+
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
new file mode 100644
index 00000000000..b8d1d205e1e
--- /dev/null
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * S3C64xx CPUfreq Support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+
+static struct clk *armclk;
+static struct regulator *vddarm;
+static unsigned long regulator_latency;
+
+#ifdef CONFIG_CPU_S3C6410
+struct s3c64xx_dvfs {
+ unsigned int vddarm_min;
+ unsigned int vddarm_max;
+};
+
+static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
+ [0] = { 1000000, 1150000 },
+ [1] = { 1050000, 1150000 },
+ [2] = { 1100000, 1150000 },
+ [3] = { 1200000, 1350000 },
+ [4] = { 1300000, 1350000 },
+};
+
+static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
+ { 0, 66000 },
+ { 0, 100000 },
+ { 0, 133000 },
+ { 1, 200000 },
+ { 1, 222000 },
+ { 1, 266000 },
+ { 2, 333000 },
+ { 2, 400000 },
+ { 2, 532000 },
+ { 2, 533000 },
+ { 3, 667000 },
+ { 4, 800000 },
+ { 0, CPUFREQ_TABLE_END },
+};
+#endif
+
+static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
+}
+
+static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
+{
+ if (cpu != 0)
+ return 0;
+
+ return clk_get_rate(armclk) / 1000;
+}
+
+static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ int ret;
+ unsigned int i;
+ struct cpufreq_freqs freqs;
+ struct s3c64xx_dvfs *dvfs;
+
+ ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table,
+ target_freq, relation, &i);
+ if (ret != 0)
+ return ret;
+
+ freqs.cpu = 0;
+ freqs.old = clk_get_rate(armclk) / 1000;
+ freqs.new = s3c64xx_freq_table[i].frequency;
+ freqs.flags = 0;
+ dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].index];
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+#ifdef CONFIG_REGULATOR
+ if (vddarm && freqs.new > freqs.old) {
+ ret = regulator_set_voltage(vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+ if (ret != 0) {
+ pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+ freqs.new, ret);
+ goto err;
+ }
+ }
+#endif
+
+ ret = clk_set_rate(armclk, freqs.new * 1000);
+ if (ret < 0) {
+ pr_err("cpufreq: Failed to set rate %dkHz: %d\n",
+ freqs.new, ret);
+ goto err;
+ }
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+#ifdef CONFIG_REGULATOR
+ if (vddarm && freqs.new < freqs.old) {
+ ret = regulator_set_voltage(vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+ if (ret != 0) {
+ pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+ freqs.new, ret);
+ goto err_clk;
+ }
+ }
+#endif
+
+ pr_debug("cpufreq: Set actual frequency %lukHz\n",
+ clk_get_rate(armclk) / 1000);
+
+ return 0;
+
+err_clk:
+ if (clk_set_rate(armclk, freqs.old * 1000) < 0)
+ pr_err("Failed to restore original clock rate\n");
+err:
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return ret;
+}
+
+#ifdef CONFIG_REGULATOR
+static void __init s3c64xx_cpufreq_config_regulator(void)
+{
+ int count, v, i, found;
+ struct cpufreq_frequency_table *freq;
+ struct s3c64xx_dvfs *dvfs;
+
+ count = regulator_count_voltages(vddarm);
+ if (count < 0) {
+ pr_err("cpufreq: Unable to check supported voltages\n");
+ }
+
+ freq = s3c64xx_freq_table;
+ while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) {
+ if (freq->frequency == CPUFREQ_ENTRY_INVALID) {
+ freq++;
+ continue;
+ }
+
+ dvfs = &s3c64xx_dvfs_table[freq->index];
+ found = 0;
+
+ for (i = 0; i < count; i++) {
+ v = regulator_list_voltage(vddarm, i);
+ if (v >= dvfs->vddarm_min && v <= dvfs->vddarm_max)
+ found = 1;
+ }
+
+ if (!found) {
+ pr_debug("cpufreq: %dkHz unsupported by regulator\n",
+ freq->frequency);
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+
+ freq++;
+ }
+
+ /* Guess based on having to do an I2C/SPI write; in future we
+ * will be able to query the regulator performance here. */
+ regulator_latency = 1 * 1000 * 1000;
+}
+#endif
+
+static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
+{
+ int ret;
+ struct cpufreq_frequency_table *freq;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ if (s3c64xx_freq_table == NULL) {
+ pr_err("cpufreq: No frequency information for this CPU\n");
+ return -ENODEV;
+ }
+
+ armclk = clk_get(NULL, "armclk");
+ if (IS_ERR(armclk)) {
+ pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n",
+ PTR_ERR(armclk));
+ return PTR_ERR(armclk);
+ }
+
+#ifdef CONFIG_REGULATOR
+ vddarm = regulator_get(NULL, "vddarm");
+ if (IS_ERR(vddarm)) {
+ ret = PTR_ERR(vddarm);
+ pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
+ pr_err("cpufreq: Only frequency scaling available\n");
+ vddarm = NULL;
+ } else {
+ s3c64xx_cpufreq_config_regulator();
+ }
+#endif
+
+ freq = s3c64xx_freq_table;
+ while (freq->frequency != CPUFREQ_TABLE_END) {
+ unsigned long r;
+
+ /* Check for frequencies we can generate */
+ r = clk_round_rate(armclk, freq->frequency * 1000);
+ r /= 1000;
+ if (r != freq->frequency) {
+ pr_debug("cpufreq: %dkHz unsupported by clock\n",
+ freq->frequency);
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+
+ /* If we have no regulator then assume startup
+ * frequency is the maximum we can support. */
+ if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0))
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+
+ freq++;
+ }
+
+ policy->cur = clk_get_rate(armclk) / 1000;
+
+ /* Datasheet says PLL stabilisation time (if we were to use
+ * the PLLs, which we don't currently) is ~300us worst case,
+ * but add some fudge.
+ */
+ policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
+ if (ret != 0) {
+ pr_err("cpufreq: Failed to configure frequency table: %d\n",
+ ret);
+ regulator_put(vddarm);
+ clk_put(armclk);
+ }
+
+ return ret;
+}
+
+static struct cpufreq_driver s3c64xx_cpufreq_driver = {
+ .owner = THIS_MODULE,
+ .flags = 0,
+ .verify = s3c64xx_cpufreq_verify_speed,
+ .target = s3c64xx_cpufreq_set_target,
+ .get = s3c64xx_cpufreq_get_speed,
+ .init = s3c64xx_cpufreq_driver_init,
+ .name = "s3c",
+};
+
+static int __init s3c64xx_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&s3c64xx_cpufreq_driver);
+}
+module_init(s3c64xx_cpufreq_init);
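
s3c64xx_cpufreq_config_regulator() is an instance of a common filter: enumerate the voltages the regulator can actually deliver with regulator_count_voltages()/regulator_list_voltage() and invalidate any operating point whose vddarm window contains none of them. The standalone sketch below replays that check against the driver's vddarm windows, assuming a hypothetical regulator offering 1.00 V to 1.35 V in 50 mV steps (the step list is invented for illustration):

#include <stdio.h>

/* Stand-in for regulator_list_voltage(): a made-up supply offering
 * 1.00 V .. 1.35 V in 50 mV steps, reported in uV. */
static int list_voltage(int selector)
{
	return 1000000 + selector * 50000;
}

int main(void)
{
	/* vddarm_min/vddarm_max windows from s3c64xx_dvfs_table, in uV */
	static const struct { int min, max; } dvfs[] = {
		{ 1000000, 1150000 }, { 1050000, 1150000 }, { 1100000, 1150000 },
		{ 1200000, 1350000 }, { 1300000, 1350000 },
	};
	int count = 8;	/* selectors 0..7, as regulator_count_voltages() would report */
	int i, sel, found;

	for (i = 0; i < 5; i++) {
		found = 0;
		for (sel = 0; sel < count; sel++) {
			int v = list_voltage(sel);

			if (v >= dvfs[i].min && v <= dvfs[i].max)
				found = 1;
		}
		printf("dvfs level %d: %s\n", i,
		       found ? "supported" : "marked CPUFREQ_ENTRY_INVALID");
	}
	return 0;
}
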
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
new file mode 100644
index 00000000000..a484aaea980
--- /dev/null
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -0,0 +1,649 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * CPU frequency scaling for S5PC110/S5PV210
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/cpufreq.h>
+#include <linux/reboot.h>
+#include <linux/regulator/consumer.h>
+#include <linux/suspend.h>
+
+#include <mach/map.h>
+#include <mach/regs-clock.h>
+
+static struct clk *cpu_clk;
+static struct clk *dmc0_clk;
+static struct clk *dmc1_clk;
+static struct cpufreq_freqs freqs;
+static DEFINE_MUTEX(set_freq_lock);
+
+/* APLL M,P,S values for 1G/800Mhz */
+#define APLL_VAL_1000 ((1 << 31) | (125 << 16) | (3 << 8) | 1)
+#define APLL_VAL_800 ((1 << 31) | (100 << 16) | (3 << 8) | 1)
+
+/* Use 800MHz when entering sleep mode */
+#define SLEEP_FREQ (800 * 1000)
+
+/*
+ * relation carries additional semantics beyond the standard cpufreq ones:
+ * DISABLE_FURTHER_CPUFREQ: disable further access to target
+ * ENABLE_FURTHER_CPUFREQ: enable access to target
+ */
+enum cpufreq_access {
+ DISABLE_FURTHER_CPUFREQ = 0x10,
+ ENABLE_FURTHER_CPUFREQ = 0x20,
+};
+
+static bool no_cpufreq_access;
+
+/*
+ * DRAM configurations to calculate refresh counter for changing
+ * frequency of memory.
+ */
+struct dram_conf {
+ unsigned long freq; /* HZ */
+ unsigned long refresh; /* DRAM refresh counter * 1000 */
+};
+
+/* DRAM configuration (DMC0 and DMC1) */
+static struct dram_conf s5pv210_dram_conf[2];
+
+enum perf_level {
+ L0, L1, L2, L3, L4,
+};
+
+enum s5pv210_mem_type {
+ LPDDR = 0x1,
+ LPDDR2 = 0x2,
+ DDR2 = 0x4,
+};
+
+enum s5pv210_dmc_port {
+ DMC0 = 0,
+ DMC1,
+};
+
+static struct cpufreq_frequency_table s5pv210_freq_table[] = {
+ {L0, 1000*1000},
+ {L1, 800*1000},
+ {L2, 400*1000},
+ {L3, 200*1000},
+ {L4, 100*1000},
+ {0, CPUFREQ_TABLE_END},
+};
+
+static struct regulator *arm_regulator;
+static struct regulator *int_regulator;
+
+struct s5pv210_dvs_conf {
+ int arm_volt; /* uV */
+ int int_volt; /* uV */
+};
+
+static const int arm_volt_max = 1350000;
+static const int int_volt_max = 1250000;
+
+static struct s5pv210_dvs_conf dvs_conf[] = {
+ [L0] = {
+ .arm_volt = 1250000,
+ .int_volt = 1100000,
+ },
+ [L1] = {
+ .arm_volt = 1200000,
+ .int_volt = 1100000,
+ },
+ [L2] = {
+ .arm_volt = 1050000,
+ .int_volt = 1100000,
+ },
+ [L3] = {
+ .arm_volt = 950000,
+ .int_volt = 1100000,
+ },
+ [L4] = {
+ .arm_volt = 950000,
+ .int_volt = 1000000,
+ },
+};
+
+static u32 clkdiv_val[5][11] = {
+ /*
+ * Clock divider value for following
+ * { APLL, A2M, HCLK_MSYS, PCLK_MSYS,
+ * HCLK_DSYS, PCLK_DSYS, HCLK_PSYS, PCLK_PSYS,
+ * ONEDRAM, MFC, G3D }
+ */
+
+ /* L0 : [1000/200/100][166/83][133/66][200/200] */
+ {0, 4, 4, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L1 : [800/200/100][166/83][133/66][200/200] */
+ {0, 3, 3, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L2 : [400/200/100][166/83][133/66][200/200] */
+ {1, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L3 : [200/200/100][166/83][133/66][200/200] */
+ {3, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L4 : [100/100/100][83/83][66/66][100/100] */
+ {7, 7, 0, 0, 7, 0, 9, 0, 7, 0, 0},
+};
+
+/*
+ * This function sets the DRAM refresh counter
+ * according to the operating frequency of the DRAM.
+ * ch: DMC port number (0 or 1)
+ * freq: operating frequency of the DRAM (KHz)
+ */
+static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
+{
+ unsigned long tmp, tmp1;
+ void __iomem *reg = NULL;
+
+ if (ch == DMC0) {
+ reg = (S5P_VA_DMC0 + 0x30);
+ } else if (ch == DMC1) {
+ reg = (S5P_VA_DMC1 + 0x30);
+ } else {
+ printk(KERN_ERR "Cannot find DMC port\n");
+ return;
+ }
+
+ /* Find current DRAM frequency */
+ tmp = s5pv210_dram_conf[ch].freq;
+
+ do_div(tmp, freq);
+
+ tmp1 = s5pv210_dram_conf[ch].refresh;
+
+ do_div(tmp1, tmp);
+
+ __raw_writel(tmp1, reg);
+}
+
+static int s5pv210_verify_speed(struct cpufreq_policy *policy)
+{
+ if (policy->cpu)
+ return -EINVAL;
+
+ return cpufreq_frequency_table_verify(policy, s5pv210_freq_table);
+}
+
+static unsigned int s5pv210_getspeed(unsigned int cpu)
+{
+ if (cpu)
+ return 0;
+
+ return clk_get_rate(cpu_clk) / 1000;
+}
+
+static int s5pv210_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned long reg;
+ unsigned int index, priv_index;
+ unsigned int pll_changing = 0;
+ unsigned int bus_speed_changing = 0;
+ int arm_volt, int_volt;
+ int ret = 0;
+
+ mutex_lock(&set_freq_lock);
+
+ if (relation & ENABLE_FURTHER_CPUFREQ)
+ no_cpufreq_access = false;
+
+ if (no_cpufreq_access) {
+#ifdef CONFIG_PM_VERBOSE
+ pr_err("%s:%d denied access to %s as it is disabled"
+ "temporarily\n", __FILE__, __LINE__, __func__);
+#endif
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (relation & DISABLE_FURTHER_CPUFREQ)
+ no_cpufreq_access = true;
+
+ relation &= ~(ENABLE_FURTHER_CPUFREQ | DISABLE_FURTHER_CPUFREQ);
+
+ freqs.old = s5pv210_getspeed(0);
+
+ if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
+ target_freq, relation, &index)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ freqs.new = s5pv210_freq_table[index].frequency;
+ freqs.cpu = 0;
+
+ if (freqs.new == freqs.old)
+ goto exit;
+
+ /* Finding current running level index */
+ if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
+ freqs.old, relation, &priv_index)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ arm_volt = dvs_conf[index].arm_volt;
+ int_volt = dvs_conf[index].int_volt;
+
+ if (freqs.new > freqs.old) {
+ ret = regulator_set_voltage(arm_regulator,
+ arm_volt, arm_volt_max);
+ if (ret)
+ goto exit;
+
+ ret = regulator_set_voltage(int_regulator,
+ int_volt, int_volt_max);
+ if (ret)
+ goto exit;
+ }
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ /* Check if the PLL needs to change */
+ if ((index == L0) || (priv_index == L0))
+ pll_changing = 1;
+
+ /* Check if the system bus clock needs to change */
+ if ((index == L4) || (priv_index == L4))
+ bus_speed_changing = 1;
+
+ if (bus_speed_changing) {
+ /*
+ * Reconfigure DRAM refresh counter value for minimum
+ * temporary clock while changing divider.
+ * The expected clock is 83MHz: 7.8us * 83MHz ~= 0x287
+ */
+ if (pll_changing)
+ s5pv210_set_refresh(DMC1, 83000);
+ else
+ s5pv210_set_refresh(DMC1, 100000);
+
+ s5pv210_set_refresh(DMC0, 83000);
+ }
+
+ /*
+ * The APLL must be changed at this level:
+ * APLL -> MPLL (for a stable transition) -> APLL
+ * The clock API is not ready for some of these clock sources,
+ * so do not use it in the code below.
+ */
+ if (pll_changing) {
+ /*
+ * 1. Temporarily change the divider for MFC and G3D
+ * SCLKA2M (200/1=200) -> (200/4=50) MHz
+ */
+ reg = __raw_readl(S5P_CLK_DIV2);
+ reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
+ reg |= (3 << S5P_CLKDIV2_G3D_SHIFT) |
+ (3 << S5P_CLKDIV2_MFC_SHIFT);
+ __raw_writel(reg, S5P_CLK_DIV2);
+
+ /* For MFC, G3D dividing */
+ do {
+ reg = __raw_readl(S5P_CLKDIV_STAT0);
+ } while (reg & ((1 << 16) | (1 << 17)));
+
+ /*
+ * 2. Change SCLKA2M (200MHz) to SCLKMPLL in MFC_MUX, G3D MUX
+ * (200/4=50) -> (667/4=166) MHz
+ */
+ reg = __raw_readl(S5P_CLK_SRC2);
+ reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
+ reg |= (1 << S5P_CLKSRC2_G3D_SHIFT) |
+ (1 << S5P_CLKSRC2_MFC_SHIFT);
+ __raw_writel(reg, S5P_CLK_SRC2);
+
+ do {
+ reg = __raw_readl(S5P_CLKMUX_STAT1);
+ } while (reg & ((1 << 7) | (1 << 3)));
+
+ /*
+ * 3. Set the DMC1 refresh count for 133MHz. If (index == L4),
+ * the refresh counter was already programmed in the code
+ * above (0x287 @ 83MHz).
+ */
+ if (!bus_speed_changing)
+ s5pv210_set_refresh(DMC1, 133000);
+
+ /* 4. SCLKAPLL -> SCLKMPLL */
+ reg = __raw_readl(S5P_CLK_SRC0);
+ reg &= ~(S5P_CLKSRC0_MUX200_MASK);
+ reg |= (0x1 << S5P_CLKSRC0_MUX200_SHIFT);
+ __raw_writel(reg, S5P_CLK_SRC0);
+
+ do {
+ reg = __raw_readl(S5P_CLKMUX_STAT0);
+ } while (reg & (0x1 << 18));
+
+ }
+
+ /* Change divider */
+ reg = __raw_readl(S5P_CLK_DIV0);
+
+ reg &= ~(S5P_CLKDIV0_APLL_MASK | S5P_CLKDIV0_A2M_MASK |
+ S5P_CLKDIV0_HCLK200_MASK | S5P_CLKDIV0_PCLK100_MASK |
+ S5P_CLKDIV0_HCLK166_MASK | S5P_CLKDIV0_PCLK83_MASK |
+ S5P_CLKDIV0_HCLK133_MASK | S5P_CLKDIV0_PCLK66_MASK);
+
+ reg |= ((clkdiv_val[index][0] << S5P_CLKDIV0_APLL_SHIFT) |
+ (clkdiv_val[index][1] << S5P_CLKDIV0_A2M_SHIFT) |
+ (clkdiv_val[index][2] << S5P_CLKDIV0_HCLK200_SHIFT) |
+ (clkdiv_val[index][3] << S5P_CLKDIV0_PCLK100_SHIFT) |
+ (clkdiv_val[index][4] << S5P_CLKDIV0_HCLK166_SHIFT) |
+ (clkdiv_val[index][5] << S5P_CLKDIV0_PCLK83_SHIFT) |
+ (clkdiv_val[index][6] << S5P_CLKDIV0_HCLK133_SHIFT) |
+ (clkdiv_val[index][7] << S5P_CLKDIV0_PCLK66_SHIFT));
+
+ __raw_writel(reg, S5P_CLK_DIV0);
+
+ do {
+ reg = __raw_readl(S5P_CLKDIV_STAT0);
+ } while (reg & 0xff);
+
+ /* ARM MCS value changed */
+ reg = __raw_readl(S5P_ARM_MCS_CON);
+ reg &= ~0x3;
+ if (index >= L3)
+ reg |= 0x3;
+ else
+ reg |= 0x1;
+
+ __raw_writel(reg, S5P_ARM_MCS_CON);
+
+ if (pll_changing) {
+ /* 5. Set Lock time = 30us*24Mhz = 0x2cf */
+ __raw_writel(0x2cf, S5P_APLL_LOCK);
+
+ /*
+ * 6. Turn on APLL
+ * 6-1. Set PMS values
+ * 6-2. Wait until the PLL is locked
+ */
+ if (index == L0)
+ __raw_writel(APLL_VAL_1000, S5P_APLL_CON);
+ else
+ __raw_writel(APLL_VAL_800, S5P_APLL_CON);
+
+ do {
+ reg = __raw_readl(S5P_APLL_CON);
+ } while (!(reg & (0x1 << 29)));
+
+ /*
+ * 7. Change source clock from SCLKMPLL (667MHz)
+ * to SCLKA2M(200Mhz) in MFC_MUX and G3D MUX
+ * (667/4=166)->(200/4=50)Mhz
+ */
+ reg = __raw_readl(S5P_CLK_SRC2);
+ reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
+ reg |= (0 << S5P_CLKSRC2_G3D_SHIFT) |
+ (0 << S5P_CLKSRC2_MFC_SHIFT);
+ __raw_writel(reg, S5P_CLK_SRC2);
+
+ do {
+ reg = __raw_readl(S5P_CLKMUX_STAT1);
+ } while (reg & ((1 << 7) | (1 << 3)));
+
+ /*
+ * 8. Change divider for MFC and G3D
+ * (200/4=50)->(200/1=200)Mhz
+ */
+ reg = __raw_readl(S5P_CLK_DIV2);
+ reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
+ reg |= (clkdiv_val[index][10] << S5P_CLKDIV2_G3D_SHIFT) |
+ (clkdiv_val[index][9] << S5P_CLKDIV2_MFC_SHIFT);
+ __raw_writel(reg, S5P_CLK_DIV2);
+
+ /* For MFC, G3D dividing */
+ do {
+ reg = __raw_readl(S5P_CLKDIV_STAT0);
+ } while (reg & ((1 << 16) | (1 << 17)));
+
+ /* 9. Change MPLL to APLL in MSYS_MUX */
+ reg = __raw_readl(S5P_CLK_SRC0);
+ reg &= ~(S5P_CLKSRC0_MUX200_MASK);
+ reg |= (0x0 << S5P_CLKSRC0_MUX200_SHIFT);
+ __raw_writel(reg, S5P_CLK_SRC0);
+
+ do {
+ reg = __raw_readl(S5P_CLKMUX_STAT0);
+ } while (reg & (0x1 << 18));
+
+ /*
+ * 10. DMC1 refresh counter
+ * L4 : DMC1 = 100Mhz 7.8us/(1/100) = 0x30c
+ * Others : DMC1 = 200Mhz 7.8us/(1/200) = 0x618
+ */
+ if (!bus_speed_changing)
+ s5pv210_set_refresh(DMC1, 200000);
+ }
+
+ /*
+ * The L4 level needs to change the memory bus speed, so the onedram
+ * clock divider and the memory refresh parameters must also be changed.
+ */
+ if (bus_speed_changing) {
+ reg = __raw_readl(S5P_CLK_DIV6);
+ reg &= ~S5P_CLKDIV6_ONEDRAM_MASK;
+ reg |= (clkdiv_val[index][8] << S5P_CLKDIV6_ONEDRAM_SHIFT);
+ __raw_writel(reg, S5P_CLK_DIV6);
+
+ do {
+ reg = __raw_readl(S5P_CLKDIV_STAT1);
+ } while (reg & (1 << 15));
+
+ /* Reconfigure DRAM refresh counter value */
+ if (index != L4) {
+ /*
+ * DMC0 : 166Mhz
+ * DMC1 : 200Mhz
+ */
+ s5pv210_set_refresh(DMC0, 166000);
+ s5pv210_set_refresh(DMC1, 200000);
+ } else {
+ /*
+ * DMC0 : 83Mhz
+ * DMC1 : 100Mhz
+ */
+ s5pv210_set_refresh(DMC0, 83000);
+ s5pv210_set_refresh(DMC1, 100000);
+ }
+ }
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ if (freqs.new < freqs.old) {
+ regulator_set_voltage(int_regulator,
+ int_volt, int_volt_max);
+
+ regulator_set_voltage(arm_regulator,
+ arm_volt, arm_volt_max);
+ }
+
+ printk(KERN_DEBUG "Perf changed[L%d]\n", index);
+
+exit:
+ mutex_unlock(&set_freq_lock);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+#endif
+
+static int check_mem_type(void __iomem *dmc_reg)
+{
+ unsigned long val;
+
+ val = __raw_readl(dmc_reg + 0x4);
+ val = (val & (0xf << 8));
+
+ return val >> 8;
+}
+
+static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned long mem_type;
+ int ret;
+
+ cpu_clk = clk_get(NULL, "armclk");
+ if (IS_ERR(cpu_clk))
+ return PTR_ERR(cpu_clk);
+
+ dmc0_clk = clk_get(NULL, "sclk_dmc0");
+ if (IS_ERR(dmc0_clk)) {
+ ret = PTR_ERR(dmc0_clk);
+ goto out_dmc0;
+ }
+
+ dmc1_clk = clk_get(NULL, "hclk_msys");
+ if (IS_ERR(dmc1_clk)) {
+ ret = PTR_ERR(dmc1_clk);
+ goto out_dmc1;
+ }
+
+ if (policy->cpu != 0) {
+ ret = -EINVAL;
+ goto out_dmc1;
+ }
+
+ /*
+ * check_mem_type : This driver only supports LPDDR & LPDDR2;
+ * other memory types are not supported.
+ */
+ mem_type = check_mem_type(S5P_VA_DMC0);
+
+ if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
+ printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
+ ret = -EINVAL;
+ goto out_dmc1;
+ }
+
+ /* Find the current refresh counter and frequency for each DMC */
+ s5pv210_dram_conf[0].refresh = (__raw_readl(S5P_VA_DMC0 + 0x30) * 1000);
+ s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);
+
+ s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
+ s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
+
+ policy->cur = policy->min = policy->max = s5pv210_getspeed(0);
+
+ cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);
+
+ policy->cpuinfo.transition_latency = 40000;
+
+ return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
+
+out_dmc1:
+ clk_put(dmc0_clk);
+out_dmc0:
+ clk_put(cpu_clk);
+ return ret;
+}
+
+static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ int ret;
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
+ DISABLE_FURTHER_CPUFREQ);
+ if (ret < 0)
+ return NOTIFY_BAD;
+
+ return NOTIFY_OK;
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
+ ENABLE_FURTHER_CPUFREQ);
+
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ int ret;
+
+ ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
+ DISABLE_FURTHER_CPUFREQ);
+ if (ret < 0)
+ return NOTIFY_BAD;
+
+ return NOTIFY_DONE;
+}
+
+static struct cpufreq_driver s5pv210_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = s5pv210_verify_speed,
+ .target = s5pv210_target,
+ .get = s5pv210_getspeed,
+ .init = s5pv210_cpu_init,
+ .name = "s5pv210",
+#ifdef CONFIG_PM
+ .suspend = s5pv210_cpufreq_suspend,
+ .resume = s5pv210_cpufreq_resume,
+#endif
+};
+
+static struct notifier_block s5pv210_cpufreq_notifier = {
+ .notifier_call = s5pv210_cpufreq_notifier_event,
+};
+
+static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
+ .notifier_call = s5pv210_cpufreq_reboot_notifier_event,
+};
+
+static int __init s5pv210_cpufreq_init(void)
+{
+ arm_regulator = regulator_get(NULL, "vddarm");
+ if (IS_ERR(arm_regulator)) {
+ pr_err("failed to get regulator vddarm");
+ return PTR_ERR(arm_regulator);
+ }
+
+ int_regulator = regulator_get(NULL, "vddint");
+ if (IS_ERR(int_regulator)) {
+ pr_err("failed to get regulator vddint");
+ regulator_put(arm_regulator);
+ return PTR_ERR(int_regulator);
+ }
+
+ register_pm_notifier(&s5pv210_cpufreq_notifier);
+ register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
+
+ return cpufreq_register_driver(&s5pv210_driver);
+}
+
+late_initcall(s5pv210_cpufreq_init);
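
The refresh counts quoted in the s5pv210 comments (0x287 at 83 MHz, 0x30c at 100 MHz, 0x618 at 200 MHz) all fall out of the scaling done in s5pv210_set_refresh(): the boot-time counter is kept multiplied by 1000 and divided by the ratio of the boot DRAM clock to the target clock. A standalone check of that integer math, assuming DMC1 booted at 200 MHz with a 7.8 us refresh period (counter 0x618):

#include <stdio.h>

/* Mirrors the integer math in s5pv210_set_refresh(): boot_refresh_x1000 is
 * the boot-time counter scaled by 1000, boot_freq_hz the DRAM clock it was
 * programmed for, target_freq_khz the clock being switched to. */
static unsigned long scale_refresh(unsigned long boot_refresh_x1000,
				   unsigned long boot_freq_hz,
				   unsigned long target_freq_khz)
{
	unsigned long ratio = boot_freq_hz / target_freq_khz;	/* = (boot/target) * 1000 */

	return boot_refresh_x1000 / ratio;
}

int main(void)
{
	unsigned long boot_refresh = 0x618UL * 1000;	/* 7.8 us * 200 MHz = 1560 */
	unsigned long boot_freq = 200000000UL;		/* 200 MHz, in Hz */

	printf("200 MHz -> 0x%lx\n", scale_refresh(boot_refresh, boot_freq, 200000)); /* 0x618 */
	printf("100 MHz -> 0x%lx\n", scale_refresh(boot_refresh, boot_freq, 100000)); /* 0x30c */
	printf(" 83 MHz -> 0x%lx\n", scale_refresh(boot_refresh, boot_freq, 83000));  /* 0x287 */
	return 0;
}
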
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index bf5092455a8..d4c54237288 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -25,9 +25,19 @@ DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
-static void (*pm_idle_old)(void);
static int enabled_devices;
+static int off __read_mostly;
+static int initialized __read_mostly;
+
+int cpuidle_disabled(void)
+{
+ return off;
+}
+void disable_cpuidle(void)
+{
+ off = 1;
+}
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
@@ -46,25 +56,23 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
* cpuidle_idle_call - the main idle loop
*
* NOTE: no locks or semaphores should be used here
+ * return non-zero on failure
*/
-static void cpuidle_idle_call(void)
+int cpuidle_idle_call(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_state *target_state;
int next_state;
+ if (off)
+ return -ENODEV;
+
+ if (!initialized)
+ return -ENODEV;
+
/* check if the device is ready */
- if (!dev || !dev->enabled) {
- if (pm_idle_old)
- pm_idle_old();
- else
-#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
- default_idle();
-#else
- local_irq_enable();
-#endif
- return;
- }
+ if (!dev || !dev->enabled)
+ return -EBUSY;
#if 0
/* shows regressions, re-enable for 2.6.29 */
@@ -89,7 +97,7 @@ static void cpuidle_idle_call(void)
next_state = cpuidle_curr_governor->select(dev);
if (need_resched()) {
local_irq_enable();
- return;
+ return 0;
}
target_state = &dev->states[next_state];
@@ -114,6 +122,8 @@ static void cpuidle_idle_call(void)
/* give the governor an opportunity to reflect on the outcome */
if (cpuidle_curr_governor->reflect)
cpuidle_curr_governor->reflect(dev);
+
+ return 0;
}
/**
@@ -121,10 +131,10 @@ static void cpuidle_idle_call(void)
*/
void cpuidle_install_idle_handler(void)
{
- if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
+ if (enabled_devices) {
/* Make sure all changes finished before we switch to new idle */
smp_wmb();
- pm_idle = cpuidle_idle_call;
+ initialized = 1;
}
}
@@ -133,8 +143,8 @@ void cpuidle_install_idle_handler(void)
*/
void cpuidle_uninstall_idle_handler(void)
{
- if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
- pm_idle = pm_idle_old;
+ if (enabled_devices) {
+ initialized = 0;
cpuidle_kick_cpus();
}
}
@@ -427,7 +437,8 @@ static int __init cpuidle_init(void)
{
int ret;
- pm_idle_old = pm_idle;
+ if (cpuidle_disabled())
+ return -ENODEV;
ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
if (ret)
@@ -438,4 +449,5 @@ static int __init cpuidle_init(void)
return 0;
}
+module_param(off, int, 0444);
core_initcall(cpuidle_init);
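
With the pm_idle hook gone, each architecture's idle loop is expected to call cpuidle_idle_call() itself and provide its own fallback when the call returns non-zero (cpuidle off, not yet initialized, or no enabled device). A hypothetical sketch of such a call site; arch_default_idle() is a made-up name for whatever the architecture falls back to (wfi, hlt, ...):

/* Hypothetical arch-side consumer of the new int return value. */
static void arch_idle_loop_body(void)
{
	if (cpuidle_idle_call())
		arch_default_idle();	/* made-up fallback: arch low-power wait */
}
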
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 33e50d556f1..38c3fd8b9d7 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -13,6 +13,7 @@ extern struct list_head cpuidle_governors;
extern struct list_head cpuidle_detected_devices;
extern struct mutex cpuidle_lock;
extern spinlock_t cpuidle_driver_lock;
+extern int cpuidle_disabled(void);
/* idle loop */
extern void cpuidle_install_idle_handler(void);
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index fd1601e3d12..3f7e3cedd13 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -26,6 +26,9 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
if (!drv)
return -EINVAL;
+ if (cpuidle_disabled())
+ return -ENODEV;
+
spin_lock(&cpuidle_driver_lock);
if (cpuidle_curr_driver) {
spin_unlock(&cpuidle_driver_lock);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 724c164d31c..ea2f8e7aa24 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -81,6 +81,9 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
if (!gov || !gov->select)
return -EINVAL;
+ if (cpuidle_disabled())
+ return -ENODEV;
+
mutex_lock(&cpuidle_lock);
if (__cpuidle_find_governor(gov->name) == NULL) {
ret = 0;
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 18912521a7a..1d103f997dc 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -51,6 +51,7 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
union ce_io_threshold io_threshold;
u32 rand_num;
union ce_pe_dma_cfg pe_dma_cfg;
+ u32 device_ctrl;
writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
/* setup pe dma, include reset sg, pdr and pe, then release reset */
@@ -84,7 +85,9 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
ring_ctrl.w = 0;
writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
- writel(PPC4XX_DC_3DES_EN, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+ device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+ device_ctrl |= PPC4XX_DC_3DES_EN;
+ writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
part_ring_size.w = 0;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index d0e65d6ddc7..4159265b453 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -62,10 +62,22 @@
#define CAAM_MAX_IV_LENGTH 16
/* length of descriptors text */
-#define DESC_AEAD_SHARED_TEXT_LEN 4
-#define DESC_AEAD_ENCRYPT_TEXT_LEN 21
-#define DESC_AEAD_DECRYPT_TEXT_LEN 24
-#define DESC_AEAD_GIVENCRYPT_TEXT_LEN 27
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)
+
+#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+ 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+ 15 * CAAM_CMD_SZ)
+
+#define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \
+ CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
#ifdef DEBUG
/* for print_hex_dumps with line references */
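
The new DESC_* length macros exist so that the setkey/set_sh_desc paths below can decide whether the keys fit inline in the shared descriptor: descriptor text plus job-descriptor I/O words plus key material must stay within the 64-word descriptor buffer. A standalone back-of-the-envelope check, assuming 4-byte CAAM command words, 4-byte bus pointers and a 256-byte buffer (the real CAAM_CMD_SZ/CAAM_PTR_SZ/CAAM_DESC_BYTES_MAX come from the driver headers and may differ, e.g. with a 64-bit dma_addr_t):

#include <stdio.h>

int main(void)
{
	/* Assumed sizes; see the hedge in the paragraph above. */
	int cmd_sz = 4, ptr_sz = 4, desc_bytes_max = 64 * 4;

	int job_io_len      = cmd_sz * 3 + ptr_sz * 3;		/* DESC_JOB_IO_LEN      */
	int aead_enc_len    = (4 + 16) * cmd_sz;		/* DESC_AEAD_ENC_LEN    */
	int aead_givenc_len = aead_enc_len + 7 * cmd_sz;	/* DESC_AEAD_GIVENC_LEN */

	printf("bytes left for inline keys (encrypt):    %d\n",
	       desc_bytes_max - aead_enc_len - job_io_len);	/* 152 */
	printf("bytes left for inline keys (givencrypt): %d\n",
	       desc_bytes_max - aead_givenc_len - job_io_len);	/* 124 */
	return 0;
}
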
@@ -76,30 +88,366 @@
#define debug(format, arg...)
#endif
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+ u32 *jump_cmd, *uncond_jump_cmd;
+
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+ set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/*
+ * Wait for completion of class 1 key loading before allowing
+ * error propagation
+ */
+static inline void append_dec_shr_done(u32 *desc)
+{
+ u32 *jump_cmd;
+
+ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+}
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * For aead encrypt and decrypt, read iv for both classes
+ */
+static inline void aead_append_ld_iv(u32 *desc, int ivsize)
+{
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | ivsize);
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
+}
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+ KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * If all data, including src (with assoc and iv) or dst (with iv only) are
+ * contiguous
+ */
+#define GIV_SRC_CONTIG 1
+#define GIV_DST_CONTIG (1 << 1)
+
/*
* per-session context
*/
struct caam_ctx {
struct device *jrdev;
- u32 *sh_desc;
- dma_addr_t shared_desc_phys;
+ u32 sh_desc_enc[DESC_MAX_USED_LEN];
+ u32 sh_desc_dec[DESC_MAX_USED_LEN];
+ u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+ dma_addr_t sh_desc_enc_dma;
+ dma_addr_t sh_desc_dec_dma;
+ dma_addr_t sh_desc_givenc_dma;
u32 class1_alg_type;
u32 class2_alg_type;
u32 alg_op;
- u8 *key;
- dma_addr_t key_phys;
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t key_dma;
unsigned int enckeylen;
unsigned int split_key_len;
unsigned int split_key_pad_len;
unsigned int authsize;
};
-static int aead_authenc_setauthsize(struct crypto_aead *authenc,
+static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
+ int keys_fit_inline)
+{
+ if (keys_fit_inline) {
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key_as_imm(desc, (void *)ctx->key +
+ ctx->split_key_pad_len, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ } else {
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ }
+}
+
+static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
+ int keys_fit_inline)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_aead(desc, ctx, keys_fit_inline);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *jump_cmd;
+ u32 geniv, moveiv;
+ u32 *desc;
+
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = 1;
+
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen + cryptlen = seqinlen - ivsize */
+ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+ /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+ aead_append_ld_iv(desc, tfm->ivsize);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = 1;
+
+ desc = ctx->sh_desc_dec;
+
+ /* aead_decrypt shared descriptor */
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_aead(desc, ctx, keys_fit_inline);
+
+ /* Only propagate error immediately if shared */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, key_jump_cmd);
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqinlen - ivsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+ ctx->authsize + tfm->ivsize);
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ aead_append_ld_iv(desc, tfm->ivsize);
+
+ append_dec_op1(desc, ctx->class1_alg_type);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ append_dec_shr_done(desc);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = 1;
+
+ /* aead_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
+
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO |
+ MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy IV to class 1 context */
+ append_move(desc, MOVE_SRC_CLASS1CTX |
+ MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Return to encryption */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen = seqinlen - (ivsize + cryptlen) */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Copy iv from class 1 ctx to class 2 fifo*/
+ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* No need to reload iv */
+ append_seq_fifo_load(desc, tfm->ivsize,
+ FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
+
+static int aead_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
ctx->authsize = authsize;
+ aead_set_sh_desc(authenc);
return 0;
}
@@ -117,6 +465,7 @@ static void split_key_done(struct device *dev, u32 *desc, u32 err,
#ifdef DEBUG
dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
+
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
@@ -220,73 +569,7 @@ static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
return ret;
}
-static int build_sh_desc_ipsec(struct caam_ctx *ctx)
-{
- struct device *jrdev = ctx->jrdev;
- u32 *sh_desc;
- u32 *jump_cmd;
- bool keys_fit_inline = 0;
-
- /*
- * largest Job Descriptor and its Shared Descriptor
- * must both fit into the 64-word Descriptor h/w Buffer
- */
- if ((DESC_AEAD_GIVENCRYPT_TEXT_LEN +
- DESC_AEAD_SHARED_TEXT_LEN) * CAAM_CMD_SZ +
- ctx->split_key_pad_len + ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = 1;
-
- /* build shared descriptor for this session */
- sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
- keys_fit_inline ?
- ctx->split_key_pad_len + ctx->enckeylen :
- CAAM_PTR_SZ * 2, GFP_DMA | GFP_KERNEL);
- if (!sh_desc) {
- dev_err(jrdev, "could not allocate shared descriptor\n");
- return -ENOMEM;
- }
-
- init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL);
-
- jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL |
- JUMP_COND_SHRD | JUMP_COND_SELF);
-
- /*
- * process keys, starting with class 2/authentication.
- */
- if (keys_fit_inline) {
- append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len,
- CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
-
- append_key_as_imm(sh_desc, (void *)ctx->key +
- ctx->split_key_pad_len, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- } else {
- append_key(sh_desc, ctx->key_phys, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key(sh_desc, ctx->key_phys + ctx->split_key_pad_len,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- }
-
- /* update jump cmd now that we are at the jump target */
- set_jump_tgt_here(sh_desc, jump_cmd);
-
- ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc,
- desc_bytes(sh_desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- kfree(sh_desc);
- return -ENOMEM;
- }
-
- ctx->sh_desc = sh_desc;
-
- return 0;
-}
-
-static int aead_authenc_setkey(struct crypto_aead *aead,
+static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
@@ -326,27 +609,19 @@ static int aead_authenc_setkey(struct crypto_aead *aead,
print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
- ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen,
- GFP_KERNEL | GFP_DMA);
- if (!ctx->key) {
- dev_err(jrdev, "could not allocate key output memory\n");
- return -ENOMEM;
- }
ret = gen_split_key(ctx, key, authkeylen);
if (ret) {
- kfree(ctx->key);
goto badkey;
}
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
- ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
enckeylen, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->key_phys)) {
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
- kfree(ctx->key);
return -ENOMEM;
}
#ifdef DEBUG
@@ -357,11 +632,10 @@ static int aead_authenc_setkey(struct crypto_aead *aead,
ctx->enckeylen = enckeylen;
- ret = build_sh_desc_ipsec(ctx);
+ ret = aead_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len +
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
enckeylen, DMA_TO_DEVICE);
- kfree(ctx->key);
}
return ret;
@@ -370,6 +644,119 @@ badkey:
return -EINVAL;
}
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+ u32 *key_jump_cmd, *jump_cmd;
+ u32 *desc;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+ ctx->enckeylen = keylen;
+
+ /* ablkcipher_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+
+ /* Load iv */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+
+ /* Load operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ /* ablkcipher_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ /* As for aead, only propagate the error immediately if shared */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, key_jump_cmd);
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /* load IV */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+
+ /* Choose operation */
+ append_dec_op1(desc, ctx->class1_alg_type);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+ /* Wait for key to load before allowing propagating error */
+ append_dec_shr_done(desc);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return ret;
+}
+
struct link_tbl_entry {
u64 ptr;
u32 len;
@@ -379,64 +766,109 @@ struct link_tbl_entry {
};
/*
- * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
+ * aead_edesc - s/w-extended aead descriptor
+ * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
* @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @link_tbl_bytes: length of dma mapped link_tbl space
* @link_tbl_dma: bus physical mapped address of h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
-struct ipsec_esp_edesc {
+struct aead_edesc {
int assoc_nents;
int src_nents;
int dst_nents;
+ dma_addr_t iv_dma;
int link_tbl_bytes;
dma_addr_t link_tbl_dma;
struct link_tbl_entry *link_tbl;
u32 hw_desc[0];
};
-static void ipsec_esp_unmap(struct device *dev,
- struct ipsec_esp_edesc *edesc,
- struct aead_request *areq)
-{
- dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
+ * @link_tbl_bytes: length of dma mapped link_tbl space
+ * @link_tbl_dma: bus physical mapped address of h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ */
+struct ablkcipher_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int link_tbl_bytes;
+ dma_addr_t link_tbl_dma;
+ struct link_tbl_entry *link_tbl;
+ u32 hw_desc[0];
+};
- if (unlikely(areq->dst != areq->src)) {
- dma_unmap_sg(dev, areq->src, edesc->src_nents,
- DMA_TO_DEVICE);
- dma_unmap_sg(dev, areq->dst, edesc->dst_nents,
- DMA_FROM_DEVICE);
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+ struct scatterlist *dst, int src_nents, int dst_nents,
+ dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
+ int link_tbl_bytes)
+{
+ if (unlikely(dst != src)) {
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else {
- dma_unmap_sg(dev, areq->src, edesc->src_nents,
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
- if (edesc->link_tbl_bytes)
- dma_unmap_single(dev, edesc->link_tbl_dma,
- edesc->link_tbl_bytes,
+ if (iv_dma)
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ if (link_tbl_bytes)
+ dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
DMA_TO_DEVICE);
}
-/*
- * ipsec_esp descriptor callbacks
- */
-static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+static void aead_unmap(struct device *dev,
+ struct aead_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
+
+ caam_unmap(dev, req->src, req->dst,
+ edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->link_tbl_dma,
+ edesc->link_tbl_bytes);
+}
+
+static void ablkcipher_unmap(struct device *dev,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ caam_unmap(dev, req->src, req->dst,
+ edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->link_tbl_dma,
+ edesc->link_tbl_bytes);
+}
+
+static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
- struct aead_request *areq = context;
- struct ipsec_esp_edesc *edesc;
+ struct aead_request *req = context;
+ struct aead_edesc *edesc;
#ifdef DEBUG
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
- int ivsize = crypto_aead_ivsize(aead);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ipsec_esp_edesc *)((char *)desc -
- offsetof(struct ipsec_esp_edesc, hw_desc));
+
+ edesc = (struct aead_edesc *)((char *)desc -
+ offsetof(struct aead_edesc, hw_desc));
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
@@ -444,39 +876,50 @@ static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
- ipsec_esp_unmap(jrdev, edesc, areq);
+ aead_unmap(jrdev, edesc, req);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
- areq->assoclen , 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen , 1);
print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
edesc->src_nents ? 100 : ivsize, 1);
print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
- edesc->src_nents ? 100 : areq->cryptlen +
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->cryptlen +
ctx->authsize + 4, 1);
#endif
kfree(edesc);
- aead_request_complete(areq, err);
+ aead_request_complete(req, err);
}
-static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
- struct aead_request *areq = context;
- struct ipsec_esp_edesc *edesc;
+ struct aead_request *req = context;
+ struct aead_edesc *edesc;
#ifdef DEBUG
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ipsec_esp_edesc *)((char *)desc -
- offsetof(struct ipsec_esp_edesc, hw_desc));
+
+ edesc = (struct aead_edesc *)((char *)desc -
+ offsetof(struct aead_edesc, hw_desc));
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
+ req->cryptlen, 1);
+#endif
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
@@ -484,7 +927,7 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
- ipsec_esp_unmap(jrdev, edesc, areq);
+ aead_unmap(jrdev, edesc, req);
/*
* verify hw auth check passed else return -EBADMSG
@@ -495,255 +938,413 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
- ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)),
- sizeof(struct iphdr) + areq->assoclen +
- ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) +
+ ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
+ sizeof(struct iphdr) + req->assoclen +
+ ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
ctx->authsize + 36, 1);
if (!err && edesc->link_tbl_bytes) {
- struct scatterlist *sg = sg_last(areq->src, edesc->src_nents);
+ struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
sg->length + ctx->authsize + 16, 1);
}
#endif
+
kfree(edesc);
- aead_request_complete(areq, err);
+ aead_request_complete(req, err);
+}
+
+static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ablkcipher_request *req = context;
+ struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ablkcipher_edesc *)((char *)desc -
+ offsetof(struct ablkcipher_edesc, hw_desc));
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+
+ ablkcipher_request_complete(req, err);
+}
+
+static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ablkcipher_request *req = context;
+ struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ablkcipher_edesc *)((char *)desc -
+ offsetof(struct ablkcipher_edesc, hw_desc));
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+
+ ablkcipher_request_complete(req, err);
+}
+
+static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
+ dma_addr_t dma, u32 len, u32 offset)
+{
+ link_tbl_ptr->ptr = dma;
+ link_tbl_ptr->len = len;
+ link_tbl_ptr->reserved = 0;
+ link_tbl_ptr->buf_pool_id = 0;
+ link_tbl_ptr->offset = offset;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
+ sizeof(struct link_tbl_entry), 1);
+#endif
}
/*
* convert scatterlist to h/w link table format
- * scatterlist must have been previously dma mapped
+ * but does not set the final bit; instead, returns the last entry
*/
-static void sg_to_link_tbl(struct scatterlist *sg, int sg_count,
- struct link_tbl_entry *link_tbl_ptr, u32 offset)
+static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
+ int sg_count, struct link_tbl_entry
+ *link_tbl_ptr, u32 offset)
{
while (sg_count) {
- link_tbl_ptr->ptr = sg_dma_address(sg);
- link_tbl_ptr->len = sg_dma_len(sg);
- link_tbl_ptr->reserved = 0;
- link_tbl_ptr->buf_pool_id = 0;
- link_tbl_ptr->offset = offset;
+ sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
+ sg_dma_len(sg), offset);
link_tbl_ptr++;
sg = sg_next(sg);
sg_count--;
}
+ return link_tbl_ptr - 1;
+}
- /* set Final bit (marks end of link table) */
- link_tbl_ptr--;
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
+ struct link_tbl_entry *link_tbl_ptr, u32 offset)
+{
+ link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
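+	/* set Final bit (marks end of link table) */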
link_tbl_ptr->len |= 0x40000000;
}
/*
- * fill in and submit ipsec_esp job descriptor
+ * Fill in aead job descriptor
*/
-static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
- u32 encrypt,
- void (*callback) (struct device *dev, u32 *desc,
- u32 err, void *context))
+static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
+ struct aead_edesc *edesc,
+ struct aead_request *req,
+ bool all_contig, bool encrypt)
{
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- u32 *desc = edesc->hw_desc, options;
- int ret, sg_count, assoc_sg_count;
int ivsize = crypto_aead_ivsize(aead);
int authsize = ctx->authsize;
- dma_addr_t ptr, dst_dma, src_dma;
-#ifdef DEBUG
- u32 *sh_desc = ctx->sh_desc;
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, link_tbl_index = 0;
+#ifdef DEBUG
debug("assoclen %d cryptlen %d authsize %d\n",
- areq->assoclen, areq->cryptlen, authsize);
+ req->assoclen, req->cryptlen, authsize);
print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
- areq->assoclen , 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen , 1);
print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents ? 100 : ivsize, 1);
print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
- edesc->src_nents ? 100 : areq->cryptlen + authsize, 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->cryptlen, 1);
print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
desc_bytes(sh_desc), 1);
#endif
- assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1,
- DMA_TO_DEVICE);
- if (areq->src == areq->dst)
- sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
- DMA_BIDIRECTIONAL);
- else
- sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
- DMA_TO_DEVICE);
- /* start auth operation */
- append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL |
- (encrypt ? : OP_ALG_ICV_ON));
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- /* Load FIFO with data for Class 2 CHA */
- options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG;
- if (!edesc->assoc_nents) {
- ptr = sg_dma_address(areq->assoc);
+ if (all_contig) {
+ src_dma = sg_dma_address(req->assoc);
+ in_options = 0;
} else {
- sg_to_link_tbl(areq->assoc, edesc->assoc_nents,
- edesc->link_tbl, 0);
- ptr = edesc->link_tbl_dma;
- options |= LDST_SGF;
+ src_dma = edesc->link_tbl_dma;
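+		/* dst entries, if any, start after the assoc + iv + src entries */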
+ link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
+ (edesc->src_nents ? : 1);
+ in_options = LDST_SGF;
}
- append_fifo_load(desc, ptr, areq->assoclen, options);
-
- /* copy iv from cipher/class1 input context to class2 infifo */
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
-
- if (!encrypt) {
- u32 *jump_cmd, *uncond_jump_cmd;
-
- /* JUMP if shared */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
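+	/*
+	 * Input covers assoc + iv + plaintext on encrypt, or
+	 * assoc + iv + ciphertext + ICV on decrypt.
+	 */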
+ if (encrypt)
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+ req->cryptlen - authsize, in_options);
+ else
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+ req->cryptlen, in_options);
- /* start class 1 (cipher) operation, non-shared version */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL);
+ if (likely(req->src == req->dst)) {
+ if (all_contig) {
+ dst_dma = sg_dma_address(req->src);
+ } else {
+ dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+ ((edesc->assoc_nents ? : 1) + 1);
+ out_options = LDST_SGF;
+ }
+ } else {
+ if (!edesc->dst_nents) {
+ dst_dma = sg_dma_address(req->dst);
+ } else {
+ dst_dma = edesc->link_tbl_dma +
+ link_tbl_index *
+ sizeof(struct link_tbl_entry);
+ out_options = LDST_SGF;
+ }
+ }
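+	/* output is ciphertext + ICV on encrypt, plaintext on decrypt */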
+ if (encrypt)
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+ else
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
+ out_options);
+}
- uncond_jump_cmd = append_jump(desc, 0);
+/*
+ * Fill in aead givencrypt job descriptor
+ */
+static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
+ struct aead_edesc *edesc,
+ struct aead_request *req,
+ int contig)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
+ int authsize = ctx->authsize;
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, link_tbl_index = 0;
- set_jump_tgt_here(desc, jump_cmd);
+#ifdef DEBUG
+ debug("assoclen %d cryptlen %d authsize %d\n",
+ req->assoclen, req->cryptlen, authsize);
+ print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen , 1);
+ print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+ print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
+ desc_bytes(sh_desc), 1);
+#endif
- /* start class 1 (cipher) operation, shared version */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_AAI_DK);
- set_jump_tgt_here(desc, uncond_jump_cmd);
- } else
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | encrypt);
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- /* load payload & instruct to class2 to snoop class 1 if encrypting */
- options = 0;
- if (!edesc->src_nents) {
- src_dma = sg_dma_address(areq->src);
+ if (contig & GIV_SRC_CONTIG) {
+ src_dma = sg_dma_address(req->assoc);
+ in_options = 0;
} else {
- sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl +
- edesc->assoc_nents, 0);
- src_dma = edesc->link_tbl_dma + edesc->assoc_nents *
- sizeof(struct link_tbl_entry);
- options |= LDST_SGF;
+ src_dma = edesc->link_tbl_dma;
+ link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents;
+ in_options = LDST_SGF;
}
- append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options);
- append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH |
- FIFOLD_TYPE_LASTBOTH |
- (encrypt ? FIFOLD_TYPE_MSG1OUT2
- : FIFOLD_TYPE_MSG));
-
- /* specify destination */
- if (areq->src == areq->dst) {
- dst_dma = src_dma;
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+ req->cryptlen - authsize, in_options);
+
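+	/* output starts with the generated IV, followed by ciphertext and ICV */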
+ if (contig & GIV_DST_CONTIG) {
+ dst_dma = edesc->iv_dma;
} else {
- sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1,
- DMA_FROM_DEVICE);
- if (!edesc->dst_nents) {
- dst_dma = sg_dma_address(areq->dst);
- options = 0;
+ if (likely(req->src == req->dst)) {
+ dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+ edesc->assoc_nents;
+ out_options = LDST_SGF;
} else {
- sg_to_link_tbl(areq->dst, edesc->dst_nents,
- edesc->link_tbl + edesc->assoc_nents +
- edesc->src_nents, 0);
- dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents +
- edesc->src_nents) *
+ dst_dma = edesc->link_tbl_dma +
+ link_tbl_index *
sizeof(struct link_tbl_entry);
- options = LDST_SGF;
+ out_options = LDST_SGF;
}
}
- append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options);
- append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA);
- /* ICV */
- if (encrypt)
- append_seq_store(desc, authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
- else
- append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+}
+
+/*
+ * Fill in ablkcipher job descriptor
+ */
+static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req,
+ bool iv_contig)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, link_tbl_index = 0;
#ifdef DEBUG
- debug("job_desc_len %d\n", desc_len(desc));
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc) , 1);
- print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
- edesc->link_tbl_bytes, 1);
+ print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->nbytes, 1);
#endif
- ret = caam_jr_enqueue(jrdev, desc, callback, areq);
- if (!ret)
- ret = -EINPROGRESS;
- else {
- ipsec_esp_unmap(jrdev, edesc, areq);
- kfree(edesc);
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (iv_contig) {
+ src_dma = edesc->iv_dma;
+ in_options = 0;
+ } else {
+ src_dma = edesc->link_tbl_dma;
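+		/* skip past the iv and src entries to where dst entries begin */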
+ link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents;
+ in_options = LDST_SGF;
}
+ append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
- return ret;
+ if (likely(req->src == req->dst)) {
+ if (!edesc->src_nents && iv_contig) {
+ dst_dma = sg_dma_address(req->src);
+ } else {
+ dst_dma = edesc->link_tbl_dma +
+ sizeof(struct link_tbl_entry);
+ out_options = LDST_SGF;
+ }
+ } else {
+ if (!edesc->dst_nents) {
+ dst_dma = sg_dma_address(req->dst);
+ } else {
+ dst_dma = edesc->link_tbl_dma +
+ link_tbl_index * sizeof(struct link_tbl_entry);
+ out_options = LDST_SGF;
+ }
+ }
+ append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
/*
* derive number of elements in scatterlist
*/
-static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
+static int sg_count(struct scatterlist *sg_list, int nbytes)
{
struct scatterlist *sg = sg_list;
int sg_nents = 0;
- *chained = 0;
while (nbytes > 0) {
sg_nents++;
nbytes -= sg->length;
if (!sg_is_last(sg) && (sg + 1)->length == 0)
- *chained = 1;
+			BUG(); /* chained scatterlists are not supported */
sg = scatterwalk_sg_next(sg);
}
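+	/* a single segment is reported as 0: no link table needed */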
+ if (likely(sg_nents == 1))
+ return 0;
+
return sg_nents;
}
/*
- * allocate and map the ipsec_esp extended descriptor
+ * allocate and map the aead extended descriptor
*/
-static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
- int desc_bytes)
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ int desc_bytes, bool *all_contig_ptr)
{
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
- GFP_ATOMIC;
- int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes;
- struct ipsec_esp_edesc *edesc;
-
- assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained);
- BUG_ON(chained);
- if (likely(assoc_nents == 1))
- assoc_nents = 0;
-
- src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize,
- &chained);
- BUG_ON(chained);
- if (src_nents == 1)
- src_nents = 0;
-
- if (unlikely(areq->dst != areq->src)) {
- dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize,
- &chained);
- BUG_ON(chained);
- if (dst_nents == 1)
- dst_nents = 0;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int assoc_nents, src_nents, dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ int sgc;
+ bool all_contig = true;
+ int ivsize = crypto_aead_ivsize(aead);
+ int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
+
+ assoc_nents = sg_count(req->assoc, req->assoclen);
+ src_nents = sg_count(req->src, req->cryptlen);
+
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->cryptlen);
+
+ sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ } else {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE);
}
- link_tbl_bytes = (assoc_nents + src_nents + dst_nents) *
- sizeof(struct link_tbl_entry);
- debug("link_tbl_bytes %d\n", link_tbl_bytes);
+ /* Check if data are contiguous */
+ iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+ if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
+ iv_dma || src_nents || iv_dma + ivsize !=
+ sg_dma_address(req->src)) {
+ all_contig = false;
+ assoc_nents = assoc_nents ? : 1;
+ src_nents = src_nents ? : 1;
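+		/* link table holds assoc entries, one iv entry, then src entries */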
+ link_tbl_len = assoc_nents + 1 + src_nents;
+ }
+ link_tbl_len += dst_nents;
+
+ link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes +
+ edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
link_tbl_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -753,142 +1354,450 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
edesc->assoc_nents = assoc_nents;
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
- edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) +
+ edesc->iv_dma = iv_dma;
+ edesc->link_tbl_bytes = link_tbl_bytes;
+ edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
link_tbl_bytes, DMA_TO_DEVICE);
- edesc->link_tbl_bytes = link_tbl_bytes;
+ *all_contig_ptr = all_contig;
+
+ link_tbl_index = 0;
+ if (!all_contig) {
+ sg_to_link_tbl(req->assoc,
+ (assoc_nents ? : 1),
+ edesc->link_tbl +
+ link_tbl_index, 0);
+ link_tbl_index += assoc_nents ? : 1;
+ sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+ iv_dma, ivsize, 0);
+ link_tbl_index += 1;
+ sg_to_link_tbl_last(req->src,
+ (src_nents ? : 1),
+ edesc->link_tbl +
+ link_tbl_index, 0);
+ link_tbl_index += src_nents ? : 1;
+ }
+ if (dst_nents) {
+ sg_to_link_tbl_last(req->dst, dst_nents,
+ edesc->link_tbl + link_tbl_index, 0);
+ }
return edesc;
}
-static int aead_authenc_encrypt(struct aead_request *areq)
+static int aead_encrypt(struct aead_request *req)
{
- struct ipsec_esp_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- int ivsize = crypto_aead_ivsize(aead);
+ bool all_contig;
u32 *desc;
- dma_addr_t iv_dma;
+ int ret = 0;
+
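+	/* extend cryptlen to cover the ICV appended on encryption */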
+ req->cryptlen += ctx->authsize;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_ENCRYPT_TEXT_LEN *
- CAAM_CMD_SZ);
+ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &all_contig);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- desc = edesc->hw_desc;
-
- /* insert shared descriptor pointer */
- init_job_desc_shared(desc, ctx->shared_desc_phys,
- desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
-
- iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE);
- /* check dma error */
+ /* Create and submit job descriptor */
+ init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
+ all_contig, true);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
- append_load(desc, iv_dma, ivsize,
- LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
+ return ret;
}
-static int aead_authenc_decrypt(struct aead_request *req)
+static int aead_decrypt(struct aead_request *req)
{
+ struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- struct ipsec_esp_edesc *edesc;
+ bool all_contig;
u32 *desc;
- dma_addr_t iv_dma;
-
- req->cryptlen -= ctx->authsize;
+ int ret = 0;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN *
- CAAM_CMD_SZ);
+ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &all_contig);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ req->cryptlen, 1);
+#endif
+
+	/* Create and submit job descriptor */
+ init_aead_job(ctx->sh_desc_dec,
+ ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- /* insert shared descriptor pointer */
- init_job_desc_shared(desc, ctx->shared_desc_phys,
- desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
+ return ret;
+}
- iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
- /* check dma error */
+/*
+ * allocate and map the aead extended descriptor for aead givencrypt
+ */
+static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
+ *greq, int desc_bytes,
+ u32 *contig_ptr)
+{
+ struct aead_request *req = &greq->areq;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int assoc_nents, src_nents, dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ int sgc;
+ u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
+ int ivsize = crypto_aead_ivsize(aead);
+ int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
+
+ assoc_nents = sg_count(req->assoc, req->assoclen);
+ src_nents = sg_count(req->src, req->cryptlen);
- append_load(desc, iv_dma, ivsize,
- LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->cryptlen);
- return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done);
+ sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ } else {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE);
+ }
+
+ /* Check if data are contiguous */
+ iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+ if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
+ iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
+ contig &= ~GIV_SRC_CONTIG;
+ if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
+ contig &= ~GIV_DST_CONTIG;
+ if (unlikely(req->src != req->dst)) {
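+		/* reserve a link table entry for the IV ahead of a separate dst */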
+ dst_nents = dst_nents ? : 1;
+ link_tbl_len += 1;
+ }
+ if (!(contig & GIV_SRC_CONTIG)) {
+ assoc_nents = assoc_nents ? : 1;
+ src_nents = src_nents ? : 1;
+ link_tbl_len += assoc_nents + 1 + src_nents;
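+		/* in-place: if src needs a link table, so does dst */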
+ if (likely(req->src == req->dst))
+ contig &= ~GIV_DST_CONTIG;
+ }
+ link_tbl_len += dst_nents;
+
+ link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
+ link_tbl_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->assoc_nents = assoc_nents;
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ edesc->link_tbl_bytes = link_tbl_bytes;
+ edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
+ desc_bytes;
+ edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
+ link_tbl_bytes, DMA_TO_DEVICE);
+ *contig_ptr = contig;
+
+ link_tbl_index = 0;
+ if (!(contig & GIV_SRC_CONTIG)) {
+ sg_to_link_tbl(req->assoc, assoc_nents,
+ edesc->link_tbl +
+ link_tbl_index, 0);
+ link_tbl_index += assoc_nents;
+ sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+ iv_dma, ivsize, 0);
+ link_tbl_index += 1;
+ sg_to_link_tbl_last(req->src, src_nents,
+ edesc->link_tbl +
+ link_tbl_index, 0);
+ link_tbl_index += src_nents;
+ }
+ if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
+ sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+ iv_dma, ivsize, 0);
+ link_tbl_index += 1;
+ sg_to_link_tbl_last(req->dst, dst_nents,
+ edesc->link_tbl + link_tbl_index, 0);
+ }
+
+ return edesc;
}
-static int aead_authenc_givencrypt(struct aead_givcrypt_request *req)
+static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
- struct aead_request *areq = &req->areq;
- struct ipsec_esp_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct aead_request *req = &areq->areq;
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- int ivsize = crypto_aead_ivsize(aead);
- dma_addr_t iv_dma;
+ u32 contig;
u32 *desc;
+ int ret = 0;
- iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE);
-
- debug("%s: giv %p\n", __func__, req->giv);
+ req->cryptlen += ctx->authsize;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_GIVENCRYPT_TEXT_LEN *
- CAAM_CMD_SZ);
+ edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &contig);
+
if (IS_ERR(edesc))
return PTR_ERR(edesc);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ req->cryptlen, 1);
+#endif
+
+	/* Create and submit job descriptor */
+ init_aead_giv_job(ctx->sh_desc_givenc,
+ ctx->sh_desc_givenc_dma, edesc, req, contig);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- /* insert shared descriptor pointer */
- init_job_desc_shared(desc, ctx->shared_desc_phys,
- desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
+ return ret;
+}
- /*
- * LOAD IMM Info FIFO
- * to DECO, Last, Padding, Random, Message, 16 bytes
- */
- append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 |
- NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG |
- NFIFOENTRY_PTYPE_RND | ivsize,
- LDST_SRCDST_WORD_INFO_FIFO);
+/*
+ * allocate and map the ablkcipher extended descriptor for ablkcipher
+ */
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ *req, int desc_bytes,
+ bool *iv_contig_out)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, dst_nents = 0, link_tbl_bytes;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ bool iv_contig = false;
+ int sgc;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int link_tbl_index;
+
+ src_nents = sg_count(req->src, req->nbytes);
+
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->nbytes);
+
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ } else {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE);
+ }
/*
- * disable info fifo entries since the above serves as the entry
- * this way, the MOVE command won't generate an entry.
- * Note that this isn't required in more recent versions of
- * SEC as a MOVE that doesn't do info FIFO entries is available.
+ * Check if iv can be contiguous with source and destination.
+	 * If so, include it. If not, create a link table.
*/
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+ if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
+ iv_contig = true;
+ else
+ src_nents = src_nents ? : 1;
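+	/* a non-contiguous iv takes one extra link table entry before src */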
+ link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+ sizeof(struct link_tbl_entry);
- /* MOVE DECO Alignment -> C1 Context 16 bytes */
- append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ivsize);
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
+ link_tbl_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
- /* re-enable info fifo entries */
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->link_tbl_bytes = link_tbl_bytes;
+ edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+ desc_bytes;
+
+ link_tbl_index = 0;
+ if (!iv_contig) {
+ sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0);
+ sg_to_link_tbl_last(req->src, src_nents,
+ edesc->link_tbl + 1, 0);
+ link_tbl_index += 1 + src_nents;
+ }
+
+ if (unlikely(dst_nents)) {
+ sg_to_link_tbl_last(req->dst, dst_nents,
+ edesc->link_tbl + link_tbl_index, 0);
+ }
+
+ edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
+ link_tbl_bytes, DMA_TO_DEVICE);
+ edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
+ link_tbl_bytes, 1);
+#endif
+
+ *iv_contig_out = iv_contig;
+ return edesc;
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ bool iv_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &iv_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
- /* MOVE C1 Context -> OFIFO 16 bytes */
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ivsize);
+	/* Create and submit job descriptor */
+ init_ablkcipher_job(ctx->sh_desc_enc,
+ ctx->sh_desc_enc_dma, edesc, req, iv_contig);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
- append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
+ return ret;
}
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ bool iv_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &iv_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+ init_ablkcipher_job(ctx->sh_desc_dec,
+ ctx->sh_desc_dec_dma, edesc, req, iv_contig);
+ desc = edesc->hw_desc;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+#define template_aead template_u.aead
+#define template_ablkcipher template_u.ablkcipher
struct caam_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
char driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
- struct aead_alg aead;
+ u32 type;
+ union {
+ struct ablkcipher_alg ablkcipher;
+ struct aead_alg aead;
+ struct blkcipher_alg blkcipher;
+ struct cipher_alg cipher;
+ struct compress_alg compress;
+ struct rng_alg rng;
+ } template_u;
u32 class1_alg_type;
u32 class2_alg_type;
u32 alg_op;
@@ -900,12 +1809,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -918,12 +1828,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha256),cbc(aes))",
.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -937,12 +1848,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha512),cbc(aes))",
.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -956,12 +1868,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha1),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -974,12 +1887,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha256),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -993,12 +1907,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha512),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -1012,12 +1927,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha1),cbc(des))",
.driver_name = "authenc-hmac-sha1-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -1030,12 +1946,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha256),cbc(des))",
.driver_name = "authenc-hmac-sha256-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -1049,12 +1966,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha512),cbc(des))",
.driver_name = "authenc-hmac-sha512-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -1064,6 +1982,55 @@ static struct caam_alg_template driver_algs[] = {
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
+ /* ablkcipher descriptor */
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ }
};
struct caam_crypto_alg {
@@ -1102,16 +2069,19 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
{
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys))
- dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys,
- desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
- kfree(ctx->sh_desc);
-
- if (!dma_mapping_error(ctx->jrdev, ctx->key_phys))
- dma_unmap_single(ctx->jrdev, ctx->key_phys,
- ctx->split_key_pad_len + ctx->enckeylen,
+ if (ctx->sh_desc_enc_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
+ if (ctx->sh_desc_dec_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
+ if (ctx->sh_desc_givenc_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
+ desc_bytes(ctx->sh_desc_givenc),
DMA_TO_DEVICE);
- kfree(ctx->key);
}
static void __exit caam_algapi_exit(void)
@@ -1175,12 +2145,20 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
alg->cra_init = caam_cra_init;
alg->cra_exit = caam_cra_exit;
alg->cra_priority = CAAM_CRA_PRIORITY;
- alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_type = &crypto_aead_type;
alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_u.aead = template->aead;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | template->type;
+ switch (template->type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ alg->cra_type = &crypto_aead_type;
+ alg->cra_aead = template->template_aead;
+ break;
+ }
t_alg->class1_alg_type = template->class1_alg_type;
t_alg->class2_alg_type = template->class2_alg_type;
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 950450346f7..d38f2afaa96 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -31,5 +31,6 @@
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 9009713a3c2..73988bb7322 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -52,9 +52,11 @@ static int caam_probe(struct platform_device *pdev)
struct caam_ctrl __iomem *ctrl;
struct caam_full __iomem *topregs;
struct caam_drv_private *ctrlpriv;
- struct caam_perfmon *perfmon;
struct caam_deco **deco;
u32 deconum;
+#ifdef CONFIG_DEBUG_FS
+ struct caam_perfmon *perfmon;
+#endif
ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
if (!ctrlpriv)
@@ -164,52 +166,52 @@ static int caam_probe(struct platform_device *pdev)
/* Controller-level - performance monitor counters */
ctrlpriv->ctl_rq_dequeued =
debugfs_create_u64("rq_dequeued",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->req_dequeued);
ctrlpriv->ctl_ob_enc_req =
debugfs_create_u64("ob_rq_encrypted",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_enc_req);
ctrlpriv->ctl_ib_dec_req =
debugfs_create_u64("ib_rq_decrypted",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_dec_req);
ctrlpriv->ctl_ob_enc_bytes =
debugfs_create_u64("ob_bytes_encrypted",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_enc_bytes);
ctrlpriv->ctl_ob_prot_bytes =
debugfs_create_u64("ob_bytes_protected",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_prot_bytes);
ctrlpriv->ctl_ib_dec_bytes =
debugfs_create_u64("ib_bytes_decrypted",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_dec_bytes);
ctrlpriv->ctl_ib_valid_bytes =
debugfs_create_u64("ib_bytes_validated",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_valid_bytes);
/* Controller level - global status values */
ctrlpriv->ctl_faultaddr =
debugfs_create_u64("fault_addr",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->faultaddr);
ctrlpriv->ctl_faultdetail =
debugfs_create_u32("fault_detail",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->faultdetail);
ctrlpriv->ctl_faultstatus =
debugfs_create_u32("fault_status",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->status);
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_kek = debugfs_create_blob("kek",
- S_IFCHR | S_IRUSR |
+ S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_kek_wrap);
@@ -217,7 +219,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
- S_IFCHR | S_IRUSR |
+ S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_tkek_wrap);
@@ -225,7 +227,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
- S_IFCHR | S_IRUSR |
+ S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_tdsk_wrap);
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 46915800c26..0991323cf3f 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -9,7 +9,7 @@
#define IMMEDIATE (1 << 23)
#define CAAM_CMD_SZ sizeof(u32)
#define CAAM_PTR_SZ sizeof(dma_addr_t)
-#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * 64)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
#ifdef DEBUG
#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
@@ -18,6 +18,9 @@
#define PRINT_POS
#endif
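+/* let errors propagate from the shared descriptor to the job descriptor */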
+#define SET_OK_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
+ LDST_SRCDST_WORD_DECOCTRL | \
+ (LDOFF_CHG_SHARE_OK_PROP << LDST_OFFSET_SHIFT))
#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
@@ -203,3 +206,56 @@ static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
append_cmd(desc, immediate); \
}
APPEND_CMD_RAW_IMM(load, LOAD, u32);
+
+/*
+ * Append math command. Only the last part of destination and source need to
+ * be specified
+ */
+#define APPEND_MATH(op, desc, dest, src_0, src_1, len) \
+append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
+ MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32) (len & MATH_LEN_MASK));
+
+#define append_math_add(desc, dest, src0, src1, len) \
+ APPEND_MATH(ADD, desc, dest, src0, src1, len)
+#define append_math_sub(desc, dest, src0, src1, len) \
+ APPEND_MATH(SUB, desc, dest, src0, src1, len)
+#define append_math_add_c(desc, dest, src0, src1, len) \
+ APPEND_MATH(ADDC, desc, dest, src0, src1, len)
+#define append_math_sub_b(desc, dest, src0, src1, len) \
+ APPEND_MATH(SUBB, desc, dest, src0, src1, len)
+#define append_math_and(desc, dest, src0, src1, len) \
+ APPEND_MATH(AND, desc, dest, src0, src1, len)
+#define append_math_or(desc, dest, src0, src1, len) \
+ APPEND_MATH(OR, desc, dest, src0, src1, len)
+#define append_math_xor(desc, dest, src0, src1, len) \
+ APPEND_MATH(XOR, desc, dest, src0, src1, len)
+#define append_math_lshift(desc, dest, src0, src1, len) \
+ APPEND_MATH(LSHIFT, desc, dest, src0, src1, len)
+#define append_math_rshift(desc, dest, src0, src1, len) \
+ APPEND_MATH(RSHIFT, desc, dest, src0, src1, len)
+
+/* Exactly one source is IMM. Data is passed in as u32 value */
+#define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \
+do { \
+ APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
+ append_cmd(desc, data); \
+} while (0);
+
+#define append_math_add_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
+#define append_math_sub_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data)
+#define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data)
+#define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data)
+#define append_math_and_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data)
+#define append_math_or_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data)
+#define append_math_xor_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data)
+#define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data)
+#define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 2e5b2044c96..d0183ddb307 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1,6 +1,6 @@
/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
*
- * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -31,8 +31,8 @@
#include "n2_core.h"
#define DRV_MODULE_NAME "n2_crypto"
-#define DRV_MODULE_VERSION "0.1"
-#define DRV_MODULE_RELDATE "April 29, 2010"
+#define DRV_MODULE_VERSION "0.2"
+#define DRV_MODULE_RELDATE "July 28, 2011"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -1823,22 +1823,17 @@ static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *de
static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
struct spu_mdesc_info *ip)
{
- const u64 *intr, *ino;
- int intr_len, ino_len;
+ const u64 *ino;
+ int ino_len;
int i;
- intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
- if (!intr)
- return -ENODEV;
-
ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
- if (!ino)
+ if (!ino) {
+ printk("NO 'ino'\n");
return -ENODEV;
+ }
- if (intr_len != ino_len)
- return -EINVAL;
-
- ip->num_intrs = intr_len / sizeof(u64);
+ ip->num_intrs = ino_len / sizeof(u64);
ip->ino_table = kzalloc((sizeof(struct ino_blob) *
ip->num_intrs),
GFP_KERNEL);
@@ -1847,7 +1842,7 @@ static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
for (i = 0; i < ip->num_intrs; i++) {
struct ino_blob *b = &ip->ino_table[i];
- b->intr = intr[i];
+ b->intr = i + 1;
b->ino = ino[i];
}
@@ -2204,6 +2199,10 @@ static struct of_device_id n2_crypto_match[] = {
.name = "n2cp",
.compatible = "SUNW,vf-cwq",
},
+ {
+ .name = "n2cp",
+ .compatible = "SUNW,kt-cwq",
+ },
{},
};
@@ -2228,6 +2227,10 @@ static struct of_device_id n2_mau_match[] = {
.name = "ncp",
.compatible = "SUNW,vf-mau",
},
+ {
+ .name = "ncp",
+ .compatible = "SUNW,kt-mau",
+ },
{},
};
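The n2_core hunk above drops the separate "intr" property and sizes the interrupt table from the "ino" property alone, synthesizing 1-based intr handles. A small standalone sketch of that arithmetic; the array below merely stands in for the data mdesc_get_property() would return.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* stand-in for the "ino" property contents */
        static const uint64_t ino[] = { 0x10, 0x11, 0x12 };
        int ino_len = sizeof(ino);                    /* property length in bytes */
        int num_intrs = ino_len / sizeof(uint64_t);   /* one entry per u64 */

        for (int i = 0; i < num_intrs; i++)
                printf("entry %d: intr=%d ino=0x%llx\n", i, i + 1,
                       (unsigned long long)ino[i]);
        return 0;
}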
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index ba8f1ea84c5..6399a8f1938 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -72,17 +72,20 @@
#define DEFAULT_TIMEOUT_INTERVAL HZ
-#define FLAGS_FINUP 0x0002
-#define FLAGS_FINAL 0x0004
-#define FLAGS_SG 0x0008
-#define FLAGS_SHA1 0x0010
-#define FLAGS_DMA_ACTIVE 0x0020
-#define FLAGS_OUTPUT_READY 0x0040
-#define FLAGS_INIT 0x0100
-#define FLAGS_CPU 0x0200
-#define FLAGS_HMAC 0x0400
-#define FLAGS_ERROR 0x0800
-#define FLAGS_BUSY 0x1000
+/* mostly device flags */
+#define FLAGS_BUSY 0
+#define FLAGS_FINAL 1
+#define FLAGS_DMA_ACTIVE 2
+#define FLAGS_OUTPUT_READY 3
+#define FLAGS_INIT 4
+#define FLAGS_CPU 5
+#define FLAGS_DMA_READY 6
+/* context flags */
+#define FLAGS_FINUP 16
+#define FLAGS_SG 17
+#define FLAGS_SHA1 18
+#define FLAGS_HMAC 19
+#define FLAGS_ERROR 20
#define OP_UPDATE 1
#define OP_FINAL 2
@@ -144,7 +147,6 @@ struct omap_sham_dev {
int dma;
int dma_lch;
struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
unsigned long flags;
struct crypto_queue queue;
@@ -223,7 +225,7 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
if (!hash)
return;
- if (likely(ctx->flags & FLAGS_SHA1)) {
+ if (likely(ctx->flags & BIT(FLAGS_SHA1))) {
/* SHA1 results are in big endian */
for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
hash[i] = be32_to_cpu(in[i]);
@@ -238,7 +240,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
clk_enable(dd->iclk);
- if (!(dd->flags & FLAGS_INIT)) {
+ if (!test_bit(FLAGS_INIT, &dd->flags)) {
omap_sham_write_mask(dd, SHA_REG_MASK,
SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
@@ -246,7 +248,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
SHA_REG_SYSSTATUS_RESETDONE))
return -ETIMEDOUT;
- dd->flags |= FLAGS_INIT;
+ set_bit(FLAGS_INIT, &dd->flags);
dd->err = 0;
}
@@ -269,7 +271,7 @@ static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
* Setting ALGO_CONST only for the first iteration
* and CLOSE_HASH only for the last one.
*/
- if (ctx->flags & FLAGS_SHA1)
+ if (ctx->flags & BIT(FLAGS_SHA1))
val |= SHA_REG_CTRL_ALGO;
if (!ctx->digcnt)
val |= SHA_REG_CTRL_ALGO_CONST;
@@ -301,7 +303,9 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
return -ETIMEDOUT;
if (final)
- ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+ set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
+
+ set_bit(FLAGS_CPU, &dd->flags);
len32 = DIV_ROUND_UP(length, sizeof(u32));
@@ -334,9 +338,9 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
ctx->digcnt += length;
if (final)
- ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+ set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
- dd->flags |= FLAGS_DMA_ACTIVE;
+ set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
omap_start_dma(dd->dma_lch);
@@ -392,7 +396,7 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
return -EINVAL;
}
- ctx->flags &= ~FLAGS_SG;
+ ctx->flags &= ~BIT(FLAGS_SG);
/* next call does not fail... so no unmap in the case of error */
return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
@@ -406,7 +410,7 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
omap_sham_append_sg(ctx);
- final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+ final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
ctx->bufcnt, ctx->digcnt, final);
@@ -452,7 +456,7 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
length = min(ctx->total, sg->length);
if (sg_is_last(sg)) {
- if (!(ctx->flags & FLAGS_FINUP)) {
+ if (!(ctx->flags & BIT(FLAGS_FINUP))) {
/* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
/* without finup() we need one block to close hash */
@@ -467,12 +471,12 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
return -EINVAL;
}
- ctx->flags |= FLAGS_SG;
+ ctx->flags |= BIT(FLAGS_SG);
ctx->total -= length;
ctx->offset = length; /* offset where to start slow */
- final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+ final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
/* next call does not fail... so no unmap in the case of error */
return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
@@ -495,7 +499,7 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
omap_stop_dma(dd->dma_lch);
- if (ctx->flags & FLAGS_SG) {
+ if (ctx->flags & BIT(FLAGS_SG)) {
dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
if (ctx->sg->length == ctx->offset) {
ctx->sg = sg_next(ctx->sg);
@@ -537,18 +541,18 @@ static int omap_sham_init(struct ahash_request *req)
crypto_ahash_digestsize(tfm));
if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
- ctx->flags |= FLAGS_SHA1;
+ ctx->flags |= BIT(FLAGS_SHA1);
ctx->bufcnt = 0;
ctx->digcnt = 0;
ctx->buflen = BUFLEN;
- if (tctx->flags & FLAGS_HMAC) {
+ if (tctx->flags & BIT(FLAGS_HMAC)) {
struct omap_sham_hmac_ctx *bctx = tctx->base;
memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
- ctx->flags |= FLAGS_HMAC;
+ ctx->flags |= BIT(FLAGS_HMAC);
}
return 0;
@@ -562,9 +566,9 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
int err;
dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
- ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);
+ ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
- if (ctx->flags & FLAGS_CPU)
+ if (ctx->flags & BIT(FLAGS_CPU))
err = omap_sham_update_cpu(dd);
else
err = omap_sham_update_dma_start(dd);
@@ -624,7 +628,7 @@ static int omap_sham_finish(struct ahash_request *req)
if (ctx->digcnt) {
omap_sham_copy_ready_hash(req);
- if (ctx->flags & FLAGS_HMAC)
+ if (ctx->flags & BIT(FLAGS_HMAC))
err = omap_sham_finish_hmac(req);
}
@@ -639,18 +643,23 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
struct omap_sham_dev *dd = ctx->dd;
if (!err) {
- omap_sham_copy_hash(ctx->dd->req, 1);
- if (ctx->flags & FLAGS_FINAL)
+ omap_sham_copy_hash(req, 1);
+ if (test_bit(FLAGS_FINAL, &dd->flags))
err = omap_sham_finish(req);
} else {
- ctx->flags |= FLAGS_ERROR;
+ ctx->flags |= BIT(FLAGS_ERROR);
}
+ /* atomic operation is not needed here */
+ dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
+ BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
clk_disable(dd->iclk);
- dd->flags &= ~FLAGS_BUSY;
if (req->base.complete)
req->base.complete(&req->base, err);
+
+ /* handle new request */
+ tasklet_schedule(&dd->done_task);
}
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
@@ -658,21 +667,20 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
{
struct crypto_async_request *async_req, *backlog;
struct omap_sham_reqctx *ctx;
- struct ahash_request *prev_req;
unsigned long flags;
int err = 0, ret = 0;
spin_lock_irqsave(&dd->lock, flags);
if (req)
ret = ahash_enqueue_request(&dd->queue, req);
- if (dd->flags & FLAGS_BUSY) {
+ if (test_bit(FLAGS_BUSY, &dd->flags)) {
spin_unlock_irqrestore(&dd->lock, flags);
return ret;
}
backlog = crypto_get_backlog(&dd->queue);
async_req = crypto_dequeue_request(&dd->queue);
if (async_req)
- dd->flags |= FLAGS_BUSY;
+ set_bit(FLAGS_BUSY, &dd->flags);
spin_unlock_irqrestore(&dd->lock, flags);
if (!async_req)
@@ -682,16 +690,12 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
backlog->complete(backlog, -EINPROGRESS);
req = ahash_request_cast(async_req);
-
- prev_req = dd->req;
dd->req = req;
-
ctx = ahash_request_ctx(req);
dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
ctx->op, req->nbytes);
-
err = omap_sham_hw_init(dd);
if (err)
goto err1;
@@ -712,18 +716,16 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
if (ctx->op == OP_UPDATE) {
err = omap_sham_update_req(dd);
- if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
+ if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
/* no final() after finup() */
err = omap_sham_final_req(dd);
} else if (ctx->op == OP_FINAL) {
err = omap_sham_final_req(dd);
}
err1:
- if (err != -EINPROGRESS) {
+ if (err != -EINPROGRESS)
/* done_task will not finish it, so do it here */
omap_sham_finish_req(req, err);
- tasklet_schedule(&dd->queue_task);
- }
dev_dbg(dd->dev, "exit, err: %d\n", err);
@@ -752,7 +754,7 @@ static int omap_sham_update(struct ahash_request *req)
ctx->sg = req->src;
ctx->offset = 0;
- if (ctx->flags & FLAGS_FINUP) {
+ if (ctx->flags & BIT(FLAGS_FINUP)) {
if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
/*
* OMAP HW accel works only with buffers >= 9
@@ -765,7 +767,7 @@ static int omap_sham_update(struct ahash_request *req)
/*
* faster to use CPU for short transfers
*/
- ctx->flags |= FLAGS_CPU;
+ ctx->flags |= BIT(FLAGS_CPU);
}
} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
omap_sham_append_sg(ctx);
@@ -802,9 +804,9 @@ static int omap_sham_final(struct ahash_request *req)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- ctx->flags |= FLAGS_FINUP;
+ ctx->flags |= BIT(FLAGS_FINUP);
- if (ctx->flags & FLAGS_ERROR)
+ if (ctx->flags & BIT(FLAGS_ERROR))
return 0; /* uncompleted hash is not needed */
/* OMAP HW accel works only with buffers >= 9 */
@@ -823,7 +825,7 @@ static int omap_sham_finup(struct ahash_request *req)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err1, err2;
- ctx->flags |= FLAGS_FINUP;
+ ctx->flags |= BIT(FLAGS_FINUP);
err1 = omap_sham_update(req);
if (err1 == -EINPROGRESS || err1 == -EBUSY)
@@ -895,7 +897,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
if (alg_base) {
struct omap_sham_hmac_ctx *bctx = tctx->base;
- tctx->flags |= FLAGS_HMAC;
+ tctx->flags |= BIT(FLAGS_HMAC);
bctx->shash = crypto_alloc_shash(alg_base, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(bctx->shash)) {
@@ -932,7 +934,7 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm)
crypto_free_shash(tctx->fallback);
tctx->fallback = NULL;
- if (tctx->flags & FLAGS_HMAC) {
+ if (tctx->flags & BIT(FLAGS_HMAC)) {
struct omap_sham_hmac_ctx *bctx = tctx->base;
crypto_free_shash(bctx->shash);
}
@@ -1036,51 +1038,46 @@ static struct ahash_alg algs[] = {
static void omap_sham_done_task(unsigned long data)
{
struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
- struct ahash_request *req = dd->req;
- struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- int ready = 0, err = 0;
+ int err = 0;
- if (ctx->flags & FLAGS_OUTPUT_READY) {
- ctx->flags &= ~FLAGS_OUTPUT_READY;
- ready = 1;
+ if (!test_bit(FLAGS_BUSY, &dd->flags)) {
+ omap_sham_handle_queue(dd, NULL);
+ return;
}
- if (dd->flags & FLAGS_DMA_ACTIVE) {
- dd->flags &= ~FLAGS_DMA_ACTIVE;
- omap_sham_update_dma_stop(dd);
- if (!dd->err)
+ if (test_bit(FLAGS_CPU, &dd->flags)) {
+ if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
+ goto finish;
+ } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
+ if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
+ omap_sham_update_dma_stop(dd);
+ if (dd->err) {
+ err = dd->err;
+ goto finish;
+ }
+ }
+ if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
+ /* hash or semi-hash ready */
+ clear_bit(FLAGS_DMA_READY, &dd->flags);
err = omap_sham_update_dma_start(dd);
+ if (err != -EINPROGRESS)
+ goto finish;
+ }
}
- err = dd->err ? : err;
-
- if (err != -EINPROGRESS && (ready || err)) {
- dev_dbg(dd->dev, "update done: err: %d\n", err);
- /* finish curent request */
- omap_sham_finish_req(req, err);
- /* start new request */
- omap_sham_handle_queue(dd, NULL);
- }
-}
-
-static void omap_sham_queue_task(unsigned long data)
-{
- struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+ return;
- omap_sham_handle_queue(dd, NULL);
+finish:
+ dev_dbg(dd->dev, "update done: err: %d\n", err);
+ /* finish current request */
+ omap_sham_finish_req(dd->req, err);
}
static irqreturn_t omap_sham_irq(int irq, void *dev_id)
{
struct omap_sham_dev *dd = dev_id;
- struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-
- if (!ctx) {
- dev_err(dd->dev, "unknown interrupt.\n");
- return IRQ_HANDLED;
- }
- if (unlikely(ctx->flags & FLAGS_FINAL))
+ if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
/* final -> allow device to go to power-saving mode */
omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
@@ -1088,8 +1085,12 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id)
SHA_REG_CTRL_OUTPUT_READY);
omap_sham_read(dd, SHA_REG_CTRL);
- ctx->flags |= FLAGS_OUTPUT_READY;
- dd->err = 0;
+ if (!test_bit(FLAGS_BUSY, &dd->flags)) {
+ dev_warn(dd->dev, "Interrupt when no active requests.\n");
+ return IRQ_HANDLED;
+ }
+
+ set_bit(FLAGS_OUTPUT_READY, &dd->flags);
tasklet_schedule(&dd->done_task);
return IRQ_HANDLED;
@@ -1102,9 +1103,10 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
if (ch_status != OMAP_DMA_BLOCK_IRQ) {
pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
dd->err = -EIO;
- dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+ clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */
}
+ set_bit(FLAGS_DMA_READY, &dd->flags);
tasklet_schedule(&dd->done_task);
}
@@ -1151,7 +1153,6 @@ static int __devinit omap_sham_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dd->list);
spin_lock_init(&dd->lock);
tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
- tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
dd->irq = -1;
@@ -1260,7 +1261,6 @@ static int __devexit omap_sham_remove(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(algs); i++)
crypto_unregister_ahash(&algs[i]);
tasklet_kill(&dd->done_task);
- tasklet_kill(&dd->queue_task);
iounmap(dd->io_base);
clk_put(dd->iclk);
omap_sham_dma_cleanup(dd);
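The conversion above turns the old flag bitmasks into bit numbers: context flags (ctx->flags) are still combined with BIT(), while device flags (dd->flags) are handled with the atomic set_bit()/test_bit() family because the IRQ handler, the DMA callback and the done tasklet all update them concurrently. A standalone sketch of the two access patterns, using compiler atomics as stand-ins for the kernel bitops:

#include <stdio.h>

#define BIT(n)                  (1UL << (n))

/* device flags: bit numbers, accessed atomically */
#define FLAGS_BUSY              0
#define FLAGS_OUTPUT_READY      3
/* context flags: bit numbers, accessed as plain masks */
#define FLAGS_FINUP             16

static unsigned long dev_flags;         /* stands in for dd->flags */
static unsigned long ctx_flags;         /* stands in for ctx->flags */

/* minimal stand-ins for set_bit() / test_and_clear_bit() */
static void set_bit_atomic(int nr, unsigned long *addr)
{
        __atomic_fetch_or(addr, BIT(nr), __ATOMIC_SEQ_CST);
}

static int test_and_clear_bit_atomic(int nr, unsigned long *addr)
{
        return (__atomic_fetch_and(addr, ~BIT(nr), __ATOMIC_SEQ_CST) &
                BIT(nr)) != 0;
}

int main(void)
{
        ctx_flags |= BIT(FLAGS_FINUP);                  /* request context, no races */
        set_bit_atomic(FLAGS_OUTPUT_READY, &dev_flags); /* as the IRQ handler would */

        if (test_and_clear_bit_atomic(FLAGS_OUTPUT_READY, &dev_flags))
                printf("hash ready, finup=%lu\n",
                       (ctx_flags & BIT(FLAGS_FINUP)) ? 1UL : 0UL);
        return 0;
}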
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 854e2632f9a..8a0bb417aa1 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1,7 +1,7 @@
/*
* talitos - Freescale Integrated Security Engine (SEC) device driver
*
- * Copyright (c) 2008-2010 Freescale Semiconductor, Inc.
+ * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
*
* Scatterlist Crypto API glue code copied from files with the following:
* Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
@@ -282,6 +282,7 @@ static int init_device(struct device *dev)
/**
* talitos_submit - submits a descriptor to the device for processing
* @dev: the SEC device to be used
+ * @ch: the SEC device channel to be used
* @desc: the descriptor to be processed by the device
* @callback: whom to call when processing is complete
* @context: a handle for use by caller (optional)
@@ -290,7 +291,7 @@ static int init_device(struct device *dev)
* callback must check err and feedback in descriptor header
* for device processing status.
*/
-static int talitos_submit(struct device *dev, struct talitos_desc *desc,
+static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
void (*callback)(struct device *dev,
struct talitos_desc *desc,
void *context, int error),
@@ -298,15 +299,9 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
{
struct talitos_private *priv = dev_get_drvdata(dev);
struct talitos_request *request;
- unsigned long flags, ch;
+ unsigned long flags;
int head;
- /* select done notification */
- desc->hdr |= DESC_HDR_DONE_NOTIFY;
-
- /* emulate SEC's round-robin channel fifo polling scheme */
- ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
-
spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
@@ -706,6 +701,7 @@ static void talitos_unregister_rng(struct device *dev)
struct talitos_ctx {
struct device *dev;
+ int ch;
__be32 desc_hdr_template;
u8 key[TALITOS_MAX_KEY_SIZE];
u8 iv[TALITOS_MAX_IV_LENGTH];
@@ -1117,7 +1113,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
DMA_FROM_DEVICE);
- ret = talitos_submit(dev, desc, callback, areq);
+ ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
ipsec_esp_unmap(dev, edesc, areq);
kfree(edesc);
@@ -1382,22 +1378,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
-
- if (keylen > TALITOS_MAX_KEY_SIZE)
- goto badkey;
-
- if (keylen < alg->min_keysize || keylen > alg->max_keysize)
- goto badkey;
memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;
return 0;
-
-badkey:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
static void common_nonsnoop_unmap(struct device *dev,
@@ -1433,7 +1418,6 @@ static void ablkcipher_done(struct device *dev,
static int common_nonsnoop(struct talitos_edesc *edesc,
struct ablkcipher_request *areq,
- u8 *giv,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
void *context, int error))
@@ -1453,7 +1437,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/* cipher iv */
ivsize = crypto_ablkcipher_ivsize(cipher);
- map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
+ map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, areq->info, 0,
DMA_TO_DEVICE);
/* cipher key */
@@ -1524,7 +1508,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
to_talitos_ptr(&desc->ptr[6], 0);
desc->ptr[6].j_extent = 0;
- ret = talitos_submit(dev, desc, callback, areq);
+ ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
common_nonsnoop_unmap(dev, edesc, areq);
kfree(edesc);
@@ -1556,7 +1540,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
- return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+ return common_nonsnoop(edesc, areq, ablkcipher_done);
}
static int ablkcipher_decrypt(struct ablkcipher_request *areq)
@@ -1572,7 +1556,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
- return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+ return common_nonsnoop(edesc, areq, ablkcipher_done);
}
static void common_nonsnoop_hash_unmap(struct device *dev,
@@ -1703,7 +1687,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
/* last DWORD empty */
desc->ptr[6] = zero_entry;
- ret = talitos_submit(dev, desc, callback, areq);
+ ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
common_nonsnoop_hash_unmap(dev, edesc, areq);
kfree(edesc);
@@ -2244,6 +2228,7 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
struct crypto_alg *alg = tfm->__crt_alg;
struct talitos_crypto_alg *talitos_alg;
struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct talitos_private *priv;
if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
talitos_alg = container_of(__crypto_ahash_alg(alg),
@@ -2256,9 +2241,17 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;
+ /* assign SEC channel to tfm in round-robin fashion */
+ priv = dev_get_drvdata(ctx->dev);
+ ctx->ch = atomic_inc_return(&priv->last_chan) &
+ (priv->num_channels - 1);
+
/* copy descriptor header template value */
ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
+ /* select done notification */
+ ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
+
return 0;
}
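With this change each transform is bound to a channel once, at cra_init time, instead of the submit path picking a channel per request. The selection itself is a wrapping counter masked by the channel count; a tiny sketch of that arithmetic, assuming (as the driver does) that num_channels is a power of two:

#include <stdio.h>

int main(void)
{
        unsigned int last_chan = 0;     /* stands in for atomic_t priv->last_chan */
        const unsigned int num_channels = 4;

        for (int tfm = 0; tfm < 6; tfm++) {
                /* same pattern as atomic_inc_return() & (num_channels - 1) */
                unsigned int ch = ++last_chan & (num_channels - 1);

                printf("tfm %d -> channel %u\n", tfm, ch);
        }
        return 0;
}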
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 25cf327cd1c..2e3b3d38c46 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -237,6 +237,13 @@ config MXS_DMA
Support the MXS DMA engine. This engine including APBH-DMA
and APBX-DMA is integrated into Freescale i.MX23/28 chips.
+config EP93XX_DMA
+ bool "Cirrus Logic EP93xx DMA support"
+ depends on ARCH_EP93XX
+ select DMA_ENGINE
+ help
+ Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 836095ab3c5..30cf3b1f0c5 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
+obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
index a4af8589330..734ed0206cd 100644
--- a/drivers/dma/TODO
+++ b/drivers/dma/TODO
@@ -9,6 +9,5 @@ TODO for slave dma
- mxs-dma.c
- dw_dmac
- intel_mid_dma
- - ste_dma40
4. Check other subsystems for dma drivers and merge/move to dmaengine
5. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e6d7228b147..be21e3f138a 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -80,6 +80,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
@@ -156,14 +157,10 @@ struct pl08x_driver_data {
#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */
#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT)
-/* Minimum period between work queue runs */
-#define PL08X_WQ_PERIODMIN 20
-
/* Size (bytes) of each LLI buffer allocated for one transfer */
# define PL08X_LLI_TSFR_SIZE 0x2000
/* Maximum times we call dma_pool_alloc on this pool without freeing */
-#define PL08X_MAX_ALLOCS 0x40
#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN 8
@@ -495,10 +492,10 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
struct pl08x_lli_build_data {
struct pl08x_txd *txd;
- struct pl08x_driver_data *pl08x;
struct pl08x_bus_data srcbus;
struct pl08x_bus_data dstbus;
size_t remainder;
+ u32 lli_bus;
};
/*
@@ -551,8 +548,7 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
llis_va[num_llis].src = bd->srcbus.addr;
llis_va[num_llis].dst = bd->dstbus.addr;
llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
- if (bd->pl08x->lli_buses & PL08X_AHB2)
- llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
+ llis_va[num_llis].lli |= bd->lli_bus;
if (cctl & PL080_CONTROL_SRC_INCR)
bd->srcbus.addr += len;
@@ -605,9 +601,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
cctl = txd->cctl;
bd.txd = txd;
- bd.pl08x = pl08x;
bd.srcbus.addr = txd->src_addr;
bd.dstbus.addr = txd->dst_addr;
+ bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
/* Find maximum width of the source bus */
bd.srcbus.maxwidth =
@@ -622,25 +618,15 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
/* Set up the bus widths to the maximum */
bd.srcbus.buswidth = bd.srcbus.maxwidth;
bd.dstbus.buswidth = bd.dstbus.maxwidth;
- dev_vdbg(&pl08x->adev->dev,
- "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
- __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
-
/*
* Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
*/
max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
PL080_CONTROL_TRANSFER_SIZE_MASK;
- dev_vdbg(&pl08x->adev->dev,
- "%s max bytes per lli = %zu\n",
- __func__, max_bytes_per_lli);
/* We need to count this down to zero */
bd.remainder = txd->len;
- dev_vdbg(&pl08x->adev->dev,
- "%s remainder = %zu\n",
- __func__, bd.remainder);
/*
* Choose bus to align to
@@ -649,6 +635,16 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
*/
pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
+ dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n",
+ bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+ bd.srcbus.buswidth,
+ bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+ bd.dstbus.buswidth,
+ bd.remainder, max_bytes_per_lli);
+ dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
+ mbus == &bd.srcbus ? "src" : "dst",
+ sbus == &bd.srcbus ? "src" : "dst");
+
if (txd->len < mbus->buswidth) {
/* Less than a bus width available - send as single bytes */
while (bd.remainder) {
@@ -840,15 +836,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
{
int i;
+ dev_vdbg(&pl08x->adev->dev,
+ "%-3s %-9s %-10s %-10s %-10s %s\n",
+ "lli", "", "csrc", "cdst", "clli", "cctl");
for (i = 0; i < num_llis; i++) {
dev_vdbg(&pl08x->adev->dev,
- "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
- i,
- &llis_va[i],
- llis_va[i].src,
- llis_va[i].dst,
- llis_va[i].cctl,
- llis_va[i].lli
+ "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, &llis_va[i], llis_va[i].src,
+ llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
);
}
}
@@ -1054,64 +1049,105 @@ pl08x_dma_tx_status(struct dma_chan *chan,
/* PrimeCell DMA extension */
struct burst_table {
- int burstwords;
+ u32 burstwords;
u32 reg;
};
static const struct burst_table burst_sizes[] = {
{
.burstwords = 256,
- .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_256,
},
{
.burstwords = 128,
- .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_128,
},
{
.burstwords = 64,
- .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_64,
},
{
.burstwords = 32,
- .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_32,
},
{
.burstwords = 16,
- .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_16,
},
{
.burstwords = 8,
- .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_8,
},
{
.burstwords = 4,
- .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_4,
},
{
- .burstwords = 1,
- .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .burstwords = 0,
+ .reg = PL080_BSIZE_1,
},
};
+/*
+ * Given the source and destination available bus masks, select which
+ * will be routed to each port. We try to have source and destination
+ * on separate ports, but always respect the allowable settings.
+ */
+static u32 pl08x_select_bus(u8 src, u8 dst)
+{
+ u32 cctl = 0;
+
+ if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
+ cctl |= PL080_CONTROL_DST_AHB2;
+ if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
+ cctl |= PL080_CONTROL_SRC_AHB2;
+
+ return cctl;
+}
+
+static u32 pl08x_cctl(u32 cctl)
+{
+ cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
+ PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
+ PL080_CONTROL_PROT_MASK);
+
+ /* Access the cell in privileged mode, non-bufferable, non-cacheable */
+ return cctl | PL080_CONTROL_PROT_SYS;
+}
+
+static u32 pl08x_width(enum dma_slave_buswidth width)
+{
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ return PL080_WIDTH_8BIT;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ return PL080_WIDTH_16BIT;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ return PL080_WIDTH_32BIT;
+ default:
+ return ~0;
+ }
+}
+
+static u32 pl08x_burst(u32 maxburst)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
+ if (burst_sizes[i].burstwords <= maxburst)
+ break;
+
+ return burst_sizes[i].reg;
+}
+
static int dma_set_runtime_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
- struct pl08x_channel_data *cd = plchan->cd;
enum dma_slave_buswidth addr_width;
- dma_addr_t addr;
- u32 maxburst;
+ u32 width, burst, maxburst;
u32 cctl = 0;
- int i;
if (!plchan->slave)
return -EINVAL;
@@ -1119,11 +1155,9 @@ static int dma_set_runtime_config(struct dma_chan *chan,
/* Transfer direction */
plchan->runtime_direction = config->direction;
if (config->direction == DMA_TO_DEVICE) {
- addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
} else if (config->direction == DMA_FROM_DEVICE) {
- addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
} else {
@@ -1132,46 +1166,40 @@ static int dma_set_runtime_config(struct dma_chan *chan,
return -EINVAL;
}
- switch (addr_width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
- (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
- (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
- break;
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
- (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
- break;
- default:
+ width = pl08x_width(addr_width);
+ if (width == ~0) {
dev_err(&pl08x->adev->dev,
"bad runtime_config: alien address width\n");
return -EINVAL;
}
+ cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
+ cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
+
/*
- * Now decide on a maxburst:
* If this channel will only request single transfers, set this
* down to ONE element. Also select one element if no maxburst
* is specified.
*/
- if (plchan->cd->single || maxburst == 0) {
- cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
+ if (plchan->cd->single)
+ maxburst = 1;
+
+ burst = pl08x_burst(maxburst);
+ cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
+ cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
+
+ if (plchan->runtime_direction == DMA_FROM_DEVICE) {
+ plchan->src_addr = config->src_addr;
+ plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
+ pl08x_select_bus(plchan->cd->periph_buses,
+ pl08x->mem_buses);
} else {
- for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
- if (burst_sizes[i].burstwords <= maxburst)
- break;
- cctl |= burst_sizes[i].reg;
+ plchan->dst_addr = config->dst_addr;
+ plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
+ pl08x_select_bus(pl08x->mem_buses,
+ plchan->cd->periph_buses);
}
- plchan->runtime_addr = addr;
-
- /* Modify the default channel data to fit PrimeCell request */
- cd->cctl = cctl;
-
dev_dbg(&pl08x->adev->dev,
"configured channel %s (%s) for %s, data width %d, "
"maxburst %d words, LE, CCTL=0x%08x\n",
@@ -1270,23 +1298,6 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
return 0;
}
-/*
- * Given the source and destination available bus masks, select which
- * will be routed to each port. We try to have source and destination
- * on separate ports, but always respect the allowable settings.
- */
-static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
-{
- u32 cctl = 0;
-
- if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
- cctl |= PL080_CONTROL_DST_AHB2;
- if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
- cctl |= PL080_CONTROL_SRC_AHB2;
-
- return cctl;
-}
-
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
unsigned long flags)
{
@@ -1338,8 +1349,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
if (pl08x->vd->dualmaster)
- txd->cctl |= pl08x_select_bus(pl08x,
- pl08x->mem_buses, pl08x->mem_buses);
+ txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
+ pl08x->mem_buses);
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
@@ -1356,7 +1367,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
- u8 src_buses, dst_buses;
int ret;
/*
@@ -1390,42 +1400,22 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
txd->direction = direction;
txd->len = sgl->length;
- txd->cctl = plchan->cd->cctl &
- ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
- PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
- PL080_CONTROL_PROT_MASK);
-
- /* Access the cell in privileged mode, non-bufferable, non-cacheable */
- txd->cctl |= PL080_CONTROL_PROT_SYS;
-
if (direction == DMA_TO_DEVICE) {
txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
- txd->cctl |= PL080_CONTROL_SRC_INCR;
+ txd->cctl = plchan->dst_cctl;
txd->src_addr = sgl->dma_address;
- if (plchan->runtime_addr)
- txd->dst_addr = plchan->runtime_addr;
- else
- txd->dst_addr = plchan->cd->addr;
- src_buses = pl08x->mem_buses;
- dst_buses = plchan->cd->periph_buses;
+ txd->dst_addr = plchan->dst_addr;
} else if (direction == DMA_FROM_DEVICE) {
txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
- txd->cctl |= PL080_CONTROL_DST_INCR;
- if (plchan->runtime_addr)
- txd->src_addr = plchan->runtime_addr;
- else
- txd->src_addr = plchan->cd->addr;
+ txd->cctl = plchan->src_cctl;
+ txd->src_addr = plchan->src_addr;
txd->dst_addr = sgl->dma_address;
- src_buses = plchan->cd->periph_buses;
- dst_buses = pl08x->mem_buses;
} else {
dev_err(&pl08x->adev->dev,
"%s direction unsupported\n", __func__);
return NULL;
}
- txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);
-
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
return NULL;
@@ -1676,6 +1666,20 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
return mask ? IRQ_HANDLED : IRQ_NONE;
}
+static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
+{
+ u32 cctl = pl08x_cctl(chan->cd->cctl);
+
+ chan->slave = true;
+ chan->name = chan->cd->bus_id;
+ chan->src_addr = chan->cd->addr;
+ chan->dst_addr = chan->cd->addr;
+ chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
+ pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
+ chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
+ pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
+}
+
/*
* Initialise the DMAC memcpy/slave channels.
* Make a local wrapper to hold required data
@@ -1707,9 +1711,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
chan->state = PL08X_CHAN_IDLE;
if (slave) {
- chan->slave = true;
- chan->name = pl08x->pd->slave_channels[i].bus_id;
chan->cd = &pl08x->pd->slave_channels[i];
+ pl08x_dma_slave_init(chan);
} else {
chan->cd = &pl08x->pd->memcpy_channel;
chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
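One of the new helpers above is worth spelling out: pl08x_burst() walks burst_sizes from the largest burst to the smallest and returns the first entry whose burstwords does not exceed maxburst; the final { .burstwords = 0 } entry guarantees that maxburst values of 0 or 1 fall through to the single-transfer encoding. A standalone sketch of that walk (the register encodings below are stand-ins):

#include <stdint.h>
#include <stdio.h>

struct burst_table {
        uint32_t burstwords;
        uint32_t reg;
};

/* ordered largest to smallest; the reg encodings are assumed values */
static const struct burst_table burst_sizes[] = {
        { 256, 7 }, { 128, 6 }, { 64, 5 }, { 32, 4 },
        { 16, 3 }, { 8, 2 }, { 4, 1 }, { 0, 0 },
};

#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

static uint32_t pl08x_burst_sketch(uint32_t maxburst)
{
        unsigned int i;

        /*
         * The terminating { 0, ... } entry always matches, so the loop
         * can never run off the end of the table.
         */
        for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
                if (burst_sizes[i].burstwords <= maxburst)
                        break;
        return burst_sizes[i].reg;
}

int main(void)
{
        const uint32_t tests[] = { 0, 1, 4, 17, 300 };

        for (unsigned int i = 0; i < ARRAY_SIZE(tests); i++)
                printf("maxburst %u -> reg %u\n",
                       (unsigned int)tests[i],
                       (unsigned int)pl08x_burst_sketch(tests[i]));
        return 0;
}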
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 36144f88d71..6a483eac7b3 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1216,7 +1216,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
atdma->dma_common.cap_mask = pdata->cap_mask;
atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
- size = io->end - io->start + 1;
+ size = resource_size(io);
if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
err = -EBUSY;
goto err_kfree;
@@ -1362,7 +1362,7 @@ static int __exit at_dma_remove(struct platform_device *pdev)
atdma->regs = NULL;
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(io->start, io->end - io->start + 1);
+ release_mem_region(io->start, resource_size(io));
kfree(atdma);
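The two hunks above replace the open-coded size computation with resource_size(). Since resource ranges are inclusive on both ends, the helper amounts to end - start + 1; a trivial sketch of the arithmetic (the struct below is a simplified stand-in):

#include <stdio.h>

struct resource { unsigned long start, end; };

static unsigned long resource_size_sketch(const struct resource *res)
{
        return res->end - res->start + 1;       /* inclusive range */
}

int main(void)
{
        struct resource io = { .start = 0xfffac000, .end = 0xfffac0ff };

        printf("size = %lu bytes\n", resource_size_sketch(&io));  /* 256 */
        return 0;
}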
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index af8c0b5ed70..4234f416ef1 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/kernel.h> /* printk() */
#include <linux/fs.h> /* everything... */
+#include <linux/scatterlist.h>
#include <linux/slab.h> /* kmalloc() */
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
@@ -40,6 +41,8 @@ struct coh901318_desc {
struct coh901318_lli *lli;
enum dma_data_direction dir;
unsigned long flags;
+ u32 head_config;
+ u32 head_ctrl;
};
struct coh901318_base {
@@ -660,6 +663,9 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
coh901318_desc_submit(cohc, cohd);
+ /* Program the transaction head */
+ coh901318_set_conf(cohc, cohd->head_config);
+ coh901318_set_ctrl(cohc, cohd->head_ctrl);
coh901318_prep_linked_list(cohc, cohd->lli);
/* start dma job on this channel */
@@ -1090,8 +1096,6 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
} else
goto err_direction;
- coh901318_set_conf(cohc, config);
-
/* The dma only supports transmitting packages up to
* MAX_DMA_PACKET_SIZE. Calculate to total number of
* dma elemts required to send the entire sg list
@@ -1128,16 +1132,18 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (ret)
goto err_lli_fill;
- /*
- * Set the default ctrl for the channel to the one from the lli,
- * things may have changed due to odd buffer alignment etc.
- */
- coh901318_set_ctrl(cohc, lli->control);
COH_DBG(coh901318_list_print(cohc, lli));
/* Pick a descriptor to handle this transfer */
cohd = coh901318_desc_get(cohc);
+ cohd->head_config = config;
+ /*
+ * Set the default head ctrl for the channel to the one from the
+ * lli, things may have changed due to odd buffer alignment
+ * etc.
+ */
+ cohd->head_ctrl = lli->control;
cohd->dir = direction;
cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
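The net effect of this change is that prep_slave_sg() no longer writes the configuration and control registers itself; it only records them in the descriptor (head_config/head_ctrl), and coh901318_queue_start() programs them when that descriptor actually becomes the running job, so a later prepare cannot clobber a transfer that is still queued. A minimal sketch of the deferral (structure and names simplified, not the driver's real types):

#include <stdint.h>
#include <stdio.h>

struct desc {
        uint32_t head_config;
        uint32_t head_ctrl;
};

static uint32_t hw_config, hw_ctrl;     /* stand-ins for the channel registers */

static void prep_slave_sg(struct desc *d, uint32_t config, uint32_t ctrl)
{
        d->head_config = config;        /* nothing touches the hardware here */
        d->head_ctrl = ctrl;
}

static void queue_start(const struct desc *d)
{
        hw_config = d->head_config;     /* programmed only when the job starts */
        hw_ctrl = d->head_ctrl;
}

int main(void)
{
        struct desc a, b;

        prep_slave_sg(&a, 0x11, 0x100);
        prep_slave_sg(&b, 0x22, 0x200); /* must not clobber a's settings */
        queue_start(&a);
        printf("running with config=0x%x ctrl=0x%x\n",
               (unsigned int)hw_config, (unsigned int)hw_ctrl);
        return 0;
}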
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8bcb15fb959..b48967b499d 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -45,6 +45,7 @@
* See Documentation/dmaengine.txt for more details
*/
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -61,9 +62,9 @@
#include <linux/slab.h>
static DEFINE_MUTEX(dma_list_mutex);
+static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
-static struct idr dma_idr;
/* --- sysfs implementation --- */
@@ -509,8 +510,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
dma_chan_name(chan));
list_del_rcu(&device->global_node);
} else if (err)
- pr_err("dmaengine: failed to get %s: (%d)\n",
- dma_chan_name(chan), err);
+ pr_debug("dmaengine: failed to get %s: (%d)\n",
+ dma_chan_name(chan), err);
else
break;
if (--device->privatecnt == 0)
@@ -1049,8 +1050,6 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
- idr_init(&dma_idr);
- mutex_init(&dma_list_mutex);
return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index b4f5c32b6a4..765f5ff2230 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/kthread.h>
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
new file mode 100644
index 00000000000..5d7a49bd7c2
--- /dev/null
+++ b/drivers/dma/ep93xx_dma.c
@@ -0,0 +1,1355 @@
+/*
+ * Driver for the Cirrus Logic EP93xx DMA Controller
+ *
+ * Copyright (C) 2011 Mika Westerberg
+ *
+ * DMA M2P implementation is based on the original
+ * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2006 Applied Data Systems
+ * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
+ *
+ * This driver is based on dw_dmac and amba-pl08x drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <mach/dma.h>
+
+/* M2P registers */
+#define M2P_CONTROL 0x0000
+#define M2P_CONTROL_STALLINT BIT(0)
+#define M2P_CONTROL_NFBINT BIT(1)
+#define M2P_CONTROL_CH_ERROR_INT BIT(3)
+#define M2P_CONTROL_ENABLE BIT(4)
+#define M2P_CONTROL_ICE BIT(6)
+
+#define M2P_INTERRUPT 0x0004
+#define M2P_INTERRUPT_STALL BIT(0)
+#define M2P_INTERRUPT_NFB BIT(1)
+#define M2P_INTERRUPT_ERROR BIT(3)
+
+#define M2P_PPALLOC 0x0008
+#define M2P_STATUS 0x000c
+
+#define M2P_MAXCNT0 0x0020
+#define M2P_BASE0 0x0024
+#define M2P_MAXCNT1 0x0030
+#define M2P_BASE1 0x0034
+
+#define M2P_STATE_IDLE 0
+#define M2P_STATE_STALL 1
+#define M2P_STATE_ON 2
+#define M2P_STATE_NEXT 3
+
+/* M2M registers */
+#define M2M_CONTROL 0x0000
+#define M2M_CONTROL_DONEINT BIT(2)
+#define M2M_CONTROL_ENABLE BIT(3)
+#define M2M_CONTROL_START BIT(4)
+#define M2M_CONTROL_DAH BIT(11)
+#define M2M_CONTROL_SAH BIT(12)
+#define M2M_CONTROL_PW_SHIFT 9
+#define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_TM_SHIFT 13
+#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_RSS_SHIFT 22
+#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_NO_HDSK BIT(24)
+#define M2M_CONTROL_PWSC_SHIFT 25
+
+#define M2M_INTERRUPT 0x0004
+#define M2M_INTERRUPT_DONEINT BIT(1)
+
+#define M2M_BCR0 0x0010
+#define M2M_BCR1 0x0014
+#define M2M_SAR_BASE0 0x0018
+#define M2M_SAR_BASE1 0x001c
+#define M2M_DAR_BASE0 0x002c
+#define M2M_DAR_BASE1 0x0030
+
+#define DMA_MAX_CHAN_BYTES 0xffff
+#define DMA_MAX_CHAN_DESCRIPTORS 32
+
+struct ep93xx_dma_engine;
+
+/**
+ * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
+ * @src_addr: source address of the transaction
+ * @dst_addr: destination address of the transaction
+ * @size: size of the transaction (in bytes)
+ * @complete: this descriptor is completed
+ * @txd: dmaengine API descriptor
+ * @tx_list: list of linked descriptors
+ * @node: link used for putting this into a channel queue
+ */
+struct ep93xx_dma_desc {
+ u32 src_addr;
+ u32 dst_addr;
+ size_t size;
+ bool complete;
+ struct dma_async_tx_descriptor txd;
+ struct list_head tx_list;
+ struct list_head node;
+};
+
+/**
+ * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
+ * @chan: dmaengine API channel
+ * @edma: pointer to the engine device
+ * @regs: memory mapped registers
+ * @irq: interrupt number of the channel
+ * @clk: clock used by this channel
+ * @tasklet: channel specific tasklet used for callbacks
+ * @lock: lock protecting the fields following
+ * @flags: flags for the channel
+ * @buffer: which buffer to use next (0/1)
+ * @last_completed: last completed cookie value
+ * @active: flattened chain of descriptors currently being processed
+ * @queue: pending descriptors which are handled next
+ * @free_list: list of free descriptors which can be used
+ * @runtime_addr: physical address currently used as dest/src (M2M only). This
+ * is set via %DMA_SLAVE_CONFIG before slave operation is
+ * prepared
+ * @runtime_ctrl: M2M runtime values for the control register.
+ *
+ * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
+ * we use a slightly different scheme here: @active points to the head of a
+ * flattened DMA descriptor chain.
+ *
+ * @queue holds pending transactions. These are linked through the first
+ * descriptor in the chain. When a descriptor is moved to the @active queue,
+ * the first and chained descriptors are flattened into a single list.
+ *
+ * @chan.private holds a pointer to &struct ep93xx_dma_data, which contains
+ * the necessary channel configuration information. For memcpy channels this
+ * must be %NULL.
+ */
+struct ep93xx_dma_chan {
+ struct dma_chan chan;
+ const struct ep93xx_dma_engine *edma;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ struct tasklet_struct tasklet;
+ /* protects the fields following */
+ spinlock_t lock;
+ unsigned long flags;
+/* Channel is configured for cyclic transfers */
+#define EP93XX_DMA_IS_CYCLIC 0
+
+ int buffer;
+ dma_cookie_t last_completed;
+ struct list_head active;
+ struct list_head queue;
+ struct list_head free_list;
+ u32 runtime_addr;
+ u32 runtime_ctrl;
+};
+
+/**
+ * struct ep93xx_dma_engine - the EP93xx DMA engine instance
+ * @dma_dev: holds the dmaengine device
+ * @m2m: is this an M2M or M2P device
+ * @hw_setup: method which sets the channel up for operation
+ * @hw_shutdown: shuts the channel down and flushes whatever is left
+ * @hw_submit: pushes active descriptor(s) to the hardware
+ * @hw_interrupt: handle the interrupt
+ * @num_channels: number of channels for this instance
+ * @channels: array of channels
+ *
+ * There is one instance of this struct for the M2P channels and one for the
+ * M2M channels. hw_xxx() methods are used to perform operations which are
+ * different on M2M and M2P channels. These methods are called with channel
+ * lock held and interrupts disabled so they cannot sleep.
+ */
+struct ep93xx_dma_engine {
+ struct dma_device dma_dev;
+ bool m2m;
+ int (*hw_setup)(struct ep93xx_dma_chan *);
+ void (*hw_shutdown)(struct ep93xx_dma_chan *);
+ void (*hw_submit)(struct ep93xx_dma_chan *);
+ int (*hw_interrupt)(struct ep93xx_dma_chan *);
+#define INTERRUPT_UNKNOWN 0
+#define INTERRUPT_DONE 1
+#define INTERRUPT_NEXT_BUFFER 2
+
+ size_t num_channels;
+ struct ep93xx_dma_chan channels[];
+};
+
+static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
+{
+ return &edmac->chan.dev->device;
+}
+
+static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct ep93xx_dma_chan, chan);
+}
+
+/**
+ * ep93xx_dma_set_active - set new active descriptor chain
+ * @edmac: channel
+ * @desc: head of the new active descriptor chain
+ *
+ * Sets @desc to be the head of the new active descriptor chain. This is the
+ * chain which is processed next. The active list must be empty before calling
+ * this function.
+ *
+ * Called with @edmac->lock held and interrupts disabled.
+ */
+static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
+ struct ep93xx_dma_desc *desc)
+{
+ BUG_ON(!list_empty(&edmac->active));
+
+ list_add_tail(&desc->node, &edmac->active);
+
+ /* Flatten the @desc->tx_list chain into @edmac->active list */
+ while (!list_empty(&desc->tx_list)) {
+ struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
+ struct ep93xx_dma_desc, node);
+
+ /*
+ * We copy the callback parameters from the first descriptor
+ * to all the chained descriptors. This way we can call the
+ * callback without having to find out the first descriptor in
+ * the chain. Useful for cyclic transfers.
+ */
+ d->txd.callback = desc->txd.callback;
+ d->txd.callback_param = desc->txd.callback_param;
+
+ list_move_tail(&d->node, &edmac->active);
+ }
+}
+
+/* Called with @edmac->lock held and interrupts disabled */
+static struct ep93xx_dma_desc *
+ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
+{
+ return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
+}
+
+/**
+ * ep93xx_dma_advance_active - advances to the next active descriptor
+ * @edmac: channel
+ *
+ * Function advances active descriptor to the next in the @edmac->active and
+ * returns %true if we still have descriptors in the chain to process.
+ * Otherwise returns %false.
+ *
+ * When the channel is in cyclic mode always returns %true.
+ *
+ * Called with @edmac->lock held and interrupts disabled.
+ */
+static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
+{
+ list_rotate_left(&edmac->active);
+
+ if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+ return true;
+
+ /*
+ * If txd.cookie is set it means that we are back in the first
+ * descriptor in the chain and hence done with it.
+ */
+ return !ep93xx_dma_get_active(edmac)->txd.cookie;
+}
+
+/*
+ * M2P DMA implementation
+ */
+
+static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
+{
+ writel(control, edmac->regs + M2P_CONTROL);
+ /*
+ * EP93xx User's Guide states that we must perform a dummy read after
+ * write to the control register.
+ */
+ readl(edmac->regs + M2P_CONTROL);
+}
+
+static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_data *data = edmac->chan.private;
+ u32 control;
+
+ writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);
+
+ control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
+ | M2P_CONTROL_ENABLE;
+ m2p_set_control(edmac, control);
+
+ return 0;
+}
+
+static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
+{
+ return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
+}
+
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
+ u32 control;
+
+ control = readl(edmac->regs + M2P_CONTROL);
+ control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+ m2p_set_control(edmac, control);
+
+ while (m2p_channel_state(edmac) >= M2P_STATE_ON)
+ cpu_relax();
+
+ m2p_set_control(edmac, 0);
+
+ while (m2p_channel_state(edmac) == M2P_STATE_STALL)
+ cpu_relax();
+}
+
+static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+ u32 bus_addr;
+
+ if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
+ bus_addr = desc->src_addr;
+ else
+ bus_addr = desc->dst_addr;
+
+ if (edmac->buffer == 0) {
+ writel(desc->size, edmac->regs + M2P_MAXCNT0);
+ writel(bus_addr, edmac->regs + M2P_BASE0);
+ } else {
+ writel(desc->size, edmac->regs + M2P_MAXCNT1);
+ writel(bus_addr, edmac->regs + M2P_BASE1);
+ }
+
+ edmac->buffer ^= 1;
+}
+
+static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
+{
+ u32 control = readl(edmac->regs + M2P_CONTROL);
+
+ m2p_fill_desc(edmac);
+ control |= M2P_CONTROL_STALLINT;
+
+ if (ep93xx_dma_advance_active(edmac)) {
+ m2p_fill_desc(edmac);
+ control |= M2P_CONTROL_NFBINT;
+ }
+
+ m2p_set_control(edmac, control);
+}
+
+static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
+{
+ u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
+ u32 control;
+
+ if (irq_status & M2P_INTERRUPT_ERROR) {
+ struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+
+ /* Clear the error interrupt */
+ writel(1, edmac->regs + M2P_INTERRUPT);
+
+ /*
+ * It seems that there is no easy way of reporting errors back
+ * to the client, so we just report the error here and continue
+ * as usual.
+ *
+ * Revisit this when there is a mechanism to report back the
+ * errors.
+ */
+ dev_err(chan2dev(edmac),
+ "DMA transfer failed! Details:\n"
+ "\tcookie : %d\n"
+ "\tsrc_addr : 0x%08x\n"
+ "\tdst_addr : 0x%08x\n"
+ "\tsize : %zu\n",
+ desc->txd.cookie, desc->src_addr, desc->dst_addr,
+ desc->size);
+ }
+
+ switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
+ case M2P_INTERRUPT_STALL:
+ /* Disable interrupts */
+ control = readl(edmac->regs + M2P_CONTROL);
+ control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+ m2p_set_control(edmac, control);
+
+ return INTERRUPT_DONE;
+
+ case M2P_INTERRUPT_NFB:
+ if (ep93xx_dma_advance_active(edmac))
+ m2p_fill_desc(edmac);
+
+ return INTERRUPT_NEXT_BUFFER;
+ }
+
+ return INTERRUPT_UNKNOWN;
+}
+
+/*
+ * M2M DMA implementation
+ *
+ * For the M2M transfers we don't use NFB at all. This is because it simply
+ * doesn't work well with memcpy transfers. When you submit both buffers it is
+ * extremely unlikely that you get an NFB interrupt; instead the controller
+ * reports a DONE interrupt with both buffers already transferred, which means
+ * that we weren't able to update the next buffer.
+ *
+ * So for now we "simulate" NFB by just submitting buffer after buffer
+ * without double buffering.
+ */
+
+static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
+{
+ const struct ep93xx_dma_data *data = edmac->chan.private;
+ u32 control = 0;
+
+ if (!data) {
+ /* This is memcpy channel, nothing to configure */
+ writel(control, edmac->regs + M2M_CONTROL);
+ return 0;
+ }
+
+ switch (data->port) {
+ case EP93XX_DMA_SSP:
+ /*
+ * This was found by experimenting - anything less than 5
+ * causes the channel to perform only a partial transfer, which
+ * leads to problems since we then don't get a DONE interrupt.
+ */
+ control = (5 << M2M_CONTROL_PWSC_SHIFT);
+ control |= M2M_CONTROL_NO_HDSK;
+
+ if (data->direction == DMA_TO_DEVICE) {
+ control |= M2M_CONTROL_DAH;
+ control |= M2M_CONTROL_TM_TX;
+ control |= M2M_CONTROL_RSS_SSPTX;
+ } else {
+ control |= M2M_CONTROL_SAH;
+ control |= M2M_CONTROL_TM_RX;
+ control |= M2M_CONTROL_RSS_SSPRX;
+ }
+ break;
+
+ case EP93XX_DMA_IDE:
+ /*
+ * This IDE part is totally untested. Values below are taken
+ * from the EP93xx User's Guide and might not be correct.
+ */
+ control |= M2M_CONTROL_NO_HDSK;
+ control |= M2M_CONTROL_RSS_IDE;
+ control |= M2M_CONTROL_PW_16;
+
+ if (data->direction == DMA_TO_DEVICE) {
+ /* Worst case from the UG */
+ control = (3 << M2M_CONTROL_PWSC_SHIFT);
+ control |= M2M_CONTROL_DAH;
+ control |= M2M_CONTROL_TM_TX;
+ } else {
+ control = (2 << M2M_CONTROL_PWSC_SHIFT);
+ control |= M2M_CONTROL_SAH;
+ control |= M2M_CONTROL_TM_RX;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ writel(control, edmac->regs + M2M_CONTROL);
+ return 0;
+}
+
+static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
+ /* Just disable the channel */
+ writel(0, edmac->regs + M2M_CONTROL);
+}
+
+static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+
+ if (edmac->buffer == 0) {
+ writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
+ writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
+ writel(desc->size, edmac->regs + M2M_BCR0);
+ } else {
+ writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
+ writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
+ writel(desc->size, edmac->regs + M2M_BCR1);
+ }
+
+ edmac->buffer ^= 1;
+}
+
+static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_data *data = edmac->chan.private;
+ u32 control = readl(edmac->regs + M2M_CONTROL);
+
+ /*
+ * Since we allow clients to configure PW (peripheral width) we always
+ * clear PW bits here and then set them according to what is given in
+ * the runtime configuration.
+ */
+ control &= ~M2M_CONTROL_PW_MASK;
+ control |= edmac->runtime_ctrl;
+
+ m2m_fill_desc(edmac);
+ control |= M2M_CONTROL_DONEINT;
+
+ /*
+ * Now we can finally enable the channel. For M2M channel this must be
+ * done _after_ the BCRx registers are programmed.
+ */
+ control |= M2M_CONTROL_ENABLE;
+ writel(control, edmac->regs + M2M_CONTROL);
+
+ if (!data) {
+ /*
+ * For memcpy channels the software trigger must be asserted
+ * in order to start the memcpy operation.
+ */
+ control |= M2M_CONTROL_START;
+ writel(control, edmac->regs + M2M_CONTROL);
+ }
+}
+
+static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
+{
+ u32 control;
+
+ if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
+ return INTERRUPT_UNKNOWN;
+
+ /* Clear the DONE bit */
+ writel(0, edmac->regs + M2M_INTERRUPT);
+
+ /* Disable interrupts and the channel */
+ control = readl(edmac->regs + M2M_CONTROL);
+ control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
+ writel(control, edmac->regs + M2M_CONTROL);
+
+ /*
+ * Since we only get the DONE interrupt we have to find out ourselves
+ * whether there is still something to process. So we try to advance
+ * the chain and see whether it succeeds.
+ */
+ if (ep93xx_dma_advance_active(edmac)) {
+ edmac->edma->hw_submit(edmac);
+ return INTERRUPT_NEXT_BUFFER;
+ }
+
+ return INTERRUPT_DONE;
+}
+
+/*
+ * DMA engine API implementation
+ */
+
+static struct ep93xx_dma_desc *
+ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *desc, *_desc;
+ struct ep93xx_dma_desc *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
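+ /* Only reuse descriptors that the client has already acknowledged */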
+ if (async_tx_test_ack(&desc->txd)) {
+ list_del_init(&desc->node);
+
+ /* Re-initialize the descriptor */
+ desc->src_addr = 0;
+ desc->dst_addr = 0;
+ desc->size = 0;
+ desc->complete = false;
+ desc->txd.cookie = 0;
+ desc->txd.callback = NULL;
+ desc->txd.callback_param = NULL;
+
+ ret = desc;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&edmac->lock, flags);
+ return ret;
+}
+
+static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
+ struct ep93xx_dma_desc *desc)
+{
+ if (desc) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ list_splice_init(&desc->tx_list, &edmac->free_list);
+ list_add(&desc->node, &edmac->free_list);
+ spin_unlock_irqrestore(&edmac->lock, flags);
+ }
+}
+
+/**
+ * ep93xx_dma_advance_work - start processing the next pending transaction
+ * @edmac: channel
+ *
+ * If we have pending transactions queued and we are currently idle, this
+ * function takes the next queued transaction from the @edmac->queue and
+ * pushes it to the hardware for execution.
+ */
+static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *new;
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
+ spin_unlock_irqrestore(&edmac->lock, flags);
+ return;
+ }
+
+ /* Take the next descriptor from the pending queue */
+ new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
+ list_del_init(&new->node);
+
+ ep93xx_dma_set_active(edmac, new);
+
+ /* Push it to the hardware */
+ edmac->edma->hw_submit(edmac);
+ spin_unlock_irqrestore(&edmac->lock, flags);
+}
+
+static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
+{
+ struct device *dev = desc->txd.chan->device->dev;
+
+ if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ dma_unmap_single(dev, desc->src_addr, desc->size,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, desc->src_addr, desc->size,
+ DMA_TO_DEVICE);
+ }
+ if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ dma_unmap_single(dev, desc->dst_addr, desc->size,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(dev, desc->dst_addr, desc->size,
+ DMA_FROM_DEVICE);
+ }
+}
+
+static void ep93xx_dma_tasklet(unsigned long data)
+{
+ struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
+ struct ep93xx_dma_desc *desc, *d;
+ dma_async_tx_callback callback;
+ void *callback_param;
+ LIST_HEAD(list);
+
+ spin_lock_irq(&edmac->lock);
+ desc = ep93xx_dma_get_active(edmac);
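+ /* Pick up the chain only if the interrupt handler marked it complete */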
+ if (desc->complete) {
+ edmac->last_completed = desc->txd.cookie;
+ list_splice_init(&edmac->active, &list);
+ }
+ spin_unlock_irq(&edmac->lock);
+
+ /* Pick up the next descriptor from the queue */
+ ep93xx_dma_advance_work(edmac);
+
+ callback = desc->txd.callback;
+ callback_param = desc->txd.callback_param;
+
+ /* Now we can release all the chained descriptors */
+ list_for_each_entry_safe(desc, d, &list, node) {
+ /*
+ * For the memcpy channels the API requires us to unmap the
+ * buffers unless requested otherwise.
+ */
+ if (!edmac->chan.private)
+ ep93xx_dma_unmap_buffers(desc);
+
+ ep93xx_dma_desc_put(edmac, desc);
+ }
+
+ if (callback)
+ callback(callback_param);
+}
+
+static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
+{
+ struct ep93xx_dma_chan *edmac = dev_id;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ spin_lock(&edmac->lock);
+
+ switch (edmac->edma->hw_interrupt(edmac)) {
+ case INTERRUPT_DONE:
+ ep93xx_dma_get_active(edmac)->complete = true;
+ tasklet_schedule(&edmac->tasklet);
+ break;
+
+ case INTERRUPT_NEXT_BUFFER:
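+ /* In cyclic mode, run the client callback for every completed period */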
+ if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+ tasklet_schedule(&edmac->tasklet);
+ break;
+
+ default:
+ dev_warn(chan2dev(edmac), "unknown interrupt!\n");
+ ret = IRQ_NONE;
+ break;
+ }
+
+ spin_unlock(&edmac->lock);
+ return ret;
+}
+
+/**
+ * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
+ * @tx: descriptor to be executed
+ *
+ * Function will execute the given descriptor on the hardware or, if the
+ * hardware is busy, queue the descriptor to be executed later. Returns a
+ * cookie which can be used to poll the status of the descriptor.
+ */
+static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
+ struct ep93xx_dma_desc *desc;
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+
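+ /* Assign the next cookie, wrapping back to 1 so it stays positive */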
+ cookie = edmac->chan.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+
+ desc = container_of(tx, struct ep93xx_dma_desc, txd);
+
+ edmac->chan.cookie = cookie;
+ desc->txd.cookie = cookie;
+
+ /*
+ * If nothing is currently being processed, we push this descriptor
+ * directly to the hardware. Otherwise we put the descriptor
+ * on the pending queue.
+ */
+ if (list_empty(&edmac->active)) {
+ ep93xx_dma_set_active(edmac, desc);
+ edmac->edma->hw_submit(edmac);
+ } else {
+ list_add_tail(&desc->node, &edmac->queue);
+ }
+
+ spin_unlock_irqrestore(&edmac->lock, flags);
+ return cookie;
+}
+
+/**
+ * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
+ * @chan: channel to allocate resources
+ *
+ * Function allocates the necessary resources for the given DMA channel and
+ * returns the number of allocated descriptors for the channel. A negative errno
+ * is returned in case of failure.
+ */
+static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_data *data = chan->private;
+ const char *name = dma_chan_name(chan);
+ int ret, i;
+
+ /* Sanity check the channel parameters */
+ if (!edmac->edma->m2m) {
+ if (!data)
+ return -EINVAL;
+ if (data->port < EP93XX_DMA_I2S1 ||
+ data->port > EP93XX_DMA_IRDA)
+ return -EINVAL;
+ if (data->direction != ep93xx_dma_chan_direction(chan))
+ return -EINVAL;
+ } else {
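+ /*
+ * M2M channels: memcpy needs no channel data, but SSP and IDE
+ * require a valid transfer direction.
+ */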
+ if (data) {
+ switch (data->port) {
+ case EP93XX_DMA_SSP:
+ case EP93XX_DMA_IDE:
+ if (data->direction != DMA_TO_DEVICE &&
+ data->direction != DMA_FROM_DEVICE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (data && data->name)
+ name = data->name;
+
+ ret = clk_enable(edmac->clk);
+ if (ret)
+ return ret;
+
+ ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
+ if (ret)
+ goto fail_clk_disable;
+
+ spin_lock_irq(&edmac->lock);
+ edmac->last_completed = 1;
+ edmac->chan.cookie = 1;
+ ret = edmac->edma->hw_setup(edmac);
+ spin_unlock_irq(&edmac->lock);
+
+ if (ret)
+ goto fail_free_irq;
+
+ for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
+ struct ep93xx_dma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "not enough descriptors\n");
+ break;
+ }
+
+ INIT_LIST_HEAD(&desc->tx_list);
+
+ dma_async_tx_descriptor_init(&desc->txd, chan);
+ desc->txd.flags = DMA_CTRL_ACK;
+ desc->txd.tx_submit = ep93xx_dma_tx_submit;
+
+ ep93xx_dma_desc_put(edmac, desc);
+ }
+
+ return i;
+
+fail_free_irq:
+ free_irq(edmac->irq, edmac);
+fail_clk_disable:
+ clk_disable(edmac->clk);
+
+ return ret;
+}
+
+/**
+ * ep93xx_dma_free_chan_resources - release resources for the channel
+ * @chan: channel
+ *
+ * Function releases all the resources allocated for the given channel.
+ * The channel must be idle when this is called.
+ */
+static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_desc *desc, *d;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ BUG_ON(!list_empty(&edmac->active));
+ BUG_ON(!list_empty(&edmac->queue));
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ edmac->edma->hw_shutdown(edmac);
+ edmac->runtime_addr = 0;
+ edmac->runtime_ctrl = 0;
+ edmac->buffer = 0;
+ list_splice_init(&edmac->free_list, &list);
+ spin_unlock_irqrestore(&edmac->lock, flags);
+
+ list_for_each_entry_safe(desc, d, &list, node)
+ kfree(desc);
+
+ clk_disable(edmac->clk);
+ free_irq(edmac->irq, edmac);
+}
+
+/**
+ * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
+ * @chan: channel
+ * @dest: destination bus address
+ * @src: source bus address
+ * @len: size of the transaction
+ * @flags: flags for the descriptor
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_desc *desc, *first;
+ size_t bytes, offset;
+
+ first = NULL;
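+ /* Split the copy into DMA_MAX_CHAN_BYTES sized chunks */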
+ for (offset = 0; offset < len; offset += bytes) {
+ desc = ep93xx_dma_desc_get(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "couldn't get a descriptor\n");
+ goto fail;
+ }
+
+ bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
+
+ desc->src_addr = src + offset;
+ desc->dst_addr = dest + offset;
+ desc->size = bytes;
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->tx_list);
+ }
+
+ first->txd.cookie = -EBUSY;
+ first->txd.flags = flags;
+
+ return &first->txd;
+fail:
+ ep93xx_dma_desc_put(edmac, first);
+ return NULL;
+}
+
+/**
+ * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
+ * @chan: channel
+ * @sgl: list of buffers to transfer
+ * @sg_len: number of entries in @sgl
+ * @dir: direction of the DMA transfer
+ * @flags: flags for the descriptor
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction dir,
+ unsigned long flags)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_desc *desc, *first;
+ struct scatterlist *sg;
+ int i;
+
+ if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
+ dev_warn(chan2dev(edmac),
+ "channel was configured with different direction\n");
+ return NULL;
+ }
+
+ if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
+ dev_warn(chan2dev(edmac),
+ "channel is already used for cyclic transfers\n");
+ return NULL;
+ }
+
+ first = NULL;
+ for_each_sg(sgl, sg, sg_len, i) {
+ size_t sg_len = sg_dma_len(sg);
+
+ if (sg_len > DMA_MAX_CHAN_BYTES) {
+ dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
+ sg_len);
+ goto fail;
+ }
+
+ desc = ep93xx_dma_desc_get(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "couldn't get a descriptor\n");
+ goto fail;
+ }
+
+ if (dir == DMA_TO_DEVICE) {
+ desc->src_addr = sg_dma_address(sg);
+ desc->dst_addr = edmac->runtime_addr;
+ } else {
+ desc->src_addr = edmac->runtime_addr;
+ desc->dst_addr = sg_dma_address(sg);
+ }
+ desc->size = sg_len;
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->tx_list);
+ }
+
+ first->txd.cookie = -EBUSY;
+ first->txd.flags = flags;
+
+ return &first->txd;
+
+fail:
+ ep93xx_dma_desc_put(edmac, first);
+ return NULL;
+}
+
+/**
+ * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
+ * @chan: channel
+ * @dma_addr: DMA mapped address of the buffer
+ * @buf_len: length of the buffer (in bytes)
+ * @period_len: length of a single period
+ * @dir: direction of the operation
+ *
+ * Prepares a descriptor for a cyclic DMA operation. This means that once the
+ * descriptor is submitted, we keep submitting @period_len sized buffers and
+ * calling the callback once each period has elapsed. The transfer terminates
+ * only when the client calls dmaengine_terminate_all() for this channel.
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_data_direction dir)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_desc *desc, *first;
+ size_t offset = 0;
+
+ if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
+ dev_warn(chan2dev(edmac),
+ "channel was configured with different direction\n");
+ return NULL;
+ }
+
+ if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
+ dev_warn(chan2dev(edmac),
+ "channel is already used for cyclic transfers\n");
+ return NULL;
+ }
+
+ if (period_len > DMA_MAX_CHAN_BYTES) {
+ dev_warn(chan2dev(edmac), "too big period length %zu\n",
+ period_len);
+ return NULL;
+ }
+
+ /* Split the buffer into period size chunks */
+ first = NULL;
+ for (offset = 0; offset < buf_len; offset += period_len) {
+ desc = ep93xx_dma_desc_get(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "couldn't get a descriptor\n");
+ goto fail;
+ }
+
+ if (dir == DMA_TO_DEVICE) {
+ desc->src_addr = dma_addr + offset;
+ desc->dst_addr = edmac->runtime_addr;
+ } else {
+ desc->src_addr = edmac->runtime_addr;
+ desc->dst_addr = dma_addr + offset;
+ }
+
+ desc->size = period_len;
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->tx_list);
+ }
+
+ first->txd.cookie = -EBUSY;
+
+ return &first->txd;
+
+fail:
+ ep93xx_dma_desc_put(edmac, first);
+ return NULL;
+}
+
+/**
+ * ep93xx_dma_terminate_all - terminate all transactions
+ * @edmac: channel
+ *
+ * Stops all DMA transactions. All descriptors are put back on the
+ * @edmac->free_list and callbacks are _not_ called.
+ */
+static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *desc, *_d;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ /* First we disable and flush the DMA channel */
+ edmac->edma->hw_shutdown(edmac);
+ clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
+ list_splice_init(&edmac->active, &list);
+ list_splice_init(&edmac->queue, &list);
+ /*
+ * We then re-enable the channel. This way we can continue submitting
+ * the descriptors by just calling ->hw_submit() again.
+ */
+ edmac->edma->hw_setup(edmac);
+ spin_unlock_irqrestore(&edmac->lock, flags);
+
+ list_for_each_entry_safe(desc, _d, &list, node)
+ ep93xx_dma_desc_put(edmac, desc);
+
+ return 0;
+}
+
+static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
+ struct dma_slave_config *config)
+{
+ enum dma_slave_buswidth width;
+ unsigned long flags;
+ u32 addr, ctrl;
+
+ if (!edmac->edma->m2m)
+ return -EINVAL;
+
+ switch (config->direction) {
+ case DMA_FROM_DEVICE:
+ width = config->src_addr_width;
+ addr = config->src_addr;
+ break;
+
+ case DMA_TO_DEVICE:
+ width = config->dst_addr_width;
+ addr = config->dst_addr;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
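+ /* Translate the requested bus width into the M2M control PW bits */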
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ ctrl = 0;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ ctrl = M2M_CONTROL_PW_16;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ ctrl = M2M_CONTROL_PW_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ edmac->runtime_addr = addr;
+ edmac->runtime_ctrl = ctrl;
+ spin_unlock_irqrestore(&edmac->lock, flags);
+
+ return 0;
+}
+
+/**
+ * ep93xx_dma_control - manipulate all pending operations on a channel
+ * @chan: channel
+ * @cmd: control command to perform
+ * @arg: optional argument
+ *
+ * Controls the channel. Function returns %0 in case of success or a negative
+ * error code in case of failure.
+ */
+static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct dma_slave_config *config;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ return ep93xx_dma_terminate_all(edmac);
+
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ return ep93xx_dma_slave_config(edmac, config);
+
+ default:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
+/**
+ * ep93xx_dma_tx_status - check if a transaction is completed
+ * @chan: channel
+ * @cookie: transaction specific cookie
+ * @state: state of the transaction is stored here if given
+ *
+ * This function can be used to query the state of a given transaction.
+ */
+static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ dma_cookie_t last_used, last_completed;
+ enum dma_status ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ last_used = chan->cookie;
+ last_completed = edmac->last_completed;
+ spin_unlock_irqrestore(&edmac->lock, flags);
+
+ ret = dma_async_is_complete(cookie, last_completed, last_used);
+ dma_set_tx_state(state, last_completed, last_used, 0);
+
+ return ret;
+}
+
+/**
+ * ep93xx_dma_issue_pending - push pending transactions to the hardware
+ * @chan: channel
+ *
+ * When this function is called, all pending transactions are pushed to the
+ * hardware and executed.
+ */
+static void ep93xx_dma_issue_pending(struct dma_chan *chan)
+{
+ ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
+}
+
+static int __init ep93xx_dma_probe(struct platform_device *pdev)
+{
+ struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct ep93xx_dma_engine *edma;
+ struct dma_device *dma_dev;
+ size_t edma_size;
+ int ret, i;
+
+ edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
+ edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
+ if (!edma)
+ return -ENOMEM;
+
+ dma_dev = &edma->dma_dev;
+ edma->m2m = platform_get_device_id(pdev)->driver_data;
+ edma->num_channels = pdata->num_channels;
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+ for (i = 0; i < pdata->num_channels; i++) {
+ const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
+ struct ep93xx_dma_chan *edmac = &edma->channels[i];
+
+ edmac->chan.device = dma_dev;
+ edmac->regs = cdata->base;
+ edmac->irq = cdata->irq;
+ edmac->edma = edma;
+
+ edmac->clk = clk_get(NULL, cdata->name);
+ if (IS_ERR(edmac->clk)) {
+ dev_warn(&pdev->dev, "failed to get clock for %s\n",
+ cdata->name);
+ continue;
+ }
+
+ spin_lock_init(&edmac->lock);
+ INIT_LIST_HEAD(&edmac->active);
+ INIT_LIST_HEAD(&edmac->queue);
+ INIT_LIST_HEAD(&edmac->free_list);
+ tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
+ (unsigned long)edmac);
+
+ list_add_tail(&edmac->chan.device_node,
+ &dma_dev->channels);
+ }
+
+ dma_cap_zero(dma_dev->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+
+ dma_dev->dev = &pdev->dev;
+ dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
+ dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
+ dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
+ dma_dev->device_control = ep93xx_dma_control;
+ dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
+ dma_dev->device_tx_status = ep93xx_dma_tx_status;
+
+ dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
+
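+ /* Only M2M supports memcpy; M2P channels are marked DMA_PRIVATE */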
+ if (edma->m2m) {
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
+
+ edma->hw_setup = m2m_hw_setup;
+ edma->hw_shutdown = m2m_hw_shutdown;
+ edma->hw_submit = m2m_hw_submit;
+ edma->hw_interrupt = m2m_hw_interrupt;
+ } else {
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
+ edma->hw_setup = m2p_hw_setup;
+ edma->hw_shutdown = m2p_hw_shutdown;
+ edma->hw_submit = m2p_hw_submit;
+ edma->hw_interrupt = m2p_hw_interrupt;
+ }
+
+ ret = dma_async_device_register(dma_dev);
+ if (unlikely(ret)) {
+ for (i = 0; i < edma->num_channels; i++) {
+ struct ep93xx_dma_chan *edmac = &edma->channels[i];
+ if (!IS_ERR_OR_NULL(edmac->clk))
+ clk_put(edmac->clk);
+ }
+ kfree(edma);
+ } else {
+ dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
+ edma->m2m ? "M" : "P");
+ }
+
+ return ret;
+}
+
+static struct platform_device_id ep93xx_dma_driver_ids[] = {
+ { "ep93xx-dma-m2p", 0 },
+ { "ep93xx-dma-m2m", 1 },
+ { },
+};
+
+static struct platform_driver ep93xx_dma_driver = {
+ .driver = {
+ .name = "ep93xx-dma",
+ },
+ .id_table = ep93xx_dma_driver_ids,
+};
+
+static int __init ep93xx_dma_module_init(void)
+{
+ return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
+}
+subsys_initcall(ep93xx_dma_module_init);
+
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
+MODULE_DESCRIPTION("EP93xx DMA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index e18eaabe92b..d99f71c356b 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -135,7 +135,8 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
if (ret)
return ret;
- imx_dma_config_burstlen(imxdmac->imxdma_channel, imxdmac->watermark_level);
+ imx_dma_config_burstlen(imxdmac->imxdma_channel,
+ imxdmac->watermark_level * imxdmac->word_size);
return 0;
default:
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index b6d1455fa93..7bd7e98548c 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -32,6 +32,8 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <asm/irq.h>
#include <mach/sdma.h>
@@ -65,8 +67,8 @@
#define SDMA_ONCE_RTB 0x060
#define SDMA_XTRIG_CONF1 0x070
#define SDMA_XTRIG_CONF2 0x074
-#define SDMA_CHNENBL0_V2 0x200
-#define SDMA_CHNENBL0_V1 0x080
+#define SDMA_CHNENBL0_IMX35 0x200
+#define SDMA_CHNENBL0_IMX31 0x080
#define SDMA_CHNPRI_0 0x100
/*
@@ -299,13 +301,18 @@ struct sdma_firmware_header {
u32 ram_code_size;
};
+enum sdma_devtype {
+ IMX31_SDMA, /* runs on i.mx31 */
+ IMX35_SDMA, /* runs on i.mx35 and later */
+};
+
struct sdma_engine {
struct device *dev;
struct device_dma_parameters dma_parms;
struct sdma_channel channel[MAX_DMA_CHANNELS];
struct sdma_channel_control *channel_control;
void __iomem *regs;
- unsigned int version;
+ enum sdma_devtype devtype;
unsigned int num_events;
struct sdma_context_data *context;
dma_addr_t context_phys;
@@ -314,6 +321,26 @@ struct sdma_engine {
struct sdma_script_start_addrs *script_addrs;
};
+static struct platform_device_id sdma_devtypes[] = {
+ {
+ .name = "imx31-sdma",
+ .driver_data = IMX31_SDMA,
+ }, {
+ .name = "imx35-sdma",
+ .driver_data = IMX35_SDMA,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, sdma_devtypes);
+
+static const struct of_device_id sdma_dt_ids[] = {
+ { .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], },
+ { .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sdma_dt_ids);
+
#define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */
#define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */
@@ -321,8 +348,8 @@ struct sdma_engine {
static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
- u32 chnenbl0 = (sdma->version == 2 ? SDMA_CHNENBL0_V2 : SDMA_CHNENBL0_V1);
-
+ u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 :
+ SDMA_CHNENBL0_IMX35);
return chnenbl0 + event * 4;
}
@@ -1105,25 +1132,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
}
static int __init sdma_get_firmware(struct sdma_engine *sdma,
- const char *cpu_name, int to_version)
+ const char *fw_name)
{
const struct firmware *fw;
- char *fwname;
const struct sdma_firmware_header *header;
int ret;
const struct sdma_script_start_addrs *addr;
unsigned short *ram_code;
- fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin", cpu_name, to_version);
- if (!fwname)
- return -ENOMEM;
-
- ret = request_firmware(&fw, fwname, sdma->dev);
- if (ret) {
- kfree(fwname);
+ ret = request_firmware(&fw, fw_name, sdma->dev);
+ if (ret)
return ret;
- }
- kfree(fwname);
if (fw->size < sizeof(*header))
goto err_firmware;
@@ -1162,15 +1181,16 @@ static int __init sdma_init(struct sdma_engine *sdma)
int i, ret;
dma_addr_t ccb_phys;
- switch (sdma->version) {
- case 1:
+ switch (sdma->devtype) {
+ case IMX31_SDMA:
sdma->num_events = 32;
break;
- case 2:
+ case IMX35_SDMA:
sdma->num_events = 48;
break;
default:
- dev_err(sdma->dev, "Unknown version %d. aborting\n", sdma->version);
+ dev_err(sdma->dev, "Unknown sdma type %d. aborting\n",
+ sdma->devtype);
return -ENODEV;
}
@@ -1239,6 +1259,10 @@ err_dma_alloc:
static int __init sdma_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(sdma_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ const char *fw_name;
int ret;
int irq;
struct resource *iores;
@@ -1254,7 +1278,7 @@ static int __init sdma_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!iores || irq < 0 || !pdata) {
+ if (!iores || irq < 0) {
ret = -EINVAL;
goto err_irq;
}
@@ -1281,10 +1305,14 @@ static int __init sdma_probe(struct platform_device *pdev)
goto err_request_irq;
sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
- if (!sdma->script_addrs)
+ if (!sdma->script_addrs) {
+ ret = -ENOMEM;
goto err_alloc;
+ }
- sdma->version = pdata->sdma_version;
+ if (of_id)
+ pdev->id_entry = of_id->data;
+ sdma->devtype = pdev->id_entry->driver_data;
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
@@ -1314,10 +1342,30 @@ static int __init sdma_probe(struct platform_device *pdev)
if (ret)
goto err_init;
- if (pdata->script_addrs)
+ if (pdata && pdata->script_addrs)
sdma_add_scripts(sdma, pdata->script_addrs);
- sdma_get_firmware(sdma, pdata->cpu_name, pdata->to_version);
+ if (pdata) {
+ sdma_get_firmware(sdma, pdata->fw_name);
+ } else {
+ /*
+ * Because the device tree does not encode the ROM script address,
+ * the RAM script in the firmware is mandatory for a device tree
+ * probe; otherwise it fails.
+ */
+ ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
+ &fw_name);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get firmware name\n");
+ goto err_init;
+ }
+
+ ret = sdma_get_firmware(sdma, fw_name);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get firmware\n");
+ goto err_init;
+ }
+ }
sdma->dma_device.dev = &pdev->dev;
@@ -1365,7 +1413,9 @@ static int __exit sdma_remove(struct platform_device *pdev)
static struct platform_driver sdma_driver = {
.driver = {
.name = "imx-sdma",
+ .of_match_table = sdma_dt_ids,
},
+ .id_table = sdma_devtypes,
.remove = __exit_p(sdma_remove),
};
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index f653517ef74..8a3fdd87db9 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1351,7 +1351,6 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
return -EAGAIN;
}
device->state = SUSPENDED;
- pci_set_drvdata(pci, device);
pci_save_state(pci);
pci_disable_device(pci);
pci_set_power_state(pci, PCI_D3hot);
@@ -1380,7 +1379,6 @@ int dma_resume(struct pci_dev *pci)
}
device->state = RUNNING;
iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
- pci_set_drvdata(pci, device);
return 0;
}
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index d845dc4b710..f519c93a61e 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -73,10 +73,10 @@
/* provide a lookup table for setting the source address in the base or
* extended descriptor of an xor or pq descriptor
*/
-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
+static const u8 xor_idx_to_desc = 0xe0;
+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
+static const u8 pq_idx_to_desc = 0xf8;
+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index fab37d1cf48..5e3a40f7994 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -72,6 +72,17 @@ static struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
+
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index c1a125e7d1d..6815905a772 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/err.h>
@@ -1705,16 +1706,14 @@ static int __init ipu_probe(struct platform_device *pdev)
ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base);
/* Remap IPU common registers */
- ipu_data.reg_ipu = ioremap(mem_ipu->start,
- mem_ipu->end - mem_ipu->start + 1);
+ ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
if (!ipu_data.reg_ipu) {
ret = -ENOMEM;
goto err_ioremap_ipu;
}
/* Remap Image Converter and Image DMA Controller registers */
- ipu_data.reg_ic = ioremap(mem_ic->start,
- mem_ic->end - mem_ic->start + 1);
+ ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic));
if (!ipu_data.reg_ic) {
ret = -ENOMEM;
goto err_ioremap_ic;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 954e334e01b..9a353c2216d 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1305,7 +1305,7 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
return -ENODEV;
msp->xor_base = devm_ioremap(&pdev->dev, res->start,
- res->end - res->start + 1);
+ resource_size(res));
if (!msp->xor_base)
return -EBUSY;
@@ -1314,7 +1314,7 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
return -ENODEV;
msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
- res->end - res->start + 1);
+ resource_size(res));
if (!msp->xor_high_base)
return -EBUSY;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 88aad4f5400..be641cbd36f 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -327,10 +327,12 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
memset(mxs_chan->ccw, 0, PAGE_SIZE);
- ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
- 0, "mxs-dma", mxs_dma);
- if (ret)
- goto err_irq;
+ if (mxs_chan->chan_irq != NO_IRQ) {
+ ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
+ 0, "mxs-dma", mxs_dma);
+ if (ret)
+ goto err_irq;
+ }
ret = clk_enable(mxs_dma->clk);
if (ret)
@@ -535,6 +537,7 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
mxs_dma_disable_chan(mxs_chan);
+ mxs_dma_reset_chan(mxs_chan);
break;
case DMA_PAUSE:
mxs_dma_pause_chan(mxs_chan);
@@ -707,6 +710,8 @@ static struct platform_device_id mxs_dma_type[] = {
}, {
.name = "mxs-dma-apbx",
.driver_data = MXS_DMA_APBX,
+ }, {
+ /* end of list */
}
};
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index ff5b38f9d45..1ac8d4b580b 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -45,7 +45,8 @@
#define DMA_STATUS_MASK_BITS 0x3
#define DMA_STATUS_SHIFT_BITS 16
#define DMA_STATUS_IRQ(x) (0x1 << (x))
-#define DMA_STATUS_ERR(x) (0x1 << ((x) + 8))
+#define DMA_STATUS0_ERR(x) (0x1 << ((x) + 8))
+#define DMA_STATUS2_ERR(x) (0x1 << (x))
#define DMA_DESC_WIDTH_SHIFT_BITS 12
#define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
@@ -61,6 +62,9 @@
#define MAX_CHAN_NR 8
+#define DMA_MASK_CTL0_MODE 0x33333333
+#define DMA_MASK_CTL2_MODE 0x00003333
+
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
@@ -133,6 +137,7 @@ struct pch_dma {
#define PCH_DMA_CTL3 0x0C
#define PCH_DMA_STS0 0x10
#define PCH_DMA_STS1 0x14
+#define PCH_DMA_STS2 0x18
#define dma_readl(pd, name) \
readl((pd)->membase + PCH_DMA_##name)
@@ -183,13 +188,19 @@ static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
struct pch_dma *pd = to_pd(chan->device);
u32 val;
+ int pos;
+
+ if (chan->chan_id < 8)
+ pos = chan->chan_id;
+ else
+ pos = chan->chan_id + 8;
val = dma_readl(pd, CTL2);
if (enable)
- val |= 0x1 << chan->chan_id;
+ val |= 0x1 << pos;
else
- val &= ~(0x1 << chan->chan_id);
+ val &= ~(0x1 << pos);
dma_writel(pd, CTL2, val);
@@ -202,10 +213,17 @@ static void pdc_set_dir(struct dma_chan *chan)
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
struct pch_dma *pd = to_pd(chan->device);
u32 val;
+ u32 mask_mode;
+ u32 mask_ctl;
if (chan->chan_id < 8) {
val = dma_readl(pd, CTL0);
+ mask_mode = DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * chan->chan_id);
+ mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+ val &= mask_mode;
if (pd_chan->dir == DMA_TO_DEVICE)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
DMA_CTL0_DIR_SHIFT_BITS);
@@ -213,18 +231,24 @@ static void pdc_set_dir(struct dma_chan *chan)
val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
DMA_CTL0_DIR_SHIFT_BITS));
+ val |= mask_ctl;
dma_writel(pd, CTL0, val);
} else {
int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
val = dma_readl(pd, CTL3);
+ mask_mode = DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * ch);
+ mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * ch));
+ val &= mask_mode;
if (pd_chan->dir == DMA_TO_DEVICE)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
DMA_CTL0_DIR_SHIFT_BITS);
else
val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
DMA_CTL0_DIR_SHIFT_BITS));
-
+ val |= mask_ctl;
dma_writel(pd, CTL3, val);
}
@@ -236,33 +260,37 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
struct pch_dma *pd = to_pd(chan->device);
u32 val;
+ u32 mask_ctl;
+ u32 mask_dir;
if (chan->chan_id < 8) {
+ mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+ mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\
+ DMA_CTL0_DIR_SHIFT_BITS);
val = dma_readl(pd, CTL0);
-
- val &= ~(DMA_CTL0_MODE_MASK_BITS <<
- (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+ val &= mask_dir;
val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
-
+ val |= mask_ctl;
dma_writel(pd, CTL0, val);
} else {
int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
-
+ mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+ (DMA_CTL0_BITS_PER_CH * ch));
+ mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\
+ DMA_CTL0_DIR_SHIFT_BITS);
val = dma_readl(pd, CTL3);
-
- val &= ~(DMA_CTL0_MODE_MASK_BITS <<
- (DMA_CTL0_BITS_PER_CH * ch));
+ val &= mask_dir;
val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
-
+ val |= mask_ctl;
dma_writel(pd, CTL3, val);
-
}
dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
chan->chan_id, val);
}
-static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
+static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
struct pch_dma *pd = to_pd(pd_chan->chan.device);
u32 val;
@@ -272,9 +300,27 @@ static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}
+static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
+{
+ struct pch_dma *pd = to_pd(pd_chan->chan.device);
+ u32 val;
+
+ val = dma_readl(pd, STS2);
+ return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
+ DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
+}
+
static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
- if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE)
+ u32 sts;
+
+ if (pd_chan->chan.chan_id < 8)
+ sts = pdc_get_status0(pd_chan);
+ else
+ sts = pdc_get_status2(pd_chan);
+
+
+ if (sts == DMA_STATUS_IDLE)
return true;
else
return false;
@@ -495,11 +541,11 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
list_add_tail(&desc->desc_node, &tmp_list);
}
- spin_lock_bh(&pd_chan->lock);
+ spin_lock_irq(&pd_chan->lock);
list_splice(&tmp_list, &pd_chan->free_list);
pd_chan->descs_allocated = i;
pd_chan->completed_cookie = chan->cookie = 1;
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock_irq(&pd_chan->lock);
pdc_enable_irq(chan, 1);
@@ -517,10 +563,10 @@ static void pd_free_chan_resources(struct dma_chan *chan)
BUG_ON(!list_empty(&pd_chan->active_list));
BUG_ON(!list_empty(&pd_chan->queue));
- spin_lock_bh(&pd_chan->lock);
+ spin_lock_irq(&pd_chan->lock);
list_splice_init(&pd_chan->free_list, &tmp_list);
pd_chan->descs_allocated = 0;
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock_irq(&pd_chan->lock);
list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
pci_pool_free(pd->pool, desc, desc->txd.phys);
@@ -536,10 +582,10 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
dma_cookie_t last_completed;
int ret;
- spin_lock_bh(&pd_chan->lock);
+ spin_lock_irq(&pd_chan->lock);
last_completed = pd_chan->completed_cookie;
last_used = chan->cookie;
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock_irq(&pd_chan->lock);
ret = dma_async_is_complete(cookie, last_completed, last_used);
@@ -654,7 +700,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
if (cmd != DMA_TERMINATE_ALL)
return -ENXIO;
- spin_lock_bh(&pd_chan->lock);
+ spin_lock_irq(&pd_chan->lock);
pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
@@ -664,7 +710,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
list_for_each_entry_safe(desc, _d, &list, desc_node)
pdc_chain_complete(pd_chan, desc);
- spin_unlock_bh(&pd_chan->lock);
+ spin_unlock_irq(&pd_chan->lock);
return 0;
}
@@ -693,30 +739,45 @@ static irqreturn_t pd_irq(int irq, void *devid)
struct pch_dma *pd = (struct pch_dma *)devid;
struct pch_dma_chan *pd_chan;
u32 sts0;
+ u32 sts2;
int i;
- int ret = IRQ_NONE;
+ int ret0 = IRQ_NONE;
+ int ret2 = IRQ_NONE;
sts0 = dma_readl(pd, STS0);
+ sts2 = dma_readl(pd, STS2);
dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);
for (i = 0; i < pd->dma.chancnt; i++) {
pd_chan = &pd->channels[i];
- if (sts0 & DMA_STATUS_IRQ(i)) {
- if (sts0 & DMA_STATUS_ERR(i))
- set_bit(0, &pd_chan->err_status);
+ if (i < 8) {
+ if (sts0 & DMA_STATUS_IRQ(i)) {
+ if (sts0 & DMA_STATUS0_ERR(i))
+ set_bit(0, &pd_chan->err_status);
- tasklet_schedule(&pd_chan->tasklet);
- ret = IRQ_HANDLED;
- }
+ tasklet_schedule(&pd_chan->tasklet);
+ ret0 = IRQ_HANDLED;
+ }
+ } else {
+ if (sts2 & DMA_STATUS_IRQ(i - 8)) {
+ if (sts2 & DMA_STATUS2_ERR(i))
+ set_bit(0, &pd_chan->err_status);
+ tasklet_schedule(&pd_chan->tasklet);
+ ret2 = IRQ_HANDLED;
+ }
+ }
}
/* clear interrupt bits in status register */
- dma_writel(pd, STS0, sts0);
+ if (ret0)
+ dma_writel(pd, STS0, sts0);
+ if (ret2)
+ dma_writel(pd, STS2, sts2);
- return ret;
+ return ret0 | ret2;
}
#ifdef CONFIG_PM
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 6abe1ec1f2c..00eee59e8b3 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -82,7 +82,7 @@ struct dma_pl330_dmac {
spinlock_t pool_lock;
/* Peripheral channels connected to this DMAC */
- struct dma_pl330_chan peripherals[0]; /* keep at end */
+ struct dma_pl330_chan *peripherals; /* keep at end */
};
struct dma_pl330_desc {
@@ -451,8 +451,13 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
desc->txd.cookie = 0;
async_tx_ack(&desc->txd);
- desc->req.rqtype = peri->rqtype;
- desc->req.peri = peri->peri_id;
+ if (peri) {
+ desc->req.rqtype = peri->rqtype;
+ desc->req.peri = peri->peri_id;
+ } else {
+ desc->req.rqtype = MEMTOMEM;
+ desc->req.peri = 0;
+ }
dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
@@ -529,10 +534,10 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
struct pl330_info *pi;
int burst;
- if (unlikely(!pch || !len || !peri))
+ if (unlikely(!pch || !len))
return NULL;
- if (peri->rqtype != MEMTOMEM)
+ if (peri && peri->rqtype != MEMTOMEM)
return NULL;
pi = &pch->dmac->pif;
@@ -577,7 +582,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
int i, burst_size;
dma_addr_t addr;
- if (unlikely(!pch || !sgl || !sg_len))
+ if (unlikely(!pch || !sgl || !sg_len || !peri))
return NULL;
/* Make sure the direction is consistent */
@@ -666,17 +671,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
struct dma_device *pd;
struct resource *res;
int i, ret, irq;
+ int num_chan;
pdat = adev->dev.platform_data;
- if (!pdat || !pdat->nr_valid_peri) {
- dev_err(&adev->dev, "platform data missing\n");
- return -ENODEV;
- }
-
/* Allocate a new DMAC and its Channels */
- pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
- + sizeof(*pdmac), GFP_KERNEL);
+ pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
if (!pdmac) {
dev_err(&adev->dev, "unable to allocate mem\n");
return -ENOMEM;
@@ -685,7 +685,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi = &pdmac->pif;
pi->dev = &adev->dev;
pi->pl330_data = NULL;
- pi->mcbufsz = pdat->mcbuf_sz;
+ pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
res = &adev->res;
request_mem_region(res->start, resource_size(res), "dma-pl330");
@@ -717,27 +717,35 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
- for (i = 0; i < pdat->nr_valid_peri; i++) {
- struct dma_pl330_peri *peri = &pdat->peri[i];
- pch = &pdmac->peripherals[i];
+ num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
+ pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
- switch (peri->rqtype) {
- case MEMTOMEM:
+ for (i = 0; i < num_chan; i++) {
+ pch = &pdmac->peripherals[i];
+ if (pdat) {
+ struct dma_pl330_peri *peri = &pdat->peri[i];
+
+ switch (peri->rqtype) {
+ case MEMTOMEM:
+ dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+ break;
+ case MEMTODEV:
+ case DEVTOMEM:
+ dma_cap_set(DMA_SLAVE, pd->cap_mask);
+ break;
+ default:
+ dev_err(&adev->dev, "DEVTODEV Not Supported\n");
+ continue;
+ }
+ pch->chan.private = peri;
+ } else {
dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- break;
- case MEMTODEV:
- case DEVTOMEM:
- dma_cap_set(DMA_SLAVE, pd->cap_mask);
- break;
- default:
- dev_err(&adev->dev, "DEVTODEV Not Supported\n");
- continue;
+ pch->chan.private = NULL;
}
INIT_LIST_HEAD(&pch->work_list);
spin_lock_init(&pch->lock);
pch->pl330_chid = NULL;
- pch->chan.private = peri;
pch->chan.device = pd;
pch->chan.chan_id = i;
pch->dmac = pdmac;
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 02833004420..7f49235d14b 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -70,12 +70,36 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
- return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
+ u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+
+ if (shdev->pdata->dmaor_is_32bit)
+ return __raw_readl(addr);
+ else
+ return __raw_readw(addr);
}
static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
- __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
+ u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+
+ if (shdev->pdata->dmaor_is_32bit)
+ __raw_writel(data, addr);
+ else
+ __raw_writew(data, addr);
+}
+
+static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
+}
+
+static u32 chcr_read(struct sh_dmae_chan *sh_dc)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}
/*
@@ -120,7 +144,7 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
- u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+ u32 chcr = chcr_read(sh_chan);
if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
return true; /* working */
@@ -130,8 +154,7 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
- struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
@@ -144,8 +167,7 @@ static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
- struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
@@ -169,18 +191,23 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
- u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
+
+ if (shdev->pdata->needs_tend_set)
+ sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
- chcr |= CHCR_DE | CHCR_IE;
- sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
+ chcr |= CHCR_DE | shdev->chcr_ie_bit;
+ chcr_write(sh_chan, chcr & ~CHCR_TE);
}
static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
- u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
- chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
- sh_dmae_writel(sh_chan, chcr, CHCR);
+ chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
+ chcr_write(sh_chan, chcr);
}
static void dmae_init(struct sh_dmae_chan *sh_chan)
@@ -192,7 +219,7 @@ static void dmae_init(struct sh_dmae_chan *sh_chan)
u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
LOG2_DEFAULT_XFER_SIZE);
sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
- sh_dmae_writel(sh_chan, chcr, CHCR);
+ chcr_write(sh_chan, chcr);
}
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
@@ -202,23 +229,25 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
return -EBUSY;
sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
- sh_dmae_writel(sh_chan, val, CHCR);
+ chcr_write(sh_chan, val);
return 0;
}
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
- struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
u16 __iomem *addr = shdev->dmars;
- int shift = chan_pdata->dmars_bit;
+ unsigned int shift = chan_pdata->dmars_bit;
if (dmae_is_busy(sh_chan))
return -EBUSY;
+ if (pdata->no_dmars)
+ return 0;
+
/* in the case of a missing DMARS resource use first memory window */
if (!addr)
addr = (u16 __iomem *)shdev->chan_reg;
@@ -296,9 +325,7 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
static const struct sh_dmae_slave_config *sh_dmae_find_slave(
struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
- struct dma_device *dma_dev = sh_chan->common.device;
- struct sh_dmae_device *shdev = container_of(dma_dev,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
@@ -771,10 +798,8 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
spin_lock_bh(&sh_chan->desc_lock);
/* DMA work check */
- if (dmae_is_busy(sh_chan)) {
- spin_unlock_bh(&sh_chan->desc_lock);
- return;
- }
+ if (dmae_is_busy(sh_chan))
+ goto sh_chan_xfer_ld_queue_end;
/* Find the first not transferred descriptor */
list_for_each_entry(desc, &sh_chan->ld_queue, node)
@@ -788,6 +813,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
break;
}
+sh_chan_xfer_ld_queue_end:
spin_unlock_bh(&sh_chan->desc_lock);
}
@@ -846,7 +872,7 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
spin_lock(&sh_chan->desc_lock);
- chcr = sh_dmae_readl(sh_chan, CHCR);
+ chcr = chcr_read(sh_chan);
if (chcr & CHCR_TE) {
/* DMA stop */
@@ -1144,6 +1170,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
/* platform data */
shdev->pdata = pdata;
+ if (pdata->chcr_offset)
+ shdev->chcr_offset = pdata->chcr_offset;
+ else
+ shdev->chcr_offset = CHCR;
+
+ if (pdata->chcr_ie_bit)
+ shdev->chcr_ie_bit = pdata->chcr_ie_bit;
+ else
+ shdev->chcr_ie_bit = CHCR_IE;
+
platform_set_drvdata(pdev, shdev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 5ae9fc51218..dc56576f9fd 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -47,10 +47,14 @@ struct sh_dmae_device {
struct list_head node;
u32 __iomem *chan_reg;
u16 __iomem *dmars;
+ unsigned int chcr_offset;
+ u32 chcr_ie_bit;
};
#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
+#define to_sh_dev(chan) container_of(chan->common.device,\
+ struct sh_dmae_device, common)
#endif /* __DMA_SHDMA_H */
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 8f222d4db7d..cd3a7c726bf 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -6,6 +6,7 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
@@ -13,6 +14,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/amba/bus.h>
#include <plat/ste_dma40.h>
@@ -44,9 +46,6 @@
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0
-/* Hardware designer of the block */
-#define D40_HW_DESIGNER 0x8
-
/**
* enum 40_command - The different commands and/or statuses.
*
@@ -185,6 +184,8 @@ struct d40_base;
* @log_def: Default logical channel settings.
* @lcla: Space for one dst src pair for logical channel transfers.
* @lcpa: Pointer to dst and src lcpa settings.
+ * @runtime_addr: runtime configured address.
+ * @runtime_direction: runtime configured direction.
*
* This struct can either "be" a logical or a physical channel.
*/
@@ -199,6 +200,7 @@ struct d40_chan {
struct dma_chan chan;
struct tasklet_struct tasklet;
struct list_head client;
+ struct list_head pending_queue;
struct list_head active;
struct list_head queue;
struct stedma40_chan_cfg dma_cfg;
@@ -644,7 +646,20 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
- list_add_tail(&desc->node, &d40c->queue);
+ list_add_tail(&desc->node, &d40c->pending_queue);
+}
+
+static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
+{
+ struct d40_desc *d;
+
+ if (list_empty(&d40c->pending_queue))
+ return NULL;
+
+ d = list_first_entry(&d40c->pending_queue,
+ struct d40_desc,
+ node);
+ return d;
}
static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
@@ -801,6 +816,11 @@ static void d40_term_all(struct d40_chan *d40c)
d40_desc_free(d40c, d40d);
}
+ /* Release pending descriptors */
+ while ((d40d = d40_first_pending(d40c))) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
d40c->pending_tx = 0;
d40c->busy = false;
@@ -2091,7 +2111,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
struct scatterlist *sg;
int i;
- sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
+ sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
for (i = 0; i < periods; i++) {
sg_dma_address(&sg[i]) = dma_addr;
sg_dma_len(&sg[i]) = period_len;
@@ -2151,24 +2171,87 @@ static void d40_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&d40c->lock, flags);
- /* Busy means that pending jobs are already being processed */
+ list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
+
+ /* Busy means that queued jobs are already being processed */
if (!d40c->busy)
(void) d40_queue_start(d40c);
spin_unlock_irqrestore(&d40c->lock, flags);
}
+static int
+dma40_config_to_halfchannel(struct d40_chan *d40c,
+ struct stedma40_half_channel_info *info,
+ enum dma_slave_buswidth width,
+ u32 maxburst)
+{
+ enum stedma40_periph_data_width addr_width;
+ int psize;
+
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ addr_width = STEDMA40_BYTE_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ addr_width = STEDMA40_HALFWORD_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ addr_width = STEDMA40_WORD_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ addr_width = STEDMA40_DOUBLEWORD_WIDTH;
+ break;
+ default:
+ dev_err(d40c->base->dev,
+ "illegal peripheral address width "
+ "requested (%d)\n",
+ width);
+ return -EINVAL;
+ }
+
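+ /* Map the requested maxburst onto a supported logical/physical burst size */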
+ if (chan_is_logical(d40c)) {
+ if (maxburst >= 16)
+ psize = STEDMA40_PSIZE_LOG_16;
+ else if (maxburst >= 8)
+ psize = STEDMA40_PSIZE_LOG_8;
+ else if (maxburst >= 4)
+ psize = STEDMA40_PSIZE_LOG_4;
+ else
+ psize = STEDMA40_PSIZE_LOG_1;
+ } else {
+ if (maxburst >= 16)
+ psize = STEDMA40_PSIZE_PHY_16;
+ else if (maxburst >= 8)
+ psize = STEDMA40_PSIZE_PHY_8;
+ else if (maxburst >= 4)
+ psize = STEDMA40_PSIZE_PHY_4;
+ else
+ psize = STEDMA40_PSIZE_PHY_1;
+ }
+
+ info->data_width = addr_width;
+ info->psize = psize;
+ info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+
+ return 0;
+}
+
/* Runtime reconfiguration extension */
-static void d40_set_runtime_config(struct dma_chan *chan,
- struct dma_slave_config *config)
+static int d40_set_runtime_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
{
struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
- enum dma_slave_buswidth config_addr_width;
+ enum dma_slave_buswidth src_addr_width, dst_addr_width;
dma_addr_t config_addr;
- u32 config_maxburst;
- enum stedma40_periph_data_width addr_width;
- int psize;
+ u32 src_maxburst, dst_maxburst;
+ int ret;
+
+ src_addr_width = config->src_addr_width;
+ src_maxburst = config->src_maxburst;
+ dst_addr_width = config->dst_addr_width;
+ dst_maxburst = config->dst_maxburst;
if (config->direction == DMA_FROM_DEVICE) {
dma_addr_t dev_addr_rx =
@@ -2187,8 +2270,11 @@ static void d40_set_runtime_config(struct dma_chan *chan,
cfg->dir);
cfg->dir = STEDMA40_PERIPH_TO_MEM;
- config_addr_width = config->src_addr_width;
- config_maxburst = config->src_maxburst;
+ /* Configure the memory side */
+ if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ dst_addr_width = src_addr_width;
+ if (dst_maxburst == 0)
+ dst_maxburst = src_maxburst;
} else if (config->direction == DMA_TO_DEVICE) {
dma_addr_t dev_addr_tx =
@@ -2207,68 +2293,39 @@ static void d40_set_runtime_config(struct dma_chan *chan,
cfg->dir);
cfg->dir = STEDMA40_MEM_TO_PERIPH;
- config_addr_width = config->dst_addr_width;
- config_maxburst = config->dst_maxburst;
-
+ /* Configure the memory side */
+ if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ src_addr_width = dst_addr_width;
+ if (src_maxburst == 0)
+ src_maxburst = dst_maxburst;
} else {
dev_err(d40c->base->dev,
"unrecognized channel direction %d\n",
config->direction);
- return;
+ return -EINVAL;
}
- switch (config_addr_width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- addr_width = STEDMA40_BYTE_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- addr_width = STEDMA40_HALFWORD_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- addr_width = STEDMA40_WORD_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_8_BYTES:
- addr_width = STEDMA40_DOUBLEWORD_WIDTH;
- break;
- default:
+ if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
dev_err(d40c->base->dev,
- "illegal peripheral address width "
- "requested (%d)\n",
- config->src_addr_width);
- return;
+ "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
+ src_maxburst,
+ src_addr_width,
+ dst_maxburst,
+ dst_addr_width);
+ return -EINVAL;
}
- if (chan_is_logical(d40c)) {
- if (config_maxburst >= 16)
- psize = STEDMA40_PSIZE_LOG_16;
- else if (config_maxburst >= 8)
- psize = STEDMA40_PSIZE_LOG_8;
- else if (config_maxburst >= 4)
- psize = STEDMA40_PSIZE_LOG_4;
- else
- psize = STEDMA40_PSIZE_LOG_1;
- } else {
- if (config_maxburst >= 16)
- psize = STEDMA40_PSIZE_PHY_16;
- else if (config_maxburst >= 8)
- psize = STEDMA40_PSIZE_PHY_8;
- else if (config_maxburst >= 4)
- psize = STEDMA40_PSIZE_PHY_4;
- else if (config_maxburst >= 2)
- psize = STEDMA40_PSIZE_PHY_2;
- else
- psize = STEDMA40_PSIZE_PHY_1;
- }
+ ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
+ src_addr_width,
+ src_maxburst);
+ if (ret)
+ return ret;
- /* Set up all the endpoint configs */
- cfg->src_info.data_width = addr_width;
- cfg->src_info.psize = psize;
- cfg->src_info.big_endian = false;
- cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
- cfg->dst_info.data_width = addr_width;
- cfg->dst_info.psize = psize;
- cfg->dst_info.big_endian = false;
- cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+ ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
+ dst_addr_width,
+ dst_maxburst);
+ if (ret)
+ return ret;
/* Fill in register values */
if (chan_is_logical(d40c))
@@ -2281,12 +2338,14 @@ static void d40_set_runtime_config(struct dma_chan *chan,
d40c->runtime_addr = config_addr;
d40c->runtime_direction = config->direction;
dev_dbg(d40c->base->dev,
- "configured channel %s for %s, data width %d, "
- "maxburst %d bytes, LE, no flow control\n",
+ "configured channel %s for %s, data width %d/%d, "
+ "maxburst %d/%d elements, LE, no flow control\n",
dma_chan_name(chan),
(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
- config_addr_width,
- config_maxburst);
+ src_addr_width, dst_addr_width,
+ src_maxburst, dst_maxburst);
+
+ return 0;
}
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -2307,9 +2366,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
case DMA_RESUME:
return d40_resume(d40c);
case DMA_SLAVE_CONFIG:
- d40_set_runtime_config(chan,
+ return d40_set_runtime_config(chan,
(struct dma_slave_config *) arg);
- return 0;
default:
break;
}
@@ -2340,6 +2398,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
INIT_LIST_HEAD(&d40c->active);
INIT_LIST_HEAD(&d40c->queue);
+ INIT_LIST_HEAD(&d40c->pending_queue);
INIT_LIST_HEAD(&d40c->client);
tasklet_init(&d40c->tasklet, dma_tasklet,
@@ -2501,25 +2560,6 @@ static int __init d40_phy_res_init(struct d40_base *base)
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
- static const struct d40_reg_val dma_id_regs[] = {
- /* Peripheral Id */
- { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
- { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
- /*
- * D40_DREG_PERIPHID2 Depends on HW revision:
- * DB8500ed has 0x0008,
- * ? has 0x0018,
- * DB8500v1 has 0x0028
- * DB8500v2 has 0x0038
- */
- { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
-
- /* PCell Id */
- { .reg = D40_DREG_CELLID0, .val = 0x000d},
- { .reg = D40_DREG_CELLID1, .val = 0x00f0},
- { .reg = D40_DREG_CELLID2, .val = 0x0005},
- { .reg = D40_DREG_CELLID3, .val = 0x00b1}
- };
struct stedma40_platform_data *plat_data;
struct clk *clk = NULL;
void __iomem *virtbase = NULL;
@@ -2528,8 +2568,9 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
int num_log_chans = 0;
int num_phy_chans;
int i;
- u32 val;
- u32 rev;
+ u32 pid;
+ u32 cid;
+ u8 rev;
clk = clk_get(&pdev->dev, NULL);
@@ -2553,32 +2594,32 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
if (!virtbase)
goto failure;
- /* HW version check */
- for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
- if (dma_id_regs[i].val !=
- readl(virtbase + dma_id_regs[i].reg)) {
- d40_err(&pdev->dev,
- "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
- dma_id_regs[i].val,
- dma_id_regs[i].reg,
- readl(virtbase + dma_id_regs[i].reg));
- goto failure;
- }
- }
+ /* This is just a regular AMBA PrimeCell ID actually */
+ for (pid = 0, i = 0; i < 4; i++)
+ pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
+ & 255) << (i * 8);
+ for (cid = 0, i = 0; i < 4; i++)
+ cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
+ & 255) << (i * 8);
- /* Get silicon revision and designer */
- val = readl(virtbase + D40_DREG_PERIPHID2);
-
- if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
- D40_HW_DESIGNER) {
+ if (cid != AMBA_CID) {
+ d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
+ goto failure;
+ }
+ if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
- val & D40_DREG_PERIPHID2_DESIGNER_MASK,
- D40_HW_DESIGNER);
+ AMBA_MANF_BITS(pid),
+ AMBA_VENDOR_ST);
goto failure;
}
-
- rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
- D40_DREG_PERIPHID2_REV_POS;
+ /*
+ * HW revision:
+ * DB8500ed has revision 0
+ * ? has revision 1
+ * DB8500v1 has revision 2
+ * DB8500v2 has revision 3
+ */
+ rev = AMBA_REV_BITS(pid);
/* The number of physical channels on this HW */
num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
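For reference, the reworked slave configuration above is reached through the standard dmaengine DMA_SLAVE_CONFIG control op (see d40_control() in the hunk above). A minimal client-side sketch follows; the FIFO address and burst values are invented for illustration and are not part of this patch:

#include <linux/dmaengine.h>

/* Hypothetical peripheral RX FIFO address, for illustration only. */
#define EXAMPLE_RX_FIFO_ADDR	0x80120000

static int example_configure_rx(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr	= EXAMPLE_RX_FIFO_ADDR,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,
		/*
		 * dst_addr_width/dst_maxburst left at zero: as in the
		 * hunk above, the driver now mirrors the device-side
		 * settings onto the memory side when they are undefined.
		 */
	};

	/* Ends up in d40_set_runtime_config() via DMA_SLAVE_CONFIG. */
	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&cfg);
}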
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 195ee65ee7f..b44c455158d 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -184,9 +184,6 @@
#define D40_DREG_PERIPHID0 0xFE0
#define D40_DREG_PERIPHID1 0xFE4
#define D40_DREG_PERIPHID2 0xFE8
-#define D40_DREG_PERIPHID2_REV_POS 4
-#define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS)
-#define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf
#define D40_DREG_PERIPHID3 0xFEC
#define D40_DREG_CELLID0 0xFF0
#define D40_DREG_CELLID1 0xFF4
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index db1df59ae2b..9a6a274e692 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -140,7 +140,7 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
if (of_node_to_nid(np) != priv->node)
continue;
csrow->first_page = r.start >> PAGE_SHIFT;
- csrow->nr_pages = (r.end - r.start + 1) >> PAGE_SHIFT;
+ csrow->nr_pages = resource_size(&r) >> PAGE_SHIFT;
csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
csrow->mtype = MEM_XDR;
csrow->edac_mode = EDAC_SECDED;
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c
index aab970760b7..86ad2eee120 100644
--- a/drivers/edac/edac_stub.c
+++ b/drivers/edac/edac_stub.c
@@ -14,7 +14,7 @@
*/
#include <linux/module.h>
#include <linux/edac.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/edac.h>
int edac_op_state = EDAC_OPSTATE_INVAL;
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 04f1e7ce02b..f6cf448d69b 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
char *type, *optype, *err, *msg;
unsigned long error = m->status & 0x1ff0000l;
u32 optypenum = (m->status >> 4) & 0x07;
- u32 core_err_cnt = (m->status >> 38) && 0x7fff;
+ u32 core_err_cnt = (m->status >> 38) & 0x7fff;
u32 dimm = (m->misc >> 16) & 0x3;
u32 channel = (m->misc >> 18) & 0x3;
u32 syndrome = m->misc >> 32;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 38ab8e2cd7f..8af8e864a9c 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -538,15 +538,15 @@ static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
/* we only need the error registers */
r.start += 0xe00;
- if (!devm_request_mem_region(&op->dev, r.start,
- r.end - r.start + 1, pdata->name)) {
+ if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
+ pdata->name)) {
printk(KERN_ERR "%s: Error while requesting mem region\n",
__func__);
res = -EBUSY;
goto err;
}
- pdata->l2_vbase = devm_ioremap(&op->dev, r.start, r.end - r.start + 1);
+ pdata->l2_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
if (!pdata->l2_vbase) {
printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
res = -ENOMEM;
@@ -854,11 +854,11 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
if (err_detect & DDR_EDE_SBE)
- edac_mc_handle_ce(mci, pfn, err_addr & PAGE_MASK,
+ edac_mc_handle_ce(mci, pfn, err_addr & ~PAGE_MASK,
syndrome, row_index, 0, mci->ctl_name);
if (err_detect & DDR_EDE_MBE)
- edac_mc_handle_ue(mci, pfn, err_addr & PAGE_MASK,
+ edac_mc_handle_ue(mci, pfn, err_addr & ~PAGE_MASK,
row_index, mci->ctl_name);
out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
@@ -987,15 +987,15 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
goto err;
}
- if (!devm_request_mem_region(&op->dev, r.start,
- r.end - r.start + 1, pdata->name)) {
+ if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
+ pdata->name)) {
printk(KERN_ERR "%s: Error while requesting mem region\n",
__func__);
res = -EBUSY;
goto err;
}
- pdata->mc_vbase = devm_ioremap(&op->dev, r.start, r.end - r.start + 1);
+ pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
if (!pdata->mc_vbase) {
printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
res = -ENOMEM;
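The PAGE_MASK changes above matter because edac_mc_handle_ce()/_ue() take the byte offset within the faulting page, not a page-aligned address. A tiny standalone illustration, with example values and locally redefined page macros:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long err_addr = 0x12345678;

	/* Old code: page-aligned address. */
	printf("err_addr &  PAGE_MASK = 0x%lx\n", err_addr & PAGE_MASK);   /* 0x12345000 */
	/* New code: offset inside the page, which is what the handlers expect. */
	printf("err_addr & ~PAGE_MASK = 0x%lx\n", err_addr & ~PAGE_MASK);  /* 0x678 */
	return 0;
}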
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c
index 0dd0f633b18..cdae207028a 100644
--- a/drivers/eisa/pci_eisa.c
+++ b/drivers/eisa/pci_eisa.c
@@ -51,7 +51,7 @@ static struct pci_device_id pci_eisa_pci_tbl[] = {
{ 0, }
};
-static struct pci_driver pci_eisa_driver = {
+static struct pci_driver __refdata pci_eisa_driver = {
.name = "pci_eisa",
.id_table = pci_eisa_pci_tbl,
.probe = pci_eisa_init,
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 29d2423fae6..85661b060ed 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -32,7 +32,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include "core.h"
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index b1c11775839..4799393247c 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -216,15 +216,33 @@ struct inbound_phy_packet_event {
struct fw_cdev_event_phy_packet phy_packet;
};
-static inline void __user *u64_to_uptr(__u64 value)
+#ifdef CONFIG_COMPAT
+static void __user *u64_to_uptr(u64 value)
+{
+ if (is_compat_task())
+ return compat_ptr(value);
+ else
+ return (void __user *)(unsigned long)value;
+}
+
+static u64 uptr_to_u64(void __user *ptr)
+{
+ if (is_compat_task())
+ return ptr_to_compat(ptr);
+ else
+ return (u64)(unsigned long)ptr;
+}
+#else
+static inline void __user *u64_to_uptr(u64 value)
{
return (void __user *)(unsigned long)value;
}
-static inline __u64 uptr_to_u64(void __user *ptr)
+static inline u64 uptr_to_u64(void __user *ptr)
{
- return (__u64)(unsigned long)ptr;
+ return (u64)(unsigned long)ptr;
}
+#endif /* CONFIG_COMPAT */
static int fw_device_op_open(struct inode *inode, struct file *file)
{
@@ -253,14 +271,11 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
init_waitqueue_head(&client->wait);
init_waitqueue_head(&client->tx_flush_wait);
INIT_LIST_HEAD(&client->phy_receiver_link);
+ INIT_LIST_HEAD(&client->link);
kref_init(&client->kref);
file->private_data = client;
- mutex_lock(&device->client_list_mutex);
- list_add_tail(&client->link, &device->client_list);
- mutex_unlock(&device->client_list_mutex);
-
return nonseekable_open(inode, file);
}
@@ -451,15 +466,20 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
if (ret != 0)
return -EFAULT;
+ mutex_lock(&client->device->client_list_mutex);
+
client->bus_reset_closure = a->bus_reset_closure;
if (a->bus_reset != 0) {
fill_bus_reset_event(&bus_reset, client);
- if (copy_to_user(u64_to_uptr(a->bus_reset),
- &bus_reset, sizeof(bus_reset)))
- return -EFAULT;
+ ret = copy_to_user(u64_to_uptr(a->bus_reset),
+ &bus_reset, sizeof(bus_reset));
}
+ if (ret == 0 && list_empty(&client->link))
+ list_add_tail(&client->link, &client->device->client_list);
- return 0;
+ mutex_unlock(&client->device->client_list_mutex);
+
+ return ret ? -EFAULT : 0;
}
static int add_client_resource(struct client *client,
@@ -1583,7 +1603,7 @@ static int dispatch_ioctl(struct client *client,
if (_IOC_TYPE(cmd) != '#' ||
_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
_IOC_SIZE(cmd) > sizeof(buffer))
- return -EINVAL;
+ return -ENOTTY;
if (_IOC_DIR(cmd) == _IOC_READ)
memset(&buffer, 0, _IOC_SIZE(cmd));
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 95a47140189..f3b890da1e8 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -38,7 +38,7 @@
#include <linux/string.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include <asm/system.h>
@@ -455,15 +455,20 @@ static struct device_attribute fw_device_attributes[] = {
static int read_rom(struct fw_device *device,
int generation, int index, u32 *data)
{
- int rcode;
+ u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
+ int i, rcode;
/* device->node_id, accessed below, must not be older than generation */
smp_rmb();
- rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
- device->node_id, generation, device->max_speed,
- (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4,
- data, 4);
+ for (i = 10; i < 100; i += 10) {
+ rcode = fw_run_transaction(device->card,
+ TCODE_READ_QUADLET_REQUEST, device->node_id,
+ generation, device->max_speed, offset, data, 4);
+ if (rcode != RCODE_BUSY)
+ break;
+ msleep(i);
+ }
be32_to_cpus(data);
return rcode;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 193ed923314..94d3b494ddf 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -29,7 +29,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include <asm/system.h>
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0fe4e4e6eda..b45be576752 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -9,7 +9,7 @@
#include <linux/slab.h>
#include <linux/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct device;
struct fw_card;
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index b9762d07198..03a7a85d042 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -7,6 +7,7 @@
*/
#include <linux/bug.h>
+#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/ethtool.h>
@@ -73,7 +74,7 @@ struct rfc2734_arp {
__be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
__be32 sip; /* Sender's IP Address */
__be32 tip; /* IP Address of requested hw addr */
-} __attribute__((packed));
+} __packed;
/* This header format is specific to this driver implementation. */
#define FWNET_ALEN 8
@@ -81,7 +82,7 @@ struct rfc2734_arp {
struct fwnet_header {
u8 h_dest[FWNET_ALEN]; /* destination address */
__be16 h_proto; /* packet type ID field */
-} __attribute__((packed));
+} __packed;
/* IPv4 and IPv6 encapsulation header */
struct rfc2734_header {
@@ -261,16 +262,16 @@ static int fwnet_header_rebuild(struct sk_buff *skb)
}
static int fwnet_header_cache(const struct neighbour *neigh,
- struct hh_cache *hh)
+ struct hh_cache *hh, __be16 type)
{
struct net_device *net;
struct fwnet_header *h;
- if (hh->hh_type == cpu_to_be16(ETH_P_802_3))
+ if (type == cpu_to_be16(ETH_P_802_3))
return -1;
net = neigh->dev;
h = (struct fwnet_header *)((u8 *)hh->hh_data + 16 - sizeof(*h));
- h->h_proto = hh->hh_type;
+ h->h_proto = type;
memcpy(h->h_dest, neigh->ha, net->addr_len);
hh->hh_len = FWNET_HLEN;
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 0618145376a..763626b739d 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -37,7 +37,7 @@
#include <linux/uaccess.h>
#include <linux/wait.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include "nosy.h"
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 438e6c83117..57cd3a406ed 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -253,7 +253,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
#define OHCI1394_REGISTER_SIZE 0x800
-#define OHCI_LOOP_COUNT 500
#define OHCI1394_PCI_HCI_Control 0x40
#define SELF_ID_BUF_SIZE 0x800
#define OHCI_TCODE_PHY_PACKET 0x0e
@@ -264,6 +263,7 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define PCI_DEVICE_ID_AGERE_FW643 0x5901
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
+#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
#define QUIRK_CYCLE_TIMER 1
#define QUIRK_RESET_PACKET 2
@@ -513,6 +513,12 @@ static inline void flush_writes(const struct fw_ohci *ohci)
reg_read(ohci, OHCI1394_Version);
}
+/*
+ * Beware! read_phy_reg(), write_phy_reg(), update_phy_reg(), and
+ * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
+ * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
+ * directly. Exceptions are intrinsically serialized contexts like pci_probe.
+ */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
u32 val;
@@ -521,6 +527,9 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr)
reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
for (i = 0; i < 3 + 100; i++) {
val = reg_read(ohci, OHCI1394_PhyControl);
+ if (!~val)
+ return -ENODEV; /* Card was ejected. */
+
if (val & OHCI1394_PhyControl_ReadDone)
return OHCI1394_PhyControl_ReadData(val);
@@ -544,6 +553,9 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
OHCI1394_PhyControl_Write(addr, val));
for (i = 0; i < 3 + 100; i++) {
val = reg_read(ohci, OHCI1394_PhyControl);
+ if (!~val)
+ return -ENODEV; /* Card was ejected. */
+
if (!(val & OHCI1394_PhyControl_WritePending))
return 0;
@@ -629,7 +641,6 @@ static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
ctx->last_buffer_index = index;
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
- flush_writes(ctx->ohci);
}
static void ar_context_release(struct ar_context *ctx)
@@ -1001,7 +1012,6 @@ static void ar_context_run(struct ar_context *ctx)
reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
- flush_writes(ctx->ohci);
}
static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
@@ -1201,14 +1211,14 @@ static void context_stop(struct context *ctx)
reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
ctx->running = false;
- flush_writes(ctx->ohci);
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 1000; i++) {
reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
if ((reg & CONTEXT_ACTIVE) == 0)
return;
- mdelay(1);
+ if (i)
+ udelay(10);
}
fw_error("Error: DMA context still active (0x%08x)\n", reg);
}
@@ -1345,12 +1355,10 @@ static int at_context_queue_packet(struct context *ctx,
context_append(ctx, d, z, 4 - z);
- if (ctx->running) {
+ if (ctx->running)
reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
- flush_writes(ohci);
- } else {
+ else
context_run(ctx, 0);
- }
return 0;
}
@@ -1959,14 +1967,18 @@ static irqreturn_t irq_handler(int irq, void *data)
static int software_reset(struct fw_ohci *ohci)
{
+ u32 val;
int i;
reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
+ for (i = 0; i < 500; i++) {
+ val = reg_read(ohci, OHCI1394_HCControlSet);
+ if (!~val)
+ return -ENODEV; /* Card was ejected. */
- for (i = 0; i < OHCI_LOOP_COUNT; i++) {
- if ((reg_read(ohci, OHCI1394_HCControlSet) &
- OHCI1394_HCControl_softReset) == 0)
+ if (!(val & OHCI1394_HCControl_softReset))
return 0;
+
msleep(1);
}
@@ -2167,8 +2179,13 @@ static int ohci_enable(struct fw_card *card,
ohci_driver_name, ohci)) {
fw_error("Failed to allocate interrupt %d.\n", dev->irq);
pci_disable_msi(dev);
- dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
- ohci->config_rom, ohci->config_rom_bus);
+
+ if (config_rom) {
+ dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+ ohci->next_config_rom,
+ ohci->next_config_rom_bus);
+ ohci->next_config_rom = NULL;
+ }
return -EIO;
}
@@ -2196,7 +2213,9 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_LinkControl_rcvPhyPkt);
ar_context_run(&ohci->ar_request_ctx);
- ar_context_run(&ohci->ar_response_ctx); /* also flushes writes */
+ ar_context_run(&ohci->ar_response_ctx);
+
+ flush_writes(ohci);
/* We are ready to go, reset bus to finish initialization. */
fw_schedule_bus_reset(&ohci->card, false, true);
@@ -3128,7 +3147,6 @@ static void ohci_flush_queue_iso(struct fw_iso_context *base)
&container_of(base, struct iso_context, base)->context;
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
- flush_writes(ctx->ohci);
}
static const struct fw_card_driver ohci_driver = {
@@ -3190,6 +3208,11 @@ static int __devinit pci_probe(struct pci_dev *dev,
int i, err;
size_t size;
+ if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
+ dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
+ return -ENOSYS;
+ }
+
ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
if (ohci == NULL) {
err = -ENOMEM;
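The `!~val` tests added throughout ohci.c lean on the fact that MMIO reads from a surprise-removed PCI card return all ones. A condensed sketch of that polling pattern; the helper name and register layout are made up for illustration:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Poll until 'busy_bit' clears, giving up early if reads return ~0. */
static int example_poll_busy(void __iomem *reg, u32 busy_bit, int tries)
{
	u32 val;
	int i;

	for (i = 0; i < tries; i++) {
		val = readl(reg);
		if (!~val)
			return -ENODEV;		/* all ones: card was ejected */
		if (!(val & busy_bit))
			return 0;
		msleep(1);
	}

	return -ETIMEDOUT;
}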
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 5f29aafd446..eb80b549ed8 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -78,6 +78,7 @@
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/pstore.h>
#include <asm/uaccess.h>
@@ -89,6 +90,8 @@ MODULE_DESCRIPTION("sysfs interface to EFI Variables");
MODULE_LICENSE("GPL");
MODULE_VERSION(EFIVARS_VERSION);
+#define DUMP_NAME_LEN 52
+
/*
* The maximum size of VariableName + Data = 1024
* Therefore, it's reasonable to save that much
@@ -119,6 +122,10 @@ struct efivar_attribute {
ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count);
};
+#define PSTORE_EFI_ATTRIBUTES \
+ (EFI_VARIABLE_NON_VOLATILE | \
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+ EFI_VARIABLE_RUNTIME_ACCESS)
#define EFIVAR_ATTR(_name, _mode, _show, _store) \
struct efivar_attribute efivar_attr_##_name = { \
@@ -141,38 +148,72 @@ efivar_create_sysfs_entry(struct efivars *efivars,
/* Return the number of unicode characters in data */
static unsigned long
-utf8_strlen(efi_char16_t *data, unsigned long maxlength)
+utf16_strnlen(efi_char16_t *s, size_t maxlength)
{
unsigned long length = 0;
- while (*data++ != 0 && length < maxlength)
+ while (*s++ != 0 && length < maxlength)
length++;
return length;
}
+static inline unsigned long
+utf16_strlen(efi_char16_t *s)
+{
+ return utf16_strnlen(s, ~0UL);
+}
+
/*
* Return the number of bytes is the length of this string
* Note: this is NOT the same as the number of unicode characters
*/
static inline unsigned long
-utf8_strsize(efi_char16_t *data, unsigned long maxlength)
+utf16_strsize(efi_char16_t *data, unsigned long maxlength)
{
- return utf8_strlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
+ return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
+}
+
+static inline int
+utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
+{
+ while (1) {
+ if (len == 0)
+ return 0;
+ if (*a < *b)
+ return -1;
+ if (*a > *b)
+ return 1;
+ if (*a == 0) /* implies *b == 0 */
+ return 0;
+ a++;
+ b++;
+ len--;
+ }
}
static efi_status_t
-get_var_data(struct efivars *efivars, struct efi_variable *var)
+get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
{
efi_status_t status;
- spin_lock(&efivars->lock);
var->DataSize = 1024;
status = efivars->ops->get_variable(var->VariableName,
&var->VendorGuid,
&var->Attributes,
&var->DataSize,
var->Data);
+ return status;
+}
+
+static efi_status_t
+get_var_data(struct efivars *efivars, struct efi_variable *var)
+{
+ efi_status_t status;
+
+ spin_lock(&efivars->lock);
+ status = get_var_data_locked(efivars, var);
spin_unlock(&efivars->lock);
+
if (status != EFI_SUCCESS) {
printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n",
status);
@@ -387,12 +428,180 @@ static struct kobj_type efivar_ktype = {
.default_attrs = def_attrs,
};
+static struct pstore_info efi_pstore_info;
+
static inline void
efivar_unregister(struct efivar_entry *var)
{
kobject_put(&var->kobj);
}
+#ifdef CONFIG_PSTORE
+
+static int efi_pstore_open(struct pstore_info *psi)
+{
+ struct efivars *efivars = psi->data;
+
+ spin_lock(&efivars->lock);
+ efivars->walk_entry = list_first_entry(&efivars->list,
+ struct efivar_entry, list);
+ return 0;
+}
+
+static int efi_pstore_close(struct pstore_info *psi)
+{
+ struct efivars *efivars = psi->data;
+
+ spin_unlock(&efivars->lock);
+ return 0;
+}
+
+static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
+ struct timespec *timespec, struct pstore_info *psi)
+{
+ efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ struct efivars *efivars = psi->data;
+ char name[DUMP_NAME_LEN];
+ int i;
+ unsigned int part, size;
+ unsigned long time;
+
+ while (&efivars->walk_entry->list != &efivars->list) {
+ if (!efi_guidcmp(efivars->walk_entry->var.VendorGuid,
+ vendor)) {
+ for (i = 0; i < DUMP_NAME_LEN; i++) {
+ name[i] = efivars->walk_entry->var.VariableName[i];
+ }
+ if (sscanf(name, "dump-type%u-%u-%lu", type, &part, &time) == 3) {
+ *id = part;
+ timespec->tv_sec = time;
+ timespec->tv_nsec = 0;
+ get_var_data_locked(efivars, &efivars->walk_entry->var);
+ size = efivars->walk_entry->var.DataSize;
+ memcpy(psi->buf, efivars->walk_entry->var.Data, size);
+ efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
+ struct efivar_entry, list);
+ return size;
+ }
+ }
+ efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
+ struct efivar_entry, list);
+ }
+ return 0;
+}
+
+static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part,
+ size_t size, struct pstore_info *psi)
+{
+ char name[DUMP_NAME_LEN];
+ char stub_name[DUMP_NAME_LEN];
+ efi_char16_t efi_name[DUMP_NAME_LEN];
+ efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
+ struct efivars *efivars = psi->data;
+ struct efivar_entry *entry, *found = NULL;
+ int i;
+
+ sprintf(stub_name, "dump-type%u-%u-", type, part);
+ sprintf(name, "%s%lu", stub_name, get_seconds());
+
+ spin_lock(&efivars->lock);
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name[i] = stub_name[i];
+
+ /*
+ * Clean up any entries with the same name
+ */
+
+ list_for_each_entry(entry, &efivars->list, list) {
+ get_var_data_locked(efivars, &entry->var);
+
+ if (efi_guidcmp(entry->var.VendorGuid, vendor))
+ continue;
+ if (utf16_strncmp(entry->var.VariableName, efi_name,
+ utf16_strlen(efi_name)))
+ continue;
+ /* Needs to be a prefix */
+ if (entry->var.VariableName[utf16_strlen(efi_name)] == 0)
+ continue;
+
+ /* found */
+ found = entry;
+ efivars->ops->set_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ PSTORE_EFI_ATTRIBUTES,
+ 0, NULL);
+ }
+
+ if (found)
+ list_del(&found->list);
+
+ for (i = 0; i < DUMP_NAME_LEN; i++)
+ efi_name[i] = name[i];
+
+ efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES,
+ size, psi->buf);
+
+ spin_unlock(&efivars->lock);
+
+ if (found)
+ efivar_unregister(found);
+
+ if (size)
+ efivar_create_sysfs_entry(efivars,
+ utf16_strsize(efi_name,
+ DUMP_NAME_LEN * 2),
+ efi_name, &vendor);
+
+ return part;
+}
+
+static int efi_pstore_erase(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi)
+{
+ efi_pstore_write(type, id, 0, psi);
+
+ return 0;
+}
+#else
+static int efi_pstore_open(struct pstore_info *psi)
+{
+ return 0;
+}
+
+static int efi_pstore_close(struct pstore_info *psi)
+{
+ return 0;
+}
+
+static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
+ struct timespec *time, struct pstore_info *psi)
+{
+ return -1;
+}
+
+static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part,
+ size_t size, struct pstore_info *psi)
+{
+ return 0;
+}
+
+static int efi_pstore_erase(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi)
+{
+ return 0;
+}
+#endif
+
+static struct pstore_info efi_pstore_info = {
+ .owner = THIS_MODULE,
+ .name = "efi",
+ .open = efi_pstore_open,
+ .close = efi_pstore_close,
+ .read = efi_pstore_read,
+ .write = efi_pstore_write,
+ .erase = efi_pstore_erase,
+};
static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -414,8 +623,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
* Does this variable already exist?
*/
list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
- strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024);
- strsize2 = utf8_strsize(new_var->VariableName, 1024);
+ strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
+ strsize2 = utf16_strsize(new_var->VariableName, 1024);
if (strsize1 == strsize2 &&
!memcmp(&(search_efivar->var.VariableName),
new_var->VariableName, strsize1) &&
@@ -447,8 +656,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
/* Create the entry in sysfs. Locking is not required here */
status = efivar_create_sysfs_entry(efivars,
- utf8_strsize(new_var->VariableName,
- 1024),
+ utf16_strsize(new_var->VariableName,
+ 1024),
new_var->VariableName,
&new_var->VendorGuid);
if (status) {
@@ -477,8 +686,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
* Does this variable already exist?
*/
list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
- strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024);
- strsize2 = utf8_strsize(del_var->VariableName, 1024);
+ strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
+ strsize2 = utf16_strsize(del_var->VariableName, 1024);
if (strsize1 == strsize2 &&
!memcmp(&(search_efivar->var.VariableName),
del_var->VariableName, strsize1) &&
@@ -763,6 +972,16 @@ int register_efivars(struct efivars *efivars,
if (error)
unregister_efivars(efivars);
+ efivars->efi_pstore_info = efi_pstore_info;
+
+ efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
+ if (efivars->efi_pstore_info.buf) {
+ efivars->efi_pstore_info.bufsize = 1024;
+ efivars->efi_pstore_info.data = efivars;
+ mutex_init(&efivars->efi_pstore_info.buf_mutex);
+ pstore_register(&efivars->efi_pstore_info);
+ }
+
out:
kfree(variable_name);
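The variable names written by efi_pstore_write() encode the record type, part and timestamp in plain ASCII, which is what the sscanf() in efi_pstore_read() parses back. A standalone round-trip of that naming scheme, with arbitrary example values:

#include <stdio.h>

#define DUMP_NAME_LEN 52

int main(void)
{
	char name[DUMP_NAME_LEN];
	unsigned int type, part;
	unsigned long time;

	/* Same format as the driver: dump-type<type>-<part>-<seconds> */
	snprintf(name, sizeof(name), "dump-type%u-%u-%lu", 2u, 1u, 1300000000ul);

	if (sscanf(name, "dump-type%u-%u-%lu", &type, &part, &time) == 3)
		printf("type=%u part=%u time=%lu\n", type, part, time);

	return 0;
}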
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 87096b6ca5c..2f21b0bfe65 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -13,6 +13,7 @@ menu "Google Firmware Drivers"
config GOOGLE_SMI
tristate "SMI interface for Google platforms"
depends on ACPI && DMI
+ select EFI
select EFI_VARS
help
Say Y here if you want to enable SMI callbacks for Google
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index fa7f0b3e81d..68810fd1a59 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -869,8 +869,6 @@ static __init int gsmi_init(void)
goto out_err;
}
- printk(KERN_INFO "gsmi version " DRIVER_VERSION " loaded\n");
-
/* Register in the firmware directory */
ret = -ENOMEM;
gsmi_kobj = kobject_create_and_add("gsmi", firmware_kobj);
@@ -890,12 +888,13 @@ static __init int gsmi_init(void)
ret = sysfs_create_files(gsmi_kobj, gsmi_attrs);
if (ret) {
printk(KERN_INFO "gsmi: Failed to add attrs");
- goto out_err;
+ goto out_remove_bin_file;
}
- if (register_efivars(&efivars, &efivar_ops, gsmi_kobj)) {
+ ret = register_efivars(&efivars, &efivar_ops, gsmi_kobj);
+ if (ret) {
printk(KERN_INFO "gsmi: Failed to register efivars\n");
- goto out_err;
+ goto out_remove_sysfs_files;
}
register_reboot_notifier(&gsmi_reboot_notifier);
@@ -903,9 +902,15 @@ static __init int gsmi_init(void)
atomic_notifier_chain_register(&panic_notifier_list,
&gsmi_panic_notifier);
+ printk(KERN_INFO "gsmi version " DRIVER_VERSION " loaded\n");
+
return 0;
- out_err:
+out_remove_sysfs_files:
+ sysfs_remove_files(gsmi_kobj, gsmi_attrs);
+out_remove_bin_file:
+ sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
+out_err:
kobject_put(gsmi_kobj);
gsmi_buf_free(gsmi_dev.param_buf);
gsmi_buf_free(gsmi_dev.data_buf);
@@ -925,6 +930,8 @@ static void __exit gsmi_exit(void)
&gsmi_panic_notifier);
unregister_efivars(&efivars);
+ sysfs_remove_files(gsmi_kobj, gsmi_attrs);
+ sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
kobject_put(gsmi_kobj);
gsmi_buf_free(gsmi_dev.param_buf);
gsmi_buf_free(gsmi_dev.data_buf);
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index ce33f462695..c811cb10790 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -566,6 +566,11 @@ static mode_t __init ibft_check_initiator_for(void *data, int type)
return rc;
}
+static void ibft_kobj_release(void *data)
+{
+ kfree(data);
+}
+
/*
* Helper function for ibft_register_kobjects.
*/
@@ -595,7 +600,8 @@ static int __init ibft_create_kobject(struct acpi_table_ibft *header,
boot_kobj = iscsi_boot_create_initiator(boot_kset, hdr->index,
ibft_kobj,
ibft_attr_show_initiator,
- ibft_check_initiator_for);
+ ibft_check_initiator_for,
+ ibft_kobj_release);
if (!boot_kobj) {
rc = -ENOMEM;
goto free_ibft_obj;
@@ -610,7 +616,8 @@ static int __init ibft_create_kobject(struct acpi_table_ibft *header,
boot_kobj = iscsi_boot_create_ethernet(boot_kset, hdr->index,
ibft_kobj,
ibft_attr_show_nic,
- ibft_check_nic_for);
+ ibft_check_nic_for,
+ ibft_kobj_release);
if (!boot_kobj) {
rc = -ENOMEM;
goto free_ibft_obj;
@@ -625,7 +632,8 @@ static int __init ibft_create_kobject(struct acpi_table_ibft *header,
boot_kobj = iscsi_boot_create_target(boot_kset, hdr->index,
ibft_kobj,
ibft_attr_show_target,
- ibft_check_tgt_for);
+ ibft_check_tgt_for,
+ ibft_kobj_release);
if (!boot_kobj) {
rc = -ENOMEM;
goto free_ibft_obj;
diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c
index c19cd2c39fa..f10fc521951 100644
--- a/drivers/firmware/sigma.c
+++ b/drivers/firmware/sigma.c
@@ -11,6 +11,7 @@
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
+#include <linux/module.h>
#include <linux/sigma.h>
/* Return: 0==OK, <0==error, =1 ==no more actions */
@@ -113,3 +114,5 @@ int process_sigma_firmware(struct i2c_client *client, const char *name)
return ret;
}
EXPORT_SYMBOL(process_sigma_firmware);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 2967002a9f8..d539efd96d4 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -63,33 +63,74 @@ config GPIO_SYSFS
Kernel drivers may also request that a particular GPIO be
exported to userspace; this can be useful when debugging.
+config GPIO_GENERIC
+ tristate
+
# put drivers in the right section, in alphabetical order
+config GPIO_DA9052
+ tristate "Dialog DA9052 GPIO"
+ depends on PMIC_DA9052
+ help
+ Say yes here to enable the GPIO driver for the DA9052 chip.
+
config GPIO_MAX730X
tristate
comment "Memory mapped GPIO drivers:"
-config GPIO_BASIC_MMIO_CORE
- tristate
- help
- Provides core functionality for basic memory-mapped GPIO controllers.
-
-config GPIO_BASIC_MMIO
- tristate "Basic memory-mapped GPIO controllers support"
- select GPIO_BASIC_MMIO_CORE
+config GPIO_GENERIC_PLATFORM
+ tristate "Generic memory-mapped GPIO controller support (MMIO platform device)"
+ select GPIO_GENERIC
help
- Say yes here to support basic memory-mapped GPIO controllers.
+ Say yes here to support basic platform_device memory-mapped GPIO controllers.
config GPIO_IT8761E
tristate "IT8761E GPIO support"
help
Say yes here to support GPIO functionality of IT8761E super I/O chip.
+config GPIO_EP93XX
+ def_bool y
+ depends on ARCH_EP93XX
+ select GPIO_GENERIC
+
config GPIO_EXYNOS4
def_bool y
depends on CPU_EXYNOS4210
+config GPIO_MPC5200
+ def_bool y
+ depends on PPC_MPC52xx
+
+config GPIO_MSM_V1
+ tristate "Qualcomm MSM GPIO v1"
+ depends on GPIOLIB && ARCH_MSM
+ help
+ Say yes here to support the GPIO interface on ARM v6 based
+ Qualcomm MSM chips. Most of the pins on the MSM can be
+ selected for GPIO, and are controlled by this driver.
+
+config GPIO_MSM_V2
+ tristate "Qualcomm MSM GPIO v2"
+ depends on GPIOLIB && ARCH_MSM
+ help
+ Say yes here to support the GPIO interface on ARM v7 based
+ Qualcomm MSM chips. Most of the pins on the MSM can be
+ selected for GPIO, and are controlled by this driver.
+
+config GPIO_MXC
+ def_bool y
+ depends on ARCH_MXC
+ select GPIO_GENERIC
+ select GENERIC_IRQ_CHIP
+
+config GPIO_MXS
+ def_bool y
+ depends on ARCH_MXS
+ select GPIO_GENERIC
+ select GENERIC_IRQ_CHIP
+
config GPIO_PLAT_SAMSUNG
def_bool y
depends on SAMSUNG_GPIOLIB_4BIT
@@ -137,9 +178,6 @@ config GPIO_SCH
The Intel Tunnel Creek processor has 5 GPIOs powered by the
core power rail and 9 from suspend power supply.
- This driver can also be built as a module. If so, the module
- will be called sch-gpio.
-
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
depends on MFD_SUPPORT && PCI
@@ -202,9 +240,6 @@ config GPIO_PCA953X
16 bits: pca9535, pca9539, pca9555, tca6416
- This driver can also be built as a module. If so, the module
- will be called pca953x.
-
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
depends on GPIO_PCA953X=y
@@ -261,6 +296,12 @@ config GPIO_TC3589X
This enables support for the GPIOs found on the TC3589X
I/O Expander.
+config GPIO_TPS65912
+ tristate "TI TPS65912 GPIO"
+ depends on (MFD_TPS65912_I2C || MFD_TPS65912_SPI)
+ help
+ This driver supports the TPS65912 GPIO chip.
+
config GPIO_TWL4030
tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
depends on TWL4030_CORE
@@ -296,17 +337,12 @@ config GPIO_ADP5520
This option enables support for on-chip GPIO found
on Analog Devices ADP5520 PMICs.
- To compile this driver as a module, choose M here: the module will
- be called adp5520-gpio.
-
config GPIO_ADP5588
tristate "ADP5588 I2C GPIO expander"
depends on I2C
help
This option enables support for 18 GPIOs found
on Analog Devices ADP5588 GPIO Expanders.
- To compile this driver as a module, choose M here: the module will be
- called adp5588-gpio.
config GPIO_ADP5588_IRQ
bool "Interrupt controller support for ADP5588"
@@ -398,10 +434,11 @@ config GPIO_MAX7301
GPIO driver for Maxim MAX7301 SPI-based GPIO expander.
config GPIO_MCP23S08
- tristate "Microchip MCP23Sxx I/O expander"
- depends on SPI_MASTER
+ tristate "Microchip MCP23xxx I/O expander"
+ depends on SPI_MASTER || I2C
help
- SPI driver for Microchip MCP23S08/MPC23S17 I/O expanders.
+ SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
+ I/O expanders.
This provides a GPIO interface supporting inputs and outputs.
config GPIO_MC33880
@@ -428,9 +465,6 @@ config GPIO_UCB1400
This enables support for the Philips UCB1400 GPIO pins.
The UCB1400 is an AC97 audio codec.
- To compile this driver as a module, choose M here: the
- module will be called ucb1400_gpio.
-
comment "MODULbus GPIO expanders:"
config GPIO_JANZ_TTL
@@ -441,7 +475,7 @@ config GPIO_JANZ_TTL
This driver provides support for driving the pins in output
mode only. Input mode is not supported.
-config AB8500_GPIO
+config GPIO_AB8500
bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions"
depends on AB8500_CORE && BROKEN
help
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index b605f8ec6fb..9588948c96f 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -4,47 +4,59 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIOLIB) += gpiolib.o
-obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
-obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
-obj-$(CONFIG_GPIO_BASIC_MMIO_CORE) += basic_mmio_gpio.o
-obj-$(CONFIG_GPIO_BASIC_MMIO) += basic_mmio_gpio.o
+# Device drivers. Generally keep list sorted alphabetically
+obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
+
+obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
+obj-$(CONFIG_GPIO_AB8500) += gpio-ab8500.o
+obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
+obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
+obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
+obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
+obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
+obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_EXYNOS4) += gpio-exynos4.o
+obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
+obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
+obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o
+obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
+obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
+obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
+obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
+obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
+obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
+obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
+obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
+obj-$(CONFIG_GPIO_MSM_V1) += gpio-msm-v1.o
+obj-$(CONFIG_GPIO_MSM_V2) += gpio-msm-v2.o
+obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
+obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
+obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o
+obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o
+obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
+obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
+obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
+obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
+obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
+
obj-$(CONFIG_GPIO_PLAT_SAMSUNG) += gpio-plat-samsung.o
obj-$(CONFIG_GPIO_S5PC100) += gpio-s5pc100.o
obj-$(CONFIG_GPIO_S5PV210) += gpio-s5pv210.o
-obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
-obj-$(CONFIG_GPIO_MAX730X) += max730x.o
-obj-$(CONFIG_GPIO_MAX7300) += max7300.o
-obj-$(CONFIG_GPIO_MAX7301) += max7301.o
-obj-$(CONFIG_GPIO_MAX732X) += max732x.o
-obj-$(CONFIG_GPIO_MC33880) += mc33880.o
-obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
-obj-$(CONFIG_GPIO_74X164) += 74x164.o
-obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o
-obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
-obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
-obj-$(CONFIG_GPIO_PCH) += pch_gpio.o
-obj-$(CONFIG_GPIO_PL061) += pl061.o
-obj-$(CONFIG_GPIO_STMPE) += stmpe-gpio.o
-obj-$(CONFIG_GPIO_TC3589X) += tc3589x-gpio.o
-obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
-obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
-obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
-obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
-obj-$(CONFIG_GPIO_CS5535) += cs5535-gpio.o
-obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
-obj-$(CONFIG_GPIO_IT8761E) += it8761e_gpio.o
-obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
-obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
-obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o
-obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o
-obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
+
+obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
+obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
+obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o
+obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
+obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
+obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
+obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
+obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
+obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_MACH_U300) += gpio-u300.o
-obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o
-obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
-obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
-obj-$(CONFIG_GPIO_SX150X) += sx150x.o
-obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o
-obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o
-obj-$(CONFIG_AB8500_GPIO) += ab8500-gpio.o
-obj-$(CONFIG_GPIO_TPS65910) += tps65910-gpio.o
+obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
+obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
+obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
+obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
+obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
+obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
+obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o
diff --git a/drivers/gpio/74x164.c b/drivers/gpio/gpio-74x164.c
index 84e07021983..ff525c0958d 100644
--- a/drivers/gpio/74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -16,9 +16,6 @@
#include <linux/gpio.h>
#include <linux/slab.h>
-#define GEN_74X164_GPIO_COUNT 8
-
-
struct gen_74x164_chip {
struct spi_device *spi;
struct gpio_chip gpio_chip;
@@ -26,9 +23,7 @@ struct gen_74x164_chip {
u8 port_config;
};
-static void gen_74x164_set_value(struct gpio_chip *, unsigned, int);
-
-static struct gen_74x164_chip *gpio_to_chip(struct gpio_chip *gc)
+static struct gen_74x164_chip *gpio_to_74x164_chip(struct gpio_chip *gc)
{
return container_of(gc, struct gen_74x164_chip, gpio_chip);
}
@@ -39,16 +34,9 @@ static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
&chip->port_config, sizeof(chip->port_config));
}
-static int gen_74x164_direction_output(struct gpio_chip *gc,
- unsigned offset, int val)
-{
- gen_74x164_set_value(gc, offset, val);
- return 0;
-}
-
static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset)
{
- struct gen_74x164_chip *chip = gpio_to_chip(gc);
+ struct gen_74x164_chip *chip = gpio_to_74x164_chip(gc);
int ret;
mutex_lock(&chip->lock);
@@ -61,7 +49,7 @@ static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset)
static void gen_74x164_set_value(struct gpio_chip *gc,
unsigned offset, int val)
{
- struct gen_74x164_chip *chip = gpio_to_chip(gc);
+ struct gen_74x164_chip *chip = gpio_to_74x164_chip(gc);
mutex_lock(&chip->lock);
if (val)
@@ -73,6 +61,13 @@ static void gen_74x164_set_value(struct gpio_chip *gc,
mutex_unlock(&chip->lock);
}
+static int gen_74x164_direction_output(struct gpio_chip *gc,
+ unsigned offset, int val)
+{
+ gen_74x164_set_value(gc, offset, val);
+ return 0;
+}
+
static int __devinit gen_74x164_probe(struct spi_device *spi)
{
struct gen_74x164_chip *chip;
@@ -104,12 +99,12 @@ static int __devinit gen_74x164_probe(struct spi_device *spi)
chip->spi = spi;
- chip->gpio_chip.label = GEN_74X164_DRIVER_NAME,
- chip->gpio_chip.direction_output = gen_74x164_direction_output;
+ chip->gpio_chip.label = spi->modalias;
+ chip->gpio_chip.direction_output = gen_74x164_direction_output;
chip->gpio_chip.get = gen_74x164_get_value;
chip->gpio_chip.set = gen_74x164_set_value;
chip->gpio_chip.base = pdata->base;
- chip->gpio_chip.ngpio = GEN_74X164_GPIO_COUNT;
+ chip->gpio_chip.ngpio = 8;
chip->gpio_chip.can_sleep = 1;
chip->gpio_chip.dev = &spi->dev;
chip->gpio_chip.owner = THIS_MODULE;
@@ -157,7 +152,7 @@ static int __devexit gen_74x164_remove(struct spi_device *spi)
static struct spi_driver gen_74x164_driver = {
.driver = {
- .name = GEN_74X164_DRIVER_NAME,
+ .name = "74x164",
.owner = THIS_MODULE,
},
.probe = gen_74x164_probe,
diff --git a/drivers/gpio/ab8500-gpio.c b/drivers/gpio/gpio-ab8500.c
index 970053c89ff..050c05d9189 100644
--- a/drivers/gpio/ab8500-gpio.c
+++ b/drivers/gpio/gpio-ab8500.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
@@ -517,5 +516,5 @@ module_exit(ab8500_gpio_exit);
MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>");
MODULE_DESCRIPTION("Driver allows to use AB8500 unused pins to be used as GPIO");
-MODULE_ALIAS("AB8500 GPIO driver");
+MODULE_ALIAS("platform:ab8500-gpio");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/adp5520-gpio.c b/drivers/gpio/gpio-adp5520.c
index 9f278153700..9f278153700 100644
--- a/drivers/gpio/adp5520-gpio.c
+++ b/drivers/gpio/gpio-adp5520.c
diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/gpio-adp5588.c
index 3525ad91877..3525ad91877 100644
--- a/drivers/gpio/adp5588-gpio.c
+++ b/drivers/gpio/gpio-adp5588.c
diff --git a/drivers/gpio/bt8xxgpio.c b/drivers/gpio/gpio-bt8xx.c
index aa4f09ad3ce..ec57936aef6 100644
--- a/drivers/gpio/bt8xxgpio.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -2,7 +2,7 @@
bt8xx GPIO abuser
- Copyright (C) 2008 Michael Buesch <mb@bu3sch.de>
+ Copyright (C) 2008 Michael Buesch <m@bues.ch>
Please do _only_ contact the people listed _above_ with issues related to this driver.
All the other people listed below are not related to this driver. Their names
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/gpio-cs5535.c
index 6e16cba56ad..6e16cba56ad 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/gpio-cs5535.c
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
new file mode 100644
index 00000000000..038f5eb8b13
--- /dev/null
+++ b/drivers/gpio/gpio-da9052.c
@@ -0,0 +1,277 @@
+/*
+ * GPIO Driver for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/syscalls.h>
+#include <linux/seq_file.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/pdata.h>
+#include <linux/mfd/da9052/gpio.h>
+
+#define DA9052_INPUT 1
+#define DA9052_OUTPUT_OPENDRAIN 2
+#define DA9052_OUTPUT_PUSHPULL 3
+
+#define DA9052_SUPPLY_VDD_IO1 0
+
+#define DA9052_DEBOUNCING_OFF 0
+#define DA9052_DEBOUNCING_ON 1
+
+#define DA9052_OUTPUT_LOWLEVEL 0
+
+#define DA9052_ACTIVE_LOW 0
+#define DA9052_ACTIVE_HIGH 1
+
+#define DA9052_GPIO_MAX_PORTS_PER_REGISTER 8
+#define DA9052_GPIO_SHIFT_COUNT(no) (no%8)
+#define DA9052_GPIO_MASK_UPPER_NIBBLE 0xF0
+#define DA9052_GPIO_MASK_LOWER_NIBBLE 0x0F
+#define DA9052_GPIO_NIBBLE_SHIFT 4
+
+struct da9052_gpio {
+ struct da9052 *da9052;
+ struct gpio_chip gp;
+};
+
+static inline struct da9052_gpio *to_da9052_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct da9052_gpio, gp);
+}
+
+static unsigned char da9052_gpio_port_odd(unsigned offset)
+{
+ return offset % 2;
+}
+
+static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct da9052_gpio *gpio = to_da9052_gpio(gc);
+ int da9052_port_direction = 0;
+ int ret;
+
+ ret = da9052_reg_read(gpio->da9052,
+ DA9052_GPIO_0_1_REG + (offset >> 1));
+ if (ret < 0)
+ return ret;
+
+ if (da9052_gpio_port_odd(offset)) {
+ da9052_port_direction = ret & DA9052_GPIO_ODD_PORT_PIN;
+ da9052_port_direction >>= 4;
+ } else {
+ da9052_port_direction = ret & DA9052_GPIO_EVEN_PORT_PIN;
+ }
+
+ switch (da9052_port_direction) {
+ case DA9052_INPUT:
+ if (offset < DA9052_GPIO_MAX_PORTS_PER_REGISTER)
+ ret = da9052_reg_read(gpio->da9052,
+ DA9052_STATUS_C_REG);
+ else
+ ret = da9052_reg_read(gpio->da9052,
+ DA9052_STATUS_D_REG);
+ if (ret < 0)
+ return ret;
+ if (ret & (1 << DA9052_GPIO_SHIFT_COUNT(offset)))
+ return 1;
+ else
+ return 0;
+ case DA9052_OUTPUT_PUSHPULL:
+ if (da9052_gpio_port_odd(offset))
+ return ret & DA9052_GPIO_ODD_PORT_MODE;
+ else
+ return ret & DA9052_GPIO_EVEN_PORT_MODE;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+ struct da9052_gpio *gpio = to_da9052_gpio(gc);
+ unsigned char register_value = 0;
+ int ret;
+
+ if (da9052_gpio_port_odd(offset)) {
+ if (value) {
+ register_value = DA9052_GPIO_ODD_PORT_MODE;
+ ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_ODD_PORT_MODE,
+ register_value);
+ if (ret != 0)
+ dev_err(gpio->da9052->dev,
+ "Failed to updated gpio odd reg,%d",
+ ret);
+ }
+ } else {
+ if (value) {
+ register_value = DA9052_GPIO_EVEN_PORT_MODE;
+ ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_EVEN_PORT_MODE,
+ register_value);
+ if (ret != 0)
+ dev_err(gpio->da9052->dev,
+ "Failed to updated gpio even reg,%d",
+ ret);
+ }
+ }
+}
+
+static int da9052_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct da9052_gpio *gpio = to_da9052_gpio(gc);
+ unsigned char register_value;
+ int ret;
+
+ /* Format: function - 2 bits, type - 1 bit, mode - 1 bit */
+ register_value = DA9052_INPUT | DA9052_ACTIVE_LOW << 2 |
+ DA9052_DEBOUNCING_ON << 3;
+
+ if (da9052_gpio_port_odd(offset))
+ ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_MASK_UPPER_NIBBLE,
+ (register_value <<
+ DA9052_GPIO_NIBBLE_SHIFT));
+ else
+ ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_MASK_LOWER_NIBBLE,
+ register_value);
+
+ return ret;
+}
+
+static int da9052_gpio_direction_output(struct gpio_chip *gc,
+ unsigned offset, int value)
+{
+ struct da9052_gpio *gpio = to_da9052_gpio(gc);
+ unsigned char register_value;
+ int ret;
+
+ /* Format: Function - 2 bits, Type - 1 bit, Mode - 1 bit */
+ register_value = DA9052_OUTPUT_PUSHPULL | DA9052_SUPPLY_VDD_IO1 << 2 |
+ value << 3;
+
+ if (da9052_gpio_port_odd(offset))
+ ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_MASK_UPPER_NIBBLE,
+ (register_value <<
+ DA9052_GPIO_NIBBLE_SHIFT));
+ else
+ ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_MASK_LOWER_NIBBLE,
+ register_value);
+
+ return ret;
+}
+
+static int da9052_gpio_to_irq(struct gpio_chip *gc, u32 offset)
+{
+ struct da9052_gpio *gpio = to_da9052_gpio(gc);
+ struct da9052 *da9052 = gpio->da9052;
+
+ return da9052->irq_base + DA9052_IRQ_GPI0 + offset;
+}
+
+static struct gpio_chip reference_gp __devinitdata = {
+ .label = "da9052-gpio",
+ .owner = THIS_MODULE,
+ .get = da9052_gpio_get,
+ .set = da9052_gpio_set,
+ .direction_input = da9052_gpio_direction_input,
+ .direction_output = da9052_gpio_direction_output,
+ .to_irq = da9052_gpio_to_irq,
+ .can_sleep = 1,
+ .ngpio = 16,
+ .base = -1,
+};
+
+static int __devinit da9052_gpio_probe(struct platform_device *pdev)
+{
+ struct da9052_gpio *gpio;
+ struct da9052_pdata *pdata;
+ int ret;
+
+ gpio = kzalloc(sizeof(*gpio), GFP_KERNEL);
+ if (gpio == NULL)
+ return -ENOMEM;
+
+ gpio->da9052 = dev_get_drvdata(pdev->dev.parent);
+ pdata = gpio->da9052->dev->platform_data;
+
+ gpio->gp = reference_gp;
+ if (pdata && pdata->gpio_base)
+ gpio->gp.base = pdata->gpio_base;
+
+ ret = gpiochip_add(&gpio->gp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ goto err_mem;
+ }
+
+ platform_set_drvdata(pdev, gpio);
+
+ return 0;
+
+err_mem:
+ kfree(gpio);
+ return ret;
+}
+
+static int __devexit da9052_gpio_remove(struct platform_device *pdev)
+{
+ struct da9052_gpio *gpio = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&gpio->gp);
+ if (ret == 0)
+ kfree(gpio);
+
+ return ret;
+}
+
+static struct platform_driver da9052_gpio_driver = {
+ .probe = da9052_gpio_probe,
+ .remove = __devexit_p(da9052_gpio_remove),
+ .driver = {
+ .name = "da9052-gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9052_gpio_init(void)
+{
+ return platform_driver_register(&da9052_gpio_driver);
+}
+module_init(da9052_gpio_init);
+
+static void __exit da9052_gpio_exit(void)
+{
+ platform_driver_unregister(&da9052_gpio_driver);
+}
+module_exit(da9052_gpio_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 GPIO Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-gpio");
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
new file mode 100644
index 00000000000..72fb9c66532
--- /dev/null
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -0,0 +1,405 @@
+/*
+ * Generic EP93xx GPIO handling
+ *
+ * Copyright (c) 2008 Ryan Mallon
+ * Copyright (c) 2011 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * Based on code originally from:
+ * linux/arch/arm/mach-ep93xx/core.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/basic_mmio_gpio.h>
+
+#include <mach/hardware.h>
+
+struct ep93xx_gpio {
+ void __iomem *mmio_base;
+ struct bgpio_chip bgc[8];
+};
+
+/*************************************************************************
+ * Interrupt handling for EP93xx on-chip GPIOs
+ *************************************************************************/
+static unsigned char gpio_int_unmasked[3];
+static unsigned char gpio_int_enabled[3];
+static unsigned char gpio_int_type1[3];
+static unsigned char gpio_int_type2[3];
+static unsigned char gpio_int_debounce[3];
+
+/* Port ordering is: A B F */
+static const u8 int_type1_register_offset[3] = { 0x90, 0xac, 0x4c };
+static const u8 int_type2_register_offset[3] = { 0x94, 0xb0, 0x50 };
+static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 };
+static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 };
+static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 };
+
+static void ep93xx_gpio_update_int_params(unsigned port)
+{
+ BUG_ON(port > 2);
+
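+ /* mask the whole port while the trigger registers are rewritten */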
+ __raw_writeb(0, EP93XX_GPIO_REG(int_en_register_offset[port]));
+
+ __raw_writeb(gpio_int_type2[port],
+ EP93XX_GPIO_REG(int_type2_register_offset[port]));
+
+ __raw_writeb(gpio_int_type1[port],
+ EP93XX_GPIO_REG(int_type1_register_offset[port]));
+
+ __raw_writeb(gpio_int_unmasked[port] & gpio_int_enabled[port],
+ EP93XX_GPIO_REG(int_en_register_offset[port]));
+}
+
+static inline void ep93xx_gpio_int_mask(unsigned line)
+{
+ gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7));
+}
+
+static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable)
+{
+ int line = irq_to_gpio(irq);
+ int port = line >> 3;
+ int port_mask = 1 << (line & 7);
+
+ if (enable)
+ gpio_int_debounce[port] |= port_mask;
+ else
+ gpio_int_debounce[port] &= ~port_mask;
+
+ __raw_writeb(gpio_int_debounce[port],
+ EP93XX_GPIO_REG(int_debounce_register_offset[port]));
+}
+
+static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned char status;
+ int i;
+
+ status = __raw_readb(EP93XX_GPIO_A_INT_STATUS);
+ for (i = 0; i < 8; i++) {
+ if (status & (1 << i)) {
+ int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_A(0)) + i;
+ generic_handle_irq(gpio_irq);
+ }
+ }
+
+ status = __raw_readb(EP93XX_GPIO_B_INT_STATUS);
+ for (i = 0; i < 8; i++) {
+ if (status & (1 << i)) {
+ int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_B(0)) + i;
+ generic_handle_irq(gpio_irq);
+ }
+ }
+}
+
+static void ep93xx_gpio_f_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ /*
+ * map the discontiguous hw irq range to a contiguous sw irq range:
+ *
+ * IRQ_EP93XX_GPIO{0..7}MUX -> gpio_to_irq(EP93XX_GPIO_LINE_F({0..7}))
+ */
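+ /* e.g. hw irq 19 -> ((19+1)&7)^4 = 0, hw irq 50 -> ((50+1)&7)^4 = 7 */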
+ int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
+ int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_F(0)) + port_f_idx;
+
+ generic_handle_irq(gpio_irq);
+}
+
+static void ep93xx_gpio_irq_ack(struct irq_data *d)
+{
+ int line = irq_to_gpio(d->irq);
+ int port = line >> 3;
+ int port_mask = 1 << (line & 7);
+
+ if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
+ gpio_int_type2[port] ^= port_mask; /* switch edge direction */
+ ep93xx_gpio_update_int_params(port);
+ }
+
+ __raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
+}
+
+static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
+{
+ int line = irq_to_gpio(d->irq);
+ int port = line >> 3;
+ int port_mask = 1 << (line & 7);
+
+ if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
+ gpio_int_type2[port] ^= port_mask; /* switch edge direction */
+
+ gpio_int_unmasked[port] &= ~port_mask;
+ ep93xx_gpio_update_int_params(port);
+
+ __raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port]));
+}
+
+static void ep93xx_gpio_irq_mask(struct irq_data *d)
+{
+ int line = irq_to_gpio(d->irq);
+ int port = line >> 3;
+
+ gpio_int_unmasked[port] &= ~(1 << (line & 7));
+ ep93xx_gpio_update_int_params(port);
+}
+
+static void ep93xx_gpio_irq_unmask(struct irq_data *d)
+{
+ int line = irq_to_gpio(d->irq);
+ int port = line >> 3;
+
+ gpio_int_unmasked[port] |= 1 << (line & 7);
+ ep93xx_gpio_update_int_params(port);
+}
+
+/*
+ * gpio_int_type1 controls whether the interrupt is level (0) or
+ * edge (1) triggered, while gpio_int_type2 controls whether it
+ * triggers on low/falling (0) or high/rising (1).
+ */
+static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+ const int gpio = irq_to_gpio(d->irq);
+ const int port = gpio >> 3;
+ const int port_mask = 1 << (gpio & 7);
+ irq_flow_handler_t handler;
+
+ gpio_direction_input(gpio);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ gpio_int_type1[port] |= port_mask;
+ gpio_int_type2[port] |= port_mask;
+ handler = handle_edge_irq;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ gpio_int_type1[port] |= port_mask;
+ gpio_int_type2[port] &= ~port_mask;
+ handler = handle_edge_irq;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ gpio_int_type1[port] &= ~port_mask;
+ gpio_int_type2[port] |= port_mask;
+ handler = handle_level_irq;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ gpio_int_type1[port] &= ~port_mask;
+ gpio_int_type2[port] &= ~port_mask;
+ handler = handle_level_irq;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ gpio_int_type1[port] |= port_mask;
+ /* set initial polarity based on current input level */
+ if (gpio_get_value(gpio))
+ gpio_int_type2[port] &= ~port_mask; /* falling */
+ else
+ gpio_int_type2[port] |= port_mask; /* rising */
+ handler = handle_edge_irq;
+ break;
+ default:
+ pr_err("failed to set irq type %d for gpio %d\n", type, gpio);
+ return -EINVAL;
+ }
+
+ __irq_set_handler_locked(d->irq, handler);
+
+ gpio_int_enabled[port] |= port_mask;
+
+ ep93xx_gpio_update_int_params(port);
+
+ return 0;
+}
+
+static struct irq_chip ep93xx_gpio_irq_chip = {
+ .name = "GPIO",
+ .irq_ack = ep93xx_gpio_irq_ack,
+ .irq_mask_ack = ep93xx_gpio_irq_mask_ack,
+ .irq_mask = ep93xx_gpio_irq_mask,
+ .irq_unmask = ep93xx_gpio_irq_unmask,
+ .irq_set_type = ep93xx_gpio_irq_type,
+};
+
+static void ep93xx_gpio_init_irq(void)
+{
+ int gpio_irq;
+
+ for (gpio_irq = gpio_to_irq(0);
+ gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) {
+ irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip,
+ handle_level_irq);
+ set_irq_flags(gpio_irq, IRQF_VALID);
+ }
+
+ irq_set_chained_handler(IRQ_EP93XX_GPIO_AB,
+ ep93xx_gpio_ab_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO0MUX,
+ ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO1MUX,
+ ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO2MUX,
+ ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO3MUX,
+ ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO4MUX,
+ ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO5MUX,
+ ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO6MUX,
+ ep93xx_gpio_f_irq_handler);
+ irq_set_chained_handler(IRQ_EP93XX_GPIO7MUX,
+ ep93xx_gpio_f_irq_handler);
+}
+
+/*************************************************************************
+ * gpiolib interface for EP93xx on-chip GPIOs
+ *************************************************************************/
+struct ep93xx_gpio_bank {
+ const char *label;
+ int data;
+ int dir;
+ int base;
+ bool has_debounce;
+};
+
+#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _debounce) \
+ { \
+ .label = _label, \
+ .data = _data, \
+ .dir = _dir, \
+ .base = _base, \
+ .has_debounce = _debounce, \
+ }
+
+static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
+ EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true),
+ EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true),
+ EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false),
+ EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false),
+ EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false),
+ EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, true),
+ EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false),
+ EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false),
+};
+
+static int ep93xx_gpio_set_debounce(struct gpio_chip *chip,
+ unsigned offset, unsigned debounce)
+{
+ int gpio = chip->base + offset;
+ int irq = gpio_to_irq(gpio);
+
+ if (irq < 0)
+ return -EINVAL;
+
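+ /* the requested period is ignored; a non-zero value simply enables the hardware debounce filter */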
+ ep93xx_gpio_int_debounce(irq, debounce ? true : false);
+
+ return 0;
+}
+
+static int ep93xx_gpio_add_bank(struct bgpio_chip *bgc, struct device *dev,
+ void __iomem *mmio_base, struct ep93xx_gpio_bank *bank)
+{
+ void __iomem *data = mmio_base + bank->data;
+ void __iomem *dir = mmio_base + bank->dir;
+ int err;
+
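+ /* one 8-bit data register, no set/clear registers, a single direction (output) register, little-endian */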
+ err = bgpio_init(bgc, dev, 1, data, NULL, NULL, dir, NULL, false);
+ if (err)
+ return err;
+
+ bgc->gc.label = bank->label;
+ bgc->gc.base = bank->base;
+
+ if (bank->has_debounce)
+ bgc->gc.set_debounce = ep93xx_gpio_set_debounce;
+
+ return gpiochip_add(&bgc->gc);
+}
+
+static int __devinit ep93xx_gpio_probe(struct platform_device *pdev)
+{
+ struct ep93xx_gpio *ep93xx_gpio;
+ struct resource *res;
+ void __iomem *mmio;
+ int i;
+ int ret;
+
+ ep93xx_gpio = kzalloc(sizeof(*ep93xx_gpio), GFP_KERNEL);
+ if (!ep93xx_gpio)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENXIO;
+ goto exit_free;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ ret = -EBUSY;
+ goto exit_free;
+ }
+
+ mmio = ioremap(res->start, resource_size(res));
+ if (!mmio) {
+ ret = -ENXIO;
+ goto exit_release;
+ }
+ ep93xx_gpio->mmio_base = mmio;
+
+ /* Default all ports to GPIO */
+ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS |
+ EP93XX_SYSCON_DEVCFG_GONK |
+ EP93XX_SYSCON_DEVCFG_EONIDE |
+ EP93XX_SYSCON_DEVCFG_GONIDE |
+ EP93XX_SYSCON_DEVCFG_HONIDE);
+
+ for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
+ struct bgpio_chip *bgc = &ep93xx_gpio->bgc[i];
+ struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
+
+ if (ep93xx_gpio_add_bank(bgc, &pdev->dev, mmio, bank))
+ dev_warn(&pdev->dev, "Unable to add gpio bank %s\n",
+ bank->label);
+ }
+
+ ep93xx_gpio_init_irq();
+
+ return 0;
+
+exit_release:
+ release_mem_region(res->start, resource_size(res));
+exit_free:
+ kfree(ep93xx_gpio);
+ dev_err(&pdev->dev, "%s failed with error %d\n", __func__, ret);
+ return ret;
+}
+
+static struct platform_driver ep93xx_gpio_driver = {
+ .driver = {
+ .name = "gpio-ep93xx",
+ .owner = THIS_MODULE,
+ },
+ .probe = ep93xx_gpio_probe,
+};
+
+static int __init ep93xx_gpio_init(void)
+{
+ return platform_driver_register(&ep93xx_gpio_driver);
+}
+postcore_initcall(ep93xx_gpio_init);
+
+MODULE_AUTHOR("Ryan Mallon <ryan@bluewatersys.com> "
+ "H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_DESCRIPTION("EP93XX GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-exynos4.c b/drivers/gpio/gpio-exynos4.c
index 9029835112e..d24b337cf1a 100644
--- a/drivers/gpio/gpio-exynos4.c
+++ b/drivers/gpio/gpio-exynos4.c
@@ -1,10 +1,9 @@
-/* linux/arch/arm/mach-exynos4/gpiolib.c
+/*
+ * EXYNOS4 - GPIOlib support
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * EXYNOS4 - GPIOlib support
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/gpio/basic_mmio_gpio.c b/drivers/gpio/gpio-generic.c
index 8152e9f516b..231714def4d 100644
--- a/drivers/gpio/basic_mmio_gpio.c
+++ b/drivers/gpio/gpio-generic.c
@@ -1,5 +1,5 @@
/*
- * Driver for basic memory-mapped GPIO controllers.
+ * Generic driver for memory-mapped GPIO controllers.
*
* Copyright 2008 MontaVista Software, Inc.
* Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com>
@@ -404,7 +404,7 @@ int __devinit bgpio_init(struct bgpio_chip *bgc,
}
EXPORT_SYMBOL_GPL(bgpio_init);
-#ifdef CONFIG_GPIO_BASIC_MMIO
+#ifdef CONFIG_GPIO_GENERIC_PLATFORM
static void __iomem *bgpio_map(struct platform_device *pdev,
const char *name,
@@ -541,7 +541,7 @@ static void __exit bgpio_platform_exit(void)
}
module_exit(bgpio_platform_exit);
-#endif /* CONFIG_GPIO_BASIC_MMIO */
+#endif /* CONFIG_GPIO_GENERIC_PLATFORM */
MODULE_DESCRIPTION("Driver for basic memory-mapped GPIO controllers");
MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/gpio-it8761e.c
index 48fc43c4bdd..278b8131701 100644
--- a/drivers/gpio/it8761e_gpio.c
+++ b/drivers/gpio/gpio-it8761e.c
@@ -1,5 +1,5 @@
/*
- * it8761_gpio.c - GPIO interface for IT8761E Super I/O chip
+ * GPIO interface for IT8761E Super I/O chip
*
* Author: Denis Turischev <denis@compulab.co.il>
*
diff --git a/drivers/gpio/janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 813ac077e5d..813ac077e5d 100644
--- a/drivers/gpio/janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/gpio-langwell.c
index bd6571e0097..d2eb57c60e0 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -1,4 +1,6 @@
-/* langwell_gpio.c Moorestown platform Langwell chip GPIO driver
+/*
+ * Moorestown platform Langwell chip GPIO driver
+ *
* Copyright (c) 2008 - 2009, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
@@ -223,7 +225,7 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
gedr = gpio_reg(&lnw->chip, base, GEDR);
pending = readl(gedr);
while (pending) {
- gpio = __ffs(pending) - 1;
+ gpio = __ffs(pending);
mask = BIT(gpio);
pending &= ~mask;
/* Clear before handling so we can't lose an edge */
diff --git a/drivers/gpio/max7300.c b/drivers/gpio/gpio-max7300.c
index 962f661c18c..a5ca0ab1b37 100644
--- a/drivers/gpio/max7300.c
+++ b/drivers/gpio/gpio-max7300.c
@@ -1,6 +1,4 @@
/*
- * drivers/gpio/max7300.c
- *
* Copyright (C) 2009 Wolfram Sang, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/gpio/max7301.c b/drivers/gpio/gpio-max7301.c
index 92a100ddef6..741acfcbe76 100644
--- a/drivers/gpio/max7301.c
+++ b/drivers/gpio/gpio-max7301.c
@@ -1,6 +1,4 @@
/*
- * drivers/gpio/max7301.c
- *
* Copyright (C) 2006 Juergen Beisert, Pengutronix
* Copyright (C) 2008 Guennadi Liakhovetski, Pengutronix
* Copyright (C) 2009 Wolfram Sang, Pengutronix
diff --git a/drivers/gpio/max730x.c b/drivers/gpio/gpio-max730x.c
index 94ce773f95f..05e2dac60b3 100644
--- a/drivers/gpio/max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -1,6 +1,4 @@
/**
- * drivers/gpio/max7301.c
- *
* Copyright (C) 2006 Juergen Beisert, Pengutronix
* Copyright (C) 2008 Guennadi Liakhovetski, Pengutronix
* Copyright (C) 2009 Wolfram Sang, Pengutronix
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/gpio-max732x.c
index ad6951edc16..9504120812a 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -1,5 +1,5 @@
/*
- * max732x.c - I2C Port Expander with 8/16 I/O
+ * MAX732x I2C Port Expander with 8/16 I/O
*
* Copyright (C) 2007 Marvell International Ltd.
* Copyright (C) 2008 Jack Ren <jack.ren@marvell.com>
diff --git a/drivers/gpio/mc33880.c b/drivers/gpio/gpio-mc33880.c
index 4ec797593bd..b3b4652e89e 100644
--- a/drivers/gpio/mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -1,5 +1,5 @@
/*
- * mc33880.c MC33880 high-side/low-side switch GPIO driver
+ * MC33880 high-side/low-side switch GPIO driver
* Copyright (c) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/gpio/mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 40e076083ec..1ef46e6c2a2 100644
--- a/drivers/gpio/mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -1,12 +1,12 @@
/*
- * mcp23s08.c - SPI gpio expander driver
+ * MCP23S08 SPI/GPIO gpio expander driver
*/
#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
+#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/spi/mcp23s08.h>
#include <linux/slab.h>
@@ -17,13 +17,13 @@
*/
#define MCP_TYPE_S08 0
#define MCP_TYPE_S17 1
+#define MCP_TYPE_008 2
+#define MCP_TYPE_017 3
/* Registers are all 8 bits wide.
*
* The mcp23s17 has twice as many bits, and can be configured to work
* with either 16 bit registers or with two adjacent 8 bit banks.
- *
- * Also, there are I2C versions of both chips.
*/
#define MCP_IODIR 0x00 /* init/reset: all ones */
#define MCP_IPOL 0x01
@@ -51,7 +51,6 @@ struct mcp23s08_ops {
};
struct mcp23s08 {
- struct spi_device *spi;
u8 addr;
u16 cache[11];
@@ -60,9 +59,8 @@ struct mcp23s08 {
struct gpio_chip chip;
- struct work_struct work;
-
const struct mcp23s08_ops *ops;
+ void *data; /* ops specific data */
};
/* A given spi_device can represent up to eight mcp23sxx chips
@@ -76,6 +74,74 @@ struct mcp23s08_driver_data {
struct mcp23s08 chip[];
};
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_I2C
+
+static int mcp23008_read(struct mcp23s08 *mcp, unsigned reg)
+{
+ return i2c_smbus_read_byte_data(mcp->data, reg);
+}
+
+static int mcp23008_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
+{
+ return i2c_smbus_write_byte_data(mcp->data, reg, val);
+}
+
+static int
+mcp23008_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
+{
+ while (n--) {
+ int ret = mcp23008_read(mcp, reg++);
+ if (ret < 0)
+ return ret;
+ *vals++ = ret;
+ }
+
+ return 0;
+}
+
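+/* the x17 parts interleave the port A/B registers, so a 16-bit access lives at reg << 1 */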
+static int mcp23017_read(struct mcp23s08 *mcp, unsigned reg)
+{
+ return i2c_smbus_read_word_data(mcp->data, reg << 1);
+}
+
+static int mcp23017_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
+{
+ return i2c_smbus_write_word_data(mcp->data, reg << 1, val);
+}
+
+static int
+mcp23017_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
+{
+ while (n--) {
+ int ret = mcp23017_read(mcp, reg++);
+ if (ret < 0)
+ return ret;
+ *vals++ = ret;
+ }
+
+ return 0;
+}
+
+static const struct mcp23s08_ops mcp23008_ops = {
+ .read = mcp23008_read,
+ .write = mcp23008_write,
+ .read_regs = mcp23008_read_regs,
+};
+
+static const struct mcp23s08_ops mcp23017_ops = {
+ .read = mcp23017_read,
+ .write = mcp23017_write,
+ .read_regs = mcp23017_read_regs,
+};
+
+#endif /* CONFIG_I2C */
+
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_SPI_MASTER
+
static int mcp23s08_read(struct mcp23s08 *mcp, unsigned reg)
{
u8 tx[2], rx[1];
@@ -83,7 +149,7 @@ static int mcp23s08_read(struct mcp23s08 *mcp, unsigned reg)
tx[0] = mcp->addr | 0x01;
tx[1] = reg;
- status = spi_write_then_read(mcp->spi, tx, sizeof tx, rx, sizeof rx);
+ status = spi_write_then_read(mcp->data, tx, sizeof tx, rx, sizeof rx);
return (status < 0) ? status : rx[0];
}
@@ -94,7 +160,7 @@ static int mcp23s08_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
tx[0] = mcp->addr;
tx[1] = reg;
tx[2] = val;
- return spi_write_then_read(mcp->spi, tx, sizeof tx, NULL, 0);
+ return spi_write_then_read(mcp->data, tx, sizeof tx, NULL, 0);
}
static int
@@ -109,7 +175,7 @@ mcp23s08_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
tx[1] = reg;
tmp = (u8 *)vals;
- status = spi_write_then_read(mcp->spi, tx, sizeof tx, tmp, n);
+ status = spi_write_then_read(mcp->data, tx, sizeof tx, tmp, n);
if (status >= 0) {
while (n--)
vals[n] = tmp[n]; /* expand to 16bit */
@@ -124,7 +190,7 @@ static int mcp23s17_read(struct mcp23s08 *mcp, unsigned reg)
tx[0] = mcp->addr | 0x01;
tx[1] = reg << 1;
- status = spi_write_then_read(mcp->spi, tx, sizeof tx, rx, sizeof rx);
+ status = spi_write_then_read(mcp->data, tx, sizeof tx, rx, sizeof rx);
return (status < 0) ? status : (rx[0] | (rx[1] << 8));
}
@@ -136,7 +202,7 @@ static int mcp23s17_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
tx[1] = reg << 1;
tx[2] = val;
tx[3] = val >> 8;
- return spi_write_then_read(mcp->spi, tx, sizeof tx, NULL, 0);
+ return spi_write_then_read(mcp->data, tx, sizeof tx, NULL, 0);
}
static int
@@ -150,7 +216,7 @@ mcp23s17_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
tx[0] = mcp->addr | 0x01;
tx[1] = reg << 1;
- status = spi_write_then_read(mcp->spi, tx, sizeof tx,
+ status = spi_write_then_read(mcp->data, tx, sizeof tx,
(u8 *)vals, n * 2);
if (status >= 0) {
while (n--)
@@ -172,6 +238,7 @@ static const struct mcp23s08_ops mcp23s17_ops = {
.read_regs = mcp23s17_read_regs,
};
+#endif /* CONFIG_SPI_MASTER */
/*----------------------------------------------------------------------*/
@@ -299,17 +366,16 @@ done:
/*----------------------------------------------------------------------*/
-static int mcp23s08_probe_one(struct spi_device *spi, unsigned addr,
+static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
+ void *data, unsigned addr,
unsigned type, unsigned base, unsigned pullups)
{
- struct mcp23s08_driver_data *data = spi_get_drvdata(spi);
- struct mcp23s08 *mcp = data->mcp[addr];
- int status;
+ int status;
mutex_init(&mcp->lock);
- mcp->spi = spi;
- mcp->addr = 0x40 | (addr << 1);
+ mcp->data = data;
+ mcp->addr = addr;
mcp->chip.direction_input = mcp23s08_direction_input;
mcp->chip.get = mcp23s08_get;
@@ -317,18 +383,43 @@ static int mcp23s08_probe_one(struct spi_device *spi, unsigned addr,
mcp->chip.set = mcp23s08_set;
mcp->chip.dbg_show = mcp23s08_dbg_show;
- if (type == MCP_TYPE_S17) {
+ switch (type) {
+#ifdef CONFIG_SPI_MASTER
+ case MCP_TYPE_S08:
+ mcp->ops = &mcp23s08_ops;
+ mcp->chip.ngpio = 8;
+ mcp->chip.label = "mcp23s08";
+ break;
+
+ case MCP_TYPE_S17:
mcp->ops = &mcp23s17_ops;
mcp->chip.ngpio = 16;
mcp->chip.label = "mcp23s17";
- } else {
- mcp->ops = &mcp23s08_ops;
+ break;
+#endif /* CONFIG_SPI_MASTER */
+
+#ifdef CONFIG_I2C
+ case MCP_TYPE_008:
+ mcp->ops = &mcp23008_ops;
mcp->chip.ngpio = 8;
- mcp->chip.label = "mcp23s08";
+ mcp->chip.label = "mcp23008";
+ break;
+
+ case MCP_TYPE_017:
+ mcp->ops = &mcp23017_ops;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23017";
+ break;
+#endif /* CONFIG_I2C */
+
+ default:
+ dev_err(dev, "invalid device type (%d)\n", type);
+ return -EINVAL;
}
+
mcp->chip.base = base;
mcp->chip.can_sleep = 1;
- mcp->chip.dev = &spi->dev;
+ mcp->chip.dev = dev;
mcp->chip.owner = THIS_MODULE;
/* verify MCP_IOCON.SEQOP = 0, so sequential reads work,
@@ -374,11 +465,98 @@ static int mcp23s08_probe_one(struct spi_device *spi, unsigned addr,
status = gpiochip_add(&mcp->chip);
fail:
if (status < 0)
- dev_dbg(&spi->dev, "can't setup chip %d, --> %d\n",
- addr, status);
+ dev_dbg(dev, "can't setup chip %d, --> %d\n",
+ addr, status);
return status;
}
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_I2C
+
+static int __devinit mcp230xx_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mcp23s08_platform_data *pdata;
+ struct mcp23s08 *mcp;
+ int status;
+
+ pdata = client->dev.platform_data;
+ if (!pdata || !gpio_is_valid(pdata->base)) {
+ dev_dbg(&client->dev, "invalid or missing platform data\n");
+ return -EINVAL;
+ }
+
+ mcp = kzalloc(sizeof *mcp, GFP_KERNEL);
+ if (!mcp)
+ return -ENOMEM;
+
+ status = mcp23s08_probe_one(mcp, &client->dev, client, client->addr,
+ id->driver_data, pdata->base,
+ pdata->chip[0].pullups);
+ if (status)
+ goto fail;
+
+ i2c_set_clientdata(client, mcp);
+
+ return 0;
+
+fail:
+ kfree(mcp);
+
+ return status;
+}
+
+static int __devexit mcp230xx_remove(struct i2c_client *client)
+{
+ struct mcp23s08 *mcp = i2c_get_clientdata(client);
+ int status;
+
+ status = gpiochip_remove(&mcp->chip);
+ if (status == 0)
+ kfree(mcp);
+
+ return status;
+}
+
+static const struct i2c_device_id mcp230xx_id[] = {
+ { "mcp23008", MCP_TYPE_008 },
+ { "mcp23017", MCP_TYPE_017 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mcp230xx_id);
+
+static struct i2c_driver mcp230xx_driver = {
+ .driver = {
+ .name = "mcp230xx",
+ .owner = THIS_MODULE,
+ },
+ .probe = mcp230xx_probe,
+ .remove = __devexit_p(mcp230xx_remove),
+ .id_table = mcp230xx_id,
+};
+
+static int __init mcp23s08_i2c_init(void)
+{
+ return i2c_add_driver(&mcp230xx_driver);
+}
+
+static void mcp23s08_i2c_exit(void)
+{
+ i2c_del_driver(&mcp230xx_driver);
+}
+
+#else
+
+static int __init mcp23s08_i2c_init(void) { return 0; }
+static void mcp23s08_i2c_exit(void) { }
+
+#endif /* CONFIG_I2C */
+
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_SPI_MASTER
+
static int mcp23s08_probe(struct spi_device *spi)
{
struct mcp23s08_platform_data *pdata;
@@ -421,7 +599,8 @@ static int mcp23s08_probe(struct spi_device *spi)
continue;
chips--;
data->mcp[addr] = &data->chip[chips];
- status = mcp23s08_probe_one(spi, addr, type, base,
+ status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
+ 0x40 | (addr << 1), type, base,
pdata->chip[addr].pullups);
if (status < 0)
goto fail;
@@ -435,14 +614,6 @@ static int mcp23s08_probe(struct spi_device *spi)
* handled here...
*/
- if (pdata->setup) {
- status = pdata->setup(spi,
- pdata->base, data->ngpio,
- pdata->context);
- if (status < 0)
- dev_dbg(&spi->dev, "setup --> %d\n", status);
- }
-
return 0;
fail:
@@ -462,20 +633,9 @@ fail:
static int mcp23s08_remove(struct spi_device *spi)
{
struct mcp23s08_driver_data *data = spi_get_drvdata(spi);
- struct mcp23s08_platform_data *pdata = spi->dev.platform_data;
unsigned addr;
int status = 0;
- if (pdata->teardown) {
- status = pdata->teardown(spi,
- pdata->base, data->ngpio,
- pdata->context);
- if (status < 0) {
- dev_err(&spi->dev, "%s --> %d\n", "teardown", status);
- return status;
- }
- }
-
for (addr = 0; addr < ARRAY_SIZE(data->mcp); addr++) {
int tmp;
@@ -510,20 +670,53 @@ static struct spi_driver mcp23s08_driver = {
},
};
+static int __init mcp23s08_spi_init(void)
+{
+ return spi_register_driver(&mcp23s08_driver);
+}
+
+static void mcp23s08_spi_exit(void)
+{
+ spi_unregister_driver(&mcp23s08_driver);
+}
+
+#else
+
+static int __init mcp23s08_spi_init(void) { return 0; }
+static void mcp23s08_spi_exit(void) { }
+
+#endif /* CONFIG_SPI_MASTER */
+
/*----------------------------------------------------------------------*/
static int __init mcp23s08_init(void)
{
- return spi_register_driver(&mcp23s08_driver);
+ int ret;
+
+ ret = mcp23s08_spi_init();
+ if (ret)
+ goto spi_fail;
+
+ ret = mcp23s08_i2c_init();
+ if (ret)
+ goto i2c_fail;
+
+ return 0;
+
+ i2c_fail:
+ mcp23s08_spi_exit();
+ spi_fail:
+ return ret;
}
-/* register after spi postcore initcall and before
+/* register after spi/i2c postcore initcall and before
* subsys initcalls that may rely on these GPIOs
*/
subsys_initcall(mcp23s08_init);
static void __exit mcp23s08_exit(void)
{
- spi_unregister_driver(&mcp23s08_driver);
+ mcp23s08_spi_exit();
+ mcp23s08_i2c_exit();
}
module_exit(mcp23s08_exit);
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/gpio-ml-ioh.c
index 1bc621ac353..a9016f56ed7 100644
--- a/drivers/gpio/ml_ioh_gpio.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -233,7 +233,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
return 0;
err_gpiochip_add:
- for (; i != 0; i--) {
+ while (--i >= 0) {
chip--;
ret = gpiochip_remove(&chip->gpio);
if (ret)
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
new file mode 100644
index 00000000000..52d3ed20810
--- /dev/null
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -0,0 +1,376 @@
+/*
+ * MPC52xx gpio driver
+ *
+ * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/of.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+
+#include <asm/gpio.h>
+#include <asm/mpc52xx.h>
+#include <sysdev/fsl_soc.h>
+
+static DEFINE_SPINLOCK(gpio_lock);
+
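+/* shadow_* hold the last values written so updates need not read the hardware back */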
+struct mpc52xx_gpiochip {
+ struct of_mm_gpio_chip mmchip;
+ unsigned int shadow_dvo;
+ unsigned int shadow_gpioe;
+ unsigned int shadow_ddr;
+};
+
+/*
+ * GPIO LIB API implementation for wakeup GPIOs.
+ *
+ * There's a maximum of 8 wakeup GPIOs. Which of these are available
+ * for use depends on your board setup.
+ *
+ * 0 -> GPIO_WKUP_7
+ * 1 -> GPIO_WKUP_6
+ * 2 -> PSC6_1
+ * 3 -> PSC6_0
+ * 4 -> ETH_17
+ * 5 -> PSC3_9
+ * 6 -> PSC2_4
+ * 7 -> PSC1_4
+ *
+ */
+static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ unsigned int ret;
+
+ ret = (in_8(&regs->wkup_ival) >> (7 - gpio)) & 1;
+
+ pr_debug("%s: gpio: %d ret: %d\n", __func__, gpio, ret);
+
+ return ret;
+}
+
+static inline void
+__mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct mpc52xx_gpiochip *chip = container_of(mm_gc,
+ struct mpc52xx_gpiochip, mmchip);
+ struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+
+ if (val)
+ chip->shadow_dvo |= 1 << (7 - gpio);
+ else
+ chip->shadow_dvo &= ~(1 << (7 - gpio));
+
+ out_8(&regs->wkup_dvo, chip->shadow_dvo);
+}
+
+static void
+mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ __mpc52xx_wkup_gpio_set(gc, gpio, val);
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+}
+
+static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct mpc52xx_gpiochip *chip = container_of(mm_gc,
+ struct mpc52xx_gpiochip, mmchip);
+ struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ /* set the direction */
+ chip->shadow_ddr &= ~(1 << (7 - gpio));
+ out_8(&regs->wkup_ddr, chip->shadow_ddr);
+
+ /* and enable the pin */
+ chip->shadow_gpioe |= 1 << (7 - gpio);
+ out_8(&regs->wkup_gpioe, chip->shadow_gpioe);
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ return 0;
+}
+
+static int
+mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpiochip *chip = container_of(mm_gc,
+ struct mpc52xx_gpiochip, mmchip);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ __mpc52xx_wkup_gpio_set(gc, gpio, val);
+
+ /* Then set direction */
+ chip->shadow_ddr |= 1 << (7 - gpio);
+ out_8(&regs->wkup_ddr, chip->shadow_ddr);
+
+ /* Finally enable the pin */
+ chip->shadow_gpioe |= 1 << (7 - gpio);
+ out_8(&regs->wkup_gpioe, chip->shadow_gpioe);
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+
+ return 0;
+}
+
+static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
+{
+ struct mpc52xx_gpiochip *chip;
+ struct mpc52xx_gpio_wkup __iomem *regs;
+ struct gpio_chip *gc;
+ int ret;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ gc = &chip->mmchip.gc;
+
+ gc->ngpio = 8;
+ gc->direction_input = mpc52xx_wkup_gpio_dir_in;
+ gc->direction_output = mpc52xx_wkup_gpio_dir_out;
+ gc->get = mpc52xx_wkup_gpio_get;
+ gc->set = mpc52xx_wkup_gpio_set;
+
+ ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip);
+ if (ret)
+ return ret;
+
+ regs = chip->mmchip.regs;
+ chip->shadow_gpioe = in_8(&regs->wkup_gpioe);
+ chip->shadow_ddr = in_8(&regs->wkup_ddr);
+ chip->shadow_dvo = in_8(&regs->wkup_dvo);
+
+ return 0;
+}
+
+static int mpc52xx_gpiochip_remove(struct platform_device *ofdev)
+{
+ return -EBUSY;
+}
+
+static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
+ { .compatible = "fsl,mpc5200-gpio-wkup", },
+ {}
+};
+
+static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
+ .driver = {
+ .name = "mpc5200-gpio-wkup",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_wkup_gpiochip_match,
+ },
+ .probe = mpc52xx_wkup_gpiochip_probe,
+ .remove = mpc52xx_gpiochip_remove,
+};
+
+/*
+ * GPIO LIB API implementation for simple GPIOs
+ *
+ * There's a maximum of 32 simple GPIOs. Which of these are available
+ * for use depends on your board setup.
+ * The numbering reflects the bit numbering in the port registers:
+ *
+ * 0..1 > reserved
+ * 2..3 > IRDA
+ * 4..7 > ETHR
+ * 8..11 > reserved
+ * 12..15 > USB
+ * 16..17 > reserved
+ * 18..23 > PSC3
+ * 24..27 > PSC2
+ * 28..31 > PSC1
+ */
+static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ unsigned int ret;
+
+ ret = (in_be32(&regs->simple_ival) >> (31 - gpio)) & 1;
+
+ return ret;
+}
+
+static inline void
+__mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct mpc52xx_gpiochip *chip = container_of(mm_gc,
+ struct mpc52xx_gpiochip, mmchip);
+ struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+
+ if (val)
+ chip->shadow_dvo |= 1 << (31 - gpio);
+ else
+ chip->shadow_dvo &= ~(1 << (31 - gpio));
+ out_be32(&regs->simple_dvo, chip->shadow_dvo);
+}
+
+static void
+mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ __mpc52xx_simple_gpio_set(gc, gpio, val);
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+}
+
+static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct mpc52xx_gpiochip *chip = container_of(mm_gc,
+ struct mpc52xx_gpiochip, mmchip);
+ struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ /* set the direction */
+ chip->shadow_ddr &= ~(1 << (31 - gpio));
+ out_be32(&regs->simple_ddr, chip->shadow_ddr);
+
+ /* and enable the pin */
+ chip->shadow_gpioe |= 1 << (31 - gpio);
+ out_be32(&regs->simple_gpioe, chip->shadow_gpioe);
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ return 0;
+}
+
+static int
+mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct mpc52xx_gpiochip *chip = container_of(mm_gc,
+ struct mpc52xx_gpiochip, mmchip);
+ struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ /* First set initial value */
+ __mpc52xx_simple_gpio_set(gc, gpio, val);
+
+ /* Then set direction */
+ chip->shadow_ddr |= 1 << (31 - gpio);
+ out_be32(&regs->simple_ddr, chip->shadow_ddr);
+
+ /* Finally enable the pin */
+ chip->shadow_gpioe |= 1 << (31 - gpio);
+ out_be32(&regs->simple_gpioe, chip->shadow_gpioe);
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+
+ return 0;
+}
+
+static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
+{
+ struct mpc52xx_gpiochip *chip;
+ struct gpio_chip *gc;
+ struct mpc52xx_gpio __iomem *regs;
+ int ret;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ gc = &chip->mmchip.gc;
+
+ gc->ngpio = 32;
+ gc->direction_input = mpc52xx_simple_gpio_dir_in;
+ gc->direction_output = mpc52xx_simple_gpio_dir_out;
+ gc->get = mpc52xx_simple_gpio_get;
+ gc->set = mpc52xx_simple_gpio_set;
+
+ ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip);
+ if (ret)
+ return ret;
+
+ regs = chip->mmchip.regs;
+ chip->shadow_gpioe = in_be32(&regs->simple_gpioe);
+ chip->shadow_ddr = in_be32(&regs->simple_ddr);
+ chip->shadow_dvo = in_be32(&regs->simple_dvo);
+
+ return 0;
+}
+
+static const struct of_device_id mpc52xx_simple_gpiochip_match[] = {
+ { .compatible = "fsl,mpc5200-gpio", },
+ {}
+};
+
+static struct platform_driver mpc52xx_simple_gpiochip_driver = {
+ .driver = {
+ .name = "mpc5200-gpio",
+ .owner = THIS_MODULE,
+ .of_match_table = mpc52xx_simple_gpiochip_match,
+ },
+ .probe = mpc52xx_simple_gpiochip_probe,
+ .remove = mpc52xx_gpiochip_remove,
+};
+
+static int __init mpc52xx_gpio_init(void)
+{
+ if (platform_driver_register(&mpc52xx_wkup_gpiochip_driver))
+ printk(KERN_ERR "Unable to register wakeup GPIO driver\n");
+
+ if (platform_driver_register(&mpc52xx_simple_gpiochip_driver))
+ printk(KERN_ERR "Unable to register simple GPIO driver\n");
+
+ return 0;
+}
+
+/* Make sure we get initialised before anyone else tries to use us */
+subsys_initcall(mpc52xx_gpio_init);
+
+/* No exit call at the moment as we cannot unregister the gpio chips */
+
+MODULE_DESCRIPTION("Freescale MPC52xx gpio driver");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c
new file mode 100644
index 00000000000..52a4d4286eb
--- /dev/null
+++ b/drivers/gpio/gpio-msm-v1.c
@@ -0,0 +1,636 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <mach/cpu.h>
+#include <mach/msm_gpiomux.h>
+#include <mach/msm_iomap.h>
+
+/* see 80-VA736-2 Rev C pp 695-751
+**
+** These are actually the *shadow* gpio registers, since the
+** real ones (which allow full access) are only available to the
+** ARM9 side of the world.
+**
+** Since the _BASE need to be page-aligned when we're mapping them
+** to virtual addresses, adjust for the additional offset in these
+** macros.
+*/
+
+#define MSM_GPIO1_REG(off) (MSM_GPIO1_BASE + (off))
+#define MSM_GPIO2_REG(off) (MSM_GPIO2_BASE + 0x400 + (off))
+#define MSM_GPIO1_SHADOW_REG(off) (MSM_GPIO1_BASE + 0x800 + (off))
+#define MSM_GPIO2_SHADOW_REG(off) (MSM_GPIO2_BASE + 0xC00 + (off))
+
+/*
+ * MSM7X00 registers
+ */
+/* output value */
+#define MSM7X00_GPIO_OUT_0 MSM_GPIO1_SHADOW_REG(0x00) /* gpio 15-0 */
+#define MSM7X00_GPIO_OUT_1 MSM_GPIO2_SHADOW_REG(0x00) /* gpio 42-16 */
+#define MSM7X00_GPIO_OUT_2 MSM_GPIO1_SHADOW_REG(0x04) /* gpio 67-43 */
+#define MSM7X00_GPIO_OUT_3 MSM_GPIO1_SHADOW_REG(0x08) /* gpio 94-68 */
+#define MSM7X00_GPIO_OUT_4 MSM_GPIO1_SHADOW_REG(0x0C) /* gpio 106-95 */
+#define MSM7X00_GPIO_OUT_5 MSM_GPIO1_SHADOW_REG(0x50) /* gpio 107-121 */
+
+/* same pin map as above, output enable */
+#define MSM7X00_GPIO_OE_0 MSM_GPIO1_SHADOW_REG(0x10)
+#define MSM7X00_GPIO_OE_1 MSM_GPIO2_SHADOW_REG(0x08)
+#define MSM7X00_GPIO_OE_2 MSM_GPIO1_SHADOW_REG(0x14)
+#define MSM7X00_GPIO_OE_3 MSM_GPIO1_SHADOW_REG(0x18)
+#define MSM7X00_GPIO_OE_4 MSM_GPIO1_SHADOW_REG(0x1C)
+#define MSM7X00_GPIO_OE_5 MSM_GPIO1_SHADOW_REG(0x54)
+
+/* same pin map as above, input read */
+#define MSM7X00_GPIO_IN_0 MSM_GPIO1_SHADOW_REG(0x34)
+#define MSM7X00_GPIO_IN_1 MSM_GPIO2_SHADOW_REG(0x20)
+#define MSM7X00_GPIO_IN_2 MSM_GPIO1_SHADOW_REG(0x38)
+#define MSM7X00_GPIO_IN_3 MSM_GPIO1_SHADOW_REG(0x3C)
+#define MSM7X00_GPIO_IN_4 MSM_GPIO1_SHADOW_REG(0x40)
+#define MSM7X00_GPIO_IN_5 MSM_GPIO1_SHADOW_REG(0x44)
+
+/* same pin map as above, 1=edge 0=level interrupt */
+#define MSM7X00_GPIO_INT_EDGE_0 MSM_GPIO1_SHADOW_REG(0x60)
+#define MSM7X00_GPIO_INT_EDGE_1 MSM_GPIO2_SHADOW_REG(0x50)
+#define MSM7X00_GPIO_INT_EDGE_2 MSM_GPIO1_SHADOW_REG(0x64)
+#define MSM7X00_GPIO_INT_EDGE_3 MSM_GPIO1_SHADOW_REG(0x68)
+#define MSM7X00_GPIO_INT_EDGE_4 MSM_GPIO1_SHADOW_REG(0x6C)
+#define MSM7X00_GPIO_INT_EDGE_5 MSM_GPIO1_SHADOW_REG(0xC0)
+
+/* same pin map as above, 1=positive 0=negative */
+#define MSM7X00_GPIO_INT_POS_0 MSM_GPIO1_SHADOW_REG(0x70)
+#define MSM7X00_GPIO_INT_POS_1 MSM_GPIO2_SHADOW_REG(0x58)
+#define MSM7X00_GPIO_INT_POS_2 MSM_GPIO1_SHADOW_REG(0x74)
+#define MSM7X00_GPIO_INT_POS_3 MSM_GPIO1_SHADOW_REG(0x78)
+#define MSM7X00_GPIO_INT_POS_4 MSM_GPIO1_SHADOW_REG(0x7C)
+#define MSM7X00_GPIO_INT_POS_5 MSM_GPIO1_SHADOW_REG(0xBC)
+
+/* same pin map as above, interrupt enable */
+#define MSM7X00_GPIO_INT_EN_0 MSM_GPIO1_SHADOW_REG(0x80)
+#define MSM7X00_GPIO_INT_EN_1 MSM_GPIO2_SHADOW_REG(0x60)
+#define MSM7X00_GPIO_INT_EN_2 MSM_GPIO1_SHADOW_REG(0x84)
+#define MSM7X00_GPIO_INT_EN_3 MSM_GPIO1_SHADOW_REG(0x88)
+#define MSM7X00_GPIO_INT_EN_4 MSM_GPIO1_SHADOW_REG(0x8C)
+#define MSM7X00_GPIO_INT_EN_5 MSM_GPIO1_SHADOW_REG(0xB8)
+
+/* same pin map as above, write 1 to clear interrupt */
+#define MSM7X00_GPIO_INT_CLEAR_0 MSM_GPIO1_SHADOW_REG(0x90)
+#define MSM7X00_GPIO_INT_CLEAR_1 MSM_GPIO2_SHADOW_REG(0x68)
+#define MSM7X00_GPIO_INT_CLEAR_2 MSM_GPIO1_SHADOW_REG(0x94)
+#define MSM7X00_GPIO_INT_CLEAR_3 MSM_GPIO1_SHADOW_REG(0x98)
+#define MSM7X00_GPIO_INT_CLEAR_4 MSM_GPIO1_SHADOW_REG(0x9C)
+#define MSM7X00_GPIO_INT_CLEAR_5 MSM_GPIO1_SHADOW_REG(0xB4)
+
+/* same pin map as above, 1=interrupt pending */
+#define MSM7X00_GPIO_INT_STATUS_0 MSM_GPIO1_SHADOW_REG(0xA0)
+#define MSM7X00_GPIO_INT_STATUS_1 MSM_GPIO2_SHADOW_REG(0x70)
+#define MSM7X00_GPIO_INT_STATUS_2 MSM_GPIO1_SHADOW_REG(0xA4)
+#define MSM7X00_GPIO_INT_STATUS_3 MSM_GPIO1_SHADOW_REG(0xA8)
+#define MSM7X00_GPIO_INT_STATUS_4 MSM_GPIO1_SHADOW_REG(0xAC)
+#define MSM7X00_GPIO_INT_STATUS_5 MSM_GPIO1_SHADOW_REG(0xB0)
+
+/*
+ * QSD8X50 registers
+ */
+/* output value */
+#define QSD8X50_GPIO_OUT_0 MSM_GPIO1_SHADOW_REG(0x00) /* gpio 15-0 */
+#define QSD8X50_GPIO_OUT_1 MSM_GPIO2_SHADOW_REG(0x00) /* gpio 42-16 */
+#define QSD8X50_GPIO_OUT_2 MSM_GPIO1_SHADOW_REG(0x04) /* gpio 67-43 */
+#define QSD8X50_GPIO_OUT_3 MSM_GPIO1_SHADOW_REG(0x08) /* gpio 94-68 */
+#define QSD8X50_GPIO_OUT_4 MSM_GPIO1_SHADOW_REG(0x0C) /* gpio 103-95 */
+#define QSD8X50_GPIO_OUT_5 MSM_GPIO1_SHADOW_REG(0x10) /* gpio 121-104 */
+#define QSD8X50_GPIO_OUT_6 MSM_GPIO1_SHADOW_REG(0x14) /* gpio 152-122 */
+#define QSD8X50_GPIO_OUT_7 MSM_GPIO1_SHADOW_REG(0x18) /* gpio 164-153 */
+
+/* same pin map as above, output enable */
+#define QSD8X50_GPIO_OE_0 MSM_GPIO1_SHADOW_REG(0x20)
+#define QSD8X50_GPIO_OE_1 MSM_GPIO2_SHADOW_REG(0x08)
+#define QSD8X50_GPIO_OE_2 MSM_GPIO1_SHADOW_REG(0x24)
+#define QSD8X50_GPIO_OE_3 MSM_GPIO1_SHADOW_REG(0x28)
+#define QSD8X50_GPIO_OE_4 MSM_GPIO1_SHADOW_REG(0x2C)
+#define QSD8X50_GPIO_OE_5 MSM_GPIO1_SHADOW_REG(0x30)
+#define QSD8X50_GPIO_OE_6 MSM_GPIO1_SHADOW_REG(0x34)
+#define QSD8X50_GPIO_OE_7 MSM_GPIO1_SHADOW_REG(0x38)
+
+/* same pin map as above, input read */
+#define QSD8X50_GPIO_IN_0 MSM_GPIO1_SHADOW_REG(0x50)
+#define QSD8X50_GPIO_IN_1 MSM_GPIO2_SHADOW_REG(0x20)
+#define QSD8X50_GPIO_IN_2 MSM_GPIO1_SHADOW_REG(0x54)
+#define QSD8X50_GPIO_IN_3 MSM_GPIO1_SHADOW_REG(0x58)
+#define QSD8X50_GPIO_IN_4 MSM_GPIO1_SHADOW_REG(0x5C)
+#define QSD8X50_GPIO_IN_5 MSM_GPIO1_SHADOW_REG(0x60)
+#define QSD8X50_GPIO_IN_6 MSM_GPIO1_SHADOW_REG(0x64)
+#define QSD8X50_GPIO_IN_7 MSM_GPIO1_SHADOW_REG(0x68)
+
+/* same pin map as above, 1=edge 0=level interrupt */
+#define QSD8X50_GPIO_INT_EDGE_0 MSM_GPIO1_SHADOW_REG(0x70)
+#define QSD8X50_GPIO_INT_EDGE_1 MSM_GPIO2_SHADOW_REG(0x50)
+#define QSD8X50_GPIO_INT_EDGE_2 MSM_GPIO1_SHADOW_REG(0x74)
+#define QSD8X50_GPIO_INT_EDGE_3 MSM_GPIO1_SHADOW_REG(0x78)
+#define QSD8X50_GPIO_INT_EDGE_4 MSM_GPIO1_SHADOW_REG(0x7C)
+#define QSD8X50_GPIO_INT_EDGE_5 MSM_GPIO1_SHADOW_REG(0x80)
+#define QSD8X50_GPIO_INT_EDGE_6 MSM_GPIO1_SHADOW_REG(0x84)
+#define QSD8X50_GPIO_INT_EDGE_7 MSM_GPIO1_SHADOW_REG(0x88)
+
+/* same pin map as above, 1=positive 0=negative */
+#define QSD8X50_GPIO_INT_POS_0 MSM_GPIO1_SHADOW_REG(0x90)
+#define QSD8X50_GPIO_INT_POS_1 MSM_GPIO2_SHADOW_REG(0x58)
+#define QSD8X50_GPIO_INT_POS_2 MSM_GPIO1_SHADOW_REG(0x94)
+#define QSD8X50_GPIO_INT_POS_3 MSM_GPIO1_SHADOW_REG(0x98)
+#define QSD8X50_GPIO_INT_POS_4 MSM_GPIO1_SHADOW_REG(0x9C)
+#define QSD8X50_GPIO_INT_POS_5 MSM_GPIO1_SHADOW_REG(0xA0)
+#define QSD8X50_GPIO_INT_POS_6 MSM_GPIO1_SHADOW_REG(0xA4)
+#define QSD8X50_GPIO_INT_POS_7 MSM_GPIO1_SHADOW_REG(0xA8)
+
+/* same pin map as above, interrupt enable */
+#define QSD8X50_GPIO_INT_EN_0 MSM_GPIO1_SHADOW_REG(0xB0)
+#define QSD8X50_GPIO_INT_EN_1 MSM_GPIO2_SHADOW_REG(0x60)
+#define QSD8X50_GPIO_INT_EN_2 MSM_GPIO1_SHADOW_REG(0xB4)
+#define QSD8X50_GPIO_INT_EN_3 MSM_GPIO1_SHADOW_REG(0xB8)
+#define QSD8X50_GPIO_INT_EN_4 MSM_GPIO1_SHADOW_REG(0xBC)
+#define QSD8X50_GPIO_INT_EN_5 MSM_GPIO1_SHADOW_REG(0xC0)
+#define QSD8X50_GPIO_INT_EN_6 MSM_GPIO1_SHADOW_REG(0xC4)
+#define QSD8X50_GPIO_INT_EN_7 MSM_GPIO1_SHADOW_REG(0xC8)
+
+/* same pin map as above, write 1 to clear interrupt */
+#define QSD8X50_GPIO_INT_CLEAR_0 MSM_GPIO1_SHADOW_REG(0xD0)
+#define QSD8X50_GPIO_INT_CLEAR_1 MSM_GPIO2_SHADOW_REG(0x68)
+#define QSD8X50_GPIO_INT_CLEAR_2 MSM_GPIO1_SHADOW_REG(0xD4)
+#define QSD8X50_GPIO_INT_CLEAR_3 MSM_GPIO1_SHADOW_REG(0xD8)
+#define QSD8X50_GPIO_INT_CLEAR_4 MSM_GPIO1_SHADOW_REG(0xDC)
+#define QSD8X50_GPIO_INT_CLEAR_5 MSM_GPIO1_SHADOW_REG(0xE0)
+#define QSD8X50_GPIO_INT_CLEAR_6 MSM_GPIO1_SHADOW_REG(0xE4)
+#define QSD8X50_GPIO_INT_CLEAR_7 MSM_GPIO1_SHADOW_REG(0xE8)
+
+/* same pin map as above, 1=interrupt pending */
+#define QSD8X50_GPIO_INT_STATUS_0 MSM_GPIO1_SHADOW_REG(0xF0)
+#define QSD8X50_GPIO_INT_STATUS_1 MSM_GPIO2_SHADOW_REG(0x70)
+#define QSD8X50_GPIO_INT_STATUS_2 MSM_GPIO1_SHADOW_REG(0xF4)
+#define QSD8X50_GPIO_INT_STATUS_3 MSM_GPIO1_SHADOW_REG(0xF8)
+#define QSD8X50_GPIO_INT_STATUS_4 MSM_GPIO1_SHADOW_REG(0xFC)
+#define QSD8X50_GPIO_INT_STATUS_5 MSM_GPIO1_SHADOW_REG(0x100)
+#define QSD8X50_GPIO_INT_STATUS_6 MSM_GPIO1_SHADOW_REG(0x104)
+#define QSD8X50_GPIO_INT_STATUS_7 MSM_GPIO1_SHADOW_REG(0x108)
+
+/*
+ * MSM7X30 registers
+ */
+/* output value */
+#define MSM7X30_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */
+#define MSM7X30_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 43-16 */
+#define MSM7X30_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-44 */
+#define MSM7X30_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */
+#define MSM7X30_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 106-95 */
+#define MSM7X30_GPIO_OUT_5 MSM_GPIO1_REG(0x50) /* gpio 133-107 */
+#define MSM7X30_GPIO_OUT_6 MSM_GPIO1_REG(0xC4) /* gpio 150-134 */
+#define MSM7X30_GPIO_OUT_7 MSM_GPIO1_REG(0x214) /* gpio 181-151 */
+
+/* same pin map as above, output enable */
+#define MSM7X30_GPIO_OE_0 MSM_GPIO1_REG(0x10)
+#define MSM7X30_GPIO_OE_1 MSM_GPIO2_REG(0x08)
+#define MSM7X30_GPIO_OE_2 MSM_GPIO1_REG(0x14)
+#define MSM7X30_GPIO_OE_3 MSM_GPIO1_REG(0x18)
+#define MSM7X30_GPIO_OE_4 MSM_GPIO1_REG(0x1C)
+#define MSM7X30_GPIO_OE_5 MSM_GPIO1_REG(0x54)
+#define MSM7X30_GPIO_OE_6 MSM_GPIO1_REG(0xC8)
+#define MSM7X30_GPIO_OE_7 MSM_GPIO1_REG(0x218)
+
+/* same pin map as above, input read */
+#define MSM7X30_GPIO_IN_0 MSM_GPIO1_REG(0x34)
+#define MSM7X30_GPIO_IN_1 MSM_GPIO2_REG(0x20)
+#define MSM7X30_GPIO_IN_2 MSM_GPIO1_REG(0x38)
+#define MSM7X30_GPIO_IN_3 MSM_GPIO1_REG(0x3C)
+#define MSM7X30_GPIO_IN_4 MSM_GPIO1_REG(0x40)
+#define MSM7X30_GPIO_IN_5 MSM_GPIO1_REG(0x44)
+#define MSM7X30_GPIO_IN_6 MSM_GPIO1_REG(0xCC)
+#define MSM7X30_GPIO_IN_7 MSM_GPIO1_REG(0x21C)
+
+/* same pin map as above, 1=edge 0=level interrupt */
+#define MSM7X30_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x60)
+#define MSM7X30_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50)
+#define MSM7X30_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x64)
+#define MSM7X30_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x68)
+#define MSM7X30_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x6C)
+#define MSM7X30_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0xC0)
+#define MSM7X30_GPIO_INT_EDGE_6 MSM_GPIO1_REG(0xD0)
+#define MSM7X30_GPIO_INT_EDGE_7 MSM_GPIO1_REG(0x240)
+
+/* same pin map as above, 1=positive 0=negative */
+#define MSM7X30_GPIO_INT_POS_0 MSM_GPIO1_REG(0x70)
+#define MSM7X30_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58)
+#define MSM7X30_GPIO_INT_POS_2 MSM_GPIO1_REG(0x74)
+#define MSM7X30_GPIO_INT_POS_3 MSM_GPIO1_REG(0x78)
+#define MSM7X30_GPIO_INT_POS_4 MSM_GPIO1_REG(0x7C)
+#define MSM7X30_GPIO_INT_POS_5 MSM_GPIO1_REG(0xBC)
+#define MSM7X30_GPIO_INT_POS_6 MSM_GPIO1_REG(0xD4)
+#define MSM7X30_GPIO_INT_POS_7 MSM_GPIO1_REG(0x228)
+
+/* same pin map as above, interrupt enable */
+#define MSM7X30_GPIO_INT_EN_0 MSM_GPIO1_REG(0x80)
+#define MSM7X30_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60)
+#define MSM7X30_GPIO_INT_EN_2 MSM_GPIO1_REG(0x84)
+#define MSM7X30_GPIO_INT_EN_3 MSM_GPIO1_REG(0x88)
+#define MSM7X30_GPIO_INT_EN_4 MSM_GPIO1_REG(0x8C)
+#define MSM7X30_GPIO_INT_EN_5 MSM_GPIO1_REG(0xB8)
+#define MSM7X30_GPIO_INT_EN_6 MSM_GPIO1_REG(0xD8)
+#define MSM7X30_GPIO_INT_EN_7 MSM_GPIO1_REG(0x22C)
+
+/* same pin map as above, write 1 to clear interrupt */
+#define MSM7X30_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0x90)
+#define MSM7X30_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68)
+#define MSM7X30_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0x94)
+#define MSM7X30_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0x98)
+#define MSM7X30_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0x9C)
+#define MSM7X30_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xB4)
+#define MSM7X30_GPIO_INT_CLEAR_6 MSM_GPIO1_REG(0xDC)
+#define MSM7X30_GPIO_INT_CLEAR_7 MSM_GPIO1_REG(0x230)
+
+/* same pin map as above, 1=interrupt pending */
+#define MSM7X30_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xA0)
+#define MSM7X30_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70)
+#define MSM7X30_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xA4)
+#define MSM7X30_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xA8)
+#define MSM7X30_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xAC)
+#define MSM7X30_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0xB0)
+#define MSM7X30_GPIO_INT_STATUS_6 MSM_GPIO1_REG(0xE0)
+#define MSM7X30_GPIO_INT_STATUS_7 MSM_GPIO1_REG(0x234)
+
+#define FIRST_GPIO_IRQ MSM_GPIO_TO_INT(0)
+
+#define MSM_GPIO_BANK(soc, bank, first, last) \
+ { \
+ .regs = { \
+ .out = soc##_GPIO_OUT_##bank, \
+ .in = soc##_GPIO_IN_##bank, \
+ .int_status = soc##_GPIO_INT_STATUS_##bank, \
+ .int_clear = soc##_GPIO_INT_CLEAR_##bank, \
+ .int_en = soc##_GPIO_INT_EN_##bank, \
+ .int_edge = soc##_GPIO_INT_EDGE_##bank, \
+ .int_pos = soc##_GPIO_INT_POS_##bank, \
+ .oe = soc##_GPIO_OE_##bank, \
+ }, \
+ .chip = { \
+ .base = (first), \
+ .ngpio = (last) - (first) + 1, \
+ .get = msm_gpio_get, \
+ .set = msm_gpio_set, \
+ .direction_input = msm_gpio_direction_input, \
+ .direction_output = msm_gpio_direction_output, \
+ .to_irq = msm_gpio_to_irq, \
+ .request = msm_gpio_request, \
+ .free = msm_gpio_free, \
+ } \
+ }
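+/* e.g. MSM_GPIO_BANK(MSM7X00, 0, 0, 15) covers gpios 0..15 via the MSM7X00_GPIO_*_0 registers */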
+
+#define MSM_GPIO_BROKEN_INT_CLEAR 1
+
+struct msm_gpio_regs {
+ void __iomem *out;
+ void __iomem *in;
+ void __iomem *int_status;
+ void __iomem *int_clear;
+ void __iomem *int_en;
+ void __iomem *int_edge;
+ void __iomem *int_pos;
+ void __iomem *oe;
+};
+
+struct msm_gpio_chip {
+ spinlock_t lock;
+ struct gpio_chip chip;
+ struct msm_gpio_regs regs;
+#if MSM_GPIO_BROKEN_INT_CLEAR
+ unsigned int_status_copy;
+#endif
+ unsigned int both_edge_detect;
+ unsigned int int_enable[2]; /* 0: awake, 1: sleep */
+};
+
+static int msm_gpio_write(struct msm_gpio_chip *msm_chip,
+ unsigned offset, unsigned on)
+{
+ unsigned mask = BIT(offset);
+ unsigned val;
+
+ val = readl(msm_chip->regs.out);
+ if (on)
+ writel(val | mask, msm_chip->regs.out);
+ else
+ writel(val & ~mask, msm_chip->regs.out);
+ return 0;
+}
+
+static void msm_gpio_update_both_edge_detect(struct msm_gpio_chip *msm_chip)
+{
+ int loop_limit = 100;
+ unsigned pol, val, val2, intstat;
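+ /*
+  * Both-edge triggering is emulated: point the polarity register away
+  * from the current input level, then re-read the input to make sure
+  * it did not flip while the polarity was being updated.
+  */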
+ do {
+ val = readl(msm_chip->regs.in);
+ pol = readl(msm_chip->regs.int_pos);
+ pol = (pol & ~msm_chip->both_edge_detect) |
+ (~val & msm_chip->both_edge_detect);
+ writel(pol, msm_chip->regs.int_pos);
+ intstat = readl(msm_chip->regs.int_status);
+ val2 = readl(msm_chip->regs.in);
+ if (((val ^ val2) & msm_chip->both_edge_detect & ~intstat) == 0)
+ return;
+ } while (loop_limit-- > 0);
+ printk(KERN_ERR "msm_gpio_update_both_edge_detect, "
+ "failed to reach stable state %x != %x\n", val, val2);
+}
+
+static int msm_gpio_clear_detect_status(struct msm_gpio_chip *msm_chip,
+ unsigned offset)
+{
+ unsigned bit = BIT(offset);
+
+#if MSM_GPIO_BROKEN_INT_CLEAR
+ /* Save interrupts that already triggered before we lose them. */
+ /* Any interrupt that triggers between the read of int_status */
+ /* and the write to int_clear will still be lost though. */
+ msm_chip->int_status_copy |= readl(msm_chip->regs.int_status);
+ msm_chip->int_status_copy &= ~bit;
+#endif
+ writel(bit, msm_chip->regs.int_clear);
+ msm_gpio_update_both_edge_detect(msm_chip);
+ return 0;
+}
+
+static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct msm_gpio_chip *msm_chip;
+ unsigned long irq_flags;
+
+ msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ writel(readl(msm_chip->regs.oe) & ~BIT(offset), msm_chip->regs.oe);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+ return 0;
+}
+
+static int
+msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct msm_gpio_chip *msm_chip;
+ unsigned long irq_flags;
+
+ msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ msm_gpio_write(msm_chip, offset, value);
+ writel(readl(msm_chip->regs.oe) | BIT(offset), msm_chip->regs.oe);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+ return 0;
+}
+
+static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct msm_gpio_chip *msm_chip;
+
+ msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ return (readl(msm_chip->regs.in) & (1U << offset)) ? 1 : 0;
+}
+
+static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct msm_gpio_chip *msm_chip;
+ unsigned long irq_flags;
+
+ msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ msm_gpio_write(msm_chip, offset, value);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+}
+
+static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return MSM_GPIO_TO_INT(chip->base + offset);
+}
+
+#ifdef CONFIG_MSM_GPIOMUX
+static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return msm_gpiomux_get(chip->base + offset);
+}
+
+static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ msm_gpiomux_put(chip->base + offset);
+}
+#else
+#define msm_gpio_request NULL
+#define msm_gpio_free NULL
+#endif
+
+static struct msm_gpio_chip *msm_gpio_chips;
+static int msm_gpio_count;
+
+static struct msm_gpio_chip msm_gpio_chips_msm7x01[] = {
+ MSM_GPIO_BANK(MSM7X00, 0, 0, 15),
+ MSM_GPIO_BANK(MSM7X00, 1, 16, 42),
+ MSM_GPIO_BANK(MSM7X00, 2, 43, 67),
+ MSM_GPIO_BANK(MSM7X00, 3, 68, 94),
+ MSM_GPIO_BANK(MSM7X00, 4, 95, 106),
+ MSM_GPIO_BANK(MSM7X00, 5, 107, 121),
+};
+
+static struct msm_gpio_chip msm_gpio_chips_msm7x30[] = {
+ MSM_GPIO_BANK(MSM7X30, 0, 0, 15),
+ MSM_GPIO_BANK(MSM7X30, 1, 16, 43),
+ MSM_GPIO_BANK(MSM7X30, 2, 44, 67),
+ MSM_GPIO_BANK(MSM7X30, 3, 68, 94),
+ MSM_GPIO_BANK(MSM7X30, 4, 95, 106),
+ MSM_GPIO_BANK(MSM7X30, 5, 107, 133),
+ MSM_GPIO_BANK(MSM7X30, 6, 134, 150),
+ MSM_GPIO_BANK(MSM7X30, 7, 151, 181),
+};
+
+static struct msm_gpio_chip msm_gpio_chips_qsd8x50[] = {
+ MSM_GPIO_BANK(QSD8X50, 0, 0, 15),
+ MSM_GPIO_BANK(QSD8X50, 1, 16, 42),
+ MSM_GPIO_BANK(QSD8X50, 2, 43, 67),
+ MSM_GPIO_BANK(QSD8X50, 3, 68, 94),
+ MSM_GPIO_BANK(QSD8X50, 4, 95, 103),
+ MSM_GPIO_BANK(QSD8X50, 5, 104, 121),
+ MSM_GPIO_BANK(QSD8X50, 6, 122, 152),
+ MSM_GPIO_BANK(QSD8X50, 7, 153, 164),
+};
+
+static void msm_gpio_irq_ack(struct irq_data *d)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ msm_gpio_clear_detect_status(msm_chip,
+ d->irq - gpio_to_irq(msm_chip->chip.base));
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+}
+
+static void msm_gpio_irq_mask(struct irq_data *d)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
+
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ /* level triggered interrupts are also latched */
+ if (!(readl(msm_chip->regs.int_edge) & BIT(offset)))
+ msm_gpio_clear_detect_status(msm_chip, offset);
+ msm_chip->int_enable[0] &= ~BIT(offset);
+ writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+}
+
+static void msm_gpio_irq_unmask(struct irq_data *d)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
+
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ /* level triggered interrupts are also latched */
+ if (!(readl(msm_chip->regs.int_edge) & BIT(offset)))
+ msm_gpio_clear_detect_status(msm_chip, offset);
+ msm_chip->int_enable[0] |= BIT(offset);
+ writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+}
+
+static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
+
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+
+ if (on)
+ msm_chip->int_enable[1] |= BIT(offset);
+ else
+ msm_chip->int_enable[1] &= ~BIT(offset);
+
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+ return 0;
+}
+
+static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ unsigned long irq_flags;
+ struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d);
+ unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base);
+ unsigned val, mask = BIT(offset);
+
+ spin_lock_irqsave(&msm_chip->lock, irq_flags);
+ val = readl(msm_chip->regs.int_edge);
+ if (flow_type & IRQ_TYPE_EDGE_BOTH) {
+ writel(val | mask, msm_chip->regs.int_edge);
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ } else {
+ writel(val & ~mask, msm_chip->regs.int_edge);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ }
+ if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
+ msm_chip->both_edge_detect |= mask;
+ msm_gpio_update_both_edge_detect(msm_chip);
+ } else {
+ msm_chip->both_edge_detect &= ~mask;
+ val = readl(msm_chip->regs.int_pos);
+ if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH))
+ writel(val | mask, msm_chip->regs.int_pos);
+ else
+ writel(val & ~mask, msm_chip->regs.int_pos);
+ }
+ spin_unlock_irqrestore(&msm_chip->lock, irq_flags);
+ return 0;
+}
+
+static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ int i, j, mask;
+ unsigned val;
+
+ for (i = 0; i < msm_gpio_count; i++) {
+ struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i];
+ val = readl(msm_chip->regs.int_status);
+ val &= msm_chip->int_enable[0];
+ while (val) {
+ mask = val & -val;
+ j = fls(mask) - 1;
+ /* printk("%s %08x %08x bit %d gpio %d irq %d\n",
+			__func__, val, mask, j, msm_chip->chip.base + j,
+			FIRST_GPIO_IRQ + msm_chip->chip.base + j); */
+ val &= ~mask;
+ generic_handle_irq(FIRST_GPIO_IRQ +
+ msm_chip->chip.base + j);
+ }
+ }
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
+}
+
+static struct irq_chip msm_gpio_irq_chip = {
+ .name = "msmgpio",
+ .irq_ack = msm_gpio_irq_ack,
+ .irq_mask = msm_gpio_irq_mask,
+ .irq_unmask = msm_gpio_irq_unmask,
+ .irq_set_wake = msm_gpio_irq_set_wake,
+ .irq_set_type = msm_gpio_irq_set_type,
+};
+
+static int __init msm_init_gpio(void)
+{
+ int i, j = 0;
+
+ if (cpu_is_msm7x01()) {
+ msm_gpio_chips = msm_gpio_chips_msm7x01;
+ msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_msm7x01);
+ } else if (cpu_is_msm7x30()) {
+ msm_gpio_chips = msm_gpio_chips_msm7x30;
+ msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_msm7x30);
+ } else if (cpu_is_qsd8x50()) {
+ msm_gpio_chips = msm_gpio_chips_qsd8x50;
+ msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_qsd8x50);
+ } else {
+ return 0;
+ }
+
+ for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) {
+ if (i - FIRST_GPIO_IRQ >=
+ msm_gpio_chips[j].chip.base +
+ msm_gpio_chips[j].chip.ngpio)
+ j++;
+ irq_set_chip_data(i, &msm_gpio_chips[j]);
+ irq_set_chip_and_handler(i, &msm_gpio_irq_chip,
+ handle_edge_irq);
+ set_irq_flags(i, IRQF_VALID);
+ }
+
+ for (i = 0; i < msm_gpio_count; i++) {
+ spin_lock_init(&msm_gpio_chips[i].lock);
+ writel(0, msm_gpio_chips[i].regs.int_en);
+ gpiochip_add(&msm_gpio_chips[i].chip);
+ }
+
+ irq_set_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler);
+ irq_set_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler);
+ irq_set_irq_wake(INT_GPIO_GROUP1, 1);
+	irq_set_irq_wake(INT_GPIO_GROUP2, 1);
+ return 0;
+}
+
+postcore_initcall(msm_init_gpio);
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
new file mode 100644
index 00000000000..5cb1227d69c
--- /dev/null
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -0,0 +1,433 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include <asm/mach/irq.h>
+
+#include <mach/msm_gpiomux.h>
+#include <mach/msm_iomap.h>
+
+/* Bits of interest in the GPIO_IN_OUT register.
+ */
+enum {
+ GPIO_IN = 0,
+ GPIO_OUT = 1
+};
+
+/* Bits of interest in the GPIO_INTR_STATUS register.
+ */
+enum {
+ INTR_STATUS = 0,
+};
+
+/* Bits of interest in the GPIO_CFG register.
+ */
+enum {
+ GPIO_OE = 9,
+};
+
+/* Bits of interest in the GPIO_INTR_CFG register.
+ * When a GPIO triggers, two separate decisions are made, controlled
+ * by two separate flags.
+ *
+ * - First, INTR_RAW_STATUS_EN controls whether or not the GPIO_INTR_STATUS
+ * register for that GPIO will be updated to reflect the triggering of that
+ * gpio. If this bit is 0, this register will not be updated.
+ * - Second, INTR_ENABLE controls whether an interrupt is triggered.
+ *
+ * If INTR_ENABLE is set and INTR_RAW_STATUS_EN is NOT set, an interrupt
+ * can be triggered but the status register will not reflect it.
+ */
+enum {
+ INTR_ENABLE = 0,
+ INTR_POL_CTL = 1,
+ INTR_DECT_CTL = 2,
+ INTR_RAW_STATUS_EN = 3,
+};
+
+/* Codes of interest in GPIO_INTR_CFG_SU.
+ */
+enum {
+ TARGET_PROC_SCORPION = 4,
+ TARGET_PROC_NONE = 7,
+};
+
+
+#define GPIO_INTR_CFG_SU(gpio) (MSM_TLMM_BASE + 0x0400 + (0x04 * (gpio)))
+#define GPIO_CONFIG(gpio) (MSM_TLMM_BASE + 0x1000 + (0x10 * (gpio)))
+#define GPIO_IN_OUT(gpio) (MSM_TLMM_BASE + 0x1004 + (0x10 * (gpio)))
+#define GPIO_INTR_CFG(gpio) (MSM_TLMM_BASE + 0x1008 + (0x10 * (gpio)))
+#define GPIO_INTR_STATUS(gpio) (MSM_TLMM_BASE + 0x100c + (0x10 * (gpio)))
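As the GPIO_INTR_CFG comment above explains, arming a line means setting both INTR_RAW_STATUS_EN and INTR_ENABLE and routing it to the application processor. A minimal sketch of that sequence using the macros above (function name is illustrative and locking is omitted; msm_gpio_irq_unmask() later in this file performs the same steps under tlmm_lock):

static void example_arm_gpio_irq(unsigned gpio)
{
	u32 cfg = readl(GPIO_INTR_CFG(gpio));

	/* latch status and generate an interrupt when the line triggers */
	cfg |= BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE);
	writel(cfg, GPIO_INTR_CFG(gpio));

	/* deliver the summary interrupt to the Scorpion core */
	writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
}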
+
+/**
+ * struct msm_gpio_dev: the MSM8660 SoC GPIO device structure
+ *
+ * @enabled_irqs: a bitmap used to optimize the summary-irq handler. By
+ * keeping track of which gpios are unmasked as irq sources, we avoid
+ * having to do readl calls on hundreds of iomapped registers each time
+ * the summary interrupt fires in order to locate the active interrupts.
+ *
+ * @wake_irqs: a bitmap for tracking which interrupt lines are enabled
+ * as wakeup sources. When the device is suspended, interrupts which are
+ * not wakeup sources are disabled.
+ *
+ * @dual_edge_irqs: a bitmap used to track which irqs are configured
+ * as dual-edge, as this is not supported by the hardware and requires
+ * some special handling in the driver.
+ */
+struct msm_gpio_dev {
+ struct gpio_chip gpio_chip;
+ DECLARE_BITMAP(enabled_irqs, NR_GPIO_IRQS);
+ DECLARE_BITMAP(wake_irqs, NR_GPIO_IRQS);
+ DECLARE_BITMAP(dual_edge_irqs, NR_GPIO_IRQS);
+};
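The wake_irqs bitmap documented above only records intent; a suspend path would consult it to keep wake sources armed while masking everything else. A hypothetical sketch of that use (this patch adds no suspend hook itself; the helper name is illustrative and assumes the clear_gpio_bits()/GPIO_INTR_CFG() helpers in this file):

static void example_mask_non_wake_irqs(struct msm_gpio_dev *dev)
{
	unsigned long gpio;

	/* walk only the lines currently unmasked as irq sources */
	for_each_set_bit(gpio, dev->enabled_irqs, NR_GPIO_IRQS)
		if (!test_bit(gpio, dev->wake_irqs))
			clear_gpio_bits(BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
}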
+
+static DEFINE_SPINLOCK(tlmm_lock);
+
+static inline struct msm_gpio_dev *to_msm_gpio_dev(struct gpio_chip *chip)
+{
+ return container_of(chip, struct msm_gpio_dev, gpio_chip);
+}
+
+static inline void set_gpio_bits(unsigned n, void __iomem *reg)
+{
+ writel(readl(reg) | n, reg);
+}
+
+static inline void clear_gpio_bits(unsigned n, void __iomem *reg)
+{
+ writel(readl(reg) & ~n, reg);
+}
+
+static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ return readl(GPIO_IN_OUT(offset)) & BIT(GPIO_IN);
+}
+
+static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+ writel(val ? BIT(GPIO_OUT) : 0, GPIO_IN_OUT(offset));
+}
+
+static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+ clear_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset));
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+ return 0;
+}
+
+static int msm_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset,
+ int val)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+ msm_gpio_set(chip, offset, val);
+ set_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset));
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+ return 0;
+}
+
+static int msm_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return msm_gpiomux_get(chip->base + offset);
+}
+
+static void msm_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ msm_gpiomux_put(chip->base + offset);
+}
+
+static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return MSM_GPIO_TO_INT(chip->base + offset);
+}
+
+static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
+{
+ return irq - MSM_GPIO_TO_INT(chip->base);
+}
+
+static struct msm_gpio_dev msm_gpio = {
+ .gpio_chip = {
+ .base = 0,
+ .ngpio = NR_GPIO_IRQS,
+ .direction_input = msm_gpio_direction_input,
+ .direction_output = msm_gpio_direction_output,
+ .get = msm_gpio_get,
+ .set = msm_gpio_set,
+ .to_irq = msm_gpio_to_irq,
+ .request = msm_gpio_request,
+ .free = msm_gpio_free,
+ },
+};
+
+/* For dual-edge interrupts in software, since the hardware has no
+ * such support:
+ *
+ * At appropriate moments, this function may be called to flip the polarity
+ * settings of both-edge irq lines to try and catch the next edge.
+ *
+ * The attempt is considered successful if:
+ * - the status bit goes high, indicating that an edge was caught, or
+ * - the input value of the gpio doesn't change during the attempt.
+ * If the value changes twice during the attempt, the first test may fail
+ * but the second is guaranteed to hold, and nothing is lost: two opposite
+ * transitions cause a detection no matter the polarity setting.
+ *
+ * The do-loop tries to sledge-hammer closed the timing hole between
+ * the initial value-read and the polarity-write - if the line value changes
+ * during that window, an interrupt is lost, the new polarity setting is
+ * incorrect, and the first success test will fail, causing a retry.
+ *
+ * Algorithm comes from Google's msmgpio driver, see mach-msm/gpio.c.
+ */
+static void msm_gpio_update_dual_edge_pos(unsigned gpio)
+{
+ int loop_limit = 100;
+ unsigned val, val2, intstat;
+
+ do {
+ val = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN);
+ if (val)
+ clear_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio));
+ else
+ set_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio));
+ val2 = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN);
+ intstat = readl(GPIO_INTR_STATUS(gpio)) & BIT(INTR_STATUS);
+ if (intstat || val == val2)
+ return;
+ } while (loop_limit-- > 0);
+ pr_err("dual-edge irq failed to stabilize, "
+ "interrupts dropped. %#08x != %#08x\n",
+ val, val2);
+}
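For illustration, the dual-edge emulation above is exercised whenever a client asks for a both-edge trigger: request_irq() with both edge flags lands in msm_gpio_irq_set_type(), which marks the line in dual_edge_irqs and calls the helper above. A minimal consumer sketch (gpio number, names and handler are hypothetical):

static irqreturn_t example_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_request_both_edges(unsigned gpio)
{
	int ret = gpio_request(gpio, "example-button");

	if (ret)
		return ret;
	gpio_direction_input(gpio);
	return request_irq(gpio_to_irq(gpio), example_handler,
			   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			   "example-button", NULL);
}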
+
+static void msm_gpio_irq_ack(struct irq_data *d)
+{
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+
+ writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio));
+ if (test_bit(gpio, msm_gpio.dual_edge_irqs))
+ msm_gpio_update_dual_edge_pos(gpio);
+}
+
+static void msm_gpio_irq_mask(struct irq_data *d)
+{
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+ writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
+	clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
+ __clear_bit(gpio, msm_gpio.enabled_irqs);
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+}
+
+static void msm_gpio_irq_unmask(struct irq_data *d)
+{
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+ __set_bit(gpio, msm_gpio.enabled_irqs);
+	set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
+ writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+}
+
+static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+ unsigned long irq_flags;
+ uint32_t bits;
+
+ spin_lock_irqsave(&tlmm_lock, irq_flags);
+
+ bits = readl(GPIO_INTR_CFG(gpio));
+
+ if (flow_type & IRQ_TYPE_EDGE_BOTH) {
+ bits |= BIT(INTR_DECT_CTL);
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ __set_bit(gpio, msm_gpio.dual_edge_irqs);
+ else
+ __clear_bit(gpio, msm_gpio.dual_edge_irqs);
+ } else {
+ bits &= ~BIT(INTR_DECT_CTL);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ __clear_bit(gpio, msm_gpio.dual_edge_irqs);
+ }
+
+ if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
+ bits |= BIT(INTR_POL_CTL);
+ else
+ bits &= ~BIT(INTR_POL_CTL);
+
+ writel(bits, GPIO_INTR_CFG(gpio));
+
+ if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ msm_gpio_update_dual_edge_pos(gpio);
+
+ spin_unlock_irqrestore(&tlmm_lock, irq_flags);
+
+ return 0;
+}
+
+/*
+ * When the summary IRQ is raised, any number of GPIO lines may be high.
+ * It is the job of the summary handler to find all those GPIO lines
+ * which have been set as summary IRQ lines and which are triggered,
+ * and to call their interrupt handlers.
+ */
+static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned long i;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ for (i = find_first_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS);
+ i < NR_GPIO_IRQS;
+ i = find_next_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS, i + 1)) {
+ if (readl(GPIO_INTR_STATUS(i)) & BIT(INTR_STATUS))
+ generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip,
+ i));
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+
+ if (on) {
+ if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS))
+ irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 1);
+ set_bit(gpio, msm_gpio.wake_irqs);
+ } else {
+ clear_bit(gpio, msm_gpio.wake_irqs);
+ if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS))
+ irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 0);
+ }
+
+ return 0;
+}
+
+static struct irq_chip msm_gpio_irq_chip = {
+ .name = "msmgpio",
+ .irq_mask = msm_gpio_irq_mask,
+ .irq_unmask = msm_gpio_irq_unmask,
+ .irq_ack = msm_gpio_irq_ack,
+ .irq_set_type = msm_gpio_irq_set_type,
+ .irq_set_wake = msm_gpio_irq_set_wake,
+};
+
+static int __devinit msm_gpio_probe(struct platform_device *dev)
+{
+ int i, irq, ret;
+
+ bitmap_zero(msm_gpio.enabled_irqs, NR_GPIO_IRQS);
+ bitmap_zero(msm_gpio.wake_irqs, NR_GPIO_IRQS);
+ bitmap_zero(msm_gpio.dual_edge_irqs, NR_GPIO_IRQS);
+ msm_gpio.gpio_chip.label = dev->name;
+ ret = gpiochip_add(&msm_gpio.gpio_chip);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < msm_gpio.gpio_chip.ngpio; ++i) {
+ irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i);
+ irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
+ handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+
+ irq_set_chained_handler(TLMM_SCSS_SUMMARY_IRQ,
+ msm_summary_irq_handler);
+ return 0;
+}
+
+static int __devexit msm_gpio_remove(struct platform_device *dev)
+{
+ int ret = gpiochip_remove(&msm_gpio.gpio_chip);
+
+ if (ret < 0)
+ return ret;
+
+ irq_set_handler(TLMM_SCSS_SUMMARY_IRQ, NULL);
+
+ return 0;
+}
+
+static struct platform_driver msm_gpio_driver = {
+ .probe = msm_gpio_probe,
+ .remove = __devexit_p(msm_gpio_remove),
+ .driver = {
+ .name = "msmgpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct platform_device msm_device_gpio = {
+ .name = "msmgpio",
+ .id = -1,
+};
+
+static int __init msm_gpio_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&msm_gpio_driver);
+ if (!rc) {
+ rc = platform_device_register(&msm_device_gpio);
+ if (rc)
+ platform_driver_unregister(&msm_gpio_driver);
+ }
+
+ return rc;
+}
+
+static void __exit msm_gpio_exit(void)
+{
+ platform_device_unregister(&msm_device_gpio);
+ platform_driver_unregister(&msm_gpio_driver);
+}
+
+postcore_initcall(msm_gpio_init);
+module_exit(msm_gpio_exit);
+
+MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>");
+MODULE_DESCRIPTION("Driver for Qualcomm MSM TLMMv2 SoC GPIOs");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:msmgpio");
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
new file mode 100644
index 00000000000..4340acae3bd
--- /dev/null
+++ b/drivers/gpio/gpio-mxc.c
@@ -0,0 +1,460 @@
+/*
+ * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ *
+ * Based on code from Freescale,
+ * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/basic_mmio_gpio.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <asm-generic/bug.h>
+
+enum mxc_gpio_hwtype {
+ IMX1_GPIO, /* runs on i.mx1 */
+ IMX21_GPIO, /* runs on i.mx21 and i.mx27 */
+ IMX31_GPIO, /* runs on all other i.mx */
+};
+
+/* device type dependent stuff */
+struct mxc_gpio_hwdata {
+ unsigned dr_reg;
+ unsigned gdir_reg;
+ unsigned psr_reg;
+ unsigned icr1_reg;
+ unsigned icr2_reg;
+ unsigned imr_reg;
+ unsigned isr_reg;
+ unsigned low_level;
+ unsigned high_level;
+ unsigned rise_edge;
+ unsigned fall_edge;
+};
+
+struct mxc_gpio_port {
+ struct list_head node;
+ void __iomem *base;
+ int irq;
+ int irq_high;
+ int virtual_irq_start;
+ struct bgpio_chip bgc;
+ u32 both_edges;
+};
+
+static struct mxc_gpio_hwdata imx1_imx21_gpio_hwdata = {
+ .dr_reg = 0x1c,
+ .gdir_reg = 0x00,
+ .psr_reg = 0x24,
+ .icr1_reg = 0x28,
+ .icr2_reg = 0x2c,
+ .imr_reg = 0x30,
+ .isr_reg = 0x34,
+ .low_level = 0x03,
+ .high_level = 0x02,
+ .rise_edge = 0x00,
+ .fall_edge = 0x01,
+};
+
+static struct mxc_gpio_hwdata imx31_gpio_hwdata = {
+ .dr_reg = 0x00,
+ .gdir_reg = 0x04,
+ .psr_reg = 0x08,
+ .icr1_reg = 0x0c,
+ .icr2_reg = 0x10,
+ .imr_reg = 0x14,
+ .isr_reg = 0x18,
+ .low_level = 0x00,
+ .high_level = 0x01,
+ .rise_edge = 0x02,
+ .fall_edge = 0x03,
+};
+
+static enum mxc_gpio_hwtype mxc_gpio_hwtype;
+static struct mxc_gpio_hwdata *mxc_gpio_hwdata;
+
+#define GPIO_DR (mxc_gpio_hwdata->dr_reg)
+#define GPIO_GDIR (mxc_gpio_hwdata->gdir_reg)
+#define GPIO_PSR (mxc_gpio_hwdata->psr_reg)
+#define GPIO_ICR1 (mxc_gpio_hwdata->icr1_reg)
+#define GPIO_ICR2 (mxc_gpio_hwdata->icr2_reg)
+#define GPIO_IMR (mxc_gpio_hwdata->imr_reg)
+#define GPIO_ISR (mxc_gpio_hwdata->isr_reg)
+
+#define GPIO_INT_LOW_LEV (mxc_gpio_hwdata->low_level)
+#define GPIO_INT_HIGH_LEV (mxc_gpio_hwdata->high_level)
+#define GPIO_INT_RISE_EDGE (mxc_gpio_hwdata->rise_edge)
+#define GPIO_INT_FALL_EDGE (mxc_gpio_hwdata->fall_edge)
+#define GPIO_INT_NONE 0x4
+
+static struct platform_device_id mxc_gpio_devtype[] = {
+ {
+ .name = "imx1-gpio",
+ .driver_data = IMX1_GPIO,
+ }, {
+ .name = "imx21-gpio",
+ .driver_data = IMX21_GPIO,
+ }, {
+ .name = "imx31-gpio",
+ .driver_data = IMX31_GPIO,
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct of_device_id mxc_gpio_dt_ids[] = {
+ { .compatible = "fsl,imx1-gpio", .data = &mxc_gpio_devtype[IMX1_GPIO], },
+ { .compatible = "fsl,imx21-gpio", .data = &mxc_gpio_devtype[IMX21_GPIO], },
+ { .compatible = "fsl,imx31-gpio", .data = &mxc_gpio_devtype[IMX31_GPIO], },
+ { /* sentinel */ }
+};
+
+/*
+ * MX2 has one interrupt *for all* gpio ports. The list is used
+ * to save the references to all ports, so that mx2_gpio_irq_handler
+ * can walk through all interrupt status registers.
+ */
+static LIST_HEAD(mxc_gpio_ports);
+
+/* Note: This driver assumes 32 GPIOs are handled in one register */
+
+static int gpio_set_irq_type(struct irq_data *d, u32 type)
+{
+ u32 gpio = irq_to_gpio(d->irq);
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mxc_gpio_port *port = gc->private;
+ u32 bit, val;
+ int edge;
+ void __iomem *reg = port->base;
+
+ port->both_edges &= ~(1 << (gpio & 31));
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ edge = GPIO_INT_RISE_EDGE;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge = GPIO_INT_FALL_EDGE;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ val = gpio_get_value(gpio);
+ if (val) {
+ edge = GPIO_INT_LOW_LEV;
+ pr_debug("mxc: set GPIO %d to low trigger\n", gpio);
+ } else {
+ edge = GPIO_INT_HIGH_LEV;
+ pr_debug("mxc: set GPIO %d to high trigger\n", gpio);
+ }
+ port->both_edges |= 1 << (gpio & 31);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ edge = GPIO_INT_LOW_LEV;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ edge = GPIO_INT_HIGH_LEV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
+ bit = gpio & 0xf;
+ val = readl(reg) & ~(0x3 << (bit << 1));
+ writel(val | (edge << (bit << 1)), reg);
+ writel(1 << (gpio & 0x1f), port->base + GPIO_ISR);
+
+ return 0;
+}
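The addressing arithmetic above packs two ICR bits per line, with gpios 0-15 in ICR1 and 16-31 in ICR2, which sits 4 bytes above ICR1 on every supported variant. A small worked sketch of the same computation (helper name is illustrative): for gpio 21, (21 & 0x10) >> 2 = 4 selects ICR2, and the field occupies bits 10-11.

static void __iomem *example_icr_reg(struct mxc_gpio_port *port, u32 gpio,
				     u32 *shift)
{
	*shift = (gpio & 0xf) << 1;		/* 2 bits per line */
	return port->base + GPIO_ICR1 + ((gpio & 0x10) >> 2);
}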
+
+static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
+{
+ void __iomem *reg = port->base;
+ u32 bit, val;
+ int edge;
+
+ reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
+ bit = gpio & 0xf;
+ val = readl(reg);
+ edge = (val >> (bit << 1)) & 3;
+ val &= ~(0x3 << (bit << 1));
+ if (edge == GPIO_INT_HIGH_LEV) {
+ edge = GPIO_INT_LOW_LEV;
+ pr_debug("mxc: switch GPIO %d to low trigger\n", gpio);
+ } else if (edge == GPIO_INT_LOW_LEV) {
+ edge = GPIO_INT_HIGH_LEV;
+ pr_debug("mxc: switch GPIO %d to high trigger\n", gpio);
+ } else {
+ pr_err("mxc: invalid configuration for GPIO %d: %x\n",
+ gpio, edge);
+ return;
+ }
+ writel(val | (edge << (bit << 1)), reg);
+}
+
+/* handle 32 interrupts in one status register */
+static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
+{
+ u32 gpio_irq_no_base = port->virtual_irq_start;
+
+ while (irq_stat != 0) {
+ int irqoffset = fls(irq_stat) - 1;
+
+ if (port->both_edges & (1 << irqoffset))
+ mxc_flip_edge(port, irqoffset);
+
+ generic_handle_irq(gpio_irq_no_base + irqoffset);
+
+ irq_stat &= ~(1 << irqoffset);
+ }
+}
+
+/* MX1 and MX3 have one interrupt *per* gpio port */
+static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+{
+ u32 irq_stat;
+ struct mxc_gpio_port *port = irq_get_handler_data(irq);
+
+ irq_stat = readl(port->base + GPIO_ISR) & readl(port->base + GPIO_IMR);
+
+ mxc_gpio_irq_handler(port, irq_stat);
+}
+
+/* MX2 has one interrupt *for all* gpio ports */
+static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+{
+ u32 irq_msk, irq_stat;
+ struct mxc_gpio_port *port;
+
+ /* walk through all interrupt status registers */
+ list_for_each_entry(port, &mxc_gpio_ports, node) {
+ irq_msk = readl(port->base + GPIO_IMR);
+ if (!irq_msk)
+ continue;
+
+ irq_stat = readl(port->base + GPIO_ISR) & irq_msk;
+ if (irq_stat)
+ mxc_gpio_irq_handler(port, irq_stat);
+ }
+}
+
+/*
+ * Set the GPIO interrupt behind irq_data "d" as a wake-up source.
+ * While the system is running, all registered GPIO interrupts need to have
+ * wake-up enabled. When the system is suspended, only the selected GPIO
+ * interrupts need to have wake-up enabled.
+ * @param d	 irq_data of the interrupt to configure
+ * @param enable non-zero to enable the interrupt as a wake-up source
+ * @return 0 on success.
+ */
+static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
+{
+ u32 gpio = irq_to_gpio(d->irq);
+ u32 gpio_idx = gpio & 0x1F;
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mxc_gpio_port *port = gc->private;
+
+ if (enable) {
+ if (port->irq_high && (gpio_idx >= 16))
+ enable_irq_wake(port->irq_high);
+ else
+ enable_irq_wake(port->irq);
+ } else {
+ if (port->irq_high && (gpio_idx >= 16))
+ disable_irq_wake(port->irq_high);
+ else
+ disable_irq_wake(port->irq);
+ }
+
+ return 0;
+}
+
+static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip("gpio-mxc", 1, port->virtual_irq_start,
+ port->base, handle_level_irq);
+ gc->private = port;
+
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->chip.irq_set_type = gpio_set_irq_type;
+ ct->chip.irq_set_wake = gpio_set_wake_irq;
+ ct->regs.ack = GPIO_ISR;
+ ct->regs.mask = GPIO_IMR;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
+ IRQ_NOREQUEST, 0);
+}
+
+static void __devinit mxc_gpio_get_hw(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(mxc_gpio_dt_ids, &pdev->dev);
+ enum mxc_gpio_hwtype hwtype;
+
+ if (of_id)
+ pdev->id_entry = of_id->data;
+ hwtype = pdev->id_entry->driver_data;
+
+ if (mxc_gpio_hwtype) {
+ /*
+		 * The driver relies on a reasonable assumption:
+		 * all gpio ports on one SoC must be of the
+		 * same type.
+ */
+ BUG_ON(mxc_gpio_hwtype != hwtype);
+ return;
+ }
+
+ if (hwtype == IMX31_GPIO)
+ mxc_gpio_hwdata = &imx31_gpio_hwdata;
+ else
+ mxc_gpio_hwdata = &imx1_imx21_gpio_hwdata;
+
+ mxc_gpio_hwtype = hwtype;
+}
+
+static int __devinit mxc_gpio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mxc_gpio_port *port;
+ struct resource *iores;
+ int err;
+
+ mxc_gpio_get_hw(pdev);
+
+ port = kzalloc(sizeof(struct mxc_gpio_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores) {
+ err = -ENODEV;
+ goto out_kfree;
+ }
+
+ if (!request_mem_region(iores->start, resource_size(iores),
+ pdev->name)) {
+ err = -EBUSY;
+ goto out_kfree;
+ }
+
+ port->base = ioremap(iores->start, resource_size(iores));
+ if (!port->base) {
+ err = -ENOMEM;
+ goto out_release_mem;
+ }
+
+ port->irq_high = platform_get_irq(pdev, 1);
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0) {
+ err = -EINVAL;
+ goto out_iounmap;
+ }
+
+ /* disable the interrupt and clear the status */
+ writel(0, port->base + GPIO_IMR);
+ writel(~0, port->base + GPIO_ISR);
+
+ if (mxc_gpio_hwtype == IMX21_GPIO) {
+ /* setup one handler for all GPIO interrupts */
+ if (pdev->id == 0)
+ irq_set_chained_handler(port->irq,
+ mx2_gpio_irq_handler);
+ } else {
+ /* setup one handler for each entry */
+ irq_set_chained_handler(port->irq, mx3_gpio_irq_handler);
+ irq_set_handler_data(port->irq, port);
+ if (port->irq_high > 0) {
+ /* setup handler for GPIO 16 to 31 */
+ irq_set_chained_handler(port->irq_high,
+ mx3_gpio_irq_handler);
+ irq_set_handler_data(port->irq_high, port);
+ }
+ }
+
+ err = bgpio_init(&port->bgc, &pdev->dev, 4,
+ port->base + GPIO_PSR,
+ port->base + GPIO_DR, NULL,
+ port->base + GPIO_GDIR, NULL, false);
+ if (err)
+ goto out_iounmap;
+
+ port->bgc.gc.base = pdev->id * 32;
+ port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir);
+ port->bgc.data = port->bgc.read_reg(port->bgc.reg_set);
+
+ err = gpiochip_add(&port->bgc.gc);
+ if (err)
+ goto out_bgpio_remove;
+
+ /*
+	 * In the device tree case, use the gpio number range
+	 * dynamically allocated by the gpio core.
+ */
+ port->virtual_irq_start = MXC_GPIO_IRQ_START + (np ? port->bgc.gc.base :
+ pdev->id * 32);
+
+ /* gpio-mxc can be a generic irq chip */
+ mxc_gpio_init_gc(port);
+
+ list_add_tail(&port->node, &mxc_gpio_ports);
+
+ return 0;
+
+out_bgpio_remove:
+ bgpio_remove(&port->bgc);
+out_iounmap:
+ iounmap(port->base);
+out_release_mem:
+ release_mem_region(iores->start, resource_size(iores));
+out_kfree:
+ kfree(port);
+ dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err);
+ return err;
+}
+
+static struct platform_driver mxc_gpio_driver = {
+ .driver = {
+ .name = "gpio-mxc",
+ .owner = THIS_MODULE,
+ .of_match_table = mxc_gpio_dt_ids,
+ },
+ .probe = mxc_gpio_probe,
+ .id_table = mxc_gpio_devtype,
+};
+
+static int __init gpio_mxc_init(void)
+{
+ return platform_driver_register(&mxc_gpio_driver);
+}
+postcore_initcall(gpio_mxc_init);
+
+MODULE_AUTHOR("Freescale Semiconductor, "
+ "Daniel Mack <danielncaiaq.de>, "
+ "Juergen Beisert <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Freescale MXC GPIO");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
new file mode 100644
index 00000000000..af55a8577c2
--- /dev/null
+++ b/drivers/gpio/gpio-mxs.c
@@ -0,0 +1,289 @@
+/*
+ * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ *
+ * Based on code from Freescale,
+ * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/basic_mmio_gpio.h>
+#include <mach/mxs.h>
+
+#define MXS_SET 0x4
+#define MXS_CLR 0x8
+
+#define PINCTRL_DOUT(n) ((cpu_is_mx23() ? 0x0500 : 0x0700) + (n) * 0x10)
+#define PINCTRL_DIN(n) ((cpu_is_mx23() ? 0x0600 : 0x0900) + (n) * 0x10)
+#define PINCTRL_DOE(n) ((cpu_is_mx23() ? 0x0700 : 0x0b00) + (n) * 0x10)
+#define PINCTRL_PIN2IRQ(n) ((cpu_is_mx23() ? 0x0800 : 0x1000) + (n) * 0x10)
+#define PINCTRL_IRQEN(n) ((cpu_is_mx23() ? 0x0900 : 0x1100) + (n) * 0x10)
+#define PINCTRL_IRQLEV(n) ((cpu_is_mx23() ? 0x0a00 : 0x1200) + (n) * 0x10)
+#define PINCTRL_IRQPOL(n) ((cpu_is_mx23() ? 0x0b00 : 0x1300) + (n) * 0x10)
+#define PINCTRL_IRQSTAT(n) ((cpu_is_mx23() ? 0x0c00 : 0x1400) + (n) * 0x10)
+
+#define GPIO_INT_FALL_EDGE 0x0
+#define GPIO_INT_LOW_LEV 0x1
+#define GPIO_INT_RISE_EDGE 0x2
+#define GPIO_INT_HIGH_LEV 0x3
+#define GPIO_INT_LEV_MASK (1 << 0)
+#define GPIO_INT_POL_MASK (1 << 1)
+
+struct mxs_gpio_port {
+ void __iomem *base;
+ int id;
+ int irq;
+ int virtual_irq_start;
+ struct bgpio_chip bgc;
+};
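The MXS pinctrl block exposes SET and CLR aliases of each register at +0x4 and +0x8, so single bits can be flipped without a read-modify-write; that is the convention behind every "pin_addr + MXS_SET/MXS_CLR" write below. A minimal sketch (helper names are illustrative):

static void example_irq_enable_bit(struct mxs_gpio_port *port, u32 pin_mask)
{
	/* sets only the bits in pin_mask, leaving the others untouched */
	writel(pin_mask, port->base + PINCTRL_IRQEN(port->id) + MXS_SET);
}

static void example_irq_disable_bit(struct mxs_gpio_port *port, u32 pin_mask)
{
	/* clears only the bits in pin_mask */
	writel(pin_mask, port->base + PINCTRL_IRQEN(port->id) + MXS_CLR);
}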
+
+/* Note: This driver assumes 32 GPIOs are handled in one register */
+
+static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ u32 gpio = irq_to_gpio(d->irq);
+ u32 pin_mask = 1 << (gpio & 31);
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mxs_gpio_port *port = gc->private;
+ void __iomem *pin_addr;
+ int edge;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ edge = GPIO_INT_RISE_EDGE;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge = GPIO_INT_FALL_EDGE;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ edge = GPIO_INT_LOW_LEV;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ edge = GPIO_INT_HIGH_LEV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set level or edge */
+ pin_addr = port->base + PINCTRL_IRQLEV(port->id);
+ if (edge & GPIO_INT_LEV_MASK)
+ writel(pin_mask, pin_addr + MXS_SET);
+ else
+ writel(pin_mask, pin_addr + MXS_CLR);
+
+ /* set polarity */
+ pin_addr = port->base + PINCTRL_IRQPOL(port->id);
+ if (edge & GPIO_INT_POL_MASK)
+ writel(pin_mask, pin_addr + MXS_SET);
+ else
+ writel(pin_mask, pin_addr + MXS_CLR);
+
+ writel(1 << (gpio & 0x1f),
+ port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);
+
+ return 0;
+}
+
+/* MXS has one interrupt *per* gpio port */
+static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+{
+ u32 irq_stat;
+ struct mxs_gpio_port *port = irq_get_handler_data(irq);
+ u32 gpio_irq_no_base = port->virtual_irq_start;
+
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
+
+ irq_stat = readl(port->base + PINCTRL_IRQSTAT(port->id)) &
+ readl(port->base + PINCTRL_IRQEN(port->id));
+
+ while (irq_stat != 0) {
+ int irqoffset = fls(irq_stat) - 1;
+ generic_handle_irq(gpio_irq_no_base + irqoffset);
+ irq_stat &= ~(1 << irqoffset);
+ }
+}
+
+/*
+ * Set the GPIO interrupt behind irq_data "d" as a wake-up source.
+ * While the system is running, all registered GPIO interrupts need to have
+ * wake-up enabled. When the system is suspended, only the selected GPIO
+ * interrupts need to have wake-up enabled.
+ * @param d	 irq_data of the interrupt to configure
+ * @param enable non-zero to enable the interrupt as a wake-up source
+ * @return 0 on success.
+ */
+static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mxs_gpio_port *port = gc->private;
+
+ if (enable)
+ enable_irq_wake(port->irq);
+ else
+ disable_irq_wake(port->irq);
+
+ return 0;
+}
+
+static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip("gpio-mxs", 1, port->virtual_irq_start,
+ port->base, handle_level_irq);
+ gc->private = port;
+
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->chip.irq_set_type = mxs_gpio_set_irq_type;
+ ct->chip.irq_set_wake = mxs_gpio_set_wake_irq;
+ ct->regs.ack = PINCTRL_IRQSTAT(port->id) + MXS_CLR;
+ ct->regs.mask = PINCTRL_IRQEN(port->id);
+
+ irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
+}
+
+static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+ struct mxs_gpio_port *port =
+ container_of(bgc, struct mxs_gpio_port, bgc);
+
+ return port->virtual_irq_start + offset;
+}
+
+static int __devinit mxs_gpio_probe(struct platform_device *pdev)
+{
+ static void __iomem *base;
+ struct mxs_gpio_port *port;
+ struct resource *iores = NULL;
+ int err;
+
+ port = kzalloc(sizeof(struct mxs_gpio_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->id = pdev->id;
+ port->virtual_irq_start = MXS_GPIO_IRQ_START + port->id * 32;
+
+ /*
+ * map memory region only once, as all the gpio ports
+ * share the same one
+ */
+ if (!base) {
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores) {
+ err = -ENODEV;
+ goto out_kfree;
+ }
+
+ if (!request_mem_region(iores->start, resource_size(iores),
+ pdev->name)) {
+ err = -EBUSY;
+ goto out_kfree;
+ }
+
+ base = ioremap(iores->start, resource_size(iores));
+ if (!base) {
+ err = -ENOMEM;
+ goto out_release_mem;
+ }
+ }
+ port->base = base;
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0) {
+ err = -EINVAL;
+ goto out_iounmap;
+ }
+
+ /*
+ * select the pin interrupt functionality but initially
+ * disable the interrupts
+ */
+ writel(~0U, port->base + PINCTRL_PIN2IRQ(port->id));
+ writel(0, port->base + PINCTRL_IRQEN(port->id));
+
+ /* clear address has to be used to clear IRQSTAT bits */
+ writel(~0U, port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);
+
+ /* gpio-mxs can be a generic irq chip */
+ mxs_gpio_init_gc(port);
+
+ /* setup one handler for each entry */
+ irq_set_chained_handler(port->irq, mxs_gpio_irq_handler);
+ irq_set_handler_data(port->irq, port);
+
+ err = bgpio_init(&port->bgc, &pdev->dev, 4,
+ port->base + PINCTRL_DIN(port->id),
+ port->base + PINCTRL_DOUT(port->id), NULL,
+ port->base + PINCTRL_DOE(port->id), NULL, false);
+ if (err)
+ goto out_iounmap;
+
+ port->bgc.gc.to_irq = mxs_gpio_to_irq;
+ port->bgc.gc.base = port->id * 32;
+
+ err = gpiochip_add(&port->bgc.gc);
+ if (err)
+ goto out_bgpio_remove;
+
+ return 0;
+
+out_bgpio_remove:
+ bgpio_remove(&port->bgc);
+out_iounmap:
+ if (iores)
+ iounmap(port->base);
+out_release_mem:
+ if (iores)
+ release_mem_region(iores->start, resource_size(iores));
+out_kfree:
+ kfree(port);
+ dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err);
+ return err;
+}
+
+static struct platform_driver mxs_gpio_driver = {
+ .driver = {
+ .name = "gpio-mxs",
+ .owner = THIS_MODULE,
+ },
+ .probe = mxs_gpio_probe,
+};
+
+static int __init mxs_gpio_init(void)
+{
+ return platform_driver_register(&mxs_gpio_driver);
+}
+postcore_initcall(mxs_gpio_init);
+
+MODULE_AUTHOR("Freescale Semiconductor, "
+ "Daniel Mack <danielncaiaq.de>, "
+ "Juergen Beisert <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Freescale MXS GPIO");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 35bebde23e8..0599854e221 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -54,6 +54,11 @@ struct gpio_bank {
struct device *dev;
bool dbck_flag;
int stride;
+ u32 width;
+
+ void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
+
+ struct omap_gpio_reg_offs *regs;
};
#ifdef CONFIG_ARCH_OMAP3
@@ -79,121 +84,18 @@ static struct omap3_gpio_regs gpio_context[OMAP34XX_NR_GPIOS];
*/
static struct gpio_bank *gpio_bank;
-static int bank_width;
-
/* TODO: Analyze removing gpio_bank_count usage from driver code */
int gpio_bank_count;
-static inline struct gpio_bank *get_gpio_bank(int gpio)
-{
- if (cpu_is_omap15xx()) {
- if (OMAP_GPIO_IS_MPUIO(gpio))
- return &gpio_bank[0];
- return &gpio_bank[1];
- }
- if (cpu_is_omap16xx()) {
- if (OMAP_GPIO_IS_MPUIO(gpio))
- return &gpio_bank[0];
- return &gpio_bank[1 + (gpio >> 4)];
- }
- if (cpu_is_omap7xx()) {
- if (OMAP_GPIO_IS_MPUIO(gpio))
- return &gpio_bank[0];
- return &gpio_bank[1 + (gpio >> 5)];
- }
- if (cpu_is_omap24xx())
- return &gpio_bank[gpio >> 5];
- if (cpu_is_omap34xx() || cpu_is_omap44xx())
- return &gpio_bank[gpio >> 5];
- BUG();
- return NULL;
-}
-
-static inline int get_gpio_index(int gpio)
-{
- if (cpu_is_omap7xx())
- return gpio & 0x1f;
- if (cpu_is_omap24xx())
- return gpio & 0x1f;
- if (cpu_is_omap34xx() || cpu_is_omap44xx())
- return gpio & 0x1f;
- return gpio & 0x0f;
-}
-
-static inline int gpio_valid(int gpio)
-{
- if (gpio < 0)
- return -1;
- if (cpu_class_is_omap1() && OMAP_GPIO_IS_MPUIO(gpio)) {
- if (gpio >= OMAP_MAX_GPIO_LINES + 16)
- return -1;
- return 0;
- }
- if (cpu_is_omap15xx() && gpio < 16)
- return 0;
- if ((cpu_is_omap16xx()) && gpio < 64)
- return 0;
- if (cpu_is_omap7xx() && gpio < 192)
- return 0;
- if (cpu_is_omap2420() && gpio < 128)
- return 0;
- if (cpu_is_omap2430() && gpio < 160)
- return 0;
- if ((cpu_is_omap34xx() || cpu_is_omap44xx()) && gpio < 192)
- return 0;
- return -1;
-}
-
-static int check_gpio(int gpio)
-{
- if (unlikely(gpio_valid(gpio) < 0)) {
- printk(KERN_ERR "omap-gpio: invalid GPIO %d\n", gpio);
- dump_stack();
- return -1;
- }
- return 0;
-}
+#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
+#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
{
void __iomem *reg = bank->base;
u32 l;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_IO_CNTL / bank->stride;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DIR_CONTROL;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_DIRECTION;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DIR_CONTROL;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_OE;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP4)
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_OE;
- break;
-#endif
- default:
- WARN_ON(1);
- return;
- }
+ reg += bank->regs->direction;
l = __raw_readl(reg);
if (is_input)
l |= 1 << gpio;
@@ -202,165 +104,48 @@ static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
__raw_writel(l, reg);
}
-static void _set_gpio_dataout(struct gpio_bank *bank, int gpio, int enable)
+
+/* set data out value using dedicate set/clear register */
+static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
{
void __iomem *reg = bank->base;
- u32 l = 0;
+ u32 l = GPIO_BIT(bank, gpio);
+
+ if (enable)
+ reg += bank->regs->set_dataout;
+ else
+ reg += bank->regs->clr_dataout;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_OUTPUT / bank->stride;
- l = __raw_readl(reg);
- if (enable)
- l |= 1 << gpio;
- else
- l &= ~(1 << gpio);
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DATA_OUTPUT;
- l = __raw_readl(reg);
- if (enable)
- l |= 1 << gpio;
- else
- l &= ~(1 << gpio);
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- if (enable)
- reg += OMAP1610_GPIO_SET_DATAOUT;
- else
- reg += OMAP1610_GPIO_CLEAR_DATAOUT;
- l = 1 << gpio;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DATA_OUTPUT;
- l = __raw_readl(reg);
- if (enable)
- l |= 1 << gpio;
- else
- l &= ~(1 << gpio);
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- if (enable)
- reg += OMAP24XX_GPIO_SETDATAOUT;
- else
- reg += OMAP24XX_GPIO_CLEARDATAOUT;
- l = 1 << gpio;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- if (enable)
- reg += OMAP4_GPIO_SETDATAOUT;
- else
- reg += OMAP4_GPIO_CLEARDATAOUT;
- l = 1 << gpio;
- break;
-#endif
- default:
- WARN_ON(1);
- return;
- }
__raw_writel(l, reg);
}
-static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
+/* set data out value using mask register */
+static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
{
- void __iomem *reg;
+ void __iomem *reg = bank->base + bank->regs->dataout;
+ u32 gpio_bit = GPIO_BIT(bank, gpio);
+ u32 l;
- if (check_gpio(gpio) < 0)
- return -EINVAL;
- reg = bank->base;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_INPUT_LATCH / bank->stride;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DATA_INPUT;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_DATAIN;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DATA_INPUT;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_DATAIN;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_DATAIN;
- break;
-#endif
- default:
- return -EINVAL;
- }
- return (__raw_readl(reg)
- & (1 << get_gpio_index(gpio))) != 0;
+ l = __raw_readl(reg);
+ if (enable)
+ l |= gpio_bit;
+ else
+ l &= ~gpio_bit;
+ __raw_writel(l, reg);
}
-static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
+static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
{
- void __iomem *reg;
+ void __iomem *reg = bank->base + bank->regs->datain;
- if (check_gpio(gpio) < 0)
- return -EINVAL;
- reg = bank->base;
+ return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
+}
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_OUTPUT / bank->stride;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DATA_OUTPUT;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_DATAOUT;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DATA_OUTPUT;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_DATAOUT;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_DATAOUT;
- break;
-#endif
- default:
- return -EINVAL;
- }
+static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
+{
+ void __iomem *reg = bank->base + bank->regs->dataout;
- return (__raw_readl(reg) & (1 << get_gpio_index(gpio))) != 0;
+ return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
}
#define MOD_REG_BIT(reg, bit_mask, set) \
@@ -383,7 +168,7 @@ do { \
static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
unsigned debounce)
{
- void __iomem *reg = bank->base;
+ void __iomem *reg;
u32 val;
u32 l;
@@ -397,21 +182,12 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
else
debounce = (debounce / 0x1f) - 1;
- l = 1 << get_gpio_index(gpio);
-
- if (bank->method == METHOD_GPIO_44XX)
- reg += OMAP4_GPIO_DEBOUNCINGTIME;
- else
- reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
+ l = GPIO_BIT(bank, gpio);
+ reg = bank->base + bank->regs->debounce;
__raw_writel(debounce, reg);
- reg = bank->base;
- if (bank->method == METHOD_GPIO_44XX)
- reg += OMAP4_GPIO_DEBOUNCENABLE;
- else
- reg += OMAP24XX_GPIO_DEBOUNCE_EN;
-
+ reg = bank->base + bank->regs->debounce_en;
val = __raw_readl(reg);
if (debounce) {
@@ -629,9 +405,6 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
else
gpio = d->irq - IH_GPIO_BASE;
- if (check_gpio(gpio) < 0)
- return -EINVAL;
-
if (type & ~IRQ_TYPE_SENSE_MASK)
return -EINVAL;
@@ -642,7 +415,7 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
bank = irq_data_get_irq_chip_data(d);
spin_lock_irqsave(&bank->lock, flags);
- retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type);
+ retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
spin_unlock_irqrestore(&bank->lock, flags);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -657,195 +430,81 @@ static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
void __iomem *reg = bank->base;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- /* MPUIO irqstatus is reset by reading the status register,
- * so do nothing here */
- return;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_INT_STATUS;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_IRQSTATUS1;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_INT_STATUS;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_IRQSTATUS1;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP4)
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_IRQSTATUS0;
- break;
-#endif
- default:
- WARN_ON(1);
- return;
- }
+ reg += bank->regs->irqstatus;
__raw_writel(gpio_mask, reg);
/* Workaround for clearing DSP GPIO interrupts to allow retention */
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
- reg = bank->base + OMAP24XX_GPIO_IRQSTATUS2;
- else if (cpu_is_omap44xx())
- reg = bank->base + OMAP4_GPIO_IRQSTATUS1;
-
- if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
+ if (bank->regs->irqstatus2) {
+ reg = bank->base + bank->regs->irqstatus2;
__raw_writel(gpio_mask, reg);
+ }
/* Flush posted write for the irq status to avoid spurious interrupts */
__raw_readl(reg);
- }
}
static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
{
- _clear_gpio_irqbank(bank, 1 << get_gpio_index(gpio));
+ _clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}
static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
{
void __iomem *reg = bank->base;
- int inv = 0;
u32 l;
- u32 mask;
-
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_GPIO_MASKIT / bank->stride;
- mask = 0xffff;
- inv = 1;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_INT_MASK;
- mask = 0xffff;
- inv = 1;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_IRQENABLE1;
- mask = 0xffff;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_INT_MASK;
- mask = 0xffffffff;
- inv = 1;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_IRQENABLE1;
- mask = 0xffffffff;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP4)
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_IRQSTATUSSET0;
- mask = 0xffffffff;
- break;
-#endif
- default:
- WARN_ON(1);
- return 0;
- }
+ u32 mask = (1 << bank->width) - 1;
+ reg += bank->regs->irqenable;
l = __raw_readl(reg);
- if (inv)
+ if (bank->regs->irqenable_inv)
l = ~l;
l &= mask;
return l;
}
-static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask, int enable)
+static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
void __iomem *reg = bank->base;
u32 l;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_GPIO_MASKIT / bank->stride;
- l = __raw_readl(reg);
- if (enable)
- l &= ~(gpio_mask);
- else
- l |= gpio_mask;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_INT_MASK;
+ if (bank->regs->set_irqenable) {
+ reg += bank->regs->set_irqenable;
+ l = gpio_mask;
+ } else {
+ reg += bank->regs->irqenable;
l = __raw_readl(reg);
- if (enable)
- l &= ~(gpio_mask);
+ if (bank->regs->irqenable_inv)
+ l &= ~gpio_mask;
else
l |= gpio_mask;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- if (enable)
- reg += OMAP1610_GPIO_SET_IRQENABLE1;
- else
- reg += OMAP1610_GPIO_CLEAR_IRQENABLE1;
+ }
+
+ __raw_writel(l, reg);
+}
+
+static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+{
+ void __iomem *reg = bank->base;
+ u32 l;
+
+ if (bank->regs->clr_irqenable) {
+ reg += bank->regs->clr_irqenable;
l = gpio_mask;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_INT_MASK;
+ } else {
+ reg += bank->regs->irqenable;
l = __raw_readl(reg);
- if (enable)
- l &= ~(gpio_mask);
- else
+ if (bank->regs->irqenable_inv)
l |= gpio_mask;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- if (enable)
- reg += OMAP24XX_GPIO_SETIRQENABLE1;
- else
- reg += OMAP24XX_GPIO_CLEARIRQENABLE1;
- l = gpio_mask;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- if (enable)
- reg += OMAP4_GPIO_IRQSTATUSSET0;
else
- reg += OMAP4_GPIO_IRQSTATUSCLR0;
- l = gpio_mask;
- break;
-#endif
- default:
- WARN_ON(1);
- return;
+ l &= ~gpio_mask;
}
+
__raw_writel(l, reg);
}
static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
{
- _enable_gpio_irqbank(bank, 1 << get_gpio_index(gpio), enable);
+ _enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}
/*
@@ -858,50 +517,32 @@ static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int ena
*/
static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
{
- unsigned long uninitialized_var(flags);
+ u32 gpio_bit = GPIO_BIT(bank, gpio);
+ unsigned long flags;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_MPUIO:
- case METHOD_GPIO_1610:
- spin_lock_irqsave(&bank->lock, flags);
- if (enable)
- bank->suspend_wakeup |= (1 << gpio);
- else
- bank->suspend_wakeup &= ~(1 << gpio);
- spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
-#endif
-#ifdef CONFIG_ARCH_OMAP2PLUS
- case METHOD_GPIO_24XX:
- case METHOD_GPIO_44XX:
- if (bank->non_wakeup_gpios & (1 << gpio)) {
- printk(KERN_ERR "Unable to modify wakeup on "
- "non-wakeup GPIO%d\n",
- (bank - gpio_bank) * 32 + gpio);
- return -EINVAL;
- }
- spin_lock_irqsave(&bank->lock, flags);
- if (enable)
- bank->suspend_wakeup |= (1 << gpio);
- else
- bank->suspend_wakeup &= ~(1 << gpio);
- spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
-#endif
- default:
- printk(KERN_ERR "Can't enable GPIO wakeup for method %i\n",
- bank->method);
+ if (bank->non_wakeup_gpios & gpio_bit) {
+ dev_err(bank->dev,
+ "Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
return -EINVAL;
}
+
+ spin_lock_irqsave(&bank->lock, flags);
+ if (enable)
+ bank->suspend_wakeup |= gpio_bit;
+ else
+ bank->suspend_wakeup &= ~gpio_bit;
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
}
static void _reset_gpio(struct gpio_bank *bank, int gpio)
{
- _set_gpio_direction(bank, get_gpio_index(gpio), 1);
+ _set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
_set_gpio_irqenable(bank, gpio, 0);
_clear_gpio_irqstatus(bank, gpio);
- _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
+ _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
}
/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
@@ -911,10 +552,8 @@ static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
struct gpio_bank *bank;
int retval;
- if (check_gpio(gpio) < 0)
- return -ENODEV;
bank = irq_data_get_irq_chip_data(d);
- retval = _set_gpio_wakeup(bank, get_gpio_index(gpio), enable);
+ retval = _set_gpio_wakeup(bank, gpio, enable);
return retval;
}
@@ -1030,31 +669,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
chained_irq_enter(chip, desc);
bank = irq_get_handler_data(irq);
-#ifdef CONFIG_ARCH_OMAP1
- if (bank->method == METHOD_MPUIO)
- isr_reg = bank->base +
- OMAP_MPUIO_GPIO_INT / bank->stride;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- if (bank->method == METHOD_GPIO_1510)
- isr_reg = bank->base + OMAP1510_GPIO_INT_STATUS;
-#endif
-#if defined(CONFIG_ARCH_OMAP16XX)
- if (bank->method == METHOD_GPIO_1610)
- isr_reg = bank->base + OMAP1610_GPIO_IRQSTATUS1;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- if (bank->method == METHOD_GPIO_7XX)
- isr_reg = bank->base + OMAP7XX_GPIO_INT_STATUS;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- if (bank->method == METHOD_GPIO_24XX)
- isr_reg = bank->base + OMAP24XX_GPIO_IRQSTATUS1;
-#endif
-#if defined(CONFIG_ARCH_OMAP4)
- if (bank->method == METHOD_GPIO_44XX)
- isr_reg = bank->base + OMAP4_GPIO_IRQSTATUS0;
-#endif
+ isr_reg = bank->base + bank->regs->irqstatus;
if (WARN_ON(!isr_reg))
goto exit;
@@ -1076,9 +691,9 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
/* clear edge sensitive interrupts before handler(s) are
called so that we don't miss any interrupt occurred while
executing them */
- _enable_gpio_irqbank(bank, isr_saved & ~level_mask, 0);
+ _disable_gpio_irqbank(bank, isr_saved & ~level_mask);
_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
- _enable_gpio_irqbank(bank, isr_saved & ~level_mask, 1);
+ _enable_gpio_irqbank(bank, isr_saved & ~level_mask);
/* if there is only edge sensitive GPIO pin interrupts
configured, we could unmask GPIO bank interrupt immediately */
@@ -1094,7 +709,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
gpio_irq = bank->virtual_irq_start;
for (; isr != 0; isr >>= 1, gpio_irq++) {
- gpio_index = get_gpio_index(irq_to_gpio(gpio_irq));
+ gpio_index = GPIO_INDEX(bank, irq_to_gpio(gpio_irq));
if (!(isr & 1))
continue;
@@ -1150,7 +765,7 @@ static void gpio_mask_irq(struct irq_data *d)
spin_lock_irqsave(&bank->lock, flags);
_set_gpio_irqenable(bank, gpio, 0);
- _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
+ _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
spin_unlock_irqrestore(&bank->lock, flags);
}
@@ -1158,13 +773,13 @@ static void gpio_unmask_irq(struct irq_data *d)
{
unsigned int gpio = d->irq - IH_GPIO_BASE;
struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
- unsigned int irq_mask = 1 << get_gpio_index(gpio);
+ unsigned int irq_mask = GPIO_BIT(bank, gpio);
u32 trigger = irqd_get_trigger_type(d);
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
if (trigger)
- _set_gpio_triggering(bank, get_gpio_index(gpio), trigger);
+ _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);
/* For level-triggered GPIOs, the clearing must be done after
* the HW source is cleared, thus after the handler has run */
@@ -1191,45 +806,8 @@ static struct irq_chip gpio_irq_chip = {
#ifdef CONFIG_ARCH_OMAP1
-/* MPUIO uses the always-on 32k clock */
-
-static void mpuio_ack_irq(struct irq_data *d)
-{
- /* The ISR is reset automatically, so do nothing here. */
-}
-
-static void mpuio_mask_irq(struct irq_data *d)
-{
- unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
- struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
-
- _set_gpio_irqenable(bank, gpio, 0);
-}
-
-static void mpuio_unmask_irq(struct irq_data *d)
-{
- unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
- struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
-
- _set_gpio_irqenable(bank, gpio, 1);
-}
-
-static struct irq_chip mpuio_irq_chip = {
- .name = "MPUIO",
- .irq_ack = mpuio_ack_irq,
- .irq_mask = mpuio_mask_irq,
- .irq_unmask = mpuio_unmask_irq,
- .irq_set_type = gpio_irq_type,
-#ifdef CONFIG_ARCH_OMAP16XX
- /* REVISIT: assuming only 16xx supports MPUIO wake events */
- .irq_set_wake = gpio_wake_enable,
-#endif
-};
-
-
#define bank_is_mpuio(bank) ((bank)->method == METHOD_MPUIO)
-
#ifdef CONFIG_ARCH_OMAP16XX
#include <linux/platform_device.h>
@@ -1289,7 +867,7 @@ static struct platform_device omap_mpuio_device = {
static inline void mpuio_init(void)
{
- struct gpio_bank *bank = get_gpio_bank(OMAP_MPUIO(0));
+ struct gpio_bank *bank = &gpio_bank[0];
platform_set_drvdata(&omap_mpuio_device, bank);
if (platform_driver_register(&omap_mpuio_driver) == 0)
@@ -1302,8 +880,6 @@ static inline void mpuio_init(void) {}
#else
-extern struct irq_chip mpuio_irq_chip;
-
#define bank_is_mpuio(bank) 0
static inline void mpuio_init(void) {}
@@ -1329,31 +905,8 @@ static int gpio_input(struct gpio_chip *chip, unsigned offset)
static int gpio_is_input(struct gpio_bank *bank, int mask)
{
- void __iomem *reg = bank->base;
+ void __iomem *reg = bank->base + bank->regs->direction;
- switch (bank->method) {
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_IO_CNTL / bank->stride;
- break;
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DIR_CONTROL;
- break;
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_DIRECTION;
- break;
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DIR_CONTROL;
- break;
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_OE;
- break;
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_OE;
- break;
- default:
- WARN_ONCE(1, "gpio_is_input: incorrect OMAP GPIO method");
- return -EINVAL;
- }
return __raw_readl(reg) & mask;
}
@@ -1365,9 +918,9 @@ static int gpio_get(struct gpio_chip *chip, unsigned offset)
u32 mask;
gpio = chip->base + offset;
- bank = get_gpio_bank(gpio);
+ bank = container_of(chip, struct gpio_bank, chip);
reg = bank->base;
- mask = 1 << get_gpio_index(gpio);
+ mask = GPIO_BIT(bank, gpio);
if (gpio_is_input(bank, mask))
return _get_gpio_datain(bank, gpio);
@@ -1382,7 +935,7 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
bank = container_of(chip, struct gpio_bank, chip);
spin_lock_irqsave(&bank->lock, flags);
- _set_gpio_dataout(bank, offset, value);
+ bank->set_dataout(bank, offset, value);
_set_gpio_direction(bank, offset, 0);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -1416,7 +969,7 @@ static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
bank = container_of(chip, struct gpio_bank, chip);
spin_lock_irqsave(&bank->lock, flags);
- _set_gpio_dataout(bank, offset, value);
+ bank->set_dataout(bank, offset, value);
spin_unlock_irqrestore(&bank->lock, flags);
}
@@ -1432,19 +985,17 @@ static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
+ static bool called;
u32 rev;
- if (cpu_is_omap16xx() && !(bank->method != METHOD_MPUIO))
- rev = __raw_readw(bank->base + OMAP1610_GPIO_REVISION);
- else if (cpu_is_omap24xx() || cpu_is_omap34xx())
- rev = __raw_readl(bank->base + OMAP24XX_GPIO_REVISION);
- else if (cpu_is_omap44xx())
- rev = __raw_readl(bank->base + OMAP4_GPIO_REVISION);
- else
+ if (called || bank->regs->revision == USHRT_MAX)
return;
- printk(KERN_INFO "OMAP GPIO hardware version %d.%d\n",
+ rev = __raw_readw(bank->base + bank->regs->revision);
+ pr_info("OMAP GPIO hardware version %d.%d\n",
(rev >> 4) & 0x0f, rev & 0x0f);
+
+ called = true;
}
/* This lock class tells lockdep that GPIO irqs are in a different
@@ -1526,6 +1077,30 @@ static void omap_gpio_mod_init(struct gpio_bank *bank, int id)
}
}
+static __init void
+omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
+ unsigned int num)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
+ handle_simple_irq);
+ ct = gc->chip_types;
+
+ /* NOTE: No ack required, reading IRQ status clears it. */
+ ct->chip.irq_mask = irq_gc_mask_set_bit;
+ ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+ ct->chip.irq_set_type = gpio_irq_type;
+ /* REVISIT: assuming only 16xx supports MPUIO wake events */
+ if (cpu_is_omap16xx())
+ ct->chip.irq_set_wake = gpio_wake_enable;
+
+ ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
+ irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+}
+
static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
{
int j;
@@ -1553,22 +1128,23 @@ static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
} else {
bank->chip.label = "gpio";
bank->chip.base = gpio;
- gpio += bank_width;
+ gpio += bank->width;
}
- bank->chip.ngpio = bank_width;
+ bank->chip.ngpio = bank->width;
gpiochip_add(&bank->chip);
for (j = bank->virtual_irq_start;
- j < bank->virtual_irq_start + bank_width; j++) {
+ j < bank->virtual_irq_start + bank->width; j++) {
irq_set_lockdep_class(j, &gpio_lock_class);
irq_set_chip_data(j, bank);
- if (bank_is_mpuio(bank))
- irq_set_chip(j, &mpuio_irq_chip);
- else
+ if (bank_is_mpuio(bank)) {
+ omap_mpuio_alloc_gc(bank, j, bank->width);
+ } else {
irq_set_chip(j, &gpio_irq_chip);
- irq_set_handler(j, handle_simple_irq);
- set_irq_flags(j, IRQF_VALID);
+ irq_set_handler(j, handle_simple_irq);
+ set_irq_flags(j, IRQF_VALID);
+ }
}
irq_set_chained_handler(bank->irq, gpio_irq_handler);
irq_set_handler_data(bank->irq, bank);
@@ -1610,7 +1186,14 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
bank->dev = &pdev->dev;
bank->dbck_flag = pdata->dbck_flag;
bank->stride = pdata->bank_stride;
- bank_width = pdata->bank_width;
+ bank->width = pdata->bank_width;
+
+ bank->regs = pdata->regs;
+
+ if (bank->regs->set_dataout && bank->regs->clr_dataout)
+ bank->set_dataout = _set_gpio_dataout_reg;
+ else
+ bank->set_dataout = _set_gpio_dataout_mask;
spin_lock_init(&bank->lock);
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/gpio-pca953x.c
index 0451d7ac94a..c43b8ff626a 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -1,5 +1,5 @@
/*
- * pca953x.c - 4/8/16 bit I/O ports
+ * PCA953x 4/8/16 bit I/O ports
*
* Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
* Copyright (C) 2007 Marvell International Ltd.
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#ifdef CONFIG_OF_GPIO
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#endif
#define PCA953X_INPUT 0
@@ -85,7 +84,6 @@ struct pca953x_chip {
#endif
struct i2c_client *client;
- struct pca953x_platform_data *dyn_pdata;
struct gpio_chip gpio_chip;
const char *const *names;
int chip_type;
@@ -437,7 +435,7 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
do {
level = __ffs(pending);
- generic_handle_irq(level + chip->irq_base);
+ handle_nested_irq(level + chip->irq_base);
pending &= ~(1 << level);
} while (pending);
@@ -446,13 +444,13 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
}
static int pca953x_irq_setup(struct pca953x_chip *chip,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id,
+ int irq_base)
{
struct i2c_client *client = chip->client;
- struct pca953x_platform_data *pdata = client->dev.platform_data;
int ret, offset = 0;
- if (pdata->irq_base != -1
+ if (irq_base != -1
&& (id->driver_data & PCA_INT)) {
int lvl;
@@ -474,15 +472,19 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
* this purpose.
*/
chip->irq_stat &= chip->reg_direction;
- chip->irq_base = pdata->irq_base;
mutex_init(&chip->irq_lock);
+ chip->irq_base = irq_alloc_descs(-1, irq_base, chip->gpio_chip.ngpio, -1);
+ if (chip->irq_base < 0)
+ goto out_failed;
+
for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
int irq = lvl + chip->irq_base;
+ irq_clear_status_flags(irq, IRQ_NOREQUEST);
irq_set_chip_data(irq, chip);
- irq_set_chip_and_handler(irq, &pca953x_irq_chip,
- handle_simple_irq);
+ irq_set_chip(irq, &pca953x_irq_chip);
+ irq_set_nested_thread(irq, true);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
@@ -493,8 +495,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
ret = request_threaded_irq(client->irq,
NULL,
pca953x_irq_handler,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
dev_name(&client->dev), chip);
if (ret) {
dev_err(&client->dev, "failed to request irq %d\n",
@@ -514,17 +515,19 @@ out_failed:
static void pca953x_irq_teardown(struct pca953x_chip *chip)
{
- if (chip->irq_base != -1)
+ if (chip->irq_base != -1) {
+ irq_free_descs(chip->irq_base, chip->gpio_chip.ngpio);
free_irq(chip->client->irq, chip);
+ }
}
#else /* CONFIG_GPIO_PCA953X_IRQ */
static int pca953x_irq_setup(struct pca953x_chip *chip,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id,
+ int irq_base)
{
struct i2c_client *client = chip->client;
- struct pca953x_platform_data *pdata = client->dev.platform_data;
- if (pdata->irq_base != -1 && (id->driver_data & PCA_INT))
+ if (irq_base != -1 && (id->driver_data & PCA_INT))
dev_warn(&client->dev, "interrupt support not compiled in\n");
return 0;
@@ -541,46 +544,39 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip)
#ifdef CONFIG_OF_GPIO
/*
* Translate OpenFirmware node properties into platform_data
+ * WARNING: This is DEPRECATED and will be removed eventually!
*/
-static struct pca953x_platform_data *
-pca953x_get_alt_pdata(struct i2c_client *client)
+void
+pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
- struct pca953x_platform_data *pdata;
struct device_node *node;
const __be32 *val;
int size;
node = client->dev.of_node;
if (node == NULL)
- return NULL;
-
- pdata = kzalloc(sizeof(struct pca953x_platform_data), GFP_KERNEL);
- if (pdata == NULL) {
- dev_err(&client->dev, "Unable to allocate platform_data\n");
- return NULL;
- }
+ return;
- pdata->gpio_base = -1;
+ *gpio_base = -1;
val = of_get_property(node, "linux,gpio-base", &size);
+ WARN(val, "%s: device-tree property 'linux,gpio-base' is deprecated!", __func__);
if (val) {
if (size != sizeof(*val))
dev_warn(&client->dev, "%s: wrong linux,gpio-base\n",
node->full_name);
else
- pdata->gpio_base = be32_to_cpup(val);
+ *gpio_base = be32_to_cpup(val);
}
val = of_get_property(node, "polarity", NULL);
+ WARN(val, "%s: device-tree property 'polarity' is deprecated!", __func__);
if (val)
- pdata->invert = *val;
-
- return pdata;
+ *invert = *val;
}
#else
-static struct pca953x_platform_data *
-pca953x_get_alt_pdata(struct i2c_client *client)
+void
+pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
- return NULL;
}
#endif
@@ -642,6 +638,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
{
struct pca953x_platform_data *pdata;
struct pca953x_chip *chip;
+ int irq_base = 0, invert = 0;
int ret = 0;
chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
@@ -649,26 +646,22 @@ static int __devinit pca953x_probe(struct i2c_client *client,
return -ENOMEM;
pdata = client->dev.platform_data;
- if (pdata == NULL) {
- pdata = pca953x_get_alt_pdata(client);
- /*
- * Unlike normal platform_data, this is allocated
- * dynamically and must be freed in the driver
- */
- chip->dyn_pdata = pdata;
- }
-
- if (pdata == NULL) {
- dev_dbg(&client->dev, "no platform data\n");
- ret = -EINVAL;
- goto out_failed;
+ if (pdata) {
+ irq_base = pdata->irq_base;
+ chip->gpio_start = pdata->gpio_base;
+ invert = pdata->invert;
+ chip->names = pdata->names;
+ } else {
+ pca953x_get_alt_pdata(client, &chip->gpio_start, &invert);
+#ifdef CONFIG_OF_GPIO
+ /* If I2C node has no interrupts property, disable GPIO interrupts */
+ if (of_find_property(client->dev.of_node, "interrupts", NULL) == NULL)
+ irq_base = -1;
+#endif
}
chip->client = client;
- chip->gpio_start = pdata->gpio_base;
-
- chip->names = pdata->names;
chip->chip_type = id->driver_data & (PCA953X_TYPE | PCA957X_TYPE);
mutex_init(&chip->i2c_lock);
@@ -679,13 +672,13 @@ static int __devinit pca953x_probe(struct i2c_client *client,
pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);
if (chip->chip_type == PCA953X_TYPE)
- device_pca953x_init(chip, pdata->invert);
+ device_pca953x_init(chip, invert);
else if (chip->chip_type == PCA957X_TYPE)
- device_pca957x_init(chip, pdata->invert);
+ device_pca957x_init(chip, invert);
else
goto out_failed;
- ret = pca953x_irq_setup(chip, id);
+ ret = pca953x_irq_setup(chip, id, irq_base);
if (ret)
goto out_failed;
@@ -693,7 +686,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
if (ret)
goto out_failed_irq;
- if (pdata->setup) {
+ if (pdata && pdata->setup) {
ret = pdata->setup(client, chip->gpio_chip.base,
chip->gpio_chip.ngpio, pdata->context);
if (ret < 0)
@@ -706,7 +699,6 @@ static int __devinit pca953x_probe(struct i2c_client *client,
out_failed_irq:
pca953x_irq_teardown(chip);
out_failed:
- kfree(chip->dyn_pdata);
kfree(chip);
return ret;
}
@@ -717,7 +709,7 @@ static int pca953x_remove(struct i2c_client *client)
struct pca953x_chip *chip = i2c_get_clientdata(client);
int ret = 0;
- if (pdata->teardown) {
+ if (pdata && pdata->teardown) {
ret = pdata->teardown(client, chip->gpio_chip.base,
chip->gpio_chip.ngpio, pdata->context);
if (ret < 0) {
@@ -735,7 +727,6 @@ static int pca953x_remove(struct i2c_client *client)
}
pca953x_irq_teardown(chip);
- kfree(chip->dyn_pdata);
kfree(chip);
return 0;
}
diff --git a/drivers/gpio/pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 879b473aab5..7369fdda92b 100644
--- a/drivers/gpio/pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -1,5 +1,5 @@
/*
- * pcf857x - driver for pcf857x, pca857x, and pca967x I2C GPIO expanders
+ * Driver for pcf857x, pca857x, and pca967x I2C GPIO expanders
*
* Copyright (C) 2007 David Brownell
*
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/gpio-pch.c
index 36919e77c49..36919e77c49 100644
--- a/drivers/gpio/pch_gpio.c
+++ b/drivers/gpio/gpio-pch.c
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/gpio-pl061.c
index 6fcb28cdd86..2c5a18f32bf 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -1,7 +1,5 @@
/*
- * linux/drivers/gpio/pl061.c
- *
- * Copyright (C) 2008, 2009 Provigent Ltd.
+ * Copyright (C) 2008, 2009 Provigent Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/gpio/gpio-plat-samsung.c b/drivers/gpio/gpio-plat-samsung.c
index ea37c046178..ef67f1952a7 100644
--- a/drivers/gpio/gpio-plat-samsung.c
+++ b/drivers/gpio/gpio-plat-samsung.c
@@ -1,5 +1,4 @@
-/* arch/arm/plat-samsung/gpiolib.c
- *
+/*
* Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/gpio-rdc321x.c
index 2762698e020..2762698e020 100644
--- a/drivers/gpio/rdc321x-gpio.c
+++ b/drivers/gpio/gpio-rdc321x.c
diff --git a/drivers/gpio/gpio-s5pc100.c b/drivers/gpio/gpio-s5pc100.c
index 2842394b28b..7f87b0c76e0 100644
--- a/drivers/gpio/gpio-s5pc100.c
+++ b/drivers/gpio/gpio-s5pc100.c
@@ -1,4 +1,5 @@
-/* linux/arch/arm/mach-s5pc100/gpiolib.c
+/*
+ * S5PC100 - GPIOlib support
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
@@ -6,8 +7,6 @@
* Copyright 2009 Samsung Electronics Co
* Kyungmin Park <kyungmin.park@samsung.com>
*
- * S5PC100 - GPIOlib support
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/gpio/gpio-s5pv210.c b/drivers/gpio/gpio-s5pv210.c
index 1ba20a703e0..eb12f1602de 100644
--- a/drivers/gpio/gpio-s5pv210.c
+++ b/drivers/gpio/gpio-s5pv210.c
@@ -1,10 +1,9 @@
-/* linux/arch/arm/mach-s5pv210/gpiolib.c
+/*
+ * S5PV210 - GPIOlib support
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
- * S5PV210 - GPIOlib support
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/gpio/sch_gpio.c b/drivers/gpio/gpio-sch.c
index 56060421cdf..16351584549 100644
--- a/drivers/gpio/sch_gpio.c
+++ b/drivers/gpio/gpio-sch.c
@@ -1,5 +1,5 @@
/*
- * sch_gpio.c - GPIO interface for Intel Poulsbo SCH
+ * GPIO interface for Intel Poulsbo SCH
*
* Copyright (c) 2010 CompuLab Ltd
* Author: Denis Turischev <denis@compulab.co.il>
diff --git a/drivers/gpio/stmpe-gpio.c b/drivers/gpio/gpio-stmpe.c
index 4c980b57332..4c980b57332 100644
--- a/drivers/gpio/stmpe-gpio.c
+++ b/drivers/gpio/gpio-stmpe.c
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/gpio-sx150x.c
index a4f73534394..a4f73534394 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
diff --git a/drivers/gpio/tc3589x-gpio.c b/drivers/gpio/gpio-tc3589x.c
index 2a82e8999a4..2a82e8999a4 100644
--- a/drivers/gpio/tc3589x-gpio.c
+++ b/drivers/gpio/gpio-tc3589x.c
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
new file mode 100644
index 00000000000..747eb40e8af
--- /dev/null
+++ b/drivers/gpio/gpio-tegra.c
@@ -0,0 +1,441 @@
+/*
+ * arch/arm/mach-tegra/gpio.c
+ *
+ * Copyright (c) 2010 Google, Inc
+ *
+ * Author:
+ * Erik Gilling <konkers@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+
+#include <asm/mach/irq.h>
+
+#include <mach/iomap.h>
+#include <mach/suspend.h>
+
+#define GPIO_BANK(x) ((x) >> 5)
+#define GPIO_PORT(x) (((x) >> 3) & 0x3)
+#define GPIO_BIT(x) ((x) & 0x7)
+
+#define GPIO_REG(x) (IO_TO_VIRT(TEGRA_GPIO_BASE) + \
+ GPIO_BANK(x) * 0x80 + \
+ GPIO_PORT(x) * 4)
+
+#define GPIO_CNF(x) (GPIO_REG(x) + 0x00)
+#define GPIO_OE(x) (GPIO_REG(x) + 0x10)
+#define GPIO_OUT(x) (GPIO_REG(x) + 0x20)
+#define GPIO_IN(x) (GPIO_REG(x) + 0x30)
+#define GPIO_INT_STA(x) (GPIO_REG(x) + 0x40)
+#define GPIO_INT_ENB(x) (GPIO_REG(x) + 0x50)
+#define GPIO_INT_LVL(x) (GPIO_REG(x) + 0x60)
+#define GPIO_INT_CLR(x) (GPIO_REG(x) + 0x70)
+
+#define GPIO_MSK_CNF(x) (GPIO_REG(x) + 0x800)
+#define GPIO_MSK_OE(x) (GPIO_REG(x) + 0x810)
+#define GPIO_MSK_OUT(x) (GPIO_REG(x) + 0x820)
+#define GPIO_MSK_INT_STA(x) (GPIO_REG(x) + 0x840)
+#define GPIO_MSK_INT_ENB(x) (GPIO_REG(x) + 0x850)
+#define GPIO_MSK_INT_LVL(x) (GPIO_REG(x) + 0x860)
+
+#define GPIO_INT_LVL_MASK 0x010101
+#define GPIO_INT_LVL_EDGE_RISING 0x000101
+#define GPIO_INT_LVL_EDGE_FALLING 0x000100
+#define GPIO_INT_LVL_EDGE_BOTH 0x010100
+#define GPIO_INT_LVL_LEVEL_HIGH 0x000001
+#define GPIO_INT_LVL_LEVEL_LOW 0x000000
+
+struct tegra_gpio_bank {
+ int bank;
+ int irq;
+ spinlock_t lvl_lock[4];
+#ifdef CONFIG_PM
+ u32 cnf[4];
+ u32 out[4];
+ u32 oe[4];
+ u32 int_enb[4];
+ u32 int_lvl[4];
+#endif
+};
+
+
+static struct tegra_gpio_bank tegra_gpio_banks[] = {
+ {.bank = 0, .irq = INT_GPIO1},
+ {.bank = 1, .irq = INT_GPIO2},
+ {.bank = 2, .irq = INT_GPIO3},
+ {.bank = 3, .irq = INT_GPIO4},
+ {.bank = 4, .irq = INT_GPIO5},
+ {.bank = 5, .irq = INT_GPIO6},
+ {.bank = 6, .irq = INT_GPIO7},
+};
+
+static int tegra_gpio_compose(int bank, int port, int bit)
+{
+ return (bank << 5) | ((port & 0x3) << 3) | (bit & 0x7);
+}
+
+static void tegra_gpio_mask_write(u32 reg, int gpio, int value)
+{
+ u32 val;
+
+ val = 0x100 << GPIO_BIT(gpio);
+ if (value)
+ val |= 1 << GPIO_BIT(gpio);
+ __raw_writel(val, reg);
+}
+
+void tegra_gpio_enable(int gpio)
+{
+ tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 1);
+}
+
+void tegra_gpio_disable(int gpio)
+{
+ tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 0);
+}
+
+static void tegra_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ tegra_gpio_mask_write(GPIO_MSK_OUT(offset), offset, value);
+}
+
+static int tegra_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ return (__raw_readl(GPIO_IN(offset)) >> GPIO_BIT(offset)) & 0x1;
+}
+
+static int tegra_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ tegra_gpio_mask_write(GPIO_MSK_OE(offset), offset, 0);
+ return 0;
+}
+
+static int tegra_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ tegra_gpio_set(chip, offset, value);
+ tegra_gpio_mask_write(GPIO_MSK_OE(offset), offset, 1);
+ return 0;
+}
+
+
+
+static struct gpio_chip tegra_gpio_chip = {
+ .label = "tegra-gpio",
+ .direction_input = tegra_gpio_direction_input,
+ .get = tegra_gpio_get,
+ .direction_output = tegra_gpio_direction_output,
+ .set = tegra_gpio_set,
+ .base = 0,
+ .ngpio = TEGRA_NR_GPIOS,
+};
+
+static void tegra_gpio_irq_ack(struct irq_data *d)
+{
+ int gpio = d->irq - INT_GPIO_BASE;
+
+ __raw_writel(1 << GPIO_BIT(gpio), GPIO_INT_CLR(gpio));
+}
+
+static void tegra_gpio_irq_mask(struct irq_data *d)
+{
+ int gpio = d->irq - INT_GPIO_BASE;
+
+ tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 0);
+}
+
+static void tegra_gpio_irq_unmask(struct irq_data *d)
+{
+ int gpio = d->irq - INT_GPIO_BASE;
+
+ tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 1);
+}
+
+static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ int gpio = d->irq - INT_GPIO_BASE;
+ struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ int port = GPIO_PORT(gpio);
+ int lvl_type;
+ int val;
+ unsigned long flags;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ lvl_type = GPIO_INT_LVL_EDGE_RISING;
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ lvl_type = GPIO_INT_LVL_EDGE_FALLING;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ lvl_type = GPIO_INT_LVL_EDGE_BOTH;
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ lvl_type = GPIO_INT_LVL_LEVEL_HIGH;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ lvl_type = GPIO_INT_LVL_LEVEL_LOW;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&bank->lvl_lock[port], flags);
+
+ val = __raw_readl(GPIO_INT_LVL(gpio));
+ val &= ~(GPIO_INT_LVL_MASK << GPIO_BIT(gpio));
+ val |= lvl_type << GPIO_BIT(gpio);
+ __raw_writel(val, GPIO_INT_LVL(gpio));
+
+ spin_unlock_irqrestore(&bank->lvl_lock[port], flags);
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+
+ return 0;
+}
+
+static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct tegra_gpio_bank *bank;
+ int port;
+ int pin;
+ int unmasked = 0;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ bank = irq_get_handler_data(irq);
+
+ for (port = 0; port < 4; port++) {
+ int gpio = tegra_gpio_compose(bank->bank, port, 0);
+ unsigned long sta = __raw_readl(GPIO_INT_STA(gpio)) &
+ __raw_readl(GPIO_INT_ENB(gpio));
+ u32 lvl = __raw_readl(GPIO_INT_LVL(gpio));
+
+ for_each_set_bit(pin, &sta, 8) {
+ __raw_writel(1 << pin, GPIO_INT_CLR(gpio));
+
+ /* if gpio is edge triggered, clear condition
+ * before executing the handler so that we don't
+ * miss edges
+ */
+ if (lvl & (0x100 << pin)) {
+ unmasked = 1;
+ chained_irq_exit(chip, desc);
+ }
+
+ generic_handle_irq(gpio_to_irq(gpio + pin));
+ }
+ }
+
+ if (!unmasked)
+ chained_irq_exit(chip, desc);
+
+}
+
+#ifdef CONFIG_PM
+void tegra_gpio_resume(void)
+{
+ unsigned long flags;
+ int b;
+ int p;
+
+ local_irq_save(flags);
+
+ for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
+ struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
+
+ for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
+ unsigned int gpio = (b<<5) | (p<<3);
+ __raw_writel(bank->cnf[p], GPIO_CNF(gpio));
+ __raw_writel(bank->out[p], GPIO_OUT(gpio));
+ __raw_writel(bank->oe[p], GPIO_OE(gpio));
+ __raw_writel(bank->int_lvl[p], GPIO_INT_LVL(gpio));
+ __raw_writel(bank->int_enb[p], GPIO_INT_ENB(gpio));
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+void tegra_gpio_suspend(void)
+{
+ unsigned long flags;
+ int b;
+ int p;
+
+ local_irq_save(flags);
+ for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
+ struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
+
+ for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
+ unsigned int gpio = (b<<5) | (p<<3);
+ bank->cnf[p] = __raw_readl(GPIO_CNF(gpio));
+ bank->out[p] = __raw_readl(GPIO_OUT(gpio));
+ bank->oe[p] = __raw_readl(GPIO_OE(gpio));
+ bank->int_enb[p] = __raw_readl(GPIO_INT_ENB(gpio));
+ bank->int_lvl[p] = __raw_readl(GPIO_INT_LVL(gpio));
+ }
+ }
+ local_irq_restore(flags);
+}
+
+static int tegra_gpio_wake_enable(struct irq_data *d, unsigned int enable)
+{
+ struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ return irq_set_irq_wake(bank->irq, enable);
+}
+#endif
+
+static struct irq_chip tegra_gpio_irq_chip = {
+ .name = "GPIO",
+ .irq_ack = tegra_gpio_irq_ack,
+ .irq_mask = tegra_gpio_irq_mask,
+ .irq_unmask = tegra_gpio_irq_unmask,
+ .irq_set_type = tegra_gpio_irq_set_type,
+#ifdef CONFIG_PM
+ .irq_set_wake = tegra_gpio_wake_enable,
+#endif
+};
+
+
+/* This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
+static int __init tegra_gpio_init(void)
+{
+ struct tegra_gpio_bank *bank;
+ int i;
+ int j;
+
+ for (i = 0; i < 7; i++) {
+ for (j = 0; j < 4; j++) {
+ int gpio = tegra_gpio_compose(i, j, 0);
+ __raw_writel(0x00, GPIO_INT_ENB(gpio));
+ }
+ }
+
+#ifdef CONFIG_OF_GPIO
+ /*
+ * This isn't ideal, but it gets things hooked up until this
+ * driver is converted into a platform_device
+ */
+ tegra_gpio_chip.of_node = of_find_compatible_node(NULL, NULL,
+ "nvidia,tegra20-gpio");
+#endif /* CONFIG_OF_GPIO */
+
+ gpiochip_add(&tegra_gpio_chip);
+
+ for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) {
+ bank = &tegra_gpio_banks[GPIO_BANK(irq_to_gpio(i))];
+
+ irq_set_lockdep_class(i, &gpio_lock_class);
+ irq_set_chip_data(i, bank);
+ irq_set_chip_and_handler(i, &tegra_gpio_irq_chip,
+ handle_simple_irq);
+ set_irq_flags(i, IRQF_VALID);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) {
+ bank = &tegra_gpio_banks[i];
+
+ irq_set_chained_handler(bank->irq, tegra_gpio_irq_handler);
+ irq_set_handler_data(bank->irq, bank);
+
+ for (j = 0; j < 4; j++)
+ spin_lock_init(&bank->lvl_lock[j]);
+ }
+
+ return 0;
+}
+
+postcore_initcall(tegra_gpio_init);
+
+void __init tegra_gpio_config(struct tegra_gpio_table *table, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ int gpio = table[i].gpio;
+
+ if (table[i].enable)
+ tegra_gpio_enable(gpio);
+ else
+ tegra_gpio_disable(gpio);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static int dbg_gpio_show(struct seq_file *s, void *unused)
+{
+ int i;
+ int j;
+
+ for (i = 0; i < 7; i++) {
+ for (j = 0; j < 4; j++) {
+ int gpio = tegra_gpio_compose(i, j, 0);
+ seq_printf(s,
+ "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
+ i, j,
+ __raw_readl(GPIO_CNF(gpio)),
+ __raw_readl(GPIO_OE(gpio)),
+ __raw_readl(GPIO_OUT(gpio)),
+ __raw_readl(GPIO_IN(gpio)),
+ __raw_readl(GPIO_INT_STA(gpio)),
+ __raw_readl(GPIO_INT_ENB(gpio)),
+ __raw_readl(GPIO_INT_LVL(gpio)));
+ }
+ }
+ return 0;
+}
+
+static int dbg_gpio_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dbg_gpio_show, &inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+ .open = dbg_gpio_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init tegra_gpio_debuginit(void)
+{
+ (void) debugfs_create_file("tegra_gpio", S_IRUGO,
+ NULL, NULL, &debug_fops);
+ return 0;
+}
+late_initcall(tegra_gpio_debuginit);
+#endif
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/gpio-timberdale.c
index 0265872e57d..c593bd46bfb 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -1,5 +1,5 @@
/*
- * timbgpio.c timberdale FPGA GPIO driver
+ * Timberdale FPGA GPIO driver
* Copyright (c) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/gpio/tps65910-gpio.c b/drivers/gpio/gpio-tps65910.c
index 8d1ddfdd63e..b9c1c297669 100644
--- a/drivers/gpio/tps65910-gpio.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -1,5 +1,5 @@
/*
- * tps65910-gpio.c -- TI TPS6591x
+ * TI TPS6591x GPIO driver
*
* Copyright 2010 Texas Instruments Inc.
*
@@ -81,8 +81,10 @@ void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
switch(tps65910_chip_id(tps65910)) {
case TPS65910:
tps65910->gpio.ngpio = 6;
+ break;
case TPS65911:
tps65910->gpio.ngpio = 9;
+ break;
default:
return;
}
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
new file mode 100644
index 00000000000..79e66c00235
--- /dev/null
+++ b/drivers/gpio/gpio-tps65912.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/mfd/tps65912.h>
+
+struct tps65912_gpio_data {
+ struct tps65912 *tps65912;
+ struct gpio_chip gpio_chip;
+};
+
+static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+ int val;
+
+ val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
+
+ if (val & GPIO_STS_MASK)
+ return 1;
+
+ return 0;
+}
+
+static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+
+ if (value)
+ tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK);
+ else
+ tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK);
+}
+
+static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+
+ /* Set the initial value */
+ tps65912_gpio_set(gc, offset, value);
+
+ return tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
+ GPIO_CFG_MASK);
+}
+
+static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+
+ return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
+ GPIO_CFG_MASK);
+
+}
+
+static struct gpio_chip template_chip = {
+ .label = "tps65912",
+ .owner = THIS_MODULE,
+ .direction_input = tps65912_gpio_input,
+ .direction_output = tps65912_gpio_output,
+ .get = tps65912_gpio_get,
+ .set = tps65912_gpio_set,
+ .can_sleep = 1,
+ .ngpio = 5,
+ .base = -1,
+};
+
+static int __devinit tps65912_gpio_probe(struct platform_device *pdev)
+{
+ struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
+ struct tps65912_board *pdata = tps65912->dev->platform_data;
+ struct tps65912_gpio_data *tps65912_gpio;
+ int ret;
+
+ tps65912_gpio = kzalloc(sizeof(*tps65912_gpio), GFP_KERNEL);
+ if (tps65912_gpio == NULL)
+ return -ENOMEM;
+
+ tps65912_gpio->tps65912 = tps65912;
+ tps65912_gpio->gpio_chip = template_chip;
+ tps65912_gpio->gpio_chip.dev = &pdev->dev;
+ if (pdata && pdata->gpio_base)
+ tps65912_gpio->gpio_chip.base = pdata->gpio_base;
+
+ ret = gpiochip_add(&tps65912_gpio->gpio_chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register gpiochip, %d\n", ret);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, tps65912_gpio);
+
+ return ret;
+
+err:
+ kfree(tps65912_gpio);
+ return ret;
+}
+
+static int __devexit tps65912_gpio_remove(struct platform_device *pdev)
+{
+ struct tps65912_gpio_data *tps65912_gpio = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&tps65912_gpio->gpio_chip);
+ if (ret == 0)
+ kfree(tps65912_gpio);
+
+ return ret;
+}
+
+static struct platform_driver tps65912_gpio_driver = {
+ .driver = {
+ .name = "tps65912-gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65912_gpio_probe,
+ .remove = __devexit_p(tps65912_gpio_remove),
+};
+
+static int __init tps65912_gpio_init(void)
+{
+ return platform_driver_register(&tps65912_gpio_driver);
+}
+subsys_initcall(tps65912_gpio_init);
+
+static void __exit tps65912_gpio_exit(void)
+{
+ platform_driver_unregister(&tps65912_gpio_driver);
+}
+module_exit(tps65912_gpio_exit);
+
+MODULE_AUTHOR("Margarita Olaya Cabrera <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("GPIO interface for TPS65912 PMICs");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65912-gpio");
diff --git a/drivers/gpio/twl4030-gpio.c b/drivers/gpio/gpio-twl4030.c
index 57635ac35a7..b8b4f228757 100644
--- a/drivers/gpio/twl4030-gpio.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -1,5 +1,5 @@
/*
- * twl4030_gpio.c -- access to GPIOs on TWL4030/TPS659x0 chips
+ * Access to GPIOs on TWL4030/TPS659x0 chips
*
* Copyright (C) 2006-2007 Texas Instruments, Inc.
* Copyright (C) 2006 MontaVista Software, Inc.
diff --git a/drivers/gpio/gpio-u300.c b/drivers/gpio/gpio-u300.c
index d92790140fe..53e8255cb0b 100644
--- a/drivers/gpio/gpio-u300.c
+++ b/drivers/gpio/gpio-u300.c
@@ -1,11 +1,8 @@
/*
- *
- * arch/arm/mach-u300/gpio.c
- *
+ * U300 GPIO module.
*
* Copyright (C) 2007-2009 ST-Ericsson AB
* License terms: GNU General Public License (GPL) version 2
- * U300 GPIO module.
* This can driver either of the two basic GPIO cores
* available in the U300 platforms:
* COH 901 335 - Used in DB3150 (U300 1.0) and DB3200 (U330 1.0)
@@ -581,8 +578,8 @@ static int __init gpio_probe(struct platform_device *pdev)
if (!memres)
goto err_no_resource;
- if (request_mem_region(memres->start, memres->end - memres->start, "GPIO Controller")
- == NULL) {
+ if (!request_mem_region(memres->start, resource_size(memres),
+ "GPIO Controller")) {
err = -ENODEV;
goto err_no_ioregion;
}
@@ -640,7 +637,7 @@ static int __init gpio_probe(struct platform_device *pdev)
free_irq(gpio_ports[i].irq, &gpio_ports[i]);
iounmap(virtbase);
err_no_ioremap:
- release_mem_region(memres->start, memres->end - memres->start);
+ release_mem_region(memres->start, resource_size(memres));
err_no_ioregion:
err_no_resource:
clk_disable(clk);
@@ -660,7 +657,7 @@ static int __exit gpio_remove(struct platform_device *pdev)
for (i = 0 ; i < U300_GPIO_NUM_PORTS; i++)
free_irq(gpio_ports[i].irq, &gpio_ports[i]);
iounmap(virtbase);
- release_mem_region(memres->start, memres->end - memres->start);
+ release_mem_region(memres->start, resource_size(memres));
clk_disable(clk);
clk_put(clk);
return 0;
diff --git a/drivers/gpio/ucb1400_gpio.c b/drivers/gpio/gpio-ucb1400.c
index 50e6bd1392c..50e6bd1392c 100644
--- a/drivers/gpio/ucb1400_gpio.c
+++ b/drivers/gpio/gpio-ucb1400.c
diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/gpio-vr41xx.c
index a365be040b3..98723cb9ac6 100644
--- a/drivers/gpio/vr41xx_giu.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -518,7 +518,7 @@ static int __devinit giu_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- giu_base = ioremap(res->start, res->end - res->start + 1);
+ giu_base = ioremap(res->start, resource_size(res));
if (!giu_base)
return -ENOMEM;
diff --git a/drivers/gpio/vx855_gpio.c b/drivers/gpio/gpio-vx855.c
index ef5aabd8b8b..ef5aabd8b8b 100644
--- a/drivers/gpio/vx855_gpio.c
+++ b/drivers/gpio/gpio-vx855.c
diff --git a/drivers/gpio/wm831x-gpio.c b/drivers/gpio/gpio-wm831x.c
index 309644cf4d9..deb949e75ec 100644
--- a/drivers/gpio/wm831x-gpio.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -1,5 +1,5 @@
/*
- * wm831x-gpio.c -- gpiolib support for Wolfson WM831x PMICs
+ * gpiolib support for Wolfson WM831x PMICs
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
@@ -180,6 +180,7 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
break;
case WM831X_GPIO_PULL_UP:
pull = "pullup";
+ break;
default:
pull = "INVALID PULL";
break;
diff --git a/drivers/gpio/wm8350-gpiolib.c b/drivers/gpio/gpio-wm8350.c
index 359999290f5..a06af515483 100644
--- a/drivers/gpio/wm8350-gpiolib.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -1,5 +1,5 @@
/*
- * wm835x-gpiolib.c -- gpiolib support for Wolfson WM835x PMICs
+ * gpiolib support for Wolfson WM835x PMICs
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
diff --git a/drivers/gpio/wm8994-gpio.c b/drivers/gpio/gpio-wm8994.c
index c822baacd8f..96198f3fab7 100644
--- a/drivers/gpio/wm8994-gpio.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -1,5 +1,5 @@
/*
- * wm8994-gpio.c -- gpiolib support for Wolfson WM8994
+ * gpiolib support for Wolfson WM8994
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
diff --git a/drivers/gpio/xilinx_gpio.c b/drivers/gpio/gpio-xilinx.c
index 846fbd5e31b..846fbd5e31b 100644
--- a/drivers/gpio/xilinx_gpio.c
+++ b/drivers/gpio/gpio-xilinx.c
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 21058e6ad2b..82db1850666 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -886,9 +886,6 @@ int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
total_objects += dev->mode_config.num_connector;
total_objects += dev->mode_config.num_encoder;
- if (total_objects == 0)
- return -EINVAL;
-
group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
if (!group->id_list)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 92369655dca..f88a9b2c977 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -560,6 +560,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
} else if (set->fb == NULL) {
mode_changed = true;
+ } else if (set->fb->depth != set->crtc->fb->depth) {
+ mode_changed = true;
+ } else if (set->fb->bits_per_pixel !=
+ set->crtc->fb->bits_per_pixel) {
+ mode_changed = true;
} else
fb_changed = true;
}
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 9d8c892d07c..9d2668a5087 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -90,7 +90,6 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
struct drm_device *dev = minor->dev;
struct dentry *ent;
struct drm_info_node *tmp;
- char name[64];
int i, ret;
for (i = 0; i < count; i++) {
@@ -108,6 +107,9 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
root, tmp, &drm_debugfs_fops);
if (!ent) {
+ char name[64];
+ strncpy(name, root->d_name.name,
+ min(root->d_name.len, 64U));
DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
name, files[i].name);
kfree(tmp);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 09292193daf..7425e5c9bd7 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -127,6 +127,23 @@ static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
+ /*
+ * Sanity check the header of the base EDID block. Return 8 if the header
+ * is perfect, down to 0 if it's totally wrong.
+ */
+int drm_edid_header_is_valid(const u8 *raw_edid)
+{
+ int i, score = 0;
+
+ for (i = 0; i < sizeof(edid_header); i++)
+ if (raw_edid[i] == edid_header[i])
+ score++;
+
+ return score;
+}
+EXPORT_SYMBOL(drm_edid_header_is_valid);
+
+
/*
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
@@ -139,12 +156,7 @@ drm_edid_block_valid(u8 *raw_edid)
struct edid *edid = (struct edid *)raw_edid;
if (raw_edid[0] == 0x00) {
- int score = 0;
-
- for (i = 0; i < sizeof(edid_header); i++)
- if (raw_edid[i] == edid_header[i])
- score++;
-
+ int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ;
else if (score >= 6) {
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
@@ -185,8 +197,8 @@ drm_edid_block_valid(u8 *raw_edid)
bad:
if (raw_edid) {
printk(KERN_ERR "Raw EDID:\n");
- print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
- printk(KERN_ERR "\n");
+ print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
+ raw_edid, EDID_LENGTH, false);
}
return 0;
}
@@ -1439,6 +1451,8 @@ EXPORT_SYMBOL(drm_detect_monitor_audio);
static void drm_add_display_info(struct edid *edid,
struct drm_display_info *info)
{
+ u8 *edid_ext;
+
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
@@ -1483,6 +1497,13 @@ static void drm_add_display_info(struct edid *edid,
info->color_formats = DRM_COLOR_FORMAT_YCRCB444;
if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats = DRM_COLOR_FORMAT_YCRCB422;
+
+ /* Get data from CEA blocks if present */
+ edid_ext = drm_find_cea_extension(edid);
+ if (!edid_ext)
+ return;
+
+ info->cea_rev = edid_ext[1];
}
/**
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 74e4ff57801..186d62eb063 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
#include "drmP.h"
/** @file drm_gem.c
@@ -128,7 +129,7 @@ drm_gem_destroy(struct drm_device *dev)
}
/**
- * Initialize an already allocate GEM object of the specified size with
+ * Initialize an already allocated GEM object of the specified size with
* shmfs backing store.
*/
int drm_gem_object_init(struct drm_device *dev,
@@ -150,6 +151,27 @@ int drm_gem_object_init(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_object_init);
/**
+ * Initialize an already allocated GEM object of the specified size with
+ * no GEM provided backing store. Instead the caller is responsible for
+ * backing the object and handling it.
+ */
+int drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
+{
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+ obj->dev = dev;
+ obj->filp = NULL;
+
+ kref_init(&obj->refcount);
+ atomic_set(&obj->handle_count, 0);
+ obj->size = size;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_private_object_init);
+
+/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
struct drm_gem_object *
@@ -210,6 +232,8 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, filp);
drm_gem_object_handle_unreference_unlocked(obj);
return 0;
@@ -226,7 +250,8 @@ drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep)
{
- int ret;
+ struct drm_device *dev = obj->dev;
+ int ret;
/*
* Get the user-visible handle using idr.
@@ -247,6 +272,15 @@ again:
return ret;
drm_gem_object_handle_reference(obj);
+
+ if (dev->driver->gem_open_object) {
+ ret = dev->driver->gem_open_object(obj, file_priv);
+ if (ret) {
+ drm_gem_handle_delete(file_priv, *handlep);
+ return ret;
+ }
+ }
+
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
@@ -401,7 +435,12 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
+ struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
+ struct drm_device *dev = obj->dev;
+
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, file_priv);
drm_gem_object_handle_unreference_unlocked(obj);
@@ -417,7 +456,7 @@ void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
idr_for_each(&file_private->object_idr,
- &drm_gem_object_release_handle, NULL);
+ &drm_gem_object_release_handle, file_private);
idr_remove_all(&file_private->object_idr);
idr_destroy(&file_private->object_idr);
@@ -426,7 +465,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
void
drm_gem_object_release(struct drm_gem_object *obj)
{
- fput(obj->filp);
+ if (obj->filp)
+ fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 2022a5c966b..3830e9e478c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -291,11 +291,14 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
if (!dev->irq_enabled)
return;
- if (state)
- dev->driver->irq_uninstall(dev);
- else {
- dev->driver->irq_preinstall(dev);
- dev->driver->irq_postinstall(dev);
+ if (state) {
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
+ } else {
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
+ if (dev->driver->irq_postinstall)
+ dev->driver->irq_postinstall(dev);
}
}
@@ -338,7 +341,8 @@ int drm_irq_install(struct drm_device *dev)
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
/* Before installing handler */
- dev->driver->irq_preinstall(dev);
+ if (dev->driver->irq_preinstall)
+ dev->driver->irq_preinstall(dev);
/* Install handler */
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
@@ -363,11 +367,16 @@ int drm_irq_install(struct drm_device *dev)
vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
/* After installing handler */
- ret = dev->driver->irq_postinstall(dev);
+ if (dev->driver->irq_postinstall)
+ ret = dev->driver->irq_postinstall(dev);
+
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
dev->irq_enabled = 0;
mutex_unlock(&dev->struct_mutex);
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
+ free_irq(drm_dev_to_irq(dev), dev);
}
return ret;
@@ -413,7 +422,8 @@ int drm_irq_uninstall(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
vga_client_register(dev->pdev, NULL, NULL, NULL);
- dev->driver->irq_uninstall(dev);
+ if (dev->driver->irq_uninstall)
+ dev->driver->irq_uninstall(dev);
free_irq(drm_dev_to_irq(dev), dev);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c2d32f20e2f..ad74fb4dc54 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -994,9 +994,10 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
{
const char *name;
unsigned int namelen;
- int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
+ bool res_specified = false, bpp_specified = false, refresh_specified = false;
unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
- int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
+ bool yres_specified = false, cvt = false, rb = false;
+ bool interlace = false, margins = false, was_digit = false;
int i;
enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
@@ -1015,54 +1016,65 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
for (i = namelen-1; i >= 0; i--) {
switch (name[i]) {
case '@':
- namelen = i;
if (!refresh_specified && !bpp_specified &&
- !yres_specified) {
+ !yres_specified && !cvt && !rb && was_digit) {
refresh = simple_strtol(&name[i+1], NULL, 10);
- refresh_specified = 1;
- if (cvt || rb)
- cvt = 0;
+ refresh_specified = true;
+ was_digit = false;
} else
goto done;
break;
case '-':
- namelen = i;
- if (!bpp_specified && !yres_specified) {
+ if (!bpp_specified && !yres_specified && !cvt &&
+ !rb && was_digit) {
bpp = simple_strtol(&name[i+1], NULL, 10);
- bpp_specified = 1;
- if (cvt || rb)
- cvt = 0;
+ bpp_specified = true;
+ was_digit = false;
} else
goto done;
break;
case 'x':
- if (!yres_specified) {
+ if (!yres_specified && was_digit) {
yres = simple_strtol(&name[i+1], NULL, 10);
- yres_specified = 1;
+ yres_specified = true;
+ was_digit = false;
} else
goto done;
case '0' ... '9':
+ was_digit = true;
break;
case 'M':
- if (!yres_specified)
- cvt = 1;
+ if (yres_specified || cvt || was_digit)
+ goto done;
+ cvt = true;
break;
case 'R':
- if (cvt)
- rb = 1;
+ if (yres_specified || cvt || rb || was_digit)
+ goto done;
+ rb = true;
break;
case 'm':
- if (!cvt)
- margins = 1;
+ if (cvt || yres_specified || was_digit)
+ goto done;
+ margins = true;
break;
case 'i':
- if (!cvt)
- interlace = 1;
+ if (cvt || yres_specified || was_digit)
+ goto done;
+ interlace = true;
break;
case 'e':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
force = DRM_FORCE_ON;
break;
case 'D':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
(connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
force = DRM_FORCE_ON;
@@ -1070,17 +1082,37 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
force = DRM_FORCE_ON_DIGITAL;
break;
case 'd':
+ if (yres_specified || bpp_specified || refresh_specified ||
+ was_digit || (force != DRM_FORCE_UNSPECIFIED))
+ goto done;
+
force = DRM_FORCE_OFF;
break;
default:
goto done;
}
}
+
if (i < 0 && yres_specified) {
- xres = simple_strtol(name, NULL, 10);
- res_specified = 1;
+ char *ch;
+ xres = simple_strtol(name, &ch, 10);
+ if ((ch != NULL) && (*ch == 'x'))
+ res_specified = true;
+ else
+ i = ch - name;
+ } else if (!yres_specified && was_digit) {
+ /* catch mode that begins with digits but has no 'x' */
+ i = 0;
}
done:
+ if (i >= 0) {
+ printk(KERN_WARNING
+ "parse error at position %i in video mode '%s'\n",
+ i, name);
+ mode->specified = false;
+ return false;
+ }
+
if (res_specified) {
mode->specified = true;
mode->xres = xres;
@@ -1096,9 +1128,10 @@ done:
mode->bpp_specified = true;
mode->bpp = bpp;
}
- mode->rb = rb ? true : false;
- mode->cvt = cvt ? true : false;
- mode->interlace = interlace ? true : false;
+ mode->rb = rb;
+ mode->cvt = cvt;
+ mode->interlace = interlace;
+ mode->margins = margins;
mode->force = force;
return true;
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 7223f06d8e5..2a8b6265ad3 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -123,14 +123,15 @@ static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *mas
{
int len, ret;
- master->unique_len = 10 + strlen(dev->platformdev->name);
+ master->unique_len = 13 + strlen(dev->platformdev->name);
+ master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
len = snprintf(master->unique, master->unique_len,
- "platform:%s", dev->platformdev->name);
+ "platform:%s:%02d", dev->platformdev->name, dev->platformdev->id);
if (len > master->unique_len) {
DRM_ERROR("Unique buffer overflowed\n");
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index d15e09b0ae0..7525e0311e5 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -83,30 +83,26 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
if (dev->sg)
return -EINVAL;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- memset(entry, 0, sizeof(*entry));
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
entry->pages = pages;
- entry->pagelist = kmalloc(pages * sizeof(*entry->pagelist), GFP_KERNEL);
+ entry->pagelist = kcalloc(pages, sizeof(*entry->pagelist), GFP_KERNEL);
if (!entry->pagelist) {
kfree(entry);
return -ENOMEM;
}
- memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
-
- entry->busaddr = kmalloc(pages * sizeof(*entry->busaddr), GFP_KERNEL);
+ entry->busaddr = kcalloc(pages, sizeof(*entry->busaddr), GFP_KERNEL);
if (!entry->busaddr) {
kfree(entry->pagelist);
kfree(entry);
return -ENOMEM;
}
- memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
if (!entry->virtual) {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4d46441cbe2..3c395a59da3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
for (i = 0; i < I915_NUM_RINGS; i++) {
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
dev_priv->ring[i].name,
I915_READ_IMR(&dev_priv->ring[i]));
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
MEMSTAT_VID_SHIFT);
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
- } else if (IS_GEN6(dev)) {
+ } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1123,6 +1123,44 @@ static int i915_emon_status(struct seq_file *m, void *unused)
return 0;
}
+static int i915_ring_freq_table(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ int gpu_freq, ia_freq;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+ seq_printf(m, "unsupported on this chipset\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
+
+ for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+ gpu_freq++) {
+ I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+ GEN6_PCODE_READ_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+ GEN6_PCODE_READY) == 0, 10)) {
+ DRM_ERROR("pcode read of freq table timed out\n");
+ continue;
+ }
+ ia_freq = I915_READ(GEN6_PCODE_DATA);
+ seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
static int i915_gfxec(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1207,13 +1245,17 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- seq_printf(m, "power context ");
- describe_obj(m, dev_priv->pwrctx);
- seq_printf(m, "\n");
+ if (dev_priv->pwrctx) {
+ seq_printf(m, "power context ");
+ describe_obj(m, dev_priv->pwrctx);
+ seq_printf(m, "\n");
+ }
- seq_printf(m, "render context ");
- describe_obj(m, dev_priv->renderctx);
- seq_printf(m, "\n");
+ if (dev_priv->renderctx) {
+ seq_printf(m, "render context ");
+ describe_obj(m, dev_priv->renderctx);
+ seq_printf(m, "\n");
+ }
mutex_unlock(&dev->mode_config.mutex);
@@ -1296,6 +1338,155 @@ static const struct file_operations i915_wedged_fops = {
.llseek = default_llseek,
};
+static int
+i915_max_freq_open(struct inode *inode,
+ struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+i915_max_freq_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[80];
+ int len;
+
+ len = snprintf(buf, sizeof (buf),
+ "max freq: %d\n", dev_priv->max_delay * 50);
+
+ if (len > sizeof (buf))
+ len = sizeof (buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_max_freq_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ char buf[20];
+ int val = 1;
+
+ if (cnt > 0) {
+ if (cnt > sizeof (buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
+
+ /*
+ * Turbo will still be enabled, but won't go above the set value.
+ */
+ dev_priv->max_delay = val / 50;
+
+ gen6_set_rps(dev, val / 50);
+
+ return cnt;
+}
+
+static const struct file_operations i915_max_freq_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_max_freq_open,
+ .read = i915_max_freq_read,
+ .write = i915_max_freq_write,
+ .llseek = default_llseek,
+};
+
+static int
+i915_cache_sharing_open(struct inode *inode,
+ struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+i915_cache_sharing_read(struct file *filp,
+ char __user *ubuf,
+ size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[80];
+ u32 snpcr;
+ int len;
+
+ mutex_lock(&dev_priv->dev->struct_mutex);
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+
+ len = snprintf(buf, sizeof (buf),
+ "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
+ GEN6_MBC_SNPCR_SHIFT);
+
+ if (len > sizeof (buf))
+ len = sizeof (buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_cache_sharing_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ char buf[20];
+ u32 snpcr;
+ int val = 1;
+
+ if (cnt > 0) {
+ if (cnt > sizeof (buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ if (val < 0 || val > 3)
+ return -EINVAL;
+
+ DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);
+
+ /* Update the cache sharing policy here as well */
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr &= ~GEN6_MBC_SNPCR_MASK;
+ snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
+ I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+ return cnt;
+}
+
+static const struct file_operations i915_cache_sharing_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_cache_sharing_open,
+ .read = i915_cache_sharing_read,
+ .write = i915_cache_sharing_write,
+ .llseek = default_llseek,
+};
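The write path is a read-modify-write of the two-bit snoop-control field in GEN6_MBCUNIT_SNPCR (the mask and shift are added to i915_reg.h later in this patch). A standalone sketch of just the bit manipulation, with the register access faked:

#include <stdio.h>

#define GEN6_MBC_SNPCR_SHIFT	21
#define GEN6_MBC_SNPCR_MASK	(3u << 21)

/* Clear the old policy bits and insert the new value (0..3). */
static unsigned int set_snoop_policy(unsigned int snpcr, unsigned int val)
{
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
	return snpcr;
}

int main(void)
{
	unsigned int reg = 0;	/* hypothetical current register value */
	unsigned int updated = set_snoop_policy(reg, 2);

	printf("snpcr: 0x%08x -> 0x%08x (policy %u)\n", reg, updated,
	       (updated & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT);
	return 0;
}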
+
/* As the drm_debugfs_init() routines are called before dev->dev_private is
* allocated we need to hook into the minor for release. */
static int
@@ -1395,6 +1586,36 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}
+static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+
+ ent = debugfs_create_file("i915_max_freq",
+ S_IRUGO | S_IWUSR,
+ root, dev,
+ &i915_max_freq_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
+}
+
+static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ struct dentry *ent;
+
+ ent = debugfs_create_file("i915_cache_sharing",
+ S_IRUGO | S_IWUSR,
+ root, dev,
+ &i915_cache_sharing_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
+}
+
static struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
@@ -1426,6 +1647,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_inttoext_table", i915_inttoext_table, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
+ {"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_gfxec", i915_gfxec, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_sr_status", i915_sr_status, 0},
@@ -1447,6 +1669,12 @@ int i915_debugfs_init(struct drm_minor *minor)
ret = i915_forcewake_create(minor->debugfs_root, minor);
if (ret)
return ret;
+ ret = i915_max_freq_create(minor->debugfs_root, minor);
+ if (ret)
+ return ret;
+ ret = i915_cache_sharing_create(minor->debugfs_root, minor);
+ if (ret)
+ return ret;
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
@@ -1461,6 +1689,10 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
+ 1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0239e9974bf..8a3942c4f09 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -61,7 +61,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
static int i915_init_phys_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
@@ -71,10 +70,9 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- ring->status_page.page_addr =
- (void __force __iomem *)dev_priv->status_page_dmah->vaddr;
- memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
+ memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
+ 0, PAGE_SIZE);
i915_write_hws_pga(dev);
@@ -1073,6 +1071,9 @@ static void i915_setup_compression(struct drm_device *dev, int size)
unsigned long cfb_base;
unsigned long ll_base = 0;
+ /* Just in case the BIOS is doing something questionable. */
+ intel_disable_fbc(dev);
+
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
if (compressed_fb)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
@@ -1099,7 +1100,6 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->cfb_size = size;
- intel_disable_fbc(dev);
dev_priv->compressed_fb = compressed_fb;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
@@ -1266,30 +1266,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
- if (IS_IVYBRIDGE(dev)) {
- /* Share pre & uninstall handlers with ILK/SNB */
- dev->driver->irq_handler = ivybridge_irq_handler;
- dev->driver->irq_preinstall = ironlake_irq_preinstall;
- dev->driver->irq_postinstall = ivybridge_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_uninstall;
- dev->driver->enable_vblank = ivybridge_enable_vblank;
- dev->driver->disable_vblank = ivybridge_disable_vblank;
- } else if (HAS_PCH_SPLIT(dev)) {
- dev->driver->irq_handler = ironlake_irq_handler;
- dev->driver->irq_preinstall = ironlake_irq_preinstall;
- dev->driver->irq_postinstall = ironlake_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_uninstall;
- dev->driver->enable_vblank = ironlake_enable_vblank;
- dev->driver->disable_vblank = ironlake_disable_vblank;
- } else {
- dev->driver->irq_preinstall = i915_driver_irq_preinstall;
- dev->driver->irq_postinstall = i915_driver_irq_postinstall;
- dev->driver->irq_uninstall = i915_driver_irq_uninstall;
- dev->driver->irq_handler = i915_driver_irq_handler;
- dev->driver->enable_vblank = i915_enable_vblank;
- dev->driver->disable_vblank = i915_disable_vblank;
- }
-
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
@@ -1967,7 +1943,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
ret = -ENODEV;
- goto out_iomapfree;
+ goto out_rmmap;
}
agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
@@ -2011,18 +1987,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
- goto out_iomapfree;
+ goto out_mtrrfree;
}
/* enable GEM by default */
dev_priv->has_gem = 1;
- dev->driver->get_vblank_counter = i915_get_vblank_counter;
- dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
- dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
- dev->driver->get_vblank_counter = gm45_get_vblank_counter;
- }
+ intel_irq_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
@@ -2103,13 +2074,21 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
out_gem_unload:
+ if (dev_priv->mm.inactive_shrinker.shrink)
+ unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->wq);
-out_iomapfree:
+out_mtrrfree:
+ if (dev_priv->mm.gtt_mtrr >= 0) {
+ mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
+ dev->agp->agp_info.aper_size * 1024 * 1024);
+ dev_priv->mm.gtt_mtrr = -1;
+ }
io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
@@ -2182,9 +2161,8 @@ int i915_driver_unload(struct drm_device *dev)
/* Flush any outstanding unpin_work. */
flush_workqueue(dev_priv->wq);
- i915_gem_free_all_phys_object(dev);
-
mutex_lock(&dev->struct_mutex);
+ i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
if (I915_HAS_FBC(dev) && i915_powersave)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0defd427059..ce045a8cf82 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -37,38 +37,70 @@
#include <linux/console.h>
#include "drm_crtc_helper.h"
-static int i915_modeset = -1;
+static int i915_modeset __read_mostly = -1;
module_param_named(modeset, i915_modeset, int, 0400);
+MODULE_PARM_DESC(modeset,
+ "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+ "1=on, -1=force vga console preference [default])");
-unsigned int i915_fbpercrtc = 0;
+unsigned int i915_fbpercrtc __always_unused = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-int i915_panel_ignore_lid = 0;
+int i915_panel_ignore_lid __read_mostly = 0;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
+MODULE_PARM_DESC(panel_ignore_lid,
+ "Override lid status (0=autodetect [default], 1=lid open, "
+ "-1=lid closed)");
-unsigned int i915_powersave = 1;
+unsigned int i915_powersave __read_mostly = 1;
module_param_named(powersave, i915_powersave, int, 0600);
+MODULE_PARM_DESC(powersave,
+ "Enable powersavings, fbc, downclocking, etc. (default: true)");
-unsigned int i915_semaphores = 0;
+unsigned int i915_semaphores __read_mostly = 0;
module_param_named(semaphores, i915_semaphores, int, 0600);
+MODULE_PARM_DESC(semaphores,
+ "Use semaphores for inter-ring sync (default: false)");
-unsigned int i915_enable_rc6 = 1;
+unsigned int i915_enable_rc6 __read_mostly = 0;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+MODULE_PARM_DESC(i915_enable_rc6,
+ "Enable power-saving render C-state 6 (default: true)");
-unsigned int i915_enable_fbc = 0;
+unsigned int i915_enable_fbc __read_mostly = 1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
+MODULE_PARM_DESC(i915_enable_fbc,
+ "Enable frame buffer compression for power savings "
+ "(default: false)");
-unsigned int i915_lvds_downclock = 0;
+unsigned int i915_lvds_downclock __read_mostly = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+MODULE_PARM_DESC(lvds_downclock,
+ "Use panel (LVDS/eDP) downclocking for power savings "
+ "(default: false)");
-unsigned int i915_panel_use_ssc = 1;
+unsigned int i915_panel_use_ssc __read_mostly = 1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
+MODULE_PARM_DESC(lvds_use_ssc,
+ "Use Spread Spectrum Clock with panels [LVDS/eDP] "
+ "(default: true)");
-int i915_vbt_sdvo_panel_type = -1;
+int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
+MODULE_PARM_DESC(vbt_sdvo_panel_type,
+ "Override selection of SDVO panel mode in the VBT "
+ "(default: auto)");
-static bool i915_try_reset = true;
+static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
+MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+
+bool i915_enable_hangcheck __read_mostly = true;
+module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
+MODULE_PARM_DESC(enable_hangcheck,
+ "Periodically check GPU activity for detecting hangs. "
+ "WARNING: Disabling this can cause system wide hangs. "
+ "(default: true)");
static struct drm_driver driver;
extern int intel_agp_enabled;
@@ -345,12 +377,17 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
- int loop = 500;
- u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- while (fifo < 20 && loop--) {
- udelay(10);
- fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+ int loop = 500;
+ u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+ udelay(10);
+ fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ }
+ WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
+ dev_priv->gt_fifo_count = fifo;
}
+ dev_priv->gt_fifo_count--;
}
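The reworked helper caches the number of free GT FIFO entries in dev_priv->gt_fifo_count and only polls the hardware when the cached count falls to the reserved threshold, consuming one entry per subsequent write. A standalone sketch of that bookkeeping, with the register read stubbed out (the udelay between polls is omitted):

#include <stdio.h>

#define GT_FIFO_NUM_RESERVED_ENTRIES 20

static unsigned int hw_free = 64;	/* pretend hardware state */

/* Stand-in for the GT_FIFO_FREE_ENTRIES register read. */
static unsigned int read_free_entries(void)
{
	return hw_free;
}

struct gt_state {
	unsigned int fifo_count;	/* cached free entries */
};

static void wait_for_fifo(struct gt_state *gt)
{
	if (gt->fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		unsigned int fifo = read_free_entries();

		/* Poll until more than the reserved entries are free. */
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--)
			fifo = read_free_entries();
		gt->fifo_count = fifo;
	}
	gt->fifo_count--;	/* the pending register write consumes one entry */
}

int main(void)
{
	struct gt_state gt = { .fifo_count = 0 };

	wait_for_fifo(&gt);
	printf("cached free entries after write: %u\n", gt.fifo_count);
	return 0;
}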
static int i915_drm_freeze(struct drm_device *dev)
@@ -577,8 +614,12 @@ int i915_reset(struct drm_device *dev, u8 flags)
if (get_seconds() - dev_priv->last_gpu_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
} else switch (INTEL_INFO(dev)->gen) {
+ case 7:
case 6:
ret = gen6_do_reset(dev, flags);
+ /* If reset with a user forcewake, try to restore */
+ if (atomic_read(&dev_priv->forcewake_count))
+ __gen6_gt_force_wake_get(dev_priv);
break;
case 5:
ret = ironlake_do_reset(dev, flags);
@@ -762,14 +803,6 @@ static struct drm_driver driver = {
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
- .enable_vblank = i915_enable_vblank,
- .disable_vblank = i915_disable_vblank,
- .get_vblank_timestamp = i915_get_vblank_timestamp,
- .get_scanout_position = i915_get_crtc_scanoutpos,
- .irq_preinstall = i915_driver_irq_preinstall,
- .irq_postinstall = i915_driver_irq_postinstall,
- .irq_uninstall = i915_driver_irq_uninstall,
- .irq_handler = i915_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f63ee162f12..7916bd97d5c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -36,6 +36,7 @@
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <drm/intel-gtt.h>
+#include <linux/backlight.h>
/* General customization:
*/
@@ -211,6 +212,11 @@ struct drm_i915_display_funcs {
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
void (*init_pch_clock_gating)(struct drm_device *dev);
+ int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj);
+ int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -259,8 +265,10 @@ enum intel_pch {
};
#define QUIRK_PIPEA_FORCE (1<<0)
+#define QUIRK_LVDS_SSC_DISABLE (1<<1)
struct intel_fbdev;
+struct intel_fbc_work;
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -271,6 +279,7 @@ typedef struct drm_i915_private {
int relative_constants_mode;
void __iomem *regs;
+ u32 gt_fifo_count;
struct intel_gmbus {
struct i2c_adapter adapter;
@@ -325,11 +334,10 @@ typedef struct drm_i915_private {
uint32_t last_instdone1;
unsigned long cfb_size;
- unsigned long cfb_pitch;
- unsigned long cfb_offset;
- int cfb_fence;
- int cfb_plane;
+ unsigned int cfb_fb;
+ enum plane cfb_plane;
int cfb_y;
+ struct intel_fbc_work *fbc_work;
struct intel_opregion opregion;
@@ -537,6 +545,7 @@ typedef struct drm_i915_private {
u32 savePIPEB_LINK_M1;
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
+ u32 savePCH_PORT_HOTPLUG;
struct {
/** Bridge to intel-gtt-ko */
@@ -682,6 +691,7 @@ typedef struct drm_i915_private {
int child_dev_num;
struct child_device_config *child_dev;
struct drm_connector *int_lvds_connector;
+ struct drm_connector *int_edp_connector;
bool mchbar_need_disable;
@@ -715,6 +725,8 @@ typedef struct drm_i915_private {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
+ struct backlight_device *backlight;
+
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
@@ -982,20 +994,19 @@ struct drm_i915_file_private {
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
-extern unsigned int i915_fbpercrtc;
-extern int i915_panel_ignore_lid;
-extern unsigned int i915_powersave;
-extern unsigned int i915_semaphores;
-extern unsigned int i915_lvds_downclock;
-extern unsigned int i915_panel_use_ssc;
-extern int i915_vbt_sdvo_panel_type;
-extern unsigned int i915_enable_rc6;
-extern unsigned int i915_enable_fbc;
+extern unsigned int i915_fbpercrtc __always_unused;
+extern int i915_panel_ignore_lid __read_mostly;
+extern unsigned int i915_powersave __read_mostly;
+extern unsigned int i915_semaphores __read_mostly;
+extern unsigned int i915_lvds_downclock __read_mostly;
+extern unsigned int i915_panel_use_ssc __read_mostly;
+extern int i915_vbt_sdvo_panel_type __read_mostly;
+extern unsigned int i915_enable_rc6 __read_mostly;
+extern unsigned int i915_enable_fbc __read_mostly;
+extern bool i915_enable_hangcheck __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
-extern void i915_save_display(struct drm_device *dev);
-extern void i915_restore_display(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -1030,33 +1041,12 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i915_driver_irq_preinstall(struct drm_device * dev);
-extern int i915_driver_irq_postinstall(struct drm_device *dev);
-extern void i915_driver_irq_uninstall(struct drm_device * dev);
-
-extern irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS);
-extern void ironlake_irq_preinstall(struct drm_device *dev);
-extern int ironlake_irq_postinstall(struct drm_device *dev);
-extern void ironlake_irq_uninstall(struct drm_device *dev);
-
-extern irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS);
-extern void ivybridge_irq_preinstall(struct drm_device *dev);
-extern int ivybridge_irq_postinstall(struct drm_device *dev);
-extern void ivybridge_irq_uninstall(struct drm_device *dev);
+extern void intel_irq_init(struct drm_device *dev);
extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int i915_enable_vblank(struct drm_device *dev, int crtc);
-extern void i915_disable_vblank(struct drm_device *dev, int crtc);
-extern int ironlake_enable_vblank(struct drm_device *dev, int crtc);
-extern void ironlake_disable_vblank(struct drm_device *dev, int crtc);
-extern int ivybridge_enable_vblank(struct drm_device *dev, int crtc);
-extern void ivybridge_disable_vblank(struct drm_device *dev, int crtc);
-extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
-extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1067,13 +1057,6 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void intel_enable_asle (struct drm_device *dev);
-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
- int *max_error,
- struct timeval *vblank_time,
- unsigned flags);
-
-int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
- int *vpos, int *hpos);
#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
@@ -1190,7 +1173,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
void i915_gem_do_init(struct drm_device *dev,
@@ -1209,7 +1192,8 @@ int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
int __must_check
-i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -1221,11 +1205,18 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+ uint32_t size,
+ int tiling_mode);
+
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
/* i915_gem_gtt.c */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
/* i915_gem_evict.c */
@@ -1307,12 +1298,8 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void i8xx_disable_fbc(struct drm_device *dev);
-extern void g4x_disable_fbc(struct drm_device *dev);
-extern void ironlake_disable_fbc(struct drm_device *dev);
-extern void intel_disable_fbc(struct drm_device *dev);
-extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 94c84d74410..a546a71fb06 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
@@ -359,8 +360,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -463,8 +463,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
@@ -797,8 +796,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -907,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
@@ -1219,11 +1216,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
goto unlock;
- }
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto unlock;
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
+ }
if (obj->tiling_mode == I915_TILING_NONE)
ret = i915_gem_object_put_fence(obj);
@@ -1377,25 +1374,24 @@ i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
}
static uint32_t
-i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
- struct drm_device *dev = obj->base.dev;
- uint32_t size;
+ uint32_t gtt_size;
if (INTEL_INFO(dev)->gen >= 4 ||
- obj->tiling_mode == I915_TILING_NONE)
- return obj->base.size;
+ tiling_mode == I915_TILING_NONE)
+ return size;
/* Previous chips need a power-of-two fence region when tiling */
if (INTEL_INFO(dev)->gen == 3)
- size = 1024*1024;
+ gtt_size = 1024*1024;
else
- size = 512*1024;
+ gtt_size = 512*1024;
- while (size < obj->base.size)
- size <<= 1;
+ while (gtt_size < size)
+ gtt_size <<= 1;
- return size;
+ return gtt_size;
}
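On pre-965 hardware a tiled object needs a power-of-two fence region, with a floor of 1 MiB on gen3 and 512 KiB earlier; the loop above rounds the object size up to the next such boundary. A standalone sketch of the same arithmetic, with the gen/tiling checks reduced to plain parameters:

#include <stdint.h>
#include <stdio.h>

static uint32_t gtt_fence_size(uint32_t size, int gen, int tiled)
{
	uint32_t gtt_size;

	if (gen >= 4 || !tiled)
		return size;		/* no power-of-two requirement */

	gtt_size = (gen == 3) ? 1024 * 1024 : 512 * 1024;
	while (gtt_size < size)		/* round up to the next power of two */
		gtt_size <<= 1;
	return gtt_size;
}

int main(void)
{
	/* A hypothetical 1.5 MiB tiled object on gen3 needs a 2 MiB fence region. */
	printf("%u\n", gtt_fence_size(1536 * 1024, 3, 1));
	return 0;
}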
/**
@@ -1406,59 +1402,52 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
* potential fence register mapping.
*/
static uint32_t
-i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
+i915_gem_get_gtt_alignment(struct drm_device *dev,
+ uint32_t size,
+ int tiling_mode)
{
- struct drm_device *dev = obj->base.dev;
-
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
if (INTEL_INFO(dev)->gen >= 4 ||
- obj->tiling_mode == I915_TILING_NONE)
+ tiling_mode == I915_TILING_NONE)
return 4096;
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
- return i915_gem_get_gtt_size(obj);
+ return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
/**
* i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
* unfenced object
- * @obj: object to check
+ * @dev: the device
+ * @size: size of the object
+ * @tiling_mode: tiling mode of the object
*
* Return the required GTT alignment for an object, only taking into account
* unfenced tiled surface requirements.
*/
uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+ uint32_t size,
+ int tiling_mode)
{
- struct drm_device *dev = obj->base.dev;
- int tile_height;
-
/*
* Minimum alignment is 4k (GTT page size) for sane hw.
*/
if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
- obj->tiling_mode == I915_TILING_NONE)
+ tiling_mode == I915_TILING_NONE)
return 4096;
- /*
- * Older chips need unfenced tiled buffers to be aligned to the left
- * edge of an even tile row (where tile rows are counted as if the bo is
- * placed in a fenced gtt region).
+ /* Previous hardware however needs to be aligned to a power-of-two
+ * tile height. The simplest method for determining this is to reuse
+ * the power-of-two object size.
*/
- if (IS_GEN2(dev))
- tile_height = 16;
- else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
- tile_height = 32;
- else
- tile_height = 8;
-
- return tile_height * obj->stride * 2;
+ return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
int
@@ -1558,12 +1547,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
inode = obj->base.filp->f_path.dentry->d_inode;
mapping = inode->i_mapping;
+ gfpmask |= mapping_gfp_mask(mapping);
+
for (i = 0; i < page_count; i++) {
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_RECLAIMABLE |
- gfpmask);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
if (IS_ERR(page))
goto err_pages;
@@ -1701,13 +1688,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
/* Our goal here is to return as much of the memory as
* is possible back to the system as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
- * backing pages, *now*. Here we mirror the actions taken
- * when by shmem_delete_inode() to release the backing store.
+ * backing pages, *now*.
*/
inode = obj->base.filp->f_path.dentry->d_inode;
- truncate_inode_pages(inode->i_mapping, 0);
- if (inode->i_op->truncate_range)
- inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+ shmem_truncate_range(inode, 0, (loff_t)-1);
obj->madv = __I915_MADV_PURGED;
}
@@ -1779,8 +1763,11 @@ i915_add_request(struct intel_ring_buffer *ring,
ring->outstanding_lazy_request = false;
if (!dev_priv->mm.suspended) {
- mod_timer(&dev_priv->hangcheck_timer,
- jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ if (i915_enable_hangcheck) {
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies +
+ msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ }
if (was_empty)
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
@@ -2080,8 +2067,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
if (!ier) {
DRM_ERROR("something (likely vbetool) disabled "
"interrupts, re-enabling\n");
- i915_driver_irq_preinstall(ring->dev);
- i915_driver_irq_postinstall(ring->dev);
+ ring->dev->driver->irq_preinstall(ring->dev);
+ ring->dev->driver->irq_postinstall(ring->dev);
}
trace_i915_gem_request_wait_begin(ring, seqno);
@@ -2151,6 +2138,30 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
return 0;
}
+static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+{
+ u32 old_write_domain, old_read_domains;
+
+ /* Act as a barrier for all accesses through the GTT */
+ mb();
+
+ /* Force a pagefault for domain tracking on next user access */
+ i915_gem_release_mmap(obj);
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ return;
+
+ old_read_domains = obj->base.read_domains;
+ old_write_domain = obj->base.write_domain;
+
+ obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+}
+
/**
* Unbinds an object from the GTT aperture.
*/
@@ -2167,23 +2178,28 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return -EINVAL;
}
- /* blow away mappings if mapped through GTT */
- i915_gem_release_mmap(obj);
-
- /* Move the object to the CPU domain to ensure that
- * any possible CPU writes while it's not in the GTT
- * are flushed when we go to remap it. This will
- * also ensure that all pending GPU writes are finished
- * before we unbind.
- */
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ ret = i915_gem_object_finish_gpu(obj);
if (ret == -ERESTARTSYS)
return ret;
/* Continue on if we fail due to EIO, the GPU is hung so we
* should be safe and we need to cleanup or else we might
* cause memory corruption through use-after-free.
*/
+
+ i915_gem_object_finish_gtt(obj);
+
+ /* Move the object to the CPU domain to ensure that
+ * any possible CPU writes while it's not in the GTT
+ * are flushed when we go to remap it.
+ */
+ if (ret == 0)
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret == -ERESTARTSYS)
+ return ret;
if (ret) {
+ /* In the event of a disaster, abandon all caches and
+ * hope for the best.
+ */
i915_gem_clflush_object(obj);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -2752,9 +2768,16 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
return -EINVAL;
}
- fence_size = i915_gem_get_gtt_size(obj);
- fence_alignment = i915_gem_get_gtt_alignment(obj);
- unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
+ fence_size = i915_gem_get_gtt_size(dev,
+ obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode);
+ unfenced_alignment =
+ i915_gem_get_unfenced_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode);
if (alignment == 0)
alignment = map_and_fenceable ? fence_alignment :
@@ -2926,8 +2949,6 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
*/
wmb();
- i915_gem_release_mmap(obj);
-
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3007,51 +3028,139 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ int ret;
+
+ if (obj->cache_level == cache_level)
+ return 0;
+
+ if (obj->pin_count) {
+ DRM_DEBUG("can not change the cache level of pinned objects\n");
+ return -EBUSY;
+ }
+
+ if (obj->gtt_space) {
+ ret = i915_gem_object_finish_gpu(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_finish_gtt(obj);
+
+ /* Before SandyBridge, you could not use tiling or fence
+ * registers with snooped memory, so relinquish any fences
+ * currently pointing to our region in the aperture.
+ */
+ if (INTEL_INFO(obj->base.dev)->gen < 6) {
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+ }
+
+ i915_gem_gtt_rebind_object(obj, cache_level);
+ }
+
+ if (cache_level == I915_CACHE_NONE) {
+ u32 old_read_domains, old_write_domain;
+
+ /* If we're coming from LLC cached, then we haven't
+ * actually been tracking whether the data is in the
+ * CPU cache or not, since we only allow one bit set
+ * in obj->write_domain and have been skipping the clflushes.
+ * Just set it to the CPU cache for now.
+ */
+ WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+ WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
+
+ old_read_domains = obj->base.read_domains;
+ old_write_domain = obj->base.write_domain;
+
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+ }
+
+ obj->cache_level = cache_level;
+ return 0;
+}
+
/*
- * Prepare buffer for display plane. Use uninterruptible for possible flush
- * wait, as in modesetting process we're not supposed to be interrupted.
+ * Prepare buffer for display plane (scanout, cursors, etc).
+ * Can be called from an uninterruptible phase (modesetting) and allows
+ * any flushes to be pipelined (for pageflips).
+ *
+ * For the display plane, we want to be in the GTT but out of any write
+ * domains. So in many ways this looks like set_to_gtt_domain() apart from the
+ * ability to pipeline the waits, pinning and any additional subtleties
+ * that may differentiate the display plane from ordinary buffers.
*/
int
-i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
struct intel_ring_buffer *pipelined)
{
- uint32_t old_read_domains;
+ u32 old_read_domains, old_write_domain;
int ret;
- /* Not valid to be called on unbound objects. */
- if (obj->gtt_space == NULL)
- return -EINVAL;
-
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
-
- /* Currently, we are always called from an non-interruptible context. */
if (pipelined != obj->ring) {
ret = i915_gem_object_wait_rendering(obj);
- if (ret)
+ if (ret == -ERESTARTSYS)
return ret;
}
+ /* The display engine is not coherent with the LLC cache on gen6. As
+ * a result, we make sure that the pinning that is about to occur is
+ * done with uncached PTEs. This is the lowest common denominator for all
+ * chipsets.
+ *
+ * However for gen6+, we could do better by using the GFDT bit instead
+ * of uncaching, which would allow us to flush all the LLC-cached data
+ * with that bit in the PTE to main memory with just one PIPE_CONTROL.
+ */
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ if (ret)
+ return ret;
+
+ /* As the user may map the buffer once pinned in the display plane
+ * (e.g. libkms for the bootup splash), we have to ensure that we
+ * always use map_and_fenceable for all scanout buffers.
+ */
+ ret = i915_gem_object_pin(obj, alignment, true);
+ if (ret)
+ return ret;
+
i915_gem_object_flush_cpu_write_domain(obj);
+ old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- obj->base.write_domain);
+ old_write_domain);
return 0;
}
int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
+i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
int ret;
- if (!obj->active)
+ if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
return 0;
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
@@ -3060,6 +3169,9 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
return ret;
}
+ /* Ensure that we invalidate the GPU's caches and TLBs. */
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+
return i915_gem_object_wait_rendering(obj);
}
@@ -3567,6 +3679,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ struct address_space *mapping;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
@@ -3577,12 +3690,31 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
return NULL;
}
+ mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
i915_gem_info_add_obj(dev_priv, size);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->cache_level = I915_CACHE_NONE;
+ if (IS_GEN6(dev)) {
+ /* On Gen6, we can have the GPU use the LLC (the CPU
+ * cache) for about a 10% performance improvement
+ * compared to uncached. Graphics requests other than
+ * display scanout are coherent with the CPU in
+ * accessing this cache. This means in this mode we
+ * don't need to clflush on the CPU side, and on the
+ * GPU side we only need to flush internal caches to
+ * get data visible to the CPU.
+ *
+ * However, we maintain the display planes as UC, and so
+ * need to rebind when first used as such.
+ */
+ obj->cache_level = I915_CACHE_LLC;
+ } else
+ obj->cache_level = I915_CACHE_NONE;
+
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj->mm_list);
@@ -3952,8 +4084,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- struct page *page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ struct page *page = shmem_read_mapping_page(mapping, i);
if (!IS_ERR(page)) {
char *dst = kmap_atomic(page);
memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4014,8 +4145,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
struct page *page;
char *dst, *src;
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
return PTR_ERR(page);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 20a4cc5b818..4934cf84c32 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -187,10 +187,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
- /* blow away mappings if mapped through GTT */
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
- i915_gem_release_mmap(obj);
-
if (obj->base.pending_write_domain)
cd->flips |= atomic_read(&obj->pending_flip);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e46b645773c..7a709cd8d54 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -59,24 +59,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
- unsigned int agp_type =
- cache_level_to_agp_type(dev, obj->cache_level);
-
i915_gem_clflush_object(obj);
-
- if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- intel_gtt_insert_sg_entries(obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else
- intel_gtt_insert_pages(obj->gtt_space->start
- >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- agp_type);
+ i915_gem_gtt_rebind_object(obj, obj->cache_level);
}
intel_gtt_chipset_flush();
@@ -110,6 +94,27 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
return 0;
}
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ BUG_ON(!obj->sg_list);
+
+ intel_gtt_insert_sg_entries(obj->sg_list,
+ obj->num_sg,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
+ } else
+ intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ agp_type);
+}
+
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 82d70fd9e93..99c4faa59d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -348,7 +348,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
u32 unfenced_alignment =
- i915_gem_get_unfenced_gtt_alignment(obj);
+ i915_gem_get_unfenced_gtt_alignment(dev,
+ obj->base.size,
+ args->tiling_mode);
if (obj->gtt_offset & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9e34a1abeb6..9cbb0cd8f46 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -152,7 +152,7 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
-u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long high_frame;
@@ -184,7 +184,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
return (high1 << 8) | low;
}
-u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int reg = PIPE_FRMCOUNT_GM45(pipe);
@@ -198,7 +198,7 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
return I915_READ(reg);
}
-int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
int *vpos, int *hpos)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -264,7 +264,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
return ret;
}
-int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
+static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
int *max_error,
struct timeval *vblank_time,
unsigned flags)
@@ -306,12 +306,15 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
+ mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
+ mutex_unlock(&mode_config->mutex);
+
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
@@ -361,10 +364,12 @@ static void notify_ring(struct drm_device *dev,
ring->irq_seqno = seqno;
wake_up_all(&ring->irq_queue);
-
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer,
- jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ if (i915_enable_hangcheck) {
+ dev_priv->hangcheck_count = 0;
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies +
+ msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ }
}
static void gen6_pm_rps_work(struct work_struct *work)
@@ -462,7 +467,7 @@ static void pch_irq_handler(struct drm_device *dev)
DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
-irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -550,7 +555,7 @@ done:
return ret;
}
-irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1209,7 +1214,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
}
}
-irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1454,7 +1459,7 @@ int i915_irq_wait(struct drm_device *dev, void *data,
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-int i915_enable_vblank(struct drm_device *dev, int pipe)
+static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1478,7 +1483,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
return 0;
}
-int ironlake_enable_vblank(struct drm_device *dev, int pipe)
+static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1494,7 +1499,7 @@ int ironlake_enable_vblank(struct drm_device *dev, int pipe)
return 0;
}
-int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
+static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1513,7 +1518,7 @@ int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-void i915_disable_vblank(struct drm_device *dev, int pipe)
+static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1529,7 +1534,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void ironlake_disable_vblank(struct drm_device *dev, int pipe)
+static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1540,7 +1545,7 @@ void ironlake_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
+static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1664,6 +1669,9 @@ void i915_hangcheck_elapsed(unsigned long data)
uint32_t acthd, instdone, instdone1;
bool err = false;
+ if (!i915_enable_hangcheck)
+ return;
+
/* If all work is done then ACTHD clearly hasn't advanced. */
if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
@@ -1728,7 +1736,7 @@ repeat:
/* drm_dma.h hooks
*/
-void ironlake_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1740,7 +1748,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
I915_WRITE(HWSTAM, 0xeffe);
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
/* Workaround stalls observed on Sandy Bridge GPUs by
* making the blitter command streamer generate a
* write to the Hardware Status Page for
@@ -1749,6 +1757,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
* happens.
*/
I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
+ I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
}
/* XXX hotplug from PCH */
@@ -1768,7 +1777,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-int ironlake_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
@@ -1840,7 +1849,7 @@ int ironlake_irq_postinstall(struct drm_device *dev)
return 0;
}
-int ivybridge_irq_postinstall(struct drm_device *dev)
+static int ivybridge_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
@@ -1890,7 +1899,7 @@ int ivybridge_irq_postinstall(struct drm_device *dev)
return 0;
}
-void i915_driver_irq_preinstall(struct drm_device * dev)
+static void i915_driver_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
@@ -1917,7 +1926,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
* Must be called after intel_modeset_init or hotplug interrupts won't be
* enabled correctly.
*/
-int i915_driver_irq_postinstall(struct drm_device *dev)
+static int i915_driver_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
@@ -1993,7 +2002,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-void ironlake_irq_uninstall(struct drm_device *dev)
+static void ironlake_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2013,7 +2022,7 @@ void ironlake_irq_uninstall(struct drm_device *dev)
I915_WRITE(GTIIR, I915_READ(GTIIR));
}
-void i915_driver_irq_uninstall(struct drm_device * dev)
+static void i915_driver_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
@@ -2039,3 +2048,43 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
I915_WRITE(IIR, I915_READ(IIR));
}
+
+void intel_irq_init(struct drm_device *dev)
+{
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+ if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+ dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+ dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+ else
+ dev->driver->get_vblank_timestamp = NULL;
+ dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+ if (IS_IVYBRIDGE(dev)) {
+ /* Share pre & uninstall handlers with ILK/SNB */
+ dev->driver->irq_handler = ivybridge_irq_handler;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ivybridge_enable_vblank;
+ dev->driver->disable_vblank = ivybridge_disable_vblank;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev->driver->irq_handler = ironlake_irq_handler;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ironlake_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ironlake_enable_vblank;
+ dev->driver->disable_vblank = ironlake_disable_vblank;
+ } else {
+ dev->driver->irq_preinstall = i915_driver_irq_preinstall;
+ dev->driver->irq_postinstall = i915_driver_irq_postinstall;
+ dev->driver->irq_uninstall = i915_driver_irq_uninstall;
+ dev->driver->irq_handler = i915_driver_irq_handler;
+ dev->driver->enable_vblank = i915_enable_vblank;
+ dev->driver->disable_vblank = i915_disable_vblank;
+ }
+}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2f967af8e62..542453f7498 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -78,6 +78,14 @@
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
+#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
+#define GEN6_MBC_SNPCR_SHIFT 21
+#define GEN6_MBC_SNPCR_MASK (3<<21)
+#define GEN6_MBC_SNPCR_MAX (0<<21)
+#define GEN6_MBC_SNPCR_MED (1<<21)
+#define GEN6_MBC_SNPCR_LOW (2<<21)
+#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
+
#define GEN6_GDRST 0x941c
#define GEN6_GRDOM_FULL (1 << 0)
#define GEN6_GRDOM_RENDER (1 << 1)
@@ -367,6 +375,7 @@
# define MI_FLUSH_ENABLE (1 << 11)
#define GFX_MODE 0x02520
+#define GFX_MODE_GEN7 0x0229c
#define GFX_RUN_LIST_ENABLE (1<<15)
#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
#define GFX_SURFACE_FAULT_ENABLE (1<<12)
@@ -374,6 +383,9 @@
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
+#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
+#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
+
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
@@ -531,6 +543,7 @@
#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
+#define GEN6_BSD_HWSTAM 0x12098
#define GEN6_BSD_IMR 0x120a8
#define GEN6_BSD_USER_INTERRUPT (1 << 12)
@@ -578,6 +591,7 @@
#define DPFC_CTL_PLANEA (0<<30)
#define DPFC_CTL_PLANEB (1<<30)
#define DPFC_CTL_FENCE_EN (1<<29)
+#define DPFC_CTL_PERSISTENT_MODE (1<<25)
#define DPFC_SR_EN (1<<10)
#define DPFC_CTL_LIMIT_1X (0<<6)
#define DPFC_CTL_LIMIT_2X (1<<6)
@@ -1308,6 +1322,7 @@
#define ADPA_PIPE_SELECT_MASK (1<<30)
#define ADPA_PIPE_A_SELECT 0
#define ADPA_PIPE_B_SELECT (1<<30)
+#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -1450,6 +1465,7 @@
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
#define LVDS_PIPE_MASK (1 << 30)
+#define LVDS_PIPE(pipe) ((pipe) << 30)
/* LVDS dithering flag on 965/g4x platform */
#define LVDS_ENABLE_DITHER (1 << 25)
/* LVDS sync polarity flags. Set to invert (i.e. negative) */
@@ -1489,9 +1505,6 @@
#define LVDS_B0B3_POWER_DOWN (0 << 2)
#define LVDS_B0B3_POWER_UP (3 << 2)
-#define LVDS_PIPE_ENABLED(V, P) \
- (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN))
-
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
#define VIDEO_DIP_CTL 0x61170
@@ -1504,6 +1517,7 @@
#define VIDEO_DIP_SELECT_AVI (0 << 19)
#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
#define VIDEO_DIP_SELECT_SPD (3 << 19)
+#define VIDEO_DIP_SELECT_MASK (3 << 19)
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
@@ -2082,9 +2096,6 @@
#define DP_PIPEB_SELECT (1 << 30)
#define DP_PIPE_MASK (1 << 30)
-#define DP_PIPE_ENABLED(V, P) \
- (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN))
-
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
#define DP_LINK_TRAIN_PAT_2 (1 << 28)
@@ -3022,6 +3033,20 @@
#define _TRANSA_DP_LINK_M2 0xe0048
#define _TRANSA_DP_LINK_N2 0xe004c
+/* Per-transcoder DIP controls */
+
+#define _VIDEO_DIP_CTL_A 0xe0200
+#define _VIDEO_DIP_DATA_A 0xe0208
+#define _VIDEO_DIP_GCP_A 0xe0210
+
+#define _VIDEO_DIP_CTL_B 0xe1200
+#define _VIDEO_DIP_DATA_B 0xe1208
+#define _VIDEO_DIP_GCP_B 0xe1210
+
+#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B)
+#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
+#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
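+/* _PIPE() interpolates between the A and B instances, so for example
+ * TVIDEO_DIP_CTL(1) resolves to the transcoder B register at 0xe1200.
+ */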
+
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008
@@ -3074,6 +3099,16 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
+#define _TRANSA_CHICKEN2 0xf0064
+#define _TRANSB_CHICKEN2 0xf1064
+#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
+#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
+
+#define SOUTH_CHICKEN1 0xc2000
+#define FDIA_PHASE_SYNC_SHIFT_OVR 19
+#define FDIA_PHASE_SYNC_SHIFT_EN 18
+#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
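+/* Each pipe owns an override/enable bit pair, spaced two bits apart:
+ * pipe A at bits 19:18, pipe B at bits 17:16, so FDI_PHASE_SYNC_OVR(1)
+ * is (1 << 17) and FDI_PHASE_SYNC_EN(1) is (1 << 16).
+ */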
#define SOUTH_CHICKEN2 0xc2004
#define DPLS_EDP_PPS_FIX_DIS (1<<0)
@@ -3224,14 +3259,12 @@
#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-#define ADPA_PIPE_ENABLED(V, P) \
- (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE))
-
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER_A (0)
#define TRANSCODER_B (1 << 30)
+#define TRANSCODER(pipe) ((pipe) << 30)
#define TRANSCODER_MASK (1 << 30)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
@@ -3248,9 +3281,6 @@
#define HSYNC_ACTIVE_HIGH (1 << 3)
#define PORT_DETECTED (1 << 2)
-#define HDMI_PIPE_ENABLED(V, P) \
- (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE))
-
/* PCH SDVOB multiplex with HDMIB */
#define PCH_SDVOB HDMIB
@@ -3317,6 +3347,7 @@
#define PORT_TRANS_B_SEL_CPT (1<<29)
#define PORT_TRANS_C_SEL_CPT (2<<29)
#define PORT_TRANS_SEL_MASK (3<<29)
+#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
@@ -3359,6 +3390,7 @@
#define FORCEWAKE_ACK 0x130090
#define GT_FIFO_FREE_ENTRIES 0x120008
+#define GT_FIFO_NUM_RESERVED_ENTRIES 20
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
@@ -3433,7 +3465,9 @@
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
#define GEN6_READ_OC_PARAMS 0xc
-#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
+#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_DATA 0x138128
+#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 60a94d2b526..f10742359ec 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -597,7 +597,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
return;
}
-void i915_save_display(struct drm_device *dev)
+static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -689,7 +689,7 @@ void i915_save_display(struct drm_device *dev)
i915_save_vga(dev);
}
-void i915_restore_display(struct drm_device *dev)
+static void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -760,15 +760,13 @@ void i915_restore_display(struct drm_device *dev)
/* FIXME: restore TV & SDVO state */
/* only restore FBC info on the platform that supports FBC*/
+ intel_disable_fbc(dev);
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
- ironlake_disable_fbc(dev);
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else if (IS_GM45(dev)) {
- g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else {
- i8xx_disable_fbc(dev);
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
@@ -780,6 +778,7 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -796,6 +795,8 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+ mutex_lock(&dev->struct_mutex);
+
/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);
@@ -811,6 +812,7 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
dev_priv->saveMCHBAR_RENDER_STANDBY =
I915_READ(RSTDBYCTL);
+ dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
} else {
dev_priv->saveIER = I915_READ(IER);
dev_priv->saveIMR = I915_READ(IMR);
@@ -835,6 +837,8 @@ int i915_save_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -845,6 +849,8 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+ mutex_lock(&dev->struct_mutex);
+
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
@@ -858,20 +864,27 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(GTIMR, dev_priv->saveGTIMR);
I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
+ I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
} else {
I915_WRITE(IER, dev_priv->saveIER);
I915_WRITE(IMR, dev_priv->saveIMR);
}
+ mutex_unlock(&dev->struct_mutex);
- intel_init_clock_gating(dev);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_init_clock_gating(dev);
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
}
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev)) {
gen6_enable_rps(dev_priv);
+ gen6_update_ring_freq(dev_priv);
+ }
+
+ mutex_lock(&dev->struct_mutex);
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
@@ -886,6 +899,8 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+ mutex_unlock(&dev->struct_mutex);
+
intel_i2c_reset(dev);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 927442a1192..61abef8a811 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -74,7 +74,7 @@ get_blocksize(void *p)
static void
fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
- struct lvds_dvo_timing *dvo_timing)
+ const struct lvds_dvo_timing *dvo_timing)
{
panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
dvo_timing->hactive_lo;
@@ -115,20 +115,75 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
drm_mode_set_name(panel_fixed_mode);
}
+static bool
+lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
+ const struct lvds_dvo_timing *b)
+{
+ if (a->hactive_hi != b->hactive_hi ||
+ a->hactive_lo != b->hactive_lo)
+ return false;
+
+ if (a->hsync_off_hi != b->hsync_off_hi ||
+ a->hsync_off_lo != b->hsync_off_lo)
+ return false;
+
+ if (a->hsync_pulse_width != b->hsync_pulse_width)
+ return false;
+
+ if (a->hblank_hi != b->hblank_hi ||
+ a->hblank_lo != b->hblank_lo)
+ return false;
+
+ if (a->vactive_hi != b->vactive_hi ||
+ a->vactive_lo != b->vactive_lo)
+ return false;
+
+ if (a->vsync_off != b->vsync_off)
+ return false;
+
+ if (a->vsync_pulse_width != b->vsync_pulse_width)
+ return false;
+
+ if (a->vblank_hi != b->vblank_hi ||
+ a->vblank_lo != b->vblank_lo)
+ return false;
+
+ return true;
+}
+
+static const struct lvds_dvo_timing *
+get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
+ int index)
+{
+ /*
+ * The size of fp_timing varies between platforms, so calculate the
+ * DVO timing's relative offset within the LVDS data entry to get at
+ * the DVO timing entry.
+ */
+
+ int lfp_data_size =
+ lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+ int dvo_timing_offset =
+ lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
+ lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
+ char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index;
+
+ return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
+}
+
/* Try to find integrated panel data */
static void
parse_lfp_panel_data(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
- struct bdb_lvds_options *lvds_options;
- struct bdb_lvds_lfp_data *lvds_lfp_data;
- struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
- struct bdb_lvds_lfp_data_entry *entry;
- struct lvds_dvo_timing *dvo_timing;
+ const struct bdb_lvds_options *lvds_options;
+ const struct bdb_lvds_lfp_data *lvds_lfp_data;
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+ const struct lvds_dvo_timing *panel_dvo_timing;
struct drm_display_mode *panel_fixed_mode;
- int lfp_data_size, dvo_timing_offset;
- int i, temp_downclock;
- struct drm_display_mode *temp_mode;
+ int i, downclock;
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
@@ -150,75 +205,44 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
dev_priv->lvds_vbt = 1;
- lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
- lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
- entry = (struct bdb_lvds_lfp_data_entry *)
- ((uint8_t *)lvds_lfp_data->data + (lfp_data_size *
- lvds_options->panel_type));
- dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
- lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
-
- /*
- * the size of fp_timing varies on the different platform.
- * So calculate the DVO timing relative offset in LVDS data
- * entry to get the DVO timing entry
- */
- dvo_timing = (struct lvds_dvo_timing *)
- ((unsigned char *)entry + dvo_timing_offset);
+ panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ lvds_options->panel_type);
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
if (!panel_fixed_mode)
return;
- fill_detail_timing_data(panel_fixed_mode, dvo_timing);
+ fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
- temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
- temp_downclock = panel_fixed_mode->clock;
/*
- * enumerate the LVDS panel timing info entry in VBT to check whether
- * the LVDS downclock is found.
+ * Iterate over the LVDS panel timing info to find the lowest clock
+ * for the native resolution.
*/
+ downclock = panel_dvo_timing->clock;
for (i = 0; i < 16; i++) {
- entry = (struct bdb_lvds_lfp_data_entry *)
- ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
- dvo_timing = (struct lvds_dvo_timing *)
- ((unsigned char *)entry + dvo_timing_offset);
-
- fill_detail_timing_data(temp_mode, dvo_timing);
-
- if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
- temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
- temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
- temp_mode->htotal == panel_fixed_mode->htotal &&
- temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
- temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
- temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
- temp_mode->vtotal == panel_fixed_mode->vtotal &&
- temp_mode->clock < temp_downclock) {
- /*
- * downclock is already found. But we expect
- * to find the lower downclock.
- */
- temp_downclock = temp_mode->clock;
- }
- /* clear it to zero */
- memset(temp_mode, 0, sizeof(*temp_mode));
+ const struct lvds_dvo_timing *dvo_timing;
+
+ dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+ lvds_lfp_data_ptrs,
+ i);
+ if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
+ dvo_timing->clock < downclock)
+ downclock = dvo_timing->clock;
}
- kfree(temp_mode);
- if (temp_downclock < panel_fixed_mode->clock &&
- i915_lvds_downclock) {
+
+ if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
dev_priv->lvds_downclock_avail = 1;
- dev_priv->lvds_downclock = temp_downclock;
+ dev_priv->lvds_downclock = downclock * 10;
DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
"Normal Clock %dKHz, downclock %dKHz\n",
- temp_downclock, panel_fixed_mode->clock);
+ panel_fixed_mode->clock, 10*downclock);
}
- return;
}
/* Try to find sdvo panel data */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 81a9059b6a9..ee1d701317f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,6 +24,7 @@
* Eric Anholt <eric@anholt.net>
*/
+#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
@@ -979,11 +980,76 @@ static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
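+/* Pipe-routing checks for the PCH ports: on CPT the transcoder binding is
+ * read from the port's transcoder-select field (TRANS_DP_CTL for DP), while
+ * on the older IBX PCH the pipe select bit lives in the port register itself.
+ */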
+static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 port_sel, u32 val)
+{
+ if ((val & DP_PORT_EN) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
+ u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
+ if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
+ return false;
+ } else {
+ if ((val & DP_PIPE_MASK) != (pipe << 30))
+ return false;
+ }
+ return true;
+}
+
+static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & PORT_ENABLE) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
+ return false;
+ }
+ return true;
+}
+
+static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & LVDS_PORT_EN) == 0)
+ return false;
+
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
+ return false;
+ }
+ return true;
+}
+
+static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 val)
+{
+ if ((val & ADPA_DAC_ENABLE) == 0)
+ return false;
+ if (HAS_PCH_CPT(dev_priv->dev)) {
+ if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
+ return false;
+ } else {
+ if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
+ return false;
+ }
+ return true;
+}
+
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg)
+ enum pipe pipe, int reg, u32 port_sel)
{
u32 val = I915_READ(reg);
- WARN(DP_PIPE_ENABLED(val, pipe),
+ WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
}
@@ -992,7 +1058,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- WARN(HDMI_PIPE_ENABLED(val, pipe),
+ WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
}
@@ -1003,19 +1069,19 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
int reg;
u32 val;
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C);
- assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
reg = PCH_ADPA;
val = I915_READ(reg);
- WARN(ADPA_PIPE_ENABLED(val, pipe),
+ WARN(adpa_pipe_enabled(dev_priv, pipe, val),
"PCH VGA enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
reg = PCH_LVDS;
val = I915_READ(reg);
- WARN(LVDS_PIPE_ENABLED(val, pipe),
+ WARN(lvds_pipe_enabled(dev_priv, pipe, val),
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
@@ -1157,12 +1223,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
reg = TRANSCONF(pipe);
val = I915_READ(reg);
- /*
- * make the BPC in transcoder be consistent with
- * that in pipeconf reg.
- */
- val &= ~PIPE_BPC_MASK;
- val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ /*
+ * make the BPC in transcoder be consistent with
+ * that in pipeconf reg.
+ */
+ val &= ~PIPE_BPC_MASK;
+ val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+ }
I915_WRITE(reg, val | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
@@ -1272,6 +1341,17 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
+/*
+ * Plane regs are double buffered, going from enabled->disabled needs a
+ * trigger in order to latch. The display address reg provides this.
+ */
+static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+ enum plane plane)
+{
+ I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
+ I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+}
+
/**
* intel_enable_plane - enable a display plane on a given pipe
* @dev_priv: i915 private structure
@@ -1295,20 +1375,10 @@ static void intel_enable_plane(struct drm_i915_private *dev_priv,
return;
I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev_priv, plane);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
-/*
- * Plane regs are double buffered, going from enabled->disabled needs a
- * trigger in order to latch. The display address reg provides this.
- */
-static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
- enum plane plane)
-{
- u32 reg = DSPADDR(plane);
- I915_WRITE(reg, I915_READ(reg));
-}
-
/**
* intel_disable_plane - disable a display plane
* @dev_priv: i915 private structure
@@ -1334,19 +1404,24 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
}
static void disable_pch_dp(struct drm_i915_private *dev_priv,
- enum pipe pipe, int reg)
+ enum pipe pipe, int reg, u32 port_sel)
{
u32 val = I915_READ(reg);
- if (DP_PIPE_ENABLED(val, pipe))
+ if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
+ DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
I915_WRITE(reg, val & ~DP_PORT_EN);
+ }
}
static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- if (HDMI_PIPE_ENABLED(val, pipe))
+ if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
+ DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
+ reg, pipe);
I915_WRITE(reg, val & ~PORT_ENABLE);
+ }
}
/* Disable any ports connected to this transcoder */
@@ -1358,18 +1433,19 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
val = I915_READ(PCH_PP_CONTROL);
I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
- disable_pch_dp(dev_priv, pipe, PCH_DP_B);
- disable_pch_dp(dev_priv, pipe, PCH_DP_C);
- disable_pch_dp(dev_priv, pipe, PCH_DP_D);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
reg = PCH_ADPA;
val = I915_READ(reg);
- if (ADPA_PIPE_ENABLED(val, pipe))
+ if (adpa_pipe_enabled(dev_priv, pipe, val))
I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
reg = PCH_LVDS;
val = I915_READ(reg);
- if (LVDS_PIPE_ENABLED(val, pipe)) {
+ if (lvds_pipe_enabled(dev_priv, pipe, val)) {
+ DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
I915_WRITE(reg, val & ~LVDS_PORT_EN);
POSTING_READ(reg);
udelay(100);
@@ -1380,6 +1456,28 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
disable_pch_hdmi(dev_priv, pipe, HDMID);
}
+static void i8xx_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 fbc_ctl;
+
+ /* Disable compression */
+ fbc_ctl = I915_READ(FBC_CONTROL);
+ if ((fbc_ctl & FBC_CTL_EN) == 0)
+ return;
+
+ fbc_ctl &= ~FBC_CTL_EN;
+ I915_WRITE(FBC_CONTROL, fbc_ctl);
+
+ /* Wait for compressing bit to clear */
+ if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+ DRM_DEBUG_KMS("FBC idle timed out\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
@@ -1388,36 +1486,25 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int cfb_pitch;
int plane, i;
u32 fbc_ctl, fbc_ctl2;
- if (fb->pitch == dev_priv->cfb_pitch &&
- obj->fence_reg == dev_priv->cfb_fence &&
- intel_crtc->plane == dev_priv->cfb_plane &&
- I915_READ(FBC_CONTROL) & FBC_CTL_EN)
- return;
-
- i8xx_disable_fbc(dev);
-
- dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
-
- if (fb->pitch < dev_priv->cfb_pitch)
- dev_priv->cfb_pitch = fb->pitch;
+ cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ if (fb->pitch < cfb_pitch)
+ cfb_pitch = fb->pitch;
/* FBC_CTL wants 64B units */
- dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj->fence_reg;
- dev_priv->cfb_plane = intel_crtc->plane;
- plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
+ cfb_pitch = (cfb_pitch / 64) - 1;
+ plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);
/* Set it up... */
- fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
- if (obj->tiling_mode != I915_TILING_NONE)
- fbc_ctl2 |= FBC_CTL_CPU_FENCE;
+ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
+ fbc_ctl2 |= plane;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
@@ -1425,36 +1512,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
- fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
+ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
- if (obj->tiling_mode != I915_TILING_NONE)
- fbc_ctl |= dev_priv->cfb_fence;
+ fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
- DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
- dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
-}
-
-void i8xx_disable_fbc(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 fbc_ctl;
-
- /* Disable compression */
- fbc_ctl = I915_READ(FBC_CONTROL);
- if ((fbc_ctl & FBC_CTL_EN) == 0)
- return;
-
- fbc_ctl &= ~FBC_CTL_EN;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
-
- /* Wait for compressing bit to clear */
- if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
- DRM_DEBUG_KMS("FBC idle timed out\n");
- return;
- }
-
- DRM_DEBUG_KMS("disabled FBC\n");
+ DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
+ cfb_pitch, crtc->y, intel_crtc->plane);
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
@@ -1476,30 +1540,9 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
- dpfc_ctl = I915_READ(DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
- dev_priv->cfb_fence == obj->fence_reg &&
- dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_y == crtc->y)
- return;
-
- I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- }
-
- dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj->fence_reg;
- dev_priv->cfb_plane = intel_crtc->plane;
- dev_priv->cfb_y = crtc->y;
-
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
- if (obj->tiling_mode != I915_TILING_NONE) {
- dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
- I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
- } else {
- I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
- }
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+ I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
@@ -1512,7 +1555,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
-void g4x_disable_fbc(struct drm_device *dev)
+static void g4x_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
@@ -1567,32 +1610,12 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
u32 dpfc_ctl;
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
- if (dpfc_ctl & DPFC_CTL_EN) {
- if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
- dev_priv->cfb_fence == obj->fence_reg &&
- dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_offset == obj->gtt_offset &&
- dev_priv->cfb_y == crtc->y)
- return;
-
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- }
-
- dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj->fence_reg;
- dev_priv->cfb_plane = intel_crtc->plane;
- dev_priv->cfb_offset = obj->gtt_offset;
- dev_priv->cfb_y = crtc->y;
-
dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
- if (obj->tiling_mode != I915_TILING_NONE) {
- dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
- I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
- } else {
- I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
- }
+ /* Set persistent mode for front-buffer rendering, ala X. */
+ dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
+ dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
+ I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
@@ -1604,7 +1627,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
if (IS_GEN6(dev)) {
I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
+ SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
sandybridge_blit_fbc_update(dev);
}
@@ -1612,7 +1635,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
-void ironlake_disable_fbc(struct drm_device *dev)
+static void ironlake_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
@@ -1644,24 +1667,109 @@ bool intel_fbc_enabled(struct drm_device *dev)
return dev_priv->display.fbc_enabled(dev);
}
-void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_fbc_work_fn(struct work_struct *__work)
{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_fbc_work *work =
+ container_of(to_delayed_work(__work),
+ struct intel_fbc_work, work);
+ struct drm_device *dev = work->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+ if (work == dev_priv->fbc_work) {
+ /* Double check that we haven't switched fb without cancelling
+ * the prior work.
+ */
+ if (work->crtc->fb == work->fb) {
+ dev_priv->display.enable_fbc(work->crtc,
+ work->interval);
+
+ dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->cfb_fb = work->crtc->fb->base.id;
+ dev_priv->cfb_y = work->crtc->y;
+ }
+
+ dev_priv->fbc_work = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(work);
+}
+
+static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->fbc_work == NULL)
+ return;
+
+ DRM_DEBUG_KMS("cancelling pending FBC enable\n");
+
+ /* Synchronisation is provided by struct_mutex and checking of
+ * dev_priv->fbc_work, so we can perform the cancellation
+ * entirely asynchronously.
+ */
+ if (cancel_delayed_work(&dev_priv->fbc_work->work))
+ /* tasklet was killed before being run, clean up */
+ kfree(dev_priv->fbc_work);
+
+ /* Mark the work as no longer wanted so that if it does
+ * wake up (because the work was already running and waiting
+ * for our mutex), it will discover that it is no longer
+ * necessary to run.
+ */
+ dev_priv->fbc_work = NULL;
+}
+
+static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct intel_fbc_work *work;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev_priv->display.enable_fbc)
return;
- dev_priv->display.enable_fbc(crtc, interval);
+ intel_cancel_fbc_work(dev_priv);
+
+ work = kzalloc(sizeof *work, GFP_KERNEL);
+ if (work == NULL) {
+ dev_priv->display.enable_fbc(crtc, interval);
+ return;
+ }
+
+ work->crtc = crtc;
+ work->fb = crtc->fb;
+ work->interval = interval;
+ INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
+
+ dev_priv->fbc_work = work;
+
+ DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+
+ /* Delay the actual enabling to let pageflipping cease and the
+ * display to settle before starting the compression. Note that
+ * this delay also serves a second purpose: it allows for a
+ * vblank to pass after disabling the FBC before we attempt
+ * to modify the control registers.
+ *
+ * A more complicated solution would involve tracking vblanks
+ * following the termination of the page-flipping sequence
+ * and indeed performing the enable as a co-routine and not
+ * waiting synchronously upon the vblank.
+ */
+ schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_cancel_fbc_work(dev_priv);
+
if (!dev_priv->display.disable_fbc)
return;
dev_priv->display.disable_fbc(dev);
+ dev_priv->cfb_plane = -1;
}
/**
@@ -1760,8 +1868,13 @@ static void intel_update_fbc(struct drm_device *dev)
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
- if (obj->tiling_mode != I915_TILING_X) {
- DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
+
+ /* The use of a CPU fence is mandatory in order to detect writes
+ * by the CPU to the scanout and trigger updates to the FBC.
+ */
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
}
@@ -1770,6 +1883,44 @@ static void intel_update_fbc(struct drm_device *dev)
if (in_dbg_master())
goto out_disable;
+ /* If the scanout has not changed, don't modify the FBC settings.
+ * Note that we make the fundamental assumption that the fb->obj
+ * cannot be unpinned (and have its GTT offset and fence revoked)
+ * without first being decoupled from the scanout and FBC disabled.
+ */
+ if (dev_priv->cfb_plane == intel_crtc->plane &&
+ dev_priv->cfb_fb == fb->base.id &&
+ dev_priv->cfb_y == crtc->y)
+ return;
+
+ if (intel_fbc_enabled(dev)) {
+ /* We update FBC along two paths, after changing fb/crtc
+ * configuration (modeswitching) and after page-flipping
+ * finishes. For the latter, we know that not only did
+ * we disable the FBC at the start of the page-flip
+ * sequence, but also more than one vblank has passed.
+ *
+ * For the former case of modeswitching, it is possible
+ * to switch between two FBC valid configurations
+ * instantaneously so we do need to disable the FBC
+ * before we can modify its control registers. We also
+ * have to wait for the next vblank for that to take
+ * effect. However, since we delay enabling FBC we can
+ * assume that a vblank has passed since disabling and
+ * that we can safely alter the registers in the deferred
+ * callback.
+ *
+ * In the scenario that we go from a valid to invalid
+ * and then back to valid FBC configuration we have
+ * no strict enforcement that a vblank occurred since
+ * disabling the FBC. However, along all current pipe
+ * disabling paths we do need to wait for a vblank at
+ * some point. And we wait before enabling FBC anyway.
+ */
+ DRM_DEBUG_KMS("disabling active FBC for update\n");
+ intel_disable_fbc(dev);
+ }
+
intel_enable_fbc(crtc, 500);
return;
@@ -1812,14 +1963,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
}
dev_priv->mm.interruptible = false;
- ret = i915_gem_object_pin(obj, alignment, true);
+ ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
if (ret)
goto err_interruptible;
- ret = i915_gem_object_set_to_display_plane(obj, pipelined);
- if (ret)
- goto err_unpin;
-
/* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always install
@@ -1841,10 +1988,8 @@ err_interruptible:
return ret;
}
-/* Assume fb object is pinned & idle & fenced and just update base pointers */
-static int
-intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
+static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1887,7 +2032,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
default:
- DRM_ERROR("Unknown color depth\n");
+ DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1897,10 +2042,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr &= ~DISPPLANE_TILED;
}
- if (HAS_PCH_SPLIT(dev))
- /* must disable */
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
I915_WRITE(reg, dspcntr);
Start = obj->gtt_offset;
@@ -1917,6 +2058,99 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(DSPADDR(plane), Start + Offset);
POSTING_READ(reg);
+ return 0;
+}
+
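+/* ILK+ flavour of the plane update: the surface base is programmed through
+ * DSPSURF, with DSPADDR carrying the linear offset, and trickle feed is
+ * kept disabled.
+ */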
+static int ironlake_update_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int plane = intel_crtc->plane;
+ unsigned long Start, Offset;
+ u32 dspcntr;
+ u32 reg;
+
+ switch (plane) {
+ case 0:
+ case 1:
+ break;
+ default:
+ DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+ return -EINVAL;
+ }
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ reg = DSPCNTR(plane);
+ dspcntr = I915_READ(reg);
+ /* Mask out pixel format bits in case we change it */
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ switch (fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (fb->depth != 16)
+ return -EINVAL;
+
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ if (fb->depth == 24)
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ else if (fb->depth == 30)
+ dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
+ else
+ return -EINVAL;
+ break;
+ default:
+ DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+
+ /* must disable */
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ I915_WRITE(reg, dspcntr);
+
+ Start = obj->gtt_offset;
+ Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ Start, Offset, x, y, fb->pitch);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+ I915_WRITE(DSPSURF(plane), Start);
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPADDR(plane), Offset);
+ POSTING_READ(reg);
+
+ return 0;
+}
+
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+static int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = dev_priv->display.update_plane(crtc, fb, x, y);
+ if (ret)
+ return ret;
+
intel_update_fbc(dev);
intel_increase_pllclock(crtc);
@@ -1934,7 +2168,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
/* no fb bound */
if (!crtc->fb) {
- DRM_DEBUG_KMS("No FB bound\n");
+ DRM_ERROR("No FB bound\n");
return 0;
}
@@ -1943,6 +2177,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
case 1:
break;
default:
+ DRM_ERROR("no plane for crtc\n");
return -EINVAL;
}
@@ -1952,6 +2187,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
NULL);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("pin & fence failed\n");
return ret;
}
@@ -1971,7 +2207,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- ret = i915_gem_object_flush_gpu(obj);
+ ret = i915_gem_object_finish_gpu(obj);
(void) ret;
}
@@ -1980,6 +2216,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (ret) {
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("failed to update base address\n");
return ret;
}
@@ -2086,6 +2323,18 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
FDI_FE_ERRC_ENABLE);
}
+static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 flags = I915_READ(SOUTH_CHICKEN1);
+
+ flags |= FDI_PHASE_SYNC_OVR(pipe);
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
+ flags |= FDI_PHASE_SYNC_EN(pipe);
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
+ POSTING_READ(SOUTH_CHICKEN1);
+}
+
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
@@ -2236,6 +2485,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
+ if (HAS_PCH_CPT(dev))
+ cpt_phase_pointer_enable(dev, pipe);
+
for (i = 0; i < 4; i++ ) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2352,6 +2604,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
POSTING_READ(reg);
udelay(150);
+ if (HAS_PCH_CPT(dev))
+ cpt_phase_pointer_enable(dev, pipe);
+
for (i = 0; i < 4; i++ ) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
@@ -2461,6 +2716,17 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
}
}
+static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 flags = I915_READ(SOUTH_CHICKEN1);
+
+ flags &= ~(FDI_PHASE_SYNC_EN(pipe));
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
+ flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
+ I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
+ POSTING_READ(SOUTH_CHICKEN1);
+}
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2490,6 +2756,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
I915_WRITE(FDI_RX_CHICKEN(pipe),
I915_READ(FDI_RX_CHICKEN(pipe) &
~FDI_RX_PHASE_SYNC_POINTER_EN));
+ } else if (HAS_PCH_CPT(dev)) {
+ cpt_phase_pointer_disable(dev, pipe);
}
/* still set train pattern 1 */
@@ -2622,6 +2890,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
@@ -2629,7 +2898,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
TRANS_DP_BPC_MASK);
temp |= (TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_ENH_FRAMING);
- temp |= TRANS_DP_8BPC;
+ temp |= bpc << 9; /* same format but at 11:9 */
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2699,14 +2968,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
+ /*
+ * On ILK+ LUT must be loaded before the pipe is running but with
+ * clocks enabled
+ */
+ intel_crtc_load_lut(crtc);
+
intel_enable_pipe(dev_priv, pipe, is_pch_port);
intel_enable_plane(dev_priv, plane, pipe);
if (is_pch_port)
ironlake_pch_enable(crtc);
- intel_crtc_load_lut(crtc);
-
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
@@ -2732,9 +3005,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_disable_plane(dev_priv, plane, pipe);
- if (dev_priv->cfb_plane == plane &&
- dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
intel_disable_pipe(dev_priv, pipe);
@@ -2898,9 +3170,8 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
- if (dev_priv->cfb_plane == plane &&
- dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
+ if (dev_priv->cfb_plane == plane)
+ intel_disable_fbc(dev);
intel_disable_plane(dev_priv, plane, pipe);
intel_disable_pipe(dev_priv, pipe);
@@ -4305,7 +4576,137 @@ static void intel_update_watermarks(struct drm_device *dev)
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
- return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
+ return dev_priv->lvds_use_ssc && i915_panel_use_ssc
+ && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
+}
+
+/**
+ * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
+ * @crtc: CRTC structure
+ *
+ * A pipe may be connected to one or more outputs. Based on the depth of the
+ * attached framebuffer, choose a good color depth to use on the pipe.
+ *
+ * If possible, match the pipe depth to the fb depth. In some cases, this
+ * isn't ideal, because the connected output supports a lesser or restricted
+ * set of depths. Resolve that here:
+ * LVDS typically supports only 6bpc, so clamp down in that case
+ * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
+ * Displays may support a restricted set as well, check EDID and clamp as
+ * appropriate.
+ *
+ * RETURNS:
+ * Dithering requirement (i.e. false if display bpc and pipe bpc match,
+ * true if they don't match).
+ */
+static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ unsigned int *pipe_bpp)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned int display_bpc = UINT_MAX, bpc;
+
+ /* Walk the encoders & connectors on this crtc, get min bpc */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
+ unsigned int lvds_bpc;
+
+ if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
+ LVDS_A3_POWER_UP)
+ lvds_bpc = 8;
+ else
+ lvds_bpc = 6;
+
+ if (lvds_bpc < display_bpc) {
+ DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
+ display_bpc = lvds_bpc;
+ }
+ continue;
+ }
+
+ if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+ /* Use VBT settings if we have an eDP panel */
+ unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+
+ if (edp_bpc < display_bpc) {
+ DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+ display_bpc = edp_bpc;
+ }
+ continue;
+ }
+
+ /* Not one of the known troublemakers, check the EDID */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ /* Don't use an invalid EDID bpc value */
+ if (connector->display_info.bpc &&
+ connector->display_info.bpc < display_bpc) {
+ DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
+ display_bpc = connector->display_info.bpc;
+ }
+ }
+
+ /*
+ * HDMI is either 12 or 8, so if the display lets 10bpc sneak
+ * through, clamp it down. (Note: >12bpc will be caught below.)
+ */
+ if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
+ if (display_bpc > 8 && display_bpc < 12) {
+ DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
+ display_bpc = 12;
+ } else {
+ DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
+ display_bpc = 8;
+ }
+ }
+ }
+
+ /*
+ * We could just drive the pipe at the highest bpc all the time and
+ * enable dithering as needed, but that costs bandwidth. So choose
+ * the minimum value that expresses the full color range of the fb but
+ * also stays within the max display bpc discovered above.
+ */
+
+ switch (crtc->fb->depth) {
+ case 8:
+ bpc = 8; /* since we go through a colormap */
+ break;
+ case 15:
+ case 16:
+ bpc = 6; /* min is 18bpp */
+ break;
+ case 24:
+ bpc = min((unsigned int)8, display_bpc);
+ break;
+ case 30:
+ bpc = min((unsigned int)10, display_bpc);
+ break;
+ case 48:
+ bpc = min((unsigned int)12, display_bpc);
+ break;
+ default:
+ DRM_DEBUG("unsupported depth, assuming 24 bits\n");
+ bpc = min((unsigned int)8, display_bpc);
+ break;
+ }
+
+ DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
+ bpc, display_bpc);
+
+ *pipe_bpp = bpc * 3;
+
+ return display_bpc != bpc;
}
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
@@ -4687,6 +5088,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
+ intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -4695,6 +5097,81 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
return ret;
}
+static void ironlake_update_pch_refclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_crtc *crtc;
+ struct intel_encoder *encoder;
+ struct intel_encoder *has_edp_encoder = NULL;
+ u32 temp;
+ bool has_lvds = false;
+
+ /* We need to take the global config into account */
+ list_for_each_entry(crtc, &mode_config->crtc_list, head) {
+ if (!crtc->enabled)
+ continue;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list,
+ base.head) {
+ if (encoder->base.crtc != crtc)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ has_lvds = true;
+ case INTEL_OUTPUT_EDP:
+ has_edp_encoder = encoder;
+ break;
+ }
+ }
+ }
+
+ /* Ironlake: try to setup display ref clock before DPLL
+ * enabling. This is only under driver's control after
+ * PCH B stepping, previous chipset stepping should be
+ * ignoring this setting.
+ */
+ temp = I915_READ(PCH_DREF_CONTROL);
+ /* Always enable nonspread source */
+ temp &= ~DREF_NONSPREAD_SOURCE_MASK;
+ temp |= DREF_NONSPREAD_SOURCE_ENABLE;
+ temp &= ~DREF_SSC_SOURCE_MASK;
+ temp |= DREF_SSC_SOURCE_ENABLE;
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+
+ if (has_edp_encoder) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ temp |= DREF_SSC1_ENABLE;
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ }
+ temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+
+ /* Enable CPU source on CPU attached eDP */
+ if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
+ if (intel_panel_use_ssc(dev_priv))
+ temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
+ else
+ temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ } else {
+ /* Enable SSC on PCH eDP if needed */
+ if (intel_panel_use_ssc(dev_priv)) {
+ DRM_ERROR("enabling SSC on PCH\n");
+ temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
+ }
+ }
+ I915_WRITE(PCH_DREF_CONTROL, temp);
+ POSTING_READ(PCH_DREF_CONTROL);
+ udelay(200);
+ }
+}
+
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -4719,7 +5196,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct fdi_m_n m_n = {0};
u32 temp;
u32 lvds_sync = 0;
- int target_clock, pixel_multiplier, lane, link_bw, bpp, factor;
+ int target_clock, pixel_multiplier, lane, link_bw, factor;
+ unsigned int pipe_bpp;
+ bool dither;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
@@ -4846,56 +5325,38 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
- if (is_lvds) {
- /* the BPC will be 6 if it is 18-bit LVDS panel */
- if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
- temp |= PIPE_8BPC;
- else
- temp |= PIPE_6BPC;
- } else if (has_edp_encoder) {
- switch (dev_priv->edp.bpp/3) {
- case 8:
- temp |= PIPE_8BPC;
- break;
- case 10:
- temp |= PIPE_10BPC;
- break;
- case 6:
- temp |= PIPE_6BPC;
- break;
- case 12:
- temp |= PIPE_12BPC;
- break;
- }
- } else
- temp |= PIPE_8BPC;
- I915_WRITE(PIPECONF(pipe), temp);
-
- switch (temp & PIPE_BPC_MASK) {
- case PIPE_8BPC:
- bpp = 24;
+ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+ switch (pipe_bpp) {
+ case 18:
+ temp |= PIPE_6BPC;
break;
- case PIPE_10BPC:
- bpp = 30;
+ case 24:
+ temp |= PIPE_8BPC;
break;
- case PIPE_6BPC:
- bpp = 18;
+ case 30:
+ temp |= PIPE_10BPC;
break;
- case PIPE_12BPC:
- bpp = 36;
+ case 36:
+ temp |= PIPE_12BPC;
break;
default:
- DRM_ERROR("unknown pipe bpc value\n");
- bpp = 24;
+ WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
+ pipe_bpp);
+ temp |= PIPE_8BPC;
+ pipe_bpp = 24;
+ break;
}
+ intel_crtc->bpp = pipe_bpp;
+ I915_WRITE(PIPECONF(pipe), temp);
+
if (!lane) {
/*
* Account for spread spectrum to avoid
* oversubscribing the link. Max center spread
* is 2.5%; use 5% for safety's sake.
*/
- u32 bps = target_clock * bpp * 21 / 20;
+ u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
lane = bps / (link_bw * 8) + 1;
}
@@ -4903,51 +5364,10 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (pixel_multiplier > 1)
link_bw *= pixel_multiplier;
- ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
+ ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
+ &m_n);
- /* Ironlake: try to setup display ref clock before DPLL
- * enabling. This is only under driver's control after
- * PCH B stepping, previous chipset stepping should be
- * ignoring this setting.
- */
- temp = I915_READ(PCH_DREF_CONTROL);
- /* Always enable nonspread source */
- temp &= ~DREF_NONSPREAD_SOURCE_MASK;
- temp |= DREF_NONSPREAD_SOURCE_ENABLE;
- temp &= ~DREF_SSC_SOURCE_MASK;
- temp |= DREF_SSC_SOURCE_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
-
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
-
- if (has_edp_encoder) {
- if (intel_panel_use_ssc(dev_priv)) {
- temp |= DREF_SSC1_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, temp);
-
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
- }
- temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
-
- /* Enable CPU source on CPU attached eDP */
- if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- if (intel_panel_use_ssc(dev_priv))
- temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
- else
- temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
- } else {
- /* Enable SSC on PCH eDP if needed */
- if (intel_panel_use_ssc(dev_priv)) {
- DRM_ERROR("enabling SSC on PCH\n");
- temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
- }
- }
- I915_WRITE(PCH_DREF_CONTROL, temp);
- POSTING_READ(PCH_DREF_CONTROL);
- udelay(200);
- }
+ ironlake_update_pch_refclk(dev);
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
@@ -4964,7 +5384,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
} else if (is_sdvo && is_tv)
factor = 20;
- if (clock.m1 < factor * clock.n)
+ if (clock.m < factor * clock.n)
fp |= FP_CB_TUNE;
dpll = 0;
@@ -5106,14 +5526,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PCH_LVDS, temp);
}
- /* set the dithering flag and clear for anything other than a panel. */
pipeconf &= ~PIPECONF_DITHER_EN;
pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
- if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
+ if ((is_lvds && dev_priv->lvds_dither) || dither) {
pipeconf |= PIPECONF_DITHER_EN;
pipeconf |= PIPECONF_DITHER_TYPE_ST1;
}
-
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
@@ -5217,8 +5635,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
- if (!HAS_PCH_SPLIT(dev))
- intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -5246,6 +5662,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
drm_vblank_post_modeset(dev, pipe);
+ intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
+
return ret;
}
@@ -5435,21 +5853,15 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
goto fail_locked;
}
- ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
- if (ret) {
- DRM_ERROR("failed to pin cursor bo\n");
- goto fail_locked;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, 0);
+ ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
- goto fail_unpin;
+ goto fail_locked;
}
ret = i915_gem_object_put_fence(obj);
if (ret) {
- DRM_ERROR("failed to move cursor bo into the GTT\n");
+ DRM_ERROR("failed to release fence for cursor");
goto fail_unpin;
}
@@ -6152,6 +6564,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
+ intel_update_fbc(work->dev);
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@@ -6262,6 +6675,197 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+static int intel_gen2_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long offset;
+ u32 flip_mask;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ goto out;
+
+ /* Can't queue multiple flips, so wait for the previous
+ * one to finish before executing the next.
+ */
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+static int intel_gen3_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long offset;
+ u32 flip_mask;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ goto out;
+
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pf, pipesrc;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ goto out;
+
+ /* i965+ uses the linear or tiled offsets from the
+ * Display Registers (which do not change across a page-flip)
+ * so we need only reprogram the base address.
+ */
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(obj->gtt_offset | obj->tiling_mode);
+
+ /* XXX Enabling the panel-fitter across page-flip is so far
+ * untested on non-native modes, so ignore it for now.
+ * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
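+/* The gen6 variant packs the tiling mode into the pitch dword rather than
+ * the base address, and reports the real panel fitter enable state.
+ */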
+static int intel_gen6_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pf, pipesrc;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ goto out;
+
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch | obj->tiling_mode);
+ OUT_RING(obj->gtt_offset);
+
+ pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+/*
+ * On gen7 we currently use the blit ring because (in early silicon at least)
+ * the render ring doesn't give us interrupts for page flip completion, which
+ * means clients will hang after the first flip is queued. Fortunately the
+ * blit ring generates interrupts properly, so use it instead.
+ */
+static int intel_gen7_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto out;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto out;
+
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+ intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+ intel_ring_emit(ring, (obj->gtt_offset));
+ intel_ring_emit(ring, (MI_NOOP));
+ intel_ring_advance(ring);
+out:
+ return ret;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ return -ENODEV;
+}
+
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
@@ -6272,9 +6876,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
- unsigned long flags, offset;
- int pipe = intel_crtc->pipe;
- u32 pf, pipesrc;
+ unsigned long flags;
int ret;
work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -6303,9 +6905,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
- if (ret)
- goto cleanup_work;
/* Reference the objects for the scheduled work. */
drm_gem_object_reference(&work->old_fb_obj->base);
@@ -6317,102 +6916,31 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup_objs;
- if (IS_GEN3(dev) || IS_GEN2(dev)) {
- u32 flip_mask;
-
- /* Can't queue multiple flips, so wait for the previous
- * one to finish before executing the next.
- */
- ret = BEGIN_LP_RING(2);
- if (ret)
- goto cleanup_objs;
-
- if (intel_crtc->plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
- }
-
work->pending_flip_obj = obj;
work->enable_stall_check = true;
- /* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
-
- ret = BEGIN_LP_RING(4);
- if (ret)
- goto cleanup_objs;
-
/* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
- switch (INTEL_INFO(dev)->gen) {
- case 2:
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
- break;
-
- case 3:
- OUT_RING(MI_DISPLAY_FLIP_I915 |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
- break;
-
- case 4:
- case 5:
- /* i965+ uses the linear or tiled offsets from the
- * Display Registers (which do not change across a page-flip)
- * so we need only reprogram the base address.
- */
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset | obj->tiling_mode);
-
- /* XXX Enabling the panel-fitter across page-flip is so far
- * untested on non-native modes, so ignore it for now.
- * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
- */
- pf = 0;
- pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- break;
-
- case 6:
- case 7:
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch | obj->tiling_mode);
- OUT_RING(obj->gtt_offset);
-
- pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
- pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- break;
- }
- ADVANCE_LP_RING();
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+ if (ret)
+ goto cleanup_pending;
+ intel_disable_fbc(dev);
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
return 0;
+cleanup_pending:
+ atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
cleanup_objs:
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
-cleanup_work:
mutex_unlock(&dev->struct_mutex);
spin_lock_irqsave(&dev->event_lock, flags);
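Editor's note: the hunk above replaces the long per-generation switch in intel_crtc_page_flip() with a single call through dev_priv->display.queue_flip, which intel_init_display() fills in later in this patch. Below is a minimal standalone sketch of that dispatch pattern, not kernel code; all names (fake_dev, gen2_flip, ...) are illustrative assumptions.

/* Sketch: pick a per-generation flip handler once, then call through it. */
#include <stdio.h>

struct fake_dev { int gen; };

typedef int (*queue_flip_fn)(struct fake_dev *dev);

static int gen2_flip(struct fake_dev *dev) { printf("gen2 flip\n"); return 0; }
static int gen6_flip(struct fake_dev *dev) { printf("gen6 flip\n"); return 0; }
static int default_flip(struct fake_dev *dev) { return -1; /* like -ENODEV */ }

int main(void)
{
	struct fake_dev dev = { .gen = 6 };
	queue_flip_fn queue_flip = default_flip;

	switch (dev.gen) {		/* chosen once, as intel_init_display() does */
	case 2: queue_flip = gen2_flip; break;
	case 6: queue_flip = gen6_flip; break;
	}

	return queue_flip(&dev);	/* the flip path no longer cares about gen */
}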
@@ -6530,6 +7058,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc_reset(&intel_crtc->base);
intel_crtc->active = true; /* force the pipe off on setup_init_config */
+ intel_crtc->bpp = 24; /* default for pre-Ironlake */
if (HAS_PCH_SPLIT(dev)) {
intel_helper_funcs.prepare = ironlake_crtc_prepare;
@@ -6756,6 +7285,11 @@ int intel_framebuffer_init(struct drm_device *dev,
switch (mode_cmd->bpp) {
case 8:
case 16:
+ /* Only pre-ILK can handle 5:5:5 */
+ if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
+ return -EINVAL;
+ break;
+
case 24:
case 32:
break;
@@ -7170,6 +7704,59 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->dev->struct_mutex);
}
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+{
+ int min_freq = 15;
+ int gpu_freq, ia_freq, max_ia_freq;
+ int scaling_factor = 180;
+
+ max_ia_freq = cpufreq_quick_get_max(0);
+ /*
+ * Default to measured freq if none found, PCU will ensure we don't go
+ * over
+ */
+ if (!max_ia_freq)
+ max_ia_freq = tsc_khz;
+
+ /* Convert from kHz to MHz */
+ max_ia_freq /= 1000;
+
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+ gpu_freq--) {
+ int diff = dev_priv->max_delay - gpu_freq;
+
+ /*
+ * For GPU frequencies less than 750MHz, just use the lowest
+ * ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+
+ I915_WRITE(GEN6_PCODE_DATA,
+ (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
+ gpu_freq);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+ GEN6_PCODE_READY) == 0, 10)) {
+ DRM_ERROR("pcode write of freq table timed out\n");
+ continue;
+ }
+ }
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
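Editor's note: gen6_update_ring_freq() above builds a GPU-frequency to IA-frequency table for the PCU; below a GPU step threshold it pins the IA reference at 800 MHz, otherwise it scales down from the CPU's maximum. A standalone sketch of that arithmetic follows; max_ia_freq, max_delay and min_delay are made-up values, not anything read from hardware.

/* Sketch of the ring/IA frequency table math, with assumed limits. */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int max_ia_freq = 3400;			/* CPU max, MHz (assumed) */
	int max_delay = 22, min_delay = 11;	/* GPU freq steps (assumed) */
	int min_freq = 15, scaling_factor = 180;
	int gpu_freq;

	for (gpu_freq = max_delay; gpu_freq >= min_delay; gpu_freq--) {
		int diff = max_delay - gpu_freq;
		int ia_freq;

		if (gpu_freq < min_freq)
			ia_freq = 800;	/* floor for low GPU frequencies */
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		printf("gpu step %2d -> IA ratio %d (x100 MHz)\n",
		       gpu_freq, ia_freq);
	}
	return 0;
}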
@@ -7275,10 +7862,12 @@ static void gen6_init_clock_gating(struct drm_device *dev)
ILK_DPARB_CLK_GATE |
ILK_DPFD_CLK_GATE);
- for_each_pipe(pipe)
+ for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
@@ -7295,10 +7884,12 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
- for_each_pipe(pipe)
+ for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -7381,6 +7972,7 @@ static void ibx_init_clock_gating(struct drm_device *dev)
static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
@@ -7390,6 +7982,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
+ /* Without this, mode sets may fail silently on FDI */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
static void ironlake_teardown_rc6(struct drm_device *dev)
@@ -7526,9 +8121,11 @@ static void intel_init_display(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.dpms = ironlake_crtc_dpms;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.update_plane = ironlake_update_plane;
} else {
dev_priv->display.dpms = i9xx_crtc_dpms;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.update_plane = i9xx_update_plane;
}
if (I915_HAS_FBC(dev)) {
@@ -7657,6 +8254,31 @@ static void intel_init_display(struct drm_device *dev)
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
+
+ /* Default just returns -ENODEV to indicate unsupported */
+ dev_priv->display.queue_flip = intel_default_queue_flip;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ dev_priv->display.queue_flip = intel_gen2_queue_flip;
+ break;
+
+ case 3:
+ dev_priv->display.queue_flip = intel_gen3_queue_flip;
+ break;
+
+ case 4:
+ case 5:
+ dev_priv->display.queue_flip = intel_gen4_queue_flip;
+ break;
+
+ case 6:
+ dev_priv->display.queue_flip = intel_gen6_queue_flip;
+ break;
+ case 7:
+ dev_priv->display.queue_flip = intel_gen7_queue_flip;
+ break;
+ }
}
/*
@@ -7672,6 +8294,15 @@ static void quirk_pipea_force (struct drm_device *dev)
DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
}
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -7700,6 +8331,12 @@ struct intel_quirk intel_quirks[] = {
/* 855 & before need to leave pipe A & dpll A up */
{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+
+ /* Lenovo U160 cannot use SSC on LVDS */
+ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+ /* Sony Vaio Y cannot use SSC on LVDS */
+ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};
static void intel_init_quirks(struct drm_device *dev)
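Editor's note: the quirk entries above match on PCI device ID plus subsystem vendor/device, with PCI_ANY_ID acting as a wildcard. The body of intel_init_quirks() is not part of this hunk, so the following is only an assumption about how such a table is typically walked, sketched as standalone code with illustrative names.

/* Sketch of wildcard matching against a quirk table (not the real code). */
#include <stdio.h>

#define ANY_ID (~0u)

struct quirk {
	unsigned int device, subsystem_vendor, subsystem_device;
	void (*hook)(void);
};

static void ssc_force_disable(void) { printf("disabling LVDS SSC\n"); }

static const struct quirk quirks[] = {
	{ 0x0046, 0x17aa, 0x3920, ssc_force_disable },	/* Lenovo U160 */
	{ 0x0046, 0x104d, 0x9076, ssc_force_disable },	/* Sony Vaio Y */
};

int main(void)
{
	unsigned int dev = 0x0046, sub_ven = 0x17aa, sub_dev = 0x3920;
	size_t i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		if (q->device == dev &&
		    (q->subsystem_vendor == sub_ven ||
		     q->subsystem_vendor == ANY_ID) &&
		    (q->subsystem_device == sub_dev ||
		     q->subsystem_device == ANY_ID))
			q->hook();
	}
	return 0;
}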
@@ -7788,8 +8425,10 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_emon(dev);
}
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
gen6_enable_rps(dev_priv);
+ gen6_update_ring_freq(dev_priv);
+ }
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
@@ -7825,12 +8464,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_increase_pllclock(crtc);
}
- if (dev_priv->display.disable_fbc)
- dev_priv->display.disable_fbc(dev);
+ intel_disable_fbc(dev);
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev) || IS_GEN7(dev))
gen6_disable_rps(dev);
if (IS_IRONLAKE_M(dev))
@@ -7843,6 +8481,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
+ /* flush any delayed tasks or pending work */
+ flush_scheduled_work();
+
/* Shut off idle work before the crtcs get freed. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
intel_crtc = to_intel_crtc(crtc);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 391b55f1cc7..44fef5e1c49 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -53,7 +53,7 @@ struct intel_dp {
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
- uint8_t dpcd[4];
+ uint8_t dpcd[8];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
@@ -138,8 +138,8 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
int max_lane_count = 4;
- if (intel_dp->dpcd[0] >= 0x11) {
- max_lane_count = intel_dp->dpcd[2] & 0x1f;
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+ max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
switch (max_lane_count) {
case 1: case 2: case 4:
break;
@@ -153,7 +153,7 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
- int max_link_bw = intel_dp->dpcd[1];
+ int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
switch (max_link_bw) {
case DP_LINK_BW_1_62:
@@ -179,12 +179,14 @@ intel_dp_link_clock(uint8_t link_bw)
static int
intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int bpp = 24;
- if (is_edp(intel_dp))
- return (pixel_clock * dev_priv->edp.bpp + 7) / 8;
- else
- return pixel_clock * 3;
+ if (intel_crtc)
+ bpp = intel_crtc->bpp;
+
+ return (pixel_clock * bpp + 7) / 8;
}
static int
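Editor's note: intel_dp_link_required() above now uses the CRTC's actual bpp instead of assuming 3 bytes per pixel, so the required bandwidth is pixel_clock * bpp / 8, rounded up. A quick standalone check of that formula; the pixel clock and bpp values are illustrative only, in the same units as mode->clock.

/* Sketch: DP bandwidth requirement for a 148.5 MHz pixel clock. */
#include <stdio.h>

static int link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 7) / 8;	/* round up to whole bytes */
}

int main(void)
{
	printf("148500 kHz @ 24 bpp -> %d\n", link_required(148500, 24));
	printf("148500 kHz @ 30 bpp -> %d\n", link_required(148500, 30));
	return 0;
}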
@@ -315,9 +317,17 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
else
precharge = 5;
- if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
- DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
- I915_READ(ch_ctl));
+ /* Try to wait for any previous AUX channel activity */
+ for (try = 0; try < 3; try++) {
+ status = I915_READ(ch_ctl);
+ if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (try == 3) {
+ WARN(1, "dp_aux_ch not started status 0x%08x\n",
+ I915_READ(ch_ctl));
return -EBUSY;
}
@@ -682,7 +692,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int lane_count = 4, bpp = 24;
+ int lane_count = 4;
struct intel_dp_m_n m_n;
int pipe = intel_crtc->pipe;
@@ -701,7 +711,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
break;
} else if (is_edp(intel_dp)) {
lane_count = dev_priv->edp.lanes;
- bpp = dev_priv->edp.bpp;
break;
}
}
@@ -711,7 +720,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
* the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total.
*/
- intel_dp_compute_m_n(bpp, lane_count,
+ intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
if (HAS_PCH_SPLIT(dev)) {
@@ -770,11 +779,13 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
intel_dp->link_configuration[0] = intel_dp->link_bw;
intel_dp->link_configuration[1] = intel_dp->lane_count;
+ intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
- if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
intel_dp->DP |= DP_ENHANCED_FRAMING;
}
@@ -942,11 +953,44 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
udelay(200);
}
+/* If the sink supports it, try to set the power state appropriately */
+static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+{
+ int ret, i;
+
+ /* Should have a valid DPCD by this point */
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
+ DP_SET_POWER_D3);
+ if (ret != 1)
+ DRM_DEBUG_DRIVER("failed to write sink power state\n");
+ } else {
+ /*
+ * When turning on, we need to retry for 1ms to give the sink
+ * time to wake up.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = intel_dp_aux_native_write_1(intel_dp,
+ DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ msleep(1);
+ }
+ }
+}
+
static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
+ /* Wake up the sink first */
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
if (is_edp(intel_dp)) {
ironlake_edp_backlight_off(dev);
ironlake_edp_panel_off(dev);
@@ -977,6 +1021,8 @@ static void intel_dp_commit(struct drm_encoder *encoder)
if (is_edp(intel_dp))
ironlake_edp_backlight_on(dev);
+
+ intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
}
static void

@@ -990,6 +1036,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
if (mode != DRM_MODE_DPMS_ON) {
if (is_edp(intel_dp))
ironlake_edp_backlight_off(dev);
+ intel_dp_sink_dpms(intel_dp, mode);
intel_dp_link_down(intel_dp);
if (is_edp(intel_dp))
ironlake_edp_panel_off(dev);
@@ -998,6 +1045,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
} else {
if (is_edp(intel_dp))
ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, mode);
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_start_link_train(intel_dp);
if (is_edp(intel_dp)) {
@@ -1013,20 +1061,41 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
}
/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
+ uint8_t *recv, int recv_bytes)
+{
+ int ret, i;
+
+ /*
+ * Sinks are *supposed* to come up within 1ms from an off state,
+ * but we're also supposed to retry 3 times per the spec.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = intel_dp_aux_native_read(intel_dp, address, recv,
+ recv_bytes);
+ if (ret == recv_bytes)
+ return true;
+ msleep(1);
+ }
+
+ return false;
+}
+
+/*
* Fetch AUX CH registers 0x202 - 0x207 which contain
* link status information
*/
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp)
{
- int ret;
-
- ret = intel_dp_aux_native_read(intel_dp,
- DP_LANE0_1_STATUS,
- intel_dp->link_status, DP_LINK_STATUS_SIZE);
- if (ret != DP_LINK_STATUS_SIZE)
- return false;
- return true;
+ return intel_dp_aux_native_read_retry(intel_dp,
+ DP_LANE0_1_STATUS,
+ intel_dp->link_status,
+ DP_LINK_STATUS_SIZE);
}
static uint8_t
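Editor's note: both intel_dp_sink_dpms() and intel_dp_aux_native_read_retry() above rely on the same idiom: try the AUX transaction up to three times with roughly 1 ms between attempts, since a sink waking from a low-power state may not answer immediately. A standalone sketch of that pattern; do_aux_read() is a stand-in, not a real API.

/* Sketch of the retry-three-times-with-1ms-sleep idiom. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int attempts;

static bool do_aux_read(void)
{
	return ++attempts == 2;	/* pretend the sink answers on the 2nd try */
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		if (do_aux_read()) {
			printf("read ok after %d attempt(s)\n", i + 1);
			return 0;
		}
		usleep(1000);	/* ~1 ms, like msleep(1) in the driver */
	}
	printf("sink did not respond\n");
	return 1;
}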
@@ -1278,10 +1347,16 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
u32 reg;
uint32_t DP = intel_dp->DP;
- /* Enable output, wait for it to become active */
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ /*
+ * On CPT we have to enable the port in training pattern 1, which
+ * will happen below in intel_dp_set_link_train. Otherwise, enable
+ * the port and wait for it to become active.
+ */
+ if (!HAS_PCH_CPT(dev)) {
+ I915_WRITE(intel_dp->output_reg, intel_dp->DP);
+ POSTING_READ(intel_dp->output_reg);
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@@ -1314,7 +1389,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
reg = DP | DP_LINK_TRAIN_PAT_1;
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_1))
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE))
break;
/* Set training pattern 1 */
@@ -1389,7 +1465,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, reg,
- DP_TRAINING_PATTERN_2))
+ DP_TRAINING_PATTERN_2 |
+ DP_LINK_SCRAMBLING_DISABLE))
break;
udelay(400);
@@ -1503,6 +1580,18 @@ intel_dp_link_down(struct intel_dp *intel_dp)
POSTING_READ(intel_dp->output_reg);
}
+static bool
+intel_dp_get_dpcd(struct intel_dp *intel_dp)
+{
+ if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
+ sizeof (intel_dp->dpcd)) &&
+ (intel_dp->dpcd[DP_DPCD_REV] != 0)) {
+ return true;
+ }
+
+ return false;
+}
+
/*
* According to DP spec
* 5.1.2:
@@ -1515,21 +1604,41 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
+ return;
+
if (!intel_dp->base.base.crtc)
return;
+ /* Try to read receiver status if the link appears to be up */
if (!intel_dp_get_link_status(intel_dp)) {
intel_dp_link_down(intel_dp);
return;
}
+ /* Now read the DPCD to see if it's actually running */
+ if (!intel_dp_get_dpcd(intel_dp)) {
+ intel_dp_link_down(intel_dp);
+ return;
+ }
+
if (!intel_channel_eq_ok(intel_dp)) {
+ DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
+ drm_get_encoder_name(&intel_dp->base.base));
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
}
}
static enum drm_connector_status
+intel_dp_detect_dpcd(struct intel_dp *intel_dp)
+{
+ if (intel_dp_get_dpcd(intel_dp))
+ return connector_status_connected;
+ return connector_status_disconnected;
+}
+
+static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
enum drm_connector_status status;
@@ -1542,17 +1651,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
return status;
}
- status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_dp,
- 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd))
- == sizeof(intel_dp->dpcd)) {
- if (intel_dp->dpcd[0] != 0)
- status = connector_status_connected;
- }
- DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
- intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
- return status;
+ return intel_dp_detect_dpcd(intel_dp);
}
static enum drm_connector_status
@@ -1560,7 +1659,6 @@ g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum drm_connector_status status;
uint32_t temp, bit;
switch (intel_dp->output_reg) {
@@ -1582,15 +1680,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
if ((temp & bit) == 0)
return connector_status_disconnected;
- status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
- {
- if (intel_dp->dpcd[0] != 0)
- status = connector_status_connected;
- }
-
- return status;
+ return intel_dp_detect_dpcd(intel_dp);
}
/**
@@ -1613,6 +1703,12 @@ intel_dp_detect(struct drm_connector *connector, bool force)
status = ironlake_dp_detect(intel_dp);
else
status = g4x_dp_detect(intel_dp);
+
+ DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
+ intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
+ intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
+ intel_dp->dpcd[6], intel_dp->dpcd[7]);
+
if (status != connector_status_connected)
return status;
@@ -1745,6 +1841,11 @@ done:
static void
intel_dp_destroy (struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
+
+ if (intel_dpd_is_edp(dev))
+ intel_panel_destroy_backlight(dev);
+
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -1790,8 +1891,7 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
- if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON)
- intel_dp_check_link_status(intel_dp);
+ intel_dp_check_link_status(intel_dp);
}
/* Return which DP Port should be selected for Transcoder DP control */
@@ -1936,7 +2036,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
- int ret;
+ bool ret;
u32 pp_on, pp_div;
pp_on = I915_READ(PCH_PP_ON_DELAYS);
@@ -1949,13 +2049,12 @@ intel_dp_init(struct drm_device *dev, int output_reg)
dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
ironlake_edp_panel_vdd_on(intel_dp);
- ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
- intel_dp->dpcd,
- sizeof(intel_dp->dpcd));
+ ret = intel_dp_get_dpcd(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp);
- if (ret == sizeof(intel_dp->dpcd)) {
- if (intel_dp->dpcd[0] >= 0x11)
- dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
+ if (ret) {
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+ dev_priv->no_aux_handshake =
+ intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
} else {
/* if this fails, presume the device is a ghost */
@@ -1978,6 +2077,8 @@ intel_dp_init(struct drm_device *dev, int output_reg)
DRM_MODE_TYPE_PREFERRED;
}
}
+ dev_priv->int_edp_connector = connector;
+ intel_panel_setup_backlight(dev);
}
intel_dp_add_properties(intel_dp, connector);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9ffa61eb4d7..0b2ee9d3998 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -170,6 +170,7 @@ struct intel_crtc {
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
bool cursor_visible;
+ unsigned int bpp;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -177,10 +178,28 @@ struct intel_crtc {
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define DIP_HEADER_SIZE 5
+
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
+#define DIP_TYPE_SPD 0x3
+#define DIP_VERSION_SPD 0x1
+#define DIP_LEN_SPD 25
+#define DIP_SPD_UNKNOWN 0
+#define DIP_SPD_DSTB 0x1
+#define DIP_SPD_DVDP 0x2
+#define DIP_SPD_DVHS 0x3
+#define DIP_SPD_HDDVR 0x4
+#define DIP_SPD_DVC 0x5
+#define DIP_SPD_DSC 0x6
+#define DIP_SPD_VCD 0x7
+#define DIP_SPD_GAME 0x8
+#define DIP_SPD_PC 0x9
+#define DIP_SPD_BD 0xa
+#define DIP_SPD_SCD 0xb
+
struct dip_infoframe {
uint8_t type; /* HB0 */
uint8_t ver; /* HB1 */
@@ -205,6 +224,11 @@ struct dip_infoframe {
uint16_t left_bar_end;
uint16_t right_bar_start;
} avi;
+ struct {
+ uint8_t vn[8];
+ uint8_t pd[16];
+ uint8_t sdi;
+ } spd;
uint8_t payload[27];
} __attribute__ ((packed)) body;
} __attribute__((packed));
@@ -233,6 +257,13 @@ struct intel_unpin_work {
bool enable_stall_check;
};
+struct intel_fbc_work {
+ struct delayed_work work;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ int interval;
+};
+
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
@@ -266,9 +297,10 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern u32 intel_panel_get_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern void intel_panel_setup_backlight(struct drm_device *dev);
+extern int intel_panel_setup_backlight(struct drm_device *dev);
extern void intel_panel_enable_backlight(struct drm_device *dev);
extern void intel_panel_disable_backlight(struct drm_device *dev);
+extern void intel_panel_destroy_backlight(struct drm_device *dev);
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
@@ -317,6 +349,7 @@ extern void intel_enable_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
extern void gen6_disable_rps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index aa0a8e83142..226ba830f38 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -45,6 +45,8 @@ struct intel_hdmi {
bool has_hdmi_sink;
bool has_audio;
int force_audio;
+ void (*write_infoframe)(struct drm_encoder *encoder,
+ struct dip_infoframe *frame);
};
static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
@@ -58,37 +60,70 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
struct intel_hdmi, base);
}
-void intel_dip_infoframe_csum(struct dip_infoframe *avi_if)
+void intel_dip_infoframe_csum(struct dip_infoframe *frame)
{
- uint8_t *data = (uint8_t *)avi_if;
+ uint8_t *data = (uint8_t *)frame;
uint8_t sum = 0;
unsigned i;
- avi_if->checksum = 0;
- avi_if->ecc = 0;
+ frame->checksum = 0;
+ frame->ecc = 0;
- for (i = 0; i < sizeof(*avi_if); i++)
+ /* Header isn't part of the checksum */
+ for (i = 5; i < frame->len; i++)
sum += data[i];
- avi_if->checksum = 0x100 - sum;
+ frame->checksum = 0x100 - sum;
}
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+static u32 intel_infoframe_index(struct dip_infoframe *frame)
{
- struct dip_infoframe avi_if = {
- .type = DIP_TYPE_AVI,
- .ver = DIP_VERSION_AVI,
- .len = DIP_LEN_AVI,
- };
- uint32_t *data = (uint32_t *)&avi_if;
+ u32 flags = 0;
+
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ flags |= VIDEO_DIP_SELECT_AVI;
+ break;
+ case DIP_TYPE_SPD:
+ flags |= VIDEO_DIP_SELECT_SPD;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ break;
+ }
+
+ return flags;
+}
+
+static u32 intel_infoframe_flags(struct dip_infoframe *frame)
+{
+ u32 flags = 0;
+
+ switch (frame->type) {
+ case DIP_TYPE_AVI:
+ flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
+ break;
+ case DIP_TYPE_SPD:
+ flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_2VSYNC;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ break;
+ }
+
+ return flags;
+}
+
+static void i9xx_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- u32 port;
- unsigned i;
+ u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
- if (!intel_hdmi->has_hdmi_sink)
- return;
/* XXX first guess at handling video port, is this correct? */
if (intel_hdmi->sdvox_reg == SDVOB)
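Editor's note: the reworked intel_dip_infoframe_csum() above zeroes the checksum and ECC fields, sums the payload bytes (skipping the packed header), and stores 0x100 - sum, so that the summed bytes plus the checksum come out to zero modulo 256. A standalone check of that arithmetic; the payload bytes below are arbitrary example data.

/* Sketch: verify the 0x100 - sum checksum rule. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t payload[] = { 0x82, 0x02, 0x0d, 0x1f, 0x20, 0x31 };
	unsigned int i, sum = 0;
	uint8_t checksum;

	for (i = 0; i < sizeof(payload); i++)
		sum += payload[i];
	checksum = 0x100 - sum;		/* truncates to the low 8 bits */

	printf("sum=0x%x checksum=0x%02x total mod 256=%u\n",
	       sum, checksum, (sum + checksum) & 0xff);
	return 0;
}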
@@ -98,18 +133,87 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
else
return;
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
- VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC);
+ flags = intel_infoframe_index(frame);
+
+ val &= ~VIDEO_DIP_SELECT_MASK;
- intel_dip_infoframe_csum(&avi_if);
- for (i = 0; i < sizeof(avi_if); i += 4) {
+ I915_WRITE(VIDEO_DIP_CTL, val | port | flags);
+
+ for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
- I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
- VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC |
- VIDEO_DIP_ENABLE_AVI);
+ flags |= intel_infoframe_flags(frame);
+
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
+}
+
+static void ironlake_write_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ uint32_t *data = (uint32_t *)frame;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ u32 flags, val = I915_READ(reg);
+
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ flags = intel_infoframe_index(frame);
+
+ val &= ~VIDEO_DIP_SELECT_MASK;
+
+ I915_WRITE(reg, val | flags);
+
+ for (i = 0; i < len; i += 4) {
+ I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ data++;
+ }
+
+ flags |= intel_infoframe_flags(frame);
+
+ I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
+}
+static void intel_set_infoframe(struct drm_encoder *encoder,
+ struct dip_infoframe *frame)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ if (!intel_hdmi->has_hdmi_sink)
+ return;
+
+ intel_dip_infoframe_csum(frame);
+ intel_hdmi->write_infoframe(encoder, frame);
+}
+
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+{
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+
+ intel_set_infoframe(encoder, &avi_if);
+}
+
+static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+{
+ struct dip_infoframe spd_if;
+
+ memset(&spd_if, 0, sizeof(spd_if));
+ spd_if.type = DIP_TYPE_SPD;
+ spd_if.ver = DIP_VERSION_SPD;
+ spd_if.len = DIP_LEN_SPD;
+ strcpy(spd_if.body.spd.vn, "Intel");
+ strcpy(spd_if.body.spd.pd, "Integrated gfx");
+ spd_if.body.spd.sdi = DIP_SPD_PC;
+
+ intel_set_infoframe(encoder, &spd_if);
}
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -124,12 +228,18 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
- sdvox |= intel_hdmi->color_range;
+ if (!HAS_PCH_SPLIT(dev))
+ sdvox |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
+ if (intel_crtc->bpp > 24)
+ sdvox |= COLOR_FORMAT_12bpc;
+ else
+ sdvox |= COLOR_FORMAT_8bpc;
+
/* Required on CPT */
if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
sdvox |= HDMI_MODE_SELECT;
@@ -150,6 +260,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
POSTING_READ(intel_hdmi->sdvox_reg);
intel_hdmi_set_avi_infoframe(encoder);
+ intel_hdmi_set_spd_infoframe(encoder);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
@@ -427,6 +538,11 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
intel_hdmi->sdvox_reg = sdvox_reg;
+ if (!HAS_PCH_SPLIT(dev))
+ intel_hdmi->write_infoframe = i9xx_write_infoframe;
+ else
+ intel_hdmi->write_infoframe = ironlake_write_infoframe;
+
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_hdmi_add_properties(intel_hdmi, connector);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b28f7bd9f88..31da77f5c05 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -72,14 +72,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
{
struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, lvds_reg;
+ u32 ctl_reg, lvds_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
lvds_reg = PCH_LVDS;
+ stat_reg = PCH_PP_STATUS;
} else {
ctl_reg = PP_CONTROL;
lvds_reg = LVDS;
+ stat_reg = PP_STATUS;
}
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
@@ -94,17 +96,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
intel_lvds->pfit_control,
intel_lvds->pfit_pgm_ratios);
- if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) {
- DRM_ERROR("timed out waiting for panel to power off\n");
- } else {
- I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
- intel_lvds->pfit_dirty = false;
- }
+
+ I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
+ intel_lvds->pfit_dirty = false;
}
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
POSTING_READ(lvds_reg);
+ if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(dev);
}
@@ -113,24 +114,25 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
{
struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 ctl_reg, lvds_reg;
+ u32 ctl_reg, lvds_reg, stat_reg;
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
lvds_reg = PCH_LVDS;
+ stat_reg = PCH_PP_STATUS;
} else {
ctl_reg = PP_CONTROL;
lvds_reg = LVDS;
+ stat_reg = PP_STATUS;
}
intel_panel_disable_backlight(dev);
I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+ if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power off\n");
if (intel_lvds->pfit_control) {
- if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
-
I915_WRITE(PFIT_CONTROL, 0);
intel_lvds->pfit_dirty = true;
}
@@ -398,53 +400,21 @@ out:
static void intel_lvds_prepare(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- /* We try to do the minimum that is necessary in order to unlock
- * the registers for mode setting.
- *
- * On Ironlake, this is quite simple as we just set the unlock key
- * and ignore all subtleties. (This may cause some issues...)
- *
+ /*
* Prior to Ironlake, we must disable the pipe if we want to adjust
* the panel fitter. However at all other times we can just reset
* the registers regardless.
*/
-
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PCH_PP_CONTROL,
- I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
- } else if (intel_lvds->pfit_dirty) {
- I915_WRITE(PP_CONTROL,
- (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
- & ~POWER_TARGET_ON);
- } else {
- I915_WRITE(PP_CONTROL,
- I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
- }
+ if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
+ intel_lvds_disable(intel_lvds);
}
static void intel_lvds_commit(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- /* Undo any unlocking done in prepare to prevent accidental
- * adjustment of the registers.
- */
- if (HAS_PCH_SPLIT(dev)) {
- u32 val = I915_READ(PCH_PP_CONTROL);
- if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
- I915_WRITE(PCH_PP_CONTROL, val & 0x3);
- } else {
- u32 val = I915_READ(PP_CONTROL);
- if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
- I915_WRITE(PP_CONTROL, val & 0x3);
- }
-
/* Always do a full power on as we do not know what state
* we were left in.
*/
@@ -582,6 +552,8 @@ static void intel_lvds_destroy(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_panel_destroy_backlight(dev);
+
if (dev_priv->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
@@ -690,6 +662,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "Dell OptiPlex FX170",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "AOpen Mini PC",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
@@ -1032,6 +1012,19 @@ out:
pwm = I915_READ(BLC_PWM_PCH_CTL1);
pwm |= PWM_PCH_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
+ /*
+ * Unlock registers and just
+ * leave them unlocked
+ */
+ I915_WRITE(PCH_PP_CONTROL,
+ I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+ } else {
+ /*
+ * Unlock registers and just
+ * leave them unlocked
+ */
+ I915_WRITE(PP_CONTROL,
+ I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
dev_priv->lid_notifier.notifier_call = intel_lid_notify;
if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
@@ -1041,6 +1034,9 @@ out:
/* keep the LVDS connector */
dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
+
+ intel_panel_setup_backlight(dev);
+
return true;
failed:
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index d2c71042290..b8e8158bb16 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -227,7 +227,6 @@ void intel_opregion_asle_intr(struct drm_device *dev)
asle->aslc = asle_stat;
}
-/* Only present on Ironlake+ */
void intel_opregion_gse_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -297,19 +296,26 @@ static int intel_opregion_video_event(struct notifier_block *nb,
/* The only video events relevant to opregion are 0x80. These indicate
either a docking event, lid switch or display switch request. In
Linux, these are handled by the dock, button and video drivers.
- We might want to fix the video driver to be opregion-aware in
- future, but right now we just indicate to the firmware that the
- request has been handled */
+ */
struct opregion_acpi *acpi;
+ struct acpi_bus_event *event = data;
+ int ret = NOTIFY_OK;
+
+ if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
+ return NOTIFY_DONE;
if (!system_opregion)
return NOTIFY_DONE;
acpi = system_opregion->acpi;
+
+ if (event->type == 0x80 && !(acpi->cevt & 0x1))
+ ret = NOTIFY_BAD;
+
acpi->csts = 0;
- return NOTIFY_OK;
+ return ret;
}
static struct notifier_block intel_opregion_notifier = {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a670c006982..d3603808682 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -773,14 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
+ ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
if (ret != 0)
return ret;
- ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
- if (ret != 0)
- goto out_unpin;
-
ret = i915_gem_object_put_fence(new_bo);
if (ret)
goto out_unpin;
@@ -1409,6 +1405,11 @@ void intel_setup_overlay(struct drm_device *dev)
overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
if (!overlay)
return;
+
+ mutex_lock(&dev->struct_mutex);
+ if (WARN_ON(dev_priv->overlay))
+ goto out_free;
+
overlay->dev = dev;
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
@@ -1448,7 +1449,7 @@ void intel_setup_overlay(struct drm_device *dev)
regs = intel_overlay_map_regs(overlay);
if (!regs)
- goto out_free_bo;
+ goto out_unpin_bo;
memset(regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(regs);
@@ -1457,14 +1458,17 @@ void intel_setup_overlay(struct drm_device *dev)
intel_overlay_unmap_regs(overlay, regs);
dev_priv->overlay = overlay;
+ mutex_unlock(&dev->struct_mutex);
DRM_INFO("initialized overlay support\n");
return;
out_unpin_bo:
- i915_gem_object_unpin(reg_bo);
+ if (!OVERLAY_NEEDS_PHYSICAL(dev))
+ i915_gem_object_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(&reg_bo->base);
out_free:
+ mutex_unlock(&dev->struct_mutex);
kfree(overlay);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a06ff07a4d3..a9e0c7bcd31 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -83,11 +83,15 @@ intel_pch_panel_fitting(struct drm_device *dev,
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
if (scaled_width > scaled_height) { /* pillar */
width = scaled_height / mode->vdisplay;
+ if (width & 1)
+ width++;
x = (adjusted_mode->hdisplay - width + 1) / 2;
y = 0;
height = adjusted_mode->vdisplay;
} else if (scaled_width < scaled_height) { /* letter */
height = scaled_width / mode->hdisplay;
+ if (height & 1)
+ height++;
y = (adjusted_mode->vdisplay - height + 1) / 2;
x = 0;
width = adjusted_mode->hdisplay;
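Editor's note: the hunk above rounds the aspect-corrected pillarbox width and letterbox height up to an even value, presumably because the panel fitter expects even sizes. A standalone sketch of the pillarbox case, with an assumed 1280x854 mode fitted to a 1366x768 panel so the rounding actually triggers.

/* Sketch of the PCH panel-fitter pillarbox math (illustrative numbers). */
#include <stdio.h>

int main(void)
{
	int mode_w = 1280, mode_h = 854;	/* requested mode (assumed) */
	int panel_w = 1366, panel_h = 768;	/* native panel (assumed) */
	unsigned int scaled_width = panel_w * mode_h;
	unsigned int scaled_height = mode_w * panel_h;
	int width, x;

	if (scaled_width > scaled_height) {	/* pillarbox */
		width = scaled_height / mode_h;	/* aspect-correct width */
		if (width & 1)
			width++;		/* keep the fitted size even */
		x = (panel_w - width + 1) / 2;	/* centre horizontally */
		printf("fitted width %d, x offset %d\n", width, x);
	}
	return 0;
}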
@@ -273,7 +277,7 @@ void intel_panel_enable_backlight(struct drm_device *dev)
dev_priv->backlight_enabled = true;
}
-void intel_panel_setup_backlight(struct drm_device *dev)
+static void intel_panel_init_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -305,3 +309,73 @@ intel_panel_detect(struct drm_device *dev)
return connector_status_unknown;
}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+static int intel_panel_update_status(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ intel_panel_set_backlight(dev, bd->props.brightness);
+ return 0;
+}
+
+static int intel_panel_get_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ return intel_panel_get_backlight(dev);
+}
+
+static const struct backlight_ops intel_panel_bl_ops = {
+ .update_status = intel_panel_update_status,
+ .get_brightness = intel_panel_get_brightness,
+};
+
+int intel_panel_setup_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct backlight_properties props;
+ struct drm_connector *connector;
+
+ intel_panel_init_backlight(dev);
+
+ if (dev_priv->int_lvds_connector)
+ connector = dev_priv->int_lvds_connector;
+ else if (dev_priv->int_edp_connector)
+ connector = dev_priv->int_edp_connector;
+ else
+ return -ENODEV;
+
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = intel_panel_get_max_backlight(dev);
+ dev_priv->backlight =
+ backlight_device_register("intel_backlight",
+ &connector->kdev, dev,
+ &intel_panel_bl_ops, &props);
+
+ if (IS_ERR(dev_priv->backlight)) {
+ DRM_ERROR("Failed to register backlight: %ld\n",
+ PTR_ERR(dev_priv->backlight));
+ dev_priv->backlight = NULL;
+ return -ENODEV;
+ }
+ dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev);
+ return 0;
+}
+
+void intel_panel_destroy_backlight(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (dev_priv->backlight)
+ backlight_device_unregister(dev_priv->backlight);
+}
+#else
+int intel_panel_setup_backlight(struct drm_device *dev)
+{
+ intel_panel_init_backlight(dev);
+ return 0;
+}
+
+void intel_panel_destroy_backlight(struct drm_device *dev)
+{
+ return;
+}
+#endif
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 95c4b142993..c30626ea9f9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -236,7 +236,8 @@ init_pipe_control(struct intel_ring_buffer *ring)
ret = -ENOMEM;
goto err;
}
- obj->cache_level = I915_CACHE_LLC;
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
ret = i915_gem_object_pin(obj, 4096, true);
if (ret)
@@ -289,6 +290,10 @@ static int init_render_ring(struct intel_ring_buffer *ring)
if (IS_GEN6(dev) || IS_GEN7(dev))
mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
I915_WRITE(MI_MODE, mode);
+ if (IS_GEN7(dev))
+ I915_WRITE(GFX_MODE_GEN7,
+ GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+ GFX_MODE_ENABLE(GFX_REPLAY_MODE));
}
if (INTEL_INFO(dev)->gen >= 6) {
@@ -776,7 +781,8 @@ static int init_status_page(struct intel_ring_buffer *ring)
ret = -ENOMEM;
goto err;
}
- obj->cache_level = I915_CACHE_LLC;
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
ret = i915_gem_object_pin(obj, 4096, true);
if (ret != 0) {
@@ -1319,6 +1325,9 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
ring->get_seqno = pc_render_get_seqno;
}
+ if (!I915_NEED_GFX_HWS(dev))
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c0e0ee63fbf..39ac2b634ae 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -165,7 +165,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
- return intel_wait_ring_buffer(ring, ring->space - 8);
+ return intel_wait_ring_buffer(ring, ring->size - 8);
}
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
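Editor's note: the one-liner above makes intel_wait_ring_idle() wait for the ring to become almost entirely free (size - 8 bytes) instead of waiting for the space it already has (space - 8), which would return immediately. A standalone sketch of the circular-buffer free-space arithmetic it relies on; the head/tail values are made up.

/* Sketch: ring free space, and what "idle" looks like. */
#include <stdio.h>

static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + 8);	/* 8-byte gap keeps head != tail */

	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	int size = 4096;

	printf("busy ring: space = %d\n", ring_space(1024, 3072, size));
	printf("idle ring: space = %d (== size - 8)\n",
	       ring_space(512, 512, size));
	return 0;
}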
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 113e4e7264c..210d570fd51 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1236,6 +1236,8 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
struct drm_connector *connector)
{
struct drm_encoder *encoder = &intel_tv->base.base;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -1258,6 +1260,10 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
/* Poll for TV detection */
tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
+ if (intel_crtc->pipe == 1)
+ tv_ctl |= TV_ENC_PIPEB_SELECT;
+ else
+ tv_ctl &= ~TV_ENC_PIPEB_SELECT;
tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
tv_dac |= (TVDAC_STATE_CHG_EN |
@@ -1277,26 +1283,26 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
to_intel_crtc(intel_tv->base.base.crtc)->pipe);
type = -1;
- if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) {
- DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
- /*
- * A B C
- * 0 1 1 Composite
- * 1 0 X svideo
- * 0 0 0 Component
- */
- if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
- DRM_DEBUG_KMS("Detected Composite TV connection\n");
- type = DRM_MODE_CONNECTOR_Composite;
- } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
- DRM_DEBUG_KMS("Detected S-Video TV connection\n");
- type = DRM_MODE_CONNECTOR_SVIDEO;
- } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
- DRM_DEBUG_KMS("Detected Component TV connection\n");
- type = DRM_MODE_CONNECTOR_Component;
- } else {
- DRM_DEBUG_KMS("Unrecognised TV connection\n");
- }
+ tv_dac = I915_READ(TV_DAC);
+ DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+ /*
+ * A B C
+ * 0 1 1 Composite
+ * 1 0 X svideo
+ * 0 0 0 Component
+ */
+ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+ DRM_DEBUG_KMS("Detected Composite TV connection\n");
+ type = DRM_MODE_CONNECTOR_Composite;
+ } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+ DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+ type = DRM_MODE_CONNECTOR_SVIDEO;
+ } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+ DRM_DEBUG_KMS("Detected Component TV connection\n");
+ type = DRM_MODE_CONNECTOR_Component;
+ } else {
+ DRM_DEBUG_KMS("Unrecognised TV connection\n");
+ type = -1;
}
I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 729d5fd7c88..b311faba34f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -135,13 +135,14 @@ static void load_vbios_pramin(struct drm_device *dev, uint8_t *data)
int i;
if (dev_priv->card_type >= NV_50) {
- uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8;
-
- if (!vbios_vram)
- vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000;
+ u64 addr = (u64)(nv_rd32(dev, 0x619f04) & 0xffffff00) << 8;
+ if (!addr) {
+ addr = (u64)nv_rd32(dev, 0x1700) << 16;
+ addr += 0xf0000;
+ }
old_bar0_pramin = nv_rd32(dev, 0x1700);
- nv_wr32(dev, 0x1700, vbios_vram >> 16);
+ nv_wr32(dev, 0x1700, addr >> 16);
}
/* bail if no rom signature */
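Editor's note: the hunk above widens the PRAMIN address math in load_vbios_pramin() to 64 bits, so a VBIOS image placed high in VRAM is not truncated by a 32-bit shift. A standalone illustration of the difference; the register value is made up.

/* Sketch: 32-bit vs 64-bit shift of the VBIOS base register. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint32_t reg = 0x01234567;	/* pretend nv_rd32(dev, 0x619f04) */

	uint32_t addr32 = (reg & ~0xff) << 8;			/* old: truncates */
	uint64_t addr64 = (uint64_t)(reg & 0xffffff00) << 8;	/* new: full address */

	printf("32-bit: 0x%08" PRIx32 "\n", addr32);
	printf("64-bit: 0x%010" PRIx64 "\n", addr64);
	return 0;
}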
@@ -5186,7 +5187,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
load_table_ptr = ROM16(bios->data[bitentry->offset]);
if (load_table_ptr == 0x0) {
- NV_ERROR(dev, "Pointer to BIT loadval table invalid\n");
+ NV_DEBUG(dev, "Pointer to BIT loadval table invalid\n");
return -EINVAL;
}
@@ -5965,6 +5966,12 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
if (cte->type == DCB_CONNECTOR_HDMI_1)
cte->type = DCB_CONNECTOR_DVI_I;
}
+
+ /* Gigabyte GV-NX86T512H */
+ if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
+ if (cte->type == DCB_CONNECTOR_HDMI_1)
+ cte->type = DCB_CONNECTOR_DVI_I;
+ }
}
static const u8 hpd_gpio[16] = {
@@ -6377,6 +6384,37 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
}
}
+ /* Some other twisted XFX board (rhbz#694914)
+ *
+ * The DVI/VGA encoder combo that's supposed to represent the
+ * DVI-I connector actually points at two different ones, and
+ * the HDMI connector ends up paired with the VGA instead.
+ *
+ * The connector table is missing anything for VGA at all, pointing it
+ * at an invalid conntab entry 2, so we figure it out ourselves.
+ */
+ if (nv_match_device(dev, 0x0615, 0x1682, 0x2605)) {
+ if (idx == 0) {
+ *conn = 0x02002300; /* VGA, connector 2 */
+ *conf = 0x00000028;
+ } else
+ if (idx == 1) {
+ *conn = 0x01010312; /* DVI, connector 0 */
+ *conf = 0x00020030;
+ } else
+ if (idx == 2) {
+ *conn = 0x04020310; /* VGA, connector 0 */
+ *conf = 0x00000028;
+ } else
+ if (idx == 3) {
+ *conn = 0x02021322; /* HDMI, connector 1 */
+ *conf = 0x00020010;
+ } else {
+ *conn = 0x0000000e; /* EOL */
+ *conf = 0x00000000;
+ }
+ }
+
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2ad49cbf7c8..890d50e4d68 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,16 +49,12 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
DRM_ERROR("bo %p still attached to GEM object\n", bo);
nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
- if (nvbo->vma.node) {
- nouveau_vm_unmap(&nvbo->vma);
- nouveau_vm_put(&nvbo->vma);
- }
kfree(nvbo);
}
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
- int *align, int *size, int *page_shift)
+ int *align, int *size)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
@@ -82,67 +78,51 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
}
}
} else {
- if (likely(dev_priv->chan_vm)) {
- if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
- *page_shift = dev_priv->chan_vm->lpg_shift;
- else
- *page_shift = dev_priv->chan_vm->spg_shift;
- } else {
- *page_shift = 12;
- }
-
- *size = roundup(*size, (1 << *page_shift));
- *align = max((1 << *page_shift), *align);
+ *size = roundup(*size, (1 << nvbo->page_shift));
+ *align = max((1 << nvbo->page_shift), *align);
}
*size = roundup(*size, PAGE_SIZE);
}
int
-nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
- int size, int align, uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, struct nouveau_bo **pnvbo)
+nouveau_bo_new(struct drm_device *dev, int size, int align,
+ uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+ struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
- int ret = 0, page_shift = 0;
+ int ret;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
return -ENOMEM;
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
+ INIT_LIST_HEAD(&nvbo->vma_list);
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &dev_priv->ttm.bdev;
- nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
- align >>= PAGE_SHIFT;
-
- if (dev_priv->chan_vm) {
- ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
- NV_MEM_ACCESS_RW, &nvbo->vma);
- if (ret) {
- kfree(nvbo);
- return ret;
- }
+ nvbo->page_shift = 12;
+ if (dev_priv->bar1_vm) {
+ if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
+ nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
}
+ nouveau_bo_fixup_align(nvbo, flags, &align, &size);
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
- nvbo->channel = chan;
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
- ttm_bo_type_device, &nvbo->placement, align, 0,
- false, NULL, size, nouveau_bo_del_ttm);
+ ttm_bo_type_device, &nvbo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, size,
+ nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
}
- nvbo->channel = NULL;
- if (nvbo->vma.node)
- nvbo->bo.offset = nvbo->vma.offset;
*pnvbo = nvbo;
return 0;
}
@@ -312,8 +292,6 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
if (ret)
return ret;
- if (nvbo->vma.node)
- nvbo->bo.offset = nvbo->vma.offset;
return 0;
}
@@ -440,7 +418,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
- man->gpu_offset = dev_priv->gart_info.aper_base;
break;
default:
NV_ERROR(dev, "Unknown GART type: %d\n",
@@ -501,19 +478,12 @@ static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *old_node = old_mem->mm_node;
- struct nouveau_mem *new_node = new_mem->mm_node;
- struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_mem *node = old_mem->mm_node;
+ u64 src_offset = node->vma[0].offset;
+ u64 dst_offset = node->vma[1].offset;
u32 page_count = new_mem->num_pages;
- u64 src_offset, dst_offset;
int ret;
- src_offset = old_node->tmp_vma.offset;
- if (new_node->tmp_vma.node)
- dst_offset = new_node->tmp_vma.offset;
- else
- dst_offset = nvbo->vma.offset;
-
page_count = new_mem->num_pages;
while (page_count) {
int line_count = (page_count > 2047) ? 2047 : page_count;
@@ -547,19 +517,13 @@ static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *old_node = old_mem->mm_node;
- struct nouveau_mem *new_node = new_mem->mm_node;
+ struct nouveau_mem *node = old_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 length = (new_mem->num_pages << PAGE_SHIFT);
- u64 src_offset, dst_offset;
+ u64 src_offset = node->vma[0].offset;
+ u64 dst_offset = node->vma[1].offset;
int ret;
- src_offset = old_node->tmp_vma.offset;
- if (new_node->tmp_vma.node)
- dst_offset = new_node->tmp_vma.offset;
- else
- dst_offset = nvbo->vma.offset;
-
while (length) {
u32 amount, stride, height;
@@ -695,6 +659,27 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
}
static int
+nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
+ struct ttm_mem_reg *mem, struct nouveau_vma *vma)
+{
+ struct nouveau_mem *node = mem->mm_node;
+ int ret;
+
+ ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
+ node->page_shift, NV_MEM_ACCESS_RO, vma);
+ if (ret)
+ return ret;
+
+ if (mem->mem_type == TTM_PL_VRAM)
+ nouveau_vm_map(vma, node);
+ else
+ nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
+ node, node->pages);
+
+ return 0;
+}
+
+static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
@@ -711,31 +696,20 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
}
- /* create temporary vma for old memory, this will get cleaned
- * up after ttm destroys the ttm_mem_reg
+ /* create temporary vmas for the transfer and attach them to the
+ * old nouveau_mem node, these will get cleaned up after ttm has
+ * destroyed the ttm_mem_reg
*/
if (dev_priv->card_type >= NV_50) {
struct nouveau_mem *node = old_mem->mm_node;
- if (!node->tmp_vma.node) {
- u32 page_shift = nvbo->vma.node->type;
- if (old_mem->mem_type == TTM_PL_TT)
- page_shift = nvbo->vma.vm->spg_shift;
-
- ret = nouveau_vm_get(chan->vm,
- old_mem->num_pages << PAGE_SHIFT,
- page_shift, NV_MEM_ACCESS_RO,
- &node->tmp_vma);
- if (ret)
- goto out;
- }
- if (old_mem->mem_type == TTM_PL_VRAM)
- nouveau_vm_map(&node->tmp_vma, node);
- else {
- nouveau_vm_map_sg(&node->tmp_vma, 0,
- old_mem->num_pages << PAGE_SHIFT,
- node, node->pages);
- }
+ ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
+ if (ret)
+ goto out;
+
+ ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
+ if (ret)
+ goto out;
}
if (dev_priv->card_type < NV_50)
@@ -762,7 +736,6 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
struct ttm_mem_reg tmp_mem;
@@ -782,23 +755,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- if (dev_priv->card_type >= NV_50) {
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_mem *node = tmp_mem.mm_node;
- struct nouveau_vma *vma = &nvbo->vma;
- if (vma->node->type != vma->vm->spg_shift)
- vma = &node->tmp_vma;
- nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
- node, node->pages);
- }
-
ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
-
- if (dev_priv->card_type >= NV_50) {
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- nouveau_vm_unmap(&nvbo->vma);
- }
-
if (ret)
goto out;
@@ -844,30 +801,22 @@ out:
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_mem *node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_vma *vma = &nvbo->vma;
- struct nouveau_vm *vm = vma->vm;
-
- if (dev_priv->card_type < NV_50)
- return;
-
- switch (new_mem->mem_type) {
- case TTM_PL_VRAM:
- nouveau_vm_map(vma, node);
- break;
- case TTM_PL_TT:
- if (vma->node->type != vm->spg_shift) {
+ struct nouveau_vma *vma;
+
+ list_for_each_entry(vma, &nvbo->vma_list, head) {
+ if (new_mem->mem_type == TTM_PL_VRAM) {
+ nouveau_vm_map(vma, new_mem->mm_node);
+ } else
+ if (new_mem->mem_type == TTM_PL_TT &&
+ nvbo->page_shift == vma->vm->spg_shift) {
+ nouveau_vm_map_sg(vma, 0,
+ new_mem->num_pages << PAGE_SHIFT,
+ node, node->pages);
+ } else {
nouveau_vm_unmap(vma);
- vma = &node->tmp_vma;
}
- nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
- node, node->pages);
- break;
- default:
- nouveau_vm_unmap(&nvbo->vma);
- break;
}
}
@@ -1113,3 +1062,54 @@ struct ttm_bo_driver nouveau_bo_driver = {
.io_mem_free = &nouveau_ttm_io_mem_free,
};
+struct nouveau_vma *
+nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
+{
+ struct nouveau_vma *vma;
+ list_for_each_entry(vma, &nvbo->vma_list, head) {
+ if (vma->vm == vm)
+ return vma;
+ }
+
+ return NULL;
+}
+
+int
+nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
+ struct nouveau_vma *vma)
+{
+ const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ struct nouveau_mem *node = nvbo->bo.mem.mm_node;
+ int ret;
+
+ ret = nouveau_vm_get(vm, size, nvbo->page_shift,
+ NV_MEM_ACCESS_RW, vma);
+ if (ret)
+ return ret;
+
+ if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+ nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
+ else
+ if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ nouveau_vm_map_sg(vma, 0, size, node, node->pages);
+
+ list_add_tail(&vma->head, &nvbo->vma_list);
+ vma->refcount = 1;
+ return 0;
+}
+
+void
+nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
+{
+ if (vma->node) {
+ if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
+ spin_lock(&nvbo->bo.bdev->fence_lock);
+ ttm_bo_wait(&nvbo->bo, false, false, false);
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
+ nouveau_vm_unmap(vma);
+ }
+
+ nouveau_vm_put(vma);
+ list_del(&vma->head);
+ }
+}
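
The three helpers above replace the old single embedded nvbo->vma with a list of per-VM mappings. A minimal usage sketch, not part of the patch, showing the intended lifecycle (error handling trimmed; nvbo and chan are assumed to be a valid buffer object and channel, and use_gpu_address() is a hypothetical stand-in for whatever consumes the virtual address):

	struct nouveau_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	int ret;

	if (!vma)
		return -ENOMEM;

	/* reserve VM space for the bo in this channel's VM and map it */
	ret = nouveau_bo_vma_add(nvbo, chan->vm, vma);
	if (ret) {
		kfree(vma);
		return ret;
	}

	use_gpu_address(vma->offset);	/* GPU virtual address in chan->vm */

	/* later: find the mapping again and tear it down */
	vma = nouveau_bo_vma_find(nvbo, chan->vm);
	if (vma) {
		nouveau_bo_vma_del(nvbo, vma);	/* waits for GPU, unmaps, releases VM space */
		kfree(vma);
	}
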
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index a7583a8ddb0..b0d753f45bb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -27,40 +27,63 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
static int
-nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
+nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
{
+ u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_bo *pb = chan->pushbuf_bo;
- struct nouveau_gpuobj *pushbuf = NULL;
- int ret = 0;
+ int ret;
+
+ /* allocate buffer object */
+ ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, &chan->pushbuf_bo);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
+ if (ret)
+ goto out;
+
+ ret = nouveau_bo_map(chan->pushbuf_bo);
+ if (ret)
+ goto out;
+ /* create DMA object covering the entire memtype where the push
+ * buffer resides, userspace can submit its own push buffers from
+ * anywhere within the same memtype.
+ */
+ chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
if (dev_priv->card_type >= NV_50) {
+ ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
+ &chan->pushbuf_vma);
+ if (ret)
+ goto out;
+
if (dev_priv->card_type < NV_C0) {
ret = nouveau_gpuobj_dma_new(chan,
NV_CLASS_DMA_IN_MEMORY, 0,
(1ULL << 40),
NV_MEM_ACCESS_RO,
NV_MEM_TARGET_VM,
- &pushbuf);
+ &chan->pushbuf);
}
- chan->pushbuf_base = pb->bo.offset;
+ chan->pushbuf_base = chan->pushbuf_vma.offset;
} else
- if (pb->bo.mem.mem_type == TTM_PL_TT) {
+ if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->gart_info.aper_size,
NV_MEM_ACCESS_RO,
- NV_MEM_TARGET_GART, &pushbuf);
- chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
+ NV_MEM_TARGET_GART,
+ &chan->pushbuf);
} else
if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->fb_available_size,
NV_MEM_ACCESS_RO,
- NV_MEM_TARGET_VRAM, &pushbuf);
- chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
+ NV_MEM_TARGET_VRAM,
+ &chan->pushbuf);
} else {
/* NV04 cmdbuf hack, from original ddx.. not sure of its
* exact reason for existing :) PCI access to cmdbuf in
@@ -70,47 +93,22 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
pci_resource_start(dev->pdev, 1),
dev_priv->fb_available_size,
NV_MEM_ACCESS_RO,
- NV_MEM_TARGET_PCI, &pushbuf);
- chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
+ NV_MEM_TARGET_PCI,
+ &chan->pushbuf);
}
- nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
- nouveau_gpuobj_ref(NULL, &pushbuf);
- return ret;
-}
-
-static struct nouveau_bo *
-nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
-{
- struct nouveau_bo *pushbuf = NULL;
- int location, ret;
-
- if (nouveau_vram_pushbuf)
- location = TTM_PL_FLAG_VRAM;
- else
- location = TTM_PL_FLAG_TT;
-
- ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
- if (ret) {
- NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
- return NULL;
- }
-
- ret = nouveau_bo_pin(pushbuf, location);
- if (ret) {
- NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
- nouveau_bo_ref(NULL, &pushbuf);
- return NULL;
- }
-
- ret = nouveau_bo_map(pushbuf);
+out:
if (ret) {
- nouveau_bo_unpin(pushbuf);
- nouveau_bo_ref(NULL, &pushbuf);
- return NULL;
+ NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
+ nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
+ nouveau_gpuobj_ref(NULL, &chan->pushbuf);
+ if (chan->pushbuf_bo) {
+ nouveau_bo_unmap(chan->pushbuf_bo);
+ nouveau_bo_ref(NULL, &chan->pushbuf_bo);
+ }
}
- return pushbuf;
+ return 0;
}
/* allocates and initializes a fifo for user space consumption */
@@ -121,6 +119,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_channel *chan;
unsigned long flags;
int ret;
@@ -160,19 +159,14 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
INIT_LIST_HEAD(&chan->nvsw.flip);
INIT_LIST_HEAD(&chan->fence.pending);
- /* Allocate DMA push buffer */
- chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
- if (!chan->pushbuf_bo) {
- ret = -ENOMEM;
- NV_ERROR(dev, "pushbuf %d\n", ret);
+ /* setup channel's memory and vm */
+ ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
+ if (ret) {
+ NV_ERROR(dev, "gpuobj %d\n", ret);
nouveau_channel_put(&chan);
return ret;
}
- nouveau_dma_pre_init(chan);
- chan->user_put = 0x40;
- chan->user_get = 0x44;
-
/* Allocate space for per-channel fixed notifier memory */
ret = nouveau_notifier_init_channel(chan);
if (ret) {
@@ -181,21 +175,17 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
return ret;
}
- /* Setup channel's default objects */
- ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
+ /* Allocate DMA push buffer */
+ ret = nouveau_channel_pushbuf_init(chan);
if (ret) {
- NV_ERROR(dev, "gpuobj %d\n", ret);
+ NV_ERROR(dev, "pushbuf %d\n", ret);
nouveau_channel_put(&chan);
return ret;
}
- /* Create a dma object for the push buffer */
- ret = nouveau_channel_pushbuf_ctxdma_init(chan);
- if (ret) {
- NV_ERROR(dev, "pbctxdma %d\n", ret);
- nouveau_channel_put(&chan);
- return ret;
- }
+ nouveau_dma_pre_init(chan);
+ chan->user_put = 0x40;
+ chan->user_get = 0x44;
/* disable the fifo caches */
pfifo->reassign(dev, false);
@@ -220,6 +210,11 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
nouveau_debugfs_channel_init(chan);
NV_DEBUG(dev, "channel %d initialised\n", chan->id);
+ if (fpriv) {
+ spin_lock(&fpriv->lock);
+ list_add(&chan->list, &fpriv->channels);
+ spin_unlock(&fpriv->lock);
+ }
*chan_ret = chan;
return 0;
}
@@ -236,29 +231,23 @@ nouveau_channel_get_unlocked(struct nouveau_channel *ref)
}
struct nouveau_channel *
-nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
+nouveau_channel_get(struct drm_file *file_priv, int id)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_channel *chan;
- unsigned long flags;
-
- if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
- return ERR_PTR(-EINVAL);
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-
- if (unlikely(!chan))
- return ERR_PTR(-EINVAL);
- if (unlikely(file_priv && chan->file_priv != file_priv)) {
- nouveau_channel_put_unlocked(&chan);
- return ERR_PTR(-EINVAL);
+ spin_lock(&fpriv->lock);
+ list_for_each_entry(chan, &fpriv->channels, list) {
+ if (chan->id == id) {
+ chan = nouveau_channel_get_unlocked(chan);
+ spin_unlock(&fpriv->lock);
+ mutex_lock(&chan->mutex);
+ return chan;
+ }
}
+ spin_unlock(&fpriv->lock);
- mutex_lock(&chan->mutex);
- return chan;
+ return ERR_PTR(-EINVAL);
}
void
@@ -312,12 +301,14 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
/* destroy any resources the channel owned */
nouveau_gpuobj_ref(NULL, &chan->pushbuf);
if (chan->pushbuf_bo) {
+ nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
nouveau_bo_unmap(chan->pushbuf_bo);
nouveau_bo_unpin(chan->pushbuf_bo);
nouveau_bo_ref(NULL, &chan->pushbuf_bo);
}
- nouveau_gpuobj_channel_takedown(chan);
+ nouveau_ramht_ref(NULL, &chan->ramht, chan);
nouveau_notifier_takedown_channel(chan);
+ nouveau_gpuobj_channel_takedown(chan);
nouveau_channel_ref(NULL, pchan);
}
@@ -383,10 +374,11 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
for (i = 0; i < engine->fifo.channels; i++) {
- chan = nouveau_channel_get(dev, file_priv, i);
+ chan = nouveau_channel_get(file_priv, i);
if (IS_ERR(chan))
continue;
+ list_del(&chan->list);
atomic_dec(&chan->users);
nouveau_channel_put(&chan);
}
@@ -459,10 +451,11 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
struct drm_nouveau_channel_free *req = data;
struct nouveau_channel *chan;
- chan = nouveau_channel_get(dev, file_priv, req->channel);
+ chan = nouveau_channel_get(file_priv, req->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
+ list_del(&chan->list);
atomic_dec(&chan->users);
nouveau_channel_put(&chan);
return 0;
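
Channel lookup now walks the owning file's private list instead of the global channel table, so a client can only reach channels it created itself. A rough sketch of the pattern the ioctl paths in this patch follow after the change (illustrative only; req->channel is the ID passed in by userspace):

	struct nouveau_channel *chan;

	/* searches fpriv->channels and returns with chan->mutex held */
	chan = nouveau_channel_get(file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... operate on per-channel state ... */

	nouveau_channel_put(&chan);	/* drops the mutex and the reference */
	return 0;
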
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1595d0b6e81..939d4df0777 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -40,7 +40,7 @@
static void nouveau_connector_hotplug(void *, int);
static struct nouveau_encoder *
-find_encoder_by_type(struct drm_connector *connector, int type)
+find_encoder(struct drm_connector *connector, int type)
{
struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder;
@@ -170,8 +170,8 @@ nouveau_connector_of_detect(struct drm_connector *connector)
struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
if (!dn ||
- !((nv_encoder = find_encoder_by_type(connector, OUTPUT_TMDS)) ||
- (nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG))))
+ !((nv_encoder = find_encoder(connector, OUTPUT_TMDS)) ||
+ (nv_encoder = find_encoder(connector, OUTPUT_ANALOG))))
return NULL;
for_each_child_of_node(dn, cn) {
@@ -233,6 +233,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL;
+ struct nouveau_encoder *nv_partner;
struct nouveau_i2c_chan *i2c;
int type;
@@ -266,19 +267,22 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
* same i2c channel so the value returned from ddc_detect
* isn't necessarily correct.
*/
- if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
+ nv_partner = NULL;
+ if (nv_encoder->dcb->type == OUTPUT_TMDS)
+ nv_partner = find_encoder(connector, OUTPUT_ANALOG);
+ if (nv_encoder->dcb->type == OUTPUT_ANALOG)
+ nv_partner = find_encoder(connector, OUTPUT_TMDS);
+
+ if (nv_partner && ((nv_encoder->dcb->type == OUTPUT_ANALOG &&
+ nv_partner->dcb->type == OUTPUT_TMDS) ||
+ (nv_encoder->dcb->type == OUTPUT_TMDS &&
+ nv_partner->dcb->type == OUTPUT_ANALOG))) {
if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
type = OUTPUT_TMDS;
else
type = OUTPUT_ANALOG;
- nv_encoder = find_encoder_by_type(connector, type);
- if (!nv_encoder) {
- NV_ERROR(dev, "Detected %d encoder on %s, "
- "but no object!\n", type,
- drm_get_connector_name(connector));
- return connector_status_disconnected;
- }
+ nv_encoder = find_encoder(connector, type);
}
nouveau_connector_set_encoder(connector, nv_encoder);
@@ -292,9 +296,9 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
}
detect_analog:
- nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
+ nv_encoder = find_encoder(connector, OUTPUT_ANALOG);
if (!nv_encoder && !nouveau_tv_disable)
- nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
+ nv_encoder = find_encoder(connector, OUTPUT_TV);
if (nv_encoder && force) {
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
struct drm_encoder_helper_funcs *helper =
@@ -327,7 +331,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}
- nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
+ nv_encoder = find_encoder(connector, OUTPUT_LVDS);
if (!nv_encoder)
return connector_status_disconnected;
@@ -405,7 +409,7 @@ nouveau_connector_force(struct drm_connector *connector)
} else
type = OUTPUT_ANY;
- nv_encoder = find_encoder_by_type(connector, type);
+ nv_encoder = find_encoder(connector, type);
if (!nv_encoder) {
NV_ERROR(connector->dev, "can't find encoder to force %s on!\n",
drm_get_connector_name(connector));
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 568caedd721..00bc6eaad55 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -167,8 +167,13 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
int delta, int length)
{
struct nouveau_bo *pb = chan->pushbuf_bo;
- uint64_t offset = bo->bo.offset + delta;
+ struct nouveau_vma *vma;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
+ u64 offset;
+
+ vma = nouveau_bo_vma_find(bo, chan->vm);
+ BUG_ON(!vma);
+ offset = vma->offset + delta;
BUG_ON(chan->dma.ib_free < 1);
nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
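
With per-client VMs the pushbuf entry can no longer be built from bo->bo.offset; the GPU virtual address depends on which VM the submitting channel uses, so it comes from that channel's mapping (created when the client opened the GEM object). The added lines boil down to:

	vma = nouveau_bo_vma_find(bo, chan->vm);
	BUG_ON(!vma);				/* every submitted bo must already be mapped */
	offset = vma->offset + delta;		/* address within this channel's VM */
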
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 02c6f37d8bd..b30ddd8d2e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -73,7 +73,7 @@ int nouveau_ignorelid = 0;
module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
MODULE_PARM_DESC(noaccel, "Disable all acceleration");
-int nouveau_noaccel = 0;
+int nouveau_noaccel = -1;
module_param_named(noaccel, nouveau_noaccel, int, 0400);
MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
@@ -119,6 +119,10 @@ MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
int nouveau_msi;
module_param_named(msi, nouveau_msi, int, 0400);
+MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
+int nouveau_ctxfw;
+module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
+
int nouveau_fbpercrtc;
#if 0
module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -210,10 +214,13 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
pfifo->unload_context(dev);
for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
- if (dev_priv->eng[e]) {
- ret = dev_priv->eng[e]->fini(dev, e);
- if (ret)
- goto out_abort;
+ if (!dev_priv->eng[e])
+ continue;
+
+ ret = dev_priv->eng[e]->fini(dev, e, true);
+ if (ret) {
+ NV_ERROR(dev, "... engine %d failed: %d\n", i, ret);
+ goto out_abort;
}
}
@@ -354,7 +361,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
+ u32 offset = nv_crtc->cursor.nvbo->bo.offset;
nv_crtc->cursor.set_offset(nv_crtc, offset);
nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
@@ -389,7 +396,9 @@ static struct drm_driver driver = {
.firstopen = nouveau_firstopen,
.lastclose = nouveau_lastclose,
.unload = nouveau_unload,
+ .open = nouveau_open,
.preclose = nouveau_preclose,
+ .postclose = nouveau_postclose,
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
.debugfs_init = nouveau_debugfs_init,
.debugfs_cleanup = nouveau_debugfs_takedown,
@@ -420,6 +429,8 @@ static struct drm_driver driver = {
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
+ .gem_open_object = nouveau_gem_object_open,
+ .gem_close_object = nouveau_gem_object_close,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9c56331941e..d7d51deb34b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -46,9 +46,17 @@
#include "ttm/ttm_module.h"
struct nouveau_fpriv {
- struct ttm_object_file *tfile;
+ spinlock_t lock;
+ struct list_head channels;
+ struct nouveau_vm *vm;
};
+static inline struct nouveau_fpriv *
+nouveau_fpriv(struct drm_file *file_priv)
+{
+ return file_priv ? file_priv->driver_priv : NULL;
+}
+
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
#include "nouveau_drm.h"
@@ -69,7 +77,7 @@ struct nouveau_mem {
struct drm_device *dev;
struct nouveau_vma bar_vma;
- struct nouveau_vma tmp_vma;
+ struct nouveau_vma vma[2];
u8 page_shift;
struct drm_mm_node *tag;
@@ -107,7 +115,8 @@ struct nouveau_bo {
struct nouveau_channel *channel;
- struct nouveau_vma vma;
+ struct list_head vma_list;
+ unsigned page_shift;
uint32_t tile_mode;
uint32_t tile_flags;
@@ -176,9 +185,10 @@ struct nouveau_gpuobj {
uint32_t flags;
u32 size;
- u32 pinst;
- u32 cinst;
- u64 vinst;
+ u32 pinst; /* PRAMIN BAR offset */
+ u32 cinst; /* Channel offset */
+ u64 vinst; /* VRAM address */
+ u64 linst; /* VM address */
uint32_t engine;
uint32_t class;
@@ -201,6 +211,7 @@ enum nouveau_channel_mutex_class {
struct nouveau_channel {
struct drm_device *dev;
+ struct list_head list;
int id;
/* references to the channel data structure */
@@ -228,15 +239,18 @@ struct nouveau_channel {
uint32_t sequence;
uint32_t sequence_ack;
atomic_t last_sequence_irq;
+ struct nouveau_vma vma;
} fence;
/* DMA push buffer */
struct nouveau_gpuobj *pushbuf;
struct nouveau_bo *pushbuf_bo;
+ struct nouveau_vma pushbuf_vma;
uint32_t pushbuf_base;
/* Notifier memory */
struct nouveau_bo *notifier_bo;
+ struct nouveau_vma notifier_vma;
struct drm_mm notifier_heap;
/* PFIFO context */
@@ -278,6 +292,7 @@ struct nouveau_channel {
uint32_t sw_subchannel[8];
+ struct nouveau_vma dispc_vma[2];
struct {
struct nouveau_gpuobj *vblsem;
uint32_t vblsem_head;
@@ -297,7 +312,7 @@ struct nouveau_channel {
struct nouveau_exec_engine {
void (*destroy)(struct drm_device *, int engine);
int (*init)(struct drm_device *, int engine);
- int (*fini)(struct drm_device *, int engine);
+ int (*fini)(struct drm_device *, int engine, bool suspend);
int (*context_new)(struct nouveau_channel *, int engine);
void (*context_del)(struct nouveau_channel *, int engine);
int (*object_new)(struct nouveau_channel *, int engine,
@@ -314,7 +329,8 @@ struct nouveau_instmem_engine {
int (*suspend)(struct drm_device *dev);
void (*resume)(struct drm_device *dev);
- int (*get)(struct nouveau_gpuobj *, u32 size, u32 align);
+ int (*get)(struct nouveau_gpuobj *, struct nouveau_channel *,
+ u32 size, u32 align);
void (*put)(struct nouveau_gpuobj *);
int (*map)(struct nouveau_gpuobj *);
void (*unmap)(struct nouveau_gpuobj *);
@@ -445,9 +461,9 @@ struct nouveau_pm_level {
struct nouveau_pm_temp_sensor_constants {
u16 offset_constant;
s16 offset_mult;
- u16 offset_div;
- u16 slope_mult;
- u16 slope_div;
+ s16 offset_div;
+ s16 slope_mult;
+ s16 slope_div;
};
struct nouveau_pm_threshold_temp {
@@ -488,7 +504,10 @@ struct nouveau_pm_engine {
};
struct nouveau_vram_engine {
+ struct nouveau_mm *mm;
+
int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *dev);
int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
u32 type, struct nouveau_mem **);
void (*put)(struct drm_device *, struct nouveau_mem **);
@@ -608,6 +627,7 @@ enum nouveau_card_type {
struct drm_nouveau_private {
struct drm_device *dev;
+ bool noaccel;
/* the card type, takes NV_* as values */
enum nouveau_card_type card_type;
@@ -700,7 +720,6 @@ struct drm_nouveau_private {
/* VRAM/fb configuration */
uint64_t vram_size;
uint64_t vram_sys_base;
- u32 vram_rblock_size;
uint64_t fb_phys;
uint64_t fb_available_size;
@@ -784,12 +803,15 @@ extern int nouveau_override_conntype;
extern char *nouveau_perflvl;
extern int nouveau_perflvl_wr;
extern int nouveau_msi;
+extern int nouveau_ctxfw;
extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
/* nouveau_state.c */
+extern int nouveau_open(struct drm_device *, struct drm_file *);
extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
+extern void nouveau_postclose(struct drm_device *, struct drm_file *);
extern int nouveau_load(struct drm_device *, unsigned long flags);
extern int nouveau_firstopen(struct drm_device *);
extern void nouveau_lastclose(struct drm_device *);
@@ -847,7 +869,7 @@ extern int nouveau_channel_alloc(struct drm_device *dev,
extern struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *);
extern struct nouveau_channel *
-nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
+nouveau_channel_get(struct drm_file *, int id);
extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
extern void nouveau_channel_put(struct nouveau_channel **);
extern void nouveau_channel_ref(struct nouveau_channel *chan,
@@ -1120,7 +1142,6 @@ extern int nvc0_fifo_unload_context(struct drm_device *);
/* nv04_graph.c */
extern int nv04_graph_create(struct drm_device *);
-extern void nv04_graph_fifo_access(struct drm_device *, bool);
extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data);
@@ -1169,7 +1190,8 @@ extern int nv04_instmem_init(struct drm_device *);
extern void nv04_instmem_takedown(struct drm_device *);
extern int nv04_instmem_suspend(struct drm_device *);
extern void nv04_instmem_resume(struct drm_device *);
-extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern int nv04_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
+ u32 size, u32 align);
extern void nv04_instmem_put(struct nouveau_gpuobj *);
extern int nv04_instmem_map(struct nouveau_gpuobj *);
extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
@@ -1180,7 +1202,8 @@ extern int nv50_instmem_init(struct drm_device *);
extern void nv50_instmem_takedown(struct drm_device *);
extern int nv50_instmem_suspend(struct drm_device *);
extern void nv50_instmem_resume(struct drm_device *);
-extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern int nv50_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
+ u32 size, u32 align);
extern void nv50_instmem_put(struct nouveau_gpuobj *);
extern int nv50_instmem_map(struct nouveau_gpuobj *);
extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
@@ -1247,10 +1270,9 @@ extern int nv04_crtc_create(struct drm_device *, int index);
/* nouveau_bo.c */
extern struct ttm_bo_driver nouveau_bo_driver;
-extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
- int size, int align, uint32_t flags,
- uint32_t tile_mode, uint32_t tile_flags,
- struct nouveau_bo **);
+extern int nouveau_bo_new(struct drm_device *, int size, int align,
+ uint32_t flags, uint32_t tile_mode,
+ uint32_t tile_flags, struct nouveau_bo **);
extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
extern int nouveau_bo_unpin(struct nouveau_bo *);
extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1265,6 +1287,12 @@ extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
bool no_wait_reserve, bool no_wait_gpu);
+extern struct nouveau_vma *
+nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
+extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
+ struct nouveau_vma *);
+extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
+
/* nouveau_fence.c */
struct nouveau_fence;
extern int nouveau_fence_init(struct drm_device *);
@@ -1310,12 +1338,14 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
}
/* nouveau_gem.c */
-extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
- int size, int align, uint32_t domain,
- uint32_t tile_mode, uint32_t tile_flags,
- struct nouveau_bo **);
+extern int nouveau_gem_new(struct drm_device *, int size, int align,
+ uint32_t domain, uint32_t tile_mode,
+ uint32_t tile_flags, struct nouveau_bo **);
extern int nouveau_gem_object_new(struct drm_gem_object *);
extern void nouveau_gem_object_del(struct drm_gem_object *);
+extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
+extern void nouveau_gem_object_close(struct drm_gem_object *,
+ struct drm_file *);
extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
struct drm_file *);
extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index a3a88ad00f8..95c843e684b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -30,6 +30,7 @@
struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
+ struct nouveau_vma vma;
u32 r_dma;
u32 r_format;
u32 r_pitch;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 39aee6d4daf..14a8627efe4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -279,6 +279,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
struct fb_info *info;
struct drm_framebuffer *fb;
struct nouveau_framebuffer *nouveau_fb;
+ struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd mode_cmd;
struct pci_dev *pdev = dev->pdev;
@@ -296,8 +297,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
size = mode_cmd.pitch * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
- ret = nouveau_gem_new(dev, dev_priv->channel, size, 0,
- NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
+ ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
+ 0, 0x0000, &nvbo);
if (ret) {
NV_ERROR(dev, "failed to allocate framebuffer\n");
goto out;
@@ -318,6 +319,15 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
goto out;
}
+ chan = nouveau_nofbaccel ? NULL : dev_priv->channel;
+ if (chan && dev_priv->card_type >= NV_50) {
+ ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma);
+ if (ret) {
+ NV_ERROR(dev, "failed to map fb into chan: %d\n", ret);
+ chan = NULL;
+ }
+ }
+
mutex_lock(&dev->struct_mutex);
info = framebuffer_alloc(0, device);
@@ -448,6 +458,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
if (nouveau_fb->nvbo) {
nouveau_bo_unmap(nouveau_fb->nvbo);
+ nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 7347075ca5b..8d02d875376 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -336,6 +336,7 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *fence = NULL;
+ u64 offset = chan->fence.vma.offset + sema->mem->start;
int ret;
if (dev_priv->chipset < 0x84) {
@@ -345,13 +346,10 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
OUT_RING (chan, NvSema);
- OUT_RING (chan, sema->mem->start);
+ OUT_RING (chan, offset);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {
- struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
- u64 offset = vma->offset + sema->mem->start;
-
ret = RING_SPACE(chan, 7);
if (ret)
return ret;
@@ -364,9 +362,6 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
OUT_RING (chan, 1);
OUT_RING (chan, 1); /* ACQUIRE_EQ */
} else {
- struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
- u64 offset = vma->offset + sema->mem->start;
-
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
@@ -394,6 +389,7 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *fence = NULL;
+ u64 offset = chan->fence.vma.offset + sema->mem->start;
int ret;
if (dev_priv->chipset < 0x84) {
@@ -403,14 +399,11 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvSema);
- OUT_RING (chan, sema->mem->start);
+ OUT_RING (chan, offset);
BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {
- struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
- u64 offset = vma->offset + sema->mem->start;
-
ret = RING_SPACE(chan, 7);
if (ret)
return ret;
@@ -423,9 +416,6 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
OUT_RING (chan, 1);
OUT_RING (chan, 2); /* RELEASE */
} else {
- struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
- u64 offset = vma->offset + sema->mem->start;
-
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
@@ -540,6 +530,12 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
nouveau_gpuobj_ref(NULL, &obj);
if (ret)
return ret;
+ } else {
+ /* map fence bo into channel's vm */
+ ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
+ &chan->fence.vma);
+ if (ret)
+ return ret;
}
INIT_LIST_HEAD(&chan->fence.pending);
@@ -551,10 +547,10 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *tmp, *fence;
spin_lock(&chan->fence.lock);
-
list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
fence->signalled = true;
list_del(&fence->entry);
@@ -564,8 +560,9 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
kref_put(&fence->refcount, nouveau_fence_del);
}
-
spin_unlock(&chan->fence.lock);
+
+ nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
}
int
@@ -577,7 +574,7 @@ nouveau_fence_init(struct drm_device *dev)
/* Create a shared VRAM heap for cross-channel sync. */
if (USE_SEMA(dev)) {
- ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
0, 0, &dev_priv->fence.bo);
if (ret)
return ret;
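
Cross-channel semaphores previously read the fence bo's single vma; with per-channel VMs each channel maps the shared fence buffer itself (the nouveau_bo_vma_add call in channel_init above) and builds semaphore addresses from its own mapping. Schematically, for the >= 0x84 paths (sketch only):

	/* semaphore slot address as seen from this channel's VM */
	u64 offset = chan->fence.vma.offset + sema->mem->start;

	OUT_RING(chan, upper_32_bits(offset));
	OUT_RING(chan, lower_32_bits(offset));
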
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index b52e4601824..5f0bc57fdaa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -60,9 +60,71 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
}
int
-nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
- int size, int align, uint32_t domain, uint32_t tile_mode,
- uint32_t tile_flags, struct nouveau_bo **pnvbo)
+nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
+{
+ struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+ struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_vma *vma;
+ int ret;
+
+ if (!fpriv->vm)
+ return 0;
+
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ if (ret)
+ return ret;
+
+ vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+ if (!vma) {
+ vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (!vma) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
+ if (ret) {
+ kfree(vma);
+ goto out;
+ }
+ } else {
+ vma->refcount++;
+ }
+
+out:
+ ttm_bo_unreserve(&nvbo->bo);
+ return ret;
+}
+
+void
+nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
+{
+ struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+ struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_vma *vma;
+ int ret;
+
+ if (!fpriv->vm)
+ return;
+
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ if (ret)
+ return;
+
+ vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+ if (vma) {
+ if (--vma->refcount == 0) {
+ nouveau_bo_vma_del(nvbo, vma);
+ kfree(vma);
+ }
+ }
+ ttm_bo_unreserve(&nvbo->bo);
+}
+
+int
+nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
+ uint32_t tile_mode, uint32_t tile_flags,
+ struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
@@ -76,7 +138,7 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
flags |= TTM_PL_FLAG_SYSTEM;
- ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
+ ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
tile_flags, pnvbo);
if (ret)
return ret;
@@ -103,17 +165,28 @@ nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
}
static int
-nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
+nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
+ struct drm_nouveau_gem_info *rep)
{
+ struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_vma *vma;
if (nvbo->bo.mem.mem_type == TTM_PL_TT)
rep->domain = NOUVEAU_GEM_DOMAIN_GART;
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
- rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->offset = nvbo->bo.offset;
+ if (fpriv->vm) {
+ vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+ if (!vma)
+ return -EINVAL;
+
+ rep->offset = vma->offset;
+ }
+
+ rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->map_handle = nvbo->bo.addr_space_offset;
rep->tile_mode = nvbo->tile_mode;
rep->tile_flags = nvbo->tile_flags;
@@ -127,7 +200,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
- struct nouveau_channel *chan = NULL;
int ret = 0;
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
@@ -138,28 +210,21 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (req->channel_hint) {
- chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
- }
-
- ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
+ ret = nouveau_gem_new(dev, req->info.size, req->align,
req->info.domain, req->info.tile_mode,
req->info.tile_flags, &nvbo);
- if (chan)
- nouveau_channel_put(&chan);
if (ret)
return ret;
- ret = nouveau_gem_info(nvbo->gem, &req->info);
- if (ret)
- goto out;
-
ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+ if (ret == 0) {
+ ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
+ if (ret)
+ drm_gem_handle_delete(file_priv, req->info.handle);
+ }
+
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(nvbo->gem);
-out:
return ret;
}
@@ -318,6 +383,7 @@ static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
(void __force __user *)(uintptr_t)user_pbbo_ptr;
struct drm_device *dev = chan->dev;
@@ -356,24 +422,26 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
- if (nvbo->bo.offset == b->presumed.offset &&
- ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
- b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
- (nvbo->bo.mem.mem_type == TTM_PL_TT &&
- b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
- continue;
+ if (dev_priv->card_type < NV_50) {
+ if (nvbo->bo.offset == b->presumed.offset &&
+ ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+ b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+ (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+ b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
+ continue;
- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
- else
- b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
- b->presumed.offset = nvbo->bo.offset;
- b->presumed.valid = 0;
- relocs++;
-
- if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
- &b->presumed, sizeof(b->presumed)))
- return -EFAULT;
+ if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
+ else
+ b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+ b->presumed.offset = nvbo->bo.offset;
+ b->presumed.valid = 0;
+ relocs++;
+
+ if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+ &b->presumed, sizeof(b->presumed)))
+ return -EFAULT;
+ }
}
return relocs;
@@ -548,7 +616,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct nouveau_fence *fence = NULL;
int i, j, ret = 0, do_reloc = 0;
- chan = nouveau_channel_get(dev, file_priv, req->channel);
+ chan = nouveau_channel_get(file_priv, req->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
@@ -782,7 +850,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
if (!gem)
return -ENOENT;
- ret = nouveau_gem_info(gem, req);
+ ret = nouveau_gem_info(file_priv, gem, req);
drm_gem_object_unreference_unlocked(gem);
return ret;
}
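
Taken together, the GEM hooks reference-count one nouveau_vma per (buffer, client VM) pair: the first open of a handle creates the mapping, further opens just bump vma->refcount, and the final close removes it. Condensed (illustrative; follows the functions added above):

	/* gem_open_object: create or reuse this client's mapping */
	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);	/* refcount starts at 1 */
	} else {
		vma->refcount++;
	}

	/* gem_close_object: unmap once the last handle is gone */
	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (vma && --vma->refcount == 0) {
		nouveau_bo_vma_del(nvbo, vma);
		kfree(vma);
	}
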
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 2ba7265bc96..868c7fd7485 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -79,7 +79,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
int i;
stat = nv_rd32(dev, NV03_PMC_INTR_0);
- if (!stat)
+ if (stat == 0 || stat == ~0)
return IRQ_NONE;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 5ee14d216ce..f9ae2fc3d6f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -397,7 +397,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
dma_bits = 40;
} else
- if (0 && drm_pci_device_is_pcie(dev) &&
+ if (0 && pci_is_pcie(dev->pdev) &&
dev_priv->chipset > 0x40 &&
dev_priv->chipset != 0x45) {
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
@@ -423,38 +423,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
- /* reserve space at end of VRAM for PRAMIN */
- if (dev_priv->card_type >= NV_50) {
- dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
- } else
- if (dev_priv->card_type >= NV_40) {
- u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
- u32 rsvd;
-
- /* estimate grctx size, the magics come from nv40_grctx.c */
- if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
- else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
- else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
- else rsvd = 0x4a40 * vs;
- rsvd += 16 * 1024;
- rsvd *= dev_priv->engine.fifo.channels;
-
- /* pciegart table */
- if (drm_pci_device_is_pcie(dev))
- rsvd += 512 * 1024;
-
- /* object storage */
- rsvd += 512 * 1024;
-
- dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
- } else {
- dev_priv->ramin_rsvd_vram = 512 * 1024;
- }
-
- ret = dev_priv->engine.vram.init(dev);
- if (ret)
- return ret;
-
NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
if (dev_priv->vram_sys_base) {
NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
@@ -479,7 +447,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
}
if (dev_priv->card_type < NV_50) {
- ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
0, 0, &dev_priv->vga_ram);
if (ret == 0)
ret = nouveau_bo_pin(dev_priv->vga_ram,
@@ -729,37 +697,31 @@ nouveau_mem_timing_fini(struct drm_device *dev)
}
static int
-nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
- struct nouveau_mm *mm;
- u64 size, block, rsvd;
- int ret;
-
- rsvd = (256 * 1024); /* vga memory */
- size = (p_size << PAGE_SHIFT) - rsvd;
- block = dev_priv->vram_rblock_size;
-
- ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
- if (ret)
- return ret;
-
- man->priv = mm;
+ /* nothing to do */
return 0;
}
static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
- struct nouveau_mm *mm = man->priv;
- int ret;
+ /* nothing to do */
+ return 0;
+}
- ret = nouveau_mm_fini(&mm);
- if (ret)
- return ret;
+static inline void
+nouveau_mem_node_cleanup(struct nouveau_mem *node)
+{
+ if (node->vma[0].node) {
+ nouveau_vm_unmap(&node->vma[0]);
+ nouveau_vm_put(&node->vma[0]);
+ }
- man->priv = NULL;
- return 0;
+ if (node->vma[1].node) {
+ nouveau_vm_unmap(&node->vma[1]);
+ nouveau_vm_put(&node->vma[1]);
+ }
}
static void
@@ -768,14 +730,9 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
- struct nouveau_mem *node = mem->mm_node;
struct drm_device *dev = dev_priv->dev;
- if (node->tmp_vma.node) {
- nouveau_vm_unmap(&node->tmp_vma);
- nouveau_vm_put(&node->tmp_vma);
- }
-
+ nouveau_mem_node_cleanup(mem->mm_node);
vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}
@@ -794,7 +751,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
int ret;
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
- size_nc = 1 << nvbo->vma.node->type;
+ size_nc = 1 << nvbo->page_shift;
ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
mem->page_alignment << PAGE_SHIFT, size_nc,
@@ -804,9 +761,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
return (ret == -ENOSPC) ? 0 : ret;
}
- node->page_shift = 12;
- if (nvbo->vma.node)
- node->page_shift = nvbo->vma.node->type;
+ node->page_shift = nvbo->page_shift;
mem->mm_node = node;
mem->start = node->offset >> PAGE_SHIFT;
@@ -862,15 +817,9 @@ static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
- struct nouveau_mem *node = mem->mm_node;
-
- if (node->tmp_vma.node) {
- nouveau_vm_unmap(&node->tmp_vma);
- nouveau_vm_put(&node->tmp_vma);
- }
-
+ nouveau_mem_node_cleanup(mem->mm_node);
+ kfree(mem->mm_node);
mem->mm_node = NULL;
- kfree(node);
}
static int
@@ -880,11 +829,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_vma *vma = &nvbo->vma;
- struct nouveau_vm *vm = vma->vm;
struct nouveau_mem *node;
- int ret;
if (unlikely((mem->num_pages << PAGE_SHIFT) >=
dev_priv->gart_info.aper_size))
@@ -893,24 +838,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
+ node->page_shift = 12;
- /* This node must be for evicting large-paged VRAM
- * to system memory. Due to a nv50 limitation of
- * not being able to mix large/small pages within
- * the same PDE, we need to create a temporary
- * small-paged VMA for the eviction.
- */
- if (vma->node->type != vm->spg_shift) {
- ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
- vm->spg_shift, NV_MEM_ACCESS_RW,
- &node->tmp_vma);
- if (ret) {
- kfree(node);
- return ret;
- }
- }
-
- node->page_shift = nvbo->vma.node->type;
mem->mm_node = node;
mem->start = 0;
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 7609756b6fa..1640dec3b82 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -158,11 +158,18 @@ int
nouveau_mm_fini(struct nouveau_mm **prmm)
{
struct nouveau_mm *rmm = *prmm;
- struct nouveau_mm_node *heap =
+ struct nouveau_mm_node *node, *heap =
list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
- if (!list_is_singular(&rmm->nodes))
+ if (!list_is_singular(&rmm->nodes)) {
+ printk(KERN_ERR "nouveau_mm not empty at destroy time!\n");
+ list_for_each_entry(node, &rmm->nodes, nl_entry) {
+ printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
+ node->type, node->offset, node->length);
+ }
+ WARN_ON(1);
return -EBUSY;
+ }
kfree(heap);
kfree(rmm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index 1f7483aae9a..b9c016d2155 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -52,6 +52,7 @@ int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
int nv50_vram_init(struct drm_device *);
+void nv50_vram_fini(struct drm_device *);
int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
u32 memtype, struct nouveau_mem **);
void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 5b39718ae1f..6abdbe6530a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,6 +34,7 @@ int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *ntfy = NULL;
uint32_t flags, ttmpl;
int ret;
@@ -46,7 +47,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
ttmpl = TTM_PL_FLAG_TT;
}
- ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
+ ret = nouveau_gem_new(dev, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
if (ret)
return ret;
@@ -58,14 +59,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
if (ret)
goto out_err;
+ if (dev_priv->card_type >= NV_50) {
+ ret = nouveau_bo_vma_add(ntfy, chan->vm, &chan->notifier_vma);
+ if (ret)
+ goto out_err;
+ }
+
ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
if (ret)
goto out_err;
chan->notifier_bo = ntfy;
out_err:
- if (ret)
+ if (ret) {
+ nouveau_bo_vma_del(ntfy, &chan->notifier_vma);
drm_gem_object_unreference_unlocked(ntfy->gem);
+ }
return ret;
}
@@ -78,6 +87,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
if (!chan->notifier_bo)
return;
+ nouveau_bo_vma_del(chan->notifier_bo, &chan->notifier_vma);
nouveau_bo_unmap(chan->notifier_bo);
mutex_lock(&dev->struct_mutex);
nouveau_bo_unpin(chan->notifier_bo);
@@ -122,10 +132,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
target = NV_MEM_TARGET_VRAM;
else
target = NV_MEM_TARGET_GART;
- offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
+ offset = chan->notifier_bo->bo.offset;
} else {
target = NV_MEM_TARGET_VM;
- offset = chan->notifier_bo->vma.offset;
+ offset = chan->notifier_vma.offset;
}
offset += mem->start;
@@ -183,7 +193,7 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
if (unlikely(dev_priv->card_type >= NV_C0))
return -EINVAL;
- chan = nouveau_channel_get(dev, file_priv, na->channel);
+ chan = nouveau_channel_get(file_priv, na->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 8f97016f5b2..159b7c437d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -125,7 +125,7 @@ nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
int ret = -EINVAL;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (chid > 0 && chid < dev_priv->engine.fifo.channels)
+ if (chid >= 0 && chid < dev_priv->engine.fifo.channels)
chan = dev_priv->channels.ptr[chid];
if (chan)
ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
@@ -191,7 +191,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
spin_unlock(&dev_priv->ramin_lock);
- if (chan) {
+ if (!(flags & NVOBJ_FLAG_VM) && chan) {
ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
if (ramin)
ramin = drm_mm_get_block(ramin, size, align);
@@ -208,7 +208,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
gpuobj->vinst = ramin->start + chan->ramin->vinst;
gpuobj->node = ramin;
} else {
- ret = instmem->get(gpuobj, size, align);
+ ret = instmem->get(gpuobj, chan, size, align);
if (ret) {
nouveau_gpuobj_ref(NULL, &gpuobj);
return ret;
@@ -690,35 +690,64 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
return 0;
}
+static int
+nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *pgd = NULL;
+ struct nouveau_vm_pgd *vpgd;
+ int ret, i;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
+ if (ret)
+ return ret;
+
+ /* create page directory for this vm if none currently exists,
+ * will be destroyed automagically when last reference to the
+ * vm is removed
+ */
+ if (list_empty(&vm->pgd_list)) {
+ ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
+ if (ret)
+ return ret;
+ }
+ nouveau_vm_ref(vm, &chan->vm, pgd);
+ nouveau_gpuobj_ref(NULL, &pgd);
+
+ /* point channel at vm's page directory */
+ vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
+ nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
+ nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
+ nv_wo32(chan->ramin, 0x0208, 0xffffffff);
+ nv_wo32(chan->ramin, 0x020c, 0x000000ff);
+
+ /* map display semaphore buffers into channel's vm */
+ for (i = 0; i < 2; i++) {
+ struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];
+
+ ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm,
+ &chan->dispc_vma[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
uint32_t vram_h, uint32_t tt_h)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
+ struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
int ret, i;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
-
- if (dev_priv->card_type == NV_C0) {
- struct nouveau_vm *vm = dev_priv->chan_vm;
- struct nouveau_vm_pgd *vpgd;
-
- ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
- &chan->ramin);
- if (ret)
- return ret;
-
- nouveau_vm_ref(vm, &chan->vm, NULL);
-
- vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
- nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
- nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
- nv_wo32(chan->ramin, 0x0208, 0xffffffff);
- nv_wo32(chan->ramin, 0x020c, 0x000000ff);
- return 0;
- }
+ if (dev_priv->card_type == NV_C0)
+ return nvc0_gpuobj_channel_init(chan, vm);
/* Allocate a chunk of memory for per-channel object storage */
ret = nouveau_gpuobj_channel_init_pramin(chan);
@@ -731,7 +760,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
* - Allocate per-channel page-directory
* - Link with shared channel VM
*/
- if (dev_priv->chan_vm) {
+ if (vm) {
u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
u64 vm_vinst = chan->ramin->vinst + pgd_offs;
u32 vm_pinst = chan->ramin->pinst;
@@ -744,7 +773,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
if (ret)
return ret;
- nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
+ nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
}
/* RAMHT */
@@ -768,7 +797,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
struct nouveau_gpuobj *sem = NULL;
struct nv50_display_crtc *dispc =
&nv50_display(dev)->crtc[i];
- u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
+ u64 offset = dispc->sem.bo->bo.offset;
ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
NV_MEM_ACCESS_RW,
@@ -841,13 +870,22 @@ void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
NV_DEBUG(dev, "ch%d\n", chan->id);
- nouveau_ramht_ref(NULL, &chan->ramht, chan);
+ if (dev_priv->card_type >= NV_50) {
+ struct nv50_display *disp = nv50_display(dev);
+
+ for (i = 0; i < 2; i++) {
+ struct nv50_display_crtc *dispc = &disp->crtc[i];
+ nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
+ }
- nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
- nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+ nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
+ nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+ }
if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
@@ -909,7 +947,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
if (init->handle == ~0)
return -EINVAL;
- chan = nouveau_channel_get(dev, file_priv, init->channel);
+ chan = nouveau_channel_get(file_priv, init->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
@@ -936,7 +974,7 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
struct nouveau_channel *chan;
int ret;
- chan = nouveau_channel_get(dev, file_priv, objfree->channel);
+ chan = nouveau_channel_get(file_priv, objfree->channel);
if (IS_ERR(chan))
return PTR_ERR(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 82fad914e64..c444cadbf84 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -429,7 +429,7 @@ nouveau_sgdma_init(struct drm_device *dev)
u32 aper_size, align;
int ret;
- if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
+ if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
aper_size = 512 * 1024 * 1024;
else
aper_size = 64 * 1024 * 1024;
@@ -458,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
dev_priv->gart_info.type = NOUVEAU_GART_HW;
dev_priv->gart_info.func = &nv50_sgdma_backend;
} else
- if (0 && drm_pci_device_is_pcie(dev) &&
+ if (0 && pci_is_pcie(dev->pdev) &&
dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
if (nv44_graph_class(dev)) {
dev_priv->gart_info.func = &nv44_sgdma_backend;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 144f79a350a..10656e430b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -91,6 +91,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
engine->vram.init = nouveau_mem_detect;
+ engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x10:
@@ -139,6 +140,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
engine->vram.init = nouveau_mem_detect;
+ engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x20:
@@ -187,6 +189,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.clock_pre = nv04_pm_clock_pre;
engine->pm.clock_set = nv04_pm_clock_set;
engine->vram.init = nouveau_mem_detect;
+ engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x30:
@@ -237,6 +240,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->vram.init = nouveau_mem_detect;
+ engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x40:
@@ -289,6 +293,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->pm.temp_get = nv40_temp_get;
engine->vram.init = nouveau_mem_detect;
+ engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
break;
case 0x50:
@@ -366,12 +371,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
else
engine->pm.temp_get = nv40_temp_get;
engine->vram.init = nv50_vram_init;
+ engine->vram.takedown = nv50_vram_fini;
engine->vram.get = nv50_vram_new;
engine->vram.put = nv50_vram_del;
engine->vram.flags_valid = nv50_vram_flags_valid;
break;
case 0xC0:
- case 0xD0:
engine->instmem.init = nvc0_instmem_init;
engine->instmem.takedown = nvc0_instmem_takedown;
engine->instmem.suspend = nvc0_instmem_suspend;
@@ -412,9 +417,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
engine->vram.init = nvc0_vram_init;
+ engine->vram.takedown = nv50_vram_fini;
engine->vram.get = nvc0_vram_new;
engine->vram.put = nv50_vram_del;
engine->vram.flags_valid = nvc0_vram_flags_valid;
+ engine->pm.temp_get = nv84_temp_get;
break;
default:
NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -448,8 +455,8 @@ nouveau_card_init_channel(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;
- ret = nouveau_channel_alloc(dev, &dev_priv->channel,
- (struct drm_file *)-2, NvDmaFB, NvDmaTT);
+ ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
+ NvDmaFB, NvDmaTT);
if (ret)
return ret;
@@ -528,7 +535,7 @@ nouveau_card_init(struct drm_device *dev)
nouveau_pm_init(dev);
- ret = nouveau_mem_vram_init(dev);
+ ret = engine->vram.init(dev);
if (ret)
goto out_bios;
@@ -540,10 +547,14 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_gpuobj;
- ret = nouveau_mem_gart_init(dev);
+ ret = nouveau_mem_vram_init(dev);
if (ret)
goto out_instmem;
+ ret = nouveau_mem_gart_init(dev);
+ if (ret)
+ goto out_ttmvram;
+
/* PMC */
ret = engine->mc.init(dev);
if (ret)
@@ -564,7 +575,7 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_timer;
- if (!nouveau_noaccel) {
+ if (!dev_priv->noaccel) {
switch (dev_priv->card_type) {
case NV_04:
nv04_graph_create(dev);
@@ -676,14 +687,14 @@ out_vblank:
drm_vblank_cleanup(dev);
engine->display.destroy(dev);
out_fifo:
- if (!nouveau_noaccel)
+ if (!dev_priv->noaccel)
engine->fifo.takedown(dev);
out_engine:
- if (!nouveau_noaccel) {
+ if (!dev_priv->noaccel) {
for (e = e - 1; e >= 0; e--) {
if (!dev_priv->eng[e])
continue;
- dev_priv->eng[e]->fini(dev, e);
+ dev_priv->eng[e]->fini(dev, e, false);
dev_priv->eng[e]->destroy(dev,e );
}
}
@@ -697,12 +708,14 @@ out_mc:
engine->mc.takedown(dev);
out_gart:
nouveau_mem_gart_fini(dev);
+out_ttmvram:
+ nouveau_mem_vram_fini(dev);
out_instmem:
engine->instmem.takedown(dev);
out_gpuobj:
nouveau_gpuobj_takedown(dev);
out_vram:
- nouveau_mem_vram_fini(dev);
+ engine->vram.takedown(dev);
out_bios:
nouveau_pm_fini(dev);
nouveau_bios_takedown(dev);
@@ -719,16 +732,21 @@ static void nouveau_card_takedown(struct drm_device *dev)
struct nouveau_engine *engine = &dev_priv->engine;
int e;
+ drm_kms_helper_poll_fini(dev);
+ nouveau_fbcon_fini(dev);
+
if (dev_priv->channel) {
- nouveau_fence_fini(dev);
nouveau_channel_put_unlocked(&dev_priv->channel);
+ nouveau_fence_fini(dev);
}
- if (!nouveau_noaccel) {
+ engine->display.destroy(dev);
+
+ if (!dev_priv->noaccel) {
engine->fifo.takedown(dev);
for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
if (dev_priv->eng[e]) {
- dev_priv->eng[e]->fini(dev, e);
+ dev_priv->eng[e]->fini(dev, e, false);
dev_priv->eng[e]->destroy(dev,e );
}
}
@@ -749,10 +767,11 @@ static void nouveau_card_takedown(struct drm_device *dev)
ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
mutex_unlock(&dev->struct_mutex);
nouveau_mem_gart_fini(dev);
+ nouveau_mem_vram_fini(dev);
engine->instmem.takedown(dev);
nouveau_gpuobj_takedown(dev);
- nouveau_mem_vram_fini(dev);
+ engine->vram.takedown(dev);
nouveau_irq_fini(dev);
drm_vblank_cleanup(dev);
@@ -763,6 +782,41 @@ static void nouveau_card_takedown(struct drm_device *dev)
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
+int
+nouveau_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fpriv *fpriv;
+ int ret;
+
+ fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+ if (unlikely(!fpriv))
+ return -ENOMEM;
+
+ spin_lock_init(&fpriv->lock);
+ INIT_LIST_HEAD(&fpriv->channels);
+
+ if (dev_priv->card_type == NV_50) {
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
+ &fpriv->vm);
+ if (ret) {
+ kfree(fpriv);
+ return ret;
+ }
+ } else
+ if (dev_priv->card_type >= NV_C0) {
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
+ &fpriv->vm);
+ if (ret) {
+ kfree(fpriv);
+ return ret;
+ }
+ }
+
+ file_priv->driver_priv = fpriv;
+ return 0;
+}
+
/* here a client dies, release the stuff that was allocated for its
* file_priv */
void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
@@ -770,6 +824,14 @@ void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
nouveau_channel_cleanup(dev, file_priv);
}
+void
+nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+ nouveau_vm_ref(NULL, &fpriv->vm, NULL);
+ kfree(fpriv);
+}
+
/* first module load, setup the mmio/fb mapping */
/* KMS: we need mmio at load time, not when the first drm client opens. */
int nouveau_firstopen(struct drm_device *dev)
@@ -923,7 +985,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->card_type = NV_50;
break;
case 0xc0:
- case 0xd0:
dev_priv->card_type = NV_C0;
break;
default:
@@ -935,6 +996,25 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
dev_priv->card_type, reg0);
+ /* Determine whether we'll attempt acceleration or not; some
+ * cards are disabled by default here because they are known to be
+ * non-functional, or have never been tested due to lack of hw.
+ */
+ dev_priv->noaccel = !!nouveau_noaccel;
+ if (nouveau_noaccel == -1) {
+ switch (dev_priv->chipset) {
+ case 0xc1: /* known broken */
+ case 0xc8: /* never tested */
+ NV_INFO(dev, "acceleration disabled by default, pass "
+ "noaccel=0 to force enable\n");
+ dev_priv->noaccel = true;
+ break;
+ default:
+ dev_priv->noaccel = false;
+ break;
+ }
+ }
+
ret = nouveau_remove_conflicting_drivers(dev);
if (ret)
goto err_mmio;
@@ -999,11 +1079,7 @@ void nouveau_lastclose(struct drm_device *dev)
int nouveau_unload(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
- drm_kms_helper_poll_fini(dev);
- nouveau_fbcon_fini(dev);
- engine->display.destroy(dev);
nouveau_card_takedown(dev);
iounmap(dev_priv->mmio);
@@ -1033,7 +1109,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_BUS_TYPE:
if (drm_pci_device_is_agp(dev))
getparam->value = NV_AGP;
- else if (drm_pci_device_is_pcie(dev))
+ else if (pci_is_pcie(dev->pdev))
getparam->value = NV_PCIE;
else
getparam->value = NV_PCI;
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 649b0413b09..081ca7b03e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -43,7 +43,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
/* Set the default sensor's constants */
sensor->offset_constant = 0;
- sensor->offset_mult = 1;
+ sensor->offset_mult = 0;
sensor->offset_div = 1;
sensor->slope_mult = 1;
sensor->slope_div = 1;
@@ -99,6 +99,13 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
sensor->slope_mult = 431;
sensor->slope_div = 10000;
break;
+
+ case 0x67:
+ sensor->offset_mult = -26149;
+ sensor->offset_div = 100;
+ sensor->slope_mult = 484;
+ sensor->slope_div = 10000;
+ break;
}
}
@@ -109,7 +116,7 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
/* Read the entries from the table */
for (i = 0; i < entries; i++) {
- u16 value = ROM16(temp[1]);
+ s16 value = ROM16(temp[1]);
switch (temp[0]) {
case 0x01:
@@ -160,8 +167,8 @@ nv40_sensor_setup(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
- u32 offset = sensor->offset_mult / sensor->offset_div;
- u32 sensor_calibration;
+ s32 offset = sensor->offset_mult / sensor->offset_div;
+ s32 sensor_calibration;
/* set up the sensors */
sensor_calibration = 120 - offset - sensor->offset_constant;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 519a6b4bba4..244fd38fdb8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -369,23 +369,26 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
}
static void
-nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
{
struct nouveau_vm_pgd *vpgd, *tmp;
+ struct nouveau_gpuobj *pgd = NULL;
- if (!pgd)
+ if (!mpgd)
return;
mutex_lock(&vm->mm->mutex);
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
- if (vpgd->obj != pgd)
- continue;
-
- list_del(&vpgd->head);
- nouveau_gpuobj_ref(NULL, &vpgd->obj);
- kfree(vpgd);
+ if (vpgd->obj == mpgd) {
+ pgd = vpgd->obj;
+ list_del(&vpgd->head);
+ kfree(vpgd);
+ break;
+ }
}
mutex_unlock(&vm->mm->mutex);
+
+ nouveau_gpuobj_ref(NULL, &pgd);
}
static void
@@ -396,8 +399,8 @@ nouveau_vm_del(struct nouveau_vm *vm)
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
nouveau_vm_unlink(vm, vpgd->obj);
}
- WARN_ON(nouveau_mm_fini(&vm->mm) != 0);
+ nouveau_mm_fini(&vm->mm);
kfree(vm->pgt);
kfree(vm);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index c48a9fc2b47..579ca8cc223 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -41,6 +41,8 @@ struct nouveau_vm_pgd {
};
struct nouveau_vma {
+ struct list_head head;
+ int refcount;
struct nouveau_vm *vm;
struct nouveau_mm_node *node;
u64 offset;
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index f1a3ae49199..118261d4927 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -1035,7 +1035,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
- ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index 3626ee7db3b..dbdea8ed392 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -450,13 +450,13 @@ nv04_graph_context_del(struct nouveau_channel *chan, int engine)
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv04_graph_fifo_access(dev, false);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
/* Unload the context if it's the currently active one */
if (nv04_graph_channel(dev) == chan)
nv04_graph_unload_context(dev);
- nv04_graph_fifo_access(dev, true);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the context resources */
@@ -538,24 +538,18 @@ nv04_graph_init(struct drm_device *dev, int engine)
}
static int
-nv04_graph_fini(struct drm_device *dev, int engine)
+nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ return -EBUSY;
+ }
nv04_graph_unload_context(dev);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
return 0;
}
-void
-nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
-{
- if (enabled)
- nv_wr32(dev, NV04_PGRAPH_FIFO,
- nv_rd32(dev, NV04_PGRAPH_FIFO) | 1);
- else
- nv_wr32(dev, NV04_PGRAPH_FIFO,
- nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1);
-}
-
static int
nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data)
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index b8611b95531..c1248e0740a 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -28,6 +28,31 @@ int nv04_instmem_init(struct drm_device *dev)
/* RAMIN always available */
dev_priv->ramin_available = true;
+ /* Reserve space at end of VRAM for PRAMIN */
+ if (dev_priv->card_type >= NV_40) {
+ u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
+ u32 rsvd;
+
+ /* estimate grctx size, the magics come from nv40_grctx.c */
+ if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
+ else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
+ else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
+ else rsvd = 0x4a40 * vs;
+ rsvd += 16 * 1024;
+ rsvd *= dev_priv->engine.fifo.channels;
+
+ /* pciegart table */
+ if (pci_is_pcie(dev->pdev))
+ rsvd += 512 * 1024;
+
+ /* object storage */
+ rsvd += 512 * 1024;
+
+ dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
+ } else {
+ dev_priv->ramin_rsvd_vram = 512 * 1024;
+ }
+
/* Setup shared RAMHT */
ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
NVOBJ_FLAG_ZERO_ALLOC, &ramht);
@@ -112,7 +137,8 @@ nv04_instmem_resume(struct drm_device *dev)
}
int
-nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
+ u32 size, u32 align)
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
struct drm_mm_node *ramin = NULL;
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 0930c6cb88e..7255e4a4d3f 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -708,8 +708,8 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
- nv04_graph_fifo_access(dev, true);
- nv04_graph_fifo_access(dev, false);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
/* Restore the FIFO state */
for (i = 0; i < ARRAY_SIZE(fifo); i++)
@@ -879,13 +879,13 @@ nv10_graph_context_del(struct nouveau_channel *chan, int engine)
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv04_graph_fifo_access(dev, false);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
/* Unload the context if it's the currently active one */
if (nv10_graph_channel(dev) == chan)
nv10_graph_unload_context(dev);
- nv04_graph_fifo_access(dev, true);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the context resources */
@@ -957,8 +957,13 @@ nv10_graph_init(struct drm_device *dev, int engine)
}
static int
-nv10_graph_fini(struct drm_device *dev, int engine)
+nv10_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ return -EBUSY;
+ }
nv10_graph_unload_context(dev);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index affc7d7dd02..183e37512ef 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -454,13 +454,13 @@ nv20_graph_context_del(struct nouveau_channel *chan, int engine)
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv04_graph_fifo_access(dev, false);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
/* Unload the context if it's the currently active one */
if (nv10_graph_channel(dev) == chan)
nv20_graph_unload_context(dev);
- nv04_graph_fifo_access(dev, true);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the context resources */
@@ -654,8 +654,13 @@ nv30_graph_init(struct drm_device *dev, int engine)
}
int
-nv20_graph_fini(struct drm_device *dev, int engine)
+nv20_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ return -EBUSY;
+ }
nv20_graph_unload_context(dev);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
return 0;
@@ -753,6 +758,7 @@ nv20_graph_create(struct drm_device *dev)
break;
default:
NV_ERROR(dev, "PGRAPH: unknown chipset\n");
+ kfree(pgraph);
return 0;
}
} else {
@@ -774,6 +780,7 @@ nv20_graph_create(struct drm_device *dev)
break;
default:
NV_ERROR(dev, "PGRAPH: unknown chipset\n");
+ kfree(pgraph);
return 0;
}
}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 5beb01b8ace..ba14a93d8af 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -35,89 +35,6 @@ struct nv40_graph_engine {
u32 grctx_size;
};
-static struct nouveau_channel *
-nv40_graph_channel(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *grctx;
- uint32_t inst;
- int i;
-
- inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
- if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
- return NULL;
- inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
-
- for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
- if (!dev_priv->channels.ptr[i])
- continue;
-
- grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
- if (grctx && grctx->pinst == inst)
- return dev_priv->channels.ptr[i];
- }
-
- return NULL;
-}
-
-static int
-nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
-{
- uint32_t old_cp, tv = 1000, tmp;
- int i;
-
- old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
- nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
-
- tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
- tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
- NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);
-
- tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
- tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);
-
- nouveau_wait_for_idle(dev);
-
- for (i = 0; i < tv; i++) {
- if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
- break;
- }
-
- nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
-
- if (i == tv) {
- uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
- NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
- NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
- ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
- ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
- NV_ERROR(dev, "0x40030C = 0x%08x\n",
- nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int
-nv40_graph_unload_context(struct drm_device *dev)
-{
- uint32_t inst;
- int ret;
-
- inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
- if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
- return 0;
- inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
-
- ret = nv40_graph_transfer_context(dev, inst, 1);
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
- return ret;
-}
-
static int
nv40_graph_context_new(struct nouveau_channel *chan, int engine)
{
@@ -163,16 +80,16 @@ nv40_graph_context_del(struct nouveau_channel *chan, int engine)
struct nouveau_gpuobj *grctx = chan->engctx[engine];
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 inst = 0x01000000 | (grctx->pinst >> 4);
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv04_graph_fifo_access(dev, false);
-
- /* Unload the context if it's the currently active one */
- if (nv40_graph_channel(dev) == chan)
- nv40_graph_unload_context(dev);
-
- nv04_graph_fifo_access(dev, true);
+ nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
+ if (nv_rd32(dev, 0x40032c) == inst)
+ nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
+ if (nv_rd32(dev, 0x400330) == inst)
+ nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
+ nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the context resources */
@@ -429,9 +346,20 @@ nv40_graph_init(struct drm_device *dev, int engine)
}
static int
-nv40_graph_fini(struct drm_device *dev, int engine)
+nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
- nv40_graph_unload_context(dev);
+ u32 inst = nv_rd32(dev, 0x40032c);
+ if (inst & 0x01000000) {
+ nv_wr32(dev, 0x400720, 0x00000000);
+ nv_wr32(dev, 0x400784, inst);
+ nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
+ nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
+ if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
+ u32 insn = nv_rd32(dev, 0x400308);
+ NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
+ }
+ nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_mpeg.c b/drivers/gpu/drm/nouveau/nv40_mpeg.c
index 6d2af292a2e..ad03a0e1fc7 100644
--- a/drivers/gpu/drm/nouveau/nv40_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv40_mpeg.c
@@ -137,7 +137,7 @@ nv40_mpeg_init(struct drm_device *dev, int engine)
}
static int
-nv40_mpeg_fini(struct drm_device *dev, int engine)
+nv40_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
{
/*XXX: context save? */
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index ebabacf38da..46ad59ea218 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -104,7 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
OUT_RING(evo, nv_crtc->lut.depth == 8 ?
NV50_EVO_CRTC_CLUT_MODE_OFF :
NV50_EVO_CRTC_CLUT_MODE_ON);
- OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
+ OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
if (dev_priv->chipset != 0x50) {
BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
OUT_RING(evo, NvEvoVRAM);
@@ -372,7 +372,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nouveau_bo_unmap(cursor);
- nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
+ nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
nv_crtc->cursor.show(nv_crtc, true);
out:
@@ -546,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
}
}
- nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
+ nv_crtc->fb.offset = fb->nvbo->bo.offset;
nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
@@ -747,7 +747,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
}
nv_crtc->lut.depth = 0;
- ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, &nv_crtc->lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
@@ -773,7 +773,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
- ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 08da478ba54..db1a5f4b711 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -415,8 +415,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* synchronise with the rendering channel, if necessary */
if (likely(chan)) {
- u64 offset = dispc->sem.bo->vma.offset + dispc->sem.offset;
-
ret = RING_SPACE(chan, 10);
if (ret) {
WIND_RING(evo);
@@ -438,6 +436,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
else
OUT_RING (chan, chan->vram_handle);
} else {
+ u64 offset = chan->dispc_vma[nv_crtc->index].offset;
+ offset += dispc->sem.offset;
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
@@ -484,7 +484,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (evo, 0x00000000);
OUT_RING (evo, 0x00000000);
BEGIN_RING(evo, 0, 0x0800, 5);
- OUT_RING (evo, (nv_fb->nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
+ OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
OUT_RING (evo, 0);
OUT_RING (evo, (fb->height << 16) | fb->width);
OUT_RING (evo, nv_fb->r_pitch);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index c8e83c1a4de..c99d9751880 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -38,6 +38,7 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
return;
*pevo = NULL;
+ nouveau_ramht_ref(NULL, &evo->ramht, evo);
nouveau_gpuobj_channel_takedown(evo);
nouveau_bo_unmap(evo->pushbuf_bo);
nouveau_bo_ref(NULL, &evo->pushbuf_bo);
@@ -116,7 +117,7 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
evo->user_get = 4;
evo->user_put = 0;
- ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+ ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
&evo->pushbuf_bo);
if (ret == 0)
ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
@@ -153,7 +154,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
{
struct drm_device *dev = evo->dev;
int id = evo->id, ret, i;
- u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT;
+ u64 pushbuf = evo->pushbuf_bo->bo.offset;
u32 tmp;
tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
@@ -331,16 +332,15 @@ nv50_evo_create(struct drm_device *dev)
if (ret)
goto err;
- ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, &dispc->sem.bo);
if (!ret) {
- offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
-
ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
if (!ret)
ret = nouveau_bo_map(dispc->sem.bo);
if (ret)
nouveau_bo_ref(NULL, &dispc->sem.bo);
+ offset = dispc->sem.bo->bo.offset;
}
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 791ded1c5c6..dc75a720652 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -159,7 +159,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
+ struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
int ret, format;
switch (info->var.bits_per_pixel) {
@@ -247,8 +247,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
- OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
+ OUT_RING(chan, upper_32_bits(fb->vma.offset));
+ OUT_RING(chan, lower_32_bits(fb->vma.offset));
BEGIN_RING(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -256,8 +256,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
- OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
+ OUT_RING(chan, upper_32_bits(fb->vma.offset));
+ OUT_RING(chan, lower_32_bits(fb->vma.offset));
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index e25cbb46789..d43c46caa76 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -31,7 +31,6 @@
#include "nouveau_grctx.h"
#include "nouveau_dma.h"
#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
#include "nv50_evo.h"
struct nv50_graph_engine {
@@ -125,7 +124,6 @@ static void
nv50_graph_init_reset(struct drm_device *dev)
{
uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
-
NV_DEBUG(dev, "\n");
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
@@ -255,9 +253,13 @@ nv50_graph_init(struct drm_device *dev, int engine)
}
static int
-nv50_graph_fini(struct drm_device *dev, int engine)
+nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
- NV_DEBUG(dev, "\n");
+ nv_mask(dev, 0x400500, 0x00010001, 0x00000000);
+ if (!nv_wait(dev, 0x400700, ~0, 0) && suspend) {
+ nv_mask(dev, 0x400500, 0x00010001, 0x00010001);
+ return -EBUSY;
+ }
nv50_graph_unload_context(dev);
nv_wr32(dev, 0x40013c, 0x00000000);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 4f95a1e5822..a7c12c94a5a 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -305,9 +305,9 @@ struct nv50_gpuobj_node {
u32 align;
};
-
int
-nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
+nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
+ u32 size, u32 align)
{
struct drm_device *dev = gpuobj->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -336,7 +336,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
flags |= NV_MEM_ACCESS_SYS;
- ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags,
+ ret = nouveau_vm_get(chan->vm, size, 12, flags,
&node->chan_vma);
if (ret) {
vram->put(dev, &node->vram);
@@ -345,7 +345,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
}
nouveau_vm_map(&node->chan_vma, node->vram);
- gpuobj->vinst = node->chan_vma.offset;
+ gpuobj->linst = node->chan_vma.offset;
}
gpuobj->size = size;
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
index 1dc5913f78c..b57a2d180ad 100644
--- a/drivers/gpu/drm/nouveau/nv50_mpeg.c
+++ b/drivers/gpu/drm/nouveau/nv50_mpeg.c
@@ -160,7 +160,7 @@ nv50_mpeg_init(struct drm_device *dev, int engine)
}
static int
-nv50_mpeg_fini(struct drm_device *dev, int engine)
+nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
{
/*XXX: context save for s/r */
nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index c25c5938642..ffe8b483b7b 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -318,6 +318,8 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry)
uint32_t tmp;
tmp = nv_rd32(dev, 0x61c700 + (or * 0x800));
+ if (!tmp)
+ tmp = nv_rd32(dev, 0x610798 + (or * 8));
switch ((tmp & 0x00000f00) >> 8) {
case 8:
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 1a0dd491a0e..40b84f22d81 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -156,7 +156,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
pinstmem->flush(vm->dev);
/* BAR */
- if (vm != dev_priv->chan_vm) {
+ if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
nv50_vm_flush_engine(vm->dev, 6);
return;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index ffbc3d8cf5b..af32daecd1e 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -51,9 +51,7 @@ void
nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
- struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
- struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm *mm = dev_priv->engine.vram.mm;
struct nouveau_mm_node *this;
struct nouveau_mem *mem;
@@ -84,9 +82,7 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
u32 memtype, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
- struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
- struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm *mm = dev_priv->engine.vram.mm;
struct nouveau_mm_node *r;
struct nouveau_mem *mem;
int comp = (memtype & 0x300) >> 8;
@@ -190,22 +186,35 @@ int
nv50_vram_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 rblock, length;
dev_priv->vram_size = nv_rd32(dev, 0x10020c);
dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
dev_priv->vram_size &= 0xffffffff00ULL;
- switch (dev_priv->chipset) {
- case 0xaa:
- case 0xac:
- case 0xaf:
+ /* IGPs, no funky reordering happens here, they don't have VRAM */
+ if (dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac ||
+ dev_priv->chipset == 0xaf) {
dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
- dev_priv->vram_rblock_size = 4096;
- break;
- default:
- dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
- break;
+ rblock = 4096 >> 12;
+ } else {
+ rblock = nv50_vram_rblock(dev) >> 12;
}
- return 0;
+ length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
+
+ return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock);
+}
+
+void
+nv50_vram_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+
+ nouveau_mm_fini(&vram->mm);
}
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
index 75b809a5174..edece9c616e 100644
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -138,7 +138,7 @@ nv84_crypt_isr(struct drm_device *dev)
}
static int
-nv84_crypt_fini(struct drm_device *dev, int engine)
+nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend)
{
nv_wr32(dev, 0x102140, 0x00000000);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
index b86820a6122..8f356d58e40 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.c
+++ b/drivers/gpu/drm/nouveau/nva3_copy.c
@@ -140,7 +140,7 @@ nva3_copy_init(struct drm_device *dev, int engine)
}
static int
-nva3_copy_fini(struct drm_device *dev, int engine)
+nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
{
nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.c b/drivers/gpu/drm/nouveau/nvc0_copy.c
index 208fa7ab3f4..dddf006f6d8 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.c
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.c
@@ -48,14 +48,14 @@ nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
struct nouveau_gpuobj *ctx = NULL;
int ret;
- ret = nouveau_gpuobj_new(dev, NULL, 256, 256,
+ ret = nouveau_gpuobj_new(dev, chan, 256, 256,
NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
NVOBJ_FLAG_ZERO_ALLOC, &ctx);
if (ret)
return ret;
- nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->vinst));
- nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->vinst));
+ nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
+ nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
dev_priv->engine.instmem.flush(dev);
chan->engctx[engine] = ctx;
@@ -127,7 +127,7 @@ nvc0_copy_init(struct drm_device *dev, int engine)
}
static int
-nvc0_copy_fini(struct drm_device *dev, int engine)
+nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
{
struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index 26a996025dd..08e6b118f02 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2011 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,16 +23,80 @@
*/
#include "drmP.h"
-
+#include "drm.h"
#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+struct nvc0_fb_priv {
+ struct page *r100c10_page;
+ dma_addr_t r100c10;
+};
+
+static void
+nvc0_fb_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nvc0_fb_priv *priv = pfb->priv;
+
+ if (priv->r100c10_page) {
+ pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c10_page);
+ }
+
+ kfree(priv);
+ pfb->priv = NULL;
+}
+
+static int
+nvc0_fb_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nvc0_fb_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ pfb->priv = priv;
+
+ priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!priv->r100c10_page) {
+ nvc0_fb_destroy(dev);
+ return -ENOMEM;
+ }
+
+ priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) {
+ nvc0_fb_destroy(dev);
+ return -EFAULT;
+ }
+
+ return 0;
+}
int
nvc0_fb_init(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fb_priv *priv;
+ int ret;
+
+ if (!dev_priv->engine.fb.priv) {
+ ret = nvc0_fb_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = dev_priv->engine.fb.priv;
+
+ nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
return 0;
}
void
nvc0_fb_takedown(struct drm_device *dev)
{
+ nvc0_fb_destroy(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index fa5d4c23438..a495e48197c 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -159,7 +159,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
+ struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
int ret, format;
ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
@@ -203,8 +203,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
OUT_RING (chan, 0x0000902d);
BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
- OUT_RING (chan, upper_32_bits(chan->notifier_bo->bo.offset));
- OUT_RING (chan, lower_32_bits(chan->notifier_bo->bo.offset));
+ OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset));
+ OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset));
BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
OUT_RING (chan, 0);
BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
@@ -249,8 +249,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
- OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
+ OUT_RING (chan, upper_32_bits(fb->vma.offset));
+ OUT_RING (chan, lower_32_bits(fb->vma.offset));
BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
@@ -260,8 +260,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
- OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
+ OUT_RING (chan, upper_32_bits(fb->vma.offset));
+ OUT_RING (chan, lower_32_bits(fb->vma.offset));
FIRE_RING (chan);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index fb4f5943e01..6f9f341c3e8 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -210,10 +210,10 @@ nvc0_fifo_unload_context(struct drm_device *dev)
int i;
for (i = 0; i < 128; i++) {
- if (!(nv_rd32(dev, 0x003004 + (i * 4)) & 1))
+ if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
continue;
- nv_mask(dev, 0x003004 + (i * 4), 0x00000001, 0x00000000);
+ nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
nv_wr32(dev, 0x002634, i);
if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index ca6db204d64..5b2f6f42046 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -28,7 +28,34 @@
#include "nouveau_drv.h"
#include "nouveau_mm.h"
+
#include "nvc0_graph.h"
+#include "nvc0_grhub.fuc.h"
+#include "nvc0_grgpc.fuc.h"
+
+static void
+nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
+{
+ NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
+ nv_rd32(dev, base + 0x400));
+ NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
+ nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
+ NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
+ nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
+}
+
+static void
+nvc0_graph_ctxctl_debug(struct drm_device *dev)
+{
+ u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
+ u32 gpc;
+
+ nvc0_graph_ctxctl_debug_unit(dev, 0x409000);
+ for (gpc = 0; gpc < gpcnr; gpc++)
+ nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
+}
static int
nvc0_graph_load_context(struct nouveau_channel *chan)
@@ -72,24 +99,44 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
if (!ctx)
return -ENOMEM;
- nvc0_graph_load_context(chan);
-
- nv_wo32(grch->grctx, 0x1c, 1);
- nv_wo32(grch->grctx, 0x20, 0);
- nv_wo32(grch->grctx, 0x28, 0);
- nv_wo32(grch->grctx, 0x2c, 0);
- dev_priv->engine.instmem.flush(dev);
-
- ret = nvc0_grctx_generate(chan);
- if (ret) {
- kfree(ctx);
- return ret;
+ if (!nouveau_ctxfw) {
+ nv_wr32(dev, 0x409840, 0x80000000);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+ nv_wr32(dev, 0x409504, 0x00000001);
+ if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
+ NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
+ nvc0_graph_ctxctl_debug(dev);
+ ret = -EBUSY;
+ goto err;
+ }
+ } else {
+ nvc0_graph_load_context(chan);
+
+ nv_wo32(grch->grctx, 0x1c, 1);
+ nv_wo32(grch->grctx, 0x20, 0);
+ nv_wo32(grch->grctx, 0x28, 0);
+ nv_wo32(grch->grctx, 0x2c, 0);
+ dev_priv->engine.instmem.flush(dev);
}
- ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
- if (ret) {
- kfree(ctx);
- return ret;
+ ret = nvc0_grctx_generate(chan);
+ if (ret)
+ goto err;
+
+ if (!nouveau_ctxfw) {
+ nv_wr32(dev, 0x409840, 0x80000000);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+ nv_wr32(dev, 0x409504, 0x00000002);
+ if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
+ NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
+ nvc0_graph_ctxctl_debug(dev);
+ ret = -EBUSY;
+ goto err;
+ }
+ } else {
+ ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
+ if (ret)
+ goto err;
}
for (i = 0; i < priv->grctx_size; i += 4)
@@ -97,6 +144,10 @@ nvc0_graph_construct_context(struct nouveau_channel *chan)
priv->grctx_vals = ctx;
return 0;
+
+err:
+ kfree(ctx);
+ return ret;
}
static int
@@ -108,50 +159,50 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
int i = 0, gpc, tp, ret;
u32 magic;
- ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM,
+ ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
&grch->unk408004);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM,
+ ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
&grch->unk40800c);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096,
+ ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
&grch->unk418810);
if (ret)
return ret;
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM,
+ ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
&grch->mmio);
if (ret)
return ret;
nv_wo32(grch->mmio, i++ * 4, 0x00408004);
- nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x00408008);
nv_wo32(grch->mmio, i++ * 4, 0x80000018);
nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
- nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x00408010);
nv_wo32(grch->mmio, i++ * 4, 0x80000000);
nv_wo32(grch->mmio, i++ * 4, 0x00418810);
- nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12);
nv_wo32(grch->mmio, i++ * 4, 0x00419848);
- nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12);
+ nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12);
nv_wo32(grch->mmio, i++ * 4, 0x00419004);
- nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x00419008);
nv_wo32(grch->mmio, i++ * 4, 0x00000000);
nv_wo32(grch->mmio, i++ * 4, 0x00418808);
- nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
nv_wo32(grch->mmio, i++ * 4, 0x80000018);
@@ -159,7 +210,7 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
nv_wo32(grch->mmio, i++ * 4, 0x00405830);
nv_wo32(grch->mmio, i++ * 4, magic);
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) {
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) {
u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
nv_wo32(grch->mmio, i++ * 4, reg);
nv_wo32(grch->mmio, i++ * 4, magic);
@@ -186,7 +237,7 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
return -ENOMEM;
chan->engctx[NVOBJ_ENGINE_GR] = grch;
- ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256,
+ ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
&grch->grctx);
if (ret)
@@ -197,8 +248,8 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
if (ret)
goto error;
- nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4);
- nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst));
+ nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
+ nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
pinstmem->flush(dev);
if (!priv->grctx_vals) {
@@ -210,15 +261,20 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
for (i = 0; i < priv->grctx_size; i += 4)
nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
- nv_wo32(grctx, 0xf4, 0);
- nv_wo32(grctx, 0xf8, 0);
- nv_wo32(grctx, 0x10, grch->mmio_nr);
- nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst));
- nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst));
- nv_wo32(grctx, 0x1c, 1);
- nv_wo32(grctx, 0x20, 0);
- nv_wo32(grctx, 0x28, 0);
- nv_wo32(grctx, 0x2c, 0);
+ if (!nouveau_ctxfw) {
+ nv_wo32(grctx, 0x00, grch->mmio_nr);
+ nv_wo32(grctx, 0x04, grch->mmio->linst >> 8);
+ } else {
+ nv_wo32(grctx, 0xf4, 0);
+ nv_wo32(grctx, 0xf8, 0);
+ nv_wo32(grctx, 0x10, grch->mmio_nr);
+ nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
+ nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
+ nv_wo32(grctx, 0x1c, 1);
+ nv_wo32(grctx, 0x20, 0);
+ nv_wo32(grctx, 0x28, 0);
+ nv_wo32(grctx, 0x2c, 0);
+ }
pinstmem->flush(dev);
return 0;
@@ -248,7 +304,7 @@ nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
}
static int
-nvc0_graph_fini(struct drm_device *dev, int engine)
+nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
{
return 0;
}
@@ -296,6 +352,7 @@ static void
nvc0_graph_init_gpc_0(struct drm_device *dev)
{
struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total);
u32 data[TP_MAX / 8];
u8 tpnr[GPC_MAX];
int i, gpc, tpc;
@@ -307,13 +364,6 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
* 465: 3/4/4/0 4 7
* 470: 3/3/4/4 5 5
* 480: 3/4/4/4 6 6
- *
- * magicgpc918
- * 450: 00200000 00000000001000000000000000000000
- * 460: 00124925 00000000000100100100100100100101
- * 465: 000ba2e9 00000000000010111010001011101001
- * 470: 00092493 00000000000010010010010010010011
- * 480: 00088889 00000000000010001000100010001001
*/
memset(data, 0x00, sizeof(data));
@@ -336,10 +386,10 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
priv->tp_nr[gpc]);
nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
- nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918);
+ nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
}
@@ -419,8 +469,51 @@ nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
static int
nvc0_graph_init_ctxctl(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
u32 r000260;
+ int i;
+
+ if (!nouveau_ctxfw) {
+ /* load HUB microcode */
+ r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x4091c0, 0x01000000);
+ for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
+ nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);
+
+ nv_wr32(dev, 0x409180, 0x01000000);
+ for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, 0x409188, i >> 6);
+ nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
+ }
+
+ /* load GPC microcode */
+ nv_wr32(dev, 0x41a1c0, 0x01000000);
+ for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
+ nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]);
+
+ nv_wr32(dev, 0x41a180, 0x01000000);
+ for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, 0x41a188, i >> 6);
+ nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]);
+ }
+ nv_wr32(dev, 0x000260, r000260);
+
+ /* start HUB ucode running, it'll init the GPCs */
+ nv_wr32(dev, 0x409800, dev_priv->chipset);
+ nv_wr32(dev, 0x40910c, 0x00000000);
+ nv_wr32(dev, 0x409100, 0x00000002);
+ if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
+ NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
+ nvc0_graph_ctxctl_debug(dev);
+ return -EBUSY;
+ }
+
+ priv->grctx_size = nv_rd32(dev, 0x409804);
+ return 0;
+ }
/* load fuc microcode */
r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
@@ -528,6 +621,22 @@ nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
}
static void
+nvc0_graph_ctxctl_isr(struct drm_device *dev)
+{
+ u32 ustat = nv_rd32(dev, 0x409c18);
+
+ if (ustat & 0x00000001)
+ NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
+ if (ustat & 0x00080000)
+ NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
+ if (ustat & ~0x00080001)
+ NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
+
+ nvc0_graph_ctxctl_debug(dev);
+ nv_wr32(dev, 0x409c20, ustat);
+}
+
+static void
nvc0_graph_isr(struct drm_device *dev)
{
u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
@@ -578,11 +687,7 @@ nvc0_graph_isr(struct drm_device *dev)
}
if (stat & 0x00080000) {
- u32 ustat = nv_rd32(dev, 0x409c18);
-
- NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);
-
- nv_wr32(dev, 0x409c20, ustat);
+ nvc0_graph_ctxctl_isr(dev);
nv_wr32(dev, 0x400100, 0x00080000);
stat &= ~0x00080000;
}
@@ -606,7 +711,7 @@ nvc0_runk140_isr(struct drm_device *dev)
u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
- NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
+ NV_DEBUG(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
units &= ~(1 << unit);
}
}
@@ -651,10 +756,12 @@ nvc0_graph_destroy(struct drm_device *dev, int engine)
{
struct nvc0_graph_priv *priv = nv_engine(dev, engine);
- nvc0_graph_destroy_fw(&priv->fuc409c);
- nvc0_graph_destroy_fw(&priv->fuc409d);
- nvc0_graph_destroy_fw(&priv->fuc41ac);
- nvc0_graph_destroy_fw(&priv->fuc41ad);
+ if (nouveau_ctxfw) {
+ nvc0_graph_destroy_fw(&priv->fuc409c);
+ nvc0_graph_destroy_fw(&priv->fuc409d);
+ nvc0_graph_destroy_fw(&priv->fuc41ac);
+ nvc0_graph_destroy_fw(&priv->fuc41ad);
+ }
nouveau_irq_unregister(dev, 12);
nouveau_irq_unregister(dev, 25);
@@ -675,13 +782,10 @@ nvc0_graph_create(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvc0_graph_priv *priv;
int ret, gpc, i;
+ u32 fermi;
- switch (dev_priv->chipset) {
- case 0xc0:
- case 0xc3:
- case 0xc4:
- break;
- default:
+ fermi = nvc0_graph_class(dev);
+ if (!fermi) {
NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
return 0;
}
@@ -701,15 +805,17 @@ nvc0_graph_create(struct drm_device *dev)
nouveau_irq_register(dev, 12, nvc0_graph_isr);
nouveau_irq_register(dev, 25, nvc0_runk140_isr);
- if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
- nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
- nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
- nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
- ret = 0;
- goto error;
+ if (nouveau_ctxfw) {
+ NV_INFO(dev, "PGRAPH: using external firmware\n");
+ if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
+ nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
+ nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
+ nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
+ ret = 0;
+ goto error;
+ }
}
-
ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
if (ret)
goto error;
@@ -735,25 +841,28 @@ nvc0_graph_create(struct drm_device *dev)
case 0xc0:
if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
priv->magic_not_rop_nr = 0x07;
- /* filled values up to tp_total, the rest 0 */
- priv->magicgpc918 = 0x000ba2e9;
} else
if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
priv->magic_not_rop_nr = 0x05;
- priv->magicgpc918 = 0x00092493;
} else
if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
priv->magic_not_rop_nr = 0x06;
- priv->magicgpc918 = 0x00088889;
}
break;
case 0xc3: /* 450, 4/0/0/0, 2 */
priv->magic_not_rop_nr = 0x03;
- priv->magicgpc918 = 0x00200000;
break;
case 0xc4: /* 460, 3/4/0/0, 4 */
priv->magic_not_rop_nr = 0x01;
- priv->magicgpc918 = 0x00124925;
+ break;
+ case 0xc1: /* 2/0/0/0, 1 */
+ priv->magic_not_rop_nr = 0x01;
+ break;
+ case 0xc8: /* 4/4/3/4, 5 */
+ priv->magic_not_rop_nr = 0x06;
+ break;
+ case 0xce: /* 4/4/0/0, 4 */
+ priv->magic_not_rop_nr = 0x03;
break;
}
@@ -763,13 +872,16 @@ nvc0_graph_create(struct drm_device *dev)
priv->tp_nr[3], priv->rop_nr);
/* use 0xc3's values... */
priv->magic_not_rop_nr = 0x03;
- priv->magicgpc918 = 0x00200000;
}
NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
+ if (fermi >= 0x9197)
+ NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
+ if (fermi >= 0x9297)
+ NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
new file mode 100644
index 00000000000..2a4b6dc8f9d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
@@ -0,0 +1,400 @@
+/* fuc microcode util functions for nvc0 PGRAPH
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
+define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
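
mmctx_data() packs one save/restore list entry into a single word: the register offset in the low bits and (transfer count - 1) in bits 31:26, which is why mmctx_size later recovers the count with a shift right by 26. A C rendering of the macro, for reference only; the packed values can be cross-checked against the generated data arrays later in this patch:

/* Equivalent of the m4 macro above -- illustration only. */
#define MMCTX_DATA(addr, count)	((((count) - 1) << 26) | (addr))

/* e.g. MMCTX_DATA(0x000400, 6) == 0x14000400 and
 *      MMCTX_DATA(0x000450, 9) == 0x20000450, both visible near the
 *      top of nvc0_grgpc_data[] in nvc0_grgpc.fuc.h below. */
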
+
+ifdef(`include_code', `
+// Error codes
+define(`E_BAD_COMMAND', 0x01)
+define(`E_CMD_OVERFLOW', 0x02)
+
+// Util macros to help with debugging ucode hangs etc
+define(`T_WAIT', 0)
+define(`T_MMCTX', 1)
+define(`T_STRWAIT', 2)
+define(`T_STRINIT', 3)
+define(`T_AUTO', 4)
+define(`T_CHAN', 5)
+define(`T_LOAD', 6)
+define(`T_SAVE', 7)
+define(`T_LCHAN', 8)
+define(`T_LCTXH', 9)
+
+define(`trace_set', `
+ mov $r8 0x83c
+ shl b32 $r8 6
+ clear b32 $r9
+ bset $r9 $1
+ iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
+')
+
+define(`trace_clr', `
+ mov $r8 0x85c
+ shl b32 $r8 6
+ clear b32 $r9
+ bset $r9 $1
+ iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
+')
+
+// queue_put - add request to queue
+//
+// In : $r13 queue pointer
+// $r14 command
+// $r15 data
+//
+queue_put:
+ // make sure we have space..
+ ld b32 $r8 D[$r13 + 0x0] // GET
+ ld b32 $r9 D[$r13 + 0x4] // PUT
+ xor $r8 8
+ cmpu b32 $r8 $r9
+ bra ne queue_put_next
+ mov $r15 E_CMD_OVERFLOW
+ call error
+ ret
+
+ // store cmd/data on queue
+ queue_put_next:
+ and $r8 $r9 7
+ shl b32 $r8 3
+ add b32 $r8 $r13
+ add b32 $r8 8
+ st b32 D[$r8 + 0x0] $r14
+ st b32 D[$r8 + 0x4] $r15
+
+ // update PUT
+ add b32 $r9 1
+ and $r9 0xf
+ st b32 D[$r13 + 0x4] $r9
+ ret
+
+// queue_get - fetch request from queue
+//
+// In : $r13 queue pointer
+//
+// Out: $p1 clear on success (data available)
+// $r14 command
+// $r15 data
+//
+queue_get:
+ bset $flags $p1
+ ld b32 $r8 D[$r13 + 0x0] // GET
+ ld b32 $r9 D[$r13 + 0x4] // PUT
+ cmpu b32 $r8 $r9
+ bra e queue_get_done
+ // fetch first cmd/data pair
+ and $r9 $r8 7
+ shl b32 $r9 3
+ add b32 $r9 $r13
+ add b32 $r9 8
+ ld b32 $r14 D[$r9 + 0x0]
+ ld b32 $r15 D[$r9 + 0x4]
+
+ // update GET
+ add b32 $r8 1
+ and $r8 0xf
+ st b32 D[$r13 + 0x0] $r8
+ bclr $flags $p1
+queue_get_done:
+ ret
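
queue_put/queue_get implement a small ring buffer: an 8-byte GET/PUT header followed by eight command/data slots, with both counters running 0..15 so the low three bits index a slot and bit 3 acts as a lap bit (GET == PUT means empty, GET ^ 8 == PUT means full). A minimal C sketch of the same scheme, assuming the layout described in the comments above:

/* Sketch of the ucode's 8-entry command queue -- not driver code. */
struct fuc_queue {
	unsigned int get, put;			/* 0..15; bit 3 is the lap bit */
	struct { unsigned int cmd, data; } slot[8];
};

static int fuc_queue_put(struct fuc_queue *q, unsigned int cmd, unsigned int data)
{
	if ((q->get ^ 8) == q->put)
		return -1;			/* full: E_CMD_OVERFLOW in the ucode */
	q->slot[q->put & 7].cmd  = cmd;
	q->slot[q->put & 7].data = data;
	q->put = (q->put + 1) & 0xf;
	return 0;
}

static int fuc_queue_get(struct fuc_queue *q, unsigned int *cmd, unsigned int *data)
{
	if (q->get == q->put)
		return -1;			/* empty: $p1 left set in the ucode */
	*cmd  = q->slot[q->get & 7].cmd;
	*data = q->slot[q->get & 7].data;
	q->get = (q->get + 1) & 0xf;
	return 0;
}
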
+
+// nv_rd32 - read 32-bit value from nv register
+//
+// In : $r14 register
+// Out: $r15 value
+//
+nv_rd32:
+ mov $r11 0x728
+ shl b32 $r11 6
+ mov b32 $r12 $r14
+ bset $r12 31 // MMIO_CTRL_PENDING
+ iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
+ nv_rd32_wait:
+ iord $r12 I[$r11 + 0x000]
+ xbit $r12 $r12 31
+ bra ne nv_rd32_wait
+ mov $r10 6 // DONE_MMIO_RD
+ call wait_doneo
+ iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
+ ret
+
+// nv_wr32 - write 32-bit value to nv register
+//
+// In : $r14 register
+// $r15 value
+//
+nv_wr32:
+ mov $r11 0x728
+ shl b32 $r11 6
+ iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL
+ mov b32 $r12 $r14
+ bset $r12 31 // MMIO_CTRL_PENDING
+ bset $r12 30 // MMIO_CTRL_WRITE
+ iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
+ nv_wr32_wait:
+ iord $r12 I[$r11 + 0x000]
+ xbit $r12 $r12 31
+ bra ne nv_wr32_wait
+ ret
+
+// (re)set watchdog timer
+//
+// In : $r15 timeout
+//
+watchdog_reset:
+ mov $r8 0x430
+ shl b32 $r8 6
+ bset $r15 31
+ iowr I[$r8 + 0x000] $r15
+ ret
+
+// clear watchdog timer
+watchdog_clear:
+ mov $r8 0x430
+ shl b32 $r8 6
+ iowr I[$r8 + 0x000] $r0
+ ret
+
+// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
+//
+// In : $r10 bit to wait on
+//
+define(`wait_done', `
+$1:
+ trace_set(T_WAIT);
+ mov $r8 0x818
+ shl b32 $r8 6
+ iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait bit
+ wait_done_$1:
+ mov $r8 0x400
+ shl b32 $r8 6
+ iord $r8 I[$r8 + 0x000] // DONE
+ xbit $r8 $r8 $r10
+ bra $2 wait_done_$1
+ trace_clr(T_WAIT)
+ ret
+')
+wait_done(wait_donez, ne)
+wait_done(wait_doneo, e)
+
+// mmctx_size - determine size of a mmio list transfer
+//
+// In : $r14 mmio list head
+// $r15 mmio list tail
+// Out: $r15 transfer size (in bytes)
+//
+mmctx_size:
+ clear b32 $r9
+ nv_mmctx_size_loop:
+ ld b32 $r8 D[$r14]
+ shr b32 $r8 26
+ add b32 $r8 1
+ shl b32 $r8 2
+ add b32 $r9 $r8
+ add b32 $r14 4
+ cmpu b32 $r14 $r15
+ bra ne nv_mmctx_size_loop
+ mov b32 $r15 $r9
+ ret
+
+// mmctx_xfer - execute a list of mmio transfers
+//
+// In : $r10 flags
+// bit 0: direction (0 = save, 1 = load)
+// bit 1: set if first transfer
+// bit 2: set if last transfer
+// $r11 base
+// $r12 mmio list head
+// $r13 mmio list tail
+// $r14 multi_stride
+// $r15 multi_mask
+//
+mmctx_xfer:
+ trace_set(T_MMCTX)
+ mov $r8 0x710
+ shl b32 $r8 6
+ clear b32 $r9
+ or $r11 $r11
+ bra e mmctx_base_disabled
+ iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
+ bset $r9 0 // BASE_EN
+ mmctx_base_disabled:
+ or $r14 $r14
+ bra e mmctx_multi_disabled
+ iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
+ iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
+ bset $r9 1 // MULTI_EN
+ mmctx_multi_disabled:
+ add b32 $r8 0x100
+
+ xbit $r11 $r10 0
+ shl b32 $r11 16 // DIR
+ bset $r11 12 // QLIMIT = 0x10
+ xbit $r14 $r10 1
+ shl b32 $r14 17
+ or $r11 $r14 // START_TRIGGER
+ iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
+
+ // loop over the mmio list, and send requests to the hw
+ mmctx_exec_loop:
+ // wait for space in mmctx queue
+ mmctx_wait_free:
+ iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
+ and $r14 0x1f
+ bra e mmctx_wait_free
+
+ // queue up an entry
+ ld b32 $r14 D[$r12]
+ or $r14 $r9
+ iowr I[$r8 + 0x300] $r14
+ add b32 $r12 4
+ cmpu b32 $r12 $r13
+ bra ne mmctx_exec_loop
+
+ xbit $r11 $r10 2
+ bra ne mmctx_stop
+ // wait for queue to empty
+ mmctx_fini_wait:
+ iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+ and $r11 0x1f
+ cmpu b32 $r11 0x10
+ bra ne mmctx_fini_wait
+ mov $r10 2 // DONE_MMCTX
+ call wait_donez
+ bra mmctx_done
+ mmctx_stop:
+ xbit $r11 $r10 0
+ shl b32 $r11 16 // DIR
+ bset $r11 12 // QLIMIT = 0x10
+ bset $r11 18 // STOP_TRIGGER
+ iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
+ mmctx_stop_wait:
+ // wait for STOP_TRIGGER to clear
+ iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+ xbit $r11 $r11 18
+ bra ne mmctx_stop_wait
+ mmctx_done:
+ trace_clr(T_MMCTX)
+ ret
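
The $r10 flags handed to mmctx_xfer end up as individual bits of MMCTX_CTRL: the direction bit lands in bit 16, the "first transfer" flag becomes the start trigger in bit 17, bit 12 pins QLIMIT at 0x10, and when the "last transfer" flag is set a final write with bit 18 raises the stop trigger. A rough C illustration of how those control words are assembled (bit names are taken from the comments above, the macro names are made up):

/* Illustrative packing of the MMCTX_CTRL words written above. */
#define MMCTX_CTRL_QLIMIT_10	(1u << 12)	/* QLIMIT = 0x10 */
#define MMCTX_CTRL_DIR_LOAD	(1u << 16)	/* 0 = save, 1 = load */
#define MMCTX_CTRL_START	(1u << 17)
#define MMCTX_CTRL_STOP		(1u << 18)

static inline unsigned int mmctx_ctrl_start(int load, int first)
{
	return (load  ? MMCTX_CTRL_DIR_LOAD : 0) |
	       (first ? MMCTX_CTRL_START    : 0) | MMCTX_CTRL_QLIMIT_10;
}

static inline unsigned int mmctx_ctrl_stop(int load)
{
	return (load ? MMCTX_CTRL_DIR_LOAD : 0) |
	       MMCTX_CTRL_STOP | MMCTX_CTRL_QLIMIT_10;
}
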
+
+// Wait for DONE_STRAND
+//
+strand_wait:
+ push $r10
+ mov $r10 2
+ call wait_donez
+ pop $r10
+ ret
+
+// unknown - call before issuing strand commands
+//
+strand_pre:
+ mov $r8 0x4afc
+ sethi $r8 0x20000
+ mov $r9 0xc
+ iowr I[$r8] $r9
+ call strand_wait
+ ret
+
+// unknown - call after issuing strand commands
+//
+strand_post:
+ mov $r8 0x4afc
+ sethi $r8 0x20000
+ mov $r9 0xd
+ iowr I[$r8] $r9
+ call strand_wait
+ ret
+
+// Selects strand set?!
+//
+// In: $r14 id
+//
+strand_set:
+ mov $r10 0x4ffc
+ sethi $r10 0x20000
+ sub b32 $r11 $r10 0x500
+ mov $r12 0xf
+ iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
+ mov $r12 0xb
+ iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
+ call strand_wait
+ iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
+ mov $r12 0xa
+ iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
+ call strand_wait
+ ret
+
+// Initialise strand context data
+//
+// In : $r15 context base
+// Out: $r15 context size (in bytes)
+//
+// Strandset(?) 3 hardcoded currently
+//
+strand_ctx_init:
+ trace_set(T_STRINIT)
+ call strand_pre
+ mov $r14 3
+ call strand_set
+ mov $r10 0x46fc
+ sethi $r10 0x20000
+ add b32 $r11 $r10 0x400
+ iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
+ mov $r12 1
+ iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
+ call strand_wait
+ sub b32 $r12 $r0 1
+ iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
+ mov $r12 2
+ iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
+ call strand_wait
+ call strand_post
+
+ // read the size of each strand, poke the context offset of
+ // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
+ // about it later.
+ mov $r8 0x880
+ shl b32 $r8 6
+ iord $r9 I[$r8 + 0x000] // STRANDS
+ add b32 $r8 0x2200
+ shr b32 $r14 $r15 8
+ ctx_init_strand_loop:
+ iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE
+ iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE
+ iord $r10 I[$r8 + 0x200] // STRAND_SIZE
+ shr b32 $r10 6
+ add b32 $r10 1
+ add b32 $r14 $r10
+ add b32 $r8 4
+ sub b32 $r9 1
+ bra ne ctx_init_strand_loop
+
+ shl b32 $r14 8
+ sub b32 $r15 $r14 $r15
+ trace_clr(T_STRINIT)
+ ret
+')
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
index f5d184e0689..55689e99728 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -57,8 +57,7 @@ struct nvc0_graph_priv {
struct nouveau_gpuobj *unk4188b4;
struct nouveau_gpuobj *unk4188b8;
- u8 magic_not_rop_nr;
- u32 magicgpc918;
+ u8 magic_not_rop_nr;
};
struct nvc0_graph_chan {
@@ -72,4 +71,25 @@ struct nvc0_graph_chan {
int nvc0_grctx_generate(struct nouveau_channel *);
+/* also used by nvc0_graph.c to determine which chipsets are supported */
+static inline u32
+nvc0_graph_class(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ case 0xc3:
+ case 0xc4:
+ case 0xce: /* guess, mmio trace shows only 0x9097 state */
+ return 0x9097;
+ case 0xc1:
+ return 0x9197;
+ case 0xc8:
+ return 0x9297;
+ default:
+ return 0;
+ }
+}
+
#endif
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index 6df06611413..31018eaf527 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -45,6 +45,9 @@ nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
static void
nvc0_grctx_generate_9097(struct drm_device *dev)
{
+ u32 fermi = nvc0_graph_class(dev);
+ u32 mthd;
+
nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
@@ -824,134 +827,10 @@ nvc0_grctx_generate_9097(struct drm_device *dev)
nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
- nv_mthd(dev, 0x9097, 0x3400, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3404, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3408, 0x00000000);
- nv_mthd(dev, 0x9097, 0x340c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3410, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3414, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3418, 0x00000000);
- nv_mthd(dev, 0x9097, 0x341c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3420, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3424, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3428, 0x00000000);
- nv_mthd(dev, 0x9097, 0x342c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3430, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3434, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3438, 0x00000000);
- nv_mthd(dev, 0x9097, 0x343c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3440, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3444, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3448, 0x00000000);
- nv_mthd(dev, 0x9097, 0x344c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3450, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3454, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3458, 0x00000000);
- nv_mthd(dev, 0x9097, 0x345c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3460, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3464, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3468, 0x00000000);
- nv_mthd(dev, 0x9097, 0x346c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3470, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3474, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3478, 0x00000000);
- nv_mthd(dev, 0x9097, 0x347c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3480, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3484, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3488, 0x00000000);
- nv_mthd(dev, 0x9097, 0x348c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3490, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3494, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3498, 0x00000000);
- nv_mthd(dev, 0x9097, 0x349c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34a0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34a4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34a8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34ac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34b0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34b4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34b8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34bc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34c0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34c4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34c8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34cc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34d0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34d4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34d8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34dc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34e0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34e4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34e8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34ec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34f0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34f4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34f8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x34fc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3500, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3504, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3508, 0x00000000);
- nv_mthd(dev, 0x9097, 0x350c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3510, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3514, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3518, 0x00000000);
- nv_mthd(dev, 0x9097, 0x351c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3520, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3524, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3528, 0x00000000);
- nv_mthd(dev, 0x9097, 0x352c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3530, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3534, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3538, 0x00000000);
- nv_mthd(dev, 0x9097, 0x353c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3540, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3544, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3548, 0x00000000);
- nv_mthd(dev, 0x9097, 0x354c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3550, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3554, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3558, 0x00000000);
- nv_mthd(dev, 0x9097, 0x355c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3560, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3564, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3568, 0x00000000);
- nv_mthd(dev, 0x9097, 0x356c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3570, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3574, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3578, 0x00000000);
- nv_mthd(dev, 0x9097, 0x357c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3580, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3584, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3588, 0x00000000);
- nv_mthd(dev, 0x9097, 0x358c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3590, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3594, 0x00000000);
- nv_mthd(dev, 0x9097, 0x3598, 0x00000000);
- nv_mthd(dev, 0x9097, 0x359c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35a0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35a4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35a8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35ac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35b0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35b4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35b8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35bc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35c0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35c4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35c8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35cc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35d0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35d4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35d8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35dc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35e0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35e4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35e8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35ec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35f0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35f4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35f8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x35fc, 0x00000000);
+ if (fermi == 0x9097) {
+ for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+ nv_mthd(dev, 0x9097, mthd, 0x00000000);
+ }
nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
@@ -1321,6 +1200,37 @@ nvc0_grctx_generate_9097(struct drm_device *dev)
}
static void
+nvc0_grctx_generate_9197(struct drm_device *dev)
+{
+ u32 fermi = nvc0_graph_class(dev);
+ u32 mthd;
+
+ if (fermi == 0x9197) {
+ for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+ nv_mthd(dev, 0x9197, mthd, 0x00000000);
+ }
+ nv_mthd(dev, 0x9197, 0x02e4, 0x0000b001);
+}
+
+static void
+nvc0_grctx_generate_9297(struct drm_device *dev)
+{
+ u32 fermi = nvc0_graph_class(dev);
+ u32 mthd;
+
+ if (fermi == 0x9297) {
+ for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+ nv_mthd(dev, 0x9297, mthd, 0x00000000);
+ }
+ nv_mthd(dev, 0x9297, 0x036c, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x0370, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x07a4, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x07a8, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x0374, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x0378, 0x00000020);
+}
+
+static void
nvc0_grctx_generate_902d(struct drm_device *dev)
{
nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
@@ -1559,8 +1469,15 @@ nvc0_grctx_generate_unk47xx(struct drm_device *dev)
static void
nvc0_grctx_generate_shaders(struct drm_device *dev)
{
- nv_wr32(dev, 0x405800, 0x078000bf);
- nv_wr32(dev, 0x405830, 0x02180000);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset != 0xc1) {
+ nv_wr32(dev, 0x405800, 0x078000bf);
+ nv_wr32(dev, 0x405830, 0x02180000);
+ } else {
+ nv_wr32(dev, 0x405800, 0x0f8000bf);
+ nv_wr32(dev, 0x405830, 0x02180218);
+ }
nv_wr32(dev, 0x405834, 0x00000000);
nv_wr32(dev, 0x405838, 0x00000000);
nv_wr32(dev, 0x405854, 0x00000000);
@@ -1586,10 +1503,16 @@ nvc0_grctx_generate_unk60xx(struct drm_device *dev)
static void
nvc0_grctx_generate_unk64xx(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
nv_wr32(dev, 0x4064a8, 0x00000000);
nv_wr32(dev, 0x4064ac, 0x00003fff);
nv_wr32(dev, 0x4064b4, 0x00000000);
nv_wr32(dev, 0x4064b8, 0x00000000);
+ if (dev_priv->chipset == 0xc1) {
+ nv_wr32(dev, 0x4064c0, 0x80140078);
+ nv_wr32(dev, 0x4064c4, 0x0086ffff);
+ }
}
static void
@@ -1622,21 +1545,14 @@ static void
nvc0_grctx_generate_rop(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chipset = dev_priv->chipset;
/* ROPC_BROADCAST */
nv_wr32(dev, 0x408800, 0x02802a3c);
nv_wr32(dev, 0x408804, 0x00000040);
- nv_wr32(dev, 0x408808, 0x0003e00d);
- switch (dev_priv->chipset) {
- case 0xc0:
- nv_wr32(dev, 0x408900, 0x0080b801);
- break;
- case 0xc3:
- case 0xc4:
- nv_wr32(dev, 0x408900, 0x3080b801);
- break;
- }
- nv_wr32(dev, 0x408904, 0x02000001);
+ nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001);
nv_wr32(dev, 0x408908, 0x00c80929);
nv_wr32(dev, 0x40890c, 0x00000000);
nv_wr32(dev, 0x408980, 0x0000011d);
@@ -1645,6 +1561,8 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
static void
nvc0_grctx_generate_gpc(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chipset = dev_priv->chipset;
int i;
/* GPC_BROADCAST */
@@ -1676,7 +1594,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x41880c, 0x00000000);
nv_wr32(dev, 0x418810, 0x00000000);
nv_wr32(dev, 0x418828, 0x00008442);
- nv_wr32(dev, 0x418830, 0x00000001);
+ nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001);
nv_wr32(dev, 0x4188d8, 0x00000008);
nv_wr32(dev, 0x4188e0, 0x01000000);
nv_wr32(dev, 0x4188e8, 0x00000000);
@@ -1684,7 +1602,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x4188f0, 0x00000000);
nv_wr32(dev, 0x4188f4, 0x00000000);
nv_wr32(dev, 0x4188f8, 0x00000000);
- nv_wr32(dev, 0x4188fc, 0x00100000);
+ nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 0x00100000 : 0x00100018);
nv_wr32(dev, 0x41891c, 0x00ff00ff);
nv_wr32(dev, 0x418924, 0x00000000);
nv_wr32(dev, 0x418928, 0x00ffff00);
@@ -1715,6 +1633,8 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418c24, 0x00000000);
nv_wr32(dev, 0x418c28, 0x00000000);
nv_wr32(dev, 0x418c2c, 0x00000000);
+ if (chipset == 0xc1)
+ nv_wr32(dev, 0x418c6c, 0x00000001);
nv_wr32(dev, 0x418c80, 0x20200004);
nv_wr32(dev, 0x418c8c, 0x00000001);
nv_wr32(dev, 0x419000, 0x00000780);
@@ -1727,10 +1647,13 @@ static void
nvc0_grctx_generate_tp(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chipset = dev_priv->chipset;
/* GPC_BROADCAST.TP_BROADCAST */
+ nv_wr32(dev, 0x419818, 0x00000000);
+ nv_wr32(dev, 0x41983c, 0x00038bc7);
nv_wr32(dev, 0x419848, 0x00000000);
- nv_wr32(dev, 0x419864, 0x0000012a);
+ nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129);
nv_wr32(dev, 0x419888, 0x00000000);
nv_wr32(dev, 0x419a00, 0x000001f0);
nv_wr32(dev, 0x419a04, 0x00000001);
@@ -1740,8 +1663,8 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419a14, 0x00000200);
nv_wr32(dev, 0x419a1c, 0x00000000);
nv_wr32(dev, 0x419a20, 0x00000800);
- if (dev_priv->chipset != 0xc0)
- nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */
+ if (chipset != 0xc0 && chipset != 0xc8)
+ nv_wr32(dev, 0x00419ac4, 0x0007f440);
nv_wr32(dev, 0x419b00, 0x0a418820);
nv_wr32(dev, 0x419b04, 0x062080e6);
nv_wr32(dev, 0x419b08, 0x020398a4);
@@ -1749,17 +1672,19 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419b10, 0x0a418820);
nv_wr32(dev, 0x419b14, 0x000000e6);
nv_wr32(dev, 0x419bd0, 0x00900103);
- nv_wr32(dev, 0x419be0, 0x00000001);
+ nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001);
nv_wr32(dev, 0x419be4, 0x00000000);
nv_wr32(dev, 0x419c00, 0x00000002);
nv_wr32(dev, 0x419c04, 0x00000006);
nv_wr32(dev, 0x419c08, 0x00000002);
nv_wr32(dev, 0x419c20, 0x00000000);
- nv_wr32(dev, 0x419cbc, 0x28137606);
+ nv_wr32(dev, 0x419cb0, 0x00060048); //XXX: 0xce 0x00020048
nv_wr32(dev, 0x419ce8, 0x00000000);
nv_wr32(dev, 0x419cf4, 0x00000183);
- nv_wr32(dev, 0x419d20, 0x02180000);
+ nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000);
nv_wr32(dev, 0x419d24, 0x00001fff);
+ if (chipset == 0xc1)
+ nv_wr32(dev, 0x419d44, 0x02180218);
nv_wr32(dev, 0x419e04, 0x00000000);
nv_wr32(dev, 0x419e08, 0x00000000);
nv_wr32(dev, 0x419e0c, 0x00000000);
@@ -1785,11 +1710,11 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419e8c, 0x00000000);
nv_wr32(dev, 0x419e90, 0x00000000);
nv_wr32(dev, 0x419e98, 0x00000000);
- if (dev_priv->chipset != 0xc0)
+ if (chipset != 0xc0 && chipset != 0xc8)
nv_wr32(dev, 0x419ee0, 0x00011110);
nv_wr32(dev, 0x419f50, 0x00000000);
nv_wr32(dev, 0x419f54, 0x00000000);
- if (dev_priv->chipset != 0xc0)
+ if (chipset != 0xc0 && chipset != 0xc8)
nv_wr32(dev, 0x419f58, 0x00000000);
}
@@ -1801,6 +1726,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
struct drm_device *dev = chan->dev;
int i, gpc, tp, id;
+ u32 fermi = nvc0_graph_class(dev);
u32 r000260, tmp;
r000260 = nv_rd32(dev, 0x000260);
@@ -1857,10 +1783,11 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_wr32(dev, 0x40587c, 0x00000000);
if (1) {
- const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 };
+ const u8 chipset_tp_max[] = { 16, 4, 0, 4, 8, 0, 0, 0,
+ 16, 0, 0, 0, 0, 0, 8, 0 };
u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
u8 tpnr[GPC_MAX];
- u8 data[32];
+ u8 data[TP_MAX];
memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
memset(data, 0x1f, sizeof(data));
@@ -2633,6 +2560,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x0000053f, 0xffff0000);
nv_icmd(dev, 0x00000585, 0x0000003f);
nv_icmd(dev, 0x00000576, 0x00000003);
+ if (dev_priv->chipset == 0xc1)
+ nv_icmd(dev, 0x0000057b, 0x00000059);
nv_icmd(dev, 0x00000586, 0x00000040);
nv_icmd(dev, 0x00000582, 0x00000080);
nv_icmd(dev, 0x00000583, 0x00000080);
@@ -2865,6 +2794,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_wr32(dev, 0x404154, 0x00000400);
nvc0_grctx_generate_9097(dev);
+ if (fermi >= 0x9197)
+ nvc0_grctx_generate_9197(dev);
+ if (fermi >= 0x9297)
+ nvc0_grctx_generate_9297(dev);
nvc0_grctx_generate_902d(dev);
nvc0_grctx_generate_9039(dev);
nvc0_grctx_generate_90c0(dev);
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
new file mode 100644
index 00000000000..0ec2add72a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
@@ -0,0 +1,474 @@
+/* fuc microcode for nvc0 PGRAPH/GPC
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build:
+ * m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h
+ */
+
+/* TODO
+ * - bracket certain functions with scratch writes, useful for debugging
+ * - watchdog timer around ctx operations
+ */
+
+.section nvc0_grgpc_data
+include(`nvc0_graph.fuc')
+gpc_id: .b32 0
+gpc_mmio_list_head: .b32 0
+gpc_mmio_list_tail: .b32 0
+
+tpc_count: .b32 0
+tpc_mask: .b32 0
+tpc_mmio_list_head: .b32 0
+tpc_mmio_list_tail: .b32 0
+
+cmd_queue: queue_init
+
+// chipset descriptions
+chipsets:
+.b8 0xc0 0 0 0
+.b16 nvc0_gpc_mmio_head
+.b16 nvc0_gpc_mmio_tail
+.b16 nvc0_tpc_mmio_head
+.b16 nvc0_tpc_mmio_tail
+.b8 0xc1 0 0 0
+.b16 nvc0_gpc_mmio_head
+.b16 nvc1_gpc_mmio_tail
+.b16 nvc0_tpc_mmio_head
+.b16 nvc1_tpc_mmio_tail
+.b8 0xc3 0 0 0
+.b16 nvc0_gpc_mmio_head
+.b16 nvc0_gpc_mmio_tail
+.b16 nvc0_tpc_mmio_head
+.b16 nvc3_tpc_mmio_tail
+.b8 0xc4 0 0 0
+.b16 nvc0_gpc_mmio_head
+.b16 nvc0_gpc_mmio_tail
+.b16 nvc0_tpc_mmio_head
+.b16 nvc3_tpc_mmio_tail
+.b8 0xc8 0 0 0
+.b16 nvc0_gpc_mmio_head
+.b16 nvc0_gpc_mmio_tail
+.b16 nvc0_tpc_mmio_head
+.b16 nvc0_tpc_mmio_tail
+.b8 0xce 0 0 0
+.b16 nvc0_gpc_mmio_head
+.b16 nvc0_gpc_mmio_tail
+.b16 nvc0_tpc_mmio_head
+.b16 nvc3_tpc_mmio_tail
+.b8 0 0 0 0
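
Each chipsets record above is a fixed 12 bytes: a chipset id byte padded to a word, then four 16-bit pointers bracketing the GPC and TPC mmio lists, with a zero id terminating the table. init_find_chipset below walks it in 12-byte steps, and the same layout shows up as three-word groups near the top of the generated nvc0_grgpc_data[] array (0x000000c0, 0x011000b0, 0x01640114, ...). An equivalent C view of one record, for illustration only:

/* Layout of one 'chipsets' record in the GPC ucode data segment. */
struct gpc_chipset_desc {
	unsigned char  chipset;		/* e.g. 0xc0; 0x00 terminates the table */
	unsigned char  pad[3];
	unsigned short gpc_mmio_head;	/* byte offsets into the data segment */
	unsigned short gpc_mmio_tail;
	unsigned short tpc_mmio_head;
	unsigned short tpc_mmio_tail;
};					/* 12 bytes, matching the "add b32 $r1 12" stride */
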
+
+// GPC mmio lists
+nvc0_gpc_mmio_head:
+mmctx_data(0x000380, 1)
+mmctx_data(0x000400, 6)
+mmctx_data(0x000450, 9)
+mmctx_data(0x000600, 1)
+mmctx_data(0x000684, 1)
+mmctx_data(0x000700, 5)
+mmctx_data(0x000800, 1)
+mmctx_data(0x000808, 3)
+mmctx_data(0x000828, 1)
+mmctx_data(0x000830, 1)
+mmctx_data(0x0008d8, 1)
+mmctx_data(0x0008e0, 1)
+mmctx_data(0x0008e8, 6)
+mmctx_data(0x00091c, 1)
+mmctx_data(0x000924, 3)
+mmctx_data(0x000b00, 1)
+mmctx_data(0x000b08, 6)
+mmctx_data(0x000bb8, 1)
+mmctx_data(0x000c08, 1)
+mmctx_data(0x000c10, 8)
+mmctx_data(0x000c80, 1)
+mmctx_data(0x000c8c, 1)
+mmctx_data(0x001000, 3)
+mmctx_data(0x001014, 1)
+nvc0_gpc_mmio_tail:
+mmctx_data(0x000c6c, 1);
+nvc1_gpc_mmio_tail:
+
+// TPC mmio lists
+nvc0_tpc_mmio_head:
+mmctx_data(0x000018, 1)
+mmctx_data(0x00003c, 1)
+mmctx_data(0x000048, 1)
+mmctx_data(0x000064, 1)
+mmctx_data(0x000088, 1)
+mmctx_data(0x000200, 6)
+mmctx_data(0x00021c, 2)
+mmctx_data(0x000300, 6)
+mmctx_data(0x0003d0, 1)
+mmctx_data(0x0003e0, 2)
+mmctx_data(0x000400, 3)
+mmctx_data(0x000420, 1)
+mmctx_data(0x0004b0, 1)
+mmctx_data(0x0004e8, 1)
+mmctx_data(0x0004f4, 1)
+mmctx_data(0x000520, 2)
+mmctx_data(0x000604, 4)
+mmctx_data(0x000644, 20)
+mmctx_data(0x000698, 1)
+mmctx_data(0x000750, 2)
+nvc0_tpc_mmio_tail:
+mmctx_data(0x000758, 1)
+mmctx_data(0x0002c4, 1)
+mmctx_data(0x0004bc, 1)
+mmctx_data(0x0006e0, 1)
+nvc3_tpc_mmio_tail:
+mmctx_data(0x000544, 1)
+nvc1_tpc_mmio_tail:
+
+
+.section nvc0_grgpc_code
+bra init
+define(`include_code')
+include(`nvc0_graph.fuc')
+
+// reports an exception to the host
+//
+// In: $r15 error code (see nvc0_graph.fuc)
+//
+error:
+ push $r14
+ mov $r14 -0x67ec // 0x9814
+ sethi $r14 0x400000
+ call nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
+ add b32 $r14 0x41c
+ mov $r15 1
+ call nv_wr32 // HUB_CTXCTL_INTR_UP_SET
+ pop $r14
+ ret
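
The negative immediate in error() is how the falcon's mov/sethi pair builds a full 32-bit address: mov takes a sign-extended 16-bit immediate (its low half is the 0x9814 noted in the comment), and sethi then fills in the upper half, giving 0x409814, the HUB's CC_SCRATCH[5] per the nv_wr32 comment. The same trick appears in hub_barrier_done and in the HUB ucode below (-0x6be8 gives 0x409418, -0x69fc gives 0x409604). A worked C illustration, assuming sethi simply replaces the upper 16 bits:

/* Illustration only: "mov $rX -0x67ec; sethi $rX 0x400000" -> 0x409814. */
unsigned int r14 = (unsigned int)-0x67ec;	/* sign-extended: 0xffff9814 */
r14 = (r14 & 0x0000ffff) | 0x00400000;		/* sethi: upper half := 0x0040 */
/* r14 == 0x00409814 */
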
+
+// GPC fuc initialisation, executed by triggering ucode start, will
+// fall through to main loop after completion.
+//
+// Input:
+// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
+// CC_SCRATCH[1]: context base
+//
+// Output:
+// CC_SCRATCH[0]:
+// 31:31: set to signal completion
+// CC_SCRATCH[1]:
+// 31:0: GPC context size
+//
+init:
+ clear b32 $r0
+ mov $sp $r0
+
+ // enable fifo access
+ mov $r1 0x1200
+ mov $r2 2
+ iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
+
+ // setup i0 handler, and route all interrupts to it
+ mov $r1 ih
+ mov $iv0 $r1
+ mov $r1 0x400
+ iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
+
+ // enable fifo interrupt
+ mov $r2 4
+ iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
+
+ // enable interrupts
+ bset $flags ie0
+
+ // figure out which GPC we are, and how many TPCs we have
+ mov $r1 0x608
+ shl b32 $r1 6
+ iord $r2 I[$r1 + 0x000] // UNITS
+ mov $r3 1
+ and $r2 0x1f
+ shl b32 $r3 $r2
+ sub b32 $r3 1
+ st b32 D[$r0 + tpc_count] $r2
+ st b32 D[$r0 + tpc_mask] $r3
+ add b32 $r1 0x400
+ iord $r2 I[$r1 + 0x000] // MYINDEX
+ st b32 D[$r0 + gpc_id] $r2
+
+ // find context data for this chipset
+ mov $r2 0x800
+ shl b32 $r2 6
+ iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
+ mov $r1 chipsets - 12
+ init_find_chipset:
+ add b32 $r1 12
+ ld b32 $r3 D[$r1 + 0x00]
+ cmpu b32 $r3 $r2
+ bra e init_context
+ cmpu b32 $r3 0
+ bra ne init_find_chipset
+ // unknown chipset
+ ret
+
+ // initialise context base, and size tracking
+ init_context:
+ mov $r2 0x800
+ shl b32 $r2 6
+ iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base
+ clear b32 $r3 // track GPC context size here
+
+ // set mmctx base addresses now so we don't have to do it later,
+ // they don't currently ever change
+ mov $r4 0x700
+ shl b32 $r4 6
+ shr b32 $r5 $r2 8
+ iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE
+ iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE
+
+ // calculate GPC mmio context size, store the chipset-specific
+ // mmio list pointers somewhere we can get at them later without
+ // re-parsing the chipset list
+ clear b32 $r14
+ clear b32 $r15
+ ld b16 $r14 D[$r1 + 4]
+ ld b16 $r15 D[$r1 + 6]
+ st b16 D[$r0 + gpc_mmio_list_head] $r14
+ st b16 D[$r0 + gpc_mmio_list_tail] $r15
+ call mmctx_size
+ add b32 $r2 $r15
+ add b32 $r3 $r15
+
+ // calculate per-TPC mmio context size, store the list pointers
+ ld b16 $r14 D[$r1 + 8]
+ ld b16 $r15 D[$r1 + 10]
+ st b16 D[$r0 + tpc_mmio_list_head] $r14
+ st b16 D[$r0 + tpc_mmio_list_tail] $r15
+ call mmctx_size
+ ld b32 $r14 D[$r0 + tpc_count]
+ mulu $r14 $r15
+ add b32 $r2 $r14
+ add b32 $r3 $r14
+
+ // round up base/size to 256 byte boundary (for strand SWBASE)
+ add b32 $r4 0x1300
+ shr b32 $r3 2
+ iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!?
+ shr b32 $r2 8
+ shr b32 $r3 6
+ add b32 $r2 1
+ add b32 $r3 1
+ shl b32 $r2 8
+ shl b32 $r3 8
+
+ // calculate size of strand context data
+ mov b32 $r15 $r2
+ call strand_ctx_init
+ add b32 $r3 $r15
+
+ // save context size, and tell HUB we're done
+ mov $r1 0x800
+ shl b32 $r1 6
+ iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size
+ add b32 $r1 0x800
+ clear b32 $r2
+ bset $r2 31
+ iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
+
+// Main program loop, very simple, sleeps until woken up by the interrupt
+// handler, pulls a command from the queue and executes its handler
+//
+main:
+ bset $flags $p0
+ sleep $p0
+ mov $r13 cmd_queue
+ call queue_get
+ bra $p1 main
+
+ // 0x0000-0x0003 are all context transfers
+ cmpu b32 $r14 0x04
+ bra nc main_not_ctx_xfer
+ // fetch $flags and mask off $p1/$p2
+ mov $r1 $flags
+ mov $r2 0x0006
+ not b32 $r2
+ and $r1 $r2
+ // set $p1/$p2 according to transfer type
+ shl b32 $r14 1
+ or $r1 $r14
+ mov $flags $r1
+ // transfer context data
+ call ctx_xfer
+ bra main
+
+ main_not_ctx_xfer:
+ shl b32 $r15 $r14 16
+ or $r15 E_BAD_COMMAND
+ call error
+ bra main
+
+// interrupt handler
+ih:
+ push $r8
+ mov $r8 $flags
+ push $r8
+ push $r9
+ push $r10
+ push $r11
+ push $r13
+ push $r14
+ push $r15
+
+ // incoming fifo command?
+ iord $r10 I[$r0 + 0x200] // INTR
+ and $r11 $r10 0x00000004
+ bra e ih_no_fifo
+ // queue incoming fifo command for later processing
+ mov $r11 0x1900
+ mov $r13 cmd_queue
+ iord $r14 I[$r11 + 0x100] // FIFO_CMD
+ iord $r15 I[$r11 + 0x000] // FIFO_DATA
+ call queue_put
+ add b32 $r11 0x400
+ mov $r14 1
+ iowr I[$r11 + 0x000] $r14 // FIFO_ACK
+
+ // ack, and wake up main()
+ ih_no_fifo:
+ iowr I[$r0 + 0x100] $r10 // INTR_ACK
+
+ pop $r15
+ pop $r14
+ pop $r13
+ pop $r11
+ pop $r10
+ pop $r9
+ pop $r8
+ mov $flags $r8
+ pop $r8
+ bclr $flags $p0
+ iret
+
+// Set this GPC's bit in HUB_BAR, used to signal completion of various
+// activities to the HUB fuc
+//
+hub_barrier_done:
+ mov $r15 1
+ ld b32 $r14 D[$r0 + gpc_id]
+ shl b32 $r15 $r14
+ mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
+ sethi $r14 0x400000
+ call nv_wr32
+ ret
+
+// Disables various things, waits a bit, and re-enables them..
+//
+// Not sure how exactly this helps, perhaps "ENABLE" is not such a
+// good description for the bits we turn off? Anyways, without this,
+// funny things happen.
+//
+ctx_redswitch:
+ mov $r14 0x614
+ shl b32 $r14 6
+ mov $r15 0x020
+ iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER
+ mov $r15 8
+ ctx_redswitch_delay:
+ sub b32 $r15 1
+ bra ne ctx_redswitch_delay
+ mov $r15 0xa20
+ iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
+ ret
+
+// Transfer GPC context data between GPU and storage area
+//
+// In: $r15 context base address
+// $p1 clear on save, set on load
+// $p2 set if opposite direction done/will be done, so:
+// on save it means: "a load will follow this save"
+// on load it means: "a save preceded this load"
+//
+ctx_xfer:
+ // set context base address
+ mov $r1 0xa04
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r15// MEM_BASE
+ bra not $p1 ctx_xfer_not_load
+ call ctx_redswitch
+ ctx_xfer_not_load:
+
+ // strands
+ mov $r1 0x4afc
+ sethi $r1 0x20000
+ mov $r2 0xc
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
+ call strand_wait
+ mov $r2 0x47fc
+ sethi $r2 0x20000
+ iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
+ xbit $r2 $flags $p1
+ add b32 $r2 3
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+
+ // mmio context
+ xbit $r10 $flags $p1 // direction
+ or $r10 2 // first
+ mov $r11 0x0000
+ sethi $r11 0x500000
+ ld b32 $r12 D[$r0 + gpc_id]
+ shl b32 $r12 15
+ add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
+ ld b32 $r12 D[$r0 + gpc_mmio_list_head]
+ ld b32 $r13 D[$r0 + gpc_mmio_list_tail]
+ mov $r14 0 // not multi
+ call mmctx_xfer
+
+ // per-TPC mmio context
+ xbit $r10 $flags $p1 // direction
+ or $r10 4 // last
+ mov $r11 0x4000
+ sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
+ ld b32 $r12 D[$r0 + gpc_id]
+ shl b32 $r12 15
+ add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
+ ld b32 $r12 D[$r0 + tpc_mmio_list_head]
+ ld b32 $r13 D[$r0 + tpc_mmio_list_tail]
+ ld b32 $r15 D[$r0 + tpc_mask]
+ mov $r14 0x800 // stride = 0x800
+ call mmctx_xfer
+
+ // wait for strands to finish
+ call strand_wait
+
+ // if load, or a save without a load following, do some
+ // unknown stuff that's done after finishing a block of
+ // strand commands
+ bra $p1 ctx_xfer_post
+ bra not $p2 ctx_xfer_done
+ ctx_xfer_post:
+ mov $r1 0x4afc
+ sethi $r1 0x20000
+ mov $r2 0xd
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
+ call strand_wait
+
+ // mark completion in HUB's barrier
+ ctx_xfer_done:
+ call hub_barrier_done
+ ret
+
+.align 256
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
new file mode 100644
index 00000000000..1896c898f5b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
@@ -0,0 +1,483 @@
+uint32_t nvc0_grgpc_data[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x000000c0,
+ 0x011000b0,
+ 0x01640114,
+ 0x000000c1,
+ 0x011400b0,
+ 0x01780114,
+ 0x000000c3,
+ 0x011000b0,
+ 0x01740114,
+ 0x000000c4,
+ 0x011000b0,
+ 0x01740114,
+ 0x000000c8,
+ 0x011000b0,
+ 0x01640114,
+ 0x000000ce,
+ 0x011000b0,
+ 0x01740114,
+ 0x00000000,
+ 0x00000380,
+ 0x14000400,
+ 0x20000450,
+ 0x00000600,
+ 0x00000684,
+ 0x10000700,
+ 0x00000800,
+ 0x08000808,
+ 0x00000828,
+ 0x00000830,
+ 0x000008d8,
+ 0x000008e0,
+ 0x140008e8,
+ 0x0000091c,
+ 0x08000924,
+ 0x00000b00,
+ 0x14000b08,
+ 0x00000bb8,
+ 0x00000c08,
+ 0x1c000c10,
+ 0x00000c80,
+ 0x00000c8c,
+ 0x08001000,
+ 0x00001014,
+ 0x00000c6c,
+ 0x00000018,
+ 0x0000003c,
+ 0x00000048,
+ 0x00000064,
+ 0x00000088,
+ 0x14000200,
+ 0x0400021c,
+ 0x14000300,
+ 0x000003d0,
+ 0x040003e0,
+ 0x08000400,
+ 0x00000420,
+ 0x000004b0,
+ 0x000004e8,
+ 0x000004f4,
+ 0x04000520,
+ 0x0c000604,
+ 0x4c000644,
+ 0x00000698,
+ 0x04000750,
+ 0x00000758,
+ 0x000002c4,
+ 0x000004bc,
+ 0x000006e0,
+ 0x00000544,
+};
+
+uint32_t nvc0_grgpc_code[] = {
+ 0x03060ef5,
+ 0x9800d898,
+ 0x86f001d9,
+ 0x0489b808,
+ 0xf00c1bf4,
+ 0x21f502f7,
+ 0x00f802ec,
+ 0xb60798c4,
+ 0x8dbb0384,
+ 0x0880b600,
+ 0x80008e80,
+ 0x90b6018f,
+ 0x0f94f001,
+ 0xf801d980,
+ 0x0131f400,
+ 0x9800d898,
+ 0x89b801d9,
+ 0x210bf404,
+ 0xb60789c4,
+ 0x9dbb0394,
+ 0x0890b600,
+ 0x98009e98,
+ 0x80b6019f,
+ 0x0f84f001,
+ 0xf400d880,
+ 0x00f80132,
+ 0x0728b7f1,
+ 0xb906b4b6,
+ 0xc9f002ec,
+ 0x00bcd01f,
+ 0xc800bccf,
+ 0x1bf41fcc,
+ 0x06a7f0fa,
+ 0x010321f5,
+ 0xf840bfcf,
+ 0x28b7f100,
+ 0x06b4b607,
+ 0xb980bfd0,
+ 0xc9f002ec,
+ 0x1ec9f01f,
+ 0xcf00bcd0,
+ 0xccc800bc,
+ 0xfa1bf41f,
+ 0x87f100f8,
+ 0x84b60430,
+ 0x1ff9f006,
+ 0xf8008fd0,
+ 0x3087f100,
+ 0x0684b604,
+ 0xf80080d0,
+ 0x3c87f100,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d000,
+ 0x081887f1,
+ 0xd00684b6,
+ 0x87f1008a,
+ 0x84b60400,
+ 0x0088cf06,
+ 0xf4888aff,
+ 0x87f1f31b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00099,
+ 0xf100f800,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00099f0,
+ 0x87f10089,
+ 0x84b60818,
+ 0x008ad006,
+ 0x040087f1,
+ 0xcf0684b6,
+ 0x8aff0088,
+ 0xf30bf488,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0099f094,
+ 0xf80089d0,
+ 0x9894bd00,
+ 0x85b600e8,
+ 0x0180b61a,
+ 0xbb0284b6,
+ 0xe0b60098,
+ 0x04efb804,
+ 0xb9eb1bf4,
+ 0x00f8029f,
+ 0x083c87f1,
+ 0xbd0684b6,
+ 0x0199f094,
+ 0xf10089d0,
+ 0xb6071087,
+ 0x94bd0684,
+ 0xf405bbfd,
+ 0x8bd0090b,
+ 0x0099f000,
+ 0xf405eefd,
+ 0x8ed00c0b,
+ 0xc08fd080,
+ 0xb70199f0,
+ 0xc8010080,
+ 0xb4b600ab,
+ 0x0cb9f010,
+ 0xb601aec8,
+ 0xbefd11e4,
+ 0x008bd005,
+ 0xf0008ecf,
+ 0x0bf41fe4,
+ 0x00ce98fa,
+ 0xd005e9fd,
+ 0xc0b6c08e,
+ 0x04cdb804,
+ 0xc8e81bf4,
+ 0x1bf402ab,
+ 0x008bcf18,
+ 0xb01fb4f0,
+ 0x1bf410b4,
+ 0x02a7f0f7,
+ 0xf4c921f4,
+ 0xabc81b0e,
+ 0x10b4b600,
+ 0xf00cb9f0,
+ 0x8bd012b9,
+ 0x008bcf00,
+ 0xf412bbc8,
+ 0x87f1fa1b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00199,
+ 0xf900f800,
+ 0x02a7f0a0,
+ 0xfcc921f4,
+ 0xf100f8a0,
+ 0xf04afc87,
+ 0x97f00283,
+ 0x0089d00c,
+ 0x020721f5,
+ 0x87f100f8,
+ 0x83f04afc,
+ 0x0d97f002,
+ 0xf50089d0,
+ 0xf8020721,
+ 0xfca7f100,
+ 0x02a3f04f,
+ 0x0500aba2,
+ 0xd00fc7f0,
+ 0xc7f000ac,
+ 0x00bcd00b,
+ 0x020721f5,
+ 0xf000aed0,
+ 0xbcd00ac7,
+ 0x0721f500,
+ 0xf100f802,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x21f50089,
+ 0xe7f00213,
+ 0x3921f503,
+ 0xfca7f102,
+ 0x02a3f046,
+ 0x0400aba0,
+ 0xf040a0d0,
+ 0xbcd001c7,
+ 0x0721f500,
+ 0x010c9202,
+ 0xf000acd0,
+ 0xbcd002c7,
+ 0x0721f500,
+ 0x2621f502,
+ 0x8087f102,
+ 0x0684b608,
+ 0xb70089cf,
+ 0x95220080,
+ 0x8ed008fe,
+ 0x408ed000,
+ 0xb6808acf,
+ 0xa0b606a5,
+ 0x00eabb01,
+ 0xb60480b6,
+ 0x1bf40192,
+ 0x08e4b6e8,
+ 0xf1f2efbc,
+ 0xb6085c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x00f80089,
+ 0xe7f1e0f9,
+ 0xe3f09814,
+ 0x8d21f440,
+ 0x041ce0b7,
+ 0xf401f7f0,
+ 0xe0fc8d21,
+ 0x04bd00f8,
+ 0xf10004fe,
+ 0xf0120017,
+ 0x12d00227,
+ 0x3e17f100,
+ 0x0010fe04,
+ 0x040017f1,
+ 0xf0c010d0,
+ 0x12d00427,
+ 0x1031f400,
+ 0x060817f1,
+ 0xcf0614b6,
+ 0x37f00012,
+ 0x1f24f001,
+ 0xb60432bb,
+ 0x02800132,
+ 0x04038003,
+ 0x040010b7,
+ 0x800012cf,
+ 0x27f10002,
+ 0x24b60800,
+ 0x0022cf06,
+ 0xb65817f0,
+ 0x13980c10,
+ 0x0432b800,
+ 0xb00b0bf4,
+ 0x1bf40034,
+ 0xf100f8f1,
+ 0xb6080027,
+ 0x22cf0624,
+ 0xf134bd40,
+ 0xb6070047,
+ 0x25950644,
+ 0x0045d008,
+ 0xbd4045d0,
+ 0x58f4bde4,
+ 0x1f58021e,
+ 0x020e4003,
+ 0xf5040f40,
+ 0xbb013d21,
+ 0x3fbb002f,
+ 0x041e5800,
+ 0x40051f58,
+ 0x0f400a0e,
+ 0x3d21f50c,
+ 0x030e9801,
+ 0xbb00effd,
+ 0x3ebb002e,
+ 0x0040b700,
+ 0x0235b613,
+ 0xb60043d0,
+ 0x35b60825,
+ 0x0120b606,
+ 0xb60130b6,
+ 0x34b60824,
+ 0x022fb908,
+ 0x026321f5,
+ 0xf1003fbb,
+ 0xb6080017,
+ 0x13d00614,
+ 0x0010b740,
+ 0xf024bd08,
+ 0x12d01f29,
+ 0x0031f400,
+ 0xf00028f4,
+ 0x21f41cd7,
+ 0xf401f439,
+ 0xf404e4b0,
+ 0x81fe1e18,
+ 0x0627f001,
+ 0x12fd20bd,
+ 0x01e4b604,
+ 0xfe051efd,
+ 0x21f50018,
+ 0x0ef404c3,
+ 0x10ef94d3,
+ 0xf501f5f0,
+ 0xf402ec21,
+ 0x80f9c60e,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0x800acff0,
+ 0xf404abc4,
+ 0xb7f11d0b,
+ 0xd7f01900,
+ 0x40becf1c,
+ 0xf400bfcf,
+ 0xb0b70421,
+ 0xe7f00400,
+ 0x00bed001,
+ 0xfc400ad0,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x32f480fc,
+ 0xf001f800,
+ 0x0e9801f7,
+ 0x04febb00,
+ 0x9418e7f1,
+ 0xf440e3f0,
+ 0x00f88d21,
+ 0x0614e7f1,
+ 0xf006e4b6,
+ 0xefd020f7,
+ 0x08f7f000,
+ 0xf401f2b6,
+ 0xf7f1fd1b,
+ 0xefd00a20,
+ 0xf100f800,
+ 0xb60a0417,
+ 0x1fd00614,
+ 0x0711f400,
+ 0x04a421f5,
+ 0x4afc17f1,
+ 0xf00213f0,
+ 0x12d00c27,
+ 0x0721f500,
+ 0xfc27f102,
+ 0x0223f047,
+ 0xf00020d0,
+ 0x20b6012c,
+ 0x0012d003,
+ 0xf001acf0,
+ 0xb7f002a5,
+ 0x50b3f000,
+ 0xb6000c98,
+ 0xbcbb0fc4,
+ 0x010c9800,
+ 0xf0020d98,
+ 0x21f500e7,
+ 0xacf0015c,
+ 0x04a5f001,
+ 0x4000b7f1,
+ 0x9850b3f0,
+ 0xc4b6000c,
+ 0x00bcbb0f,
+ 0x98050c98,
+ 0x0f98060d,
+ 0x00e7f104,
+ 0x5c21f508,
+ 0x0721f501,
+ 0x0601f402,
+ 0xf11412f4,
+ 0xf04afc17,
+ 0x27f00213,
+ 0x0012d00d,
+ 0x020721f5,
+ 0x048f21f5,
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
new file mode 100644
index 00000000000..a1a599124cf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
@@ -0,0 +1,808 @@
+/* fuc microcode for nvc0 PGRAPH/HUB
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build:
+ * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
+ */
+
+.section nvc0_grhub_data
+include(`nvc0_graph.fuc')
+gpc_count: .b32 0
+rop_count: .b32 0
+cmd_queue: queue_init
+hub_mmio_list_head: .b32 0
+hub_mmio_list_tail: .b32 0
+
+ctx_current: .b32 0
+
+chipsets:
+.b8 0xc0 0 0 0
+.b16 nvc0_hub_mmio_head
+.b16 nvc0_hub_mmio_tail
+.b8 0xc1 0 0 0
+.b16 nvc0_hub_mmio_head
+.b16 nvc1_hub_mmio_tail
+.b8 0xc3 0 0 0
+.b16 nvc0_hub_mmio_head
+.b16 nvc0_hub_mmio_tail
+.b8 0xc4 0 0 0
+.b16 nvc0_hub_mmio_head
+.b16 nvc0_hub_mmio_tail
+.b8 0xc8 0 0 0
+.b16 nvc0_hub_mmio_head
+.b16 nvc0_hub_mmio_tail
+.b8 0xce 0 0 0
+.b16 nvc0_hub_mmio_head
+.b16 nvc0_hub_mmio_tail
+.b8 0 0 0 0
+
+nvc0_hub_mmio_head:
+mmctx_data(0x17e91c, 2)
+mmctx_data(0x400204, 2)
+mmctx_data(0x404004, 11)
+mmctx_data(0x404044, 1)
+mmctx_data(0x404094, 14)
+mmctx_data(0x4040d0, 7)
+mmctx_data(0x4040f8, 1)
+mmctx_data(0x404130, 3)
+mmctx_data(0x404150, 3)
+mmctx_data(0x404164, 2)
+mmctx_data(0x404174, 3)
+mmctx_data(0x404200, 8)
+mmctx_data(0x404404, 14)
+mmctx_data(0x404460, 4)
+mmctx_data(0x404480, 1)
+mmctx_data(0x404498, 1)
+mmctx_data(0x404604, 4)
+mmctx_data(0x404618, 32)
+mmctx_data(0x404698, 21)
+mmctx_data(0x4046f0, 2)
+mmctx_data(0x404700, 22)
+mmctx_data(0x405800, 1)
+mmctx_data(0x405830, 3)
+mmctx_data(0x405854, 1)
+mmctx_data(0x405870, 4)
+mmctx_data(0x405a00, 2)
+mmctx_data(0x405a18, 1)
+mmctx_data(0x406020, 1)
+mmctx_data(0x406028, 4)
+mmctx_data(0x4064a8, 2)
+mmctx_data(0x4064b4, 2)
+mmctx_data(0x407804, 1)
+mmctx_data(0x40780c, 6)
+mmctx_data(0x4078bc, 1)
+mmctx_data(0x408000, 7)
+mmctx_data(0x408064, 1)
+mmctx_data(0x408800, 3)
+mmctx_data(0x408900, 4)
+mmctx_data(0x408980, 1)
+nvc0_hub_mmio_tail:
+mmctx_data(0x4064c0, 2)
+nvc1_hub_mmio_tail:
+
+.align 256
+chan_data:
+chan_mmio_count: .b32 0
+chan_mmio_address: .b32 0
+
+.align 256
+xfer_data: .b32 0
+
+.section nvc0_grhub_code
+bra init
+define(`include_code')
+include(`nvc0_graph.fuc')
+
+// reports an exception to the host
+//
+// In: $r15 error code (see nvc0_graph.fuc)
+//
+error:
+ push $r14
+ mov $r14 0x814
+ shl b32 $r14 6
+ iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code
+ mov $r14 0xc1c
+ shl b32 $r14 6
+ mov $r15 1
+ iowr I[$r14 + 0x000] $r15 // INTR_UP_SET
+ pop $r14
+ ret
+
+// HUB fuc initialisation, executed by triggering ucode start, will
+// fall through to main loop after completion.
+//
+// Input:
+// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
+//
+// Output:
+// CC_SCRATCH[0]:
+// 31:31: set to signal completion
+// CC_SCRATCH[1]:
+// 31:0: total PGRAPH context size
+//
+init:
+ clear b32 $r0
+ mov $sp $r0
+ mov $xdbase $r0
+
+ // enable fifo access
+ mov $r1 0x1200
+ mov $r2 2
+ iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
+
+ // setup i0 handler, and route all interrupts to it
+ mov $r1 ih
+ mov $iv0 $r1
+ mov $r1 0x400
+ iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
+
+ // route HUB_CHANNEL_SWITCH to fuc interrupt 8
+ mov $r3 0x404
+ shl b32 $r3 6
+ mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
+ iowr I[$r3 + 0x000] $r2
+
+ // not sure what these are, route them because NVIDIA does, and
+ // the IRQ handler will signal the host if we ever get one.. we
+ // may find out if/why we need to handle these if so..
+ //
+ mov $r2 0x2004
+ iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
+ mov $r2 0x200b
+ iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
+ mov $r2 0x200c
+ iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
+
+ // enable all INTR_UP interrupts
+ mov $r2 0xc24
+ shl b32 $r2 6
+ not b32 $r3 $r0
+ iowr I[$r2] $r3
+
+ // enable fifo, ctxsw, 9, 10, 15 interrupts
+ mov $r2 -0x78fc // 0x8704
+ sethi $r2 0
+ iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
+
+ // fifo level triggered, rest edge
+ sub b32 $r1 0x100
+ mov $r2 4
+ iowr I[$r1] $r2
+
+ // enable interrupts
+ bset $flags ie0
+
+ // fetch enabled GPC/ROP counts
+ mov $r14 -0x69fc // 0x409604
+ sethi $r14 0x400000
+ call nv_rd32
+ extr $r1 $r15 16:20
+ st b32 D[$r0 + rop_count] $r1
+ and $r15 0x1f
+ st b32 D[$r0 + gpc_count] $r15
+
+ // set BAR_REQMASK to GPC mask
+ mov $r1 1
+ shl b32 $r1 $r15
+ sub b32 $r1 1
+ mov $r2 0x40c
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r1
+ iowr I[$r2 + 0x100] $r1
+
+ // find context data for this chipset
+ mov $r2 0x800
+ shl b32 $r2 6
+ iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
+ mov $r15 chipsets - 8
+ init_find_chipset:
+ add b32 $r15 8
+ ld b32 $r3 D[$r15 + 0x00]
+ cmpu b32 $r3 $r2
+ bra e init_context
+ cmpu b32 $r3 0
+ bra ne init_find_chipset
+ // unknown chipset
+ ret
+
+ // context size calculation, reserve first 256 bytes for use by fuc
+ init_context:
+ mov $r1 256
+
+ // calculate size of mmio context data
+ ld b16 $r14 D[$r15 + 4]
+ ld b16 $r15 D[$r15 + 6]
+ sethi $r14 0
+ st b32 D[$r0 + hub_mmio_list_head] $r14
+ st b32 D[$r0 + hub_mmio_list_tail] $r15
+ call mmctx_size
+
+ // set mmctx base addresses now so we don't have to do it later,
+ // they don't (currently) ever change
+ mov $r3 0x700
+ shl b32 $r3 6
+ shr b32 $r4 $r1 8
+ iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE
+ iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE
+ add b32 $r3 0x1300
+ add b32 $r1 $r15
+ shr b32 $r15 2
+ iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!?
+
+ // strands, base offset needs to be aligned to 256 bytes
+ shr b32 $r1 8
+ add b32 $r1 1
+ shl b32 $r1 8
+ mov b32 $r15 $r1
+ call strand_ctx_init
+ add b32 $r1 $r15
+
+ // initialise each GPC in sequence by passing in the offset of its
+ // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
+ // has previously been uploaded by the host) running.
+ //
+ // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
+ // when it has completed, and return the size of its context data
+ // in GPCn_CC_SCRATCH[1]
+ //
+ ld b32 $r3 D[$r0 + gpc_count]
+ mov $r4 0x2000
+ sethi $r4 0x500000
+ init_gpc:
+ // setup, and start GPC ucode running
+ add b32 $r14 $r4 0x804
+ mov b32 $r15 $r1
+ call nv_wr32 // CC_SCRATCH[1] = ctx offset
+ add b32 $r14 $r4 0x800
+ mov b32 $r15 $r2
+ call nv_wr32 // CC_SCRATCH[0] = chipset
+ add b32 $r14 $r4 0x10c
+ clear b32 $r15
+ call nv_wr32
+ add b32 $r14 $r4 0x104
+ call nv_wr32 // ENTRY
+ add b32 $r14 $r4 0x100
+ mov $r15 2 // CTRL_START_TRIGGER
+ call nv_wr32 // CTRL
+
+ // wait for it to complete, and adjust context size
+ add b32 $r14 $r4 0x800
+ init_gpc_wait:
+ call nv_rd32
+ xbit $r15 $r15 31
+ bra e init_gpc_wait
+ add b32 $r14 $r4 0x804
+ call nv_rd32
+ add b32 $r1 $r15
+
+ // next!
+ add b32 $r4 0x8000
+ sub b32 $r3 1
+ bra ne init_gpc
+
+ // save context size, and tell host we're ready
+ mov $r2 0x800
+ shl b32 $r2 6
+ iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size
+ add b32 $r2 0x800
+ clear b32 $r1
+ bset $r1 31
+ iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000
+
+// Main program loop, very simple, sleeps until woken up by the interrupt
+// handler, pulls a command from the queue and executes its handler
+//
+main:
+ // sleep until we have something to do
+ bset $flags $p0
+ sleep $p0
+ mov $r13 cmd_queue
+ call queue_get
+ bra $p1 main
+
+ // context switch, requested by GPU?
+ cmpu b32 $r14 0x4001
+ bra ne main_not_ctx_switch
+ trace_set(T_AUTO)
+ mov $r1 0xb00
+ shl b32 $r1 6
+ iord $r2 I[$r1 + 0x100] // CHAN_NEXT
+ iord $r1 I[$r1 + 0x000] // CHAN_CUR
+
+ xbit $r3 $r1 31
+ bra e chsw_no_prev
+ xbit $r3 $r2 31
+ bra e chsw_prev_no_next
+ push $r2
+ mov b32 $r2 $r1
+ trace_set(T_SAVE)
+ bclr $flags $p1
+ bset $flags $p2
+ call ctx_xfer
+ trace_clr(T_SAVE);
+ pop $r2
+ trace_set(T_LOAD);
+ bset $flags $p1
+ call ctx_xfer
+ trace_clr(T_LOAD);
+ bra chsw_done
+ chsw_prev_no_next:
+ push $r2
+ mov b32 $r2 $r1
+ bclr $flags $p1
+ bclr $flags $p2
+ call ctx_xfer
+ pop $r2
+ mov $r1 0xb00
+ shl b32 $r1 6
+ iowr I[$r1] $r2
+ bra chsw_done
+ chsw_no_prev:
+ xbit $r3 $r2 31
+ bra e chsw_done
+ bset $flags $p1
+ bclr $flags $p2
+ call ctx_xfer
+
+ // ack the context switch request
+ chsw_done:
+ mov $r1 0xb0c
+ shl b32 $r1 6
+ mov $r2 1
+ iowr I[$r1 + 0x000] $r2 // 0x409b0c
+ trace_clr(T_AUTO)
+ bra main
+
+ // request to set current channel? (*not* a context switch)
+ main_not_ctx_switch:
+ cmpu b32 $r14 0x0001
+ bra ne main_not_ctx_chan
+ mov b32 $r2 $r15
+ call ctx_chan
+ bra main_done
+
+ // request to store current channel context?
+ main_not_ctx_chan:
+ cmpu b32 $r14 0x0002
+ bra ne main_not_ctx_save
+ trace_set(T_SAVE)
+ bclr $flags $p1
+ bclr $flags $p2
+ call ctx_xfer
+ trace_clr(T_SAVE)
+ bra main_done
+
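+	// anything else is an unknown command, report it to the host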
+ main_not_ctx_save:
+ shl b32 $r15 $r14 16
+ or $r15 E_BAD_COMMAND
+ call error
+ bra main
+
+ main_done:
+ mov $r1 0x820
+ shl b32 $r1 6
+ clear b32 $r2
+ bset $r2 31
+ iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
+ bra main
+
+// interrupt handler
+ih:
+ push $r8
+ mov $r8 $flags
+ push $r8
+ push $r9
+ push $r10
+ push $r11
+ push $r13
+ push $r14
+ push $r15
+
+ // incoming fifo command?
+ iord $r10 I[$r0 + 0x200] // INTR
+ and $r11 $r10 0x00000004
+ bra e ih_no_fifo
+ // queue incoming fifo command for later processing
+ mov $r11 0x1900
+ mov $r13 cmd_queue
+ iord $r14 I[$r11 + 0x100] // FIFO_CMD
+ iord $r15 I[$r11 + 0x000] // FIFO_DATA
+ call queue_put
+ add b32 $r11 0x400
+ mov $r14 1
+ iowr I[$r11 + 0x000] $r14 // FIFO_ACK
+
+ // context switch request?
+ ih_no_fifo:
+ and $r11 $r10 0x00000100
+ bra e ih_no_ctxsw
+ // enqueue a context switch for later processing
+ mov $r13 cmd_queue
+ mov $r14 0x4001
+ call queue_put
+
+ // anything we didn't handle, bring it to the host's attention
+ ih_no_ctxsw:
+ mov $r11 0x104
+ not b32 $r11
+ and $r11 $r10 $r11
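+	// $r11 = pending INTR bits other than the FIFO/ctxsw bits handled above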
+ bra e ih_no_other
+ mov $r10 0xc1c
+ shl b32 $r10 6
+ iowr I[$r10] $r11 // INTR_UP_SET
+
+ // ack, and wake up main()
+ ih_no_other:
+ iowr I[$r0 + 0x100] $r10 // INTR_ACK
+
+ pop $r15
+ pop $r14
+ pop $r13
+ pop $r11
+ pop $r10
+ pop $r9
+ pop $r8
+ mov $flags $r8
+ pop $r8
+ bclr $flags $p0
+ iret
+
+// Not really sure why, but MEM_CMD 7 will hang forever if this isn't done
+ctx_4160s:
+ mov $r14 0x4160
+ sethi $r14 0x400000
+ mov $r15 1
+ call nv_wr32
+ ctx_4160s_wait:
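+	// poll 0x404160 until bit 4 reads back as set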
+ call nv_rd32
+ xbit $r15 $r15 4
+ bra e ctx_4160s_wait
+ ret
+
+// Without clearing this again at the end of the xfer, some things cause
+// PGRAPH to hang with STATUS=0x00000007 until it's cleared. fbcon can
+// still function with it set, however.
+ctx_4160c:
+ mov $r14 0x4160
+ sethi $r14 0x400000
+ clear b32 $r15
+ call nv_wr32
+ ret
+
+// Again, not really sure
+//
+// In: $r15 value to set 0x404170 to
+//
+ctx_4170s:
+ mov $r14 0x4170
+ sethi $r14 0x400000
+ or $r15 0x10
+ call nv_wr32
+ ret
+
+// Waits for a ctx_4170s() call to complete
+//
+ctx_4170w:
+ mov $r14 0x4170
+ sethi $r14 0x400000
+ call nv_rd32
+ and $r15 0x10
+ bra ne ctx_4170w
+ ret
+
+// Disables various things, waits a bit, and re-enables them.
+//
+// Not sure exactly how this helps; perhaps "ENABLE" is not such a
+// good description for the bits we turn off? Anyway, without this,
+// funny things happen.
+//
+ctx_redswitch:
+ mov $r14 0x614
+ shl b32 $r14 6
+ mov $r15 0x270
+ iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
+ mov $r15 8
+ ctx_redswitch_delay:
+ sub b32 $r15 1
+ bra ne ctx_redswitch_delay
+ mov $r15 0x770
+ iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
+ ret
+
+// Not a clue what this is for, except that unless the value is 0x10, the
+// strand context is saved (and presumably restored) incorrectly.
+//
+// In: $r15 value to set to (0x00/0x10 are used)
+//
+ctx_86c:
+ mov $r14 0x86c
+ shl b32 $r14 6
+ iowr I[$r14] $r15 // HUB(0x86c) = val
+ mov $r14 -0x75ec
+ sethi $r14 0x400000
+ call nv_wr32 // ROP(0xa14) = val
+ mov $r14 -0x5794
+ sethi $r14 0x410000
+ call nv_wr32 // GPC(0x86c) = val
+ ret
+
+// ctx_load - loads a channel's ctxctl data, and selects its vm
+//
+// In: $r2 channel address
+//
+ctx_load:
+ trace_set(T_CHAN)
+
+ // switch to channel, somewhat magic in parts..
+ mov $r10 12 // DONE_UNK12
+ call wait_donez
+ mov $r1 0xa24
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r0 // 0x409a24
+ mov $r3 0xb00
+ shl b32 $r3 6
+ iowr I[$r3 + 0x100] $r2 // CHAN_NEXT
+ mov $r1 0xa0c
+ shl b32 $r1 6
+ mov $r4 7
+ iowr I[$r1 + 0x000] $r2 // MEM_CHAN
+ iowr I[$r1 + 0x100] $r4 // MEM_CMD
+ ctx_chan_wait_0:
+ iord $r4 I[$r1 + 0x100]
+ and $r4 0x1f
+ bra ne ctx_chan_wait_0
+ iowr I[$r3 + 0x000] $r2 // CHAN_CUR
+
+ // load channel header, fetch PGRAPH context pointer
+ mov $xtargets $r0
+ bclr $r2 31
+ shl b32 $r2 4
+ add b32 $r2 2
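+	// $r2 = channel descriptor address with the valid bit stripped, as MEM_BASE expects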
+
+ trace_set(T_LCHAN)
+ mov $r1 0xa04
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r2 // MEM_BASE
+ mov $r1 0xa20
+ shl b32 $r1 6
+ mov $r2 0x0002
+ sethi $r2 0x80000000
+ iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
+ mov $r1 0x10 // chan + 0x0210
+ mov $r2 xfer_data
+ sethi $r2 0x00020000 // 16 bytes
+ xdld $r1 $r2
+ xdwait
+ trace_clr(T_LCHAN)
+
+ // update current context
+ ld b32 $r1 D[$r0 + xfer_data + 4]
+ shl b32 $r1 24
+ ld b32 $r2 D[$r0 + xfer_data + 0]
+ shr b32 $r2 8
+ or $r1 $r2
+ st b32 D[$r0 + ctx_current] $r1
+
+ // set transfer base to start of context, and fetch context header
+ trace_set(T_LCTXH)
+ mov $r2 0xa04
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r1 // MEM_BASE
+ mov $r2 1
+ mov $r1 0xa20
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
+ mov $r1 chan_data
+ sethi $r1 0x00060000 // 256 bytes
+ xdld $r0 $r1
+ xdwait
+ trace_clr(T_LCTXH)
+
+ trace_clr(T_CHAN)
+ ret
+
+// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
+// the active channel for ctxctl, but not actually transfer
+//	      any context data. Intended for use only during initial
+// context construction.
+//
+// In: $r2 channel address
+//
+ctx_chan:
+ call ctx_4160s
+ call ctx_load
+ mov $r10 12 // DONE_UNK12
+ call wait_donez
+ mov $r1 0xa10
+ shl b32 $r1 6
+ mov $r2 5
+ iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???)
+ ctx_chan_wait:
+ iord $r2 I[$r1 + 0x000]
+ or $r2 $r2
+ bra ne ctx_chan_wait
+ call ctx_4160c
+ ret
+
+// Execute per-context state overrides list
+//
+// Only executed on the first load of a channel. Might want to look into
+// removing this and having the host directly modify the channel's context
+// to change this state... The nouveau DRM already builds this list as
+// it's definitely needed for NVIDIA's, so we may as well use it for now
+//
+// In: $r1 mmio list length
+//
+ctx_mmio_exec:
+ // set transfer base to be the mmio list
+ ld b32 $r3 D[$r0 + chan_mmio_address]
+ mov $r2 0xa04
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r3 // MEM_BASE
+
+ clear b32 $r3
+ ctx_mmio_loop:
+ // fetch next 256 bytes of mmio list if necessary
+ and $r4 $r3 0xff
+ bra ne ctx_mmio_pull
+ mov $r5 xfer_data
+ sethi $r5 0x00060000 // 256 bytes
+ xdld $r3 $r5
+ xdwait
+
+ // execute a single list entry
+ ctx_mmio_pull:
+ ld b32 $r14 D[$r4 + xfer_data + 0x00]
+ ld b32 $r15 D[$r4 + xfer_data + 0x04]
+ call nv_wr32
+
+ // next!
+ add b32 $r3 8
+ sub b32 $r1 1
+ bra ne ctx_mmio_loop
+
+ // set transfer base back to the current context
+ ctx_mmio_done:
+ ld b32 $r3 D[$r0 + ctx_current]
+ iowr I[$r2 + 0x000] $r3 // MEM_BASE
+
+ // disable the mmio list now, we don't need/want to execute it again
+ st b32 D[$r0 + chan_mmio_count] $r0
+ mov $r1 chan_data
+ sethi $r1 0x00060000 // 256 bytes
+ xdst $r0 $r1
+ xdwait
+ ret
+
+// Transfer HUB context data between GPU and storage area
+//
+// In: $r2 channel address
+// $p1 clear on save, set on load
+// $p2 set if opposite direction done/will be done, so:
+// on save it means: "a load will follow this save"
+//	 on load it means: "a save preceded this load"
+//
+ctx_xfer:
+ bra not $p1 ctx_xfer_pre
+ bra $p2 ctx_xfer_pre_load
+ ctx_xfer_pre:
+ mov $r15 0x10
+ call ctx_86c
+ call ctx_4160s
+ bra not $p1 ctx_xfer_exec
+
+ ctx_xfer_pre_load:
+ mov $r15 2
+ call ctx_4170s
+ call ctx_4170w
+ call ctx_redswitch
+ clear b32 $r15
+ call ctx_4170s
+ call ctx_load
+
+ // fetch context pointer, and initiate xfer on all GPCs
+ ctx_xfer_exec:
+ ld b32 $r1 D[$r0 + ctx_current]
+ mov $r2 0x414
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
+ mov $r14 -0x5b00
+ sethi $r14 0x410000
+ mov b32 $r15 $r1
+ call nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
+ add b32 $r14 4
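+	// pack $p1 (direction) and $p2 (second transfer) into the xfer type word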
+ xbit $r15 $flags $p1
+ xbit $r2 $flags $p2
+ shl b32 $r2 1
+ or $r15 $r2
+ call nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+
+ // strands
+ mov $r1 0x4afc
+ sethi $r1 0x20000
+ mov $r2 0xc
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
+ call strand_wait
+ mov $r2 0x47fc
+ sethi $r2 0x20000
+ iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
+ xbit $r2 $flags $p1
+ add b32 $r2 3
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+
+ // mmio context
+ xbit $r10 $flags $p1 // direction
+ or $r10 6 // first, last
+ mov $r11 0 // base = 0
+ ld b32 $r12 D[$r0 + hub_mmio_list_head]
+ ld b32 $r13 D[$r0 + hub_mmio_list_tail]
+ mov $r14 0 // not multi
+ call mmctx_xfer
+
+ // wait for GPCs to all complete
+ mov $r10 8 // DONE_BAR
+ call wait_doneo
+
+ // wait for strand xfer to complete
+ call strand_wait
+
+ // post-op
+ bra $p1 ctx_xfer_post
+ mov $r10 12 // DONE_UNK12
+ call wait_donez
+ mov $r1 0xa10
+ shl b32 $r1 6
+ mov $r2 5
+ iowr I[$r1] $r2 // MEM_CMD
+ ctx_xfer_post_save_wait:
+ iord $r2 I[$r1]
+ or $r2 $r2
+ bra ne ctx_xfer_post_save_wait
+
+ bra $p2 ctx_xfer_done
+ ctx_xfer_post:
+ mov $r15 2
+ call ctx_4170s
+ clear b32 $r15
+ call ctx_86c
+ call strand_post
+ call ctx_4170w
+ clear b32 $r15
+ call ctx_4170s
+
+ bra not $p1 ctx_xfer_no_post_mmio
+ ld b32 $r1 D[$r0 + chan_mmio_count]
+ or $r1 $r1
+ bra e ctx_xfer_no_post_mmio
+ call ctx_mmio_exec
+
+ ctx_xfer_no_post_mmio:
+ call ctx_4160c
+
+ ctx_xfer_done:
+ ret
+
+.align 256
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
new file mode 100644
index 00000000000..b3b541b6d04
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
@@ -0,0 +1,838 @@
+uint32_t nvc0_grhub_data[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x000000c0,
+ 0x012c0090,
+ 0x000000c1,
+ 0x01300090,
+ 0x000000c3,
+ 0x012c0090,
+ 0x000000c4,
+ 0x012c0090,
+ 0x000000c8,
+ 0x012c0090,
+ 0x000000ce,
+ 0x012c0090,
+ 0x00000000,
+ 0x0417e91c,
+ 0x04400204,
+ 0x28404004,
+ 0x00404044,
+ 0x34404094,
+ 0x184040d0,
+ 0x004040f8,
+ 0x08404130,
+ 0x08404150,
+ 0x04404164,
+ 0x08404174,
+ 0x1c404200,
+ 0x34404404,
+ 0x0c404460,
+ 0x00404480,
+ 0x00404498,
+ 0x0c404604,
+ 0x7c404618,
+ 0x50404698,
+ 0x044046f0,
+ 0x54404700,
+ 0x00405800,
+ 0x08405830,
+ 0x00405854,
+ 0x0c405870,
+ 0x04405a00,
+ 0x00405a18,
+ 0x00406020,
+ 0x0c406028,
+ 0x044064a8,
+ 0x044064b4,
+ 0x00407804,
+ 0x1440780c,
+ 0x004078bc,
+ 0x18408000,
+ 0x00408064,
+ 0x08408800,
+ 0x0c408900,
+ 0x00408980,
+ 0x044064c0,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t nvc0_grhub_code[] = {
+ 0x03090ef5,
+ 0x9800d898,
+ 0x86f001d9,
+ 0x0489b808,
+ 0xf00c1bf4,
+ 0x21f502f7,
+ 0x00f802ec,
+ 0xb60798c4,
+ 0x8dbb0384,
+ 0x0880b600,
+ 0x80008e80,
+ 0x90b6018f,
+ 0x0f94f001,
+ 0xf801d980,
+ 0x0131f400,
+ 0x9800d898,
+ 0x89b801d9,
+ 0x210bf404,
+ 0xb60789c4,
+ 0x9dbb0394,
+ 0x0890b600,
+ 0x98009e98,
+ 0x80b6019f,
+ 0x0f84f001,
+ 0xf400d880,
+ 0x00f80132,
+ 0x0728b7f1,
+ 0xb906b4b6,
+ 0xc9f002ec,
+ 0x00bcd01f,
+ 0xc800bccf,
+ 0x1bf41fcc,
+ 0x06a7f0fa,
+ 0x010321f5,
+ 0xf840bfcf,
+ 0x28b7f100,
+ 0x06b4b607,
+ 0xb980bfd0,
+ 0xc9f002ec,
+ 0x1ec9f01f,
+ 0xcf00bcd0,
+ 0xccc800bc,
+ 0xfa1bf41f,
+ 0x87f100f8,
+ 0x84b60430,
+ 0x1ff9f006,
+ 0xf8008fd0,
+ 0x3087f100,
+ 0x0684b604,
+ 0xf80080d0,
+ 0x3c87f100,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d000,
+ 0x081887f1,
+ 0xd00684b6,
+ 0x87f1008a,
+ 0x84b60400,
+ 0x0088cf06,
+ 0xf4888aff,
+ 0x87f1f31b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00099,
+ 0xf100f800,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00099f0,
+ 0x87f10089,
+ 0x84b60818,
+ 0x008ad006,
+ 0x040087f1,
+ 0xcf0684b6,
+ 0x8aff0088,
+ 0xf30bf488,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0099f094,
+ 0xf80089d0,
+ 0x9894bd00,
+ 0x85b600e8,
+ 0x0180b61a,
+ 0xbb0284b6,
+ 0xe0b60098,
+ 0x04efb804,
+ 0xb9eb1bf4,
+ 0x00f8029f,
+ 0x083c87f1,
+ 0xbd0684b6,
+ 0x0199f094,
+ 0xf10089d0,
+ 0xb6071087,
+ 0x94bd0684,
+ 0xf405bbfd,
+ 0x8bd0090b,
+ 0x0099f000,
+ 0xf405eefd,
+ 0x8ed00c0b,
+ 0xc08fd080,
+ 0xb70199f0,
+ 0xc8010080,
+ 0xb4b600ab,
+ 0x0cb9f010,
+ 0xb601aec8,
+ 0xbefd11e4,
+ 0x008bd005,
+ 0xf0008ecf,
+ 0x0bf41fe4,
+ 0x00ce98fa,
+ 0xd005e9fd,
+ 0xc0b6c08e,
+ 0x04cdb804,
+ 0xc8e81bf4,
+ 0x1bf402ab,
+ 0x008bcf18,
+ 0xb01fb4f0,
+ 0x1bf410b4,
+ 0x02a7f0f7,
+ 0xf4c921f4,
+ 0xabc81b0e,
+ 0x10b4b600,
+ 0xf00cb9f0,
+ 0x8bd012b9,
+ 0x008bcf00,
+ 0xf412bbc8,
+ 0x87f1fa1b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00199,
+ 0xf900f800,
+ 0x02a7f0a0,
+ 0xfcc921f4,
+ 0xf100f8a0,
+ 0xf04afc87,
+ 0x97f00283,
+ 0x0089d00c,
+ 0x020721f5,
+ 0x87f100f8,
+ 0x83f04afc,
+ 0x0d97f002,
+ 0xf50089d0,
+ 0xf8020721,
+ 0xfca7f100,
+ 0x02a3f04f,
+ 0x0500aba2,
+ 0xd00fc7f0,
+ 0xc7f000ac,
+ 0x00bcd00b,
+ 0x020721f5,
+ 0xf000aed0,
+ 0xbcd00ac7,
+ 0x0721f500,
+ 0xf100f802,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x21f50089,
+ 0xe7f00213,
+ 0x3921f503,
+ 0xfca7f102,
+ 0x02a3f046,
+ 0x0400aba0,
+ 0xf040a0d0,
+ 0xbcd001c7,
+ 0x0721f500,
+ 0x010c9202,
+ 0xf000acd0,
+ 0xbcd002c7,
+ 0x0721f500,
+ 0x2621f502,
+ 0x8087f102,
+ 0x0684b608,
+ 0xb70089cf,
+ 0x95220080,
+ 0x8ed008fe,
+ 0x408ed000,
+ 0xb6808acf,
+ 0xa0b606a5,
+ 0x00eabb01,
+ 0xb60480b6,
+ 0x1bf40192,
+ 0x08e4b6e8,
+ 0xf1f2efbc,
+ 0xb6085c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x00f80089,
+ 0xe7f1e0f9,
+ 0xe4b60814,
+ 0x00efd006,
+ 0x0c1ce7f1,
+ 0xf006e4b6,
+ 0xefd001f7,
+ 0xf8e0fc00,
+ 0xfe04bd00,
+ 0x07fe0004,
+ 0x0017f100,
+ 0x0227f012,
+ 0xf10012d0,
+ 0xfe05b917,
+ 0x17f10010,
+ 0x10d00400,
+ 0x0437f1c0,
+ 0x0634b604,
+ 0x200327f1,
+ 0xf10032d0,
+ 0xd0200427,
+ 0x27f10132,
+ 0x32d0200b,
+ 0x0c27f102,
+ 0x0732d020,
+ 0x0c2427f1,
+ 0xb90624b6,
+ 0x23d00003,
+ 0x0427f100,
+ 0x0023f087,
+ 0xb70012d0,
+ 0xf0010012,
+ 0x12d00427,
+ 0x1031f400,
+ 0x9604e7f1,
+ 0xf440e3f0,
+ 0xf1c76821,
+ 0x01018090,
+ 0x801ff4f0,
+ 0x17f0000f,
+ 0x041fbb01,
+ 0xf10112b6,
+ 0xb6040c27,
+ 0x21d00624,
+ 0x4021d000,
+ 0x080027f1,
+ 0xcf0624b6,
+ 0xf7f00022,
+ 0x08f0b654,
+ 0xb800f398,
+ 0x0bf40432,
+ 0x0034b00b,
+ 0xf8f11bf4,
+ 0x0017f100,
+ 0x02fe5801,
+ 0xf003ff58,
+ 0x0e8000e3,
+ 0x150f8014,
+ 0x013d21f5,
+ 0x070037f1,
+ 0x950634b6,
+ 0x34d00814,
+ 0x4034d000,
+ 0x130030b7,
+ 0xb6001fbb,
+ 0x3fd002f5,
+ 0x0815b600,
+ 0xb60110b6,
+ 0x1fb90814,
+ 0x6321f502,
+ 0x001fbb02,
+ 0xf1000398,
+ 0xf0200047,
+ 0x4ea05043,
+ 0x1fb90804,
+ 0x8d21f402,
+ 0x08004ea0,
+ 0xf4022fb9,
+ 0x4ea08d21,
+ 0xf4bd010c,
+ 0xa08d21f4,
+ 0xf401044e,
+ 0x4ea08d21,
+ 0xf7f00100,
+ 0x8d21f402,
+ 0x08004ea0,
+ 0xc86821f4,
+ 0x0bf41fff,
+ 0x044ea0fa,
+ 0x6821f408,
+ 0xb7001fbb,
+ 0xb6800040,
+ 0x1bf40132,
+ 0x0027f1b4,
+ 0x0624b608,
+ 0xb74021d0,
+ 0xbd080020,
+ 0x1f19f014,
+ 0xf40021d0,
+ 0x28f40031,
+ 0x08d7f000,
+ 0xf43921f4,
+ 0xe4b1f401,
+ 0x1bf54001,
+ 0x87f100d1,
+ 0x84b6083c,
+ 0xf094bd06,
+ 0x89d00499,
+ 0x0017f100,
+ 0x0614b60b,
+ 0xcf4012cf,
+ 0x13c80011,
+ 0x7e0bf41f,
+ 0xf41f23c8,
+ 0x20f95a0b,
+ 0xf10212b9,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00799f0,
+ 0x32f40089,
+ 0x0231f401,
+ 0x082921f5,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0799f094,
+ 0xfc0089d0,
+ 0x3c87f120,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d006,
+ 0xf50131f4,
+ 0xf1082921,
+ 0xb6085c87,
+ 0x94bd0684,
+ 0xd00699f0,
+ 0x0ef40089,
+ 0xb920f931,
+ 0x32f40212,
+ 0x0232f401,
+ 0x082921f5,
+ 0x17f120fc,
+ 0x14b60b00,
+ 0x0012d006,
+ 0xc8130ef4,
+ 0x0bf41f23,
+ 0x0131f40d,
+ 0xf50232f4,
+ 0xf1082921,
+ 0xb60b0c17,
+ 0x27f00614,
+ 0x0012d001,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0499f094,
+ 0xf50089d0,
+ 0xb0ff200e,
+ 0x1bf401e4,
+ 0x02f2b90d,
+ 0x07b521f5,
+ 0xb0420ef4,
+ 0x1bf402e4,
+ 0x3c87f12e,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d007,
+ 0xf40132f4,
+ 0x21f50232,
+ 0x87f10829,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00799,
+ 0x110ef400,
+ 0xf010ef94,
+ 0x21f501f5,
+ 0x0ef502ec,
+ 0x17f1fed1,
+ 0x14b60820,
+ 0xf024bd06,
+ 0x12d01f29,
+ 0xbe0ef500,
+ 0xfe80f9fe,
+ 0x80f90188,
+ 0xa0f990f9,
+ 0xd0f9b0f9,
+ 0xf0f9e0f9,
+ 0xc4800acf,
+ 0x0bf404ab,
+ 0x00b7f11d,
+ 0x08d7f019,
+ 0xcf40becf,
+ 0x21f400bf,
+ 0x00b0b704,
+ 0x01e7f004,
+ 0xe400bed0,
+ 0xf40100ab,
+ 0xd7f00d0b,
+ 0x01e7f108,
+ 0x0421f440,
+ 0x0104b7f1,
+ 0xabffb0bd,
+ 0x0d0bf4b4,
+ 0x0c1ca7f1,
+ 0xd006a4b6,
+ 0x0ad000ab,
+ 0xfcf0fc40,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0xf80032f4,
+ 0x60e7f101,
+ 0x40e3f041,
+ 0xf401f7f0,
+ 0x21f48d21,
+ 0x04ffc868,
+ 0xf8fa0bf4,
+ 0x60e7f100,
+ 0x40e3f041,
+ 0x21f4f4bd,
+ 0xf100f88d,
+ 0xf04170e7,
+ 0xf5f040e3,
+ 0x8d21f410,
+ 0xe7f100f8,
+ 0xe3f04170,
+ 0x6821f440,
+ 0xf410f4f0,
+ 0x00f8f31b,
+ 0x0614e7f1,
+ 0xf106e4b6,
+ 0xd00270f7,
+ 0xf7f000ef,
+ 0x01f2b608,
+ 0xf1fd1bf4,
+ 0xd00770f7,
+ 0x00f800ef,
+ 0x086ce7f1,
+ 0xd006e4b6,
+ 0xe7f100ef,
+ 0xe3f08a14,
+ 0x8d21f440,
+ 0xa86ce7f1,
+ 0xf441e3f0,
+ 0x00f88d21,
+ 0x083c87f1,
+ 0xbd0684b6,
+ 0x0599f094,
+ 0xf00089d0,
+ 0x21f40ca7,
+ 0x2417f1c9,
+ 0x0614b60a,
+ 0xf10010d0,
+ 0xb60b0037,
+ 0x32d00634,
+ 0x0c17f140,
+ 0x0614b60a,
+ 0xd00747f0,
+ 0x14d00012,
+ 0x4014cf40,
+ 0xf41f44f0,
+ 0x32d0fa1b,
+ 0x000bfe00,
+ 0xb61f2af0,
+ 0x20b60424,
+ 0x3c87f102,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d008,
+ 0x0a0417f1,
+ 0xd00614b6,
+ 0x17f10012,
+ 0x14b60a20,
+ 0x0227f006,
+ 0x800023f1,
+ 0xf00012d0,
+ 0x27f11017,
+ 0x23f00300,
+ 0x0512fa02,
+ 0x87f103f8,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00899,
+ 0xc1019800,
+ 0x981814b6,
+ 0x25b6c002,
+ 0x0512fd08,
+ 0xf1160180,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00999f0,
+ 0x27f10089,
+ 0x24b60a04,
+ 0x0021d006,
+ 0xf10127f0,
+ 0xb60a2017,
+ 0x12d00614,
+ 0x0017f100,
+ 0x0613f002,
+ 0xf80501fa,
+ 0x5c87f103,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d009,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0599f094,
+ 0xf80089d0,
+ 0x3121f500,
+ 0xb821f506,
+ 0x0ca7f006,
+ 0xf1c921f4,
+ 0xb60a1017,
+ 0x27f00614,
+ 0x0012d005,
+ 0xfd0012cf,
+ 0x1bf40522,
+ 0x4921f5fa,
+ 0x9800f806,
+ 0x27f18103,
+ 0x24b60a04,
+ 0x0023d006,
+ 0x34c434bd,
+ 0x0f1bf4ff,
+ 0x030057f1,
+ 0xfa0653f0,
+ 0x03f80535,
+ 0x98c04e98,
+ 0x21f4c14f,
+ 0x0830b68d,
+ 0xf40112b6,
+ 0x0398df1b,
+ 0x0023d016,
+ 0xf1800080,
+ 0xf0020017,
+ 0x01fa0613,
+ 0xf803f806,
+ 0x0611f400,
+ 0xf01102f4,
+ 0x21f510f7,
+ 0x21f50698,
+ 0x11f40631,
+ 0x02f7f01c,
+ 0x065721f5,
+ 0x066621f5,
+ 0x067821f5,
+ 0x21f5f4bd,
+ 0x21f50657,
+ 0x019806b8,
+ 0x1427f116,
+ 0x0624b604,
+ 0xf10020d0,
+ 0xf0a500e7,
+ 0x1fb941e3,
+ 0x8d21f402,
+ 0xf004e0b6,
+ 0x2cf001fc,
+ 0x0124b602,
+ 0xf405f2fd,
+ 0x17f18d21,
+ 0x13f04afc,
+ 0x0c27f002,
+ 0xf50012d0,
+ 0xf1020721,
+ 0xf047fc27,
+ 0x20d00223,
+ 0x012cf000,
+ 0xd00320b6,
+ 0xacf00012,
+ 0x06a5f001,
+ 0x9800b7f0,
+ 0x0d98140c,
+ 0x00e7f015,
+ 0x015c21f5,
+ 0xf508a7f0,
+ 0xf5010321,
+ 0xf4020721,
+ 0xa7f02201,
+ 0xc921f40c,
+ 0x0a1017f1,
+ 0xf00614b6,
+ 0x12d00527,
+ 0x0012cf00,
+ 0xf40522fd,
+ 0x02f4fa1b,
+ 0x02f7f032,
+ 0x065721f5,
+ 0x21f5f4bd,
+ 0x21f50698,
+ 0x21f50226,
+ 0xf4bd0666,
+ 0x065721f5,
+ 0x981011f4,
+ 0x11fd8001,
+ 0x070bf405,
+ 0x07df21f5,
+ 0x064921f5,
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 82357d2df1f..b701c439c92 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -32,7 +32,6 @@ struct nvc0_instmem_priv {
struct nouveau_channel *bar1;
struct nouveau_gpuobj *bar3_pgd;
struct nouveau_channel *bar3;
- struct nouveau_gpuobj *chan_pgd;
};
int
@@ -181,17 +180,11 @@ nvc0_instmem_init(struct drm_device *dev)
goto error;
/* channel vm */
- ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm);
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
+ &dev_priv->chan_vm);
if (ret)
goto error;
- ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
- if (ret)
- goto error;
-
- nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
- nouveau_vm_ref(NULL, &vm, NULL);
-
nvc0_instmem_resume(dev);
return 0;
error:
@@ -211,8 +204,7 @@ nvc0_instmem_takedown(struct drm_device *dev)
nv_wr32(dev, 0x1704, 0x00000000);
nv_wr32(dev, 0x1714, 0x00000000);
- nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd);
- nouveau_gpuobj_ref(NULL, &priv->chan_pgd);
+ nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
nvc0_channel_del(&priv->bar1);
nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index a179e6c55af..9e352944a35 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -105,7 +105,11 @@ nvc0_vm_flush(struct nouveau_vm *vm)
struct drm_device *dev = vm->dev;
struct nouveau_vm_pgd *vpgd;
unsigned long flags;
- u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
+ u32 engine;
+
+ engine = 1;
+ if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm)
+ engine |= 4;
pinstmem->flush(vm->dev);
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index 67c6ec6f34e..e45a24d84e9 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -61,9 +61,7 @@ nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
u32 type, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
- struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
- struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm *mm = dev_priv->engine.vram.mm;
struct nouveau_mm_node *r;
struct nouveau_mem *mem;
int ret;
@@ -105,9 +103,15 @@ int
nvc0_vram_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 length;
dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
- dev_priv->vram_rblock_size = 4096;
- return 0;
+
+ length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
+
+ return nouveau_mm_init(&vram->mm, rsvd_head, length, 1);
}
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 3896ef81110..9f363e0c4b6 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -5,6 +5,7 @@
ccflags-y := -Iinclude/drm
hostprogs-y := mkregtable
+clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
quiet_cmd_mkregtable = MKREGTABLE $@
cmd_mkregtable = $(obj)/mkregtable $< > $@
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index ebdb0fdb834..e88c64417a8 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1245,6 +1245,9 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
char name[512];
int i;
+ if (!ctx)
+ return NULL;
+
ctx->card = card;
ctx->bios = bios;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9541995e4b2..c742944d380 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -764,7 +764,7 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
}
static void atombios_crtc_program_pll(struct drm_crtc *crtc,
- int crtc_id,
+ u32 crtc_id,
int pll_id,
u32 encoder_mode,
u32 encoder_id,
@@ -851,8 +851,7 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v5.ucPpll = pll_id;
break;
case 6:
- args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id;
- args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10);
+ args.v6.ulDispEngClkFreq = cpu_to_le32(crtc_id << 24 | clock / 10);
args.v6.ucRefDiv = ref_div;
args.v6.usFbDiv = cpu_to_le16(fb_div);
args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 8c0f9e36ff8..7ad43c6b1db 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -613,6 +613,18 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
return true;
}
+bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if (!radeon_dp_get_link_status(radeon_connector, link_status))
+ return false;
+ if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
+ return false;
+ return true;
+}
+
struct radeon_dp_link_train_info {
struct radeon_device *rdev;
struct drm_encoder *encoder;
@@ -627,6 +639,7 @@ struct radeon_dp_link_train_info {
u8 train_set[4];
u8 link_status[DP_LINK_STATUS_SIZE];
u8 tries;
+ bool use_dpencoder;
};
static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
@@ -646,7 +659,7 @@ static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
int rtp = 0;
/* set training pattern on the source */
- if (ASIC_IS_DCE4(dp_info->rdev)) {
+ if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) {
switch (tp) {
case DP_TRAINING_PATTERN_1:
rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
@@ -706,7 +719,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
/* start training on the source */
- if (ASIC_IS_DCE4(dp_info->rdev))
+ if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
atombios_dig_encoder_setup(dp_info->encoder,
ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
else
@@ -731,7 +744,7 @@ static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info
DP_TRAINING_PATTERN_DISABLE);
/* disable the training pattern on the source */
- if (ASIC_IS_DCE4(dp_info->rdev))
+ if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
atombios_dig_encoder_setup(dp_info->encoder,
ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
else
@@ -869,7 +882,8 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
struct radeon_dp_link_train_info dp_info;
- u8 tmp;
+ int index;
+ u8 tmp, frev, crev;
if (!radeon_encoder->enc_priv)
return;
@@ -884,6 +898,18 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
(dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
return;
+	/* DPEncoderService newer than 1.1 can't properly program the
+	 * training pattern. When facing such a version, use
+	 * DIGXEncoderControl (X == 1 | 2) instead.
+ */
+ dp_info.use_dpencoder = true;
+ index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+ if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
+ if (crev > 1) {
+ dp_info.use_dpencoder = false;
+ }
+ }
+
dp_info.enc_id = 0;
if (dig->dig_encoder)
dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 445af798163..fb5fa089886 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -743,7 +743,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
!evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
!evergreen_check_latency_hiding(&wm) ||
(rdev->disp_priority == 2)) {
- DRM_INFO("force priority to high\n");
+ DRM_DEBUG_KMS("force priority to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
priority_b_cnt |= PRIORITY_ALWAYS_ON;
}
@@ -985,17 +985,19 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
{
save->vga_control[0] = RREG32(D1VGA_CONTROL);
save->vga_control[1] = RREG32(D2VGA_CONTROL);
- save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
- save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
- save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
- save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
+ save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
+ save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ }
+ if (rdev->num_crtc >= 6) {
+ save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
+ save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
@@ -1004,35 +1006,45 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
WREG32(VGA_RENDER_CONTROL, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
}
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
WREG32(D1VGA_CONTROL, 0);
WREG32(D2VGA_CONTROL, 0);
- WREG32(EVERGREEN_D3VGA_CONTROL, 0);
- WREG32(EVERGREEN_D4VGA_CONTROL, 0);
- WREG32(EVERGREEN_D5VGA_CONTROL, 0);
- WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+ if (rdev->num_crtc >= 4) {
+ WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+ }
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1055,7 +1067,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
@@ -1073,7 +1085,8 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
-
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
@@ -1101,31 +1114,41 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
/* Restore video state */
WREG32(D1VGA_CONTROL, save->vga_control[0]);
WREG32(D2VGA_CONTROL, save->vga_control[1]);
- WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
- WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
- WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
- WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
+ if (rdev->num_crtc >= 4) {
+ WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
+ WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
+ WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
+ }
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
}
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
@@ -1359,9 +1382,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
	/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
-#ifdef __BIG_ENDIAN
- RB_RPTR_SWAP(2) |
-#endif
((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
@@ -1977,7 +1997,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
gb_backend_map = 0x66442200;
break;
case CHIP_JUNIPER:
- gb_backend_map = 0x00006420;
+ gb_backend_map = 0x00002200;
break;
default:
gb_backend_map =
@@ -2013,9 +2033,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.tile_config |= (3 << 0);
break;
}
- /* num banks is 8 on all fusion asics */
+ /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
- rdev->config.evergreen.tile_config |= 8 << 4;
+ rdev->config.evergreen.tile_config |= 1 << 4;
else
rdev->config.evergreen.tile_config |=
((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
@@ -2024,6 +2044,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.tile_config |=
((gb_addr_config & 0x30000000) >> 28) << 12;
+ rdev->config.evergreen.backend_map = gb_backend_map;
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
@@ -2248,7 +2269,10 @@ int evergreen_mc_init(struct radeon_device *rdev)
	/* Get VRAM information */
rdev->mc.vram_is_ddr = true;
- tmp = RREG32(MC_ARB_RAMCFG);
+ if (rdev->flags & RADEON_IS_IGP)
+ tmp = RREG32(FUS_MC_ARB_RAMCFG);
+ else
+ tmp = RREG32(MC_ARB_RAMCFG);
if (tmp & CHANSIZE_OVERRIDE) {
chansize = 16;
} else if (tmp & CHANSIZE_MASK) {
@@ -2414,18 +2438,22 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
@@ -2544,19 +2572,25 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
}
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+ if (rdev->num_crtc >= 4) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+ }
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
@@ -2580,53 +2614,57 @@ static inline void evergreen_irq_ack(struct radeon_device *rdev)
rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
- rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
- rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
- rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
- rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (rdev->num_crtc >= 4) {
+ rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ }
+ if (rdev->num_crtc >= 6) {
+ rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ }
if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
- if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
- WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
- if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
- WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
- if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
- WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
- if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
- WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
-
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
-
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
- WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
- WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
-
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
- WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
- WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
-
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
- WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
- WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
-
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
- WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
- WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->num_crtc >= 4) {
+ if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+ }
+
+ if (rdev->num_crtc >= 6) {
+ if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+ }
if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
@@ -2721,6 +2759,9 @@ int evergreen_irq_process(struct radeon_device *rdev)
return IRQ_NONE;
}
restart_ih:
+ /* Order reading of wptr vs. reading of IH ring data */
+ rmb();
+
/* display interrupts */
evergreen_irq_ack(rdev);
@@ -3234,6 +3275,7 @@ void evergreen_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 57f3bc17b87..2eb251858e7 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -252,7 +252,7 @@ draw_auto(struct radeon_device *rdev)
}
-/* emits 36 */
+/* emits 39 */
static void
set_default_state(struct radeon_device *rdev)
{
@@ -531,6 +531,11 @@ set_default_state(struct radeon_device *rdev)
radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(rdev, 0);
+ /* setup LDS */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, 0x10001000);
+
/* SQ config */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
@@ -773,7 +778,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
- ring_size += 52; /* shaders + def state */
+ ring_size += 55; /* shaders + def state */
ring_size += 10; /* fence emit for VB IB */
ring_size += 5; /* done copy */
ring_size += 10; /* fence emit for done copy */
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 23d36417158..a134790903d 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -428,7 +428,7 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
i = (reg >> 7);
- if (i > last_reg) {
+ if (i >= last_reg) {
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
@@ -856,7 +856,6 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
case SQ_PGM_START_PS:
case SQ_PGM_START_HS:
case SQ_PGM_START_LS:
- case GDS_ADDR_BASE:
case SQ_CONST_MEM_BASE:
case SQ_ALU_CONST_CACHE_GS_0:
case SQ_ALU_CONST_CACHE_GS_1:
@@ -946,6 +945,34 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
}
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
+ case SX_MEMORY_EXPORT_BASE:
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ case CAYMAN_SX_SCATTER_EXPORT_BASE:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
default:
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
@@ -1153,6 +1180,34 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return r;
}
break;
+ case PACKET3_DISPATCH_DIRECT:
+ if (pkt->count != 3) {
+ DRM_ERROR("bad DISPATCH_DIRECT\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
+ break;
+ case PACKET3_DISPATCH_INDIRECT:
+ if (pkt->count != 1) {
+ DRM_ERROR("bad DISPATCH_INDIRECT\n");
+ return -EINVAL;
+ }
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad DISPATCH_INDIRECT\n");
+ return -EINVAL;
+ }
+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ r = evergreen_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
+ break;
case PACKET3_WAIT_REG_MEM:
if (pkt->count != 5) {
DRM_ERROR("bad WAIT_REG_MEM\n");
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 1636e344982..7363d9dec90 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -351,6 +351,7 @@
#define COLOR_BUFFER_SIZE(x) ((x) << 0)
#define POSITION_BUFFER_SIZE(x) ((x) << 8)
#define SMX_BUFFER_SIZE(x) ((x) << 16)
+#define SX_MEMORY_EXPORT_BASE 0x9010
#define SX_MISC 0x28350
#define CB_PERF_CTR0_SEL_0 0x9A20
@@ -466,7 +467,7 @@
#define IH_RB_WPTR_ADDR_LO 0x3e14
#define IH_CNTL 0x3e18
# define ENABLE_INTR (1 << 0)
-# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP(x) ((x) << 1)
# define IH_MC_SWAP_NONE 0
# define IH_MC_SWAP_16BIT 1
# define IH_MC_SWAP_32BIT 2
@@ -547,7 +548,7 @@
# define LB_D5_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD5_INTERRUPT (1 << 17)
# define DC_HPD5_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6050
+#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
# define LB_D6_VLINE_INTERRUPT (1 << 2)
# define LB_D6_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD6_INTERRUPT (1 << 17)
@@ -1122,6 +1123,7 @@
#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
#define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0
#define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7
+#define CAYMAN_SX_SCATTER_EXPORT_BASE 0x28358
/* cayman packet3 addition */
#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 16caafeadf5..44c4750f451 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -833,6 +833,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.tile_config |=
((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
+ rdev->config.cayman.backend_map = gb_backend_map;
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
@@ -1581,6 +1582,7 @@ void cayman_fini(struct radeon_device *rdev)
cayman_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 9736746da2d..4672869cdb2 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -320,7 +320,7 @@
#define CGTS_USER_TCC_DISABLE 0x914C
#define TCC_DISABLE_MASK 0xFFFF0000
#define TCC_DISABLE_SHIFT 16
-#define CGTS_SM_CTRL_REG 0x915C
+#define CGTS_SM_CTRL_REG 0x9150
#define OVERRIDE (1 << 21)
#define TA_CNTL_AUX 0x9508
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f79d2ccb675..aa5571b73aa 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1662,6 +1662,7 @@ void r600_gpu_init(struct radeon_device *rdev)
R6XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
rdev->config.r600.tile_config = tiling_config;
+ rdev->config.r600.backend_map = backend_map;
tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
@@ -2212,9 +2213,6 @@ int r600_cp_resume(struct radeon_device *rdev)
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
-#ifdef __BIG_ENDIAN
- RB_RPTR_SWAP(2) |
-#endif
((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
@@ -2628,6 +2626,7 @@ void r600_fini(struct radeon_device *rdev)
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
@@ -2993,10 +2992,6 @@ int r600_irq_init(struct radeon_device *rdev)
/* RPTR_REARM only works if msi's are enabled */
if (rdev->msi_enabled)
ih_cntl |= RPTR_REARM;
-
-#ifdef __BIG_ENDIAN
- ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
-#endif
WREG32(IH_CNTL, ih_cntl);
/* force the active interrupt state to all disabled */
@@ -3307,6 +3302,10 @@ int r600_irq_process(struct radeon_device *rdev)
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
+ /* No MSIs, need a dummy read to flush PCI DMAs */
+ if (!rdev->msi_enabled)
+ RREG32(IH_RB_WPTR);
+
wptr = r600_get_ih_wptr(rdev);
rptr = rdev->ih.rptr;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
@@ -3319,6 +3318,9 @@ int r600_irq_process(struct radeon_device *rdev)
}
restart_ih:
+ /* Order reading of wptr vs. reading of IH ring data */
+ rmb();
+
/* display interrupts */
r600_irq_ack(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index c3ab959bdc7..45fd592f960 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1802,8 +1802,8 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
/* Set ring buffer size */
#ifdef __BIG_ENDIAN
RADEON_WRITE(R600_CP_RB_CNTL,
- RADEON_BUF_SWAP_32BIT |
- RADEON_RB_NO_UPDATE |
+ R600_BUF_SWAP_32BIT |
+ R600_RB_NO_UPDATE |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#else
@@ -1820,15 +1820,15 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
#ifdef __BIG_ENDIAN
RADEON_WRITE(R600_CP_RB_CNTL,
- RADEON_BUF_SWAP_32BIT |
- RADEON_RB_NO_UPDATE |
- RADEON_RB_RPTR_WR_ENA |
+ R600_BUF_SWAP_32BIT |
+ R600_RB_NO_UPDATE |
+ R600_RB_RPTR_WR_ENA |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#else
RADEON_WRITE(R600_CP_RB_CNTL,
- RADEON_RB_NO_UPDATE |
- RADEON_RB_RPTR_WR_ENA |
+ R600_RB_NO_UPDATE |
+ R600_RB_RPTR_WR_ENA |
(dev_priv->ring.rptr_update_l2qw << 8) |
dev_priv->ring.size_l2qw);
#endif
@@ -1851,13 +1851,8 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
- ((unsigned long) dev->sg->virtual)
+ dev_priv->gart_vm_start;
}
- RADEON_WRITE(R600_CP_RB_RPTR_ADDR,
-#ifdef __BIG_ENDIAN
- (2 << 0) |
-#endif
- (rptr_addr & 0xfffffffc));
- RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
- upper_32_bits(rptr_addr));
+ RADEON_WRITE(R600_CP_RB_RPTR_ADDR, (rptr_addr & 0xfffffffc));
+ RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, upper_32_bits(rptr_addr));
#ifdef __BIG_ENDIAN
RADEON_WRITE(R600_CP_RB_CNTL,
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 909bda8dd55..cf83aa05a68 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -915,12 +915,11 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
{
struct r600_cs_track *track = (struct r600_cs_track *)p->track;
struct radeon_cs_reloc *reloc;
- u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
u32 m, i, tmp, *ib;
int r;
i = (reg >> 7);
- if (i > last_reg) {
+ if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
@@ -1200,6 +1199,15 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
}
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
+ case SX_MEMORY_EXPORT_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
default:
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index f140a0d5cb5..0245ae6c204 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -536,7 +536,7 @@
#define IH_RB_WPTR_ADDR_LO 0x3e14
#define IH_CNTL 0x3e18
# define ENABLE_INTR (1 << 0)
-# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP(x) ((x) << 1)
# define IH_MC_SWAP_NONE 0
# define IH_MC_SWAP_16BIT 1
# define IH_MC_SWAP_32BIT 2
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 27f45579e64..32807baf55e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -60,7 +60,7 @@
* are considered as fatal)
*/
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
@@ -179,6 +179,7 @@ void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
+int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
@@ -1002,6 +1003,7 @@ struct r600_asic {
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
+ unsigned backend_map;
struct r100_gpu_lockup lockup;
};
@@ -1027,6 +1029,7 @@ struct rv770_asic {
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
+ unsigned backend_map;
struct r100_gpu_lockup lockup;
};
@@ -1053,6 +1056,7 @@ struct evergreen_asic {
unsigned tiling_npipes;
unsigned tiling_group_size;
unsigned tile_config;
+ unsigned backend_map;
struct r100_gpu_lockup lockup;
};
@@ -1173,7 +1177,7 @@ struct radeon_device {
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
- void *rmmio;
+ void __iomem *rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
radeon_rreg_t pll_rreg;
@@ -1250,20 +1254,20 @@ int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
if (reg < rdev->rmmio_size)
- return readl(((void __iomem *)rdev->rmmio) + reg);
+ return readl((rdev->rmmio) + reg);
else {
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ writel(reg, (rdev->rmmio) + RADEON_MM_INDEX);
+ return readl((rdev->rmmio) + RADEON_MM_DATA);
}
}
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
if (reg < rdev->rmmio_size)
- writel(v, ((void __iomem *)rdev->rmmio) + reg);
+ writel(v, (rdev->rmmio) + reg);
else {
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ writel(reg, (rdev->rmmio) + RADEON_MM_INDEX);
+ writel(v, (rdev->rmmio) + RADEON_MM_DATA);
}
}
@@ -1295,10 +1299,10 @@ static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
/*
* Registers read & write functions.
*/
-#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
-#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
-#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg))
-#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg))
+#define RREG8(reg) readb((rdev->rmmio) + (reg))
+#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
+#define RREG16(reg) readw((rdev->rmmio) + (reg))
+#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
#define RREG32(reg) r100_mm_rreg(rdev, (reg))
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
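
The r100_mm_rreg()/r100_mm_wreg() hunks above keep the usual two-path MMIO scheme: registers inside the mapped BAR window are read and written directly, while anything beyond the window goes through an index/data indirect pair. A minimal sketch of that pattern follows; the window size and index/data offsets are placeholders, not the real radeon values (radeon uses RADEON_MM_INDEX/RADEON_MM_DATA).

#include <linux/io.h>

/* Illustrative offsets only. */
#define MMIO_WINDOW_SIZE  0x10000
#define MMIO_INDEX_OFF    0x0000
#define MMIO_DATA_OFF     0x0004

static u32 mmio_read(void __iomem *mmio, u32 reg)
{
	if (reg < MMIO_WINDOW_SIZE)
		return readl(mmio + reg);           /* direct access */
	writel(reg, mmio + MMIO_INDEX_OFF);         /* select the register */
	return readl(mmio + MMIO_DATA_OFF);         /* read it back */
}

static void mmio_write(void __iomem *mmio, u32 reg, u32 val)
{
	if (reg < MMIO_WINDOW_SIZE) {
		writel(val, mmio + reg);
		return;
	}
	writel(reg, mmio + MMIO_INDEX_OFF);
	writel(val, mmio + MMIO_DATA_OFF);
}

Annotating rmmio as void __iomem * is what lets the hunks above drop the per-call casts that the old plain void * declaration forced on every accessor.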
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index b2449629537..df8218bb83a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -625,7 +625,7 @@ static struct radeon_asic r600_asic = {
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
- .copy_dma = &r600_copy_blit,
+ .copy_dma = NULL,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -672,7 +672,7 @@ static struct radeon_asic rs780_asic = {
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
- .copy_dma = &r600_copy_blit,
+ .copy_dma = NULL,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -719,7 +719,7 @@ static struct radeon_asic rv770_asic = {
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
- .copy_dma = &r600_copy_blit,
+ .copy_dma = NULL,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -766,7 +766,7 @@ static struct radeon_asic evergreen_asic = {
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &evergreen_copy_blit,
- .copy_dma = &evergreen_copy_blit,
+ .copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -813,7 +813,7 @@ static struct radeon_asic sumo_asic = {
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &evergreen_copy_blit,
- .copy_dma = &evergreen_copy_blit,
+ .copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -860,7 +860,7 @@ static struct radeon_asic btc_asic = {
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &evergreen_copy_blit,
- .copy_dma = &evergreen_copy_blit,
+ .copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
@@ -907,7 +907,7 @@ static struct radeon_asic cayman_asic = {
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &evergreen_copy_blit,
- .copy_dma = &evergreen_copy_blit,
+ .copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 1e725d9f767..bf2b61584cd 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2320,6 +2320,14 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
le16_to_cpu(clock_info->r600.usVDDC);
}
+ /* patch up vddc if necessary */
+ if (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) {
+ u16 vddc;
+
+ if (radeon_atom_get_max_vddc(rdev, &vddc) == 0)
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
+ }
+
if (rdev->flags & RADEON_IS_IGP) {
/* skip invalid modes */
if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
@@ -2630,7 +2638,35 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+int radeon_atom_get_max_vddc(struct radeon_device *rdev,
+ u16 *voltage)
+{
+ union set_voltage args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+ u8 frev, crev;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return -EINVAL;
+
+ switch (crev) {
+ case 1:
+ return -EINVAL;
+ case 2:
+ args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
+ args.v2.ucVoltageMode = 0;
+ args.v2.usVoltageLevel = 0;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *voltage = le16_to_cpu(args.v2.usVoltageLevel);
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+ return 0;
+}
void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 1aba85cad1a..229a20f10e2 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -104,7 +104,7 @@ static bool radeon_read_bios(struct radeon_device *rdev)
static bool radeon_atrm_get_bios(struct radeon_device *rdev)
{
int ret;
- int size = 64 * 1024;
+ int size = 256 * 1024;
int i;
if (!radeon_atrm_supported(rdev->pdev))
@@ -331,7 +331,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ bus_cntl = RREG32(RV370_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -350,7 +350,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
@@ -367,7 +367,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
/* restore regs */
WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(RV370_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
@@ -390,7 +390,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ if (rdev->flags & RADEON_IS_PCIE)
+ bus_cntl = RREG32(RV370_BUS_CNTL);
+ else
+ bus_cntl = RREG32(RADEON_BUS_CNTL);
crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
crtc2_gen_cntl = 0;
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
@@ -412,7 +415,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ if (rdev->flags & RADEON_IS_PCIE)
+ WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+ else
+ WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
/* Turn off mem requests and CRTC for both controllers */
WREG32(RADEON_CRTC_GEN_CNTL,
@@ -439,7 +445,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
/* restore regs */
WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ if (rdev->flags & RADEON_IS_PCIE)
+ WREG32(RV370_BUS_CNTL, bus_cntl);
+ else
+ WREG32(RADEON_BUS_CNTL, bus_cntl);
WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 2d48e7a1474..dcd0863e31a 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -96,7 +96,7 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
* Read XTAL (ref clock), SCLK and MCLK from Open Firmware device
* tree. Hopefully, ATI OF driver is kind enough to fill these
*/
-static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
+static bool radeon_read_clocks_OF(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct device_node *dp = rdev->pdev->dev.of_node;
@@ -166,7 +166,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
return true;
}
#else
-static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
+static bool radeon_read_clocks_OF(struct drm_device *dev)
{
return false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e4594676a07..e0138b674ac 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -779,7 +779,8 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
}
}
}
- } else if (rdev->family >= CHIP_R200) {
+ } else if ((rdev->family == CHIP_R200) ||
+ (rdev->family >= CHIP_R300)) {
/* 0x68 */
i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
@@ -2556,6 +2557,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
u16 offset, misc, misc2 = 0;
u8 rev, blocks, tmp;
int state_index = 0;
+ struct radeon_i2c_bus_rec i2c_bus;
rdev->pm.default_power_state_index = -1;
@@ -2574,7 +2576,6 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
if (offset) {
u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0;
- struct radeon_i2c_bus_rec i2c_bus;
rev = RBIOS8(offset);
@@ -2616,6 +2617,25 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
+ } else {
+ /* boards with a thermal chip, but no overdrive table */
+
+ /* Asus 9600xt has an f75375 on the monid bus */
+ if ((dev->pdev->device == 0x4152) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0xc002)) {
+ i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ if (rdev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = "f75375";
+ info.addr = 0x28;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+ name, info.addr);
+ }
+ }
}
if (rdev->flags & RADEON_IS_MOBILITY) {
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cbfca3a24fd..4f0c1ecac72 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -52,20 +52,28 @@ void radeon_connector_hotplug(struct drm_connector *connector)
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ /* bail if the connector does not have an hpd pin, e.g.,
+ * VGA, TV, etc.
+ */
+ if (radeon_connector->hpd.hpd == RADEON_HPD_NONE)
+ return;
+
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
- /* powering up/down the eDP panel generates hpd events which
- * can interfere with modesetting.
- */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ /* if the connector is already off, don't turn it back on */
+ if (connector->dpms != DRM_MODE_DPMS_ON)
return;
- /* pre-r600 did not always have the hpd pins mapped accurately to connectors */
- if (rdev->family >= CHIP_R600) {
- if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+ /* just deal with DP (not eDP) here. */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ int saved_dpms = connector->dpms;
+
+ if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+ radeon_dp_needs_link_train(radeon_connector))
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
else
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ connector->dpms = saved_dpms;
}
}
@@ -424,16 +432,73 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
return 0;
}
+/*
+ * Some integrated ATI Radeon chipset implementations (e.g. the
+ * Asus M2A-VM HDMI) may indicate the availability of a DDC even
+ * when there is no monitor connected. For these connectors the
+ * following DDC probe extension is applied: also check for the
+ * availability of an EDID with at least a correct EDID header.
+ * Only then is DDC assumed to be available. This prevents
+ * drm_get_edid() and drm_edid_block_valid() from periodically
+ * dumping data and kernel errors into the logs and onto the terminal.
+ */
+static bool radeon_connector_needs_extended_probe(struct radeon_device *dev,
+ uint32_t supported_device,
+ int connector_type)
+{
+ /* The Asus M2A-VM HDMI board sends data on the i2c bus even if the
+ * HDMI add-on card is not plugged in or HDMI is disabled in the
+ * BIOS. A valid DDC can only be assumed if a valid EDID header can
+ * also be retrieved via the i2c bus during the DDC probe. */
+ if ((dev->pdev->device == 0x791e) &&
+ (dev->pdev->subsystem_vendor == 0x1043) &&
+ (dev->pdev->subsystem_device == 0x826d)) {
+ if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+ return true;
+ }
+ /* ECS A740GM-M with ATI RADEON 2100 sends data to i2c bus
+ * for a DVI connector that is not implemented */
+ if ((dev->pdev->device == 0x796e) &&
+ (dev->pdev->subsystem_vendor == 0x1019) &&
+ (dev->pdev->subsystem_device == 0x2615)) {
+ if ((connector_type == DRM_MODE_CONNECTOR_DVID) &&
+ (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+ return true;
+ }
+ /* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100
+ * (RS690M) sends data to i2c bus for a HDMI connector that
+ * is not implemented */
+ if ((dev->pdev->device == 0x791f) &&
+ (dev->pdev->subsystem_vendor == 0x1179) &&
+ (dev->pdev->subsystem_device == 0xff68)) {
+ if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
+ return true;
+ }
+
+ /* Default: no EDID header probe required for DDC probing */
+ return false;
+}
+
static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+ struct drm_display_mode *t, *mode;
+
+ /* If the EDID preferred mode doesn't match the native mode, use it */
+ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+ if (mode->type & DRM_MODE_TYPE_PREFERRED) {
+ if (mode->hdisplay != native_mode->hdisplay ||
+ mode->vdisplay != native_mode->vdisplay)
+ memcpy(native_mode, mode, sizeof(*mode));
+ }
+ }
/* Try to get native mode details from EDID if necessary */
if (!native_mode->clock) {
- struct drm_display_mode *t, *mode;
-
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
if (mode->hdisplay == native_mode->hdisplay &&
mode->vdisplay == native_mode->vdisplay) {
@@ -444,6 +509,7 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
}
}
}
+
if (!native_mode->clock) {
DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
radeon_encoder->rmx_type = RMX_OFF;
@@ -655,7 +721,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
ret = connector_status_disconnected;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector,
+ radeon_connector->requires_extended_probe);
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -827,7 +894,8 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
bool dret = false;
if (radeon_connector->ddc_bus)
- dret = radeon_ddc_probe(radeon_connector);
+ dret = radeon_ddc_probe(radeon_connector,
+ radeon_connector->requires_extended_probe);
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -1245,7 +1313,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
- if (radeon_ddc_probe(radeon_connector))
+ if (radeon_ddc_probe(radeon_connector,
+ radeon_connector->requires_extended_probe))
ret = connector_status_connected;
}
}
@@ -1400,6 +1469,9 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->shared_ddc = shared_ddc;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
+ radeon_connector->requires_extended_probe =
+ radeon_connector_needs_extended_probe(rdev, supported_device,
+ connector_type);
radeon_connector->router = *router;
if (router->ddc_valid || router->cd_valid) {
radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
@@ -1746,6 +1818,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->devices = supported_device;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
+ radeon_connector->requires_extended_probe =
+ radeon_connector_needs_extended_probe(rdev, supported_device,
+ connector_type);
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
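
radeon_connector_needs_extended_probe() above is an open-coded quirk check keyed on the PCI device ID, subsystem IDs and connector type. When such a list grows, the same test is often written as a table walk; the following is a small self-contained sketch of that alternative. The struct, helper name and the numeric connector-type values are illustrative only, and the supported_device check from the hunk is deliberately left out to keep the example short.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct ddc_quirk {
	uint16_t device;
	uint16_t subsys_vendor;
	uint16_t subsys_device;
	int      connector_type;    /* DRM connector type constant */
};

/* Hypothetical entries mirroring the boards listed in the hunk above. */
static const struct ddc_quirk quirks[] = {
	{ 0x791e, 0x1043, 0x826d, 11 /* HDMIA */ },
	{ 0x796e, 0x1019, 0x2615,  3 /* DVID  */ },
	{ 0x791f, 0x1179, 0xff68, 11 /* HDMIA */ },
};

static bool needs_extended_probe(uint16_t device, uint16_t sub_vendor,
				 uint16_t sub_device, int connector_type)
{
	size_t i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct ddc_quirk *q = &quirks[i];

		if (q->device == device &&
		    q->subsys_vendor == sub_vendor &&
		    q->subsys_device == sub_device &&
		    q->connector_type == connector_type)
			return true;
	}
	return false;
}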
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 75867792a4e..045ec59478f 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2115,7 +2115,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
if (drm_pci_device_is_agp(dev))
dev_priv->flags |= RADEON_IS_AGP;
- else if (drm_pci_device_is_pcie(dev))
+ else if (pci_is_pcie(dev->pdev))
dev_priv->flags |= RADEON_IS_PCIE;
else
dev_priv->flags |= RADEON_IS_PCI;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7cfaa7e2f3b..b51e15725c6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -32,6 +32,7 @@
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
+#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
@@ -300,6 +301,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
+ mc->real_vram_size = radeon_vram_limit;
dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
@@ -348,6 +351,9 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
+ if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+ return false;
+
/* first check CRTCs */
if (ASIC_IS_DCE41(rdev)) {
reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
@@ -704,8 +710,9 @@ int radeon_device_init(struct radeon_device *rdev,
rdev->gpu_lockup = false;
rdev->accel_working = false;
- DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
- radeon_family_name[rdev->family], pdev->vendor, pdev->device);
+ DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
+ radeon_family_name[rdev->family], pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
/* mutex initialization are all done here so we
* can recall function without having locking issues */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 292f73f0ddb..1a858944e4f 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -282,7 +282,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
work = radeon_crtc->unpin_work;
if (work == NULL ||
- !radeon_fence_signaled(work->fence)) {
+ (work->fence && !radeon_fence_signaled(work->fence))) {
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
@@ -348,7 +348,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
struct radeon_framebuffer *new_radeon_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
- struct radeon_fence *fence;
struct radeon_unpin_work *work;
unsigned long flags;
u32 tiling_flags, pitch_pixels;
@@ -359,16 +358,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
if (work == NULL)
return -ENOMEM;
- r = radeon_fence_create(rdev, &fence);
- if (unlikely(r != 0)) {
- kfree(work);
- DRM_ERROR("flip queue: failed to create fence.\n");
- return -ENOMEM;
- }
work->event = event;
work->rdev = rdev;
work->crtc_id = radeon_crtc->crtc_id;
- work->fence = radeon_fence_ref(fence);
old_radeon_fb = to_radeon_framebuffer(crtc->fb);
new_radeon_fb = to_radeon_framebuffer(fb);
/* schedule unpin of the old buffer */
@@ -377,6 +369,10 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
drm_gem_object_reference(obj);
rbo = gem_to_radeon_bo(obj);
work->old_rbo = rbo;
+ obj = new_radeon_fb->obj;
+ rbo = gem_to_radeon_bo(obj);
+ if (rbo->tbo.sync_obj)
+ work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
INIT_WORK(&work->work, radeon_unpin_work_func);
/* We borrow the event spin lock for protecting unpin_work */
@@ -391,9 +387,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
spin_unlock_irqrestore(&dev->event_lock, flags);
/* pin the new buffer */
- obj = new_radeon_fb->obj;
- rbo = gem_to_radeon_bo(obj);
-
DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
work->old_rbo, rbo);
@@ -461,37 +454,18 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
goto pflip_cleanup1;
}
- /* 32 ought to cover us */
- r = radeon_ring_lock(rdev, 32);
- if (r) {
- DRM_ERROR("failed to lock the ring before flip\n");
- goto pflip_cleanup2;
- }
-
- /* emit the fence */
- radeon_fence_emit(rdev, fence);
/* set the proper interrupt */
radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
- /* fire the ring */
- radeon_ring_unlock_commit(rdev);
return 0;
-pflip_cleanup2:
- drm_vblank_put(dev, radeon_crtc->crtc_id);
-
pflip_cleanup1:
- r = radeon_bo_reserve(rbo, false);
- if (unlikely(r != 0)) {
+ if (unlikely(radeon_bo_reserve(rbo, false) != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
goto pflip_cleanup;
}
- r = radeon_bo_unpin(rbo);
- if (unlikely(r != 0)) {
- radeon_bo_unreserve(rbo);
- r = -EINVAL;
+ if (unlikely(radeon_bo_unpin(rbo) != 0)) {
DRM_ERROR("failed to unpin new rbo in error path\n");
- goto pflip_cleanup;
}
radeon_bo_unreserve(rbo);
@@ -501,7 +475,7 @@ pflip_cleanup:
unlock_free:
drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
spin_unlock_irqrestore(&dev->event_lock, flags);
- radeon_fence_unref(&fence);
+ radeon_fence_unref(&work->fence);
kfree(work);
return r;
@@ -777,8 +751,17 @@ static int radeon_ddc_dump(struct drm_connector *connector)
if (!radeon_connector->ddc_bus)
return -1;
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
+ /* Log the EDID retrieval status here. In particular, for connectors
+ * with the requires_extended_probe flag set, radeon_dvi_detect()
+ * will not fetch an EDID on this connector as long as no valid
+ * EDID header is found. */
if (edid) {
+ DRM_INFO("Radeon display connector %s: Found valid EDID",
+ drm_get_connector_name(connector));
kfree(edid);
+ } else {
+ DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID",
+ drm_get_connector_name(connector));
}
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 73dfbe8e5f9..e71d2ed7fa1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -51,9 +51,10 @@
* 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
* 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
* 2.10.0 - fusion 2D tiling
+ * 2.11.0 - backend map, initial compute support for the CS checker
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 10
+#define KMS_DRIVER_MINOR 11
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index b293487e5aa..319d85d7e75 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -2323,6 +2323,9 @@ radeon_add_atom_encoder(struct drm_device *dev,
default:
encoder->possible_crtcs = 0x3;
break;
+ case 4:
+ encoder->possible_crtcs = 0xf;
+ break;
case 6:
encoder->possible_crtcs = 0x3f;
break;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 021d2b6b556..7fd4e3e5ad5 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -29,7 +29,7 @@
* Dave Airlie
*/
#include <linux/seq_file.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 781196db792..6c111c1fa3f 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -32,17 +32,17 @@
* radeon_ddc_probe
*
*/
-bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe)
{
- u8 out_buf[] = { 0x0, 0x0};
- u8 buf[2];
+ u8 out = 0x0;
+ u8 buf[8];
int ret;
struct i2c_msg msgs[] = {
{
.addr = 0x50,
.flags = 0,
.len = 1,
- .buf = out_buf,
+ .buf = &out,
},
{
.addr = 0x50,
@@ -52,15 +52,31 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
}
};
+ /* Read 8 bytes from i2c for extended probe of EDID header */
+ if (requires_extended_probe)
+ msgs[1].len = 8;
+
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
- if (ret == 2)
- return true;
-
- return false;
+ if (ret != 2)
+ /* Couldn't find an accessible DDC on this connector */
+ return false;
+ if (requires_extended_probe) {
+ /* Also probe for a valid EDID header.
+ * An EDID header starts with:
+ * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
+ * Only the first 6 bytes must be valid as
+ * drm_edid_block_valid() can fix the last 2 bytes */
+ if (drm_edid_header_is_valid(buf) < 6) {
+ /* Couldn't find an accessible EDID on this
+ * connector */
+ return false;
+ }
+ }
+ return true;
}
/* bit banging i2c */
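
The extended probe above relies on drm_edid_header_is_valid(), which scores how many of the first eight EDID bytes match the fixed header signature; the hunk treats a score below 6 as "no usable EDID". A standalone sketch of that scoring check is shown below; the helper names here are made up for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Every EDID base block starts with this fixed 8-byte signature. */
static const uint8_t edid_header[] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

/* Count how many header bytes match, like drm_edid_header_is_valid(). */
static int edid_header_score(const uint8_t *raw)
{
	int score = 0;
	size_t i;

	for (i = 0; i < sizeof(edid_header); i++)
		if (raw[i] == edid_header[i])
			score++;
	return score;
}

/* The probe accepts the DDC only if at least 6 header bytes are intact,
 * leaving the last two for drm_edid_block_valid() to repair. */
static bool ddc_looks_valid(const uint8_t *raw)
{
	return edid_header_score(raw) >= 6;
}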
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index bd58af65858..be2c1224e68 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -60,7 +60,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
/* update BUS flag */
if (drm_pci_device_is_agp(dev)) {
flags |= RADEON_IS_AGP;
- } else if (drm_pci_device_is_pcie(dev)) {
+ } else if (pci_is_pcie(dev->pdev)) {
flags |= RADEON_IS_PCIE;
} else {
flags |= RADEON_IS_PCI;
@@ -237,6 +237,19 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case RADEON_INFO_FUSION_GART_WORKING:
value = 1;
break;
+ case RADEON_INFO_BACKEND_MAP:
+ if (rdev->family >= CHIP_CAYMAN)
+ value = rdev->config.cayman.backend_map;
+ else if (rdev->family >= CHIP_CEDAR)
+ value = rdev->config.evergreen.backend_map;
+ else if (rdev->family >= CHIP_RV770)
+ value = rdev->config.rv770.backend_map;
+ else if (rdev->family >= CHIP_R600)
+ value = rdev->config.r600.backend_map;
+ else {
+ return -EINVAL;
+ }
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
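
The new RADEON_INFO_BACKEND_MAP request is consumed from userspace through the generic radeon info ioctl. Below is a rough libdrm-based sketch of the query, assuming the usual drm_radeon_info layout in which the value field carries a pointer to the 32-bit output word; error handling is trimmed.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>        /* drmCommandWriteRead() */
#include <radeon_drm.h>     /* DRM_RADEON_INFO, struct drm_radeon_info */

/* Returns 0 on success and stores the backend map in *out. */
static int radeon_query_backend_map(int fd, uint32_t *out)
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_BACKEND_MAP;
	info.value = (uintptr_t)out;    /* kernel copies the result here */

	return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
}

Kernels older than the 2.11 interface bump introduced in this series will return -EINVAL for the request, so callers should treat the query as optional.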
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
index ed95155c4b1..988548efea9 100644
--- a/drivers/gpu/drm/radeon/radeon_mem.c
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -139,7 +139,7 @@ static int init_heap(struct mem_block **heap, int start, int size)
if (!blocks)
return -ENOMEM;
- *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
+ *heap = kzalloc(sizeof(**heap), GFP_KERNEL);
if (!*heap) {
kfree(blocks);
return -ENOMEM;
@@ -150,7 +150,6 @@ static int init_heap(struct mem_block **heap, int start, int size)
blocks->file_priv = NULL;
blocks->next = blocks->prev = *heap;
- memset(*heap, 0, sizeof(**heap));
(*heap)->file_priv = (struct drm_file *) - 1;
(*heap)->next = (*heap)->prev = blocks;
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6df4e3cec0c..68820f5f630 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -438,6 +438,9 @@ struct radeon_connector {
struct radeon_i2c_chan *ddc_bus;
/* some systems have an hdmi and vga port with a shared ddc line */
bool shared_ddc;
+ /* for some Radeon chip families we apply an additional EDID header
+ check as part of the DDC probe */
+ bool requires_extended_probe;
bool use_digital;
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
@@ -476,6 +479,7 @@ extern void radeon_dp_set_link_config(struct drm_connector *connector,
struct drm_display_mode *mode);
extern void radeon_dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector);
+extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
@@ -514,7 +518,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
u8 val);
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector,
+ bool requires_extended_probe);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index aaa19dc418a..6fabe89fa6a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -594,6 +594,9 @@ int radeon_pm_init(struct radeon_device *rdev)
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
SET_VOLTAGE_TYPE_ASIC_VDDC);
+ if (rdev->pm.default_vddci)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+ SET_VOLTAGE_TYPE_ASIC_VDDCI);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index ec93a75369e..b4ce8645570 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -300,6 +300,8 @@
# define RADEON_BUS_READ_BURST (1 << 30)
#define RADEON_BUS_CNTL1 0x0034
# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4)
+#define RV370_BUS_CNTL 0x004c
+# define RV370_BUS_BIOS_DIS_ROM (1 << 2)
/* rv370/rv380, rv410, r423/r430/r480, r5xx */
#define RADEON_MSI_REARM_EN 0x0160
# define RV370_MSI_REARM_EN (1 << 0)
@@ -3293,7 +3295,7 @@
# define RADEON_RB_BUFSZ_MASK (0x3f << 0)
# define RADEON_RB_BLKSZ_SHIFT 8
# define RADEON_RB_BLKSZ_MASK (0x3f << 8)
-# define RADEON_BUF_SWAP_32BIT (1 << 17)
+# define RADEON_BUF_SWAP_32BIT (2 << 16)
# define RADEON_MAX_FETCH_SHIFT 18
# define RADEON_MAX_FETCH_MASK (0x3 << 18)
# define RADEON_RB_NO_UPDATE (1 << 27)
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index dee4a0c1b4b..602fa3541c4 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -40,10 +40,14 @@ void radeon_test_moves(struct radeon_device *rdev)
size = 1024 * 1024;
/* Number of tests =
- * (Total GTT - IB pool - writeback page - ring buffer) / test size
+ * (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
- rdev->cp.ring_size)) / size;
+ n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size;
+ if (rdev->wb.wb_obj)
+ n -= RADEON_GPU_PAGE_SIZE;
+ if (rdev->ih.ring_obj)
+ n -= rdev->ih.ring_size;
+ n /= size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
if (!gtt_obj) {
@@ -132,9 +136,15 @@ void radeon_test_moves(struct radeon_device *rdev)
gtt_start++, vram_start++) {
if (*vram_start != gtt_start) {
DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
- "expected 0x%p (GTT map 0x%p-0x%p)\n",
- i, *vram_start, gtt_start, gtt_map,
- gtt_end);
+ "expected 0x%p (GTT/VRAM offset "
+ "0x%16llx/0x%16llx)\n",
+ i, *vram_start, gtt_start,
+ (unsigned long long)
+ (gtt_addr - rdev->mc.gtt_start +
+ (void*)gtt_start - gtt_map),
+ (unsigned long long)
+ (vram_addr - rdev->mc.vram_start +
+ (void*)gtt_start - gtt_map));
radeon_bo_kunmap(vram_obj);
goto out_cleanup;
}
@@ -175,9 +185,15 @@ void radeon_test_moves(struct radeon_device *rdev)
gtt_start++, vram_start++) {
if (*gtt_start != vram_start) {
DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
- "expected 0x%p (VRAM map 0x%p-0x%p)\n",
- i, *gtt_start, vram_start, vram_map,
- vram_end);
+ "expected 0x%p (VRAM/GTT offset "
+ "0x%16llx/0x%16llx)\n",
+ i, *gtt_start, vram_start,
+ (unsigned long long)
+ (vram_addr - rdev->mc.vram_start +
+ (void*)vram_start - vram_map),
+ (unsigned long long)
+ (gtt_addr - rdev->mc.gtt_start +
+ (void*)vram_start - vram_map));
radeon_bo_kunmap(gtt_obj[i]);
goto out_cleanup;
}
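
The test-count hunk above replaces a fixed subtraction chain with conditional deductions, because the writeback page and the IH ring only occupy GTT when their objects actually exist. A compact restatement of that arithmetic is sketched below; the parameters are placeholders for values that the driver reads from its device state.

#include <stdbool.h>
#include <stdint.h>

/* How many test buffers of 'test_size' fit in GTT after the fixed consumers. */
static uint32_t num_move_tests(uint64_t gtt_size, uint32_t ib_pool_bytes,
			       uint32_t cp_ring_bytes, bool has_wb_page,
			       uint32_t page_size, bool has_ih_ring,
			       uint32_t ih_ring_bytes, uint32_t test_size)
{
	uint64_t n = gtt_size - ib_pool_bytes - cp_ring_bytes;

	if (has_wb_page)
		n -= page_size;        /* writeback page */
	if (has_ih_ring)
		n -= ih_ring_bytes;    /* interrupt handler ring */

	return (uint32_t)(n / test_size);
}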
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 60125ddba1e..9b86fb0e412 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -450,6 +450,29 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
return -EINVAL;
mem->bus.base = rdev->mc.aper_base;
mem->bus.is_iomem = true;
+#ifdef __alpha__
+ /*
+ * Alpha: use bus.addr to hold the ioremap() return,
+ * so we can modify bus.base below.
+ */
+ if (mem->placement & TTM_PL_FLAG_WC)
+ mem->bus.addr =
+ ioremap_wc(mem->bus.base + mem->bus.offset,
+ mem->bus.size);
+ else
+ mem->bus.addr =
+ ioremap_nocache(mem->bus.base + mem->bus.offset,
+ mem->bus.size);
+
+ /*
+ * Alpha: Use just the bus offset plus
+ * the hose/domain memory base for bus.base.
+ * It then can be used to build PTEs for VRAM
+ * access, as done in ttm_bo_vm_fault().
+ */
+ mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
+ rdev->ddev->hose->dense_mem_base;
+#endif
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
index 0aa8e85a945..2316977eb92 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -208,6 +208,7 @@ cayman 0x9400
0x0002834C PA_SC_VPORT_ZMAX_15
0x00028350 SX_MISC
0x00028354 SX_SURFACE_SYNC
+0x0002835C SX_SCATTER_EXPORT_SIZE
0x00028380 SQ_VTX_SEMANTIC_0
0x00028384 SQ_VTX_SEMANTIC_1
0x00028388 SQ_VTX_SEMANTIC_2
@@ -432,6 +433,7 @@ cayman 0x9400
0x00028700 SPI_STACK_MGMT
0x00028704 SPI_WAVE_MGMT_1
0x00028708 SPI_WAVE_MGMT_2
+0x00028720 GDS_ADDR_BASE
0x00028724 GDS_ADDR_SIZE
0x00028780 CB_BLEND0_CONTROL
0x00028784 CB_BLEND1_CONTROL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index 0e28cae7ea4..161737a28c2 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -44,6 +44,7 @@ evergreen 0x9400
0x00008E28 SQ_STATIC_THREAD_MGMT_3
0x00008E2C SQ_LDS_RESOURCE_MGMT
0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009014 SX_MEMORY_EXPORT_SIZE
0x00009100 SPI_CONFIG_CNTL
0x0000913C SPI_CONFIG_CNTL_1
0x00009508 TA_CNTL_AUX
@@ -442,7 +443,9 @@ evergreen 0x9400
0x000286EC SPI_COMPUTE_NUM_THREAD_X
0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x00028720 GDS_ADDR_BASE
0x00028724 GDS_ADDR_SIZE
+0x00028728 GDS_ORDERED_WAVE_PER_SE
0x00028780 CB_BLEND0_CONTROL
0x00028784 CB_BLEND1_CONTROL
0x00028788 CB_BLEND2_CONTROL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index ea49752ee99..0380c5c15f8 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -429,6 +429,7 @@ r600 0x9400
0x00028438 SX_ALPHA_REF
0x00028410 SX_ALPHA_TEST_CONTROL
0x00028350 SX_MISC
+0x00009014 SX_MEMORY_EXPORT_SIZE
0x00009604 TC_INVALIDATE
0x00009400 TD_FILTER4
0x00009404 TD_FILTER4_1
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6e3b11e5abb..4b5d0e6974a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -426,7 +426,7 @@ int rs600_gart_init(struct radeon_device *rdev)
return radeon_gart_table_vram_alloc(rdev);
}
-int rs600_gart_enable(struct radeon_device *rdev)
+static int rs600_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r, i;
@@ -440,8 +440,8 @@ int rs600_gart_enable(struct radeon_device *rdev)
return r;
radeon_gart_restore(rdev);
/* Enable bus master */
- tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
- WREG32(R_00004C_BUS_CNTL, tmp);
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
/* FIXME: setup default page */
WREG32_MC(R_000100_MC_PT0_CNTL,
(S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
@@ -530,7 +530,7 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
addr = addr & 0xFFFFFFFFFFFFF000ULL;
addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
- writeq(addr, ((void __iomem *)ptr) + (i * 8));
+ writeq(addr, ptr + (i * 8));
return 0;
}
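
rs600_gart_set_page() builds a 64-bit PTE by masking the DMA address down to its page-aligned bits and OR-ing in access flags before writeq() stores it into the GART table. A minimal sketch of that encoding follows; the flag bit positions are assumed for illustration, and the driver's real values live in its register headers.

#include <stdint.h>

/* Assumed bit positions, for illustration only. */
#define PTE_VALID      (1ULL << 0)
#define PTE_SYSTEM     (1ULL << 1)
#define PTE_SNOOPED    (1ULL << 2)
#define PTE_READABLE   (1ULL << 5)
#define PTE_WRITEABLE  (1ULL << 6)

static uint64_t gart_encode_pte(uint64_t dma_addr)
{
	uint64_t pte = dma_addr & 0xFFFFFFFFFFFFF000ULL;  /* keep page-aligned bits */

	pte |= PTE_VALID | PTE_SYSTEM | PTE_SNOOPED;      /* system page, coherent */
	pte |= PTE_READABLE | PTE_WRITEABLE;              /* GPU may read and write */
	return pte;
}

The writeq() change in the hunk itself only drops a redundant cast on a pointer that is already declared __iomem.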
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 6f508ffd103..4720d000d44 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -575,6 +575,12 @@ static void rv770_program_channel_remap(struct radeon_device *rdev)
else
tcp_chan_steer = 0x00fac688;
+ /* RV770 CE has special chremap setup */
+ if (rdev->pdev->device == 0x944e) {
+ tcp_chan_steer = 0x00b08b08;
+ mc_shared_chremap = 0x00b08b08;
+ }
+
WREG32(TCP_CHAN_STEER, tcp_chan_steer);
WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
@@ -772,6 +778,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
(cc_rb_backend_disable >> 16));
rdev->config.rv770.tile_config = gb_tiling_config;
+ rdev->config.rv770.backend_map = backend_map;
gb_tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, gb_tiling_config);
@@ -1362,6 +1369,7 @@ void rv770_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rv770_vram_scratch_fini(rdev);
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index ef940bad63f..194303c177a 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -48,8 +48,8 @@ enum sis_family {
#define SIS_BASE (dev_priv->mmio)
-#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg);
-#define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val);
+#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg)
+#define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val)
typedef struct drm_sis_private {
drm_local_map_t *mmio;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2e618b5ac46..a4d38d85909 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -37,7 +37,7 @@
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
@@ -353,8 +353,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
ret = ttm_tt_set_user(bo->ttm, current,
bo->buffer_start, bo->num_pages);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
break;
default:
printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
@@ -390,10 +392,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
* Create and bind a ttm if required.
*/
- if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
- ret = ttm_bo_add_ttm(bo, false);
- if (ret)
- goto out_err;
+ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
+ if (bo->ttm == NULL) {
+ ret = ttm_bo_add_ttm(bo, false);
+ if (ret)
+ goto out_err;
+ }
ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
if (ret)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 77dbf408c0d..ae3c6f5dd2b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -635,13 +635,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
if (ret)
return ret;
- ttm_bo_free_old_node(bo);
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
(bo->ttm != NULL)) {
ttm_tt_unbind(bo->ttm);
ttm_tt_destroy(bo->ttm);
bo->ttm = NULL;
}
+ ttm_bo_free_old_node(bo);
} else {
/**
* This should help pipeline ordinary buffer moves.
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index de41e55a944..075daf44bce 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -30,7 +30,7 @@
#include "ttm/ttm_lock.h"
#include "ttm/ttm_module.h"
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched.h>
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index ebddd443d91..93577f2e295 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -55,7 +55,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct ttm_object_file {
struct ttm_object_device *tdev;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index d948575717b..727e93daac3 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -40,7 +40,7 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"
@@ -355,7 +355,7 @@ restart:
if (nr_free)
goto restart;
- /* Not allowed to fall tough or break because
+ /* Not allowed to fall through or break because
* following context is inside spinlock while we are
* outside here.
*/
@@ -556,7 +556,7 @@ out:
}
/**
- * Fill the given pool if there isn't enough pages and requested number of
+ * Fill the given pool if there aren't enough pages and the requested number of
* pages is small.
*/
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
@@ -576,8 +576,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
pool->fill_lock = true;
- /* If allocation request is small and there is not enough
- * pages in pool we fill the pool first */
+ /* If allocation request is small and there are not enough
+ * pages in a pool we fill the pool up first. */
if (count < _manager->options.small
&& count > pool->npages) {
struct list_head new_pages;
@@ -614,9 +614,9 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
}
/**
- * Cut count nubmer of pages from the pool and put them to return list
+ * Cut 'count' number of pages from the pool and put them on the return list.
*
- * @return count of pages still to allocate to fill the request.
+ * @return count of pages still required to fulfill the request.
*/
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
struct list_head *pages, int ttm_flags,
@@ -637,7 +637,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
goto out;
}
/* find the last pages to include for requested number of pages. Split
- * pool to begin and halves to reduce search space. */
+ * pool to begin and halve it to reduce search space. */
if (count <= pool->npages/2) {
i = 0;
list_for_each(p, &pool->list) {
@@ -651,7 +651,7 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
break;
}
}
- /* Cut count number of pages from pool */
+ /* Cut 'count' number of pages from the pool */
list_cut_position(pages, &pool->list, p);
pool->npages -= count;
count = 0;
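
ttm_page_pool_get_pages() picks its split point by walking from whichever end of the pool list is closer and then hands the leading segment over with list_cut_position(). A small kernel-style sketch of that idea under the same list API is shown below; apart from list_cut_position() and the list iterators, the names are made up.

#include <linux/list.h>

struct page_pool {
	struct list_head list;     /* pooled pages */
	unsigned int npages;
};

/* Move 'count' pages from the head of the pool onto 'out'. */
static void pool_cut_pages(struct page_pool *pool, struct list_head *out,
			   unsigned int count)
{
	struct list_head *p;
	unsigned int i;

	if (!count || count >= pool->npages)
		return;                        /* caller handles these cases */

	if (count <= pool->npages / 2) {
		/* closer to the head: walk forward 'count' entries */
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		/* closer to the tail: walk backwards npages - count entries */
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* detach everything from the head up to and including p */
	list_cut_position(out, &pool->list, p);
	pool->npages -= count;
}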
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 90e23e0bfad..58c271ebc0f 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -31,6 +31,7 @@
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
@@ -484,7 +485,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;
for (i = 0; i < ttm->num_pages; ++i) {
- from_page = read_mapping_page(swap_space, i, NULL);
+ from_page = shmem_read_mapping_page(swap_space, i);
if (IS_ERR(from_page)) {
ret = PTR_ERR(from_page);
goto out_err;
@@ -557,7 +558,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
from_page = ttm->pages[i];
if (unlikely(from_page == NULL))
continue;
- to_page = read_mapping_page(swap_space, i, NULL);
+ to_page = shmem_read_mapping_page(swap_space, i);
if (unlikely(IS_ERR(to_page))) {
ret = PTR_ERR(to_page);
goto out_err;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index f1a52f9e729..07ce02da78a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -585,11 +585,10 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
return -ENOSYS;
}
- overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return -ENOMEM;
- memset(overlay, 0, sizeof(*overlay));
mutex_init(&overlay->mutex);
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
overlay->stream[i].buf = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 5408b1b7996..bfe1bcce7f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -612,11 +612,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->sizes[0].height == 64 &&
srf->format == SVGA3D_A8R8G8B8) {
- srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
- /* clear the image */
- if (srf->snooper.image) {
- memset(srf->snooper.image, 0x00, 64 * 64 * 4);
- } else {
+ /* allocate image area and clear it */
+ srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
+ if (!srf->snooper.image) {
DRM_ERROR("Failed to allocate cursor_image\n");
ret = -ENOMEM;
goto out_err1;
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 8a1021f2e31..c72f1c0b5e6 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1171,10 +1171,9 @@ static int vga_arb_open(struct inode *inode, struct file *file)
pr_debug("%s\n", __func__);
- priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
- memset(priv, 0, sizeof(*priv));
spin_lock_init(&priv->lock);
file->private_data = priv;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 36ca465c00c..306b15f39c9 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -172,6 +172,20 @@ config HID_EZKEY
---help---
Support for Ezkey BTC 8193 keyboard.
+config HID_HOLTEK
+ tristate "Holtek On Line Grip based game controller support"
+ depends on USB_HID
+ ---help---
+ Say Y here if you have a Holtek On Line Grip based game controller.
+
+config HOLTEK_FF
+ bool "Holtek On Line Grip force feedback support"
+ depends on HID_HOLTEK
+ select INPUT_FF_MEMLESS
+ ---help---
+ Say Y here if you have a Holtek On Line Grip based game controller
+ and want to have force feedback support for it.
+
config HID_KEYTOUCH
tristate "Keytouch HID devices"
depends on USB_HID
@@ -322,6 +336,7 @@ config HID_MULTITOUCH
- Stantum multitouch panels
- Touch International Panels
- Unitec Panels
+ - XAT optical touch panels
If unsure, say N.
@@ -435,6 +450,7 @@ config HID_QUANTA
config HID_ROCCAT
tristate "Roccat special event support"
depends on USB_HID
+ select HID_ROCCAT_COMMON
---help---
Support for Roccat special events.
Say Y here if you have a Roccat mouse or keyboard and want OSD or
@@ -442,44 +458,40 @@ config HID_ROCCAT
config HID_ROCCAT_COMMON
tristate
+ depends on HID_ROCCAT
config HID_ROCCAT_ARVO
tristate "Roccat Arvo keyboard support"
depends on USB_HID
- select HID_ROCCAT
- select HID_ROCCAT_COMMON
+ depends on HID_ROCCAT
---help---
Support for Roccat Arvo keyboard.
config HID_ROCCAT_KONE
tristate "Roccat Kone Mouse support"
depends on USB_HID
- select HID_ROCCAT
- select HID_ROCCAT_COMMON
+ depends on HID_ROCCAT
---help---
Support for Roccat Kone mouse.
config HID_ROCCAT_KONEPLUS
tristate "Roccat Kone[+] mouse support"
depends on USB_HID
- select HID_ROCCAT
- select HID_ROCCAT_COMMON
+ depends on HID_ROCCAT
---help---
Support for Roccat Kone[+] mouse.
config HID_ROCCAT_KOVAPLUS
tristate "Roccat Kova[+] mouse support"
depends on USB_HID
- select HID_ROCCAT
- select HID_ROCCAT_COMMON
+ depends on HID_ROCCAT
---help---
Support for Roccat Kova[+] mouse.
config HID_ROCCAT_PYRA
tristate "Roccat Pyra mouse support"
depends on USB_HID
- select HID_ROCCAT
- select HID_ROCCAT_COMMON
+ depends on HID_ROCCAT
---help---
Support for Roccat Pyra mouse.
@@ -495,6 +507,12 @@ config HID_SONY
---help---
Support for Sony PS3 controller.
+config HID_SPEEDLINK
+ tristate "Speedlink VAD Cezanne mouse support"
+ depends on USB_HID
+ ---help---
+ Support for Speedlink Vicious and Divine Cezanne mouse.
+
config HID_SUNPLUS
tristate "Sunplus wireless desktop"
depends on USB_HID
@@ -568,6 +586,12 @@ config HID_WACOM_POWER_SUPPLY
Say Y here if you want to enable power supply status monitoring for
Wacom Bluetooth devices.
+config HID_WIIMOTE
+ tristate "Nintendo Wii Remote support"
+ depends on BT_HIDP
+ ---help---
+ Support for the Nintendo Wii Remote bluetooth device.
+
config HID_ZEROPLUS
tristate "Zeroplus based game controller support"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index f8cc4ea7335..0a0a38e9fd2 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o
obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
+obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
@@ -63,6 +64,7 @@ obj-$(CONFIG_HID_ROCCAT_PYRA) += hid-roccat-pyra.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
+obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o
obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
@@ -73,6 +75,7 @@ obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o
obj-$(CONFIG_HID_WACOM) += hid-wacom.o
obj-$(CONFIG_HID_WALTOP) += hid-waltop.o
+obj-$(CONFIG_HID_WIIMOTE) += hid-wiimote.o
obj-$(CONFIG_USB_HID) += usbhid/
obj-$(CONFIG_USB_MOUSE) += usbhid/
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index b4554288de0..121514149e0 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -154,6 +154,7 @@ static int ax_probe(struct hid_device *hdev, const struct hid_device_id *id)
error = hid_hw_open(hdev);
if (error) {
dev_err(&hdev->dev, "hw open failed\n");
+ hid_hw_stop(hdev);
return error;
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f7440e8ce3e..1a5cf0c9cfc 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1388,6 +1388,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS, USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
@@ -1423,12 +1424,15 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
@@ -1490,6 +1494,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0709) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
@@ -1498,11 +1503,14 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XAT, USB_DEVICE_ID_XAT_CSR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
{ }
};
@@ -1770,7 +1778,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) },
{ HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_YUREX) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
@@ -1911,6 +1918,11 @@ static bool hid_ignore(struct hid_device *hdev)
hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST)
return true;
break;
+ case USB_VENDOR_ID_JESS:
+ if (hdev->product == USB_DEVICE_ID_JESS_YUREX &&
+ hdev->type == HID_TYPE_USBNONE)
+ return true;
+ break;
}
if (hdev->type == HID_TYPE_USBMOUSE &&
diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
index 81877c67cae..a5dc13fe367 100644
--- a/drivers/hid/hid-emsff.c
+++ b/drivers/hid/hid-emsff.c
@@ -126,7 +126,12 @@ static int ems_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err;
}
- emsff_init(hdev);
+ ret = emsff_init(hdev);
+ if (ret) {
+ dev_err(&hdev->dev, "force feedback init failed\n");
+ hid_hw_stop(hdev);
+ goto err;
+ }
return 0;
err:
diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
new file mode 100644
index 00000000000..91e3a032112
--- /dev/null
+++ b/drivers/hid/hid-holtekff.c
@@ -0,0 +1,240 @@
+/*
+ * Force feedback support for Holtek On Line Grip based gamepads
+ *
+ * These include at least a Brazilian "Clone Joypad Super Power Fire"
+ * which uses vendor ID 0x1241 and identifies as "HOLTEK On Line Grip".
+ *
+ * Copyright (c) 2011 Anssi Hannula <anssi.hannula@iki.fi>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "hid-ids.h"
+
+#ifdef CONFIG_HOLTEK_FF
+#include "usbhid/usbhid.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
+
+/*
+ * These commands and parameters are currently known:
+ *
+ * byte 0: command id:
+ * 01 set effect parameters
+ * 02 play specified effect
+ * 03 stop specified effect
+ * 04 stop all effects
+ * 06 stop all effects
+ * (the difference between 04 and 06 isn't known; win driver
+ * sends 06,04 on application init, and 06 otherwise)
+ *
+ * Commands 01 and 02 need to be sent as pairs, i.e. you need to send 01
+ * before each 02.
+ *
+ * The rest of the bytes are parameters. Command 01 takes all of them, and
+ * commands 02,03 take only the effect id.
+ *
+ * byte 1:
+ * bits 0-3: effect id:
+ * 1: very strong rumble
+ * 2: periodic rumble, short intervals
+ * 3: very strong rumble
+ * 4: periodic rumble, long intervals
+ * 5: weak periodic rumble, long intervals
+ * 6: weak periodic rumble, short intervals
+ * 7: periodic rumble, short intervals
+ * 8: strong periodic rumble, short intervals
+ * 9: very strong rumble
+ * a: causes an error
+ * b: very strong periodic rumble, very short intervals
+ * c-f: nothing
+ * bit 6: right (weak) motor enabled
+ * bit 7: left (strong) motor enabled
+ *
+ * bytes 2-3: time in milliseconds, big-endian
+ * bytes 5-6: unknown (win driver seems to use at least 10e0 with effect 1
+ * and 0014 with effect 6)
+ * byte 7:
+ * bits 0-3: effect magnitude
+ */
+
+#define HOLTEKFF_MSG_LENGTH 7
+
+static const u8 start_effect_1[] = { 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 stop_all4[] = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 stop_all6[] = { 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+struct holtekff_device {
+ struct hid_field *field;
+};
+
+static void holtekff_send(struct holtekff_device *holtekff,
+ struct hid_device *hid,
+ const u8 data[HOLTEKFF_MSG_LENGTH])
+{
+ int i;
+
+ for (i = 0; i < HOLTEKFF_MSG_LENGTH; i++) {
+ holtekff->field->value[i] = data[i];
+ }
+
+ dbg_hid("sending %02x %02x %02x %02x %02x %02x %02x\n", data[0],
+ data[1], data[2], data[3], data[4], data[5], data[6]);
+
+ usbhid_submit_report(hid, holtekff->field->report, USB_DIR_OUT);
+}
+
+static int holtekff_play(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct holtekff_device *holtekff = data;
+ int left, right;
+ /* effect type 1, length 65535 msec */
+ u8 buf[HOLTEKFF_MSG_LENGTH] =
+ { 0x01, 0x01, 0xff, 0xff, 0x10, 0xe0, 0x00 };
+
+ left = effect->u.rumble.strong_magnitude;
+ right = effect->u.rumble.weak_magnitude;
+ dbg_hid("called with 0x%04x 0x%04x\n", left, right);
+
+ if (!left && !right) {
+ holtekff_send(holtekff, hid, stop_all6);
+ return 0;
+ }
+
+ if (left)
+ buf[1] |= 0x80;
+ if (right)
+ buf[1] |= 0x40;
+
+ /* The device takes a single magnitude, so we just sum them up. */
+ buf[6] = min(0xf, (left >> 12) + (right >> 12));
+
+ holtekff_send(holtekff, hid, buf);
+ holtekff_send(holtekff, hid, start_effect_1);
+
+ return 0;
+}
+
+static int holtekff_init(struct hid_device *hid)
+{
+ struct holtekff_device *holtekff;
+ struct hid_report *report;
+ struct hid_input *hidinput = list_entry(hid->inputs.next,
+ struct hid_input, list);
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+ int error;
+
+ if (list_empty(report_list)) {
+ hid_err(hid, "no output report found\n");
+ return -ENODEV;
+ }
+
+ report = list_entry(report_list->next, struct hid_report, list);
+
+ if (report->maxfield < 1 || report->field[0]->report_count != 7) {
+ hid_err(hid, "unexpected output report layout\n");
+ return -ENODEV;
+ }
+
+ holtekff = kzalloc(sizeof(*holtekff), GFP_KERNEL);
+ if (!holtekff)
+ return -ENOMEM;
+
+ set_bit(FF_RUMBLE, dev->ffbit);
+
+ holtekff->field = report->field[0];
+
+ /* initialize the same way as win driver does */
+ holtekff_send(holtekff, hid, stop_all4);
+ holtekff_send(holtekff, hid, stop_all6);
+
+ error = input_ff_create_memless(dev, holtekff, holtekff_play);
+ if (error) {
+ kfree(holtekff);
+ return error;
+ }
+
+ hid_info(hid, "Force feedback for Holtek On Line Grip based devices by Anssi Hannula <anssi.hannula@iki.fi>\n");
+
+ return 0;
+}
+#else
+static inline int holtekff_init(struct hid_device *hid)
+{
+ return 0;
+}
+#endif
+
+static int holtek_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ goto err;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ goto err;
+ }
+
+ holtekff_init(hdev);
+
+ return 0;
+err:
+ return ret;
+}
+
+static const struct hid_device_id holtek_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, holtek_devices);
+
+static struct hid_driver holtek_driver = {
+ .name = "holtek",
+ .id_table = holtek_devices,
+ .probe = holtek_probe,
+};
+
+static int __init holtek_init(void)
+{
+ return hid_register_driver(&holtek_driver);
+}
+
+static void __exit holtek_exit(void)
+{
+ hid_unregister_driver(&holtek_driver);
+}
+
+module_init(holtek_init);
+module_exit(holtek_exit);
+
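Once hid-holtekff is bound, the pad appears as an ordinary memless force-feedback device, so the generic evdev FF interface drives it. A minimal userspace sketch (the event-device path is a placeholder and error handling is kept short):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	/* Placeholder node; the real one depends on the system. */
	int fd = open("/dev/input/event0", O_RDWR);
	struct ff_effect effect;
	struct input_event play;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_RUMBLE;
	effect.id = -1;				/* let the kernel assign an id */
	effect.u.rumble.strong_magnitude = 0x8000;	/* left/strong motor */
	effect.u.rumble.weak_magnitude = 0x4000;	/* right/weak motor */
	effect.replay.length = 1000;		/* milliseconds */

	if (ioctl(fd, EVIOCSFF, &effect) < 0) {	/* upload the effect */
		perror("EVIOCSFF");
		return 1;
	}

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;
	play.value = 1;				/* start playback */
	if (write(fd, &play, sizeof(play)) < 0)
		perror("write");

	close(fd);
	return 0;
}

The driver sums the strong and weak magnitudes into the single magnitude field the hardware understands, as noted in holtekff_play() above.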
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index aecb5a4b8d6..db63ccf21cc 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -350,6 +350,9 @@
#define USB_VENDOR_ID_ILITEK 0x222a
#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001
+#define USB_VENDOR_ID_HOLTEK 0x1241
+#define USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP 0x5015
+
#define USB_VENDOR_ID_IMATION 0x0718
#define USB_DEVICE_ID_DISC_STAKKA 0xd000
@@ -449,6 +452,7 @@
#define USB_VENDOR_ID_LUMIO 0x202e
#define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
+#define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
@@ -471,6 +475,8 @@
#define USB_DEVICE_ID_MS_LK6K 0x00f9
#define USB_DEVICE_ID_MS_PRESENTER_8K_BT 0x0701
#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
+#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -494,6 +500,9 @@
#define USB_VENDOR_ID_NEXTWINDOW 0x1926
#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
+#define USB_VENDOR_ID_NINTENDO 0x057e
+#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
+
#define USB_VENDOR_ID_NTRIG 0x1b96
#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN 0x0001
#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1 0x0003
@@ -629,6 +638,7 @@
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U 0x0004
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U 0x0005
+#define USB_DEVICE_ID_UCLOGIC_TABLET_WP1062 0x0064
#define USB_VENDOR_ID_UNITEC 0x227d
#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709
@@ -662,6 +672,12 @@
#define USB_VENDOR_ID_WISEGROUP_LTD2 0x6677
#define USB_DEVICE_ID_SMARTJOY_DUAL_PLUS 0x8802
+#define USB_VENDOR_ID_X_TENSIONS 0x1ae7
+#define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE 0x9001
+
+#define USB_VENDOR_ID_XAT 0x2505
+#define USB_DEVICE_ID_XAT_CSR 0x0220
+
#define USB_VENDOR_ID_YEALINK 0x6993
#define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K 0xb001
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 21f205f0925..a7f916e8fc3 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -41,6 +41,66 @@
#define LG_FF3 0x1000
#define LG_FF4 0x2000
+/* Size of the original descriptor of the Driving Force Pro wheel */
+#define DFP_RDESC_ORIG_SIZE 97
+
+/* Fixed report descriptor for Logitech Driving Force Pro wheel controller
+ *
+ * The original descriptor hides the separate throttle and brake axes in
+ * a custom vendor usage page, providing only a combined value as
+ * GenericDesktop.Y.
+ * This descriptor removes the combined Y axis and instead reports
+ * separate throttle (Y) and brake (RZ).
+ */
+static __u8 dfp_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x04, /* Usage (Joystik), */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x0E, /* Report Size (14), */
+0x14, /* Logical Minimum (0), */
+0x26, 0xFF, 0x3F, /* Logical Maximum (16383), */
+0x34, /* Physical Minimum (0), */
+0x46, 0xFF, 0x3F, /* Physical Maximum (16383), */
+0x09, 0x30, /* Usage (X), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x0E, /* Report Count (14), */
+0x75, 0x01, /* Report Size (1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x05, 0x09, /* Usage Page (Button), */
+0x19, 0x01, /* Usage Minimum (01h), */
+0x29, 0x0E, /* Usage Maximum (0Eh), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x04, /* Report Size (4), */
+0x25, 0x07, /* Logical Maximum (7), */
+0x46, 0x3B, 0x01, /* Physical Maximum (315), */
+0x65, 0x14, /* Unit (Degrees), */
+0x09, 0x39, /* Usage (Hat Switch), */
+0x81, 0x42, /* Input (Variable, Nullstate), */
+0x65, 0x00, /* Unit, */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x75, 0x08, /* Report Size (8), */
+0x81, 0x01, /* Input (Constant), */
+0x09, 0x31, /* Usage (Y), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x35, /* Usage (Rz), */
+0x81, 0x02, /* Input (Variable), */
+0x81, 0x01, /* Input (Constant), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x09, 0x02, /* Usage (02h), */
+0x95, 0x07, /* Report Count (7), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
+
+
/*
* Certain Logitech keyboards send in report #3 keys which are far
* above the logical maximum described in descriptor. This extends
@@ -74,6 +134,18 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[47] = 0x95;
rdesc[48] = 0x0B;
}
+
+ switch (hdev->product) {
+ case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
+ if (*rsize == DFP_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+ "fixing up Logitech Driving Force Pro report descriptor\n");
+ rdesc = dfp_rdesc_fixed;
+ *rsize = sizeof(dfp_rdesc_fixed);
+ }
+ break;
+ }
+
return rdesc;
}
@@ -380,7 +452,7 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL),
.driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL),
- .driver_data = LG_FF },
+ .driver_data = LG_NOGET | LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL),
.driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ),
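With the fixed Driving Force Pro descriptor, hid-input maps the separated axes to ABS_Y (throttle) and ABS_RZ (brake) instead of one combined value. A minimal sketch of reading them back over evdev (device path is a placeholder):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	/* Placeholder node for the wheel's event device. */
	int fd = open("/dev/input/event0", O_RDONLY);
	struct input_absinfo throttle, brake;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Separate absolute axes, assuming the default hid-input mapping
	 * of GenericDesktop.Y -> ABS_Y and GenericDesktop.Rz -> ABS_RZ. */
	if (ioctl(fd, EVIOCGABS(ABS_Y), &throttle) == 0 &&
	    ioctl(fd, EVIOCGABS(ABS_RZ), &brake) == 0)
		printf("throttle=%d brake=%d\n", throttle.value, brake.value);

	close(fd);
	return 0;
}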
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 0f6fc54dc19..e5c699b6c6f 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -23,11 +23,12 @@
#include "hid-ids.h"
-#define MS_HIDINPUT 0x01
-#define MS_ERGONOMY 0x02
-#define MS_PRESENTER 0x04
-#define MS_RDESC 0x08
-#define MS_NOGET 0x10
+#define MS_HIDINPUT 0x01
+#define MS_ERGONOMY 0x02
+#define MS_PRESENTER 0x04
+#define MS_RDESC 0x08
+#define MS_NOGET 0x10
+#define MS_DUPLICATE_USAGES 0x20
/*
* Microsoft Wireless Desktop Receiver (Model 1028) has
@@ -109,6 +110,18 @@ static int ms_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
+static int ms_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+
+ if (quirks & MS_DUPLICATE_USAGES)
+ clear_bit(usage->code, *bit);
+
+ return 0;
+}
+
static int ms_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
@@ -179,8 +192,12 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_ERGONOMY | MS_RDESC },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
.driver_data = MS_PRESENTER },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
+ .driver_data = MS_ERGONOMY },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
.driver_data = MS_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
+ .driver_data = MS_DUPLICATE_USAGES },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
.driver_data = MS_PRESENTER },
@@ -193,6 +210,7 @@ static struct hid_driver ms_driver = {
.id_table = ms_devices,
.report_fixup = ms_report_fixup,
.input_mapping = ms_input_mapping,
+ .input_mapped = ms_input_mapped,
.event = ms_event,
.probe = ms_probe,
};
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 0b2dcd0ee59..58d0e7aaf08 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -271,6 +271,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
}
return 1;
case HID_DG_CONTACTID:
+ if (!td->maxcontacts)
+ td->maxcontacts = MT_DEFAULT_MAXCONTACT;
input_mt_init_slots(hi->input, td->maxcontacts);
td->last_slot_field = usage->hid;
td->last_field_index = field->index;
@@ -547,9 +549,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret)
goto fail;
- if (!td->maxcontacts)
- td->maxcontacts = MT_DEFAULT_MAXCONTACT;
-
td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot),
GFP_KERNEL);
if (!td->slots) {
@@ -677,6 +676,9 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
USB_DEVICE_ID_CRYSTALTOUCH) },
+ { .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
+ HID_USB_DEVICE(USB_VENDOR_ID_LUMIO,
+ USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
@@ -707,10 +709,10 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
USB_DEVICE_ID_MTP)},
{ .driver_data = MT_CLS_CONFIDENCE,
- HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+ HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
USB_DEVICE_ID_MTP_STM)},
{ .driver_data = MT_CLS_CONFIDENCE,
- HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
+ HID_USB_DEVICE(USB_VENDOR_ID_STANTUM_SITRONIX,
USB_DEVICE_ID_MTP_SITRONIX)},
/* Touch International panels */
@@ -725,6 +727,10 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_DEFAULT,
HID_USB_DEVICE(USB_VENDOR_ID_UNITEC,
USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
+ /* XAT */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XAT,
+ USB_DEVICE_ID_XAT_CSR) },
{ }
};
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index ab19f2905d2..158b389d0fb 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -44,8 +44,6 @@ struct pk_device {
struct pcmidi_snd *pm; /* pcmidi device context */
};
-struct pcmidi_snd;
-
struct pcmidi_sustain {
unsigned long in_use;
struct pcmidi_snd *pm;
@@ -242,7 +240,7 @@ drop_note:
return;
}
-void pcmidi_sustained_note_release(unsigned long data)
+static void pcmidi_sustained_note_release(unsigned long data)
{
struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data;
@@ -250,7 +248,7 @@ void pcmidi_sustained_note_release(unsigned long data)
pms->in_use = 0;
}
-void init_sustain_timers(struct pcmidi_snd *pm)
+static void init_sustain_timers(struct pcmidi_snd *pm)
{
struct pcmidi_sustain *pms;
unsigned i;
@@ -264,7 +262,7 @@ void init_sustain_timers(struct pcmidi_snd *pm)
}
}
-void stop_sustain_timers(struct pcmidi_snd *pm)
+static void stop_sustain_timers(struct pcmidi_snd *pm)
{
struct pcmidi_sustain *pms;
unsigned i;
@@ -499,7 +497,7 @@ static int pcmidi_handle_report4(struct pcmidi_snd *pm, u8 *data)
return 1;
}
-int pcmidi_handle_report(
+static int pcmidi_handle_report(
struct pcmidi_snd *pm, unsigned report_id, u8 *data, int size)
{
int ret = 0;
@@ -518,7 +516,8 @@ int pcmidi_handle_report(
return ret;
}
-void pcmidi_setup_extra_keys(struct pcmidi_snd *pm, struct input_dev *input)
+static void pcmidi_setup_extra_keys(
+ struct pcmidi_snd *pm, struct input_dev *input)
{
/* reassigned functionality for N/A keys
MY PICTURES => KEY_WORDPROCESSOR
@@ -602,7 +601,7 @@ static struct snd_rawmidi_ops pcmidi_in_ops = {
.trigger = pcmidi_in_trigger
};
-int pcmidi_snd_initialise(struct pcmidi_snd *pm)
+static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
{
static int dev;
struct snd_card *card;
@@ -720,7 +719,7 @@ fail:
return err;
}
-int pcmidi_snd_terminate(struct pcmidi_snd *pm)
+static int pcmidi_snd_terminate(struct pcmidi_snd *pm)
{
if (pm->card) {
stop_sustain_timers(pm);
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
index 2307471d96d..093bfad00b0 100644
--- a/drivers/hid/hid-roccat-arvo.c
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -39,7 +39,7 @@ static ssize_t arvo_sysfs_show_mode_key(struct device *dev,
int retval;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_MODE_KEY,
+ retval = roccat_common_receive(usb_dev, ARVO_COMMAND_MODE_KEY,
&temp_buf, sizeof(struct arvo_mode_key));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -67,7 +67,7 @@ static ssize_t arvo_sysfs_set_mode_key(struct device *dev,
temp_buf.state = state;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_MODE_KEY,
+ retval = roccat_common_send(usb_dev, ARVO_COMMAND_MODE_KEY,
&temp_buf, sizeof(struct arvo_mode_key));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -87,7 +87,7 @@ static ssize_t arvo_sysfs_show_key_mask(struct device *dev,
int retval;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_KEY_MASK,
+ retval = roccat_common_receive(usb_dev, ARVO_COMMAND_KEY_MASK,
&temp_buf, sizeof(struct arvo_key_mask));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -115,7 +115,7 @@ static ssize_t arvo_sysfs_set_key_mask(struct device *dev,
temp_buf.key_mask = key_mask;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_KEY_MASK,
+ retval = roccat_common_send(usb_dev, ARVO_COMMAND_KEY_MASK,
&temp_buf, sizeof(struct arvo_key_mask));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -130,7 +130,7 @@ static int arvo_get_actual_profile(struct usb_device *usb_dev)
struct arvo_actual_profile temp_buf;
int retval;
- retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common_receive(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
&temp_buf, sizeof(struct arvo_actual_profile));
if (retval)
@@ -163,11 +163,14 @@ static ssize_t arvo_sysfs_set_actual_profile(struct device *dev,
if (retval)
return retval;
+ if (profile < 1 || profile > 5)
+ return -EINVAL;
+
temp_buf.command = ARVO_COMMAND_ACTUAL_PROFILE;
temp_buf.actual_profile = profile;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common_send(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
&temp_buf, sizeof(struct arvo_actual_profile));
if (!retval) {
arvo->actual_profile = profile;
@@ -225,7 +228,7 @@ static ssize_t arvo_sysfs_write_button(struct file *fp,
loff_t off, size_t count)
{
return arvo_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct arvo_button), ARVO_USB_COMMAND_BUTTON);
+ sizeof(struct arvo_button), ARVO_COMMAND_BUTTON);
}
static ssize_t arvo_sysfs_read_info(struct file *fp,
@@ -233,7 +236,7 @@ static ssize_t arvo_sysfs_read_info(struct file *fp,
loff_t off, size_t count)
{
return arvo_sysfs_read(fp, kobj, buf, off, count,
- sizeof(struct arvo_info), ARVO_USB_COMMAND_INFO);
+ sizeof(struct arvo_info), ARVO_COMMAND_INFO);
}
@@ -399,7 +402,7 @@ static int arvo_raw_event(struct hid_device *hdev,
if (size != 3)
return 0;
- if (arvo->roccat_claimed)
+ if (arvo && arvo->roccat_claimed)
arvo_report_to_chrdev(arvo, data);
return 0;
diff --git a/drivers/hid/hid-roccat-arvo.h b/drivers/hid/hid-roccat-arvo.h
index d284a781c99..ce8415e4f00 100644
--- a/drivers/hid/hid-roccat-arvo.h
+++ b/drivers/hid/hid-roccat-arvo.h
@@ -46,19 +46,6 @@ enum arvo_commands {
ARVO_COMMAND_ACTUAL_PROFILE = 0x7,
};
-enum arvo_usb_commands {
- ARVO_USB_COMMAND_MODE_KEY = 0x303,
- /*
- * read/write
- * Read uses both index bytes as profile/key indexes
- * Write has index 0, profile/key is determined by payload
- */
- ARVO_USB_COMMAND_BUTTON = 0x304,
- ARVO_USB_COMMAND_INFO = 0x305,
- ARVO_USB_COMMAND_KEY_MASK = 0x306,
- ARVO_USB_COMMAND_ACTUAL_PROFILE = 0x307,
-};
-
struct arvo_special_report {
uint8_t unknown1; /* always 0x01 */
uint8_t event;
diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
index 13b1eb0c8c6..edf898dee28 100644
--- a/drivers/hid/hid-roccat-common.c
+++ b/drivers/hid/hid-roccat-common.c
@@ -11,10 +11,16 @@
* any later version.
*/
+#include <linux/hid.h>
#include <linux/slab.h>
#include "hid-roccat-common.h"
-int roccat_common_receive(struct usb_device *usb_dev, uint usb_command,
+static inline uint16_t roccat_common_feature_report(uint8_t report_id)
+{
+ return 0x300 | report_id;
+}
+
+int roccat_common_receive(struct usb_device *usb_dev, uint report_id,
void *data, uint size)
{
char *buf;
@@ -25,9 +31,10 @@ int roccat_common_receive(struct usb_device *usb_dev, uint usb_command,
return -ENOMEM;
len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
+ HID_REQ_GET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+ roccat_common_feature_report(report_id),
+ 0, buf, size, USB_CTRL_SET_TIMEOUT);
memcpy(data, buf, size);
kfree(buf);
@@ -35,7 +42,7 @@ int roccat_common_receive(struct usb_device *usb_dev, uint usb_command,
}
EXPORT_SYMBOL_GPL(roccat_common_receive);
-int roccat_common_send(struct usb_device *usb_dev, uint usb_command,
+int roccat_common_send(struct usb_device *usb_dev, uint report_id,
void const *data, uint size)
{
char *buf;
@@ -48,9 +55,10 @@ int roccat_common_send(struct usb_device *usb_dev, uint usb_command,
memcpy(buf, data, size);
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
+ HID_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+ roccat_common_feature_report(report_id),
+ 0, buf, size, USB_CTRL_SET_TIMEOUT);
kfree(buf);
return ((len < 0) ? len : ((len != size) ? -EIO : 0));
diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h
index fe45fae05bb..9a5bc61f969 100644
--- a/drivers/hid/hid-roccat-common.h
+++ b/drivers/hid/hid-roccat-common.h
@@ -15,9 +15,9 @@
#include <linux/usb.h>
#include <linux/types.h>
-int roccat_common_receive(struct usb_device *usb_dev, uint usb_command,
+int roccat_common_receive(struct usb_device *usb_dev, uint report_id,
void *data, uint size);
-int roccat_common_send(struct usb_device *usb_dev, uint usb_command,
+int roccat_common_send(struct usb_device *usb_dev, uint report_id,
void const *data, uint size);
#endif
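The renamed roccat_common helpers now take the bare HID report id and fold in the feature-report type themselves, so each removed 0x30x "usb_command" constant is simply 0x300 | report_id. A standalone sketch of that equivalence, using ids visible in the diff:

/* Standalone sketch: the removed *_USB_COMMAND_* constants were the HID
 * feature-report wValue (report type 0x03 in the high byte); the new
 * roccat_common_feature_report() rebuilds the same value from the id.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t feature_report(uint8_t report_id)
{
	return 0x300 | report_id;	/* 0x03 == HID feature report */
}

int main(void)
{
	assert(feature_report(0x7) == 0x307);	/* old ..._USB_COMMAND_PROFILE_BUTTONS */
	assert(feature_report(0x9) == 0x309);	/* old ..._USB_COMMAND_INFO */
	return 0;
}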
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index a57838d1526..2b8f3a31ffb 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -37,6 +37,47 @@
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
+static int kone_receive(struct usb_device *usb_dev, uint usb_command,
+ void *data, uint size)
+{
+ char *buf;
+ int len;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ HID_REQ_GET_REPORT,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+
+ memcpy(data, buf, size);
+ kfree(buf);
+ return ((len < 0) ? len : ((len != size) ? -EIO : 0));
+}
+
+static int kone_send(struct usb_device *usb_dev, uint usb_command,
+ void const *data, uint size)
+{
+ char *buf;
+ int len;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ memcpy(buf, data, size);
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ HID_REQ_SET_REPORT,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+
+ kfree(buf);
+ return ((len < 0) ? len : ((len != size) ? -EIO : 0));
+}
+
/* kone_class is used for creating sysfs attributes via roccat char device */
static struct class *kone_class;
@@ -68,7 +109,7 @@ static int kone_check_write(struct usb_device *usb_dev)
*/
msleep(80);
- retval = roccat_common_receive(usb_dev,
+ retval = kone_receive(usb_dev,
kone_command_confirm_write, &data, 1);
if (retval)
return retval;
@@ -96,7 +137,7 @@ static int kone_check_write(struct usb_device *usb_dev)
static int kone_get_settings(struct usb_device *usb_dev,
struct kone_settings *buf)
{
- return roccat_common_receive(usb_dev, kone_command_settings, buf,
+ return kone_receive(usb_dev, kone_command_settings, buf,
sizeof(struct kone_settings));
}
@@ -109,7 +150,7 @@ static int kone_set_settings(struct usb_device *usb_dev,
struct kone_settings const *settings)
{
int retval;
- retval = roccat_common_send(usb_dev, kone_command_settings,
+ retval = kone_send(usb_dev, kone_command_settings,
settings, sizeof(struct kone_settings));
if (retval)
return retval;
@@ -182,7 +223,7 @@ static int kone_get_weight(struct usb_device *usb_dev, int *result)
int retval;
uint8_t data;
- retval = roccat_common_receive(usb_dev, kone_command_weight, &data, 1);
+ retval = kone_receive(usb_dev, kone_command_weight, &data, 1);
if (retval)
return retval;
@@ -201,7 +242,7 @@ static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
int retval;
uint16_t data;
- retval = roccat_common_receive(usb_dev, kone_command_firmware_version,
+ retval = kone_receive(usb_dev, kone_command_firmware_version,
&data, 2);
if (retval)
return retval;
@@ -384,7 +425,7 @@ static int kone_tcu_command(struct usb_device *usb_dev, int number)
{
unsigned char value;
value = number;
- return roccat_common_send(usb_dev, kone_command_calibrate, &value, 1);
+ return kone_send(usb_dev, kone_command_calibrate, &value, 1);
}
/*
@@ -791,6 +832,9 @@ static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
if (size != sizeof(struct kone_mouse_event))
return 0;
+ if (kone == NULL)
+ return 0;
+
/*
* Firmware 1.38 introduced new behaviour for tilt and special buttons.
* Pressed button is reported in each movement event.
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
index 4109a028e13..64abb5b8a59 100644
--- a/drivers/hid/hid-roccat-kone.h
+++ b/drivers/hid/hid-roccat-kone.h
@@ -166,7 +166,7 @@ enum kone_mouse_events {
/* osd events are thought to be display on screen */
kone_mouse_event_osd_dpi = 0xa0,
kone_mouse_event_osd_profile = 0xb0,
- /* TODO clarify meaning and occurence of kone_mouse_event_calibration */
+ /* TODO clarify meaning and occurrence of kone_mouse_event_calibration */
kone_mouse_event_calibration = 0xc0,
kone_mouse_event_call_overlong_macro = 0xe0,
/* switch events notify if user changed values with mousebutton click */
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 5b640a7a15a..59e47770fa1 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -50,7 +50,7 @@ static int koneplus_send_control(struct usb_device *usb_dev, uint value,
control.value = value;
control.request = request;
- return roccat_common_send(usb_dev, KONEPLUS_USB_COMMAND_CONTROL,
+ return roccat_common_send(usb_dev, KONEPLUS_COMMAND_CONTROL,
&control, sizeof(struct koneplus_control));
}
@@ -60,7 +60,7 @@ static int koneplus_receive_control_status(struct usb_device *usb_dev)
struct koneplus_control control;
do {
- retval = roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_CONTROL,
+ retval = roccat_common_receive(usb_dev, KONEPLUS_COMMAND_CONTROL,
&control, sizeof(struct koneplus_control));
/* check if we get a completely wrong answer */
@@ -120,7 +120,7 @@ static int koneplus_select_profile(struct usb_device *usb_dev, uint number,
static int koneplus_get_info(struct usb_device *usb_dev,
struct koneplus_info *buf)
{
- return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_INFO,
+ return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_INFO,
buf, sizeof(struct koneplus_info));
}
@@ -134,14 +134,14 @@ static int koneplus_get_profile_settings(struct usb_device *usb_dev,
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct koneplus_profile_settings));
}
static int koneplus_set_profile_settings(struct usb_device *usb_dev,
struct koneplus_profile_settings const *settings)
{
- return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ return koneplus_send(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
settings, sizeof(struct koneplus_profile_settings));
}
@@ -155,14 +155,14 @@ static int koneplus_get_profile_buttons(struct usb_device *usb_dev,
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct koneplus_profile_buttons));
}
static int koneplus_set_profile_buttons(struct usb_device *usb_dev,
struct koneplus_profile_buttons const *buttons)
{
- return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ return koneplus_send(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
buttons, sizeof(struct koneplus_profile_buttons));
}
@@ -172,7 +172,7 @@ static int koneplus_get_actual_profile(struct usb_device *usb_dev)
struct koneplus_actual_profile buf;
int retval;
- retval = roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common_receive(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct koneplus_actual_profile));
return retval ? retval : buf.actual_profile;
@@ -187,7 +187,7 @@ static int koneplus_set_actual_profile(struct usb_device *usb_dev,
buf.size = sizeof(struct koneplus_actual_profile);
buf.actual_profile = new_profile;
- return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_ACTUAL_PROFILE,
+ return koneplus_send(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct koneplus_actual_profile));
}
@@ -240,12 +240,20 @@ static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
return real_size;
}
+static ssize_t koneplus_sysfs_write_talk(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return koneplus_sysfs_write(fp, kobj, buf, off, count,
+ sizeof(struct koneplus_talk), KONEPLUS_COMMAND_TALK);
+}
+
static ssize_t koneplus_sysfs_write_macro(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
return koneplus_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct koneplus_macro), KONEPLUS_USB_COMMAND_MACRO);
+ sizeof(struct koneplus_macro), KONEPLUS_COMMAND_MACRO);
}
static ssize_t koneplus_sysfs_read_sensor(struct file *fp,
@@ -253,7 +261,7 @@ static ssize_t koneplus_sysfs_read_sensor(struct file *fp,
loff_t off, size_t count)
{
return koneplus_sysfs_read(fp, kobj, buf, off, count,
- sizeof(struct koneplus_sensor), KONEPLUS_USB_COMMAND_SENSOR);
+ sizeof(struct koneplus_sensor), KONEPLUS_COMMAND_SENSOR);
}
static ssize_t koneplus_sysfs_write_sensor(struct file *fp,
@@ -261,7 +269,7 @@ static ssize_t koneplus_sysfs_write_sensor(struct file *fp,
loff_t off, size_t count)
{
return koneplus_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct koneplus_sensor), KONEPLUS_USB_COMMAND_SENSOR);
+ sizeof(struct koneplus_sensor), KONEPLUS_COMMAND_SENSOR);
}
static ssize_t koneplus_sysfs_write_tcu(struct file *fp,
@@ -269,7 +277,7 @@ static ssize_t koneplus_sysfs_write_tcu(struct file *fp,
loff_t off, size_t count)
{
return koneplus_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct koneplus_tcu), KONEPLUS_USB_COMMAND_TCU);
+ sizeof(struct koneplus_tcu), KONEPLUS_COMMAND_TCU);
}
static ssize_t koneplus_sysfs_read_tcu_image(struct file *fp,
@@ -277,7 +285,7 @@ static ssize_t koneplus_sysfs_read_tcu_image(struct file *fp,
loff_t off, size_t count)
{
return koneplus_sysfs_read(fp, kobj, buf, off, count,
- sizeof(struct koneplus_tcu_image), KONEPLUS_USB_COMMAND_TCU);
+ sizeof(struct koneplus_tcu_image), KONEPLUS_COMMAND_TCU);
}
static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
@@ -423,6 +431,9 @@ static ssize_t koneplus_sysfs_set_actual_profile(struct device *dev,
if (retval)
return retval;
+ if (profile > 4)
+ return -EINVAL;
+
mutex_lock(&koneplus->koneplus_lock);
retval = koneplus_set_actual_profile(usb_dev, profile);
@@ -431,7 +442,7 @@ static ssize_t koneplus_sysfs_set_actual_profile(struct device *dev,
return retval;
}
- koneplus->actual_profile = profile;
+ koneplus_profile_activated(koneplus, profile);
roccat_report.type = KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE;
roccat_report.data1 = profile + 1;
@@ -557,6 +568,11 @@ static struct bin_attribute koneplus_bin_attributes[] = {
.size = sizeof(struct koneplus_macro),
.write = koneplus_sysfs_write_macro
},
+ {
+ .attr = { .name = "talk", .mode = 0220 },
+ .size = sizeof(struct koneplus_talk),
+ .write = koneplus_sysfs_write_talk
+ },
__ATTR_NULL
};
@@ -738,6 +754,9 @@ static int koneplus_raw_event(struct hid_device *hdev,
!= USB_INTERFACE_PROTOCOL_MOUSE)
return 0;
+ if (koneplus == NULL)
+ return 0;
+
koneplus_keep_values_up_to_date(koneplus, data);
if (koneplus->roccat_claimed)
diff --git a/drivers/hid/hid-roccat-koneplus.h b/drivers/hid/hid-roccat-koneplus.h
index c57a376ab8a..c03332a4fa9 100644
--- a/drivers/hid/hid-roccat-koneplus.h
+++ b/drivers/hid/hid-roccat-koneplus.h
@@ -14,6 +14,12 @@
#include <linux/types.h>
+struct koneplus_talk {
+ uint8_t command; /* KONEPLUS_COMMAND_TALK */
+ uint8_t size; /* always 0x10 */
+ uint8_t data[14];
+} __packed;
+
/*
* case 1: writes request 80 and reads value 1
*
@@ -137,26 +143,14 @@ enum koneplus_commands {
KONEPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
KONEPLUS_COMMAND_MACRO = 0x8,
KONEPLUS_COMMAND_INFO = 0x9,
+ KONEPLUS_COMMAND_TCU = 0xc,
KONEPLUS_COMMAND_E = 0xe,
KONEPLUS_COMMAND_SENSOR = 0xf,
+ KONEPLUS_COMMAND_TALK = 0x10,
KONEPLUS_COMMAND_FIRMWARE_WRITE = 0x1b,
KONEPLUS_COMMAND_FIRMWARE_WRITE_CONTROL = 0x1c,
};
-enum koneplus_usb_commands {
- KONEPLUS_USB_COMMAND_CONTROL = 0x304,
- KONEPLUS_USB_COMMAND_ACTUAL_PROFILE = 0x305,
- KONEPLUS_USB_COMMAND_PROFILE_SETTINGS = 0x306,
- KONEPLUS_USB_COMMAND_PROFILE_BUTTONS = 0x307,
- KONEPLUS_USB_COMMAND_MACRO = 0x308,
- KONEPLUS_USB_COMMAND_INFO = 0x309,
- KONEPLUS_USB_COMMAND_TCU = 0x30c,
- KONEPLUS_USB_COMMAND_E = 0x30e,
- KONEPLUS_USB_COMMAND_SENSOR = 0x30f,
- KONEPLUS_USB_COMMAND_FIRMWARE_WRITE = 0x31b,
- KONEPLUS_USB_COMMAND_FIRMWARE_WRITE_CONTROL = 0x31c,
-};
-
enum koneplus_mouse_report_numbers {
KONEPLUS_MOUSE_REPORT_NUMBER_HID = 1,
KONEPLUS_MOUSE_REPORT_NUMBER_AUDIO = 2,
@@ -193,6 +187,7 @@ enum koneplus_mouse_report_button_types {
* data2 = action
*/
KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_MULTIMEDIA = 0xf0,
+ KONEPLUS_MOUSE_REPORT_TALK = 0xff,
};
enum koneplus_mouse_report_button_action {
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 984be2f8967..1f8336e3f58 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -58,7 +58,7 @@ static int kovaplus_send_control(struct usb_device *usb_dev, uint value,
control.value = value;
control.request = request;
- retval = roccat_common_send(usb_dev, KOVAPLUS_USB_COMMAND_CONTROL,
+ retval = roccat_common_send(usb_dev, KOVAPLUS_COMMAND_CONTROL,
&control, sizeof(struct kovaplus_control));
return retval;
@@ -70,7 +70,7 @@ static int kovaplus_receive_control_status(struct usb_device *usb_dev)
struct kovaplus_control control;
do {
- retval = roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_CONTROL,
+ retval = roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_CONTROL,
&control, sizeof(struct kovaplus_control));
/* check if we get a completely wrong answer */
@@ -90,7 +90,7 @@ static int kovaplus_receive_control_status(struct usb_device *usb_dev)
if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD)
return -EINVAL;
- hid_err(usb_dev, "kovaplus_receive_control_status: "
+ hid_err(usb_dev, "roccat_common_receive_control_status: "
"unknown response value 0x%x\n", control.value);
return -EINVAL;
} while (1);
@@ -119,7 +119,7 @@ static int kovaplus_select_profile(struct usb_device *usb_dev, uint number,
static int kovaplus_get_info(struct usb_device *usb_dev,
struct kovaplus_info *buf)
{
- return roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_INFO,
+ return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_INFO,
buf, sizeof(struct kovaplus_info));
}
@@ -133,14 +133,14 @@ static int kovaplus_get_profile_settings(struct usb_device *usb_dev,
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct kovaplus_profile_settings));
}
static int kovaplus_set_profile_settings(struct usb_device *usb_dev,
struct kovaplus_profile_settings const *settings)
{
- return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
settings, sizeof(struct kovaplus_profile_settings));
}
@@ -154,14 +154,14 @@ static int kovaplus_get_profile_buttons(struct usb_device *usb_dev,
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct kovaplus_profile_buttons));
}
static int kovaplus_set_profile_buttons(struct usb_device *usb_dev,
struct kovaplus_profile_buttons const *buttons)
{
- return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
buttons, sizeof(struct kovaplus_profile_buttons));
}
@@ -171,7 +171,7 @@ static int kovaplus_get_actual_profile(struct usb_device *usb_dev)
struct kovaplus_actual_profile buf;
int retval;
- retval = roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct kovaplus_actual_profile));
return retval ? retval : buf.actual_profile;
@@ -186,7 +186,7 @@ static int kovaplus_set_actual_profile(struct usb_device *usb_dev,
buf.size = sizeof(struct kovaplus_actual_profile);
buf.actual_profile = new_profile;
- return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE,
+ return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct kovaplus_actual_profile));
}
@@ -337,7 +337,7 @@ static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,
mutex_lock(&kovaplus->kovaplus_lock);
retval = kovaplus_set_actual_profile(usb_dev, profile);
- kovaplus->actual_profile = profile;
+ kovaplus_profile_activated(kovaplus, profile);
mutex_unlock(&kovaplus->kovaplus_lock);
if (retval)
return retval;
@@ -662,6 +662,9 @@ static int kovaplus_raw_event(struct hid_device *hdev,
!= USB_INTERFACE_PROTOCOL_MOUSE)
return 0;
+ if (kovaplus == NULL)
+ return 0;
+
kovaplus_keep_values_up_to_date(kovaplus, data);
if (kovaplus->roccat_claimed)
diff --git a/drivers/hid/hid-roccat-kovaplus.h b/drivers/hid/hid-roccat-kovaplus.h
index ce40607d21c..fb2aed44a8e 100644
--- a/drivers/hid/hid-roccat-kovaplus.h
+++ b/drivers/hid/hid-roccat-kovaplus.h
@@ -83,15 +83,6 @@ enum kovaplus_commands {
KOVAPLUS_COMMAND_A = 0xa,
};
-enum kovaplus_usb_commands {
- KOVAPLUS_USB_COMMAND_CONTROL = 0x304,
- KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE = 0x305,
- KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS = 0x306,
- KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS = 0x307,
- KOVAPLUS_USB_COMMAND_INFO = 0x309,
- KOVAPLUS_USB_COMMAND_A = 0x30a,
-};
-
enum kovaplus_mouse_report_numbers {
KOVAPLUS_MOUSE_REPORT_NUMBER_MOUSE = 1,
KOVAPLUS_MOUSE_REPORT_NUMBER_AUDIO = 2,
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 38280c055a1..8140776bd8c 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -53,7 +53,7 @@ static int pyra_send_control(struct usb_device *usb_dev, int value,
control.value = value;
control.request = request;
- return roccat_common_send(usb_dev, PYRA_USB_COMMAND_CONTROL,
+ return roccat_common_send(usb_dev, PYRA_COMMAND_CONTROL,
&control, sizeof(struct pyra_control));
}
@@ -64,7 +64,7 @@ static int pyra_receive_control_status(struct usb_device *usb_dev)
do {
msleep(10);
- retval = roccat_common_receive(usb_dev, PYRA_USB_COMMAND_CONTROL,
+ retval = roccat_common_receive(usb_dev, PYRA_COMMAND_CONTROL,
&control, sizeof(struct pyra_control));
/* requested too early, try again */
@@ -89,7 +89,7 @@ static int pyra_get_profile_settings(struct usb_device *usb_dev,
PYRA_CONTROL_REQUEST_PROFILE_SETTINGS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_PROFILE_SETTINGS,
+ return roccat_common_receive(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct pyra_profile_settings));
}
@@ -101,20 +101,20 @@ static int pyra_get_profile_buttons(struct usb_device *usb_dev,
PYRA_CONTROL_REQUEST_PROFILE_BUTTONS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_PROFILE_BUTTONS,
+ return roccat_common_receive(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct pyra_profile_buttons));
}
static int pyra_get_settings(struct usb_device *usb_dev,
struct pyra_settings *buf)
{
- return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_SETTINGS,
+ return roccat_common_receive(usb_dev, PYRA_COMMAND_SETTINGS,
buf, sizeof(struct pyra_settings));
}
static int pyra_get_info(struct usb_device *usb_dev, struct pyra_info *buf)
{
- return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_INFO,
+ return roccat_common_receive(usb_dev, PYRA_COMMAND_INFO,
buf, sizeof(struct pyra_info));
}
@@ -131,26 +131,22 @@ static int pyra_send(struct usb_device *usb_dev, uint command,
static int pyra_set_profile_settings(struct usb_device *usb_dev,
struct pyra_profile_settings const *settings)
{
- return pyra_send(usb_dev, PYRA_USB_COMMAND_PROFILE_SETTINGS, settings,
+ return pyra_send(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS, settings,
sizeof(struct pyra_profile_settings));
}
static int pyra_set_profile_buttons(struct usb_device *usb_dev,
struct pyra_profile_buttons const *buttons)
{
- return pyra_send(usb_dev, PYRA_USB_COMMAND_PROFILE_BUTTONS, buttons,
+ return pyra_send(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS, buttons,
sizeof(struct pyra_profile_buttons));
}
static int pyra_set_settings(struct usb_device *usb_dev,
struct pyra_settings const *settings)
{
- int retval;
- retval = roccat_common_send(usb_dev, PYRA_USB_COMMAND_SETTINGS, settings,
+ return pyra_send(usb_dev, PYRA_COMMAND_SETTINGS, settings,
sizeof(struct pyra_settings));
- if (retval)
- return retval;
- return pyra_receive_control_status(usb_dev);
}
static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
@@ -641,6 +637,9 @@ static int pyra_raw_event(struct hid_device *hdev, struct hid_report *report,
!= USB_INTERFACE_PROTOCOL_MOUSE)
return 0;
+ if (pyra == NULL)
+ return 0;
+
pyra_keep_values_up_to_date(pyra, data);
if (pyra->roccat_claimed)
diff --git a/drivers/hid/hid-roccat-pyra.h b/drivers/hid/hid-roccat-pyra.h
index 14cbbe1621e..0442d7fa2dc 100644
--- a/drivers/hid/hid-roccat-pyra.h
+++ b/drivers/hid/hid-roccat-pyra.h
@@ -83,15 +83,6 @@ enum pyra_commands {
PYRA_COMMAND_B = 0xb
};
-enum pyra_usb_commands {
- PYRA_USB_COMMAND_CONTROL = 0x304,
- PYRA_USB_COMMAND_SETTINGS = 0x305,
- PYRA_USB_COMMAND_PROFILE_SETTINGS = 0x306,
- PYRA_USB_COMMAND_PROFILE_BUTTONS = 0x307,
- PYRA_USB_COMMAND_INFO = 0x309,
- PYRA_USB_COMMAND_B = 0x30b /* writes 3 bytes */
-};
-
enum pyra_mouse_report_numbers {
PYRA_MOUSE_REPORT_NUMBER_HID = 1,
PYRA_MOUSE_REPORT_NUMBER_AUDIO = 2,
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 936c911fdca..5cd25bd907f 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -28,6 +28,12 @@
#define SIXAXIS_CONTROLLER_USB (1 << 1)
#define SIXAXIS_CONTROLLER_BT (1 << 2)
+static const u8 sixaxis_rdesc_fixup[] = {
+ 0x95, 0x13, 0x09, 0x01, 0x81, 0x02, 0x95, 0x0C,
+ 0x81, 0x01, 0x75, 0x10, 0x95, 0x04, 0x26, 0xFF,
+ 0x03, 0x46, 0xFF, 0x03, 0x09, 0x01, 0x81, 0x02
+};
+
struct sony_sc {
unsigned long quirks;
};
@@ -43,9 +49,37 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev, "Fixing up Sony Vaio VGX report descriptor\n");
rdesc[55] = 0x06;
}
+
+ /* The HID descriptor exposed over BT has a trailing zero byte */
+ if ((((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize == 148) ||
+ ((sc->quirks & SIXAXIS_CONTROLLER_BT) && *rsize == 149)) &&
+ rdesc[83] == 0x75) {
+ hid_info(hdev, "Fixing up Sony Sixaxis report descriptor\n");
+ memcpy((void *)&rdesc[83], (void *)&sixaxis_rdesc_fixup,
+ sizeof(sixaxis_rdesc_fixup));
+ }
return rdesc;
}
+static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
+ __u8 *rd, int size)
+{
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+ /* Sixaxis HID report has accelerometers/gyro with MSByte first, this
+ * has to be BYTE_SWAPPED before passing up to joystick interface
+ */
+ if ((sc->quirks & (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)) &&
+ rd[0] == 0x01 && size == 49) {
+ swap(rd[41], rd[42]);
+ swap(rd[43], rd[44]);
+ swap(rd[45], rd[46]);
+ swap(rd[47], rd[48]);
+ }
+
+ return 0;
+}
+
/*
* The Sony Sixaxis does not handle HID Output Reports on the Interrupt EP
* like it should according to usbhid/hid-core.c::usbhid_output_raw_report()
@@ -194,6 +228,7 @@ static struct hid_driver sony_driver = {
.probe = sony_probe,
.remove = sony_remove,
.report_fixup = sony_report_fixup,
+ .raw_event = sony_raw_event
};
static int __init sony_init(void)
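The Sixaxis swap is needed because the controller sends its 16-bit accelerometer/gyro samples MSB first, while the HID core parses report fields as little-endian. A standalone sketch with made-up sample bytes:

#include <stdint.h>
#include <stdio.h>

/* How the HID core reads a 16-bit field: low byte first. */
static uint16_t le16(const uint8_t *p)
{
	return p[0] | (p[1] << 8);
}

int main(void)
{
	uint8_t rd[49] = { 0 };	/* hypothetical 49-byte report buffer */
	uint8_t tmp;

	/* Device puts an accelerometer sample at bytes 41..42, MSB first. */
	rd[41] = 0x02;	/* high byte */
	rd[42] = 0x1a;	/* low byte  */

	printf("without swap: 0x%04x\n", le16(&rd[41]));	/* wrong: 0x1a02 */

	/* sony_raw_event() swaps the pair before the report is parsed. */
	tmp = rd[41]; rd[41] = rd[42]; rd[42] = tmp;
	printf("with swap:    0x%04x\n", le16(&rd[41]));	/* right: 0x021a */

	return 0;
}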
diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
new file mode 100644
index 00000000000..60201374171
--- /dev/null
+++ b/drivers/hid/hid-speedlink.c
@@ -0,0 +1,89 @@
+/*
+ * HID driver for Speedlink Vicious and Divine Cezanne (USB mouse).
+ * Fixes "jumpy" cursor and removes nonexistent keyboard LEDs from
+ * the HID descriptor.
+ *
+ * Copyright (c) 2011 Stefan Kriwanek <mail@stefankriwanek.de>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "hid-ids.h"
+#include "usbhid/usbhid.h"
+
+static const struct hid_device_id speedlink_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE)},
+ { }
+};
+
+static int speedlink_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ /*
+ * The Cezanne mouse has a second "keyboard" USB endpoint because it
+ * can map keyboard events to its button presses.
+ * It sends a standard keyboard report descriptor, though, whose
+ * LEDs we ignore.
+ */
+ switch (usage->hid & HID_USAGE_PAGE) {
+ case HID_UP_LED:
+ return -1;
+ }
+ return 0;
+}
+
+static int speedlink_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ /* No other conditions due to usage_table. */
+ /* Fix "jumpy" cursor (invalid events sent by device). */
+ if (value == 256)
+ return 1;
+ /* Drop useless distance 0 events (on button clicks etc.) as well */
+ if (value == 0)
+ return 1;
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(hid, speedlink_devices);
+
+static const struct hid_usage_id speedlink_grabbed_usages[] = {
+ { HID_GD_X, EV_REL, 0 },
+ { HID_GD_Y, EV_REL, 1 },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver speedlink_driver = {
+ .name = "speedlink",
+ .id_table = speedlink_devices,
+ .usage_table = speedlink_grabbed_usages,
+ .input_mapping = speedlink_input_mapping,
+ .event = speedlink_event,
+};
+
+static int __init speedlink_init(void)
+{
+ return hid_register_driver(&speedlink_driver);
+}
+
+static void __exit speedlink_exit(void)
+{
+ hid_unregister_driver(&speedlink_driver);
+}
+
+module_init(speedlink_init);
+module_exit(speedlink_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 05fdc85a76e..e15732f1a22 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -343,6 +343,193 @@ static __u8 wp8060u_rdesc_fixed[] = {
};
/*
+ * Original WP1062 report descriptor.
+ *
+ * Only report ID 9 is actually used.
+ *
+ * Usage Page (Digitizer), ; Digitizer (0Dh)
+ * Usage (Pen), ; Pen (02h, application collection)
+ * Collection (Application),
+ * Report ID (7),
+ * Usage (Stylus), ; Stylus (20h, logical collection)
+ * Collection (Physical),
+ * Usage (Tip Switch), ; Tip switch (42h, momentary control)
+ * Usage (Barrel Switch), ; Barrel switch (44h, momentary control)
+ * Usage (Eraser), ; Eraser (45h, momentary control)
+ * Logical Minimum (0),
+ * Logical Maximum (1),
+ * Report Size (1),
+ * Report Count (3),
+ * Input (Variable),
+ * Report Count (3),
+ * Input (Constant, Variable),
+ * Usage (In Range), ; In range (32h, momentary control)
+ * Report Count (1),
+ * Input (Variable),
+ * Report Count (1),
+ * Input (Constant, Variable),
+ * Usage Page (Desktop), ; Generic desktop controls (01h)
+ * Usage (X), ; X (30h, dynamic value)
+ * Report Size (16),
+ * Report Count (1),
+ * Push,
+ * Unit Exponent (13),
+ * Unit (Inch),
+ * Physical Minimum (0),
+ * Physical Maximum (10000),
+ * Logical Maximum (20000),
+ * Input (Variable),
+ * Usage (Y), ; Y (31h, dynamic value)
+ * Physical Maximum (6583),
+ * Logical Maximum (13166),
+ * Input (Variable),
+ * Pop,
+ * Usage Page (Digitizer), ; Digitizer (0Dh)
+ * Usage (Tip Pressure), ; Tip pressure (30h, dynamic value)
+ * Logical Maximum (1023),
+ * Input (Variable),
+ * Report Size (16),
+ * End Collection,
+ * End Collection,
+ * Usage Page (Desktop), ; Generic desktop controls (01h)
+ * Usage (Mouse), ; Mouse (02h, application collection)
+ * Collection (Application),
+ * Report ID (8),
+ * Usage (Pointer), ; Pointer (01h, physical collection)
+ * Collection (Physical),
+ * Usage Page (Button), ; Button (09h)
+ * Usage Minimum (01h),
+ * Usage Maximum (03h),
+ * Logical Minimum (0),
+ * Logical Maximum (1),
+ * Report Count (3),
+ * Report Size (1),
+ * Input (Variable),
+ * Report Count (5),
+ * Input (Constant),
+ * Usage Page (Desktop), ; Generic desktop controls (01h)
+ * Usage (X), ; X (30h, dynamic value)
+ * Usage (Y), ; Y (31h, dynamic value)
+ * Usage (Wheel), ; Wheel (38h, dynamic value)
+ * Usage (00h),
+ * Logical Minimum (-127),
+ * Logical Maximum (127),
+ * Report Size (8),
+ * Report Count (4),
+ * Input (Variable, Relative),
+ * End Collection,
+ * End Collection,
+ * Usage Page (Desktop), ; Generic desktop controls (01h)
+ * Usage (Mouse), ; Mouse (02h, application collection)
+ * Collection (Application),
+ * Report ID (9),
+ * Usage (Pointer), ; Pointer (01h, physical collection)
+ * Collection (Physical),
+ * Usage Page (Button), ; Button (09h)
+ * Usage Minimum (01h),
+ * Usage Maximum (03h),
+ * Logical Minimum (0),
+ * Logical Maximum (1),
+ * Report Count (3),
+ * Report Size (1),
+ * Input (Variable),
+ * Report Count (4),
+ * Input (Constant),
+ * Usage Page (Digitizer), ; Digitizer (0Dh)
+ * Usage (In Range), ; In range (32h, momentary control)
+ * Report Count (1),
+ * Input (Variable),
+ * Usage Page (Desktop), ; Generic desktop controls (01h)
+ * Usage (X), ; X (30h, dynamic value)
+ * Report Size (16),
+ * Report Count (1),
+ * Push,
+ * Unit Exponent (13),
+ * Unit (Inch),
+ * Physical Minimum (0),
+ * Physical Maximum (10000),
+ * Logical Maximum (20000),
+ * Input (Variable),
+ * Usage (Y), ; Y (31h, dynamic value)
+ * Physical Maximum (6583),
+ * Logical Maximum (13166),
+ * Input (Variable),
+ * Pop,
+ * Usage Page (Digitizer), ; Digitizer (0Dh)
+ * Usage (Tip Pressure), ; Tip pressure (30h, dynamic value)
+ * Logical Maximum (1023),
+ * Report Count (1),
+ * Report Size (16),
+ * Input (Variable),
+ * End Collection,
+ * End Collection,
+ * Usage Page (Desktop), ; Generic desktop controls (01h)
+ * Usage (00h),
+ * Collection (Application),
+ * Report ID (4),
+ * Logical Minimum (0),
+ * Logical Maximum (255),
+ * Usage (00h),
+ * Report Size (8),
+ * Report Count (3),
+ * Feature (Variable),
+ * End Collection
+ */
+
+/* Size of the original descriptor of WP1062 tablet */
+#define WP1062_RDESC_ORIG_SIZE 254
+
+/*
+ * Fixed WP1062 report descriptor.
+ *
+ * Removed unused reports; corrected the second barrel button usage code and
+ * the physical units.
+ */
+static __u8 wp1062_rdesc_fixed[] = {
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x02, /* Usage (Pen), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x09, /* Report ID (9), */
+ 0x09, 0x20, /* Usage (Stylus), */
+ 0xA0, /* Collection (Physical), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x09, 0x44, /* Usage (Barrel Switch), */
+ 0x09, 0x46, /* Usage (Tablet Pick), */
+ 0x14, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x04, /* Report Count (4), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x09, 0x32, /* Usage (In Range), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x14, /* Logical Minimum (0), */
+ 0xA4, /* Push, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x55, 0xFD, /* Unit Exponent (-3), */
+ 0x65, 0x13, /* Unit (Inch), */
+ 0x34, /* Physical Minimum (0), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x10, 0x27, /* Physical Maximum (10000), */
+ 0x26, 0x20, 0x4E, /* Logical Maximum (20000), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x46, 0xB7, 0x19, /* Physical Maximum (6583), */
+ 0x26, 0x6E, 0x33, /* Logical Maximum (13166), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xB4, /* Pop, */
+ 0x09, 0x30, /* Usage (Tip Pressure), */
+ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0xC0 /* End Collection */
+};
+
+/*
* Original PF1209 report descriptor.
*
* The descriptor is similar to WPXXXXU descriptors, with an addition of a
@@ -584,6 +771,12 @@ static __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*rsize = sizeof(wp8060u_rdesc_fixed);
}
break;
+ case USB_DEVICE_ID_UCLOGIC_TABLET_WP1062:
+ if (*rsize == WP1062_RDESC_ORIG_SIZE) {
+ rdesc = wp1062_rdesc_fixed;
+ *rsize = sizeof(wp1062_rdesc_fixed);
+ }
+ break;
}
return rdesc;
@@ -598,6 +791,8 @@ static const struct hid_device_id uclogic_devices[] = {
USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
+ USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
{ }
};
MODULE_DEVICE_TABLE(hid, uclogic_devices);
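
The wp1062_rdesc_fixed[] bytes above are HID "short items": the prefix byte packs the data size in bits 0-1, the item type in bits 2-3 and the tag in bits 4-7, and any data bytes follow little-endian, which is how 0x46 0x10 0x27 encodes Physical Maximum (10000) and 0x26 0x20 0x4E encodes Logical Maximum (20000). A small standalone sketch of that decoding (helper names are made up, not part of the patch):

#include <stdio.h>

/* Number of data bytes encoded in a short-item prefix (0, 1, 2 or 4). */
static unsigned int item_size(unsigned char prefix)
{
	static const unsigned int sizes[4] = { 0, 1, 2, 4 };

	return sizes[prefix & 0x03];
}

/* Little-endian data payload of one short item. */
static unsigned long item_data(const unsigned char *item)
{
	unsigned int i, n = item_size(item[0]);
	unsigned long val = 0;

	for (i = 0; i < n; i++)
		val |= (unsigned long)item[1 + i] << (8 * i);
	return val;
}

int main(void)
{
	const unsigned char phys_max[] = { 0x46, 0x10, 0x27 };	/* Physical Maximum */
	const unsigned char log_max[]  = { 0x26, 0x20, 0x4E };	/* Logical Maximum */

	printf("tag 0x%x -> %lu\n", phys_max[0] >> 4, item_data(phys_max));
	printf("tag 0x%x -> %lu\n", log_max[0] >> 4, item_data(log_max));
	return 0;
}
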
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c
new file mode 100644
index 00000000000..a594383ce03
--- /dev/null
+++ b/drivers/hid/hid-wiimote.c
@@ -0,0 +1,489 @@
+/*
+ * HID driver for Nintendo Wiimote devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include "hid-ids.h"
+
+#define WIIMOTE_VERSION "0.1"
+#define WIIMOTE_NAME "Nintendo Wii Remote"
+#define WIIMOTE_BUFSIZE 32
+
+struct wiimote_buf {
+ __u8 data[HID_MAX_BUFFER_SIZE];
+ size_t size;
+};
+
+struct wiimote_state {
+ spinlock_t lock;
+ __u8 flags;
+};
+
+struct wiimote_data {
+ atomic_t ready;
+ struct hid_device *hdev;
+ struct input_dev *input;
+
+ spinlock_t qlock;
+ __u8 head;
+ __u8 tail;
+ struct wiimote_buf outq[WIIMOTE_BUFSIZE];
+ struct work_struct worker;
+
+ struct wiimote_state state;
+};
+
+#define WIIPROTO_FLAG_LED1 0x01
+#define WIIPROTO_FLAG_LED2 0x02
+#define WIIPROTO_FLAG_LED3 0x04
+#define WIIPROTO_FLAG_LED4 0x08
+#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
+ WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
+
+enum wiiproto_reqs {
+ WIIPROTO_REQ_LED = 0x11,
+ WIIPROTO_REQ_DRM_K = 0x30,
+};
+
+enum wiiproto_keys {
+ WIIPROTO_KEY_LEFT,
+ WIIPROTO_KEY_RIGHT,
+ WIIPROTO_KEY_UP,
+ WIIPROTO_KEY_DOWN,
+ WIIPROTO_KEY_PLUS,
+ WIIPROTO_KEY_MINUS,
+ WIIPROTO_KEY_ONE,
+ WIIPROTO_KEY_TWO,
+ WIIPROTO_KEY_A,
+ WIIPROTO_KEY_B,
+ WIIPROTO_KEY_HOME,
+ WIIPROTO_KEY_COUNT
+};
+
+static __u16 wiiproto_keymap[] = {
+ KEY_LEFT, /* WIIPROTO_KEY_LEFT */
+ KEY_RIGHT, /* WIIPROTO_KEY_RIGHT */
+ KEY_UP, /* WIIPROTO_KEY_UP */
+ KEY_DOWN, /* WIIPROTO_KEY_DOWN */
+ KEY_NEXT, /* WIIPROTO_KEY_PLUS */
+ KEY_PREVIOUS, /* WIIPROTO_KEY_MINUS */
+ BTN_1, /* WIIPROTO_KEY_ONE */
+ BTN_2, /* WIIPROTO_KEY_TWO */
+ BTN_A, /* WIIPROTO_KEY_A */
+ BTN_B, /* WIIPROTO_KEY_B */
+ BTN_MODE, /* WIIPROTO_KEY_HOME */
+};
+
+#define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \
+ dev))
+
+static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
+ size_t count)
+{
+ __u8 *buf;
+ ssize_t ret;
+
+ if (!hdev->hid_output_raw_report)
+ return -ENODEV;
+
+ buf = kmemdup(buffer, count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hdev->hid_output_raw_report(hdev, buf, count, HID_OUTPUT_REPORT);
+
+ kfree(buf);
+ return ret;
+}
+
+static void wiimote_worker(struct work_struct *work)
+{
+ struct wiimote_data *wdata = container_of(work, struct wiimote_data,
+ worker);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->qlock, flags);
+
+ while (wdata->head != wdata->tail) {
+ spin_unlock_irqrestore(&wdata->qlock, flags);
+ wiimote_hid_send(wdata->hdev, wdata->outq[wdata->tail].data,
+ wdata->outq[wdata->tail].size);
+ spin_lock_irqsave(&wdata->qlock, flags);
+
+ wdata->tail = (wdata->tail + 1) % WIIMOTE_BUFSIZE;
+ }
+
+ spin_unlock_irqrestore(&wdata->qlock, flags);
+}
+
+static void wiimote_queue(struct wiimote_data *wdata, const __u8 *buffer,
+ size_t count)
+{
+ unsigned long flags;
+ __u8 newhead;
+
+ if (count > HID_MAX_BUFFER_SIZE) {
+ hid_warn(wdata->hdev, "Sending too large output report\n");
+ return;
+ }
+
+ /*
+ * Copy new request into our output queue and check whether the
+ * queue is full. If it is full, discard this request.
+ * If it is empty we need to start a new worker that will
+ * send out the buffer to the hid device.
+ * If the queue is not empty, then there must be a worker
+ * that is currently sending out our buffer and this worker
+ * will reschedule itself until the queue is empty.
+ */
+
+ spin_lock_irqsave(&wdata->qlock, flags);
+
+ memcpy(wdata->outq[wdata->head].data, buffer, count);
+ wdata->outq[wdata->head].size = count;
+ newhead = (wdata->head + 1) % WIIMOTE_BUFSIZE;
+
+ if (wdata->head == wdata->tail) {
+ wdata->head = newhead;
+ schedule_work(&wdata->worker);
+ } else if (newhead != wdata->tail) {
+ wdata->head = newhead;
+ } else {
+ hid_warn(wdata->hdev, "Output queue is full");
+ }
+
+ spin_unlock_irqrestore(&wdata->qlock, flags);
+}
+
+static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
+{
+ __u8 cmd[2];
+
+ leds &= WIIPROTO_FLAGS_LEDS;
+ if ((wdata->state.flags & WIIPROTO_FLAGS_LEDS) == leds)
+ return;
+ wdata->state.flags = (wdata->state.flags & ~WIIPROTO_FLAGS_LEDS) | leds;
+
+ cmd[0] = WIIPROTO_REQ_LED;
+ cmd[1] = 0;
+
+ if (leds & WIIPROTO_FLAG_LED1)
+ cmd[1] |= 0x10;
+ if (leds & WIIPROTO_FLAG_LED2)
+ cmd[1] |= 0x20;
+ if (leds & WIIPROTO_FLAG_LED3)
+ cmd[1] |= 0x40;
+ if (leds & WIIPROTO_FLAG_LED4)
+ cmd[1] |= 0x80;
+
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+#define wiifs_led_show_set(num) \
+static ssize_t wiifs_led_show_##num(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct wiimote_data *wdata = dev_to_wii(dev); \
+ unsigned long flags; \
+ int state; \
+ \
+ if (!atomic_read(&wdata->ready)) \
+ return -EBUSY; \
+ \
+ spin_lock_irqsave(&wdata->state.lock, flags); \
+ state = !!(wdata->state.flags & WIIPROTO_FLAG_LED##num); \
+ spin_unlock_irqrestore(&wdata->state.lock, flags); \
+ \
+ return sprintf(buf, "%d\n", state); \
+} \
+static ssize_t wiifs_led_set_##num(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ struct wiimote_data *wdata = dev_to_wii(dev); \
+ int tmp = simple_strtoul(buf, NULL, 10); \
+ unsigned long flags; \
+ __u8 state; \
+ \
+ if (!atomic_read(&wdata->ready)) \
+ return -EBUSY; \
+ \
+ spin_lock_irqsave(&wdata->state.lock, flags); \
+ \
+ state = wdata->state.flags; \
+ \
+ if (tmp) \
+ wiiproto_req_leds(wdata, state | WIIPROTO_FLAG_LED##num);\
+ else \
+ wiiproto_req_leds(wdata, state & ~WIIPROTO_FLAG_LED##num);\
+ \
+ spin_unlock_irqrestore(&wdata->state.lock, flags); \
+ \
+ return count; \
+} \
+static DEVICE_ATTR(led##num, S_IRUGO | S_IWUSR, wiifs_led_show_##num, \
+ wiifs_led_set_##num)
+
+wiifs_led_show_set(1);
+wiifs_led_show_set(2);
+wiifs_led_show_set(3);
+wiifs_led_show_set(4);
+
+static int wiimote_input_event(struct input_dev *dev, unsigned int type,
+ unsigned int code, int value)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+
+ if (!atomic_read(&wdata->ready))
+ return -EBUSY;
+	/* smp_rmb: Make sure the wdata fields are visible when wdata->ready is 1 */
+ smp_rmb();
+
+ return 0;
+}
+
+static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
+{
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_LEFT],
+ !!(payload[0] & 0x01));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_RIGHT],
+ !!(payload[0] & 0x02));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_DOWN],
+ !!(payload[0] & 0x04));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_UP],
+ !!(payload[0] & 0x08));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_PLUS],
+ !!(payload[0] & 0x10));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_TWO],
+ !!(payload[1] & 0x01));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_ONE],
+ !!(payload[1] & 0x02));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_B],
+ !!(payload[1] & 0x04));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_A],
+ !!(payload[1] & 0x08));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_MINUS],
+ !!(payload[1] & 0x10));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_HOME],
+ !!(payload[1] & 0x80));
+ input_sync(wdata->input);
+}
+
+struct wiiproto_handler {
+ __u8 id;
+ size_t size;
+ void (*func)(struct wiimote_data *wdata, const __u8 *payload);
+};
+
+static struct wiiproto_handler handlers[] = {
+ { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys },
+ { .id = 0 }
+};
+
+static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
+{
+ struct wiimote_data *wdata = hid_get_drvdata(hdev);
+ struct wiiproto_handler *h;
+ int i;
+ unsigned long flags;
+
+ if (!atomic_read(&wdata->ready))
+ return -EBUSY;
+	/* smp_rmb: Make sure the wdata fields are visible when wdata->ready is 1 */
+ smp_rmb();
+
+ if (size < 1)
+ return -EINVAL;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+
+ for (i = 0; handlers[i].id; ++i) {
+ h = &handlers[i];
+ if (h->id == raw_data[0] && h->size < size)
+ h->func(wdata, &raw_data[1]);
+ }
+
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+}
+
+static struct wiimote_data *wiimote_create(struct hid_device *hdev)
+{
+ struct wiimote_data *wdata;
+ int i;
+
+ wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
+ if (!wdata)
+ return NULL;
+
+ wdata->input = input_allocate_device();
+ if (!wdata->input) {
+ kfree(wdata);
+ return NULL;
+ }
+
+ wdata->hdev = hdev;
+ hid_set_drvdata(hdev, wdata);
+
+ input_set_drvdata(wdata->input, wdata);
+ wdata->input->event = wiimote_input_event;
+ wdata->input->dev.parent = &wdata->hdev->dev;
+ wdata->input->id.bustype = wdata->hdev->bus;
+ wdata->input->id.vendor = wdata->hdev->vendor;
+ wdata->input->id.product = wdata->hdev->product;
+ wdata->input->id.version = wdata->hdev->version;
+ wdata->input->name = WIIMOTE_NAME;
+
+ set_bit(EV_KEY, wdata->input->evbit);
+ for (i = 0; i < WIIPROTO_KEY_COUNT; ++i)
+ set_bit(wiiproto_keymap[i], wdata->input->keybit);
+
+ spin_lock_init(&wdata->qlock);
+ INIT_WORK(&wdata->worker, wiimote_worker);
+
+ spin_lock_init(&wdata->state.lock);
+
+ return wdata;
+}
+
+static void wiimote_destroy(struct wiimote_data *wdata)
+{
+ kfree(wdata);
+}
+
+static int wiimote_hid_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct wiimote_data *wdata;
+ int ret;
+
+ wdata = wiimote_create(hdev);
+ if (!wdata) {
+ hid_err(hdev, "Can't alloc device\n");
+ return -ENOMEM;
+ }
+
+ ret = device_create_file(&hdev->dev, &dev_attr_led1);
+ if (ret)
+ goto err;
+ ret = device_create_file(&hdev->dev, &dev_attr_led2);
+ if (ret)
+ goto err;
+ ret = device_create_file(&hdev->dev, &dev_attr_led3);
+ if (ret)
+ goto err;
+ ret = device_create_file(&hdev->dev, &dev_attr_led4);
+ if (ret)
+ goto err;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "HID parse failed\n");
+ goto err;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "HW start failed\n");
+ goto err;
+ }
+
+ ret = input_register_device(wdata->input);
+ if (ret) {
+ hid_err(hdev, "Cannot register input device\n");
+ goto err_stop;
+ }
+
+	/* smp_wmb: Write the wdata fields before wdata->ready is set to 1 */
+ smp_wmb();
+ atomic_set(&wdata->ready, 1);
+ hid_info(hdev, "New device registered\n");
+
+ /* by default set led1 after device initialization */
+ spin_lock_irq(&wdata->state.lock);
+ wiiproto_req_leds(wdata, WIIPROTO_FLAG_LED1);
+ spin_unlock_irq(&wdata->state.lock);
+
+ return 0;
+
+err_stop:
+ hid_hw_stop(hdev);
+err:
+ input_free_device(wdata->input);
+ device_remove_file(&hdev->dev, &dev_attr_led1);
+ device_remove_file(&hdev->dev, &dev_attr_led2);
+ device_remove_file(&hdev->dev, &dev_attr_led3);
+ device_remove_file(&hdev->dev, &dev_attr_led4);
+ wiimote_destroy(wdata);
+ return ret;
+}
+
+static void wiimote_hid_remove(struct hid_device *hdev)
+{
+ struct wiimote_data *wdata = hid_get_drvdata(hdev);
+
+ hid_info(hdev, "Device removed\n");
+
+ device_remove_file(&hdev->dev, &dev_attr_led1);
+ device_remove_file(&hdev->dev, &dev_attr_led2);
+ device_remove_file(&hdev->dev, &dev_attr_led3);
+ device_remove_file(&hdev->dev, &dev_attr_led4);
+
+ hid_hw_stop(hdev);
+ input_unregister_device(wdata->input);
+
+ cancel_work_sync(&wdata->worker);
+ wiimote_destroy(wdata);
+}
+
+static const struct hid_device_id wiimote_hid_devices[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
+ USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, wiimote_hid_devices);
+
+static struct hid_driver wiimote_hid_driver = {
+ .name = "wiimote",
+ .id_table = wiimote_hid_devices,
+ .probe = wiimote_hid_probe,
+ .remove = wiimote_hid_remove,
+ .raw_event = wiimote_hid_event,
+};
+
+static int __init wiimote_init(void)
+{
+ int ret;
+
+ ret = hid_register_driver(&wiimote_hid_driver);
+ if (ret)
+ pr_err("Can't register wiimote hid driver\n");
+
+ return ret;
+}
+
+static void __exit wiimote_exit(void)
+{
+ hid_unregister_driver(&wiimote_hid_driver);
+}
+
+module_init(wiimote_init);
+module_exit(wiimote_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
+MODULE_DESCRIPTION(WIIMOTE_NAME " Device Driver");
+MODULE_VERSION(WIIMOTE_VERSION);
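
The comment in wiimote_queue() above describes a single-producer ring buffer: the queue is empty when head == tail, an enqueue that would make head catch up with tail is dropped as "full" (so one slot always stays unused), and the worker drains entries at tail until it meets head. A compact userspace sketch of just that index arithmetic, with illustrative names that are not part of the patch:

#include <stdio.h>

#define QSIZE 4	/* deliberately small; the driver uses a 32-entry queue */

static unsigned int head, tail;	/* queue is empty when head == tail */

/* Returns 0 when queued, -1 when the queue is full and the entry is dropped. */
static int enqueue(void)
{
	unsigned int newhead = (head + 1) % QSIZE;

	if (newhead == tail)
		return -1;	/* full: at most QSIZE - 1 entries fit */
	/* the driver schedules its worker here when the queue was empty */
	head = newhead;
	return 0;
}

/* Drain everything, as the worker does once scheduled. */
static void drain(void)
{
	while (tail != head)
		tail = (tail + 1) % QSIZE;
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)
		printf("enqueue %d: %s\n", i, enqueue() ? "dropped" : "queued");
	drain();
	printf("after drain: head=%u tail=%u\n", head, tail);
	return 0;
}
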
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 38c261a40c7..ad978f5748d 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1191,6 +1191,8 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
if (intf->cur_altsetting->desc.bInterfaceProtocol ==
USB_INTERFACE_PROTOCOL_MOUSE)
hid->type = HID_TYPE_USBMOUSE;
+ else if (intf->cur_altsetting->desc.bInterfaceProtocol == 0)
+ hid->type = HID_TYPE_USBNONE;
if (dev->manufacturer)
strlcpy(hid->name, dev->manufacturer, sizeof(hid->name));
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 16db83c83c8..0b62c3c6b7c 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -333,7 +333,7 @@ config SENSORS_F71882FG
F71858FG
F71862FG
F71863FG
- F71869F/E
+ F71869F/E/A
F71882FG
F71883FG
F71889FG/ED/A
@@ -623,7 +623,7 @@ config SENSORS_LM90
LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A,
Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008,
- and Winbond/Nuvoton W83L771W/G/AWG/ASG sensor chips.
+ Winbond/Nuvoton W83L771W/G/AWG/ASG and Philips SA56004 sensor chips.
This driver can also be built as a module. If so, the module
will be called lm90.
@@ -694,14 +694,24 @@ config SENSORS_LTC4261
be called ltc4261.
config SENSORS_LM95241
- tristate "National Semiconductor LM95241 sensor chip"
+ tristate "National Semiconductor LM95241 and compatibles"
depends on I2C
help
- If you say yes here you get support for LM95241 sensor chip.
+ If you say yes here you get support for LM95231 and LM95241 sensor
+ chips.
This driver can also be built as a module. If so, the module
will be called lm95241.
+config SENSORS_LM95245
+ tristate "National Semiconductor LM95245 sensor chip"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for LM95245 sensor chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called lm95245.
+
config SENSORS_MAX1111
tristate "Maxim MAX1111 Multichannel, Serial 8-bit ADC chip"
depends on SPI_MASTER
@@ -736,6 +746,16 @@ config SENSORS_MAX1619
This driver can also be built as a module. If so, the module
will be called max1619.
+config SENSORS_MAX1668
+ tristate "Maxim MAX1668 and compatibles"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for MAX1668, MAX1989 and
+ MAX1805 chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max1668.
+
config SENSORS_MAX6639
tristate "Maxim MAX6639 sensor chip"
depends on I2C && EXPERIMENTAL
@@ -767,6 +787,20 @@ config SENSORS_MAX6650
This driver can also be built as a module. If so, the module
will be called max6650.
+config SENSORS_NTC_THERMISTOR
+ tristate "NTC thermistor support"
+ depends on EXPERIMENTAL
+ help
+	  This driver supports reading NTC thermistor sensors and
+	  interpreting their values. The driver can also monitor the
+	  temperature and send notifications about it.
+
+ Currently, this driver supports
+ NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333.
+
+ This driver can also be built as a module. If so, the module
+ will be called ntc-thermistor.
+
config SENSORS_PC87360
tristate "National Semiconductor PC87360 family"
select HWMON_VID
@@ -807,92 +841,7 @@ config SENSORS_PCF8591
These devices are hard to detect and rarely found on mainstream
hardware. If unsure, say N.
-config PMBUS
- tristate "PMBus support"
- depends on I2C && EXPERIMENTAL
- default n
- help
- Say yes here if you want to enable PMBus support.
-
- This driver can also be built as a module. If so, the module will
- be called pmbus_core.
-
-if PMBUS
-
-config SENSORS_PMBUS
- tristate "Generic PMBus devices"
- default n
- help
- If you say yes here you get hardware monitoring support for generic
- PMBus devices, including but not limited to BMR450, BMR451, BMR453,
- BMR454, and LTC2978.
-
- This driver can also be built as a module. If so, the module will
- be called pmbus.
-
-config SENSORS_ADM1275
- tristate "Analog Devices ADM1275"
- default n
- help
- If you say yes here you get hardware monitoring support for Analog
- Devices ADM1275 Hot-Swap Controller and Digital Power Monitor.
-
- This driver can also be built as a module. If so, the module will
- be called adm1275.
-
-config SENSORS_MAX16064
- tristate "Maxim MAX16064"
- default n
- help
- If you say yes here you get hardware monitoring support for Maxim
- MAX16064.
-
- This driver can also be built as a module. If so, the module will
- be called max16064.
-
-config SENSORS_MAX34440
- tristate "Maxim MAX34440/MAX34441"
- default n
- help
- If you say yes here you get hardware monitoring support for Maxim
- MAX34440 and MAX34441.
-
- This driver can also be built as a module. If so, the module will
- be called max34440.
-
-config SENSORS_MAX8688
- tristate "Maxim MAX8688"
- default n
- help
- If you say yes here you get hardware monitoring support for Maxim
- MAX8688.
-
- This driver can also be built as a module. If so, the module will
- be called max8688.
-
-config SENSORS_UCD9000
- tristate "TI UCD90120, UCD90124, UCD9090, UCD90910"
- default n
- help
- If you say yes here you get hardware monitoring support for TI
- UCD90120, UCD90124, UCD9090, UCD90910 Sequencer and System Health
- Controllers.
-
- This driver can also be built as a module. If so, the module will
- be called ucd9000.
-
-config SENSORS_UCD9200
- tristate "TI UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, UCD9248"
- default n
- help
- If you say yes here you get hardware monitoring support for TI
- UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, and UCD9248
- Digital PWM System Controllers.
-
- This driver can also be built as a module. If so, the module will
- be called ucd9200.
-
-endif # PMBUS
+source drivers/hwmon/pmbus/Kconfig
config SENSORS_SHT15
tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
@@ -1041,8 +990,13 @@ config SENSORS_SMSC47B397
This driver can also be built as a module. If so, the module
will be called smsc47b397.
+config SENSORS_SCH56XX_COMMON
+ tristate
+ default n
+
config SENSORS_SCH5627
tristate "SMSC SCH5627"
+ select SENSORS_SCH56XX_COMMON
help
If you say yes here you get support for the hardware monitoring
features of the SMSC SCH5627 Super-I/O chip.
@@ -1050,6 +1004,21 @@ config SENSORS_SCH5627
This driver can also be built as a module. If so, the module
will be called sch5627.
+config SENSORS_SCH5636
+ tristate "SMSC SCH5636"
+ select SENSORS_SCH56XX_COMMON
+ help
+ SMSC SCH5636 Super I/O chips include an embedded microcontroller for
+ hardware monitoring solutions, allowing motherboard manufacturers to
+ create their own custom hwmon solution based upon the SCH5636.
+
+ Currently this driver only supports the Fujitsu Theseus SCH5636 based
+ hwmon solution. Say yes here if you want support for the Fujitsu
+ Theseus' hardware monitoring features.
+
+ This driver can also be built as a module. If so, the module
+ will be called sch5636.
+
config SENSORS_ADS1015
tristate "Texas Instruments ADS1015"
depends on I2C
@@ -1142,6 +1111,7 @@ config SENSORS_TWL4030_MADC
config SENSORS_VIA_CPUTEMP
tristate "VIA CPU temperature sensor"
depends on X86
+ select HWMON_VID
help
If you say yes here you get support for the temperature
sensor inside your CPU. Supported are all known variants of
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 28061cfa0cd..3c9ccefea79 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -80,6 +80,7 @@ obj-$(CONFIG_SENSORS_LM90) += lm90.o
obj-$(CONFIG_SENSORS_LM92) += lm92.o
obj-$(CONFIG_SENSORS_LM93) += lm93.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
+obj-$(CONFIG_SENSORS_LM95245) += lm95245.o
obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o
@@ -87,15 +88,19 @@ obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
obj-$(CONFIG_SENSORS_MAX1111) += max1111.o
obj-$(CONFIG_SENSORS_MAX16065) += max16065.o
obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
+obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
+obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
obj-$(CONFIG_SENSORS_S3C) += s3c-hwmon.o
+obj-$(CONFIG_SENSORS_SCH56XX_COMMON)+= sch56xx-common.o
obj-$(CONFIG_SENSORS_SCH5627) += sch5627.o
+obj-$(CONFIG_SENSORS_SCH5636) += sch5636.o
obj-$(CONFIG_SENSORS_SHT15) += sht15.o
obj-$(CONFIG_SENSORS_SHT21) += sht21.o
obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
@@ -119,15 +124,7 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
-# PMBus drivers
-obj-$(CONFIG_PMBUS) += pmbus_core.o
-obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
-obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
-obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
-obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
-obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
-obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
-obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
+obj-$(CONFIG_PMBUS) += pmbus/
ccflags-$(CONFIG_HWMON_DEBUG_CHIP) := -DDEBUG
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index dcb78a7a804..00e98517f94 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -674,6 +674,7 @@ static int atk_debugfs_gitm_get(void *p, u64 *val)
else
err = -EIO;
+ ACPI_FREE(ret);
return err;
}
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 0070d5476dd..59d83e83da7 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -44,7 +44,9 @@
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
-#define MAX_ATTRS 5 /* Maximum no of per-core attrs */
+#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
+#define MAX_THRESH_ATTRS 3 /* Maximum no of Threshold attrs */
+#define TOTAL_ATTRS (MAX_CORE_ATTRS + MAX_THRESH_ATTRS)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
#ifdef CONFIG_SMP
@@ -67,6 +69,9 @@
* This value is passed as "id" field to rdmsr/wrmsr functions.
* @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS,
* from where the temperature values should be read.
+ * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT,
+ * from where the thresholds are read.
+ * @attr_size: Total number of per-core attrs displayed in the sysfs.
* @is_pkg_data: If this is 1, the temp_data holds pkgtemp data.
* Otherwise, temp_data holds coretemp data.
* @valid: If this is 1, the current temperature is valid.
@@ -74,15 +79,18 @@
struct temp_data {
int temp;
int ttarget;
+ int tmin;
int tjmax;
unsigned long last_updated;
unsigned int cpu;
u32 cpu_core_id;
u32 status_reg;
+ u32 intrpt_reg;
+ int attr_size;
bool is_pkg_data;
bool valid;
- struct sensor_device_attribute sd_attrs[MAX_ATTRS];
- char attr_name[MAX_ATTRS][CORETEMP_NAME_LENGTH];
+ struct sensor_device_attribute sd_attrs[TOTAL_ATTRS];
+ char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH];
struct mutex update_lock;
};
@@ -135,6 +143,19 @@ static ssize_t show_crit_alarm(struct device *dev,
return sprintf(buf, "%d\n", (eax >> 5) & 1);
}
+static ssize_t show_max_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ u32 eax, edx;
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct platform_data *pdata = dev_get_drvdata(dev);
+ struct temp_data *tdata = pdata->core_data[attr->index];
+
+ rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
+
+ return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1));
+}
+
static ssize_t show_tjmax(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -153,6 +174,83 @@ static ssize_t show_ttarget(struct device *dev,
return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget);
}
+static ssize_t store_ttarget(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct platform_data *pdata = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct temp_data *tdata = pdata->core_data[attr->index];
+ u32 eax, edx;
+ unsigned long val;
+ int diff;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ /*
+ * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms
+	 * of millidegrees Celsius. Hence don't accept val > (127 * 1000)
+ */
+ if (val > tdata->tjmax || val > 127000)
+ return -EINVAL;
+
+ diff = (tdata->tjmax - val) / 1000;
+
+ mutex_lock(&tdata->update_lock);
+ rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
+ eax = (eax & ~THERM_MASK_THRESHOLD1) |
+ (diff << THERM_SHIFT_THRESHOLD1);
+ wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
+ tdata->ttarget = val;
+ mutex_unlock(&tdata->update_lock);
+
+ return count;
+}
+
+static ssize_t show_tmin(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct platform_data *pdata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin);
+}
+
+static ssize_t store_tmin(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct platform_data *pdata = dev_get_drvdata(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct temp_data *tdata = pdata->core_data[attr->index];
+ u32 eax, edx;
+ unsigned long val;
+ int diff;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ /*
+ * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms
+	 * of millidegrees Celsius. Hence don't accept val > (127 * 1000)
+ */
+ if (val > tdata->tjmax || val > 127000)
+ return -EINVAL;
+
+ diff = (tdata->tjmax - val) / 1000;
+
+ mutex_lock(&tdata->update_lock);
+ rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx);
+ eax = (eax & ~THERM_MASK_THRESHOLD0) |
+ (diff << THERM_SHIFT_THRESHOLD0);
+ wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx);
+ tdata->tmin = val;
+ mutex_unlock(&tdata->update_lock);
+
+ return count;
+}
+
static ssize_t show_temp(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -344,23 +442,31 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
int attr_no)
{
int err, i;
- static ssize_t (*rd_ptr[MAX_ATTRS]) (struct device *dev,
+ static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev,
struct device_attribute *devattr, char *buf) = {
- show_label, show_crit_alarm, show_ttarget,
- show_temp, show_tjmax };
- static const char *names[MAX_ATTRS] = {
+ show_label, show_crit_alarm, show_temp, show_tjmax,
+ show_max_alarm, show_ttarget, show_tmin };
+ static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count) = { NULL, NULL, NULL, NULL, NULL,
+ store_ttarget, store_tmin };
+ static const char *names[TOTAL_ATTRS] = {
"temp%d_label", "temp%d_crit_alarm",
- "temp%d_max", "temp%d_input",
- "temp%d_crit" };
+ "temp%d_input", "temp%d_crit",
+ "temp%d_max_alarm", "temp%d_max",
+ "temp%d_max_hyst" };
- for (i = 0; i < MAX_ATTRS; i++) {
+ for (i = 0; i < tdata->attr_size; i++) {
snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
attr_no);
sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
+ if (rw_ptr[i]) {
+ tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR;
+ tdata->sd_attrs[i].dev_attr.store = rw_ptr[i];
+ }
tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
- tdata->sd_attrs[i].dev_attr.store = NULL;
tdata->sd_attrs[i].index = attr_no;
err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr);
if (err)
@@ -374,38 +480,6 @@ exit_free:
return err;
}
-static void update_ttarget(__u8 cpu_model, struct temp_data *tdata,
- struct device *dev)
-{
- int err;
- u32 eax, edx;
-
- /*
- * Initialize ttarget value. Eventually this will be
- * initialized with the value from MSR_IA32_THERM_INTERRUPT
- * register. If IA32_TEMPERATURE_TARGET is supported, this
- * value will be over written below.
- * To Do: Patch to initialize ttarget from MSR_IA32_THERM_INTERRUPT
- */
- tdata->ttarget = tdata->tjmax - 20000;
-
- /*
- * Read the still undocumented IA32_TEMPERATURE_TARGET. It exists
- * on older CPUs but not in this register,
- * Atoms don't have it either.
- */
- if (cpu_model > 0xe && cpu_model != 0x1c) {
- err = rdmsr_safe_on_cpu(tdata->cpu,
- MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
- if (err) {
- dev_warn(dev,
- "Unable to read IA32_TEMPERATURE_TARGET MSR\n");
- } else {
- tdata->ttarget = tdata->tjmax -
- ((eax >> 8) & 0xff) * 1000;
- }
- }
-}
static int __devinit chk_ucode_version(struct platform_device *pdev)
{
@@ -464,9 +538,12 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS :
MSR_IA32_THERM_STATUS;
+ tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT :
+ MSR_IA32_THERM_INTERRUPT;
tdata->is_pkg_data = pkg_flag;
tdata->cpu = cpu;
tdata->cpu_core_id = TO_CORE_ID(cpu);
+ tdata->attr_size = MAX_CORE_ATTRS;
mutex_init(&tdata->update_lock);
return tdata;
}
@@ -516,7 +593,17 @@ static int create_core_data(struct platform_data *pdata,
else
tdata->tjmax = get_tjmax(c, cpu, &pdev->dev);
- update_ttarget(c->x86_model, tdata, &pdev->dev);
+ /*
+ * Test if we can access the intrpt register. If so, increase the
+ * 'size' enough to have ttarget/tmin/max_alarm interfaces.
+ * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT
+ */
+ err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx);
+ if (!err) {
+ tdata->attr_size += MAX_THRESH_ATTRS;
+ tdata->ttarget = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
+ }
+
pdata->core_data[attr_no] = tdata;
/* Create sysfs interfaces */
@@ -553,7 +640,7 @@ static void coretemp_remove_core(struct platform_data *pdata,
struct temp_data *tdata = pdata->core_data[indx];
/* Remove the sysfs attributes */
- for (i = 0; i < MAX_ATTRS; i++)
+ for (i = 0; i < tdata->attr_size; i++)
device_remove_file(dev, &tdata->sd_attrs[i].dev_attr);
kfree(pdata->core_data[indx]);
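
store_ttarget() and store_tmin() above turn a user limit given in millidegrees Celsius into a 7-bit "degrees below TjMax" field of the thermal-interrupt MSR, and the probe path decodes ttarget the same way from bits 16:22. A standalone sketch of that round trip, with local stand-ins for the kernel's THERM_MASK_THRESHOLD1/THERM_SHIFT_THRESHOLD1 constants (assumed here for illustration only):

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the kernel's THERM_{SHIFT,MASK}_THRESHOLD1 definitions. */
#define T1_SHIFT	16
#define T1_MASK		(0x7fu << T1_SHIFT)

/* Encode a limit (millidegrees C) as whole degrees below tjmax. */
static uint32_t encode_ttarget(uint32_t eax, int tjmax, int val)
{
	int diff = (tjmax - val) / 1000;

	return (eax & ~T1_MASK) | ((uint32_t)diff << T1_SHIFT);
}

/* Decode it back, as done when initializing ttarget at probe time. */
static int decode_ttarget(uint32_t eax, int tjmax)
{
	return tjmax - (int)((eax >> T1_SHIFT) & 0x7f) * 1000;
}

int main(void)
{
	int tjmax = 100000;	/* 100 degrees C, in millidegrees */
	uint32_t eax = encode_ttarget(0, tjmax, 85000);

	printf("eax=0x%08x ttarget=%d mC\n", eax, decode_ttarget(eax, tjmax));
	return 0;
}
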
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index e0ef32378ac..0064432f361 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -78,8 +78,9 @@ static u16 emc6w201_read16(struct i2c_client *client, u8 reg)
lsb = i2c_smbus_read_byte_data(client, reg);
msb = i2c_smbus_read_byte_data(client, reg + 1);
- if (lsb < 0 || msb < 0) {
- dev_err(&client->dev, "16-bit read failed at 0x%02x\n", reg);
+ if (unlikely(lsb < 0 || msb < 0)) {
+ dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+ 16, "read", reg);
return 0xFFFF; /* Arbitrary value */
}
@@ -95,10 +96,39 @@ static int emc6w201_write16(struct i2c_client *client, u8 reg, u16 val)
int err;
err = i2c_smbus_write_byte_data(client, reg, val & 0xff);
- if (!err)
+ if (likely(!err))
err = i2c_smbus_write_byte_data(client, reg + 1, val >> 8);
- if (err < 0)
- dev_err(&client->dev, "16-bit write failed at 0x%02x\n", reg);
+ if (unlikely(err < 0))
+ dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+ 16, "write", reg);
+
+ return err;
+}
+
+/* Read 8-bit value from register */
+static u8 emc6w201_read8(struct i2c_client *client, u8 reg)
+{
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, reg);
+ if (unlikely(val < 0)) {
+ dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+ 8, "read", reg);
+ return 0x00; /* Arbitrary value */
+ }
+
+ return val;
+}
+
+/* Write 8-bit value to register */
+static int emc6w201_write8(struct i2c_client *client, u8 reg, u8 val)
+{
+ int err;
+
+ err = i2c_smbus_write_byte_data(client, reg, val);
+ if (unlikely(err < 0))
+ dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+ 8, "write", reg);
return err;
}
@@ -114,25 +144,25 @@ static struct emc6w201_data *emc6w201_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
for (nr = 0; nr < 6; nr++) {
data->in[input][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_IN(nr));
data->in[min][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_IN_LOW(nr));
data->in[max][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_IN_HIGH(nr));
}
for (nr = 0; nr < 6; nr++) {
data->temp[input][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_TEMP(nr));
data->temp[min][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_TEMP_LOW(nr));
data->temp[max][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_TEMP_HIGH(nr));
}
@@ -192,7 +222,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
mutex_lock(&data->update_lock);
data->in[sf][nr] = SENSORS_LIMIT(val, 0, 255);
- err = i2c_smbus_write_byte_data(client, reg, data->in[sf][nr]);
+ err = emc6w201_write8(client, reg, data->in[sf][nr]);
mutex_unlock(&data->update_lock);
return err < 0 ? err : count;
@@ -229,7 +259,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
mutex_lock(&data->update_lock);
data->temp[sf][nr] = SENSORS_LIMIT(val, -127, 128);
- err = i2c_smbus_write_byte_data(client, reg, data->temp[sf][nr]);
+ err = emc6w201_write8(client, reg, data->temp[sf][nr]);
mutex_unlock(&data->update_lock);
return err < 0 ? err : count;
@@ -444,7 +474,7 @@ static int emc6w201_detect(struct i2c_client *client,
/* Check configuration */
config = i2c_smbus_read_byte_data(client, EMC6W201_REG_CONFIG);
- if ((config & 0xF4) != 0x04)
+ if (config < 0 || (config & 0xF4) != 0x04)
return -ENODEV;
if (!(config & 0x01)) {
dev_err(&client->dev, "Monitoring not enabled\n");
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index a4a94a096c9..2d96ed2bf8e 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -52,6 +52,7 @@
#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
#define SIO_F71869_ID 0x0814 /* Chipset ID */
+#define SIO_F71869A_ID 0x1007 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
#define SIO_F71889E_ID 0x0909 /* Chipset ID */
@@ -108,8 +109,8 @@ static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71882fg, f71889fg,
- f71889ed, f71889a, f8000, f81865f };
+enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71869a, f71882fg,
+ f71889fg, f71889ed, f71889a, f8000, f81865f };
static const char *f71882fg_names[] = {
"f71808e",
@@ -117,6 +118,7 @@ static const char *f71882fg_names[] = {
"f71858fg",
"f71862fg",
"f71869", /* Both f71869f and f71869e, reg. compatible and same id */
+ "f71869a",
"f71882fg",
"f71889fg", /* f81801u too, same id */
"f71889ed",
@@ -131,6 +133,7 @@ static const char f71882fg_has_in[][F71882FG_MAX_INS] = {
[f71858fg] = { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
[f71862fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
[f71869] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+ [f71869a] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
[f71882fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
[f71889fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
[f71889ed] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
@@ -145,6 +148,7 @@ static const char f71882fg_has_in1_alarm[] = {
[f71858fg] = 0,
[f71862fg] = 0,
[f71869] = 0,
+ [f71869a] = 0,
[f71882fg] = 1,
[f71889fg] = 1,
[f71889ed] = 1,
@@ -159,6 +163,7 @@ static const char f71882fg_fan_has_beep[] = {
[f71858fg] = 0,
[f71862fg] = 1,
[f71869] = 1,
+ [f71869a] = 1,
[f71882fg] = 1,
[f71889fg] = 1,
[f71889ed] = 1,
@@ -173,6 +178,7 @@ static const char f71882fg_nr_fans[] = {
[f71858fg] = 3,
[f71862fg] = 3,
[f71869] = 3,
+ [f71869a] = 3,
[f71882fg] = 4,
[f71889fg] = 3,
[f71889ed] = 3,
@@ -187,6 +193,7 @@ static const char f71882fg_temp_has_beep[] = {
[f71858fg] = 0,
[f71862fg] = 1,
[f71869] = 1,
+ [f71869a] = 1,
[f71882fg] = 1,
[f71889fg] = 1,
[f71889ed] = 1,
@@ -201,6 +208,7 @@ static const char f71882fg_nr_temps[] = {
[f71858fg] = 3,
[f71862fg] = 3,
[f71869] = 3,
+ [f71869a] = 3,
[f71882fg] = 3,
[f71889fg] = 3,
[f71889ed] = 3,
@@ -2243,6 +2251,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
case f71808e:
case f71808a:
case f71869:
+ case f71869a:
/* These always have signed auto point temps */
data->auto_point_temp_signed = 1;
/* Fall through to select correct fan/pwm reg bank! */
@@ -2305,6 +2314,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
case f71808e:
case f71808a:
case f71869:
+ case f71869a:
case f71889fg:
case f71889ed:
case f71889a:
@@ -2528,6 +2538,9 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
case SIO_F71869_ID:
sio_data->type = f71869;
break;
+ case SIO_F71869A_ID:
+ sio_data->type = f71869a;
+ break;
case SIO_F71882_ID:
sio_data->type = f71882fg;
break;
@@ -2662,7 +2675,7 @@ static void __exit f71882fg_exit(void)
}
MODULE_DESCRIPTION("F71882FG Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans Edgington, Hans de Goede (hdegoede@redhat.com)");
+MODULE_AUTHOR("Hans Edgington, Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
module_init(f71882fg_init);
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index ec588026f0a..131ea8625f0 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -273,7 +273,7 @@ static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR,
#define DIV_FROM_REG(val) (1 << (val))
#define FAN_FROM_REG(val,div) ((val)==0 ? 0 : (480000/((val) << (div))))
-#define FAN_TO_REG(val,div) ((val)<=0?0:SENSORS_LIMIT((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, 255));
+#define FAN_TO_REG(val,div) ((val)<=0?0:SENSORS_LIMIT((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, 255))
static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
char *buf)
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 2582bfef6cc..932da8a5aaf 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -140,7 +140,11 @@ int vid_from_reg(int val, u8 vrm)
return(val & 0x10 ? 975 - (val & 0xF) * 25 :
1750 - val * 50);
case 13:
+ case 131:
val &= 0x3f;
+ /* Exception for Eden ULV 500 MHz */
+ if (vrm == 131 && val == 0x3f)
+ val++;
return(1708 - val * 16);
case 14: /* Intel Core */
/* compute in uV, round to mV */
@@ -202,14 +206,48 @@ static struct vrm_model vrm_models[] = {
{X86_VENDOR_CENTAUR, 0x6, 0x7, ANY, 85}, /* Eden ESP/Ezra */
{X86_VENDOR_CENTAUR, 0x6, 0x8, 0x7, 85}, /* Ezra T */
- {X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85}, /* Nemiah */
+ {X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85}, /* Nehemiah */
{X86_VENDOR_CENTAUR, 0x6, 0x9, ANY, 17}, /* C3-M, Eden-N */
{X86_VENDOR_CENTAUR, 0x6, 0xA, 0x7, 0}, /* No information */
- {X86_VENDOR_CENTAUR, 0x6, 0xA, ANY, 13}, /* C7, Esther */
+ {X86_VENDOR_CENTAUR, 0x6, 0xA, ANY, 13}, /* C7-M, C7, Eden (Esther) */
+ {X86_VENDOR_CENTAUR, 0x6, 0xD, ANY, 134}, /* C7-D, C7-M, C7, Eden (Esther) */
{X86_VENDOR_UNKNOWN, ANY, ANY, ANY, 0} /* stop here */
};
+/*
+ * Special case for VIA model D: there are two different possible
+ * VID tables, so we first have to figure out which one must be
+ * used. This resolves the temporary vrm value 134 to 14 (Intel Core
+ * 7-bit VID), 13 (Pentium M 6-bit VID) or 131 (Pentium M 6-bit VID
+ * + quirk for Eden ULV 500 MHz).
+ * Note: something similar might be needed for model A, I'm not sure.
+ */
+static u8 get_via_model_d_vrm(void)
+{
+ unsigned int vid, brand, dummy;
+ static const char *brands[4] = {
+ "C7-M", "C7", "Eden", "C7-D"
+ };
+
+ rdmsr(0x198, dummy, vid);
+ vid &= 0xff;
+
+ rdmsr(0x1154, brand, dummy);
+ brand = ((brand >> 4) ^ (brand >> 2)) & 0x03;
+
+ if (vid > 0x3f) {
+ pr_info("Using %d-bit VID table for VIA %s CPU\n",
+ 7, brands[brand]);
+ return 14;
+ } else {
+ pr_info("Using %d-bit VID table for VIA %s CPU\n",
+ 6, brands[brand]);
+ /* Enable quirk for Eden */
+ return brand == 2 ? 131 : 13;
+ }
+}
+
static u8 find_vrm(u8 eff_family, u8 eff_model, u8 eff_stepping, u8 vendor)
{
int i = 0;
@@ -247,6 +285,8 @@ u8 vid_which_vrm(void)
eff_model += ((eax & 0x000F0000)>>16)<<4;
}
vrm_ret = find_vrm(eff_family, eff_model, eff_stepping, c->x86_vendor);
+ if (vrm_ret == 134)
+ vrm_ret = get_via_model_d_vrm();
if (vrm_ret == 0)
pr_info("Unknown VRM version of your x86 CPU\n");
return vrm_ret;
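
For the new VRM code 131 introduced above, vid_from_reg() uses the same 6-bit table as code 13 (millivolts = 1708 - VID * 16) and only bumps the all-ones VID 0x3f by one step as the Eden ULV 500 MHz exception. A tiny sketch of just that branch, with a made-up function name:

#include <stdio.h>

/* Mirrors the VRM 13 / 131 branch of vid_from_reg(); result in millivolts. */
static int vid13_to_mv(int val, unsigned char vrm)
{
	val &= 0x3f;
	/* Exception for the Eden ULV 500 MHz (VRM code 131 only). */
	if (vrm == 131 && val == 0x3f)
		val++;
	return 1708 - val * 16;
}

int main(void)
{
	printf("VRM 13,  VID 0x3f -> %d mV\n", vid13_to_mv(0x3f, 13));
	printf("VRM 131, VID 0x3f -> %d mV\n", vid13_to_mv(0x3f, 131));
	printf("VRM 131, VID 0x00 -> %d mV\n", vid13_to_mv(0x00, 131));
	return 0;
}
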
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 1a409c5bc9b..c316294c48b 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -432,13 +432,15 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
aem_send_message(ipmi);
res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT);
- if (!res)
- return -ETIMEDOUT;
+ if (!res) {
+ res = -ETIMEDOUT;
+ goto out;
+ }
if (ipmi->rx_result || ipmi->rx_msg_len != rs_size ||
memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) {
- kfree(rs_resp);
- return -ENOENT;
+ res = -ENOENT;
+ goto out;
}
switch (size) {
@@ -463,8 +465,11 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
break;
}
}
+ res = 0;
- return 0;
+out:
+ kfree(rs_resp);
+ return res;
}
/* Update AEM energy registers */
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index bb6405b9200..d912649fac5 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -1172,6 +1172,32 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
struct it87_data *data = it87_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
+
+static ssize_t clear_intrusion(struct device *dev, struct device_attribute
+ *attr, const char *buf, size_t count)
+{
+ struct it87_data *data = dev_get_drvdata(dev);
+ long val;
+ int config;
+
+ if (strict_strtol(buf, 10, &val) < 0 || val != 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ config = it87_read_value(data, IT87_REG_CONFIG);
+ if (config < 0) {
+ count = config;
+ } else {
+ config |= 1 << 5;
+ it87_write_value(data, IT87_REG_CONFIG, config);
+ /* Invalidate cache to force re-read */
+ data->valid = 0;
+ }
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 8);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 9);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 10);
@@ -1188,6 +1214,8 @@ static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 16);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 17);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 18);
+static SENSOR_DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR,
+ show_alarm, clear_intrusion, 4);
static ssize_t show_beep(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -1350,6 +1378,7 @@ static struct attribute *it87_attributes[] = {
&sensor_dev_attr_temp3_alarm.dev_attr.attr,
&dev_attr_alarms.attr,
+ &sensor_dev_attr_intrusion0_alarm.dev_attr.attr,
&dev_attr_name.attr,
NULL
};
@@ -1538,7 +1567,7 @@ static struct attribute *it87_attributes_label[] = {
};
static const struct attribute_group it87_group_label = {
- .attrs = it87_attributes_vid,
+ .attrs = it87_attributes_label,
};
/* SuperIO detection - will change isa_address if a chip is found */
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 4cb24eafe31..6df0b468171 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -2,7 +2,7 @@
lm78.c - Part of lm_sensors, Linux kernel modules for hardware
monitoring
Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
- Copyright (c) 2007 Jean Delvare <khali@linux-fr.org>
+ Copyright (c) 2007, 2011 Jean Delvare <khali@linux-fr.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -26,23 +26,21 @@
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
-#include <linux/platform_device.h>
-#include <linux/ioport.h>
#include <linux/hwmon.h>
#include <linux/hwmon-vid.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
-#include <linux/io.h>
-/* ISA device, if found */
-static struct platform_device *pdev;
+#ifdef CONFIG_ISA
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#endif
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
0x2e, 0x2f, I2C_CLIENT_END };
-static unsigned short isa_address = 0x290;
-
enum chips { lm78, lm79 };
/* Many LM78 constants specified below */
@@ -143,50 +141,12 @@ struct lm78_data {
};
-static int lm78_i2c_detect(struct i2c_client *client,
- struct i2c_board_info *info);
-static int lm78_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
-static int lm78_i2c_remove(struct i2c_client *client);
-
-static int __devinit lm78_isa_probe(struct platform_device *pdev);
-static int __devexit lm78_isa_remove(struct platform_device *pdev);
-
static int lm78_read_value(struct lm78_data *data, u8 reg);
static int lm78_write_value(struct lm78_data *data, u8 reg, u8 value);
static struct lm78_data *lm78_update_device(struct device *dev);
static void lm78_init_device(struct lm78_data *data);
-static const struct i2c_device_id lm78_i2c_id[] = {
- { "lm78", lm78 },
- { "lm79", lm79 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, lm78_i2c_id);
-
-static struct i2c_driver lm78_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "lm78",
- },
- .probe = lm78_i2c_probe,
- .remove = lm78_i2c_remove,
- .id_table = lm78_i2c_id,
- .detect = lm78_i2c_detect,
- .address_list = normal_i2c,
-};
-
-static struct platform_driver lm78_isa_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "lm78",
- },
- .probe = lm78_isa_probe,
- .remove = __devexit_p(lm78_isa_remove),
-};
-
-
/* 7 Voltages */
static ssize_t show_in(struct device *dev, struct device_attribute *da,
char *buf)
@@ -514,6 +474,16 @@ static const struct attribute_group lm78_group = {
.attrs = lm78_attributes,
};
+/*
+ * ISA related code
+ */
+#ifdef CONFIG_ISA
+
+/* ISA device, if found */
+static struct platform_device *pdev;
+
+static unsigned short isa_address = 0x290;
+
/* I2C devices get this name attribute automatically, but for ISA devices
we must create it by ourselves. */
static ssize_t show_name(struct device *dev, struct device_attribute
@@ -525,6 +495,11 @@ static ssize_t show_name(struct device *dev, struct device_attribute
}
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+static struct lm78_data *lm78_data_if_isa(void)
+{
+ return pdev ? platform_get_drvdata(pdev) : NULL;
+}
+
/* Returns 1 if the I2C chip appears to be an alias of the ISA chip */
static int lm78_alias_detect(struct i2c_client *client, u8 chipid)
{
@@ -558,12 +533,24 @@ static int lm78_alias_detect(struct i2c_client *client, u8 chipid)
return 1;
}
+#else /* !CONFIG_ISA */
+
+static int lm78_alias_detect(struct i2c_client *client, u8 chipid)
+{
+ return 0;
+}
+
+static struct lm78_data *lm78_data_if_isa(void)
+{
+ return NULL;
+}
+#endif /* CONFIG_ISA */
static int lm78_i2c_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
int i;
- struct lm78_data *isa = pdev ? platform_get_drvdata(pdev) : NULL;
+ struct lm78_data *isa = lm78_data_if_isa();
const char *client_name;
struct i2c_adapter *adapter = client->adapter;
int address = client->addr;
@@ -663,76 +650,24 @@ static int lm78_i2c_remove(struct i2c_client *client)
return 0;
}
-static int __devinit lm78_isa_probe(struct platform_device *pdev)
-{
- int err;
- struct lm78_data *data;
- struct resource *res;
-
- /* Reserve the ISA region */
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start + LM78_ADDR_REG_OFFSET, 2, "lm78")) {
- err = -EBUSY;
- goto exit;
- }
-
- if (!(data = kzalloc(sizeof(struct lm78_data), GFP_KERNEL))) {
- err = -ENOMEM;
- goto exit_release_region;
- }
- mutex_init(&data->lock);
- data->isa_addr = res->start;
- platform_set_drvdata(pdev, data);
-
- if (lm78_read_value(data, LM78_REG_CHIPID) & 0x80) {
- data->type = lm79;
- data->name = "lm79";
- } else {
- data->type = lm78;
- data->name = "lm78";
- }
-
- /* Initialize the LM78 chip */
- lm78_init_device(data);
-
- /* Register sysfs hooks */
- if ((err = sysfs_create_group(&pdev->dev.kobj, &lm78_group))
- || (err = device_create_file(&pdev->dev, &dev_attr_name)))
- goto exit_remove_files;
-
- data->hwmon_dev = hwmon_device_register(&pdev->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove_files;
- }
-
- return 0;
-
- exit_remove_files:
- sysfs_remove_group(&pdev->dev.kobj, &lm78_group);
- device_remove_file(&pdev->dev, &dev_attr_name);
- kfree(data);
- exit_release_region:
- release_region(res->start + LM78_ADDR_REG_OFFSET, 2);
- exit:
- return err;
-}
-
-static int __devexit lm78_isa_remove(struct platform_device *pdev)
-{
- struct lm78_data *data = platform_get_drvdata(pdev);
- struct resource *res;
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&pdev->dev.kobj, &lm78_group);
- device_remove_file(&pdev->dev, &dev_attr_name);
- kfree(data);
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start + LM78_ADDR_REG_OFFSET, 2);
+static const struct i2c_device_id lm78_i2c_id[] = {
+ { "lm78", lm78 },
+ { "lm79", lm79 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm78_i2c_id);
- return 0;
-}
+static struct i2c_driver lm78_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "lm78",
+ },
+ .probe = lm78_i2c_probe,
+ .remove = lm78_i2c_remove,
+ .id_table = lm78_i2c_id,
+ .detect = lm78_i2c_detect,
+ .address_list = normal_i2c,
+};
/* The SMBus locks itself, but ISA access must be locked explicitly!
We don't want to lock the whole ISA bus, so we lock each client
@@ -743,6 +678,7 @@ static int lm78_read_value(struct lm78_data *data, u8 reg)
{
struct i2c_client *client = data->client;
+#ifdef CONFIG_ISA
if (!client) { /* ISA device */
int res;
mutex_lock(&data->lock);
@@ -751,6 +687,7 @@ static int lm78_read_value(struct lm78_data *data, u8 reg)
mutex_unlock(&data->lock);
return res;
} else
+#endif
return i2c_smbus_read_byte_data(client, reg);
}
@@ -765,6 +702,7 @@ static int lm78_write_value(struct lm78_data *data, u8 reg, u8 value)
{
struct i2c_client *client = data->client;
+#ifdef CONFIG_ISA
if (!client) { /* ISA device */
mutex_lock(&data->lock);
outb_p(reg, data->isa_addr + LM78_ADDR_REG_OFFSET);
@@ -772,6 +710,7 @@ static int lm78_write_value(struct lm78_data *data, u8 reg, u8 value)
mutex_unlock(&data->lock);
return 0;
} else
+#endif
return i2c_smbus_write_byte_data(client, reg, value);
}
@@ -849,6 +788,88 @@ static struct lm78_data *lm78_update_device(struct device *dev)
return data;
}
+#ifdef CONFIG_ISA
+static int __devinit lm78_isa_probe(struct platform_device *pdev)
+{
+ int err;
+ struct lm78_data *data;
+ struct resource *res;
+
+ /* Reserve the ISA region */
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!request_region(res->start + LM78_ADDR_REG_OFFSET, 2, "lm78")) {
+ err = -EBUSY;
+ goto exit;
+ }
+
+ data = kzalloc(sizeof(struct lm78_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit_release_region;
+ }
+ mutex_init(&data->lock);
+ data->isa_addr = res->start;
+ platform_set_drvdata(pdev, data);
+
+ if (lm78_read_value(data, LM78_REG_CHIPID) & 0x80) {
+ data->type = lm79;
+ data->name = "lm79";
+ } else {
+ data->type = lm78;
+ data->name = "lm78";
+ }
+
+ /* Initialize the LM78 chip */
+ lm78_init_device(data);
+
+ /* Register sysfs hooks */
+ if ((err = sysfs_create_group(&pdev->dev.kobj, &lm78_group))
+ || (err = device_create_file(&pdev->dev, &dev_attr_name)))
+ goto exit_remove_files;
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto exit_remove_files;
+ }
+
+ return 0;
+
+ exit_remove_files:
+ sysfs_remove_group(&pdev->dev.kobj, &lm78_group);
+ device_remove_file(&pdev->dev, &dev_attr_name);
+ kfree(data);
+ exit_release_region:
+ release_region(res->start + LM78_ADDR_REG_OFFSET, 2);
+ exit:
+ return err;
+}
+
+static int __devexit lm78_isa_remove(struct platform_device *pdev)
+{
+ struct lm78_data *data = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&pdev->dev.kobj, &lm78_group);
+ device_remove_file(&pdev->dev, &dev_attr_name);
+ kfree(data);
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ release_region(res->start + LM78_ADDR_REG_OFFSET, 2);
+
+ return 0;
+}
+
+static struct platform_driver lm78_isa_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "lm78",
+ },
+ .probe = lm78_isa_probe,
+ .remove = __devexit_p(lm78_isa_remove),
+};
+
/* return 1 if a supported chip is found, 0 otherwise */
static int __init lm78_isa_found(unsigned short address)
{
@@ -969,12 +990,10 @@ static int __init lm78_isa_device_add(unsigned short address)
return err;
}
-static int __init sm_lm78_init(void)
+static int __init lm78_isa_register(void)
{
int res;
- /* We register the ISA device first, so that we can skip the
- * registration of an I2C interface to the same device. */
if (lm78_isa_found(isa_address)) {
res = platform_driver_register(&lm78_isa_driver);
if (res)
@@ -986,32 +1005,62 @@ static int __init sm_lm78_init(void)
goto exit_unreg_isa_driver;
}
- res = i2c_add_driver(&lm78_driver);
- if (res)
- goto exit_unreg_isa_device;
-
return 0;
- exit_unreg_isa_device:
- platform_device_unregister(pdev);
exit_unreg_isa_driver:
platform_driver_unregister(&lm78_isa_driver);
exit:
return res;
}
-static void __exit sm_lm78_exit(void)
+static void lm78_isa_unregister(void)
{
if (pdev) {
platform_device_unregister(pdev);
platform_driver_unregister(&lm78_isa_driver);
}
- i2c_del_driver(&lm78_driver);
}
+#else /* !CONFIG_ISA */
+static int __init lm78_isa_register(void)
+{
+ return 0;
+}
+
+static void lm78_isa_unregister(void)
+{
+}
+#endif /* CONFIG_ISA */
+static int __init sm_lm78_init(void)
+{
+ int res;
+
+ /* We register the ISA device first, so that we can skip the
+ * registration of an I2C interface to the same device. */
+ res = lm78_isa_register();
+ if (res)
+ goto exit;
+
+ res = i2c_add_driver(&lm78_driver);
+ if (res)
+ goto exit_unreg_isa_device;
+
+ return 0;
+
+ exit_unreg_isa_device:
+ lm78_isa_unregister();
+ exit:
+ return res;
+}
+
+static void __exit sm_lm78_exit(void)
+{
+ lm78_isa_unregister();
+ i2c_del_driver(&lm78_driver);
+}
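For clarity, a sketch (derived from the stubs above, not additional patch content) of how the init path collapses when CONFIG_ISA is disabled:

	/*
	 * With CONFIG_ISA unset, lm78_isa_register() is the empty stub that
	 * returns 0 and lm78_isa_unregister() does nothing, so effectively:
	 *
	 *	static int __init sm_lm78_init(void)
	 *	{
	 *		return i2c_add_driver(&lm78_driver);
	 *	}
	 *
	 * No ISA-only symbols (pdev, isa_address, lm78_isa_driver) are
	 * referenced outside the #ifdef CONFIG_ISA blocks.
	 */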
-MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
+MODULE_AUTHOR("Frodo Looijaard, Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("LM78/LM79 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 2f94f950480..90ddb877421 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -54,6 +54,9 @@
* and extended mode. They are mostly compatible with LM90 except for a data
* format difference for the temperature value registers.
*
+ * This driver also supports the SA56004 from Philips. This device is
+ * pin-compatible with the LM86, the ED/EDP parts are also address-compatible.
+ *
* Since the LM90 was the first chipset supported by this driver, most
* comments will refer to this chipset, but are actually general and
* concern all supported chipsets, unless mentioned otherwise.
@@ -96,13 +99,15 @@
* MAX6659 can have address 0x4c, 0x4d or 0x4e.
* MAX6680 and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
* 0x4c, 0x4d or 0x4e.
+ * SA56004 can have address 0x48 through 0x4F.
*/
static const unsigned short normal_i2c[] = {
- 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
+ 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x48, 0x49, 0x4a, 0x4b, 0x4c,
+ 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
- max6646, w83l771, max6696 };
+ max6646, w83l771, max6696, sa56004 };
/*
* The LM90 registers
@@ -152,6 +157,10 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define MAX6659_REG_R_LOCAL_EMERG 0x17
#define MAX6659_REG_W_LOCAL_EMERG 0x17
+/* SA56004 registers */
+
+#define SA56004_REG_R_LOCAL_TEMPL 0x22
+
#define LM90_DEF_CONVRATE_RVAL 6 /* Def conversion rate register value */
#define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */
@@ -161,7 +170,6 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_FLAG_ADT7461_EXT (1 << 0) /* ADT7461 extended mode */
/* Device features */
#define LM90_HAVE_OFFSET (1 << 1) /* temperature offset register */
-#define LM90_HAVE_LOCAL_EXT (1 << 2) /* extended local temperature */
#define LM90_HAVE_REM_LIMIT_EXT (1 << 3) /* extended remote limit */
#define LM90_HAVE_EMERGENCY (1 << 4) /* 3rd upper (emergency) limit */
#define LM90_HAVE_EMERGENCY_ALARM (1 << 5)/* emergency alarm */
@@ -192,6 +200,7 @@ static const struct i2c_device_id lm90_id[] = {
{ "max6696", max6696 },
{ "nct1008", adt7461 },
{ "w83l771", w83l771 },
+ { "sa56004", sa56004 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm90_id);
@@ -204,6 +213,7 @@ struct lm90_params {
u16 alert_alarms; /* Which alarm bits trigger ALERT# */
/* Upper 8 bits for max6695/96 */
u8 max_convrate; /* Maximum conversion rate register value */
+ u8 reg_local_ext; /* Extended local temp register (optional) */
};
static const struct lm90_params lm90_params[] = {
@@ -235,19 +245,20 @@ static const struct lm90_params lm90_params[] = {
.max_convrate = 9,
},
[max6646] = {
- .flags = LM90_HAVE_LOCAL_EXT,
.alert_alarms = 0x7c,
.max_convrate = 6,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6657] = {
- .flags = LM90_HAVE_LOCAL_EXT,
.alert_alarms = 0x7c,
.max_convrate = 8,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6659] = {
- .flags = LM90_HAVE_LOCAL_EXT | LM90_HAVE_EMERGENCY,
+ .flags = LM90_HAVE_EMERGENCY,
.alert_alarms = 0x7c,
.max_convrate = 8,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6680] = {
.flags = LM90_HAVE_OFFSET,
@@ -255,16 +266,23 @@ static const struct lm90_params lm90_params[] = {
.max_convrate = 7,
},
[max6696] = {
- .flags = LM90_HAVE_LOCAL_EXT | LM90_HAVE_EMERGENCY
+ .flags = LM90_HAVE_EMERGENCY
| LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
.alert_alarms = 0x187c,
.max_convrate = 6,
+ .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[w83l771] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
.alert_alarms = 0x7c,
.max_convrate = 8,
},
+ [sa56004] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .alert_alarms = 0x7b,
+ .max_convrate = 9,
+ .reg_local_ext = SA56004_REG_R_LOCAL_TEMPL,
+ },
};
/*
@@ -286,6 +304,7 @@ struct lm90_data {
u16 alert_alarms; /* Which alarm bits trigger ALERT# */
/* Upper 8 bits for max6695/96 */
u8 max_convrate; /* Maximum conversion rate */
+ u8 reg_local_ext; /* local extension register offset */
/* registers values */
s8 temp8[8]; /* 0: local low limit
@@ -452,9 +471,9 @@ static struct lm90_data *lm90_update_device(struct device *dev)
lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT, &data->temp8[3]);
lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst);
- if (data->flags & LM90_HAVE_LOCAL_EXT) {
+ if (data->reg_local_ext) {
lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
- MAX6657_REG_R_LOCAL_TEMPL,
+ data->reg_local_ext,
&data->temp11[4]);
} else {
if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP,
@@ -1092,7 +1111,7 @@ static int lm90_detect(struct i2c_client *new_client,
struct i2c_adapter *adapter = new_client->adapter;
int address = new_client->addr;
const char *name = NULL;
- int man_id, chip_id, reg_config1, reg_convrate;
+ int man_id, chip_id, reg_config1, reg_config2, reg_convrate;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
@@ -1108,15 +1127,16 @@ static int lm90_detect(struct i2c_client *new_client,
LM90_REG_R_CONVRATE)) < 0)
return -ENODEV;
- if ((address == 0x4C || address == 0x4D)
- && man_id == 0x01) { /* National Semiconductor */
- int reg_config2;
-
+ if (man_id == 0x01 || man_id == 0x5C || man_id == 0x41) {
reg_config2 = i2c_smbus_read_byte_data(new_client,
LM90_REG_R_CONFIG2);
if (reg_config2 < 0)
return -ENODEV;
+ } else
+ reg_config2 = 0; /* Make compiler happy */
+ if ((address == 0x4C || address == 0x4D)
+ && man_id == 0x01) { /* National Semiconductor */
if ((reg_config1 & 0x2A) == 0x00
&& (reg_config2 & 0xF8) == 0x00
&& reg_convrate <= 0x09) {
@@ -1245,13 +1265,6 @@ static int lm90_detect(struct i2c_client *new_client,
} else
if (address == 0x4C
&& man_id == 0x5C) { /* Winbond/Nuvoton */
- int reg_config2;
-
- reg_config2 = i2c_smbus_read_byte_data(new_client,
- LM90_REG_R_CONFIG2);
- if (reg_config2 < 0)
- return -ENODEV;
-
if ((reg_config1 & 0x2A) == 0x00
&& (reg_config2 & 0xF8) == 0x00) {
if (chip_id == 0x01 /* W83L771W/G */
@@ -1263,6 +1276,15 @@ static int lm90_detect(struct i2c_client *new_client,
name = "w83l771";
}
}
+ } else
+ if (address >= 0x48 && address <= 0x4F
+ && man_id == 0xA1) { /* NXP Semiconductor/Philips */
+ if (chip_id == 0x00
+ && (reg_config1 & 0x2A) == 0x00
+ && (reg_config2 & 0xFE) == 0x00
+ && reg_convrate <= 0x09) {
+ name = "sa56004";
+ }
}
if (!name) { /* identification failed */
@@ -1368,6 +1390,7 @@ static int lm90_probe(struct i2c_client *new_client,
/* Set chip capabilities */
data->flags = lm90_params[data->kind].flags;
+ data->reg_local_ext = lm90_params[data->kind].reg_local_ext;
/* Set maximum conversion rate */
data->max_convrate = lm90_params[data->kind].max_convrate;
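A short sketch of how the new per-chip reg_local_ext field replaces the LM90_HAVE_LOCAL_EXT flag (restating the update path from the hunk above):

	/*
	 * In lm90_update_device():
	 *
	 *	if (data->reg_local_ext)
	 *		lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
	 *			    data->reg_local_ext, &data->temp11[4]);
	 *
	 * MAX6646/6657/6659/6696 keep using MAX6657_REG_R_LOCAL_TEMPL, the
	 * SA56004 uses SA56004_REG_R_LOCAL_TEMPL (0x22), and a value of 0
	 * means "no extended local resolution", so the boolean flag is no
	 * longer needed.
	 */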
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 1a6dfb6df1e..513901d592a 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -74,8 +74,9 @@ static const unsigned short normal_i2c[] = {
#define TT_OFF 0
#define TT_ON 1
#define TT_MASK 7
-#define MANUFACTURER_ID 0x01
-#define DEFAULT_REVISION 0xA4
+#define NATSEMI_MAN_ID 0x01
+#define LM95231_CHIP_ID 0xA1
+#define LM95241_CHIP_ID 0xA4
static const u8 lm95241_reg_address[] = {
LM95241_REG_R_LOCAL_TEMPH,
@@ -98,11 +99,16 @@ struct lm95241_data {
};
/* Conversions */
-static int TempFromReg(u8 val_h, u8 val_l)
+static int temp_from_reg_signed(u8 val_h, u8 val_l)
{
- if (val_h & 0x80)
- return val_h - 0x100;
- return val_h * 1000 + val_l * 1000 / 256;
+ s16 val_hl = (val_h << 8) | val_l;
+ return val_hl * 1000 / 256;
+}
+
+static int temp_from_reg_unsigned(u8 val_h, u8 val_l)
+{
+ u16 val_hl = (val_h << 8) | val_l;
+ return val_hl * 1000 / 256;
}
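A quick sanity check of the reworked helpers, with register values chosen purely for illustration:

	/*
	 * temp_from_reg_signed(0xFF, 0x80): val_hl = (s16)0xFF80 = -128,
	 *	-128 * 1000 / 256 = -500 (i.e. -0.5 degC in millidegrees)
	 * temp_from_reg_unsigned(0x1A, 0x80): val_hl = 0x1A80 = 6784,
	 *	6784 * 1000 / 256 = 26500 (26.5 degC)
	 */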
static struct lm95241_data *lm95241_update_device(struct device *dev)
@@ -135,10 +141,13 @@ static ssize_t show_input(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm95241_data *data = lm95241_update_device(dev);
+ int index = to_sensor_dev_attr(attr)->index;
return snprintf(buf, PAGE_SIZE - 1, "%d\n",
- TempFromReg(data->temp[to_sensor_dev_attr(attr)->index],
- data->temp[to_sensor_dev_attr(attr)->index + 1]));
+ index == 0 || (data->config & (1 << (index / 2))) ?
+ temp_from_reg_signed(data->temp[index], data->temp[index + 1]) :
+ temp_from_reg_unsigned(data->temp[index],
+ data->temp[index + 1]));
}
static ssize_t show_type(struct device *dev, struct device_attribute *attr,
@@ -330,20 +339,25 @@ static int lm95241_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
- int address = new_client->addr;
const char *name;
+ int mfg_id, chip_id;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
- if ((i2c_smbus_read_byte_data(new_client, LM95241_REG_R_MAN_ID)
- == MANUFACTURER_ID)
- && (i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID)
- >= DEFAULT_REVISION)) {
- name = DEVNAME;
- } else {
- dev_dbg(&adapter->dev, "LM95241 detection failed at 0x%02x\n",
- address);
+ mfg_id = i2c_smbus_read_byte_data(new_client, LM95241_REG_R_MAN_ID);
+ if (mfg_id != NATSEMI_MAN_ID)
+ return -ENODEV;
+
+ chip_id = i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID);
+ switch (chip_id) {
+ case LM95231_CHIP_ID:
+ name = "lm95231";
+ break;
+ case LM95241_CHIP_ID:
+ name = "lm95241";
+ break;
+ default:
return -ENODEV;
}
@@ -423,7 +437,8 @@ static int lm95241_remove(struct i2c_client *client)
/* Driver data (common to all clients) */
static const struct i2c_device_id lm95241_id[] = {
- { DEVNAME, 0 },
+ { "lm95231", 0 },
+ { "lm95241", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm95241_id);
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
new file mode 100644
index 00000000000..dce9e68241e
--- /dev/null
+++ b/drivers/hwmon/lm95245.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright (C) 2011 Alexander Stein <alexander.stein@systec-electronic.com>
+ *
+ * The LM95245 is a sensor chip made by National Semiconductor.
+ * It reports up to two temperatures (its own plus an external one).
+ * Complete datasheet can be obtained from National's website at:
+ * http://www.national.com/ds.cgi/LM/LM95245.pdf
+ *
+ * This driver is based on lm95241.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+
+#define DEVNAME "lm95245"
+
+static const unsigned short normal_i2c[] = {
+ 0x18, 0x19, 0x29, 0x4c, 0x4d, I2C_CLIENT_END };
+
+/* LM95245 registers */
+/* general registers */
+#define LM95245_REG_RW_CONFIG1 0x03
+#define LM95245_REG_RW_CONVERS_RATE 0x04
+#define LM95245_REG_W_ONE_SHOT 0x0F
+
+/* diode configuration */
+#define LM95245_REG_RW_CONFIG2 0xBF
+#define LM95245_REG_RW_REMOTE_OFFH 0x11
+#define LM95245_REG_RW_REMOTE_OFFL 0x12
+
+/* status registers */
+#define LM95245_REG_R_STATUS1 0x02
+#define LM95245_REG_R_STATUS2 0x33
+
+/* limit registers */
+#define LM95245_REG_RW_REMOTE_OS_LIMIT 0x07
+#define LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT 0x20
+#define LM95245_REG_RW_REMOTE_TCRIT_LIMIT 0x19
+#define LM95245_REG_RW_COMMON_HYSTERESIS 0x21
+
+/* temperature signed */
+#define LM95245_REG_R_LOCAL_TEMPH_S 0x00
+#define LM95245_REG_R_LOCAL_TEMPL_S 0x30
+#define LM95245_REG_R_REMOTE_TEMPH_S 0x01
+#define LM95245_REG_R_REMOTE_TEMPL_S 0x10
+/* temperature unsigned */
+#define LM95245_REG_R_REMOTE_TEMPH_U 0x31
+#define LM95245_REG_R_REMOTE_TEMPL_U 0x32
+
+/* id registers */
+#define LM95245_REG_R_MAN_ID 0xFE
+#define LM95245_REG_R_CHIP_ID 0xFF
+
+/* LM95245 specific bitfields */
+#define CFG_STOP 0x40
+#define CFG_REMOTE_TCRIT_MASK 0x10
+#define CFG_REMOTE_OS_MASK 0x08
+#define CFG_LOCAL_TCRIT_MASK 0x04
+#define CFG_LOCAL_OS_MASK 0x02
+
+#define CFG2_OS_A0 0x40
+#define CFG2_DIODE_FAULT_OS 0x20
+#define CFG2_DIODE_FAULT_TCRIT 0x10
+#define CFG2_REMOTE_TT 0x08
+#define CFG2_REMOTE_FILTER_DIS 0x00
+#define CFG2_REMOTE_FILTER_EN 0x06
+
+/* conversion rate in ms */
+#define RATE_CR0063 0x00
+#define RATE_CR0364 0x01
+#define RATE_CR1000 0x02
+#define RATE_CR2500 0x03
+
+#define STATUS1_DIODE_FAULT 0x04
+#define STATUS1_RTCRIT 0x02
+#define STATUS1_LOC 0x01
+
+#define MANUFACTURER_ID 0x01
+#define DEFAULT_REVISION 0xB3
+
+static const u8 lm95245_reg_address[] = {
+ LM95245_REG_R_LOCAL_TEMPH_S,
+ LM95245_REG_R_LOCAL_TEMPL_S,
+ LM95245_REG_R_REMOTE_TEMPH_S,
+ LM95245_REG_R_REMOTE_TEMPL_S,
+ LM95245_REG_R_REMOTE_TEMPH_U,
+ LM95245_REG_R_REMOTE_TEMPL_U,
+ LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT,
+ LM95245_REG_RW_REMOTE_TCRIT_LIMIT,
+ LM95245_REG_RW_COMMON_HYSTERESIS,
+ LM95245_REG_R_STATUS1,
+};
+
+/* Client data (each client gets its own) */
+struct lm95245_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ unsigned long last_updated; /* in jiffies */
+ unsigned long interval; /* in msecs */
+ bool valid; /* zero until following fields are valid */
+ /* registers values */
+ u8 regs[ARRAY_SIZE(lm95245_reg_address)];
+ u8 config1, config2;
+};
+
+/* Conversions */
+static int temp_from_reg_unsigned(u8 val_h, u8 val_l)
+{
+ return val_h * 1000 + val_l * 1000 / 256;
+}
+
+static int temp_from_reg_signed(u8 val_h, u8 val_l)
+{
+ if (val_h & 0x80)
+ return (val_h - 0x100) * 1000;
+ return temp_from_reg_unsigned(val_h, val_l);
+}
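Worked examples for the two conversion helpers (register values chosen for illustration only):

	/*
	 * temp_from_reg_unsigned(0x19, 0x80) = 25 * 1000 + 128 * 1000 / 256
	 *	= 25500 (25.5 degC)
	 * temp_from_reg_signed(0xE7, 0x00) = (0xE7 - 0x100) * 1000
	 *	= -25000 (-25 degC; the low byte is ignored for negative values)
	 */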
+
+static struct lm95245_data *lm95245_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95245_data *data = i2c_get_clientdata(client);
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated
+ + msecs_to_jiffies(data->interval)) || !data->valid) {
+ int i;
+
+ dev_dbg(&client->dev, "Updating lm95245 data.\n");
+ for (i = 0; i < ARRAY_SIZE(lm95245_reg_address); i++)
+ data->regs[i]
+ = i2c_smbus_read_byte_data(client,
+ lm95245_reg_address[i]);
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
+static unsigned long lm95245_read_conversion_rate(struct i2c_client *client)
+{
+ int rate;
+ unsigned long interval;
+
+ rate = i2c_smbus_read_byte_data(client, LM95245_REG_RW_CONVERS_RATE);
+
+ switch (rate) {
+ case RATE_CR0063:
+ interval = 63;
+ break;
+ case RATE_CR0364:
+ interval = 364;
+ break;
+ case RATE_CR1000:
+ interval = 1000;
+ break;
+ case RATE_CR2500:
+ default:
+ interval = 2500;
+ break;
+ }
+
+ return interval;
+}
+
+static unsigned long lm95245_set_conversion_rate(struct i2c_client *client,
+ unsigned long interval)
+{
+ int rate;
+
+ if (interval <= 63) {
+ interval = 63;
+ rate = RATE_CR0063;
+ } else if (interval <= 364) {
+ interval = 364;
+ rate = RATE_CR0364;
+ } else if (interval <= 1000) {
+ interval = 1000;
+ rate = RATE_CR1000;
+ } else {
+ interval = 2500;
+ rate = RATE_CR2500;
+ }
+
+ i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONVERS_RATE, rate);
+
+ return interval;
+}
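The requested update interval is mapped to one of the four supported conversion rates (63, 364, 1000 or 2500 ms); for example (an illustrative call, not part of the patch):

	/*
	 * lm95245_set_conversion_rate(client, 500) writes RATE_CR1000 to
	 * LM95245_REG_RW_CONVERS_RATE and returns 1000, so a subsequent read
	 * of the update_interval attribute reports 1000 ms.
	 */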
+
+/* Sysfs stuff */
+static ssize_t show_input(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct lm95245_data *data = lm95245_update_device(dev);
+ int temp;
+ int index = to_sensor_dev_attr(attr)->index;
+
+ /*
+ * Index 0 (local temp) is always signed.
+ * Index 2 (remote temp) has both signed and unsigned registers;
+ * use the signed reading when the sign bit is set.
+ */
+ if (index == 0 || data->regs[index] & 0x80)
+ temp = temp_from_reg_signed(data->regs[index],
+ data->regs[index + 1]);
+ else
+ temp = temp_from_reg_unsigned(data->regs[index + 2],
+ data->regs[index + 3]);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", temp);
+}
+
+static ssize_t show_limit(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct lm95245_data *data = lm95245_update_device(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n",
+ data->regs[index] * 1000);
+}
+
+static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95245_data *data = i2c_get_clientdata(client);
+ int index = to_sensor_dev_attr(attr)->index;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ val /= 1000;
+
+ val = SENSORS_LIMIT(val, 0, (index == 6 ? 127 : 255));
+
+ mutex_lock(&data->update_lock);
+
+ data->valid = 0;
+
+ i2c_smbus_write_byte_data(client, lm95245_reg_address[index], val);
+
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95245_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ val /= 1000;
+
+ val = SENSORS_LIMIT(val, 0, 31);
+
+ mutex_lock(&data->update_lock);
+
+ data->valid = 0;
+
+ /* shared crit hysteresis */
+ i2c_smbus_write_byte_data(client, LM95245_REG_RW_COMMON_HYSTERESIS,
+ val);
+
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_type(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95245_data *data = i2c_get_clientdata(client);
+
+ return snprintf(buf, PAGE_SIZE - 1,
+ data->config2 & CFG2_REMOTE_TT ? "1\n" : "2\n");
+}
+
+static ssize_t set_type(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95245_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+ if (val != 1 && val != 2)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+
+ if (val == 1)
+ data->config2 |= CFG2_REMOTE_TT;
+ else
+ data->config2 &= ~CFG2_REMOTE_TT;
+
+ data->valid = 0;
+
+ i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONFIG2,
+ data->config2);
+
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct lm95245_data *data = lm95245_update_device(dev);
+ int index = to_sensor_dev_attr(attr)->index;
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n",
+ !!(data->regs[9] & index));
+}
+
+static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct lm95245_data *data = lm95245_update_device(dev);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%lu\n", data->interval);
+}
+
+static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm95245_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+
+ data->interval = lm95245_set_conversion_rate(client, val);
+
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_limit,
+ set_limit, 6);
+static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_limit,
+ set_crit_hyst, 8);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL,
+ STATUS1_LOC);
+
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_limit,
+ set_limit, 7);
+static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_limit,
+ set_crit_hyst, 8);
+static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL,
+ STATUS1_RTCRIT);
+static SENSOR_DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type,
+ set_type, 0);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL,
+ STATUS1_DIODE_FAULT);
+
+static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
+ set_interval);
+
+static struct attribute *lm95245_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_type.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &dev_attr_update_interval.attr,
+ NULL
+};
+
+static const struct attribute_group lm95245_group = {
+ .attrs = lm95245_attributes,
+};
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int lm95245_detect(struct i2c_client *new_client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = new_client->adapter;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ if (i2c_smbus_read_byte_data(new_client, LM95245_REG_R_MAN_ID)
+ != MANUFACTURER_ID
+ || i2c_smbus_read_byte_data(new_client, LM95245_REG_R_CHIP_ID)
+ != DEFAULT_REVISION)
+ return -ENODEV;
+
+ strlcpy(info->type, DEVNAME, I2C_NAME_SIZE);
+ return 0;
+}
+
+static void lm95245_init_client(struct i2c_client *client)
+{
+ struct lm95245_data *data = i2c_get_clientdata(client);
+
+ data->valid = 0;
+ data->interval = lm95245_read_conversion_rate(client);
+
+ data->config1 = i2c_smbus_read_byte_data(client,
+ LM95245_REG_RW_CONFIG1);
+ data->config2 = i2c_smbus_read_byte_data(client,
+ LM95245_REG_RW_CONFIG2);
+
+ if (data->config1 & CFG_STOP) {
+ /* Clear the standby bit */
+ data->config1 &= ~CFG_STOP;
+ i2c_smbus_write_byte_data(client, LM95245_REG_RW_CONFIG1,
+ data->config1);
+ }
+}
+
+static int lm95245_probe(struct i2c_client *new_client,
+ const struct i2c_device_id *id)
+{
+ struct lm95245_data *data;
+ int err;
+
+ data = kzalloc(sizeof(struct lm95245_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(new_client, data);
+ mutex_init(&data->update_lock);
+
+ /* Initialize the LM95245 chip */
+ lm95245_init_client(new_client);
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&new_client->dev.kobj, &lm95245_group);
+ if (err)
+ goto exit_free;
+
+ data->hwmon_dev = hwmon_device_register(&new_client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto exit_remove_files;
+ }
+
+ return 0;
+
+exit_remove_files:
+ sysfs_remove_group(&new_client->dev.kobj, &lm95245_group);
+exit_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int lm95245_remove(struct i2c_client *client)
+{
+ struct lm95245_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &lm95245_group);
+
+ kfree(data);
+ return 0;
+}
+
+/* Driver data (common to all clients) */
+static const struct i2c_device_id lm95245_id[] = {
+ { DEVNAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm95245_id);
+
+static struct i2c_driver lm95245_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = DEVNAME,
+ },
+ .probe = lm95245_probe,
+ .remove = lm95245_remove,
+ .id_table = lm95245_id,
+ .detect = lm95245_detect,
+ .address_list = normal_i2c,
+};
+
+static int __init sensors_lm95245_init(void)
+{
+ return i2c_add_driver(&lm95245_driver);
+}
+
+static void __exit sensors_lm95245_exit(void)
+{
+ i2c_del_driver(&lm95245_driver);
+}
+
+MODULE_AUTHOR("Alexander Stein <alexander.stein@systec-electronic.com>");
+MODULE_DESCRIPTION("LM95245 sensor driver");
+MODULE_LICENSE("GPL");
+
+module_init(sensors_lm95245_init);
+module_exit(sensors_lm95245_exit);
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 12a54aa2977..c97b78ef911 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -38,8 +38,10 @@ struct max1111_data {
struct device *hwmon_dev;
struct spi_message msg;
struct spi_transfer xfer[2];
- uint8_t *tx_buf;
- uint8_t *rx_buf;
+ uint8_t tx_buf[MAX1111_TX_BUF_SIZE];
+ uint8_t rx_buf[MAX1111_RX_BUF_SIZE];
+ struct mutex drvdata_lock;
+ /* protect msg, xfer and buffers from multiple access */
};
static int max1111_read(struct device *dev, int channel)
@@ -48,6 +50,9 @@ static int max1111_read(struct device *dev, int channel)
uint8_t v1, v2;
int err;
+ /* Writes to the drvdata struct are not thread-safe; serialize on the mutex */
+ mutex_lock(&data->drvdata_lock);
+
data->tx_buf[0] = (channel << MAX1111_CTRL_SEL_SH) |
MAX1111_CTRL_PD0 | MAX1111_CTRL_PD1 |
MAX1111_CTRL_SGL | MAX1111_CTRL_UNI | MAX1111_CTRL_STR;
@@ -55,12 +60,15 @@ static int max1111_read(struct device *dev, int channel)
err = spi_sync(data->spi, &data->msg);
if (err < 0) {
dev_err(dev, "spi_sync failed with %d\n", err);
+ mutex_unlock(&data->drvdata_lock);
return err;
}
v1 = data->rx_buf[0];
v2 = data->rx_buf[1];
+ mutex_unlock(&data->drvdata_lock);
+
if ((v1 & 0xc0) || (v2 & 0x3f))
return -EINVAL;
@@ -123,33 +131,23 @@ static const struct attribute_group max1111_attr_group = {
.attrs = max1111_attributes,
};
-static int setup_transfer(struct max1111_data *data)
+static int __devinit setup_transfer(struct max1111_data *data)
{
struct spi_message *m;
struct spi_transfer *x;
- data->tx_buf = kmalloc(MAX1111_TX_BUF_SIZE, GFP_KERNEL);
- if (!data->tx_buf)
- return -ENOMEM;
-
- data->rx_buf = kmalloc(MAX1111_RX_BUF_SIZE, GFP_KERNEL);
- if (!data->rx_buf) {
- kfree(data->tx_buf);
- return -ENOMEM;
- }
-
m = &data->msg;
x = &data->xfer[0];
spi_message_init(m);
x->tx_buf = &data->tx_buf[0];
- x->len = 1;
+ x->len = MAX1111_TX_BUF_SIZE;
spi_message_add_tail(x, m);
x++;
x->rx_buf = &data->rx_buf[0];
- x->len = 2;
+ x->len = MAX1111_RX_BUF_SIZE;
spi_message_add_tail(x, m);
return 0;
@@ -176,13 +174,15 @@ static int __devinit max1111_probe(struct spi_device *spi)
if (err)
goto err_free_data;
+ mutex_init(&data->drvdata_lock);
+
data->spi = spi;
spi_set_drvdata(spi, data);
err = sysfs_create_group(&spi->dev.kobj, &max1111_attr_group);
if (err) {
dev_err(&spi->dev, "failed to create attribute group\n");
- goto err_free_all;
+ goto err_free_data;
}
data->hwmon_dev = hwmon_device_register(&spi->dev);
@@ -199,9 +199,6 @@ static int __devinit max1111_probe(struct spi_device *spi)
err_remove:
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
-err_free_all:
- kfree(data->rx_buf);
- kfree(data->tx_buf);
err_free_data:
kfree(data);
return err;
@@ -213,8 +210,7 @@ static int __devexit max1111_remove(struct spi_device *spi)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
- kfree(data->rx_buf);
- kfree(data->tx_buf);
+ mutex_destroy(&data->drvdata_lock);
kfree(data);
return 0;
}
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
new file mode 100644
index 00000000000..20d1b2ddffb
--- /dev/null
+++ b/drivers/hwmon/max1668.c
@@ -0,0 +1,502 @@
+/*
+ Copyright (c) 2011 David George <david.george@ska.ac.za>
+
+ based on adm1021.c
+ some credit to Christoph Scheurer, but largely a rewrite
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+
+/* Addresses to scan */
+static unsigned short max1668_addr_list[] = {
+ 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
+
+/* max1668 registers */
+
+#define MAX1668_REG_TEMP(nr) (nr)
+#define MAX1668_REG_STAT1 0x05
+#define MAX1668_REG_STAT2 0x06
+#define MAX1668_REG_MAN_ID 0xfe
+#define MAX1668_REG_DEV_ID 0xff
+
+/* limits */
+
+/* write high limits */
+#define MAX1668_REG_LIMH_WR(nr) (0x13 + 2 * (nr))
+/* write low limits */
+#define MAX1668_REG_LIML_WR(nr) (0x14 + 2 * (nr))
+/* read high limits */
+#define MAX1668_REG_LIMH_RD(nr) (0x08 + 2 * (nr))
+/* read low limits */
+#define MAX1668_REG_LIML_RD(nr) (0x09 + 2 * (nr))
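For reference, expanding the limit-register helpers for the local sensor (nr = 0) and the first remote channel (nr = 1):

	/*
	 * MAX1668_REG_LIMH_WR(0) = 0x13, MAX1668_REG_LIML_WR(0) = 0x14
	 * MAX1668_REG_LIMH_RD(0) = 0x08, MAX1668_REG_LIML_RD(0) = 0x09
	 * MAX1668_REG_LIMH_WR(1) = 0x15, MAX1668_REG_LIML_WR(1) = 0x16
	 * MAX1668_REG_LIMH_RD(1) = 0x0a, MAX1668_REG_LIML_RD(1) = 0x0b
	 */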
+
+/* manufacturer and device ID Constants */
+#define MAN_ID_MAXIM 0x4d
+#define DEV_ID_MAX1668 0x3
+#define DEV_ID_MAX1805 0x5
+#define DEV_ID_MAX1989 0xb
+
+/* read only mode module parameter */
+static bool read_only;
+module_param(read_only, bool, 0);
+MODULE_PARM_DESC(read_only, "Don't set any values, read only mode");
+
+enum chips { max1668, max1805, max1989 };
+
+struct max1668_data {
+ struct device *hwmon_dev;
+ enum chips type;
+
+ struct mutex update_lock;
+ char valid; /* !=0 if following fields are valid */
+ unsigned long last_updated; /* In jiffies */
+
+ /* 1x local and 4x remote */
+ s8 temp_max[5];
+ s8 temp_min[5];
+ s8 temp[5];
+ u16 alarms;
+};
+
+static struct max1668_data *max1668_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max1668_data *data = i2c_get_clientdata(client);
+ struct max1668_data *ret = data;
+ s32 val;
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (data->valid && !time_after(jiffies,
+ data->last_updated + HZ + HZ / 2))
+ goto abort;
+
+ for (i = 0; i < 5; i++) {
+ val = i2c_smbus_read_byte_data(client, MAX1668_REG_TEMP(i));
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp[i] = (s8) val;
+
+ val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIMH_RD(i));
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp_max[i] = (s8) val;
+
+ val = i2c_smbus_read_byte_data(client, MAX1668_REG_LIML_RD(i));
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp_min[i] = (s8) val;
+ }
+
+ val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT1);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->alarms = val << 8;
+
+ val = i2c_smbus_read_byte_data(client, MAX1668_REG_STAT2);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->alarms |= val;
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+abort:
+ mutex_unlock(&data->update_lock);
+
+ return ret;
+}
+
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct max1668_data *data = max1668_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", data->temp[index] * 1000);
+}
+
+static ssize_t show_temp_max(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct max1668_data *data = max1668_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", data->temp_max[index] * 1000);
+}
+
+static ssize_t show_temp_min(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct max1668_data *data = max1668_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", data->temp_min[index] * 1000);
+}
+
+static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int index = to_sensor_dev_attr(attr)->index;
+ struct max1668_data *data = max1668_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1);
+}
+
+static ssize_t show_fault(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct max1668_data *data = max1668_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%u\n",
+ (data->alarms & (1 << 12)) && data->temp[index] == 127);
+}
+
+static ssize_t set_temp_max(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max1668_data *data = i2c_get_clientdata(client);
+ long temp;
+ int ret;
+
+ ret = kstrtol(buf, 10, &temp);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ data->temp_max[index] = SENSORS_LIMIT(temp/1000, -128, 127);
+ if (i2c_smbus_write_byte_data(client,
+ MAX1668_REG_LIMH_WR(index),
+ data->temp_max[index]))
+ count = -EIO;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t set_temp_min(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int index = to_sensor_dev_attr(devattr)->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max1668_data *data = i2c_get_clientdata(client);
+ long temp;
+ int ret;
+
+ ret = kstrtol(buf, 10, &temp);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ data->temp_min[index] = SENSORS_LIMIT(temp/1000, -128, 127);
+ if (i2c_smbus_write_byte_data(client,
+ MAX1668_REG_LIML_WR(index),
+ data->temp_min[index]))
+ count = -EIO;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp_max,
+ set_temp_max, 0);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO, show_temp_min,
+ set_temp_min, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO, show_temp_max,
+ set_temp_max, 1);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO, show_temp_min,
+ set_temp_min, 1);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO, show_temp_max,
+ set_temp_max, 2);
+static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO, show_temp_min,
+ set_temp_min, 2);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO, show_temp_max,
+ set_temp_max, 3);
+static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO, show_temp_min,
+ set_temp_min, 3);
+static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp5_max, S_IRUGO, show_temp_max,
+ set_temp_max, 4);
+static SENSOR_DEVICE_ATTR(temp5_min, S_IRUGO, show_temp_min,
+ set_temp_min, 4);
+
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp5_min_alarm, S_IRUGO, show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_alarm, NULL, 0);
+
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_fault, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_fault, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_fault, NULL, 4);
+
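How the alarm attribute indices above map onto the 16-bit alarms word built in max1668_update_device() (derived from the bit arithmetic in the code; hardware meanings are only as implied by the macros):

	/*
	 * data->alarms = (STAT1 << 8) | STAT2, and show_alarm() returns
	 * (alarms >> index) & 1, so:
	 *	temp1_max_alarm (index 14) -> STAT1 bit 6
	 *	temp1_min_alarm (index 13) -> STAT1 bit 5
	 *	temp2_max_alarm (index 6)  -> STAT2 bit 6
	 *	...
	 *	temp5_max_alarm (index 0)  -> STAT2 bit 0
	 * show_fault() additionally requires alarms bit 12 (STAT1 bit 4) to
	 * be set and the temperature reading to be pinned at 127.
	 */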
+/* Attributes common to MAX1668, MAX1989 and MAX1805 */
+static struct attribute *max1668_attribute_common[] = {
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp3_max.dev_attr.attr,
+ &sensor_dev_attr_temp3_min.dev_attr.attr,
+ &sensor_dev_attr_temp3_input.dev_attr.attr,
+
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp3_fault.dev_attr.attr,
+ NULL
+};
+
+/* Attributes not present on MAX1805 */
+static struct attribute *max1668_attribute_unique[] = {
+ &sensor_dev_attr_temp4_max.dev_attr.attr,
+ &sensor_dev_attr_temp4_min.dev_attr.attr,
+ &sensor_dev_attr_temp4_input.dev_attr.attr,
+ &sensor_dev_attr_temp5_max.dev_attr.attr,
+ &sensor_dev_attr_temp5_min.dev_attr.attr,
+ &sensor_dev_attr_temp5_input.dev_attr.attr,
+
+ &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp5_min_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_temp4_fault.dev_attr.attr,
+ &sensor_dev_attr_temp5_fault.dev_attr.attr,
+ NULL
+};
+
+static mode_t max1668_attribute_mode(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ int ret = S_IRUGO;
+ if (read_only)
+ return ret;
+ if (attr == &sensor_dev_attr_temp1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp2_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp4_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp5_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp2_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp4_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp5_min.dev_attr.attr)
+ ret |= S_IWUSR;
+ return ret;
+}
+
+static const struct attribute_group max1668_group_common = {
+ .attrs = max1668_attribute_common,
+ .is_visible = max1668_attribute_mode
+};
+
+static const struct attribute_group max1668_group_unique = {
+ .attrs = max1668_attribute_unique,
+ .is_visible = max1668_attribute_mode
+};
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int max1668_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ const char *type_name;
+ int man_id, dev_id;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ /* Check for unsupported part */
+ man_id = i2c_smbus_read_byte_data(client, MAX1668_REG_MAN_ID);
+ if (man_id != MAN_ID_MAXIM)
+ return -ENODEV;
+
+ dev_id = i2c_smbus_read_byte_data(client, MAX1668_REG_DEV_ID);
+ if (dev_id < 0)
+ return -ENODEV;
+
+ type_name = NULL;
+ if (dev_id == DEV_ID_MAX1668)
+ type_name = "max1668";
+ else if (dev_id == DEV_ID_MAX1805)
+ type_name = "max1805";
+ else if (dev_id == DEV_ID_MAX1989)
+ type_name = "max1989";
+
+ if (!type_name)
+ return -ENODEV;
+
+ strlcpy(info->type, type_name, I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int max1668_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct max1668_data *data;
+ int err;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(struct max1668_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ data->type = id->driver_data;
+ mutex_init(&data->update_lock);
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &max1668_group_common);
+ if (err)
+ goto error_free;
+
+ if (data->type == max1668 || data->type == max1989) {
+ err = sysfs_create_group(&client->dev.kobj,
+ &max1668_group_unique);
+ if (err)
+ goto error_sysrem0;
+ }
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto error_sysrem1;
+ }
+
+ return 0;
+
+error_sysrem1:
+ if (data->type == max1668 || data->type == max1989)
+ sysfs_remove_group(&client->dev.kobj, &max1668_group_unique);
+error_sysrem0:
+ sysfs_remove_group(&client->dev.kobj, &max1668_group_common);
+error_free:
+ kfree(data);
+ return err;
+}
+
+static int max1668_remove(struct i2c_client *client)
+{
+ struct max1668_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ if (data->type == max1668 || data->type == max1989)
+ sysfs_remove_group(&client->dev.kobj, &max1668_group_unique);
+
+ sysfs_remove_group(&client->dev.kobj, &max1668_group_common);
+
+ kfree(data);
+ return 0;
+}
+
+static const struct i2c_device_id max1668_id[] = {
+ { "max1668", max1668 },
+ { "max1805", max1805 },
+ { "max1989", max1989 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max1668_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver max1668_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max1668",
+ },
+ .probe = max1668_probe,
+ .remove = max1668_remove,
+ .id_table = max1668_id,
+ .detect = max1668_detect,
+ .address_list = max1668_addr_list,
+};
+
+static int __init sensors_max1668_init(void)
+{
+ return i2c_add_driver(&max1668_driver);
+}
+
+static void __exit sensors_max1668_exit(void)
+{
+ i2c_del_driver(&max1668_driver);
+}
+
+MODULE_AUTHOR("David George <david.george@ska.ac.za>");
+MODULE_DESCRIPTION("MAX1668 remote temperature sensor driver");
+MODULE_LICENSE("GPL");
+
+module_init(sensors_max1668_init)
+module_exit(sensors_max1668_exit)
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
new file mode 100644
index 00000000000..d7926f4336b
--- /dev/null
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -0,0 +1,453 @@
+/*
+ * ntc_thermistor.c - NTC Thermistors
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/math64.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+
+#include <linux/platform_data/ntc_thermistor.h>
+
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+struct ntc_compensation {
+ int temp_C;
+ unsigned int ohm;
+};
+
+/*
+ * A compensation table should be sorted by the values of .ohm
+ * in descending order.
+ * The following compensation tables are taken from the Murata NTC
+ * Thermistors datasheet.
+ */
+const struct ntc_compensation ncpXXwb473[] = {
+ { .temp_C = -40, .ohm = 1747920 },
+ { .temp_C = -35, .ohm = 1245428 },
+ { .temp_C = -30, .ohm = 898485 },
+ { .temp_C = -25, .ohm = 655802 },
+ { .temp_C = -20, .ohm = 483954 },
+ { .temp_C = -15, .ohm = 360850 },
+ { .temp_C = -10, .ohm = 271697 },
+ { .temp_C = -5, .ohm = 206463 },
+ { .temp_C = 0, .ohm = 158214 },
+ { .temp_C = 5, .ohm = 122259 },
+ { .temp_C = 10, .ohm = 95227 },
+ { .temp_C = 15, .ohm = 74730 },
+ { .temp_C = 20, .ohm = 59065 },
+ { .temp_C = 25, .ohm = 47000 },
+ { .temp_C = 30, .ohm = 37643 },
+ { .temp_C = 35, .ohm = 30334 },
+ { .temp_C = 40, .ohm = 24591 },
+ { .temp_C = 45, .ohm = 20048 },
+ { .temp_C = 50, .ohm = 16433 },
+ { .temp_C = 55, .ohm = 13539 },
+ { .temp_C = 60, .ohm = 11209 },
+ { .temp_C = 65, .ohm = 9328 },
+ { .temp_C = 70, .ohm = 7798 },
+ { .temp_C = 75, .ohm = 6544 },
+ { .temp_C = 80, .ohm = 5518 },
+ { .temp_C = 85, .ohm = 4674 },
+ { .temp_C = 90, .ohm = 3972 },
+ { .temp_C = 95, .ohm = 3388 },
+ { .temp_C = 100, .ohm = 2902 },
+ { .temp_C = 105, .ohm = 2494 },
+ { .temp_C = 110, .ohm = 2150 },
+ { .temp_C = 115, .ohm = 1860 },
+ { .temp_C = 120, .ohm = 1615 },
+ { .temp_C = 125, .ohm = 1406 },
+};
+const struct ntc_compensation ncpXXwl333[] = {
+ { .temp_C = -40, .ohm = 1610154 },
+ { .temp_C = -35, .ohm = 1130850 },
+ { .temp_C = -30, .ohm = 802609 },
+ { .temp_C = -25, .ohm = 575385 },
+ { .temp_C = -20, .ohm = 416464 },
+ { .temp_C = -15, .ohm = 304219 },
+ { .temp_C = -10, .ohm = 224193 },
+ { .temp_C = -5, .ohm = 166623 },
+ { .temp_C = 0, .ohm = 124850 },
+ { .temp_C = 5, .ohm = 94287 },
+ { .temp_C = 10, .ohm = 71747 },
+ { .temp_C = 15, .ohm = 54996 },
+ { .temp_C = 20, .ohm = 42455 },
+ { .temp_C = 25, .ohm = 33000 },
+ { .temp_C = 30, .ohm = 25822 },
+ { .temp_C = 35, .ohm = 20335 },
+ { .temp_C = 40, .ohm = 16115 },
+ { .temp_C = 45, .ohm = 12849 },
+ { .temp_C = 50, .ohm = 10306 },
+ { .temp_C = 55, .ohm = 8314 },
+ { .temp_C = 60, .ohm = 6746 },
+ { .temp_C = 65, .ohm = 5503 },
+ { .temp_C = 70, .ohm = 4513 },
+ { .temp_C = 75, .ohm = 3721 },
+ { .temp_C = 80, .ohm = 3084 },
+ { .temp_C = 85, .ohm = 2569 },
+ { .temp_C = 90, .ohm = 2151 },
+ { .temp_C = 95, .ohm = 1809 },
+ { .temp_C = 100, .ohm = 1529 },
+ { .temp_C = 105, .ohm = 1299 },
+ { .temp_C = 110, .ohm = 1108 },
+ { .temp_C = 115, .ohm = 949 },
+ { .temp_C = 120, .ohm = 817 },
+ { .temp_C = 125, .ohm = 707 },
+};
+
+struct ntc_data {
+ struct device *hwmon_dev;
+ struct ntc_thermistor_platform_data *pdata;
+ const struct ntc_compensation *comp;
+ struct device *dev;
+ int n_comp;
+ char name[PLATFORM_NAME_SIZE];
+};
+
+static inline u64 div64_u64_safe(u64 dividend, u64 divisor)
+{
+ if (divisor == 0 && dividend == 0)
+ return 0;
+ if (divisor == 0)
+ return UINT_MAX;
+ return div64_u64(dividend, divisor);
+}
+
+static unsigned int get_ohm_of_thermistor(struct ntc_data *data,
+ unsigned int uV)
+{
+ struct ntc_thermistor_platform_data *pdata = data->pdata;
+ u64 mV = uV / 1000;
+ u64 pmV = pdata->pullup_uV / 1000;
+ u64 N, puO, pdO;
+ puO = pdata->pullup_ohm;
+ pdO = pdata->pulldown_ohm;
+
+ if (mV == 0) {
+ if (pdata->connect == NTC_CONNECTED_POSITIVE)
+ return UINT_MAX;
+ return 0;
+ }
+ if (mV >= pmV)
+ return (pdata->connect == NTC_CONNECTED_POSITIVE) ?
+ 0 : UINT_MAX;
+
+ if (pdata->connect == NTC_CONNECTED_POSITIVE && puO == 0)
+ N = div64_u64_safe(pdO * (pmV - mV), mV);
+ else if (pdata->connect == NTC_CONNECTED_GROUND && pdO == 0)
+ N = div64_u64_safe(puO * mV, pmV - mV);
+ else if (pdata->connect == NTC_CONNECTED_POSITIVE)
+ N = div64_u64_safe(pdO * puO * (pmV - mV),
+ puO * mV - pdO * (pmV - mV));
+ else
+ N = div64_u64_safe(pdO * puO * mV, pdO * (pmV - mV) - puO * mV);
+
+ return (unsigned int) N;
+}
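A worked example of the divider math, using illustrative platform data (these numbers are not from the patch):

	/*
	 * connect = NTC_CONNECTED_GROUND, pulldown_ohm = 0,
	 * pullup_ohm = 47000, pullup_uV = 5000000, read_uV() = 2500000:
	 *	mV = 2500, pmV = 5000
	 *	N = puO * mV / (pmV - mV) = 47000 * 2500 / 2500 = 47000 ohm
	 * which the ncpXXwb473 table above resolves to 25 degC.
	 */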
+
+static int lookup_comp(struct ntc_data *data,
+ unsigned int ohm, int *i_low, int *i_high)
+{
+ int start, end, mid = -1;
+
+ /* Do a binary search on compensation table */
+ start = 0;
+ end = data->n_comp;
+
+ while (end > start) {
+ mid = start + (end - start) / 2;
+ if (data->comp[mid].ohm < ohm)
+ end = mid;
+ else if (data->comp[mid].ohm > ohm)
+ start = mid + 1;
+ else
+ break;
+ }
+
+ if (mid == 0) {
+ if (data->comp[mid].ohm > ohm) {
+ *i_high = mid;
+ *i_low = mid + 1;
+ return 0;
+ } else {
+ *i_low = mid;
+ *i_high = -1;
+ return -EINVAL;
+ }
+ }
+ if (mid == (data->n_comp - 1)) {
+ if (data->comp[mid].ohm <= ohm) {
+ *i_low = mid;
+ *i_high = mid - 1;
+ return 0;
+ } else {
+ *i_low = -1;
+ *i_high = mid;
+ return -EINVAL;
+ }
+ }
+
+ if (data->comp[mid].ohm <= ohm) {
+ *i_low = mid;
+ *i_high = mid - 1;
+ }
+ if (data->comp[mid].ohm > ohm) {
+ *i_low = mid + 1;
+ *i_high = mid;
+ }
+
+ return 0;
+}
+
+static int get_temp_mC(struct ntc_data *data, unsigned int ohm, int *temp)
+{
+ int low, high;
+ int ret;
+
+ ret = lookup_comp(data, ohm, &low, &high);
+ if (ret) {
+ /* Unable to use linear approximation */
+ if (low != -1)
+ *temp = data->comp[low].temp_C * 1000;
+ else if (high != -1)
+ *temp = data->comp[high].temp_C * 1000;
+ else
+ return ret;
+ } else {
+ *temp = data->comp[low].temp_C * 1000 +
+ ((data->comp[high].temp_C - data->comp[low].temp_C) *
+ 1000 * ((int)ohm - (int)data->comp[low].ohm)) /
+ ((int)data->comp[high].ohm - (int)data->comp[low].ohm);
+ }
+
+ return 0;
+}
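And a quick hand check of the interpolation against the ncpXXwb473 table (the resistance value is illustrative):

	/*
	 * ohm = 50000 falls between 59065 ohm (20 degC, i_high) and
	 * 47000 ohm (25 degC, i_low), so:
	 *	temp = 25000 + ((20 - 25) * 1000 * (50000 - 47000))
	 *			/ (59065 - 47000)
	 *	     = 25000 - 15000000 / 12065
	 *	     = 23757 millidegrees C (about 23.8 degC)
	 */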
+
+static int ntc_thermistor_read(struct ntc_data *data, int *temp)
+{
+ int ret;
+ int read_ohm, read_uV;
+ unsigned int ohm = 0;
+
+ if (data->pdata->read_ohm) {
+ read_ohm = data->pdata->read_ohm();
+ if (read_ohm < 0)
+ return read_ohm;
+ ohm = (unsigned int)read_ohm;
+ }
+
+ if (data->pdata->read_uV) {
+ read_uV = data->pdata->read_uV();
+ if (read_uV < 0)
+ return read_uV;
+ ohm = get_ohm_of_thermistor(data, (unsigned int)read_uV);
+ }
+
+ ret = get_temp_mC(data, ohm, temp);
+ if (ret) {
+ dev_dbg(data->dev, "Sensor reading function not available.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static ssize_t ntc_show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ntc_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", data->name);
+}
+
+static ssize_t ntc_show_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "4\n");
+}
+
+static ssize_t ntc_show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ntc_data *data = dev_get_drvdata(dev);
+ int temp, ret;
+
+ ret = ntc_thermistor_read(data, &temp);
+ if (ret)
+ return ret;
+ return sprintf(buf, "%d\n", temp);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO, ntc_show_type, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ntc_show_temp, NULL, 0);
+static DEVICE_ATTR(name, S_IRUGO, ntc_show_name, NULL);
+
+static struct attribute *ntc_attributes[] = {
+ &dev_attr_name.attr,
+ &sensor_dev_attr_temp1_type.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ntc_attr_group = {
+ .attrs = ntc_attributes,
+};
+
+static int __devinit ntc_thermistor_probe(struct platform_device *pdev)
+{
+ struct ntc_data *data;
+ struct ntc_thermistor_platform_data *pdata = pdev->dev.platform_data;
+ int ret = 0;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform init data supplied.\n");
+ return -ENODEV;
+ }
+
+ /* Either one of the two is required. */
+ if (!pdata->read_uV && !pdata->read_ohm) {
+ dev_err(&pdev->dev, "Both read_uV and read_ohm missing."
+ "Need either one of the two.\n");
+ return -EINVAL;
+ }
+
+ if (pdata->read_uV && pdata->read_ohm) {
+ dev_warn(&pdev->dev, "Only one of read_uV and read_ohm "
+ "is needed; ignoring read_uV.\n");
+ pdata->read_uV = NULL;
+ }
+
+ if (pdata->read_uV && (pdata->pullup_uV == 0 ||
+ (pdata->pullup_ohm == 0 && pdata->connect ==
+ NTC_CONNECTED_GROUND) ||
+ (pdata->pulldown_ohm == 0 && pdata->connect ==
+ NTC_CONNECTED_POSITIVE) ||
+ (pdata->connect != NTC_CONNECTED_POSITIVE &&
+ pdata->connect != NTC_CONNECTED_GROUND))) {
+ dev_err(&pdev->dev, "Required data to use read_uV not "
+ "supplied.\n");
+ return -EINVAL;
+ }
+
+ data = kzalloc(sizeof(struct ntc_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &pdev->dev;
+ data->pdata = pdata;
+ strncpy(data->name, pdev->id_entry->name, PLATFORM_NAME_SIZE);
+
+ switch (pdev->id_entry->driver_data) {
+ case TYPE_NCPXXWB473:
+ data->comp = ncpXXwb473;
+ data->n_comp = ARRAY_SIZE(ncpXXwb473);
+ break;
+ case TYPE_NCPXXWL333:
+ data->comp = ncpXXwl333;
+ data->n_comp = ARRAY_SIZE(ncpXXwl333);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown device type: %lu(%s)\n",
+ pdev->id_entry->driver_data,
+ pdev->id_entry->name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ ret = sysfs_create_group(&data->dev->kobj, &ntc_attr_group);
+ if (ret) {
+ dev_err(data->dev, "unable to create sysfs files\n");
+ goto err;
+ }
+
+ data->hwmon_dev = hwmon_device_register(data->dev);
+ if (IS_ERR_OR_NULL(data->hwmon_dev)) {
+ dev_err(data->dev, "unable to register as hwmon device.\n");
+ ret = -EINVAL;
+ goto err_after_sysfs;
+ }
+
+ dev_info(&pdev->dev, "Thermistor %s:%d (type: %s/%lu) successfully probed.\n",
+ pdev->name, pdev->id, pdev->id_entry->name,
+ pdev->id_entry->driver_data);
+ return 0;
+err_after_sysfs:
+ sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
+err:
+ kfree(data);
+ return ret;
+}
+
+static int __devexit ntc_thermistor_remove(struct platform_device *pdev)
+{
+ struct ntc_data *data = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(data);
+
+ return 0;
+}
+
+static const struct platform_device_id ntc_thermistor_id[] = {
+ { "ncp15wb473", TYPE_NCPXXWB473 },
+ { "ncp18wb473", TYPE_NCPXXWB473 },
+ { "ncp21wb473", TYPE_NCPXXWB473 },
+ { "ncp03wb473", TYPE_NCPXXWB473 },
+ { "ncp15wl333", TYPE_NCPXXWL333 },
+ { },
+};
+
+static struct platform_driver ntc_thermistor_driver = {
+ .driver = {
+ .name = "ntc-thermistor",
+ .owner = THIS_MODULE,
+ },
+ .probe = ntc_thermistor_probe,
+ .remove = __devexit_p(ntc_thermistor_remove),
+ .id_table = ntc_thermistor_id,
+};
+
+static int __init ntc_thermistor_init(void)
+{
+ return platform_driver_register(&ntc_thermistor_driver);
+}
+
+module_init(ntc_thermistor_init);
+
+static void __exit ntc_thermistor_cleanup(void)
+{
+ platform_driver_unregister(&ntc_thermistor_driver);
+}
+
+module_exit(ntc_thermistor_cleanup);
+
+MODULE_DESCRIPTION("NTC Thermistor Driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ntc-thermistor");
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
new file mode 100644
index 00000000000..c9237b9dcff
--- /dev/null
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -0,0 +1,100 @@
+#
+# PMBus chip drivers configuration
+#
+
+menuconfig PMBUS
+ tristate "PMBus support"
+ depends on I2C && EXPERIMENTAL
+ default n
+ help
+ Say yes here if you want to enable PMBus support.
+
+ This driver can also be built as a module. If so, the module will
+ be called pmbus_core.
+
+if PMBUS
+
+config SENSORS_PMBUS
+ tristate "Generic PMBus devices"
+ default y
+ help
+ If you say yes here you get hardware monitoring support for generic
+ PMBus devices, including but not limited to ADP4000, BMR450, BMR451,
+ BMR453, BMR454, LTC2978, NCP4200, and NCP4208.
+
+ This driver can also be built as a module. If so, the module will
+ be called pmbus.
+
+config SENSORS_ADM1275
+ tristate "Analog Devices ADM1275"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Analog
+ Devices ADM1275 Hot-Swap Controller and Digital Power Monitor.
+
+ This driver can also be built as a module. If so, the module will
+ be called adm1275.
+
+config SENSORS_LM25066
+ tristate "National Semiconductor LM25066 and compatibles"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for National
+ Semiconductor LM25066, LM5064, and LM5066.
+
+ This driver can also be built as a module. If so, the module will
+ be called lm25066.
+
+config SENSORS_MAX16064
+ tristate "Maxim MAX16064"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX16064.
+
+ This driver can also be built as a module. If so, the module will
+ be called max16064.
+
+config SENSORS_MAX34440
+ tristate "Maxim MAX34440/MAX34441"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX34440 and MAX34441.
+
+ This driver can also be built as a module. If so, the module will
+ be called max34440.
+
+config SENSORS_MAX8688
+ tristate "Maxim MAX8688"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX8688.
+
+ This driver can also be built as a module. If so, the module will
+ be called max8688.
+
+config SENSORS_UCD9000
+ tristate "TI UCD90120, UCD90124, UCD9090, UCD90910"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for TI
+ UCD90120, UCD90124, UCD9090, UCD90910 Sequencer and System Health
+ Controllers.
+
+ This driver can also be built as a module. If so, the module will
+ be called ucd9000.
+
+config SENSORS_UCD9200
+ tristate "TI UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, UCD9248"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for TI
+ UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, and UCD9248
+ Digital PWM System Controllers.
+
+ This driver can also be built as a module. If so, the module will
+ be called ucd9200.
+
+endif # PMBUS
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
new file mode 100644
index 00000000000..623eedb1ed9
--- /dev/null
+++ b/drivers/hwmon/pmbus/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for PMBus chip drivers.
+#
+
+obj-$(CONFIG_PMBUS) += pmbus_core.o
+obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
+obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
+obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
+obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
+obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
+obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
+obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
diff --git a/drivers/hwmon/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index c2ee2048ab9..c936e278230 100644
--- a/drivers/hwmon/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -23,15 +23,73 @@
#include <linux/i2c.h>
#include "pmbus.h"
+#define ADM1275_PEAK_IOUT 0xd0
+#define ADM1275_PEAK_VIN 0xd1
+#define ADM1275_PEAK_VOUT 0xd2
#define ADM1275_PMON_CONFIG 0xd4
#define ADM1275_VIN_VOUT_SELECT (1 << 6)
#define ADM1275_VRANGE (1 << 5)
+static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ if (page)
+ return -EINVAL;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_IOUT_MAX:
+ ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_IOUT);
+ break;
+ case PMBUS_VIRT_READ_VOUT_MAX:
+ ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VOUT);
+ break;
+ case PMBUS_VIRT_READ_VIN_MAX:
+ ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VIN);
+ break;
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ int ret;
+
+ if (page)
+ return -EINVAL;
+
+ switch (reg) {
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_IOUT, 0);
+ break;
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_VOUT, 0);
+ break;
+ case PMBUS_VIRT_RESET_VIN_HISTORY:
+ ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_VIN, 0);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int config;
+ int ret;
struct pmbus_driver_info *info;
if (!i2c_check_functionality(client->adapter,
@@ -43,30 +101,35 @@ static int adm1275_probe(struct i2c_client *client,
return -ENOMEM;
config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
- if (config < 0)
- return config;
+ if (config < 0) {
+ ret = config;
+ goto err_mem;
+ }
info->pages = 1;
- info->direct[PSC_VOLTAGE_IN] = true;
- info->direct[PSC_VOLTAGE_OUT] = true;
- info->direct[PSC_CURRENT_OUT] = true;
- info->m[PSC_CURRENT_OUT] = 800;
+ info->format[PSC_VOLTAGE_IN] = direct;
+ info->format[PSC_VOLTAGE_OUT] = direct;
+ info->format[PSC_CURRENT_OUT] = direct;
+ info->m[PSC_CURRENT_OUT] = 807;
info->b[PSC_CURRENT_OUT] = 20475;
info->R[PSC_CURRENT_OUT] = -1;
info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
+ info->read_word_data = adm1275_read_word_data;
+ info->write_word_data = adm1275_write_word_data;
+
if (config & ADM1275_VRANGE) {
- info->m[PSC_VOLTAGE_IN] = 19045;
+ info->m[PSC_VOLTAGE_IN] = 19199;
info->b[PSC_VOLTAGE_IN] = 0;
info->R[PSC_VOLTAGE_IN] = -2;
- info->m[PSC_VOLTAGE_OUT] = 19045;
+ info->m[PSC_VOLTAGE_OUT] = 19199;
info->b[PSC_VOLTAGE_OUT] = 0;
info->R[PSC_VOLTAGE_OUT] = -2;
} else {
- info->m[PSC_VOLTAGE_IN] = 6666;
+ info->m[PSC_VOLTAGE_IN] = 6720;
info->b[PSC_VOLTAGE_IN] = 0;
info->R[PSC_VOLTAGE_IN] = -1;
- info->m[PSC_VOLTAGE_OUT] = 6666;
+ info->m[PSC_VOLTAGE_OUT] = 6720;
info->b[PSC_VOLTAGE_OUT] = 0;
info->R[PSC_VOLTAGE_OUT] = -1;
}
@@ -76,7 +139,14 @@ static int adm1275_probe(struct i2c_client *client,
else
info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT;
- return pmbus_do_probe(client, id, info);
+ ret = pmbus_do_probe(client, id, info);
+ if (ret)
+ goto err_mem;
+ return 0;
+
+err_mem:
+ kfree(info);
+ return ret;
}
static int adm1275_remove(struct i2c_client *client)
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
new file mode 100644
index 00000000000..ac254fba551
--- /dev/null
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -0,0 +1,352 @@
+/*
+ * Hardware monitoring driver for LM25066 / LM5064 / LM5066
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+enum chips { lm25066, lm5064, lm5066 };
+
+#define LM25066_READ_VAUX 0xd0
+#define LM25066_MFR_READ_IIN 0xd1
+#define LM25066_MFR_READ_PIN 0xd2
+#define LM25066_MFR_IIN_OC_WARN_LIMIT 0xd3
+#define LM25066_MFR_PIN_OP_WARN_LIMIT 0xd4
+#define LM25066_READ_PIN_PEAK 0xd5
+#define LM25066_CLEAR_PIN_PEAK 0xd6
+#define LM25066_DEVICE_SETUP 0xd9
+#define LM25066_READ_AVG_VIN 0xdc
+#define LM25066_READ_AVG_VOUT 0xdd
+#define LM25066_READ_AVG_IIN 0xde
+#define LM25066_READ_AVG_PIN 0xdf
+
+#define LM25066_DEV_SETUP_CL (1 << 4) /* Current limit */
+
+struct lm25066_data {
+ int id;
+ struct pmbus_driver_info info;
+};
+
+#define to_lm25066_data(x) container_of(x, struct lm25066_data, info)
+
+static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct lm25066_data *data = to_lm25066_data(info);
+ int ret;
+
+ if (page > 1)
+ return -EINVAL;
+
+ /* Map READ_VAUX into READ_VOUT register on page 1 */
+ if (page == 1) {
+ switch (reg) {
+ case PMBUS_READ_VOUT:
+ ret = pmbus_read_word_data(client, 0,
+ LM25066_READ_VAUX);
+ if (ret < 0)
+ break;
+ /* Adjust returned value to match VOUT coefficients */
+ switch (data->id) {
+ case lm25066:
+ /* VOUT: 4.54 mV VAUX: 283.2 uV LSB */
+ ret = DIV_ROUND_CLOSEST(ret * 2832, 45400);
+ break;
+ case lm5064:
+ /* VOUT: 4.53 mV VAUX: 700 uV LSB */
+ ret = DIV_ROUND_CLOSEST(ret * 70, 453);
+ break;
+ case lm5066:
+ /* VOUT: 2.18 mV VAUX: 725 uV LSB */
+ ret = DIV_ROUND_CLOSEST(ret * 725, 2180);
+ break;
+ }
+ break;
+ default:
+ /* No other valid registers on page 1 */
+ ret = -EINVAL;
+ break;
+ }
+ goto done;
+ }
+
+ switch (reg) {
+ case PMBUS_READ_IIN:
+ ret = pmbus_read_word_data(client, 0, LM25066_MFR_READ_IIN);
+ break;
+ case PMBUS_READ_PIN:
+ ret = pmbus_read_word_data(client, 0, LM25066_MFR_READ_PIN);
+ break;
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, 0,
+ LM25066_MFR_IIN_OC_WARN_LIMIT);
+ break;
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, 0,
+ LM25066_MFR_PIN_OP_WARN_LIMIT);
+ break;
+ case PMBUS_VIRT_READ_VIN_AVG:
+ ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_VIN);
+ break;
+ case PMBUS_VIRT_READ_VOUT_AVG:
+ ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_VOUT);
+ break;
+ case PMBUS_VIRT_READ_IIN_AVG:
+ ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_IIN);
+ break;
+ case PMBUS_VIRT_READ_PIN_AVG:
+ ret = pmbus_read_word_data(client, 0, LM25066_READ_AVG_PIN);
+ break;
+ case PMBUS_VIRT_READ_PIN_MAX:
+ ret = pmbus_read_word_data(client, 0, LM25066_READ_PIN_PEAK);
+ break;
+ case PMBUS_VIRT_RESET_PIN_HISTORY:
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+done:
+ return ret;
+}
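As a sanity check on the page-1 VAUX-to-VOUT rescaling above (the raw count is arbitrary):

/*
 * lm25066, raw READ_VAUX = 1000:
 *   1000 * 283.2 uV = 283.2 mV at the VAUX input
 *   DIV_ROUND_CLOSEST(1000 * 2832, 45400) = 62 READ_VOUT LSBs
 *   62 * 4.54 mV = 281.5 mV, i.e. the same voltage to within one VOUT LSB
 */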
+
+static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ int ret;
+
+ if (page > 1)
+ return -EINVAL;
+
+ switch (reg) {
+ case PMBUS_IIN_OC_WARN_LIMIT:
+ ret = pmbus_write_word_data(client, 0,
+ LM25066_MFR_IIN_OC_WARN_LIMIT,
+ word);
+ break;
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ ret = pmbus_write_word_data(client, 0,
+ LM25066_MFR_PIN_OP_WARN_LIMIT,
+ word);
+ break;
+ case PMBUS_VIRT_RESET_PIN_HISTORY:
+ ret = pmbus_write_byte(client, 0, LM25066_CLEAR_PIN_PEAK);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int lm25066_write_byte(struct i2c_client *client, int page, u8 value)
+{
+ if (page > 1)
+ return -EINVAL;
+
+ if (page == 0)
+ return pmbus_write_byte(client, 0, value);
+
+ return 0;
+}
+
+static int lm25066_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int config;
+ int ret;
+ struct lm25066_data *data;
+ struct pmbus_driver_info *info;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE_DATA))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(struct lm25066_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ config = i2c_smbus_read_byte_data(client, LM25066_DEVICE_SETUP);
+ if (config < 0) {
+ ret = config;
+ goto err_mem;
+ }
+
+ data->id = id->driver_data;
+ info = &data->info;
+
+ info->pages = 2;
+ info->format[PSC_VOLTAGE_IN] = direct;
+ info->format[PSC_VOLTAGE_OUT] = direct;
+ info->format[PSC_CURRENT_IN] = direct;
+ info->format[PSC_TEMPERATURE] = direct;
+ info->format[PSC_POWER] = direct;
+
+ info->m[PSC_TEMPERATURE] = 16;
+ info->b[PSC_TEMPERATURE] = 0;
+ info->R[PSC_TEMPERATURE] = 0;
+
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_PIN | PMBUS_HAVE_IIN
+ | PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ info->func[1] = PMBUS_HAVE_VOUT;
+
+ info->read_word_data = lm25066_read_word_data;
+ info->write_word_data = lm25066_write_word_data;
+ info->write_byte = lm25066_write_byte;
+
+ switch (id->driver_data) {
+ case lm25066:
+ info->m[PSC_VOLTAGE_IN] = 22070;
+ info->b[PSC_VOLTAGE_IN] = 0;
+ info->R[PSC_VOLTAGE_IN] = -2;
+ info->m[PSC_VOLTAGE_OUT] = 22070;
+ info->b[PSC_VOLTAGE_OUT] = 0;
+ info->R[PSC_VOLTAGE_OUT] = -2;
+
+ if (config & LM25066_DEV_SETUP_CL) {
+ info->m[PSC_CURRENT_IN] = 6852;
+ info->b[PSC_CURRENT_IN] = 0;
+ info->R[PSC_CURRENT_IN] = -2;
+ info->m[PSC_POWER] = 369;
+ info->b[PSC_POWER] = 0;
+ info->R[PSC_POWER] = -2;
+ } else {
+ info->m[PSC_CURRENT_IN] = 13661;
+ info->b[PSC_CURRENT_IN] = 0;
+ info->R[PSC_CURRENT_IN] = -2;
+ info->m[PSC_POWER] = 736;
+ info->b[PSC_POWER] = 0;
+ info->R[PSC_POWER] = -2;
+ }
+ break;
+ case lm5064:
+ info->m[PSC_VOLTAGE_IN] = 22075;
+ info->b[PSC_VOLTAGE_IN] = 0;
+ info->R[PSC_VOLTAGE_IN] = -2;
+ info->m[PSC_VOLTAGE_OUT] = 22075;
+ info->b[PSC_VOLTAGE_OUT] = 0;
+ info->R[PSC_VOLTAGE_OUT] = -2;
+
+ if (config & LM25066_DEV_SETUP_CL) {
+ info->m[PSC_CURRENT_IN] = 6713;
+ info->b[PSC_CURRENT_IN] = 0;
+ info->R[PSC_CURRENT_IN] = -2;
+ info->m[PSC_POWER] = 3619;
+ info->b[PSC_POWER] = 0;
+ info->R[PSC_POWER] = -3;
+ } else {
+ info->m[PSC_CURRENT_IN] = 13426;
+ info->b[PSC_CURRENT_IN] = 0;
+ info->R[PSC_CURRENT_IN] = -2;
+ info->m[PSC_POWER] = 7238;
+ info->b[PSC_POWER] = 0;
+ info->R[PSC_POWER] = -3;
+ }
+ break;
+ case lm5066:
+ info->m[PSC_VOLTAGE_IN] = 4587;
+ info->b[PSC_VOLTAGE_IN] = 0;
+ info->R[PSC_VOLTAGE_IN] = -2;
+ info->m[PSC_VOLTAGE_OUT] = 4587;
+ info->b[PSC_VOLTAGE_OUT] = 0;
+ info->R[PSC_VOLTAGE_OUT] = -2;
+
+ if (config & LM25066_DEV_SETUP_CL) {
+ info->m[PSC_CURRENT_IN] = 10753;
+ info->b[PSC_CURRENT_IN] = 0;
+ info->R[PSC_CURRENT_IN] = -2;
+ info->m[PSC_POWER] = 1204;
+ info->b[PSC_POWER] = 0;
+ info->R[PSC_POWER] = -3;
+ } else {
+ info->m[PSC_CURRENT_IN] = 5405;
+ info->b[PSC_CURRENT_IN] = 0;
+ info->R[PSC_CURRENT_IN] = -2;
+ info->m[PSC_POWER] = 605;
+ info->b[PSC_POWER] = 0;
+ info->R[PSC_POWER] = -3;
+ }
+ break;
+ default:
+ ret = -ENODEV;
+ goto err_mem;
+ }
+
+ ret = pmbus_do_probe(client, id, info);
+ if (ret)
+ goto err_mem;
+ return 0;
+
+err_mem:
+ kfree(data);
+ return ret;
+}
+
+static int lm25066_remove(struct i2c_client *client)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ const struct lm25066_data *data = to_lm25066_data(info);
+ int ret;
+
+ ret = pmbus_do_remove(client);
+ kfree(data);
+ return ret;
+}
+
+static const struct i2c_device_id lm25066_id[] = {
+ {"lm25066", lm25066},
+ {"lm5064", lm5064},
+ {"lm5066", lm5066},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, lm25066_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver lm25066_driver = {
+ .driver = {
+ .name = "lm25066",
+ },
+ .probe = lm25066_probe,
+ .remove = lm25066_remove,
+ .id_table = lm25066_id,
+};
+
+static int __init lm25066_init(void)
+{
+ return i2c_add_driver(&lm25066_driver);
+}
+
+static void __exit lm25066_exit(void)
+{
+ i2c_del_driver(&lm25066_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for LM25066/LM5064/LM5066");
+MODULE_LICENSE("GPL");
+module_init(lm25066_init);
+module_exit(lm25066_exit);
diff --git a/drivers/hwmon/max16064.c b/drivers/hwmon/pmbus/max16064.c
index 1d6d717060d..e50b296e8db 100644
--- a/drivers/hwmon/max16064.c
+++ b/drivers/hwmon/pmbus/max16064.c
@@ -25,11 +25,60 @@
#include <linux/i2c.h>
#include "pmbus.h"
+#define MAX16064_MFR_VOUT_PEAK 0xd4
+#define MAX16064_MFR_TEMPERATURE_PEAK 0xd6
+
+static int max16064_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VOUT_MAX:
+ ret = pmbus_read_word_data(client, page,
+ MAX16064_MFR_VOUT_PEAK);
+ break;
+ case PMBUS_VIRT_READ_TEMP_MAX:
+ ret = pmbus_read_word_data(client, page,
+ MAX16064_MFR_TEMPERATURE_PEAK);
+ break;
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int max16064_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ ret = pmbus_write_word_data(client, page,
+ MAX16064_MFR_VOUT_PEAK, 0);
+ break;
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = pmbus_write_word_data(client, page,
+ MAX16064_MFR_TEMPERATURE_PEAK,
+ 0xffff);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
static struct pmbus_driver_info max16064_info = {
.pages = 4,
- .direct[PSC_VOLTAGE_IN] = true,
- .direct[PSC_VOLTAGE_OUT] = true,
- .direct[PSC_TEMPERATURE] = true,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
.m[PSC_VOLTAGE_IN] = 19995,
.b[PSC_VOLTAGE_IN] = 0,
.R[PSC_VOLTAGE_IN] = -1,
@@ -44,6 +93,8 @@ static struct pmbus_driver_info max16064_info = {
.func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
.func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
.func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .read_word_data = max16064_read_word_data,
+ .write_word_data = max16064_write_word_data,
};
static int max16064_probe(struct i2c_client *client,
diff --git a/drivers/hwmon/max34440.c b/drivers/hwmon/pmbus/max34440.c
index db11e1a175b..fda621d2e45 100644
--- a/drivers/hwmon/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -27,11 +27,70 @@
enum chips { max34440, max34441 };
+#define MAX34440_MFR_VOUT_PEAK 0xd4
+#define MAX34440_MFR_IOUT_PEAK 0xd5
+#define MAX34440_MFR_TEMPERATURE_PEAK 0xd6
+
#define MAX34440_STATUS_OC_WARN (1 << 0)
#define MAX34440_STATUS_OC_FAULT (1 << 1)
#define MAX34440_STATUS_OT_FAULT (1 << 5)
#define MAX34440_STATUS_OT_WARN (1 << 6)
+static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VOUT_MAX:
+ ret = pmbus_read_word_data(client, page,
+ MAX34440_MFR_VOUT_PEAK);
+ break;
+ case PMBUS_VIRT_READ_IOUT_MAX:
+ ret = pmbus_read_word_data(client, page,
+ MAX34440_MFR_IOUT_PEAK);
+ break;
+ case PMBUS_VIRT_READ_TEMP_MAX:
+ ret = pmbus_read_word_data(client, page,
+ MAX34440_MFR_TEMPERATURE_PEAK);
+ break;
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int max34440_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ ret = pmbus_write_word_data(client, page,
+ MAX34440_MFR_VOUT_PEAK, 0);
+ break;
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ ret = pmbus_write_word_data(client, page,
+ MAX34440_MFR_IOUT_PEAK, 0);
+ break;
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = pmbus_write_word_data(client, page,
+ MAX34440_MFR_TEMPERATURE_PEAK,
+ 0xffff);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
{
int ret;
@@ -72,10 +131,10 @@ static int max34440_read_byte_data(struct i2c_client *client, int page, int reg)
static struct pmbus_driver_info max34440_info[] = {
[max34440] = {
.pages = 14,
- .direct[PSC_VOLTAGE_IN] = true,
- .direct[PSC_VOLTAGE_OUT] = true,
- .direct[PSC_TEMPERATURE] = true,
- .direct[PSC_CURRENT_OUT] = true,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
.m[PSC_VOLTAGE_IN] = 1,
.b[PSC_VOLTAGE_IN] = 0,
.R[PSC_VOLTAGE_IN] = 3, /* R = 0 in datasheet reflects mV */
@@ -109,14 +168,16 @@ static struct pmbus_driver_info max34440_info[] = {
.func[12] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
},
[max34441] = {
.pages = 12,
- .direct[PSC_VOLTAGE_IN] = true,
- .direct[PSC_VOLTAGE_OUT] = true,
- .direct[PSC_TEMPERATURE] = true,
- .direct[PSC_CURRENT_OUT] = true,
- .direct[PSC_FAN] = true,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_FAN] = direct,
.m[PSC_VOLTAGE_IN] = 1,
.b[PSC_VOLTAGE_IN] = 0,
.R[PSC_VOLTAGE_IN] = 3,
@@ -150,6 +211,8 @@ static struct pmbus_driver_info max34440_info[] = {
.func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_byte_data = max34440_read_byte_data,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
},
};
diff --git a/drivers/hwmon/max8688.c b/drivers/hwmon/pmbus/max8688.c
index 7fb93f4e9f2..c3e72f1a3cf 100644
--- a/drivers/hwmon/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -25,6 +25,9 @@
#include <linux/i2c.h>
#include "pmbus.h"
+#define MAX8688_MFR_VOUT_PEAK 0xd4
+#define MAX8688_MFR_IOUT_PEAK 0xd5
+#define MAX8688_MFR_TEMPERATURE_PEAK 0xd6
#define MAX8688_MFG_STATUS 0xd8
#define MAX8688_STATUS_OC_FAULT (1 << 4)
@@ -37,6 +40,62 @@
#define MAX8688_STATUS_OT_FAULT (1 << 13)
#define MAX8688_STATUS_OT_WARNING (1 << 14)
+static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ if (page)
+ return -EINVAL;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VOUT_MAX:
+ ret = pmbus_read_word_data(client, 0, MAX8688_MFR_VOUT_PEAK);
+ break;
+ case PMBUS_VIRT_READ_IOUT_MAX:
+ ret = pmbus_read_word_data(client, 0, MAX8688_MFR_IOUT_PEAK);
+ break;
+ case PMBUS_VIRT_READ_TEMP_MAX:
+ ret = pmbus_read_word_data(client, 0,
+ MAX8688_MFR_TEMPERATURE_PEAK);
+ break;
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = 0;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int max8688_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_RESET_VOUT_HISTORY:
+ ret = pmbus_write_word_data(client, 0, MAX8688_MFR_VOUT_PEAK,
+ 0);
+ break;
+ case PMBUS_VIRT_RESET_IOUT_HISTORY:
+ ret = pmbus_write_word_data(client, 0, MAX8688_MFR_IOUT_PEAK,
+ 0);
+ break;
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = pmbus_write_word_data(client, 0,
+ MAX8688_MFR_TEMPERATURE_PEAK,
+ 0xffff);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
{
int ret = 0;
@@ -91,10 +150,10 @@ static int max8688_read_byte_data(struct i2c_client *client, int page, int reg)
static struct pmbus_driver_info max8688_info = {
.pages = 1,
- .direct[PSC_VOLTAGE_IN] = true,
- .direct[PSC_VOLTAGE_OUT] = true,
- .direct[PSC_TEMPERATURE] = true,
- .direct[PSC_CURRENT_OUT] = true,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
.m[PSC_VOLTAGE_IN] = 19995,
.b[PSC_VOLTAGE_IN] = 0,
.R[PSC_VOLTAGE_IN] = -1,
@@ -111,6 +170,8 @@ static struct pmbus_driver_info max8688_info = {
| PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
| PMBUS_HAVE_STATUS_TEMP,
.read_byte_data = max8688_read_byte_data,
+ .read_word_data = max8688_read_word_data,
+ .write_word_data = max8688_write_word_data,
};
static int max8688_probe(struct i2c_client *client,
diff --git a/drivers/hwmon/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 98e2e28899e..73de9f1f319 100644
--- a/drivers/hwmon/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -47,22 +47,29 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
if (info->func[0]
&& pmbus_check_byte_register(client, 0, PMBUS_STATUS_INPUT))
info->func[0] |= PMBUS_HAVE_STATUS_INPUT;
- if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
+ if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_12) &&
+ pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
info->func[0] |= PMBUS_HAVE_FAN12;
if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_12))
info->func[0] |= PMBUS_HAVE_STATUS_FAN12;
}
- if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
+ if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_34) &&
+ pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
info->func[0] |= PMBUS_HAVE_FAN34;
if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_34))
info->func[0] |= PMBUS_HAVE_STATUS_FAN34;
}
- if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1)) {
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1))
info->func[0] |= PMBUS_HAVE_TEMP;
- if (pmbus_check_byte_register(client, 0,
- PMBUS_STATUS_TEMPERATURE))
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_2))
+ info->func[0] |= PMBUS_HAVE_TEMP2;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_3))
+ info->func[0] |= PMBUS_HAVE_TEMP3;
+ if (info->func[0] & (PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2
+ | PMBUS_HAVE_TEMP3)
+ && pmbus_check_byte_register(client, 0,
+ PMBUS_STATUS_TEMPERATURE))
info->func[0] |= PMBUS_HAVE_STATUS_TEMP;
- }
/* Sensors detected on all pages */
for (page = 0; page < info->pages; page++) {
@@ -89,6 +96,8 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
static int pmbus_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
+ int ret = 0;
+
if (!info->pages) {
/*
* Check if the PAGE command is supported. If it is,
@@ -110,6 +119,27 @@ static int pmbus_identify(struct i2c_client *client,
}
}
+ if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
+ int vout_mode;
+
+ vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (vout_mode >= 0 && vout_mode != 0xff) {
+ switch (vout_mode >> 5) {
+ case 0:
+ break;
+ case 1:
+ info->format[PSC_VOLTAGE_OUT] = vid;
+ break;
+ case 2:
+ info->format[PSC_VOLTAGE_OUT] = direct;
+ break;
+ default:
+ ret = -ENODEV;
+ goto abort;
+ }
+ }
+ }
+
/*
* We should check if the COEFFICIENTS register is supported.
* If it is, and the chip is configured for direct mode, we can read
@@ -118,13 +148,18 @@ static int pmbus_identify(struct i2c_client *client,
*
* To do this, we will need access to a chip which actually supports the
* COEFFICIENTS command, since the command is too complex to implement
- * without testing it.
+ * without testing it. Until then, abort if a chip configured for direct
+ * mode was detected.
*/
+ if (info->format[PSC_VOLTAGE_OUT] == direct) {
+ ret = -ENODEV;
+ goto abort;
+ }
/* Try to find sensor groups */
pmbus_find_sensor_groups(client, info);
-
- return 0;
+abort:
+ return ret;
}
static int pmbus_probe(struct i2c_client *client,
@@ -165,11 +200,14 @@ static int pmbus_remove(struct i2c_client *client)
* Use driver_data to set the number of pages supported by the chip.
*/
static const struct i2c_device_id pmbus_id[] = {
+ {"adp4000", 1},
{"bmr450", 1},
{"bmr451", 1},
{"bmr453", 1},
{"bmr454", 1},
{"ltc2978", 8},
+ {"ncp4200", 1},
+ {"ncp4208", 1},
{"pmbus", 0},
{}
};
diff --git a/drivers/hwmon/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 50647ab7235..a6ae20ffef6 100644
--- a/drivers/hwmon/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -126,6 +126,42 @@
#define PMBUS_MFR_SERIAL 0x9E
/*
+ * Virtual registers.
+ * Useful to support attributes which are not supported by standard PMBus
+ * registers but exist as manufacturer specific registers on individual chips.
+ * Must be mapped to real registers in device specific code.
+ *
+ * Semantics:
+ * Virtual registers are all word size.
+ * READ registers are read-only; writes are either ignored or return an error.
+ * RESET registers are read/write. Reading returns zero (used for detection),
+ * writing any value causes the associated history to be reset.
+ */
+#define PMBUS_VIRT_BASE 0x100
+#define PMBUS_VIRT_READ_TEMP_MIN (PMBUS_VIRT_BASE + 0)
+#define PMBUS_VIRT_READ_TEMP_MAX (PMBUS_VIRT_BASE + 1)
+#define PMBUS_VIRT_RESET_TEMP_HISTORY (PMBUS_VIRT_BASE + 2)
+#define PMBUS_VIRT_READ_VIN_AVG (PMBUS_VIRT_BASE + 3)
+#define PMBUS_VIRT_READ_VIN_MIN (PMBUS_VIRT_BASE + 4)
+#define PMBUS_VIRT_READ_VIN_MAX (PMBUS_VIRT_BASE + 5)
+#define PMBUS_VIRT_RESET_VIN_HISTORY (PMBUS_VIRT_BASE + 6)
+#define PMBUS_VIRT_READ_IIN_AVG (PMBUS_VIRT_BASE + 7)
+#define PMBUS_VIRT_READ_IIN_MIN (PMBUS_VIRT_BASE + 8)
+#define PMBUS_VIRT_READ_IIN_MAX (PMBUS_VIRT_BASE + 9)
+#define PMBUS_VIRT_RESET_IIN_HISTORY (PMBUS_VIRT_BASE + 10)
+#define PMBUS_VIRT_READ_PIN_AVG (PMBUS_VIRT_BASE + 11)
+#define PMBUS_VIRT_READ_PIN_MAX (PMBUS_VIRT_BASE + 12)
+#define PMBUS_VIRT_RESET_PIN_HISTORY (PMBUS_VIRT_BASE + 13)
+#define PMBUS_VIRT_READ_VOUT_AVG (PMBUS_VIRT_BASE + 14)
+#define PMBUS_VIRT_READ_VOUT_MIN (PMBUS_VIRT_BASE + 15)
+#define PMBUS_VIRT_READ_VOUT_MAX (PMBUS_VIRT_BASE + 16)
+#define PMBUS_VIRT_RESET_VOUT_HISTORY (PMBUS_VIRT_BASE + 17)
+#define PMBUS_VIRT_READ_IOUT_AVG (PMBUS_VIRT_BASE + 18)
+#define PMBUS_VIRT_READ_IOUT_MIN (PMBUS_VIRT_BASE + 19)
+#define PMBUS_VIRT_READ_IOUT_MAX (PMBUS_VIRT_BASE + 20)
+#define PMBUS_VIRT_RESET_IOUT_HISTORY (PMBUS_VIRT_BASE + 21)
+
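The ADM1275, MAX16064, MAX34440 and MAX8688 handlers elsewhere in this patch all back these virtual registers in the same way; as a condensed illustration, a hypothetical chip with a single vendor peak register might do the following (register address and function names are invented for this sketch):

#define XYZ_MFR_VOUT_PEAK	0xd4	/* hypothetical vendor register */

static int xyz_read_word_data(struct i2c_client *client, int page, int reg)
{
	switch (reg) {
	case PMBUS_VIRT_READ_VOUT_MAX:
		return pmbus_read_word_data(client, page, XYZ_MFR_VOUT_PEAK);
	case PMBUS_VIRT_RESET_VOUT_HISTORY:
		return 0;		/* reading a RESET register returns zero */
	default:
		return -ENODATA;	/* let the core fall back to generic access */
	}
}

static int xyz_write_word_data(struct i2c_client *client, int page, int reg,
			       u16 word)
{
	if (reg == PMBUS_VIRT_RESET_VOUT_HISTORY)
		return pmbus_write_word_data(client, page, XYZ_MFR_VOUT_PEAK, 0);
	return -ENODATA;
}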
+/*
* CAPABILITY
*/
#define PB_CAPABILITY_SMBALERT (1<<4)
@@ -266,11 +302,11 @@ enum pmbus_sensor_classes {
#define PMBUS_HAVE_STATUS_FAN12 (1 << 16)
#define PMBUS_HAVE_STATUS_FAN34 (1 << 17)
+enum pmbus_data_format { linear = 0, direct, vid };
+
struct pmbus_driver_info {
int pages; /* Total number of pages */
- bool direct[PSC_NUM_CLASSES];
- /* true if device uses direct data format
- for the given sensor class */
+ enum pmbus_data_format format[PSC_NUM_CLASSES];
/*
* Support one set of coefficients for each sensor type
* Used for chips providing data in direct mode.
@@ -286,6 +322,10 @@ struct pmbus_driver_info {
* necessary.
*/
int (*read_byte_data)(struct i2c_client *client, int page, int reg);
+ int (*read_word_data)(struct i2c_client *client, int page, int reg);
+ int (*write_word_data)(struct i2c_client *client, int page, int reg,
+ u16 word);
+ int (*write_byte)(struct i2c_client *client, int page, u8 value);
/*
* The identify function determines supported PMBus functionality.
* This function is only necessary if a chip driver supports multiple
@@ -299,6 +339,9 @@ struct pmbus_driver_info {
int pmbus_set_page(struct i2c_client *client, u8 page);
int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
+int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word);
+int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
+int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
void pmbus_clear_faults(struct i2c_client *client);
bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 354770ed318..a561c3a0e91 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -33,14 +33,18 @@
/*
* Constants needed to determine number of sensors, booleans, and labels.
*/
-#define PMBUS_MAX_INPUT_SENSORS 11 /* 6*volt, 3*curr, 2*power */
-#define PMBUS_VOUT_SENSORS_PER_PAGE 5 /* input, min, max, lcrit,
- crit */
-#define PMBUS_IOUT_SENSORS_PER_PAGE 4 /* input, min, max, crit */
+#define PMBUS_MAX_INPUT_SENSORS 22 /* 10*volt, 7*curr, 5*power */
+#define PMBUS_VOUT_SENSORS_PER_PAGE 9 /* input, min, max, lcrit,
+ crit, lowest, highest, avg,
+ reset */
+#define PMBUS_IOUT_SENSORS_PER_PAGE 8 /* input, min, max, crit,
+ lowest, highest, avg,
+ reset */
#define PMBUS_POUT_SENSORS_PER_PAGE 4 /* input, cap, max, crit */
#define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */
-#define PMBUS_MAX_SENSORS_PER_TEMP 5 /* input, min, max, lcrit,
- crit */
+#define PMBUS_MAX_SENSORS_PER_TEMP 8 /* input, min, max, lcrit,
+ crit, lowest, highest,
+ reset */
#define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm,
lcrit_alarm, crit_alarm;
@@ -74,11 +78,13 @@
#define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
#define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1)
+#define PMBUS_NAME_SIZE 24
+
struct pmbus_sensor {
- char name[I2C_NAME_SIZE]; /* sysfs sensor name */
+ char name[PMBUS_NAME_SIZE]; /* sysfs sensor name */
struct sensor_device_attribute attribute;
u8 page; /* page number */
- u8 reg; /* register */
+ u16 reg; /* register */
enum pmbus_sensor_classes class; /* sensor class */
bool update; /* runtime sensor update needed */
int data; /* Sensor data.
@@ -86,14 +92,14 @@ struct pmbus_sensor {
};
struct pmbus_boolean {
- char name[I2C_NAME_SIZE]; /* sysfs boolean name */
+ char name[PMBUS_NAME_SIZE]; /* sysfs boolean name */
struct sensor_device_attribute attribute;
};
struct pmbus_label {
- char name[I2C_NAME_SIZE]; /* sysfs label name */
+ char name[PMBUS_NAME_SIZE]; /* sysfs label name */
struct sensor_device_attribute attribute;
- char label[I2C_NAME_SIZE]; /* label */
+ char label[PMBUS_NAME_SIZE]; /* label */
};
struct pmbus_data {
@@ -162,19 +168,39 @@ int pmbus_set_page(struct i2c_client *client, u8 page)
}
EXPORT_SYMBOL_GPL(pmbus_set_page);
-static int pmbus_write_byte(struct i2c_client *client, u8 page, u8 value)
+int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
{
int rv;
- rv = pmbus_set_page(client, page);
- if (rv < 0)
- return rv;
+ if (page >= 0) {
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+ }
return i2c_smbus_write_byte(client, value);
}
+EXPORT_SYMBOL_GPL(pmbus_write_byte);
+
+/*
+ * _pmbus_write_byte() is similar to pmbus_write_byte(), but checks if
+ * a device specific mapping function exists and calls it if necessary.
+ */
+static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+ int status;
+
+ if (info->write_byte) {
+ status = info->write_byte(client, page, value);
+ if (status != -ENODATA)
+ return status;
+ }
+ return pmbus_write_byte(client, page, value);
+}
-static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg,
- u16 word)
+int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word)
{
int rv;
@@ -184,6 +210,28 @@ static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg,
return i2c_smbus_write_word_data(client, reg, word);
}
+EXPORT_SYMBOL_GPL(pmbus_write_word_data);
+
+/*
+ * _pmbus_write_word_data() is similar to pmbus_write_word_data(), but checks if
+ * a device specific mapping function exists and calls it if necessary.
+ */
+static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+ int status;
+
+ if (info->write_word_data) {
+ status = info->write_word_data(client, page, reg, word);
+ if (status != -ENODATA)
+ return status;
+ }
+ if (reg >= PMBUS_VIRT_BASE)
+ return -EINVAL;
+ return pmbus_write_word_data(client, page, reg, word);
+}
int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
{
@@ -197,20 +245,61 @@ int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
}
EXPORT_SYMBOL_GPL(pmbus_read_word_data);
-static int pmbus_read_byte_data(struct i2c_client *client, u8 page, u8 reg)
+/*
+ * _pmbus_read_word_data() is similar to pmbus_read_word_data(), but checks if
+ * a device specific mapping function exists and calls it if necessary.
+ */
+static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+ int status;
+
+ if (info->read_word_data) {
+ status = info->read_word_data(client, page, reg);
+ if (status != -ENODATA)
+ return status;
+ }
+ if (reg >= PMBUS_VIRT_BASE)
+ return -EINVAL;
+ return pmbus_read_word_data(client, page, reg);
+}
+
+int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
- rv = pmbus_set_page(client, page);
- if (rv < 0)
- return rv;
+ if (page >= 0) {
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+ }
return i2c_smbus_read_byte_data(client, reg);
}
+EXPORT_SYMBOL_GPL(pmbus_read_byte_data);
+
+/*
+ * _pmbus_read_byte_data() is similar to pmbus_read_byte_data(), but checks if
+ * a device specific mapping function exists and calls it if necessary.
+ */
+static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+ int status;
+
+ if (info->read_byte_data) {
+ status = info->read_byte_data(client, page, reg);
+ if (status != -ENODATA)
+ return status;
+ }
+ return pmbus_read_byte_data(client, page, reg);
+}
static void pmbus_clear_fault_page(struct i2c_client *client, int page)
{
- pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
+ _pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
}
void pmbus_clear_faults(struct i2c_client *client)
@@ -223,13 +312,13 @@ void pmbus_clear_faults(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_clear_faults);
-static int pmbus_check_status_cml(struct i2c_client *client, int page)
+static int pmbus_check_status_cml(struct i2c_client *client)
{
int status, status2;
- status = pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE);
+ status = pmbus_read_byte_data(client, -1, PMBUS_STATUS_BYTE);
if (status < 0 || (status & PB_STATUS_CML)) {
- status2 = pmbus_read_byte_data(client, page, PMBUS_STATUS_CML);
+ status2 = pmbus_read_byte_data(client, -1, PMBUS_STATUS_CML);
if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND))
return -EINVAL;
}
@@ -241,10 +330,10 @@ bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
int rv;
struct pmbus_data *data = i2c_get_clientdata(client);
- rv = pmbus_read_byte_data(client, page, reg);
+ rv = _pmbus_read_byte_data(client, page, reg);
if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
- rv = pmbus_check_status_cml(client, page);
- pmbus_clear_fault_page(client, page);
+ rv = pmbus_check_status_cml(client);
+ pmbus_clear_fault_page(client, -1);
return rv >= 0;
}
EXPORT_SYMBOL_GPL(pmbus_check_byte_register);
@@ -254,10 +343,10 @@ bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
int rv;
struct pmbus_data *data = i2c_get_clientdata(client);
- rv = pmbus_read_word_data(client, page, reg);
+ rv = _pmbus_read_word_data(client, page, reg);
if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
- rv = pmbus_check_status_cml(client, page);
- pmbus_clear_fault_page(client, page);
+ rv = pmbus_check_status_cml(client);
+ pmbus_clear_fault_page(client, -1);
return rv >= 0;
}
EXPORT_SYMBOL_GPL(pmbus_check_word_register);
@@ -270,24 +359,6 @@ const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_get_driver_info);
-/*
- * _pmbus_read_byte_data() is similar to pmbus_read_byte_data(), but checks if
- * a device specific mapping funcion exists and calls it if necessary.
- */
-static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
-{
- struct pmbus_data *data = i2c_get_clientdata(client);
- const struct pmbus_driver_info *info = data->info;
- int status;
-
- if (info->read_byte_data) {
- status = info->read_byte_data(client, page, reg);
- if (status != -ENODATA)
- return status;
- }
- return pmbus_read_byte_data(client, page, reg);
-}
-
static struct pmbus_data *pmbus_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -347,8 +418,9 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
if (!data->valid || sensor->update)
sensor->data
- = pmbus_read_word_data(client, sensor->page,
- sensor->reg);
+ = _pmbus_read_word_data(client,
+ sensor->page,
+ sensor->reg);
}
pmbus_clear_faults(client);
data->last_updated = jiffies;
@@ -362,8 +434,8 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
* Convert linear sensor values to milli- or micro-units
* depending on sensor type.
*/
-static int pmbus_reg2data_linear(struct pmbus_data *data,
- struct pmbus_sensor *sensor)
+static long pmbus_reg2data_linear(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
{
s16 exponent;
s32 mantissa;
@@ -397,15 +469,15 @@ static int pmbus_reg2data_linear(struct pmbus_data *data,
else
val >>= -exponent;
- return (int)val;
+ return val;
}
/*
* Convert direct sensor values to milli- or micro-units
* depending on sensor type.
*/
-static int pmbus_reg2data_direct(struct pmbus_data *data,
- struct pmbus_sensor *sensor)
+static long pmbus_reg2data_direct(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
{
long val = (s16) sensor->data;
long m, b, R;
@@ -440,18 +512,40 @@ static int pmbus_reg2data_direct(struct pmbus_data *data,
R++;
}
- return (int)((val - b) / m);
+ return (val - b) / m;
}
-static int pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
+/*
+ * Convert VID sensor values to milli- or micro-units
+ * depending on sensor type.
+ * We currently only support VR11.
+ */
+static long pmbus_reg2data_vid(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
{
- int val;
+ long val = sensor->data;
- if (data->info->direct[sensor->class])
+ if (val < 0x02 || val > 0xb2)
+ return 0;
+ return DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
+}
+
+static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
+{
+ long val;
+
+ switch (data->info->format[sensor->class]) {
+ case direct:
val = pmbus_reg2data_direct(data, sensor);
- else
+ break;
+ case vid:
+ val = pmbus_reg2data_vid(data, sensor);
+ break;
+ case linear:
+ default:
val = pmbus_reg2data_linear(data, sensor);
-
+ break;
+ }
return val;
}
@@ -561,16 +655,31 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
return val;
}
+static u16 pmbus_data2reg_vid(struct pmbus_data *data,
+ enum pmbus_sensor_classes class, long val)
+{
+ val = SENSORS_LIMIT(val, 500, 1600);
+
+ return 2 + DIV_ROUND_CLOSEST((1600 - val) * 100, 625);
+}
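pmbus_data2reg_vid() is the inverse: the requested millivolt value is clamped to the VR11 range (500 mV to 1600 mV, i.e. codes 0xb2 down to 0x02) and converted back to a VID code. For example:

/*
 * val = 1200 mV: 2 + DIV_ROUND_CLOSEST((1600 - 1200) * 100, 625)
 *              = 2 + DIV_ROUND_CLOSEST(40000, 625)
 *              = 2 + 64 = 66 (0x42)
 */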
+
static u16 pmbus_data2reg(struct pmbus_data *data,
enum pmbus_sensor_classes class, long val)
{
u16 regval;
- if (data->info->direct[class])
+ switch (data->info->format[class]) {
+ case direct:
regval = pmbus_data2reg_direct(data, class, val);
- else
+ break;
+ case vid:
+ regval = pmbus_data2reg_vid(data, class, val);
+ break;
+ case linear:
+ default:
regval = pmbus_data2reg_linear(data, class, val);
-
+ break;
+ }
return regval;
}
@@ -619,7 +728,7 @@ static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
if (!s1 && !s2)
*val = !!regval;
else {
- int v1, v2;
+ long v1, v2;
struct pmbus_sensor *sensor1, *sensor2;
sensor1 = &data->sensors[s1];
@@ -661,7 +770,7 @@ static ssize_t pmbus_show_sensor(struct device *dev,
if (sensor->data < 0)
return sensor->data;
- return snprintf(buf, PAGE_SIZE, "%d\n", pmbus_reg2data(data, sensor));
+ return snprintf(buf, PAGE_SIZE, "%ld\n", pmbus_reg2data(data, sensor));
}
static ssize_t pmbus_set_sensor(struct device *dev,
@@ -682,7 +791,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
mutex_lock(&data->update_lock);
regval = pmbus_data2reg(data, sensor->class, val);
- ret = pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
+ ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
if (ret < 0)
rv = ret;
else
@@ -867,7 +976,8 @@ static void pmbus_find_max_attr(struct i2c_client *client,
* and its associated alarm attribute.
*/
struct pmbus_limit_attr {
- u8 reg; /* Limit register */
+ u16 reg; /* Limit register */
+ bool update; /* True if register needs updates */
const char *attr; /* Attribute name */
const char *alarm; /* Alarm attribute name */
u32 sbit; /* Alarm attribute status bit */
@@ -912,9 +1022,10 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,
if (pmbus_check_word_register(client, page, l->reg)) {
cindex = data->num_sensors;
pmbus_add_sensor(data, name, l->attr, index, page,
- l->reg, attr->class, attr->update,
+ l->reg, attr->class,
+ attr->update || l->update,
false);
- if (info->func[page] & attr->sfunc) {
+ if (l->sbit && (info->func[page] & attr->sfunc)) {
if (attr->compare) {
pmbus_add_boolean_cmp(data, name,
l->alarm, index,
@@ -953,9 +1064,11 @@ static void pmbus_add_sensor_attrs_one(struct i2c_client *client,
index, page, cbase, attr);
/*
* Add generic alarm attribute only if there are no individual
- * alarm attributes, and if there is a global alarm bit.
+ * alarm attributes, if there is a global alarm bit, and if
+ * the generic status register for this page is accessible.
*/
- if (!have_alarm && attr->gbit)
+ if (!have_alarm && attr->gbit &&
+ pmbus_check_byte_register(client, page, PMBUS_STATUS_BYTE))
pmbus_add_boolean_reg(data, name, "alarm", index,
PB_STATUS_BASE + page,
attr->gbit);
@@ -1008,6 +1121,21 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = {
.attr = "crit",
.alarm = "crit_alarm",
.sbit = PB_VOLTAGE_OV_FAULT,
+ }, {
+ .reg = PMBUS_VIRT_READ_VIN_AVG,
+ .update = true,
+ .attr = "average",
+ }, {
+ .reg = PMBUS_VIRT_READ_VIN_MIN,
+ .update = true,
+ .attr = "lowest",
+ }, {
+ .reg = PMBUS_VIRT_READ_VIN_MAX,
+ .update = true,
+ .attr = "highest",
+ }, {
+ .reg = PMBUS_VIRT_RESET_VIN_HISTORY,
+ .attr = "reset_history",
},
};
@@ -1032,6 +1160,21 @@ static const struct pmbus_limit_attr vout_limit_attrs[] = {
.attr = "crit",
.alarm = "crit_alarm",
.sbit = PB_VOLTAGE_OV_FAULT,
+ }, {
+ .reg = PMBUS_VIRT_READ_VOUT_AVG,
+ .update = true,
+ .attr = "average",
+ }, {
+ .reg = PMBUS_VIRT_READ_VOUT_MIN,
+ .update = true,
+ .attr = "lowest",
+ }, {
+ .reg = PMBUS_VIRT_READ_VOUT_MAX,
+ .update = true,
+ .attr = "highest",
+ }, {
+ .reg = PMBUS_VIRT_RESET_VOUT_HISTORY,
+ .attr = "reset_history",
}
};
@@ -1078,6 +1221,21 @@ static const struct pmbus_limit_attr iin_limit_attrs[] = {
.attr = "crit",
.alarm = "crit_alarm",
.sbit = PB_IIN_OC_FAULT,
+ }, {
+ .reg = PMBUS_VIRT_READ_IIN_AVG,
+ .update = true,
+ .attr = "average",
+ }, {
+ .reg = PMBUS_VIRT_READ_IIN_MIN,
+ .update = true,
+ .attr = "lowest",
+ }, {
+ .reg = PMBUS_VIRT_READ_IIN_MAX,
+ .update = true,
+ .attr = "highest",
+ }, {
+ .reg = PMBUS_VIRT_RESET_IIN_HISTORY,
+ .attr = "reset_history",
}
};
@@ -1097,6 +1255,21 @@ static const struct pmbus_limit_attr iout_limit_attrs[] = {
.attr = "crit",
.alarm = "crit_alarm",
.sbit = PB_IOUT_OC_FAULT,
+ }, {
+ .reg = PMBUS_VIRT_READ_IOUT_AVG,
+ .update = true,
+ .attr = "average",
+ }, {
+ .reg = PMBUS_VIRT_READ_IOUT_MIN,
+ .update = true,
+ .attr = "lowest",
+ }, {
+ .reg = PMBUS_VIRT_READ_IOUT_MAX,
+ .update = true,
+ .attr = "highest",
+ }, {
+ .reg = PMBUS_VIRT_RESET_IOUT_HISTORY,
+ .attr = "reset_history",
}
};
@@ -1132,6 +1305,17 @@ static const struct pmbus_limit_attr pin_limit_attrs[] = {
.attr = "max",
.alarm = "alarm",
.sbit = PB_PIN_OP_WARNING,
+ }, {
+ .reg = PMBUS_VIRT_READ_PIN_AVG,
+ .update = true,
+ .attr = "average",
+ }, {
+ .reg = PMBUS_VIRT_READ_PIN_MAX,
+ .update = true,
+ .attr = "input_highest",
+ }, {
+ .reg = PMBUS_VIRT_RESET_PIN_HISTORY,
+ .attr = "reset_history",
}
};
@@ -1200,6 +1384,39 @@ static const struct pmbus_limit_attr temp_limit_attrs[] = {
.attr = "crit",
.alarm = "crit_alarm",
.sbit = PB_TEMP_OT_FAULT,
+ }, {
+ .reg = PMBUS_VIRT_READ_TEMP_MIN,
+ .attr = "lowest",
+ }, {
+ .reg = PMBUS_VIRT_READ_TEMP_MAX,
+ .attr = "highest",
+ }, {
+ .reg = PMBUS_VIRT_RESET_TEMP_HISTORY,
+ .attr = "reset_history",
+ }
+};
+
+static const struct pmbus_limit_attr temp_limit_attrs23[] = {
+ {
+ .reg = PMBUS_UT_WARN_LIMIT,
+ .attr = "min",
+ .alarm = "min_alarm",
+ .sbit = PB_TEMP_UT_WARNING,
+ }, {
+ .reg = PMBUS_UT_FAULT_LIMIT,
+ .attr = "lcrit",
+ .alarm = "lcrit_alarm",
+ .sbit = PB_TEMP_UT_FAULT,
+ }, {
+ .reg = PMBUS_OT_WARN_LIMIT,
+ .attr = "max",
+ .alarm = "max_alarm",
+ .sbit = PB_TEMP_OT_WARNING,
+ }, {
+ .reg = PMBUS_OT_FAULT_LIMIT,
+ .attr = "crit",
+ .alarm = "crit_alarm",
+ .sbit = PB_TEMP_OT_FAULT,
}
};
@@ -1226,8 +1443,8 @@ static const struct pmbus_sensor_attr temp_attributes[] = {
.sfunc = PMBUS_HAVE_STATUS_TEMP,
.sbase = PB_STATUS_TEMP_BASE,
.gbit = PB_STATUS_TEMPERATURE,
- .limit = temp_limit_attrs,
- .nlimit = ARRAY_SIZE(temp_limit_attrs),
+ .limit = temp_limit_attrs23,
+ .nlimit = ARRAY_SIZE(temp_limit_attrs23),
}, {
.reg = PMBUS_READ_TEMPERATURE_3,
.class = PSC_TEMPERATURE,
@@ -1238,8 +1455,8 @@ static const struct pmbus_sensor_attr temp_attributes[] = {
.sfunc = PMBUS_HAVE_STATUS_TEMP,
.sbase = PB_STATUS_TEMP_BASE,
.gbit = PB_STATUS_TEMPERATURE,
- .limit = temp_limit_attrs,
- .nlimit = ARRAY_SIZE(temp_limit_attrs),
+ .limit = temp_limit_attrs23,
+ .nlimit = ARRAY_SIZE(temp_limit_attrs23),
}
};
@@ -1380,7 +1597,7 @@ static int pmbus_identify_common(struct i2c_client *client,
*/
switch (vout_mode >> 5) {
case 0: /* linear mode */
- if (data->info->direct[PSC_VOLTAGE_OUT])
+ if (data->info->format[PSC_VOLTAGE_OUT] != linear)
return -ENODEV;
exponent = vout_mode & 0x1f;
@@ -1389,8 +1606,12 @@ static int pmbus_identify_common(struct i2c_client *client,
exponent |= ~0x1f;
data->exponent = exponent;
break;
+ case 1: /* VID mode */
+ if (data->info->format[PSC_VOLTAGE_OUT] != vid)
+ return -ENODEV;
+ break;
case 2: /* direct mode */
- if (!data->info->direct[PSC_VOLTAGE_OUT])
+ if (data->info->format[PSC_VOLTAGE_OUT] != direct)
return -ENODEV;
break;
default:
@@ -1430,14 +1651,9 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
- /*
- * Bail out if status register or PMBus revision register
- * does not exist.
- */
- if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0
- || i2c_smbus_read_byte_data(client, PMBUS_REVISION) < 0) {
- dev_err(&client->dev,
- "Status or revision register not found\n");
+ /* Bail out if PMBus status register does not exist. */
+ if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0) {
+ dev_err(&client->dev, "PMBus status register not found\n");
ret = -ENODEV;
goto out_data;
}
@@ -1462,18 +1678,6 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
ret = -EINVAL;
goto out_data;
}
- /*
- * Bail out if more than one page was configured, but we can not
- * select the highest page. This is an indication that the wrong
- * chip type was selected. Better bail out now than keep
- * returning errors later on.
- */
- if (info->pages > 1 && pmbus_set_page(client, info->pages - 1) < 0) {
- dev_err(&client->dev, "Failed to select page %d\n",
- info->pages - 1);
- ret = -EINVAL;
- goto out_data;
- }
ret = pmbus_identify_common(client, data);
if (ret < 0) {
diff --git a/drivers/hwmon/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index ace1c731973..ace1c731973 100644
--- a/drivers/hwmon/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
diff --git a/drivers/hwmon/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index ffcc1cf3609..ffcc1cf3609 100644
--- a/drivers/hwmon/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 020c87273ea..e3b5c6039c2 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -28,33 +28,15 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
-#include <linux/io.h>
-#include <linux/acpi.h>
-#include <linux/delay.h>
+#include "sch56xx-common.h"
#define DRVNAME "sch5627"
#define DEVNAME DRVNAME /* We only support one model */
-#define SIO_SCH5627_EM_LD 0x0C /* Embedded Microcontroller LD */
-#define SIO_UNLOCK_KEY 0x55 /* Key to enable Super-I/O */
-#define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */
-
-#define SIO_REG_LDSEL 0x07 /* Logical device select */
-#define SIO_REG_DEVID 0x20 /* Device ID */
-#define SIO_REG_ENABLE 0x30 /* Logical device enable */
-#define SIO_REG_ADDR 0x66 /* Logical device address (2 bytes) */
-
-#define SIO_SCH5627_ID 0xC6 /* Chipset ID */
-
-#define REGION_LENGTH 9
-
#define SCH5627_HWMON_ID 0xa5
#define SCH5627_COMPANY_ID 0x5c
#define SCH5627_PRIMARY_ID 0xa0
-#define SCH5627_CMD_READ 0x02
-#define SCH5627_CMD_WRITE 0x03
-
#define SCH5627_REG_BUILD_CODE 0x39
#define SCH5627_REG_BUILD_ID 0x3a
#define SCH5627_REG_HWMON_ID 0x3c
@@ -111,182 +93,6 @@ struct sch5627_data {
u16 in[SCH5627_NO_IN];
};
-static struct platform_device *sch5627_pdev;
-
-/* Super I/O functions */
-static inline int superio_inb(int base, int reg)
-{
- outb(reg, base);
- return inb(base + 1);
-}
-
-static inline int superio_enter(int base)
-{
- /* Don't step on other drivers' I/O space by accident */
- if (!request_muxed_region(base, 2, DRVNAME)) {
- pr_err("I/O address 0x%04x already in use\n", base);
- return -EBUSY;
- }
-
- outb(SIO_UNLOCK_KEY, base);
-
- return 0;
-}
-
-static inline void superio_select(int base, int ld)
-{
- outb(SIO_REG_LDSEL, base);
- outb(ld, base + 1);
-}
-
-static inline void superio_exit(int base)
-{
- outb(SIO_LOCK_KEY, base);
- release_region(base, 2);
-}
-
-static int sch5627_send_cmd(struct sch5627_data *data, u8 cmd, u16 reg, u8 v)
-{
- u8 val;
- int i;
- /*
- * According to SMSC for the commands we use the maximum time for
- * the EM to respond is 15 ms, but testing shows in practice it
- * responds within 15-32 reads, so we first busy poll, and if
- * that fails sleep a bit and try again until we are way past
- * the 15 ms maximum response time.
- */
- const int max_busy_polls = 64;
- const int max_lazy_polls = 32;
-
- /* (Optional) Write-Clear the EC to Host Mailbox Register */
- val = inb(data->addr + 1);
- outb(val, data->addr + 1);
-
- /* Set Mailbox Address Pointer to first location in Region 1 */
- outb(0x00, data->addr + 2);
- outb(0x80, data->addr + 3);
-
- /* Write Request Packet Header */
- outb(cmd, data->addr + 4); /* VREG Access Type read:0x02 write:0x03 */
- outb(0x01, data->addr + 5); /* # of Entries: 1 Byte (8-bit) */
- outb(0x04, data->addr + 2); /* Mailbox AP to first data entry loc. */
-
- /* Write Value field */
- if (cmd == SCH5627_CMD_WRITE)
- outb(v, data->addr + 4);
-
- /* Write Address field */
- outb(reg & 0xff, data->addr + 6);
- outb(reg >> 8, data->addr + 7);
-
- /* Execute the Random Access Command */
- outb(0x01, data->addr); /* Write 01h to the Host-to-EC register */
-
- /* EM Interface Polling "Algorithm" */
- for (i = 0; i < max_busy_polls + max_lazy_polls; i++) {
- if (i >= max_busy_polls)
- msleep(1);
- /* Read Interrupt source Register */
- val = inb(data->addr + 8);
- /* Write Clear the interrupt source bits */
- if (val)
- outb(val, data->addr + 8);
- /* Command Completed ? */
- if (val & 0x01)
- break;
- }
- if (i == max_busy_polls + max_lazy_polls) {
- pr_err("Max retries exceeded reading virtual "
- "register 0x%04hx (%d)\n", reg, 1);
- return -EIO;
- }
-
- /*
- * According to SMSC we may need to retry this, but sofar I've always
- * seen this succeed in 1 try.
- */
- for (i = 0; i < max_busy_polls; i++) {
- /* Read EC-to-Host Register */
- val = inb(data->addr + 1);
- /* Command Completed ? */
- if (val == 0x01)
- break;
-
- if (i == 0)
- pr_warn("EC reports: 0x%02x reading virtual register "
- "0x%04hx\n", (unsigned int)val, reg);
- }
- if (i == max_busy_polls) {
- pr_err("Max retries exceeded reading virtual "
- "register 0x%04hx (%d)\n", reg, 2);
- return -EIO;
- }
-
- /*
- * According to the SMSC app note we should now do:
- *
- * Set Mailbox Address Pointer to first location in Region 1 *
- * outb(0x00, data->addr + 2);
- * outb(0x80, data->addr + 3);
- *
- * But if we do that things don't work, so let's not.
- */
-
- /* Read Value field */
- if (cmd == SCH5627_CMD_READ)
- return inb(data->addr + 4);
-
- return 0;
-}
-
-static int sch5627_read_virtual_reg(struct sch5627_data *data, u16 reg)
-{
- return sch5627_send_cmd(data, SCH5627_CMD_READ, reg, 0);
-}
-
-static int sch5627_write_virtual_reg(struct sch5627_data *data,
- u16 reg, u8 val)
-{
- return sch5627_send_cmd(data, SCH5627_CMD_WRITE, reg, val);
-}
-
-static int sch5627_read_virtual_reg16(struct sch5627_data *data, u16 reg)
-{
- int lsb, msb;
-
- /* Read LSB first, this will cause the matching MSB to be latched */
- lsb = sch5627_read_virtual_reg(data, reg);
- if (lsb < 0)
- return lsb;
-
- msb = sch5627_read_virtual_reg(data, reg + 1);
- if (msb < 0)
- return msb;
-
- return lsb | (msb << 8);
-}
-
-static int sch5627_read_virtual_reg12(struct sch5627_data *data, u16 msb_reg,
- u16 lsn_reg, int high_nibble)
-{
- int msb, lsn;
-
- /* Read MSB first, this will cause the matching LSN to be latched */
- msb = sch5627_read_virtual_reg(data, msb_reg);
- if (msb < 0)
- return msb;
-
- lsn = sch5627_read_virtual_reg(data, lsn_reg);
- if (lsn < 0)
- return lsn;
-
- if (high_nibble)
- return (msb << 4) | (lsn >> 4);
- else
- return (msb << 4) | (lsn & 0x0f);
-}
-
static struct sch5627_data *sch5627_update_device(struct device *dev)
{
struct sch5627_data *data = dev_get_drvdata(dev);
@@ -297,7 +103,7 @@ static struct sch5627_data *sch5627_update_device(struct device *dev)
/* Trigger a Vbat voltage measurement every 5 minutes */
if (time_after(jiffies, data->last_battery + 300 * HZ)) {
- sch5627_write_virtual_reg(data, SCH5627_REG_CTRL,
+ sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL,
data->control | 0x10);
data->last_battery = jiffies;
}
@@ -305,7 +111,7 @@ static struct sch5627_data *sch5627_update_device(struct device *dev)
/* Cache the values for 1 second */
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
for (i = 0; i < SCH5627_NO_TEMPS; i++) {
- val = sch5627_read_virtual_reg12(data,
+ val = sch56xx_read_virtual_reg12(data->addr,
SCH5627_REG_TEMP_MSB[i],
SCH5627_REG_TEMP_LSN[i],
SCH5627_REG_TEMP_HIGH_NIBBLE[i]);
@@ -317,7 +123,7 @@ static struct sch5627_data *sch5627_update_device(struct device *dev)
}
for (i = 0; i < SCH5627_NO_FANS; i++) {
- val = sch5627_read_virtual_reg16(data,
+ val = sch56xx_read_virtual_reg16(data->addr,
SCH5627_REG_FAN[i]);
if (unlikely(val < 0)) {
ret = ERR_PTR(val);
@@ -327,7 +133,7 @@ static struct sch5627_data *sch5627_update_device(struct device *dev)
}
for (i = 0; i < SCH5627_NO_IN; i++) {
- val = sch5627_read_virtual_reg12(data,
+ val = sch56xx_read_virtual_reg12(data->addr,
SCH5627_REG_IN_MSB[i],
SCH5627_REG_IN_LSN[i],
SCH5627_REG_IN_HIGH_NIBBLE[i]);
@@ -355,18 +161,21 @@ static int __devinit sch5627_read_limits(struct sch5627_data *data)
* Note what SMSC calls ABS, is what lm_sensors calls max
* (aka high), and HIGH is what lm_sensors calls crit.
*/
- val = sch5627_read_virtual_reg(data, SCH5627_REG_TEMP_ABS[i]);
+ val = sch56xx_read_virtual_reg(data->addr,
+ SCH5627_REG_TEMP_ABS[i]);
if (val < 0)
return val;
data->temp_max[i] = val;
- val = sch5627_read_virtual_reg(data, SCH5627_REG_TEMP_HIGH[i]);
+ val = sch56xx_read_virtual_reg(data->addr,
+ SCH5627_REG_TEMP_HIGH[i]);
if (val < 0)
return val;
data->temp_crit[i] = val;
}
for (i = 0; i < SCH5627_NO_FANS; i++) {
- val = sch5627_read_virtual_reg16(data, SCH5627_REG_FAN_MIN[i]);
+ val = sch56xx_read_virtual_reg16(data->addr,
+ SCH5627_REG_FAN_MIN[i]);
if (val < 0)
return val;
data->fan_min[i] = val;
@@ -667,7 +476,7 @@ static int __devinit sch5627_probe(struct platform_device *pdev)
mutex_init(&data->update_lock);
platform_set_drvdata(pdev, data);
- val = sch5627_read_virtual_reg(data, SCH5627_REG_HWMON_ID);
+ val = sch56xx_read_virtual_reg(data->addr, SCH5627_REG_HWMON_ID);
if (val < 0) {
err = val;
goto error;
@@ -679,7 +488,7 @@ static int __devinit sch5627_probe(struct platform_device *pdev)
goto error;
}
- val = sch5627_read_virtual_reg(data, SCH5627_REG_COMPANY_ID);
+ val = sch56xx_read_virtual_reg(data->addr, SCH5627_REG_COMPANY_ID);
if (val < 0) {
err = val;
goto error;
@@ -691,7 +500,7 @@ static int __devinit sch5627_probe(struct platform_device *pdev)
goto error;
}
- val = sch5627_read_virtual_reg(data, SCH5627_REG_PRIMARY_ID);
+ val = sch56xx_read_virtual_reg(data->addr, SCH5627_REG_PRIMARY_ID);
if (val < 0) {
err = val;
goto error;
@@ -703,25 +512,28 @@ static int __devinit sch5627_probe(struct platform_device *pdev)
goto error;
}
- build_code = sch5627_read_virtual_reg(data, SCH5627_REG_BUILD_CODE);
+ build_code = sch56xx_read_virtual_reg(data->addr,
+ SCH5627_REG_BUILD_CODE);
if (build_code < 0) {
err = build_code;
goto error;
}
- build_id = sch5627_read_virtual_reg16(data, SCH5627_REG_BUILD_ID);
+ build_id = sch56xx_read_virtual_reg16(data->addr,
+ SCH5627_REG_BUILD_ID);
if (build_id < 0) {
err = build_id;
goto error;
}
- hwmon_rev = sch5627_read_virtual_reg(data, SCH5627_REG_HWMON_REV);
+ hwmon_rev = sch56xx_read_virtual_reg(data->addr,
+ SCH5627_REG_HWMON_REV);
if (hwmon_rev < 0) {
err = hwmon_rev;
goto error;
}
- val = sch5627_read_virtual_reg(data, SCH5627_REG_CTRL);
+ val = sch56xx_read_virtual_reg(data->addr, SCH5627_REG_CTRL);
if (val < 0) {
err = val;
goto error;
@@ -734,7 +546,7 @@ static int __devinit sch5627_probe(struct platform_device *pdev)
}
/* Trigger a Vbat voltage measurement, so that we get a valid reading
the first time we read Vbat */
- sch5627_write_virtual_reg(data, SCH5627_REG_CTRL,
+ sch56xx_write_virtual_reg(data->addr, SCH5627_REG_CTRL,
data->control | 0x10);
data->last_battery = jiffies;
@@ -746,6 +558,7 @@ static int __devinit sch5627_probe(struct platform_device *pdev)
if (err)
goto error;
+ pr_info("found %s chip at %#hx\n", DEVNAME, data->addr);
pr_info("firmware build: code 0x%02X, id 0x%04X, hwmon: rev 0x%02X\n",
build_code, build_id, hwmon_rev);
@@ -768,85 +581,6 @@ error:
return err;
}
-static int __init sch5627_find(int sioaddr, unsigned short *address)
-{
- u8 devid;
- int err = superio_enter(sioaddr);
- if (err)
- return err;
-
- devid = superio_inb(sioaddr, SIO_REG_DEVID);
- if (devid != SIO_SCH5627_ID) {
- pr_debug("Unsupported device id: 0x%02x\n",
- (unsigned int)devid);
- err = -ENODEV;
- goto exit;
- }
-
- superio_select(sioaddr, SIO_SCH5627_EM_LD);
-
- if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
- pr_warn("Device not activated\n");
- err = -ENODEV;
- goto exit;
- }
-
- /*
- * Warning the order of the low / high byte is the other way around
- * as on most other superio devices!!
- */
- *address = superio_inb(sioaddr, SIO_REG_ADDR) |
- superio_inb(sioaddr, SIO_REG_ADDR + 1) << 8;
- if (*address == 0) {
- pr_warn("Base address not set\n");
- err = -ENODEV;
- goto exit;
- }
-
- pr_info("Found %s chip at %#hx\n", DEVNAME, *address);
-exit:
- superio_exit(sioaddr);
- return err;
-}
-
-static int __init sch5627_device_add(unsigned short address)
-{
- struct resource res = {
- .start = address,
- .end = address + REGION_LENGTH - 1,
- .flags = IORESOURCE_IO,
- };
- int err;
-
- sch5627_pdev = platform_device_alloc(DRVNAME, address);
- if (!sch5627_pdev)
- return -ENOMEM;
-
- res.name = sch5627_pdev->name;
- err = acpi_check_resource_conflict(&res);
- if (err)
- goto exit_device_put;
-
- err = platform_device_add_resources(sch5627_pdev, &res, 1);
- if (err) {
- pr_err("Device resource addition failed\n");
- goto exit_device_put;
- }
-
- err = platform_device_add(sch5627_pdev);
- if (err) {
- pr_err("Device addition failed\n");
- goto exit_device_put;
- }
-
- return 0;
-
-exit_device_put:
- platform_device_put(sch5627_pdev);
-
- return err;
-}
-
static struct platform_driver sch5627_driver = {
.driver = {
.owner = THIS_MODULE,
@@ -858,36 +592,16 @@ static struct platform_driver sch5627_driver = {
static int __init sch5627_init(void)
{
- int err = -ENODEV;
- unsigned short address;
-
- if (sch5627_find(0x4e, &address) && sch5627_find(0x2e, &address))
- goto exit;
-
- err = platform_driver_register(&sch5627_driver);
- if (err)
- goto exit;
-
- err = sch5627_device_add(address);
- if (err)
- goto exit_driver;
-
- return 0;
-
-exit_driver:
- platform_driver_unregister(&sch5627_driver);
-exit:
- return err;
+ return platform_driver_register(&sch5627_driver);
}
static void __exit sch5627_exit(void)
{
- platform_device_unregister(sch5627_pdev);
platform_driver_unregister(&sch5627_driver);
}
MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans de Goede (hdegoede@redhat.com)");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
module_init(sch5627_init);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
new file mode 100644
index 00000000000..244407aa79f
--- /dev/null
+++ b/drivers/hwmon/sch5636.c
@@ -0,0 +1,539 @@
+/***************************************************************************
+ * Copyright (C) 2011 Hans de Goede <hdegoede@redhat.com> *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the *
+ * Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ ***************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include "sch56xx-common.h"
+
+#define DRVNAME "sch5636"
+#define DEVNAME "theseus" /* We only support one model for now */
+
+#define SCH5636_REG_FUJITSU_ID 0x780
+#define SCH5636_REG_FUJITSU_REV 0x783
+
+#define SCH5636_NO_INS 5
+#define SCH5636_NO_TEMPS 16
+#define SCH5636_NO_FANS 8
+
+static const u16 SCH5636_REG_IN_VAL[SCH5636_NO_INS] = {
+ 0x22, 0x23, 0x24, 0x25, 0x189 };
+static const u16 SCH5636_REG_IN_FACTORS[SCH5636_NO_INS] = {
+ 4400, 1500, 4000, 4400, 16000 };
+static const char * const SCH5636_IN_LABELS[SCH5636_NO_INS] = {
+ "3.3V", "VREF", "VBAT", "3.3AUX", "12V" };
+
+static const u16 SCH5636_REG_TEMP_VAL[SCH5636_NO_TEMPS] = {
+ 0x2B, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x180, 0x181,
+ 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C };
+#define SCH5636_REG_TEMP_CTRL(i) (0x790 + (i))
+#define SCH5636_TEMP_WORKING 0x01
+#define SCH5636_TEMP_ALARM 0x02
+#define SCH5636_TEMP_DEACTIVATED 0x80
+
+static const u16 SCH5636_REG_FAN_VAL[SCH5636_NO_FANS] = {
+ 0x2C, 0x2E, 0x30, 0x32, 0x62, 0x64, 0x66, 0x68 };
+#define SCH5636_REG_FAN_CTRL(i) (0x880 + (i))
+/* FAULT in datasheet, but acts as an alarm */
+#define SCH5636_FAN_ALARM 0x04
+#define SCH5636_FAN_NOT_PRESENT 0x08
+#define SCH5636_FAN_DEACTIVATED 0x80
+
+
+struct sch5636_data {
+ unsigned short addr;
+ struct device *hwmon_dev;
+
+ struct mutex update_lock;
+ char valid; /* !=0 if following fields are valid */
+ unsigned long last_updated; /* In jiffies */
+ u8 in[SCH5636_NO_INS];
+ u8 temp_val[SCH5636_NO_TEMPS];
+ u8 temp_ctrl[SCH5636_NO_TEMPS];
+ u16 fan_val[SCH5636_NO_FANS];
+ u8 fan_ctrl[SCH5636_NO_FANS];
+};
+
+static struct sch5636_data *sch5636_update_device(struct device *dev)
+{
+ struct sch5636_data *data = dev_get_drvdata(dev);
+ struct sch5636_data *ret = data;
+ int i, val;
+
+ mutex_lock(&data->update_lock);
+
+ /* Cache the values for 1 second */
+ if (data->valid && !time_after(jiffies, data->last_updated + HZ))
+ goto abort;
+
+ for (i = 0; i < SCH5636_NO_INS; i++) {
+ val = sch56xx_read_virtual_reg(data->addr,
+ SCH5636_REG_IN_VAL[i]);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->in[i] = val;
+ }
+
+ for (i = 0; i < SCH5636_NO_TEMPS; i++) {
+ if (data->temp_ctrl[i] & SCH5636_TEMP_DEACTIVATED)
+ continue;
+
+ val = sch56xx_read_virtual_reg(data->addr,
+ SCH5636_REG_TEMP_VAL[i]);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp_val[i] = val;
+
+ val = sch56xx_read_virtual_reg(data->addr,
+ SCH5636_REG_TEMP_CTRL(i));
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->temp_ctrl[i] = val;
+ /* Alarms need to be explicitly write-cleared */
+ if (val & SCH5636_TEMP_ALARM) {
+ sch56xx_write_virtual_reg(data->addr,
+ SCH5636_REG_TEMP_CTRL(i), val);
+ }
+ }
+
+ for (i = 0; i < SCH5636_NO_FANS; i++) {
+ if (data->fan_ctrl[i] & SCH5636_FAN_DEACTIVATED)
+ continue;
+
+ val = sch56xx_read_virtual_reg16(data->addr,
+ SCH5636_REG_FAN_VAL[i]);
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->fan_val[i] = val;
+
+ val = sch56xx_read_virtual_reg(data->addr,
+ SCH5636_REG_FAN_CTRL(i));
+ if (unlikely(val < 0)) {
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->fan_ctrl[i] = val;
+ /* Alarms need to be explicitly write-cleared */
+ if (val & SCH5636_FAN_ALARM) {
+ sch56xx_write_virtual_reg(data->addr,
+ SCH5636_REG_FAN_CTRL(i), val);
+ }
+ }
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static int reg_to_rpm(u16 reg)
+{
+ if (reg == 0)
+ return -EIO;
+ if (reg == 0xffff)
+ return 0;
+
+ return 5400540 / reg;
+}
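+
+/*
+ * Worked example for reg_to_rpm() above (illustrative values, not taken
+ * from the datasheet): a raw reading of 2048 yields 5400540 / 2048 = 2636
+ * RPM, 0xffff is reported as 0 RPM (stopped fan), and 0 is treated as a
+ * read error (-EIO).
+ */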
+
+static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", DEVNAME);
+}
+
+static ssize_t show_in_value(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5636_data *data = sch5636_update_device(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = DIV_ROUND_CLOSEST(
+ data->in[attr->index] * SCH5636_REG_IN_FACTORS[attr->index],
+ 255);
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
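+
+/*
+ * Example for show_in_value() above (raw value chosen for illustration):
+ * for the 12V rail (factor 16000) a raw reading of 255 reports the full
+ * scale 16000 mV, while 128 reports DIV_ROUND_CLOSEST(128 * 16000, 255) =
+ * 8031 mV.
+ */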
+
+static ssize_t show_in_label(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ SCH5636_IN_LABELS[attr->index]);
+}
+
+static ssize_t show_temp_value(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5636_data *data = sch5636_update_device(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = (data->temp_val[attr->index] - 64) * 1000;
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
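+
+/*
+ * Note on the conversion in show_temp_value() above: the register holds the
+ * temperature in degrees Celsius with a fixed offset of 64, so a (purely
+ * illustrative) raw value of 89 is reported as (89 - 64) * 1000 = 25000,
+ * i.e. 25.0 degrees C in the usual hwmon millidegree unit.
+ */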
+
+static ssize_t show_temp_fault(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5636_data *data = sch5636_update_device(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = (data->temp_ctrl[attr->index] & SCH5636_TEMP_WORKING) ? 0 : 1;
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t show_temp_alarm(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5636_data *data = sch5636_update_device(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = (data->temp_ctrl[attr->index] & SCH5636_TEMP_ALARM) ? 1 : 0;
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t show_fan_value(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5636_data *data = sch5636_update_device(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = reg_to_rpm(data->fan_val[attr->index]);
+ if (val < 0)
+ return val;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t show_fan_fault(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5636_data *data = sch5636_update_device(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = (data->fan_ctrl[attr->index] & SCH5636_FAN_NOT_PRESENT) ? 1 : 0;
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t show_fan_alarm(struct device *dev, struct device_attribute
+ *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct sch5636_data *data = sch5636_update_device(dev);
+ int val;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ val = (data->fan_ctrl[attr->index] & SCH5636_FAN_ALARM) ? 1 : 0;
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static struct sensor_device_attribute sch5636_attr[] = {
+ SENSOR_ATTR(name, 0444, show_name, NULL, 0),
+ SENSOR_ATTR(in0_input, 0444, show_in_value, NULL, 0),
+ SENSOR_ATTR(in0_label, 0444, show_in_label, NULL, 0),
+ SENSOR_ATTR(in1_input, 0444, show_in_value, NULL, 1),
+ SENSOR_ATTR(in1_label, 0444, show_in_label, NULL, 1),
+ SENSOR_ATTR(in2_input, 0444, show_in_value, NULL, 2),
+ SENSOR_ATTR(in2_label, 0444, show_in_label, NULL, 2),
+ SENSOR_ATTR(in3_input, 0444, show_in_value, NULL, 3),
+ SENSOR_ATTR(in3_label, 0444, show_in_label, NULL, 3),
+ SENSOR_ATTR(in4_input, 0444, show_in_value, NULL, 4),
+ SENSOR_ATTR(in4_label, 0444, show_in_label, NULL, 4),
+};
+
+static struct sensor_device_attribute sch5636_temp_attr[] = {
+ SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0),
+ SENSOR_ATTR(temp1_fault, 0444, show_temp_fault, NULL, 0),
+ SENSOR_ATTR(temp1_alarm, 0444, show_temp_alarm, NULL, 0),
+ SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1),
+ SENSOR_ATTR(temp2_fault, 0444, show_temp_fault, NULL, 1),
+ SENSOR_ATTR(temp2_alarm, 0444, show_temp_alarm, NULL, 1),
+ SENSOR_ATTR(temp3_input, 0444, show_temp_value, NULL, 2),
+ SENSOR_ATTR(temp3_fault, 0444, show_temp_fault, NULL, 2),
+ SENSOR_ATTR(temp3_alarm, 0444, show_temp_alarm, NULL, 2),
+ SENSOR_ATTR(temp4_input, 0444, show_temp_value, NULL, 3),
+ SENSOR_ATTR(temp4_fault, 0444, show_temp_fault, NULL, 3),
+ SENSOR_ATTR(temp4_alarm, 0444, show_temp_alarm, NULL, 3),
+ SENSOR_ATTR(temp5_input, 0444, show_temp_value, NULL, 4),
+ SENSOR_ATTR(temp5_fault, 0444, show_temp_fault, NULL, 4),
+ SENSOR_ATTR(temp5_alarm, 0444, show_temp_alarm, NULL, 4),
+ SENSOR_ATTR(temp6_input, 0444, show_temp_value, NULL, 5),
+ SENSOR_ATTR(temp6_fault, 0444, show_temp_fault, NULL, 5),
+ SENSOR_ATTR(temp6_alarm, 0444, show_temp_alarm, NULL, 5),
+ SENSOR_ATTR(temp7_input, 0444, show_temp_value, NULL, 6),
+ SENSOR_ATTR(temp7_fault, 0444, show_temp_fault, NULL, 6),
+ SENSOR_ATTR(temp7_alarm, 0444, show_temp_alarm, NULL, 6),
+ SENSOR_ATTR(temp8_input, 0444, show_temp_value, NULL, 7),
+ SENSOR_ATTR(temp8_fault, 0444, show_temp_fault, NULL, 7),
+ SENSOR_ATTR(temp8_alarm, 0444, show_temp_alarm, NULL, 7),
+ SENSOR_ATTR(temp9_input, 0444, show_temp_value, NULL, 8),
+ SENSOR_ATTR(temp9_fault, 0444, show_temp_fault, NULL, 8),
+ SENSOR_ATTR(temp9_alarm, 0444, show_temp_alarm, NULL, 8),
+ SENSOR_ATTR(temp10_input, 0444, show_temp_value, NULL, 9),
+ SENSOR_ATTR(temp10_fault, 0444, show_temp_fault, NULL, 9),
+ SENSOR_ATTR(temp10_alarm, 0444, show_temp_alarm, NULL, 9),
+ SENSOR_ATTR(temp11_input, 0444, show_temp_value, NULL, 10),
+ SENSOR_ATTR(temp11_fault, 0444, show_temp_fault, NULL, 10),
+ SENSOR_ATTR(temp11_alarm, 0444, show_temp_alarm, NULL, 10),
+ SENSOR_ATTR(temp12_input, 0444, show_temp_value, NULL, 11),
+ SENSOR_ATTR(temp12_fault, 0444, show_temp_fault, NULL, 11),
+ SENSOR_ATTR(temp12_alarm, 0444, show_temp_alarm, NULL, 11),
+ SENSOR_ATTR(temp13_input, 0444, show_temp_value, NULL, 12),
+ SENSOR_ATTR(temp13_fault, 0444, show_temp_fault, NULL, 12),
+ SENSOR_ATTR(temp13_alarm, 0444, show_temp_alarm, NULL, 12),
+ SENSOR_ATTR(temp14_input, 0444, show_temp_value, NULL, 13),
+ SENSOR_ATTR(temp14_fault, 0444, show_temp_fault, NULL, 13),
+ SENSOR_ATTR(temp14_alarm, 0444, show_temp_alarm, NULL, 13),
+ SENSOR_ATTR(temp15_input, 0444, show_temp_value, NULL, 14),
+ SENSOR_ATTR(temp15_fault, 0444, show_temp_fault, NULL, 14),
+ SENSOR_ATTR(temp15_alarm, 0444, show_temp_alarm, NULL, 14),
+ SENSOR_ATTR(temp16_input, 0444, show_temp_value, NULL, 15),
+ SENSOR_ATTR(temp16_fault, 0444, show_temp_fault, NULL, 15),
+ SENSOR_ATTR(temp16_alarm, 0444, show_temp_alarm, NULL, 15),
+};
+
+static struct sensor_device_attribute sch5636_fan_attr[] = {
+ SENSOR_ATTR(fan1_input, 0444, show_fan_value, NULL, 0),
+ SENSOR_ATTR(fan1_fault, 0444, show_fan_fault, NULL, 0),
+ SENSOR_ATTR(fan1_alarm, 0444, show_fan_alarm, NULL, 0),
+ SENSOR_ATTR(fan2_input, 0444, show_fan_value, NULL, 1),
+ SENSOR_ATTR(fan2_fault, 0444, show_fan_fault, NULL, 1),
+ SENSOR_ATTR(fan2_alarm, 0444, show_fan_alarm, NULL, 1),
+ SENSOR_ATTR(fan3_input, 0444, show_fan_value, NULL, 2),
+ SENSOR_ATTR(fan3_fault, 0444, show_fan_fault, NULL, 2),
+ SENSOR_ATTR(fan3_alarm, 0444, show_fan_alarm, NULL, 2),
+ SENSOR_ATTR(fan4_input, 0444, show_fan_value, NULL, 3),
+ SENSOR_ATTR(fan4_fault, 0444, show_fan_fault, NULL, 3),
+ SENSOR_ATTR(fan4_alarm, 0444, show_fan_alarm, NULL, 3),
+ SENSOR_ATTR(fan5_input, 0444, show_fan_value, NULL, 4),
+ SENSOR_ATTR(fan5_fault, 0444, show_fan_fault, NULL, 4),
+ SENSOR_ATTR(fan5_alarm, 0444, show_fan_alarm, NULL, 4),
+ SENSOR_ATTR(fan6_input, 0444, show_fan_value, NULL, 5),
+ SENSOR_ATTR(fan6_fault, 0444, show_fan_fault, NULL, 5),
+ SENSOR_ATTR(fan6_alarm, 0444, show_fan_alarm, NULL, 5),
+ SENSOR_ATTR(fan7_input, 0444, show_fan_value, NULL, 6),
+ SENSOR_ATTR(fan7_fault, 0444, show_fan_fault, NULL, 6),
+ SENSOR_ATTR(fan7_alarm, 0444, show_fan_alarm, NULL, 6),
+ SENSOR_ATTR(fan8_input, 0444, show_fan_value, NULL, 7),
+ SENSOR_ATTR(fan8_fault, 0444, show_fan_fault, NULL, 7),
+ SENSOR_ATTR(fan8_alarm, 0444, show_fan_alarm, NULL, 7),
+};
+
+static int sch5636_remove(struct platform_device *pdev)
+{
+ struct sch5636_data *data = platform_get_drvdata(pdev);
+ int i;
+
+ if (data->hwmon_dev)
+ hwmon_device_unregister(data->hwmon_dev);
+
+ for (i = 0; i < ARRAY_SIZE(sch5636_attr); i++)
+ device_remove_file(&pdev->dev, &sch5636_attr[i].dev_attr);
+
+ for (i = 0; i < SCH5636_NO_TEMPS * 3; i++)
+ device_remove_file(&pdev->dev,
+ &sch5636_temp_attr[i].dev_attr);
+
+ for (i = 0; i < SCH5636_NO_FANS * 3; i++)
+ device_remove_file(&pdev->dev,
+ &sch5636_fan_attr[i].dev_attr);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(data);
+
+ return 0;
+}
+
+static int __devinit sch5636_probe(struct platform_device *pdev)
+{
+ struct sch5636_data *data;
+ int i, err, val, revision[2];
+ char id[4];
+
+ data = kzalloc(sizeof(struct sch5636_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
+ mutex_init(&data->update_lock);
+ platform_set_drvdata(pdev, data);
+
+ for (i = 0; i < 3; i++) {
+ val = sch56xx_read_virtual_reg(data->addr,
+ SCH5636_REG_FUJITSU_ID + i);
+ if (val < 0) {
+ pr_err("Could not read Fujitsu id byte at %#x\n",
+ SCH5636_REG_FUJITSU_ID + i);
+ err = val;
+ goto error;
+ }
+ id[i] = val;
+ }
+ id[i] = '\0';
+
+ if (strcmp(id, "THS")) {
+ pr_err("Unknown Fujitsu id: %02x%02x%02x\n",
+ id[0], id[1], id[2]);
+ err = -ENODEV;
+ goto error;
+ }
+
+ for (i = 0; i < 2; i++) {
+ val = sch56xx_read_virtual_reg(data->addr,
+ SCH5636_REG_FUJITSU_REV + i);
+ if (val < 0) {
+ err = val;
+ goto error;
+ }
+ revision[i] = val;
+ }
+	pr_info("Found %s chip at %#hx, revision: %d.%02d\n", DEVNAME,
+ data->addr, revision[0], revision[1]);
+
+ /* Read all temp + fan ctrl registers to determine which are active */
+ for (i = 0; i < SCH5636_NO_TEMPS; i++) {
+ val = sch56xx_read_virtual_reg(data->addr,
+ SCH5636_REG_TEMP_CTRL(i));
+ if (unlikely(val < 0)) {
+ err = val;
+ goto error;
+ }
+ data->temp_ctrl[i] = val;
+ }
+
+ for (i = 0; i < SCH5636_NO_FANS; i++) {
+ val = sch56xx_read_virtual_reg(data->addr,
+ SCH5636_REG_FAN_CTRL(i));
+ if (unlikely(val < 0)) {
+ err = val;
+ goto error;
+ }
+ data->fan_ctrl[i] = val;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sch5636_attr); i++) {
+ err = device_create_file(&pdev->dev,
+ &sch5636_attr[i].dev_attr);
+ if (err)
+ goto error;
+ }
+
+ for (i = 0; i < (SCH5636_NO_TEMPS * 3); i++) {
+ if (data->temp_ctrl[i/3] & SCH5636_TEMP_DEACTIVATED)
+ continue;
+
+ err = device_create_file(&pdev->dev,
+ &sch5636_temp_attr[i].dev_attr);
+ if (err)
+ goto error;
+ }
+
+ for (i = 0; i < (SCH5636_NO_FANS * 3); i++) {
+ if (data->fan_ctrl[i/3] & SCH5636_FAN_DEACTIVATED)
+ continue;
+
+ err = device_create_file(&pdev->dev,
+ &sch5636_fan_attr[i].dev_attr);
+ if (err)
+ goto error;
+ }
+
+ data->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ data->hwmon_dev = NULL;
+ goto error;
+ }
+
+ return 0;
+
+error:
+ sch5636_remove(pdev);
+ return err;
+}
+
+static struct platform_driver sch5636_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRVNAME,
+ },
+ .probe = sch5636_probe,
+ .remove = sch5636_remove,
+};
+
+static int __init sch5636_init(void)
+{
+ return platform_driver_register(&sch5636_driver);
+}
+
+static void __exit sch5636_exit(void)
+{
+ platform_driver_unregister(&sch5636_driver);
+}
+
+MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL");
+
+module_init(sch5636_init);
+module_exit(sch5636_exit);
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
new file mode 100644
index 00000000000..fac32ee0b10
--- /dev/null
+++ b/drivers/hwmon/sch56xx-common.c
@@ -0,0 +1,340 @@
+/***************************************************************************
+ * Copyright (C) 2010-2011 Hans de Goede <hdegoede@redhat.com> *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the *
+ * Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ ***************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include "sch56xx-common.h"
+
+#define SIO_SCH56XX_LD_EM 0x0C /* Embedded uController Logical Dev */
+#define SIO_UNLOCK_KEY 0x55 /* Key to enable Super-I/O */
+#define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */
+
+#define SIO_REG_LDSEL 0x07 /* Logical device select */
+#define SIO_REG_DEVID 0x20 /* Device ID */
+#define SIO_REG_ENABLE 0x30 /* Logical device enable */
+#define SIO_REG_ADDR 0x66 /* Logical device address (2 bytes) */
+
+#define SIO_SCH5627_ID 0xC6 /* Chipset ID */
+#define SIO_SCH5636_ID 0xC7 /* Chipset ID */
+
+#define REGION_LENGTH 9
+
+#define SCH56XX_CMD_READ 0x02
+#define SCH56XX_CMD_WRITE 0x03
+
+static struct platform_device *sch56xx_pdev;
+
+/* Super I/O functions */
+static inline int superio_inb(int base, int reg)
+{
+ outb(reg, base);
+ return inb(base + 1);
+}
+
+static inline int superio_enter(int base)
+{
+ /* Don't step on other drivers' I/O space by accident */
+ if (!request_muxed_region(base, 2, "sch56xx")) {
+ pr_err("I/O address 0x%04x already in use\n", base);
+ return -EBUSY;
+ }
+
+ outb(SIO_UNLOCK_KEY, base);
+
+ return 0;
+}
+
+static inline void superio_select(int base, int ld)
+{
+ outb(SIO_REG_LDSEL, base);
+ outb(ld, base + 1);
+}
+
+static inline void superio_exit(int base)
+{
+ outb(SIO_LOCK_KEY, base);
+ release_region(base, 2);
+}
+
+static int sch56xx_send_cmd(u16 addr, u8 cmd, u16 reg, u8 v)
+{
+ u8 val;
+ int i;
+ /*
+	 * According to SMSC, for the commands we use, the maximum time for
+	 * the EM to respond is 15 ms, but testing shows that in practice it
+	 * responds within 15-32 reads, so we first busy poll, and if that
+	 * fails we sleep a bit and try again until we are well past the
+	 * 15 ms maximum response time.
+ */
+ const int max_busy_polls = 64;
+ const int max_lazy_polls = 32;
+
+ /* (Optional) Write-Clear the EC to Host Mailbox Register */
+ val = inb(addr + 1);
+ outb(val, addr + 1);
+
+ /* Set Mailbox Address Pointer to first location in Region 1 */
+ outb(0x00, addr + 2);
+ outb(0x80, addr + 3);
+
+ /* Write Request Packet Header */
+ outb(cmd, addr + 4); /* VREG Access Type read:0x02 write:0x03 */
+ outb(0x01, addr + 5); /* # of Entries: 1 Byte (8-bit) */
+ outb(0x04, addr + 2); /* Mailbox AP to first data entry loc. */
+
+ /* Write Value field */
+ if (cmd == SCH56XX_CMD_WRITE)
+ outb(v, addr + 4);
+
+ /* Write Address field */
+ outb(reg & 0xff, addr + 6);
+ outb(reg >> 8, addr + 7);
+
+ /* Execute the Random Access Command */
+ outb(0x01, addr); /* Write 01h to the Host-to-EC register */
+
+ /* EM Interface Polling "Algorithm" */
+ for (i = 0; i < max_busy_polls + max_lazy_polls; i++) {
+ if (i >= max_busy_polls)
+ msleep(1);
+ /* Read Interrupt source Register */
+ val = inb(addr + 8);
+ /* Write Clear the interrupt source bits */
+ if (val)
+ outb(val, addr + 8);
+ /* Command Completed ? */
+ if (val & 0x01)
+ break;
+ }
+ if (i == max_busy_polls + max_lazy_polls) {
+ pr_err("Max retries exceeded reading virtual "
+ "register 0x%04hx (%d)\n", reg, 1);
+ return -EIO;
+ }
+
+ /*
+	 * According to SMSC we may need to retry this, but so far I've always
+ * seen this succeed in 1 try.
+ */
+ for (i = 0; i < max_busy_polls; i++) {
+ /* Read EC-to-Host Register */
+ val = inb(addr + 1);
+ /* Command Completed ? */
+ if (val == 0x01)
+ break;
+
+ if (i == 0)
+ pr_warn("EC reports: 0x%02x reading virtual register "
+ "0x%04hx\n", (unsigned int)val, reg);
+ }
+ if (i == max_busy_polls) {
+ pr_err("Max retries exceeded reading virtual "
+ "register 0x%04hx (%d)\n", reg, 2);
+ return -EIO;
+ }
+
+ /*
+ * According to the SMSC app note we should now do:
+ *
+ * Set Mailbox Address Pointer to first location in Region 1 *
+ * outb(0x00, addr + 2);
+ * outb(0x80, addr + 3);
+ *
+ * But if we do that things don't work, so let's not.
+ */
+
+ /* Read Value field */
+ if (cmd == SCH56XX_CMD_READ)
+ return inb(addr + 4);
+
+ return 0;
+}
+
+int sch56xx_read_virtual_reg(u16 addr, u16 reg)
+{
+ return sch56xx_send_cmd(addr, SCH56XX_CMD_READ, reg, 0);
+}
+EXPORT_SYMBOL(sch56xx_read_virtual_reg);
+
+int sch56xx_write_virtual_reg(u16 addr, u16 reg, u8 val)
+{
+ return sch56xx_send_cmd(addr, SCH56XX_CMD_WRITE, reg, val);
+}
+EXPORT_SYMBOL(sch56xx_write_virtual_reg);
+
+int sch56xx_read_virtual_reg16(u16 addr, u16 reg)
+{
+ int lsb, msb;
+
+ /* Read LSB first, this will cause the matching MSB to be latched */
+ lsb = sch56xx_read_virtual_reg(addr, reg);
+ if (lsb < 0)
+ return lsb;
+
+ msb = sch56xx_read_virtual_reg(addr, reg + 1);
+ if (msb < 0)
+ return msb;
+
+ return lsb | (msb << 8);
+}
+EXPORT_SYMBOL(sch56xx_read_virtual_reg16);
+
+int sch56xx_read_virtual_reg12(u16 addr, u16 msb_reg, u16 lsn_reg,
+ int high_nibble)
+{
+ int msb, lsn;
+
+ /* Read MSB first, this will cause the matching LSN to be latched */
+ msb = sch56xx_read_virtual_reg(addr, msb_reg);
+ if (msb < 0)
+ return msb;
+
+ lsn = sch56xx_read_virtual_reg(addr, lsn_reg);
+ if (lsn < 0)
+ return lsn;
+
+ if (high_nibble)
+ return (msb << 4) | (lsn >> 4);
+ else
+ return (msb << 4) | (lsn & 0x0f);
+}
+EXPORT_SYMBOL(sch56xx_read_virtual_reg12);
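+
+/*
+ * Example for sch56xx_read_virtual_reg12() above, with hypothetical register
+ * contents: if msb_reg reads 0x12 and lsn_reg reads 0xAB, the helper returns
+ * 0x12A when high_nibble is set and 0x12B otherwise.
+ */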
+
+static int __init sch56xx_find(int sioaddr, unsigned short *address,
+ const char **name)
+{
+ u8 devid;
+ int err;
+
+ err = superio_enter(sioaddr);
+ if (err)
+ return err;
+
+ devid = superio_inb(sioaddr, SIO_REG_DEVID);
+ switch (devid) {
+ case SIO_SCH5627_ID:
+ *name = "sch5627";
+ break;
+ case SIO_SCH5636_ID:
+ *name = "sch5636";
+ break;
+ default:
+ pr_debug("Unsupported device id: 0x%02x\n",
+ (unsigned int)devid);
+ err = -ENODEV;
+ goto exit;
+ }
+
+ superio_select(sioaddr, SIO_SCH56XX_LD_EM);
+
+ if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
+ pr_warn("Device not activated\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ /*
+	 * Warning: the order of the low / high address bytes is reversed
+	 * compared to most other Super-I/O devices!
+ */
+ *address = superio_inb(sioaddr, SIO_REG_ADDR) |
+ superio_inb(sioaddr, SIO_REG_ADDR + 1) << 8;
+ if (*address == 0) {
+ pr_warn("Base address not set\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+exit:
+ superio_exit(sioaddr);
+ return err;
+}
+
+static int __init sch56xx_device_add(unsigned short address, const char *name)
+{
+ struct resource res = {
+ .start = address,
+ .end = address + REGION_LENGTH - 1,
+ .flags = IORESOURCE_IO,
+ };
+ int err;
+
+ sch56xx_pdev = platform_device_alloc(name, address);
+ if (!sch56xx_pdev)
+ return -ENOMEM;
+
+ res.name = sch56xx_pdev->name;
+ err = acpi_check_resource_conflict(&res);
+ if (err)
+ goto exit_device_put;
+
+ err = platform_device_add_resources(sch56xx_pdev, &res, 1);
+ if (err) {
+ pr_err("Device resource addition failed\n");
+ goto exit_device_put;
+ }
+
+ err = platform_device_add(sch56xx_pdev);
+ if (err) {
+ pr_err("Device addition failed\n");
+ goto exit_device_put;
+ }
+
+ return 0;
+
+exit_device_put:
+ platform_device_put(sch56xx_pdev);
+
+ return err;
+}
+
+static int __init sch56xx_init(void)
+{
+ int err;
+ unsigned short address;
+ const char *name;
+
+ err = sch56xx_find(0x4e, &address, &name);
+ if (err)
+ err = sch56xx_find(0x2e, &address, &name);
+ if (err)
+ return err;
+
+ return sch56xx_device_add(address, name);
+}
+
+static void __exit sch56xx_exit(void)
+{
+ platform_device_unregister(sch56xx_pdev);
+}
+
+MODULE_DESCRIPTION("SMSC SCH56xx Hardware Monitoring Common Code");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL");
+
+module_init(sch56xx_init);
+module_exit(sch56xx_exit);
diff --git a/drivers/hwmon/sch56xx-common.h b/drivers/hwmon/sch56xx-common.h
new file mode 100644
index 00000000000..d5eaf3b9ebf
--- /dev/null
+++ b/drivers/hwmon/sch56xx-common.h
@@ -0,0 +1,24 @@
+/***************************************************************************
+ * Copyright (C) 2010-2011 Hans de Goede <hdegoede@redhat.com> *
+ * *
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the *
+ * Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ ***************************************************************************/
+
+int sch56xx_read_virtual_reg(u16 addr, u16 reg);
+int sch56xx_write_virtual_reg(u16 addr, u16 reg, u8 val);
+int sch56xx_read_virtual_reg16(u16 addr, u16 reg);
+int sch56xx_read_virtual_reg12(u16 addr, u16 msb_reg, u16 lsn_reg,
+ int high_nibble);
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index cf4330b352e..fe4104c6b76 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -32,7 +32,7 @@
#include <linux/sht15.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/* Commands */
#define SHT15_MEASURE_TEMP 0x03
@@ -671,7 +671,7 @@ static ssize_t sht15_show_status(struct device *dev,
* @buf: sysfs buffer to read the new heater state from.
* @count: length of the data.
*
- * Will be called on read access to heater_enable sysfs attribute.
+ * Will be called on write access to heater_enable sysfs attribute.
* Returns number of bytes actually decoded, negative errno on error.
*/
static ssize_t sht15_store_heater(struct device *dev,
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index 0d18de424c6..8eac67d769f 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -27,6 +27,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/hwmon.h>
+#include <linux/hwmon-vid.h>
#include <linux/sysfs.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
@@ -48,8 +49,10 @@ enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME };
struct via_cputemp_data {
struct device *hwmon_dev;
const char *name;
+ u8 vrm;
u32 id;
- u32 msr;
+ u32 msr_temp;
+ u32 msr_vid;
};
/*
@@ -77,13 +80,27 @@ static ssize_t show_temp(struct device *dev,
u32 eax, edx;
int err;
- err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx);
+ err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx);
if (err)
return -EAGAIN;
return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000);
}
+static ssize_t show_cpu_vid(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct via_cputemp_data *data = dev_get_drvdata(dev);
+ u32 eax, edx;
+ int err;
+
+ err = rdmsr_safe_on_cpu(data->id, data->msr_vid, &eax, &edx);
+ if (err)
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", vid_from_reg(~edx & 0x7f, data->vrm));
+}
+
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL,
SHOW_TEMP);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL);
@@ -100,6 +117,9 @@ static const struct attribute_group via_cputemp_group = {
.attrs = via_cputemp_attributes,
};
+/* Optional attributes */
+static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_cpu_vid, NULL);
+
static int __devinit via_cputemp_probe(struct platform_device *pdev)
{
struct via_cputemp_data *data;
@@ -122,11 +142,12 @@ static int __devinit via_cputemp_probe(struct platform_device *pdev)
/* C7 A */
case 0xD:
/* C7 D */
- data->msr = 0x1169;
+ data->msr_temp = 0x1169;
+ data->msr_vid = 0x198;
break;
case 0xF:
/* Nano */
- data->msr = 0x1423;
+ data->msr_temp = 0x1423;
break;
default:
err = -ENODEV;
@@ -134,7 +155,7 @@ static int __devinit via_cputemp_probe(struct platform_device *pdev)
}
/* test if we can access the TEMPERATURE MSR */
- err = rdmsr_safe_on_cpu(data->id, data->msr, &eax, &edx);
+ err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx);
if (err) {
dev_err(&pdev->dev,
"Unable to access TEMPERATURE MSR, giving up\n");
@@ -147,6 +168,15 @@ static int __devinit via_cputemp_probe(struct platform_device *pdev)
if (err)
goto exit_free;
+ if (data->msr_vid)
+ data->vrm = vid_which_vrm();
+
+ if (data->vrm) {
+ err = device_create_file(&pdev->dev, &dev_attr_cpu0_vid);
+ if (err)
+ goto exit_remove;
+ }
+
data->hwmon_dev = hwmon_device_register(&pdev->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
@@ -158,6 +188,8 @@ static int __devinit via_cputemp_probe(struct platform_device *pdev)
return 0;
exit_remove:
+ if (data->vrm)
+ device_remove_file(&pdev->dev, &dev_attr_cpu0_vid);
sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group);
exit_free:
platform_set_drvdata(pdev, NULL);
@@ -171,6 +203,8 @@ static int __devexit via_cputemp_remove(struct platform_device *pdev)
struct via_cputemp_data *data = platform_get_drvdata(pdev);
hwmon_device_unregister(data->hwmon_dev);
+ if (data->vrm)
+ device_remove_file(&pdev->dev, &dev_attr_cpu0_vid);
sysfs_remove_group(&pdev->dev.kobj, &via_cputemp_group);
platform_set_drvdata(pdev, NULL);
kfree(data);
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 30f06e956bf..5f13c62e64b 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -7,7 +7,7 @@ menuconfig I2C
depends on HAS_IOMEM
select RT_MUTEXES
---help---
- I2C (pronounce: I-square-C) is a slow serial bus protocol used in
+ I2C (pronounce: I-squared-C) is a slow serial bus protocol used in
many micro controller applications and developed by Philips. SMBus,
or System Management Bus is a subset of the I2C protocol. More
information is contained in the directory <file:Documentation/i2c/>,
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index dd364171f9c..b6807db7b36 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -1,23 +1,23 @@
/*
- Copyright (c) 2000 Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>,
- Mark D. Studebaker <mdsxyz123@yahoo.com>,
- Dan Eaton <dan.eaton@rocketlogix.com> and
- Stephen Rousset<stephen.rousset@rocketlogix.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright (c) 2000 Frodo Looijaard <frodol@dds.nl>,
+ * Philip Edelbrock <phil@netroedge.com>,
+ * Mark D. Studebaker <mdsxyz123@yahoo.com>,
+ * Dan Eaton <dan.eaton@rocketlogix.com> and
+ * Stephen Rousset <stephen.rousset@rocketlogix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
@@ -254,8 +254,8 @@ static int ali1535_transaction(struct i2c_adapter *adap)
if (temp & (ALI1535_STS_ERR | ALI1535_STS_BUSY)) {
/* do a clear-on-write */
outb_p(0xFF, SMBHSTSTS);
- if ((temp = inb_p(SMBHSTSTS)) &
- (ALI1535_STS_ERR | ALI1535_STS_BUSY)) {
+ temp = inb_p(SMBHSTSTS);
+ if (temp & (ALI1535_STS_ERR | ALI1535_STS_BUSY)) {
/* This is probably going to be correctable only by a
* power reset as one of the bits now appears to be
* stuck */
@@ -267,9 +267,8 @@ static int ali1535_transaction(struct i2c_adapter *adap)
}
} else {
/* check and clear done bit */
- if (temp & ALI1535_STS_DONE) {
+ if (temp & ALI1535_STS_DONE)
outb_p(temp, SMBHSTSTS);
- }
}
/* start the transaction by writing anything to the start register */
@@ -278,7 +277,7 @@ static int ali1535_transaction(struct i2c_adapter *adap)
/* We will always wait for a fraction of a second! */
timeout = 0;
do {
- msleep(1);
+ usleep_range(1000, 2000);
temp = inb_p(SMBHSTSTS);
} while (((temp & ALI1535_STS_BUSY) && !(temp & ALI1535_STS_IDLE))
&& (timeout++ < MAX_TIMEOUT));
@@ -325,12 +324,12 @@ static int ali1535_transaction(struct i2c_adapter *adap)
/* take consequent actions for error conditions */
if (!(temp & ALI1535_STS_DONE)) {
/* issue "kill" to reset host controller */
- outb_p(ALI1535_KILL,SMBHSTTYP);
- outb_p(0xFF,SMBHSTSTS);
+ outb_p(ALI1535_KILL, SMBHSTTYP);
+ outb_p(0xFF, SMBHSTSTS);
} else if (temp & ALI1535_STS_ERR) {
/* issue "timeout" to reset all devices on bus */
- outb_p(ALI1535_T_OUT,SMBHSTTYP);
- outb_p(0xFF,SMBHSTSTS);
+ outb_p(ALI1535_T_OUT, SMBHSTTYP);
+ outb_p(0xFF, SMBHSTSTS);
}
return result;
@@ -351,7 +350,7 @@ static s32 ali1535_access(struct i2c_adapter *adap, u16 addr,
for (timeout = 0;
(timeout < MAX_TIMEOUT) && !(temp & ALI1535_STS_IDLE);
timeout++) {
- msleep(1);
+ usleep_range(1000, 2000);
temp = inb_p(SMBHSTSTS);
}
if (timeout >= MAX_TIMEOUT)
@@ -480,12 +479,12 @@ static struct i2c_adapter ali1535_adapter = {
.algo = &smbus_algorithm,
};
-static const struct pci_device_id ali1535_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(ali1535_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ },
};
-MODULE_DEVICE_TABLE (pci, ali1535_ids);
+MODULE_DEVICE_TABLE(pci, ali1535_ids);
static int __devinit ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 52b545a795f..cbc98aea5b0 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -193,7 +193,13 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
return;
}
if (twi_int_status & MCOMP) {
- if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
+ if ((read_MASTER_CTL(iface) & MEN) == 0 &&
+ (iface->cur_mode == TWI_I2C_MODE_REPEAT ||
+ iface->cur_mode == TWI_I2C_MODE_COMBINED)) {
+ iface->result = -1;
+ write_INT_MASK(iface, 0);
+ write_MASTER_CTL(iface, 0);
+ } else if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
if (iface->readNum == 0) {
/* set the read number to 1 and ask for manual
* stop in block combine mode
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 3a20961bef1..b1d9cd28d8d 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -662,11 +662,8 @@ static int __devinit cpm_i2c_probe(struct platform_device *ofdev)
/* register new adapter to i2c module... */
data = of_get_property(ofdev->dev.of_node, "linux,i2c-index", &len);
- if (data && len == 4) {
- cpm->adap.nr = *data;
- result = i2c_add_numbered_adapter(&cpm->adap);
- } else
- result = i2c_add_adapter(&cpm->adap);
+ cpm->adap.nr = (data && len == 4) ? be32_to_cpup(data) : -1;
+ result = i2c_add_numbered_adapter(&cpm->adap);
if (result < 0) {
dev_err(&ofdev->dev, "Unable to register with I2C\n");
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 8abfa4a03ce..ce1a32b71e4 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -673,32 +673,33 @@ static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
/* transfer not completed */
adap->pch_i2c_xfer_in_progress = true;
- pmsg = &msgs[0];
- pmsg->flags |= adap->pch_buff_mode_en;
- status = pmsg->flags;
- pch_dbg(adap,
- "After invoking I2C_MODE_SEL :flag= 0x%x\n", status);
- /* calculate sub address length and message length */
- /* these are applicable only for buffer mode */
- subaddrlen = pmsg->buf[0];
- /* calculate actual message length excluding
- * the sub address fields */
- msglen = (pmsg->len) - (subaddrlen + 1);
- if (status & (I2C_M_RD)) {
- pch_dbg(adap, "invoking pch_i2c_readbytes\n");
- ret = pch_i2c_readbytes(i2c_adap, pmsg, (i + 1 == num),
- (i == 0));
- } else {
- pch_dbg(adap, "invoking pch_i2c_writebytes\n");
- ret = pch_i2c_writebytes(i2c_adap, pmsg, (i + 1 == num),
- (i == 0));
+ for (i = 0; i < num && ret >= 0; i++) {
+ pmsg = &msgs[i];
+ pmsg->flags |= adap->pch_buff_mode_en;
+ status = pmsg->flags;
+ pch_dbg(adap,
+ "After invoking I2C_MODE_SEL :flag= 0x%x\n", status);
+ /* calculate sub address length and message length */
+ /* these are applicable only for buffer mode */
+ subaddrlen = pmsg->buf[0];
+ /* calculate actual message length excluding
+ * the sub address fields */
+ msglen = (pmsg->len) - (subaddrlen + 1);
+
+ if ((status & (I2C_M_RD)) != false) {
+ ret = pch_i2c_readbytes(i2c_adap, pmsg, (i + 1 == num),
+ (i == 0));
+ } else {
+ ret = pch_i2c_writebytes(i2c_adap, pmsg, (i + 1 == num),
+ (i == 0));
+ }
}
adap->pch_i2c_xfer_in_progress = false; /* transfer completed */
mutex_unlock(&pch_mutex);
- return ret;
+ return (ret < 0) ? ret : num;
}
/**
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 3df1bc80f37..3876a2478bd 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -227,7 +227,7 @@ static int highlander_i2c_read(struct highlander_i2c_dev *dev)
/*
* The R0P7780LC0011RL FPGA needs a significant delay between
- * data read cycles, otherwise the transciever gets confused and
+ * data read cycles, otherwise the transceiver gets confused and
* garbage is returned when the read is subsequently aborted.
*
* It is not sufficient to wait for BBSY.
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 58a58c7eaa1..1a766cf74f6 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -204,7 +204,7 @@ struct omap_i2c_dev {
u16 errata;
};
-const static u8 reg_map[] = {
+static const u8 reg_map[] = {
[OMAP_I2C_REV_REG] = 0x00,
[OMAP_I2C_IE_REG] = 0x01,
[OMAP_I2C_STAT_REG] = 0x02,
@@ -225,7 +225,7 @@ const static u8 reg_map[] = {
[OMAP_I2C_BUFSTAT_REG] = 0x10,
};
-const static u8 omap4_reg_map[] = {
+static const u8 omap4_reg_map[] = {
[OMAP_I2C_REV_REG] = 0x04,
[OMAP_I2C_IE_REG] = 0x2c,
[OMAP_I2C_STAT_REG] = 0x28,
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index f59224a5c76..d6036465099 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1079,7 +1079,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
* The reason to do so is to avoid sysfs names that only make
* sense when there are multiple adapters.
*/
- i2c->adap.nr = dev->id != -1 ? dev->id : 0;
+ i2c->adap.nr = dev->id;
snprintf(i2c->adap.name, sizeof(i2c->adap.name), "pxa_i2c-i2c.%u",
i2c->adap.nr);
@@ -1142,10 +1142,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->adap.dev.of_node = dev->dev.of_node;
#endif
- if (i2c_type == REGS_CE4100)
- ret = i2c_add_adapter(&i2c->adap);
- else
- ret = i2c_add_numbered_adapter(&i2c->adap);
+ ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
printk(KERN_INFO "I2C: Failed to add bus\n");
goto eadapt;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 6c00c107ebf..f84a63c6dd9 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -248,12 +248,12 @@ static inline int is_msgend(struct s3c24xx_i2c *i2c)
return i2c->msg_ptr >= i2c->msg->len;
}
-/* i2s_s3c_irq_nextbyte
+/* i2c_s3c_irq_nextbyte
*
* process an interrupt and work out what to do
*/
-static int i2s_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
+static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
{
unsigned long tmp;
unsigned char byte;
@@ -264,7 +264,6 @@ static int i2s_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
case STATE_IDLE:
dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__);
goto out;
- break;
case STATE_STOP:
dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__);
@@ -444,7 +443,7 @@ static irqreturn_t s3c24xx_i2c_irq(int irqno, void *dev_id)
/* pretty much this leaves us with the fact that we've
* transmitted or received whatever byte we last sent */
- i2s_s3c_irq_nextbyte(i2c, status);
+ i2c_s3c_irq_nextbyte(i2c, status);
out:
return IRQ_HANDLED;
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
index cb5d01e279c..c64ba736f48 100644
--- a/drivers/i2c/busses/i2c-s6000.c
+++ b/drivers/i2c/busses/i2c-s6000.c
@@ -341,10 +341,7 @@ static int __devinit s6i2c_probe(struct platform_device *dev)
i2c_wr16(iface, S6_I2C_TXTL, 0);
platform_set_drvdata(dev, iface);
- if (bus_num < 0)
- rc = i2c_add_adapter(p_adap);
- else
- rc = i2c_add_numbered_adapter(p_adap);
+ rc = i2c_add_numbered_adapter(p_adap);
if (rc)
goto err_irq_free;
return 0;
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index dd39c1eb03e..26c352a0929 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -234,7 +234,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
if (taos->state != TAOS_STATE_IDLE) {
err = -ENODEV;
- dev_dbg(&serio->dev, "TAOS EVM reset failed (state=%d, "
+ dev_err(&serio->dev, "TAOS EVM reset failed (state=%d, "
"pos=%d)\n", taos->state, taos->pos);
goto exit_close;
}
@@ -255,7 +255,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
msecs_to_jiffies(250));
if (taos->state != TAOS_STATE_IDLE) {
err = -ENODEV;
- dev_err(&adapter->dev, "Echo off failed "
+ dev_err(&serio->dev, "TAOS EVM echo off failed "
"(state=%d)\n", taos->state);
goto exit_close;
}
@@ -263,7 +263,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
err = i2c_add_adapter(adapter);
if (err)
goto exit_close;
- dev_dbg(&serio->dev, "Connected to TAOS EVM\n");
+ dev_info(&serio->dev, "Connected to TAOS EVM\n");
taos->client = taos_instantiate_device(adapter);
return 0;
@@ -288,7 +288,7 @@ static void taos_disconnect(struct serio *serio)
serio_set_drvdata(serio, NULL);
kfree(taos);
- dev_dbg(&serio->dev, "Disconnected from TAOS EVM\n");
+ dev_info(&serio->dev, "Disconnected from TAOS EVM\n");
}
static struct serio_device_id taos_serio_ids[] = {
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 4d9319665e3..2440b741197 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/i2c-tegra.h>
+#include <linux/of_i2c.h>
#include <asm/unaligned.h>
@@ -40,8 +41,10 @@
#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
#define I2C_STATUS 0x01C
#define I2C_SL_CNFG 0x020
+#define I2C_SL_CNFG_NACK (1<<1)
#define I2C_SL_CNFG_NEWSL (1<<2)
#define I2C_SL_ADDR1 0x02c
+#define I2C_SL_ADDR2 0x030
#define I2C_TX_FIFO 0x050
#define I2C_RX_FIFO 0x054
#define I2C_PACKET_TRANSFER_STATUS 0x058
@@ -337,7 +340,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
if (!i2c_dev->is_dvc) {
u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
- i2c_writel(i2c_dev, sl_cfg | I2C_SL_CNFG_NEWSL, I2C_SL_CNFG);
+ sl_cfg |= I2C_SL_CNFG_NACK | I2C_SL_CNFG_NEWSL;
+ i2c_writel(i2c_dev, sl_cfg, I2C_SL_CNFG);
+ i2c_writel(i2c_dev, 0xfc, I2C_SL_ADDR1);
+ i2c_writel(i2c_dev, 0x00, I2C_SL_ADDR2);
+
}
val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
@@ -540,6 +547,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
struct resource *iomem;
struct clk *clk;
struct clk *i2c_clk;
+ const unsigned int *prop;
void *base;
int irq;
int ret = 0;
@@ -597,7 +605,17 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->irq = irq;
i2c_dev->cont_id = pdev->id;
i2c_dev->dev = &pdev->dev;
- i2c_dev->bus_clk_rate = pdata ? pdata->bus_clk_rate : 100000;
+
+ i2c_dev->bus_clk_rate = 100000; /* default clock rate */
+ if (pdata) {
+ i2c_dev->bus_clk_rate = pdata->bus_clk_rate;
+
+ } else if (i2c_dev->dev->of_node) { /* if there is a device tree node ... */
+ prop = of_get_property(i2c_dev->dev->of_node,
+ "clock-frequency", NULL);
+ if (prop)
+ i2c_dev->bus_clk_rate = be32_to_cpup(prop);
+ }
if (pdev->id == 3)
i2c_dev->is_dvc = 1;
@@ -627,6 +645,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->adapter.algo = &tegra_i2c_algo;
i2c_dev->adapter.dev.parent = &pdev->dev;
i2c_dev->adapter.nr = pdev->id;
+ i2c_dev->adapter.dev.of_node = pdev->dev.of_node;
ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
if (ret) {
@@ -634,6 +653,8 @@ static int tegra_i2c_probe(struct platform_device *pdev)
goto err_free_irq;
}
+ of_i2c_register_devices(&i2c_dev->adapter);
+
return 0;
err_free_irq:
free_irq(i2c_dev->irq, i2c_dev);
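Note: the i2c-tegra hunks above fall back to the device tree when no platform data is supplied, then hand the adapter's of_node to of_i2c_register_devices(). A minimal sketch of the same clock-rate fallback, written with the __be32 pointer type that of_get_property() actually hands back (function name here is illustrative, not from the patch):

	#include <linux/of.h>

	static u32 example_bus_clk_rate(struct device_node *np)
	{
		const __be32 *prop;
		u32 rate = 100000;		/* default when nothing is specified */

		prop = of_get_property(np, "clock-frequency", NULL);
		if (prop)
			rate = be32_to_cpup(prop);	/* DT cells are big-endian */
		return rate;
	}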
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 9a58994ff7e..131079a3e29 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -925,6 +925,9 @@ EXPORT_SYMBOL(i2c_add_adapter);
* or otherwise built in to the system's mainboard, and where i2c_board_info
* is used to properly configure I2C devices.
*
+ * If the requested bus number is set to -1, then this function will behave
+ * identically to i2c_add_adapter, and will dynamically assign a bus number.
+ *
* If no devices have pre-been declared for this bus, then be sure to
* register the adapter before any dynamically allocated ones. Otherwise
* the required bus ID may not be available.
@@ -940,6 +943,8 @@ int i2c_add_numbered_adapter(struct i2c_adapter *adap)
int id;
int status;
+ if (adap->nr == -1) /* -1 means dynamically assign bus id */
+ return i2c_add_adapter(adap);
if (adap->nr & ~MAX_ID_MASK)
return -EINVAL;
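Note: with the i2c-core change above, a bus driver no longer has to choose between i2c_add_adapter() and i2c_add_numbered_adapter() at runtime (this is what lets the i2c-s6000 hunk earlier in this diff drop its if/else). A minimal, hypothetical caller:

	#include <linux/i2c.h>

	static int example_register_adapter(struct i2c_adapter *adap, int bus_num)
	{
		adap->nr = bus_num;	/* -1 requests a dynamically assigned bus id */
		return i2c_add_numbered_adapter(adap);
	}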
diff --git a/drivers/i2c/muxes/pca954x.c b/drivers/i2c/muxes/pca954x.c
index 54e1ce73534..6f895366463 100644
--- a/drivers/i2c/muxes/pca954x.c
+++ b/drivers/i2c/muxes/pca954x.c
@@ -201,10 +201,11 @@ static int pca954x_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
- /* Read the mux register at addr to verify
- * that the mux is in fact present.
+ /* Write the mux register at addr to verify
+ * that the mux is in fact present. This also
+ * initializes the mux to disconnected state.
*/
- if (i2c_smbus_read_byte(client) < 0) {
+ if (i2c_smbus_write_byte(client, 0) < 0) {
dev_warn(&client->dev, "probe failed\n");
goto exit_free;
}
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index 3be60da5212..67cbcfa3512 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -141,6 +141,8 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
}
+ if (hwif->index > 0)
+ pci_dev_put(dev);
}
static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index 542603b394e..962693b10a1 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/ata_platform.h>
#include <linux/platform_device.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
static void __devinit plat_ide_setup_ports(struct ide_hw *hw,
@@ -95,7 +96,10 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start);
hw.dev = &pdev->dev;
- d.irq_flags = res_irq->flags;
+ d.irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+ if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+ d.irq_flags |= IRQF_SHARED;
+
if (mmio)
d.host_flags |= IDE_HFLAG_MMIO;
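Note: the ide_platform hunk above stops copying raw resource flags straight into the IRQ flags; only the trigger bits are kept and resource-level shareability is translated to IRQF_SHARED explicitly. The mapping, reduced to a sketch (helper name is illustrative):

	#include <linux/interrupt.h>
	#include <linux/ioport.h>

	static unsigned long example_irq_flags(const struct resource *res_irq)
	{
		unsigned long flags = res_irq->flags & IRQF_TRIGGER_MASK;

		if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
			flags |= IRQF_SHARED;
		return flags;
	}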
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 9e8f4e1b0cc..712c7904d03 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -342,7 +342,7 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
return -ENODEV;
}
- mem_size = mem->end - mem->start + 1;
+ mem_size = resource_size(mem);
if (request_mem_region(mem->start, mem_size, "palm_bk3710") == NULL) {
printk(KERN_ERR "failed to request memory region\n");
return -EBUSY;
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index bed3e39aac9..71c23195497 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -551,10 +551,10 @@ static int __init tx4939ide_probe(struct platform_device *pdev)
return -ENODEV;
if (!devm_request_mem_region(&pdev->dev, res->start,
- res->end - res->start + 1, "tx4938ide"))
+ resource_size(res), "tx4938ide"))
return -EBUSY;
mapbase = (unsigned long)devm_ioremap(&pdev->dev, res->start,
- res->end - res->start + 1);
+ resource_size(res));
if (!mapbase)
return -EBUSY;
memset(&hw, 0, sizeof(hw));
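Note: the tx4939ide hunks above (and the palm_bk3710 one before them) replace the open-coded end - start + 1 arithmetic with the resource_size() helper, which <linux/ioport.h> defines essentially as:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;	/* resources are inclusive ranges */
	}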
diff --git a/drivers/ieee802154/Makefile b/drivers/ieee802154/Makefile
index 68999137ded..800a3894af0 100644
--- a/drivers/ieee802154/Makefile
+++ b/drivers/ieee802154/Makefile
@@ -1,3 +1 @@
obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
-
-ccflags-y := -DDEBUG -DCONFIG_FFD
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
index a5a49a1baae..eb0e2ccc79a 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/ieee802154/fakehard.c
@@ -370,8 +370,6 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
return -ENOMEM;
}
- phy->dev.platform_data = dev;
-
memcpy(dev->dev_addr, "\xba\xbe\xca\xfe\xde\xad\xbe\xef",
dev->addr_len);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 8e21d457b89..236ad9a89c0 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -215,7 +215,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
if (!neigh || !(neigh->nud_state & NUD_VALID)) {
- neigh_event_send(rt->dst.neighbour, NULL);
+ neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
ret = -ENODATA;
if (neigh)
goto release;
@@ -273,9 +273,10 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
goto put;
}
- neigh = dst->neighbour;
+ neigh = dst_get_neighbour(dst);
if (!neigh || !(neigh->nud_state & NUD_VALID)) {
- neigh_event_send(dst->neighbour, NULL);
+ if (neigh)
+ neigh_event_send(neigh, NULL);
ret = -ENODATA;
goto put;
}
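Note: this is the first of several conversions in this diff (see also the cxgb3 and cxgb4 cm hunks below) from dereferencing dst->neighbour directly to the dst_get_neighbour() accessor, with a NULL check before prodding the neighbour. The pattern, condensed into a sketch:

	#include <linux/errno.h>
	#include <net/dst.h>
	#include <net/neighbour.h>

	static int example_check_neigh(struct dst_entry *dst)
	{
		struct neighbour *neigh = dst_get_neighbour(dst);

		if (!neigh || !(neigh->nud_state & NUD_VALID)) {
			if (neigh)		/* only poke a neighbour we actually have */
				neigh_event_send(neigh, NULL);
			return -ENODATA;
		}
		return 0;
	}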
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index f9ba7d74dfc..9353992f9ee 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -302,7 +302,8 @@ static void ib_cache_event(struct ib_event_handler *handler,
event->event == IB_EVENT_LID_CHANGE ||
event->event == IB_EVENT_PKEY_CHANGE ||
event->event == IB_EVENT_SM_CHANGE ||
- event->event == IB_EVENT_CLIENT_REREGISTER) {
+ event->event == IB_EVENT_CLIENT_REREGISTER ||
+ event->event == IB_EVENT_GID_CHANGE) {
work = kmalloc(sizeof *work, GFP_ATOMIC);
if (work) {
INIT_WORK(&work->work, ib_cache_task);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f62f52fb9ec..fc0f2bd9ca8 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3641,7 +3641,8 @@ static struct kobj_type cm_port_obj_type = {
static char *cm_devnode(struct device *dev, mode_t *mode)
{
- *mode = 0666;
+ if (mode)
+ *mode = 0666;
return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index b6a33b3c516..ca4c5dcd713 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -359,6 +359,10 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
+ if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
+ id_priv->id.ps == RDMA_PS_IPOIB)
+ return -EINVAL;
+
mutex_lock(&lock);
iboe_addr_get_sgid(dev_addr, &iboe_gid);
memcpy(&gid, dev_addr->src_dev_addr +
@@ -406,11 +410,6 @@ static int cma_disable_callback(struct rdma_id_private *id_priv,
return 0;
}
-static int cma_has_cm_dev(struct rdma_id_private *id_priv)
-{
- return (id_priv->id.device && id_priv->cm_id.ib);
-}
-
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
void *context, enum rdma_port_space ps,
enum ib_qp_type qp_type)
@@ -920,11 +919,11 @@ void rdma_destroy_id(struct rdma_cm_id *id)
if (id_priv->cma_dev) {
switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
case RDMA_TRANSPORT_IB:
- if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
+ if (id_priv->cm_id.ib)
ib_destroy_cm_id(id_priv->cm_id.ib);
break;
case RDMA_TRANSPORT_IWARP:
- if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
+ if (id_priv->cm_id.iw)
iw_destroy_cm_id(id_priv->cm_id.iw);
break;
default:
@@ -1085,12 +1084,12 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
if (cma_get_net_info(ib_event->private_data, listen_id->ps,
&ip_ver, &port, &src, &dst))
- goto err;
+ return NULL;
id = rdma_create_id(listen_id->event_handler, listen_id->context,
listen_id->ps, ib_event->param.req_rcvd.qp_type);
if (IS_ERR(id))
- goto err;
+ return NULL;
cma_save_net_info(&id->route.addr, &listen_id->route.addr,
ip_ver, port, src, dst);
@@ -1100,7 +1099,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
GFP_KERNEL);
if (!rt->path_rec)
- goto destroy_id;
+ goto err;
rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
if (rt->num_paths == 2)
@@ -1114,7 +1113,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
&rt->addr.dev_addr);
if (ret)
- goto destroy_id;
+ goto err;
}
rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
@@ -1122,9 +1121,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
id_priv->state = RDMA_CM_CONNECT;
return id_priv;
-destroy_id:
- rdma_destroy_id(id);
err:
+ rdma_destroy_id(id);
return NULL;
}
@@ -1468,13 +1466,15 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
{
struct ib_cm_compare_data compare_data;
struct sockaddr *addr;
+ struct ib_cm_id *id;
__be64 svc_id;
int ret;
- id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
- id_priv);
- if (IS_ERR(id_priv->cm_id.ib))
- return PTR_ERR(id_priv->cm_id.ib);
+ id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
+ if (IS_ERR(id))
+ return PTR_ERR(id);
+
+ id_priv->cm_id.ib = id;
addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
svc_id = cma_get_service_id(id_priv->id.ps, addr);
@@ -1497,12 +1497,15 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
int ret;
struct sockaddr_in *sin;
+ struct iw_cm_id *id;
+
+ id = iw_create_cm_id(id_priv->id.device,
+ iw_conn_req_handler,
+ id_priv);
+ if (IS_ERR(id))
+ return PTR_ERR(id);
- id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
- iw_conn_req_handler,
- id_priv);
- if (IS_ERR(id_priv->cm_id.iw))
- return PTR_ERR(id_priv->cm_id.iw);
+ id_priv->cm_id.iw = id;
sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
id_priv->cm_id.iw->local_addr = *sin;
@@ -2484,6 +2487,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
{
struct ib_cm_sidr_req_param req;
struct rdma_route *route;
+ struct ib_cm_id *id;
int ret;
req.private_data_len = sizeof(struct cma_hdr) +
@@ -2501,12 +2505,13 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
if (ret)
goto out;
- id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
- cma_sidr_rep_handler, id_priv);
- if (IS_ERR(id_priv->cm_id.ib)) {
- ret = PTR_ERR(id_priv->cm_id.ib);
+ id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
+ id_priv);
+ if (IS_ERR(id)) {
+ ret = PTR_ERR(id);
goto out;
}
+ id_priv->cm_id.ib = id;
req.path = route->path_rec;
req.service_id = cma_get_service_id(id_priv->id.ps,
@@ -2530,6 +2535,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
struct ib_cm_req_param req;
struct rdma_route *route;
void *private_data;
+ struct ib_cm_id *id;
int offset, ret;
memset(&req, 0, sizeof req);
@@ -2543,12 +2549,12 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
memcpy(private_data + offset, conn_param->private_data,
conn_param->private_data_len);
- id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
- id_priv);
- if (IS_ERR(id_priv->cm_id.ib)) {
- ret = PTR_ERR(id_priv->cm_id.ib);
+ id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
+ if (IS_ERR(id)) {
+ ret = PTR_ERR(id);
goto out;
}
+ id_priv->cm_id.ib = id;
route = &id_priv->id.route;
ret = cma_format_hdr(private_data, id_priv->id.ps, route);
@@ -2577,8 +2583,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
- if (ret && !IS_ERR(id_priv->cm_id.ib)) {
- ib_destroy_cm_id(id_priv->cm_id.ib);
+ if (ret && !IS_ERR(id)) {
+ ib_destroy_cm_id(id);
id_priv->cm_id.ib = NULL;
}
@@ -2595,10 +2601,8 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
struct iw_cm_conn_param iw_param;
cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
- if (IS_ERR(cm_id)) {
- ret = PTR_ERR(cm_id);
- goto out;
- }
+ if (IS_ERR(cm_id))
+ return PTR_ERR(cm_id);
id_priv->cm_id.iw = cm_id;
@@ -2622,7 +2626,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
iw_param.qpn = conn_param->qp_num;
ret = iw_cm_connect(cm_id, &iw_param);
out:
- if (ret && !IS_ERR(cm_id)) {
+ if (ret) {
iw_destroy_cm_id(cm_id);
id_priv->cm_id.iw = NULL;
}
@@ -2795,7 +2799,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_has_cm_dev(id_priv))
+ if (!id_priv->cm_id.ib)
return -EINVAL;
switch (id->device->node_type) {
@@ -2817,7 +2821,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_has_cm_dev(id_priv))
+ if (!id_priv->cm_id.ib)
return -EINVAL;
switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2848,7 +2852,7 @@ int rdma_disconnect(struct rdma_cm_id *id)
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_has_cm_dev(id_priv))
+ if (!id_priv->cm_id.ib)
return -EINVAL;
switch (rdma_node_get_transport(id->device->node_type)) {
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 4007f721d25..e711de400a0 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -627,6 +627,9 @@ int ib_modify_device(struct ib_device *device,
int device_modify_mask,
struct ib_device_modify *device_modify)
{
+ if (!device->modify_device)
+ return -ENOSYS;
+
return device->modify_device(device, device_modify_mask,
device_modify);
}
@@ -647,6 +650,9 @@ int ib_modify_port(struct ib_device *device,
u8 port_num, int port_modify_mask,
struct ib_port_modify *port_modify)
{
+ if (!device->modify_port)
+ return -ENOSYS;
+
if (port_num < start_port(device) || port_num > end_port(device))
return -EINVAL;
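Note: returning -ENOSYS from ib_modify_device()/ib_modify_port() when a provider leaves the verb unset is what allows the stub modify_port handlers to be deleted from the amso1100, cxgb3 and cxgb4 providers later in this diff. The optional-verb guard, in isolation (wrapper name is illustrative):

	#include <linux/errno.h>
	#include <rdma/ib_verbs.h>

	static int example_call_optional_verb(struct ib_device *device, int mask,
					      struct ib_device_modify *mod)
	{
		if (!device->modify_device)	/* provider did not implement it */
			return -ENOSYS;
		return device->modify_device(device, mask, mod);
	}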
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 4a5abaf0a25..9227f4acd79 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -148,7 +148,7 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
return netlink_dump_start(nls, skb, nlh,
client->cb_table[op].dump,
- NULL);
+ NULL, 0);
}
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e49a85f8a44..56898b6578a 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -826,7 +826,8 @@ static void ib_uverbs_remove_one(struct ib_device *device)
static char *uverbs_devnode(struct device *dev, mode_t *mode)
{
- *mode = 0666;
+ if (mode)
+ *mode = 0666;
return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 0cfc455630d..444470a28de 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -36,6 +36,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
+#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index aeebc4d37e3..f101bb73be6 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -99,14 +99,6 @@ static int c2_query_port(struct ib_device *ibdev,
return 0;
}
-static int c2_modify_port(struct ib_device *ibdev,
- u8 port, int port_modify_mask,
- struct ib_port_modify *props)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return 0;
-}
-
static int c2_query_pkey(struct ib_device *ibdev,
u8 port, u16 index, u16 * pkey)
{
@@ -817,7 +809,6 @@ int c2_register_device(struct c2_dev *dev)
dev->ibdev.dma_device = &dev->pcidev->dev;
dev->ibdev.query_device = c2_query_device;
dev->ibdev.query_port = c2_query_port;
- dev->ibdev.modify_port = c2_modify_port;
dev->ibdev.query_pkey = c2_query_pkey;
dev->ibdev.query_gid = c2_query_gid;
dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 0a5008fbeba..17bf9d95463 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1328,6 +1328,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct iwch_ep *child_ep, *parent_ep = ctx;
struct cpl_pass_accept_req *req = cplhdr(skb);
unsigned int hwtid = GET_TID(req);
+ struct neighbour *neigh;
struct dst_entry *dst;
struct l2t_entry *l2t;
struct rtable *rt;
@@ -1364,7 +1365,8 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
goto reject;
}
dst = &rt->dst;
- l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
+ neigh = dst_get_neighbour(dst);
+ l2t = t3_l2t_get(tdev, neigh, neigh->dev);
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1874,10 +1876,11 @@ static int is_loopback_dst(struct iw_cm_id *cm_id)
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
- int err = 0;
struct iwch_dev *h = to_iwch_dev(cm_id->device);
+ struct neighbour *neigh;
struct iwch_ep *ep;
struct rtable *rt;
+ int err = 0;
if (is_loopback_dst(cm_id)) {
err = -ENOSYS;
@@ -1933,9 +1936,10 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep->dst = &rt->dst;
+ neigh = dst_get_neighbour(ep->dst);
+
/* get a l2t entry */
- ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
- ep->dst->neighbour->dev);
+ ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 2e2741307af..c7d9411f295 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -61,13 +61,6 @@
#include "iwch_user.h"
#include "common.h"
-static int iwch_modify_port(struct ib_device *ibdev,
- u8 port, int port_modify_mask,
- struct ib_port_modify *props)
-{
- return -ENOSYS;
-}
-
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
struct ib_ah_attr *ah_attr)
{
@@ -1392,7 +1385,6 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
dev->ibdev.query_device = iwch_query_device;
dev->ibdev.query_port = iwch_query_port;
- dev->ibdev.modify_port = iwch_modify_port;
dev->ibdev.query_pkey = iwch_query_pkey;
dev->ibdev.query_gid = iwch_query_gid;
dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index f660cd04ec2..77f769d9227 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1325,6 +1325,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int hwtid = GET_TID(req);
+ struct neighbour *neigh;
struct dst_entry *dst;
struct l2t_entry *l2t;
struct rtable *rt;
@@ -1357,11 +1358,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
dst = &rt->dst;
- if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
+ neigh = dst_get_neighbour(dst);
+ if (neigh->dev->flags & IFF_LOOPBACK) {
pdev = ip_dev_find(&init_net, peer_ip);
BUG_ON(!pdev);
- l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
- pdev, 0);
+ l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0);
mtu = pdev->mtu;
tx_chan = cxgb4_port_chan(pdev);
smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
@@ -1372,17 +1373,16 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
dev_put(pdev);
} else {
- l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
- dst->neighbour->dev, 0);
+ l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0);
mtu = dst_mtu(dst);
- tx_chan = cxgb4_port_chan(dst->neighbour->dev);
- smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
+ tx_chan = cxgb4_port_chan(neigh->dev);
+ smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
- txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
- ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev);
+ txq_idx = cxgb4_port_idx(neigh->dev) * step;
+ ctrlq_idx = cxgb4_port_idx(neigh->dev);
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(dst->neighbour->dev) * step];
+ cxgb4_port_idx(neigh->dev) * step];
}
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
@@ -1463,9 +1463,9 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
int disconnect = 1;
int release = 0;
- int abort = 0;
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(hdr);
+ int ret;
ep = lookup_tid(t, tid);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1501,10 +1501,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
start_ep_timer(ep);
__state_set(&ep->com, CLOSING);
attrs.next_state = C4IW_QP_STATE_CLOSING;
- abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+ ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
- peer_close_upcall(ep);
- disconnect = 1;
+ if (ret != -ECONNRESET) {
+ peer_close_upcall(ep);
+ disconnect = 1;
+ }
break;
case ABORTING:
disconnect = 0;
@@ -1845,6 +1847,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct c4iw_ep *ep;
struct rtable *rt;
struct net_device *pdev;
+ struct neighbour *neigh;
int step;
if ((conn_param->ord > c4iw_max_read_depth) ||
@@ -1906,14 +1909,15 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep->dst = &rt->dst;
+ neigh = dst_get_neighbour(ep->dst);
+
/* get a l2t entry */
- if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
+ if (neigh->dev->flags & IFF_LOOPBACK) {
PDBG("%s LOOPBACK\n", __func__);
pdev = ip_dev_find(&init_net,
cm_id->remote_addr.sin_addr.s_addr);
ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- ep->dst->neighbour,
- pdev, 0);
+ neigh, pdev, 0);
ep->mtu = pdev->mtu;
ep->tx_chan = cxgb4_port_chan(pdev);
ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
@@ -1928,20 +1932,18 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
dev_put(pdev);
} else {
ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- ep->dst->neighbour,
- ep->dst->neighbour->dev, 0);
+ neigh, neigh->dev, 0);
ep->mtu = dst_mtu(ep->dst);
- ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
- ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) &
- 0x7F) << 1;
+ ep->tx_chan = cxgb4_port_chan(neigh->dev);
+ ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
step = ep->com.dev->rdev.lldi.ntxq /
ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
- ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev);
+ ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
+ ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
step = ep->com.dev->rdev.lldi.nrxq /
ep->com.dev->rdev.lldi.nchan;
ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(ep->dst->neighbour->dev) * step];
+ cxgb4_port_idx(neigh->dev) * step];
}
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
@@ -2109,15 +2111,16 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
break;
}
- mutex_unlock(&ep->com.mutex);
if (close) {
- if (abrupt)
- ret = abort_connection(ep, NULL, gfp);
- else
+ if (abrupt) {
+ close_complete_upcall(ep);
+ ret = send_abort(ep, NULL, gfp);
+ } else
ret = send_halfclose(ep, gfp);
if (ret)
fatal = 1;
}
+ mutex_unlock(&ep->com.mutex);
if (fatal)
release_ep_resources(ep);
return ret;
@@ -2301,6 +2304,31 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
+static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct cpl_abort_req_rss *req = cplhdr(skb);
+ struct c4iw_ep *ep;
+ struct tid_info *t = dev->rdev.lldi.tids;
+ unsigned int tid = GET_TID(req);
+
+ ep = lookup_tid(t, tid);
+ if (is_neg_adv_abort(req->status)) {
+ PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
+ ep->hwtid);
+ kfree_skb(skb);
+ return 0;
+ }
+ PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+ ep->com.state);
+
+ /*
+ * Wake up any threads in rdma_init() or rdma_fini().
+ */
+ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ sched(dev, skb);
+ return 0;
+}
+
/*
* Most upcalls from the T4 Core go to sched() to
* schedule the processing on a work queue.
@@ -2317,7 +2345,7 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
[CPL_PASS_ESTABLISH] = sched,
[CPL_PEER_CLOSE] = sched,
[CPL_CLOSE_CON_RPL] = sched,
- [CPL_ABORT_REQ_RSS] = sched,
+ [CPL_ABORT_REQ_RSS] = peer_abort_intr,
[CPL_RDMA_TERMINATE] = sched,
[CPL_FW4_ACK] = sched,
[CPL_SET_TCB_RPL] = set_tcb_rpl,
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 8d8f8add6fc..1720dc790d1 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -801,6 +801,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
if (ucontext) {
memsize = roundup(memsize, PAGE_SIZE);
hwentries = memsize / sizeof *chp->cq.queue;
+ while (hwentries > T4_MAX_IQ_SIZE) {
+ memsize -= PAGE_SIZE;
+ hwentries = memsize / sizeof *chp->cq.queue;
+ }
}
chp->cq.size = hwentries;
chp->cq.memsize = memsize;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 273ffe49525..40c835309e4 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -31,7 +31,7 @@
*/
#include <rdma/ib_umem.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "iw_cxgb4.h"
@@ -625,7 +625,7 @@ pbl_done:
mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
mhp->attr.va_fbo = virt;
mhp->attr.page_size = shift - 12;
- mhp->attr.len = (u32) length;
+ mhp->attr.len = length;
err = register_mem(rhp, php, mhp, shift);
if (err)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 5b9e4220ca0..247fe706e7f 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -58,13 +58,6 @@ static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
-static int c4iw_modify_port(struct ib_device *ibdev,
- u8 port, int port_modify_mask,
- struct ib_port_modify *props)
-{
- return -ENOSYS;
-}
-
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
struct ib_ah_attr *ah_attr)
{
@@ -456,7 +449,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
dev->ibdev.query_device = c4iw_query_device;
dev->ibdev.query_port = c4iw_query_port;
- dev->ibdev.modify_port = c4iw_modify_port;
dev->ibdev.query_pkey = c4iw_query_pkey;
dev->ibdev.query_gid = c4iw_query_gid;
dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 3b773b05a89..a41578e48c7 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1207,11 +1207,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
c4iw_get_ep(&qhp->ep->com);
}
ret = rdma_fini(rhp, qhp, ep);
- if (ret) {
- if (internal)
- c4iw_get_ep(&qhp->ep->com);
+ if (ret)
goto err;
- }
break;
case C4IW_QP_STATE_TERMINATE:
set_state(qhp, C4IW_QP_STATE_TERMINATE);
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 4fb50d58b49..407ff392415 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -37,6 +37,7 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/genalloc.h>
+#include <linux/ratelimit.h>
#include "iw_cxgb4.h"
#define RANDOM_SIZE 16
@@ -311,8 +312,8 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
- if (!addr && printk_ratelimit())
- printk(KERN_WARNING MOD "%s: Out of PBL memory\n",
+ if (!addr)
+ printk_ratelimited(KERN_WARNING MOD "%s: Out of PBL memory\n",
pci_name(rdev->lldi.pdev));
return (u32)addr;
}
@@ -373,8 +374,8 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
- if (!addr && printk_ratelimit())
- printk(KERN_WARNING MOD "%s: Out of RQT memory\n",
+ if (!addr)
+ printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n",
pci_name(rdev->lldi.pdev));
return (u32)addr;
}
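Note: the two resource.c hunks above drop the manual printk_ratelimit() test in favour of printk_ratelimited(), which carries its own ratelimit state per call site (hence the new <linux/ratelimit.h> include). Hypothetical usage:

	#include <linux/kernel.h>
	#include <linux/ratelimit.h>

	static void example_warn_out_of_pool(const char *name)
	{
		/* Rate-limited independently of any other printk call site. */
		printk_ratelimited(KERN_WARNING "%s: out of pool memory\n", name);
	}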
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index f09914cccf5..54c0d23bad9 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -58,7 +58,7 @@
#include <linux/cpu.h>
#include <linux/device.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/abs_addr.h>
#include <asm/ibmebus.h>
#include <asm/io.h>
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index ee79a2d97b1..8697eca1435 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -40,6 +40,7 @@
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/jiffies.h>
+#include <linux/cpu.h>
#include <asm/pgtable.h>
#include "ipath_kernel.h"
@@ -1684,17 +1685,19 @@ static int find_best_unit(struct file *fp,
* information. There may be some issues with dual core numbering
* as well. This needs more work prior to release.
*/
- if (!cpumask_empty(&current->cpus_allowed) &&
- !cpumask_full(&current->cpus_allowed)) {
+ if (!cpumask_empty(tsk_cpus_allowed(current)) &&
+ !cpumask_full(tsk_cpus_allowed(current))) {
int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
- for (i = 0; i < ncpus; i++)
- if (cpumask_test_cpu(i, &current->cpus_allowed)) {
+ get_online_cpus();
+ for_each_online_cpu(i)
+ if (cpumask_test_cpu(i, tsk_cpus_allowed(current))) {
ipath_cdbg(PROC, "%s[%u] affinity set for "
"cpu %d/%d\n", current->comm,
current->pid, i, ncpus);
curcpu = i;
nset++;
}
+ put_online_cpus();
if (curcpu != -1 && nset != ncpus) {
if (npresent) {
prefunit = curcpu / (ncpus / npresent);
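Note: besides switching to the tsk_cpus_allowed() accessor, the ipath hunk above brackets the CPU walk with get_online_cpus()/put_online_cpus() so the online map cannot change under the loop. Reduced to a sketch (function name is illustrative):

	#include <linux/cpu.h>
	#include <linux/cpumask.h>
	#include <linux/sched.h>

	static int example_count_allowed_online_cpus(void)
	{
		int cpu, nset = 0;

		get_online_cpus();		/* hold off CPU hotplug while iterating */
		for_each_online_cpu(cpu)
			if (cpumask_test_cpu(cpu, tsk_cpus_allowed(current)))
				nset++;
		put_online_cpus();
		return nset;
	}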
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index ceb98ee7866..43f2d0424d4 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -32,6 +32,7 @@
*/
#include <rdma/ib_smi.h>
+#include <rdma/ib_pma.h>
#include "ipath_kernel.h"
#include "ipath_verbs.h"
@@ -789,151 +790,18 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
return recv_subn_get_pkeytable(smp, ibdev);
}
-#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011)
-#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
-
-struct ib_perf {
- u8 base_version;
- u8 mgmt_class;
- u8 class_version;
- u8 method;
- __be16 status;
- __be16 unused;
- __be64 tid;
- __be16 attr_id;
- __be16 resv;
- __be32 attr_mod;
- u8 reserved[40];
- u8 data[192];
-} __attribute__ ((packed));
-
-struct ib_pma_classportinfo {
- u8 base_version;
- u8 class_version;
- __be16 cap_mask;
- u8 reserved[3];
- u8 resp_time_value; /* only lower 5 bits */
- union ib_gid redirect_gid;
- __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 redirect_lid;
- __be16 redirect_pkey;
- __be32 redirect_qp; /* only lower 24 bits */
- __be32 redirect_qkey;
- union ib_gid trap_gid;
- __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 trap_lid;
- __be16 trap_pkey;
- __be32 trap_hl_qp; /* 8, 24 bits respectively */
- __be32 trap_qkey;
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplescontrol {
- u8 opcode;
- u8 port_select;
- u8 tick;
- u8 counter_width; /* only lower 3 bits */
- __be32 counter_mask0_9; /* 2, 10 * 3, bits */
- __be16 counter_mask10_14; /* 1, 5 * 3, bits */
- u8 sample_mechanisms;
- u8 sample_status; /* only lower 2 bits */
- __be64 option_mask;
- __be64 vendor_mask;
- __be32 sample_start;
- __be32 sample_interval;
- __be16 tag;
- __be16 counter_select[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult {
- __be16 tag;
- __be16 sample_status; /* only lower 2 bits */
- __be32 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult_ext {
- __be16 tag;
- __be16 sample_status; /* only lower 2 bits */
- __be32 extended_width; /* only upper 2 bits */
- __be64 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portcounters {
- u8 reserved;
- u8 port_select;
- __be16 counter_select;
- __be16 symbol_error_counter;
- u8 link_error_recovery_counter;
- u8 link_downed_counter;
- __be16 port_rcv_errors;
- __be16 port_rcv_remphys_errors;
- __be16 port_rcv_switch_relay_errors;
- __be16 port_xmit_discards;
- u8 port_xmit_constraint_errors;
- u8 port_rcv_constraint_errors;
- u8 reserved1;
- u8 lli_ebor_errors; /* 4, 4, bits */
- __be16 reserved2;
- __be16 vl15_dropped;
- __be32 port_xmit_data;
- __be32 port_rcv_data;
- __be32 port_xmit_packets;
- __be32 port_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000)
-
-struct ib_pma_portcounters_ext {
- u8 reserved;
- u8 port_select;
- __be16 counter_select;
- __be32 reserved1;
- __be64 port_xmit_data;
- __be64 port_rcv_data;
- __be64 port_xmit_packets;
- __be64 port_rcv_packets;
- __be64 port_unicast_xmit_packets;
- __be64 port_unicast_rcv_packets;
- __be64 port_multicast_xmit_packets;
- __be64 port_multicast_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080)
-
-static int recv_pma_get_classportinfo(struct ib_perf *pmp)
+static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
{
- struct ib_pma_classportinfo *p =
- (struct ib_pma_classportinfo *)pmp->data;
+ struct ib_class_port_info *p =
+ (struct ib_class_port_info *)pmp->data;
memset(pmp->data, 0, sizeof(pmp->data));
- if (pmp->attr_mod != 0)
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0)
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
/* Indicate AllPortSelect is valid (only one port anyway) */
- p->cap_mask = cpu_to_be16(1 << 8);
+ p->capability_mask = cpu_to_be16(1 << 8);
p->base_version = 1;
p->class_version = 1;
/*
@@ -957,7 +825,7 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
COUNTER_MASK(1, 3) | \
COUNTER_MASK(1, 4))
-static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
+static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplescontrol *p =
@@ -970,9 +838,9 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 ||
+ if (pmp->mad_hdr.attr_mod != 0 ||
(port_select != port && port_select != 0xFF))
- pmp->status |= IB_SMP_INVALID_FIELD;
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
/*
* Ticks are 10x the link transfer period which for 2.5Gbs is 4
* nsec. 0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec. Sample
@@ -1006,7 +874,7 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
+static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplescontrol *p =
@@ -1017,9 +885,9 @@ static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
u8 status;
int ret;
- if (pmp->attr_mod != 0 ||
+ if (pmp->mad_hdr.attr_mod != 0 ||
(p->port_select != port && p->port_select != 0xFF)) {
- pmp->status |= IB_SMP_INVALID_FIELD;
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
@@ -1093,7 +961,7 @@ static u64 get_counter(struct ipath_ibdev *dev,
return ret;
}
-static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
+static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
struct ib_device *ibdev)
{
struct ib_pma_portsamplesresult *p =
@@ -1118,7 +986,7 @@ static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev)
{
struct ib_pma_portsamplesresult_ext *p =
@@ -1145,7 +1013,7 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int recv_pma_get_portcounters(struct ib_perf *pmp,
+static int recv_pma_get_portcounters(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1179,9 +1047,9 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 ||
+ if (pmp->mad_hdr.attr_mod != 0 ||
(port_select != port && port_select != 0xFF))
- pmp->status |= IB_SMP_INVALID_FIELD;
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
if (cntrs.symbol_error_counter > 0xFFFFUL)
p->symbol_error_counter = cpu_to_be16(0xFFFF);
@@ -1216,7 +1084,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
cntrs.local_link_integrity_errors = 0xFUL;
if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
cntrs.excessive_buffer_overrun_errors;
if (cntrs.vl15_dropped > 0xFFFFUL)
p->vl15_dropped = cpu_to_be16(0xFFFF);
@@ -1244,7 +1112,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
+static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters_ext *p =
@@ -1265,9 +1133,9 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 ||
+ if (pmp->mad_hdr.attr_mod != 0 ||
(port_select != port && port_select != 0xFF))
- pmp->status |= IB_SMP_INVALID_FIELD;
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
p->port_xmit_data = cpu_to_be64(swords);
p->port_rcv_data = cpu_to_be64(rwords);
@@ -1281,7 +1149,7 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int recv_pma_set_portcounters(struct ib_perf *pmp,
+static int recv_pma_set_portcounters(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1344,7 +1212,7 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp,
return recv_pma_get_portcounters(pmp, ibdev, port);
}
-static int recv_pma_set_portcounters_ext(struct ib_perf *pmp,
+static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1518,19 +1386,19 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
- struct ib_perf *pmp = (struct ib_perf *)out_mad;
+ struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
int ret;
*out_mad = *in_mad;
- if (pmp->class_version != 1) {
- pmp->status |= IB_SMP_UNSUP_VERSION;
+ if (pmp->mad_hdr.class_version != 1) {
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
- switch (pmp->method) {
+ switch (pmp->mad_hdr.method) {
case IB_MGMT_METHOD_GET:
- switch (pmp->attr_id) {
+ switch (pmp->mad_hdr.attr_id) {
case IB_PMA_CLASS_PORT_INFO:
ret = recv_pma_get_classportinfo(pmp);
goto bail;
@@ -1554,13 +1422,13 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
port_num);
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
case IB_MGMT_METHOD_SET:
- switch (pmp->attr_id) {
+ switch (pmp->mad_hdr.attr_id) {
case IB_PMA_PORT_SAMPLES_CONTROL:
ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
port_num);
@@ -1574,7 +1442,7 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
port_num);
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
@@ -1588,7 +1456,7 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METHOD;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
ret = reply((struct ib_smp *) pmp);
}
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 57ffa50f509..f36da994a85 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -35,6 +35,7 @@
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
+#include <rdma/ib_pma.h>
#include "mlx4_ib.h"
@@ -232,7 +233,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
}
}
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
@@ -302,6 +303,71 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
+static void edit_counter(struct mlx4_counter *cnt,
+ struct ib_pma_portcounters *pma_cnt)
+{
+ pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
+ pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
+ pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
+ pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
+}
+
+static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+ int err;
+ u32 inmod = dev->counters[port_num - 1] & 0xffff;
+ u8 mode;
+
+ if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
+ return -EINVAL;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
+ if (IS_ERR(mailbox))
+ return IB_MAD_RESULT_FAILURE;
+
+ err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
+ MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
+ if (err)
+ err = IB_MAD_RESULT_FAILURE;
+ else {
+ memset(out_mad->data, 0, sizeof out_mad->data);
+ mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
+ switch (mode & 0xf) {
+ case 0:
+ edit_counter(mailbox->buf,
+ (void *)(out_mad->data + 40));
+ err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+ break;
+ default:
+ err = IB_MAD_RESULT_FAILURE;
+ }
+ }
+
+ mlx4_free_cmd_mailbox(dev->dev, mailbox);
+
+ return err;
+}
+
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ switch (rdma_port_get_link_layer(ibdev, port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
+ return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
+ case IB_LINK_LAYER_ETHERNET:
+ return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
+ default:
+ return -EINVAL;
+ }
+}
+
static void send_handler(struct ib_mad_agent *agent,
struct ib_mad_send_wc *mad_send_wc)
{
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index fbe1973f77b..fa643f4f4e2 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -816,7 +816,7 @@ static void update_gids_task(struct work_struct *work)
memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
event.device = &gw->dev->ib_dev;
event.element.port_num = gw->port;
- event.event = IB_EVENT_LID_CHANGE;
+ event.event = IB_EVENT_GID_CHANGE;
ib_dispatch_event(&event);
}
@@ -1098,11 +1098,21 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (init_node_data(ibdev))
goto err_map;
+ for (i = 0; i < ibdev->num_ports; ++i) {
+ if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
+ IB_LINK_LAYER_ETHERNET) {
+ err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
+ if (err)
+ ibdev->counters[i] = -1;
+ } else
+ ibdev->counters[i] = -1;
+ }
+
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
if (ib_register_device(&ibdev->ib_dev, NULL))
- goto err_map;
+ goto err_counter;
if (mlx4_ib_mad_init(ibdev))
goto err_reg;
@@ -1132,6 +1142,10 @@ err_notif:
err_reg:
ib_unregister_device(&ibdev->ib_dev);
+err_counter:
+ for (; i; --i)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+
err_map:
iounmap(ibdev->uar_map);
@@ -1160,7 +1174,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
ibdev->iboe.nb.notifier_call = NULL;
}
iounmap(ibdev->uar_map);
-
+ for (p = 0; p < ibdev->num_ports; ++p)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 2a322f21049..e4bf2cff866 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -193,6 +193,7 @@ struct mlx4_ib_dev {
struct mutex cap_mask_mutex;
bool ib_active;
struct mlx4_ib_iboe iboe;
+ int counters[MLX4_MAX_PORTS];
};
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 2001f20a436..3a91d9d8dc5 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -893,7 +893,6 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
--path->static_rate;
} else
path->static_rate = 0;
- path->counter_index = 0xff;
if (ah->ah_flags & IB_AH_GRH) {
if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
@@ -1034,6 +1033,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
+ if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ if (dev->counters[qp->port - 1] != -1) {
+ context->pri_path.counter_index =
+ dev->counters[qp->port - 1];
+ optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
+ } else
+ context->pri_path.counter_index = 0xff;
+ }
+
if (attr_mask & IB_QP_PKEY_INDEX) {
context->pri_path.pkey_index = attr->pkey_index;
optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 7bfa2a16495..3082b3b3d62 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -301,6 +301,38 @@ static int mthca_cmd_post(struct mthca_dev *dev,
return err;
}
+
+static int mthca_status_to_errno(u8 status)
+{
+ static const int trans_table[] = {
+ [MTHCA_CMD_STAT_INTERNAL_ERR] = -EIO,
+ [MTHCA_CMD_STAT_BAD_OP] = -EPERM,
+ [MTHCA_CMD_STAT_BAD_PARAM] = -EINVAL,
+ [MTHCA_CMD_STAT_BAD_SYS_STATE] = -ENXIO,
+ [MTHCA_CMD_STAT_BAD_RESOURCE] = -EBADF,
+ [MTHCA_CMD_STAT_RESOURCE_BUSY] = -EBUSY,
+ [MTHCA_CMD_STAT_DDR_MEM_ERR] = -ENOMEM,
+ [MTHCA_CMD_STAT_EXCEED_LIM] = -ENOMEM,
+ [MTHCA_CMD_STAT_BAD_RES_STATE] = -EBADF,
+ [MTHCA_CMD_STAT_BAD_INDEX] = -EBADF,
+ [MTHCA_CMD_STAT_BAD_NVMEM] = -EFAULT,
+ [MTHCA_CMD_STAT_BAD_QPEE_STATE] = -EINVAL,
+ [MTHCA_CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
+ [MTHCA_CMD_STAT_REG_BOUND] = -EBUSY,
+ [MTHCA_CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
+ [MTHCA_CMD_STAT_BAD_PKT] = -EBADMSG,
+ [MTHCA_CMD_STAT_BAD_SIZE] = -ENOMEM,
+ };
+
+ if (status >= ARRAY_SIZE(trans_table) ||
+ (status != MTHCA_CMD_STAT_OK
+ && trans_table[status] == 0))
+ return -EINVAL;
+
+ return trans_table[status];
+}
+
+
static int mthca_cmd_poll(struct mthca_dev *dev,
u64 in_param,
u64 *out_param,
@@ -308,11 +340,11 @@ static int mthca_cmd_poll(struct mthca_dev *dev,
u32 in_modifier,
u8 op_modifier,
u16 op,
- unsigned long timeout,
- u8 *status)
+ unsigned long timeout)
{
int err = 0;
unsigned long end;
+ u8 status;
down(&dev->cmd.poll_sem);
@@ -341,7 +373,12 @@ static int mthca_cmd_poll(struct mthca_dev *dev,
(u64) be32_to_cpu((__force __be32)
__raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));
- *status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;
+ status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;
+ if (status) {
+ mthca_dbg(dev, "Command %02x completed with status %02x\n",
+ op, status);
+ err = mthca_status_to_errno(status);
+ }
out:
up(&dev->cmd.poll_sem);
@@ -374,8 +411,7 @@ static int mthca_cmd_wait(struct mthca_dev *dev,
u32 in_modifier,
u8 op_modifier,
u16 op,
- unsigned long timeout,
- u8 *status)
+ unsigned long timeout)
{
int err = 0;
struct mthca_cmd_context *context;
@@ -407,10 +443,11 @@ static int mthca_cmd_wait(struct mthca_dev *dev,
if (err)
goto out;
- *status = context->status;
- if (*status)
+ if (context->status) {
mthca_dbg(dev, "Command %02x completed with status %02x\n",
- op, *status);
+ op, context->status);
+ err = mthca_status_to_errno(context->status);
+ }
if (out_is_imm)
*out_param = context->out_param;
@@ -432,17 +469,16 @@ static int mthca_cmd_box(struct mthca_dev *dev,
u32 in_modifier,
u8 op_modifier,
u16 op,
- unsigned long timeout,
- u8 *status)
+ unsigned long timeout)
{
if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS)
return mthca_cmd_wait(dev, in_param, &out_param, 0,
in_modifier, op_modifier, op,
- timeout, status);
+ timeout);
else
return mthca_cmd_poll(dev, in_param, &out_param, 0,
in_modifier, op_modifier, op,
- timeout, status);
+ timeout);
}
/* Invoke a command with no output parameter */
@@ -451,11 +487,10 @@ static int mthca_cmd(struct mthca_dev *dev,
u32 in_modifier,
u8 op_modifier,
u16 op,
- unsigned long timeout,
- u8 *status)
+ unsigned long timeout)
{
return mthca_cmd_box(dev, in_param, 0, in_modifier,
- op_modifier, op, timeout, status);
+ op_modifier, op, timeout);
}
/*
@@ -469,17 +504,16 @@ static int mthca_cmd_imm(struct mthca_dev *dev,
u32 in_modifier,
u8 op_modifier,
u16 op,
- unsigned long timeout,
- u8 *status)
+ unsigned long timeout)
{
if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS)
return mthca_cmd_wait(dev, in_param, out_param, 1,
in_modifier, op_modifier, op,
- timeout, status);
+ timeout);
else
return mthca_cmd_poll(dev, in_param, out_param, 1,
in_modifier, op_modifier, op,
- timeout, status);
+ timeout);
}
int mthca_cmd_init(struct mthca_dev *dev)
@@ -596,14 +630,14 @@ void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
kfree(mailbox);
}
-int mthca_SYS_EN(struct mthca_dev *dev, u8 *status)
+int mthca_SYS_EN(struct mthca_dev *dev)
{
u64 out;
int ret;
- ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D, status);
+ ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D);
- if (*status == MTHCA_CMD_STAT_DDR_MEM_ERR)
+ if (ret == -ENOMEM)
mthca_warn(dev, "SYS_EN DDR error: syn=%x, sock=%d, "
"sladdr=%d, SPD source=%s\n",
(int) (out >> 6) & 0xf, (int) (out >> 4) & 3,
@@ -612,13 +646,13 @@ int mthca_SYS_EN(struct mthca_dev *dev, u8 *status)
return ret;
}
-int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status)
+int mthca_SYS_DIS(struct mthca_dev *dev)
{
- return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status);
+ return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C);
}
static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
- u64 virt, u8 *status)
+ u64 virt)
{
struct mthca_mailbox *mailbox;
struct mthca_icm_iter iter;
@@ -666,8 +700,8 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
if (++nent == MTHCA_MAILBOX_SIZE / 16) {
err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
- CMD_TIME_CLASS_B, status);
- if (err || *status)
+ CMD_TIME_CLASS_B);
+ if (err)
goto out;
nent = 0;
}
@@ -676,7 +710,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
if (nent)
err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
switch (op) {
case CMD_MAP_FA:
@@ -696,19 +730,19 @@ out:
return err;
}
-int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
+int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm)
{
- return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1, status);
+ return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1);
}
-int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status)
+int mthca_UNMAP_FA(struct mthca_dev *dev)
{
- return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B, status);
+ return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B);
}
-int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
+int mthca_RUN_FW(struct mthca_dev *dev)
{
- return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A, status);
+ return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A);
}
static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
@@ -737,7 +771,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
}
-int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
+int mthca_QUERY_FW(struct mthca_dev *dev)
{
struct mthca_mailbox *mailbox;
u32 *outbox;
@@ -771,7 +805,7 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
if (err)
goto out;
@@ -843,7 +877,7 @@ out:
return err;
}
-int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
+int mthca_ENABLE_LAM(struct mthca_dev *dev)
{
struct mthca_mailbox *mailbox;
u8 info;
@@ -864,14 +898,11 @@ int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM,
- CMD_TIME_CLASS_C, status);
+ CMD_TIME_CLASS_C);
if (err)
goto out;
- if (*status == MTHCA_CMD_STAT_LAM_NOT_PRE)
- goto out;
-
MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET);
MTHCA_GET(dev->ddr_end, outbox, ENABLE_LAM_END_OFFSET);
MTHCA_GET(info, outbox, ENABLE_LAM_INFO_OFFSET);
@@ -896,12 +927,12 @@ out:
return err;
}
-int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status)
+int mthca_DISABLE_LAM(struct mthca_dev *dev)
{
- return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status);
+ return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C);
}
-int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
+int mthca_QUERY_DDR(struct mthca_dev *dev)
{
struct mthca_mailbox *mailbox;
u8 info;
@@ -922,7 +953,7 @@ int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
if (err)
goto out;
@@ -952,7 +983,7 @@ out:
}
int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
- struct mthca_dev_lim *dev_lim, u8 *status)
+ struct mthca_dev_lim *dev_lim)
{
struct mthca_mailbox *mailbox;
u32 *outbox;
@@ -1028,7 +1059,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
if (err)
goto out;
@@ -1232,7 +1263,7 @@ static void get_board_id(void *vsd, char *board_id)
}
int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
- struct mthca_adapter *adapter, u8 *status)
+ struct mthca_adapter *adapter)
{
struct mthca_mailbox *mailbox;
u32 *outbox;
@@ -1251,7 +1282,7 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
if (err)
goto out;
@@ -1275,8 +1306,7 @@ out:
}
int mthca_INIT_HCA(struct mthca_dev *dev,
- struct mthca_init_hca_param *param,
- u8 *status)
+ struct mthca_init_hca_param *param)
{
struct mthca_mailbox *mailbox;
__be32 *inbox;
@@ -1393,7 +1423,8 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET);
}
- err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, CMD_TIME_CLASS_D, status);
+ err = mthca_cmd(dev, mailbox->dma, 0, 0,
+ CMD_INIT_HCA, CMD_TIME_CLASS_D);
mthca_free_mailbox(dev, mailbox);
return err;
@@ -1401,7 +1432,7 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
int mthca_INIT_IB(struct mthca_dev *dev,
struct mthca_init_ib_param *param,
- int port, u8 *status)
+ int port)
{
struct mthca_mailbox *mailbox;
u32 *inbox;
@@ -1445,24 +1476,24 @@ int mthca_INIT_IB(struct mthca_dev *dev,
MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET);
err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
mthca_free_mailbox(dev, mailbox);
return err;
}
-int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status)
+int mthca_CLOSE_IB(struct mthca_dev *dev, int port)
{
- return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, CMD_TIME_CLASS_A, status);
+ return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, CMD_TIME_CLASS_A);
}
-int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status)
+int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic)
{
- return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, CMD_TIME_CLASS_C, status);
+ return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, CMD_TIME_CLASS_C);
}
int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
- int port, u8 *status)
+ int port)
{
struct mthca_mailbox *mailbox;
u32 *inbox;
@@ -1491,18 +1522,18 @@ int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET);
err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
mthca_free_mailbox(dev, mailbox);
return err;
}
-int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status)
+int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt)
{
- return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt, status);
+ return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt);
}
-int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
+int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt)
{
struct mthca_mailbox *mailbox;
__be64 *inbox;
@@ -1517,7 +1548,7 @@ int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status
inbox[1] = cpu_to_be64(dma_addr);
err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
mthca_free_mailbox(dev, mailbox);
@@ -1528,31 +1559,31 @@ int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status
return err;
}
-int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status)
+int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count)
{
mthca_dbg(dev, "Unmapping %d pages at %llx from ICM.\n",
page_count, (unsigned long long) virt);
- return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status);
+ return mthca_cmd(dev, virt, page_count, 0,
+ CMD_UNMAP_ICM, CMD_TIME_CLASS_B);
}
-int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
+int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm)
{
- return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1, status);
+ return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1);
}
-int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status)
+int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev)
{
- return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B, status);
+ return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B);
}
-int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
- u8 *status)
+int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages)
{
- int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 0, CMD_SET_ICM_SIZE,
- CMD_TIME_CLASS_A, status);
+ int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0,
+ 0, CMD_SET_ICM_SIZE, CMD_TIME_CLASS_A);
- if (ret || status)
+ if (ret)
return ret;
/*
@@ -1566,74 +1597,73 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
}
int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int mpt_index, u8 *status)
+ int mpt_index)
{
return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
}
int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int mpt_index, u8 *status)
+ int mpt_index)
{
return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
!mailbox, CMD_HW2SW_MPT,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
}
int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int num_mtt, u8 *status)
+ int num_mtt)
{
return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
}
-int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status)
+int mthca_SYNC_TPT(struct mthca_dev *dev)
{
- return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B, status);
+ return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B);
}
int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
- int eq_num, u8 *status)
+ int eq_num)
{
mthca_dbg(dev, "%s mask %016llx for eqn %d\n",
unmap ? "Clearing" : "Setting",
(unsigned long long) event_mask, eq_num);
return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num,
- 0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status);
+ 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
}
int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int eq_num, u8 *status)
+ int eq_num)
{
return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
}
int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int eq_num, u8 *status)
+ int eq_num)
{
return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0,
CMD_HW2SW_EQ,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
}
int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int cq_num, u8 *status)
+ int cq_num)
{
return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
}
int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int cq_num, u8 *status)
+ int cq_num)
{
return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0,
CMD_HW2SW_CQ,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
}
-int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size,
- u8 *status)
+int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size)
{
struct mthca_mailbox *mailbox;
__be32 *inbox;
@@ -1657,44 +1687,43 @@ int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size,
MTHCA_PUT(inbox, lkey, RESIZE_CQ_LKEY_OFFSET);
err = mthca_cmd(dev, mailbox->dma, cq_num, 1, CMD_RESIZE_CQ,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
mthca_free_mailbox(dev, mailbox);
return err;
}
int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int srq_num, u8 *status)
+ int srq_num)
{
return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
}
int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int srq_num, u8 *status)
+ int srq_num)
{
return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
CMD_HW2SW_SRQ,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
}
int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
- struct mthca_mailbox *mailbox, u8 *status)
+ struct mthca_mailbox *mailbox)
{
return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
- CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status);
+ CMD_QUERY_SRQ, CMD_TIME_CLASS_A);
}
-int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit)
{
return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
}
int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
enum ib_qp_state next, u32 num, int is_ee,
- struct mthca_mailbox *mailbox, u32 optmask,
- u8 *status)
+ struct mthca_mailbox *mailbox, u32 optmask)
{
static const u16 op[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = {
@@ -1755,7 +1784,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
(!!is_ee << 24) | num, op_mod,
- op[cur][next], CMD_TIME_CLASS_C, status);
+ op[cur][next], CMD_TIME_CLASS_C);
if (0 && mailbox) {
int i;
@@ -1789,21 +1818,20 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
}
err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num,
- op_mod, op[cur][next], CMD_TIME_CLASS_C, status);
+ op_mod, op[cur][next], CMD_TIME_CLASS_C);
}
return err;
}
int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
- struct mthca_mailbox *mailbox, u8 *status)
+ struct mthca_mailbox *mailbox)
{
return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0,
- CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status);
+ CMD_QUERY_QPEE, CMD_TIME_CLASS_A);
}
-int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
- u8 *status)
+int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
{
u8 op_mod;
@@ -1825,12 +1853,12 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
}
return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
}
int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
- void *in_mad, void *response_mad, u8 *status)
+ void *in_mad, void *response_mad)
{
struct mthca_mailbox *inmailbox, *outmailbox;
void *inbox;
@@ -1897,9 +1925,9 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
in_modifier, op_modifier,
- CMD_MAD_IFC, CMD_TIME_CLASS_C, status);
+ CMD_MAD_IFC, CMD_TIME_CLASS_C);
- if (!err && !*status)
+ if (!err)
memcpy(response_mad, outmailbox->buf, 256);
mthca_free_mailbox(dev, inmailbox);
@@ -1908,33 +1936,33 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
}
int mthca_READ_MGM(struct mthca_dev *dev, int index,
- struct mthca_mailbox *mailbox, u8 *status)
+ struct mthca_mailbox *mailbox)
{
return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
- CMD_READ_MGM, CMD_TIME_CLASS_A, status);
+ CMD_READ_MGM, CMD_TIME_CLASS_A);
}
int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
- struct mthca_mailbox *mailbox, u8 *status)
+ struct mthca_mailbox *mailbox)
{
return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
}
int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- u16 *hash, u8 *status)
+ u16 *hash)
{
u64 imm;
int err;
err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
*hash = imm;
return err;
}
-int mthca_NOP(struct mthca_dev *dev, u8 *status)
+int mthca_NOP(struct mthca_dev *dev)
{
- return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, msecs_to_jiffies(100), status);
+ return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, msecs_to_jiffies(100));
}
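
Editorial note on the hunks above: every mthca_cmd()/mthca_cmd_box()/mthca_cmd_imm() wrapper loses its u8 *status out-parameter, and the firmware status byte is instead folded into the int return value as a negative errno. A minimal, self-contained sketch of that idea follows; the helper name, the numeric status values, and the default -EINVAL case are illustrative assumptions, while the three mappings shown (DDR_MEM_ERR to -ENOMEM, LAM_NOT_PRE to -EAGAIN, BAD_PKT to -EBADMSG) mirror what the callers in this patch test for.

    /* Sketch only: names and constant values are assumptions, not driver code. */
    #include <errno.h>
    #include <stdio.h>

    enum {
        DEMO_STAT_OK          = 0x00,
        DEMO_STAT_DDR_MEM_ERR = 0x05,   /* assumed value */
        DEMO_STAT_LAM_NOT_PRE = 0x0a,   /* assumed value */
        DEMO_STAT_BAD_PKT     = 0x30    /* assumed value */
    };

    static int demo_status_to_errno(unsigned char status)
    {
        switch (status) {
        case DEMO_STAT_OK:          return 0;
        case DEMO_STAT_DDR_MEM_ERR: return -ENOMEM;   /* cf. mthca_SYS_EN() hunk above */
        case DEMO_STAT_LAM_NOT_PRE: return -EAGAIN;   /* cf. mthca_ENABLE_LAM() callers */
        case DEMO_STAT_BAD_PKT:     return -EBADMSG;  /* cf. mthca_MAD_IFC() callers */
        default:                    return -EINVAL;   /* any other status is just a failure */
        }
    }

    int main(void)
    {
        /* One return value now carries both "failed" and "which way it failed". */
        printf("LAM_NOT_PRE maps to %d (-EAGAIN is %d)\n",
               demo_status_to_errno(DEMO_STAT_LAM_NOT_PRE), -EAGAIN);
        return 0;
    }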
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index 6efd3265f24..f952244c54d 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -252,79 +252,74 @@ struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
gfp_t gfp_mask);
void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox);
-int mthca_SYS_EN(struct mthca_dev *dev, u8 *status);
-int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status);
-int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
-int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status);
-int mthca_RUN_FW(struct mthca_dev *dev, u8 *status);
-int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status);
-int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status);
-int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status);
-int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status);
+int mthca_SYS_EN(struct mthca_dev *dev);
+int mthca_SYS_DIS(struct mthca_dev *dev);
+int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm);
+int mthca_UNMAP_FA(struct mthca_dev *dev);
+int mthca_RUN_FW(struct mthca_dev *dev);
+int mthca_QUERY_FW(struct mthca_dev *dev);
+int mthca_ENABLE_LAM(struct mthca_dev *dev);
+int mthca_DISABLE_LAM(struct mthca_dev *dev);
+int mthca_QUERY_DDR(struct mthca_dev *dev);
int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
- struct mthca_dev_lim *dev_lim, u8 *status);
+ struct mthca_dev_lim *dev_lim);
int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
- struct mthca_adapter *adapter, u8 *status);
+ struct mthca_adapter *adapter);
int mthca_INIT_HCA(struct mthca_dev *dev,
- struct mthca_init_hca_param *param,
- u8 *status);
+ struct mthca_init_hca_param *param);
int mthca_INIT_IB(struct mthca_dev *dev,
struct mthca_init_ib_param *param,
- int port, u8 *status);
-int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status);
-int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status);
+ int port);
+int mthca_CLOSE_IB(struct mthca_dev *dev, int port);
+int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic);
int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
- int port, u8 *status);
-int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status);
-int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status);
-int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status);
-int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
-int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status);
-int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
- u8 *status);
+ int port);
+int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt);
+int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt);
+int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count);
+int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm);
+int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev);
+int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages);
int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int mpt_index, u8 *status);
+ int mpt_index);
int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int mpt_index, u8 *status);
+ int mpt_index);
int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int num_mtt, u8 *status);
-int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status);
+ int num_mtt);
+int mthca_SYNC_TPT(struct mthca_dev *dev);
int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
- int eq_num, u8 *status);
+ int eq_num);
int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int eq_num, u8 *status);
+ int eq_num);
int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int eq_num, u8 *status);
+ int eq_num);
int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int cq_num, u8 *status);
+ int cq_num);
int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int cq_num, u8 *status);
-int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size,
- u8 *status);
+ int cq_num);
+int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size);
int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int srq_num, u8 *status);
+ int srq_num);
int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int srq_num, u8 *status);
+ int srq_num);
int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
- struct mthca_mailbox *mailbox, u8 *status);
-int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
+ struct mthca_mailbox *mailbox);
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit);
int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
enum ib_qp_state next, u32 num, int is_ee,
- struct mthca_mailbox *mailbox, u32 optmask,
- u8 *status);
+ struct mthca_mailbox *mailbox, u32 optmask);
int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
- struct mthca_mailbox *mailbox, u8 *status);
-int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
- u8 *status);
+ struct mthca_mailbox *mailbox);
+int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn);
int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
- void *in_mad, void *response_mad, u8 *status);
+ void *in_mad, void *response_mad);
int mthca_READ_MGM(struct mthca_dev *dev, int index,
- struct mthca_mailbox *mailbox, u8 *status);
+ struct mthca_mailbox *mailbox);
int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
- struct mthca_mailbox *mailbox, u8 *status);
+ struct mthca_mailbox *mailbox);
int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- u16 *hash, u8 *status);
-int mthca_NOP(struct mthca_dev *dev, u8 *status);
+ u16 *hash);
+int mthca_NOP(struct mthca_dev *dev);
#endif /* MTHCA_CMD_H */
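
With the prototypes above reduced by one argument, the call-site diffs that follow (mthca_cq.c, mthca_eq.c, and so on) all collapse the old two-step check, an err test plus a separate status test that ended in err = -EINVAL, into a single error check. A hedged sketch of the caller-side shape, using an invented command function so it stays self-contained:

    /* Sketch only: demo_SW2HW_CQ() is a stand-in, not a real driver symbol. */
    #include <errno.h>
    #include <stdio.h>

    /* Pretend command wrapper: already returns 0 or a negative errno. */
    static int demo_SW2HW_CQ(int cqn)
    {
        return cqn < 0 ? -EINVAL : 0;
    }

    static int demo_init_cq(int cqn)
    {
        int err;

        /* Old shape (removed by this patch):
         *     err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
         *     if (err) { ... }
         *     if (status) { ...; err = -EINVAL; ... }
         * New shape: a single check is enough.
         */
        err = demo_SW2HW_CQ(cqn);
        if (err) {
            fprintf(stderr, "SW2HW_CQ failed (%d)\n", err);
            return err;
        }
        return 0;
    }

    int main(void)
    {
        return demo_init_cq(-1) ? 1 : 0;
    }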
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 18ee3fa4b88..53157b86a1b 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -779,7 +779,6 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
struct mthca_mailbox *mailbox;
struct mthca_cq_context *cq_context;
int err = -ENOMEM;
- u8 status;
cq->ibcq.cqe = nent - 1;
cq->is_kernel = !ctx;
@@ -847,19 +846,12 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
cq_context->state_db = cpu_to_be32(cq->arm_db_index);
}
- err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
+ err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn);
if (err) {
mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
goto err_out_free_mr;
}
- if (status) {
- mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
- status);
- err = -EINVAL;
- goto err_out_free_mr;
- }
-
spin_lock_irq(&dev->cq_table.lock);
if (mthca_array_set(&dev->cq_table.cq,
cq->cqn & (dev->limits.num_cqs - 1),
@@ -915,7 +907,6 @@ void mthca_free_cq(struct mthca_dev *dev,
{
struct mthca_mailbox *mailbox;
int err;
- u8 status;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox)) {
@@ -923,11 +914,9 @@ void mthca_free_cq(struct mthca_dev *dev,
return;
}
- err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
+ err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn);
if (err)
mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
- else if (status)
- mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);
if (0) {
__be32 *ctx = mailbox->buf;
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 76785c653c1..7c9d35f39d7 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -474,7 +474,6 @@ static int mthca_create_eq(struct mthca_dev *dev,
struct mthca_eq_context *eq_context;
int err = -ENOMEM;
int i;
- u8 status;
eq->dev = dev;
eq->nent = roundup_pow_of_two(max(nent, 2));
@@ -543,15 +542,9 @@ static int mthca_create_eq(struct mthca_dev *dev,
eq_context->intr = intr;
eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey);
- err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
+ err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn);
if (err) {
- mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
- goto err_out_free_mr;
- }
- if (status) {
- mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
- status);
- err = -EINVAL;
+ mthca_warn(dev, "SW2HW_EQ returned %d\n", err);
goto err_out_free_mr;
}
@@ -597,7 +590,6 @@ static void mthca_free_eq(struct mthca_dev *dev,
{
struct mthca_mailbox *mailbox;
int err;
- u8 status;
int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
PAGE_SIZE;
int i;
@@ -606,11 +598,9 @@ static void mthca_free_eq(struct mthca_dev *dev,
if (IS_ERR(mailbox))
return;
- err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
+ err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn);
if (err)
- mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
- if (status)
- mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);
+ mthca_warn(dev, "HW2SW_EQ returned %d\n", err);
dev->eq_table.arm_mask &= ~eq->eqn_mask;
@@ -738,7 +728,6 @@ static void mthca_unmap_eq_regs(struct mthca_dev *dev)
int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
int ret;
- u8 status;
/*
* We assume that mapping one page is enough for the whole EQ
@@ -757,9 +746,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
return -ENOMEM;
}
- ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
- if (!ret && status)
- ret = -EINVAL;
+ ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt);
if (ret) {
pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
@@ -771,9 +758,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
void mthca_unmap_eq_icm(struct mthca_dev *dev)
{
- u8 status;
-
- mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
+ mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1);
pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
__free_page(dev->eq_table.icm_page);
@@ -782,7 +767,6 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev)
int mthca_init_eq_table(struct mthca_dev *dev)
{
int err;
- u8 status;
u8 intr;
int i;
@@ -864,22 +848,16 @@ int mthca_init_eq_table(struct mthca_dev *dev)
}
err = mthca_MAP_EQ(dev, async_mask(dev),
- 0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
+ 0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
if (err)
mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
- if (status)
- mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
- dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);
err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
- 0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
+ 0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
if (err)
mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
- if (status)
- mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
- dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);
for (i = 0; i < MTHCA_NUM_EQ; ++i)
if (mthca_is_memfree(dev))
@@ -909,15 +887,14 @@ err_out_free:
void mthca_cleanup_eq_table(struct mthca_dev *dev)
{
- u8 status;
int i;
mthca_free_irqs(dev);
mthca_MAP_EQ(dev, async_mask(dev),
- 1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
+ 1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
- 1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
+ 1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
for (i = 0; i < MTHCA_NUM_EQ; ++i)
mthca_free_eq(dev, &dev->eq_table.eq[i]);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 03a59534f59..b6f7f457fc5 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -201,7 +201,6 @@ int mthca_process_mad(struct ib_device *ibdev,
struct ib_mad *out_mad)
{
int err;
- u8 status;
u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
u16 prev_lid = 0;
struct ib_port_attr pattr;
@@ -252,17 +251,11 @@ int mthca_process_mad(struct ib_device *ibdev,
err = mthca_MAD_IFC(to_mdev(ibdev),
mad_flags & IB_MAD_IGNORE_MKEY,
mad_flags & IB_MAD_IGNORE_BKEY,
- port_num, in_wc, in_grh, in_mad, out_mad,
- &status);
- if (err) {
- mthca_err(to_mdev(ibdev), "MAD_IFC failed\n");
- return IB_MAD_RESULT_FAILURE;
- }
- if (status == MTHCA_CMD_STAT_BAD_PKT)
+ port_num, in_wc, in_grh, in_mad, out_mad);
+ if (err == -EBADMSG)
return IB_MAD_RESULT_SUCCESS;
- if (status) {
- mthca_err(to_mdev(ibdev), "MAD_IFC returned status %02x\n",
- status);
+ else if (err) {
+ mthca_err(to_mdev(ibdev), "MAD_IFC returned %d\n", err);
return IB_MAD_RESULT_FAILURE;
}
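
Two call sites in this patch keep behaviour that used to hinge on a specific status code by testing a specific errno instead: mthca_process_mad() above treats -EBADMSG (formerly MTHCA_CMD_STAT_BAD_PKT) as a non-fatal "drop the MAD" case, and mthca_init_arbel() further down treats -EAGAIN from mthca_ENABLE_LAM() (formerly MTHCA_CMD_STAT_LAM_NOT_PRE) as "no HCA-attached memory, run MemFree". A small sketch of that branch-on-errno style, with an invented query function:

    /* Sketch only: demo_query() stands in for a command that can fail "softly". */
    #include <errno.h>
    #include <stdio.h>

    static int demo_query(int give_bad_packet)
    {
        return give_bad_packet ? -EBADMSG : 0;
    }

    static int demo_process(int give_bad_packet)
    {
        int err = demo_query(give_bad_packet);

        if (err == -EBADMSG)
            return 0;       /* bad packet: succeed without a reply, as in mthca_process_mad() */
        else if (err) {
            fprintf(stderr, "query returned %d\n", err);
            return err;     /* every other errno is still a hard failure */
        }
        return 1;           /* normal reply path */
    }

    int main(void)
    {
        printf("%d %d\n", demo_process(0), demo_process(1));
        return 0;
    }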
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index f24b79b805f..aa12a533ae9 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -149,7 +149,7 @@ static int mthca_tune_pci(struct mthca_dev *mdev)
} else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");
- if (pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP)) {
+ if (pci_is_pcie(mdev->pdev)) {
if (pcie_set_readrq(mdev->pdev, 4096)) {
mthca_err(mdev, "Couldn't write PCI Express read request, "
"aborting.\n");
@@ -165,19 +165,14 @@ static int mthca_tune_pci(struct mthca_dev *mdev)
static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
{
int err;
- u8 status;
mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;
- err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
+ err = mthca_QUERY_DEV_LIM(mdev, dev_lim);
if (err) {
- mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+ mthca_err(mdev, "QUERY_DEV_LIM command returned %d"
+ ", aborting.\n", err);
return err;
}
- if (status) {
- mthca_err(mdev, "QUERY_DEV_LIM returned status 0x%02x, "
- "aborting.\n", status);
- return -EINVAL;
- }
if (dev_lim->min_page_sz > PAGE_SIZE) {
mthca_err(mdev, "HCA minimum page size of %d bigger than "
"kernel PAGE_SIZE of %ld, aborting.\n",
@@ -293,49 +288,32 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
static int mthca_init_tavor(struct mthca_dev *mdev)
{
s64 size;
- u8 status;
int err;
struct mthca_dev_lim dev_lim;
struct mthca_profile profile;
struct mthca_init_hca_param init_hca;
- err = mthca_SYS_EN(mdev, &status);
+ err = mthca_SYS_EN(mdev);
if (err) {
- mthca_err(mdev, "SYS_EN command failed, aborting.\n");
+ mthca_err(mdev, "SYS_EN command returned %d, aborting.\n", err);
return err;
}
- if (status) {
- mthca_err(mdev, "SYS_EN returned status 0x%02x, "
- "aborting.\n", status);
- return -EINVAL;
- }
- err = mthca_QUERY_FW(mdev, &status);
+ err = mthca_QUERY_FW(mdev);
if (err) {
- mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
- goto err_disable;
- }
- if (status) {
- mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
- "aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "QUERY_FW command returned %d,"
+ " aborting.\n", err);
goto err_disable;
}
- err = mthca_QUERY_DDR(mdev, &status);
+ err = mthca_QUERY_DDR(mdev);
if (err) {
- mthca_err(mdev, "QUERY_DDR command failed, aborting.\n");
- goto err_disable;
- }
- if (status) {
- mthca_err(mdev, "QUERY_DDR returned status 0x%02x, "
- "aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "QUERY_DDR command returned %d, aborting.\n", err);
goto err_disable;
}
err = mthca_dev_lim(mdev, &dev_lim);
if (err) {
- mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+ mthca_err(mdev, "QUERY_DEV_LIM command returned %d, aborting.\n", err);
goto err_disable;
}
@@ -351,29 +329,22 @@ static int mthca_init_tavor(struct mthca_dev *mdev)
goto err_disable;
}
- err = mthca_INIT_HCA(mdev, &init_hca, &status);
+ err = mthca_INIT_HCA(mdev, &init_hca);
if (err) {
- mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
- goto err_disable;
- }
- if (status) {
- mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
- "aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
goto err_disable;
}
return 0;
err_disable:
- mthca_SYS_DIS(mdev, &status);
+ mthca_SYS_DIS(mdev);
return err;
}
static int mthca_load_fw(struct mthca_dev *mdev)
{
- u8 status;
int err;
/* FIXME: use HCA-attached memory for FW if present */
@@ -386,31 +357,21 @@ static int mthca_load_fw(struct mthca_dev *mdev)
return -ENOMEM;
}
- err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status);
+ err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm);
if (err) {
- mthca_err(mdev, "MAP_FA command failed, aborting.\n");
- goto err_free;
- }
- if (status) {
- mthca_err(mdev, "MAP_FA returned status 0x%02x, aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "MAP_FA command returned %d, aborting.\n", err);
goto err_free;
}
- err = mthca_RUN_FW(mdev, &status);
+ err = mthca_RUN_FW(mdev);
if (err) {
- mthca_err(mdev, "RUN_FW command failed, aborting.\n");
- goto err_unmap_fa;
- }
- if (status) {
- mthca_err(mdev, "RUN_FW returned status 0x%02x, aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "RUN_FW command returned %d, aborting.\n", err);
goto err_unmap_fa;
}
return 0;
err_unmap_fa:
- mthca_UNMAP_FA(mdev, &status);
+ mthca_UNMAP_FA(mdev);
err_free:
mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
@@ -423,19 +384,13 @@ static int mthca_init_icm(struct mthca_dev *mdev,
u64 icm_size)
{
u64 aux_pages;
- u8 status;
int err;
- err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status);
+ err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
if (err) {
- mthca_err(mdev, "SET_ICM_SIZE command failed, aborting.\n");
+ mthca_err(mdev, "SET_ICM_SIZE command returned %d, aborting.\n", err);
return err;
}
- if (status) {
- mthca_err(mdev, "SET_ICM_SIZE returned status 0x%02x, "
- "aborting.\n", status);
- return -EINVAL;
- }
mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
(unsigned long long) icm_size >> 10,
@@ -448,14 +403,9 @@ static int mthca_init_icm(struct mthca_dev *mdev,
return -ENOMEM;
}
- err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status);
+ err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm);
if (err) {
- mthca_err(mdev, "MAP_ICM_AUX command failed, aborting.\n");
- goto err_free_aux;
- }
- if (status) {
- mthca_err(mdev, "MAP_ICM_AUX returned status 0x%02x, aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "MAP_ICM_AUX returned %d, aborting.\n", err);
goto err_free_aux;
}
@@ -596,7 +546,7 @@ err_unmap_eq:
mthca_unmap_eq_icm(mdev);
err_unmap_aux:
- mthca_UNMAP_ICM_AUX(mdev, &status);
+ mthca_UNMAP_ICM_AUX(mdev);
err_free_aux:
mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
@@ -606,7 +556,6 @@ err_free_aux:
static void mthca_free_icms(struct mthca_dev *mdev)
{
- u8 status;
mthca_free_icm_table(mdev, mdev->mcg_table.table);
if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
@@ -619,7 +568,7 @@ static void mthca_free_icms(struct mthca_dev *mdev)
mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
mthca_unmap_eq_icm(mdev);
- mthca_UNMAP_ICM_AUX(mdev, &status);
+ mthca_UNMAP_ICM_AUX(mdev);
mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
}
@@ -629,43 +578,32 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
struct mthca_profile profile;
struct mthca_init_hca_param init_hca;
s64 icm_size;
- u8 status;
int err;
- err = mthca_QUERY_FW(mdev, &status);
+ err = mthca_QUERY_FW(mdev);
if (err) {
- mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
+ mthca_err(mdev, "QUERY_FW command failed %d, aborting.\n", err);
return err;
}
- if (status) {
- mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
- "aborting.\n", status);
- return -EINVAL;
- }
- err = mthca_ENABLE_LAM(mdev, &status);
- if (err) {
- mthca_err(mdev, "ENABLE_LAM command failed, aborting.\n");
- return err;
- }
- if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) {
+ err = mthca_ENABLE_LAM(mdev);
+ if (err == -EAGAIN) {
mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
- } else if (status) {
- mthca_err(mdev, "ENABLE_LAM returned status 0x%02x, "
- "aborting.\n", status);
- return -EINVAL;
+ } else if (err) {
+ mthca_err(mdev, "ENABLE_LAM returned %d, aborting.\n", err);
+ return err;
}
err = mthca_load_fw(mdev);
if (err) {
- mthca_err(mdev, "Failed to start FW, aborting.\n");
+ mthca_err(mdev, "Loading FW returned %d, aborting.\n", err);
goto err_disable;
}
err = mthca_dev_lim(mdev, &dev_lim);
if (err) {
- mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+ mthca_err(mdev, "QUERY_DEV_LIM returned %d, aborting.\n", err);
goto err_stop_fw;
}
@@ -685,15 +623,9 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
if (err)
goto err_stop_fw;
- err = mthca_INIT_HCA(mdev, &init_hca, &status);
+ err = mthca_INIT_HCA(mdev, &init_hca);
if (err) {
- mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
- goto err_free_icm;
- }
- if (status) {
- mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
- "aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
goto err_free_icm;
}
@@ -703,37 +635,34 @@ err_free_icm:
mthca_free_icms(mdev);
err_stop_fw:
- mthca_UNMAP_FA(mdev, &status);
+ mthca_UNMAP_FA(mdev);
mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
err_disable:
if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
- mthca_DISABLE_LAM(mdev, &status);
+ mthca_DISABLE_LAM(mdev);
return err;
}
static void mthca_close_hca(struct mthca_dev *mdev)
{
- u8 status;
-
- mthca_CLOSE_HCA(mdev, 0, &status);
+ mthca_CLOSE_HCA(mdev, 0);
if (mthca_is_memfree(mdev)) {
mthca_free_icms(mdev);
- mthca_UNMAP_FA(mdev, &status);
+ mthca_UNMAP_FA(mdev);
mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
- mthca_DISABLE_LAM(mdev, &status);
+ mthca_DISABLE_LAM(mdev);
} else
- mthca_SYS_DIS(mdev, &status);
+ mthca_SYS_DIS(mdev);
}
static int mthca_init_hca(struct mthca_dev *mdev)
{
- u8 status;
int err;
struct mthca_adapter adapter;
@@ -745,15 +674,9 @@ static int mthca_init_hca(struct mthca_dev *mdev)
if (err)
return err;
- err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
+ err = mthca_QUERY_ADAPTER(mdev, &adapter);
if (err) {
- mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
- goto err_close;
- }
- if (status) {
- mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
- "aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "QUERY_ADAPTER command returned %d, aborting.\n", err);
goto err_close;
}
@@ -772,7 +695,6 @@ err_close:
static int mthca_setup_hca(struct mthca_dev *dev)
{
int err;
- u8 status;
MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);
@@ -833,8 +755,8 @@ static int mthca_setup_hca(struct mthca_dev *dev)
goto err_eq_table_free;
}
- err = mthca_NOP(dev, &status);
- if (err || status) {
+ err = mthca_NOP(dev);
+ if (err) {
if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
mthca_warn(dev, "NOP command failed to generate interrupt "
"(IRQ %d).\n",
@@ -1166,7 +1088,6 @@ err_disable_pdev:
static void __mthca_remove_one(struct pci_dev *pdev)
{
struct mthca_dev *mdev = pci_get_drvdata(pdev);
- u8 status;
int p;
if (mdev) {
@@ -1174,7 +1095,7 @@ static void __mthca_remove_one(struct pci_dev *pdev)
mthca_unregister_device(mdev);
for (p = 1; p <= mdev->limits.num_ports; ++p)
- mthca_CLOSE_IB(mdev, p, &status);
+ mthca_CLOSE_IB(mdev, p);
mthca_cleanup_mcg_table(mdev);
mthca_cleanup_av_table(mdev);
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 515790a606e..6304ae8f4a6 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -68,7 +68,6 @@ static int find_mgm(struct mthca_dev *dev,
struct mthca_mgm *mgm = mgm_mailbox->buf;
u8 *mgid;
int err;
- u8 status;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
@@ -77,12 +76,9 @@ static int find_mgm(struct mthca_dev *dev,
memcpy(mgid, gid, 16);
- err = mthca_MGID_HASH(dev, mailbox, hash, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "MGID_HASH returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_MGID_HASH(dev, mailbox, hash);
+ if (err) {
+ mthca_err(dev, "MGID_HASH failed (%d)\n", err);
goto out;
}
@@ -93,12 +89,9 @@ static int find_mgm(struct mthca_dev *dev,
*prev = -1;
do {
- err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "READ_MGM returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_READ_MGM(dev, *index, mgm_mailbox);
+ if (err) {
+ mthca_err(dev, "READ_MGM failed (%d)\n", err);
goto out;
}
@@ -134,7 +127,6 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int link = 0;
int i;
int err;
- u8 status;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
@@ -160,12 +152,9 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
goto out;
}
- err = mthca_READ_MGM(dev, index, mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "READ_MGM returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_READ_MGM(dev, index, mailbox);
+ if (err) {
+ mthca_err(dev, "READ_MGM failed (%d)\n", err);
goto out;
}
memset(mgm, 0, sizeof *mgm);
@@ -189,11 +178,9 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
goto out;
}
- err = mthca_WRITE_MGM(dev, index, mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
+ err = mthca_WRITE_MGM(dev, index, mailbox);
+ if (err) {
+ mthca_err(dev, "WRITE_MGM failed %d\n", err);
err = -EINVAL;
goto out;
}
@@ -201,24 +188,17 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
if (!link)
goto out;
- err = mthca_READ_MGM(dev, prev, mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "READ_MGM returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_READ_MGM(dev, prev, mailbox);
+ if (err) {
+ mthca_err(dev, "READ_MGM failed %d\n", err);
goto out;
}
mgm->next_gid_index = cpu_to_be32(index << 6);
- err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
+ err = mthca_WRITE_MGM(dev, prev, mailbox);
if (err)
- goto out;
- if (status) {
- mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
- err = -EINVAL;
- }
+ mthca_err(dev, "WRITE_MGM returned %d\n", err);
out:
if (err && link && index != -1) {
@@ -240,7 +220,6 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int prev, index;
int i, loc;
int err;
- u8 status;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
@@ -275,12 +254,9 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
mgm->qp[loc] = mgm->qp[i - 1];
mgm->qp[i - 1] = 0;
- err = mthca_WRITE_MGM(dev, index, mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_WRITE_MGM(dev, index, mailbox);
+ if (err) {
+ mthca_err(dev, "WRITE_MGM returned %d\n", err);
goto out;
}
@@ -292,24 +268,17 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int amgm_index_to_free = be32_to_cpu(mgm->next_gid_index) >> 6;
if (amgm_index_to_free) {
err = mthca_READ_MGM(dev, amgm_index_to_free,
- mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "READ_MGM returned status %02x\n",
- status);
- err = -EINVAL;
+ mailbox);
+ if (err) {
+ mthca_err(dev, "READ_MGM returned %d\n", err);
goto out;
}
} else
memset(mgm->gid, 0, 16);
- err = mthca_WRITE_MGM(dev, index, mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_WRITE_MGM(dev, index, mailbox);
+ if (err) {
+ mthca_err(dev, "WRITE_MGM returned %d\n", err);
goto out;
}
if (amgm_index_to_free) {
@@ -319,23 +288,17 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
} else {
/* Remove entry from AMGM */
int curr_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
- err = mthca_READ_MGM(dev, prev, mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "READ_MGM returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_READ_MGM(dev, prev, mailbox);
+ if (err) {
+ mthca_err(dev, "READ_MGM returned %d\n", err);
goto out;
}
mgm->next_gid_index = cpu_to_be32(curr_next_index << 6);
- err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_WRITE_MGM(dev, prev, mailbox);
+ if (err) {
+ mthca_err(dev, "WRITE_MGM returned %d\n", err);
goto out;
}
BUG_ON(index < dev->limits.num_mgms);
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 8c2a83732b5..7d2e42dd692 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -223,7 +223,6 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob
{
int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
int ret = 0;
- u8 status;
mutex_lock(&table->mutex);
@@ -240,8 +239,8 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob
goto out;
}
- if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
- &status) || status) {
+ if (mthca_MAP_ICM(dev, table->icm[i],
+ table->virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
mthca_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
ret = -ENOMEM;
@@ -258,7 +257,6 @@ out:
void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
int i;
- u8 status;
if (!mthca_is_memfree(dev))
return;
@@ -269,8 +267,7 @@ void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int o
if (--table->icm[i]->refcount == 0) {
mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
- MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
- &status);
+ MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
mthca_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
}
@@ -366,7 +363,6 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
int num_icm;
unsigned chunk_size;
int i;
- u8 status;
obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
@@ -396,8 +392,8 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
__GFP_NOWARN, use_coherent);
if (!table->icm[i])
goto err;
- if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
- &status) || status) {
+ if (mthca_MAP_ICM(dev, table->icm[i],
+ virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
mthca_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
goto err;
@@ -416,8 +412,7 @@ err:
for (i = 0; i < num_icm; ++i)
if (table->icm[i]) {
mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
- MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
- &status);
+ MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
mthca_free_icm(dev, table->icm[i], table->coherent);
}
@@ -429,13 +424,12 @@ err:
void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
int i;
- u8 status;
for (i = 0; i < table->num_icm; ++i)
if (table->icm[i]) {
- mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
- MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
- &status);
+ mthca_UNMAP_ICM(dev,
+ table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
+ MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
mthca_free_icm(dev, table->icm[i], table->coherent);
}
@@ -454,7 +448,6 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
{
struct page *pages[1];
int ret = 0;
- u8 status;
int i;
if (!mthca_is_memfree(dev))
@@ -494,9 +487,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
}
ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
- mthca_uarc_virt(dev, uar, i), &status);
- if (!ret && status)
- ret = -EINVAL;
+ mthca_uarc_virt(dev, uar, i));
if (ret) {
pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
put_page(sg_page(&db_tab->page[i].mem));
@@ -557,14 +548,13 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
struct mthca_user_db_table *db_tab)
{
int i;
- u8 status;
if (!mthca_is_memfree(dev))
return;
for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
if (db_tab->page[i].uvirt) {
- mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
+ mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
put_page(sg_page(&db_tab->page[i].mem));
}
@@ -581,7 +571,6 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
int i, j;
struct mthca_db_page *page;
int ret = 0;
- u8 status;
mutex_lock(&dev->db_tab->mutex);
@@ -644,9 +633,7 @@ alloc:
memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);
ret = mthca_MAP_ICM_page(dev, page->mapping,
- mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
- if (!ret && status)
- ret = -EINVAL;
+ mthca_uarc_virt(dev, &dev->driver_uar, i));
if (ret) {
dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
page->db_rec, page->mapping);
@@ -678,7 +665,6 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
int i, j;
struct mthca_db_page *page;
- u8 status;
i = db_index / MTHCA_DB_REC_PER_PAGE;
j = db_index % MTHCA_DB_REC_PER_PAGE;
@@ -694,7 +680,7 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
i >= dev->db_tab->max_group1 - 1) {
- mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
+ mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);
dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
page->db_rec, page->mapping);
@@ -745,7 +731,6 @@ int mthca_init_db_tab(struct mthca_dev *dev)
void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
int i;
- u8 status;
if (!mthca_is_memfree(dev))
return;
@@ -763,7 +748,7 @@ void mthca_cleanup_db_tab(struct mthca_dev *dev)
if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
mthca_warn(dev, "Kernel UARC page %d not empty\n", i);
- mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
+ mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);
dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
dev->db_tab->page[i].db_rec,
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 44045c8846d..ab876f928a1 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -257,7 +257,6 @@ static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
struct mthca_mailbox *mailbox;
__be64 *mtt_entry;
int err = 0;
- u8 status;
int i;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
@@ -281,17 +280,11 @@ static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
if (i & 1)
mtt_entry[i + 2] = 0;
- err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status);
+ err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1);
if (err) {
mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
goto out;
}
- if (status) {
- mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
- status);
- err = -EINVAL;
- goto out;
- }
list_len -= i;
start_index += i;
@@ -441,7 +434,6 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
u32 key;
int i;
int err;
- u8 status;
WARN_ON(buffer_size_shift >= 32);
@@ -497,16 +489,10 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
}
err = mthca_SW2HW_MPT(dev, mailbox,
- key & (dev->limits.num_mpts - 1),
- &status);
+ key & (dev->limits.num_mpts - 1));
if (err) {
mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_out_mailbox;
- } else if (status) {
- mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
- status);
- err = -EINVAL;
- goto err_out_mailbox;
}
mthca_free_mailbox(dev, mailbox);
@@ -567,17 +553,12 @@ static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
{
int err;
- u8 status;
err = mthca_HW2SW_MPT(dev, NULL,
key_to_hw_index(dev, mr->ibmr.lkey) &
- (dev->limits.num_mpts - 1),
- &status);
+ (dev->limits.num_mpts - 1));
if (err)
mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);
- else if (status)
- mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n",
- status);
mthca_free_region(dev, mr->ibmr.lkey);
mthca_free_mtt(dev, mr->mtt);
@@ -590,7 +571,6 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
struct mthca_mailbox *mailbox;
u64 mtt_seg;
u32 key, idx;
- u8 status;
int list_len = mr->attr.max_pages;
int err = -ENOMEM;
int i;
@@ -672,18 +652,11 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
}
err = mthca_SW2HW_MPT(dev, mailbox,
- key & (dev->limits.num_mpts - 1),
- &status);
+ key & (dev->limits.num_mpts - 1));
if (err) {
mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_out_mailbox_free;
}
- if (status) {
- mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
- status);
- err = -EINVAL;
- goto err_out_mailbox_free;
- }
mthca_free_mailbox(dev, mailbox);
return 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 1e0b4b6074a..365fe0e1419 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -63,8 +63,6 @@ static int mthca_query_device(struct ib_device *ibdev,
int err = -ENOMEM;
struct mthca_dev *mdev = to_mdev(ibdev);
- u8 status;
-
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
if (!in_mad || !out_mad)
@@ -78,14 +76,9 @@ static int mthca_query_device(struct ib_device *ibdev,
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mthca_MAD_IFC(mdev, 1, 1,
- 1, NULL, NULL, in_mad, out_mad,
- &status);
+ 1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
props->device_cap_flags = mdev->device_cap_flags;
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
@@ -141,7 +134,6 @@ static int mthca_query_port(struct ib_device *ibdev,
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
- u8 status;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -155,14 +147,9 @@ static int mthca_query_port(struct ib_device *ibdev,
in_mad->attr_mod = cpu_to_be32(port);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
- port, NULL, NULL, in_mad, out_mad,
- &status);
+ port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
props->lmc = out_mad->data[34] & 0x7;
@@ -214,7 +201,6 @@ static int mthca_modify_port(struct ib_device *ibdev,
struct mthca_set_ib_param set_ib;
struct ib_port_attr attr;
int err;
- u8 status;
if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
return -ERESTARTSYS;
@@ -229,14 +215,9 @@ static int mthca_modify_port(struct ib_device *ibdev,
set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
~props->clr_port_cap_mask;
- err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
+ err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
-
out:
mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
return err;
@@ -248,7 +229,6 @@ static int mthca_query_pkey(struct ib_device *ibdev,
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
- u8 status;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -260,14 +240,9 @@ static int mthca_query_pkey(struct ib_device *ibdev,
in_mad->attr_mod = cpu_to_be32(index / 32);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
- port, NULL, NULL, in_mad, out_mad,
- &status);
+ port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
@@ -283,7 +258,6 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
- u8 status;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -295,14 +269,9 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
in_mad->attr_mod = cpu_to_be32(port);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
- port, NULL, NULL, in_mad, out_mad,
- &status);
+ port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
memcpy(gid->raw, out_mad->data + 8, 8);
@@ -311,14 +280,9 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
in_mad->attr_mod = cpu_to_be32(index / 8);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
- port, NULL, NULL, in_mad, out_mad,
- &status);
+ port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
@@ -800,7 +764,6 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
struct mthca_cq *cq = to_mcq(ibcq);
struct mthca_resize_cq ucmd;
u32 lkey;
- u8 status;
int ret;
if (entries < 1 || entries > dev->limits.max_cqes)
@@ -827,9 +790,7 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
lkey = ucmd.lkey;
}
- ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries), &status);
- if (status)
- ret = -EINVAL;
+ ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));
if (ret) {
if (cq->resize_buf) {
@@ -1161,7 +1122,6 @@ static int mthca_unmap_fmr(struct list_head *fmr_list)
{
struct ib_fmr *fmr;
int err;
- u8 status;
struct mthca_dev *mdev = NULL;
list_for_each_entry(fmr, fmr_list, list) {
@@ -1182,12 +1142,8 @@ static int mthca_unmap_fmr(struct list_head *fmr_list)
list_for_each_entry(fmr, fmr_list, list)
mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));
- err = mthca_SYNC_TPT(mdev, &status);
- if (err)
- return err;
- if (status)
- return -EINVAL;
- return 0;
+ err = mthca_SYNC_TPT(mdev);
+ return err;
}
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
@@ -1253,7 +1209,6 @@ static int mthca_init_node_data(struct mthca_dev *dev)
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
- u8 status;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -1264,28 +1219,18 @@ static int mthca_init_node_data(struct mthca_dev *dev)
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
err = mthca_MAD_IFC(dev, 1, 1,
- 1, NULL, NULL, in_mad, out_mad,
- &status);
+ 1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mthca_MAD_IFC(dev, 1, 1,
- 1, NULL, NULL, in_mad, out_mad,
- &status);
+ 1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
if (mthca_is_memfree(dev))
dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index a34c9d38e82..9601049e14d 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -308,7 +308,6 @@ static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
static void init_port(struct mthca_dev *dev, int port)
{
int err;
- u8 status;
struct mthca_init_ib_param param;
memset(&param, 0, sizeof param);
@@ -319,11 +318,9 @@ static void init_port(struct mthca_dev *dev, int port)
param.gid_cap = dev->limits.gid_table_len;
param.pkey_cap = dev->limits.pkey_table_len;
- err = mthca_INIT_IB(dev, &param, port, &status);
+ err = mthca_INIT_IB(dev, &param, port);
if (err)
mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
- if (status)
- mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
@@ -433,7 +430,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
struct mthca_qp_param *qp_param;
struct mthca_qp_context *context;
int mthca_state;
- u8 status;
mutex_lock(&qp->mutex);
@@ -448,12 +444,9 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
goto out;
}
- err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
- if (err)
- goto out_mailbox;
- if (status) {
- mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox);
+ if (err) {
+ mthca_warn(dev, "QUERY_QP failed (%d)\n", err);
goto out_mailbox;
}
@@ -555,7 +548,6 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
struct mthca_qp_param *qp_param;
struct mthca_qp_context *qp_context;
u32 sqd_event = 0;
- u8 status;
int err = -EINVAL;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
@@ -781,13 +773,10 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
sqd_event = 1 << 31;
err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
- mailbox, sqd_event, &status);
- if (err)
- goto out_mailbox;
- if (status) {
- mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
- cur_state, new_state, status);
- err = -EINVAL;
+ mailbox, sqd_event);
+ if (err) {
+ mthca_warn(dev, "modify QP %d->%d returned %d.\n",
+ cur_state, new_state, err);
goto out_mailbox;
}
@@ -817,7 +806,7 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
cur_state != IB_QPS_ERR &&
(new_state == IB_QPS_RESET ||
new_state == IB_QPS_ERR))
- mthca_CLOSE_IB(dev, qp->port, &status);
+ mthca_CLOSE_IB(dev, qp->port);
}
/*
@@ -1429,7 +1418,6 @@ static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
void mthca_free_qp(struct mthca_dev *dev,
struct mthca_qp *qp)
{
- u8 status;
struct mthca_cq *send_cq;
struct mthca_cq *recv_cq;
@@ -1454,7 +1442,7 @@ void mthca_free_qp(struct mthca_dev *dev,
if (qp->state != IB_QPS_RESET)
mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
- NULL, 0, &status);
+ NULL, 0);
/*
* If this is a userspace QP, the buffers, MR, CQs and so on
@@ -2263,7 +2251,6 @@ void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
int mthca_init_qp_table(struct mthca_dev *dev)
{
int err;
- u8 status;
int i;
spin_lock_init(&dev->qp_table.lock);
@@ -2290,15 +2277,10 @@ int mthca_init_qp_table(struct mthca_dev *dev)
for (i = 0; i < 2; ++i) {
err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
- dev->qp_table.sqp_start + i * 2,
- &status);
- if (err)
- goto err_out;
- if (status) {
+ dev->qp_table.sqp_start + i * 2);
+ if (err) {
mthca_warn(dev, "CONF_SPECIAL_QP returned "
- "status %02x, aborting.\n",
- status);
- err = -EINVAL;
+ "%d, aborting.\n", err);
goto err_out;
}
}
@@ -2306,7 +2288,7 @@ int mthca_init_qp_table(struct mthca_dev *dev)
err_out:
for (i = 0; i < 2; ++i)
- mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
+ mthca_CONF_SPECIAL_QP(dev, i, 0);
mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
mthca_alloc_cleanup(&dev->qp_table.alloc);
@@ -2317,10 +2299,9 @@ int mthca_init_qp_table(struct mthca_dev *dev)
void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
int i;
- u8 status;
for (i = 0; i < 2; ++i)
- mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
+ mthca_CONF_SPECIAL_QP(dev, i, 0);
mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
mthca_alloc_cleanup(&dev->qp_table.alloc);
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index 2a13a163d33..4fa3534ec23 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -113,7 +113,7 @@ int mthca_reset(struct mthca_dev *mdev)
}
hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
- hca_pcie_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP);
+ hca_pcie_cap = pci_pcie_cap(mdev->pdev);
if (bridge) {
bridge_header = kmalloc(256, GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 4fabe62aab8..d22f970480c 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -200,7 +200,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
struct ib_srq_attr *attr, struct mthca_srq *srq)
{
struct mthca_mailbox *mailbox;
- u8 status;
int ds;
int err;
@@ -266,18 +265,12 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
else
mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
- err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
+ err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);
if (err) {
mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
goto err_out_free_buf;
}
- if (status) {
- mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
- status);
- err = -EINVAL;
- goto err_out_free_buf;
- }
spin_lock_irq(&dev->srq_table.lock);
if (mthca_array_set(&dev->srq_table.srq,
@@ -299,11 +292,9 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
return 0;
err_out_free_srq:
- err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+ err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
if (err)
mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
- else if (status)
- mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
err_out_free_buf:
if (!pd->ibpd.uobject)
@@ -340,7 +331,6 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
struct mthca_mailbox *mailbox;
int err;
- u8 status;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox)) {
@@ -348,11 +338,9 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
return;
}
- err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+ err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
if (err)
mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
- else if (status)
- mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
spin_lock_irq(&dev->srq_table.lock);
mthca_array_clear(&dev->srq_table.srq,
@@ -378,8 +366,7 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
{
struct mthca_dev *dev = to_mdev(ibsrq->device);
struct mthca_srq *srq = to_msrq(ibsrq);
- int ret;
- u8 status;
+ int ret = 0;
/* We don't support resizing SRQs (yet?) */
if (attr_mask & IB_SRQ_MAX_WR)
@@ -391,16 +378,11 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
return -EINVAL;
mutex_lock(&srq->mutex);
- ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
+ ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit);
mutex_unlock(&srq->mutex);
-
- if (ret)
- return ret;
- if (status)
- return -EINVAL;
}
- return 0;
+ return ret;
}
int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
@@ -410,14 +392,13 @@ int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
struct mthca_mailbox *mailbox;
struct mthca_arbel_srq_context *arbel_ctx;
struct mthca_tavor_srq_context *tavor_ctx;
- u8 status;
int err;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
+ err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox);
if (err)
goto out;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index e74cdf9ef47..c118663e443 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -34,7 +34,7 @@
#define TCPOPT_TIMESTAMP 8
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -1151,7 +1151,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
}
if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
- neigh_event_send(rt->dst.neighbour, NULL);
+ neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
ip_rt_put(rt);
return rc;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 96fa9a4cafd..be36cbeae63 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2917,24 +2917,19 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
goto skip_rx_indicate0;
- if ((cqe_misc & NES_NIC_CQE_TAG_VALID) &&
- (nesvnic->vlan_grp != NULL)) {
+ if (cqe_misc & NES_NIC_CQE_TAG_VALID) {
vlan_tag = (u16)(le32_to_cpu(
cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX])
>> 16);
nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
nesvnic->netdev->name, vlan_tag);
- if (nes_use_lro)
- lro_vlan_hwaccel_receive_skb(&nesvnic->lro_mgr, rx_skb,
- nesvnic->vlan_grp, vlan_tag, NULL);
- else
- nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag);
- } else {
- if (nes_use_lro)
- lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
- else
- nes_netif_rx(rx_skb);
+
+ __vlan_hwaccel_put_tag(rx_skb, vlan_tag);
}
+ if (nes_use_lro)
+ lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
+ else
+ netif_receive_skb(rx_skb);
skip_rx_indicate0:
;
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 91594116f94..c3241479ec0 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1211,7 +1211,6 @@ struct nes_vnic {
/* void *mem; */
struct nes_device *nesdev;
struct net_device *netdev;
- struct vlan_group *vlan_grp;
atomic_t rx_skbs_needed;
atomic_t rx_skb_timer_running;
int budget;
@@ -1357,7 +1356,4 @@ struct nes_terminate_hdr {
#define NES_LINK_RECHECK_DELAY msecs_to_jiffies(50)
#define NES_LINK_RECHECK_MAX 60
-#define nes_vlan_rx vlan_hwaccel_receive_skb
-#define nes_netif_rx netif_receive_skb
-
#endif /* __NES_HW_H */
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index d3a1c41cfd2..9d7ffebff21 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1584,23 +1584,19 @@ static const struct ethtool_ops nes_ethtool_ops = {
.set_pauseparam = nes_netdev_set_pauseparam,
};
-
-static void nes_netdev_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, u32 features)
{
- struct nes_vnic *nesvnic = netdev_priv(netdev);
- struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
unsigned long flags;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
- nesvnic->vlan_grp = grp;
nes_debug(NES_DBG_NETDEV, "%s: %s\n", __func__, netdev->name);
/* Enable/Disable VLAN Stripping */
u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG);
- if (grp)
+ if (features & NETIF_F_HW_VLAN_RX)
u32temp &= 0xfdffffff;
else
u32temp |= 0x02000000;
@@ -1609,17 +1605,44 @@ static void nes_netdev_vlan_rx_register(struct net_device *netdev, struct vlan_g
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
+static u32 nes_fix_features(struct net_device *netdev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable, make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int nes_set_features(struct net_device *netdev, u32 features)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ u32 changed = netdev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ nes_vlan_mode(netdev, nesdev, features);
+
+ return 0;
+}
+
static const struct net_device_ops nes_netdev_ops = {
- .ndo_open = nes_netdev_open,
+ .ndo_open = nes_netdev_open,
.ndo_stop = nes_netdev_stop,
- .ndo_start_xmit = nes_netdev_start_xmit,
+ .ndo_start_xmit = nes_netdev_start_xmit,
.ndo_get_stats = nes_netdev_get_stats,
- .ndo_tx_timeout = nes_netdev_tx_timeout,
+ .ndo_tx_timeout = nes_netdev_tx_timeout,
.ndo_set_mac_address = nes_netdev_set_mac_address,
.ndo_set_multicast_list = nes_netdev_set_multicast_list,
.ndo_change_mtu = nes_netdev_change_mtu,
.ndo_validate_addr = eth_validate_addr,
- .ndo_vlan_rx_register = nes_netdev_vlan_rx_register,
+ .ndo_fix_features = nes_fix_features,
+ .ndo_set_features = nes_set_features,
};
/**
@@ -1656,7 +1679,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
netdev->ethtool_ops = &nes_ethtool_ops;
netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128);
nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
- netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ netdev->features |= NETIF_F_HW_VLAN_TX;
/* Fill in the port structure */
nesvnic->netdev = netdev;
@@ -1683,7 +1706,8 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
netdev->dev_addr[5] = (u8)u64temp;
memcpy(netdev->perm_addr, netdev->dev_addr, 6);
- netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM;
+ netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_HW_VLAN_RX;
if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV))
netdev->hw_features |= NETIF_F_TSO;
netdev->features |= netdev->hw_features;
@@ -1815,6 +1839,8 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
nes_init_phy(nesdev);
}
+ nes_vlan_mode(netdev, nesdev, netdev->features);
+
return netdev;
}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 95ca93ceeda..9f2f7d4b119 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -605,16 +605,6 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
/**
- * nes_modify_port
- */
-static int nes_modify_port(struct ib_device *ibdev, u8 port,
- int port_modify_mask, struct ib_port_modify *props)
-{
- return 0;
-}
-
-
-/**
* nes_query_pkey
*/
static int nes_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
@@ -3882,7 +3872,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev;
nesibdev->ibdev.query_device = nes_query_device;
nesibdev->ibdev.query_port = nes_query_port;
- nesibdev->ibdev.modify_port = nes_modify_port;
nesibdev->ibdev.query_pkey = nes_query_pkey;
nesibdev->ibdev.query_gid = nes_query_gid;
nesibdev->ibdev.alloc_ucontext = nes_alloc_ucontext;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 769a1d9da4b..c9624ea8720 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1012,6 +1012,8 @@ struct qib_devdata {
u8 psxmitwait_supported;
/* cycle length of PS* counters in HW (in picoseconds) */
u16 psxmitwait_check_rate;
+ /* high volume overflow errors deferred to tasklet */
+ struct tasklet_struct error_tasklet;
};
/* hol_state values */
@@ -1433,6 +1435,7 @@ extern struct mutex qib_mutex;
struct qib_hwerror_msgs {
u64 mask;
const char *msg;
+ size_t sz;
};
#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 406fca50d03..26253039d2c 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1527,6 +1527,7 @@ done_chk_sdma:
struct qib_filedata *fd = fp->private_data;
const struct qib_ctxtdata *rcd = fd->rcd;
const struct qib_devdata *dd = rcd->dd;
+ unsigned int weight;
if (dd->flags & QIB_HAS_SEND_DMA) {
fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
@@ -1545,8 +1546,8 @@ done_chk_sdma:
* it just means that sooner or later we don't recommend
* a cpu, and let the scheduler do its best.
*/
- if (!ret && cpus_weight(current->cpus_allowed) >=
- qib_cpulist_count) {
+ weight = cpumask_weight(tsk_cpus_allowed(current));
+ if (!ret && weight >= qib_cpulist_count) {
int cpu;
cpu = find_first_zero_bit(qib_cpulist,
qib_cpulist_count);
@@ -1554,13 +1555,13 @@ done_chk_sdma:
__set_bit(cpu, qib_cpulist);
fd->rec_cpu_num = cpu;
}
- } else if (cpus_weight(current->cpus_allowed) == 1 &&
- test_bit(first_cpu(current->cpus_allowed),
+ } else if (weight == 1 &&
+ test_bit(cpumask_first(tsk_cpus_allowed(current)),
qib_cpulist))
qib_devinfo(dd->pcidev, "%s PID %u affinity "
"set to cpu %d; already allocated\n",
current->comm, current->pid,
- first_cpu(current->cpus_allowed));
+ cpumask_first(tsk_cpus_allowed(current)));
}
mutex_unlock(&qib_mutex);
@@ -1904,8 +1905,9 @@ int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
struct qib_ctxtdata *rcd;
unsigned ctxt;
int ret = 0;
+ unsigned long flags;
- spin_lock(&ppd->dd->uctxt_lock);
+ spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
ctxt++) {
rcd = ppd->dd->rcd[ctxt];
@@ -1924,7 +1926,7 @@ int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
ret = 1;
break;
}
- spin_unlock(&ppd->dd->uctxt_lock);
+ spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
return ret;
}
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index c765a2eb04c..e1f947446c2 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2434,6 +2434,7 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
int lsb, ret = 0, setforce = 0;
u16 lcmd, licmd;
unsigned long flags;
+ u32 tmp = 0;
switch (which) {
case QIB_IB_CFG_LIDLMC:
@@ -2467,9 +2468,6 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
maskr = IBA7220_IBC_WIDTH_MASK;
lsb = IBA7220_IBC_WIDTH_SHIFT;
setforce = 1;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
break;
case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
@@ -2643,6 +2641,28 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
goto bail;
}
qib_set_ib_7220_lstate(ppd, lcmd, licmd);
+
+ maskr = IBA7220_IBC_WIDTH_MASK;
+ lsb = IBA7220_IBC_WIDTH_SHIFT;
+ tmp = (ppd->cpspec->ibcddrctrl >> lsb) & maskr;
+ /* If the width active on the chip does not match the
+ * width in the shadow register, write the new active
+ * width to the chip.
+ * We don't have to worry about speed as the speed is taken
+ * care of by set_7220_ibspeed_fast called by ib_updown.
+ */
+ if (ppd->link_width_enabled-1 != tmp) {
+ ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
+ ppd->cpspec->ibcddrctrl |=
+ (((u64)(ppd->link_width_enabled-1) & maskr) <<
+ lsb);
+ qib_write_kreg(dd, kr_ibcddrctrl,
+ ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
goto bail;
case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 9f53e68a096..5ea9ece23b3 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -114,6 +114,10 @@ static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
+static ushort qib_krcvq01_no_msi;
+module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
+MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
+
/*
* Receive header queue sizes
*/
@@ -397,7 +401,6 @@ MODULE_PARM_DESC(txselect, \
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
-#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
@@ -469,6 +472,8 @@ static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
#define IB_7322_LT_STATE_CFGENH 0x10
#define IB_7322_LT_STATE_CFGTEST 0x11
+#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
+#define IB_7322_LT_STATE_CFGWAITENH 0x13
/* link state machine states from IBC */
#define IB_7322_L_STATE_DOWN 0x0
@@ -498,8 +503,10 @@ static const u8 qib_7322_physportstate[0x20] = {
IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
+ [IB_7322_LT_STATE_CFGWAITRMTTEST] =
+ IB_PHYSPORTSTATE_CFG_TRAIN,
+ [IB_7322_LT_STATE_CFGWAITENH] =
+ IB_PHYSPORTSTATE_CFG_WAIT_ENH,
[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
@@ -1103,9 +1110,9 @@ static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
- .msg = #fldname }
+ .msg = #fldname , .sz = sizeof(#fldname) }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
- fldname##Mask##_##port), .msg = #fldname }
+ fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
@@ -1123,14 +1130,16 @@ static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
HWE_AUTO(statusValidNoEop),
HWE_AUTO(LATriggered),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
- .msg = #fldname }
+ .msg = #fldname, .sz = sizeof(#fldname) }
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
- .msg = #fldname }
+ .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
+ E_AUTO(RcvEgrFullErr),
+ E_AUTO(RcvHdrFullErr),
E_AUTO(ResetNegated),
E_AUTO(HardwareErr),
E_AUTO(InvalidAddrErr),
@@ -1143,9 +1152,7 @@ static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
E_AUTO(SendSpecialTriggerErr),
E_AUTO(SDmaWrongPortErr),
E_AUTO(SDmaBufMaskDuplicateErr),
- E_AUTO(RcvHdrFullErr),
- E_AUTO(RcvEgrFullErr),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
@@ -1155,7 +1162,8 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
/*
* SDmaHaltErr is not really an error, make it clearer;
*/
- {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
+ {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
+ .sz = 11},
E_P_AUTO(SDmaDescAddrMisalignErr),
E_P_AUTO(SDmaUnexpDataErr),
E_P_AUTO(SDmaMissingDwErr),
@@ -1191,7 +1199,7 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
E_P_AUTO(RcvICRCErr),
E_P_AUTO(RcvVCRCErr),
E_P_AUTO(RcvFormatErr),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
/*
@@ -1199,17 +1207,17 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
* context
*/
#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
- .msg = #fldname }
+ .msg = #fldname, .sz = sizeof(#fldname) }
/* Below generates "auto-message" for interrupts specific to a port */
#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##Mask##_0), \
SYM_LSB(IntMask, fldname##Mask##_1)), \
- .msg = #fldname "_P" }
+ .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/* For some reason, the SerDesTrimDone bits are reversed */
#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##Mask##_1), \
SYM_LSB(IntMask, fldname##Mask##_0)), \
- .msg = #fldname "_P" }
+ .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/*
* Below generates "auto-message" for interrupts specific to a context,
* with ctxt-number appended
@@ -1217,7 +1225,7 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##0IntMask), \
SYM_LSB(IntMask, fldname##17IntMask)), \
- .msg = #fldname "_C"}
+ .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
INTR_AUTO_P(SDmaInt),
@@ -1231,11 +1239,12 @@ static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
INTR_AUTO_P(SendDoneInt),
INTR_AUTO(SendBufAvailInt),
INTR_AUTO_C(RcvAvail),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
#define TXSYMPTOM_AUTO_P(fldname) \
- { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
+ { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
+ .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs hdrchk_msgs[] = {
TXSYMPTOM_AUTO_P(NonKeyPacket),
TXSYMPTOM_AUTO_P(GRHFail),
@@ -1244,7 +1253,7 @@ static const struct qib_hwerror_msgs hdrchk_msgs[] = {
TXSYMPTOM_AUTO_P(SLIDFail),
TXSYMPTOM_AUTO_P(RawIPV6),
TXSYMPTOM_AUTO_P(PacketTooSmall),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
@@ -1289,7 +1298,7 @@ static void err_decode(char *msg, size_t len, u64 errs,
u64 these, lmask;
int took, multi, n = 0;
- while (msp && msp->mask) {
+ while (errs && msp && msp->mask) {
multi = (msp->mask & (msp->mask - 1));
while (errs & msp->mask) {
these = (errs & msp->mask);
@@ -1300,9 +1309,14 @@ static void err_decode(char *msg, size_t len, u64 errs,
*msg++ = ',';
len--;
}
- took = scnprintf(msg, len, "%s", msp->msg);
+ BUG_ON(!msp->sz);
+ /* msp->sz counts the nul */
+ took = min_t(size_t, msp->sz - (size_t)1, len);
+ memcpy(msg, msp->msg, took);
len -= took;
msg += took;
+ if (len)
+ *msg = '\0';
}
errs &= ~lmask;
if (len && multi) {
@@ -1640,6 +1654,14 @@ done:
return;
}
+static void qib_error_tasklet(unsigned long data)
+{
+ struct qib_devdata *dd = (struct qib_devdata *)data;
+
+ handle_7322_errors(dd);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
static void reenable_chase(unsigned long opaque)
{
struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
@@ -1692,7 +1714,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
break;
}
- if (ibclt == IB_7322_LT_STATE_CFGTEST &&
+ if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
+ ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
+ ibclt == IB_7322_LT_STATE_LINKUP) &&
(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
force_h1(ppd);
ppd->cpspec->qdr_reforce = 1;
@@ -2719,8 +2743,10 @@ static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
unknown_7322_ibits(dd, istat);
if (istat & QIB_I_GPIO)
unknown_7322_gpio_intr(dd);
- if (istat & QIB_I_C_ERROR)
- handle_7322_errors(dd);
+ if (istat & QIB_I_C_ERROR) {
+ qib_write_kreg(dd, kr_errmask, 0ULL);
+ tasklet_schedule(&dd->error_tasklet);
+ }
if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
handle_7322_p_errors(dd->rcd[0]->ppd);
if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
@@ -3119,6 +3145,8 @@ try_intx:
arg = dd->rcd[ctxt];
if (!arg)
continue;
+ if (qib_krcvq01_no_msi && ctxt < 2)
+ continue;
lsb = QIB_I_RCVAVAIL_LSB + ctxt;
handler = qib_7322pintr;
name = QIB_DRV_NAME " (kctx)";
@@ -3153,6 +3181,8 @@ try_intx:
for (i = 0; i < ARRAY_SIZE(redirect); i++)
qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
dd->cspec->main_int_mask = mask;
+ tasklet_init(&dd->error_tasklet, qib_error_tasklet,
+ (unsigned long)dd);
bail:;
}
@@ -6782,6 +6812,10 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
(i >= ARRAY_SIZE(irq_table) &&
dd->rcd[i - ARRAY_SIZE(irq_table)]))
actual_cnt++;
+ /* reduce by ctxts < 2 */
+ if (qib_krcvq01_no_msi)
+ actual_cnt -= dd->num_pports;
+
tabsize = actual_cnt;
dd->cspec->msix_entries = kmalloc(tabsize *
sizeof(struct msix_entry), GFP_KERNEL);
@@ -7301,12 +7335,17 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
{
u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
- printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
- ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
- if (enable)
+ u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
+
+ if (enable && !state) {
+ printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
+ ppd->dd->unit, ppd->port);
data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
- else
+ } else if (!enable && state) {
+ printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
+ ppd->dd->unit, ppd->port);
data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+ }
qib_write_kreg_port(ppd, krp_serdesctrl, data);
}
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index a693c56ec8a..6ae57d23004 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -96,8 +96,12 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
* states, or if it transitions from any of the up (INIT or better)
* states into any of the down states (except link recovery), then
* call the chip-specific code to take appropriate actions.
+ *
+ * ppd->lflags could be 0 if this is the first time the interrupt
+ * handler has been called but the link is already up.
*/
- if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
+ if (lstate >= IB_PORT_INIT &&
+ (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
ltstate == IB_PHYSPORTSTATE_LINKUP) {
/* transitioned to UP */
if (dd->f_ib_updown(ppd, 1, ibcs))
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 8fd3df5bf04..3b3745f261f 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -1125,22 +1125,22 @@ static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}
-static int pma_get_classportinfo(struct ib_perf *pmp,
+static int pma_get_classportinfo(struct ib_pma_mad *pmp,
struct ib_device *ibdev)
{
- struct ib_pma_classportinfo *p =
- (struct ib_pma_classportinfo *)pmp->data;
+ struct ib_class_port_info *p =
+ (struct ib_class_port_info *)pmp->data;
struct qib_devdata *dd = dd_from_ibdev(ibdev);
memset(pmp->data, 0, sizeof(pmp->data));
- if (pmp->attr_mod != 0)
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0)
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
/* Note that AllPortSelect is not valid */
p->base_version = 1;
p->class_version = 1;
- p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
/*
* Set the most significant bit of CM2 to indicate support for
* congestion statistics
@@ -1154,7 +1154,7 @@ static int pma_get_classportinfo(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int pma_get_portsamplescontrol(struct ib_perf *pmp,
+static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplescontrol *p =
@@ -1169,8 +1169,8 @@ static int pma_get_portsamplescontrol(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 || port_select != port) {
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
goto bail;
}
spin_lock_irqsave(&ibp->lock, flags);
@@ -1192,7 +1192,7 @@ bail:
return reply((struct ib_smp *) pmp);
}
-static int pma_set_portsamplescontrol(struct ib_perf *pmp,
+static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplescontrol *p =
@@ -1205,8 +1205,8 @@ static int pma_set_portsamplescontrol(struct ib_perf *pmp,
u8 status, xmit_flags;
int ret;
- if (pmp->attr_mod != 0 || p->port_select != port) {
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
@@ -1321,7 +1321,7 @@ static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
return ret;
}
-static int pma_get_portsamplesresult(struct ib_perf *pmp,
+static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplesresult *p =
@@ -1360,7 +1360,7 @@ static int pma_get_portsamplesresult(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplesresult_ext *p =
@@ -1402,7 +1402,7 @@ static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int pma_get_portcounters(struct ib_perf *pmp,
+static int pma_get_portcounters(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1436,8 +1436,8 @@ static int pma_get_portcounters(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 || port_select != port)
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
if (cntrs.symbol_error_counter > 0xFFFFUL)
p->symbol_error_counter = cpu_to_be16(0xFFFF);
@@ -1472,7 +1472,7 @@ static int pma_get_portcounters(struct ib_perf *pmp,
cntrs.local_link_integrity_errors = 0xFUL;
if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
cntrs.excessive_buffer_overrun_errors;
if (cntrs.vl15_dropped > 0xFFFFUL)
p->vl15_dropped = cpu_to_be16(0xFFFF);
@@ -1500,7 +1500,7 @@ static int pma_get_portcounters(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int pma_get_portcounters_cong(struct ib_perf *pmp,
+static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
/* Congestion PMA packets start at offset 24 not 64 */
@@ -1510,7 +1510,7 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct qib_devdata *dd = dd_from_ppd(ppd);
- u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
+ u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
u64 xmit_wait_counter;
unsigned long flags;
@@ -1519,9 +1519,9 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
* SET method ends up calling this anyway.
*/
if (!dd->psxmitwait_supported)
- pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
if (port_select != port)
- pmp->status |= IB_SMP_INVALID_FIELD;
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
qib_get_counters(ppd, &cntrs);
spin_lock_irqsave(&ppd->ibport_data.lock, flags);
@@ -1603,7 +1603,7 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
cntrs.local_link_integrity_errors = 0xFUL;
if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
cntrs.excessive_buffer_overrun_errors;
if (cntrs.vl15_dropped > 0xFFFFUL)
p->vl15_dropped = cpu_to_be16(0xFFFF);
@@ -1613,7 +1613,7 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
return reply((struct ib_smp *)pmp);
}
-static int pma_get_portcounters_ext(struct ib_perf *pmp,
+static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters_ext *p =
@@ -1626,8 +1626,8 @@ static int pma_get_portcounters_ext(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 || port_select != port) {
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
goto bail;
}
@@ -1652,7 +1652,7 @@ bail:
return reply((struct ib_smp *) pmp);
}
-static int pma_set_portcounters(struct ib_perf *pmp,
+static int pma_set_portcounters(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1715,14 +1715,14 @@ static int pma_set_portcounters(struct ib_perf *pmp,
return pma_get_portcounters(pmp, ibdev, port);
}
-static int pma_set_portcounters_cong(struct ib_perf *pmp,
+static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct qib_devdata *dd = dd_from_ppd(ppd);
struct qib_verbs_counters cntrs;
- u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
+ u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
int ret = 0;
unsigned long flags;
@@ -1766,7 +1766,7 @@ static int pma_set_portcounters_cong(struct ib_perf *pmp,
return ret;
}
-static int pma_set_portcounters_ext(struct ib_perf *pmp,
+static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1959,19 +1959,19 @@ static int process_perf(struct ib_device *ibdev, u8 port,
struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
- struct ib_perf *pmp = (struct ib_perf *)out_mad;
+ struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
int ret;
*out_mad = *in_mad;
- if (pmp->class_version != 1) {
- pmp->status |= IB_SMP_UNSUP_VERSION;
+ if (pmp->mad_hdr.class_version != 1) {
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
- switch (pmp->method) {
+ switch (pmp->mad_hdr.method) {
case IB_MGMT_METHOD_GET:
- switch (pmp->attr_id) {
+ switch (pmp->mad_hdr.attr_id) {
case IB_PMA_CLASS_PORT_INFO:
ret = pma_get_classportinfo(pmp, ibdev);
goto bail;
@@ -1994,13 +1994,13 @@ static int process_perf(struct ib_device *ibdev, u8 port,
ret = pma_get_portcounters_cong(pmp, ibdev, port);
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
case IB_MGMT_METHOD_SET:
- switch (pmp->attr_id) {
+ switch (pmp->mad_hdr.attr_id) {
case IB_PMA_PORT_SAMPLES_CONTROL:
ret = pma_set_portsamplescontrol(pmp, ibdev, port);
goto bail;
@@ -2014,7 +2014,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
ret = pma_set_portcounters_cong(pmp, ibdev, port);
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
@@ -2030,7 +2030,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METHOD;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
ret = reply((struct ib_smp *) pmp);
}
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
index 7840ab593bc..ecc416cdbaa 100644
--- a/drivers/infiniband/hw/qib/qib_mad.h
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -32,6 +32,8 @@
* SOFTWARE.
*/
+#include <rdma/ib_pma.h>
+
#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
@@ -180,109 +182,8 @@ struct ib_vl_weight_elem {
#define IB_VLARB_HIGHPRI_0_31 3
#define IB_VLARB_HIGHPRI_32_63 4
-/*
- * PMA class portinfo capability mask bits
- */
-#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8)
-#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9)
-#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12)
-
-#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011)
-#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
-struct ib_perf {
- u8 base_version;
- u8 mgmt_class;
- u8 class_version;
- u8 method;
- __be16 status;
- __be16 unused;
- __be64 tid;
- __be16 attr_id;
- __be16 resv;
- __be32 attr_mod;
- u8 reserved[40];
- u8 data[192];
-} __attribute__ ((packed));
-
-struct ib_pma_classportinfo {
- u8 base_version;
- u8 class_version;
- __be16 cap_mask;
- u8 reserved[3];
- u8 resp_time_value; /* only lower 5 bits */
- union ib_gid redirect_gid;
- __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 redirect_lid;
- __be16 redirect_pkey;
- __be32 redirect_qp; /* only lower 24 bits */
- __be32 redirect_qkey;
- union ib_gid trap_gid;
- __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 trap_lid;
- __be16 trap_pkey;
- __be32 trap_hl_qp; /* 8, 24 bits respectively */
- __be32 trap_qkey;
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplescontrol {
- u8 opcode;
- u8 port_select;
- u8 tick;
- u8 counter_width; /* only lower 3 bits */
- __be32 counter_mask0_9; /* 2, 10 * 3, bits */
- __be16 counter_mask10_14; /* 1, 5 * 3, bits */
- u8 sample_mechanisms;
- u8 sample_status; /* only lower 2 bits */
- __be64 option_mask;
- __be64 vendor_mask;
- __be32 sample_start;
- __be32 sample_interval;
- __be16 tag;
- __be16 counter_select[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult {
- __be16 tag;
- __be16 sample_status; /* only lower 2 bits */
- __be32 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult_ext {
- __be16 tag;
- __be16 sample_status; /* only lower 2 bits */
- __be32 extended_width; /* only upper 2 bits */
- __be64 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portcounters {
- u8 reserved;
- u8 port_select;
- __be16 counter_select;
- __be16 symbol_error_counter;
- u8 link_error_recovery_counter;
- u8 link_downed_counter;
- __be16 port_rcv_errors;
- __be16 port_rcv_remphys_errors;
- __be16 port_rcv_switch_relay_errors;
- __be16 port_xmit_discards;
- u8 port_xmit_constraint_errors;
- u8 port_rcv_constraint_errors;
- u8 reserved1;
- u8 lli_ebor_errors; /* 4, 4, bits */
- __be16 reserved2;
- __be16 vl15_dropped;
- __be32 port_xmit_data;
- __be32 port_rcv_data;
- __be32 port_xmit_packets;
- __be32 port_rcv_packets;
-} __attribute__ ((packed));
-
struct ib_pma_portcounters_cong {
u8 reserved;
u8 reserved1;
@@ -297,7 +198,7 @@ struct ib_pma_portcounters_cong {
u8 port_xmit_constraint_errors;
u8 port_rcv_constraint_errors;
u8 reserved2;
- u8 lli_ebor_errors; /* 4, 4, bits */
+ u8 link_overrun_errors; /* LocalLink: 7:4, BufferOverrun: 3:0 */
__be16 reserved3;
__be16 vl15_dropped;
__be64 port_xmit_data;
@@ -316,49 +217,11 @@ struct ib_pma_portcounters_cong {
/* number of 4nsec cycles equaling 2secs */
#define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC
-#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000)
-
#define IB_PMA_SEL_CONG_ALL 0x01
#define IB_PMA_SEL_CONG_PORT_DATA 0x02
#define IB_PMA_SEL_CONG_XMIT 0x04
#define IB_PMA_SEL_CONG_ROUTING 0x08
-struct ib_pma_portcounters_ext {
- u8 reserved;
- u8 port_select;
- __be16 counter_select;
- __be32 reserved1;
- __be64 port_xmit_data;
- __be64 port_rcv_data;
- __be64 port_xmit_packets;
- __be64 port_rcv_packets;
- __be64 port_unicast_xmit_packets;
- __be64 port_unicast_rcv_packets;
- __be64 port_multicast_xmit_packets;
- __be64 port_multicast_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080)
-
/*
* The PortSamplesControl.CounterMasks field is an array of 3 bit fields
* which specify the N'th counter's capabilities. See ch. 16.1.3.2.
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 891cc2ff5f0..4426782ad28 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -255,7 +255,7 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
u16 linkstat, speed;
int pos = 0, pose, ret = 1;
- pose = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+ pose = pci_pcie_cap(dd->pcidev);
if (!pose) {
qib_dev_err(dd, "Can't find PCI Express capability!\n");
/* set up something... */
@@ -509,7 +509,7 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
qib_devinfo(dd->pcidev, "Parent not root\n");
return 1;
}
- ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ ppos = pci_pcie_cap(parent);
if (!ppos)
return 1;
if (parent->vendor != 0x8086)
@@ -578,14 +578,14 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
qib_devinfo(dd->pcidev, "Parent not root\n");
goto bail;
}
- ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ ppos = pci_pcie_cap(parent);
if (ppos) {
pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
} else
goto bail;
/* Find out supported and configured values for endpoint (us) */
- epos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+ epos = pci_pcie_cap(dd->pcidev);
if (epos) {
pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps);
pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index d50a33fe8bb..14d129de432 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -507,6 +507,18 @@ static ssize_t show_nctxts(struct device *device,
dd->first_user_ctxt);
}
+static ssize_t show_nfreectxts(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
+ /* Return the number of free user ports (contexts) available. */
+ return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts -
+ dd->first_user_ctxt - (u32)qib_stats.sps_ctxts);
+}
+
static ssize_t show_serial(struct device *device,
struct device_attribute *attr, char *buf)
{
@@ -604,6 +616,7 @@ static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
+static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
@@ -617,6 +630,7 @@ static struct device_attribute *qib_attributes[] = {
&dev_attr_board_id,
&dev_attr_version,
&dev_attr_nctxts,
+ &dev_attr_nfreectxts,
&dev_attr_serial,
&dev_attr_boardversion,
&dev_attr_logged_errors,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 7b6985a2e65..b3cc1e062b1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -45,7 +45,7 @@
#include <net/neighbour.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 86addca9ddf..fe89c4660d5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -560,9 +560,11 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
struct ipoib_neigh *neigh;
+ struct neighbour *n;
unsigned long flags;
- neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour, skb->dev);
+ n = dst_get_neighbour(skb_dst(skb));
+ neigh = ipoib_neigh_alloc(n, skb->dev);
if (!neigh) {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
@@ -571,9 +573,9 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, skb_dst(skb)->neighbour->ha + 4);
+ path = __path_find(dev, n->ha + 4);
if (!path) {
- path = path_rec_create(dev, skb_dst(skb)->neighbour->ha + 4);
+ path = path_rec_create(dev, n->ha + 4);
if (!path)
goto err_path;
@@ -607,7 +609,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
}
} else {
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
+ ipoib_send(dev, skb, path->ah, IPOIB_QPN(n->ha));
return;
}
} else {
@@ -637,17 +639,20 @@ err_drop:
static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
+ struct dst_entry *dst = skb_dst(skb);
+ struct neighbour *n;
/* Look up path record for unicasts */
- if (skb_dst(skb)->neighbour->ha[4] != 0xff) {
+ n = dst_get_neighbour(dst);
+ if (n->ha[4] != 0xff) {
neigh_add_path(skb, dev);
return;
}
/* Add in the P_Key for multicasts */
- skb_dst(skb)->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
- skb_dst(skb)->neighbour->ha[9] = priv->pkey & 0xff;
- ipoib_mcast_send(dev, skb_dst(skb)->neighbour->ha + 4, skb);
+ n->ha[8] = (priv->pkey >> 8) & 0xff;
+ n->ha[9] = priv->pkey & 0xff;
+ ipoib_mcast_send(dev, n->ha + 4, skb);
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -712,18 +717,22 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh;
+ struct neighbour *n = NULL;
unsigned long flags;
- if (likely(skb_dst(skb) && skb_dst(skb)->neighbour)) {
- if (unlikely(!*to_ipoib_neigh(skb_dst(skb)->neighbour))) {
+ if (likely(skb_dst(skb)))
+ n = dst_get_neighbour(skb_dst(skb));
+
+ if (likely(n)) {
+ if (unlikely(!*to_ipoib_neigh(n))) {
ipoib_path_lookup(skb, dev);
return NETDEV_TX_OK;
}
- neigh = *to_ipoib_neigh(skb_dst(skb)->neighbour);
+ neigh = *to_ipoib_neigh(n);
if (unlikely((memcmp(&neigh->dgid.raw,
- skb_dst(skb)->neighbour->ha + 4,
+ n->ha + 4,
sizeof(union ib_gid))) ||
(neigh->dev != dev))) {
spin_lock_irqsave(&priv->lock, flags);
@@ -749,7 +758,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
} else if (neigh->ah) {
- ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
+ ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
return NETDEV_TX_OK;
}
@@ -812,6 +821,8 @@ static int ipoib_hard_header(struct sk_buff *skb,
const void *daddr, const void *saddr, unsigned len)
{
struct ipoib_header *header;
+ struct dst_entry *dst;
+ struct neighbour *n;
header = (struct ipoib_header *) skb_push(skb, sizeof *header);
@@ -823,7 +834,11 @@ static int ipoib_hard_header(struct sk_buff *skb,
* destination address onto the front of the skb so we can
* figure out where to send the packet later.
*/
- if ((!skb_dst(skb) || !skb_dst(skb)->neighbour) && daddr) {
+ dst = skb_dst(skb);
+ n = NULL;
+ if (dst)
+ n = dst_get_neighbour(dst);
+ if ((!dst || !n) && daddr) {
struct ipoib_pseudoheader *phdr =
(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 3871ac66355..ecea4fe1ed0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -258,11 +258,15 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
netif_tx_lock_bh(dev);
while (!skb_queue_empty(&mcast->pkt_queue)) {
struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
+ struct dst_entry *dst = skb_dst(skb);
+ struct neighbour *n = NULL;
+
netif_tx_unlock_bh(dev);
skb->dev = dev;
-
- if (!skb_dst(skb) || !skb_dst(skb)->neighbour) {
+ if (dst)
+ n = dst_get_neighbour(dst);
+ if (!dst || !n) {
/* put pseudoheader back on for next time */
skb_push(skb, sizeof (struct ipoib_pseudoheader));
}
@@ -715,11 +719,13 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
out:
if (mcast && mcast->ah) {
- if (skb_dst(skb) &&
- skb_dst(skb)->neighbour &&
- !*to_ipoib_neigh(skb_dst(skb)->neighbour)) {
- struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour,
- skb->dev);
+ struct dst_entry *dst = skb_dst(skb);
+ struct neighbour *n = NULL;
+ if (dst)
+ n = dst_get_neighbour(dst);
+ if (n && !*to_ipoib_neigh(n)) {
+ struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
+ skb->dev);
if (neigh) {
kref_get(&mcast->ah->ref);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 8db008de539..9c61b9c2c59 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -101,13 +101,17 @@ iscsi_iser_recv(struct iscsi_conn *conn,
/* verify PDU length */
datalen = ntoh24(hdr->dlength);
- if (datalen != rx_data_len) {
- printk(KERN_ERR "iscsi_iser: datalen %d (hdr) != %d (IB) \n",
- datalen, rx_data_len);
+ if (datalen > rx_data_len || (datalen + 4) < rx_data_len) {
+ iser_err("wrong datalen %d (hdr), %d (IB)\n",
+ datalen, rx_data_len);
rc = ISCSI_ERR_DATALEN;
goto error;
}
+ if (datalen != rx_data_len)
+ iser_dbg("aligned datalen (%d) hdr, %d (IB)\n",
+ datalen, rx_data_len);
+
/* read AHS */
ahslen = hdr->hlength * 4;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 2f02ab0ccc1..db6f3ce9f3b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -45,6 +45,7 @@
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
+#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
@@ -88,7 +89,7 @@
} while (0)
#define SHIFT_4K 12
-#define SIZE_4K (1UL << SHIFT_4K)
+#define SIZE_4K (1ULL << SHIFT_4K)
#define MASK_4K (~(SIZE_4K-1))
/* support up to 512KB in one RDMA */
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 95a08a8ca8a..f299de6b419 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -271,7 +271,7 @@ int iser_send_command(struct iscsi_conn *conn,
unsigned long edtl;
int err;
struct iser_data_buf *data_buf;
- struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr;
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iser_tx_desc *tx_desc = &iser_task->desc;
@@ -412,7 +412,7 @@ int iser_send_control(struct iscsi_conn *conn,
memcpy(iser_conn->ib_conn->login_buf, task->data,
task->data_count);
tx_dsg->addr = iser_conn->ib_conn->login_dma;
- tx_dsg->length = data_seg_len;
+ tx_dsg->length = task->data_count;
tx_dsg->lkey = device->mr->lkey;
mdesc->num_sge = 2;
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index ee165fdcb59..0bfa545675b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -39,7 +39,7 @@
#include <linux/random.h>
#include <linux/jiffies.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -2127,6 +2127,8 @@ static ssize_t srp_create_target(struct device *dev,
return -ENOMEM;
target_host->transportt = ib_srp_transport_template;
+ target_host->max_channel = 0;
+ target_host->max_id = 1;
target_host->max_lun = SRP_MAX_LUN;
target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 5b8f59d6c3e..c351aa421f8 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -47,7 +47,7 @@ static void gameport_disconnect_port(struct gameport *gameport);
#if defined(__i386__)
-#include <asm/i8253.h>
+#include <linux/i8253.h>
#define DELTA(x,y) ((y)-(x)+((y)<(x)?1193182/HZ:0))
#define GET_TIME(x) do { x = get_time_pit(); } while (0)
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index c02131785a3..358cd7ee905 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -136,7 +136,7 @@ struct analog_port {
#ifdef __i386__
-#include <asm/i8253.h>
+#include <linux/i8253.h>
#define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0)
#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index f2e8b9a347d..e7cc51d0fb3 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -520,7 +520,8 @@ static void pmic8xxx_kp_close(struct input_dev *dev)
*/
static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
{
- const struct pm8xxx_keypad_platform_data *pdata = mfd_get_data(pdev);
+ const struct pm8xxx_keypad_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
const struct matrix_keymap_data *keymap_data;
struct pmic8xxx_kp *kp;
int rc;
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 01344280e14..c9104bb4db0 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -305,7 +305,7 @@ config INPUT_TWL4030_PWRBUTTON
config INPUT_TWL4030_VIBRA
tristate "Support for TWL4030 Vibrator"
depends on TWL4030_CORE
- select TWL4030_CODEC
+ select MFD_TWL4030_AUDIO
select INPUT_FF_MEMLESS
help
This option enables support for TWL4030 Vibrator Driver.
@@ -313,6 +313,17 @@ config INPUT_TWL4030_VIBRA
To compile this driver as a module, choose M here. The module will
be called twl4030_vibra.
+config INPUT_TWL6040_VIBRA
+ tristate "Support for TWL6040 Vibrator"
+ depends on TWL4030_CORE
+ select TWL6040_CORE
+ select INPUT_FF_MEMLESS
+ help
+ This option enables support for TWL6040 Vibrator Driver.
+
+ To compile this driver as a module, choose M here. The module will
+ be called twl6040_vibra.
+
config INPUT_UINPUT
tristate "User level driver support"
help
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index be39d813354..299ad5edba8 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
obj-$(CONFIG_INPUT_TWL4030_PWRBUTTON) += twl4030-pwrbutton.o
obj-$(CONFIG_INPUT_TWL4030_VIBRA) += twl4030-vibra.o
+obj-$(CONFIG_INPUT_TWL6040_VIBRA) += twl6040-vibra.o
obj-$(CONFIG_INPUT_UINPUT) += uinput.o
obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o
obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index f080dd31499..34f4d2e0f50 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/i8253.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/platform_device.h>
@@ -25,14 +26,6 @@ MODULE_DESCRIPTION("PC Speaker beeper driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pcspkr");
-#if defined(CONFIG_MIPS) || defined(CONFIG_X86)
-/* Use the global PIT lock ! */
-#include <asm/i8253.h>
-#else
-#include <asm/8253pit.h>
-static DEFINE_RAW_SPINLOCK(i8253_lock);
-#endif
-
static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
{
unsigned int count = 0;
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 97e07e786e4..b3cfb9c71e6 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -90,7 +90,8 @@ static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev)
unsigned int delay;
u8 pon_cntl;
struct pmic8xxx_pwrkey *pwrkey;
- const struct pm8xxx_pwrkey_platform_data *pdata = mfd_get_data(pdev);
+ const struct pm8xxx_pwrkey_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "power key platform data not supplied\n");
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 014dd4ad0d4..3c1a432c14d 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -28,7 +28,7 @@
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/i2c/twl.h>
-#include <linux/mfd/twl4030-codec.h>
+#include <linux/mfd/twl4030-audio.h>
#include <linux/input.h>
#include <linux/slab.h>
@@ -67,7 +67,7 @@ static void vibra_enable(struct vibra_info *info)
{
u8 reg;
- twl4030_codec_enable_resource(TWL4030_CODEC_RES_POWER);
+ twl4030_audio_enable_resource(TWL4030_AUDIO_RES_POWER);
/* turn H-Bridge on */
twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE,
@@ -75,7 +75,7 @@ static void vibra_enable(struct vibra_info *info)
twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
(reg | TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
- twl4030_codec_enable_resource(TWL4030_CODEC_RES_APLL);
+ twl4030_audio_enable_resource(TWL4030_AUDIO_RES_APLL);
info->enabled = true;
}
@@ -90,8 +90,8 @@ static void vibra_disable(struct vibra_info *info)
twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
(reg & ~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL);
- twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL);
- twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER);
+ twl4030_audio_disable_resource(TWL4030_AUDIO_RES_APLL);
+ twl4030_audio_disable_resource(TWL4030_AUDIO_RES_POWER);
info->enabled = false;
}
@@ -196,7 +196,7 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
{
- struct twl4030_codec_vibra_data *pdata = pdev->dev.platform_data;
+ struct twl4030_vibra_data *pdata = pdev->dev.platform_data;
struct vibra_info *info;
int ret;
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
new file mode 100644
index 00000000000..c43002e7ec7
--- /dev/null
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -0,0 +1,423 @@
+/*
+ * twl6040-vibra.c - TWL6040 Vibrator driver
+ *
+ * Author: Jorge Eduardo Candelaria <jorge.candelaria@ti.com>
+ * Author: Misael Lopez Cruz <misael.lopez@ti.com>
+ *
+ * Copyright: (C) 2011 Texas Instruments, Inc.
+ *
+ * Based on twl4030-vibra.c by Henrik Saari <henrik.saari@nokia.com>
+ * Felipe Balbi <felipe.balbi@nokia.com>
+ * Jari Vanhala <ext-javi.vanhala@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/i2c/twl.h>
+#include <linux/mfd/twl6040.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+
+#define EFFECT_DIR_180_DEG 0x8000
+
+/* Recommended modulation index 85% */
+#define TWL6040_VIBRA_MOD 85
+
+#define TWL6040_NUM_SUPPLIES 2
+
+struct vibra_info {
+ struct device *dev;
+ struct input_dev *input_dev;
+ struct workqueue_struct *workqueue;
+ struct work_struct play_work;
+ struct mutex mutex;
+ int irq;
+
+ bool enabled;
+ int weak_speed;
+ int strong_speed;
+ int direction;
+
+ unsigned int vibldrv_res;
+ unsigned int vibrdrv_res;
+ unsigned int viblmotor_res;
+ unsigned int vibrmotor_res;
+
+ struct regulator_bulk_data supplies[TWL6040_NUM_SUPPLIES];
+
+ struct twl6040 *twl6040;
+};
+
+static irqreturn_t twl6040_vib_irq_handler(int irq, void *data)
+{
+ struct vibra_info *info = data;
+ struct twl6040 *twl6040 = info->twl6040;
+ u8 status;
+
+ status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
+ if (status & TWL6040_VIBLOCDET) {
+ dev_warn(info->dev, "Left Vibrator overcurrent detected\n");
+ twl6040_clear_bits(twl6040, TWL6040_REG_VIBCTLL,
+ TWL6040_VIBENAL);
+ }
+ if (status & TWL6040_VIBROCDET) {
+ dev_warn(info->dev, "Right Vibrator overcurrent detected\n");
+ twl6040_clear_bits(twl6040, TWL6040_REG_VIBCTLR,
+ TWL6040_VIBENAR);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void twl6040_vibra_enable(struct vibra_info *info)
+{
+ struct twl6040 *twl6040 = info->twl6040;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(info->supplies), info->supplies);
+ if (ret) {
+ dev_err(info->dev, "failed to enable regulators %d\n", ret);
+ return;
+ }
+
+ twl6040_power(info->twl6040, 1);
+ if (twl6040->rev <= TWL6040_REV_ES1_1) {
+ /*
+ * ERRATA: Disable overcurrent protection for at least
+ * 3ms when enabling vibrator drivers to avoid false
+ * overcurrent detection
+ */
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL,
+ TWL6040_VIBENAL | TWL6040_VIBCTRLL);
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR,
+ TWL6040_VIBENAR | TWL6040_VIBCTRLR);
+ usleep_range(3000, 3500);
+ }
+
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL,
+ TWL6040_VIBENAL);
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR,
+ TWL6040_VIBENAR);
+
+ info->enabled = true;
+}
+
+static void twl6040_vibra_disable(struct vibra_info *info)
+{
+ struct twl6040 *twl6040 = info->twl6040;
+
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLL, 0x00);
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBCTLR, 0x00);
+ twl6040_power(info->twl6040, 0);
+
+ regulator_bulk_disable(ARRAY_SIZE(info->supplies), info->supplies);
+
+ info->enabled = false;
+}
+
+static u8 twl6040_vibra_code(int vddvib, int vibdrv_res, int motor_res,
+ int speed, int direction)
+{
+ int vpk, max_code;
+ u8 vibdat;
+
+ /* output swing */
+ vpk = (vddvib * motor_res * TWL6040_VIBRA_MOD) /
+ (100 * (vibdrv_res + motor_res));
+
+ /* 50mV per VIBDAT code step */
+ max_code = vpk / 50;
+ if (max_code > TWL6040_VIBDAT_MAX)
+ max_code = TWL6040_VIBDAT_MAX;
+
+ /* scale speed to max allowed code */
+ vibdat = (u8)((speed * max_code) / USHRT_MAX);
+
+ /* 2's complement for direction > 180 degrees */
+ vibdat *= direction;
+
+ return vibdat;
+}
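
[Editor's note: a standalone sketch, not part of the patch, of the VIBDAT computation above, using hypothetical supply and resistance values purely for illustration. The driver additionally clamps the result to TWL6040_VIBDAT_MAX and negates it for directions past 180 degrees.]

#include <stdio.h>
#include <limits.h>

int main(void)
{
	int vddvib = 2900;   /* hypothetical vibrator supply, mV */
	int vibdrv_res = 20; /* hypothetical driver resistance, ohm */
	int motor_res = 32;  /* hypothetical motor resistance, ohm */
	int speed = 0x8000;  /* half-scale force-feedback magnitude */

	/* output swing at the recommended 85% modulation index */
	int vpk = (vddvib * motor_res * 85) / (100 * (vibdrv_res + motor_res));
	/* 50 mV per VIBDAT code step */
	int max_code = vpk / 50;
	/* scale the 16-bit magnitude onto the available codes */
	int vibdat = (speed * max_code) / USHRT_MAX;

	printf("vpk=%dmV max_code=%d vibdat=%d\n", vpk, max_code, vibdat);
	return 0;
}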
+
+static void twl6040_vibra_set_effect(struct vibra_info *info)
+{
+ struct twl6040 *twl6040 = info->twl6040;
+ u8 vibdatl, vibdatr;
+ int volt;
+
+ /* weak motor */
+ volt = regulator_get_voltage(info->supplies[0].consumer) / 1000;
+ vibdatl = twl6040_vibra_code(volt, info->vibldrv_res,
+ info->viblmotor_res,
+ info->weak_speed, info->direction);
+
+ /* strong motor */
+ volt = regulator_get_voltage(info->supplies[1].consumer) / 1000;
+ vibdatr = twl6040_vibra_code(volt, info->vibrdrv_res,
+ info->vibrmotor_res,
+ info->strong_speed, info->direction);
+
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBDATL, vibdatl);
+ twl6040_reg_write(twl6040, TWL6040_REG_VIBDATR, vibdatr);
+}
+
+static void vibra_play_work(struct work_struct *work)
+{
+ struct vibra_info *info = container_of(work,
+ struct vibra_info, play_work);
+
+ mutex_lock(&info->mutex);
+
+ if (info->weak_speed || info->strong_speed) {
+ if (!info->enabled)
+ twl6040_vibra_enable(info);
+
+ twl6040_vibra_set_effect(info);
+ } else if (info->enabled)
+ twl6040_vibra_disable(info);
+
+ mutex_unlock(&info->mutex);
+}
+
+static int vibra_play(struct input_dev *input, void *data,
+ struct ff_effect *effect)
+{
+ struct vibra_info *info = input_get_drvdata(input);
+ int ret;
+
+ info->weak_speed = effect->u.rumble.weak_magnitude;
+ info->strong_speed = effect->u.rumble.strong_magnitude;
+ info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
+
+ ret = queue_work(info->workqueue, &info->play_work);
+ if (!ret) {
+ dev_info(&input->dev, "work is already on queue\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void twl6040_vibra_close(struct input_dev *input)
+{
+ struct vibra_info *info = input_get_drvdata(input);
+
+ cancel_work_sync(&info->play_work);
+
+ mutex_lock(&info->mutex);
+
+ if (info->enabled)
+ twl6040_vibra_disable(info);
+
+ mutex_unlock(&info->mutex);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int twl6040_vibra_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vibra_info *info = platform_get_drvdata(pdev);
+
+ mutex_lock(&info->mutex);
+
+ if (info->enabled)
+ twl6040_vibra_disable(info);
+
+ mutex_unlock(&info->mutex);
+
+ return 0;
+}
+
+#endif
+
+static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
+
+static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
+{
+ struct twl4030_vibra_data *pdata = pdev->dev.platform_data;
+ struct vibra_info *info;
+ int ret;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform_data not available\n");
+ return -EINVAL;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "couldn't allocate memory\n");
+ return -ENOMEM;
+ }
+
+ info->dev = &pdev->dev;
+ info->twl6040 = dev_get_drvdata(pdev->dev.parent);
+ info->vibldrv_res = pdata->vibldrv_res;
+ info->vibrdrv_res = pdata->vibrdrv_res;
+ info->viblmotor_res = pdata->viblmotor_res;
+ info->vibrmotor_res = pdata->vibrmotor_res;
+ if ((!info->vibldrv_res && !info->viblmotor_res) ||
+ (!info->vibrdrv_res && !info->vibrmotor_res)) {
+ dev_err(info->dev, "invalid vibra driver/motor resistance\n");
+ ret = -EINVAL;
+ goto err_kzalloc;
+ }
+
+ info->irq = platform_get_irq(pdev, 0);
+ if (info->irq < 0) {
+ dev_err(info->dev, "invalid irq\n");
+ ret = -EINVAL;
+ goto err_kzalloc;
+ }
+
+ mutex_init(&info->mutex);
+
+ info->input_dev = input_allocate_device();
+ if (info->input_dev == NULL) {
+ dev_err(info->dev, "couldn't allocate input device\n");
+ ret = -ENOMEM;
+ goto err_kzalloc;
+ }
+
+ input_set_drvdata(info->input_dev, info);
+
+ info->input_dev->name = "twl6040:vibrator";
+ info->input_dev->id.version = 1;
+ info->input_dev->dev.parent = pdev->dev.parent;
+ info->input_dev->close = twl6040_vibra_close;
+ __set_bit(FF_RUMBLE, info->input_dev->ffbit);
+
+ ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
+ if (ret < 0) {
+ dev_err(info->dev, "couldn't register vibrator to FF\n");
+ goto err_ialloc;
+ }
+
+ ret = input_register_device(info->input_dev);
+ if (ret < 0) {
+ dev_err(info->dev, "couldn't register input device\n");
+ goto err_iff;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ ret = request_threaded_irq(info->irq, NULL, twl6040_vib_irq_handler, 0,
+ "twl6040_irq_vib", info);
+ if (ret) {
+ dev_err(info->dev, "VIB IRQ request failed: %d\n", ret);
+ goto err_irq;
+ }
+
+ info->supplies[0].supply = "vddvibl";
+ info->supplies[1].supply = "vddvibr";
+ ret = regulator_bulk_get(info->dev, ARRAY_SIZE(info->supplies),
+ info->supplies);
+ if (ret) {
+ dev_err(info->dev, "couldn't get regulators %d\n", ret);
+ goto err_regulator;
+ }
+
+ if (pdata->vddvibl_uV) {
+ ret = regulator_set_voltage(info->supplies[0].consumer,
+ pdata->vddvibl_uV,
+ pdata->vddvibl_uV);
+ if (ret) {
+ dev_err(info->dev, "failed to set VDDVIBL volt %d\n",
+ ret);
+ goto err_voltage;
+ }
+ }
+
+ if (pdata->vddvibr_uV) {
+ ret = regulator_set_voltage(info->supplies[1].consumer,
+ pdata->vddvibr_uV,
+ pdata->vddvibr_uV);
+ if (ret) {
+ dev_err(info->dev, "failed to set VDDVIBR volt %d\n",
+ ret);
+ goto err_voltage;
+ }
+ }
+
+ info->workqueue = alloc_workqueue("twl6040-vibra", 0, 0);
+ if (info->workqueue == NULL) {
+ dev_err(info->dev, "couldn't create workqueue\n");
+ ret = -ENOMEM;
+ goto err_voltage;
+ }
+ INIT_WORK(&info->play_work, vibra_play_work);
+
+ return 0;
+
+err_voltage:
+ regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
+err_regulator:
+ free_irq(info->irq, info);
+err_irq:
+ input_unregister_device(info->input_dev);
+ info->input_dev = NULL;
+err_iff:
+ if (info->input_dev)
+ input_ff_destroy(info->input_dev);
+err_ialloc:
+ input_free_device(info->input_dev);
+err_kzalloc:
+ kfree(info);
+ return ret;
+}
+
+static int __devexit twl6040_vibra_remove(struct platform_device *pdev)
+{
+ struct vibra_info *info = platform_get_drvdata(pdev);
+
+ input_unregister_device(info->input_dev);
+ free_irq(info->irq, info);
+ regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
+ destroy_workqueue(info->workqueue);
+ kfree(info);
+
+ return 0;
+}
+
+static struct platform_driver twl6040_vibra_driver = {
+ .probe = twl6040_vibra_probe,
+ .remove = __devexit_p(twl6040_vibra_remove),
+ .driver = {
+ .name = "twl6040-vibra",
+ .owner = THIS_MODULE,
+ .pm = &twl6040_vibra_pm_ops,
+ },
+};
+
+static int __init twl6040_vibra_init(void)
+{
+ return platform_driver_register(&twl6040_vibra_driver);
+}
+module_init(twl6040_vibra_init);
+
+static void __exit twl6040_vibra_exit(void)
+{
+ platform_driver_unregister(&twl6040_vibra_driver);
+}
+module_exit(twl6040_vibra_exit);
+
+MODULE_ALIAS("platform:twl6040-vibra");
+MODULE_DESCRIPTION("TWL6040 Vibra driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jorge Eduardo Candelaria <jorge.candelaria@ti.com>");
+MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 980af94ba9c..07a8363f3c5 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -210,7 +210,7 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
/*
* Some devices (Synaptics) perform the reset before
* ACKing the reset command, and so it can take a long
- * time before the ACK arrrives.
+ * time before the ACK arrives.
*/
if (ps2_sendbyte(ps2dev, command & 0xff,
command == PS2_CMD_RESET_BAT ? 1000 : 200))
diff --git a/drivers/input/serio/sa1111ps2.c b/drivers/input/serio/sa1111ps2.c
index d55874e5d1c..44fc8b4bcd8 100644
--- a/drivers/input/serio/sa1111ps2.c
+++ b/drivers/input/serio/sa1111ps2.c
@@ -300,8 +300,7 @@ static int __devinit ps2_probe(struct sa1111_dev *dev)
out:
sa1111_disable_device(ps2if->dev);
- release_mem_region(dev->res.start,
- dev->res.end - dev->res.start + 1);
+ release_mem_region(dev->res.start, resource_size(&dev->res));
free:
sa1111_set_drvdata(dev, NULL);
kfree(ps2if);
@@ -317,8 +316,7 @@ static int __devexit ps2_remove(struct sa1111_dev *dev)
struct ps2if *ps2if = sa1111_get_drvdata(dev);
serio_unregister_port(ps2if->io);
- release_mem_region(dev->res.start,
- dev->res.end - dev->res.start + 1);
+ release_mem_region(dev->res.start, resource_size(&dev->res));
sa1111_set_drvdata(dev, NULL);
kfree(ps2if);
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 80baa53da5b..d64c5a43aaa 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -23,7 +23,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
-
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
new file mode 100644
index 00000000000..b57b3fa492f
--- /dev/null
+++ b/drivers/iommu/Kconfig
@@ -0,0 +1,110 @@
+# IOMMU_API always gets selected by whoever wants it.
+config IOMMU_API
+ bool
+
+menuconfig IOMMU_SUPPORT
+ bool "IOMMU Hardware Support"
+ default y
+ ---help---
+ Say Y here if you want to compile device drivers for IO Memory
+ Management Units into the kernel. These devices usually allow
+ remapping of DMA requests and/or interrupts from other devices on
+ the system.
+
+if IOMMU_SUPPORT
+
+# MSM IOMMU support
+config MSM_IOMMU
+ bool "MSM IOMMU Support"
+ depends on ARCH_MSM8X60 || ARCH_MSM8960
+ select IOMMU_API
+ help
+ Support for the IOMMUs found on certain Qualcomm SoCs.
+ These IOMMUs allow virtualization of the address space used by most
+ cores within the multimedia subsystem.
+
+ If unsure, say N here.
+
+config IOMMU_PGTABLES_L2
+ def_bool y
+ depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
+
+# AMD IOMMU support
+config AMD_IOMMU
+ bool "AMD IOMMU support"
+ select SWIOTLB
+ select PCI_MSI
+ select PCI_IOV
+ select IOMMU_API
+ depends on X86_64 && PCI && ACPI
+ ---help---
+ With this option you can enable support for AMD IOMMU hardware in
+ your system. An IOMMU is a hardware component which provides
+ remapping of DMA memory accesses from devices. With an AMD IOMMU you
+ can isolate the DMA memory of different devices and protect the
+ system from misbehaving device drivers or hardware.
+
+ You can find out if your system has an AMD IOMMU if you look into
+ your BIOS for an option to enable it or if you have an IVRS ACPI
+ table.
+
+config AMD_IOMMU_STATS
+ bool "Export AMD IOMMU statistics to debugfs"
+ depends on AMD_IOMMU
+ select DEBUG_FS
+ ---help---
+ This option enables code in the AMD IOMMU driver to collect various
+ statistics about what's happening in the driver and exports that
+ information to userspace via debugfs.
+ If unsure, say N.
+
+# Intel IOMMU support
+config DMAR
+ bool "Support for DMA Remapping Devices"
+ depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
+ select IOMMU_API
+ help
+ Support for DMA remapping (DMAR) devices enables independent address
+ translations for Direct Memory Access (DMA) from devices.
+ These DMA remapping devices are reported via ACPI tables
+ and include PCI device scope covered by these DMA
+ remapping devices.
+
+config DMAR_DEFAULT_ON
+ def_bool y
+ prompt "Enable DMA Remapping Devices by default"
+ depends on DMAR
+ help
+ Selecting this option will enable a DMAR device at boot time if
+ one is found. If this option is not selected, DMAR support can
+ be enabled by passing intel_iommu=on to the kernel.
+
+config DMAR_BROKEN_GFX_WA
+ bool "Workaround broken graphics drivers (going away soon)"
+ depends on DMAR && BROKEN && X86
+ ---help---
+ Current graphics drivers tend to use physical addresses
+ for DMA and avoid using the DMA API. Setting this config
+ option permits the IOMMU driver to set a unity map for
+ all the OS-visible memory. Hence the driver can continue
+ to use physical addresses for DMA, at least until this
+ option is removed in the 2.6.32 kernel.
+
+config DMAR_FLOPPY_WA
+ def_bool y
+ depends on DMAR && X86
+ ---help---
+ Floppy disk drivers are known to bypass DMA API calls
+ thereby failing to work when the IOMMU is enabled. This
+ workaround will set up a 1:1 mapping for the first
+ 16MiB to make the floppy (an ISA device) work.
+
+config INTR_REMAP
+ bool "Support for Interrupt Remapping (EXPERIMENTAL)"
+ depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
+ ---help---
+ Supports Interrupt remapping for IO-APIC and MSI devices.
+ To use x2apic mode on CPUs which support x2APIC enhancements, or
+ to support platforms with CPUs having > 8-bit APIC IDs, say Y.
+
+endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
new file mode 100644
index 00000000000..4d4d77df7ca
--- /dev/null
+++ b/drivers/iommu/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
+obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
+obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
new file mode 100644
index 00000000000..a14f8dc2346
--- /dev/null
+++ b/drivers/iommu/amd_iommu.c
@@ -0,0 +1,2824 @@
+/*
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Leo Duran <leo.duran@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-ats.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu-helper.h>
+#include <linux/iommu.h>
+#include <linux/delay.h>
+#include <linux/amd-iommu.h>
+#include <asm/msidef.h>
+#include <asm/proto.h>
+#include <asm/iommu.h>
+#include <asm/gart.h>
+#include <asm/dma.h>
+
+#include "amd_iommu_proto.h"
+#include "amd_iommu_types.h"
+
+#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
+
+#define LOOP_TIMEOUT 100000
+
+static DEFINE_RWLOCK(amd_iommu_devtable_lock);
+
+/* A list of preallocated protection domains */
+static LIST_HEAD(iommu_pd_list);
+static DEFINE_SPINLOCK(iommu_pd_list_lock);
+
+/* List of all available dev_data structures */
+static LIST_HEAD(dev_data_list);
+static DEFINE_SPINLOCK(dev_data_list_lock);
+
+/*
+ * Domain for untranslated devices - only allocated
+ * if iommu=pt passed on kernel cmd line.
+ */
+static struct protection_domain *pt_domain;
+
+static struct iommu_ops amd_iommu_ops;
+
+/*
+ * general struct to manage commands send to an IOMMU
+ */
+struct iommu_cmd {
+ u32 data[4];
+};
+
+static void update_domain(struct protection_domain *domain);
+
+/****************************************************************************
+ *
+ * Helper functions
+ *
+ ****************************************************************************/
+
+static struct iommu_dev_data *alloc_dev_data(u16 devid)
+{
+ struct iommu_dev_data *dev_data;
+ unsigned long flags;
+
+ dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return NULL;
+
+ dev_data->devid = devid;
+ atomic_set(&dev_data->bind, 0);
+
+ spin_lock_irqsave(&dev_data_list_lock, flags);
+ list_add_tail(&dev_data->dev_data_list, &dev_data_list);
+ spin_unlock_irqrestore(&dev_data_list_lock, flags);
+
+ return dev_data;
+}
+
+static void free_dev_data(struct iommu_dev_data *dev_data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_data_list_lock, flags);
+ list_del(&dev_data->dev_data_list);
+ spin_unlock_irqrestore(&dev_data_list_lock, flags);
+
+ kfree(dev_data);
+}
+
+static struct iommu_dev_data *search_dev_data(u16 devid)
+{
+ struct iommu_dev_data *dev_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_data_list_lock, flags);
+ list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
+ if (dev_data->devid == devid)
+ goto out_unlock;
+ }
+
+ dev_data = NULL;
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_data_list_lock, flags);
+
+ return dev_data;
+}
+
+static struct iommu_dev_data *find_dev_data(u16 devid)
+{
+ struct iommu_dev_data *dev_data;
+
+ dev_data = search_dev_data(devid);
+
+ if (dev_data == NULL)
+ dev_data = alloc_dev_data(devid);
+
+ return dev_data;
+}
+
+static inline u16 get_device_id(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return calc_devid(pdev->bus->number, pdev->devfn);
+}
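
[Editor's note: a minimal sketch, not part of the patch, of the 16-bit device-id layout assumed here. calc_devid() itself is not shown in this hunk, but the PCI_BUS()/PCI_SLOT()/PCI_FUNC() decoding used later in this file implies bus in the high byte and devfn in the low byte.]

#include <stdint.h>
#include <assert.h>

static uint16_t pack_devid(uint8_t bus, uint8_t devfn)
{
	return (uint16_t)(bus << 8) | devfn;
}

int main(void)
{
	uint16_t devid = pack_devid(0x03, (0x1f << 3) | 0x2); /* 03:1f.2 */

	assert((devid >> 8) == 0x03);          /* bus      */
	assert(((devid >> 3) & 0x1f) == 0x1f); /* slot     */
	assert((devid & 0x7) == 0x2);          /* function */
	return 0;
}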
+
+static struct iommu_dev_data *get_dev_data(struct device *dev)
+{
+ return dev->archdata.iommu;
+}
+
+/*
+ * In this function the list of preallocated protection domains is traversed to
+ * find the domain for a specific device
+ */
+static struct dma_ops_domain *find_protection_domain(u16 devid)
+{
+ struct dma_ops_domain *entry, *ret = NULL;
+ unsigned long flags;
+ u16 alias = amd_iommu_alias_table[devid];
+
+ if (list_empty(&iommu_pd_list))
+ return NULL;
+
+ spin_lock_irqsave(&iommu_pd_list_lock, flags);
+
+ list_for_each_entry(entry, &iommu_pd_list, list) {
+ if (entry->target_dev == devid ||
+ entry->target_dev == alias) {
+ ret = entry;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+
+ return ret;
+}
+
+/*
+ * This function checks if the driver got a valid device from the caller to
+ * avoid dereferencing invalid pointers.
+ */
+static bool check_device(struct device *dev)
+{
+ u16 devid;
+
+ if (!dev || !dev->dma_mask)
+ return false;
+
+ /* No device or no PCI device */
+ if (dev->bus != &pci_bus_type)
+ return false;
+
+ devid = get_device_id(dev);
+
+ /* Out of our scope? */
+ if (devid > amd_iommu_last_bdf)
+ return false;
+
+ if (amd_iommu_rlookup_table[devid] == NULL)
+ return false;
+
+ return true;
+}
+
+static int iommu_init_device(struct device *dev)
+{
+ struct iommu_dev_data *dev_data;
+ u16 alias;
+
+ if (dev->archdata.iommu)
+ return 0;
+
+ dev_data = find_dev_data(get_device_id(dev));
+ if (!dev_data)
+ return -ENOMEM;
+
+ alias = amd_iommu_alias_table[dev_data->devid];
+ if (alias != dev_data->devid) {
+ struct iommu_dev_data *alias_data;
+
+ alias_data = find_dev_data(alias);
+ if (alias_data == NULL) {
+ pr_err("AMD-Vi: Warning: Unhandled device %s\n",
+ dev_name(dev));
+ free_dev_data(dev_data);
+ return -ENOTSUPP;
+ }
+ dev_data->alias_data = alias_data;
+ }
+
+ dev->archdata.iommu = dev_data;
+
+ return 0;
+}
+
+static void iommu_ignore_device(struct device *dev)
+{
+ u16 devid, alias;
+
+ devid = get_device_id(dev);
+ alias = amd_iommu_alias_table[devid];
+
+ memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
+ memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
+
+ amd_iommu_rlookup_table[devid] = NULL;
+ amd_iommu_rlookup_table[alias] = NULL;
+}
+
+static void iommu_uninit_device(struct device *dev)
+{
+ /*
+ * Nothing to do here - we keep dev_data around for unplugged devices
+ * and reuse it when the device is re-plugged - not doing so would
+ * introduce a ton of races.
+ */
+}
+
+void __init amd_iommu_uninit_devices(void)
+{
+ struct iommu_dev_data *dev_data, *n;
+ struct pci_dev *pdev = NULL;
+
+ for_each_pci_dev(pdev) {
+
+ if (!check_device(&pdev->dev))
+ continue;
+
+ iommu_uninit_device(&pdev->dev);
+ }
+
+ /* Free all of our dev_data structures */
+ list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
+ free_dev_data(dev_data);
+}
+
+int __init amd_iommu_init_devices(void)
+{
+ struct pci_dev *pdev = NULL;
+ int ret = 0;
+
+ for_each_pci_dev(pdev) {
+
+ if (!check_device(&pdev->dev))
+ continue;
+
+ ret = iommu_init_device(&pdev->dev);
+ if (ret == -ENOTSUPP)
+ iommu_ignore_device(&pdev->dev);
+ else if (ret)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+
+ amd_iommu_uninit_devices();
+
+ return ret;
+}
+#ifdef CONFIG_AMD_IOMMU_STATS
+
+/*
+ * Initialization code for statistics collection
+ */
+
+DECLARE_STATS_COUNTER(compl_wait);
+DECLARE_STATS_COUNTER(cnt_map_single);
+DECLARE_STATS_COUNTER(cnt_unmap_single);
+DECLARE_STATS_COUNTER(cnt_map_sg);
+DECLARE_STATS_COUNTER(cnt_unmap_sg);
+DECLARE_STATS_COUNTER(cnt_alloc_coherent);
+DECLARE_STATS_COUNTER(cnt_free_coherent);
+DECLARE_STATS_COUNTER(cross_page);
+DECLARE_STATS_COUNTER(domain_flush_single);
+DECLARE_STATS_COUNTER(domain_flush_all);
+DECLARE_STATS_COUNTER(alloced_io_mem);
+DECLARE_STATS_COUNTER(total_map_requests);
+
+static struct dentry *stats_dir;
+static struct dentry *de_fflush;
+
+static void amd_iommu_stats_add(struct __iommu_counter *cnt)
+{
+ if (stats_dir == NULL)
+ return;
+
+ cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
+ &cnt->value);
+}
+
+static void amd_iommu_stats_init(void)
+{
+ stats_dir = debugfs_create_dir("amd-iommu", NULL);
+ if (stats_dir == NULL)
+ return;
+
+ de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
+ (u32 *)&amd_iommu_unmap_flush);
+
+ amd_iommu_stats_add(&compl_wait);
+ amd_iommu_stats_add(&cnt_map_single);
+ amd_iommu_stats_add(&cnt_unmap_single);
+ amd_iommu_stats_add(&cnt_map_sg);
+ amd_iommu_stats_add(&cnt_unmap_sg);
+ amd_iommu_stats_add(&cnt_alloc_coherent);
+ amd_iommu_stats_add(&cnt_free_coherent);
+ amd_iommu_stats_add(&cross_page);
+ amd_iommu_stats_add(&domain_flush_single);
+ amd_iommu_stats_add(&domain_flush_all);
+ amd_iommu_stats_add(&alloced_io_mem);
+ amd_iommu_stats_add(&total_map_requests);
+}
+
+#endif
+
+/****************************************************************************
+ *
+ * Interrupt handling functions
+ *
+ ****************************************************************************/
+
+static void dump_dte_entry(u16 devid)
+{
+ int i;
+
+ for (i = 0; i < 8; ++i)
+ pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
+ amd_iommu_dev_table[devid].data[i]);
+}
+
+static void dump_command(unsigned long phys_addr)
+{
+ struct iommu_cmd *cmd = phys_to_virt(phys_addr);
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
+}
+
+static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
+{
+ u32 *event = __evt;
+ int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+ int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+ int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+ int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+ u64 address = (u64)(((u64)event[3]) << 32) | event[2];
+
+ printk(KERN_ERR "AMD-Vi: Event logged [");
+
+ switch (type) {
+ case EVENT_TYPE_ILL_DEV:
+ printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
+ "address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ address, flags);
+ dump_dte_entry(devid);
+ break;
+ case EVENT_TYPE_IO_FAULT:
+ printk("IO_PAGE_FAULT device=%02x:%02x.%x "
+ "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ domid, address, flags);
+ break;
+ case EVENT_TYPE_DEV_TAB_ERR:
+ printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+ "address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ address, flags);
+ break;
+ case EVENT_TYPE_PAGE_TAB_ERR:
+ printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
+ "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ domid, address, flags);
+ break;
+ case EVENT_TYPE_ILL_CMD:
+ printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
+ dump_command(address);
+ break;
+ case EVENT_TYPE_CMD_HARD_ERR:
+ printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
+ "flags=0x%04x]\n", address, flags);
+ break;
+ case EVENT_TYPE_IOTLB_INV_TO:
+ printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
+ "address=0x%016llx]\n",
+ PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ address);
+ break;
+ case EVENT_TYPE_INV_DEV_REQ:
+ printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
+ "address=0x%016llx flags=0x%04x]\n",
+ PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
+ address, flags);
+ break;
+ default:
+ printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
+ }
+}
+
+static void iommu_poll_events(struct amd_iommu *iommu)
+{
+ u32 head, tail;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+
+ while (head != tail) {
+ iommu_print_event(iommu, iommu->evt_buf + head);
+ head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
+ }
+
+ writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+irqreturn_t amd_iommu_int_thread(int irq, void *data)
+{
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu)
+ iommu_poll_events(iommu);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t amd_iommu_int_handler(int irq, void *data)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+/****************************************************************************
+ *
+ * IOMMU command queuing functions
+ *
+ ****************************************************************************/
+
+static int wait_on_sem(volatile u64 *sem)
+{
+ int i = 0;
+
+ while (*sem == 0 && i < LOOP_TIMEOUT) {
+ udelay(1);
+ i += 1;
+ }
+
+ if (i == LOOP_TIMEOUT) {
+ pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void copy_cmd_to_buffer(struct amd_iommu *iommu,
+ struct iommu_cmd *cmd,
+ u32 tail)
+{
+ u8 *target;
+
+ target = iommu->cmd_buf + tail;
+ tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
+
+ /* Copy command to buffer */
+ memcpy(target, cmd, sizeof(*cmd));
+
+ /* Tell the IOMMU about it */
+ writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+}
+
+static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
+{
+ WARN_ON(address & 0x7ULL);
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
+ cmd->data[1] = upper_32_bits(__pa(address));
+ cmd->data[2] = 1;
+ CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
+}
+
+static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->data[0] = devid;
+ CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
+}
+
+static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
+ size_t size, u16 domid, int pde)
+{
+ u64 pages;
+ int s;
+
+ pages = iommu_num_pages(address, size, PAGE_SIZE);
+ s = 0;
+
+ if (pages > 1) {
+ /*
+ * If we have to flush more than one page, flush all
+ * TLB entries for this domain
+ */
+ address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+ s = 1;
+ }
+
+ address &= PAGE_MASK;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->data[1] |= domid;
+ cmd->data[2] = lower_32_bits(address);
+ cmd->data[3] = upper_32_bits(address);
+ CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
+ if (s) /* size bit - we flush more than one 4kb page */
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+ if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+}
+
+static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
+ u64 address, size_t size)
+{
+ u64 pages;
+ int s;
+
+ pages = iommu_num_pages(address, size, PAGE_SIZE);
+ s = 0;
+
+ if (pages > 1) {
+ /*
+ * If we have to flush more than one page, flush all
+ * TLB entries for this domain
+ */
+ address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+ s = 1;
+ }
+
+ address &= PAGE_MASK;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->data[0] = devid;
+ cmd->data[0] |= (qdep & 0xff) << 24;
+ cmd->data[1] = devid;
+ cmd->data[2] = lower_32_bits(address);
+ cmd->data[3] = upper_32_bits(address);
+ CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
+ if (s)
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+}
+
+static void build_inv_all(struct iommu_cmd *cmd)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ CMD_SET_TYPE(cmd, CMD_INV_ALL);
+}
+
+/*
+ * Writes the command to the IOMMU's command buffer and informs the
+ * hardware about the new command.
+ */
+static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+{
+ u32 left, tail, head, next_tail;
+ unsigned long flags;
+
+ WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
+
+again:
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+ next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
+ left = (head - next_tail) % iommu->cmd_buf_size;
+
+ if (left <= 2) {
+ struct iommu_cmd sync_cmd;
+ volatile u64 sem = 0;
+ int ret;
+
+ build_completion_wait(&sync_cmd, (u64)&sem);
+ copy_cmd_to_buffer(iommu, &sync_cmd, tail);
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ if ((ret = wait_on_sem(&sem)) != 0)
+ return ret;
+
+ goto again;
+ }
+
+ copy_cmd_to_buffer(iommu, cmd, tail);
+
+ /* We need to sync now to make sure all commands are processed */
+ iommu->need_sync = true;
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return 0;
+}
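
[Editor's note: a standalone sketch, not part of the patch, of the free-space computation used above, assuming a power-of-two ring size so that the unsigned wrap-around of head - next_tail comes out right. When the result drops to a couple of entries, the function queues a COMPLETION_WAIT and retries.]

#include <stdio.h>

static unsigned int ring_space(unsigned int head, unsigned int tail,
			       unsigned int entry_size, unsigned int ring_size)
{
	unsigned int next_tail = (tail + entry_size) % ring_size;

	/* distance from the proposed tail back to the hardware read pointer */
	return (head - next_tail) % ring_size;
}

int main(void)
{
	/* hypothetical 4 KiB ring with 16-byte commands */
	printf("%u\n", ring_space(2048, 2048, 16, 4096)); /* mostly free */
	printf("%u\n", ring_space(0, 4080, 16, 4096));    /* full, must wait */
	return 0;
}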
+
+/*
+ * This function queues a completion wait command into the command
+ * buffer of an IOMMU
+ */
+static int iommu_completion_wait(struct amd_iommu *iommu)
+{
+ struct iommu_cmd cmd;
+ volatile u64 sem = 0;
+ int ret;
+
+ if (!iommu->need_sync)
+ return 0;
+
+ build_completion_wait(&cmd, (u64)&sem);
+
+ ret = iommu_queue_command(iommu, &cmd);
+ if (ret)
+ return ret;
+
+ return wait_on_sem(&sem);
+}
+
+static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
+{
+ struct iommu_cmd cmd;
+
+ build_inv_dte(&cmd, devid);
+
+ return iommu_queue_command(iommu, &cmd);
+}
+
+static void iommu_flush_dte_all(struct amd_iommu *iommu)
+{
+ u32 devid;
+
+ for (devid = 0; devid <= 0xffff; ++devid)
+ iommu_flush_dte(iommu, devid);
+
+ iommu_completion_wait(iommu);
+}
+
+/*
+ * This function uses heavy locking and may disable irqs for some time. But
+ * this is no issue because it is only called during resume.
+ */
+static void iommu_flush_tlb_all(struct amd_iommu *iommu)
+{
+ u32 dom_id;
+
+ for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
+ struct iommu_cmd cmd;
+ build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+ dom_id, 1);
+ iommu_queue_command(iommu, &cmd);
+ }
+
+ iommu_completion_wait(iommu);
+}
+
+static void iommu_flush_all(struct amd_iommu *iommu)
+{
+ struct iommu_cmd cmd;
+
+ build_inv_all(&cmd);
+
+ iommu_queue_command(iommu, &cmd);
+ iommu_completion_wait(iommu);
+}
+
+void iommu_flush_all_caches(struct amd_iommu *iommu)
+{
+ if (iommu_feature(iommu, FEATURE_IA)) {
+ iommu_flush_all(iommu);
+ } else {
+ iommu_flush_dte_all(iommu);
+ iommu_flush_tlb_all(iommu);
+ }
+}
+
+/*
+ * Command send function for flushing on-device TLB
+ */
+static int device_flush_iotlb(struct iommu_dev_data *dev_data,
+ u64 address, size_t size)
+{
+ struct amd_iommu *iommu;
+ struct iommu_cmd cmd;
+ int qdep;
+
+ qdep = dev_data->ats.qdep;
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+
+ build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);
+
+ return iommu_queue_command(iommu, &cmd);
+}
+
+/*
+ * Command send function for invalidating a device table entry
+ */
+static int device_flush_dte(struct iommu_dev_data *dev_data)
+{
+ struct amd_iommu *iommu;
+ int ret;
+
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+
+ ret = iommu_flush_dte(iommu, dev_data->devid);
+ if (ret)
+ return ret;
+
+ if (dev_data->ats.enabled)
+ ret = device_flush_iotlb(dev_data, 0, ~0UL);
+
+ return ret;
+}
+
+/*
+ * TLB invalidation function which is called from the mapping functions.
+ * It invalidates a single PTE if the range to flush is within a single
+ * page. Otherwise it flushes the whole TLB of the IOMMU.
+ */
+static void __domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size, int pde)
+{
+ struct iommu_dev_data *dev_data;
+ struct iommu_cmd cmd;
+ int ret = 0, i;
+
+ build_inv_iommu_pages(&cmd, address, size, domain->id, pde);
+
+ for (i = 0; i < amd_iommus_present; ++i) {
+ if (!domain->dev_iommu[i])
+ continue;
+
+ /*
+ * Devices of this domain are behind this IOMMU
+ * We need a TLB flush
+ */
+ ret |= iommu_queue_command(amd_iommus[i], &cmd);
+ }
+
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
+
+ if (!dev_data->ats.enabled)
+ continue;
+
+ ret |= device_flush_iotlb(dev_data, address, size);
+ }
+
+ WARN_ON(ret);
+}
+
+static void domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size)
+{
+ __domain_flush_pages(domain, address, size, 0);
+}
+
+/* Flush the whole IO/TLB for a given protection domain */
+static void domain_flush_tlb(struct protection_domain *domain)
+{
+ __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
+}
+
+/* Flush the whole IO/TLB for a given protection domain - including PDE */
+static void domain_flush_tlb_pde(struct protection_domain *domain)
+{
+ __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
+}
+
+static void domain_flush_complete(struct protection_domain *domain)
+{
+ int i;
+
+ for (i = 0; i < amd_iommus_present; ++i) {
+ if (!domain->dev_iommu[i])
+ continue;
+
+ /*
+ * Devices of this domain are behind this IOMMU
+ * We need to wait for completion of all commands.
+ */
+ iommu_completion_wait(amd_iommus[i]);
+ }
+}
+
+
+/*
+ * This function flushes the DTEs for all devices in domain
+ */
+static void domain_flush_devices(struct protection_domain *domain)
+{
+ struct iommu_dev_data *dev_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ list_for_each_entry(dev_data, &domain->dev_list, list)
+ device_flush_dte(dev_data);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+/****************************************************************************
+ *
+ * The functions below are used to create the page table mappings for
+ * unity mapped regions.
+ *
+ ****************************************************************************/
+
+/*
+ * This function is used to add another level to an IO page table. Adding
+ * another level increases the size of the address space by 9 bits to a size up
+ * to 64 bits.
+ */
+static bool increase_address_space(struct protection_domain *domain,
+ gfp_t gfp)
+{
+ u64 *pte;
+
+ if (domain->mode == PAGE_MODE_6_LEVEL)
+ /* address space already 64 bit large */
+ return false;
+
+ pte = (void *)get_zeroed_page(gfp);
+ if (!pte)
+ return false;
+
+ *pte = PM_LEVEL_PDE(domain->mode,
+ virt_to_phys(domain->pt_root));
+ domain->pt_root = pte;
+ domain->mode += 1;
+ domain->updated = true;
+
+ return true;
+}
+
+static u64 *alloc_pte(struct protection_domain *domain,
+ unsigned long address,
+ unsigned long page_size,
+ u64 **pte_page,
+ gfp_t gfp)
+{
+ int level, end_lvl;
+ u64 *pte, *page;
+
+ BUG_ON(!is_power_of_2(page_size));
+
+ while (address > PM_LEVEL_SIZE(domain->mode))
+ increase_address_space(domain, gfp);
+
+ level = domain->mode - 1;
+ pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+ address = PAGE_SIZE_ALIGN(address, page_size);
+ end_lvl = PAGE_SIZE_LEVEL(page_size);
+
+ while (level > end_lvl) {
+ if (!IOMMU_PTE_PRESENT(*pte)) {
+ page = (u64 *)get_zeroed_page(gfp);
+ if (!page)
+ return NULL;
+ *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
+ }
+
+ /* No level skipping support yet */
+ if (PM_PTE_LEVEL(*pte) != level)
+ return NULL;
+
+ level -= 1;
+
+ pte = IOMMU_PTE_PAGE(*pte);
+
+ if (pte_page && level == end_lvl)
+ *pte_page = pte;
+
+ pte = &pte[PM_LEVEL_INDEX(level, address)];
+ }
+
+ return pte;
+}
+
+/*
+ * This function checks if there is a PTE for a given dma address. If
+ * there is one, it returns the pointer to it.
+ */
+static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
+{
+ int level;
+ u64 *pte;
+
+ if (address > PM_LEVEL_SIZE(domain->mode))
+ return NULL;
+
+ level = domain->mode - 1;
+ pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+
+ while (level > 0) {
+
+ /* Not Present */
+ if (!IOMMU_PTE_PRESENT(*pte))
+ return NULL;
+
+ /* Large PTE */
+ if (PM_PTE_LEVEL(*pte) == 0x07) {
+ unsigned long pte_mask, __pte;
+
+ /*
+ * If we have a series of large PTEs, make
+ * sure to return a pointer to the first one.
+ */
+ pte_mask = PTE_PAGE_SIZE(*pte);
+ pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
+ __pte = ((unsigned long)pte) & pte_mask;
+
+ return (u64 *)__pte;
+ }
+
+ /* No level skipping support yet */
+ if (PM_PTE_LEVEL(*pte) != level)
+ return NULL;
+
+ level -= 1;
+
+ /* Walk to the next level */
+ pte = IOMMU_PTE_PAGE(*pte);
+ pte = &pte[PM_LEVEL_INDEX(level, address)];
+ }
+
+ return pte;
+}
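
[Editor's note: a minimal sketch, not part of the patch, of the pointer alignment performed for large PTEs above: a large mapping is replicated across N consecutive 8-byte entries, so masking the entry address down to a multiple of N * 8 recovers the first replica, which is what the caller is expected to see.]

#include <stdint.h>
#include <assert.h>

static uintptr_t first_replica(uintptr_t pte_addr, unsigned long count)
{
	uintptr_t mask = ~(uintptr_t)((count << 3) - 1); /* count 8-byte entries */

	return pte_addr & mask;
}

int main(void)
{
	/* hypothetical: a 64 KiB mapping replicated over 16 entries, replica #5 */
	uintptr_t base = 0x100000;

	assert(first_replica(base + 5 * 8, 16) == base);
	return 0;
}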
+
+/*
+ * Generic mapping function. It maps a physical address into a DMA
+ * address space. It allocates the page table pages if necessary.
+ * In the future it can be extended to a generic mapping function
+ * supporting all features of AMD IOMMU page tables like level skipping
+ * and full 64 bit address spaces.
+ */
+static int iommu_map_page(struct protection_domain *dom,
+ unsigned long bus_addr,
+ unsigned long phys_addr,
+ int prot,
+ unsigned long page_size)
+{
+ u64 __pte, *pte;
+ int i, count;
+
+ if (!(prot & IOMMU_PROT_MASK))
+ return -EINVAL;
+
+ bus_addr = PAGE_ALIGN(bus_addr);
+ phys_addr = PAGE_ALIGN(phys_addr);
+ count = PAGE_SIZE_PTE_COUNT(page_size);
+ pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
+
+ for (i = 0; i < count; ++i)
+ if (IOMMU_PTE_PRESENT(pte[i]))
+ return -EBUSY;
+
+ if (page_size > PAGE_SIZE) {
+ __pte = PAGE_SIZE_PTE(phys_addr, page_size);
+ __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
+ } else
+ __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
+
+ if (prot & IOMMU_PROT_IR)
+ __pte |= IOMMU_PTE_IR;
+ if (prot & IOMMU_PROT_IW)
+ __pte |= IOMMU_PTE_IW;
+
+ for (i = 0; i < count; ++i)
+ pte[i] = __pte;
+
+ update_domain(dom);
+
+ return 0;
+}
+
+static unsigned long iommu_unmap_page(struct protection_domain *dom,
+ unsigned long bus_addr,
+ unsigned long page_size)
+{
+ unsigned long long unmap_size, unmapped;
+ u64 *pte;
+
+ BUG_ON(!is_power_of_2(page_size));
+
+ unmapped = 0;
+
+ while (unmapped < page_size) {
+
+ pte = fetch_pte(dom, bus_addr);
+
+ if (!pte) {
+ /*
+ * No PTE for this address
+ * move forward in 4kb steps
+ */
+ unmap_size = PAGE_SIZE;
+ } else if (PM_PTE_LEVEL(*pte) == 0) {
+ /* 4kb PTE found for this address */
+ unmap_size = PAGE_SIZE;
+ *pte = 0ULL;
+ } else {
+ int count, i;
+
+ /* Large PTE found which maps this address */
+ unmap_size = PTE_PAGE_SIZE(*pte);
+ count = PAGE_SIZE_PTE_COUNT(unmap_size);
+ for (i = 0; i < count; i++)
+ pte[i] = 0ULL;
+ }
+
+ bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size;
+ unmapped += unmap_size;
+ }
+
+ BUG_ON(!is_power_of_2(unmapped));
+
+ return unmapped;
+}
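
[Editor's note: a small sketch, not part of the patch, of how the loop above advances: rounding bus_addr down to the current unmap size and stepping one unit forward always lands on the next naturally aligned boundary, whether the current entry was a 4 KiB PTE, a large PTE, or missing altogether.]

#include <stdio.h>

static unsigned long next_boundary(unsigned long bus_addr,
				   unsigned long unmap_size)
{
	/* unmap_size is a power of two, so this is a round-down plus one step */
	return (bus_addr & ~(unmap_size - 1)) + unmap_size;
}

int main(void)
{
	printf("%#lx\n", next_boundary(0x12345, 0x1000));  /* -> 0x13000 */
	printf("%#lx\n", next_boundary(0x12345, 0x10000)); /* -> 0x20000 */
	return 0;
}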
+
+/*
+ * This function checks if a specific unity mapping entry is needed for
+ * this specific IOMMU.
+ */
+static int iommu_for_unity_map(struct amd_iommu *iommu,
+ struct unity_map_entry *entry)
+{
+ u16 bdf, i;
+
+ for (i = entry->devid_start; i <= entry->devid_end; ++i) {
+ bdf = amd_iommu_alias_table[i];
+ if (amd_iommu_rlookup_table[bdf] == iommu)
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * This function actually applies the mapping to the page table of the
+ * dma_ops domain.
+ */
+static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
+ struct unity_map_entry *e)
+{
+ u64 addr;
+ int ret;
+
+ for (addr = e->address_start; addr < e->address_end;
+ addr += PAGE_SIZE) {
+ ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
+ PAGE_SIZE);
+ if (ret)
+ return ret;
+ /*
+ * if unity mapping is in aperture range mark the page
+ * as allocated in the aperture
+ */
+ if (addr < dma_dom->aperture_size)
+ __set_bit(addr >> PAGE_SHIFT,
+ dma_dom->aperture[0]->bitmap);
+ }
+
+ return 0;
+}
+
+/*
+ * Init the unity mappings for a specific IOMMU in the system
+ *
+ * Basically iterates over all unity mapping entries and applies them to
+ * the default DMA domain of that IOMMU if necessary.
+ */
+static int iommu_init_unity_mappings(struct amd_iommu *iommu)
+{
+ struct unity_map_entry *entry;
+ int ret;
+
+ list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+ if (!iommu_for_unity_map(iommu, entry))
+ continue;
+ ret = dma_ops_unity_map(iommu->default_dom, entry);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Inits the unity mappings required for a specific device
+ */
+static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
+ u16 devid)
+{
+ struct unity_map_entry *e;
+ int ret;
+
+ list_for_each_entry(e, &amd_iommu_unity_map, list) {
+ if (!(devid >= e->devid_start && devid <= e->devid_end))
+ continue;
+ ret = dma_ops_unity_map(dma_dom, e);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/****************************************************************************
+ *
+ * The next functions belong to the address allocator for the dma_ops
+ * interface functions. They work like the allocators in the other IOMMU
+ * drivers. It's basically a bitmap which marks the allocated pages in
+ * the aperture. Maybe it could be enhanced in the future to a more
+ * efficient allocator.
+ *
+ ****************************************************************************/
+
+/*
+ * The address allocator core functions.
+ *
+ * called with domain->lock held
+ */
+
+/*
+ * Used to reserve address ranges in the aperture (e.g. for exclusion
+ * ranges).
+ */
+static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
+ unsigned long start_page,
+ unsigned int pages)
+{
+ unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
+
+ if (start_page + pages > last_page)
+ pages = last_page - start_page;
+
+ for (i = start_page; i < start_page + pages; ++i) {
+ int index = i / APERTURE_RANGE_PAGES;
+ int page = i % APERTURE_RANGE_PAGES;
+ __set_bit(page, dom->aperture[index]->bitmap);
+ }
+}
+
+/*
+ * This function is used to add a new aperture range to an existing
+ * aperture in case of dma_ops domain allocation or address allocation
+ * failure.
+ */
+static int alloc_new_range(struct dma_ops_domain *dma_dom,
+ bool populate, gfp_t gfp)
+{
+ int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
+ struct amd_iommu *iommu;
+ unsigned long i, old_size;
+
+#ifdef CONFIG_IOMMU_STRESS
+ populate = false;
+#endif
+
+ if (index >= APERTURE_MAX_RANGES)
+ return -ENOMEM;
+
+ dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
+ if (!dma_dom->aperture[index])
+ return -ENOMEM;
+
+ dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
+ if (!dma_dom->aperture[index]->bitmap)
+ goto out_free;
+
+ dma_dom->aperture[index]->offset = dma_dom->aperture_size;
+
+ if (populate) {
+ unsigned long address = dma_dom->aperture_size;
+ int i, num_ptes = APERTURE_RANGE_PAGES / 512;
+ u64 *pte, *pte_page;
+
+ for (i = 0; i < num_ptes; ++i) {
+ pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
+ &pte_page, gfp);
+ if (!pte)
+ goto out_free;
+
+ dma_dom->aperture[index]->pte_pages[i] = pte_page;
+
+ address += APERTURE_RANGE_SIZE / 64;
+ }
+ }
+
+ old_size = dma_dom->aperture_size;
+ dma_dom->aperture_size += APERTURE_RANGE_SIZE;
+
+ /* Reserve address range used for MSI messages */
+ if (old_size < MSI_ADDR_BASE_LO &&
+ dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
+ unsigned long spage;
+ int pages;
+
+ pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
+ spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;
+
+ dma_ops_reserve_addresses(dma_dom, spage, pages);
+ }
+
+ /* Initialize the exclusion range if necessary */
+ for_each_iommu(iommu) {
+ if (iommu->exclusion_start &&
+ iommu->exclusion_start >= dma_dom->aperture[index]->offset
+ && iommu->exclusion_start < dma_dom->aperture_size) {
+ unsigned long startpage;
+ int pages = iommu_num_pages(iommu->exclusion_start,
+ iommu->exclusion_length,
+ PAGE_SIZE);
+ startpage = iommu->exclusion_start >> PAGE_SHIFT;
+ dma_ops_reserve_addresses(dma_dom, startpage, pages);
+ }
+ }
+
+ /*
+ * Check for areas already mapped as present in the new aperture
+ * range and mark those pages as reserved in the allocator. Such
+ * mappings may already exist as a result of requested unity
+ * mappings for devices.
+ */
+ for (i = dma_dom->aperture[index]->offset;
+ i < dma_dom->aperture_size;
+ i += PAGE_SIZE) {
+ u64 *pte = fetch_pte(&dma_dom->domain, i);
+ if (!pte || !IOMMU_PTE_PRESENT(*pte))
+ continue;
+
+ dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
+ }
+
+ update_domain(&dma_dom->domain);
+
+ return 0;
+
+out_free:
+ update_domain(&dma_dom->domain);
+
+ free_page((unsigned long)dma_dom->aperture[index]->bitmap);
+
+ kfree(dma_dom->aperture[index]);
+ dma_dom->aperture[index] = NULL;
+
+ return -ENOMEM;
+}
+
+static unsigned long dma_ops_area_alloc(struct device *dev,
+ struct dma_ops_domain *dom,
+ unsigned int pages,
+ unsigned long align_mask,
+ u64 dma_mask,
+ unsigned long start)
+{
+ unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
+ int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
+ int i = start >> APERTURE_RANGE_SHIFT;
+ unsigned long boundary_size;
+ unsigned long address = -1;
+ unsigned long limit;
+
+ next_bit >>= PAGE_SHIFT;
+
+ boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+ PAGE_SIZE) >> PAGE_SHIFT;
+
+ for (;i < max_index; ++i) {
+ unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
+
+ if (dom->aperture[i]->offset >= dma_mask)
+ break;
+
+ limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
+ dma_mask >> PAGE_SHIFT);
+
+ address = iommu_area_alloc(dom->aperture[i]->bitmap,
+ limit, next_bit, pages, 0,
+ boundary_size, align_mask);
+ if (address != -1) {
+ address = dom->aperture[i]->offset +
+ (address << PAGE_SHIFT);
+ dom->next_address = address + (pages << PAGE_SHIFT);
+ break;
+ }
+
+ next_bit = 0;
+ }
+
+ return address;
+}
+
+static unsigned long dma_ops_alloc_addresses(struct device *dev,
+ struct dma_ops_domain *dom,
+ unsigned int pages,
+ unsigned long align_mask,
+ u64 dma_mask)
+{
+ unsigned long address;
+
+#ifdef CONFIG_IOMMU_STRESS
+ dom->next_address = 0;
+ dom->need_flush = true;
+#endif
+
+ address = dma_ops_area_alloc(dev, dom, pages, align_mask,
+ dma_mask, dom->next_address);
+
+ if (address == -1) {
+ dom->next_address = 0;
+ address = dma_ops_area_alloc(dev, dom, pages, align_mask,
+ dma_mask, 0);
+ dom->need_flush = true;
+ }
+
+ if (unlikely(address == -1))
+ address = DMA_ERROR_CODE;
+
+ WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
+
+ return address;
+}
+
+/*
+ * The address free function.
+ *
+ * called with domain->lock held
+ */
+static void dma_ops_free_addresses(struct dma_ops_domain *dom,
+ unsigned long address,
+ unsigned int pages)
+{
+ unsigned i = address >> APERTURE_RANGE_SHIFT;
+ struct aperture_range *range = dom->aperture[i];
+
+ BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
+
+#ifdef CONFIG_IOMMU_STRESS
+ if (i < 4)
+ return;
+#endif
+
+ if (address >= dom->next_address)
+ dom->need_flush = true;
+
+ address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
+
+ bitmap_clear(range->bitmap, address, pages);
+
+}
+
+/****************************************************************************
+ *
+ * The next functions belong to the domain allocation. A domain is
+ * allocated for every IOMMU as the default domain. If device isolation
+ * is enabled, every device gets its own domain. The most important thing
+ * about domains is the page table mapping the DMA address space they
+ * contain.
+ *
+ ****************************************************************************/
+
+/*
+ * This function adds a protection domain to the global protection domain list
+ */
+static void add_domain_to_list(struct protection_domain *domain)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&amd_iommu_pd_lock, flags);
+ list_add(&domain->list, &amd_iommu_pd_list);
+ spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
+}
+
+/*
+ * This function removes a protection domain from the global
+ * protection domain list
+ */
+static void del_domain_from_list(struct protection_domain *domain)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&amd_iommu_pd_lock, flags);
+ list_del(&domain->list);
+ spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
+}
+
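+/*
+ * Allocates a free protection domain id from the global bitmap. Domain
+ * id 0 is reserved as the "unallocated" marker, so a return value of 0
+ * means that no free id was available.
+ */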
+static u16 domain_id_alloc(void)
+{
+ unsigned long flags;
+ int id;
+
+ write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
+ BUG_ON(id == 0);
+ if (id > 0 && id < MAX_DOMAIN_ID)
+ __set_bit(id, amd_iommu_pd_alloc_bitmap);
+ else
+ id = 0;
+ write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+
+ return id;
+}
+
+static void domain_id_free(int id)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ if (id > 0 && id < MAX_DOMAIN_ID)
+ __clear_bit(id, amd_iommu_pd_alloc_bitmap);
+ write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+}
+
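+/*
+ * Releases all page table pages of a protection domain and clears the
+ * root pointer. Only the page table pages themselves are freed; the
+ * memory mapped through the leaf PTEs is left untouched.
+ */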
+static void free_pagetable(struct protection_domain *domain)
+{
+ int i, j;
+ u64 *p1, *p2, *p3;
+
+ p1 = domain->pt_root;
+
+ if (!p1)
+ return;
+
+ for (i = 0; i < 512; ++i) {
+ if (!IOMMU_PTE_PRESENT(p1[i]))
+ continue;
+
+ p2 = IOMMU_PTE_PAGE(p1[i]);
+ for (j = 0; j < 512; ++j) {
+ if (!IOMMU_PTE_PRESENT(p2[j]))
+ continue;
+ p3 = IOMMU_PTE_PAGE(p2[j]);
+ free_page((unsigned long)p3);
+ }
+
+ free_page((unsigned long)p2);
+ }
+
+ free_page((unsigned long)p1);
+
+ domain->pt_root = NULL;
+}
+
+/*
+ * Free a domain, only used if something went wrong in the
+ * allocation path and we need to free an already allocated page table
+ */
+static void dma_ops_domain_free(struct dma_ops_domain *dom)
+{
+ int i;
+
+ if (!dom)
+ return;
+
+ del_domain_from_list(&dom->domain);
+
+ free_pagetable(&dom->domain);
+
+ for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
+ if (!dom->aperture[i])
+ continue;
+ free_page((unsigned long)dom->aperture[i]->bitmap);
+ kfree(dom->aperture[i]);
+ }
+
+ kfree(dom);
+}
+
+/*
+ * Allocates a new protection domain usable for the dma_ops functions.
+ * It also initializes the page table and the address allocator data
+ * structures required for the dma_ops interface
+ */
+static struct dma_ops_domain *dma_ops_domain_alloc(void)
+{
+ struct dma_ops_domain *dma_dom;
+
+ dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
+ if (!dma_dom)
+ return NULL;
+
+ spin_lock_init(&dma_dom->domain.lock);
+
+ dma_dom->domain.id = domain_id_alloc();
+ if (dma_dom->domain.id == 0)
+ goto free_dma_dom;
+ INIT_LIST_HEAD(&dma_dom->domain.dev_list);
+ dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
+ dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ dma_dom->domain.flags = PD_DMA_OPS_MASK;
+ dma_dom->domain.priv = dma_dom;
+ if (!dma_dom->domain.pt_root)
+ goto free_dma_dom;
+
+ dma_dom->need_flush = false;
+ dma_dom->target_dev = 0xffff;
+
+ add_domain_to_list(&dma_dom->domain);
+
+ if (alloc_new_range(dma_dom, true, GFP_KERNEL))
+ goto free_dma_dom;
+
+ /*
+ * mark the first page as allocated so we never return 0 as
+ * a valid dma-address; this allows us to use 0 as the error value
+ */
+ dma_dom->aperture[0]->bitmap[0] = 1;
+ dma_dom->next_address = 0;
+
+
+ return dma_dom;
+
+free_dma_dom:
+ dma_ops_domain_free(dma_dom);
+
+ return NULL;
+}
+
+/*
+ * little helper function to check whether a given protection domain is a
+ * dma_ops domain
+ */
+static bool dma_ops_domain(struct protection_domain *domain)
+{
+ return domain->flags & PD_DMA_OPS_MASK;
+}
+
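+/*
+ * Writes a device table entry: data[0] and data[1] hold the low and
+ * high halves of the page table root pointer together with the paging
+ * mode and the IR/IW/P/TV bits, data[2] holds the domain id and
+ * data[3] carries the IOTLB enable flag for ATS capable devices.
+ */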
+static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
+{
+ u64 pte_root = virt_to_phys(domain->pt_root);
+ u32 flags = 0;
+
+ pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
+ << DEV_ENTRY_MODE_SHIFT;
+ pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
+
+ if (ats)
+ flags |= DTE_FLAG_IOTLB;
+
+ amd_iommu_dev_table[devid].data[3] |= flags;
+ amd_iommu_dev_table[devid].data[2] = domain->id;
+ amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
+ amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
+}
+
+static void clear_dte_entry(u16 devid)
+{
+ /* remove entry from the device table seen by the hardware */
+ amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
+ amd_iommu_dev_table[devid].data[1] = 0;
+ amd_iommu_dev_table[devid].data[2] = 0;
+
+ amd_iommu_apply_erratum_63(devid);
+}
+
+static void do_attach(struct iommu_dev_data *dev_data,
+ struct protection_domain *domain)
+{
+ struct amd_iommu *iommu;
+ bool ats;
+
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+ ats = dev_data->ats.enabled;
+
+ /* Update data structures */
+ dev_data->domain = domain;
+ list_add(&dev_data->list, &domain->dev_list);
+ set_dte_entry(dev_data->devid, domain, ats);
+
+ /* Do reference counting */
+ domain->dev_iommu[iommu->index] += 1;
+ domain->dev_cnt += 1;
+
+ /* Flush the DTE entry */
+ device_flush_dte(dev_data);
+}
+
+static void do_detach(struct iommu_dev_data *dev_data)
+{
+ struct amd_iommu *iommu;
+
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+
+ /* decrease reference counters */
+ dev_data->domain->dev_iommu[iommu->index] -= 1;
+ dev_data->domain->dev_cnt -= 1;
+
+ /* Update data structures */
+ dev_data->domain = NULL;
+ list_del(&dev_data->list);
+ clear_dte_entry(dev_data->devid);
+
+ /* Flush the DTE entry */
+ device_flush_dte(dev_data);
+}
+
+/*
+ * If a device is not yet associated with a domain, this function
+ * assigns it to the domain and makes the change visible to the hardware
+ */
+static int __attach_device(struct iommu_dev_data *dev_data,
+ struct protection_domain *domain)
+{
+ int ret;
+
+ /* lock domain */
+ spin_lock(&domain->lock);
+
+ if (dev_data->alias_data != NULL) {
+ struct iommu_dev_data *alias_data = dev_data->alias_data;
+
+ /* Some sanity checks */
+ ret = -EBUSY;
+ if (alias_data->domain != NULL &&
+ alias_data->domain != domain)
+ goto out_unlock;
+
+ if (dev_data->domain != NULL &&
+ dev_data->domain != domain)
+ goto out_unlock;
+
+ /* Do real assignment */
+ if (alias_data->domain == NULL)
+ do_attach(alias_data, domain);
+
+ atomic_inc(&alias_data->bind);
+ }
+
+ if (dev_data->domain == NULL)
+ do_attach(dev_data, domain);
+
+ atomic_inc(&dev_data->bind);
+
+ ret = 0;
+
+out_unlock:
+
+ /* ready */
+ spin_unlock(&domain->lock);
+
+ return ret;
+}
+
+/*
+ * If a device is not yet associated with a domain, this function
+ * assigns it to the domain and makes the change visible to the hardware
+ */
+static int attach_device(struct device *dev,
+ struct protection_domain *domain)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct iommu_dev_data *dev_data;
+ unsigned long flags;
+ int ret;
+
+ dev_data = get_dev_data(dev);
+
+ if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
+ dev_data->ats.enabled = true;
+ dev_data->ats.qdep = pci_ats_queue_depth(pdev);
+ }
+
+ write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ ret = __attach_device(dev_data, domain);
+ write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+
+ /*
+ * We might boot into a crash-kernel here. The crashed kernel
+ * left the caches in the IOMMU dirty. So we have to flush
+ * here to evict all dirty stuff.
+ */
+ domain_flush_tlb_pde(domain);
+
+ return ret;
+}
+
+/*
+ * Removes a device from a protection domain (unlocked)
+ */
+static void __detach_device(struct iommu_dev_data *dev_data)
+{
+ struct protection_domain *domain;
+ unsigned long flags;
+
+ BUG_ON(!dev_data->domain);
+
+ domain = dev_data->domain;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ if (dev_data->alias_data != NULL) {
+ struct iommu_dev_data *alias_data = dev_data->alias_data;
+
+ if (atomic_dec_and_test(&alias_data->bind))
+ do_detach(alias_data);
+ }
+
+ if (atomic_dec_and_test(&dev_data->bind))
+ do_detach(dev_data);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ /*
+ * If we run in passthrough mode the device must be assigned to the
+ * passthrough domain if it is detached from any other domain.
+ * Make sure we can deassign from the pt_domain itself.
+ */
+ if (iommu_pass_through &&
+ (dev_data->domain == NULL && domain != pt_domain))
+ __attach_device(dev_data, pt_domain);
+}
+
+/*
+ * Removes a device from a protection domain (with devtable_lock held)
+ */
+static void detach_device(struct device *dev)
+{
+ struct iommu_dev_data *dev_data;
+ unsigned long flags;
+
+ dev_data = get_dev_data(dev);
+
+ /* lock device table */
+ write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ __detach_device(dev_data);
+ write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+
+ if (dev_data->ats.enabled) {
+ pci_disable_ats(to_pci_dev(dev));
+ dev_data->ats.enabled = false;
+ }
+}
+
+/*
+ * Find out the protection domain structure for a given PCI device. This
+ * will give us the pointer to the page table root for example.
+ */
+static struct protection_domain *domain_for_device(struct device *dev)
+{
+ struct iommu_dev_data *dev_data;
+ struct protection_domain *dom = NULL;
+ unsigned long flags;
+
+ dev_data = get_dev_data(dev);
+
+ if (dev_data->domain)
+ return dev_data->domain;
+
+ if (dev_data->alias_data != NULL) {
+ struct iommu_dev_data *alias_data = dev_data->alias_data;
+
+ read_lock_irqsave(&amd_iommu_devtable_lock, flags);
+ if (alias_data->domain != NULL) {
+ __attach_device(dev_data, alias_data->domain);
+ dom = alias_data->domain;
+ }
+ read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ }
+
+ return dom;
+}
+
+static int device_change_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ u16 devid;
+ struct protection_domain *domain;
+ struct dma_ops_domain *dma_domain;
+ struct amd_iommu *iommu;
+ unsigned long flags;
+
+ if (!check_device(dev))
+ return 0;
+
+ devid = get_device_id(dev);
+ iommu = amd_iommu_rlookup_table[devid];
+
+ switch (action) {
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+
+ domain = domain_for_device(dev);
+
+ if (!domain)
+ goto out;
+ if (iommu_pass_through)
+ break;
+ detach_device(dev);
+ break;
+ case BUS_NOTIFY_ADD_DEVICE:
+
+ iommu_init_device(dev);
+
+ domain = domain_for_device(dev);
+
+ /* allocate a protection domain if a device is added */
+ dma_domain = find_protection_domain(devid);
+ if (dma_domain)
+ goto out;
+ dma_domain = dma_ops_domain_alloc();
+ if (!dma_domain)
+ goto out;
+ dma_domain->target_dev = devid;
+
+ spin_lock_irqsave(&iommu_pd_list_lock, flags);
+ list_add_tail(&dma_domain->list, &iommu_pd_list);
+ spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+
+ iommu_uninit_device(dev);
+
+ default:
+ goto out;
+ }
+
+ iommu_completion_wait(iommu);
+
+out:
+ return 0;
+}
+
+static struct notifier_block device_nb = {
+ .notifier_call = device_change_notifier,
+};
+
+void amd_iommu_init_notifier(void)
+{
+ bus_register_notifier(&pci_bus_type, &device_nb);
+}
+
+/*****************************************************************************
+ *
+ * The next functions belong to the dma_ops mapping/unmapping code.
+ *
+ *****************************************************************************/
+
+/*
+ * In the dma_ops path we only have the struct device. This function
+ * finds the corresponding IOMMU, the protection domain and the
+ * requestor id for a given device.
+ * If the device is not yet associated with a domain this is also done
+ * in this function.
+ */
+static struct protection_domain *get_domain(struct device *dev)
+{
+ struct protection_domain *domain;
+ struct dma_ops_domain *dma_dom;
+ u16 devid = get_device_id(dev);
+
+ if (!check_device(dev))
+ return ERR_PTR(-EINVAL);
+
+ domain = domain_for_device(dev);
+ if (domain != NULL && !dma_ops_domain(domain))
+ return ERR_PTR(-EBUSY);
+
+ if (domain != NULL)
+ return domain;
+
+ /* Device not bound yet - bind it */
+ dma_dom = find_protection_domain(devid);
+ if (!dma_dom)
+ dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
+ attach_device(dev, &dma_dom->domain);
+ DUMP_printk("Using protection domain %d for device %s\n",
+ dma_dom->domain.id, dev_name(dev));
+
+ return &dma_dom->domain;
+}
+
+static void update_device_table(struct protection_domain *domain)
+{
+ struct iommu_dev_data *dev_data;
+
+ list_for_each_entry(dev_data, &domain->dev_list, list)
+ set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
+}
+
+static void update_domain(struct protection_domain *domain)
+{
+ if (!domain->updated)
+ return;
+
+ update_device_table(domain);
+
+ domain_flush_devices(domain);
+ domain_flush_tlb_pde(domain);
+
+ domain->updated = false;
+}
+
+/*
+ * This function fetches the PTE for a given address in the aperture
+ */
+static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
+ unsigned long address)
+{
+ struct aperture_range *aperture;
+ u64 *pte, *pte_page;
+
+ aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
+ if (!aperture)
+ return NULL;
+
+ pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
+ if (!pte) {
+ pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
+ GFP_ATOMIC);
+ aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
+ } else
+ pte += PM_LEVEL_INDEX(0, address);
+
+ update_domain(&dom->domain);
+
+ return pte;
+}
+
+/*
+ * This is the generic map function. It maps one 4kb page at paddr to
+ * the given address in the DMA address space for the domain.
+ */
+static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
+ unsigned long address,
+ phys_addr_t paddr,
+ int direction)
+{
+ u64 *pte, __pte;
+
+ WARN_ON(address > dom->aperture_size);
+
+ paddr &= PAGE_MASK;
+
+ pte = dma_ops_get_pte(dom, address);
+ if (!pte)
+ return DMA_ERROR_CODE;
+
+ __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
+
+ if (direction == DMA_TO_DEVICE)
+ __pte |= IOMMU_PTE_IR;
+ else if (direction == DMA_FROM_DEVICE)
+ __pte |= IOMMU_PTE_IW;
+ else if (direction == DMA_BIDIRECTIONAL)
+ __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
+
+ WARN_ON(*pte);
+
+ *pte = __pte;
+
+ return (dma_addr_t)address;
+}
+
+/*
+ * The generic unmapping function for one page in the DMA address space.
+ */
+static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
+ unsigned long address)
+{
+ struct aperture_range *aperture;
+ u64 *pte;
+
+ if (address >= dom->aperture_size)
+ return;
+
+ aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
+ if (!aperture)
+ return;
+
+ pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
+ if (!pte)
+ return;
+
+ pte += PM_LEVEL_INDEX(0, address);
+
+ WARN_ON(!*pte);
+
+ *pte = 0ULL;
+}
+
+/*
+ * This function contains common code for mapping of a physically
+ * contiguous memory region into DMA address space. It is used by all
+ * mapping functions provided with this IOMMU driver.
+ * Must be called with the domain lock held.
+ */
+static dma_addr_t __map_single(struct device *dev,
+ struct dma_ops_domain *dma_dom,
+ phys_addr_t paddr,
+ size_t size,
+ int dir,
+ bool align,
+ u64 dma_mask)
+{
+ dma_addr_t offset = paddr & ~PAGE_MASK;
+ dma_addr_t address, start, ret;
+ unsigned int pages;
+ unsigned long align_mask = 0;
+ int i;
+
+ pages = iommu_num_pages(paddr, size, PAGE_SIZE);
+ paddr &= PAGE_MASK;
+
+ INC_STATS_COUNTER(total_map_requests);
+
+ if (pages > 1)
+ INC_STATS_COUNTER(cross_page);
+
+ if (align)
+ align_mask = (1UL << get_order(size)) - 1;
+
+retry:
+ address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
+ dma_mask);
+ if (unlikely(address == DMA_ERROR_CODE)) {
+ /*
+ * setting next_address here will let the address
+ * allocator only scan the newly allocated range in the
+ * first run. This is a small optimization.
+ */
+ dma_dom->next_address = dma_dom->aperture_size;
+
+ if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
+ goto out;
+
+ /*
+ * aperture was successfully enlarged by 128 MB, try
+ * allocation again
+ */
+ goto retry;
+ }
+
+ start = address;
+ for (i = 0; i < pages; ++i) {
+ ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
+ if (ret == DMA_ERROR_CODE)
+ goto out_unmap;
+
+ paddr += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+ address += offset;
+
+ ADD_STATS_COUNTER(alloced_io_mem, size);
+
+ if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
+ domain_flush_tlb(&dma_dom->domain);
+ dma_dom->need_flush = false;
+ } else if (unlikely(amd_iommu_np_cache))
+ domain_flush_pages(&dma_dom->domain, address, size);
+
+out:
+ return address;
+
+out_unmap:
+
+ for (--i; i >= 0; --i) {
+ start -= PAGE_SIZE;
+ dma_ops_domain_unmap(dma_dom, start);
+ }
+
+ dma_ops_free_addresses(dma_dom, address, pages);
+
+ return DMA_ERROR_CODE;
+}
+
+/*
+ * Does the reverse of the __map_single function. Must be called with
+ * the domain lock held too
+ */
+static void __unmap_single(struct dma_ops_domain *dma_dom,
+ dma_addr_t dma_addr,
+ size_t size,
+ int dir)
+{
+ dma_addr_t flush_addr;
+ dma_addr_t i, start;
+ unsigned int pages;
+
+ if ((dma_addr == DMA_ERROR_CODE) ||
+ (dma_addr + size > dma_dom->aperture_size))
+ return;
+
+ flush_addr = dma_addr;
+ pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
+ dma_addr &= PAGE_MASK;
+ start = dma_addr;
+
+ for (i = 0; i < pages; ++i) {
+ dma_ops_domain_unmap(dma_dom, start);
+ start += PAGE_SIZE;
+ }
+
+ SUB_STATS_COUNTER(alloced_io_mem, size);
+
+ dma_ops_free_addresses(dma_dom, dma_addr, pages);
+
+ if (amd_iommu_unmap_flush || dma_dom->need_flush) {
+ domain_flush_pages(&dma_dom->domain, flush_addr, size);
+ dma_dom->need_flush = false;
+ }
+}
+
+/*
+ * The exported map_single function for dma_ops.
+ */
+static dma_addr_t map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ unsigned long flags;
+ struct protection_domain *domain;
+ dma_addr_t addr;
+ u64 dma_mask;
+ phys_addr_t paddr = page_to_phys(page) + offset;
+
+ INC_STATS_COUNTER(cnt_map_single);
+
+ domain = get_domain(dev);
+ if (PTR_ERR(domain) == -EINVAL)
+ return (dma_addr_t)paddr;
+ else if (IS_ERR(domain))
+ return DMA_ERROR_CODE;
+
+ dma_mask = *dev->dma_mask;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ addr = __map_single(dev, domain->priv, paddr, size, dir, false,
+ dma_mask);
+ if (addr == DMA_ERROR_CODE)
+ goto out;
+
+ domain_flush_complete(domain);
+
+out:
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return addr;
+}
+
+/*
+ * The exported unmap_single function for dma_ops.
+ */
+static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ unsigned long flags;
+ struct protection_domain *domain;
+
+ INC_STATS_COUNTER(cnt_unmap_single);
+
+ domain = get_domain(dev);
+ if (IS_ERR(domain))
+ return;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ __unmap_single(domain->priv, dma_addr, size, dir);
+
+ domain_flush_complete(domain);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+/*
+ * This is a special map_sg function which is used when we have to map a
+ * device which is not handled by an AMD IOMMU in the system.
+ */
+static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
+ int nelems, int dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sglist, s, nelems, i) {
+ s->dma_address = (dma_addr_t)sg_phys(s);
+ s->dma_length = s->length;
+ }
+
+ return nelems;
+}
+
+/*
+ * The exported map_sg function for dma_ops (handles scatter-gather
+ * lists).
+ */
+static int map_sg(struct device *dev, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ unsigned long flags;
+ struct protection_domain *domain;
+ int i;
+ struct scatterlist *s;
+ phys_addr_t paddr;
+ int mapped_elems = 0;
+ u64 dma_mask;
+
+ INC_STATS_COUNTER(cnt_map_sg);
+
+ domain = get_domain(dev);
+ if (PTR_ERR(domain) == -EINVAL)
+ return map_sg_no_iommu(dev, sglist, nelems, dir);
+ else if (IS_ERR(domain))
+ return 0;
+
+ dma_mask = *dev->dma_mask;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ for_each_sg(sglist, s, nelems, i) {
+ paddr = sg_phys(s);
+
+ s->dma_address = __map_single(dev, domain->priv,
+ paddr, s->length, dir, false,
+ dma_mask);
+
+ if (s->dma_address) {
+ s->dma_length = s->length;
+ mapped_elems++;
+ } else
+ goto unmap;
+ }
+
+ domain_flush_complete(domain);
+
+out:
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return mapped_elems;
+unmap:
+ for_each_sg(sglist, s, mapped_elems, i) {
+ if (s->dma_address)
+ __unmap_single(domain->priv, s->dma_address,
+ s->dma_length, dir);
+ s->dma_address = s->dma_length = 0;
+ }
+
+ mapped_elems = 0;
+
+ goto out;
+}
+
+/*
+ * The exported unmap_sg function for dma_ops (handles scatter-gather
+ * lists).
+ */
+static void unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ unsigned long flags;
+ struct protection_domain *domain;
+ struct scatterlist *s;
+ int i;
+
+ INC_STATS_COUNTER(cnt_unmap_sg);
+
+ domain = get_domain(dev);
+ if (IS_ERR(domain))
+ return;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ for_each_sg(sglist, s, nelems, i) {
+ __unmap_single(domain->priv, s->dma_address,
+ s->dma_length, dir);
+ s->dma_address = s->dma_length = 0;
+ }
+
+ domain_flush_complete(domain);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+/*
+ * The exported alloc_coherent function for dma_ops.
+ */
+static void *alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t flag)
+{
+ unsigned long flags;
+ void *virt_addr;
+ struct protection_domain *domain;
+ phys_addr_t paddr;
+ u64 dma_mask = dev->coherent_dma_mask;
+
+ INC_STATS_COUNTER(cnt_alloc_coherent);
+
+ domain = get_domain(dev);
+ if (PTR_ERR(domain) == -EINVAL) {
+ virt_addr = (void *)__get_free_pages(flag, get_order(size));
+ *dma_addr = __pa(virt_addr);
+ return virt_addr;
+ } else if (IS_ERR(domain))
+ return NULL;
+
+ dma_mask = dev->coherent_dma_mask;
+ flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+ flag |= __GFP_ZERO;
+
+ virt_addr = (void *)__get_free_pages(flag, get_order(size));
+ if (!virt_addr)
+ return NULL;
+
+ paddr = virt_to_phys(virt_addr);
+
+ if (!dma_mask)
+ dma_mask = *dev->dma_mask;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ *dma_addr = __map_single(dev, domain->priv, paddr,
+ size, DMA_BIDIRECTIONAL, true, dma_mask);
+
+ if (*dma_addr == DMA_ERROR_CODE) {
+ spin_unlock_irqrestore(&domain->lock, flags);
+ goto out_free;
+ }
+
+ domain_flush_complete(domain);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return virt_addr;
+
+out_free:
+
+ free_pages((unsigned long)virt_addr, get_order(size));
+
+ return NULL;
+}
+
+/*
+ * The exported free_coherent function for dma_ops.
+ */
+static void free_coherent(struct device *dev, size_t size,
+ void *virt_addr, dma_addr_t dma_addr)
+{
+ unsigned long flags;
+ struct protection_domain *domain;
+
+ INC_STATS_COUNTER(cnt_free_coherent);
+
+ domain = get_domain(dev);
+ if (IS_ERR(domain))
+ goto free_mem;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
+
+ domain_flush_complete(domain);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+free_mem:
+ free_pages((unsigned long)virt_addr, get_order(size));
+}
+
+/*
+ * This function is called by the DMA layer to find out if we can handle a
+ * particular device. It is part of the dma_ops.
+ */
+static int amd_iommu_dma_supported(struct device *dev, u64 mask)
+{
+ return check_device(dev);
+}
+
+/*
+ * The function for pre-allocating protection domains.
+ *
+ * Once the driver core informs the DMA layer when a driver grabs a
+ * device we will no longer need to preallocate the protection domains.
+ * For now we have to.
+ */
+static void prealloc_protection_domains(void)
+{
+ struct pci_dev *dev = NULL;
+ struct dma_ops_domain *dma_dom;
+ u16 devid;
+
+ for_each_pci_dev(dev) {
+
+ /* Do we handle this device? */
+ if (!check_device(&dev->dev))
+ continue;
+
+ /* Is there already any domain for it? */
+ if (domain_for_device(&dev->dev))
+ continue;
+
+ devid = get_device_id(&dev->dev);
+
+ dma_dom = dma_ops_domain_alloc();
+ if (!dma_dom)
+ continue;
+ init_unity_mappings_for_device(dma_dom, devid);
+ dma_dom->target_dev = devid;
+
+ attach_device(&dev->dev, &dma_dom->domain);
+
+ list_add_tail(&dma_dom->list, &iommu_pd_list);
+ }
+}
+
+static struct dma_map_ops amd_iommu_dma_ops = {
+ .alloc_coherent = alloc_coherent,
+ .free_coherent = free_coherent,
+ .map_page = map_page,
+ .unmap_page = unmap_page,
+ .map_sg = map_sg,
+ .unmap_sg = unmap_sg,
+ .dma_supported = amd_iommu_dma_supported,
+};
+
+static unsigned device_dma_ops_init(void)
+{
+ struct pci_dev *pdev = NULL;
+ unsigned unhandled = 0;
+
+ for_each_pci_dev(pdev) {
+ if (!check_device(&pdev->dev)) {
+ unhandled += 1;
+ continue;
+ }
+
+ pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+ }
+
+ return unhandled;
+}
+
+/*
+ * The function which glues the AMD IOMMU driver into dma_ops.
+ */
+
+void __init amd_iommu_init_api(void)
+{
+ register_iommu(&amd_iommu_ops);
+}
+
+int __init amd_iommu_init_dma_ops(void)
+{
+ struct amd_iommu *iommu;
+ int ret, unhandled;
+
+ /*
+ * first allocate a default protection domain for every IOMMU we
+ * found in the system. Devices not assigned to any other
+ * protection domain will be assigned to the default one.
+ */
+ for_each_iommu(iommu) {
+ iommu->default_dom = dma_ops_domain_alloc();
+ if (iommu->default_dom == NULL)
+ return -ENOMEM;
+ iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
+ ret = iommu_init_unity_mappings(iommu);
+ if (ret)
+ goto free_domains;
+ }
+
+ /*
+ * Pre-allocate the protection domains for each device.
+ */
+ prealloc_protection_domains();
+
+ iommu_detected = 1;
+ swiotlb = 0;
+
+ /* Finally make our dma_ops visible to the drivers */
+ unhandled = device_dma_ops_init();
+ if (unhandled && max_pfn > MAX_DMA32_PFN) {
+ /* There are unhandled devices - initialize swiotlb for them */
+ swiotlb = 1;
+ }
+
+ amd_iommu_stats_init();
+
+ return 0;
+
+free_domains:
+
+ for_each_iommu(iommu) {
+ if (iommu->default_dom)
+ dma_ops_domain_free(iommu->default_dom);
+ }
+
+ return ret;
+}
+
+/*****************************************************************************
+ *
+ * The following functions belong to the exported interface of AMD IOMMU
+ *
+ * This interface allows access to lower level functions of the IOMMU
+ * like protection domain handling and assignment of devices to domains
+ * which is not possible with the dma_ops interface.
+ *
+ *****************************************************************************/
+
+static void cleanup_domain(struct protection_domain *domain)
+{
+ struct iommu_dev_data *dev_data, *next;
+ unsigned long flags;
+
+ write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+
+ list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
+ __detach_device(dev_data);
+ atomic_set(&dev_data->bind, 0);
+ }
+
+ write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+}
+
+static void protection_domain_free(struct protection_domain *domain)
+{
+ if (!domain)
+ return;
+
+ del_domain_from_list(domain);
+
+ if (domain->id)
+ domain_id_free(domain->id);
+
+ kfree(domain);
+}
+
+static struct protection_domain *protection_domain_alloc(void)
+{
+ struct protection_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+
+ spin_lock_init(&domain->lock);
+ mutex_init(&domain->api_lock);
+ domain->id = domain_id_alloc();
+ if (!domain->id)
+ goto out_err;
+ INIT_LIST_HEAD(&domain->dev_list);
+
+ add_domain_to_list(domain);
+
+ return domain;
+
+out_err:
+ kfree(domain);
+
+ return NULL;
+}
+
+static int amd_iommu_domain_init(struct iommu_domain *dom)
+{
+ struct protection_domain *domain;
+
+ domain = protection_domain_alloc();
+ if (!domain)
+ goto out_free;
+
+ domain->mode = PAGE_MODE_3_LEVEL;
+ domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!domain->pt_root)
+ goto out_free;
+
+ dom->priv = domain;
+
+ return 0;
+
+out_free:
+ protection_domain_free(domain);
+
+ return -ENOMEM;
+}
+
+static void amd_iommu_domain_destroy(struct iommu_domain *dom)
+{
+ struct protection_domain *domain = dom->priv;
+
+ if (!domain)
+ return;
+
+ if (domain->dev_cnt > 0)
+ cleanup_domain(domain);
+
+ BUG_ON(domain->dev_cnt != 0);
+
+ free_pagetable(domain);
+
+ protection_domain_free(domain);
+
+ dom->priv = NULL;
+}
+
+static void amd_iommu_detach_device(struct iommu_domain *dom,
+ struct device *dev)
+{
+ struct iommu_dev_data *dev_data = dev->archdata.iommu;
+ struct amd_iommu *iommu;
+ u16 devid;
+
+ if (!check_device(dev))
+ return;
+
+ devid = get_device_id(dev);
+
+ if (dev_data->domain != NULL)
+ detach_device(dev);
+
+ iommu = amd_iommu_rlookup_table[devid];
+ if (!iommu)
+ return;
+
+ iommu_completion_wait(iommu);
+}
+
+static int amd_iommu_attach_device(struct iommu_domain *dom,
+ struct device *dev)
+{
+ struct protection_domain *domain = dom->priv;
+ struct iommu_dev_data *dev_data;
+ struct amd_iommu *iommu;
+ int ret;
+
+ if (!check_device(dev))
+ return -EINVAL;
+
+ dev_data = dev->archdata.iommu;
+
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+ if (!iommu)
+ return -EINVAL;
+
+ if (dev_data->domain)
+ detach_device(dev);
+
+ ret = attach_device(dev, domain);
+
+ iommu_completion_wait(iommu);
+
+ return ret;
+}
+
+static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
+ phys_addr_t paddr, int gfp_order, int iommu_prot)
+{
+ unsigned long page_size = 0x1000UL << gfp_order;
+ struct protection_domain *domain = dom->priv;
+ int prot = 0;
+ int ret;
+
+ if (iommu_prot & IOMMU_READ)
+ prot |= IOMMU_PROT_IR;
+ if (iommu_prot & IOMMU_WRITE)
+ prot |= IOMMU_PROT_IW;
+
+ mutex_lock(&domain->api_lock);
+ ret = iommu_map_page(domain, iova, paddr, prot, page_size);
+ mutex_unlock(&domain->api_lock);
+
+ return ret;
+}
+
+static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+ int gfp_order)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long page_size, unmap_size;
+
+ page_size = 0x1000UL << gfp_order;
+
+ mutex_lock(&domain->api_lock);
+ unmap_size = iommu_unmap_page(domain, iova, page_size);
+ mutex_unlock(&domain->api_lock);
+
+ domain_flush_tlb_pde(domain);
+
+ return get_order(unmap_size);
+}
+
+static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
+ unsigned long iova)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long offset_mask;
+ phys_addr_t paddr;
+ u64 *pte, __pte;
+
+ pte = fetch_pte(domain, iova);
+
+ if (!pte || !IOMMU_PTE_PRESENT(*pte))
+ return 0;
+
+ if (PM_PTE_LEVEL(*pte) == 0)
+ offset_mask = PAGE_SIZE - 1;
+ else
+ offset_mask = PTE_PAGE_SIZE(*pte) - 1;
+
+ __pte = *pte & PM_ADDR_MASK;
+ paddr = (__pte & ~offset_mask) | (iova & offset_mask);
+
+ return paddr;
+}
+
+static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
+ unsigned long cap)
+{
+ switch (cap) {
+ case IOMMU_CAP_CACHE_COHERENCY:
+ return 1;
+ }
+
+ return 0;
+}
+
+static struct iommu_ops amd_iommu_ops = {
+ .domain_init = amd_iommu_domain_init,
+ .domain_destroy = amd_iommu_domain_destroy,
+ .attach_dev = amd_iommu_attach_device,
+ .detach_dev = amd_iommu_detach_device,
+ .map = amd_iommu_map,
+ .unmap = amd_iommu_unmap,
+ .iova_to_phys = amd_iommu_iova_to_phys,
+ .domain_has_cap = amd_iommu_domain_has_cap,
+};
+
+/*****************************************************************************
+ *
+ * The next functions do a basic initialization of the IOMMU for pass-through
+ * mode
+ *
+ * In passthrough mode the IOMMU is initialized and enabled but not used for
+ * DMA-API translation.
+ *
+ *****************************************************************************/
+
+int __init amd_iommu_init_passthrough(void)
+{
+ struct amd_iommu *iommu;
+ struct pci_dev *dev = NULL;
+ u16 devid;
+
+ /* allocate passthrough domain */
+ pt_domain = protection_domain_alloc();
+ if (!pt_domain)
+ return -ENOMEM;
+
+ pt_domain->mode |= PAGE_MODE_NONE;
+
+ for_each_pci_dev(dev) {
+ if (!check_device(&dev->dev))
+ continue;
+
+ devid = get_device_id(&dev->dev);
+
+ iommu = amd_iommu_rlookup_table[devid];
+ if (!iommu)
+ continue;
+
+ attach_device(&dev->dev, pt_domain);
+ }
+
+ pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
+
+ return 0;
+}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
new file mode 100644
index 00000000000..82d2410f420
--- /dev/null
+++ b/drivers/iommu/amd_iommu_init.c
@@ -0,0 +1,1574 @@
+/*
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Leo Duran <leo.duran@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/amd-iommu.h>
+#include <asm/pci-direct.h>
+#include <asm/iommu.h>
+#include <asm/gart.h>
+#include <asm/x86_init.h>
+#include <asm/iommu_table.h>
+
+#include "amd_iommu_proto.h"
+#include "amd_iommu_types.h"
+
+/*
+ * definitions for the ACPI scanning code
+ */
+#define IVRS_HEADER_LENGTH 48
+
+#define ACPI_IVHD_TYPE 0x10
+#define ACPI_IVMD_TYPE_ALL 0x20
+#define ACPI_IVMD_TYPE 0x21
+#define ACPI_IVMD_TYPE_RANGE 0x22
+
+#define IVHD_DEV_ALL 0x01
+#define IVHD_DEV_SELECT 0x02
+#define IVHD_DEV_SELECT_RANGE_START 0x03
+#define IVHD_DEV_RANGE_END 0x04
+#define IVHD_DEV_ALIAS 0x42
+#define IVHD_DEV_ALIAS_RANGE 0x43
+#define IVHD_DEV_EXT_SELECT 0x46
+#define IVHD_DEV_EXT_SELECT_RANGE 0x47
+
+#define IVHD_FLAG_HT_TUN_EN_MASK 0x01
+#define IVHD_FLAG_PASSPW_EN_MASK 0x02
+#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
+#define IVHD_FLAG_ISOC_EN_MASK 0x08
+
+#define IVMD_FLAG_EXCL_RANGE 0x08
+#define IVMD_FLAG_UNITY_MAP 0x01
+
+#define ACPI_DEVFLAG_INITPASS 0x01
+#define ACPI_DEVFLAG_EXTINT 0x02
+#define ACPI_DEVFLAG_NMI 0x04
+#define ACPI_DEVFLAG_SYSMGT1 0x10
+#define ACPI_DEVFLAG_SYSMGT2 0x20
+#define ACPI_DEVFLAG_LINT0 0x40
+#define ACPI_DEVFLAG_LINT1 0x80
+#define ACPI_DEVFLAG_ATSDIS 0x10000000
+
+/*
+ * ACPI table definitions
+ *
+ * These data structures are laid over the table to parse the important values
+ * out of it.
+ */
+
+/*
+ * structure describing one IOMMU in the ACPI table. Typically followed by one
+ * or more ivhd_entry structures.
+ */
+struct ivhd_header {
+ u8 type;
+ u8 flags;
+ u16 length;
+ u16 devid;
+ u16 cap_ptr;
+ u64 mmio_phys;
+ u16 pci_seg;
+ u16 info;
+ u32 reserved;
+} __attribute__((packed));
+
+/*
+ * A device entry describing which devices a specific IOMMU translates and
+ * which requestor ids they use.
+ */
+struct ivhd_entry {
+ u8 type;
+ u16 devid;
+ u8 flags;
+ u32 ext;
+} __attribute__((packed));
+
+/*
+ * An AMD IOMMU memory definition structure. It defines things like exclusion
+ * ranges for devices and regions that should be unity mapped.
+ */
+struct ivmd_header {
+ u8 type;
+ u8 flags;
+ u16 length;
+ u16 devid;
+ u16 aux;
+ u64 resv;
+ u64 range_start;
+ u64 range_length;
+} __attribute__((packed));
+
+bool amd_iommu_dump;
+
+static int __initdata amd_iommu_detected;
+static bool __initdata amd_iommu_disabled;
+
+u16 amd_iommu_last_bdf; /* largest PCI device id we have
+ to handle */
+LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
+ we find in ACPI */
+bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
+
+LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
+ system */
+
+/* Array to assign indices to IOMMUs */
+struct amd_iommu *amd_iommus[MAX_IOMMUS];
+int amd_iommus_present;
+
+/* IOMMUs have a non-present cache? */
+bool amd_iommu_np_cache __read_mostly;
+bool amd_iommu_iotlb_sup __read_mostly = true;
+
+/*
+ * The ACPI table parsing functions set this variable on an error
+ */
+static int __initdata amd_iommu_init_err;
+
+/*
+ * List of protection domains - used during resume
+ */
+LIST_HEAD(amd_iommu_pd_list);
+spinlock_t amd_iommu_pd_lock;
+
+/*
+ * Pointer to the device table which is shared by all AMD IOMMUs;
+ * it is indexed by the PCI device id or the HT unit id and contains
+ * information about the domain the device belongs to as well as the
+ * page table root pointer.
+ */
+struct dev_table_entry *amd_iommu_dev_table;
+
+/*
+ * The alias table is a driver specific data structure which contains the
+ * mappings of the PCI device ids to the actual requestor ids on the IOMMU.
+ * More than one device can share the same requestor id.
+ */
+u16 *amd_iommu_alias_table;
+
+/*
+ * The rlookup table is used to find the IOMMU which is responsible
+ * for a specific device. It is also indexed by the PCI device id.
+ */
+struct amd_iommu **amd_iommu_rlookup_table;
+
+/*
+ * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
+ * to know which ones are already in use.
+ */
+unsigned long *amd_iommu_pd_alloc_bitmap;
+
+static u32 dev_table_size; /* size of the device table */
+static u32 alias_table_size; /* size of the alias table */
+static u32 rlookup_table_size; /* size of the rlookup table */
+
+/*
+ * This function flushes all internal caches of
+ * the IOMMU used by this driver.
+ */
+extern void iommu_flush_all_caches(struct amd_iommu *iommu);
+
+static inline void update_last_devid(u16 devid)
+{
+ if (devid > amd_iommu_last_bdf)
+ amd_iommu_last_bdf = devid;
+}
+
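+/*
+ * The device, alias and rlookup tables are indexed by PCI device id, so
+ * their size depends on the highest device id found above, rounded up
+ * to a power-of-two number of pages.
+ */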
+static inline unsigned long tbl_size(int entry_size)
+{
+ unsigned shift = PAGE_SHIFT +
+ get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
+
+ return 1UL << shift;
+}
+
+/* Access to l1 and l2 indexed register spaces */
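+/*
+ * Both spaces are reached through index/data register pairs in PCI
+ * config space: 0xf8/0xfc for L1 and 0xf0/0xf4 for L2. Setting bit 31
+ * (L1) respectively bit 8 (L2) in the index word turns the following
+ * data access into a write.
+ */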
+
+static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
+{
+ u32 val;
+
+ pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
+ pci_read_config_dword(iommu->dev, 0xfc, &val);
+ return val;
+}
+
+static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
+{
+ pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
+ pci_write_config_dword(iommu->dev, 0xfc, val);
+ pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
+}
+
+static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
+{
+ u32 val;
+
+ pci_write_config_dword(iommu->dev, 0xf0, address);
+ pci_read_config_dword(iommu->dev, 0xf4, &val);
+ return val;
+}
+
+static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
+{
+ pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
+ pci_write_config_dword(iommu->dev, 0xf4, val);
+}
+
+/****************************************************************************
+ *
+ * AMD IOMMU MMIO register space handling functions
+ *
+ * These functions are used to program the IOMMU device registers in
+ * MMIO space required for that driver.
+ *
+ ****************************************************************************/
+
+/*
+ * This function sets the exclusion range in the IOMMU. DMA accesses to the
+ * exclusion range are passed through untranslated
+ */
+static void iommu_set_exclusion_range(struct amd_iommu *iommu)
+{
+ u64 start = iommu->exclusion_start & PAGE_MASK;
+ u64 limit = (start + iommu->exclusion_length) & PAGE_MASK;
+ u64 entry;
+
+ if (!iommu->exclusion_start)
+ return;
+
+ entry = start | MMIO_EXCL_ENABLE_MASK;
+ memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
+ &entry, sizeof(entry));
+
+ entry = limit;
+ memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
+ &entry, sizeof(entry));
+}
+
+/* Programs the physical address of the device table into the IOMMU hardware */
+static void __init iommu_set_device_table(struct amd_iommu *iommu)
+{
+ u64 entry;
+
+ BUG_ON(iommu->mmio_base == NULL);
+
+ entry = virt_to_phys(amd_iommu_dev_table);
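+ /* the low bits encode the size of the table in 4kb units, minus one */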
+ entry |= (dev_table_size >> 12) - 1;
+ memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
+ &entry, sizeof(entry));
+}
+
+/* Generic functions to enable/disable certain features of the IOMMU. */
+static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
+{
+ u32 ctrl;
+
+ ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl |= (1 << bit);
+ writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+}
+
+static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
+{
+ u32 ctrl;
+
+ ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl &= ~(1 << bit);
+ writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+}
+
+/* Function to enable the hardware */
+static void iommu_enable(struct amd_iommu *iommu)
+{
+ static const char * const feat_str[] = {
+ "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
+ "IA", "GA", "HE", "PC", NULL
+ };
+ int i;
+
+ printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx",
+ dev_name(&iommu->dev->dev), iommu->cap_ptr);
+
+ if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
+ printk(KERN_CONT " extended features: ");
+ for (i = 0; feat_str[i]; ++i)
+ if (iommu_feature(iommu, (1ULL << i)))
+ printk(KERN_CONT " %s", feat_str[i]);
+ }
+ printk(KERN_CONT "\n");
+
+ iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
+}
+
+static void iommu_disable(struct amd_iommu *iommu)
+{
+ /* Disable command buffer */
+ iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+ /* Disable event logging and event interrupts */
+ iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
+ iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+
+ /* Disable IOMMU hardware itself */
+ iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
+}
+
+/*
+ * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
+ * the system has one.
+ */
+static u8 * __init iommu_map_mmio_space(u64 address)
+{
+ u8 *ret;
+
+ if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
+ pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
+ address);
+ pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n");
+ return NULL;
+ }
+
+ ret = ioremap_nocache(address, MMIO_REGION_LENGTH);
+ if (ret != NULL)
+ return ret;
+
+ release_mem_region(address, MMIO_REGION_LENGTH);
+
+ return NULL;
+}
+
+static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
+{
+ if (iommu->mmio_base)
+ iounmap(iommu->mmio_base);
+ release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH);
+}
+
+/****************************************************************************
+ *
+ * The functions below belong to the first pass of AMD IOMMU ACPI table
+ * parsing. In this pass we try to find out the highest device id this
+ * code has to handle. Based on this information the size of the shared data
+ * structures is determined later.
+ *
+ ****************************************************************************/
+
+/*
+ * This function calculates the length of a given IVHD entry
+ */
+static inline int ivhd_entry_length(u8 *ivhd)
+{
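+ /*
+ * The length of an entry is encoded in the two most significant
+ * bits of its type field: 4, 8, 16 or 32 bytes.
+ */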
+ return 0x04 << (*ivhd >> 6);
+}
+
+/*
+ * This function reads the last device id the IOMMU has to handle from the PCI
+ * capability header for this IOMMU
+ */
+static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr)
+{
+ u32 cap;
+
+ cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET);
+ update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap)));
+
+ return 0;
+}
+
+/*
+ * After reading the highest device id from the IOMMU PCI capability header
+ * this function checks whether a higher device id is defined in the ACPI table
+ */
+static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
+{
+ u8 *p = (void *)h, *end = (void *)h;
+ struct ivhd_entry *dev;
+
+ p += sizeof(*h);
+ end += h->length;
+
+ find_last_devid_on_pci(PCI_BUS(h->devid),
+ PCI_SLOT(h->devid),
+ PCI_FUNC(h->devid),
+ h->cap_ptr);
+
+ while (p < end) {
+ dev = (struct ivhd_entry *)p;
+ switch (dev->type) {
+ case IVHD_DEV_SELECT:
+ case IVHD_DEV_RANGE_END:
+ case IVHD_DEV_ALIAS:
+ case IVHD_DEV_EXT_SELECT:
+ /* all the above subfield types refer to device ids */
+ update_last_devid(dev->devid);
+ break;
+ default:
+ break;
+ }
+ p += ivhd_entry_length(p);
+ }
+
+ WARN_ON(p != end);
+
+ return 0;
+}
+
+/*
+ * Iterate over all IVHD entries in the ACPI table and find the highest device
+ * id which we need to handle. This is the first of three functions which parse
+ * the ACPI table. So we check the checksum here.
+ */
+static int __init find_last_devid_acpi(struct acpi_table_header *table)
+{
+ int i;
+ u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
+ struct ivhd_header *h;
+
+ /*
+ * Validate checksum here so we don't need to do it when
+ * we actually parse the table
+ */
+ for (i = 0; i < table->length; ++i)
+ checksum += p[i];
+ if (checksum != 0) {
+ /* ACPI table corrupt */
+ amd_iommu_init_err = -ENODEV;
+ return 0;
+ }
+
+ p += IVRS_HEADER_LENGTH;
+
+ end += table->length;
+ while (p < end) {
+ h = (struct ivhd_header *)p;
+ switch (h->type) {
+ case ACPI_IVHD_TYPE:
+ find_last_devid_from_ivhd(h);
+ break;
+ default:
+ break;
+ }
+ p += h->length;
+ }
+ WARN_ON(p != end);
+
+ return 0;
+}
+
+/****************************************************************************
+ *
+ * The following functions belong to the code path which parses the ACPI table
+ * the second time. In this ACPI parsing iteration we allocate IOMMU specific
+ * data structures, initialize the device/alias/rlookup table and also
+ * basically initialize the hardware.
+ *
+ ****************************************************************************/
+
+/*
+ * Allocates the command buffer. This buffer is per AMD IOMMU. We can
+ * write commands to that buffer later and the IOMMU will execute them
+ * asynchronously
+ */
+static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
+{
+ u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(CMD_BUFFER_SIZE));
+
+ if (cmd_buf == NULL)
+ return NULL;
+
+ iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
+
+ return cmd_buf;
+}
+
+/*
+ * This function resets the command buffer if the IOMMU stopped fetching
+ * commands from it.
+ */
+void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
+{
+ iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+ writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+
+ iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
+}
+
+/*
+ * This function writes the command buffer address to the hardware and
+ * enables it.
+ */
+static void iommu_enable_command_buffer(struct amd_iommu *iommu)
+{
+ u64 entry;
+
+ BUG_ON(iommu->cmd_buf == NULL);
+
+ entry = (u64)virt_to_phys(iommu->cmd_buf);
+ entry |= MMIO_CMD_SIZE_512;
+
+ memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
+ &entry, sizeof(entry));
+
+ amd_iommu_reset_cmd_buffer(iommu);
+ iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
+}
+
+static void __init free_command_buffer(struct amd_iommu *iommu)
+{
+ free_pages((unsigned long)iommu->cmd_buf,
+ get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
+}
+
+/* allocates the memory into which the IOMMU will log its events */
+static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
+{
+ iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(EVT_BUFFER_SIZE));
+
+ if (iommu->evt_buf == NULL)
+ return NULL;
+
+ iommu->evt_buf_size = EVT_BUFFER_SIZE;
+
+ return iommu->evt_buf;
+}
+
+static void iommu_enable_event_buffer(struct amd_iommu *iommu)
+{
+ u64 entry;
+
+ BUG_ON(iommu->evt_buf == NULL);
+
+ entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
+
+ memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
+ &entry, sizeof(entry));
+
+ /* set head and tail to zero manually */
+ writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+
+ iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+}
+
+static void __init free_event_buffer(struct amd_iommu *iommu)
+{
+ free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
+}
+
+/* sets a specific bit in the device table entry. */
+static void set_dev_entry_bit(u16 devid, u8 bit)
+{
+ int i = (bit >> 5) & 0x07;
+ int _bit = bit & 0x1f;
+
+ amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
+}
+
+static int get_dev_entry_bit(u16 devid, u8 bit)
+{
+ int i = (bit >> 5) & 0x07;
+ int _bit = bit & 0x1f;
+
+ return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
+}
+
+
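+/*
+ * Workaround for erratum 63: a device table entry with the SYSMGT field
+ * programmed to 01b also needs the IW bit set.
+ */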
+void amd_iommu_apply_erratum_63(u16 devid)
+{
+ int sysmgt;
+
+ sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
+ (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
+
+ if (sysmgt == 0x01)
+ set_dev_entry_bit(devid, DEV_ENTRY_IW);
+}
+
+/* Writes the specific IOMMU for a device into the rlookup table */
+static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
+{
+ amd_iommu_rlookup_table[devid] = iommu;
+}
+
+/*
+ * This function takes the device specific flags read from the ACPI
+ * table and sets up the device table entry with that information
+ */
+static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
+ u16 devid, u32 flags, u32 ext_flags)
+{
+ if (flags & ACPI_DEVFLAG_INITPASS)
+ set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
+ if (flags & ACPI_DEVFLAG_EXTINT)
+ set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
+ if (flags & ACPI_DEVFLAG_NMI)
+ set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
+ if (flags & ACPI_DEVFLAG_SYSMGT1)
+ set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
+ if (flags & ACPI_DEVFLAG_SYSMGT2)
+ set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
+ if (flags & ACPI_DEVFLAG_LINT0)
+ set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
+ if (flags & ACPI_DEVFLAG_LINT1)
+ set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
+
+ amd_iommu_apply_erratum_63(devid);
+
+ set_iommu_for_device(iommu, devid);
+}
+
+/*
+ * Reads the device exclusion range from ACPI and initializes the IOMMU with
+ * it
+ */
+static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
+{
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+ if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
+ return;
+
+ if (iommu) {
+ /*
+ * We can only configure exclusion ranges per IOMMU, not
+ * per device. But we can enable the exclusion range per
+ * device. This is done here
+ */
+ set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
+ iommu->exclusion_start = m->range_start;
+ iommu->exclusion_length = m->range_length;
+ }
+}
+
+/*
+ * This function reads some important data from the IOMMU PCI space and
+ * initializes the driver data structure with it. It reads the hardware
+ * capabilities and the first/last device entries
+ */
+static void __init init_iommu_from_pci(struct amd_iommu *iommu)
+{
+ int cap_ptr = iommu->cap_ptr;
+ u32 range, misc, low, high;
+ int i, j;
+
+ pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
+ &iommu->cap);
+ pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
+ &range);
+ pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
+ &misc);
+
+ iommu->first_device = calc_devid(MMIO_GET_BUS(range),
+ MMIO_GET_FD(range));
+ iommu->last_device = calc_devid(MMIO_GET_BUS(range),
+ MMIO_GET_LD(range));
+ iommu->evt_msi_num = MMIO_MSI_NUM(misc);
+
+ if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
+ amd_iommu_iotlb_sup = false;
+
+ /* read extended feature bits */
+ low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
+ high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
+
+ iommu->features = ((u64)high << 32) | low;
+
+ if (!is_rd890_iommu(iommu->dev))
+ return;
+
+ /*
+ * Some rd890 systems may not be fully reconfigured by the BIOS, so
+ * it's necessary for us to store this information so it can be
+ * reprogrammed on resume
+ */
+
+ pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
+ &iommu->stored_addr_lo);
+ pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
+ &iommu->stored_addr_hi);
+
+ /* Low bit locks writes to configuration space */
+ iommu->stored_addr_lo &= ~1;
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 0x12; j++)
+ iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
+
+ for (i = 0; i < 0x83; i++)
+ iommu->stored_l2[i] = iommu_read_l2(iommu, i);
+}
+
+/*
+ * Takes a pointer to an AMD IOMMU entry in the ACPI table and
+ * initializes the hardware and our data structures with it.
+ */
+static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
+ struct ivhd_header *h)
+{
+ u8 *p = (u8 *)h;
+ u8 *end = p, flags = 0;
+ u16 devid = 0, devid_start = 0, devid_to = 0;
+ u32 dev_i, ext_flags = 0;
+ bool alias = false;
+ struct ivhd_entry *e;
+
+ /*
+ * First save the recommended feature enable bits from ACPI
+ */
+ iommu->acpi_flags = h->flags;
+
+ /*
+ * Done. Now parse the device entries
+ */
+ p += sizeof(struct ivhd_header);
+ end += h->length;
+
+
+ while (p < end) {
+ e = (struct ivhd_entry *)p;
+ switch (e->type) {
+ case IVHD_DEV_ALL:
+
+ DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x"
+ " last device %02x:%02x.%x flags: %02x\n",
+ PCI_BUS(iommu->first_device),
+ PCI_SLOT(iommu->first_device),
+ PCI_FUNC(iommu->first_device),
+ PCI_BUS(iommu->last_device),
+ PCI_SLOT(iommu->last_device),
+ PCI_FUNC(iommu->last_device),
+ e->flags);
+
+ for (dev_i = iommu->first_device;
+ dev_i <= iommu->last_device; ++dev_i)
+ set_dev_entry_from_acpi(iommu, dev_i,
+ e->flags, 0);
+ break;
+ case IVHD_DEV_SELECT:
+
+ DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
+ "flags: %02x\n",
+ PCI_BUS(e->devid),
+ PCI_SLOT(e->devid),
+ PCI_FUNC(e->devid),
+ e->flags);
+
+ devid = e->devid;
+ set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+ break;
+ case IVHD_DEV_SELECT_RANGE_START:
+
+ DUMP_printk(" DEV_SELECT_RANGE_START\t "
+ "devid: %02x:%02x.%x flags: %02x\n",
+ PCI_BUS(e->devid),
+ PCI_SLOT(e->devid),
+ PCI_FUNC(e->devid),
+ e->flags);
+
+ devid_start = e->devid;
+ flags = e->flags;
+ ext_flags = 0;
+ alias = false;
+ break;
+ case IVHD_DEV_ALIAS:
+
+ DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
+ "flags: %02x devid_to: %02x:%02x.%x\n",
+ PCI_BUS(e->devid),
+ PCI_SLOT(e->devid),
+ PCI_FUNC(e->devid),
+ e->flags,
+ PCI_BUS(e->ext >> 8),
+ PCI_SLOT(e->ext >> 8),
+ PCI_FUNC(e->ext >> 8));
+
+ devid = e->devid;
+ devid_to = e->ext >> 8;
+ set_dev_entry_from_acpi(iommu, devid , e->flags, 0);
+ set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
+ amd_iommu_alias_table[devid] = devid_to;
+ break;
+ case IVHD_DEV_ALIAS_RANGE:
+
+ DUMP_printk(" DEV_ALIAS_RANGE\t\t "
+ "devid: %02x:%02x.%x flags: %02x "
+ "devid_to: %02x:%02x.%x\n",
+ PCI_BUS(e->devid),
+ PCI_SLOT(e->devid),
+ PCI_FUNC(e->devid),
+ e->flags,
+ PCI_BUS(e->ext >> 8),
+ PCI_SLOT(e->ext >> 8),
+ PCI_FUNC(e->ext >> 8));
+
+ devid_start = e->devid;
+ flags = e->flags;
+ devid_to = e->ext >> 8;
+ ext_flags = 0;
+ alias = true;
+ break;
+ case IVHD_DEV_EXT_SELECT:
+
+ DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
+ "flags: %02x ext: %08x\n",
+ PCI_BUS(e->devid),
+ PCI_SLOT(e->devid),
+ PCI_FUNC(e->devid),
+ e->flags, e->ext);
+
+ devid = e->devid;
+ set_dev_entry_from_acpi(iommu, devid, e->flags,
+ e->ext);
+ break;
+ case IVHD_DEV_EXT_SELECT_RANGE:
+
+ DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
+ "%02x:%02x.%x flags: %02x ext: %08x\n",
+ PCI_BUS(e->devid),
+ PCI_SLOT(e->devid),
+ PCI_FUNC(e->devid),
+ e->flags, e->ext);
+
+ devid_start = e->devid;
+ flags = e->flags;
+ ext_flags = e->ext;
+ alias = false;
+ break;
+ case IVHD_DEV_RANGE_END:
+
+ DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
+ PCI_BUS(e->devid),
+ PCI_SLOT(e->devid),
+ PCI_FUNC(e->devid));
+
+ devid = e->devid;
+ for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
+ if (alias) {
+ amd_iommu_alias_table[dev_i] = devid_to;
+ set_dev_entry_from_acpi(iommu,
+ devid_to, flags, ext_flags);
+ }
+ set_dev_entry_from_acpi(iommu, dev_i,
+ flags, ext_flags);
+ }
+ break;
+ default:
+ break;
+ }
+
+ p += ivhd_entry_length(p);
+ }
+}
+
+/* Initializes the device->iommu mapping for the driver */
+static int __init init_iommu_devices(struct amd_iommu *iommu)
+{
+ u32 i;
+
+ for (i = iommu->first_device; i <= iommu->last_device; ++i)
+ set_iommu_for_device(iommu, i);
+
+ return 0;
+}
+
+static void __init free_iommu_one(struct amd_iommu *iommu)
+{
+ free_command_buffer(iommu);
+ free_event_buffer(iommu);
+ iommu_unmap_mmio_space(iommu);
+}
+
+static void __init free_iommu_all(void)
+{
+ struct amd_iommu *iommu, *next;
+
+ for_each_iommu_safe(iommu, next) {
+ list_del(&iommu->list);
+ free_iommu_one(iommu);
+ kfree(iommu);
+ }
+}
+
+/*
+ * This function glues the initialization for one IOMMU together and
+ * also allocates the command buffer and programs the
+ * hardware. It does NOT enable the IOMMU. This is done afterwards.
+ */
+static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
+{
+ spin_lock_init(&iommu->lock);
+
+ /* Add IOMMU to internal data structures */
+ list_add_tail(&iommu->list, &amd_iommu_list);
+ iommu->index = amd_iommus_present++;
+
+ if (unlikely(iommu->index >= MAX_IOMMUS)) {
+ WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
+ return -ENOSYS;
+ }
+
+ /* Index is fine - add IOMMU to the array */
+ amd_iommus[iommu->index] = iommu;
+
+ /*
+ * Copy data from ACPI table entry to the iommu struct
+ */
+ iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
+ if (!iommu->dev)
+ return 1;
+
+ iommu->cap_ptr = h->cap_ptr;
+ iommu->pci_seg = h->pci_seg;
+ iommu->mmio_phys = h->mmio_phys;
+ iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
+ if (!iommu->mmio_base)
+ return -ENOMEM;
+
+ iommu->cmd_buf = alloc_command_buffer(iommu);
+ if (!iommu->cmd_buf)
+ return -ENOMEM;
+
+ iommu->evt_buf = alloc_event_buffer(iommu);
+ if (!iommu->evt_buf)
+ return -ENOMEM;
+
+ iommu->int_enabled = false;
+
+ init_iommu_from_pci(iommu);
+ init_iommu_from_acpi(iommu, h);
+ init_iommu_devices(iommu);
+
+ if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
+ amd_iommu_np_cache = true;
+
+ return pci_enable_device(iommu->dev);
+}
+
+/*
+ * Iterates over all IOMMU entries in the ACPI table, allocates the
+ * IOMMU structure and initializes it with init_iommu_one()
+ */
+static int __init init_iommu_all(struct acpi_table_header *table)
+{
+ u8 *p = (u8 *)table, *end = (u8 *)table;
+ struct ivhd_header *h;
+ struct amd_iommu *iommu;
+ int ret;
+
+ end += table->length;
+ p += IVRS_HEADER_LENGTH;
+
+ while (p < end) {
+ h = (struct ivhd_header *)p;
+ switch (*p) {
+ case ACPI_IVHD_TYPE:
+
+ DUMP_printk("device: %02x:%02x.%01x cap: %04x "
+ "seg: %d flags: %01x info %04x\n",
+ PCI_BUS(h->devid), PCI_SLOT(h->devid),
+ PCI_FUNC(h->devid), h->cap_ptr,
+ h->pci_seg, h->flags, h->info);
+ DUMP_printk(" mmio-addr: %016llx\n",
+ h->mmio_phys);
+
+ iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
+ if (iommu == NULL) {
+ amd_iommu_init_err = -ENOMEM;
+ return 0;
+ }
+
+ ret = init_iommu_one(iommu, h);
+ if (ret) {
+ amd_iommu_init_err = ret;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ p += h->length;
+
+ }
+ WARN_ON(p != end);
+
+ return 0;
+}
+
+/****************************************************************************
+ *
+ * The following functions initialize the MSI interrupts for all IOMMUs
+ * in the system. It's a bit challenging because there could be multiple
+ * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
+ * pci_dev.
+ *
+ ****************************************************************************/
+
+static int iommu_setup_msi(struct amd_iommu *iommu)
+{
+ int r;
+
+ if (pci_enable_msi(iommu->dev))
+ return 1;
+
+ r = request_threaded_irq(iommu->dev->irq,
+ amd_iommu_int_handler,
+ amd_iommu_int_thread,
+ 0, "AMD-Vi",
+ iommu->dev);
+
+ if (r) {
+ pci_disable_msi(iommu->dev);
+ return 1;
+ }
+
+ iommu->int_enabled = true;
+ iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+
+ return 0;
+}
+
+static int iommu_init_msi(struct amd_iommu *iommu)
+{
+ if (iommu->int_enabled)
+ return 0;
+
+ if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI))
+ return iommu_setup_msi(iommu);
+
+ return 1;
+}
+
+/****************************************************************************
+ *
+ * The next functions belong to the third pass of parsing the ACPI
+ * table. In this last pass the memory mapping requirements are
+ * gathered (like exclusion and unity mapping ranges).
+ *
+ ****************************************************************************/
+
+static void __init free_unity_maps(void)
+{
+ struct unity_map_entry *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+/* called when we find an exclusion range definition in ACPI */
+static int __init init_exclusion_range(struct ivmd_header *m)
+{
+ int i;
+
+ switch (m->type) {
+ case ACPI_IVMD_TYPE:
+ set_device_exclusion_range(m->devid, m);
+ break;
+ case ACPI_IVMD_TYPE_ALL:
+ for (i = 0; i <= amd_iommu_last_bdf; ++i)
+ set_device_exclusion_range(i, m);
+ break;
+ case ACPI_IVMD_TYPE_RANGE:
+ for (i = m->devid; i <= m->aux; ++i)
+ set_device_exclusion_range(i, m);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* called for unity map ACPI definition */
+static int __init init_unity_map_range(struct ivmd_header *m)
+{
+ struct unity_map_entry *e = NULL;
+ char *s;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (e == NULL)
+ return -ENOMEM;
+
+ switch (m->type) {
+ default:
+ kfree(e);
+ return 0;
+ case ACPI_IVMD_TYPE:
+ s = "IVMD_TYPEi\t\t\t";
+ e->devid_start = e->devid_end = m->devid;
+ break;
+ case ACPI_IVMD_TYPE_ALL:
+ s = "IVMD_TYPE_ALL\t\t";
+ e->devid_start = 0;
+ e->devid_end = amd_iommu_last_bdf;
+ break;
+ case ACPI_IVMD_TYPE_RANGE:
+ s = "IVMD_TYPE_RANGE\t\t";
+ e->devid_start = m->devid;
+ e->devid_end = m->aux;
+ break;
+ }
+ e->address_start = PAGE_ALIGN(m->range_start);
+ e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
+ e->prot = m->flags >> 1;
+
+ DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
+ " range_start: %016llx range_end: %016llx flags: %x\n", s,
+ PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start),
+ PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end),
+ PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
+ e->address_start, e->address_end, m->flags);
+
+ list_add_tail(&e->list, &amd_iommu_unity_map);
+
+ return 0;
+}
+
+/* iterates over all memory definitions we find in the ACPI table */
+static int __init init_memory_definitions(struct acpi_table_header *table)
+{
+ u8 *p = (u8 *)table, *end = (u8 *)table;
+ struct ivmd_header *m;
+
+ end += table->length;
+ p += IVRS_HEADER_LENGTH;
+
+ while (p < end) {
+ m = (struct ivmd_header *)p;
+ if (m->flags & IVMD_FLAG_EXCL_RANGE)
+ init_exclusion_range(m);
+ else if (m->flags & IVMD_FLAG_UNITY_MAP)
+ init_unity_map_range(m);
+
+ p += m->length;
+ }
+
+ return 0;
+}
+
+/*
+ * Init the device table to not allow DMA access for devices and
+ * suppress all page faults
+ */
+static void init_device_table(void)
+{
+ u32 devid;
+
+ for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+ set_dev_entry_bit(devid, DEV_ENTRY_VALID);
+ set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
+ }
+}
+
+static void iommu_init_flags(struct amd_iommu *iommu)
+{
+ iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
+ iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
+ iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
+ iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
+
+ iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
+ iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
+ iommu_feature_disable(iommu, CONTROL_ISOC_EN);
+
+ /*
+ * make IOMMU memory accesses cache coherent
+ */
+ iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+}
+
+static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
+{
+ int i, j;
+ u32 ioc_feature_control;
+ struct pci_dev *pdev = NULL;
+
+ /* RD890 BIOSes may not have completely reconfigured the iommu */
+ if (!is_rd890_iommu(iommu->dev))
+ return;
+
+ /*
+ * First, we need to ensure that the iommu is enabled. This is
+ * controlled by a register in the northbridge
+ */
+ pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
+
+ if (!pdev)
+ return;
+
+ /* Select Northbridge indirect register 0x75 and enable writing */
+ pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
+ pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
+
+ /* Enable the iommu */
+ if (!(ioc_feature_control & 0x1))
+ pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
+
+ pci_dev_put(pdev);
+
+ /* Restore the iommu BAR */
+ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
+ iommu->stored_addr_lo);
+ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
+ iommu->stored_addr_hi);
+
+ /* Restore the l1 indirect regs for each of the 6 l1s */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 0x12; j++)
+ iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
+
+ /* Restore the l2 indirect regs */
+ for (i = 0; i < 0x83; i++)
+ iommu_write_l2(iommu, i, iommu->stored_l2[i]);
+
+ /* Lock PCI setup registers */
+ pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
+ iommu->stored_addr_lo | 1);
+}
+
+/*
+ * This function finally enables all IOMMUs found in the system after
+ * they have been initialized
+ */
+static void enable_iommus(void)
+{
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ iommu_disable(iommu);
+ iommu_init_flags(iommu);
+ iommu_set_device_table(iommu);
+ iommu_enable_command_buffer(iommu);
+ iommu_enable_event_buffer(iommu);
+ iommu_set_exclusion_range(iommu);
+ iommu_init_msi(iommu);
+ iommu_enable(iommu);
+ iommu_flush_all_caches(iommu);
+ }
+}
+
+static void disable_iommus(void)
+{
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu)
+ iommu_disable(iommu);
+}
+
+/*
+ * Suspend/Resume support
+ */
+
+static void amd_iommu_resume(void)
+{
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu)
+ iommu_apply_resume_quirks(iommu);
+
+ /* re-load the hardware */
+ enable_iommus();
+
+ /*
+ * we have to flush after the IOMMUs are enabled because a
+ * disabled IOMMU will never execute the commands we send
+ */
+ for_each_iommu(iommu)
+ iommu_flush_all_caches(iommu);
+}
+
+static int amd_iommu_suspend(void)
+{
+ /* disable IOMMUs to get out of the way of the BIOS */
+ disable_iommus();
+
+ return 0;
+}
+
+static struct syscore_ops amd_iommu_syscore_ops = {
+ .suspend = amd_iommu_suspend,
+ .resume = amd_iommu_resume,
+};
+
+/*
+ * This is the core init function for AMD IOMMU hardware in the system.
+ * This function is called from the generic x86 DMA layer initialization
+ * code.
+ *
+ * This function basically parses the ACPI table for AMD IOMMU (IVRS)
+ * three times:
+ *
+ * 1st pass) Find the highest PCI device id the driver has to handle.
+ * Based on this information the size of the data structures
+ * that need to be allocated is determined.
+ *
+ * 2nd pass) Initialize the data structures just allocated with the
+ * information in the ACPI table about the available AMD IOMMUs
+ * in the system. It also maps the PCI devices in the
+ * system to specific IOMMUs.
+ *
+ * 3rd pass) After the basic data structures are allocated and
+ * initialized we update them with information about memory
+ * remapping requirements parsed out of the ACPI table in
+ * this last pass.
+ *
+ * After that the hardware is initialized and ready to go. In the last
+ * step we do some Linux specific things like registering the driver in
+ * the dma_ops interface and initializing the suspend/resume support
+ * functions. Finally it prints some information about AMD IOMMUs and
+ * the driver state and enables the hardware.
+ */
+static int __init amd_iommu_init(void)
+{
+ int i, ret = 0;
+
+ /*
+ * First parse ACPI tables to find the largest Bus/Dev/Func
+ * we need to handle. Based on this information the shared data
+ * structures for the IOMMUs in the system will be allocated
+ */
+ if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
+ return -ENODEV;
+
+ ret = amd_iommu_init_err;
+ if (ret)
+ goto out;
+
+ dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
+ alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
+ rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
+
+ ret = -ENOMEM;
+
+ /* Device table - directly used by all IOMMUs */
+ amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(dev_table_size));
+ if (amd_iommu_dev_table == NULL)
+ goto out;
+
+ /*
+ * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
+ * IOMMU sees for that device
+ */
+ amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(alias_table_size));
+ if (amd_iommu_alias_table == NULL)
+ goto free;
+
+ /* IOMMU rlookup table - find the IOMMU for a specific device */
+ amd_iommu_rlookup_table = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(rlookup_table_size));
+ if (amd_iommu_rlookup_table == NULL)
+ goto free;
+
+ amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(MAX_DOMAIN_ID/8));
+ if (amd_iommu_pd_alloc_bitmap == NULL)
+ goto free;
+
+ /* init the device table */
+ init_device_table();
+
+ /*
+ * let all alias entries point to themselves
+ */
+ for (i = 0; i <= amd_iommu_last_bdf; ++i)
+ amd_iommu_alias_table[i] = i;
+
+ /*
+ * never allocate domain 0 because it's used as the non-allocated and
+ * error value placeholder
+ */
+ amd_iommu_pd_alloc_bitmap[0] = 1;
+
+ spin_lock_init(&amd_iommu_pd_lock);
+
+ /*
+ * now the data structures are allocated and basically initialized
+ * start the real acpi table scan
+ */
+ ret = -ENODEV;
+ if (acpi_table_parse("IVRS", init_iommu_all) != 0)
+ goto free;
+
+ if (amd_iommu_init_err) {
+ ret = amd_iommu_init_err;
+ goto free;
+ }
+
+ if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
+ goto free;
+
+ if (amd_iommu_init_err) {
+ ret = amd_iommu_init_err;
+ goto free;
+ }
+
+ ret = amd_iommu_init_devices();
+ if (ret)
+ goto free;
+
+ enable_iommus();
+
+ if (iommu_pass_through)
+ ret = amd_iommu_init_passthrough();
+ else
+ ret = amd_iommu_init_dma_ops();
+
+ if (ret)
+ goto free_disable;
+
+ amd_iommu_init_api();
+
+ amd_iommu_init_notifier();
+
+ register_syscore_ops(&amd_iommu_syscore_ops);
+
+ if (iommu_pass_through)
+ goto out;
+
+ if (amd_iommu_unmap_flush)
+ printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
+ else
+ printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
+
+ x86_platform.iommu_shutdown = disable_iommus;
+out:
+ return ret;
+
+free_disable:
+ disable_iommus();
+
+free:
+ amd_iommu_uninit_devices();
+
+ free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
+ get_order(MAX_DOMAIN_ID/8));
+
+ free_pages((unsigned long)amd_iommu_rlookup_table,
+ get_order(rlookup_table_size));
+
+ free_pages((unsigned long)amd_iommu_alias_table,
+ get_order(alias_table_size));
+
+ free_pages((unsigned long)amd_iommu_dev_table,
+ get_order(dev_table_size));
+
+ free_iommu_all();
+
+ free_unity_maps();
+
+#ifdef CONFIG_GART_IOMMU
+ /*
+ * We failed to initialize the AMD IOMMU - try fallback to GART
+ * if possible.
+ */
+ gart_iommu_init();
+
+#endif
+
+ goto out;
+}
+
+/****************************************************************************
+ *
+ * Early detect code. This code runs at IOMMU detection time in the DMA
+ * layer. It just checks whether there is an IVRS ACPI table to detect AMD
+ * IOMMUs
+ *
+ ****************************************************************************/
+static int __init early_amd_iommu_detect(struct acpi_table_header *table)
+{
+ return 0;
+}
+
+int __init amd_iommu_detect(void)
+{
+ if (no_iommu || (iommu_detected && !gart_iommu_aperture))
+ return -ENODEV;
+
+ if (amd_iommu_disabled)
+ return -ENODEV;
+
+ if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
+ iommu_detected = 1;
+ amd_iommu_detected = 1;
+ x86_init.iommu.iommu_init = amd_iommu_init;
+
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
+ return 1;
+ }
+ return -ENODEV;
+}
+
+/****************************************************************************
+ *
+ * Parsing functions for the AMD IOMMU specific kernel command line
+ * options.
+ *
+ ****************************************************************************/
+
+static int __init parse_amd_iommu_dump(char *str)
+{
+ amd_iommu_dump = true;
+
+ return 1;
+}
+
+static int __init parse_amd_iommu_options(char *str)
+{
+ for (; *str; ++str) {
+ if (strncmp(str, "fullflush", 9) == 0)
+ amd_iommu_unmap_flush = true;
+ if (strncmp(str, "off", 3) == 0)
+ amd_iommu_disabled = true;
+ }
+
+ return 1;
+}
+
+__setup("amd_iommu_dump", parse_amd_iommu_dump);
+__setup("amd_iommu=", parse_amd_iommu_options);
+
+IOMMU_INIT_FINISH(amd_iommu_detect,
+ gart_iommu_hole_init,
+ 0,
+ 0);
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
new file mode 100644
index 00000000000..7ffaa64410b
--- /dev/null
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_X86_AMD_IOMMU_PROTO_H
+#define _ASM_X86_AMD_IOMMU_PROTO_H
+
+#include "amd_iommu_types.h"
+
+extern int amd_iommu_init_dma_ops(void);
+extern int amd_iommu_init_passthrough(void);
+extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
+extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
+extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
+extern int amd_iommu_init_devices(void);
+extern void amd_iommu_uninit_devices(void);
+extern void amd_iommu_init_notifier(void);
+extern void amd_iommu_init_api(void);
+#ifndef CONFIG_AMD_IOMMU_STATS
+
+static inline void amd_iommu_stats_init(void) { }
+
+#endif /* !CONFIG_AMD_IOMMU_STATS */
+
+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+ return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+ (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
+{
+ if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+ return false;
+
+ return !!(iommu->features & f);
+}
+
+#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
new file mode 100644
index 00000000000..5b9c5075e81
--- /dev/null
+++ b/drivers/iommu/amd_iommu_types.h
@@ -0,0 +1,585 @@
+/*
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Leo Duran <leo.duran@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
+#define _ASM_X86_AMD_IOMMU_TYPES_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+/*
+ * Maximum number of IOMMUs supported
+ */
+#define MAX_IOMMUS 32
+
+/*
+ * some size calculation constants
+ */
+#define DEV_TABLE_ENTRY_SIZE 32
+#define ALIAS_TABLE_ENTRY_SIZE 2
+#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
+
+/* Length of the MMIO region for the AMD IOMMU */
+#define MMIO_REGION_LENGTH 0x4000
+
+/* Capability offsets used by the driver */
+#define MMIO_CAP_HDR_OFFSET 0x00
+#define MMIO_RANGE_OFFSET 0x0c
+#define MMIO_MISC_OFFSET 0x10
+
+/* Masks, shifts and macros to parse the device range capability */
+#define MMIO_RANGE_LD_MASK 0xff000000
+#define MMIO_RANGE_FD_MASK 0x00ff0000
+#define MMIO_RANGE_BUS_MASK 0x0000ff00
+#define MMIO_RANGE_LD_SHIFT 24
+#define MMIO_RANGE_FD_SHIFT 16
+#define MMIO_RANGE_BUS_SHIFT 8
+#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
+#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
+#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
+#define MMIO_MSI_NUM(x) ((x) & 0x1f)
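+
+/*
+ * Worked example (illustrative, derived only from the masks above): a
+ * device range capability value of 0x20100800 decodes as
+ * MMIO_GET_BUS() = 0x08, MMIO_GET_FD() = 0x10 (first device) and
+ * MMIO_GET_LD() = 0x20 (last device).
+ */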
+
+/* Flag masks for the AMD IOMMU exclusion range */
+#define MMIO_EXCL_ENABLE_MASK 0x01ULL
+#define MMIO_EXCL_ALLOW_MASK 0x02ULL
+
+/* Used offsets into the MMIO space */
+#define MMIO_DEV_TABLE_OFFSET 0x0000
+#define MMIO_CMD_BUF_OFFSET 0x0008
+#define MMIO_EVT_BUF_OFFSET 0x0010
+#define MMIO_CONTROL_OFFSET 0x0018
+#define MMIO_EXCL_BASE_OFFSET 0x0020
+#define MMIO_EXCL_LIMIT_OFFSET 0x0028
+#define MMIO_EXT_FEATURES 0x0030
+#define MMIO_CMD_HEAD_OFFSET 0x2000
+#define MMIO_CMD_TAIL_OFFSET 0x2008
+#define MMIO_EVT_HEAD_OFFSET 0x2010
+#define MMIO_EVT_TAIL_OFFSET 0x2018
+#define MMIO_STATUS_OFFSET 0x2020
+
+
+/* Extended Feature Bits */
+#define FEATURE_PREFETCH (1ULL<<0)
+#define FEATURE_PPR (1ULL<<1)
+#define FEATURE_X2APIC (1ULL<<2)
+#define FEATURE_NX (1ULL<<3)
+#define FEATURE_GT (1ULL<<4)
+#define FEATURE_IA (1ULL<<6)
+#define FEATURE_GA (1ULL<<7)
+#define FEATURE_HE (1ULL<<8)
+#define FEATURE_PC (1ULL<<9)
+
+/* MMIO status bits */
+#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
+
+/* event logging constants */
+#define EVENT_ENTRY_SIZE 0x10
+#define EVENT_TYPE_SHIFT 28
+#define EVENT_TYPE_MASK 0xf
+#define EVENT_TYPE_ILL_DEV 0x1
+#define EVENT_TYPE_IO_FAULT 0x2
+#define EVENT_TYPE_DEV_TAB_ERR 0x3
+#define EVENT_TYPE_PAGE_TAB_ERR 0x4
+#define EVENT_TYPE_ILL_CMD 0x5
+#define EVENT_TYPE_CMD_HARD_ERR 0x6
+#define EVENT_TYPE_IOTLB_INV_TO 0x7
+#define EVENT_TYPE_INV_DEV_REQ 0x8
+#define EVENT_DEVID_MASK 0xffff
+#define EVENT_DEVID_SHIFT 0
+#define EVENT_DOMID_MASK 0xffff
+#define EVENT_DOMID_SHIFT 0
+#define EVENT_FLAGS_MASK 0xfff
+#define EVENT_FLAGS_SHIFT 0x10
+
+/* feature control bits */
+#define CONTROL_IOMMU_EN 0x00ULL
+#define CONTROL_HT_TUN_EN 0x01ULL
+#define CONTROL_EVT_LOG_EN 0x02ULL
+#define CONTROL_EVT_INT_EN 0x03ULL
+#define CONTROL_COMWAIT_EN 0x04ULL
+#define CONTROL_PASSPW_EN 0x08ULL
+#define CONTROL_RESPASSPW_EN 0x09ULL
+#define CONTROL_COHERENT_EN 0x0aULL
+#define CONTROL_ISOC_EN 0x0bULL
+#define CONTROL_CMDBUF_EN 0x0cULL
+#define CONTROL_PPFLOG_EN 0x0dULL
+#define CONTROL_PPFINT_EN 0x0eULL
+
+/* command specific defines */
+#define CMD_COMPL_WAIT 0x01
+#define CMD_INV_DEV_ENTRY 0x02
+#define CMD_INV_IOMMU_PAGES 0x03
+#define CMD_INV_IOTLB_PAGES 0x04
+#define CMD_INV_ALL 0x08
+
+#define CMD_COMPL_WAIT_STORE_MASK 0x01
+#define CMD_COMPL_WAIT_INT_MASK 0x02
+#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
+#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
+
+#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL
+
+/* macros and definitions for device table entries */
+#define DEV_ENTRY_VALID 0x00
+#define DEV_ENTRY_TRANSLATION 0x01
+#define DEV_ENTRY_IR 0x3d
+#define DEV_ENTRY_IW 0x3e
+#define DEV_ENTRY_NO_PAGE_FAULT 0x62
+#define DEV_ENTRY_EX 0x67
+#define DEV_ENTRY_SYSMGT1 0x68
+#define DEV_ENTRY_SYSMGT2 0x69
+#define DEV_ENTRY_INIT_PASS 0xb8
+#define DEV_ENTRY_EINT_PASS 0xb9
+#define DEV_ENTRY_NMI_PASS 0xba
+#define DEV_ENTRY_LINT0_PASS 0xbe
+#define DEV_ENTRY_LINT1_PASS 0xbf
+#define DEV_ENTRY_MODE_MASK 0x07
+#define DEV_ENTRY_MODE_SHIFT 0x09
+
+/* constants to configure the command buffer */
+#define CMD_BUFFER_SIZE 8192
+#define CMD_BUFFER_UNINITIALIZED 1
+#define CMD_BUFFER_ENTRIES 512
+#define MMIO_CMD_SIZE_SHIFT 56
+#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
+
+/* constants for event buffer handling */
+#define EVT_BUFFER_SIZE 8192 /* 512 entries */
+#define EVT_LEN_MASK (0x9ULL << 56)
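+
+/*
+ * Note (derived from the constants above): both buffers hold 512 entries
+ * of 16 bytes each (8192 bytes total), and the 0x9 written into the size
+ * field at bit 56 matches 2^9 = 512 entries.
+ */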
+
+#define PAGE_MODE_NONE 0x00
+#define PAGE_MODE_1_LEVEL 0x01
+#define PAGE_MODE_2_LEVEL 0x02
+#define PAGE_MODE_3_LEVEL 0x03
+#define PAGE_MODE_4_LEVEL 0x04
+#define PAGE_MODE_5_LEVEL 0x05
+#define PAGE_MODE_6_LEVEL 0x06
+
+#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9))
+#define PM_LEVEL_SIZE(x) (((x) < 6) ? \
+ ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
+ (0xffffffffffffffffULL))
+#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
+#define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL)
+#define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \
+ IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
+#define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL)
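+
+/*
+ * Worked example (pure arithmetic on the macros above): PM_LEVEL_SHIFT(0)
+ * is 12, PM_LEVEL_SHIFT(1) is 21 and PM_LEVEL_SHIFT(2) is 30, so
+ * PM_LEVEL_INDEX(2, address) extracts the 9-bit index held in bits 30..38
+ * of the I/O virtual address.
+ */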
+
+#define PM_MAP_4k 0
+#define PM_ADDR_MASK 0x000ffffffffff000ULL
+#define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \
+ (~((1ULL << (12 + ((lvl) * 9))) - 1)))
+#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr))
+
+/*
+ * Returns the page table level to use for a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_LEVEL(pagesize) \
+ ((__ffs(pagesize) - 12) / 9)
+/*
+ * Returns the number of ptes to use for a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_PTE_COUNT(pagesize) \
+ (1ULL << ((__ffs(pagesize) - 12) % 9))
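+
+/*
+ * Worked example (illustrative): for a 32K page __ffs(0x8000) is 15, so
+ * PAGE_SIZE_LEVEL() is (15 - 12) / 9 = 0 and PAGE_SIZE_PTE_COUNT() is
+ * 1 << ((15 - 12) % 9) = 8 level-0 PTEs; a 2M page gives level 1 and a
+ * single PTE.
+ */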
+
+/*
+ * Aligns a given io-virtual address to a given page size
+ * Pagesize is expected to be a power-of-two
+ */
+#define PAGE_SIZE_ALIGN(address, pagesize) \
+ ((address) & ~((pagesize) - 1))
+/*
+ * Creates an IOMMU PTE for an address and a given pagesize
+ * The PTE has no permission bits set
+ * Pagesize is expected to be a power-of-two larger than 4096
+ */
+#define PAGE_SIZE_PTE(address, pagesize) \
+ (((address) | ((pagesize) - 1)) & \
+ (~(pagesize >> 1)) & PM_ADDR_MASK)
+
+/*
+ * Takes a PTE value with mode=0x07 and returns the page size it maps
+ */
+#define PTE_PAGE_SIZE(pte) \
+ (1ULL << (1 + ffz(((pte) | 0xfffULL))))
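+
+/*
+ * Worked example (illustrative): PAGE_SIZE_PTE(0x40000000, 2M) yields the
+ * address field 0x400ff000 (all low address bits up to, but excluding,
+ * bit 20 are set), and PTE_PAGE_SIZE() of such a PTE finds bit 20 as the
+ * first zero bit and returns 1 << 21 = 2M again.
+ */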
+
+#define IOMMU_PTE_P (1ULL << 0)
+#define IOMMU_PTE_TV (1ULL << 1)
+#define IOMMU_PTE_U (1ULL << 59)
+#define IOMMU_PTE_FC (1ULL << 60)
+#define IOMMU_PTE_IR (1ULL << 61)
+#define IOMMU_PTE_IW (1ULL << 62)
+
+#define DTE_FLAG_IOTLB 0x01
+
+#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
+#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
+#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
+#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
+
+#define IOMMU_PROT_MASK 0x03
+#define IOMMU_PROT_IR 0x01
+#define IOMMU_PROT_IW 0x02
+
+/* IOMMU capabilities */
+#define IOMMU_CAP_IOTLB 24
+#define IOMMU_CAP_NPCACHE 26
+#define IOMMU_CAP_EFR 27
+
+#define MAX_DOMAIN_ID 65536
+
+/* FIXME: move this macro to <linux/pci.h> */
+#define PCI_BUS(x) (((x) >> 8) & 0xff)
+
+/* Protection domain flags */
+#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */
+#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops
+ domain for an IOMMU */
+#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
+ translation */
+
+extern bool amd_iommu_dump;
+#define DUMP_printk(format, arg...) \
+ do { \
+ if (amd_iommu_dump) \
+ printk(KERN_INFO "AMD-Vi: " format, ## arg); \
+ } while (0)
+
+/* global flag if IOMMUs cache non-present entries */
+extern bool amd_iommu_np_cache;
+/* Only true if all IOMMUs support device IOTLBs */
+extern bool amd_iommu_iotlb_sup;
+
+/*
+ * Make iterating over all IOMMUs easier
+ */
+#define for_each_iommu(iommu) \
+ list_for_each_entry((iommu), &amd_iommu_list, list)
+#define for_each_iommu_safe(iommu, next) \
+ list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
+
+#define APERTURE_RANGE_SHIFT 27 /* 128 MB */
+#define APERTURE_RANGE_SIZE (1ULL << APERTURE_RANGE_SHIFT)
+#define APERTURE_RANGE_PAGES (APERTURE_RANGE_SIZE >> PAGE_SHIFT)
+#define APERTURE_MAX_RANGES 32 /* allows 4GB of DMA address space */
+#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
+#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
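+
+/*
+ * Worked example (illustrative): for the I/O virtual address 0x09400000,
+ * APERTURE_RANGE_INDEX() is 1 (the second 128 MB range) and
+ * APERTURE_PAGE_INDEX() is 0x0a, i.e. the 11th of the 64 pte_pages slots
+ * inside that range, each of which covers 2 MB.
+ */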
+
+/*
+ * This structure contains generic data for IOMMU protection domains
+ * independent of their use.
+ */
+struct protection_domain {
+ struct list_head list; /* for list of all protection domains */
+ struct list_head dev_list; /* List of all devices in this domain */
+ spinlock_t lock; /* mostly used to lock the page table*/
+ struct mutex api_lock; /* protect page tables in the iommu-api path */
+ u16 id; /* the domain id written to the device table */
+ int mode; /* paging mode (0-6 levels) */
+ u64 *pt_root; /* page table root pointer */
+ unsigned long flags; /* flags to find out type of domain */
+ bool updated; /* complete domain flush required */
+ unsigned dev_cnt; /* devices assigned to this domain */
+ unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
+ void *priv; /* private data */
+
+};
+
+/*
+ * This struct contains device specific data for the IOMMU
+ */
+struct iommu_dev_data {
+ struct list_head list; /* For domain->dev_list */
+ struct list_head dev_data_list; /* For global dev_data_list */
+ struct iommu_dev_data *alias_data;/* The alias dev_data */
+ struct protection_domain *domain; /* Domain the device is bound to */
+ atomic_t bind; /* Domain attach reference count */
+ u16 devid; /* PCI Device ID */
+ struct {
+ bool enabled;
+ int qdep;
+ } ats; /* ATS state */
+};
+
+/*
+ * For dynamic growth the aperture size is split into ranges of 128MB of
+ * DMA address space each. This struct represents one such range.
+ */
+struct aperture_range {
+
+ /* address allocation bitmap */
+ unsigned long *bitmap;
+
+ /*
+ * Array of PTE pages for the aperture. In this array we save all the
+ * leaf pages of the domain page table used for the aperture. This way
+ * we don't need to walk the page table to find a specific PTE. We can
+ * just calculate its address in constant time.
+ */
+ u64 *pte_pages[64];
+
+ unsigned long offset;
+};
+
+/*
+ * Data container for a dma_ops specific protection domain
+ */
+struct dma_ops_domain {
+ struct list_head list;
+
+ /* generic protection domain information */
+ struct protection_domain domain;
+
+ /* size of the aperture for the mappings */
+ unsigned long aperture_size;
+
+ /* address we start to search for free addresses */
+ unsigned long next_address;
+
+ /* address space relevant data */
+ struct aperture_range *aperture[APERTURE_MAX_RANGES];
+
+ /* This will be set to true when TLB needs to be flushed */
+ bool need_flush;
+
+ /*
+ * if this is a preallocated domain, keep the device for which it was
+ * preallocated in this variable
+ */
+ u16 target_dev;
+};
+
+/*
+ * Structure where we save information about one hardware AMD IOMMU in the
+ * system.
+ */
+struct amd_iommu {
+ struct list_head list;
+
+ /* Index within the IOMMU array */
+ int index;
+
+ /* locks the accesses to the hardware */
+ spinlock_t lock;
+
+ /* Pointer to PCI device of this IOMMU */
+ struct pci_dev *dev;
+
+ /* physical address of MMIO space */
+ u64 mmio_phys;
+ /* virtual address of MMIO space */
+ u8 *mmio_base;
+
+ /* capabilities of that IOMMU read from ACPI */
+ u32 cap;
+
+ /* flags read from acpi table */
+ u8 acpi_flags;
+
+ /* Extended features */
+ u64 features;
+
+ /*
+ * Capability pointer. There could be more than one IOMMU per PCI
+ * device function if there is more than one AMD IOMMU capability
+ * pointer.
+ */
+ u16 cap_ptr;
+
+ /* pci domain of this IOMMU */
+ u16 pci_seg;
+
+ /* first device this IOMMU handles. read from PCI */
+ u16 first_device;
+ /* last device this IOMMU handles. read from PCI */
+ u16 last_device;
+
+ /* start of exclusion range of that IOMMU */
+ u64 exclusion_start;
+ /* length of exclusion range of that IOMMU */
+ u64 exclusion_length;
+
+ /* command buffer virtual address */
+ u8 *cmd_buf;
+ /* size of command buffer */
+ u32 cmd_buf_size;
+
+ /* size of event buffer */
+ u32 evt_buf_size;
+ /* event buffer virtual address */
+ u8 *evt_buf;
+ /* MSI number for event interrupt */
+ u16 evt_msi_num;
+
+ /* true if interrupts for this IOMMU are already enabled */
+ bool int_enabled;
+
+ /* if one, we need to send a completion wait command */
+ bool need_sync;
+
+ /* default dma_ops domain for that IOMMU */
+ struct dma_ops_domain *default_dom;
+
+ /*
+ * We can't rely on the BIOS to restore all values on reinit, so we
+ * need to stash them
+ */
+
+ /* The iommu BAR */
+ u32 stored_addr_lo;
+ u32 stored_addr_hi;
+
+ /*
+ * Each iommu has 6 l1s, each of which is documented as having 0x12
+ * registers
+ */
+ u32 stored_l1[6][0x12];
+
+ /* The l2 indirect registers */
+ u32 stored_l2[0x83];
+};
+
+/*
+ * List with all IOMMUs in the system. This list is not locked because it is
+ * only written and read at driver initialization or suspend time
+ */
+extern struct list_head amd_iommu_list;
+
+/*
+ * Array with pointers to each IOMMU struct
+ * The indices are referenced in the protection domains
+ */
+extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
+
+/* Number of IOMMUs present in the system */
+extern int amd_iommus_present;
+
+/*
+ * Declarations for the global list of all protection domains
+ */
+extern spinlock_t amd_iommu_pd_lock;
+extern struct list_head amd_iommu_pd_list;
+
+/*
+ * Structure defining one entry in the device table
+ */
+struct dev_table_entry {
+ u32 data[8];
+};
+
+/*
+ * One entry for unity mappings parsed out of the ACPI table.
+ */
+struct unity_map_entry {
+ struct list_head list;
+
+ /* starting device id this entry is used for (including) */
+ u16 devid_start;
+ /* end device id this entry is used for (including) */
+ u16 devid_end;
+
+ /* start address to unity map (including) */
+ u64 address_start;
+ /* end address to unity map (including) */
+ u64 address_end;
+
+ /* required protection */
+ int prot;
+};
+
+/*
+ * List of all unity mappings. It is not locked because at runtime it is only
+ * read. It is created at ACPI table parsing time.
+ */
+extern struct list_head amd_iommu_unity_map;
+
+/*
+ * Data structures for device handling
+ */
+
+/*
+ * Device table used by hardware. Read and write accesses by software are
+ * locked with the amd_iommu_pd_table lock.
+ */
+extern struct dev_table_entry *amd_iommu_dev_table;
+
+/*
+ * Alias table to map requestor ids to device ids. Not locked because it is
+ * only read at runtime.
+ */
+extern u16 *amd_iommu_alias_table;
+
+/*
+ * Reverse lookup table to find the IOMMU which translates a specific device.
+ */
+extern struct amd_iommu **amd_iommu_rlookup_table;
+
+/* size of the dma_ops aperture as power of 2 */
+extern unsigned amd_iommu_aperture_order;
+
+/* largest PCI device id we expect translation requests for */
+extern u16 amd_iommu_last_bdf;
+
+/* allocation bitmap for domain ids */
+extern unsigned long *amd_iommu_pd_alloc_bitmap;
+
+/*
+ * If true, the addresses will be flushed at unmap time, not when
+ * they are reused
+ */
+extern bool amd_iommu_unmap_flush;
+
+/* takes bus and device/function and returns the device id
+ * FIXME: should that be in generic PCI code? */
+static inline u16 calc_devid(u8 bus, u8 devfn)
+{
+ return (((u16)bus) << 8) | devfn;
+}
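+
+/*
+ * Worked example (illustrative): calc_devid(0x01, 0x08) returns the device
+ * id 0x0108 (bus 0x01, slot 1, function 0), and PCI_BUS(0x0108) gives the
+ * bus number 0x01 back.
+ */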
+
+#ifdef CONFIG_AMD_IOMMU_STATS
+
+struct __iommu_counter {
+ char *name;
+ struct dentry *dent;
+ u64 value;
+};
+
+#define DECLARE_STATS_COUNTER(nm) \
+ static struct __iommu_counter nm = { \
+ .name = #nm, \
+ }
+
+#define INC_STATS_COUNTER(name) name.value += 1
+#define ADD_STATS_COUNTER(name, x) name.value += (x)
+#define SUB_STATS_COUNTER(name, x) name.value -= (x)
+
+#else /* CONFIG_AMD_IOMMU_STATS */
+
+#define DECLARE_STATS_COUNTER(name)
+#define INC_STATS_COUNTER(name)
+#define ADD_STATS_COUNTER(name, x)
+#define SUB_STATS_COUNTER(name, x)
+
+#endif /* CONFIG_AMD_IOMMU_STATS */
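+
+/*
+ * Illustrative use of the stats helpers above (the counter name here is
+ * only an example):
+ *
+ *	DECLARE_STATS_COUNTER(cnt_map_sg);
+ *	...
+ *	INC_STATS_COUNTER(cnt_map_sg);
+ */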
+
+#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
diff --git a/drivers/pci/dmar.c b/drivers/iommu/dmar.c
index 3dc9befa5ae..3dc9befa5ae 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/iommu/dmar.c
diff --git a/drivers/pci/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f02c34d26d1..c621c98c99d 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -42,7 +42,6 @@
#include <linux/pci-ats.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
-#include "pci.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
diff --git a/drivers/pci/intr_remapping.c b/drivers/iommu/intr_remapping.c
index 3607faf28a4..1a89d4a2cad 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -13,7 +13,6 @@
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
-#include "pci.h"
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
diff --git a/drivers/pci/intr_remapping.h b/drivers/iommu/intr_remapping.h
index 5662fecfee6..5662fecfee6 100644
--- a/drivers/pci/intr_remapping.h
+++ b/drivers/iommu/intr_remapping.h
diff --git a/drivers/base/iommu.c b/drivers/iommu/iommu.c
index 6e6b6a11b3c..6e6b6a11b3c 100644
--- a/drivers/base/iommu.c
+++ b/drivers/iommu/iommu.c
diff --git a/drivers/pci/iova.c b/drivers/iommu/iova.c
index c5c274ab5c5..c5c274ab5c5 100644
--- a/drivers/pci/iova.c
+++ b/drivers/iommu/iova.c
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
new file mode 100644
index 00000000000..1a584e077c6
--- /dev/null
+++ b/drivers/iommu/msm_iommu.c
@@ -0,0 +1,731 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/clk.h>
+
+#include <asm/cacheflush.h>
+#include <asm/sizes.h>
+
+#include <mach/iommu_hw-8xxx.h>
+#include <mach/iommu.h>
+
+#define MRC(reg, processor, op1, crn, crm, op2) \
+__asm__ __volatile__ ( \
+" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
+: "=r" (reg))
+
+#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
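+
+/*
+ * CP15 c10/c2 with opcode2 0 and 1 are the ARMv7 Primary Region Remap
+ * Register (PRRR) and Normal Memory Remap Register (NMRR); they describe
+ * the TEX-remap attribute classes that get_tex_class() matches against.
+ */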
+
+static int msm_iommu_tex_class[4];
+
+DEFINE_SPINLOCK(msm_iommu_lock);
+
+struct msm_priv {
+ unsigned long *pgtable;
+ struct list_head list_attached;
+};
+
+static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+ int ret;
+
+ ret = clk_enable(drvdata->pclk);
+ if (ret)
+ goto fail;
+
+ if (drvdata->clk) {
+ ret = clk_enable(drvdata->clk);
+ if (ret)
+ clk_disable(drvdata->pclk);
+ }
+fail:
+ return ret;
+}
+
+static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+ if (drvdata->clk)
+ clk_disable(drvdata->clk);
+ clk_disable(drvdata->pclk);
+}
+
+static int __flush_iotlb(struct iommu_domain *domain)
+{
+ struct msm_priv *priv = domain->priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = 0;
+#ifndef CONFIG_IOMMU_PGTABLES_L2
+ unsigned long *fl_table = priv->pgtable;
+ int i;
+
+ if (!list_empty(&priv->list_attached)) {
+ dmac_flush_range(fl_table, fl_table + SZ_16K);
+
+ for (i = 0; i < NUM_FL_PTE; i++)
+ if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
+ void *sl_table = __va(fl_table[i] &
+ FL_BASE_MASK);
+ dmac_flush_range(sl_table, sl_table + SZ_4K);
+ }
+ }
+#endif
+
+ list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+ if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
+ BUG();
+
+ iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+ BUG_ON(!iommu_drvdata);
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
+ __disable_clocks(iommu_drvdata);
+ }
+fail:
+ return ret;
+}
+
+static void __reset_context(void __iomem *base, int ctx)
+{
+ SET_BPRCOSH(base, ctx, 0);
+ SET_BPRCISH(base, ctx, 0);
+ SET_BPRCNSH(base, ctx, 0);
+ SET_BPSHCFG(base, ctx, 0);
+ SET_BPMTCFG(base, ctx, 0);
+ SET_ACTLR(base, ctx, 0);
+ SET_SCTLR(base, ctx, 0);
+ SET_FSRRESTORE(base, ctx, 0);
+ SET_TTBR0(base, ctx, 0);
+ SET_TTBR1(base, ctx, 0);
+ SET_TTBCR(base, ctx, 0);
+ SET_BFBCR(base, ctx, 0);
+ SET_PAR(base, ctx, 0);
+ SET_FAR(base, ctx, 0);
+ SET_CTX_TLBIALL(base, ctx, 0);
+ SET_TLBFLPTER(base, ctx, 0);
+ SET_TLBSLPTER(base, ctx, 0);
+ SET_TLBLKCR(base, ctx, 0);
+ SET_PRRR(base, ctx, 0);
+ SET_NMRR(base, ctx, 0);
+}
+
+static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
+{
+ unsigned int prrr, nmrr;
+ __reset_context(base, ctx);
+
+ /* Set up HTW mode */
+ /* TLB miss configuration: perform HTW on miss */
+ SET_TLBMCFG(base, ctx, 0x3);
+
+ /* V2P configuration: HTW for access */
+ SET_V2PCFG(base, ctx, 0x3);
+
+ SET_TTBCR(base, ctx, 0);
+ SET_TTBR0_PA(base, ctx, (pgtable >> 14));
+
+ /* Invalidate the TLB for this context */
+ SET_CTX_TLBIALL(base, ctx, 0);
+
+ /* Set interrupt number to "secure" interrupt */
+ SET_IRPTNDX(base, ctx, 0);
+
+ /* Enable context fault interrupt */
+ SET_CFEIE(base, ctx, 1);
+
+ /* Stall access on a context fault and let the handler deal with it */
+ SET_CFCFG(base, ctx, 1);
+
+ /* Redirect all cacheable requests to L2 slave port. */
+ SET_RCISH(base, ctx, 1);
+ SET_RCOSH(base, ctx, 1);
+ SET_RCNSH(base, ctx, 1);
+
+ /* Turn on TEX Remap */
+ SET_TRE(base, ctx, 1);
+
+ /* Set TEX remap attributes */
+ RCP15_PRRR(prrr);
+ RCP15_NMRR(nmrr);
+ SET_PRRR(base, ctx, prrr);
+ SET_NMRR(base, ctx, nmrr);
+
+ /* Turn on BFB prefetch */
+ SET_BFBDFE(base, ctx, 1);
+
+#ifdef CONFIG_IOMMU_PGTABLES_L2
+ /* Configure page tables as inner-cacheable and shareable to reduce
+ * the TLB miss penalty.
+ */
+ SET_TTBR0_SH(base, ctx, 1);
+ SET_TTBR1_SH(base, ctx, 1);
+
+ SET_TTBR0_NOS(base, ctx, 1);
+ SET_TTBR1_NOS(base, ctx, 1);
+
+ SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
+ SET_TTBR0_IRGNL(base, ctx, 1);
+
+ SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
+ SET_TTBR1_IRGNL(base, ctx, 1);
+
+ SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
+ SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
+#endif
+
+ /* Enable the MMU */
+ SET_M(base, ctx, 1);
+}
+
+static int msm_iommu_domain_init(struct iommu_domain *domain)
+{
+ struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+
+ if (!priv)
+ goto fail_nomem;
+
+ INIT_LIST_HEAD(&priv->list_attached);
+ priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
+ get_order(SZ_16K));
+
+ if (!priv->pgtable)
+ goto fail_nomem;
+
+ memset(priv->pgtable, 0, SZ_16K);
+ domain->priv = priv;
+ return 0;
+
+fail_nomem:
+ kfree(priv);
+ return -ENOMEM;
+}
+
+static void msm_iommu_domain_destroy(struct iommu_domain *domain)
+{
+ struct msm_priv *priv;
+ unsigned long flags;
+ unsigned long *fl_table;
+ int i;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+ priv = domain->priv;
+ domain->priv = NULL;
+
+ if (priv) {
+ fl_table = priv->pgtable;
+
+ for (i = 0; i < NUM_FL_PTE; i++)
+ if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
+ free_page((unsigned long) __va(((fl_table[i]) &
+ FL_BASE_MASK)));
+
+ free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
+ priv->pgtable = NULL;
+ }
+
+ kfree(priv);
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+}
+
+static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ struct msm_priv *priv;
+ struct msm_iommu_ctx_dev *ctx_dev;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_ctx_drvdata *tmp_drvdata;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+
+ priv = domain->priv;
+
+ if (!priv || !dev) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ ctx_dev = dev->platform_data;
+
+ if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (!list_empty(&ctx_drvdata->attached_elm)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
+ if (tmp_drvdata == ctx_drvdata) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ __program_context(iommu_drvdata->base, ctx_dev->num,
+ __pa(priv->pgtable));
+
+ __disable_clocks(iommu_drvdata);
+ list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
+ ret = __flush_iotlb(domain);
+
+fail:
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+ return ret;
+}
+
+static void msm_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct msm_priv *priv;
+ struct msm_iommu_ctx_dev *ctx_dev;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+ priv = domain->priv;
+
+ if (!priv || !dev)
+ goto fail;
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ ctx_dev = dev->platform_data;
+
+ if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
+ goto fail;
+
+ ret = __flush_iotlb(domain);
+ if (ret)
+ goto fail;
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ __reset_context(iommu_drvdata->base, ctx_dev->num);
+ __disable_clocks(iommu_drvdata);
+ list_del_init(&ctx_drvdata->attached_elm);
+
+fail:
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+}
+
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+ phys_addr_t pa, int order, int prot)
+{
+ struct msm_priv *priv;
+ unsigned long flags;
+ unsigned long *fl_table;
+ unsigned long *fl_pte;
+ unsigned long fl_offset;
+ unsigned long *sl_table;
+ unsigned long *sl_pte;
+ unsigned long sl_offset;
+ unsigned int pgprot;
+ size_t len = 0x1000UL << order;
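+ /* order is log2 of the number of 4K pages: 0 -> 4K, 4 -> 64K, 8 -> 1M, 12 -> 16M */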
+ int ret = 0, tex, sh;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+
+ sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
+ tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];
+
+ if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ priv = domain->priv;
+ if (!priv) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ fl_table = priv->pgtable;
+
+ if (len != SZ_16M && len != SZ_1M &&
+ len != SZ_64K && len != SZ_4K) {
+ pr_debug("Bad size: %d\n", len);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (!fl_table) {
+ pr_debug("Null page table\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (len == SZ_16M || len == SZ_1M) {
+ pgprot = sh ? FL_SHARED : 0;
+ pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
+ pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
+ pgprot |= tex & 0x04 ? FL_TEX0 : 0;
+ } else {
+ pgprot = sh ? SL_SHARED : 0;
+ pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
+ pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
+ pgprot |= tex & 0x04 ? SL_TEX0 : 0;
+ }
+
+ fl_offset = FL_OFFSET(va); /* Upper 12 bits */
+ fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
+
+ if (len == SZ_16M) {
+ int i = 0;
+ for (i = 0; i < 16; i++)
+ *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
+ FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
+ FL_SHARED | FL_NG | pgprot;
+ }
+
+ if (len == SZ_1M)
+ *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
+ FL_TYPE_SECT | FL_SHARED | pgprot;
+
+ /* Need a 2nd level table */
+ if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
+ unsigned long *sl;
+ sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
+ get_order(SZ_4K));
+
+ if (!sl) {
+ pr_debug("Could not allocate second level table\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ memset(sl, 0, SZ_4K);
+ *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
+ }
+
+ sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
+ sl_offset = SL_OFFSET(va);
+ sl_pte = sl_table + sl_offset;
+
+
+ if (len == SZ_4K)
+ *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
+ SL_SHARED | SL_TYPE_SMALL | pgprot;
+
+ if (len == SZ_64K) {
+ int i;
+
+ for (i = 0; i < 16; i++)
+ *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
+ SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
+ }
+
+ ret = __flush_iotlb(domain);
+fail:
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+ return ret;
+}
+
+static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+ int order)
+{
+ struct msm_priv *priv;
+ unsigned long flags;
+ unsigned long *fl_table;
+ unsigned long *fl_pte;
+ unsigned long fl_offset;
+ unsigned long *sl_table;
+ unsigned long *sl_pte;
+ unsigned long sl_offset;
+ size_t len = 0x1000UL << order;
+ int i, ret = 0;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+
+ priv = domain->priv;
+
+ if (!priv) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ fl_table = priv->pgtable;
+
+ if (len != SZ_16M && len != SZ_1M &&
+ len != SZ_64K && len != SZ_4K) {
+ pr_debug("Bad length: %d\n", len);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (!fl_table) {
+ pr_debug("Null page table\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ fl_offset = FL_OFFSET(va); /* Upper 12 bits */
+ fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
+
+ if (*fl_pte == 0) {
+ pr_debug("First level PTE is 0\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ /* Unmap supersection */
+ if (len == SZ_16M)
+ for (i = 0; i < 16; i++)
+ *(fl_pte+i) = 0;
+
+ if (len == SZ_1M)
+ *fl_pte = 0;
+
+ sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
+ sl_offset = SL_OFFSET(va);
+ sl_pte = sl_table + sl_offset;
+
+ if (len == SZ_64K) {
+ for (i = 0; i < 16; i++)
+ *(sl_pte+i) = 0;
+ }
+
+ if (len == SZ_4K)
+ *sl_pte = 0;
+
+ if (len == SZ_4K || len == SZ_64K) {
+ int used = 0;
+
+ for (i = 0; i < NUM_SL_PTE; i++)
+ if (sl_table[i])
+ used = 1;
+ if (!used) {
+ free_page((unsigned long)sl_table);
+ *fl_pte = 0;
+ }
+ }
+
+ ret = __flush_iotlb(domain);
+fail:
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+ return ret;
+}
+
+static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
+ unsigned long va)
+{
+ struct msm_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ unsigned int par;
+ unsigned long flags;
+ void __iomem *base;
+ phys_addr_t ret = 0;
+ int ctx;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+
+ priv = domain->priv;
+ if (list_empty(&priv->list_attached))
+ goto fail;
+
+ ctx_drvdata = list_entry(priv->list_attached.next,
+ struct msm_iommu_ctx_drvdata, attached_elm);
+ iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+
+ base = iommu_drvdata->base;
+ ctx = ctx_drvdata->num;
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ /* Invalidate context TLB */
+ SET_CTX_TLBIALL(base, ctx, 0);
+ SET_V2PPR(base, ctx, va & V2Pxx_VA);
+
+ par = GET_PAR(base, ctx);
+
+ /* We are dealing with a supersection */
+ if (GET_NOFAULT_SS(base, ctx))
+ ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
+ else /* Upper 20 bits from PAR, lower 12 from VA */
+ ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
+
+ if (GET_FAULT(base, ctx))
+ ret = 0;
+
+ __disable_clocks(iommu_drvdata);
+fail:
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+ return ret;
+}
+
+static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
+ unsigned long cap)
+{
+ return 0;
+}
+
+static void print_ctx_regs(void __iomem *base, int ctx)
+{
+ unsigned int fsr = GET_FSR(base, ctx);
+ pr_err("FAR = %08x PAR = %08x\n",
+ GET_FAR(base, ctx), GET_PAR(base, ctx));
+ pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
+ (fsr & 0x02) ? "TF " : "",
+ (fsr & 0x04) ? "AFF " : "",
+ (fsr & 0x08) ? "APF " : "",
+ (fsr & 0x10) ? "TLBMF " : "",
+ (fsr & 0x20) ? "HTWDEEF " : "",
+ (fsr & 0x40) ? "HTWSEEF " : "",
+ (fsr & 0x80) ? "MHF " : "",
+ (fsr & 0x10000) ? "SL " : "",
+ (fsr & 0x40000000) ? "SS " : "",
+ (fsr & 0x80000000) ? "MULTI " : "");
+
+ pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
+ GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
+ pr_err("TTBR0 = %08x TTBR1 = %08x\n",
+ GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
+ pr_err("SCTLR = %08x ACTLR = %08x\n",
+ GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
+ pr_err("PRRR = %08x NMRR = %08x\n",
+ GET_PRRR(base, ctx), GET_NMRR(base, ctx));
+}
+
+irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
+{
+ struct msm_iommu_drvdata *drvdata = dev_id;
+ void __iomem *base;
+ unsigned int fsr;
+ int i, ret;
+
+ spin_lock(&msm_iommu_lock);
+
+ if (!drvdata) {
+ pr_err("Invalid device ID in context interrupt handler\n");
+ goto fail;
+ }
+
+ base = drvdata->base;
+
+ pr_err("Unexpected IOMMU page fault!\n");
+ pr_err("base = %08x\n", (unsigned int) base);
+
+ ret = __enable_clocks(drvdata);
+ if (ret)
+ goto fail;
+
+ for (i = 0; i < drvdata->ncb; i++) {
+ fsr = GET_FSR(base, i);
+ if (fsr) {
+ pr_err("Fault occurred in context %d.\n", i);
+ pr_err("Interesting registers:\n");
+ print_ctx_regs(base, i);
+ SET_FSR(base, i, 0x4000000F);
+ }
+ }
+ __disable_clocks(drvdata);
+fail:
+ spin_unlock(&msm_iommu_lock);
+ return 0;
+}
+
+static struct iommu_ops msm_iommu_ops = {
+ .domain_init = msm_iommu_domain_init,
+ .domain_destroy = msm_iommu_domain_destroy,
+ .attach_dev = msm_iommu_attach_dev,
+ .detach_dev = msm_iommu_detach_dev,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .iova_to_phys = msm_iommu_iova_to_phys,
+ .domain_has_cap = msm_iommu_domain_has_cap
+};
+
+static int __init get_tex_class(int icp, int ocp, int mt, int nos)
+{
+ int i = 0;
+ unsigned int prrr = 0;
+ unsigned int nmrr = 0;
+ int c_icp, c_ocp, c_mt, c_nos;
+
+ RCP15_PRRR(prrr);
+ RCP15_NMRR(nmrr);
+
+ for (i = 0; i < NUM_TEX_CLASS; i++) {
+ c_nos = PRRR_NOS(prrr, i);
+ c_mt = PRRR_MT(prrr, i);
+ c_icp = NMRR_ICP(nmrr, i);
+ c_ocp = NMRR_OCP(nmrr, i);
+
+ if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static void __init setup_iommu_tex_classes(void)
+{
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
+ get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
+ get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
+ get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
+ get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
+}
+
+static int __init msm_iommu_init(void)
+{
+ setup_iommu_tex_classes();
+ register_iommu(&msm_iommu_ops);
+ return 0;
+}
+
+subsys_initcall(msm_iommu_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
new file mode 100644
index 00000000000..8e8fb079852
--- /dev/null
+++ b/drivers/iommu/msm_iommu_dev.c
@@ -0,0 +1,422 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/iommu.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <mach/iommu_hw-8xxx.h>
+#include <mach/iommu.h>
+#include <mach/clk.h>
+
+struct iommu_ctx_iter_data {
+ /* input */
+ const char *name;
+
+ /* output */
+ struct device *dev;
+};
+
+static struct platform_device *msm_iommu_root_dev;
+
+static int each_iommu_ctx(struct device *dev, void *data)
+{
+ struct iommu_ctx_iter_data *res = data;
+ struct msm_iommu_ctx_dev *c = dev->platform_data;
+
+ if (!res || !c || !c->name || !res->name)
+ return -EINVAL;
+
+ if (!strcmp(res->name, c->name)) {
+ res->dev = dev;
+ return 1;
+ }
+ return 0;
+}
+
+static int each_iommu(struct device *dev, void *data)
+{
+ return device_for_each_child(dev, data, each_iommu_ctx);
+}
+
+struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ struct iommu_ctx_iter_data r;
+ int found;
+
+ if (!msm_iommu_root_dev) {
+ pr_err("No root IOMMU device.\n");
+ goto fail;
+ }
+
+ r.name = ctx_name;
+ found = device_for_each_child(&msm_iommu_root_dev->dev, &r, each_iommu);
+
+ if (!found) {
+ pr_err("Could not find context <%s>\n", ctx_name);
+ goto fail;
+ }
+
+ return r.dev;
+fail:
+ return NULL;
+}
+EXPORT_SYMBOL(msm_iommu_get_ctx);
+
+static void msm_iommu_reset(void __iomem *base, int ncb)
+{
+ int ctx;
+
+ SET_RPUE(base, 0);
+ SET_RPUEIE(base, 0);
+ SET_ESRRESTORE(base, 0);
+ SET_TBE(base, 0);
+ SET_CR(base, 0);
+ SET_SPDMBE(base, 0);
+ SET_TESTBUSCR(base, 0);
+ SET_TLBRSW(base, 0);
+ SET_GLOBAL_TLBIALL(base, 0);
+ SET_RPU_ACR(base, 0);
+ SET_TLBLKCRWE(base, 1);
+
+ for (ctx = 0; ctx < ncb; ctx++) {
+ SET_BPRCOSH(base, ctx, 0);
+ SET_BPRCISH(base, ctx, 0);
+ SET_BPRCNSH(base, ctx, 0);
+ SET_BPSHCFG(base, ctx, 0);
+ SET_BPMTCFG(base, ctx, 0);
+ SET_ACTLR(base, ctx, 0);
+ SET_SCTLR(base, ctx, 0);
+ SET_FSRRESTORE(base, ctx, 0);
+ SET_TTBR0(base, ctx, 0);
+ SET_TTBR1(base, ctx, 0);
+ SET_TTBCR(base, ctx, 0);
+ SET_BFBCR(base, ctx, 0);
+ SET_PAR(base, ctx, 0);
+ SET_FAR(base, ctx, 0);
+ SET_CTX_TLBIALL(base, ctx, 0);
+ SET_TLBFLPTER(base, ctx, 0);
+ SET_TLBSLPTER(base, ctx, 0);
+ SET_TLBLKCR(base, ctx, 0);
+ SET_PRRR(base, ctx, 0);
+ SET_NMRR(base, ctx, 0);
+ SET_CONTEXTIDR(base, ctx, 0);
+ }
+}
+
+static int msm_iommu_probe(struct platform_device *pdev)
+{
+ struct resource *r, *r2;
+ struct clk *iommu_clk;
+ struct clk *iommu_pclk;
+ struct msm_iommu_drvdata *drvdata;
+ struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
+ void __iomem *regs_base;
+ resource_size_t len;
+ int ret, irq, par;
+
+ if (pdev->id == -1) {
+ msm_iommu_root_dev = pdev;
+ return 0;
+ }
+
+ drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
+
+ if (!drvdata) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (!iommu_dev) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ iommu_pclk = clk_get(NULL, "smmu_pclk");
+ if (IS_ERR(iommu_pclk)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ret = clk_enable(iommu_pclk);
+ if (ret)
+ goto fail_enable;
+
+ iommu_clk = clk_get(&pdev->dev, "iommu_clk");
+
+ if (!IS_ERR(iommu_clk)) {
+ if (clk_get_rate(iommu_clk) == 0)
+ clk_set_min_rate(iommu_clk, 1);
+
+ ret = clk_enable(iommu_clk);
+ if (ret) {
+ clk_put(iommu_clk);
+ goto fail_pclk;
+ }
+ } else
+ iommu_clk = NULL;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase");
+
+ if (!r) {
+ ret = -ENODEV;
+ goto fail_clk;
+ }
+
+ len = resource_size(r);
+
+ r2 = request_mem_region(r->start, len, r->name);
+ if (!r2) {
+ pr_err("Could not request memory region: start=%p, len=%d\n",
+ (void *) r->start, len);
+ ret = -EBUSY;
+ goto fail_clk;
+ }
+
+ regs_base = ioremap(r2->start, len);
+
+ if (!regs_base) {
+ pr_err("Could not ioremap: start=%p, len=%d\n",
+ (void *) r2->start, len);
+ ret = -EBUSY;
+ goto fail_mem;
+ }
+
+ irq = platform_get_irq_byname(pdev, "secure_irq");
+ if (irq < 0) {
+ ret = -ENODEV;
+ goto fail_io;
+ }
+
+ msm_iommu_reset(regs_base, iommu_dev->ncb);
+
+ SET_M(regs_base, 0, 1);
+ SET_PAR(regs_base, 0, 0);
+ SET_V2PCFG(regs_base, 0, 1);
+ SET_V2PPR(regs_base, 0, 0);
+ par = GET_PAR(regs_base, 0);
+ SET_V2PCFG(regs_base, 0, 0);
+ SET_M(regs_base, 0, 0);
+
+ if (!par) {
+ pr_err("%s: Invalid PAR value detected\n", iommu_dev->name);
+ ret = -ENODEV;
+ goto fail_io;
+ }
+
+ ret = request_irq(irq, msm_iommu_fault_handler, 0,
+ "msm_iommu_secure_irpt_handler", drvdata);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
+ goto fail_io;
+ }
+
+
+ drvdata->pclk = iommu_pclk;
+ drvdata->clk = iommu_clk;
+ drvdata->base = regs_base;
+ drvdata->irq = irq;
+ drvdata->ncb = iommu_dev->ncb;
+
+ pr_info("device %s mapped at %p, irq %d with %d ctx banks\n",
+ iommu_dev->name, regs_base, irq, iommu_dev->ncb);
+
+ platform_set_drvdata(pdev, drvdata);
+
+ if (iommu_clk)
+ clk_disable(iommu_clk);
+
+ clk_disable(iommu_pclk);
+
+ return 0;
+fail_io:
+ iounmap(regs_base);
+fail_mem:
+ release_mem_region(r->start, len);
+fail_clk:
+ if (iommu_clk) {
+ clk_disable(iommu_clk);
+ clk_put(iommu_clk);
+ }
+fail_pclk:
+ clk_disable(iommu_pclk);
+fail_enable:
+ clk_put(iommu_pclk);
+fail:
+ kfree(drvdata);
+ return ret;
+}
+
+static int msm_iommu_remove(struct platform_device *pdev)
+{
+ struct msm_iommu_drvdata *drv = NULL;
+
+ drv = platform_get_drvdata(pdev);
+ if (drv) {
+ if (drv->clk)
+ clk_put(drv->clk);
+ clk_put(drv->pclk);
+ memset(drv, 0, sizeof(*drv));
+ kfree(drv);
+ platform_set_drvdata(pdev, NULL);
+ }
+ return 0;
+}
+
+static int msm_iommu_ctx_probe(struct platform_device *pdev)
+{
+ struct msm_iommu_ctx_dev *c = pdev->dev.platform_data;
+ struct msm_iommu_drvdata *drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata = NULL;
+ int i, ret;
+ if (!c || !pdev->dev.parent) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+
+ if (!drvdata) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ctx_drvdata = kzalloc(sizeof(*ctx_drvdata), GFP_KERNEL);
+ if (!ctx_drvdata) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ ctx_drvdata->num = c->num;
+ ctx_drvdata->pdev = pdev;
+
+ INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
+ platform_set_drvdata(pdev, ctx_drvdata);
+
+ ret = clk_enable(drvdata->pclk);
+ if (ret)
+ goto fail;
+
+ if (drvdata->clk) {
+ ret = clk_enable(drvdata->clk);
+ if (ret) {
+ clk_disable(drvdata->pclk);
+ goto fail;
+ }
+ }
+
+ /* Program the M2V tables for this context */
+ for (i = 0; i < MAX_NUM_MIDS; i++) {
+ int mid = c->mids[i];
+ if (mid == -1)
+ break;
+
+ SET_M2VCBR_N(drvdata->base, mid, 0);
+ SET_CBACR_N(drvdata->base, c->num, 0);
+
+ /* Set VMID = 0 */
+ SET_VMID(drvdata->base, mid, 0);
+
+ /* Set the context number for that MID to this context */
+ SET_CBNDX(drvdata->base, mid, c->num);
+
+ /* Set VMID associated with this context bank to 0 */
+ SET_CBVMID(drvdata->base, c->num, 0);
+
+ /* Set the ASID for TLB tagging for this context */
+ SET_CONTEXTIDR_ASID(drvdata->base, c->num, c->num);
+
+ /* Set security bit override to be Non-secure */
+ SET_NSCFG(drvdata->base, mid, 3);
+ }
+
+ if (drvdata->clk)
+ clk_disable(drvdata->clk);
+ clk_disable(drvdata->pclk);
+
+ dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num);
+ return 0;
+fail:
+ kfree(ctx_drvdata);
+ return ret;
+}
+
+static int msm_iommu_ctx_remove(struct platform_device *pdev)
+{
+ struct msm_iommu_ctx_drvdata *drv = NULL;
+ drv = platform_get_drvdata(pdev);
+ if (drv) {
+ memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata));
+ kfree(drv);
+ platform_set_drvdata(pdev, NULL);
+ }
+ return 0;
+}
+
+static struct platform_driver msm_iommu_driver = {
+ .driver = {
+ .name = "msm_iommu",
+ },
+ .probe = msm_iommu_probe,
+ .remove = msm_iommu_remove,
+};
+
+static struct platform_driver msm_iommu_ctx_driver = {
+ .driver = {
+ .name = "msm_iommu_ctx",
+ },
+ .probe = msm_iommu_ctx_probe,
+ .remove = msm_iommu_ctx_remove,
+};
+
+static int __init msm_iommu_driver_init(void)
+{
+ int ret;
+ ret = platform_driver_register(&msm_iommu_driver);
+ if (ret != 0) {
+ pr_err("Failed to register IOMMU driver\n");
+ goto error;
+ }
+
+ ret = platform_driver_register(&msm_iommu_ctx_driver);
+ if (ret != 0) {
+ pr_err("Failed to register IOMMU context driver\n");
+ goto error;
+ }
+
+error:
+ return ret;
+}
+
+static void __exit msm_iommu_driver_exit(void)
+{
+ platform_driver_unregister(&msm_iommu_ctx_driver);
+ platform_driver_unregister(&msm_iommu_driver);
+}
+
+subsys_initcall(msm_iommu_driver_init);
+module_exit(msm_iommu_driver_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 6dd360734cf..212efaf9a4e 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -34,7 +34,7 @@
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/list.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define GIG_VERSION {0, 5, 0, 0}
#define GIG_COMPAT {0, 4, 0, 0}
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index 472a2af7944..861b6511f3e 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -20,6 +20,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index f6f3c87cc7c..a440d7fff0a 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -152,6 +152,7 @@
#define HFC_MULTI_VERSION "2.03"
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index b01a7be1300..3261de18a91 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -44,6 +44,7 @@
*
*/
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index bc0529ac88a..6218775ce87 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -38,6 +38,7 @@
*
*/
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index 64ecc6f5ffa..d2ffb1d9b83 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -20,6 +20,7 @@
*
*/
+#include <linux/irqreturn.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mISDNhw.h>
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index db25b6b2ae3..5ef9f11ee74 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -20,6 +20,7 @@
*
*/
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
index 9e07246bb9e..4d0d41ea122 100644
--- a/drivers/isdn/hardware/mISDN/speedfax.c
+++ b/drivers/isdn/hardware/mISDN/speedfax.c
@@ -22,6 +22,7 @@
*
*/
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 9e84870b971..e10e0284533 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index de1c669c7b1..0a5c42a3f12 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/mman.h>
+#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/wait.h>
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 8b0a7d86b30..478ebab54ca 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/isapnp.h>
#include <linux/kmod.h>
diff --git a/drivers/isdn/i4l/isdn_bsdcomp.c b/drivers/isdn/i4l/isdn_bsdcomp.c
index 02d9918705d..aa0b6a6f5ef 100644
--- a/drivers/isdn/i4l/isdn_bsdcomp.c
+++ b/drivers/isdn/i4l/isdn_bsdcomp.c
@@ -155,7 +155,7 @@ struct bsd_db {
#define LAST 255
#define MAXCODE(b) ((1 << (b)) - 1)
-#define BADCODEM1 MAXCODE(MAX_BSD_BITS);
+#define BADCODEM1 MAXCODE(MAX_BSD_BITS)
#define BSD_HASH(prefix,suffix,hshift) ((((unsigned long)(suffix))<<(hshift)) \
^ (unsigned long)(prefix))
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 97988111e45..1f73d7f7e02 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1983,13 +1983,14 @@ isdn_net_rebuild_header(struct sk_buff *skb)
return ret;
}
-static int isdn_header_cache(const struct neighbour *neigh, struct hh_cache *hh)
+static int isdn_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
+ __be16 type)
{
const struct net_device *dev = neigh->dev;
isdn_net_local *lp = netdev_priv(dev);
if (lp->p_encap == ISDN_NET_ENCAP_ETHER)
- return eth_header_cache(neigh, hh);
+ return eth_header_cache(neigh, hh, type);
return -1;
}
@@ -2531,6 +2532,9 @@ static void _isdn_setup(struct net_device *dev)
/* Setup the generic properties */
dev->flags = IFF_NOARP|IFF_POINTOPOINT;
+
+ /* isdn prepends a header in the tx path, can't share skbs */
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->header_ops = NULL;
dev->netdev_ops = &isdn_netdev_ops;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 713d43b4e56..b591e726a6f 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -92,7 +92,7 @@ config LEDS_NET48XX
config LEDS_NET5501
tristate "LED Support for Soekris net5501 series Error LED"
depends on LEDS_TRIGGERS
- depends on X86 && LEDS_GPIO_PLATFORM && GPIO_CS5535
+ depends on X86 && GPIO_CS5535
select LEDS_TRIGGER_DEFAULT_ON
default n
help
@@ -182,23 +182,6 @@ config LEDS_GPIO
defined as platform devices and/or OpenFirmware platform devices.
The code to use these bindings can be selected below.
-config LEDS_GPIO_PLATFORM
- bool "Platform device bindings for GPIO LEDs"
- depends on LEDS_GPIO
- default y
- help
- Let the leds-gpio driver drive LEDs which have been defined as
- platform devices. If you don't know what this means, say yes.
-
-config LEDS_GPIO_OF
- bool "OpenFirmware platform device bindings for GPIO LEDs"
- depends on LEDS_GPIO && OF_DEVICE
- default y
- help
- Let the leds-gpio driver drive LEDs which have been defined as
- of_platform devices. For instance, LEDs which are listed in a "dts"
- file.
-
config LEDS_LP3944
tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
depends on LEDS_CLASS
@@ -382,6 +365,7 @@ config LEDS_NS2
config LEDS_NETXBIG
tristate "LED support for Big Network series LEDs"
depends on MACH_NET2BIG_V2 || MACH_NET5BIG_V2
+ depends on LEDS_CLASS
default y
help
This option enable support for LEDs found on the LaCie 2Big
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index b0480c8fbcb..3d8bc327a68 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -165,7 +165,7 @@ static inline int sizeof_gpio_leds_priv(int num_leds)
}
/* Code to create from OpenFirmware platform devices */
-#ifdef CONFIG_LEDS_GPIO_OF
+#ifdef CONFIG_OF_GPIO
static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node, *child;
@@ -223,13 +223,13 @@ static const struct of_device_id of_gpio_leds_match[] = {
{ .compatible = "gpio-leds", },
{},
};
-#else
+#else /* CONFIG_OF_GPIO */
static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_device *pdev)
{
return NULL;
}
#define of_gpio_leds_match NULL
-#endif
+#endif /* CONFIG_OF_GPIO */
static int __devinit gpio_led_probe(struct platform_device *pdev)
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 4d7ce7631ac..3dd7090a9a9 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -68,17 +68,16 @@
#define LM3530_ALS2_IMP_SHIFT (4)
/* Zone Boundary Register defaults */
-#define LM3530_DEF_ZB_0 (0x33)
-#define LM3530_DEF_ZB_1 (0x66)
-#define LM3530_DEF_ZB_2 (0x99)
-#define LM3530_DEF_ZB_3 (0xCC)
+#define LM3530_ALS_ZB_MAX (4)
+#define LM3530_ALS_WINDOW_mV (1000)
+#define LM3530_ALS_OFFSET_mV (4)
/* Zone Target Register defaults */
-#define LM3530_DEF_ZT_0 (0x19)
-#define LM3530_DEF_ZT_1 (0x33)
+#define LM3530_DEF_ZT_0 (0x7F)
+#define LM3530_DEF_ZT_1 (0x66)
#define LM3530_DEF_ZT_2 (0x4C)
-#define LM3530_DEF_ZT_3 (0x66)
-#define LM3530_DEF_ZT_4 (0x7F)
+#define LM3530_DEF_ZT_3 (0x33)
+#define LM3530_DEF_ZT_4 (0x19)
struct lm3530_mode_map {
const char *mode;
@@ -150,6 +149,8 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
u8 als_imp_sel = 0;
u8 brightness;
u8 reg_val[LM3530_REG_MAX];
+ u8 zones[LM3530_ALS_ZB_MAX];
+ u32 als_vmin, als_vmax, als_vstep;
struct lm3530_platform_data *pltfm = drvdata->pdata;
struct i2c_client *client = drvdata->client;
@@ -161,6 +162,26 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
gen_config |= (LM3530_ENABLE_I2C);
if (drvdata->mode == LM3530_BL_MODE_ALS) {
+ if (pltfm->als_vmax == 0) {
+ pltfm->als_vmin = als_vmin = 0;
+ pltfm->als_vmax = als_vmax = LM3530_ALS_WINDOW_mV;
+ }
+
+ als_vmin = pltfm->als_vmin;
+ als_vmax = pltfm->als_vmax;
+
+ if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
+ pltfm->als_vmax = als_vmax =
+ als_vmin + LM3530_ALS_WINDOW_mV;
+
+ /* n zone boundaries make n+1 zones */
+ als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
+
+ for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
+ zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
+ als_vstep + (i * als_vstep)) * LED_FULL)
+ / 1000;
+
als_config =
(pltfm->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
(LM3530_ENABLE_ALS) |
@@ -169,6 +190,7 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
als_imp_sel =
(pltfm->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
(pltfm->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
+
}
if (drvdata->mode == LM3530_BL_MODE_PWM)
@@ -190,10 +212,10 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
reg_val[3] = 0x00; /* LM3530_ALS_ZONE_REG */
reg_val[4] = als_imp_sel; /* LM3530_ALS_IMP_SELECT */
reg_val[5] = brightness; /* LM3530_BRT_CTRL_REG */
- reg_val[6] = LM3530_DEF_ZB_0; /* LM3530_ALS_ZB0_REG */
- reg_val[7] = LM3530_DEF_ZB_1; /* LM3530_ALS_ZB1_REG */
- reg_val[8] = LM3530_DEF_ZB_2; /* LM3530_ALS_ZB2_REG */
- reg_val[9] = LM3530_DEF_ZB_3; /* LM3530_ALS_ZB3_REG */
+ reg_val[6] = zones[0]; /* LM3530_ALS_ZB0_REG */
+ reg_val[7] = zones[1]; /* LM3530_ALS_ZB1_REG */
+ reg_val[8] = zones[2]; /* LM3530_ALS_ZB2_REG */
+ reg_val[9] = zones[3]; /* LM3530_ALS_ZB3_REG */
reg_val[10] = LM3530_DEF_ZT_0; /* LM3530_ALS_Z0T_REG */
reg_val[11] = LM3530_DEF_ZT_1; /* LM3530_ALS_Z1T_REG */
reg_val[12] = LM3530_DEF_ZT_2; /* LM3530_ALS_Z2T_REG */
@@ -265,6 +287,24 @@ static void lm3530_brightness_set(struct led_classdev *led_cdev,
}
}
+static ssize_t lm3530_mode_get(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = container_of(
+ dev->parent, struct i2c_client, dev);
+ struct lm3530_data *drvdata = i2c_get_clientdata(client);
+ int i, len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mode_map); i++)
+ if (drvdata->mode == mode_map[i].mode_val)
+ len += sprintf(buf + len, "[%s] ", mode_map[i].mode);
+ else
+ len += sprintf(buf + len, "%s ", mode_map[i].mode);
+
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute
*attr, const char *buf, size_t size)
@@ -298,8 +338,7 @@ static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute
return sizeof(drvdata->mode);
}
-
-static DEVICE_ATTR(mode, 0644, NULL, lm3530_mode_set);
+static DEVICE_ATTR(mode, 0644, lm3530_mode_get, lm3530_mode_set);
static int __devinit lm3530_probe(struct i2c_client *client,
const struct i2c_device_id *id)
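
A worked example of the zone-boundary arithmetic introduced above, assuming the defaults the driver falls back to when the platform data leaves the ALS window unset (als_vmin = 0, als_vmax = LM3530_ALS_WINDOW_mV = 1000 mV, LM3530_ALS_OFFSET_mV = 4, LED_FULL = 255):

        als_vstep = (1000 - 0) / (LM3530_ALS_ZB_MAX + 1) = 200 mV

        zones[0] = ((0 + 4) + 200 + 0 * 200) * 255 / 1000 =  52  (0x34)
        zones[1] = ((0 + 4) + 200 + 1 * 200) * 255 / 1000 = 103  (0x67)
        zones[2] = ((0 + 4) + 200 + 2 * 200) * 255 / 1000 = 154  (0x9A)
        zones[3] = ((0 + 4) + 200 + 3 * 200) * 255 / 1000 = 205  (0xCD)

That is close to the removed hard-coded LM3530_DEF_ZB_0..3 values (0x33/0x66/0x99/0xCC), but the boundaries are now derived from the ALS voltage window supplied by platform data.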
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index c0cff64a1ae..9fc122c81f0 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -593,7 +593,7 @@ static void lp5521_unregister_sysfs(struct i2c_client *client)
&lp5521_led_attribute_group);
}
-static int __init lp5521_init_led(struct lp5521_led *led,
+static int __devinit lp5521_init_led(struct lp5521_led *led,
struct i2c_client *client,
int chan, struct lp5521_platform_data *pdata)
{
@@ -637,7 +637,7 @@ static int __init lp5521_init_led(struct lp5521_led *led,
return 0;
}
-static int lp5521_probe(struct i2c_client *client,
+static int __devinit lp5521_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lp5521_chip *chip;
@@ -744,7 +744,7 @@ fail1:
return ret;
}
-static int lp5521_remove(struct i2c_client *client)
+static int __devexit lp5521_remove(struct i2c_client *client)
{
struct lp5521_chip *chip = i2c_get_clientdata(client);
int i;
@@ -775,7 +775,7 @@ static struct i2c_driver lp5521_driver = {
.name = "lp5521",
},
.probe = lp5521_probe,
- .remove = lp5521_remove,
+ .remove = __devexit_p(lp5521_remove),
.id_table = lp5521_id,
};
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index e19fed25f13..5971e309b23 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -826,7 +826,7 @@ static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
return 0;
}
-static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
+static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
int chan, struct lp5523_platform_data *pdata)
{
char name[32];
@@ -872,7 +872,7 @@ static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
static struct i2c_driver lp5523_driver;
-static int lp5523_probe(struct i2c_client *client,
+static int __devinit lp5523_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lp5523_chip *chip;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index d8d3a1e910a..a2c874623e3 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -88,7 +88,7 @@ static const struct pca9532_chip_info pca9532_chip_info_tbl[] = {
static struct i2c_driver pca9532_driver = {
.driver = {
- .name = "pca953x",
+ .name = "leds-pca953x",
},
.probe = pca9532_probe,
.remove = pca9532_remove,
diff --git a/drivers/leds/leds-sunfire.c b/drivers/leds/leds-sunfire.c
index ab6d18f5c39..1757396b20b 100644
--- a/drivers/leds/leds-sunfire.c
+++ b/drivers/leds/leds-sunfire.c
@@ -127,17 +127,19 @@ static int __devinit sunfire_led_generic_probe(struct platform_device *pdev,
struct led_type *types)
{
struct sunfire_drvdata *p;
- int i, err = -EINVAL;
+ int i, err;
if (pdev->num_resources != 1) {
printk(KERN_ERR PFX "Wrong number of resources %d, should be 1\n",
pdev->num_resources);
+ err = -EINVAL;
goto out;
}
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
printk(KERN_ERR PFX "Could not allocate struct sunfire_drvdata\n");
+ err = -ENOMEM;
goto out;
}
@@ -160,14 +162,14 @@ static int __devinit sunfire_led_generic_probe(struct platform_device *pdev,
dev_set_drvdata(&pdev->dev, p);
- err = 0;
-out:
- return err;
+ return 0;
out_unregister_led_cdevs:
for (i--; i >= 0; i--)
led_classdev_unregister(&p->leds[i].led_cdev);
- goto out;
+ kfree(p);
+out:
+ return err;
}
static int __devexit sunfire_led_generic_remove(struct platform_device *pdev)
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index efa202499e3..2535933c49f 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -117,7 +117,7 @@ static __init int map_switcher(void)
/*
* Now the Switcher is mapped at the right address, we can't fail!
- * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
+ * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
*/
memcpy(switcher_vma->addr, start_switcher_text,
end_switcher_text - start_switcher_text);
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index daaf8663164..28433a155d6 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -375,11 +375,9 @@ static bool direct_trap(unsigned int num)
/*
* The Host needs to see page faults (for shadow paging and to save the
* fault address), general protection faults (in/out emulation) and
- * device not available (TS handling), invalid opcode fault (kvm hcall),
- * and of course, the hypercall trap.
+ * device not available (TS handling) and of course, the hypercall trap.
*/
- return num != 14 && num != 13 && num != 7 &&
- num != 6 && num != LGUEST_TRAP_ENTRY;
+ return num != 14 && num != 13 && num != 7 && num != LGUEST_TRAP_ENTRY;
}
/*:*/
@@ -429,8 +427,8 @@ void pin_stack_pages(struct lg_cpu *cpu)
/*
* Direct traps also mean that we need to know whenever the Guest wants to use
- * a different kernel stack, so we can change the IDT entries to use that
- * stack. The IDT entries expect a virtual address, so unlike most addresses
+ * a different kernel stack, so we can change the guest TSS to use that
+ * stack. The TSS entries expect a virtual address, so unlike most addresses
* the Guest gives us, the "esp" (stack pointer) value here is virtual, not
* physical.
*
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 9136411fadd..295df06e659 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -59,6 +59,8 @@ struct lg_cpu {
struct lguest_pages *last_pages;
+ /* Initialization mode: linear map everything. */
+ bool linear_pages;
int cpu_pgd; /* Which pgd this cpu is currently using */
/* If a hypercall was asked for, this points to the arguments. */
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 69c84a1d88e..5289ffa2e50 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -109,6 +109,17 @@ static u32 lg_get_features(struct virtio_device *vdev)
}
/*
+ * To notify on reset or feature finalization, we (ab)use the NOTIFY
+ * hypercall, with the descriptor address of the device.
+ */
+static void status_notify(struct virtio_device *vdev)
+{
+ unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices;
+
+ hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0);
+}
+
+/*
* The virtio core takes the features the Host offers, and copies the ones
* supported by the driver into the vdev->features array. Once that's all
* sorted out, this routine is called so we can tell the Host which features we
@@ -135,6 +146,9 @@ static void lg_finalize_features(struct virtio_device *vdev)
if (test_bit(i, vdev->features))
out_features[i / 8] |= (1 << (i % 8));
}
+
+ /* Tell Host we've finished with this device's feature negotiation */
+ status_notify(vdev);
}
/* Once they've found a field, getting a copy of it is easy. */
@@ -168,28 +182,21 @@ static u8 lg_get_status(struct virtio_device *vdev)
return to_lgdev(vdev)->desc->status;
}
-/*
- * To notify on status updates, we (ab)use the NOTIFY hypercall, with the
- * descriptor address of the device. A zero status means "reset".
- */
-static void set_status(struct virtio_device *vdev, u8 status)
-{
- unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices;
-
- /* We set the status. */
- to_lgdev(vdev)->desc->status = status;
- hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0);
-}
-
static void lg_set_status(struct virtio_device *vdev, u8 status)
{
BUG_ON(!status);
- set_status(vdev, status);
+ to_lgdev(vdev)->desc->status = status;
+
+ /* Tell Host immediately if we failed. */
+ if (status & VIRTIO_CONFIG_S_FAILED)
+ status_notify(vdev);
}
static void lg_reset(struct virtio_device *vdev)
{
- set_status(vdev, 0);
+ /* 0 status means "reset" */
+ to_lgdev(vdev)->desc->status = 0;
+ status_notify(vdev);
}
/*
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 948c547b8e9..f97e625241a 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -1,8 +1,10 @@
-/*P:200 This contains all the /dev/lguest code, whereby the userspace launcher
- * controls and communicates with the Guest. For example, the first write will
- * tell us the Guest's memory layout and entry point. A read will run the
- * Guest until something happens, such as a signal or the Guest doing a NOTIFY
- * out to the Launcher.
+/*P:200 This contains all the /dev/lguest code, whereby the userspace
+ * launcher controls and communicates with the Guest. For example,
+ * the first write will tell us the Guest's memory layout and entry
+ * point. A read will run the Guest until something happens, such as
+ * a signal or the Guest doing a NOTIFY out to the Launcher. There is
+ * also a way for the Launcher to attach eventfds to particular NOTIFY
+ * values instead of returning from the read() call.
:*/
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
@@ -357,8 +359,8 @@ static int initialize(struct file *file, const unsigned long __user *input)
goto free_eventfds;
/*
- * Initialize the Guest's shadow page tables, using the toplevel
- * address the Launcher gave us. This allocates memory, so can fail.
+ * Initialize the Guest's shadow page tables. This allocates
+ * memory, so can fail.
*/
err = init_guest_pagetable(lg);
if (err)
@@ -516,6 +518,7 @@ static const struct file_operations lguest_fops = {
.read = read,
.llseek = default_llseek,
};
+/*:*/
/*
* This is a textbook example of a "misc" character device. Populate a "struct
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index d21578ee95d..3b62be160a6 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -17,7 +17,6 @@
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
-#include <asm/bootparam.h>
#include "lg.h"
/*M:008
@@ -156,7 +155,7 @@ static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
}
/*
- * These functions are just like the above two, except they access the Guest
+ * These functions are just like the above, except they access the Guest
* page tables. Hence they return a Guest address.
*/
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
@@ -196,7 +195,7 @@ static unsigned long gpte_addr(struct lg_cpu *cpu,
#endif
/*:*/
-/*M:014
+/*M:007
* get_pfn is slow: we could probably try to grab batches of pages here as
* an optimization (ie. pre-faulting).
:*/
@@ -325,10 +324,15 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
#endif
/* First step: get the top-level Guest page table entry. */
- gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
- /* Toplevel not present? We can't map it in. */
- if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
- return false;
+ if (unlikely(cpu->linear_pages)) {
+ /* Faking up a linear mapping. */
+ gpgd = __pgd(CHECK_GPGD_MASK);
+ } else {
+ gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
+ /* Toplevel not present? We can't map it in. */
+ if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
+ return false;
+ }
/* Now look at the matching shadow entry. */
spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
@@ -353,10 +357,15 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
}
#ifdef CONFIG_X86_PAE
- gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
- /* Middle level not present? We can't map it in. */
- if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
- return false;
+ if (unlikely(cpu->linear_pages)) {
+ /* Faking up a linear mapping. */
+ gpmd = __pmd(_PAGE_TABLE);
+ } else {
+ gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
+ /* Middle level not present? We can't map it in. */
+ if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+ return false;
+ }
/* Now look at the matching shadow entry. */
spmd = spmd_addr(cpu, *spgd, vaddr);
@@ -397,8 +406,13 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
#endif
- /* Read the actual PTE value. */
- gpte = lgread(cpu, gpte_ptr, pte_t);
+ if (unlikely(cpu->linear_pages)) {
+ /* Linear? Make up a PTE which points to the same page. */
+ gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
+ } else {
+ /* Read the actual PTE value. */
+ gpte = lgread(cpu, gpte_ptr, pte_t);
+ }
/* If this page isn't in the Guest page tables, we can't page it in. */
if (!(pte_flags(gpte) & _PAGE_PRESENT))
@@ -454,7 +468,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
* Finally, we write the Guest PTE entry back: we've set the
* _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
*/
- lgwrite(cpu, gpte_ptr, pte_t, gpte);
+ if (likely(!cpu->linear_pages))
+ lgwrite(cpu, gpte_ptr, pte_t, gpte);
/*
* The fault is fixed, the page table is populated, the mapping
@@ -612,6 +627,11 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
#ifdef CONFIG_X86_PAE
pmd_t gpmd;
#endif
+
+ /* Still not set up? Just map 1:1. */
+ if (unlikely(cpu->linear_pages))
+ return vaddr;
+
/* First step: get the top-level Guest page table entry. */
gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
/* Toplevel not present? We can't map it in. */
@@ -708,32 +728,6 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
return next;
}
-/*H:430
- * (iv) Switching page tables
- *
- * Now we've seen all the page table setting and manipulation, let's see
- * what happens when the Guest changes page tables (ie. changes the top-level
- * pgdir). This occurs on almost every context switch.
- */
-void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
-{
- int newpgdir, repin = 0;
-
- /* Look to see if we have this one already. */
- newpgdir = find_pgdir(cpu->lg, pgtable);
- /*
- * If not, we allocate or mug an existing one: if it's a fresh one,
- * repin gets set to 1.
- */
- if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
- newpgdir = new_pgdir(cpu, pgtable, &repin);
- /* Change the current pgd index to the new one. */
- cpu->cpu_pgd = newpgdir;
- /* If it was completely blank, we map in the Guest kernel stack */
- if (repin)
- pin_stack_pages(cpu);
-}
-
/*H:470
* Finally, a routine which throws away everything: all PGD entries in all
* the shadow page tables, including the Guest's kernel mappings. This is used
@@ -780,6 +774,44 @@ void guest_pagetable_clear_all(struct lg_cpu *cpu)
/* We need the Guest kernel stack mapped again. */
pin_stack_pages(cpu);
}
+
+/*H:430
+ * (iv) Switching page tables
+ *
+ * Now we've seen all the page table setting and manipulation, let's see
+ * what happens when the Guest changes page tables (ie. changes the top-level
+ * pgdir). This occurs on almost every context switch.
+ */
+void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
+{
+ int newpgdir, repin = 0;
+
+ /*
+ * The very first time they call this, we're actually running without
+ * any page tables; we've been making it up. Throw them away now.
+ */
+ if (unlikely(cpu->linear_pages)) {
+ release_all_pagetables(cpu->lg);
+ cpu->linear_pages = false;
+ /* Force allocation of a new pgdir. */
+ newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
+ } else {
+ /* Look to see if we have this one already. */
+ newpgdir = find_pgdir(cpu->lg, pgtable);
+ }
+
+ /*
+ * If not, we allocate or mug an existing one: if it's a fresh one,
+ * repin gets set to 1.
+ */
+ if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
+ newpgdir = new_pgdir(cpu, pgtable, &repin);
+ /* Change the current pgd index to the new one. */
+ cpu->cpu_pgd = newpgdir;
+ /* If it was completely blank, we map in the Guest kernel stack */
+ if (repin)
+ pin_stack_pages(cpu);
+}
/*:*/
/*M:009
@@ -919,168 +951,26 @@ void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
}
#endif
-/*H:505
- * To get through boot, we construct simple identity page mappings (which
- * set virtual == physical) and linear mappings which will get the Guest far
- * enough into the boot to create its own. The linear mapping means we
- * simplify the Guest boot, but it makes assumptions about their PAGE_OFFSET,
- * as you'll see.
- *
- * We lay them out of the way, just below the initrd (which is why we need to
- * know its size here).
- */
-static unsigned long setup_pagetables(struct lguest *lg,
- unsigned long mem,
- unsigned long initrd_size)
-{
- pgd_t __user *pgdir;
- pte_t __user *linear;
- unsigned long mem_base = (unsigned long)lg->mem_base;
- unsigned int mapped_pages, i, linear_pages;
-#ifdef CONFIG_X86_PAE
- pmd_t __user *pmds;
- unsigned int j;
- pgd_t pgd;
- pmd_t pmd;
-#else
- unsigned int phys_linear;
-#endif
-
- /*
- * We have mapped_pages frames to map, so we need linear_pages page
- * tables to map them.
- */
- mapped_pages = mem / PAGE_SIZE;
- linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;
-
- /* We put the toplevel page directory page at the top of memory. */
- pgdir = (pgd_t *)(mem + mem_base - initrd_size - PAGE_SIZE);
-
- /* Now we use the next linear_pages pages as pte pages */
- linear = (void *)pgdir - linear_pages * PAGE_SIZE;
-
-#ifdef CONFIG_X86_PAE
- /*
- * And the single mid page goes below that. We only use one, but
- * that's enough to map 1G, which definitely gets us through boot.
- */
- pmds = (void *)linear - PAGE_SIZE;
-#endif
- /*
- * Linear mapping is easy: put every page's address into the
- * mapping in order.
- */
- for (i = 0; i < mapped_pages; i++) {
- pte_t pte;
- pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER));
- if (copy_to_user(&linear[i], &pte, sizeof(pte)) != 0)
- return -EFAULT;
- }
-
-#ifdef CONFIG_X86_PAE
- /*
- * Make the Guest PMD entries point to the corresponding place in the
- * linear mapping (up to one page worth of PMD).
- */
- for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD;
- i += PTRS_PER_PTE, j++) {
- pmd = pfn_pmd(((unsigned long)&linear[i] - mem_base)/PAGE_SIZE,
- __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
-
- if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0)
- return -EFAULT;
- }
-
- /* One PGD entry, pointing to that PMD page. */
- pgd = __pgd(((unsigned long)pmds - mem_base) | _PAGE_PRESENT);
- /* Copy it in as the first PGD entry (ie. addresses 0-1G). */
- if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0)
- return -EFAULT;
- /*
- * And the other PGD entry to make the linear mapping at PAGE_OFFSET
- */
- if (copy_to_user(&pgdir[KERNEL_PGD_BOUNDARY], &pgd, sizeof(pgd)))
- return -EFAULT;
-#else
- /*
- * The top level points to the linear page table pages above.
- * We setup the identity and linear mappings here.
- */
- phys_linear = (unsigned long)linear - mem_base;
- for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
- pgd_t pgd;
- /*
- * Create a PGD entry which points to the right part of the
- * linear PTE pages.
- */
- pgd = __pgd((phys_linear + i * sizeof(pte_t)) |
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
-
- /*
- * Copy it into the PGD page at 0 and PAGE_OFFSET.
- */
- if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd))
- || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET)
- + i / PTRS_PER_PTE],
- &pgd, sizeof(pgd)))
- return -EFAULT;
- }
-#endif
-
- /*
- * We return the top level (guest-physical) address: we remember where
- * this is to write it into lguest_data when the Guest initializes.
- */
- return (unsigned long)pgdir - mem_base;
-}
-
/*H:500
* (vii) Setting up the page tables initially.
*
- * When a Guest is first created, the Launcher tells us where the toplevel of
- * its first page table is. We set some things up here:
+ * When a Guest is first created, we set up a shadow page table which
+ * we will populate on future faults. The Guest doesn't have any actual
+ * pagetables yet, so we set linear_pages to tell demand_page() to fake it
+ * for the moment.
*/
int init_guest_pagetable(struct lguest *lg)
{
- u64 mem;
- u32 initrd_size;
- struct boot_params __user *boot = (struct boot_params *)lg->mem_base;
-#ifdef CONFIG_X86_PAE
- pgd_t *pgd;
- pmd_t *pmd_table;
-#endif
- /*
- * Get the Guest memory size and the ramdisk size from the boot header
- * located at lg->mem_base (Guest address 0).
- */
- if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem))
- || get_user(initrd_size, &boot->hdr.ramdisk_size))
- return -EFAULT;
+ struct lg_cpu *cpu = &lg->cpus[0];
+ int allocated = 0;
- /*
- * We start on the first shadow page table, and give it a blank PGD
- * page.
- */
- lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size);
- if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir))
- return lg->pgdirs[0].gpgdir;
- lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
- if (!lg->pgdirs[0].pgdir)
+ /* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
+ cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
+ if (!allocated)
return -ENOMEM;
-#ifdef CONFIG_X86_PAE
- /* For PAE, we also create the initial mid-level. */
- pgd = lg->pgdirs[0].pgdir;
- pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL);
- if (!pmd_table)
- return -ENOMEM;
-
- set_pgd(pgd + SWITCHER_PGD_INDEX,
- __pgd(__pa(pmd_table) | _PAGE_PRESENT));
-#endif
-
- /* This is the current page table. */
- lg->cpus[0].cpu_pgd = 0;
+ /* We start with a linear mapping until the Guest sets up its own page tables. */
+ cpu->linear_pages = true;
return 0;
}
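
A short sketch of what the linear_pages bootstrap mode set up here means inside demand_page(), using the values from the hunks above (the faulting address is an arbitrary example):

/*
 * With cpu->linear_pages set, demand_page() never reads Guest memory.
 * For a fault at, say, vaddr = 0x00102345 it fabricates:
 *
 *   gpgd = __pgd(CHECK_GPGD_MASK);      - a "present" toplevel entry
 *   gpmd = __pmd(_PAGE_TABLE);          - a present PAE middle level
 *   gpte = __pte((0x00102345 & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
 *
 * i.e. guest-virtual frame 0x00102000 maps to guest-physical frame
 * 0x00102000.  Nothing is written back (the lgwrite() of the
 * accessed/dirty bits is skipped), and the first guest_new_pagetable()
 * call throws these shadow entries away and clears linear_pages.
 */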
@@ -1095,10 +985,10 @@ void page_table_guest_data_init(struct lg_cpu *cpu)
* of virtual addresses used by the Switcher.
*/
|| put_user(RESERVE_MEM * 1024 * 1024,
- &cpu->lg->lguest_data->reserve_mem)
- || put_user(cpu->lg->pgdirs[0].gpgdir,
- &cpu->lg->lguest_data->pgdir))
+ &cpu->lg->lguest_data->reserve_mem)) {
kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
+ return;
+ }
/*
* In flush_user_mappings() we loop from 0 to
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 9f1659c3d1f..65af42f2d59 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -269,10 +269,10 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
static int emulate_insn(struct lg_cpu *cpu)
{
u8 insn;
- unsigned int insnlen = 0, in = 0, shift = 0;
+ unsigned int insnlen = 0, in = 0, small_operand = 0;
/*
* The eip contains the *virtual* address of the Guest's instruction:
- * guest_pa just subtracts the Guest's page_offset.
+ * walk the Guest's page tables to find the "physical" address.
*/
unsigned long physaddr = guest_pa(cpu, cpu->regs->eip);
@@ -300,11 +300,10 @@ static int emulate_insn(struct lg_cpu *cpu)
}
/*
- * 0x66 is an "operand prefix". It means it's using the upper 16 bits
- * of the eax register.
+ * 0x66 is an "operand prefix". It means a 16, not 32 bit in/out.
*/
if (insn == 0x66) {
- shift = 16;
+ small_operand = 1;
/* The instruction is 1 byte so far, read the next byte. */
insnlen = 1;
insn = lgread(cpu, physaddr + insnlen, u8);
@@ -340,11 +339,14 @@ static int emulate_insn(struct lg_cpu *cpu)
* traditionally means "there's nothing there".
*/
if (in) {
- /* Lower bit tells is whether it's a 16 or 32 bit access */
- if (insn & 0x1)
- cpu->regs->eax = 0xFFFFFFFF;
- else
- cpu->regs->eax |= (0xFFFF << shift);
+ /* Lower bit tells us whether it's a 16- or 32-bit access */
+ if (insn & 0x1) {
+ if (small_operand)
+ cpu->regs->eax |= 0xFFFF;
+ else
+ cpu->regs->eax = 0xFFFFFFFF;
+ } else
+ cpu->regs->eax |= 0xFF;
}
/* Finally, we've "done" the instruction, so move past it. */
cpu->regs->eip += insnlen;
@@ -352,69 +354,6 @@ static int emulate_insn(struct lg_cpu *cpu)
return 1;
}
-/*
- * Our hypercalls mechanism used to be based on direct software interrupts.
- * After Anthony's "Refactor hypercall infrastructure" kvm patch, we decided to
- * change over to using kvm hypercalls.
- *
- * KVM_HYPERCALL is actually a "vmcall" instruction, which generates an invalid
- * opcode fault (fault 6) on non-VT cpus, so the easiest solution seemed to be
- * an *emulation approach*: if the fault was really produced by an hypercall
- * (is_hypercall() does exactly this check), we can just call the corresponding
- * hypercall host implementation function.
- *
- * But these invalid opcode faults are notably slower than software interrupts.
- * So we implemented the *patching (or rewriting) approach*: every time we hit
- * the KVM_HYPERCALL opcode in Guest code, we patch it to the old "int 0x1f"
- * opcode, so next time the Guest calls this hypercall it will use the
- * faster trap mechanism.
- *
- * Matias even benchmarked it to convince you: this shows the average cycle
- * cost of a hypercall. For each alternative solution mentioned above we've
- * made 5 runs of the benchmark:
- *
- * 1) direct software interrupt: 2915, 2789, 2764, 2721, 2898
- * 2) emulation technique: 3410, 3681, 3466, 3392, 3780
- * 3) patching (rewrite) technique: 2977, 2975, 2891, 2637, 2884
- *
- * One two-line function is worth a 20% hypercall speed boost!
- */
-static void rewrite_hypercall(struct lg_cpu *cpu)
-{
- /*
- * This are the opcodes we use to patch the Guest. The opcode for "int
- * $0x1f" is "0xcd 0x1f" but vmcall instruction is 3 bytes long, so we
- * complete the sequence with a NOP (0x90).
- */
- u8 insn[3] = {0xcd, 0x1f, 0x90};
-
- __lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn));
- /*
- * The above write might have caused a copy of that page to be made
- * (if it was read-only). We need to make sure the Guest has
- * up-to-date pagetables. As this doesn't happen often, we can just
- * drop them all.
- */
- guest_pagetable_clear_all(cpu);
-}
-
-static bool is_hypercall(struct lg_cpu *cpu)
-{
- u8 insn[3];
-
- /*
- * This must be the Guest kernel trying to do something.
- * The bottom two bits of the CS segment register are the privilege
- * level.
- */
- if ((cpu->regs->cs & 3) != GUEST_PL)
- return false;
-
- /* Is it a vmcall? */
- __lgread(cpu, insn, guest_pa(cpu, cpu->regs->eip), sizeof(insn));
- return insn[0] == 0x0f && insn[1] == 0x01 && insn[2] == 0xc1;
-}
-
/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
void lguest_arch_handle_trap(struct lg_cpu *cpu)
{
@@ -429,20 +368,6 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
if (emulate_insn(cpu))
return;
}
- /*
- * If KVM is active, the vmcall instruction triggers a General
- * Protection Fault. Normally it triggers an invalid opcode
- * fault (6):
- */
- case 6:
- /*
- * We need to check if ring == GUEST_PL and faulting
- * instruction == vmcall.
- */
- if (is_hypercall(cpu)) {
- rewrite_hypercall(cpu);
- return;
- }
break;
case 14: /* We've intercepted a Page Fault. */
/*
@@ -486,7 +411,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
* These values mean a real interrupt occurred, in which case
* the Host handler has already been run. We just do a
* friendly check if another process should now be run, then
- * return to run the Guest again
+ * return to run the Guest again.
*/
cond_resched();
return;
@@ -536,7 +461,7 @@ void __init lguest_arch_host_init(void)
int i;
/*
- * Most of the i386/switcher.S doesn't care that it's been moved; on
+ * Most of the x86/switcher_32.S doesn't care that it's been moved; on
* Intel, jumps are relative, and it doesn't access any references to
* external code or data.
*
@@ -664,7 +589,7 @@ void __init lguest_arch_host_init(void)
clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
}
put_online_cpus();
-};
+}
/*:*/
void __exit lguest_arch_host_fini(void)
@@ -747,8 +672,6 @@ int lguest_arch_init_hypercalls(struct lg_cpu *cpu)
/*:*/
/*L:030
- * lguest_arch_setup_regs()
- *
* Most of the Guest's registers are left alone: we used get_zeroed_page() to
* allocate the structure, so they will be 0.
*/
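
The net effect of the operand-prefix rework in emulate_insn() above, for an IN from a port nobody emulates (a sketch derived from the hunk; the prior EAX contents are whatever the Guest had there):

        in %dx,%al    (opcode 0xEC, bit 0 clear)   ->  AL  = 0xFF, rest of EAX preserved
        in %dx,%ax    (0x66 prefix, opcode 0xED)   ->  AX  = 0xFFFF, upper 16 bits preserved
        in %dx,%eax   (opcode 0xED)                ->  EAX = 0xFFFFFFFF

The removed code keyed only on bit 0 of the opcode, so a 16-bit IN filled all of EAX and a byte IN filled 16 bits; this change makes the fill match the access width.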
diff --git a/drivers/macintosh/nvram.c b/drivers/macintosh/nvram.c
index a271c8218d8..f0e03e7937e 100644
--- a/drivers/macintosh/nvram.c
+++ b/drivers/macintosh/nvram.c
@@ -21,12 +21,16 @@
static loff_t nvram_llseek(struct file *file, loff_t offset, int origin)
{
switch (origin) {
+ case 0:
+ break;
case 1:
offset += file->f_pos;
break;
case 2:
offset += NVRAM_SIZE;
break;
+ default:
+ offset = -1;
}
if (offset < 0)
return -EINVAL;
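
The effect of the added cases, sketched with the matching lseek() origins (0/1/2 are SEEK_SET/SEEK_CUR/SEEK_END):

        lseek(fd, 16, SEEK_SET)  ->  f_pos = 16
        lseek(fd, 16, SEEK_CUR)  ->  f_pos += 16
        lseek(fd, -4, SEEK_END)  ->  f_pos = NVRAM_SIZE - 4
        lseek(fd, 16, 3)         ->  -EINVAL (previously an unknown origin fell
                                      through and was treated like SEEK_SET)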
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 8420129fc5e..f75a66e7d31 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -241,12 +241,13 @@ config DM_MIRROR
needed for live data migration tools such as 'pvmove'.
config DM_RAID
- tristate "RAID 4/5/6 target (EXPERIMENTAL)"
+ tristate "RAID 1/4/5/6 target (EXPERIMENTAL)"
depends on BLK_DEV_DM && EXPERIMENTAL
+ select MD_RAID1
select MD_RAID456
select BLK_DEV_MD
---help---
- A dm target that supports RAID4, RAID5 and RAID6 mappings
+ A dm target that supports RAID1, RAID4, RAID5 and RAID6 mappings
A RAID-5 set of N drives with a capacity of C MB per drive provides
the capacity of C * (N - 1) MB, and protects against a failure
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 574b09afedd..0dc6546b77a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -29,7 +29,6 @@
#include "md.h"
#include "bitmap.h"
-#include <linux/dm-dirty-log.h>
/* debug macros */
#define DEBUG 0
@@ -775,10 +774,8 @@ static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned lon
* 0 or page 1
*/
static inline struct page *filemap_get_page(struct bitmap *bitmap,
- unsigned long chunk)
+ unsigned long chunk)
{
- if (bitmap->filemap == NULL)
- return NULL;
if (file_page_index(bitmap, chunk) >= bitmap->file_pages)
return NULL;
return bitmap->filemap[file_page_index(bitmap, chunk)
@@ -878,28 +875,19 @@ enum bitmap_page_attr {
static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
enum bitmap_page_attr attr)
{
- if (page)
- __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
- else
- __set_bit(attr, &bitmap->logattrs);
+ __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
}
static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
enum bitmap_page_attr attr)
{
- if (page)
- __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
- else
- __clear_bit(attr, &bitmap->logattrs);
+ __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
}
static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
enum bitmap_page_attr attr)
{
- if (page)
- return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
- else
- return test_bit(attr, &bitmap->logattrs);
+ return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
}
/*
@@ -912,30 +900,26 @@ static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *p
static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
unsigned long bit;
- struct page *page = NULL;
+ struct page *page;
void *kaddr;
unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap);
- if (!bitmap->filemap) {
- struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log;
- if (log)
- log->type->mark_region(log, chunk);
- } else {
+ if (!bitmap->filemap)
+ return;
- page = filemap_get_page(bitmap, chunk);
- if (!page)
- return;
- bit = file_page_offset(bitmap, chunk);
+ page = filemap_get_page(bitmap, chunk);
+ if (!page)
+ return;
+ bit = file_page_offset(bitmap, chunk);
- /* set the bit */
- kaddr = kmap_atomic(page, KM_USER0);
- if (bitmap->flags & BITMAP_HOSTENDIAN)
- set_bit(bit, kaddr);
- else
- __test_and_set_bit_le(bit, kaddr);
- kunmap_atomic(kaddr, KM_USER0);
- PRINTK("set file bit %lu page %lu\n", bit, page->index);
- }
+ /* set the bit */
+ kaddr = kmap_atomic(page, KM_USER0);
+ if (bitmap->flags & BITMAP_HOSTENDIAN)
+ set_bit(bit, kaddr);
+ else
+ __set_bit_le(bit, kaddr);
+ kunmap_atomic(kaddr, KM_USER0);
+ PRINTK("set file bit %lu page %lu\n", bit, page->index);
/* record page number so it gets flushed to disk when unplug occurs */
set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
}
@@ -952,16 +936,6 @@ void bitmap_unplug(struct bitmap *bitmap)
if (!bitmap)
return;
- if (!bitmap->filemap) {
- /* Must be using a dirty_log */
- struct dm_dirty_log *log = bitmap->mddev->bitmap_info.log;
- dirty = test_and_clear_bit(BITMAP_PAGE_DIRTY, &bitmap->logattrs);
- need_write = test_and_clear_bit(BITMAP_PAGE_NEEDWRITE, &bitmap->logattrs);
- if (dirty || need_write)
- if (log->type->flush(log))
- bitmap->flags |= BITMAP_WRITE_ERROR;
- goto out;
- }
/* look at each page to see if there are any set bits that need to be
* flushed out to disk */
@@ -990,7 +964,6 @@ void bitmap_unplug(struct bitmap *bitmap)
else
md_super_wait(bitmap->mddev);
}
-out:
if (bitmap->flags & BITMAP_WRITE_ERROR)
bitmap_file_kick(bitmap);
}
@@ -1199,7 +1172,6 @@ void bitmap_daemon_work(mddev_t *mddev)
struct page *page = NULL, *lastpage = NULL;
sector_t blocks;
void *paddr;
- struct dm_dirty_log *log = mddev->bitmap_info.log;
/* Use a mutex to guard daemon_work against
* bitmap_destroy.
@@ -1224,12 +1196,11 @@ void bitmap_daemon_work(mddev_t *mddev)
spin_lock_irqsave(&bitmap->lock, flags);
for (j = 0; j < bitmap->chunks; j++) {
bitmap_counter_t *bmc;
- if (!bitmap->filemap) {
- if (!log)
- /* error or shutdown */
- break;
- } else
- page = filemap_get_page(bitmap, j);
+ if (!bitmap->filemap)
+ /* error or shutdown */
+ break;
+
+ page = filemap_get_page(bitmap, j);
if (page != lastpage) {
/* skip this page unless it's marked as needing cleaning */
@@ -1298,17 +1269,16 @@ void bitmap_daemon_work(mddev_t *mddev)
-1);
/* clear the bit */
- if (page) {
- paddr = kmap_atomic(page, KM_USER0);
- if (bitmap->flags & BITMAP_HOSTENDIAN)
- clear_bit(file_page_offset(bitmap, j),
- paddr);
- else
- __test_and_clear_bit_le(file_page_offset(bitmap, j),
- paddr);
- kunmap_atomic(paddr, KM_USER0);
- } else
- log->type->clear_region(log, j);
+ paddr = kmap_atomic(page, KM_USER0);
+ if (bitmap->flags & BITMAP_HOSTENDIAN)
+ clear_bit(file_page_offset(bitmap, j),
+ paddr);
+ else
+ __clear_bit_le(
+ file_page_offset(bitmap,
+ j),
+ paddr);
+ kunmap_atomic(paddr, KM_USER0);
}
} else
j |= PAGE_COUNTER_MASK;
@@ -1316,16 +1286,12 @@ void bitmap_daemon_work(mddev_t *mddev)
spin_unlock_irqrestore(&bitmap->lock, flags);
/* now sync the final page */
- if (lastpage != NULL || log != NULL) {
+ if (lastpage != NULL) {
spin_lock_irqsave(&bitmap->lock, flags);
if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
- if (lastpage)
- write_page(bitmap, lastpage, 0);
- else
- if (log->type->flush(log))
- bitmap->flags |= BITMAP_WRITE_ERROR;
+ write_page(bitmap, lastpage, 0);
} else {
set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
spin_unlock_irqrestore(&bitmap->lock, flags);
@@ -1767,12 +1733,10 @@ int bitmap_create(mddev_t *mddev)
BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
if (!file
- && !mddev->bitmap_info.offset
- && !mddev->bitmap_info.log) /* bitmap disabled, nothing to do */
+ && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */
return 0;
BUG_ON(file && mddev->bitmap_info.offset);
- BUG_ON(mddev->bitmap_info.offset && mddev->bitmap_info.log);
bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
if (!bitmap)
@@ -1863,6 +1827,7 @@ int bitmap_create(mddev_t *mddev)
int bitmap_load(mddev_t *mddev)
{
int err = 0;
+ sector_t start = 0;
sector_t sector = 0;
struct bitmap *bitmap = mddev->bitmap;
@@ -1881,24 +1846,14 @@ int bitmap_load(mddev_t *mddev)
}
bitmap_close_sync(bitmap);
- if (mddev->bitmap_info.log) {
- unsigned long i;
- struct dm_dirty_log *log = mddev->bitmap_info.log;
- for (i = 0; i < bitmap->chunks; i++)
- if (!log->type->in_sync(log, i, 1))
- bitmap_set_memory_bits(bitmap,
- (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap),
- 1);
- } else {
- sector_t start = 0;
- if (mddev->degraded == 0
- || bitmap->events_cleared == mddev->events)
- /* no need to keep dirty bits to optimise a
- * re-add of a missing device */
- start = mddev->recovery_cp;
-
- err = bitmap_init_from_disk(bitmap, start);
- }
+ if (mddev->degraded == 0
+ || bitmap->events_cleared == mddev->events)
+ /* no need to keep dirty bits to optimise a
+ * re-add of a missing device */
+ start = mddev->recovery_cp;
+
+ err = bitmap_init_from_disk(bitmap, start);
+
if (err)
goto out;
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index b2a127e891a..a28f2e5588c 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -212,10 +212,6 @@ struct bitmap {
unsigned long file_pages; /* number of pages in the file */
int last_page_size; /* bytes in the last page */
- unsigned long logattrs; /* used when filemap_attr doesn't exist
- * because we are working with a dirty_log
- */
-
unsigned long flags;
int allclean;
@@ -237,7 +233,6 @@ struct bitmap {
wait_queue_head_t behind_wait;
struct sysfs_dirent *sysfs_can_clear;
-
};
/* the bitmap API */
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index c8827ffd85b..49da55c1528 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -19,7 +19,7 @@
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/percpu.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>
@@ -30,7 +30,6 @@
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "crypt"
-#define MESG_STR(x) x, sizeof(x)
/*
* context holding the current state of a multi-part conversion
@@ -239,7 +238,7 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
memset(iv, 0, cc->iv_size);
- *(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
+ *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
return 0;
}
@@ -248,7 +247,7 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
memset(iv, 0, cc->iv_size);
- *(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
+ *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
return 0;
}
@@ -415,7 +414,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
memset(iv, 0, cc->iv_size);
- *(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
+ *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
return 0;
@@ -1575,11 +1574,17 @@ bad_mem:
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc;
- unsigned int key_size;
+ unsigned int key_size, opt_params;
unsigned long long tmpll;
int ret;
+ struct dm_arg_set as;
+ const char *opt_string;
+
+ static struct dm_arg _args[] = {
+ {0, 1, "Invalid number of feature args"},
+ };
- if (argc != 5) {
+ if (argc < 5) {
ti->error = "Not enough arguments";
return -EINVAL;
}
@@ -1648,6 +1653,30 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
cc->start = tmpll;
+ argv += 5;
+ argc -= 5;
+
+ /* Optional parameters */
+ if (argc) {
+ as.argc = argc;
+ as.argv = argv;
+
+ ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
+ if (ret)
+ goto bad;
+
+ opt_string = dm_shift_arg(&as);
+
+ if (opt_params == 1 && opt_string &&
+ !strcasecmp(opt_string, "allow_discards"))
+ ti->num_discard_requests = 1;
+ else if (opt_params) {
+ ret = -EINVAL;
+ ti->error = "Invalid feature arguments";
+ goto bad;
+ }
+ }
+
ret = -ENOMEM;
cc->io_queue = alloc_workqueue("kcryptd_io",
WQ_NON_REENTRANT|
@@ -1682,9 +1711,16 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
struct dm_crypt_io *io;
struct crypt_config *cc;
- if (bio->bi_rw & REQ_FLUSH) {
+ /*
+ * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
+ * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
+ * - for REQ_DISCARD caller must use flush if IO ordering matters
+ */
+ if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
cc = ti->private;
bio->bi_bdev = cc->dev->bdev;
+ if (bio_sectors(bio))
+ bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
return DM_MAPIO_REMAPPED;
}
@@ -1727,6 +1763,10 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
cc->dev->name, (unsigned long long)cc->start);
+
+ if (ti->num_discard_requests)
+ DMEMIT(" 1 allow_discards");
+
break;
}
return 0;
@@ -1770,12 +1810,12 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
if (argc < 2)
goto error;
- if (!strnicmp(argv[0], MESG_STR("key"))) {
+ if (!strcasecmp(argv[0], "key")) {
if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
DMWARN("not suspended during key manipulation.");
return -EINVAL;
}
- if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
+ if (argc == 3 && !strcasecmp(argv[1], "set")) {
ret = crypt_set_key(cc, argv[2]);
if (ret)
return ret;
@@ -1783,7 +1823,7 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
ret = cc->iv_gen_ops->init(cc);
return ret;
}
- if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
+ if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
ret = cc->iv_gen_ops->wipe(cc);
if (ret)
@@ -1823,7 +1863,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
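
For illustration, a crypt table line using the new optional feature group might look like the following (device, key and sizes are invented; only the trailing "1 allow_discards" is introduced by this patch):

	# <start> <len> crypt <cipher> <key> <iv_offset> <device> <offset> [<#opt_params> <opt_params>*]
	0 2097152 crypt aes-cbc-essiv:sha256 0123456789abcdef0123456789abcdef 0 /dev/sdb2 0 1 allow_discards

With the feature present the constructor sets ti->num_discard_requests = 1, so device-mapper forwards discards to the target and crypt_map() remaps them straight to the backing device instead of routing them through the crypt queues.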
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index ea790623c30..89f73ca22cf 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2003 Sistina Software (UK) Limited.
- * Copyright (C) 2004, 2010 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
@@ -15,6 +15,9 @@
#define DM_MSG_PREFIX "flakey"
+#define all_corrupt_bio_flags_match(bio, fc) \
+ (((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
+
/*
* Flakey: Used for testing only, simulates intermittent,
* catastrophic device failure.
@@ -25,60 +28,189 @@ struct flakey_c {
sector_t start;
unsigned up_interval;
unsigned down_interval;
+ unsigned long flags;
+ unsigned corrupt_bio_byte;
+ unsigned corrupt_bio_rw;
+ unsigned corrupt_bio_value;
+ unsigned corrupt_bio_flags;
+};
+
+enum feature_flag_bits {
+ DROP_WRITES
};
+static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
+ struct dm_target *ti)
+{
+ int r;
+ unsigned argc;
+ const char *arg_name;
+
+ static struct dm_arg _args[] = {
+ {0, 6, "Invalid number of feature args"},
+ {1, UINT_MAX, "Invalid corrupt bio byte"},
+ {0, 255, "Invalid corrupt value to write into bio byte (0-255)"},
+ {0, UINT_MAX, "Invalid corrupt bio flags mask"},
+ };
+
+ /* No feature arguments supplied. */
+ if (!as->argc)
+ return 0;
+
+ r = dm_read_arg_group(_args, as, &argc, &ti->error);
+ if (r)
+ return r;
+
+ while (argc) {
+ arg_name = dm_shift_arg(as);
+ argc--;
+
+ /*
+ * drop_writes
+ */
+ if (!strcasecmp(arg_name, "drop_writes")) {
+ if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
+ ti->error = "Feature drop_writes duplicated";
+ return -EINVAL;
+ }
+
+ continue;
+ }
+
+ /*
+ * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
+ */
+ if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
+ if (!argc) {
+ ti->error = "Feature corrupt_bio_byte requires parameters";
+ return -EINVAL;
+ }
+
+ r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
+ if (r)
+ return r;
+ argc--;
+
+ /*
+ * Direction r or w?
+ */
+ arg_name = dm_shift_arg(as);
+ if (!strcasecmp(arg_name, "w"))
+ fc->corrupt_bio_rw = WRITE;
+ else if (!strcasecmp(arg_name, "r"))
+ fc->corrupt_bio_rw = READ;
+ else {
+ ti->error = "Invalid corrupt bio direction (r or w)";
+ return -EINVAL;
+ }
+ argc--;
+
+ /*
+ * Value of byte (0-255) to write in place of correct one.
+ */
+ r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error);
+ if (r)
+ return r;
+ argc--;
+
+ /*
+ * Only corrupt bios with these flags set.
+ */
+ r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error);
+ if (r)
+ return r;
+ argc--;
+
+ continue;
+ }
+
+ ti->error = "Unrecognised flakey feature requested";
+ return -EINVAL;
+ }
+
+ if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
+ ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
- * Construct a flakey mapping: <dev_path> <offset> <up interval> <down interval>
+ * Construct a flakey mapping:
+ * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*]
+ *
+ * Feature args:
+ * [drop_writes]
+ * [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>]
+ *
+ * Nth_byte starts from 1 for the first byte.
+ * Direction is r for READ or w for WRITE.
+ * bio_flags is ignored if 0.
*/
static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
+ static struct dm_arg _args[] = {
+ {0, UINT_MAX, "Invalid up interval"},
+ {0, UINT_MAX, "Invalid down interval"},
+ };
+
+ int r;
struct flakey_c *fc;
- unsigned long long tmp;
+ unsigned long long tmpll;
+ struct dm_arg_set as;
+ const char *devname;
- if (argc != 4) {
- ti->error = "dm-flakey: Invalid argument count";
+ as.argc = argc;
+ as.argv = argv;
+
+ if (argc < 4) {
+ ti->error = "Invalid argument count";
return -EINVAL;
}
- fc = kmalloc(sizeof(*fc), GFP_KERNEL);
+ fc = kzalloc(sizeof(*fc), GFP_KERNEL);
if (!fc) {
- ti->error = "dm-flakey: Cannot allocate linear context";
+ ti->error = "Cannot allocate linear context";
return -ENOMEM;
}
fc->start_time = jiffies;
- if (sscanf(argv[1], "%llu", &tmp) != 1) {
- ti->error = "dm-flakey: Invalid device sector";
+ devname = dm_shift_arg(&as);
+
+ if (sscanf(dm_shift_arg(&as), "%llu", &tmpll) != 1) {
+ ti->error = "Invalid device sector";
goto bad;
}
- fc->start = tmp;
+ fc->start = tmpll;
- if (sscanf(argv[2], "%u", &fc->up_interval) != 1) {
- ti->error = "dm-flakey: Invalid up interval";
+ r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error);
+ if (r)
goto bad;
- }
- if (sscanf(argv[3], "%u", &fc->down_interval) != 1) {
- ti->error = "dm-flakey: Invalid down interval";
+ r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error);
+ if (r)
goto bad;
- }
if (!(fc->up_interval + fc->down_interval)) {
- ti->error = "dm-flakey: Total (up + down) interval is zero";
+ ti->error = "Total (up + down) interval is zero";
goto bad;
}
if (fc->up_interval + fc->down_interval < fc->up_interval) {
- ti->error = "dm-flakey: Interval overflow";
+ ti->error = "Interval overflow";
goto bad;
}
- if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &fc->dev)) {
- ti->error = "dm-flakey: Device lookup failed";
+ r = parse_features(&as, fc, ti);
+ if (r)
+ goto bad;
+
+ if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) {
+ ti->error = "Device lookup failed";
goto bad;
}
ti->num_flush_requests = 1;
+ ti->num_discard_requests = 1;
ti->private = fc;
return 0;
@@ -99,7 +231,7 @@ static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector)
{
struct flakey_c *fc = ti->private;
- return fc->start + (bi_sector - ti->begin);
+ return fc->start + dm_target_offset(ti, bi_sector);
}
static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
@@ -111,6 +243,25 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
}
+static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
+{
+ unsigned bio_bytes = bio_cur_bytes(bio);
+ char *data = bio_data(bio);
+
+ /*
+ * Overwrite the Nth byte of the data returned.
+ */
+ if (data && bio_bytes >= fc->corrupt_bio_byte) {
+ data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
+
+ DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
+ "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
+ bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
+ (bio_data_dir(bio) == WRITE) ? 'w' : 'r',
+ bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
+ }
+}
+
static int flakey_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
@@ -119,18 +270,71 @@ static int flakey_map(struct dm_target *ti, struct bio *bio,
/* Are we alive ? */
elapsed = (jiffies - fc->start_time) / HZ;
- if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval)
+ if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
+ /*
+ * Flag this bio as submitted while down.
+ */
+ map_context->ll = 1;
+
+ /*
+ * Map reads as normal.
+ */
+ if (bio_data_dir(bio) == READ)
+ goto map_bio;
+
+ /*
+ * Drop writes?
+ */
+ if (test_bit(DROP_WRITES, &fc->flags)) {
+ bio_endio(bio, 0);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ /*
+ * Corrupt matching writes.
+ */
+ if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
+ if (all_corrupt_bio_flags_match(bio, fc))
+ corrupt_bio_data(bio, fc);
+ goto map_bio;
+ }
+
+ /*
+ * By default, error all I/O.
+ */
return -EIO;
+ }
+map_bio:
flakey_map_bio(ti, bio);
return DM_MAPIO_REMAPPED;
}
+static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+ int error, union map_info *map_context)
+{
+ struct flakey_c *fc = ti->private;
+ unsigned bio_submitted_while_down = map_context->ll;
+
+ /*
+ * Corrupt successful READs while in down state.
+ * If flags were specified, only corrupt those that match.
+ */
+ if (!error && bio_submitted_while_down &&
+ (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
+ all_corrupt_bio_flags_match(bio, fc))
+ corrupt_bio_data(bio, fc);
+
+ return error;
+}
+
static int flakey_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
+ unsigned sz = 0;
struct flakey_c *fc = ti->private;
+ unsigned drop_writes;
switch (type) {
case STATUSTYPE_INFO:
@@ -138,9 +342,22 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE:
- snprintf(result, maxlen, "%s %llu %u %u", fc->dev->name,
- (unsigned long long)fc->start, fc->up_interval,
- fc->down_interval);
+ DMEMIT("%s %llu %u %u ", fc->dev->name,
+ (unsigned long long)fc->start, fc->up_interval,
+ fc->down_interval);
+
+ drop_writes = test_bit(DROP_WRITES, &fc->flags);
+ DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5);
+
+ if (drop_writes)
+ DMEMIT("drop_writes ");
+
+ if (fc->corrupt_bio_byte)
+ DMEMIT("corrupt_bio_byte %u %c %u %u ",
+ fc->corrupt_bio_byte,
+ (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',
+ fc->corrupt_bio_value, fc->corrupt_bio_flags);
+
break;
}
return 0;
@@ -177,11 +394,12 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
.map = flakey_map,
+ .end_io = flakey_end_io,
.status = flakey_status,
.ioctl = flakey_ioctl,
.merge = flakey_merge,
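
For illustration, flakey table lines using the new feature groups might look like the following (device path, length and intervals are made up; the format follows the constructor comment above):

	# silently drop writes while the device is in its 5s "down" window
	0 409600 flakey /dev/sdc1 0 30 5 1 drop_writes

	# while down, corrupt byte 32 of READ bios with the value 224; bio_flags 0 matches all bios
	0 409600 flakey /dev/sdc1 0 30 5 5 corrupt_bio_byte 32 r 224 0

The feature-argument counts (1 and 5 here) are what flakey_status() reports back, computed as drop_writes + (fc->corrupt_bio_byte > 0) * 5.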
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2067288f61f..ad2eba40e31 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -38,6 +38,8 @@ struct io {
struct dm_io_client *client;
io_notify_fn callback;
void *context;
+ void *vma_invalidate_address;
+ unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));
static struct kmem_cache *_dm_io_cache;
@@ -116,6 +118,10 @@ static void dec_count(struct io *io, unsigned int region, int error)
set_bit(region, &io->error_bits);
if (atomic_dec_and_test(&io->count)) {
+ if (io->vma_invalidate_size)
+ invalidate_kernel_vmap_range(io->vma_invalidate_address,
+ io->vma_invalidate_size);
+
if (io->sleeper)
wake_up_process(io->sleeper);
@@ -159,6 +165,9 @@ struct dpages {
unsigned context_u;
void *context_ptr;
+
+ void *vma_invalidate_address;
+ unsigned long vma_invalidate_size;
};
/*
@@ -377,6 +386,9 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
io->sleeper = current;
io->client = client;
+ io->vma_invalidate_address = dp->vma_invalidate_address;
+ io->vma_invalidate_size = dp->vma_invalidate_size;
+
dispatch_io(rw, num_regions, where, dp, io, 1);
while (1) {
@@ -415,13 +427,21 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
io->callback = fn;
io->context = context;
+ io->vma_invalidate_address = dp->vma_invalidate_address;
+ io->vma_invalidate_size = dp->vma_invalidate_size;
+
dispatch_io(rw, num_regions, where, dp, io, 0);
return 0;
}
-static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
+static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
+ unsigned long size)
{
/* Set up dpages based on memory type */
+
+ dp->vma_invalidate_address = NULL;
+ dp->vma_invalidate_size = 0;
+
switch (io_req->mem.type) {
case DM_IO_PAGE_LIST:
list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
@@ -432,6 +452,11 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
break;
case DM_IO_VMA:
+ flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
+ if ((io_req->bi_rw & RW_MASK) == READ) {
+ dp->vma_invalidate_address = io_req->mem.ptr.vma;
+ dp->vma_invalidate_size = size;
+ }
vm_dp_init(dp, io_req->mem.ptr.vma);
break;
@@ -460,7 +485,7 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
int r;
struct dpages dp;
- r = dp_init(io_req, &dp);
+ r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
if (r)
return r;
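
The new flush/invalidate calls matter for callers that issue dm-io against vmalloc'ed buffers on architectures with virtually indexed caches. A minimal, hypothetical sketch of such a caller follows; the helper name, device and client are assumptions, not part of this patch:

	/*
	 * Hypothetical helper: read 'len' bytes from the start of 'dev' into a
	 * vmalloc'ed buffer using synchronous dm-io.
	 */
	static int example_read_into_vma(struct dm_io_client *io_client,
					 struct dm_dev *dev, void *buf, size_t len)
	{
		struct dm_io_region where = {
			.bdev = dev->bdev,
			.sector = 0,
			.count = len >> SECTOR_SHIFT,
		};
		struct dm_io_request io_req = {
			.bi_rw = READ,
			.mem.type = DM_IO_VMA,		/* buf comes from vmalloc() */
			.mem.ptr.vma = buf,
			.notify.fn = NULL,		/* NULL callback => synchronous */
			.client = io_client,
		};

		/*
		 * With this patch, dp_init() flushes the vmap range before the
		 * I/O is dispatched and dec_count() invalidates it again after
		 * a READ completes, so 'buf' is coherent when dm_io() returns.
		 */
		return dm_io(&io_req, 1, &where, NULL);
	}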
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 4cacdad2270..2e9a3ca37bd 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -128,6 +128,24 @@ static struct hash_cell *__get_uuid_cell(const char *str)
return NULL;
}
+static struct hash_cell *__get_dev_cell(uint64_t dev)
+{
+ struct mapped_device *md;
+ struct hash_cell *hc;
+
+ md = dm_get_md(huge_decode_dev(dev));
+ if (!md)
+ return NULL;
+
+ hc = dm_get_mdptr(md);
+ if (!hc) {
+ dm_put(md);
+ return NULL;
+ }
+
+ return hc;
+}
+
/*-----------------------------------------------------------------
* Inserting, removing and renaming a device.
*---------------------------------------------------------------*/
@@ -718,25 +736,45 @@ static int dev_create(struct dm_ioctl *param, size_t param_size)
*/
static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
{
- struct mapped_device *md;
- void *mdptr = NULL;
+ struct hash_cell *hc = NULL;
- if (*param->uuid)
- return __get_uuid_cell(param->uuid);
+ if (*param->uuid) {
+ if (*param->name || param->dev)
+ return NULL;
- if (*param->name)
- return __get_name_cell(param->name);
+ hc = __get_uuid_cell(param->uuid);
+ if (!hc)
+ return NULL;
+ } else if (*param->name) {
+ if (param->dev)
+ return NULL;
- md = dm_get_md(huge_decode_dev(param->dev));
- if (!md)
- goto out;
+ hc = __get_name_cell(param->name);
+ if (!hc)
+ return NULL;
+ } else if (param->dev) {
+ hc = __get_dev_cell(param->dev);
+ if (!hc)
+ return NULL;
+ } else
+ return NULL;
- mdptr = dm_get_mdptr(md);
- if (!mdptr)
- dm_put(md);
+ /*
+ * Sneakily write in both the name and the uuid
+ * while we have the cell.
+ */
+ strlcpy(param->name, hc->name, sizeof(param->name));
+ if (hc->uuid)
+ strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
+ else
+ param->uuid[0] = '\0';
-out:
- return mdptr;
+ if (hc->new_map)
+ param->flags |= DM_INACTIVE_PRESENT_FLAG;
+ else
+ param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+
+ return hc;
}
static struct mapped_device *find_device(struct dm_ioctl *param)
@@ -746,24 +784,8 @@ static struct mapped_device *find_device(struct dm_ioctl *param)
down_read(&_hash_lock);
hc = __find_device_hash_cell(param);
- if (hc) {
+ if (hc)
md = hc->md;
-
- /*
- * Sneakily write in both the name and the uuid
- * while we have the cell.
- */
- strlcpy(param->name, hc->name, sizeof(param->name));
- if (hc->uuid)
- strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
- else
- param->uuid[0] = '\0';
-
- if (hc->new_map)
- param->flags |= DM_INACTIVE_PRESENT_FLAG;
- else
- param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
- }
up_read(&_hash_lock);
return md;
@@ -1402,6 +1424,11 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
goto out;
}
+ if (!argc) {
+ DMWARN("Empty message received.");
+ goto out;
+ }
+
table = dm_get_live_table(md);
if (!table)
goto out_argv;
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 819e37eaaeb..f8214702963 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -10,7 +10,7 @@
*/
#include <linux/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
@@ -224,8 +224,6 @@ struct kcopyd_job {
unsigned int num_dests;
struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];
- sector_t offset;
- unsigned int nr_pages;
struct page_list *pages;
/*
@@ -380,7 +378,7 @@ static int run_io_job(struct kcopyd_job *job)
.bi_rw = job->rw,
.mem.type = DM_IO_PAGE_LIST,
.mem.ptr.pl = job->pages,
- .mem.offset = job->offset,
+ .mem.offset = 0,
.notify.fn = complete_io,
.notify.context = job,
.client = job->kc->io_client,
@@ -397,10 +395,9 @@ static int run_io_job(struct kcopyd_job *job)
static int run_pages_job(struct kcopyd_job *job)
{
int r;
+ unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
- job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
- PAGE_SIZE >> 9);
- r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
+ r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
if (!r) {
/* this job is ready for io */
push(&job->kc->io_jobs, job);
@@ -602,8 +599,6 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->num_dests = num_dests;
memcpy(&job->dests, dests, sizeof(*dests) * num_dests);
- job->offset = 0;
- job->nr_pages = 0;
job->pages = NULL;
job->fn = fn;
@@ -622,6 +617,37 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
}
EXPORT_SYMBOL(dm_kcopyd_copy);
+void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
+ dm_kcopyd_notify_fn fn, void *context)
+{
+ struct kcopyd_job *job;
+
+ job = mempool_alloc(kc->job_pool, GFP_NOIO);
+
+ memset(job, 0, sizeof(struct kcopyd_job));
+ job->kc = kc;
+ job->fn = fn;
+ job->context = context;
+
+ atomic_inc(&kc->nr_jobs);
+
+ return job;
+}
+EXPORT_SYMBOL(dm_kcopyd_prepare_callback);
+
+void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
+{
+ struct kcopyd_job *job = j;
+ struct dm_kcopyd_client *kc = job->kc;
+
+ job->read_err = read_err;
+ job->write_err = write_err;
+
+ push(&kc->complete_jobs, job);
+ wake(kc);
+}
+EXPORT_SYMBOL(dm_kcopyd_do_callback);
+
/*
* Cancels a kcopyd job, eg. someone might be deactivating a
* mirror.
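
The two new exports let a client push a completion through kcopyd without submitting any copy, so such completions stay serialised with real copy jobs on the same workqueue. A hedged sketch of a hypothetical caller (my_request, copy_needed and the field names are invented for illustration):

	/* Hypothetical client code, not part of this patch. */
	struct my_request {
		struct dm_io_region from;
		struct dm_io_region to;
		/* ... */
	};

	static void my_copy_complete(int read_err, unsigned long write_err,
				     void *context)
	{
		struct my_request *req = context;

		/* read_err/write_err are non-zero on failure */
		kfree(req);
	}

	static void my_submit(struct dm_kcopyd_client *kc, struct my_request *req,
			      bool copy_needed)
	{
		if (!copy_needed) {
			/*
			 * Nothing to copy, but deliver the completion through
			 * kcopyd anyway so it is ordered with real copy jobs.
			 */
			void *job = dm_kcopyd_prepare_callback(kc, my_copy_complete, req);

			dm_kcopyd_do_callback(job, 0, 0);
			return;
		}

		dm_kcopyd_copy(kc, &req->from, 1, &req->to, 0, my_copy_complete, req);
	}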
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index aa2e0c374ab..1021c898601 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -394,8 +394,7 @@ static int flush_by_group(struct log_c *lc, struct list_head *flush_list)
group[count] = fe->region;
count++;
- list_del(&fe->list);
- list_add(&fe->list, &tmp_list);
+ list_move(&fe->list, &tmp_list);
type = fe->type;
if (count >= MAX_FLUSH_GROUP_COUNT)
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 948e3f4925b..3b52bb72bd1 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -197,15 +197,21 @@ EXPORT_SYMBOL(dm_dirty_log_destroy);
#define MIRROR_DISK_VERSION 2
#define LOG_OFFSET 2
-struct log_header {
- uint32_t magic;
+struct log_header_disk {
+ __le32 magic;
/*
* Simple, incrementing version. no backward
* compatibility.
*/
+ __le32 version;
+ __le64 nr_regions;
+} __packed;
+
+struct log_header_core {
+ uint32_t magic;
uint32_t version;
- sector_t nr_regions;
+ uint64_t nr_regions;
};
struct log_c {
@@ -239,10 +245,10 @@ struct log_c {
int log_dev_failed;
int log_dev_flush_failed;
struct dm_dev *log_dev;
- struct log_header header;
+ struct log_header_core header;
struct dm_io_region header_location;
- struct log_header *disk_header;
+ struct log_header_disk *disk_header;
};
/*
@@ -251,34 +257,34 @@ struct log_c {
*/
static inline int log_test_bit(uint32_t *bs, unsigned bit)
{
- return test_bit_le(bit, (unsigned long *) bs) ? 1 : 0;
+ return test_bit_le(bit, bs) ? 1 : 0;
}
static inline void log_set_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
- __test_and_set_bit_le(bit, (unsigned long *) bs);
+ __set_bit_le(bit, bs);
l->touched_cleaned = 1;
}
static inline void log_clear_bit(struct log_c *l,
uint32_t *bs, unsigned bit)
{
- __test_and_clear_bit_le(bit, (unsigned long *) bs);
+ __clear_bit_le(bit, bs);
l->touched_dirtied = 1;
}
/*----------------------------------------------------------------
* Header IO
*--------------------------------------------------------------*/
-static void header_to_disk(struct log_header *core, struct log_header *disk)
+static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk)
{
disk->magic = cpu_to_le32(core->magic);
disk->version = cpu_to_le32(core->version);
disk->nr_regions = cpu_to_le64(core->nr_regions);
}
-static void header_from_disk(struct log_header *core, struct log_header *disk)
+static void header_from_disk(struct log_header_core *core, struct log_header_disk *disk)
{
core->magic = le32_to_cpu(disk->magic);
core->version = le32_to_cpu(disk->version);
@@ -486,7 +492,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
lc->sync_count = (sync == NOSYNC) ? region_count : 0;
- lc->recovering_bits = vmalloc(bitset_size);
+ lc->recovering_bits = vzalloc(bitset_size);
if (!lc->recovering_bits) {
DMWARN("couldn't allocate sync bitset");
vfree(lc->sync_bits);
@@ -498,7 +504,6 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
kfree(lc);
return -ENOMEM;
}
- memset(lc->recovering_bits, 0, bitset_size);
lc->sync_search = 0;
log->context = lc;
@@ -739,8 +744,7 @@ static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
return 0;
do {
- *region = find_next_zero_bit_le(
- (unsigned long *) lc->sync_bits,
+ *region = find_next_zero_bit_le(lc->sync_bits,
lc->region_count,
lc->sync_search);
lc->sync_search = *region + 1;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index aa4e570c2cb..5e0090ef418 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -19,10 +19,9 @@
#include <linux/time.h>
#include <linux/workqueue.h>
#include <scsi/scsi_dh.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define DM_MSG_PREFIX "multipath"
-#define MESG_STR(x) x, sizeof(x)
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
@@ -505,80 +504,29 @@ static void trigger_event(struct work_struct *work)
* <#paths> <#per-path selector args>
* [<path> [<arg>]* ]+ ]+
*---------------------------------------------------------------*/
-struct param {
- unsigned min;
- unsigned max;
- char *error;
-};
-
-static int read_param(struct param *param, char *str, unsigned *v, char **error)
-{
- if (!str ||
- (sscanf(str, "%u", v) != 1) ||
- (*v < param->min) ||
- (*v > param->max)) {
- *error = param->error;
- return -EINVAL;
- }
-
- return 0;
-}
-
-struct arg_set {
- unsigned argc;
- char **argv;
-};
-
-static char *shift(struct arg_set *as)
-{
- char *r;
-
- if (as->argc) {
- as->argc--;
- r = *as->argv;
- as->argv++;
- return r;
- }
-
- return NULL;
-}
-
-static void consume(struct arg_set *as, unsigned n)
-{
- BUG_ON (as->argc < n);
- as->argc -= n;
- as->argv += n;
-}
-
-static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
+static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
struct dm_target *ti)
{
int r;
struct path_selector_type *pst;
unsigned ps_argc;
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{0, 1024, "invalid number of path selector args"},
};
- pst = dm_get_path_selector(shift(as));
+ pst = dm_get_path_selector(dm_shift_arg(as));
if (!pst) {
ti->error = "unknown path selector type";
return -EINVAL;
}
- r = read_param(_params, shift(as), &ps_argc, &ti->error);
+ r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
if (r) {
dm_put_path_selector(pst);
return -EINVAL;
}
- if (ps_argc > as->argc) {
- dm_put_path_selector(pst);
- ti->error = "not enough arguments for path selector";
- return -EINVAL;
- }
-
r = pst->create(&pg->ps, ps_argc, as->argv);
if (r) {
dm_put_path_selector(pst);
@@ -587,12 +535,12 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
}
pg->ps.type = pst;
- consume(as, ps_argc);
+ dm_consume_args(as, ps_argc);
return 0;
}
-static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
+static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
struct dm_target *ti)
{
int r;
@@ -609,7 +557,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
if (!p)
return ERR_PTR(-ENOMEM);
- r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table),
+ r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
&p->path.dev);
if (r) {
ti->error = "error getting device";
@@ -660,16 +608,16 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
return ERR_PTR(r);
}
-static struct priority_group *parse_priority_group(struct arg_set *as,
+static struct priority_group *parse_priority_group(struct dm_arg_set *as,
struct multipath *m)
{
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{1, 1024, "invalid number of paths"},
{0, 1024, "invalid number of selector args"}
};
int r;
- unsigned i, nr_selector_args, nr_params;
+ unsigned i, nr_selector_args, nr_args;
struct priority_group *pg;
struct dm_target *ti = m->ti;
@@ -693,26 +641,26 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
/*
* read the paths
*/
- r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error);
+ r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
if (r)
goto bad;
- r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error);
+ r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
if (r)
goto bad;
- nr_params = 1 + nr_selector_args;
+ nr_args = 1 + nr_selector_args;
for (i = 0; i < pg->nr_pgpaths; i++) {
struct pgpath *pgpath;
- struct arg_set path_args;
+ struct dm_arg_set path_args;
- if (as->argc < nr_params) {
+ if (as->argc < nr_args) {
ti->error = "not enough path parameters";
r = -EINVAL;
goto bad;
}
- path_args.argc = nr_params;
+ path_args.argc = nr_args;
path_args.argv = as->argv;
pgpath = parse_path(&path_args, &pg->ps, ti);
@@ -723,7 +671,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
pgpath->pg = pg;
list_add_tail(&pgpath->list, &pg->pgpaths);
- consume(as, nr_params);
+ dm_consume_args(as, nr_args);
}
return pg;
@@ -733,28 +681,23 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
return ERR_PTR(r);
}
-static int parse_hw_handler(struct arg_set *as, struct multipath *m)
+static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
unsigned hw_argc;
int ret;
struct dm_target *ti = m->ti;
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{0, 1024, "invalid number of hardware handler args"},
};
- if (read_param(_params, shift(as), &hw_argc, &ti->error))
+ if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
return -EINVAL;
if (!hw_argc)
return 0;
- if (hw_argc > as->argc) {
- ti->error = "not enough arguments for hardware handler";
- return -EINVAL;
- }
-
- m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
+ m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
request_module("scsi_dh_%s", m->hw_handler_name);
if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
ti->error = "unknown hardware handler type";
@@ -778,7 +721,7 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
j = sprintf(p, "%s", as->argv[i]);
}
- consume(as, hw_argc - 1);
+ dm_consume_args(as, hw_argc - 1);
return 0;
fail:
@@ -787,20 +730,20 @@ fail:
return ret;
}
-static int parse_features(struct arg_set *as, struct multipath *m)
+static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
int r;
unsigned argc;
struct dm_target *ti = m->ti;
- const char *param_name;
+ const char *arg_name;
- static struct param _params[] = {
+ static struct dm_arg _args[] = {
{0, 5, "invalid number of feature args"},
{1, 50, "pg_init_retries must be between 1 and 50"},
{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
};
- r = read_param(_params, shift(as), &argc, &ti->error);
+ r = dm_read_arg_group(_args, as, &argc, &ti->error);
if (r)
return -EINVAL;
@@ -808,26 +751,24 @@ static int parse_features(struct arg_set *as, struct multipath *m)
return 0;
do {
- param_name = shift(as);
+ arg_name = dm_shift_arg(as);
argc--;
- if (!strnicmp(param_name, MESG_STR("queue_if_no_path"))) {
+ if (!strcasecmp(arg_name, "queue_if_no_path")) {
r = queue_if_no_path(m, 1, 0);
continue;
}
- if (!strnicmp(param_name, MESG_STR("pg_init_retries")) &&
+ if (!strcasecmp(arg_name, "pg_init_retries") &&
(argc >= 1)) {
- r = read_param(_params + 1, shift(as),
- &m->pg_init_retries, &ti->error);
+ r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
argc--;
continue;
}
- if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) &&
+ if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
(argc >= 1)) {
- r = read_param(_params + 2, shift(as),
- &m->pg_init_delay_msecs, &ti->error);
+ r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
argc--;
continue;
}
@@ -842,15 +783,15 @@ static int parse_features(struct arg_set *as, struct multipath *m)
static int multipath_ctr(struct dm_target *ti, unsigned int argc,
char **argv)
{
- /* target parameters */
- static struct param _params[] = {
+ /* target arguments */
+ static struct dm_arg _args[] = {
{0, 1024, "invalid number of priority groups"},
{0, 1024, "invalid initial priority group number"},
};
int r;
struct multipath *m;
- struct arg_set as;
+ struct dm_arg_set as;
unsigned pg_count = 0;
unsigned next_pg_num;
@@ -871,11 +812,11 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
if (r)
goto bad;
- r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error);
+ r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
if (r)
goto bad;
- r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error);
+ r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
if (r)
goto bad;
@@ -1505,10 +1446,10 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
}
if (argc == 1) {
- if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) {
+ if (!strcasecmp(argv[0], "queue_if_no_path")) {
r = queue_if_no_path(m, 1, 0);
goto out;
- } else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) {
+ } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
r = queue_if_no_path(m, 0, 0);
goto out;
}
@@ -1519,18 +1460,18 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
goto out;
}
- if (!strnicmp(argv[0], MESG_STR("disable_group"))) {
+ if (!strcasecmp(argv[0], "disable_group")) {
r = bypass_pg_num(m, argv[1], 1);
goto out;
- } else if (!strnicmp(argv[0], MESG_STR("enable_group"))) {
+ } else if (!strcasecmp(argv[0], "enable_group")) {
r = bypass_pg_num(m, argv[1], 0);
goto out;
- } else if (!strnicmp(argv[0], MESG_STR("switch_group"))) {
+ } else if (!strcasecmp(argv[0], "switch_group")) {
r = switch_pg_num(m, argv[1]);
goto out;
- } else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
+ } else if (!strcasecmp(argv[0], "reinstate_path"))
action = reinstate_path;
- else if (!strnicmp(argv[0], MESG_STR("fail_path")))
+ else if (!strcasecmp(argv[0], "fail_path"))
action = fail_path;
else {
DMWARN("Unrecognised multipath message received.");
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
index f92b6cea9d9..03a837aa5ce 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-queue-length.c
@@ -20,7 +20,7 @@
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/module.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define DM_MSG_PREFIX "multipath queue-length"
#define QL_MIN_IO 128
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index e5d8904fc8f..a002dd85db1 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -8,19 +8,19 @@
#include <linux/slab.h>
#include "md.h"
+#include "raid1.h"
#include "raid5.h"
-#include "dm.h"
#include "bitmap.h"
+#include <linux/device-mapper.h>
+
#define DM_MSG_PREFIX "raid"
/*
- * If the MD doesn't support MD_SYNC_STATE_FORCED yet, then
- * make it so the flag doesn't set anything.
+ * The following flags are used by dm-raid.c to set up the array state.
+ * They must be cleared before md_run is called.
*/
-#ifndef MD_SYNC_STATE_FORCED
-#define MD_SYNC_STATE_FORCED 0
-#endif
+#define FirstUse 10 /* rdev flag */
struct raid_dev {
/*
@@ -43,14 +43,15 @@ struct raid_dev {
/*
* Flags for rs->print_flags field.
*/
-#define DMPF_DAEMON_SLEEP 0x1
-#define DMPF_MAX_WRITE_BEHIND 0x2
-#define DMPF_SYNC 0x4
-#define DMPF_NOSYNC 0x8
-#define DMPF_STRIPE_CACHE 0x10
-#define DMPF_MIN_RECOVERY_RATE 0x20
-#define DMPF_MAX_RECOVERY_RATE 0x40
-
+#define DMPF_SYNC 0x1
+#define DMPF_NOSYNC 0x2
+#define DMPF_REBUILD 0x4
+#define DMPF_DAEMON_SLEEP 0x8
+#define DMPF_MIN_RECOVERY_RATE 0x10
+#define DMPF_MAX_RECOVERY_RATE 0x20
+#define DMPF_MAX_WRITE_BEHIND 0x40
+#define DMPF_STRIPE_CACHE 0x80
+#define DMPF_REGION_SIZE 0x100
struct raid_set {
struct dm_target *ti;
@@ -72,6 +73,7 @@ static struct raid_type {
const unsigned level; /* RAID level. */
const unsigned algorithm; /* RAID algorithm. */
} raid_types[] = {
+ {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */},
{"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0},
{"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
{"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
@@ -105,7 +107,8 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
}
sectors_per_dev = ti->len;
- if (sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
+ if ((raid_type->level > 1) &&
+ sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
ti->error = "Target length not divisible by number of data devices";
return ERR_PTR(-EINVAL);
}
@@ -147,9 +150,16 @@ static void context_free(struct raid_set *rs)
{
int i;
- for (i = 0; i < rs->md.raid_disks; i++)
+ for (i = 0; i < rs->md.raid_disks; i++) {
+ if (rs->dev[i].meta_dev)
+ dm_put_device(rs->ti, rs->dev[i].meta_dev);
+ if (rs->dev[i].rdev.sb_page)
+ put_page(rs->dev[i].rdev.sb_page);
+ rs->dev[i].rdev.sb_page = NULL;
+ rs->dev[i].rdev.sb_loaded = 0;
if (rs->dev[i].data_dev)
dm_put_device(rs->ti, rs->dev[i].data_dev);
+ }
kfree(rs);
}
@@ -159,7 +169,16 @@ static void context_free(struct raid_set *rs)
* <meta_dev>: meta device name or '-' if missing
* <data_dev>: data device name or '-' if missing
*
- * This code parses those words.
+ * The following are permitted:
+ * - -
+ * - <data_dev>
+ * <meta_dev> <data_dev>
+ *
+ * The following is not allowed:
+ * <meta_dev> -
+ *
+ * This code parses those words. If there is a failure,
+ * the caller must use context_free to unwind the operations.
*/
static int dev_parms(struct raid_set *rs, char **argv)
{
@@ -182,8 +201,16 @@ static int dev_parms(struct raid_set *rs, char **argv)
rs->dev[i].rdev.mddev = &rs->md;
if (strcmp(argv[0], "-")) {
- rs->ti->error = "Metadata devices not supported";
- return -EINVAL;
+ ret = dm_get_device(rs->ti, argv[0],
+ dm_table_get_mode(rs->ti->table),
+ &rs->dev[i].meta_dev);
+ rs->ti->error = "RAID metadata device lookup failure";
+ if (ret)
+ return ret;
+
+ rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
+ if (!rs->dev[i].rdev.sb_page)
+ return -ENOMEM;
}
if (!strcmp(argv[1], "-")) {
@@ -193,6 +220,10 @@ static int dev_parms(struct raid_set *rs, char **argv)
return -EINVAL;
}
+ rs->ti->error = "No data device supplied with metadata device";
+ if (rs->dev[i].meta_dev)
+ return -EINVAL;
+
continue;
}
@@ -204,6 +235,10 @@ static int dev_parms(struct raid_set *rs, char **argv)
return ret;
}
+ if (rs->dev[i].meta_dev) {
+ metadata_available = 1;
+ rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
+ }
rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
@@ -235,33 +270,109 @@ static int dev_parms(struct raid_set *rs, char **argv)
}
/*
+ * validate_region_size
+ * @rs
+ * @region_size: region size in sectors. If 0, pick a size (4MiB default).
+ *
+ * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
+ * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
+ *
+ * Returns: 0 on success, -EINVAL on failure.
+ */
+static int validate_region_size(struct raid_set *rs, unsigned long region_size)
+{
+ unsigned long min_region_size = rs->ti->len / (1 << 21);
+
+ if (!region_size) {
+ /*
+ * Choose a reasonable default. All figures in sectors.
+ */
+ if (min_region_size > (1 << 13)) {
+ DMINFO("Choosing default region size of %lu sectors",
+ region_size);
+ region_size = min_region_size;
+ } else {
+ DMINFO("Choosing default region size of 4MiB");
+ region_size = 1 << 13; /* sectors */
+ }
+ } else {
+ /*
+ * Validate user-supplied value.
+ */
+ if (region_size > rs->ti->len) {
+ rs->ti->error = "Supplied region size is too large";
+ return -EINVAL;
+ }
+
+ if (region_size < min_region_size) {
+ DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
+ region_size, min_region_size);
+ rs->ti->error = "Supplied region size is too small";
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(region_size)) {
+ rs->ti->error = "Region size is not a power of 2";
+ return -EINVAL;
+ }
+
+ if (region_size < rs->md.chunk_sectors) {
+ rs->ti->error = "Region size is smaller than the chunk size";
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Convert sectors to bytes.
+ */
+ rs->md.bitmap_info.chunksize = (region_size << 9);
+
+ return 0;
+}
+
+/*
* Possible arguments are...
- * RAID456:
* <chunk_size> [optional_args]
*
- * Optional args:
- * [[no]sync] Force or prevent recovery of the entire array
+ * Argument definitions
+ * <chunk_size> The number of sectors per disk that
+ * will form the "stripe"
+ * [[no]sync] Force or prevent recovery of the
+ * entire array
* [rebuild <idx>] Rebuild the drive indicated by the index
- * [daemon_sleep <ms>] Time between bitmap daemon work to clear bits
+ * [daemon_sleep <ms>] Time between bitmap daemon work to
+ * clear bits
* [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
* [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
+ * [write_mostly <idx>] Indicate a write mostly drive via index
* [max_write_behind <sectors>] See '-write-behind=' (man mdadm)
* [stripe_cache <sectors>] Stripe cache size for higher RAIDs
+ * [region_size <sectors>] Defines granularity of bitmap
*/
static int parse_raid_params(struct raid_set *rs, char **argv,
unsigned num_raid_params)
{
unsigned i, rebuild_cnt = 0;
- unsigned long value;
+ unsigned long value, region_size = 0;
char *key;
/*
* First, parse the in-order required arguments
+ * "chunk_size" is the only argument of this type.
*/
- if ((strict_strtoul(argv[0], 10, &value) < 0) ||
- !is_power_of_2(value) || (value < 8)) {
+ if ((strict_strtoul(argv[0], 10, &value) < 0)) {
rs->ti->error = "Bad chunk size";
return -EINVAL;
+ } else if (rs->raid_type->level == 1) {
+ if (value)
+ DMERR("Ignoring chunk size parameter for RAID 1");
+ value = 0;
+ } else if (!is_power_of_2(value)) {
+ rs->ti->error = "Chunk size must be a power of 2";
+ return -EINVAL;
+ } else if (value < 8) {
+ rs->ti->error = "Chunk size value is too small";
+ return -EINVAL;
}
rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
@@ -269,22 +380,39 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
num_raid_params--;
/*
- * Second, parse the unordered optional arguments
+ * We set each individual device as In_sync with a completed
+ * 'recovery_offset'. If there has been a device failure or
+ * replacement then one of the following cases applies:
+ *
+ * 1) User specifies 'rebuild'.
+ * - Device is reset when param is read.
+ * 2) A new device is supplied.
+ * - No matching superblock found, resets device.
+ * 3) Device failure was transient and returns on reload.
+ * - Failure noticed, resets device for bitmap replay.
+ * 4) Device hadn't completed recovery after previous failure.
+ * - Superblock is read and overrides recovery_offset.
+ *
+ * What is found in the superblocks of the devices is always
+ * authoritative, unless 'rebuild' or '[no]sync' was specified.
*/
- for (i = 0; i < rs->md.raid_disks; i++)
+ for (i = 0; i < rs->md.raid_disks; i++) {
set_bit(In_sync, &rs->dev[i].rdev.flags);
+ rs->dev[i].rdev.recovery_offset = MaxSector;
+ }
+ /*
+ * Second, parse the unordered optional arguments
+ */
for (i = 0; i < num_raid_params; i++) {
- if (!strcmp(argv[i], "nosync")) {
+ if (!strcasecmp(argv[i], "nosync")) {
rs->md.recovery_cp = MaxSector;
rs->print_flags |= DMPF_NOSYNC;
- rs->md.flags |= MD_SYNC_STATE_FORCED;
continue;
}
- if (!strcmp(argv[i], "sync")) {
+ if (!strcasecmp(argv[i], "sync")) {
rs->md.recovery_cp = 0;
rs->print_flags |= DMPF_SYNC;
- rs->md.flags |= MD_SYNC_STATE_FORCED;
continue;
}
@@ -300,9 +428,13 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
return -EINVAL;
}
- if (!strcmp(key, "rebuild")) {
- if (++rebuild_cnt > rs->raid_type->parity_devs) {
- rs->ti->error = "Too many rebuild drives given";
+ if (!strcasecmp(key, "rebuild")) {
+ rebuild_cnt++;
+ if (((rs->raid_type->level != 1) &&
+ (rebuild_cnt > rs->raid_type->parity_devs)) ||
+ ((rs->raid_type->level == 1) &&
+ (rebuild_cnt > (rs->md.raid_disks - 1)))) {
+ rs->ti->error = "Too many rebuild devices specified for given RAID type";
return -EINVAL;
}
if (value > rs->md.raid_disks) {
@@ -311,7 +443,22 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
}
clear_bit(In_sync, &rs->dev[value].rdev.flags);
rs->dev[value].rdev.recovery_offset = 0;
- } else if (!strcmp(key, "max_write_behind")) {
+ rs->print_flags |= DMPF_REBUILD;
+ } else if (!strcasecmp(key, "write_mostly")) {
+ if (rs->raid_type->level != 1) {
+ rs->ti->error = "write_mostly option is only valid for RAID1";
+ return -EINVAL;
+ }
+ if (value > rs->md.raid_disks) {
+ rs->ti->error = "Invalid write_mostly drive index given";
+ return -EINVAL;
+ }
+ set_bit(WriteMostly, &rs->dev[value].rdev.flags);
+ } else if (!strcasecmp(key, "max_write_behind")) {
+ if (rs->raid_type->level != 1) {
+ rs->ti->error = "max_write_behind option is only valid for RAID1";
+ return -EINVAL;
+ }
rs->print_flags |= DMPF_MAX_WRITE_BEHIND;
/*
@@ -324,14 +471,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
return -EINVAL;
}
rs->md.bitmap_info.max_write_behind = value;
- } else if (!strcmp(key, "daemon_sleep")) {
+ } else if (!strcasecmp(key, "daemon_sleep")) {
rs->print_flags |= DMPF_DAEMON_SLEEP;
if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
rs->ti->error = "daemon sleep period out of range";
return -EINVAL;
}
rs->md.bitmap_info.daemon_sleep = value;
- } else if (!strcmp(key, "stripe_cache")) {
+ } else if (!strcasecmp(key, "stripe_cache")) {
rs->print_flags |= DMPF_STRIPE_CACHE;
/*
@@ -348,20 +495,23 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->ti->error = "Bad stripe_cache size";
return -EINVAL;
}
- } else if (!strcmp(key, "min_recovery_rate")) {
+ } else if (!strcasecmp(key, "min_recovery_rate")) {
rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
if (value > INT_MAX) {
rs->ti->error = "min_recovery_rate out of range";
return -EINVAL;
}
rs->md.sync_speed_min = (int)value;
- } else if (!strcmp(key, "max_recovery_rate")) {
+ } else if (!strcasecmp(key, "max_recovery_rate")) {
rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
if (value > INT_MAX) {
rs->ti->error = "max_recovery_rate out of range";
return -EINVAL;
}
rs->md.sync_speed_max = (int)value;
+ } else if (!strcasecmp(key, "region_size")) {
+ rs->print_flags |= DMPF_REGION_SIZE;
+ region_size = value;
} else {
DMERR("Unable to parse RAID parameter: %s", key);
rs->ti->error = "Unable to parse RAID parameters";
@@ -369,6 +519,19 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
}
}
+ if (validate_region_size(rs, region_size))
+ return -EINVAL;
+
+ if (rs->md.chunk_sectors)
+ rs->ti->split_io = rs->md.chunk_sectors;
+ else
+ rs->ti->split_io = region_size;
+
/* Assume there are no metadata devices until the drives are parsed */
rs->md.persistent = 0;
rs->md.external = 1;
@@ -387,17 +550,351 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
+ if (rs->raid_type->level == 1)
+ return md_raid1_congested(&rs->md, bits);
+
return md_raid5_congested(&rs->md, bits);
}
/*
+ * This structure is never routinely used by userspace, unlike md superblocks.
+ * Devices with this superblock should only ever be accessed via device-mapper.
+ */
+#define DM_RAID_MAGIC 0x64526D44
+struct dm_raid_superblock {
+ __le32 magic; /* "DmRd" */
+ __le32 features; /* Used to indicate possible future changes */
+
+ __le32 num_devices; /* Number of devices in this array. (Max 64) */
+ __le32 array_position; /* The position of this drive in the array */
+
+ __le64 events; /* Incremented by md when superblock updated */
+ __le64 failed_devices; /* Bit field of devices to indicate failures */
+
+ /*
+ * This offset tracks the progress of the repair or replacement of
+ * an individual drive.
+ */
+ __le64 disk_recovery_offset;
+
+ /*
+ * This offset tracks the progress of the initial array
+ * synchronisation/parity calculation.
+ */
+ __le64 array_resync_offset;
+
+ /*
+ * RAID characteristics
+ */
+ __le32 level;
+ __le32 layout;
+ __le32 stripe_sectors;
+
+ __u8 pad[452]; /* Round struct to 512 bytes. */
+ /* Always set to 0 when writing. */
+} __packed;
+
+static int read_disk_sb(mdk_rdev_t *rdev, int size)
+{
+ BUG_ON(!rdev->sb_page);
+
+ if (rdev->sb_loaded)
+ return 0;
+
+ if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
+ DMERR("Failed to read device superblock");
+ return -EINVAL;
+ }
+
+ rdev->sb_loaded = 1;
+
+ return 0;
+}
+
+static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ mdk_rdev_t *r, *t;
+ uint64_t failed_devices;
+ struct dm_raid_superblock *sb;
+
+ sb = page_address(rdev->sb_page);
+ failed_devices = le64_to_cpu(sb->failed_devices);
+
+ rdev_for_each(r, t, mddev)
+ if ((r->raid_disk >= 0) && test_bit(Faulty, &r->flags))
+ failed_devices |= (1ULL << r->raid_disk);
+
+ memset(sb, 0, sizeof(*sb));
+
+ sb->magic = cpu_to_le32(DM_RAID_MAGIC);
+ sb->features = cpu_to_le32(0); /* No features yet */
+
+ sb->num_devices = cpu_to_le32(mddev->raid_disks);
+ sb->array_position = cpu_to_le32(rdev->raid_disk);
+
+ sb->events = cpu_to_le64(mddev->events);
+ sb->failed_devices = cpu_to_le64(failed_devices);
+
+ sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
+ sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
+
+ sb->level = cpu_to_le32(mddev->level);
+ sb->layout = cpu_to_le32(mddev->layout);
+ sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
+}
+
+/*
+ * super_load
+ *
+ * This function creates a superblock if one is not found on the device
+ * and will decide which superblock to use if there's a choice.
+ *
+ * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
+ */
+static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev)
+{
+ int ret;
+ struct dm_raid_superblock *sb;
+ struct dm_raid_superblock *refsb;
+ uint64_t events_sb, events_refsb;
+
+ rdev->sb_start = 0;
+ rdev->sb_size = sizeof(*sb);
+
+ ret = read_disk_sb(rdev, rdev->sb_size);
+ if (ret)
+ return ret;
+
+ sb = page_address(rdev->sb_page);
+ if (sb->magic != cpu_to_le32(DM_RAID_MAGIC)) {
+ super_sync(rdev->mddev, rdev);
+
+ set_bit(FirstUse, &rdev->flags);
+
+ /* Force writing of superblocks to disk */
+ set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
+
+ /* Any superblock is better than none, choose that if given */
+ return refdev ? 0 : 1;
+ }
+
+ if (!refdev)
+ return 1;
+
+ events_sb = le64_to_cpu(sb->events);
+
+ refsb = page_address(refdev->sb_page);
+ events_refsb = le64_to_cpu(refsb->events);
+
+ return (events_sb > events_refsb) ? 1 : 0;
+}
+
+static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ int role;
+ struct raid_set *rs = container_of(mddev, struct raid_set, md);
+ uint64_t events_sb;
+ uint64_t failed_devices;
+ struct dm_raid_superblock *sb;
+ uint32_t new_devs = 0;
+ uint32_t rebuilds = 0;
+ mdk_rdev_t *r, *t;
+ struct dm_raid_superblock *sb2;
+
+ sb = page_address(rdev->sb_page);
+ events_sb = le64_to_cpu(sb->events);
+ failed_devices = le64_to_cpu(sb->failed_devices);
+
+ /*
+ * Initialise to 1 if this is a new superblock.
+ */
+ mddev->events = events_sb ? : 1;
+
+ /*
+ * Reshaping is not currently allowed
+ */
+ if ((le32_to_cpu(sb->level) != mddev->level) ||
+ (le32_to_cpu(sb->layout) != mddev->layout) ||
+ (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) {
+ DMERR("Reshaping arrays not yet supported.");
+ return -EINVAL;
+ }
+
+ /* We can only change the number of devices in RAID1 right now */
+ if ((rs->raid_type->level != 1) &&
+ (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
+ DMERR("Reshaping arrays not yet supported.");
+ return -EINVAL;
+ }
+
+ if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
+ mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
+
+ /*
+ * During load, we set FirstUse if a new superblock was written.
+ * There are two reasons we might not have a superblock:
+ * 1) The array is brand new - in which case, all of the
+ * devices must have their In_sync bit set. Also,
+ * recovery_cp must be 0, unless forced.
+ * 2) This is a new device being added to an old array
+ * and the new device needs to be rebuilt - in which
+ * case the In_sync bit will /not/ be set and
+ * recovery_cp must be MaxSector.
+ */
+ rdev_for_each(r, t, mddev) {
+ if (!test_bit(In_sync, &r->flags)) {
+ if (!test_bit(FirstUse, &r->flags))
+ DMERR("Superblock area of "
+ "rebuild device %d should have been "
+ "cleared.", r->raid_disk);
+ set_bit(FirstUse, &r->flags);
+ rebuilds++;
+ } else if (test_bit(FirstUse, &r->flags))
+ new_devs++;
+ }
+
+ if (!rebuilds) {
+ if (new_devs == mddev->raid_disks) {
+ DMINFO("Superblocks created for new array");
+ set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
+ } else if (new_devs) {
+ DMERR("New device injected "
+ "into existing array without 'rebuild' "
+ "parameter specified");
+ return -EINVAL;
+ }
+ } else if (new_devs) {
+ DMERR("'rebuild' devices cannot be "
+ "injected into an array with other first-time devices");
+ return -EINVAL;
+ } else if (mddev->recovery_cp != MaxSector) {
+ DMERR("'rebuild' specified while array is not in-sync");
+ return -EINVAL;
+ }
+
+ /*
+ * Now we set the Faulty bit for those devices that are
+ * recorded in the superblock as failed.
+ */
+ rdev_for_each(r, t, mddev) {
+ if (!r->sb_page)
+ continue;
+ sb2 = page_address(r->sb_page);
+ sb2->failed_devices = 0;
+
+ /*
+ * Check for any device re-ordering.
+ */
+ if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
+ role = le32_to_cpu(sb2->array_position);
+ if (role != r->raid_disk) {
+ if (rs->raid_type->level != 1) {
+ rs->ti->error = "Cannot change device "
+ "positions in RAID array";
+ return -EINVAL;
+ }
+ DMINFO("RAID1 device #%d now at position #%d",
+ role, r->raid_disk);
+ }
+
+ /*
+ * Partial recovery is performed on
+ * returning failed devices.
+ */
+ if (failed_devices & (1ULL << role))
+ set_bit(Faulty, &r->flags);
+ }
+ }
+
+ return 0;
+}
+
+static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ struct dm_raid_superblock *sb = page_address(rdev->sb_page);
+
+ /*
+ * If mddev->events is not set, we know we have not yet initialized
+ * the array.
+ */
+ if (!mddev->events && super_init_validation(mddev, rdev))
+ return -EINVAL;
+
+ mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */
+ rdev->mddev->bitmap_info.default_offset = 4096 >> 9;
+ if (!test_bit(FirstUse, &rdev->flags)) {
+ rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
+ if (rdev->recovery_offset != MaxSector)
+ clear_bit(In_sync, &rdev->flags);
+ }
+
+ /*
+ * If a device comes back, set it as not In_sync and no longer faulty.
+ */
+ if (test_bit(Faulty, &rdev->flags)) {
+ clear_bit(Faulty, &rdev->flags);
+ clear_bit(In_sync, &rdev->flags);
+ rdev->saved_raid_disk = rdev->raid_disk;
+ rdev->recovery_offset = 0;
+ }
+
+ clear_bit(FirstUse, &rdev->flags);
+
+ return 0;
+}
+
+/*
+ * Analyse superblocks and select the freshest.
+ */
+static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
+{
+ int ret;
+ mdk_rdev_t *rdev, *freshest, *tmp;
+ mddev_t *mddev = &rs->md;
+
+ freshest = NULL;
+ rdev_for_each(rdev, tmp, mddev) {
+ if (!rdev->meta_bdev)
+ continue;
+
+ ret = super_load(rdev, freshest);
+
+ switch (ret) {
+ case 1:
+ freshest = rdev;
+ break;
+ case 0:
+ break;
+ default:
+ ti->error = "Failed to load superblock";
+ return ret;
+ }
+ }
+
+ if (!freshest)
+ return 0;
+
+ /*
+ * Validation of the freshest device provides the source of
+ * validation for the remaining devices.
+ */
+ ti->error = "Unable to assemble array: Invalid superblocks";
+ if (super_validate(mddev, freshest))
+ return -EINVAL;
+
+ rdev_for_each(rdev, tmp, mddev)
+ if ((rdev != freshest) && super_validate(mddev, rdev))
+ return -EINVAL;
+
+ return 0;
+}
+
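super_load() is defined earlier in the patch (outside this hunk); analyse_superblocks() relies only on its return convention (1: new freshest, 0: not fresher, <0: error). As a rough, hypothetical sketch of what such a freshness test typically boils down to - an events counter comparison and the field name are assumptions here - it might look like:

/* Hypothetical sketch only; the real test lives in super_load(). */
static int sb_is_fresher(struct dm_raid_superblock *sb,
			 struct dm_raid_superblock *refsb)
{
	if (!refsb)
		return 1;	/* first superblock seen is freshest so far */

	return le64_to_cpu(sb->events) > le64_to_cpu(refsb->events);
}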
+/*
* Construct a RAID4/5/6 mapping:
* Args:
* <raid_type> <#raid_params> <raid_params> \
* <#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
*
- * ** metadata devices are not supported yet, use '-' instead **
- *
* <raid_params> varies by <raid_type>. See 'parse_raid_params' for
* details on possible <raid_params>.
*/
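For illustration, the argument string a hypothetical raid5 table might pass to this constructor (device names and numbers invented) could be:

    raid5_la 3 128 region_size 1024 3 - /dev/sda1 - /dev/sdb1 - /dev/sdc1

i.e. raid type raid5_la, three raid_params words (a 128-sector chunk size plus a region_size pair), then three metadata/data device pairs, with '-' standing in for an absent metadata device.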
@@ -465,8 +962,12 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (ret)
goto bad;
+ rs->md.sync_super = super_sync;
+ ret = analyse_superblocks(ti, rs);
+ if (ret)
+ goto bad;
+
INIT_WORK(&rs->md.event_work, do_table_event);
- ti->split_io = rs->md.chunk_sectors;
ti->private = rs;
mutex_lock(&rs->md.reconfig_mutex);
@@ -482,6 +983,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
rs->callbacks.congested_fn = raid_is_congested;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);
+ mddev_suspend(&rs->md);
return 0;
bad:
@@ -546,12 +1048,17 @@ static int raid_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE:
/* The string you would use to construct this array */
- for (i = 0; i < rs->md.raid_disks; i++)
- if (rs->dev[i].data_dev &&
+ for (i = 0; i < rs->md.raid_disks; i++) {
+ if ((rs->print_flags & DMPF_REBUILD) &&
+ rs->dev[i].data_dev &&
!test_bit(In_sync, &rs->dev[i].rdev.flags))
- raid_param_cnt++; /* for rebuilds */
+ raid_param_cnt += 2; /* for rebuilds */
+ if (rs->dev[i].data_dev &&
+ test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+ raid_param_cnt += 2;
+ }
- raid_param_cnt += (hweight64(rs->print_flags) * 2);
+ raid_param_cnt += (hweight64(rs->print_flags & ~DMPF_REBUILD) * 2);
if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
raid_param_cnt--;
@@ -565,7 +1072,8 @@ static int raid_status(struct dm_target *ti, status_type_t type,
DMEMIT(" nosync");
for (i = 0; i < rs->md.raid_disks; i++)
- if (rs->dev[i].data_dev &&
+ if ((rs->print_flags & DMPF_REBUILD) &&
+ rs->dev[i].data_dev &&
!test_bit(In_sync, &rs->dev[i].rdev.flags))
DMEMIT(" rebuild %u", i);
@@ -579,6 +1087,11 @@ static int raid_status(struct dm_target *ti, status_type_t type,
if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
+ for (i = 0; i < rs->md.raid_disks; i++)
+ if (rs->dev[i].data_dev &&
+ test_bit(WriteMostly, &rs->dev[i].rdev.flags))
+ DMEMIT(" write_mostly %u", i);
+
if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
DMEMIT(" max_write_behind %lu",
rs->md.bitmap_info.max_write_behind);
@@ -591,9 +1104,16 @@ static int raid_status(struct dm_target *ti, status_type_t type,
conf ? conf->max_nr_stripes * 2 : 0);
}
+ if (rs->print_flags & DMPF_REGION_SIZE)
+ DMEMIT(" region_size %lu",
+ rs->md.bitmap_info.chunksize >> 9);
+
DMEMIT(" %d", rs->md.raid_disks);
for (i = 0; i < rs->md.raid_disks; i++) {
- DMEMIT(" -"); /* metadata device */
+ if (rs->dev[i].meta_dev)
+ DMEMIT(" %s", rs->dev[i].meta_dev->name);
+ else
+ DMEMIT(" -");
if (rs->dev[i].data_dev)
DMEMIT(" %s", rs->dev[i].data_dev->name);
@@ -650,12 +1170,13 @@ static void raid_resume(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
+ bitmap_load(&rs->md);
mddev_resume(&rs->md);
}
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 135c2f1fdbf..d1f1d701710 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -58,25 +58,30 @@
#define NUM_SNAPSHOT_HDR_CHUNKS 1
struct disk_header {
- uint32_t magic;
+ __le32 magic;
/*
* Is this snapshot valid. There is no way of recovering
* an invalid snapshot.
*/
- uint32_t valid;
+ __le32 valid;
/*
* Simple, incrementing version. no backward
* compatibility.
*/
- uint32_t version;
+ __le32 version;
/* In sectors */
- uint32_t chunk_size;
-};
+ __le32 chunk_size;
+} __packed;
struct disk_exception {
+ __le64 old_chunk;
+ __le64 new_chunk;
+} __packed;
+
+struct core_exception {
uint64_t old_chunk;
uint64_t new_chunk;
};
@@ -169,10 +174,9 @@ static int alloc_area(struct pstore *ps)
if (!ps->area)
goto err_area;
- ps->zero_area = vmalloc(len);
+ ps->zero_area = vzalloc(len);
if (!ps->zero_area)
goto err_zero_area;
- memset(ps->zero_area, 0, len);
ps->header_area = vmalloc(len);
if (!ps->header_area)
@@ -396,32 +400,32 @@ static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
}
static void read_exception(struct pstore *ps,
- uint32_t index, struct disk_exception *result)
+ uint32_t index, struct core_exception *result)
{
- struct disk_exception *e = get_exception(ps, index);
+ struct disk_exception *de = get_exception(ps, index);
/* copy it */
- result->old_chunk = le64_to_cpu(e->old_chunk);
- result->new_chunk = le64_to_cpu(e->new_chunk);
+ result->old_chunk = le64_to_cpu(de->old_chunk);
+ result->new_chunk = le64_to_cpu(de->new_chunk);
}
static void write_exception(struct pstore *ps,
- uint32_t index, struct disk_exception *de)
+ uint32_t index, struct core_exception *e)
{
- struct disk_exception *e = get_exception(ps, index);
+ struct disk_exception *de = get_exception(ps, index);
/* copy it */
- e->old_chunk = cpu_to_le64(de->old_chunk);
- e->new_chunk = cpu_to_le64(de->new_chunk);
+ de->old_chunk = cpu_to_le64(e->old_chunk);
+ de->new_chunk = cpu_to_le64(e->new_chunk);
}
static void clear_exception(struct pstore *ps, uint32_t index)
{
- struct disk_exception *e = get_exception(ps, index);
+ struct disk_exception *de = get_exception(ps, index);
/* clear it */
- e->old_chunk = 0;
- e->new_chunk = 0;
+ de->old_chunk = 0;
+ de->new_chunk = 0;
}
/*
@@ -437,13 +441,13 @@ static int insert_exceptions(struct pstore *ps,
{
int r;
unsigned int i;
- struct disk_exception de;
+ struct core_exception e;
/* presume the area is full */
*full = 1;
for (i = 0; i < ps->exceptions_per_area; i++) {
- read_exception(ps, i, &de);
+ read_exception(ps, i, &e);
/*
* If the new_chunk is pointing at the start of
@@ -451,7 +455,7 @@ static int insert_exceptions(struct pstore *ps,
* is we know that we've hit the end of the
* exceptions. Therefore the area is not full.
*/
- if (de.new_chunk == 0LL) {
+ if (e.new_chunk == 0LL) {
ps->current_committed = i;
*full = 0;
break;
@@ -460,13 +464,13 @@ static int insert_exceptions(struct pstore *ps,
/*
* Keep track of the start of the free chunks.
*/
- if (ps->next_free <= de.new_chunk)
- ps->next_free = de.new_chunk + 1;
+ if (ps->next_free <= e.new_chunk)
+ ps->next_free = e.new_chunk + 1;
/*
* Otherwise we add the exception to the snapshot.
*/
- r = callback(callback_context, de.old_chunk, de.new_chunk);
+ r = callback(callback_context, e.old_chunk, e.new_chunk);
if (r)
return r;
}
@@ -563,7 +567,7 @@ static int persistent_read_metadata(struct dm_exception_store *store,
ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
sizeof(struct disk_exception);
ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
- sizeof(*ps->callbacks));
+ sizeof(*ps->callbacks));
if (!ps->callbacks)
return -ENOMEM;
@@ -641,12 +645,12 @@ static void persistent_commit_exception(struct dm_exception_store *store,
{
unsigned int i;
struct pstore *ps = get_info(store);
- struct disk_exception de;
+ struct core_exception ce;
struct commit_callback *cb;
- de.old_chunk = e->old_chunk;
- de.new_chunk = e->new_chunk;
- write_exception(ps, ps->current_committed++, &de);
+ ce.old_chunk = e->old_chunk;
+ ce.new_chunk = e->new_chunk;
+ write_exception(ps, ps->current_committed++, &ce);
/*
* Add the callback to the back of the array. This code
@@ -670,7 +674,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
* If we completely filled the current area, then wipe the next one.
*/
if ((ps->current_committed == ps->exceptions_per_area) &&
- zero_disk_area(ps, ps->current_area + 1))
+ zero_disk_area(ps, ps->current_area + 1))
ps->valid = 0;
/*
@@ -701,7 +705,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
chunk_t *last_new_chunk)
{
struct pstore *ps = get_info(store);
- struct disk_exception de;
+ struct core_exception ce;
int nr_consecutive;
int r;
@@ -722,9 +726,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
ps->current_committed = ps->exceptions_per_area;
}
- read_exception(ps, ps->current_committed - 1, &de);
- *last_old_chunk = de.old_chunk;
- *last_new_chunk = de.new_chunk;
+ read_exception(ps, ps->current_committed - 1, &ce);
+ *last_old_chunk = ce.old_chunk;
+ *last_new_chunk = ce.new_chunk;
/*
* Find number of consecutive chunks within the current area,
@@ -733,9 +737,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store,
for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
nr_consecutive++) {
read_exception(ps, ps->current_committed - 1 - nr_consecutive,
- &de);
- if (de.old_chunk != *last_old_chunk - nr_consecutive ||
- de.new_chunk != *last_new_chunk - nr_consecutive)
+ &ce);
+ if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
+ ce.new_chunk != *last_new_chunk - nr_consecutive)
break;
}
@@ -753,7 +757,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
for (i = 0; i < nr_merged; i++)
clear_exception(ps, ps->current_committed - 1 - i);
- r = area_io(ps, WRITE);
+ r = area_io(ps, WRITE_FLUSH_FUA);
if (r < 0)
return r;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 9ecff5f3023..6f758870fc1 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -30,16 +30,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
((ti)->type->name == dm_snapshot_merge_target_name)
/*
- * The percentage increment we will wake up users at
- */
-#define WAKE_UP_PERCENT 5
-
-/*
- * kcopyd priority of snapshot operations
- */
-#define SNAPSHOT_COPY_PRIORITY 2
-
-/*
* The size of the mempool used to track chunks in use.
*/
#define MIN_IOS 256
@@ -180,6 +170,13 @@ struct dm_snap_pending_exception {
* kcopyd.
*/
int started;
+
+ /*
+ * For writing a complete chunk, bypassing the copy.
+ */
+ struct bio *full_bio;
+ bio_end_io_t *full_bio_end_io;
+ void *full_bio_private;
};
/*
@@ -1055,8 +1052,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s) {
- ti->error = "Cannot allocate snapshot context private "
- "structure";
+ ti->error = "Cannot allocate private snapshot structure";
r = -ENOMEM;
goto bad;
}
@@ -1380,6 +1376,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
struct dm_snapshot *s = pe->snap;
struct bio *origin_bios = NULL;
struct bio *snapshot_bios = NULL;
+ struct bio *full_bio = NULL;
int error = 0;
if (!success) {
@@ -1415,10 +1412,15 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
*/
dm_insert_exception(&s->complete, e);
- out:
+out:
dm_remove_exception(&pe->e);
snapshot_bios = bio_list_get(&pe->snapshot_bios);
origin_bios = bio_list_get(&pe->origin_bios);
+ full_bio = pe->full_bio;
+ if (full_bio) {
+ full_bio->bi_end_io = pe->full_bio_end_io;
+ full_bio->bi_private = pe->full_bio_private;
+ }
free_pending_exception(pe);
increment_pending_exceptions_done_count();
@@ -1426,10 +1428,15 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
up_write(&s->lock);
/* Submit any pending write bios */
- if (error)
+ if (error) {
+ if (full_bio)
+ bio_io_error(full_bio);
error_bios(snapshot_bios);
- else
+ } else {
+ if (full_bio)
+ bio_endio(full_bio, 0);
flush_bios(snapshot_bios);
+ }
retry_origin_bios(s, origin_bios);
}
@@ -1480,8 +1487,33 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dest.count = src.count;
/* Hand over to kcopyd */
- dm_kcopyd_copy(s->kcopyd_client,
- &src, 1, &dest, 0, copy_callback, pe);
+ dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
+}
+
+static void full_bio_end_io(struct bio *bio, int error)
+{
+ void *callback_data = bio->bi_private;
+
+ dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
+}
+
+static void start_full_bio(struct dm_snap_pending_exception *pe,
+ struct bio *bio)
+{
+ struct dm_snapshot *s = pe->snap;
+ void *callback_data;
+
+ pe->full_bio = bio;
+ pe->full_bio_end_io = bio->bi_end_io;
+ pe->full_bio_private = bio->bi_private;
+
+ callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
+ copy_callback, pe);
+
+ bio->bi_end_io = full_bio_end_io;
+ bio->bi_private = callback_data;
+
+ generic_make_request(bio);
}
static struct dm_snap_pending_exception *
@@ -1519,6 +1551,7 @@ __find_pending_exception(struct dm_snapshot *s,
bio_list_init(&pe->origin_bios);
bio_list_init(&pe->snapshot_bios);
pe->started = 0;
+ pe->full_bio = NULL;
if (s->store->type->prepare_exception(s->store, &pe->e)) {
free_pending_exception(pe);
@@ -1612,10 +1645,19 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
}
remap_exception(s, &pe->e, bio, chunk);
- bio_list_add(&pe->snapshot_bios, bio);
r = DM_MAPIO_SUBMITTED;
+ if (!pe->started &&
+ bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
+ pe->started = 1;
+ up_write(&s->lock);
+ start_full_bio(pe, bio);
+ goto out;
+ }
+
+ bio_list_add(&pe->snapshot_bios, bio);
+
if (!pe->started) {
/* this is protected by snap->lock */
pe->started = 1;
@@ -1628,9 +1670,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
map_context->ptr = track_chunk(s, chunk);
}
- out_unlock:
+out_unlock:
up_write(&s->lock);
- out:
+out:
return r;
}
@@ -1974,7 +2016,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
pe_to_start_now = pe;
}
- next_snapshot:
+next_snapshot:
up_write(&snap->lock);
if (pe_to_start_now) {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 451c3bb176d..986b8754bb0 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -17,7 +17,7 @@
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define DM_MSG_PREFIX "table"
@@ -54,7 +54,6 @@ struct dm_table {
sector_t *highs;
struct dm_target *targets;
- unsigned discards_supported:1;
unsigned integrity_supported:1;
/*
@@ -154,12 +153,11 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
return NULL;
size = nmemb * elem_size;
- addr = vmalloc(size);
- if (addr)
- memset(addr, 0, size);
+ addr = vzalloc(size);
return addr;
}
+EXPORT_SYMBOL(dm_vcalloc);
/*
* highs, and targets are managed as dynamic arrays during a
@@ -209,7 +207,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
INIT_LIST_HEAD(&t->devices);
INIT_LIST_HEAD(&t->target_callbacks);
atomic_set(&t->holders, 0);
- t->discards_supported = 1;
if (!num_targets)
num_targets = KEYS_PER_NODE;
@@ -281,6 +278,7 @@ void dm_table_get(struct dm_table *t)
{
atomic_inc(&t->holders);
}
+EXPORT_SYMBOL(dm_table_get);
void dm_table_put(struct dm_table *t)
{
@@ -290,6 +288,7 @@ void dm_table_put(struct dm_table *t)
smp_mb__before_atomic_dec();
atomic_dec(&t->holders);
}
+EXPORT_SYMBOL(dm_table_put);
/*
* Checks to see if we need to extend highs or targets.
@@ -455,13 +454,14 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
* Add a device to the list, or just increment the usage count if
* it's already present.
*/
-static int __table_get_device(struct dm_table *t, struct dm_target *ti,
- const char *path, fmode_t mode, struct dm_dev **result)
+int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ struct dm_dev **result)
{
int r;
dev_t uninitialized_var(dev);
struct dm_dev_internal *dd;
unsigned int major, minor;
+ struct dm_table *t = ti->table;
BUG_ON(!t);
@@ -509,6 +509,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
*result = &dd->dm_dev;
return 0;
}
+EXPORT_SYMBOL(dm_get_device);
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
@@ -539,23 +540,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
* If not we'll force DM to use PAGE_SIZE or
* smaller I/O, just to be safe.
*/
-
- if (q->merge_bvec_fn && !ti->type->merge)
+ if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
blk_limits_max_hw_sectors(limits,
(unsigned int) (PAGE_SIZE >> 9));
return 0;
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);
-int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
- struct dm_dev **result)
-{
- return __table_get_device(ti->table, ti, path, mode, result);
-}
-
-
/*
- * Decrement a devices use count and remove it if necessary.
+ * Decrement a device's use count and remove it if necessary.
*/
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
@@ -568,6 +561,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
kfree(dd);
}
}
+EXPORT_SYMBOL(dm_put_device);
/*
* Checks to see if the target joins onto the end of the table.
@@ -791,8 +785,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
- if (!tgt->num_discard_requests)
- t->discards_supported = 0;
+ if (!tgt->num_discard_requests && tgt->discards_supported)
+ DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.",
+ dm_device_name(t->md), type);
return 0;
@@ -802,6 +797,63 @@ int dm_table_add_target(struct dm_table *t, const char *type,
return r;
}
+/*
+ * Target argument parsing helpers.
+ */
+static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned *value, char **error, unsigned grouped)
+{
+ const char *arg_str = dm_shift_arg(arg_set);
+
+ if (!arg_str ||
+ (sscanf(arg_str, "%u", value) != 1) ||
+ (*value < arg->min) ||
+ (*value > arg->max) ||
+ (grouped && arg_set->argc < *value)) {
+ *error = arg->error;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned *value, char **error)
+{
+ return validate_next_arg(arg, arg_set, value, error, 0);
+}
+EXPORT_SYMBOL(dm_read_arg);
+
+int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned *value, char **error)
+{
+ return validate_next_arg(arg, arg_set, value, error, 1);
+}
+EXPORT_SYMBOL(dm_read_arg_group);
+
+const char *dm_shift_arg(struct dm_arg_set *as)
+{
+ char *r;
+
+ if (as->argc) {
+ as->argc--;
+ r = *as->argv;
+ as->argv++;
+ return r;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(dm_shift_arg);
+
+void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
+{
+ BUG_ON(as->argc < num_args);
+ as->argc -= num_args;
+ as->argv += num_args;
+}
+EXPORT_SYMBOL(dm_consume_args);
+
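A minimal sketch of how a target constructor might drive these helpers (the function, argument names and bounds are invented; only dm_shift_arg()/dm_read_arg_group()/dm_consume_args() and the dm_arg fields used by validate_next_arg() are taken from the code above, with the declarations assumed to come from <linux/device-mapper.h>):

/* Illustrative only: parse "<mode> <#opt_args> <opt_args...>" style input. */
static int example_parse_args(struct dm_target *ti, struct dm_arg_set *as)
{
	static struct dm_arg num_opt_args_arg = {
		.min = 0,
		.max = 16,
		.error = "invalid number of optional arguments",
	};
	const char *mode;
	unsigned num_opt_args;

	mode = dm_shift_arg(as);		/* first positional word */
	if (!mode) {
		ti->error = "missing mode argument";
		return -EINVAL;
	}

	/* A bounded count that must be followed by that many arguments. */
	if (dm_read_arg_group(&num_opt_args_arg, as, &num_opt_args, &ti->error))
		return -EINVAL;

	/* This sketch does not interpret the grouped arguments; skip them. */
	dm_consume_args(as, num_opt_args);

	return 0;
}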
static int dm_table_set_type(struct dm_table *t)
{
unsigned i;
@@ -1077,11 +1129,13 @@ void dm_table_event(struct dm_table *t)
t->event_fn(t->event_context);
mutex_unlock(&_event_lock);
}
+EXPORT_SYMBOL(dm_table_event);
sector_t dm_table_get_size(struct dm_table *t)
{
return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
+EXPORT_SYMBOL(dm_table_get_size);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
@@ -1194,9 +1248,45 @@ static void dm_table_set_integrity(struct dm_table *t)
blk_get_integrity(template_disk));
}
+static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ unsigned flush = (*(unsigned *)data);
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+
+ return q && (q->flush_flags & flush);
+}
+
+static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
+{
+ struct dm_target *ti;
+ unsigned i = 0;
+
+ /*
+ * Require at least one underlying device to support flushes.
+ * t->devices includes internal dm devices such as mirror logs
+ * so we need to use iterate_devices here, which targets
+ * supporting flushes must provide.
+ */
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+
+ if (!ti->num_flush_requests)
+ continue;
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, device_flush_capable, &flush))
+ return 1;
+ }
+
+ return 0;
+}
+
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
+ unsigned flush = 0;
+
/*
* Copy table's limits to the DM device's request_queue
*/
@@ -1207,6 +1297,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ if (dm_table_supports_flush(t, REQ_FLUSH)) {
+ flush |= REQ_FLUSH;
+ if (dm_table_supports_flush(t, REQ_FUA))
+ flush |= REQ_FUA;
+ }
+ blk_queue_flush(q, flush);
+
dm_table_set_integrity(t);
/*
@@ -1237,6 +1334,7 @@ fmode_t dm_table_get_mode(struct dm_table *t)
{
return t->mode;
}
+EXPORT_SYMBOL(dm_table_get_mode);
static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
@@ -1345,6 +1443,7 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
{
return t->md;
}
+EXPORT_SYMBOL(dm_table_get_md);
static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
@@ -1359,19 +1458,19 @@ bool dm_table_supports_discards(struct dm_table *t)
struct dm_target *ti;
unsigned i = 0;
- if (!t->discards_supported)
- return 0;
-
/*
* Unless any target used by the table set discards_supported,
* require at least one underlying device to support discards.
* t->devices includes internal dm devices such as mirror logs
* so we need to use iterate_devices here, which targets
- * supporting discard must provide.
+ * supporting discard selectively must provide.
*/
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
+ if (!ti->num_discard_requests)
+ continue;
+
if (ti->discards_supported)
return 1;
@@ -1382,13 +1481,3 @@ bool dm_table_supports_discards(struct dm_table *t)
return 0;
}
-
-EXPORT_SYMBOL(dm_vcalloc);
-EXPORT_SYMBOL(dm_get_device);
-EXPORT_SYMBOL(dm_put_device);
-EXPORT_SYMBOL(dm_table_event);
-EXPORT_SYMBOL(dm_table_get_size);
-EXPORT_SYMBOL(dm_table_get_mode);
-EXPORT_SYMBOL(dm_table_get_md);
-EXPORT_SYMBOL(dm_table_put);
-EXPORT_SYMBOL(dm_table_get);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0cf68b47887..52b39f335bb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -37,6 +37,8 @@ static const char *_name = DM_NAME;
static unsigned int major = 0;
static unsigned int _major = 0;
+static DEFINE_IDR(_minor_idr);
+
static DEFINE_SPINLOCK(_minor_lock);
/*
* For bio-based dm.
@@ -109,6 +111,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
+#define DMF_MERGE_IS_OPTIONAL 6
/*
* Work processed by per-device workqueue.
@@ -313,6 +316,12 @@ static void __exit dm_exit(void)
while (i--)
_exits[i]();
+
+ /*
+ * Should be empty by this point.
+ */
+ idr_remove_all(&_minor_idr);
+ idr_destroy(&_minor_idr);
}
/*
@@ -1171,7 +1180,8 @@ static int __clone_and_map_discard(struct clone_info *ci)
/*
* Even though the device advertised discard support,
- * reconfiguration might have changed that since the
+ * that does not mean every target supports it, and
+ * reconfiguration might also have changed that since the
* check was performed.
*/
if (!ti->num_discard_requests)
@@ -1705,8 +1715,6 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
/*-----------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
*---------------------------------------------------------------*/
-static DEFINE_IDR(_minor_idr);
-
static void free_minor(int minor)
{
spin_lock(&_minor_lock);
@@ -1800,7 +1808,6 @@ static void dm_init_md_queue(struct mapped_device *md)
blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(md->queue, dm_merge_bvec);
- blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
}
/*
@@ -1986,6 +1993,59 @@ static void __set_size(struct mapped_device *md, sector_t size)
}
/*
+ * Return 1 if the queue has a compulsory merge_bvec_fn.
+ *
+ * If this function returns 0, then the device is either a non-dm
+ * device without a merge_bvec_fn, or it is a dm device that is
+ * able to split any bios it receives that are too big.
+ */
+int dm_queue_merge_is_compulsory(struct request_queue *q)
+{
+ struct mapped_device *dev_md;
+
+ if (!q->merge_bvec_fn)
+ return 0;
+
+ if (q->make_request_fn == dm_request) {
+ dev_md = q->queuedata;
+ if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
+ return 0;
+ }
+
+ return 1;
+}
+
+static int dm_device_merge_is_compulsory(struct dm_target *ti,
+ struct dm_dev *dev, sector_t start,
+ sector_t len, void *data)
+{
+ struct block_device *bdev = dev->bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ return dm_queue_merge_is_compulsory(q);
+}
+
+/*
+ * Return 1 if it is acceptable to ignore merge_bvec_fn based
+ * on the properties of the underlying devices.
+ */
+static int dm_table_merge_is_optional(struct dm_table *table)
+{
+ unsigned i = 0;
+ struct dm_target *ti;
+
+ while (i < dm_table_get_num_targets(table)) {
+ ti = dm_table_get_target(table, i++);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
* Returns old map, which caller must destroy.
*/
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
@@ -1995,6 +2055,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct request_queue *q = md->queue;
sector_t size;
unsigned long flags;
+ int merge_is_optional;
size = dm_table_get_size(t);
@@ -2020,10 +2081,16 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
__bind_mempools(md, t);
+ merge_is_optional = dm_table_merge_is_optional(t);
+
write_lock_irqsave(&md->map_lock, flags);
old_map = md->map;
md->map = t;
dm_table_set_restrictions(t, q, limits);
+ if (merge_is_optional)
+ set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
+ else
+ clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
write_unlock_irqrestore(&md->map_lock, flags);
return old_map;
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 1aaf16746da..6745dbd278a 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -66,6 +66,8 @@ int dm_table_alloc_md_mempools(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
+int dm_queue_merge_is_compulsory(struct request_queue *q);
+
void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, unsigned type);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index abfb59a61ed..6cd2c313e80 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -213,12 +213,6 @@ static int linear_run (mddev_t *mddev)
return md_integrity_register(mddev);
}
-static void free_conf(struct rcu_head *head)
-{
- linear_conf_t *conf = container_of(head, linear_conf_t, rcu);
- kfree(conf);
-}
-
static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
{
/* Adding a drive to a linear array allows the array to grow.
@@ -247,7 +241,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
- call_rcu(&oldconf->rcu, free_conf);
+ kfree_rcu(oldconf, rcu);
return 0;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4332fc2f25d..8e221a20f5d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -215,6 +215,55 @@ struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
+void md_trim_bio(struct bio *bio, int offset, int size)
+{
+ /* 'bio' is a cloned bio which we need to trim to match
+ * the given offset and size.
+ * This requires adjusting bi_sector, bi_size, and bi_io_vec
+ */
+ int i;
+ struct bio_vec *bvec;
+ int sofar = 0;
+
+ size <<= 9;
+ if (offset == 0 && size == bio->bi_size)
+ return;
+
+ bio->bi_sector += offset;
+ bio->bi_size = size;
+ offset <<= 9;
+ clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+
+ while (bio->bi_idx < bio->bi_vcnt &&
+ bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
+ /* remove this whole bio_vec */
+ offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
+ bio->bi_idx++;
+ }
+ if (bio->bi_idx < bio->bi_vcnt) {
+ bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
+ bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
+ }
+	/* avoid any complications with bi_idx being non-zero */
+ if (bio->bi_idx) {
+ memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
+ (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
+ bio->bi_vcnt -= bio->bi_idx;
+ bio->bi_idx = 0;
+ }
+ /* Make sure vcnt and last bv are not too big */
+ bio_for_each_segment(bvec, bio, i) {
+ if (sofar + bvec->bv_len > size)
+ bvec->bv_len = size - sofar;
+ if (bvec->bv_len == 0) {
+ bio->bi_vcnt = i;
+ break;
+ }
+ sofar += bvec->bv_len;
+ }
+}
+EXPORT_SYMBOL_GPL(md_trim_bio);
+
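A minimal usage sketch (the wrapper below is invented; bio_clone_mddev() is the helper exported just above in this file), showing a clone being narrowed to a sub-range before resubmission - a real caller would also install its own bi_end_io first:

/* Illustrative only: resubmit just part of 'bio' via a trimmed clone. */
static void submit_partial_clone(mddev_t *mddev, struct bio *bio,
				 int offset, int sectors)
{
	struct bio *clone = bio_clone_mddev(bio, GFP_NOIO, mddev);

	if (!clone)
		return;				/* error handling elided */

	md_trim_bio(clone, offset, sectors);	/* both values in sectors */
	generic_make_request(clone);
}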
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
@@ -757,6 +806,10 @@ static void free_disk_sb(mdk_rdev_t * rdev)
rdev->sb_start = 0;
rdev->sectors = 0;
}
+ if (rdev->bb_page) {
+ put_page(rdev->bb_page);
+ rdev->bb_page = NULL;
+ }
}
@@ -1025,7 +1078,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
ret = -EINVAL;
bdevname(rdev->bdev, b);
- sb = (mdp_super_t*)page_address(rdev->sb_page);
+ sb = page_address(rdev->sb_page);
if (sb->md_magic != MD_SB_MAGIC) {
printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
@@ -1054,6 +1107,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
rdev->preferred_minor = sb->md_minor;
rdev->data_offset = 0;
rdev->sb_size = MD_SB_BYTES;
+ rdev->badblocks.shift = -1;
if (sb->level == LEVEL_MULTIPATH)
rdev->desc_nr = -1;
@@ -1064,7 +1118,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
ret = 1;
} else {
__u64 ev1, ev2;
- mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
+ mdp_super_t *refsb = page_address(refdev->sb_page);
if (!uuid_equal(refsb, sb)) {
printk(KERN_WARNING "md: %s has different UUID to %s\n",
b, bdevname(refdev->bdev,b2));
@@ -1099,7 +1153,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
mdp_disk_t *desc;
- mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
+ mdp_super_t *sb = page_address(rdev->sb_page);
__u64 ev1 = md_event(sb);
rdev->raid_disk = -1;
@@ -1230,7 +1284,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
rdev->sb_size = MD_SB_BYTES;
- sb = (mdp_super_t*)page_address(rdev->sb_page);
+ sb = page_address(rdev->sb_page);
memset(sb, 0, sizeof(*sb));
@@ -1395,6 +1449,8 @@ static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
return cpu_to_le32(csum);
}
+static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
+ int acknowledged);
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
struct mdp_superblock_1 *sb;
@@ -1435,7 +1491,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
if (ret) return ret;
- sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
+ sb = page_address(rdev->sb_page);
if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
sb->major_version != cpu_to_le32(1) ||
@@ -1473,12 +1529,52 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
else
rdev->desc_nr = le32_to_cpu(sb->dev_number);
+ if (!rdev->bb_page) {
+ rdev->bb_page = alloc_page(GFP_KERNEL);
+ if (!rdev->bb_page)
+ return -ENOMEM;
+ }
+ if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
+ rdev->badblocks.count == 0) {
+ /* need to load the bad block list.
+ * Currently we limit it to one page.
+ */
+ s32 offset;
+ sector_t bb_sector;
+ u64 *bbp;
+ int i;
+ int sectors = le16_to_cpu(sb->bblog_size);
+ if (sectors > (PAGE_SIZE / 512))
+ return -EINVAL;
+ offset = le32_to_cpu(sb->bblog_offset);
+ if (offset == 0)
+ return -EINVAL;
+ bb_sector = (long long)offset;
+ if (!sync_page_io(rdev, bb_sector, sectors << 9,
+ rdev->bb_page, READ, true))
+ return -EIO;
+ bbp = (u64 *)page_address(rdev->bb_page);
+ rdev->badblocks.shift = sb->bblog_shift;
+ for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
+ u64 bb = le64_to_cpu(*bbp);
+ int count = bb & (0x3ff);
+ u64 sector = bb >> 10;
+ sector <<= sb->bblog_shift;
+ count <<= sb->bblog_shift;
+ if (bb + 1 == 0)
+ break;
+ if (md_set_badblocks(&rdev->badblocks,
+ sector, count, 1) == 0)
+ return -EINVAL;
+ }
+ } else if (sb->bblog_offset == 0)
+ rdev->badblocks.shift = -1;
+
if (!refdev) {
ret = 1;
} else {
__u64 ev1, ev2;
- struct mdp_superblock_1 *refsb =
- (struct mdp_superblock_1*)page_address(refdev->sb_page);
+ struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
sb->level != refsb->level ||
@@ -1513,7 +1609,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
- struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
+ struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
__u64 ev1 = le64_to_cpu(sb->events);
rdev->raid_disk = -1;
@@ -1619,13 +1715,12 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
int max_dev, i;
/* make rdev->sb match mddev and rdev data. */
- sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
+ sb = page_address(rdev->sb_page);
sb->feature_map = 0;
sb->pad0 = 0;
sb->recovery_offset = cpu_to_le64(0);
memset(sb->pad1, 0, sizeof(sb->pad1));
- memset(sb->pad2, 0, sizeof(sb->pad2));
memset(sb->pad3, 0, sizeof(sb->pad3));
sb->utime = cpu_to_le64((__u64)mddev->utime);
@@ -1665,6 +1760,40 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
}
+ if (rdev->badblocks.count == 0)
+		/* Nothing to do for bad blocks */ ;
+ else if (sb->bblog_offset == 0)
+ /* Cannot record bad blocks on this device */
+ md_error(mddev, rdev);
+ else {
+ struct badblocks *bb = &rdev->badblocks;
+ u64 *bbp = (u64 *)page_address(rdev->bb_page);
+ u64 *p = bb->page;
+ sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
+ if (bb->changed) {
+ unsigned seq;
+
+retry:
+ seq = read_seqbegin(&bb->lock);
+
+ memset(bbp, 0xff, PAGE_SIZE);
+
+ for (i = 0 ; i < bb->count ; i++) {
+ u64 internal_bb = *p++;
+ u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
+ | BB_LEN(internal_bb));
+ *bbp++ = cpu_to_le64(store_bb);
+ }
+ if (read_seqretry(&bb->lock, seq))
+ goto retry;
+
+ bb->sector = (rdev->sb_start +
+ (int)le32_to_cpu(sb->bblog_offset));
+ bb->size = le16_to_cpu(sb->bblog_size);
+ bb->changed = 0;
+ }
+ }
+
max_dev = 0;
list_for_each_entry(rdev2, &mddev->disks, same_set)
if (rdev2->desc_nr+1 > max_dev)
@@ -1724,7 +1853,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
num_sectors = max_sectors;
rdev->sb_start = sb_start;
}
- sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
+ sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
sb->super_offset = rdev->sb_start;
sb->sb_csum = calc_sb_1_csum(sb);
@@ -1922,7 +2051,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
bd_link_disk_holder(rdev->bdev, mddev->gendisk);
/* May as well allow recovery to be retried once */
- mddev->recovery_disabled = 0;
+ mddev->recovery_disabled++;
return 0;
@@ -1953,6 +2082,9 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
rdev->sysfs_state = NULL;
+ kfree(rdev->badblocks.page);
+ rdev->badblocks.count = 0;
+ rdev->badblocks.page = NULL;
/* We need to delay this, otherwise we can deadlock when
* writing to 'remove' to "dev/state". We also need
* to delay it due to rcu usage.
@@ -2127,10 +2259,10 @@ static void print_rdev(mdk_rdev_t *rdev, int major_version)
printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
switch (major_version) {
case 0:
- print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
+ print_sb_90(page_address(rdev->sb_page));
break;
case 1:
- print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
+ print_sb_1(page_address(rdev->sb_page));
break;
}
} else
@@ -2194,6 +2326,7 @@ static void md_update_sb(mddev_t * mddev, int force_change)
mdk_rdev_t *rdev;
int sync_req;
int nospares = 0;
+ int any_badblocks_changed = 0;
repeat:
/* First make sure individual recovery_offsets are correct */
@@ -2208,8 +2341,18 @@ repeat:
if (!mddev->persistent) {
clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
clear_bit(MD_CHANGE_DEVS, &mddev->flags);
- if (!mddev->external)
+ if (!mddev->external) {
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (rdev->badblocks.changed) {
+ md_ack_all_badblocks(&rdev->badblocks);
+ md_error(mddev, rdev);
+ }
+ clear_bit(Blocked, &rdev->flags);
+ clear_bit(BlockedBadBlocks, &rdev->flags);
+ wake_up(&rdev->blocked_wait);
+ }
+ }
wake_up(&mddev->sb_wait);
return;
}
@@ -2265,6 +2408,14 @@ repeat:
MD_BUG();
mddev->events --;
}
+
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (rdev->badblocks.changed)
+ any_badblocks_changed++;
+ if (test_bit(Faulty, &rdev->flags))
+ set_bit(FaultRecorded, &rdev->flags);
+ }
+
sync_sbs(mddev, nospares);
spin_unlock_irq(&mddev->write_lock);
@@ -2290,6 +2441,13 @@ repeat:
bdevname(rdev->bdev,b),
(unsigned long long)rdev->sb_start);
rdev->sb_events = mddev->events;
+ if (rdev->badblocks.size) {
+ md_super_write(mddev, rdev,
+ rdev->badblocks.sector,
+ rdev->badblocks.size << 9,
+ rdev->bb_page);
+ rdev->badblocks.size = 0;
+ }
} else
dprintk(")\n");
@@ -2313,6 +2471,15 @@ repeat:
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (test_and_clear_bit(FaultRecorded, &rdev->flags))
+ clear_bit(Blocked, &rdev->flags);
+
+ if (any_badblocks_changed)
+ md_ack_all_badblocks(&rdev->badblocks);
+ clear_bit(BlockedBadBlocks, &rdev->flags);
+ wake_up(&rdev->blocked_wait);
+ }
}
/* words written to sysfs files may, or may not, be \n terminated.
@@ -2347,7 +2514,8 @@ state_show(mdk_rdev_t *rdev, char *page)
char *sep = "";
size_t len = 0;
- if (test_bit(Faulty, &rdev->flags)) {
+ if (test_bit(Faulty, &rdev->flags) ||
+ rdev->badblocks.unacked_exist) {
len+= sprintf(page+len, "%sfaulty",sep);
sep = ",";
}
@@ -2359,7 +2527,8 @@ state_show(mdk_rdev_t *rdev, char *page)
len += sprintf(page+len, "%swrite_mostly",sep);
sep = ",";
}
- if (test_bit(Blocked, &rdev->flags)) {
+ if (test_bit(Blocked, &rdev->flags) ||
+ rdev->badblocks.unacked_exist) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
@@ -2368,6 +2537,10 @@ state_show(mdk_rdev_t *rdev, char *page)
len += sprintf(page+len, "%sspare", sep);
sep = ",";
}
+ if (test_bit(WriteErrorSeen, &rdev->flags)) {
+ len += sprintf(page+len, "%swrite_error", sep);
+ sep = ",";
+ }
return len+sprintf(page+len, "\n");
}
@@ -2375,13 +2548,15 @@ static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
/* can write
- * faulty - simulates and error
+ * faulty - simulates an error
* remove - disconnects the device
* writemostly - sets write_mostly
* -writemostly - clears write_mostly
- * blocked - sets the Blocked flag
- * -blocked - clears the Blocked flag
+	 *  blocked - sets the Blocked flag
+ * -blocked - clears the Blocked and possibly simulates an error
* insync - sets Insync providing device isn't active
+ * write_error - sets WriteErrorSeen
+ * -write_error - clears WriteErrorSeen
*/
int err = -EINVAL;
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
@@ -2408,7 +2583,15 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
set_bit(Blocked, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-blocked")) {
+ if (!test_bit(Faulty, &rdev->flags) &&
+ test_bit(BlockedBadBlocks, &rdev->flags)) {
+ /* metadata handler doesn't understand badblocks,
+ * so we need to fail the device
+ */
+ md_error(rdev->mddev, rdev);
+ }
clear_bit(Blocked, &rdev->flags);
+ clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
@@ -2417,6 +2600,12 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
+ } else if (cmd_match(buf, "write_error")) {
+ set_bit(WriteErrorSeen, &rdev->flags);
+ err = 0;
+ } else if (cmd_match(buf, "-write_error")) {
+ clear_bit(WriteErrorSeen, &rdev->flags);
+ err = 0;
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -2459,7 +2648,6 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
char *e;
int err;
- char nm[20];
int slot = simple_strtoul(buf, &e, 10);
if (strncmp(buf, "none", 4)==0)
slot = -1;
@@ -2482,8 +2670,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
hot_remove_disk(rdev->mddev, rdev->raid_disk);
if (err)
return err;
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&rdev->mddev->kobj, nm);
+ sysfs_unlink_rdev(rdev->mddev, rdev);
rdev->raid_disk = -1;
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
@@ -2522,8 +2709,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
return err;
} else
sysfs_notify_dirent_safe(rdev->sysfs_state);
- sprintf(nm, "rd%d", rdev->raid_disk);
- if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
+ if (sysfs_link_rdev(rdev->mddev, rdev))
/* failure here is OK */;
/* don't wakeup anyone, leave that to userspace. */
} else {
@@ -2712,6 +2898,39 @@ static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t le
static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
+
+static ssize_t
+badblocks_show(struct badblocks *bb, char *page, int unack);
+static ssize_t
+badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);
+
+static ssize_t bb_show(mdk_rdev_t *rdev, char *page)
+{
+ return badblocks_show(&rdev->badblocks, page, 0);
+}
+static ssize_t bb_store(mdk_rdev_t *rdev, const char *page, size_t len)
+{
+ int rv = badblocks_store(&rdev->badblocks, page, len, 0);
+ /* Maybe that ack was all we needed */
+ if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
+ wake_up(&rdev->blocked_wait);
+ return rv;
+}
+static struct rdev_sysfs_entry rdev_bad_blocks =
+__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
+
+
+static ssize_t ubb_show(mdk_rdev_t *rdev, char *page)
+{
+ return badblocks_show(&rdev->badblocks, page, 1);
+}
+static ssize_t ubb_store(mdk_rdev_t *rdev, const char *page, size_t len)
+{
+ return badblocks_store(&rdev->badblocks, page, len, 1);
+}
+static struct rdev_sysfs_entry rdev_unack_bad_blocks =
+__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
+
static struct attribute *rdev_default_attrs[] = {
&rdev_state.attr,
&rdev_errors.attr,
@@ -2719,6 +2938,8 @@ static struct attribute *rdev_default_attrs[] = {
&rdev_offset.attr,
&rdev_size.attr,
&rdev_recovery_start.attr,
+ &rdev_bad_blocks.attr,
+ &rdev_unack_bad_blocks.attr,
NULL,
};
static ssize_t
@@ -2782,7 +3003,7 @@ static struct kobj_type rdev_ktype = {
.default_attrs = rdev_default_attrs,
};
-void md_rdev_init(mdk_rdev_t *rdev)
+int md_rdev_init(mdk_rdev_t *rdev)
{
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
@@ -2792,12 +3013,27 @@ void md_rdev_init(mdk_rdev_t *rdev)
rdev->sb_events = 0;
rdev->last_read_error.tv_sec = 0;
rdev->last_read_error.tv_nsec = 0;
+ rdev->sb_loaded = 0;
+ rdev->bb_page = NULL;
atomic_set(&rdev->nr_pending, 0);
atomic_set(&rdev->read_errors, 0);
atomic_set(&rdev->corrected_errors, 0);
INIT_LIST_HEAD(&rdev->same_set);
init_waitqueue_head(&rdev->blocked_wait);
+
+ /* Add space to store bad block list.
+ * This reserves the space even on arrays where it cannot
+ * be used - I wonder if that matters
+ */
+ rdev->badblocks.count = 0;
+ rdev->badblocks.shift = 0;
+ rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ seqlock_init(&rdev->badblocks.lock);
+ if (rdev->badblocks.page == NULL)
+ return -ENOMEM;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(md_rdev_init);
/*
@@ -2823,8 +3059,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
return ERR_PTR(-ENOMEM);
}
- md_rdev_init(rdev);
- if ((err = alloc_disk_sb(rdev)))
+ err = md_rdev_init(rdev);
+ if (err)
+ goto abort_free;
+ err = alloc_disk_sb(rdev);
+ if (err)
goto abort_free;
err = lock_rdev(rdev, newdev, super_format == -2);
@@ -2860,15 +3099,17 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
goto abort_free;
}
}
+ if (super_format == -1)
+ /* hot-add for 0.90, or non-persistent: so no badblocks */
+ rdev->badblocks.shift = -1;
return rdev;
abort_free:
- if (rdev->sb_page) {
- if (rdev->bdev)
- unlock_rdev(rdev);
- free_disk_sb(rdev);
- }
+ if (rdev->bdev)
+ unlock_rdev(rdev);
+ free_disk_sb(rdev);
+ kfree(rdev->badblocks.page);
kfree(rdev);
return ERR_PTR(err);
}
@@ -3149,15 +3390,13 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
}
list_for_each_entry(rdev, &mddev->disks, same_set) {
- char nm[20];
if (rdev->raid_disk < 0)
continue;
if (rdev->new_raid_disk >= mddev->raid_disks)
rdev->new_raid_disk = -1;
if (rdev->new_raid_disk == rdev->raid_disk)
continue;
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
+ sysfs_unlink_rdev(mddev, rdev);
}
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->raid_disk < 0)
@@ -3168,11 +3407,10 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
if (rdev->raid_disk < 0)
clear_bit(In_sync, &rdev->flags);
else {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- if(sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
- printk("md: cannot register %s for %s after level change\n",
- nm, mdname(mddev));
+ if (sysfs_link_rdev(mddev, rdev))
+ printk(KERN_WARNING "md: cannot register rd%d"
+ " for %s after level change\n",
+ rdev->raid_disk, mdname(mddev));
}
}
@@ -4504,7 +4742,8 @@ int md_run(mddev_t *mddev)
}
if (mddev->bio_set == NULL)
- mddev->bio_set = bioset_create(BIO_POOL_SIZE, sizeof(mddev));
+ mddev->bio_set = bioset_create(BIO_POOL_SIZE,
+ sizeof(mddev_t *));
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
@@ -4621,12 +4860,9 @@ int md_run(mddev_t *mddev)
smp_wmb();
mddev->ready = 1;
list_for_each_entry(rdev, &mddev->disks, same_set)
- if (rdev->raid_disk >= 0) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
+ if (rdev->raid_disk >= 0)
+ if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
- }
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -4854,11 +5090,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
sysfs_notify_dirent_safe(mddev->sysfs_state);
list_for_each_entry(rdev, &mddev->disks, same_set)
- if (rdev->raid_disk >= 0) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
- }
+ if (rdev->raid_disk >= 0)
+ sysfs_unlink_rdev(mddev, rdev);
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
@@ -6198,18 +6431,7 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
if (!rdev || test_bit(Faulty, &rdev->flags))
return;
- if (mddev->external)
- set_bit(Blocked, &rdev->flags);
-/*
- dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
- mdname(mddev),
- MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
- __builtin_return_address(0),__builtin_return_address(1),
- __builtin_return_address(2),__builtin_return_address(3));
-*/
- if (!mddev->pers)
- return;
- if (!mddev->pers->error_handler)
+ if (!mddev->pers || !mddev->pers->error_handler)
return;
mddev->pers->error_handler(mddev,rdev);
if (mddev->degraded)
@@ -6394,16 +6616,11 @@ static void md_seq_stop(struct seq_file *seq, void *v)
mddev_put(mddev);
}
-struct mdstat_info {
- int event;
-};
-
static int md_seq_show(struct seq_file *seq, void *v)
{
mddev_t *mddev = v;
sector_t sectors;
mdk_rdev_t *rdev;
- struct mdstat_info *mi = seq->private;
struct bitmap *bitmap;
if (v == (void*)1) {
@@ -6415,7 +6632,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
spin_unlock(&pers_lock);
seq_printf(seq, "\n");
- mi->event = atomic_read(&md_event_count);
+ seq->poll_event = atomic_read(&md_event_count);
return 0;
}
if (v == (void*)2) {
@@ -6527,26 +6744,21 @@ static const struct seq_operations md_seq_ops = {
static int md_seq_open(struct inode *inode, struct file *file)
{
+ struct seq_file *seq;
int error;
- struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
- if (mi == NULL)
- return -ENOMEM;
error = seq_open(file, &md_seq_ops);
if (error)
- kfree(mi);
- else {
- struct seq_file *p = file->private_data;
- p->private = mi;
- mi->event = atomic_read(&md_event_count);
- }
+ return error;
+
+ seq = file->private_data;
+ seq->poll_event = atomic_read(&md_event_count);
return error;
}
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
- struct seq_file *m = filp->private_data;
- struct mdstat_info *mi = m->private;
+ struct seq_file *seq = filp->private_data;
int mask;
poll_wait(filp, &md_event_waiters, wait);
@@ -6554,7 +6766,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
/* always allow read */
mask = POLLIN | POLLRDNORM;
- if (mi->event != atomic_read(&md_event_count))
+ if (seq->poll_event != atomic_read(&md_event_count))
mask |= POLLERR | POLLPRI;
return mask;
}
@@ -6943,11 +7155,14 @@ void md_do_sync(mddev_t *mddev)
atomic_add(sectors, &mddev->recovery_active);
}
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ break;
+
j += sectors;
if (j>1) mddev->curr_resync = j;
mddev->curr_mark_cnt = io_sectors;
if (last_check == 0)
- /* this is the earliers that rebuilt will be
+ /* this is the earliest that rebuild will be
* visible in /proc/mdstat
*/
md_new_event(mddev);
@@ -6956,10 +7171,6 @@ void md_do_sync(mddev_t *mddev)
continue;
last_check = io_sectors;
-
- if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
- break;
-
repeat:
if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
/* step marks */
@@ -7077,28 +7288,23 @@ static int remove_and_add_spares(mddev_t *mddev)
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
mddev, rdev->raid_disk)==0) {
- char nm[20];
- sprintf(nm,"rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
+ sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
}
}
- if (mddev->degraded && !mddev->recovery_disabled) {
+ if (mddev->degraded) {
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
- !test_bit(Blocked, &rdev->flags))
+ !test_bit(Faulty, &rdev->flags))
spares++;
if (rdev->raid_disk < 0
&& !test_bit(Faulty, &rdev->flags)) {
rdev->recovery_offset = 0;
if (mddev->pers->
hot_add_disk(mddev, rdev) == 0) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- if (sysfs_create_link(&mddev->kobj,
- &rdev->kobj, nm))
+ if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
spares++;
md_new_event(mddev);
@@ -7147,6 +7353,8 @@ static void reap_sync_thread(mddev_t *mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_action);
md_new_event(mddev);
+ if (mddev->event_work.func)
+ queue_work(md_misc_wq, &mddev->event_work);
}
/*
@@ -7179,9 +7387,6 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->bitmap)
bitmap_daemon_work(mddev);
- if (mddev->ro)
- return;
-
if (signal_pending(current)) {
if (mddev->pers->sync_request && !mddev->external) {
printk(KERN_INFO "md: %s in immediate safe mode\n",
@@ -7218,9 +7423,7 @@ void md_check_recovery(mddev_t *mddev)
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
mddev, rdev->raid_disk)==0) {
- char nm[20];
- sprintf(nm,"rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
+ sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
}
}
@@ -7340,12 +7543,499 @@ void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
sysfs_notify_dirent_safe(rdev->sysfs_state);
wait_event_timeout(rdev->blocked_wait,
- !test_bit(Blocked, &rdev->flags),
+ !test_bit(Blocked, &rdev->flags) &&
+ !test_bit(BlockedBadBlocks, &rdev->flags),
msecs_to_jiffies(5000));
rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
+
+/* Bad block management.
+ * We can record which blocks on each device are 'bad' and so just
+ * fail those blocks, or that stripe, rather than the whole device.
+ * Entries in the bad-block table are 64bits wide. This comprises:
+ * Length of bad-range, in sectors: 0-511 for lengths 1-512
+ * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
+ * A 'shift' can be set so that larger blocks are tracked and
+ * consequently larger devices can be covered.
+ * 'Acknowledged' flag - 1 bit. - the most significant bit.
+ *
+ * Locking of the bad-block table uses a seqlock so md_is_badblock
+ * might need to retry if it is very unlucky.
+ * We will sometimes want to check for bad blocks in a bi_end_io function,
+ * so we use the write_seqlock_irq variant.
+ *
+ * When looking for a bad block we specify a range and want to
+ * know if any block in the range is bad. So we binary-search
+ * to the last range that starts at-or-before the given endpoint,
+ * (or "before the sector after the target range")
+ * then see if it ends after the given start.
+ * We return
+ * 0 if there are no known bad blocks in the range
+ * 1 if there are known bad blocks which are all acknowledged
+ * -1 if there are bad blocks which have not yet been acknowledged in metadata.
+ * plus the start/length of the first bad section we overlap.
+ */
+int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
+ sector_t *first_bad, int *bad_sectors)
+{
+ int hi;
+ int lo = 0;
+ u64 *p = bb->page;
+ int rv = 0;
+ sector_t target = s + sectors;
+ unsigned seq;
+
+ if (bb->shift > 0) {
+ /* round the start down, and the end up */
+ s >>= bb->shift;
+ target += (1<<bb->shift) - 1;
+ target >>= bb->shift;
+ sectors = target - s;
+ }
+ /* 'target' is now the first block after the bad range */
+
+retry:
+ seq = read_seqbegin(&bb->lock);
+
+ hi = bb->count;
+
+ /* Binary search between lo and hi for 'target'
+ * i.e. for the last range that starts before 'target'
+ */
+ /* INVARIANT: ranges before 'lo' and at-or-after 'hi'
+ * are known not to be the last range before target.
+ * VARIANT: hi-lo is the number of possible
+ * ranges, and decreases until it reaches 1
+ */
+ while (hi - lo > 1) {
+ int mid = (lo + hi) / 2;
+ sector_t a = BB_OFFSET(p[mid]);
+ if (a < target)
+ /* This could still be the one, earlier ranges
+ * could not. */
+ lo = mid;
+ else
+ /* This and later ranges are definitely out. */
+ hi = mid;
+ }
+ /* 'lo' might be the last that started before target, but 'hi' isn't */
+ if (hi > lo) {
+		/* need to check all ranges that end after 's' to see if
+ * any are unacknowledged.
+ */
+ while (lo >= 0 &&
+ BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
+ if (BB_OFFSET(p[lo]) < target) {
+ /* starts before the end, and finishes after
+ * the start, so they must overlap
+ */
+ if (rv != -1 && BB_ACK(p[lo]))
+ rv = 1;
+ else
+ rv = -1;
+ *first_bad = BB_OFFSET(p[lo]);
+ *bad_sectors = BB_LEN(p[lo]);
+ }
+ lo--;
+ }
+ }
+
+ if (read_seqretry(&bb->lock, seq))
+ goto retry;
+
+ return rv;
+}
+EXPORT_SYMBOL_GPL(md_is_badblock);
+
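The 64-bit entries described above are handled through the BB_OFFSET/BB_LEN/BB_ACK/BB_MAKE helpers used throughout this code; their definitions live in md.h, outside this hunk. A reconstruction, offered only as a reference sketch (the real macros may differ in detail):

/* Assumed in-memory layout: bit 63 = acknowledged, bits 62..9 = start
 * sector, bits 8..0 = length - 1 (so 1..512 sectors per entry). */
#define BB_LEN_MASK	(0x00000000000001FFULL)
#define BB_OFFSET_MASK	(0x7FFFFFFFFFFFFE00ULL)
#define BB_ACK_MASK	(0x8000000000000000ULL)
#define BB_MAX_LEN	512
#define BB_OFFSET(x)	(((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x)	(((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
#define BB_MAKE(a, l, ack) (((a) << 9) | ((l) - 1) | ((u64)(!!(ack)) << 63))

Note that the on-disk encoding in the v1 superblock differs: super_1_load() and super_1_sync() above use a 10-bit length field and no ack bit, i.e. (BB_OFFSET << 10) | BB_LEN.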
+/*
+ * Add a range of bad blocks to the table.
+ * This might extend the table, or might contract it
+ * if two adjacent ranges can be merged.
+ * We binary-search to find the 'insertion' point, then
+ * decide how best to handle it.
+ */
+static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
+ int acknowledged)
+{
+ u64 *p;
+ int lo, hi;
+ int rv = 1;
+
+ if (bb->shift < 0)
+ /* badblocks are disabled */
+ return 0;
+
+ if (bb->shift) {
+ /* round the start down, and the end up */
+ sector_t next = s + sectors;
+ s >>= bb->shift;
+ next += (1<<bb->shift) - 1;
+ next >>= bb->shift;
+ sectors = next - s;
+ }
+
+ write_seqlock_irq(&bb->lock);
+
+ p = bb->page;
+ lo = 0;
+ hi = bb->count;
+ /* Find the last range that starts at-or-before 's' */
+ while (hi - lo > 1) {
+ int mid = (lo + hi) / 2;
+ sector_t a = BB_OFFSET(p[mid]);
+ if (a <= s)
+ lo = mid;
+ else
+ hi = mid;
+ }
+ if (hi > lo && BB_OFFSET(p[lo]) > s)
+ hi = lo;
+
+ if (hi > lo) {
+ /* we found a range that might merge with the start
+ * of our new range
+ */
+ sector_t a = BB_OFFSET(p[lo]);
+ sector_t e = a + BB_LEN(p[lo]);
+ int ack = BB_ACK(p[lo]);
+ if (e >= s) {
+ /* Yes, we can merge with a previous range */
+ if (s == a && s + sectors >= e)
+ /* new range covers old */
+ ack = acknowledged;
+ else
+ ack = ack && acknowledged;
+
+ if (e < s + sectors)
+ e = s + sectors;
+ if (e - a <= BB_MAX_LEN) {
+ p[lo] = BB_MAKE(a, e-a, ack);
+ s = e;
+ } else {
+ /* does not all fit in one range,
+ * make p[lo] maximal
+ */
+ if (BB_LEN(p[lo]) != BB_MAX_LEN)
+ p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
+ s = a + BB_MAX_LEN;
+ }
+ sectors = e - s;
+ }
+ }
+ if (sectors && hi < bb->count) {
+ /* 'hi' points to the first range that starts after 's'.
+ * Maybe we can merge with the start of that range */
+ sector_t a = BB_OFFSET(p[hi]);
+ sector_t e = a + BB_LEN(p[hi]);
+ int ack = BB_ACK(p[hi]);
+ if (a <= s + sectors) {
+ /* merging is possible */
+ if (e <= s + sectors) {
+ /* full overlap */
+ e = s + sectors;
+ ack = acknowledged;
+ } else
+ ack = ack && acknowledged;
+
+ a = s;
+ if (e - a <= BB_MAX_LEN) {
+ p[hi] = BB_MAKE(a, e-a, ack);
+ s = e;
+ } else {
+ p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
+ s = a + BB_MAX_LEN;
+ }
+ sectors = e - s;
+ lo = hi;
+ hi++;
+ }
+ }
+ if (sectors == 0 && hi < bb->count) {
+ /* we might be able to combine lo and hi */
+ /* Note: 's' is at the end of 'lo' */
+ sector_t a = BB_OFFSET(p[hi]);
+ int lolen = BB_LEN(p[lo]);
+ int hilen = BB_LEN(p[hi]);
+ int newlen = lolen + hilen - (s - a);
+ if (s >= a && newlen < BB_MAX_LEN) {
+ /* yes, we can combine them */
+ int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
+ p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
+ memmove(p + hi, p + hi + 1,
+ (bb->count - hi - 1) * 8);
+ bb->count--;
+ }
+ }
+ while (sectors) {
+ /* didn't merge (or didn't merge all of it).
+ * Need to add a range just before 'hi' */
+ if (bb->count >= MD_MAX_BADBLOCKS) {
+ /* No room for more */
+ rv = 0;
+ break;
+ } else {
+ int this_sectors = sectors;
+ memmove(p + hi + 1, p + hi,
+ (bb->count - hi) * 8);
+ bb->count++;
+
+ if (this_sectors > BB_MAX_LEN)
+ this_sectors = BB_MAX_LEN;
+ p[hi] = BB_MAKE(s, this_sectors, acknowledged);
+ sectors -= this_sectors;
+ s += this_sectors;
+ }
+ }
+
+ bb->changed = 1;
+ if (!acknowledged)
+ bb->unacked_exist = 1;
+ write_sequnlock_irq(&bb->lock);
+
+ return rv;
+}
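A worked sketch, with hypothetical values and assuming a struct badblocks bb whose shift is 0 and which starts empty, of the merge path above: two adjacent insertions collapse into a single table entry because the combined length stays within BB_MAX_LEN.

	md_set_badblocks(&bb, 100, 20, 1);	/* bb.page[0] records 100..119, acked */
	md_set_badblocks(&bb, 120, 20, 1);	/* merges into bb.page[0] in place    */
	/* now bb.count == 1, BB_OFFSET(bb.page[0]) == 100, BB_LEN(bb.page[0]) == 40 */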
+
+int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors,
+ int acknowledged)
+{
+ int rv = md_set_badblocks(&rdev->badblocks,
+ s + rdev->data_offset, sectors, acknowledged);
+ if (rv) {
+ /* Make sure they get written out promptly */
+ set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
+ md_wakeup_thread(rdev->mddev->thread);
+ }
+ return rv;
+}
+EXPORT_SYMBOL_GPL(rdev_set_badblocks);
+
+/*
+ * Remove a range of bad blocks from the table.
+ * This may involve extending the table if we split a region,
+ * but it must not fail. So if the table becomes full, we just
+ * drop the remove request.
+ */
+static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
+{
+ u64 *p;
+ int lo, hi;
+ sector_t target = s + sectors;
+ int rv = 0;
+
+ if (bb->shift > 0) {
+ /* When clearing we round the start up and the end down.
+ * This should not matter as the shift should align with
+ * the block size and no rounding should ever be needed.
+ * However it is better to think a block is bad when it
+ * isn't than to think a block is not bad when it is.
+ */
+ s += (1<<bb->shift) - 1;
+ s >>= bb->shift;
+ target >>= bb->shift;
+ sectors = target - s;
+ }
+
+ write_seqlock_irq(&bb->lock);
+
+ p = bb->page;
+ lo = 0;
+ hi = bb->count;
+ /* Find the last range that starts before 'target' */
+ while (hi - lo > 1) {
+ int mid = (lo + hi) / 2;
+ sector_t a = BB_OFFSET(p[mid]);
+ if (a < target)
+ lo = mid;
+ else
+ hi = mid;
+ }
+ if (hi > lo) {
+ /* p[lo] is the last range that could overlap the
+ * current range. Earlier ranges could also overlap,
+ * but only this one can overlap the end of the range.
+ */
+ if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
+ /* Partial overlap, leave the tail of this range */
+ int ack = BB_ACK(p[lo]);
+ sector_t a = BB_OFFSET(p[lo]);
+ sector_t end = a + BB_LEN(p[lo]);
+
+ if (a < s) {
+ /* we need to split this range */
+ if (bb->count >= MD_MAX_BADBLOCKS) {
+ rv = 0;
+ goto out;
+ }
+ memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
+ bb->count++;
+ p[lo] = BB_MAKE(a, s-a, ack);
+ lo++;
+ }
+ p[lo] = BB_MAKE(target, end - target, ack);
+ /* there is no longer an overlap */
+ hi = lo;
+ lo--;
+ }
+ while (lo >= 0 &&
+ BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
+ /* This range does overlap */
+ if (BB_OFFSET(p[lo]) < s) {
+ /* Keep the early parts of this range. */
+ int ack = BB_ACK(p[lo]);
+ sector_t start = BB_OFFSET(p[lo]);
+ p[lo] = BB_MAKE(start, s - start, ack);
+ /* now p[lo] no longer overlaps, so we are done */
+ break;
+ }
+ lo--;
+ }
+ /* 'lo' is strictly before, 'hi' is strictly after,
+ * anything between needs to be discarded
+ */
+ if (hi - lo > 1) {
+ memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
+ bb->count -= (hi - lo - 1);
+ }
+ }
+
+ bb->changed = 1;
+out:
+ write_sequnlock_irq(&bb->lock);
+ return rv;
+}
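Continuing the hypothetical example from md_set_badblocks() above, a sketch of the split path: clearing the middle of a recorded range consumes one extra table slot.

	/* table currently holds the single acked range 100..139 */
	md_clear_badblocks(&bb, 110, 10);
	/* now bb.count == 2: bb.page[0] covers 100..109, bb.page[1] covers 120..139 */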
+
+int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors)
+{
+ return md_clear_badblocks(&rdev->badblocks,
+ s + rdev->data_offset,
+ sectors);
+}
+EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
+
+/*
+ * Acknowledge all bad blocks in a list.
+ * This only succeeds if ->changed is clear. It is used by
+ * in-kernel metadata updates
+ */
+void md_ack_all_badblocks(struct badblocks *bb)
+{
+ if (bb->page == NULL || bb->changed)
+ /* no point even trying */
+ return;
+ write_seqlock_irq(&bb->lock);
+
+ if (bb->changed == 0) {
+ u64 *p = bb->page;
+ int i;
+ for (i = 0; i < bb->count ; i++) {
+ if (!BB_ACK(p[i])) {
+ sector_t start = BB_OFFSET(p[i]);
+ int len = BB_LEN(p[i]);
+ p[i] = BB_MAKE(start, len, 1);
+ }
+ }
+ bb->unacked_exist = 0;
+ }
+ write_sequnlock_irq(&bb->lock);
+}
+EXPORT_SYMBOL_GPL(md_ack_all_badblocks);
+
+/* sysfs access to bad-blocks list.
+ * We present two files.
+ * 'bad-blocks' lists sector numbers and lengths of ranges that
+ * are recorded as bad. The list is truncated to fit within
+ * the one-page limit of sysfs.
+ * Writing "sector length" to this file adds an acknowledged
+ * bad block list.
+ * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
+ * been acknowledged. Writing to this file adds bad blocks
+ * without acknowledging them. This is largely for testing.
+ */
+
+static ssize_t
+badblocks_show(struct badblocks *bb, char *page, int unack)
+{
+ size_t len;
+ int i;
+ u64 *p = bb->page;
+ unsigned seq;
+
+ if (bb->shift < 0)
+ return 0;
+
+retry:
+ seq = read_seqbegin(&bb->lock);
+
+ len = 0;
+ i = 0;
+
+ while (len < PAGE_SIZE && i < bb->count) {
+ sector_t s = BB_OFFSET(p[i]);
+ unsigned int length = BB_LEN(p[i]);
+ int ack = BB_ACK(p[i]);
+ i++;
+
+ if (unack && ack)
+ continue;
+
+ len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
+ (unsigned long long)s << bb->shift,
+ length << bb->shift);
+ }
+ if (unack && len == 0)
+ bb->unacked_exist = 0;
+
+ if (read_seqretry(&bb->lock, seq))
+ goto retry;
+
+ return len;
+}
+
+#define DO_DEBUG 1
+
+static ssize_t
+badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
+{
+ unsigned long long sector;
+ int length;
+ char newline;
+#ifdef DO_DEBUG
+ /* Allow clearing via sysfs *only* for testing/debugging.
+ * Normally only a successful write may clear a badblock
+ */
+ int clear = 0;
+ if (page[0] == '-') {
+ clear = 1;
+ page++;
+ }
+#endif /* DO_DEBUG */
+
+ switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
+ case 3:
+ if (newline != '\n')
+ return -EINVAL;
+ case 2:
+ if (length <= 0)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+#ifdef DO_DEBUG
+ if (clear) {
+ md_clear_badblocks(bb, sector, length);
+ return len;
+ }
+#endif /* DO_DEBUG */
+ if (md_set_badblocks(bb, sector, length, !unack))
+ return len;
+ else
+ return -ENOSPC;
+}
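For reference, a small sketch of the accepted input format, using a hypothetical string literal: "sector length", optionally followed by a newline, with a leading '-' (DO_DEBUG builds only) selecting the clear path.

	unsigned long long sector;
	int length;
	char newline;
	/* "4096 8\n" parses to sector == 4096, length == 8, newline == '\n' */
	int fields = sscanf("4096 8\n", "%llu %d%c", &sector, &length, &newline);
	/* fields == 3 */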
+
static int md_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1c26c7a08ae..1e586bb4452 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -29,6 +29,13 @@
typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;
+/* Bad block numbers are stored sorted in a single page.
+ * 64bits is used for each block or extent.
+ * 54 bits are sector number, 9 bits are extent size,
+ * 1 bit is an 'acknowledged' flag.
+ */
+#define MD_MAX_BADBLOCKS (PAGE_SIZE/8)
+
/*
* MD's 'extended' device
*/
@@ -48,7 +55,7 @@ struct mdk_rdev_s
struct block_device *meta_bdev;
struct block_device *bdev; /* block device handle */
- struct page *sb_page;
+ struct page *sb_page, *bb_page;
int sb_loaded;
__u64 sb_events;
sector_t data_offset; /* start of data in array */
@@ -74,9 +81,29 @@ struct mdk_rdev_s
#define In_sync 2 /* device is in_sync with rest of array */
#define WriteMostly 4 /* Avoid reading if at all possible */
#define AutoDetected 7 /* added by auto-detect */
-#define Blocked 8 /* An error occurred on an externally
- * managed array, don't allow writes
+#define Blocked 8 /* An error occurred but has not yet
+ * been acknowledged by the metadata
+ * handler, so don't allow writes
* until it is cleared */
+#define WriteErrorSeen 9 /* A write error has been seen on this
+ * device
+ */
+#define FaultRecorded 10 /* Intermediate state for clearing
+ * Blocked. The Fault is/will-be
+ * recorded in the metadata, but that
+ * metadata hasn't been stored safely
+ * on disk yet.
+ */
+#define BlockedBadBlocks 11 /* A writer is blocked because they
+ * found an unacknowledged bad-block.
+ * This can safely be cleared at any
+ * time, and the writer will re-check.
+ * It may be set at any time, and at
+ * worst the writer will timeout and
+ * re-check. So setting it as
+ * accurately as possible is good, but
+ * not absolutely critical.
+ */
wait_queue_head_t blocked_wait;
int desc_nr; /* descriptor index in the superblock */
@@ -111,8 +138,54 @@ struct mdk_rdev_s
struct sysfs_dirent *sysfs_state; /* handle for 'state'
* sysfs entry */
+
+ struct badblocks {
+ int count; /* count of bad blocks */
+ int unacked_exist; /* there probably are unacknowledged
+ * bad blocks. This is only cleared
+ * when a read discovers none
+ */
+ int shift; /* shift from sectors to block size
+ * a -ve shift means badblocks are
+ * disabled. */
+ u64 *page; /* badblock list */
+ int changed;
+ seqlock_t lock;
+
+ sector_t sector;
+ sector_t size; /* in sectors */
+ } badblocks;
};
+#define BB_LEN_MASK (0x00000000000001FFULL)
+#define BB_OFFSET_MASK (0x7FFFFFFFFFFFFE00ULL)
+#define BB_ACK_MASK (0x8000000000000000ULL)
+#define BB_MAX_LEN 512
+#define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9)
+#define BB_LEN(x) (((x) & BB_LEN_MASK) + 1)
+#define BB_ACK(x) (!!((x) & BB_ACK_MASK))
+#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
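A minimal sketch, with hypothetical values, of how one 64-bit record round-trips through the macros above:

	u64 entry = BB_MAKE((sector_t)2048, 16, 1); /* 16 bad sectors at 2048, acked */
	/* BB_OFFSET(entry) == 2048, BB_LEN(entry) == 16, BB_ACK(entry) == 1 */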
+
+extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
+ sector_t *first_bad, int *bad_sectors);
+static inline int is_badblock(mdk_rdev_t *rdev, sector_t s, int sectors,
+ sector_t *first_bad, int *bad_sectors)
+{
+ if (unlikely(rdev->badblocks.count)) {
+ int rv = md_is_badblock(&rdev->badblocks, rdev->data_offset + s,
+ sectors,
+ first_bad, bad_sectors);
+ if (rv)
+ *first_bad -= rdev->data_offset;
+ return rv;
+ }
+ return 0;
+}
+extern int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors,
+ int acknowledged);
+extern int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors);
+extern void md_ack_all_badblocks(struct badblocks *bb);
+
struct mddev_s
{
void *private;
@@ -239,9 +312,12 @@ struct mddev_s
#define MD_RECOVERY_FROZEN 9
unsigned long recovery;
- int recovery_disabled; /* if we detect that recovery
- * will always fail, set this
- * so we don't loop trying */
+ /* If a RAID personality determines that recovery (of a particular
+ * device) will fail due to a read error on the source device, it
+ * takes a copy of this number and does not attempt recovery again
+ * until this number changes.
+ */
+ int recovery_disabled;
int in_sync; /* know to not need resync */
/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
@@ -304,11 +380,6 @@ struct mddev_s
* hot-adding a bitmap. It should
* eventually be settable by sysfs.
*/
- /* When md is serving under dm, it might use a
- * dirty_log to store the bits.
- */
- struct dm_dirty_log *log;
-
struct mutex mutex;
unsigned long chunksize;
unsigned long daemon_sleep; /* how many jiffies between updates? */
@@ -413,6 +484,20 @@ static inline char * mdname (mddev_t * mddev)
return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}
+static inline int sysfs_link_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
+}
+
+static inline void sysfs_unlink_rdev(mddev_t *mddev, mdk_rdev_t *rdev)
+{
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+}
+
/*
* iterates through some rdev ringlist. It's safe to remove the
* current 'rdev'. Dont touch 'tmp' though.
@@ -505,7 +590,7 @@ extern void mddev_init(mddev_t *mddev);
extern int md_run(mddev_t *mddev);
extern void md_stop(mddev_t *mddev);
extern void md_stop_writes(mddev_t *mddev);
-extern void md_rdev_init(mdk_rdev_t *rdev);
+extern int md_rdev_init(mdk_rdev_t *rdev);
extern void mddev_suspend(mddev_t *mddev);
extern void mddev_resume(mddev_t *mddev);
@@ -514,4 +599,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
mddev_t *mddev);
extern int mddev_check_plugged(mddev_t *mddev);
+extern void md_trim_bio(struct bio *bio, int offset, int size);
#endif /* _MD_MD_H */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f7431b6d844..32323f0afd8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -35,16 +35,13 @@
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/seq_file.h>
+#include <linux/ratelimit.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"
#define DEBUG 0
-#if DEBUG
-#define PRINTK(x...) printk(x)
-#else
-#define PRINTK(x...)
-#endif
+#define PRINTK(x...) do { if (DEBUG) printk(x); } while (0)
/*
* Number of guaranteed r1bios in case of extreme VM load:
@@ -166,7 +163,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
for (i = 0; i < conf->raid_disks; i++) {
struct bio **bio = r1_bio->bios + i;
- if (*bio && *bio != IO_BLOCKED)
+ if (!BIO_SPECIAL(*bio))
bio_put(*bio);
*bio = NULL;
}
@@ -176,12 +173,6 @@ static void free_r1bio(r1bio_t *r1_bio)
{
conf_t *conf = r1_bio->mddev->private;
- /*
- * Wake up any possible resync thread that waits for the device
- * to go idle.
- */
- allow_barrier(conf);
-
put_all_bios(conf, r1_bio);
mempool_free(r1_bio, conf->r1bio_pool);
}
@@ -222,6 +213,33 @@ static void reschedule_retry(r1bio_t *r1_bio)
* operation and are ready to return a success/failure code to the buffer
* cache layer.
*/
+static void call_bio_endio(r1bio_t *r1_bio)
+{
+ struct bio *bio = r1_bio->master_bio;
+ int done;
+ conf_t *conf = r1_bio->mddev->private;
+
+ if (bio->bi_phys_segments) {
+ unsigned long flags;
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio->bi_phys_segments--;
+ done = (bio->bi_phys_segments == 0);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ } else
+ done = 1;
+
+ if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ if (done) {
+ bio_endio(bio, 0);
+ /*
+ * Wake up any possible resync thread that waits for the device
+ * to go idle.
+ */
+ allow_barrier(conf);
+ }
+}
+
static void raid_end_bio_io(r1bio_t *r1_bio)
{
struct bio *bio = r1_bio->master_bio;
@@ -234,8 +252,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
(unsigned long long) bio->bi_sector +
(bio->bi_size >> 9) - 1);
- bio_endio(bio,
- test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
+ call_bio_endio(r1_bio);
}
free_r1bio(r1_bio);
}
@@ -287,36 +304,52 @@ static void raid1_end_read_request(struct bio *bio, int error)
* oops, read error:
*/
char b[BDEVNAME_SIZE];
- if (printk_ratelimit())
- printk(KERN_ERR "md/raid1:%s: %s: rescheduling sector %llu\n",
- mdname(conf->mddev),
- bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
+ printk_ratelimited(
+ KERN_ERR "md/raid1:%s: %s: "
+ "rescheduling sector %llu\n",
+ mdname(conf->mddev),
+ bdevname(conf->mirrors[mirror].rdev->bdev,
+ b),
+ (unsigned long long)r1_bio->sector);
+ set_bit(R1BIO_ReadError, &r1_bio->state);
reschedule_retry(r1_bio);
}
rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}
+static void close_write(r1bio_t *r1_bio)
+{
+ /* it really is the end of this request */
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
+ /* free extra copy of the data pages */
+ int i = r1_bio->behind_page_count;
+ while (i--)
+ safe_put_page(r1_bio->behind_bvecs[i].bv_page);
+ kfree(r1_bio->behind_bvecs);
+ r1_bio->behind_bvecs = NULL;
+ }
+ /* clear the bitmap if all writes complete successfully */
+ bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
+ r1_bio->sectors,
+ !test_bit(R1BIO_Degraded, &r1_bio->state),
+ test_bit(R1BIO_BehindIO, &r1_bio->state));
+ md_write_end(r1_bio->mddev);
+}
+
static void r1_bio_write_done(r1bio_t *r1_bio)
{
- if (atomic_dec_and_test(&r1_bio->remaining))
- {
- /* it really is the end of this request */
- if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
- /* free extra copy of the data pages */
- int i = r1_bio->behind_page_count;
- while (i--)
- safe_put_page(r1_bio->behind_pages[i]);
- kfree(r1_bio->behind_pages);
- r1_bio->behind_pages = NULL;
- }
- /* clear the bitmap if all writes complete successfully */
- bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
- r1_bio->sectors,
- !test_bit(R1BIO_Degraded, &r1_bio->state),
- test_bit(R1BIO_BehindIO, &r1_bio->state));
- md_write_end(r1_bio->mddev);
- raid_end_bio_io(r1_bio);
+ if (!atomic_dec_and_test(&r1_bio->remaining))
+ return;
+
+ if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ reschedule_retry(r1_bio);
+ else {
+ close_write(r1_bio);
+ if (test_bit(R1BIO_MadeGood, &r1_bio->state))
+ reschedule_retry(r1_bio);
+ else
+ raid_end_bio_io(r1_bio);
}
}
@@ -336,13 +369,11 @@ static void raid1_end_write_request(struct bio *bio, int error)
/*
* 'one mirror IO has finished' event handler:
*/
- r1_bio->bios[mirror] = NULL;
- to_put = bio;
if (!uptodate) {
- md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
- /* an I/O failed, we can't clear the bitmap */
- set_bit(R1BIO_Degraded, &r1_bio->state);
- } else
+ set_bit(WriteErrorSeen,
+ &conf->mirrors[mirror].rdev->flags);
+ set_bit(R1BIO_WriteError, &r1_bio->state);
+ } else {
/*
* Set R1BIO_Uptodate in our master bio, so that we
* will return a good error code for to the higher
@@ -353,8 +384,22 @@ static void raid1_end_write_request(struct bio *bio, int error)
* to user-side. So if something waits for IO, then it
* will wait for the 'master' bio.
*/
+ sector_t first_bad;
+ int bad_sectors;
+
+ r1_bio->bios[mirror] = NULL;
+ to_put = bio;
set_bit(R1BIO_Uptodate, &r1_bio->state);
+ /* Maybe we can clear some bad blocks. */
+ if (is_badblock(conf->mirrors[mirror].rdev,
+ r1_bio->sector, r1_bio->sectors,
+ &first_bad, &bad_sectors)) {
+ r1_bio->bios[mirror] = IO_MADE_GOOD;
+ set_bit(R1BIO_MadeGood, &r1_bio->state);
+ }
+ }
+
update_head_pos(mirror, r1_bio);
if (behind) {
@@ -377,11 +422,13 @@ static void raid1_end_write_request(struct bio *bio, int error)
(unsigned long long) mbio->bi_sector,
(unsigned long long) mbio->bi_sector +
(mbio->bi_size >> 9) - 1);
- bio_endio(mbio, 0);
+ call_bio_endio(r1_bio);
}
}
}
- rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
+ if (r1_bio->bios[mirror] == NULL)
+ rdev_dec_pending(conf->mirrors[mirror].rdev,
+ conf->mddev);
/*
* Let's see if all mirrored write operations have finished
@@ -408,10 +455,11 @@ static void raid1_end_write_request(struct bio *bio, int error)
*
* The rdev for the device selected will have nr_pending incremented.
*/
-static int read_balance(conf_t *conf, r1bio_t *r1_bio)
+static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors)
{
const sector_t this_sector = r1_bio->sector;
- const int sectors = r1_bio->sectors;
+ int sectors;
+ int best_good_sectors;
int start_disk;
int best_disk;
int i;
@@ -426,8 +474,11 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
* We take the first readable disk when above the resync window.
*/
retry:
+ sectors = r1_bio->sectors;
best_disk = -1;
best_dist = MaxSector;
+ best_good_sectors = 0;
+
if (conf->mddev->recovery_cp < MaxSector &&
(this_sector + sectors >= conf->next_resync)) {
choose_first = 1;
@@ -439,6 +490,9 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
for (i = 0 ; i < conf->raid_disks ; i++) {
sector_t dist;
+ sector_t first_bad;
+ int bad_sectors;
+
int disk = start_disk + i;
if (disk >= conf->raid_disks)
disk -= conf->raid_disks;
@@ -461,6 +515,35 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
/* This is a reasonable device to use. It might
* even be best.
*/
+ if (is_badblock(rdev, this_sector, sectors,
+ &first_bad, &bad_sectors)) {
+ if (best_dist < MaxSector)
+ /* already have a better device */
+ continue;
+ if (first_bad <= this_sector) {
+ /* cannot read here. If this is the 'primary'
+ * device, then we must not read beyond
+ * bad_sectors from another device..
+ */
+ bad_sectors -= (this_sector - first_bad);
+ if (choose_first && sectors > bad_sectors)
+ sectors = bad_sectors;
+ if (best_good_sectors > sectors)
+ best_good_sectors = sectors;
+
+ } else {
+ sector_t good_sectors = first_bad - this_sector;
+ if (good_sectors > best_good_sectors) {
+ best_good_sectors = good_sectors;
+ best_disk = disk;
+ }
+ if (choose_first)
+ break;
+ }
+ continue;
+ } else
+ best_good_sectors = sectors;
+
dist = abs(this_sector - conf->mirrors[disk].head_position);
if (choose_first
/* Don't change to another disk for sequential reads */
@@ -489,10 +572,12 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
rdev_dec_pending(rdev, conf->mddev);
goto retry;
}
+ sectors = best_good_sectors;
conf->next_seq_sect = this_sector + sectors;
conf->last_used = best_disk;
}
rcu_read_unlock();
+ *max_sectors = sectors;
return best_disk;
}
@@ -672,30 +757,31 @@ static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio)
{
int i;
struct bio_vec *bvec;
- struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page*),
+ struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
GFP_NOIO);
- if (unlikely(!pages))
+ if (unlikely(!bvecs))
return;
bio_for_each_segment(bvec, bio, i) {
- pages[i] = alloc_page(GFP_NOIO);
- if (unlikely(!pages[i]))
+ bvecs[i] = *bvec;
+ bvecs[i].bv_page = alloc_page(GFP_NOIO);
+ if (unlikely(!bvecs[i].bv_page))
goto do_sync_io;
- memcpy(kmap(pages[i]) + bvec->bv_offset,
- kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
- kunmap(pages[i]);
+ memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
+ kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
+ kunmap(bvecs[i].bv_page);
kunmap(bvec->bv_page);
}
- r1_bio->behind_pages = pages;
+ r1_bio->behind_bvecs = bvecs;
r1_bio->behind_page_count = bio->bi_vcnt;
set_bit(R1BIO_BehindIO, &r1_bio->state);
return;
do_sync_io:
for (i = 0; i < bio->bi_vcnt; i++)
- if (pages[i])
- put_page(pages[i]);
- kfree(pages);
+ if (bvecs[i].bv_page)
+ put_page(bvecs[i].bv_page);
+ kfree(bvecs);
PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
}
@@ -705,7 +791,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
mirror_info_t *mirror;
r1bio_t *r1_bio;
struct bio *read_bio;
- int i, targets = 0, disks;
+ int i, disks;
struct bitmap *bitmap;
unsigned long flags;
const int rw = bio_data_dir(bio);
@@ -713,6 +799,9 @@ static int make_request(mddev_t *mddev, struct bio * bio)
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
mdk_rdev_t *blocked_rdev;
int plugged;
+ int first_clone;
+ int sectors_handled;
+ int max_sectors;
/*
* Register the new request and wait if the reconstruction
@@ -759,11 +848,24 @@ static int make_request(mddev_t *mddev, struct bio * bio)
r1_bio->mddev = mddev;
r1_bio->sector = bio->bi_sector;
+ /* We might need to issue multiple reads to different
+ * devices if there are bad blocks around, so we keep
+ * track of the number of reads in bio->bi_phys_segments.
+ * If this is 0, there is only one r1_bio and no locking
+ * will be needed when requests complete. If it is
+ * non-zero, then it is the number of not-completed requests.
+ */
+ bio->bi_phys_segments = 0;
+ clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+
if (rw == READ) {
/*
* read balancing logic:
*/
- int rdisk = read_balance(conf, r1_bio);
+ int rdisk;
+
+read_again:
+ rdisk = read_balance(conf, r1_bio, &max_sectors);
if (rdisk < 0) {
/* couldn't find anywhere to read from */
@@ -784,6 +886,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
r1_bio->read_disk = rdisk;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector,
+ max_sectors);
r1_bio->bios[rdisk] = read_bio;
@@ -793,16 +897,52 @@ static int make_request(mddev_t *mddev, struct bio * bio)
read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r1_bio;
- generic_make_request(read_bio);
+ if (max_sectors < r1_bio->sectors) {
+ /* could not read all from this device, so we will
+ * need another r1_bio.
+ */
+
+ sectors_handled = (r1_bio->sector + max_sectors
+ - bio->bi_sector);
+ r1_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (bio->bi_phys_segments == 0)
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+ /* Cannot call generic_make_request directly
+ * as that will be queued in __make_request
+ * and subsequent mempool_alloc might block waiting
+ * for it. So hand bio over to raid1d.
+ */
+ reschedule_retry(r1_bio);
+
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+ r1_bio->master_bio = bio;
+ r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+ r1_bio->state = 0;
+ r1_bio->mddev = mddev;
+ r1_bio->sector = bio->bi_sector + sectors_handled;
+ goto read_again;
+ } else
+ generic_make_request(read_bio);
return 0;
}
/*
* WRITE:
*/
- /* first select target devices under spinlock and
+ /* first select target devices under rcu_lock and
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
+ * If there are known/acknowledged bad blocks on any device on
+ * which we have seen a write error, we want to avoid writing those
+ * blocks.
+ * This potentially requires several writes to write around
+ * the bad blocks. Each set of writes gets its own r1bio
+ * with a set of bios attached.
*/
plugged = mddev_check_plugged(mddev);
@@ -810,6 +950,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
retry_write:
blocked_rdev = NULL;
rcu_read_lock();
+ max_sectors = r1_bio->sectors;
for (i = 0; i < disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
@@ -817,17 +958,56 @@ static int make_request(mddev_t *mddev, struct bio * bio)
blocked_rdev = rdev;
break;
}
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- atomic_inc(&rdev->nr_pending);
- if (test_bit(Faulty, &rdev->flags)) {
+ r1_bio->bios[i] = NULL;
+ if (!rdev || test_bit(Faulty, &rdev->flags)) {
+ set_bit(R1BIO_Degraded, &r1_bio->state);
+ continue;
+ }
+
+ atomic_inc(&rdev->nr_pending);
+ if (test_bit(WriteErrorSeen, &rdev->flags)) {
+ sector_t first_bad;
+ int bad_sectors;
+ int is_bad;
+
+ is_bad = is_badblock(rdev, r1_bio->sector,
+ max_sectors,
+ &first_bad, &bad_sectors);
+ if (is_bad < 0) {
+ /* mustn't write here until the bad block is
+ * acknowledged */
+ set_bit(BlockedBadBlocks, &rdev->flags);
+ blocked_rdev = rdev;
+ break;
+ }
+ if (is_bad && first_bad <= r1_bio->sector) {
+ /* Cannot write here at all */
+ bad_sectors -= (r1_bio->sector - first_bad);
+ if (bad_sectors < max_sectors)
+ /* mustn't write more than bad_sectors
+ * to other devices yet
+ */
+ max_sectors = bad_sectors;
rdev_dec_pending(rdev, mddev);
- r1_bio->bios[i] = NULL;
- } else {
- r1_bio->bios[i] = bio;
- targets++;
+ /* We don't set R1BIO_Degraded as that
+ * only applies if the disk is
+ * missing, so it might be re-added,
+ * and we want to know to recover this
+ * chunk.
+ * In this case the device is here,
+ * and the fact that this chunk is not
+ * in-sync is recorded in the bad
+ * block log
+ */
+ continue;
}
- } else
- r1_bio->bios[i] = NULL;
+ if (is_bad) {
+ int good_sectors = first_bad - r1_bio->sector;
+ if (good_sectors < max_sectors)
+ max_sectors = good_sectors;
+ }
+ }
+ r1_bio->bios[i] = bio;
}
rcu_read_unlock();
@@ -838,51 +1018,57 @@ static int make_request(mddev_t *mddev, struct bio * bio)
for (j = 0; j < i; j++)
if (r1_bio->bios[j])
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
-
+ r1_bio->state = 0;
allow_barrier(conf);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
wait_barrier(conf);
goto retry_write;
}
- BUG_ON(targets == 0); /* we never fail the last device */
-
- if (targets < conf->raid_disks) {
- /* array is degraded, we will not clear the bitmap
- * on I/O completion (see raid1_end_write_request) */
- set_bit(R1BIO_Degraded, &r1_bio->state);
+ if (max_sectors < r1_bio->sectors) {
+ /* We are splitting this write into multiple parts, so
+ * we need to prepare for allocating another r1_bio.
+ */
+ r1_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (bio->bi_phys_segments == 0)
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
}
-
- /* do behind I/O ?
- * Not if there are too many, or cannot allocate memory,
- * or a reader on WriteMostly is waiting for behind writes
- * to flush */
- if (bitmap &&
- (atomic_read(&bitmap->behind_writes)
- < mddev->bitmap_info.max_write_behind) &&
- !waitqueue_active(&bitmap->behind_wait))
- alloc_behind_pages(bio, r1_bio);
+ sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
atomic_set(&r1_bio->remaining, 1);
atomic_set(&r1_bio->behind_remaining, 0);
- bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
- test_bit(R1BIO_BehindIO, &r1_bio->state));
+ first_clone = 1;
for (i = 0; i < disks; i++) {
struct bio *mbio;
if (!r1_bio->bios[i])
continue;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- r1_bio->bios[i] = mbio;
-
- mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
- mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
- mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_rw = WRITE | do_flush_fua | do_sync;
- mbio->bi_private = r1_bio;
-
- if (r1_bio->behind_pages) {
+ md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+
+ if (first_clone) {
+ /* do behind I/O ?
+ * Not if there are too many, or cannot
+ * allocate memory, or a reader on WriteMostly
+ * is waiting for behind writes to flush */
+ if (bitmap &&
+ (atomic_read(&bitmap->behind_writes)
+ < mddev->bitmap_info.max_write_behind) &&
+ !waitqueue_active(&bitmap->behind_wait))
+ alloc_behind_pages(mbio, r1_bio);
+
+ bitmap_startwrite(bitmap, r1_bio->sector,
+ r1_bio->sectors,
+ test_bit(R1BIO_BehindIO,
+ &r1_bio->state));
+ first_clone = 0;
+ }
+ if (r1_bio->behind_bvecs) {
struct bio_vec *bvec;
int j;
@@ -894,11 +1080,20 @@ static int make_request(mddev_t *mddev, struct bio * bio)
* them all
*/
__bio_for_each_segment(bvec, mbio, j, 0)
- bvec->bv_page = r1_bio->behind_pages[j];
+ bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
atomic_inc(&r1_bio->behind_remaining);
}
+ r1_bio->bios[i] = mbio;
+
+ mbio->bi_sector = (r1_bio->sector +
+ conf->mirrors[i].rdev->data_offset);
+ mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+ mbio->bi_end_io = raid1_end_write_request;
+ mbio->bi_rw = WRITE | do_flush_fua | do_sync;
+ mbio->bi_private = r1_bio;
+
atomic_inc(&r1_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
@@ -909,6 +1104,19 @@ static int make_request(mddev_t *mddev, struct bio * bio)
/* In case raid1d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
+ if (sectors_handled < (bio->bi_size >> 9)) {
+ /* We need another r1_bio. It has already been counted
+ * in bio->bi_phys_segments
+ */
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+ r1_bio->master_bio = bio;
+ r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+ r1_bio->state = 0;
+ r1_bio->mddev = mddev;
+ r1_bio->sector = bio->bi_sector + sectors_handled;
+ goto retry_write;
+ }
+
if (do_sync || !bitmap || !plugged)
md_wakeup_thread(mddev->thread);
@@ -952,9 +1160,10 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
* However don't try a recovery from this drive as
* it is very likely to fail.
*/
- mddev->recovery_disabled = 1;
+ conf->recovery_disabled = mddev->recovery_disabled;
return;
}
+ set_bit(Blocked, &rdev->flags);
if (test_and_clear_bit(In_sync, &rdev->flags)) {
unsigned long flags;
spin_lock_irqsave(&conf->device_lock, flags);
@@ -1027,7 +1236,7 @@ static int raid1_spare_active(mddev_t *mddev)
&& !test_bit(Faulty, &rdev->flags)
&& !test_and_set_bit(In_sync, &rdev->flags)) {
count++;
- sysfs_notify_dirent(rdev->sysfs_state);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
}
}
spin_lock_irqsave(&conf->device_lock, flags);
@@ -1048,6 +1257,9 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
int first = 0;
int last = mddev->raid_disks - 1;
+ if (mddev->recovery_disabled == conf->recovery_disabled)
+ return -EBUSY;
+
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
@@ -1103,7 +1315,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
* is not possible.
*/
if (!test_bit(Faulty, &rdev->flags) &&
- !mddev->recovery_disabled &&
+ mddev->recovery_disabled != conf->recovery_disabled &&
mddev->degraded < conf->raid_disks) {
err = -EBUSY;
goto abort;
@@ -1155,6 +1367,8 @@ static void end_sync_write(struct bio *bio, int error)
conf_t *conf = mddev->private;
int i;
int mirror=0;
+ sector_t first_bad;
+ int bad_sectors;
for (i = 0; i < conf->raid_disks; i++)
if (r1_bio->bios[i] == bio) {
@@ -1172,18 +1386,48 @@ static void end_sync_write(struct bio *bio, int error)
s += sync_blocks;
sectors_to_go -= sync_blocks;
} while (sectors_to_go > 0);
- md_error(mddev, conf->mirrors[mirror].rdev);
- }
+ set_bit(WriteErrorSeen,
+ &conf->mirrors[mirror].rdev->flags);
+ set_bit(R1BIO_WriteError, &r1_bio->state);
+ } else if (is_badblock(conf->mirrors[mirror].rdev,
+ r1_bio->sector,
+ r1_bio->sectors,
+ &first_bad, &bad_sectors) &&
+ !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
+ r1_bio->sector,
+ r1_bio->sectors,
+ &first_bad, &bad_sectors)
+ )
+ set_bit(R1BIO_MadeGood, &r1_bio->state);
update_head_pos(mirror, r1_bio);
if (atomic_dec_and_test(&r1_bio->remaining)) {
- sector_t s = r1_bio->sectors;
- put_buf(r1_bio);
- md_done_sync(mddev, s, uptodate);
+ int s = r1_bio->sectors;
+ if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+ test_bit(R1BIO_WriteError, &r1_bio->state))
+ reschedule_retry(r1_bio);
+ else {
+ put_buf(r1_bio);
+ md_done_sync(mddev, s, uptodate);
+ }
}
}
+static int r1_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
+ int sectors, struct page *page, int rw)
+{
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+ /* success */
+ return 1;
+ if (rw == WRITE)
+ set_bit(WriteErrorSeen, &rdev->flags);
+ /* need to record an error - either for the block or the device */
+ if (!rdev_set_badblocks(rdev, sector, sectors, 0))
+ md_error(rdev->mddev, rdev);
+ return 0;
+}
+
static int fix_sync_read_error(r1bio_t *r1_bio)
{
/* Try some synchronous reads of other devices to get
@@ -1193,6 +1437,9 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
* We don't need to freeze the array, because being in an
* active sync request, there is no normal IO, and
* no overlapping syncs.
+ * We don't need to check is_badblock() again as we
+ * made sure that anything with a bad block in range
+ * will have bi_end_io clear.
*/
mddev_t *mddev = r1_bio->mddev;
conf_t *conf = mddev->private;
@@ -1217,9 +1464,7 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
* active, and resync is currently active
*/
rdev = conf->mirrors[d].rdev;
- if (sync_page_io(rdev,
- sect,
- s<<9,
+ if (sync_page_io(rdev, sect, s<<9,
bio->bi_io_vec[idx].bv_page,
READ, false)) {
success = 1;
@@ -1233,16 +1478,36 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
if (!success) {
char b[BDEVNAME_SIZE];
- /* Cannot read from anywhere, array is toast */
- md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+ int abort = 0;
+ /* Cannot read from anywhere, this block is lost.
+ * Record a bad block on each device. If that doesn't
+ * work just disable and interrupt the recovery.
+ * Don't fail devices as that won't really help.
+ */
printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
" for block %llu\n",
mdname(mddev),
bdevname(bio->bi_bdev, b),
(unsigned long long)r1_bio->sector);
- md_done_sync(mddev, r1_bio->sectors, 0);
- put_buf(r1_bio);
- return 0;
+ for (d = 0; d < conf->raid_disks; d++) {
+ rdev = conf->mirrors[d].rdev;
+ if (!rdev || test_bit(Faulty, &rdev->flags))
+ continue;
+ if (!rdev_set_badblocks(rdev, sect, s, 0))
+ abort = 1;
+ }
+ if (abort) {
+ mddev->recovery_disabled = 1;
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ md_done_sync(mddev, r1_bio->sectors, 0);
+ put_buf(r1_bio);
+ return 0;
+ }
+ /* Try next page */
+ sectors -= s;
+ sect += s;
+ idx++;
+ continue;
}
start = d;
@@ -1254,16 +1519,12 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
continue;
rdev = conf->mirrors[d].rdev;
- if (sync_page_io(rdev,
- sect,
- s<<9,
- bio->bi_io_vec[idx].bv_page,
- WRITE, false) == 0) {
+ if (r1_sync_page_io(rdev, sect, s,
+ bio->bi_io_vec[idx].bv_page,
+ WRITE) == 0) {
r1_bio->bios[d]->bi_end_io = NULL;
rdev_dec_pending(rdev, mddev);
- md_error(mddev, rdev);
- } else
- atomic_add(s, &rdev->corrected_errors);
+ }
}
d = start;
while (d != r1_bio->read_disk) {
@@ -1273,12 +1534,10 @@ static int fix_sync_read_error(r1bio_t *r1_bio)
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
continue;
rdev = conf->mirrors[d].rdev;
- if (sync_page_io(rdev,
- sect,
- s<<9,
- bio->bi_io_vec[idx].bv_page,
- READ, false) == 0)
- md_error(mddev, rdev);
+ if (r1_sync_page_io(rdev, sect, s,
+ bio->bi_io_vec[idx].bv_page,
+ READ) != 0)
+ atomic_add(s, &rdev->corrected_errors);
}
sectors -= s;
sect += s;
@@ -1420,7 +1679,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
*
* 1. Retries failed read operations on working mirrors.
* 2. Updates the raid superblock when problems encounter.
- * 3. Performs writes following reads for array syncronising.
+ * 3. Performs writes following reads for array synchronising.
*/
static void fix_read_error(conf_t *conf, int read_disk,
@@ -1443,9 +1702,14 @@ static void fix_read_error(conf_t *conf, int read_disk,
* which is the thread that might remove
* a device. If raid1d ever becomes multi-threaded....
*/
+ sector_t first_bad;
+ int bad_sectors;
+
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags) &&
+ is_badblock(rdev, sect, s,
+ &first_bad, &bad_sectors) == 0 &&
sync_page_io(rdev, sect, s<<9,
conf->tmppage, READ, false))
success = 1;
@@ -1457,8 +1721,10 @@ static void fix_read_error(conf_t *conf, int read_disk,
} while (!success && d != read_disk);
if (!success) {
- /* Cannot read from anywhere -- bye bye array */
- md_error(mddev, conf->mirrors[read_disk].rdev);
+ /* Cannot read from anywhere - mark it bad */
+ mdk_rdev_t *rdev = conf->mirrors[read_disk].rdev;
+ if (!rdev_set_badblocks(rdev, sect, s, 0))
+ md_error(mddev, rdev);
break;
}
/* write it back and re-read */
@@ -1469,13 +1735,9 @@ static void fix_read_error(conf_t *conf, int read_disk,
d--;
rdev = conf->mirrors[d].rdev;
if (rdev &&
- test_bit(In_sync, &rdev->flags)) {
- if (sync_page_io(rdev, sect, s<<9,
- conf->tmppage, WRITE, false)
- == 0)
- /* Well, this device is dead */
- md_error(mddev, rdev);
- }
+ test_bit(In_sync, &rdev->flags))
+ r1_sync_page_io(rdev, sect, s,
+ conf->tmppage, WRITE);
}
d = start;
while (d != read_disk) {
@@ -1486,12 +1748,8 @@ static void fix_read_error(conf_t *conf, int read_disk,
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags)) {
- if (sync_page_io(rdev, sect, s<<9,
- conf->tmppage, READ, false)
- == 0)
- /* Well, this device is dead */
- md_error(mddev, rdev);
- else {
+ if (r1_sync_page_io(rdev, sect, s,
+ conf->tmppage, READ)) {
atomic_add(s, &rdev->corrected_errors);
printk(KERN_INFO
"md/raid1:%s: read error corrected "
@@ -1508,21 +1766,255 @@ static void fix_read_error(conf_t *conf, int read_disk,
}
}
+static void bi_complete(struct bio *bio, int error)
+{
+ complete((struct completion *)bio->bi_private);
+}
+
+static int submit_bio_wait(int rw, struct bio *bio)
+{
+ struct completion event;
+ rw |= REQ_SYNC;
+
+ init_completion(&event);
+ bio->bi_private = &event;
+ bio->bi_end_io = bi_complete;
+ submit_bio(rw, bio);
+ wait_for_completion(&event);
+
+ return test_bit(BIO_UPTODATE, &bio->bi_flags);
+}
+
+static int narrow_write_error(r1bio_t *r1_bio, int i)
+{
+ mddev_t *mddev = r1_bio->mddev;
+ conf_t *conf = mddev->private;
+ mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+ int vcnt, idx;
+ struct bio_vec *vec;
+
+ /* bio has the data to be written to device 'i' where
+ * we just recently had a write error.
+ * We repeatedly clone the bio and trim down to one block,
+ * then try the write. Where the write fails we record
+ * a bad block.
+ * It is conceivable that the bio doesn't exactly align with
+ * blocks. We must handle this somehow.
+ *
+ * We currently own a reference on the rdev.
+ */
+
+ int block_sectors;
+ sector_t sector;
+ int sectors;
+ int sect_to_write = r1_bio->sectors;
+ int ok = 1;
+
+ if (rdev->badblocks.shift < 0)
+ return 0;
+
+ block_sectors = 1 << rdev->badblocks.shift;
+ sector = r1_bio->sector;
+ sectors = ((sector + block_sectors)
+ & ~(sector_t)(block_sectors - 1))
+ - sector;
+
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
+ vcnt = r1_bio->behind_page_count;
+ vec = r1_bio->behind_bvecs;
+ idx = 0;
+ while (vec[idx].bv_page == NULL)
+ idx++;
+ } else {
+ vcnt = r1_bio->master_bio->bi_vcnt;
+ vec = r1_bio->master_bio->bi_io_vec;
+ idx = r1_bio->master_bio->bi_idx;
+ }
+ while (sect_to_write) {
+ struct bio *wbio;
+ if (sectors > sect_to_write)
+ sectors = sect_to_write;
+ /* Write at 'sector' for 'sectors' */
+
+ wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
+ memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
+ wbio->bi_sector = r1_bio->sector;
+ wbio->bi_rw = WRITE;
+ wbio->bi_vcnt = vcnt;
+ wbio->bi_size = r1_bio->sectors << 9;
+ wbio->bi_idx = idx;
+
+ md_trim_bio(wbio, sector - r1_bio->sector, sectors);
+ wbio->bi_sector += rdev->data_offset;
+ wbio->bi_bdev = rdev->bdev;
+ if (submit_bio_wait(WRITE, wbio) == 0)
+ /* failure! */
+ ok = rdev_set_badblocks(rdev, sector,
+ sectors, 0)
+ && ok;
+
+ bio_put(wbio);
+ sect_to_write -= sectors;
+ sector += sectors;
+ sectors = block_sectors;
+ }
+ return ok;
+}
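A worked illustration with hypothetical numbers of the block alignment above: with badblocks.shift == 3 (block_sectors == 8), a failed 20-sector write starting at sector 1003 is retried as 1003..1007 (5 sectors), 1008..1015 (8) and 1016..1022 (7), so no retry, and hence no recorded badblock, straddles a shift boundary.

	int block_sectors = 8;			/* 1 << rdev->badblocks.shift */
	sector_t sector = 1003;
	int first_piece = ((sector + block_sectors)
			   & ~(sector_t)(block_sectors - 1))
			  - sector;		/* == 5, covering 1003..1007 */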
+
+static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio)
+{
+ int m;
+ int s = r1_bio->sectors;
+ for (m = 0; m < conf->raid_disks ; m++) {
+ mdk_rdev_t *rdev = conf->mirrors[m].rdev;
+ struct bio *bio = r1_bio->bios[m];
+ if (bio->bi_end_io == NULL)
+ continue;
+ if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+ test_bit(R1BIO_MadeGood, &r1_bio->state)) {
+ rdev_clear_badblocks(rdev, r1_bio->sector, s);
+ }
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+ test_bit(R1BIO_WriteError, &r1_bio->state)) {
+ if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
+ md_error(conf->mddev, rdev);
+ }
+ }
+ put_buf(r1_bio);
+ md_done_sync(conf->mddev, s, 1);
+}
+
+static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio)
+{
+ int m;
+ for (m = 0; m < conf->raid_disks ; m++)
+ if (r1_bio->bios[m] == IO_MADE_GOOD) {
+ mdk_rdev_t *rdev = conf->mirrors[m].rdev;
+ rdev_clear_badblocks(rdev,
+ r1_bio->sector,
+ r1_bio->sectors);
+ rdev_dec_pending(rdev, conf->mddev);
+ } else if (r1_bio->bios[m] != NULL) {
+ /* This drive got a write error. We need to
+ * narrow down and record precise write
+ * errors.
+ */
+ if (!narrow_write_error(r1_bio, m)) {
+ md_error(conf->mddev,
+ conf->mirrors[m].rdev);
+ /* an I/O failed, we can't clear the bitmap */
+ set_bit(R1BIO_Degraded, &r1_bio->state);
+ }
+ rdev_dec_pending(conf->mirrors[m].rdev,
+ conf->mddev);
+ }
+ if (test_bit(R1BIO_WriteError, &r1_bio->state))
+ close_write(r1_bio);
+ raid_end_bio_io(r1_bio);
+}
+
+static void handle_read_error(conf_t *conf, r1bio_t *r1_bio)
+{
+ int disk;
+ int max_sectors;
+ mddev_t *mddev = conf->mddev;
+ struct bio *bio;
+ char b[BDEVNAME_SIZE];
+ mdk_rdev_t *rdev;
+
+ clear_bit(R1BIO_ReadError, &r1_bio->state);
+ /* we got a read error. Maybe the drive is bad. Maybe just
+ * the block and we can fix it.
+ * We freeze all other IO, and try reading the block from
+ * other devices. When we find one, we re-write
+ * and check that this fixes the read error.
+ * This is all done synchronously while the array is
+ * frozen
+ */
+ if (mddev->ro == 0) {
+ freeze_array(conf);
+ fix_read_error(conf, r1_bio->read_disk,
+ r1_bio->sector, r1_bio->sectors);
+ unfreeze_array(conf);
+ } else
+ md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
+
+ bio = r1_bio->bios[r1_bio->read_disk];
+ bdevname(bio->bi_bdev, b);
+read_more:
+ disk = read_balance(conf, r1_bio, &max_sectors);
+ if (disk == -1) {
+ printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
+ " read error for block %llu\n",
+ mdname(mddev), b, (unsigned long long)r1_bio->sector);
+ raid_end_bio_io(r1_bio);
+ } else {
+ const unsigned long do_sync
+ = r1_bio->master_bio->bi_rw & REQ_SYNC;
+ if (bio) {
+ r1_bio->bios[r1_bio->read_disk] =
+ mddev->ro ? IO_BLOCKED : NULL;
+ bio_put(bio);
+ }
+ r1_bio->read_disk = disk;
+ bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
+ md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+ r1_bio->bios[r1_bio->read_disk] = bio;
+ rdev = conf->mirrors[disk].rdev;
+ printk_ratelimited(KERN_ERR
+ "md/raid1:%s: redirecting sector %llu"
+ " to other mirror: %s\n",
+ mdname(mddev),
+ (unsigned long long)r1_bio->sector,
+ bdevname(rdev->bdev, b));
+ bio->bi_sector = r1_bio->sector + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ bio->bi_end_io = raid1_end_read_request;
+ bio->bi_rw = READ | do_sync;
+ bio->bi_private = r1_bio;
+ if (max_sectors < r1_bio->sectors) {
+ /* Drat - have to split this up more */
+ struct bio *mbio = r1_bio->master_bio;
+ int sectors_handled = (r1_bio->sector + max_sectors
+ - mbio->bi_sector);
+ r1_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (mbio->bi_phys_segments == 0)
+ mbio->bi_phys_segments = 2;
+ else
+ mbio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+ generic_make_request(bio);
+ bio = NULL;
+
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+ r1_bio->master_bio = mbio;
+ r1_bio->sectors = (mbio->bi_size >> 9)
+ - sectors_handled;
+ r1_bio->state = 0;
+ set_bit(R1BIO_ReadError, &r1_bio->state);
+ r1_bio->mddev = mddev;
+ r1_bio->sector = mbio->bi_sector + sectors_handled;
+
+ goto read_more;
+ } else
+ generic_make_request(bio);
+ }
+}
+
static void raid1d(mddev_t *mddev)
{
r1bio_t *r1_bio;
- struct bio *bio;
unsigned long flags;
conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
- mdk_rdev_t *rdev;
struct blk_plug plug;
md_check_recovery(mddev);
blk_start_plug(&plug);
for (;;) {
- char b[BDEVNAME_SIZE];
if (atomic_read(&mddev->plug_cnt) == 0)
flush_pending_writes(conf);
@@ -1539,62 +2031,26 @@ static void raid1d(mddev_t *mddev)
mddev = r1_bio->mddev;
conf = mddev->private;
- if (test_bit(R1BIO_IsSync, &r1_bio->state))
- sync_request_write(mddev, r1_bio);
- else {
- int disk;
-
- /* we got a read error. Maybe the drive is bad. Maybe just
- * the block and we can fix it.
- * We freeze all other IO, and try reading the block from
- * other devices. When we find one, we re-write
- * and check it that fixes the read error.
- * This is all done synchronously while the array is
- * frozen
+ if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
+ if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+ test_bit(R1BIO_WriteError, &r1_bio->state))
+ handle_sync_write_finished(conf, r1_bio);
+ else
+ sync_request_write(mddev, r1_bio);
+ } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+ test_bit(R1BIO_WriteError, &r1_bio->state))
+ handle_write_finished(conf, r1_bio);
+ else if (test_bit(R1BIO_ReadError, &r1_bio->state))
+ handle_read_error(conf, r1_bio);
+ else
+ /* just a partial read to be scheduled from separate
+ * context
*/
- if (mddev->ro == 0) {
- freeze_array(conf);
- fix_read_error(conf, r1_bio->read_disk,
- r1_bio->sector,
- r1_bio->sectors);
- unfreeze_array(conf);
- } else
- md_error(mddev,
- conf->mirrors[r1_bio->read_disk].rdev);
-
- bio = r1_bio->bios[r1_bio->read_disk];
- if ((disk=read_balance(conf, r1_bio)) == -1) {
- printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
- " read error for block %llu\n",
- mdname(mddev),
- bdevname(bio->bi_bdev,b),
- (unsigned long long)r1_bio->sector);
- raid_end_bio_io(r1_bio);
- } else {
- const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
- r1_bio->bios[r1_bio->read_disk] =
- mddev->ro ? IO_BLOCKED : NULL;
- r1_bio->read_disk = disk;
- bio_put(bio);
- bio = bio_clone_mddev(r1_bio->master_bio,
- GFP_NOIO, mddev);
- r1_bio->bios[r1_bio->read_disk] = bio;
- rdev = conf->mirrors[disk].rdev;
- if (printk_ratelimit())
- printk(KERN_ERR "md/raid1:%s: redirecting sector %llu to"
- " other mirror: %s\n",
- mdname(mddev),
- (unsigned long long)r1_bio->sector,
- bdevname(rdev->bdev,b));
- bio->bi_sector = r1_bio->sector + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
- bio->bi_end_io = raid1_end_read_request;
- bio->bi_rw = READ | do_sync;
- bio->bi_private = r1_bio;
- generic_make_request(bio);
- }
- }
+ generic_make_request(r1_bio->bios[r1_bio->read_disk]);
+
cond_resched();
+ if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ md_check_recovery(mddev);
}
blk_finish_plug(&plug);
}
@@ -1636,6 +2092,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
int write_targets = 0, read_targets = 0;
sector_t sync_blocks;
int still_degraded = 0;
+ int good_sectors = RESYNC_SECTORS;
+ int min_bad = 0; /* number of sectors that are bad in all devices */
if (!conf->r1buf_pool)
if (init_resync(conf))
@@ -1723,36 +2181,89 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev == NULL ||
- test_bit(Faulty, &rdev->flags)) {
+ test_bit(Faulty, &rdev->flags)) {
still_degraded = 1;
- continue;
} else if (!test_bit(In_sync, &rdev->flags)) {
bio->bi_rw = WRITE;
bio->bi_end_io = end_sync_write;
write_targets ++;
} else {
/* may need to read from here */
- bio->bi_rw = READ;
- bio->bi_end_io = end_sync_read;
- if (test_bit(WriteMostly, &rdev->flags)) {
- if (wonly < 0)
- wonly = i;
- } else {
- if (disk < 0)
- disk = i;
+ sector_t first_bad = MaxSector;
+ int bad_sectors;
+
+ if (is_badblock(rdev, sector_nr, good_sectors,
+ &first_bad, &bad_sectors)) {
+ if (first_bad > sector_nr)
+ good_sectors = first_bad - sector_nr;
+ else {
+ bad_sectors -= (sector_nr - first_bad);
+ if (min_bad == 0 ||
+ min_bad > bad_sectors)
+ min_bad = bad_sectors;
+ }
+ }
+ if (sector_nr < first_bad) {
+ if (test_bit(WriteMostly, &rdev->flags)) {
+ if (wonly < 0)
+ wonly = i;
+ } else {
+ if (disk < 0)
+ disk = i;
+ }
+ bio->bi_rw = READ;
+ bio->bi_end_io = end_sync_read;
+ read_targets++;
}
- read_targets++;
}
- atomic_inc(&rdev->nr_pending);
- bio->bi_sector = sector_nr + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
- bio->bi_private = r1_bio;
+ if (bio->bi_end_io) {
+ atomic_inc(&rdev->nr_pending);
+ bio->bi_sector = sector_nr + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ bio->bi_private = r1_bio;
+ }
}
rcu_read_unlock();
if (disk < 0)
disk = wonly;
r1_bio->read_disk = disk;
+ if (read_targets == 0 && min_bad > 0) {
+ /* These sectors are bad on all InSync devices, so we
+ * need to mark them bad on all write targets
+ */
+ int ok = 1;
+ for (i = 0 ; i < conf->raid_disks ; i++)
+ if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
+ mdk_rdev_t *rdev =
+ rcu_dereference(conf->mirrors[i].rdev);
+ ok = rdev_set_badblocks(rdev, sector_nr,
+ min_bad, 0
+ ) && ok;
+ }
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ *skipped = 1;
+ put_buf(r1_bio);
+
+ if (!ok) {
+ /* Cannot record the badblocks, so need to
+ * abort the resync.
+ * If there are multiple read targets, could just
+ * fail the really bad ones ???
+ */
+ conf->recovery_disabled = mddev->recovery_disabled;
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ return 0;
+ } else
+ return min_bad;
+
+ }
+ if (min_bad > 0 && min_bad < good_sectors) {
+ /* only resync enough to reach the next bad->good
+ * transition */
+ good_sectors = min_bad;
+ }
+
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
/* extra read targets are also write targets */
write_targets += read_targets-1;
@@ -1769,6 +2280,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
if (max_sector > mddev->resync_max)
max_sector = mddev->resync_max; /* Don't do IO beyond here */
+ if (max_sector > sector_nr + good_sectors)
+ max_sector = sector_nr + good_sectors;
nr_sectors = 0;
sync_blocks = 0;
do {
@@ -2154,18 +2667,13 @@ static int raid1_reshape(mddev_t *mddev)
for (d = d2 = 0; d < conf->raid_disks; d++) {
mdk_rdev_t *rdev = conf->mirrors[d].rdev;
if (rdev && rdev->raid_disk != d2) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
+ sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = d2;
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
- if (sysfs_create_link(&mddev->kobj,
- &rdev->kobj, nm))
+ sysfs_unlink_rdev(mddev, rdev);
+ if (sysfs_link_rdev(mddev, rdev))
printk(KERN_WARNING
- "md/raid1:%s: cannot register "
- "%s\n",
- mdname(mddev), nm);
+ "md/raid1:%s: cannot register rd%d\n",
+ mdname(mddev), rdev->raid_disk);
}
if (rdev)
newmirrors[d2++].rdev = rdev;
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index e743a64fac4..e0d676b4897 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -48,6 +48,12 @@ struct r1_private_data_s {
* (fresh device added).
* Cleared when a sync completes.
*/
+ int recovery_disabled; /* when the same as
+ * mddev->recovery_disabled
+ * we don't allow recovery
+ * to be attempted as we
+ * expect a read error
+ */
wait_queue_head_t wait_barrier;
@@ -95,7 +101,7 @@ struct r1bio_s {
struct list_head retry_list;
/* Next two are only valid when R1BIO_BehindIO is set */
- struct page **behind_pages;
+ struct bio_vec *behind_bvecs;
int behind_page_count;
/*
* if the IO is in WRITE direction, then multiple bios are used.
@@ -110,13 +116,24 @@ struct r1bio_s {
* correct the read error. To keep track of bad blocks on a per-bio
* level, we store IO_BLOCKED in the appropriate 'bios' pointer
*/
-#define IO_BLOCKED ((struct bio*)1)
+#define IO_BLOCKED ((struct bio *)1)
+/* When we successfully write to a known bad-block, we need to remove the
+ * bad-block marking which must be done from process context. So we record
+ * the success by setting bios[n] to IO_MADE_GOOD
+ */
+#define IO_MADE_GOOD ((struct bio *)2)
+
+#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
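A minimal sketch of why the sentinel test matters: NULL and both placeholder values compare as special, so put_all_bios() and similar cleanup paths must not bio_put() them.

	struct bio *b = IO_MADE_GOOD;
	if (!BIO_SPECIAL(b))
		bio_put(b);	/* skipped here: b is a sentinel, not a real bio */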
/* bits for r1bio.state */
#define R1BIO_Uptodate 0
#define R1BIO_IsSync 1
#define R1BIO_Degraded 2
#define R1BIO_BehindIO 3
+/* Set ReadError on bios that experience a read error so that
+ * raid1d knows what to do with them.
+ */
+#define R1BIO_ReadError 4
/* For write-behind requests, we call bi_end_io when
* the last non-write-behind device completes, providing
* any write was successful. Otherwise we call when
@@ -125,6 +142,11 @@ struct r1bio_s {
* Record that bi_end_io was called with this flag...
*/
#define R1BIO_Returned 6
+/* If a write for this request means we can clear some
+ * known-bad-block records, we set this flag
+ */
+#define R1BIO_MadeGood 7
+#define R1BIO_WriteError 8
extern int md_raid1_congested(mddev_t *mddev, int bits);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6e846688962..8b29cd4f01c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/seq_file.h>
+#include <linux/ratelimit.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
@@ -123,7 +124,14 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
for (j = 0 ; j < nalloc; j++) {
bio = r10_bio->devs[j].bio;
for (i = 0; i < RESYNC_PAGES; i++) {
- page = alloc_page(gfp_flags);
+ if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
+ &conf->mddev->recovery)) {
+ /* we can share bv_page's during recovery */
+ struct bio *rbio = r10_bio->devs[0].bio;
+ page = rbio->bi_io_vec[i].bv_page;
+ get_page(page);
+ } else
+ page = alloc_page(gfp_flags);
if (unlikely(!page))
goto out_free_pages;
@@ -173,7 +181,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
for (i = 0; i < conf->copies; i++) {
struct bio **bio = & r10_bio->devs[i].bio;
- if (*bio && *bio != IO_BLOCKED)
+ if (!BIO_SPECIAL(*bio))
bio_put(*bio);
*bio = NULL;
}
@@ -183,12 +191,6 @@ static void free_r10bio(r10bio_t *r10_bio)
{
conf_t *conf = r10_bio->mddev->private;
- /*
- * Wake up any possible resync thread that waits for the device
- * to go idle.
- */
- allow_barrier(conf);
-
put_all_bios(conf, r10_bio);
mempool_free(r10_bio, conf->r10bio_pool);
}
@@ -227,9 +229,27 @@ static void reschedule_retry(r10bio_t *r10_bio)
static void raid_end_bio_io(r10bio_t *r10_bio)
{
struct bio *bio = r10_bio->master_bio;
+ int done;
+ conf_t *conf = r10_bio->mddev->private;
- bio_endio(bio,
- test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
+ if (bio->bi_phys_segments) {
+ unsigned long flags;
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio->bi_phys_segments--;
+ done = (bio->bi_phys_segments == 0);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ } else
+ done = 1;
+ if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ if (done) {
+ bio_endio(bio, 0);
+ /*
+ * Wake up any possible resync thread that waits for the device
+ * to go idle.
+ */
+ allow_barrier(conf);
+ }
free_r10bio(r10_bio);
}
@@ -244,6 +264,26 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
r10_bio->devs[slot].addr + (r10_bio->sectors);
}
+/*
+ * Find the disk number which triggered the given bio
+ */
+static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio,
+ struct bio *bio, int *slotp)
+{
+ int slot;
+
+ for (slot = 0; slot < conf->copies; slot++)
+ if (r10_bio->devs[slot].bio == bio)
+ break;
+
+ BUG_ON(slot == conf->copies);
+ update_head_pos(slot, r10_bio);
+
+ if (slotp)
+ *slotp = slot;
+ return r10_bio->devs[slot].devnum;
+}
+
static void raid10_end_read_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -277,34 +317,45 @@ static void raid10_end_read_request(struct bio *bio, int error)
* oops, read error - keep the refcount on the rdev
*/
char b[BDEVNAME_SIZE];
- if (printk_ratelimit())
- printk(KERN_ERR "md/raid10:%s: %s: rescheduling sector %llu\n",
- mdname(conf->mddev),
- bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
+ printk_ratelimited(KERN_ERR
+ "md/raid10:%s: %s: rescheduling sector %llu\n",
+ mdname(conf->mddev),
+ bdevname(conf->mirrors[dev].rdev->bdev, b),
+ (unsigned long long)r10_bio->sector);
+ set_bit(R10BIO_ReadError, &r10_bio->state);
reschedule_retry(r10_bio);
}
}
+static void close_write(r10bio_t *r10_bio)
+{
+ /* clear the bitmap if all writes complete successfully */
+ bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
+ r10_bio->sectors,
+ !test_bit(R10BIO_Degraded, &r10_bio->state),
+ 0);
+ md_write_end(r10_bio->mddev);
+}
+
static void raid10_end_write_request(struct bio *bio, int error)
{
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t *r10_bio = bio->bi_private;
- int slot, dev;
+ int dev;
+ int dec_rdev = 1;
conf_t *conf = r10_bio->mddev->private;
+ int slot;
- for (slot = 0; slot < conf->copies; slot++)
- if (r10_bio->devs[slot].bio == bio)
- break;
- dev = r10_bio->devs[slot].devnum;
+ dev = find_bio_disk(conf, r10_bio, bio, &slot);
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
if (!uptodate) {
- md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
- /* an I/O failed, we can't clear the bitmap */
- set_bit(R10BIO_Degraded, &r10_bio->state);
- } else
+ set_bit(WriteErrorSeen, &conf->mirrors[dev].rdev->flags);
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ dec_rdev = 0;
+ } else {
/*
* Set R10BIO_Uptodate in our master bio, so that
* we will return a good error code to the higher
@@ -314,9 +365,22 @@ static void raid10_end_write_request(struct bio *bio, int error)
* user-side. So if something waits for IO, then it will
* wait for the 'master' bio.
*/
+ sector_t first_bad;
+ int bad_sectors;
+
set_bit(R10BIO_Uptodate, &r10_bio->state);
- update_head_pos(slot, r10_bio);
+ /* Maybe we can clear some bad blocks. */
+ if (is_badblock(conf->mirrors[dev].rdev,
+ r10_bio->devs[slot].addr,
+ r10_bio->sectors,
+ &first_bad, &bad_sectors)) {
+ bio_put(bio);
+ r10_bio->devs[slot].bio = IO_MADE_GOOD;
+ dec_rdev = 0;
+ set_bit(R10BIO_MadeGood, &r10_bio->state);
+ }
+ }
/*
*
@@ -324,16 +388,18 @@ static void raid10_end_write_request(struct bio *bio, int error)
* already.
*/
if (atomic_dec_and_test(&r10_bio->remaining)) {
- /* clear the bitmap if all writes complete successfully */
- bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
- r10_bio->sectors,
- !test_bit(R10BIO_Degraded, &r10_bio->state),
- 0);
- md_write_end(r10_bio->mddev);
- raid_end_bio_io(r10_bio);
+ if (test_bit(R10BIO_WriteError, &r10_bio->state))
+ reschedule_retry(r10_bio);
+ else {
+ close_write(r10_bio);
+ if (test_bit(R10BIO_MadeGood, &r10_bio->state))
+ reschedule_retry(r10_bio);
+ else
+ raid_end_bio_io(r10_bio);
+ }
}
-
- rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+ if (dec_rdev)
+ rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
}
@@ -484,11 +550,12 @@ static int raid10_mergeable_bvec(struct request_queue *q,
* FIXME: possibly should rethink readbalancing and do it differently
* depending on near_copies / far_copies geometry.
*/
-static int read_balance(conf_t *conf, r10bio_t *r10_bio)
+static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors)
{
const sector_t this_sector = r10_bio->sector;
int disk, slot;
- const int sectors = r10_bio->sectors;
+ int sectors = r10_bio->sectors;
+ int best_good_sectors;
sector_t new_distance, best_dist;
mdk_rdev_t *rdev;
int do_balance;
@@ -497,8 +564,10 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
raid10_find_phys(conf, r10_bio);
rcu_read_lock();
retry:
+ sectors = r10_bio->sectors;
best_slot = -1;
best_dist = MaxSector;
+ best_good_sectors = 0;
do_balance = 1;
/*
* Check if we can balance. We can balance on the whole
@@ -511,6 +580,10 @@ retry:
do_balance = 0;
for (slot = 0; slot < conf->copies ; slot++) {
+ sector_t first_bad;
+ int bad_sectors;
+ sector_t dev_sector;
+
if (r10_bio->devs[slot].bio == IO_BLOCKED)
continue;
disk = r10_bio->devs[slot].devnum;
@@ -520,6 +593,37 @@ retry:
if (!test_bit(In_sync, &rdev->flags))
continue;
+ dev_sector = r10_bio->devs[slot].addr;
+ if (is_badblock(rdev, dev_sector, sectors,
+ &first_bad, &bad_sectors)) {
+ if (best_dist < MaxSector)
+ /* Already have a better slot */
+ continue;
+ if (first_bad <= dev_sector) {
+ /* Cannot read here. If this is the
+ * 'primary' device, then we must not read
+ * beyond 'bad_sectors' from another device.
+ */
+ bad_sectors -= (dev_sector - first_bad);
+ if (!do_balance && sectors > bad_sectors)
+ sectors = bad_sectors;
+ if (best_good_sectors > sectors)
+ best_good_sectors = sectors;
+ } else {
+ sector_t good_sectors =
+ first_bad - dev_sector;
+ if (good_sectors > best_good_sectors) {
+ best_good_sectors = good_sectors;
+ best_slot = slot;
+ }
+ if (!do_balance)
+ /* Must read from here */
+ break;
+ }
+ continue;
+ } else
+ best_good_sectors = sectors;
+
if (!do_balance)
break;
@@ -561,6 +665,7 @@ retry:
} else
disk = -1;
rcu_read_unlock();
+ *max_sectors = best_good_sectors;
return disk;
}
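The bad-block branch added to read_balance() above is interval arithmetic: if the recorded bad range starts at or before the target sector, the device cannot serve the read at all, while a bad range that starts later merely clips the request to first_bad - dev_sector sectors. A small standalone sketch of that clipping, using hypothetical names:

	#include <stdio.h>

	typedef unsigned long long sector_t;

	/* How many of the 'want' sectors starting at dev_sector can be read
	 * before running into a bad range that starts at first_bad?
	 * 0 means the very first sector is already inside the bad range.
	 */
	static sector_t usable_sectors(sector_t dev_sector, sector_t want,
				       sector_t first_bad)
	{
		if (first_bad <= dev_sector)
			return 0;                      /* request starts in the bad range */
		if (first_bad - dev_sector < want)
			return first_bad - dev_sector; /* clip at the bad range */
		return want;                           /* bad range is beyond the request */
	}

	int main(void)
	{
		printf("%llu\n", usable_sectors(100, 64, 120)); /* 20: clipped    */
		printf("%llu\n", usable_sectors(100, 64,  90)); /* 0:  unreadable */
		printf("%llu\n", usable_sectors(100, 64, 200)); /* 64: untouched  */
		return 0;
	}

read_balance() then reports the clipped length through *max_sectors so the caller can split the request.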
@@ -734,6 +839,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
unsigned long flags;
mdk_rdev_t *blocked_rdev;
int plugged;
+ int sectors_handled;
+ int max_sectors;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
@@ -808,12 +915,26 @@ static int make_request(mddev_t *mddev, struct bio * bio)
r10_bio->sector = bio->bi_sector;
r10_bio->state = 0;
+ /* We might need to issue multiple reads to different
+ * devices if there are bad blocks around, so we keep
+ * track of the number of reads in bio->bi_phys_segments.
+ * If this is 0, there is only one r10_bio and no locking
+ * will be needed when the request completes. If it is
+ * non-zero, then it is the number of not-completed requests.
+ */
+ bio->bi_phys_segments = 0;
+ clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+
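The comment above describes an informal reference count: bio->bi_phys_segments stays 0 while there is a single r10_bio, and otherwise holds the number of r10_bios still outstanding, protected by device_lock. A self-contained sketch of that counting scheme with a plain mutex (illustrative, not the kernel types):

	#include <pthread.h>
	#include <stdio.h>

	struct master_req {
		pthread_mutex_t lock;
		int segments;   /* 0 = single sub-request, else outstanding count */
	};

	static void add_split(struct master_req *m)
	{
		pthread_mutex_lock(&m->lock);
		if (m->segments == 0)
			m->segments = 2;     /* the existing part plus the new one */
		else
			m->segments++;
		pthread_mutex_unlock(&m->lock);
	}

	/* returns 1 when the caller was the last outstanding part */
	static int complete_part(struct master_req *m)
	{
		int done;
		pthread_mutex_lock(&m->lock);
		if (m->segments) {
			m->segments--;
			done = (m->segments == 0);
		} else
			done = 1;
		pthread_mutex_unlock(&m->lock);
		return done;
	}

	int main(void)
	{
		struct master_req m = { PTHREAD_MUTEX_INITIALIZER, 0 };
		add_split(&m);                      /* request was split once: 2 parts */
		printf("%d\n", complete_part(&m));  /* 0 - one part still pending */
		printf("%d\n", complete_part(&m));  /* 1 - last part, end the bio */
		return 0;
	}

raid_end_bio_io() above applies the same rule on the completion side: it decrements the count under device_lock and ends the master bio only when it reaches zero.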
if (rw == READ) {
/*
* read balancing logic:
*/
- int disk = read_balance(conf, r10_bio);
- int slot = r10_bio->read_slot;
+ int disk;
+ int slot;
+
+read_again:
+ disk = read_balance(conf, r10_bio, &max_sectors);
+ slot = r10_bio->read_slot;
if (disk < 0) {
raid_end_bio_io(r10_bio);
return 0;
@@ -821,6 +942,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
mirror = conf->mirrors + disk;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
r10_bio->devs[slot].bio = read_bio;
@@ -831,7 +954,37 @@ static int make_request(mddev_t *mddev, struct bio * bio)
read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r10_bio;
- generic_make_request(read_bio);
+ if (max_sectors < r10_bio->sectors) {
+ /* Could not read all from this device, so we will
+ * need another r10_bio.
+ */
+ sectors_handled = (r10_bio->sector + max_sectors
+ - bio->bi_sector);
+ r10_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (bio->bi_phys_segments == 0)
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+ /* Cannot call generic_make_request directly
+ * as that will be queued in __generic_make_request
+ * and subsequent mempool_alloc might block
+ * waiting for it. so hand bio over to raid10d.
+ */
+ reschedule_retry(r10_bio);
+
+ r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+ r10_bio->master_bio = bio;
+ r10_bio->sectors = ((bio->bi_size >> 9)
+ - sectors_handled);
+ r10_bio->state = 0;
+ r10_bio->mddev = mddev;
+ r10_bio->sector = bio->bi_sector + sectors_handled;
+ goto read_again;
+ } else
+ generic_make_request(read_bio);
return 0;
}
@@ -841,13 +994,22 @@ static int make_request(mddev_t *mddev, struct bio * bio)
/* first select target devices under rcu_lock and
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
+ * If there are known/acknowledged bad blocks on any device
+ * on which we have seen a write error, we want to avoid
+ * writing to those blocks. This potentially requires several
+ * writes to write around the bad blocks. Each set of writes
+ * gets its own r10_bio with a set of bios attached. The number
+ * of r10_bios is recorded in bio->bi_phys_segments just as with
+ * the read case.
*/
plugged = mddev_check_plugged(mddev);
raid10_find_phys(conf, r10_bio);
- retry_write:
+retry_write:
blocked_rdev = NULL;
rcu_read_lock();
+ max_sectors = r10_bio->sectors;
+
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
@@ -856,13 +1018,55 @@ static int make_request(mddev_t *mddev, struct bio * bio)
blocked_rdev = rdev;
break;
}
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- atomic_inc(&rdev->nr_pending);
- r10_bio->devs[i].bio = bio;
- } else {
- r10_bio->devs[i].bio = NULL;
+ r10_bio->devs[i].bio = NULL;
+ if (!rdev || test_bit(Faulty, &rdev->flags)) {
set_bit(R10BIO_Degraded, &r10_bio->state);
+ continue;
}
+ if (test_bit(WriteErrorSeen, &rdev->flags)) {
+ sector_t first_bad;
+ sector_t dev_sector = r10_bio->devs[i].addr;
+ int bad_sectors;
+ int is_bad;
+
+ is_bad = is_badblock(rdev, dev_sector,
+ max_sectors,
+ &first_bad, &bad_sectors);
+ if (is_bad < 0) {
+ /* Mustn't write here until the bad block
+ * is acknowledged
+ */
+ atomic_inc(&rdev->nr_pending);
+ set_bit(BlockedBadBlocks, &rdev->flags);
+ blocked_rdev = rdev;
+ break;
+ }
+ if (is_bad && first_bad <= dev_sector) {
+ /* Cannot write here at all */
+ bad_sectors -= (dev_sector - first_bad);
+ if (bad_sectors < max_sectors)
+ /* Mustn't write more than bad_sectors
+ * to other devices yet
+ */
+ max_sectors = bad_sectors;
+ /* We don't set R10BIO_Degraded as that
+ * only applies if the disk is missing,
+ * so it might be re-added, and we want to
+ * know to recover this chunk.
+ * In this case the device is here, and the
+ * fact that this chunk is not in-sync is
+ * recorded in the bad block log.
+ */
+ continue;
+ }
+ if (is_bad) {
+ int good_sectors = first_bad - dev_sector;
+ if (good_sectors < max_sectors)
+ max_sectors = good_sectors;
+ }
+ }
+ r10_bio->devs[i].bio = bio;
+ atomic_inc(&rdev->nr_pending);
}
rcu_read_unlock();
@@ -882,8 +1086,22 @@ static int make_request(mddev_t *mddev, struct bio * bio)
goto retry_write;
}
+ if (max_sectors < r10_bio->sectors) {
+ /* We are splitting this into multiple parts, so
+ * we need to prepare for allocating another r10_bio.
+ */
+ r10_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (bio->bi_phys_segments == 0)
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+ }
+ sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
+
atomic_set(&r10_bio->remaining, 1);
- bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
+ bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
for (i = 0; i < conf->copies; i++) {
struct bio *mbio;
@@ -892,10 +1110,12 @@ static int make_request(mddev_t *mddev, struct bio * bio)
continue;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
r10_bio->devs[i].bio = mbio;
- mbio->bi_sector = r10_bio->devs[i].addr+
- conf->mirrors[d].rdev->data_offset;
+ mbio->bi_sector = (r10_bio->devs[i].addr+
+ conf->mirrors[d].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
mbio->bi_rw = WRITE | do_sync | do_fua;
@@ -920,6 +1140,21 @@ static int make_request(mddev_t *mddev, struct bio * bio)
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
+ if (sectors_handled < (bio->bi_size >> 9)) {
+ /* We need another r10_bio. It has already been counted
+ * in bio->bi_phys_segments.
+ */
+ r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+ r10_bio->master_bio = bio;
+ r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+
+ r10_bio->mddev = mddev;
+ r10_bio->sector = bio->bi_sector + sectors_handled;
+ r10_bio->state = 0;
+ goto retry_write;
+ }
+
if (do_sync || !mddev->bitmap || !plugged)
md_wakeup_thread(mddev->thread);
return 0;
@@ -949,6 +1184,30 @@ static void status(struct seq_file *seq, mddev_t *mddev)
seq_printf(seq, "]");
}
+/* check if there are enough drives for
+ * every block to appear on at least one.
+ * Don't consider the device numbered 'ignore'
+ * as we might be about to remove it.
+ */
+static int enough(conf_t *conf, int ignore)
+{
+ int first = 0;
+
+ do {
+ int n = conf->copies;
+ int cnt = 0;
+ while (n--) {
+ if (conf->mirrors[first].rdev &&
+ first != ignore)
+ cnt++;
+ first = (first+1) % conf->raid_disks;
+ }
+ if (cnt == 0)
+ return 0;
+ } while (first != 0);
+ return 1;
+}
+
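enough() walks the mirror slots in windows of 'copies' consecutive entries, wrapping modulo raid_disks (which lines up with the default 'near' layout), and fails if any window has no working device other than the one being ignored. A hedged userspace rendering of the same loop over a boolean array:

	#include <stdio.h>

	/* working[i] says whether mirror slot i currently has a usable device.
	 * Every window of 'copies' consecutive slots (mod raid_disks) must
	 * contain at least one working device, optionally ignoring one slot
	 * that is about to be removed.
	 */
	static int enough(const int *working, int raid_disks, int copies, int ignore)
	{
		int first = 0;

		do {
			int n = copies;
			int cnt = 0;
			while (n--) {
				if (working[first] && first != ignore)
					cnt++;
				first = (first + 1) % raid_disks;
			}
			if (cnt == 0)
				return 0;
		} while (first != 0);
		return 1;
	}

	int main(void)
	{
		int ok[4]   = { 1, 1, 1, 1 };
		int hole[4] = { 1, 1, 0, 0 };   /* both copies of one block gone */

		printf("%d\n", enough(ok, 4, 2, -1));    /* 1 */
		printf("%d\n", enough(hole, 4, 2, -1));  /* 0 */
		printf("%d\n", enough(ok, 4, 2, 1));     /* 1: can still drop slot 1 */
		return 0;
	}

Passing -1 as 'ignore', as raid10_add_disk() and run() do, disables the exclusion.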
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
char b[BDEVNAME_SIZE];
@@ -961,13 +1220,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
* else mark the drive as failed
*/
if (test_bit(In_sync, &rdev->flags)
- && conf->raid_disks-mddev->degraded == 1)
+ && !enough(conf, rdev->raid_disk))
/*
* Don't fail the drive, just return an IO error.
- * The test should really be more sophisticated than
- * "working_disks == 1", but it isn't critical, and
- * can wait until we do more sophisticated "is the drive
- * really dead" tests...
*/
return;
if (test_and_clear_bit(In_sync, &rdev->flags)) {
@@ -980,6 +1235,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
}
+ set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
printk(KERN_ALERT
@@ -1022,27 +1278,6 @@ static void close_sync(conf_t *conf)
conf->r10buf_pool = NULL;
}
-/* check if there are enough drives for
- * every block to appear on atleast one
- */
-static int enough(conf_t *conf)
-{
- int first = 0;
-
- do {
- int n = conf->copies;
- int cnt = 0;
- while (n--) {
- if (conf->mirrors[first].rdev)
- cnt++;
- first = (first+1) % conf->raid_disks;
- }
- if (cnt == 0)
- return 0;
- } while (first != 0);
- return 1;
-}
-
static int raid10_spare_active(mddev_t *mddev)
{
int i;
@@ -1078,7 +1313,6 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
conf_t *conf = mddev->private;
int err = -EEXIST;
int mirror;
- mirror_info_t *p;
int first = 0;
int last = conf->raid_disks - 1;
@@ -1087,44 +1321,47 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* very different from resync
*/
return -EBUSY;
- if (!enough(conf))
+ if (!enough(conf, -1))
return -EINVAL;
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
- if (rdev->saved_raid_disk >= 0 &&
- rdev->saved_raid_disk >= first &&
+ if (rdev->saved_raid_disk >= first &&
conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
mirror = rdev->saved_raid_disk;
else
mirror = first;
- for ( ; mirror <= last ; mirror++)
- if ( !(p=conf->mirrors+mirror)->rdev) {
-
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
- /* as we don't honour merge_bvec_fn, we must
- * never risk violating it, so limit
- * ->max_segments to one lying with a single
- * page, as a one page request is never in
- * violation.
- */
- if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
- blk_queue_max_segments(mddev->queue, 1);
- blk_queue_segment_boundary(mddev->queue,
- PAGE_CACHE_SIZE - 1);
- }
+ for ( ; mirror <= last ; mirror++) {
+ mirror_info_t *p = &conf->mirrors[mirror];
+ if (p->recovery_disabled == mddev->recovery_disabled)
+ continue;
+ if (p->rdev)
+ continue;
- p->head_position = 0;
- rdev->raid_disk = mirror;
- err = 0;
- if (rdev->saved_raid_disk != mirror)
- conf->fullsync = 1;
- rcu_assign_pointer(p->rdev, rdev);
- break;
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ /* as we don't honour merge_bvec_fn, we must
+ * never risk violating it, so limit
+ * ->max_segments to one lying with a single
+ * page, as a one page request is never in
+ * violation.
+ */
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
}
+ p->head_position = 0;
+ rdev->raid_disk = mirror;
+ err = 0;
+ if (rdev->saved_raid_disk != mirror)
+ conf->fullsync = 1;
+ rcu_assign_pointer(p->rdev, rdev);
+ break;
+ }
+
md_integrity_add_rdev(rdev, mddev);
print_conf(conf);
return err;
@@ -1149,7 +1386,8 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
* is not possible.
*/
if (!test_bit(Faulty, &rdev->flags) &&
- enough(conf)) {
+ mddev->recovery_disabled != p->recovery_disabled &&
+ enough(conf, -1)) {
err = -EBUSY;
goto abort;
}
@@ -1174,24 +1412,18 @@ static void end_sync_read(struct bio *bio, int error)
{
r10bio_t *r10_bio = bio->bi_private;
conf_t *conf = r10_bio->mddev->private;
- int i,d;
+ int d;
- for (i=0; i<conf->copies; i++)
- if (r10_bio->devs[i].bio == bio)
- break;
- BUG_ON(i == conf->copies);
- update_head_pos(i, r10_bio);
- d = r10_bio->devs[i].devnum;
+ d = find_bio_disk(conf, r10_bio, bio, NULL);
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
set_bit(R10BIO_Uptodate, &r10_bio->state);
- else {
+ else
+ /* The write handler will notice the lack of
+ * R10BIO_Uptodate and record any errors etc
+ */
atomic_add(r10_bio->sectors,
&conf->mirrors[d].rdev->corrected_errors);
- if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
- md_error(r10_bio->mddev,
- conf->mirrors[d].rdev);
- }
/* for reconstruct, we always reschedule after a read.
* for resync, only after all reads
@@ -1206,40 +1438,60 @@ static void end_sync_read(struct bio *bio, int error)
}
}
-static void end_sync_write(struct bio *bio, int error)
+static void end_sync_request(r10bio_t *r10_bio)
{
- int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- r10bio_t *r10_bio = bio->bi_private;
mddev_t *mddev = r10_bio->mddev;
- conf_t *conf = mddev->private;
- int i,d;
-
- for (i = 0; i < conf->copies; i++)
- if (r10_bio->devs[i].bio == bio)
- break;
- d = r10_bio->devs[i].devnum;
- if (!uptodate)
- md_error(mddev, conf->mirrors[d].rdev);
-
- update_head_pos(i, r10_bio);
-
- rdev_dec_pending(conf->mirrors[d].rdev, mddev);
while (atomic_dec_and_test(&r10_bio->remaining)) {
if (r10_bio->master_bio == NULL) {
/* the primary of several recovery bios */
sector_t s = r10_bio->sectors;
- put_buf(r10_bio);
+ if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
+ test_bit(R10BIO_WriteError, &r10_bio->state))
+ reschedule_retry(r10_bio);
+ else
+ put_buf(r10_bio);
md_done_sync(mddev, s, 1);
break;
} else {
r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
- put_buf(r10_bio);
+ if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
+ test_bit(R10BIO_WriteError, &r10_bio->state))
+ reschedule_retry(r10_bio);
+ else
+ put_buf(r10_bio);
r10_bio = r10_bio2;
}
}
}
+static void end_sync_write(struct bio *bio, int error)
+{
+ int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ r10bio_t *r10_bio = bio->bi_private;
+ mddev_t *mddev = r10_bio->mddev;
+ conf_t *conf = mddev->private;
+ int d;
+ sector_t first_bad;
+ int bad_sectors;
+ int slot;
+
+ d = find_bio_disk(conf, r10_bio, bio, &slot);
+
+ if (!uptodate) {
+ set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags);
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ } else if (is_badblock(conf->mirrors[d].rdev,
+ r10_bio->devs[slot].addr,
+ r10_bio->sectors,
+ &first_bad, &bad_sectors))
+ set_bit(R10BIO_MadeGood, &r10_bio->state);
+
+ rdev_dec_pending(conf->mirrors[d].rdev, mddev);
+
+ end_sync_request(r10_bio);
+}
+
/*
* Note: sync and recover are handled very differently for raid10
* This code is for resync.
@@ -1299,11 +1551,12 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
if (j == vcnt)
continue;
mddev->resync_mismatches += r10_bio->sectors;
+ if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+ /* Don't fix anything. */
+ continue;
}
- if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
- /* Don't fix anything. */
- continue;
- /* Ok, we need to write this bio
+ /* Ok, we need to write this bio, either to correct an
+ * inconsistency or to correct an unreadable block.
* First we need to fixup bv_offset, bv_len and
* bi_vecs, as the read request might have corrupted these
*/
@@ -1355,32 +1608,107 @@ done:
* The second for writing.
*
*/
+static void fix_recovery_read_error(r10bio_t *r10_bio)
+{
+ /* We got a read error during recovery.
+ * We repeat the read in smaller page-sized sections.
+ * If a read succeeds, write it to the new device or record
+ * a bad block if we cannot.
+ * If a read fails, record a bad block on both old and
+ * new devices.
+ */
+ mddev_t *mddev = r10_bio->mddev;
+ conf_t *conf = mddev->private;
+ struct bio *bio = r10_bio->devs[0].bio;
+ sector_t sect = 0;
+ int sectors = r10_bio->sectors;
+ int idx = 0;
+ int dr = r10_bio->devs[0].devnum;
+ int dw = r10_bio->devs[1].devnum;
+
+ while (sectors) {
+ int s = sectors;
+ mdk_rdev_t *rdev;
+ sector_t addr;
+ int ok;
+
+ if (s > (PAGE_SIZE>>9))
+ s = PAGE_SIZE >> 9;
+
+ rdev = conf->mirrors[dr].rdev;
+ addr = r10_bio->devs[0].addr + sect;
+ ok = sync_page_io(rdev,
+ addr,
+ s << 9,
+ bio->bi_io_vec[idx].bv_page,
+ READ, false);
+ if (ok) {
+ rdev = conf->mirrors[dw].rdev;
+ addr = r10_bio->devs[1].addr + sect;
+ ok = sync_page_io(rdev,
+ addr,
+ s << 9,
+ bio->bi_io_vec[idx].bv_page,
+ WRITE, false);
+ if (!ok)
+ set_bit(WriteErrorSeen, &rdev->flags);
+ }
+ if (!ok) {
+ /* We don't worry if we cannot set a bad block -
+ * it really is bad so there is no loss in not
+ * recording it yet
+ */
+ rdev_set_badblocks(rdev, addr, s, 0);
+
+ if (rdev != conf->mirrors[dw].rdev) {
+ /* need bad block on destination too */
+ mdk_rdev_t *rdev2 = conf->mirrors[dw].rdev;
+ addr = r10_bio->devs[1].addr + sect;
+ ok = rdev_set_badblocks(rdev2, addr, s, 0);
+ if (!ok) {
+ /* just abort the recovery */
+ printk(KERN_NOTICE
+ "md/raid10:%s: recovery aborted"
+ " due to read error\n",
+ mdname(mddev));
+
+ conf->mirrors[dw].recovery_disabled
+ = mddev->recovery_disabled;
+ set_bit(MD_RECOVERY_INTR,
+ &mddev->recovery);
+ break;
+ }
+ }
+ }
+
+ sectors -= s;
+ sect += s;
+ idx++;
+ }
+}
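fix_recovery_read_error() above walks the failed region one page at a time, clamping each step to PAGE_SIZE >> 9 sectors and advancing the sector offset and the page index together. A trivial standalone version of just that stepping loop, with made-up values:

	#include <stdio.h>

	#define PAGE_SIZE 4096

	int main(void)
	{
		int sectors = 24;          /* 12 KiB left to repair      */
		long long sect = 1000;     /* current offset, in sectors */
		int idx = 0;               /* which page buffer to use   */

		while (sectors) {
			int s = sectors;
			if (s > (PAGE_SIZE >> 9))
				s = PAGE_SIZE >> 9;    /* at most one page (8 sectors) */

			/* here the kernel code would read s sectors at 'sect' from
			 * the source device and, if that worked, write them to the
			 * device being recovered, recording bad blocks on failure */
			printf("chunk %d: %d sectors at %lld\n", idx, s, sect);

			sectors -= s;
			sect += s;
			idx++;
		}
		return 0;
	}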
static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
{
conf_t *conf = mddev->private;
- int i, d;
- struct bio *bio, *wbio;
+ int d;
+ struct bio *wbio;
+ if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
+ fix_recovery_read_error(r10_bio);
+ end_sync_request(r10_bio);
+ return;
+ }
- /* move the pages across to the second bio
+ /*
+ * share the pages with the first bio
* and submit the write request
*/
- bio = r10_bio->devs[0].bio;
wbio = r10_bio->devs[1].bio;
- for (i=0; i < wbio->bi_vcnt; i++) {
- struct page *p = bio->bi_io_vec[i].bv_page;
- bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page;
- wbio->bi_io_vec[i].bv_page = p;
- }
d = r10_bio->devs[1].devnum;
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
- if (test_bit(R10BIO_Uptodate, &r10_bio->state))
- generic_make_request(wbio);
- else
- bio_endio(wbio, -EIO);
+ generic_make_request(wbio);
}
@@ -1421,6 +1749,26 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
}
+static int r10_sync_page_io(mdk_rdev_t *rdev, sector_t sector,
+ int sectors, struct page *page, int rw)
+{
+ sector_t first_bad;
+ int bad_sectors;
+
+ if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
+ && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
+ return -1;
+ if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+ /* success */
+ return 1;
+ if (rw == WRITE)
+ set_bit(WriteErrorSeen, &rdev->flags);
+ /* need to record an error - either for the block or the device */
+ if (!rdev_set_badblocks(rdev, sector, sectors, 0))
+ md_error(rdev->mddev, rdev);
+ return 0;
+}
+
/*
* This is a kernel thread which:
*
@@ -1476,10 +1824,15 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
rcu_read_lock();
do {
+ sector_t first_bad;
+ int bad_sectors;
+
d = r10_bio->devs[sl].devnum;
rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev &&
- test_bit(In_sync, &rdev->flags)) {
+ test_bit(In_sync, &rdev->flags) &&
+ is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
+ &first_bad, &bad_sectors) == 0) {
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
success = sync_page_io(rdev,
@@ -1499,9 +1852,19 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
rcu_read_unlock();
if (!success) {
- /* Cannot read from anywhere -- bye bye array */
+ /* Cannot read from anywhere, just mark the block
+ * as bad on the first device to discourage future
+ * reads.
+ */
int dn = r10_bio->devs[r10_bio->read_slot].devnum;
- md_error(mddev, conf->mirrors[dn].rdev);
+ rdev = conf->mirrors[dn].rdev;
+
+ if (!rdev_set_badblocks(
+ rdev,
+ r10_bio->devs[r10_bio->read_slot].addr
+ + sect,
+ s, 0))
+ md_error(mddev, rdev);
break;
}
@@ -1516,80 +1879,82 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
sl--;
d = r10_bio->devs[sl].devnum;
rdev = rcu_dereference(conf->mirrors[d].rdev);
- if (rdev &&
- test_bit(In_sync, &rdev->flags)) {
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- atomic_add(s, &rdev->corrected_errors);
- if (sync_page_io(rdev,
- r10_bio->devs[sl].addr +
- sect,
- s<<9, conf->tmppage, WRITE, false)
- == 0) {
- /* Well, this device is dead */
- printk(KERN_NOTICE
- "md/raid10:%s: read correction "
- "write failed"
- " (%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(
- sect + rdev->data_offset),
- bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "md/raid10:%s: %s: failing "
- "drive\n",
- mdname(mddev),
- bdevname(rdev->bdev, b));
- md_error(mddev, rdev);
- }
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
+ if (!rdev ||
+ !test_bit(In_sync, &rdev->flags))
+ continue;
+
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ if (r10_sync_page_io(rdev,
+ r10_bio->devs[sl].addr +
+ sect,
+ s<<9, conf->tmppage, WRITE)
+ == 0) {
+ /* Well, this device is dead */
+ printk(KERN_NOTICE
+ "md/raid10:%s: read correction "
+ "write failed"
+ " (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(
+ sect + rdev->data_offset),
+ bdevname(rdev->bdev, b));
+ printk(KERN_NOTICE "md/raid10:%s: %s: failing "
+ "drive\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b));
}
+ rdev_dec_pending(rdev, mddev);
+ rcu_read_lock();
}
sl = start;
while (sl != r10_bio->read_slot) {
+ char b[BDEVNAME_SIZE];
if (sl==0)
sl = conf->copies;
sl--;
d = r10_bio->devs[sl].devnum;
rdev = rcu_dereference(conf->mirrors[d].rdev);
- if (rdev &&
- test_bit(In_sync, &rdev->flags)) {
- char b[BDEVNAME_SIZE];
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- if (sync_page_io(rdev,
- r10_bio->devs[sl].addr +
- sect,
- s<<9, conf->tmppage,
- READ, false) == 0) {
- /* Well, this device is dead */
- printk(KERN_NOTICE
- "md/raid10:%s: unable to read back "
- "corrected sectors"
- " (%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(
- sect + rdev->data_offset),
- bdevname(rdev->bdev, b));
- printk(KERN_NOTICE "md/raid10:%s: %s: failing drive\n",
- mdname(mddev),
- bdevname(rdev->bdev, b));
-
- md_error(mddev, rdev);
- } else {
- printk(KERN_INFO
- "md/raid10:%s: read error corrected"
- " (%d sectors at %llu on %s)\n",
- mdname(mddev), s,
- (unsigned long long)(
- sect + rdev->data_offset),
- bdevname(rdev->bdev, b));
- }
+ if (!rdev ||
+ !test_bit(In_sync, &rdev->flags))
+ continue;
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ switch (r10_sync_page_io(rdev,
+ r10_bio->devs[sl].addr +
+ sect,
+ s<<9, conf->tmppage,
+ READ)) {
+ case 0:
+ /* Well, this device is dead */
+ printk(KERN_NOTICE
+ "md/raid10:%s: unable to read back "
+ "corrected sectors"
+ " (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(
+ sect + rdev->data_offset),
+ bdevname(rdev->bdev, b));
+ printk(KERN_NOTICE "md/raid10:%s: %s: failing "
+ "drive\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b));
+ break;
+ case 1:
+ printk(KERN_INFO
+ "md/raid10:%s: read error corrected"
+ " (%d sectors at %llu on %s)\n",
+ mdname(mddev), s,
+ (unsigned long long)(
+ sect + rdev->data_offset),
+ bdevname(rdev->bdev, b));
+ atomic_add(s, &rdev->corrected_errors);
}
+
+ rdev_dec_pending(rdev, mddev);
+ rcu_read_lock();
}
rcu_read_unlock();
@@ -1598,21 +1963,254 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
}
}
+static void bi_complete(struct bio *bio, int error)
+{
+ complete((struct completion *)bio->bi_private);
+}
+
+static int submit_bio_wait(int rw, struct bio *bio)
+{
+ struct completion event;
+ rw |= REQ_SYNC;
+
+ init_completion(&event);
+ bio->bi_private = &event;
+ bio->bi_end_io = bi_complete;
+ submit_bio(rw, bio);
+ wait_for_completion(&event);
+
+ return test_bit(BIO_UPTODATE, &bio->bi_flags);
+}
+
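submit_bio_wait() above turns the asynchronous bio interface into a synchronous call: a completion is stashed in bi_private, the bi_end_io callback fires it, and the submitter sleeps until then. The same shape expressed with POSIX threads and a condition variable, purely as an analogy and not the kernel API:

	#include <pthread.h>
	#include <stdio.h>

	struct completion {
		pthread_mutex_t lock;
		pthread_cond_t  cond;
		int             done;
	};

	static void init_completion(struct completion *c)
	{
		pthread_mutex_init(&c->lock, NULL);
		pthread_cond_init(&c->cond, NULL);
		c->done = 0;
	}

	static void complete(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		c->done = 1;
		pthread_cond_signal(&c->cond);
		pthread_mutex_unlock(&c->lock);
	}

	static void wait_for_completion(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		while (!c->done)
			pthread_cond_wait(&c->cond, &c->lock);
		pthread_mutex_unlock(&c->lock);
	}

	/* stand-in for the asynchronous submit: runs the "I/O" in a thread
	 * and calls back when it finishes */
	static void *fake_io(void *arg)
	{
		complete(arg);        /* plays the role of the end_io callback */
		return NULL;
	}

	int main(void)
	{
		struct completion event;
		pthread_t t;

		init_completion(&event);
		pthread_create(&t, NULL, fake_io, &event);   /* submit */
		wait_for_completion(&event);                 /* block until end_io */
		pthread_join(t, NULL);
		printf("I/O finished\n");
		return 0;
	}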
+static int narrow_write_error(r10bio_t *r10_bio, int i)
+{
+ struct bio *bio = r10_bio->master_bio;
+ mddev_t *mddev = r10_bio->mddev;
+ conf_t *conf = mddev->private;
+ mdk_rdev_t *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
+ /* bio has the data to be written to slot 'i' where
+ * we just recently had a write error.
+ * We repeatedly clone the bio and trim down to one block,
+ * then try the write. Where the write fails we record
+ * a bad block.
+ * It is conceivable that the bio doesn't exactly align with
+ * blocks. We must handle this.
+ *
+ * We currently own a reference to the rdev.
+ */
+
+ int block_sectors;
+ sector_t sector;
+ int sectors;
+ int sect_to_write = r10_bio->sectors;
+ int ok = 1;
+
+ if (rdev->badblocks.shift < 0)
+ return 0;
+
+ block_sectors = 1 << rdev->badblocks.shift;
+ sector = r10_bio->sector;
+ sectors = ((r10_bio->sector + block_sectors)
+ & ~(sector_t)(block_sectors - 1))
+ - sector;
+
+ while (sect_to_write) {
+ struct bio *wbio;
+ if (sectors > sect_to_write)
+ sectors = sect_to_write;
+ /* Write at 'sector' for 'sectors' */
+ wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(wbio, sector - bio->bi_sector, sectors);
+ wbio->bi_sector = (r10_bio->devs[i].addr+
+ rdev->data_offset+
+ (sector - r10_bio->sector));
+ wbio->bi_bdev = rdev->bdev;
+ if (submit_bio_wait(WRITE, wbio) == 0)
+ /* Failure! */
+ ok = rdev_set_badblocks(rdev, sector,
+ sectors, 0)
+ && ok;
+
+ bio_put(wbio);
+ sect_to_write -= sectors;
+ sector += sectors;
+ sectors = block_sectors;
+ }
+ return ok;
+}
+
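In narrow_write_error() above, the only subtle arithmetic is that the first chunk is shortened so every later chunk starts on a block_sectors boundary, since bad blocks are recorded at that granularity. A worked standalone example with made-up numbers:

	#include <stdio.h>

	typedef unsigned long long sector_t;

	int main(void)
	{
		int shift = 3;                   /* bad blocks recorded in 8-sector units */
		int block_sectors = 1 << shift;
		sector_t sector = 1003;          /* start of the failed write  */
		int sect_to_write = 30;          /* its length in sectors      */

		/* first chunk: up to the next block_sectors boundary (1008 here) */
		int sectors = ((sector + block_sectors)
			       & ~(sector_t)(block_sectors - 1)) - sector;

		while (sect_to_write) {
			if (sectors > sect_to_write)
				sectors = sect_to_write;
			/* the kernel would clone + trim the bio and submit it here,
			 * marking a bad block for any chunk whose write fails */
			printf("write %d sectors at %llu\n", sectors, sector);
			sect_to_write -= sectors;
			sector += sectors;
			sectors = block_sectors;  /* later chunks are full blocks */
		}
		return 0;
	}

With a start of 1003 and 8-sector blocks, the chunks come out as 5, 8, 8, 8 and 1 sectors.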
+static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
+{
+ int slot = r10_bio->read_slot;
+ int mirror = r10_bio->devs[slot].devnum;
+ struct bio *bio;
+ conf_t *conf = mddev->private;
+ mdk_rdev_t *rdev;
+ char b[BDEVNAME_SIZE];
+ unsigned long do_sync;
+ int max_sectors;
+
+ /* we got a read error. Maybe the drive is bad. Maybe just
+ * the block and we can fix it.
+ * We freeze all other IO, and try reading the block from
+ * other devices. When we find one, we re-write it
+ * and check whether that fixes the read error.
+ * This is all done synchronously while the array is
+ * frozen.
+ */
+ if (mddev->ro == 0) {
+ freeze_array(conf);
+ fix_read_error(conf, mddev, r10_bio);
+ unfreeze_array(conf);
+ }
+ rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
+
+ bio = r10_bio->devs[slot].bio;
+ bdevname(bio->bi_bdev, b);
+ r10_bio->devs[slot].bio =
+ mddev->ro ? IO_BLOCKED : NULL;
+read_more:
+ mirror = read_balance(conf, r10_bio, &max_sectors);
+ if (mirror == -1) {
+ printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
+ " read error for block %llu\n",
+ mdname(mddev), b,
+ (unsigned long long)r10_bio->sector);
+ raid_end_bio_io(r10_bio);
+ bio_put(bio);
+ return;
+ }
+
+ do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+ if (bio)
+ bio_put(bio);
+ slot = r10_bio->read_slot;
+ rdev = conf->mirrors[mirror].rdev;
+ printk_ratelimited(
+ KERN_ERR
+ "md/raid10:%s: %s: redirecting"
+ "sector %llu to another mirror\n",
+ mdname(mddev),
+ bdevname(rdev->bdev, b),
+ (unsigned long long)r10_bio->sector);
+ bio = bio_clone_mddev(r10_bio->master_bio,
+ GFP_NOIO, mddev);
+ md_trim_bio(bio,
+ r10_bio->sector - bio->bi_sector,
+ max_sectors);
+ r10_bio->devs[slot].bio = bio;
+ bio->bi_sector = r10_bio->devs[slot].addr
+ + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ bio->bi_rw = READ | do_sync;
+ bio->bi_private = r10_bio;
+ bio->bi_end_io = raid10_end_read_request;
+ if (max_sectors < r10_bio->sectors) {
+ /* Drat - have to split this up more */
+ struct bio *mbio = r10_bio->master_bio;
+ int sectors_handled =
+ r10_bio->sector + max_sectors
+ - mbio->bi_sector;
+ r10_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (mbio->bi_phys_segments == 0)
+ mbio->bi_phys_segments = 2;
+ else
+ mbio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+ generic_make_request(bio);
+ bio = NULL;
+
+ r10_bio = mempool_alloc(conf->r10bio_pool,
+ GFP_NOIO);
+ r10_bio->master_bio = mbio;
+ r10_bio->sectors = (mbio->bi_size >> 9)
+ - sectors_handled;
+ r10_bio->state = 0;
+ set_bit(R10BIO_ReadError,
+ &r10_bio->state);
+ r10_bio->mddev = mddev;
+ r10_bio->sector = mbio->bi_sector
+ + sectors_handled;
+
+ goto read_more;
+ } else
+ generic_make_request(bio);
+}
+
+static void handle_write_completed(conf_t *conf, r10bio_t *r10_bio)
+{
+ /* Some sort of write request has finished and it
+ * succeeded in writing where we thought there was a
+ * bad block. So forget the bad block.
+ * Or possibly it failed and we need to record
+ * a bad block.
+ */
+ int m;
+ mdk_rdev_t *rdev;
+
+ if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
+ test_bit(R10BIO_IsRecover, &r10_bio->state)) {
+ for (m = 0; m < conf->copies; m++) {
+ int dev = r10_bio->devs[m].devnum;
+ rdev = conf->mirrors[dev].rdev;
+ if (r10_bio->devs[m].bio == NULL)
+ continue;
+ if (test_bit(BIO_UPTODATE,
+ &r10_bio->devs[m].bio->bi_flags)) {
+ rdev_clear_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors);
+ } else {
+ if (!rdev_set_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors, 0))
+ md_error(conf->mddev, rdev);
+ }
+ }
+ put_buf(r10_bio);
+ } else {
+ for (m = 0; m < conf->copies; m++) {
+ int dev = r10_bio->devs[m].devnum;
+ struct bio *bio = r10_bio->devs[m].bio;
+ rdev = conf->mirrors[dev].rdev;
+ if (bio == IO_MADE_GOOD) {
+ rdev_clear_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors);
+ rdev_dec_pending(rdev, conf->mddev);
+ } else if (bio != NULL &&
+ !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+ if (!narrow_write_error(r10_bio, m)) {
+ md_error(conf->mddev, rdev);
+ set_bit(R10BIO_Degraded,
+ &r10_bio->state);
+ }
+ rdev_dec_pending(rdev, conf->mddev);
+ }
+ }
+ if (test_bit(R10BIO_WriteError,
+ &r10_bio->state))
+ close_write(r10_bio);
+ raid_end_bio_io(r10_bio);
+ }
+}
+
static void raid10d(mddev_t *mddev)
{
r10bio_t *r10_bio;
- struct bio *bio;
unsigned long flags;
conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
- mdk_rdev_t *rdev;
struct blk_plug plug;
md_check_recovery(mddev);
blk_start_plug(&plug);
for (;;) {
- char b[BDEVNAME_SIZE];
flush_pending_writes(conf);
@@ -1628,64 +2226,26 @@ static void raid10d(mddev_t *mddev)
mddev = r10_bio->mddev;
conf = mddev->private;
- if (test_bit(R10BIO_IsSync, &r10_bio->state))
+ if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
+ test_bit(R10BIO_WriteError, &r10_bio->state))
+ handle_write_completed(conf, r10_bio);
+ else if (test_bit(R10BIO_IsSync, &r10_bio->state))
sync_request_write(mddev, r10_bio);
else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
recovery_request_write(mddev, r10_bio);
+ else if (test_bit(R10BIO_ReadError, &r10_bio->state))
+ handle_read_error(mddev, r10_bio);
else {
- int slot = r10_bio->read_slot;
- int mirror = r10_bio->devs[slot].devnum;
- /* we got a read error. Maybe the drive is bad. Maybe just
- * the block and we can fix it.
- * We freeze all other IO, and try reading the block from
- * other devices. When we find one, we re-write
- * and check it that fixes the read error.
- * This is all done synchronously while the array is
- * frozen.
+ /* just a partial read to be scheduled from a
+ * separate context
*/
- if (mddev->ro == 0) {
- freeze_array(conf);
- fix_read_error(conf, mddev, r10_bio);
- unfreeze_array(conf);
- }
- rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
-
- bio = r10_bio->devs[slot].bio;
- r10_bio->devs[slot].bio =
- mddev->ro ? IO_BLOCKED : NULL;
- mirror = read_balance(conf, r10_bio);
- if (mirror == -1) {
- printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
- " read error for block %llu\n",
- mdname(mddev),
- bdevname(bio->bi_bdev,b),
- (unsigned long long)r10_bio->sector);
- raid_end_bio_io(r10_bio);
- bio_put(bio);
- } else {
- const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
- bio_put(bio);
- slot = r10_bio->read_slot;
- rdev = conf->mirrors[mirror].rdev;
- if (printk_ratelimit())
- printk(KERN_ERR "md/raid10:%s: %s: redirecting sector %llu to"
- " another mirror\n",
- mdname(mddev),
- bdevname(rdev->bdev,b),
- (unsigned long long)r10_bio->sector);
- bio = bio_clone_mddev(r10_bio->master_bio,
- GFP_NOIO, mddev);
- r10_bio->devs[slot].bio = bio;
- bio->bi_sector = r10_bio->devs[slot].addr
- + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
- bio->bi_rw = READ | do_sync;
- bio->bi_private = r10_bio;
- bio->bi_end_io = raid10_end_read_request;
- generic_make_request(bio);
- }
+ int slot = r10_bio->read_slot;
+ generic_make_request(r10_bio->devs[slot].bio);
}
+
cond_resched();
+ if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ md_check_recovery(mddev);
}
blk_finish_plug(&plug);
}
@@ -1746,7 +2306,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
int i;
int max_sync;
sector_t sync_blocks;
-
sector_t sectors_skipped = 0;
int chunks_skipped = 0;
@@ -1828,7 +2387,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
/* recovery... the complicated one */
- int j, k;
+ int j;
r10_bio = NULL;
for (i=0 ; i<conf->raid_disks; i++) {
@@ -1836,6 +2395,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
r10bio_t *rb2;
sector_t sect;
int must_sync;
+ int any_working;
if (conf->mirrors[i].rdev == NULL ||
test_bit(In_sync, &conf->mirrors[i].rdev->flags))
@@ -1887,19 +2447,42 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
must_sync = bitmap_start_sync(mddev->bitmap, sect,
&sync_blocks, still_degraded);
+ any_working = 0;
for (j=0; j<conf->copies;j++) {
+ int k;
int d = r10_bio->devs[j].devnum;
+ sector_t from_addr, to_addr;
+ mdk_rdev_t *rdev;
+ sector_t sector, first_bad;
+ int bad_sectors;
if (!conf->mirrors[d].rdev ||
!test_bit(In_sync, &conf->mirrors[d].rdev->flags))
continue;
/* This is where we read from */
+ any_working = 1;
+ rdev = conf->mirrors[d].rdev;
+ sector = r10_bio->devs[j].addr;
+
+ if (is_badblock(rdev, sector, max_sync,
+ &first_bad, &bad_sectors)) {
+ if (first_bad > sector)
+ max_sync = first_bad - sector;
+ else {
+ bad_sectors -= (sector
+ - first_bad);
+ if (max_sync > bad_sectors)
+ max_sync = bad_sectors;
+ continue;
+ }
+ }
bio = r10_bio->devs[0].bio;
bio->bi_next = biolist;
biolist = bio;
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
- bio->bi_sector = r10_bio->devs[j].addr +
+ from_addr = r10_bio->devs[j].addr;
+ bio->bi_sector = from_addr +
conf->mirrors[d].rdev->data_offset;
bio->bi_bdev = conf->mirrors[d].rdev->bdev;
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
@@ -1916,26 +2499,48 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_write;
bio->bi_rw = WRITE;
- bio->bi_sector = r10_bio->devs[k].addr +
+ to_addr = r10_bio->devs[k].addr;
+ bio->bi_sector = to_addr +
conf->mirrors[i].rdev->data_offset;
bio->bi_bdev = conf->mirrors[i].rdev->bdev;
r10_bio->devs[0].devnum = d;
+ r10_bio->devs[0].addr = from_addr;
r10_bio->devs[1].devnum = i;
+ r10_bio->devs[1].addr = to_addr;
break;
}
if (j == conf->copies) {
- /* Cannot recover, so abort the recovery */
+ /* Cannot recover, so abort the recovery or
+ * record a bad block */
put_buf(r10_bio);
if (rb2)
atomic_dec(&rb2->remaining);
r10_bio = rb2;
- if (!test_and_set_bit(MD_RECOVERY_INTR,
- &mddev->recovery))
- printk(KERN_INFO "md/raid10:%s: insufficient "
- "working devices for recovery.\n",
- mdname(mddev));
+ if (any_working) {
+ /* problem is that there are bad blocks
+ * on other device(s)
+ */
+ int k;
+ for (k = 0; k < conf->copies; k++)
+ if (r10_bio->devs[k].devnum == i)
+ break;
+ if (!rdev_set_badblocks(
+ conf->mirrors[i].rdev,
+ r10_bio->devs[k].addr,
+ max_sync, 0))
+ any_working = 0;
+ }
+ if (!any_working) {
+ if (!test_and_set_bit(MD_RECOVERY_INTR,
+ &mddev->recovery))
+ printk(KERN_INFO "md/raid10:%s: insufficient "
+ "working devices for recovery.\n",
+ mdname(mddev));
+ conf->mirrors[i].recovery_disabled
+ = mddev->recovery_disabled;
+ }
break;
}
}
@@ -1979,12 +2584,28 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
for (i=0; i<conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
+ sector_t first_bad, sector;
+ int bad_sectors;
+
bio = r10_bio->devs[i].bio;
bio->bi_end_io = NULL;
clear_bit(BIO_UPTODATE, &bio->bi_flags);
if (conf->mirrors[d].rdev == NULL ||
test_bit(Faulty, &conf->mirrors[d].rdev->flags))
continue;
+ sector = r10_bio->devs[i].addr;
+ if (is_badblock(conf->mirrors[d].rdev,
+ sector, max_sync,
+ &first_bad, &bad_sectors)) {
+ if (first_bad > sector)
+ max_sync = first_bad - sector;
+ else {
+ bad_sectors -= (sector - first_bad);
+ if (max_sync > bad_sectors)
+ max_sync = bad_sectors;
+ continue;
+ }
+ }
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
atomic_inc(&r10_bio->remaining);
bio->bi_next = biolist;
@@ -1992,7 +2613,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
bio->bi_private = r10_bio;
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
- bio->bi_sector = r10_bio->devs[i].addr +
+ bio->bi_sector = sector +
conf->mirrors[d].rdev->data_offset;
bio->bi_bdev = conf->mirrors[d].rdev->bdev;
count++;
@@ -2079,7 +2700,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
return sectors_skipped + nr_sectors;
giveup:
/* There is nowhere to write, so all non-sync
- * drives must be failed, so try the next chunk...
+ * drives must be failed or in resync, all drives
+ * have a bad block, so try the next chunk...
*/
if (sector_nr + max_sync < max_sector)
max_sector = sector_nr + max_sync;
@@ -2249,6 +2871,7 @@ static int run(mddev_t *mddev)
(conf->raid_disks / conf->near_copies));
list_for_each_entry(rdev, &mddev->disks, same_set) {
+
disk_idx = rdev->raid_disk;
if (disk_idx >= conf->raid_disks
|| disk_idx < 0)
@@ -2271,7 +2894,7 @@ static int run(mddev_t *mddev)
disk->head_position = 0;
}
/* need to check that every block has at least one working mirror */
- if (!enough(conf)) {
+ if (!enough(conf, -1)) {
printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
mdname(mddev));
goto out_free_conf;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 944b1104d3b..79cb52a0d4a 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -6,6 +6,11 @@ typedef struct mirror_info mirror_info_t;
struct mirror_info {
mdk_rdev_t *rdev;
sector_t head_position;
+ int recovery_disabled; /* matches
+ * mddev->recovery_disabled
+ * when we shouldn't try
+ * recovering this device.
+ */
};
typedef struct r10bio_s r10bio_t;
@@ -113,10 +118,26 @@ struct r10bio_s {
* level, we store IO_BLOCKED in the appropriate 'bios' pointer
*/
#define IO_BLOCKED ((struct bio*)1)
+/* When we successfully write to a known bad-block, we need to remove the
+ * bad-block marking which must be done from process context. So we record
+ * the success by setting devs[n].bio to IO_MADE_GOOD
+ */
+#define IO_MADE_GOOD ((struct bio *)2)
+
+#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
/* bits for r10bio.state */
#define R10BIO_Uptodate 0
#define R10BIO_IsSync 1
#define R10BIO_IsRecover 2
#define R10BIO_Degraded 3
+/* Set ReadError on bios that experience a read error
+ * so that raid10d knows what to do with them.
+ */
+#define R10BIO_ReadError 4
+/* If a write for this request means we can clear some
+ * known-bad-block records, we set this flag.
+ */
+#define R10BIO_MadeGood 5
+#define R10BIO_WriteError 6
#endif
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b72edf35ec5..dbae459fb02 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -51,6 +51,7 @@
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
+#include <linux/ratelimit.h>
#include "md.h"
#include "raid5.h"
#include "raid0.h"
@@ -96,8 +97,6 @@
#define __inline__
#endif
-#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
-
/*
* We maintain a biased count of active stripes in the bottom 16 bits of
* bi_phys_segments, and a count of processed stripes in the upper 16 bits
@@ -341,7 +340,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
(unsigned long long)sh->sector, i, dev->toread,
dev->read, dev->towrite, dev->written,
test_bit(R5_LOCKED, &dev->flags));
- BUG();
+ WARN_ON(1);
}
dev->flags = 0;
raid5_build_block(sh, i, previous);
@@ -527,6 +526,36 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
+ /* We have already checked bad blocks for reads. Now
+ * need to check for writes.
+ */
+ while ((rw & WRITE) && rdev &&
+ test_bit(WriteErrorSeen, &rdev->flags)) {
+ sector_t first_bad;
+ int bad_sectors;
+ int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
+ &first_bad, &bad_sectors);
+ if (!bad)
+ break;
+
+ if (bad < 0) {
+ set_bit(BlockedBadBlocks, &rdev->flags);
+ if (!conf->mddev->external &&
+ conf->mddev->flags) {
+ /* It is very unlikely, but we might
+ * still need to write out the
+ * bad block log - better give it
+ * a chance */
+ md_check_recovery(conf->mddev);
+ }
+ md_wait_for_blocked_rdev(rdev, conf->mddev);
+ } else {
+ /* Acknowledged bad block - skip the write */
+ rdev_dec_pending(rdev, conf->mddev);
+ rdev = NULL;
+ }
+ }
+
if (rdev) {
if (s->syncing || s->expanding || s->expanded)
md_sync_acct(rdev->bdev, STRIPE_SECTORS);
@@ -548,10 +577,6 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
bi->bi_next = NULL;
- if ((rw & WRITE) &&
- test_bit(R5_ReWrite, &sh->dev[i].flags))
- atomic_add(STRIPE_SECTORS,
- &rdev->corrected_errors);
generic_make_request(bi);
} else {
if (rw & WRITE)
@@ -1020,12 +1045,12 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
struct bio *wbi;
- spin_lock(&sh->lock);
+ spin_lock_irq(&sh->raid_conf->device_lock);
chosen = dev->towrite;
dev->towrite = NULL;
BUG_ON(dev->written);
wbi = dev->written = chosen;
- spin_unlock(&sh->lock);
+ spin_unlock_irq(&sh->raid_conf->device_lock);
while (wbi && wbi->bi_sector <
dev->sector + STRIPE_SECTORS) {
@@ -1315,12 +1340,11 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
static int grow_one_stripe(raid5_conf_t *conf)
{
struct stripe_head *sh;
- sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
+ sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
if (!sh)
return 0;
- memset(sh, 0, sizeof(*sh) + (conf->pool_size-1)*sizeof(struct r5dev));
+
sh->raid_conf = conf;
- spin_lock_init(&sh->lock);
#ifdef CONFIG_MULTICORE_RAID456
init_waitqueue_head(&sh->ops.wait_for_ops);
#endif
@@ -1435,14 +1459,11 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
return -ENOMEM;
for (i = conf->max_nr_stripes; i; i--) {
- nsh = kmem_cache_alloc(sc, GFP_KERNEL);
+ nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
if (!nsh)
break;
- memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));
-
nsh->raid_conf = conf;
- spin_lock_init(&nsh->lock);
#ifdef CONFIG_MULTICORE_RAID456
init_waitqueue_head(&nsh->ops.wait_for_ops);
#endif
@@ -1587,12 +1608,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
set_bit(R5_UPTODATE, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
rdev = conf->disks[i].rdev;
- printk_rl(KERN_INFO "md/raid:%s: read error corrected"
- " (%lu sectors at %llu on %s)\n",
- mdname(conf->mddev), STRIPE_SECTORS,
- (unsigned long long)(sh->sector
- + rdev->data_offset),
- bdevname(rdev->bdev, b));
+ printk_ratelimited(
+ KERN_INFO
+ "md/raid:%s: read error corrected"
+ " (%lu sectors at %llu on %s)\n",
+ mdname(conf->mddev), STRIPE_SECTORS,
+ (unsigned long long)(sh->sector
+ + rdev->data_offset),
+ bdevname(rdev->bdev, b));
+ atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
}
@@ -1606,22 +1630,24 @@ static void raid5_end_read_request(struct bio * bi, int error)
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&rdev->read_errors);
if (conf->mddev->degraded >= conf->max_degraded)
- printk_rl(KERN_WARNING
- "md/raid:%s: read error not correctable "
- "(sector %llu on %s).\n",
- mdname(conf->mddev),
- (unsigned long long)(sh->sector
- + rdev->data_offset),
- bdn);
+ printk_ratelimited(
+ KERN_WARNING
+ "md/raid:%s: read error not correctable "
+ "(sector %llu on %s).\n",
+ mdname(conf->mddev),
+ (unsigned long long)(sh->sector
+ + rdev->data_offset),
+ bdn);
else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
/* Oh, no!!! */
- printk_rl(KERN_WARNING
- "md/raid:%s: read error NOT corrected!! "
- "(sector %llu on %s).\n",
- mdname(conf->mddev),
- (unsigned long long)(sh->sector
- + rdev->data_offset),
- bdn);
+ printk_ratelimited(
+ KERN_WARNING
+ "md/raid:%s: read error NOT corrected!! "
+ "(sector %llu on %s).\n",
+ mdname(conf->mddev),
+ (unsigned long long)(sh->sector
+ + rdev->data_offset),
+ bdn);
else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
printk(KERN_WARNING
@@ -1649,6 +1675,8 @@ static void raid5_end_write_request(struct bio *bi, int error)
raid5_conf_t *conf = sh->raid_conf;
int disks = sh->disks, i;
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
+ sector_t first_bad;
+ int bad_sectors;
for (i=0 ; i<disks; i++)
if (bi == &sh->dev[i].req)
@@ -1662,8 +1690,12 @@ static void raid5_end_write_request(struct bio *bi, int error)
return;
}
- if (!uptodate)
- md_error(conf->mddev, conf->disks[i].rdev);
+ if (!uptodate) {
+ set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags);
+ set_bit(R5_WriteError, &sh->dev[i].flags);
+ } else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS,
+ &first_bad, &bad_sectors))
+ set_bit(R5_MadeGood, &sh->dev[i].flags);
rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
@@ -1710,6 +1742,7 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
}
+ set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
printk(KERN_ALERT
@@ -1760,7 +1793,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
/*
* Select the parity disk based on the user selected algorithm.
*/
- pd_idx = qd_idx = ~0;
+ pd_idx = qd_idx = -1;
switch(conf->level) {
case 4:
pd_idx = data_disks;
@@ -2143,12 +2176,11 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
raid5_conf_t *conf = sh->raid_conf;
int firstwrite=0;
- pr_debug("adding bh b#%llu to stripe s#%llu\n",
+ pr_debug("adding bi b#%llu to stripe s#%llu\n",
(unsigned long long)bi->bi_sector,
(unsigned long long)sh->sector);
- spin_lock(&sh->lock);
spin_lock_irq(&conf->device_lock);
if (forwrite) {
bip = &sh->dev[dd_idx].towrite;
@@ -2169,19 +2201,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
bi->bi_next = *bip;
*bip = bi;
bi->bi_phys_segments++;
- spin_unlock_irq(&conf->device_lock);
- spin_unlock(&sh->lock);
-
- pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
- (unsigned long long)bi->bi_sector,
- (unsigned long long)sh->sector, dd_idx);
-
- if (conf->mddev->bitmap && firstwrite) {
- bitmap_startwrite(conf->mddev->bitmap, sh->sector,
- STRIPE_SECTORS, 0);
- sh->bm_seq = conf->seq_flush+1;
- set_bit(STRIPE_BIT_DELAY, &sh->state);
- }
if (forwrite) {
/* check if page is covered */
@@ -2196,12 +2215,23 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
}
+ spin_unlock_irq(&conf->device_lock);
+
+ pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
+ (unsigned long long)(*bip)->bi_sector,
+ (unsigned long long)sh->sector, dd_idx);
+
+ if (conf->mddev->bitmap && firstwrite) {
+ bitmap_startwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS, 0);
+ sh->bm_seq = conf->seq_flush+1;
+ set_bit(STRIPE_BIT_DELAY, &sh->state);
+ }
return 1;
overlap:
set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
spin_unlock_irq(&conf->device_lock);
- spin_unlock(&sh->lock);
return 0;
}
@@ -2238,9 +2268,18 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
rcu_read_lock();
rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && test_bit(In_sync, &rdev->flags))
- /* multiple read failures in one stripe */
- md_error(conf->mddev, rdev);
+ atomic_inc(&rdev->nr_pending);
+ else
+ rdev = NULL;
rcu_read_unlock();
+ if (rdev) {
+ if (!rdev_set_badblocks(
+ rdev,
+ sh->sector,
+ STRIPE_SECTORS, 0))
+ md_error(conf->mddev, rdev);
+ rdev_dec_pending(rdev, conf->mddev);
+ }
}
spin_lock_irq(&conf->device_lock);
/* fail all writes first */
@@ -2308,6 +2347,10 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
if (bitmap_end)
bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS, 0, 0);
+ /* If we were in the middle of a write the parity block might
+ * still be locked - so just clear all R5_LOCKED flags
+ */
+ clear_bit(R5_LOCKED, &sh->dev[i].flags);
}
if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
@@ -2315,109 +2358,73 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
md_wakeup_thread(conf->mddev->thread);
}
-/* fetch_block5 - checks the given member device to see if its data needs
- * to be read or computed to satisfy a request.
- *
- * Returns 1 when no more member devices need to be checked, otherwise returns
- * 0 to tell the loop in handle_stripe_fill5 to continue
- */
-static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
- int disk_idx, int disks)
-{
- struct r5dev *dev = &sh->dev[disk_idx];
- struct r5dev *failed_dev = &sh->dev[s->failed_num];
-
- /* is the data in this block needed, and can we get it? */
- if (!test_bit(R5_LOCKED, &dev->flags) &&
- !test_bit(R5_UPTODATE, &dev->flags) &&
- (dev->toread ||
- (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
- s->syncing || s->expanding ||
- (s->failed &&
- (failed_dev->toread ||
- (failed_dev->towrite &&
- !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
- /* We would like to get this block, possibly by computing it,
- * otherwise read it if the backing disk is insync
- */
- if ((s->uptodate == disks - 1) &&
- (s->failed && disk_idx == s->failed_num)) {
- set_bit(STRIPE_COMPUTE_RUN, &sh->state);
- set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
- set_bit(R5_Wantcompute, &dev->flags);
- sh->ops.target = disk_idx;
- sh->ops.target2 = -1;
- s->req_compute = 1;
- /* Careful: from this point on 'uptodate' is in the eye
- * of raid_run_ops which services 'compute' operations
- * before writes. R5_Wantcompute flags a block that will
- * be R5_UPTODATE by the time it is needed for a
- * subsequent operation.
- */
- s->uptodate++;
- return 1; /* uptodate + compute == disks */
- } else if (test_bit(R5_Insync, &dev->flags)) {
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantread, &dev->flags);
- s->locked++;
- pr_debug("Reading block %d (sync=%d)\n", disk_idx,
- s->syncing);
- }
- }
-
- return 0;
-}
-
-/**
- * handle_stripe_fill5 - read or compute data to satisfy pending requests.
- */
-static void handle_stripe_fill5(struct stripe_head *sh,
- struct stripe_head_state *s, int disks)
+static void
+handle_failed_sync(raid5_conf_t *conf, struct stripe_head *sh,
+ struct stripe_head_state *s)
{
+ int abort = 0;
int i;
- /* look for blocks to read/compute, skip this if a compute
- * is already in flight, or if the stripe contents are in the
- * midst of changing due to a write
+ md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
+ clear_bit(STRIPE_SYNCING, &sh->state);
+ s->syncing = 0;
+ /* There is nothing more to do for sync/check/repair.
+ * For recovery we need to record a bad block on all
+ * non-sync devices, or abort the recovery
*/
- if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
- !sh->reconstruct_state)
- for (i = disks; i--; )
- if (fetch_block5(sh, s, i, disks))
- break;
- set_bit(STRIPE_HANDLE, &sh->state);
+ if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery))
+ return;
+ /* During recovery devices cannot be removed, so locking and
+ * refcounting of rdevs is not needed
+ */
+ for (i = 0; i < conf->raid_disks; i++) {
+ mdk_rdev_t *rdev = conf->disks[i].rdev;
+ if (!rdev
+ || test_bit(Faulty, &rdev->flags)
+ || test_bit(In_sync, &rdev->flags))
+ continue;
+ if (!rdev_set_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS, 0))
+ abort = 1;
+ }
+ if (abort) {
+ conf->recovery_disabled = conf->mddev->recovery_disabled;
+ set_bit(MD_RECOVERY_INTR, &conf->mddev->recovery);
+ }
}
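
Both here and in the read-failure path of handle_failed_stripe the same fallback idiom appears: try to record a bad block, and only escalate when the bad-block log cannot take another entry. Isolated as a hypothetical helper (rdev_set_badblocks() returns 0 on failure, as the calls above rely on):

static void record_badblock_or_fail(mddev_t *mddev, mdk_rdev_t *rdev,
				    sector_t sector)
{
	if (!rdev_set_badblocks(rdev, sector, STRIPE_SECTORS, 0))
		md_error(mddev, rdev);	/* cannot remember the bad block */
}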
-/* fetch_block6 - checks the given member device to see if its data needs
+/* fetch_block - checks the given member device to see if its data needs
* to be read or computed to satisfy a request.
*
* Returns 1 when no more member devices need to be checked, otherwise returns
- * 0 to tell the loop in handle_stripe_fill6 to continue
+ * 0 to tell the loop in handle_stripe_fill to continue
*/
-static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
- struct r6_state *r6s, int disk_idx, int disks)
+static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
+ int disk_idx, int disks)
{
struct r5dev *dev = &sh->dev[disk_idx];
- struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]],
- &sh->dev[r6s->failed_num[1]] };
+ struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
+ &sh->dev[s->failed_num[1]] };
+ /* is the data in this block needed, and can we get it? */
if (!test_bit(R5_LOCKED, &dev->flags) &&
!test_bit(R5_UPTODATE, &dev->flags) &&
(dev->toread ||
(dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
s->syncing || s->expanding ||
- (s->failed >= 1 &&
- (fdev[0]->toread || s->to_write)) ||
- (s->failed >= 2 &&
- (fdev[1]->toread || s->to_write)))) {
+ (s->failed >= 1 && fdev[0]->toread) ||
+ (s->failed >= 2 && fdev[1]->toread) ||
+ (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
+ !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
+ (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
/* we would like to get this block, possibly by computing it,
* otherwise read it if the backing disk is insync
*/
BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
BUG_ON(test_bit(R5_Wantread, &dev->flags));
if ((s->uptodate == disks - 1) &&
- (s->failed && (disk_idx == r6s->failed_num[0] ||
- disk_idx == r6s->failed_num[1]))) {
+ (s->failed && (disk_idx == s->failed_num[0] ||
+ disk_idx == s->failed_num[1]))) {
/* the disk has failed and we've been asked to fetch it;
* so compute it instead
*/
@@ -2429,6 +2436,12 @@ static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
sh->ops.target = disk_idx;
sh->ops.target2 = -1; /* no 2nd target */
s->req_compute = 1;
+ /* Careful: from this point on 'uptodate' is in the eye
+ * of raid_run_ops which services 'compute' operations
+ * before writes. R5_Wantcompute flags a block that will
+ * be R5_UPTODATE by the time it is needed for a
+ * subsequent operation.
+ */
s->uptodate++;
return 1;
} else if (s->uptodate == disks-2 && s->failed >= 2) {
@@ -2469,11 +2482,11 @@ static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
}
/**
- * handle_stripe_fill6 - read or compute data to satisfy pending requests.
+ * handle_stripe_fill - read or compute data to satisfy pending requests.
*/
-static void handle_stripe_fill6(struct stripe_head *sh,
- struct stripe_head_state *s, struct r6_state *r6s,
- int disks)
+static void handle_stripe_fill(struct stripe_head *sh,
+ struct stripe_head_state *s,
+ int disks)
{
int i;
@@ -2484,7 +2497,7 @@ static void handle_stripe_fill6(struct stripe_head *sh,
if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
!sh->reconstruct_state)
for (i = disks; i--; )
- if (fetch_block6(sh, s, r6s, i, disks))
+ if (fetch_block(sh, s, i, disks))
break;
set_bit(STRIPE_HANDLE, &sh->state);
}
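
The only level-dependent clause in the merged fetch_block() condition is the failed-write case: RAID4/5 needs the block when the failed device has a partial (non-overwrite) write pending, while RAID6, which always reconstruct-writes, needs it whenever any write is pending and a device has failed. Restated as a hypothetical predicate for readability:

static int need_block_for_failed_write(struct stripe_head *sh,
				       struct stripe_head_state *s)
{
	struct r5dev *fdev0 = &sh->dev[s->failed_num[0]];

	if (!s->failed)
		return 0;
	if (sh->raid_conf->level <= 5)
		return fdev0->towrite &&
		       !test_bit(R5_OVERWRITE, &fdev0->flags);
	/* RAID6: reconstruct-write needs all data blocks */
	return s->to_write != 0;
}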
@@ -2540,11 +2553,19 @@ static void handle_stripe_clean_event(raid5_conf_t *conf,
md_wakeup_thread(conf->mddev->thread);
}
-static void handle_stripe_dirtying5(raid5_conf_t *conf,
- struct stripe_head *sh, struct stripe_head_state *s, int disks)
+static void handle_stripe_dirtying(raid5_conf_t *conf,
+ struct stripe_head *sh,
+ struct stripe_head_state *s,
+ int disks)
{
int rmw = 0, rcw = 0, i;
- for (i = disks; i--; ) {
+ if (conf->max_degraded == 2) {
+ /* RAID6 requires 'rcw' in current implementation
+ * Calculate the real rcw later - for now fake it
+ * look like rcw is cheaper
+ */
+ rcw = 1; rmw = 2;
+ } else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
if ((dev->towrite || i == sh->pd_idx) &&
@@ -2591,16 +2612,19 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf,
}
}
}
- if (rcw <= rmw && rcw > 0)
+ if (rcw <= rmw && rcw > 0) {
/* want reconstruct write, but need to get some data */
+ rcw = 0;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (!test_bit(R5_OVERWRITE, &dev->flags) &&
- i != sh->pd_idx &&
+ i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Wantcompute, &dev->flags)) &&
- test_bit(R5_Insync, &dev->flags)) {
+ test_bit(R5_Wantcompute, &dev->flags))) {
+ rcw++;
+ if (!test_bit(R5_Insync, &dev->flags))
+ continue; /* it's a failed drive */
if (
test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
pr_debug("Read_old block "
@@ -2614,6 +2638,7 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf,
}
}
}
+ }
/* now if nothing is locked, and if we have enough data,
* we can start a write request
*/
@@ -2630,53 +2655,6 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf,
schedule_reconstruction(sh, s, rcw == 0, 0);
}
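
For RAID4/5 the loop above counts how many blocks would have to be read for a read-modify-write (the old contents of each written block plus the old parity) versus a reconstruct-write (every data block not being fully overwritten), then takes the cheaper path; RAID6 is forced onto rcw by faking rmw > rcw. A self-contained sketch of the two counts over simplified, hypothetical device state (the real code also weighs R5_Insync, R5_LOCKED and R5_Wantcompute):

#include <stdbool.h>

struct dev_state { bool towrite, overwrite, uptodate, is_parity; };

static void count_rmw_rcw(const struct dev_state *dev, int disks,
			  int *rmw, int *rcw)
{
	*rmw = *rcw = 0;
	for (int i = 0; i < disks; i++) {
		/* rmw: need old contents of written blocks and of parity */
		if ((dev[i].towrite || dev[i].is_parity) && !dev[i].uptodate)
			(*rmw)++;
		/* rcw: need every data block that is not fully overwritten */
		if (!dev[i].overwrite && !dev[i].is_parity && !dev[i].uptodate)
			(*rcw)++;
	}
}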
-static void handle_stripe_dirtying6(raid5_conf_t *conf,
- struct stripe_head *sh, struct stripe_head_state *s,
- struct r6_state *r6s, int disks)
-{
- int rcw = 0, pd_idx = sh->pd_idx, i;
- int qd_idx = sh->qd_idx;
-
- set_bit(STRIPE_HANDLE, &sh->state);
- for (i = disks; i--; ) {
- struct r5dev *dev = &sh->dev[i];
- /* check if we haven't enough data */
- if (!test_bit(R5_OVERWRITE, &dev->flags) &&
- i != pd_idx && i != qd_idx &&
- !test_bit(R5_LOCKED, &dev->flags) &&
- !(test_bit(R5_UPTODATE, &dev->flags) ||
- test_bit(R5_Wantcompute, &dev->flags))) {
- rcw++;
- if (!test_bit(R5_Insync, &dev->flags))
- continue; /* it's a failed drive */
-
- if (
- test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- pr_debug("Read_old stripe %llu "
- "block %d for Reconstruct\n",
- (unsigned long long)sh->sector, i);
- set_bit(R5_LOCKED, &dev->flags);
- set_bit(R5_Wantread, &dev->flags);
- s->locked++;
- } else {
- pr_debug("Request delayed stripe %llu "
- "block %d for Reconstruct\n",
- (unsigned long long)sh->sector, i);
- set_bit(STRIPE_DELAYED, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- }
- }
- }
- /* now if nothing is locked, and if we have enough data, we can start a
- * write request
- */
- if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
- s->locked == 0 && rcw == 0 &&
- !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
- schedule_reconstruction(sh, s, 1, 0);
- }
-}
-
static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
struct stripe_head_state *s, int disks)
{
@@ -2695,7 +2673,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
s->uptodate--;
break;
}
- dev = &sh->dev[s->failed_num];
+ dev = &sh->dev[s->failed_num[0]];
/* fall through */
case check_state_compute_result:
sh->check_state = check_state_idle;
@@ -2767,7 +2745,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
struct stripe_head_state *s,
- struct r6_state *r6s, int disks)
+ int disks)
{
int pd_idx = sh->pd_idx;
int qd_idx = sh->qd_idx;
@@ -2786,14 +2764,14 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
switch (sh->check_state) {
case check_state_idle:
/* start a new check operation if there are < 2 failures */
- if (s->failed == r6s->q_failed) {
+ if (s->failed == s->q_failed) {
/* The only possible failed device holds Q, so it
* makes sense to check P (If anything else were failed,
* we would have used P to recreate it).
*/
sh->check_state = check_state_run;
}
- if (!r6s->q_failed && s->failed < 2) {
+ if (!s->q_failed && s->failed < 2) {
/* Q is not failed, and we didn't use it to generate
* anything, so it makes sense to check it
*/
@@ -2835,13 +2813,13 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
*/
BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
if (s->failed == 2) {
- dev = &sh->dev[r6s->failed_num[1]];
+ dev = &sh->dev[s->failed_num[1]];
s->locked++;
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantwrite, &dev->flags);
}
if (s->failed >= 1) {
- dev = &sh->dev[r6s->failed_num[0]];
+ dev = &sh->dev[s->failed_num[0]];
s->locked++;
set_bit(R5_LOCKED, &dev->flags);
set_bit(R5_Wantwrite, &dev->flags);
@@ -2928,8 +2906,7 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
}
}
-static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
- struct r6_state *r6s)
+static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh)
{
int i;
@@ -2971,7 +2948,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
for (j = 0; j < conf->raid_disks; j++)
if (j != sh2->pd_idx &&
- (!r6s || j != sh2->qd_idx) &&
+ j != sh2->qd_idx &&
!test_bit(R5_Expanded, &sh2->dev[j].flags))
break;
if (j == conf->raid_disks) {
@@ -3006,43 +2983,35 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
*
*/
-static void handle_stripe5(struct stripe_head *sh)
+static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
{
raid5_conf_t *conf = sh->raid_conf;
- int disks = sh->disks, i;
- struct bio *return_bi = NULL;
- struct stripe_head_state s;
+ int disks = sh->disks;
struct r5dev *dev;
- mdk_rdev_t *blocked_rdev = NULL;
- int prexor;
- int dec_preread_active = 0;
+ int i;
- memset(&s, 0, sizeof(s));
- pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
- "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
- atomic_read(&sh->count), sh->pd_idx, sh->check_state,
- sh->reconstruct_state);
+ memset(s, 0, sizeof(*s));
- spin_lock(&sh->lock);
- clear_bit(STRIPE_HANDLE, &sh->state);
- clear_bit(STRIPE_DELAYED, &sh->state);
-
- s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
- s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
- s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
+ s->syncing = test_bit(STRIPE_SYNCING, &sh->state);
+ s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
+ s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
+ s->failed_num[0] = -1;
+ s->failed_num[1] = -1;
/* Now to look around and see what can be done */
rcu_read_lock();
+ spin_lock_irq(&conf->device_lock);
for (i=disks; i--; ) {
mdk_rdev_t *rdev;
+ sector_t first_bad;
+ int bad_sectors;
+ int is_bad = 0;
dev = &sh->dev[i];
- pr_debug("check %d: state 0x%lx toread %p read %p write %p "
- "written %p\n", i, dev->flags, dev->toread, dev->read,
- dev->towrite, dev->written);
-
- /* maybe we can request a biofill operation
+ pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
+ i, dev->flags, dev->toread, dev->towrite, dev->written);
+ /* maybe we can reply to a read
*
* new wantfill requests are only permitted while
* ops_complete_biofill is guaranteed to be inactive
@@ -3052,37 +3021,74 @@ static void handle_stripe5(struct stripe_head *sh)
set_bit(R5_Wantfill, &dev->flags);
/* now count some things */
- if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
- if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
- if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;
+ if (test_bit(R5_LOCKED, &dev->flags))
+ s->locked++;
+ if (test_bit(R5_UPTODATE, &dev->flags))
+ s->uptodate++;
+ if (test_bit(R5_Wantcompute, &dev->flags)) {
+ s->compute++;
+ BUG_ON(s->compute > 2);
+ }
if (test_bit(R5_Wantfill, &dev->flags))
- s.to_fill++;
+ s->to_fill++;
else if (dev->toread)
- s.to_read++;
+ s->to_read++;
if (dev->towrite) {
- s.to_write++;
+ s->to_write++;
if (!test_bit(R5_OVERWRITE, &dev->flags))
- s.non_overwrite++;
+ s->non_overwrite++;
}
if (dev->written)
- s.written++;
+ s->written++;
rdev = rcu_dereference(conf->disks[i].rdev);
- if (blocked_rdev == NULL &&
- rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
- blocked_rdev = rdev;
- atomic_inc(&rdev->nr_pending);
+ if (rdev) {
+ is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
+ &first_bad, &bad_sectors);
+ if (s->blocked_rdev == NULL
+ && (test_bit(Blocked, &rdev->flags)
+ || is_bad < 0)) {
+ if (is_bad < 0)
+ set_bit(BlockedBadBlocks,
+ &rdev->flags);
+ s->blocked_rdev = rdev;
+ atomic_inc(&rdev->nr_pending);
+ }
}
clear_bit(R5_Insync, &dev->flags);
if (!rdev)
/* Not in-sync */;
- else if (test_bit(In_sync, &rdev->flags))
+ else if (is_bad) {
+ /* also not in-sync */
+ if (!test_bit(WriteErrorSeen, &rdev->flags)) {
+ /* treat as in-sync, but with a read error
+ * which we can now try to correct
+ */
+ set_bit(R5_Insync, &dev->flags);
+ set_bit(R5_ReadError, &dev->flags);
+ }
+ } else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
else {
- /* could be in-sync depending on recovery/reshape status */
+ /* in sync if before recovery_offset */
if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
set_bit(R5_Insync, &dev->flags);
}
+ if (test_bit(R5_WriteError, &dev->flags)) {
+ clear_bit(R5_Insync, &dev->flags);
+ if (!test_bit(Faulty, &rdev->flags)) {
+ s->handle_bad_blocks = 1;
+ atomic_inc(&rdev->nr_pending);
+ } else
+ clear_bit(R5_WriteError, &dev->flags);
+ }
+ if (test_bit(R5_MadeGood, &dev->flags)) {
+ if (!test_bit(Faulty, &rdev->flags)) {
+ s->handle_bad_blocks = 1;
+ atomic_inc(&rdev->nr_pending);
+ } else
+ clear_bit(R5_MadeGood, &dev->flags);
+ }
if (!test_bit(R5_Insync, &dev->flags)) {
/* The ReadError flag will just be confusing now */
clear_bit(R5_ReadError, &dev->flags);
@@ -3091,313 +3097,60 @@ static void handle_stripe5(struct stripe_head *sh)
if (test_bit(R5_ReadError, &dev->flags))
clear_bit(R5_Insync, &dev->flags);
if (!test_bit(R5_Insync, &dev->flags)) {
- s.failed++;
- s.failed_num = i;
+ if (s->failed < 2)
+ s->failed_num[s->failed] = i;
+ s->failed++;
}
}
+ spin_unlock_irq(&conf->device_lock);
rcu_read_unlock();
-
- if (unlikely(blocked_rdev)) {
- if (s.syncing || s.expanding || s.expanded ||
- s.to_write || s.written) {
- set_bit(STRIPE_HANDLE, &sh->state);
- goto unlock;
- }
- /* There is nothing for the blocked_rdev to block */
- rdev_dec_pending(blocked_rdev, conf->mddev);
- blocked_rdev = NULL;
- }
-
- if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
- set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
- set_bit(STRIPE_BIOFILL_RUN, &sh->state);
- }
-
- pr_debug("locked=%d uptodate=%d to_read=%d"
- " to_write=%d failed=%d failed_num=%d\n",
- s.locked, s.uptodate, s.to_read, s.to_write,
- s.failed, s.failed_num);
- /* check if the array has lost two devices and, if so, some requests might
- * need to be failed
- */
- if (s.failed > 1 && s.to_read+s.to_write+s.written)
- handle_failed_stripe(conf, sh, &s, disks, &return_bi);
- if (s.failed > 1 && s.syncing) {
- md_done_sync(conf->mddev, STRIPE_SECTORS,0);
- clear_bit(STRIPE_SYNCING, &sh->state);
- s.syncing = 0;
- }
-
- /* might be able to return some write requests if the parity block
- * is safe, or on a failed drive
- */
- dev = &sh->dev[sh->pd_idx];
- if ( s.written &&
- ((test_bit(R5_Insync, &dev->flags) &&
- !test_bit(R5_LOCKED, &dev->flags) &&
- test_bit(R5_UPTODATE, &dev->flags)) ||
- (s.failed == 1 && s.failed_num == sh->pd_idx)))
- handle_stripe_clean_event(conf, sh, disks, &return_bi);
-
- /* Now we might consider reading some blocks, either to check/generate
- * parity, or to satisfy requests
- * or to load a block that is being partially written.
- */
- if (s.to_read || s.non_overwrite ||
- (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
- handle_stripe_fill5(sh, &s, disks);
-
- /* Now we check to see if any write operations have recently
- * completed
- */
- prexor = 0;
- if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
- prexor = 1;
- if (sh->reconstruct_state == reconstruct_state_drain_result ||
- sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
- sh->reconstruct_state = reconstruct_state_idle;
-
- /* All the 'written' buffers and the parity block are ready to
- * be written back to disk
- */
- BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
- for (i = disks; i--; ) {
- dev = &sh->dev[i];
- if (test_bit(R5_LOCKED, &dev->flags) &&
- (i == sh->pd_idx || dev->written)) {
- pr_debug("Writing block %d\n", i);
- set_bit(R5_Wantwrite, &dev->flags);
- if (prexor)
- continue;
- if (!test_bit(R5_Insync, &dev->flags) ||
- (i == sh->pd_idx && s.failed == 0))
- set_bit(STRIPE_INSYNC, &sh->state);
- }
- }
- if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
- dec_preread_active = 1;
- }
-
- /* Now to consider new write requests and what else, if anything
- * should be read. We do not handle new writes when:
- * 1/ A 'write' operation (copy+xor) is already in flight.
- * 2/ A 'check' operation is in flight, as it may clobber the parity
- * block.
- */
- if (s.to_write && !sh->reconstruct_state && !sh->check_state)
- handle_stripe_dirtying5(conf, sh, &s, disks);
-
- /* maybe we need to check and possibly fix the parity for this stripe
- * Any reads will already have been scheduled, so we just see if enough
- * data is available. The parity check is held off while parity
- * dependent operations are in flight.
- */
- if (sh->check_state ||
- (s.syncing && s.locked == 0 &&
- !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
- !test_bit(STRIPE_INSYNC, &sh->state)))
- handle_parity_checks5(conf, sh, &s, disks);
-
- if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
- md_done_sync(conf->mddev, STRIPE_SECTORS,1);
- clear_bit(STRIPE_SYNCING, &sh->state);
- }
-
- /* If the failed drive is just a ReadError, then we might need to progress
- * the repair/check process
- */
- if (s.failed == 1 && !conf->mddev->ro &&
- test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
- && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
- && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
- ) {
- dev = &sh->dev[s.failed_num];
- if (!test_bit(R5_ReWrite, &dev->flags)) {
- set_bit(R5_Wantwrite, &dev->flags);
- set_bit(R5_ReWrite, &dev->flags);
- set_bit(R5_LOCKED, &dev->flags);
- s.locked++;
- } else {
- /* let's read it back */
- set_bit(R5_Wantread, &dev->flags);
- set_bit(R5_LOCKED, &dev->flags);
- s.locked++;
- }
- }
-
- /* Finish reconstruct operations initiated by the expansion process */
- if (sh->reconstruct_state == reconstruct_state_result) {
- struct stripe_head *sh2
- = get_active_stripe(conf, sh->sector, 1, 1, 1);
- if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
- /* sh cannot be written until sh2 has been read.
- * so arrange for sh to be delayed a little
- */
- set_bit(STRIPE_DELAYED, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
- &sh2->state))
- atomic_inc(&conf->preread_active_stripes);
- release_stripe(sh2);
- goto unlock;
- }
- if (sh2)
- release_stripe(sh2);
-
- sh->reconstruct_state = reconstruct_state_idle;
- clear_bit(STRIPE_EXPANDING, &sh->state);
- for (i = conf->raid_disks; i--; ) {
- set_bit(R5_Wantwrite, &sh->dev[i].flags);
- set_bit(R5_LOCKED, &sh->dev[i].flags);
- s.locked++;
- }
- }
-
- if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
- !sh->reconstruct_state) {
- /* Need to write out all blocks after computing parity */
- sh->disks = conf->raid_disks;
- stripe_set_idx(sh->sector, conf, 0, sh);
- schedule_reconstruction(sh, &s, 1, 1);
- } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
- clear_bit(STRIPE_EXPAND_READY, &sh->state);
- atomic_dec(&conf->reshape_stripes);
- wake_up(&conf->wait_for_overlap);
- md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
- }
-
- if (s.expanding && s.locked == 0 &&
- !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
- handle_stripe_expansion(conf, sh, NULL);
-
- unlock:
- spin_unlock(&sh->lock);
-
- /* wait for this device to become unblocked */
- if (unlikely(blocked_rdev))
- md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
-
- if (s.ops_request)
- raid_run_ops(sh, s.ops_request);
-
- ops_run_io(sh, &s);
-
- if (dec_preread_active) {
- /* We delay this until after ops_run_io so that if make_request
- * is waiting on a flush, it won't continue until the writes
- * have actually been submitted.
- */
- atomic_dec(&conf->preread_active_stripes);
- if (atomic_read(&conf->preread_active_stripes) <
- IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- }
- return_io(return_bi);
}
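
analyse_stripe() folds the new bad-block information into the R5_Insync decision: no rdev means out of sync; a known bad block is tolerated only as a correctable read error on a device that has never seen a write error; otherwise In_sync, or a recovery_offset past this stripe, counts as in sync. A compact restatement of that decision (sketch only; the flag side effects set above are omitted):

static bool stripe_dev_insync(mdk_rdev_t *rdev, sector_t sector, int is_bad)
{
	if (!rdev)
		return false;
	if (is_bad)
		/* usable only as "in sync with a correctable read error" */
		return !test_bit(WriteErrorSeen, &rdev->flags);
	if (test_bit(In_sync, &rdev->flags))
		return true;
	/* partially recovered device: in sync up to recovery_offset */
	return sector + STRIPE_SECTORS <= rdev->recovery_offset;
}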
-static void handle_stripe6(struct stripe_head *sh)
+static void handle_stripe(struct stripe_head *sh)
{
+ struct stripe_head_state s;
raid5_conf_t *conf = sh->raid_conf;
+ int i;
+ int prexor;
int disks = sh->disks;
- struct bio *return_bi = NULL;
- int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
- struct stripe_head_state s;
- struct r6_state r6s;
- struct r5dev *dev, *pdev, *qdev;
- mdk_rdev_t *blocked_rdev = NULL;
- int dec_preread_active = 0;
+ struct r5dev *pdev, *qdev;
+
+ clear_bit(STRIPE_HANDLE, &sh->state);
+ if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) {
+ /* already being handled, ensure it gets handled
+ * again when current action finishes */
+ set_bit(STRIPE_HANDLE, &sh->state);
+ return;
+ }
+
+ if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
+ set_bit(STRIPE_SYNCING, &sh->state);
+ clear_bit(STRIPE_INSYNC, &sh->state);
+ }
+ clear_bit(STRIPE_DELAYED, &sh->state);
pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
"pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
(unsigned long long)sh->sector, sh->state,
- atomic_read(&sh->count), pd_idx, qd_idx,
+ atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
sh->check_state, sh->reconstruct_state);
- memset(&s, 0, sizeof(s));
-
- spin_lock(&sh->lock);
- clear_bit(STRIPE_HANDLE, &sh->state);
- clear_bit(STRIPE_DELAYED, &sh->state);
-
- s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
- s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
- s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
- /* Now to look around and see what can be done */
-
- rcu_read_lock();
- for (i=disks; i--; ) {
- mdk_rdev_t *rdev;
- dev = &sh->dev[i];
- pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
- i, dev->flags, dev->toread, dev->towrite, dev->written);
- /* maybe we can reply to a read
- *
- * new wantfill requests are only permitted while
- * ops_complete_biofill is guaranteed to be inactive
- */
- if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
- !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
- set_bit(R5_Wantfill, &dev->flags);
+ analyse_stripe(sh, &s);
- /* now count some things */
- if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
- if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
- if (test_bit(R5_Wantcompute, &dev->flags)) {
- s.compute++;
- BUG_ON(s.compute > 2);
- }
-
- if (test_bit(R5_Wantfill, &dev->flags)) {
- s.to_fill++;
- } else if (dev->toread)
- s.to_read++;
- if (dev->towrite) {
- s.to_write++;
- if (!test_bit(R5_OVERWRITE, &dev->flags))
- s.non_overwrite++;
- }
- if (dev->written)
- s.written++;
- rdev = rcu_dereference(conf->disks[i].rdev);
- if (blocked_rdev == NULL &&
- rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
- blocked_rdev = rdev;
- atomic_inc(&rdev->nr_pending);
- }
- clear_bit(R5_Insync, &dev->flags);
- if (!rdev)
- /* Not in-sync */;
- else if (test_bit(In_sync, &rdev->flags))
- set_bit(R5_Insync, &dev->flags);
- else {
- /* in sync if before recovery_offset */
- if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
- set_bit(R5_Insync, &dev->flags);
- }
- if (!test_bit(R5_Insync, &dev->flags)) {
- /* The ReadError flag will just be confusing now */
- clear_bit(R5_ReadError, &dev->flags);
- clear_bit(R5_ReWrite, &dev->flags);
- }
- if (test_bit(R5_ReadError, &dev->flags))
- clear_bit(R5_Insync, &dev->flags);
- if (!test_bit(R5_Insync, &dev->flags)) {
- if (s.failed < 2)
- r6s.failed_num[s.failed] = i;
- s.failed++;
- }
+ if (s.handle_bad_blocks) {
+ set_bit(STRIPE_HANDLE, &sh->state);
+ goto finish;
}
- rcu_read_unlock();
- if (unlikely(blocked_rdev)) {
+ if (unlikely(s.blocked_rdev)) {
if (s.syncing || s.expanding || s.expanded ||
s.to_write || s.written) {
set_bit(STRIPE_HANDLE, &sh->state);
- goto unlock;
+ goto finish;
}
/* There is nothing for the blocked_rdev to block */
- rdev_dec_pending(blocked_rdev, conf->mddev);
- blocked_rdev = NULL;
+ rdev_dec_pending(s.blocked_rdev, conf->mddev);
+ s.blocked_rdev = NULL;
}
if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
@@ -3408,83 +3161,88 @@ static void handle_stripe6(struct stripe_head *sh)
pr_debug("locked=%d uptodate=%d to_read=%d"
" to_write=%d failed=%d failed_num=%d,%d\n",
s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
- r6s.failed_num[0], r6s.failed_num[1]);
- /* check if the array has lost >2 devices and, if so, some requests
- * might need to be failed
+ s.failed_num[0], s.failed_num[1]);
+ /* check if the array has lost more than max_degraded devices and,
+ * if so, some requests might need to be failed.
*/
- if (s.failed > 2 && s.to_read+s.to_write+s.written)
- handle_failed_stripe(conf, sh, &s, disks, &return_bi);
- if (s.failed > 2 && s.syncing) {
- md_done_sync(conf->mddev, STRIPE_SECTORS,0);
- clear_bit(STRIPE_SYNCING, &sh->state);
- s.syncing = 0;
- }
+ if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written)
+ handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
+ if (s.failed > conf->max_degraded && s.syncing)
+ handle_failed_sync(conf, sh, &s);
/*
* might be able to return some write requests if the parity blocks
* are safe, or on a failed drive
*/
- pdev = &sh->dev[pd_idx];
- r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
- || (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
- qdev = &sh->dev[qd_idx];
- r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx)
- || (s.failed >= 2 && r6s.failed_num[1] == qd_idx);
-
- if ( s.written &&
- ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
+ pdev = &sh->dev[sh->pd_idx];
+ s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
+ || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
+ qdev = &sh->dev[sh->qd_idx];
+ s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
+ || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
+ || conf->level < 6;
+
+ if (s.written &&
+ (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
&& !test_bit(R5_LOCKED, &pdev->flags)
&& test_bit(R5_UPTODATE, &pdev->flags)))) &&
- ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
+ (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
&& !test_bit(R5_LOCKED, &qdev->flags)
&& test_bit(R5_UPTODATE, &qdev->flags)))))
- handle_stripe_clean_event(conf, sh, disks, &return_bi);
+ handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
/* Now we might consider reading some blocks, either to check/generate
* parity, or to satisfy requests
* or to load a block that is being partially written.
*/
- if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
- (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
- handle_stripe_fill6(sh, &s, &r6s, disks);
+ if (s.to_read || s.non_overwrite
+ || (conf->level == 6 && s.to_write && s.failed)
+ || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
+ handle_stripe_fill(sh, &s, disks);
/* Now we check to see if any write operations have recently
* completed
*/
- if (sh->reconstruct_state == reconstruct_state_drain_result) {
-
+ prexor = 0;
+ if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
+ prexor = 1;
+ if (sh->reconstruct_state == reconstruct_state_drain_result ||
+ sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
sh->reconstruct_state = reconstruct_state_idle;
- /* All the 'written' buffers and the parity blocks are ready to
+
+ /* All the 'written' buffers and the parity block are ready to
* be written back to disk
*/
BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
- BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags));
+ BUG_ON(sh->qd_idx >= 0 &&
+ !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
for (i = disks; i--; ) {
- dev = &sh->dev[i];
+ struct r5dev *dev = &sh->dev[i];
if (test_bit(R5_LOCKED, &dev->flags) &&
- (i == sh->pd_idx || i == qd_idx ||
- dev->written)) {
+ (i == sh->pd_idx || i == sh->qd_idx ||
+ dev->written)) {
pr_debug("Writing block %d\n", i);
- BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
set_bit(R5_Wantwrite, &dev->flags);
+ if (prexor)
+ continue;
if (!test_bit(R5_Insync, &dev->flags) ||
- ((i == sh->pd_idx || i == qd_idx) &&
- s.failed == 0))
+ ((i == sh->pd_idx || i == sh->qd_idx) &&
+ s.failed == 0))
set_bit(STRIPE_INSYNC, &sh->state);
}
}
if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
- dec_preread_active = 1;
+ s.dec_preread_active = 1;
}
/* Now to consider new write requests and what else, if anything
* should be read. We do not handle new writes when:
- * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
+ * 1/ A 'write' operation (copy+xor) is already in flight.
* 2/ A 'check' operation is in flight, as it may clobber the parity
* block.
*/
if (s.to_write && !sh->reconstruct_state && !sh->check_state)
- handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);
+ handle_stripe_dirtying(conf, sh, &s, disks);
/* maybe we need to check and possibly fix the parity for this stripe
* Any reads will already have been scheduled, so we just see if enough
@@ -3494,20 +3252,24 @@ static void handle_stripe6(struct stripe_head *sh)
if (sh->check_state ||
(s.syncing && s.locked == 0 &&
!test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
- !test_bit(STRIPE_INSYNC, &sh->state)))
- handle_parity_checks6(conf, sh, &s, &r6s, disks);
+ !test_bit(STRIPE_INSYNC, &sh->state))) {
+ if (conf->level == 6)
+ handle_parity_checks6(conf, sh, &s, disks);
+ else
+ handle_parity_checks5(conf, sh, &s, disks);
+ }
if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
- md_done_sync(conf->mddev, STRIPE_SECTORS,1);
+ md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
clear_bit(STRIPE_SYNCING, &sh->state);
}
/* If the failed drives are just a ReadError, then we might need
* to progress the repair/check process
*/
- if (s.failed <= 2 && !conf->mddev->ro)
+ if (s.failed <= conf->max_degraded && !conf->mddev->ro)
for (i = 0; i < s.failed; i++) {
- dev = &sh->dev[r6s.failed_num[i]];
+ struct r5dev *dev = &sh->dev[s.failed_num[i]];
if (test_bit(R5_ReadError, &dev->flags)
&& !test_bit(R5_LOCKED, &dev->flags)
&& test_bit(R5_UPTODATE, &dev->flags)
@@ -3526,8 +3288,26 @@ static void handle_stripe6(struct stripe_head *sh)
}
}
+
/* Finish reconstruct operations initiated by the expansion process */
if (sh->reconstruct_state == reconstruct_state_result) {
+ struct stripe_head *sh_src
+ = get_active_stripe(conf, sh->sector, 1, 1, 1);
+ if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
+ /* sh cannot be written until sh_src has been read.
+ * so arrange for sh to be delayed a little
+ */
+ set_bit(STRIPE_DELAYED, &sh->state);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
+ &sh_src->state))
+ atomic_inc(&conf->preread_active_stripes);
+ release_stripe(sh_src);
+ goto finish;
+ }
+ if (sh_src)
+ release_stripe(sh_src);
+
sh->reconstruct_state = reconstruct_state_idle;
clear_bit(STRIPE_EXPANDING, &sh->state);
for (i = conf->raid_disks; i--; ) {
@@ -3539,24 +3319,7 @@ static void handle_stripe6(struct stripe_head *sh)
if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
!sh->reconstruct_state) {
- struct stripe_head *sh2
- = get_active_stripe(conf, sh->sector, 1, 1, 1);
- if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
- /* sh cannot be written until sh2 has been read.
- * so arrange for sh to be delayed a little
- */
- set_bit(STRIPE_DELAYED, &sh->state);
- set_bit(STRIPE_HANDLE, &sh->state);
- if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
- &sh2->state))
- atomic_inc(&conf->preread_active_stripes);
- release_stripe(sh2);
- goto unlock;
- }
- if (sh2)
- release_stripe(sh2);
-
- /* Need to write out all blocks after computing P&Q */
+ /* Need to write out all blocks after computing parity */
sh->disks = conf->raid_disks;
stripe_set_idx(sh->sector, conf, 0, sh);
schedule_reconstruction(sh, &s, 1, 1);
@@ -3569,22 +3332,39 @@ static void handle_stripe6(struct stripe_head *sh)
if (s.expanding && s.locked == 0 &&
!test_bit(STRIPE_COMPUTE_RUN, &sh->state))
- handle_stripe_expansion(conf, sh, &r6s);
-
- unlock:
- spin_unlock(&sh->lock);
+ handle_stripe_expansion(conf, sh);
+finish:
/* wait for this device to become unblocked */
- if (unlikely(blocked_rdev))
- md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
+ if (unlikely(s.blocked_rdev))
+ md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
+
+ if (s.handle_bad_blocks)
+ for (i = disks; i--; ) {
+ mdk_rdev_t *rdev;
+ struct r5dev *dev = &sh->dev[i];
+ if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
+ /* We own a safe reference to the rdev */
+ rdev = conf->disks[i].rdev;
+ if (!rdev_set_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS, 0))
+ md_error(conf->mddev, rdev);
+ rdev_dec_pending(rdev, conf->mddev);
+ }
+ if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
+ rdev = conf->disks[i].rdev;
+ rdev_clear_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS);
+ rdev_dec_pending(rdev, conf->mddev);
+ }
+ }
if (s.ops_request)
raid_run_ops(sh, s.ops_request);
ops_run_io(sh, &s);
-
- if (dec_preread_active) {
+ if (s.dec_preread_active) {
/* We delay this until after ops_run_io so that if make_request
* is waiting on a flush, it won't continue until the writes
* have actually been submitted.
@@ -3595,15 +3375,9 @@ static void handle_stripe6(struct stripe_head *sh)
md_wakeup_thread(conf->mddev->thread);
}
- return_io(return_bi);
-}
+ return_io(s.return_bi);
-static void handle_stripe(struct stripe_head *sh)
-{
- if (sh->raid_conf->level == 6)
- handle_stripe6(sh);
- else
- handle_stripe5(sh);
+ clear_bit(STRIPE_ACTIVE, &sh->state);
}
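
The former per-stripe spinlock is replaced by the STRIPE_ACTIVE bit: the caller that wins test_and_set_bit owns the stripe for the whole of handle_stripe(), and a loser simply re-arms STRIPE_HANDLE so the stripe is revisited later. The guard in isolation (sketch; the hypothetical do_handle() stands for the analysis and I/O body above):

static void handle_stripe_guarded(struct stripe_head *sh)
{
	clear_bit(STRIPE_HANDLE, &sh->state);
	if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) {
		/* another thread owns it: make sure it is handled again */
		set_bit(STRIPE_HANDLE, &sh->state);
		return;
	}
	do_handle(sh);				/* hypothetical: the body above */
	clear_bit(STRIPE_ACTIVE, &sh->state);
}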
static void raid5_activate_delayed(raid5_conf_t *conf)
@@ -3833,6 +3607,9 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
rcu_read_lock();
rdev = rcu_dereference(conf->disks[dd_idx].rdev);
if (rdev && test_bit(In_sync, &rdev->flags)) {
+ sector_t first_bad;
+ int bad_sectors;
+
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
raid_bio->bi_next = (void*)rdev;
@@ -3840,8 +3617,10 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
align_bi->bi_sector += rdev->data_offset;
- if (!bio_fits_rdev(align_bi)) {
- /* too big in some way */
+ if (!bio_fits_rdev(align_bi) ||
+ is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
+ &first_bad, &bad_sectors)) {
+ /* too big in some way, or has a known bad block */
bio_put(align_bi);
rdev_dec_pending(rdev, mddev);
return 0;
@@ -4016,7 +3795,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
}
}
- if (bio_data_dir(bi) == WRITE &&
+ if (rw == WRITE &&
logical_sector >= mddev->suspend_lo &&
logical_sector < mddev->suspend_hi) {
release_stripe(sh);
@@ -4034,7 +3813,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
}
if (test_bit(STRIPE_EXPANDING, &sh->state) ||
- !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
+ !add_stripe_bio(sh, bi, dd_idx, rw)) {
/* Stripe is busy expanding or
* add failed due to overlap. Flush everything
* and wait a while
@@ -4375,10 +4154,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
- spin_lock(&sh->lock);
- set_bit(STRIPE_SYNCING, &sh->state);
- clear_bit(STRIPE_INSYNC, &sh->state);
- spin_unlock(&sh->lock);
+ set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
handle_stripe(sh);
release_stripe(sh);
@@ -4509,6 +4285,9 @@ static void raid5d(mddev_t *mddev)
release_stripe(sh);
cond_resched();
+ if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ md_check_recovery(mddev);
+
spin_lock_irq(&conf->device_lock);
}
pr_debug("%d stripes handled\n", handled);
@@ -5313,6 +5092,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number)
* isn't possible.
*/
if (!test_bit(Faulty, &rdev->flags) &&
+ mddev->recovery_disabled != conf->recovery_disabled &&
!has_failed(conf) &&
number < conf->raid_disks) {
err = -EBUSY;
@@ -5341,6 +5121,9 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
int first = 0;
int last = conf->raid_disks - 1;
+ if (mddev->recovery_disabled == conf->recovery_disabled)
+ return -EBUSY;
+
if (has_failed(conf))
/* no point adding a device */
return -EINVAL;
@@ -5519,16 +5302,14 @@ static int raid5_start_reshape(mddev_t *mddev)
if (rdev->raid_disk < 0 &&
!test_bit(Faulty, &rdev->flags)) {
if (raid5_add_disk(mddev, rdev) == 0) {
- char nm[20];
if (rdev->raid_disk
>= conf->previous_raid_disks) {
set_bit(In_sync, &rdev->flags);
added_devices++;
} else
rdev->recovery_offset = 0;
- sprintf(nm, "rd%d", rdev->raid_disk);
- if (sysfs_create_link(&mddev->kobj,
- &rdev->kobj, nm))
+
+ if (sysfs_link_rdev(mddev, rdev))
/* Failure here is OK */;
}
} else if (rdev->raid_disk >= conf->previous_raid_disks
@@ -5624,9 +5405,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
d++) {
mdk_rdev_t *rdev = conf->disks[d].rdev;
if (rdev && raid5_remove_disk(mddev, d) == 0) {
- char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
+ sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
}
}
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 3ca77a2613b..11b9566184b 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -6,11 +6,11 @@
/*
*
- * Each stripe contains one buffer per disc. Each buffer can be in
+ * Each stripe contains one buffer per device. Each buffer can be in
* one of a number of states stored in "flags". Changes between
- * these states happen *almost* exclusively under a per-stripe
- * spinlock. Some very specific changes can happen in bi_end_io, and
- * these are not protected by the spin lock.
+ * these states happen *almost* exclusively under the protection of the
+ * STRIPE_ACTIVE flag. Some very specific changes can happen in bi_end_io, and
+ * these are not protected by STRIPE_ACTIVE.
*
* The flag bits that are used to represent these states are:
* R5_UPTODATE and R5_LOCKED
@@ -76,12 +76,10 @@
* block and the cached buffer are successfully written, any buffer on
* a written list can be returned with b_end_io.
*
- * The write list and read list both act as fifos. The read list is
- * protected by the device_lock. The write and written lists are
- * protected by the stripe lock. The device_lock, which can be
- * claimed while the stipe lock is held, is only for list
- * manipulations and will only be held for a very short time. It can
- * be claimed from interrupts.
+ * The write list and read list both act as fifos. The read list,
+ * write list and written list are protected by the device_lock.
+ * The device_lock is only for list manipulations and will only be
+ * held for a very short time. It can be claimed from interrupts.
*
*
* Stripes in the stripe cache can be on one of two lists (or on
@@ -96,7 +94,6 @@
*
* The inactive_list, handle_list and hash bucket lists are all protected by the
* device_lock.
- * - stripes on the inactive_list never have their stripe_lock held.
* - stripes have a reference counter. If count==0, they are on a list.
* - If a stripe might need handling, STRIPE_HANDLE is set.
* - When refcount reaches zero, then if STRIPE_HANDLE it is put on
@@ -116,10 +113,10 @@
* attach a request to an active stripe (add_stripe_bh())
* lockdev attach-buffer unlockdev
* handle a stripe (handle_stripe())
- * lockstripe clrSTRIPE_HANDLE ...
+ * setSTRIPE_ACTIVE, clrSTRIPE_HANDLE ...
* (lockdev check-buffers unlockdev) ..
* change-state ..
- * record io/ops needed unlockstripe schedule io/ops
+ * record io/ops needed clearSTRIPE_ACTIVE schedule io/ops
* release an active stripe (release_stripe())
* lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
*
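* A sketch of the "release an active stripe" step above, expanded: under
* device_lock, dropping the last reference moves the stripe to handle_list
* when STRIPE_HANDLE is set, otherwise to the inactive list. This assumes the
* stripe's list linkage field is named lru; the real release path also
* updates counters and wakes waiters.
*
*	static void __release_stripe_sketch(raid5_conf_t *conf,
*					    struct stripe_head *sh)
*	{
*		// caller holds conf->device_lock
*		if (atomic_dec_and_test(&sh->count)) {
*			if (test_bit(STRIPE_HANDLE, &sh->state))
*				list_add_tail(&sh->lru, &conf->handle_list);
*			else
*				list_add_tail(&sh->lru, &conf->inactive_list);
*		}
*	}
*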
@@ -128,8 +125,7 @@
* on a cached buffer, and plus one if the stripe is undergoing stripe
* operations.
*
- * Stripe operations are performed outside the stripe lock,
- * the stripe operations are:
+ * The stripe operations are:
* -copying data between the stripe cache and user application buffers
* -computing blocks to save a disk access, or to recover a missing block
* -updating the parity on a write operation (reconstruct write and
@@ -159,7 +155,8 @@
*/
/*
- * Operations state - intermediate states that are visible outside of sh->lock
+ * Operations state - intermediate states that are visible outside of
+ * STRIPE_ACTIVE.
* In general _idle indicates nothing is running, _run indicates a data
* processing operation is active, and _result means the data processing result
* is stable and can be acted upon. For simple operations like biofill and
@@ -209,7 +206,6 @@ struct stripe_head {
short ddf_layout;/* use DDF ordering to calculate Q */
unsigned long state; /* state flags */
atomic_t count; /* nr of active thread/requests */
- spinlock_t lock;
int bm_seq; /* sequence number for bitmap flushes */
int disks; /* disks in stripe */
enum check_states check_state;
@@ -240,19 +236,20 @@ struct stripe_head {
};
/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
- * for handle_stripe. It is only valid under spin_lock(sh->lock);
+ * for handle_stripe.
*/
struct stripe_head_state {
int syncing, expanding, expanded;
int locked, uptodate, to_read, to_write, failed, written;
int to_fill, compute, req_compute, non_overwrite;
- int failed_num;
+ int failed_num[2];
+ int p_failed, q_failed;
+ int dec_preread_active;
unsigned long ops_request;
-};
-/* r6_state - extra state data only relevant to r6 */
-struct r6_state {
- int p_failed, q_failed, failed_num[2];
+ struct bio *return_bi;
+ mdk_rdev_t *blocked_rdev;
+ int handle_bad_blocks;
};
/* Flags */
@@ -268,14 +265,16 @@ struct r6_state {
#define R5_ReWrite 9 /* have tried to over-write the readerror */
#define R5_Expanded 10 /* This block now has post-expand data */
-#define R5_Wantcompute 11 /* compute_block in progress treat as
- * uptodate
- */
-#define R5_Wantfill 12 /* dev->toread contains a bio that needs
- * filling
- */
-#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
-#define R5_WantFUA 14 /* Write should be FUA */
+#define R5_Wantcompute 11 /* compute_block in progress treat as
+ * uptodate
+ */
+#define R5_Wantfill 12 /* dev->toread contains a bio that needs
+ * filling
+ */
+#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
+#define R5_WantFUA 14 /* Write should be FUA */
+#define R5_WriteError 15 /* got a write error - need to record it */
+#define R5_MadeGood 16 /* A bad block has been fixed by writing to it*/
/*
* Write method
*/
@@ -289,21 +288,25 @@ struct r6_state {
/*
* Stripe state
*/
-#define STRIPE_HANDLE 2
-#define STRIPE_SYNCING 3
-#define STRIPE_INSYNC 4
-#define STRIPE_PREREAD_ACTIVE 5
-#define STRIPE_DELAYED 6
-#define STRIPE_DEGRADED 7
-#define STRIPE_BIT_DELAY 8
-#define STRIPE_EXPANDING 9
-#define STRIPE_EXPAND_SOURCE 10
-#define STRIPE_EXPAND_READY 11
-#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */
-#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */
-#define STRIPE_BIOFILL_RUN 14
-#define STRIPE_COMPUTE_RUN 15
-#define STRIPE_OPS_REQ_PENDING 16
+enum {
+ STRIPE_ACTIVE,
+ STRIPE_HANDLE,
+ STRIPE_SYNC_REQUESTED,
+ STRIPE_SYNCING,
+ STRIPE_INSYNC,
+ STRIPE_PREREAD_ACTIVE,
+ STRIPE_DELAYED,
+ STRIPE_DEGRADED,
+ STRIPE_BIT_DELAY,
+ STRIPE_EXPANDING,
+ STRIPE_EXPAND_SOURCE,
+ STRIPE_EXPAND_READY,
+ STRIPE_IO_STARTED, /* do not count towards 'bypass_count' */
+ STRIPE_FULL_WRITE, /* all blocks are set to be overwritten */
+ STRIPE_BIOFILL_RUN,
+ STRIPE_COMPUTE_RUN,
+ STRIPE_OPS_REQ_PENDING,
+};
/*
* Operation request flags
@@ -336,7 +339,7 @@ struct r6_state {
* PREREAD_ACTIVE.
* In stripe_handle, if we find pre-reading is necessary, we do it if
* PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue.
- * HANDLE gets cleared if stripe_handle leave nothing locked.
+ * HANDLE gets cleared if stripe_handle leaves nothing locked.
*/
@@ -399,7 +402,7 @@ struct raid5_private_data {
* (fresh device added).
* Cleared when a sync completes.
*/
-
+ int recovery_disabled;
/* per cpu variables */
struct raid5_percpu {
struct page *spare_page; /* Used when checking P/Q in raid6 */
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 6995940b633..9575db429df 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -68,7 +68,6 @@ config VIDEO_V4L2_SUBDEV_API
config DVB_CORE
tristate "DVB for Linux"
- depends on NET && INET
select CRC32
help
DVB core utility functions for device handling, software fallbacks etc.
@@ -85,6 +84,19 @@ config DVB_CORE
If unsure say N.
+config DVB_NET
+ bool "DVB Network Support"
+ default (NET && INET)
+ depends on NET && INET && DVB_CORE
+ help
+ This option enables DVB Network Support which is a part of the DVB
+ standard. It is used, for example, by automatic firmware updates used
+ on Set-Top-Boxes. It can also be used to access the Internet via the
+ DVB card, if the network provider supports it.
+
+ You may want to disable the network support on embedded devices. If
+ unsure say Y.
+
config VIDEO_MEDIA
tristate
default (DVB_CORE && (VIDEO_DEV = n)) || (VIDEO_DEV && (DVB_CORE = n)) || (DVB_CORE && VIDEO_DEV)
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index 22d3ca36370..996302ae210 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -23,6 +23,7 @@ config MEDIA_TUNER
depends on VIDEO_MEDIA && I2C
select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_XC4000 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MT20XX if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_TEA5761 if !MEDIA_TUNER_CUSTOMISE
@@ -152,6 +153,15 @@ config MEDIA_TUNER_XC5000
For now, this device is only used inside a SiP together with a
demodulator.
+config MEDIA_TUNER_XC4000
+ tristate "Xceive XC4000 silicon tuner"
+ depends on VIDEO_MEDIA && I2C
+ default m if MEDIA_TUNER_CUSTOMISE
+ help
+ A driver for the silicon tuner XC4000 from Xceive.
+ For now, this device is only used inside a SiP together with a
+ demodulator.
+
config MEDIA_TUNER_MXL5005S
tristate "MaxLinear MSL5005S silicon tuner"
depends on VIDEO_MEDIA && I2C
diff --git a/drivers/media/common/tuners/Makefile b/drivers/media/common/tuners/Makefile
index 2cb4f532784..20d24fca2cf 100644
--- a/drivers/media/common/tuners/Makefile
+++ b/drivers/media/common/tuners/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MEDIA_TUNER_TDA9887) += tda9887.o
obj-$(CONFIG_MEDIA_TUNER_TDA827X) += tda827x.o
obj-$(CONFIG_MEDIA_TUNER_TDA18271) += tda18271.o
obj-$(CONFIG_MEDIA_TUNER_XC5000) += xc5000.o
+obj-$(CONFIG_MEDIA_TUNER_XC4000) += xc4000.o
obj-$(CONFIG_MEDIA_TUNER_MT2060) += mt2060.o
obj-$(CONFIG_MEDIA_TUNER_MT2266) += mt2266.o
obj-$(CONFIG_MEDIA_TUNER_QT1010) += qt1010.o
diff --git a/drivers/media/common/tuners/tuner-types.c b/drivers/media/common/tuners/tuner-types.c
index afba6dc5e08..94a603a6084 100644
--- a/drivers/media/common/tuners/tuner-types.c
+++ b/drivers/media/common/tuners/tuner-types.c
@@ -1805,6 +1805,10 @@ struct tunertype tuners[] = {
.name = "Xceive 5000 tuner",
/* see xc5000.c for details */
},
+ [TUNER_XC4000] = { /* Xceive 4000 */
+ .name = "Xceive 4000 tuner",
+ /* see xc4000.c for details */
+ },
[TUNER_TCL_MF02GIP_5N] = { /* TCL tuner MF02GIP-5N-E */
.name = "TCL tuner MF02GIP-5N-E",
.params = tuner_tcl_mf02gip_5n_params,
diff --git a/drivers/media/common/tuners/xc4000.c b/drivers/media/common/tuners/xc4000.c
new file mode 100644
index 00000000000..634f4d9b6c6
--- /dev/null
+++ b/drivers/media/common/tuners/xc4000.c
@@ -0,0 +1,1691 @@
+/*
+ * Driver for Xceive XC4000 "QAM/8VSB single chip tuner"
+ *
+ * Copyright (c) 2007 Xceive Corporation
+ * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
+ * Copyright (c) 2009 Devin Heitmueller <dheitmueller@kernellabs.com>
+ * Copyright (c) 2009 Davide Ferri <d.ferri@zero11.it>
+ * Copyright (c) 2010 Istvan Varga <istvan_v@mailbox.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/videodev2.h>
+#include <linux/delay.h>
+#include <linux/dvb/frontend.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <asm/unaligned.h>
+
+#include "dvb_frontend.h"
+
+#include "xc4000.h"
+#include "tuner-i2c.h"
+#include "tuner-xc2028-types.h"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debugging level (0 to 2, default: 0 (off)).");
+
+static int no_poweroff;
+module_param(no_poweroff, int, 0644);
+MODULE_PARM_DESC(no_poweroff, "Power management (1: disabled, 2: enabled, "
+ "0 (default): use device-specific default mode).");
+
+static int audio_std;
+module_param(audio_std, int, 0644);
+MODULE_PARM_DESC(audio_std, "Audio standard. The XC4000 audio decoder explicitly "
+ "needs to know which audio standard to use for some video standards "
+ "with A2 or NICAM audio. The valid settings are a sum of:\n"
+ " 1: use NICAM/B or A2/B instead of NICAM/A or A2/A\n"
+ " 2: use A2 instead of NICAM or BTSC\n"
+ " 4: use SECAM/K3 instead of K1\n"
+ " 8: use PAL-D/K audio for SECAM-D/K\n"
+ "16: use FM radio input 1 instead of input 2\n"
+ "32: use mono audio (the lower three bits are ignored)");
+
+static char firmware_name[30];
+module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
+MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the "
+ "default firmware name.");
+
+static DEFINE_MUTEX(xc4000_list_mutex);
+static LIST_HEAD(hybrid_tuner_instance_list);
+
+#define dprintk(level, fmt, arg...) if (debug >= level) \
+ printk(KERN_INFO "%s: " fmt, "xc4000", ## arg)
+
+/* struct for storing firmware table */
+struct firmware_description {
+ unsigned int type;
+ v4l2_std_id id;
+ __u16 int_freq;
+ unsigned char *ptr;
+ unsigned int size;
+};
+
+struct firmware_properties {
+ unsigned int type;
+ v4l2_std_id id;
+ v4l2_std_id std_req;
+ __u16 int_freq;
+ unsigned int scode_table;
+ int scode_nr;
+};
+
+struct xc4000_priv {
+ struct tuner_i2c_props i2c_props;
+ struct list_head hybrid_tuner_instance_list;
+ struct firmware_description *firm;
+ int firm_size;
+ u32 if_khz;
+ u32 freq_hz;
+ u32 bandwidth;
+ u8 video_standard;
+ u8 rf_mode;
+ u8 default_pm;
+ u8 dvb_amplitude;
+ u8 set_smoothedcvbs;
+ u8 ignore_i2c_write_errors;
+ __u16 firm_version;
+ struct firmware_properties cur_fw;
+ __u16 hwmodel;
+ __u16 hwvers;
+ struct mutex lock;
+};
+
+#define XC4000_AUDIO_STD_B 1
+#define XC4000_AUDIO_STD_A2 2
+#define XC4000_AUDIO_STD_K3 4
+#define XC4000_AUDIO_STD_L 8
+#define XC4000_AUDIO_STD_INPUT1 16
+#define XC4000_AUDIO_STD_MONO 32
+
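
audio_std is a bitmask assembled from the XC4000_AUDIO_STD_* values above; for example, audio_std=3 on the module command line corresponds to A2/B. A small illustration of composing and testing the bits (illustrative only; the firmware-selection code that consumes these bits appears later in the driver):

/* Example: request A2 audio and the /B variant (1 + 2, i.e. audio_std=3). */
static const int example_audio_std = XC4000_AUDIO_STD_A2 | XC4000_AUDIO_STD_B;

static inline int audio_std_wants_mono(int std)
{
	return (std & XC4000_AUDIO_STD_MONO) != 0;
}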
+#define XC4000_DEFAULT_FIRMWARE "dvb-fe-xc4000-1.4.fw"
+
+/* Misc Defines */
+#define MAX_TV_STANDARD 24
+#define XC_MAX_I2C_WRITE_LENGTH 64
+#define XC_POWERED_DOWN 0x80000000U
+
+/* Signal Types */
+#define XC_RF_MODE_AIR 0
+#define XC_RF_MODE_CABLE 1
+
+/* Product id */
+#define XC_PRODUCT_ID_FW_NOT_LOADED 0x2000
+#define XC_PRODUCT_ID_XC4000 0x0FA0
+#define XC_PRODUCT_ID_XC4100 0x1004
+
+/* Registers (Write-only) */
+#define XREG_INIT 0x00
+#define XREG_VIDEO_MODE 0x01
+#define XREG_AUDIO_MODE 0x02
+#define XREG_RF_FREQ 0x03
+#define XREG_D_CODE 0x04
+#define XREG_DIRECTSITTING_MODE 0x05
+#define XREG_SEEK_MODE 0x06
+#define XREG_POWER_DOWN 0x08
+#define XREG_SIGNALSOURCE 0x0A
+#define XREG_SMOOTHEDCVBS 0x0E
+#define XREG_AMPLITUDE 0x10
+
+/* Registers (Read-only) */
+#define XREG_ADC_ENV 0x00
+#define XREG_QUALITY 0x01
+#define XREG_FRAME_LINES 0x02
+#define XREG_HSYNC_FREQ 0x03
+#define XREG_LOCK 0x04
+#define XREG_FREQ_ERROR 0x05
+#define XREG_SNR 0x06
+#define XREG_VERSION 0x07
+#define XREG_PRODUCT_ID 0x08
+
+/*
+ Basic firmware description. This will remain with
+ the driver for documentation purposes.
+
+ This represents an I2C firmware file encoded as a
+ string of unsigned char. Format is as follows:
+
+ char[0 ]=len0_MSB -> len = len_MSB * 256 + len_LSB
+ char[1 ]=len0_LSB -> length of first write transaction
+ char[2 ]=data0 -> first byte to be sent
+ char[3 ]=data1
+ char[4 ]=data2
+ char[ ]=...
+ char[M ]=dataN -> last byte to be sent
+ char[M+1]=len1_MSB -> len = len_MSB * 256 + len_LSB
+ char[M+2]=len1_LSB -> length of second write transaction
+ char[M+3]=data0
+ char[M+4]=data1
+ ...
+ etc.
+
+ The [len] value should be interpreted as follows:
+
+ len= len_MSB _ len_LSB
+ len=1111_1111_1111_1111 : End of I2C_SEQUENCE
+ len=0000_0000_0000_0000 : Reset command: Do hardware reset
+ len=0NNN_NNNN_NNNN_NNNN : Normal transaction: number of bytes = {1:32767}
+ len=1WWW_WWWW_WWWW_WWWW : Wait command: wait for {1:32767} ms
+
+ For the RESET and WAIT commands, the two bytes that follow immediately
+ contain the length of the next transaction.
+*/
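
The encoding described above is easy to exercise outside the driver. Below is a small, self-contained userspace walker over such a byte stream that only prints what each record would do; it is illustrative and assumes a well-formed sequence (the in-kernel equivalent is xc_load_i2c_sequence() further down):

#include <stdio.h>

/* Walk an XC4000-style firmware blob: 16-bit big-endian length field,
 * 0xFFFF = end of sequence, 0x0000 = hardware reset,
 * high bit set = wait (len & 0x7FFF) ms, otherwise len data bytes follow.
 */
static void dump_i2c_sequence(const unsigned char *seq)
{
	unsigned int index = 0;

	for (;;) {
		unsigned int len = seq[index] * 256 + seq[index + 1];
		index += 2;

		if (len == 0xFFFF) {			/* end of I2C_SEQUENCE */
			printf("end of sequence\n");
			return;
		} else if (len == 0x0000) {		/* reset command */
			printf("hardware reset\n");
		} else if (len & 0x8000) {		/* wait command */
			printf("wait %u ms\n", len & 0x7FFF);
		} else {				/* normal transaction */
			printf("write %u bytes\n", len);
			index += len;
		}
	}
}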
+
+struct XC_TV_STANDARD {
+ const char *Name;
+ u16 audio_mode;
+ u16 video_mode;
+ u16 int_freq;
+};
+
+/* Tuner standards */
+#define XC4000_MN_NTSC_PAL_BTSC 0
+#define XC4000_MN_NTSC_PAL_A2 1
+#define XC4000_MN_NTSC_PAL_EIAJ 2
+#define XC4000_MN_NTSC_PAL_Mono 3
+#define XC4000_BG_PAL_A2 4
+#define XC4000_BG_PAL_NICAM 5
+#define XC4000_BG_PAL_MONO 6
+#define XC4000_I_PAL_NICAM 7
+#define XC4000_I_PAL_NICAM_MONO 8
+#define XC4000_DK_PAL_A2 9
+#define XC4000_DK_PAL_NICAM 10
+#define XC4000_DK_PAL_MONO 11
+#define XC4000_DK_SECAM_A2DK1 12
+#define XC4000_DK_SECAM_A2LDK3 13
+#define XC4000_DK_SECAM_A2MONO 14
+#define XC4000_DK_SECAM_NICAM 15
+#define XC4000_L_SECAM_NICAM 16
+#define XC4000_LC_SECAM_NICAM 17
+#define XC4000_DTV6 18
+#define XC4000_DTV8 19
+#define XC4000_DTV7_8 20
+#define XC4000_DTV7 21
+#define XC4000_FM_Radio_INPUT2 22
+#define XC4000_FM_Radio_INPUT1 23
+
+static struct XC_TV_STANDARD xc4000_standard[MAX_TV_STANDARD] = {
+ {"M/N-NTSC/PAL-BTSC", 0x0000, 0x80A0, 4500},
+ {"M/N-NTSC/PAL-A2", 0x0000, 0x80A0, 4600},
+ {"M/N-NTSC/PAL-EIAJ", 0x0040, 0x80A0, 4500},
+ {"M/N-NTSC/PAL-Mono", 0x0078, 0x80A0, 4500},
+ {"B/G-PAL-A2", 0x0000, 0x8159, 5640},
+ {"B/G-PAL-NICAM", 0x0004, 0x8159, 5740},
+ {"B/G-PAL-MONO", 0x0078, 0x8159, 5500},
+ {"I-PAL-NICAM", 0x0080, 0x8049, 6240},
+ {"I-PAL-NICAM-MONO", 0x0078, 0x8049, 6000},
+ {"D/K-PAL-A2", 0x0000, 0x8049, 6380},
+ {"D/K-PAL-NICAM", 0x0080, 0x8049, 6200},
+ {"D/K-PAL-MONO", 0x0078, 0x8049, 6500},
+ {"D/K-SECAM-A2 DK1", 0x0000, 0x8049, 6340},
+ {"D/K-SECAM-A2 L/DK3", 0x0000, 0x8049, 6000},
+ {"D/K-SECAM-A2 MONO", 0x0078, 0x8049, 6500},
+ {"D/K-SECAM-NICAM", 0x0080, 0x8049, 6200},
+ {"L-SECAM-NICAM", 0x8080, 0x0009, 6200},
+ {"L'-SECAM-NICAM", 0x8080, 0x4009, 6200},
+ {"DTV6", 0x00C0, 0x8002, 0},
+ {"DTV8", 0x00C0, 0x800B, 0},
+ {"DTV7/8", 0x00C0, 0x801B, 0},
+ {"DTV7", 0x00C0, 0x8007, 0},
+ {"FM Radio-INPUT2", 0x0008, 0x9800, 10700},
+ {"FM Radio-INPUT1", 0x0008, 0x9000, 10700}
+};
+
+static int xc4000_readreg(struct xc4000_priv *priv, u16 reg, u16 *val);
+static int xc4000_tuner_reset(struct dvb_frontend *fe);
+static void xc_debug_dump(struct xc4000_priv *priv);
+
+static int xc_send_i2c_data(struct xc4000_priv *priv, u8 *buf, int len)
+{
+ struct i2c_msg msg = { .addr = priv->i2c_props.addr,
+ .flags = 0, .buf = buf, .len = len };
+ if (i2c_transfer(priv->i2c_props.adap, &msg, 1) != 1) {
+ if (priv->ignore_i2c_write_errors == 0) {
+ printk(KERN_ERR "xc4000: I2C write failed (len=%i)\n",
+ len);
+ if (len == 4) {
+ printk(KERN_ERR "bytes %02x %02x %02x %02x\n", buf[0],
+ buf[1], buf[2], buf[3]);
+ }
+ return -EREMOTEIO;
+ }
+ }
+ return 0;
+}
+
+static int xc4000_tuner_reset(struct dvb_frontend *fe)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ int ret;
+
+ dprintk(1, "%s()\n", __func__);
+
+ if (fe->callback) {
+ ret = fe->callback(((fe->dvb) && (fe->dvb->priv)) ?
+ fe->dvb->priv :
+ priv->i2c_props.adap->algo_data,
+ DVB_FRONTEND_COMPONENT_TUNER,
+ XC4000_TUNER_RESET, 0);
+ if (ret) {
+ printk(KERN_ERR "xc4000: reset failed\n");
+ return -EREMOTEIO;
+ }
+ } else {
+ printk(KERN_ERR "xc4000: no tuner reset callback function, "
+ "fatal\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int xc_write_reg(struct xc4000_priv *priv, u16 regAddr, u16 i2cData)
+{
+ u8 buf[4];
+ int result;
+
+ buf[0] = (regAddr >> 8) & 0xFF;
+ buf[1] = regAddr & 0xFF;
+ buf[2] = (i2cData >> 8) & 0xFF;
+ buf[3] = i2cData & 0xFF;
+ result = xc_send_i2c_data(priv, buf, 4);
+
+ return result;
+}
+
+static int xc_load_i2c_sequence(struct dvb_frontend *fe, const u8 *i2c_sequence)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+
+ int i, nbytes_to_send, result;
+ unsigned int len, pos, index;
+ u8 buf[XC_MAX_I2C_WRITE_LENGTH];
+
+ index = 0;
+ while ((i2c_sequence[index] != 0xFF) ||
+ (i2c_sequence[index + 1] != 0xFF)) {
+ len = i2c_sequence[index] * 256 + i2c_sequence[index+1];
+ if (len == 0x0000) {
+ /* RESET command */
+ /* NOTE: this is ignored, as the reset callback was */
+ /* already called by check_firmware() */
+ index += 2;
+ } else if (len & 0x8000) {
+ /* WAIT command */
+ msleep(len & 0x7FFF);
+ index += 2;
+ } else {
+ /* Send i2c data whilst ensuring individual transactions
+ * do not exceed XC_MAX_I2C_WRITE_LENGTH bytes.
+ */
+ index += 2;
+ buf[0] = i2c_sequence[index];
+ buf[1] = i2c_sequence[index + 1];
+ pos = 2;
+ while (pos < len) {
+ if ((len - pos) > XC_MAX_I2C_WRITE_LENGTH - 2)
+ nbytes_to_send =
+ XC_MAX_I2C_WRITE_LENGTH;
+ else
+ nbytes_to_send = (len - pos + 2);
+ for (i = 2; i < nbytes_to_send; i++) {
+ buf[i] = i2c_sequence[index + pos +
+ i - 2];
+ }
+ result = xc_send_i2c_data(priv, buf,
+ nbytes_to_send);
+
+ if (result != 0)
+ return result;
+
+ pos += nbytes_to_send - 2;
+ }
+ index += len;
+ }
+ }
+ return 0;
+}
+
+static int xc_set_tv_standard(struct xc4000_priv *priv,
+ u16 video_mode, u16 audio_mode)
+{
+ int ret;
+ dprintk(1, "%s(0x%04x,0x%04x)\n", __func__, video_mode, audio_mode);
+ dprintk(1, "%s() Standard = %s\n",
+ __func__,
+ xc4000_standard[priv->video_standard].Name);
+
+ /* Don't complain when the request fails because of i2c stretching */
+ priv->ignore_i2c_write_errors = 1;
+
+ ret = xc_write_reg(priv, XREG_VIDEO_MODE, video_mode);
+ if (ret == 0)
+ ret = xc_write_reg(priv, XREG_AUDIO_MODE, audio_mode);
+
+ priv->ignore_i2c_write_errors = 0;
+
+ return ret;
+}
+
+static int xc_set_signal_source(struct xc4000_priv *priv, u16 rf_mode)
+{
+ dprintk(1, "%s(%d) Source = %s\n", __func__, rf_mode,
+ rf_mode == XC_RF_MODE_AIR ? "ANTENNA" : "CABLE");
+
+ if ((rf_mode != XC_RF_MODE_AIR) && (rf_mode != XC_RF_MODE_CABLE)) {
+ rf_mode = XC_RF_MODE_CABLE;
+ printk(KERN_ERR
+ "%s(), Invalid mode, defaulting to CABLE",
+ __func__);
+ }
+ return xc_write_reg(priv, XREG_SIGNALSOURCE, rf_mode);
+}
+
+static const struct dvb_tuner_ops xc4000_tuner_ops;
+
+static int xc_set_rf_frequency(struct xc4000_priv *priv, u32 freq_hz)
+{
+ u16 freq_code;
+
+ dprintk(1, "%s(%u)\n", __func__, freq_hz);
+
+ if ((freq_hz > xc4000_tuner_ops.info.frequency_max) ||
+ (freq_hz < xc4000_tuner_ops.info.frequency_min))
+ return -EINVAL;
+
+ freq_code = (u16)(freq_hz / 15625);
+
+ /* WAS: Starting in firmware version 1.1.44, Xceive recommends using the
+ FINERFREQ for all normal tuning (the doc indicates reg 0x03 should
+ only be used for fast scanning for channel lock) */
+ /* WAS: XREG_FINERFREQ */
+ return xc_write_reg(priv, XREG_RF_FREQ, freq_code);
+}
+
+static int xc_get_adc_envelope(struct xc4000_priv *priv, u16 *adc_envelope)
+{
+ return xc4000_readreg(priv, XREG_ADC_ENV, adc_envelope);
+}
+
+static int xc_get_frequency_error(struct xc4000_priv *priv, u32 *freq_error_hz)
+{
+ int result;
+ u16 regData;
+ u32 tmp;
+
+ result = xc4000_readreg(priv, XREG_FREQ_ERROR, &regData);
+ if (result != 0)
+ return result;
+
+ tmp = (u32)regData & 0xFFFFU;
+ tmp = (tmp < 0x8000U ? tmp : 0x10000U - tmp);
+ (*freq_error_hz) = tmp * 15625;
+ return result;
+}
+
+static int xc_get_lock_status(struct xc4000_priv *priv, u16 *lock_status)
+{
+ return xc4000_readreg(priv, XREG_LOCK, lock_status);
+}
+
+static int xc_get_version(struct xc4000_priv *priv,
+ u8 *hw_majorversion, u8 *hw_minorversion,
+ u8 *fw_majorversion, u8 *fw_minorversion)
+{
+ u16 data;
+ int result;
+
+ result = xc4000_readreg(priv, XREG_VERSION, &data);
+ if (result != 0)
+ return result;
+
+ (*hw_majorversion) = (data >> 12) & 0x0F;
+ (*hw_minorversion) = (data >> 8) & 0x0F;
+ (*fw_majorversion) = (data >> 4) & 0x0F;
+ (*fw_minorversion) = data & 0x0F;
+
+ return 0;
+}
+
+static int xc_get_hsync_freq(struct xc4000_priv *priv, u32 *hsync_freq_hz)
+{
+ u16 regData;
+ int result;
+
+ result = xc4000_readreg(priv, XREG_HSYNC_FREQ, &regData);
+ if (result != 0)
+ return result;
+
+ (*hsync_freq_hz) = ((regData & 0x0fff) * 763)/100;
+ return result;
+}
+
+static int xc_get_frame_lines(struct xc4000_priv *priv, u16 *frame_lines)
+{
+ return xc4000_readreg(priv, XREG_FRAME_LINES, frame_lines);
+}
+
+static int xc_get_quality(struct xc4000_priv *priv, u16 *quality)
+{
+ return xc4000_readreg(priv, XREG_QUALITY, quality);
+}
+
+static u16 xc_wait_for_lock(struct xc4000_priv *priv)
+{
+ u16 lock_state = 0;
+ int watchdog_count = 40;
+
+ while ((lock_state == 0) && (watchdog_count > 0)) {
+ xc_get_lock_status(priv, &lock_state);
+ if (lock_state != 1) {
+ msleep(5);
+ watchdog_count--;
+ }
+ }
+ return lock_state;
+}
+
+static int xc_tune_channel(struct xc4000_priv *priv, u32 freq_hz)
+{
+ int found = 1;
+ int result;
+
+ dprintk(1, "%s(%u)\n", __func__, freq_hz);
+
+ /* Don't complain when the request fails because of i2c stretching */
+ priv->ignore_i2c_write_errors = 1;
+ result = xc_set_rf_frequency(priv, freq_hz);
+ priv->ignore_i2c_write_errors = 0;
+
+ if (result != 0)
+ return 0;
+
+ /* wait for lock only in analog TV mode */
+ if ((priv->cur_fw.type & (FM | DTV6 | DTV7 | DTV78 | DTV8)) == 0) {
+ if (xc_wait_for_lock(priv) != 1)
+ found = 0;
+ }
+
+ /* Wait for stats to stabilize.
+ * Frame Lines needs two frame times after initial lock
+ * before it is valid.
+ */
+ msleep(debug ? 100 : 10);
+
+ if (debug)
+ xc_debug_dump(priv);
+
+ return found;
+}
+
+static int xc4000_readreg(struct xc4000_priv *priv, u16 reg, u16 *val)
+{
+ u8 buf[2] = { reg >> 8, reg & 0xff };
+ u8 bval[2] = { 0, 0 };
+ struct i2c_msg msg[2] = {
+ { .addr = priv->i2c_props.addr,
+ .flags = 0, .buf = &buf[0], .len = 2 },
+ { .addr = priv->i2c_props.addr,
+ .flags = I2C_M_RD, .buf = &bval[0], .len = 2 },
+ };
+
+ if (i2c_transfer(priv->i2c_props.adap, msg, 2) != 2) {
+ printk(KERN_ERR "xc4000: I2C read failed\n");
+ return -EREMOTEIO;
+ }
+
+ *val = (bval[0] << 8) | bval[1];
+ return 0;
+}
+
+#define dump_firm_type(t) dump_firm_type_and_int_freq(t, 0)
+static void dump_firm_type_and_int_freq(unsigned int type, u16 int_freq)
+{
+ if (type & BASE)
+ printk(KERN_CONT "BASE ");
+ if (type & INIT1)
+ printk(KERN_CONT "INIT1 ");
+ if (type & F8MHZ)
+ printk(KERN_CONT "F8MHZ ");
+ if (type & MTS)
+ printk(KERN_CONT "MTS ");
+ if (type & D2620)
+ printk(KERN_CONT "D2620 ");
+ if (type & D2633)
+ printk(KERN_CONT "D2633 ");
+ if (type & DTV6)
+ printk(KERN_CONT "DTV6 ");
+ if (type & QAM)
+ printk(KERN_CONT "QAM ");
+ if (type & DTV7)
+ printk(KERN_CONT "DTV7 ");
+ if (type & DTV78)
+ printk(KERN_CONT "DTV78 ");
+ if (type & DTV8)
+ printk(KERN_CONT "DTV8 ");
+ if (type & FM)
+ printk(KERN_CONT "FM ");
+ if (type & INPUT1)
+ printk(KERN_CONT "INPUT1 ");
+ if (type & LCD)
+ printk(KERN_CONT "LCD ");
+ if (type & NOGD)
+ printk(KERN_CONT "NOGD ");
+ if (type & MONO)
+ printk(KERN_CONT "MONO ");
+ if (type & ATSC)
+ printk(KERN_CONT "ATSC ");
+ if (type & IF)
+ printk(KERN_CONT "IF ");
+ if (type & LG60)
+ printk(KERN_CONT "LG60 ");
+ if (type & ATI638)
+ printk(KERN_CONT "ATI638 ");
+ if (type & OREN538)
+ printk(KERN_CONT "OREN538 ");
+ if (type & OREN36)
+ printk(KERN_CONT "OREN36 ");
+ if (type & TOYOTA388)
+ printk(KERN_CONT "TOYOTA388 ");
+ if (type & TOYOTA794)
+ printk(KERN_CONT "TOYOTA794 ");
+ if (type & DIBCOM52)
+ printk(KERN_CONT "DIBCOM52 ");
+ if (type & ZARLINK456)
+ printk(KERN_CONT "ZARLINK456 ");
+ if (type & CHINA)
+ printk(KERN_CONT "CHINA ");
+ if (type & F6MHZ)
+ printk(KERN_CONT "F6MHZ ");
+ if (type & INPUT2)
+ printk(KERN_CONT "INPUT2 ");
+ if (type & SCODE)
+ printk(KERN_CONT "SCODE ");
+ if (type & HAS_IF)
+ printk(KERN_CONT "HAS_IF_%d ", int_freq);
+}
+
+static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
+ v4l2_std_id *id)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ int i, best_i = -1;
+ unsigned int best_nr_diffs = 255U;
+
+ if (!priv->firm) {
+ printk(KERN_ERR "Error! firmware not loaded\n");
+ return -EINVAL;
+ }
+
+ if (((type & ~SCODE) == 0) && (*id == 0))
+ *id = V4L2_STD_PAL;
+
+ /* Seek for generic video standard match */
+ for (i = 0; i < priv->firm_size; i++) {
+ v4l2_std_id id_diff_mask =
+ (priv->firm[i].id ^ (*id)) & (*id);
+ unsigned int type_diff_mask =
+ (priv->firm[i].type ^ type)
+ & (BASE_TYPES | DTV_TYPES | LCD | NOGD | MONO | SCODE);
+ unsigned int nr_diffs;
+
+ if (type_diff_mask
+ & (BASE | INIT1 | FM | DTV6 | DTV7 | DTV78 | DTV8 | SCODE))
+ continue;
+
+ nr_diffs = hweight64(id_diff_mask) + hweight32(type_diff_mask);
+ if (!nr_diffs) /* Supports all the requested standards */
+ goto found;
+
+ if (nr_diffs < best_nr_diffs) {
+ best_nr_diffs = nr_diffs;
+ best_i = i;
+ }
+ }
+
+ /* FIXME: Would it make sense to also seek for a type "hint" match? */
+ if (best_i < 0) {
+ i = -ENOENT;
+ goto ret;
+ }
+
+ if (best_nr_diffs > 0U) {
+ printk(KERN_WARNING
+ "Selecting best matching firmware (%u bits differ) for "
+ "type=(%x), id %016llx:\n",
+ best_nr_diffs, type, (unsigned long long)*id);
+ i = best_i;
+ }
+
+found:
+ *id = priv->firm[i].id;
+
+ret:
+ if (debug) {
+ printk(KERN_DEBUG "%s firmware for type=",
+ (i < 0) ? "Can't find" : "Found");
+ dump_firm_type(type);
+ printk(KERN_DEBUG "(%x), id %016llx.\n", type, (unsigned long long)*id);
+ }
+ return i;
+}
+
+static int load_firmware(struct dvb_frontend *fe, unsigned int type,
+ v4l2_std_id *id)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ int pos, rc;
+ unsigned char *p;
+
+ pos = seek_firmware(fe, type, id);
+ if (pos < 0)
+ return pos;
+
+ p = priv->firm[pos].ptr;
+
+ /* Don't complain when the request fails because of i2c stretching */
+ priv->ignore_i2c_write_errors = 1;
+
+ rc = xc_load_i2c_sequence(fe, p);
+
+ priv->ignore_i2c_write_errors = 0;
+
+ return rc;
+}
+
+static int xc4000_fwupload(struct dvb_frontend *fe)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ const struct firmware *fw = NULL;
+ const unsigned char *p, *endp;
+ int rc = 0;
+ int n, n_array;
+ char name[33];
+ const char *fname;
+
+ if (firmware_name[0] != '\0')
+ fname = firmware_name;
+ else
+ fname = XC4000_DEFAULT_FIRMWARE;
+
+ dprintk(1, "Reading firmware %s\n", fname);
+ rc = request_firmware(&fw, fname, priv->i2c_props.adap->dev.parent);
+ if (rc < 0) {
+ if (rc == -ENOENT)
+ printk(KERN_ERR "Error: firmware %s not found.\n", fname);
+ else
+ printk(KERN_ERR "Error %d while requesting firmware %s\n",
+ rc, fname);
+
+ return rc;
+ }
+ p = fw->data;
+ endp = p + fw->size;
+
+ if (fw->size < sizeof(name) - 1 + 2 + 2) {
+ printk(KERN_ERR "Error: firmware file %s has invalid size!\n",
+ fname);
+ goto corrupt;
+ }
+
+ memcpy(name, p, sizeof(name) - 1);
+ name[sizeof(name) - 1] = '\0';
+ p += sizeof(name) - 1;
+
+ priv->firm_version = get_unaligned_le16(p);
+ p += 2;
+
+ n_array = get_unaligned_le16(p);
+ p += 2;
+
+ dprintk(1, "Loading %d firmware images from %s, type: %s, ver %d.%d\n",
+ n_array, fname, name,
+ priv->firm_version >> 8, priv->firm_version & 0xff);
+
+ priv->firm = kzalloc(sizeof(*priv->firm) * n_array, GFP_KERNEL);
+ if (priv->firm == NULL) {
+ printk(KERN_ERR "Not enough memory to load firmware file.\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+ priv->firm_size = n_array;
+
+ n = -1;
+ while (p < endp) {
+ __u32 type, size;
+ v4l2_std_id id;
+ __u16 int_freq = 0;
+
+ n++;
+ if (n >= n_array) {
+ printk(KERN_ERR "More firmware images in file than "
+ "were expected!\n");
+ goto corrupt;
+ }
+
+ /* Check if there are enough bytes to read */
+ if (endp - p < sizeof(type) + sizeof(id) + sizeof(size))
+ goto header;
+
+ type = get_unaligned_le32(p);
+ p += sizeof(type);
+
+ id = get_unaligned_le64(p);
+ p += sizeof(id);
+
+ if (type & HAS_IF) {
+ int_freq = get_unaligned_le16(p);
+ p += sizeof(int_freq);
+ if (endp - p < sizeof(size))
+ goto header;
+ }
+
+ size = get_unaligned_le32(p);
+ p += sizeof(size);
+
+ if (!size || size > endp - p) {
+ printk(KERN_ERR "Firmware type (%x), id %llx is corrupted (size=%d, expected %d)\n",
+ type, (unsigned long long)id,
+ (unsigned)(endp - p), size);
+ goto corrupt;
+ }
+
+ priv->firm[n].ptr = kzalloc(size, GFP_KERNEL);
+ if (priv->firm[n].ptr == NULL) {
+ printk(KERN_ERR "Not enough memory to load firmware file.\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ if (debug) {
+ printk(KERN_DEBUG "Reading firmware type ");
+ dump_firm_type_and_int_freq(type, int_freq);
+ printk(KERN_DEBUG "(%x), id %llx, size=%d.\n",
+ type, (unsigned long long)id, size);
+ }
+
+ memcpy(priv->firm[n].ptr, p, size);
+ priv->firm[n].type = type;
+ priv->firm[n].id = id;
+ priv->firm[n].size = size;
+ priv->firm[n].int_freq = int_freq;
+
+ p += size;
+ }
+
+ if (n + 1 != priv->firm_size) {
+ printk(KERN_ERR "Firmware file is incomplete!\n");
+ goto corrupt;
+ }
+
+ goto done;
+
+header:
+ printk(KERN_ERR "Firmware header is incomplete!\n");
+corrupt:
+ rc = -EINVAL;
+ printk(KERN_ERR "Error: firmware file is corrupted!\n");
+
+done:
+ release_firmware(fw);
+ if (rc == 0)
+ dprintk(1, "Firmware files loaded.\n");
+
+ return rc;
+}
+
+static int load_scode(struct dvb_frontend *fe, unsigned int type,
+ v4l2_std_id *id, __u16 int_freq, int scode)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ int pos, rc;
+ unsigned char *p;
+ u8 scode_buf[13];
+ u8 indirect_mode[5];
+
+ dprintk(1, "%s called int_freq=%d\n", __func__, int_freq);
+
+ if (!int_freq) {
+ pos = seek_firmware(fe, type, id);
+ if (pos < 0)
+ return pos;
+ } else {
+ for (pos = 0; pos < priv->firm_size; pos++) {
+ if ((priv->firm[pos].int_freq == int_freq) &&
+ (priv->firm[pos].type & HAS_IF))
+ break;
+ }
+ if (pos == priv->firm_size)
+ return -ENOENT;
+ }
+
+ p = priv->firm[pos].ptr;
+
+ if (priv->firm[pos].size != 12 * 16 || scode >= 16)
+ return -EINVAL;
+ p += 12 * scode;
+
+ if (debug) {
+ tuner_info("Loading SCODE for type=");
+ dump_firm_type_and_int_freq(priv->firm[pos].type,
+ priv->firm[pos].int_freq);
+ printk(KERN_CONT "(%x), id %016llx.\n", priv->firm[pos].type,
+ (unsigned long long)*id);
+ }
+
+ scode_buf[0] = 0x00;
+ memcpy(&scode_buf[1], p, 12);
+
+ /* Enter direct-mode */
+ rc = xc_write_reg(priv, XREG_DIRECTSITTING_MODE, 0);
+ if (rc < 0) {
+ printk(KERN_ERR "failed to put device into direct mode!\n");
+ return -EIO;
+ }
+
+ rc = xc_send_i2c_data(priv, scode_buf, 13);
+ if (rc != 0) {
+ /* Even if the send failed, make sure we set back to indirect
+ mode */
+ printk(KERN_ERR "Failed to set scode %d\n", rc);
+ }
+
+ /* Switch back to indirect-mode */
+ memset(indirect_mode, 0, sizeof(indirect_mode));
+ indirect_mode[4] = 0x88;
+ xc_send_i2c_data(priv, indirect_mode, sizeof(indirect_mode));
+ msleep(10);
+
+ return 0;
+}
+
+static int check_firmware(struct dvb_frontend *fe, unsigned int type,
+ v4l2_std_id std, __u16 int_freq)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ struct firmware_properties new_fw;
+ int rc = 0, is_retry = 0;
+ u16 hwmodel;
+ v4l2_std_id std0;
+ u8 hw_major, hw_minor, fw_major, fw_minor;
+
+ dprintk(1, "%s called\n", __func__);
+
+ if (!priv->firm) {
+ rc = xc4000_fwupload(fe);
+ if (rc < 0)
+ return rc;
+ }
+
+retry:
+ new_fw.type = type;
+ new_fw.id = std;
+ new_fw.std_req = std;
+ new_fw.scode_table = SCODE;
+ new_fw.scode_nr = 0;
+ new_fw.int_freq = int_freq;
+
+ dprintk(1, "checking firmware, user requested type=");
+ if (debug) {
+ dump_firm_type(new_fw.type);
+ printk(KERN_CONT "(%x), id %016llx, ", new_fw.type,
+ (unsigned long long)new_fw.std_req);
+ if (!int_freq)
+ printk(KERN_CONT "scode_tbl ");
+ else
+ printk(KERN_CONT "int_freq %d, ", new_fw.int_freq);
+ printk(KERN_CONT "scode_nr %d\n", new_fw.scode_nr);
+ }
+
+ /* No need to reload base firmware if it matches */
+ if (priv->cur_fw.type & BASE) {
+ dprintk(1, "BASE firmware not changed.\n");
+ goto skip_base;
+ }
+
+ /* Updating BASE - forget about all currently loaded firmware */
+ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
+
+ /* Reset is needed before loading firmware */
+ rc = xc4000_tuner_reset(fe);
+ if (rc < 0)
+ goto fail;
+
+ /* BASE firmwares are all std0 */
+ std0 = 0;
+ rc = load_firmware(fe, BASE, &std0);
+ if (rc < 0) {
+ printk(KERN_ERR "Error %d while loading base firmware\n", rc);
+ goto fail;
+ }
+
+ /* Load INIT1, if needed */
+ dprintk(1, "Load init1 firmware, if exists\n");
+
+ rc = load_firmware(fe, BASE | INIT1, &std0);
+ if (rc == -ENOENT)
+ rc = load_firmware(fe, BASE | INIT1, &std0);
+ if (rc < 0 && rc != -ENOENT) {
+ tuner_err("Error %d while loading init1 firmware\n",
+ rc);
+ goto fail;
+ }
+
+skip_base:
+ /*
+ * No need to reload standard specific firmware if base firmware
+ * was not reloaded and requested video standards have not changed.
+ */
+ if (priv->cur_fw.type == (BASE | new_fw.type) &&
+ priv->cur_fw.std_req == std) {
+ dprintk(1, "Std-specific firmware already loaded.\n");
+ goto skip_std_specific;
+ }
+
+ /* Reloading std-specific firmware forces a SCODE update */
+ priv->cur_fw.scode_table = 0;
+
+ /* Load the standard firmware */
+ rc = load_firmware(fe, new_fw.type, &new_fw.id);
+
+ if (rc < 0)
+ goto fail;
+
+skip_std_specific:
+ if (priv->cur_fw.scode_table == new_fw.scode_table &&
+ priv->cur_fw.scode_nr == new_fw.scode_nr) {
+ dprintk(1, "SCODE firmware already loaded.\n");
+ goto check_device;
+ }
+
+ /* Load SCODE firmware, if it exists */
+ rc = load_scode(fe, new_fw.type | new_fw.scode_table, &new_fw.id,
+ new_fw.int_freq, new_fw.scode_nr);
+ if (rc != 0)
+ dprintk(1, "load scode failed %d\n", rc);
+
+check_device:
+ rc = xc4000_readreg(priv, XREG_PRODUCT_ID, &hwmodel);
+
+ if (xc_get_version(priv, &hw_major, &hw_minor, &fw_major,
+ &fw_minor) != 0) {
+ printk(KERN_ERR "Unable to read tuner registers.\n");
+ goto fail;
+ }
+
+ dprintk(1, "Device is Xceive %d version %d.%d, "
+ "firmware version %d.%d\n",
+ hwmodel, hw_major, hw_minor, fw_major, fw_minor);
+
+ /* Check firmware version against what we downloaded. */
+ if (priv->firm_version != ((fw_major << 8) | fw_minor)) {
+ printk(KERN_WARNING
+ "Incorrect readback of firmware version %d.%d.\n",
+ fw_major, fw_minor);
+ goto fail;
+ }
+
+ /* Check that the tuner hardware model remains consistent over time. */
+ if (priv->hwmodel == 0 &&
+ (hwmodel == XC_PRODUCT_ID_XC4000 ||
+ hwmodel == XC_PRODUCT_ID_XC4100)) {
+ priv->hwmodel = hwmodel;
+ priv->hwvers = (hw_major << 8) | hw_minor;
+ } else if (priv->hwmodel == 0 || priv->hwmodel != hwmodel ||
+ priv->hwvers != ((hw_major << 8) | hw_minor)) {
+ printk(KERN_WARNING
+ "Read invalid device hardware information - tuner "
+ "hung?\n");
+ goto fail;
+ }
+
+ memcpy(&priv->cur_fw, &new_fw, sizeof(priv->cur_fw));
+
+ /*
+ * By setting BASE in cur_fw.type only after successfully loading all
+ * firmwares, we can:
+ * 1. Identify that BASE firmware with type=0 has been loaded;
+ * 2. Tell whether BASE firmware was just changed the next time through.
+ */
+ priv->cur_fw.type |= BASE;
+
+ return 0;
+
+fail:
+ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
+ if (!is_retry) {
+ msleep(50);
+ is_retry = 1;
+ dprintk(1, "Retrying firmware load\n");
+ goto retry;
+ }
+
+ if (rc == -ENOENT)
+ rc = -EINVAL;
+ return rc;
+}
+
+static void xc_debug_dump(struct xc4000_priv *priv)
+{
+ u16 adc_envelope;
+ u32 freq_error_hz = 0;
+ u16 lock_status;
+ u32 hsync_freq_hz = 0;
+ u16 frame_lines;
+ u16 quality;
+ u8 hw_majorversion = 0, hw_minorversion = 0;
+ u8 fw_majorversion = 0, fw_minorversion = 0;
+
+ xc_get_adc_envelope(priv, &adc_envelope);
+ dprintk(1, "*** ADC envelope (0-1023) = %d\n", adc_envelope);
+
+ xc_get_frequency_error(priv, &freq_error_hz);
+ dprintk(1, "*** Frequency error = %d Hz\n", freq_error_hz);
+
+ xc_get_lock_status(priv, &lock_status);
+ dprintk(1, "*** Lock status (0-Wait, 1-Locked, 2-No-signal) = %d\n",
+ lock_status);
+
+ xc_get_version(priv, &hw_majorversion, &hw_minorversion,
+ &fw_majorversion, &fw_minorversion);
+ dprintk(1, "*** HW: V%02x.%02x, FW: V%02x.%02x\n",
+ hw_majorversion, hw_minorversion,
+ fw_majorversion, fw_minorversion);
+
+ if (priv->video_standard < XC4000_DTV6) {
+ xc_get_hsync_freq(priv, &hsync_freq_hz);
+ dprintk(1, "*** Horizontal sync frequency = %d Hz\n",
+ hsync_freq_hz);
+
+ xc_get_frame_lines(priv, &frame_lines);
+ dprintk(1, "*** Frame lines = %d\n", frame_lines);
+ }
+
+ xc_get_quality(priv, &quality);
+ dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality);
+}
+
+static int xc4000_set_params(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ unsigned int type;
+ int ret = -EREMOTEIO;
+
+ dprintk(1, "%s() frequency=%d (Hz)\n", __func__, params->frequency);
+
+ mutex_lock(&priv->lock);
+
+ if (fe->ops.info.type == FE_ATSC) {
+ dprintk(1, "%s() ATSC\n", __func__);
+ switch (params->u.vsb.modulation) {
+ case VSB_8:
+ case VSB_16:
+ dprintk(1, "%s() VSB modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_AIR;
+ priv->freq_hz = params->frequency - 1750000;
+ priv->bandwidth = BANDWIDTH_6_MHZ;
+ priv->video_standard = XC4000_DTV6;
+ type = DTV6;
+ break;
+ case QAM_64:
+ case QAM_256:
+ case QAM_AUTO:
+ dprintk(1, "%s() QAM modulation\n", __func__);
+ priv->rf_mode = XC_RF_MODE_CABLE;
+ priv->freq_hz = params->frequency - 1750000;
+ priv->bandwidth = BANDWIDTH_6_MHZ;
+ priv->video_standard = XC4000_DTV6;
+ type = DTV6;
+ break;
+ default:
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else if (fe->ops.info.type == FE_OFDM) {
+ dprintk(1, "%s() OFDM\n", __func__);
+ switch (params->u.ofdm.bandwidth) {
+ case BANDWIDTH_6_MHZ:
+ priv->bandwidth = BANDWIDTH_6_MHZ;
+ priv->video_standard = XC4000_DTV6;
+ priv->freq_hz = params->frequency - 1750000;
+ type = DTV6;
+ break;
+ case BANDWIDTH_7_MHZ:
+ priv->bandwidth = BANDWIDTH_7_MHZ;
+ priv->video_standard = XC4000_DTV7;
+ priv->freq_hz = params->frequency - 2250000;
+ type = DTV7;
+ break;
+ case BANDWIDTH_8_MHZ:
+ priv->bandwidth = BANDWIDTH_8_MHZ;
+ priv->video_standard = XC4000_DTV8;
+ priv->freq_hz = params->frequency - 2750000;
+ type = DTV8;
+ break;
+ case BANDWIDTH_AUTO:
+ if (params->frequency < 400000000) {
+ priv->bandwidth = BANDWIDTH_7_MHZ;
+ priv->freq_hz = params->frequency - 2250000;
+ } else {
+ priv->bandwidth = BANDWIDTH_8_MHZ;
+ priv->freq_hz = params->frequency - 2750000;
+ }
+ priv->video_standard = XC4000_DTV7_8;
+ type = DTV78;
+ break;
+ default:
+ printk(KERN_ERR "xc4000 bandwidth not set!\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+ priv->rf_mode = XC_RF_MODE_AIR;
+ } else {
+ printk(KERN_ERR "xc4000 modulation type not supported!\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dprintk(1, "%s() frequency=%d (compensated)\n",
+ __func__, priv->freq_hz);
+
+ /* Make sure the correct firmware type is loaded */
+ if (check_firmware(fe, type, 0, priv->if_khz) != 0)
+ goto fail;
+
+ ret = xc_set_signal_source(priv, priv->rf_mode);
+ if (ret != 0) {
+ printk(KERN_ERR "xc4000: xc_set_signal_source(%d) failed\n",
+ priv->rf_mode);
+ goto fail;
+ } else {
+ u16 video_mode, audio_mode;
+ video_mode = xc4000_standard[priv->video_standard].video_mode;
+ audio_mode = xc4000_standard[priv->video_standard].audio_mode;
+ if (type == DTV6 && priv->firm_version != 0x0102)
+ video_mode |= 0x0001;
+ ret = xc_set_tv_standard(priv, video_mode, audio_mode);
+ if (ret != 0) {
+ printk(KERN_ERR "xc4000: xc_set_tv_standard failed\n");
+ /* DJH - do not return when it fails... */
+ /* goto fail; */
+ }
+ }
+
+ if (xc_write_reg(priv, XREG_D_CODE, 0) == 0)
+ ret = 0;
+ if (priv->dvb_amplitude != 0) {
+ if (xc_write_reg(priv, XREG_AMPLITUDE,
+ (priv->firm_version != 0x0102 ||
+ priv->dvb_amplitude != 134 ?
+ priv->dvb_amplitude : 132)) != 0)
+ ret = -EREMOTEIO;
+ }
+ if (priv->set_smoothedcvbs != 0) {
+ if (xc_write_reg(priv, XREG_SMOOTHEDCVBS, 1) != 0)
+ ret = -EREMOTEIO;
+ }
+ if (ret != 0) {
+ printk(KERN_ERR "xc4000: setting registers failed\n");
+ /* goto fail; */
+ }
+
+ xc_tune_channel(priv, priv->freq_hz);
+
+ ret = 0;
+
+fail:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int xc4000_set_analog_params(struct dvb_frontend *fe,
+ struct analog_parameters *params)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ unsigned int type = 0;
+ int ret = -EREMOTEIO;
+
+ if (params->mode == V4L2_TUNER_RADIO) {
+ dprintk(1, "%s() frequency=%d (in units of 62.5Hz)\n",
+ __func__, params->frequency);
+
+ mutex_lock(&priv->lock);
+
+ params->std = 0;
+ priv->freq_hz = params->frequency * 125L / 2;
+
+ if (audio_std & XC4000_AUDIO_STD_INPUT1) {
+ priv->video_standard = XC4000_FM_Radio_INPUT1;
+ type = FM | INPUT1;
+ } else {
+ priv->video_standard = XC4000_FM_Radio_INPUT2;
+ type = FM | INPUT2;
+ }
+
+ goto tune_channel;
+ }
+
+ dprintk(1, "%s() frequency=%d (in units of 62.5khz)\n",
+ __func__, params->frequency);
+
+ mutex_lock(&priv->lock);
+
+ /* params->frequency is in units of 62.5 kHz */
+ priv->freq_hz = params->frequency * 62500;
+
+ params->std &= V4L2_STD_ALL;
+ /* if std is not defined, choose one */
+ if (!params->std)
+ params->std = V4L2_STD_PAL_BG;
+
+ if (audio_std & XC4000_AUDIO_STD_MONO)
+ type = MONO;
+
+ if (params->std & V4L2_STD_MN) {
+ params->std = V4L2_STD_MN;
+ if (audio_std & XC4000_AUDIO_STD_MONO) {
+ priv->video_standard = XC4000_MN_NTSC_PAL_Mono;
+ } else if (audio_std & XC4000_AUDIO_STD_A2) {
+ params->std |= V4L2_STD_A2;
+ priv->video_standard = XC4000_MN_NTSC_PAL_A2;
+ } else {
+ params->std |= V4L2_STD_BTSC;
+ priv->video_standard = XC4000_MN_NTSC_PAL_BTSC;
+ }
+ goto tune_channel;
+ }
+
+ if (params->std & V4L2_STD_PAL_BG) {
+ params->std = V4L2_STD_PAL_BG;
+ if (audio_std & XC4000_AUDIO_STD_MONO) {
+ priv->video_standard = XC4000_BG_PAL_MONO;
+ } else if (!(audio_std & XC4000_AUDIO_STD_A2)) {
+ if (!(audio_std & XC4000_AUDIO_STD_B)) {
+ params->std |= V4L2_STD_NICAM_A;
+ priv->video_standard = XC4000_BG_PAL_NICAM;
+ } else {
+ params->std |= V4L2_STD_NICAM_B;
+ priv->video_standard = XC4000_BG_PAL_NICAM;
+ }
+ } else {
+ if (!(audio_std & XC4000_AUDIO_STD_B)) {
+ params->std |= V4L2_STD_A2_A;
+ priv->video_standard = XC4000_BG_PAL_A2;
+ } else {
+ params->std |= V4L2_STD_A2_B;
+ priv->video_standard = XC4000_BG_PAL_A2;
+ }
+ }
+ goto tune_channel;
+ }
+
+ if (params->std & V4L2_STD_PAL_I) {
+ /* default to NICAM audio standard */
+ params->std = V4L2_STD_PAL_I | V4L2_STD_NICAM;
+ if (audio_std & XC4000_AUDIO_STD_MONO)
+ priv->video_standard = XC4000_I_PAL_NICAM_MONO;
+ else
+ priv->video_standard = XC4000_I_PAL_NICAM;
+ goto tune_channel;
+ }
+
+ if (params->std & V4L2_STD_PAL_DK) {
+ params->std = V4L2_STD_PAL_DK;
+ if (audio_std & XC4000_AUDIO_STD_MONO) {
+ priv->video_standard = XC4000_DK_PAL_MONO;
+ } else if (audio_std & XC4000_AUDIO_STD_A2) {
+ params->std |= V4L2_STD_A2;
+ priv->video_standard = XC4000_DK_PAL_A2;
+ } else {
+ params->std |= V4L2_STD_NICAM;
+ priv->video_standard = XC4000_DK_PAL_NICAM;
+ }
+ goto tune_channel;
+ }
+
+ if (params->std & V4L2_STD_SECAM_DK) {
+ /* default to A2 audio standard */
+ params->std = V4L2_STD_SECAM_DK | V4L2_STD_A2;
+ if (audio_std & XC4000_AUDIO_STD_L) {
+ type = 0;
+ priv->video_standard = XC4000_DK_SECAM_NICAM;
+ } else if (audio_std & XC4000_AUDIO_STD_MONO) {
+ priv->video_standard = XC4000_DK_SECAM_A2MONO;
+ } else if (audio_std & XC4000_AUDIO_STD_K3) {
+ params->std |= V4L2_STD_SECAM_K3;
+ priv->video_standard = XC4000_DK_SECAM_A2LDK3;
+ } else {
+ priv->video_standard = XC4000_DK_SECAM_A2DK1;
+ }
+ goto tune_channel;
+ }
+
+ if (params->std & V4L2_STD_SECAM_L) {
+ /* default to NICAM audio standard */
+ type = 0;
+ params->std = V4L2_STD_SECAM_L | V4L2_STD_NICAM;
+ priv->video_standard = XC4000_L_SECAM_NICAM;
+ goto tune_channel;
+ }
+
+ if (params->std & V4L2_STD_SECAM_LC) {
+ /* default to NICAM audio standard */
+ type = 0;
+ params->std = V4L2_STD_SECAM_LC | V4L2_STD_NICAM;
+ priv->video_standard = XC4000_LC_SECAM_NICAM;
+ goto tune_channel;
+ }
+
+tune_channel:
+ /* FIXME: it could be air. */
+ priv->rf_mode = XC_RF_MODE_CABLE;
+
+ if (check_firmware(fe, type, params->std,
+ xc4000_standard[priv->video_standard].int_freq) != 0)
+ goto fail;
+
+ ret = xc_set_signal_source(priv, priv->rf_mode);
+ if (ret != 0) {
+ printk(KERN_ERR
+ "xc4000: xc_set_signal_source(%d) failed\n",
+ priv->rf_mode);
+ goto fail;
+ } else {
+ u16 video_mode, audio_mode;
+ video_mode = xc4000_standard[priv->video_standard].video_mode;
+ audio_mode = xc4000_standard[priv->video_standard].audio_mode;
+ if (priv->video_standard < XC4000_BG_PAL_A2) {
+ if (type & NOGD)
+ video_mode &= 0xFF7F;
+ } else if (priv->video_standard < XC4000_I_PAL_NICAM) {
+ if (priv->firm_version == 0x0102)
+ video_mode &= 0xFEFF;
+ if (audio_std & XC4000_AUDIO_STD_B)
+ video_mode |= 0x0080;
+ }
+ ret = xc_set_tv_standard(priv, video_mode, audio_mode);
+ if (ret != 0) {
+ printk(KERN_ERR "xc4000: xc_set_tv_standard failed\n");
+ goto fail;
+ }
+ }
+
+ if (xc_write_reg(priv, XREG_D_CODE, 0) == 0)
+ ret = 0;
+ if (xc_write_reg(priv, XREG_AMPLITUDE, 1) != 0)
+ ret = -EREMOTEIO;
+ if (priv->set_smoothedcvbs != 0) {
+ if (xc_write_reg(priv, XREG_SMOOTHEDCVBS, 1) != 0)
+ ret = -EREMOTEIO;
+ }
+ if (ret != 0) {
+ printk(KERN_ERR "xc4000: setting registers failed\n");
+ goto fail;
+ }
+
+ xc_tune_channel(priv, priv->freq_hz);
+
+ ret = 0;
+
+fail:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+
+ *freq = priv->freq_hz;
+
+ if (debug) {
+ mutex_lock(&priv->lock);
+ if ((priv->cur_fw.type
+ & (BASE | FM | DTV6 | DTV7 | DTV78 | DTV8)) == BASE) {
+ u16 snr = 0;
+ if (xc4000_readreg(priv, XREG_SNR, &snr) == 0) {
+ mutex_unlock(&priv->lock);
+ dprintk(1, "%s() freq = %u, SNR = %d\n",
+ __func__, *freq, snr);
+ return 0;
+ }
+ }
+ mutex_unlock(&priv->lock);
+ }
+
+ dprintk(1, "%s()\n", __func__);
+
+ return 0;
+}
+
+static int xc4000_get_bandwidth(struct dvb_frontend *fe, u32 *bw)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ dprintk(1, "%s()\n", __func__);
+
+ *bw = priv->bandwidth;
+ return 0;
+}
+
+static int xc4000_get_status(struct dvb_frontend *fe, u32 *status)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ u16 lock_status = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->cur_fw.type & BASE)
+ xc_get_lock_status(priv, &lock_status);
+
+ *status = (lock_status == 1 ?
+ TUNER_STATUS_LOCKED | TUNER_STATUS_STEREO : 0);
+ if (priv->cur_fw.type & (DTV6 | DTV7 | DTV78 | DTV8))
+ *status &= (~TUNER_STATUS_STEREO);
+
+ mutex_unlock(&priv->lock);
+
+ dprintk(2, "%s() lock_status = %d\n", __func__, lock_status);
+
+ return 0;
+}
+
+static int xc4000_sleep(struct dvb_frontend *fe)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+ int ret = 0;
+
+ dprintk(1, "%s()\n", __func__);
+
+ mutex_lock(&priv->lock);
+
+ /* Avoid firmware reload on slow devices */
+ if ((no_poweroff == 2 ||
+ (no_poweroff == 0 && priv->default_pm != 0)) &&
+ (priv->cur_fw.type & BASE) != 0) {
+ /* force reset and firmware reload */
+ priv->cur_fw.type = XC_POWERED_DOWN;
+
+ if (xc_write_reg(priv, XREG_POWER_DOWN, 0) != 0) {
+ printk(KERN_ERR
+ "xc4000: %s() unable to shutdown tuner\n",
+ __func__);
+ ret = -EREMOTEIO;
+ }
+ msleep(20);
+ }
+
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int xc4000_init(struct dvb_frontend *fe)
+{
+ dprintk(1, "%s()\n", __func__);
+
+ return 0;
+}
+
+static int xc4000_release(struct dvb_frontend *fe)
+{
+ struct xc4000_priv *priv = fe->tuner_priv;
+
+ dprintk(1, "%s()\n", __func__);
+
+ mutex_lock(&xc4000_list_mutex);
+
+ if (priv)
+ hybrid_tuner_release_state(priv);
+
+ mutex_unlock(&xc4000_list_mutex);
+
+ fe->tuner_priv = NULL;
+
+ return 0;
+}
+
+static const struct dvb_tuner_ops xc4000_tuner_ops = {
+ .info = {
+ .name = "Xceive XC4000",
+ .frequency_min = 1000000,
+ .frequency_max = 1023000000,
+ .frequency_step = 50000,
+ },
+
+ .release = xc4000_release,
+ .init = xc4000_init,
+ .sleep = xc4000_sleep,
+
+ .set_params = xc4000_set_params,
+ .set_analog_params = xc4000_set_analog_params,
+ .get_frequency = xc4000_get_frequency,
+ .get_bandwidth = xc4000_get_bandwidth,
+ .get_status = xc4000_get_status
+};
+
+struct dvb_frontend *xc4000_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ struct xc4000_config *cfg)
+{
+ struct xc4000_priv *priv = NULL;
+ int instance;
+ u16 id = 0;
+
+ dprintk(1, "%s(%d-%04x)\n", __func__,
+ i2c ? i2c_adapter_id(i2c) : -1,
+ cfg ? cfg->i2c_address : -1);
+
+ mutex_lock(&xc4000_list_mutex);
+
+ instance = hybrid_tuner_request_state(struct xc4000_priv, priv,
+ hybrid_tuner_instance_list,
+ i2c, cfg->i2c_address, "xc4000");
+ switch (instance) {
+ case 0:
+ goto fail;
+ break;
+ case 1:
+ /* new tuner instance */
+ priv->bandwidth = BANDWIDTH_6_MHZ;
+ /* set default configuration */
+ priv->if_khz = 4560;
+ priv->default_pm = 0;
+ priv->dvb_amplitude = 134;
+ priv->set_smoothedcvbs = 1;
+ mutex_init(&priv->lock);
+ fe->tuner_priv = priv;
+ break;
+ default:
+ /* existing tuner instance */
+ fe->tuner_priv = priv;
+ break;
+ }
+
+ if (cfg->if_khz != 0) {
+ /* copy configuration if provided by the caller */
+ priv->if_khz = cfg->if_khz;
+ priv->default_pm = cfg->default_pm;
+ priv->dvb_amplitude = cfg->dvb_amplitude;
+ priv->set_smoothedcvbs = cfg->set_smoothedcvbs;
+ }
+
+ /* Check if firmware has been loaded. It is possible that another
+ instance of the driver has loaded the firmware.
+ */
+
+ if (instance == 1) {
+ if (xc4000_readreg(priv, XREG_PRODUCT_ID, &id) != 0)
+ goto fail;
+ } else {
+ id = ((priv->cur_fw.type & BASE) != 0 ?
+ priv->hwmodel : XC_PRODUCT_ID_FW_NOT_LOADED);
+ }
+
+ switch (id) {
+ case XC_PRODUCT_ID_XC4000:
+ case XC_PRODUCT_ID_XC4100:
+ printk(KERN_INFO
+ "xc4000: Successfully identified at address 0x%02x\n",
+ cfg->i2c_address);
+ printk(KERN_INFO
+ "xc4000: Firmware has been loaded previously\n");
+ break;
+ case XC_PRODUCT_ID_FW_NOT_LOADED:
+ printk(KERN_INFO
+ "xc4000: Successfully identified at address 0x%02x\n",
+ cfg->i2c_address);
+ printk(KERN_INFO
+ "xc4000: Firmware has not been loaded previously\n");
+ break;
+ default:
+ printk(KERN_ERR
+ "xc4000: Device not found at addr 0x%02x (0x%x)\n",
+ cfg->i2c_address, id);
+ goto fail;
+ }
+
+ mutex_unlock(&xc4000_list_mutex);
+
+ memcpy(&fe->ops.tuner_ops, &xc4000_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+
+ if (instance == 1) {
+ int ret;
+ mutex_lock(&priv->lock);
+ ret = xc4000_fwupload(fe);
+ mutex_unlock(&priv->lock);
+ if (ret != 0)
+ goto fail2;
+ }
+
+ return fe;
+fail:
+ mutex_unlock(&xc4000_list_mutex);
+fail2:
+ xc4000_release(fe);
+ return NULL;
+}
+EXPORT_SYMBOL(xc4000_attach);
+
+MODULE_AUTHOR("Steven Toth, Davide Ferri");
+MODULE_DESCRIPTION("Xceive xc4000 silicon tuner driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/tuners/xc4000.h b/drivers/media/common/tuners/xc4000.h
new file mode 100644
index 00000000000..e6a44d151cb
--- /dev/null
+++ b/drivers/media/common/tuners/xc4000.h
@@ -0,0 +1,67 @@
+/*
+ * Driver for Xceive XC4000 "QAM/8VSB single chip tuner"
+ *
+ * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __XC4000_H__
+#define __XC4000_H__
+
+#include <linux/firmware.h>
+
+struct dvb_frontend;
+struct i2c_adapter;
+
+struct xc4000_config {
+ u8 i2c_address;
+ /* if non-zero, power management is enabled by default */
+ u8 default_pm;
+ /* value to be written to XREG_AMPLITUDE in DVB-T mode (0: no write) */
+ u8 dvb_amplitude;
+ /* if non-zero, register 0x0E is set to filter analog TV video output */
+ u8 set_smoothedcvbs;
+ /* IF for DVB-T */
+ u32 if_khz;
+};
+
+/* xc4000 callback command */
+#define XC4000_TUNER_RESET 0
+
+/* When a bridge framework attaches either the analog or the digital side,
+ * it has to store a reference back to its _core equivalent structure,
+ * so that it can service the hardware by steering GPIOs etc.
+ * Each bridge implementation is different, so cast devptr accordingly.
+ * The xc4000 driver does not care about this value, other than ensuring
+ * it is passed back to the bridge during tuner_callback().
+ */
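+
+/* A minimal usage sketch (illustrative only: the i2c adapter pointer and
+ * the configuration values below are assumptions, not taken from any
+ * real bridge driver):
+ *
+ *	static struct xc4000_config my_xc4000_cfg = {
+ *		.i2c_address      = 0x61,
+ *		.if_khz           = 4560,
+ *		.default_pm       = 0,
+ *		.dvb_amplitude    = 134,
+ *		.set_smoothedcvbs = 1,
+ *	};
+ *
+ *	fe = xc4000_attach(fe, &bridge->i2c_adap, &my_xc4000_cfg);
+ *	if (fe == NULL)
+ *		goto frontend_detach;
+ */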
+
+#if defined(CONFIG_MEDIA_TUNER_XC4000) || (defined(CONFIG_MEDIA_TUNER_XC4000_MODULE) && defined(MODULE))
+extern struct dvb_frontend *xc4000_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ struct xc4000_config *cfg);
+#else
+static inline struct dvb_frontend *xc4000_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ struct xc4000_config *cfg)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/dvb/Kconfig b/drivers/media/dvb/Kconfig
index ee214c3b63d..f6e40b3a44c 100644
--- a/drivers/media/dvb/Kconfig
+++ b/drivers/media/dvb/Kconfig
@@ -80,6 +80,10 @@ comment "Supported nGene Adapters"
depends on DVB_CORE && PCI && I2C
source "drivers/media/dvb/ngene/Kconfig"
+comment "Supported ddbridge ('Octopus') Adapters"
+ depends on DVB_CORE && PCI && I2C
+ source "drivers/media/dvb/ddbridge/Kconfig"
+
comment "Supported DVB Frontends"
depends on DVB_CORE
source "drivers/media/dvb/frontends/Kconfig"
diff --git a/drivers/media/dvb/Makefile b/drivers/media/dvb/Makefile
index a1a08758a6f..b2cefe637a6 100644
--- a/drivers/media/dvb/Makefile
+++ b/drivers/media/dvb/Makefile
@@ -15,6 +15,7 @@ obj-y := dvb-core/ \
dm1105/ \
pt1/ \
mantis/ \
- ngene/
+ ngene/ \
+ ddbridge/
obj-$(CONFIG_DVB_FIREDTV) += firewire/
diff --git a/drivers/media/dvb/b2c2/flexcop-common.h b/drivers/media/dvb/b2c2/flexcop-common.h
index 9e2148a1996..437912e4982 100644
--- a/drivers/media/dvb/b2c2/flexcop-common.h
+++ b/drivers/media/dvb/b2c2/flexcop-common.h
@@ -6,6 +6,7 @@
#ifndef __FLEXCOP_COMMON_H__
#define __FLEXCOP_COMMON_H__
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/mutex.h>
diff --git a/drivers/media/dvb/bt8xx/dvb-bt8xx.c b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
index 1e1106dcd06..521d6910498 100644
--- a/drivers/media/dvb/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/dvb/bt8xx/dvb-bt8xx.c
@@ -892,7 +892,7 @@ static int __devinit dvb_bt8xx_probe(struct bttv_sub_device *sub)
if (!(bttv_pci_dev = bttv_get_pcidev(card->bttv_nr))) {
printk("dvb_bt8xx: no pci device for card %d\n", card->bttv_nr);
kfree(card);
- return -EFAULT;
+ return -ENODEV;
}
if (!(card->bt = dvb_bt8xx_878_match(card->bttv_nr, bttv_pci_dev))) {
@@ -902,7 +902,7 @@ static int __devinit dvb_bt8xx_probe(struct bttv_sub_device *sub)
"installed, try removing it.\n");
kfree(card);
- return -EFAULT;
+ return -ENODEV;
}
mutex_init(&card->bt->gpio_lock);
diff --git a/drivers/media/dvb/ddbridge/Kconfig b/drivers/media/dvb/ddbridge/Kconfig
new file mode 100644
index 00000000000..d099e1a12c8
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/Kconfig
@@ -0,0 +1,18 @@
+config DVB_DDBRIDGE
+ tristate "Digital Devices bridge support"
+ depends on DVB_CORE && PCI && I2C
+ select DVB_LNBP21 if !DVB_FE_CUSTOMISE
+ select DVB_STV6110x if !DVB_FE_CUSTOMISE
+ select DVB_STV090x if !DVB_FE_CUSTOMISE
+ select DVB_DRXK if !DVB_FE_CUSTOMISE
+ select DVB_TDA18271C2DD if !DVB_FE_CUSTOMISE
+ ---help---
+ Support for cards with the Digital Devices PCI express bridge:
+ - Octopus PCIe Bridge
+ - Octopus mini PCIe Bridge
+ - Octopus LE
+ - DuoFlex S2 Octopus
+ - DuoFlex CT Octopus
+ - cineS2(v6)
+
+ Say Y if you own such a card and want to use it.
diff --git a/drivers/media/dvb/ddbridge/Makefile b/drivers/media/dvb/ddbridge/Makefile
new file mode 100644
index 00000000000..de4fe193c3e
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the ddbridge device driver
+#
+
+ddbridge-objs := ddbridge-core.o
+
+obj-$(CONFIG_DVB_DDBRIDGE) += ddbridge.o
+
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
+EXTRA_CFLAGS += -Idrivers/media/dvb/frontends/
+EXTRA_CFLAGS += -Idrivers/media/common/tuners/
+
+# For the staging CI driver cxd2099
+EXTRA_CFLAGS += -Idrivers/staging/cxd2099/
diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
new file mode 100644
index 00000000000..573d540f213
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
@@ -0,0 +1,1719 @@
+/*
+ * ddbridge.c: Digital Devices PCIe bridge driver
+ *
+ * Copyright (C) 2010-2011 Digital Devices GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/timer.h>
+#include <linux/version.h>
+#include <linux/i2c.h>
+#include <linux/swab.h>
+#include <linux/vmalloc.h>
+#include "ddbridge.h"
+
+#include "ddbridge-regs.h"
+
+#include "tda18271c2dd.h"
+#include "stv6110x.h"
+#include "stv090x.h"
+#include "lnbh24.h"
+#include "drxk.h"
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+/* MSI had problems with lost interrupts, fixed but needs testing */
+#undef CONFIG_PCI_MSI
+
+/******************************************************************************/
+
+static int i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val)
+{
+ struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = 1 } };
+ return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1;
+}
+
+static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr, u8 reg, u8 *val)
+{
+ struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
+ .buf = &reg, .len = 1 },
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = 1 } };
+ return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
+}
+
+static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr,
+ u16 reg, u8 *val)
+{
+ u8 msg[2] = {reg>>8, reg&0xff};
+ struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
+ .buf = msg, .len = 2},
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = 1} };
+ return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
+}
+
+static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd)
+{
+ struct ddb *dev = i2c->dev;
+ int stat;
+ u32 val;
+
+ i2c->done = 0;
+ ddbwritel((adr << 9) | cmd, i2c->regs + I2C_COMMAND);
+ stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ);
+ if (stat <= 0) {
+ printk(KERN_ERR "I2C timeout\n");
+ { /* MSI debugging */
+ u32 istat = ddbreadl(INTERRUPT_STATUS);
+ printk(KERN_ERR "IRS %08x\n", istat);
+ ddbwritel(istat, INTERRUPT_ACK);
+ }
+ return -EIO;
+ }
+ val = ddbreadl(i2c->regs+I2C_COMMAND);
+ if (val & 0x70000)
+ return -EIO;
+ return 0;
+}
+
+static int ddb_i2c_master_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg msg[], int num)
+{
+ struct ddb_i2c *i2c = (struct ddb_i2c *)i2c_get_adapdata(adapter);
+ struct ddb *dev = i2c->dev;
+ u8 addr = 0;
+
+ if (num)
+ addr = msg[0].addr;
+
+ if (num == 2 && msg[1].flags & I2C_M_RD &&
+ !(msg[0].flags & I2C_M_RD)) {
+ memcpy_toio(dev->regs + I2C_TASKMEM_BASE + i2c->wbuf,
+ msg[0].buf, msg[0].len);
+ ddbwritel(msg[0].len|(msg[1].len << 16),
+ i2c->regs+I2C_TASKLENGTH);
+ if (!ddb_i2c_cmd(i2c, addr, 1)) {
+ memcpy_fromio(msg[1].buf,
+ dev->regs + I2C_TASKMEM_BASE + i2c->rbuf,
+ msg[1].len);
+ return num;
+ }
+ }
+
+ if (num == 1 && !(msg[0].flags & I2C_M_RD)) {
+ ddbcpyto(I2C_TASKMEM_BASE + i2c->wbuf, msg[0].buf, msg[0].len);
+ ddbwritel(msg[0].len, i2c->regs + I2C_TASKLENGTH);
+ if (!ddb_i2c_cmd(i2c, addr, 2))
+ return num;
+ }
+ if (num == 1 && (msg[0].flags & I2C_M_RD)) {
+ ddbwritel(msg[0].len << 16, i2c->regs + I2C_TASKLENGTH);
+ if (!ddb_i2c_cmd(i2c, addr, 3)) {
+ ddbcpyfrom(msg[0].buf,
+ I2C_TASKMEM_BASE + i2c->rbuf, msg[0].len);
+ return num;
+ }
+ }
+ return -EIO;
+}
+
+
+static u32 ddb_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_EMUL;
+}
+
+struct i2c_algorithm ddb_i2c_algo = {
+ .master_xfer = ddb_i2c_master_xfer,
+ .functionality = ddb_i2c_functionality,
+};
+
+static void ddb_i2c_release(struct ddb *dev)
+{
+ int i;
+ struct ddb_i2c *i2c;
+ struct i2c_adapter *adap;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ i2c = &dev->i2c[i];
+ adap = &i2c->adap;
+ i2c_del_adapter(adap);
+ }
+}
+
+static int ddb_i2c_init(struct ddb *dev)
+{
+ int i, j, stat = 0;
+ struct ddb_i2c *i2c;
+ struct i2c_adapter *adap;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ i2c = &dev->i2c[i];
+ i2c->dev = dev;
+ i2c->nr = i;
+ i2c->wbuf = i * (I2C_TASKMEM_SIZE / 4);
+ i2c->rbuf = i2c->wbuf + (I2C_TASKMEM_SIZE / 8);
+ i2c->regs = 0x80 + i * 0x20;
+ ddbwritel(I2C_SPEED_100, i2c->regs + I2C_TIMING);
+ ddbwritel((i2c->rbuf << 16) | i2c->wbuf,
+ i2c->regs + I2C_TASKADDRESS);
+ init_waitqueue_head(&i2c->wq);
+
+ adap = &i2c->adap;
+ i2c_set_adapdata(adap, i2c);
+#ifdef I2C_ADAP_CLASS_TV_DIGITAL
+ adap->class = I2C_ADAP_CLASS_TV_DIGITAL|I2C_CLASS_TV_ANALOG;
+#else
+#ifdef I2C_CLASS_TV_ANALOG
+ adap->class = I2C_CLASS_TV_ANALOG;
+#endif
+#endif
+ strcpy(adap->name, "ddbridge");
+ adap->algo = &ddb_i2c_algo;
+ adap->algo_data = (void *)i2c;
+ adap->dev.parent = &dev->pdev->dev;
+ stat = i2c_add_adapter(adap);
+ if (stat)
+ break;
+ }
+ if (stat)
+ for (j = 0; j < i; j++) {
+ i2c = &dev->i2c[j];
+ adap = &i2c->adap;
+ i2c_del_adapter(adap);
+ }
+ return stat;
+}
+
+
+/******************************************************************************/
+/******************************************************************************/
+/******************************************************************************/
+
+#if 0
+static void set_table(struct ddb *dev, u32 off,
+ dma_addr_t *pbuf, u32 num)
+{
+ u32 i, base;
+ u64 mem;
+
+ base = DMA_BASE_ADDRESS_TABLE + off;
+ for (i = 0; i < num; i++) {
+ mem = pbuf[i];
+ ddbwritel(mem & 0xffffffff, base + i * 8);
+ ddbwritel(mem >> 32, base + i * 8 + 4);
+ }
+}
+#endif
+
+static void ddb_address_table(struct ddb *dev)
+{
+ u32 i, j, base;
+ u64 mem;
+ dma_addr_t *pbuf;
+
+ for (i = 0; i < dev->info->port_num * 2; i++) {
+ base = DMA_BASE_ADDRESS_TABLE + i * 0x100;
+ pbuf = dev->input[i].pbuf;
+ for (j = 0; j < dev->input[i].dma_buf_num; j++) {
+ mem = pbuf[j];
+ ddbwritel(mem & 0xffffffff, base + j * 8);
+ ddbwritel(mem >> 32, base + j * 8 + 4);
+ }
+ }
+ for (i = 0; i < dev->info->port_num; i++) {
+ base = DMA_BASE_ADDRESS_TABLE + 0x800 + i * 0x100;
+ pbuf = dev->output[i].pbuf;
+ for (j = 0; j < dev->output[i].dma_buf_num; j++) {
+ mem = pbuf[j];
+ ddbwritel(mem & 0xffffffff, base + j * 8);
+ ddbwritel(mem >> 32, base + j * 8 + 4);
+ }
+ }
+}
+
+static void io_free(struct pci_dev *pdev, u8 **vbuf,
+ dma_addr_t *pbuf, u32 size, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ if (vbuf[i]) {
+ pci_free_consistent(pdev, size, vbuf[i], pbuf[i]);
+ vbuf[i] = 0;
+ }
+ }
+}
+
+static int io_alloc(struct pci_dev *pdev, u8 **vbuf,
+ dma_addr_t *pbuf, u32 size, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ vbuf[i] = pci_alloc_consistent(pdev, size, &pbuf[i]);
+ if (!vbuf[i])
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int ddb_buffers_alloc(struct ddb *dev)
+{
+ int i;
+ struct ddb_port *port;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ port = &dev->port[i];
+ switch (port->class) {
+ case DDB_PORT_TUNER:
+ if (io_alloc(dev->pdev, port->input[0]->vbuf,
+ port->input[0]->pbuf,
+ port->input[0]->dma_buf_size,
+ port->input[0]->dma_buf_num) < 0)
+ return -1;
+ if (io_alloc(dev->pdev, port->input[1]->vbuf,
+ port->input[1]->pbuf,
+ port->input[1]->dma_buf_size,
+ port->input[1]->dma_buf_num) < 0)
+ return -1;
+ break;
+ case DDB_PORT_CI:
+ if (io_alloc(dev->pdev, port->input[0]->vbuf,
+ port->input[0]->pbuf,
+ port->input[0]->dma_buf_size,
+ port->input[0]->dma_buf_num) < 0)
+ return -1;
+ if (io_alloc(dev->pdev, port->output->vbuf,
+ port->output->pbuf,
+ port->output->dma_buf_size,
+ port->output->dma_buf_num) < 0)
+ return -1;
+ break;
+ default:
+ break;
+ }
+ }
+ ddb_address_table(dev);
+ return 0;
+}
+
+static void ddb_buffers_free(struct ddb *dev)
+{
+ int i;
+ struct ddb_port *port;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ port = &dev->port[i];
+ io_free(dev->pdev, port->input[0]->vbuf,
+ port->input[0]->pbuf,
+ port->input[0]->dma_buf_size,
+ port->input[0]->dma_buf_num);
+ io_free(dev->pdev, port->input[1]->vbuf,
+ port->input[1]->pbuf,
+ port->input[1]->dma_buf_size,
+ port->input[1]->dma_buf_num);
+ io_free(dev->pdev, port->output->vbuf,
+ port->output->pbuf,
+ port->output->dma_buf_size,
+ port->output->dma_buf_num);
+ }
+}
+
+static void ddb_input_start(struct ddb_input *input)
+{
+ struct ddb *dev = input->port->dev;
+
+ spin_lock_irq(&input->lock);
+ input->cbuf = 0;
+ input->coff = 0;
+
+ /* reset */
+ ddbwritel(0, TS_INPUT_CONTROL(input->nr));
+ ddbwritel(2, TS_INPUT_CONTROL(input->nr));
+ ddbwritel(0, TS_INPUT_CONTROL(input->nr));
+
+ ddbwritel((1 << 16) |
+ (input->dma_buf_num << 11) |
+ (input->dma_buf_size >> 7),
+ DMA_BUFFER_SIZE(input->nr));
+ ddbwritel(0, DMA_BUFFER_ACK(input->nr));
+
+ ddbwritel(1, DMA_BASE_WRITE);
+ ddbwritel(3, DMA_BUFFER_CONTROL(input->nr));
+ ddbwritel(9, TS_INPUT_CONTROL(input->nr));
+ input->running = 1;
+ spin_unlock_irq(&input->lock);
+}
+
+static void ddb_input_stop(struct ddb_input *input)
+{
+ struct ddb *dev = input->port->dev;
+
+ spin_lock_irq(&input->lock);
+ ddbwritel(0, TS_INPUT_CONTROL(input->nr));
+ ddbwritel(0, DMA_BUFFER_CONTROL(input->nr));
+ input->running = 0;
+ spin_unlock_irq(&input->lock);
+}
+
+static void ddb_output_start(struct ddb_output *output)
+{
+ struct ddb *dev = output->port->dev;
+
+ spin_lock_irq(&output->lock);
+ output->cbuf = 0;
+ output->coff = 0;
+ ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
+ ddbwritel(2, TS_OUTPUT_CONTROL(output->nr));
+ ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
+ ddbwritel(0x3c, TS_OUTPUT_CONTROL(output->nr));
+ ddbwritel((1 << 16) |
+ (output->dma_buf_num << 11) |
+ (output->dma_buf_size >> 7),
+ DMA_BUFFER_SIZE(output->nr + 8));
+ ddbwritel(0, DMA_BUFFER_ACK(output->nr + 8));
+
+ ddbwritel(1, DMA_BASE_READ);
+ ddbwritel(3, DMA_BUFFER_CONTROL(output->nr + 8));
+ /* ddbwritel(0xbd, TS_OUTPUT_CONTROL(output->nr)); */
+ ddbwritel(0x1d, TS_OUTPUT_CONTROL(output->nr));
+ output->running = 1;
+ spin_unlock_irq(&output->lock);
+}
+
+static void ddb_output_stop(struct ddb_output *output)
+{
+ struct ddb *dev = output->port->dev;
+
+ spin_lock_irq(&output->lock);
+ ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
+ ddbwritel(0, DMA_BUFFER_CONTROL(output->nr + 8));
+ output->running = 0;
+ spin_unlock_irq(&output->lock);
+}
+
+static u32 ddb_output_free(struct ddb_output *output)
+{
+ u32 idx, off, stat = output->stat;
+ s32 diff;
+
+ idx = (stat >> 11) & 0x1f;
+ off = (stat & 0x7ff) << 7;
+
+ if (output->cbuf != idx) {
+ if ((((output->cbuf + 1) % output->dma_buf_num) == idx) &&
+ (output->dma_buf_size - output->coff <= 188))
+ return 0;
+ return 188;
+ }
+ diff = off - output->coff;
+ if (diff <= 0 || diff > 188)
+ return 188;
+ return 0;
+}
+
+static ssize_t ddb_output_write(struct ddb_output *output,
+ const u8 *buf, size_t count)
+{
+ struct ddb *dev = output->port->dev;
+ u32 idx, off, stat = output->stat;
+ u32 left = count, len;
+
+ idx = (stat >> 11) & 0x1f;
+ off = (stat & 0x7ff) << 7;
+
+ while (left) {
+ len = output->dma_buf_size - output->coff;
+ if ((((output->cbuf + 1) % output->dma_buf_num) == idx) &&
+ (off == 0)) {
+ if (len <= 188)
+ break;
+ len -= 188;
+ }
+ if (output->cbuf == idx) {
+ if (off > output->coff) {
+#if 1
+ len = off - output->coff;
+ len -= (len % 188);
+ if (len <= 188)
+
+#endif
+ break;
+ len -= 188;
+ }
+ }
+ if (len > left)
+ len = left;
+ if (copy_from_user(output->vbuf[output->cbuf] + output->coff,
+ buf, len))
+ return -EIO;
+ left -= len;
+ buf += len;
+ output->coff += len;
+ if (output->coff == output->dma_buf_size) {
+ output->coff = 0;
+ output->cbuf = ((output->cbuf + 1) % output->dma_buf_num);
+ }
+ ddbwritel((output->cbuf << 11) | (output->coff >> 7),
+ DMA_BUFFER_ACK(output->nr + 8));
+ }
+ return count - left;
+}
+
+static u32 ddb_input_avail(struct ddb_input *input)
+{
+ struct ddb *dev = input->port->dev;
+ u32 idx, off, stat = input->stat;
+ u32 ctrl = ddbreadl(DMA_BUFFER_CONTROL(input->nr));
+
+ idx = (stat >> 11) & 0x1f;
+ off = (stat & 0x7ff) << 7;
+
+ if (ctrl & 4) {
+ printk(KERN_ERR "IA %d %d %08x\n", idx, off, ctrl);
+ ddbwritel(input->stat, DMA_BUFFER_ACK(input->nr));
+ return 0;
+ }
+ if (input->cbuf != idx)
+ return 188;
+ return 0;
+}
+
+static size_t ddb_input_read(struct ddb_input *input, u8 *buf, size_t count)
+{
+ struct ddb *dev = input->port->dev;
+ u32 left = count;
+ u32 idx, off, free, stat = input->stat;
+ int ret;
+
+ idx = (stat >> 11) & 0x1f;
+ off = (stat & 0x7ff) << 7;
+
+ while (left) {
+ if (input->cbuf == idx)
+ return count - left;
+ free = input->dma_buf_size - input->coff;
+ if (free > left)
+ free = left;
+ ret = copy_to_user(buf, input->vbuf[input->cbuf] +
+ input->coff, free);
+ input->coff += free;
+ if (input->coff == input->dma_buf_size) {
+ input->coff = 0;
+ input->cbuf = (input->cbuf+1) % input->dma_buf_num;
+ }
+ left -= free;
+ ddbwritel((input->cbuf << 11) | (input->coff >> 7),
+ DMA_BUFFER_ACK(input->nr));
+ }
+ return count;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/******************************************************************************/
+
+#if 0
+static struct ddb_input *fe2input(struct ddb *dev, struct dvb_frontend *fe)
+{
+ int i;
+
+ for (i = 0; i < dev->info->port_num * 2; i++) {
+ if (dev->input[i].fe == fe)
+ return &dev->input[i];
+ }
+ return NULL;
+}
+#endif
+
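+/*
+ * The two frontends of a port share one I2C bus. The wrapper holds the
+ * port's i2c_gate_lock from gate-open to gate-close so their tuner
+ * accesses cannot interleave.
+ */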
+static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct ddb_input *input = fe->sec_priv;
+ struct ddb_port *port = input->port;
+ int status;
+
+ if (enable) {
+ mutex_lock(&port->i2c_gate_lock);
+ status = input->gate_ctrl(fe, 1);
+ } else {
+ status = input->gate_ctrl(fe, 0);
+ mutex_unlock(&port->i2c_gate_lock);
+ }
+ return status;
+}
+
+static int demod_attach_drxk(struct ddb_input *input)
+{
+ struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct dvb_frontend *fe;
+ struct drxk_config config;
+
+ memset(&config, 0, sizeof(config));
+ config.adr = 0x29 + (input->nr & 1);
+
+ fe = input->fe = dvb_attach(drxk_attach, &config, i2c, &input->fe2);
+ if (!input->fe) {
+ printk(KERN_ERR "No DRXK found!\n");
+ return -ENODEV;
+ }
+ fe->sec_priv = input;
+ input->gate_ctrl = fe->ops.i2c_gate_ctrl;
+ fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
+ return 0;
+}
+
+static int tuner_attach_tda18271(struct ddb_input *input)
+{
+ struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct dvb_frontend *fe;
+
+ if (input->fe->ops.i2c_gate_ctrl)
+ input->fe->ops.i2c_gate_ctrl(input->fe, 1);
+ fe = dvb_attach(tda18271c2dd_attach, input->fe, i2c, 0x60);
+ if (!fe) {
+ printk(KERN_ERR "No TDA18271 found!\n");
+ return -ENODEV;
+ }
+ if (input->fe->ops.i2c_gate_ctrl)
+ input->fe->ops.i2c_gate_ctrl(input->fe, 0);
+ return 0;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/******************************************************************************/
+
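+/*
+ * The two STV0900 configurations differ only in the demodulator I2C
+ * address (0x69 vs. 0x68); ddb_port_probe() picks the variant that
+ * actually answers on the bus.
+ */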
+static struct stv090x_config stv0900 = {
+ .device = STV0900,
+ .demod_mode = STV090x_DUAL,
+ .clk_mode = STV090x_CLK_EXT,
+
+ .xtal = 27000000,
+ .address = 0x69,
+
+ .ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+ .ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+
+ .repeater_level = STV090x_RPTLEVEL_16,
+
+ .adc1_range = STV090x_ADC_1Vpp,
+ .adc2_range = STV090x_ADC_1Vpp,
+
+ .diseqc_envelope_mode = true,
+};
+
+static struct stv090x_config stv0900_aa = {
+ .device = STV0900,
+ .demod_mode = STV090x_DUAL,
+ .clk_mode = STV090x_CLK_EXT,
+
+ .xtal = 27000000,
+ .address = 0x68,
+
+ .ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+ .ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+
+ .repeater_level = STV090x_RPTLEVEL_16,
+
+ .adc1_range = STV090x_ADC_1Vpp,
+ .adc2_range = STV090x_ADC_1Vpp,
+
+ .diseqc_envelope_mode = true,
+};
+
+static struct stv6110x_config stv6110a = {
+ .addr = 0x60,
+ .refclk = 27000000,
+ .clk_div = 1,
+};
+
+static struct stv6110x_config stv6110b = {
+ .addr = 0x63,
+ .refclk = 27000000,
+ .clk_div = 1,
+};
+
+static int demod_attach_stv0900(struct ddb_input *input, int type)
+{
+ struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
+
+ input->fe = dvb_attach(stv090x_attach, feconf, i2c,
+ (input->nr & 1) ? STV090x_DEMODULATOR_1
+ : STV090x_DEMODULATOR_0);
+ if (!input->fe) {
+ printk(KERN_ERR "No STV0900 found!\n");
+ return -ENODEV;
+ }
+ if (!dvb_attach(lnbh24_attach, input->fe, i2c, 0,
+ 0, (input->nr & 1) ?
+ (0x09 - type) : (0x0b - type))) {
+ printk(KERN_ERR "No LNBH24 found!\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int tuner_attach_stv6110(struct ddb_input *input, int type)
+{
+ struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
+ struct stv6110x_config *tunerconf = (input->nr & 1) ?
+ &stv6110b : &stv6110a;
+ struct stv6110x_devctl *ctl;
+
+ ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c);
+ if (!ctl) {
+ printk(KERN_ERR "No STV6110X found!\n");
+ return -ENODEV;
+ }
+ printk(KERN_INFO "attach tuner input %d adr %02x\n",
+ input->nr, tunerconf->addr);
+
+ feconf->tuner_init = ctl->tuner_init;
+ feconf->tuner_sleep = ctl->tuner_sleep;
+ feconf->tuner_set_mode = ctl->tuner_set_mode;
+ feconf->tuner_set_frequency = ctl->tuner_set_frequency;
+ feconf->tuner_get_frequency = ctl->tuner_get_frequency;
+ feconf->tuner_set_bandwidth = ctl->tuner_set_bandwidth;
+ feconf->tuner_get_bandwidth = ctl->tuner_get_bandwidth;
+ feconf->tuner_set_bbgain = ctl->tuner_set_bbgain;
+ feconf->tuner_get_bbgain = ctl->tuner_get_bbgain;
+ feconf->tuner_set_refclk = ctl->tuner_set_refclk;
+ feconf->tuner_get_status = ctl->tuner_get_status;
+
+ return 0;
+}
+
+static int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
+ int (*start_feed)(struct dvb_demux_feed *),
+ int (*stop_feed)(struct dvb_demux_feed *),
+ void *priv)
+{
+ dvbdemux->priv = priv;
+
+ dvbdemux->filternum = 256;
+ dvbdemux->feednum = 256;
+ dvbdemux->start_feed = start_feed;
+ dvbdemux->stop_feed = stop_feed;
+ dvbdemux->write_to_decoder = NULL;
+ dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING);
+ return dvb_dmx_init(dvbdemux);
+}
+
+static int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
+ struct dvb_demux *dvbdemux,
+ struct dmx_frontend *hw_frontend,
+ struct dmx_frontend *mem_frontend,
+ struct dvb_adapter *dvb_adapter)
+{
+ int ret;
+
+ dmxdev->filternum = 256;
+ dmxdev->demux = &dvbdemux->dmx;
+ dmxdev->capabilities = 0;
+ ret = dvb_dmxdev_init(dmxdev, dvb_adapter);
+ if (ret < 0)
+ return ret;
+
+ hw_frontend->source = DMX_FRONTEND_0;
+ dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend);
+ mem_frontend->source = DMX_MEMORY_FE;
+ dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend);
+ return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend);
+}
+
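+/*
+ * start_feed()/stop_feed() reference-count demux users per input and
+ * start or stop the DMA engine only for the first and last user.
+ */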
+static int start_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct ddb_input *input = dvbdmx->priv;
+
+ if (!input->users)
+ ddb_input_start(input);
+
+ return ++input->users;
+}
+
+static int stop_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct ddb_input *input = dvbdmx->priv;
+
+ if (--input->users)
+ return input->users;
+
+ ddb_input_stop(input);
+ return 0;
+}
+
+
+static void dvb_input_detach(struct ddb_input *input)
+{
+ struct dvb_adapter *adap = &input->adap;
+ struct dvb_demux *dvbdemux = &input->demux;
+
+ switch (input->attached) {
+ case 5:
+ if (input->fe2)
+ dvb_unregister_frontend(input->fe2);
+ if (input->fe) {
+ dvb_unregister_frontend(input->fe);
+ dvb_frontend_detach(input->fe);
+ input->fe = NULL;
+ }
+ case 4:
+ dvb_net_release(&input->dvbnet);
+
+ case 3:
+ dvbdemux->dmx.close(&dvbdemux->dmx);
+ dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
+ &input->hw_frontend);
+ dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
+ &input->mem_frontend);
+ dvb_dmxdev_release(&input->dmxdev);
+
+ case 2:
+ dvb_dmx_release(&input->demux);
+
+ case 1:
+ dvb_unregister_adapter(adap);
+ }
+ input->attached = 0;
+}
+
+static int dvb_input_attach(struct ddb_input *input)
+{
+ int ret;
+ struct ddb_port *port = input->port;
+ struct dvb_adapter *adap = &input->adap;
+ struct dvb_demux *dvbdemux = &input->demux;
+
+ ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE,
+ &input->port->dev->pdev->dev,
+ adapter_nr);
+ if (ret < 0) {
+ printk(KERN_ERR "ddbridge: Could not register adapter."
+ "Check if you enabled enough adapters in dvb-core!\n");
+ return ret;
+ }
+ input->attached = 1;
+
+ ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
+ start_feed,
+ stop_feed, input);
+ if (ret < 0)
+ return ret;
+ input->attached = 2;
+
+ ret = my_dvb_dmxdev_ts_card_init(&input->dmxdev, &input->demux,
+ &input->hw_frontend,
+ &input->mem_frontend, adap);
+ if (ret < 0)
+ return ret;
+ input->attached = 3;
+
+ ret = dvb_net_init(adap, &input->dvbnet, input->dmxdev.demux);
+ if (ret < 0)
+ return ret;
+ input->attached = 4;
+
+ input->fe = 0;
+ switch (port->type) {
+ case DDB_TUNER_DVBS_ST:
+ if (demod_attach_stv0900(input, 0) < 0)
+ return -ENODEV;
+ if (tuner_attach_stv6110(input, 0) < 0)
+ return -ENODEV;
+ if (input->fe) {
+ if (dvb_register_frontend(adap, input->fe) < 0)
+ return -ENODEV;
+ }
+ break;
+ case DDB_TUNER_DVBS_ST_AA:
+ if (demod_attach_stv0900(input, 1) < 0)
+ return -ENODEV;
+ if (tuner_attach_stv6110(input, 1) < 0)
+ return -ENODEV;
+ if (input->fe) {
+ if (dvb_register_frontend(adap, input->fe) < 0)
+ return -ENODEV;
+ }
+ break;
+ case DDB_TUNER_DVBCT_TR:
+ if (demod_attach_drxk(input) < 0)
+ return -ENODEV;
+ if (tuner_attach_tda18271(input) < 0)
+ return -ENODEV;
+ if (input->fe) {
+ if (dvb_register_frontend(adap, input->fe) < 0)
+ return -ENODEV;
+ }
+ if (input->fe2) {
+ if (dvb_register_frontend(adap, input->fe2) < 0)
+ return -ENODEV;
+ input->fe2->tuner_priv = input->fe->tuner_priv;
+ memcpy(&input->fe2->ops.tuner_ops,
+ &input->fe->ops.tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ }
+ break;
+ }
+ input->attached = 5;
+ return 0;
+}
+
+/****************************************************************************/
+/****************************************************************************/
+
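+/*
+ * CI transport stream device: ts_write() pushes TS packets into the CI
+ * port's output DMA ring, ts_read() pulls packets from the paired input.
+ * Both wait for at least one full 188-byte packet unless O_NONBLOCK is set.
+ */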
+static ssize_t ts_write(struct file *file, const char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct ddb_output *output = dvbdev->priv;
+ size_t left = count;
+ int stat;
+
+ while (left) {
+ if (ddb_output_free(output) < 188) {
+ if (file->f_flags & O_NONBLOCK)
+ break;
+ if (wait_event_interruptible(
+ output->wq, ddb_output_free(output) >= 188) < 0)
+ break;
+ }
+ stat = ddb_output_write(output, buf, left);
+ if (stat < 0)
+ break;
+ buf += stat;
+ left -= stat;
+ }
+ return (left == count) ? -EAGAIN : (count - left);
+}
+
+static ssize_t ts_read(struct file *file, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct ddb_output *output = dvbdev->priv;
+ struct ddb_input *input = output->port->input[0];
+ int left, read;
+
+ count -= count % 188;
+ left = count;
+ while (left) {
+ if (ddb_input_avail(input) < 188) {
+ if (file->f_flags & O_NONBLOCK)
+ break;
+ if (wait_event_interruptible(
+ input->wq, ddb_input_avail(input) >= 188) < 0)
+ break;
+ }
+ read = ddb_input_read(input, buf, left);
+ left -= read;
+ buf += read;
+ }
+ return (left == count) ? -EAGAIN : (count - left);
+}
+
+static unsigned int ts_poll(struct file *file, poll_table *wait)
+{
+ /*
+ struct dvb_device *dvbdev = file->private_data;
+ struct ddb_output *output = dvbdev->priv;
+ struct ddb_input *input = output->port->input[0];
+ */
+ unsigned int mask = 0;
+
+#if 0
+ if (data_avail_to_read)
+ mask |= POLLIN | POLLRDNORM;
+ if (data_avail_to_write)
+ mask |= POLLOUT | POLLWRNORM;
+
+ poll_wait(file, &read_queue, wait);
+ poll_wait(file, &write_queue, wait);
+#endif
+ return mask;
+}
+
+static const struct file_operations ci_fops = {
+ .owner = THIS_MODULE,
+ .read = ts_read,
+ .write = ts_write,
+ .open = dvb_generic_open,
+ .release = dvb_generic_release,
+ .poll = ts_poll,
+ .mmap = 0,
+};
+
+static struct dvb_device dvbdev_ci = {
+ .priv = 0,
+ .readers = -1,
+ .writers = -1,
+ .users = -1,
+ .fops = &ci_fops,
+};
+
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
+
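+/*
+ * Per-stream tasklets, scheduled from the interrupt handler: for tuner
+ * ports the input tasklet feeds completed DMA buffers to the software
+ * demux, for CI ports it only wakes up readers. The output tasklet wakes
+ * up writers once the hardware has consumed more data.
+ */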
+static void input_tasklet(unsigned long data)
+{
+ struct ddb_input *input = (struct ddb_input *) data;
+ struct ddb *dev = input->port->dev;
+
+ spin_lock(&input->lock);
+ if (!input->running) {
+ spin_unlock(&input->lock);
+ return;
+ }
+ input->stat = ddbreadl(DMA_BUFFER_CURRENT(input->nr));
+
+ if (input->port->class == DDB_PORT_TUNER) {
+ if (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))
+ printk(KERN_ERR "Overflow input %d\n", input->nr);
+ while (input->cbuf != ((input->stat >> 11) & 0x1f)
+ || (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))) {
+ dvb_dmx_swfilter_packets(&input->demux,
+ input->vbuf[input->cbuf],
+ input->dma_buf_size / 188);
+
+ input->cbuf = (input->cbuf + 1) % input->dma_buf_num;
+ ddbwritel((input->cbuf << 11),
+ DMA_BUFFER_ACK(input->nr));
+ input->stat = ddbreadl(DMA_BUFFER_CURRENT(input->nr));
+ }
+ }
+ if (input->port->class == DDB_PORT_CI)
+ wake_up(&input->wq);
+ spin_unlock(&input->lock);
+}
+
+static void output_tasklet(unsigned long data)
+{
+ struct ddb_output *output = (struct ddb_output *) data;
+ struct ddb *dev = output->port->dev;
+
+ spin_lock(&output->lock);
+ if (!output->running) {
+ spin_unlock(&output->lock);
+ return;
+ }
+ output->stat = ddbreadl(DMA_BUFFER_CURRENT(output->nr + 8));
+ wake_up(&output->wq);
+ spin_unlock(&output->lock);
+}
+
+
+struct cxd2099_cfg cxd_cfg = {
+ .bitrate = 62000,
+ .adr = 0x40,
+ .polarity = 1,
+ .clock_mode = 1,
+};
+
+static int ddb_ci_attach(struct ddb_port *port)
+{
+ int ret;
+
+ ret = dvb_register_adapter(&port->output->adap,
+ "DDBridge",
+ THIS_MODULE,
+ &port->dev->pdev->dev,
+ adapter_nr);
+ if (ret < 0)
+ return ret;
+ port->en = cxd2099_attach(&cxd_cfg, port, &port->i2c->adap);
+ if (!port->en) {
+ dvb_unregister_adapter(&port->output->adap);
+ return -ENODEV;
+ }
+ ddb_input_start(port->input[0]);
+ ddb_output_start(port->output);
+ dvb_ca_en50221_init(&port->output->adap,
+ port->en, 0, 1);
+ ret = dvb_register_device(&port->output->adap, &port->output->dev,
+ &dvbdev_ci, (void *) port->output,
+ DVB_DEVICE_SEC);
+ return ret;
+}
+
+static int ddb_port_attach(struct ddb_port *port)
+{
+ int ret = 0;
+
+ switch (port->class) {
+ case DDB_PORT_TUNER:
+ ret = dvb_input_attach(port->input[0]);
+ if (ret < 0)
+ break;
+ ret = dvb_input_attach(port->input[1]);
+ break;
+ case DDB_PORT_CI:
+ ret = ddb_ci_attach(port);
+ break;
+ default:
+ break;
+ }
+ if (ret < 0)
+ printk(KERN_ERR "port_attach on port %d failed\n", port->nr);
+ return ret;
+}
+
+static int ddb_ports_attach(struct ddb *dev)
+{
+ int i, ret = 0;
+ struct ddb_port *port;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ port = &dev->port[i];
+ ret = ddb_port_attach(port);
+ if (ret < 0)
+ break;
+ }
+ return ret;
+}
+
+static void ddb_ports_detach(struct ddb *dev)
+{
+ int i;
+ struct ddb_port *port;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ port = &dev->port[i];
+ switch (port->class) {
+ case DDB_PORT_TUNER:
+ dvb_input_detach(port->input[0]);
+ dvb_input_detach(port->input[1]);
+ break;
+ case DDB_PORT_CI:
+ if (port->output->dev)
+ dvb_unregister_device(port->output->dev);
+ if (port->en) {
+ ddb_input_stop(port->input[0]);
+ ddb_output_stop(port->output);
+ dvb_ca_en50221_release(port->en);
+ kfree(port->en);
+ port->en = 0;
+ dvb_unregister_adapter(&port->output->adap);
+ }
+ break;
+ }
+ }
+}
+
+/****************************************************************************/
+/****************************************************************************/
+
+static int port_has_ci(struct ddb_port *port)
+{
+ u8 val;
+ return i2c_read_reg(&port->i2c->adap, 0x40, 0, &val) ? 0 : 1;
+}
+
+static int port_has_stv0900(struct ddb_port *port)
+{
+ u8 val;
+ if (i2c_read_reg16(&port->i2c->adap, 0x69, 0xf100, &val) < 0)
+ return 0;
+ return 1;
+}
+
+static int port_has_stv0900_aa(struct ddb_port *port)
+{
+ u8 val;
+ if (i2c_read_reg16(&port->i2c->adap, 0x68, 0xf100, &val) < 0)
+ return 0;
+ return 1;
+}
+
+static int port_has_drxks(struct ddb_port *port)
+{
+ u8 val;
+ if (i2c_read(&port->i2c->adap, 0x29, &val) < 0)
+ return 0;
+ if (i2c_read(&port->i2c->adap, 0x2a, &val) < 0)
+ return 0;
+ return 1;
+}
+
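+/*
+ * Port type detection works purely by probing I2C addresses of known
+ * chips: a CI controller at 0x40, an STV0900 at 0x69 or 0x68, or a pair
+ * of DRX-K demodulators at 0x29/0x2a.
+ */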
+static void ddb_port_probe(struct ddb_port *port)
+{
+ struct ddb *dev = port->dev;
+ char *modname = "NO MODULE";
+
+ port->class = DDB_PORT_NONE;
+
+ if (port_has_ci(port)) {
+ modname = "CI";
+ port->class = DDB_PORT_CI;
+ ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
+ } else if (port_has_stv0900(port)) {
+ modname = "DUAL DVB-S2";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_DVBS_ST;
+ ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
+ } else if (port_has_stv0900_aa(port)) {
+ modname = "DUAL DVB-S2";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_DVBS_ST_AA;
+ ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
+ } else if (port_has_drxks(port)) {
+ modname = "DUAL DVB-C/T";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_DVBCT_TR;
+ ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
+ }
+ printk(KERN_INFO "Port %d (TAB %d): %s\n",
+ port->nr, port->nr+1, modname);
+}
+
+static void ddb_input_init(struct ddb_port *port, int nr)
+{
+ struct ddb *dev = port->dev;
+ struct ddb_input *input = &dev->input[nr];
+
+ input->nr = nr;
+ input->port = port;
+ input->dma_buf_num = INPUT_DMA_BUFS;
+ input->dma_buf_size = INPUT_DMA_SIZE;
+ ddbwritel(0, TS_INPUT_CONTROL(nr));
+ ddbwritel(2, TS_INPUT_CONTROL(nr));
+ ddbwritel(0, TS_INPUT_CONTROL(nr));
+ ddbwritel(0, DMA_BUFFER_ACK(nr));
+ tasklet_init(&input->tasklet, input_tasklet, (unsigned long) input);
+ spin_lock_init(&input->lock);
+ init_waitqueue_head(&input->wq);
+}
+
+static void ddb_output_init(struct ddb_port *port, int nr)
+{
+ struct ddb *dev = port->dev;
+ struct ddb_output *output = &dev->output[nr];
+ output->nr = nr;
+ output->port = port;
+ output->dma_buf_num = OUTPUT_DMA_BUFS;
+ output->dma_buf_size = OUTPUT_DMA_SIZE;
+
+ ddbwritel(0, TS_OUTPUT_CONTROL(nr));
+ ddbwritel(2, TS_OUTPUT_CONTROL(nr));
+ ddbwritel(0, TS_OUTPUT_CONTROL(nr));
+ tasklet_init(&output->tasklet, output_tasklet, (unsigned long) output);
+ init_waitqueue_head(&output->wq);
+}
+
+static void ddb_ports_init(struct ddb *dev)
+{
+ int i;
+ struct ddb_port *port;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ port = &dev->port[i];
+ port->dev = dev;
+ port->nr = i;
+ port->i2c = &dev->i2c[i];
+ port->input[0] = &dev->input[2 * i];
+ port->input[1] = &dev->input[2 * i + 1];
+ port->output = &dev->output[i];
+
+ mutex_init(&port->i2c_gate_lock);
+ ddb_port_probe(port);
+ ddb_input_init(port, 2 * i);
+ ddb_input_init(port, 2 * i + 1);
+ ddb_output_init(port, i);
+ }
+}
+
+static void ddb_ports_release(struct ddb *dev)
+{
+ int i;
+ struct ddb_port *port;
+
+ for (i = 0; i < dev->info->port_num; i++) {
+ port = &dev->port[i];
+ port->dev = dev;
+ tasklet_kill(&port->input[0]->tasklet);
+ tasklet_kill(&port->input[1]->tasklet);
+ tasklet_kill(&port->output->tasklet);
+ }
+}
+
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
+
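+/*
+ * Interrupt dispatch: bits 0-3 of INTERRUPT_STATUS belong to the four I2C
+ * masters, bits 8-15 to the TS inputs and bits 16-19 to the TS outputs
+ * (see the INTMASK_* definitions in ddbridge-regs.h).
+ */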
+static void irq_handle_i2c(struct ddb *dev, int n)
+{
+ struct ddb_i2c *i2c = &dev->i2c[n];
+
+ i2c->done = 1;
+ wake_up(&i2c->wq);
+}
+
+static irqreturn_t irq_handler(int irq, void *dev_id)
+{
+ struct ddb *dev = (struct ddb *) dev_id;
+ u32 s = ddbreadl(INTERRUPT_STATUS);
+
+ if (!s)
+ return IRQ_NONE;
+
+ do {
+ ddbwritel(s, INTERRUPT_ACK);
+
+ if (s & 0x00000001)
+ irq_handle_i2c(dev, 0);
+ if (s & 0x00000002)
+ irq_handle_i2c(dev, 1);
+ if (s & 0x00000004)
+ irq_handle_i2c(dev, 2);
+ if (s & 0x00000008)
+ irq_handle_i2c(dev, 3);
+
+ if (s & 0x00000100)
+ tasklet_schedule(&dev->input[0].tasklet);
+ if (s & 0x00000200)
+ tasklet_schedule(&dev->input[1].tasklet);
+ if (s & 0x00000400)
+ tasklet_schedule(&dev->input[2].tasklet);
+ if (s & 0x00000800)
+ tasklet_schedule(&dev->input[3].tasklet);
+ if (s & 0x00001000)
+ tasklet_schedule(&dev->input[4].tasklet);
+ if (s & 0x00002000)
+ tasklet_schedule(&dev->input[5].tasklet);
+ if (s & 0x00004000)
+ tasklet_schedule(&dev->input[6].tasklet);
+ if (s & 0x00008000)
+ tasklet_schedule(&dev->input[7].tasklet);
+
+ if (s & 0x00010000)
+ tasklet_schedule(&dev->output[0].tasklet);
+ if (s & 0x00020000)
+ tasklet_schedule(&dev->output[1].tasklet);
+ if (s & 0x00040000)
+ tasklet_schedule(&dev->output[2].tasklet);
+ if (s & 0x00080000)
+ tasklet_schedule(&dev->output[3].tasklet);
+
+ /* if (s & 0x000f0000) printk(KERN_DEBUG "%08x\n", istat); */
+ } while ((s = ddbreadl(INTERRUPT_STATUS)));
+
+ return IRQ_HANDLED;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/******************************************************************************/
+
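+/*
+ * flashio() clocks a command out to the SPI flash 32 bits at a time and
+ * optionally reads rlen bytes back; bit 0x0004 of SPI_CONTROL is polled
+ * until it clears between words.
+ */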
+static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
+{
+ u32 data, shift;
+
+ if (wlen > 4)
+ ddbwritel(1, SPI_CONTROL);
+ while (wlen > 4) {
+ /* FIXME: check for big-endian */
+ data = swab32(*(u32 *)wbuf);
+ wbuf += 4;
+ wlen -= 4;
+ ddbwritel(data, SPI_DATA);
+ while (ddbreadl(SPI_CONTROL) & 0x0004)
+ ;
+ }
+
+ if (rlen)
+ ddbwritel(0x0001 | ((wlen << (8 + 3)) & 0x1f00), SPI_CONTROL);
+ else
+ ddbwritel(0x0003 | ((wlen << (8 + 3)) & 0x1f00), SPI_CONTROL);
+
+ data = 0;
+ shift = ((4 - wlen) * 8);
+ while (wlen) {
+ data <<= 8;
+ data |= *wbuf;
+ wlen--;
+ wbuf++;
+ }
+ if (shift)
+ data <<= shift;
+ ddbwritel(data, SPI_DATA);
+ while (ddbreadl(SPI_CONTROL) & 0x0004)
+ ;
+
+ if (!rlen) {
+ ddbwritel(0, SPI_CONTROL);
+ return 0;
+ }
+ if (rlen > 4)
+ ddbwritel(1, SPI_CONTROL);
+
+ while (rlen > 4) {
+ ddbwritel(0xffffffff, SPI_DATA);
+ while (ddbreadl(SPI_CONTROL) & 0x0004)
+ ;
+ data = ddbreadl(SPI_DATA);
+ *(u32 *) rbuf = swab32(data);
+ rbuf += 4;
+ rlen -= 4;
+ }
+ ddbwritel(0x0003 | ((rlen << (8 + 3)) & 0x1F00), SPI_CONTROL);
+ ddbwritel(0xffffffff, SPI_DATA);
+ while (ddbreadl(SPI_CONTROL) & 0x0004)
+ ;
+
+ data = ddbreadl(SPI_DATA);
+ ddbwritel(0, SPI_CONTROL);
+
+ if (rlen < 4)
+ data <<= ((4 - rlen) * 8);
+
+ while (rlen > 0) {
+ *rbuf = ((data >> 24) & 0xff);
+ data <<= 8;
+ rbuf++;
+ rlen--;
+ }
+ return 0;
+}
+
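+/*
+ * Character device interface (/dev/ddbridge/cardN): IOCTL_DDB_FLASHIO
+ * runs one SPI flash transaction, copying write_len bytes from user
+ * space, calling flashio() and returning read_len bytes of response.
+ */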
+#define DDB_MAGIC 'd'
+
+struct ddb_flashio {
+ __u8 *write_buf;
+ __u32 write_len;
+ __u8 *read_buf;
+ __u32 read_len;
+};
+
+#define IOCTL_DDB_FLASHIO _IOWR(DDB_MAGIC, 0x00, struct ddb_flashio)
+
+#define DDB_NAME "ddbridge"
+
+static u32 ddb_num;
+static struct ddb *ddbs[32];
+static struct class *ddb_class;
+static int ddb_major;
+
+static int ddb_open(struct inode *inode, struct file *file)
+{
+ struct ddb *dev = ddbs[iminor(inode)];
+
+ file->private_data = dev;
+ return 0;
+}
+
+static long ddb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ddb *dev = file->private_data;
+ void *parg = (void *)arg;
+ int res = -EFAULT;
+
+ switch (cmd) {
+ case IOCTL_DDB_FLASHIO:
+ {
+ struct ddb_flashio fio;
+ u8 *rbuf, *wbuf;
+
+ if (copy_from_user(&fio, parg, sizeof(fio)))
+ break;
+ if (fio.write_len > sizeof(dev->iobuf) ||
+ fio.read_len > sizeof(dev->iobuf) ||
+ fio.write_len + fio.read_len > sizeof(dev->iobuf)) {
+ printk(KERN_ERR "IOBUF too small\n");
+ return -ENOMEM;
+ }
+ wbuf = &dev->iobuf[0];
+ rbuf = wbuf + fio.write_len;
+ /* iobuf is embedded in struct ddb, nothing to free on error */
+ if (copy_from_user(wbuf, fio.write_buf, fio.write_len))
+ break;
+ res = flashio(dev, wbuf, fio.write_len,
+ rbuf, fio.read_len);
+ if (copy_to_user(fio.read_buf, rbuf, fio.read_len))
+ res = -EFAULT;
+ break;
+ }
+ default:
+ break;
+ }
+ return res;
+}
+
+static const struct file_operations ddb_fops = {
+ .unlocked_ioctl = ddb_ioctl,
+ .open = ddb_open,
+};
+
+static char *ddb_devnode(struct device *device, mode_t *mode)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+
+ return kasprintf(GFP_KERNEL, "ddbridge/card%d", dev->nr);
+}
+
+static int ddb_class_create(void)
+{
+ ddb_major = register_chrdev(0, DDB_NAME, &ddb_fops);
+ if (ddb_major < 0)
+ return ddb_major;
+
+ ddb_class = class_create(THIS_MODULE, DDB_NAME);
+ if (IS_ERR(ddb_class)) {
+ unregister_chrdev(ddb_major, DDB_NAME);
+ return -1;
+ }
+ ddb_class->devnode = ddb_devnode;
+ return 0;
+}
+
+static void ddb_class_destroy(void)
+{
+ class_destroy(ddb_class);
+ unregister_chrdev(ddb_major, DDB_NAME);
+}
+
+static int ddb_device_create(struct ddb *dev)
+{
+ dev->nr = ddb_num++;
+ dev->ddb_dev = device_create(ddb_class, NULL,
+ MKDEV(ddb_major, dev->nr),
+ dev, "ddbridge%d", dev->nr);
+ ddbs[dev->nr] = dev;
+ if (IS_ERR(dev->ddb_dev))
+ return -1;
+ return 0;
+}
+
+static void ddb_device_destroy(struct ddb *dev)
+{
+ ddb_num--;
+ if (IS_ERR(dev->ddb_dev))
+ return;
+ device_destroy(ddb_class, MKDEV(ddb_major, dev->nr));
+}
+
+
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
+
+static void ddb_unmap(struct ddb *dev)
+{
+ if (dev->regs)
+ iounmap(dev->regs);
+ vfree(dev);
+}
+
+
+static void __devexit ddb_remove(struct pci_dev *pdev)
+{
+ struct ddb *dev = (struct ddb *) pci_get_drvdata(pdev);
+
+ ddb_ports_detach(dev);
+ ddb_i2c_release(dev);
+
+ ddbwritel(0, INTERRUPT_ENABLE);
+ free_irq(dev->pdev->irq, dev);
+#ifdef CONFIG_PCI_MSI
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+#endif
+ ddb_ports_release(dev);
+ ddb_buffers_free(dev);
+ ddb_device_destroy(dev);
+
+ ddb_unmap(dev);
+ pci_set_drvdata(pdev, 0);
+ pci_disable_device(pdev);
+}
+
+
+static int __devinit ddb_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct ddb *dev;
+ int stat = 0;
+ int irq_flag = IRQF_SHARED;
+
+ if (pci_enable_device(pdev) < 0)
+ return -ENODEV;
+
+ dev = vmalloc(sizeof(struct ddb));
+ if (dev == NULL)
+ return -ENOMEM;
+ memset(dev, 0, sizeof(struct ddb));
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+ dev->info = (struct ddb_info *) id->driver_data;
+ printk(KERN_INFO "DDBridge driver detected: %s\n", dev->info->name);
+
+ dev->regs = ioremap(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+ if (!dev->regs) {
+ stat = -ENOMEM;
+ goto fail;
+ }
+ printk(KERN_INFO "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4));
+
+#ifdef CONFIG_PCI_MSI
+ if (pci_msi_enabled()) {
+ stat = pci_enable_msi(dev->pdev);
+ if (stat) {
+ printk(KERN_INFO "ddbridge: MSI not available.\n");
+ } else {
+ irq_flag = 0;
+ dev->msi = 1;
+ }
+ }
+#endif
+ stat = request_irq(dev->pdev->irq, irq_handler,
+ irq_flag, "DDBridge", (void *) dev);
+ if (stat < 0)
+ goto fail1;
+ ddbwritel(0, DMA_BASE_WRITE);
+ ddbwritel(0, DMA_BASE_READ);
+ ddbwritel(0xffffffff, INTERRUPT_ACK);
+ ddbwritel(0xfff0f, INTERRUPT_ENABLE);
+ ddbwritel(0, MSI1_ENABLE);
+
+ if (ddb_i2c_init(dev) < 0)
+ goto fail1;
+ ddb_ports_init(dev);
+ if (ddb_buffers_alloc(dev) < 0) {
+ printk(KERN_INFO ": Could not allocate buffer memory\n");
+ goto fail2;
+ }
+ if (ddb_ports_attach(dev) < 0)
+ goto fail3;
+ ddb_device_create(dev);
+ return 0;
+
+fail3:
+ ddb_ports_detach(dev);
+ printk(KERN_ERR "fail3\n");
+ ddb_ports_release(dev);
+fail2:
+ printk(KERN_ERR "fail2\n");
+ ddb_buffers_free(dev);
+fail1:
+ printk(KERN_ERR "fail1\n");
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+ if (stat == 0)
+ free_irq(dev->pdev->irq, dev);
+fail:
+ printk(KERN_ERR "fail\n");
+ ddb_unmap(dev);
+ pci_set_drvdata(pdev, 0);
+ pci_disable_device(pdev);
+ return -1;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/******************************************************************************/
+
+static struct ddb_info ddb_none = {
+ .type = DDB_NONE,
+ .name = "Digital Devices PCIe bridge",
+};
+
+static struct ddb_info ddb_octopus = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Octopus DVB adapter",
+ .port_num = 4,
+};
+
+static struct ddb_info ddb_octopus_le = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Octopus LE DVB adapter",
+ .port_num = 2,
+};
+
+static struct ddb_info ddb_v6 = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Cine S2 V6 DVB adapter",
+ .port_num = 3,
+};
+
+#define DDVID 0xdd01 /* Digital Devices Vendor ID */
+
+#define DDB_ID(_vend, _dev, _subvend, _subdev, _driverdata) { \
+ .vendor = _vend, .device = _dev, \
+ .subvendor = _subvend, .subdevice = _subdev, \
+ .driver_data = (unsigned long)&_driverdata }
+
+static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
+ DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
+ DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
+ DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
+ DDB_ID(DDVID, 0x0003, DDVID, 0x0010, ddb_octopus),
+ DDB_ID(DDVID, 0x0003, DDVID, 0x0020, ddb_v6),
+ /* in case sub-ids got deleted in flash */
+ DDB_ID(DDVID, 0x0003, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, ddb_id_tbl);
+
+
+static struct pci_driver ddb_pci_driver = {
+ .name = "DDBridge",
+ .id_table = ddb_id_tbl,
+ .probe = ddb_probe,
+ .remove = ddb_remove,
+};
+
+static __init int module_init_ddbridge(void)
+{
+ printk(KERN_INFO "Digital Devices PCIE bridge driver, "
+ "Copyright (C) 2010-11 Digital Devices GmbH\n");
+ if (ddb_class_create())
+ return -1;
+ return pci_register_driver(&ddb_pci_driver);
+}
+
+static __exit void module_exit_ddbridge(void)
+{
+ pci_unregister_driver(&ddb_pci_driver);
+ ddb_class_destroy();
+}
+
+module_init(module_init_ddbridge);
+module_exit(module_exit_ddbridge);
+
+MODULE_DESCRIPTION("Digital Devices PCIe Bridge");
+MODULE_AUTHOR("Ralph Metzler");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.5");
diff --git a/drivers/media/dvb/ddbridge/ddbridge-regs.h b/drivers/media/dvb/ddbridge/ddbridge-regs.h
new file mode 100644
index 00000000000..a3ccb318b50
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/ddbridge-regs.h
@@ -0,0 +1,151 @@
+/*
+ * ddbridge-regs.h: Digital Devices PCIe bridge driver
+ *
+ * Copyright (C) 2010-2011 Digital Devices GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+/* DD-DVBBridgeV1.h 273 2010-09-17 05:03:16Z manfred */
+
+/* Register Definitions */
+
+#define CUR_REGISTERMAP_VERSION 0x10000
+
+#define HARDWARE_VERSION 0x00
+#define REGISTERMAP_VERSION 0x04
+
+/* ------------------------------------------------------------------------- */
+/* SPI Controller */
+
+#define SPI_CONTROL 0x10
+#define SPI_DATA 0x14
+
+/* ------------------------------------------------------------------------- */
+
+/* Interrupt controller */
+/* How many MSI's are available depends on HW (Min 2 max 8) */
+/* How many are usable also depends on Host platform */
+
+#define INTERRUPT_BASE (0x40)
+
+#define INTERRUPT_ENABLE (INTERRUPT_BASE + 0x00)
+#define MSI0_ENABLE (INTERRUPT_BASE + 0x00)
+#define MSI1_ENABLE (INTERRUPT_BASE + 0x04)
+#define MSI2_ENABLE (INTERRUPT_BASE + 0x08)
+#define MSI3_ENABLE (INTERRUPT_BASE + 0x0C)
+#define MSI4_ENABLE (INTERRUPT_BASE + 0x10)
+#define MSI5_ENABLE (INTERRUPT_BASE + 0x14)
+#define MSI6_ENABLE (INTERRUPT_BASE + 0x18)
+#define MSI7_ENABLE (INTERRUPT_BASE + 0x1C)
+
+#define INTERRUPT_STATUS (INTERRUPT_BASE + 0x20)
+#define INTERRUPT_ACK (INTERRUPT_BASE + 0x20)
+
+#define INTMASK_I2C1 (0x00000001)
+#define INTMASK_I2C2 (0x00000002)
+#define INTMASK_I2C3 (0x00000004)
+#define INTMASK_I2C4 (0x00000008)
+
+#define INTMASK_CIRQ1 (0x00000010)
+#define INTMASK_CIRQ2 (0x00000020)
+#define INTMASK_CIRQ3 (0x00000040)
+#define INTMASK_CIRQ4 (0x00000080)
+
+#define INTMASK_TSINPUT1 (0x00000100)
+#define INTMASK_TSINPUT2 (0x00000200)
+#define INTMASK_TSINPUT3 (0x00000400)
+#define INTMASK_TSINPUT4 (0x00000800)
+#define INTMASK_TSINPUT5 (0x00001000)
+#define INTMASK_TSINPUT6 (0x00002000)
+#define INTMASK_TSINPUT7 (0x00004000)
+#define INTMASK_TSINPUT8 (0x00008000)
+
+#define INTMASK_TSOUTPUT1 (0x00010000)
+#define INTMASK_TSOUTPUT2 (0x00020000)
+#define INTMASK_TSOUTPUT3 (0x00040000)
+#define INTMASK_TSOUTPUT4 (0x00080000)
+
+/* ------------------------------------------------------------------------- */
+/* I2C Master Controller */
+
+#define I2C_BASE (0x80) /* Byte offset */
+
+#define I2C_COMMAND (0x00)
+#define I2C_TIMING (0x04)
+#define I2C_TASKLENGTH (0x08) /* High read, low write */
+#define I2C_TASKADDRESS (0x0C) /* High read, low write */
+
+#define I2C_MONITOR (0x1C)
+
+#define I2C_BASE_1 (I2C_BASE + 0x00)
+#define I2C_BASE_2 (I2C_BASE + 0x20)
+#define I2C_BASE_3 (I2C_BASE + 0x40)
+#define I2C_BASE_4 (I2C_BASE + 0x60)
+
+#define I2C_BASE_N(i) (I2C_BASE + (i) * 0x20)
+
+#define I2C_TASKMEM_BASE (0x1000) /* Byte offset */
+#define I2C_TASKMEM_SIZE (0x1000)
+
+#define I2C_SPEED_400 (0x04030404)
+#define I2C_SPEED_200 (0x09080909)
+#define I2C_SPEED_154 (0x0C0B0C0C)
+#define I2C_SPEED_100 (0x13121313)
+#define I2C_SPEED_77 (0x19181919)
+#define I2C_SPEED_50 (0x27262727)
+
+
+/* ------------------------------------------------------------------------- */
+/* DMA Controller */
+
+#define DMA_BASE_WRITE (0x100)
+#define DMA_BASE_READ (0x140)
+
+#define DMA_CONTROL (0x00) /* 64 */
+#define DMA_ERROR (0x04) /* 65 ( only read instance ) */
+
+#define DMA_DIAG_CONTROL (0x1C) /* 71 */
+#define DMA_DIAG_PACKETCOUNTER_LOW (0x20) /* 72 */
+#define DMA_DIAG_PACKETCOUNTER_HIGH (0x24) /* 73 */
+#define DMA_DIAG_TIMECOUNTER_LOW (0x28) /* 74 */
+#define DMA_DIAG_TIMECOUNTER_HIGH (0x2C) /* 75 */
+#define DMA_DIAG_RECHECKCOUNTER (0x30) /* 76 ( Split completions on read ) */
+#define DMA_DIAG_WAITTIMEOUTINIT (0x34) /* 77 */
+#define DMA_DIAG_WAITOVERFLOWCOUNTER (0x38) /* 78 */
+#define DMA_DIAG_WAITCOUNTER (0x3C) /* 79 */
+
+/* ------------------------------------------------------------------------- */
+/* DMA Buffer */
+
+#define TS_INPUT_BASE (0x200)
+#define TS_INPUT_CONTROL(i) (TS_INPUT_BASE + (i) * 16 + 0x00)
+
+#define TS_OUTPUT_BASE (0x280)
+#define TS_OUTPUT_CONTROL(i) (TS_OUTPUT_BASE + (i) * 16 + 0x00)
+
+#define DMA_BUFFER_BASE (0x300)
+
+#define DMA_BUFFER_CONTROL(i) (DMA_BUFFER_BASE + (i) * 16 + 0x00)
+#define DMA_BUFFER_ACK(i) (DMA_BUFFER_BASE + (i) * 16 + 0x04)
+#define DMA_BUFFER_CURRENT(i) (DMA_BUFFER_BASE + (i) * 16 + 0x08)
+#define DMA_BUFFER_SIZE(i) (DMA_BUFFER_BASE + (i) * 16 + 0x0c)
+
+#define DMA_BASE_ADDRESS_TABLE (0x2000)
+#define DMA_BASE_ADDRESS_TABLE_ENTRIES (512)
+
diff --git a/drivers/media/dvb/ddbridge/ddbridge.h b/drivers/media/dvb/ddbridge/ddbridge.h
new file mode 100644
index 00000000000..6d14893218f
--- /dev/null
+++ b/drivers/media/dvb/ddbridge/ddbridge.h
@@ -0,0 +1,187 @@
+/*
+ * ddbridge.h: Digital Devices PCIe bridge driver
+ *
+ * Copyright (C) 2010-2011 Digital Devices GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef _DDBRIDGE_H_
+#define _DDBRIDGE_H_
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <asm/dma.h>
+#include <linux/dvb/frontend.h>
+#include <linux/dvb/ca.h>
+#include <linux/dvb/video.h>
+#include <linux/dvb/audio.h>
+#include <linux/socket.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_ringbuffer.h"
+#include "dvb_ca_en50221.h"
+#include "dvb_net.h"
+#include "cxd2099.h"
+
+#define DDB_MAX_I2C 4
+#define DDB_MAX_PORT 4
+#define DDB_MAX_INPUT 8
+#define DDB_MAX_OUTPUT 4
+
+struct ddb_info {
+ int type;
+#define DDB_NONE 0
+#define DDB_OCTOPUS 1
+ char *name;
+ int port_num;
+ u32 port_type[DDB_MAX_PORT];
+};
+
+/* DMA_SIZE MUST be divisible by 188 and 128 !!! */
+
+#define INPUT_DMA_MAX_BUFS 32 /* hardware table limit */
+#define INPUT_DMA_BUFS 8
+#define INPUT_DMA_SIZE (128*47*21)
+
+#define OUTPUT_DMA_MAX_BUFS 32
+#define OUTPUT_DMA_BUFS 8
+#define OUTPUT_DMA_SIZE (128*47*21)
+
+struct ddb;
+struct ddb_port;
+
+struct ddb_input {
+ struct ddb_port *port;
+ u32 nr;
+ int attached;
+
+ dma_addr_t pbuf[INPUT_DMA_MAX_BUFS];
+ u8 *vbuf[INPUT_DMA_MAX_BUFS];
+ u32 dma_buf_num;
+ u32 dma_buf_size;
+
+ struct tasklet_struct tasklet;
+ spinlock_t lock;
+ wait_queue_head_t wq;
+ int running;
+ u32 stat;
+ u32 cbuf;
+ u32 coff;
+
+ struct dvb_adapter adap;
+ struct dvb_device *dev;
+ struct dvb_frontend *fe;
+ struct dvb_frontend *fe2;
+ struct dmxdev dmxdev;
+ struct dvb_demux demux;
+ struct dvb_net dvbnet;
+ struct dmx_frontend hw_frontend;
+ struct dmx_frontend mem_frontend;
+ int users;
+ int (*gate_ctrl)(struct dvb_frontend *, int);
+};
+
+struct ddb_output {
+ struct ddb_port *port;
+ u32 nr;
+ dma_addr_t pbuf[OUTPUT_DMA_MAX_BUFS];
+ u8 *vbuf[OUTPUT_DMA_MAX_BUFS];
+ u32 dma_buf_num;
+ u32 dma_buf_size;
+ struct tasklet_struct tasklet;
+ spinlock_t lock;
+ wait_queue_head_t wq;
+ int running;
+ u32 stat;
+ u32 cbuf;
+ u32 coff;
+
+ struct dvb_adapter adap;
+ struct dvb_device *dev;
+};
+
+struct ddb_i2c {
+ struct ddb *dev;
+ u32 nr;
+ struct i2c_adapter adap;
+ struct i2c_adapter adap2;
+ u32 regs;
+ u32 rbuf;
+ u32 wbuf;
+ int done;
+ wait_queue_head_t wq;
+};
+
+struct ddb_port {
+ struct ddb *dev;
+ u32 nr;
+ struct ddb_i2c *i2c;
+ struct mutex i2c_gate_lock;
+ u32 class;
+#define DDB_PORT_NONE 0
+#define DDB_PORT_CI 1
+#define DDB_PORT_TUNER 2
+ u32 type;
+#define DDB_TUNER_NONE 0
+#define DDB_TUNER_DVBS_ST 1
+#define DDB_TUNER_DVBS_ST_AA 2
+#define DDB_TUNER_DVBCT_TR 16
+#define DDB_TUNER_DVBCT_ST 17
+ u32 adr;
+
+ struct ddb_input *input[2];
+ struct ddb_output *output;
+ struct dvb_ca_en50221 *en;
+};
+
+struct ddb {
+ struct pci_dev *pdev;
+ unsigned char *regs;
+ struct ddb_port port[DDB_MAX_PORT];
+ struct ddb_i2c i2c[DDB_MAX_I2C];
+ struct ddb_input input[DDB_MAX_INPUT];
+ struct ddb_output output[DDB_MAX_OUTPUT];
+
+ struct device *ddb_dev;
+ int nr;
+ u8 iobuf[1028];
+
+ struct ddb_info *info;
+ int msi;
+};
+
+/****************************************************************************/
+
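+/*
+ * Register access helpers; they assume a local variable "dev"
+ * (struct ddb *) is in scope at the call site.
+ */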
+#define ddbwritel(_val, _adr) writel((_val), \
+ (char *) (dev->regs+(_adr)))
+#define ddbreadl(_adr) readl((char *) (dev->regs+(_adr)))
+#define ddbcpyto(_adr, _src, _count) memcpy_toio((char *) \
+ (dev->regs+(_adr)), (_src), (_count))
+#define ddbcpyfrom(_dst, _adr, _count) memcpy_fromio((_dst), (char *) \
+ (dev->regs+(_adr)), (_count))
+
+/****************************************************************************/
+
+#endif
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index b2b0c45f32a..55e6533f15e 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -22,6 +22,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
diff --git a/drivers/media/dvb/dvb-core/Makefile b/drivers/media/dvb/dvb-core/Makefile
index 0b5182835cc..8f22bcd7c1f 100644
--- a/drivers/media/dvb/dvb-core/Makefile
+++ b/drivers/media/dvb/dvb-core/Makefile
@@ -2,8 +2,10 @@
# Makefile for the kernel DVB device drivers.
#
+dvb-net-$(CONFIG_DVB_NET) := dvb_net.o
+
dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o dvb_filter.o \
dvb_ca_en50221.o dvb_frontend.o \
- dvb_net.o dvb_ringbuffer.o dvb_math.o
+ $(dvb-net-y) dvb_ringbuffer.o dvb_math.o
obj-$(CONFIG_DVB_CORE) += dvb-core.o
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 98278041d75..efe9c30605e 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -904,7 +904,7 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
.buffer = b \
}
-static struct dtv_cmds_h dtv_cmds[] = {
+static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_TUNE, 1, 0),
_DTV_CMD(DTV_CLEAR, 1, 0),
@@ -966,6 +966,7 @@ static struct dtv_cmds_h dtv_cmds[] = {
_DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0),
_DTV_CMD(DTV_ISDBS_TS_ID, 1, 0),
+ _DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0),
/* Get */
_DTV_CMD(DTV_DISEQC_SLAVE_REPLY, 0, 1),
@@ -1988,6 +1989,14 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl) {
if ((ret = fe->ops.ts_bus_ctrl(fe, 1)) < 0)
goto err0;
+
+ /* If we took control of the bus, we need to force
+ reinitialization. This is because many ts_bus_ctrl()
+ functions strobe the RESET pin on the demod, and if the
+ frontend thread already exists then the dvb_init() routine
+ won't get called (which is what usually does initial
+ register configuration). */
+ fepriv->reinitialise = 1;
}
if ((ret = dvb_generic_open (inode, file)) < 0)
diff --git a/drivers/media/dvb/dvb-core/dvb_net.h b/drivers/media/dvb/dvb-core/dvb_net.h
index 3a3126cae03..1e53acd50cf 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.h
+++ b/drivers/media/dvb/dvb-core/dvb_net.h
@@ -32,6 +32,8 @@
#define DVB_NET_DEVICES_MAX 10
+#ifdef CONFIG_DVB_NET
+
struct dvb_net {
struct dvb_device *dvbdev;
struct net_device *device[DVB_NET_DEVICES_MAX];
@@ -40,8 +42,25 @@ struct dvb_net {
struct dmx_demux *demux;
};
-
void dvb_net_release(struct dvb_net *);
int dvb_net_init(struct dvb_adapter *, struct dvb_net *, struct dmx_demux *);
+#else
+
+struct dvb_net {
+ struct dvb_device *dvbdev;
+};
+
+static inline void dvb_net_release(struct dvb_net *dvbnet)
+{
+}
+
+static inline int dvb_net_init(struct dvb_adapter *adap,
+ struct dvb_net *dvbnet, struct dmx_demux *dmx)
+{
+ return 0;
+}
+
+#endif /* ifdef CONFIG_DVB_NET */
+
#endif
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index e85304c59a2..5d73dec8ac0 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -81,6 +81,7 @@ config DVB_USB_DIB0700
select MEDIA_TUNER_MT2266 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_XC4000 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MXL5007T if !MEDIA_TUNER_CUSTOMISE
help
Support for USB2.0/1.1 DVB receivers based on the DiB0700 USB bridge. The
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index 100ebc37e99..d7ad05fc383 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -91,7 +91,6 @@ static int af9015_rw_udev(struct usb_device *udev, struct req_t *req)
case GET_CONFIG:
case READ_MEMORY:
case RECONNECT_USB:
- case GET_IR_CODE:
write = 0;
break;
case READ_I2C:
@@ -164,13 +163,6 @@ static int af9015_rw_udev(struct usb_device *udev, struct req_t *req)
deb_xfer("<<< ");
debug_dump(buf, act_len, deb_xfer);
- /* remote controller query status is 1 if remote code is not received */
- if (req->cmd == GET_IR_CODE && buf[1] == 1) {
- buf[1] = 0; /* clear command "error" status */
- memset(&buf[2], 0, req->data_len);
- buf[3] = 1; /* no remote code received mark */
- }
-
/* check status */
if (buf[1]) {
err("command failed:%d", buf[1]);
@@ -292,6 +284,10 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
}
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
+ if (msg[i].len > 3 || msg[i+1].len > 61) {
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
if (msg[i].addr ==
af9015_af9013_config[0].demod_address)
req.cmd = READ_MEMORY;
@@ -306,12 +302,16 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
ret = af9015_ctrl_msg(d, &req);
i += 2;
} else if (msg[i].flags & I2C_M_RD) {
- ret = -EINVAL;
+ if (msg[i].len > 61) {
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
if (msg[i].addr ==
- af9015_af9013_config[0].demod_address)
+ af9015_af9013_config[0].demod_address) {
+ ret = -EINVAL;
goto error;
- else
- req.cmd = READ_I2C;
+ }
+ req.cmd = READ_I2C;
req.i2c_addr = msg[i].addr;
req.addr = addr;
req.mbox = mbox;
@@ -321,6 +321,10 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
ret = af9015_ctrl_msg(d, &req);
i += 1;
} else {
+ if (msg[i].len > 21) {
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
if (msg[i].addr ==
af9015_af9013_config[0].demod_address)
req.cmd = WRITE_MEMORY;
@@ -735,6 +739,7 @@ static const struct af9015_rc_setup af9015_rc_setup_hashes[] = {
{ 0xb8feb708, RC_MAP_MSI_DIGIVOX_II },
{ 0xa3703d00, RC_MAP_ALINK_DTU_M },
{ 0x9b7dc64e, RC_MAP_TOTAL_MEDIA_IN_HAND }, /* MYGICTV U718 */
+ { 0x5d49e3db, RC_MAP_DIGITTRADE }, /* LC-Power LC-USB-DVBT */
{ }
};
@@ -749,6 +754,8 @@ static const struct af9015_rc_setup af9015_rc_setup_usbids[] = {
RC_MAP_AZUREWAVE_AD_TU700 },
{ (USB_VID_MSI_2 << 16) + USB_PID_MSI_DIGI_VOX_MINI_III,
RC_MAP_MSI_DIGIVOX_III },
+ { (USB_VID_MSI_2 << 16) + USB_PID_MSI_DIGIVOX_DUO,
+ RC_MAP_MSI_DIGIVOX_III },
{ (USB_VID_LEADTEK << 16) + USB_PID_WINFAST_DTV_DONGLE_GOLD,
RC_MAP_LEADTEK_Y04G0051 },
{ (USB_VID_AVERMEDIA << 16) + USB_PID_AVERMEDIA_VOLAR_X,
@@ -759,6 +766,8 @@ static const struct af9015_rc_setup af9015_rc_setup_usbids[] = {
RC_MAP_DIGITALNOW_TINYTWIN },
{ (USB_VID_GTEK << 16) + USB_PID_TINYTWIN_3,
RC_MAP_DIGITALNOW_TINYTWIN },
+ { (USB_VID_KWORLD_2 << 16) + USB_PID_SVEON_STV22,
+ RC_MAP_MSI_DIGIVOX_III },
{ }
};
@@ -1082,44 +1091,11 @@ error:
return ret;
}
-/* init 2nd I2C adapter */
-static int af9015_i2c_init(struct dvb_usb_device *d)
-{
- int ret;
- struct af9015_state *state = d->priv;
- deb_info("%s:\n", __func__);
-
- strncpy(state->i2c_adap.name, d->desc->name,
- sizeof(state->i2c_adap.name));
- state->i2c_adap.algo = d->props.i2c_algo;
- state->i2c_adap.algo_data = NULL;
- state->i2c_adap.dev.parent = &d->udev->dev;
-
- i2c_set_adapdata(&state->i2c_adap, d);
-
- ret = i2c_add_adapter(&state->i2c_adap);
- if (ret < 0)
- err("could not add i2c adapter");
-
- return ret;
-}
-
static int af9015_af9013_frontend_attach(struct dvb_usb_adapter *adap)
{
int ret;
- struct af9015_state *state = adap->dev->priv;
- struct i2c_adapter *i2c_adap;
-
- if (adap->id == 0) {
- /* select I2C adapter */
- i2c_adap = &adap->dev->i2c_adap;
-
- deb_info("%s: init I2C\n", __func__);
- ret = af9015_i2c_init(adap->dev);
- } else {
- /* select I2C adapter */
- i2c_adap = &state->i2c_adap;
+ if (adap->id == 1) {
/* copy firmware to 2nd demodulator */
if (af9015_config.dual_mode) {
ret = af9015_copy_firmware(adap->dev);
@@ -1136,7 +1112,7 @@ static int af9015_af9013_frontend_attach(struct dvb_usb_adapter *adap)
/* attach demodulator */
adap->fe = dvb_attach(af9013_attach, &af9015_af9013_config[adap->id],
- i2c_adap);
+ &adap->dev->i2c_adap);
return adap->fe == NULL ? -ENODEV : 0;
}
@@ -1206,57 +1182,56 @@ static struct mxl5007t_config af9015_mxl5007t_config = {
static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
{
- struct af9015_state *state = adap->dev->priv;
- struct i2c_adapter *i2c_adap;
int ret;
deb_info("%s:\n", __func__);
- /* select I2C adapter */
- if (adap->id == 0)
- i2c_adap = &adap->dev->i2c_adap;
- else
- i2c_adap = &state->i2c_adap;
-
switch (af9015_af9013_config[adap->id].tuner) {
case AF9013_TUNER_MT2060:
case AF9013_TUNER_MT2060_2:
- ret = dvb_attach(mt2060_attach, adap->fe, i2c_adap,
+ ret = dvb_attach(mt2060_attach, adap->fe, &adap->dev->i2c_adap,
&af9015_mt2060_config,
af9015_config.mt2060_if1[adap->id])
== NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_QT1010:
case AF9013_TUNER_QT1010A:
- ret = dvb_attach(qt1010_attach, adap->fe, i2c_adap,
+ ret = dvb_attach(qt1010_attach, adap->fe, &adap->dev->i2c_adap,
&af9015_qt1010_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_TDA18271:
- ret = dvb_attach(tda18271_attach, adap->fe, 0xc0, i2c_adap,
+ ret = dvb_attach(tda18271_attach, adap->fe, 0xc0,
+ &adap->dev->i2c_adap,
&af9015_tda18271_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_TDA18218:
- ret = dvb_attach(tda18218_attach, adap->fe, i2c_adap,
+ ret = dvb_attach(tda18218_attach, adap->fe,
+ &adap->dev->i2c_adap,
&af9015_tda18218_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_MXL5003D:
- ret = dvb_attach(mxl5005s_attach, adap->fe, i2c_adap,
+ ret = dvb_attach(mxl5005s_attach, adap->fe,
+ &adap->dev->i2c_adap,
&af9015_mxl5003_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_MXL5005D:
case AF9013_TUNER_MXL5005R:
- ret = dvb_attach(mxl5005s_attach, adap->fe, i2c_adap,
+ ret = dvb_attach(mxl5005s_attach, adap->fe,
+ &adap->dev->i2c_adap,
&af9015_mxl5005_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_ENV77H11D5:
- ret = dvb_attach(dvb_pll_attach, adap->fe, 0xc0, i2c_adap,
+ ret = dvb_attach(dvb_pll_attach, adap->fe, 0xc0,
+ &adap->dev->i2c_adap,
DVB_PLL_TDA665X) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_MC44S803:
- ret = dvb_attach(mc44s803_attach, adap->fe, i2c_adap,
+ ret = dvb_attach(mc44s803_attach, adap->fe,
+ &adap->dev->i2c_adap,
&af9015_mc44s803_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_MXL5007T:
- ret = dvb_attach(mxl5007t_attach, adap->fe, i2c_adap,
+ ret = dvb_attach(mxl5007t_attach, adap->fe,
+ &adap->dev->i2c_adap,
0xc0, &af9015_mxl5007t_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_UNKNOWN:
@@ -1309,6 +1284,7 @@ static struct usb_device_id af9015_usb_table[] = {
USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC)},
/* 35 */{USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A850T)},
{USB_DEVICE(USB_VID_GTEK, USB_PID_TINYTWIN_3)},
+ {USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV22)},
{0},
};
MODULE_DEVICE_TABLE(usb, af9015_usb_table);
@@ -1502,7 +1478,7 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.i2c_algo = &af9015_i2c_algo,
- .num_device_descs = 9, /* check max from dvb-usb.h */
+ .num_device_descs = 10, /* check max from dvb-usb.h */
.devices = {
{
.name = "Xtensions XD-380",
@@ -1554,6 +1530,11 @@ static struct dvb_usb_device_properties af9015_properties[] = {
.cold_ids = {&af9015_usb_table[20], NULL},
.warm_ids = {NULL},
},
+ {
+ .name = "Sveon STV22 Dual USB DVB-T Tuner HDTV",
+ .cold_ids = {&af9015_usb_table[37], NULL},
+ .warm_ids = {NULL},
+ },
}
}, {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
@@ -1704,33 +1685,11 @@ static int af9015_usb_probe(struct usb_interface *intf,
return ret;
}
-static void af9015_i2c_exit(struct dvb_usb_device *d)
-{
- struct af9015_state *state = d->priv;
- deb_info("%s:\n", __func__);
-
- /* remove 2nd I2C adapter */
- if (d->state & DVB_USB_STATE_I2C)
- i2c_del_adapter(&state->i2c_adap);
-}
-
-static void af9015_usb_device_exit(struct usb_interface *intf)
-{
- struct dvb_usb_device *d = usb_get_intfdata(intf);
- deb_info("%s:\n", __func__);
-
- /* remove 2nd I2C adapter */
- if (d != NULL && d->desc != NULL)
- af9015_i2c_exit(d);
-
- dvb_usb_device_exit(intf);
-}
-
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver af9015_usb_driver = {
.name = "dvb_usb_af9015",
.probe = af9015_usb_probe,
- .disconnect = af9015_usb_device_exit,
+ .disconnect = dvb_usb_device_exit,
.id_table = af9015_usb_table,
};
diff --git a/drivers/media/dvb/dvb-usb/af9015.h b/drivers/media/dvb/dvb-usb/af9015.h
index beb3004f00b..6252ea6c190 100644
--- a/drivers/media/dvb/dvb-usb/af9015.h
+++ b/drivers/media/dvb/dvb-usb/af9015.h
@@ -99,7 +99,6 @@ enum af9015_ir_mode {
};
struct af9015_state {
- struct i2c_adapter i2c_adap; /* I2C adapter for 2nd FE */
u8 rc_repeat;
u32 rc_keycode;
u8 rc_last[4];
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index 7c327b54308..2cbf19a52e3 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -347,15 +347,17 @@ static struct isl6423_config anysee_isl6423_config = {
* PCB: ?
* parts: DNOS404ZH102A(MT352, DTT7579(?))
*
- * E30 VID=04b4 PID=861f HW=2 FW=2.1 Product=????????
- * PCB: ?
+ * E30 VID=04b4 PID=861f HW=2 FW=2.1 "anysee-T(LP)"
+ * PCB: PCB 507T (rev1.61)
* parts: DNOS404ZH103A(ZL10353, DTT7579(?))
+ * OEA=0a OEB=00 OEC=00 OED=ff OEE=00
+ * IOA=45 IOB=ff IOC=00 IOD=ff IOE=00
*
* E30 Plus VID=04b4 PID=861f HW=6 FW=1.0 "anysee"
* PCB: 507CD (rev1.1)
* parts: DNOS404ZH103A(ZL10353, DTT7579(?)), CST56I01
- * OEA=80 OEB=00 OEC=00 OED=ff OEF=fe
- * IOA=4f IOB=ff IOC=00 IOD=06 IOF=01
+ * OEA=80 OEB=00 OEC=00 OED=ff OEE=fe
+ * IOA=4f IOB=ff IOC=00 IOD=06 IOE=01
* IOD[0] ZL10353 1=enabled
* IOA[7] TS 0=enabled
* tuner is not behind ZL10353 I2C-gate (no care if gate disabled or not)
@@ -363,30 +365,30 @@ static struct isl6423_config anysee_isl6423_config = {
* E30 C Plus VID=04b4 PID=861f HW=10 FW=1.0 "anysee-DC(LP)"
* PCB: 507DC (rev0.2)
* parts: TDA10023, DTOS403IH102B TM, CST56I01
- * OEA=80 OEB=00 OEC=00 OED=ff OEF=fe
- * IOA=4f IOB=ff IOC=00 IOD=26 IOF=01
+ * OEA=80 OEB=00 OEC=00 OED=ff OEE=fe
+ * IOA=4f IOB=ff IOC=00 IOD=26 IOE=01
* IOD[0] TDA10023 1=enabled
*
* E30 S2 Plus VID=04b4 PID=861f HW=11 FW=0.1 "anysee-S2(LP)"
* PCB: 507SI (rev2.1)
* parts: BS2N10WCC01(CX24116, CX24118), ISL6423, TDA8024
- * OEA=80 OEB=00 OEC=ff OED=ff OEF=fe
- * IOA=4d IOB=ff IOC=00 IOD=26 IOF=01
+ * OEA=80 OEB=00 OEC=ff OED=ff OEE=fe
+ * IOA=4d IOB=ff IOC=00 IOD=26 IOE=01
* IOD[0] CX24116 1=enabled
*
* E30 C Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)"
* PCB: 507FA (rev0.4)
* parts: TDA10023, DTOS403IH102B TM, TDA8024
- * OEA=80 OEB=00 OEC=ff OED=ff OEF=ff
- * IOA=4d IOB=ff IOC=00 IOD=00 IOF=c0
+ * OEA=80 OEB=00 OEC=ff OED=ff OEE=ff
+ * IOA=4d IOB=ff IOC=00 IOD=00 IOE=c0
* IOD[5] TDA10023 1=enabled
* IOE[0] tuner 1=enabled
*
* E30 Combo Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)"
* PCB: 507FA (rev1.1)
* parts: ZL10353, TDA10023, DTOS403IH102B TM, TDA8024
- * OEA=80 OEB=00 OEC=ff OED=ff OEF=ff
- * IOA=4d IOB=ff IOC=00 IOD=00 IOF=c0
+ * OEA=80 OEB=00 OEC=ff OED=ff OEE=ff
+ * IOA=4d IOB=ff IOC=00 IOD=00 IOE=c0
* DVB-C:
* IOD[5] TDA10023 1=enabled
* IOE[0] tuner 1=enabled
@@ -398,8 +400,8 @@ static struct isl6423_config anysee_isl6423_config = {
* E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)"
* PCB: 508TC (rev0.6)
* parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212)
- * OEA=80 OEB=00 OEC=03 OED=f7 OEF=ff
- * IOA=4d IOB=00 IOC=cc IOD=48 IOF=e4
+ * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
+ * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4
* IOA[7] TS 1=enabled
* IOE[4] TDA18212 1=enabled
* DVB-C:
@@ -414,11 +416,34 @@ static struct isl6423_config anysee_isl6423_config = {
* E7 S2 VID=1c73 PID=861f HW=19 FW=0.4 AMTCI=0.5 "anysee-E7S2(LP)"
* PCB: 508S2 (rev0.7)
* parts: DNBU10512IST(STV0903, STV6110), ISL6423
- * OEA=80 OEB=00 OEC=03 OED=f7 OEF=ff
- * IOA=4d IOB=00 IOC=c4 IOD=08 IOF=e4
+ * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
+ * IOA=4d IOB=00 IOC=c4 IOD=08 IOE=e4
* IOA[7] TS 1=enabled
* IOE[5] STV0903 1=enabled
*
+ * E7 PTC VID=1c73 PID=861f HW=21 FW=0.1 AMTCI=?? "anysee-E7PTC(LP)"
+ * PCB: 508PTC (rev0.5)
+ * parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212)
+ * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
+ * IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4
+ * IOA[7] TS 1=enabled
+ * IOE[4] TDA18212 1=enabled
+ * DVB-C:
+ * IOD[6] ZL10353 0=disabled
+ * IOD[5] TDA10023 1=enabled
+ * IOE[0] IF 1=enabled
+ * DVB-T:
+ * IOD[5] TDA10023 0=disabled
+ * IOD[6] ZL10353 1=enabled
+ * IOE[0] IF 0=enabled
+ *
+ * E7 PS2 VID=1c73 PID=861f HW=22 FW=0.1 AMTCI=?? "anysee-E7PS2(LP)"
+ * PCB: 508PS2 (rev0.4)
+ * parts: DNBU10512IST(STV0903, STV6110), ISL6423
+ * OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
+ * IOA=4d IOB=00 IOC=c4 IOD=08 IOE=e4
+ * IOA[7] TS 1=enabled
+ * IOE[5] STV0903 1=enabled
*/
static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
@@ -459,7 +484,7 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
state->hw = hw_info[0];
switch (state->hw) {
- case ANYSEE_HW_02: /* 2 */
+ case ANYSEE_HW_507T: /* 2 */
/* E30 */
/* attach demod */
@@ -593,7 +618,9 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
break;
case ANYSEE_HW_508TC: /* 18 */
+ case ANYSEE_HW_508PTC: /* 21 */
/* E7 TC */
+ /* E7 PTC */
/* enable transport stream on IOA[7] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOA, (1 << 7), 0x80);
@@ -650,7 +677,9 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
break;
case ANYSEE_HW_508S2: /* 19 */
+ case ANYSEE_HW_508PS2: /* 22 */
/* E7 S2 */
+ /* E7 PS2 */
/* enable transport stream on IOA[7] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOA, (1 << 7), 0x80);
@@ -687,7 +716,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
deb_info("%s:\n", __func__);
switch (state->hw) {
- case ANYSEE_HW_02: /* 2 */
+ case ANYSEE_HW_507T: /* 2 */
/* E30 */
/* attach tuner */
@@ -762,7 +791,9 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
break;
case ANYSEE_HW_508TC: /* 18 */
+ case ANYSEE_HW_508PTC: /* 21 */
/* E7 TC */
+ /* E7 PTC */
/* enable tuner on IOE[4] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 4), 0x10);
@@ -775,7 +806,9 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
break;
case ANYSEE_HW_508S2: /* 19 */
+ case ANYSEE_HW_508PS2: /* 22 */
/* E7 S2 */
+ /* E7 PS2 */
/* attach tuner */
fe = dvb_attach(stv6110_attach, adap->fe,
diff --git a/drivers/media/dvb/dvb-usb/anysee.h b/drivers/media/dvb/dvb-usb/anysee.h
index a7673aa1e00..ad6ccd1ea2d 100644
--- a/drivers/media/dvb/dvb-usb/anysee.h
+++ b/drivers/media/dvb/dvb-usb/anysee.h
@@ -61,13 +61,15 @@ struct anysee_state {
u8 seq;
};
-#define ANYSEE_HW_02 2 /* E30 */
-#define ANYSEE_HW_507CD 6 /* E30 Plus */
-#define ANYSEE_HW_507DC 10 /* E30 C Plus */
-#define ANYSEE_HW_507SI 11 /* E30 S2 Plus */
-#define ANYSEE_HW_507FA 15 /* E30 Combo Plus / E30 C Plus */
-#define ANYSEE_HW_508TC 18 /* E7 TC */
-#define ANYSEE_HW_508S2 19 /* E7 S2 */
+#define ANYSEE_HW_507T 2 /* E30 */
+#define ANYSEE_HW_507CD 6 /* E30 Plus */
+#define ANYSEE_HW_507DC 10 /* E30 C Plus */
+#define ANYSEE_HW_507SI 11 /* E30 S2 Plus */
+#define ANYSEE_HW_507FA 15 /* E30 Combo Plus / E30 C Plus */
+#define ANYSEE_HW_508TC 18 /* E7 TC */
+#define ANYSEE_HW_508S2 19 /* E7 S2 */
+#define ANYSEE_HW_508PTC 21 /* E7 PTC Plus */
+#define ANYSEE_HW_508PS2 22 /* E7 PS2 Plus */
#define REG_IOA 0x80 /* Port A (bit addressable) */
#define REG_IOB 0x90 /* Port B (bit addressable) */
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index c519ad5eb73..d0ea5b64f6b 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -17,6 +17,7 @@
#include "mt2266.h"
#include "tuner-xc2028.h"
#include "xc5000.h"
+#include "xc4000.h"
#include "s5h1411.h"
#include "dib0070.h"
#include "dib0090.h"
@@ -2655,6 +2656,156 @@ static int xc5000_tuner_attach(struct dvb_usb_adapter *adap)
== NULL ? -ENODEV : 0;
}
+static int dib0700_xc4000_tuner_callback(void *priv, int component,
+ int command, int arg)
+{
+ struct dvb_usb_adapter *adap = priv;
+
+ if (command == XC4000_TUNER_RESET) {
+ /* Reset the tuner */
+ dib7000p_set_gpio(adap->fe, 8, 0, 0);
+ msleep(10);
+ dib7000p_set_gpio(adap->fe, 8, 0, 1);
+ } else {
+ err("xc4000: unknown tuner callback command: %d\n", command);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct dibx000_agc_config stk7700p_7000p_xc4000_agc_config = {
+ .band_caps = BAND_UHF | BAND_VHF,
+ .setup = 0x64,
+ .inv_gain = 0x02c8,
+ .time_stabiliz = 0x15,
+ .alpha_level = 0x00,
+ .thlock = 0x76,
+ .wbd_inv = 0x01,
+ .wbd_ref = 0x0b33,
+ .wbd_sel = 0x00,
+ .wbd_alpha = 0x02,
+ .agc1_max = 0x00,
+ .agc1_min = 0x00,
+ .agc2_max = 0x9b26,
+ .agc2_min = 0x26ca,
+ .agc1_pt1 = 0x00,
+ .agc1_pt2 = 0x00,
+ .agc1_pt3 = 0x00,
+ .agc1_slope1 = 0x00,
+ .agc1_slope2 = 0x00,
+ .agc2_pt1 = 0x00,
+ .agc2_pt2 = 0x80,
+ .agc2_slope1 = 0x1d,
+ .agc2_slope2 = 0x1d,
+ .alpha_mant = 0x11,
+ .alpha_exp = 0x1b,
+ .beta_mant = 0x17,
+ .beta_exp = 0x33,
+ .perform_agc_softsplit = 0x00,
+};
+
+static struct dibx000_bandwidth_config stk7700p_xc4000_pll_config = {
+ 60000, 30000, /* internal, sampling */
+ 1, 8, 3, 1, 0, /* pll_cfg: prediv, ratio, range, reset, bypass */
+ 0, 0, 1, 1, 0, /* misc: refdiv, bypclk_div, IO_CLK_en_core, */
+ /* ADClkSrc, modulo */
+ (3 << 14) | (1 << 12) | 524, /* sad_cfg: refsel, sel, freq_15k */
+ 39370534, /* ifreq */
+ 20452225, /* timf */
+ 30000000 /* xtal */
+};
+
+/* FIXME: none of these inputs are validated yet */
+static struct dib7000p_config pctv_340e_config = {
+ .output_mpeg2_in_188_bytes = 1,
+
+ .agc_config_count = 1,
+ .agc = &stk7700p_7000p_xc4000_agc_config,
+ .bw = &stk7700p_xc4000_pll_config,
+
+ .gpio_dir = DIB7000M_GPIO_DEFAULT_DIRECTIONS,
+ .gpio_val = DIB7000M_GPIO_DEFAULT_VALUES,
+ .gpio_pwm_pos = DIB7000M_GPIO_DEFAULT_PWM_POS,
+};
+
+/* PCTV 340e GPIOs map:
+ dib0700:
+ GPIO2 - CX25843 sleep
+ GPIO3 - CS5340 reset
+ GPIO5 - IRD
+ GPIO6 - Power Supply
+ GPIO8 - LNA (1=off 0=on)
+ GPIO10 - CX25843 reset
+ dib7000:
+ GPIO8 - xc4000 reset
+ */
+static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap)
+{
+ struct dib0700_state *st = adap->dev->priv;
+
+ /* Power Supply on */
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0);
+ msleep(50);
+ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+ msleep(100); /* Allow power supply to settle before probing */
+
+ /* cx25843 reset */
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+ msleep(1); /* cx25843 datasheet says 350us required */
+ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+
+ /* LNA off for now */
+ dib0700_set_gpio(adap->dev, GPIO8, GPIO_OUT, 1);
+
+ /* Put the CX25843 to sleep for now since we're in digital mode */
+ dib0700_set_gpio(adap->dev, GPIO2, GPIO_OUT, 1);
+
+ /* FIXME: not verified yet */
+ dib0700_ctrl_clock(adap->dev, 72, 1);
+
+ msleep(500);
+
+ if (dib7000pc_detection(&adap->dev->i2c_adap) == 0) {
+ /* Demodulator not found for some reason? */
+ return -ENODEV;
+ }
+
+ adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x12,
+ &pctv_340e_config);
+ st->is_dib7000pc = 1;
+
+ return adap->fe == NULL ? -ENODEV : 0;
+}
+
+static struct xc4000_config dib7000p_xc4000_tunerconfig = {
+ .i2c_address = 0x61,
+ .default_pm = 1,
+ .dvb_amplitude = 0,
+ .set_smoothedcvbs = 0,
+ .if_khz = 5400
+};
+
+static int xc4000_tuner_attach(struct dvb_usb_adapter *adap)
+{
+ struct i2c_adapter *tun_i2c;
+
+ /* The xc4000 is not on the main i2c bus */
+ tun_i2c = dib7000p_get_i2c_master(adap->fe,
+ DIBX000_I2C_INTERFACE_TUNER, 1);
+ if (tun_i2c == NULL) {
+ printk(KERN_ERR "Could not reach tuner i2c bus\n");
+ return 0;
+ }
+
+ /* Setup the reset callback */
+ adap->fe->callback = dib0700_xc4000_tuner_callback;
+
+ return dvb_attach(xc4000_attach, adap->fe, tun_i2c,
+ &dib7000p_xc4000_tunerconfig)
+ == NULL ? -ENODEV : 0;
+}
+
static struct lgdt3305_config hcw_lgdt3305_config = {
.i2c_addr = 0x0e,
.mpeg_mode = LGDT3305_MPEG_PARALLEL,
@@ -2802,6 +2953,8 @@ struct usb_device_id dib0700_usb_id_table[] = {
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE7090PVR) },
{ USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_AIRSTAR_TELESTICK_2) },
/* 75 */{ USB_DEVICE(USB_VID_MEDION, USB_PID_CREATIX_CTX1921) },
+ { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV340E) },
+ { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV340E_SE) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -3772,6 +3925,41 @@ struct dvb_usb_device_properties dib0700_devices[] = {
RC_TYPE_NEC,
.change_protocol = dib0700_change_protocol,
},
+ }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+ .num_adapters = 1,
+ .adapter = {
+ {
+ .frontend_attach = pctv340e_frontend_attach,
+ .tuner_attach = xc4000_tuner_attach,
+
+ DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+
+ .size_of_priv = sizeof(struct
+ dib0700_adapter_state),
+ },
+ },
+
+ .num_device_descs = 2,
+ .devices = {
+ { "Pinnacle PCTV 340e HD Pro USB Stick",
+ { &dib0700_usb_id_table[76], NULL },
+ { NULL },
+ },
+ { "Pinnacle PCTV Hybrid Stick Solo",
+ { &dib0700_usb_id_table[77], NULL },
+ { NULL },
+ },
+ },
+ .rc.core = {
+ .rc_interval = DEFAULT_RC_INTERVAL,
+ .rc_codes = RC_MAP_DIB0700_RC5_TABLE,
+ .module_name = "dib0700",
+ .rc_query = dib0700_rc_query_old_firmware,
+ .allowed_protos = RC_TYPE_RC5 |
+ RC_TYPE_RC6 |
+ RC_TYPE_NEC,
+ .change_protocol = dib0700_change_protocol,
+ },
},
};
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 21b15495d2d..2a79b8fb3e8 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -230,6 +230,8 @@
#define USB_PID_PINNACLE_PCTV310E 0x3211
#define USB_PID_PINNACLE_PCTV801E 0x023a
#define USB_PID_PINNACLE_PCTV801E_SE 0x023b
+#define USB_PID_PINNACLE_PCTV340E 0x023d
+#define USB_PID_PINNACLE_PCTV340E_SE 0x023e
#define USB_PID_PINNACLE_PCTV73A 0x0243
#define USB_PID_PINNACLE_PCTV73ESE 0x0245
#define USB_PID_PINNACLE_PCTV74E 0x0246
@@ -313,6 +315,7 @@
#define USB_PID_FRIIO_WHITE 0x0001
#define USB_PID_TVWAY_PLUS 0x0002
#define USB_PID_SVEON_STV20 0xe39d
+#define USB_PID_SVEON_STV22 0xe401
#define USB_PID_AZUREWAVE_AZ6027 0x3275
#define USB_PID_TERRATEC_DVBS2CI_V1 0x10a4
#define USB_PID_TERRATEC_DVBS2CI_V2 0x10ac
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h
index 76a80968482..7d35d078342 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb.h
@@ -85,7 +85,7 @@ static inline u8 rc5_data(struct rc_map_table *key)
return key->scancode & 0xff;
}
-static inline u8 rc5_scan(struct rc_map_table *key)
+static inline u16 rc5_scan(struct rc_map_table *key)
{
return key->scancode & 0xffff;
}
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.h b/drivers/media/dvb/dvb-usb/gp8psk.h
index 831749a518c..ed32b9da484 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.h
+++ b/drivers/media/dvb/dvb-usb/gp8psk.h
@@ -78,9 +78,6 @@ extern int dvb_usb_gp8psk_debug;
#define ADV_MOD_DVB_BPSK 9 /* DVB-S BPSK */
#define GET_USB_SPEED 0x07
- #define USB_SPEED_LOW 0
- #define USB_SPEED_FULL 1
- #define USB_SPEED_HIGH 2
#define RESET_FX2 0x13
diff --git a/drivers/media/dvb/dvb-usb/technisat-usb2.c b/drivers/media/dvb/dvb-usb/technisat-usb2.c
index 08f8842ad28..473b95ed4d5 100644
--- a/drivers/media/dvb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/dvb/dvb-usb/technisat-usb2.c
@@ -765,10 +765,8 @@ static void technisat_usb2_disconnect(struct usb_interface *intf)
/* work and stuff were only created when the device is in hot state */
if (dev != NULL) {
struct technisat_usb2_state *state = dev->priv;
- if (state != NULL) {
+ if (state != NULL)
cancel_delayed_work_sync(&state->green_led_work);
- flush_scheduled_work();
- }
}
dvb_usb_device_exit(intf);
diff --git a/drivers/media/dvb/dvb-usb/vp7045.h b/drivers/media/dvb/dvb-usb/vp7045.h
index 969688f8526..cf5ec46f8bb 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.h
+++ b/drivers/media/dvb/dvb-usb/vp7045.h
@@ -36,9 +36,6 @@
#define Tuner_Power_OFF 0
#define GET_USB_SPEED 0x07
- #define USB_SPEED_LOW 0
- #define USB_SPEED_FULL 1
- #define USB_SPEED_HIGH 2
#define LOCK_TUNER_COMMAND 0x09
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index 21c52e3b522..489ae824586 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -1208,7 +1208,7 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
if (r->response != AVC_RESPONSE_ACCEPTED) {
dev_err(fdtv->device,
"CA PMT failed with response 0x%x\n", r->response);
- ret = -EFAULT;
+ ret = -EACCES;
}
out:
mutex_unlock(&fdtv->avc_mutex);
diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c
index 8ffb565f070..e5ebdbfe8c1 100644
--- a/drivers/media/dvb/firewire/firedtv-ci.c
+++ b/drivers/media/dvb/firewire/firedtv-ci.c
@@ -45,11 +45,6 @@ static int fdtv_get_ca_flags(struct firedtv_tuner_status *stat)
return flags;
}
-static int fdtv_ca_reset(struct firedtv *fdtv)
-{
- return avc_ca_reset(fdtv) ? -EFAULT : 0;
-}
-
static int fdtv_ca_get_caps(void *arg)
{
struct ca_caps *cap = arg;
@@ -65,12 +60,14 @@ static int fdtv_ca_get_slot_info(struct firedtv *fdtv, void *arg)
{
struct firedtv_tuner_status stat;
struct ca_slot_info *slot = arg;
+ int err;
- if (avc_tuner_status(fdtv, &stat))
- return -EFAULT;
+ err = avc_tuner_status(fdtv, &stat);
+ if (err)
+ return err;
if (slot->num != 0)
- return -EFAULT;
+ return -EACCES;
slot->type = CA_CI;
slot->flags = fdtv_get_ca_flags(&stat);
@@ -81,21 +78,21 @@ static int fdtv_ca_app_info(struct firedtv *fdtv, void *arg)
{
struct ca_msg *reply = arg;
- return avc_ca_app_info(fdtv, reply->msg, &reply->length) ? -EFAULT : 0;
+ return avc_ca_app_info(fdtv, reply->msg, &reply->length);
}
static int fdtv_ca_info(struct firedtv *fdtv, void *arg)
{
struct ca_msg *reply = arg;
- return avc_ca_info(fdtv, reply->msg, &reply->length) ? -EFAULT : 0;
+ return avc_ca_info(fdtv, reply->msg, &reply->length);
}
static int fdtv_ca_get_mmi(struct firedtv *fdtv, void *arg)
{
struct ca_msg *reply = arg;
- return avc_ca_get_mmi(fdtv, reply->msg, &reply->length) ? -EFAULT : 0;
+ return avc_ca_get_mmi(fdtv, reply->msg, &reply->length);
}
static int fdtv_ca_get_msg(struct firedtv *fdtv, void *arg)
@@ -111,14 +108,15 @@ static int fdtv_ca_get_msg(struct firedtv *fdtv, void *arg)
err = fdtv_ca_info(fdtv, arg);
break;
default:
- if (avc_tuner_status(fdtv, &stat))
- err = -EFAULT;
- else if (stat.ca_mmi == 1)
+ err = avc_tuner_status(fdtv, &stat);
+ if (err)
+ break;
+ if (stat.ca_mmi == 1)
err = fdtv_ca_get_mmi(fdtv, arg);
else {
dev_info(fdtv->device, "unhandled CA message 0x%08x\n",
fdtv->ca_last_command);
- err = -EFAULT;
+ err = -EACCES;
}
}
fdtv->ca_last_command = 0;
@@ -141,7 +139,7 @@ static int fdtv_ca_pmt(struct firedtv *fdtv, void *arg)
data_length = msg->msg[3];
}
- return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length) ? -EFAULT : 0;
+ return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length);
}
static int fdtv_ca_send_msg(struct firedtv *fdtv, void *arg)
@@ -170,7 +168,7 @@ static int fdtv_ca_send_msg(struct firedtv *fdtv, void *arg)
default:
dev_err(fdtv->device, "unhandled CA message 0x%08x\n",
fdtv->ca_last_command);
- err = -EFAULT;
+ err = -EACCES;
}
return err;
}
@@ -184,7 +182,7 @@ static int fdtv_ca_ioctl(struct file *file, unsigned int cmd, void *arg)
switch (cmd) {
case CA_RESET:
- err = fdtv_ca_reset(fdtv);
+ err = avc_ca_reset(fdtv);
break;
case CA_GET_CAP:
err = fdtv_ca_get_caps(arg);
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index 44b816f2601..32e08e35152 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -49,6 +49,27 @@ config DVB_STV6110x
help
A Silicon tuner that supports DVB-S and DVB-S2 modes
+comment "Multistandard (cable + terrestrial) frontends"
+ depends on DVB_CORE
+
+config DVB_DRXK
+ tristate "Micronas DRXK based"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ Micronas DRX-K DVB-C/T demodulator.
+
+ Say Y when you want to support this frontend.
+
+config DVB_TDA18271C2DD
+ tristate "NXP TDA18271C2 silicon tuner"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ NXP TDA18271 silicon tuner.
+
+ Say Y when you want to support this tuner.
+
comment "DVB-S (satellite) frontends"
depends on DVB_CORE
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 2f3a6f736d6..6a6ba053ead 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -10,6 +10,7 @@ stv0900-objs = stv0900_core.o stv0900_sw.o
au8522-objs = au8522_dig.o au8522_decoder.o
drxd-objs = drxd_firm.o drxd_hard.o
cxd2820r-objs = cxd2820r_core.o cxd2820r_c.o cxd2820r_t.o cxd2820r_t2.o
+drxk-objs := drxk_hard.o
obj-$(CONFIG_DVB_PLL) += dvb-pll.o
obj-$(CONFIG_DVB_STV0299) += stv0299.o
@@ -88,4 +89,6 @@ obj-$(CONFIG_DVB_MB86A20S) += mb86a20s.o
obj-$(CONFIG_DVB_IX2505V) += ix2505v.o
obj-$(CONFIG_DVB_STV0367) += stv0367.o
obj-$(CONFIG_DVB_CXD2820R) += cxd2820r.o
+obj-$(CONFIG_DVB_DRXK) += drxk.o
+obj-$(CONFIG_DVB_TDA18271C2DD) += tda18271c2dd.o
diff --git a/drivers/media/dvb/frontends/au8522_decoder.c b/drivers/media/dvb/frontends/au8522_decoder.c
index b537891a4cc..2b248c12f40 100644
--- a/drivers/media/dvb/frontends/au8522_decoder.c
+++ b/drivers/media/dvb/frontends/au8522_decoder.c
@@ -692,7 +692,7 @@ static int au8522_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
/* Interrogate the decoder to see if we are getting a real signal */
lock_status = au8522_readreg(state, 0x00);
if (lock_status == 0xa2)
- vt->signal = 0x01;
+ vt->signal = 0xffff;
else
vt->signal = 0x00;
diff --git a/drivers/media/dvb/frontends/cx24113.c b/drivers/media/dvb/frontends/cx24113.c
index e9ee55592fd..c341d57d5e8 100644
--- a/drivers/media/dvb/frontends/cx24113.c
+++ b/drivers/media/dvb/frontends/cx24113.c
@@ -31,8 +31,8 @@
static int debug;
-#define info(args...) do { printk(KERN_INFO "CX24113: " args); } while (0)
-#define err(args...) do { printk(KERN_ERR "CX24113: " args); } while (0)
+#define cx_info(args...) do { printk(KERN_INFO "CX24113: " args); } while (0)
+#define cx_err(args...) do { printk(KERN_ERR "CX24113: " args); } while (0)
#define dprintk(args...) \
do { \
@@ -341,7 +341,7 @@ static void cx24113_calc_pll_nf(struct cx24113_state *state, u16 *n, s32 *f)
} while (N < 6 && R < 3);
if (N < 6) {
- err("strange frequency: N < 6\n");
+ cx_err("strange frequency: N < 6\n");
return;
}
F = freq_hz;
@@ -563,7 +563,7 @@ struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe,
kzalloc(sizeof(struct cx24113_state), GFP_KERNEL);
int rc;
if (state == NULL) {
- err("Unable to kzalloc\n");
+ cx_err("Unable to kzalloc\n");
goto error;
}
@@ -571,7 +571,7 @@ struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe,
state->config = config;
state->i2c = i2c;
- info("trying to detect myself\n");
+ cx_info("trying to detect myself\n");
/* making a dummy read, because of some expected troubles
* after power on */
@@ -579,24 +579,24 @@ struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe,
rc = cx24113_readreg(state, 0x00);
if (rc < 0) {
- info("CX24113 not found.\n");
+ cx_info("CX24113 not found.\n");
goto error;
}
state->rev = rc;
switch (rc) {
case 0x43:
- info("detected CX24113 variant\n");
+ cx_info("detected CX24113 variant\n");
break;
case REV_CX24113:
- info("successfully detected\n");
+ cx_info("successfully detected\n");
break;
default:
- err("unsupported device id: %x\n", state->rev);
+ cx_err("unsupported device id: %x\n", state->rev);
goto error;
}
state->ver = cx24113_readreg(state, 0x01);
- info("version: %x\n", state->ver);
+ cx_info("version: %x\n", state->ver);
/* create dvb_frontend */
memcpy(&fe->ops.tuner_ops, &cx24113_tuner_ops,
diff --git a/drivers/media/dvb/frontends/cx24116.c b/drivers/media/dvb/frontends/cx24116.c
index 95c6465b87a..ccd05255d52 100644
--- a/drivers/media/dvb/frontends/cx24116.c
+++ b/drivers/media/dvb/frontends/cx24116.c
@@ -1452,11 +1452,7 @@ tuned: /* Set/Reset B/W */
cmd.args[0x00] = CMD_BANDWIDTH;
cmd.args[0x01] = 0x00;
cmd.len = 0x02;
- ret = cx24116_cmd_execute(fe, &cmd);
- if (ret != 0)
- return ret;
-
- return ret;
+ return cx24116_cmd_execute(fe, &cmd);
}
static int cx24116_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *params,
diff --git a/drivers/media/dvb/frontends/cxd2820r.h b/drivers/media/dvb/frontends/cxd2820r.h
index ad17845123d..2906582dc94 100644
--- a/drivers/media/dvb/frontends/cxd2820r.h
+++ b/drivers/media/dvb/frontends/cxd2820r.h
@@ -55,13 +55,13 @@ struct cxd2820r_config {
* Default: 0
* Values: 0, 1
*/
- int if_agc_polarity:1;
+ bool if_agc_polarity;
/* Spectrum inversion.
* Default: 0
* Values: 0, 1
*/
- int spec_inv:1;
+ bool spec_inv;
/* IFs for all used modes.
* Default: none, must set
diff --git a/drivers/media/dvb/frontends/cxd2820r_core.c b/drivers/media/dvb/frontends/cxd2820r_core.c
index 0779f69db79..d416e85589e 100644
--- a/drivers/media/dvb/frontends/cxd2820r_core.c
+++ b/drivers/media/dvb/frontends/cxd2820r_core.c
@@ -314,6 +314,8 @@ static int cxd2820r_set_frontend(struct dvb_frontend *fe,
} else if (c->delivery_system == SYS_DVBT2) {
/* DVB-T => DVB-T2 */
ret = cxd2820r_sleep_t(fe);
+ if (ret)
+ break;
ret = cxd2820r_set_frontend_t2(fe, p);
}
break;
@@ -324,6 +326,8 @@ static int cxd2820r_set_frontend(struct dvb_frontend *fe,
} else if (c->delivery_system == SYS_DVBT) {
/* DVB-T2 => DVB-T */
ret = cxd2820r_sleep_t2(fe);
+ if (ret)
+ break;
ret = cxd2820r_set_frontend_t(fe, p);
}
break;
@@ -740,12 +744,13 @@ static int cxd2820r_tuner_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg msg[], int num)
{
struct cxd2820r_priv *priv = i2c_get_adapdata(i2c_adap);
- u8 obuf[msg[0].len + 2];
+ int ret;
+ u8 *obuf = kmalloc(msg[0].len + 2, GFP_KERNEL);
struct i2c_msg msg2[2] = {
{
.addr = priv->cfg.i2c_address,
.flags = 0,
- .len = sizeof(obuf),
+ .len = msg[0].len + 2,
.buf = obuf,
}, {
.addr = priv->cfg.i2c_address,
@@ -755,15 +760,24 @@ static int cxd2820r_tuner_i2c_xfer(struct i2c_adapter *i2c_adap,
}
};
+ if (!obuf)
+ return -ENOMEM;
+
obuf[0] = 0x09;
obuf[1] = (msg[0].addr << 1);
if (num == 2) { /* I2C read */
obuf[1] = (msg[0].addr << 1) | I2C_M_RD; /* I2C RD flag */
- msg2[0].len = sizeof(obuf) - 1; /* maybe HW bug ? */
+ msg2[0].len = msg[0].len + 2 - 1; /* '-1' maybe HW bug ? */
}
memcpy(&obuf[2], msg[0].buf, msg[0].len);
- return i2c_transfer(priv->i2c, msg2, num);
+ ret = i2c_transfer(priv->i2c, msg2, num);
+ if (ret < 0)
+ warn("tuner i2c failed ret:%d", ret);
+
+ kfree(obuf);
+
+ return ret;
}
static struct i2c_algorithm cxd2820r_tuner_i2c_algo = {
diff --git a/drivers/media/dvb/frontends/cxd2820r_priv.h b/drivers/media/dvb/frontends/cxd2820r_priv.h
index 25adbeefa6d..0c0ebc9d5c4 100644
--- a/drivers/media/dvb/frontends/cxd2820r_priv.h
+++ b/drivers/media/dvb/frontends/cxd2820r_priv.h
@@ -55,13 +55,13 @@ struct cxd2820r_priv {
struct mutex fe_lock; /* FE lock */
int active_fe:2; /* FE lock, -1=NONE, 0=DVB-T/T2, 1=DVB-C */
- int ber_running:1;
+ bool ber_running;
u8 bank[2];
u8 gpio[3];
fe_delivery_system_t delivery_system;
- int last_tune_failed:1; /* for switch between T and T2 tune */
+ bool last_tune_failed; /* for switch between T and T2 tune */
};
/* cxd2820r_core.c */
diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c
index 0c9f40c2a25..a64a538ba36 100644
--- a/drivers/media/dvb/frontends/dib7000p.c
+++ b/drivers/media/dvb/frontends/dib7000p.c
@@ -2336,6 +2336,11 @@ struct dvb_frontend *dib7000p_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
request_firmware() will hit an OOPS (this should be moved somewhere
more common) */
+ /* FIXME: make sure the dev.parent field is initialized, or else
+ request_firmware() will hit an OOPS (this should be moved somewhere
+ more common) */
+ st->i2c_master.gated_tuner_i2c_adap.dev.parent = i2c_adap->dev.parent;
+
dibx000_init_i2c_master(&st->i2c_master, DIB7000P, st->i2c_adap, st->i2c_addr);
/* init 7090 tuner adapter */
diff --git a/drivers/media/dvb/frontends/drxd_hard.c b/drivers/media/dvb/frontends/drxd_hard.c
index ea4c1c361d2..2238bf0be95 100644
--- a/drivers/media/dvb/frontends/drxd_hard.c
+++ b/drivers/media/dvb/frontends/drxd_hard.c
@@ -28,7 +28,6 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
-#include <linux/version.h>
#include <asm/div64.h>
#include "dvb_frontend.h"
@@ -233,7 +232,7 @@ static int i2c_read(struct i2c_adapter *adap,
return 0;
}
-inline u32 MulDiv32(u32 a, u32 b, u32 c)
+static inline u32 MulDiv32(u32 a, u32 b, u32 c)
{
u64 tmp64;
@@ -910,14 +909,16 @@ static int load_firmware(struct drxd_state *state, const char *fw_name)
return -EIO;
}
- state->microcode = kzalloc(fw->size, GFP_KERNEL);
+ state->microcode = kmalloc(fw->size, GFP_KERNEL);
if (state->microcode == NULL) {
- printk(KERN_ERR "drxd: firmware load failure: nomemory\n");
+ release_firmware(fw);
+ printk(KERN_ERR "drxd: firmware load failure: no memory\n");
return -ENOMEM;
}
memcpy(state->microcode, fw->data, fw->size);
state->microcode_length = fw->size;
+ release_firmware(fw);
return 0;
}
diff --git a/drivers/media/dvb/frontends/drxk.h b/drivers/media/dvb/frontends/drxk.h
new file mode 100644
index 00000000000..58baf419560
--- /dev/null
+++ b/drivers/media/dvb/frontends/drxk.h
@@ -0,0 +1,47 @@
+#ifndef _DRXK_H_
+#define _DRXK_H_
+
+#include <linux/types.h>
+#include <linux/i2c.h>
+
+/**
+ * struct drxk_config - Configure the initial parameters for DRX-K
+ *
+ * adr: I2C address of the DRX-K
+ * single_master: Device is in single-master mode
+ * no_i2c_bridge: Don't switch the I2C bridge to talk to the tuner
+ * antenna_gpio: GPIO bit used to control the antenna
+ * antenna_dvbt: polarity of the antenna-select GPIO. A value of 1
+ * means that GPIO=1 selects DVB-C and GPIO=0 selects DVB-T; zero
+ * means the opposite.
+ * microcode_name: Name of the firmware file with the microcode
+ *
+ * On the *_gpio vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is
+ * UIO-3.
+ */
+struct drxk_config {
+ u8 adr;
+ bool single_master;
+ bool no_i2c_bridge;
+
+ bool antenna_dvbt;
+ u16 antenna_gpio;
+
+ const char *microcode_name;
+};
+
+#if defined(CONFIG_DVB_DRXK) || (defined(CONFIG_DVB_DRXK_MODULE) \
+ && defined(MODULE))
+extern struct dvb_frontend *drxk_attach(const struct drxk_config *config,
+ struct i2c_adapter *i2c,
+ struct dvb_frontend **fe_t);
+#else
+static inline struct dvb_frontend *drxk_attach(const struct drxk_config *config,
+ struct i2c_adapter *i2c,
+ struct dvb_frontend **fe_t)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
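
Since the header above only declares the config structure and the attach hook, a minimal attach sketch follows. Everything in it is a placeholder chosen for illustration (the 0x29 address, the firmware name, the helper names), and the third drxk_attach() argument is simply forwarded as a second frontend pointer, since the header does not document its role.

#include "drxk.h"

/* Illustrative only: attach a DRX-K sitting on the bridge's i2c adapter. */
static const struct drxk_config example_drxk_config = {
	.adr		= 0x29,			/* assumed demod I2C address */
	.single_master	= true,
	.microcode_name	= "drxk-example.fw",	/* placeholder firmware name */
};

static int example_attach_drxk(struct i2c_adapter *i2c,
			       struct dvb_frontend **fe,
			       struct dvb_frontend **fe_t)
{
	*fe = drxk_attach(&example_drxk_config, i2c, fe_t);
	return *fe ? 0 : -ENODEV;
}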
diff --git a/drivers/media/dvb/frontends/drxk_hard.c b/drivers/media/dvb/frontends/drxk_hard.c
new file mode 100644
index 00000000000..41b083820da
--- /dev/null
+++ b/drivers/media/dvb/frontends/drxk_hard.c
@@ -0,0 +1,6454 @@
+/*
+ * drxk_hard: DRX-K DVB-C/T demodulator driver
+ *
+ * Copyright (C) 2010-2011 Digital Devices GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/version.h>
+#include <asm/div64.h>
+
+#include "dvb_frontend.h"
+#include "drxk.h"
+#include "drxk_hard.h"
+
+static int PowerDownDVBT(struct drxk_state *state, bool setPowerMode);
+static int PowerDownQAM(struct drxk_state *state);
+static int SetDVBTStandard(struct drxk_state *state,
+ enum OperationMode oMode);
+static int SetQAMStandard(struct drxk_state *state,
+ enum OperationMode oMode);
+static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
+ s32 tunerFreqOffset);
+static int DVBTStart(struct drxk_state *state);
+static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz,
+ s32 tunerFreqOffset);
+static int GetQAMLockStatus(struct drxk_state *state, u32 *pLockStatus);
+static int GetDVBTLockStatus(struct drxk_state *state, u32 *pLockStatus);
+static int SwitchAntennaToQAM(struct drxk_state *state);
+static int SwitchAntennaToDVBT(struct drxk_state *state);
+
+static bool IsDVBT(struct drxk_state *state)
+{
+ return state->m_OperationMode == OM_DVBT;
+}
+
+static bool IsQAM(struct drxk_state *state)
+{
+ return state->m_OperationMode == OM_QAM_ITU_A ||
+ state->m_OperationMode == OM_QAM_ITU_B ||
+ state->m_OperationMode == OM_QAM_ITU_C;
+}
+
+bool IsA1WithPatchCode(struct drxk_state *state)
+{
+ return state->m_DRXK_A1_PATCH_CODE;
+}
+
+bool IsA1WithRomCode(struct drxk_state *state)
+{
+ return state->m_DRXK_A1_ROM_CODE;
+}
+
+#define NOA1ROM 0
+
+#define DRXDAP_FASI_SHORT_FORMAT(addr) (((addr) & 0xFC30FF80) == 0)
+#define DRXDAP_FASI_LONG_FORMAT(addr) (((addr) & 0xFC30FF80) != 0)
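
A quick, hand-checked illustration of how the mask above splits the register map (the two addresses are picked only to show the two forms):

/*
 * 0x0000007F & 0xFC30FF80 == 0  ->  short, 2-byte address form
 * 0x00000080 & 0xFC30FF80 != 0  ->  long, 4-byte address form
 * read16_flags() and write16_flags() below build the I2C payload
 * accordingly.
 */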
+
+#define DEFAULT_MER_83 165
+#define DEFAULT_MER_93 250
+
+#ifndef DRXK_MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH
+#define DRXK_MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH (0x02)
+#endif
+
+#ifndef DRXK_MPEG_PARALLEL_OUTPUT_PIN_DRIVE_STRENGTH
+#define DRXK_MPEG_PARALLEL_OUTPUT_PIN_DRIVE_STRENGTH (0x03)
+#endif
+
+#ifndef DRXK_MPEG_OUTPUT_CLK_DRIVE_STRENGTH
+#define DRXK_MPEG_OUTPUT_CLK_DRIVE_STRENGTH (0x06)
+#endif
+
+#define DEFAULT_DRXK_MPEG_LOCK_TIMEOUT 700
+#define DEFAULT_DRXK_DEMOD_LOCK_TIMEOUT 500
+
+#ifndef DRXK_KI_RAGC_ATV
+#define DRXK_KI_RAGC_ATV 4
+#endif
+#ifndef DRXK_KI_IAGC_ATV
+#define DRXK_KI_IAGC_ATV 6
+#endif
+#ifndef DRXK_KI_DAGC_ATV
+#define DRXK_KI_DAGC_ATV 7
+#endif
+
+#ifndef DRXK_KI_RAGC_QAM
+#define DRXK_KI_RAGC_QAM 3
+#endif
+#ifndef DRXK_KI_IAGC_QAM
+#define DRXK_KI_IAGC_QAM 4
+#endif
+#ifndef DRXK_KI_DAGC_QAM
+#define DRXK_KI_DAGC_QAM 7
+#endif
+#ifndef DRXK_KI_RAGC_DVBT
+#define DRXK_KI_RAGC_DVBT (IsA1WithPatchCode(state) ? 3 : 2)
+#endif
+#ifndef DRXK_KI_IAGC_DVBT
+#define DRXK_KI_IAGC_DVBT (IsA1WithPatchCode(state) ? 4 : 2)
+#endif
+#ifndef DRXK_KI_DAGC_DVBT
+#define DRXK_KI_DAGC_DVBT (IsA1WithPatchCode(state) ? 10 : 7)
+#endif
+
+#ifndef DRXK_AGC_DAC_OFFSET
+#define DRXK_AGC_DAC_OFFSET (0x800)
+#endif
+
+#ifndef DRXK_BANDWIDTH_8MHZ_IN_HZ
+#define DRXK_BANDWIDTH_8MHZ_IN_HZ (0x8B8249L)
+#endif
+
+#ifndef DRXK_BANDWIDTH_7MHZ_IN_HZ
+#define DRXK_BANDWIDTH_7MHZ_IN_HZ (0x7A1200L)
+#endif
+
+#ifndef DRXK_BANDWIDTH_6MHZ_IN_HZ
+#define DRXK_BANDWIDTH_6MHZ_IN_HZ (0x68A1B6L)
+#endif
+
+#ifndef DRXK_QAM_SYMBOLRATE_MAX
+#define DRXK_QAM_SYMBOLRATE_MAX (7233000)
+#endif
+
+#define DRXK_BL_ROM_OFFSET_TAPS_DVBT 56
+#define DRXK_BL_ROM_OFFSET_TAPS_ITU_A 64
+#define DRXK_BL_ROM_OFFSET_TAPS_ITU_C 0x5FE0
+#define DRXK_BL_ROM_OFFSET_TAPS_BG 24
+#define DRXK_BL_ROM_OFFSET_TAPS_DKILLP 32
+#define DRXK_BL_ROM_OFFSET_TAPS_NTSC 40
+#define DRXK_BL_ROM_OFFSET_TAPS_FM 48
+#define DRXK_BL_ROM_OFFSET_UCODE 0
+
+#define DRXK_BLC_TIMEOUT 100
+
+#define DRXK_BLCC_NR_ELEMENTS_TAPS 2
+#define DRXK_BLCC_NR_ELEMENTS_UCODE 6
+
+#define DRXK_BLDC_NR_ELEMENTS_TAPS 28
+
+#ifndef DRXK_OFDM_NE_NOTCH_WIDTH
+#define DRXK_OFDM_NE_NOTCH_WIDTH (4)
+#endif
+
+#define DRXK_QAM_SL_SIG_POWER_QAM16 (40960)
+#define DRXK_QAM_SL_SIG_POWER_QAM32 (20480)
+#define DRXK_QAM_SL_SIG_POWER_QAM64 (43008)
+#define DRXK_QAM_SL_SIG_POWER_QAM128 (20992)
+#define DRXK_QAM_SL_SIG_POWER_QAM256 (43520)
+
+static unsigned int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages");
+
+#define dprintk(level, fmt, arg...) do { \
+if (debug >= level) \
+ printk(KERN_DEBUG "drxk: %s" fmt, __func__, ## arg); \
+} while (0)
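
From the dprintk() call sites in this file, the levels roughly map as follows; the sysfs path assumes the objects are linked into drxk.ko as set up in the Makefile hunk above, so treat it as a best guess rather than documented behaviour.

/*
 * debug=1: function entry traces
 * debug=2: register read/write traces
 * debug=3: raw i2c payload dumps
 * With the module loaded, the 0644 parameter can be changed at runtime:
 *	echo 3 > /sys/module/drxk/parameters/debug
 */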
+
+
+static inline u32 MulDiv32(u32 a, u32 b, u32 c)
+{
+ u64 tmp64;
+
+ tmp64 = (u64) a * (u64) b;
+ do_div(tmp64, c);
+
+ return (u32) tmp64;
+}
+
+inline u32 Frac28a(u32 a, u32 c)
+{
+ int i = 0;
+ u32 Q1 = 0;
+ u32 R0 = 0;
+
+ R0 = (a % c) << 4; /* 32-28 == 4 shifts possible at max */
+ Q1 = a / c; /* integer part, only the 4 least significant bits
+ will be visible in the result */
+
+ /* division using radix 16, 7 nibbles in the result */
+ for (i = 0; i < 7; i++) {
+ Q1 = (Q1 << 4) | (R0 / c);
+ R0 = (R0 % c) << 4;
+ }
+ /* rounding */
+ if ((R0 >> 3) >= c)
+ Q1++;
+
+ return Q1;
+}
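
A worked value for the fixed-point helper above, computed by hand from the loop, so treat it as illustrative:

/*
 * Frac28a(1, 3) == 0x05555555: the integer part is 0 and each of the
 * seven radix-16 fraction digits comes out as 0x5, i.e. 2^28 / 3
 * truncated (the round-up condition does not trigger for this input).
 * As noted above, only the four least significant bits of the integer
 * part survive in the result.
 */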
+
+static u32 Log10Times100(u32 x)
+{
+ static const u8 scale = 15;
+ static const u8 indexWidth = 5;
+ u8 i = 0;
+ u32 y = 0;
+ u32 d = 0;
+ u32 k = 0;
+ u32 r = 0;
+ /*
+ log2lut[n] = (1<<scale) * 200 * log2(1.0 + ((1.0/(1<<INDEXWIDTH)) * n))
+ 0 <= n < ((1<<INDEXWIDTH)+1)
+ */
+
+ static const u32 log2lut[] = {
+ 0, /* 0.000000 */
+ 290941, /* 290941.300628 */
+ 573196, /* 573196.476418 */
+ 847269, /* 847269.179851 */
+ 1113620, /* 1113620.489452 */
+ 1372674, /* 1372673.576986 */
+ 1624818, /* 1624817.752104 */
+ 1870412, /* 1870411.981536 */
+ 2109788, /* 2109787.962654 */
+ 2343253, /* 2343252.817465 */
+ 2571091, /* 2571091.461923 */
+ 2793569, /* 2793568.696416 */
+ 3010931, /* 3010931.055901 */
+ 3223408, /* 3223408.452106 */
+ 3431216, /* 3431215.635215 */
+ 3634553, /* 3634553.498355 */
+ 3833610, /* 3833610.244726 */
+ 4028562, /* 4028562.434393 */
+ 4219576, /* 4219575.925308 */
+ 4406807, /* 4406806.721144 */
+ 4590402, /* 4590401.736809 */
+ 4770499, /* 4770499.491025 */
+ 4947231, /* 4947230.734179 */
+ 5120719, /* 5120719.018555 */
+ 5291081, /* 5291081.217197 */
+ 5458428, /* 5458427.996830 */
+ 5622864, /* 5622864.249668 */
+ 5784489, /* 5784489.488298 */
+ 5943398, /* 5943398.207380 */
+ 6099680, /* 6099680.215452 */
+ 6253421, /* 6253420.939751 */
+ 6404702, /* 6404701.706649 */
+ 6553600, /* 6553600.000000 */
+ };
+
+
+ if (x == 0)
+ return 0;
+
+ /* Scale x (normalize) */
+ /* computing y in log(x/y) = log(x) - log(y) */
+ if ((x & ((0xffffffff) << (scale + 1))) == 0) {
+ for (k = scale; k > 0; k--) {
+ if (x & (((u32) 1) << scale))
+ break;
+ x <<= 1;
+ }
+ } else {
+ for (k = scale; k < 31; k++) {
+ if ((x & (((u32) (-1)) << (scale + 1))) == 0)
+ break;
+ x >>= 1;
+ }
+ }
+ /*
+ Now x has binary point between bit[scale] and bit[scale-1]
+ and 1.0 <= x < 2.0 */
+
+ /* correction for division: log(x) = log(x/y) + log(y) */
+ y = k * ((((u32) 1) << scale) * 200);
+
+ /* remove integer part */
+ x &= ((((u32) 1) << scale) - 1);
+ /* get index */
+ i = (u8) (x >> (scale - indexWidth));
+ /* compute delta (x - a) */
+ d = x & ((((u32) 1) << (scale - indexWidth)) - 1);
+ /* compute log, multiplication (d* (..)) must be within range ! */
+ y += log2lut[i] +
+ ((d * (log2lut[i + 1] - log2lut[i])) >> (scale - indexWidth));
+ /* Convert to log10() */
+ y /= 108853; /* (log2(10) << scale) */
+ r = (y >> 1);
+ /* rounding */
+ if (y & ((u32) 1))
+ r++;
+ return r;
+}
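
Two reference points for the lookup-table approximation above; exact outputs may differ by the final +/-1 rounding step:

/*
 * Log10Times100(1000) ~= 300   (100 * log10(1000))
 * Log10Times100(2)    ~=  30   (100 * log10(2) = 30.1)
 * This lets callers derive dB-style quality figures without touching
 * floating point.
 */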
+
+/****************************************************************************/
+/* I2C **********************************************************************/
+/****************************************************************************/
+
+static int i2c_read1(struct i2c_adapter *adapter, u8 adr, u8 *val)
+{
+ struct i2c_msg msgs[1] = { {.addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = 1}
+ };
+
+ return i2c_transfer(adapter, msgs, 1);
+}
+
+static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
+{
+ int status;
+ struct i2c_msg msg = {
+ .addr = adr, .flags = 0, .buf = data, .len = len };
+
+ dprintk(3, ":");
+ if (debug > 2) {
+ int i;
+ for (i = 0; i < len; i++)
+ printk(KERN_CONT " %02x", data[i]);
+ printk(KERN_CONT "\n");
+ }
+ status = i2c_transfer(adap, &msg, 1);
+ if (status >= 0 && status != 1)
+ status = -EIO;
+
+ if (status < 0)
+ printk(KERN_ERR "drxk: i2c write error at addr 0x%02x\n", adr);
+
+ return status;
+}
+
+static int i2c_read(struct i2c_adapter *adap,
+ u8 adr, u8 *msg, int len, u8 *answ, int alen)
+{
+ int status;
+ struct i2c_msg msgs[2] = {
+ {.addr = adr, .flags = 0,
+ .buf = msg, .len = len},
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = answ, .len = alen}
+ };
+
+ status = i2c_transfer(adap, msgs, 2);
+ if (status != 2) {
+ if (debug > 2)
+ printk(KERN_CONT ": ERROR!\n");
+ if (status >= 0)
+ status = -EIO;
+
+ printk(KERN_ERR "drxk: i2c read error at addr 0x%02x\n", adr);
+ return status;
+ }
+ if (debug > 2) {
+ int i;
+ dprintk(2, ": read from ");
+ for (i = 0; i < len; i++)
+ printk(KERN_CONT " %02x", msg[i]);
+ printk(KERN_CONT "Value = ");
+ for (i = 0; i < alen; i++)
+ printk(KERN_CONT " %02x", answ[i]);
+ printk(KERN_CONT "\n");
+ }
+ return 0;
+}
+
+static int read16_flags(struct drxk_state *state, u32 reg, u16 *data, u8 flags)
+{
+ int status;
+ u8 adr = state->demod_address, mm1[4], mm2[2], len;
+
+ if (state->single_master)
+ flags |= 0xC0;
+
+ if (DRXDAP_FASI_LONG_FORMAT(reg) || (flags != 0)) {
+ mm1[0] = (((reg << 1) & 0xFF) | 0x01);
+ mm1[1] = ((reg >> 16) & 0xFF);
+ mm1[2] = ((reg >> 24) & 0xFF) | flags;
+ mm1[3] = ((reg >> 7) & 0xFF);
+ len = 4;
+ } else {
+ mm1[0] = ((reg << 1) & 0xFF);
+ mm1[1] = (((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0));
+ len = 2;
+ }
+ dprintk(2, "(0x%08x, 0x%02x)\n", reg, flags);
+ status = i2c_read(state->i2c, adr, mm1, len, mm2, 2);
+ if (status < 0)
+ return status;
+ if (data)
+ *data = mm2[0] | (mm2[1] << 8);
+
+ return 0;
+}
+
+static int read16(struct drxk_state *state, u32 reg, u16 *data)
+{
+ return read16_flags(state, reg, data, 0);
+}
+
+static int read32_flags(struct drxk_state *state, u32 reg, u32 *data, u8 flags)
+{
+ int status;
+ u8 adr = state->demod_address, mm1[4], mm2[4], len;
+
+ if (state->single_master)
+ flags |= 0xC0;
+
+ if (DRXDAP_FASI_LONG_FORMAT(reg) || (flags != 0)) {
+ mm1[0] = (((reg << 1) & 0xFF) | 0x01);
+ mm1[1] = ((reg >> 16) & 0xFF);
+ mm1[2] = ((reg >> 24) & 0xFF) | flags;
+ mm1[3] = ((reg >> 7) & 0xFF);
+ len = 4;
+ } else {
+ mm1[0] = ((reg << 1) & 0xFF);
+ mm1[1] = (((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0));
+ len = 2;
+ }
+ dprintk(2, "(0x%08x, 0x%02x)\n", reg, flags);
+ status = i2c_read(state->i2c, adr, mm1, len, mm2, 4);
+ if (status < 0)
+ return status;
+ if (data)
+ *data = mm2[0] | (mm2[1] << 8) |
+ (mm2[2] << 16) | (mm2[3] << 24);
+
+ return 0;
+}
+
+static int read32(struct drxk_state *state, u32 reg, u32 *data)
+{
+ return read32_flags(state, reg, data, 0);
+}
+
+static int write16_flags(struct drxk_state *state, u32 reg, u16 data, u8 flags)
+{
+ u8 adr = state->demod_address, mm[6], len;
+
+ if (state->single_master)
+ flags |= 0xC0;
+ if (DRXDAP_FASI_LONG_FORMAT(reg) || (flags != 0)) {
+ mm[0] = (((reg << 1) & 0xFF) | 0x01);
+ mm[1] = ((reg >> 16) & 0xFF);
+ mm[2] = ((reg >> 24) & 0xFF) | flags;
+ mm[3] = ((reg >> 7) & 0xFF);
+ len = 4;
+ } else {
+ mm[0] = ((reg << 1) & 0xFF);
+ mm[1] = (((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0));
+ len = 2;
+ }
+ mm[len] = data & 0xff;
+ mm[len + 1] = (data >> 8) & 0xff;
+
+ dprintk(2, "(0x%08x, 0x%04x, 0x%02x)\n", reg, data, flags);
+ return i2c_write(state->i2c, adr, mm, len + 2);
+}
+
+static int write16(struct drxk_state *state, u32 reg, u16 data)
+{
+ return write16_flags(state, reg, data, 0);
+}
+
+static int write32_flags(struct drxk_state *state, u32 reg, u32 data, u8 flags)
+{
+ u8 adr = state->demod_address, mm[8], len;
+
+ if (state->single_master)
+ flags |= 0xC0;
+ if (DRXDAP_FASI_LONG_FORMAT(reg) || (flags != 0)) {
+ mm[0] = (((reg << 1) & 0xFF) | 0x01);
+ mm[1] = ((reg >> 16) & 0xFF);
+ mm[2] = ((reg >> 24) & 0xFF) | flags;
+ mm[3] = ((reg >> 7) & 0xFF);
+ len = 4;
+ } else {
+ mm[0] = ((reg << 1) & 0xFF);
+ mm[1] = (((reg >> 16) & 0x0F) | ((reg >> 18) & 0xF0));
+ len = 2;
+ }
+ mm[len] = data & 0xff;
+ mm[len + 1] = (data >> 8) & 0xff;
+ mm[len + 2] = (data >> 16) & 0xff;
+ mm[len + 3] = (data >> 24) & 0xff;
+ dprintk(2, "(0x%08x, 0x%08x, 0x%02x)\n", reg, data, flags);
+
+ return i2c_write(state->i2c, adr, mm, len + 4);
+}
+
+static int write32(struct drxk_state *state, u32 reg, u32 data)
+{
+ return write32_flags(state, reg, data, 0);
+}
+
+static int write_block(struct drxk_state *state, u32 Address,
+ const int BlockSize, const u8 pBlock[])
+{
+ int status = 0, BlkSize = BlockSize;
+ u8 Flags = 0;
+
+ if (state->single_master)
+ Flags |= 0xC0;
+
+ while (BlkSize > 0) {
+ int Chunk = BlkSize > state->m_ChunkSize ?
+ state->m_ChunkSize : BlkSize;
+ u8 *AdrBuf = &state->Chunk[0];
+ u32 AdrLength = 0;
+
+ if (DRXDAP_FASI_LONG_FORMAT(Address) || (Flags != 0)) {
+ AdrBuf[0] = (((Address << 1) & 0xFF) | 0x01);
+ AdrBuf[1] = ((Address >> 16) & 0xFF);
+ AdrBuf[2] = ((Address >> 24) & 0xFF);
+ AdrBuf[3] = ((Address >> 7) & 0xFF);
+ AdrBuf[2] |= Flags;
+ AdrLength = 4;
+ if (Chunk == state->m_ChunkSize)
+ Chunk -= 2;
+ } else {
+ AdrBuf[0] = ((Address << 1) & 0xFF);
+ AdrBuf[1] = (((Address >> 16) & 0x0F) |
+ ((Address >> 18) & 0xF0));
+ AdrLength = 2;
+ }
+ memcpy(&state->Chunk[AdrLength], pBlock, Chunk);
+ dprintk(2, "(0x%08x, 0x%02x)\n", Address, Flags);
+ if (debug > 1) {
+ int i;
+ if (pBlock)
+ for (i = 0; i < Chunk; i++)
+ printk(KERN_CONT " %02x", pBlock[i]);
+ printk(KERN_CONT "\n");
+ }
+ status = i2c_write(state->i2c, state->demod_address,
+ &state->Chunk[0], Chunk + AdrLength);
+ if (status < 0) {
+ printk(KERN_ERR "drxk: %s: i2c write error at addr 0x%02x\n",
+ __func__, Address);
+ break;
+ }
+ pBlock += Chunk;
+ Address += (Chunk >> 1);
+ BlkSize -= Chunk;
+ }
+ return status;
+}
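
To make the chunking concrete, here is one hand-computed split using the m_ChunkSize = 124 default assigned in init_state() further down; it is illustrative only:

/*
 * A 256-byte long-format write goes out as three i2c transfers carrying
 * 122 + 122 + 12 payload bytes (a full chunk is trimmed by 2 bytes when
 * the 4-byte address form is in use), and Address advances by Chunk/2
 * 16-bit words between transfers.
 */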
+
+#ifndef DRXK_MAX_RETRIES_POWERUP
+#define DRXK_MAX_RETRIES_POWERUP 20
+#endif
+
+int PowerUpDevice(struct drxk_state *state)
+{
+ int status;
+ u8 data = 0;
+ u16 retryCount = 0;
+
+ dprintk(1, "\n");
+
+ status = i2c_read1(state->i2c, state->demod_address, &data);
+ if (status < 0) {
+ do {
+ data = 0;
+ status = i2c_write(state->i2c, state->demod_address,
+ &data, 1);
+ msleep(10);
+ retryCount++;
+ if (status < 0)
+ continue;
+ status = i2c_read1(state->i2c, state->demod_address,
+ &data);
+ } while (status < 0 &&
+ (retryCount < DRXK_MAX_RETRIES_POWERUP));
+ if (status < 0 && retryCount >= DRXK_MAX_RETRIES_POWERUP)
+ goto error;
+ }
+
+ /* Make sure all clk domains are active */
+ status = write16(state, SIO_CC_PWD_MODE__A, SIO_CC_PWD_MODE_LEVEL_NONE);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY);
+ if (status < 0)
+ goto error;
+ /* Enable pll lock tests */
+ status = write16(state, SIO_CC_PLL_LOCK__A, 1);
+ if (status < 0)
+ goto error;
+
+ state->m_currentPowerMode = DRX_POWER_UP;
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+
+static int init_state(struct drxk_state *state)
+{
+ /*
+ * FIXME: most (all?) of the values below should be moved into
+ * struct drxk_config, as they are probably board-specific
+ */
+ u32 ulVSBIfAgcMode = DRXK_AGC_CTRL_AUTO;
+ u32 ulVSBIfAgcOutputLevel = 0;
+ u32 ulVSBIfAgcMinLevel = 0;
+ u32 ulVSBIfAgcMaxLevel = 0x7FFF;
+ u32 ulVSBIfAgcSpeed = 3;
+
+ u32 ulVSBRfAgcMode = DRXK_AGC_CTRL_AUTO;
+ u32 ulVSBRfAgcOutputLevel = 0;
+ u32 ulVSBRfAgcMinLevel = 0;
+ u32 ulVSBRfAgcMaxLevel = 0x7FFF;
+ u32 ulVSBRfAgcSpeed = 3;
+ u32 ulVSBRfAgcTop = 9500;
+ u32 ulVSBRfAgcCutOffCurrent = 4000;
+
+ u32 ulATVIfAgcMode = DRXK_AGC_CTRL_AUTO;
+ u32 ulATVIfAgcOutputLevel = 0;
+ u32 ulATVIfAgcMinLevel = 0;
+ u32 ulATVIfAgcMaxLevel = 0;
+ u32 ulATVIfAgcSpeed = 3;
+
+ u32 ulATVRfAgcMode = DRXK_AGC_CTRL_OFF;
+ u32 ulATVRfAgcOutputLevel = 0;
+ u32 ulATVRfAgcMinLevel = 0;
+ u32 ulATVRfAgcMaxLevel = 0;
+ u32 ulATVRfAgcTop = 9500;
+ u32 ulATVRfAgcCutOffCurrent = 4000;
+ u32 ulATVRfAgcSpeed = 3;
+
+ u32 ulQual83 = DEFAULT_MER_83;
+ u32 ulQual93 = DEFAULT_MER_93;
+
+ u32 ulDVBTStaticTSClock = 1;
+ u32 ulDVBCStaticTSClock = 1;
+
+ u32 ulMpegLockTimeOut = DEFAULT_DRXK_MPEG_LOCK_TIMEOUT;
+ u32 ulDemodLockTimeOut = DEFAULT_DRXK_DEMOD_LOCK_TIMEOUT;
+
+ /* io_pad_cfg register (8 bit reg.) MSB bit is 1 (default value) */
+ /* io_pad_cfg_mode output mode is drive always */
+ /* io_pad_cfg_drive is set to power 2 (23 mA) */
+ u32 ulGPIOCfg = 0x0113;
+ u32 ulSerialMode = 1;
+ u32 ulInvertTSClock = 0;
+ u32 ulTSDataStrength = DRXK_MPEG_SERIAL_OUTPUT_PIN_DRIVE_STRENGTH;
+ u32 ulTSClockkStrength = DRXK_MPEG_OUTPUT_CLK_DRIVE_STRENGTH;
+ u32 ulDVBTBitrate = 50000000;
+ u32 ulDVBCBitrate = DRXK_QAM_SYMBOLRATE_MAX * 8;
+
+ u32 ulInsertRSByte = 0;
+
+ u32 ulRfMirror = 1;
+ u32 ulPowerDown = 0;
+
+ dprintk(1, "\n");
+
+ state->m_hasLNA = false;
+ state->m_hasDVBT = false;
+ state->m_hasDVBC = false;
+ state->m_hasATV = false;
+ state->m_hasOOB = false;
+ state->m_hasAudio = false;
+
+ state->m_ChunkSize = 124;
+
+ state->m_oscClockFreq = 0;
+ state->m_smartAntInverted = false;
+ state->m_bPDownOpenBridge = false;
+
+ /* real system clock frequency in kHz */
+ state->m_sysClockFreq = 151875;
+ /* Timing div, 250ns/Psys */
+ /* Timing div = (delay (nanoseconds) * sysclk (kHz)) / 1000 */
+ state->m_HICfgTimingDiv = ((state->m_sysClockFreq / 1000) *
+ HI_I2C_DELAY) / 1000;
+ /* Clipping */
+ if (state->m_HICfgTimingDiv > SIO_HI_RA_RAM_PAR_2_CFG_DIV__M)
+ state->m_HICfgTimingDiv = SIO_HI_RA_RAM_PAR_2_CFG_DIV__M;
+ state->m_HICfgWakeUpKey = (state->demod_address << 1);
+ /* port/bridge/power down ctrl */
+ state->m_HICfgCtrl = SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE;
+
+ state->m_bPowerDown = (ulPowerDown != 0);
+
+ state->m_DRXK_A1_PATCH_CODE = false;
+ state->m_DRXK_A1_ROM_CODE = false;
+ state->m_DRXK_A2_ROM_CODE = false;
+ state->m_DRXK_A3_ROM_CODE = false;
+ state->m_DRXK_A2_PATCH_CODE = false;
+ state->m_DRXK_A3_PATCH_CODE = false;
+
+ /* Init AGC and PGA parameters */
+ /* VSB IF */
+ state->m_vsbIfAgcCfg.ctrlMode = (ulVSBIfAgcMode);
+ state->m_vsbIfAgcCfg.outputLevel = (ulVSBIfAgcOutputLevel);
+ state->m_vsbIfAgcCfg.minOutputLevel = (ulVSBIfAgcMinLevel);
+ state->m_vsbIfAgcCfg.maxOutputLevel = (ulVSBIfAgcMaxLevel);
+ state->m_vsbIfAgcCfg.speed = (ulVSBIfAgcSpeed);
+ state->m_vsbPgaCfg = 140;
+
+ /* VSB RF */
+ state->m_vsbRfAgcCfg.ctrlMode = (ulVSBRfAgcMode);
+ state->m_vsbRfAgcCfg.outputLevel = (ulVSBRfAgcOutputLevel);
+ state->m_vsbRfAgcCfg.minOutputLevel = (ulVSBRfAgcMinLevel);
+ state->m_vsbRfAgcCfg.maxOutputLevel = (ulVSBRfAgcMaxLevel);
+ state->m_vsbRfAgcCfg.speed = (ulVSBRfAgcSpeed);
+ state->m_vsbRfAgcCfg.top = (ulVSBRfAgcTop);
+ state->m_vsbRfAgcCfg.cutOffCurrent = (ulVSBRfAgcCutOffCurrent);
+ state->m_vsbPreSawCfg.reference = 0x07;
+ state->m_vsbPreSawCfg.usePreSaw = true;
+
+ state->m_Quality83percent = DEFAULT_MER_83;
+ state->m_Quality93percent = DEFAULT_MER_93;
+ if (ulQual93 <= 500 && ulQual83 < ulQual93) {
+ state->m_Quality83percent = ulQual83;
+ state->m_Quality93percent = ulQual93;
+ }
+
+ /* ATV IF */
+ state->m_atvIfAgcCfg.ctrlMode = (ulATVIfAgcMode);
+ state->m_atvIfAgcCfg.outputLevel = (ulATVIfAgcOutputLevel);
+ state->m_atvIfAgcCfg.minOutputLevel = (ulATVIfAgcMinLevel);
+ state->m_atvIfAgcCfg.maxOutputLevel = (ulATVIfAgcMaxLevel);
+ state->m_atvIfAgcCfg.speed = (ulATVIfAgcSpeed);
+
+ /* ATV RF */
+ state->m_atvRfAgcCfg.ctrlMode = (ulATVRfAgcMode);
+ state->m_atvRfAgcCfg.outputLevel = (ulATVRfAgcOutputLevel);
+ state->m_atvRfAgcCfg.minOutputLevel = (ulATVRfAgcMinLevel);
+ state->m_atvRfAgcCfg.maxOutputLevel = (ulATVRfAgcMaxLevel);
+ state->m_atvRfAgcCfg.speed = (ulATVRfAgcSpeed);
+ state->m_atvRfAgcCfg.top = (ulATVRfAgcTop);
+ state->m_atvRfAgcCfg.cutOffCurrent = (ulATVRfAgcCutOffCurrent);
+ state->m_atvPreSawCfg.reference = 0x04;
+ state->m_atvPreSawCfg.usePreSaw = true;
+
+
+ /* DVBT RF */
+ state->m_dvbtRfAgcCfg.ctrlMode = DRXK_AGC_CTRL_OFF;
+ state->m_dvbtRfAgcCfg.outputLevel = 0;
+ state->m_dvbtRfAgcCfg.minOutputLevel = 0;
+ state->m_dvbtRfAgcCfg.maxOutputLevel = 0xFFFF;
+ state->m_dvbtRfAgcCfg.top = 0x2100;
+ state->m_dvbtRfAgcCfg.cutOffCurrent = 4000;
+ state->m_dvbtRfAgcCfg.speed = 1;
+
+
+ /* DVBT IF */
+ state->m_dvbtIfAgcCfg.ctrlMode = DRXK_AGC_CTRL_AUTO;
+ state->m_dvbtIfAgcCfg.outputLevel = 0;
+ state->m_dvbtIfAgcCfg.minOutputLevel = 0;
+ state->m_dvbtIfAgcCfg.maxOutputLevel = 9000;
+ state->m_dvbtIfAgcCfg.top = 13424;
+ state->m_dvbtIfAgcCfg.cutOffCurrent = 0;
+ state->m_dvbtIfAgcCfg.speed = 3;
+ state->m_dvbtIfAgcCfg.FastClipCtrlDelay = 30;
+ state->m_dvbtIfAgcCfg.IngainTgtMax = 30000;
+ /* state->m_dvbtPgaCfg = 140; */
+
+ state->m_dvbtPreSawCfg.reference = 4;
+ state->m_dvbtPreSawCfg.usePreSaw = false;
+
+ /* QAM RF */
+ state->m_qamRfAgcCfg.ctrlMode = DRXK_AGC_CTRL_OFF;
+ state->m_qamRfAgcCfg.outputLevel = 0;
+ state->m_qamRfAgcCfg.minOutputLevel = 6023;
+ state->m_qamRfAgcCfg.maxOutputLevel = 27000;
+ state->m_qamRfAgcCfg.top = 0x2380;
+ state->m_qamRfAgcCfg.cutOffCurrent = 4000;
+ state->m_qamRfAgcCfg.speed = 3;
+
+ /* QAM IF */
+ state->m_qamIfAgcCfg.ctrlMode = DRXK_AGC_CTRL_AUTO;
+ state->m_qamIfAgcCfg.outputLevel = 0;
+ state->m_qamIfAgcCfg.minOutputLevel = 0;
+ state->m_qamIfAgcCfg.maxOutputLevel = 9000;
+ state->m_qamIfAgcCfg.top = 0x0511;
+ state->m_qamIfAgcCfg.cutOffCurrent = 0;
+ state->m_qamIfAgcCfg.speed = 3;
+ state->m_qamIfAgcCfg.IngainTgtMax = 5119;
+ state->m_qamIfAgcCfg.FastClipCtrlDelay = 50;
+
+ state->m_qamPgaCfg = 140;
+ state->m_qamPreSawCfg.reference = 4;
+ state->m_qamPreSawCfg.usePreSaw = false;
+
+ state->m_OperationMode = OM_NONE;
+ state->m_DrxkState = DRXK_UNINITIALIZED;
+
+ /* MPEG output configuration */
+ state->m_enableMPEGOutput = true; /* If TRUE; enable MPEG output */
+ state->m_insertRSByte = false; /* If TRUE; insert RS byte */
+ state->m_enableParallel = true; /* If TRUE;
+ parallel out otherwise serial */
+ state->m_invertDATA = false; /* If TRUE; invert DATA signals */
+ state->m_invertERR = false; /* If TRUE; invert ERR signal */
+ state->m_invertSTR = false; /* If TRUE; invert STR signals */
+ state->m_invertVAL = false; /* If TRUE; invert VAL signals */
+ state->m_invertCLK = (ulInvertTSClock != 0); /* If TRUE; invert CLK signals */
+ state->m_DVBTStaticCLK = (ulDVBTStaticTSClock != 0);
+ state->m_DVBCStaticCLK = (ulDVBCStaticTSClock != 0);
+ /* If TRUE; static MPEG clockrate will be used;
+ otherwise clockrate will adapt to the bitrate of the TS */
+
+ state->m_DVBTBitrate = ulDVBTBitrate;
+ state->m_DVBCBitrate = ulDVBCBitrate;
+
+ state->m_TSDataStrength = (ulTSDataStrength & 0x07);
+ state->m_TSClockkStrength = (ulTSClockkStrength & 0x07);
+
+ /* Maximum bitrate in b/s in case static clockrate is selected */
+ state->m_mpegTsStaticBitrate = 19392658;
+ state->m_disableTEIhandling = false;
+
+ if (ulInsertRSByte)
+ state->m_insertRSByte = true;
+
+ state->m_MpegLockTimeOut = DEFAULT_DRXK_MPEG_LOCK_TIMEOUT;
+ if (ulMpegLockTimeOut < 10000)
+ state->m_MpegLockTimeOut = ulMpegLockTimeOut;
+ state->m_DemodLockTimeOut = DEFAULT_DRXK_DEMOD_LOCK_TIMEOUT;
+ if (ulDemodLockTimeOut < 10000)
+ state->m_DemodLockTimeOut = ulDemodLockTimeOut;
+
+ /* QAM defaults */
+ state->m_Constellation = DRX_CONSTELLATION_AUTO;
+ state->m_qamInterleaveMode = DRXK_QAM_I12_J17;
+ state->m_fecRsPlen = 204 * 8; /* fecRsPlen annex A */
+ state->m_fecRsPrescale = 1;
+
+ state->m_sqiSpeed = DRXK_DVBT_SQI_SPEED_MEDIUM;
+ state->m_agcFastClipCtrlDelay = 0;
+
+ state->m_GPIOCfg = (ulGPIOCfg);
+
+ state->m_bPowerDown = false;
+ state->m_currentPowerMode = DRX_POWER_DOWN;
+
+ state->m_enableParallel = (ulSerialMode == 0);
+
+ state->m_rfmirror = (ulRfMirror == 0);
+ state->m_IfAgcPol = false;
+ return 0;
+}
+
+static int DRXX_Open(struct drxk_state *state)
+{
+ int status = 0;
+ u32 jtag = 0;
+ u16 bid = 0;
+ u16 key = 0;
+
+ dprintk(1, "\n");
+ /* stop lock indicator process */
+ status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
+ if (status < 0)
+ goto error;
+ /* Check device id */
+ status = read16(state, SIO_TOP_COMM_KEY__A, &key);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY);
+ if (status < 0)
+ goto error;
+ status = read32(state, SIO_TOP_JTAGID_LO__A, &jtag);
+ if (status < 0)
+ goto error;
+ status = read16(state, SIO_PDR_UIO_IN_HI__A, &bid);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_TOP_COMM_KEY__A, key);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int GetDeviceCapabilities(struct drxk_state *state)
+{
+ u16 sioPdrOhwCfg = 0;
+ u32 sioTopJtagidLo = 0;
+ int status;
+ const char *spin = "";
+
+ dprintk(1, "\n");
+
+ /* driver 0.9.0 */
+ /* stop lock indicator process */
+ status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_TOP_COMM_KEY__A, 0xFABA);
+ if (status < 0)
+ goto error;
+ status = read16(state, SIO_PDR_OHW_CFG__A, &sioPdrOhwCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_TOP_COMM_KEY__A, 0x0000);
+ if (status < 0)
+ goto error;
+
+ switch ((sioPdrOhwCfg & SIO_PDR_OHW_CFG_FREF_SEL__M)) {
+ case 0:
+ /* ignore (bypass ?) */
+ break;
+ case 1:
+ /* 27 MHz */
+ state->m_oscClockFreq = 27000;
+ break;
+ case 2:
+ /* 20.25 MHz */
+ state->m_oscClockFreq = 20250;
+ break;
+ case 3:
+ /* 4 MHz */
+ state->m_oscClockFreq = 20250;
+ break;
+ default:
+ printk(KERN_ERR "drxk: Clock Frequency is unkonwn\n");
+ return -EINVAL;
+ }
+ /*
+ Determine device capabilities
+ Based on pinning v14
+ */
+ status = read32(state, SIO_TOP_JTAGID_LO__A, &sioTopJtagidLo);
+ if (status < 0)
+ goto error;
+ /* driver 0.9.0 */
+ switch ((sioTopJtagidLo >> 29) & 0xF) {
+ case 0:
+ state->m_deviceSpin = DRXK_SPIN_A1;
+ spin = "A1";
+ break;
+ case 2:
+ state->m_deviceSpin = DRXK_SPIN_A2;
+ spin = "A2";
+ break;
+ case 3:
+ state->m_deviceSpin = DRXK_SPIN_A3;
+ spin = "A3";
+ break;
+ default:
+ state->m_deviceSpin = DRXK_SPIN_UNKNOWN;
+ status = -EINVAL;
+ printk(KERN_ERR "drxk: Spin unknown\n");
+ goto error2;
+ }
+ switch ((sioTopJtagidLo >> 12) & 0xFF) {
+ case 0x13:
+ /* typeId = DRX3913K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = false;
+ state->m_hasAudio = false;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = true;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = false;
+ state->m_hasGPIO1 = false;
+ state->m_hasIRQN = false;
+ break;
+ case 0x15:
+ /* typeId = DRX3915K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = true;
+ state->m_hasAudio = false;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = false;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = true;
+ state->m_hasGPIO1 = true;
+ state->m_hasIRQN = false;
+ break;
+ case 0x16:
+ /* typeId = DRX3916K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = true;
+ state->m_hasAudio = false;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = false;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = true;
+ state->m_hasGPIO1 = true;
+ state->m_hasIRQN = false;
+ break;
+ case 0x18:
+ /* typeId = DRX3918K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = true;
+ state->m_hasAudio = true;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = false;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = true;
+ state->m_hasGPIO1 = true;
+ state->m_hasIRQN = false;
+ break;
+ case 0x21:
+ /* typeId = DRX3921K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = true;
+ state->m_hasAudio = true;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = true;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = true;
+ state->m_hasGPIO1 = true;
+ state->m_hasIRQN = false;
+ break;
+ case 0x23:
+ /* typeId = DRX3923K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = true;
+ state->m_hasAudio = true;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = true;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = true;
+ state->m_hasGPIO1 = true;
+ state->m_hasIRQN = false;
+ break;
+ case 0x25:
+ /* typeId = DRX3925K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = true;
+ state->m_hasAudio = true;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = true;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = true;
+ state->m_hasGPIO1 = true;
+ state->m_hasIRQN = false;
+ break;
+ case 0x26:
+ /* typeId = DRX3926K_TYPE_ID */
+ state->m_hasLNA = false;
+ state->m_hasOOB = false;
+ state->m_hasATV = true;
+ state->m_hasAudio = false;
+ state->m_hasDVBT = true;
+ state->m_hasDVBC = true;
+ state->m_hasSAWSW = true;
+ state->m_hasGPIO2 = true;
+ state->m_hasGPIO1 = true;
+ state->m_hasIRQN = false;
+ break;
+ default:
+ printk(KERN_ERR "drxk: DeviceID 0x%02x not supported\n",
+ ((sioTopJtagidLo >> 12) & 0xFF));
+ status = -EINVAL;
+ goto error2;
+ }
+
+ printk(KERN_INFO
+ "drxk: detected a drx-39%02xk, spin %s, xtal %d.%03d MHz\n",
+ ((sioTopJtagidLo >> 12) & 0xFF), spin,
+ state->m_oscClockFreq / 1000,
+ state->m_oscClockFreq % 1000);
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+error2:
+ return status;
+}
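
As an example of the JTAG ID decoding above (derived from the switch statements in this function, not from a datasheet):

/*
 * A part reporting 0x26 in bits 19:12 and 2 in bits 31:29 of
 * SIO_TOP_JTAGID_LO is logged as "detected a drx-3926k, spin A2" and
 * gets m_hasDVBT and m_hasDVBC set, with no audio support.
 */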
+
+static int HI_Command(struct drxk_state *state, u16 cmd, u16 *pResult)
+{
+ int status;
+ bool powerdown_cmd;
+
+ dprintk(1, "\n");
+
+ /* Write command */
+ status = write16(state, SIO_HI_RA_RAM_CMD__A, cmd);
+ if (status < 0)
+ goto error;
+ if (cmd == SIO_HI_RA_RAM_CMD_RESET)
+ msleep(1);
+
+ powerdown_cmd =
+ (bool) ((cmd == SIO_HI_RA_RAM_CMD_CONFIG) &&
+ ((state->m_HICfgCtrl) &
+ SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__M) ==
+ SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ);
+ if (powerdown_cmd == false) {
+ /* Wait until command rdy */
+ u32 retryCount = 0;
+ u16 waitCmd;
+
+ do {
+ msleep(1);
+ retryCount += 1;
+ status = read16(state, SIO_HI_RA_RAM_CMD__A,
+ &waitCmd);
+ } while ((status < 0) && (retryCount < DRXK_MAX_RETRIES)
+ && (waitCmd != 0));
+ if (status < 0)
+ goto error;
+ status = read16(state, SIO_HI_RA_RAM_RES__A, pResult);
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static int HI_CfgCommand(struct drxk_state *state)
+{
+ int status;
+
+ dprintk(1, "\n");
+
+ mutex_lock(&state->mutex);
+
+ status = write16(state, SIO_HI_RA_RAM_PAR_6__A, state->m_HICfgTimeout);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_HI_RA_RAM_PAR_5__A, state->m_HICfgCtrl);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_HI_RA_RAM_PAR_4__A, state->m_HICfgWakeUpKey);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_HI_RA_RAM_PAR_3__A, state->m_HICfgBridgeDelay);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_HI_RA_RAM_PAR_2__A, state->m_HICfgTimingDiv);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_HI_RA_RAM_PAR_1__A, SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY);
+ if (status < 0)
+ goto error;
+ status = HI_Command(state, SIO_HI_RA_RAM_CMD_CONFIG, 0);
+ if (status < 0)
+ goto error;
+
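+ /* Clear the sleep bit from the cached control word so the next
+ CONFIG command does not request sleep again */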
+ state->m_HICfgCtrl &= ~SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ;
+error:
+ mutex_unlock(&state->mutex);
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int InitHI(struct drxk_state *state)
+{
+ dprintk(1, "\n");
+
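+ /* Use the demod's own I2C address (8-bit write form) as HI wake-up key */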
+ state->m_HICfgWakeUpKey = (state->demod_address << 1);
+ state->m_HICfgTimeout = 0x96FF;
+ /* port/bridge/power down ctrl */
+ state->m_HICfgCtrl = SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE;
+
+ return HI_CfgCommand(state);
+}
+
+static int MPEGTSConfigurePins(struct drxk_state *state, bool mpegEnable)
+{
+ int status = -1;
+ u16 sioPdrMclkCfg = 0;
+ u16 sioPdrMdxCfg = 0;
+
+ dprintk(1, "\n");
+
+ /* stop lock indicator process */
+ status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
+ if (status < 0)
+ goto error;
+
+ /* MPEG TS pad configuration */
+ status = write16(state, SIO_TOP_COMM_KEY__A, 0xFABA);
+ if (status < 0)
+ goto error;
+
+ if (mpegEnable == false) {
+ /* Set MPEG TS pads to input mode */
+ status = write16(state, SIO_PDR_MSTRT_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MERR_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MCLK_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MVAL_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD0_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD1_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD2_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD3_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD4_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD5_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD6_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD7_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ } else {
+ /* Enable MPEG output */
+ sioPdrMdxCfg =
+ ((state->m_TSDataStrength <<
+ SIO_PDR_MD0_CFG_DRIVE__B) | 0x0003);
+ sioPdrMclkCfg = ((state->m_TSClockkStrength <<
+ SIO_PDR_MCLK_CFG_DRIVE__B) |
+ 0x0003);
+
+ status = write16(state, SIO_PDR_MSTRT_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MERR_CFG__A, 0x0000); /* Disable */
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MVAL_CFG__A, 0x0000); /* Disable */
+ if (status < 0)
+ goto error;
+ if (state->m_enableParallel == true) {
+ /* parallel -> enable MD1 to MD7 */
+ status = write16(state, SIO_PDR_MD1_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD2_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD3_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD4_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD5_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD6_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD7_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ } else {
+ sioPdrMdxCfg = ((state->m_TSDataStrength <<
+ SIO_PDR_MD0_CFG_DRIVE__B)
+ | 0x0003);
+ /* serial -> disable MD1 to MD7 */
+ status = write16(state, SIO_PDR_MD1_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD2_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD3_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD4_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD5_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD6_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD7_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ }
+ status = write16(state, SIO_PDR_MCLK_CFG__A, sioPdrMclkCfg);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_PDR_MD0_CFG__A, sioPdrMdxCfg);
+ if (status < 0)
+ goto error;
+ }
+ /* Enable MB output over MPEG pads and ctl input */
+ status = write16(state, SIO_PDR_MON_CFG__A, 0x0000);
+ if (status < 0)
+ goto error;
+ /* Clear the key register (non-magic word) now that the pad configuration is done */
+ status = write16(state, SIO_TOP_COMM_KEY__A, 0x0000);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int MPEGTSDisable(struct drxk_state *state)
+{
+ dprintk(1, "\n");
+
+ return MPEGTSConfigurePins(state, false);
+}
+
+static int BLChainCmd(struct drxk_state *state,
+ u16 romOffset, u16 nrOfElements, u32 timeOut)
+{
+ u16 blStatus = 0;
+ int status;
+ unsigned long end;
+
+ dprintk(1, "\n");
+ mutex_lock(&state->mutex);
+ status = write16(state, SIO_BL_MODE__A, SIO_BL_MODE_CHAIN);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_CHAIN_ADDR__A, romOffset);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_CHAIN_LEN__A, nrOfElements);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_ENABLE__A, SIO_BL_ENABLE_ON);
+ if (status < 0)
+ goto error;
+
+ end = jiffies + msecs_to_jiffies(timeOut);
+ do {
+ msleep(1);
+ status = read16(state, SIO_BL_STATUS__A, &blStatus);
+ if (status < 0)
+ goto error;
+ } while ((blStatus == 0x1) &&
+ ((time_is_after_jiffies(end))));
+
+ if (blStatus == 0x1) {
+ printk(KERN_ERR "drxk: SIO not ready\n");
+ status = -EINVAL;
+ goto error2;
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+error2:
+ mutex_unlock(&state->mutex);
+ return status;
+}
+
+
+static int DownloadMicrocode(struct drxk_state *state,
+ const u8 pMCImage[], u32 Length)
+{
+ const u8 *pSrc = pMCImage;
+ u16 Flags;
+ u16 Drain;
+ u32 Address;
+ u16 nBlocks;
+ u16 BlockSize;
+ u16 BlockCRC;
+ u32 offset = 0;
+ u32 i;
+ int status = 0;
+
+ dprintk(1, "\n");
+
+ /* down the drain (we don't care about MAGIC_WORD) */
+ Drain = (pSrc[0] << 8) | pSrc[1];
+ pSrc += sizeof(u16);
+ offset += sizeof(u16);
+ nBlocks = (pSrc[0] << 8) | pSrc[1];
+ pSrc += sizeof(u16);
+ offset += sizeof(u16);
+
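+ /* Each block starts with a header: 32-bit destination address,
+ 16-bit block size (in 16-bit words, converted to bytes below),
+ 16-bit flags and a 16-bit CRC, all big-endian. The flags and CRC
+ are parsed but not verified here. */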
+ for (i = 0; i < nBlocks; i += 1) {
+ Address = (pSrc[0] << 24) | (pSrc[1] << 16) |
+ (pSrc[2] << 8) | pSrc[3];
+ pSrc += sizeof(u32);
+ offset += sizeof(u32);
+
+ BlockSize = ((pSrc[0] << 8) | pSrc[1]) * sizeof(u16);
+ pSrc += sizeof(u16);
+ offset += sizeof(u16);
+
+ Flags = (pSrc[0] << 8) | pSrc[1];
+ pSrc += sizeof(u16);
+ offset += sizeof(u16);
+
+ BlockCRC = (pSrc[0] << 8) | pSrc[1];
+ pSrc += sizeof(u16);
+ offset += sizeof(u16);
+
+ if (offset + BlockSize > Length) {
+ printk(KERN_ERR "drxk: Firmware is corrupted.\n");
+ return -EINVAL;
+ }
+
+ status = write_block(state, Address, BlockSize, pSrc);
+ if (status < 0) {
+ printk(KERN_ERR "drxk: Error %d while loading firmware\n", status);
+ break;
+ }
+ pSrc += BlockSize;
+ offset += BlockSize;
+ }
+ return status;
+}
+
+static int DVBTEnableOFDMTokenRing(struct drxk_state *state, bool enable)
+{
+ int status;
+ u16 data = 0;
+ u16 desiredCtrl = SIO_OFDM_SH_OFDM_RING_ENABLE_ON;
+ u16 desiredStatus = SIO_OFDM_SH_OFDM_RING_STATUS_ENABLED;
+ unsigned long end;
+
+ dprintk(1, "\n");
+
+ if (enable == false) {
+ desiredCtrl = SIO_OFDM_SH_OFDM_RING_ENABLE_OFF;
+ desiredStatus = SIO_OFDM_SH_OFDM_RING_STATUS_DOWN;
+ }
+
+ status = read16(state, SIO_OFDM_SH_OFDM_RING_STATUS__A, &data);
+ if (status >= 0 && data == desiredStatus) {
+ /* tokenring already has correct status */
+ return status;
+ }
+ /* Disable/enable dvbt tokenring bridge */
+ status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, desiredCtrl);
+
+ end = jiffies + msecs_to_jiffies(DRXK_OFDM_TR_SHUTDOWN_TIMEOUT);
+ do {
+ status = read16(state, SIO_OFDM_SH_OFDM_RING_STATUS__A, &data);
+ if ((status >= 0 && data == desiredStatus) || time_is_before_jiffies(end))
+ break;
+ msleep(1);
+ } while (1);
+ if (data != desiredStatus) {
+ printk(KERN_ERR "drxk: SIO not ready\n");
+ return -EINVAL;
+ }
+ return status;
+}
+
+static int MPEGTSStop(struct drxk_state *state)
+{
+ int status = 0;
+ u16 fecOcSncMode = 0;
+ u16 fecOcIprMode = 0;
+
+ dprintk(1, "\n");
+
+ /* Graceful shutdown (byte boundaries) */
+ status = read16(state, FEC_OC_SNC_MODE__A, &fecOcSncMode);
+ if (status < 0)
+ goto error;
+ fecOcSncMode |= FEC_OC_SNC_MODE_SHUTDOWN__M;
+ status = write16(state, FEC_OC_SNC_MODE__A, fecOcSncMode);
+ if (status < 0)
+ goto error;
+
+ /* Suppress MCLK during absence of data */
+ status = read16(state, FEC_OC_IPR_MODE__A, &fecOcIprMode);
+ if (status < 0)
+ goto error;
+ fecOcIprMode |= FEC_OC_IPR_MODE_MCLK_DIS_DAT_ABS__M;
+ status = write16(state, FEC_OC_IPR_MODE__A, fecOcIprMode);
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static int scu_command(struct drxk_state *state,
+ u16 cmd, u8 parameterLen,
+ u16 *parameter, u8 resultLen, u16 *result)
+{
+#if (SCU_RAM_PARAM_0__A - SCU_RAM_PARAM_15__A) != 15
+#error DRXK register mapping no longer compatible with this routine!
+#endif
+ u16 curCmd = 0;
+ int status = -EINVAL;
+ unsigned long end;
+ u8 buffer[34];
+ int cnt = 0, ii;
+ const char *p;
+ char errname[30];
+
+ dprintk(1, "\n");
+
+ if ((cmd == 0) || ((parameterLen > 0) && (parameter == NULL)) ||
+ ((resultLen > 0) && (result == NULL))) {
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+ }
+
+ mutex_lock(&state->mutex);
+
+ /* assume that the command register is ready
+ since it is checked afterwards */
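+ /* Parameters are serialized LSB-first, last parameter first, followed
+ by the command word, so the command only becomes visible to the SCU
+ after all of its parameters are in place */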
+ for (ii = parameterLen - 1; ii >= 0; ii -= 1) {
+ buffer[cnt++] = (parameter[ii] & 0xFF);
+ buffer[cnt++] = ((parameter[ii] >> 8) & 0xFF);
+ }
+ buffer[cnt++] = (cmd & 0xFF);
+ buffer[cnt++] = ((cmd >> 8) & 0xFF);
+
+ write_block(state, SCU_RAM_PARAM_0__A -
+ (parameterLen - 1), cnt, buffer);
+ /* Wait until SCU has processed command */
+ end = jiffies + msecs_to_jiffies(DRXK_MAX_WAITTIME);
+ do {
+ msleep(1);
+ status = read16(state, SCU_RAM_COMMAND__A, &curCmd);
+ if (status < 0)
+ goto error;
+ } while (!(curCmd == DRX_SCU_READY) && (time_is_after_jiffies(end)));
+ if (curCmd != DRX_SCU_READY) {
+ printk(KERN_ERR "drxk: SCU not ready\n");
+ status = -EIO;
+ goto error2;
+ }
+ /* read results */
+ if ((resultLen > 0) && (result != NULL)) {
+ s16 err;
+ int ii;
+
+ for (ii = resultLen - 1; ii >= 0; ii -= 1) {
+ status = read16(state, SCU_RAM_PARAM_0__A - ii, &result[ii]);
+ if (status < 0)
+ goto error;
+ }
+
+ /* Check if an error was reported by SCU */
+ err = (s16)result[0];
+ if (err >= 0)
+ goto error;
+
+ /* check for the known error codes */
+ switch (err) {
+ case SCU_RESULT_UNKCMD:
+ p = "SCU_RESULT_UNKCMD";
+ break;
+ case SCU_RESULT_UNKSTD:
+ p = "SCU_RESULT_UNKSTD";
+ break;
+ case SCU_RESULT_SIZE:
+ p = "SCU_RESULT_SIZE";
+ break;
+ case SCU_RESULT_INVPAR:
+ p = "SCU_RESULT_INVPAR";
+ break;
+ default: /* Other negative values are errors */
+ sprintf(errname, "ERROR: %d\n", err);
+ p = errname;
+ }
+ printk(KERN_ERR "drxk: %s while sending cmd 0x%04x with params:", p, cmd);
+ print_hex_dump_bytes("drxk: ", DUMP_PREFIX_NONE, buffer, cnt);
+ status = -EINVAL;
+ goto error2;
+ }
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+error2:
+ mutex_unlock(&state->mutex);
+ return status;
+}
+
+static int SetIqmAf(struct drxk_state *state, bool active)
+{
+ u16 data = 0;
+ int status;
+
+ dprintk(1, "\n");
+
+ /* Configure IQM */
+ status = read16(state, IQM_AF_STDBY__A, &data);
+ if (status < 0)
+ goto error;
+
+ if (!active) {
+ data |= (IQM_AF_STDBY_STDBY_ADC_STANDBY
+ | IQM_AF_STDBY_STDBY_AMP_STANDBY
+ | IQM_AF_STDBY_STDBY_PD_STANDBY
+ | IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY
+ | IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY);
+ } else {
+ data &= ((~IQM_AF_STDBY_STDBY_ADC_STANDBY)
+ & (~IQM_AF_STDBY_STDBY_AMP_STANDBY)
+ & (~IQM_AF_STDBY_STDBY_PD_STANDBY)
+ & (~IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY)
+ & (~IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY)
+ );
+ }
+ status = write16(state, IQM_AF_STDBY__A, data);
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int CtrlPowerMode(struct drxk_state *state, enum DRXPowerMode *mode)
+{
+ int status = 0;
+ u16 sioCcPwdMode = 0;
+
+ dprintk(1, "\n");
+
+ /* Check arguments */
+ if (mode == NULL)
+ return -EINVAL;
+
+ switch (*mode) {
+ case DRX_POWER_UP:
+ sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_NONE;
+ break;
+ case DRXK_POWER_DOWN_OFDM:
+ sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_OFDM;
+ break;
+ case DRXK_POWER_DOWN_CORE:
+ sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_CLOCK;
+ break;
+ case DRXK_POWER_DOWN_PLL:
+ sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_PLL;
+ break;
+ case DRX_POWER_DOWN:
+ sioCcPwdMode = SIO_CC_PWD_MODE_LEVEL_OSC;
+ break;
+ default:
+ /* Unknown sleep mode */
+ return -EINVAL;
+ }
+
+ /* If already in requested power mode, do nothing */
+ if (state->m_currentPowerMode == *mode)
+ return 0;
+
+ /* For next steps make sure to start from DRX_POWER_UP mode */
+ if (state->m_currentPowerMode != DRX_POWER_UP) {
+ status = PowerUpDevice(state);
+ if (status < 0)
+ goto error;
+ status = DVBTEnableOFDMTokenRing(state, true);
+ if (status < 0)
+ goto error;
+ }
+
+ if (*mode == DRX_POWER_UP) {
+ /* Restore analog & pin configuration */
+ } else {
+ /* Power down to requested mode */
+ /* Backup some register settings */
+ /* Set pins with possible pull-ups connected
+ to them in input mode */
+ /* Analog power down */
+ /* ADC power down */
+ /* Power down device */
+ /* stop all comm_exec */
+ /* Stop and power down previous standard */
+ switch (state->m_OperationMode) {
+ case OM_DVBT:
+ status = MPEGTSStop(state);
+ if (status < 0)
+ goto error;
+ status = PowerDownDVBT(state, false);
+ if (status < 0)
+ goto error;
+ break;
+ case OM_QAM_ITU_A:
+ case OM_QAM_ITU_C:
+ status = MPEGTSStop(state);
+ if (status < 0)
+ goto error;
+ status = PowerDownQAM(state);
+ if (status < 0)
+ goto error;
+ break;
+ default:
+ break;
+ }
+ status = DVBTEnableOFDMTokenRing(state, false);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_CC_PWD_MODE__A, sioCcPwdMode);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY);
+ if (status < 0)
+ goto error;
+
+ if (*mode != DRXK_POWER_DOWN_OFDM) {
+ state->m_HICfgCtrl |=
+ SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ;
+ status = HI_CfgCommand(state);
+ if (status < 0)
+ goto error;
+ }
+ }
+ state->m_currentPowerMode = *mode;
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static int PowerDownDVBT(struct drxk_state *state, bool setPowerMode)
+{
+ enum DRXPowerMode powerMode = DRXK_POWER_DOWN_OFDM;
+ u16 cmdResult = 0;
+ u16 data = 0;
+ int status;
+
+ dprintk(1, "\n");
+
+ status = read16(state, SCU_COMM_EXEC__A, &data);
+ if (status < 0)
+ goto error;
+ if (data == SCU_COMM_EXEC_ACTIVE) {
+ /* Send OFDM stop command */
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_STOP, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+ /* Send OFDM reset command */
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_RESET, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+ }
+
+ /* Reset datapath for OFDM, processors first */
+ status = write16(state, OFDM_SC_COMM_EXEC__A, OFDM_SC_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_LC_COMM_EXEC__A, OFDM_LC_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_STOP);
+ if (status < 0)
+ goto error;
+
+ /* powerdown AFE */
+ status = SetIqmAf(state, false);
+ if (status < 0)
+ goto error;
+
+ /* powerdown to OFDM mode */
+ if (setPowerMode) {
+ status = CtrlPowerMode(state, &powerMode);
+ if (status < 0)
+ goto error;
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int SetOperationMode(struct drxk_state *state,
+ enum OperationMode oMode)
+{
+ int status = 0;
+
+ dprintk(1, "\n");
+ /*
+ Stop and power down previous standard
+ TODO investigate total power down instead of partial
+ power down depending on "previous" standard.
+ */
+
+ /* disable HW lock indicator */
+ status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
+ if (status < 0)
+ goto error;
+
+ /* Device is already at the required mode */
+ if (state->m_OperationMode == oMode)
+ return 0;
+
+ switch (state->m_OperationMode) {
+ /* OM_NONE was added for start up */
+ case OM_NONE:
+ break;
+ case OM_DVBT:
+ status = MPEGTSStop(state);
+ if (status < 0)
+ goto error;
+ status = PowerDownDVBT(state, true);
+ if (status < 0)
+ goto error;
+ state->m_OperationMode = OM_NONE;
+ break;
+ case OM_QAM_ITU_A: /* fallthrough */
+ case OM_QAM_ITU_C:
+ status = MPEGTSStop(state);
+ if (status < 0)
+ goto error;
+ status = PowerDownQAM(state);
+ if (status < 0)
+ goto error;
+ state->m_OperationMode = OM_NONE;
+ break;
+ case OM_QAM_ITU_B:
+ default:
+ status = -EINVAL;
+ goto error;
+ }
+
+ /*
+ Power up new standard
+ */
+ switch (oMode) {
+ case OM_DVBT:
+ state->m_OperationMode = oMode;
+ status = SetDVBTStandard(state, oMode);
+ if (status < 0)
+ goto error;
+ break;
+ case OM_QAM_ITU_A: /* fallthrough */
+ case OM_QAM_ITU_C:
+ state->m_OperationMode = oMode;
+ status = SetQAMStandard(state, oMode);
+ if (status < 0)
+ goto error;
+ break;
+ case OM_QAM_ITU_B:
+ default:
+ status = -EINVAL;
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int Start(struct drxk_state *state, s32 offsetFreq,
+ s32 IntermediateFrequency)
+{
+ int status = -EINVAL;
+
+ u16 IFreqkHz;
+ s32 OffsetkHz = offsetFreq / 1000;
+
+ dprintk(1, "\n");
+ if (state->m_DrxkState != DRXK_STOPPED &&
+ state->m_DrxkState != DRXK_DTV_STARTED)
+ goto error;
+
+ state->m_bMirrorFreqSpect = (state->param.inversion == INVERSION_ON);
+
+ if (IntermediateFrequency < 0) {
+ state->m_bMirrorFreqSpect = !state->m_bMirrorFreqSpect;
+ IntermediateFrequency = -IntermediateFrequency;
+ }
+
+ switch (state->m_OperationMode) {
+ case OM_QAM_ITU_A:
+ case OM_QAM_ITU_C:
+ IFreqkHz = (IntermediateFrequency / 1000);
+ status = SetQAM(state, IFreqkHz, OffsetkHz);
+ if (status < 0)
+ goto error;
+ state->m_DrxkState = DRXK_DTV_STARTED;
+ break;
+ case OM_DVBT:
+ IFreqkHz = (IntermediateFrequency / 1000);
+ status = MPEGTSStop(state);
+ if (status < 0)
+ goto error;
+ status = SetDVBT(state, IFreqkHz, OffsetkHz);
+ if (status < 0)
+ goto error;
+ status = DVBTStart(state);
+ if (status < 0)
+ goto error;
+ state->m_DrxkState = DRXK_DTV_STARTED;
+ break;
+ default:
+ break;
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int ShutDown(struct drxk_state *state)
+{
+ dprintk(1, "\n");
+
+ MPEGTSStop(state);
+ return 0;
+}
+
+static int GetLockStatus(struct drxk_state *state, u32 *pLockStatus,
+ u32 Time)
+{
+ int status = -EINVAL;
+
+ dprintk(1, "\n");
+
+ if (pLockStatus == NULL)
+ goto error;
+
+ *pLockStatus = NOT_LOCKED;
+
+ /* Dispatch on the current operation mode */
+ switch (state->m_OperationMode) {
+ case OM_QAM_ITU_A:
+ case OM_QAM_ITU_B:
+ case OM_QAM_ITU_C:
+ status = GetQAMLockStatus(state, pLockStatus);
+ break;
+ case OM_DVBT:
+ status = GetDVBTLockStatus(state, pLockStatus);
+ break;
+ default:
+ break;
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int MPEGTSStart(struct drxk_state *state)
+{
+ int status;
+
+ u16 fecOcSncMode = 0;
+
+ /* Allow OC to sync again */
+ status = read16(state, FEC_OC_SNC_MODE__A, &fecOcSncMode);
+ if (status < 0)
+ goto error;
+ fecOcSncMode &= ~FEC_OC_SNC_MODE_SHUTDOWN__M;
+ status = write16(state, FEC_OC_SNC_MODE__A, fecOcSncMode);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_SNC_UNLOCK__A, 1);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int MPEGTSDtoInit(struct drxk_state *state)
+{
+ int status;
+
+ dprintk(1, "\n");
+
+ /* Rate integration settings */
+ status = write16(state, FEC_OC_RCN_CTL_STEP_LO__A, 0x0000);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_RCN_CTL_STEP_HI__A, 0x000C);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_RCN_GAIN__A, 0x000A);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_AVR_PARM_A__A, 0x0008);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_AVR_PARM_B__A, 0x0006);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_TMD_HI_MARGIN__A, 0x0680);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_TMD_LO_MARGIN__A, 0x0080);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_TMD_COUNT__A, 0x03F4);
+ if (status < 0)
+ goto error;
+
+ /* Additional configuration */
+ status = write16(state, FEC_OC_OCR_INVERT__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_SNC_LWM__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_SNC_HWM__A, 12);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static int MPEGTSDtoSetup(struct drxk_state *state,
+ enum OperationMode oMode)
+{
+ int status;
+
+ u16 fecOcRegMode = 0; /* FEC_OC_MODE register value */
+ u16 fecOcRegIprMode = 0; /* FEC_OC_IPR_MODE register value */
+ u16 fecOcDtoMode = 0; /* FEC_OC_DTO_MODE register value */
+ u16 fecOcFctMode = 0; /* FEC_OC_FCT_MODE register value */
+ u16 fecOcDtoPeriod = 2; /* FEC_OC_DTO_PERIOD register value */
+ u16 fecOcDtoBurstLen = 188; /* FEC_OC_DTO_BURST_LEN register value */
+ u32 fecOcRcnCtlRate = 0; /* FEC_OC_RCN_CTL_RATE register value */
+ u16 fecOcTmdMode = 0;
+ u16 fecOcTmdIntUpdRate = 0;
+ u32 maxBitRate = 0;
+ bool staticCLK = false;
+
+ dprintk(1, "\n");
+
+ /* Check insertion of the Reed-Solomon parity bytes */
+ status = read16(state, FEC_OC_MODE__A, &fecOcRegMode);
+ if (status < 0)
+ goto error;
+ status = read16(state, FEC_OC_IPR_MODE__A, &fecOcRegIprMode);
+ if (status < 0)
+ goto error;
+ fecOcRegMode &= (~FEC_OC_MODE_PARITY__M);
+ fecOcRegIprMode &= (~FEC_OC_IPR_MODE_MVAL_DIS_PAR__M);
+ if (state->m_insertRSByte == true) {
+ /* enable parity symbol forward */
+ fecOcRegMode |= FEC_OC_MODE_PARITY__M;
+ /* MVAL disable during parity bytes */
+ fecOcRegIprMode |= FEC_OC_IPR_MODE_MVAL_DIS_PAR__M;
+ /* TS burst length to 204 */
+ fecOcDtoBurstLen = 204;
+ }
+
+ /* Check serial or parallel output */
+ fecOcRegIprMode &= (~(FEC_OC_IPR_MODE_SERIAL__M));
+ if (state->m_enableParallel == false) {
+ /* MPEG data output is serial -> set ipr_mode[0] */
+ fecOcRegIprMode |= FEC_OC_IPR_MODE_SERIAL__M;
+ }
+
+ switch (oMode) {
+ case OM_DVBT:
+ maxBitRate = state->m_DVBTBitrate;
+ fecOcTmdMode = 3;
+ fecOcRcnCtlRate = 0xC00000;
+ staticCLK = state->m_DVBTStaticCLK;
+ break;
+ case OM_QAM_ITU_A: /* fallthrough */
+ case OM_QAM_ITU_C:
+ fecOcTmdMode = 0x0004;
+ fecOcRcnCtlRate = 0xD2B4EE; /* good for >63 Mb/s */
+ maxBitRate = state->m_DVBCBitrate;
+ staticCLK = state->m_DVBCStaticCLK;
+ break;
+ default:
+ status = -EINVAL;
+ } /* switch (standard) */
+ if (status < 0)
+ goto error;
+
+ /* Configure DTO's */
+ if (staticCLK) {
+ u32 bitRate = 0;
+
+ /* Rational DTO for MCLK source (static MCLK rate),
+ Dynamic DTO for optimal grouping
+ (avoid intra-packet gaps),
+ DTO offset enable to sync TS burst with MSTRT */
+ fecOcDtoMode = (FEC_OC_DTO_MODE_DYNAMIC__M |
+ FEC_OC_DTO_MODE_OFFSET_ENABLE__M);
+ fecOcFctMode = (FEC_OC_FCT_MODE_RAT_ENA__M |
+ FEC_OC_FCT_MODE_VIRT_ENA__M);
+
+ /* Check user defined bitrate */
+ bitRate = maxBitRate;
+ if (bitRate > 75900000UL) { /* max is 75.9 Mb/s */
+ bitRate = 75900000UL;
+ }
+ /* Rational DTO period:
+ dto_period = (Fsys / bitrate) - 2
+
+ Result should be floored,
+ to make sure >= requested bitrate
+ */
+ fecOcDtoPeriod = (u16) (((state->m_sysClockFreq)
+ * 1000) / bitRate);
+ if (fecOcDtoPeriod <= 2)
+ fecOcDtoPeriod = 0;
+ else
+ fecOcDtoPeriod -= 2;
+ fecOcTmdIntUpdRate = 8;
+ } else {
+ /* (commonAttr->staticCLK == false) => dynamic mode */
+ fecOcDtoMode = FEC_OC_DTO_MODE_DYNAMIC__M;
+ fecOcFctMode = FEC_OC_FCT_MODE__PRE;
+ fecOcTmdIntUpdRate = 5;
+ }
+
+ /* Write appropriate registers with requested configuration */
+ status = write16(state, FEC_OC_DTO_BURST_LEN__A, fecOcDtoBurstLen);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_DTO_PERIOD__A, fecOcDtoPeriod);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_DTO_MODE__A, fecOcDtoMode);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_FCT_MODE__A, fecOcFctMode);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_MODE__A, fecOcRegMode);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_IPR_MODE__A, fecOcRegIprMode);
+ if (status < 0)
+ goto error;
+
+ /* Rate integration settings */
+ status = write32(state, FEC_OC_RCN_CTL_RATE_LO__A, fecOcRcnCtlRate);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_TMD_INT_UPD_RATE__A, fecOcTmdIntUpdRate);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_TMD_MODE__A, fecOcTmdMode);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int MPEGTSConfigurePolarity(struct drxk_state *state)
+{
+ u16 fecOcRegIprInvert = 0;
+
+ /* Data mask for the output data byte */
+ u16 InvertDataMask =
+ FEC_OC_IPR_INVERT_MD7__M | FEC_OC_IPR_INVERT_MD6__M |
+ FEC_OC_IPR_INVERT_MD5__M | FEC_OC_IPR_INVERT_MD4__M |
+ FEC_OC_IPR_INVERT_MD3__M | FEC_OC_IPR_INVERT_MD2__M |
+ FEC_OC_IPR_INVERT_MD1__M | FEC_OC_IPR_INVERT_MD0__M;
+
+ dprintk(1, "\n");
+
+ /* Control selective inversion of output bits */
+ fecOcRegIprInvert &= (~(InvertDataMask));
+ if (state->m_invertDATA == true)
+ fecOcRegIprInvert |= InvertDataMask;
+ fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MERR__M));
+ if (state->m_invertERR == true)
+ fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MERR__M;
+ fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MSTRT__M));
+ if (state->m_invertSTR == true)
+ fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MSTRT__M;
+ fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MVAL__M));
+ if (state->m_invertVAL == true)
+ fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MVAL__M;
+ fecOcRegIprInvert &= (~(FEC_OC_IPR_INVERT_MCLK__M));
+ if (state->m_invertCLK == true)
+ fecOcRegIprInvert |= FEC_OC_IPR_INVERT_MCLK__M;
+
+ return write16(state, FEC_OC_IPR_INVERT__A, fecOcRegIprInvert);
+}
+
+#define SCU_RAM_AGC_KI_INV_RF_POL__M 0x4000
+
+static int SetAgcRf(struct drxk_state *state,
+ struct SCfgAgc *pAgcCfg, bool isDTV)
+{
+ int status = -EINVAL;
+ u16 data = 0;
+ struct SCfgAgc *pIfAgcSettings;
+
+ dprintk(1, "\n");
+
+ if (pAgcCfg == NULL)
+ goto error;
+
+ switch (pAgcCfg->ctrlMode) {
+ case DRXK_AGC_CTRL_AUTO:
+ /* Enable RF AGC DAC */
+ status = read16(state, IQM_AF_STDBY__A, &data);
+ if (status < 0)
+ goto error;
+ data &= ~IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY;
+ status = write16(state, IQM_AF_STDBY__A, data);
+ if (status < 0)
+ goto error;
+ status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
+ if (status < 0)
+ goto error;
+
+ /* Enable SCU RF AGC loop */
+ data &= ~SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M;
+
+ /* Polarity */
+ if (state->m_RfAgcPol)
+ data |= SCU_RAM_AGC_CONFIG_INV_RF_POL__M;
+ else
+ data &= ~SCU_RAM_AGC_CONFIG_INV_RF_POL__M;
+ status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
+ if (status < 0)
+ goto error;
+
+ /* Set speed (using complementary reduction value) */
+ status = read16(state, SCU_RAM_AGC_KI_RED__A, &data);
+ if (status < 0)
+ goto error;
+
+ data &= ~SCU_RAM_AGC_KI_RED_RAGC_RED__M;
+ data |= (~(pAgcCfg->speed <<
+ SCU_RAM_AGC_KI_RED_RAGC_RED__B)
+ & SCU_RAM_AGC_KI_RED_RAGC_RED__M);
+
+ status = write16(state, SCU_RAM_AGC_KI_RED__A, data);
+ if (status < 0)
+ goto error;
+
+ if (IsDVBT(state))
+ pIfAgcSettings = &state->m_dvbtIfAgcCfg;
+ else if (IsQAM(state))
+ pIfAgcSettings = &state->m_qamIfAgcCfg;
+ else
+ pIfAgcSettings = &state->m_atvIfAgcCfg;
+ if (pIfAgcSettings == NULL) {
+ status = -EINVAL;
+ goto error;
+ }
+
+ /* Set TOP, only if IF-AGC is in AUTO mode */
+ if (pIfAgcSettings->ctrlMode == DRXK_AGC_CTRL_AUTO)
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, pAgcCfg->top);
+ if (status < 0)
+ goto error;
+
+ /* Cut-Off current */
+ status = write16(state, SCU_RAM_AGC_RF_IACCU_HI_CO__A, pAgcCfg->cutOffCurrent);
+ if (status < 0)
+ goto error;
+
+ /* Max. output level */
+ status = write16(state, SCU_RAM_AGC_RF_MAX__A, pAgcCfg->maxOutputLevel);
+ if (status < 0)
+ goto error;
+
+ break;
+
+ case DRXK_AGC_CTRL_USER:
+ /* Enable RF AGC DAC */
+ status = read16(state, IQM_AF_STDBY__A, &data);
+ if (status < 0)
+ goto error;
+ data &= ~IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY;
+ status = write16(state, IQM_AF_STDBY__A, data);
+ if (status < 0)
+ goto error;
+
+ /* Disable SCU RF AGC loop */
+ status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
+ if (status < 0)
+ goto error;
+ data |= SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M;
+ if (state->m_RfAgcPol)
+ data |= SCU_RAM_AGC_CONFIG_INV_RF_POL__M;
+ else
+ data &= ~SCU_RAM_AGC_CONFIG_INV_RF_POL__M;
+ status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
+ if (status < 0)
+ goto error;
+
+ /* SCU c.o.c. to 0, enabling full control range */
+ status = write16(state, SCU_RAM_AGC_RF_IACCU_HI_CO__A, 0);
+ if (status < 0)
+ goto error;
+
+ /* Write value to output pin */
+ status = write16(state, SCU_RAM_AGC_RF_IACCU_HI__A, pAgcCfg->outputLevel);
+ if (status < 0)
+ goto error;
+ break;
+
+ case DRXK_AGC_CTRL_OFF:
+ /* Disable RF AGC DAC */
+ status = read16(state, IQM_AF_STDBY__A, &data);
+ if (status < 0)
+ goto error;
+ data |= IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY;
+ status = write16(state, IQM_AF_STDBY__A, data);
+ if (status < 0)
+ goto error;
+
+ /* Disable SCU RF AGC loop */
+ status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
+ if (status < 0)
+ goto error;
+ data |= SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M;
+ status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
+ if (status < 0)
+ goto error;
+ break;
+
+ default:
+ status = -EINVAL;
+
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+#define SCU_RAM_AGC_KI_INV_IF_POL__M 0x2000
+
+static int SetAgcIf(struct drxk_state *state,
+ struct SCfgAgc *pAgcCfg, bool isDTV)
+{
+ u16 data = 0;
+ int status = 0;
+ struct SCfgAgc *pRfAgcSettings;
+
+ dprintk(1, "\n");
+
+ switch (pAgcCfg->ctrlMode) {
+ case DRXK_AGC_CTRL_AUTO:
+
+ /* Enable IF AGC DAC */
+ status = read16(state, IQM_AF_STDBY__A, &data);
+ if (status < 0)
+ goto error;
+ data &= ~IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY;
+ status = write16(state, IQM_AF_STDBY__A, data);
+ if (status < 0)
+ goto error;
+
+ status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
+ if (status < 0)
+ goto error;
+
+ /* Enable SCU IF AGC loop */
+ data &= ~SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M;
+
+ /* Polarity */
+ if (state->m_IfAgcPol)
+ data |= SCU_RAM_AGC_CONFIG_INV_IF_POL__M;
+ else
+ data &= ~SCU_RAM_AGC_CONFIG_INV_IF_POL__M;
+ status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
+ if (status < 0)
+ goto error;
+
+ /* Set speed (using complementary reduction value) */
+ status = read16(state, SCU_RAM_AGC_KI_RED__A, &data);
+ if (status < 0)
+ goto error;
+ data &= ~SCU_RAM_AGC_KI_RED_IAGC_RED__M;
+ data |= (~(pAgcCfg->speed <<
+ SCU_RAM_AGC_KI_RED_IAGC_RED__B)
+ & SCU_RAM_AGC_KI_RED_IAGC_RED__M);
+
+ status = write16(state, SCU_RAM_AGC_KI_RED__A, data);
+ if (status < 0)
+ goto error;
+
+ if (IsQAM(state))
+ pRfAgcSettings = &state->m_qamRfAgcCfg;
+ else
+ pRfAgcSettings = &state->m_atvRfAgcCfg;
+ if (pRfAgcSettings == NULL)
+ return -EINVAL;
+ /* Restore TOP */
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, pRfAgcSettings->top);
+ if (status < 0)
+ goto error;
+ break;
+
+ case DRXK_AGC_CTRL_USER:
+
+ /* Enable IF AGC DAC */
+ status = read16(state, IQM_AF_STDBY__A, &data);
+ if (status < 0)
+ goto error;
+ data &= ~IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY;
+ status = write16(state, IQM_AF_STDBY__A, data);
+ if (status < 0)
+ goto error;
+
+ status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
+ if (status < 0)
+ goto error;
+
+ /* Disable SCU IF AGC loop */
+ data |= SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M;
+
+ /* Polarity */
+ if (state->m_IfAgcPol)
+ data |= SCU_RAM_AGC_CONFIG_INV_IF_POL__M;
+ else
+ data &= ~SCU_RAM_AGC_CONFIG_INV_IF_POL__M;
+ status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
+ if (status < 0)
+ goto error;
+
+ /* Write value to output pin */
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, pAgcCfg->outputLevel);
+ if (status < 0)
+ goto error;
+ break;
+
+ case DRXK_AGC_CTRL_OFF:
+
+ /* Disable IF AGC DAC */
+ status = read16(state, IQM_AF_STDBY__A, &data);
+ if (status < 0)
+ goto error;
+ data |= IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY;
+ status = write16(state, IQM_AF_STDBY__A, data);
+ if (status < 0)
+ goto error;
+
+ /* Disable SCU IF AGC loop */
+ status = read16(state, SCU_RAM_AGC_CONFIG__A, &data);
+ if (status < 0)
+ goto error;
+ data |= SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M;
+ status = write16(state, SCU_RAM_AGC_CONFIG__A, data);
+ if (status < 0)
+ goto error;
+ break;
+ } /* switch (agcSettingsIf->ctrlMode) */
+
+ /* always set the top to support
+ configurations without if-loop */
+ status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MIN__A, pAgcCfg->top);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int ReadIFAgc(struct drxk_state *state, u32 *pValue)
+{
+ u16 agcDacLvl;
+ int status;
+ u16 Level = 0;
+
+ dprintk(1, "\n");
+
+ status = read16(state, IQM_AF_AGC_IF__A, &agcDacLvl);
+ if (status < 0) {
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+ }
+
+ *pValue = 0;
+
+ if (agcDacLvl > DRXK_AGC_DAC_OFFSET)
+ Level = agcDacLvl - DRXK_AGC_DAC_OFFSET;
+ if (Level < 14000)
+ *pValue = (14000 - Level) / 4;
+ else
+ *pValue = 0;
+
+ return status;
+}
+
+static int GetQAMSignalToNoise(struct drxk_state *state,
+ s32 *pSignalToNoise)
+{
+ int status = 0;
+ u16 qamSlErrPower = 0; /* accum. error between
+ raw and sliced symbols */
+ u32 qamSlSigPower = 0; /* used for MER, depends on
+ the QAM constellation */
+ u32 qamSlMer = 0; /* QAM MER */
+
+ dprintk(1, "\n");
+
+ /* MER calculation */
+
+ /* get the register value needed for MER */
+ status = read16(state, QAM_SL_ERR_POWER__A, &qamSlErrPower);
+ if (status < 0) {
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return -EINVAL;
+ }
+
+ switch (state->param.u.qam.modulation) {
+ case QAM_16:
+ qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM16 << 2;
+ break;
+ case QAM_32:
+ qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM32 << 2;
+ break;
+ case QAM_64:
+ qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM64 << 2;
+ break;
+ case QAM_128:
+ qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM128 << 2;
+ break;
+ default:
+ case QAM_256:
+ qamSlSigPower = DRXK_QAM_SL_SIG_POWER_QAM256 << 2;
+ break;
+ }
+
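+ /* MER in steps of 0.1 dB: 100 * log10(slicer signal power / slicer error power) */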
+ if (qamSlErrPower > 0) {
+ qamSlMer = Log10Times100(qamSlSigPower) -
+ Log10Times100((u32) qamSlErrPower);
+ }
+ *pSignalToNoise = qamSlMer;
+
+ return status;
+}
+
+static int GetDVBTSignalToNoise(struct drxk_state *state,
+ s32 *pSignalToNoise)
+{
+ int status;
+ u16 regData = 0;
+ u32 EqRegTdSqrErrI = 0;
+ u32 EqRegTdSqrErrQ = 0;
+ u16 EqRegTdSqrErrExp = 0;
+ u16 EqRegTdTpsPwrOfs = 0;
+ u16 EqRegTdReqSmbCnt = 0;
+ u32 tpsCnt = 0;
+ u32 SqrErrIQ = 0;
+ u32 a = 0;
+ u32 b = 0;
+ u32 c = 0;
+ u32 iMER = 0;
+ u16 transmissionParams = 0;
+
+ dprintk(1, "\n");
+
+ status = read16(state, OFDM_EQ_TOP_TD_TPS_PWR_OFS__A, &EqRegTdTpsPwrOfs);
+ if (status < 0)
+ goto error;
+ status = read16(state, OFDM_EQ_TOP_TD_REQ_SMB_CNT__A, &EqRegTdReqSmbCnt);
+ if (status < 0)
+ goto error;
+ status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_EXP__A, &EqRegTdSqrErrExp);
+ if (status < 0)
+ goto error;
+ status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_I__A, &regData);
+ if (status < 0)
+ goto error;
+ /* Extend SQR_ERR_I operational range */
+ EqRegTdSqrErrI = (u32) regData;
+ if ((EqRegTdSqrErrExp > 11) &&
+ (EqRegTdSqrErrI < 0x00000FFFUL)) {
+ EqRegTdSqrErrI += 0x00010000UL;
+ }
+ status = read16(state, OFDM_EQ_TOP_TD_SQR_ERR_Q__A, &regData);
+ if (status < 0)
+ goto error;
+ /* Extend SQR_ERR_Q operational range */
+ EqRegTdSqrErrQ = (u32) regData;
+ if ((EqRegTdSqrErrExp > 11) &&
+ (EqRegTdSqrErrQ < 0x00000FFFUL))
+ EqRegTdSqrErrQ += 0x00010000UL;
+
+ status = read16(state, OFDM_SC_RA_RAM_OP_PARAM__A, &transmissionParams);
+ if (status < 0)
+ goto error;
+
+ /* Check input data for MER */
+
+ /* MER calculation (in 0.1 dB) without math.h */
+ if ((EqRegTdTpsPwrOfs == 0) || (EqRegTdReqSmbCnt == 0))
+ iMER = 0;
+ else if ((EqRegTdSqrErrI + EqRegTdSqrErrQ) == 0) {
+ /* No error at all, this must be the HW reset value
+ * Apparently no first measurement yet
+ * Set MER to 0.0 */
+ iMER = 0;
+ } else {
+ SqrErrIQ = (EqRegTdSqrErrI + EqRegTdSqrErrQ) <<
+ EqRegTdSqrErrExp;
+ if ((transmissionParams &
+ OFDM_SC_RA_RAM_OP_PARAM_MODE__M)
+ == OFDM_SC_RA_RAM_OP_PARAM_MODE_2K)
+ tpsCnt = 17;
+ else
+ tpsCnt = 68;
+
+ /* IMER = 100 * log10 (x)
+ where x = (EqRegTdTpsPwrOfs^2 *
+ EqRegTdReqSmbCnt * tpsCnt)/SqrErrIQ
+
+ => IMER = a + b - c
+ where a = 100 * log10 (EqRegTdTpsPwrOfs^2)
+ b = 100 * log10 (EqRegTdReqSmbCnt * tpsCnt)
+ c = 100 * log10 (SqrErrIQ)
+ */
+
+ /* log(x) x = 9bits * 9bits->18 bits */
+ a = Log10Times100(EqRegTdTpsPwrOfs *
+ EqRegTdTpsPwrOfs);
+ /* log(x) x = 16bits * 7bits->23 bits */
+ b = Log10Times100(EqRegTdReqSmbCnt * tpsCnt);
+ /* log(x) x = (16bits + 16bits) << 15 ->32 bits */
+ c = Log10Times100(SqrErrIQ);
+
+ iMER = a + b;
+ /* No negative MER, clip to zero */
+ if (iMER > c)
+ iMER -= c;
+ else
+ iMER = 0;
+ }
+ *pSignalToNoise = iMER;
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int GetSignalToNoise(struct drxk_state *state, s32 *pSignalToNoise)
+{
+ dprintk(1, "\n");
+
+ *pSignalToNoise = 0;
+ switch (state->m_OperationMode) {
+ case OM_DVBT:
+ return GetDVBTSignalToNoise(state, pSignalToNoise);
+ case OM_QAM_ITU_A:
+ case OM_QAM_ITU_C:
+ return GetQAMSignalToNoise(state, pSignalToNoise);
+ default:
+ break;
+ }
+ return 0;
+}
+
+#if 0
+static int GetDVBTQuality(struct drxk_state *state, s32 *pQuality)
+{
+ /* SNR values for quasi error-free reception from NorDig 2.2 */
+ static const s32 QE_SN[] = {
+ 51, /* QPSK 1/2 */
+ 69, /* QPSK 2/3 */
+ 79, /* QPSK 3/4 */
+ 89, /* QPSK 5/6 */
+ 97, /* QPSK 7/8 */
+ 108, /* 16-QAM 1/2 */
+ 131, /* 16-QAM 2/3 */
+ 146, /* 16-QAM 3/4 */
+ 156, /* 16-QAM 5/6 */
+ 160, /* 16-QAM 7/8 */
+ 165, /* 64-QAM 1/2 */
+ 187, /* 64-QAM 2/3 */
+ 202, /* 64-QAM 3/4 */
+ 216, /* 64-QAM 5/6 */
+ 225, /* 64-QAM 7/8 */
+ };
+ int status = 0;
+
+ dprintk(1, "\n");
+
+ *pQuality = 0;
+
+ do {
+ s32 SignalToNoise = 0;
+ u16 Constellation = 0;
+ u16 CodeRate = 0;
+ u32 SignalToNoiseRel;
+ u32 BERQuality;
+
+ status = GetDVBTSignalToNoise(state, &SignalToNoise);
+ if (status < 0)
+ break;
+ status = read16(state, OFDM_EQ_TOP_TD_TPS_CONST__A, &Constellation);
+ if (status < 0)
+ break;
+ Constellation &= OFDM_EQ_TOP_TD_TPS_CONST__M;
+
+ status = read16(state, OFDM_EQ_TOP_TD_TPS_CODE_HP__A, &CodeRate);
+ if (status < 0)
+ break;
+ CodeRate &= OFDM_EQ_TOP_TD_TPS_CODE_HP__M;
+
+ if (Constellation > OFDM_EQ_TOP_TD_TPS_CONST_64QAM ||
+ CodeRate > OFDM_EQ_TOP_TD_TPS_CODE_LP_7_8)
+ break;
+ SignalToNoiseRel = SignalToNoise -
+ QE_SN[Constellation * 5 + CodeRate];
+ BERQuality = 100;
+
+ if (SignalToNoiseRel < -70)
+ *pQuality = 0;
+ else if (SignalToNoiseRel < 30)
+ *pQuality = ((SignalToNoiseRel + 70) *
+ BERQuality) / 100;
+ else
+ *pQuality = BERQuality;
+ } while (0);
+ return 0;
+}
+
+static int GetDVBCQuality(struct drxk_state *state, s32 *pQuality)
+{
+ int status = 0;
+ *pQuality = 0;
+
+ dprintk(1, "\n");
+
+ do {
+ u32 SignalToNoise = 0;
+ u32 BERQuality = 100;
+ u32 SignalToNoiseRel = 0;
+
+ status = GetQAMSignalToNoise(state, &SignalToNoise);
+ if (status < 0)
+ break;
+
+ switch (state->param.u.qam.modulation) {
+ case QAM_16:
+ SignalToNoiseRel = SignalToNoise - 200;
+ break;
+ case QAM_32:
+ SignalToNoiseRel = SignalToNoise - 230;
+ break; /* Not in NorDig */
+ case QAM_64:
+ SignalToNoiseRel = SignalToNoise - 260;
+ break;
+ case QAM_128:
+ SignalToNoiseRel = SignalToNoise - 290;
+ break;
+ default:
+ case QAM_256:
+ SignalToNoiseRel = SignalToNoise - 320;
+ break;
+ }
+
+ if (SignalToNoiseRel < -70)
+ *pQuality = 0;
+ else if (SignalToNoiseRel < 30)
+ *pQuality = ((SignalToNoiseRel + 70) *
+ BERQuality) / 100;
+ else
+ *pQuality = BERQuality;
+ } while (0);
+
+ return status;
+}
+
+static int GetQuality(struct drxk_state *state, s32 *pQuality)
+{
+ dprintk(1, "\n");
+
+ switch (state->m_OperationMode) {
+ case OM_DVBT:
+ return GetDVBTQuality(state, pQuality);
+ case OM_QAM_ITU_A:
+ return GetDVBCQuality(state, pQuality);
+ default:
+ break;
+ }
+
+ return 0;
+}
+#endif
+
+/* Free data ram in SIO HI */
+#define SIO_HI_RA_RAM_USR_BEGIN__A 0x420040
+#define SIO_HI_RA_RAM_USR_END__A 0x420060
+
+#define DRXK_HI_ATOMIC_BUF_START (SIO_HI_RA_RAM_USR_BEGIN__A)
+#define DRXK_HI_ATOMIC_BUF_END (SIO_HI_RA_RAM_USR_BEGIN__A + 7)
+#define DRXK_HI_ATOMIC_READ SIO_HI_RA_RAM_PAR_3_ACP_RW_READ
+#define DRXK_HI_ATOMIC_WRITE SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE
+
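+/* Split a 32-bit FASI address into its block, bank and offset fields */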
+#define DRXDAP_FASI_ADDR2BLOCK(addr) (((addr) >> 22) & 0x3F)
+#define DRXDAP_FASI_ADDR2BANK(addr) (((addr) >> 16) & 0x3F)
+#define DRXDAP_FASI_ADDR2OFFSET(addr) ((addr) & 0x7FFF)
+
+static int ConfigureI2CBridge(struct drxk_state *state, bool bEnableBridge)
+{
+ int status = -EINVAL;
+
+ dprintk(1, "\n");
+
+ if (state->m_DrxkState == DRXK_UNINITIALIZED)
+ goto error;
+ if (state->m_DrxkState == DRXK_POWERED_DOWN)
+ goto error;
+
+ if (state->no_i2c_bridge)
+ return 0;
+
+ status = write16(state, SIO_HI_RA_RAM_PAR_1__A, SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY);
+ if (status < 0)
+ goto error;
+ if (bEnableBridge) {
+ status = write16(state, SIO_HI_RA_RAM_PAR_2__A, SIO_HI_RA_RAM_PAR_2_BRD_CFG_CLOSED);
+ if (status < 0)
+ goto error;
+ } else {
+ status = write16(state, SIO_HI_RA_RAM_PAR_2__A, SIO_HI_RA_RAM_PAR_2_BRD_CFG_OPEN);
+ if (status < 0)
+ goto error;
+ }
+
+ status = HI_Command(state, SIO_HI_RA_RAM_CMD_BRDCTRL, 0);
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int SetPreSaw(struct drxk_state *state,
+ struct SCfgPreSaw *pPreSawCfg)
+{
+ int status = -EINVAL;
+
+ dprintk(1, "\n");
+
+ if ((pPreSawCfg == NULL)
+ || (pPreSawCfg->reference > IQM_AF_PDREF__M))
+ goto error;
+
+ status = write16(state, IQM_AF_PDREF__A, pPreSawCfg->reference);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int BLDirectCmd(struct drxk_state *state, u32 targetAddr,
+ u16 romOffset, u16 nrOfElements, u32 timeOut)
+{
+ u16 blStatus = 0;
+ u16 offset = (u16) ((targetAddr >> 0) & 0x00FFFF);
+ u16 blockbank = (u16) ((targetAddr >> 16) & 0x000FFF);
+ int status;
+ unsigned long end;
+
+ dprintk(1, "\n");
+
+ mutex_lock(&state->mutex);
+ status = write16(state, SIO_BL_MODE__A, SIO_BL_MODE_DIRECT);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_TGT_HDR__A, blockbank);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_TGT_ADDR__A, offset);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_SRC_ADDR__A, romOffset);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_SRC_LEN__A, nrOfElements);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_BL_ENABLE__A, SIO_BL_ENABLE_ON);
+ if (status < 0)
+ goto error;
+
+ end = jiffies + msecs_to_jiffies(timeOut);
+ do {
+ status = read16(state, SIO_BL_STATUS__A, &blStatus);
+ if (status < 0)
+ goto error;
+ } while ((blStatus == 0x1) && time_is_after_jiffies(end));
+ if (blStatus == 0x1) {
+ printk(KERN_ERR "drxk: SIO not ready\n");
+ status = -EINVAL;
+ goto error2;
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+error2:
+ mutex_unlock(&state->mutex);
+ return status;
+
+}
+
+static int ADCSyncMeasurement(struct drxk_state *state, u16 *count)
+{
+ u16 data = 0;
+ int status;
+
+ dprintk(1, "\n");
+
+ /* Start measurement */
+ status = write16(state, IQM_AF_COMM_EXEC__A, IQM_AF_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_START_LOCK__A, 1);
+ if (status < 0)
+ goto error;
+
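+ /* Count how many of the three ADC phase registers read back 127;
+ the caller needs at least two of them to consider the ADC in sync */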
+ *count = 0;
+ status = read16(state, IQM_AF_PHASE0__A, &data);
+ if (status < 0)
+ goto error;
+ if (data == 127)
+ *count = *count + 1;
+ status = read16(state, IQM_AF_PHASE1__A, &data);
+ if (status < 0)
+ goto error;
+ if (data == 127)
+ *count = *count + 1;
+ status = read16(state, IQM_AF_PHASE2__A, &data);
+ if (status < 0)
+ goto error;
+ if (data == 127)
+ *count = *count + 1;
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int ADCSynchronization(struct drxk_state *state)
+{
+ u16 count = 0;
+ int status;
+
+ dprintk(1, "\n");
+
+ status = ADCSyncMeasurement(state, &count);
+ if (status < 0)
+ goto error;
+
+ if (count == 1) {
+ /* Try sampling on a different edge */
+ u16 clkNeg = 0;
+
+ status = read16(state, IQM_AF_CLKNEG__A, &clkNeg);
+ if (status < 0)
+ goto error;
+ if ((clkNeg & IQM_AF_CLKNEG_CLKNEGDATA__M) ==
+ IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_POS) {
+ clkNeg &= (~(IQM_AF_CLKNEG_CLKNEGDATA__M));
+ clkNeg |=
+ IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_NEG;
+ } else {
+ clkNeg &= (~(IQM_AF_CLKNEG_CLKNEGDATA__M));
+ clkNeg |=
+ IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_POS;
+ }
+ status = write16(state, IQM_AF_CLKNEG__A, clkNeg);
+ if (status < 0)
+ goto error;
+ status = ADCSyncMeasurement(state, &count);
+ if (status < 0)
+ goto error;
+ }
+
+ if (count < 2)
+ status = -EINVAL;
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int SetFrequencyShifter(struct drxk_state *state,
+ u16 intermediateFreqkHz,
+ s32 tunerFreqOffset, bool isDTV)
+{
+ bool selectPosImage = false;
+ u32 rfFreqResidual = tunerFreqOffset;
+ u32 fmFrequencyShift = 0;
+ bool tunerMirror = !state->m_bMirrorFreqSpect;
+ u32 adcFreq;
+ bool adcFlip;
+ int status;
+ u32 ifFreqActual;
+ u32 samplingFrequency = (u32) (state->m_sysClockFreq / 3);
+ u32 frequencyShift;
+ bool imageToSelect;
+
+ dprintk(1, "\n");
+
+ /*
+ Program frequency shifter
+ No need to account for mirroring on RF
+ */
+ if (isDTV) {
+ if ((state->m_OperationMode == OM_QAM_ITU_A) ||
+ (state->m_OperationMode == OM_QAM_ITU_C) ||
+ (state->m_OperationMode == OM_DVBT))
+ selectPosImage = true;
+ else
+ selectPosImage = false;
+ }
+ if (tunerMirror)
+ /* tuner doesn't mirror */
+ ifFreqActual = intermediateFreqkHz +
+ rfFreqResidual + fmFrequencyShift;
+ else
+ /* tuner mirrors */
+ ifFreqActual = intermediateFreqkHz -
+ rfFreqResidual - fmFrequencyShift;
+ if (ifFreqActual > samplingFrequency / 2) {
+ /* adc mirrors */
+ adcFreq = samplingFrequency - ifFreqActual;
+ adcFlip = true;
+ } else {
+ /* adc doesn't mirror */
+ adcFreq = ifFreqActual;
+ adcFlip = false;
+ }
+
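+ /* The net spectrum inversion is the XOR of every mirroring stage:
+ RF, tuner, ADC and the standard's preferred image */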
+ frequencyShift = adcFreq;
+ imageToSelect = state->m_rfmirror ^ tunerMirror ^
+ adcFlip ^ selectPosImage;
+ state->m_IqmFsRateOfs =
+ Frac28a((frequencyShift), samplingFrequency);
+
+ if (imageToSelect)
+ state->m_IqmFsRateOfs = ~state->m_IqmFsRateOfs + 1;
+
+ /* Program frequency shifter with tuner offset compensation */
+ /* frequencyShift += tunerFreqOffset; TODO */
+ status = write32(state, IQM_FS_RATE_OFS_LO__A,
+ state->m_IqmFsRateOfs);
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int InitAGC(struct drxk_state *state, bool isDTV)
+{
+ u16 ingainTgt = 0;
+ u16 ingainTgtMin = 0;
+ u16 ingainTgtMax = 0;
+ u16 clpCyclen = 0;
+ u16 clpSumMin = 0;
+ u16 clpDirTo = 0;
+ u16 snsSumMin = 0;
+ u16 snsSumMax = 0;
+ u16 clpSumMax = 0;
+ u16 snsDirTo = 0;
+ u16 kiInnergainMin = 0;
+ u16 ifIaccuHiTgt = 0;
+ u16 ifIaccuHiTgtMin = 0;
+ u16 ifIaccuHiTgtMax = 0;
+ u16 data = 0;
+ u16 fastClpCtrlDelay = 0;
+ u16 clpCtrlMode = 0;
+ int status = 0;
+
+ dprintk(1, "\n");
+
+ /* Common settings */
+ snsSumMax = 1023;
+ ifIaccuHiTgtMin = 2047;
+ clpCyclen = 500;
+ clpSumMax = 1023;
+
+ /* AGCInit() not available for DVBT; init done in microcode */
+ if (!IsQAM(state)) {
+ printk(KERN_ERR "drxk: %s: mode %d is not DVB-C\n", __func__, state->m_OperationMode);
+ return -EINVAL;
+ }
+
+ /* FIXME: Analog TV AGC requires different settings */
+
+ /* Standard specific settings */
+ clpSumMin = 8;
+ clpDirTo = (u16) -9;
+ clpCtrlMode = 0;
+ snsSumMin = 8;
+ snsDirTo = (u16) -9;
+ kiInnergainMin = (u16) -1030;
+ ifIaccuHiTgtMax = 0x2380;
+ ifIaccuHiTgt = 0x2380;
+ ingainTgtMin = 0x0511;
+ ingainTgt = 0x0511;
+ ingainTgtMax = 5119;
+ fastClpCtrlDelay = state->m_qamIfAgcCfg.FastClipCtrlDelay;
+
+ status = write16(state, SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A, fastClpCtrlDelay);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_AGC_CLP_CTRL_MODE__A, clpCtrlMode);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_INGAIN_TGT__A, ingainTgt);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MIN__A, ingainTgtMin);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MAX__A, ingainTgtMax);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__A, ifIaccuHiTgtMin);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A, ifIaccuHiTgtMax);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_HI__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_LO__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_RF_IACCU_HI__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_RF_IACCU_LO__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_SUM_MAX__A, clpSumMax);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_SUM_MAX__A, snsSumMax);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_AGC_KI_INNERGAIN_MIN__A, kiInnergainMin);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_IF_IACCU_HI_TGT__A, ifIaccuHiTgt);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_CYCLEN__A, clpCyclen);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_AGC_RF_SNS_DEV_MAX__A, 1023);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_RF_SNS_DEV_MIN__A, (u16) -1023);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_FAST_SNS_CTRL_DELAY__A, 50);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_AGC_KI_MAXMINGAIN_TH__A, 20);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_SUM_MIN__A, clpSumMin);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_SUM_MIN__A, snsSumMin);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_DIR_TO__A, clpDirTo);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_DIR_TO__A, snsDirTo);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_KI_MINGAIN__A, 0x7fff);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_KI_MAXGAIN__A, 0x0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_KI_MIN__A, 0x0117);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_KI_MAX__A, 0x0657);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_SUM__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_CYCCNT__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_DIR_WD__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_CLP_DIR_STP__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_SUM__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_CYCCNT__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_DIR_WD__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_DIR_STP__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_SNS_CYCLEN__A, 500);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_KI_CYCLEN__A, 500);
+ if (status < 0)
+ goto error;
+
+ /* Initialize inner-loop KI gain factors */
+ status = read16(state, SCU_RAM_AGC_KI__A, &data);
+ if (status < 0)
+ goto error;
+
+ data = 0x0657;
+ data &= ~SCU_RAM_AGC_KI_RF__M;
+ data |= (DRXK_KI_RAGC_QAM << SCU_RAM_AGC_KI_RF__B);
+ data &= ~SCU_RAM_AGC_KI_IF__M;
+ data |= (DRXK_KI_IAGC_QAM << SCU_RAM_AGC_KI_IF__B);
+
+ status = write16(state, SCU_RAM_AGC_KI__A, data);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int DVBTQAMGetAccPktErr(struct drxk_state *state, u16 *packetErr)
+{
+ int status;
+
+ dprintk(1, "\n");
+ if (packetErr == NULL)
+ status = write16(state, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, 0);
+ else
+ status = read16(state, SCU_RAM_FEC_ACCUM_PKT_FAILURES__A, packetErr);
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int DVBTScCommand(struct drxk_state *state,
+ u16 cmd, u16 subcmd,
+ u16 param0, u16 param1, u16 param2,
+ u16 param3, u16 param4)
+{
+ u16 curCmd = 0;
+ u16 errCode = 0;
+ u16 retryCnt = 0;
+ u16 scExec = 0;
+ int status;
+
+ dprintk(1, "\n");
+ status = read16(state, OFDM_SC_COMM_EXEC__A, &scExec);
+ if (scExec != 1) {
+ /* SC is not running */
+ status = -EINVAL;
+ }
+ if (status < 0)
+ goto error;
+
+ /* Wait until sc is ready to receive command */
+ retryCnt = 0;
+ do {
+ msleep(1);
+ status = read16(state, OFDM_SC_RA_RAM_CMD__A, &curCmd);
+ retryCnt++;
+ } while ((curCmd != 0) && (retryCnt < DRXK_MAX_RETRIES));
+ if (retryCnt >= DRXK_MAX_RETRIES && (status < 0))
+ goto error;
+
+ /* Write sub-command */
+ switch (cmd) {
+ /* All commands using sub-cmd */
+ case OFDM_SC_RA_RAM_CMD_PROC_START:
+ case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM:
+ case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM:
+ status = write16(state, OFDM_SC_RA_RAM_CMD_ADDR__A, subcmd);
+ if (status < 0)
+ goto error;
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+
+ /* Write needed parameters and the command */
+ switch (cmd) {
+ /* All commands using 5 parameters */
+ /* All commands using 4 parameters */
+ /* All commands using 3 parameters */
+ /* All commands using 2 parameters */
+ case OFDM_SC_RA_RAM_CMD_PROC_START:
+ case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM:
+ case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM:
+ status = write16(state, OFDM_SC_RA_RAM_PARAM1__A, param1);
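+ /* fall through: also write the lower-numbered parameters */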
+ /* All commands using 1 parameters */
+ case OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING:
+ case OFDM_SC_RA_RAM_CMD_USER_IO:
+ status = write16(state, OFDM_SC_RA_RAM_PARAM0__A, param0);
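+ /* fall through */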
+ /* All commands using 0 parameters */
+ case OFDM_SC_RA_RAM_CMD_GET_OP_PARAM:
+ case OFDM_SC_RA_RAM_CMD_NULL:
+ /* Write command */
+ status = write16(state, OFDM_SC_RA_RAM_CMD__A, cmd);
+ break;
+ default:
+ /* Unknown command */
+ status = -EINVAL;
+ }
+ if (status < 0)
+ goto error;
+
+ /* Wait until sc is ready processing command */
+ retryCnt = 0;
+ do {
+ msleep(1);
+ status = read16(state, OFDM_SC_RA_RAM_CMD__A, &curCmd);
+ retryCnt++;
+ } while ((curCmd != 0) && (retryCnt < DRXK_MAX_RETRIES));
+ if (retryCnt >= DRXK_MAX_RETRIES && (status < 0))
+ goto error;
+
+ /* Check for illegal cmd */
+ status = read16(state, OFDM_SC_RA_RAM_CMD_ADDR__A, &errCode);
+ if (errCode == 0xFFFF) {
+ /* illegal command */
+ status = -EINVAL;
+ }
+ if (status < 0)
+ goto error;
+
+ /* Retrieve result parameters from SC */
+ switch (cmd) {
+ /* All commands yielding 5 results */
+ /* All commands yielding 4 results */
+ /* All commands yielding 3 results */
+ /* All commands yielding 2 results */
+ /* All commands yielding 1 result */
+ case OFDM_SC_RA_RAM_CMD_USER_IO:
+ case OFDM_SC_RA_RAM_CMD_GET_OP_PARAM:
+ status = read16(state, OFDM_SC_RA_RAM_PARAM0__A, &(param0));
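+ /* fall through */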
+ /* All commands yielding 0 results */
+ case OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING:
+ case OFDM_SC_RA_RAM_CMD_SET_TIMER:
+ case OFDM_SC_RA_RAM_CMD_PROC_START:
+ case OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM:
+ case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM:
+ case OFDM_SC_RA_RAM_CMD_NULL:
+ break;
+ default:
+ /* Unknown command */
+ status = -EINVAL;
+ break;
+ } /* switch (cmd) */
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int PowerUpDVBT(struct drxk_state *state)
+{
+ enum DRXPowerMode powerMode = DRX_POWER_UP;
+ int status;
+
+ dprintk(1, "\n");
+ status = CtrlPowerMode(state, &powerMode);
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int DVBTCtrlSetIncEnable(struct drxk_state *state, bool *enabled)
+{
+ int status;
+
+ dprintk(1, "\n");
+ if (*enabled)
+ status = write16(state, IQM_CF_BYPASSDET__A, 0);
+ else
+ status = write16(state, IQM_CF_BYPASSDET__A, 1);
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+#define DEFAULT_FR_THRES_8K 4000
+static int DVBTCtrlSetFrEnable(struct drxk_state *state, bool *enabled)
+{
+
+ int status;
+
+ dprintk(1, "\n");
+ if (*enabled) {
+ /* enable: write the default 8K FR threshold */
+ status = write16(state, OFDM_SC_RA_RAM_FR_THRES_8K__A,
+ DEFAULT_FR_THRES_8K);
+ } else {
+ /* disable: write a zero threshold */
+ status = write16(state, OFDM_SC_RA_RAM_FR_THRES_8K__A, 0);
+ }
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static int DVBTCtrlSetEchoThreshold(struct drxk_state *state,
+ struct DRXKCfgDvbtEchoThres_t *echoThres)
+{
+ u16 data = 0;
+ int status;
+
+ dprintk(1, "\n");
+ status = read16(state, OFDM_SC_RA_RAM_ECHO_THRES__A, &data);
+ if (status < 0)
+ goto error;
+
+ switch (echoThres->fftMode) {
+ case DRX_FFTMODE_2K:
+ data &= ~OFDM_SC_RA_RAM_ECHO_THRES_2K__M;
+ data |= ((echoThres->threshold <<
+ OFDM_SC_RA_RAM_ECHO_THRES_2K__B)
+ & (OFDM_SC_RA_RAM_ECHO_THRES_2K__M));
+ break;
+ case DRX_FFTMODE_8K:
+ data &= ~OFDM_SC_RA_RAM_ECHO_THRES_8K__M;
+ data |= ((echoThres->threshold <<
+ OFDM_SC_RA_RAM_ECHO_THRES_8K__B)
+ & (OFDM_SC_RA_RAM_ECHO_THRES_8K__M));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status = write16(state, OFDM_SC_RA_RAM_ECHO_THRES__A, data);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int DVBTCtrlSetSqiSpeed(struct drxk_state *state,
+ enum DRXKCfgDvbtSqiSpeed *speed)
+{
+ int status = -EINVAL;
+
+ dprintk(1, "\n");
+
+ switch (*speed) {
+ case DRXK_DVBT_SQI_SPEED_FAST:
+ case DRXK_DVBT_SQI_SPEED_MEDIUM:
+ case DRXK_DVBT_SQI_SPEED_SLOW:
+ break;
+ default:
+ goto error;
+ }
+ status = write16(state, SCU_RAM_FEC_PRE_RS_BER_FILTER_SH__A,
+ (u16) *speed);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief Activate DVBT specific presets
+* \param demod instance of demodulator.
+* \return DRXStatus_t.
+*
+* Called by SetDVBTStandard()
+*
+*/
+static int DVBTActivatePresets(struct drxk_state *state)
+{
+ int status;
+ bool setincenable = false;
+ bool setfrenable = true;
+
+ struct DRXKCfgDvbtEchoThres_t echoThres2k = { 0, DRX_FFTMODE_2K };
+ struct DRXKCfgDvbtEchoThres_t echoThres8k = { 0, DRX_FFTMODE_8K };
+
+ dprintk(1, "\n");
+ status = DVBTCtrlSetIncEnable(state, &setincenable);
+ if (status < 0)
+ goto error;
+ status = DVBTCtrlSetFrEnable(state, &setfrenable);
+ if (status < 0)
+ goto error;
+ status = DVBTCtrlSetEchoThreshold(state, &echoThres2k);
+ if (status < 0)
+ goto error;
+ status = DVBTCtrlSetEchoThreshold(state, &echoThres8k);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_AGC_INGAIN_TGT_MAX__A, state->m_dvbtIfAgcCfg.IngainTgtMax);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief Initialize channel-switch-independent settings for DVB-T.
+* \param demod instance of demodulator.
+* \return DRXStatus_t.
+*
+* For ROM code, the channel filter taps are loaded by the bootloader. For
+* microcode, the DVB-T taps from drxk_filters.h are used.
+*/
+static int SetDVBTStandard(struct drxk_state *state,
+ enum OperationMode oMode)
+{
+ u16 cmdResult = 0;
+ u16 data = 0;
+ int status;
+
+ dprintk(1, "\n");
+
+ status = PowerUpDVBT(state);
+ if (status < 0)
+ goto error;
+ /* added antenna switch */
+ SwitchAntennaToDVBT(state);
+ /* send OFDM reset command */
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_RESET, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+
+ /* send OFDM setenv command */
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+
+ /* reset datapath for OFDM, processors first */
+ status = write16(state, OFDM_SC_COMM_EXEC__A, OFDM_SC_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_LC_COMM_EXEC__A, OFDM_LC_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_STOP);
+ if (status < 0)
+ goto error;
+
+ /* IQM setup */
+ /* synchronize on ofdm_festart */
+ status = write16(state, IQM_AF_UPD_SEL__A, 1);
+ if (status < 0)
+ goto error;
+ /* window size for clipping ADC detection */
+ status = write16(state, IQM_AF_CLP_LEN__A, 0);
+ if (status < 0)
+ goto error;
+ /* window size for sense pre-SAW detection */
+ status = write16(state, IQM_AF_SNS_LEN__A, 0);
+ if (status < 0)
+ goto error;
+ /* AF analog mux: route the signal input to the ADC */
+ status = write16(state, IQM_AF_AMUX__A, IQM_AF_AMUX_SIGNAL2ADC);
+ if (status < 0)
+ goto error;
+ status = SetIqmAf(state, true);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, IQM_AF_AGC_RF__A, 0);
+ if (status < 0)
+ goto error;
+
+ /* Impulse noise cruncher setup */
+ status = write16(state, IQM_AF_INC_LCT__A, 0); /* crunch in IQM_CF */
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_DET_LCT__A, 0); /* detect in IQM_CF */
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_WND_LEN__A, 3); /* peak detector window length */
+ if (status < 0)
+ goto error;
+
+ status = write16(state, IQM_RC_STRETCH__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_OUT_ENA__A, 0x4); /* enable output 2 */
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_DS_ENA__A, 0x4); /* decimate output 2 */
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_SCALE__A, 1600);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_SCALE_SH__A, 0);
+ if (status < 0)
+ goto error;
+
+ /* virtual clipping threshold for clipping ADC detection */
+ status = write16(state, IQM_AF_CLP_TH__A, 448);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_DATATH__A, 495); /* crunching threshold */
+ if (status < 0)
+ goto error;
+
+ status = BLChainCmd(state, DRXK_BL_ROM_OFFSET_TAPS_DVBT, DRXK_BLCC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, IQM_CF_PKDTH__A, 2); /* peak detector threshold */
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_POW_MEAS_LEN__A, 2);
+ if (status < 0)
+ goto error;
+ /* enable power measurement interrupt */
+ status = write16(state, IQM_CF_COMM_INT_MSK__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_ACTIVE);
+ if (status < 0)
+ goto error;
+
+ /* IQM will not be reset from here, sync ADC and update/init AGC */
+ status = ADCSynchronization(state);
+ if (status < 0)
+ goto error;
+ status = SetPreSaw(state, &state->m_dvbtPreSawCfg);
+ if (status < 0)
+ goto error;
+
+ /* Halt SCU to enable safe non-atomic accesses */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_HOLD);
+ if (status < 0)
+ goto error;
+
+ status = SetAgcRf(state, &state->m_dvbtRfAgcCfg, true);
+ if (status < 0)
+ goto error;
+ status = SetAgcIf(state, &state->m_dvbtIfAgcCfg, true);
+ if (status < 0)
+ goto error;
+
+ /* Set Noise Estimation notch width and enable DC fix */
+ status = read16(state, OFDM_SC_RA_RAM_CONFIG__A, &data);
+ if (status < 0)
+ goto error;
+ data |= OFDM_SC_RA_RAM_CONFIG_NE_FIX_ENABLE__M;
+ status = write16(state, OFDM_SC_RA_RAM_CONFIG__A, data);
+ if (status < 0)
+ goto error;
+
+ /* Activate SCU to enable SCU commands */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+
+ if (!state->m_DRXK_A3_ROM_CODE) {
+ /* AGCInit() is not done for DVBT, so set agcFastClipCtrlDelay */
+ status = write16(state, SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A, state->m_dvbtIfAgcCfg.FastClipCtrlDelay);
+ if (status < 0)
+ goto error;
+ }
+
+ /* OFDM_SC setup */
+#ifdef COMPILE_FOR_NONRT
+ status = write16(state, OFDM_SC_RA_RAM_BE_OPT_DELAY__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_BE_OPT_INIT_DELAY__A, 2);
+ if (status < 0)
+ goto error;
+#endif
+
+ /* FEC setup */
+ status = write16(state, FEC_DI_INPUT_CTL__A, 1); /* OFDM input */
+ if (status < 0)
+ goto error;
+
+
+#ifdef COMPILE_FOR_NONRT
+ status = write16(state, FEC_RS_MEASUREMENT_PERIOD__A, 0x400);
+ if (status < 0)
+ goto error;
+#else
+ status = write16(state, FEC_RS_MEASUREMENT_PERIOD__A, 0x1000);
+ if (status < 0)
+ goto error;
+#endif
+ status = write16(state, FEC_RS_MEASUREMENT_PRESCALE__A, 0x0001);
+ if (status < 0)
+ goto error;
+
+ /* Setup MPEG bus */
+ status = MPEGTSDtoSetup(state, OM_DVBT);
+ if (status < 0)
+ goto error;
+ /* Set DVBT Presets */
+ status = DVBTActivatePresets(state);
+ if (status < 0)
+ goto error;
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+/*============================================================================*/
+/**
+* \brief Start DVB-T demodulation for the channel.
+* \param demod instance of demodulator.
+* \return DRXStatus_t.
+*/
+static int DVBTStart(struct drxk_state *state)
+{
+ u16 param1;
+ int status;
+ /* DRXKOfdmScCmd_t scCmd; */
+
+ dprintk(1, "\n");
+ /* Start correct processes to get in lock */
+ /* DRXK: OFDM_SC_RA_RAM_PROC_LOCKTRACK is no longer in mapfile! */
+ param1 = OFDM_SC_RA_RAM_LOCKTRACK_MIN;
+ status = DVBTScCommand(state, OFDM_SC_RA_RAM_CMD_PROC_START, 0, OFDM_SC_RA_RAM_SW_EVENT_RUN_NMASK__M, param1, 0, 0, 0);
+ if (status < 0)
+ goto error;
+ /* Start FEC OC */
+ status = MPEGTSStart(state);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_COMM_EXEC__A, FEC_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+
+/*============================================================================*/
+
+/**
+* \brief Set up the DVB-T demodulator for the channel.
+* \param demod instance of demodulator.
+* \return DRXStatus_t.
+* (originally DVBTSetChannel())
+*/
+static int SetDVBT(struct drxk_state *state, u16 IntermediateFreqkHz,
+ s32 tunerFreqOffset)
+{
+ u16 cmdResult = 0;
+ u16 transmissionParams = 0;
+ u16 operationMode = 0;
+ u32 iqmRcRateOfs = 0;
+ u32 bandwidth = 0;
+ u16 param1;
+ int status;
+
+ dprintk(1, "IF =%d, TFO = %d\n", IntermediateFreqkHz, tunerFreqOffset);
+
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_STOP, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+
+ /* Halt SCU to enable safe non-atomic accesses */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_HOLD);
+ if (status < 0)
+ goto error;
+
+ /* Stop processors */
+ status = write16(state, OFDM_SC_COMM_EXEC__A, OFDM_SC_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_LC_COMM_EXEC__A, OFDM_LC_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+
+ /* Mandatory fix: always stop the CP; this is required to set the spl offset
+ back to the hardware default (it is set to 0 by the ucode during pilot
+ detection). */
+ status = write16(state, OFDM_CP_COMM_EXEC__A, OFDM_CP_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+
+ /*== Write channel settings to device =====================================*/
+
+ /* mode */
+ switch (state->param.u.ofdm.transmission_mode) {
+ case TRANSMISSION_MODE_AUTO:
+ default:
+ operationMode |= OFDM_SC_RA_RAM_OP_AUTO_MODE__M;
+ /* fall through, try first guess DRX_FFTMODE_8K */
+ case TRANSMISSION_MODE_8K:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_MODE_8K;
+ break;
+ case TRANSMISSION_MODE_2K:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_MODE_2K;
+ break;
+ }
+
+ /* guard */
+ switch (state->param.u.ofdm.guard_interval) {
+ default:
+ case GUARD_INTERVAL_AUTO:
+ operationMode |= OFDM_SC_RA_RAM_OP_AUTO_GUARD__M;
+ /* fall through, try first guess DRX_GUARD_1DIV4 */
+ case GUARD_INTERVAL_1_4:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_4;
+ break;
+ case GUARD_INTERVAL_1_32:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_32;
+ break;
+ case GUARD_INTERVAL_1_16:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_16;
+ break;
+ case GUARD_INTERVAL_1_8:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_8;
+ break;
+ }
+
+ /* hierarchy */
+ switch (state->param.u.ofdm.hierarchy_information) {
+ case HIERARCHY_AUTO:
+ case HIERARCHY_NONE:
+ default:
+ operationMode |= OFDM_SC_RA_RAM_OP_AUTO_HIER__M;
+ /* fall through, try first guess SC_RA_RAM_OP_PARAM_HIER_NO */
+ /* transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_NO; */
+ /* break; */
+ case HIERARCHY_1:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A1;
+ break;
+ case HIERARCHY_2:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A2;
+ break;
+ case HIERARCHY_4:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A4;
+ break;
+ }
+
+
+ /* constellation */
+ switch (state->param.u.ofdm.constellation) {
+ case QAM_AUTO:
+ default:
+ operationMode |= OFDM_SC_RA_RAM_OP_AUTO_CONST__M;
+ /* fall through, try first guess DRX_CONSTELLATION_QAM64 */
+ case QAM_64:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64;
+ break;
+ case QPSK:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QPSK;
+ break;
+ case QAM_16:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM16;
+ break;
+ }
+#if 0
+ /* No hierarchical channel support in BDA */
+ /* Priority (only for hierarchical channels) */
+ switch (channel->priority) {
+ case DRX_PRIORITY_LOW:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_LO;
+ WR16(devAddr, OFDM_EC_SB_PRIOR__A,
+ OFDM_EC_SB_PRIOR_LO);
+ break;
+ case DRX_PRIORITY_HIGH:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI;
+ WR16(devAddr, OFDM_EC_SB_PRIOR__A,
+ OFDM_EC_SB_PRIOR_HI);
+ break;
+ case DRX_PRIORITY_UNKNOWN: /* fall through */
+ default:
+ status = -EINVAL;
+ goto error;
+ }
+#else
+ /* Set priority to high */
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI;
+ status = write16(state, OFDM_EC_SB_PRIOR__A, OFDM_EC_SB_PRIOR_HI);
+ if (status < 0)
+ goto error;
+#endif
+
+ /* coderate */
+ switch (state->param.u.ofdm.code_rate_HP) {
+ case FEC_AUTO:
+ default:
+ operationMode |= OFDM_SC_RA_RAM_OP_AUTO_RATE__M;
+ /* fall through, try first guess DRX_CODERATE_2DIV3 */
+ case FEC_2_3:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3;
+ break;
+ case FEC_1_2:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_1_2;
+ break;
+ case FEC_3_4:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_3_4;
+ break;
+ case FEC_5_6:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_5_6;
+ break;
+ case FEC_7_8:
+ transmissionParams |= OFDM_SC_RA_RAM_OP_PARAM_RATE_7_8;
+ break;
+ }
+
+ /* SAW filter selection: normally not necessary, but if wanted
+ the application can select a SAW filter via the driver by using UIOs */
+ /* First determine real bandwidth (Hz) */
+ /* Also set delay for impulse noise cruncher */
+ /* Also set parameters for the EC_OC fix; note that EC_OC_REG_TMD_HIL_MAR is
+ changed by the SC as a fix for some 8K, 1/8 guard cases, but is restored by
+ the InitEC and ResetEC functions */
+ switch (state->param.u.ofdm.bandwidth) {
+ case BANDWIDTH_AUTO:
+ case BANDWIDTH_8_MHZ:
+ bandwidth = DRXK_BANDWIDTH_8MHZ_IN_HZ;
+ status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 3052);
+ if (status < 0)
+ goto error;
+ /* cochannel protection for PAL 8 MHz */
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, 7);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, 7);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, 7);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, 1);
+ if (status < 0)
+ goto error;
+ break;
+ case BANDWIDTH_7_MHZ:
+ bandwidth = DRXK_BANDWIDTH_7MHZ_IN_HZ;
+ status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 3491);
+ if (status < 0)
+ goto error;
+ /* cochannel protection for PAL 7 MHz */
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, 1);
+ if (status < 0)
+ goto error;
+ break;
+ case BANDWIDTH_6_MHZ:
+ bandwidth = DRXK_BANDWIDTH_6MHZ_IN_HZ;
+ status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A, 4073);
+ if (status < 0)
+ goto error;
+ /* cochannel protection for NTSC 6 MHz */
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A, 19);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A, 19);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A, 14);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A, 1);
+ if (status < 0)
+ goto error;
+ break;
+ default:
+ status = -EINVAL;
+ goto error;
+ }
+
+ if (iqmRcRateOfs == 0) {
+ /* Now compute IQM_RC_RATE_OFS:
+ ((((SysFreq / BandWidth) / 2) / 2) - 1) * 2^23
+ =>
+ ((SysFreq / BandWidth) * 2^21) - 2^23
+ */
+ /* (SysFreq / BandWidth) * (2^28) */
+ /* assert (MAX(sysClk)/MIN(bandwidth) < 16)
+ => assert(MAX(sysClk) < 16*MIN(bandwidth))
+ => assert(109714272 > 48000000) = true so Frac 28 can be used */
+ iqmRcRateOfs = Frac28a((u32)
+ ((state->m_sysClockFreq *
+ 1000) / 3), bandwidth);
+ /* (SysFreq / BandWidth) * (2^21), rounding before truncating */
+ if ((iqmRcRateOfs & 0x7fL) >= 0x40)
+ iqmRcRateOfs += 0x80L;
+ iqmRcRateOfs = iqmRcRateOfs >> 7;
+ /* ((SysFreq / BandWidth) * (2^21)) - (2^23) */
+ iqmRcRateOfs = iqmRcRateOfs - (1 << 23);
+ }
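+ /*
+ Numeric illustration (not taken from the hardware spec): if the ratio
+ SysFreq / BandWidth happened to be exactly 5.5, Frac28a() above would
+ return about 5.5 * 2^28, the rounded shift by 7 would leave
+ 5.5 * 2^21 = 11534336, and subtracting 2^23 = 8388608 would yield an
+ IQM_RC_RATE_OFS of 3145728 (0x300000). Actual values depend on
+ m_sysClockFreq and the DRXK_BANDWIDTH_*_IN_HZ constants.
+ */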
+
+ iqmRcRateOfs &=
+ ((((u32) IQM_RC_RATE_OFS_HI__M) <<
+ IQM_RC_RATE_OFS_LO__W) | IQM_RC_RATE_OFS_LO__M);
+ status = write32(state, IQM_RC_RATE_OFS_LO__A, iqmRcRateOfs);
+ if (status < 0)
+ goto error;
+
+ /* Bandwidth setting done */
+
+#if 0
+ status = DVBTSetFrequencyShift(demod, channel, tunerOffset);
+ if (status < 0)
+ goto error;
+#endif
+ status = SetFrequencyShifter(state, IntermediateFreqkHz, tunerFreqOffset, true);
+ if (status < 0)
+ goto error;
+
+ /*== Start SC, write channel settings to SC ===============================*/
+
+ /* Activate SCU to enable SCU commands */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+
+ /* Enable SC after setting all other parameters */
+ status = write16(state, OFDM_SC_COMM_STATE__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, OFDM_SC_COMM_EXEC__A, 1);
+ if (status < 0)
+ goto error;
+
+
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_OFDM | SCU_RAM_COMMAND_CMD_DEMOD_START, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+
+ /* Write SC parameter registers, set all AUTO flags in operation mode */
+ param1 = (OFDM_SC_RA_RAM_OP_AUTO_MODE__M |
+ OFDM_SC_RA_RAM_OP_AUTO_GUARD__M |
+ OFDM_SC_RA_RAM_OP_AUTO_CONST__M |
+ OFDM_SC_RA_RAM_OP_AUTO_HIER__M |
+ OFDM_SC_RA_RAM_OP_AUTO_RATE__M);
+ status = DVBTScCommand(state, OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM,
+ 0, transmissionParams, param1, 0, 0, 0);
+ if (status < 0)
+ goto error;
+
+ if (!state->m_DRXK_A3_ROM_CODE)
+ status = DVBTCtrlSetSqiSpeed(state, &state->m_sqiSpeed);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+
+/*============================================================================*/
+
+/**
+* \brief Retrieve lock status.
+* \param demod Pointer to demodulator instance.
+* \param lockStat Pointer to lock status structure.
+* \return DRXStatus_t.
+*
+*/
+static int GetDVBTLockStatus(struct drxk_state *state, u32 *pLockStatus)
+{
+ int status;
+ const u16 mpeg_lock_mask = (OFDM_SC_RA_RAM_LOCK_MPEG__M |
+ OFDM_SC_RA_RAM_LOCK_FEC__M);
+ const u16 fec_lock_mask = (OFDM_SC_RA_RAM_LOCK_FEC__M);
+ const u16 demod_lock_mask = OFDM_SC_RA_RAM_LOCK_DEMOD__M;
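+ /*
+ The masks above form a hierarchy: MPEG lock requires both the MPEG and
+ FEC bits, FEC lock requires the FEC bit, and demod lock only the demod
+ bit. The checks below therefore run from the strongest lock downwards,
+ with the NODVBT bit mapping to NEVER_LOCK.
+ */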
+
+ u16 ScRaRamLock = 0;
+ u16 ScCommExec = 0;
+
+ dprintk(1, "\n");
+
+ *pLockStatus = NOT_LOCKED;
+ /* driver 0.9.0 */
+ /* Check if SC is running */
+ status = read16(state, OFDM_SC_COMM_EXEC__A, &ScCommExec);
+ if (status < 0)
+ goto end;
+ if (ScCommExec == OFDM_SC_COMM_EXEC_STOP)
+ goto end;
+
+ status = read16(state, OFDM_SC_RA_RAM_LOCK__A, &ScRaRamLock);
+ if (status < 0)
+ goto end;
+
+ if ((ScRaRamLock & mpeg_lock_mask) == mpeg_lock_mask)
+ *pLockStatus = MPEG_LOCK;
+ else if ((ScRaRamLock & fec_lock_mask) == fec_lock_mask)
+ *pLockStatus = FEC_LOCK;
+ else if ((ScRaRamLock & demod_lock_mask) == demod_lock_mask)
+ *pLockStatus = DEMOD_LOCK;
+ else if (ScRaRamLock & OFDM_SC_RA_RAM_LOCK_NODVBT__M)
+ *pLockStatus = NEVER_LOCK;
+end:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static int PowerUpQAM(struct drxk_state *state)
+{
+ enum DRXPowerMode powerMode = DRXK_POWER_DOWN_OFDM;
+ int status;
+
+ dprintk(1, "\n");
+ status = CtrlPowerMode(state, &powerMode);
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+
+/** Power Down QAM */
+static int PowerDownQAM(struct drxk_state *state)
+{
+ u16 data = 0;
+ u16 cmdResult;
+ int status = 0;
+
+ dprintk(1, "\n");
+ status = read16(state, SCU_COMM_EXEC__A, &data);
+ if (status < 0)
+ goto error;
+ if (data == SCU_COMM_EXEC_ACTIVE) {
+ /*
+ STOP demodulator
+ QAM and HW blocks
+ */
+ /* stop all comm_exec */
+ status = write16(state, QAM_COMM_EXEC__A, QAM_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_STOP, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+ }
+ /* powerdown AFE */
+ status = SetIqmAf(state, false);
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief Setup of the QAM Measurement intervals for signal quality
+* \param demod instance of demod.
+* \param constellation current constellation.
+* \return DRXStatus_t.
+*
+* NOTE:
+* Take into account that for certain settings the error counters can overflow.
+* The implementation does not check this.
+*
+*/
+static int SetQAMMeasurement(struct drxk_state *state,
+ enum EDrxkConstellation constellation,
+ u32 symbolRate)
+{
+ u32 fecBitsDesired = 0; /* BER accounting period */
+ u32 fecRsPeriodTotal = 0; /* Total period */
+ u16 fecRsPrescale = 0; /* ReedSolomon Measurement Prescale */
+ u16 fecRsPeriod = 0; /* Value for corresponding I2C register */
+ int status = 0;
+
+ dprintk(1, "\n");
+
+ fecRsPrescale = 1;
+ /* fecBitsDesired = symbolRate [kHz] *
+ FrameLength [ms] *
+ (constellation + 1) *
+ SyncLoss (== 1) *
+ ViterbiLoss (==1)
+ */
+ switch (constellation) {
+ case DRX_CONSTELLATION_QAM16:
+ fecBitsDesired = 4 * symbolRate;
+ break;
+ case DRX_CONSTELLATION_QAM32:
+ fecBitsDesired = 5 * symbolRate;
+ break;
+ case DRX_CONSTELLATION_QAM64:
+ fecBitsDesired = 6 * symbolRate;
+ break;
+ case DRX_CONSTELLATION_QAM128:
+ fecBitsDesired = 7 * symbolRate;
+ break;
+ case DRX_CONSTELLATION_QAM256:
+ fecBitsDesired = 8 * symbolRate;
+ break;
+ default:
+ status = -EINVAL;
+ }
+ if (status < 0)
+ goto error;
+
+ fecBitsDesired /= 1000; /* symbolRate [Hz] -> symbolRate [kHz] */
+ fecBitsDesired *= 500; /* meas. period [ms] */
+
+ /* Annex A/C: bits/RsPeriod = 204 * 8 = 1632 */
+ /* fecRsPeriodTotal = fecBitsDesired / 1632 */
+ fecRsPeriodTotal = (fecBitsDesired / 1632UL) + 1; /* roughly ceil */
+
+ /* fecRsPeriodTotal = fecRsPrescale * fecRsPeriod */
+ fecRsPrescale = 1 + (u16) (fecRsPeriodTotal >> 16);
+ if (fecRsPrescale == 0) {
+ /* Divide by zero (though impossible) */
+ status = -EINVAL;
+ goto error;
+ }
+ fecRsPeriod =
+ ((u16) fecRsPeriodTotal +
+ (fecRsPrescale >> 1)) / fecRsPrescale;
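+ /*
+ Rough illustration with assumed inputs (not actual defaults): for QAM64
+ (6 bits/symbol) at a hypothetical 6.9 MSym/s, fecBitsDesired starts as
+ 6 * 6900000 = 41400000, becomes 41400 after the /1000 and 20700000 bits
+ for the 500 ms window; 20700000 / 1632 + 1 = 12684 RS blocks, so
+ fecRsPrescale = 1 and fecRsPeriod = 12684.
+ */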
+
+ /* write corresponding registers */
+ status = write16(state, FEC_RS_MEASUREMENT_PERIOD__A, fecRsPeriod);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_RS_MEASUREMENT_PRESCALE__A, fecRsPrescale);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_OC_SNC_FAIL_PERIOD__A, fecRsPeriod);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int SetQAM16(struct drxk_state *state)
+{
+ int status = 0;
+
+ dprintk(1, "\n");
+ /* QAM Equalizer Setup */
+ /* Equalizer */
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 13517);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 13517);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 13517);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 13517);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 13517);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 13517);
+ if (status < 0)
+ goto error;
+ /* Decision Feedback Equalizer */
+ status = write16(state, QAM_DQ_QUAL_FUN0__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN1__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN2__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN3__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN4__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, QAM_SY_SYNC_HWM__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_AWM__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_LWM__A, 3);
+ if (status < 0)
+ goto error;
+
+ /* QAM Slicer Settings */
+ status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM16);
+ if (status < 0)
+ goto error;
+
+ /* QAM Loop Controller Coefficients */
+ status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 20);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 80);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 20);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 32);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 10);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM State Machine (FSM) Thresholds */
+
+ status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 140);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 95);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 120);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 230);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 105);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 24);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM FSM Tracking Parameters */
+
+ status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 220);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) 25);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) -24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) -65);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -127);
+ if (status < 0)
+ goto error;
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief QAM32 specific setup
+* \param demod instance of demod.
+* \return DRXStatus_t.
+*/
+static int SetQAM32(struct drxk_state *state)
+{
+ int status = 0;
+
+ dprintk(1, "\n");
+
+ /* QAM Equalizer Setup */
+ /* Equalizer */
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 6707);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 6707);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 6707);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 6707);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 6707);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 6707);
+ if (status < 0)
+ goto error;
+
+ /* Decision Feedback Equalizer */
+ status = write16(state, QAM_DQ_QUAL_FUN0__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN1__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN2__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN3__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN4__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, QAM_SY_SYNC_HWM__A, 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_AWM__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_LWM__A, 3);
+ if (status < 0)
+ goto error;
+
+ /* QAM Slicer Settings */
+
+ status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM32);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM Loop Controller Coefficients */
+
+ status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 20);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 80);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 20);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 0);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM State Machine (FSM) Thresholds */
+
+ status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 90);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 80);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 100);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 170);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 100);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 10);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM FSM Tracking Parameters */
+
+ status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 140);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) -8);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) -16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) -26);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) -56);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -86);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief QAM64 specific setup
+* \param demod instance of demod.
+* \return DRXStatus_t.
+*/
+static int SetQAM64(struct drxk_state *state)
+{
+ int status = 0;
+
+ dprintk(1, "\n");
+ /* QAM Equalizer Setup */
+ /* Equalizer */
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 13336);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 12618);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 11988);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 13809);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 13809);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 15609);
+ if (status < 0)
+ goto error;
+
+ /* Decision Feedback Equalizer */
+ status = write16(state, QAM_DQ_QUAL_FUN0__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN1__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN2__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN3__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN4__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, QAM_SY_SYNC_HWM__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_AWM__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_LWM__A, 3);
+ if (status < 0)
+ goto error;
+
+ /* QAM Slicer Settings */
+ status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM64);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM Loop Controller Coefficients */
+
+ status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 30);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 100);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 30);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 25);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 48);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 10);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM State Machine (FSM) Thresholds */
+
+ status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 100);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 60);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 80);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 110);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 200);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 95);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 15);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM FSM Tracking Parameters */
+
+ status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 141);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) 7);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) -15);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) -45);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -80);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief QAM128 specific setup
+* \param demod: instance of demod.
+* \return DRXStatus_t.
+*/
+static int SetQAM128(struct drxk_state *state)
+{
+ int status = 0;
+
+ dprintk(1, "\n");
+ /* QAM Equalizer Setup */
+ /* Equalizer */
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 6564);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 6598);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 6394);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 6409);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 6656);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 7238);
+ if (status < 0)
+ goto error;
+
+ /* Decision Feedback Equalizer */
+ status = write16(state, QAM_DQ_QUAL_FUN0__A, 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN1__A, 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN2__A, 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN3__A, 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN4__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, QAM_SY_SYNC_HWM__A, 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_AWM__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_LWM__A, 3);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM Slicer Settings */
+
+ status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM128);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM Loop Controller Coefficients */
+
+ status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 120);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 60);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 25);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 64);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 0);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM State Machine (FSM) Thresholds */
+
+ status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 60);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 80);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 100);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 140);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 100);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 5);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 12);
+ if (status < 0)
+ goto error;
+
+ /* QAM FSM Tracking Parameters */
+
+ status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 65);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) -1);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) -12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -23);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief QAM256 specific setup
+* \param demod: instance of demod.
+* \return DRXStatus_t.
+*/
+static int SetQAM256(struct drxk_state *state)
+{
+ int status = 0;
+
+ dprintk(1, "\n");
+ /* QAM Equalizer Setup */
+ /* Equalizer */
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD0__A, 11502);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD1__A, 12084);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD2__A, 12543);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD3__A, 12931);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD4__A, 13629);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_EQ_CMA_RAD5__A, 15385);
+ if (status < 0)
+ goto error;
+
+ /* Decision Feedback Equalizer */
+ status = write16(state, QAM_DQ_QUAL_FUN0__A, 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN1__A, 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN2__A, 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN3__A, 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN4__A, 6);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_DQ_QUAL_FUN5__A, 0);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, QAM_SY_SYNC_HWM__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_AWM__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_SYNC_LWM__A, 3);
+ if (status < 0)
+ goto error;
+
+ /* QAM Slicer Settings */
+
+ status = write16(state, SCU_RAM_QAM_SL_SIG_POWER__A, DRXK_QAM_SL_SIG_POWER_QAM256);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM Loop Controller Coefficients */
+
+ status = write16(state, SCU_RAM_QAM_LC_CA_FINE__A, 15);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CA_COARSE__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_MEDIUM__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EP_COARSE__A, 24);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_FINE__A, 12);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_MEDIUM__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_EI_COARSE__A, 16);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_LC_CP_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_MEDIUM__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CP_COARSE__A, 250);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_MEDIUM__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CI_COARSE__A, 125);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_FINE__A, 16);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_MEDIUM__A, 25);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF_COARSE__A, 48);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_FINE__A, 5);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_MEDIUM__A, 10);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_LC_CF1_COARSE__A, 10);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM State Machine (FSM) Thresholds */
+
+ status = write16(state, SCU_RAM_QAM_FSM_RTH__A, 50);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FTH__A, 60);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_CTH__A, 80);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_PTH__A, 100);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_QTH__A, 150);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_MTH__A, 110);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SCU_RAM_QAM_FSM_RATE_LIM__A, 40);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_COUNT_LIM__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_FREQ_LIM__A, 12);
+ if (status < 0)
+ goto error;
+
+
+ /* QAM FSM Tracking Parameters */
+
+ status = write16(state, SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A, (u16) 8);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A, (u16) 74);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A, (u16) 18);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A, (u16) 13);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A, (u16) 7);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A, (u16) 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A, (u16) -8);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+
+/*============================================================================*/
+/**
+* \brief Reset QAM block.
+* \param demod: instance of demod.
+* \param channel: pointer to channel data.
+* \return DRXStatus_t.
+*/
+static int QAMResetQAM(struct drxk_state *state)
+{
+ int status;
+ u16 cmdResult;
+
+ dprintk(1, "\n");
+ /* Stop QAM comm_exec */
+ status = write16(state, QAM_COMM_EXEC__A, QAM_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_RESET, 0, NULL, 1, &cmdResult);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief Set QAM symbolrate.
+* \param demod: instance of demod.
+* \param channel: pointer to channel data.
+* \return DRXStatus_t.
+*/
+static int QAMSetSymbolrate(struct drxk_state *state)
+{
+ u32 adcFrequency = 0;
+ u32 symbFreq = 0;
+ u32 iqmRcRate = 0;
+ u16 ratesel = 0;
+ u32 lcSymbRate = 0;
+ int status;
+
+ dprintk(1, "\n");
+ /* Select & calculate correct IQM rate */
+ adcFrequency = (state->m_sysClockFreq * 1000) / 3;
+ ratesel = 0;
+ /* printk(KERN_DEBUG "drxk: SR %d\n", state->param.u.qam.symbol_rate); */
+ if (state->param.u.qam.symbol_rate <= 1188750)
+ ratesel = 3;
+ else if (state->param.u.qam.symbol_rate <= 2377500)
+ ratesel = 2;
+ else if (state->param.u.qam.symbol_rate <= 4755000)
+ ratesel = 1;
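+ /*
+ The thresholds above halve at each step (4755000, 2377500, 1188750), so
+ ratesel appears to select an extra factor of 2^ratesel of decimation for
+ low symbol rates; symbFreq below is scaled by the same (1 << ratesel)
+ factor.
+ */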
+ status = write16(state, IQM_FD_RATESEL__A, ratesel);
+ if (status < 0)
+ goto error;
+
+ /*
+ IqmRcRate = ((Fadc / (symbolrate * (4<<ratesel))) - 1) * (1<<23)
+ */
+ symbFreq = state->param.u.qam.symbol_rate * (1 << ratesel);
+ if (symbFreq == 0) {
+ /* Divide by zero */
+ status = -EINVAL;
+ goto error;
+ }
+ iqmRcRate = (adcFrequency / symbFreq) * (1 << 21) +
+ (Frac28a((adcFrequency % symbFreq), symbFreq) >> 7) -
+ (1 << 23);
+ status = write32(state, IQM_RC_RATE_OFS_LO__A, iqmRcRate);
+ if (status < 0)
+ goto error;
+ state->m_iqmRcRate = iqmRcRate;
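+ /*
+ Purely illustrative numbers (assuming Frac28a(a, c) returns roughly
+ (a / c) * 2^28, an adcFrequency of 48 MHz and a 6 MSym/s symbol rate;
+ neither is claimed to be a real configuration): ratesel = 0,
+ symbFreq = 6 MHz, so iqmRcRate = 8 * 2^21 + 0 - 2^23 = 8388608
+ (0x800000), matching (48e6 / (6e6 * 4) - 1) * 2^23 from the formula
+ above.
+ */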
+ /*
+ LcSymbFreq = round (.125 * symbolrate / adcFreq * (1<<15))
+ */
+ symbFreq = state->param.u.qam.symbol_rate;
+ if (adcFrequency == 0) {
+ /* Divide by zero */
+ status = -EINVAL;
+ goto error;
+ }
+ lcSymbRate = (symbFreq / adcFrequency) * (1 << 12) +
+ (Frac28a((symbFreq % adcFrequency), adcFrequency) >>
+ 16);
+ if (lcSymbRate > 511)
+ lcSymbRate = 511;
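+ /*
+ Continuing the illustrative 48 MHz / 6 MSym/s example from above:
+ 0.125 * 6e6 / 48e6 * 2^15 = 512, which exceeds 511 and is therefore
+ clamped to the register maximum of 511.
+ */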
+ status = write16(state, QAM_LC_SYMBOL_FREQ__A, (u16) lcSymbRate);
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+/*============================================================================*/
+
+/**
+* \brief Get QAM lock status.
+* \param demod: instance of demod.
+* \param channel: pointer to channel data.
+* \return DRXStatus_t.
+*/
+
+static int GetQAMLockStatus(struct drxk_state *state, u32 *pLockStatus)
+{
+ int status;
+ u16 Result[2] = { 0, 0 };
+
+ dprintk(1, "\n");
+ *pLockStatus = NOT_LOCKED;
+ status = scu_command(state,
+ SCU_RAM_COMMAND_STANDARD_QAM |
+ SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK, 0, NULL, 2,
+ Result);
+ if (status < 0)
+ printk(KERN_ERR "drxk: %s status = %08x\n", __func__, status);
+
+ if (Result[1] < SCU_RAM_QAM_LOCKED_LOCKED_DEMOD_LOCKED) {
+ /* 0x0000 NOT LOCKED */
+ } else if (Result[1] < SCU_RAM_QAM_LOCKED_LOCKED_LOCKED) {
+ /* 0x4000 DEMOD LOCKED */
+ *pLockStatus = DEMOD_LOCK;
+ } else if (Result[1] < SCU_RAM_QAM_LOCKED_LOCKED_NEVER_LOCK) {
+ /* 0x8000 DEMOD + FEC LOCKED (system lock) */
+ *pLockStatus = MPEG_LOCK;
+ } else {
+ /* 0xC000 NEVER LOCKED */
+ /* (system will never be able to lock to the signal) */
+ /* TODO: check this, intermediate & standard specific lock states are not
+ taken into account here */
+ *pLockStatus = NEVER_LOCK;
+ }
+ return status;
+}
+
+#define QAM_MIRROR__M 0x03
+#define QAM_MIRROR_NORMAL 0x00
+#define QAM_MIRRORED 0x01
+#define QAM_MIRROR_AUTO_ON 0x02
+#define QAM_LOCKRANGE__M 0x10
+#define QAM_LOCKRANGE_NORMAL 0x10
+
+static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
+ s32 tunerFreqOffset)
+{
+ int status;
+ u16 setParamParameters[4] = { 0, 0, 0, 0 };
+ u16 cmdResult;
+
+ dprintk(1, "\n");
+ /*
+ * STEP 1: reset demodulator
+ * resets FEC DI and FEC RS
+ * resets QAM block
+ * resets SCU variables
+ */
+ status = write16(state, FEC_DI_COMM_EXEC__A, FEC_DI_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_RS_COMM_EXEC__A, FEC_RS_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = QAMResetQAM(state);
+ if (status < 0)
+ goto error;
+
+ /*
+ * STEP 2: configure demodulator
+ * -set params; resets IQM,QAM,FEC HW; initializes some
+ * SCU variables
+ */
+ status = QAMSetSymbolrate(state);
+ if (status < 0)
+ goto error;
+
+ /* Set params */
+ switch (state->param.u.qam.modulation) {
+ case QAM_256:
+ state->m_Constellation = DRX_CONSTELLATION_QAM256;
+ break;
+ case QAM_AUTO:
+ case QAM_64:
+ state->m_Constellation = DRX_CONSTELLATION_QAM64;
+ break;
+ case QAM_16:
+ state->m_Constellation = DRX_CONSTELLATION_QAM16;
+ break;
+ case QAM_32:
+ state->m_Constellation = DRX_CONSTELLATION_QAM32;
+ break;
+ case QAM_128:
+ state->m_Constellation = DRX_CONSTELLATION_QAM128;
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ if (status < 0)
+ goto error;
+ setParamParameters[0] = state->m_Constellation; /* constellation */
+ setParamParameters[1] = DRXK_QAM_I12_J17; /* interleave mode */
+ if (state->m_OperationMode == OM_QAM_ITU_C)
+ setParamParameters[2] = QAM_TOP_ANNEX_C;
+ else
+ setParamParameters[2] = QAM_TOP_ANNEX_A;
+ setParamParameters[3] |= (QAM_MIRROR_AUTO_ON);
+ /* Env parameters */
+	/* check for LOCKRANGE Extended */
+ /* setParamParameters[3] |= QAM_LOCKRANGE_NORMAL; */
+
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM, 4, setParamParameters, 1, &cmdResult);
+ if (status < 0) {
+ /* Fall-back to the simpler call */
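+		/*
+		 * Presumably the firmware rejected the extended 4-word
+		 * SET_PARAM; pass the annex via DEMOD_SET_ENV instead and
+		 * send SET_PARAM with only constellation and interleave mode.
+		 */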
+ if (state->m_OperationMode == OM_QAM_ITU_C)
+ setParamParameters[0] = QAM_TOP_ANNEX_C;
+ else
+ setParamParameters[0] = QAM_TOP_ANNEX_A;
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV, 1, setParamParameters, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+
+ setParamParameters[0] = state->m_Constellation; /* constellation */
+ setParamParameters[1] = DRXK_QAM_I12_J17; /* interleave mode */
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM, 2, setParamParameters, 1, &cmdResult);
+ }
+ if (status < 0)
+ goto error;
+
+ /*
+	 * STEP 3: enable the system in a mode where the ADC provides a valid
+	 * signal; set up the constellation-independent registers
+ */
+#if 0
+	status = SetFrequency(channel, tunerFreqOffset);
+ if (status < 0)
+ goto error;
+#endif
+ status = SetFrequencyShifter(state, IntermediateFreqkHz, tunerFreqOffset, true);
+ if (status < 0)
+ goto error;
+
+ /* Setup BER measurement */
+	status = SetQAMMeasurement(state, state->m_Constellation, state->param.u.qam.symbol_rate);
+ if (status < 0)
+ goto error;
+
+ /* Reset default values */
+ status = write16(state, IQM_CF_SCALE_SH__A, IQM_CF_SCALE_SH__PRE);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_SY_TIMEOUT__A, QAM_SY_TIMEOUT__PRE);
+ if (status < 0)
+ goto error;
+
+ /* Reset default LC values */
+ status = write16(state, QAM_LC_RATE_LIMIT__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_LPF_FACTORP__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_LPF_FACTORI__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_MODE__A, 7);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, QAM_LC_QUAL_TAB0__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB1__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB2__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB3__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB4__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB5__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB6__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB8__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB9__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB10__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB12__A, 2);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB15__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB16__A, 3);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB20__A, 4);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_LC_QUAL_TAB25__A, 4);
+ if (status < 0)
+ goto error;
+
+ /* Mirroring, QAM-block starting point not inverted */
+ status = write16(state, QAM_SY_SP_INV__A, QAM_SY_SP_INV_SPECTRUM_INV_DIS);
+ if (status < 0)
+ goto error;
+
+ /* Halt SCU to enable safe non-atomic accesses */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_HOLD);
+ if (status < 0)
+ goto error;
+
+ /* STEP 4: constellation specific setup */
+ switch (state->param.u.qam.modulation) {
+ case QAM_16:
+ status = SetQAM16(state);
+ break;
+ case QAM_32:
+ status = SetQAM32(state);
+ break;
+ case QAM_AUTO:
+ case QAM_64:
+ status = SetQAM64(state);
+ break;
+ case QAM_128:
+ status = SetQAM128(state);
+ break;
+ case QAM_256:
+ status = SetQAM256(state);
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ if (status < 0)
+ goto error;
+
+ /* Activate SCU to enable SCU commands */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+
+ /* Re-configure MPEG output, requires knowledge of channel bitrate */
+ /* extAttr->currentChannel.constellation = channel->constellation; */
+ /* extAttr->currentChannel.symbolrate = channel->symbolrate; */
+ status = MPEGTSDtoSetup(state, state->m_OperationMode);
+ if (status < 0)
+ goto error;
+
+ /* Start processes */
+ status = MPEGTSStart(state);
+ if (status < 0)
+ goto error;
+ status = write16(state, FEC_COMM_EXEC__A, FEC_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+ status = write16(state, QAM_COMM_EXEC__A, QAM_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_ACTIVE);
+ if (status < 0)
+ goto error;
+
+ /* STEP 5: start QAM demodulator (starts FEC, QAM and IQM HW) */
+ status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_START, 0, NULL, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+
+ /* update global DRXK data container */
+/*? extAttr->qamInterleaveMode = DRXK_QAM_I12_J17; */
+
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int SetQAMStandard(struct drxk_state *state,
+ enum OperationMode oMode)
+{
+ int status;
+#ifdef DRXK_QAM_TAPS
+#define DRXK_QAMA_TAPS_SELECT
+#include "drxk_filters.h"
+#undef DRXK_QAMA_TAPS_SELECT
+#endif
+
+ dprintk(1, "\n");
+
+ /* added antenna switch */
+ SwitchAntennaToQAM(state);
+
+ /* Ensure correct power-up mode */
+ status = PowerUpQAM(state);
+ if (status < 0)
+ goto error;
+ /* Reset QAM block */
+ status = QAMResetQAM(state);
+ if (status < 0)
+ goto error;
+
+ /* Setup IQM */
+
+ status = write16(state, IQM_COMM_EXEC__A, IQM_COMM_EXEC_B_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_AMUX__A, IQM_AF_AMUX_SIGNAL2ADC);
+ if (status < 0)
+ goto error;
+
+	/* Upload the IQM channel filter settings from the
+	   ROM table via the boot loader */
+ switch (oMode) {
+ case OM_QAM_ITU_A:
+ status = BLChainCmd(state, DRXK_BL_ROM_OFFSET_TAPS_ITU_A, DRXK_BLCC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT);
+ break;
+ case OM_QAM_ITU_C:
+ status = BLDirectCmd(state, IQM_CF_TAP_RE0__A, DRXK_BL_ROM_OFFSET_TAPS_ITU_C, DRXK_BLDC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT);
+ if (status < 0)
+ goto error;
+ status = BLDirectCmd(state, IQM_CF_TAP_IM0__A, DRXK_BL_ROM_OFFSET_TAPS_ITU_C, DRXK_BLDC_NR_ELEMENTS_TAPS, DRXK_BLC_TIMEOUT);
+ break;
+ default:
+ status = -EINVAL;
+ }
+ if (status < 0)
+ goto error;
+
+ status = write16(state, IQM_CF_OUT_ENA__A, (1 << IQM_CF_OUT_ENA_QAM__B));
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_SYMMETRIC__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_MIDTAP__A, ((1 << IQM_CF_MIDTAP_RE__B) | (1 << IQM_CF_MIDTAP_IM__B)));
+ if (status < 0)
+ goto error;
+
+ status = write16(state, IQM_RC_STRETCH__A, 21);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_CLP_LEN__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_CLP_TH__A, 448);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_SNS_LEN__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_POW_MEAS_LEN__A, 0);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, IQM_FS_ADJ_SEL__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_RC_ADJ_SEL__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_ADJ_SEL__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_UPD_SEL__A, 0);
+ if (status < 0)
+ goto error;
+
+ /* IQM Impulse Noise Processing Unit */
+ status = write16(state, IQM_CF_CLP_VAL__A, 500);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_DATATH__A, 1000);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_BYPASSDET__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_DET_LCT__A, 0);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_WND_LEN__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_CF_PKDTH__A, 1);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_INC_BYPASS__A, 1);
+ if (status < 0)
+ goto error;
+
+ /* turn on IQMAF. Must be done before setAgc**() */
+ status = SetIqmAf(state, true);
+ if (status < 0)
+ goto error;
+ status = write16(state, IQM_AF_START_LOCK__A, 0x01);
+ if (status < 0)
+ goto error;
+
+ /* IQM will not be reset from here, sync ADC and update/init AGC */
+ status = ADCSynchronization(state);
+ if (status < 0)
+ goto error;
+
+ /* Set the FSM step period */
+ status = write16(state, SCU_RAM_QAM_FSM_STEP_PERIOD__A, 2000);
+ if (status < 0)
+ goto error;
+
+ /* Halt SCU to enable safe non-atomic accesses */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_HOLD);
+ if (status < 0)
+ goto error;
+
+	/* The IQM will not be reset any more and the current standard is
+	   set correctly, so the AGCs can now be configured. */
+
+ status = InitAGC(state, true);
+ if (status < 0)
+ goto error;
+ status = SetPreSaw(state, &(state->m_qamPreSawCfg));
+ if (status < 0)
+ goto error;
+
+	/* Configure the AGCs */
+ status = SetAgcRf(state, &(state->m_qamRfAgcCfg), true);
+ if (status < 0)
+ goto error;
+ status = SetAgcIf(state, &(state->m_qamIfAgcCfg), true);
+ if (status < 0)
+ goto error;
+
+ /* Activate SCU to enable SCU commands */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int WriteGPIO(struct drxk_state *state)
+{
+ int status;
+ u16 value = 0;
+
+ dprintk(1, "\n");
+ /* stop lock indicator process */
+ status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
+ if (status < 0)
+ goto error;
+
+ /* Write magic word to enable pdr reg write */
+ status = write16(state, SIO_TOP_COMM_KEY__A, SIO_TOP_COMM_KEY_KEY);
+ if (status < 0)
+ goto error;
+
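+	/*
+	 * Each UIO selected in UIO_mask drives one bit of SIO_PDR_UIO_OUT_LO:
+	 * UIO-1 maps to bit 15, UIO-2 to bit 14 and UIO-3 to bit 2, as
+	 * handled below.
+	 */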
+ if (state->m_hasSAWSW) {
+ if (state->UIO_mask & 0x0001) { /* UIO-1 */
+ /* write to io pad configuration register - output mode */
+ status = write16(state, SIO_PDR_SMA_TX_CFG__A, state->m_GPIOCfg);
+ if (status < 0)
+ goto error;
+
+			/* use corresponding bit in io data output register */
+ status = read16(state, SIO_PDR_UIO_OUT_LO__A, &value);
+ if (status < 0)
+ goto error;
+ if ((state->m_GPIO & 0x0001) == 0)
+ value &= 0x7FFF; /* write zero to 15th bit - 1st UIO */
+ else
+ value |= 0x8000; /* write one to 15th bit - 1st UIO */
+ /* write back to io data output register */
+ status = write16(state, SIO_PDR_UIO_OUT_LO__A, value);
+ if (status < 0)
+ goto error;
+ }
+ if (state->UIO_mask & 0x0002) { /* UIO-2 */
+ /* write to io pad configuration register - output mode */
+ status = write16(state, SIO_PDR_SMA_TX_CFG__A, state->m_GPIOCfg);
+ if (status < 0)
+ goto error;
+
+			/* use corresponding bit in io data output register */
+ status = read16(state, SIO_PDR_UIO_OUT_LO__A, &value);
+ if (status < 0)
+ goto error;
+ if ((state->m_GPIO & 0x0002) == 0)
+				value &= 0xBFFF;	/* write zero to 14th bit - 2nd UIO */
+ else
+				value |= 0x4000;	/* write one to 14th bit - 2nd UIO */
+ /* write back to io data output register */
+ status = write16(state, SIO_PDR_UIO_OUT_LO__A, value);
+ if (status < 0)
+ goto error;
+ }
+ if (state->UIO_mask & 0x0004) { /* UIO-3 */
+ /* write to io pad configuration register - output mode */
+ status = write16(state, SIO_PDR_SMA_TX_CFG__A, state->m_GPIOCfg);
+ if (status < 0)
+ goto error;
+
+			/* use corresponding bit in io data output register */
+ status = read16(state, SIO_PDR_UIO_OUT_LO__A, &value);
+ if (status < 0)
+ goto error;
+ if ((state->m_GPIO & 0x0004) == 0)
+ value &= 0xFFFB; /* write zero to 2nd bit - 3rd UIO */
+ else
+ value |= 0x0004; /* write one to 2nd bit - 3rd UIO */
+ /* write back to io data output register */
+ status = write16(state, SIO_PDR_UIO_OUT_LO__A, value);
+ if (status < 0)
+ goto error;
+ }
+ }
+ /* Write magic word to disable pdr reg write */
+ status = write16(state, SIO_TOP_COMM_KEY__A, 0x0000);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int SwitchAntennaToQAM(struct drxk_state *state)
+{
+ int status = 0;
+ bool gpio_state;
+
+ dprintk(1, "\n");
+
+ if (!state->antenna_gpio)
+ return 0;
+
+ gpio_state = state->m_GPIO & state->antenna_gpio;
+
+ if (state->antenna_dvbt ^ gpio_state) {
+		/* Antenna is in DVB-T mode. Switch it */
+ if (state->antenna_dvbt)
+ state->m_GPIO &= ~state->antenna_gpio;
+ else
+ state->m_GPIO |= state->antenna_gpio;
+ status = WriteGPIO(state);
+ }
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+static int SwitchAntennaToDVBT(struct drxk_state *state)
+{
+ int status = 0;
+ bool gpio_state;
+
+ dprintk(1, "\n");
+
+ if (!state->antenna_gpio)
+ return 0;
+
+ gpio_state = state->m_GPIO & state->antenna_gpio;
+
+ if (!(state->antenna_dvbt ^ gpio_state)) {
+		/* Antenna is in DVB-C mode. Switch it */
+ if (state->antenna_dvbt)
+ state->m_GPIO |= state->antenna_gpio;
+ else
+ state->m_GPIO &= ~state->antenna_gpio;
+ status = WriteGPIO(state);
+ }
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+}
+
+
+static int PowerDownDevice(struct drxk_state *state)
+{
+ /* Power down to requested mode */
+ /* Backup some register settings */
+ /* Set pins with possible pull-ups connected to them in input mode */
+ /* Analog power down */
+ /* ADC power down */
+ /* Power down device */
+ int status;
+
+ dprintk(1, "\n");
+ if (state->m_bPDownOpenBridge) {
+ /* Open I2C bridge before power down of DRXK */
+ status = ConfigureI2CBridge(state, true);
+ if (status < 0)
+ goto error;
+ }
+ /* driver 0.9.0 */
+ status = DVBTEnableOFDMTokenRing(state, false);
+ if (status < 0)
+ goto error;
+
+ status = write16(state, SIO_CC_PWD_MODE__A, SIO_CC_PWD_MODE_LEVEL_CLOCK);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY);
+ if (status < 0)
+ goto error;
+ state->m_HICfgCtrl |= SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ;
+ status = HI_CfgCommand(state);
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static int load_microcode(struct drxk_state *state, const char *mc_name)
+{
+ const struct firmware *fw = NULL;
+ int err = 0;
+
+ dprintk(1, "\n");
+
+ err = request_firmware(&fw, mc_name, state->i2c->dev.parent);
+ if (err < 0) {
+ printk(KERN_ERR
+ "drxk: Could not load firmware file %s.\n", mc_name);
+ printk(KERN_INFO
+ "drxk: Copy %s to your hotplug directory!\n", mc_name);
+ return err;
+ }
+ err = DownloadMicrocode(state, fw->data, fw->size);
+ release_firmware(fw);
+ return err;
+}
+
+static int init_drxk(struct drxk_state *state)
+{
+ int status = 0;
+ enum DRXPowerMode powerMode = DRXK_POWER_DOWN_OFDM;
+ u16 driverVersion;
+
+ dprintk(1, "\n");
+	if (state->m_DrxkState == DRXK_UNINITIALIZED) {
+ status = PowerUpDevice(state);
+ if (status < 0)
+ goto error;
+ status = DRXX_Open(state);
+ if (status < 0)
+ goto error;
+ /* Soft reset of OFDM-, sys- and osc-clockdomain */
+ status = write16(state, SIO_CC_SOFT_RST__A, SIO_CC_SOFT_RST_OFDM__M | SIO_CC_SOFT_RST_SYS__M | SIO_CC_SOFT_RST_OSC__M);
+ if (status < 0)
+ goto error;
+ status = write16(state, SIO_CC_UPDATE__A, SIO_CC_UPDATE_KEY);
+ if (status < 0)
+ goto error;
+		/* TODO: is this needed? If so, how much delay is needed in the worst case? */
+ msleep(1);
+ state->m_DRXK_A3_PATCH_CODE = true;
+ status = GetDeviceCapabilities(state);
+ if (status < 0)
+ goto error;
+
+		/* Bridge delay, uses the oscillator clock */
+		/* Delay = (delay (nanoseconds) * oscclk (kHz)) / 1000 */
+		/* SDA bridge delay */
+ state->m_HICfgBridgeDelay =
+ (u16) ((state->m_oscClockFreq / 1000) *
+ HI_I2C_BRIDGE_DELAY) / 1000;
+ /* Clipping */
+ if (state->m_HICfgBridgeDelay >
+ SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M) {
+ state->m_HICfgBridgeDelay =
+ SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M;
+ }
+ /* SCL bridge delay, same as SDA for now */
+ state->m_HICfgBridgeDelay +=
+ state->m_HICfgBridgeDelay <<
+ SIO_HI_RA_RAM_PAR_3_CFG_DBL_SCL__B;
+
+ status = InitHI(state);
+ if (status < 0)
+ goto error;
+ /* disable various processes */
+#if NOA1ROM
+ if (!(state->m_DRXK_A1_ROM_CODE)
+ && !(state->m_DRXK_A2_ROM_CODE))
+#endif
+ {
+ status = write16(state, SCU_RAM_GPIO__A, SCU_RAM_GPIO_HW_LOCK_IND_DISABLE);
+ if (status < 0)
+ goto error;
+ }
+
+ /* disable MPEG port */
+ status = MPEGTSDisable(state);
+ if (status < 0)
+ goto error;
+
+ /* Stop AUD and SCU */
+ status = write16(state, AUD_COMM_EXEC__A, AUD_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+
+ /* enable token-ring bus through OFDM block for possible ucode upload */
+ status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, SIO_OFDM_SH_OFDM_RING_ENABLE_ON);
+ if (status < 0)
+ goto error;
+
+ /* include boot loader section */
+ status = write16(state, SIO_BL_COMM_EXEC__A, SIO_BL_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+ status = BLChainCmd(state, 0, 6, 100);
+ if (status < 0)
+ goto error;
+
+ if (!state->microcode_name)
+ load_microcode(state, "drxk_a3.mc");
+ else
+ load_microcode(state, state->microcode_name);
+
+		/* disable token-ring bus through OFDM block after ucode upload */
+ status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, SIO_OFDM_SH_OFDM_RING_ENABLE_OFF);
+ if (status < 0)
+ goto error;
+
+ /* Run SCU for a little while to initialize microcode version numbers */
+ status = write16(state, SCU_COMM_EXEC__A, SCU_COMM_EXEC_ACTIVE);
+ if (status < 0)
+ goto error;
+ status = DRXX_Open(state);
+ if (status < 0)
+ goto error;
+ /* added for test */
+ msleep(30);
+
+ powerMode = DRXK_POWER_DOWN_OFDM;
+ status = CtrlPowerMode(state, &powerMode);
+ if (status < 0)
+ goto error;
+
+		/* Stamp the driver version number in SCU data RAM in BCD code.
+		   Done to enable field application engineers to retrieve the driver
+		   version via I2C from SCU RAM.
+		   The SCU command interface is not used for this register access,
+		   since no microcode may be present.
+ */
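+		/*
+		 * Example: driver version 0.9.4300 is stored as 0x0009 in
+		 * SCU_RAM_DRIVER_VER_HI and 0x4300 in SCU_RAM_DRIVER_VER_LO.
+		 */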
+ driverVersion =
+ (((DRXK_VERSION_MAJOR / 100) % 10) << 12) +
+ (((DRXK_VERSION_MAJOR / 10) % 10) << 8) +
+ ((DRXK_VERSION_MAJOR % 10) << 4) +
+ (DRXK_VERSION_MINOR % 10);
+ status = write16(state, SCU_RAM_DRIVER_VER_HI__A, driverVersion);
+ if (status < 0)
+ goto error;
+ driverVersion =
+ (((DRXK_VERSION_PATCH / 1000) % 10) << 12) +
+ (((DRXK_VERSION_PATCH / 100) % 10) << 8) +
+ (((DRXK_VERSION_PATCH / 10) % 10) << 4) +
+ (DRXK_VERSION_PATCH % 10);
+ status = write16(state, SCU_RAM_DRIVER_VER_LO__A, driverVersion);
+ if (status < 0)
+ goto error;
+
+ printk(KERN_INFO "DRXK driver version %d.%d.%d\n",
+ DRXK_VERSION_MAJOR, DRXK_VERSION_MINOR,
+ DRXK_VERSION_PATCH);
+
+ /* Dirty fix of default values for ROM/PATCH microcode
+		   Dirty because this fix makes it impossible to set up suitable values
+ before calling DRX_Open. This solution requires changes to RF AGC speed
+ to be done via the CTRL function after calling DRX_Open */
+
+ /* m_dvbtRfAgcCfg.speed = 3; */
+
+ /* Reset driver debug flags to 0 */
+ status = write16(state, SCU_RAM_DRIVER_DEBUG__A, 0);
+ if (status < 0)
+ goto error;
+ /* driver 0.9.0 */
+		/* Set up the FEC OC.
+		   NOTE: no more full FEC resets are allowed afterwards! */
+ status = write16(state, FEC_COMM_EXEC__A, FEC_COMM_EXEC_STOP);
+ if (status < 0)
+ goto error;
+ /* MPEGTS functions are still the same */
+ status = MPEGTSDtoInit(state);
+ if (status < 0)
+ goto error;
+ status = MPEGTSStop(state);
+ if (status < 0)
+ goto error;
+ status = MPEGTSConfigurePolarity(state);
+ if (status < 0)
+ goto error;
+ status = MPEGTSConfigurePins(state, state->m_enableMPEGOutput);
+ if (status < 0)
+ goto error;
+ /* added: configure GPIO */
+ status = WriteGPIO(state);
+ if (status < 0)
+ goto error;
+
+ state->m_DrxkState = DRXK_STOPPED;
+
+ if (state->m_bPowerDown) {
+ status = PowerDownDevice(state);
+ if (status < 0)
+ goto error;
+ state->m_DrxkState = DRXK_POWERED_DOWN;
+ } else
+ state->m_DrxkState = DRXK_STOPPED;
+ }
+error:
+ if (status < 0)
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+
+ return status;
+}
+
+static void drxk_c_release(struct dvb_frontend *fe)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+
+ dprintk(1, "\n");
+ kfree(state);
+}
+
+static int drxk_c_init(struct dvb_frontend *fe)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+
+ dprintk(1, "\n");
+ if (mutex_trylock(&state->ctlock) == 0)
+ return -EBUSY;
+ SetOperationMode(state, OM_QAM_ITU_A);
+ return 0;
+}
+
+static int drxk_c_sleep(struct dvb_frontend *fe)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+
+ dprintk(1, "\n");
+ ShutDown(state);
+ mutex_unlock(&state->ctlock);
+ return 0;
+}
+
+static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+
+ dprintk(1, "%s\n", enable ? "enable" : "disable");
+ return ConfigureI2CBridge(state, enable ? true : false);
+}
+
+static int drxk_set_parameters(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+ u32 IF;
+
+ dprintk(1, "\n");
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ if (fe->ops.tuner_ops.set_params)
+ fe->ops.tuner_ops.set_params(fe, p);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ state->param = *p;
+ fe->ops.tuner_ops.get_frequency(fe, &IF);
+ Start(state, 0, IF);
+
+ /* printk(KERN_DEBUG "drxk: %s IF=%d done\n", __func__, IF); */
+
+ return 0;
+}
+
+static int drxk_c_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ dprintk(1, "\n");
+ return 0;
+}
+
+static int drxk_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+ u32 stat;
+
+ dprintk(1, "\n");
+ *status = 0;
+ GetLockStatus(state, &stat, 0);
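+	/*
+	 * Map the internal lock state onto fe_status_t bits:
+	 * 0x07 = SIGNAL | CARRIER | VITERBI, 0x0f adds SYNC and
+	 * 0x1f adds LOCK on top of that.
+	 */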
+ if (stat == MPEG_LOCK)
+ *status |= 0x1f;
+ if (stat == FEC_LOCK)
+ *status |= 0x0f;
+ if (stat == DEMOD_LOCK)
+ *status |= 0x07;
+ return 0;
+}
+
+static int drxk_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ dprintk(1, "\n");
+
+ *ber = 0;
+ return 0;
+}
+
+static int drxk_read_signal_strength(struct dvb_frontend *fe,
+ u16 *strength)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+ u32 val = 0;
+
+ dprintk(1, "\n");
+ ReadIFAgc(state, &val);
+ *strength = val & 0xffff;
+ return 0;
+}
+
+static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+ s32 snr2;
+
+ dprintk(1, "\n");
+ GetSignalToNoise(state, &snr2);
+ *snr = snr2 & 0xffff;
+ return 0;
+}
+
+static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+ u16 err;
+
+ dprintk(1, "\n");
+ DVBTQAMGetAccPktErr(state, &err);
+ *ucblocks = (u32) err;
+ return 0;
+}
+
+static int drxk_c_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings
+ *sets)
+{
+ dprintk(1, "\n");
+ sets->min_delay_ms = 3000;
+ sets->max_drift = 0;
+ sets->step_size = 0;
+ return 0;
+}
+
+static void drxk_t_release(struct dvb_frontend *fe)
+{
+ /*
+ * There's nothing to release here, as the state struct
+ * is already freed by drxk_c_release.
+ */
+}
+
+static int drxk_t_init(struct dvb_frontend *fe)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+
+ dprintk(1, "\n");
+ if (mutex_trylock(&state->ctlock) == 0)
+ return -EBUSY;
+ SetOperationMode(state, OM_DVBT);
+ return 0;
+}
+
+static int drxk_t_sleep(struct dvb_frontend *fe)
+{
+ struct drxk_state *state = fe->demodulator_priv;
+
+ dprintk(1, "\n");
+ mutex_unlock(&state->ctlock);
+ return 0;
+}
+
+static int drxk_t_get_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ dprintk(1, "\n");
+
+ return 0;
+}
+
+static struct dvb_frontend_ops drxk_c_ops = {
+ .info = {
+ .name = "DRXK DVB-C",
+ .type = FE_QAM,
+ .frequency_stepsize = 62500,
+ .frequency_min = 47000000,
+ .frequency_max = 862000000,
+ .symbol_rate_min = 870000,
+ .symbol_rate_max = 11700000,
+ .caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 |
+ FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_FEC_AUTO},
+ .release = drxk_c_release,
+ .init = drxk_c_init,
+ .sleep = drxk_c_sleep,
+ .i2c_gate_ctrl = drxk_gate_ctrl,
+
+ .set_frontend = drxk_set_parameters,
+ .get_frontend = drxk_c_get_frontend,
+ .get_tune_settings = drxk_c_get_tune_settings,
+
+ .read_status = drxk_read_status,
+ .read_ber = drxk_read_ber,
+ .read_signal_strength = drxk_read_signal_strength,
+ .read_snr = drxk_read_snr,
+ .read_ucblocks = drxk_read_ucblocks,
+};
+
+static struct dvb_frontend_ops drxk_t_ops = {
+ .info = {
+ .name = "DRXK DVB-T",
+ .type = FE_OFDM,
+ .frequency_min = 47125000,
+ .frequency_max = 865000000,
+ .frequency_stepsize = 166667,
+ .frequency_tolerance = 0,
+ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QAM_16 | FE_CAN_QAM_64 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER | FE_CAN_MUTE_TS},
+ .release = drxk_t_release,
+ .init = drxk_t_init,
+ .sleep = drxk_t_sleep,
+ .i2c_gate_ctrl = drxk_gate_ctrl,
+
+ .set_frontend = drxk_set_parameters,
+ .get_frontend = drxk_t_get_frontend,
+
+ .read_status = drxk_read_status,
+ .read_ber = drxk_read_ber,
+ .read_signal_strength = drxk_read_signal_strength,
+ .read_snr = drxk_read_snr,
+ .read_ucblocks = drxk_read_ucblocks,
+};
+
+struct dvb_frontend *drxk_attach(const struct drxk_config *config,
+ struct i2c_adapter *i2c,
+ struct dvb_frontend **fe_t)
+{
+ struct drxk_state *state = NULL;
+ u8 adr = config->adr;
+
+ dprintk(1, "\n");
+ state = kzalloc(sizeof(struct drxk_state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ state->i2c = i2c;
+ state->demod_address = adr;
+ state->single_master = config->single_master;
+ state->microcode_name = config->microcode_name;
+ state->no_i2c_bridge = config->no_i2c_bridge;
+ state->antenna_gpio = config->antenna_gpio;
+ state->antenna_dvbt = config->antenna_dvbt;
+
+ /* NOTE: as more UIO bits will be used, add them to the mask */
+ state->UIO_mask = config->antenna_gpio;
+
+ /* Default gpio to DVB-C */
+ if (!state->antenna_dvbt && state->antenna_gpio)
+ state->m_GPIO |= state->antenna_gpio;
+ else
+ state->m_GPIO &= ~state->antenna_gpio;
+
+ mutex_init(&state->mutex);
+ mutex_init(&state->ctlock);
+
+ memcpy(&state->c_frontend.ops, &drxk_c_ops,
+ sizeof(struct dvb_frontend_ops));
+ memcpy(&state->t_frontend.ops, &drxk_t_ops,
+ sizeof(struct dvb_frontend_ops));
+ state->c_frontend.demodulator_priv = state;
+ state->t_frontend.demodulator_priv = state;
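+	/*
+	 * Both frontends share the same state; only the DVB-C frontend's
+	 * release callback frees it (see drxk_t_release).
+	 */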
+
+ init_state(state);
+ if (init_drxk(state) < 0)
+ goto error;
+ *fe_t = &state->t_frontend;
+
+ return &state->c_frontend;
+
+error:
+ printk(KERN_ERR "drxk: not found\n");
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(drxk_attach);
+
+MODULE_DESCRIPTION("DRX-K driver");
+MODULE_AUTHOR("Ralph Metzler");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/drxk_hard.h b/drivers/media/dvb/frontends/drxk_hard.h
new file mode 100644
index 00000000000..a05c32eecdc
--- /dev/null
+++ b/drivers/media/dvb/frontends/drxk_hard.h
@@ -0,0 +1,348 @@
+#include "drxk_map.h"
+
+#define DRXK_VERSION_MAJOR 0
+#define DRXK_VERSION_MINOR 9
+#define DRXK_VERSION_PATCH 4300
+
+#define HI_I2C_DELAY 42
+#define HI_I2C_BRIDGE_DELAY 350
+#define DRXK_MAX_RETRIES 100
+
+#define DRIVER_4400 1
+
+#define DRXX_JTAGID 0x039210D9
+#define DRXX_J_JTAGID 0x239310D9
+#define DRXX_K_JTAGID 0x039210D9
+
+#define DRX_UNKNOWN 254
+#define DRX_AUTO 255
+
+#define DRX_SCU_READY 0
+#define DRXK_MAX_WAITTIME (200)
+#define SCU_RESULT_OK 0
+#define SCU_RESULT_SIZE -4
+#define SCU_RESULT_INVPAR -3
+#define SCU_RESULT_UNKSTD -2
+#define SCU_RESULT_UNKCMD -1
+
+#ifndef DRXK_OFDM_TR_SHUTDOWN_TIMEOUT
+#define DRXK_OFDM_TR_SHUTDOWN_TIMEOUT (200)
+#endif
+
+#define DRXK_8VSB_MPEG_BIT_RATE 19392658UL /*bps*/
+#define DRXK_DVBT_MPEG_BIT_RATE 32000000UL /*bps*/
+#define DRXK_QAM16_MPEG_BIT_RATE 27000000UL /*bps*/
+#define DRXK_QAM32_MPEG_BIT_RATE 33000000UL /*bps*/
+#define DRXK_QAM64_MPEG_BIT_RATE 40000000UL /*bps*/
+#define DRXK_QAM128_MPEG_BIT_RATE 46000000UL /*bps*/
+#define DRXK_QAM256_MPEG_BIT_RATE 52000000UL /*bps*/
+#define DRXK_MAX_MPEG_BIT_RATE 52000000UL /*bps*/
+
+#define IQM_CF_OUT_ENA_OFDM__M 0x4
+#define IQM_FS_ADJ_SEL_B_QAM 0x1
+#define IQM_FS_ADJ_SEL_B_OFF 0x0
+#define IQM_FS_ADJ_SEL_B_VSB 0x2
+#define IQM_RC_ADJ_SEL_B_OFF 0x0
+#define IQM_RC_ADJ_SEL_B_QAM 0x1
+#define IQM_RC_ADJ_SEL_B_VSB 0x2
+
+enum OperationMode {
+ OM_NONE,
+ OM_QAM_ITU_A,
+ OM_QAM_ITU_B,
+ OM_QAM_ITU_C,
+ OM_DVBT
+};
+
+enum DRXPowerMode {
+ DRX_POWER_UP = 0,
+ DRX_POWER_MODE_1,
+ DRX_POWER_MODE_2,
+ DRX_POWER_MODE_3,
+ DRX_POWER_MODE_4,
+ DRX_POWER_MODE_5,
+ DRX_POWER_MODE_6,
+ DRX_POWER_MODE_7,
+ DRX_POWER_MODE_8,
+
+ DRX_POWER_MODE_9,
+ DRX_POWER_MODE_10,
+ DRX_POWER_MODE_11,
+ DRX_POWER_MODE_12,
+ DRX_POWER_MODE_13,
+ DRX_POWER_MODE_14,
+ DRX_POWER_MODE_15,
+ DRX_POWER_MODE_16,
+ DRX_POWER_DOWN = 255
+};
+
+
+/** \brief Intermediate power mode for DRXK, power down OFDM clock domain */
+#ifndef DRXK_POWER_DOWN_OFDM
+#define DRXK_POWER_DOWN_OFDM DRX_POWER_MODE_1
+#endif
+
+/** \brief Intermediate power mode for DRXK, power down core (sysclk) */
+#ifndef DRXK_POWER_DOWN_CORE
+#define DRXK_POWER_DOWN_CORE DRX_POWER_MODE_9
+#endif
+
+/** \brief Intermediate power mode for DRXK, power down pll (only osc runs) */
+#ifndef DRXK_POWER_DOWN_PLL
+#define DRXK_POWER_DOWN_PLL DRX_POWER_MODE_10
+#endif
+
+
+enum AGC_CTRL_MODE { DRXK_AGC_CTRL_AUTO = 0, DRXK_AGC_CTRL_USER, DRXK_AGC_CTRL_OFF };
+enum EDrxkState { DRXK_UNINITIALIZED = 0, DRXK_STOPPED, DRXK_DTV_STARTED, DRXK_ATV_STARTED, DRXK_POWERED_DOWN };
+enum EDrxkCoefArrayIndex {
+ DRXK_COEF_IDX_MN = 0,
+ DRXK_COEF_IDX_FM ,
+ DRXK_COEF_IDX_L ,
+ DRXK_COEF_IDX_LP ,
+ DRXK_COEF_IDX_BG ,
+ DRXK_COEF_IDX_DK ,
+ DRXK_COEF_IDX_I ,
+ DRXK_COEF_IDX_MAX
+};
+enum EDrxkSifAttenuation {
+ DRXK_SIF_ATTENUATION_0DB,
+ DRXK_SIF_ATTENUATION_3DB,
+ DRXK_SIF_ATTENUATION_6DB,
+ DRXK_SIF_ATTENUATION_9DB
+};
+enum EDrxkConstellation {
+ DRX_CONSTELLATION_BPSK = 0,
+ DRX_CONSTELLATION_QPSK,
+ DRX_CONSTELLATION_PSK8,
+ DRX_CONSTELLATION_QAM16,
+ DRX_CONSTELLATION_QAM32,
+ DRX_CONSTELLATION_QAM64,
+ DRX_CONSTELLATION_QAM128,
+ DRX_CONSTELLATION_QAM256,
+ DRX_CONSTELLATION_QAM512,
+ DRX_CONSTELLATION_QAM1024,
+ DRX_CONSTELLATION_UNKNOWN = DRX_UNKNOWN,
+ DRX_CONSTELLATION_AUTO = DRX_AUTO
+};
+enum EDrxkInterleaveMode {
+ DRXK_QAM_I12_J17 = 16,
+ DRXK_QAM_I_UNKNOWN = DRX_UNKNOWN
+};
+enum {
+ DRXK_SPIN_A1 = 0,
+ DRXK_SPIN_A2,
+ DRXK_SPIN_A3,
+ DRXK_SPIN_UNKNOWN
+};
+
+enum DRXKCfgDvbtSqiSpeed {
+ DRXK_DVBT_SQI_SPEED_FAST = 0,
+ DRXK_DVBT_SQI_SPEED_MEDIUM,
+ DRXK_DVBT_SQI_SPEED_SLOW,
+ DRXK_DVBT_SQI_SPEED_UNKNOWN = DRX_UNKNOWN
+} ;
+
+enum DRXFftmode_t {
+ DRX_FFTMODE_2K = 0,
+ DRX_FFTMODE_4K,
+ DRX_FFTMODE_8K,
+ DRX_FFTMODE_UNKNOWN = DRX_UNKNOWN,
+ DRX_FFTMODE_AUTO = DRX_AUTO
+};
+
+enum DRXMPEGStrWidth_t {
+ DRX_MPEG_STR_WIDTH_1,
+ DRX_MPEG_STR_WIDTH_8
+};
+
+enum DRXQamLockRange_t {
+ DRX_QAM_LOCKRANGE_NORMAL,
+ DRX_QAM_LOCKRANGE_EXTENDED
+};
+
+struct DRXKCfgDvbtEchoThres_t {
+ u16 threshold;
+ enum DRXFftmode_t fftMode;
+} ;
+
+struct SCfgAgc {
+ enum AGC_CTRL_MODE ctrlMode; /* off, user, auto */
+ u16 outputLevel; /* range dependent on AGC */
+ u16 minOutputLevel; /* range dependent on AGC */
+ u16 maxOutputLevel; /* range dependent on AGC */
+ u16 speed; /* range dependent on AGC */
+ u16 top; /* rf-agc take over point */
+ u16 cutOffCurrent; /* rf-agc is accelerated if output current
+ is below cut-off current */
+ u16 IngainTgtMax;
+ u16 FastClipCtrlDelay;
+};
+
+struct SCfgPreSaw {
+ u16 reference; /* pre SAW reference value, range 0 .. 31 */
+	bool usePreSaw;		/* TRUE if algorithms must use the pre-SAW sense */
+};
+
+struct DRXKOfdmScCmd_t {
+ u16 cmd; /**< Command number */
+	u16 subcmd;	/**< Sub-command parameter */
+	u16 param0;	/**< General purpose parameter */
+	u16 param1;	/**< General purpose parameter */
+	u16 param2;	/**< General purpose parameter */
+	u16 param3;	/**< General purpose parameter */
+	u16 param4;	/**< General purpose parameter */
+};
+
+struct drxk_state {
+ struct dvb_frontend c_frontend;
+ struct dvb_frontend t_frontend;
+ struct dvb_frontend_parameters param;
+ struct device *dev;
+
+ struct i2c_adapter *i2c;
+ u8 demod_address;
+ void *priv;
+
+ struct mutex mutex;
+ struct mutex ctlock;
+
+ u32 m_Instance; /**< Channel 1,2,3 or 4 */
+
+ int m_ChunkSize;
+ u8 Chunk[256];
+
+ bool m_hasLNA;
+ bool m_hasDVBT;
+ bool m_hasDVBC;
+ bool m_hasAudio;
+ bool m_hasATV;
+ bool m_hasOOB;
+ bool m_hasSAWSW; /**< TRUE if mat_tx is available */
+ bool m_hasGPIO1; /**< TRUE if mat_rx is available */
+ bool m_hasGPIO2; /**< TRUE if GPIO is available */
+ bool m_hasIRQN; /**< TRUE if IRQN is available */
+ u16 m_oscClockFreq;
+ u16 m_HICfgTimingDiv;
+ u16 m_HICfgBridgeDelay;
+ u16 m_HICfgWakeUpKey;
+ u16 m_HICfgTimeout;
+ u16 m_HICfgCtrl;
+ s32 m_sysClockFreq; /**< system clock frequency in kHz */
+
+ enum EDrxkState m_DrxkState; /**< State of Drxk (init,stopped,started) */
+ enum OperationMode m_OperationMode; /**< digital standards */
+ struct SCfgAgc m_vsbRfAgcCfg; /**< settings for VSB RF-AGC */
+ struct SCfgAgc m_vsbIfAgcCfg; /**< settings for VSB IF-AGC */
+ u16 m_vsbPgaCfg; /**< settings for VSB PGA */
+ struct SCfgPreSaw m_vsbPreSawCfg; /**< settings for pre SAW sense */
+ s32 m_Quality83percent; /**< MER level (*0.1 dB) for 83% quality indication */
+ s32 m_Quality93percent; /**< MER level (*0.1 dB) for 93% quality indication */
+ bool m_smartAntInverted;
+ bool m_bDebugEnableBridge;
+ bool m_bPDownOpenBridge; /**< only open DRXK bridge before power-down once it has been accessed */
+ bool m_bPowerDown; /**< Power down when not used */
+
+ u32 m_IqmFsRateOfs; /**< frequency shift as written to DRXK register (28bit fixpoint) */
+
+ bool m_enableMPEGOutput; /**< If TRUE, enable MPEG output */
+ bool m_insertRSByte; /**< If TRUE, insert RS byte */
+ bool m_enableParallel; /**< If TRUE, parallel out otherwise serial */
+ bool m_invertDATA; /**< If TRUE, invert DATA signals */
+ bool m_invertERR; /**< If TRUE, invert ERR signal */
+ bool m_invertSTR; /**< If TRUE, invert STR signals */
+ bool m_invertVAL; /**< If TRUE, invert VAL signals */
+ bool m_invertCLK; /**< If TRUE, invert CLK signals */
+ bool m_DVBCStaticCLK;
+ bool m_DVBTStaticCLK; /**< If TRUE, static MPEG clockrate will
+ be used, otherwise clockrate will
+ adapt to the bitrate of the TS */
+ u32 m_DVBTBitrate;
+ u32 m_DVBCBitrate;
+
+ u8 m_TSDataStrength;
+ u8 m_TSClockkStrength;
+
+ enum DRXMPEGStrWidth_t m_widthSTR; /**< MPEG start width */
+ u32 m_mpegTsStaticBitrate; /**< Maximum bitrate in b/s in case
+ static clockrate is selected */
+
+ /* LARGE_INTEGER m_StartTime; */ /**< Contains the time of the last demod start */
+ s32 m_MpegLockTimeOut; /**< WaitForLockStatus Timeout (counts from start time) */
+ s32 m_DemodLockTimeOut; /**< WaitForLockStatus Timeout (counts from start time) */
+
+ bool m_disableTEIhandling;
+
+ bool m_RfAgcPol;
+ bool m_IfAgcPol;
+
+ struct SCfgAgc m_atvRfAgcCfg; /**< settings for ATV RF-AGC */
+ struct SCfgAgc m_atvIfAgcCfg; /**< settings for ATV IF-AGC */
+ struct SCfgPreSaw m_atvPreSawCfg; /**< settings for ATV pre SAW sense */
+ bool m_phaseCorrectionBypass;
+ s16 m_atvTopVidPeak;
+ u16 m_atvTopNoiseTh;
+ enum EDrxkSifAttenuation m_sifAttenuation;
+ bool m_enableCVBSOutput;
+ bool m_enableSIFOutput;
+ bool m_bMirrorFreqSpect;
+ enum EDrxkConstellation m_Constellation; /**< Constellation type of the channel */
+ u32 m_CurrSymbolRate; /**< Current QAM symbol rate */
+ struct SCfgAgc m_qamRfAgcCfg; /**< settings for QAM RF-AGC */
+ struct SCfgAgc m_qamIfAgcCfg; /**< settings for QAM IF-AGC */
+ u16 m_qamPgaCfg; /**< settings for QAM PGA */
+ struct SCfgPreSaw m_qamPreSawCfg; /**< settings for QAM pre SAW sense */
+ enum EDrxkInterleaveMode m_qamInterleaveMode; /**< QAM Interleave mode */
+ u16 m_fecRsPlen;
+ u16 m_fecRsPrescale;
+
+ enum DRXKCfgDvbtSqiSpeed m_sqiSpeed;
+
+ u16 m_GPIO;
+ u16 m_GPIOCfg;
+
+	struct SCfgAgc    m_dvbtRfAgcCfg;     /**< settings for DVB-T RF-AGC */
+	struct SCfgAgc    m_dvbtIfAgcCfg;     /**< settings for DVB-T IF-AGC */
+	struct SCfgPreSaw m_dvbtPreSawCfg;    /**< settings for DVB-T pre SAW sense */
+
+ u16 m_agcFastClipCtrlDelay;
+ bool m_adcCompPassed;
+ u16 m_adcCompCoef[64];
+ u16 m_adcState;
+
+ u8 *m_microcode;
+ int m_microcode_length;
+ bool m_DRXK_A1_PATCH_CODE;
+ bool m_DRXK_A1_ROM_CODE;
+ bool m_DRXK_A2_ROM_CODE;
+ bool m_DRXK_A3_ROM_CODE;
+ bool m_DRXK_A2_PATCH_CODE;
+ bool m_DRXK_A3_PATCH_CODE;
+
+ bool m_rfmirror;
+ u8 m_deviceSpin;
+ u32 m_iqmRcRate;
+
+ enum DRXPowerMode m_currentPowerMode;
+
+ /*
+	 * Configurable driver parameters. They store the values found
+	 * in struct drxk_config.
+ */
+
+ u16 UIO_mask; /* Bits used by UIO */
+
+ bool single_master;
+ bool no_i2c_bridge;
+ bool antenna_dvbt;
+ u16 antenna_gpio;
+
+ const char *microcode_name;
+};
+
+#define NEVER_LOCK 0
+#define NOT_LOCKED 1
+#define DEMOD_LOCK 2
+#define FEC_LOCK 3
+#define MPEG_LOCK 4
+
diff --git a/drivers/media/dvb/frontends/drxk_map.h b/drivers/media/dvb/frontends/drxk_map.h
new file mode 100644
index 00000000000..9b11a832886
--- /dev/null
+++ b/drivers/media/dvb/frontends/drxk_map.h
@@ -0,0 +1,449 @@
+#define AUD_COMM_EXEC__A 0x1000000
+#define AUD_COMM_EXEC_STOP 0x0
+#define FEC_COMM_EXEC__A 0x1C00000
+#define FEC_COMM_EXEC_STOP 0x0
+#define FEC_COMM_EXEC_ACTIVE 0x1
+#define FEC_DI_COMM_EXEC__A 0x1C20000
+#define FEC_DI_COMM_EXEC_STOP 0x0
+#define FEC_DI_INPUT_CTL__A 0x1C20016
+#define FEC_RS_COMM_EXEC__A 0x1C30000
+#define FEC_RS_COMM_EXEC_STOP 0x0
+#define FEC_RS_MEASUREMENT_PERIOD__A 0x1C30012
+#define FEC_RS_MEASUREMENT_PRESCALE__A 0x1C30013
+#define FEC_OC_MODE__A 0x1C40011
+#define FEC_OC_MODE_PARITY__M 0x1
+#define FEC_OC_DTO_MODE__A 0x1C40014
+#define FEC_OC_DTO_MODE_DYNAMIC__M 0x1
+#define FEC_OC_DTO_MODE_OFFSET_ENABLE__M 0x4
+#define FEC_OC_DTO_PERIOD__A 0x1C40015
+#define FEC_OC_DTO_BURST_LEN__A 0x1C40018
+#define FEC_OC_FCT_MODE__A 0x1C4001A
+#define FEC_OC_FCT_MODE__PRE 0x0
+#define FEC_OC_FCT_MODE_RAT_ENA__M 0x1
+#define FEC_OC_FCT_MODE_VIRT_ENA__M 0x2
+#define FEC_OC_TMD_MODE__A 0x1C4001E
+#define FEC_OC_TMD_COUNT__A 0x1C4001F
+#define FEC_OC_TMD_HI_MARGIN__A 0x1C40020
+#define FEC_OC_TMD_LO_MARGIN__A 0x1C40021
+#define FEC_OC_TMD_INT_UPD_RATE__A 0x1C40023
+#define FEC_OC_AVR_PARM_A__A 0x1C40026
+#define FEC_OC_AVR_PARM_B__A 0x1C40027
+#define FEC_OC_RCN_GAIN__A 0x1C4002E
+#define FEC_OC_RCN_CTL_RATE_LO__A 0x1C40030
+#define FEC_OC_RCN_CTL_STEP_LO__A 0x1C40032
+#define FEC_OC_RCN_CTL_STEP_HI__A 0x1C40033
+#define FEC_OC_SNC_MODE__A 0x1C40040
+#define FEC_OC_SNC_MODE_SHUTDOWN__M 0x10
+#define FEC_OC_SNC_LWM__A 0x1C40041
+#define FEC_OC_SNC_HWM__A 0x1C40042
+#define FEC_OC_SNC_UNLOCK__A 0x1C40043
+#define FEC_OC_SNC_FAIL_PERIOD__A 0x1C40046
+#define FEC_OC_IPR_MODE__A 0x1C40048
+#define FEC_OC_IPR_MODE_SERIAL__M 0x1
+#define FEC_OC_IPR_MODE_MCLK_DIS_DAT_ABS__M 0x4
+#define FEC_OC_IPR_MODE_MVAL_DIS_PAR__M 0x10
+#define FEC_OC_IPR_INVERT__A 0x1C40049
+#define FEC_OC_IPR_INVERT_MD0__M 0x1
+#define FEC_OC_IPR_INVERT_MD1__M 0x2
+#define FEC_OC_IPR_INVERT_MD2__M 0x4
+#define FEC_OC_IPR_INVERT_MD3__M 0x8
+#define FEC_OC_IPR_INVERT_MD4__M 0x10
+#define FEC_OC_IPR_INVERT_MD5__M 0x20
+#define FEC_OC_IPR_INVERT_MD6__M 0x40
+#define FEC_OC_IPR_INVERT_MD7__M 0x80
+#define FEC_OC_IPR_INVERT_MERR__M 0x100
+#define FEC_OC_IPR_INVERT_MSTRT__M 0x200
+#define FEC_OC_IPR_INVERT_MVAL__M 0x400
+#define FEC_OC_IPR_INVERT_MCLK__M 0x800
+#define FEC_OC_OCR_INVERT__A 0x1C40052
+#define IQM_COMM_EXEC__A 0x1800000
+#define IQM_COMM_EXEC_B_STOP 0x0
+#define IQM_COMM_EXEC_B_ACTIVE 0x1
+#define IQM_FS_RATE_OFS_LO__A 0x1820010
+#define IQM_FS_ADJ_SEL__A 0x1820014
+#define IQM_FS_ADJ_SEL_B_OFF 0x0
+#define IQM_FS_ADJ_SEL_B_QAM 0x1
+#define IQM_FS_ADJ_SEL_B_VSB 0x2
+#define IQM_FD_RATESEL__A 0x1830010
+#define IQM_RC_RATE_OFS_LO__A 0x1840010
+#define IQM_RC_RATE_OFS_LO__W 16
+#define IQM_RC_RATE_OFS_LO__M 0xFFFF
+#define IQM_RC_RATE_OFS_HI__M 0xFF
+#define IQM_RC_ADJ_SEL__A 0x1840014
+#define IQM_RC_ADJ_SEL_B_OFF 0x0
+#define IQM_RC_ADJ_SEL_B_QAM 0x1
+#define IQM_RC_ADJ_SEL_B_VSB 0x2
+#define IQM_RC_STRETCH__A 0x1840016
+#define IQM_CF_COMM_INT_MSK__A 0x1860006
+#define IQM_CF_SYMMETRIC__A 0x1860010
+#define IQM_CF_MIDTAP__A 0x1860011
+#define IQM_CF_MIDTAP_RE__B 0
+#define IQM_CF_MIDTAP_IM__B 1
+#define IQM_CF_OUT_ENA__A 0x1860012
+#define IQM_CF_OUT_ENA_QAM__B 1
+#define IQM_CF_OUT_ENA_OFDM__M 0x4
+#define IQM_CF_ADJ_SEL__A 0x1860013
+#define IQM_CF_SCALE__A 0x1860014
+#define IQM_CF_SCALE_SH__A 0x1860015
+#define IQM_CF_SCALE_SH__PRE 0x0
+#define IQM_CF_POW_MEAS_LEN__A 0x1860017
+#define IQM_CF_DS_ENA__A 0x1860019
+#define IQM_CF_TAP_RE0__A 0x1860020
+#define IQM_CF_TAP_IM0__A 0x1860040
+#define IQM_CF_CLP_VAL__A 0x1860060
+#define IQM_CF_DATATH__A 0x1860061
+#define IQM_CF_PKDTH__A 0x1860062
+#define IQM_CF_WND_LEN__A 0x1860063
+#define IQM_CF_DET_LCT__A 0x1860064
+#define IQM_CF_BYPASSDET__A 0x1860067
+#define IQM_AF_COMM_EXEC__A 0x1870000
+#define IQM_AF_COMM_EXEC_ACTIVE 0x1
+#define IQM_AF_CLKNEG__A 0x1870012
+#define IQM_AF_CLKNEG_CLKNEGDATA__M 0x2
+#define IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_POS 0x0
+#define IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_NEG 0x2
+#define IQM_AF_START_LOCK__A 0x187001B
+#define IQM_AF_PHASE0__A 0x187001C
+#define IQM_AF_PHASE1__A 0x187001D
+#define IQM_AF_PHASE2__A 0x187001E
+#define IQM_AF_CLP_LEN__A 0x1870023
+#define IQM_AF_CLP_TH__A 0x1870024
+#define IQM_AF_SNS_LEN__A 0x1870026
+#define IQM_AF_AGC_IF__A 0x1870028
+#define IQM_AF_AGC_RF__A 0x1870029
+#define IQM_AF_PDREF__A 0x187002B
+#define IQM_AF_PDREF__M 0x1F
+#define IQM_AF_STDBY__A 0x187002C
+#define IQM_AF_STDBY_STDBY_ADC_STANDBY 0x2
+#define IQM_AF_STDBY_STDBY_AMP_STANDBY 0x4
+#define IQM_AF_STDBY_STDBY_PD_STANDBY 0x8
+#define IQM_AF_STDBY_STDBY_TAGC_IF_STANDBY 0x10
+#define IQM_AF_STDBY_STDBY_TAGC_RF_STANDBY 0x20
+#define IQM_AF_AMUX__A 0x187002D
+#define IQM_AF_AMUX_SIGNAL2ADC 0x1
+#define IQM_AF_UPD_SEL__A 0x187002F
+#define IQM_AF_INC_LCT__A 0x1870034
+#define IQM_AF_INC_BYPASS__A 0x1870036
+#define OFDM_CP_COMM_EXEC__A 0x2800000
+#define OFDM_CP_COMM_EXEC_STOP 0x0
+#define OFDM_EC_SB_PRIOR__A 0x3410013
+#define OFDM_EC_SB_PRIOR_HI 0x0
+#define OFDM_EC_SB_PRIOR_LO 0x1
+#define OFDM_EQ_TOP_TD_TPS_CONST__A 0x3010054
+#define OFDM_EQ_TOP_TD_TPS_CONST__M 0x3
+#define OFDM_EQ_TOP_TD_TPS_CONST_64QAM 0x2
+#define OFDM_EQ_TOP_TD_TPS_CODE_HP__A 0x3010056
+#define OFDM_EQ_TOP_TD_TPS_CODE_HP__M 0x7
+#define OFDM_EQ_TOP_TD_TPS_CODE_LP_7_8 0x4
+#define OFDM_EQ_TOP_TD_SQR_ERR_I__A 0x301005E
+#define OFDM_EQ_TOP_TD_SQR_ERR_Q__A 0x301005F
+#define OFDM_EQ_TOP_TD_SQR_ERR_EXP__A 0x3010060
+#define OFDM_EQ_TOP_TD_REQ_SMB_CNT__A 0x3010061
+#define OFDM_EQ_TOP_TD_TPS_PWR_OFS__A 0x3010062
+#define OFDM_LC_COMM_EXEC__A 0x3800000
+#define OFDM_LC_COMM_EXEC_STOP 0x0
+#define OFDM_SC_COMM_EXEC__A 0x3C00000
+#define OFDM_SC_COMM_EXEC_STOP 0x0
+#define OFDM_SC_COMM_STATE__A 0x3C00001
+#define OFDM_SC_RA_RAM_PARAM0__A 0x3C20040
+#define OFDM_SC_RA_RAM_PARAM1__A 0x3C20041
+#define OFDM_SC_RA_RAM_CMD_ADDR__A 0x3C20042
+#define OFDM_SC_RA_RAM_CMD__A 0x3C20043
+#define OFDM_SC_RA_RAM_CMD_NULL 0x0
+#define OFDM_SC_RA_RAM_CMD_PROC_START 0x1
+#define OFDM_SC_RA_RAM_CMD_SET_PREF_PARAM 0x3
+#define OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM 0x4
+#define OFDM_SC_RA_RAM_CMD_GET_OP_PARAM 0x5
+#define OFDM_SC_RA_RAM_CMD_USER_IO 0x6
+#define OFDM_SC_RA_RAM_CMD_SET_TIMER 0x7
+#define OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING 0x8
+#define OFDM_SC_RA_RAM_SW_EVENT_RUN_NMASK__M 0x1
+#define OFDM_SC_RA_RAM_LOCKTRACK_MIN 0x1
+#define OFDM_SC_RA_RAM_OP_PARAM__A 0x3C20048
+#define OFDM_SC_RA_RAM_OP_PARAM_MODE__M 0x3
+#define OFDM_SC_RA_RAM_OP_PARAM_MODE_2K 0x0
+#define OFDM_SC_RA_RAM_OP_PARAM_MODE_8K 0x1
+#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_32 0x0
+#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_16 0x4
+#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_8 0x8
+#define OFDM_SC_RA_RAM_OP_PARAM_GUARD_4 0xC
+#define OFDM_SC_RA_RAM_OP_PARAM_CONST_QPSK 0x0
+#define OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM16 0x10
+#define OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64 0x20
+#define OFDM_SC_RA_RAM_OP_PARAM_HIER_NO 0x0
+#define OFDM_SC_RA_RAM_OP_PARAM_HIER_A1 0x40
+#define OFDM_SC_RA_RAM_OP_PARAM_HIER_A2 0x80
+#define OFDM_SC_RA_RAM_OP_PARAM_HIER_A4 0xC0
+#define OFDM_SC_RA_RAM_OP_PARAM_RATE_1_2 0x0
+#define OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3 0x200
+#define OFDM_SC_RA_RAM_OP_PARAM_RATE_3_4 0x400
+#define OFDM_SC_RA_RAM_OP_PARAM_RATE_5_6 0x600
+#define OFDM_SC_RA_RAM_OP_PARAM_RATE_7_8 0x800
+#define OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI 0x0
+#define OFDM_SC_RA_RAM_OP_PARAM_PRIO_LO 0x1000
+#define OFDM_SC_RA_RAM_OP_AUTO_MODE__M 0x1
+#define OFDM_SC_RA_RAM_OP_AUTO_GUARD__M 0x2
+#define OFDM_SC_RA_RAM_OP_AUTO_CONST__M 0x4
+#define OFDM_SC_RA_RAM_OP_AUTO_HIER__M 0x8
+#define OFDM_SC_RA_RAM_OP_AUTO_RATE__M 0x10
+#define OFDM_SC_RA_RAM_LOCK__A 0x3C2004B
+#define OFDM_SC_RA_RAM_LOCK_DEMOD__M 0x1
+#define OFDM_SC_RA_RAM_LOCK_FEC__M 0x2
+#define OFDM_SC_RA_RAM_LOCK_MPEG__M 0x4
+#define OFDM_SC_RA_RAM_LOCK_NODVBT__M 0x8
+#define OFDM_SC_RA_RAM_BE_OPT_DELAY__A 0x3C2004D
+#define OFDM_SC_RA_RAM_BE_OPT_INIT_DELAY__A 0x3C2004E
+#define OFDM_SC_RA_RAM_ECHO_THRES__A 0x3C2004F
+#define OFDM_SC_RA_RAM_ECHO_THRES_8K__B 0
+#define OFDM_SC_RA_RAM_ECHO_THRES_8K__M 0xFF
+#define OFDM_SC_RA_RAM_ECHO_THRES_2K__B 8
+#define OFDM_SC_RA_RAM_ECHO_THRES_2K__M 0xFF00
+#define OFDM_SC_RA_RAM_CONFIG__A 0x3C20050
+#define OFDM_SC_RA_RAM_CONFIG_NE_FIX_ENABLE__M 0x800
+#define OFDM_SC_RA_RAM_FR_THRES_8K__A 0x3C2007D
+#define OFDM_SC_RA_RAM_NI_INIT_2K_PER_LEFT__A 0x3C200E0
+#define OFDM_SC_RA_RAM_NI_INIT_2K_PER_RIGHT__A 0x3C200E1
+#define OFDM_SC_RA_RAM_NI_INIT_8K_PER_LEFT__A 0x3C200E3
+#define OFDM_SC_RA_RAM_NI_INIT_8K_PER_RIGHT__A 0x3C200E4
+#define OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A 0x3C200F8
+#define QAM_COMM_EXEC__A 0x1400000
+#define QAM_COMM_EXEC_STOP 0x0
+#define QAM_COMM_EXEC_ACTIVE 0x1
+#define QAM_TOP_ANNEX_A 0x0
+#define QAM_TOP_ANNEX_C 0x2
+#define QAM_SL_ERR_POWER__A 0x1430017
+#define QAM_DQ_QUAL_FUN0__A 0x1440018
+#define QAM_DQ_QUAL_FUN1__A 0x1440019
+#define QAM_DQ_QUAL_FUN2__A 0x144001A
+#define QAM_DQ_QUAL_FUN3__A 0x144001B
+#define QAM_DQ_QUAL_FUN4__A 0x144001C
+#define QAM_DQ_QUAL_FUN5__A 0x144001D
+#define QAM_LC_MODE__A 0x1450010
+#define QAM_LC_QUAL_TAB0__A 0x1450018
+#define QAM_LC_QUAL_TAB1__A 0x1450019
+#define QAM_LC_QUAL_TAB2__A 0x145001A
+#define QAM_LC_QUAL_TAB3__A 0x145001B
+#define QAM_LC_QUAL_TAB4__A 0x145001C
+#define QAM_LC_QUAL_TAB5__A 0x145001D
+#define QAM_LC_QUAL_TAB6__A 0x145001E
+#define QAM_LC_QUAL_TAB8__A 0x145001F
+#define QAM_LC_QUAL_TAB9__A 0x1450020
+#define QAM_LC_QUAL_TAB10__A 0x1450021
+#define QAM_LC_QUAL_TAB12__A 0x1450022
+#define QAM_LC_QUAL_TAB15__A 0x1450023
+#define QAM_LC_QUAL_TAB16__A 0x1450024
+#define QAM_LC_QUAL_TAB20__A 0x1450025
+#define QAM_LC_QUAL_TAB25__A 0x1450026
+#define QAM_LC_LPF_FACTORP__A 0x1450028
+#define QAM_LC_LPF_FACTORI__A 0x1450029
+#define QAM_LC_RATE_LIMIT__A 0x145002A
+#define QAM_LC_SYMBOL_FREQ__A 0x145002B
+#define QAM_SY_TIMEOUT__A 0x1470011
+#define QAM_SY_TIMEOUT__PRE 0x3A98
+#define QAM_SY_SYNC_LWM__A 0x1470012
+#define QAM_SY_SYNC_AWM__A 0x1470013
+#define QAM_SY_SYNC_HWM__A 0x1470014
+#define QAM_SY_SP_INV__A 0x1470017
+#define QAM_SY_SP_INV_SPECTRUM_INV_DIS 0x0
+#define SCU_COMM_EXEC__A 0x800000
+#define SCU_COMM_EXEC_STOP 0x0
+#define SCU_COMM_EXEC_ACTIVE 0x1
+#define SCU_COMM_EXEC_HOLD 0x2
+#define SCU_RAM_DRIVER_DEBUG__A 0x831EBF
+#define SCU_RAM_QAM_FSM_STEP_PERIOD__A 0x831EC4
+#define SCU_RAM_GPIO__A 0x831EC7
+#define SCU_RAM_GPIO_HW_LOCK_IND_DISABLE 0x0
+#define SCU_RAM_AGC_CLP_CTRL_MODE__A 0x831EC8
+#define SCU_RAM_FEC_ACCUM_PKT_FAILURES__A 0x831ECB
+#define SCU_RAM_FEC_PRE_RS_BER_FILTER_SH__A 0x831F05
+#define SCU_RAM_AGC_FAST_SNS_CTRL_DELAY__A 0x831F15
+#define SCU_RAM_AGC_KI_CYCLEN__A 0x831F17
+#define SCU_RAM_AGC_SNS_CYCLEN__A 0x831F18
+#define SCU_RAM_AGC_RF_SNS_DEV_MAX__A 0x831F19
+#define SCU_RAM_AGC_RF_SNS_DEV_MIN__A 0x831F1A
+#define SCU_RAM_AGC_RF_MAX__A 0x831F1B
+#define SCU_RAM_AGC_CONFIG__A 0x831F24
+#define SCU_RAM_AGC_CONFIG_DISABLE_RF_AGC__M 0x1
+#define SCU_RAM_AGC_CONFIG_DISABLE_IF_AGC__M 0x2
+#define SCU_RAM_AGC_CONFIG_INV_IF_POL__M 0x100
+#define SCU_RAM_AGC_CONFIG_INV_RF_POL__M 0x200
+#define SCU_RAM_AGC_KI__A 0x831F25
+#define SCU_RAM_AGC_KI_RF__B 4
+#define SCU_RAM_AGC_KI_RF__M 0xF0
+#define SCU_RAM_AGC_KI_IF__B 8
+#define SCU_RAM_AGC_KI_IF__M 0xF00
+#define SCU_RAM_AGC_KI_RED__A 0x831F26
+#define SCU_RAM_AGC_KI_RED_RAGC_RED__B 2
+#define SCU_RAM_AGC_KI_RED_RAGC_RED__M 0xC
+#define SCU_RAM_AGC_KI_RED_IAGC_RED__B 4
+#define SCU_RAM_AGC_KI_RED_IAGC_RED__M 0x30
+#define SCU_RAM_AGC_KI_INNERGAIN_MIN__A 0x831F27
+#define SCU_RAM_AGC_KI_MINGAIN__A 0x831F28
+#define SCU_RAM_AGC_KI_MAXGAIN__A 0x831F29
+#define SCU_RAM_AGC_KI_MAXMINGAIN_TH__A 0x831F2A
+#define SCU_RAM_AGC_KI_MIN__A 0x831F2B
+#define SCU_RAM_AGC_KI_MAX__A 0x831F2C
+#define SCU_RAM_AGC_CLP_SUM__A 0x831F2D
+#define SCU_RAM_AGC_CLP_SUM_MIN__A 0x831F2E
+#define SCU_RAM_AGC_CLP_SUM_MAX__A 0x831F2F
+#define SCU_RAM_AGC_CLP_CYCLEN__A 0x831F30
+#define SCU_RAM_AGC_CLP_CYCCNT__A 0x831F31
+#define SCU_RAM_AGC_CLP_DIR_TO__A 0x831F32
+#define SCU_RAM_AGC_CLP_DIR_WD__A 0x831F33
+#define SCU_RAM_AGC_CLP_DIR_STP__A 0x831F34
+#define SCU_RAM_AGC_SNS_SUM__A 0x831F35
+#define SCU_RAM_AGC_SNS_SUM_MIN__A 0x831F36
+#define SCU_RAM_AGC_SNS_SUM_MAX__A 0x831F37
+#define SCU_RAM_AGC_SNS_CYCCNT__A 0x831F38
+#define SCU_RAM_AGC_SNS_DIR_TO__A 0x831F39
+#define SCU_RAM_AGC_SNS_DIR_WD__A 0x831F3A
+#define SCU_RAM_AGC_SNS_DIR_STP__A 0x831F3B
+#define SCU_RAM_AGC_INGAIN_TGT__A 0x831F3D
+#define SCU_RAM_AGC_INGAIN_TGT_MIN__A 0x831F3E
+#define SCU_RAM_AGC_INGAIN_TGT_MAX__A 0x831F3F
+#define SCU_RAM_AGC_IF_IACCU_HI__A 0x831F40
+#define SCU_RAM_AGC_IF_IACCU_LO__A 0x831F41
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT__A 0x831F42
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MIN__A 0x831F43
+#define SCU_RAM_AGC_IF_IACCU_HI_TGT_MAX__A 0x831F44
+#define SCU_RAM_AGC_RF_IACCU_HI__A 0x831F45
+#define SCU_RAM_AGC_RF_IACCU_LO__A 0x831F46
+#define SCU_RAM_AGC_RF_IACCU_HI_CO__A 0x831F47
+#define SCU_RAM_QAM_FSM_MEDIAN_AV_MULT__A 0x831F84
+#define SCU_RAM_QAM_FSM_RADIUS_AV_LIMIT__A 0x831F85
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET1__A 0x831F86
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET2__A 0x831F87
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET3__A 0x831F88
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET4__A 0x831F89
+#define SCU_RAM_QAM_FSM_LCAVG_OFFSET5__A 0x831F8A
+#define SCU_RAM_QAM_FSM_RTH__A 0x831F8E
+#define SCU_RAM_QAM_FSM_FTH__A 0x831F8F
+#define SCU_RAM_QAM_FSM_PTH__A 0x831F90
+#define SCU_RAM_QAM_FSM_MTH__A 0x831F91
+#define SCU_RAM_QAM_FSM_CTH__A 0x831F92
+#define SCU_RAM_QAM_FSM_QTH__A 0x831F93
+#define SCU_RAM_QAM_FSM_RATE_LIM__A 0x831F94
+#define SCU_RAM_QAM_FSM_FREQ_LIM__A 0x831F95
+#define SCU_RAM_QAM_FSM_COUNT_LIM__A 0x831F96
+#define SCU_RAM_QAM_LC_CA_COARSE__A 0x831F97
+#define SCU_RAM_QAM_LC_CA_FINE__A 0x831F99
+#define SCU_RAM_QAM_LC_CP_COARSE__A 0x831F9A
+#define SCU_RAM_QAM_LC_CP_MEDIUM__A 0x831F9B
+#define SCU_RAM_QAM_LC_CP_FINE__A 0x831F9C
+#define SCU_RAM_QAM_LC_CI_COARSE__A 0x831F9D
+#define SCU_RAM_QAM_LC_CI_MEDIUM__A 0x831F9E
+#define SCU_RAM_QAM_LC_CI_FINE__A 0x831F9F
+#define SCU_RAM_QAM_LC_EP_COARSE__A 0x831FA0
+#define SCU_RAM_QAM_LC_EP_MEDIUM__A 0x831FA1
+#define SCU_RAM_QAM_LC_EP_FINE__A 0x831FA2
+#define SCU_RAM_QAM_LC_EI_COARSE__A 0x831FA3
+#define SCU_RAM_QAM_LC_EI_MEDIUM__A 0x831FA4
+#define SCU_RAM_QAM_LC_EI_FINE__A 0x831FA5
+#define SCU_RAM_QAM_LC_CF_COARSE__A 0x831FA6
+#define SCU_RAM_QAM_LC_CF_MEDIUM__A 0x831FA7
+#define SCU_RAM_QAM_LC_CF_FINE__A 0x831FA8
+#define SCU_RAM_QAM_LC_CF1_COARSE__A 0x831FA9
+#define SCU_RAM_QAM_LC_CF1_MEDIUM__A 0x831FAA
+#define SCU_RAM_QAM_LC_CF1_FINE__A 0x831FAB
+#define SCU_RAM_QAM_SL_SIG_POWER__A 0x831FAC
+#define SCU_RAM_QAM_EQ_CMA_RAD0__A 0x831FAD
+#define SCU_RAM_QAM_EQ_CMA_RAD1__A 0x831FAE
+#define SCU_RAM_QAM_EQ_CMA_RAD2__A 0x831FAF
+#define SCU_RAM_QAM_EQ_CMA_RAD3__A 0x831FB0
+#define SCU_RAM_QAM_EQ_CMA_RAD4__A 0x831FB1
+#define SCU_RAM_QAM_EQ_CMA_RAD5__A 0x831FB2
+#define SCU_RAM_QAM_LOCKED_LOCKED_DEMOD_LOCKED 0x4000
+#define SCU_RAM_QAM_LOCKED_LOCKED_LOCKED 0x8000
+#define SCU_RAM_QAM_LOCKED_LOCKED_NEVER_LOCK 0xC000
+#define SCU_RAM_AGC_FAST_CLP_CTRL_DELAY__A 0x831FEA
+#define SCU_RAM_DRIVER_VER_HI__A 0x831FEB
+#define SCU_RAM_DRIVER_VER_LO__A 0x831FEC
+#define SCU_RAM_PARAM_15__A 0x831FED
+#define SCU_RAM_PARAM_0__A 0x831FFC
+#define SCU_RAM_COMMAND__A 0x831FFD
+#define SCU_RAM_COMMAND_CMD_DEMOD_RESET 0x1
+#define SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV 0x2
+#define SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM 0x3
+#define SCU_RAM_COMMAND_CMD_DEMOD_START 0x4
+#define SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK 0x5
+#define SCU_RAM_COMMAND_CMD_DEMOD_STOP 0x9
+#define SCU_RAM_COMMAND_STANDARD_QAM 0x200
+#define SCU_RAM_COMMAND_STANDARD_OFDM 0x400
+#define SIO_TOP_COMM_KEY__A 0x41000F
+#define SIO_TOP_COMM_KEY_KEY 0xFABA
+#define SIO_TOP_JTAGID_LO__A 0x410012
+#define SIO_HI_RA_RAM_RES__A 0x420031
+#define SIO_HI_RA_RAM_CMD__A 0x420032
+#define SIO_HI_RA_RAM_CMD_RESET 0x2
+#define SIO_HI_RA_RAM_CMD_CONFIG 0x3
+#define SIO_HI_RA_RAM_CMD_BRDCTRL 0x7
+#define SIO_HI_RA_RAM_PAR_1__A 0x420033
+#define SIO_HI_RA_RAM_PAR_1_PAR1_SEC_KEY 0x3945
+#define SIO_HI_RA_RAM_PAR_2__A 0x420034
+#define SIO_HI_RA_RAM_PAR_2_CFG_DIV__M 0x7F
+#define SIO_HI_RA_RAM_PAR_2_BRD_CFG_OPEN 0x0
+#define SIO_HI_RA_RAM_PAR_2_BRD_CFG_CLOSED 0x4
+#define SIO_HI_RA_RAM_PAR_3__A 0x420035
+#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SDA__M 0x7F
+#define SIO_HI_RA_RAM_PAR_3_CFG_DBL_SCL__B 7
+#define SIO_HI_RA_RAM_PAR_3_ACP_RW_READ 0x0
+#define SIO_HI_RA_RAM_PAR_3_ACP_RW_WRITE 0x8
+#define SIO_HI_RA_RAM_PAR_4__A 0x420036
+#define SIO_HI_RA_RAM_PAR_5__A 0x420037
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLV0_SLAVE 0x1
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__M 0x8
+#define SIO_HI_RA_RAM_PAR_5_CFG_SLEEP_ZZZ 0x8
+#define SIO_HI_RA_RAM_PAR_6__A 0x420038
+#define SIO_CC_PLL_LOCK__A 0x450012
+#define SIO_CC_PWD_MODE__A 0x450015
+#define SIO_CC_PWD_MODE_LEVEL_NONE 0x0
+#define SIO_CC_PWD_MODE_LEVEL_OFDM 0x1
+#define SIO_CC_PWD_MODE_LEVEL_CLOCK 0x2
+#define SIO_CC_PWD_MODE_LEVEL_PLL 0x3
+#define SIO_CC_PWD_MODE_LEVEL_OSC 0x4
+#define SIO_CC_SOFT_RST__A 0x450016
+#define SIO_CC_SOFT_RST_OFDM__M 0x1
+#define SIO_CC_SOFT_RST_SYS__M 0x2
+#define SIO_CC_SOFT_RST_OSC__M 0x4
+#define SIO_CC_UPDATE__A 0x450017
+#define SIO_CC_UPDATE_KEY 0xFABA
+#define SIO_OFDM_SH_OFDM_RING_ENABLE__A 0x470010
+#define SIO_OFDM_SH_OFDM_RING_ENABLE_OFF 0x0
+#define SIO_OFDM_SH_OFDM_RING_ENABLE_ON 0x1
+#define SIO_OFDM_SH_OFDM_RING_STATUS__A 0x470012
+#define SIO_OFDM_SH_OFDM_RING_STATUS_DOWN 0x0
+#define SIO_OFDM_SH_OFDM_RING_STATUS_ENABLED 0x1
+#define SIO_BL_COMM_EXEC__A 0x480000
+#define SIO_BL_COMM_EXEC_ACTIVE 0x1
+#define SIO_BL_STATUS__A 0x480010
+#define SIO_BL_MODE__A 0x480011
+#define SIO_BL_MODE_DIRECT 0x0
+#define SIO_BL_MODE_CHAIN 0x1
+#define SIO_BL_ENABLE__A 0x480012
+#define SIO_BL_ENABLE_ON 0x1
+#define SIO_BL_TGT_HDR__A 0x480014
+#define SIO_BL_TGT_ADDR__A 0x480015
+#define SIO_BL_SRC_ADDR__A 0x480016
+#define SIO_BL_SRC_LEN__A 0x480017
+#define SIO_BL_CHAIN_ADDR__A 0x480018
+#define SIO_BL_CHAIN_LEN__A 0x480019
+#define SIO_PDR_MON_CFG__A 0x7F0010
+#define SIO_PDR_UIO_IN_HI__A 0x7F0015
+#define SIO_PDR_UIO_OUT_LO__A 0x7F0016
+#define SIO_PDR_OHW_CFG__A 0x7F001F
+#define SIO_PDR_OHW_CFG_FREF_SEL__M 0x3
+#define SIO_PDR_MSTRT_CFG__A 0x7F0025
+#define SIO_PDR_MERR_CFG__A 0x7F0026
+#define SIO_PDR_MCLK_CFG__A 0x7F0028
+#define SIO_PDR_MCLK_CFG_DRIVE__B 3
+#define SIO_PDR_MVAL_CFG__A 0x7F0029
+#define SIO_PDR_MD0_CFG__A 0x7F002A
+#define SIO_PDR_MD0_CFG_DRIVE__B 3
+#define SIO_PDR_MD1_CFG__A 0x7F002B
+#define SIO_PDR_MD2_CFG__A 0x7F002C
+#define SIO_PDR_MD3_CFG__A 0x7F002D
+#define SIO_PDR_MD4_CFG__A 0x7F002F
+#define SIO_PDR_MD5_CFG__A 0x7F0030
+#define SIO_PDR_MD6_CFG__A 0x7F0031
+#define SIO_PDR_MD7_CFG__A 0x7F0032
+#define SIO_PDR_SMA_TX_CFG__A 0x7F0038
diff --git a/drivers/media/dvb/frontends/itd1000.c b/drivers/media/dvb/frontends/itd1000.c
index f7a40a18777..aa9ccb821fa 100644
--- a/drivers/media/dvb/frontends/itd1000.c
+++ b/drivers/media/dvb/frontends/itd1000.c
@@ -35,21 +35,18 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
-#define deb(args...) do { \
+#define itd_dbg(args...) do { \
if (debug) { \
printk(KERN_DEBUG "ITD1000: " args);\
- printk("\n"); \
} \
} while (0)
-#define warn(args...) do { \
+#define itd_warn(args...) do { \
printk(KERN_WARNING "ITD1000: " args); \
- printk("\n"); \
} while (0)
-#define info(args...) do { \
+#define itd_info(args...) do { \
printk(KERN_INFO "ITD1000: " args); \
- printk("\n"); \
} while (0)
/* don't write more than one byte with flexcop behind */
@@ -62,7 +59,7 @@ static int itd1000_write_regs(struct itd1000_state *state, u8 reg, u8 v[], u8 le
buf[0] = reg;
memcpy(&buf[1], v, len);
- /* deb("wr %02x: %02x", reg, v[0]); */
+ /* itd_dbg("wr %02x: %02x\n", reg, v[0]); */
if (i2c_transfer(state->i2c, &msg, 1) != 1) {
printk(KERN_WARNING "itd1000 I2C write failed\n");
@@ -83,7 +80,7 @@ static int itd1000_read_reg(struct itd1000_state *state, u8 reg)
itd1000_write_regs(state, (reg - 1) & 0xff, &state->shadow[(reg - 1) & 0xff], 1);
if (i2c_transfer(state->i2c, msg, 2) != 2) {
- warn("itd1000 I2C read failed");
+ itd_warn("itd1000 I2C read failed\n");
return -EREMOTEIO;
}
return val;
@@ -127,14 +124,14 @@ static void itd1000_set_lpf_bw(struct itd1000_state *state, u32 symbol_rate)
u8 bbgvmin = itd1000_read_reg(state, BBGVMIN) & 0xf0;
u8 bw = itd1000_read_reg(state, BW) & 0xf0;
- deb("symbol_rate = %d", symbol_rate);
+ itd_dbg("symbol_rate = %d\n", symbol_rate);
/* not sure what is that ? - starting to download the table */
itd1000_write_reg(state, CON1, con1 | (1 << 1));
for (i = 0; i < ARRAY_SIZE(itd1000_lpf_pga); i++)
if (symbol_rate < itd1000_lpf_pga[i].symbol_rate) {
- deb("symrate: index: %d pgaext: %x, bbgvmin: %x", i, itd1000_lpf_pga[i].pgaext, itd1000_lpf_pga[i].bbgvmin);
+ itd_dbg("symrate: index: %d pgaext: %x, bbgvmin: %x\n", i, itd1000_lpf_pga[i].pgaext, itd1000_lpf_pga[i].bbgvmin);
itd1000_write_reg(state, PLLFH, pllfh | (itd1000_lpf_pga[i].pgaext << 4));
itd1000_write_reg(state, BBGVMIN, bbgvmin | (itd1000_lpf_pga[i].bbgvmin));
itd1000_write_reg(state, BW, bw | (i & 0x0f));
@@ -182,7 +179,7 @@ static void itd1000_set_vco(struct itd1000_state *state, u32 freq_khz)
adcout = itd1000_read_reg(state, PLLLOCK) & 0x0f;
- deb("VCO: %dkHz: %d -> ADCOUT: %d %02x", freq_khz, itd1000_vcorg[i].vcorg, adcout, vco_chp1_i2c);
+ itd_dbg("VCO: %dkHz: %d -> ADCOUT: %d %02x\n", freq_khz, itd1000_vcorg[i].vcorg, adcout, vco_chp1_i2c);
if (adcout > 13) {
if (!(itd1000_vcorg[i].vcorg == 7 || itd1000_vcorg[i].vcorg == 15))
@@ -232,7 +229,7 @@ static void itd1000_set_lo(struct itd1000_state *state, u32 freq_khz)
pllf = (u32) tmp;
state->frequency = ((plln * 1000) + (pllf * 1000)/1048576) * 2*FREF;
- deb("frequency: %dkHz (wanted) %dkHz (set), PLLF = %d, PLLN = %d", freq_khz, state->frequency, pllf, plln);
+ itd_dbg("frequency: %dkHz (wanted) %dkHz (set), PLLF = %d, PLLN = %d\n", freq_khz, state->frequency, pllf, plln);
itd1000_write_reg(state, PLLNH, 0x80); /* PLLNH */;
itd1000_write_reg(state, PLLNL, plln & 0xff);
@@ -242,7 +239,7 @@ static void itd1000_set_lo(struct itd1000_state *state, u32 freq_khz)
for (i = 0; i < ARRAY_SIZE(itd1000_fre_values); i++) {
if (freq_khz <= itd1000_fre_values[i].freq) {
- deb("fre_values: %d", i);
+ itd_dbg("fre_values: %d\n", i);
itd1000_write_reg(state, RFTR, itd1000_fre_values[i].values[0]);
for (j = 0; j < 9; j++)
itd1000_write_reg(state, RFST1+j, itd1000_fre_values[i].values[j+1]);
@@ -382,7 +379,7 @@ struct dvb_frontend *itd1000_attach(struct dvb_frontend *fe, struct i2c_adapter
kfree(state);
return NULL;
}
- info("successfully identified (ID: %d)", i);
+ itd_info("successfully identified (ID: %d)\n", i);
memset(state->shadow, 0xff, sizeof(state->shadow));
for (i = 0x65; i < 0x9c; i++)
diff --git a/drivers/media/dvb/frontends/nxt6000.c b/drivers/media/dvb/frontends/nxt6000.c
index a763ec756f7..6599b8fea9e 100644
--- a/drivers/media/dvb/frontends/nxt6000.c
+++ b/drivers/media/dvb/frontends/nxt6000.c
@@ -50,7 +50,7 @@ static int nxt6000_writereg(struct nxt6000_state* state, u8 reg, u8 data)
if ((ret = i2c_transfer(state->i2c, &msg, 1)) != 1)
dprintk("nxt6000: nxt6000_write error (reg: 0x%02X, data: 0x%02X, ret: %d)\n", reg, data, ret);
- return (ret != 1) ? -EFAULT : 0;
+ return (ret != 1) ? -EIO : 0;
}
static u8 nxt6000_readreg(struct nxt6000_state* state, u8 reg)
diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c
index 17f8cdf8afe..3879d2e378a 100644
--- a/drivers/media/dvb/frontends/s5h1420.c
+++ b/drivers/media/dvb/frontends/s5h1420.c
@@ -634,7 +634,7 @@ static int s5h1420_set_frontend(struct dvb_frontend* fe,
struct s5h1420_state* state = fe->demodulator_priv;
int frequency_delta;
struct dvb_frontend_tune_settings fesettings;
- uint8_t clock_settting;
+ uint8_t clock_setting;
dprintk("enter %s\n", __func__);
@@ -684,19 +684,19 @@ static int s5h1420_set_frontend(struct dvb_frontend* fe,
switch (state->fclk) {
default:
case 88000000:
- clock_settting = 80;
+ clock_setting = 80;
break;
case 86000000:
- clock_settting = 78;
+ clock_setting = 78;
break;
case 80000000:
- clock_settting = 72;
+ clock_setting = 72;
break;
case 59000000:
- clock_settting = 51;
+ clock_setting = 51;
break;
case 44000000:
- clock_settting = 36;
+ clock_setting = 36;
break;
}
dprintk("pll01: %d, ToneFreq: %d\n", state->fclk/1000000 - 8, (state->fclk + (TONE_FREQ * 32) - 1) / (TONE_FREQ * 32));
diff --git a/drivers/media/dvb/frontends/tda18271c2dd.c b/drivers/media/dvb/frontends/tda18271c2dd.c
new file mode 100644
index 00000000000..0384e8da4f5
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda18271c2dd.c
@@ -0,0 +1,1251 @@
+/*
+ * tda18271c2dd: Driver for the TDA18271C2 tuner
+ *
+ * Copyright (C) 2010 Digital Devices GmbH
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/version.h>
+#include <asm/div64.h>
+
+#include "dvb_frontend.h"
+
+struct SStandardParam {
+ s32 m_IFFrequency;
+ u32 m_BandWidth;
+ u8 m_EP3_4_0;
+ u8 m_EB22;
+};
+
+struct SMap {
+ u32 m_Frequency;
+ u8 m_Param;
+};
+
+struct SMapI {
+ u32 m_Frequency;
+ s32 m_Param;
+};
+
+struct SMap2 {
+ u32 m_Frequency;
+ u8 m_Param1;
+ u8 m_Param2;
+};
+
+struct SRFBandMap {
+ u32 m_RF_max;
+ u32 m_RF1_Default;
+ u32 m_RF2_Default;
+ u32 m_RF3_Default;
+};
+
+enum ERegister {
+ ID = 0,
+ TM,
+ PL,
+ EP1, EP2, EP3, EP4, EP5,
+ CPD, CD1, CD2, CD3,
+ MPD, MD1, MD2, MD3,
+ EB1, EB2, EB3, EB4, EB5, EB6, EB7, EB8, EB9, EB10,
+ EB11, EB12, EB13, EB14, EB15, EB16, EB17, EB18, EB19, EB20,
+ EB21, EB22, EB23,
+ NUM_REGS
+};
+
+struct tda_state {
+ struct i2c_adapter *i2c;
+ u8 adr;
+
+ u32 m_Frequency;
+ u32 IF;
+
+ u8 m_IFLevelAnalog;
+ u8 m_IFLevelDigital;
+ u8 m_IFLevelDVBC;
+ u8 m_IFLevelDVBT;
+
+ u8 m_EP4;
+ u8 m_EP3_Standby;
+
+ bool m_bMaster;
+
+ s32 m_SettlingTime;
+
+ u8 m_Regs[NUM_REGS];
+
+ /* Tracking filter settings for band 0..6 */
+ u32 m_RF1[7];
+ s32 m_RF_A1[7];
+ s32 m_RF_B1[7];
+ u32 m_RF2[7];
+ s32 m_RF_A2[7];
+ s32 m_RF_B2[7];
+ u32 m_RF3[7];
+
+ u8 m_TMValue_RFCal; /* Calibration temperature */
+
+ bool m_bFMInput; /* true to use Pin 8 for FM Radio */
+
+};
+
+static int PowerScan(struct tda_state *state,
+ u8 RFBand, u32 RF_in,
+ u32 *pRF_Out, bool *pbcal);
+
+static int i2c_readn(struct i2c_adapter *adapter, u8 adr, u8 *data, int len)
+{
+ struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD,
+ .buf = data, .len = len} };
+ return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1;
+}
+
+static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
+{
+ struct i2c_msg msg = {.addr = adr, .flags = 0,
+ .buf = data, .len = len};
+
+ if (i2c_transfer(adap, &msg, 1) != 1) {
+ printk(KERN_ERR "tda18271c2dd: i2c write error at addr %i\n", adr);
+ return -1;
+ }
+ return 0;
+}
+
+static int WriteRegs(struct tda_state *state,
+ u8 SubAddr, u8 *Regs, u16 nRegs)
+{
+ u8 data[nRegs+1];
+
+ data[0] = SubAddr;
+ memcpy(data + 1, Regs, nRegs);
+ return i2c_write(state->i2c, state->adr, data, nRegs+1);
+}
+
+static int WriteReg(struct tda_state *state, u8 SubAddr, u8 Reg)
+{
+ u8 msg[2] = {SubAddr, Reg};
+
+ return i2c_write(state->i2c, state->adr, msg, 2);
+}
+
+static int Read(struct tda_state *state, u8 * Regs)
+{
+ return i2c_readn(state->i2c, state->adr, Regs, 16);
+}
+
+static int ReadExtented(struct tda_state *state, u8 * Regs)
+{
+ return i2c_readn(state->i2c, state->adr, Regs, NUM_REGS);
+}
+
+static int UpdateRegs(struct tda_state *state, u8 RegFrom, u8 RegTo)
+{
+ return WriteRegs(state, RegFrom,
+ &state->m_Regs[RegFrom], RegTo-RegFrom+1);
+}
+static int UpdateReg(struct tda_state *state, u8 Reg)
+{
+ return WriteReg(state, Reg, state->m_Regs[Reg]);
+}
+
+#include "tda18271c2dd_maps.h"
+
+static void reset(struct tda_state *state)
+{
+ u32 ulIFLevelAnalog = 0;
+ u32 ulIFLevelDigital = 2;
+ u32 ulIFLevelDVBC = 7;
+ u32 ulIFLevelDVBT = 6;
+ u32 ulXTOut = 0;
+ u32 ulStandbyMode = 0x06; /* standby, but leave the oscillator on */
+ u32 ulSlave = 0;
+ u32 ulFMInput = 0;
+ u32 ulSettlingTime = 100;
+
+ state->m_Frequency = 0;
+ state->m_SettlingTime = 100;
+ state->m_IFLevelAnalog = (ulIFLevelAnalog & 0x07) << 2;
+ state->m_IFLevelDigital = (ulIFLevelDigital & 0x07) << 2;
+ state->m_IFLevelDVBC = (ulIFLevelDVBC & 0x07) << 2;
+ state->m_IFLevelDVBT = (ulIFLevelDVBT & 0x07) << 2;
+
+ state->m_EP4 = 0x20;
+ if (ulXTOut != 0)
+ state->m_EP4 |= 0x40;
+
+ state->m_EP3_Standby = ((ulStandbyMode & 0x07) << 5) | 0x0F;
+ state->m_bMaster = (ulSlave == 0);
+
+ state->m_SettlingTime = ulSettlingTime;
+
+ state->m_bFMInput = (ulFMInput == 2);
+}
+
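+/*
+ * The frequency maps in tda18271c2dd_maps.h are sorted in ascending order and
+ * terminated by a zero entry; the SearchMap helpers below return the
+ * parameter(s) of the first entry whose frequency is >= the requested one.
+ */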
+static bool SearchMap1(struct SMap Map[],
+ u32 Frequency, u8 *pParam)
+{
+ int i = 0;
+
+ while ((Map[i].m_Frequency != 0) && (Frequency > Map[i].m_Frequency))
+ i += 1;
+ if (Map[i].m_Frequency == 0)
+ return false;
+ *pParam = Map[i].m_Param;
+ return true;
+}
+
+static bool SearchMap2(struct SMapI Map[],
+ u32 Frequency, s32 *pParam)
+{
+ int i = 0;
+
+ while ((Map[i].m_Frequency != 0) &&
+ (Frequency > Map[i].m_Frequency))
+ i += 1;
+ if (Map[i].m_Frequency == 0)
+ return false;
+ *pParam = Map[i].m_Param;
+ return true;
+}
+
+static bool SearchMap3(struct SMap2 Map[], u32 Frequency,
+ u8 *pParam1, u8 *pParam2)
+{
+ int i = 0;
+
+ while ((Map[i].m_Frequency != 0) &&
+ (Frequency > Map[i].m_Frequency))
+ i += 1;
+ if (Map[i].m_Frequency == 0)
+ return false;
+ *pParam1 = Map[i].m_Param1;
+ *pParam2 = Map[i].m_Param2;
+ return true;
+}
+
+static bool SearchMap4(struct SRFBandMap Map[],
+ u32 Frequency, u8 *pRFBand)
+{
+ int i = 0;
+
+ while (i < 7 && (Frequency > Map[i].m_RF_max))
+ i += 1;
+ if (i == 7)
+ return false;
+ *pRFBand = i;
+ return true;
+}
+
+static int ThermometerRead(struct tda_state *state, u8 *pTM_Value)
+{
+ int status = 0;
+
+ do {
+ u8 Regs[16];
+ state->m_Regs[TM] |= 0x10;
+ status = UpdateReg(state, TM);
+ if (status < 0)
+ break;
+ status = Read(state, Regs);
+ if (status < 0)
+ break;
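+ /* If the 4-bit reading sits at the end of the selected range, toggle the range-select bit (0x20) and read again */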
+ if (((Regs[TM] & 0x0F) == 0 && (Regs[TM] & 0x20) == 0x20) ||
+ ((Regs[TM] & 0x0F) == 8 && (Regs[TM] & 0x20) == 0x00)) {
+ state->m_Regs[TM] ^= 0x20;
+ status = UpdateReg(state, TM);
+ if (status < 0)
+ break;
+ msleep(10);
+ status = Read(state, Regs);
+ if (status < 0)
+ break;
+ }
+ *pTM_Value = (Regs[TM] & 0x20)
+ ? m_Thermometer_Map_2[Regs[TM] & 0x0F]
+ : m_Thermometer_Map_1[Regs[TM] & 0x0F] ;
+ state->m_Regs[TM] &= ~0x10; /* Thermometer off */
+ status = UpdateReg(state, TM);
+ if (status < 0)
+ break;
+ state->m_Regs[EP4] &= ~0x03; /* CAL_mode = 0 (purpose unclear) */
+ status = UpdateReg(state, EP4);
+ if (status < 0)
+ break;
+ } while (0);
+
+ return status;
+}
+
+static int StandBy(struct tda_state *state)
+{
+ int status = 0;
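+ /* Enter standby: rewrite the AGC1/AGC2 control bits and apply the EP3 standby value configured in reset() */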
+ do {
+ state->m_Regs[EB12] &= ~0x20; /* PD_AGC1_Det = 0 */
+ status = UpdateReg(state, EB12);
+ if (status < 0)
+ break;
+ state->m_Regs[EB18] &= ~0x83; /* AGC1_loop_off = 0, AGC1_Gain = 6 dB */
+ status = UpdateReg(state, EB18);
+ if (status < 0)
+ break;
+ state->m_Regs[EB21] |= 0x03; /* AGC2_Gain = -6 dB */
+ state->m_Regs[EP3] = state->m_EP3_Standby;
+ status = UpdateReg(state, EP3);
+ if (status < 0)
+ break;
+ state->m_Regs[EB23] &= ~0x06; /* ForceLP_Fc2_En = 0, LP_Fc[2] = 0 */
+ status = UpdateRegs(state, EB21, EB23);
+ if (status < 0)
+ break;
+ } while (0);
+ return status;
+}
+
+static int CalcMainPLL(struct tda_state *state, u32 freq)
+{
+
+ u8 PostDiv;
+ u8 Div;
+ u64 OscFreq;
+ u32 MainDiv;
+
+ if (!SearchMap3(m_Main_PLL_Map, freq, &PostDiv, &Div))
+ return -EINVAL;
+
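+ /* Main divider word: freq * Div, scaled by 2^14 against what looks like a 16 MHz crystal reference */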
+ OscFreq = (u64) freq * (u64) Div;
+ OscFreq *= (u64) 16384;
+ do_div(OscFreq, (u64)16000000);
+ MainDiv = OscFreq;
+
+ state->m_Regs[MPD] = PostDiv & 0x77;
+ state->m_Regs[MD1] = ((MainDiv >> 16) & 0x7F);
+ state->m_Regs[MD2] = ((MainDiv >> 8) & 0xFF);
+ state->m_Regs[MD3] = (MainDiv & 0xFF);
+
+ return UpdateRegs(state, MPD, MD3);
+}
+
+static int CalcCalPLL(struct tda_state *state, u32 freq)
+{
+ u8 PostDiv;
+ u8 Div;
+ u64 OscFreq;
+ u32 CalDiv;
+
+ if (!SearchMap3(m_Cal_PLL_Map, freq, &PostDiv, &Div))
+ return -EINVAL;
+
+ OscFreq = (u64)freq * (u64)Div;
+ /* CalDiv = u32( OscFreq * 16384 / 16000000 ); */
+ OscFreq *= (u64)16384;
+ do_div(OscFreq, (u64)16000000);
+ CalDiv = OscFreq;
+
+ state->m_Regs[CPD] = PostDiv;
+ state->m_Regs[CD1] = ((CalDiv >> 16) & 0xFF);
+ state->m_Regs[CD2] = ((CalDiv >> 8) & 0xFF);
+ state->m_Regs[CD3] = (CalDiv & 0xFF);
+
+ return UpdateRegs(state, CPD, CD3);
+}
+
+static int CalibrateRF(struct tda_state *state,
+ u8 RFBand, u32 freq, s32 *pCprog)
+{
+ int status = 0;
+ u8 Regs[NUM_REGS];
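+ /* Tracking filter calibration: program the band/gain settings, force the LO and CAL sources, run the cal PLL at freq with the main PLL offset by 1 MHz, then read the resulting capacitor code back from EB14 */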
+ do {
+ u8 BP_Filter = 0;
+ u8 GainTaper = 0;
+ u8 RFC_K = 0;
+ u8 RFC_M = 0;
+
+ state->m_Regs[EP4] &= ~0x03; /* CAL_mode = 0 */
+ status = UpdateReg(state, EP4);
+ if (status < 0)
+ break;
+ state->m_Regs[EB18] |= 0x03; /* AGC1_Gain = 3 */
+ status = UpdateReg(state, EB18);
+ if (status < 0)
+ break;
+
+ /* Switching off LT (as datasheet says) causes calibration on C1 to fail */
+ /* (Readout of Cprog is always 255) */
+ if (state->m_Regs[ID] != 0x83) /* C1: ID == 83, C2: ID == 84 */
+ state->m_Regs[EP3] |= 0x40; /* SM_LT = 1 */
+
+ if (!(SearchMap1(m_BP_Filter_Map, freq, &BP_Filter) &&
+ SearchMap1(m_GainTaper_Map, freq, &GainTaper) &&
+ SearchMap3(m_KM_Map, freq, &RFC_K, &RFC_M)))
+ return -EINVAL;
+
+ state->m_Regs[EP1] = (state->m_Regs[EP1] & ~0x07) | BP_Filter;
+ state->m_Regs[EP2] = (RFBand << 5) | GainTaper;
+
+ state->m_Regs[EB13] = (state->m_Regs[EB13] & ~0x7C) | (RFC_K << 4) | (RFC_M << 2);
+
+ status = UpdateRegs(state, EP1, EP3);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EB13);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EB4] |= 0x20; /* LO_ForceSrce = 1 */
+ status = UpdateReg(state, EB4);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EB7] |= 0x20; /* CAL_ForceSrce = 1 */
+ status = UpdateReg(state, EB7);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EB14] = 0; /* RFC_Cprog = 0 */
+ status = UpdateReg(state, EB14);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EB20] &= ~0x20; /* ForceLock = 0; */
+ status = UpdateReg(state, EB20);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EP4] |= 0x03; /* CAL_Mode = 3 */
+ status = UpdateRegs(state, EP4, EP5);
+ if (status < 0)
+ break;
+
+ status = CalcCalPLL(state, freq);
+ if (status < 0)
+ break;
+ status = CalcMainPLL(state, freq + 1000000);
+ if (status < 0)
+ break;
+
+ msleep(5);
+ status = UpdateReg(state, EP2);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EP1);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EP2);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EP1);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EB4] &= ~0x20; /* LO_ForceSrce = 0 */
+ status = UpdateReg(state, EB4);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EB7] &= ~0x20; /* CAL_ForceSrce = 0 */
+ status = UpdateReg(state, EB7);
+ if (status < 0)
+ break;
+ msleep(10);
+
+ state->m_Regs[EB20] |= 0x20; /* ForceLock = 1; */
+ status = UpdateReg(state, EB20);
+ if (status < 0)
+ break;
+ msleep(60);
+
+ state->m_Regs[EP4] &= ~0x03; /* CAL_Mode = 0 */
+ state->m_Regs[EP3] &= ~0x40; /* SM_LT = 0 */
+ state->m_Regs[EB18] &= ~0x03; /* AGC1_Gain = 0 */
+ status = UpdateReg(state, EB18);
+ if (status < 0)
+ break;
+ status = UpdateRegs(state, EP3, EP4);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EP1);
+ if (status < 0)
+ break;
+
+ status = ReadExtented(state, Regs);
+ if (status < 0)
+ break;
+
+ *pCprog = Regs[EB14];
+
+ } while (0);
+ return status;
+}
+
+static int RFTrackingFiltersInit(struct tda_state *state,
+ u8 RFBand)
+{
+ int status = 0;
+
+ u32 RF1 = m_RF_Band_Map[RFBand].m_RF1_Default;
+ u32 RF2 = m_RF_Band_Map[RFBand].m_RF2_Default;
+ u32 RF3 = m_RF_Band_Map[RFBand].m_RF3_Default;
+ bool bcal = false;
+
+ s32 Cprog_cal1 = 0;
+ s32 Cprog_table1 = 0;
+ s32 Cprog_cal2 = 0;
+ s32 Cprog_table2 = 0;
+ s32 Cprog_cal3 = 0;
+ s32 Cprog_table3 = 0;
+
+ state->m_RF_A1[RFBand] = 0;
+ state->m_RF_B1[RFBand] = 0;
+ state->m_RF_A2[RFBand] = 0;
+ state->m_RF_B2[RFBand] = 0;
+
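+ /* Calibrate at up to three default frequencies for this band and derive the slope/offset pairs (A1/B1, A2/B2) later used to interpolate the Cprog value */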
+ do {
+ status = PowerScan(state, RFBand, RF1, &RF1, &bcal);
+ if (status < 0)
+ break;
+ if (bcal) {
+ status = CalibrateRF(state, RFBand, RF1, &Cprog_cal1);
+ if (status < 0)
+ break;
+ }
+ SearchMap2(m_RF_Cal_Map, RF1, &Cprog_table1);
+ if (!bcal)
+ Cprog_cal1 = Cprog_table1;
+ state->m_RF_B1[RFBand] = Cprog_cal1 - Cprog_table1;
+ /* state->m_RF_A1[RF_Band] = ???? */
+
+ if (RF2 == 0)
+ break;
+
+ status = PowerScan(state, RFBand, RF2, &RF2, &bcal);
+ if (status < 0)
+ break;
+ if (bcal) {
+ status = CalibrateRF(state, RFBand, RF2, &Cprog_cal2);
+ if (status < 0)
+ break;
+ }
+ SearchMap2(m_RF_Cal_Map, RF2, &Cprog_table2);
+ if (!bcal)
+ Cprog_cal2 = Cprog_table2;
+
+ state->m_RF_A1[RFBand] =
+ (Cprog_cal2 - Cprog_table2 - Cprog_cal1 + Cprog_table1) /
+ ((s32)(RF2) - (s32)(RF1));
+
+ if (RF3 == 0)
+ break;
+
+ status = PowerScan(state, RFBand, RF3, &RF3, &bcal);
+ if (status < 0)
+ break;
+ if (bcal) {
+ status = CalibrateRF(state, RFBand, RF3, &Cprog_cal3);
+ if (status < 0)
+ break;
+ }
+ SearchMap2(m_RF_Cal_Map, RF3, &Cprog_table3);
+ if (!bcal)
+ Cprog_cal3 = Cprog_table3;
+ state->m_RF_A2[RFBand] = (Cprog_cal3 - Cprog_table3 - Cprog_cal2 + Cprog_table2) / ((s32)(RF3) - (s32)(RF2));
+ state->m_RF_B2[RFBand] = Cprog_cal2 - Cprog_table2;
+
+ } while (0);
+
+ state->m_RF1[RFBand] = RF1;
+ state->m_RF2[RFBand] = RF2;
+ state->m_RF3[RFBand] = RF3;
+
+#if 0
+ printk(KERN_ERR "tda18271c2dd: %s %d RF1 = %d A1 = %d B1 = %d RF2 = %d A2 = %d B2 = %d RF3 = %d\n", __func__,
+ RFBand, RF1, state->m_RF_A1[RFBand], state->m_RF_B1[RFBand], RF2,
+ state->m_RF_A2[RFBand], state->m_RF_B2[RFBand], RF3);
+#endif
+
+ return status;
+}
+
+static int PowerScan(struct tda_state *state,
+ u8 RFBand, u32 RF_in, u32 *pRF_Out, bool *pbcal)
+{
+ int status = 0;
+ do {
+ u8 Gain_Taper = 0;
+ s32 RFC_Cprog = 0;
+ u8 CID_Target = 0;
+ u8 CountLimit = 0;
+ u32 freq_MainPLL;
+ u8 Regs[NUM_REGS];
+ u8 CID_Gain;
+ s32 Count = 0;
+ int sign = 1;
+ bool wait = false;
+
+ if (!(SearchMap2(m_RF_Cal_Map, RF_in, &RFC_Cprog) &&
+ SearchMap1(m_GainTaper_Map, RF_in, &Gain_Taper) &&
+ SearchMap3(m_CID_Target_Map, RF_in, &CID_Target, &CountLimit))) {
+
+ printk(KERN_ERR "tda18271c2dd: %s Search map failed\n", __func__);
+ return -EINVAL;
+ }
+
+ state->m_Regs[EP2] = (RFBand << 5) | Gain_Taper;
+ state->m_Regs[EB14] = (RFC_Cprog);
+ status = UpdateReg(state, EP2);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EB14);
+ if (status < 0)
+ break;
+
+ freq_MainPLL = RF_in + 1000000;
+ status = CalcMainPLL(state, freq_MainPLL);
+ if (status < 0)
+ break;
+ msleep(5);
+ state->m_Regs[EP4] = (state->m_Regs[EP4] & ~0x03) | 1; /* CAL_mode = 1 */
+ status = UpdateReg(state, EP4);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EP2); /* Launch power measurement */
+ if (status < 0)
+ break;
+ status = ReadExtented(state, Regs);
+ if (status < 0)
+ break;
+ CID_Gain = Regs[EB10] & 0x3F;
+ state->m_Regs[ID] = Regs[ID]; /* Chip version (needed for the C1 workaround in CalibrateRF) */
+
+ *pRF_Out = RF_in;
+
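+ /* Step the main PLL in 200 kHz increments, first upwards then downwards, until the measured CID gain reaches the target or the per-band count limit is exceeded */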
+ while (CID_Gain < CID_Target) {
+ freq_MainPLL = RF_in + sign * Count + 1000000;
+ status = CalcMainPLL(state, freq_MainPLL);
+ if (status < 0)
+ break;
+ msleep(wait ? 5 : 1);
+ wait = false;
+ status = UpdateReg(state, EP2); /* Launch power measurement */
+ if (status < 0)
+ break;
+ status = ReadExtented(state, Regs);
+ if (status < 0)
+ break;
+ CID_Gain = Regs[EB10] & 0x3F;
+ Count += 200000;
+
+ if (Count < CountLimit * 100000)
+ continue;
+ if (sign < 0)
+ break;
+
+ sign = -sign;
+ Count = 200000;
+ wait = true;
+ }
+ if (status < 0)
+ break;
+ if (CID_Gain >= CID_Target) {
+ *pbcal = true;
+ *pRF_Out = freq_MainPLL - 1000000;
+ } else
+ *pbcal = false;
+ } while (0);
+
+ return status;
+}
+
+static int PowerScanInit(struct tda_state *state)
+{
+ int status = 0;
+ do {
+ state->m_Regs[EP3] = (state->m_Regs[EP3] & ~0x1F) | 0x12;
+ state->m_Regs[EP4] = (state->m_Regs[EP4] & ~0x1F); /* IF level = 0, CAL mode = 0 */
+ status = UpdateRegs(state, EP3, EP4);
+ if (status < 0)
+ break;
+ state->m_Regs[EB18] = (state->m_Regs[EB18] & ~0x03); /* AGC 1 Gain = 0 */
+ status = UpdateReg(state, EB18);
+ if (status < 0)
+ break;
+ state->m_Regs[EB21] = (state->m_Regs[EB21] & ~0x03); /* AGC 2 Gain = 0 (Datasheet = 3) */
+ state->m_Regs[EB23] = (state->m_Regs[EB23] | 0x06); /* ForceLP_Fc2_En = 1, LPFc[2] = 1 */
+ status = UpdateRegs(state, EB21, EB23);
+ if (status < 0)
+ break;
+ } while (0);
+ return status;
+}
+
+static int CalcRFFilterCurve(struct tda_state *state)
+{
+ int status = 0;
+ do {
+ msleep(200); /* Temperature stabilisation */
+ status = PowerScanInit(state);
+ if (status < 0)
+ break;
+ status = RFTrackingFiltersInit(state, 0);
+ if (status < 0)
+ break;
+ status = RFTrackingFiltersInit(state, 1);
+ if (status < 0)
+ break;
+ status = RFTrackingFiltersInit(state, 2);
+ if (status < 0)
+ break;
+ status = RFTrackingFiltersInit(state, 3);
+ if (status < 0)
+ break;
+ status = RFTrackingFiltersInit(state, 4);
+ if (status < 0)
+ break;
+ status = RFTrackingFiltersInit(state, 5);
+ if (status < 0)
+ break;
+ status = RFTrackingFiltersInit(state, 6);
+ if (status < 0)
+ break;
+ status = ThermometerRead(state, &state->m_TMValue_RFCal); /* also switches off Cal mode !!! */
+ if (status < 0)
+ break;
+ } while (0);
+
+ return status;
+}
+
+static int FixedContentsI2CUpdate(struct tda_state *state)
+{
+ static u8 InitRegs[] = {
+ 0x08, 0x80, 0xC6,
+ 0xDF, 0x16, 0x60, 0x80,
+ 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0xFC, 0x01, 0x84, 0x41,
+ 0x01, 0x84, 0x40, 0x07,
+ 0x00, 0x00, 0x96, 0x3F,
+ 0xC1, 0x00, 0x8F, 0x00,
+ 0x00, 0x8C, 0x00, 0x20,
+ 0xB3, 0x48, 0xB0,
+ };
+ int status = 0;
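+ /* Load the default register image, then run the image rejection calibration (IRC) sequence for the low, mid and high bands */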
+ memcpy(&state->m_Regs[TM], InitRegs, EB23 - TM + 1);
+ do {
+ status = UpdateRegs(state, TM, EB23);
+ if (status < 0)
+ break;
+
+ /* AGC1 gain setup */
+ state->m_Regs[EB17] = 0x00;
+ status = UpdateReg(state, EB17);
+ if (status < 0)
+ break;
+ state->m_Regs[EB17] = 0x03;
+ status = UpdateReg(state, EB17);
+ if (status < 0)
+ break;
+ state->m_Regs[EB17] = 0x43;
+ status = UpdateReg(state, EB17);
+ if (status < 0)
+ break;
+ state->m_Regs[EB17] = 0x4C;
+ status = UpdateReg(state, EB17);
+ if (status < 0)
+ break;
+
+ /* IRC Cal Low band */
+ state->m_Regs[EP3] = 0x1F;
+ state->m_Regs[EP4] = 0x66;
+ state->m_Regs[EP5] = 0x81;
+ state->m_Regs[CPD] = 0xCC;
+ state->m_Regs[CD1] = 0x6C;
+ state->m_Regs[CD2] = 0x00;
+ state->m_Regs[CD3] = 0x00;
+ state->m_Regs[MPD] = 0xC5;
+ state->m_Regs[MD1] = 0x77;
+ state->m_Regs[MD2] = 0x08;
+ state->m_Regs[MD3] = 0x00;
+ status = UpdateRegs(state, EP2, MD3); /* software writes EP2-MD3, the datasheet says EP3-MD3 */
+ if (status < 0)
+ break;
+
+#if 0
+ state->m_Regs[EB4] = 0x61; /* missing in sw */
+ status = UpdateReg(state, EB4);
+ if (status < 0)
+ break;
+ msleep(1);
+ state->m_Regs[EB4] = 0x41;
+ status = UpdateReg(state, EB4);
+ if (status < 0)
+ break;
+#endif
+
+ msleep(5);
+ status = UpdateReg(state, EP1);
+ if (status < 0)
+ break;
+ msleep(5);
+
+ state->m_Regs[EP5] = 0x85;
+ state->m_Regs[CPD] = 0xCB;
+ state->m_Regs[CD1] = 0x66;
+ state->m_Regs[CD2] = 0x70;
+ status = UpdateRegs(state, EP3, CD3);
+ if (status < 0)
+ break;
+ msleep(5);
+ status = UpdateReg(state, EP2);
+ if (status < 0)
+ break;
+ msleep(30);
+
+ /* IRC Cal mid band */
+ state->m_Regs[EP5] = 0x82;
+ state->m_Regs[CPD] = 0xA8;
+ state->m_Regs[CD2] = 0x00;
+ state->m_Regs[MPD] = 0xA1; /* Datasheet = 0xA9 */
+ state->m_Regs[MD1] = 0x73;
+ state->m_Regs[MD2] = 0x1A;
+ status = UpdateRegs(state, EP3, MD3);
+ if (status < 0)
+ break;
+
+ msleep(5);
+ status = UpdateReg(state, EP1);
+ if (status < 0)
+ break;
+ msleep(5);
+
+ state->m_Regs[EP5] = 0x86;
+ state->m_Regs[CPD] = 0xA8;
+ state->m_Regs[CD1] = 0x66;
+ state->m_Regs[CD2] = 0xA0;
+ status = UpdateRegs(state, EP3, CD3);
+ if (status < 0)
+ break;
+ msleep(5);
+ status = UpdateReg(state, EP2);
+ if (status < 0)
+ break;
+ msleep(30);
+
+ /* IRC Cal high band */
+ state->m_Regs[EP5] = 0x83;
+ state->m_Regs[CPD] = 0x98;
+ state->m_Regs[CD1] = 0x65;
+ state->m_Regs[CD2] = 0x00;
+ state->m_Regs[MPD] = 0x91; /* Datasheet = 0x91 */
+ state->m_Regs[MD1] = 0x71;
+ state->m_Regs[MD2] = 0xCD;
+ status = UpdateRegs(state, EP3, MD3);
+ if (status < 0)
+ break;
+ msleep(5);
+ status = UpdateReg(state, EP1);
+ if (status < 0)
+ break;
+ msleep(5);
+ state->m_Regs[EP5] = 0x87;
+ state->m_Regs[CD1] = 0x65;
+ state->m_Regs[CD2] = 0x50;
+ status = UpdateRegs(state, EP3, CD3);
+ if (status < 0)
+ break;
+ msleep(5);
+ status = UpdateReg(state, EP2);
+ if (status < 0)
+ break;
+ msleep(30);
+
+ /* Back to normal */
+ state->m_Regs[EP4] = 0x64;
+ status = UpdateReg(state, EP4);
+ if (status < 0)
+ break;
+ status = UpdateReg(state, EP1);
+ if (status < 0)
+ break;
+
+ } while (0);
+ return status;
+}
+
+static int InitCal(struct tda_state *state)
+{
+ int status = 0;
+
+ do {
+ status = FixedContentsI2CUpdate(state);
+ if (status < 0)
+ break;
+ status = CalcRFFilterCurve(state);
+ if (status < 0)
+ break;
+ status = StandBy(state);
+ if (status < 0)
+ break;
+ /* m_bInitDone = true; */
+ } while (0);
+ return status;
+}
+
+static int RFTrackingFiltersCorrection(struct tda_state *state,
+ u32 Frequency)
+{
+ int status = 0;
+ s32 Cprog_table;
+ u8 RFBand;
+ u8 dCoverdT;
+
+ if (!SearchMap2(m_RF_Cal_Map, Frequency, &Cprog_table) ||
+ !SearchMap4(m_RF_Band_Map, Frequency, &RFBand) ||
+ !SearchMap1(m_RF_Cal_DC_Over_DT_Map, Frequency, &dCoverdT))
+
+ return -EINVAL;
+
+ do {
+ u8 TMValue_Current;
+ u32 RF1 = state->m_RF1[RFBand];
+ u32 RF2 = state->m_RF2[RFBand];
+ u32 RF3 = state->m_RF3[RFBand];
+ s32 RF_A1 = state->m_RF_A1[RFBand];
+ s32 RF_B1 = state->m_RF_B1[RFBand];
+ s32 RF_A2 = state->m_RF_A2[RFBand];
+ s32 RF_B2 = state->m_RF_B2[RFBand];
+ s32 Capprox = 0;
+ int TComp;
+
+ state->m_Regs[EP3] &= ~0xE0; /* Power up */
+ status = UpdateReg(state, EP3);
+ if (status < 0)
+ break;
+
+ status = ThermometerRead(state, &TMValue_Current);
+ if (status < 0)
+ break;
+
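+ /* Interpolate the tracking filter capacitor code from the per-band calibration data, then apply a temperature correction relative to the calibration temperature */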
+ if (RF3 == 0 || Frequency < RF2)
+ Capprox = RF_A1 * ((s32)(Frequency) - (s32)(RF1)) + RF_B1 + Cprog_table;
+ else
+ Capprox = RF_A2 * ((s32)(Frequency) - (s32)(RF2)) + RF_B2 + Cprog_table;
+
+ TComp = (int)(dCoverdT) * ((int)(TMValue_Current) - (int)(state->m_TMValue_RFCal))/1000;
+
+ Capprox += TComp;
+
+ if (Capprox < 0)
+ Capprox = 0;
+ else if (Capprox > 255)
+ Capprox = 255;
+
+
+ /* TODO Temperature compensation. There is definitely a scale factor */
+ /* missing in the datasheet, so leave it out for now. */
+ state->m_Regs[EB14] = Capprox;
+
+ status = UpdateReg(state, EB14);
+ if (status < 0)
+ break;
+
+ } while (0);
+ return status;
+}
+
+static int ChannelConfiguration(struct tda_state *state,
+ u32 Frequency, int Standard)
+{
+
+ s32 IntermediateFrequency = m_StandardTable[Standard].m_IFFrequency;
+ int status = 0;
+
+ u8 BP_Filter = 0;
+ u8 RF_Band = 0;
+ u8 GainTaper = 0;
+ u8 IR_Meas = 0;
+
+ state->IF = IntermediateFrequency;
+ /* printk("tda18271c2dd: %s Freq = %d Standard = %d IF = %d\n", __func__, Frequency, Standard, IntermediateFrequency); */
+ /* get values from tables */
+
+ if (!(SearchMap1(m_BP_Filter_Map, Frequency, &BP_Filter) &&
+ SearchMap1(m_GainTaper_Map, Frequency, &GainTaper) &&
+ SearchMap1(m_IR_Meas_Map, Frequency, &IR_Meas) &&
+ SearchMap4(m_RF_Band_Map, Frequency, &RF_Band))) {
+
+ printk(KERN_ERR "tda18271c2dd: %s SearchMap failed\n", __func__);
+ return -EINVAL;
+ }
+
+ do {
+ state->m_Regs[EP3] = (state->m_Regs[EP3] & ~0x1F) | m_StandardTable[Standard].m_EP3_4_0;
+ state->m_Regs[EP3] &= ~0x04; /* switch RFAGC to high speed mode */
+
+ /* m_EP4 default for XToutOn, CAL_Mode (0) */
+ state->m_Regs[EP4] = state->m_EP4 | ((Standard > HF_AnalogMax) ? state->m_IFLevelDigital : state->m_IFLevelAnalog);
+ /* state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelDigital; */
+ if (Standard <= HF_AnalogMax)
+ state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelAnalog;
+ else if (Standard <= HF_ATSC)
+ state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelDVBT;
+ else if (Standard <= HF_DVBC)
+ state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelDVBC;
+ else
+ state->m_Regs[EP4] = state->m_EP4 | state->m_IFLevelDigital;
+
+ if ((Standard == HF_FM_Radio) && state->m_bFMInput)
+ state->m_Regs[EP4] |= 0x80; /* use Pin 8 for FM radio */
+
+ state->m_Regs[MPD] &= ~0x80;
+ if (Standard > HF_AnalogMax)
+ state->m_Regs[MPD] |= 0x80; /* Add IF_notch for digital */
+
+ state->m_Regs[EB22] = m_StandardTable[Standard].m_EB22;
+
+ /* Note: this is missing from the flowchart in the TDA18271 specification (1.5 MHz cutoff for FM) */
+ if (Standard == HF_FM_Radio)
+ state->m_Regs[EB23] |= 0x06; /* ForceLP_Fc2_En = 1, LPFc[2] = 1 */
+ else
+ state->m_Regs[EB23] &= ~0x06; /* ForceLP_Fc2_En = 0, LPFc[2] = 0 */
+
+ status = UpdateRegs(state, EB22, EB23);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EP1] = (state->m_Regs[EP1] & ~0x07) | 0x40 | BP_Filter; /* Dis_Power_level = 1, Filter */
+ state->m_Regs[EP5] = (state->m_Regs[EP5] & ~0x07) | IR_Meas;
+ state->m_Regs[EP2] = (RF_Band << 5) | GainTaper;
+
+ state->m_Regs[EB1] = (state->m_Regs[EB1] & ~0x07) |
+ (state->m_bMaster ? 0x04 : 0x00); /* CALVCO_FortLOn = MS */
+ /* AGC1_always_master = 0 */
+ /* AGC_firstn = 0 */
+ status = UpdateReg(state, EB1);
+ if (status < 0)
+ break;
+
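+ /* Master: program the main PLL and briefly pulse LO_forceSrce; slave: program the cal PLL, copy its post-divider into MPD and pulse CAL_forceSrce instead */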
+ if (state->m_bMaster) {
+ status = CalcMainPLL(state, Frequency + IntermediateFrequency);
+ if (status < 0)
+ break;
+ status = UpdateRegs(state, TM, EP5);
+ if (status < 0)
+ break;
+ state->m_Regs[EB4] |= 0x20; /* LO_forceSrce = 1 */
+ status = UpdateReg(state, EB4);
+ if (status < 0)
+ break;
+ msleep(1);
+ state->m_Regs[EB4] &= ~0x20; /* LO_forceSrce = 0 */
+ status = UpdateReg(state, EB4);
+ if (status < 0)
+ break;
+ } else {
+ u8 PostDiv = 0;
+ u8 Div;
+ status = CalcCalPLL(state, Frequency + IntermediateFrequency);
+ if (status < 0)
+ break;
+
+ SearchMap3(m_Cal_PLL_Map, Frequency + IntermediateFrequency, &PostDiv, &Div);
+ state->m_Regs[MPD] = (state->m_Regs[MPD] & ~0x7F) | (PostDiv & 0x77);
+ status = UpdateReg(state, MPD);
+ if (status < 0)
+ break;
+ status = UpdateRegs(state, TM, EP5);
+ if (status < 0)
+ break;
+
+ state->m_Regs[EB7] |= 0x20; /* CAL_forceSrce = 1 */
+ status = UpdateReg(state, EB7);
+ if (status < 0)
+ break;
+ msleep(1);
+ state->m_Regs[EB7] &= ~0x20; /* CAL_forceSrce = 0 */
+ status = UpdateReg(state, EB7);
+ if (status < 0)
+ break;
+ }
+ msleep(20);
+ if (Standard != HF_FM_Radio)
+ state->m_Regs[EP3] |= 0x04; /* RFAGC to normal mode */
+ status = UpdateReg(state, EP3);
+ if (status < 0)
+ break;
+
+ } while (0);
+ return status;
+}
+
+static int sleep(struct dvb_frontend *fe)
+{
+ struct tda_state *state = fe->tuner_priv;
+
+ StandBy(state);
+ return 0;
+}
+
+static int init(struct dvb_frontend *fe)
+{
+ return 0;
+}
+
+static int release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+ return 0;
+}
+
+/*
+ * As defined in EN 300 429 Annex A and ITU-T J.83 Annex A, the DVB-C
+ * roll-off factor is 0.15.
+ * According to the specs, the required bandwidth is given by:
+ * Bw = Symbol_rate * (1 + 0.15)
+ * As such, the maximum symbol rate supported by 6 MHz is
+ * max_symbol_rate = 6 MHz / 1.15 = 5217391 Baud
+ * NOTE: For ITU-T J.83 Annex C, the roll-off factor is 0.13, so:
+ * max_symbol_rate = 6 MHz / 1.13 = 5309735 Baud
+ * That means an adjustment would be needed for Japan but, as the DRX-K
+ * is currently hardcoded to Annex A, let's stick with the 0.15 roll-off
+ * factor.
+ */
+#define MAX_SYMBOL_RATE_6MHz 5217391
+
+static int set_params(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ struct tda_state *state = fe->tuner_priv;
+ int status = 0;
+ int Standard;
+
+ state->m_Frequency = params->frequency;
+
+ if (fe->ops.info.type == FE_OFDM)
+ switch (params->u.ofdm.bandwidth) {
+ case BANDWIDTH_6_MHZ:
+ Standard = HF_DVBT_6MHZ;
+ break;
+ case BANDWIDTH_7_MHZ:
+ Standard = HF_DVBT_7MHZ;
+ break;
+ default:
+ case BANDWIDTH_8_MHZ:
+ Standard = HF_DVBT_8MHZ;
+ break;
+ }
+ else if (fe->ops.info.type == FE_QAM) {
+ if (params->u.qam.symbol_rate <= MAX_SYMBOL_RATE_6MHz)
+ Standard = HF_DVBC_6MHZ;
+ else
+ Standard = HF_DVBC_8MHZ;
+ } else
+ return -EINVAL;
+ do {
+ status = RFTrackingFiltersCorrection(state, params->frequency);
+ if (status < 0)
+ break;
+ status = ChannelConfiguration(state, params->frequency, Standard);
+ if (status < 0)
+ break;
+
+ msleep(state->m_SettlingTime); /* Allow AGC's to settle down */
+ } while (0);
+ return status;
+}
+
+#if 0
+static int GetSignalStrength(s32 *pSignalStrength, u32 RFAgc, u32 IFAgc)
+{
+ if (IFAgc < 500) {
+ /* Scale this from 0 to 50000 */
+ *pSignalStrength = IFAgc * 100;
+ } else {
+ /* Scale range 500-1500 to 50000-80000 */
+ *pSignalStrength = 50000 + (IFAgc - 500) * 30;
+ }
+
+ return 0;
+}
+#endif
+
+static int get_frequency(struct dvb_frontend *fe, u32 *frequency)
+{
+ struct tda_state *state = fe->tuner_priv;
+
+ *frequency = state->IF;
+ return 0;
+}
+
+static int get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
+{
+ /* struct tda_state *state = fe->tuner_priv; */
+ /* *bandwidth = priv->bandwidth; */
+ return 0;
+}
+
+
+static struct dvb_tuner_ops tuner_ops = {
+ .info = {
+ .name = "NXP TDA18271C2D",
+ .frequency_min = 47125000,
+ .frequency_max = 865000000,
+ .frequency_step = 62500
+ },
+ .init = init,
+ .sleep = sleep,
+ .set_params = set_params,
+ .release = release,
+ .get_frequency = get_frequency,
+ .get_bandwidth = get_bandwidth,
+};
+
+struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, u8 adr)
+{
+ struct tda_state *state;
+
+ state = kzalloc(sizeof(struct tda_state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ fe->tuner_priv = state;
+ state->adr = adr;
+ state->i2c = i2c;
+ memcpy(&fe->ops.tuner_ops, &tuner_ops, sizeof(struct dvb_tuner_ops));
+ reset(state);
+ InitCal(state);
+
+ return fe;
+}
+EXPORT_SYMBOL_GPL(tda18271c2dd_attach);
+
+MODULE_DESCRIPTION("TDA18271C2 driver");
+MODULE_AUTHOR("DD");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/tda18271c2dd.h b/drivers/media/dvb/frontends/tda18271c2dd.h
new file mode 100644
index 00000000000..1389c74e12c
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda18271c2dd.h
@@ -0,0 +1,16 @@
+#ifndef _TDA18271C2DD_H_
+#define _TDA18271C2DD_H_
+#if defined(CONFIG_DVB_TDA18271C2DD) || (defined(CONFIG_DVB_TDA18271C2DD_MODULE) \
+ && defined(MODULE))
+struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, u8 adr);
+#else
+static inline struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, u8 adr)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/drivers/media/dvb/frontends/tda18271c2dd_maps.h b/drivers/media/dvb/frontends/tda18271c2dd_maps.h
new file mode 100644
index 00000000000..b87661b9df1
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda18271c2dd_maps.h
@@ -0,0 +1,814 @@
+enum HF_S {
+ HF_None = 0, HF_B, HF_DK, HF_G, HF_I, HF_L, HF_L1, HF_MN, HF_FM_Radio,
+ HF_AnalogMax, HF_DVBT_6MHZ, HF_DVBT_7MHZ, HF_DVBT_8MHZ,
+ HF_DVBT, HF_ATSC, HF_DVBC_6MHZ, HF_DVBC_7MHZ,
+ HF_DVBC_8MHZ, HF_DVBC
+};
+
+struct SStandardParam m_StandardTable[] = {
+ { 0, 0, 0x00, 0x00 }, /* HF_None */
+ { 6000000, 7000000, 0x1D, 0x2C }, /* HF_B, */
+ { 6900000, 8000000, 0x1E, 0x2C }, /* HF_DK, */
+ { 7100000, 8000000, 0x1E, 0x2C }, /* HF_G, */
+ { 7250000, 8000000, 0x1E, 0x2C }, /* HF_I, */
+ { 6900000, 8000000, 0x1E, 0x2C }, /* HF_L, */
+ { 1250000, 8000000, 0x1E, 0x2C }, /* HF_L1, */
+ { 5400000, 6000000, 0x1C, 0x2C }, /* HF_MN, */
+ { 1250000, 500000, 0x18, 0x2C }, /* HF_FM_Radio, */
+ { 0, 0, 0x00, 0x00 }, /* HF_AnalogMax (Unused) */
+ { 3300000, 6000000, 0x1C, 0x58 }, /* HF_DVBT_6MHZ */
+ { 3500000, 7000000, 0x1C, 0x37 }, /* HF_DVBT_7MHZ */
+ { 4000000, 8000000, 0x1D, 0x37 }, /* HF_DVBT_8MHZ */
+ { 0, 0, 0x00, 0x00 }, /* HF_DVBT (Unused) */
+ { 5000000, 6000000, 0x1C, 0x37 }, /* HF_ATSC (center = 3.25 MHz) */
+ { 4000000, 6000000, 0x1D, 0x58 }, /* HF_DVBC_6MHZ (Chicago) */
+ { 4500000, 7000000, 0x1E, 0x37 }, /* HF_DVBC_7MHZ (not documented by NXP) */
+ { 5000000, 8000000, 0x1F, 0x37 }, /* HF_DVBC_8MHZ */
+ { 0, 0, 0x00, 0x00 }, /* HF_DVBC (Unused) */
+};
+
+struct SMap m_BP_Filter_Map[] = {
+ { 62000000, 0x00 },
+ { 84000000, 0x01 },
+ { 100000000, 0x02 },
+ { 140000000, 0x03 },
+ { 170000000, 0x04 },
+ { 180000000, 0x05 },
+ { 865000000, 0x06 },
+ { 0, 0x00 }, /* Table End */
+};
+
+static struct SMapI m_RF_Cal_Map[] = {
+ { 41000000, 0x0F },
+ { 43000000, 0x1C },
+ { 45000000, 0x2F },
+ { 46000000, 0x39 },
+ { 47000000, 0x40 },
+ { 47900000, 0x50 },
+ { 49100000, 0x16 },
+ { 50000000, 0x18 },
+ { 51000000, 0x20 },
+ { 53000000, 0x28 },
+ { 55000000, 0x2B },
+ { 56000000, 0x32 },
+ { 57000000, 0x35 },
+ { 58000000, 0x3E },
+ { 59000000, 0x43 },
+ { 60000000, 0x4E },
+ { 61100000, 0x55 },
+ { 63000000, 0x0F },
+ { 64000000, 0x11 },
+ { 65000000, 0x12 },
+ { 66000000, 0x15 },
+ { 67000000, 0x16 },
+ { 68000000, 0x17 },
+ { 70000000, 0x19 },
+ { 71000000, 0x1C },
+ { 72000000, 0x1D },
+ { 73000000, 0x1F },
+ { 74000000, 0x20 },
+ { 75000000, 0x21 },
+ { 76000000, 0x24 },
+ { 77000000, 0x25 },
+ { 78000000, 0x27 },
+ { 80000000, 0x28 },
+ { 81000000, 0x29 },
+ { 82000000, 0x2D },
+ { 83000000, 0x2E },
+ { 84000000, 0x2F },
+ { 85000000, 0x31 },
+ { 86000000, 0x33 },
+ { 87000000, 0x34 },
+ { 88000000, 0x35 },
+ { 89000000, 0x37 },
+ { 90000000, 0x38 },
+ { 91000000, 0x39 },
+ { 93000000, 0x3C },
+ { 94000000, 0x3E },
+ { 95000000, 0x3F },
+ { 96000000, 0x40 },
+ { 97000000, 0x42 },
+ { 99000000, 0x45 },
+ { 100000000, 0x46 },
+ { 102000000, 0x48 },
+ { 103000000, 0x4A },
+ { 105000000, 0x4D },
+ { 106000000, 0x4E },
+ { 107000000, 0x50 },
+ { 108000000, 0x51 },
+ { 110000000, 0x54 },
+ { 111000000, 0x56 },
+ { 112000000, 0x57 },
+ { 113000000, 0x58 },
+ { 114000000, 0x59 },
+ { 115000000, 0x5C },
+ { 116000000, 0x5D },
+ { 117000000, 0x5F },
+ { 119000000, 0x60 },
+ { 120000000, 0x64 },
+ { 121000000, 0x65 },
+ { 122000000, 0x66 },
+ { 123000000, 0x68 },
+ { 124000000, 0x69 },
+ { 125000000, 0x6C },
+ { 126000000, 0x6D },
+ { 127000000, 0x6E },
+ { 128000000, 0x70 },
+ { 129000000, 0x71 },
+ { 130000000, 0x75 },
+ { 131000000, 0x77 },
+ { 132000000, 0x78 },
+ { 133000000, 0x7B },
+ { 134000000, 0x7E },
+ { 135000000, 0x81 },
+ { 136000000, 0x82 },
+ { 137000000, 0x87 },
+ { 138000000, 0x88 },
+ { 139000000, 0x8D },
+ { 140000000, 0x8E },
+ { 141000000, 0x91 },
+ { 142000000, 0x95 },
+ { 143000000, 0x9A },
+ { 144000000, 0x9D },
+ { 145000000, 0xA1 },
+ { 146000000, 0xA2 },
+ { 147000000, 0xA4 },
+ { 148000000, 0xA9 },
+ { 149000000, 0xAE },
+ { 150000000, 0xB0 },
+ { 151000000, 0xB1 },
+ { 152000000, 0xB7 },
+ { 152600000, 0xBD },
+ { 154000000, 0x20 },
+ { 155000000, 0x22 },
+ { 156000000, 0x24 },
+ { 157000000, 0x25 },
+ { 158000000, 0x27 },
+ { 159000000, 0x29 },
+ { 160000000, 0x2C },
+ { 161000000, 0x2D },
+ { 163000000, 0x2E },
+ { 164000000, 0x2F },
+ { 164700000, 0x30 },
+ { 166000000, 0x11 },
+ { 167000000, 0x12 },
+ { 168000000, 0x13 },
+ { 169000000, 0x14 },
+ { 170000000, 0x15 },
+ { 172000000, 0x16 },
+ { 173000000, 0x17 },
+ { 174000000, 0x18 },
+ { 175000000, 0x1A },
+ { 176000000, 0x1B },
+ { 178000000, 0x1D },
+ { 179000000, 0x1E },
+ { 180000000, 0x1F },
+ { 181000000, 0x20 },
+ { 182000000, 0x21 },
+ { 183000000, 0x22 },
+ { 184000000, 0x24 },
+ { 185000000, 0x25 },
+ { 186000000, 0x26 },
+ { 187000000, 0x27 },
+ { 188000000, 0x29 },
+ { 189000000, 0x2A },
+ { 190000000, 0x2C },
+ { 191000000, 0x2D },
+ { 192000000, 0x2E },
+ { 193000000, 0x2F },
+ { 194000000, 0x30 },
+ { 195000000, 0x33 },
+ { 196000000, 0x35 },
+ { 198000000, 0x36 },
+ { 200000000, 0x38 },
+ { 201000000, 0x3C },
+ { 202000000, 0x3D },
+ { 203500000, 0x3E },
+ { 206000000, 0x0E },
+ { 208000000, 0x0F },
+ { 212000000, 0x10 },
+ { 216000000, 0x11 },
+ { 217000000, 0x12 },
+ { 218000000, 0x13 },
+ { 220000000, 0x14 },
+ { 222000000, 0x15 },
+ { 225000000, 0x16 },
+ { 228000000, 0x17 },
+ { 231000000, 0x18 },
+ { 234000000, 0x19 },
+ { 235000000, 0x1A },
+ { 236000000, 0x1B },
+ { 237000000, 0x1C },
+ { 240000000, 0x1D },
+ { 242000000, 0x1E },
+ { 244000000, 0x1F },
+ { 247000000, 0x20 },
+ { 249000000, 0x21 },
+ { 252000000, 0x22 },
+ { 253000000, 0x23 },
+ { 254000000, 0x24 },
+ { 256000000, 0x25 },
+ { 259000000, 0x26 },
+ { 262000000, 0x27 },
+ { 264000000, 0x28 },
+ { 267000000, 0x29 },
+ { 269000000, 0x2A },
+ { 271000000, 0x2B },
+ { 273000000, 0x2C },
+ { 275000000, 0x2D },
+ { 277000000, 0x2E },
+ { 279000000, 0x2F },
+ { 282000000, 0x30 },
+ { 284000000, 0x31 },
+ { 286000000, 0x32 },
+ { 287000000, 0x33 },
+ { 290000000, 0x34 },
+ { 293000000, 0x35 },
+ { 295000000, 0x36 },
+ { 297000000, 0x37 },
+ { 300000000, 0x38 },
+ { 303000000, 0x39 },
+ { 305000000, 0x3A },
+ { 306000000, 0x3B },
+ { 307000000, 0x3C },
+ { 310000000, 0x3D },
+ { 312000000, 0x3E },
+ { 315000000, 0x3F },
+ { 318000000, 0x40 },
+ { 320000000, 0x41 },
+ { 323000000, 0x42 },
+ { 324000000, 0x43 },
+ { 325000000, 0x44 },
+ { 327000000, 0x45 },
+ { 331000000, 0x46 },
+ { 334000000, 0x47 },
+ { 337000000, 0x48 },
+ { 339000000, 0x49 },
+ { 340000000, 0x4A },
+ { 341000000, 0x4B },
+ { 343000000, 0x4C },
+ { 345000000, 0x4D },
+ { 349000000, 0x4E },
+ { 352000000, 0x4F },
+ { 353000000, 0x50 },
+ { 355000000, 0x51 },
+ { 357000000, 0x52 },
+ { 359000000, 0x53 },
+ { 361000000, 0x54 },
+ { 362000000, 0x55 },
+ { 364000000, 0x56 },
+ { 368000000, 0x57 },
+ { 370000000, 0x58 },
+ { 372000000, 0x59 },
+ { 375000000, 0x5A },
+ { 376000000, 0x5B },
+ { 377000000, 0x5C },
+ { 379000000, 0x5D },
+ { 382000000, 0x5E },
+ { 384000000, 0x5F },
+ { 385000000, 0x60 },
+ { 386000000, 0x61 },
+ { 388000000, 0x62 },
+ { 390000000, 0x63 },
+ { 393000000, 0x64 },
+ { 394000000, 0x65 },
+ { 396000000, 0x66 },
+ { 397000000, 0x67 },
+ { 398000000, 0x68 },
+ { 400000000, 0x69 },
+ { 402000000, 0x6A },
+ { 403000000, 0x6B },
+ { 407000000, 0x6C },
+ { 408000000, 0x6D },
+ { 409000000, 0x6E },
+ { 410000000, 0x6F },
+ { 411000000, 0x70 },
+ { 412000000, 0x71 },
+ { 413000000, 0x72 },
+ { 414000000, 0x73 },
+ { 417000000, 0x74 },
+ { 418000000, 0x75 },
+ { 420000000, 0x76 },
+ { 422000000, 0x77 },
+ { 423000000, 0x78 },
+ { 424000000, 0x79 },
+ { 427000000, 0x7A },
+ { 428000000, 0x7B },
+ { 429000000, 0x7D },
+ { 432000000, 0x7F },
+ { 434000000, 0x80 },
+ { 435000000, 0x81 },
+ { 436000000, 0x83 },
+ { 437000000, 0x84 },
+ { 438000000, 0x85 },
+ { 439000000, 0x86 },
+ { 440000000, 0x87 },
+ { 441000000, 0x88 },
+ { 442000000, 0x89 },
+ { 445000000, 0x8A },
+ { 446000000, 0x8B },
+ { 447000000, 0x8C },
+ { 448000000, 0x8E },
+ { 449000000, 0x8F },
+ { 450000000, 0x90 },
+ { 452000000, 0x91 },
+ { 453000000, 0x93 },
+ { 454000000, 0x94 },
+ { 456000000, 0x96 },
+ { 457800000, 0x98 },
+ { 461000000, 0x11 },
+ { 468000000, 0x12 },
+ { 472000000, 0x13 },
+ { 473000000, 0x14 },
+ { 474000000, 0x15 },
+ { 481000000, 0x16 },
+ { 486000000, 0x17 },
+ { 491000000, 0x18 },
+ { 498000000, 0x19 },
+ { 499000000, 0x1A },
+ { 501000000, 0x1B },
+ { 506000000, 0x1C },
+ { 511000000, 0x1D },
+ { 516000000, 0x1E },
+ { 520000000, 0x1F },
+ { 521000000, 0x20 },
+ { 525000000, 0x21 },
+ { 529000000, 0x22 },
+ { 533000000, 0x23 },
+ { 539000000, 0x24 },
+ { 541000000, 0x25 },
+ { 547000000, 0x26 },
+ { 549000000, 0x27 },
+ { 551000000, 0x28 },
+ { 556000000, 0x29 },
+ { 561000000, 0x2A },
+ { 563000000, 0x2B },
+ { 565000000, 0x2C },
+ { 569000000, 0x2D },
+ { 571000000, 0x2E },
+ { 577000000, 0x2F },
+ { 580000000, 0x30 },
+ { 582000000, 0x31 },
+ { 584000000, 0x32 },
+ { 588000000, 0x33 },
+ { 591000000, 0x34 },
+ { 596000000, 0x35 },
+ { 598000000, 0x36 },
+ { 603000000, 0x37 },
+ { 604000000, 0x38 },
+ { 606000000, 0x39 },
+ { 612000000, 0x3A },
+ { 615000000, 0x3B },
+ { 617000000, 0x3C },
+ { 621000000, 0x3D },
+ { 622000000, 0x3E },
+ { 625000000, 0x3F },
+ { 632000000, 0x40 },
+ { 633000000, 0x41 },
+ { 634000000, 0x42 },
+ { 642000000, 0x43 },
+ { 643000000, 0x44 },
+ { 647000000, 0x45 },
+ { 650000000, 0x46 },
+ { 652000000, 0x47 },
+ { 657000000, 0x48 },
+ { 661000000, 0x49 },
+ { 662000000, 0x4A },
+ { 665000000, 0x4B },
+ { 667000000, 0x4C },
+ { 670000000, 0x4D },
+ { 673000000, 0x4E },
+ { 676000000, 0x4F },
+ { 677000000, 0x50 },
+ { 681000000, 0x51 },
+ { 683000000, 0x52 },
+ { 686000000, 0x53 },
+ { 688000000, 0x54 },
+ { 689000000, 0x55 },
+ { 691000000, 0x56 },
+ { 695000000, 0x57 },
+ { 698000000, 0x58 },
+ { 703000000, 0x59 },
+ { 704000000, 0x5A },
+ { 705000000, 0x5B },
+ { 707000000, 0x5C },
+ { 710000000, 0x5D },
+ { 712000000, 0x5E },
+ { 717000000, 0x5F },
+ { 718000000, 0x60 },
+ { 721000000, 0x61 },
+ { 722000000, 0x62 },
+ { 723000000, 0x63 },
+ { 725000000, 0x64 },
+ { 727000000, 0x65 },
+ { 730000000, 0x66 },
+ { 732000000, 0x67 },
+ { 735000000, 0x68 },
+ { 740000000, 0x69 },
+ { 741000000, 0x6A },
+ { 742000000, 0x6B },
+ { 743000000, 0x6C },
+ { 745000000, 0x6D },
+ { 747000000, 0x6E },
+ { 748000000, 0x6F },
+ { 750000000, 0x70 },
+ { 752000000, 0x71 },
+ { 754000000, 0x72 },
+ { 757000000, 0x73 },
+ { 758000000, 0x74 },
+ { 760000000, 0x75 },
+ { 763000000, 0x76 },
+ { 764000000, 0x77 },
+ { 766000000, 0x78 },
+ { 767000000, 0x79 },
+ { 768000000, 0x7A },
+ { 773000000, 0x7B },
+ { 774000000, 0x7C },
+ { 776000000, 0x7D },
+ { 777000000, 0x7E },
+ { 778000000, 0x7F },
+ { 779000000, 0x80 },
+ { 781000000, 0x81 },
+ { 783000000, 0x82 },
+ { 784000000, 0x83 },
+ { 785000000, 0x84 },
+ { 786000000, 0x85 },
+ { 793000000, 0x86 },
+ { 794000000, 0x87 },
+ { 795000000, 0x88 },
+ { 797000000, 0x89 },
+ { 799000000, 0x8A },
+ { 801000000, 0x8B },
+ { 802000000, 0x8C },
+ { 803000000, 0x8D },
+ { 804000000, 0x8E },
+ { 810000000, 0x90 },
+ { 811000000, 0x91 },
+ { 812000000, 0x92 },
+ { 814000000, 0x93 },
+ { 816000000, 0x94 },
+ { 817000000, 0x96 },
+ { 818000000, 0x97 },
+ { 820000000, 0x98 },
+ { 821000000, 0x99 },
+ { 822000000, 0x9A },
+ { 828000000, 0x9B },
+ { 829000000, 0x9D },
+ { 830000000, 0x9F },
+ { 831000000, 0xA0 },
+ { 833000000, 0xA1 },
+ { 835000000, 0xA2 },
+ { 836000000, 0xA3 },
+ { 837000000, 0xA4 },
+ { 838000000, 0xA6 },
+ { 840000000, 0xA8 },
+ { 842000000, 0xA9 },
+ { 845000000, 0xAA },
+ { 846000000, 0xAB },
+ { 847000000, 0xAD },
+ { 848000000, 0xAE },
+ { 852000000, 0xAF },
+ { 853000000, 0xB0 },
+ { 858000000, 0xB1 },
+ { 860000000, 0xB2 },
+ { 861000000, 0xB3 },
+ { 862000000, 0xB4 },
+ { 863000000, 0xB6 },
+ { 864000000, 0xB8 },
+ { 865000000, 0xB9 },
+ { 0, 0x00 }, /* Table End */
+};
+
+
+static struct SMap2 m_KM_Map[] = {
+ { 47900000, 3, 2 },
+ { 61100000, 3, 1 },
+ { 350000000, 3, 0 },
+ { 720000000, 2, 1 },
+ { 865000000, 3, 3 },
+ { 0, 0x00 }, /* Table End */
+};
+
+static struct SMap2 m_Main_PLL_Map[] = {
+ { 33125000, 0x57, 0xF0 },
+ { 35500000, 0x56, 0xE0 },
+ { 38188000, 0x55, 0xD0 },
+ { 41375000, 0x54, 0xC0 },
+ { 45125000, 0x53, 0xB0 },
+ { 49688000, 0x52, 0xA0 },
+ { 55188000, 0x51, 0x90 },
+ { 62125000, 0x50, 0x80 },
+ { 66250000, 0x47, 0x78 },
+ { 71000000, 0x46, 0x70 },
+ { 76375000, 0x45, 0x68 },
+ { 82750000, 0x44, 0x60 },
+ { 90250000, 0x43, 0x58 },
+ { 99375000, 0x42, 0x50 },
+ { 110375000, 0x41, 0x48 },
+ { 124250000, 0x40, 0x40 },
+ { 132500000, 0x37, 0x3C },
+ { 142000000, 0x36, 0x38 },
+ { 152750000, 0x35, 0x34 },
+ { 165500000, 0x34, 0x30 },
+ { 180500000, 0x33, 0x2C },
+ { 198750000, 0x32, 0x28 },
+ { 220750000, 0x31, 0x24 },
+ { 248500000, 0x30, 0x20 },
+ { 265000000, 0x27, 0x1E },
+ { 284000000, 0x26, 0x1C },
+ { 305500000, 0x25, 0x1A },
+ { 331000000, 0x24, 0x18 },
+ { 361000000, 0x23, 0x16 },
+ { 397500000, 0x22, 0x14 },
+ { 441500000, 0x21, 0x12 },
+ { 497000000, 0x20, 0x10 },
+ { 530000000, 0x17, 0x0F },
+ { 568000000, 0x16, 0x0E },
+ { 611000000, 0x15, 0x0D },
+ { 662000000, 0x14, 0x0C },
+ { 722000000, 0x13, 0x0B },
+ { 795000000, 0x12, 0x0A },
+ { 883000000, 0x11, 0x09 },
+ { 994000000, 0x10, 0x08 },
+ { 0, 0x00, 0x00 }, /* Table End */
+};
+
+static struct SMap2 m_Cal_PLL_Map[] = {
+ { 33813000, 0xDD, 0xD0 },
+ { 36625000, 0xDC, 0xC0 },
+ { 39938000, 0xDB, 0xB0 },
+ { 43938000, 0xDA, 0xA0 },
+ { 48813000, 0xD9, 0x90 },
+ { 54938000, 0xD8, 0x80 },
+ { 62813000, 0xD3, 0x70 },
+ { 67625000, 0xCD, 0x68 },
+ { 73250000, 0xCC, 0x60 },
+ { 79875000, 0xCB, 0x58 },
+ { 87875000, 0xCA, 0x50 },
+ { 97625000, 0xC9, 0x48 },
+ { 109875000, 0xC8, 0x40 },
+ { 125625000, 0xC3, 0x38 },
+ { 135250000, 0xBD, 0x34 },
+ { 146500000, 0xBC, 0x30 },
+ { 159750000, 0xBB, 0x2C },
+ { 175750000, 0xBA, 0x28 },
+ { 195250000, 0xB9, 0x24 },
+ { 219750000, 0xB8, 0x20 },
+ { 251250000, 0xB3, 0x1C },
+ { 270500000, 0xAD, 0x1A },
+ { 293000000, 0xAC, 0x18 },
+ { 319500000, 0xAB, 0x16 },
+ { 351500000, 0xAA, 0x14 },
+ { 390500000, 0xA9, 0x12 },
+ { 439500000, 0xA8, 0x10 },
+ { 502500000, 0xA3, 0x0E },
+ { 541000000, 0x9D, 0x0D },
+ { 586000000, 0x9C, 0x0C },
+ { 639000000, 0x9B, 0x0B },
+ { 703000000, 0x9A, 0x0A },
+ { 781000000, 0x99, 0x09 },
+ { 879000000, 0x98, 0x08 },
+ { 0, 0x00, 0x00 }, /* Table End */
+};
+
+static struct SMap m_GainTaper_Map[] = {
+ { 45400000, 0x1F },
+ { 45800000, 0x1E },
+ { 46200000, 0x1D },
+ { 46700000, 0x1C },
+ { 47100000, 0x1B },
+ { 47500000, 0x1A },
+ { 47900000, 0x19 },
+ { 49600000, 0x17 },
+ { 51200000, 0x16 },
+ { 52900000, 0x15 },
+ { 54500000, 0x14 },
+ { 56200000, 0x13 },
+ { 57800000, 0x12 },
+ { 59500000, 0x11 },
+ { 61100000, 0x10 },
+ { 67600000, 0x0D },
+ { 74200000, 0x0C },
+ { 80700000, 0x0B },
+ { 87200000, 0x0A },
+ { 93800000, 0x09 },
+ { 100300000, 0x08 },
+ { 106900000, 0x07 },
+ { 113400000, 0x06 },
+ { 119900000, 0x05 },
+ { 126500000, 0x04 },
+ { 133000000, 0x03 },
+ { 139500000, 0x02 },
+ { 146100000, 0x01 },
+ { 152600000, 0x00 },
+ { 154300000, 0x1F },
+ { 156100000, 0x1E },
+ { 157800000, 0x1D },
+ { 159500000, 0x1C },
+ { 161200000, 0x1B },
+ { 163000000, 0x1A },
+ { 164700000, 0x19 },
+ { 170200000, 0x17 },
+ { 175800000, 0x16 },
+ { 181300000, 0x15 },
+ { 186900000, 0x14 },
+ { 192400000, 0x13 },
+ { 198000000, 0x12 },
+ { 203500000, 0x11 },
+ { 216200000, 0x14 },
+ { 228900000, 0x13 },
+ { 241600000, 0x12 },
+ { 254400000, 0x11 },
+ { 267100000, 0x10 },
+ { 279800000, 0x0F },
+ { 292500000, 0x0E },
+ { 305200000, 0x0D },
+ { 317900000, 0x0C },
+ { 330700000, 0x0B },
+ { 343400000, 0x0A },
+ { 356100000, 0x09 },
+ { 368800000, 0x08 },
+ { 381500000, 0x07 },
+ { 394200000, 0x06 },
+ { 406900000, 0x05 },
+ { 419700000, 0x04 },
+ { 432400000, 0x03 },
+ { 445100000, 0x02 },
+ { 457800000, 0x01 },
+ { 476300000, 0x19 },
+ { 494800000, 0x18 },
+ { 513300000, 0x17 },
+ { 531800000, 0x16 },
+ { 550300000, 0x15 },
+ { 568900000, 0x14 },
+ { 587400000, 0x13 },
+ { 605900000, 0x12 },
+ { 624400000, 0x11 },
+ { 642900000, 0x10 },
+ { 661400000, 0x0F },
+ { 679900000, 0x0E },
+ { 698400000, 0x0D },
+ { 716900000, 0x0C },
+ { 735400000, 0x0B },
+ { 753900000, 0x0A },
+ { 772500000, 0x09 },
+ { 791000000, 0x08 },
+ { 809500000, 0x07 },
+ { 828000000, 0x06 },
+ { 846500000, 0x05 },
+ { 865000000, 0x04 },
+ { 0, 0x00 }, /* Table End */
+};
+
+static struct SMap m_RF_Cal_DC_Over_DT_Map[] = {
+ { 47900000, 0x00 },
+ { 55000000, 0x00 },
+ { 61100000, 0x0A },
+ { 64000000, 0x0A },
+ { 82000000, 0x14 },
+ { 84000000, 0x19 },
+ { 119000000, 0x1C },
+ { 124000000, 0x20 },
+ { 129000000, 0x2A },
+ { 134000000, 0x32 },
+ { 139000000, 0x39 },
+ { 144000000, 0x3E },
+ { 149000000, 0x3F },
+ { 152600000, 0x40 },
+ { 154000000, 0x40 },
+ { 164700000, 0x41 },
+ { 203500000, 0x32 },
+ { 353000000, 0x19 },
+ { 356000000, 0x1A },
+ { 359000000, 0x1B },
+ { 363000000, 0x1C },
+ { 366000000, 0x1D },
+ { 369000000, 0x1E },
+ { 373000000, 0x1F },
+ { 376000000, 0x20 },
+ { 379000000, 0x21 },
+ { 383000000, 0x22 },
+ { 386000000, 0x23 },
+ { 389000000, 0x24 },
+ { 393000000, 0x25 },
+ { 396000000, 0x26 },
+ { 399000000, 0x27 },
+ { 402000000, 0x28 },
+ { 404000000, 0x29 },
+ { 407000000, 0x2A },
+ { 409000000, 0x2B },
+ { 412000000, 0x2C },
+ { 414000000, 0x2D },
+ { 417000000, 0x2E },
+ { 419000000, 0x2F },
+ { 422000000, 0x30 },
+ { 424000000, 0x31 },
+ { 427000000, 0x32 },
+ { 429000000, 0x33 },
+ { 432000000, 0x34 },
+ { 434000000, 0x35 },
+ { 437000000, 0x36 },
+ { 439000000, 0x37 },
+ { 442000000, 0x38 },
+ { 444000000, 0x39 },
+ { 447000000, 0x3A },
+ { 449000000, 0x3B },
+ { 457800000, 0x3C },
+ { 465000000, 0x0F },
+ { 477000000, 0x12 },
+ { 483000000, 0x14 },
+ { 502000000, 0x19 },
+ { 508000000, 0x1B },
+ { 519000000, 0x1C },
+ { 522000000, 0x1D },
+ { 524000000, 0x1E },
+ { 534000000, 0x1F },
+ { 549000000, 0x20 },
+ { 554000000, 0x22 },
+ { 584000000, 0x24 },
+ { 589000000, 0x26 },
+ { 658000000, 0x27 },
+ { 664000000, 0x2C },
+ { 669000000, 0x2D },
+ { 699000000, 0x2E },
+ { 704000000, 0x30 },
+ { 709000000, 0x31 },
+ { 714000000, 0x32 },
+ { 724000000, 0x33 },
+ { 729000000, 0x36 },
+ { 739000000, 0x38 },
+ { 744000000, 0x39 },
+ { 749000000, 0x3B },
+ { 754000000, 0x3C },
+ { 759000000, 0x3D },
+ { 764000000, 0x3E },
+ { 769000000, 0x3F },
+ { 774000000, 0x40 },
+ { 779000000, 0x41 },
+ { 784000000, 0x43 },
+ { 789000000, 0x46 },
+ { 794000000, 0x48 },
+ { 799000000, 0x4B },
+ { 804000000, 0x4F },
+ { 809000000, 0x54 },
+ { 814000000, 0x59 },
+ { 819000000, 0x5D },
+ { 824000000, 0x61 },
+ { 829000000, 0x68 },
+ { 834000000, 0x6E },
+ { 839000000, 0x75 },
+ { 844000000, 0x7E },
+ { 849000000, 0x82 },
+ { 854000000, 0x84 },
+ { 859000000, 0x8F },
+ { 865000000, 0x9A },
+ { 0, 0x00 }, /* Table End */
+};
+
+
+static struct SMap m_IR_Meas_Map[] = {
+ { 200000000, 0x05 },
+ { 400000000, 0x06 },
+ { 865000000, 0x07 },
+ { 0, 0x00 }, /* Table End */
+};
+
+static struct SMap2 m_CID_Target_Map[] = {
+ { 46000000, 0x04, 18 },
+ { 52200000, 0x0A, 15 },
+ { 70100000, 0x01, 40 },
+ { 136800000, 0x18, 40 },
+ { 156700000, 0x18, 40 },
+ { 186250000, 0x0A, 40 },
+ { 230000000, 0x0A, 40 },
+ { 345000000, 0x18, 40 },
+ { 426000000, 0x0E, 40 },
+ { 489500000, 0x1E, 40 },
+ { 697500000, 0x32, 40 },
+ { 842000000, 0x3A, 40 },
+ { 0, 0x00, 0 }, /* Table End */
+};
+
+static struct SRFBandMap m_RF_Band_Map[7] = {
+ { 47900000, 46000000, 0, 0},
+ { 61100000, 52200000, 0, 0},
+ { 152600000, 70100000, 136800000, 0},
+ { 164700000, 156700000, 0, 0},
+ { 203500000, 186250000, 0, 0},
+ { 457800000, 230000000, 345000000, 426000000},
+ { 865000000, 489500000, 697500000, 842000000},
+};
+
+u8 m_Thermometer_Map_1[16] = {
+ 60, 62, 66, 64,
+ 74, 72, 68, 70,
+ 90, 88, 84, 86,
+ 76, 78, 82, 80,
+};
+
+u8 m_Thermometer_Map_2[16] = {
+ 92, 94, 98, 96,
+ 106, 104, 100, 102,
+ 122, 120, 116, 118,
+ 108, 110, 114, 112,
+};
diff --git a/drivers/media/dvb/mantis/mantis_ca.c b/drivers/media/dvb/mantis/mantis_ca.c
index 330216febd7..3d704690900 100644
--- a/drivers/media/dvb/mantis/mantis_ca.c
+++ b/drivers/media/dvb/mantis/mantis_ca.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
+#include <asm/io.h>
#include "dmxdev.h"
#include "dvbdev.h"
diff --git a/drivers/media/dvb/mantis/mantis_common.h b/drivers/media/dvb/mantis/mantis_common.h
index bd400d21b81..49dbca145bb 100644
--- a/drivers/media/dvb/mantis/mantis_common.h
+++ b/drivers/media/dvb/mantis/mantis_common.h
@@ -21,6 +21,7 @@
#ifndef __MANTIS_COMMON_H
#define __MANTIS_COMMON_H
+#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
diff --git a/drivers/media/dvb/mantis/mantis_evm.c b/drivers/media/dvb/mantis/mantis_evm.c
index 9f73c2cfc9e..36f2256ebb0 100644
--- a/drivers/media/dvb/mantis/mantis_evm.c
+++ b/drivers/media/dvb/mantis/mantis_evm.c
@@ -23,6 +23,7 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
+#include <asm/io.h>
#include "dmxdev.h"
#include "dvbdev.h"
diff --git a/drivers/media/dvb/mantis/mantis_hif.c b/drivers/media/dvb/mantis/mantis_hif.c
index 5772ebb3a69..672cf4d2462 100644
--- a/drivers/media/dvb/mantis/mantis_hif.c
+++ b/drivers/media/dvb/mantis/mantis_hif.c
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
+#include <asm/io.h>
#include "dmxdev.h"
#include "dvbdev.h"
diff --git a/drivers/media/dvb/mantis/mantis_ioc.c b/drivers/media/dvb/mantis/mantis_ioc.c
index 479086dbb9a..24fcdc63d6d 100644
--- a/drivers/media/dvb/mantis/mantis_ioc.c
+++ b/drivers/media/dvb/mantis/mantis_ioc.c
@@ -24,6 +24,7 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
+#include <asm/io.h>
#include "dmxdev.h"
#include "dvbdev.h"
diff --git a/drivers/media/dvb/mantis/mantis_pcmcia.c b/drivers/media/dvb/mantis/mantis_pcmcia.c
index 5cb545b913f..2f188c08966 100644
--- a/drivers/media/dvb/mantis/mantis_pcmcia.c
+++ b/drivers/media/dvb/mantis/mantis_pcmcia.c
@@ -23,6 +23,7 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
+#include <asm/io.h>
#include "dmxdev.h"
#include "dvbdev.h"
diff --git a/drivers/media/dvb/mantis/mantis_uart.c b/drivers/media/dvb/mantis/mantis_uart.c
index f807c8ba26e..18340dafa42 100644
--- a/drivers/media/dvb/mantis/mantis_uart.c
+++ b/drivers/media/dvb/mantis/mantis_uart.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
+#include <asm/io.h>
#include <linux/signal.h>
#include <linux/sched.h>
diff --git a/drivers/media/dvb/mantis/mantis_vp1034.c b/drivers/media/dvb/mantis/mantis_vp1034.c
index 26bc0cbe84d..430ae84ce52 100644
--- a/drivers/media/dvb/mantis/mantis_vp1034.c
+++ b/drivers/media/dvb/mantis/mantis_vp1034.c
@@ -21,6 +21,7 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
+#include <asm/io.h>
#include "dmxdev.h"
#include "dvbdev.h"
diff --git a/drivers/media/dvb/ngene/Kconfig b/drivers/media/dvb/ngene/Kconfig
index cec242b7c00..64c84702ba5 100644
--- a/drivers/media/dvb/ngene/Kconfig
+++ b/drivers/media/dvb/ngene/Kconfig
@@ -5,6 +5,8 @@ config DVB_NGENE
select DVB_STV6110x if !DVB_FE_CUSTOMISE
select DVB_STV090x if !DVB_FE_CUSTOMISE
select DVB_LGDT330X if !DVB_FE_CUSTOMISE
+ select DVB_DRXK if !DVB_FE_CUSTOMISE
+ select DVB_TDA18271C2DD if !DVB_FE_CUSTOMISE
select MEDIA_TUNER_MT2131 if !MEDIA_TUNER_CUSTOMISE
---help---
Support for Micronas PCI express cards with nGene bridge.
diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
index fcf4be901ec..05641922836 100644
--- a/drivers/media/dvb/ngene/ngene-cards.c
+++ b/drivers/media/dvb/ngene/ngene-cards.c
@@ -40,6 +40,8 @@
#include "lnbh24.h"
#include "lgdt330x.h"
#include "mt2131.h"
+#include "tda18271c2dd.h"
+#include "drxk.h"
/****************************************************************************/
@@ -83,6 +85,49 @@ static int tuner_attach_stv6110(struct ngene_channel *chan)
}
+static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct ngene_channel *chan = fe->sec_priv;
+ int status;
+
+ if (enable) {
+ down(&chan->dev->pll_mutex);
+ status = chan->gate_ctrl(fe, 1);
+ } else {
+ status = chan->gate_ctrl(fe, 0);
+ up(&chan->dev->pll_mutex);
+ }
+ return status;
+}
+
+static int tuner_attach_tda18271(struct ngene_channel *chan)
+{
+ struct i2c_adapter *i2c;
+ struct dvb_frontend *fe;
+
+ i2c = &chan->dev->channel[0].i2c_adapter;
+ if (chan->fe->ops.i2c_gate_ctrl)
+ chan->fe->ops.i2c_gate_ctrl(chan->fe, 1);
+ fe = dvb_attach(tda18271c2dd_attach, chan->fe, i2c, 0x60);
+ if (chan->fe->ops.i2c_gate_ctrl)
+ chan->fe->ops.i2c_gate_ctrl(chan->fe, 0);
+ if (!fe) {
+ printk(KERN_ERR "No TDA18271 found!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int tuner_attach_probe(struct ngene_channel *chan)
+{
+ if (chan->demod_type == 0)
+ return tuner_attach_stv6110(chan);
+ if (chan->demod_type == 1)
+ return tuner_attach_tda18271(chan);
+ return -EINVAL;
+}
+
static int demod_attach_stv0900(struct ngene_channel *chan)
{
struct i2c_adapter *i2c;
@@ -130,6 +175,60 @@ static void cineS2_tuner_i2c_lock(struct dvb_frontend *fe, int lock)
up(&chan->dev->pll_mutex);
}
+static int i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val)
+{
+ struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = 1 } };
+ return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1;
+}
+
+static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr,
+ u16 reg, u8 *val)
+{
+ u8 msg[2] = {reg>>8, reg&0xff};
+ struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
+ .buf = msg, .len = 2},
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = 1} };
+ return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
+}
+
+static int port_has_stv0900(struct i2c_adapter *i2c, int port)
+{
+ u8 val;
+ if (i2c_read_reg16(i2c, 0x68+port/2, 0xf100, &val) < 0)
+ return 0;
+ return 1;
+}
+
+static int port_has_drxk(struct i2c_adapter *i2c, int port)
+{
+ u8 val;
+
+ if (i2c_read(i2c, 0x29+port, &val) < 0)
+ return 0;
+ return 1;
+}
+
+static int demod_attach_drxk(struct ngene_channel *chan,
+ struct i2c_adapter *i2c)
+{
+ struct drxk_config config;
+
+ memset(&config, 0, sizeof(config));
+ config.adr = 0x29 + (chan->number ^ 2);
+
+ chan->fe = dvb_attach(drxk_attach, &config, i2c, &chan->fe2);
+ if (!chan->fe) {
+ printk(KERN_ERR "No DRXK found!\n");
+ return -ENODEV;
+ }
+ chan->fe->sec_priv = chan;
+ chan->gate_ctrl = chan->fe->ops.i2c_gate_ctrl;
+ chan->fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
+ return 0;
+}
+
static int cineS2_probe(struct ngene_channel *chan)
{
struct i2c_adapter *i2c;
@@ -144,43 +243,42 @@ static int cineS2_probe(struct ngene_channel *chan)
else
i2c = &chan->dev->channel[1].i2c_adapter;
- fe_conf = chan->dev->card_info->fe_config[chan->number];
- i2c_msg.addr = fe_conf->address;
-
- /* probe demod */
- i2c_msg.len = 2;
- buf[0] = 0xf1;
- buf[1] = 0x00;
- rc = i2c_transfer(i2c, &i2c_msg, 1);
- if (rc != 1)
- return -ENODEV;
-
- /* demod found, attach it */
- rc = demod_attach_stv0900(chan);
- if (rc < 0 || chan->number < 2)
- return rc;
-
- /* demod #2: reprogram outputs DPN1 & DPN2 */
- i2c_msg.len = 3;
- buf[0] = 0xf1;
- switch (chan->number) {
- case 2:
- buf[1] = 0x5c;
- buf[2] = 0xc2;
- break;
- case 3:
- buf[1] = 0x61;
- buf[2] = 0xcc;
- break;
- default:
+ if (port_has_stv0900(i2c, chan->number)) {
+ chan->demod_type = 0;
+ fe_conf = chan->dev->card_info->fe_config[chan->number];
+ /* demod found, attach it */
+ rc = demod_attach_stv0900(chan);
+ if (rc < 0 || chan->number < 2)
+ return rc;
+
+ /* demod #2: reprogram outputs DPN1 & DPN2 */
+ i2c_msg.addr = fe_conf->address;
+ i2c_msg.len = 3;
+ buf[0] = 0xf1;
+ switch (chan->number) {
+ case 2:
+ buf[1] = 0x5c;
+ buf[2] = 0xc2;
+ break;
+ case 3:
+ buf[1] = 0x61;
+ buf[2] = 0xcc;
+ break;
+ default:
+ return -ENODEV;
+ }
+ rc = i2c_transfer(i2c, &i2c_msg, 1);
+ if (rc != 1) {
+ printk(KERN_ERR DEVICE_NAME ": could not setup DPNx\n");
+ return -EIO;
+ }
+ } else if (port_has_drxk(i2c, chan->number^2)) {
+ chan->demod_type = 1;
+ demod_attach_drxk(chan, i2c);
+ } else {
+ printk(KERN_ERR "No demod found on chan %d\n", chan->number);
return -ENODEV;
}
- rc = i2c_transfer(i2c, &i2c_msg, 1);
- if (rc != 1) {
- printk(KERN_ERR DEVICE_NAME ": could not setup DPNx\n");
- return -EIO;
- }
-
return 0;
}
@@ -306,7 +404,7 @@ static struct ngene_info ngene_info_satixS2v2 = {
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
NGENE_IO_TSOUT},
.demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe},
- .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_probe, tuner_attach_probe},
.fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
.lnb = {0x0a, 0x08, 0x0b, 0x09},
@@ -321,7 +419,7 @@ static struct ngene_info ngene_info_cineS2v5 = {
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
NGENE_IO_TSOUT},
.demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe},
- .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110},
+ .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_probe, tuner_attach_probe},
.fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
.lnb = {0x0a, 0x08, 0x0b, 0x09},
@@ -331,13 +429,13 @@ static struct ngene_info ngene_info_cineS2v5 = {
};
-static struct ngene_info ngene_info_duoFlexS2 = {
+static struct ngene_info ngene_info_duoFlex = {
.type = NGENE_SIDEWINDER,
- .name = "Digital Devices DuoFlex S2 miniPCIe",
+ .name = "Digital Devices DuoFlex PCIe or miniPCIe",
.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
NGENE_IO_TSOUT},
.demod_attach = {cineS2_probe, cineS2_probe, cineS2_probe, cineS2_probe},
- .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_stv6110},
+ .tuner_attach = {tuner_attach_probe, tuner_attach_probe, tuner_attach_probe, tuner_attach_probe},
.fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
.lnb = {0x0a, 0x08, 0x0b, 0x09},
@@ -385,8 +483,8 @@ static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
NGENE_ID(0x18c3, 0xdb02, ngene_info_satixS2v2),
NGENE_ID(0x18c3, 0xdd00, ngene_info_cineS2v5),
- NGENE_ID(0x18c3, 0xdd10, ngene_info_duoFlexS2),
- NGENE_ID(0x18c3, 0xdd20, ngene_info_duoFlexS2),
+ NGENE_ID(0x18c3, 0xdd10, ngene_info_duoFlex),
+ NGENE_ID(0x18c3, 0xdd20, ngene_info_duoFlex),
NGENE_ID(0x1461, 0x062e, ngene_info_m780),
{0}
};
diff --git a/drivers/media/dvb/ngene/ngene-core.c b/drivers/media/dvb/ngene/ngene-core.c
index 6927c726ce3..f129a9303f8 100644
--- a/drivers/media/dvb/ngene/ngene-core.c
+++ b/drivers/media/dvb/ngene/ngene-core.c
@@ -41,7 +41,7 @@
#include "ngene.h"
-static int one_adapter = 1;
+static int one_adapter;
module_param(one_adapter, int, 0444);
MODULE_PARM_DESC(one_adapter, "Use only one adapter.");
@@ -461,7 +461,7 @@ static u8 TSFeatureDecoderSetup[8 * 5] = {
0x42, 0x00, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00,
0x40, 0x06, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* DRXH */
0x71, 0x07, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* DRXHser */
- 0x72, 0x06, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* S2ser */
+ 0x72, 0x00, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* S2ser */
0x40, 0x07, 0x00, 0x02, 0x02, 0xbc, 0x00, 0x00, /* LGDT3303 */
};
@@ -507,7 +507,7 @@ void FillTSBuffer(void *Buffer, int Length, u32 Flags)
{
u32 *ptr = Buffer;
- memset(Buffer, 0xff, Length);
+ memset(Buffer, TS_FILLER, Length);
while (Length > 0) {
if (Flags & DF_SWAP32)
*ptr = 0x471FFF10;
@@ -1443,6 +1443,9 @@ static void release_channel(struct ngene_channel *chan)
chan->ci_dev = NULL;
}
+ if (chan->fe2)
+ dvb_unregister_frontend(chan->fe2);
+
if (chan->fe) {
dvb_unregister_frontend(chan->fe);
dvb_frontend_detach(chan->fe);
@@ -1534,6 +1537,14 @@ static int init_channel(struct ngene_channel *chan)
goto err;
chan->has_demux = true;
}
+ if (chan->fe2) {
+ if (dvb_register_frontend(adapter, chan->fe2) < 0)
+ goto err;
+ chan->fe2->tuner_priv = chan->fe->tuner_priv;
+ memcpy(&chan->fe2->ops.tuner_ops,
+ &chan->fe->ops.tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ }
if (chan->has_demux) {
ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
@@ -1571,11 +1582,18 @@ static int init_channels(struct ngene *dev)
return 0;
}
+static struct cxd2099_cfg cxd_cfg = {
+ .bitrate = 62000,
+ .adr = 0x40,
+ .polarity = 0,
+ .clock_mode = 0,
+};
+
static void cxd_attach(struct ngene *dev)
{
struct ngene_ci *ci = &dev->ci;
- ci->en = cxd2099_attach(0x40, dev, &dev->channel[0].i2c_adapter);
+ ci->en = cxd2099_attach(&cxd_cfg, dev, &dev->channel[0].i2c_adapter);
ci->dev = dev;
return;
}
diff --git a/drivers/media/dvb/ngene/ngene-dvb.c b/drivers/media/dvb/ngene/ngene-dvb.c
index 0b494323316..fcb16a615aa 100644
--- a/drivers/media/dvb/ngene/ngene-dvb.c
+++ b/drivers/media/dvb/ngene/ngene-dvb.c
@@ -118,6 +118,16 @@ static void swap_buffer(u32 *p, u32 len)
}
}
+/* start of filler packet */
+static u8 fill_ts[] = { 0x47, 0x1f, 0xff, 0x10, TS_FILLER };
+
+/* #define DEBUG_CI_XFER */
+#ifdef DEBUG_CI_XFER
+static u32 ok;
+static u32 overflow;
+static u32 stripped;
+#endif
+
void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
{
struct ngene_channel *chan = priv;
@@ -126,21 +136,41 @@ void *tsin_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
if (flags & DF_SWAP32)
swap_buffer(buf, len);
+
if (dev->ci.en && chan->number == 2) {
- if (dvb_ringbuffer_free(&dev->tsin_rbuf) > len) {
- dvb_ringbuffer_write(&dev->tsin_rbuf, buf, len);
- wake_up_interruptible(&dev->tsin_rbuf.queue);
+ while (len >= 188) {
+ if (memcmp(buf, fill_ts, sizeof fill_ts) != 0) {
+ if (dvb_ringbuffer_free(&dev->tsin_rbuf) >= 188) {
+ dvb_ringbuffer_write(&dev->tsin_rbuf, buf, 188);
+ wake_up(&dev->tsin_rbuf.queue);
+#ifdef DEBUG_CI_XFER
+ ok++;
+#endif
+ }
+#ifdef DEBUG_CI_XFER
+ else
+ overflow++;
+#endif
+ }
+#ifdef DEBUG_CI_XFER
+ else
+ stripped++;
+
+ if (ok % 100 == 0 && overflow)
+ printk(KERN_WARNING "%s: ok %u overflow %u dropped %u\n", __func__, ok, overflow, stripped);
+#endif
+ buf += 188;
+ len -= 188;
}
- return 0;
+ return NULL;
}
- if (chan->users > 0) {
+
+ if (chan->users > 0)
dvb_dmx_swfilter(&chan->demux, buf, len);
- }
+
return NULL;
}
-u8 fill_ts[188] = { 0x47, 0x1f, 0xff, 0x10 };
-
void *tsout_exchange(void *priv, void *buf, u32 len, u32 clock, u32 flags)
{
struct ngene_channel *chan = priv;
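
The CI input path above skips any 188-byte packet whose first five bytes match fill_ts (0x47 0x1f 0xff 0x10 followed by TS_FILLER, i.e. 0x6f) before writing real packets to the ring buffer. A minimal standalone sketch of that per-packet test follows, assuming buffers arrive as whole 188-byte packets; the helper name is illustrative and not part of the patch.

#include <linux/string.h>
#include <linux/types.h>

#define TS_PKT_SIZE 188

/* Sketch: the same header comparison tsin_exchange() applies per packet. */
static bool ts_is_filler(const u8 *pkt)
{
	static const u8 filler_hdr[] = { 0x47, 0x1f, 0xff, 0x10, 0x6f };

	return !memcmp(pkt, filler_hdr, sizeof(filler_hdr));
}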
diff --git a/drivers/media/dvb/ngene/ngene.h b/drivers/media/dvb/ngene/ngene.h
index 40fce9e3ae6..5443dc0caea 100644
--- a/drivers/media/dvb/ngene/ngene.h
+++ b/drivers/media/dvb/ngene/ngene.h
@@ -641,8 +641,11 @@ struct ngene_channel {
int mode;
bool has_adapter;
bool has_demux;
+ int demod_type;
+ int (*gate_ctrl)(struct dvb_frontend *, int);
struct dvb_frontend *fe;
+ struct dvb_frontend *fe2;
struct dmxdev dmxdev;
struct dvb_demux demux;
struct dvb_net dvbnet;
@@ -786,6 +789,8 @@ struct ngene {
u8 uart_rbuf[UART_RBUF_LEN];
int uart_rp, uart_wp;
+#define TS_FILLER 0x6f
+
u8 *tsout_buf;
#define TSOUT_BUF_SIZE (512*188*8)
struct dvb_ringbuffer tsout_rbuf;
@@ -852,7 +857,7 @@ struct ngene_info {
};
#ifdef NGENE_V4L
-struct ngene_format{
+struct ngene_format {
char *name;
int fourcc; /* video4linux 2 */
int btformat; /* BT848_COLOR_FMT_* */
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index 7cb79ec685f..80fb5100446 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -26,6 +26,7 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index 78765ed2806..7331e8450d1 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -1147,7 +1147,7 @@ static int smscore_validate_client(struct smscore_device_t *coredev,
if (!client) {
sms_err("bad parameter.");
- return -EFAULT;
+ return -EINVAL;
}
registered_client = smscore_find_client(coredev, data_type, id);
if (registered_client == client)
diff --git a/drivers/media/dvb/siano/smscoreapi.h b/drivers/media/dvb/siano/smscoreapi.h
index 8ecadecaa9d..c592ae09039 100644
--- a/drivers/media/dvb/siano/smscoreapi.h
+++ b/drivers/media/dvb/siano/smscoreapi.h
@@ -22,7 +22,6 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#ifndef __SMS_CORE_API_H__
#define __SMS_CORE_API_H__
-#include <linux/version.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index e4c97fd6f05..52798a111e1 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -168,7 +168,7 @@ config RADIO_MAXIRADIO
config RADIO_MIROPCM20
tristate "miroSOUND PCM20 radio"
- depends on ISA && VIDEO_V4L2 && SND
+ depends on ISA && ISA_DMA_API && VIDEO_V4L2 && SND
select SND_ISA
select SND_MIRO
---help---
@@ -201,7 +201,7 @@ config RADIO_SF16FMI
config RADIO_SF16FMR2
tristate "SF16FMR2 Radio"
- depends on ISA && VIDEO_V4L2
+ depends on ISA && VIDEO_V4L2 && SND
---help---
Choose Y here if you have one of these FM radio cards.
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 3d8cc425fa6..25e58cbf35f 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -102,10 +102,7 @@
/*
* Version Information
*/
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
-
-#define DRIVER_VERSION "v0.46"
-#define RADIO_VERSION KERNEL_VERSION(0, 4, 6)
+#define DRIVER_VERSION "0.4.7"
#define DRIVER_AUTHOR "Markus Demleitner <msdemlei@tucana.harvard.edu>"
#define DRIVER_DESC "D-Link DSB-R100 USB FM radio driver"
@@ -335,7 +332,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "dsbr100", sizeof(v->driver));
strlcpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card));
usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER;
return 0;
}
@@ -647,3 +643,4 @@ module_exit (dsbr100_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 4ce10dbeadd..1c3f8440a55 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -33,7 +33,6 @@
#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* msleep */
#include <linux/videodev2.h> /* kernel radio structs */
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -41,6 +40,7 @@
MODULE_AUTHOR("M.Kirkwood");
MODULE_DESCRIPTION("A driver for the RadioTrack/RadioReveal radio card.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
#ifndef CONFIG_RADIO_RTRACK_PORT
#define CONFIG_RADIO_RTRACK_PORT -1
@@ -53,8 +53,6 @@ module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of the RadioTrack card (0x20f or 0x30f)");
module_param(radio_nr, int, 0);
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
struct rtrack
{
struct v4l2_device v4l2_dev;
@@ -223,7 +221,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-aimslab", sizeof(v->driver));
strlcpy(v->card, "RadioTrack", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index dd8a6ab0d43..eed7b084073 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -30,7 +30,6 @@
#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* udelay */
#include <linux/videodev2.h> /* kernel radio structs */
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -38,6 +37,7 @@
MODULE_AUTHOR("Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath");
MODULE_DESCRIPTION("A driver for the Aztech radio card.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
/* acceptable ports: 0x350 (JP3 shorted), 0x358 (JP3 open) */
@@ -53,8 +53,6 @@ module_param(io, int, 0);
module_param(radio_nr, int, 0);
MODULE_PARM_DESC(io, "I/O address of the Aztech card (0x350 or 0x358)");
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
struct aztech
{
struct v4l2_device v4l2_dev;
@@ -188,7 +186,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-aztech", sizeof(v->driver));
strlcpy(v->card, "Aztech Radio", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index bc9ad0897c5..16a089fad90 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -30,7 +30,6 @@
* Changed API to V4L2
*/
-#include <linux/version.h>
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
#include <linux/ioport.h> /* request_region */
@@ -46,6 +45,7 @@
MODULE_AUTHOR("Fred Gleason, Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath");
MODULE_DESCRIPTION("A driver for the ADS Cadet AM/FM/RDS radio card.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.3.4");
static int io = -1; /* default to isapnp activation */
static int radio_nr = -1;
@@ -54,8 +54,6 @@ module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of Cadet card (0x330,0x332,0x334,0x336,0x338,0x33a,0x33c,0x33e)");
module_param(radio_nr, int, 0);
-#define CADET_VERSION KERNEL_VERSION(0, 3, 3)
-
#define RDS_BUFFER 256
#define RDS_RX_FLAG 1
#define MBS_RX_FLAG 2
@@ -361,7 +359,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "ADS Cadet", sizeof(v->driver));
strlcpy(v->card, "ADS Cadet", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = CADET_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
V4L2_CAP_READWRITE | V4L2_CAP_RDS_CAPTURE;
return 0;
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 259936422e4..edadc8449a3 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -21,21 +21,19 @@
#include <linux/ioport.h> /* request_region */
#include <linux/delay.h> /* udelay */
#include <linux/videodev2.h> /* kernel radio structs */
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/mutex.h>
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 3)
-
/*
* Module info.
*/
-MODULE_AUTHOR("Jonas Munsin, Pekka Seppänen <pexu@kapsi.fi>");
+MODULE_AUTHOR("Jonas Munsin, Pekka Seppänen <pexu@kapsi.fi>");
MODULE_DESCRIPTION("A driver for the GemTek Radio card.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.4");
/*
* Module params.
@@ -387,7 +385,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-gemtek", sizeof(v->driver));
strlcpy(v->card, "GemTek", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index e83e8400302..f872a54cf3d 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -40,15 +40,18 @@
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/videodev2.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h>
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#define DRIVER_VERSION "0.7.8"
+
+
MODULE_AUTHOR("Dimitromanolakis Apostolos, apdim@grecian.net");
MODULE_DESCRIPTION("Radio driver for the Guillemot Maxi Radio FM2000 radio.");
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
static int radio_nr = -1;
module_param(radio_nr, int, 0);
@@ -58,10 +61,6 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "activates debug info");
-#define DRIVER_VERSION "0.77"
-
-#define RADIO_VERSION KERNEL_VERSION(0, 7, 7)
-
#define dprintk(dev, num, fmt, arg...) \
v4l2_dbg(num, debug, &dev->v4l2_dev, fmt, ## arg)
@@ -195,7 +194,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-maxiradio", sizeof(v->driver));
strlcpy(v->card, "Maxi Radio FM2000 radio", sizeof(v->card));
snprintf(v->bus_info, sizeof(v->bus_info), "PCI:%s", pci_name(dev->pdev));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index b3a635b9582..1742bd8110b 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -63,18 +63,17 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <linux/usb.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/mutex.h>
/* driver and module definitions */
#define DRIVER_AUTHOR "Alexey Klimov <klimov.linux@gmail.com>"
#define DRIVER_DESC "AverMedia MR 800 USB FM radio driver"
-#define DRIVER_VERSION "0.11"
-#define RADIO_VERSION KERNEL_VERSION(0, 1, 1)
+#define DRIVER_VERSION "0.1.2"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
#define USB_AMRADIO_VENDOR 0x07ca
#define USB_AMRADIO_PRODUCT 0xb800
@@ -301,7 +300,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-mr800", sizeof(v->driver));
strlcpy(v->card, "AverMedia MR 800 USB FM Radio", sizeof(v->card));
usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER;
return 0;
}
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index 8d6ea591bd1..3628be617ee 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -15,7 +15,6 @@
#include <linux/delay.h> /* udelay */
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/mutex.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -23,6 +22,7 @@
MODULE_AUTHOR("Ben Pfaff");
MODULE_DESCRIPTION("A driver for the RadioTrack II radio card.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
#ifndef CONFIG_RADIO_RTRACK2_PORT
#define CONFIG_RADIO_RTRACK2_PORT -1
@@ -35,8 +35,6 @@ module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of the RadioTrack card (0x20c or 0x30c)");
module_param(radio_nr, int, 0);
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
struct rtrack2
{
struct v4l2_device v4l2_dev;
@@ -121,7 +119,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-rtrack2", sizeof(v->driver));
strlcpy(v->card, "RadioTrack II", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index b5a5f89e238..22c5743bf9d 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -16,7 +16,6 @@
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
*/
-#include <linux/version.h>
#include <linux/kernel.h> /* __setup */
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
@@ -32,6 +31,7 @@
MODULE_AUTHOR("Petr Vandrovec, vandrove@vc.cvut.cz and M. Kirkwood");
MODULE_DESCRIPTION("A driver for the SF16-FMI and SF16-FMP radio.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
static int io = -1;
static int radio_nr = -1;
@@ -40,8 +40,6 @@ module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of the SF16-FMI or SF16-FMP card (0x284 or 0x384)");
module_param(radio_nr, int, 0);
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
struct fmi
{
struct v4l2_device v4l2_dev;
@@ -134,7 +132,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-sf16fmi", sizeof(v->driver));
strlcpy(v->card, "SF16-FMx radio", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 87bad7678d9..2dd485996ba 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -1,441 +1,209 @@
-/* SF16FMR2 radio driver for Linux radio support
- * heavily based on fmi driver...
- * (c) 2000-2002 Ziglio Frediano, freddy77@angelfire.com
+/* SF16-FMR2 radio driver for Linux
+ * Copyright (c) 2011 Ondrej Zary
*
- * Notes on the hardware
- *
- * Frequency control is done digitally -- ie out(port,encodefreq(95.8));
- * No volume control - only mute/unmute - you have to use line volume
- *
- * For read stereo/mono you must wait 0.1 sec after set frequency and
- * card unmuted so I set frequency on unmute
- * Signal handling seem to work only on autoscanning (not implemented)
- *
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Original driver was (c) 2000-2002 Ziglio Frediano, freddy77@angelfire.com
+ * but almost nothing of it remains after the conversion to the generic
+ * TEA575x implementation
*/
+#include <linux/delay.h>
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
#include <linux/ioport.h> /* request_region */
-#include <linux/delay.h> /* udelay */
-#include <linux/videodev2.h> /* kernel radio structs */
-#include <linux/mutex.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
+#include <sound/tea575x-tuner.h>
-MODULE_AUTHOR("Ziglio Frediano, freddy77@angelfire.com");
-MODULE_DESCRIPTION("A driver for the SF16FMR2 radio.");
+MODULE_AUTHOR("Ondrej Zary");
+MODULE_DESCRIPTION("MediaForte SF16-FMR2 FM radio card driver");
MODULE_LICENSE("GPL");
-static int io = 0x384;
-static int radio_nr = -1;
-
-module_param(io, int, 0);
-MODULE_PARM_DESC(io, "I/O address of the SF16FMR2 card (should be 0x384, if do not work try 0x284)");
-module_param(radio_nr, int, 0);
-
-#define RADIO_VERSION KERNEL_VERSION(0,0,2)
-
-#define AUD_VOL_INDEX 1
-
-#undef DEBUG
-//#define DEBUG 1
-
-#ifdef DEBUG
-# define debug_print(s) printk s
-#else
-# define debug_print(s)
-#endif
-
-/* this should be static vars for module size */
-struct fmr2
-{
- struct v4l2_device v4l2_dev;
- struct video_device vdev;
- struct mutex lock;
+struct fmr2 {
int io;
- int curvol; /* 0-15 */
- int mute;
- int stereo; /* card is producing stereo audio */
- unsigned long curfreq; /* freq in kHz */
- int card_type;
+ struct snd_tea575x tea;
+ struct v4l2_ctrl *volume;
+ struct v4l2_ctrl *balance;
};
+/* the port is hardwired so no need to support multiple cards */
+#define FMR2_PORT 0x384
static struct fmr2 fmr2_card;
-/* hw precision is 12.5 kHz
- * It is only useful to give freq in interval of 200 (=0.0125Mhz),
- * other bits will be truncated
- */
-#define RSF16_ENCODE(x) ((x) / 200 + 856)
-#define RSF16_MINFREQ (87 * 16000)
-#define RSF16_MAXFREQ (108 * 16000)
-
-static inline void wait(int n, int io)
-{
- for (; n; --n)
- inb(io);
-}
-
-static void outbits(int bits, unsigned int data, int nWait, int io)
-{
- int bit;
-
- for (; --bits >= 0;) {
- bit = (data >> bits) & 1;
- outb(bit, io);
- wait(nWait, io);
- outb(bit | 2, io);
- wait(nWait, io);
- outb(bit, io);
- wait(nWait, io);
- }
-}
-
-static inline void fmr2_mute(int io)
-{
- outb(0x00, io);
- wait(4, io);
-}
-
-static inline void fmr2_unmute(int io)
-{
- outb(0x04, io);
- wait(4, io);
-}
-
-static inline int fmr2_stereo_mode(int io)
-{
- int n = inb(io);
-
- outb(6, io);
- inb(io);
- n = ((n >> 3) & 1) ^ 1;
- debug_print((KERN_DEBUG "stereo: %d\n", n));
- return n;
-}
-
-static int fmr2_product_info(struct fmr2 *dev)
-{
- int n = inb(dev->io);
-
- n &= 0xC1;
- if (n == 0) {
- /* this should support volume set */
- dev->card_type = 12;
- return 0;
- }
- /* not volume (mine is 11) */
- dev->card_type = (n == 128) ? 11 : 0;
- return n;
-}
+/* TEA575x tuner pins */
+#define STR_DATA (1 << 0)
+#define STR_CLK (1 << 1)
+#define STR_WREN (1 << 2)
+#define STR_MOST (1 << 3)
+/* PT2254A/TC9154A volume control pins */
+#define PT_ST (1 << 4)
+#define PT_CK (1 << 5)
+#define PT_DATA (1 << 6)
+/* volume control presence pin */
+#define FMR2_HASVOL (1 << 7)
-static inline int fmr2_getsigstr(struct fmr2 *dev)
+static void fmr2_tea575x_set_pins(struct snd_tea575x *tea, u8 pins)
{
- /* !!! works only if scanning freq */
- int res = 0xffff;
-
- outb(5, dev->io);
- wait(4, dev->io);
- if (!(inb(dev->io) & 1))
- res = 0;
- debug_print((KERN_DEBUG "signal: %d\n", res));
- return res;
-}
-
-/* set frequency and unmute card */
-static int fmr2_setfreq(struct fmr2 *dev)
-{
- unsigned long freq = dev->curfreq;
-
- fmr2_mute(dev->io);
-
- /* 0x42 for mono output
- * 0x102 forward scanning
- * 0x182 scansione avanti
- */
- outbits(9, 0x2, 3, dev->io);
- outbits(16, RSF16_ENCODE(freq), 2, dev->io);
-
- fmr2_unmute(dev->io);
+ struct fmr2 *fmr2 = tea->private_data;
+ u8 bits = 0;
- /* wait 0.11 sec */
- msleep(110);
+ bits |= (pins & TEA575X_DATA) ? STR_DATA : 0;
+ bits |= (pins & TEA575X_CLK) ? STR_CLK : 0;
+ /* WRITE_ENABLE is inverted, DATA must be high during read */
+ bits |= (pins & TEA575X_WREN) ? 0 : STR_WREN | STR_DATA;
- /* NOTE if mute this stop radio
- you must set freq on unmute */
- dev->stereo = fmr2_stereo_mode(dev->io);
- return 0;
-}
-
-/* !!! not tested, in my card this doesn't work !!! */
-static int fmr2_setvolume(struct fmr2 *dev)
-{
- int vol[16] = { 0x021, 0x084, 0x090, 0x104,
- 0x110, 0x204, 0x210, 0x402,
- 0x404, 0x408, 0x410, 0x801,
- 0x802, 0x804, 0x808, 0x810 };
- int i, a;
- int n = vol[dev->curvol & 0x0f];
-
- if (dev->card_type != 11)
- return 1;
-
- for (i = 12; --i >= 0; ) {
- a = ((n >> i) & 1) << 6; /* if (a==0) a = 0; else a = 0x40; */
- outb(a | 4, dev->io);
- wait(4, dev->io);
- outb(a | 0x24, dev->io);
- wait(4, dev->io);
- outb(a | 4, dev->io);
- wait(4, dev->io);
- }
- for (i = 6; --i >= 0; ) {
- a = ((0x18 >> i) & 1) << 6;
- outb(a | 4, dev->io);
- wait(4, dev->io);
- outb(a | 0x24, dev->io);
- wait(4, dev->io);
- outb(a | 4, dev->io);
- wait(4, dev->io);
- }
- wait(4, dev->io);
- outb(0x14, dev->io);
- return 0;
+ outb(bits, fmr2->io);
}
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *v)
+static u8 fmr2_tea575x_get_pins(struct snd_tea575x *tea)
{
- strlcpy(v->driver, "radio-sf16fmr2", sizeof(v->driver));
- strlcpy(v->card, "SF16-FMR2 radio", sizeof(v->card));
- strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
- v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
- return 0;
-}
-
-static int vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *v)
-{
- struct fmr2 *fmr2 = video_drvdata(file);
-
- if (v->index > 0)
- return -EINVAL;
+ struct fmr2 *fmr2 = tea->private_data;
+ u8 bits = inb(fmr2->io);
- strlcpy(v->name, "FM", sizeof(v->name));
- v->type = V4L2_TUNER_RADIO;
-
- v->rangelow = RSF16_MINFREQ;
- v->rangehigh = RSF16_MAXFREQ;
- v->rxsubchans = fmr2->stereo ? V4L2_TUNER_SUB_STEREO :
- V4L2_TUNER_SUB_MONO;
- v->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW;
- v->audmode = V4L2_TUNER_MODE_STEREO;
- mutex_lock(&fmr2->lock);
- v->signal = fmr2_getsigstr(fmr2);
- mutex_unlock(&fmr2->lock);
- return 0;
+ return ((bits & STR_DATA) ? TEA575X_DATA : 0) |
+ ((bits & STR_MOST) ? TEA575X_MOST : 0);
}
-static int vidioc_s_tuner(struct file *file, void *priv,
- struct v4l2_tuner *v)
+static void fmr2_tea575x_set_direction(struct snd_tea575x *tea, bool output)
{
- return v->index ? -EINVAL : 0;
}
-static int vidioc_s_frequency(struct file *file, void *priv,
- struct v4l2_frequency *f)
-{
- struct fmr2 *fmr2 = video_drvdata(file);
+static struct snd_tea575x_ops fmr2_tea_ops = {
+ .set_pins = fmr2_tea575x_set_pins,
+ .get_pins = fmr2_tea575x_get_pins,
+ .set_direction = fmr2_tea575x_set_direction,
+};
- if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
- return -EINVAL;
- if (f->frequency < RSF16_MINFREQ ||
- f->frequency > RSF16_MAXFREQ)
- return -EINVAL;
- /* rounding in steps of 200 to match the freq
- that will be used */
- fmr2->curfreq = (f->frequency / 200) * 200;
-
- /* set card freq (if not muted) */
- if (fmr2->curvol && !fmr2->mute) {
- mutex_lock(&fmr2->lock);
- fmr2_setfreq(fmr2);
- mutex_unlock(&fmr2->lock);
+/* TC9154A/PT2254A volume control */
+
+/* 18-bit shift register bit definitions */
+#define TC9154A_ATT_MAJ_0DB (1 << 0)
+#define TC9154A_ATT_MAJ_10DB (1 << 1)
+#define TC9154A_ATT_MAJ_20DB (1 << 2)
+#define TC9154A_ATT_MAJ_30DB (1 << 3)
+#define TC9154A_ATT_MAJ_40DB (1 << 4)
+#define TC9154A_ATT_MAJ_50DB (1 << 5)
+#define TC9154A_ATT_MAJ_60DB (1 << 6)
+
+#define TC9154A_ATT_MIN_0DB (1 << 7)
+#define TC9154A_ATT_MIN_2DB (1 << 8)
+#define TC9154A_ATT_MIN_4DB (1 << 9)
+#define TC9154A_ATT_MIN_6DB (1 << 10)
+#define TC9154A_ATT_MIN_8DB (1 << 11)
+/* bit 12 is ignored */
+#define TC9154A_CHANNEL_LEFT (1 << 13)
+#define TC9154A_CHANNEL_RIGHT (1 << 14)
+/* bits 15, 16, 17 must be 0 */
+
+#define TC9154A_ATT_MAJ(x) (1 << x)
+#define TC9154A_ATT_MIN(x) (1 << (7 + x))
+
+static void tc9154a_set_pins(struct fmr2 *fmr2, u8 pins)
+{
+ if (!fmr2->tea.mute)
+ pins |= STR_WREN;
+
+ outb(pins, fmr2->io);
+}
+
+static void tc9154a_set_attenuation(struct fmr2 *fmr2, int att, u32 channel)
+{
+ int i;
+ u32 reg;
+ u8 bit;
+
+ reg = TC9154A_ATT_MAJ(att / 10) | TC9154A_ATT_MIN((att % 10) / 2);
+ reg |= channel;
+ /* write 18-bit shift register, LSB first */
+ for (i = 0; i < 18; i++) {
+ bit = reg & (1 << i) ? PT_DATA : 0;
+ tc9154a_set_pins(fmr2, bit);
+ udelay(5);
+ tc9154a_set_pins(fmr2, bit | PT_CK);
+ udelay(5);
+ tc9154a_set_pins(fmr2, bit);
}
- return 0;
-}
-
-static int vidioc_g_frequency(struct file *file, void *priv,
- struct v4l2_frequency *f)
-{
- struct fmr2 *fmr2 = video_drvdata(file);
-
- if (f->tuner != 0)
- return -EINVAL;
- f->type = V4L2_TUNER_RADIO;
- f->frequency = fmr2->curfreq;
- return 0;
-}
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
-{
- struct fmr2 *fmr2 = video_drvdata(file);
-
- switch (qc->id) {
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- case V4L2_CID_AUDIO_VOLUME:
- /* Only card_type == 11 implements volume */
- if (fmr2->card_type == 11)
- return v4l2_ctrl_query_fill(qc, 0, 15, 1, 0);
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
- }
- return -EINVAL;
+ /* latch register data */
+ udelay(5);
+ tc9154a_set_pins(fmr2, PT_ST);
+ udelay(5);
+ tc9154a_set_pins(fmr2, 0);
}
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
+static int fmr2_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct fmr2 *fmr2 = video_drvdata(file);
+ struct snd_tea575x *tea = container_of(ctrl->handler, struct snd_tea575x, ctrl_handler);
+ struct fmr2 *fmr2 = tea->private_data;
+ int volume, balance, left, right;
switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- ctrl->value = fmr2->mute;
- return 0;
case V4L2_CID_AUDIO_VOLUME:
- ctrl->value = fmr2->curvol;
- return 0;
- }
- return -EINVAL;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct fmr2 *fmr2 = video_drvdata(file);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- fmr2->mute = ctrl->value;
+ volume = ctrl->val;
+ balance = fmr2->balance->cur.val;
break;
- case V4L2_CID_AUDIO_VOLUME:
- fmr2->curvol = ctrl->value;
+ case V4L2_CID_AUDIO_BALANCE:
+ balance = ctrl->val;
+ volume = fmr2->volume->cur.val;
break;
default:
return -EINVAL;
}
-#ifdef DEBUG
- if (fmr2->curvol && !fmr2->mute)
- printk(KERN_DEBUG "unmute\n");
- else
- printk(KERN_DEBUG "mute\n");
-#endif
-
- mutex_lock(&fmr2->lock);
- if (fmr2->curvol && !fmr2->mute) {
- fmr2_setvolume(fmr2);
- /* Set frequency and unmute card */
- fmr2_setfreq(fmr2);
- } else
- fmr2_mute(fmr2->io);
- mutex_unlock(&fmr2->lock);
- return 0;
-}
-
-static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
+ left = right = volume;
+ if (balance < 0)
+ right = max(0, right + balance);
+ if (balance > 0)
+ left = max(0, left - balance);
-static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
- return i ? -EINVAL : 0;
-}
+ tc9154a_set_attenuation(fmr2, abs(left - 68), TC9154A_CHANNEL_LEFT);
+ tc9154a_set_attenuation(fmr2, abs(right - 68), TC9154A_CHANNEL_RIGHT);
-static int vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- a->index = 0;
- strlcpy(a->name, "Radio", sizeof(a->name));
- a->capability = V4L2_AUDCAP_STEREO;
return 0;
}
-static int vidioc_s_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
+static const struct v4l2_ctrl_ops fmr2_ctrl_ops = {
+ .s_ctrl = fmr2_s_ctrl,
+};
+
+static int fmr2_tea_ext_init(struct snd_tea575x *tea)
{
- return a->index ? -EINVAL : 0;
-}
+ struct fmr2 *fmr2 = tea->private_data;
-static const struct v4l2_file_operations fmr2_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = video_ioctl2,
-};
+ if (inb(fmr2->io) & FMR2_HASVOL) {
+ fmr2->volume = v4l2_ctrl_new_std(&tea->ctrl_handler, &fmr2_ctrl_ops, V4L2_CID_AUDIO_VOLUME, 0, 68, 2, 56);
+ fmr2->balance = v4l2_ctrl_new_std(&tea->ctrl_handler, &fmr2_ctrl_ops, V4L2_CID_AUDIO_BALANCE, -68, 68, 2, 0);
+ if (tea->ctrl_handler.error) {
+ printk(KERN_ERR "radio-sf16fmr2: can't initialize contrls\n");
+ return tea->ctrl_handler.error;
+ }
+ }
-static const struct v4l2_ioctl_ops fmr2_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_g_tuner = vidioc_g_tuner,
- .vidioc_s_tuner = vidioc_s_tuner,
- .vidioc_g_audio = vidioc_g_audio,
- .vidioc_s_audio = vidioc_s_audio,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_g_frequency = vidioc_g_frequency,
- .vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
-};
+ return 0;
+}
static int __init fmr2_init(void)
{
struct fmr2 *fmr2 = &fmr2_card;
- struct v4l2_device *v4l2_dev = &fmr2->v4l2_dev;
- int res;
- strlcpy(v4l2_dev->name, "sf16fmr2", sizeof(v4l2_dev->name));
- fmr2->io = io;
- fmr2->stereo = 1;
- mutex_init(&fmr2->lock);
+ fmr2->io = FMR2_PORT;
- if (!request_region(fmr2->io, 2, "sf16fmr2")) {
- v4l2_err(v4l2_dev, "request_region failed!\n");
+ if (!request_region(fmr2->io, 2, "SF16-FMR2")) {
+ printk(KERN_ERR "radio-sf16fmr2: I/O port 0x%x already in use\n", fmr2->io);
return -EBUSY;
}
- res = v4l2_device_register(NULL, v4l2_dev);
- if (res < 0) {
- release_region(fmr2->io, 2);
- v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
- return res;
- }
+ fmr2->tea.private_data = fmr2;
+ fmr2->tea.ops = &fmr2_tea_ops;
+ fmr2->tea.ext_init = fmr2_tea_ext_init;
+ strlcpy(fmr2->tea.card, "SF16-FMR2", sizeof(fmr2->tea.card));
+ strcpy(fmr2->tea.bus_info, "ISA");
- strlcpy(fmr2->vdev.name, v4l2_dev->name, sizeof(fmr2->vdev.name));
- fmr2->vdev.v4l2_dev = v4l2_dev;
- fmr2->vdev.fops = &fmr2_fops;
- fmr2->vdev.ioctl_ops = &fmr2_ioctl_ops;
- fmr2->vdev.release = video_device_release_empty;
- video_set_drvdata(&fmr2->vdev, fmr2);
-
- /* mute card - prevents noisy bootups */
- fmr2_mute(fmr2->io);
- fmr2_product_info(fmr2);
-
- if (video_register_device(&fmr2->vdev, VFL_TYPE_RADIO, radio_nr) < 0) {
- v4l2_device_unregister(v4l2_dev);
+ if (snd_tea575x_init(&fmr2->tea)) {
+ printk(KERN_ERR "radio-sf16fmr2: Unable to detect TEA575x tuner\n");
release_region(fmr2->io, 2);
- return -EINVAL;
+ return -ENODEV;
}
- v4l2_info(v4l2_dev, "SF16FMR2 radio card driver at 0x%x.\n", fmr2->io);
- debug_print((KERN_DEBUG "card_type %d\n", fmr2->card_type));
+ printk(KERN_INFO "radio-sf16fmr2: SF16-FMR2 radio card at 0x%x.\n", fmr2->io);
return 0;
}
@@ -443,22 +211,9 @@ static void __exit fmr2_exit(void)
{
struct fmr2 *fmr2 = &fmr2_card;
- video_unregister_device(&fmr2->vdev);
- v4l2_device_unregister(&fmr2->v4l2_dev);
+ snd_tea575x_exit(&fmr2->tea);
release_region(fmr2->io, 2);
}
module_init(fmr2_init);
module_exit(fmr2_exit);
-
-#ifndef MODULE
-
-static int __init fmr2_setup_io(char *str)
-{
- get_option(&str, &io);
- return 1;
-}
-
-__setup("sf16fmr2=", fmr2_setup_io);
-
-#endif
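
As a worked example of the attenuation encoding used by tc9154a_set_attenuation() above: the volume control spans 0-68 in 2 dB steps with a default of 56, so fmr2_s_ctrl() programs 68 - 56 = 12 dB per channel, split into one 10 dB step and one 2 dB step. The snippet below is illustrative arithmetic only, not code added by the patch.

/* Illustrative only: 12 dB of attenuation for the left channel. */
u32 reg = TC9154A_ATT_MAJ(12 / 10)		/* 1 -> bit 1,  -10 dB */
	| TC9154A_ATT_MIN((12 % 10) / 2)	/* 1 -> bit 8,  -2 dB  */
	| TC9154A_CHANNEL_LEFT;			/* bit 13              */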
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 0e71d816c72..95ddcc4845d 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -39,10 +39,8 @@
#include <linux/i2c.h> /* I2C */
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
-#define DRIVER_VERSION "v0.01"
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 1)
+#define DRIVER_VERSION "0.0.2"
#define DRIVER_AUTHOR "Fabio Belavenuto <belavenuto@gmail.com>"
#define DRIVER_DESC "A driver for the TEA5764 radio chip for EZX Phones."
@@ -300,7 +298,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->card, dev->name, sizeof(v->card));
snprintf(v->bus_info, sizeof(v->bus_info),
"I2C:%s", dev_name(&dev->dev));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
@@ -595,8 +592,9 @@ static void __exit tea5764_exit(void)
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
-module_param(use_xtal, int, 1);
+module_param(use_xtal, int, 0);
MODULE_PARM_DESC(use_xtal, "Chip have a xtal connected in board");
module_param(radio_nr, int, 0);
MODULE_PARM_DESC(radio_nr, "video4linux device number to use");
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index a3266391705..f2ed9cc3cf3 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -29,7 +29,6 @@
#include <linux/ioport.h> /* request_region */
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/mutex.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -37,6 +36,7 @@
MODULE_AUTHOR("R.OFFERMANNS & others");
MODULE_DESCRIPTION("A driver for the TerraTec ActiveRadio Standalone radio card.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
#ifndef CONFIG_RADIO_TERRATEC_PORT
#define CONFIG_RADIO_TERRATEC_PORT 0x590
@@ -49,8 +49,6 @@ module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of the TerraTec ActiveRadio card (0x590 or 0x591)");
module_param(radio_nr, int, 0);
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
static struct v4l2_queryctrl radio_qctrl[] = {
{
.id = V4L2_CID_AUDIO_MUTE,
@@ -205,7 +203,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-terratec", sizeof(v->driver));
strlcpy(v->card, "ActiveRadio", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c
index a185610b376..f17b540d68a 100644
--- a/drivers/media/radio/radio-timb.c
+++ b/drivers/media/radio/radio-timb.c
@@ -16,7 +16,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/version.h>
#include <linux/io.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
@@ -44,7 +43,6 @@ static int timbradio_vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, DRIVER_NAME, sizeof(v->driver));
strlcpy(v->card, "Timberdale Radio", sizeof(v->card));
snprintf(v->bus_info, sizeof(v->bus_info), "platform:"DRIVER_NAME);
- v->version = KERNEL_VERSION(0, 0, 1);
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
@@ -245,4 +243,5 @@ module_exit(timbradio_exit);
MODULE_DESCRIPTION("Timberdale Radio driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.0.2");
MODULE_ALIAS("platform:"DRIVER_NAME);
diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c
index 22fa9cc28ab..b3f45a019d8 100644
--- a/drivers/media/radio/radio-trust.c
+++ b/drivers/media/radio/radio-trust.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/videodev2.h>
#include <linux/io.h>
#include <media/v4l2-device.h>
@@ -28,6 +27,7 @@
MODULE_AUTHOR("Eric Lammerts, Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath");
MODULE_DESCRIPTION("A driver for the Trust FM Radio card.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
/* acceptable ports: 0x350 (JP3 shorted), 0x358 (JP3 open) */
@@ -42,8 +42,6 @@ module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of the Trust FM Radio card (0x350 or 0x358)");
module_param(radio_nr, int, 0);
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
struct trust {
struct v4l2_device v4l2_dev;
struct video_device vdev;
@@ -196,7 +194,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-trust", sizeof(v->driver));
strlcpy(v->card, "Trust FM Radio", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c
index 8dbbf08f220..398726abc0c 100644
--- a/drivers/media/radio/radio-typhoon.c
+++ b/drivers/media/radio/radio-typhoon.c
@@ -31,15 +31,17 @@
#include <linux/module.h> /* Modules */
#include <linux/init.h> /* Initdata */
#include <linux/ioport.h> /* request_region */
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#define DRIVER_VERSION "0.1.2"
+
MODULE_AUTHOR("Dr. Henrik Seidel");
MODULE_DESCRIPTION("A driver for the Typhoon radio card (a.k.a. EcoRadio).");
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
#ifndef CONFIG_RADIO_TYPHOON_PORT
#define CONFIG_RADIO_TYPHOON_PORT -1
@@ -61,9 +63,7 @@ static unsigned long mutefreq = CONFIG_RADIO_TYPHOON_MUTEFREQ;
module_param(mutefreq, ulong, 0);
MODULE_PARM_DESC(mutefreq, "Frequency used when muting the card (in kHz)");
-#define RADIO_VERSION KERNEL_VERSION(0, 1, 1)
-
-#define BANNER "Typhoon Radio Card driver v0.1.1\n"
+#define BANNER "Typhoon Radio Card driver v" DRIVER_VERSION "\n"
struct typhoon {
struct v4l2_device v4l2_dev;
@@ -171,7 +171,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-typhoon", sizeof(v->driver));
strlcpy(v->card, "Typhoon Radio", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 459f7272d32..46cacf84504 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1382,7 +1382,7 @@ static int wl1273_fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- ctrl->cur.val = wl1273_fm_get_tx_ctune(radio);
+ ctrl->val = wl1273_fm_get_tx_ctune(radio);
break;
default:
diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c
index af99c5bd88c..f5613b94820 100644
--- a/drivers/media/radio/radio-zoltrix.c
+++ b/drivers/media/radio/radio-zoltrix.c
@@ -35,7 +35,6 @@
#include <linux/delay.h> /* udelay, msleep */
#include <linux/videodev2.h> /* kernel radio structs */
#include <linux/mutex.h>
-#include <linux/version.h> /* for KERNEL_VERSION MACRO */
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -43,6 +42,7 @@
MODULE_AUTHOR("C.van Schaik");
MODULE_DESCRIPTION("A driver for the Zoltrix Radio Plus.");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
#ifndef CONFIG_RADIO_ZOLTRIX_PORT
#define CONFIG_RADIO_ZOLTRIX_PORT -1
@@ -55,8 +55,6 @@ module_param(io, int, 0);
MODULE_PARM_DESC(io, "I/O address of the Zoltrix Radio Plus (0x20c or 0x30c)");
module_param(radio_nr, int, 0);
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
struct zoltrix {
struct v4l2_device v4l2_dev;
struct video_device vdev;
@@ -228,7 +226,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "radio-zoltrix", sizeof(v->driver));
strlcpy(v->card, "Zoltrix Radio", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->version = RADIO_VERSION;
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index a2a67772c42..fd3541b0e91 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -24,10 +24,9 @@
/* driver definitions */
#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>";
-#define DRIVER_KERNEL_VERSION KERNEL_VERSION(1, 0, 1)
#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
#define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers"
-#define DRIVER_VERSION "1.0.1"
+#define DRIVER_VERSION "1.0.2"
/* kernel includes */
#include <linux/i2c.h>
@@ -248,7 +247,6 @@ int si470x_vidioc_querycap(struct file *file, void *priv,
{
strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver));
strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
- capability->version = DRIVER_KERNEL_VERSION;
capability->capabilities = V4L2_CAP_HW_FREQ_SEEK |
V4L2_CAP_TUNER | V4L2_CAP_RADIO;
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 392e84fe90e..4cf537043f9 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -29,7 +29,6 @@
/* driver definitions */
#define DRIVER_AUTHOR "Tobias Lorenz <tobias.lorenz@gmx.net>"
-#define DRIVER_KERNEL_VERSION KERNEL_VERSION(1, 0, 10)
#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
#define DRIVER_DESC "USB radio driver for Si470x FM Radio Receivers"
#define DRIVER_VERSION "1.0.10"
@@ -626,7 +625,6 @@ int si470x_vidioc_querycap(struct file *file, void *priv,
strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
usb_make_path(radio->usbdev, capability->bus_info,
sizeof(capability->bus_info));
- capability->version = DRIVER_KERNEL_VERSION;
capability->capabilities = V4L2_CAP_HW_FREQ_SEEK |
V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
@@ -699,7 +697,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
radio->videodev = video_device_alloc();
if (!radio->videodev) {
retval = -ENOMEM;
- goto err_intbuffer;
+ goto err_urb;
}
memcpy(radio->videodev, &si470x_viddev_template,
sizeof(si470x_viddev_template));
@@ -790,6 +788,8 @@ err_all:
kfree(radio->buffer);
err_video:
video_device_release(radio->videodev);
+err_urb:
+ usb_free_urb(radio->int_in_urb);
err_intbuffer:
kfree(radio->int_in_buffer);
err_radio:
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 68da001b09d..f300a55ed85 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -32,7 +32,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/input.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/mutex.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c
index deca2e06ff2..c9f4a8e65dc 100644
--- a/drivers/media/radio/si4713-i2c.c
+++ b/drivers/media/radio/si4713-i2c.c
@@ -1033,7 +1033,7 @@ static int si4713_write_econtrol_string(struct si4713_device *sdev,
char ps_name[MAX_RDS_PS_NAME + 1];
len = control->size - 1;
- if (len > MAX_RDS_PS_NAME) {
+ if (len < 0 || len > MAX_RDS_PS_NAME) {
rval = -ERANGE;
goto exit;
}
@@ -1057,7 +1057,7 @@ static int si4713_write_econtrol_string(struct si4713_device *sdev,
char radio_text[MAX_RDS_RADIO_TEXT + 1];
len = control->size - 1;
- if (len > MAX_RDS_RADIO_TEXT) {
+ if (len < 0 || len > MAX_RDS_RADIO_TEXT) {
rval = -ERANGE;
goto exit;
}
diff --git a/drivers/media/radio/wl128x/fmdrv.h b/drivers/media/radio/wl128x/fmdrv.h
index 1a45a5d847b..d84ad9dad32 100644
--- a/drivers/media/radio/wl128x/fmdrv.h
+++ b/drivers/media/radio/wl128x/fmdrv.h
@@ -28,14 +28,11 @@
#include <sound/core.h>
#include <sound/initval.h>
#include <linux/timer.h>
-#include <linux/version.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
-#define FM_DRV_VERSION "0.10"
-/* Should match with FM_DRV_VERSION */
-#define FM_DRV_RADIO_VERSION KERNEL_VERSION(0, 0, 1)
+#define FM_DRV_VERSION "0.1.1"
#define FM_DRV_NAME "ti_fmdrv"
#define FM_DRV_CARD_SHORT_NAME "TI FM Radio"
#define FM_DRV_CARD_LONG_NAME "Texas Instruments FM Radio"
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index 87010724f91..8c0e1927697 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -175,7 +175,6 @@ static int fm_v4l2_vidioc_querycap(struct file *file, void *priv,
strlcpy(capability->card, FM_DRV_CARD_SHORT_NAME,
sizeof(capability->card));
sprintf(capability->bus_info, "UART");
- capability->version = FM_DRV_RADIO_VERSION;
capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER |
V4L2_CAP_RADIO | V4L2_CAP_MODULATOR |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE |
@@ -191,7 +190,7 @@ static int fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- ctrl->cur.val = fm_tx_get_tune_cap_val(fmdev);
+ ctrl->val = fm_tx_get_tune_cap_val(fmdev);
break;
default:
fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id);
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 7d4bbc226d0..899f783d92f 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -87,6 +87,17 @@ config IR_RC5_SZ_DECODER
uses an IR protocol that is almost standard RC-5, but not quite,
as it uses an additional bit).
+config IR_MCE_KBD_DECODER
+ tristate "Enable IR raw decoder for the MCE keyboard/mouse protocol"
+ depends on RC_CORE
+ select BITREVERSE
+ default y
+
+ ---help---
+ Enable this option if you have a Microsoft Remote Keyboard for
+ Windows Media Center Edition, which you would like to use with
+ a raw IR receiver in your system.
+
config IR_LIRC_CODEC
tristate "Enable IR to LIRC bridge"
depends on RC_CORE
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 52830e5f4ea..f224db027c4 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_IR_RC6_DECODER) += ir-rc6-decoder.o
obj-$(CONFIG_IR_JVC_DECODER) += ir-jvc-decoder.o
obj-$(CONFIG_IR_SONY_DECODER) += ir-sony-decoder.o
obj-$(CONFIG_IR_RC5_SZ_DECODER) += ir-rc5-sz-decoder.o
+obj-$(CONFIG_IR_MCE_KBD_DECODER) += ir-mce_kbd-decoder.o
obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
# stand-alone IR receivers/transmitters
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index a43ed6c41bf..2b9c2569d74 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -953,13 +953,13 @@ static void ene_set_idle(struct rc_dev *rdev, bool idle)
}
/* outside interface: transmit */
-static int ene_transmit(struct rc_dev *rdev, int *buf, u32 n)
+static int ene_transmit(struct rc_dev *rdev, unsigned *buf, unsigned n)
{
struct ene_device *dev = rdev->priv;
unsigned long flags;
dev->tx_buffer = buf;
- dev->tx_len = n / sizeof(int);
+ dev->tx_len = n;
dev->tx_pos = 0;
dev->tx_reg = 0;
dev->tx_done = 0;
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index 337a41d4450..017c209cdf8 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -235,7 +235,7 @@ struct ene_device {
bool tx_sample_pulse; /* current sample is pulse */
/* TX buffer */
- int *tx_buffer; /* input samples buffer*/
+ unsigned *tx_buffer; /* input samples buffer*/
int tx_pos; /* position in that buffer */
int tx_len; /* current len of tx buffer */
int tx_done; /* done transmitting */
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 8fa539dde1b..7f7079b12f2 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -597,12 +597,17 @@ static void __devexit fintek_remove(struct pnp_dev *pdev)
static int fintek_suspend(struct pnp_dev *pdev, pm_message_t state)
{
struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+ unsigned long flags;
fit_dbg("%s called", __func__);
+ spin_lock_irqsave(&fintek->fintek_lock, flags);
+
/* disable all CIR interrupts */
fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+ spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+
fintek_config_mode_enable(fintek);
/* disable cir logical dev */
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 3f3c7071626..6bc35eeb653 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -307,6 +307,14 @@ static const struct {
/* 0xffdc iMON MCE VFD */
{ 0x00010000ffffffeell, KEY_VOLUMEUP },
{ 0x01000000ffffffeell, KEY_VOLUMEDOWN },
+ { 0x00000001ffffffeell, KEY_MUTE },
+ { 0x0000000fffffffeell, KEY_MEDIA },
+ { 0x00000012ffffffeell, KEY_UP },
+ { 0x00000013ffffffeell, KEY_DOWN },
+ { 0x00000014ffffffeell, KEY_LEFT },
+ { 0x00000015ffffffeell, KEY_RIGHT },
+ { 0x00000016ffffffeell, KEY_ENTER },
+ { 0x00000017ffffffeell, KEY_ESC },
/* iMON Knob values */
{ 0x000100ffffffffeell, KEY_VOLUMEUP },
{ 0x010000ffffffffeell, KEY_VOLUMEDOWN },
@@ -1582,16 +1590,16 @@ static void imon_incoming_packet(struct imon_context *ictx,
/* Only panel type events left to process now */
spin_lock_irqsave(&ictx->kc_lock, flags);
+ do_gettimeofday(&t);
/* KEY_MUTE repeats from knob need to be suppressed */
if (ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode) {
- do_gettimeofday(&t);
msec = tv2int(&t, &prev_time);
- prev_time = t;
if (msec < ictx->idev->rep[REP_DELAY]) {
spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
}
}
+ prev_time = t;
kc = ictx->kc;
spin_unlock_irqrestore(&ictx->kc_lock, flags);
@@ -1603,7 +1611,9 @@ static void imon_incoming_packet(struct imon_context *ictx,
input_report_key(ictx->idev, kc, 0);
input_sync(ictx->idev);
+ spin_lock_irqsave(&ictx->kc_lock, flags);
ictx->last_keycode = kc;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
@@ -1740,6 +1750,8 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
detected_display_type = IMON_DISPLAY_TYPE_VFD;
break;
/* iMON VFD, MCE IR */
+ case 0x46:
+ case 0x7e:
case 0x9e:
dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
@@ -1755,6 +1767,9 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
dev_info(ictx->dev, "Unknown 0xffdc device, "
"defaulting to VFD and iMON IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ /* We don't know which one it is, so allow the user to set the
+ * RC6 one from userspace if OTHER wasn't correct. */
+ allowed_protos |= RC_TYPE_RC6;
break;
}
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index 1c5cc65ea1e..e5eeec4da76 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -103,19 +103,19 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char *buf,
{
struct lirc_codec *lirc;
struct rc_dev *dev;
- int *txbuf; /* buffer with values to transmit */
- int ret = 0;
+ unsigned int *txbuf; /* buffer with values to transmit */
+ ssize_t ret = 0;
size_t count;
lirc = lirc_get_pdata(file);
if (!lirc)
return -EFAULT;
- if (n % sizeof(int))
+ if (n < sizeof(unsigned) || n % sizeof(unsigned))
return -EINVAL;
- count = n / sizeof(int);
- if (count > LIRCBUF_SIZE || count % 2 == 0 || n % sizeof(int) != 0)
+ count = n / sizeof(unsigned);
+ if (count > LIRCBUF_SIZE || count % 2 == 0)
return -EINVAL;
txbuf = memdup_user(buf, n);
@@ -129,7 +129,10 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char *buf,
}
if (dev->tx_ir)
- ret = dev->tx_ir(dev, txbuf, (u32)n);
+ ret = dev->tx_ir(dev, txbuf, count);
+
+ if (ret > 0)
+ ret *= sizeof(unsigned);
out:
kfree(txbuf);
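
The hunk above changes the tx_ir() contract from a byte length to a count of unsigned int samples, and scales the driver's return value back to bytes for the write() caller. A minimal standalone sketch of that accounting, mirroring the size and odd-count checks above (the helper name, the -1 error value and the 256 limit are illustrative only; the driver uses its own LIRCBUF_SIZE and -EINVAL):

#include <sys/types.h>

#define LIRCBUF_SIZE 256	/* illustrative; the driver uses its own limit */

/* returns the byte count to report to write(), or -1 on a bad buffer */
ssize_t lirc_tx_bytes(size_t n_bytes, unsigned samples_sent)
{
	size_t count;

	if (n_bytes < sizeof(unsigned) || n_bytes % sizeof(unsigned))
		return -1;			/* -EINVAL in the driver */

	count = n_bytes / sizeof(unsigned);
	if (count > LIRCBUF_SIZE || count % 2 == 0)
		return -1;			/* pulse/space data must end on a pulse */

	/* tx_ir() now reports samples consumed; scale back to bytes */
	return (ssize_t)samples_sent * sizeof(unsigned);
}
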
diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
new file mode 100644
index 00000000000..3784ebf80ec
--- /dev/null
+++ b/drivers/media/rc/ir-mce_kbd-decoder.c
@@ -0,0 +1,449 @@
+/* ir-mce_kbd-decoder.c - A decoder for the RC6-ish keyboard/mouse IR protocol
+ * used by the Microsoft Remote Keyboard for Windows Media Center Edition,
+ * referred to by Microsoft's Windows Media Center remote specification docs
+ * as "an internal protocol called MCIR-2".
+ *
+ * Copyright (C) 2011 by Jarod Wilson <jarod@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+
+#include "rc-core-priv.h"
+
+/*
+ * This decoder currently supports:
+ * - MCIR-2 29-bit IR signals used for mouse movement and buttons
+ * - MCIR-2 32-bit IR signals used for standard keyboard keys
+ *
+ * The media keys on the keyboard send RC-6 signals that are indistinguishable
+ * from the keys of the same name on the stock MCE remote, so they are handled
+ * by the standard RC-6 decoder and made available to the system via the input
+ * device for the remote, rather than the keyboard/mouse one.
+ */
+
+#define MCIR2_UNIT 333333 /* ns */
+#define MCIR2_HEADER_NBITS 5
+#define MCIR2_MOUSE_NBITS 29
+#define MCIR2_KEYBOARD_NBITS 32
+#define MCIR2_PREFIX_PULSE (8 * MCIR2_UNIT)
+#define MCIR2_PREFIX_SPACE (1 * MCIR2_UNIT)
+#define MCIR2_MAX_LEN (3 * MCIR2_UNIT)
+#define MCIR2_BIT_START (1 * MCIR2_UNIT)
+#define MCIR2_BIT_END (1 * MCIR2_UNIT)
+#define MCIR2_BIT_0 (1 * MCIR2_UNIT)
+#define MCIR2_BIT_SET (2 * MCIR2_UNIT)
+#define MCIR2_MODE_MASK 0xf /* for the header bits */
+#define MCIR2_KEYBOARD_HEADER 0x4
+#define MCIR2_MOUSE_HEADER 0x1
+#define MCIR2_MASK_KEYS_START 0xe0
+
+enum mce_kbd_mode {
+ MCIR2_MODE_KEYBOARD,
+ MCIR2_MODE_MOUSE,
+ MCIR2_MODE_UNKNOWN,
+};
+
+enum mce_kbd_state {
+ STATE_INACTIVE,
+ STATE_HEADER_BIT_START,
+ STATE_HEADER_BIT_END,
+ STATE_BODY_BIT_START,
+ STATE_BODY_BIT_END,
+ STATE_FINISHED,
+};
+
+static unsigned char kbd_keycodes[256] = {
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_A,
+ KEY_B, KEY_C, KEY_D, KEY_E, KEY_F,
+ KEY_G, KEY_H, KEY_I, KEY_J, KEY_K,
+ KEY_L, KEY_M, KEY_N, KEY_O, KEY_P,
+ KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U,
+ KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z,
+ KEY_1, KEY_2, KEY_3, KEY_4, KEY_5,
+ KEY_6, KEY_7, KEY_8, KEY_9, KEY_0,
+ KEY_ENTER, KEY_ESC, KEY_BACKSPACE, KEY_TAB, KEY_SPACE,
+ KEY_MINUS, KEY_EQUAL, KEY_LEFTBRACE, KEY_RIGHTBRACE, KEY_BACKSLASH,
+ KEY_RESERVED, KEY_SEMICOLON, KEY_APOSTROPHE, KEY_GRAVE, KEY_COMMA,
+ KEY_DOT, KEY_SLASH, KEY_CAPSLOCK, KEY_F1, KEY_F2,
+ KEY_F3, KEY_F4, KEY_F5, KEY_F6, KEY_F7,
+ KEY_F8, KEY_F9, KEY_F10, KEY_F11, KEY_F12,
+ KEY_SYSRQ, KEY_SCROLLLOCK, KEY_PAUSE, KEY_INSERT, KEY_HOME,
+ KEY_PAGEUP, KEY_DELETE, KEY_END, KEY_PAGEDOWN, KEY_RIGHT,
+ KEY_LEFT, KEY_DOWN, KEY_UP, KEY_NUMLOCK, KEY_KPSLASH,
+ KEY_KPASTERISK, KEY_KPMINUS, KEY_KPPLUS, KEY_KPENTER, KEY_KP1,
+ KEY_KP2, KEY_KP3, KEY_KP4, KEY_KP5, KEY_KP6,
+ KEY_KP7, KEY_KP8, KEY_KP9, KEY_KP0, KEY_KPDOT,
+ KEY_102ND, KEY_COMPOSE, KEY_POWER, KEY_KPEQUAL, KEY_F13,
+ KEY_F14, KEY_F15, KEY_F16, KEY_F17, KEY_F18,
+ KEY_F19, KEY_F20, KEY_F21, KEY_F22, KEY_F23,
+ KEY_F24, KEY_OPEN, KEY_HELP, KEY_PROPS, KEY_FRONT,
+ KEY_STOP, KEY_AGAIN, KEY_UNDO, KEY_CUT, KEY_COPY,
+ KEY_PASTE, KEY_FIND, KEY_MUTE, KEY_VOLUMEUP, KEY_VOLUMEDOWN,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_KPCOMMA, KEY_RESERVED,
+ KEY_RO, KEY_KATAKANAHIRAGANA, KEY_YEN, KEY_HENKAN, KEY_MUHENKAN,
+ KEY_KPJPCOMMA, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_HANGUEL,
+ KEY_HANJA, KEY_KATAKANA, KEY_HIRAGANA, KEY_ZENKAKUHANKAKU, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_LEFTCTRL,
+ KEY_LEFTSHIFT, KEY_LEFTALT, KEY_LEFTMETA, KEY_RIGHTCTRL, KEY_RIGHTSHIFT,
+ KEY_RIGHTALT, KEY_RIGHTMETA, KEY_PLAYPAUSE, KEY_STOPCD, KEY_PREVIOUSSONG,
+ KEY_NEXTSONG, KEY_EJECTCD, KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_MUTE,
+ KEY_WWW, KEY_BACK, KEY_FORWARD, KEY_STOP, KEY_FIND,
+ KEY_SCROLLUP, KEY_SCROLLDOWN, KEY_EDIT, KEY_SLEEP, KEY_COFFEE,
+ KEY_REFRESH, KEY_CALC, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
+ KEY_RESERVED
+};
+
+static void mce_kbd_rx_timeout(unsigned long data)
+{
+ struct mce_kbd_dec *mce_kbd = (struct mce_kbd_dec *)data;
+ int i;
+ unsigned char maskcode;
+
+ IR_dprintk(2, "timer callback clearing all keys\n");
+
+ for (i = 0; i < 7; i++) {
+ maskcode = kbd_keycodes[MCIR2_MASK_KEYS_START + i];
+ input_report_key(mce_kbd->idev, maskcode, 0);
+ }
+
+ for (i = 0; i < MCIR2_MASK_KEYS_START; i++)
+ input_report_key(mce_kbd->idev, kbd_keycodes[i], 0);
+}
+
+static enum mce_kbd_mode mce_kbd_mode(struct mce_kbd_dec *data)
+{
+ switch (data->header & MCIR2_MODE_MASK) {
+ case MCIR2_KEYBOARD_HEADER:
+ return MCIR2_MODE_KEYBOARD;
+ case MCIR2_MOUSE_HEADER:
+ return MCIR2_MODE_MOUSE;
+ default:
+ return MCIR2_MODE_UNKNOWN;
+ }
+}
+
+static void ir_mce_kbd_process_keyboard_data(struct input_dev *idev,
+ u32 scancode)
+{
+ u8 keydata = (scancode >> 8) & 0xff;
+ u8 shiftmask = scancode & 0xff;
+ unsigned char keycode, maskcode;
+ int i, keystate;
+
+ IR_dprintk(1, "keyboard: keydata = 0x%02x, shiftmask = 0x%02x\n",
+ keydata, shiftmask);
+
+ for (i = 0; i < 7; i++) {
+ maskcode = kbd_keycodes[MCIR2_MASK_KEYS_START + i];
+ if (shiftmask & (1 << i))
+ keystate = 1;
+ else
+ keystate = 0;
+ input_report_key(idev, maskcode, keystate);
+ }
+
+ if (keydata) {
+ keycode = kbd_keycodes[keydata];
+ input_report_key(idev, keycode, 1);
+ } else {
+ for (i = 0; i < MCIR2_MASK_KEYS_START; i++)
+ input_report_key(idev, kbd_keycodes[i], 0);
+ }
+}
+
+static void ir_mce_kbd_process_mouse_data(struct input_dev *idev, u32 scancode)
+{
+ /* raw mouse coordinates */
+ u8 xdata = (scancode >> 7) & 0x7f;
+ u8 ydata = (scancode >> 14) & 0x7f;
+ int x, y;
+ /* mouse buttons */
+ bool right = scancode & 0x40;
+ bool left = scancode & 0x20;
+
+ if (xdata & 0x40)
+ x = -((~xdata & 0x7f) + 1);
+ else
+ x = xdata;
+
+ if (ydata & 0x40)
+ y = -((~ydata & 0x7f) + 1);
+ else
+ y = ydata;
+
+ IR_dprintk(1, "mouse: x = %d, y = %d, btns = %s%s\n",
+ x, y, left ? "L" : "", right ? "R" : "");
+
+ input_report_rel(idev, REL_X, x);
+ input_report_rel(idev, REL_Y, y);
+
+ input_report_key(idev, BTN_LEFT, left);
+ input_report_key(idev, BTN_RIGHT, right);
+}
+
+/**
+ * ir_mce_kbd_decode() - Decode one mce_kbd pulse or space
+ * @dev: the struct rc_dev descriptor of the device
+ * @ev: the struct ir_raw_event descriptor of the pulse/space
+ *
+ * This function returns -EINVAL if the pulse violates the state machine
+ */
+static int ir_mce_kbd_decode(struct rc_dev *dev, struct ir_raw_event ev)
+{
+ struct mce_kbd_dec *data = &dev->raw->mce_kbd;
+ u32 scancode;
+ unsigned long delay;
+
+ if (!(dev->raw->enabled_protocols & RC_TYPE_MCE_KBD))
+ return 0;
+
+ if (!is_timing_event(ev)) {
+ if (ev.reset)
+ data->state = STATE_INACTIVE;
+ return 0;
+ }
+
+ if (!geq_margin(ev.duration, MCIR2_UNIT, MCIR2_UNIT / 2))
+ goto out;
+
+again:
+ IR_dprintk(2, "started at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+
+ if (!geq_margin(ev.duration, MCIR2_UNIT, MCIR2_UNIT / 2))
+ return 0;
+
+ switch (data->state) {
+
+ case STATE_INACTIVE:
+ if (!ev.pulse)
+ break;
+
+ /* Note: larger margin on first pulse since each MCIR2_UNIT
+ is quite short and some hardware takes some time to
+ adjust to the signal */
+ if (!eq_margin(ev.duration, MCIR2_PREFIX_PULSE, MCIR2_UNIT))
+ break;
+
+ data->state = STATE_HEADER_BIT_START;
+ data->count = 0;
+ data->header = 0;
+ return 0;
+
+ case STATE_HEADER_BIT_START:
+ if (geq_margin(ev.duration, MCIR2_MAX_LEN, MCIR2_UNIT / 2))
+ break;
+
+ data->header <<= 1;
+ if (ev.pulse)
+ data->header |= 1;
+ data->count++;
+ data->state = STATE_HEADER_BIT_END;
+ return 0;
+
+ case STATE_HEADER_BIT_END:
+ if (!is_transition(&ev, &dev->raw->prev_ev))
+ break;
+
+ decrease_duration(&ev, MCIR2_BIT_END);
+
+ if (data->count != MCIR2_HEADER_NBITS) {
+ data->state = STATE_HEADER_BIT_START;
+ goto again;
+ }
+
+ switch (mce_kbd_mode(data)) {
+ case MCIR2_MODE_KEYBOARD:
+ data->wanted_bits = MCIR2_KEYBOARD_NBITS;
+ break;
+ case MCIR2_MODE_MOUSE:
+ data->wanted_bits = MCIR2_MOUSE_NBITS;
+ break;
+ default:
+ IR_dprintk(1, "not keyboard or mouse data\n");
+ goto out;
+ }
+
+ data->count = 0;
+ data->body = 0;
+ data->state = STATE_BODY_BIT_START;
+ goto again;
+
+ case STATE_BODY_BIT_START:
+ if (geq_margin(ev.duration, MCIR2_MAX_LEN, MCIR2_UNIT / 2))
+ break;
+
+ data->body <<= 1;
+ if (ev.pulse)
+ data->body |= 1;
+ data->count++;
+ data->state = STATE_BODY_BIT_END;
+ return 0;
+
+ case STATE_BODY_BIT_END:
+ if (!is_transition(&ev, &dev->raw->prev_ev))
+ break;
+
+ if (data->count == data->wanted_bits)
+ data->state = STATE_FINISHED;
+ else
+ data->state = STATE_BODY_BIT_START;
+
+ decrease_duration(&ev, MCIR2_BIT_END);
+ goto again;
+
+ case STATE_FINISHED:
+ if (ev.pulse)
+ break;
+
+ switch (data->wanted_bits) {
+ case MCIR2_KEYBOARD_NBITS:
+ scancode = data->body & 0xffff;
+ IR_dprintk(1, "keyboard data 0x%08x\n", data->body);
+ if (dev->timeout)
+ delay = usecs_to_jiffies(dev->timeout / 1000);
+ else
+ delay = msecs_to_jiffies(100);
+ mod_timer(&data->rx_timeout, jiffies + delay);
+ /* Pass data to keyboard buffer parser */
+ ir_mce_kbd_process_keyboard_data(data->idev, scancode);
+ break;
+ case MCIR2_MOUSE_NBITS:
+ scancode = data->body & 0x1fffff;
+ IR_dprintk(1, "mouse data 0x%06x\n", scancode);
+ /* Pass data to mouse buffer parser */
+ ir_mce_kbd_process_mouse_data(data->idev, scancode);
+ break;
+ default:
+ IR_dprintk(1, "not keyboard or mouse data\n");
+ goto out;
+ }
+
+ data->state = STATE_INACTIVE;
+ input_sync(data->idev);
+ return 0;
+ }
+
+out:
+ IR_dprintk(1, "failed at state %i (%uus %s)\n",
+ data->state, TO_US(ev.duration), TO_STR(ev.pulse));
+ data->state = STATE_INACTIVE;
+ input_sync(data->idev);
+ return -EINVAL;
+}
+
+static int ir_mce_kbd_register(struct rc_dev *dev)
+{
+ struct mce_kbd_dec *mce_kbd = &dev->raw->mce_kbd;
+ struct input_dev *idev;
+ int i, ret;
+
+ idev = input_allocate_device();
+ if (!idev)
+ return -ENOMEM;
+
+ snprintf(mce_kbd->name, sizeof(mce_kbd->name),
+ "MCE IR Keyboard/Mouse (%s)", dev->driver_name);
+ strlcat(mce_kbd->phys, "/input0", sizeof(mce_kbd->phys));
+
+ idev->name = mce_kbd->name;
+ idev->phys = mce_kbd->phys;
+
+ /* Keyboard bits */
+ set_bit(EV_KEY, idev->evbit);
+ set_bit(EV_REP, idev->evbit);
+ for (i = 0; i < sizeof(kbd_keycodes); i++)
+ set_bit(kbd_keycodes[i], idev->keybit);
+
+ /* Mouse bits */
+ set_bit(EV_REL, idev->evbit);
+ set_bit(REL_X, idev->relbit);
+ set_bit(REL_Y, idev->relbit);
+ set_bit(BTN_LEFT, idev->keybit);
+ set_bit(BTN_RIGHT, idev->keybit);
+
+ /* Report scancodes too */
+ set_bit(EV_MSC, idev->evbit);
+ set_bit(MSC_SCAN, idev->mscbit);
+
+ setup_timer(&mce_kbd->rx_timeout, mce_kbd_rx_timeout,
+ (unsigned long)mce_kbd);
+
+ input_set_drvdata(idev, mce_kbd);
+
+#if 0
+ /* Adding this reference means two input devices are associated with
+ * this rc-core device, which ir-keytable doesn't cope with yet */
+ idev->dev.parent = &dev->dev;
+#endif
+
+ ret = input_register_device(idev);
+ if (ret < 0) {
+ input_free_device(idev);
+ return -EIO;
+ }
+
+ mce_kbd->idev = idev;
+
+ return 0;
+}
+
+static int ir_mce_kbd_unregister(struct rc_dev *dev)
+{
+ struct mce_kbd_dec *mce_kbd = &dev->raw->mce_kbd;
+ struct input_dev *idev = mce_kbd->idev;
+
+ del_timer_sync(&mce_kbd->rx_timeout);
+ input_unregister_device(idev);
+
+ return 0;
+}
+
+static struct ir_raw_handler mce_kbd_handler = {
+ .protocols = RC_TYPE_MCE_KBD,
+ .decode = ir_mce_kbd_decode,
+ .raw_register = ir_mce_kbd_register,
+ .raw_unregister = ir_mce_kbd_unregister,
+};
+
+static int __init ir_mce_kbd_decode_init(void)
+{
+ ir_raw_handler_register(&mce_kbd_handler);
+
+ printk(KERN_INFO "IR MCE Keyboard/mouse protocol handler initialized\n");
+ return 0;
+}
+
+static void __exit ir_mce_kbd_decode_exit(void)
+{
+ ir_raw_handler_unregister(&mce_kbd_handler);
+}
+
+module_init(ir_mce_kbd_decode_init);
+module_exit(ir_mce_kbd_decode_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_DESCRIPTION("MCE Keyboard/mouse IR protocol decoder");
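
For reference, a standalone sketch of the body bit layout the new decoder consumes: a keyboard frame carries keydata and the modifier shiftmask in its low 16 bits, while a mouse frame packs two 7-bit two's-complement deltas plus the button bits, exactly as ir_mce_kbd_process_keyboard_data() and ir_mce_kbd_process_mouse_data() above unpack them. The sample frame values below are made up for illustration:

#include <stdio.h>

/* 7-bit two's complement, matching the x/y handling in the decoder */
static int sign7(unsigned v)
{
	return (v & 0x40) ? -(int)((~v & 0x7f) + 1) : (int)v;
}

int main(void)
{
	unsigned kbd = 0x0402;		/* keydata 0x04 (KEY_A), shiftmask 0x02 (left shift) */
	unsigned mouse = 0x17ea0;	/* x = -3, y = +5, left button held */

	printf("kbd: keydata=0x%02x shiftmask=0x%02x\n",
	       (kbd >> 8) & 0xff, kbd & 0xff);
	printf("mouse: x=%d y=%d left=%d right=%d\n",
	       sign7((mouse >> 7) & 0x7f), sign7((mouse >> 14) & 0x7f),
	       !!(mouse & 0x20), !!(mouse & 0x40));
	return 0;
}
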
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 11c19d8d0ee..27808bb59eb 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -114,18 +114,20 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
s64 delta; /* ns */
DEFINE_IR_RAW_EVENT(ev);
int rc = 0;
+ int delay;
if (!dev->raw)
return -EINVAL;
now = ktime_get();
delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
+ delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);
/* Check for a long duration since last event or if we're
* being called for the first time, note that delta can't
* possibly be negative.
*/
- if (delta > IR_MAX_DURATION || !dev->raw->last_type)
+ if (delta > delay || !dev->raw->last_type)
type |= IR_START_EVENT;
else
ev.duration = delta;
@@ -353,6 +355,7 @@ static void init_decoders(struct work_struct *work)
load_rc6_decode();
load_jvc_decode();
load_sony_decode();
+ load_mce_kbd_decode();
load_lirc_codec();
/* If needed, we may later add some init code. In this case,
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index e716b931cf7..682009d76cd 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -42,7 +42,6 @@
#include <linux/bitops.h>
#include <media/rc-core.h>
#include <linux/pci_ids.h>
-#include <linux/delay.h>
#include "ite-cir.h"
@@ -383,7 +382,7 @@ static int ite_set_tx_duty_cycle(struct rc_dev *rcdev, u32 duty_cycle)
/* transmit out IR pulses; what you get here is a batch of alternating
* pulse/space/pulse/space lengths that we should write out completely through
* the FIFO, blocking on a full FIFO */
-static int ite_tx_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
+static int ite_tx_ir(struct rc_dev *rcdev, unsigned *txbuf, unsigned n)
{
unsigned long flags;
struct ite_dev *dev = rcdev->priv;
@@ -399,9 +398,6 @@ static int ite_tx_ir(struct rc_dev *rcdev, int *txbuf, u32 n)
/* clear the array just in case */
memset(last_sent, 0, ARRAY_SIZE(last_sent));
- /* n comes in bytes; convert to ints */
- n /= sizeof(int);
-
spin_lock_irqsave(&dev->lock, flags);
/* let everybody know we're now transmitting */
@@ -1347,6 +1343,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
{ /* 0: ITE8704 */
.model = "ITE8704 CIR transceiver",
.io_region_size = IT87_IOREG_LENGTH,
+ .io_rsrc_no = 0,
.hw_tx_capable = true,
.sample_period = (u32) (1000000000ULL / 115200),
.tx_carrier_freq = 38000,
@@ -1371,6 +1368,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
{ /* 1: ITE8713 */
.model = "ITE8713 CIR transceiver",
.io_region_size = IT87_IOREG_LENGTH,
+ .io_rsrc_no = 0,
.hw_tx_capable = true,
.sample_period = (u32) (1000000000ULL / 115200),
.tx_carrier_freq = 38000,
@@ -1395,6 +1393,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
{ /* 2: ITE8708 */
.model = "ITE8708 CIR transceiver",
.io_region_size = IT8708_IOREG_LENGTH,
+ .io_rsrc_no = 0,
.hw_tx_capable = true,
.sample_period = (u32) (1000000000ULL / 115200),
.tx_carrier_freq = 38000,
@@ -1420,6 +1419,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
{ /* 3: ITE8709 */
.model = "ITE8709 CIR transceiver",
.io_region_size = IT8709_IOREG_LENGTH,
+ .io_rsrc_no = 2,
.hw_tx_capable = true,
.sample_period = (u32) (1000000000ULL / 115200),
.tx_carrier_freq = 38000,
@@ -1461,6 +1461,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
struct rc_dev *rdev = NULL;
int ret = -ENOMEM;
int model_no;
+ int io_rsrc_no;
ite_dbg("%s called", __func__);
@@ -1490,10 +1491,11 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
/* get the description for the device */
dev_desc = &ite_dev_descs[model_no];
+ io_rsrc_no = dev_desc->io_rsrc_no;
/* validate pnp resources */
- if (!pnp_port_valid(pdev, 0) ||
- pnp_port_len(pdev, 0) != dev_desc->io_region_size) {
+ if (!pnp_port_valid(pdev, io_rsrc_no) ||
+ pnp_port_len(pdev, io_rsrc_no) != dev_desc->io_region_size) {
dev_err(&pdev->dev, "IR PNP Port not valid!\n");
goto failure;
}
@@ -1504,7 +1506,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
}
/* store resource values */
- itdev->cir_addr = pnp_port_start(pdev, 0);
+ itdev->cir_addr = pnp_port_start(pdev, io_rsrc_no);
itdev->cir_irq = pnp_irq(pdev, 0);
/* initialize spinlocks */
diff --git a/drivers/media/rc/ite-cir.h b/drivers/media/rc/ite-cir.h
index 16a19f5fd71..aa899a0b975 100644
--- a/drivers/media/rc/ite-cir.h
+++ b/drivers/media/rc/ite-cir.h
@@ -57,6 +57,9 @@ struct ite_dev_params {
/* size of the I/O region */
int io_region_size;
+ /* IR pnp I/O resource number */
+ int io_rsrc_no;
+
/* true if the hardware supports transmission */
bool hw_tx_capable;
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
index bb10ffe086b..8d558ae6345 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
@@ -15,43 +15,39 @@
/* Pinnacle PCTV HD 800i mini remote */
static struct rc_map_table pinnacle_pctv_hd[] = {
-
- { 0x0f, KEY_1 },
- { 0x15, KEY_2 },
- { 0x10, KEY_3 },
- { 0x18, KEY_4 },
- { 0x1b, KEY_5 },
- { 0x1e, KEY_6 },
- { 0x11, KEY_7 },
- { 0x21, KEY_8 },
- { 0x12, KEY_9 },
- { 0x27, KEY_0 },
-
- { 0x24, KEY_ZOOM },
- { 0x2a, KEY_SUBTITLE },
-
- { 0x00, KEY_MUTE },
- { 0x01, KEY_ENTER }, /* Pinnacle Logo */
- { 0x39, KEY_POWER },
-
- { 0x03, KEY_VOLUMEUP },
- { 0x09, KEY_VOLUMEDOWN },
- { 0x06, KEY_CHANNELUP },
- { 0x0c, KEY_CHANNELDOWN },
-
- { 0x2d, KEY_REWIND },
- { 0x30, KEY_PLAYPAUSE },
- { 0x33, KEY_FASTFORWARD },
- { 0x3c, KEY_STOP },
- { 0x36, KEY_RECORD },
- { 0x3f, KEY_EPG }, /* Labeled "?" */
+ /* Key codes for the tiny Pinnacle remote */
+ { 0x0700, KEY_MUTE },
+ { 0x0701, KEY_MENU }, /* Pinnacle logo */
+ { 0x0739, KEY_POWER },
+ { 0x0703, KEY_VOLUMEUP },
+ { 0x0709, KEY_VOLUMEDOWN },
+ { 0x0706, KEY_CHANNELUP },
+ { 0x070c, KEY_CHANNELDOWN },
+ { 0x070f, KEY_1 },
+ { 0x0715, KEY_2 },
+ { 0x0710, KEY_3 },
+ { 0x0718, KEY_4 },
+ { 0x071b, KEY_5 },
+ { 0x071e, KEY_6 },
+ { 0x0711, KEY_7 },
+ { 0x0721, KEY_8 },
+ { 0x0712, KEY_9 },
+ { 0x0727, KEY_0 },
+ { 0x0724, KEY_ZOOM }, /* 'Square' key */
+ { 0x072a, KEY_SUBTITLE }, /* 'T' key */
+ { 0x072d, KEY_REWIND },
+ { 0x0730, KEY_PLAYPAUSE },
+ { 0x0733, KEY_FASTFORWARD },
+ { 0x0736, KEY_RECORD },
+ { 0x073c, KEY_STOP },
+ { 0x073f, KEY_HELP }, /* '?' key */
};
static struct rc_map_list pinnacle_pctv_hd_map = {
.map = {
.scan = pinnacle_pctv_hd,
.size = ARRAY_SIZE(pinnacle_pctv_hd),
- .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
+ .rc_type = RC_TYPE_RC5,
.name = RC_MAP_PINNACLE_PCTV_HD,
}
};
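
The keymap above switches from bare command bytes to full RC-5 scancodes, which is why the table type becomes RC_TYPE_RC5: the rc-core RC-5 decoder reports the system (address) byte in the high byte and the command in the low byte, so this remote's address 0x07 is now part of every entry. A small sketch of that composition, with values taken from the table above (the helper name is illustrative):

#include <stdio.h>

/* compose an RC-5 style scancode: (address << 8) | command */
static unsigned rc5_scancode(unsigned address, unsigned command)
{
	return ((address & 0xff) << 8) | (command & 0xff);
}

int main(void)
{
	unsigned mute = rc5_scancode(0x07, 0x00);	/* 0x0700, KEY_MUTE above */
	unsigned help = rc5_scancode(0x07, 0x3f);	/* 0x073f, KEY_HELP above */

	printf("mute=0x%04x help=0x%04x address=0x%02x\n",
	       mute, help, (help >> 8) & 0xff);
	return 0;
}
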
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index 01b69bcc866..c3907e211d3 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -29,7 +29,7 @@ static struct rc_map_table rc6_mce[] = {
{ 0x800f040a, KEY_DELETE },
{ 0x800f040b, KEY_ENTER },
- { 0x800f040c, KEY_POWER }, /* PC Power */
+ { 0x800f040c, KEY_SLEEP }, /* Formerly PC Power */
{ 0x800f040d, KEY_MEDIA }, /* Windows MCE button */
{ 0x800f040e, KEY_MUTE },
{ 0x800f040f, KEY_INFO },
@@ -44,7 +44,6 @@ static struct rc_map_table rc6_mce[] = {
{ 0x800f0416, KEY_PLAY },
{ 0x800f0417, KEY_RECORD },
{ 0x800f0418, KEY_PAUSE },
- { 0x800f046e, KEY_PLAYPAUSE },
{ 0x800f0419, KEY_STOP },
{ 0x800f041a, KEY_NEXT },
{ 0x800f041b, KEY_PREVIOUS },
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index fd237ab120b..27997a9ceb0 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -55,6 +55,8 @@ struct irctl {
struct lirc_buffer *buf;
unsigned int chunk_size;
+ struct cdev *cdev;
+
struct task_struct *task;
long jiffies_to_wait;
};
@@ -62,7 +64,6 @@ struct irctl {
static DEFINE_MUTEX(lirc_dev_lock);
static struct irctl *irctls[MAX_IRCTL_DEVICES];
-static struct cdev cdevs[MAX_IRCTL_DEVICES];
/* Only used for sysfs but defined to void otherwise */
static struct class *lirc_class;
@@ -167,9 +168,13 @@ static struct file_operations lirc_dev_fops = {
static int lirc_cdev_add(struct irctl *ir)
{
- int retval;
+ int retval = -ENOMEM;
struct lirc_driver *d = &ir->d;
- struct cdev *cdev = &cdevs[d->minor];
+ struct cdev *cdev;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ goto err_out;
if (d->fops) {
cdev_init(cdev, d->fops);
@@ -180,12 +185,20 @@ static int lirc_cdev_add(struct irctl *ir)
}
retval = kobject_set_name(&cdev->kobj, "lirc%d", d->minor);
if (retval)
- return retval;
+ goto err_out;
retval = cdev_add(cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1);
- if (retval)
+ if (retval) {
kobject_put(&cdev->kobj);
+ goto err_out;
+ }
+
+ ir->cdev = cdev;
+
+ return 0;
+err_out:
+ kfree(cdev);
return retval;
}
@@ -214,7 +227,7 @@ int lirc_register_driver(struct lirc_driver *d)
if (MAX_IRCTL_DEVICES <= d->minor) {
dev_err(d->dev, "lirc_dev: lirc_register_driver: "
"\"minor\" must be between 0 and %d (%d)!\n",
- MAX_IRCTL_DEVICES-1, d->minor);
+ MAX_IRCTL_DEVICES - 1, d->minor);
err = -EBADRQC;
goto out;
}
@@ -369,7 +382,7 @@ int lirc_unregister_driver(int minor)
if (minor < 0 || minor >= MAX_IRCTL_DEVICES) {
printk(KERN_ERR "lirc_dev: %s: minor (%d) must be between "
- "0 and %d!\n", __func__, minor, MAX_IRCTL_DEVICES-1);
+ "0 and %d!\n", __func__, minor, MAX_IRCTL_DEVICES - 1);
return -EBADRQC;
}
@@ -380,7 +393,7 @@ int lirc_unregister_driver(int minor)
return -ENOENT;
}
- cdev = &cdevs[minor];
+ cdev = ir->cdev;
mutex_lock(&lirc_dev_lock);
@@ -410,6 +423,7 @@ int lirc_unregister_driver(int minor)
} else {
lirc_irctl_cleanup(ir);
cdev_del(cdev);
+ kfree(cdev);
kfree(ir);
irctls[minor] = NULL;
}
@@ -453,7 +467,7 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
goto error;
}
- cdev = &cdevs[iminor(inode)];
+ cdev = ir->cdev;
if (try_module_get(cdev->owner)) {
ir->open++;
retval = ir->d.set_use_inc(ir->d.data);
@@ -484,13 +498,15 @@ EXPORT_SYMBOL(lirc_dev_fop_open);
int lirc_dev_fop_close(struct inode *inode, struct file *file)
{
struct irctl *ir = irctls[iminor(inode)];
- struct cdev *cdev = &cdevs[iminor(inode)];
+ struct cdev *cdev;
if (!ir) {
printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
return -EINVAL;
}
+ cdev = ir->cdev;
+
dev_dbg(ir->d.dev, LOGHEAD "close called\n", ir->d.name, ir->d.minor);
WARN_ON(mutex_lock_killable(&lirc_dev_lock));
@@ -503,6 +519,7 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
lirc_irctl_cleanup(ir);
cdev_del(cdev);
irctls[ir->d.minor] = NULL;
+ kfree(cdev);
kfree(ir);
}
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index ad927fcaa02..85ff9a1ffb3 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -108,6 +108,12 @@ static int debug = 1;
static int debug;
#endif
+#define mce_dbg(dev, fmt, ...) \
+ do { \
+ if (debug) \
+ dev_info(dev, fmt, ## __VA_ARGS__); \
+ } while (0)
+
/* general constants */
#define SEND_FLAG_IN_PROGRESS 1
#define SEND_FLAG_COMPLETE 2
@@ -246,6 +252,9 @@ static struct usb_device_id mceusb_dev_table[] = {
.driver_info = MCE_GEN2_TX_INV },
/* SMK eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_SMK, 0x0338) },
+ /* SMK/I-O Data GV-MC7/RCKIT Receiver */
+ { USB_DEVICE(VENDOR_SMK, 0x0353),
+ .driver_info = MCE_GEN2_NO_TX },
/* Tatung eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_TATUNG, 0x9150) },
/* Shuttle eHome Infrared Transceiver */
@@ -549,9 +558,10 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
inout, data1);
break;
case MCE_CMD_S_TIMEOUT:
- /* value is in units of 50us, so x*50/100 or x/2 ms */
+ /* value is in units of 50us, so x*50/1000 ms */
dev_info(dev, "%s receive timeout of %d ms\n",
- inout, ((data1 << 8) | data2) / 2);
+ inout,
+ ((data1 << 8) | data2) * MCE_TIME_UNIT / 1000);
break;
case MCE_CMD_G_TIMEOUT:
dev_info(dev, "Get receive timeout\n");
@@ -606,12 +616,15 @@ static void mce_async_callback(struct urb *urb, struct pt_regs *regs)
if (ir) {
len = urb->actual_length;
- dev_dbg(ir->dev, "callback called (status=%d len=%d)\n",
+ mce_dbg(ir->dev, "callback called (status=%d len=%d)\n",
urb->status, len);
mceusb_dev_printdata(ir, urb->transfer_buffer, 0, len, true);
}
+ /* the transfer buffer and urb were allocated in mce_request_packet */
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
}
/* request incoming or send outgoing usb packet - used to initialize remote */
@@ -655,17 +668,17 @@ static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
return;
}
- dev_dbg(dev, "receive request called (size=%#x)\n", size);
+ mce_dbg(dev, "receive request called (size=%#x)\n", size);
async_urb->transfer_buffer_length = size;
async_urb->dev = ir->usbdev;
res = usb_submit_urb(async_urb, GFP_ATOMIC);
if (res) {
- dev_dbg(dev, "receive request FAILED! (res=%d)\n", res);
+ mce_dbg(dev, "receive request FAILED! (res=%d)\n", res);
return;
}
- dev_dbg(dev, "receive request complete (res=%d)\n", res);
+ mce_dbg(dev, "receive request complete (res=%d)\n", res);
}
static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
@@ -673,26 +686,24 @@ static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
mce_request_packet(ir, data, size, MCEUSB_TX);
}
-static void mce_sync_in(struct mceusb_dev *ir, unsigned char *data, int size)
+static void mce_flush_rx_buffer(struct mceusb_dev *ir, int size)
{
- mce_request_packet(ir, data, size, MCEUSB_RX);
+ mce_request_packet(ir, NULL, size, MCEUSB_RX);
}
/* Send data out the IR blaster port(s) */
-static int mceusb_tx_ir(struct rc_dev *dev, int *txbuf, u32 n)
+static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
struct mceusb_dev *ir = dev->priv;
int i, ret = 0;
- int count, cmdcount = 0;
+ int cmdcount = 0;
unsigned char *cmdbuf; /* MCE command buffer */
long signal_duration = 0; /* Signal length in us */
struct timeval start_time, end_time;
do_gettimeofday(&start_time);
- count = n / sizeof(int);
-
- cmdbuf = kzalloc(sizeof(int) * MCE_CMDBUF_SIZE, GFP_KERNEL);
+ cmdbuf = kzalloc(sizeof(unsigned) * MCE_CMDBUF_SIZE, GFP_KERNEL);
if (!cmdbuf)
return -ENOMEM;
@@ -761,7 +772,7 @@ static int mceusb_tx_ir(struct rc_dev *dev, int *txbuf, u32 n)
out:
kfree(cmdbuf);
- return ret ? ret : n;
+ return ret ? ret : count;
}
/* Sets active IR outputs -- mce devices typically have two */
@@ -794,7 +805,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
ir->carrier = carrier;
cmdbuf[2] = MCE_CMD_SIG_END;
cmdbuf[3] = MCE_IRDATA_TRAILER;
- dev_dbg(ir->dev, "%s: disabling carrier "
+ mce_dbg(ir->dev, "%s: disabling carrier "
"modulation\n", __func__);
mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
return carrier;
@@ -806,7 +817,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
ir->carrier = carrier;
cmdbuf[2] = prescaler;
cmdbuf[3] = divisor;
- dev_dbg(ir->dev, "%s: requesting %u HZ "
+ mce_dbg(ir->dev, "%s: requesting %u HZ "
"carrier\n", __func__, carrier);
/* Transmit new carrier to mce device */
@@ -835,7 +846,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
switch (ir->buf_in[index]) {
/* 2-byte return value commands */
case MCE_CMD_S_TIMEOUT:
- ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2);
+ ir->rc->timeout = US_TO_NS((hi << 8 | lo) * MCE_TIME_UNIT);
break;
/* 1-byte return value commands */
@@ -879,7 +890,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
* US_TO_NS(MCE_TIME_UNIT);
- dev_dbg(ir->dev, "Storing %s with duration %d\n",
+ mce_dbg(ir->dev, "Storing %s with duration %d\n",
rawir.pulse ? "pulse" : "space",
rawir.duration);
@@ -911,7 +922,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
if (ir->parser_state != CMD_HEADER && !ir->rem)
ir->parser_state = CMD_HEADER;
}
- dev_dbg(ir->dev, "processed IR data, calling ir_raw_event_handle\n");
+ mce_dbg(ir->dev, "processed IR data, calling ir_raw_event_handle\n");
ir_raw_event_handle(ir->rc);
}
@@ -933,7 +944,7 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
if (ir->send_flags == RECV_FLAG_IN_PROGRESS) {
ir->send_flags = SEND_FLAG_COMPLETE;
- dev_dbg(ir->dev, "setup answer received %d bytes\n",
+ mce_dbg(ir->dev, "setup answer received %d bytes\n",
buf_len);
}
@@ -951,7 +962,7 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
case -EPIPE:
default:
- dev_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
+ mce_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
break;
}
@@ -961,7 +972,6 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
static void mceusb_gen1_init(struct mceusb_dev *ir)
{
int ret;
- int maxp = ir->len_in;
struct device *dev = ir->dev;
char *data;
@@ -978,8 +988,8 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0,
data, USB_CTRL_MSG_SZ, HZ * 3);
- dev_dbg(dev, "%s - ret = %d\n", __func__, ret);
- dev_dbg(dev, "%s - data[0] = %d, data[1] = %d\n",
+ mce_dbg(dev, "%s - ret = %d\n", __func__, ret);
+ mce_dbg(dev, "%s - data[0] = %d, data[1] = %d\n",
__func__, data[0], data[1]);
/* set feature: bit rate 38400 bps */
@@ -987,71 +997,56 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
0xc04e, 0x0000, NULL, 0, HZ * 3);
- dev_dbg(dev, "%s - ret = %d\n", __func__, ret);
+ mce_dbg(dev, "%s - ret = %d\n", __func__, ret);
/* bRequest 4: set char length to 8 bits */
ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
4, USB_TYPE_VENDOR,
0x0808, 0x0000, NULL, 0, HZ * 3);
- dev_dbg(dev, "%s - retB = %d\n", __func__, ret);
+ mce_dbg(dev, "%s - retB = %d\n", __func__, ret);
/* bRequest 2: set handshaking to use DTR/DSR */
ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
2, USB_TYPE_VENDOR,
0x0000, 0x0100, NULL, 0, HZ * 3);
- dev_dbg(dev, "%s - retC = %d\n", __func__, ret);
+ mce_dbg(dev, "%s - retC = %d\n", __func__, ret);
/* device reset */
mce_async_out(ir, DEVICE_RESET, sizeof(DEVICE_RESET));
- mce_sync_in(ir, NULL, maxp);
/* get hw/sw revision? */
mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
- mce_sync_in(ir, NULL, maxp);
kfree(data);
};
static void mceusb_gen2_init(struct mceusb_dev *ir)
{
- int maxp = ir->len_in;
-
/* device reset */
mce_async_out(ir, DEVICE_RESET, sizeof(DEVICE_RESET));
- mce_sync_in(ir, NULL, maxp);
/* get hw/sw revision? */
mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
- mce_sync_in(ir, NULL, maxp);
/* unknown what the next two actually return... */
mce_async_out(ir, GET_UNKNOWN, sizeof(GET_UNKNOWN));
- mce_sync_in(ir, NULL, maxp);
mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
- mce_sync_in(ir, NULL, maxp);
}
static void mceusb_get_parameters(struct mceusb_dev *ir)
{
- int maxp = ir->len_in;
-
/* get the carrier and frequency */
mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
- mce_sync_in(ir, NULL, maxp);
- if (!ir->flags.no_tx) {
+ if (!ir->flags.no_tx)
/* get the transmitter bitmask */
mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
- mce_sync_in(ir, NULL, maxp);
- }
/* get receiver timeout value */
mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
- mce_sync_in(ir, NULL, maxp);
/* get receiver sensor setting */
mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
- mce_sync_in(ir, NULL, maxp);
}
static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
@@ -1082,7 +1077,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
rc->priv = ir;
rc->driver_type = RC_DRIVER_IR_RAW;
rc->allowed_protos = RC_TYPE_ALL;
- rc->timeout = US_TO_NS(1000);
+ rc->timeout = MS_TO_NS(100);
if (!ir->flags.no_tx) {
rc->s_tx_mask = mceusb_set_tx_mask;
rc->s_tx_carrier = mceusb_set_tx_carrier;
@@ -1122,7 +1117,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
bool tx_mask_normal;
int ir_intfnum;
- dev_dbg(&intf->dev, "%s called\n", __func__);
+ mce_dbg(&intf->dev, "%s called\n", __func__);
idesc = intf->cur_altsetting;
@@ -1150,7 +1145,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
ep_in = ep;
ep_in->bmAttributes = USB_ENDPOINT_XFER_INT;
ep_in->bInterval = 1;
- dev_dbg(&intf->dev, "acceptable inbound endpoint "
+ mce_dbg(&intf->dev, "acceptable inbound endpoint "
"found\n");
}
@@ -1165,12 +1160,12 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
ep_out = ep;
ep_out->bmAttributes = USB_ENDPOINT_XFER_INT;
ep_out->bInterval = 1;
- dev_dbg(&intf->dev, "acceptable outbound endpoint "
+ mce_dbg(&intf->dev, "acceptable outbound endpoint "
"found\n");
}
}
if (ep_in == NULL) {
- dev_dbg(&intf->dev, "inbound and/or endpoint not found\n");
+ mce_dbg(&intf->dev, "inbound and/or endpoint not found\n");
return -ENODEV;
}
@@ -1215,16 +1210,16 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
if (!ir->rc)
goto rc_dev_fail;
- /* flush buffers on the device */
- mce_sync_in(ir, NULL, maxp);
- mce_sync_in(ir, NULL, maxp);
-
/* wire up inbound data handler */
usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in,
maxp, (usb_complete_t) mceusb_dev_recv, ir, ep_in->bInterval);
ir->urb_in->transfer_dma = ir->dma_in;
ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ /* flush buffers on the device */
+ mce_dbg(&intf->dev, "Flushing receive buffers\n");
+ mce_flush_rx_buffer(ir, maxp);
+
/* initialize device */
if (ir->flags.microsoft_gen1)
mceusb_gen1_init(ir);
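
The timeout fixes in this file all hinge on the device reporting time in MCE_TIME_UNIT (50 us) ticks rather than half-milliseconds; the new MS_TO_NS(100) default corresponds to a raw value of 2000 ticks. A quick worked check of that conversion (the unit size matches the "units of 50us" comment above; the sample raw bytes are illustrative):

#include <stdio.h>

#define MCE_TIME_UNIT 50	/* us per device tick, as used above */

int main(void)
{
	unsigned hi = 0x07, lo = 0xd0;		/* raw timeout bytes: 0x07d0 = 2000 */
	unsigned units = (hi << 8) | lo;
	unsigned us = units * MCE_TIME_UNIT;	/* 100000 us */

	printf("%u units -> %u us -> %u ms\n", units, us, us / 1000);
	return 0;
}
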
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index bf3060ea610..eae05b50047 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -546,24 +546,18 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
* number may be larger than TXFCONT (0xff). So in interrupt_handler, it has to
* set TXFCONT to 0xff until buf_count is less than 0xff.
*/
-static int nvt_tx_ir(struct rc_dev *dev, int *txbuf, u32 n)
+static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
{
struct nvt_dev *nvt = dev->priv;
unsigned long flags;
- size_t cur_count;
unsigned int i;
u8 iren;
int ret;
spin_lock_irqsave(&nvt->tx.lock, flags);
- if (n >= TX_BUF_LEN) {
- nvt->tx.buf_count = cur_count = TX_BUF_LEN;
- ret = TX_BUF_LEN;
- } else {
- nvt->tx.buf_count = cur_count = n;
- ret = n;
- }
+ ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n);
+ nvt->tx.buf_count = (ret * sizeof(unsigned));
memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);
@@ -991,7 +985,6 @@ static int nvt_open(struct rc_dev *dev)
unsigned long flags;
spin_lock_irqsave(&nvt->nvt_lock, flags);
- nvt->in_use = true;
nvt_enable_cir(nvt);
spin_unlock_irqrestore(&nvt->nvt_lock, flags);
@@ -1004,7 +997,6 @@ static void nvt_close(struct rc_dev *dev)
unsigned long flags;
spin_lock_irqsave(&nvt->nvt_lock, flags);
- nvt->in_use = false;
nvt_disable_cir(nvt);
spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}
@@ -1112,7 +1104,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
rdev->dev.parent = &pdev->dev;
rdev->driver_name = NVT_DRIVER_NAME;
rdev->map_name = RC_MAP_RC6_MCE;
- rdev->timeout = US_TO_NS(1000);
+ rdev->timeout = MS_TO_NS(100);
/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
#if 0
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 379795d61ea..1241fc89a36 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -70,7 +70,6 @@ struct nvt_dev {
struct ir_raw_event rawir;
spinlock_t nvt_lock;
- bool in_use;
/* for rx */
u8 buf[RX_BUF_LEN];
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 873b3878975..04c2c722b6e 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -84,6 +84,17 @@ struct ir_raw_event_ctrl {
unsigned count;
unsigned wanted_bits;
} rc5_sz;
+ struct mce_kbd_dec {
+ struct input_dev *idev;
+ struct timer_list rx_timeout;
+ char name[64];
+ char phys[64];
+ int state;
+ u8 header;
+ u32 body;
+ unsigned count;
+ unsigned wanted_bits;
+ } mce_kbd;
struct lirc_codec {
struct rc_dev *dev;
struct lirc_driver *drv;
@@ -182,6 +193,13 @@ void ir_raw_init(void);
#define load_sony_decode() 0
#endif
+/* from ir-mce_kbd-decoder.c */
+#ifdef CONFIG_IR_MCE_KBD_DECODER_MODULE
+#define load_mce_kbd_decode() request_module("ir-mce_kbd-decoder")
+#else
+#define load_mce_kbd_decode() 0
+#endif
+
/* from ir-lirc-codec.c */
#ifdef CONFIG_IR_LIRC_CODEC_MODULE
#define load_lirc_codec() request_module("ir-lirc-codec")
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index cc846b2619c..efc6a514348 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -101,21 +101,14 @@ static int loop_set_rx_carrier_range(struct rc_dev *dev, u32 min, u32 max)
return 0;
}
-static int loop_tx_ir(struct rc_dev *dev, int *txbuf, u32 n)
+static int loop_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
{
struct loopback_dev *lodev = dev->priv;
u32 rxmask;
- unsigned count;
unsigned total_duration = 0;
unsigned i;
DEFINE_IR_RAW_EVENT(rawir);
- if (n == 0 || n % sizeof(int)) {
- dprintk("invalid tx buffer size\n");
- return -EINVAL;
- }
-
- count = n / sizeof(int);
for (i = 0; i < count; i++)
total_duration += abs(txbuf[i]);
@@ -142,7 +135,7 @@ static int loop_tx_ir(struct rc_dev *dev, int *txbuf, u32 n)
for (i = 0; i < count; i++) {
rawir.pulse = i % 2 ? false : true;
- rawir.duration = abs(txbuf[i]) * 1000;
+ rawir.duration = txbuf[i] * 1000;
if (rawir.duration)
ir_raw_event_store_with_filter(dev, &rawir);
}
@@ -158,7 +151,7 @@ out:
/* Lirc expects this function to take as long as the total duration */
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(usecs_to_jiffies(total_duration));
- return n;
+ return count;
}
static void loop_set_idle(struct rc_dev *dev, bool enable)
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index f57cd5677ac..51a23f48bc7 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -522,18 +522,20 @@ EXPORT_SYMBOL_GPL(rc_g_keycode_from_table);
/**
* ir_do_keyup() - internal function to signal the release of a keypress
* @dev: the struct rc_dev descriptor of the device
+ * @sync: whether or not to call input_sync
*
* This function is used internally to release a keypress, it must be
* called with keylock held.
*/
-static void ir_do_keyup(struct rc_dev *dev)
+static void ir_do_keyup(struct rc_dev *dev, bool sync)
{
if (!dev->keypressed)
return;
IR_dprintk(1, "keyup key 0x%04x\n", dev->last_keycode);
input_report_key(dev->input_dev, dev->last_keycode, 0);
- input_sync(dev->input_dev);
+ if (sync)
+ input_sync(dev->input_dev);
dev->keypressed = false;
}
@@ -549,7 +551,7 @@ void rc_keyup(struct rc_dev *dev)
unsigned long flags;
spin_lock_irqsave(&dev->keylock, flags);
- ir_do_keyup(dev);
+ ir_do_keyup(dev, true);
spin_unlock_irqrestore(&dev->keylock, flags);
}
EXPORT_SYMBOL_GPL(rc_keyup);
@@ -578,7 +580,7 @@ static void ir_timer_keyup(unsigned long cookie)
*/
spin_lock_irqsave(&dev->keylock, flags);
if (time_is_before_eq_jiffies(dev->keyup_jiffies))
- ir_do_keyup(dev);
+ ir_do_keyup(dev, true);
spin_unlock_irqrestore(&dev->keylock, flags);
}
@@ -597,6 +599,7 @@ void rc_repeat(struct rc_dev *dev)
spin_lock_irqsave(&dev->keylock, flags);
input_event(dev->input_dev, EV_MSC, MSC_SCAN, dev->last_scancode);
+ input_sync(dev->input_dev);
if (!dev->keypressed)
goto out;
@@ -622,29 +625,28 @@ EXPORT_SYMBOL_GPL(rc_repeat);
static void ir_do_keydown(struct rc_dev *dev, int scancode,
u32 keycode, u8 toggle)
{
- input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
+ bool new_event = !dev->keypressed ||
+ dev->last_scancode != scancode ||
+ dev->last_toggle != toggle;
- /* Repeat event? */
- if (dev->keypressed &&
- dev->last_scancode == scancode &&
- dev->last_toggle == toggle)
- return;
-
- /* Release old keypress */
- ir_do_keyup(dev);
+ if (new_event && dev->keypressed)
+ ir_do_keyup(dev, false);
- dev->last_scancode = scancode;
- dev->last_toggle = toggle;
- dev->last_keycode = keycode;
+ input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
- if (keycode == KEY_RESERVED)
- return;
+ if (new_event && keycode != KEY_RESERVED) {
+ /* Register a keypress */
+ dev->keypressed = true;
+ dev->last_scancode = scancode;
+ dev->last_toggle = toggle;
+ dev->last_keycode = keycode;
+
+ IR_dprintk(1, "%s: key down event, "
+ "key 0x%04x, scancode 0x%04x\n",
+ dev->input_name, keycode, scancode);
+ input_report_key(dev->input_dev, keycode, 1);
+ }
- /* Register a keypress */
- dev->keypressed = true;
- IR_dprintk(1, "%s: key down event, key 0x%04x, scancode 0x%04x\n",
- dev->input_name, keycode, scancode);
- input_report_key(dev->input_dev, dev->last_keycode, 1);
input_sync(dev->input_dev);
}
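
The rework above changes how key events are sequenced: the scancode is now reported on every decode, a keyup for the previous key is emitted (without its own sync) only when the scancode or toggle actually changes, and everything is flushed with a single input_sync() at the end. A rough model of the resulting evdev traffic, with printf standing in for the input_* calls (the state struct, keycodes and scancodes are illustrative):

#include <stdio.h>
#include <stdbool.h>

struct model {
	bool keypressed;
	unsigned last_scancode, last_keycode;
	unsigned char last_toggle;
};

/* mirrors the new_event logic in ir_do_keydown() above */
static void do_keydown(struct model *m, unsigned scancode, unsigned keycode,
		       unsigned char toggle)
{
	bool new_event = !m->keypressed ||
			 m->last_scancode != scancode ||
			 m->last_toggle != toggle;

	if (new_event && m->keypressed)
		printf("  EV_KEY %u up (no sync yet)\n", m->last_keycode);

	printf("  EV_MSC MSC_SCAN 0x%08x\n", scancode);

	if (new_event && keycode != 0 /* KEY_RESERVED */) {
		m->keypressed = true;
		m->last_scancode = scancode;
		m->last_toggle = toggle;
		m->last_keycode = keycode;
		printf("  EV_KEY %u down\n", keycode);
	}
	printf("  EV_SYN\n");
}

int main(void)
{
	struct model m = { 0 };

	printf("first press:\n");   do_keydown(&m, 0x800f0422, 28, 0);
	printf("repeat:\n");        do_keydown(&m, 0x800f0422, 28, 0);
	printf("different key:\n"); do_keydown(&m, 0x800f0423, 1, 0);
	return 0;
}
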
@@ -733,6 +735,7 @@ static struct {
{ RC_TYPE_JVC, "jvc" },
{ RC_TYPE_SONY, "sony" },
{ RC_TYPE_RC5_SZ, "rc-5-sz" },
+ { RC_TYPE_MCE_KBD, "mce_kbd" },
{ RC_TYPE_LIRC, "lirc" },
{ RC_TYPE_OTHER, "other" },
};
@@ -1097,7 +1100,6 @@ int rc_register_device(struct rc_dev *dev)
if (rc < 0)
goto out_input;
}
- mutex_unlock(&dev->lock);
if (dev->change_protocol) {
rc = dev->change_protocol(dev, rc_map->rc_type);
@@ -1105,6 +1107,8 @@ int rc_register_device(struct rc_dev *dev)
goto out_raw;
}
+ mutex_unlock(&dev->lock);
+
IR_dprintk(1, "Registered rc%ld (driver: %s, remote: %s, mode %s)\n",
dev->devno,
dev->driver_name ? dev->driver_name : "unknown",
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 5147767ccb7..a1660447791 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -205,6 +205,7 @@ struct redrat3_dev {
/* rx signal timeout timer */
struct timer_list rx_timeout;
+ u32 hw_timeout;
/* Is the device currently receiving? */
bool recv_in_progress;
@@ -414,20 +415,10 @@ static u32 redrat3_us_to_len(u32 microsec)
}
-/* timer callback to send long trailing space on receive timeout */
+/* timer callback to send reset event */
static void redrat3_rx_timeout(unsigned long data)
{
struct redrat3_dev *rr3 = (struct redrat3_dev *)data;
- DEFINE_IR_RAW_EVENT(rawir);
-
- rawir.pulse = false;
- rawir.duration = rr3->rc->timeout;
- rr3_dbg(rr3->dev, "storing trailing space with duration %d\n",
- rawir.duration);
- ir_raw_event_store_with_filter(rr3->rc, &rawir);
-
- rr3_dbg(rr3->dev, "calling ir_raw_event_handle\n");
- ir_raw_event_handle(rr3->rc);
rr3_dbg(rr3->dev, "calling ir_raw_event_reset\n");
ir_raw_event_reset(rr3->rc);
@@ -438,7 +429,7 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
DEFINE_IR_RAW_EVENT(rawir);
struct redrat3_signal_header header;
struct device *dev;
- int i;
+ int i, trailer = 0;
unsigned long delay;
u32 mod_freq, single_len;
u16 *len_vals;
@@ -464,7 +455,8 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
if (!(header.length >= RR3_HEADER_LENGTH))
dev_warn(dev, "read returned less than rr3 header len\n");
- delay = usecs_to_jiffies(rr3->rc->timeout / 1000);
+ /* Make sure we reset the IR kfifo after a bit of inactivity */
+ delay = usecs_to_jiffies(rr3->hw_timeout);
mod_timer(&rr3->rx_timeout, jiffies + delay);
memcpy(&tmp32, sig_data + RR3_PAUSE_OFFSET, sizeof(tmp32));
@@ -506,9 +498,6 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
u16 val = len_vals[data_vals[i]];
single_len = redrat3_len_to_us((u32)be16_to_cpu(val));
- /* cap the value to IR_MAX_DURATION */
- single_len &= IR_MAX_DURATION;
-
/* we should always get pulse/space/pulse/space samples */
if (i % 2)
rawir.pulse = false;
@@ -516,6 +505,12 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
rawir.pulse = true;
rawir.duration = US_TO_NS(single_len);
+ /* Save initial pulse length to fudge trailer */
+ if (i == 0)
+ trailer = rawir.duration;
+ /* cap the value to IR_MAX_DURATION */
+ rawir.duration &= IR_MAX_DURATION;
+
rr3_dbg(dev, "storing %s with duration %d (i: %d)\n",
rawir.pulse ? "pulse" : "space", rawir.duration, i);
ir_raw_event_store_with_filter(rr3->rc, &rawir);
@@ -525,7 +520,10 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
if (i % 2) {
rawir.pulse = false;
/* this duration is made up, and may not be ideal... */
- rawir.duration = rr3->rc->timeout / 2;
+ if (trailer < US_TO_NS(1000))
+ rawir.duration = US_TO_NS(2800);
+ else
+ rawir.duration = trailer;
rr3_dbg(dev, "storing trailing space with duration %d\n",
rawir.duration);
ir_raw_event_store_with_filter(rr3->rc, &rawir);
@@ -629,36 +627,31 @@ static inline void redrat3_delete(struct redrat3_dev *rr3,
kfree(rr3);
}
-static u32 redrat3_get_timeout(struct device *dev,
- struct rc_dev *rc, struct usb_device *udev)
+static u32 redrat3_get_timeout(struct redrat3_dev *rr3)
{
u32 *tmp;
- u32 timeout = MS_TO_NS(150); /* a sane default, if things go haywire */
+ u32 timeout = MS_TO_US(150); /* a sane default, if things go haywire */
int len, ret, pipe;
len = sizeof(*tmp);
tmp = kzalloc(len, GFP_KERNEL);
if (!tmp) {
- dev_warn(dev, "Memory allocation faillure\n");
+ dev_warn(rr3->dev, "Memory allocation failure\n");
return timeout;
}
- pipe = usb_rcvctrlpipe(udev, 0);
- ret = usb_control_msg(udev, pipe, RR3_GET_IR_PARAM,
+ pipe = usb_rcvctrlpipe(rr3->udev, 0);
+ ret = usb_control_msg(rr3->udev, pipe, RR3_GET_IR_PARAM,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
RR3_IR_IO_SIG_TIMEOUT, 0, tmp, len, HZ * 5);
if (ret != len) {
- dev_warn(dev, "Failed to read timeout from hardware\n");
+ dev_warn(rr3->dev, "Failed to read timeout from hardware\n");
return timeout;
}
- timeout = US_TO_NS(redrat3_len_to_us(be32_to_cpu(*tmp)));
- if (timeout < rc->min_timeout)
- timeout = rc->min_timeout;
- else if (timeout > rc->max_timeout)
- timeout = rc->max_timeout;
+ timeout = redrat3_len_to_us(be32_to_cpu(*tmp));
- rr3_dbg(dev, "Got timeout of %d ms\n", timeout / (1000 * 1000));
+ rr3_dbg(rr3->dev, "Got timeout of %d ms\n", timeout / 1000);
return timeout;
}
@@ -1110,9 +1103,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
rc->priv = rr3;
rc->driver_type = RC_DRIVER_IR_RAW;
rc->allowed_protos = RC_TYPE_ALL;
- rc->min_timeout = MS_TO_NS(RR3_RX_MIN_TIMEOUT);
- rc->max_timeout = MS_TO_NS(RR3_RX_MAX_TIMEOUT);
- rc->timeout = redrat3_get_timeout(dev, rc, rr3->udev);
+ rc->timeout = US_TO_NS(2750);
rc->tx_ir = redrat3_transmit_ir;
rc->s_tx_carrier = redrat3_set_tx_carrier;
rc->driver_name = DRIVER_NAME;
@@ -1186,7 +1177,7 @@ static int __devinit redrat3_dev_probe(struct usb_interface *intf,
rr3 = kzalloc(sizeof(*rr3), GFP_KERNEL);
if (rr3 == NULL) {
dev_err(dev, "Memory allocation failure\n");
- goto error;
+ goto no_endpoints;
}
rr3->dev = &intf->dev;
@@ -1242,6 +1233,9 @@ static int __devinit redrat3_dev_probe(struct usb_interface *intf,
if (retval < 0)
goto error;
+ /* store current hardware timeout, in us; used for kfifo resets */
+ rr3->hw_timeout = redrat3_get_timeout(rr3);
+
/* default.. will get overridden by any sends with a freq defined */
rr3->carrier = 38000;
@@ -1280,6 +1274,7 @@ static void __devexit redrat3_dev_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
rc_unregister_device(rr3->rc);
+ del_timer_sync(&rr3->rx_timeout);
redrat3_delete(rr3, udev);
rr3_ftr(&intf->dev, "RedRat3 IR Transceiver now disconnected\n");
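
For reference, the trailing-space heuristic introduced by the redrat3 hunks above can be read in isolation as a small sketch. The helper name below is made up for illustration, and US_TO_NS is re-defined locally (with its usual microseconds-to-nanoseconds meaning) so the fragment stands on its own; it is not part of the patch.

#define US_TO_NS(x)	((x) * 1000)

/* Reuse the first pulse length as the made-up trailing space, but never
 * report a gap shorter than 2.8 ms. */
static unsigned int pick_trailer_ns(unsigned int first_pulse_ns)
{
	if (first_pulse_ns < US_TO_NS(1000))
		return US_TO_NS(2800);
	return first_pulse_ns;
}
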
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 5d06b899e85..bec8abc965f 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -6,8 +6,8 @@
* could probably support others (Winbond WEC102X, NatSemi, etc)
* with minor modifications.
*
- * Original Author: David Härdeman <david@hardeman.nu>
- * Copyright (C) 2009 - 2010 David Härdeman <david@hardeman.nu>
+ * Original Author: David Härdeman <david@hardeman.nu>
+ * Copyright (C) 2009 - 2011 David Härdeman <david@hardeman.nu>
*
* Dedicated to my daughter Matilda, without whose loving attention this
* driver would have been finished in half the time and with a fraction
@@ -577,16 +577,12 @@ wbcir_txmask(struct rc_dev *dev, u32 mask)
}
static int
-wbcir_tx(struct rc_dev *dev, int *buf, u32 bufsize)
+wbcir_tx(struct rc_dev *dev, unsigned *buf, unsigned count)
{
struct wbcir_data *data = dev->priv;
- u32 count;
unsigned i;
unsigned long flags;
- /* bufsize has been sanity checked by the caller */
- count = bufsize / sizeof(int);
-
/* Not sure if this is possible, but better safe than sorry */
spin_lock_irqsave(&data->spinlock, flags);
if (data->txstate != WBCIR_TXSTATE_INACTIVE) {
@@ -876,18 +872,8 @@ wbcir_init_hw(struct wbcir_data *data)
/* prescaler 1.0, tx/rx fifo lvl 16 */
outb(0x30, data->sbase + WBCIR_REG_SP3_EXCR2);
- /* Set baud divisor to generate one byte per bit/cell */
- switch (protocol) {
- case IR_PROTOCOL_RC5:
- outb(0xA7, data->sbase + WBCIR_REG_SP3_BGDL);
- break;
- case IR_PROTOCOL_RC6:
- outb(0x53, data->sbase + WBCIR_REG_SP3_BGDL);
- break;
- case IR_PROTOCOL_NEC:
- outb(0x69, data->sbase + WBCIR_REG_SP3_BGDL);
- break;
- }
+ /* Set baud divisor to sample every 10 us */
+ outb(0x0F, data->sbase + WBCIR_REG_SP3_BGDL);
outb(0x00, data->sbase + WBCIR_REG_SP3_BGDH);
/* Set CEIR mode */
@@ -896,9 +882,9 @@ wbcir_init_hw(struct wbcir_data *data)
inb(data->sbase + WBCIR_REG_SP3_LSR); /* Clear LSR */
inb(data->sbase + WBCIR_REG_SP3_MSR); /* Clear MSR */
- /* Disable RX demod, run-length encoding/decoding, set freq span */
+ /* Disable RX demod, enable run-length enc/dec, set freq span */
wbcir_select_bank(data, WBCIR_BANK_7);
- outb(0x10, data->sbase + WBCIR_REG_SP3_RCCFG);
+ outb(0x90, data->sbase + WBCIR_REG_SP3_RCCFG);
/* Disable timer */
wbcir_select_bank(data, WBCIR_BANK_4);
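
A quick sanity check on the new fixed divisor, assuming the 24 MHz CEIR sample clock and the usual /16 prescaler for this Winbond part (an assumption, not stated in the patch, though the same numbers also reproduce the old per-protocol divisors, e.g. 0xA7 gives 8 x ~111 us, one RC-5 cell):

#include <stdio.h>

int main(void)
{
	unsigned int clk_hz = 24000000;	/* assumed input clock */
	unsigned int divisor = 0x0F;	/* new WBCIR_REG_SP3_BGDL value */
	unsigned int sample_hz = clk_hz / (16 * divisor);

	/* prints: 100000 Hz -> one sample every 10 us */
	printf("%u Hz -> one sample every %u us\n",
	       sample_hz, 1000000 / sample_hz);
	return 0;
}
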
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index bb53de7fe40..f574dc012ca 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -489,6 +489,15 @@ config VIDEO_TCM825X
This is a driver for the Toshiba TCM825x VGA camera sensor.
It is used for example in Nokia N800.
+comment "Flash devices"
+
+config VIDEO_ADP1653
+ tristate "ADP1653 flash support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ ---help---
+ This is a driver for the ADP1653 flash controller. It is used for
+ example in Nokia N900.
+
comment "Video improvement chips"
config VIDEO_UPD64031A
@@ -707,6 +716,8 @@ source "drivers/media/video/cx18/Kconfig"
source "drivers/media/video/saa7164/Kconfig"
+source "drivers/media/video/marvell-ccic/Kconfig"
+
config VIDEO_M32R_AR
tristate "AR devices"
depends on M32R && VIDEO_V4L2
@@ -726,15 +737,6 @@ config VIDEO_M32R_AR_M64278
To compile this driver as a module, choose M here: the
module will be called arv.
-config VIDEO_CAFE_CCIC
- tristate "Marvell 88ALP01 (Cafe) CMOS Camera Controller support"
- depends on PCI && I2C && VIDEO_V4L2
- select VIDEO_OV7670
- ---help---
- This is a video4linux2 driver for the Marvell 88ALP01 integrated
- CMOS camera controller. This is the controller found on first-
- generation OLPC systems.
-
config VIDEO_SR030PC30
tristate "SR030PC30 VGA camera sensor support"
depends on I2C && VIDEO_V4L2
@@ -846,6 +848,12 @@ config SOC_CAMERA_OV2640
help
This is a ov2640 camera driver
+config SOC_CAMERA_OV5642
+ tristate "ov5642 camera support"
+ depends on SOC_CAMERA && I2C
+ help
+ This is a V4L2 camera driver for the OmniVision OV5642 sensor
+
config SOC_CAMERA_OV6650
tristate "ov6650 sensor support"
depends on SOC_CAMERA && I2C
@@ -952,6 +960,14 @@ config VIDEO_SAMSUNG_S5P_FIMC
To compile this driver as a module, choose M here: the
module will be called s5p-fimc.
+config VIDEO_ATMEL_ISI
+ tristate "ATMEL Image Sensor Interface (ISI) support"
+ depends on VIDEO_DEV && SOC_CAMERA && ARCH_AT91
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This module makes the ATMEL Image Sensor Interface available
+ as a v4l2 device.
+
config VIDEO_S5P_MIPI_CSIS
tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver"
depends on VIDEO_V4L2 && PM_RUNTIME && PLAT_S5P && VIDEO_V4L2_SUBDEV_API
@@ -961,6 +977,8 @@ config VIDEO_S5P_MIPI_CSIS
To compile this driver as a module, choose M here: the
module will be called s5p-csis.
+source "drivers/media/video/s5p-tv/Kconfig"
+
#
# USB Multimedia device configuration
#
@@ -1056,4 +1074,12 @@ config VIDEO_MEM2MEM_TESTDEV
framework.
+config VIDEO_SAMSUNG_S5P_MFC
+ tristate "Samsung S5P MFC 5.1 Video Codec"
+ depends on VIDEO_DEV && VIDEO_V4L2 && PLAT_S5P
+ select VIDEOBUF2_DMA_CONTIG
+ default n
+ help
+ MFC 5.1 driver for V4L2.
+
endif # V4L_MEM2MEM_DRIVERS
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index f0fecd6f6a3..272390072ae 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o
obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o
obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/
+obj-$(CONFIG_VIDEO_ADP1653) += adp1653.o
obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o
obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o
@@ -78,6 +79,7 @@ obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
obj-$(CONFIG_SOC_CAMERA_MT9T112) += mt9t112.o
obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
obj-$(CONFIG_SOC_CAMERA_OV2640) += ov2640.o
+obj-$(CONFIG_SOC_CAMERA_OV5642) += ov5642.o
obj-$(CONFIG_SOC_CAMERA_OV6650) += ov6650.o
obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o
obj-$(CONFIG_SOC_CAMERA_OV9640) += ov9640.o
@@ -127,7 +129,8 @@ obj-$(CONFIG_VIDEO_M32R_AR_M64278) += arv.o
obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
-obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o
+obj-$(CONFIG_VIDEO_CAFE_CCIC) += marvell-ccic/
+obj-$(CONFIG_VIDEO_MMP_CAMERA) += marvell-ccic/
obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
@@ -166,8 +169,11 @@ obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2) += sh_mobile_csi2.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
obj-$(CONFIG_VIDEO_OMAP1) += omap1_camera.o
+obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_FIMC) += s5p-fimc/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc/
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_TV) += s5p-tv/
obj-$(CONFIG_ARCH_DAVINCI) += davinci/
diff --git a/drivers/media/video/adp1653.c b/drivers/media/video/adp1653.c
new file mode 100644
index 00000000000..be7befd6094
--- /dev/null
+++ b/drivers/media/video/adp1653.c
@@ -0,0 +1,491 @@
+/*
+ * drivers/media/video/adp1653.c
+ *
+ * Copyright (C) 2008--2011 Nokia Corporation
+ *
+ * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ *
+ * Contributors:
+ * Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
+ * Tuukka Toivonen <tuukkat76@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * TODO:
+ * - fault interrupt handling
+ * - hardware strobe
+ * - power doesn't need to be ON if all lights are off
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <media/adp1653.h>
+#include <media/v4l2-device.h>
+
+#define TIMEOUT_MAX 820000
+#define TIMEOUT_STEP 54600
+#define TIMEOUT_MIN (TIMEOUT_MAX - ADP1653_REG_CONFIG_TMR_SET_MAX \
+ * TIMEOUT_STEP)
+#define TIMEOUT_US_TO_CODE(t) ((TIMEOUT_MAX + (TIMEOUT_STEP / 2) - (t)) \
+ / TIMEOUT_STEP)
+#define TIMEOUT_CODE_TO_US(c) (TIMEOUT_MAX - (c) * TIMEOUT_STEP)
+
+/* Write values into ADP1653 registers. */
+static int adp1653_update_hw(struct adp1653_flash *flash)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+ u8 out_sel;
+ u8 config = 0;
+ int rval;
+
+ out_sel = ADP1653_INDICATOR_INTENSITY_uA_TO_REG(
+ flash->indicator_intensity->val)
+ << ADP1653_REG_OUT_SEL_ILED_SHIFT;
+
+ switch (flash->led_mode->val) {
+ case V4L2_FLASH_LED_MODE_NONE:
+ break;
+ case V4L2_FLASH_LED_MODE_FLASH:
+ /* Flash mode, light on with strobe, duration from timer */
+ config = ADP1653_REG_CONFIG_TMR_CFG;
+ config |= TIMEOUT_US_TO_CODE(flash->flash_timeout->val)
+ << ADP1653_REG_CONFIG_TMR_SET_SHIFT;
+ break;
+ case V4L2_FLASH_LED_MODE_TORCH:
+ /* Torch mode, light immediately on, duration indefinite */
+ out_sel |= ADP1653_FLASH_INTENSITY_mA_TO_REG(
+ flash->torch_intensity->val)
+ << ADP1653_REG_OUT_SEL_HPLED_SHIFT;
+ break;
+ }
+
+ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, out_sel);
+ if (rval < 0)
+ return rval;
+
+ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_CONFIG, config);
+ if (rval < 0)
+ return rval;
+
+ return 0;
+}
+
+static int adp1653_get_fault(struct adp1653_flash *flash)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+ int fault;
+ int rval;
+
+ fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT);
+ if (IS_ERR_VALUE(fault))
+ return fault;
+
+ flash->fault |= fault;
+
+ if (!flash->fault)
+ return 0;
+
+ /* Clear faults. */
+ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0);
+ if (IS_ERR_VALUE(rval))
+ return rval;
+
+ flash->led_mode->val = V4L2_FLASH_LED_MODE_NONE;
+
+ rval = adp1653_update_hw(flash);
+ if (IS_ERR_VALUE(rval))
+ return rval;
+
+ return flash->fault;
+}
+
+static int adp1653_strobe(struct adp1653_flash *flash, int enable)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+ u8 out_sel = ADP1653_INDICATOR_INTENSITY_uA_TO_REG(
+ flash->indicator_intensity->val)
+ << ADP1653_REG_OUT_SEL_ILED_SHIFT;
+ int rval;
+
+ if (flash->led_mode->val != V4L2_FLASH_LED_MODE_FLASH)
+ return -EBUSY;
+
+ if (!enable)
+ return i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL,
+ out_sel);
+
+ out_sel |= ADP1653_FLASH_INTENSITY_mA_TO_REG(
+ flash->flash_intensity->val)
+ << ADP1653_REG_OUT_SEL_HPLED_SHIFT;
+ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, out_sel);
+ if (rval)
+ return rval;
+
+ /* Software strobe using i2c */
+ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_SW_STROBE,
+ ADP1653_REG_SW_STROBE_SW_STROBE);
+ if (rval)
+ return rval;
+ return i2c_smbus_write_byte_data(client, ADP1653_REG_SW_STROBE, 0);
+}
+
+/* --------------------------------------------------------------------------
+ * V4L2 controls
+ */
+
+static int adp1653_get_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct adp1653_flash *flash =
+ container_of(ctrl->handler, struct adp1653_flash, ctrls);
+ int rval;
+
+ rval = adp1653_get_fault(flash);
+ if (IS_ERR_VALUE(rval))
+ return rval;
+
+ ctrl->cur.val = 0;
+
+ if (flash->fault & ADP1653_REG_FAULT_FLT_SCP)
+ ctrl->cur.val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
+ if (flash->fault & ADP1653_REG_FAULT_FLT_OT)
+ ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
+ if (flash->fault & ADP1653_REG_FAULT_FLT_TMR)
+ ctrl->cur.val |= V4L2_FLASH_FAULT_TIMEOUT;
+ if (flash->fault & ADP1653_REG_FAULT_FLT_OV)
+ ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_VOLTAGE;
+
+ flash->fault = 0;
+
+ return 0;
+}
+
+static int adp1653_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct adp1653_flash *flash =
+ container_of(ctrl->handler, struct adp1653_flash, ctrls);
+ int rval;
+
+ rval = adp1653_get_fault(flash);
+ if (IS_ERR_VALUE(rval))
+ return rval;
+ if ((rval & (ADP1653_REG_FAULT_FLT_SCP |
+ ADP1653_REG_FAULT_FLT_OT |
+ ADP1653_REG_FAULT_FLT_OV)) &&
+ (ctrl->id == V4L2_CID_FLASH_STROBE ||
+ ctrl->id == V4L2_CID_FLASH_TORCH_INTENSITY ||
+ ctrl->id == V4L2_CID_FLASH_LED_MODE))
+ return -EBUSY;
+
+ switch (ctrl->id) {
+ case V4L2_CID_FLASH_STROBE:
+ return adp1653_strobe(flash, 1);
+ case V4L2_CID_FLASH_STROBE_STOP:
+ return adp1653_strobe(flash, 0);
+ }
+
+ return adp1653_update_hw(flash);
+}
+
+static const struct v4l2_ctrl_ops adp1653_ctrl_ops = {
+ .g_volatile_ctrl = adp1653_get_ctrl,
+ .s_ctrl = adp1653_set_ctrl,
+};
+
+static int adp1653_init_controls(struct adp1653_flash *flash)
+{
+ struct v4l2_ctrl *fault;
+
+ v4l2_ctrl_handler_init(&flash->ctrls, 9);
+
+ flash->led_mode =
+ v4l2_ctrl_new_std_menu(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_LED_MODE,
+ V4L2_FLASH_LED_MODE_TORCH, ~0x7, 0);
+ v4l2_ctrl_new_std_menu(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_STROBE_SOURCE,
+ V4L2_FLASH_STROBE_SOURCE_SOFTWARE, ~0x1, 0);
+ v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_STROBE, 0, 0, 0, 0);
+ v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0);
+ flash->flash_timeout =
+ v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_TIMEOUT, TIMEOUT_MIN,
+ flash->platform_data->max_flash_timeout,
+ TIMEOUT_STEP,
+ flash->platform_data->max_flash_timeout);
+ flash->flash_intensity =
+ v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_INTENSITY,
+ ADP1653_FLASH_INTENSITY_MIN,
+ flash->platform_data->max_flash_intensity,
+ 1, flash->platform_data->max_flash_intensity);
+ flash->torch_intensity =
+ v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_TORCH_INTENSITY,
+ ADP1653_TORCH_INTENSITY_MIN,
+ flash->platform_data->max_torch_intensity,
+ ADP1653_FLASH_INTENSITY_STEP,
+ flash->platform_data->max_torch_intensity);
+ flash->indicator_intensity =
+ v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_INDICATOR_INTENSITY,
+ ADP1653_INDICATOR_INTENSITY_MIN,
+ flash->platform_data->max_indicator_intensity,
+ ADP1653_INDICATOR_INTENSITY_STEP,
+ ADP1653_INDICATOR_INTENSITY_MIN);
+ fault = v4l2_ctrl_new_std(&flash->ctrls, &adp1653_ctrl_ops,
+ V4L2_CID_FLASH_FAULT, 0,
+ V4L2_FLASH_FAULT_OVER_VOLTAGE
+ | V4L2_FLASH_FAULT_OVER_TEMPERATURE
+ | V4L2_FLASH_FAULT_SHORT_CIRCUIT, 0, 0);
+
+ if (flash->ctrls.error)
+ return flash->ctrls.error;
+
+ fault->is_volatile = 1;
+
+ flash->subdev.ctrl_handler = &flash->ctrls;
+ return 0;
+}
+
+/* --------------------------------------------------------------------------
+ * V4L2 subdev operations
+ */
+
+static int
+adp1653_init_device(struct adp1653_flash *flash)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
+ int rval;
+
+ /* Clear FAULT register by writing zero to OUT_SEL */
+ rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0);
+ if (rval < 0) {
+ dev_err(&client->dev, "failed writing fault register\n");
+ return -EIO;
+ }
+
+ mutex_lock(&flash->ctrls.lock);
+ /* Reset faults before reading new ones. */
+ flash->fault = 0;
+ rval = adp1653_get_fault(flash);
+ mutex_unlock(&flash->ctrls.lock);
+ if (rval > 0) {
+ dev_err(&client->dev, "faults detected: 0x%1.1x\n", rval);
+ return -EIO;
+ }
+
+ mutex_lock(&flash->ctrls.lock);
+ rval = adp1653_update_hw(flash);
+ mutex_unlock(&flash->ctrls.lock);
+ if (rval) {
+ dev_err(&client->dev,
+ "adp1653_update_hw failed at %s\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+__adp1653_set_power(struct adp1653_flash *flash, int on)
+{
+ int ret;
+
+ ret = flash->platform_data->power(&flash->subdev, on);
+ if (ret < 0)
+ return ret;
+
+ if (!on)
+ return 0;
+
+ ret = adp1653_init_device(flash);
+ if (ret < 0)
+ flash->platform_data->power(&flash->subdev, 0);
+
+ return ret;
+}
+
+static int
+adp1653_set_power(struct v4l2_subdev *subdev, int on)
+{
+ struct adp1653_flash *flash = to_adp1653_flash(subdev);
+ int ret = 0;
+
+ mutex_lock(&flash->power_lock);
+
+ /* If the power count is modified from 0 to != 0 or from != 0 to 0,
+ * update the power state.
+ */
+ if (flash->power_count == !on) {
+ ret = __adp1653_set_power(flash, !!on);
+ if (ret < 0)
+ goto done;
+ }
+
+ /* Update the power count. */
+ flash->power_count += on ? 1 : -1;
+ WARN_ON(flash->power_count < 0);
+
+done:
+ mutex_unlock(&flash->power_lock);
+ return ret;
+}
+
+static int adp1653_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return adp1653_set_power(sd, 1);
+}
+
+static int adp1653_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return adp1653_set_power(sd, 0);
+}
+
+static const struct v4l2_subdev_core_ops adp1653_core_ops = {
+ .s_power = adp1653_set_power,
+};
+
+static const struct v4l2_subdev_ops adp1653_ops = {
+ .core = &adp1653_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops adp1653_internal_ops = {
+ .open = adp1653_open,
+ .close = adp1653_close,
+};
+
+/* --------------------------------------------------------------------------
+ * I2C driver
+ */
+#ifdef CONFIG_PM
+
+static int adp1653_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct adp1653_flash *flash = to_adp1653_flash(subdev);
+
+ if (!flash->power_count)
+ return 0;
+
+ return __adp1653_set_power(flash, 0);
+}
+
+static int adp1653_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct adp1653_flash *flash = to_adp1653_flash(subdev);
+
+ if (!flash->power_count)
+ return 0;
+
+ return __adp1653_set_power(flash, 1);
+}
+
+#else
+
+#define adp1653_suspend NULL
+#define adp1653_resume NULL
+
+#endif /* CONFIG_PM */
+
+static int adp1653_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct adp1653_flash *flash;
+ int ret;
+
+ flash = kzalloc(sizeof(*flash), GFP_KERNEL);
+ if (flash == NULL)
+ return -ENOMEM;
+
+ flash->platform_data = client->dev.platform_data;
+
+ mutex_init(&flash->power_lock);
+
+ v4l2_i2c_subdev_init(&flash->subdev, client, &adp1653_ops);
+ flash->subdev.internal_ops = &adp1653_internal_ops;
+ flash->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ adp1653_init_controls(flash);
+
+ ret = media_entity_init(&flash->subdev.entity, 0, NULL, 0);
+ if (ret < 0)
+ kfree(flash);
+
+ return ret;
+}
+
+static int __exit adp1653_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+ struct adp1653_flash *flash = to_adp1653_flash(subdev);
+
+ v4l2_device_unregister_subdev(&flash->subdev);
+ v4l2_ctrl_handler_free(&flash->ctrls);
+ media_entity_cleanup(&flash->subdev.entity);
+ kfree(flash);
+ return 0;
+}
+
+static const struct i2c_device_id adp1653_id_table[] = {
+ { ADP1653_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adp1653_id_table);
+
+static struct dev_pm_ops adp1653_pm_ops = {
+ .suspend = adp1653_suspend,
+ .resume = adp1653_resume,
+};
+
+static struct i2c_driver adp1653_i2c_driver = {
+ .driver = {
+ .name = ADP1653_NAME,
+ .pm = &adp1653_pm_ops,
+ },
+ .probe = adp1653_probe,
+ .remove = __exit_p(adp1653_remove),
+ .id_table = adp1653_id_table,
+};
+
+static int __init adp1653_init(void)
+{
+ int rval;
+
+ rval = i2c_add_driver(&adp1653_i2c_driver);
+ if (rval)
+ printk(KERN_ALERT "%s: failed at i2c_add_driver\n", __func__);
+
+ return rval;
+}
+
+static void __exit adp1653_exit(void)
+{
+ i2c_del_driver(&adp1653_i2c_driver);
+}
+
+module_init(adp1653_init);
+module_exit(adp1653_exit);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
+MODULE_DESCRIPTION("Analog Devices ADP1653 LED flash driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index f989f2820d8..b6ed44aebe3 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
@@ -54,7 +53,7 @@
*/
#define USE_INT 0 /* Don't modify */
-#define VERSION "0.04"
+#define VERSION "0.0.5"
#define ar_inl(addr) inl((unsigned long)(addr))
#define ar_outl(val, addr) outl((unsigned long)(val), (unsigned long)(addr))
@@ -404,7 +403,6 @@ static int ar_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, ar->vdev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "Colour AR VGA", sizeof(vcap->card));
strlcpy(vcap->bus_info, "Platform", sizeof(vcap->bus_info));
- vcap->version = KERNEL_VERSION(0, 0, 4);
vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
return 0;
}
@@ -879,3 +877,4 @@ module_exit(ar_cleanup_module);
MODULE_AUTHOR("Takeo Takahashi <takahashi.takeo@renesas.com>");
MODULE_DESCRIPTION("Colour AR M64278(VGA) for Video4Linux");
MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION);
diff --git a/drivers/media/video/atmel-isi.c b/drivers/media/video/atmel-isi.c
new file mode 100644
index 00000000000..7b89f00501b
--- /dev/null
+++ b/drivers/media/video/atmel-isi.c
@@ -0,0 +1,1048 @@
+/*
+ * Copyright (c) 2011 Atmel Corporation
+ * Josh Wu, <josh.wu@atmel.com>
+ *
+ * Based on previous work by Lars Haring, <lars.haring@atmel.com>
+ * and Sedji Gaouaou
+ * Based on the bttv driver for Bt848 with respective copyright holders
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/atmel-isi.h>
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define MAX_BUFFER_NUM 32
+#define MAX_SUPPORT_WIDTH 2048
+#define MAX_SUPPORT_HEIGHT 2048
+#define VID_LIMIT_BYTES (16 * 1024 * 1024)
+#define MIN_FRAME_RATE 15
+#define FRAME_INTERVAL_MILLI_SEC (1000 / MIN_FRAME_RATE)
+
+/* ISI states */
+enum {
+ ISI_STATE_IDLE = 0,
+ ISI_STATE_READY,
+ ISI_STATE_WAIT_SOF,
+};
+
+/* Frame buffer descriptor */
+struct fbd {
+ /* Physical address of the frame buffer */
+ u32 fb_address;
+ /* DMA Control Register(only in HISI2) */
+ u32 dma_ctrl;
+ /* Physical address of the next fbd */
+ u32 next_fbd_address;
+};
+
+static void set_dma_ctrl(struct fbd *fb_desc, u32 ctrl)
+{
+ fb_desc->dma_ctrl = ctrl;
+}
+
+struct isi_dma_desc {
+ struct list_head list;
+ struct fbd *p_fbd;
+ u32 fbd_phys;
+};
+
+/* Frame buffer data */
+struct frame_buffer {
+ struct vb2_buffer vb;
+ struct isi_dma_desc *p_dma_desc;
+ struct list_head list;
+};
+
+struct atmel_isi {
+ /* Protects the access of variables shared with the ISR */
+ spinlock_t lock;
+ void __iomem *regs;
+
+ int sequence;
+ /* State of the ISI module in capturing mode */
+ int state;
+
+ /* Wait queue for waiting for SOF */
+ wait_queue_head_t vsync_wq;
+
+ struct vb2_alloc_ctx *alloc_ctx;
+
+ /* Allocate descriptors for dma buffer use */
+ struct fbd *p_fb_descriptors;
+ u32 fb_descriptors_phys;
+ struct list_head dma_desc_head;
+ struct isi_dma_desc dma_desc[MAX_BUFFER_NUM];
+
+ struct completion complete;
+ struct clk *pclk;
+ unsigned int irq;
+
+ struct isi_platform_data *pdata;
+
+ struct list_head video_buffer_list;
+ struct frame_buffer *active;
+
+ struct soc_camera_device *icd;
+ struct soc_camera_host soc_host;
+};
+
+static void isi_writel(struct atmel_isi *isi, u32 reg, u32 val)
+{
+ writel(val, isi->regs + reg);
+}
+static u32 isi_readl(struct atmel_isi *isi, u32 reg)
+{
+ return readl(isi->regs + reg);
+}
+
+static int configure_geometry(struct atmel_isi *isi, u32 width,
+ u32 height, enum v4l2_mbus_pixelcode code)
+{
+ u32 cfg2, cr;
+
+ switch (code) {
+ /* YUV, including grey */
+ case V4L2_MBUS_FMT_Y8_1X8:
+ cr = ISI_CFG2_GRAYSCALE;
+ break;
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ cr = ISI_CFG2_YCC_SWAP_MODE_3;
+ break;
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ cr = ISI_CFG2_YCC_SWAP_MODE_2;
+ break;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ cr = ISI_CFG2_YCC_SWAP_MODE_1;
+ break;
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ cr = ISI_CFG2_YCC_SWAP_DEFAULT;
+ break;
+ /* RGB, TODO */
+ default:
+ return -EINVAL;
+ }
+
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+
+ cfg2 = isi_readl(isi, ISI_CFG2);
+ cfg2 |= cr;
+ /* Set width */
+ cfg2 &= ~(ISI_CFG2_IM_HSIZE_MASK);
+ cfg2 |= ((width - 1) << ISI_CFG2_IM_HSIZE_OFFSET) &
+ ISI_CFG2_IM_HSIZE_MASK;
+ /* Set height */
+ cfg2 &= ~(ISI_CFG2_IM_VSIZE_MASK);
+ cfg2 |= ((height - 1) << ISI_CFG2_IM_VSIZE_OFFSET)
+ & ISI_CFG2_IM_VSIZE_MASK;
+ isi_writel(isi, ISI_CFG2, cfg2);
+
+ return 0;
+}
+
+static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi)
+{
+ if (isi->active) {
+ struct vb2_buffer *vb = &isi->active->vb;
+ struct frame_buffer *buf = isi->active;
+
+ list_del_init(&buf->list);
+ do_gettimeofday(&vb->v4l2_buf.timestamp);
+ vb->v4l2_buf.sequence = isi->sequence++;
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ }
+
+ if (list_empty(&isi->video_buffer_list)) {
+ isi->active = NULL;
+ } else {
+ /* start next dma frame. */
+ isi->active = list_entry(isi->video_buffer_list.next,
+ struct frame_buffer, list);
+ isi_writel(isi, ISI_DMA_C_DSCR,
+ isi->active->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_C_CTRL,
+ ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+ isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
+ }
+ return IRQ_HANDLED;
+}
+
+/* ISI interrupt service routine */
+static irqreturn_t isi_interrupt(int irq, void *dev_id)
+{
+ struct atmel_isi *isi = dev_id;
+ u32 status, mask, pending;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&isi->lock);
+
+ status = isi_readl(isi, ISI_STATUS);
+ mask = isi_readl(isi, ISI_INTMASK);
+ pending = status & mask;
+
+ if (pending & ISI_CTRL_SRST) {
+ complete(&isi->complete);
+ isi_writel(isi, ISI_INTDIS, ISI_CTRL_SRST);
+ ret = IRQ_HANDLED;
+ } else if (pending & ISI_CTRL_DIS) {
+ complete(&isi->complete);
+ isi_writel(isi, ISI_INTDIS, ISI_CTRL_DIS);
+ ret = IRQ_HANDLED;
+ } else {
+ if ((pending & ISI_SR_VSYNC) &&
+ (isi->state == ISI_STATE_IDLE)) {
+ isi->state = ISI_STATE_READY;
+ wake_up_interruptible(&isi->vsync_wq);
+ ret = IRQ_HANDLED;
+ }
+ if (likely(pending & ISI_SR_CXFR_DONE))
+ ret = atmel_isi_handle_streaming(isi);
+ }
+
+ spin_unlock(&isi->lock);
+ return ret;
+}
+
+#define WAIT_ISI_RESET 1
+#define WAIT_ISI_DISABLE 0
+static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset)
+{
+ unsigned long timeout;
+ /*
+ * The reset or disable will only succeed if we have a
+ * pixel clock from the camera.
+ */
+ init_completion(&isi->complete);
+
+ if (wait_reset) {
+ isi_writel(isi, ISI_INTEN, ISI_CTRL_SRST);
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_SRST);
+ } else {
+ isi_writel(isi, ISI_INTEN, ISI_CTRL_DIS);
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+ }
+
+ timeout = wait_for_completion_timeout(&isi->complete,
+ msecs_to_jiffies(100));
+ if (timeout == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------
+ Videobuf operations
+ ------------------------------------------------------------------*/
+static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned long sizes[],
+ void *alloc_ctxs[])
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ unsigned long size;
+ int ret, bytes_per_line;
+
+ /* Reset ISI */
+ ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
+ if (ret < 0) {
+ dev_err(icd->parent, "Reset ISI timed out\n");
+ return ret;
+ }
+ /* Disable all interrupts */
+ isi_writel(isi, ISI_INTDIS, ~0UL);
+
+ bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
+
+ size = bytes_per_line * icd->user_height;
+
+ if (!*nbuffers || *nbuffers > MAX_BUFFER_NUM)
+ *nbuffers = MAX_BUFFER_NUM;
+
+ if (size * *nbuffers > VID_LIMIT_BYTES)
+ *nbuffers = VID_LIMIT_BYTES / size;
+
+ *nplanes = 1;
+ sizes[0] = size;
+ alloc_ctxs[0] = isi->alloc_ctx;
+
+ isi->sequence = 0;
+ isi->active = NULL;
+
+ dev_dbg(icd->parent, "%s, count=%d, size=%ld\n", __func__,
+ *nbuffers, size);
+
+ return 0;
+}
+
+static int buffer_init(struct vb2_buffer *vb)
+{
+ struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+
+ buf->p_dma_desc = NULL;
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ unsigned long size;
+ struct isi_dma_desc *desc;
+ int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
+ icd->current_fmt->host_fmt);
+
+ if (bytes_per_line < 0)
+ return bytes_per_line;
+
+ size = bytes_per_line * icd->user_height;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(icd->parent, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(&buf->vb, 0, size);
+
+ if (!buf->p_dma_desc) {
+ if (list_empty(&isi->dma_desc_head)) {
+ dev_err(icd->parent, "Not enough dma descriptors.\n");
+ return -EINVAL;
+ } else {
+ /* Get an available descriptor */
+ desc = list_entry(isi->dma_desc_head.next,
+ struct isi_dma_desc, list);
+ /* Delete the descriptor since now it is used */
+ list_del_init(&desc->list);
+
+ /* Initialize the dma descriptor */
+ desc->p_fbd->fb_address =
+ vb2_dma_contig_plane_paddr(vb, 0);
+ desc->p_fbd->next_fbd_address = 0;
+ set_dma_ctrl(desc->p_fbd, ISI_DMA_CTRL_WB);
+
+ buf->p_dma_desc = desc;
+ }
+ }
+ return 0;
+}
+
+static void buffer_cleanup(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+
+ /* This descriptor is available now and we add to head list */
+ if (buf->p_dma_desc)
+ list_add(&buf->p_dma_desc->list, &isi->dma_desc_head);
+}
+
+static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer)
+{
+ u32 ctrl, cfg1;
+
+ cfg1 = isi_readl(isi, ISI_CFG1);
+ /* Enable irq: cxfr for the codec path, pxfr for the preview path */
+ isi_writel(isi, ISI_INTEN,
+ ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
+
+ /* Check if already in a frame */
+ if (isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) {
+ dev_err(isi->icd->parent, "Already in frame handling.\n");
+ return;
+ }
+
+ isi_writel(isi, ISI_DMA_C_DSCR, buffer->p_dma_desc->fbd_phys);
+ isi_writel(isi, ISI_DMA_C_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE);
+ isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH);
+
+ /* Enable linked list */
+ cfg1 |= isi->pdata->frate | ISI_CFG1_DISCR;
+
+ /* Enable codec path and ISI */
+ ctrl = ISI_CTRL_CDC | ISI_CTRL_EN;
+ isi_writel(isi, ISI_CTRL, ctrl);
+ isi_writel(isi, ISI_CFG1, cfg1);
+}
+
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&isi->lock, flags);
+ list_add_tail(&buf->list, &isi->video_buffer_list);
+
+ if (isi->active == NULL) {
+ isi->active = buf;
+ start_dma(isi, buf);
+ }
+ spin_unlock_irqrestore(&isi->lock, flags);
+}
+
+static int start_streaming(struct vb2_queue *vq)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+
+ u32 sr = 0;
+ int ret;
+
+ spin_lock_irq(&isi->lock);
+ isi->state = ISI_STATE_IDLE;
+ /* Clear any pending SOF interrupt */
+ sr = isi_readl(isi, ISI_STATUS);
+ /* Enable VSYNC interrupt for SOF */
+ isi_writel(isi, ISI_INTEN, ISI_SR_VSYNC);
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_EN);
+ spin_unlock_irq(&isi->lock);
+
+ dev_dbg(icd->parent, "Waiting for SOF\n");
+ ret = wait_event_interruptible(isi->vsync_wq,
+ isi->state != ISI_STATE_IDLE);
+ if (ret)
+ return ret;
+
+ if (isi->state != ISI_STATE_READY)
+ return -EIO;
+
+ spin_lock_irq(&isi->lock);
+ isi->state = ISI_STATE_WAIT_SOF;
+ isi_writel(isi, ISI_INTDIS, ISI_SR_VSYNC);
+ spin_unlock_irq(&isi->lock);
+
+ return 0;
+}
+
+/* abort streaming and wait for last buffer */
+static int stop_streaming(struct vb2_queue *vq)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ struct frame_buffer *buf, *node;
+ int ret = 0;
+ unsigned long timeout;
+
+ spin_lock_irq(&isi->lock);
+ isi->active = NULL;
+ /* Release all active buffers */
+ list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) {
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irq(&isi->lock);
+
+ timeout = jiffies + msecs_to_jiffies(FRAME_INTERVAL_MILLI_SEC);
+ /* Wait until the end of the current frame. */
+ while ((isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) &&
+ time_before(jiffies, timeout))
+ msleep(1);
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(icd->parent,
+ "Timeout waiting for finishing codec request\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Disable interrupts */
+ isi_writel(isi, ISI_INTDIS,
+ ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE);
+
+ /* Disable ISI and wait until it is done */
+ ret = atmel_isi_wait_status(isi, WAIT_ISI_DISABLE);
+ if (ret < 0)
+ dev_err(icd->parent, "Disable ISI timed out\n");
+
+ return ret;
+}
+
+static struct vb2_ops isi_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_init = buffer_init,
+ .buf_prepare = buffer_prepare,
+ .buf_cleanup = buffer_cleanup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = soc_camera_unlock,
+ .wait_finish = soc_camera_lock,
+};
+
+/* ------------------------------------------------------------------
+ SOC camera operations for the device
+ ------------------------------------------------------------------*/
+static int isi_camera_init_videobuf(struct vb2_queue *q,
+ struct soc_camera_device *icd)
+{
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP;
+ q->drv_priv = icd;
+ q->buf_struct_size = sizeof(struct frame_buffer);
+ q->ops = &isi_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+
+ return vb2_queue_init(q);
+}
+
+static int isi_camera_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+ if (!xlate) {
+ dev_warn(icd->parent, "Format %x not found\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+
+ dev_dbg(icd->parent, "Plan to set format %dx%d\n",
+ pix->width, pix->height);
+
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ if (mf.code != xlate->code)
+ return -EINVAL;
+
+ ret = configure_geometry(isi, pix->width, pix->height, xlate->code);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+ icd->current_fmt = xlate;
+
+ dev_dbg(icd->parent, "Finally set format %dx%d\n",
+ pix->width, pix->height);
+
+ return ret;
+}
+
+static int isi_camera_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ u32 pixfmt = pix->pixelformat;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (pixfmt && !xlate) {
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
+ return -EINVAL;
+ }
+
+ /* limit to Atmel ISI hardware capabilities */
+ if (pix->height > MAX_SUPPORT_HEIGHT)
+ pix->height = MAX_SUPPORT_HEIGHT;
+ if (pix->width > MAX_SUPPORT_WIDTH)
+ pix->width = MAX_SUPPORT_WIDTH;
+
+ /* limit to sensor capabilities */
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->colorspace = mf.colorspace;
+
+ switch (mf.field) {
+ case V4L2_FIELD_ANY:
+ pix->field = V4L2_FIELD_NONE;
+ break;
+ case V4L2_FIELD_NONE:
+ break;
+ default:
+ dev_err(icd->parent, "Field type %d unsupported.\n",
+ mf.field);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct soc_mbus_pixelfmt isi_camera_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .name = "Packed YUV422 16 bit",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ },
+};
+
+/* This will be corrected as we get more formats */
+static bool isi_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt)
+{
+ return fmt->packing == SOC_MBUS_PACKING_NONE ||
+ (fmt->bits_per_sample == 8 &&
+ fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
+ (fmt->bits_per_sample > 8 &&
+ fmt->packing == SOC_MBUS_PACKING_EXTEND16);
+}
+
+static unsigned long make_bus_param(struct atmel_isi *isi)
+{
+ unsigned long flags;
+ /*
+ * Platform specified synchronization and pixel clock polarities are
+ * only a recommendation and are only used during probing. Atmel ISI
+ * camera interface only works in master mode, i.e., uses HSYNC and
+ * VSYNC signals from the sensor
+ */
+ flags = SOCAM_MASTER |
+ SOCAM_HSYNC_ACTIVE_HIGH |
+ SOCAM_HSYNC_ACTIVE_LOW |
+ SOCAM_VSYNC_ACTIVE_HIGH |
+ SOCAM_VSYNC_ACTIVE_LOW |
+ SOCAM_PCLK_SAMPLE_RISING |
+ SOCAM_PCLK_SAMPLE_FALLING |
+ SOCAM_DATA_ACTIVE_HIGH;
+
+ if (isi->pdata->data_width_flags & ISI_DATAWIDTH_10)
+ flags |= SOCAM_DATAWIDTH_10;
+
+ if (isi->pdata->data_width_flags & ISI_DATAWIDTH_8)
+ flags |= SOCAM_DATAWIDTH_8;
+
+ if (flags & SOCAM_DATAWIDTH_MASK)
+ return flags;
+
+ return 0;
+}
+
+static int isi_camera_try_bus_param(struct soc_camera_device *icd,
+ unsigned char buswidth)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ unsigned long camera_flags;
+ int ret;
+
+ camera_flags = icd->ops->query_bus_param(icd);
+ ret = soc_camera_bus_param_compatible(camera_flags,
+ make_bus_param(isi));
+ if (!ret)
+ return -EINVAL;
+ return 0;
+}
+
+
+static int isi_camera_get_formats(struct soc_camera_device *icd,
+ unsigned int idx,
+ struct soc_camera_format_xlate *xlate)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ int formats = 0, ret;
+ /* sensor format */
+ enum v4l2_mbus_pixelcode code;
+ /* soc camera host format */
+ const struct soc_mbus_pixelfmt *fmt;
+
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ /* No more formats */
+ return 0;
+
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_err(icd->parent,
+ "Invalid format code #%u: %d\n", idx, code);
+ return 0;
+ }
+
+ /* This also checks support for the requested bits-per-sample */
+ ret = isi_camera_try_bus_param(icd, fmt->bits_per_sample);
+ if (ret < 0) {
+ dev_err(icd->parent,
+ "Fail to try the bus parameters.\n");
+ return 0;
+ }
+
+ switch (code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ case V4L2_MBUS_FMT_VYUY8_2X8:
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ case V4L2_MBUS_FMT_YVYU8_2X8:
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = &isi_camera_formats[0];
+ xlate->code = code;
+ xlate++;
+ dev_dbg(icd->parent, "Providing format %s using code %d\n",
+ isi_camera_formats[0].name, code);
+ }
+ break;
+ default:
+ if (!isi_camera_packing_supported(fmt))
+ return 0;
+ if (xlate)
+ dev_dbg(icd->parent,
+ "Providing format %s in pass-through mode\n",
+ fmt->name);
+ }
+
+ /* Generic pass-through */
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ xlate++;
+ }
+
+ return formats;
+}
+
+/* Called with .video_lock held */
+static int isi_camera_add_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ int ret;
+
+ if (isi->icd)
+ return -EBUSY;
+
+ ret = clk_enable(isi->pclk);
+ if (ret)
+ return ret;
+
+ isi->icd = icd;
+ dev_dbg(icd->parent, "Atmel ISI Camera driver attached to camera %d\n",
+ icd->devnum);
+ return 0;
+}
+/* Called with .video_lock held */
+static void isi_camera_remove_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+
+ BUG_ON(icd != isi->icd);
+
+ clk_disable(isi->pclk);
+ isi->icd = NULL;
+
+ dev_dbg(icd->parent, "Atmel ISI Camera driver detached from camera %d\n",
+ icd->devnum);
+}
+
+static unsigned int isi_camera_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ return vb2_poll(&icd->vb2_vidq, file, pt);
+}
+
+static int isi_camera_querycap(struct soc_camera_host *ici,
+ struct v4l2_capability *cap)
+{
+ strcpy(cap->driver, "atmel-isi");
+ strcpy(cap->card, "Atmel Image Sensor Interface");
+ cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING);
+ return 0;
+}
+
+static int isi_camera_set_bus_param(struct soc_camera_device *icd, u32 pixfmt)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct atmel_isi *isi = ici->priv;
+ unsigned long bus_flags, camera_flags, common_flags;
+ int ret;
+ u32 cfg1 = 0;
+
+ camera_flags = icd->ops->query_bus_param(icd);
+
+ bus_flags = make_bus_param(isi);
+ common_flags = soc_camera_bus_param_compatible(camera_flags, bus_flags);
+ dev_dbg(icd->parent, "Flags cam: 0x%lx host: 0x%lx common: 0x%lx\n",
+ camera_flags, bus_flags, common_flags);
+ if (!common_flags)
+ return -EINVAL;
+
+ /* Make choices based on platform preferences */
+ if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) {
+ if (isi->pdata->hsync_act_low)
+ common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
+ if (isi->pdata->vsync_act_low)
+ common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & SOCAM_PCLK_SAMPLE_RISING) &&
+ (common_flags & SOCAM_PCLK_SAMPLE_FALLING)) {
+ if (isi->pdata->pclk_act_falling)
+ common_flags &= ~SOCAM_PCLK_SAMPLE_RISING;
+ else
+ common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING;
+ }
+
+ ret = icd->ops->set_bus_param(icd, common_flags);
+ if (ret < 0) {
+ dev_dbg(icd->parent, "Camera set_bus_param(%lx) returned %d\n",
+ common_flags, ret);
+ return ret;
+ }
+
+ /* set bus param for ISI */
+ if (common_flags & SOCAM_HSYNC_ACTIVE_LOW)
+ cfg1 |= ISI_CFG1_HSYNC_POL_ACTIVE_LOW;
+ if (common_flags & SOCAM_VSYNC_ACTIVE_LOW)
+ cfg1 |= ISI_CFG1_VSYNC_POL_ACTIVE_LOW;
+ if (common_flags & SOCAM_PCLK_SAMPLE_FALLING)
+ cfg1 |= ISI_CFG1_PIXCLK_POL_ACTIVE_FALLING;
+
+ if (isi->pdata->has_emb_sync)
+ cfg1 |= ISI_CFG1_EMB_SYNC;
+ if (isi->pdata->isi_full_mode)
+ cfg1 |= ISI_CFG1_FULL_MODE;
+
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+ isi_writel(isi, ISI_CFG1, cfg1);
+
+ return 0;
+}
+
+static struct soc_camera_host_ops isi_soc_camera_host_ops = {
+ .owner = THIS_MODULE,
+ .add = isi_camera_add_device,
+ .remove = isi_camera_remove_device,
+ .set_fmt = isi_camera_set_fmt,
+ .try_fmt = isi_camera_try_fmt,
+ .get_formats = isi_camera_get_formats,
+ .init_videobuf2 = isi_camera_init_videobuf,
+ .poll = isi_camera_poll,
+ .querycap = isi_camera_querycap,
+ .set_bus_param = isi_camera_set_bus_param,
+};
+
+/* -----------------------------------------------------------------------*/
+static int __devexit atmel_isi_remove(struct platform_device *pdev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct atmel_isi *isi = container_of(soc_host,
+ struct atmel_isi, soc_host);
+
+ free_irq(isi->irq, isi);
+ soc_camera_host_unregister(soc_host);
+ vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct fbd) * MAX_BUFFER_NUM,
+ isi->p_fb_descriptors,
+ isi->fb_descriptors_phys);
+
+ iounmap(isi->regs);
+ clk_put(isi->pclk);
+ kfree(isi);
+
+ return 0;
+}
+
+static int __devinit atmel_isi_probe(struct platform_device *pdev)
+{
+ int irq;
+ struct atmel_isi *isi;
+ struct clk *pclk;
+ struct resource *regs;
+ int ret, i;
+ struct device *dev = &pdev->dev;
+ struct soc_camera_host *soc_host;
+ struct isi_platform_data *pdata;
+
+ pdata = dev->platform_data;
+ if (!pdata || !pdata->data_width_flags) {
+ dev_err(&pdev->dev,
+ "No config available for Atmel ISI\n");
+ return -EINVAL;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
+ pclk = clk_get(&pdev->dev, "isi_clk");
+ if (IS_ERR(pclk))
+ return PTR_ERR(pclk);
+
+ isi = kzalloc(sizeof(struct atmel_isi), GFP_KERNEL);
+ if (!isi) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Can't allocate interface!\n");
+ goto err_alloc_isi;
+ }
+
+ isi->pclk = pclk;
+ isi->pdata = pdata;
+ isi->active = NULL;
+ spin_lock_init(&isi->lock);
+ init_waitqueue_head(&isi->vsync_wq);
+ INIT_LIST_HEAD(&isi->video_buffer_list);
+ INIT_LIST_HEAD(&isi->dma_desc_head);
+
+ isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct fbd) * MAX_BUFFER_NUM,
+ &isi->fb_descriptors_phys,
+ GFP_KERNEL);
+ if (!isi->p_fb_descriptors) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Can't allocate descriptors!\n");
+ goto err_alloc_descriptors;
+ }
+
+ for (i = 0; i < MAX_BUFFER_NUM; i++) {
+ isi->dma_desc[i].p_fbd = isi->p_fb_descriptors + i;
+ isi->dma_desc[i].fbd_phys = isi->fb_descriptors_phys +
+ i * sizeof(struct fbd);
+ list_add(&isi->dma_desc[i].list, &isi->dma_desc_head);
+ }
+
+ isi->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(isi->alloc_ctx)) {
+ ret = PTR_ERR(isi->alloc_ctx);
+ goto err_alloc_ctx;
+ }
+
+ isi->regs = ioremap(regs->start, resource_size(regs));
+ if (!isi->regs) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_req_irq;
+ }
+
+ ret = request_irq(irq, isi_interrupt, 0, "isi", isi);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+ goto err_req_irq;
+ }
+ isi->irq = irq;
+
+ soc_host = &isi->soc_host;
+ soc_host->drv_name = "isi-camera";
+ soc_host->ops = &isi_soc_camera_host_ops;
+ soc_host->priv = isi;
+ soc_host->v4l2_dev.dev = &pdev->dev;
+ soc_host->nr = pdev->id;
+
+ ret = soc_camera_host_register(soc_host);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to register soc camera host\n");
+ goto err_register_soc_camera_host;
+ }
+ return 0;
+
+err_register_soc_camera_host:
+ free_irq(isi->irq, isi);
+err_req_irq:
+ iounmap(isi->regs);
+err_ioremap:
+ vb2_dma_contig_cleanup_ctx(isi->alloc_ctx);
+err_alloc_ctx:
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct fbd) * MAX_BUFFER_NUM,
+ isi->p_fb_descriptors,
+ isi->fb_descriptors_phys);
+err_alloc_descriptors:
+ kfree(isi);
+err_alloc_isi:
+ clk_put(pclk);
+
+ return ret;
+}
+
+static struct platform_driver atmel_isi_driver = {
+ .probe = atmel_isi_probe,
+ .remove = __devexit_p(atmel_isi_remove),
+ .driver = {
+ .name = "atmel_isi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init atmel_isi_init_module(void)
+{
+ return platform_driver_probe(&atmel_isi_driver, &atmel_isi_probe);
+}
+
+static void __exit atmel_isi_exit(void)
+{
+ platform_driver_unregister(&atmel_isi_driver);
+}
+module_init(atmel_isi_init_module);
+module_exit(atmel_isi_exit);
+
+MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>");
+MODULE_DESCRIPTION("The V4L2 driver for Atmel Linux");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("video");
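
The queue_setup() callback in atmel-isi.c above clamps the number of capture buffers twice: first to MAX_BUFFER_NUM, then to whatever fits in the 16 MiB VID_LIMIT_BYTES budget. A minimal sketch of just that clamp, with the constants copied from the file and an invented helper name:

#define MAX_BUFFER_NUM	32
#define VID_LIMIT_BYTES	(16 * 1024 * 1024)

static unsigned int clamp_buffer_count(unsigned int requested,
				       unsigned long frame_size)
{
	unsigned int n = requested;

	if (!n || n > MAX_BUFFER_NUM)
		n = MAX_BUFFER_NUM;
	if (frame_size * n > VID_LIMIT_BYTES)
		n = VID_LIMIT_BYTES / frame_size;
	return n;
}

For a 640x480 YUYV frame (614400 bytes per buffer) this allows at most 27 buffers, however many userspace asks for.
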
diff --git a/drivers/media/video/au0828/au0828-core.c b/drivers/media/video/au0828/au0828-core.c
index ca342e4c61f..1e4ce5068ec 100644
--- a/drivers/media/video/au0828/au0828-core.c
+++ b/drivers/media/video/au0828/au0828-core.c
@@ -292,3 +292,4 @@ module_exit(au0828_exit);
MODULE_DESCRIPTION("Driver for Auvitek AU0828 based products");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.2");
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index c03eb29a9ee..0b3e481ffe8 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -33,7 +33,6 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/suspend.h>
-#include <linux/version.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-chip-ident.h>
@@ -43,8 +42,6 @@
static DEFINE_MUTEX(au0828_sysfs_lock);
-#define AU0828_VERSION_CODE KERNEL_VERSION(0, 0, 1)
-
/* ------------------------------------------------------------------
Videobuf operations
------------------------------------------------------------------*/
@@ -1254,8 +1251,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, dev->board.name, sizeof(cap->card));
strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));
- cap->version = AU0828_VERSION_CODE;
-
/*set the device capabilities */
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VBI_CAPTURE |
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 3c9e6c7e7b5..5b15f63bf06 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -2892,13 +2892,10 @@ void __devinit bttv_idcard(struct bttv *btv)
{
unsigned int gpiobits;
int i,type;
- unsigned short tmp;
/* read PCI subsystem ID */
- pci_read_config_word(btv->c.pci, PCI_SUBSYSTEM_ID, &tmp);
- btv->cardid = tmp << 16;
- pci_read_config_word(btv->c.pci, PCI_SUBSYSTEM_VENDOR_ID, &tmp);
- btv->cardid |= tmp;
+ btv->cardid = btv->c.pci->subsystem_device << 16;
+ btv->cardid |= btv->c.pci->subsystem_vendor;
if (0 != btv->cardid && 0xffffffff != btv->cardid) {
/* look for the card */
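
The bttv-cards.c hunk above replaces two PCI config-space reads with the subsystem IDs the PCI core has already cached in struct pci_dev. The equivalent packing as a standalone helper (the function name is made up for illustration):

#include <linux/pci.h>

static u32 bttv_subsystem_id(const struct pci_dev *pdev)
{
	/* subsystem device in the high 16 bits, subsystem vendor in the low */
	return ((u32)pdev->subsystem_device << 16) | pdev->subsystem_vendor;
}
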
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index a97cf2750bd..14444de67d5 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -57,6 +57,7 @@
#include <media/saa6588.h>
+#define BTTV_VERSION "0.9.19"
unsigned int bttv_num; /* number of Bt848s in use */
struct bttv *bttvs[BTTV_MAX];
@@ -163,6 +164,7 @@ MODULE_PARM_DESC(radio_nr, "radio device numbers");
MODULE_DESCRIPTION("bttv - v4l/v4l2 driver module for bt848/878 based cards");
MODULE_AUTHOR("Ralph Metzler & Marcus Metzler & Gerd Knorr");
MODULE_LICENSE("GPL");
+MODULE_VERSION(BTTV_VERSION);
/* ----------------------------------------------------------------------- */
/* sysfs */
@@ -2616,7 +2618,6 @@ static int bttv_querycap(struct file *file, void *priv,
strlcpy(cap->card, btv->video_dev->name, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info),
"PCI:%s", pci_name(btv->c.pci));
- cap->version = BTTV_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VBI_CAPTURE |
@@ -3416,7 +3417,6 @@ static int radio_querycap(struct file *file, void *priv,
strcpy(cap->driver, "bttv");
strlcpy(cap->card, btv->radio_dev->name, sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(btv->c.pci));
- cap->version = BTTV_VERSION_CODE;
cap->capabilities = V4L2_CAP_TUNER;
return 0;
@@ -3474,7 +3474,7 @@ static int radio_s_tuner(struct file *file, void *priv,
if (0 != t->index)
return -EINVAL;
- bttv_call_all(btv, tuner, g_tuner, t);
+ bttv_call_all(btv, tuner, s_tuner, t);
return 0;
}
@@ -4585,14 +4585,8 @@ static int __init bttv_init_module(void)
bttv_num = 0;
- printk(KERN_INFO "bttv: driver version %d.%d.%d loaded\n",
- (BTTV_VERSION_CODE >> 16) & 0xff,
- (BTTV_VERSION_CODE >> 8) & 0xff,
- BTTV_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "bttv: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "bttv: driver version %s loaded\n",
+ BTTV_VERSION);
if (gbuffers < 2 || gbuffers > VIDEO_MAX_FRAME)
gbuffers = 2;
if (gbufsize > BTTV_MAX_FBUF)
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index 9b776faf074..318edf2830b 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -25,9 +25,6 @@
#ifndef _BTTVP_H_
#define _BTTVP_H_
-#include <linux/version.h>
-#define BTTV_VERSION_CODE KERNEL_VERSION(0,9,18)
-
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/i2c.h>
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index c1193506131..f09df9dffaa 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -71,7 +71,6 @@ OTHER DEALINGS IN THE SOFTWARE.
#include <linux/mm.h>
#include <linux/parport.h>
#include <linux/sched.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
@@ -647,7 +646,6 @@ static int qcam_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "B&W Quickcam", sizeof(vcap->card));
strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
- vcap->version = KERNEL_VERSION(0, 0, 2);
vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
return 0;
}
@@ -895,6 +893,7 @@ static struct qcam *qcam_init(struct parport *port)
if (v4l2_device_register(NULL, v4l2_dev) < 0) {
v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ kfree(qcam);
return NULL;
}
@@ -1092,3 +1091,4 @@ module_init(init_bw_qcams);
module_exit(exit_bw_qcams);
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.3");
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index 24fc00965a1..cd8ff047318 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -35,7 +35,6 @@
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <asm/uaccess.h>
#include <media/v4l2-device.h>
@@ -517,7 +516,6 @@ static int qcam_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "Color Quickcam", sizeof(vcap->card));
strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
- vcap->version = KERNEL_VERSION(0, 0, 3);
vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
return 0;
}
@@ -752,6 +750,7 @@ static struct qcam *qcam_init(struct parport *port)
if (v4l2_device_register(NULL, v4l2_dev) < 0) {
v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
+ kfree(qcam);
return NULL;
}
@@ -886,6 +885,7 @@ static void __exit cqcam_cleanup(void)
MODULE_AUTHOR("Philip Blundell <philb@gnu.org>");
MODULE_DESCRIPTION(BANNER);
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.4");
module_init(cqcam_init);
module_exit(cqcam_cleanup);
diff --git a/drivers/media/video/cafe_ccic-regs.h b/drivers/media/video/cafe_ccic-regs.h
deleted file mode 100644
index 8e2a87cdc79..00000000000
--- a/drivers/media/video/cafe_ccic-regs.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Register definitions for the m88alp01 camera interface. Offsets in bytes
- * as given in the spec.
- *
- * Copyright 2006 One Laptop Per Child Association, Inc.
- *
- * Written by Jonathan Corbet, corbet@lwn.net.
- *
- * This file may be distributed under the terms of the GNU General
- * Public License, version 2.
- */
-#define REG_Y0BAR 0x00
-#define REG_Y1BAR 0x04
-#define REG_Y2BAR 0x08
-/* ... */
-
-#define REG_IMGPITCH 0x24 /* Image pitch register */
-#define IMGP_YP_SHFT 2 /* Y pitch params */
-#define IMGP_YP_MASK 0x00003ffc /* Y pitch field */
-#define IMGP_UVP_SHFT 18 /* UV pitch (planar) */
-#define IMGP_UVP_MASK 0x3ffc0000
-#define REG_IRQSTATRAW 0x28 /* RAW IRQ Status */
-#define IRQ_EOF0 0x00000001 /* End of frame 0 */
-#define IRQ_EOF1 0x00000002 /* End of frame 1 */
-#define IRQ_EOF2 0x00000004 /* End of frame 2 */
-#define IRQ_SOF0 0x00000008 /* Start of frame 0 */
-#define IRQ_SOF1 0x00000010 /* Start of frame 1 */
-#define IRQ_SOF2 0x00000020 /* Start of frame 2 */
-#define IRQ_OVERFLOW 0x00000040 /* FIFO overflow */
-#define IRQ_TWSIW 0x00010000 /* TWSI (smbus) write */
-#define IRQ_TWSIR 0x00020000 /* TWSI read */
-#define IRQ_TWSIE 0x00040000 /* TWSI error */
-#define TWSIIRQS (IRQ_TWSIW|IRQ_TWSIR|IRQ_TWSIE)
-#define FRAMEIRQS (IRQ_EOF0|IRQ_EOF1|IRQ_EOF2|IRQ_SOF0|IRQ_SOF1|IRQ_SOF2)
-#define ALLIRQS (TWSIIRQS|FRAMEIRQS|IRQ_OVERFLOW)
-#define REG_IRQMASK 0x2c /* IRQ mask - same bits as IRQSTAT */
-#define REG_IRQSTAT 0x30 /* IRQ status / clear */
-
-#define REG_IMGSIZE 0x34 /* Image size */
-#define IMGSZ_V_MASK 0x1fff0000
-#define IMGSZ_V_SHIFT 16
-#define IMGSZ_H_MASK 0x00003fff
-#define REG_IMGOFFSET 0x38 /* Image offset */
-
-#define REG_CTRL0 0x3c /* Control 0 */
-#define C0_ENABLE 0x00000001 /* Makes the whole thing go */
-
-/* Mask for all the format bits */
-#define C0_DF_MASK 0x00fffffc /* Bits 2-23 */
-
-/* RGB ordering */
-#define C0_RGB4_RGBX 0x00000000
-#define C0_RGB4_XRGB 0x00000004
-#define C0_RGB4_BGRX 0x00000008
-#define C0_RGB4_XBGR 0x0000000c
-#define C0_RGB5_RGGB 0x00000000
-#define C0_RGB5_GRBG 0x00000004
-#define C0_RGB5_GBRG 0x00000008
-#define C0_RGB5_BGGR 0x0000000c
-
-/* Spec has two fields for DIN and DOUT, but they must match, so
- combine them here. */
-#define C0_DF_YUV 0x00000000 /* Data is YUV */
-#define C0_DF_RGB 0x000000a0 /* ... RGB */
-#define C0_DF_BAYER 0x00000140 /* ... Bayer */
-/* 8-8-8 must be missing from the below - ask */
-#define C0_RGBF_565 0x00000000
-#define C0_RGBF_444 0x00000800
-#define C0_RGB_BGR 0x00001000 /* Blue comes first */
-#define C0_YUV_PLANAR 0x00000000 /* YUV 422 planar format */
-#define C0_YUV_PACKED 0x00008000 /* YUV 422 packed */
-#define C0_YUV_420PL 0x0000a000 /* YUV 420 planar */
-/* Think that 420 packed must be 111 - ask */
-#define C0_YUVE_YUYV 0x00000000 /* Y1CbY0Cr */
-#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
-#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
-#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
-#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */
-#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */
-#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
-#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
-/* Bayer bits 18,19 if needed */
-#define C0_HPOL_LOW 0x01000000 /* HSYNC polarity active low */
-#define C0_VPOL_LOW 0x02000000 /* VSYNC polarity active low */
-#define C0_VCLK_LOW 0x04000000 /* VCLK on falling edge */
-#define C0_DOWNSCALE 0x08000000 /* Enable downscaler */
-#define C0_SIFM_MASK 0xc0000000 /* SIF mode bits */
-#define C0_SIF_HVSYNC 0x00000000 /* Use H/VSYNC */
-#define CO_SOF_NOSYNC 0x40000000 /* Use inband active signaling */
-
-
-#define REG_CTRL1 0x40 /* Control 1 */
-#define C1_444ALPHA 0x00f00000 /* Alpha field in RGB444 */
-#define C1_ALPHA_SHFT 20
-#define C1_DMAB32 0x00000000 /* 32-byte DMA burst */
-#define C1_DMAB16 0x02000000 /* 16-byte DMA burst */
-#define C1_DMAB64 0x04000000 /* 64-byte DMA burst */
-#define C1_DMAB_MASK 0x06000000
-#define C1_TWOBUFS 0x08000000 /* Use only two DMA buffers */
-#define C1_PWRDWN 0x10000000 /* Power down */
-
-#define REG_CLKCTRL 0x88 /* Clock control */
-#define CLK_DIV_MASK 0x0000ffff /* Upper bits RW "reserved" */
-
-#define REG_GPR 0xb4 /* General purpose register. This
- controls inputs to the power and reset
- pins on the OV7670 used with OLPC;
- other deployments could differ. */
-#define GPR_C1EN 0x00000020 /* Pad 1 (power down) enable */
-#define GPR_C0EN 0x00000010 /* Pad 0 (reset) enable */
-#define GPR_C1 0x00000002 /* Control 1 value */
-/*
- * Control 0 is wired to reset on OLPC machines. For ov7x sensors,
- * it is active low, for ov6x, instead, it's active high. What
- * fun.
- */
-#define GPR_C0 0x00000001 /* Control 0 value */
-
-#define REG_TWSIC0 0xb8 /* TWSI (smbus) control 0 */
-#define TWSIC0_EN 0x00000001 /* TWSI enable */
-#define TWSIC0_MODE 0x00000002 /* 1 = 16-bit, 0 = 8-bit */
-#define TWSIC0_SID 0x000003fc /* Slave ID */
-#define TWSIC0_SID_SHIFT 2
-#define TWSIC0_CLKDIV 0x0007fc00 /* Clock divider */
-#define TWSIC0_MASKACK 0x00400000 /* Mask ack from sensor */
-#define TWSIC0_OVMAGIC 0x00800000 /* Make it work on OV sensors */
-
-#define REG_TWSIC1 0xbc /* TWSI control 1 */
-#define TWSIC1_DATA 0x0000ffff /* Data to/from camchip */
-#define TWSIC1_ADDR 0x00ff0000 /* Address (register) */
-#define TWSIC1_ADDR_SHIFT 16
-#define TWSIC1_READ 0x01000000 /* Set for read op */
-#define TWSIC1_WSTAT 0x02000000 /* Write status */
-#define TWSIC1_RVALID 0x04000000 /* Read data valid */
-#define TWSIC1_ERROR 0x08000000 /* Something screwed up */
-
-
-#define REG_UBAR 0xc4 /* Upper base address register */
-
-/*
- * Here are the weird global control registers which are said to live
- * way up here.
- */
-#define REG_GL_CSR 0x3004 /* Control/status register */
-#define GCSR_SRS 0x00000001 /* SW Reset set */
-#define GCSR_SRC 0x00000002 /* SW Reset clear */
-#define GCSR_MRS 0x00000004 /* Master reset set */
-#define GCSR_MRC 0x00000008 /* HW Reset clear */
-#define GCSR_CCIC_EN 0x00004000 /* CCIC Clock enable */
-#define REG_GL_IMASK 0x300c /* Interrupt mask register */
-#define GIMSK_CCIC_EN 0x00000004 /* CCIC Interrupt enable */
-
-#define REG_GL_FCR 0x3038 /* GPIO functional control register */
-#define GFCR_GPIO_ON 0x08 /* Camera GPIO enabled */
-#define REG_GL_GPIOR 0x315c /* GPIO register */
-#define GGPIO_OUT 0x80000 /* GPIO output */
-#define GGPIO_VAL 0x00008 /* Output pin value */
-
-#define REG_LEN REG_GL_IMASK + 4
-
-
-/*
- * Useful stuff that probably belongs somewhere global.
- */
-#define VGA_WIDTH 640
-#define VGA_HEIGHT 480
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
deleted file mode 100644
index 66470339849..00000000000
--- a/drivers/media/video/cafe_ccic.c
+++ /dev/null
@@ -1,2267 +0,0 @@
-/*
- * A driver for the CMOS camera controller in the Marvell 88ALP01 "cafe"
- * multifunction chip. Currently works with the Omnivision OV7670
- * sensor.
- *
- * The data sheet for this device can be found at:
- * http://www.marvell.com/products/pc_connectivity/88alp01/
- *
- * Copyright 2006 One Laptop Per Child Association, Inc.
- * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
- *
- * Written by Jonathan Corbet, corbet@lwn.net.
- *
- * v4l2_device/v4l2_subdev conversion by:
- * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
- *
- * Note: this conversion is untested! Please contact the linux-media
- * mailing list if you can test this, together with the test results.
- *
- * This file may be distributed under the terms of the GNU General
- * Public License, version 2.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/dmi.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/videodev2.h>
-#include <linux/slab.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-chip-ident.h>
-#include <linux/device.h>
-#include <linux/wait.h>
-#include <linux/list.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/vmalloc.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-#include "ov7670.h"
-#include "cafe_ccic-regs.h"
-
-#define CAFE_VERSION 0x000002
-
-
-/*
- * Parameters.
- */
-MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
-MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
-MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("Video");
-
-/*
- * Internal DMA buffer management. Since the controller cannot do S/G I/O,
- * we must have physically contiguous buffers to bring frames into.
- * These parameters control how many buffers we use, whether we
- * allocate them at load time (better chance of success, but nails down
- * memory) or when somebody tries to use the camera (riskier), and,
- * for load-time allocation, how big they should be.
- *
- * The controller can cycle through three buffers. We could use
- * more by flipping pointers around, but it probably makes little
- * sense.
- */
-
-#define MAX_DMA_BUFS 3
-static int alloc_bufs_at_read;
-module_param(alloc_bufs_at_read, bool, 0444);
-MODULE_PARM_DESC(alloc_bufs_at_read,
- "Non-zero value causes DMA buffers to be allocated when the "
- "video capture device is read, rather than at module load "
- "time. This saves memory, but decreases the chances of "
- "successfully getting those buffers.");
-
-static int n_dma_bufs = 3;
-module_param(n_dma_bufs, uint, 0644);
-MODULE_PARM_DESC(n_dma_bufs,
- "The number of DMA buffers to allocate. Can be either two "
- "(saves memory, makes timing tighter) or three.");
-
-static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2; /* Worst case */
-module_param(dma_buf_size, uint, 0444);
-MODULE_PARM_DESC(dma_buf_size,
- "The size of the allocated DMA buffers. If actual operating "
- "parameters require larger buffers, an attempt to reallocate "
- "will be made.");
-
-static int min_buffers = 1;
-module_param(min_buffers, uint, 0644);
-MODULE_PARM_DESC(min_buffers,
- "The minimum number of streaming I/O buffers we are willing "
- "to work with.");
-
-static int max_buffers = 10;
-module_param(max_buffers, uint, 0644);
-MODULE_PARM_DESC(max_buffers,
- "The maximum number of streaming I/O buffers an application "
- "will be allowed to allocate. These buffers are big and live "
- "in vmalloc space.");
-
-static int flip;
-module_param(flip, bool, 0444);
-MODULE_PARM_DESC(flip,
- "If set, the sensor will be instructed to flip the image "
- "vertically.");
-
-
-enum cafe_state {
- S_NOTREADY, /* Not yet initialized */
- S_IDLE, /* Just hanging around */
- S_FLAKED, /* Some sort of problem */
- S_SINGLEREAD, /* In read() */
- S_SPECREAD, /* Speculative read (for future read()) */
- S_STREAMING /* Streaming data */
-};
-
-/*
- * Tracking of streaming I/O buffers.
- */
-struct cafe_sio_buffer {
- struct list_head list;
- struct v4l2_buffer v4lbuf;
- char *buffer; /* Where it lives in kernel space */
- int mapcount;
- struct cafe_camera *cam;
-};
-
-/*
- * A description of one of our devices.
- * Locking: controlled by s_mutex. Certain fields, however, require
- * the dev_lock spinlock; they are marked as such by comments.
- * dev_lock is also required for access to device registers.
- */
-struct cafe_camera
-{
- struct v4l2_device v4l2_dev;
- enum cafe_state state;
- unsigned long flags; /* Buffer status, mainly (dev_lock) */
- int users; /* How many open FDs */
- struct file *owner; /* Who has data access (v4l2) */
-
- /*
- * Subsystem structures.
- */
- struct pci_dev *pdev;
- struct video_device vdev;
- struct i2c_adapter i2c_adapter;
- struct v4l2_subdev *sensor;
- unsigned short sensor_addr;
-
- unsigned char __iomem *regs;
- struct list_head dev_list; /* link to other devices */
-
- /* DMA buffers */
- unsigned int nbufs; /* How many are alloc'd */
- int next_buf; /* Next to consume (dev_lock) */
- unsigned int dma_buf_size; /* allocated size */
- void *dma_bufs[MAX_DMA_BUFS]; /* Internal buffer addresses */
- dma_addr_t dma_handles[MAX_DMA_BUFS]; /* Buffer bus addresses */
- unsigned int specframes; /* Unconsumed spec frames (dev_lock) */
- unsigned int sequence; /* Frame sequence number */
- unsigned int buf_seq[MAX_DMA_BUFS]; /* Sequence for individual buffers */
-
- /* Streaming buffers */
- unsigned int n_sbufs; /* How many we have */
- struct cafe_sio_buffer *sb_bufs; /* The array of housekeeping structs */
- struct list_head sb_avail; /* Available for data (we own) (dev_lock) */
- struct list_head sb_full; /* With data (user space owns) (dev_lock) */
- struct tasklet_struct s_tasklet;
-
- /* Current operating parameters */
- u32 sensor_type; /* Currently ov7670 only */
- struct v4l2_pix_format pix_format;
- enum v4l2_mbus_pixelcode mbus_code;
-
- /* Locks */
- struct mutex s_mutex; /* Access to this structure */
- spinlock_t dev_lock; /* Access to device */
-
- /* Misc */
- wait_queue_head_t smbus_wait; /* Waiting on i2c events */
- wait_queue_head_t iowait; /* Waiting on frame data */
-};
-
-/*
- * Status flags. Always manipulated with bit operations.
- */
-#define CF_BUF0_VALID 0 /* Buffers valid - first three */
-#define CF_BUF1_VALID 1
-#define CF_BUF2_VALID 2
-#define CF_DMA_ACTIVE 3 /* A frame is incoming */
-#define CF_CONFIG_NEEDED 4 /* Must configure hardware */
-
-#define sensor_call(cam, o, f, args...) \
- v4l2_subdev_call(cam->sensor, o, f, ##args)
-
-static inline struct cafe_camera *to_cam(struct v4l2_device *dev)
-{
- return container_of(dev, struct cafe_camera, v4l2_dev);
-}
-
-static struct cafe_format_struct {
- __u8 *desc;
- __u32 pixelformat;
- int bpp; /* Bytes per pixel */
- enum v4l2_mbus_pixelcode mbus_code;
-} cafe_formats[] = {
- {
- .desc = "YUYV 4:2:2",
- .pixelformat = V4L2_PIX_FMT_YUYV,
- .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
- .bpp = 2,
- },
- {
- .desc = "RGB 444",
- .pixelformat = V4L2_PIX_FMT_RGB444,
- .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
- .bpp = 2,
- },
- {
- .desc = "RGB 565",
- .pixelformat = V4L2_PIX_FMT_RGB565,
- .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
- .bpp = 2,
- },
- {
- .desc = "Raw RGB Bayer",
- .pixelformat = V4L2_PIX_FMT_SBGGR8,
- .mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8,
- .bpp = 1
- },
-};
-#define N_CAFE_FMTS ARRAY_SIZE(cafe_formats)
-
-static struct cafe_format_struct *cafe_find_format(u32 pixelformat)
-{
- unsigned i;
-
- for (i = 0; i < N_CAFE_FMTS; i++)
- if (cafe_formats[i].pixelformat == pixelformat)
- return cafe_formats + i;
- /* Not found? Then return the first format. */
- return cafe_formats;
-}
-
-/*
- * Start over with DMA buffers - dev_lock needed.
- */
-static void cafe_reset_buffers(struct cafe_camera *cam)
-{
- int i;
-
- cam->next_buf = -1;
- for (i = 0; i < cam->nbufs; i++)
- clear_bit(i, &cam->flags);
- cam->specframes = 0;
-}
-
-static inline int cafe_needs_config(struct cafe_camera *cam)
-{
- return test_bit(CF_CONFIG_NEEDED, &cam->flags);
-}
-
-static void cafe_set_config_needed(struct cafe_camera *cam, int needed)
-{
- if (needed)
- set_bit(CF_CONFIG_NEEDED, &cam->flags);
- else
- clear_bit(CF_CONFIG_NEEDED, &cam->flags);
-}
-
-
-
-
-/*
- * Debugging and related.
- */
-#define cam_err(cam, fmt, arg...) \
- dev_err(&(cam)->pdev->dev, fmt, ##arg);
-#define cam_warn(cam, fmt, arg...) \
- dev_warn(&(cam)->pdev->dev, fmt, ##arg);
-#define cam_dbg(cam, fmt, arg...) \
- dev_dbg(&(cam)->pdev->dev, fmt, ##arg);
-
-
-/* ---------------------------------------------------------------------*/
-
-/*
- * Device register I/O
- */
-static inline void cafe_reg_write(struct cafe_camera *cam, unsigned int reg,
- unsigned int val)
-{
- iowrite32(val, cam->regs + reg);
-}
-
-static inline unsigned int cafe_reg_read(struct cafe_camera *cam,
- unsigned int reg)
-{
- return ioread32(cam->regs + reg);
-}
-
-
-static inline void cafe_reg_write_mask(struct cafe_camera *cam, unsigned int reg,
- unsigned int val, unsigned int mask)
-{
- unsigned int v = cafe_reg_read(cam, reg);
-
- v = (v & ~mask) | (val & mask);
- cafe_reg_write(cam, reg, v);
-}
-
-static inline void cafe_reg_clear_bit(struct cafe_camera *cam,
- unsigned int reg, unsigned int val)
-{
- cafe_reg_write_mask(cam, reg, 0, val);
-}
-
-static inline void cafe_reg_set_bit(struct cafe_camera *cam,
- unsigned int reg, unsigned int val)
-{
- cafe_reg_write_mask(cam, reg, val, val);
-}
-
-
-
-/* -------------------------------------------------------------------- */
-/*
- * The I2C/SMBUS interface to the camera itself starts here. The
- * controller handles SMBUS itself, presenting a relatively simple register
- * interface; all we have to do is to tell it where to route the data.
- */
-#define CAFE_SMBUS_TIMEOUT (HZ) /* generous */
-
-static int cafe_smbus_write_done(struct cafe_camera *cam)
-{
- unsigned long flags;
- int c1;
-
- /*
- * We must delay after the interrupt, or the controller gets confused
- * and never does give us good status. Fortunately, we don't do this
- * often.
- */
- udelay(20);
- spin_lock_irqsave(&cam->dev_lock, flags);
- c1 = cafe_reg_read(cam, REG_TWSIC1);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- return (c1 & (TWSIC1_WSTAT|TWSIC1_ERROR)) != TWSIC1_WSTAT;
-}
-
-static int cafe_smbus_write_data(struct cafe_camera *cam,
- u16 addr, u8 command, u8 value)
-{
- unsigned int rval;
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
- rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
- /*
- * Marvell sez set clkdiv to all 1's for now.
- */
- rval |= TWSIC0_CLKDIV;
- cafe_reg_write(cam, REG_TWSIC0, rval);
- (void) cafe_reg_read(cam, REG_TWSIC1); /* force write */
- rval = value | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
- cafe_reg_write(cam, REG_TWSIC1, rval);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-
- /* Unfortunately, reading TWSIC1 too soon after sending a command
- * causes the device to die.
- * Use a busy-wait because we often send a large quantity of small
- * commands at once; using msleep() would cause a lot of context
- * switches which take longer than 2ms, resulting in noticeable
- * boot-time and capture-start delays.
- */
- mdelay(2);
-
- /*
- * Another sad fact is that sometimes, commands silently complete but
- * cafe_smbus_write_done() never becomes aware of this.
- * This happens at random and appears to possibly occur with any
- * command.
- * We don't understand why this is. We work around this issue
- * with the timeout in the wait below, assuming that all commands
- * complete within the timeout.
- */
- wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(cam),
- CAFE_SMBUS_TIMEOUT);
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- rval = cafe_reg_read(cam, REG_TWSIC1);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-
- if (rval & TWSIC1_WSTAT) {
- cam_err(cam, "SMBUS write (%02x/%02x/%02x) timed out\n", addr,
- command, value);
- return -EIO;
- }
- if (rval & TWSIC1_ERROR) {
- cam_err(cam, "SMBUS write (%02x/%02x/%02x) error\n", addr,
- command, value);
- return -EIO;
- }
- return 0;
-}
-
-
-
-static int cafe_smbus_read_done(struct cafe_camera *cam)
-{
- unsigned long flags;
- int c1;
-
- /*
- * We must delay after the interrupt, or the controller gets confused
- * and never does give us good status. Fortunately, we don't do this
- * often.
- */
- udelay(20);
- spin_lock_irqsave(&cam->dev_lock, flags);
- c1 = cafe_reg_read(cam, REG_TWSIC1);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- return c1 & (TWSIC1_RVALID|TWSIC1_ERROR);
-}
-
-
-
-static int cafe_smbus_read_data(struct cafe_camera *cam,
- u16 addr, u8 command, u8 *value)
-{
- unsigned int rval;
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
- rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
- /*
- * Marvell sez set clkdiv to all 1's for now.
- */
- rval |= TWSIC0_CLKDIV;
- cafe_reg_write(cam, REG_TWSIC0, rval);
- (void) cafe_reg_read(cam, REG_TWSIC1); /* force write */
- rval = TWSIC1_READ | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
- cafe_reg_write(cam, REG_TWSIC1, rval);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-
- wait_event_timeout(cam->smbus_wait,
- cafe_smbus_read_done(cam), CAFE_SMBUS_TIMEOUT);
- spin_lock_irqsave(&cam->dev_lock, flags);
- rval = cafe_reg_read(cam, REG_TWSIC1);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-
- if (rval & TWSIC1_ERROR) {
- cam_err(cam, "SMBUS read (%02x/%02x) error\n", addr, command);
- return -EIO;
- }
- if (! (rval & TWSIC1_RVALID)) {
- cam_err(cam, "SMBUS read (%02x/%02x) timed out\n", addr,
- command);
- return -EIO;
- }
- *value = rval & 0xff;
- return 0;
-}
-
-/*
- * Perform a transfer over SMBUS. This thing is called under
- * the i2c bus lock, so we shouldn't race with ourselves...
- */
-static int cafe_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
- unsigned short flags, char rw, u8 command,
- int size, union i2c_smbus_data *data)
-{
- struct v4l2_device *v4l2_dev = i2c_get_adapdata(adapter);
- struct cafe_camera *cam = to_cam(v4l2_dev);
- int ret = -EINVAL;
-
- /*
- * This interface would appear to only do byte data ops. OK
- * it can do word too, but the cam chip has no use for that.
- */
- if (size != I2C_SMBUS_BYTE_DATA) {
- cam_err(cam, "funky xfer size %d\n", size);
- return -EINVAL;
- }
-
- if (rw == I2C_SMBUS_WRITE)
- ret = cafe_smbus_write_data(cam, addr, command, data->byte);
- else if (rw == I2C_SMBUS_READ)
- ret = cafe_smbus_read_data(cam, addr, command, &data->byte);
- return ret;
-}
-
-
-static void cafe_smbus_enable_irq(struct cafe_camera *cam)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- cafe_reg_set_bit(cam, REG_IRQMASK, TWSIIRQS);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-}
-
-static u32 cafe_smbus_func(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_SMBUS_READ_BYTE_DATA |
- I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
-}
-
-static struct i2c_algorithm cafe_smbus_algo = {
- .smbus_xfer = cafe_smbus_xfer,
- .functionality = cafe_smbus_func
-};
-
-/* Somebody is on the bus */
-static void cafe_ctlr_stop_dma(struct cafe_camera *cam);
-static void cafe_ctlr_power_down(struct cafe_camera *cam);
-
-static int cafe_smbus_setup(struct cafe_camera *cam)
-{
- struct i2c_adapter *adap = &cam->i2c_adapter;
- int ret;
-
- cafe_smbus_enable_irq(cam);
- adap->owner = THIS_MODULE;
- adap->algo = &cafe_smbus_algo;
- strcpy(adap->name, "cafe_ccic");
- adap->dev.parent = &cam->pdev->dev;
- i2c_set_adapdata(adap, &cam->v4l2_dev);
- ret = i2c_add_adapter(adap);
- if (ret)
- printk(KERN_ERR "Unable to register cafe i2c adapter\n");
- return ret;
-}
-
-static void cafe_smbus_shutdown(struct cafe_camera *cam)
-{
- i2c_del_adapter(&cam->i2c_adapter);
-}
-
-
-/* ------------------------------------------------------------------- */
-/*
- * Deal with the controller.
- */
-
-/*
- * Do everything we think we need to have the interface operating
- * according to the desired format.
- */
-static void cafe_ctlr_dma(struct cafe_camera *cam)
-{
- /*
- * Store the first two Y buffers (we aren't supporting
- * planar formats for now, so no UV bufs). Then either
- * set the third if it exists, or tell the controller
- * to just use two.
- */
- cafe_reg_write(cam, REG_Y0BAR, cam->dma_handles[0]);
- cafe_reg_write(cam, REG_Y1BAR, cam->dma_handles[1]);
- if (cam->nbufs > 2) {
- cafe_reg_write(cam, REG_Y2BAR, cam->dma_handles[2]);
- cafe_reg_clear_bit(cam, REG_CTRL1, C1_TWOBUFS);
- }
- else
- cafe_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
- cafe_reg_write(cam, REG_UBAR, 0); /* 32 bits only for now */
-}
-
-static void cafe_ctlr_image(struct cafe_camera *cam)
-{
- int imgsz;
- struct v4l2_pix_format *fmt = &cam->pix_format;
-
- imgsz = ((fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK) |
- (fmt->bytesperline & IMGSZ_H_MASK);
- cafe_reg_write(cam, REG_IMGSIZE, imgsz);
- cafe_reg_write(cam, REG_IMGOFFSET, 0);
- /* YPITCH just drops the last two bits */
- cafe_reg_write_mask(cam, REG_IMGPITCH, fmt->bytesperline,
- IMGP_YP_MASK);
- /*
- * Tell the controller about the image format we are using.
- */
- switch (cam->pix_format.pixelformat) {
- case V4L2_PIX_FMT_YUYV:
- cafe_reg_write_mask(cam, REG_CTRL0,
- C0_DF_YUV|C0_YUV_PACKED|C0_YUVE_YUYV,
- C0_DF_MASK);
- break;
-
- case V4L2_PIX_FMT_RGB444:
- cafe_reg_write_mask(cam, REG_CTRL0,
- C0_DF_RGB|C0_RGBF_444|C0_RGB4_XRGB,
- C0_DF_MASK);
- /* Alpha value? */
- break;
-
- case V4L2_PIX_FMT_RGB565:
- cafe_reg_write_mask(cam, REG_CTRL0,
- C0_DF_RGB|C0_RGBF_565|C0_RGB5_BGGR,
- C0_DF_MASK);
- break;
-
- default:
- cam_err(cam, "Unknown format %x\n", cam->pix_format.pixelformat);
- break;
- }
- /*
- * Make sure it knows we want to use hsync/vsync.
- */
- cafe_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC,
- C0_SIFM_MASK);
-}
-
-
-/*
- * Configure the controller for operation; caller holds the
- * device mutex.
- */
-static int cafe_ctlr_configure(struct cafe_camera *cam)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- cafe_ctlr_dma(cam);
- cafe_ctlr_image(cam);
- cafe_set_config_needed(cam, 0);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- return 0;
-}
-
-static void cafe_ctlr_irq_enable(struct cafe_camera *cam)
-{
- /*
- * Clear any pending interrupts, since we do not
- * expect to have I/O active prior to enabling.
- */
- cafe_reg_write(cam, REG_IRQSTAT, FRAMEIRQS);
- cafe_reg_set_bit(cam, REG_IRQMASK, FRAMEIRQS);
-}
-
-static void cafe_ctlr_irq_disable(struct cafe_camera *cam)
-{
- cafe_reg_clear_bit(cam, REG_IRQMASK, FRAMEIRQS);
-}
-
-/*
- * Make the controller start grabbing images. Everything must
- * be set up before doing this.
- */
-static void cafe_ctlr_start(struct cafe_camera *cam)
-{
- /* set_bit performs a read, so no other barrier should be
- needed here */
- cafe_reg_set_bit(cam, REG_CTRL0, C0_ENABLE);
-}
-
-static void cafe_ctlr_stop(struct cafe_camera *cam)
-{
- cafe_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
-}
-
-static void cafe_ctlr_init(struct cafe_camera *cam)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- /*
- * Added magic to bring up the hardware on the B-Test board
- */
- cafe_reg_write(cam, 0x3038, 0x8);
- cafe_reg_write(cam, 0x315c, 0x80008);
- /*
- * Go through the dance needed to wake the device up.
- * Note that these registers are global and shared
- * with the NAND and SD devices. Interaction between the
- * three still needs to be examined.
- */
- cafe_reg_write(cam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */
- cafe_reg_write(cam, REG_GL_CSR, GCSR_SRC|GCSR_MRC);
- cafe_reg_write(cam, REG_GL_CSR, GCSR_SRC|GCSR_MRS);
- /*
- * Here we must wait a bit for the controller to come around.
- */
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- msleep(5);
- spin_lock_irqsave(&cam->dev_lock, flags);
-
- cafe_reg_write(cam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC);
- cafe_reg_set_bit(cam, REG_GL_IMASK, GIMSK_CCIC_EN);
- /*
- * Make sure it's not powered down.
- */
- cafe_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
- /*
- * Turn off the enable bit. It sure should be off anyway,
- * but it's good to be sure.
- */
- cafe_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
- /*
- * Mask all interrupts.
- */
- cafe_reg_write(cam, REG_IRQMASK, 0);
- /*
- * Clock the sensor appropriately. Controller clock should
- * be 48MHz, sensor "typical" value is half that.
- */
- cafe_reg_write_mask(cam, REG_CLKCTRL, 2, CLK_DIV_MASK);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-}
-
-
-/*
- * Stop the controller, and don't return until we're really sure that no
- * further DMA is going on.
- */
-static void cafe_ctlr_stop_dma(struct cafe_camera *cam)
-{
- unsigned long flags;
-
- /*
- * Theory: stop the camera controller (whether it is operating
- * or not). Delay briefly just in case we race with the SOF
- * interrupt, then wait until no DMA is active.
- */
- spin_lock_irqsave(&cam->dev_lock, flags);
- cafe_ctlr_stop(cam);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- mdelay(1);
- wait_event_timeout(cam->iowait,
- !test_bit(CF_DMA_ACTIVE, &cam->flags), HZ);
- if (test_bit(CF_DMA_ACTIVE, &cam->flags))
- cam_err(cam, "Timeout waiting for DMA to end\n");
- /* This would be bad news - what now? */
- spin_lock_irqsave(&cam->dev_lock, flags);
- cam->state = S_IDLE;
- cafe_ctlr_irq_disable(cam);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-}
-
-/*
- * Power up and down.
- */
-static void cafe_ctlr_power_up(struct cafe_camera *cam)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- cafe_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
- /*
- * Part one of the sensor dance: turn the global
- * GPIO signal on.
- */
- cafe_reg_write(cam, REG_GL_FCR, GFCR_GPIO_ON);
- cafe_reg_write(cam, REG_GL_GPIOR, GGPIO_OUT|GGPIO_VAL);
- /*
- * Put the sensor into operational mode (assumes OLPC-style
- * wiring). Control 0 is reset - set to 1 to operate.
- * Control 1 is power down, set to 0 to operate.
- */
- cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
-/* mdelay(1); */ /* Marvell says 1ms will do it */
- cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
-/* mdelay(1); */ /* Enough? */
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- msleep(5); /* Just to be sure */
-}
-
-static void cafe_ctlr_power_down(struct cafe_camera *cam)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- cafe_reg_write(cam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C1);
- cafe_reg_write(cam, REG_GL_FCR, GFCR_GPIO_ON);
- cafe_reg_write(cam, REG_GL_GPIOR, GGPIO_OUT);
- cafe_reg_set_bit(cam, REG_CTRL1, C1_PWRDWN);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-}
-
-/* -------------------------------------------------------------------- */
-/*
- * Communications with the sensor.
- */
-
-static int __cafe_cam_reset(struct cafe_camera *cam)
-{
- return sensor_call(cam, core, reset, 0);
-}
-
-/*
- * We have found the sensor on the i2c. Let's try to have a
- * conversation.
- */
-static int cafe_cam_init(struct cafe_camera *cam)
-{
- struct v4l2_dbg_chip_ident chip;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- if (cam->state != S_NOTREADY)
- cam_warn(cam, "Cam init with device in funky state %d",
- cam->state);
- ret = __cafe_cam_reset(cam);
- if (ret)
- goto out;
- chip.ident = V4L2_IDENT_NONE;
- chip.match.type = V4L2_CHIP_MATCH_I2C_ADDR;
- chip.match.addr = cam->sensor_addr;
- ret = sensor_call(cam, core, g_chip_ident, &chip);
- if (ret)
- goto out;
- cam->sensor_type = chip.ident;
- if (cam->sensor_type != V4L2_IDENT_OV7670) {
- cam_err(cam, "Unsupported sensor type 0x%x", cam->sensor_type);
- ret = -EINVAL;
- goto out;
- }
-/* Get/set parameters? */
- ret = 0;
- cam->state = S_IDLE;
- out:
- cafe_ctlr_power_down(cam);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-/*
- * Configure the sensor to match the parameters we have. Caller should
- * hold s_mutex
- */
-static int cafe_cam_set_flip(struct cafe_camera *cam)
-{
- struct v4l2_control ctrl;
-
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_VFLIP;
- ctrl.value = flip;
- return sensor_call(cam, core, s_ctrl, &ctrl);
-}
-
-
-static int cafe_cam_configure(struct cafe_camera *cam)
-{
- struct v4l2_mbus_framefmt mbus_fmt;
- int ret;
-
- v4l2_fill_mbus_format(&mbus_fmt, &cam->pix_format, cam->mbus_code);
- ret = sensor_call(cam, core, init, 0);
- if (ret == 0)
- ret = sensor_call(cam, video, s_mbus_fmt, &mbus_fmt);
- /*
- * OV7670 does weird things if flip is set *before* format...
- */
- ret += cafe_cam_set_flip(cam);
- return ret;
-}
-
-/* -------------------------------------------------------------------- */
-/*
- * DMA buffer management. These functions need s_mutex held.
- */
-
-/* FIXME: this is inefficient as hell, since dma_alloc_coherent just
- * does a get_free_pages() call, and we waste a good chunk of an orderN
- * allocation. Should try to allocate the whole set in one chunk.
- */
-static int cafe_alloc_dma_bufs(struct cafe_camera *cam, int loadtime)
-{
- int i;
-
- cafe_set_config_needed(cam, 1);
- if (loadtime)
- cam->dma_buf_size = dma_buf_size;
- else
- cam->dma_buf_size = cam->pix_format.sizeimage;
- if (n_dma_bufs > 3)
- n_dma_bufs = 3;
-
- cam->nbufs = 0;
- for (i = 0; i < n_dma_bufs; i++) {
- cam->dma_bufs[i] = dma_alloc_coherent(&cam->pdev->dev,
- cam->dma_buf_size, cam->dma_handles + i,
- GFP_KERNEL);
- if (cam->dma_bufs[i] == NULL) {
- cam_warn(cam, "Failed to allocate DMA buffer\n");
- break;
- }
- /* For debug, remove eventually */
- memset(cam->dma_bufs[i], 0xcc, cam->dma_buf_size);
- (cam->nbufs)++;
- }
-
- switch (cam->nbufs) {
- case 1:
- dma_free_coherent(&cam->pdev->dev, cam->dma_buf_size,
- cam->dma_bufs[0], cam->dma_handles[0]);
- cam->nbufs = 0;
- case 0:
- cam_err(cam, "Insufficient DMA buffers, cannot operate\n");
- return -ENOMEM;
-
- case 2:
- if (n_dma_bufs > 2)
- cam_warn(cam, "Will limp along with only 2 buffers\n");
- break;
- }
- return 0;
-}
-
-static void cafe_free_dma_bufs(struct cafe_camera *cam)
-{
- int i;
-
- for (i = 0; i < cam->nbufs; i++) {
- dma_free_coherent(&cam->pdev->dev, cam->dma_buf_size,
- cam->dma_bufs[i], cam->dma_handles[i]);
- cam->dma_bufs[i] = NULL;
- }
- cam->nbufs = 0;
-}
-
-
-
-
-
-/* ----------------------------------------------------------------------- */
-/*
- * Here starts the V4L2 interface code.
- */
-
-/*
- * Read an image from the device.
- */
-static ssize_t cafe_deliver_buffer(struct cafe_camera *cam,
- char __user *buffer, size_t len, loff_t *pos)
-{
- int bufno;
- unsigned long flags;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- if (cam->next_buf < 0) {
- cam_err(cam, "deliver_buffer: No next buffer\n");
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- return -EIO;
- }
- bufno = cam->next_buf;
- clear_bit(bufno, &cam->flags);
- if (++(cam->next_buf) >= cam->nbufs)
- cam->next_buf = 0;
- if (! test_bit(cam->next_buf, &cam->flags))
- cam->next_buf = -1;
- cam->specframes = 0;
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-
- if (len > cam->pix_format.sizeimage)
- len = cam->pix_format.sizeimage;
- if (copy_to_user(buffer, cam->dma_bufs[bufno], len))
- return -EFAULT;
- (*pos) += len;
- return len;
-}
-
-/*
- * Get everything ready, and start grabbing frames.
- */
-static int cafe_read_setup(struct cafe_camera *cam, enum cafe_state state)
-{
- int ret;
- unsigned long flags;
-
- /*
- * Configuration. If we still don't have DMA buffers,
- * make one last, desperate attempt.
- */
- if (cam->nbufs == 0)
- if (cafe_alloc_dma_bufs(cam, 0))
- return -ENOMEM;
-
- if (cafe_needs_config(cam)) {
- cafe_cam_configure(cam);
- ret = cafe_ctlr_configure(cam);
- if (ret)
- return ret;
- }
-
- /*
- * Turn it loose.
- */
- spin_lock_irqsave(&cam->dev_lock, flags);
- cafe_reset_buffers(cam);
- cafe_ctlr_irq_enable(cam);
- cam->state = state;
- cafe_ctlr_start(cam);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- return 0;
-}
-
-
-static ssize_t cafe_v4l_read(struct file *filp,
- char __user *buffer, size_t len, loff_t *pos)
-{
- struct cafe_camera *cam = filp->private_data;
- int ret = 0;
-
- /*
- * Perhaps we're in speculative read mode and already
- * have data?
- */
- mutex_lock(&cam->s_mutex);
- if (cam->state == S_SPECREAD) {
- if (cam->next_buf >= 0) {
- ret = cafe_deliver_buffer(cam, buffer, len, pos);
- if (ret != 0)
- goto out_unlock;
- }
- } else if (cam->state == S_FLAKED || cam->state == S_NOTREADY) {
- ret = -EIO;
- goto out_unlock;
- } else if (cam->state != S_IDLE) {
- ret = -EBUSY;
- goto out_unlock;
- }
-
- /*
- * v4l2: multiple processes can open the device, but only
- * one gets to grab data from it.
- */
- if (cam->owner && cam->owner != filp) {
- ret = -EBUSY;
- goto out_unlock;
- }
- cam->owner = filp;
-
- /*
- * Do setup if need be.
- */
- if (cam->state != S_SPECREAD) {
- ret = cafe_read_setup(cam, S_SINGLEREAD);
- if (ret)
- goto out_unlock;
- }
- /*
- * Wait for something to happen. This should probably
- * be interruptible (FIXME).
- */
- wait_event_timeout(cam->iowait, cam->next_buf >= 0, HZ);
- if (cam->next_buf < 0) {
- cam_err(cam, "read() operation timed out\n");
- cafe_ctlr_stop_dma(cam);
- ret = -EIO;
- goto out_unlock;
- }
- /*
- * Give them their data and we should be done.
- */
- ret = cafe_deliver_buffer(cam, buffer, len, pos);
-
- out_unlock:
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-
-
-
-
-
-
-/*
- * Streaming I/O support.
- */
-
-
-
-static int cafe_vidioc_streamon(struct file *filp, void *priv,
- enum v4l2_buf_type type)
-{
- struct cafe_camera *cam = filp->private_data;
- int ret = -EINVAL;
-
- if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- goto out;
- mutex_lock(&cam->s_mutex);
- if (cam->state != S_IDLE || cam->n_sbufs == 0)
- goto out_unlock;
-
- cam->sequence = 0;
- ret = cafe_read_setup(cam, S_STREAMING);
-
- out_unlock:
- mutex_unlock(&cam->s_mutex);
- out:
- return ret;
-}
-
-
-static int cafe_vidioc_streamoff(struct file *filp, void *priv,
- enum v4l2_buf_type type)
-{
- struct cafe_camera *cam = filp->private_data;
- int ret = -EINVAL;
-
- if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- goto out;
- mutex_lock(&cam->s_mutex);
- if (cam->state != S_STREAMING)
- goto out_unlock;
-
- cafe_ctlr_stop_dma(cam);
- ret = 0;
-
- out_unlock:
- mutex_unlock(&cam->s_mutex);
- out:
- return ret;
-}
-
-
-
-static int cafe_setup_siobuf(struct cafe_camera *cam, int index)
-{
- struct cafe_sio_buffer *buf = cam->sb_bufs + index;
-
- INIT_LIST_HEAD(&buf->list);
- buf->v4lbuf.length = PAGE_ALIGN(cam->pix_format.sizeimage);
- buf->buffer = vmalloc_user(buf->v4lbuf.length);
- if (buf->buffer == NULL)
- return -ENOMEM;
- buf->mapcount = 0;
- buf->cam = cam;
-
- buf->v4lbuf.index = index;
- buf->v4lbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf->v4lbuf.field = V4L2_FIELD_NONE;
- buf->v4lbuf.memory = V4L2_MEMORY_MMAP;
- /*
- * Offset: must be 32-bit even on a 64-bit system. videobuf-dma-sg
- * just uses the length times the index, but the spec warns
- * against doing just that - vma merging problems. So we
- * leave a gap between each pair of buffers.
- */
- buf->v4lbuf.m.offset = 2*index*buf->v4lbuf.length;
- return 0;
-}
-
-static int cafe_free_sio_buffers(struct cafe_camera *cam)
-{
- int i;
-
- /*
- * If any buffers are mapped, we cannot free them at all.
- */
- for (i = 0; i < cam->n_sbufs; i++)
- if (cam->sb_bufs[i].mapcount > 0)
- return -EBUSY;
- /*
- * OK, let's do it.
- */
- for (i = 0; i < cam->n_sbufs; i++)
- vfree(cam->sb_bufs[i].buffer);
- cam->n_sbufs = 0;
- kfree(cam->sb_bufs);
- cam->sb_bufs = NULL;
- INIT_LIST_HEAD(&cam->sb_avail);
- INIT_LIST_HEAD(&cam->sb_full);
- return 0;
-}
-
-
-
-static int cafe_vidioc_reqbufs(struct file *filp, void *priv,
- struct v4l2_requestbuffers *req)
-{
- struct cafe_camera *cam = filp->private_data;
- int ret = 0; /* Silence warning */
-
- /*
- * Make sure it's something we can do. User pointers could be
- * implemented without great pain, but that's not been done yet.
- */
- if (req->memory != V4L2_MEMORY_MMAP)
- return -EINVAL;
- /*
- * If they ask for zero buffers, they really want us to stop streaming
- * (if it's happening) and free everything. Should we check owner?
- */
- mutex_lock(&cam->s_mutex);
- if (req->count == 0) {
- if (cam->state == S_STREAMING)
- cafe_ctlr_stop_dma(cam);
- ret = cafe_free_sio_buffers (cam);
- goto out;
- }
- /*
- * Device needs to be idle and working. We *could* try to do the
- * right thing in S_SPECREAD by shutting things down, but it
- * probably doesn't matter.
- */
- if (cam->state != S_IDLE || (cam->owner && cam->owner != filp)) {
- ret = -EBUSY;
- goto out;
- }
- cam->owner = filp;
-
- if (req->count < min_buffers)
- req->count = min_buffers;
- else if (req->count > max_buffers)
- req->count = max_buffers;
- if (cam->n_sbufs > 0) {
- ret = cafe_free_sio_buffers(cam);
- if (ret)
- goto out;
- }
-
- cam->sb_bufs = kzalloc(req->count*sizeof(struct cafe_sio_buffer),
- GFP_KERNEL);
- if (cam->sb_bufs == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- for (cam->n_sbufs = 0; cam->n_sbufs < req->count; (cam->n_sbufs++)) {
- ret = cafe_setup_siobuf(cam, cam->n_sbufs);
- if (ret)
- break;
- }
-
- if (cam->n_sbufs == 0) /* no luck at all - ret already set */
- kfree(cam->sb_bufs);
- req->count = cam->n_sbufs; /* In case of partial success */
-
- out:
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-static int cafe_vidioc_querybuf(struct file *filp, void *priv,
- struct v4l2_buffer *buf)
-{
- struct cafe_camera *cam = filp->private_data;
- int ret = -EINVAL;
-
- mutex_lock(&cam->s_mutex);
- if (buf->index >= cam->n_sbufs)
- goto out;
- *buf = cam->sb_bufs[buf->index].v4lbuf;
- ret = 0;
- out:
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-static int cafe_vidioc_qbuf(struct file *filp, void *priv,
- struct v4l2_buffer *buf)
-{
- struct cafe_camera *cam = filp->private_data;
- struct cafe_sio_buffer *sbuf;
- int ret = -EINVAL;
- unsigned long flags;
-
- mutex_lock(&cam->s_mutex);
- if (buf->index >= cam->n_sbufs)
- goto out;
- sbuf = cam->sb_bufs + buf->index;
- if (sbuf->v4lbuf.flags & V4L2_BUF_FLAG_QUEUED) {
- ret = 0; /* Already queued?? */
- goto out;
- }
- if (sbuf->v4lbuf.flags & V4L2_BUF_FLAG_DONE) {
- /* Spec doesn't say anything, seems appropriate tho */
- ret = -EBUSY;
- goto out;
- }
- sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_QUEUED;
- spin_lock_irqsave(&cam->dev_lock, flags);
- list_add(&sbuf->list, &cam->sb_avail);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- ret = 0;
- out:
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-static int cafe_vidioc_dqbuf(struct file *filp, void *priv,
- struct v4l2_buffer *buf)
-{
- struct cafe_camera *cam = filp->private_data;
- struct cafe_sio_buffer *sbuf;
- int ret = -EINVAL;
- unsigned long flags;
-
- mutex_lock(&cam->s_mutex);
- if (cam->state != S_STREAMING)
- goto out_unlock;
- if (list_empty(&cam->sb_full) && filp->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- while (list_empty(&cam->sb_full) && cam->state == S_STREAMING) {
- mutex_unlock(&cam->s_mutex);
- if (wait_event_interruptible(cam->iowait,
- !list_empty(&cam->sb_full))) {
- ret = -ERESTARTSYS;
- goto out;
- }
- mutex_lock(&cam->s_mutex);
- }
-
- if (cam->state != S_STREAMING)
- ret = -EINTR;
- else {
- spin_lock_irqsave(&cam->dev_lock, flags);
- /* Should probably recheck !list_empty() here */
- sbuf = list_entry(cam->sb_full.next,
- struct cafe_sio_buffer, list);
- list_del_init(&sbuf->list);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_DONE;
- *buf = sbuf->v4lbuf;
- ret = 0;
- }
-
- out_unlock:
- mutex_unlock(&cam->s_mutex);
- out:
- return ret;
-}
-
-
-
-static void cafe_v4l_vm_open(struct vm_area_struct *vma)
-{
- struct cafe_sio_buffer *sbuf = vma->vm_private_data;
- /*
- * Locking: done under mmap_sem, so we don't need to
- * go back to the camera lock here.
- */
- sbuf->mapcount++;
-}
-
-
-static void cafe_v4l_vm_close(struct vm_area_struct *vma)
-{
- struct cafe_sio_buffer *sbuf = vma->vm_private_data;
-
- mutex_lock(&sbuf->cam->s_mutex);
- sbuf->mapcount--;
- /* Docs say we should stop I/O too... */
- if (sbuf->mapcount == 0)
- sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_MAPPED;
- mutex_unlock(&sbuf->cam->s_mutex);
-}
-
-static const struct vm_operations_struct cafe_v4l_vm_ops = {
- .open = cafe_v4l_vm_open,
- .close = cafe_v4l_vm_close
-};
-
-
-static int cafe_v4l_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct cafe_camera *cam = filp->private_data;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- int ret = -EINVAL;
- int i;
- struct cafe_sio_buffer *sbuf = NULL;
-
- if (! (vma->vm_flags & VM_WRITE) || ! (vma->vm_flags & VM_SHARED))
- return -EINVAL;
- /*
- * Find the buffer they are looking for.
- */
- mutex_lock(&cam->s_mutex);
- for (i = 0; i < cam->n_sbufs; i++)
- if (cam->sb_bufs[i].v4lbuf.m.offset == offset) {
- sbuf = cam->sb_bufs + i;
- break;
- }
- if (sbuf == NULL)
- goto out;
-
- ret = remap_vmalloc_range(vma, sbuf->buffer, 0);
- if (ret)
- goto out;
- vma->vm_flags |= VM_DONTEXPAND;
- vma->vm_private_data = sbuf;
- vma->vm_ops = &cafe_v4l_vm_ops;
- sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_MAPPED;
- cafe_v4l_vm_open(vma);
- ret = 0;
- out:
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-
-static int cafe_v4l_open(struct file *filp)
-{
- struct cafe_camera *cam = video_drvdata(filp);
-
- filp->private_data = cam;
-
- mutex_lock(&cam->s_mutex);
- if (cam->users == 0) {
- cafe_ctlr_power_up(cam);
- __cafe_cam_reset(cam);
- cafe_set_config_needed(cam, 1);
- /* FIXME make sure this is complete */
- }
- (cam->users)++;
- mutex_unlock(&cam->s_mutex);
- return 0;
-}
-
-
-static int cafe_v4l_release(struct file *filp)
-{
- struct cafe_camera *cam = filp->private_data;
-
- mutex_lock(&cam->s_mutex);
- (cam->users)--;
- if (filp == cam->owner) {
- cafe_ctlr_stop_dma(cam);
- cafe_free_sio_buffers(cam);
- cam->owner = NULL;
- }
- if (cam->users == 0) {
- cafe_ctlr_power_down(cam);
- if (alloc_bufs_at_read)
- cafe_free_dma_bufs(cam);
- }
- mutex_unlock(&cam->s_mutex);
- return 0;
-}
-
-
-
-static unsigned int cafe_v4l_poll(struct file *filp,
- struct poll_table_struct *pt)
-{
- struct cafe_camera *cam = filp->private_data;
-
- poll_wait(filp, &cam->iowait, pt);
- if (cam->next_buf >= 0)
- return POLLIN | POLLRDNORM;
- return 0;
-}
-
-
-
-static int cafe_vidioc_queryctrl(struct file *filp, void *priv,
- struct v4l2_queryctrl *qc)
-{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, core, queryctrl, qc);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-static int cafe_vidioc_g_ctrl(struct file *filp, void *priv,
- struct v4l2_control *ctrl)
-{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, core, g_ctrl, ctrl);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-static int cafe_vidioc_s_ctrl(struct file *filp, void *priv,
- struct v4l2_control *ctrl)
-{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, core, s_ctrl, ctrl);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-
-
-
-
-static int cafe_vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- strcpy(cap->driver, "cafe_ccic");
- strcpy(cap->card, "cafe_ccic");
- cap->version = CAFE_VERSION;
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
- return 0;
-}
-
-
-/*
- * The default format we use until somebody says otherwise.
- */
-static const struct v4l2_pix_format cafe_def_pix_format = {
- .width = VGA_WIDTH,
- .height = VGA_HEIGHT,
- .pixelformat = V4L2_PIX_FMT_YUYV,
- .field = V4L2_FIELD_NONE,
- .bytesperline = VGA_WIDTH*2,
- .sizeimage = VGA_WIDTH*VGA_HEIGHT*2,
-};
-
-static const enum v4l2_mbus_pixelcode cafe_def_mbus_code =
- V4L2_MBUS_FMT_YUYV8_2X8;
-
-static int cafe_vidioc_enum_fmt_vid_cap(struct file *filp,
- void *priv, struct v4l2_fmtdesc *fmt)
-{
- if (fmt->index >= N_CAFE_FMTS)
- return -EINVAL;
- strlcpy(fmt->description, cafe_formats[fmt->index].desc,
- sizeof(fmt->description));
- fmt->pixelformat = cafe_formats[fmt->index].pixelformat;
- return 0;
-}
-
-static int cafe_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
- struct v4l2_format *fmt)
-{
- struct cafe_camera *cam = priv;
- struct cafe_format_struct *f;
- struct v4l2_pix_format *pix = &fmt->fmt.pix;
- struct v4l2_mbus_framefmt mbus_fmt;
- int ret;
-
- f = cafe_find_format(pix->pixelformat);
- pix->pixelformat = f->pixelformat;
- v4l2_fill_mbus_format(&mbus_fmt, pix, f->mbus_code);
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
- mutex_unlock(&cam->s_mutex);
- v4l2_fill_pix_format(pix, &mbus_fmt);
- pix->bytesperline = pix->width * f->bpp;
- pix->sizeimage = pix->height * pix->bytesperline;
- return ret;
-}
-
-static int cafe_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
- struct v4l2_format *fmt)
-{
- struct cafe_camera *cam = priv;
- struct cafe_format_struct *f;
- int ret;
-
- /*
- * Can't do anything if the device is not idle
- * Also can't if there are streaming buffers in place.
- */
- if (cam->state != S_IDLE || cam->n_sbufs > 0)
- return -EBUSY;
-
- f = cafe_find_format(fmt->fmt.pix.pixelformat);
-
- /*
- * See if the formatting works in principle.
- */
- ret = cafe_vidioc_try_fmt_vid_cap(filp, priv, fmt);
- if (ret)
- return ret;
- /*
- * Now we start to change things for real, so let's do it
- * under lock.
- */
- mutex_lock(&cam->s_mutex);
- cam->pix_format = fmt->fmt.pix;
- cam->mbus_code = f->mbus_code;
-
- /*
- * Make sure we have appropriate DMA buffers.
- */
- ret = -ENOMEM;
- if (cam->nbufs > 0 && cam->dma_buf_size < cam->pix_format.sizeimage)
- cafe_free_dma_bufs(cam);
- if (cam->nbufs == 0) {
- if (cafe_alloc_dma_bufs(cam, 0))
- goto out;
- }
- /*
- * It looks like this might work, so let's program the sensor.
- */
- ret = cafe_cam_configure(cam);
- if (! ret)
- ret = cafe_ctlr_configure(cam);
- out:
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-/*
- * Return our stored notion of how the camera is/should be configured.
- * The V4l2 spec wants us to be smarter, and actually get this from
- * the camera (and not mess with it at open time). Someday.
- */
-static int cafe_vidioc_g_fmt_vid_cap(struct file *filp, void *priv,
- struct v4l2_format *f)
-{
- struct cafe_camera *cam = priv;
-
- f->fmt.pix = cam->pix_format;
- return 0;
-}
-
-/*
- * We only have one input - the sensor - so minimize the nonsense here.
- */
-static int cafe_vidioc_enum_input(struct file *filp, void *priv,
- struct v4l2_input *input)
-{
- if (input->index != 0)
- return -EINVAL;
-
- input->type = V4L2_INPUT_TYPE_CAMERA;
- input->std = V4L2_STD_ALL; /* Not sure what should go here */
- strcpy(input->name, "Camera");
- return 0;
-}
-
-static int cafe_vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int cafe_vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
- if (i != 0)
- return -EINVAL;
- return 0;
-}
-
-/* from vivi.c */
-static int cafe_vidioc_s_std(struct file *filp, void *priv, v4l2_std_id *a)
-{
- return 0;
-}
-
-/*
- * G/S_PARM. Most of this is done by the sensor, but we are
- * the level which controls the number of read buffers.
- */
-static int cafe_vidioc_g_parm(struct file *filp, void *priv,
- struct v4l2_streamparm *parms)
-{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, g_parm, parms);
- mutex_unlock(&cam->s_mutex);
- parms->parm.capture.readbuffers = n_dma_bufs;
- return ret;
-}
-
-static int cafe_vidioc_s_parm(struct file *filp, void *priv,
- struct v4l2_streamparm *parms)
-{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, s_parm, parms);
- mutex_unlock(&cam->s_mutex);
- parms->parm.capture.readbuffers = n_dma_bufs;
- return ret;
-}
-
-static int cafe_vidioc_g_chip_ident(struct file *file, void *priv,
- struct v4l2_dbg_chip_ident *chip)
-{
- struct cafe_camera *cam = priv;
-
- chip->ident = V4L2_IDENT_NONE;
- chip->revision = 0;
- if (v4l2_chip_match_host(&chip->match)) {
- chip->ident = V4L2_IDENT_CAFE;
- return 0;
- }
- return sensor_call(cam, core, g_chip_ident, chip);
-}
-
-static int cafe_vidioc_enum_framesizes(struct file *filp, void *priv,
- struct v4l2_frmsizeenum *sizes)
-{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, enum_framesizes, sizes);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-static int cafe_vidioc_enum_frameintervals(struct file *filp, void *priv,
- struct v4l2_frmivalenum *interval)
-{
- struct cafe_camera *cam = priv;
- int ret;
-
- mutex_lock(&cam->s_mutex);
- ret = sensor_call(cam, video, enum_frameintervals, interval);
- mutex_unlock(&cam->s_mutex);
- return ret;
-}
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int cafe_vidioc_g_register(struct file *file, void *priv,
- struct v4l2_dbg_register *reg)
-{
- struct cafe_camera *cam = priv;
-
- if (v4l2_chip_match_host(&reg->match)) {
- reg->val = cafe_reg_read(cam, reg->reg);
- reg->size = 4;
- return 0;
- }
- return sensor_call(cam, core, g_register, reg);
-}
-
-static int cafe_vidioc_s_register(struct file *file, void *priv,
- struct v4l2_dbg_register *reg)
-{
- struct cafe_camera *cam = priv;
-
- if (v4l2_chip_match_host(&reg->match)) {
- cafe_reg_write(cam, reg->reg, reg->val);
- return 0;
- }
- return sensor_call(cam, core, s_register, reg);
-}
-#endif
-
-/*
- * This template device holds all of those v4l2 methods; we
- * clone it for specific real devices.
- */
-
-static const struct v4l2_file_operations cafe_v4l_fops = {
- .owner = THIS_MODULE,
- .open = cafe_v4l_open,
- .release = cafe_v4l_release,
- .read = cafe_v4l_read,
- .poll = cafe_v4l_poll,
- .mmap = cafe_v4l_mmap,
- .unlocked_ioctl = video_ioctl2,
-};
-
-static const struct v4l2_ioctl_ops cafe_v4l_ioctl_ops = {
- .vidioc_querycap = cafe_vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = cafe_vidioc_enum_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = cafe_vidioc_try_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = cafe_vidioc_s_fmt_vid_cap,
- .vidioc_g_fmt_vid_cap = cafe_vidioc_g_fmt_vid_cap,
- .vidioc_enum_input = cafe_vidioc_enum_input,
- .vidioc_g_input = cafe_vidioc_g_input,
- .vidioc_s_input = cafe_vidioc_s_input,
- .vidioc_s_std = cafe_vidioc_s_std,
- .vidioc_reqbufs = cafe_vidioc_reqbufs,
- .vidioc_querybuf = cafe_vidioc_querybuf,
- .vidioc_qbuf = cafe_vidioc_qbuf,
- .vidioc_dqbuf = cafe_vidioc_dqbuf,
- .vidioc_streamon = cafe_vidioc_streamon,
- .vidioc_streamoff = cafe_vidioc_streamoff,
- .vidioc_queryctrl = cafe_vidioc_queryctrl,
- .vidioc_g_ctrl = cafe_vidioc_g_ctrl,
- .vidioc_s_ctrl = cafe_vidioc_s_ctrl,
- .vidioc_g_parm = cafe_vidioc_g_parm,
- .vidioc_s_parm = cafe_vidioc_s_parm,
- .vidioc_enum_framesizes = cafe_vidioc_enum_framesizes,
- .vidioc_enum_frameintervals = cafe_vidioc_enum_frameintervals,
- .vidioc_g_chip_ident = cafe_vidioc_g_chip_ident,
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .vidioc_g_register = cafe_vidioc_g_register,
- .vidioc_s_register = cafe_vidioc_s_register,
-#endif
-};
-
-static struct video_device cafe_v4l_template = {
- .name = "cafe",
- .tvnorms = V4L2_STD_NTSC_M,
- .current_norm = V4L2_STD_NTSC_M, /* make mplayer happy */
-
- .fops = &cafe_v4l_fops,
- .ioctl_ops = &cafe_v4l_ioctl_ops,
- .release = video_device_release_empty,
-};
-
-
-/* ---------------------------------------------------------------------- */
-/*
- * Interrupt handler stuff
- */
-
-
-
-static void cafe_frame_tasklet(unsigned long data)
-{
- struct cafe_camera *cam = (struct cafe_camera *) data;
- int i;
- unsigned long flags;
- struct cafe_sio_buffer *sbuf;
-
- spin_lock_irqsave(&cam->dev_lock, flags);
- for (i = 0; i < cam->nbufs; i++) {
- int bufno = cam->next_buf;
- if (bufno < 0) { /* "will never happen" */
- cam_err(cam, "No valid bufs in tasklet!\n");
- break;
- }
- if (++(cam->next_buf) >= cam->nbufs)
- cam->next_buf = 0;
- if (! test_bit(bufno, &cam->flags))
- continue;
- if (list_empty(&cam->sb_avail))
- break; /* Leave it valid, hope for better later */
- clear_bit(bufno, &cam->flags);
- sbuf = list_entry(cam->sb_avail.next,
- struct cafe_sio_buffer, list);
- /*
- * Drop the lock during the big copy. This *should* be safe...
- */
- spin_unlock_irqrestore(&cam->dev_lock, flags);
- memcpy(sbuf->buffer, cam->dma_bufs[bufno],
- cam->pix_format.sizeimage);
- sbuf->v4lbuf.bytesused = cam->pix_format.sizeimage;
- sbuf->v4lbuf.sequence = cam->buf_seq[bufno];
- sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_QUEUED;
- sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_DONE;
- spin_lock_irqsave(&cam->dev_lock, flags);
- list_move_tail(&sbuf->list, &cam->sb_full);
- }
- if (! list_empty(&cam->sb_full))
- wake_up(&cam->iowait);
- spin_unlock_irqrestore(&cam->dev_lock, flags);
-}
-
-
-
-static void cafe_frame_complete(struct cafe_camera *cam, int frame)
-{
- /*
- * Basic frame housekeeping.
- */
- if (test_bit(frame, &cam->flags) && printk_ratelimit())
- cam_err(cam, "Frame overrun on %d, frames lost\n", frame);
- set_bit(frame, &cam->flags);
- clear_bit(CF_DMA_ACTIVE, &cam->flags);
- if (cam->next_buf < 0)
- cam->next_buf = frame;
- cam->buf_seq[frame] = ++(cam->sequence);
-
- switch (cam->state) {
- /*
- * If in single read mode, try going speculative.
- */
- case S_SINGLEREAD:
- cam->state = S_SPECREAD;
- cam->specframes = 0;
- wake_up(&cam->iowait);
- break;
-
- /*
- * If we are already doing speculative reads, and nobody is
- * reading them, just stop.
- */
- case S_SPECREAD:
- if (++(cam->specframes) >= cam->nbufs) {
- cafe_ctlr_stop(cam);
- cafe_ctlr_irq_disable(cam);
- cam->state = S_IDLE;
- }
- wake_up(&cam->iowait);
- break;
- /*
- * For the streaming case, we defer the real work to the
- * camera tasklet.
- *
- * FIXME: if the application is not consuming the buffers,
- * we should eventually put things on hold and restart in
- * vidioc_dqbuf().
- */
- case S_STREAMING:
- tasklet_schedule(&cam->s_tasklet);
- break;
-
- default:
- cam_err(cam, "Frame interrupt in non-operational state\n");
- break;
- }
-}
-
-
-
-
-static void cafe_frame_irq(struct cafe_camera *cam, unsigned int irqs)
-{
- unsigned int frame;
-
- cafe_reg_write(cam, REG_IRQSTAT, FRAMEIRQS); /* Clear'em all */
- /*
- * Handle any frame completions. There really should
- * not be more than one of these, or we have fallen
- * far behind.
- */
- for (frame = 0; frame < cam->nbufs; frame++)
- if (irqs & (IRQ_EOF0 << frame))
- cafe_frame_complete(cam, frame);
- /*
- * If a frame starts, note that we have DMA active. This
- * code assumes that we won't get multiple frame interrupts
- * at once; may want to rethink that.
- */
- if (irqs & (IRQ_SOF0 | IRQ_SOF1 | IRQ_SOF2))
- set_bit(CF_DMA_ACTIVE, &cam->flags);
-}
-
-
-
-static irqreturn_t cafe_irq(int irq, void *data)
-{
- struct cafe_camera *cam = data;
- unsigned int irqs;
-
- spin_lock(&cam->dev_lock);
- irqs = cafe_reg_read(cam, REG_IRQSTAT);
- if ((irqs & ALLIRQS) == 0) {
- spin_unlock(&cam->dev_lock);
- return IRQ_NONE;
- }
- if (irqs & FRAMEIRQS)
- cafe_frame_irq(cam, irqs);
- if (irqs & TWSIIRQS) {
- cafe_reg_write(cam, REG_IRQSTAT, TWSIIRQS);
- wake_up(&cam->smbus_wait);
- }
- spin_unlock(&cam->dev_lock);
- return IRQ_HANDLED;
-}
-
-
-/* -------------------------------------------------------------------------- */
-/*
- * PCI interface stuff.
- */
-
-static const struct dmi_system_id olpc_xo1_dmi[] = {
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "OLPC"),
- DMI_MATCH(DMI_PRODUCT_NAME, "XO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "1"),
- },
- },
- { }
-};
-
-static int cafe_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- int ret;
- struct cafe_camera *cam;
- struct ov7670_config sensor_cfg = {
- /* This controller only does SMBUS */
- .use_smbus = true,
-
- /*
- * Exclude QCIF mode, because it only captures a tiny portion
- * of the sensor FOV
- */
- .min_width = 320,
- .min_height = 240,
- };
- struct i2c_board_info ov7670_info = {
- .type = "ov7670",
- .addr = 0x42,
- .platform_data = &sensor_cfg,
- };
-
- /*
- * Start putting together one of our big camera structures.
- */
- ret = -ENOMEM;
- cam = kzalloc(sizeof(struct cafe_camera), GFP_KERNEL);
- if (cam == NULL)
- goto out;
- ret = v4l2_device_register(&pdev->dev, &cam->v4l2_dev);
- if (ret)
- goto out_free;
-
- mutex_init(&cam->s_mutex);
- spin_lock_init(&cam->dev_lock);
- cam->state = S_NOTREADY;
- cafe_set_config_needed(cam, 1);
- init_waitqueue_head(&cam->smbus_wait);
- init_waitqueue_head(&cam->iowait);
- cam->pdev = pdev;
- cam->pix_format = cafe_def_pix_format;
- cam->mbus_code = cafe_def_mbus_code;
- INIT_LIST_HEAD(&cam->dev_list);
- INIT_LIST_HEAD(&cam->sb_avail);
- INIT_LIST_HEAD(&cam->sb_full);
- tasklet_init(&cam->s_tasklet, cafe_frame_tasklet, (unsigned long) cam);
- /*
- * Get set up on the PCI bus.
- */
- ret = pci_enable_device(pdev);
- if (ret)
- goto out_unreg;
- pci_set_master(pdev);
-
- ret = -EIO;
- cam->regs = pci_iomap(pdev, 0, 0);
- if (! cam->regs) {
- printk(KERN_ERR "Unable to ioremap cafe-ccic regs\n");
- goto out_unreg;
- }
- ret = request_irq(pdev->irq, cafe_irq, IRQF_SHARED, "cafe-ccic", cam);
- if (ret)
- goto out_iounmap;
- /*
- * Initialize the controller and leave it powered up. It will
- * stay that way until the sensor driver shows up.
- */
- cafe_ctlr_init(cam);
- cafe_ctlr_power_up(cam);
- /*
- * Set up I2C/SMBUS communications. We have to drop the mutex here
- * because the sensor could attach in this call chain, leading to
- * unsightly deadlocks.
- */
- ret = cafe_smbus_setup(cam);
- if (ret)
- goto out_freeirq;
-
- /* Apply XO-1 clock speed */
- if (dmi_check_system(olpc_xo1_dmi))
- sensor_cfg.clock_speed = 45;
-
- cam->sensor_addr = ov7670_info.addr;
- cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, &cam->i2c_adapter,
- &ov7670_info, NULL);
- if (cam->sensor == NULL) {
- ret = -ENODEV;
- goto out_smbus;
- }
-
- ret = cafe_cam_init(cam);
- if (ret)
- goto out_smbus;
-
- /*
- * Get the v4l2 setup done.
- */
- mutex_lock(&cam->s_mutex);
- cam->vdev = cafe_v4l_template;
- cam->vdev.debug = 0;
-/* cam->vdev.debug = V4L2_DEBUG_IOCTL_ARG;*/
- cam->vdev.v4l2_dev = &cam->v4l2_dev;
- ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
- if (ret)
- goto out_unlock;
- video_set_drvdata(&cam->vdev, cam);
-
- /*
- * If so requested, try to get our DMA buffers now.
- */
- if (!alloc_bufs_at_read) {
- if (cafe_alloc_dma_bufs(cam, 1))
- cam_warn(cam, "Unable to alloc DMA buffers at load"
- " will try again later.");
- }
-
- mutex_unlock(&cam->s_mutex);
- return 0;
-
-out_unlock:
- mutex_unlock(&cam->s_mutex);
-out_smbus:
- cafe_smbus_shutdown(cam);
-out_freeirq:
- cafe_ctlr_power_down(cam);
- free_irq(pdev->irq, cam);
-out_iounmap:
- pci_iounmap(pdev, cam->regs);
-out_free:
- v4l2_device_unregister(&cam->v4l2_dev);
-out_unreg:
- kfree(cam);
-out:
- return ret;
-}
-
-
-/*
- * Shut down an initialized device
- */
-static void cafe_shutdown(struct cafe_camera *cam)
-{
-/* FIXME: Make sure we take care of everything here */
- if (cam->n_sbufs > 0)
- /* What if they are still mapped? Shouldn't be, but... */
- cafe_free_sio_buffers(cam);
- cafe_ctlr_stop_dma(cam);
- cafe_ctlr_power_down(cam);
- cafe_smbus_shutdown(cam);
- cafe_free_dma_bufs(cam);
- free_irq(cam->pdev->irq, cam);
- pci_iounmap(cam->pdev, cam->regs);
- video_unregister_device(&cam->vdev);
-}
-
-
-static void cafe_pci_remove(struct pci_dev *pdev)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
- struct cafe_camera *cam = to_cam(v4l2_dev);
-
- if (cam == NULL) {
- printk(KERN_WARNING "pci_remove on unknown pdev %p\n", pdev);
- return;
- }
- mutex_lock(&cam->s_mutex);
- if (cam->users > 0)
- cam_warn(cam, "Removing a device with users!\n");
- cafe_shutdown(cam);
- v4l2_device_unregister(&cam->v4l2_dev);
- kfree(cam);
-/* No unlock - it no longer exists */
-}
-
-
-#ifdef CONFIG_PM
-/*
- * Basic power management.
- */
-static int cafe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
- struct cafe_camera *cam = to_cam(v4l2_dev);
- int ret;
- enum cafe_state cstate;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
- cstate = cam->state; /* HACK - stop_dma sets to idle */
- cafe_ctlr_stop_dma(cam);
- cafe_ctlr_power_down(cam);
- pci_disable_device(pdev);
- cam->state = cstate;
- return 0;
-}
-
-
-static int cafe_pci_resume(struct pci_dev *pdev)
-{
- struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
- struct cafe_camera *cam = to_cam(v4l2_dev);
- int ret = 0;
-
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
-
- if (ret) {
- cam_warn(cam, "Unable to re-enable device on resume!\n");
- return ret;
- }
- cafe_ctlr_init(cam);
-
- mutex_lock(&cam->s_mutex);
- if (cam->users > 0) {
- cafe_ctlr_power_up(cam);
- __cafe_cam_reset(cam);
- } else {
- cafe_ctlr_power_down(cam);
- }
- mutex_unlock(&cam->s_mutex);
-
- set_bit(CF_CONFIG_NEEDED, &cam->flags);
- if (cam->state == S_SPECREAD)
- cam->state = S_IDLE; /* Don't bother restarting */
- else if (cam->state == S_SINGLEREAD || cam->state == S_STREAMING)
- ret = cafe_read_setup(cam, cam->state);
- return ret;
-}
-
-#endif /* CONFIG_PM */
-
-
-static struct pci_device_id cafe_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_MARVELL,
- PCI_DEVICE_ID_MARVELL_88ALP01_CCIC) },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, cafe_ids);
-
-static struct pci_driver cafe_pci_driver = {
- .name = "cafe1000-ccic",
- .id_table = cafe_ids,
- .probe = cafe_pci_probe,
- .remove = cafe_pci_remove,
-#ifdef CONFIG_PM
- .suspend = cafe_pci_suspend,
- .resume = cafe_pci_resume,
-#endif
-};
-
-
-
-
-static int __init cafe_init(void)
-{
- int ret;
-
- printk(KERN_NOTICE "Marvell M88ALP01 'CAFE' Camera Controller version %d\n",
- CAFE_VERSION);
- ret = pci_register_driver(&cafe_pci_driver);
- if (ret) {
- printk(KERN_ERR "Unable to register cafe_ccic driver\n");
- goto out;
- }
- ret = 0;
-
- out:
- return ret;
-}
-
-
-static void __exit cafe_exit(void)
-{
- pci_unregister_driver(&cafe_pci_driver);
-}
-
-module_init(cafe_init);
-module_exit(cafe_exit);
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
index 6d6d1843791..ab252188981 100644
--- a/drivers/media/video/cpia2/cpia2.h
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -31,7 +31,6 @@
#ifndef __CPIA2_H__
#define __CPIA2_H__
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <linux/usb.h>
@@ -43,10 +42,6 @@
/* define for verbose debug output */
//#define _CPIA2_DEBUG_
-#define CPIA2_MAJ_VER 3
-#define CPIA2_MIN_VER 0
-#define CPIA2_PATCH_VER 0
-
/***
* Image defines
***/
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 40eb6326e48..077eb1db80a 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -29,8 +29,7 @@
* Alan Cox <alan@lxorguk.ukuu.org.uk>
****************************************************************************/
-#include <linux/version.h>
-
+#define CPIA_VERSION "3.0.1"
#include <linux/module.h>
#include <linux/time.h>
@@ -80,6 +79,7 @@ MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>");
MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras");
MODULE_SUPPORTED_DEVICE("video");
MODULE_LICENSE("GPL");
+MODULE_VERSION(CPIA_VERSION);
#define ABOUT "V4L-Driver for Vision CPiA2 based cameras"
@@ -465,9 +465,6 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
if (usb_make_path(cam->dev, vc->bus_info, sizeof(vc->bus_info)) <0)
memset(vc->bus_info,0, sizeof(vc->bus_info));
- vc->version = KERNEL_VERSION(CPIA2_MAJ_VER, CPIA2_MIN_VER,
- CPIA2_PATCH_VER);
-
vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
@@ -1558,8 +1555,8 @@ static void __init check_parameters(void)
*****************************************************************************/
static int __init cpia2_init(void)
{
- LOG("%s v%d.%d.%d\n",
- ABOUT, CPIA2_MAJ_VER, CPIA2_MIN_VER, CPIA2_PATCH_VER);
+ LOG("%s v%s\n",
+ ABOUT, CPIA_VERSION);
check_parameters();
cpia2_usb_init();
return 0;
@@ -1579,4 +1576,3 @@ static void __exit cpia2_exit(void)
module_init(cpia2_init);
module_exit(cpia2_exit);
-
diff --git a/drivers/media/video/cx18/cx18-alsa-main.c b/drivers/media/video/cx18/cx18-alsa-main.c
index d50d69da387..a1e6c2a3247 100644
--- a/drivers/media/video/cx18/cx18-alsa-main.c
+++ b/drivers/media/video/cx18/cx18-alsa-main.c
@@ -192,6 +192,7 @@ static int snd_cx18_init(struct v4l2_device *v4l2_dev)
err_exit_free:
if (sc != NULL)
snd_card_free(sc);
+ kfree(cxsc);
err_exit:
return ret;
}
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 086427288de..18342072306 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -25,7 +25,6 @@
#ifndef CX18_DRIVER_H
#define CX18_DRIVER_H
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index 1933d4d11bf..afe0a29e720 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -469,7 +469,6 @@ static int cx18_querycap(struct file *file, void *fh,
strlcpy(vcap->card, cx->card_name, sizeof(vcap->card));
snprintf(vcap->bus_info, sizeof(vcap->bus_info),
"PCI:%s", pci_name(cx->pci_dev));
- vcap->version = CX18_DRIVER_VERSION; /* version */
vcap->capabilities = cx->v4l2_cap; /* capabilities */
return 0;
}
@@ -695,14 +694,10 @@ static int cx18_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
cx18_call_all(cx, tuner, g_tuner, vt);
- if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
+ if (vt->type == V4L2_TUNER_RADIO)
strlcpy(vt->name, "cx18 Radio Tuner", sizeof(vt->name));
- vt->type = V4L2_TUNER_RADIO;
- } else {
+ else
strlcpy(vt->name, "cx18 TV Tuner", sizeof(vt->name));
- vt->type = V4L2_TUNER_ANALOG_TV;
- }
-
return 0;
}
diff --git a/drivers/media/video/cx18/cx18-version.h b/drivers/media/video/cx18/cx18-version.h
index cd189b6bbe2..fed48b6bb67 100644
--- a/drivers/media/video/cx18/cx18-version.h
+++ b/drivers/media/video/cx18/cx18-version.h
@@ -23,12 +23,6 @@
#define CX18_VERSION_H
#define CX18_DRIVER_NAME "cx18"
-#define CX18_DRIVER_VERSION_MAJOR 1
-#define CX18_DRIVER_VERSION_MINOR 5
-#define CX18_DRIVER_VERSION_PATCHLEVEL 0
-
-#define CX18_VERSION __stringify(CX18_DRIVER_VERSION_MAJOR) "." __stringify(CX18_DRIVER_VERSION_MINOR) "." __stringify(CX18_DRIVER_VERSION_PATCHLEVEL)
-#define CX18_DRIVER_VERSION KERNEL_VERSION(CX18_DRIVER_VERSION_MAJOR, \
- CX18_DRIVER_VERSION_MINOR, CX18_DRIVER_VERSION_PATCHLEVEL)
+#define CX18_VERSION "1.5.1"
#endif
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index 8d781341576..53ff26e7abf 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -355,6 +355,8 @@ int cx231xx_afe_update_power_control(struct cx231xx *dev,
case CX231XX_BOARD_HAUPPAUGE_EXETER:
case CX231XX_BOARD_HAUPPAUGE_USBLIVE2:
case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
if (avmode == POLARIS_AVMODE_ANALOGT_TV) {
while (afe_power_status != (FLD_PWRDN_TUNING_BIAS |
FLD_PWRDN_ENABLE_PLL)) {
@@ -1733,6 +1735,8 @@ int cx231xx_dif_set_standard(struct cx231xx *dev, u32 standard)
break;
case CX231XX_BOARD_CNXT_RDE_253S:
case CX231XX_BOARD_CNXT_RDU_253S:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
func_mode = 0x01;
break;
default:
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 22703815a31..53dae2a8272 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -387,6 +387,7 @@ struct cx231xx_board cx231xx_boards[] = {
.norm = V4L2_STD_NTSC,
.no_alt_vanc = 1,
.external_av = 1,
+ .dont_use_port_3 = 1,
.input = {{
.type = CX231XX_VMUX_COMPOSITE1,
.vmux = CX231XX_VIN_2_1,
@@ -532,6 +533,76 @@ struct cx231xx_board cx231xx_boards[] = {
.gpio = NULL,
} },
},
+ [CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL] = {
+ .name = "Hauppauge WinTV USB2 FM (PAL)",
+ .tuner_type = TUNER_NXP_TDA18271,
+ .tuner_addr = 0x60,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 1,
+ .norm = V4L2_STD_PAL,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_3_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ } },
+ },
+ [CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC] = {
+ .name = "Hauppauge WinTV USB2 FM (NTSC)",
+ .tuner_type = TUNER_NXP_TDA18271,
+ .tuner_addr = 0x60,
+ .tuner_gpio = RDE250_XCV_TUNER,
+ .tuner_sif_gpio = 0x05,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x0c,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 1,
+ .norm = V4L2_STD_NTSC,
+
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_3_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ } },
+ },
};
const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
@@ -553,6 +624,10 @@ struct usb_device_id cx231xx_id_table[] = {
.driver_info = CX231XX_BOARD_CNXT_RDE_250},
{USB_DEVICE(0x0572, 0x58A0),
.driver_info = CX231XX_BOARD_CNXT_RDU_250},
+ {USB_DEVICE(0x2040, 0xb110),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL},
+ {USB_DEVICE(0x2040, 0xb111),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC},
{USB_DEVICE(0x2040, 0xb120),
.driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER},
{USB_DEVICE(0x2040, 0xb140),
@@ -1051,6 +1126,9 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
if (assoc_desc->bFirstInterface != ifnum) {
cx231xx_err(DRIVER_NAME ": Not found "
"matching IAD interface\n");
+ cx231xx_devused &= ~(1 << nr);
+ kfree(dev);
+ dev = NULL;
return -ENODEV;
}
diff --git a/drivers/media/video/cx231xx/cx231xx-core.c b/drivers/media/video/cx231xx/cx231xx-core.c
index abe500feb7d..d4457f9488e 100644
--- a/drivers/media/video/cx231xx/cx231xx-core.c
+++ b/drivers/media/video/cx231xx/cx231xx-core.c
@@ -742,6 +742,8 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
case CX231XX_BOARD_CNXT_RDU_253S:
case CX231XX_BOARD_HAUPPAUGE_EXETER:
case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
break;
default:
@@ -1381,6 +1383,8 @@ int cx231xx_dev_init(struct cx231xx *dev)
case CX231XX_BOARD_CNXT_RDU_253S:
case CX231XX_BOARD_HAUPPAUGE_EXETER:
case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
+ case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
break;
default:
diff --git a/drivers/media/video/cx231xx/cx231xx-video.c b/drivers/media/video/cx231xx/cx231xx-video.c
index a69c24d8db0..6e81f970dc7 100644
--- a/drivers/media/video/cx231xx/cx231xx-video.c
+++ b/drivers/media/video/cx231xx/cx231xx-video.c
@@ -29,7 +29,6 @@
#include <linux/bitmap.h>
#include <linux/usb.h>
#include <linux/i2c.h>
-#include <linux/version.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -45,7 +44,7 @@
#include "cx231xx.h"
#include "cx231xx-vbi.h"
-#define CX231XX_VERSION_CODE KERNEL_VERSION(0, 0, 1)
+#define CX231XX_VERSION "0.0.2"
#define DRIVER_AUTHOR "Srinivasa Deevi <srinivasa.deevi@conexant.com>"
#define DRIVER_DESC "Conexant cx231xx based USB video device driver"
@@ -70,6 +69,7 @@ do {\
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(CX231XX_VERSION);
static unsigned int card[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
static unsigned int video_nr[] = {[0 ... (CX231XX_MAXBOARDS - 1)] = UNSET };
@@ -1179,7 +1179,8 @@ static int vidioc_enum_input(struct file *file, void *priv,
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
- unsigned int n;
+ u32 gen_stat;
+ unsigned int ret, n;
n = i->index;
if (n >= MAX_CX231XX_INPUT)
@@ -1198,6 +1199,18 @@ static int vidioc_enum_input(struct file *file, void *priv,
i->std = dev->vdev->tvnorms;
+ /* If they are asking about the active input, read signal status */
+ if (n == dev->video_input) {
+ ret = cx231xx_read_i2c_data(dev, VID_BLK_I2C_ADDRESS,
+ GEN_STAT, 2, &gen_stat, 4);
+ if (ret > 0) {
+ if ((gen_stat & FLD_VPRES) == 0x00)
+ i->status |= V4L2_IN_ST_NO_SIGNAL;
+ if ((gen_stat & FLD_HLOCK) == 0x00)
+ i->status |= V4L2_IN_ST_NO_H_LOCK;
+ }
+ }
+
return 0;
}
@@ -1869,8 +1882,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = CX231XX_VERSION_CODE;
-
cap->capabilities = V4L2_CAP_VBI_CAPTURE |
#if 0
V4L2_CAP_SLICED_VBI_CAPTURE |
@@ -2057,7 +2068,6 @@ static int radio_querycap(struct file *file, void *priv,
strlcpy(cap->card, cx231xx_boards[dev->model].name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = CX231XX_VERSION_CODE;
cap->capabilities = V4L2_CAP_TUNER;
return 0;
}
@@ -2570,11 +2580,8 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
{
int ret;
- cx231xx_info("%s: v4l2 driver version %d.%d.%d\n",
- dev->name,
- (CX231XX_VERSION_CODE >> 16) & 0xff,
- (CX231XX_VERSION_CODE >> 8) & 0xff,
- CX231XX_VERSION_CODE & 0xff);
+ cx231xx_info("%s: v4l2 driver version %s\n",
+ dev->name, CX231XX_VERSION);
/* set default norm */
/*dev->norm = cx231xx_video_template.current_norm; */
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index 46dd8406781..2000bc64c49 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -43,7 +43,7 @@
#include "cx231xx-conf-reg.h"
#define DRIVER_NAME "cx231xx"
-#define PWR_SLEEP_INTERVAL 5
+#define PWR_SLEEP_INTERVAL 10
/* I2C addresses for control block in Cx231xx */
#define AFE_DEVICE_ADDRESS 0x60
@@ -67,6 +67,8 @@
#define CX231XX_BOARD_PV_XCAPTURE_USB 11
#define CX231XX_BOARD_KWORLD_UB430_USB_HYBRID 12
#define CX231XX_BOARD_ICONBIT_U100 13
+#define CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL 14
+#define CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC 15
/* Limits minimum and default number of buffers */
#define CX231XX_MIN_BUF 4
@@ -112,7 +114,6 @@
V4L2_STD_PAL_BG | V4L2_STD_PAL_DK | V4L2_STD_PAL_I | \
V4L2_STD_PAL_M | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc | \
V4L2_STD_PAL_60 | V4L2_STD_SECAM_L | V4L2_STD_SECAM_DK)
-#define CX231xx_VERSION_CODE KERNEL_VERSION(0, 0, 2)
#define SLEEP_S5H1432 30
#define CX23417_OSC_EN 8
diff --git a/drivers/media/video/cx23885/altera-ci.c b/drivers/media/video/cx23885/altera-ci.c
index 678539b2acf..1fa8927f0d3 100644
--- a/drivers/media/video/cx23885/altera-ci.c
+++ b/drivers/media/video/cx23885/altera-ci.c
@@ -52,7 +52,6 @@
* | DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0|
* +-------+-------+-------+-------+-------+-------+-------+-------+
*/
-#include <linux/version.h>
#include <media/videobuf-dma-sg.h>
#include <media/videobuf-dvb.h>
#include "altera-ci.h"
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 9a98dc55f65..67c4a59bd88 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1359,7 +1359,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, cx23885_boards[tsport->dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->version = CX23885_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 934185cca75..76b7563de39 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -29,11 +29,17 @@
#include "../../../staging/altera-stapl/altera.h"
#include "cx23885.h"
#include "tuner-xc2028.h"
+#include "netup-eeprom.h"
#include "netup-init.h"
#include "altera-ci.h"
+#include "xc4000.h"
#include "xc5000.h"
#include "cx23888-ir.h"
+static unsigned int netup_card_rev = 1;
+module_param(netup_card_rev, int, 0644);
+MODULE_PARM_DESC(netup_card_rev,
+ "NetUP Dual DVB-T/C CI card revision");
static unsigned int enable_885_ir;
module_param(enable_885_ir, int, 0644);
MODULE_PARM_DESC(enable_885_ir,
@@ -175,6 +181,34 @@ struct cx23885_board cx23885_boards[] = {
.name = "Leadtek Winfast PxDVR3200 H",
.portc = CX23885_MPEG_DVB,
},
+ [CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000] = {
+ .name = "Leadtek Winfast PxDVR3200 H XC4000",
+ .porta = CX23885_ANALOG_VIDEO,
+ .portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_XC4000,
+ .tuner_addr = 0x61,
+ .radio_type = TUNER_XC4000,
+ .radio_addr = 0x61,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN2_CH1 |
+ CX25840_VIN5_CH2 |
+ CX25840_NONE0_CH3,
+ }, {
+ .type = CX23885_VMUX_COMPOSITE1,
+ .vmux = CX25840_COMPOSITE1,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_SVIDEO_LUMA3 |
+ CX25840_SVIDEO_CHROMA4,
+ }, {
+ .type = CX23885_VMUX_COMPONENT,
+ .vmux = CX25840_VIN7_CH1 |
+ CX25840_VIN6_CH2 |
+ CX25840_VIN8_CH3 |
+ CX25840_COMPONENT_ON,
+ } },
+ },
[CX23885_BOARD_COMPRO_VIDEOMATE_E650F] = {
.name = "Compro VideoMate E650F",
.portc = CX23885_MPEG_DVB,
@@ -433,6 +467,10 @@ struct cx23885_subid cx23885_subids[] = {
.subdevice = 0x6681,
.card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H,
}, {
+ .subvendor = 0x107d,
+ .subdevice = 0x6f39,
+ .card = CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000,
+ }, {
.subvendor = 0x185b,
.subdevice = 0xe800,
.card = CX23885_BOARD_COMPRO_VIDEOMATE_E650F,
@@ -749,6 +787,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
case CX23885_BOARD_HAUPPAUGE_HVR1500:
case CX23885_BOARD_HAUPPAUGE_HVR1500Q:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
@@ -909,6 +948,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_set(GP0_IO, 0x000f000f);
break;
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
@@ -1097,12 +1137,19 @@ int cx23885_ir_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1800:
case CX23885_BOARD_HAUPPAUGE_HVR1200:
case CX23885_BOARD_HAUPPAUGE_HVR1400:
- case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
/* FIXME: Implement me */
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
+ ret = cx23888_ir_probe(dev);
+ if (ret)
+ break;
+ dev->sd_ir = cx23885_find_hw(dev, CX23885_HW_888_IR);
+ v4l2_subdev_call(dev->sd_cx25840, core, s_io_pin_config,
+ ir_rx_pin_cfg_count, ir_rx_pin_cfg);
+ break;
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
ret = cx23888_ir_probe(dev);
@@ -1156,6 +1203,7 @@ int cx23885_ir_init(struct cx23885_dev *dev)
void cx23885_ir_fini(struct cx23885_dev *dev)
{
switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
cx23885_irq_remove(dev, PCI_MSK_IR);
@@ -1199,6 +1247,7 @@ int netup_jtag_io(void *device, int tms, int tdi, int read_tdo)
void cx23885_ir_pci_int_enable(struct cx23885_dev *dev)
{
switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
if (dev->sd_ir)
@@ -1325,6 +1374,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1700:
case CX23885_BOARD_HAUPPAUGE_HVR1400:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
@@ -1353,10 +1403,12 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
case CX23885_BOARD_HAUPPAUGE_HVR1700:
case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H:
+ case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
case CX23885_BOARD_COMPRO_VIDEOMATE_E650F:
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_MYGICA_X8506:
case CX23885_BOARD_MAGICPRO_PROHDTVE2:
@@ -1383,6 +1435,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
const struct firmware *fw;
const char *filename = "dvb-netup-altera-01.fw";
char *action = "configure";
+ static struct netup_card_info cinfo;
struct altera_config netup_config = {
.dev = dev,
.action = action,
@@ -1391,6 +1444,21 @@ void cx23885_card_setup(struct cx23885_dev *dev)
netup_initialize(dev);
+ netup_get_card_info(&dev->i2c_bus[0].i2c_adap, &cinfo);
+ if (netup_card_rev)
+ cinfo.rev = netup_card_rev;
+
+ switch (cinfo.rev) {
+ case 0x4:
+ filename = "dvb-netup-altera-04.fw";
+ break;
+ default:
+ filename = "dvb-netup-altera-01.fw";
+ break;
+ }
+ printk(KERN_INFO "NetUP card rev=0x%x fw_filename=%s\n",
+ cinfo.rev, filename);
+
ret = request_firmware(&fw, filename, &dev->pci->dev);
if (ret != 0)
printk(KERN_ERR "did not find the firmware file. (%s) "
diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c
index 64d9b2136ff..ee41a8882f5 100644
--- a/drivers/media/video/cx23885/cx23885-core.c
+++ b/drivers/media/video/cx23885/cx23885-core.c
@@ -42,6 +42,7 @@
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
+MODULE_VERSION(CX23885_VERSION);
static unsigned int debug;
module_param(debug, int, 0644);
@@ -2060,12 +2061,8 @@ static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
goto fail_irq;
}
- if (!pci_enable_msi(pci_dev))
- err = request_irq(pci_dev->irq, cx23885_irq,
- IRQF_DISABLED, dev->name, dev);
- else
- err = request_irq(pci_dev->irq, cx23885_irq,
- IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
+ err = request_irq(pci_dev->irq, cx23885_irq,
+ IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
if (err < 0) {
printk(KERN_ERR "%s: can't get IRQ %d\n",
dev->name, pci_dev->irq);
@@ -2114,7 +2111,6 @@ static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
/* unregister stuff */
free_irq(pci_dev->irq, dev);
- pci_disable_msi(pci_dev);
cx23885_dev_unregister(dev);
v4l2_device_unregister(v4l2_dev);
@@ -2152,14 +2148,8 @@ static struct pci_driver cx23885_pci_driver = {
static int __init cx23885_init(void)
{
- printk(KERN_INFO "cx23885 driver version %d.%d.%d loaded\n",
- (CX23885_VERSION_CODE >> 16) & 0xff,
- (CX23885_VERSION_CODE >> 8) & 0xff,
- CX23885_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "cx23885: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "cx23885 driver version %s loaded\n",
+ CX23885_VERSION);
return pci_register_driver(&cx23885_pci_driver);
}
@@ -2170,5 +2160,3 @@ static void __exit cx23885_fini(void)
module_init(cx23885_init);
module_exit(cx23885_fini);
-
-/* ----------------------------------------------------------- */
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index 3c315f94cc8..aa83f07b1b0 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -37,6 +37,7 @@
#include "tda8290.h"
#include "tda18271.h"
#include "lgdt330x.h"
+#include "xc4000.h"
#include "xc5000.h"
#include "max2165.h"
#include "tda10048.h"
@@ -921,6 +922,26 @@ static int dvb_register(struct cx23885_tsport *port)
fe->ops.tuner_ops.set_config(fe, &ctl);
}
break;
+ case CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000:
+ i2c_bus = &dev->i2c_bus[0];
+
+ fe0->dvb.frontend = dvb_attach(zl10353_attach,
+ &dvico_fusionhdtv_xc3028,
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ struct dvb_frontend *fe;
+ struct xc4000_config cfg = {
+ .i2c_address = 0x61,
+ .default_pm = 0,
+ .dvb_amplitude = 134,
+ .set_smoothedcvbs = 1,
+ .if_khz = 4560
+ };
+
+ fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
+ &dev->i2c_bus[1].i2c_adap, &cfg);
+ }
+ break;
case CX23885_BOARD_TBS_6920:
i2c_bus = &dev->i2c_bus[1];
@@ -1249,7 +1270,7 @@ int cx23885_dvb_unregister(struct cx23885_tsport *port)
* implement MFE support.
*/
fe0 = videobuf_dvb_get_frontend(&port->frontends, 1);
- if (fe0->dvb.frontend)
+ if (fe0 && fe0->dvb.frontend)
videobuf_dvb_unregister_bus(&port->frontends);
switch (port->dev->board) {
diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
index e97cafd8398..ce765e3f77b 100644
--- a/drivers/media/video/cx23885/cx23885-input.c
+++ b/drivers/media/video/cx23885/cx23885-input.c
@@ -82,6 +82,7 @@ void cx23885_input_rx_work_handler(struct cx23885_dev *dev, u32 events)
return;
switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_TEVII_S470:
@@ -133,6 +134,7 @@ static int cx23885_input_ir_start(struct cx23885_dev *dev)
v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
@@ -229,6 +231,9 @@ static void cx23885_input_ir_stop(struct cx23885_dev *dev)
v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
}
+ flush_work_sync(&dev->cx25840_work);
+ flush_work_sync(&dev->ir_rx_work);
+ flush_work_sync(&dev->ir_tx_work);
}
static void cx23885_input_ir_close(struct rc_dev *rc)
@@ -257,6 +262,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
return -ENODEV;
switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index ee57f6bedbe..896bb32dbf0 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -1000,7 +1000,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, cx23885_boards[dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCIe:%s", pci_name(dev->pci));
- cap->version = CX23885_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index c186473fc57..d86bc0b1317 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -36,10 +36,9 @@
#include "cx23885-reg.h"
#include "media/cx2341x.h"
-#include <linux/version.h>
#include <linux/mutex.h>
-#define CX23885_VERSION_CODE KERNEL_VERSION(0, 0, 2)
+#define CX23885_VERSION "0.0.3"
#define UNSET (-1U)
@@ -86,6 +85,7 @@
#define CX23885_BOARD_LEADTEK_WINFAST_PXTV1200 28
#define CX23885_BOARD_GOTVIEW_X5_3D_HYBRID 29
#define CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF 30
+#define CX23885_BOARD_LEADTEK_WINFAST_PXDVR3200_H_XC4000 31
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 423c1af8a78..68d1240f493 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -113,6 +113,8 @@ MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards");
MODULE_AUTHOR("Ricardo Cerqueira");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
+MODULE_VERSION(CX88_VERSION);
+
MODULE_SUPPORTED_DEVICE("{{Conexant,23881},"
"{{Conexant,23882},"
"{{Conexant,23883}");
@@ -973,14 +975,8 @@ static struct pci_driver cx88_audio_pci_driver = {
*/
static int __init cx88_audio_init(void)
{
- printk(KERN_INFO "cx2388x alsa driver version %d.%d.%d loaded\n",
- (CX88_VERSION_CODE >> 16) & 0xff,
- (CX88_VERSION_CODE >> 8) & 0xff,
- CX88_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "cx2388x alsa driver version %s loaded\n",
+ CX88_VERSION);
return pci_register_driver(&cx88_audio_pci_driver);
}
@@ -994,10 +990,3 @@ static void __exit cx88_audio_fini(void)
module_init(cx88_audio_init);
module_exit(cx88_audio_fini);
-
-/* ----------------------------------------------------------- */
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 11e49bbc4a6..e46446a449c 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -42,6 +42,7 @@
MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards");
MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
+MODULE_VERSION(CX88_VERSION);
static unsigned int mpegbufs = 32;
module_param(mpegbufs,int,0644);
@@ -730,7 +731,6 @@ static int vidioc_querycap (struct file *file, void *priv,
strcpy(cap->driver, "cx88_blackbird");
strlcpy(cap->card, core->board.name, sizeof(cap->card));
sprintf(cap->bus_info,"PCI:%s",pci_name(dev->pci));
- cap->version = CX88_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
@@ -1368,14 +1368,8 @@ static struct cx8802_driver cx8802_blackbird_driver = {
static int __init blackbird_init(void)
{
- printk(KERN_INFO "cx2388x blackbird driver version %d.%d.%d loaded\n",
- (CX88_VERSION_CODE >> 16) & 0xff,
- (CX88_VERSION_CODE >> 8) & 0xff,
- CX88_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "cx2388x blackbird driver version %s loaded\n",
+ CX88_VERSION);
return cx8802_register_driver(&cx8802_blackbird_driver);
}
@@ -1389,11 +1383,3 @@ module_exit(blackbird_fini);
module_param_named(video_debug,cx8802_mpeg_template.debug, int, 0644);
MODULE_PARM_DESC(debug,"enable debug messages [video]");
-
-/* ----------------------------------------------------------- */
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
- */
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index 27222c92b60..0d719faafd8 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -28,6 +28,7 @@
#include "cx88.h"
#include "tea5767.h"
+#include "xc4000.h"
static unsigned int tuner[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
static unsigned int radio[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
@@ -2119,6 +2120,99 @@ static const struct cx88_board cx88_boards[] = {
},
.mpeg = CX88_MPEG_DVB,
},
+ [CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
+ .name = "Leadtek WinFast DTV1800 H (XC4000)",
+ .tuner_type = TUNER_XC4000,
+ .radio_type = TUNER_XC4000,
+ .tuner_addr = 0x61,
+ .radio_addr = 0x61,
+ /*
+ * GPIO setting
+ *
+ * 2: mute (0=off,1=on)
+ * 12: tuner reset pin
+ * 13: audio source (0=tuner audio,1=line in)
+ * 14: FM (0=on,1=off ???)
+ */
+ .input = {{
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6040, /* pin 13 = 0, pin 14 = 1 */
+ .gpio2 = 0x0000,
+ }, {
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6060, /* pin 13 = 1, pin 14 = 1 */
+ .gpio2 = 0x0000,
+ }, {
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6060, /* pin 13 = 1, pin 14 = 1 */
+ .gpio2 = 0x0000,
+ }},
+ .radio = {
+ .type = CX88_RADIO,
+ .gpio0 = 0x0400, /* pin 2 = 0 */
+ .gpio1 = 0x6000, /* pin 13 = 0, pin 14 = 0 */
+ .gpio2 = 0x0000,
+ },
+ .mpeg = CX88_MPEG_DVB,
+ },
+ [CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
+ .name = "Leadtek WinFast DTV2000 H PLUS",
+ .tuner_type = TUNER_XC4000,
+ .radio_type = TUNER_XC4000,
+ .tuner_addr = 0x61,
+ .radio_addr = 0x61,
+ /*
+ * GPIO
+ * 2: 1: mute audio
+ * 12: 0: reset XC4000
+ * 13: 1: audio input is line in (0: tuner)
+ * 14: 0: FM radio
+ * 16: 0: RF input is cable
+ */
+ .input = {{
+ .type = CX88_VMUX_TELEVISION,
+ .vmux = 0,
+ .gpio0 = 0x0403,
+ .gpio1 = 0xF0D7,
+ .gpio2 = 0x0101,
+ .gpio3 = 0x0000,
+ }, {
+ .type = CX88_VMUX_CABLE,
+ .vmux = 0,
+ .gpio0 = 0x0403,
+ .gpio1 = 0xF0D7,
+ .gpio2 = 0x0100,
+ .gpio3 = 0x0000,
+ }, {
+ .type = CX88_VMUX_COMPOSITE1,
+ .vmux = 1,
+ .gpio0 = 0x0403, /* was 0x0407 */
+ .gpio1 = 0xF0F7,
+ .gpio2 = 0x0101,
+ .gpio3 = 0x0000,
+ }, {
+ .type = CX88_VMUX_SVIDEO,
+ .vmux = 2,
+ .gpio0 = 0x0403, /* was 0x0407 */
+ .gpio1 = 0xF0F7,
+ .gpio2 = 0x0101,
+ .gpio3 = 0x0000,
+ }},
+ .radio = {
+ .type = CX88_RADIO,
+ .gpio0 = 0x0403,
+ .gpio1 = 0xF097,
+ .gpio2 = 0x0100,
+ .gpio3 = 0x0000,
+ },
+ .mpeg = CX88_MPEG_DVB,
+ },
[CX88_BOARD_PROF_7301] = {
.name = "Prof 7301 DVB-S/S2",
.tuner_type = UNSET,
@@ -2581,6 +2675,15 @@ static const struct cx88_subid cx88_subids[] = {
.subdevice = 0x6654,
.card = CX88_BOARD_WINFAST_DTV1800H,
}, {
+ /* WinFast DTV1800 H with XC4000 tuner */
+ .subvendor = 0x107d,
+ .subdevice = 0x6f38,
+ .card = CX88_BOARD_WINFAST_DTV1800H_XC4000,
+ }, {
+ .subvendor = 0x107d,
+ .subdevice = 0x6f42,
+ .card = CX88_BOARD_WINFAST_DTV2000H_PLUS,
+ }, {
/* PVR2000 PAL Model [107d:6630] */
.subvendor = 0x107d,
.subdevice = 0x6630,
@@ -2846,6 +2949,23 @@ static int cx88_xc3028_winfast1800h_callback(struct cx88_core *core,
return -EINVAL;
}
+static int cx88_xc4000_winfast2000h_plus_callback(struct cx88_core *core,
+ int command, int arg)
+{
+ switch (command) {
+ case XC4000_TUNER_RESET:
+ /* GPIO 12 (xc4000 tuner reset) */
+ cx_set(MO_GP1_IO, 0x1010);
+ mdelay(50);
+ cx_clear(MO_GP1_IO, 0x10);
+ mdelay(75);
+ cx_set(MO_GP1_IO, 0x10);
+ mdelay(75);
+ return 0;
+ }
+ return -EINVAL;
+}
+
/* ------------------------------------------------------------------- */
/* some Divco specific stuff */
static int cx88_pv_8000gt_callback(struct cx88_core *core,
@@ -2948,6 +3068,19 @@ static int cx88_xc2028_tuner_callback(struct cx88_core *core,
return -EINVAL;
}
+static int cx88_xc4000_tuner_callback(struct cx88_core *core,
+ int command, int arg)
+{
+ /* Board-specific callbacks */
+ switch (core->boardnr) {
+ case CX88_BOARD_WINFAST_DTV1800H_XC4000:
+ case CX88_BOARD_WINFAST_DTV2000H_PLUS:
+ return cx88_xc4000_winfast2000h_plus_callback(core,
+ command, arg);
+ }
+ return -EINVAL;
+}
+
/* ----------------------------------------------------------------------- */
/* Tuner callback function. Currently only needed for the Pinnacle *
* PCTV HD 800i with an xc5000 sillicon tuner. This is used for both *
@@ -3022,6 +3155,9 @@ int cx88_tuner_callback(void *priv, int component, int command, int arg)
case TUNER_XC2028:
info_printk(core, "Calling XC2028/3028 callback\n");
return cx88_xc2028_tuner_callback(core, command, arg);
+ case TUNER_XC4000:
+ info_printk(core, "Calling XC4000 callback\n");
+ return cx88_xc4000_tuner_callback(core, command, arg);
case TUNER_XC5000:
info_printk(core, "Calling XC5000 callback\n");
return cx88_xc5000_tuner_callback(core, command, arg);
@@ -3109,13 +3245,13 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
case CX88_BOARD_WINFAST_DTV1800H:
- /* GPIO 12 (xc3028 tuner reset) */
- cx_set(MO_GP1_IO, 0x1010);
- mdelay(50);
- cx_clear(MO_GP1_IO, 0x10);
- mdelay(50);
- cx_set(MO_GP1_IO, 0x10);
- mdelay(50);
+ cx88_xc3028_winfast1800h_callback(core, XC2028_TUNER_RESET, 0);
+ break;
+
+ case CX88_BOARD_WINFAST_DTV1800H_XC4000:
+ case CX88_BOARD_WINFAST_DTV2000H_PLUS:
+ cx88_xc4000_winfast2000h_plus_callback(core,
+ XC4000_TUNER_RESET, 0);
break;
case CX88_BOARD_TWINHAN_VP1027_DVBS:
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index 2e145f0a5fd..fbcaa1c5b09 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -636,6 +636,9 @@ int cx88_reset(struct cx88_core *core)
cx_write(MO_PCI_INTSTAT, 0xFFFFFFFF); // Clear PCI int
cx_write(MO_INT1_STAT, 0xFFFFFFFF); // Clear RISC int
+ /* set default notch filter */
+ cx_andor(MO_HTOTAL, 0x1800, (HLNotchFilter4xFsc << 11));
+
/* Reset on-board parts */
cx_write(MO_SRST_IO, 0);
msleep(10);
@@ -759,8 +762,8 @@ int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int heig
if (nocomb)
value |= (3 << 5); // disable comb filter
- cx_write(MO_FILTER_EVEN, value);
- cx_write(MO_FILTER_ODD, value);
+ cx_andor(MO_FILTER_EVEN, 0x7ffc7f, value); /* preserve PEAKEN, PSEL */
+ cx_andor(MO_FILTER_ODD, 0x7ffc7f, value);
dprintk(1,"set_scale: filter 0x%04x\n", value);
return 0;
@@ -994,10 +997,10 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
// htotal
tmp64 = norm_htotal(norm) * (u64)vdec_clock;
do_div(tmp64, fsc8);
- htotal = (u32)tmp64 | (HLNotchFilter4xFsc << 11);
+ htotal = (u32)tmp64;
dprintk(1,"set_tvnorm: MO_HTOTAL 0x%08x [old=0x%08x,htotal=%d]\n",
htotal, cx_read(MO_HTOTAL), (u32)tmp64);
- cx_write(MO_HTOTAL, htotal);
+ cx_andor(MO_HTOTAL, 0x07ff, htotal);
// vbi stuff, set vbi offset to 10 (for 20 Clk*2 pixels), this makes
// the effective vbi offset ~244 samples, the same as the Bt8x8
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index c69df7ebb6a..cf3d33ab541 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -41,6 +41,7 @@
#include "or51132.h"
#include "lgdt330x.h"
#include "s5h1409.h"
+#include "xc4000.h"
#include "xc5000.h"
#include "nxt200x.h"
#include "cx24123.h"
@@ -63,6 +64,7 @@ MODULE_DESCRIPTION("driver for cx2388x based DVB cards");
MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
+MODULE_VERSION(CX88_VERSION);
static unsigned int debug;
module_param(debug, int, 0644);
@@ -605,6 +607,39 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
return 0;
}
+static int attach_xc4000(struct cx8802_dev *dev, struct xc4000_config *cfg)
+{
+ struct dvb_frontend *fe;
+ struct videobuf_dvb_frontend *fe0 = NULL;
+
+ /* Get the first frontend */
+ fe0 = videobuf_dvb_get_frontend(&dev->frontends, 1);
+ if (!fe0)
+ return -EINVAL;
+
+ if (!fe0->dvb.frontend) {
+ printk(KERN_ERR "%s/2: dvb frontend not attached. "
+ "Can't attach xc4000\n",
+ dev->core->name);
+ return -EINVAL;
+ }
+
+ fe = dvb_attach(xc4000_attach, fe0->dvb.frontend, &dev->core->i2c_adap,
+ cfg);
+ if (!fe) {
+ printk(KERN_ERR "%s/2: xc4000 attach failed\n",
+ dev->core->name);
+ dvb_frontend_detach(fe0->dvb.frontend);
+ dvb_unregister_frontend(fe0->dvb.frontend);
+ fe0->dvb.frontend = NULL;
+ return -EINVAL;
+ }
+
+ printk(KERN_INFO "%s/2: xc4000 attached\n", dev->core->name);
+
+ return 0;
+}
+
static int cx24116_set_ts_param(struct dvb_frontend *fe,
int is_punctured)
{
@@ -1294,7 +1329,25 @@ static int dvb_register(struct cx8802_dev *dev)
goto frontend_detach;
}
break;
- case CX88_BOARD_GENIATECH_X8000_MT:
+ case CX88_BOARD_WINFAST_DTV1800H_XC4000:
+ case CX88_BOARD_WINFAST_DTV2000H_PLUS:
+ fe0->dvb.frontend = dvb_attach(zl10353_attach,
+ &cx88_pinnacle_hybrid_pctv,
+ &core->i2c_adap);
+ if (fe0->dvb.frontend) {
+ struct xc4000_config cfg = {
+ .i2c_address = 0x61,
+ .default_pm = 0,
+ .dvb_amplitude = 134,
+ .set_smoothedcvbs = 1,
+ .if_khz = 4560
+ };
+ fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
+ if (attach_xc4000(dev, &cfg) < 0)
+ goto frontend_detach;
+ }
+ break;
+ case CX88_BOARD_GENIATECH_X8000_MT:
dev->ts_gen_cntrl = 0x00;
fe0->dvb.frontend = dvb_attach(zl10353_attach,
@@ -1577,6 +1630,11 @@ static int cx8802_dvb_advise_acquire(struct cx8802_driver *drv)
udelay(1000);
break;
+ case CX88_BOARD_WINFAST_DTV2000H_PLUS:
+ /* set RF input to AIR for DVB-T (GPIO 16) */
+ cx_write(MO_GP2_IO, 0x0101);
+ break;
+
default:
err = -ENODEV;
}
@@ -1692,14 +1750,8 @@ static struct cx8802_driver cx8802_dvb_driver = {
static int __init dvb_init(void)
{
- printk(KERN_INFO "cx88/2: cx2388x dvb driver version %d.%d.%d loaded\n",
- (CX88_VERSION_CODE >> 16) & 0xff,
- (CX88_VERSION_CODE >> 8) & 0xff,
- CX88_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "cx88/2: cx2388x dvb driver version %s loaded\n",
+ CX88_VERSION);
return cx8802_register_driver(&cx8802_dvb_driver);
}
@@ -1710,10 +1762,3 @@ static void __exit dvb_fini(void)
module_init(dvb_init);
module_exit(dvb_fini);
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * compile-command: "make DVB=1"
- * End:
- */
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index 3f442003623..e614201b5ed 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -100,6 +100,8 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
break;
case CX88_BOARD_WINFAST_DTV1000:
case CX88_BOARD_WINFAST_DTV1800H:
+ case CX88_BOARD_WINFAST_DTV1800H_XC4000:
+ case CX88_BOARD_WINFAST_DTV2000H_PLUS:
case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
gpio = (gpio & 0x6ff) | ((cx_read(MO_GP1_IO) << 8) & 0x900);
auxgpio = gpio;
@@ -289,6 +291,8 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
case CX88_BOARD_WINFAST_DTV2000H:
case CX88_BOARD_WINFAST_DTV2000H_J:
case CX88_BOARD_WINFAST_DTV1800H:
+ case CX88_BOARD_WINFAST_DTV1800H_XC4000:
+ case CX88_BOARD_WINFAST_DTV2000H_PLUS:
ir_codes = RC_MAP_WINFAST;
ir->gpio_addr = MO_GP0_IO;
ir->mask_keycode = 0x8f8;
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 1a7b983f829..cd5386ee210 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -39,6 +39,7 @@ MODULE_AUTHOR("Jelle Foks <jelle@foks.us>");
MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
+MODULE_VERSION(CX88_VERSION);
static unsigned int debug;
module_param(debug,int,0644);
@@ -613,13 +614,17 @@ static int cx8802_request_acquire(struct cx8802_driver *drv)
core->active_type_id != drv->type_id)
return -EBUSY;
- core->input = 0;
- for (i = 0;
- i < (sizeof(core->board.input) / sizeof(struct cx88_input));
- i++) {
- if (core->board.input[i].type == CX88_VMUX_DVB) {
- core->input = i;
- break;
+ if (drv->type_id == CX88_MPEG_DVB) {
+ /* When switching to DVB, always set the input to the tuner */
+ core->last_analog_input = core->input;
+ core->input = 0;
+ for (i = 0;
+ i < (sizeof(core->board.input) / sizeof(struct cx88_input));
+ i++) {
+ if (core->board.input[i].type == CX88_VMUX_DVB) {
+ core->input = i;
+ break;
+ }
}
}
@@ -644,6 +649,12 @@ static int cx8802_request_release(struct cx8802_driver *drv)
if (drv->advise_release && --core->active_ref == 0)
{
+ if (drv->type_id == CX88_MPEG_DVB) {
+ /* If the DVB driver is releasing, reset the input
+ state to the last configured analog input */
+ core->input = core->last_analog_input;
+ }
+
drv->advise_release(drv);
core->active_type_id = CX88_BOARD_NONE;
mpeg_dbg(1,"%s() Post release GPIO=%x\n", __func__, cx_read(MO_GP0_IO));
@@ -890,14 +901,8 @@ static struct pci_driver cx8802_pci_driver = {
static int __init cx8802_init(void)
{
- printk(KERN_INFO "cx88/2: cx2388x MPEG-TS Driver Manager version %d.%d.%d loaded\n",
- (CX88_VERSION_CODE >> 16) & 0xff,
- (CX88_VERSION_CODE >> 8) & 0xff,
- CX88_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "cx88/2: cx2388x MPEG-TS Driver Manager version %s loaded\n",
+ CX88_VERSION);
return pci_register_driver(&cx8802_pci_driver);
}
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index cef4f282e5a..60d28fdd779 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -45,6 +45,7 @@
MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
+MODULE_VERSION(CX88_VERSION);
/* ------------------------------------------------------------------ */
@@ -220,7 +221,23 @@ static const struct cx88_ctrl cx8800_ctls[] = {
.reg = MO_UV_SATURATION,
.mask = 0x00ff,
.shift = 0,
- },{
+ }, {
+ .v = {
+ .id = V4L2_CID_SHARPNESS,
+ .name = "Sharpness",
+ .minimum = 0,
+ .maximum = 4,
+ .step = 1,
+ .default_value = 0x0,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ },
+ .off = 0,
+ /* NOTE: the value is converted and written to both even
+ and odd registers in the code */
+ .reg = MO_FILTER_ODD,
+ .mask = 7 << 7,
+ .shift = 7,
+ }, {
.v = {
.id = V4L2_CID_CHROMA_AGC,
.name = "Chroma AGC",
@@ -245,6 +262,20 @@ static const struct cx88_ctrl cx8800_ctls[] = {
.mask = 1 << 9,
.shift = 9,
}, {
+ .v = {
+ .id = V4L2_CID_BAND_STOP_FILTER,
+ .name = "Notch filter",
+ .minimum = 0,
+ .maximum = 3,
+ .step = 1,
+ .default_value = 0x0,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ },
+ .off = 0,
+ .reg = MO_HTOTAL,
+ .mask = 3 << 11,
+ .shift = 11,
+ }, {
/* --- audio --- */
.v = {
.id = V4L2_CID_AUDIO_MUTE,
@@ -300,8 +331,10 @@ const u32 cx88_user_ctrls[] = {
V4L2_CID_AUDIO_VOLUME,
V4L2_CID_AUDIO_BALANCE,
V4L2_CID_AUDIO_MUTE,
+ V4L2_CID_SHARPNESS,
V4L2_CID_CHROMA_AGC,
V4L2_CID_COLOR_KILLER,
+ V4L2_CID_BAND_STOP_FILTER,
0
};
EXPORT_SYMBOL(cx88_user_ctrls);
@@ -962,6 +995,10 @@ int cx88_get_control (struct cx88_core *core, struct v4l2_control *ctl)
case V4L2_CID_AUDIO_VOLUME:
ctl->value = 0x3f - (value & 0x3f);
break;
+ case V4L2_CID_SHARPNESS:
+ ctl->value = ((value & 0x0200) ? (((value & 0x0180) >> 7) + 1)
+ : 0);
+ break;
default:
ctl->value = ((value + (c->off << c->shift)) & c->mask) >> c->shift;
break;
@@ -1039,6 +1076,12 @@ int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl)
}
mask=0xffff;
break;
+ case V4L2_CID_SHARPNESS:
+ /* 0b000, 0b100, 0b101, 0b110, or 0b111 */
+ value = (ctl->value < 1 ? 0 : ((ctl->value + 3) << 7));
+ /* needs to be set for both fields */
+ cx_andor(MO_FILTER_EVEN, mask, value);
+ break;
case V4L2_CID_CHROMA_AGC:
/* Do not allow chroma AGC to be enabled for SECAM */
value = ((ctl->value - c->off) << c->shift) & c->mask;
@@ -1161,7 +1204,6 @@ static int vidioc_querycap (struct file *file, void *priv,
strcpy(cap->driver, "cx8800");
strlcpy(cap->card, core->board.name, sizeof(cap->card));
sprintf(cap->bus_info,"PCI:%s",pci_name(dev->pci));
- cap->version = CX88_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
@@ -1480,7 +1522,6 @@ static int radio_querycap (struct file *file, void *priv,
strcpy(cap->driver, "cx8800");
strlcpy(cap->card, core->board.name, sizeof(cap->card));
sprintf(cap->bus_info,"PCI:%s", pci_name(dev->pci));
- cap->version = CX88_VERSION_CODE;
cap->capabilities = V4L2_CAP_TUNER;
return 0;
}
@@ -2139,14 +2180,8 @@ static struct pci_driver cx8800_pci_driver = {
static int __init cx8800_init(void)
{
- printk(KERN_INFO "cx88/0: cx2388x v4l2 driver version %d.%d.%d loaded\n",
- (CX88_VERSION_CODE >> 16) & 0xff,
- (CX88_VERSION_CODE >> 8) & 0xff,
- CX88_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "cx88/0: cx2388x v4l2 driver version %s loaded\n",
+ CX88_VERSION);
return pci_register_driver(&cx8800_pci_driver);
}
@@ -2157,11 +2192,3 @@ static void __exit cx8800_fini(void)
module_init(cx8800_init);
module_exit(cx8800_fini);
-
-/* ----------------------------------------------------------- */
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- * kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
- */
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index a399a8b086b..fa8d307e1a3 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -39,9 +39,9 @@
#include "cx88-reg.h"
#include "tuner-xc2028.h"
-#include <linux/version.h>
#include <linux/mutex.h>
-#define CX88_VERSION_CODE KERNEL_VERSION(0, 0, 8)
+
+#define CX88_VERSION "0.0.9"
#define UNSET (-1U)
@@ -242,6 +242,8 @@ extern const struct sram_channel const cx88_sram_channels[];
#define CX88_BOARD_SAMSUNG_SMT_7020 84
#define CX88_BOARD_TWINHAN_VP1027_DVBS 85
#define CX88_BOARD_TEVII_S464 86
+#define CX88_BOARD_WINFAST_DTV2000H_PLUS 87
+#define CX88_BOARD_WINFAST_DTV1800H_XC4000 88
enum cx88_itype {
CX88_VMUX_COMPOSITE1 = 1,
@@ -375,6 +377,7 @@ struct cx88_core {
u32 audiomode_manual;
u32 audiomode_current;
u32 input;
+ u32 last_analog_input;
u32 astat;
u32 use_nicam;
unsigned long last_change;
diff --git a/drivers/media/video/davinci/Kconfig b/drivers/media/video/davinci/Kconfig
index 6b195403564..60a456ebdc7 100644
--- a/drivers/media/video/davinci/Kconfig
+++ b/drivers/media/video/davinci/Kconfig
@@ -91,3 +91,26 @@ config VIDEO_ISIF
To compile this driver as a module, choose M here: the
module will be called vpfe.
+
+config VIDEO_DM644X_VPBE
+ tristate "DM644X VPBE HW module"
+ depends on ARCH_DAVINCI_DM644x
+ select VIDEO_VPSS_SYSTEM
+ select VIDEOBUF_DMA_CONTIG
+ help
+ Enables VPBE modules used for display on a DM644x
+ SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vpbe.
+
+
+config VIDEO_VPBE_DISPLAY
+ tristate "VPBE V4L2 Display driver"
+ depends on ARCH_DAVINCI_DM644x
+ select VIDEO_DM644X_VPBE
+ help
+ Enables VPBE V4L2 Display driver on a DM644x device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vpbe_display.
diff --git a/drivers/media/video/davinci/Makefile b/drivers/media/video/davinci/Makefile
index a37955745aa..ae7dafb689a 100644
--- a/drivers/media/video/davinci/Makefile
+++ b/drivers/media/video/davinci/Makefile
@@ -16,3 +16,5 @@ obj-$(CONFIG_VIDEO_VPFE_CAPTURE) += vpfe_capture.o
obj-$(CONFIG_VIDEO_DM6446_CCDC) += dm644x_ccdc.o
obj-$(CONFIG_VIDEO_DM355_CCDC) += dm355_ccdc.o
obj-$(CONFIG_VIDEO_ISIF) += isif.o
+obj-$(CONFIG_VIDEO_DM644X_VPBE) += vpbe.o vpbe_osd.o vpbe_venc.o
+obj-$(CONFIG_VIDEO_VPBE_DISPLAY) += vpbe_display.o
diff --git a/drivers/media/video/davinci/vpbe.c b/drivers/media/video/davinci/vpbe.c
new file mode 100644
index 00000000000..d773d30de22
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe.c
@@ -0,0 +1,864 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe.h>
+#include <media/davinci/vpss.h>
+#include <media/davinci/vpbe_venc.h>
+
+#define VPBE_DEFAULT_OUTPUT "Composite"
+#define VPBE_DEFAULT_MODE "ntsc"
+
+static char *def_output = VPBE_DEFAULT_OUTPUT;
+static char *def_mode = VPBE_DEFAULT_MODE;
+static int debug;
+
+module_param(def_output, charp, S_IRUGO);
+module_param(def_mode, charp, S_IRUGO);
+module_param(debug, int, 0644);
+
+MODULE_PARM_DESC(def_output, "vpbe output name (default:Composite)");
+MODULE_PARM_DESC(def_mode, "vpbe output mode name (default:ntsc)");
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+
+MODULE_DESCRIPTION("TI DMXXX VPBE Display controller");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+
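+/*
+ * Illustrative usage only (not driver code): when built as a module, the
+ * defaults above can be overridden at load time, e.g.
+ *
+ *	modprobe vpbe def_output=Composite def_mode=pal
+ *
+ * The accepted output and mode names depend on the board's vpbe_config;
+ * "Composite" and "pal"/"ntsc" are only examples.
+ */
+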
+/**
+ * vpbe_current_encoder_info - Get config info for current encoder
+ * @vpbe_dev - vpbe device ptr
+ *
+ * Return ptr to current encoder config info
+ */
+static struct encoder_config_info*
+vpbe_current_encoder_info(struct vpbe_device *vpbe_dev)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int index = vpbe_dev->current_sd_index;
+
+ return ((index == 0) ? &cfg->venc :
+ &cfg->ext_encoders[index-1]);
+}
+
+/**
+ * vpbe_find_encoder_sd_index - Given a name find encoder sd index
+ *
+ * @cfg - ptr to vpbe config
+ * @index - output index used by the application
+ *
+ * Return sd index of the encoder
+ */
+static int vpbe_find_encoder_sd_index(struct vpbe_config *cfg,
+ int index)
+{
+ char *encoder_name = cfg->outputs[index].subdev_name;
+ int i;
+
+ /* Venc is always first */
+ if (!strcmp(encoder_name, cfg->venc.module_name))
+ return 0;
+
+ for (i = 0; i < cfg->num_ext_encoders; i++) {
+ if (!strcmp(encoder_name,
+ cfg->ext_encoders[i].module_name))
+ return i+1;
+ }
+
+ return -EINVAL;
+}
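+
+/*
+ * For reference, the encoder sub device index convention used by the two
+ * helpers above is (illustration only):
+ *
+ *	sd_index 0       -> cfg->venc
+ *	sd_index i >= 1  -> cfg->ext_encoders[i - 1]
+ */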
+
+/**
+ * vpbe_g_cropcap - Get crop capabilities of the display
+ * @vpbe_dev - vpbe device ptr
+ * @cropcap - cropcap is a ptr to struct v4l2_cropcap
+ *
+ * Update the crop capabilities in crop cap for current
+ * mode
+ */
+static int vpbe_g_cropcap(struct vpbe_device *vpbe_dev,
+ struct v4l2_cropcap *cropcap)
+{
+ if (NULL == cropcap)
+ return -EINVAL;
+ cropcap->bounds.left = 0;
+ cropcap->bounds.top = 0;
+ cropcap->bounds.width = vpbe_dev->current_timings.xres;
+ cropcap->bounds.height = vpbe_dev->current_timings.yres;
+ cropcap->defrect = cropcap->bounds;
+
+ return 0;
+}
+
+/**
+ * vpbe_enum_outputs - enumerate outputs
+ * @vpbe_dev - vpbe device ptr
+ * @output - ptr to v4l2_output structure
+ *
+ * Enumerates the outputs available at the vpbe display.
+ * Returns the status, -EINVAL at the end of the output list
+ */
+static int vpbe_enum_outputs(struct vpbe_device *vpbe_dev,
+ struct v4l2_output *output)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int temp_index = output->index;
+
+ if (temp_index >= cfg->num_outputs)
+ return -EINVAL;
+
+ *output = cfg->outputs[temp_index].output;
+ output->index = temp_index;
+
+ return 0;
+}
+
+static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct vpbe_enc_mode_info var;
+ int curr_output = vpbe_dev->current_out_index;
+ int i;
+
+ if (NULL == mode)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->outputs[curr_output].num_modes; i++) {
+ var = cfg->outputs[curr_output].modes[i];
+ if (!strcmp(mode, var.name)) {
+ vpbe_dev->current_timings = var;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int vpbe_get_current_mode_info(struct vpbe_device *vpbe_dev,
+ struct vpbe_enc_mode_info *mode_info)
+{
+ if (NULL == mode_info)
+ return -EINVAL;
+
+ *mode_info = vpbe_dev->current_timings;
+
+ return 0;
+}
+
+static int vpbe_get_dv_preset_info(struct vpbe_device *vpbe_dev,
+ unsigned int dv_preset)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct vpbe_enc_mode_info var;
+ int curr_output = vpbe_dev->current_out_index;
+ int i;
+
+ for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
+ var = cfg->outputs[curr_output].modes[i];
+ if ((var.timings_type & VPBE_ENC_DV_PRESET) &&
+ (var.timings.dv_preset == dv_preset)) {
+ vpbe_dev->current_timings = var;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/* Get std by std id */
+static int vpbe_get_std_info(struct vpbe_device *vpbe_dev,
+ v4l2_std_id std_id)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct vpbe_enc_mode_info var;
+ int curr_output = vpbe_dev->current_out_index;
+ int i;
+
+ for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
+ var = cfg->outputs[curr_output].modes[i];
+ if ((var.timings_type & VPBE_ENC_STD) &&
+ (var.timings.std_id & std_id)) {
+ vpbe_dev->current_timings = var;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int vpbe_get_std_info_by_name(struct vpbe_device *vpbe_dev,
+ char *std_name)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct vpbe_enc_mode_info var;
+ int curr_output = vpbe_dev->current_out_index;
+ int i;
+
+ for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
+ var = cfg->outputs[curr_output].modes[i];
+ if (!strcmp(var.name, std_name)) {
+ vpbe_dev->current_timings = var;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_set_output - Set output
+ * @vpbe_dev - vpbe device ptr
+ * @index - index of output
+ *
+ * Set vpbe output to the output specified by the index
+ */
+static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
+{
+ struct encoder_config_info *curr_enc_info =
+ vpbe_current_encoder_info(vpbe_dev);
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int enc_out_index;
+ int sd_index;
+ int ret = 0;
+
+ if (index >= cfg->num_outputs)
+ return -EINVAL;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ sd_index = vpbe_dev->current_sd_index;
+ enc_out_index = cfg->outputs[index].output.index;
+ /*
+ * Currently we switch the encoder based on the output selected
+ * by the application. If the media controller is implemented later,
+ * an API will be added to set up the link between the venc and the
+ * external encoder. In that case the comparison below will always
+ * match and the encoder will not be switched. But if the application
+ * chooses not to use the media controller, then this provides the
+ * current way of switching the encoder at the venc output.
+ */
+ if (strcmp(curr_enc_info->module_name,
+ cfg->outputs[index].subdev_name)) {
+ /* Need to switch the encoder at the output */
+ sd_index = vpbe_find_encoder_sd_index(cfg, index);
+ if (sd_index < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ret)
+ goto out;
+ }
+
+ /* Set output at the encoder */
+ ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+ s_routing, 0, enc_out_index, 0);
+ if (ret)
+ goto out;
+
+ /*
+ * It is assumed that the venc or external encoder will set a default
+ * mode in the sub device. For an external encoder or LCD panel output,
+ * we also need to set up the lcd port for the required mode. So set up
+ * the lcd port for the default mode that is configured in the board
+ * arch/arm/mach-davinci/board-dm355-evm.setup file for the external
+ * encoder.
+ */
+ ret = vpbe_get_mode_info(vpbe_dev,
+ cfg->outputs[index].default_mode);
+ if (!ret) {
+ struct osd_state *osd_device = vpbe_dev->osd_device;
+
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+ vpbe_dev->current_sd_index = sd_index;
+ vpbe_dev->current_out_index = index;
+ }
+out:
+ mutex_unlock(&vpbe_dev->lock);
+ return ret;
+}
+
+static int vpbe_set_default_output(struct vpbe_device *vpbe_dev)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < cfg->num_outputs; i++) {
+ if (!strcmp(def_output,
+ cfg->outputs[i].output.name)) {
+ ret = vpbe_set_output(vpbe_dev, i);
+ if (!ret)
+ vpbe_dev->current_out_index = i;
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/**
+ * vpbe_get_output - Get output
+ * @vpbe_dev - vpbe device ptr
+ *
+ * Return the index of the current vpbe output
+ */
+static unsigned int vpbe_get_output(struct vpbe_device *vpbe_dev)
+{
+ return vpbe_dev->current_out_index;
+}
+
+/**
+ * vpbe_s_dv_preset - Set the given preset timings in the encoder
+ *
+ * Sets the preset if supported by the current encoder. Return the status.
+ * 0 - success & -EINVAL on error
+ */
+static int vpbe_s_dv_preset(struct vpbe_device *vpbe_dev,
+ struct v4l2_dv_preset *dv_preset)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+ int sd_index = vpbe_dev->current_sd_index;
+ int ret;
+
+ if (!(cfg->outputs[out_index].output.capabilities &
+ V4L2_OUT_CAP_PRESETS))
+ return -EINVAL;
+
+ ret = vpbe_get_dv_preset_info(vpbe_dev, dv_preset->preset);
+
+ if (ret)
+ return ret;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+ s_dv_preset, dv_preset);
+ /* set the lcd controller output for the given mode */
+ if (!ret) {
+ struct osd_state *osd_device = vpbe_dev->osd_device;
+
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+ }
+ mutex_unlock(&vpbe_dev->lock);
+
+ return ret;
+}
+
+/**
+ * vpbe_g_dv_preset - Get the preset in the current encoder
+ *
+ * Get the preset in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_g_dv_preset(struct vpbe_device *vpbe_dev,
+ struct v4l2_dv_preset *dv_preset)
+{
+ if (vpbe_dev->current_timings.timings_type &
+ VPBE_ENC_DV_PRESET) {
+ dv_preset->preset = vpbe_dev->current_timings.timings.dv_preset;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_enum_dv_presets - Enumerate the dv presets in the current encoder
+ *
+ * Get the preset in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_enum_dv_presets(struct vpbe_device *vpbe_dev,
+ struct v4l2_dv_enum_preset *preset_info)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+ struct vpbe_output *output = &cfg->outputs[out_index];
+ int j = 0;
+ int i;
+
+ if (!(output->output.capabilities & V4L2_OUT_CAP_PRESETS))
+ return -EINVAL;
+
+ for (i = 0; i < output->num_modes; i++) {
+ if (output->modes[i].timings_type == VPBE_ENC_DV_PRESET) {
+ if (j == preset_info->index)
+ break;
+ j++;
+ }
+ }
+
+ if (i == output->num_modes)
+ return -EINVAL;
+
+ return v4l_fill_dv_preset_info(output->modes[i].timings.dv_preset,
+ preset_info);
+}
+
+/**
+ * vpbe_s_std - Set the given standard in the encoder
+ *
+ * Sets the standard if supported by the current encoder. Return the status.
+ * 0 - success & -EINVAL on error
+ */
+static int vpbe_s_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
+{
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ int out_index = vpbe_dev->current_out_index;
+ int sd_index = vpbe_dev->current_sd_index;
+ int ret;
+
+ if (!(cfg->outputs[out_index].output.capabilities &
+ V4L2_OUT_CAP_STD))
+ return -EINVAL;
+
+ ret = vpbe_get_std_info(vpbe_dev, *std_id);
+ if (ret)
+ return ret;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
+ s_std_output, *std_id);
+ /* set the lcd controller output for the given mode */
+ if (!ret) {
+ struct osd_state *osd_device = vpbe_dev->osd_device;
+
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+ }
+ mutex_unlock(&vpbe_dev->lock);
+
+ return ret;
+}
+
+/**
+ * vpbe_g_std - Get the standard in the current encoder
+ *
+ * Get the standard in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_g_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
+{
+ struct vpbe_enc_mode_info cur_timings = vpbe_dev->current_timings;
+
+ if (cur_timings.timings_type & VPBE_ENC_STD) {
+ *std_id = cur_timings.timings.std_id;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_set_mode - Set mode in the current encoder using mode info
+ *
+ * Use the mode string to decide what timings to set in the encoder
+ * This is typically useful when fbset command is used to change the current
+ * timings by specifying a string to indicate the timings.
+ */
+static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
+ struct vpbe_enc_mode_info *mode_info)
+{
+ struct vpbe_enc_mode_info *preset_mode = NULL;
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+ struct v4l2_dv_preset dv_preset;
+ struct osd_state *osd_device;
+ int out_index = vpbe_dev->current_out_index;
+ int ret = 0;
+ int i;
+
+ if ((NULL == mode_info) || (NULL == mode_info->name))
+ return -EINVAL;
+
+ for (i = 0; i < cfg->outputs[out_index].num_modes; i++) {
+ if (!strcmp(mode_info->name,
+ cfg->outputs[out_index].modes[i].name)) {
+ preset_mode = &cfg->outputs[out_index].modes[i];
+ /*
+ * it may be one of the 3 timings type. Check and
+ * invoke right API
+ */
+ if (preset_mode->timings_type & VPBE_ENC_STD)
+ return vpbe_s_std(vpbe_dev,
+ &preset_mode->timings.std_id);
+ if (preset_mode->timings_type & VPBE_ENC_DV_PRESET) {
+ dv_preset.preset =
+ preset_mode->timings.dv_preset;
+ return vpbe_s_dv_preset(vpbe_dev, &dv_preset);
+ }
+ }
+ }
+
+ /* Only custom timing should reach here */
+ if (preset_mode == NULL)
+ return -EINVAL;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ osd_device = vpbe_dev->osd_device;
+ vpbe_dev->current_timings = *preset_mode;
+ osd_device->ops.set_left_margin(osd_device,
+ vpbe_dev->current_timings.left_margin);
+ osd_device->ops.set_top_margin(osd_device,
+ vpbe_dev->current_timings.upper_margin);
+
+ mutex_unlock(&vpbe_dev->lock);
+
+ return ret;
+}
+
+static int vpbe_set_default_mode(struct vpbe_device *vpbe_dev)
+{
+ int ret;
+
+ ret = vpbe_get_std_info_by_name(vpbe_dev, def_mode);
+ if (ret)
+ return ret;
+
+ /* set the default mode in the encoder */
+ return vpbe_set_mode(vpbe_dev, &vpbe_dev->current_timings);
+}
+
+static int platform_device_get(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vpbe_device *vpbe_dev = data;
+
+ if (strcmp("vpbe-osd", pdev->name) == 0)
+ vpbe_dev->osd_device = platform_get_drvdata(pdev);
+
+ return 0;
+}
+
+/**
+ * vpbe_initialize() - Initialize the vpbe display controller
+ * @vpbe_dev - vpbe device ptr
+ *
+ * The master frame buffer device driver calls this to initialize the vpbe
+ * display controller. It then registers the v4l2 device and the sub devices
+ * and sets a current encoder sub device for display. The v4l2 display
+ * device driver is the master and the frame buffer display device driver
+ * is the slave. The frame buffer display driver checks this initialization
+ * during probe and exits if the controller is not initialized. Returns status.
+ */
+static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
+{
+ struct encoder_config_info *enc_info;
+ struct v4l2_subdev **enc_subdev;
+ struct osd_state *osd_device;
+ struct i2c_adapter *i2c_adap;
+ int output_index;
+ int num_encoders;
+ int ret = 0;
+ int err;
+ int i;
+
+ /*
+ * The v4l2 and FBDev frame buffer devices get the vpbe_dev pointer
+ * from the platform device by iterating over the platform drivers and
+ * matching the device name
+ */
+ if (NULL == vpbe_dev || NULL == dev) {
+ printk(KERN_ERR "Null device pointers.\n");
+ return -ENODEV;
+ }
+
+ if (vpbe_dev->initialized)
+ return 0;
+
+ mutex_lock(&vpbe_dev->lock);
+
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
+ /* We have dac clock available for platform */
+ vpbe_dev->dac_clk = clk_get(vpbe_dev->pdev, "vpss_dac");
+ if (IS_ERR(vpbe_dev->dac_clk)) {
+ ret = PTR_ERR(vpbe_dev->dac_clk);
+ goto vpbe_unlock;
+ }
+ if (clk_enable(vpbe_dev->dac_clk)) {
+ ret = -ENODEV;
+ goto vpbe_unlock;
+ }
+ }
+
+ /* first enable vpss clocks */
+ vpss_enable_clock(VPSS_VPBE_CLOCK, 1);
+
+ /* First register a v4l2 device */
+ ret = v4l2_device_register(dev, &vpbe_dev->v4l2_dev);
+ if (ret) {
+ v4l2_err(dev->driver,
+ "Unable to register v4l2 device.\n");
+ goto vpbe_fail_clock;
+ }
+ v4l2_info(&vpbe_dev->v4l2_dev, "vpbe v4l2 device registered\n");
+
+ err = bus_for_each_dev(&platform_bus_type, NULL, vpbe_dev,
+ platform_device_get);
+ if (err < 0) {
+ ret = err;
+ goto vpbe_fail_v4l2_device;
+ }
+
+ vpbe_dev->venc = venc_sub_dev_init(&vpbe_dev->v4l2_dev,
+ vpbe_dev->cfg->venc.module_name);
+ /* register venc sub device */
+ if (vpbe_dev->venc == NULL) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "vpbe unable to init venc sub device\n");
+ ret = -ENODEV;
+ goto vpbe_fail_v4l2_device;
+ }
+ /* initialize osd device */
+ osd_device = vpbe_dev->osd_device;
+
+ if (NULL != osd_device->ops.initialize) {
+ err = osd_device->ops.initialize(osd_device);
+ if (err) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "unable to initialize the OSD device");
+ err = -ENOMEM;
+ goto vpbe_fail_v4l2_device;
+ }
+ }
+
+ /*
+ * Register any external encoders that are configured. At index 0 we
+ * store venc sd index.
+ */
+ num_encoders = vpbe_dev->cfg->num_ext_encoders + 1;
+ vpbe_dev->encoders = kmalloc(
+ sizeof(struct v4l2_subdev *)*num_encoders,
+ GFP_KERNEL);
+ if (NULL == vpbe_dev->encoders) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "unable to allocate memory for encoders sub devices");
+ ret = -ENOMEM;
+ goto vpbe_fail_v4l2_device;
+ }
+
+ i2c_adap = i2c_get_adapter(vpbe_dev->cfg->i2c_adapter_id);
+ for (i = 0; i < (vpbe_dev->cfg->num_ext_encoders + 1); i++) {
+ if (i == 0) {
+ /* venc is at index 0 */
+ enc_subdev = &vpbe_dev->encoders[i];
+ *enc_subdev = vpbe_dev->venc;
+ continue;
+ }
+ enc_info = &vpbe_dev->cfg->ext_encoders[i];
+ if (enc_info->is_i2c) {
+ enc_subdev = &vpbe_dev->encoders[i];
+ *enc_subdev = v4l2_i2c_new_subdev_board(
+ &vpbe_dev->v4l2_dev, i2c_adap,
+ &enc_info->board_info, NULL);
+ if (*enc_subdev)
+ v4l2_info(&vpbe_dev->v4l2_dev,
+ "v4l2 sub device %s registered\n",
+ enc_info->module_name);
+ else {
+ v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s"
+ " failed to register",
+ enc_info->module_name);
+ ret = -ENODEV;
+ goto vpbe_fail_sd_register;
+ }
+ } else
+ v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders"
+ " currently not supported");
+ }
+
+ /* set the current encoder and output to that of venc by default */
+ vpbe_dev->current_sd_index = 0;
+ vpbe_dev->current_out_index = 0;
+ output_index = 0;
+
+ mutex_unlock(&vpbe_dev->lock);
+
+ printk(KERN_NOTICE "Setting default output to %s\n", def_output);
+ ret = vpbe_set_default_output(vpbe_dev);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s",
+ def_output);
+ return ret;
+ }
+
+ printk(KERN_NOTICE "Setting default mode to %s\n", def_mode);
+ ret = vpbe_set_default_mode(vpbe_dev);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s",
+ def_mode);
+ return ret;
+ }
+ vpbe_dev->initialized = 1;
+ /* TBD handling of bootargs for default output and mode */
+ return 0;
+
+vpbe_fail_sd_register:
+ kfree(vpbe_dev->encoders);
+vpbe_fail_v4l2_device:
+ v4l2_device_unregister(&vpbe_dev->v4l2_dev);
+vpbe_fail_clock:
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
+ clk_put(vpbe_dev->dac_clk);
+vpbe_unlock:
+ mutex_unlock(&vpbe_dev->lock);
+ return ret;
+}
+
+/**
+ * vpbe_deinitialize() - de-initialize the vpbe display controller
+ * @dev - Master and slave device ptr
+ *
+ * The vpbe master and slave frame buffer devices call this to de-initialize
+ * the display controller. It is called when the master and slave device
+ * driver modules are removed and the display controller is no longer needed.
+ */
+static void vpbe_deinitialize(struct device *dev, struct vpbe_device *vpbe_dev)
+{
+ v4l2_device_unregister(&vpbe_dev->v4l2_dev);
+ if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0)
+ clk_put(vpbe_dev->dac_clk);
+
+ kfree(vpbe_dev->encoders);
+ vpbe_dev->initialized = 0;
+ /* disable vpss clocks */
+ vpss_enable_clock(VPSS_VPBE_CLOCK, 0);
+}
+
+static struct vpbe_device_ops vpbe_dev_ops = {
+ .g_cropcap = vpbe_g_cropcap,
+ .enum_outputs = vpbe_enum_outputs,
+ .set_output = vpbe_set_output,
+ .get_output = vpbe_get_output,
+ .s_dv_preset = vpbe_s_dv_preset,
+ .g_dv_preset = vpbe_g_dv_preset,
+ .enum_dv_presets = vpbe_enum_dv_presets,
+ .s_std = vpbe_s_std,
+ .g_std = vpbe_g_std,
+ .initialize = vpbe_initialize,
+ .deinitialize = vpbe_deinitialize,
+ .get_mode_info = vpbe_get_current_mode_info,
+ .set_mode = vpbe_set_mode,
+};
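+
+/*
+ * Sketch of how a consumer drives this ops table (the V4L2 display driver
+ * in vpbe_display.c does essentially this); illustration only, not extra
+ * driver code. vpbe_dev is obtained from platform_get_drvdata():
+ *
+ *	v4l2_std_id std = V4L2_STD_NTSC;
+ *	int ret = -EINVAL;
+ *
+ *	if (vpbe_dev->ops.s_std)
+ *		ret = vpbe_dev->ops.s_std(vpbe_dev, &std);
+ */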
+
+static __devinit int vpbe_probe(struct platform_device *pdev)
+{
+ struct vpbe_device *vpbe_dev;
+ struct vpbe_config *cfg;
+ int ret = -EINVAL;
+
+ if (pdev->dev.platform_data == NULL) {
+ v4l2_err(pdev->dev.driver, "No platform data\n");
+ return -ENODEV;
+ }
+ cfg = pdev->dev.platform_data;
+
+ if (!cfg->module_name[0] ||
+ !cfg->osd.module_name[0] ||
+ !cfg->venc.module_name[0]) {
+ v4l2_err(pdev->dev.driver, "vpbe display module names not"
+ " defined\n");
+ return ret;
+ }
+
+ vpbe_dev = kzalloc(sizeof(*vpbe_dev), GFP_KERNEL);
+ if (vpbe_dev == NULL) {
+ v4l2_err(pdev->dev.driver, "Unable to allocate memory"
+ " for vpbe_device\n");
+ return -ENOMEM;
+ }
+ vpbe_dev->cfg = cfg;
+ vpbe_dev->ops = vpbe_dev_ops;
+ vpbe_dev->pdev = &pdev->dev;
+
+ if (cfg->outputs->num_modes > 0)
+ vpbe_dev->current_timings = vpbe_dev->cfg->outputs[0].modes[0];
+ else
+ return -ENODEV;
+
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, vpbe_dev);
+ mutex_init(&vpbe_dev->lock);
+
+ return 0;
+}
+
+static int vpbe_remove(struct platform_device *device)
+{
+ struct vpbe_device *vpbe_dev = platform_get_drvdata(device);
+
+ kfree(vpbe_dev);
+
+ return 0;
+}
+
+static struct platform_driver vpbe_driver = {
+ .driver = {
+ .name = "vpbe_controller",
+ .owner = THIS_MODULE,
+ },
+ .probe = vpbe_probe,
+ .remove = vpbe_remove,
+};
+
+/**
+ * vpbe_init: initialize the vpbe driver
+ *
+ * This function registers the driver with the kernel
+ */
+static __init int vpbe_init(void)
+{
+ return platform_driver_register(&vpbe_driver);
+}
+
+/**
+ * vpbe_cleanup : cleanup function for vpbe driver
+ *
+ * This unregisters the driver from the kernel
+ */
+static void vpbe_cleanup(void)
+{
+ platform_driver_unregister(&vpbe_driver);
+}
+
+/* Function for module initialization and cleanup */
+module_init(vpbe_init);
+module_exit(vpbe_cleanup);
diff --git a/drivers/media/video/davinci/vpbe_display.c b/drivers/media/video/davinci/vpbe_display.c
new file mode 100644
index 00000000000..7f1d83a6d57
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_display.c
@@ -0,0 +1,1860 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+
+#include <asm/pgtable.h>
+#include <mach/cputype.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_display.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe.h>
+#include <media/davinci/vpbe_venc.h>
+#include <media/davinci/vpbe_osd.h>
+#include "vpbe_venc_regs.h"
+
+#define VPBE_DISPLAY_DRIVER "vpbe-v4l2"
+
+static int debug;
+
+#define VPBE_DISPLAY_SD_BUF_SIZE (720*576*2)
+#define VPBE_DEFAULT_NUM_BUFS 3
+
+module_param(debug, int, 0644);
+
+static int venc_is_second_field(struct vpbe_display *disp_dev)
+{
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ int ret;
+ int val;
+
+ ret = v4l2_subdev_call(vpbe_dev->venc,
+ core,
+ ioctl,
+ VENC_GET_FLD,
+ &val);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in getting Field ID 0\n");
+ }
+ return val;
+}
+
+static void vpbe_isr_even_field(struct vpbe_display *disp_obj,
+ struct vpbe_layer *layer)
+{
+ struct timespec timevalue;
+
+ if (layer->cur_frm == layer->next_frm)
+ return;
+ ktime_get_ts(&timevalue);
+ layer->cur_frm->ts.tv_sec = timevalue.tv_sec;
+ layer->cur_frm->ts.tv_usec = timevalue.tv_nsec / NSEC_PER_USEC;
+ layer->cur_frm->state = VIDEOBUF_DONE;
+ wake_up_interruptible(&layer->cur_frm->done);
+ /* Make cur_frm pointing to next_frm */
+ layer->cur_frm = layer->next_frm;
+}
+
+static void vpbe_isr_odd_field(struct vpbe_display *disp_obj,
+ struct vpbe_layer *layer)
+{
+ struct osd_state *osd_device = disp_obj->osd_device;
+ unsigned long addr;
+
+ spin_lock(&disp_obj->dma_queue_lock);
+ if (list_empty(&layer->dma_queue) ||
+ (layer->cur_frm != layer->next_frm)) {
+ spin_unlock(&disp_obj->dma_queue_lock);
+ return;
+ }
+ /*
+ * One field is displayed; configure
+ * the next frame if it is available,
+ * otherwise hold on to the current frame.
+ * Get the next buffer from the queue.
+ */
+ layer->next_frm = list_entry(
+ layer->dma_queue.next,
+ struct videobuf_buffer,
+ queue);
+ /* Remove that from the buffer queue */
+ list_del(&layer->next_frm->queue);
+ spin_unlock(&disp_obj->dma_queue_lock);
+ /* Mark state of the frame to active */
+ layer->next_frm->state = VIDEOBUF_ACTIVE;
+ addr = videobuf_to_dma_contig(layer->next_frm);
+ osd_device->ops.start_layer(osd_device,
+ layer->layer_info.id,
+ addr,
+ disp_obj->cbcr_ofst);
+}
+
+/* interrupt service routine */
+static irqreturn_t venc_isr(int irq, void *arg)
+{
+ struct vpbe_display *disp_dev = (struct vpbe_display *)arg;
+ struct vpbe_layer *layer;
+ static unsigned last_event;
+ unsigned event = 0;
+ int fid;
+ int i;
+
+ if ((NULL == arg) || (NULL == disp_dev->dev[0]))
+ return IRQ_HANDLED;
+
+ if (venc_is_second_field(disp_dev))
+ event |= VENC_SECOND_FIELD;
+ else
+ event |= VENC_FIRST_FIELD;
+
+ if (event == (last_event & ~VENC_END_OF_FRAME)) {
+ /*
+ * If the display is non-interlaced, then we need to flag the
+ * end-of-frame event at every interrupt regardless of the
+ * value of the FIDST bit. We can conclude that the display is
+ * non-interlaced if the value of the FIDST bit is unchanged
+ * from the previous interrupt.
+ */
+ event |= VENC_END_OF_FRAME;
+ } else if (event == VENC_SECOND_FIELD) {
+ /* end-of-frame for interlaced display */
+ event |= VENC_END_OF_FRAME;
+ }
+ last_event = event;
+
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ layer = disp_dev->dev[i];
+ /* Skip the layer if streaming has not started on it */
+ if (!layer->started)
+ continue;
+
+ if (layer->layer_first_int) {
+ layer->layer_first_int = 0;
+ continue;
+ }
+ /* Check the field format */
+ if ((V4L2_FIELD_NONE == layer->pix_fmt.field) &&
+ (event & VENC_END_OF_FRAME)) {
+ /* Progressive mode */
+
+ vpbe_isr_even_field(disp_dev, layer);
+ vpbe_isr_odd_field(disp_dev, layer);
+ } else {
+ /* Interlaced mode */
+
+ layer->field_id ^= 1;
+ if (event & VENC_FIRST_FIELD)
+ fid = 0;
+ else
+ fid = 1;
+
+ /*
+ * If the field id does not match the stored
+ * field id
+ */
+ if (fid != layer->field_id) {
+ /* Make them in sync */
+ layer->field_id = fid;
+ continue;
+ }
+ /*
+ * device field id and local field id are
+ * in sync. If this is even field
+ */
+ if (0 == fid)
+ vpbe_isr_even_field(disp_dev, layer);
+ else /* odd field */
+ vpbe_isr_odd_field(disp_dev, layer);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
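+
+/*
+ * Worked example of the end-of-frame detection above (illustration only):
+ * on an interlaced display the field events alternate FIRST, SECOND,
+ * FIRST, ... so the "unchanged FIDST" comparison fails and
+ * VENC_END_OF_FRAME is set only on the SECOND field; on a progressive
+ * display every interrupt reports the same field event as the previous
+ * one, so VENC_END_OF_FRAME is flagged on every interrupt.
+ */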
+
+/*
+ * vpbe_buffer_prepare()
+ * This is the callback function called from the videobuf_qbuf() function;
+ * the buffer is prepared and the user space virtual address is converted
+ * into a physical address
+ */
+static int vpbe_buffer_prepare(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ unsigned long addr;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe_buffer_prepare\n");
+
+ /* If buffer is not initialized, initialize it */
+ if (VIDEOBUF_NEEDS_INIT == vb->state) {
+ vb->width = layer->pix_fmt.width;
+ vb->height = layer->pix_fmt.height;
+ vb->size = layer->pix_fmt.sizeimage;
+ vb->field = field;
+
+ ret = videobuf_iolock(q, vb, NULL);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to map user address\n");
+ return -EINVAL;
+ }
+
+ addr = videobuf_to_dma_contig(vb);
+
+ if (q->streaming) {
+ if (!IS_ALIGNED(addr, 8)) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "buffer_prepare: offset is not aligned to 32 bytes\n");
+ return -EINVAL;
+ }
+ }
+ vb->state = VIDEOBUF_PREPARED;
+ }
+ return 0;
+}
+
+/*
+ * vpbe_buffer_setup()
+ * This function allocates memory for the buffers
+ */
+static int vpbe_buffer_setup(struct videobuf_queue *q,
+ unsigned int *count,
+ unsigned int *size)
+{
+ /* Get the file handle object and layer object */
+ struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n");
+
+ *size = layer->pix_fmt.sizeimage;
+
+ /* Store number of buffers allocated in numbuffer member */
+ if (*count < VPBE_DEFAULT_NUM_BUFS)
+ *count = layer->numbuffers = VPBE_DEFAULT_NUM_BUFS;
+
+ return 0;
+}
+
+/*
+ * vpbe_buffer_queue()
+ * This function adds the buffer to DMA queue
+ */
+static void vpbe_buffer_queue(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ /* Get the file handle object and layer object */
+ struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_display *disp = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ unsigned long flags;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe_buffer_queue\n");
+
+ /* add the buffer to the DMA queue */
+ spin_lock_irqsave(&disp->dma_queue_lock, flags);
+ list_add_tail(&vb->queue, &layer->dma_queue);
+ spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
+ /* Change state of the buffer */
+ vb->state = VIDEOBUF_QUEUED;
+}
+
+/*
+ * vpbe_buffer_release()
+ * This function is called from the videobuf layer to free memory allocated to
+ * the buffers
+ */
+static void vpbe_buffer_release(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
+{
+ /* Get the file handle object and layer object */
+ struct vpbe_fh *fh = q->priv_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe_buffer_release\n");
+
+ if (V4L2_MEMORY_USERPTR != layer->memory)
+ videobuf_dma_contig_free(q, vb);
+
+ vb->state = VIDEOBUF_NEEDS_INIT;
+}
+
+static struct videobuf_queue_ops video_qops = {
+ .buf_setup = vpbe_buffer_setup,
+ .buf_prepare = vpbe_buffer_prepare,
+ .buf_queue = vpbe_buffer_queue,
+ .buf_release = vpbe_buffer_release,
+};
+
+static
+struct vpbe_layer*
+_vpbe_display_get_other_win_layer(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer)
+{
+ enum vpbe_display_device_id thiswin, otherwin;
+ thiswin = layer->device_id;
+
+ otherwin = (thiswin == VPBE_DISPLAY_DEVICE_0) ?
+ VPBE_DISPLAY_DEVICE_1 : VPBE_DISPLAY_DEVICE_0;
+ return disp_dev->dev[otherwin];
+}
+
+static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer)
+{
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ unsigned long addr;
+ int ret;
+
+ addr = videobuf_to_dma_contig(layer->cur_frm);
+ /* Set address in the display registers */
+ osd_device->ops.start_layer(osd_device,
+ layer->layer_info.id,
+ addr,
+ disp_dev->cbcr_ofst);
+
+ ret = osd_device->ops.enable_layer(osd_device,
+ layer->layer_info.id, 0);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in enabling osd window layer 0\n");
+ return -1;
+ }
+
+ /* Enable the window */
+ layer->layer_info.enable = 1;
+ if (cfg->pixfmt == PIXFMT_NV12) {
+ struct vpbe_layer *otherlayer =
+ _vpbe_display_get_other_win_layer(disp_dev, layer);
+
+ ret = osd_device->ops.enable_layer(osd_device,
+ otherlayer->layer_info.id, 1);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in enabling osd window layer 1\n");
+ return -1;
+ }
+ otherlayer->layer_info.enable = 1;
+ }
+ return 0;
+}
+
+static void
+vpbe_disp_calculate_scale_factor(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer,
+ int expected_xsize, int expected_ysize)
+{
+ struct display_layer_info *layer_info = &layer->layer_info;
+ struct v4l2_pix_format *pixfmt = &layer->pix_fmt;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ int calculated_xsize;
+ int h_exp = 0;
+ int v_exp = 0;
+ int h_scale;
+ int v_scale;
+
+ v4l2_std_id standard_id = vpbe_dev->current_timings.timings.std_id;
+
+ /*
+ * The application initially sets the image format. The current display
+ * size is obtained from the vpbe display controller. expected_xsize
+ * and expected_ysize are set through the S_CROP ioctl. Based on this,
+ * the driver calculates the scale factors for the vertical and
+ * horizontal directions so that the image is displayed scaled
+ * and expanded. The application uses expansion to display the image
+ * with square pixels; otherwise it is displayed using the display's
+ * pixel aspect ratio. It is expected that the application chooses
+ * the crop coordinates for cropped or scaled display. If the crop
+ * size is less than the image size, the image is displayed cropped;
+ * otherwise it is displayed scaled and/or expanded.
+ *
+ * To begin with, set the crop window the same as expected. Later we
+ * will override it with the scaled window size.
+ */
+
+ cfg->xsize = pixfmt->width;
+ cfg->ysize = pixfmt->height;
+ layer_info->h_zoom = ZOOM_X1; /* no horizontal zoom */
+ layer_info->v_zoom = ZOOM_X1; /* no vertical zoom */
+ layer_info->h_exp = H_EXP_OFF; /* no horizontal expansion */
+ layer_info->v_exp = V_EXP_OFF; /* no vertical expansion */
+
+ if (pixfmt->width < expected_xsize) {
+ h_scale = vpbe_dev->current_timings.xres / pixfmt->width;
+ if (h_scale < 2)
+ h_scale = 1;
+ else if (h_scale >= 4)
+ h_scale = 4;
+ else
+ h_scale = 2;
+ cfg->xsize *= h_scale;
+ if (cfg->xsize < expected_xsize) {
+ if ((standard_id & V4L2_STD_525_60) ||
+ (standard_id & V4L2_STD_625_50)) {
+ calculated_xsize = (cfg->xsize *
+ VPBE_DISPLAY_H_EXP_RATIO_N) /
+ VPBE_DISPLAY_H_EXP_RATIO_D;
+ if (calculated_xsize <= expected_xsize) {
+ h_exp = 1;
+ cfg->xsize = calculated_xsize;
+ }
+ }
+ }
+ if (h_scale == 2)
+ layer_info->h_zoom = ZOOM_X2;
+ else if (h_scale == 4)
+ layer_info->h_zoom = ZOOM_X4;
+ if (h_exp)
+ layer_info->h_exp = H_EXP_9_OVER_8;
+ } else {
+ /* no scaling, only cropping. Set display area to crop area */
+ cfg->xsize = expected_xsize;
+ }
+
+ if (pixfmt->height < expected_ysize) {
+ v_scale = expected_ysize / pixfmt->height;
+ if (v_scale < 2)
+ v_scale = 1;
+ else if (v_scale >= 4)
+ v_scale = 4;
+ else
+ v_scale = 2;
+ cfg->ysize *= v_scale;
+ if (cfg->ysize < expected_ysize) {
+ if ((standard_id & V4L2_STD_625_50)) {
+ calculated_xsize = (cfg->ysize *
+ VPBE_DISPLAY_V_EXP_RATIO_N) /
+ VPBE_DISPLAY_V_EXP_RATIO_D;
+ if (calculated_xsize <= expected_ysize) {
+ v_exp = 1;
+ cfg->ysize = calculated_xsize;
+ }
+ }
+ }
+ if (v_scale == 2)
+ layer_info->v_zoom = ZOOM_X2;
+ else if (v_scale == 4)
+ layer_info->v_zoom = ZOOM_X4;
+ if (v_exp)
+ layer_info->v_exp = V_EXP_6_OVER_5;
+ } else {
+ /* no scaling, only cropping. Set display area to crop area */
+ cfg->ysize = expected_ysize;
+ }
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "crop display xsize = %d, ysize = %d\n",
+ cfg->xsize, cfg->ysize);
+}
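+
+/*
+ * Worked example of the scaling above (illustration only, assuming an NTSC
+ * display timing of xres = 720 and that VPBE_DISPLAY_H_EXP_RATIO_N/D is
+ * 9/8, as the name H_EXP_9_OVER_8 suggests): for an image of width 320
+ * cropped to expected_xsize = 720,
+ *
+ *	h_scale = 720 / 320 = 2, so xsize = 320 * 2 = 640 and h_zoom = ZOOM_X2;
+ *	640 * 9 / 8 = 720 <= 720, so h_exp is set, xsize becomes 720 and
+ *	H_EXP_9_OVER_8 is applied.
+ */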
+
+static void vpbe_disp_adj_position(struct vpbe_display *disp_dev,
+ struct vpbe_layer *layer,
+ int top, int left)
+{
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+
+ cfg->xpos = min((unsigned int)left,
+ vpbe_dev->current_timings.xres - cfg->xsize);
+ cfg->ypos = min((unsigned int)top,
+ vpbe_dev->current_timings.yres - cfg->ysize);
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "new xpos = %d, ypos = %d\n",
+ cfg->xpos, cfg->ypos);
+}
+
+static void vpbe_disp_check_window_params(struct vpbe_display *disp_dev,
+ struct v4l2_rect *c)
+{
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+
+ if ((c->width == 0) ||
+ ((c->width + c->left) > vpbe_dev->current_timings.xres))
+ c->width = vpbe_dev->current_timings.xres - c->left;
+
+ if ((c->height == 0) || ((c->height + c->top) >
+ vpbe_dev->current_timings.yres))
+ c->height = vpbe_dev->current_timings.yres - c->top;
+
+ /* window height must be even for interlaced display */
+ if (vpbe_dev->current_timings.interlaced)
+ c->height &= (~0x01);
+
+}
+
+/**
+ * vpbe_try_format()
+ * If the user application provides width and height and has bytesperline
+ * set to zero, the driver calculates bytesperline and sizeimage based on
+ * hardware limits.
+ */
+static int vpbe_try_format(struct vpbe_display *disp_dev,
+ struct v4l2_pix_format *pixfmt, int check)
+{
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ int min_height = 1;
+ int min_width = 32;
+ int max_height;
+ int max_width;
+ int bpp;
+
+ if ((pixfmt->pixelformat != V4L2_PIX_FMT_UYVY) &&
+ (pixfmt->pixelformat != V4L2_PIX_FMT_NV12))
+ /* choose default as V4L2_PIX_FMT_UYVY */
+ pixfmt->pixelformat = V4L2_PIX_FMT_UYVY;
+
+ /* Check the field format */
+ if ((pixfmt->field != V4L2_FIELD_INTERLACED) &&
+ (pixfmt->field != V4L2_FIELD_NONE)) {
+ if (vpbe_dev->current_timings.interlaced)
+ pixfmt->field = V4L2_FIELD_INTERLACED;
+ else
+ pixfmt->field = V4L2_FIELD_NONE;
+ }
+
+ if (pixfmt->field == V4L2_FIELD_INTERLACED)
+ min_height = 2;
+
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
+ bpp = 1;
+ else
+ bpp = 2;
+
+ max_width = vpbe_dev->current_timings.xres;
+ max_height = vpbe_dev->current_timings.yres;
+
+ min_width /= bpp;
+
+ if (!pixfmt->width || (pixfmt->width < min_width) ||
+ (pixfmt->width > max_width)) {
+ pixfmt->width = vpbe_dev->current_timings.xres;
+ }
+
+ if (!pixfmt->height || (pixfmt->height < min_height) ||
+ (pixfmt->height > max_height)) {
+ pixfmt->height = vpbe_dev->current_timings.yres;
+ }
+
+ if (pixfmt->bytesperline < (pixfmt->width * bpp))
+ pixfmt->bytesperline = pixfmt->width * bpp;
+
+ /* Make the bytesperline 32 byte aligned */
+ pixfmt->bytesperline = ((pixfmt->width * bpp + 31) & ~31);
+
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height +
+ (pixfmt->bytesperline * pixfmt->height >> 1);
+ else
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+ return 0;
+}
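+
+/*
+ * Example of the size arithmetic above (illustration only), for a full
+ * 720x576 frame:
+ *
+ *	UYVY (bpp = 2): bytesperline = ((720 * 2 + 31) & ~31) = 1440,
+ *			sizeimage = 1440 * 576 = 829440
+ *	NV12 (bpp = 1): bytesperline = ((720 * 1 + 31) & ~31) = 736,
+ *			sizeimage = 736 * 576 + (736 * 576 >> 1) = 635904
+ */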
+
+static int vpbe_display_g_priority(struct file *file, void *priv,
+ enum v4l2_priority *p)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+
+ *p = v4l2_prio_max(&layer->prio);
+
+ return 0;
+}
+
+static int vpbe_display_s_priority(struct file *file, void *priv,
+ enum v4l2_priority p)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ int ret;
+
+ ret = v4l2_prio_change(&layer->prio, &fh->prio, p);
+
+ return ret;
+}
+
+static int vpbe_display_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ cap->version = VPBE_DISPLAY_VERSION_CODE;
+ cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ strlcpy(cap->driver, VPBE_DISPLAY_DRIVER, sizeof(cap->driver));
+ strlcpy(cap->bus_info, "platform", sizeof(cap->bus_info));
+ strlcpy(cap->card, vpbe_dev->cfg->module_name, sizeof(cap->card));
+
+ return 0;
+}
+
+static int vpbe_display_s_crop(struct file *file, void *priv,
+ struct v4l2_crop *crop)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ struct v4l2_rect *rect = &crop->c;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_S_CROP, layer id = %d\n", layer->device_id);
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+
+ if (rect->top < 0)
+ rect->top = 0;
+ if (rect->left < 0)
+ rect->left = 0;
+
+ vpbe_disp_check_window_params(disp_dev, rect);
+
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+
+ vpbe_disp_calculate_scale_factor(disp_dev, layer,
+ rect->width,
+ rect->height);
+ vpbe_disp_adj_position(disp_dev, layer, rect->top,
+ rect->left);
+ ret = osd_device->ops.set_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in set layer config:\n");
+ return -EINVAL;
+ }
+
+ /* apply zooming and h or v expansion */
+ osd_device->ops.set_zoom(osd_device,
+ layer->layer_info.id,
+ layer->layer_info.h_zoom,
+ layer->layer_info.v_zoom);
+ ret = osd_device->ops.set_vid_expansion(osd_device,
+ layer->layer_info.h_exp,
+ layer->layer_info.v_exp);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in set vid expansion:\n");
+ return -EINVAL;
+ }
+
+ if ((layer->layer_info.h_zoom != ZOOM_X1) ||
+ (layer->layer_info.v_zoom != ZOOM_X1) ||
+ (layer->layer_info.h_exp != H_EXP_OFF) ||
+ (layer->layer_info.v_exp != V_EXP_OFF))
+ /* Enable expansion filter */
+ osd_device->ops.set_interpolation_filter(osd_device, 1);
+ else
+ osd_device->ops.set_interpolation_filter(osd_device, 0);
+
+ return 0;
+}
+
+static int vpbe_display_g_crop(struct file *file, void *priv,
+ struct v4l2_crop *crop)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct osd_state *osd_device = fh->disp_dev->osd_device;
+ struct v4l2_rect *rect = &crop->c;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_G_CROP, layer id = %d\n",
+ layer->device_id);
+
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buf type\n");
+ return -EINVAL;
+ }
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ rect->top = cfg->ypos;
+ rect->left = cfg->xpos;
+ rect->width = cfg->xsize;
+ rect->height = cfg->ysize;
+
+ return 0;
+}
+
+static int vpbe_display_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *cropcap)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_CROPCAP ioctl\n");
+
+ cropcap->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ cropcap->bounds.left = 0;
+ cropcap->bounds.top = 0;
+ cropcap->bounds.width = vpbe_dev->current_timings.xres;
+ cropcap->bounds.height = vpbe_dev->current_timings.yres;
+ cropcap->pixelaspect = vpbe_dev->current_timings.aspect;
+ cropcap->defrect = cropcap->bounds;
+ return 0;
+}
+
+static int vpbe_display_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_G_FMT, layer id = %d\n",
+ layer->device_id);
+
+ /* If buffer type is video output */
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n");
+ return -EINVAL;
+ }
+ /* Fill in the information about format */
+ fmt->fmt.pix = layer->pix_fmt;
+
+ return 0;
+}
+
+static int vpbe_display_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ unsigned int index = 0;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_ENUM_FMT, layer id = %d\n",
+ layer->device_id);
+ if (fmt->index > 1) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid format index\n");
+ return -EINVAL;
+ }
+
+ /* Fill in the information about format */
+ index = fmt->index;
+ memset(fmt, 0, sizeof(*fmt));
+ fmt->index = index;
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ if (index == 0) {
+ strcpy(fmt->description, "YUV 4:2:2 - UYVY");
+ fmt->pixelformat = V4L2_PIX_FMT_UYVY;
+ } else {
+ strcpy(fmt->description, "Y/CbCr 4:2:0");
+ fmt->pixelformat = V4L2_PIX_FMT_NV12;
+ }
+
+ return 0;
+}
+
+static int vpbe_display_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_S_FMT, layer id = %d\n",
+ layer->device_id);
+
+ /* If streaming is started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "invalid type\n");
+ return -EINVAL;
+ }
+ /* Check for valid pixel format */
+ ret = vpbe_try_format(disp_dev, pixfmt, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * If YUV420 is requested, check the availability of the
+ * other video window
+ */
+
+ layer->pix_fmt = *pixfmt;
+
+ /* Get osd layer config */
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ /* Store the pixel format in the layer object */
+ cfg->xsize = pixfmt->width;
+ cfg->ysize = pixfmt->height;
+ cfg->line_length = pixfmt->bytesperline;
+ cfg->ypos = 0;
+ cfg->xpos = 0;
+ cfg->interlaced = vpbe_dev->current_timings.interlaced;
+
+ if (V4L2_PIX_FMT_UYVY == pixfmt->pixelformat)
+ cfg->pixfmt = PIXFMT_YCbCrI;
+
+ /* Change of the default pixel format for both video windows */
+ if (V4L2_PIX_FMT_NV12 == pixfmt->pixelformat) {
+ struct vpbe_layer *otherlayer;
+ cfg->pixfmt = PIXFMT_NV12;
+ otherlayer = _vpbe_display_get_other_win_layer(disp_dev,
+ layer);
+ otherlayer->layer_info.config.pixfmt = PIXFMT_NV12;
+ }
+
+ /* Set the layer config in the osd window */
+ ret = osd_device->ops.set_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+ if (ret < 0) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Error in S_FMT params:\n");
+ return -EINVAL;
+ }
+
+ /* Readback and fill the local copy of current pix format */
+ osd_device->ops.get_layer_config(osd_device,
+ layer->layer_info.id, cfg);
+
+ return 0;
+}
+
+static int vpbe_display_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_TRY_FMT\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n");
+ return -EINVAL;
+ }
+
+ /* Check for valid field format */
+ return vpbe_try_format(disp_dev, pixfmt, 0);
+
+}
+
+/**
+ * vpbe_display_s_std - Set the given standard in the encoder
+ *
+ * Sets the standard if supported by the current encoder. Return the status.
+ * 0 - success & -EINVAL on error
+ */
+static int vpbe_display_s_std(struct file *file, void *priv,
+ v4l2_std_id *std_id)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_STD\n");
+
+ /* If streaming is started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+ if (NULL != vpbe_dev->ops.s_std) {
+ ret = vpbe_dev->ops.s_std(vpbe_dev, std_id);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to set standard for sub devices\n");
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpbe_display_g_std - Get the standard in the current encoder
+ *
+ * Get the standard in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int vpbe_display_g_std(struct file *file, void *priv,
+ v4l2_std_id *std_id)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_STD\n");
+
+ /* Get the standard from the current encoder */
+ if (vpbe_dev->current_timings.timings_type & VPBE_ENC_STD) {
+ *std_id = vpbe_dev->current_timings.timings.std_id;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * vpbe_display_enum_output - enumerate outputs
+ *
+ * Enumerates the outputs available at the vpbe display.
+ * Returns the status, -EINVAL at the end of the output list
+ */
+static int vpbe_display_enum_output(struct file *file, void *priv,
+ struct v4l2_output *output)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_OUTPUT\n");
+
+ /* Enumerate outputs */
+
+ if (NULL == vpbe_dev->ops.enum_outputs)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.enum_outputs(vpbe_dev, output);
+ if (ret) {
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "Failed to enumerate outputs\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpbe_display_s_output - Set the output to
+ * the one specified by the index
+ */
+static int vpbe_display_s_output(struct file *file, void *priv,
+ unsigned int i)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_OUTPUT\n");
+ /* If streaming is started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+ if (NULL == vpbe_dev->ops.set_output)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.set_output(vpbe_dev, i);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to set output for sub devices\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpbe_display_g_output - Get the current output index
+ * from the sub device
+ */
+static int vpbe_display_g_output(struct file *file, void *priv,
+ unsigned int *i)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_OUTPUT\n");
+ /* Get the index of the current output */
+ *i = vpbe_dev->current_out_index;
+
+ return 0;
+}
+
+/**
+ * vpbe_display_enum_dv_presets - Enumerate the dv presets
+ *
+ * Enumerate the presets in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int
+vpbe_display_enum_dv_presets(struct file *file, void *priv,
+ struct v4l2_dv_enum_preset *preset)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_PRESETS\n");
+
+ /* Enumerate outputs */
+ if (NULL == vpbe_dev->ops.enum_dv_presets)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.enum_dv_presets(vpbe_dev, preset);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to enumerate dv presets info\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vpbe_display_s_dv_preset - Set the dv preset
+ *
+ * Set the preset in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int
+vpbe_display_s_dv_preset(struct file *file, void *priv,
+ struct v4l2_dv_preset *preset)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_DV_PRESETS\n");
+
+ /* If streaming is started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Streaming is started\n");
+ return -EBUSY;
+ }
+
+ /* Set the given preset in the encoder */
+ if (NULL == vpbe_dev->ops.s_dv_preset)
+ return -EINVAL;
+
+ ret = vpbe_dev->ops.s_dv_preset(vpbe_dev, preset);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Failed to set the dv presets info\n");
+ return -EINVAL;
+ }
+ /* set the current norm to zero to be consistent. If STD is used
+ * v4l2 layer will set the norm properly on successful s_std call
+ */
+ layer->video_dev.current_norm = 0;
+
+ return 0;
+}
+
+/**
+ * vpbe_display_g_dv_preset - Get the dv preset
+ *
+ * Get the preset in the current encoder. Return the status. 0 - success
+ * -EINVAL on error
+ */
+static int
+vpbe_display_g_dv_preset(struct file *file, void *priv,
+ struct v4l2_dv_preset *dv_preset)
+{
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_DV_PRESETS\n");
+
+ /* Get the current preset from the encoder */
+
+ if (vpbe_dev->current_timings.timings_type &
+ VPBE_ENC_DV_PRESET) {
+ dv_preset->preset =
+ vpbe_dev->current_timings.timings.dv_preset;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vpbe_display_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct osd_state *osd_device = fh->disp_dev->osd_device;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_STREAMOFF,layer id = %d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ /* If io is not allowed for this file handle, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+ return -EACCES;
+ }
+
+ /* If streaming is not started, return error */
+ if (!layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "streaming not started in layer"
+ " id = %d\n", layer->device_id);
+ return -EINVAL;
+ }
+
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
+ layer->started = 0;
+ ret = videobuf_streamoff(&layer->buffer_queue);
+
+ return ret;
+}
+
+static int vpbe_display_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type buf_type)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ int ret;
+
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_STREAMON, layerid=%d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ /* If file handle is not allowed IO, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+ return -EACCES;
+ }
+ /* If Streaming is already started, return error */
+ if (layer->started) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "layer is already streaming\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Call videobuf_streamon to start streaming
+ * in videobuf
+ */
+ ret = videobuf_streamon(&layer->buffer_queue);
+ if (ret) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "error in videobuf_streamon\n");
+ return ret;
+ }
+ /* If buffer queue is empty, return error */
+ if (list_empty(&layer->dma_queue)) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "buffer queue is empty\n");
+ ret = -EIO;
+ goto streamoff;
+ }
+ /* Get the next frame from the buffer queue */
+ layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
+ struct videobuf_buffer, queue);
+ /* Remove buffer from the buffer queue */
+ list_del(&layer->cur_frm->queue);
+ /* Mark state of the current frame to active */
+ layer->cur_frm->state = VIDEOBUF_ACTIVE;
+ /* Initialize field_id and started member */
+ layer->field_id = 0;
+
+ /* Set parameters in OSD and VENC */
+ ret = vpbe_set_osd_display_params(disp_dev, layer);
+ if (ret < 0)
+ goto streamoff;
+
+ /*
+ * if request format is yuv420 semiplanar, need to
+ * enable both video windows
+ */
+ layer->started = 1;
+
+ layer->layer_first_int = 1;
+
+ return ret;
+streamoff:
+ videobuf_streamoff(&layer->buffer_queue);
+ return ret;
+}
+
+static int vpbe_display_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_DQBUF, layer id = %d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+ /* If this file handle is not allowed to do IO, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+ return -EACCES;
+ }
+ if (file->f_flags & O_NONBLOCK)
+ /* Call videobuf_dqbuf for non blocking mode */
+ ret = videobuf_dqbuf(&layer->buffer_queue, buf, 1);
+ else
+ /* Call videobuf_dqbuf for blocking mode */
+ ret = videobuf_dqbuf(&layer->buffer_queue, buf, 0);
+
+ return ret;
+}
+
+static int vpbe_display_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_QBUF, layer id = %d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != p->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ /* If this file handle is not allowed to do IO, return error */
+ if (!fh->io_allowed) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "No io_allowed\n");
+ return -EACCES;
+ }
+
+ return videobuf_qbuf(&layer->buffer_queue, p);
+}
+
+static int vpbe_display_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "VIDIOC_QUERYBUF, layer id = %d\n",
+ layer->device_id);
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ /* Call videobuf_querybuf to get information */
+ ret = videobuf_querybuf(&layer->buffer_queue, buf);
+
+ return ret;
+}
+
+static int vpbe_display_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *req_buf)
+{
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_reqbufs\n");
+
+ if (V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "Invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ /* If the layer already has IO users, return error */
+ if (0 != layer->io_usrs) {
+ v4l2_err(&vpbe_dev->v4l2_dev, "layer already has an IO user\n");
+ return -EBUSY;
+ }
+ /* Initialize videobuf queue as per the buffer type */
+ videobuf_queue_dma_contig_init(&layer->buffer_queue,
+ &video_qops,
+ vpbe_dev->pdev,
+ &layer->irqlock,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ layer->pix_fmt.field,
+ sizeof(struct videobuf_buffer),
+ fh, NULL);
+
+ /* Set io allowed member of file handle to TRUE */
+ fh->io_allowed = 1;
+ /* Increment io usrs member of layer object to 1 */
+ layer->io_usrs = 1;
+ /* Store type of memory requested in layer object */
+ layer->memory = req_buf->memory;
+ /* Initialize buffer queue */
+ INIT_LIST_HEAD(&layer->dma_queue);
+ /* Allocate buffers */
+ ret = videobuf_reqbufs(&layer->buffer_queue, req_buf);
+
+ return ret;
+}
+
+/*
+ * vpbe_display_mmap()
+ * It is used to map kernel space buffers into user space
+ */
+static int vpbe_display_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+ /* Get the layer object and file handle object */
+ struct vpbe_fh *fh = filep->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_mmap\n");
+
+ return videobuf_mmap_mapper(&layer->buffer_queue, vma);
+}
+
+/*
+ * vpbe_display_poll()
+ * It is used for the select/poll system call
+ */
+static unsigned int vpbe_display_poll(struct file *filep, poll_table *wait)
+{
+ struct vpbe_fh *fh = filep->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+ unsigned int err = 0;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_poll\n");
+ if (layer->started)
+ err = videobuf_poll_stream(filep, &layer->buffer_queue, wait);
+ return err;
+}
+
+/*
+ * vpbe_display_open()
+ * It creates a file handle object and stores it in the private_data
+ * member of the file pointer
+ */
+static int vpbe_display_open(struct file *file)
+{
+ struct vpbe_fh *fh = NULL;
+ struct vpbe_layer *layer = video_drvdata(file);
+ struct vpbe_display *disp_dev = layer->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_state *osd_device = disp_dev->osd_device;
+ int err;
+
+ /* Allocate memory for the file handle object */
+ fh = kmalloc(sizeof(struct vpbe_fh), GFP_KERNEL);
+ if (fh == NULL) {
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "unable to allocate memory for file handle object\n");
+ return -ENOMEM;
+ }
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe display open plane = %d\n",
+ layer->device_id);
+
+ /* store pointer to fh in private_data member of filep */
+ file->private_data = fh;
+ fh->layer = layer;
+ fh->disp_dev = disp_dev;
+
+ if (!layer->usrs) {
+
+ /* First claim the layer for this device */
+ err = osd_device->ops.request_layer(osd_device,
+ layer->layer_info.id);
+ if (err < 0) {
+ /* Couldn't get layer */
+ v4l2_err(&vpbe_dev->v4l2_dev,
+ "Display Manager failed to allocate layer\n");
+ kfree(fh);
+ return -EINVAL;
+ }
+ }
+ /* Increment layer usrs counter */
+ layer->usrs++;
+ /* Set io_allowed member to false */
+ fh->io_allowed = 0;
+ /* Initialize priority of this instance to default priority */
+ fh->prio = V4L2_PRIORITY_UNSET;
+ v4l2_prio_open(&layer->prio, &fh->prio);
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
+ "vpbe display device opened successfully\n");
+ return 0;
+}
+
+/*
+ * vpbe_display_release()
+ * This function deletes the buffer queue, frees the buffers and the davinci
+ * display file handle
+ */
+static int vpbe_display_release(struct file *file)
+{
+ /* Get the layer object and file handle object */
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_layer *layer = fh->layer;
+ struct osd_layer_config *cfg = &layer->layer_info.config;
+ struct vpbe_display *disp_dev = fh->disp_dev;
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct osd_state *osd_device = disp_dev->osd_device;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_release\n");
+
+ /* if this instance is doing IO */
+ if (fh->io_allowed) {
+ /* Reset io_usrs member of layer object */
+ layer->io_usrs = 0;
+
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
+ layer->started = 0;
+ /* Free buffers allocated */
+ videobuf_queue_cancel(&layer->buffer_queue);
+ videobuf_mmap_free(&layer->buffer_queue);
+ }
+
+ /* Decrement layer usrs counter */
+ layer->usrs--;
+ /* If no other file handle is using the layer, release it in the OSD */
+ if (!layer->usrs) {
+ if (cfg->pixfmt == PIXFMT_NV12) {
+ struct vpbe_layer *otherlayer;
+ otherlayer =
+ _vpbe_display_get_other_win_layer(disp_dev, layer);
+ osd_device->ops.disable_layer(osd_device,
+ otherlayer->layer_info.id);
+ osd_device->ops.release_layer(osd_device,
+ otherlayer->layer_info.id);
+ }
+ osd_device->ops.disable_layer(osd_device,
+ layer->layer_info.id);
+ osd_device->ops.release_layer(osd_device,
+ layer->layer_info.id);
+ }
+ /* Close the priority */
+ v4l2_prio_close(&layer->prio, fh->prio);
+ file->private_data = NULL;
+
+ /* Free memory allocated to file handle object */
+ kfree(fh);
+
+ disp_dev->cbcr_ofst = 0;
+
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int vpbe_display_g_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct v4l2_dbg_match *match = &reg->match;
+ struct vpbe_fh *fh = priv;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
+
+ if (match->type >= 2) {
+ v4l2_subdev_call(vpbe_dev->venc,
+ core,
+ g_register,
+ reg);
+ }
+
+ return 0;
+}
+
+static int vpbe_display_s_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ return 0;
+}
+#endif
+
+/* vpbe display ioctl operations */
+static const struct v4l2_ioctl_ops vpbe_ioctl_ops = {
+ .vidioc_querycap = vpbe_display_querycap,
+ .vidioc_g_fmt_vid_out = vpbe_display_g_fmt,
+ .vidioc_enum_fmt_vid_out = vpbe_display_enum_fmt,
+ .vidioc_s_fmt_vid_out = vpbe_display_s_fmt,
+ .vidioc_try_fmt_vid_out = vpbe_display_try_fmt,
+ .vidioc_reqbufs = vpbe_display_reqbufs,
+ .vidioc_querybuf = vpbe_display_querybuf,
+ .vidioc_qbuf = vpbe_display_qbuf,
+ .vidioc_dqbuf = vpbe_display_dqbuf,
+ .vidioc_streamon = vpbe_display_streamon,
+ .vidioc_streamoff = vpbe_display_streamoff,
+ .vidioc_cropcap = vpbe_display_cropcap,
+ .vidioc_g_crop = vpbe_display_g_crop,
+ .vidioc_s_crop = vpbe_display_s_crop,
+ .vidioc_g_priority = vpbe_display_g_priority,
+ .vidioc_s_priority = vpbe_display_s_priority,
+ .vidioc_s_std = vpbe_display_s_std,
+ .vidioc_g_std = vpbe_display_g_std,
+ .vidioc_enum_output = vpbe_display_enum_output,
+ .vidioc_s_output = vpbe_display_s_output,
+ .vidioc_g_output = vpbe_display_g_output,
+ .vidioc_s_dv_preset = vpbe_display_s_dv_preset,
+ .vidioc_g_dv_preset = vpbe_display_g_dv_preset,
+ .vidioc_enum_dv_presets = vpbe_display_enum_dv_presets,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = vpbe_display_g_register,
+ .vidioc_s_register = vpbe_display_s_register,
+#endif
+};
+
+static struct v4l2_file_operations vpbe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpbe_display_open,
+ .release = vpbe_display_release,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vpbe_display_mmap,
+ .poll = vpbe_display_poll
+};
+
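+/*
+ * vpbe_device_get()
+ * Bus iterator callback used by probe: collects the vpbe controller and
+ * OSD sub device pointers from their platform devices by name
+ */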
+static int vpbe_device_get(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vpbe_display *vpbe_disp = data;
+
+ if (strcmp("vpbe_controller", pdev->name) == 0)
+ vpbe_disp->vpbe_dev = platform_get_drvdata(pdev);
+
+ if (strcmp("vpbe-osd", pdev->name) == 0)
+ vpbe_disp->osd_device = platform_get_drvdata(pdev);
+
+ return 0;
+}
+
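+/*
+ * init_vpbe_layer()
+ * Allocates one layer object and initializes its locks, video_device
+ * fields and priority state
+ */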
+static __devinit int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
+ struct platform_device *pdev)
+{
+ struct vpbe_layer *vpbe_display_layer = NULL;
+ struct video_device *vbd = NULL;
+
+ /* Allocate memory for this layer's display object */
+
+ disp_dev->dev[i] =
+ kzalloc(sizeof(struct vpbe_layer), GFP_KERNEL);
+
+ /* If memory allocation fails, return error */
+ if (!disp_dev->dev[i]) {
+ printk(KERN_ERR "ran out of memory\n");
+ return -ENOMEM;
+ }
+ spin_lock_init(&disp_dev->dev[i]->irqlock);
+ mutex_init(&disp_dev->dev[i]->opslock);
+
+ /* Get the pointer to the layer object */
+ vpbe_display_layer = disp_dev->dev[i];
+ vbd = &vpbe_display_layer->video_dev;
+ /* Initialize field of video device */
+ vbd->release = video_device_release_empty;
+ vbd->fops = &vpbe_fops;
+ vbd->ioctl_ops = &vpbe_ioctl_ops;
+ vbd->minor = -1;
+ vbd->v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
+ vbd->lock = &vpbe_display_layer->opslock;
+
+ if (disp_dev->vpbe_dev->current_timings.timings_type &
+ VPBE_ENC_STD) {
+ vbd->tvnorms = (V4L2_STD_525_60 | V4L2_STD_625_50);
+ vbd->current_norm =
+ disp_dev->vpbe_dev->
+ current_timings.timings.std_id;
+ } else
+ vbd->current_norm = 0;
+
+ snprintf(vbd->name, sizeof(vbd->name),
+ "DaVinci_VPBE Display_DRIVER_V%d.%d.%d",
+ (VPBE_DISPLAY_VERSION_CODE >> 16) & 0xff,
+ (VPBE_DISPLAY_VERSION_CODE >> 8) & 0xff,
+ (VPBE_DISPLAY_VERSION_CODE) & 0xff);
+
+ vpbe_display_layer->device_id = i;
+
+ vpbe_display_layer->layer_info.id =
+ ((i == VPBE_DISPLAY_DEVICE_0) ? WIN_VID0 : WIN_VID1);
+
+ /* Initialize prio member of layer object */
+ v4l2_prio_init(&vpbe_display_layer->prio);
+
+ return 0;
+}
+
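+/*
+ * register_device()
+ * Registers the layer's video_device with the V4L2 framework and sets up
+ * the platform and video driver data pointers
+ */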
+static __devinit int register_device(struct vpbe_layer *vpbe_display_layer,
+ struct vpbe_display *disp_dev,
+ struct platform_device *pdev)
+{
+ int err;
+
+ v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
+ "Trying to register VPBE display device.\n");
+ v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
+ "layer=%x,layer->video_dev=%x\n",
+ (int)vpbe_display_layer,
+ (int)&vpbe_display_layer->video_dev);
+
+ err = video_register_device(&vpbe_display_layer->video_dev,
+ VFL_TYPE_GRABBER,
+ -1);
+ if (err)
+ return -ENODEV;
+
+ vpbe_display_layer->disp_dev = disp_dev;
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, disp_dev);
+ video_set_drvdata(&vpbe_display_layer->video_dev,
+ vpbe_display_layer);
+
+ return 0;
+}
+
+/*
+ * vpbe_display_probe()
+ * This function creates device entries by registering itself to the V4L2
+ * driver and initializes fields of each layer object
+ */
+static __devinit int vpbe_display_probe(struct platform_device *pdev)
+{
+ struct vpbe_layer *vpbe_display_layer;
+ struct vpbe_display *disp_dev;
+ struct resource *res = NULL;
+ int k;
+ int i;
+ int err;
+ int irq;
+
+ printk(KERN_DEBUG "vpbe_display_probe\n");
+ /* Allocate memory for vpbe_display */
+ disp_dev = kzalloc(sizeof(struct vpbe_display), GFP_KERNEL);
+ if (!disp_dev) {
+ printk(KERN_ERR "ran out of memory\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&disp_dev->dma_queue_lock);
+ /*
+ * Scan all the platform devices to find the vpbe
+ * controller device and get the vpbe_dev object
+ */
+ err = bus_for_each_dev(&platform_bus_type, NULL, disp_dev,
+ vpbe_device_get);
+ if (err < 0)
+ return err;
+ /* Initialize the vpbe display controller */
+ if (NULL != disp_dev->vpbe_dev->ops.initialize) {
+ err = disp_dev->vpbe_dev->ops.initialize(&pdev->dev,
+ disp_dev->vpbe_dev);
+ if (err) {
+ v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
+ "Error initing vpbe\n");
+ err = -ENOMEM;
+ goto probe_out;
+ }
+ }
+
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ if (init_vpbe_layer(i, disp_dev, pdev)) {
+ err = -ENODEV;
+ goto probe_out;
+ }
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
+ "Unable to get VENC interrupt resource\n");
+ err = -ENODEV;
+ goto probe_out;
+ }
+
+ irq = res->start;
+ if (request_irq(irq, venc_isr, IRQF_DISABLED, VPBE_DISPLAY_DRIVER,
+ disp_dev)) {
+ v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
+ "Unable to request interrupt\n");
+ err = -ENODEV;
+ goto probe_out;
+ }
+
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ if (register_device(disp_dev->dev[i], disp_dev, pdev)) {
+ err = -ENODEV;
+ goto probe_out;
+ }
+ }
+
+ printk(KERN_DEBUG "Successfully completed the probing of vpbe v4l2 device\n");
+ return 0;
+
+probe_out:
+ free_irq(res->start, disp_dev);
+ for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
+ /* Get the pointer to the layer object */
+ vpbe_display_layer = disp_dev->dev[k];
+ /* Unregister video device */
+ if (vpbe_display_layer) {
+ video_unregister_device(
+ &vpbe_display_layer->video_dev);
+ kfree(disp_dev->dev[k]);
+ }
+ }
+ kfree(disp_dev);
+ return err;
+}
+
+/*
+ * vpbe_display_remove()
+ * It un-registers the hardware layers from the V4L2 driver
+ */
+static int vpbe_display_remove(struct platform_device *pdev)
+{
+ struct vpbe_layer *vpbe_display_layer;
+ struct vpbe_display *disp_dev = platform_get_drvdata(pdev);
+ struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
+ struct resource *res;
+ int i;
+
+ v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_remove\n");
+
+ /* unregister irq */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ free_irq(res->start, disp_dev);
+
+ /* deinitialize the vpbe display controller */
+ if (NULL != vpbe_dev->ops.deinitialize)
+ vpbe_dev->ops.deinitialize(&pdev->dev, vpbe_dev);
+ /* un-register device */
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ /* Get the pointer to the layer object */
+ vpbe_display_layer = disp_dev->dev[i];
+ /* Unregister video device */
+ video_unregister_device(&vpbe_display_layer->video_dev);
+
+ }
+ for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
+ kfree(disp_dev->dev[i]);
+ disp_dev->dev[i] = NULL;
+ }
+
+ return 0;
+}
+
+static struct platform_driver vpbe_display_driver = {
+ .driver = {
+ .name = VPBE_DISPLAY_DRIVER,
+ .owner = THIS_MODULE,
+ .bus = &platform_bus_type,
+ },
+ .probe = vpbe_display_probe,
+ .remove = __devexit_p(vpbe_display_remove),
+};
+
+/*
+ * vpbe_display_init()
+ * This function registers the display driver with the kernel
+ */
+static __init int vpbe_display_init(void)
+{
+ int err;
+
+ printk(KERN_DEBUG "vpbe_display_init\n");
+
+ /* Register driver to the kernel */
+ err = platform_driver_register(&vpbe_display_driver);
+ if (0 != err)
+ return err;
+
+ printk(KERN_DEBUG "vpbe_display_init:"
+ "VPBE V4L2 Display Driver V1.0 loaded\n");
+ return 0;
+}
+
+/*
+ * vpbe_display_cleanup()
+ * This function un-registers the display driver from the kernel.
+ */
+static void vpbe_display_cleanup(void)
+{
+ printk(KERN_DEBUG "vpbe_display_cleanup\n");
+
+ /* platform driver unregister */
+ platform_driver_unregister(&vpbe_display_driver);
+}
+
+/* Function for module initialization and cleanup */
+module_init(vpbe_display_init);
+module_exit(vpbe_display_cleanup);
+
+MODULE_DESCRIPTION("TI DM644x/DM355/DM365 VPBE Display controller");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/video/davinci/vpbe_osd.c b/drivers/media/video/davinci/vpbe_osd.c
new file mode 100644
index 00000000000..5352884998f
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_osd.c
@@ -0,0 +1,1231 @@
+/*
+ * Copyright (C) 2007-2010 Texas Instruments Inc
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ *
+ * Andy Lowe (alowe@mvista.com), MontaVista Software
+ * - Initial version
+ * Murali Karicheri (mkaricheri@gmail.com), Texas Instruments Ltd.
+ * - ported to sub device interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include <mach/io.h>
+#include <mach/cputype.h>
+#include <mach/hardware.h>
+
+#include <media/davinci/vpss.h>
+#include <media/v4l2-device.h>
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe_osd.h>
+
+#include <linux/io.h>
+#include "vpbe_osd_regs.h"
+
+#define MODULE_NAME VPBE_OSD_SUBDEV_NAME
+
+/* register access routines */
+static inline u32 osd_read(struct osd_state *sd, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ return readl(osd->osd_base + offset);
+}
+
+static inline u32 osd_write(struct osd_state *sd, u32 val, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ writel(val, osd->osd_base + offset);
+
+ return val;
+}
+
+static inline u32 osd_set(struct osd_state *sd, u32 mask, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ u32 addr = osd->osd_base + offset;
+ u32 val = readl(addr) | mask;
+
+ writel(val, addr);
+
+ return val;
+}
+
+static inline u32 osd_clear(struct osd_state *sd, u32 mask, u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ u32 addr = osd->osd_base + offset;
+ u32 val = readl(addr) & ~mask;
+
+ writel(val, addr);
+
+ return val;
+}
+
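+/* read-modify-write: update only the register bits selected by mask */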
+static inline u32 osd_modify(struct osd_state *sd, u32 mask, u32 val,
+ u32 offset)
+{
+ struct osd_state *osd = sd;
+
+ u32 addr = osd->osd_base + offset;
+ u32 new_val = (readl(addr) & ~mask) | (val & mask);
+
+ writel(new_val, addr);
+
+ return new_val;
+}
+
+/* define some macros for layer and pixfmt classification */
+#define is_osd_win(layer) (((layer) == WIN_OSD0) || ((layer) == WIN_OSD1))
+#define is_vid_win(layer) (((layer) == WIN_VID0) || ((layer) == WIN_VID1))
+#define is_rgb_pixfmt(pixfmt) \
+ (((pixfmt) == PIXFMT_RGB565) || ((pixfmt) == PIXFMT_RGB888))
+#define is_yc_pixfmt(pixfmt) \
+ (((pixfmt) == PIXFMT_YCbCrI) || ((pixfmt) == PIXFMT_YCrCbI) || \
+ ((pixfmt) == PIXFMT_NV12))
+#define MAX_WIN_SIZE OSD_VIDWIN0XP_V0X
+#define MAX_LINE_LENGTH (OSD_VIDWIN0OFST_V0LO << 5)
+
+/**
+ * _osd_dm6446_vid0_pingpong() - field inversion fix for DM6446
+ * @sd: ptr to struct osd_state
+ * @field_inversion: inversion flag
+ * @fb_base_phys: frame buffer address
+ * @lconfig: ptr to layer config
+ *
+ * This routine implements a workaround for the field signal inversion silicon
+ * erratum described in Advisory 1.3.8 for the DM6446. The fb_base_phys and
+ * lconfig parameters apply to the vid0 window. This routine should be called
+ * whenever the vid0 layer configuration or start address is modified, or when
+ * the OSD field inversion setting is modified.
+ * Returns: 1 if the ping-pong buffers need to be toggled in the vsync isr, or
+ * 0 otherwise
+ */
+static int _osd_dm6446_vid0_pingpong(struct osd_state *sd,
+ int field_inversion,
+ unsigned long fb_base_phys,
+ const struct osd_layer_config *lconfig)
+{
+ struct osd_platform_data *pdata;
+
+ pdata = (struct osd_platform_data *)sd->dev->platform_data;
+ if (pdata->field_inv_wa_enable) {
+
+ if (!field_inversion || !lconfig->interlaced) {
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_PPVWIN0ADR);
+ osd_modify(sd, OSD_MISCCTL_PPSW | OSD_MISCCTL_PPRV, 0,
+ OSD_MISCCTL);
+ return 0;
+ } else {
+ unsigned miscctl = OSD_MISCCTL_PPRV;
+
+ osd_write(sd,
+ (fb_base_phys & ~0x1F) - lconfig->line_length,
+ OSD_VIDWIN0ADR);
+ osd_write(sd,
+ (fb_base_phys & ~0x1F) + lconfig->line_length,
+ OSD_PPVWIN0ADR);
+ osd_modify(sd,
+ OSD_MISCCTL_PPSW | OSD_MISCCTL_PPRV, miscctl,
+ OSD_MISCCTL);
+
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void _osd_set_field_inversion(struct osd_state *sd, int enable)
+{
+ unsigned fsinv = 0;
+
+ if (enable)
+ fsinv = OSD_MODE_FSINV;
+
+ osd_modify(sd, OSD_MODE_FSINV, fsinv, OSD_MODE);
+}
+
+static void _osd_set_blink_attribute(struct osd_state *sd, int enable,
+ enum osd_blink_interval blink)
+{
+ u32 osdatrmd = 0;
+
+ if (enable) {
+ osdatrmd |= OSD_OSDATRMD_BLNK;
+ osdatrmd |= blink << OSD_OSDATRMD_BLNKINT_SHIFT;
+ }
+ /* caller must ensure that OSD1 is configured in attribute mode */
+ osd_modify(sd, OSD_OSDATRMD_BLNKINT | OSD_OSDATRMD_BLNK, osdatrmd,
+ OSD_OSDATRMD);
+}
+
+static void _osd_set_rom_clut(struct osd_state *sd,
+ enum osd_rom_clut rom_clut)
+{
+ if (rom_clut == ROM_CLUT0)
+ osd_clear(sd, OSD_MISCCTL_RSEL, OSD_MISCCTL);
+ else
+ osd_set(sd, OSD_MISCCTL_RSEL, OSD_MISCCTL);
+}
+
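+/*
+ * _osd_set_palette_map()
+ * Maps a bitmap pixel value to a CLUT index for 1/2/4 bpp OSD windows.
+ * Two 8-bit map entries are packed into each WxBMPnn register: the entry
+ * index selects the register and its low bit selects the byte lane.
+ */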
+static void _osd_set_palette_map(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ unsigned char pixel_value,
+ unsigned char clut_index,
+ enum osd_pix_format pixfmt)
+{
+ static const int map_2bpp[] = { 0, 5, 10, 15 };
+ static const int map_1bpp[] = { 0, 15 };
+ int bmp_offset;
+ int bmp_shift;
+ int bmp_mask;
+ int bmp_reg;
+
+ switch (pixfmt) {
+ case PIXFMT_1BPP:
+ bmp_reg = map_1bpp[pixel_value & 0x1];
+ break;
+ case PIXFMT_2BPP:
+ bmp_reg = map_2bpp[pixel_value & 0x3];
+ break;
+ case PIXFMT_4BPP:
+ bmp_reg = pixel_value & 0xf;
+ break;
+ default:
+ return;
+ }
+
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ bmp_offset = OSD_W0BMP01 + (bmp_reg >> 1) * sizeof(u32);
+ break;
+ case OSDWIN_OSD1:
+ bmp_offset = OSD_W1BMP01 + (bmp_reg >> 1) * sizeof(u32);
+ break;
+ default:
+ return;
+ }
+
+ if (bmp_reg & 1) {
+ bmp_shift = 8;
+ bmp_mask = 0xff << 8;
+ } else {
+ bmp_shift = 0;
+ bmp_mask = 0xff;
+ }
+
+ osd_modify(sd, bmp_mask, clut_index << bmp_shift, bmp_offset);
+}
+
+static void _osd_set_rec601_attenuation(struct osd_state *sd,
+ enum osd_win_layer osdwin, int enable)
+{
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_modify(sd, OSD_OSDWIN0MD_ATN0E,
+ enable ? OSD_OSDWIN0MD_ATN0E : 0,
+ OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ osd_modify(sd, OSD_OSDWIN1MD_ATN1E,
+ enable ? OSD_OSDWIN1MD_ATN1E : 0,
+ OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_set_blending_factor(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ enum osd_blending_factor blend)
+{
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_modify(sd, OSD_OSDWIN0MD_BLND0,
+ blend << OSD_OSDWIN0MD_BLND0_SHIFT, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ osd_modify(sd, OSD_OSDWIN1MD_BLND1,
+ blend << OSD_OSDWIN1MD_BLND1_SHIFT, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_enable_color_key(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ unsigned colorkey,
+ enum osd_pix_format pixfmt)
+{
+ switch (pixfmt) {
+ case PIXFMT_RGB565:
+ osd_write(sd, colorkey & OSD_TRANSPVAL_RGBTRANS,
+ OSD_TRANSPVAL);
+ break;
+ default:
+ break;
+ }
+
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_set(sd, OSD_OSDWIN0MD_TE0, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ osd_set(sd, OSD_OSDWIN1MD_TE1, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_disable_color_key(struct osd_state *sd,
+ enum osd_win_layer osdwin)
+{
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ osd_clear(sd, OSD_OSDWIN0MD_TE0, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ osd_clear(sd, OSD_OSDWIN1MD_TE1, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_set_osd_clut(struct osd_state *sd,
+ enum osd_win_layer osdwin,
+ enum osd_clut clut)
+{
+ u32 winmd = 0;
+
+ switch (osdwin) {
+ case OSDWIN_OSD0:
+ if (clut == RAM_CLUT)
+ winmd |= OSD_OSDWIN0MD_CLUTS0;
+ osd_modify(sd, OSD_OSDWIN0MD_CLUTS0, winmd, OSD_OSDWIN0MD);
+ break;
+ case OSDWIN_OSD1:
+ if (clut == RAM_CLUT)
+ winmd |= OSD_OSDWIN1MD_CLUTS1;
+ osd_modify(sd, OSD_OSDWIN1MD_CLUTS1, winmd, OSD_OSDWIN1MD);
+ break;
+ }
+}
+
+static void _osd_set_zoom(struct osd_state *sd, enum osd_layer layer,
+ enum osd_zoom_factor h_zoom,
+ enum osd_zoom_factor v_zoom)
+{
+ u32 winmd = 0;
+
+ switch (layer) {
+ case WIN_OSD0:
+ winmd |= (h_zoom << OSD_OSDWIN0MD_OHZ0_SHIFT);
+ winmd |= (v_zoom << OSD_OSDWIN0MD_OVZ0_SHIFT);
+ osd_modify(sd, OSD_OSDWIN0MD_OHZ0 | OSD_OSDWIN0MD_OVZ0, winmd,
+ OSD_OSDWIN0MD);
+ break;
+ case WIN_VID0:
+ winmd |= (h_zoom << OSD_VIDWINMD_VHZ0_SHIFT);
+ winmd |= (v_zoom << OSD_VIDWINMD_VVZ0_SHIFT);
+ osd_modify(sd, OSD_VIDWINMD_VHZ0 | OSD_VIDWINMD_VVZ0, winmd,
+ OSD_VIDWINMD);
+ break;
+ case WIN_OSD1:
+ winmd |= (h_zoom << OSD_OSDWIN1MD_OHZ1_SHIFT);
+ winmd |= (v_zoom << OSD_OSDWIN1MD_OVZ1_SHIFT);
+ osd_modify(sd, OSD_OSDWIN1MD_OHZ1 | OSD_OSDWIN1MD_OVZ1, winmd,
+ OSD_OSDWIN1MD);
+ break;
+ case WIN_VID1:
+ winmd |= (h_zoom << OSD_VIDWINMD_VHZ1_SHIFT);
+ winmd |= (v_zoom << OSD_VIDWINMD_VVZ1_SHIFT);
+ osd_modify(sd, OSD_VIDWINMD_VHZ1 | OSD_VIDWINMD_VVZ1, winmd,
+ OSD_VIDWINMD);
+ break;
+ }
+}
+
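+/* clear the window's active bit; OSD1 also drops out of attribute mode */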
+static void _osd_disable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ switch (layer) {
+ case WIN_OSD0:
+ osd_clear(sd, OSD_OSDWIN0MD_OACT0, OSD_OSDWIN0MD);
+ break;
+ case WIN_VID0:
+ osd_clear(sd, OSD_VIDWINMD_ACT0, OSD_VIDWINMD);
+ break;
+ case WIN_OSD1:
+ /* disable attribute mode as well as disabling the window */
+ osd_clear(sd, OSD_OSDWIN1MD_OASW | OSD_OSDWIN1MD_OACT1,
+ OSD_OSDWIN1MD);
+ break;
+ case WIN_VID1:
+ osd_clear(sd, OSD_VIDWINMD_ACT1, OSD_VIDWINMD);
+ break;
+ }
+}
+
+static void osd_disable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ if (!win->is_enabled) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return;
+ }
+ win->is_enabled = 0;
+
+ _osd_disable_layer(sd, layer);
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static void _osd_enable_attribute_mode(struct osd_state *sd)
+{
+ /* enable attribute mode for OSD1 */
+ osd_set(sd, OSD_OSDWIN1MD_OASW, OSD_OSDWIN1MD);
+}
+
+static void _osd_enable_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ switch (layer) {
+ case WIN_OSD0:
+ osd_set(sd, OSD_OSDWIN0MD_OACT0, OSD_OSDWIN0MD);
+ break;
+ case WIN_VID0:
+ osd_set(sd, OSD_VIDWINMD_ACT0, OSD_VIDWINMD);
+ break;
+ case WIN_OSD1:
+ /* enable OSD1 and disable attribute mode */
+ osd_modify(sd, OSD_OSDWIN1MD_OASW | OSD_OSDWIN1MD_OACT1,
+ OSD_OSDWIN1MD_OACT1, OSD_OSDWIN1MD);
+ break;
+ case WIN_VID1:
+ osd_set(sd, OSD_VIDWINMD_ACT1, OSD_VIDWINMD);
+ break;
+ }
+}
+
+static int osd_enable_layer(struct osd_state *sd, enum osd_layer layer,
+ int otherwin)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ /*
+ * use otherwin flag to know this is the other vid window
+ * in YUV420 mode, if is, skip this check
+ */
+ if (!otherwin && (!win->is_allocated ||
+ !win->fb_base_phys ||
+ !cfg->line_length ||
+ !cfg->xsize ||
+ !cfg->ysize)) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return -1;
+ }
+
+ if (win->is_enabled) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return 0;
+ }
+ win->is_enabled = 1;
+
+ if (cfg->pixfmt != PIXFMT_OSD_ATTR)
+ _osd_enable_layer(sd, layer);
+ else {
+ _osd_enable_attribute_mode(sd);
+ _osd_set_blink_attribute(sd, osd->is_blinking, osd->blink);
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+
+ return 0;
+}
+
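+/*
+ * _osd_start_layer()
+ * Programs the 32-byte-aligned frame buffer address into the address
+ * register of the selected window
+ */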
+static void _osd_start_layer(struct osd_state *sd, enum osd_layer layer,
+ unsigned long fb_base_phys,
+ unsigned long cbcr_ofst)
+{
+ switch (layer) {
+ case WIN_OSD0:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN0ADR);
+ break;
+ case WIN_VID0:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
+ break;
+ case WIN_OSD1:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN1ADR);
+ break;
+ case WIN_VID1:
+ osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN1ADR);
+ break;
+ }
+}
+
+static void osd_start_layer(struct osd_state *sd, enum osd_layer layer,
+ unsigned long fb_base_phys,
+ unsigned long cbcr_ofst)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ win->fb_base_phys = fb_base_phys & ~0x1F;
+ _osd_start_layer(sd, layer, fb_base_phys, cbcr_ofst);
+
+ if (layer == WIN_VID0) {
+ osd->pingpong =
+ _osd_dm6446_vid0_pingpong(sd, osd->field_inversion,
+ win->fb_base_phys,
+ cfg);
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static void osd_get_layer_config(struct osd_state *sd, enum osd_layer layer,
+ struct osd_layer_config *lconfig)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ *lconfig = win->lconfig;
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+/**
+ * try_layer_config() - Try a specific configuration for the layer
+ * @sd: ptr to struct osd_state
+ * @layer: layer to configure
+ * @lconfig: layer configuration to try
+ *
+ * If the requested lconfig is completely rejected and the value of lconfig on
+ * exit is the current lconfig, then try_layer_config() returns 1. Otherwise,
+ * try_layer_config() returns 0. A return value of 0 does not necessarily mean
+ * that the value of lconfig on exit is identical to the value of lconfig on
+ * entry, but merely that it represents a change from the current lconfig.
+ */
+static int try_layer_config(struct osd_state *sd, enum osd_layer layer,
+ struct osd_layer_config *lconfig)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ int bad_config;
+
+ /* verify that the pixel format is compatible with the layer */
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ case PIXFMT_2BPP:
+ case PIXFMT_4BPP:
+ case PIXFMT_8BPP:
+ case PIXFMT_RGB565:
+ bad_config = !is_osd_win(layer);
+ break;
+ case PIXFMT_YCbCrI:
+ case PIXFMT_YCrCbI:
+ bad_config = !is_vid_win(layer);
+ break;
+ case PIXFMT_RGB888:
+ bad_config = !is_vid_win(layer);
+ break;
+ case PIXFMT_NV12:
+ bad_config = 1;
+ break;
+ case PIXFMT_OSD_ATTR:
+ bad_config = (layer != WIN_OSD1);
+ break;
+ default:
+ bad_config = 1;
+ break;
+ }
+ if (bad_config) {
+ /*
+ * The requested pixel format is incompatible with the layer,
+ * so keep the current layer configuration.
+ */
+ *lconfig = win->lconfig;
+ return bad_config;
+ }
+
+ /* DM6446: */
+ /* only one OSD window at a time can use RGB pixel formats */
+ if (is_osd_win(layer) && is_rgb_pixfmt(lconfig->pixfmt)) {
+ enum osd_pix_format pixfmt;
+ if (layer == WIN_OSD0)
+ pixfmt = osd->win[WIN_OSD1].lconfig.pixfmt;
+ else
+ pixfmt = osd->win[WIN_OSD0].lconfig.pixfmt;
+
+ if (is_rgb_pixfmt(pixfmt)) {
+ /*
+ * The other OSD window is already configured for an RGB
+ * pixel format, so keep the current layer configuration.
+ */
+ *lconfig = win->lconfig;
+ return 1;
+ }
+ }
+
+ /* DM6446: only one video window at a time can use RGB888 */
+ if (is_vid_win(layer) && lconfig->pixfmt == PIXFMT_RGB888) {
+ enum osd_pix_format pixfmt;
+
+ if (layer == WIN_VID0)
+ pixfmt = osd->win[WIN_VID1].lconfig.pixfmt;
+ else
+ pixfmt = osd->win[WIN_VID0].lconfig.pixfmt;
+
+ if (pixfmt == PIXFMT_RGB888) {
+ /*
+ * The other video window is already configured for
+ * RGB888, so keep the current layer configuration.
+ */
+ *lconfig = win->lconfig;
+ return 1;
+ }
+ }
+
+ /* window dimensions must be non-zero */
+ if (!lconfig->line_length || !lconfig->xsize || !lconfig->ysize) {
+ *lconfig = win->lconfig;
+ return 1;
+ }
+
+ /* round line_length up to a multiple of 32 */
+ lconfig->line_length = ((lconfig->line_length + 31) / 32) * 32;
+ lconfig->line_length =
+ min(lconfig->line_length, (unsigned)MAX_LINE_LENGTH);
+ lconfig->xsize = min(lconfig->xsize, (unsigned)MAX_WIN_SIZE);
+ lconfig->ysize = min(lconfig->ysize, (unsigned)MAX_WIN_SIZE);
+ lconfig->xpos = min(lconfig->xpos, (unsigned)MAX_WIN_SIZE);
+ lconfig->ypos = min(lconfig->ypos, (unsigned)MAX_WIN_SIZE);
+ lconfig->interlaced = (lconfig->interlaced != 0);
+ if (lconfig->interlaced) {
+ /* ysize and ypos must be even for interlaced displays */
+ lconfig->ysize &= ~1;
+ lconfig->ypos &= ~1;
+ }
+
+ return 0;
+}
+
+static void _osd_disable_vid_rgb888(struct osd_state *sd)
+{
+ /*
+ * The DM6446 supports RGB888 pixel format in a single video window.
+ * This routine disables RGB888 pixel format for both video windows.
+ * The caller must ensure that neither video window is currently
+ * configured for RGB888 pixel format.
+ */
+ osd_clear(sd, OSD_MISCCTL_RGBEN, OSD_MISCCTL);
+}
+
+static void _osd_enable_vid_rgb888(struct osd_state *sd,
+ enum osd_layer layer)
+{
+ /*
+ * The DM6446 supports RGB888 pixel format in a single video window.
+ * This routine enables RGB888 pixel format for the specified video
+ * window. The caller must ensure that the other video window is not
+ * currently configured for RGB888 pixel format, as this routine will
+ * disable RGB888 pixel format for the other window.
+ */
+ if (layer == WIN_VID0) {
+ osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL_RGBEN, OSD_MISCCTL);
+ } else if (layer == WIN_VID1) {
+ osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
+ OSD_MISCCTL);
+ }
+}
+
+static void _osd_set_cbcr_order(struct osd_state *sd,
+ enum osd_pix_format pixfmt)
+{
+ /*
+ * The caller must ensure that all windows using YC pixfmt use the same
+ * Cb/Cr order.
+ */
+ if (pixfmt == PIXFMT_YCbCrI)
+ osd_clear(sd, OSD_MODE_CS, OSD_MODE);
+ else if (pixfmt == PIXFMT_YCrCbI)
+ osd_set(sd, OSD_MODE_CS, OSD_MODE);
+}
+
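+/*
+ * _osd_set_layer_config()
+ * Programs the window mode, line offset (in units of 32 bytes), position
+ * and size registers for a layer; vertical position and size are halved
+ * for interlaced configurations
+ */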
+static void _osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
+ const struct osd_layer_config *lconfig)
+{
+ u32 winmd = 0, winmd_mask = 0, bmw = 0;
+
+ _osd_set_cbcr_order(sd, lconfig->pixfmt);
+
+ switch (layer) {
+ case WIN_OSD0:
+ winmd_mask |= OSD_OSDWIN0MD_RGB0E;
+ if (lconfig->pixfmt == PIXFMT_RGB565)
+ winmd |= OSD_OSDWIN0MD_RGB0E;
+
+ winmd_mask |= OSD_OSDWIN0MD_BMW0 | OSD_OSDWIN0MD_OFF0;
+
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ bmw = 0;
+ break;
+ case PIXFMT_2BPP:
+ bmw = 1;
+ break;
+ case PIXFMT_4BPP:
+ bmw = 2;
+ break;
+ case PIXFMT_8BPP:
+ bmw = 3;
+ break;
+ default:
+ break;
+ }
+ winmd |= (bmw << OSD_OSDWIN0MD_BMW0_SHIFT);
+
+ if (lconfig->interlaced)
+ winmd |= OSD_OSDWIN0MD_OFF0;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_OSDWIN0MD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_OSDWIN0OFST);
+ osd_write(sd, lconfig->xpos, OSD_OSDWIN0XP);
+ osd_write(sd, lconfig->xsize, OSD_OSDWIN0XL);
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_OSDWIN0YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_OSDWIN0YL);
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_OSDWIN0YP);
+ osd_write(sd, lconfig->ysize, OSD_OSDWIN0YL);
+ }
+ break;
+ case WIN_VID0:
+ winmd_mask |= OSD_VIDWINMD_VFF0;
+ if (lconfig->interlaced)
+ winmd |= OSD_VIDWINMD_VFF0;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_VIDWINMD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_VIDWIN0OFST);
+ osd_write(sd, lconfig->xpos, OSD_VIDWIN0XP);
+ osd_write(sd, lconfig->xsize, OSD_VIDWIN0XL);
+ /*
+ * For YUV420P format the register contents are
+ * duplicated in both VID registers
+ */
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN0YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN0YL);
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_VIDWIN0YP);
+ osd_write(sd, lconfig->ysize, OSD_VIDWIN0YL);
+ }
+ break;
+ case WIN_OSD1:
+ /*
+ * The caller must ensure that OSD1 is disabled prior to
+ * switching from a normal mode to attribute mode or from
+ * attribute mode to a normal mode.
+ */
+ if (lconfig->pixfmt == PIXFMT_OSD_ATTR) {
+ winmd_mask |=
+ OSD_OSDWIN1MD_ATN1E | OSD_OSDWIN1MD_RGB1E |
+ OSD_OSDWIN1MD_CLUTS1 |
+ OSD_OSDWIN1MD_BLND1 | OSD_OSDWIN1MD_TE1;
+ } else {
+ winmd_mask |= OSD_OSDWIN1MD_RGB1E;
+ if (lconfig->pixfmt == PIXFMT_RGB565)
+ winmd |= OSD_OSDWIN1MD_RGB1E;
+
+ winmd_mask |= OSD_OSDWIN1MD_BMW1;
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ bmw = 0;
+ break;
+ case PIXFMT_2BPP:
+ bmw = 1;
+ break;
+ case PIXFMT_4BPP:
+ bmw = 2;
+ break;
+ case PIXFMT_8BPP:
+ bmw = 3;
+ break;
+ default:
+ break;
+ }
+ winmd |= (bmw << OSD_OSDWIN1MD_BMW1_SHIFT);
+ }
+
+ winmd_mask |= OSD_OSDWIN1MD_OFF1;
+ if (lconfig->interlaced)
+ winmd |= OSD_OSDWIN1MD_OFF1;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_OSDWIN1MD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_OSDWIN1OFST);
+ osd_write(sd, lconfig->xpos, OSD_OSDWIN1XP);
+ osd_write(sd, lconfig->xsize, OSD_OSDWIN1XL);
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_OSDWIN1YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_OSDWIN1YL);
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_OSDWIN1YP);
+ osd_write(sd, lconfig->ysize, OSD_OSDWIN1YL);
+ }
+ break;
+ case WIN_VID1:
+ winmd_mask |= OSD_VIDWINMD_VFF1;
+ if (lconfig->interlaced)
+ winmd |= OSD_VIDWINMD_VFF1;
+
+ osd_modify(sd, winmd_mask, winmd, OSD_VIDWINMD);
+ osd_write(sd, lconfig->line_length >> 5, OSD_VIDWIN1OFST);
+ osd_write(sd, lconfig->xpos, OSD_VIDWIN1XP);
+ osd_write(sd, lconfig->xsize, OSD_VIDWIN1XL);
+ /*
+ * For YUV420P format the register contents are
+ * duplicated in both VID registers
+ */
+ osd_modify(sd, OSD_MISCCTL_S420D, ~OSD_MISCCTL_S420D,
+ OSD_MISCCTL);
+
+ if (lconfig->interlaced) {
+ osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN1YP);
+ osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN1YL);
+ } else {
+ osd_write(sd, lconfig->ypos, OSD_VIDWIN1YP);
+ osd_write(sd, lconfig->ysize, OSD_VIDWIN1YL);
+ }
+ break;
+ }
+}
+
+static int osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
+ struct osd_layer_config *lconfig)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+ int reject_config;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ reject_config = try_layer_config(sd, layer, lconfig);
+ if (reject_config) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return reject_config;
+ }
+
+ /* update the current Cb/Cr order */
+ if (is_yc_pixfmt(lconfig->pixfmt))
+ osd->yc_pixfmt = lconfig->pixfmt;
+
+ /*
+ * If we are switching OSD1 from normal mode to attribute mode or from
+ * attribute mode to normal mode, then we must disable the window.
+ */
+ if (layer == WIN_OSD1) {
+ if (((lconfig->pixfmt == PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt != PIXFMT_OSD_ATTR)) ||
+ ((lconfig->pixfmt != PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt == PIXFMT_OSD_ATTR))) {
+ win->is_enabled = 0;
+ _osd_disable_layer(sd, layer);
+ }
+ }
+
+ _osd_set_layer_config(sd, layer, lconfig);
+
+ if (layer == WIN_OSD1) {
+ struct osd_osdwin_state *osdwin_state =
+ &osd->osdwin[OSDWIN_OSD1];
+
+ if ((lconfig->pixfmt != PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt == PIXFMT_OSD_ATTR)) {
+ /*
+ * We just switched OSD1 from attribute mode to normal
+ * mode, so we must initialize the CLUT select, the
+ * blend factor, transparency colorkey enable, and
+ * attenuation enable (DM6446 only) bits in the
+ * OSDWIN1MD register.
+ */
+ _osd_set_osd_clut(sd, OSDWIN_OSD1,
+ osdwin_state->clut);
+ _osd_set_blending_factor(sd, OSDWIN_OSD1,
+ osdwin_state->blend);
+ if (osdwin_state->colorkey_blending) {
+ _osd_enable_color_key(sd, OSDWIN_OSD1,
+ osdwin_state->
+ colorkey,
+ lconfig->pixfmt);
+ } else
+ _osd_disable_color_key(sd, OSDWIN_OSD1);
+ _osd_set_rec601_attenuation(sd, OSDWIN_OSD1,
+ osdwin_state->
+ rec601_attenuation);
+ } else if ((lconfig->pixfmt == PIXFMT_OSD_ATTR) &&
+ (cfg->pixfmt != PIXFMT_OSD_ATTR)) {
+ /*
+ * We just switched OSD1 from normal mode to attribute
+ * mode, so we must initialize the blink enable and
+ * blink interval bits in the OSDATRMD register.
+ */
+ _osd_set_blink_attribute(sd, osd->is_blinking,
+ osd->blink);
+ }
+ }
+
+ /*
+ * If we just switched to a 1-, 2-, or 4-bits-per-pixel bitmap format
+ * then configure a default palette map.
+ */
+ if ((lconfig->pixfmt != cfg->pixfmt) &&
+ ((lconfig->pixfmt == PIXFMT_1BPP) ||
+ (lconfig->pixfmt == PIXFMT_2BPP) ||
+ (lconfig->pixfmt == PIXFMT_4BPP))) {
+ enum osd_win_layer osdwin =
+ ((layer == WIN_OSD0) ? OSDWIN_OSD0 : OSDWIN_OSD1);
+ struct osd_osdwin_state *osdwin_state =
+ &osd->osdwin[osdwin];
+ unsigned char clut_index;
+ unsigned char clut_entries = 0;
+
+ switch (lconfig->pixfmt) {
+ case PIXFMT_1BPP:
+ clut_entries = 2;
+ break;
+ case PIXFMT_2BPP:
+ clut_entries = 4;
+ break;
+ case PIXFMT_4BPP:
+ clut_entries = 16;
+ break;
+ default:
+ break;
+ }
+ /*
+ * The default palette map maps the pixel value to the clut
+ * index, i.e. pixel value 0 maps to clut entry 0, pixel value
+ * 1 maps to clut entry 1, etc.
+ */
+ for (clut_index = 0; clut_index < 16; clut_index++) {
+ osdwin_state->palette_map[clut_index] = clut_index;
+ if (clut_index < clut_entries) {
+ _osd_set_palette_map(sd, osdwin, clut_index,
+ clut_index,
+ lconfig->pixfmt);
+ }
+ }
+ }
+
+ *cfg = *lconfig;
+ /* DM6446: configure the RGB888 enable and window selection */
+ if (osd->win[WIN_VID0].lconfig.pixfmt == PIXFMT_RGB888)
+ _osd_enable_vid_rgb888(sd, WIN_VID0);
+ else if (osd->win[WIN_VID1].lconfig.pixfmt == PIXFMT_RGB888)
+ _osd_enable_vid_rgb888(sd, WIN_VID1);
+ else
+ _osd_disable_vid_rgb888(sd);
+
+ if (layer == WIN_VID0) {
+ osd->pingpong =
+ _osd_dm6446_vid0_pingpong(sd, osd->field_inversion,
+ win->fb_base_phys,
+ cfg);
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+
+ return 0;
+}
+
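+/*
+ * osd_init_layer()
+ * Disables the layer and restores its zoom, start address and window
+ * configuration to driver defaults
+ */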
+static void osd_init_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ enum osd_win_layer osdwin;
+ struct osd_osdwin_state *osdwin_state;
+ struct osd_layer_config *cfg = &win->lconfig;
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ win->is_enabled = 0;
+ _osd_disable_layer(sd, layer);
+
+ win->h_zoom = ZOOM_X1;
+ win->v_zoom = ZOOM_X1;
+ _osd_set_zoom(sd, layer, win->h_zoom, win->v_zoom);
+
+ win->fb_base_phys = 0;
+ _osd_start_layer(sd, layer, win->fb_base_phys, 0);
+
+ cfg->line_length = 0;
+ cfg->xsize = 0;
+ cfg->ysize = 0;
+ cfg->xpos = 0;
+ cfg->ypos = 0;
+ cfg->interlaced = 0;
+ switch (layer) {
+ case WIN_OSD0:
+ case WIN_OSD1:
+ osdwin = (layer == WIN_OSD0) ? OSDWIN_OSD0 : OSDWIN_OSD1;
+ osdwin_state = &osd->osdwin[osdwin];
+ /*
+ * Other code relies on the fact that OSD windows default to a
+ * bitmap pixel format when they are deallocated, so don't
+ * change this default pixel format.
+ */
+ cfg->pixfmt = PIXFMT_8BPP;
+ _osd_set_layer_config(sd, layer, cfg);
+ osdwin_state->clut = RAM_CLUT;
+ _osd_set_osd_clut(sd, osdwin, osdwin_state->clut);
+ osdwin_state->colorkey_blending = 0;
+ _osd_disable_color_key(sd, osdwin);
+ osdwin_state->blend = OSD_8_VID_0;
+ _osd_set_blending_factor(sd, osdwin, osdwin_state->blend);
+ osdwin_state->rec601_attenuation = 0;
+ _osd_set_rec601_attenuation(sd, osdwin,
+ osdwin_state->
+ rec601_attenuation);
+ if (osdwin == OSDWIN_OSD1) {
+ osd->is_blinking = 0;
+ osd->blink = BLINK_X1;
+ }
+ break;
+ case WIN_VID0:
+ case WIN_VID1:
+ cfg->pixfmt = osd->yc_pixfmt;
+ _osd_set_layer_config(sd, layer, cfg);
+ break;
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
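+/*
+ * osd_release_layer()
+ * Re-initializes the layer to its defaults and marks it unallocated
+ */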
+static void osd_release_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ if (!win->is_allocated) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+ osd_init_layer(sd, layer);
+ spin_lock_irqsave(&osd->lock, flags);
+
+ win->is_allocated = 0;
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+}
+
+static int osd_request_layer(struct osd_state *sd, enum osd_layer layer)
+{
+ struct osd_state *osd = sd;
+ struct osd_window_state *win = &osd->win[layer];
+ unsigned long flags;
+
+ spin_lock_irqsave(&osd->lock, flags);
+
+ if (win->is_allocated) {
+ spin_unlock_irqrestore(&osd->lock, flags);
+ return -1;
+ }
+ win->is_allocated = 1;
+
+ spin_unlock_irqrestore(&osd->lock, flags);
+
+ return 0;
+}
+
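+/*
+ * _osd_init()
+ * Resets the main OSD control registers so all windows and the
+ * rectangular cursor start disabled
+ */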
+static void _osd_init(struct osd_state *sd)
+{
+ osd_write(sd, 0, OSD_MODE);
+ osd_write(sd, 0, OSD_VIDWINMD);
+ osd_write(sd, 0, OSD_OSDWIN0MD);
+ osd_write(sd, 0, OSD_OSDWIN1MD);
+ osd_write(sd, 0, OSD_RECTCUR);
+ osd_write(sd, 0, OSD_MISCCTL);
+}
+
+static void osd_set_left_margin(struct osd_state *sd, u32 val)
+{
+ osd_write(sd, val, OSD_BASEPX);
+}
+
+static void osd_set_top_margin(struct osd_state *sd, u32 val)
+{
+ osd_write(sd, val, OSD_BASEPY);
+}
+
+static int osd_initialize(struct osd_state *osd)
+{
+ if (osd == NULL)
+ return -ENODEV;
+ _osd_init(osd);
+
+ /* set default Cb/Cr order */
+ osd->yc_pixfmt = PIXFMT_YCbCrI;
+
+ _osd_set_field_inversion(osd, osd->field_inversion);
+ _osd_set_rom_clut(osd, osd->rom_clut);
+
+ osd_init_layer(osd, WIN_OSD0);
+ osd_init_layer(osd, WIN_VID0);
+ osd_init_layer(osd, WIN_OSD1);
+ osd_init_layer(osd, WIN_VID1);
+
+ return 0;
+}
+
+static const struct vpbe_osd_ops osd_ops = {
+ .initialize = osd_initialize,
+ .request_layer = osd_request_layer,
+ .release_layer = osd_release_layer,
+ .enable_layer = osd_enable_layer,
+ .disable_layer = osd_disable_layer,
+ .set_layer_config = osd_set_layer_config,
+ .get_layer_config = osd_get_layer_config,
+ .start_layer = osd_start_layer,
+ .set_left_margin = osd_set_left_margin,
+ .set_top_margin = osd_set_top_margin,
+};
+
+static int osd_probe(struct platform_device *pdev)
+{
+ struct osd_platform_data *pdata;
+ struct osd_state *osd;
+ struct resource *res;
+ int ret = 0;
+
+ osd = kzalloc(sizeof(struct osd_state), GFP_KERNEL);
+ if (osd == NULL)
+ return -ENOMEM;
+
+ osd->dev = &pdev->dev;
+ pdata = (struct osd_platform_data *)pdev->dev.platform_data;
+ if (NULL == pdata) {
+ dev_err(osd->dev, "No platform data defined for OSD"
+ " sub device\n");
+ ret = -ENOENT;
+ goto free_mem;
+ }
+ osd->vpbe_type = (enum vpbe_version)pdata->vpbe_type;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(osd->dev, "Unable to get OSD register address map\n");
+ ret = -ENODEV;
+ goto free_mem;
+ }
+ osd->osd_base_phys = res->start;
+ osd->osd_size = res->end - res->start + 1;
+ if (!request_mem_region(osd->osd_base_phys, osd->osd_size,
+ MODULE_NAME)) {
+ dev_err(osd->dev, "Unable to reserve OSD MMIO region\n");
+ ret = -ENODEV;
+ goto free_mem;
+ }
+ osd->osd_base = (unsigned long)ioremap_nocache(res->start,
+ osd->osd_size);
+ if (!osd->osd_base) {
+ dev_err(osd->dev, "Unable to map the OSD region\n");
+ ret = -ENODEV;
+ goto release_mem_region;
+ }
+ spin_lock_init(&osd->lock);
+ osd->ops = osd_ops;
+ platform_set_drvdata(pdev, osd);
+ dev_notice(osd->dev, "OSD sub device probe success\n");
+ return ret;
+
+release_mem_region:
+ release_mem_region(osd->osd_base_phys, osd->osd_size);
+free_mem:
+ kfree(osd);
+ return ret;
+}
+
+static int osd_remove(struct platform_device *pdev)
+{
+ struct osd_state *osd = platform_get_drvdata(pdev);
+
+ iounmap((void *)osd->osd_base);
+ release_mem_region(osd->osd_base_phys, osd->osd_size);
+ kfree(osd);
+ return 0;
+}
+
+static struct platform_driver osd_driver = {
+ .probe = osd_probe,
+ .remove = osd_remove,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int osd_init(void)
+{
+ if (platform_driver_register(&osd_driver)) {
+ printk(KERN_ERR "Unable to register davinci osd driver\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void osd_exit(void)
+{
+ platform_driver_unregister(&osd_driver);
+}
+
+module_init(osd_init);
+module_exit(osd_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DaVinci OSD Manager Driver");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/video/davinci/vpbe_osd_regs.h b/drivers/media/video/davinci/vpbe_osd_regs.h
new file mode 100644
index 00000000000..584520f3af6
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_osd_regs.h
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2006-2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _VPBE_OSD_REGS_H
+#define _VPBE_OSD_REGS_H
+
+/* VPBE Global Registers */
+#define VPBE_PID 0x0
+#define VPBE_PCR 0x4
+
+/* VPSS CLock Registers */
+#define VPSSCLK_PID 0x00
+#define VPSSCLK_CLKCTRL 0x04
+
+/* VPSS Buffer Logic Registers */
+#define VPSSBL_PID 0x00
+#define VPSSBL_PCR 0x04
+#define VPSSBL_BCR 0x08
+#define VPSSBL_INTSTAT 0x0C
+#define VPSSBL_INTSEL 0x10
+#define VPSSBL_EVTSEL 0x14
+#define VPSSBL_MEMCTRL 0x18
+#define VPSSBL_CCDCMUX 0x1C
+
+/* DM365 ISP5 system configuration */
+#define ISP5_PID 0x0
+#define ISP5_PCCR 0x4
+#define ISP5_BCR 0x8
+#define ISP5_INTSTAT 0xC
+#define ISP5_INTSEL1 0x10
+#define ISP5_INTSEL2 0x14
+#define ISP5_INTSEL3 0x18
+#define ISP5_EVTSEL 0x1c
+#define ISP5_CCDCMUX 0x20
+
+/* VPBE On-Screen Display Subsystem Registers (OSD) */
+#define OSD_MODE 0x00
+#define OSD_VIDWINMD 0x04
+#define OSD_OSDWIN0MD 0x08
+#define OSD_OSDWIN1MD 0x0C
+#define OSD_OSDATRMD 0x0C
+#define OSD_RECTCUR 0x10
+#define OSD_VIDWIN0OFST 0x18
+#define OSD_VIDWIN1OFST 0x1C
+#define OSD_OSDWIN0OFST 0x20
+#define OSD_OSDWIN1OFST 0x24
+#define OSD_VIDWINADH 0x28
+#define OSD_VIDWIN0ADL 0x2C
+#define OSD_VIDWIN0ADR 0x2C
+#define OSD_VIDWIN1ADL 0x30
+#define OSD_VIDWIN1ADR 0x30
+#define OSD_OSDWINADH 0x34
+#define OSD_OSDWIN0ADL 0x38
+#define OSD_OSDWIN0ADR 0x38
+#define OSD_OSDWIN1ADL 0x3C
+#define OSD_OSDWIN1ADR 0x3C
+#define OSD_BASEPX 0x40
+#define OSD_BASEPY 0x44
+#define OSD_VIDWIN0XP 0x48
+#define OSD_VIDWIN0YP 0x4C
+#define OSD_VIDWIN0XL 0x50
+#define OSD_VIDWIN0YL 0x54
+#define OSD_VIDWIN1XP 0x58
+#define OSD_VIDWIN1YP 0x5C
+#define OSD_VIDWIN1XL 0x60
+#define OSD_VIDWIN1YL 0x64
+#define OSD_OSDWIN0XP 0x68
+#define OSD_OSDWIN0YP 0x6C
+#define OSD_OSDWIN0XL 0x70
+#define OSD_OSDWIN0YL 0x74
+#define OSD_OSDWIN1XP 0x78
+#define OSD_OSDWIN1YP 0x7C
+#define OSD_OSDWIN1XL 0x80
+#define OSD_OSDWIN1YL 0x84
+#define OSD_CURXP 0x88
+#define OSD_CURYP 0x8C
+#define OSD_CURXL 0x90
+#define OSD_CURYL 0x94
+#define OSD_W0BMP01 0xA0
+#define OSD_W0BMP23 0xA4
+#define OSD_W0BMP45 0xA8
+#define OSD_W0BMP67 0xAC
+#define OSD_W0BMP89 0xB0
+#define OSD_W0BMPAB 0xB4
+#define OSD_W0BMPCD 0xB8
+#define OSD_W0BMPEF 0xBC
+#define OSD_W1BMP01 0xC0
+#define OSD_W1BMP23 0xC4
+#define OSD_W1BMP45 0xC8
+#define OSD_W1BMP67 0xCC
+#define OSD_W1BMP89 0xD0
+#define OSD_W1BMPAB 0xD4
+#define OSD_W1BMPCD 0xD8
+#define OSD_W1BMPEF 0xDC
+#define OSD_VBNDRY 0xE0
+#define OSD_EXTMODE 0xE4
+#define OSD_MISCCTL 0xE8
+#define OSD_CLUTRAMYCB 0xEC
+#define OSD_CLUTRAMCR 0xF0
+#define OSD_TRANSPVAL 0xF4
+#define OSD_TRANSPVALL 0xF4
+#define OSD_TRANSPVALU 0xF8
+#define OSD_TRANSPBMPIDX 0xFC
+#define OSD_PPVWIN0ADR 0xFC
+
+/* bit definitions */
+#define VPBE_PCR_VENC_DIV (1 << 1)
+#define VPBE_PCR_CLK_OFF (1 << 0)
+
+#define VPSSBL_INTSTAT_HSSIINT (1 << 14)
+#define VPSSBL_INTSTAT_CFALDINT (1 << 13)
+#define VPSSBL_INTSTAT_IPIPE_INT5 (1 << 12)
+#define VPSSBL_INTSTAT_IPIPE_INT4 (1 << 11)
+#define VPSSBL_INTSTAT_IPIPE_INT3 (1 << 10)
+#define VPSSBL_INTSTAT_IPIPE_INT2 (1 << 9)
+#define VPSSBL_INTSTAT_IPIPE_INT1 (1 << 8)
+#define VPSSBL_INTSTAT_IPIPE_INT0 (1 << 7)
+#define VPSSBL_INTSTAT_IPIPEIFINT (1 << 6)
+#define VPSSBL_INTSTAT_OSDINT (1 << 5)
+#define VPSSBL_INTSTAT_VENCINT (1 << 4)
+#define VPSSBL_INTSTAT_H3AINT (1 << 3)
+#define VPSSBL_INTSTAT_CCDC_VDINT2 (1 << 2)
+#define VPSSBL_INTSTAT_CCDC_VDINT1 (1 << 1)
+#define VPSSBL_INTSTAT_CCDC_VDINT0 (1 << 0)
+
+/* DM365 ISP5 bit definitions */
+#define ISP5_INTSTAT_VENCINT (1 << 21)
+#define ISP5_INTSTAT_OSDINT (1 << 20)
+
+/* VMOD TVTYP options for HDMD=0 */
+#define SDTV_NTSC 0
+#define SDTV_PAL 1
+/* VMOD TVTYP options for HDMD=1 */
+#define HDTV_525P 0
+#define HDTV_625P 1
+#define HDTV_1080I 2
+#define HDTV_720P 3
+
+#define OSD_MODE_CS (1 << 15)
+#define OSD_MODE_OVRSZ (1 << 14)
+#define OSD_MODE_OHRSZ (1 << 13)
+#define OSD_MODE_EF (1 << 12)
+#define OSD_MODE_VVRSZ (1 << 11)
+#define OSD_MODE_VHRSZ (1 << 10)
+#define OSD_MODE_FSINV (1 << 9)
+#define OSD_MODE_BCLUT (1 << 8)
+#define OSD_MODE_CABG_SHIFT 0
+#define OSD_MODE_CABG (0xff << 0)
+
+#define OSD_VIDWINMD_VFINV (1 << 15)
+#define OSD_VIDWINMD_V1EFC (1 << 14)
+#define OSD_VIDWINMD_VHZ1_SHIFT 12
+#define OSD_VIDWINMD_VHZ1 (3 << 12)
+#define OSD_VIDWINMD_VVZ1_SHIFT 10
+#define OSD_VIDWINMD_VVZ1 (3 << 10)
+#define OSD_VIDWINMD_VFF1 (1 << 9)
+#define OSD_VIDWINMD_ACT1 (1 << 8)
+#define OSD_VIDWINMD_V0EFC (1 << 6)
+#define OSD_VIDWINMD_VHZ0_SHIFT 4
+#define OSD_VIDWINMD_VHZ0 (3 << 4)
+#define OSD_VIDWINMD_VVZ0_SHIFT 2
+#define OSD_VIDWINMD_VVZ0 (3 << 2)
+#define OSD_VIDWINMD_VFF0 (1 << 1)
+#define OSD_VIDWINMD_ACT0 (1 << 0)
+
+#define OSD_OSDWIN0MD_ATN0E (1 << 14)
+#define OSD_OSDWIN0MD_RGB0E (1 << 13)
+#define OSD_OSDWIN0MD_BMP0MD_SHIFT 13
+#define OSD_OSDWIN0MD_BMP0MD (3 << 13)
+#define OSD_OSDWIN0MD_CLUTS0 (1 << 12)
+#define OSD_OSDWIN0MD_OHZ0_SHIFT 10
+#define OSD_OSDWIN0MD_OHZ0 (3 << 10)
+#define OSD_OSDWIN0MD_OVZ0_SHIFT 8
+#define OSD_OSDWIN0MD_OVZ0 (3 << 8)
+#define OSD_OSDWIN0MD_BMW0_SHIFT 6
+#define OSD_OSDWIN0MD_BMW0 (3 << 6)
+#define OSD_OSDWIN0MD_BLND0_SHIFT 3
+#define OSD_OSDWIN0MD_BLND0 (7 << 3)
+#define OSD_OSDWIN0MD_TE0 (1 << 2)
+#define OSD_OSDWIN0MD_OFF0 (1 << 1)
+#define OSD_OSDWIN0MD_OACT0 (1 << 0)
+
+#define OSD_OSDWIN1MD_OASW (1 << 15)
+#define OSD_OSDWIN1MD_ATN1E (1 << 14)
+#define OSD_OSDWIN1MD_RGB1E (1 << 13)
+#define OSD_OSDWIN1MD_BMP1MD_SHIFT 13
+#define OSD_OSDWIN1MD_BMP1MD (3 << 13)
+#define OSD_OSDWIN1MD_CLUTS1 (1 << 12)
+#define OSD_OSDWIN1MD_OHZ1_SHIFT 10
+#define OSD_OSDWIN1MD_OHZ1 (3 << 10)
+#define OSD_OSDWIN1MD_OVZ1_SHIFT 8
+#define OSD_OSDWIN1MD_OVZ1 (3 << 8)
+#define OSD_OSDWIN1MD_BMW1_SHIFT 6
+#define OSD_OSDWIN1MD_BMW1 (3 << 6)
+#define OSD_OSDWIN1MD_BLND1_SHIFT 3
+#define OSD_OSDWIN1MD_BLND1 (7 << 3)
+#define OSD_OSDWIN1MD_TE1 (1 << 2)
+#define OSD_OSDWIN1MD_OFF1 (1 << 1)
+#define OSD_OSDWIN1MD_OACT1 (1 << 0)
+
+#define OSD_OSDATRMD_OASW (1 << 15)
+#define OSD_OSDATRMD_OHZA_SHIFT 10
+#define OSD_OSDATRMD_OHZA (3 << 10)
+#define OSD_OSDATRMD_OVZA_SHIFT 8
+#define OSD_OSDATRMD_OVZA (3 << 8)
+#define OSD_OSDATRMD_BLNKINT_SHIFT 6
+#define OSD_OSDATRMD_BLNKINT (3 << 6)
+#define OSD_OSDATRMD_OFFA (1 << 1)
+#define OSD_OSDATRMD_BLNK (1 << 0)
+
+#define OSD_RECTCUR_RCAD_SHIFT 8
+#define OSD_RECTCUR_RCAD (0xff << 8)
+#define OSD_RECTCUR_CLUTSR (1 << 7)
+#define OSD_RECTCUR_RCHW_SHIFT 4
+#define OSD_RECTCUR_RCHW (7 << 4)
+#define OSD_RECTCUR_RCVW_SHIFT 1
+#define OSD_RECTCUR_RCVW (7 << 1)
+#define OSD_RECTCUR_RCACT (1 << 0)
+
+#define OSD_VIDWIN0OFST_V0LO (0x1ff << 0)
+
+#define OSD_VIDWIN1OFST_V1LO (0x1ff << 0)
+
+#define OSD_OSDWIN0OFST_O0LO (0x1ff << 0)
+
+#define OSD_OSDWIN1OFST_O1LO (0x1ff << 0)
+
+#define OSD_WINOFST_AH_SHIFT 9
+
+#define OSD_VIDWIN0OFST_V0AH (0xf << 9)
+#define OSD_VIDWIN1OFST_V1AH (0xf << 9)
+#define OSD_OSDWIN0OFST_O0AH (0xf << 9)
+#define OSD_OSDWIN1OFST_O1AH (0xf << 9)
+
+#define OSD_VIDWINADH_V1AH_SHIFT 8
+#define OSD_VIDWINADH_V1AH (0x7f << 8)
+#define OSD_VIDWINADH_V0AH_SHIFT 0
+#define OSD_VIDWINADH_V0AH (0x7f << 0)
+
+#define OSD_VIDWIN0ADL_V0AL (0xffff << 0)
+
+#define OSD_VIDWIN1ADL_V1AL (0xffff << 0)
+
+#define OSD_OSDWINADH_O1AH_SHIFT 8
+#define OSD_OSDWINADH_O1AH (0x7f << 8)
+#define OSD_OSDWINADH_O0AH_SHIFT 0
+#define OSD_OSDWINADH_O0AH (0x7f << 0)
+
+#define OSD_OSDWIN0ADL_O0AL (0xffff << 0)
+
+#define OSD_OSDWIN1ADL_O1AL (0xffff << 0)
+
+#define OSD_BASEPX_BPX (0x3ff << 0)
+
+#define OSD_BASEPY_BPY (0x1ff << 0)
+
+#define OSD_VIDWIN0XP_V0X (0x7ff << 0)
+
+#define OSD_VIDWIN0YP_V0Y (0x7ff << 0)
+
+#define OSD_VIDWIN0XL_V0W (0x7ff << 0)
+
+#define OSD_VIDWIN0YL_V0H (0x7ff << 0)
+
+#define OSD_VIDWIN1XP_V1X (0x7ff << 0)
+
+#define OSD_VIDWIN1YP_V1Y (0x7ff << 0)
+
+#define OSD_VIDWIN1XL_V1W (0x7ff << 0)
+
+#define OSD_VIDWIN1YL_V1H (0x7ff << 0)
+
+#define OSD_OSDWIN0XP_W0X (0x7ff << 0)
+
+#define OSD_OSDWIN0YP_W0Y (0x7ff << 0)
+
+#define OSD_OSDWIN0XL_W0W (0x7ff << 0)
+
+#define OSD_OSDWIN0YL_W0H (0x7ff << 0)
+
+#define OSD_OSDWIN1XP_W1X (0x7ff << 0)
+
+#define OSD_OSDWIN1YP_W1Y (0x7ff << 0)
+
+#define OSD_OSDWIN1XL_W1W (0x7ff << 0)
+
+#define OSD_OSDWIN1YL_W1H (0x7ff << 0)
+
+#define OSD_CURXP_RCSX (0x7ff << 0)
+
+#define OSD_CURYP_RCSY (0x7ff << 0)
+
+#define OSD_CURXL_RCSW (0x7ff << 0)
+
+#define OSD_CURYL_RCSH (0x7ff << 0)
+
+#define OSD_EXTMODE_EXPMDSEL (1 << 15)
+#define OSD_EXTMODE_SCRNHEXP_SHIFT 13
+#define OSD_EXTMODE_SCRNHEXP (3 << 13)
+#define OSD_EXTMODE_SCRNVEXP (1 << 12)
+#define OSD_EXTMODE_OSD1BLDCHR (1 << 11)
+#define OSD_EXTMODE_OSD0BLDCHR (1 << 10)
+#define OSD_EXTMODE_ATNOSD1EN (1 << 9)
+#define OSD_EXTMODE_ATNOSD0EN (1 << 8)
+#define OSD_EXTMODE_OSDHRSZ15 (1 << 7)
+#define OSD_EXTMODE_VIDHRSZ15 (1 << 6)
+#define OSD_EXTMODE_ZMFILV1HEN (1 << 5)
+#define OSD_EXTMODE_ZMFILV1VEN (1 << 4)
+#define OSD_EXTMODE_ZMFILV0HEN (1 << 3)
+#define OSD_EXTMODE_ZMFILV0VEN (1 << 2)
+#define OSD_EXTMODE_EXPFILHEN (1 << 1)
+#define OSD_EXTMODE_EXPFILVEN (1 << 0)
+
+#define OSD_MISCCTL_BLDSEL (1 << 15)
+#define OSD_MISCCTL_S420D (1 << 14)
+#define OSD_MISCCTL_BMAPT (1 << 13)
+#define OSD_MISCCTL_DM365M (1 << 12)
+#define OSD_MISCCTL_RGBEN (1 << 7)
+#define OSD_MISCCTL_RGBWIN (1 << 6)
+#define OSD_MISCCTL_DMANG (1 << 6)
+#define OSD_MISCCTL_TMON (1 << 5)
+#define OSD_MISCCTL_RSEL (1 << 4)
+#define OSD_MISCCTL_CPBSY (1 << 3)
+#define OSD_MISCCTL_PPSW (1 << 2)
+#define OSD_MISCCTL_PPRV (1 << 1)
+
+#define OSD_CLUTRAMYCB_Y_SHIFT 8
+#define OSD_CLUTRAMYCB_Y (0xff << 8)
+#define OSD_CLUTRAMYCB_CB_SHIFT 0
+#define OSD_CLUTRAMYCB_CB (0xff << 0)
+
+#define OSD_CLUTRAMCR_CR_SHIFT 8
+#define OSD_CLUTRAMCR_CR (0xff << 8)
+#define OSD_CLUTRAMCR_CADDR_SHIFT 0
+#define OSD_CLUTRAMCR_CADDR (0xff << 0)
+
+#define OSD_TRANSPVAL_RGBTRANS (0xffff << 0)
+
+#define OSD_TRANSPVALL_RGBL (0xffff << 0)
+
+#define OSD_TRANSPVALU_Y_SHIFT 8
+#define OSD_TRANSPVALU_Y (0xff << 8)
+#define OSD_TRANSPVALU_RGBU_SHIFT 0
+#define OSD_TRANSPVALU_RGBU (0xff << 0)
+
+#define OSD_TRANSPBMPIDX_BMP1_SHIFT 8
+#define OSD_TRANSPBMPIDX_BMP1 (0xff << 8)
+#define OSD_TRANSPBMPIDX_BMP0_SHIFT 0
+#define OSD_TRANSPBMPIDX_BMP0 0xff
+
+#endif /* _DAVINCI_VPBE_H_ */
diff --git a/drivers/media/video/davinci/vpbe_venc.c b/drivers/media/video/davinci/vpbe_venc.c
new file mode 100644
index 00000000000..03a3e5c65ee
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_venc.c
@@ -0,0 +1,566 @@
+/*
+ * Copyright (C) 2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+
+#include <mach/hardware.h>
+#include <mach/mux.h>
+#include <mach/io.h>
+#include <mach/i2c.h>
+
+#include <linux/io.h>
+
+#include <media/davinci/vpbe_types.h>
+#include <media/davinci/vpbe_venc.h>
+#include <media/davinci/vpss.h>
+#include <media/v4l2-device.h>
+
+#include "vpbe_venc_regs.h"
+
+#define MODULE_NAME VPBE_VENC_SUBDEV_NAME
+
+static int debug = 2;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level 0-2");
+
+struct venc_state {
+ struct v4l2_subdev sd;
+ struct venc_callback *callback;
+ struct venc_platform_data *pdata;
+ struct device *pdev;
+ u32 output;
+ v4l2_std_id std;
+ spinlock_t lock;
+ void __iomem *venc_base;
+ void __iomem *vdaccfg_reg;
+};
+
+static inline struct venc_state *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct venc_state, sd);
+}
+
+static inline u32 venc_read(struct v4l2_subdev *sd, u32 offset)
+{
+ struct venc_state *venc = to_state(sd);
+
+ return readl(venc->venc_base + offset);
+}
+
+static inline u32 venc_write(struct v4l2_subdev *sd, u32 offset, u32 val)
+{
+ struct venc_state *venc = to_state(sd);
+
+ writel(val, (venc->venc_base + offset));
+
+ return val;
+}
+
+static inline u32 venc_modify(struct v4l2_subdev *sd, u32 offset,
+ u32 val, u32 mask)
+{
+ u32 new_val = (venc_read(sd, offset) & ~mask) | (val & mask);
+
+ venc_write(sd, offset, new_val);
+
+ return new_val;
+}
+
+static inline u32 vdaccfg_write(struct v4l2_subdev *sd, u32 val)
+{
+ struct venc_state *venc = to_state(sd);
+
+ writel(val, venc->vdaccfg_reg);
+
+ val = readl(venc->vdaccfg_reg);
+
+ return val;
+}
+
+/*
+ * Set the DAC routing of the VPBE for the selected output
+ * (0 = Composite, 1 = S-Video, 2 = Component).
+ */
+static int venc_set_dac(struct v4l2_subdev *sd, u32 out_index)
+{
+ switch (out_index) {
+ case 0:
+ v4l2_dbg(debug, 1, sd, "Setting output to Composite\n");
+ venc_write(sd, VENC_DACSEL, 0);
+ break;
+ case 1:
+ v4l2_dbg(debug, 1, sd, "Setting output to S-Video\n");
+ venc_write(sd, VENC_DACSEL, 0x210);
+ break;
+ case 2:
+ venc_write(sd, VENC_DACSEL, 0x543);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable)
+{
+ v4l2_dbg(debug, 2, sd, "venc_enabledigitaloutput\n");
+
+ if (benable) {
+ venc_write(sd, VENC_VMOD, 0);
+ venc_write(sd, VENC_CVBS, 0);
+ venc_write(sd, VENC_LCDOUT, 0);
+ venc_write(sd, VENC_HSPLS, 0);
+ venc_write(sd, VENC_HSTART, 0);
+ venc_write(sd, VENC_HVALID, 0);
+ venc_write(sd, VENC_HINT, 0);
+ venc_write(sd, VENC_VSPLS, 0);
+ venc_write(sd, VENC_VSTART, 0);
+ venc_write(sd, VENC_VVALID, 0);
+ venc_write(sd, VENC_VINT, 0);
+ venc_write(sd, VENC_YCCCTL, 0);
+ venc_write(sd, VENC_DACSEL, 0);
+
+ } else {
+ venc_write(sd, VENC_VMOD, 0);
+ /* disable VCLK output pin enable */
+ venc_write(sd, VENC_VIDCTL, 0x141);
+
+ /* Disable output sync pins */
+ venc_write(sd, VENC_SYNCCTL, 0);
+
+ /* Disable DCLOCK */
+ venc_write(sd, VENC_DCLKCTL, 0);
+ venc_write(sd, VENC_DRGBX1, 0x0000057C);
+
+ /* Disable LCD output control (accepting default polarity) */
+ venc_write(sd, VENC_LCDOUT, 0);
+ venc_write(sd, VENC_CMPNT, 0x100);
+ venc_write(sd, VENC_HSPLS, 0);
+ venc_write(sd, VENC_HINT, 0);
+ venc_write(sd, VENC_HSTART, 0);
+ venc_write(sd, VENC_HVALID, 0);
+
+ venc_write(sd, VENC_VSPLS, 0);
+ venc_write(sd, VENC_VINT, 0);
+ venc_write(sd, VENC_VSTART, 0);
+ venc_write(sd, VENC_VVALID, 0);
+
+ venc_write(sd, VENC_HSDLY, 0);
+ venc_write(sd, VENC_VSDLY, 0);
+
+ venc_write(sd, VENC_YCCCTL, 0);
+ venc_write(sd, VENC_VSTARTA, 0);
+
+ /* Set OSD clock and OSD Sync Advance registers */
+ venc_write(sd, VENC_OSDCLK0, 1);
+ venc_write(sd, VENC_OSDCLK1, 2);
+ }
+}
+
+/*
+ * setting NTSC mode
+ */
+static int venc_set_ntsc(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ v4l2_dbg(debug, 2, sd, "venc_set_ntsc\n");
+
+ /* Setup clock at VPSS & VENC for SD */
+ vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
+ if (pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_525_60) < 0)
+ return -EINVAL;
+
+ venc_enabledigitaloutput(sd, 0);
+
+ /* Set VENC CLK DIV to 1; the final clock is 54 MHz */
+ venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
+ /* Set REC656 Mode */
+ venc_write(sd, VENC_YCCCTL, 0x1);
+ venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAUPS);
+
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_VMD), VENC_VMOD_VMD);
+ venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_write(sd, VENC_DACTST, 0x0);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+/*
+ * setting PAL mode
+ */
+static int venc_set_pal(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+
+ v4l2_dbg(debug, 2, sd, "venc_set_pal\n");
+
+ /* Setup clock at VPSS & VENC for SD */
+ vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
+ if (venc->pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_625_50) < 0)
+ return -EINVAL;
+
+ venc_enabledigitaloutput(sd, 0);
+
+ /* Set VENC CLK DIV to 1; the final clock is 54 MHz */
+ venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
+ /* Set REC656 Mode */
+ venc_write(sd, VENC_YCCCTL, 0x1);
+
+ venc_modify(sd, VENC_SYNCCTL, 1 << VENC_SYNCCTL_OVD_SHIFT,
+ VENC_SYNCCTL_OVD);
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD,
+ (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD,
+ (0 << VENC_VMOD_VMD), VENC_VMOD_VMD);
+ venc_modify(sd, VENC_VMOD,
+ (1 << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_write(sd, VENC_DACTST, 0x0);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+/*
+ * venc_set_480p59_94
+ *
+ * This function configures the video encoder to EDTV(525p) component setting.
+ */
+static int venc_set_480p59_94(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ v4l2_dbg(debug, 2, sd, "venc_set_480p59_94\n");
+
+ /* Setup clock at VPSS & VENC for SD */
+ if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_480P59_94) < 0)
+ return -EINVAL;
+
+ venc_enabledigitaloutput(sd, 0);
+
+ venc_write(sd, VENC_OSDCLK0, 0);
+ venc_write(sd, VENC_OSDCLK1, 1);
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
+ VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
+ VENC_VDPRO_DAUPS);
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+ venc_modify(sd, VENC_VMOD, (HDTV_525P << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 <<
+ VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD);
+
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+/*
+ * venc_set_576p50
+ *
+ * This function configures the video encoder to the EDTV (625p/576p50)
+ * component setting.
+ */
+static int venc_set_576p50(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ struct venc_platform_data *pdata = venc->pdata;
+
+ v4l2_dbg(debug, 2, sd, "venc_set_576p50\n");
+
+ /* Setup clock at VPSS & VENC for SD */
+ if (pdata->setup_clock(VPBE_ENC_DV_PRESET, V4L2_DV_576P50) < 0)
+ return -EINVAL;
+
+ venc_enabledigitaloutput(sd, 0);
+
+ venc_write(sd, VENC_OSDCLK0, 0);
+ venc_write(sd, VENC_OSDCLK1, 1);
+
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
+ VENC_VDPRO_DAFRQ);
+ venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
+ VENC_VDPRO_DAUPS);
+
+ venc_write(sd, VENC_VMOD, 0);
+ venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
+ VENC_VMOD_VIE);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
+ venc_modify(sd, VENC_VMOD, (HDTV_625P << VENC_VMOD_TVTYP_SHIFT),
+ VENC_VMOD_TVTYP);
+
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 <<
+ VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD);
+ venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
+
+ return 0;
+}
+
+static int venc_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm)
+{
+ v4l2_dbg(debug, 1, sd, "venc_s_std_output\n");
+
+ if (norm & V4L2_STD_525_60)
+ return venc_set_ntsc(sd);
+ else if (norm & V4L2_STD_625_50)
+ return venc_set_pal(sd);
+
+ return -EINVAL;
+}
+
+static int venc_s_dv_preset(struct v4l2_subdev *sd,
+ struct v4l2_dv_preset *dv_preset)
+{
+ v4l2_dbg(debug, 1, sd, "venc_s_dv_preset\n");
+
+ if (dv_preset->preset == V4L2_DV_576P50)
+ return venc_set_576p50(sd);
+ else if (dv_preset->preset == V4L2_DV_480P59_94)
+ return venc_set_480p59_94(sd);
+
+ return -EINVAL;
+}
+
+static int venc_s_routing(struct v4l2_subdev *sd, u32 input, u32 output,
+ u32 config)
+{
+ struct venc_state *venc = to_state(sd);
+ int ret;
+
+ v4l2_dbg(debug, 1, sd, "venc_s_routing\n");
+
+ ret = venc_set_dac(sd, output);
+ if (!ret)
+ venc->output = output;
+
+ return ret;
+}
+
+static long venc_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd,
+ void *arg)
+{
+ u32 val;
+
+ switch (cmd) {
+ case VENC_GET_FLD:
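+ /* report the current field ID bit (FIDST) from the VSTAT register */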
+ val = venc_read(sd, VENC_VSTAT);
+ *((int *)arg) = ((val & VENC_VSTAT_FIDST) ==
+ VENC_VSTAT_FIDST);
+ break;
+ default:
+ v4l2_err(sd, "Wrong IOCTL cmd\n");
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops venc_core_ops = {
+ .ioctl = venc_ioctl,
+};
+
+static const struct v4l2_subdev_video_ops venc_video_ops = {
+ .s_routing = venc_s_routing,
+ .s_std_output = venc_s_std_output,
+ .s_dv_preset = venc_s_dv_preset,
+};
+
+static const struct v4l2_subdev_ops venc_ops = {
+ .core = &venc_core_ops,
+ .video = &venc_video_ops,
+};
+
+static int venc_initialize(struct v4l2_subdev *sd)
+{
+ struct venc_state *venc = to_state(sd);
+ int ret;
+
+ /* Default the output to composite and the standard to NTSC */
+ venc->output = 0;
+ venc->std = V4L2_STD_525_60;
+
+ ret = venc_s_routing(sd, 0, venc->output, 0);
+ if (ret < 0) {
+ v4l2_err(sd, "Error setting output during init\n");
+ return -EINVAL;
+ }
+
+ ret = venc_s_std_output(sd, venc->std);
+ if (ret < 0) {
+ v4l2_err(sd, "Error setting std during init\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int venc_device_get(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct venc_state **venc = data;
+
+ if (strcmp(MODULE_NAME, pdev->name) == 0)
+ *venc = platform_get_drvdata(pdev);
+
+ return 0;
+}
+
+struct v4l2_subdev *venc_sub_dev_init(struct v4l2_device *v4l2_dev,
+ const char *venc_name)
+{
+ struct venc_state *venc = NULL;
+ int err;
+
+ /* look up the venc platform device probed earlier and use its drvdata */
+ err = bus_for_each_dev(&platform_bus_type, NULL, &venc,
+ venc_device_get);
+ if (err < 0 || venc == NULL)
+ return NULL;
+
+ v4l2_subdev_init(&venc->sd, &venc_ops);
+
+ strcpy(venc->sd.name, venc_name);
+ if (v4l2_device_register_subdev(v4l2_dev, &venc->sd) < 0) {
+ v4l2_err(v4l2_dev,
+ "vpbe unable to register venc sub device\n");
+ return NULL;
+ }
+ if (venc_initialize(&venc->sd)) {
+ v4l2_err(v4l2_dev,
+ "vpbe venc initialization failed\n");
+ return NULL;
+ }
+
+ return &venc->sd;
+}
+EXPORT_SYMBOL(venc_sub_dev_init);
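+
+/*
+ * Usage sketch (not part of this patch): a VPBE display/bridge driver is
+ * expected to pick up this sub device after registering its own
+ * struct v4l2_device, roughly like:
+ *
+ *	venc_sd = venc_sub_dev_init(&vpbe_dev->v4l2_dev,
+ *				    VPBE_VENC_SUBDEV_NAME);
+ *	if (venc_sd == NULL)
+ *		return -ENODEV;
+ *
+ * where "vpbe_dev" and "venc_sd" are hypothetical bridge-driver variables.
+ */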
+
+static int venc_probe(struct platform_device *pdev)
+{
+ struct venc_state *venc;
+ struct resource *res;
+ int ret;
+
+ venc = kzalloc(sizeof(struct venc_state), GFP_KERNEL);
+ if (venc == NULL)
+ return -ENOMEM;
+
+ venc->pdev = &pdev->dev;
+ venc->pdata = pdev->dev.platform_data;
+ if (!venc->pdata) {
+ dev_err(venc->pdev,
+ "Unable to get platform data for VENC sub device\n");
+ ret = -ENOENT;
+ goto free_mem;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(venc->pdev,
+ "Unable to get VENC register address map\n");
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res), "venc")) {
+ dev_err(venc->pdev, "Unable to reserve VENC MMIO region\n");
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ venc->venc_base = ioremap_nocache(res->start, resource_size(res));
+ if (!venc->venc_base) {
+ dev_err(venc->pdev, "Unable to map VENC IO space\n");
+ ret = -ENODEV;
+ goto release_venc_mem_region;
+ }
+
+ spin_lock_init(&venc->lock);
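+ /* drvdata is consumed later by venc_sub_dev_init() via venc_device_get() */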
+ platform_set_drvdata(pdev, venc);
+ dev_notice(venc->pdev, "VENC sub device probe success\n");
+ return 0;
+
+release_venc_mem_region:
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+free_mem:
+ kfree(venc);
+ return ret;
+}
+
+static int venc_remove(struct platform_device *pdev)
+{
+ struct venc_state *venc = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iounmap(venc->venc_base);
+ release_mem_region(res->start, resource_size(res));
+ kfree(venc);
+
+ return 0;
+}
+
+static struct platform_driver venc_driver = {
+ .probe = venc_probe,
+ .remove = venc_remove,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int venc_init(void)
+{
+ if (platform_driver_register(&venc_driver)) {
+ printk(KERN_ERR "Unable to register venc driver\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void venc_exit(void)
+{
+ platform_driver_unregister(&venc_driver);
+}
+
+module_init(venc_init);
+module_exit(venc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("VPBE VENC Driver");
+MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/video/davinci/vpbe_venc_regs.h b/drivers/media/video/davinci/vpbe_venc_regs.h
new file mode 100644
index 00000000000..947cb151077
--- /dev/null
+++ b/drivers/media/video/davinci/vpbe_venc_regs.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2006-2010 Texas Instruments Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _VPBE_VENC_REGS_H
+#define _VPBE_VENC_REGS_H
+
+/* VPBE Video Encoder / Digital LCD Subsystem Registers (VENC) */
+#define VENC_VMOD 0x00
+#define VENC_VIDCTL 0x04
+#define VENC_VDPRO 0x08
+#define VENC_SYNCCTL 0x0C
+#define VENC_HSPLS 0x10
+#define VENC_VSPLS 0x14
+#define VENC_HINT 0x18
+#define VENC_HSTART 0x1C
+#define VENC_HVALID 0x20
+#define VENC_VINT 0x24
+#define VENC_VSTART 0x28
+#define VENC_VVALID 0x2C
+#define VENC_HSDLY 0x30
+#define VENC_VSDLY 0x34
+#define VENC_YCCCTL 0x38
+#define VENC_RGBCTL 0x3C
+#define VENC_RGBCLP 0x40
+#define VENC_LINECTL 0x44
+#define VENC_CULLLINE 0x48
+#define VENC_LCDOUT 0x4C
+#define VENC_BRTS 0x50
+#define VENC_BRTW 0x54
+#define VENC_ACCTL 0x58
+#define VENC_PWMP 0x5C
+#define VENC_PWMW 0x60
+#define VENC_DCLKCTL 0x64
+#define VENC_DCLKPTN0 0x68
+#define VENC_DCLKPTN1 0x6C
+#define VENC_DCLKPTN2 0x70
+#define VENC_DCLKPTN3 0x74
+#define VENC_DCLKPTN0A 0x78
+#define VENC_DCLKPTN1A 0x7C
+#define VENC_DCLKPTN2A 0x80
+#define VENC_DCLKPTN3A 0x84
+#define VENC_DCLKHS 0x88
+#define VENC_DCLKHSA 0x8C
+#define VENC_DCLKHR 0x90
+#define VENC_DCLKVS 0x94
+#define VENC_DCLKVR 0x98
+#define VENC_CAPCTL 0x9C
+#define VENC_CAPDO 0xA0
+#define VENC_CAPDE 0xA4
+#define VENC_ATR0 0xA8
+#define VENC_ATR1 0xAC
+#define VENC_ATR2 0xB0
+#define VENC_VSTAT 0xB8
+#define VENC_RAMADR 0xBC
+#define VENC_RAMPORT 0xC0
+#define VENC_DACTST 0xC4
+#define VENC_YCOLVL 0xC8
+#define VENC_SCPROG 0xCC
+#define VENC_CVBS 0xDC
+#define VENC_CMPNT 0xE0
+#define VENC_ETMG0 0xE4
+#define VENC_ETMG1 0xE8
+#define VENC_ETMG2 0xEC
+#define VENC_ETMG3 0xF0
+#define VENC_DACSEL 0xF4
+#define VENC_ARGBX0 0x100
+#define VENC_ARGBX1 0x104
+#define VENC_ARGBX2 0x108
+#define VENC_ARGBX3 0x10C
+#define VENC_ARGBX4 0x110
+#define VENC_DRGBX0 0x114
+#define VENC_DRGBX1 0x118
+#define VENC_DRGBX2 0x11C
+#define VENC_DRGBX3 0x120
+#define VENC_DRGBX4 0x124
+#define VENC_VSTARTA 0x128
+#define VENC_OSDCLK0 0x12C
+#define VENC_OSDCLK1 0x130
+#define VENC_HVLDCL0 0x134
+#define VENC_HVLDCL1 0x138
+#define VENC_OSDHADV 0x13C
+#define VENC_CLKCTL 0x140
+#define VENC_GAMCTL 0x144
+#define VENC_XHINTVL 0x174
+
+/* bit definitions */
+#define VPBE_PCR_VENC_DIV (1 << 1)
+#define VPBE_PCR_CLK_OFF (1 << 0)
+
+#define VENC_VMOD_VDMD_SHIFT 12
+#define VENC_VMOD_VDMD_YCBCR16 0
+#define VENC_VMOD_VDMD_YCBCR8 1
+#define VENC_VMOD_VDMD_RGB666 2
+#define VENC_VMOD_VDMD_RGB8 3
+#define VENC_VMOD_VDMD_EPSON 4
+#define VENC_VMOD_VDMD_CASIO 5
+#define VENC_VMOD_VDMD_UDISPQVGA 6
+#define VENC_VMOD_VDMD_STNLCD 7
+#define VENC_VMOD_VIE_SHIFT 1
+#define VENC_VMOD_VDMD (7 << 12)
+#define VENC_VMOD_ITLCL (1 << 11)
+#define VENC_VMOD_ITLC (1 << 10)
+#define VENC_VMOD_NSIT (1 << 9)
+#define VENC_VMOD_HDMD (1 << 8)
+#define VENC_VMOD_TVTYP_SHIFT 6
+#define VENC_VMOD_TVTYP (3 << 6)
+#define VENC_VMOD_SLAVE (1 << 5)
+#define VENC_VMOD_VMD (1 << 4)
+#define VENC_VMOD_BLNK (1 << 3)
+#define VENC_VMOD_VIE (1 << 1)
+#define VENC_VMOD_VENC (1 << 0)
+
+/* VMOD TVTYP options for HDMD=0 */
+#define SDTV_NTSC 0
+#define SDTV_PAL 1
+/* VMOD TVTYP options for HDMD=1 */
+#define HDTV_525P 0
+#define HDTV_625P 1
+#define HDTV_1080I 2
+#define HDTV_720P 3
+
+#define VENC_VIDCTL_VCLKP (1 << 14)
+#define VENC_VIDCTL_VCLKE_SHIFT 13
+#define VENC_VIDCTL_VCLKE (1 << 13)
+#define VENC_VIDCTL_VCLKZ_SHIFT 12
+#define VENC_VIDCTL_VCLKZ (1 << 12)
+#define VENC_VIDCTL_SYDIR_SHIFT 8
+#define VENC_VIDCTL_SYDIR (1 << 8)
+#define VENC_VIDCTL_DOMD_SHIFT 4
+#define VENC_VIDCTL_DOMD (3 << 4)
+#define VENC_VIDCTL_YCDIR_SHIFT 0
+#define VENC_VIDCTL_YCDIR (1 << 0)
+
+#define VENC_VDPRO_ATYCC_SHIFT 5
+#define VENC_VDPRO_ATYCC (1 << 5)
+#define VENC_VDPRO_ATCOM_SHIFT 4
+#define VENC_VDPRO_ATCOM (1 << 4)
+#define VENC_VDPRO_DAFRQ (1 << 3)
+#define VENC_VDPRO_DAUPS (1 << 2)
+#define VENC_VDPRO_CUPS (1 << 1)
+#define VENC_VDPRO_YUPS (1 << 0)
+
+#define VENC_SYNCCTL_VPL_SHIFT 3
+#define VENC_SYNCCTL_VPL (1 << 3)
+#define VENC_SYNCCTL_HPL_SHIFT 2
+#define VENC_SYNCCTL_HPL (1 << 2)
+#define VENC_SYNCCTL_SYEV_SHIFT 1
+#define VENC_SYNCCTL_SYEV (1 << 1)
+#define VENC_SYNCCTL_SYEH_SHIFT 0
+#define VENC_SYNCCTL_SYEH (1 << 0)
+#define VENC_SYNCCTL_OVD_SHIFT 14
+#define VENC_SYNCCTL_OVD (1 << 14)
+
+#define VENC_DCLKCTL_DCKEC_SHIFT 11
+#define VENC_DCLKCTL_DCKEC (1 << 11)
+#define VENC_DCLKCTL_DCKPW_SHIFT 0
+#define VENC_DCLKCTL_DCKPW (0x3f << 0)
+
+#define VENC_VSTAT_FIDST (1 << 4)
+
+#define VENC_CMPNT_MRGB_SHIFT 14
+#define VENC_CMPNT_MRGB (1 << 14)
+
+#endif /* _VPBE_VENC_REGS_H */
diff --git a/drivers/media/video/davinci/vpif.c b/drivers/media/video/davinci/vpif.c
index 9f3bfc1eb24..af9680273ff 100644
--- a/drivers/media/video/davinci/vpif.c
+++ b/drivers/media/video/davinci/vpif.c
@@ -422,7 +422,7 @@ static int __init vpif_probe(struct platform_device *pdev)
if (!res)
return -ENOENT;
- res_len = res->end - res->start + 1;
+ res_len = resource_size(res);
res = request_mem_region(res->start, res_len, res->name);
if (!res)
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index d93ad74a34c..49e4deb5004 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -33,7 +33,6 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/version.h>
#include <linux/slab.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -44,6 +43,7 @@
MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION(VPIF_CAPTURE_VERSION);
#define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg)
#define vpif_dbg(level, debug, fmt, arg...) \
@@ -1677,7 +1677,6 @@ static int vpif_querycap(struct file *file, void *priv,
{
struct vpif_capture_config *config = vpif_dev->platform_data;
- cap->version = VPIF_CAPTURE_VERSION_CODE;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
strlcpy(cap->driver, "vpif capture", sizeof(cap->driver));
strlcpy(cap->bus_info, "DM646x Platform", sizeof(cap->bus_info));
@@ -2211,10 +2210,8 @@ static __init int vpif_probe(struct platform_device *pdev)
vfd->v4l2_dev = &vpif_obj.v4l2_dev;
vfd->release = video_device_release;
snprintf(vfd->name, sizeof(vfd->name),
- "DM646x_VPIFCapture_DRIVER_V%d.%d.%d",
- (VPIF_CAPTURE_VERSION_CODE >> 16) & 0xff,
- (VPIF_CAPTURE_VERSION_CODE >> 8) & 0xff,
- (VPIF_CAPTURE_VERSION_CODE) & 0xff);
+ "DM646x_VPIFCapture_DRIVER_V%s",
+ VPIF_CAPTURE_VERSION);
/* Set video_dev to the video device */
ch->video_dev = vfd;
}
diff --git a/drivers/media/video/davinci/vpif_capture.h b/drivers/media/video/davinci/vpif_capture.h
index 7a4196dfdce..064550f5ce4 100644
--- a/drivers/media/video/davinci/vpif_capture.h
+++ b/drivers/media/video/davinci/vpif_capture.h
@@ -23,7 +23,6 @@
/* Header files */
#include <linux/videodev2.h>
-#include <linux/version.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/videobuf-core.h>
@@ -33,11 +32,7 @@
#include "vpif.h"
/* Macros */
-#define VPIF_MAJOR_RELEASE 0
-#define VPIF_MINOR_RELEASE 0
-#define VPIF_BUILD 1
-#define VPIF_CAPTURE_VERSION_CODE ((VPIF_MAJOR_RELEASE << 16) | \
- (VPIF_MINOR_RELEASE << 8) | VPIF_BUILD)
+#define VPIF_CAPTURE_VERSION "0.0.2"
#define VPIF_VALID_FIELD(field) (((V4L2_FIELD_ANY == field) || \
(V4L2_FIELD_NONE == field)) || \
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index cdf659abdc2..286f0291004 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -29,7 +29,6 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/version.h>
#include <linux/slab.h>
#include <asm/irq.h>
@@ -47,6 +46,7 @@
MODULE_DESCRIPTION("TI DaVinci VPIF Display driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION(VPIF_DISPLAY_VERSION);
#define DM646X_V4L2_STD (V4L2_STD_525_60 | V4L2_STD_625_50)
@@ -701,7 +701,6 @@ static int vpif_querycap(struct file *file, void *priv,
{
struct vpif_display_config *config = vpif_dev->platform_data;
- cap->version = VPIF_DISPLAY_VERSION_CODE;
cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
strlcpy(cap->driver, "vpif display", sizeof(cap->driver));
strlcpy(cap->bus_info, "Platform", sizeof(cap->bus_info));
@@ -1740,10 +1739,8 @@ static __init int vpif_probe(struct platform_device *pdev)
vfd->v4l2_dev = &vpif_obj.v4l2_dev;
vfd->release = video_device_release;
snprintf(vfd->name, sizeof(vfd->name),
- "DM646x_VPIFDisplay_DRIVER_V%d.%d.%d",
- (VPIF_DISPLAY_VERSION_CODE >> 16) & 0xff,
- (VPIF_DISPLAY_VERSION_CODE >> 8) & 0xff,
- (VPIF_DISPLAY_VERSION_CODE) & 0xff);
+ "DM646x_VPIFDisplay_DRIVER_V%s",
+ VPIF_DISPLAY_VERSION);
/* Set video_dev to the video device */
ch->video_dev = vfd;
diff --git a/drivers/media/video/davinci/vpif_display.h b/drivers/media/video/davinci/vpif_display.h
index b53aaa88307..5d1936dafed 100644
--- a/drivers/media/video/davinci/vpif_display.h
+++ b/drivers/media/video/davinci/vpif_display.h
@@ -18,7 +18,6 @@
/* Header files */
#include <linux/videodev2.h>
-#include <linux/version.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/videobuf-core.h>
@@ -27,12 +26,7 @@
#include "vpif.h"
/* Macros */
-#define VPIF_MAJOR_RELEASE (0)
-#define VPIF_MINOR_RELEASE (0)
-#define VPIF_BUILD (1)
-
-#define VPIF_DISPLAY_VERSION_CODE \
- ((VPIF_MAJOR_RELEASE << 16) | (VPIF_MINOR_RELEASE << 8) | VPIF_BUILD)
+#define VPIF_DISPLAY_VERSION "0.0.2"
#define VPIF_VALID_FIELD(field) \
(((V4L2_FIELD_ANY == field) || (V4L2_FIELD_NONE == field)) || \
diff --git a/drivers/media/video/em28xx/Kconfig b/drivers/media/video/em28xx/Kconfig
index 3cb78f26df9..281ee427c2a 100644
--- a/drivers/media/video/em28xx/Kconfig
+++ b/drivers/media/video/em28xx/Kconfig
@@ -3,7 +3,6 @@ config VIDEO_EM28XX
depends on VIDEO_DEV && I2C
select VIDEO_TUNER
select VIDEO_TVEEPROM
- depends on RC_CORE
select VIDEOBUF_VMALLOC
select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO
select VIDEO_TVP5150 if VIDEO_HELPER_CHIPS_AUTO
@@ -40,7 +39,18 @@ config VIDEO_EM28XX_DVB
select DVB_S921 if !DVB_FE_CUSTOMISE
select DVB_DRXD if !DVB_FE_CUSTOMISE
select DVB_CXD2820R if !DVB_FE_CUSTOMISE
+ select DVB_DRXK if !DVB_FE_CUSTOMISE
+ select DVB_TDA18271C2DD if !DVB_FE_CUSTOMISE
select VIDEOBUF_DVB
---help---
This adds support for DVB cards based on the
Empiatech em28xx chips.
+
+config VIDEO_EM28XX_RC
+ bool "EM28XX Remote Controller support"
+ depends on RC_CORE
+ depends on VIDEO_EM28XX
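+ # a built-in em28xx cannot reference a modular rc-core, hence: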
+ depends on !(RC_CORE=m && VIDEO_EM28XX=y)
+ default y
+ ---help---
+ Enables Remote Controller support on em28xx driver.
diff --git a/drivers/media/video/em28xx/Makefile b/drivers/media/video/em28xx/Makefile
index d0f093d1d0d..38aaa004f57 100644
--- a/drivers/media/video/em28xx/Makefile
+++ b/drivers/media/video/em28xx/Makefile
@@ -1,5 +1,7 @@
-em28xx-objs := em28xx-video.o em28xx-i2c.o em28xx-cards.o em28xx-core.o \
- em28xx-input.o em28xx-vbi.o
+em28xx-y := em28xx-video.o em28xx-i2c.o em28xx-cards.o
+em28xx-y += em28xx-core.o em28xx-vbi.o
+
+em28xx-$(CONFIG_VIDEO_EM28XX_RC) += em28xx-input.o
em28xx-alsa-objs := em28xx-audio.o
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index 3c48a72eb7d..cff0768afbf 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -3,9 +3,9 @@
*
* Copyright (C) 2006 Markus Rechberger <mrechberger@gmail.com>
*
- * Copyright (C) 2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (C) 2007-2011 Mauro Carvalho Chehab <mchehab@redhat.com>
* - Port to work with the in-kernel driver
- * - Several cleanups
+ * - Cleanups, fixes, alsa-controls, etc.
*
* This driver is based on my previous au600 usb pstn audio driver
* and inherits all the copyrights
@@ -41,6 +41,7 @@
#include <sound/info.h>
#include <sound/initval.h>
#include <sound/control.h>
+#include <sound/tlv.h>
#include <media/v4l2-common.h>
#include "em28xx.h"
@@ -212,9 +213,12 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
for (i = 0; i < EM28XX_AUDIO_BUFS; i++) {
errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC);
if (errCode) {
+ em28xx_errdev("submit of audio urb failed\n");
em28xx_deinit_isoc_audio(dev);
+ atomic_set(&dev->stream_started, 0);
return errCode;
}
+
}
return 0;
@@ -245,6 +249,7 @@ static struct snd_pcm_hardware snd_em28xx_hw_capture = {
.info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BATCH |
SNDRV_PCM_INFO_MMAP_VALID,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
@@ -276,24 +281,27 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
return -ENODEV;
}
- /* Sets volume, mute, etc */
+ runtime->hw = snd_em28xx_hw_capture;
+ if ((dev->alt == 0 || dev->audio_ifnum) && dev->adev.users == 0) {
+ if (dev->audio_ifnum)
+ dev->alt = 1;
+ else
+ dev->alt = 7;
- dev->mute = 0;
- mutex_lock(&dev->lock);
- ret = em28xx_audio_analog_set(dev);
- if (ret < 0)
- goto err;
+ dprintk("changing alternate number on interface %d to %d\n",
+ dev->audio_ifnum, dev->alt);
+ usb_set_interface(dev->udev, dev->audio_ifnum, dev->alt);
- runtime->hw = snd_em28xx_hw_capture;
- if (dev->alt == 0 && dev->adev.users == 0) {
- int errCode;
- dev->alt = 7;
- dprintk("changing alternate number to 7\n");
- errCode = usb_set_interface(dev->udev, 0, 7);
- }
+ /* Sets volume, mute, etc */
+ dev->mute = 0;
+ mutex_lock(&dev->lock);
+ ret = em28xx_audio_analog_set(dev);
+ if (ret < 0)
+ goto err;
- dev->adev.users++;
- mutex_unlock(&dev->lock);
+ dev->adev.users++;
+ mutex_unlock(&dev->lock);
+ }
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
dev->adev.capture_pcm_substream = substream;
@@ -342,6 +350,8 @@ static int snd_em28xx_hw_capture_params(struct snd_pcm_substream *substream,
ret = snd_pcm_alloc_vmalloc_buffer(substream,
params_buffer_bytes(hw_params));
+ if (ret < 0)
+ return ret;
format = params_format(hw_params);
rate = params_rate(hw_params);
channels = params_channels(hw_params);
@@ -393,20 +403,24 @@ static int snd_em28xx_capture_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct em28xx *dev = snd_pcm_substream_chip(substream);
- int retval;
+ int retval = 0;
switch (cmd) {
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
+ case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
case SNDRV_PCM_TRIGGER_START:
atomic_set(&dev->stream_started, 1);
break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
+ case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
case SNDRV_PCM_TRIGGER_STOP:
- atomic_set(&dev->stream_started, 1);
+ atomic_set(&dev->stream_started, 0);
break;
default:
retval = -EINVAL;
}
schedule_work(&dev->wq_trigger);
- return 0;
+ return retval;
}
static snd_pcm_uframes_t snd_em28xx_capture_pointer(struct snd_pcm_substream
@@ -432,6 +446,179 @@ static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
return vmalloc_to_page(pageptr);
}
+/*
+ * AC97 volume control support
+ */
+static int em28xx_vol_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *info)
+{
+ info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ info->count = 2;
+ info->value.integer.min = 0;
+ info->value.integer.max = 0x1f;
+
+ return 0;
+}
+
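+/*
+ * Note: the AC97 volume registers hold per-channel attenuation (0x00 is
+ * loudest, 0x1f is quietest, bit 15 mutes), while ALSA expects larger
+ * values to mean louder, hence the "0x1f - value" conversions below.
+ */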
+static int em28xx_vol_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+ u16 val = (0x1f - (value->value.integer.value[0] & 0x1f)) |
+ (0x1f - (value->value.integer.value[1] & 0x1f)) << 8;
+ int rc;
+
+ mutex_lock(&dev->lock);
+ rc = em28xx_read_ac97(dev, kcontrol->private_value);
+ if (rc < 0)
+ goto err;
+
+ val |= rc & 0x8000; /* Preserve the mute flag */
+
+ rc = em28xx_write_ac97(dev, kcontrol->private_value, val);
+ if (rc < 0)
+ goto err;
+
+ dprintk("%sleft vol %d, right vol %d (0x%04x) to ac97 volume control 0x%04x\n",
+ (val & 0x8000) ? "muted " : "",
+ 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f),
+ val, (int)kcontrol->private_value);
+
+err:
+ mutex_unlock(&dev->lock);
+ return rc;
+}
+
+static int em28xx_vol_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+ int val;
+
+ mutex_lock(&dev->lock);
+ val = em28xx_read_ac97(dev, kcontrol->private_value);
+ mutex_unlock(&dev->lock);
+ if (val < 0)
+ return val;
+
+ dprintk("%sleft vol %d, right vol %d (0x%04x) from ac97 volume control 0x%04x\n",
+ (val & 0x8000) ? "muted " : "",
+ 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f),
+ val, (int)kcontrol->private_value);
+
+ value->value.integer.value[0] = 0x1f - (val & 0x1f);
+ value->value.integer.value[1] = 0x1f - ((val >> 8) & 0x1f);
+
+ return 0;
+}
+
+static int em28xx_vol_put_mute(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+ u16 val = value->value.integer.value[0];
+ int rc;
+
+ mutex_lock(&dev->lock);
+ rc = em28xx_read_ac97(dev, kcontrol->private_value);
+ if (rc < 0)
+ goto err;
+
+ if (val)
+ rc &= 0x1f1f;
+ else
+ rc |= 0x8000;
+
+ rc = em28xx_write_ac97(dev, kcontrol->private_value, rc);
+ if (rc < 0)
+ goto err;
+
+ dprintk("%sleft vol %d, right vol %d (0x%04x) to ac97 volume control 0x%04x\n",
+ (val & 0x8000) ? "muted " : "",
+ 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f),
+ val, (int)kcontrol->private_value);
+
+err:
+ mutex_unlock(&dev->lock);
+ return rc;
+}
+
+static int em28xx_vol_get_mute(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *value)
+{
+ struct em28xx *dev = snd_kcontrol_chip(kcontrol);
+ int val;
+
+ mutex_lock(&dev->lock);
+ val = em28xx_read_ac97(dev, kcontrol->private_value);
+ mutex_unlock(&dev->lock);
+ if (val < 0)
+ return val;
+
+ if (val & 0x8000)
+ value->value.integer.value[0] = 0;
+ else
+ value->value.integer.value[0] = 1;
+
+ dprintk("%sleft vol %d, right vol %d (0x%04x) from ac97 volume control 0x%04x\n",
+ (val & 0x8000) ? "muted " : "",
+ 0x1f - ((val >> 8) & 0x1f), 0x1f - (val & 0x1f),
+ val, (int)kcontrol->private_value);
+
+ return 0;
+}
+
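+/* declares a 1.5 dB-per-step scale starting at -34.5 dB for the ALSA TLV info */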
+static const DECLARE_TLV_DB_SCALE(em28xx_db_scale, -3450, 150, 0);
+
+static int em28xx_cvol_new(struct snd_card *card, struct em28xx *dev,
+ char *name, int id)
+{
+ int err;
+ char ctl_name[44];
+ struct snd_kcontrol *kctl;
+ struct snd_kcontrol_new tmp;
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ tmp.private_value = id;
+ tmp.name = ctl_name;
+
+ /* Add Mute Control */
+ sprintf(ctl_name, "%s Switch", name);
+ tmp.get = em28xx_vol_get_mute;
+ tmp.put = em28xx_vol_put_mute;
+ tmp.info = snd_ctl_boolean_mono_info;
+ kctl = snd_ctl_new1(&tmp, dev);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
+ return err;
+ dprintk("Added control %s for ac97 volume control 0x%04x\n",
+ ctl_name, id);
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ tmp.private_value = id;
+ tmp.name = ctl_name;
+
+ /* Add Volume Control */
+ sprintf(ctl_name, "%s Volume", name);
+ tmp.get = em28xx_vol_get;
+ tmp.put = em28xx_vol_put;
+ tmp.info = em28xx_vol_info;
+ tmp.tlv.p = em28xx_db_scale;
+ kctl = snd_ctl_new1(&tmp, dev);
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
+ return err;
+ dprintk("Added control %s for ac97 volume control 0x%04x\n",
+ ctl_name, id);
+
+ return 0;
+}
+
+/*
+ * register/unregister code and data
+ */
static struct snd_pcm_ops snd_em28xx_pcm_capture = {
.open = snd_em28xx_capture_open,
.close = snd_em28xx_pcm_close,
@@ -452,17 +639,17 @@ static int em28xx_audio_init(struct em28xx *dev)
static int devnr;
int err;
- if (dev->has_alsa_audio != 1) {
+ if (!dev->has_alsa_audio || dev->audio_ifnum < 0) {
/* This device does not support the extension (in this case
the device is expecting the snd-usb-audio module or
doesn't have analog audio support at all) */
return 0;
}
- printk(KERN_INFO "em28xx-audio.c: probing for em28x1 "
- "non standard usbaudio\n");
+ printk(KERN_INFO "em28xx-audio.c: probing for em28xx Audio Vendor Class\n");
printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2006 Markus "
"Rechberger\n");
+ printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2007-2011 Mauro Carvalho Chehab\n");
err = snd_card_create(index[devnr], "Em28xx Audio", THIS_MODULE, 0,
&card);
@@ -488,6 +675,22 @@ static int em28xx_audio_init(struct em28xx *dev)
INIT_WORK(&dev->wq_trigger, audio_trigger);
+ if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
+ em28xx_cvol_new(card, dev, "Video", AC97_VIDEO_VOL);
+ em28xx_cvol_new(card, dev, "Line In", AC97_LINEIN_VOL);
+ em28xx_cvol_new(card, dev, "Phone", AC97_PHONE_VOL);
+ em28xx_cvol_new(card, dev, "Microphone", AC97_PHONE_VOL);
+ em28xx_cvol_new(card, dev, "CD", AC97_CD_VOL);
+ em28xx_cvol_new(card, dev, "AUX", AC97_AUX_VOL);
+ em28xx_cvol_new(card, dev, "PCM", AC97_PCM_OUT_VOL);
+
+ em28xx_cvol_new(card, dev, "Master", AC97_MASTER_VOL);
+ em28xx_cvol_new(card, dev, "Line", AC97_LINE_LEVEL_VOL);
+ em28xx_cvol_new(card, dev, "Mono", AC97_MASTER_MONO_VOL);
+ em28xx_cvol_new(card, dev, "LFE", AC97_LFE_MASTER_VOL);
+ em28xx_cvol_new(card, dev, "Surround", AC97_SURR_MASTER_VOL);
+ }
+
err = snd_card_register(card);
if (err < 0) {
snd_card_free(card);
@@ -538,7 +741,7 @@ static void __exit em28xx_alsa_unregister(void)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Markus Rechberger <mrechberger@gmail.com>");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_DESCRIPTION("Em28xx Audio driver");
module_init(em28xx_alsa_register);
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 4e37375decf..3e3959fee41 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -289,7 +289,7 @@ static struct em28xx_reg_seq leadership_reset[] = {
{ -1, -1, -1, -1},
};
-/* 2013:024f PCTV Systems nanoStick T2 290e
+/* 2013:024f PCTV nanoStick T2 290e
* GPIO_6 - demod reset
* GPIO_7 - LED
*/
@@ -300,6 +300,23 @@ static struct em28xx_reg_seq pctv_290e[] = {
{-1, -1, -1, -1},
};
+#if 0
+static struct em28xx_reg_seq terratec_h5_gpio[] = {
+ {EM28XX_R08_GPIO, 0xff, 0xff, 10},
+ {EM2874_R80_GPIO, 0xf6, 0xff, 100},
+ {EM2874_R80_GPIO, 0xf2, 0xff, 50},
+ {EM2874_R80_GPIO, 0xf6, 0xff, 50},
+ { -1, -1, -1, -1},
+};
+
+static struct em28xx_reg_seq terratec_h5_digital[] = {
+ {EM2874_R80_GPIO, 0xf6, 0xff, 10},
+ {EM2874_R80_GPIO, 0xe6, 0xff, 100},
+ {EM2874_R80_GPIO, 0xa6, 0xff, 10},
+ { -1, -1, -1, -1},
+};
+#endif
+
/*
* Board definitions
*/
@@ -843,6 +860,19 @@ struct em28xx_board em28xx_boards[] = {
.gpio = terratec_cinergy_USB_XS_FR_analog,
} },
},
+ [EM2884_BOARD_TERRATEC_H5] = {
+ .name = "Terratec Cinergy H5",
+ .has_dvb = 1,
+#if 0
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .tuner_addr = 0x41,
+ .dvb_gpio = terratec_h5_digital, /* FIXME: probably wrong */
+ .tuner_gpio = terratec_h5_gpio,
+#endif
+ .i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
+ EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_400_KHZ,
+ },
[EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900] = {
.name = "Hauppauge WinTV HVR 900",
.tda9887_conf = TDA9887_PRESENT,
@@ -1259,7 +1289,7 @@ struct em28xx_board em28xx_boards[] = {
} },
},
- [EM2874_LEADERSHIP_ISDBT] = {
+ [EM2874_BOARD_LEADERSHIP_ISDBT] = {
.i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
EM28XX_I2C_CLK_WAIT_ENABLE |
EM28XX_I2C_FREQ_100_KHZ,
@@ -1319,7 +1349,6 @@ struct em28xx_board em28xx_boards[] = {
},
[EM2880_BOARD_KWORLD_DVB_305U] = {
.name = "KWorld DVB-T 305U",
- .valid = EM28XX_BOARD_NOT_VALIDATED,
.tuner_type = TUNER_XC2028,
.tuner_gpio = default_tuner_gpio,
.decoder = EM28XX_TVP5150,
@@ -1770,16 +1799,16 @@ struct em28xx_board em28xx_boards[] = {
.dvb_gpio = kworld_a340_digital,
.tuner_gpio = default_tuner_gpio,
},
- /* 2013:024f PCTV Systems nanoStick T2 290e.
+ /* 2013:024f PCTV nanoStick T2 290e.
* Empia EM28174, Sony CXD2820R and NXP TDA18271HD/C2 */
[EM28174_BOARD_PCTV_290E] = {
+ .name = "PCTV nanoStick T2 290e",
.i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
EM28XX_I2C_CLK_WAIT_ENABLE | EM28XX_I2C_FREQ_100_KHZ,
- .xclk = EM28XX_XCLK_FREQUENCY_12MHZ,
- .name = "PCTV Systems nanoStick T2 290e",
.tuner_type = TUNER_ABSENT,
.tuner_gpio = pctv_290e,
.has_dvb = 1,
+ .ir_codes = RC_MAP_PINNACLE_PCTV_HD,
},
};
const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
@@ -1855,8 +1884,10 @@ struct usb_device_id em28xx_id_table[] = {
{ USB_DEVICE(0x0ccd, 0x0042),
.driver_info = EM2882_BOARD_TERRATEC_HYBRID_XS },
{ USB_DEVICE(0x0ccd, 0x0043),
- .driver_info = EM2870_BOARD_TERRATEC_XS },
- { USB_DEVICE(0x0ccd, 0x0047),
+ .driver_info = EM2884_BOARD_TERRATEC_H5 },
+ { USB_DEVICE(0x0ccd, 0x10a2), /* Rev. 1 */
+ .driver_info = EM2884_BOARD_TERRATEC_H5 },
+ { USB_DEVICE(0x0ccd, 0x10ad), /* Rev. 2 */
.driver_info = EM2880_BOARD_TERRATEC_PRODIGY_XS },
{ USB_DEVICE(0x0ccd, 0x0084),
.driver_info = EM2860_BOARD_TERRATEC_AV350 },
@@ -1937,7 +1968,7 @@ static struct em28xx_hash_table em28xx_i2c_hash[] = {
{0x77800080, EM2860_BOARD_TVP5150_REFERENCE_DESIGN, TUNER_ABSENT},
{0xc51200e3, EM2820_BOARD_GADMEI_TVR200, TUNER_LG_PAL_NEW_TAPC},
{0x4ba50080, EM2861_BOARD_GADMEI_UTV330PLUS, TUNER_TNF_5335MF},
- {0x6b800080, EM2874_LEADERSHIP_ISDBT, TUNER_ABSENT},
+ {0x6b800080, EM2874_BOARD_LEADERSHIP_ISDBT, TUNER_ABSENT},
};
/* I2C possible address to saa7115, tvp5150, msp3400, tvaudio */
@@ -2660,10 +2691,9 @@ void em28xx_card_setup(struct em28xx *dev)
.addr = 0xba >> 1,
.platform_data = &pdata,
};
- struct v4l2_subdev *sd;
pdata.xtal = dev->sensor_xtal;
- sd = v4l2_i2c_new_subdev_board(&dev->v4l2_dev, &dev->i2c_adap,
+ v4l2_i2c_new_subdev_board(&dev->v4l2_dev, &dev->i2c_adap,
&mt9v011_info, NULL);
}
@@ -2842,11 +2872,26 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
em28xx_info("chip ID is em2882/em2883\n");
dev->wait_after_write = 0;
break;
+ case CHIP_ID_EM2884:
+ em28xx_info("chip ID is em2884\n");
+ dev->reg_gpio_num = EM2874_R80_GPIO;
+ dev->wait_after_write = 0;
+ break;
default:
em28xx_info("em28xx chip ID = %d\n", dev->chip_id);
}
}
+ if (dev->is_audio_only) {
+ errCode = em28xx_audio_setup(dev);
+ if (errCode)
+ return -ENODEV;
+ em28xx_add_into_devlist(dev);
+ em28xx_init_extension(dev);
+
+ return 0;
+ }
+
/* Prepopulate cached GPO register content */
retval = em28xx_read_reg(dev, dev->reg_gpo_num);
if (retval >= 0)
@@ -2947,6 +2992,9 @@ fail_reg_devices:
return retval;
}
+/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
+#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
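+/* e.g. wMaxPacketSize = 0x1400: base size 0x400 (1024 bytes), hb_mult = 3 -> 3072 bytes per microframe */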
+
/*
* em28xx_usb_probe()
* checks for supported devices
@@ -2956,15 +3004,15 @@ static int em28xx_usb_probe(struct usb_interface *interface,
{
const struct usb_endpoint_descriptor *endpoint;
struct usb_device *udev;
- struct usb_interface *uif;
struct em28xx *dev = NULL;
int retval;
- int i, nr, ifnum, isoc_pipe;
+ bool is_audio_only = false, has_audio = false;
+ int i, nr, isoc_pipe;
+ const int ifnum = interface->altsetting[0].desc.bInterfaceNumber;
char *speed;
char descr[255] = "";
udev = usb_get_dev(interface_to_usbdev(interface));
- ifnum = interface->altsetting[0].desc.bInterfaceNumber;
/* Check to see next free device and mark as used */
nr = find_first_zero_bit(&em28xx_devused, EM28XX_MAXBOARDS);
@@ -2984,6 +3032,19 @@ static int em28xx_usb_probe(struct usb_interface *interface,
goto err;
}
+ /* Get endpoints */
+ for (i = 0; i < interface->num_altsetting; i++) {
+ int ep;
+
+ for (ep = 0; ep < interface->altsetting[i].desc.bNumEndpoints; ep++) {
+ struct usb_host_endpoint *e;
+ e = &interface->altsetting[i].endpoint[ep];
+
+ if (e->desc.bEndpointAddress == 0x83)
+ has_audio = true;
+ }
+ }
+
endpoint = &interface->cur_altsetting->endpoint[0].desc;
/* check if the device has the iso in endpoint at the correct place */
@@ -3003,19 +3064,22 @@ static int em28xx_usb_probe(struct usb_interface *interface,
check_interface = 0;
if (!check_interface) {
- em28xx_err(DRIVER_NAME " video device (%04x:%04x): "
- "interface %i, class %i found.\n",
- le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct),
- ifnum,
- interface->altsetting[0].desc.bInterfaceClass);
-
- em28xx_err(DRIVER_NAME " This is an anciliary "
- "interface not used by the driver\n");
-
- em28xx_devused &= ~(1<<nr);
- retval = -ENODEV;
- goto err;
+ if (has_audio) {
+ is_audio_only = true;
+ } else {
+ em28xx_err(DRIVER_NAME " video device (%04x:%04x): "
+ "interface %i, class %i found.\n",
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct),
+ ifnum,
+ interface->altsetting[0].desc.bInterfaceClass);
+ em28xx_err(DRIVER_NAME " This is an anciliary "
+ "interface not used by the driver\n");
+
+ em28xx_devused &= ~(1<<nr);
+ retval = -ENODEV;
+ goto err;
+ }
}
}
@@ -3045,8 +3109,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
if (*descr)
strlcat(descr, " ", sizeof(descr));
- printk(DRIVER_NAME ": New device %s@ %s Mbps "
- "(%04x:%04x, interface %d, class %d)\n",
+ printk(KERN_INFO DRIVER_NAME
+ ": New device %s@ %s Mbps (%04x:%04x, interface %d, class %d)\n",
descr,
speed,
le16_to_cpu(udev->descriptor.idVendor),
@@ -3054,6 +3118,11 @@ static int em28xx_usb_probe(struct usb_interface *interface,
ifnum,
interface->altsetting->desc.bInterfaceNumber);
+ if (has_audio)
+ printk(KERN_INFO DRIVER_NAME
+ ": Audio Vendor Class interface %i found\n",
+ ifnum);
+
/*
* Make sure we have 480 Mbps of bandwidth, otherwise things like
* video stream wouldn't likely work, since 12 Mbps is generally
@@ -3089,10 +3158,13 @@ static int em28xx_usb_probe(struct usb_interface *interface,
dev->devno = nr;
dev->model = id->driver_info;
dev->alt = -1;
+ dev->is_audio_only = is_audio_only;
+ dev->has_alsa_audio = has_audio;
+ dev->audio_ifnum = ifnum;
/* Checks if audio is provided by some interface */
for (i = 0; i < udev->config->desc.bNumInterfaces; i++) {
- uif = udev->config->interface[i];
+ struct usb_interface *uif = udev->config->interface[i];
if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
dev->has_audio_class = 1;
break;
@@ -3100,9 +3172,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
}
/* compute alternate max packet sizes */
- uif = udev->actconfig->interface[0];
-
- dev->num_alt = uif->num_altsetting;
+ dev->num_alt = interface->num_altsetting;
dev->alt_max_pkt_size = kmalloc(32 * dev->num_alt, GFP_KERNEL);
if (dev->alt_max_pkt_size == NULL) {
@@ -3114,14 +3184,21 @@ static int em28xx_usb_probe(struct usb_interface *interface,
}
for (i = 0; i < dev->num_alt ; i++) {
- u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
- dev->alt_max_pkt_size[i] =
- (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
+ u16 tmp = le16_to_cpu(interface->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
+ unsigned int size = tmp & 0x7ff;
+
+ if (udev->speed == USB_SPEED_HIGH)
+ size = size * hb_mult(tmp);
+
+ dev->alt_max_pkt_size[i] = size;
}
if ((card[nr] >= 0) && (card[nr] < em28xx_bcount))
dev->model = card[nr];
+ /* save our data pointer in this interface device */
+ usb_set_intfdata(interface, dev);
+
/* allocate device struct */
mutex_init(&dev->lock);
mutex_lock(&dev->lock);
@@ -3133,9 +3210,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
goto err;
}
- /* save our data pointer in this interface device */
- usb_set_intfdata(interface, dev);
-
request_modules(dev);
/* Should be the last thing to do, to avoid newer udev's to
@@ -3164,6 +3238,13 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
if (!dev)
return;
+ if (dev->is_audio_only) {
+ mutex_lock(&dev->lock);
+ em28xx_close_extension(dev);
+ mutex_unlock(&dev->lock);
+ return;
+ }
+
em28xx_info("disconnecting %s\n", dev->vdev->name);
flush_request_modules(dev);
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index e33f145d867..57b1b5c6d88 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -211,6 +211,7 @@ int em28xx_write_reg(struct em28xx *dev, u16 reg, u8 val)
{
return em28xx_write_regs(dev, reg, &val, 1);
}
+EXPORT_SYMBOL_GPL(em28xx_write_reg);
/*
* em28xx_write_reg_bits()
@@ -286,6 +287,7 @@ int em28xx_read_ac97(struct em28xx *dev, u8 reg)
return ret;
return le16_to_cpu(val);
}
+EXPORT_SYMBOL_GPL(em28xx_read_ac97);
/*
* em28xx_write_ac97()
@@ -313,13 +315,14 @@ int em28xx_write_ac97(struct em28xx *dev, u8 reg, u16 val)
return 0;
}
+EXPORT_SYMBOL_GPL(em28xx_write_ac97);
-struct em28xx_vol_table {
+struct em28xx_vol_itable {
enum em28xx_amux mux;
u8 reg;
};
-static struct em28xx_vol_table inputs[] = {
+static struct em28xx_vol_itable inputs[] = {
{ EM28XX_AMUX_VIDEO, AC97_VIDEO_VOL },
{ EM28XX_AMUX_LINE_IN, AC97_LINEIN_VOL },
{ EM28XX_AMUX_PHONE, AC97_PHONE_VOL },
@@ -403,7 +406,12 @@ static int em28xx_set_audio_source(struct em28xx *dev)
return ret;
}
-static const struct em28xx_vol_table outputs[] = {
+struct em28xx_vol_otable {
+ enum em28xx_aout mux;
+ u8 reg;
+};
+
+static const struct em28xx_vol_otable outputs[] = {
{ EM28XX_AOUT_MASTER, AC97_MASTER_VOL },
{ EM28XX_AOUT_LINE, AC97_LINE_LEVEL_VOL },
{ EM28XX_AOUT_MONO, AC97_MASTER_MONO_VOL },
@@ -492,17 +500,13 @@ int em28xx_audio_setup(struct em28xx *dev)
if (dev->chip_id == CHIP_ID_EM2870 || dev->chip_id == CHIP_ID_EM2874
|| dev->chip_id == CHIP_ID_EM28174) {
/* Digital only device - don't load any alsa module */
- dev->audio_mode.has_audio = 0;
- dev->has_audio_class = 0;
- dev->has_alsa_audio = 0;
+ dev->audio_mode.has_audio = false;
+ dev->has_audio_class = false;
+ dev->has_alsa_audio = false;
return 0;
}
- /* If device doesn't support Usb Audio Class, use vendor class */
- if (!dev->has_audio_class)
- dev->has_alsa_audio = 1;
-
- dev->audio_mode.has_audio = 1;
+ dev->audio_mode.has_audio = true;
/* See how this device is configured */
cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
@@ -512,8 +516,8 @@ int em28xx_audio_setup(struct em28xx *dev)
cfg = EM28XX_CHIPCFG_AC97; /* Be conservative */
} else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) == 0x00) {
/* The device doesn't have vendor audio at all */
- dev->has_alsa_audio = 0;
- dev->audio_mode.has_audio = 0;
+ dev->has_alsa_audio = false;
+ dev->audio_mode.has_audio = false;
return 0;
} else if ((cfg & EM28XX_CHIPCFG_AUDIOMASK) ==
EM28XX_CHIPCFG_I2S_3_SAMPRATES) {
@@ -542,8 +546,8 @@ int em28xx_audio_setup(struct em28xx *dev)
*/
em28xx_warn("AC97 chip type couldn't be determined\n");
dev->audio_mode.ac97 = EM28XX_NO_AC97;
- dev->has_alsa_audio = 0;
- dev->audio_mode.has_audio = 0;
+ dev->has_alsa_audio = false;
+ dev->audio_mode.has_audio = false;
goto init_audio;
}
@@ -615,7 +619,9 @@ int em28xx_capture_start(struct em28xx *dev, int start)
{
int rc;
- if (dev->chip_id == CHIP_ID_EM2874 || dev->chip_id == CHIP_ID_EM28174) {
+ if (dev->chip_id == CHIP_ID_EM2874 ||
+ dev->chip_id == CHIP_ID_EM2884 ||
+ dev->chip_id == CHIP_ID_EM28174) {
/* The Transport Stream Enable Register moved in em2874 */
if (!start) {
rc = em28xx_write_reg_bits(dev, EM2874_R5F_TS_ENABLE,
@@ -884,6 +890,7 @@ int em28xx_gpio_set(struct em28xx *dev, struct em28xx_reg_seq *gpio)
}
return rc;
}
+EXPORT_SYMBOL_GPL(em28xx_gpio_set);
int em28xx_set_mode(struct em28xx *dev, enum em28xx_mode set_mode)
{
@@ -917,7 +924,7 @@ EXPORT_SYMBOL_GPL(em28xx_set_mode);
static void em28xx_irq_callback(struct urb *urb)
{
struct em28xx *dev = urb->context;
- int rc, i;
+ int i;
switch (urb->status) {
case 0: /* success */
@@ -934,7 +941,7 @@ static void em28xx_irq_callback(struct urb *urb)
/* Copy data from URB */
spin_lock(&dev->slock);
- rc = dev->isoc_ctl.isoc_copy(dev, urb);
+ dev->isoc_ctl.isoc_copy(dev, urb);
spin_unlock(&dev->slock);
/* Reset urb buffers */
@@ -1106,17 +1113,19 @@ EXPORT_SYMBOL_GPL(em28xx_init_isoc);
int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev)
{
unsigned int chip_cfg2;
- unsigned int packet_size = 564;
-
- if (dev->chip_id == CHIP_ID_EM2874) {
- /* FIXME - for now assume 564 like it was before, but the
- em2874 code should be added to return the proper value... */
- packet_size = 564;
- } else if (dev->chip_id == CHIP_ID_EM28174) {
- /* FIXME same as em2874. 564 was enough for 22 Mbit DVB-T
- but too much for 44 Mbit DVB-C. */
- packet_size = 752;
- } else {
+ unsigned int packet_size;
+
+ switch (dev->chip_id) {
+ case CHIP_ID_EM2710:
+ case CHIP_ID_EM2750:
+ case CHIP_ID_EM2800:
+ case CHIP_ID_EM2820:
+ case CHIP_ID_EM2840:
+ case CHIP_ID_EM2860:
+ /* No DVB support */
+ return -EINVAL;
+ case CHIP_ID_EM2870:
+ case CHIP_ID_EM2883:
/* TS max packet size stored in bits 1-0 of R01 */
chip_cfg2 = em28xx_read_reg(dev, EM28XX_R01_CHIPCFG2);
switch (chip_cfg2 & EM28XX_CHIPCFG2_TS_PACKETSIZE_MASK) {
@@ -1133,9 +1142,24 @@ int em28xx_isoc_dvb_max_packetsize(struct em28xx *dev)
packet_size = 752;
break;
}
+ break;
+ case CHIP_ID_EM2874:
+ /*
+ * FIXME: for now this assumes 564 as before, but the
+ * em2874 code should be added to return the proper value
+ */
+ packet_size = 564;
+ break;
+ case CHIP_ID_EM2884:
+ case CHIP_ID_EM28174:
+ default:
+ /*
+ * FIXME: same as em2874. 564 was enough for 22 Mbit DVB-T
+ * but not enough for 44 Mbit DVB-C.
+ */
+ packet_size = 752;
}
- em28xx_coredbg("dvb max packet size=%d\n", packet_size);
return packet_size;
}
EXPORT_SYMBOL_GPL(em28xx_isoc_dvb_max_packetsize);
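For reference, a minimal standalone sketch of the TS packet-size decode done in the switch above (assumption: the two low bits of chip config register 2 select 188/376/564/752-byte TS packets, as the visible 752 case suggests; the real driver uses its EM28XX_CHIPCFG2_TS_PACKETSIZE_* macros, which are not reproduced here):

#include <stdio.h>

/* Hypothetical decode of the two TS packet-size bits; mirrors the shape of
 * the switch in em28xx_isoc_dvb_max_packetsize(), not its exact macros. */
static int ts_packet_size(unsigned int chip_cfg2)
{
	switch (chip_cfg2 & 0x03) {
	case 0:
		return 188;
	case 1:
		return 376;
	case 2:
		return 564;
	default:
		return 752;
	}
}

int main(void)
{
	unsigned int cfg;

	for (cfg = 0; cfg < 4; cfg++)
		printf("cfg2 bits %u -> %d-byte TS packets\n",
		       cfg, ts_packet_size(cfg));
	return 0;
}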
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index 7904ca4b691..e5916dee409 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -1,7 +1,7 @@
/*
DVB device driver for em28xx
- (c) 2008 Mauro Carvalho Chehab <mchehab@infradead.org>
+ (c) 2008-2011 Mauro Carvalho Chehab <mchehab@infradead.org>
(c) 2008 Devin Heitmueller <devin.heitmueller@gmail.com>
- Fixes for the driver to properly work with HVR-950
@@ -40,6 +40,8 @@
#include "s921.h"
#include "drxd.h"
#include "cxd2820r.h"
+#include "tda18271c2dd.h"
+#include "drxk.h"
MODULE_DESCRIPTION("driver for em28xx based DVB cards");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
@@ -73,6 +75,11 @@ struct em28xx_dvb {
struct dmx_frontend fe_hw;
struct dmx_frontend fe_mem;
struct dvb_net net;
+
+ /* Due to DRX-K - probably need changes */
+ int (*gate_ctrl)(struct dvb_frontend *, int);
+ struct semaphore pll_mutex;
+ bool dont_attach_fe1;
};
@@ -160,6 +167,11 @@ static int start_streaming(struct em28xx_dvb *dvb)
return rc;
max_dvb_packet_size = em28xx_isoc_dvb_max_packetsize(dev);
+ if (max_dvb_packet_size < 0)
+ return max_dvb_packet_size;
+ dprintk(1, "Using %d buffers each with %d bytes\n",
+ EM28XX_DVB_NUM_BUFS,
+ max_dvb_packet_size);
return em28xx_init_isoc(dev, EM28XX_DVB_MAX_PACKETS,
EM28XX_DVB_NUM_BUFS, max_dvb_packet_size,
@@ -295,6 +307,79 @@ static struct drxd_config em28xx_drxd = {
.disable_i2c_gate_ctrl = 1,
};
+struct drxk_config terratec_h5_drxk = {
+ .adr = 0x29,
+ .single_master = 1,
+ .no_i2c_bridge = 1,
+ .microcode_name = "dvb-usb-terratec-h5-drxk.fw",
+};
+
+static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct em28xx_dvb *dvb = fe->sec_priv;
+ int status;
+
+ if (!dvb)
+ return -EINVAL;
+
+ if (enable) {
+ down(&dvb->pll_mutex);
+ status = dvb->gate_ctrl(fe, 1);
+ } else {
+ status = dvb->gate_ctrl(fe, 0);
+ up(&dvb->pll_mutex);
+ }
+ return status;
+}
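The wrapper above serializes I2C-gate open/close around the DRX-K's own i2c_gate_ctrl with the pll semaphore, so tuner traffic through the gate cannot interleave. A simplified, self-contained sketch of that wrapping pattern (all names and the plain counter are invented for illustration; the driver works on struct dvb_frontend and a real semaphore):

#include <stdio.h>

struct fe {
	int (*gate_ctrl)(struct fe *fe, int enable);
	void *sec_priv;
};

struct glue {
	int (*orig_gate_ctrl)(struct fe *fe, int enable);
	int pll_lock;			/* stands in for the pll semaphore */
};

static int wrapped_gate_ctrl(struct fe *fe, int enable)
{
	struct glue *g = fe->sec_priv;
	int status;

	if (enable) {
		g->pll_lock++;		/* down(&dvb->pll_mutex) in the driver */
		status = g->orig_gate_ctrl(fe, 1);
	} else {
		status = g->orig_gate_ctrl(fe, 0);
		g->pll_lock--;		/* up(&dvb->pll_mutex) in the driver */
	}
	return status;
}

static int demod_gate_ctrl(struct fe *fe, int enable)
{
	(void)fe;
	printf("demod gate %s\n", enable ? "open" : "closed");
	return 0;
}

int main(void)
{
	struct glue g = { .orig_gate_ctrl = demod_gate_ctrl };
	struct fe fe = { .gate_ctrl = demod_gate_ctrl, .sec_priv = &g };

	/* Save the original callback and install the wrapper, as dvb_init()
	 * does for the Terratec H5 further down. */
	g.orig_gate_ctrl = fe.gate_ctrl;
	fe.gate_ctrl = wrapped_gate_ctrl;

	fe.gate_ctrl(&fe, 1);	/* open gate: lock is taken first */
	fe.gate_ctrl(&fe, 0);	/* close gate: lock released afterwards */
	return 0;
}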
+
+static void terratec_h5_init(struct em28xx *dev)
+{
+ int i;
+ struct em28xx_reg_seq terratec_h5_init[] = {
+ {EM28XX_R08_GPIO, 0xff, 0xff, 10},
+ {EM2874_R80_GPIO, 0xf6, 0xff, 100},
+ {EM2874_R80_GPIO, 0xf2, 0xff, 50},
+ {EM2874_R80_GPIO, 0xf6, 0xff, 100},
+ { -1, -1, -1, -1},
+ };
+ struct em28xx_reg_seq terratec_h5_end[] = {
+ {EM2874_R80_GPIO, 0xe6, 0xff, 100},
+ {EM2874_R80_GPIO, 0xa6, 0xff, 50},
+ {EM2874_R80_GPIO, 0xe6, 0xff, 100},
+ { -1, -1, -1, -1},
+ };
+ struct {
+ unsigned char r[4];
+ int len;
+ } regs[] = {
+ {{ 0x06, 0x02, 0x00, 0x31 }, 4},
+ {{ 0x01, 0x02 }, 2},
+ {{ 0x01, 0x02, 0x00, 0xc6 }, 4},
+ {{ 0x01, 0x00 }, 2},
+ {{ 0x01, 0x00, 0xff, 0xaf }, 4},
+ {{ 0x01, 0x00, 0x03, 0xa0 }, 4},
+ {{ 0x01, 0x00 }, 2},
+ {{ 0x01, 0x00, 0x73, 0xaf }, 4},
+ {{ 0x04, 0x00 }, 2},
+ {{ 0x00, 0x04 }, 2},
+ {{ 0x00, 0x04, 0x00, 0x0a }, 4},
+ {{ 0x04, 0x14 }, 2},
+ {{ 0x04, 0x14, 0x00, 0x00 }, 4},
+ };
+
+ em28xx_gpio_set(dev, terratec_h5_init);
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
+ msleep(10);
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x45);
+ msleep(10);
+
+ dev->i2c_client.addr = 0x82 >> 1;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++)
+ i2c_master_send(&dev->i2c_client, regs[i].r, regs[i].len);
+ em28xx_gpio_set(dev, terratec_h5_end);
+}
+
static int mt352_terratec_xs_init(struct dvb_frontend *fe)
{
/* Values extracted from a USB trace of the Terratec Windows driver */
@@ -516,7 +601,7 @@ static void unregister_dvb(struct em28xx_dvb *dvb)
if (dvb->fe[1])
dvb_unregister_frontend(dvb->fe[1]);
dvb_unregister_frontend(dvb->fe[0]);
- if (dvb->fe[1])
+ if (dvb->fe[1] && !dvb->dont_attach_fe1)
dvb_frontend_detach(dvb->fe[1]);
dvb_frontend_detach(dvb->fe[0]);
dvb_unregister_adapter(&dvb->adapter);
@@ -546,7 +631,7 @@ static int dvb_init(struct em28xx *dev)
em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
/* init frontend */
switch (dev->model) {
- case EM2874_LEADERSHIP_ISDBT:
+ case EM2874_BOARD_LEADERSHIP_ISDBT:
dvb->fe[0] = dvb_attach(s921_attach,
&sharp_isdbt, &dev->i2c_adap);
@@ -689,6 +774,41 @@ static int dvb_init(struct em28xx *dev)
}
}
break;
+ case EM2884_BOARD_TERRATEC_H5:
+ terratec_h5_init(dev);
+
+ dvb->dont_attach_fe1 = 1;
+
+ dvb->fe[0] = dvb_attach(drxk_attach, &terratec_h5_drxk, &dev->i2c_adap, &dvb->fe[1]);
+ if (!dvb->fe[0]) {
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ /* FIXME: do we need a pll semaphore? */
+ dvb->fe[0]->sec_priv = dvb;
+ sema_init(&dvb->pll_mutex, 1);
+ dvb->gate_ctrl = dvb->fe[0]->ops.i2c_gate_ctrl;
+ dvb->fe[0]->ops.i2c_gate_ctrl = drxk_gate_ctrl;
+ dvb->fe[1]->id = 1;
+
+ /* Attach tda18271 to DVB-C frontend */
+ if (dvb->fe[0]->ops.i2c_gate_ctrl)
+ dvb->fe[0]->ops.i2c_gate_ctrl(dvb->fe[0], 1);
+ if (!dvb_attach(tda18271c2dd_attach, dvb->fe[0], &dev->i2c_adap, 0x60)) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ if (dvb->fe[0]->ops.i2c_gate_ctrl)
+ dvb->fe[0]->ops.i2c_gate_ctrl(dvb->fe[0], 0);
+
+ /* Hack - needed by drxk/tda18271c2dd */
+ dvb->fe[1]->tuner_priv = dvb->fe[0]->tuner_priv;
+ memcpy(&dvb->fe[1]->ops.tuner_ops,
+ &dvb->fe[0]->ops.tuner_ops,
+ sizeof(dvb->fe[0]->ops.tuner_ops));
+
+ break;
default:
em28xx_errdev("/2: The frontend of your DVB/ATSC card"
" isn't supported yet\n");
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 4739fc7e6eb..36f5a9bc8b7 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -181,16 +181,25 @@ static int em2800_i2c_recv_bytes(struct em28xx *dev, unsigned char addr,
/*
* em28xx_i2c_send_bytes()
- * untested for more than 4 bytes
*/
static int em28xx_i2c_send_bytes(void *data, unsigned char addr, char *buf,
short len, int stop)
{
int wrcount = 0;
struct em28xx *dev = (struct em28xx *)data;
+ int write_timeout, ret;
wrcount = dev->em28xx_write_regs_req(dev, stop ? 2 : 3, addr, buf, len);
+ /* Seems to be required after a write */
+ for (write_timeout = EM2800_I2C_WRITE_TIMEOUT; write_timeout > 0;
+ write_timeout -= 5) {
+ ret = dev->em28xx_read_reg(dev, 0x05);
+ if (!ret)
+ break;
+ msleep(5);
+ }
+
return wrcount;
}
@@ -218,9 +227,7 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, unsigned char addr,
*/
static int em28xx_i2c_check_for_device(struct em28xx *dev, unsigned char addr)
{
- char msg;
int ret;
- msg = addr;
ret = dev->em28xx_read_reg_req(dev, 2, addr);
if (ret < 0) {
@@ -332,7 +339,9 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned char *eedata, int len)
struct em28xx_eeprom *em_eeprom = (void *)eedata;
int i, err, size = len, block;
- if (dev->chip_id == CHIP_ID_EM2874 || dev->chip_id == CHIP_ID_EM28174) {
+ if (dev->chip_id == CHIP_ID_EM2874 ||
+ dev->chip_id == CHIP_ID_EM28174 ||
+ dev->chip_id == CHIP_ID_EM2884) {
/* Empia switched to a 16-bit addressable eeprom in newer
devices. While we could certainly write a routine to read
the eeprom, there is nothing of use in there that cannot be
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index ba1ba8648c8..5d12b14282e 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -372,6 +372,7 @@ int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
ir->get_key = default_polling_getkey;
break;
case CHIP_ID_EM2874:
+ case CHIP_ID_EM28174:
ir->get_key = em2874_polling_getkey;
em28xx_write_regs(dev, EM2874_R50_IR_CONFIG, &ir_config, 1);
break;
diff --git a/drivers/media/video/em28xx/em28xx-reg.h b/drivers/media/video/em28xx/em28xx-reg.h
index e92a28ede43..66f792361b9 100644
--- a/drivers/media/video/em28xx/em28xx-reg.h
+++ b/drivers/media/video/em28xx/em28xx-reg.h
@@ -201,6 +201,7 @@ enum em28xx_chip_id {
CHIP_ID_EM2870 = 35,
CHIP_ID_EM2883 = 36,
CHIP_ID_EM2874 = 65,
+ CHIP_ID_EM2884 = 68,
CHIP_ID_EM28174 = 113,
};
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index 7b6461d2d1f..d176dc0394e 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -32,7 +32,6 @@
#include <linux/bitmap.h>
#include <linux/usb.h>
#include <linux/i2c.h>
-#include <linux/version.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -50,7 +49,8 @@
"Sascha Sommer <saschasommer@freenet.de>"
#define DRIVER_DESC "Empia em28xx based USB video device driver"
-#define EM28XX_VERSION_CODE KERNEL_VERSION(0, 1, 2)
+
+#define EM28XX_VERSION "0.1.3"
#define em28xx_videodbg(fmt, arg...) do {\
if (video_debug) \
@@ -72,6 +72,7 @@ do {\
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(EM28XX_VERSION);
static unsigned int video_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
static unsigned int vbi_nr[] = {[0 ... (EM28XX_MAXBOARDS - 1)] = UNSET };
@@ -1757,8 +1758,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = EM28XX_VERSION_CODE;
-
cap->capabilities =
V4L2_CAP_SLICED_VBI_CAPTURE |
V4L2_CAP_VIDEO_CAPTURE |
@@ -1976,7 +1975,6 @@ static int radio_querycap(struct file *file, void *priv,
strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = EM28XX_VERSION_CODE;
cap->capabilities = V4L2_CAP_TUNER;
return 0;
}
@@ -2450,10 +2448,8 @@ int em28xx_register_analog_devices(struct em28xx *dev)
u8 val;
int ret;
- printk(KERN_INFO "%s: v4l2 driver version %d.%d.%d\n",
- dev->name,
- (EM28XX_VERSION_CODE >> 16) & 0xff,
- (EM28XX_VERSION_CODE >> 8) & 0xff, EM28XX_VERSION_CODE & 0xff);
+ printk(KERN_INFO "%s: v4l2 driver version %s\n",
+ dev->name, EM28XX_VERSION);
/* set default norm */
dev->norm = em28xx_video_template.current_norm;
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 3cca3312245..d80658bf3da 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -117,9 +117,9 @@
#define EM2800_BOARD_VC211A 74
#define EM2882_BOARD_DIKOM_DK300 75
#define EM2870_BOARD_KWORLD_A340 76
-#define EM2874_LEADERSHIP_ISDBT 77
+#define EM2874_BOARD_LEADERSHIP_ISDBT 77
#define EM28174_BOARD_PCTV_290E 78
-
+#define EM2884_BOARD_TERRATEC_H5 79
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -487,6 +487,8 @@ struct em28xx {
int devno; /* marks the number of this device */
enum em28xx_chip_id chip_id;
+ int audio_ifnum;
+
struct v4l2_device v4l2_dev;
struct em28xx_board board;
@@ -503,6 +505,7 @@ struct em28xx {
unsigned int has_audio_class:1;
unsigned int has_alsa_audio:1;
+ unsigned int is_audio_only:1;
/* Controls audio streaming */
struct work_struct wq_trigger; /* Trigger to start/stop audio for alsa module */
@@ -697,6 +700,9 @@ int em28xx_tuner_callback(void *ptr, int component, int command, int arg);
void em28xx_release_resources(struct em28xx *dev);
/* Provided by em28xx-input.c */
+
+#ifdef CONFIG_VIDEO_EM28XX_RC
+
int em28xx_get_key_terratec(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw);
int em28xx_get_key_em_haup(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw);
int em28xx_get_key_pinnacle_usb_grey(struct IR_i2c *ir, u32 *ir_key,
@@ -709,6 +715,20 @@ void em28xx_deregister_snapshot_button(struct em28xx *dev);
int em28xx_ir_init(struct em28xx *dev);
int em28xx_ir_fini(struct em28xx *dev);
+#else
+
+#define em28xx_get_key_terratec NULL
+#define em28xx_get_key_em_haup NULL
+#define em28xx_get_key_pinnacle_usb_grey NULL
+#define em28xx_get_key_winfast_usbii_deluxe NULL
+
+static inline void em28xx_register_snapshot_button(struct em28xx *dev) {}
+static inline void em28xx_deregister_snapshot_button(struct em28xx *dev) {}
+static inline int em28xx_ir_init(struct em28xx *dev) { return 0; }
+static inline int em28xx_ir_fini(struct em28xx *dev) { return 0; }
+
+#endif
+
/* Provided by em28xx-vbi.c */
extern struct videobuf_queue_ops em28xx_vbi_qops;
diff --git a/drivers/media/video/et61x251/et61x251.h b/drivers/media/video/et61x251/et61x251.h
index bf66189cb26..14bb907d650 100644
--- a/drivers/media/video/et61x251/et61x251.h
+++ b/drivers/media/video/et61x251/et61x251.h
@@ -21,7 +21,6 @@
#ifndef _ET61X251_H_
#define _ET61X251_H_
-#include <linux/version.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index a982750dcef..9a1e80a1e14 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
***************************************************************************/
+#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -48,8 +49,7 @@
#define ET61X251_MODULE_AUTHOR "(C) 2006-2007 Luca Risolia"
#define ET61X251_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>"
#define ET61X251_MODULE_LICENSE "GPL"
-#define ET61X251_MODULE_VERSION "1:1.09"
-#define ET61X251_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 9)
+#define ET61X251_MODULE_VERSION "1.1.10"
/*****************************************************************************/
@@ -1579,7 +1579,7 @@ et61x251_vidioc_querycap(struct et61x251_device* cam, void __user * arg)
{
struct v4l2_capability cap = {
.driver = "et61x251",
- .version = ET61X251_MODULE_VERSION_CODE,
+ .version = LINUX_VERSION_CODE,
.capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING,
};
@@ -2480,16 +2480,8 @@ static long et61x251_ioctl_v4l2(struct file *filp,
case VIDIOC_S_PARM:
return et61x251_vidioc_s_parm(cam, arg);
- case VIDIOC_G_STD:
- case VIDIOC_S_STD:
- case VIDIOC_QUERYSTD:
- case VIDIOC_ENUMSTD:
- case VIDIOC_QUERYMENU:
- case VIDIOC_ENUM_FRAMEINTERVALS:
- return -EINVAL;
-
default:
- return -EINVAL;
+ return -ENOTTY;
}
}
diff --git a/drivers/media/video/fsl-viu.c b/drivers/media/video/fsl-viu.c
index 908d7012c3f..27cb197d0bd 100644
--- a/drivers/media/video/fsl-viu.c
+++ b/drivers/media/video/fsl-viu.c
@@ -23,19 +23,13 @@
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf-dma-contig.h>
#define DRV_NAME "fsl_viu"
-#define VIU_MAJOR_VERSION 0
-#define VIU_MINOR_VERSION 5
-#define VIU_RELEASE 0
-#define VIU_VERSION KERNEL_VERSION(VIU_MAJOR_VERSION, \
- VIU_MINOR_VERSION, \
- VIU_RELEASE)
+#define VIU_VERSION "0.5.1"
#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
@@ -610,7 +604,6 @@ static int vidioc_querycap(struct file *file, void *priv,
{
strcpy(cap->driver, "viu");
strcpy(cap->card, "viu");
- cap->version = VIU_VERSION;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_OVERLAY |
@@ -1684,3 +1677,4 @@ module_exit(viu_exit);
MODULE_DESCRIPTION("Freescale Video-In(VIU)");
MODULE_AUTHOR("Hongjun Chen");
MODULE_LICENSE("GPL");
+MODULE_VERSION(VIU_VERSION);
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index 34ae2c29979..43d9a20caeb 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -179,6 +179,16 @@ config USB_GSPCA_PAC7311
To compile this driver as a module, choose M here: the
module will be called gspca_pac7311.
+config USB_GSPCA_SE401
+ tristate "SE401 USB Camera Driver"
+ depends on VIDEO_V4L2 && USB_GSPCA
+ help
+ Say Y here if you want support for cameras based on the
+ Endpoints (formerly known as AOX) se401 chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_se401.
+
config USB_GSPCA_SN9C2028
tristate "SONIX Dual-Mode USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
diff --git a/drivers/media/video/gspca/Makefile b/drivers/media/video/gspca/Makefile
index 802fbe1bff4..d6364a86333 100644
--- a/drivers/media/video/gspca/Makefile
+++ b/drivers/media/video/gspca/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_USB_GSPCA_OV534_9) += gspca_ov534_9.o
obj-$(CONFIG_USB_GSPCA_PAC207) += gspca_pac207.o
obj-$(CONFIG_USB_GSPCA_PAC7302) += gspca_pac7302.o
obj-$(CONFIG_USB_GSPCA_PAC7311) += gspca_pac7311.o
+obj-$(CONFIG_USB_GSPCA_SE401) += gspca_se401.o
obj-$(CONFIG_USB_GSPCA_SN9C2028) += gspca_sn9c2028.o
obj-$(CONFIG_USB_GSPCA_SN9C20X) += gspca_sn9c20x.o
obj-$(CONFIG_USB_GSPCA_SONIXB) += gspca_sonixb.o
@@ -58,6 +59,7 @@ gspca_ov534_9-objs := ov534_9.o
gspca_pac207-objs := pac207.o
gspca_pac7302-objs := pac7302.o
gspca_pac7311-objs := pac7311.o
+gspca_se401-objs := se401.o
gspca_sn9c2028-objs := sn9c2028.o
gspca_sn9c20x-objs := sn9c20x.o
gspca_sonixb-objs := sonixb.o
diff --git a/drivers/media/video/gspca/gl860/gl860.h b/drivers/media/video/gspca/gl860/gl860.h
index 49ad4acbf60..0330a0293b9 100644
--- a/drivers/media/video/gspca/gl860/gl860.h
+++ b/drivers/media/video/gspca/gl860/gl860.h
@@ -18,7 +18,6 @@
*/
#ifndef GL860_DEV_H
#define GL860_DEV_H
-#include <linux/version.h>
#include "gspca.h"
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 08ce9948d99..5da4879f47f 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -24,7 +24,6 @@
#define MODULE_NAME "gspca"
#include <linux/init.h>
-#include <linux/version.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
@@ -51,11 +50,12 @@
#error "DEF_NURBS too big"
#endif
+#define DRIVER_VERSION_NUMBER "2.13.0"
+
MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA USB Camera Driver");
MODULE_LICENSE("GPL");
-
-#define DRIVER_VERSION_NUMBER KERNEL_VERSION(2, 13, 0)
+MODULE_VERSION(DRIVER_VERSION_NUMBER);
#ifdef GSPCA_DEBUG
int gspca_debug = D_ERR | D_PROBE;
@@ -443,8 +443,11 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
} else {
switch (gspca_dev->last_packet_type) {
case DISCARD_PACKET:
- if (packet_type == LAST_PACKET)
+ if (packet_type == LAST_PACKET) {
gspca_dev->last_packet_type = packet_type;
+ gspca_dev->image = NULL;
+ gspca_dev->image_len = 0;
+ }
return;
case LAST_PACKET:
return;
@@ -1278,10 +1281,10 @@ static int vidioc_querycap(struct file *file, void *priv,
ret = -ENODEV;
goto out;
}
- strncpy((char *) cap->driver, gspca_dev->sd_desc->name,
+ strlcpy((char *) cap->driver, gspca_dev->sd_desc->name,
sizeof cap->driver);
if (gspca_dev->dev->product != NULL) {
- strncpy((char *) cap->card, gspca_dev->dev->product,
+ strlcpy((char *) cap->card, gspca_dev->dev->product,
sizeof cap->card);
} else {
snprintf((char *) cap->card, sizeof cap->card,
@@ -1291,7 +1294,6 @@ static int vidioc_querycap(struct file *file, void *priv,
}
usb_make_path(gspca_dev->dev, (char *) cap->bus_info,
sizeof(cap->bus_info));
- cap->version = DRIVER_VERSION_NUMBER;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
| V4L2_CAP_STREAMING
| V4L2_CAP_READWRITE;
@@ -1460,7 +1462,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
return -EINVAL;
input->type = V4L2_INPUT_TYPE_CAMERA;
input->status = gspca_dev->cam.input_flags;
- strncpy(input->name, gspca_dev->sd_desc->name,
+ strlcpy(input->name, gspca_dev->sd_desc->name,
sizeof input->name);
return 0;
}
@@ -2478,10 +2480,7 @@ EXPORT_SYMBOL(gspca_auto_gain_n_exposure);
/* -- module insert / remove -- */
static int __init gspca_init(void)
{
- info("v%d.%d.%d registered",
- (DRIVER_VERSION_NUMBER >> 16) & 0xff,
- (DRIVER_VERSION_NUMBER >> 8) & 0xff,
- DRIVER_VERSION_NUMBER & 0xff);
+ info("v" DRIVER_VERSION_NUMBER " registered");
return 0;
}
static void __exit gspca_exit(void)
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 057e287b915..0800433b209 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -134,6 +134,7 @@ enum sensors {
SEN_OV7670,
SEN_OV76BE,
SEN_OV8610,
+ SEN_OV9600,
};
/* Note this is a bit of a hack, but the w9968cf driver needs the code for all
@@ -340,6 +341,10 @@ static const unsigned ctrl_dis[] = {
(1 << EXPOSURE) |
(1 << AUTOGAIN) |
(1 << FREQ),
+[SEN_OV9600] = ((1 << NCTRL) - 1) /* no control */
+ ^ ((1 << EXPOSURE) /* but exposure */
+ | (1 << AUTOGAIN)), /* and autogain */
+
};
static const struct v4l2_pix_format ov519_vga_mode[] = {
@@ -525,6 +530,17 @@ static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
};
+static const struct v4l2_pix_format ovfx2_ov9600_mode[] = {
+ {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .priv = 1},
+ {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 1280,
+ .sizeimage = 1280 * 1024,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+};
/* Registers common to OV511 / OV518 */
#define R51x_FIFO_PSIZE 0x30 /* 2 bytes wide w/ OV518(+) */
@@ -1807,6 +1823,22 @@ static const struct ov_i2c_regvals norm_7660[] = {
| OV7670_COM8_AEC},
{0xa1, 0xc8}
};
+static const struct ov_i2c_regvals norm_9600[] = {
+ {0x12, 0x80},
+ {0x0c, 0x28},
+ {0x11, 0x80},
+ {0x13, 0xb5},
+ {0x14, 0x3e},
+ {0x1b, 0x04},
+ {0x24, 0xb0},
+ {0x25, 0x90},
+ {0x26, 0x94},
+ {0x35, 0x90},
+ {0x37, 0x07},
+ {0x38, 0x08},
+ {0x01, 0x8e},
+ {0x02, 0x85}
+};
/* 7670. Defaults taken from OmniVision provided data,
* as provided by Jonathan Corbet of OLPC */
@@ -2400,9 +2432,12 @@ static int ov518_i2c_r(struct sd *sd, u8 reg)
/* Initiate 2-byte write cycle */
reg_w(sd, R518_I2C_CTL, 0x03);
+ reg_r8(sd, R518_I2C_CTL);
/* Initiate 2-byte read cycle */
reg_w(sd, R518_I2C_CTL, 0x05);
+ reg_r8(sd, R518_I2C_CTL);
+
value = reg_r(sd, R51x_I2C_DATA);
PDEBUG(D_USBI, "ov518_i2c_r %02x %02x", reg, value);
return value;
@@ -2686,7 +2721,7 @@ static void write_i2c_regvals(struct sd *sd,
*
***************************************************************************/
-/* This initializes the OV2x10 / OV3610 / OV3620 */
+/* This initializes the OV2x10 / OV3610 / OV3620 / OV9600 */
static void ov_hires_configure(struct sd *sd)
{
int high, low;
@@ -2702,19 +2737,32 @@ static void ov_hires_configure(struct sd *sd)
high = i2c_r(sd, 0x0a);
low = i2c_r(sd, 0x0b);
/* info("%x, %x", high, low); */
- if (high == 0x96 && low == 0x40) {
- PDEBUG(D_PROBE, "Sensor is an OV2610");
- sd->sensor = SEN_OV2610;
- } else if (high == 0x96 && low == 0x41) {
- PDEBUG(D_PROBE, "Sensor is an OV2610AE");
- sd->sensor = SEN_OV2610AE;
- } else if (high == 0x36 && (low & 0x0f) == 0x00) {
- PDEBUG(D_PROBE, "Sensor is an OV3610");
- sd->sensor = SEN_OV3610;
- } else {
- err("Error unknown sensor type: %02x%02x",
- high, low);
+ switch (high) {
+ case 0x96:
+ switch (low) {
+ case 0x40:
+ PDEBUG(D_PROBE, "Sensor is a OV2610");
+ sd->sensor = SEN_OV2610;
+ return;
+ case 0x41:
+ PDEBUG(D_PROBE, "Sensor is a OV2610AE");
+ sd->sensor = SEN_OV2610AE;
+ return;
+ case 0xb1:
+ PDEBUG(D_PROBE, "Sensor is a OV9600");
+ sd->sensor = SEN_OV9600;
+ return;
+ }
+ break;
+ case 0x36:
+ if ((low & 0x0f) == 0x00) {
+ PDEBUG(D_PROBE, "Sensor is a OV3610");
+ sd->sensor = SEN_OV3610;
+ return;
+ }
+ break;
}
+ err("Error unknown sensor type: %02x%02x", high, low);
}
/* This initializes the OV8110, OV8610 sensor. The OV8110 uses
@@ -3400,6 +3448,10 @@ static int sd_init(struct gspca_dev *gspca_dev)
cam->cam_mode = ovfx2_ov3610_mode;
cam->nmodes = ARRAY_SIZE(ovfx2_ov3610_mode);
break;
+ case SEN_OV9600:
+ cam->cam_mode = ovfx2_ov9600_mode;
+ cam->nmodes = ARRAY_SIZE(ovfx2_ov9600_mode);
+ break;
default:
if (sd->sif) {
cam->cam_mode = ov519_sif_mode;
@@ -3497,6 +3549,12 @@ static int sd_init(struct gspca_dev *gspca_dev)
case SEN_OV8610:
write_i2c_regvals(sd, norm_8610, ARRAY_SIZE(norm_8610));
break;
+ case SEN_OV9600:
+ write_i2c_regvals(sd, norm_9600, ARRAY_SIZE(norm_9600));
+
+ /* enable autoexpo */
+/* i2c_w_mask(sd, 0x13, 0x05, 0x05); */
+ break;
}
return gspca_dev->usb_err;
error:
@@ -4085,6 +4143,33 @@ static void mode_init_ov_sensor_regs(struct sd *sd)
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
break;
+ case SEN_OV9600: {
+ const struct ov_i2c_regvals *vals;
+ static const struct ov_i2c_regvals sxga_15[] = {
+ {0x11, 0x80}, {0x14, 0x3e}, {0x24, 0x85}, {0x25, 0x75}
+ };
+ static const struct ov_i2c_regvals sxga_7_5[] = {
+ {0x11, 0x81}, {0x14, 0x3e}, {0x24, 0x85}, {0x25, 0x75}
+ };
+ static const struct ov_i2c_regvals vga_30[] = {
+ {0x11, 0x81}, {0x14, 0x7e}, {0x24, 0x70}, {0x25, 0x60}
+ };
+ static const struct ov_i2c_regvals vga_15[] = {
+ {0x11, 0x83}, {0x14, 0x3e}, {0x24, 0x80}, {0x25, 0x70}
+ };
+
+ /* frame rates:
+ * 15fps / 7.5 fps for 1280x1024
+ * 30fps / 15fps for 640x480
+ */
+ i2c_w_mask(sd, 0x12, qvga ? 0x40 : 0x00, 0x40);
+ if (qvga)
+ vals = sd->frame_rate < 30 ? vga_15 : vga_30;
+ else
+ vals = sd->frame_rate < 15 ? sxga_7_5 : sxga_15;
+ write_i2c_regvals(sd, vals, ARRAY_SIZE(sxga_15));
+ return;
+ }
default:
return;
}
@@ -4120,6 +4205,7 @@ static void set_ov_sensor_window(struct sd *sd)
case SEN_OV2610AE:
case SEN_OV3610:
case SEN_OV7670:
+ case SEN_OV9600:
mode_init_ov_sensor_regs(sd);
return;
case SEN_OV7660:
@@ -4920,7 +5006,8 @@ static const struct sd_desc sd_desc = {
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
{USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 },
- {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
+ {USB_DEVICE(0x041e, 0x405f),
+ .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
{USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
{USB_DEVICE(0x041e, 0x4064),
diff --git a/drivers/media/video/gspca/se401.c b/drivers/media/video/gspca/se401.c
new file mode 100644
index 00000000000..4c283c24c75
--- /dev/null
+++ b/drivers/media/video/gspca/se401.c
@@ -0,0 +1,774 @@
+/*
+ * GSPCA Endpoints (formerly known as AOX) se401 USB Camera sub Driver
+ *
+ * Copyright (C) 2011 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on the v4l1 se401 driver which is:
+ *
+ * Copyright (c) 2000 Jeroen B. Vreeken (pe1rxq@amsat.org)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define MODULE_NAME "se401"
+
+#define BULK_SIZE 4096
+#define PACKET_SIZE 1024
+#define READ_REQ_SIZE 64
+#define MAX_MODES ((READ_REQ_SIZE - 6) / 4)
+/* The se401 compression algorithm uses a fixed quant factor, which
+ can be configured by setting the high nibble of the SE401_OPERATINGMODE
+ feature. This needs to exactly match what is in libv4l! */
+#define SE401_QUANT_FACT 8
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include "gspca.h"
+#include "se401.h"
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Endpoints se401");
+MODULE_LICENSE("GPL");
+
+/* controls */
+enum e_ctrl {
+ BRIGHTNESS,
+ GAIN,
+ EXPOSURE,
+ FREQ,
+ NCTRL /* number of controls */
+};
+
+/* exposure change state machine states */
+enum {
+ EXPO_CHANGED,
+ EXPO_DROP_FRAME,
+ EXPO_NO_CHANGE,
+};
+
+/* specific webcam descriptor */
+struct sd {
+ struct gspca_dev gspca_dev; /* !! must be the first item */
+ struct gspca_ctrl ctrls[NCTRL];
+ struct v4l2_pix_format fmts[MAX_MODES];
+ int pixels_read;
+ int packet_read;
+ u8 packet[PACKET_SIZE];
+ u8 restart_stream;
+ u8 button_state;
+ u8 resetlevel;
+ u8 resetlevel_frame_count;
+ int resetlevel_adjust_dir;
+ int expo_change_state;
+};
+
+static void setbrightness(struct gspca_dev *gspca_dev);
+static void setgain(struct gspca_dev *gspca_dev);
+static void setexposure(struct gspca_dev *gspca_dev);
+
+static const struct ctrl sd_ctrls[NCTRL] = {
+[BRIGHTNESS] = {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 15,
+ },
+ .set_control = setbrightness
+ },
+[GAIN] = {
+ {
+ .id = V4L2_CID_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Gain",
+ .minimum = 0,
+ .maximum = 50, /* Really 63 but > 50 is not pretty */
+ .step = 1,
+ .default_value = 25,
+ },
+ .set_control = setgain
+ },
+[EXPOSURE] = {
+ {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = 0,
+ .maximum = 32767,
+ .step = 1,
+ .default_value = 15000,
+ },
+ .set_control = setexposure
+ },
+[FREQ] = {
+ {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Light frequency filter",
+ .minimum = 0,
+ .maximum = 2,
+ .step = 1,
+ .default_value = 0,
+ },
+ .set_control = setexposure
+ },
+};
+
+static void se401_write_req(struct gspca_dev *gspca_dev, u16 req, u16 value,
+ int silent)
+{
+ int err;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+
+ err = usb_control_msg(gspca_dev->dev,
+ usb_sndctrlpipe(gspca_dev->dev, 0), req,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, 0, NULL, 0, 1000);
+ if (err < 0) {
+ if (!silent)
+ err("write req failed req %#04x val %#04x error %d",
+ req, value, err);
+ gspca_dev->usb_err = err;
+ }
+}
+
+static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent)
+{
+ int err;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+
+ if (USB_BUF_SZ < READ_REQ_SIZE) {
+ err("USB_BUF_SZ too small!!");
+ gspca_dev->usb_err = -ENOBUFS;
+ return;
+ }
+
+ err = usb_control_msg(gspca_dev->dev,
+ usb_rcvctrlpipe(gspca_dev->dev, 0), req,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, 0, gspca_dev->usb_buf, READ_REQ_SIZE, 1000);
+ if (err < 0) {
+ if (!silent)
+ err("read req failed req %#04x error %d", req, err);
+ gspca_dev->usb_err = err;
+ }
+}
+
+static void se401_set_feature(struct gspca_dev *gspca_dev,
+ u16 selector, u16 param)
+{
+ int err;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+
+ err = usb_control_msg(gspca_dev->dev,
+ usb_sndctrlpipe(gspca_dev->dev, 0),
+ SE401_REQ_SET_EXT_FEATURE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ param, selector, NULL, 0, 1000);
+ if (err < 0) {
+ err("set feature failed sel %#04x param %#04x error %d",
+ selector, param, err);
+ gspca_dev->usb_err = err;
+ }
+}
+
+static int se401_get_feature(struct gspca_dev *gspca_dev, u16 selector)
+{
+ int err;
+
+ if (gspca_dev->usb_err < 0)
+ return gspca_dev->usb_err;
+
+ if (USB_BUF_SZ < 2) {
+ err("USB_BUF_SZ too small!!");
+ gspca_dev->usb_err = -ENOBUFS;
+ return gspca_dev->usb_err;
+ }
+
+ err = usb_control_msg(gspca_dev->dev,
+ usb_rcvctrlpipe(gspca_dev->dev, 0),
+ SE401_REQ_GET_EXT_FEATURE,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, selector, gspca_dev->usb_buf, 2, 1000);
+ if (err < 0) {
+ err("get feature failed sel %#04x error %d", selector, err);
+ gspca_dev->usb_err = err;
+ return err;
+ }
+ return gspca_dev->usb_buf[0] | (gspca_dev->usb_buf[1] << 8);
+}
+
+static void setbrightness(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (gspca_dev->ctrl_dis & (1 << BRIGHTNESS))
+ return;
+
+ /* HDG: this does not seem to do anything on my cam */
+ se401_write_req(gspca_dev, SE401_REQ_SET_BRT,
+ sd->ctrls[BRIGHTNESS].val, 0);
+}
+
+static void setgain(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u16 gain = 63 - sd->ctrls[GAIN].val;
+
+ /* red color gain */
+ se401_set_feature(gspca_dev, HV7131_REG_ARCG, gain);
+ /* green color gain */
+ se401_set_feature(gspca_dev, HV7131_REG_AGCG, gain);
+ /* blue color gain */
+ se401_set_feature(gspca_dev, HV7131_REG_ABCG, gain);
+}
+
+static void setexposure(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int integration = sd->ctrls[EXPOSURE].val << 6;
+ u8 expose_h, expose_m, expose_l;
+
+ /* Do this before the set_feature calls, for proper timing wrt
+ the interrupt-driven pkt_scan. Note we may still race, but that
+ is not a big issue; the expo change state machine is merely for
+ avoiding underexposed frames getting sent out, and if one sneaks
+ through, so be it */
+ sd->expo_change_state = EXPO_CHANGED;
+
+ if (sd->ctrls[FREQ].val == V4L2_CID_POWER_LINE_FREQUENCY_50HZ)
+ integration = integration - integration % 106667;
+ if (sd->ctrls[FREQ].val == V4L2_CID_POWER_LINE_FREQUENCY_60HZ)
+ integration = integration - integration % 88889;
+
+ expose_h = (integration >> 16);
+ expose_m = (integration >> 8);
+ expose_l = integration;
+
+ /* integration time low */
+ se401_set_feature(gspca_dev, HV7131_REG_TITL, expose_l);
+ /* integration time mid */
+ se401_set_feature(gspca_dev, HV7131_REG_TITM, expose_m);
+ /* integration time high */
+ se401_set_feature(gspca_dev, HV7131_REG_TITU, expose_h);
+}
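Worked example of the rounding above: with the control's default exposure value of 15000 and the 50 Hz filter, integration = 15000 << 6 = 960000 is rounded down to a multiple of 106667 before being split into the three TIT bytes. A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* Mirrors setexposure() above for EXPOSURE = 15000 and a 50 Hz
	 * power-line filter; 106667 is the unit the driver uses for 50 Hz. */
	int integration = 15000 << 6;		/* 960000 */

	integration -= integration % 106667;	/* 853336, flicker-free */
	printf("integration    = %d\n", integration);
	printf("TITU/TITM/TITL = %02x %02x %02x\n",
	       (integration >> 16) & 0xff,
	       (integration >> 8) & 0xff,
	       integration & 0xff);
	return 0;
}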
+
+static int sd_config(struct gspca_dev *gspca_dev,
+ const struct usb_device_id *id)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct cam *cam = &gspca_dev->cam;
+ u8 *cd = gspca_dev->usb_buf;
+ int i, j, n;
+ int widths[MAX_MODES], heights[MAX_MODES];
+
+ /* Read the camera descriptor */
+ se401_read_req(gspca_dev, SE401_REQ_GET_CAMERA_DESCRIPTOR, 1);
+ if (gspca_dev->usb_err) {
+ /* Sometimes after being idle for a while the se401 won't
+ respond and needs a good kicking */
+ usb_reset_device(gspca_dev->dev);
+ gspca_dev->usb_err = 0;
+ se401_read_req(gspca_dev, SE401_REQ_GET_CAMERA_DESCRIPTOR, 0);
+ }
+
+ /* Some cameras start with their LED on */
+ se401_write_req(gspca_dev, SE401_REQ_LED_CONTROL, 0, 0);
+ if (gspca_dev->usb_err)
+ return gspca_dev->usb_err;
+
+ if (cd[1] != 0x41) {
+ err("Wrong descriptor type");
+ return -ENODEV;
+ }
+
+ if (!(cd[2] & SE401_FORMAT_BAYER)) {
+ err("Bayer format not supported!");
+ return -ENODEV;
+ }
+
+ if (cd[3])
+ info("ExtraFeatures: %d", cd[3]);
+
+ n = cd[4] | (cd[5] << 8);
+ if (n > MAX_MODES) {
+ err("Too many frame sizes");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < n ; i++) {
+ widths[i] = cd[6 + i * 4 + 0] | (cd[6 + i * 4 + 1] << 8);
+ heights[i] = cd[6 + i * 4 + 2] | (cd[6 + i * 4 + 3] << 8);
+ }
+
+ for (i = 0; i < n ; i++) {
+ sd->fmts[i].width = widths[i];
+ sd->fmts[i].height = heights[i];
+ sd->fmts[i].field = V4L2_FIELD_NONE;
+ sd->fmts[i].colorspace = V4L2_COLORSPACE_SRGB;
+ sd->fmts[i].priv = 1;
+
+ /* janggu compression only works for 1/4th or 1/16th res */
+ for (j = 0; j < n; j++) {
+ if (widths[j] / 2 == widths[i] &&
+ heights[j] / 2 == heights[i]) {
+ sd->fmts[i].priv = 2;
+ break;
+ }
+ }
+ /* 1/16th, if available, is better than 1/4th, because
+ we then use a larger area of the sensor */
+ for (j = 0; j < n; j++) {
+ if (widths[j] / 4 == widths[i] &&
+ heights[j] / 4 == heights[i]) {
+ sd->fmts[i].priv = 4;
+ break;
+ }
+ }
+
+ if (sd->fmts[i].priv == 1) {
+ /* Not a 1/4th or 1/16th res, use bayer */
+ sd->fmts[i].pixelformat = V4L2_PIX_FMT_SBGGR8;
+ sd->fmts[i].bytesperline = widths[i];
+ sd->fmts[i].sizeimage = widths[i] * heights[i];
+ info("Frame size: %dx%d bayer", widths[i], heights[i]);
+ } else {
+ /* Found a match, use janggu compression */
+ sd->fmts[i].pixelformat = V4L2_PIX_FMT_SE401;
+ sd->fmts[i].bytesperline = 0;
+ sd->fmts[i].sizeimage = widths[i] * heights[i] * 3;
+ info("Frame size: %dx%d 1/%dth janggu",
+ widths[i], heights[i],
+ sd->fmts[i].priv * sd->fmts[i].priv);
+ }
+ }
+
+ cam->cam_mode = sd->fmts;
+ cam->nmodes = n;
+ cam->bulk = 1;
+ cam->bulk_size = BULK_SIZE;
+ cam->bulk_nurbs = 4;
+ cam->ctrls = sd->ctrls;
+ gspca_dev->nbalt = 1; /* Ignore the bogus isoc alt settings */
+ sd->resetlevel = 0x2d; /* Set initial resetlevel */
+
+ /* See if the camera supports brightness */
+ se401_read_req(gspca_dev, SE401_REQ_GET_BRT, 1);
+ if (gspca_dev->usb_err) {
+ gspca_dev->ctrl_dis = (1 << BRIGHTNESS);
+ gspca_dev->usb_err = 0;
+ }
+
+ return 0;
+}
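The nested loops above decide, per advertised frame size, whether the janggu-compressed 1/4th or 1/16th mode can be used (priv = 2 or 4) or whether raw bayer is the only option (priv = 1). A standalone sketch of that selection, using a hypothetical size list instead of the camera descriptor:

#include <stdio.h>

int main(void)
{
	/* Hypothetical advertised sizes: 640x480, 320x240, 160x120. */
	int widths[]  = { 640, 320, 160 };
	int heights[] = { 480, 240, 120 };
	int n = 3, i, j;

	for (i = 0; i < n; i++) {
		int priv = 1;				/* 1 = raw bayer */

		for (j = 0; j < n; j++)
			if (widths[j] / 2 == widths[i] &&
			    heights[j] / 2 == heights[i])
				priv = 2;		/* 1/4th janggu */
		for (j = 0; j < n; j++)
			if (widths[j] / 4 == widths[i] &&
			    heights[j] / 4 == heights[i])
				priv = 4;		/* 1/16th janggu, preferred */
		printf("%dx%d -> priv %d (%s)\n", widths[i], heights[i], priv,
		       priv == 1 ? "bayer" : "janggu");
	}
	return 0;
}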
+
+/* this function is called at probe and resume time */
+static int sd_init(struct gspca_dev *gspca_dev)
+{
+ return 0;
+}
+
+/* -- start the camera -- */
+static int sd_start(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ int mult = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
+ int mode = 0;
+
+ se401_write_req(gspca_dev, SE401_REQ_CAMERA_POWER, 1, 1);
+ if (gspca_dev->usb_err) {
+ /* Sometimes after being idle for a while the se401 won't
+ respond and needs a good kicking */
+ usb_reset_device(gspca_dev->dev);
+ gspca_dev->usb_err = 0;
+ se401_write_req(gspca_dev, SE401_REQ_CAMERA_POWER, 1, 0);
+ }
+ se401_write_req(gspca_dev, SE401_REQ_LED_CONTROL, 1, 0);
+
+ se401_set_feature(gspca_dev, HV7131_REG_MODE_B, 0x05);
+
+ /* set size + mode */
+ se401_write_req(gspca_dev, SE401_REQ_SET_WIDTH,
+ gspca_dev->width * mult, 0);
+ se401_write_req(gspca_dev, SE401_REQ_SET_HEIGHT,
+ gspca_dev->height * mult, 0);
+ /*
+ * HDG: disabled this as it does not seem to do anything
+ * se401_write_req(gspca_dev, SE401_REQ_SET_OUTPUT_MODE,
+ * SE401_FORMAT_BAYER, 0);
+ */
+
+ switch (mult) {
+ case 1: /* Raw bayer */
+ mode = 0x03; break;
+ case 2: /* 1/4th janggu */
+ mode = SE401_QUANT_FACT << 4; break;
+ case 4: /* 1/16th janggu */
+ mode = (SE401_QUANT_FACT << 4) | 0x02; break;
+ }
+ se401_set_feature(gspca_dev, SE401_OPERATINGMODE, mode);
+
+ setbrightness(gspca_dev);
+ setgain(gspca_dev);
+ setexposure(gspca_dev);
+ se401_set_feature(gspca_dev, HV7131_REG_ARLV, sd->resetlevel);
+
+ sd->packet_read = 0;
+ sd->pixels_read = 0;
+ sd->restart_stream = 0;
+ sd->resetlevel_frame_count = 0;
+ sd->resetlevel_adjust_dir = 0;
+ sd->expo_change_state = EXPO_NO_CHANGE;
+
+ se401_write_req(gspca_dev, SE401_REQ_START_CONTINUOUS_CAPTURE, 0, 0);
+
+ return gspca_dev->usb_err;
+}
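As the header comment near the top of the file and the mult switch in sd_start() show, the SE401_OPERATINGMODE byte carries the fixed quant factor in its high nibble and a scale selector in its low nibble, with 0x03 meaning raw bayer. A quick sketch of the resulting mode bytes:

#include <stdio.h>

#define SE401_QUANT_FACT 8	/* same fixed quant factor as above */

int main(void)
{
	/* Mirrors the mult switch in sd_start(): high nibble = quant factor,
	 * low nibble 0x02 selects 1/16th scale, 0x03 selects raw bayer. */
	unsigned char raw_bayer   = 0x03;
	unsigned char janggu_4th  = SE401_QUANT_FACT << 4;
	unsigned char janggu_16th = (SE401_QUANT_FACT << 4) | 0x02;

	printf("raw bayer:     0x%02x\n", raw_bayer);
	printf("1/4th janggu:  0x%02x\n", janggu_4th);
	printf("1/16th janggu: 0x%02x\n", janggu_16th);
	return 0;
}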
+
+static void sd_stopN(struct gspca_dev *gspca_dev)
+{
+ se401_write_req(gspca_dev, SE401_REQ_STOP_CONTINUOUS_CAPTURE, 0, 0);
+ se401_write_req(gspca_dev, SE401_REQ_LED_CONTROL, 0, 0);
+ se401_write_req(gspca_dev, SE401_REQ_CAMERA_POWER, 0, 0);
+}
+
+static void sd_dq_callback(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ unsigned int ahrc, alrc;
+ int oldreset, adjust_dir;
+
+ /* Restart the stream if requested to do so by pkt_scan */
+ if (sd->restart_stream) {
+ sd_stopN(gspca_dev);
+ sd_start(gspca_dev);
+ sd->restart_stream = 0;
+ }
+
+ /* Automatically adjust the sensor reset level.
+ Hyundai have some really nice docs about this and other
+ sensor-related stuff on their homepage: www.hei.co.kr */
+ sd->resetlevel_frame_count++;
+ if (sd->resetlevel_frame_count < 20)
+ return;
+
+ /* For some reason these normally read-only registers don't get reset
+ to zero after reading them just once... */
+ se401_get_feature(gspca_dev, HV7131_REG_HIREFNOH);
+ se401_get_feature(gspca_dev, HV7131_REG_HIREFNOL);
+ se401_get_feature(gspca_dev, HV7131_REG_LOREFNOH);
+ se401_get_feature(gspca_dev, HV7131_REG_LOREFNOL);
+ ahrc = 256*se401_get_feature(gspca_dev, HV7131_REG_HIREFNOH) +
+ se401_get_feature(gspca_dev, HV7131_REG_HIREFNOL);
+ alrc = 256*se401_get_feature(gspca_dev, HV7131_REG_LOREFNOH) +
+ se401_get_feature(gspca_dev, HV7131_REG_LOREFNOL);
+
+ /* Not an exact science, but it seems to work pretty well... */
+ oldreset = sd->resetlevel;
+ if (alrc > 10) {
+ while (alrc >= 10 && sd->resetlevel < 63) {
+ sd->resetlevel++;
+ alrc /= 2;
+ }
+ } else if (ahrc > 20) {
+ while (ahrc >= 20 && sd->resetlevel > 0) {
+ sd->resetlevel--;
+ ahrc /= 2;
+ }
+ }
+ /* Detect ping-pong-ing and halve adjustment to avoid overshoot */
+ if (sd->resetlevel > oldreset)
+ adjust_dir = 1;
+ else
+ adjust_dir = -1;
+ if (sd->resetlevel_adjust_dir &&
+ sd->resetlevel_adjust_dir != adjust_dir)
+ sd->resetlevel = oldreset + (sd->resetlevel - oldreset) / 2;
+
+ if (sd->resetlevel != oldreset) {
+ sd->resetlevel_adjust_dir = adjust_dir;
+ se401_set_feature(gspca_dev, HV7131_REG_ARLV, sd->resetlevel);
+ }
+
+ sd->resetlevel_frame_count = 0;
+}
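The adjustment logic above nudges the HV7131 reset level once every 20 frames: it raises the level while the low-reference count stays high and lowers it while the high-reference count stays high, halving the count at each step, and additionally halves the move when the direction keeps flipping. A standalone sketch of the core stepping (made-up sample counts stand in for the HIREFNO/LOREFNO registers; the ping-pong damping is left to the caller, as in the driver):

#include <stdio.h>

static int adjust_resetlevel(int resetlevel, unsigned int ahrc, unsigned int alrc)
{
	if (alrc > 10) {
		while (alrc >= 10 && resetlevel < 63) {
			resetlevel++;
			alrc /= 2;
		}
	} else if (ahrc > 20) {
		while (ahrc >= 20 && resetlevel > 0) {
			resetlevel--;
			ahrc /= 2;
		}
	}
	return resetlevel;
}

int main(void)
{
	int level = 0x2d;	/* initial value set in sd_config() */

	level = adjust_resetlevel(level, 0, 160);	/* high low-ref count */
	printf("after low-ref burst:  %d\n", level);
	level = adjust_resetlevel(level, 300, 0);	/* high high-ref count */
	printf("after high-ref burst: %d\n", level);
	return 0;
}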
+
+static void sd_complete_frame(struct gspca_dev *gspca_dev, u8 *data, int len)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ switch (sd->expo_change_state) {
+ case EXPO_CHANGED:
+ /* The exposure was changed while this frame
+ was being sent, so this frame is ok */
+ sd->expo_change_state = EXPO_DROP_FRAME;
+ break;
+ case EXPO_DROP_FRAME:
+ /* The exposure was changed while this frame
+ was being captured, drop it! */
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ sd->expo_change_state = EXPO_NO_CHANGE;
+ break;
+ case EXPO_NO_CHANGE:
+ break;
+ }
+ gspca_frame_add(gspca_dev, LAST_PACKET, data, len);
+}
+
+static void sd_pkt_scan_janggu(struct gspca_dev *gspca_dev, u8 *data, int len)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ int imagesize = gspca_dev->width * gspca_dev->height;
+ int i, plen, bits, pixels, info, count;
+
+ if (sd->restart_stream)
+ return;
+
+ /* Sometimes a 1024-byte garbage bulk packet is sent between frames */
+ if (gspca_dev->last_packet_type == LAST_PACKET && len == 1024) {
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
+
+ i = 0;
+ while (i < len) {
+ /* Read header if not already present from prev bulk pkt */
+ if (sd->packet_read < 4) {
+ count = 4 - sd->packet_read;
+ if (count > len - i)
+ count = len - i;
+ memcpy(&sd->packet[sd->packet_read], &data[i], count);
+ sd->packet_read += count;
+ i += count;
+ if (sd->packet_read < 4)
+ break;
+ }
+ bits = sd->packet[3] + (sd->packet[2] << 8);
+ pixels = sd->packet[1] + ((sd->packet[0] & 0x3f) << 8);
+ info = (sd->packet[0] & 0xc0) >> 6;
+ plen = ((bits + 47) >> 4) << 1;
+ /* Sanity checks */
+ if (plen > 1024) {
+ err("invalid packet len %d restarting stream", plen);
+ goto error;
+ }
+ if (info == 3) {
+ err("unknown frame info value restarting stream");
+ goto error;
+ }
+
+ /* Read (remainder of) packet contents */
+ count = plen - sd->packet_read;
+ if (count > len - i)
+ count = len - i;
+ memcpy(&sd->packet[sd->packet_read], &data[i], count);
+ sd->packet_read += count;
+ i += count;
+ if (sd->packet_read < plen)
+ break;
+
+ sd->pixels_read += pixels;
+ sd->packet_read = 0;
+
+ switch (info) {
+ case 0: /* Frame data */
+ gspca_frame_add(gspca_dev, INTER_PACKET, sd->packet,
+ plen);
+ break;
+ case 1: /* EOF */
+ if (sd->pixels_read != imagesize) {
+ err("frame size %d expected %d",
+ sd->pixels_read, imagesize);
+ goto error;
+ }
+ sd_complete_frame(gspca_dev, sd->packet, plen);
+ return; /* Discard the rest of the bulk packet !! */
+ case 2: /* SOF */
+ gspca_frame_add(gspca_dev, FIRST_PACKET, sd->packet,
+ plen);
+ sd->pixels_read = pixels;
+ break;
+ }
+ }
+ return;
+
+error:
+ sd->restart_stream = 1;
+ /* Give userspace a 0-byte frame, so our dq callback gets
+ called and it can restart the stream */
+ gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
+ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
+}
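Each janggu packet starts with a 4-byte header: two frame-info bits, a 14-bit pixel count and a 16-bit bit count, from which the padded packet length is derived as ((bits + 47) >> 4) << 1. A worked decode of one hypothetical header:

#include <stdio.h>

int main(void)
{
	/* Made-up 4-byte header, decoded exactly as in sd_pkt_scan_janggu(). */
	unsigned char hdr[4] = { 0x82, 0x40, 0x1f, 0x00 };
	int bits   = hdr[3] + (hdr[2] << 8);
	int pixels = hdr[1] + ((hdr[0] & 0x3f) << 8);
	int info   = (hdr[0] & 0xc0) >> 6;	/* 0 data, 1 EOF, 2 SOF */
	int plen   = ((bits + 47) >> 4) << 1;	/* bytes incl. header, 16-bit aligned */

	printf("bits=%d pixels=%d info=%d plen=%d\n", bits, pixels, info, plen);
	return 0;
}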
+
+static void sd_pkt_scan_bayer(struct gspca_dev *gspca_dev, u8 *data, int len)
+{
+ struct cam *cam = &gspca_dev->cam;
+ int imagesize = cam->cam_mode[gspca_dev->curr_mode].sizeimage;
+
+ if (gspca_dev->image_len == 0) {
+ gspca_frame_add(gspca_dev, FIRST_PACKET, data, len);
+ return;
+ }
+
+ if (gspca_dev->image_len + len >= imagesize) {
+ sd_complete_frame(gspca_dev, data, len);
+ return;
+ }
+
+ gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
+}
+
+static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len)
+{
+ int mult = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
+
+ if (len == 0)
+ return;
+
+ if (mult == 1) /* mult == 1 means raw bayer */
+ sd_pkt_scan_bayer(gspca_dev, data, len);
+ else
+ sd_pkt_scan_janggu(gspca_dev, data, len);
+}
+
+static int sd_querymenu(struct gspca_dev *gspca_dev,
+ struct v4l2_querymenu *menu)
+{
+ switch (menu->id) {
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ switch (menu->index) {
+ case V4L2_CID_POWER_LINE_FREQUENCY_DISABLED:
+ strcpy((char *) menu->name, "NoFliker");
+ return 0;
+ case V4L2_CID_POWER_LINE_FREQUENCY_50HZ:
+ strcpy((char *) menu->name, "50 Hz");
+ return 0;
+ case V4L2_CID_POWER_LINE_FREQUENCY_60HZ:
+ strcpy((char *) menu->name, "60 Hz");
+ return 0;
+ }
+ break;
+ }
+ return -EINVAL;
+}
+
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ u8 state;
+
+ if (len != 2)
+ return -EINVAL;
+
+ switch (data[0]) {
+ case 0:
+ case 1:
+ state = data[0];
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (sd->button_state != state) {
+ input_report_key(gspca_dev->input_dev, KEY_CAMERA, state);
+ input_sync(gspca_dev->input_dev);
+ sd->button_state = state;
+ }
+
+ return 0;
+}
+#endif
+
+/* sub-driver description */
+static const struct sd_desc sd_desc = {
+ .name = MODULE_NAME,
+ .ctrls = sd_ctrls,
+ .nctrls = ARRAY_SIZE(sd_ctrls),
+ .config = sd_config,
+ .init = sd_init,
+ .start = sd_start,
+ .stopN = sd_stopN,
+ .dq_callback = sd_dq_callback,
+ .pkt_scan = sd_pkt_scan,
+ .querymenu = sd_querymenu,
+#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
+ .int_pkt_scan = sd_int_pkt_scan,
+#endif
+};
+
+/* -- module initialisation -- */
+static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x03e8, 0x0004)}, /* Endpoints/Aox SE401 */
+ {USB_DEVICE(0x0471, 0x030b)}, /* Philips PCVC665K */
+ {USB_DEVICE(0x047d, 0x5001)}, /* Kensington 67014 */
+ {USB_DEVICE(0x047d, 0x5002)}, /* Kensington 6701(5/7) */
+ {USB_DEVICE(0x047d, 0x5003)}, /* Kensington 67016 */
+ {}
+};
+MODULE_DEVICE_TABLE(usb, device_table);
+
+/* -- device connect -- */
+static int sd_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
+ THIS_MODULE);
+}
+
+static int sd_pre_reset(struct usb_interface *intf)
+{
+ return 0;
+}
+
+static int sd_post_reset(struct usb_interface *intf)
+{
+ return 0;
+}
+
+static struct usb_driver sd_driver = {
+ .name = MODULE_NAME,
+ .id_table = device_table,
+ .probe = sd_probe,
+ .disconnect = gspca_disconnect,
+#ifdef CONFIG_PM
+ .suspend = gspca_suspend,
+ .resume = gspca_resume,
+#endif
+ .pre_reset = sd_pre_reset,
+ .post_reset = sd_post_reset,
+};
+
+/* -- module insert / remove -- */
+static int __init sd_mod_init(void)
+{
+ return usb_register(&sd_driver);
+}
+static void __exit sd_mod_exit(void)
+{
+ usb_deregister(&sd_driver);
+}
+
+module_init(sd_mod_init);
+module_exit(sd_mod_exit);
diff --git a/drivers/media/video/gspca/se401.h b/drivers/media/video/gspca/se401.h
new file mode 100644
index 00000000000..96d8ebf3cf5
--- /dev/null
+++ b/drivers/media/video/gspca/se401.h
@@ -0,0 +1,90 @@
+/*
+ * GSPCA Endpoints (formerly known as AOX) se401 USB Camera sub Driver
+ *
+ * Copyright (C) 2011 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on the v4l1 se401 driver which is:
+ *
+ * Copyright (c) 2000 Jeroen B. Vreeken (pe1rxq@amsat.org)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#define SE401_REQ_GET_CAMERA_DESCRIPTOR 0x06
+#define SE401_REQ_START_CONTINUOUS_CAPTURE 0x41
+#define SE401_REQ_STOP_CONTINUOUS_CAPTURE 0x42
+#define SE401_REQ_CAPTURE_FRAME 0x43
+#define SE401_REQ_GET_BRT 0x44
+#define SE401_REQ_SET_BRT 0x45
+#define SE401_REQ_GET_WIDTH 0x4c
+#define SE401_REQ_SET_WIDTH 0x4d
+#define SE401_REQ_GET_HEIGHT 0x4e
+#define SE401_REQ_SET_HEIGHT 0x4f
+#define SE401_REQ_GET_OUTPUT_MODE 0x50
+#define SE401_REQ_SET_OUTPUT_MODE 0x51
+#define SE401_REQ_GET_EXT_FEATURE 0x52
+#define SE401_REQ_SET_EXT_FEATURE 0x53
+#define SE401_REQ_CAMERA_POWER 0x56
+#define SE401_REQ_LED_CONTROL 0x57
+#define SE401_REQ_BIOS 0xff
+
+#define SE401_BIOS_READ 0x07
+
+#define SE401_FORMAT_BAYER 0x40
+
+/* Hyundai hv7131b registers
+ 7121 and 7141 should be the same (haven't really checked...) */
+/* Mode registers: */
+#define HV7131_REG_MODE_A 0x00
+#define HV7131_REG_MODE_B 0x01
+#define HV7131_REG_MODE_C 0x02
+/* Frame registers: */
+#define HV7131_REG_FRSU 0x10
+#define HV7131_REG_FRSL 0x11
+#define HV7131_REG_FCSU 0x12
+#define HV7131_REG_FCSL 0x13
+#define HV7131_REG_FWHU 0x14
+#define HV7131_REG_FWHL 0x15
+#define HV7131_REG_FWWU 0x16
+#define HV7131_REG_FWWL 0x17
+/* Timing registers: */
+#define HV7131_REG_THBU 0x20
+#define HV7131_REG_THBL 0x21
+#define HV7131_REG_TVBU 0x22
+#define HV7131_REG_TVBL 0x23
+#define HV7131_REG_TITU 0x25
+#define HV7131_REG_TITM 0x26
+#define HV7131_REG_TITL 0x27
+#define HV7131_REG_TMCD 0x28
+/* Adjust Registers: */
+#define HV7131_REG_ARLV 0x30
+#define HV7131_REG_ARCG 0x31
+#define HV7131_REG_AGCG 0x32
+#define HV7131_REG_ABCG 0x33
+#define HV7131_REG_APBV 0x34
+#define HV7131_REG_ASLP 0x54
+/* Offset Registers: */
+#define HV7131_REG_OFSR 0x50
+#define HV7131_REG_OFSG 0x51
+#define HV7131_REG_OFSB 0x52
+/* Reset level statistics registers: */
+#define HV7131_REG_LOREFNOH 0x57
+#define HV7131_REG_LOREFNOL 0x58
+#define HV7131_REG_HIREFNOH 0x59
+#define HV7131_REG_HIREFNOL 0x5a
+
+/* se401 registers */
+#define SE401_OPERATINGMODE 0x2000
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index b089c0d3ee9..6ec23290218 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -247,7 +247,6 @@ static const struct cmd spca504A_clicksmart420_init_data[] = {
{0x30, 0x0004, 0x000a},
{0xb0, 0x0001, 0x0000},
-
{0xa1, 0x0080, 0x0001},
{0x30, 0x0049, 0x0000},
{0x30, 0x0060, 0x0005},
@@ -256,8 +255,6 @@ static const struct cmd spca504A_clicksmart420_init_data[] = {
{0x00, 0x0000, 0x2000},
{0x00, 0x0013, 0x2301},
{0x00, 0x0003, 0x2000},
- {0x00, 0x0000, 0x2000},
-
};
/* clicksmart 420 open data ? */
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 7e762d55109..d1d733b9359 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -1387,7 +1387,7 @@ static int sd_querymenu(struct gspca_dev *gspca_dev,
return 0;
case V4L2_CID_EFFECTS:
if ((unsigned) menu->index < ARRAY_SIZE(effects_control)) {
- strncpy((char *) menu->name,
+ strlcpy((char *) menu->name,
effects_control[menu->index],
sizeof menu->name);
return 0;
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index a27d93b503a..441dacf642b 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -17,7 +17,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/i2c.h>
@@ -474,5 +474,6 @@ module_init(hdpvr_init);
module_exit(hdpvr_exit);
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.2.1");
MODULE_AUTHOR("Janne Grunau");
MODULE_DESCRIPTION("Hauppauge HD PVR driver");
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index 514aea76eaa..087f7c08cb8 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -17,7 +17,6 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/videodev2.h>
@@ -574,7 +573,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strcpy(cap->driver, "hdpvr");
strcpy(cap->card, "Hauppauge HD PVR");
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = HDPVR_VERSION;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_AUDIO |
V4L2_CAP_READWRITE;
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index 072f23c570f..d6439db1d18 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -18,12 +18,6 @@
#include <media/v4l2-device.h>
#include <media/ir-kbd-i2c.h>
-#define HDPVR_MAJOR_VERSION 0
-#define HDPVR_MINOR_VERSION 2
-#define HDPVR_RELEASE 0
-#define HDPVR_VERSION \
- KERNEL_VERSION(HDPVR_MAJOR_VERSION, HDPVR_MINOR_VERSION, HDPVR_RELEASE)
-
#define HDPVR_MAX 8
#define HDPVR_I2C_MAX_SIZE 128
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 84bdf0f42a8..8f9cc17b518 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -36,7 +36,6 @@
* using information provided by Jiun-Kuei Jung @ AVerMedia.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
index a7f54b010a5..38f052257f4 100644
--- a/drivers/media/video/ivtv/ivtv-fileops.c
+++ b/drivers/media/video/ivtv/ivtv-fileops.c
@@ -722,8 +722,8 @@ unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
/* If there are subscribed events, then only use the new event
API instead of the old video.h based API. */
- if (!list_empty(&id->fh.events->subscribed)) {
- poll_wait(filp, &id->fh.events->wait, wait);
+ if (!list_empty(&id->fh.subscribed)) {
+ poll_wait(filp, &id->fh.wait, wait);
/* Turn off the old-style vsync events */
clear_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags);
if (v4l2_event_pending(&id->fh))
@@ -750,6 +750,7 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
+ unsigned res = 0;
/* Start a capture if there is none */
if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
@@ -769,12 +770,16 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
/* add stream's waitq to the poll list */
IVTV_DEBUG_HI_FILE("Encoder poll\n");
poll_wait(filp, &s->waitq, wait);
+ if (v4l2_event_pending(&id->fh))
+ res |= POLLPRI;
+ else
+ poll_wait(filp, &id->fh.wait, wait);
if (s->q_full.length || s->q_io.length)
- return POLLIN | POLLRDNORM;
+ return res | POLLIN | POLLRDNORM;
if (eof)
- return POLLHUP;
- return 0;
+ return res | POLLHUP;
+ return res;
}
void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
@@ -961,10 +966,6 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
return -ENOMEM;
}
v4l2_fh_init(&item->fh, s->vdev);
- if (s->type == IVTV_DEC_STREAM_TYPE_YUV ||
- s->type == IVTV_DEC_STREAM_TYPE_MPG) {
- res = v4l2_event_alloc(&item->fh, 60);
- }
if (res < 0) {
v4l2_fh_exit(&item->fh);
kfree(item);
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index f9e347dae73..3e5c090af11 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -757,7 +757,6 @@ static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vc
strlcpy(vcap->driver, IVTV_DRIVER_NAME, sizeof(vcap->driver));
strlcpy(vcap->card, itv->card_name, sizeof(vcap->card));
snprintf(vcap->bus_info, sizeof(vcap->bus_info), "PCI:%s", pci_name(itv->pdev));
- vcap->version = IVTV_DRIVER_VERSION; /* version */
vcap->capabilities = itv->v4l2_cap; /* capabilities */
return 0;
}
@@ -1184,14 +1183,10 @@ static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
ivtv_call_all(itv, tuner, g_tuner, vt);
- if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
+ if (vt->type == V4L2_TUNER_RADIO)
strlcpy(vt->name, "ivtv Radio Tuner", sizeof(vt->name));
- vt->type = V4L2_TUNER_RADIO;
- } else {
+ else
strlcpy(vt->name, "ivtv TV Tuner", sizeof(vt->name));
- vt->type = V4L2_TUNER_ANALOG_TV;
- }
-
return 0;
}
@@ -1455,11 +1450,11 @@ static int ivtv_subscribe_event(struct v4l2_fh *fh, struct v4l2_event_subscripti
switch (sub->type) {
case V4L2_EVENT_VSYNC:
case V4L2_EVENT_EOS:
- break;
+ case V4L2_EVENT_CTRL:
+ return v4l2_event_subscribe(fh, sub, 0);
default:
return -EINVAL;
}
- return v4l2_event_subscribe(fh, sub);
}
static int ivtv_log_status(struct file *file, void *fh)
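
The subscription hook above is reached through VIDIOC_SUBSCRIBE_EVENT; accepted events are then delivered via POLLPRI and VIDIOC_DQEVENT. A hedged user-space sketch (device path and error handling are illustrative only):

    #include <sys/ioctl.h>
    #include <poll.h>
    #include <linux/videodev2.h>

    /* fd is an already-open V4L2 device node, e.g. /dev/video0. */
    static int wait_for_eos(int fd)
    {
            struct v4l2_event_subscription sub = { .type = V4L2_EVENT_EOS };
            struct v4l2_event ev;
            struct pollfd pfd = { .fd = fd, .events = POLLPRI };

            if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
                    return -1;
            if (poll(&pfd, 1, -1) <= 0)     /* exceptional state = event pending */
                    return -1;
            return ioctl(fd, VIDIOC_DQEVENT, &ev);
    }
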
diff --git a/drivers/media/video/ivtv/ivtv-version.h b/drivers/media/video/ivtv/ivtv-version.h
index b67a4048f5a..a20f346fcad 100644
--- a/drivers/media/video/ivtv/ivtv-version.h
+++ b/drivers/media/video/ivtv/ivtv-version.h
@@ -21,11 +21,6 @@
#define IVTV_VERSION_H
#define IVTV_DRIVER_NAME "ivtv"
-#define IVTV_DRIVER_VERSION_MAJOR 1
-#define IVTV_DRIVER_VERSION_MINOR 4
-#define IVTV_DRIVER_VERSION_PATCHLEVEL 2
-
-#define IVTV_VERSION __stringify(IVTV_DRIVER_VERSION_MAJOR) "." __stringify(IVTV_DRIVER_VERSION_MINOR) "." __stringify(IVTV_DRIVER_VERSION_PATCHLEVEL)
-#define IVTV_DRIVER_VERSION KERNEL_VERSION(IVTV_DRIVER_VERSION_MAJOR,IVTV_DRIVER_VERSION_MINOR,IVTV_DRIVER_VERSION_PATCHLEVEL)
+#define IVTV_VERSION "1.4.3"
#endif
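
Dropping IVTV_DRIVER_VERSION goes hand in hand with the querycap hunk above: the version reported by VIDIOC_QUERYCAP is now left to the V4L2 core, which fills in the kernel version, rather than being set per driver. A hedged user-space sketch of reading it (device path illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_capability cap;
            int fd = open("/dev/video0", O_RDWR);

            if (fd < 0 || ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0)
                    return 1;
            /* cap.version is packed KERNEL_VERSION(major, minor, patch) style. */
            printf("%s on %s, version %u.%u.%u\n",
                   (char *)cap.driver, (char *)cap.card,
                   (cap.version >> 16) & 0xff, (cap.version >> 8) & 0xff,
                   cap.version & 0xff);
            return 0;
    }
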
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
index 10b55c85448..89d09a8914f 100644
--- a/drivers/media/video/m5mols/m5mols.h
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -2,10 +2,10 @@
* Header for M-5MOLS 8M Pixel camera sensor with ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -106,23 +106,23 @@ struct m5mols_capture {
* The recommended value of each field for every scene mode is given in the documents.
*/
struct m5mols_scenemode {
- u32 metering;
- u32 ev_bias;
- u32 wb_mode;
- u32 wb_preset;
- u32 chroma_en;
- u32 chroma_lvl;
- u32 edge_en;
- u32 edge_lvl;
- u32 af_range;
- u32 fd_mode;
- u32 mcc;
- u32 light;
- u32 flash;
- u32 tone;
- u32 iso;
- u32 capt_mode;
- u32 wdr;
+ u8 metering;
+ u8 ev_bias;
+ u8 wb_mode;
+ u8 wb_preset;
+ u8 chroma_en;
+ u8 chroma_lvl;
+ u8 edge_en;
+ u8 edge_lvl;
+ u8 af_range;
+ u8 fd_mode;
+ u8 mcc;
+ u8 light;
+ u8 flash;
+ u8 tone;
+ u8 iso;
+ u8 capt_mode;
+ u8 wdr;
};
/**
@@ -154,7 +154,6 @@ struct m5mols_version {
u8 str[VERSION_STRING_SIZE];
u8 af;
};
-#define VERSION_SIZE sizeof(struct m5mols_version)
/**
* struct m5mols_info - M-5MOLS driver data structure
@@ -216,9 +215,9 @@ struct m5mols_info {
bool lock_ae;
bool lock_awb;
u8 resolution;
- u32 interrupt;
- u32 mode;
- u32 mode_save;
+ u8 interrupt;
+ u8 mode;
+ u8 mode_save;
int (*set_power)(struct device *dev, int on);
};
@@ -256,9 +255,11 @@ struct m5mols_info {
* +-------+---+----------+-----+------+------+------+------+
* - d[0..3]: according to size1
*/
-int m5mols_read(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
+int m5mols_read_u8(struct v4l2_subdev *sd, u32 reg_comb, u8 *val);
+int m5mols_read_u16(struct v4l2_subdev *sd, u32 reg_comb, u16 *val);
+int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
int m5mols_write(struct v4l2_subdev *sd, u32 reg_comb, u32 val);
-int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u8 value);
/*
* Mode operation of the M-5MOLS
@@ -280,12 +281,12 @@ int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
* The available execution order between the modes is as follows:
* PARAMETER <---> MONITOR <---> CAPTURE
*/
-int m5mols_mode(struct m5mols_info *info, u32 mode);
+int m5mols_mode(struct m5mols_info *info, u8 mode);
-int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg);
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg);
int m5mols_sync_controls(struct m5mols_info *info);
int m5mols_start_capture(struct m5mols_info *info);
-int m5mols_do_scenemode(struct m5mols_info *info, u32 mode);
+int m5mols_do_scenemode(struct m5mols_info *info, u8 mode);
int m5mols_lock_3a(struct m5mols_info *info, bool lock);
int m5mols_set_ctrl(struct v4l2_ctrl *ctrl);
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c
index d71a3903b60..3248ac80571 100644
--- a/drivers/media/video/m5mols/m5mols_capture.c
+++ b/drivers/media/video/m5mols/m5mols_capture.c
@@ -2,10 +2,10 @@
* The Capture code for Fujitsu M-5MOLS ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,11 +18,9 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/version.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/videodev2.h>
-#include <linux/version.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
@@ -58,9 +56,9 @@ static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num,
{
u32 num, den;
- int ret = m5mols_read(sd, addr_num, &num);
+ int ret = m5mols_read_u32(sd, addr_num, &num);
if (!ret)
- ret = m5mols_read(sd, addr_den, &den);
+ ret = m5mols_read_u32(sd, addr_den, &den);
if (ret)
return ret;
*val = den == 0 ? 0 : num / den;
@@ -99,20 +97,20 @@ static int m5mols_capture_info(struct m5mols_info *info)
if (ret)
return ret;
- ret = m5mols_read(sd, EXIF_INFO_ISO, (u32 *)&exif->iso_speed);
+ ret = m5mols_read_u16(sd, EXIF_INFO_ISO, &exif->iso_speed);
if (!ret)
- ret = m5mols_read(sd, EXIF_INFO_FLASH, (u32 *)&exif->flash);
+ ret = m5mols_read_u16(sd, EXIF_INFO_FLASH, &exif->flash);
if (!ret)
- ret = m5mols_read(sd, EXIF_INFO_SDR, (u32 *)&exif->sdr);
+ ret = m5mols_read_u16(sd, EXIF_INFO_SDR, &exif->sdr);
if (!ret)
- ret = m5mols_read(sd, EXIF_INFO_QVAL, (u32 *)&exif->qval);
+ ret = m5mols_read_u16(sd, EXIF_INFO_QVAL, &exif->qval);
if (ret)
return ret;
if (!ret)
- ret = m5mols_read(sd, CAPC_IMAGE_SIZE, &info->cap.main);
+ ret = m5mols_read_u32(sd, CAPC_IMAGE_SIZE, &info->cap.main);
if (!ret)
- ret = m5mols_read(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
+ ret = m5mols_read_u32(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
if (!ret)
info->cap.total = info->cap.main + info->cap.thumb;
@@ -122,7 +120,7 @@ static int m5mols_capture_info(struct m5mols_info *info)
int m5mols_start_capture(struct m5mols_info *info)
{
struct v4l2_subdev *sd = &info->sd;
- u32 resolution = info->resolution;
+ u8 resolution = info->resolution;
int timeout;
int ret;
diff --git a/drivers/media/video/m5mols/m5mols_controls.c b/drivers/media/video/m5mols/m5mols_controls.c
index 817c16fec36..d135d20d09c 100644
--- a/drivers/media/video/m5mols/m5mols_controls.c
+++ b/drivers/media/video/m5mols/m5mols_controls.c
@@ -2,10 +2,10 @@
* Controls for M-5MOLS 8M Pixel camera sensor with ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -130,7 +130,7 @@ static struct m5mols_scenemode m5mols_default_scenemode[] = {
*
* WARNING: The execution order is important. Do not change the order.
*/
-int m5mols_do_scenemode(struct m5mols_info *info, u32 mode)
+int m5mols_do_scenemode(struct m5mols_info *info, u8 mode)
{
struct v4l2_subdev *sd = &info->sd;
struct m5mols_scenemode scenemode = m5mols_default_scenemode[mode];
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index 76eac26e84a..fb8e4a7a9dd 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -2,10 +2,10 @@
* Driver for M-5MOLS 8M Pixel camera sensor with ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,7 +18,6 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/version.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/videodev2.h>
@@ -133,13 +132,13 @@ static u32 m5mols_swap_byte(u8 *data, u8 length)
/**
* m5mols_read - I2C read function
* @reg: combination of size, category and command for the I2C packet
+ * @size: desired size of I2C packet
* @val: read value
*/
-int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
+static int m5mols_read(struct v4l2_subdev *sd, u32 size, u32 reg, u32 *val)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
u8 rbuf[M5MOLS_I2C_MAX_SIZE + 1];
- u8 size = I2C_SIZE(reg);
u8 category = I2C_CATEGORY(reg);
u8 cmd = I2C_COMMAND(reg);
struct i2c_msg msg[2];
@@ -149,11 +148,6 @@ int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
if (!client->adapter)
return -ENODEV;
- if (size != 1 && size != 2 && size != 4) {
- v4l2_err(sd, "Wrong data size\n");
- return -EINVAL;
- }
-
msg[0].addr = client->addr;
msg[0].flags = 0;
msg[0].len = 5;
@@ -184,6 +178,52 @@ int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
return 0;
}
+int m5mols_read_u8(struct v4l2_subdev *sd, u32 reg, u8 *val)
+{
+ u32 val_32;
+ int ret;
+
+ if (I2C_SIZE(reg) != 1) {
+ v4l2_err(sd, "Wrong data size\n");
+ return -EINVAL;
+ }
+
+ ret = m5mols_read(sd, I2C_SIZE(reg), reg, &val_32);
+ if (ret)
+ return ret;
+
+ *val = (u8)val_32;
+ return ret;
+}
+
+int m5mols_read_u16(struct v4l2_subdev *sd, u32 reg, u16 *val)
+{
+ u32 val_32;
+ int ret;
+
+ if (I2C_SIZE(reg) != 2) {
+ v4l2_err(sd, "Wrong data size\n");
+ return -EINVAL;
+ }
+
+ ret = m5mols_read(sd, I2C_SIZE(reg), reg, &val_32);
+ if (ret)
+ return ret;
+
+ *val = (u16)val_32;
+ return ret;
+}
+
+int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg, u32 *val)
+{
+ if (I2C_SIZE(reg) != 4) {
+ v4l2_err(sd, "Wrong data size\n");
+ return -EINVAL;
+ }
+
+ return m5mols_read(sd, I2C_SIZE(reg), reg, val);
+}
+
/**
* m5mols_write - I2C command write function
* @reg: combination of size, category and command for the I2C packet
@@ -231,13 +271,14 @@ int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
return 0;
}
-int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u8 mask)
{
- u32 busy, i;
+ u8 busy;
+ int i;
int ret;
for (i = 0; i < M5MOLS_I2C_CHECK_RETRY; i++) {
- ret = m5mols_read(sd, I2C_REG(category, cmd, 1), &busy);
+ ret = m5mols_read_u8(sd, I2C_REG(category, cmd, 1), &busy);
if (ret < 0)
return ret;
if ((busy & mask) == mask)
@@ -252,14 +293,14 @@ int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
* Before writing the desired interrupt value, the INT_FACTOR register
* should be read to clear pending interrupts.
*/
-int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg)
{
struct m5mols_info *info = to_m5mols(sd);
- u32 mask = is_available_af(info) ? REG_INT_AF : 0;
- u32 dummy;
+ u8 mask = is_available_af(info) ? REG_INT_AF : 0;
+ u8 dummy;
int ret;
- ret = m5mols_read(sd, SYSTEM_INT_FACTOR, &dummy);
+ ret = m5mols_read_u8(sd, SYSTEM_INT_FACTOR, &dummy);
if (!ret)
ret = m5mols_write(sd, SYSTEM_INT_ENABLE, reg & ~mask);
return ret;
@@ -271,7 +312,7 @@ int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
* Changing the M-5MOLS mode always involves a short delay, so the current
* busy status needs to be checked to guarantee the right mode.
*/
-static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
+static int m5mols_reg_mode(struct v4l2_subdev *sd, u8 mode)
{
int ret = m5mols_write(sd, SYSTEM_SYSMODE, mode);
@@ -286,16 +327,16 @@ static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
* can be guaranteed only when the sensor is operating in the mode to which
* a command belongs.
*/
-int m5mols_mode(struct m5mols_info *info, u32 mode)
+int m5mols_mode(struct m5mols_info *info, u8 mode)
{
struct v4l2_subdev *sd = &info->sd;
int ret = -EINVAL;
- u32 reg;
+ u8 reg;
if (mode < REG_PARAMETER && mode > REG_CAPTURE)
return ret;
- ret = m5mols_read(sd, SYSTEM_SYSMODE, &reg);
+ ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
if ((!ret && reg == mode) || ret)
return ret;
@@ -344,41 +385,37 @@ int m5mols_mode(struct m5mols_info *info, u32 mode)
static int m5mols_get_version(struct v4l2_subdev *sd)
{
struct m5mols_info *info = to_m5mols(sd);
- union {
- struct m5mols_version ver;
- u8 bytes[VERSION_SIZE];
- } version;
- u32 *value;
- u8 cmd = CAT0_VER_CUSTOMER;
+ struct m5mols_version *ver = &info->ver;
+ u8 *str = ver->str;
+ int i;
int ret;
- do {
- value = (u32 *)&version.bytes[cmd];
- ret = m5mols_read(sd, SYSTEM_CMD(cmd), value);
- if (ret)
- return ret;
- } while (cmd++ != CAT0_VER_AWB);
+ ret = m5mols_read_u8(sd, SYSTEM_VER_CUSTOMER, &ver->customer);
+ if (!ret)
+ ret = m5mols_read_u8(sd, SYSTEM_VER_PROJECT, &ver->project);
+ if (!ret)
+ ret = m5mols_read_u16(sd, SYSTEM_VER_FIRMWARE, &ver->fw);
+ if (!ret)
+ ret = m5mols_read_u16(sd, SYSTEM_VER_HARDWARE, &ver->hw);
+ if (!ret)
+ ret = m5mols_read_u16(sd, SYSTEM_VER_PARAMETER, &ver->param);
+ if (!ret)
+ ret = m5mols_read_u16(sd, SYSTEM_VER_AWB, &ver->awb);
+ if (!ret)
+ ret = m5mols_read_u8(sd, AF_VERSION, &ver->af);
+ if (ret)
+ return ret;
- do {
- value = (u32 *)&version.bytes[cmd];
- ret = m5mols_read(sd, SYSTEM_VER_STRING, value);
+ for (i = 0; i < VERSION_STRING_SIZE; i++) {
+ ret = m5mols_read_u8(sd, SYSTEM_VER_STRING, &str[i]);
if (ret)
return ret;
- if (cmd >= VERSION_SIZE - 1)
- return -EINVAL;
- } while (version.bytes[cmd++]);
-
- value = (u32 *)&version.bytes[cmd];
- ret = m5mols_read(sd, AF_VERSION, value);
- if (ret)
- return ret;
+ }
- /* store version information swapped for being readable */
- info->ver = version.ver;
- info->ver.fw = be16_to_cpu(info->ver.fw);
- info->ver.hw = be16_to_cpu(info->ver.hw);
- info->ver.param = be16_to_cpu(info->ver.param);
- info->ver.awb = be16_to_cpu(info->ver.awb);
+ ver->fw = be16_to_cpu(ver->fw);
+ ver->hw = be16_to_cpu(ver->hw);
+ ver->param = be16_to_cpu(ver->param);
+ ver->awb = be16_to_cpu(ver->awb);
v4l2_info(sd, "Manufacturer\t[%s]\n",
is_manufacturer(info, REG_SAMSUNG_ELECTRO) ?
@@ -722,7 +759,7 @@ static int m5mols_init_controls(struct m5mols_info *info)
int ret;
/* Determine value's range & step of controls for various FW version */
- ret = m5mols_read(sd, AE_MAX_GAIN_MON, (u32 *)&max_exposure);
+ ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &max_exposure);
if (!ret)
step_zoom = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1;
if (ret)
@@ -842,18 +879,18 @@ static void m5mols_irq_work(struct work_struct *work)
struct m5mols_info *info =
container_of(work, struct m5mols_info, work_irq);
struct v4l2_subdev *sd = &info->sd;
- u32 reg;
+ u8 reg;
int ret;
if (!is_powered(info) ||
- m5mols_read(sd, SYSTEM_INT_FACTOR, &info->interrupt))
+ m5mols_read_u8(sd, SYSTEM_INT_FACTOR, &info->interrupt))
return;
switch (info->interrupt & REG_INT_MASK) {
case REG_INT_AF:
if (!is_available_af(info))
break;
- ret = m5mols_read(sd, AF_STATUS, &reg);
+ ret = m5mols_read_u8(sd, AF_STATUS, &reg);
v4l2_dbg(2, m5mols_debug, sd, "AF %s\n",
reg == REG_AF_FAIL ? "Failed" :
reg == REG_AF_SUCCESS ? "Success" :
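
The width-specific helpers introduced above move the size check from the shared m5mols_read() into the call site: the width is encoded in the register definition and must match the helper used, and 16-bit values still need the same byte-order fix-up as in m5mols_get_version(). A hedged caller-side fragment using definitions from this patch (the wrapper function itself is hypothetical):

    static int example_dump_ids(struct v4l2_subdev *sd)
    {
            u8 mode;
            u16 fw;
            int ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &mode);    /* 1-byte register */

            if (!ret)
                    ret = m5mols_read_u16(sd, SYSTEM_VER_FIRMWARE, &fw); /* 2-byte register */
            if (ret)
                    return ret;
            fw = be16_to_cpu(fw);   /* values arrive in wire (big-endian) order */
            /* m5mols_read_u8(sd, SYSTEM_VER_FIRMWARE, ...) would fail with -EINVAL. */
            v4l2_info(sd, "sysmode %u, firmware 0x%04x\n", mode, fw);
            return 0;
    }
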
diff --git a/drivers/media/video/m5mols/m5mols_reg.h b/drivers/media/video/m5mols/m5mols_reg.h
index b83e36fc6ac..c755bd6edfe 100644
--- a/drivers/media/video/m5mols/m5mols_reg.h
+++ b/drivers/media/video/m5mols/m5mols_reg.h
@@ -2,10 +2,10 @@
* Register map for M-5MOLS 8M Pixel camera sensor with ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,13 +56,24 @@
* more specific contents, see the definitions in file m5mols.h.
*/
#define CAT0_VER_CUSTOMER 0x00 /* customer version */
-#define CAT0_VER_AWB 0x09 /* Auto WB version */
+#define CAT0_VER_PROJECT 0x01 /* project version */
+#define CAT0_VER_FIRMWARE 0x02 /* Firmware version */
+#define CAT0_VER_HARDWARE 0x04 /* Hardware version */
+#define CAT0_VER_PARAMETER 0x06 /* Parameter version */
+#define CAT0_VER_AWB 0x08 /* Auto WB version */
#define CAT0_VER_STRING 0x0a /* string including M-5MOLS */
#define CAT0_SYSMODE 0x0b /* SYSTEM mode register */
#define CAT0_STATUS 0x0c /* SYSTEM mode status register */
#define CAT0_INT_FACTOR 0x10 /* interrupt pending register */
#define CAT0_INT_ENABLE 0x11 /* interrupt enable register */
+#define SYSTEM_VER_CUSTOMER I2C_REG(CAT_SYSTEM, CAT0_VER_CUSTOMER, 1)
+#define SYSTEM_VER_PROJECT I2C_REG(CAT_SYSTEM, CAT0_VER_PROJECT, 1)
+#define SYSTEM_VER_FIRMWARE I2C_REG(CAT_SYSTEM, CAT0_VER_FIRMWARE, 2)
+#define SYSTEM_VER_HARDWARE I2C_REG(CAT_SYSTEM, CAT0_VER_HARDWARE, 2)
+#define SYSTEM_VER_PARAMETER I2C_REG(CAT_SYSTEM, CAT0_VER_PARAMETER, 2)
+#define SYSTEM_VER_AWB I2C_REG(CAT_SYSTEM, CAT0_VER_AWB, 2)
+
#define SYSTEM_SYSMODE I2C_REG(CAT_SYSTEM, CAT0_SYSMODE, 1)
#define REG_SYSINIT 0x00 /* SYSTEM mode */
#define REG_PARAMETER 0x01 /* PARAMETER mode */
@@ -382,8 +393,8 @@
#define REG_CAP_START_MAIN 0x01
#define REG_CAP_START_THUMB 0x03
-#define CAPC_IMAGE_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 1)
-#define CAPC_THUMB_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 1)
+#define CAPC_IMAGE_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 4)
+#define CAPC_THUMB_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 4)
/*
* Category F - Flash
diff --git a/drivers/media/video/marvell-ccic/Kconfig b/drivers/media/video/marvell-ccic/Kconfig
new file mode 100644
index 00000000000..bf739e3b339
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/Kconfig
@@ -0,0 +1,23 @@
+config VIDEO_CAFE_CCIC
+ tristate "Marvell 88ALP01 (Cafe) CMOS Camera Controller support"
+ depends on PCI && I2C && VIDEO_V4L2
+ select VIDEO_OV7670
+ select VIDEOBUF2_VMALLOC
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This is a video4linux2 driver for the Marvell 88ALP01 integrated
+ CMOS camera controller. This is the controller found on first-
+ generation OLPC systems.
+
+config VIDEO_MMP_CAMERA
+ tristate "Marvell Armada 610 integrated camera controller support"
+ depends on ARCH_MMP && I2C && VIDEO_V4L2
+ select VIDEO_OV7670
+ select I2C_GPIO
+ select VIDEOBUF2_DMA_SG
+ ---help---
+ This is a Video4Linux2 driver for the integrated camera
+ controller found on Marvell Armada 610 application
+ processors (and likely beyond). This is the controller found
+ in OLPC XO 1.75 systems.
+
diff --git a/drivers/media/video/marvell-ccic/Makefile b/drivers/media/video/marvell-ccic/Makefile
new file mode 100644
index 00000000000..05a792c579a
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_VIDEO_CAFE_CCIC) += cafe_ccic.o
+cafe_ccic-y := cafe-driver.o mcam-core.o
+
+obj-$(CONFIG_VIDEO_MMP_CAMERA) += mmp_camera.o
+mmp_camera-y := mmp-driver.o mcam-core.o
+
diff --git a/drivers/media/video/marvell-ccic/cafe-driver.c b/drivers/media/video/marvell-ccic/cafe-driver.c
new file mode 100644
index 00000000000..d030f9beae8
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/cafe-driver.c
@@ -0,0 +1,654 @@
+/*
+ * A driver for the CMOS camera controller in the Marvell 88ALP01 "cafe"
+ * multifunction chip. Currently works with the Omnivision OV7670
+ * sensor.
+ *
+ * The data sheet for this device can be found at:
+ * http://www.marvell.com/products/pc_connectivity/88alp01/
+ *
+ * Copyright 2006-11 One Laptop Per Child Association, Inc.
+ * Copyright 2006-11 Jonathan Corbet <corbet@lwn.net>
+ *
+ * Written by Jonathan Corbet, corbet@lwn.net.
+ *
+ * v4l2_device/v4l2_subdev conversion by:
+ * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License, version 2.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-chip-ident.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "mcam-core.h"
+
+#define CAFE_VERSION 0x000002
+
+
+/*
+ * Parameters.
+ */
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
+MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("Video");
+
+
+
+
+struct cafe_camera {
+ int registered; /* Fully initialized? */
+ struct mcam_camera mcam;
+ struct pci_dev *pdev;
+ wait_queue_head_t smbus_wait; /* Waiting on i2c events */
+};
+
+/*
+ * Most of the camera controller registers are defined in mcam-core.h,
+ * but the Cafe platform has some additional registers of its own;
+ * they are described here.
+ */
+
+/*
+ * "General purpose register" has a couple of GPIOs used for sensor
+ * power and reset on OLPC XO 1.0 systems.
+ */
+#define REG_GPR 0xb4
+#define GPR_C1EN 0x00000020 /* Pad 1 (power down) enable */
+#define GPR_C0EN 0x00000010 /* Pad 0 (reset) enable */
+#define GPR_C1 0x00000002 /* Control 1 value */
+/*
+ * Control 0 is wired to reset on OLPC machines. For ov7x sensors,
+ * it is active low.
+ */
+#define GPR_C0 0x00000001 /* Control 0 value */
+
+/*
+ * These registers control the SMBUS module for communicating
+ * with the sensor.
+ */
+#define REG_TWSIC0 0xb8 /* TWSI (smbus) control 0 */
+#define TWSIC0_EN 0x00000001 /* TWSI enable */
+#define TWSIC0_MODE 0x00000002 /* 1 = 16-bit, 0 = 8-bit */
+#define TWSIC0_SID 0x000003fc /* Slave ID */
+/*
+ * Subtle trickery: the slave ID field starts with bit 2. But the
+ * Linux i2c stack wants to treat the bottommost bit as a separate
+ * read/write bit, which is why slave ID's are usually presented
+ * >>1. For consistency with that behavior, we shift over three
+ * bits instead of two.
+ */
+#define TWSIC0_SID_SHIFT 3
+#define TWSIC0_CLKDIV 0x0007fc00 /* Clock divider */
+#define TWSIC0_MASKACK 0x00400000 /* Mask ack from sensor */
+#define TWSIC0_OVMAGIC 0x00800000 /* Make it work on OV sensors */
+
+#define REG_TWSIC1 0xbc /* TWSI control 1 */
+#define TWSIC1_DATA 0x0000ffff /* Data to/from camchip */
+#define TWSIC1_ADDR 0x00ff0000 /* Address (register) */
+#define TWSIC1_ADDR_SHIFT 16
+#define TWSIC1_READ 0x01000000 /* Set for read op */
+#define TWSIC1_WSTAT 0x02000000 /* Write status */
+#define TWSIC1_RVALID 0x04000000 /* Read data valid */
+#define TWSIC1_ERROR 0x08000000 /* Something screwed up */
+
+/*
+ * Here are the weird global control registers
+ */
+#define REG_GL_CSR 0x3004 /* Control/status register */
+#define GCSR_SRS 0x00000001 /* SW Reset set */
+#define GCSR_SRC 0x00000002 /* SW Reset clear */
+#define GCSR_MRS 0x00000004 /* Master reset set */
+#define GCSR_MRC 0x00000008 /* HW Reset clear */
+#define GCSR_CCIC_EN 0x00004000 /* CCIC Clock enable */
+#define REG_GL_IMASK 0x300c /* Interrupt mask register */
+#define GIMSK_CCIC_EN 0x00000004 /* CCIC Interrupt enable */
+
+#define REG_GL_FCR 0x3038 /* GPIO functional control register */
+#define GFCR_GPIO_ON 0x08 /* Camera GPIO enabled */
+#define REG_GL_GPIOR 0x315c /* GPIO register */
+#define GGPIO_OUT 0x80000 /* GPIO output */
+#define GGPIO_VAL 0x00008 /* Output pin value */
+
+#define REG_LEN (REG_GL_IMASK + 4)
+
+
+/*
+ * Debugging and related.
+ */
+#define cam_err(cam, fmt, arg...) \
+ dev_err(&(cam)->pdev->dev, fmt, ##arg);
+#define cam_warn(cam, fmt, arg...) \
+ dev_warn(&(cam)->pdev->dev, fmt, ##arg);
+
+/* -------------------------------------------------------------------- */
+/*
+ * The I2C/SMBUS interface to the camera itself starts here. The
+ * controller handles SMBUS itself, presenting a relatively simple register
+ * interface; all we have to do is to tell it where to route the data.
+ */
+#define CAFE_SMBUS_TIMEOUT (HZ) /* generous */
+
+static inline struct cafe_camera *to_cam(struct v4l2_device *dev)
+{
+ struct mcam_camera *m = container_of(dev, struct mcam_camera, v4l2_dev);
+ return container_of(m, struct cafe_camera, mcam);
+}
+
+
+static int cafe_smbus_write_done(struct mcam_camera *mcam)
+{
+ unsigned long flags;
+ int c1;
+
+ /*
+ * We must delay after the interrupt, or the controller gets confused
+ * and never does give us good status. Fortunately, we don't do this
+ * often.
+ */
+ udelay(20);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ c1 = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+ return (c1 & (TWSIC1_WSTAT|TWSIC1_ERROR)) != TWSIC1_WSTAT;
+}
+
+static int cafe_smbus_write_data(struct cafe_camera *cam,
+ u16 addr, u8 command, u8 value)
+{
+ unsigned int rval;
+ unsigned long flags;
+ struct mcam_camera *mcam = &cam->mcam;
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
+ rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
+ /*
+ * Marvell sez set clkdiv to all 1's for now.
+ */
+ rval |= TWSIC0_CLKDIV;
+ mcam_reg_write(mcam, REG_TWSIC0, rval);
+ (void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
+ rval = value | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
+ mcam_reg_write(mcam, REG_TWSIC1, rval);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ /* Unfortunately, reading TWSIC1 too soon after sending a command
+ * causes the device to die.
+ * Use a busy-wait because we often send a large quantity of small
+ * commands at once; using msleep() would cause a lot of context
+ * switches which take longer than 2ms, resulting in noticeable
+ * boot-time and capture-start delays.
+ */
+ mdelay(2);
+
+ /*
+ * Another sad fact is that sometimes, commands silently complete but
+ * cafe_smbus_write_done() never becomes aware of this.
+ * This happens at random and appears to be possible with any
+ * command.
+ * We don't understand why this is. We work around this issue
+ * with the timeout in the wait below, assuming that all commands
+ * complete within the timeout.
+ */
+ wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(mcam),
+ CAFE_SMBUS_TIMEOUT);
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ if (rval & TWSIC1_WSTAT) {
+ cam_err(cam, "SMBUS write (%02x/%02x/%02x) timed out\n", addr,
+ command, value);
+ return -EIO;
+ }
+ if (rval & TWSIC1_ERROR) {
+ cam_err(cam, "SMBUS write (%02x/%02x/%02x) error\n", addr,
+ command, value);
+ return -EIO;
+ }
+ return 0;
+}
+
+
+
+static int cafe_smbus_read_done(struct mcam_camera *mcam)
+{
+ unsigned long flags;
+ int c1;
+
+ /*
+ * We must delay after the interrupt, or the controller gets confused
+ * and never does give us good status. Fortunately, we don't do this
+ * often.
+ */
+ udelay(20);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ c1 = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+ return c1 & (TWSIC1_RVALID|TWSIC1_ERROR);
+}
+
+
+
+static int cafe_smbus_read_data(struct cafe_camera *cam,
+ u16 addr, u8 command, u8 *value)
+{
+ unsigned int rval;
+ unsigned long flags;
+ struct mcam_camera *mcam = &cam->mcam;
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
+ rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
+ /*
+ * Marvell sez set clkdiv to all 1's for now.
+ */
+ rval |= TWSIC0_CLKDIV;
+ mcam_reg_write(mcam, REG_TWSIC0, rval);
+ (void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
+ rval = TWSIC1_READ | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
+ mcam_reg_write(mcam, REG_TWSIC1, rval);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ wait_event_timeout(cam->smbus_wait,
+ cafe_smbus_read_done(mcam), CAFE_SMBUS_TIMEOUT);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ rval = mcam_reg_read(mcam, REG_TWSIC1);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+
+ if (rval & TWSIC1_ERROR) {
+ cam_err(cam, "SMBUS read (%02x/%02x) error\n", addr, command);
+ return -EIO;
+ }
+ if (!(rval & TWSIC1_RVALID)) {
+ cam_err(cam, "SMBUS read (%02x/%02x) timed out\n", addr,
+ command);
+ return -EIO;
+ }
+ *value = rval & 0xff;
+ return 0;
+}
+
+/*
+ * Perform a transfer over SMBUS. This thing is called under
+ * the i2c bus lock, so we shouldn't race with ourselves...
+ */
+static int cafe_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char rw, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ struct cafe_camera *cam = i2c_get_adapdata(adapter);
+ int ret = -EINVAL;
+
+ /*
+ * This interface would appear to only do byte data ops. OK
+ * it can do word too, but the cam chip has no use for that.
+ */
+ if (size != I2C_SMBUS_BYTE_DATA) {
+ cam_err(cam, "funky xfer size %d\n", size);
+ return -EINVAL;
+ }
+
+ if (rw == I2C_SMBUS_WRITE)
+ ret = cafe_smbus_write_data(cam, addr, command, data->byte);
+ else if (rw == I2C_SMBUS_READ)
+ ret = cafe_smbus_read_data(cam, addr, command, &data->byte);
+ return ret;
+}
+
+
+static void cafe_smbus_enable_irq(struct cafe_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->mcam.dev_lock, flags);
+ mcam_reg_set_bit(&cam->mcam, REG_IRQMASK, TWSIIRQS);
+ spin_unlock_irqrestore(&cam->mcam.dev_lock, flags);
+}
+
+static u32 cafe_smbus_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SMBUS_READ_BYTE_DATA |
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
+}
+
+static struct i2c_algorithm cafe_smbus_algo = {
+ .smbus_xfer = cafe_smbus_xfer,
+ .functionality = cafe_smbus_func
+};
+
+static int cafe_smbus_setup(struct cafe_camera *cam)
+{
+ struct i2c_adapter *adap;
+ int ret;
+
+ adap = kzalloc(sizeof(*adap), GFP_KERNEL);
+ if (adap == NULL)
+ return -ENOMEM;
+ cam->mcam.i2c_adapter = adap;
+ cafe_smbus_enable_irq(cam);
+ adap->owner = THIS_MODULE;
+ adap->algo = &cafe_smbus_algo;
+ strcpy(adap->name, "cafe_ccic");
+ adap->dev.parent = &cam->pdev->dev;
+ i2c_set_adapdata(adap, cam);
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ printk(KERN_ERR "Unable to register cafe i2c adapter\n");
+ return ret;
+}
+
+static void cafe_smbus_shutdown(struct cafe_camera *cam)
+{
+ i2c_del_adapter(cam->mcam.i2c_adapter);
+ kfree(cam->mcam.i2c_adapter);
+}
+
+
+/*
+ * Controller-level stuff
+ */
+
+static void cafe_ctlr_init(struct mcam_camera *mcam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+ /*
+ * Added magic to bring up the hardware on the B-Test board
+ */
+ mcam_reg_write(mcam, 0x3038, 0x8);
+ mcam_reg_write(mcam, 0x315c, 0x80008);
+ /*
+ * Go through the dance needed to wake the device up.
+ * Note that these registers are global and shared
+ * with the NAND and SD devices. Interaction between the
+ * three still needs to be examined.
+ */
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRC);
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRS);
+ /*
+ * Here we must wait a bit for the controller to come around.
+ */
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+ msleep(5);
+ spin_lock_irqsave(&mcam->dev_lock, flags);
+
+ mcam_reg_write(mcam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC);
+ mcam_reg_set_bit(mcam, REG_GL_IMASK, GIMSK_CCIC_EN);
+ /*
+ * Mask all interrupts.
+ */
+ mcam_reg_write(mcam, REG_IRQMASK, 0);
+ spin_unlock_irqrestore(&mcam->dev_lock, flags);
+}
+
+
+static void cafe_ctlr_power_up(struct mcam_camera *mcam)
+{
+ /*
+ * Part one of the sensor dance: turn the global
+ * GPIO signal on.
+ */
+ mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
+ mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT|GGPIO_VAL);
+ /*
+ * Put the sensor into operational mode (assumes OLPC-style
+ * wiring). Control 0 is reset - set to 1 to operate.
+ * Control 1 is power down, set to 0 to operate.
+ */
+ mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
+ mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
+}
+
+static void cafe_ctlr_power_down(struct mcam_camera *mcam)
+{
+ mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C1);
+ mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
+ mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT);
+}
+
+
+
+/*
+ * The platform interrupt handler.
+ */
+static irqreturn_t cafe_irq(int irq, void *data)
+{
+ struct cafe_camera *cam = data;
+ struct mcam_camera *mcam = &cam->mcam;
+ unsigned int irqs, handled;
+
+ spin_lock(&mcam->dev_lock);
+ irqs = mcam_reg_read(mcam, REG_IRQSTAT);
+ handled = cam->registered && mccic_irq(mcam, irqs);
+ if (irqs & TWSIIRQS) {
+ mcam_reg_write(mcam, REG_IRQSTAT, TWSIIRQS);
+ wake_up(&cam->smbus_wait);
+ handled = 1;
+ }
+ spin_unlock(&mcam->dev_lock);
+ return IRQ_RETVAL(handled);
+}
+
+
+/* -------------------------------------------------------------------------- */
+/*
+ * PCI interface stuff.
+ */
+
+static int cafe_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int ret;
+ struct cafe_camera *cam;
+ struct mcam_camera *mcam;
+
+ /*
+ * Start putting together one of our big camera structures.
+ */
+ ret = -ENOMEM;
+ cam = kzalloc(sizeof(struct cafe_camera), GFP_KERNEL);
+ if (cam == NULL)
+ goto out;
+ cam->pdev = pdev;
+ mcam = &cam->mcam;
+ mcam->chip_id = V4L2_IDENT_CAFE;
+ spin_lock_init(&mcam->dev_lock);
+ init_waitqueue_head(&cam->smbus_wait);
+ mcam->plat_power_up = cafe_ctlr_power_up;
+ mcam->plat_power_down = cafe_ctlr_power_down;
+ mcam->dev = &pdev->dev;
+ /*
+ * Set the clock speed for the XO 1; I don't believe this
+ * driver has ever run anywhere else.
+ */
+ mcam->clock_speed = 45;
+ mcam->use_smbus = 1;
+ /*
+ * Vmalloc mode for buffers is traditional with this driver.
+ * We *might* be able to run DMA_contig, especially on a system
+ * with CMA in it.
+ */
+ mcam->buffer_mode = B_vmalloc;
+ /*
+ * Get set up on the PCI bus.
+ */
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto out_free;
+ pci_set_master(pdev);
+
+ ret = -EIO;
+ mcam->regs = pci_iomap(pdev, 0, 0);
+ if (!mcam->regs) {
+ printk(KERN_ERR "Unable to ioremap cafe-ccic regs\n");
+ goto out_disable;
+ }
+ ret = request_irq(pdev->irq, cafe_irq, IRQF_SHARED, "cafe-ccic", cam);
+ if (ret)
+ goto out_iounmap;
+
+ /*
+ * Initialize the controller and leave it powered up. It will
+ * stay that way until the sensor driver shows up.
+ */
+ cafe_ctlr_init(mcam);
+ cafe_ctlr_power_up(mcam);
+ /*
+ * Set up I2C/SMBUS communications. We have to drop the mutex here
+ * because the sensor could attach in this call chain, leading to
+ * unsightly deadlocks.
+ */
+ ret = cafe_smbus_setup(cam);
+ if (ret)
+ goto out_pdown;
+
+ ret = mccic_register(mcam);
+ if (ret == 0) {
+ cam->registered = 1;
+ return 0;
+ }
+
+ cafe_smbus_shutdown(cam);
+out_pdown:
+ cafe_ctlr_power_down(mcam);
+ free_irq(pdev->irq, cam);
+out_iounmap:
+ pci_iounmap(pdev, mcam->regs);
+out_disable:
+ pci_disable_device(pdev);
+out_free:
+ kfree(cam);
+out:
+ return ret;
+}
+
+
+/*
+ * Shut down an initialized device
+ */
+static void cafe_shutdown(struct cafe_camera *cam)
+{
+ mccic_shutdown(&cam->mcam);
+ cafe_smbus_shutdown(cam);
+ free_irq(cam->pdev->irq, cam);
+ pci_iounmap(cam->pdev, cam->mcam.regs);
+}
+
+
+static void cafe_pci_remove(struct pci_dev *pdev)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+ struct cafe_camera *cam = to_cam(v4l2_dev);
+
+ if (cam == NULL) {
+ printk(KERN_WARNING "pci_remove on unknown pdev %p\n", pdev);
+ return;
+ }
+ cafe_shutdown(cam);
+ kfree(cam);
+}
+
+
+#ifdef CONFIG_PM
+/*
+ * Basic power management.
+ */
+static int cafe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+ struct cafe_camera *cam = to_cam(v4l2_dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+ mccic_suspend(&cam->mcam);
+ pci_disable_device(pdev);
+ return 0;
+}
+
+
+static int cafe_pci_resume(struct pci_dev *pdev)
+{
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
+ struct cafe_camera *cam = to_cam(v4l2_dev);
+ int ret = 0;
+
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+
+ if (ret) {
+ cam_warn(cam, "Unable to re-enable device on resume!\n");
+ return ret;
+ }
+ cafe_ctlr_init(&cam->mcam);
+ return mccic_resume(&cam->mcam);
+}
+
+#endif /* CONFIG_PM */
+
+static struct pci_device_id cafe_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL,
+ PCI_DEVICE_ID_MARVELL_88ALP01_CCIC) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, cafe_ids);
+
+static struct pci_driver cafe_pci_driver = {
+ .name = "cafe1000-ccic",
+ .id_table = cafe_ids,
+ .probe = cafe_pci_probe,
+ .remove = cafe_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = cafe_pci_suspend,
+ .resume = cafe_pci_resume,
+#endif
+};
+
+
+
+
+static int __init cafe_init(void)
+{
+ int ret;
+
+ printk(KERN_NOTICE "Marvell M88ALP01 'CAFE' Camera Controller version %d\n",
+ CAFE_VERSION);
+ ret = pci_register_driver(&cafe_pci_driver);
+ if (ret) {
+ printk(KERN_ERR "Unable to register cafe_ccic driver\n");
+ goto out;
+ }
+ ret = 0;
+
+out:
+ return ret;
+}
+
+
+static void __exit cafe_exit(void)
+{
+ pci_unregister_driver(&cafe_pci_driver);
+}
+
+module_init(cafe_init);
+module_exit(cafe_exit);
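
For context on how the SMBus plumbing above gets used: once cafe_smbus_setup() has registered the adapter, the sensor driver never touches the TWSI registers; its ordinary SMBus calls are routed into cafe_smbus_xfer(). A hedged sketch of the sensor-side view (the client and register are illustrative, not part of the patch):

    #include <linux/i2c.h>

    /* In a sensor driver whose i2c_client sits on the "cafe_ccic" adapter. */
    static int sensor_read_reg(struct i2c_client *client, u8 reg, u8 *val)
    {
            s32 ret = i2c_smbus_read_byte_data(client, reg); /* -> cafe_smbus_xfer() */

            if (ret < 0)
                    return ret;
            *val = ret & 0xff;
            return 0;
    }
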
diff --git a/drivers/media/video/marvell-ccic/mcam-core.c b/drivers/media/video/marvell-ccic/mcam-core.c
new file mode 100644
index 00000000000..83c14514cd5
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/mcam-core.c
@@ -0,0 +1,1843 @@
+/*
+ * The Marvell camera core. This device appears in a number of settings,
+ * so it needs platform-specific support outside of the core.
+ *
+ * Copyright 2011 Jonathan Corbet corbet@lwn.net
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/ov7670.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "mcam-core.h"
+
+/*
+ * Basic frame stats - to be deleted shortly
+ */
+static int frames;
+static int singles;
+static int delivered;
+
+#ifdef MCAM_MODE_VMALLOC
+/*
+ * Internal DMA buffer management. Since the controller cannot do S/G I/O,
+ * we must have physically contiguous buffers to bring frames into.
+ * These parameters control how many buffers we use, whether we
+ * allocate them at load time (better chance of success, but nails down
+ * memory) or when somebody tries to use the camera (riskier), and,
+ * for load-time allocation, how big they should be.
+ *
+ * The controller can cycle through three buffers. We could use
+ * more by flipping pointers around, but it probably makes little
+ * sense.
+ */
+
+static int alloc_bufs_at_read;
+module_param(alloc_bufs_at_read, bool, 0444);
+MODULE_PARM_DESC(alloc_bufs_at_read,
+ "Non-zero value causes DMA buffers to be allocated when the "
+ "video capture device is read, rather than at module load "
+ "time. This saves memory, but decreases the chances of "
+ "successfully getting those buffers. This parameter is "
+ "only used in the vmalloc buffer mode");
+
+static int n_dma_bufs = 3;
+module_param(n_dma_bufs, uint, 0644);
+MODULE_PARM_DESC(n_dma_bufs,
+ "The number of DMA buffers to allocate. Can be either two "
+ "(saves memory, makes timing tighter) or three.");
+
+static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2; /* Worst case */
+module_param(dma_buf_size, uint, 0444);
+MODULE_PARM_DESC(dma_buf_size,
+ "The size of the allocated DMA buffers. If actual operating "
+ "parameters require larger buffers, an attempt to reallocate "
+ "will be made.");
+#else /* MCAM_MODE_VMALLOC */
+static const int alloc_bufs_at_read = 0;
+static const int n_dma_bufs = 3; /* Used by S/G_PARM */
+#endif /* MCAM_MODE_VMALLOC */
+
+static int flip;
+module_param(flip, bool, 0444);
+MODULE_PARM_DESC(flip,
+ "If set, the sensor will be instructed to flip the image "
+ "vertically.");
+
+static int buffer_mode = -1;
+module_param(buffer_mode, int, 0444);
+MODULE_PARM_DESC(buffer_mode,
+ "Set the buffer mode to be used; default is to go with what "
+ "the platform driver asks for. Set to 0 for vmalloc, 1 for "
+ "DMA contiguous.");
+
+/*
+ * Status flags. Always manipulated with bit operations.
+ */
+#define CF_BUF0_VALID 0 /* Buffers valid - first three */
+#define CF_BUF1_VALID 1
+#define CF_BUF2_VALID 2
+#define CF_DMA_ACTIVE 3 /* A frame is incoming */
+#define CF_CONFIG_NEEDED 4 /* Must configure hardware */
+#define CF_SINGLE_BUFFER 5 /* Running with a single buffer */
+#define CF_SG_RESTART 6 /* SG restart needed */
+
+#define sensor_call(cam, o, f, args...) \
+ v4l2_subdev_call(cam->sensor, o, f, ##args)
+
+static struct mcam_format_struct {
+ __u8 *desc;
+ __u32 pixelformat;
+ int bpp; /* Bytes per pixel */
+ enum v4l2_mbus_pixelcode mbus_code;
+} mcam_formats[] = {
+ {
+ .desc = "YUYV 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ },
+ {
+ .desc = "RGB 444",
+ .pixelformat = V4L2_PIX_FMT_RGB444,
+ .mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
+ .bpp = 2,
+ },
+ {
+ .desc = "RGB 565",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
+ .bpp = 2,
+ },
+ {
+ .desc = "Raw RGB Bayer",
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8,
+ .bpp = 1
+ },
+};
+#define N_MCAM_FMTS ARRAY_SIZE(mcam_formats)
+
+static struct mcam_format_struct *mcam_find_format(u32 pixelformat)
+{
+ unsigned i;
+
+ for (i = 0; i < N_MCAM_FMTS; i++)
+ if (mcam_formats[i].pixelformat == pixelformat)
+ return mcam_formats + i;
+ /* Not found? Then return the first format. */
+ return mcam_formats;
+}
+
+/*
+ * The default format we use until somebody says otherwise.
+ */
+static const struct v4l2_pix_format mcam_def_pix_format = {
+ .width = VGA_WIDTH,
+ .height = VGA_HEIGHT,
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .field = V4L2_FIELD_NONE,
+ .bytesperline = VGA_WIDTH*2,
+ .sizeimage = VGA_WIDTH*VGA_HEIGHT*2,
+};
+
+static const enum v4l2_mbus_pixelcode mcam_def_mbus_code =
+ V4L2_MBUS_FMT_YUYV8_2X8;
+
+
+/*
+ * The two-word DMA descriptor format used by the Armada 610 and like. There
+ * is a three-word format as well (set C1_DESC_3WORD) where the third
+ * word is a pointer to the next descriptor, but we don't use it. Two-word
+ * descriptors have to be contiguous in memory.
+ */
+struct mcam_dma_desc {
+ u32 dma_addr;
+ u32 segment_len;
+};
+
+/*
+ * Our buffer type for working with videobuf2. Note that the vb2
+ * developers have decreed that struct vb2_buffer must be at the
+ * beginning of this structure.
+ */
+struct mcam_vb_buffer {
+ struct vb2_buffer vb_buf;
+ struct list_head queue;
+ struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
+ dma_addr_t dma_desc_pa; /* Descriptor physical address */
+ int dma_desc_nent; /* Number of mapped descriptors */
+};
+
+static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_buffer *vb)
+{
+ return container_of(vb, struct mcam_vb_buffer, vb_buf);
+}
+
+/*
+ * Hand a completed buffer back to user space.
+ */
+static void mcam_buffer_done(struct mcam_camera *cam, int frame,
+ struct vb2_buffer *vbuf)
+{
+ vbuf->v4l2_buf.bytesused = cam->pix_format.sizeimage;
+ vbuf->v4l2_buf.sequence = cam->buf_seq[frame];
+ vb2_set_plane_payload(vbuf, 0, cam->pix_format.sizeimage);
+ vb2_buffer_done(vbuf, VB2_BUF_STATE_DONE);
+}
+
+
+
+/*
+ * Debugging and related.
+ */
+#define cam_err(cam, fmt, arg...) \
+ dev_err((cam)->dev, fmt, ##arg);
+#define cam_warn(cam, fmt, arg...) \
+ dev_warn((cam)->dev, fmt, ##arg);
+#define cam_dbg(cam, fmt, arg...) \
+ dev_dbg((cam)->dev, fmt, ##arg);
+
+
+/*
+ * Flag manipulation helpers
+ */
+static void mcam_reset_buffers(struct mcam_camera *cam)
+{
+ int i;
+
+ cam->next_buf = -1;
+ for (i = 0; i < cam->nbufs; i++)
+ clear_bit(i, &cam->flags);
+}
+
+static inline int mcam_needs_config(struct mcam_camera *cam)
+{
+ return test_bit(CF_CONFIG_NEEDED, &cam->flags);
+}
+
+static void mcam_set_config_needed(struct mcam_camera *cam, int needed)
+{
+ if (needed)
+ set_bit(CF_CONFIG_NEEDED, &cam->flags);
+ else
+ clear_bit(CF_CONFIG_NEEDED, &cam->flags);
+}
+
+/* ------------------------------------------------------------------- */
+/*
+ * Make the controller start grabbing images. Everything must
+ * be set up before doing this.
+ */
+static void mcam_ctlr_start(struct mcam_camera *cam)
+{
+ /* set_bit performs a read, so no other barrier should be
+ needed here */
+ mcam_reg_set_bit(cam, REG_CTRL0, C0_ENABLE);
+}
+
+static void mcam_ctlr_stop(struct mcam_camera *cam)
+{
+ mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
+}
+
+/* ------------------------------------------------------------------- */
+
+#ifdef MCAM_MODE_VMALLOC
+/*
+ * Code specific to the vmalloc buffer mode.
+ */
+
+/*
+ * Allocate in-kernel DMA buffers for vmalloc mode.
+ */
+static int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
+{
+ int i;
+
+ mcam_set_config_needed(cam, 1);
+ if (loadtime)
+ cam->dma_buf_size = dma_buf_size;
+ else
+ cam->dma_buf_size = cam->pix_format.sizeimage;
+ if (n_dma_bufs > 3)
+ n_dma_bufs = 3;
+
+ cam->nbufs = 0;
+ for (i = 0; i < n_dma_bufs; i++) {
+ cam->dma_bufs[i] = dma_alloc_coherent(cam->dev,
+ cam->dma_buf_size, cam->dma_handles + i,
+ GFP_KERNEL);
+ if (cam->dma_bufs[i] == NULL) {
+ cam_warn(cam, "Failed to allocate DMA buffer\n");
+ break;
+ }
+ (cam->nbufs)++;
+ }
+
+ switch (cam->nbufs) {
+ case 1:
+ dma_free_coherent(cam->dev, cam->dma_buf_size,
+ cam->dma_bufs[0], cam->dma_handles[0]);
+ cam->nbufs = 0;
+ case 0:
+ cam_err(cam, "Insufficient DMA buffers, cannot operate\n");
+ return -ENOMEM;
+
+ case 2:
+ if (n_dma_bufs > 2)
+ cam_warn(cam, "Will limp along with only 2 buffers\n");
+ break;
+ }
+ return 0;
+}
+
+static void mcam_free_dma_bufs(struct mcam_camera *cam)
+{
+ int i;
+
+ for (i = 0; i < cam->nbufs; i++) {
+ dma_free_coherent(cam->dev, cam->dma_buf_size,
+ cam->dma_bufs[i], cam->dma_handles[i]);
+ cam->dma_bufs[i] = NULL;
+ }
+ cam->nbufs = 0;
+}
+
+
+/*
+ * Set up DMA buffers when operating in vmalloc mode
+ */
+static void mcam_ctlr_dma_vmalloc(struct mcam_camera *cam)
+{
+ /*
+ * Store the first two Y buffers (we aren't supporting
+ * planar formats for now, so no UV bufs). Then either
+ * set the third if it exists, or tell the controller
+ * to just use two.
+ */
+ mcam_reg_write(cam, REG_Y0BAR, cam->dma_handles[0]);
+ mcam_reg_write(cam, REG_Y1BAR, cam->dma_handles[1]);
+ if (cam->nbufs > 2) {
+ mcam_reg_write(cam, REG_Y2BAR, cam->dma_handles[2]);
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_TWOBUFS);
+ } else
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
+ if (cam->chip_id == V4L2_IDENT_CAFE)
+ mcam_reg_write(cam, REG_UBAR, 0); /* 32 bits only */
+}
+
+/*
+ * Copy data out to user space in the vmalloc case
+ */
+static void mcam_frame_tasklet(unsigned long data)
+{
+ struct mcam_camera *cam = (struct mcam_camera *) data;
+ int i;
+ unsigned long flags;
+ struct mcam_vb_buffer *buf;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ for (i = 0; i < cam->nbufs; i++) {
+ int bufno = cam->next_buf;
+
+ if (cam->state != S_STREAMING || bufno < 0)
+ break; /* I/O got stopped */
+ if (++(cam->next_buf) >= cam->nbufs)
+ cam->next_buf = 0;
+ if (!test_bit(bufno, &cam->flags))
+ continue;
+ if (list_empty(&cam->buffers)) {
+ singles++;
+ break; /* Leave it valid, hope for better later */
+ }
+ delivered++;
+ clear_bit(bufno, &cam->flags);
+ buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
+ queue);
+ list_del_init(&buf->queue);
+ /*
+ * Drop the lock during the big copy. This *should* be safe...
+ */
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ memcpy(vb2_plane_vaddr(&buf->vb_buf, 0), cam->dma_bufs[bufno],
+ cam->pix_format.sizeimage);
+ mcam_buffer_done(cam, bufno, &buf->vb_buf);
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ }
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+
+/*
+ * Make sure our allocated buffers are up to the task.
+ */
+static int mcam_check_dma_buffers(struct mcam_camera *cam)
+{
+ if (cam->nbufs > 0 && cam->dma_buf_size < cam->pix_format.sizeimage)
+ mcam_free_dma_bufs(cam);
+ if (cam->nbufs == 0)
+ return mcam_alloc_dma_bufs(cam, 0);
+ return 0;
+}
+
+static void mcam_vmalloc_done(struct mcam_camera *cam, int frame)
+{
+ tasklet_schedule(&cam->s_tasklet);
+}
+
+#else /* MCAM_MODE_VMALLOC */
+
+static inline int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
+{
+ return 0;
+}
+
+static inline void mcam_free_dma_bufs(struct mcam_camera *cam)
+{
+ return;
+}
+
+static inline int mcam_check_dma_buffers(struct mcam_camera *cam)
+{
+ return 0;
+}
+
+
+
+#endif /* MCAM_MODE_VMALLOC */
+
+
+#ifdef MCAM_MODE_DMA_CONTIG
+/* ---------------------------------------------------------------------- */
+/*
+ * DMA-contiguous code.
+ */
+/*
+ * Set up a contiguous buffer for the given frame. Here also is where
+ * the underrun strategy is set: if there is no buffer available, reuse
+ * the buffer from the other BAR and set the CF_SINGLE_BUFFER flag to
+ * keep the interrupt handler from giving that buffer back to user
+ * space. In this way, we always have a buffer to DMA to and don't
+ * have to try to play games stopping and restarting the controller.
+ */
+static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
+{
+ struct mcam_vb_buffer *buf;
+ /*
+ * If there are no available buffers, go into single mode
+ */
+ if (list_empty(&cam->buffers)) {
+ buf = cam->vb_bufs[frame ^ 0x1];
+ cam->vb_bufs[frame] = buf;
+ mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
+ vb2_dma_contig_plane_paddr(&buf->vb_buf, 0));
+ set_bit(CF_SINGLE_BUFFER, &cam->flags);
+ singles++;
+ return;
+ }
+ /*
+ * OK, we have a buffer we can use.
+ */
+ buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
+ list_del_init(&buf->queue);
+ mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
+ vb2_dma_contig_plane_paddr(&buf->vb_buf, 0));
+ cam->vb_bufs[frame] = buf;
+ clear_bit(CF_SINGLE_BUFFER, &cam->flags);
+}
+
+/*
+ * Initial B_DMA_contig setup.
+ */
+static void mcam_ctlr_dma_contig(struct mcam_camera *cam)
+{
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
+ cam->nbufs = 2;
+ mcam_set_contig_buffer(cam, 0);
+ mcam_set_contig_buffer(cam, 1);
+}
+
+/*
+ * Frame completion handling.
+ */
+static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
+{
+ struct mcam_vb_buffer *buf = cam->vb_bufs[frame];
+
+ if (!test_bit(CF_SINGLE_BUFFER, &cam->flags)) {
+ delivered++;
+ mcam_buffer_done(cam, frame, &buf->vb_buf);
+ }
+ mcam_set_contig_buffer(cam, frame);
+}
+
+#endif /* MCAM_MODE_DMA_CONTIG */
+
+#ifdef MCAM_MODE_DMA_SG
+/* ---------------------------------------------------------------------- */
+/*
+ * Scatter/gather-specific code.
+ */
+
+/*
+ * Set up the next buffer for S/G I/O; caller should be sure that
+ * the controller is stopped and a buffer is available.
+ */
+static void mcam_sg_next_buffer(struct mcam_camera *cam)
+{
+ struct mcam_vb_buffer *buf;
+
+ buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
+ list_del_init(&buf->queue);
+ mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
+ mcam_reg_write(cam, REG_DESC_LEN_Y,
+ buf->dma_desc_nent*sizeof(struct mcam_dma_desc));
+ mcam_reg_write(cam, REG_DESC_LEN_U, 0);
+ mcam_reg_write(cam, REG_DESC_LEN_V, 0);
+ cam->vb_bufs[0] = buf;
+}
+
+/*
+ * Initial B_DMA_sg setup
+ */
+static void mcam_ctlr_dma_sg(struct mcam_camera *cam)
+{
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_3WORD);
+ mcam_sg_next_buffer(cam);
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
+ cam->nbufs = 3;
+}
+
+
+/*
+ * Frame completion with S/G is trickier. We can't muck with
+ * a descriptor chain on the fly, since the controller buffers it
+ * internally. So we have to actually stop and restart; Marvell
+ * says this is the way to do it.
+ *
+ * Of course, stopping is easier said than done; experience shows
+ * that the controller can start a frame *after* C0_ENABLE has been
+ * cleared. So when running in S/G mode, the controller is "stopped"
+ * on receipt of the start-of-frame interrupt. That means we can
+ * safely change the DMA descriptor array here and restart things
+ * (assuming there's another buffer waiting to go).
+ */
+static void mcam_dma_sg_done(struct mcam_camera *cam, int frame)
+{
+ struct mcam_vb_buffer *buf = cam->vb_bufs[0];
+
+ /*
+ * Very Bad Not Good Things happen if you don't clear
+ * C1_DESC_ENA before making any descriptor changes.
+ */
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
+ /*
+ * If we have another buffer available, put it in and
+ * restart the engine.
+ */
+ if (!list_empty(&cam->buffers)) {
+ mcam_sg_next_buffer(cam);
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
+ mcam_ctlr_start(cam);
+ /*
+ * Otherwise set CF_SG_RESTART and the controller will
+ * be restarted once another buffer shows up.
+ */
+ } else {
+ set_bit(CF_SG_RESTART, &cam->flags);
+ singles++;
+ }
+ /*
+ * Now we can give the completed frame back to user space.
+ */
+ delivered++;
+ mcam_buffer_done(cam, frame, &buf->vb_buf);
+}
+
+
+/*
+ * Scatter/gather mode requires stopping the controller between
+ * frames so we can put in a new DMA descriptor array. If no new
+ * buffer exists at frame completion, the controller is left stopped;
+ * this function is charged with getting things going again.
+ */
+static void mcam_sg_restart(struct mcam_camera *cam)
+{
+ mcam_ctlr_dma_sg(cam);
+ mcam_ctlr_start(cam);
+ clear_bit(CF_SG_RESTART, &cam->flags);
+}
+
+#else /* MCAM_MODE_DMA_SG */
+
+static inline void mcam_sg_restart(struct mcam_camera *cam)
+{
+ return;
+}
+
+#endif /* MCAM_MODE_DMA_SG */
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Buffer-mode-independent controller code.
+ */
+
+/*
+ * Image format setup
+ */
+static void mcam_ctlr_image(struct mcam_camera *cam)
+{
+ int imgsz;
+ struct v4l2_pix_format *fmt = &cam->pix_format;
+
+ imgsz = ((fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK) |
+ (fmt->bytesperline & IMGSZ_H_MASK);
+ mcam_reg_write(cam, REG_IMGSIZE, imgsz);
+ mcam_reg_write(cam, REG_IMGOFFSET, 0);
+ /* YPITCH just drops the last two bits */
+ mcam_reg_write_mask(cam, REG_IMGPITCH, fmt->bytesperline,
+ IMGP_YP_MASK);
+ /*
+ * Tell the controller about the image format we are using.
+ */
+ switch (cam->pix_format.pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV|C0_YUV_PACKED|C0_YUVE_YUYV,
+ C0_DF_MASK);
+ break;
+
+ case V4L2_PIX_FMT_RGB444:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_RGB|C0_RGBF_444|C0_RGB4_XRGB,
+ C0_DF_MASK);
+ /* Alpha value? */
+ break;
+
+ case V4L2_PIX_FMT_RGB565:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_RGB|C0_RGBF_565|C0_RGB5_BGGR,
+ C0_DF_MASK);
+ break;
+
+ default:
+ cam_err(cam, "Unknown format %x\n", cam->pix_format.pixelformat);
+ break;
+ }
+ /*
+ * Make sure it knows we want to use hsync/vsync.
+ */
+ mcam_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC,
+ C0_SIFM_MASK);
+}
+
+
+/*
+ * Configure the controller for operation; caller holds the
+ * device mutex.
+ */
+static int mcam_ctlr_configure(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ cam->dma_setup(cam);
+ mcam_ctlr_image(cam);
+ mcam_set_config_needed(cam, 0);
+ clear_bit(CF_SG_RESTART, &cam->flags);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ return 0;
+}
+
+static void mcam_ctlr_irq_enable(struct mcam_camera *cam)
+{
+ /*
+ * Clear any pending interrupts, since we do not
+ * expect to have I/O active prior to enabling.
+ */
+ mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS);
+ mcam_reg_set_bit(cam, REG_IRQMASK, FRAMEIRQS);
+}
+
+static void mcam_ctlr_irq_disable(struct mcam_camera *cam)
+{
+ mcam_reg_clear_bit(cam, REG_IRQMASK, FRAMEIRQS);
+}
+
+
+
+static void mcam_ctlr_init(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ /*
+ * Make sure it's not powered down.
+ */
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
+ /*
+ * Turn off the enable bit. It sure should be off anyway,
+ * but it's good to be sure.
+ */
+ mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
+ /*
+ * Clock the sensor appropriately. Controller clock should
+ * be 48MHz, sensor "typical" value is half that.
+ */
+ mcam_reg_write_mask(cam, REG_CLKCTRL, 2, CLK_DIV_MASK);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+
+/*
+ * Stop the controller, and don't return until we're really sure that no
+ * further DMA is going on.
+ */
+static void mcam_ctlr_stop_dma(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ /*
+ * Theory: stop the camera controller (whether it is operating
+ * or not). Delay briefly just in case we race with the SOF
+ * interrupt, then wait until no DMA is active.
+ */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ clear_bit(CF_SG_RESTART, &cam->flags);
+ mcam_ctlr_stop(cam);
+ cam->state = S_IDLE;
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ msleep(40);
+ if (test_bit(CF_DMA_ACTIVE, &cam->flags))
+ cam_err(cam, "Timeout waiting for DMA to end\n");
+ /* This would be bad news - what now? */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ mcam_ctlr_irq_disable(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+/*
+ * Power up and down.
+ */
+static void mcam_ctlr_power_up(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ cam->plat_power_up(cam);
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ msleep(5); /* Just to be sure */
+}
+
+static void mcam_ctlr_power_down(struct mcam_camera *cam)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ /*
+ * School of hard knocks department: be sure we do any register
+ * twiddling on the controller *before* calling the platform
+ * power down routine.
+ */
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_PWRDWN);
+ cam->plat_power_down(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+}
+
+/* -------------------------------------------------------------------- */
+/*
+ * Communications with the sensor.
+ */
+
+static int __mcam_cam_reset(struct mcam_camera *cam)
+{
+ return sensor_call(cam, core, reset, 0);
+}
+
+/*
+ * We have found the sensor on the i2c. Let's try to have a
+ * conversation.
+ */
+static int mcam_cam_init(struct mcam_camera *cam)
+{
+ struct v4l2_dbg_chip_ident chip;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ if (cam->state != S_NOTREADY)
+ cam_warn(cam, "Cam init with device in funky state %d",
+ cam->state);
+ ret = __mcam_cam_reset(cam);
+ if (ret)
+ goto out;
+ chip.ident = V4L2_IDENT_NONE;
+ chip.match.type = V4L2_CHIP_MATCH_I2C_ADDR;
+ chip.match.addr = cam->sensor_addr;
+ ret = sensor_call(cam, core, g_chip_ident, &chip);
+ if (ret)
+ goto out;
+ cam->sensor_type = chip.ident;
+ if (cam->sensor_type != V4L2_IDENT_OV7670) {
+ cam_err(cam, "Unsupported sensor type 0x%x", cam->sensor_type);
+ ret = -EINVAL;
+ goto out;
+ }
+/* Get/set parameters? */
+ ret = 0;
+ cam->state = S_IDLE;
+out:
+ mcam_ctlr_power_down(cam);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+/*
+ * Configure the sensor to match the parameters we have. Caller should
+ * hold s_mutex
+ */
+static int mcam_cam_set_flip(struct mcam_camera *cam)
+{
+ struct v4l2_control ctrl;
+
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_CID_VFLIP;
+ ctrl.value = flip;
+ return sensor_call(cam, core, s_ctrl, &ctrl);
+}
+
+
+static int mcam_cam_configure(struct mcam_camera *cam)
+{
+ struct v4l2_mbus_framefmt mbus_fmt;
+ int ret;
+
+ v4l2_fill_mbus_format(&mbus_fmt, &cam->pix_format, cam->mbus_code);
+ ret = sensor_call(cam, core, init, 0);
+ if (ret == 0)
+ ret = sensor_call(cam, video, s_mbus_fmt, &mbus_fmt);
+ /*
+ * OV7670 does weird things if flip is set *before* format...
+ */
+ ret += mcam_cam_set_flip(cam);
+ return ret;
+}
+
+/*
+ * Get everything ready, and start grabbing frames.
+ */
+static int mcam_read_setup(struct mcam_camera *cam)
+{
+ int ret;
+ unsigned long flags;
+
+ /*
+ * Configuration. If we still don't have DMA buffers,
+ * make one last, desperate attempt.
+ */
+ if (cam->buffer_mode == B_vmalloc && cam->nbufs == 0 &&
+ mcam_alloc_dma_bufs(cam, 0))
+ return -ENOMEM;
+
+ if (mcam_needs_config(cam)) {
+ mcam_cam_configure(cam);
+ ret = mcam_ctlr_configure(cam);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Turn it loose.
+ */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ mcam_reset_buffers(cam);
+ mcam_ctlr_irq_enable(cam);
+ cam->state = S_STREAMING;
+ mcam_ctlr_start(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+/*
+ * Videobuf2 interface code.
+ */
+
+static int mcam_vb_queue_setup(struct vb2_queue *vq, unsigned int *nbufs,
+ unsigned int *num_planes, unsigned long sizes[],
+ void *alloc_ctxs[])
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+ int minbufs = (cam->buffer_mode == B_DMA_contig) ? 3 : 2;
+
+ sizes[0] = cam->pix_format.sizeimage;
+	*num_planes = 1; /* Someday we will have to support planar formats... */
+ if (*nbufs < minbufs)
+ *nbufs = minbufs;
+ if (cam->buffer_mode == B_DMA_contig)
+ alloc_ctxs[0] = cam->vb_alloc_ctx;
+ return 0;
+}
+
+
+static void mcam_vb_buf_queue(struct vb2_buffer *vb)
+{
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+ int start;
+
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ start = (cam->state == S_BUFWAIT) && !list_empty(&cam->buffers);
+ list_add(&mvb->queue, &cam->buffers);
+ if (test_bit(CF_SG_RESTART, &cam->flags))
+ mcam_sg_restart(cam);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ if (start)
+ mcam_read_setup(cam);
+}
+
+
+/*
+ * vb2 uses these to release the mutex when waiting in dqbuf. I'm
+ * not actually sure that vb2_dqbuf() needs to be called with the
+ * mutex held, but better safe than sorry.
+ */
+static void mcam_vb_wait_prepare(struct vb2_queue *vq)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+
+ mutex_unlock(&cam->s_mutex);
+}
+
+static void mcam_vb_wait_finish(struct vb2_queue *vq)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+
+ mutex_lock(&cam->s_mutex);
+}
+
+/*
+ * These need to be called with the mutex held from vb2
+ */
+static int mcam_vb_start_streaming(struct vb2_queue *vq)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+
+ if (cam->state != S_IDLE)
+ return -EINVAL;
+ cam->sequence = 0;
+ /*
+ * Videobuf2 sneakily hoards all the buffers and won't
+ * give them to us until *after* streaming starts. But
+ * we can't actually start streaming until we have a
+ * destination. So go into a wait state and hope they
+ * give us buffers soon.
+ */
+ if (cam->buffer_mode != B_vmalloc && list_empty(&cam->buffers)) {
+ cam->state = S_BUFWAIT;
+ return 0;
+ }
+ return mcam_read_setup(cam);
+}
+
+static int mcam_vb_stop_streaming(struct vb2_queue *vq)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vq);
+ unsigned long flags;
+
+ if (cam->state == S_BUFWAIT) {
+ /* They never gave us buffers */
+ cam->state = S_IDLE;
+ return 0;
+ }
+ if (cam->state != S_STREAMING)
+ return -EINVAL;
+ mcam_ctlr_stop_dma(cam);
+ /*
+ * VB2 reclaims the buffers, so we need to forget
+ * about them.
+ */
+ spin_lock_irqsave(&cam->dev_lock, flags);
+ INIT_LIST_HEAD(&cam->buffers);
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ return 0;
+}
+
+
+static const struct vb2_ops mcam_vb2_ops = {
+ .queue_setup = mcam_vb_queue_setup,
+ .buf_queue = mcam_vb_buf_queue,
+ .start_streaming = mcam_vb_start_streaming,
+ .stop_streaming = mcam_vb_stop_streaming,
+ .wait_prepare = mcam_vb_wait_prepare,
+ .wait_finish = mcam_vb_wait_finish,
+};
+
+
+#ifdef MCAM_MODE_DMA_SG
+/*
+ * Scatter/gather mode uses all of the above functions plus a
+ * few extras to deal with DMA mapping.
+ */
+static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
+{
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
+
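+	/*
+	 * One DMA descriptor per page of image data; ndesc adds one
+	 * extra to cover a final partial page.
+	 */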
+ mvb->dma_desc = dma_alloc_coherent(cam->dev,
+ ndesc * sizeof(struct mcam_dma_desc),
+ &mvb->dma_desc_pa, GFP_KERNEL);
+ if (mvb->dma_desc == NULL) {
+ cam_err(cam, "Unable to get DMA descriptor array\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
+ struct mcam_dma_desc *desc = mvb->dma_desc;
+ struct scatterlist *sg;
+ int i;
+
+ mvb->dma_desc_nent = dma_map_sg(cam->dev, sgd->sglist, sgd->num_pages,
+ DMA_FROM_DEVICE);
+ if (mvb->dma_desc_nent <= 0)
+ return -EIO; /* Not sure what's right here */
+ for_each_sg(sgd->sglist, sg, mvb->dma_desc_nent, i) {
+ desc->dma_addr = sg_dma_address(sg);
+ desc->segment_len = sg_dma_len(sg);
+ desc++;
+ }
+ return 0;
+}
+
+static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
+
+ dma_unmap_sg(cam->dev, sgd->sglist, sgd->num_pages, DMA_FROM_DEVICE);
+ return 0;
+}
+
+static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
+ struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
+ int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
+
+ dma_free_coherent(cam->dev, ndesc * sizeof(struct mcam_dma_desc),
+ mvb->dma_desc, mvb->dma_desc_pa);
+}
+
+
+static const struct vb2_ops mcam_vb2_sg_ops = {
+ .queue_setup = mcam_vb_queue_setup,
+ .buf_init = mcam_vb_sg_buf_init,
+ .buf_prepare = mcam_vb_sg_buf_prepare,
+ .buf_queue = mcam_vb_buf_queue,
+ .buf_finish = mcam_vb_sg_buf_finish,
+ .buf_cleanup = mcam_vb_sg_buf_cleanup,
+ .start_streaming = mcam_vb_start_streaming,
+ .stop_streaming = mcam_vb_stop_streaming,
+ .wait_prepare = mcam_vb_wait_prepare,
+ .wait_finish = mcam_vb_wait_finish,
+};
+
+#endif /* MCAM_MODE_DMA_SG */
+
+static int mcam_setup_vb2(struct mcam_camera *cam)
+{
+ struct vb2_queue *vq = &cam->vb_queue;
+
+ memset(vq, 0, sizeof(*vq));
+ vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vq->drv_priv = cam;
+ INIT_LIST_HEAD(&cam->buffers);
+ switch (cam->buffer_mode) {
+ case B_DMA_contig:
+#ifdef MCAM_MODE_DMA_CONTIG
+ vq->ops = &mcam_vb2_ops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
+ vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ cam->dma_setup = mcam_ctlr_dma_contig;
+ cam->frame_complete = mcam_dma_contig_done;
+#endif
+ break;
+ case B_DMA_sg:
+#ifdef MCAM_MODE_DMA_SG
+ vq->ops = &mcam_vb2_sg_ops;
+ vq->mem_ops = &vb2_dma_sg_memops;
+ vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ cam->dma_setup = mcam_ctlr_dma_sg;
+ cam->frame_complete = mcam_dma_sg_done;
+#endif
+ break;
+ case B_vmalloc:
+#ifdef MCAM_MODE_VMALLOC
+ tasklet_init(&cam->s_tasklet, mcam_frame_tasklet,
+ (unsigned long) cam);
+ vq->ops = &mcam_vb2_ops;
+ vq->mem_ops = &vb2_vmalloc_memops;
+ vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
+ vq->io_modes = VB2_MMAP;
+ cam->dma_setup = mcam_ctlr_dma_vmalloc;
+ cam->frame_complete = mcam_vmalloc_done;
+#endif
+ break;
+ }
+ return vb2_queue_init(vq);
+}
+
+static void mcam_cleanup_vb2(struct mcam_camera *cam)
+{
+ vb2_queue_release(&cam->vb_queue);
+#ifdef MCAM_MODE_DMA_CONTIG
+ if (cam->buffer_mode == B_DMA_contig)
+ vb2_dma_contig_cleanup_ctx(cam->vb_alloc_ctx);
+#endif
+}
+
+
+/* ---------------------------------------------------------------------- */
+/*
+ * The long list of V4L2 ioctl() operations.
+ */
+
+static int mcam_vidioc_streamon(struct file *filp, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_streamon(&cam->vb_queue, type);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_streamoff(struct file *filp, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_streamoff(&cam->vb_queue, type);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_reqbufs(struct file *filp, void *priv,
+ struct v4l2_requestbuffers *req)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_reqbufs(&cam->vb_queue, req);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_querybuf(struct file *filp, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_querybuf(&cam->vb_queue, buf);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+static int mcam_vidioc_qbuf(struct file *filp, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_qbuf(&cam->vb_queue, buf);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+static int mcam_vidioc_dqbuf(struct file *filp, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_dqbuf(&cam->vb_queue, buf, filp->f_flags & O_NONBLOCK);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+
+static int mcam_vidioc_queryctrl(struct file *filp, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, core, queryctrl, qc);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_g_ctrl(struct file *filp, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, core, g_ctrl, ctrl);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_s_ctrl(struct file *filp, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, core, s_ctrl, ctrl);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strcpy(cap->driver, "marvell_ccic");
+ strcpy(cap->card, "marvell_ccic");
+ cap->version = 1;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+
+static int mcam_vidioc_enum_fmt_vid_cap(struct file *filp,
+ void *priv, struct v4l2_fmtdesc *fmt)
+{
+ if (fmt->index >= N_MCAM_FMTS)
+ return -EINVAL;
+ strlcpy(fmt->description, mcam_formats[fmt->index].desc,
+ sizeof(fmt->description));
+ fmt->pixelformat = mcam_formats[fmt->index].pixelformat;
+ return 0;
+}
+
+static int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct mcam_camera *cam = priv;
+ struct mcam_format_struct *f;
+ struct v4l2_pix_format *pix = &fmt->fmt.pix;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ int ret;
+
+ f = mcam_find_format(pix->pixelformat);
+ pix->pixelformat = f->pixelformat;
+ v4l2_fill_mbus_format(&mbus_fmt, pix, f->mbus_code);
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
+ mutex_unlock(&cam->s_mutex);
+ v4l2_fill_pix_format(pix, &mbus_fmt);
+ pix->bytesperline = pix->width * f->bpp;
+ pix->sizeimage = pix->height * pix->bytesperline;
+ return ret;
+}
+
+static int mcam_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct mcam_camera *cam = priv;
+ struct mcam_format_struct *f;
+ int ret;
+
+ /*
+ * Can't do anything if the device is not idle
+ * Also can't if there are streaming buffers in place.
+ */
+ if (cam->state != S_IDLE || cam->vb_queue.num_buffers > 0)
+ return -EBUSY;
+
+ f = mcam_find_format(fmt->fmt.pix.pixelformat);
+
+ /*
+ * See if the formatting works in principle.
+ */
+ ret = mcam_vidioc_try_fmt_vid_cap(filp, priv, fmt);
+ if (ret)
+ return ret;
+ /*
+ * Now we start to change things for real, so let's do it
+ * under lock.
+ */
+ mutex_lock(&cam->s_mutex);
+ cam->pix_format = fmt->fmt.pix;
+ cam->mbus_code = f->mbus_code;
+
+ /*
+ * Make sure we have appropriate DMA buffers.
+ */
+ if (cam->buffer_mode == B_vmalloc) {
+ ret = mcam_check_dma_buffers(cam);
+ if (ret)
+ goto out;
+ }
+ mcam_set_config_needed(cam, 1);
+ ret = 0;
+out:
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+/*
+ * Return our stored notion of how the camera is/should be configured.
+ * The V4L2 spec wants us to be smarter, and actually get this from
+ * the camera (and not mess with it at open time). Someday.
+ */
+static int mcam_vidioc_g_fmt_vid_cap(struct file *filp, void *priv,
+ struct v4l2_format *f)
+{
+ struct mcam_camera *cam = priv;
+
+ f->fmt.pix = cam->pix_format;
+ return 0;
+}
+
+/*
+ * We only have one input - the sensor - so minimize the nonsense here.
+ */
+static int mcam_vidioc_enum_input(struct file *filp, void *priv,
+ struct v4l2_input *input)
+{
+ if (input->index != 0)
+ return -EINVAL;
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ input->std = V4L2_STD_ALL; /* Not sure what should go here */
+ strcpy(input->name, "Camera");
+ return 0;
+}
+
+static int mcam_vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int mcam_vidioc_s_input(struct file *filp, void *priv, unsigned int i)
+{
+ if (i != 0)
+ return -EINVAL;
+ return 0;
+}
+
+/* from vivi.c */
+static int mcam_vidioc_s_std(struct file *filp, void *priv, v4l2_std_id *a)
+{
+ return 0;
+}
+
+/*
+ * G/S_PARM. Most of this is done by the sensor, but we are
+ * the layer that controls the number of read buffers.
+ */
+static int mcam_vidioc_g_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *parms)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, g_parm, parms);
+ mutex_unlock(&cam->s_mutex);
+ parms->parm.capture.readbuffers = n_dma_bufs;
+ return ret;
+}
+
+static int mcam_vidioc_s_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *parms)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, s_parm, parms);
+ mutex_unlock(&cam->s_mutex);
+ parms->parm.capture.readbuffers = n_dma_bufs;
+ return ret;
+}
+
+static int mcam_vidioc_g_chip_ident(struct file *file, void *priv,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ struct mcam_camera *cam = priv;
+
+ chip->ident = V4L2_IDENT_NONE;
+ chip->revision = 0;
+ if (v4l2_chip_match_host(&chip->match)) {
+ chip->ident = cam->chip_id;
+ return 0;
+ }
+ return sensor_call(cam, core, g_chip_ident, chip);
+}
+
+static int mcam_vidioc_enum_framesizes(struct file *filp, void *priv,
+ struct v4l2_frmsizeenum *sizes)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, enum_framesizes, sizes);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+static int mcam_vidioc_enum_frameintervals(struct file *filp, void *priv,
+ struct v4l2_frmivalenum *interval)
+{
+ struct mcam_camera *cam = priv;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = sensor_call(cam, video, enum_frameintervals, interval);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int mcam_vidioc_g_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct mcam_camera *cam = priv;
+
+ if (v4l2_chip_match_host(&reg->match)) {
+ reg->val = mcam_reg_read(cam, reg->reg);
+ reg->size = 4;
+ return 0;
+ }
+ return sensor_call(cam, core, g_register, reg);
+}
+
+static int mcam_vidioc_s_register(struct file *file, void *priv,
+ struct v4l2_dbg_register *reg)
+{
+ struct mcam_camera *cam = priv;
+
+ if (v4l2_chip_match_host(&reg->match)) {
+ mcam_reg_write(cam, reg->reg, reg->val);
+ return 0;
+ }
+ return sensor_call(cam, core, s_register, reg);
+}
+#endif
+
+static const struct v4l2_ioctl_ops mcam_v4l_ioctl_ops = {
+ .vidioc_querycap = mcam_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = mcam_vidioc_enum_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = mcam_vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = mcam_vidioc_s_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = mcam_vidioc_g_fmt_vid_cap,
+ .vidioc_enum_input = mcam_vidioc_enum_input,
+ .vidioc_g_input = mcam_vidioc_g_input,
+ .vidioc_s_input = mcam_vidioc_s_input,
+ .vidioc_s_std = mcam_vidioc_s_std,
+ .vidioc_reqbufs = mcam_vidioc_reqbufs,
+ .vidioc_querybuf = mcam_vidioc_querybuf,
+ .vidioc_qbuf = mcam_vidioc_qbuf,
+ .vidioc_dqbuf = mcam_vidioc_dqbuf,
+ .vidioc_streamon = mcam_vidioc_streamon,
+ .vidioc_streamoff = mcam_vidioc_streamoff,
+ .vidioc_queryctrl = mcam_vidioc_queryctrl,
+ .vidioc_g_ctrl = mcam_vidioc_g_ctrl,
+ .vidioc_s_ctrl = mcam_vidioc_s_ctrl,
+ .vidioc_g_parm = mcam_vidioc_g_parm,
+ .vidioc_s_parm = mcam_vidioc_s_parm,
+ .vidioc_enum_framesizes = mcam_vidioc_enum_framesizes,
+ .vidioc_enum_frameintervals = mcam_vidioc_enum_frameintervals,
+ .vidioc_g_chip_ident = mcam_vidioc_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = mcam_vidioc_g_register,
+ .vidioc_s_register = mcam_vidioc_s_register,
+#endif
+};
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Our various file operations.
+ */
+static int mcam_v4l_open(struct file *filp)
+{
+ struct mcam_camera *cam = video_drvdata(filp);
+ int ret = 0;
+
+ filp->private_data = cam;
+
+ frames = singles = delivered = 0;
+ mutex_lock(&cam->s_mutex);
+ if (cam->users == 0) {
+ ret = mcam_setup_vb2(cam);
+ if (ret)
+ goto out;
+ mcam_ctlr_power_up(cam);
+ __mcam_cam_reset(cam);
+ mcam_set_config_needed(cam, 1);
+ }
+ (cam->users)++;
+out:
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_v4l_release(struct file *filp)
+{
+ struct mcam_camera *cam = filp->private_data;
+
+ cam_err(cam, "Release, %d frames, %d singles, %d delivered\n", frames,
+ singles, delivered);
+ mutex_lock(&cam->s_mutex);
+ (cam->users)--;
+ if (filp == cam->owner) {
+ mcam_ctlr_stop_dma(cam);
+ cam->owner = NULL;
+ }
+ if (cam->users == 0) {
+ mcam_cleanup_vb2(cam);
+ mcam_ctlr_power_down(cam);
+ if (cam->buffer_mode == B_vmalloc && alloc_bufs_at_read)
+ mcam_free_dma_bufs(cam);
+ }
+ mutex_unlock(&cam->s_mutex);
+ return 0;
+}
+
+static ssize_t mcam_v4l_read(struct file *filp,
+ char __user *buffer, size_t len, loff_t *pos)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_read(&cam->vb_queue, buffer, len, pos,
+ filp->f_flags & O_NONBLOCK);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+
+static unsigned int mcam_v4l_poll(struct file *filp,
+ struct poll_table_struct *pt)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_poll(&cam->vb_queue, filp, pt);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+static int mcam_v4l_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct mcam_camera *cam = filp->private_data;
+ int ret;
+
+ mutex_lock(&cam->s_mutex);
+ ret = vb2_mmap(&cam->vb_queue, vma);
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+}
+
+
+
+static const struct v4l2_file_operations mcam_v4l_fops = {
+ .owner = THIS_MODULE,
+ .open = mcam_v4l_open,
+ .release = mcam_v4l_release,
+ .read = mcam_v4l_read,
+ .poll = mcam_v4l_poll,
+ .mmap = mcam_v4l_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+
+/*
+ * This template device holds all of those v4l2 methods; we
+ * clone it for specific real devices.
+ */
+static struct video_device mcam_v4l_template = {
+ .name = "mcam",
+ .tvnorms = V4L2_STD_NTSC_M,
+ .current_norm = V4L2_STD_NTSC_M, /* make mplayer happy */
+
+ .fops = &mcam_v4l_fops,
+ .ioctl_ops = &mcam_v4l_ioctl_ops,
+ .release = video_device_release_empty,
+};
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Interrupt handler stuff
+ */
+static void mcam_frame_complete(struct mcam_camera *cam, int frame)
+{
+ /*
+ * Basic frame housekeeping.
+ */
+ set_bit(frame, &cam->flags);
+ clear_bit(CF_DMA_ACTIVE, &cam->flags);
+ cam->next_buf = frame;
+ cam->buf_seq[frame] = ++(cam->sequence);
+ frames++;
+ /*
+ * "This should never happen"
+ */
+ if (cam->state != S_STREAMING)
+ return;
+ /*
+ * Process the frame and set up the next one.
+ */
+ cam->frame_complete(cam, frame);
+}
+
+
+/*
+ * The interrupt handler; this needs to be called from the
+ * platform irq handler with the lock held.
+ */
+int mccic_irq(struct mcam_camera *cam, unsigned int irqs)
+{
+ unsigned int frame, handled = 0;
+
+ mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS); /* Clear'em all */
+ /*
+ * Handle any frame completions. There really should
+ * not be more than one of these, or we have fallen
+ * far behind.
+ *
+ * When running in S/G mode, the frame number lacks any
+ * real meaning - there's only one descriptor array - but
+ * the controller still picks a different one to signal
+ * each time.
+ */
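+	/* The EOF bits are adjacent, so shifting by the frame number works. */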
+ for (frame = 0; frame < cam->nbufs; frame++)
+ if (irqs & (IRQ_EOF0 << frame)) {
+ mcam_frame_complete(cam, frame);
+ handled = 1;
+ }
+ /*
+ * If a frame starts, note that we have DMA active. This
+ * code assumes that we won't get multiple frame interrupts
+ * at once; may want to rethink that.
+ */
+ if (irqs & (IRQ_SOF0 | IRQ_SOF1 | IRQ_SOF2)) {
+ set_bit(CF_DMA_ACTIVE, &cam->flags);
+ handled = 1;
+ if (cam->buffer_mode == B_DMA_sg)
+ mcam_ctlr_stop(cam);
+ }
+ return handled;
+}
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Registration and such.
+ */
+static struct ov7670_config sensor_cfg = {
+ /*
+ * Exclude QCIF mode, because it only captures a tiny portion
+ * of the sensor FOV
+ */
+ .min_width = 320,
+ .min_height = 240,
+};
+
+
+int mccic_register(struct mcam_camera *cam)
+{
+ struct i2c_board_info ov7670_info = {
+ .type = "ov7670",
+ .addr = 0x42 >> 1,
+ .platform_data = &sensor_cfg,
+ };
+ int ret;
+
+ /*
+ * Validate the requested buffer mode.
+ */
+ if (buffer_mode >= 0)
+ cam->buffer_mode = buffer_mode;
+ if (cam->buffer_mode == B_DMA_sg &&
+ cam->chip_id == V4L2_IDENT_CAFE) {
+ printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, "
+ "attempting vmalloc mode instead\n");
+ cam->buffer_mode = B_vmalloc;
+ }
+ if (!mcam_buffer_mode_supported(cam->buffer_mode)) {
+ printk(KERN_ERR "marvell-cam: buffer mode %d unsupported\n",
+ cam->buffer_mode);
+ return -EINVAL;
+ }
+ /*
+ * Register with V4L
+ */
+ ret = v4l2_device_register(cam->dev, &cam->v4l2_dev);
+ if (ret)
+ return ret;
+
+ mutex_init(&cam->s_mutex);
+ cam->state = S_NOTREADY;
+ mcam_set_config_needed(cam, 1);
+ cam->pix_format = mcam_def_pix_format;
+ cam->mbus_code = mcam_def_mbus_code;
+ INIT_LIST_HEAD(&cam->buffers);
+ mcam_ctlr_init(cam);
+
+ /*
+ * Try to find the sensor.
+ */
+ sensor_cfg.clock_speed = cam->clock_speed;
+ sensor_cfg.use_smbus = cam->use_smbus;
+ cam->sensor_addr = ov7670_info.addr;
+ cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev,
+ cam->i2c_adapter, &ov7670_info, NULL);
+ if (cam->sensor == NULL) {
+ ret = -ENODEV;
+ goto out_unregister;
+ }
+
+ ret = mcam_cam_init(cam);
+ if (ret)
+ goto out_unregister;
+ /*
+ * Get the v4l2 setup done.
+ */
+ mutex_lock(&cam->s_mutex);
+ cam->vdev = mcam_v4l_template;
+ cam->vdev.debug = 0;
+ cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto out;
+ video_set_drvdata(&cam->vdev, cam);
+
+ /*
+ * If so requested, try to get our DMA buffers now.
+ */
+ if (cam->buffer_mode == B_vmalloc && !alloc_bufs_at_read) {
+ cam_warn(cam, "Unable to alloc DMA buffers at load"
+ " will try again later.");
+ }
+
+out:
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+out_unregister:
+ v4l2_device_unregister(&cam->v4l2_dev);
+ return ret;
+}
+
+
+void mccic_shutdown(struct mcam_camera *cam)
+{
+ /*
+ * If we have no users (and we really, really should have no
+ * users) the device will already be powered down. Trying to
+ * take it down again will wedge the machine, which is frowned
+ * upon.
+ */
+ if (cam->users > 0) {
+ cam_warn(cam, "Removing a device with users!\n");
+ mcam_ctlr_power_down(cam);
+ }
+ vb2_queue_release(&cam->vb_queue);
+ if (cam->buffer_mode == B_vmalloc)
+ mcam_free_dma_bufs(cam);
+ video_unregister_device(&cam->vdev);
+ v4l2_device_unregister(&cam->v4l2_dev);
+}
+
+/*
+ * Power management
+ */
+#ifdef CONFIG_PM
+
+void mccic_suspend(struct mcam_camera *cam)
+{
+ enum mcam_state cstate = cam->state;
+
+ mcam_ctlr_stop_dma(cam);
+ mcam_ctlr_power_down(cam);
+ cam->state = cstate;
+}
+
+int mccic_resume(struct mcam_camera *cam)
+{
+ int ret = 0;
+
+ mutex_lock(&cam->s_mutex);
+ if (cam->users > 0) {
+ mcam_ctlr_power_up(cam);
+ __mcam_cam_reset(cam);
+ } else {
+ mcam_ctlr_power_down(cam);
+ }
+ mutex_unlock(&cam->s_mutex);
+
+ set_bit(CF_CONFIG_NEEDED, &cam->flags);
+ if (cam->state == S_STREAMING)
+ ret = mcam_read_setup(cam);
+ return ret;
+}
+#endif /* CONFIG_PM */
diff --git a/drivers/media/video/marvell-ccic/mcam-core.h b/drivers/media/video/marvell-ccic/mcam-core.h
new file mode 100644
index 00000000000..917200e6325
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/mcam-core.h
@@ -0,0 +1,323 @@
+/*
+ * Marvell camera core structures.
+ *
+ * Copyright 2011 Jonathan Corbet corbet@lwn.net
+ */
+#ifndef _MCAM_CORE_H
+#define _MCAM_CORE_H
+
+#include <linux/list.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-core.h>
+
+/*
+ * Create our own symbols for the supported buffer modes, but, for now,
+ * base them entirely on which videobuf2 options have been selected.
+ */
+#if defined(CONFIG_VIDEOBUF2_VMALLOC) || defined(CONFIG_VIDEOBUF2_VMALLOC_MODULE)
+#define MCAM_MODE_VMALLOC 1
+#endif
+
+#if defined(CONFIG_VIDEOBUF2_DMA_CONTIG) || defined(CONFIG_VIDEOBUF2_DMA_CONTIG_MODULE)
+#define MCAM_MODE_DMA_CONTIG 1
+#endif
+
+#if defined(CONFIG_VIDEOBUF2_DMA_SG) || defined(CONFIG_VIDEOBUF2_DMA_SG_MODULE)
+#define MCAM_MODE_DMA_SG 1
+#endif
+
+#if !defined(MCAM_MODE_VMALLOC) && !defined(MCAM_MODE_DMA_CONTIG) && \
+ !defined(MCAM_MODE_DMA_SG)
+#error One of the videobuf buffer modes must be selected in the config
+#endif
+
+
+enum mcam_state {
+ S_NOTREADY, /* Not yet initialized */
+ S_IDLE, /* Just hanging around */
+ S_FLAKED, /* Some sort of problem */
+ S_STREAMING, /* Streaming data */
+ S_BUFWAIT /* streaming requested but no buffers yet */
+};
+#define MAX_DMA_BUFS 3
+
+/*
+ * Different platforms work best with different buffer modes, so we
+ * let the platform pick.
+ */
+enum mcam_buffer_mode {
+ B_vmalloc = 0,
+ B_DMA_contig = 1,
+ B_DMA_sg = 2
+};
+
+/*
+ * Is a given buffer mode supported by the current kernel configuration?
+ */
+static inline int mcam_buffer_mode_supported(enum mcam_buffer_mode mode)
+{
+ switch (mode) {
+#ifdef MCAM_MODE_VMALLOC
+ case B_vmalloc:
+#endif
+#ifdef MCAM_MODE_DMA_CONTIG
+ case B_DMA_contig:
+#endif
+#ifdef MCAM_MODE_DMA_SG
+ case B_DMA_sg:
+#endif
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+
+/*
+ * A description of one of our devices.
+ * Locking: controlled by s_mutex. Certain fields, however, require
+ * the dev_lock spinlock; they are marked as such by comments.
+ * dev_lock is also required for access to device registers.
+ */
+struct mcam_camera {
+ /*
+ * These fields should be set by the platform code prior to
+ * calling mcam_register().
+ */
+ struct i2c_adapter *i2c_adapter;
+ unsigned char __iomem *regs;
+ spinlock_t dev_lock;
+ struct device *dev; /* For messages, dma alloc */
+ unsigned int chip_id;
+ short int clock_speed; /* Sensor clock speed, default 30 */
+	short int use_smbus;		/* SMBus or straight I2C? */
+ enum mcam_buffer_mode buffer_mode;
+ /*
+ * Callbacks from the core to the platform code.
+ */
+ void (*plat_power_up) (struct mcam_camera *cam);
+ void (*plat_power_down) (struct mcam_camera *cam);
+
+ /*
+ * Everything below here is private to the mcam core and
+ * should not be touched by the platform code.
+ */
+ struct v4l2_device v4l2_dev;
+ enum mcam_state state;
+ unsigned long flags; /* Buffer status, mainly (dev_lock) */
+ int users; /* How many open FDs */
+ struct file *owner; /* Who has data access (v4l2) */
+
+ /*
+ * Subsystem structures.
+ */
+ struct video_device vdev;
+ struct v4l2_subdev *sensor;
+ unsigned short sensor_addr;
+
+ /* Videobuf2 stuff */
+ struct vb2_queue vb_queue;
+ struct list_head buffers; /* Available frames */
+
+ unsigned int nbufs; /* How many are alloc'd */
+ int next_buf; /* Next to consume (dev_lock) */
+
+ /* DMA buffers - vmalloc mode */
+#ifdef MCAM_MODE_VMALLOC
+ unsigned int dma_buf_size; /* allocated size */
+ void *dma_bufs[MAX_DMA_BUFS]; /* Internal buffer addresses */
+ dma_addr_t dma_handles[MAX_DMA_BUFS]; /* Buffer bus addresses */
+ struct tasklet_struct s_tasklet;
+#endif
+ unsigned int sequence; /* Frame sequence number */
+ unsigned int buf_seq[MAX_DMA_BUFS]; /* Sequence for individual bufs */
+
+ /* DMA buffers - DMA modes */
+ struct mcam_vb_buffer *vb_bufs[MAX_DMA_BUFS];
+ struct vb2_alloc_ctx *vb_alloc_ctx;
+
+ /* Mode-specific ops, set at open time */
+ void (*dma_setup)(struct mcam_camera *cam);
+ void (*frame_complete)(struct mcam_camera *cam, int frame);
+
+ /* Current operating parameters */
+ u32 sensor_type; /* Currently ov7670 only */
+ struct v4l2_pix_format pix_format;
+ enum v4l2_mbus_pixelcode mbus_code;
+
+ /* Locks */
+ struct mutex s_mutex; /* Access to this structure */
+};
+
+
+/*
+ * Register I/O functions. These are here because the platform code
+ * may legitimately need to mess with the register space.
+ */
+/*
+ * Device register I/O
+ */
+static inline void mcam_reg_write(struct mcam_camera *cam, unsigned int reg,
+ unsigned int val)
+{
+ iowrite32(val, cam->regs + reg);
+}
+
+static inline unsigned int mcam_reg_read(struct mcam_camera *cam,
+ unsigned int reg)
+{
+ return ioread32(cam->regs + reg);
+}
+
+
+static inline void mcam_reg_write_mask(struct mcam_camera *cam, unsigned int reg,
+ unsigned int val, unsigned int mask)
+{
+ unsigned int v = mcam_reg_read(cam, reg);
+
+ v = (v & ~mask) | (val & mask);
+ mcam_reg_write(cam, reg, v);
+}
+
+static inline void mcam_reg_clear_bit(struct mcam_camera *cam,
+ unsigned int reg, unsigned int val)
+{
+ mcam_reg_write_mask(cam, reg, 0, val);
+}
+
+static inline void mcam_reg_set_bit(struct mcam_camera *cam,
+ unsigned int reg, unsigned int val)
+{
+ mcam_reg_write_mask(cam, reg, val, val);
+}
+
+/*
+ * Functions for use by platform code.
+ */
+int mccic_register(struct mcam_camera *cam);
+int mccic_irq(struct mcam_camera *cam, unsigned int irqs);
+void mccic_shutdown(struct mcam_camera *cam);
+#ifdef CONFIG_PM
+void mccic_suspend(struct mcam_camera *cam);
+int mccic_resume(struct mcam_camera *cam);
+#endif
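+
+/*
+ * A minimal sketch of how platform code is expected to use the above
+ * (the "foo" names are placeholders; mmp-driver.c below is a real,
+ * complete example).  Fill in the structure, register with the core,
+ * and forward interrupts with dev_lock held:
+ *
+ *	static int foo_cam_probe(struct platform_device *pdev)
+ *	{
+ *		struct mcam_camera *mcam = &foo_mcam;
+ *
+ *		mcam->dev = &pdev->dev;
+ *		mcam->plat_power_up = foo_power_up;
+ *		mcam->plat_power_down = foo_power_down;
+ *		spin_lock_init(&mcam->dev_lock);
+ *		mcam->regs = ioremap(...);
+ *		mcam->i2c_adapter = ...;
+ *		return mccic_register(mcam);
+ *	}
+ *
+ * The platform interrupt handler should take dev_lock, read
+ * REG_IRQSTAT, pass the result to mccic_irq(), and drop the lock;
+ * mccic_shutdown() undoes mccic_register() at remove time.
+ */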
+
+/*
+ * Register definitions for the m88alp01 camera interface. Offsets in bytes
+ * as given in the spec.
+ */
+#define REG_Y0BAR 0x00
+#define REG_Y1BAR 0x04
+#define REG_Y2BAR 0x08
+/* ... */
+
+#define REG_IMGPITCH 0x24 /* Image pitch register */
+#define IMGP_YP_SHFT 2 /* Y pitch params */
+#define IMGP_YP_MASK 0x00003ffc /* Y pitch field */
+#define IMGP_UVP_SHFT 18 /* UV pitch (planar) */
+#define IMGP_UVP_MASK 0x3ffc0000
+#define REG_IRQSTATRAW 0x28 /* RAW IRQ Status */
+#define IRQ_EOF0 0x00000001 /* End of frame 0 */
+#define IRQ_EOF1 0x00000002 /* End of frame 1 */
+#define IRQ_EOF2 0x00000004 /* End of frame 2 */
+#define IRQ_SOF0 0x00000008 /* Start of frame 0 */
+#define IRQ_SOF1 0x00000010 /* Start of frame 1 */
+#define IRQ_SOF2 0x00000020 /* Start of frame 2 */
+#define IRQ_OVERFLOW 0x00000040 /* FIFO overflow */
+#define IRQ_TWSIW 0x00010000 /* TWSI (smbus) write */
+#define IRQ_TWSIR 0x00020000 /* TWSI read */
+#define IRQ_TWSIE 0x00040000 /* TWSI error */
+#define TWSIIRQS (IRQ_TWSIW|IRQ_TWSIR|IRQ_TWSIE)
+#define FRAMEIRQS (IRQ_EOF0|IRQ_EOF1|IRQ_EOF2|IRQ_SOF0|IRQ_SOF1|IRQ_SOF2)
+#define ALLIRQS (TWSIIRQS|FRAMEIRQS|IRQ_OVERFLOW)
+#define REG_IRQMASK 0x2c /* IRQ mask - same bits as IRQSTAT */
+#define REG_IRQSTAT 0x30 /* IRQ status / clear */
+
+#define REG_IMGSIZE 0x34 /* Image size */
+#define IMGSZ_V_MASK 0x1fff0000
+#define IMGSZ_V_SHIFT 16
+#define IMGSZ_H_MASK 0x00003fff
+#define REG_IMGOFFSET 0x38	/* Image offset */
+
+#define REG_CTRL0 0x3c /* Control 0 */
+#define C0_ENABLE 0x00000001 /* Makes the whole thing go */
+
+/* Mask for all the format bits */
+#define C0_DF_MASK 0x00fffffc /* Bits 2-23 */
+
+/* RGB ordering */
+#define C0_RGB4_RGBX 0x00000000
+#define C0_RGB4_XRGB 0x00000004
+#define C0_RGB4_BGRX 0x00000008
+#define C0_RGB4_XBGR 0x0000000c
+#define C0_RGB5_RGGB 0x00000000
+#define C0_RGB5_GRBG 0x00000004
+#define C0_RGB5_GBRG 0x00000008
+#define C0_RGB5_BGGR 0x0000000c
+
+/* Spec has two fields for DIN and DOUT, but they must match, so
+ combine them here. */
+#define C0_DF_YUV 0x00000000 /* Data is YUV */
+#define C0_DF_RGB 0x000000a0 /* ... RGB */
+#define C0_DF_BAYER 0x00000140 /* ... Bayer */
+/* 8-8-8 must be missing from the below - ask */
+#define C0_RGBF_565 0x00000000
+#define C0_RGBF_444 0x00000800
+#define C0_RGB_BGR 0x00001000 /* Blue comes first */
+#define C0_YUV_PLANAR 0x00000000 /* YUV 422 planar format */
+#define C0_YUV_PACKED 0x00008000 /* YUV 422 packed */
+#define C0_YUV_420PL 0x0000a000 /* YUV 420 planar */
+/* Think that 420 packed must be 111 - ask */
+#define C0_YUVE_YUYV 0x00000000 /* Y1CbY0Cr */
+#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
+#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
+#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
+#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */
+#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */
+#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
+#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
+/* Bayer bits 18,19 if needed */
+#define C0_HPOL_LOW 0x01000000 /* HSYNC polarity active low */
+#define C0_VPOL_LOW 0x02000000 /* VSYNC polarity active low */
+#define C0_VCLK_LOW 0x04000000 /* VCLK on falling edge */
+#define C0_DOWNSCALE 0x08000000 /* Enable downscaler */
+#define C0_SIFM_MASK 0xc0000000 /* SIF mode bits */
+#define C0_SIF_HVSYNC 0x00000000 /* Use H/VSYNC */
+#define CO_SOF_NOSYNC 0x40000000 /* Use inband active signaling */
+
+/* Bits below C1_444ALPHA are not present in Cafe */
+#define REG_CTRL1 0x40 /* Control 1 */
+#define C1_CLKGATE 0x00000001 /* Sensor clock gate */
+#define C1_DESC_ENA 0x00000100 /* DMA descriptor enable */
+#define C1_DESC_3WORD 0x00000200 /* Three-word descriptors used */
+#define C1_444ALPHA 0x00f00000 /* Alpha field in RGB444 */
+#define C1_ALPHA_SHFT 20
+#define C1_DMAB32 0x00000000 /* 32-byte DMA burst */
+#define C1_DMAB16 0x02000000 /* 16-byte DMA burst */
+#define C1_DMAB64 0x04000000 /* 64-byte DMA burst */
+#define C1_DMAB_MASK 0x06000000
+#define C1_TWOBUFS 0x08000000 /* Use only two DMA buffers */
+#define C1_PWRDWN 0x10000000 /* Power down */
+
+#define REG_CLKCTRL 0x88 /* Clock control */
+#define CLK_DIV_MASK 0x0000ffff /* Upper bits RW "reserved" */
+
+/* This appears to be a Cafe-only register */
+#define REG_UBAR 0xc4 /* Upper base address register */
+
+/* Armada 610 DMA descriptor registers */
+#define REG_DMA_DESC_Y 0x200
+#define REG_DMA_DESC_U 0x204
+#define REG_DMA_DESC_V 0x208
+#define REG_DESC_LEN_Y 0x20c /* Lengths are in bytes */
+#define REG_DESC_LEN_U 0x210
+#define REG_DESC_LEN_V 0x214
+
+/*
+ * Useful stuff that probably belongs somewhere global.
+ */
+#define VGA_WIDTH 640
+#define VGA_HEIGHT 480
+
+#endif /* _MCAM_CORE_H */
diff --git a/drivers/media/video/marvell-ccic/mmp-driver.c b/drivers/media/video/marvell-ccic/mmp-driver.c
new file mode 100644
index 00000000000..d6b76454137
--- /dev/null
+++ b/drivers/media/video/marvell-ccic/mmp-driver.c
@@ -0,0 +1,340 @@
+/*
+ * Support for the camera device found on Marvell MMP processors; known
+ * to work with the Armada 610 as used in the OLPC 1.75 system.
+ *
+ * Copyright 2011 Jonathan Corbet <corbet@lwn.net>
+ *
+ * This file may be distributed under the terms of the GNU General
+ * Public License, version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-gpio.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/mmp-camera.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+
+#include "mcam-core.h"
+
+MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
+MODULE_LICENSE("GPL");
+
+struct mmp_camera {
+ void *power_regs;
+ struct platform_device *pdev;
+ struct mcam_camera mcam;
+ struct list_head devlist;
+ int irq;
+};
+
+static inline struct mmp_camera *mcam_to_cam(struct mcam_camera *mcam)
+{
+ return container_of(mcam, struct mmp_camera, mcam);
+}
+
+/*
+ * A silly little infrastructure so we can keep track of our devices.
+ * Chances are that we will never have more than one of them, but
+ * the Armada 610 *does* have two controllers...
+ */
+
+static LIST_HEAD(mmpcam_devices);
+static struct mutex mmpcam_devices_lock;
+
+static void mmpcam_add_device(struct mmp_camera *cam)
+{
+ mutex_lock(&mmpcam_devices_lock);
+ list_add(&cam->devlist, &mmpcam_devices);
+ mutex_unlock(&mmpcam_devices_lock);
+}
+
+static void mmpcam_remove_device(struct mmp_camera *cam)
+{
+ mutex_lock(&mmpcam_devices_lock);
+ list_del(&cam->devlist);
+ mutex_unlock(&mmpcam_devices_lock);
+}
+
+/*
+ * Platform dev remove passes us a platform_device, and there's
+ * no handy unused drvdata to stash a backpointer in. So just
+ * dig it out of our list.
+ */
+static struct mmp_camera *mmpcam_find_device(struct platform_device *pdev)
+{
+ struct mmp_camera *cam;
+
+ mutex_lock(&mmpcam_devices_lock);
+ list_for_each_entry(cam, &mmpcam_devices, devlist) {
+ if (cam->pdev == pdev) {
+ mutex_unlock(&mmpcam_devices_lock);
+ return cam;
+ }
+ }
+ mutex_unlock(&mmpcam_devices_lock);
+ return NULL;
+}
+
+
+
+
+/*
+ * Power-related registers; this almost certainly belongs
+ * somewhere else.
+ *
+ * ARMADA 610 register manual, sec 7.2.1, p1842.
+ */
+#define CPU_SUBSYS_PMU_BASE 0xd4282800
+#define REG_CCIC_DCGCR 0x28 /* CCIC dyn clock gate ctrl reg */
+#define REG_CCIC_CRCR 0x50 /* CCIC clk reset ctrl reg */
+
+/*
+ * Power control.
+ */
+static void mmpcam_power_up(struct mcam_camera *mcam)
+{
+ struct mmp_camera *cam = mcam_to_cam(mcam);
+ struct mmp_camera_platform_data *pdata;
+/*
+ * Turn on power and clocks to the controller.
+ */
+ iowrite32(0x3f, cam->power_regs + REG_CCIC_DCGCR);
+ iowrite32(0x3805b, cam->power_regs + REG_CCIC_CRCR);
+ mdelay(1);
+/*
+ * Provide power to the sensor.
+ */
+ mcam_reg_write(mcam, REG_CLKCTRL, 0x60000002);
+ pdata = cam->pdev->dev.platform_data;
+ gpio_set_value(pdata->sensor_power_gpio, 1);
+ mdelay(5);
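+	/* Clear C1_PWRDWN (0x10000000) now that the sensor has power. */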
+ mcam_reg_clear_bit(mcam, REG_CTRL1, 0x10000000);
+ gpio_set_value(pdata->sensor_reset_gpio, 0); /* reset is active low */
+ mdelay(5);
+ gpio_set_value(pdata->sensor_reset_gpio, 1); /* reset is active low */
+ mdelay(5);
+}
+
+static void mmpcam_power_down(struct mcam_camera *mcam)
+{
+ struct mmp_camera *cam = mcam_to_cam(mcam);
+ struct mmp_camera_platform_data *pdata;
+/*
+ * Turn off clocks and set reset lines
+ */
+ iowrite32(0, cam->power_regs + REG_CCIC_DCGCR);
+ iowrite32(0, cam->power_regs + REG_CCIC_CRCR);
+/*
+ * Shut down the sensor.
+ */
+ pdata = cam->pdev->dev.platform_data;
+ gpio_set_value(pdata->sensor_power_gpio, 0);
+ gpio_set_value(pdata->sensor_reset_gpio, 0);
+}
+
+
+static irqreturn_t mmpcam_irq(int irq, void *data)
+{
+ struct mcam_camera *mcam = data;
+ unsigned int irqs, handled;
+
+ spin_lock(&mcam->dev_lock);
+ irqs = mcam_reg_read(mcam, REG_IRQSTAT);
+ handled = mccic_irq(mcam, irqs);
+ spin_unlock(&mcam->dev_lock);
+ return IRQ_RETVAL(handled);
+}
+
+
+static int mmpcam_probe(struct platform_device *pdev)
+{
+ struct mmp_camera *cam;
+ struct mcam_camera *mcam;
+ struct resource *res;
+ struct mmp_camera_platform_data *pdata;
+ int ret;
+
+ cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+ if (cam == NULL)
+ return -ENOMEM;
+ cam->pdev = pdev;
+ INIT_LIST_HEAD(&cam->devlist);
+
+ mcam = &cam->mcam;
+ mcam->platform = MHP_Armada610;
+ mcam->plat_power_up = mmpcam_power_up;
+ mcam->plat_power_down = mmpcam_power_down;
+ mcam->dev = &pdev->dev;
+ mcam->use_smbus = 0;
+ mcam->chip_id = V4L2_IDENT_ARMADA610;
+ mcam->buffer_mode = B_DMA_sg;
+ spin_lock_init(&mcam->dev_lock);
+ /*
+ * Get our I/O memory.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no iomem resource!\n");
+ ret = -ENODEV;
+ goto out_free;
+ }
+ mcam->regs = ioremap(res->start, resource_size(res));
+ if (mcam->regs == NULL) {
+ dev_err(&pdev->dev, "MMIO ioremap fail\n");
+ ret = -ENODEV;
+ goto out_free;
+ }
+ /*
+ * Power/clock memory is elsewhere; get it too. Perhaps this
+ * should really be managed outside of this driver?
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no power resource!\n");
+ ret = -ENODEV;
+ goto out_unmap1;
+ }
+ cam->power_regs = ioremap(res->start, resource_size(res));
+ if (cam->power_regs == NULL) {
+ dev_err(&pdev->dev, "power MMIO ioremap fail\n");
+ ret = -ENODEV;
+ goto out_unmap1;
+ }
+ /*
+ * Find the i2c adapter. This assumes, of course, that the
+ * i2c bus is already up and functioning.
+ */
+ pdata = pdev->dev.platform_data;
+ mcam->i2c_adapter = platform_get_drvdata(pdata->i2c_device);
+ if (mcam->i2c_adapter == NULL) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "No i2c adapter\n");
+ goto out_unmap2;
+ }
+ /*
+ * Sensor GPIO pins.
+ */
+ ret = gpio_request(pdata->sensor_power_gpio, "cam-power");
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get sensor power gpio %d",
+ pdata->sensor_power_gpio);
+ goto out_unmap2;
+ }
+ gpio_direction_output(pdata->sensor_power_gpio, 0);
+ ret = gpio_request(pdata->sensor_reset_gpio, "cam-reset");
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get sensor reset gpio %d",
+ pdata->sensor_reset_gpio);
+ goto out_gpio;
+ }
+ gpio_direction_output(pdata->sensor_reset_gpio, 0);
+ /*
+ * Power the device up and hand it off to the core.
+ */
+ mmpcam_power_up(mcam);
+ ret = mccic_register(mcam);
+ if (ret)
+ goto out_gpio2;
+ /*
+ * Finally, set up our IRQ now that the core is ready to
+ * deal with it.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ ret = -ENODEV;
+ goto out_unregister;
+ }
+ cam->irq = res->start;
+ ret = request_irq(cam->irq, mmpcam_irq, IRQF_SHARED,
+ "mmp-camera", mcam);
+ if (ret == 0) {
+ mmpcam_add_device(cam);
+ return 0;
+ }
+
+out_unregister:
+ mccic_shutdown(mcam);
+out_gpio2:
+ mmpcam_power_down(mcam);
+ gpio_free(pdata->sensor_reset_gpio);
+out_gpio:
+ gpio_free(pdata->sensor_power_gpio);
+out_unmap2:
+ iounmap(cam->power_regs);
+out_unmap1:
+ iounmap(mcam->regs);
+out_free:
+ kfree(cam);
+ return ret;
+}
+
+
+static int mmpcam_remove(struct mmp_camera *cam)
+{
+ struct mcam_camera *mcam = &cam->mcam;
+ struct mmp_camera_platform_data *pdata;
+
+ mmpcam_remove_device(cam);
+ free_irq(cam->irq, mcam);
+ mccic_shutdown(mcam);
+ mmpcam_power_down(mcam);
+ pdata = cam->pdev->dev.platform_data;
+ gpio_free(pdata->sensor_reset_gpio);
+ gpio_free(pdata->sensor_power_gpio);
+ iounmap(cam->power_regs);
+ iounmap(mcam->regs);
+ kfree(cam);
+ return 0;
+}
+
+static int mmpcam_platform_remove(struct platform_device *pdev)
+{
+ struct mmp_camera *cam = mmpcam_find_device(pdev);
+
+ if (cam == NULL)
+ return -ENODEV;
+ return mmpcam_remove(cam);
+}
+
+
+static struct platform_driver mmpcam_driver = {
+ .probe = mmpcam_probe,
+ .remove = mmpcam_platform_remove,
+ .driver = {
+ .name = "mmp-camera",
+ .owner = THIS_MODULE
+ }
+};
+
+
+static int __init mmpcam_init_module(void)
+{
+ mutex_init(&mmpcam_devices_lock);
+ return platform_driver_register(&mmpcam_driver);
+}
+
+static void __exit mmpcam_exit_module(void)
+{
+ platform_driver_unregister(&mmpcam_driver);
+ /*
+ * platform_driver_unregister() should have emptied the list
+ */
+ if (!list_empty(&mmpcam_devices))
+ printk(KERN_ERR "mmp_camera leaving devices behind\n");
+}
+
+module_init(mmpcam_init_module);
+module_exit(mmpcam_exit_module);
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index b03d74e09a3..166bf9349c1 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
-#include <linux/version.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -35,7 +34,7 @@
MODULE_DESCRIPTION("Virtual device for mem2mem framework testing");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
-
+MODULE_VERSION("0.1.1");
#define MIN_W 32
#define MIN_H 32
@@ -380,7 +379,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
- cap->version = KERNEL_VERSION(0, 1, 0);
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
| V4L2_CAP_STREAMING;
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index de5d481b032..c43c81f5f97 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -480,12 +480,14 @@ static int msp_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
struct msp_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (state->radio)
+ if (vt->type != V4L2_TUNER_ANALOG_TV)
return 0;
- if (state->opmode == OPMODE_AUTOSELECT)
- msp_detect_stereo(client);
- vt->audmode = state->audmode;
- vt->rxsubchans = state->rxsubchans;
+ if (!state->radio) {
+ if (state->opmode == OPMODE_AUTOSELECT)
+ msp_detect_stereo(client);
+ vt->rxsubchans = state->rxsubchans;
+ }
+ vt->audmode = state->audmode;
vt->capability |= V4L2_TUNER_CAP_STEREO |
V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
return 0;
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index e2bbd8c35c9..4da9cca939c 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -603,13 +603,9 @@ static int mt9m001_video_probe(struct soc_camera_device *icd,
unsigned long flags;
int ret;
- /*
- * We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
- return -ENODEV;
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/* Enable the chip */
data = reg_write(client, MT9M001_CHIP_ENABLE, 1);
@@ -675,8 +671,8 @@ static void mt9m001_video_remove(struct soc_camera_device *icd)
{
struct soc_camera_link *icl = to_soc_camera_link(icd);
- dev_dbg(&icd->dev, "Video removed: %p, %p\n",
- icd->dev.parent, icd->vdev);
+ dev_dbg(icd->pdev, "Video removed: %p, %p\n",
+ icd->parent, icd->vdev);
if (icl->free_bus)
icl->free_bus(icl);
}
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index ebebed92962..a357aa889fc 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -63,6 +63,12 @@
#define MT9M111_RESET_RESTART_FRAME (1 << 1)
#define MT9M111_RESET_RESET_MODE (1 << 0)
+#define MT9M111_RM_FULL_POWER_RD (0 << 10)
+#define MT9M111_RM_LOW_POWER_RD (1 << 10)
+#define MT9M111_RM_COL_SKIP_4X (1 << 5)
+#define MT9M111_RM_ROW_SKIP_4X (1 << 4)
+#define MT9M111_RM_COL_SKIP_2X (1 << 3)
+#define MT9M111_RM_ROW_SKIP_2X (1 << 2)
#define MT9M111_RMB_MIRROR_COLS (1 << 1)
#define MT9M111_RMB_MIRROR_ROWS (1 << 0)
#define MT9M111_CTXT_CTRL_RESTART (1 << 15)
@@ -95,7 +101,8 @@
#define MT9M111_OPMODE_AUTOEXPO_EN (1 << 14)
#define MT9M111_OPMODE_AUTOWHITEBAL_EN (1 << 1)
-
+#define MT9M111_OUTFMT_FLIP_BAYER_COL (1 << 9)
+#define MT9M111_OUTFMT_FLIP_BAYER_ROW (1 << 8)
#define MT9M111_OUTFMT_PROCESSED_BAYER (1 << 14)
#define MT9M111_OUTFMT_BYPASS_IFP (1 << 10)
#define MT9M111_OUTFMT_INV_PIX_CLOCK (1 << 9)
@@ -110,9 +117,8 @@
#define MT9M111_OUTFMT_TST_RAMP_FRAME (3 << 4)
#define MT9M111_OUTFMT_SHIFT_3_UP (1 << 3)
#define MT9M111_OUTFMT_AVG_CHROMA (1 << 2)
-#define MT9M111_OUTFMT_SWAP_YCbCr_C_Y (1 << 1)
-#define MT9M111_OUTFMT_SWAP_RGB_EVEN (1 << 1)
-#define MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr (1 << 0)
+#define MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN (1 << 1)
+#define MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B (1 << 0)
/*
* Camera control register addresses (0x200..0x2ff not implemented)
@@ -122,6 +128,8 @@
#define reg_write(reg, val) mt9m111_reg_write(client, MT9M111_##reg, (val))
#define reg_set(reg, val) mt9m111_reg_set(client, MT9M111_##reg, (val))
#define reg_clear(reg, val) mt9m111_reg_clear(client, MT9M111_##reg, (val))
+#define reg_mask(reg, val, mask) mt9m111_reg_mask(client, MT9M111_##reg, \
+ (val), (mask))
#define MT9M111_MIN_DARK_ROWS 8
#define MT9M111_MIN_DARK_COLS 26
@@ -153,7 +161,11 @@ static const struct mt9m111_datafmt mt9m111_colour_fmts[] = {
{V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
{V4L2_MBUS_FMT_VYUY8_2X8, V4L2_COLORSPACE_JPEG},
{V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB},
{V4L2_MBUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_BGR565_2X8_LE, V4L2_COLORSPACE_SRGB},
+ {V4L2_MBUS_FMT_BGR565_2X8_BE, V4L2_COLORSPACE_SRGB},
{V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB},
{V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
};
@@ -169,6 +181,8 @@ struct mt9m111 {
* from v4l2-chip-ident.h */
enum mt9m111_context context;
struct v4l2_rect rect;
+ struct mutex power_lock; /* lock to protect power_count */
+ int power_count;
const struct mt9m111_datafmt *fmt;
unsigned int gain;
unsigned char autoexposure;
@@ -176,10 +190,6 @@ struct mt9m111 {
unsigned int powered:1;
unsigned int hflip:1;
unsigned int vflip:1;
- unsigned int swap_rgb_even_odd:1;
- unsigned int swap_rgb_red_blue:1;
- unsigned int swap_yuv_y_chromas:1;
- unsigned int swap_yuv_cb_cr:1;
unsigned int autowhitebalance:1;
};
@@ -248,12 +258,26 @@ static int mt9m111_reg_clear(struct i2c_client *client, const u16 reg,
int ret;
ret = mt9m111_reg_read(client, reg);
- return mt9m111_reg_write(client, reg, ret & ~data);
+ if (ret >= 0)
+ ret = mt9m111_reg_write(client, reg, ret & ~data);
+ return ret;
}
-static int mt9m111_set_context(struct i2c_client *client,
+static int mt9m111_reg_mask(struct i2c_client *client, const u16 reg,
+ const u16 data, const u16 mask)
+{
+ int ret;
+
+ ret = mt9m111_reg_read(client, reg);
+ if (ret >= 0)
+ ret = mt9m111_reg_write(client, reg, (ret & ~mask) | data);
+ return ret;
+}
+
+static int mt9m111_set_context(struct mt9m111 *mt9m111,
enum mt9m111_context ctxt)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int valB = MT9M111_CTXT_CTRL_RESTART | MT9M111_CTXT_CTRL_DEFECTCOR_B
| MT9M111_CTXT_CTRL_RESIZE_B | MT9M111_CTXT_CTRL_CTRL2_B
| MT9M111_CTXT_CTRL_GAMMA_B | MT9M111_CTXT_CTRL_READ_MODE_B
@@ -267,10 +291,10 @@ static int mt9m111_set_context(struct i2c_client *client,
return reg_write(CONTEXT_CONTROL, valA);
}
-static int mt9m111_setup_rect(struct i2c_client *client,
+static int mt9m111_setup_rect(struct mt9m111 *mt9m111,
struct v4l2_rect *rect)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret, is_raw_format;
int width = rect->width;
int height = rect->height;
@@ -312,81 +336,9 @@ static int mt9m111_setup_rect(struct i2c_client *client,
return ret;
}
-static int mt9m111_setup_pixfmt(struct i2c_client *client, u16 outfmt)
+static int mt9m111_enable(struct mt9m111 *mt9m111)
{
- int ret;
- u16 mask = MT9M111_OUTFMT_PROCESSED_BAYER | MT9M111_OUTFMT_RGB |
- MT9M111_OUTFMT_BYPASS_IFP | MT9M111_OUTFMT_SWAP_RGB_EVEN |
- MT9M111_OUTFMT_RGB565 | MT9M111_OUTFMT_RGB555 |
- MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr |
- MT9M111_OUTFMT_SWAP_YCbCr_C_Y;
-
- ret = reg_read(OUTPUT_FORMAT_CTRL2_A);
- if (ret >= 0)
- ret = reg_write(OUTPUT_FORMAT_CTRL2_A, (ret & ~mask) | outfmt);
- if (!ret)
- ret = reg_read(OUTPUT_FORMAT_CTRL2_B);
- if (ret >= 0)
- ret = reg_write(OUTPUT_FORMAT_CTRL2_B, (ret & ~mask) | outfmt);
-
- return ret;
-}
-
-static int mt9m111_setfmt_bayer8(struct i2c_client *client)
-{
- return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_PROCESSED_BAYER |
- MT9M111_OUTFMT_RGB);
-}
-
-static int mt9m111_setfmt_bayer10(struct i2c_client *client)
-{
- return mt9m111_setup_pixfmt(client, MT9M111_OUTFMT_BYPASS_IFP);
-}
-
-static int mt9m111_setfmt_rgb565(struct i2c_client *client)
-{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
- int val = 0;
-
- if (mt9m111->swap_rgb_red_blue)
- val |= MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr;
- if (mt9m111->swap_rgb_even_odd)
- val |= MT9M111_OUTFMT_SWAP_RGB_EVEN;
- val |= MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565;
-
- return mt9m111_setup_pixfmt(client, val);
-}
-
-static int mt9m111_setfmt_rgb555(struct i2c_client *client)
-{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
- int val = 0;
-
- if (mt9m111->swap_rgb_red_blue)
- val |= MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr;
- if (mt9m111->swap_rgb_even_odd)
- val |= MT9M111_OUTFMT_SWAP_RGB_EVEN;
- val |= MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555;
-
- return mt9m111_setup_pixfmt(client, val);
-}
-
-static int mt9m111_setfmt_yuv(struct i2c_client *client)
-{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
- int val = 0;
-
- if (mt9m111->swap_yuv_cb_cr)
- val |= MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr;
- if (mt9m111->swap_yuv_y_chromas)
- val |= MT9M111_OUTFMT_SWAP_YCbCr_C_Y;
-
- return mt9m111_setup_pixfmt(client, val);
-}
-
-static int mt9m111_enable(struct i2c_client *client)
-{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
ret = reg_set(RESET, MT9M111_RESET_CHIP_ENABLE);
@@ -395,8 +347,9 @@ static int mt9m111_enable(struct i2c_client *client)
return ret;
}
-static int mt9m111_reset(struct i2c_client *client)
+static int mt9m111_reset(struct mt9m111 *mt9m111)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
ret = reg_set(RESET, MT9M111_RESET_RESET_MODE);
@@ -424,11 +377,9 @@ static int mt9m111_set_bus_param(struct soc_camera_device *icd, unsigned long f)
return 0;
}
-static int mt9m111_make_rect(struct i2c_client *client,
+static int mt9m111_make_rect(struct mt9m111 *mt9m111,
struct v4l2_rect *rect)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
-
if (mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
mt9m111->fmt->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE) {
/* Bayer format - even size lengths */
@@ -444,14 +395,14 @@ static int mt9m111_make_rect(struct i2c_client *client,
soc_camera_limit_side(&rect->top, &rect->height,
MT9M111_MIN_DARK_ROWS, 2, MT9M111_MAX_HEIGHT);
- return mt9m111_setup_rect(client, rect);
+ return mt9m111_setup_rect(mt9m111, rect);
}
static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
struct v4l2_rect rect = a->c;
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
int ret;
dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n",
@@ -460,7 +411,7 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- ret = mt9m111_make_rect(client, &rect);
+ ret = mt9m111_make_rect(mt9m111, &rect);
if (!ret)
mt9m111->rect = rect;
return ret;
@@ -468,8 +419,7 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
a->c = mt9m111->rect;
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -496,8 +446,7 @@ static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int mt9m111_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
mf->width = mt9m111->rect.width;
mf->height = mt9m111->rect.height;
@@ -508,51 +457,73 @@ static int mt9m111_g_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int mt9m111_set_pixfmt(struct i2c_client *client,
+static int mt9m111_set_pixfmt(struct mt9m111 *mt9m111,
enum v4l2_mbus_pixelcode code)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
+ u16 data_outfmt2, mask_outfmt2 = MT9M111_OUTFMT_PROCESSED_BAYER |
+ MT9M111_OUTFMT_BYPASS_IFP | MT9M111_OUTFMT_RGB |
+ MT9M111_OUTFMT_RGB565 | MT9M111_OUTFMT_RGB555 |
+ MT9M111_OUTFMT_RGB444x | MT9M111_OUTFMT_RGBx444 |
+ MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN |
+ MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
int ret;
switch (code) {
case V4L2_MBUS_FMT_SBGGR8_1X8:
- ret = mt9m111_setfmt_bayer8(client);
+ data_outfmt2 = MT9M111_OUTFMT_PROCESSED_BAYER |
+ MT9M111_OUTFMT_RGB;
break;
case V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE:
- ret = mt9m111_setfmt_bayer10(client);
+ data_outfmt2 = MT9M111_OUTFMT_BYPASS_IFP | MT9M111_OUTFMT_RGB;
break;
case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE:
- ret = mt9m111_setfmt_rgb555(client);
+ data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555 |
+ MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
+ break;
+ case V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE:
+ data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB555;
break;
case V4L2_MBUS_FMT_RGB565_2X8_LE:
- ret = mt9m111_setfmt_rgb565(client);
+ data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
+ MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
+ break;
+ case V4L2_MBUS_FMT_RGB565_2X8_BE:
+ data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565;
+ break;
+ case V4L2_MBUS_FMT_BGR565_2X8_BE:
+ data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
+ MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
+ break;
+ case V4L2_MBUS_FMT_BGR565_2X8_LE:
+ data_outfmt2 = MT9M111_OUTFMT_RGB | MT9M111_OUTFMT_RGB565 |
+ MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN |
+ MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
break;
case V4L2_MBUS_FMT_UYVY8_2X8:
- mt9m111->swap_yuv_y_chromas = 0;
- mt9m111->swap_yuv_cb_cr = 0;
- ret = mt9m111_setfmt_yuv(client);
+ data_outfmt2 = 0;
break;
case V4L2_MBUS_FMT_VYUY8_2X8:
- mt9m111->swap_yuv_y_chromas = 0;
- mt9m111->swap_yuv_cb_cr = 1;
- ret = mt9m111_setfmt_yuv(client);
+ data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
break;
case V4L2_MBUS_FMT_YUYV8_2X8:
- mt9m111->swap_yuv_y_chromas = 1;
- mt9m111->swap_yuv_cb_cr = 0;
- ret = mt9m111_setfmt_yuv(client);
+ data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN;
break;
case V4L2_MBUS_FMT_YVYU8_2X8:
- mt9m111->swap_yuv_y_chromas = 1;
- mt9m111->swap_yuv_cb_cr = 1;
- ret = mt9m111_setfmt_yuv(client);
+ data_outfmt2 = MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN |
+ MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B;
break;
default:
- dev_err(&client->dev, "Pixel format not handled : %x\n",
- code);
- ret = -EINVAL;
+ dev_err(&client->dev, "Pixel format not handled: %x\n", code);
+ return -EINVAL;
}
+ ret = reg_mask(OUTPUT_FORMAT_CTRL2_A, data_outfmt2,
+ mask_outfmt2);
+ if (!ret)
+ ret = reg_mask(OUTPUT_FORMAT_CTRL2_B, data_outfmt2,
+ mask_outfmt2);
+
return ret;
}
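
Illustration (not part of the patch): the hunks above replace the per-format setfmt helpers with a single switch that builds a (data, mask) pair and applies it through the new reg_mask() helper, which, like the fixed mt9m111_reg_clear(), only issues the write when the preceding read succeeded. A small host-side sketch of the masked read-modify-write; the register value and bit names below are made up, not the real MT9M111 layout:

#include <stdio.h>

/* stand-in bit values, purely for illustration */
#define OUTFMT_RGB        (1u << 8)
#define OUTFMT_RGB565     (1u << 6)
#define OUTFMT_SWAP_RB    (1u << 0)

int main(void)
{
        unsigned int reg  = 0xa5a5;                      /* whatever the chip held */
        unsigned int mask = OUTFMT_RGB | OUTFMT_RGB565 | OUTFMT_SWAP_RB;
        unsigned int data = OUTFMT_RGB | OUTFMT_RGB565;  /* e.g. RGB565, no R/B swap */

        /* only the bits named in the mask change; everything else survives */
        reg = (reg & ~mask) | data;
        printf("new OUTPUT_FORMAT_CTRL2 value: 0x%04x\n", reg);
        return 0;
}
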
@@ -561,7 +532,7 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd,
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
const struct mt9m111_datafmt *fmt;
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
struct v4l2_rect rect = {
.left = mt9m111->rect.left,
.top = mt9m111->rect.top,
@@ -579,9 +550,9 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd,
"%s code=%x left=%d, top=%d, width=%d, height=%d\n", __func__,
mf->code, rect.left, rect.top, rect.width, rect.height);
- ret = mt9m111_make_rect(client, &rect);
+ ret = mt9m111_make_rect(mt9m111, &rect);
if (!ret)
- ret = mt9m111_set_pixfmt(client, mf->code);
+ ret = mt9m111_set_pixfmt(mt9m111, mf->code);
if (!ret) {
mt9m111->rect = rect;
mt9m111->fmt = fmt;
@@ -594,8 +565,7 @@ static int mt9m111_s_fmt(struct v4l2_subdev *sd,
static int mt9m111_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
const struct mt9m111_datafmt *fmt;
bool bayer = mf->code == V4L2_MBUS_FMT_SBGGR8_1X8 ||
mf->code == V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE;
@@ -635,7 +605,7 @@ static int mt9m111_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
return -EINVAL;
@@ -726,21 +696,16 @@ static const struct v4l2_queryctrl mt9m111_controls[] = {
}
};
-static int mt9m111_resume(struct soc_camera_device *icd);
-static int mt9m111_suspend(struct soc_camera_device *icd, pm_message_t state);
-
static struct soc_camera_ops mt9m111_ops = {
- .suspend = mt9m111_suspend,
- .resume = mt9m111_resume,
.query_bus_param = mt9m111_query_bus_param,
.set_bus_param = mt9m111_set_bus_param,
.controls = mt9m111_controls,
.num_controls = ARRAY_SIZE(mt9m111_controls),
};
-static int mt9m111_set_flip(struct i2c_client *client, int flip, int mask)
+static int mt9m111_set_flip(struct mt9m111 *mt9m111, int flip, int mask)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
if (mt9m111->context == HIGHPOWER) {
@@ -758,8 +723,9 @@ static int mt9m111_set_flip(struct i2c_client *client, int flip, int mask)
return ret;
}
-static int mt9m111_get_global_gain(struct i2c_client *client)
+static int mt9m111_get_global_gain(struct mt9m111 *mt9m111)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int data;
data = reg_read(GLOBAL_GAIN);
@@ -769,9 +735,9 @@ static int mt9m111_get_global_gain(struct i2c_client *client)
return data;
}
-static int mt9m111_set_global_gain(struct i2c_client *client, int gain)
+static int mt9m111_set_global_gain(struct mt9m111 *mt9m111, int gain)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
u16 val;
if (gain > 63 * 2 * 2)
@@ -788,9 +754,9 @@ static int mt9m111_set_global_gain(struct i2c_client *client, int gain)
return reg_write(GLOBAL_GAIN, val);
}
-static int mt9m111_set_autoexposure(struct i2c_client *client, int on)
+static int mt9m111_set_autoexposure(struct mt9m111 *mt9m111, int on)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
if (on)
@@ -804,9 +770,9 @@ static int mt9m111_set_autoexposure(struct i2c_client *client, int on)
return ret;
}
-static int mt9m111_set_autowhitebalance(struct i2c_client *client, int on)
+static int mt9m111_set_autowhitebalance(struct mt9m111 *mt9m111, int on)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
if (on)
@@ -823,7 +789,7 @@ static int mt9m111_set_autowhitebalance(struct i2c_client *client, int on)
static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
int data;
switch (ctrl->id) {
@@ -848,7 +814,7 @@ static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
ctrl->value = !!(data & MT9M111_RMB_MIRROR_COLS);
break;
case V4L2_CID_GAIN:
- data = mt9m111_get_global_gain(client);
+ data = mt9m111_get_global_gain(mt9m111);
if (data < 0)
return data;
ctrl->value = data;
@@ -865,8 +831,7 @@ static int mt9m111_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
const struct v4l2_queryctrl *qctrl;
int ret;
@@ -877,22 +842,22 @@ static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
switch (ctrl->id) {
case V4L2_CID_VFLIP:
mt9m111->vflip = ctrl->value;
- ret = mt9m111_set_flip(client, ctrl->value,
+ ret = mt9m111_set_flip(mt9m111, ctrl->value,
MT9M111_RMB_MIRROR_ROWS);
break;
case V4L2_CID_HFLIP:
mt9m111->hflip = ctrl->value;
- ret = mt9m111_set_flip(client, ctrl->value,
+ ret = mt9m111_set_flip(mt9m111, ctrl->value,
MT9M111_RMB_MIRROR_COLS);
break;
case V4L2_CID_GAIN:
- ret = mt9m111_set_global_gain(client, ctrl->value);
+ ret = mt9m111_set_global_gain(mt9m111, ctrl->value);
break;
case V4L2_CID_EXPOSURE_AUTO:
- ret = mt9m111_set_autoexposure(client, ctrl->value);
+ ret = mt9m111_set_autoexposure(mt9m111, ctrl->value);
break;
case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = mt9m111_set_autowhitebalance(client, ctrl->value);
+ ret = mt9m111_set_autowhitebalance(mt9m111, ctrl->value);
break;
default:
ret = -EINVAL;
@@ -901,60 +866,52 @@ static int mt9m111_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return ret;
}
-static int mt9m111_suspend(struct soc_camera_device *icd, pm_message_t state)
+static int mt9m111_suspend(struct mt9m111 *mt9m111)
{
- struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
- struct mt9m111 *mt9m111 = to_mt9m111(client);
-
- mt9m111->gain = mt9m111_get_global_gain(client);
+ mt9m111->gain = mt9m111_get_global_gain(mt9m111);
return 0;
}
-static int mt9m111_restore_state(struct i2c_client *client)
+static void mt9m111_restore_state(struct mt9m111 *mt9m111)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
-
- mt9m111_set_context(client, mt9m111->context);
- mt9m111_set_pixfmt(client, mt9m111->fmt->code);
- mt9m111_setup_rect(client, &mt9m111->rect);
- mt9m111_set_flip(client, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS);
- mt9m111_set_flip(client, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS);
- mt9m111_set_global_gain(client, mt9m111->gain);
- mt9m111_set_autoexposure(client, mt9m111->autoexposure);
- mt9m111_set_autowhitebalance(client, mt9m111->autowhitebalance);
- return 0;
+ mt9m111_set_context(mt9m111, mt9m111->context);
+ mt9m111_set_pixfmt(mt9m111, mt9m111->fmt->code);
+ mt9m111_setup_rect(mt9m111, &mt9m111->rect);
+ mt9m111_set_flip(mt9m111, mt9m111->hflip, MT9M111_RMB_MIRROR_COLS);
+ mt9m111_set_flip(mt9m111, mt9m111->vflip, MT9M111_RMB_MIRROR_ROWS);
+ mt9m111_set_global_gain(mt9m111, mt9m111->gain);
+ mt9m111_set_autoexposure(mt9m111, mt9m111->autoexposure);
+ mt9m111_set_autowhitebalance(mt9m111, mt9m111->autowhitebalance);
}
-static int mt9m111_resume(struct soc_camera_device *icd)
+static int mt9m111_resume(struct mt9m111 *mt9m111)
{
- struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
- struct mt9m111 *mt9m111 = to_mt9m111(client);
int ret = 0;
if (mt9m111->powered) {
- ret = mt9m111_enable(client);
+ ret = mt9m111_enable(mt9m111);
if (!ret)
- ret = mt9m111_reset(client);
+ ret = mt9m111_reset(mt9m111);
if (!ret)
- ret = mt9m111_restore_state(client);
+ mt9m111_restore_state(mt9m111);
}
return ret;
}
-static int mt9m111_init(struct i2c_client *client)
+static int mt9m111_init(struct mt9m111 *mt9m111)
{
- struct mt9m111 *mt9m111 = to_mt9m111(client);
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
int ret;
mt9m111->context = HIGHPOWER;
- ret = mt9m111_enable(client);
+ ret = mt9m111_enable(mt9m111);
if (!ret)
- ret = mt9m111_reset(client);
+ ret = mt9m111_reset(mt9m111);
if (!ret)
- ret = mt9m111_set_context(client, mt9m111->context);
+ ret = mt9m111_set_context(mt9m111, mt9m111->context);
if (!ret)
- ret = mt9m111_set_autoexposure(client, mt9m111->autoexposure);
+ ret = mt9m111_set_autoexposure(mt9m111, mt9m111->autoexposure);
if (ret)
dev_err(&client->dev, "mt9m111 init failed: %d\n", ret);
return ret;
@@ -971,20 +928,13 @@ static int mt9m111_video_probe(struct soc_camera_device *icd,
s32 data;
int ret;
- /*
- * We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
- return -ENODEV;
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
mt9m111->autoexposure = 1;
mt9m111->autowhitebalance = 1;
- mt9m111->swap_rgb_even_odd = 1;
- mt9m111->swap_rgb_red_blue = 1;
-
data = reg_read(CHIP_VERSION);
switch (data) {
@@ -1005,16 +955,51 @@ static int mt9m111_video_probe(struct soc_camera_device *icd,
goto ei2c;
}
- ret = mt9m111_init(client);
+ ret = mt9m111_init(mt9m111);
ei2c:
return ret;
}
+static int mt9m111_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&mt9m111->power_lock);
+
+ /*
+ * If the power count is modified from 0 to != 0 or from != 0 to 0,
+ * update the power state.
+ */
+ if (mt9m111->power_count == !on) {
+ if (on) {
+ ret = mt9m111_resume(mt9m111);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to resume the sensor: %d\n", ret);
+ goto out;
+ }
+ } else {
+ mt9m111_suspend(mt9m111);
+ }
+ }
+
+ /* Update the power count. */
+ mt9m111->power_count += on ? 1 : -1;
+ WARN_ON(mt9m111->power_count < 0);
+
+out:
+ mutex_unlock(&mt9m111->power_lock);
+ return ret;
+}
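
Illustration (not part of the patch): the new s_power() handler refcounts its callers, so the hardware is resumed only on the 0 -> 1 transition of power_count and suspended again on the 1 -> 0 transition. A hedged sketch of how a bridge or host driver would be expected to balance the calls; the helper names here are hypothetical:

#include <media/v4l2-subdev.h>

static int my_stream_on(struct v4l2_subdev *sd)
{
        /* first caller: power_count goes 0 -> 1, mt9m111_resume() runs */
        return v4l2_subdev_call(sd, core, s_power, 1);
}

static void my_stream_off(struct v4l2_subdev *sd)
{
        /* last caller: power_count goes 1 -> 0, mt9m111_suspend() runs */
        v4l2_subdev_call(sd, core, s_power, 0);
}
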
+
static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = {
.g_ctrl = mt9m111_g_ctrl,
.s_ctrl = mt9m111_s_ctrl,
.g_chip_ident = mt9m111_g_chip_ident,
+ .s_power = mt9m111_s_power,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9m111_g_register,
.s_register = mt9m111_s_register,
diff --git a/drivers/media/video/mt9t031.c b/drivers/media/video/mt9t031.c
index 7ce279c3751..30547cc3f89 100644
--- a/drivers/media/video/mt9t031.c
+++ b/drivers/media/video/mt9t031.c
@@ -700,8 +700,7 @@ static int mt9t031_runtime_suspend(struct device *dev)
static int mt9t031_runtime_resume(struct device *dev)
{
struct video_device *vdev = to_video_device(dev);
- struct soc_camera_device *icd = container_of(vdev->parent,
- struct soc_camera_device, dev);
+ struct soc_camera_device *icd = dev_get_drvdata(vdev->parent);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct mt9t031 *mt9t031 = to_mt9t031(client);
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index bffa9ee1096..d2e0a50063a 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -1057,13 +1057,9 @@ static int mt9t112_camera_probe(struct soc_camera_device *icd,
const char *devname;
int chipid;
- /*
- * We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
- return -ENODEV;
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/*
* check and show chip ID
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
index 4904d25f689..893a8b8f514 100644
--- a/drivers/media/video/mt9v011.c
+++ b/drivers/media/video/mt9v011.c
@@ -54,11 +54,20 @@ static struct v4l2_queryctrl mt9v011_qctrl[] = {
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Gain",
.minimum = 0,
- .maximum = (1 << 10) - 1,
+ .maximum = (1 << 12) - 1 - 0x0020,
.step = 1,
.default_value = 0x0020,
.flags = 0,
}, {
+ .id = V4L2_CID_EXPOSURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Exposure",
+ .minimum = 0,
+ .maximum = 2047,
+ .step = 1,
+ .default_value = 0x01fc,
+ .flags = 0,
+ }, {
.id = V4L2_CID_RED_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Red Balance",
@@ -105,7 +114,8 @@ struct mt9v011 {
unsigned hflip:1;
unsigned vflip:1;
- u16 global_gain, red_bal, blue_bal;
+ u16 global_gain, exposure;
+ s16 red_bal, blue_bal;
};
static inline struct mt9v011 *to_mt9v011(struct v4l2_subdev *sd)
@@ -180,24 +190,68 @@ static const struct i2c_reg_value mt9v011_init_default[] = {
{ R07_MT9V011_OUT_CTRL, 0x0002 }, /* chip enable */
};
+
+static u16 calc_mt9v011_gain(s16 lineargain)
+{
+
+ u16 digitalgain = 0;
+ u16 analogmult = 0;
+ u16 analoginit = 0;
+
+ if (lineargain < 0)
+ lineargain = 0;
+
+ /* recommended minimum */
+ lineargain += 0x0020;
+
+ if (lineargain > 2047)
+ lineargain = 2047;
+
+ if (lineargain > 1023) {
+ digitalgain = 3;
+ analogmult = 3;
+ analoginit = lineargain / 16;
+ } else if (lineargain > 511) {
+ digitalgain = 1;
+ analogmult = 3;
+ analoginit = lineargain / 8;
+ } else if (lineargain > 255) {
+ analogmult = 3;
+ analoginit = lineargain / 4;
+ } else if (lineargain > 127) {
+ analogmult = 1;
+ analoginit = lineargain / 2;
+ } else
+ analoginit = lineargain;
+
+ return analoginit + (analogmult << 7) + (digitalgain << 9);
+
+}
+
static void set_balance(struct v4l2_subdev *sd)
{
struct mt9v011 *core = to_mt9v011(sd);
- u16 green1_gain, green2_gain, blue_gain, red_gain;
+ u16 green_gain, blue_gain, red_gain;
+ u16 exposure;
+ s16 bal;
- green1_gain = core->global_gain;
- green2_gain = core->global_gain;
+ exposure = core->exposure;
- blue_gain = core->global_gain +
- core->global_gain * core->blue_bal / (1 << 9);
+ green_gain = calc_mt9v011_gain(core->global_gain);
- red_gain = core->global_gain +
- core->global_gain * core->blue_bal / (1 << 9);
+ bal = core->global_gain;
+ bal += (core->blue_bal * core->global_gain / (1 << 7));
+ blue_gain = calc_mt9v011_gain(bal);
- mt9v011_write(sd, R2B_MT9V011_GREEN_1_GAIN, green1_gain);
- mt9v011_write(sd, R2E_MT9V011_GREEN_2_GAIN, green1_gain);
+ bal = core->global_gain;
+ bal += (core->red_bal * core->global_gain / (1 << 7));
+ red_gain = calc_mt9v011_gain(bal);
+
+ mt9v011_write(sd, R2B_MT9V011_GREEN_1_GAIN, green_gain);
+ mt9v011_write(sd, R2E_MT9V011_GREEN_2_GAIN, green_gain);
mt9v011_write(sd, R2C_MT9V011_BLUE_GAIN, blue_gain);
mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain);
+ mt9v011_write(sd, R09_MT9V011_SHUTTER_WIDTH, exposure);
}
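
Illustration (not part of the patch): calc_mt9v011_gain() above folds a requested linear gain into one register value, with coarser steps as the request grows; reading the shifts, bits 0-6 carry the initial analog gain, bits 7-8 the analog multiplier and bits 9-10 the digital gain (that interpretation is inferred from the code, not from a datasheet). A standalone copy that can be compiled on the host to check a few encodings:

#include <stdio.h>

static unsigned short demo_gain(short linear)
{
        unsigned short dig = 0, mult = 0, init;

        if (linear < 0)
                linear = 0;
        linear += 0x0020;               /* recommended minimum */
        if (linear > 2047)
                linear = 2047;

        if (linear > 1023) {
                dig = 3; mult = 3; init = linear / 16;
        } else if (linear > 511) {
                dig = 1; mult = 3; init = linear / 8;
        } else if (linear > 255) {
                mult = 3; init = linear / 4;
        } else if (linear > 127) {
                mult = 1; init = linear / 2;
        } else {
                init = linear;
        }
        return init + (mult << 7) + (dig << 9);
}

int main(void)
{
        /* e.g. a request of 100 becomes 132 after the +0x20 offset, falls in the
         * 128..255 range and encodes as 66 | (1 << 7) = 0x00c2 */
        printf("gain(100)  = 0x%04x\n", demo_gain(100));
        printf("gain(1000) = 0x%04x\n", demo_gain(1000));
        return 0;
}
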
static void calc_fps(struct v4l2_subdev *sd, u32 *numerator, u32 *denominator)
@@ -286,7 +340,7 @@ static void set_res(struct v4l2_subdev *sd)
* be missing.
*/
- hstart = 14 + (640 - core->width) / 2;
+ hstart = 20 + (640 - core->width) / 2;
mt9v011_write(sd, R02_MT9V011_COLSTART, hstart);
mt9v011_write(sd, R04_MT9V011_WIDTH, core->width);
mt9v011_write(sd, R05_MT9V011_HBLANK, 771 - core->width);
@@ -338,6 +392,9 @@ static int mt9v011_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
case V4L2_CID_GAIN:
ctrl->value = core->global_gain;
return 0;
+ case V4L2_CID_EXPOSURE:
+ ctrl->value = core->exposure;
+ return 0;
case V4L2_CID_RED_BALANCE:
ctrl->value = core->red_bal;
return 0;
@@ -392,6 +449,9 @@ static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
case V4L2_CID_GAIN:
core->global_gain = ctrl->value;
break;
+ case V4L2_CID_EXPOSURE:
+ core->exposure = ctrl->value;
+ break;
case V4L2_CID_RED_BALANCE:
core->red_bal = ctrl->value;
break;
@@ -598,6 +658,7 @@ static int mt9v011_probe(struct i2c_client *c,
}
core->global_gain = 0x0024;
+ core->exposure = 0x01fc;
core->width = 640;
core->height = 480;
core->xtal = 27000000; /* Hz */
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index fc76ed1c08e..51b0fccbfe7 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -728,9 +728,9 @@ static int mt9v022_video_probe(struct soc_camera_device *icd,
int ret;
unsigned long flags;
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
- return -ENODEV;
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/* Read out the chip version register */
data = reg_read(client, MT9V022_CHIP_VERSION);
@@ -809,8 +809,8 @@ static void mt9v022_video_remove(struct soc_camera_device *icd)
{
struct soc_camera_link *icl = to_soc_camera_link(icd);
- dev_dbg(&icd->dev, "Video removed: %p, %p\n",
- icd->dev.parent, icd->vdev);
+ dev_dbg(icd->pdev, "Video removed: %p, %p\n",
+ icd->parent, icd->vdev);
if (icl->free_bus)
icl->free_bus(icl);
}
diff --git a/drivers/media/video/mt9v032.c b/drivers/media/video/mt9v032.c
index 1319c2c48af..c64e1dc4cb4 100644
--- a/drivers/media/video/mt9v032.c
+++ b/drivers/media/video/mt9v032.c
@@ -31,14 +31,14 @@
#define MT9V032_CHIP_VERSION 0x00
#define MT9V032_CHIP_ID_REV1 0x1311
#define MT9V032_CHIP_ID_REV3 0x1313
-#define MT9V032_ROW_START 0x01
-#define MT9V032_ROW_START_MIN 4
-#define MT9V032_ROW_START_DEF 10
-#define MT9V032_ROW_START_MAX 482
-#define MT9V032_COLUMN_START 0x02
+#define MT9V032_COLUMN_START 0x01
#define MT9V032_COLUMN_START_MIN 1
-#define MT9V032_COLUMN_START_DEF 2
+#define MT9V032_COLUMN_START_DEF 1
#define MT9V032_COLUMN_START_MAX 752
+#define MT9V032_ROW_START 0x02
+#define MT9V032_ROW_START_MIN 4
+#define MT9V032_ROW_START_DEF 5
+#define MT9V032_ROW_START_MAX 482
#define MT9V032_WINDOW_HEIGHT 0x03
#define MT9V032_WINDOW_HEIGHT_MIN 1
#define MT9V032_WINDOW_HEIGHT_DEF 480
@@ -420,13 +420,13 @@ static int mt9v032_set_crop(struct v4l2_subdev *subdev,
struct v4l2_rect *__crop;
struct v4l2_rect rect;
- /* Clamp the crop rectangle boundaries and align them to a multiple of 2
- * pixels.
+ /* Clamp the crop rectangle boundaries and align them to a non multiple
+ * of 2 pixels to ensure a GRBG Bayer pattern.
*/
- rect.left = clamp(ALIGN(crop->rect.left, 2),
+ rect.left = clamp(ALIGN(crop->rect.left + 1, 2) - 1,
MT9V032_COLUMN_START_MIN,
MT9V032_COLUMN_START_MAX);
- rect.top = clamp(ALIGN(crop->rect.top, 2),
+ rect.top = clamp(ALIGN(crop->rect.top + 1, 2) - 1,
MT9V032_ROW_START_MIN,
MT9V032_ROW_START_MAX);
rect.width = clamp(ALIGN(crop->rect.width, 2),
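
Illustration (not part of the patch): together with the odd COLUMN_START/ROW_START defaults earlier in the file, the crop hunk above pins the crop origin to odd coordinates so the window always starts on the same GRBG Bayer phase; ALIGN(x + 1, 2) - 1 rounds any coordinate up to the nearest odd value. A quick host-side check, with ALIGN defined locally to match the kernel macro for power-of-two alignments:

#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        int x;

        for (x = 0; x <= 6; x++)
                printf("left=%d -> %d\n", x, ALIGN(x + 1, 2) - 1);
        /* prints 1, 1, 3, 3, 5, 5, 7 */
        return 0;
}
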
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index bc0c23a1009..087db12a3a6 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -31,7 +31,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <media/soc_camera.h>
@@ -73,7 +72,7 @@
#define CSISR_SOF_INT (1 << 16)
#define CSISR_DRDY (1 << 0)
-#define VERSION_CODE KERNEL_VERSION(0, 0, 1)
+#define DRIVER_VERSION "0.0.2"
#define DRIVER_NAME "mx1-camera"
#define CSI_IRQ_MASK (CSISR_SFF_OR_INT | CSISR_RFF_OR_INT | \
@@ -142,7 +141,7 @@ static int mx1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
*count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
- dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size);
+ dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
return 0;
}
@@ -154,7 +153,7 @@ static void free_buffer(struct videobuf_queue *vq, struct mx1_buffer *buf)
BUG_ON(in_interrupt());
- dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
/*
@@ -179,7 +178,7 @@ static int mx1_videobuf_prepare(struct videobuf_queue *vq,
if (bytes_per_line < 0)
return bytes_per_line;
- dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
/* Added list head initialization on alloc */
@@ -232,7 +231,7 @@ out:
static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev)
{
struct videobuf_buffer *vbuf = &pcdev->active->vb;
- struct device *dev = pcdev->icd->dev.parent;
+ struct device *dev = pcdev->icd->parent;
int ret;
if (unlikely(!pcdev->active)) {
@@ -256,11 +255,11 @@ static void mx1_videobuf_queue(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx1_camera_dev *pcdev = ici->priv;
struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
- dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
list_add_tail(&vb->queue, &pcdev->capture);
@@ -287,7 +286,7 @@ static void mx1_videobuf_release(struct videobuf_queue *vq,
struct mx1_buffer *buf = container_of(vb, struct mx1_buffer, vb);
#ifdef DEBUG
struct soc_camera_device *icd = vq->priv_data;
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
@@ -343,7 +342,7 @@ static void mx1_camera_wakeup(struct mx1_camera_dev *pcdev,
static void mx1_camera_dma_irq(int channel, void *data)
{
struct mx1_camera_dev *pcdev = data;
- struct device *dev = pcdev->icd->dev.parent;
+ struct device *dev = pcdev->icd->parent;
struct mx1_buffer *buf;
struct videobuf_buffer *vb;
unsigned long flags;
@@ -378,10 +377,10 @@ static struct videobuf_queue_ops mx1_videobuf_ops = {
static void mx1_camera_init_videobuf(struct videobuf_queue *q,
struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx1_camera_dev *pcdev = ici->priv;
- videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, icd->dev.parent,
+ videobuf_queue_dma_contig_init(q, &mx1_videobuf_ops, icd->parent,
&pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_NONE,
sizeof(struct mx1_buffer), icd, &icd->video_lock);
@@ -401,7 +400,7 @@ static int mclk_get_divisor(struct mx1_camera_dev *pcdev)
*/
div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1;
- dev_dbg(pcdev->icd->dev.parent,
+ dev_dbg(pcdev->icd->parent,
"System clock %lukHz, target freq %dkHz, divisor %lu\n",
lcdclk / 1000, mclk / 1000, div);
@@ -412,7 +411,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev)
{
unsigned int csicr1 = CSICR1_EN;
- dev_dbg(pcdev->icd->dev.parent, "Activate device\n");
+ dev_dbg(pcdev->icd->parent, "Activate device\n");
clk_enable(pcdev->clk);
@@ -428,7 +427,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev)
static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)
{
- dev_dbg(pcdev->icd->dev.parent, "Deactivate device\n");
+ dev_dbg(pcdev->icd->parent, "Deactivate device\n");
/* Disable all CSI interface */
__raw_writel(0x00, pcdev->base + CSICR1);
@@ -442,29 +441,25 @@ static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)
*/
static int mx1_camera_add_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx1_camera_dev *pcdev = ici->priv;
- int ret;
- if (pcdev->icd) {
- ret = -EBUSY;
- goto ebusy;
- }
+ if (pcdev->icd)
+ return -EBUSY;
- dev_info(icd->dev.parent, "MX1 Camera driver attached to camera %d\n",
+ dev_info(icd->parent, "MX1 Camera driver attached to camera %d\n",
icd->devnum);
mx1_camera_activate(pcdev);
pcdev->icd = icd;
-ebusy:
- return ret;
+ return 0;
}
static void mx1_camera_remove_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx1_camera_dev *pcdev = ici->priv;
unsigned int csicr1;
@@ -477,7 +472,7 @@ static void mx1_camera_remove_device(struct soc_camera_device *icd)
/* Stop DMA engine */
imx_dma_disable(pcdev->dma_chan);
- dev_info(icd->dev.parent, "MX1 Camera driver detached from camera %d\n",
+ dev_info(icd->parent, "MX1 Camera driver detached from camera %d\n",
icd->devnum);
mx1_camera_deactivate(pcdev);
@@ -495,7 +490,7 @@ static int mx1_camera_set_crop(struct soc_camera_device *icd,
static int mx1_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx1_camera_dev *pcdev = ici->priv;
unsigned long camera_flags, common_flags;
unsigned int csicr1;
@@ -566,14 +561,14 @@ static int mx1_camera_set_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n",
+ dev_warn(icd->parent, "Format %x not found\n",
pix->pixelformat);
return -EINVAL;
}
buswidth = xlate->host_fmt->bits_per_sample;
if (buswidth > 8) {
- dev_warn(icd->dev.parent,
+ dev_warn(icd->parent,
"bits-per-sample %d for format %x unsupported\n",
buswidth, pix->pixelformat);
return -EINVAL;
@@ -613,7 +608,7 @@ static int mx1_camera_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n",
+ dev_warn(icd->parent, "Format %x not found\n",
pix->pixelformat);
return -EINVAL;
}
@@ -680,7 +675,6 @@ static int mx1_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, "i.MX1/i.MXL Camera", sizeof(cap->card));
- cap->version = VERSION_CODE;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
@@ -887,4 +881,5 @@ module_exit(mx1_camera_exit);
MODULE_DESCRIPTION("i.MX1/i.MXL SoC Camera Host driver");
MODULE_AUTHOR("Paulius Zaleckas <paulius.zaleckas@teltonika.lt>");
MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRIVER_VERSION);
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index 4eab1c62031..ec2410c0c80 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -23,7 +23,6 @@
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/time.h>
-#include <linux/version.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
@@ -47,7 +46,7 @@
#include <asm/dma.h>
#define MX2_CAM_DRV_NAME "mx2-camera"
-#define MX2_CAM_VERSION_CODE KERNEL_VERSION(0, 0, 5)
+#define MX2_CAM_VERSION "0.0.6"
#define MX2_CAM_DRIVER_DESCRIPTION "i.MX2x_Camera"
/* reset values */
@@ -278,7 +277,7 @@ static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev)
*/
static int mx2_camera_add_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
int ret;
u32 csicr1;
@@ -303,7 +302,7 @@ static int mx2_camera_add_device(struct soc_camera_device *icd)
pcdev->icd = icd;
- dev_info(icd->dev.parent, "Camera driver attached to camera %d\n",
+ dev_info(icd->parent, "Camera driver attached to camera %d\n",
icd->devnum);
return 0;
@@ -311,12 +310,12 @@ static int mx2_camera_add_device(struct soc_camera_device *icd)
static void mx2_camera_remove_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
BUG_ON(icd != pcdev->icd);
- dev_info(icd->dev.parent, "Camera driver detached from camera %d\n",
+ dev_info(icd->parent, "Camera driver detached from camera %d\n",
icd->devnum);
mx2_camera_deactivate(pcdev);
@@ -437,7 +436,7 @@ static int mx2_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
- dev_dbg(&icd->dev, "count=%d, size=%d\n", *count, *size);
+ dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
if (bytes_per_line < 0)
return bytes_per_line;
@@ -457,7 +456,7 @@ static void free_buffer(struct videobuf_queue *vq, struct mx2_buffer *buf)
struct soc_camera_device *icd = vq->priv_data;
struct videobuf_buffer *vb = &buf->vb;
- dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
/*
@@ -467,7 +466,7 @@ static void free_buffer(struct videobuf_queue *vq, struct mx2_buffer *buf)
videobuf_waiton(vq, vb, 0, 0);
videobuf_dma_contig_free(vq, vb);
- dev_dbg(&icd->dev, "%s freed\n", __func__);
+ dev_dbg(icd->parent, "%s freed\n", __func__);
vb->state = VIDEOBUF_NEEDS_INIT;
}
@@ -481,7 +480,7 @@ static int mx2_videobuf_prepare(struct videobuf_queue *vq,
icd->current_fmt->host_fmt);
int ret = 0;
- dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
if (bytes_per_line < 0)
@@ -533,12 +532,12 @@ static void mx2_videobuf_queue(struct videobuf_queue *vq,
{
struct soc_camera_device *icd = vq->priv_data;
struct soc_camera_host *ici =
- to_soc_camera_host(icd->dev.parent);
+ to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
unsigned long flags;
- dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
spin_lock_irqsave(&pcdev->lock, flags);
@@ -611,27 +610,27 @@ static void mx2_videobuf_release(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
unsigned long flags;
#ifdef DEBUG
- dev_dbg(&icd->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
switch (vb->state) {
case VIDEOBUF_ACTIVE:
- dev_info(&icd->dev, "%s (active)\n", __func__);
+ dev_info(icd->parent, "%s (active)\n", __func__);
break;
case VIDEOBUF_QUEUED:
- dev_info(&icd->dev, "%s (queued)\n", __func__);
+ dev_info(icd->parent, "%s (queued)\n", __func__);
break;
case VIDEOBUF_PREPARED:
- dev_info(&icd->dev, "%s (prepared)\n", __func__);
+ dev_info(icd->parent, "%s (prepared)\n", __func__);
break;
default:
- dev_info(&icd->dev, "%s (unknown) %d\n", __func__,
+ dev_info(icd->parent, "%s (unknown) %d\n", __func__,
vb->state);
break;
}
@@ -678,7 +677,7 @@ static struct videobuf_queue_ops mx2_videobuf_ops = {
static void mx2_camera_init_videobuf(struct videobuf_queue *q,
struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
videobuf_queue_dma_contig_init(q, &mx2_videobuf_ops, pcdev->dev,
@@ -719,7 +718,7 @@ static void mx27_camera_emma_buf_init(struct soc_camera_device *icd,
int bytesperline)
{
struct soc_camera_host *ici =
- to_soc_camera_host(icd->dev.parent);
+ to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
writel(pcdev->discard_buffer_dma,
@@ -772,7 +771,7 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd,
__u32 pixfmt)
{
struct soc_camera_host *ici =
- to_soc_camera_host(icd->dev.parent);
+ to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
unsigned long camera_flags, common_flags;
int ret = 0;
@@ -891,7 +890,7 @@ static int mx2_camera_set_crop(struct soc_camera_device *icd,
if (ret < 0)
return ret;
- dev_dbg(icd->dev.parent, "Sensor cropped %dx%d\n",
+ dev_dbg(icd->parent, "Sensor cropped %dx%d\n",
mf.width, mf.height);
icd->user_width = mf.width;
@@ -911,7 +910,7 @@ static int mx2_camera_set_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n",
+ dev_warn(icd->parent, "Format %x not found\n",
pix->pixelformat);
return -EINVAL;
}
@@ -951,7 +950,7 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (pixfmt && !xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt);
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
return -EINVAL;
}
@@ -974,11 +973,16 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
if (pix->bytesperline < 0)
return pix->bytesperline;
pix->sizeimage = pix->height * pix->bytesperline;
- if (pix->sizeimage > (4 * 0x3ffff)) { /* CSIRXCNT limit */
- dev_warn(icd->dev.parent,
- "Image size (%u) above limit\n",
- pix->sizeimage);
- return -EINVAL;
+ /* Check against the CSIRXCNT limit */
+ if (pix->sizeimage > 4 * 0x3ffff) {
+ /* Adjust geometry, preserve aspect ratio */
+ unsigned int new_height = int_sqrt(4 * 0x3ffff *
+ pix->height / pix->bytesperline);
+ pix->width = new_height * pix->width / pix->height;
+ pix->height = new_height;
+ pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
+ xlate->host_fmt);
+ BUG_ON(pix->bytesperline < 0);
}
}
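
Illustration (not part of the patch): instead of rejecting an oversized request, the hunk above now shrinks the geometry so height * bytesperline fits the 4 * 0x3ffff byte CSIRXCNT limit while preserving the aspect ratio: new_height is the integer square root of limit * height / bytesperline, and the width scales with it. A worked example with made-up numbers; int_sqrt() is replaced here by a naive loop:

#include <stdio.h>

static unsigned int isqrt(unsigned long n)
{
        unsigned long r = 0;

        while ((r + 1) * (r + 1) <= n)
                r++;
        return r;
}

int main(void)
{
        unsigned int width = 1280, height = 960, bpp = 2;   /* e.g. a 16bpp format */
        unsigned long bytesperline = width * bpp;
        unsigned long limit = 4 * 0x3ffff;                  /* 1048572 bytes */

        if (height * bytesperline > limit) {
                unsigned int new_height = isqrt(limit * height / bytesperline);

                width = new_height * width / height;
                height = new_height;
                bytesperline = width * bpp;
        }
        /* 1280x960 shrinks to 836x627; 627 * 1672 = 1048344 <= 1048572 */
        printf("%ux%u, sizeimage=%lu\n", width, height, height * bytesperline);
        return 0;
}
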
@@ -996,7 +1000,7 @@ static int mx2_camera_try_fmt(struct soc_camera_device *icd,
if (mf.field == V4L2_FIELD_ANY)
mf.field = V4L2_FIELD_NONE;
if (mf.field != V4L2_FIELD_NONE) {
- dev_err(icd->dev.parent, "Field type %d unsupported.\n",
+ dev_err(icd->parent, "Field type %d unsupported.\n",
mf.field);
return -EINVAL;
}
@@ -1014,7 +1018,6 @@ static int mx2_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card));
- cap->version = MX2_CAM_VERSION_CODE;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
@@ -1523,3 +1526,4 @@ module_exit(mx2_camera_exit);
MODULE_DESCRIPTION("i.MX27/i.MX25 SoC Camera Host driver");
MODULE_AUTHOR("Sascha Hauer <sha@pengutronix.de>");
MODULE_LICENSE("GPL");
+MODULE_VERSION(MX2_CAM_VERSION);
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index c7680eb8366..c045b47803a 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
@@ -195,7 +194,7 @@ static int mx3_videobuf_setup(struct vb2_queue *vq,
unsigned long sizes[], void *alloc_ctxs[])
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
@@ -224,7 +223,7 @@ static int mx3_videobuf_setup(struct vb2_queue *vq,
static int mx3_videobuf_prepare(struct vb2_buffer *vb)
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
struct scatterlist *sg;
@@ -242,7 +241,7 @@ static int mx3_videobuf_prepare(struct vb2_buffer *vb)
new_size = bytes_per_line * icd->user_height;
if (vb2_plane_size(vb, 0) < new_size) {
- dev_err(icd->dev.parent, "Buffer too small (%lu < %zu)\n",
+ dev_err(icd->parent, "Buffer too small (%lu < %zu)\n",
vb2_plane_size(vb, 0), new_size);
return -ENOBUFS;
}
@@ -284,7 +283,7 @@ static enum pixel_fmt fourcc_to_ipu_pix(__u32 fourcc)
static void mx3_videobuf_queue(struct vb2_buffer *vb)
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct mx3_camera_buffer *buf = to_mx3_vb(vb);
struct dma_async_tx_descriptor *txd = buf->txd;
@@ -337,7 +336,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
spin_unlock_irq(&mx3_cam->lock);
cookie = txd->tx_submit(txd);
- dev_dbg(icd->dev.parent, "Submitted cookie %d DMA 0x%08x\n",
+ dev_dbg(icd->parent, "Submitted cookie %d DMA 0x%08x\n",
cookie, sg_dma_address(&buf->sg));
if (cookie >= 0)
@@ -358,13 +357,13 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
static void mx3_videobuf_release(struct vb2_buffer *vb)
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct mx3_camera_buffer *buf = to_mx3_vb(vb);
struct dma_async_tx_descriptor *txd = buf->txd;
unsigned long flags;
- dev_dbg(icd->dev.parent,
+ dev_dbg(icd->parent,
"Release%s DMA 0x%08x, queue %sempty\n",
mx3_cam->active == buf ? " active" : "", sg_dma_address(&buf->sg),
list_empty(&buf->queue) ? "" : "not ");
@@ -403,7 +402,7 @@ static int mx3_videobuf_init(struct vb2_buffer *vb)
static int mx3_stop_streaming(struct vb2_queue *q)
{
struct soc_camera_device *icd = soc_camera_from_vb2q(q);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
struct dma_chan *chan;
@@ -499,7 +498,7 @@ static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
clk_enable(mx3_cam->clk);
rate = clk_round_rate(mx3_cam->clk, mx3_cam->mclk);
- dev_dbg(icd->dev.parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate);
+ dev_dbg(icd->parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate);
if (rate)
clk_set_rate(mx3_cam->clk, rate);
}
@@ -507,7 +506,7 @@ static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
/* Called with .video_lock held */
static int mx3_camera_add_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
if (mx3_cam->icd)
@@ -517,7 +516,7 @@ static int mx3_camera_add_device(struct soc_camera_device *icd)
mx3_cam->icd = icd;
- dev_info(icd->dev.parent, "MX3 Camera driver attached to camera %d\n",
+ dev_info(icd->parent, "MX3 Camera driver attached to camera %d\n",
icd->devnum);
return 0;
@@ -526,7 +525,7 @@ static int mx3_camera_add_device(struct soc_camera_device *icd)
/* Called with .video_lock held */
static void mx3_camera_remove_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct idmac_channel **ichan = &mx3_cam->idmac_channel[0];
@@ -541,7 +540,7 @@ static void mx3_camera_remove_device(struct soc_camera_device *icd)
mx3_cam->icd = NULL;
- dev_info(icd->dev.parent, "MX3 Camera driver detached from camera %d\n",
+ dev_info(icd->parent, "MX3 Camera driver detached from camera %d\n",
icd->devnum);
}
@@ -608,12 +607,12 @@ static int test_platform_param(struct mx3_camera_dev *mx3_cam,
static int mx3_camera_try_bus_param(struct soc_camera_device *icd,
const unsigned int depth)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
unsigned long bus_flags, camera_flags;
int ret = test_platform_param(mx3_cam, depth, &bus_flags);
- dev_dbg(icd->dev.parent, "request bus width %d bit: %d\n", depth, ret);
+ dev_dbg(icd->parent, "request bus width %d bit: %d\n", depth, ret);
if (ret < 0)
return ret;
@@ -622,7 +621,7 @@ static int mx3_camera_try_bus_param(struct soc_camera_device *icd,
ret = soc_camera_bus_param_compatible(camera_flags, bus_flags);
if (ret < 0)
- dev_warn(icd->dev.parent,
+ dev_warn(icd->parent,
"Flags incompatible: camera %lx, host %lx\n",
camera_flags, bus_flags);
@@ -676,7 +675,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
struct soc_camera_format_xlate *xlate)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
int formats = 0, ret;
enum v4l2_mbus_pixelcode code;
const struct soc_mbus_pixelfmt *fmt;
@@ -688,7 +687,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
fmt = soc_mbus_get_fmtdesc(code);
if (!fmt) {
- dev_warn(icd->dev.parent,
+ dev_warn(icd->parent,
"Unsupported format code #%u: %d\n", idx, code);
return 0;
}
@@ -816,7 +815,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
struct v4l2_crop *a)
{
struct v4l2_rect *rect = &a->c;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct v4l2_mbus_framefmt mf;
@@ -849,7 +848,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
configure_geometry(mx3_cam, mf.width, mf.height,
icd->current_fmt->host_fmt);
- dev_dbg(icd->dev.parent, "Sensor cropped %dx%d\n",
+ dev_dbg(icd->parent, "Sensor cropped %dx%d\n",
mf.width, mf.height);
icd->user_width = mf.width;
@@ -861,7 +860,7 @@ static int mx3_camera_set_crop(struct soc_camera_device *icd,
static int mx3_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate;
@@ -871,13 +870,13 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n",
+ dev_warn(icd->parent, "Format %x not found\n",
pix->pixelformat);
return -EINVAL;
}
stride_align(&pix->width);
- dev_dbg(icd->dev.parent, "Set format %dx%d\n", pix->width, pix->height);
+ dev_dbg(icd->parent, "Set format %dx%d\n", pix->width, pix->height);
/*
* Might have to perform a complete interface initialisation like in
@@ -913,13 +912,7 @@ static int mx3_camera_set_fmt(struct soc_camera_device *icd,
pix->colorspace = mf.colorspace;
icd->current_fmt = xlate;
- pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
- xlate->host_fmt);
- if (pix->bytesperline < 0)
- return pix->bytesperline;
- pix->sizeimage = pix->height * pix->bytesperline;
-
- dev_dbg(icd->dev.parent, "Sensor set %dx%d\n", pix->width, pix->height);
+ dev_dbg(icd->parent, "Sensor set %dx%d\n", pix->width, pix->height);
return ret;
}
@@ -936,7 +929,7 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (pixfmt && !xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt);
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
return -EINVAL;
}
@@ -946,12 +939,6 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
if (pix->width > 4096)
pix->width = 4096;
- pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
- xlate->host_fmt);
- if (pix->bytesperline < 0)
- return pix->bytesperline;
- pix->sizeimage = pix->height * pix->bytesperline;
-
/* limit to sensor capabilities */
mf.width = pix->width;
mf.height = pix->height;
@@ -974,7 +961,7 @@ static int mx3_camera_try_fmt(struct soc_camera_device *icd,
case V4L2_FIELD_NONE:
break;
default:
- dev_err(icd->dev.parent, "Field type %d unsupported.\n",
+ dev_err(icd->parent, "Field type %d unsupported.\n",
mf.field);
ret = -EINVAL;
}
@@ -1000,7 +987,6 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the firendly caller:-> */
strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card));
- cap->version = KERNEL_VERSION(0, 2, 2);
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
@@ -1008,7 +994,7 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx3_camera_dev *mx3_cam = ici->priv;
unsigned long bus_flags, camera_flags, common_flags;
u32 dw, sens_conf;
@@ -1016,7 +1002,7 @@ static int mx3_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
int buswidth;
int ret;
const struct soc_camera_format_xlate *xlate;
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
fmt = soc_mbus_get_fmtdesc(icd->current_fmt->code);
if (!fmt)
@@ -1325,4 +1311,5 @@ module_exit(mx3_camera_exit);
MODULE_DESCRIPTION("i.MX3x SoC Camera Host driver");
MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.2.3");
MODULE_ALIAS("platform:" MX3_CAM_DRV_NAME);
diff --git a/drivers/media/video/omap/Kconfig b/drivers/media/video/omap/Kconfig
index e63233fd2aa..390ab094f9f 100644
--- a/drivers/media/video/omap/Kconfig
+++ b/drivers/media/video/omap/Kconfig
@@ -1,11 +1,14 @@
+config VIDEO_OMAP2_VOUT_VRFB
+ bool
+
config VIDEO_OMAP2_VOUT
tristate "OMAP2/OMAP3 V4L2-Display driver"
depends on ARCH_OMAP2 || ARCH_OMAP3
select VIDEOBUF_GEN
select VIDEOBUF_DMA_CONTIG
select OMAP2_DSS
- select OMAP2_VRAM
- select OMAP2_VRFB
+ select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
+ select VIDEO_OMAP2_VOUT_VRFB if VIDEO_OMAP2_VOUT && OMAP2_VRFB
default n
---help---
V4L2 Display driver support for OMAP2/3 based boards.
diff --git a/drivers/media/video/omap/Makefile b/drivers/media/video/omap/Makefile
index b28788070ae..fc410b438f7 100644
--- a/drivers/media/video/omap/Makefile
+++ b/drivers/media/video/omap/Makefile
@@ -4,4 +4,5 @@
# OMAP2/3 Display driver
omap-vout-y := omap_vout.o omap_voutlib.o
+omap-vout-$(CONFIG_VIDEO_OMAP2_VOUT_VRFB) += omap_vout_vrfb.o
obj-$(CONFIG_VIDEO_OMAP2_VOUT) += omap-vout.o
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 4ada9be1d43..b5ef3622244 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -35,28 +35,26 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
#include <linux/irq.h>
#include <linux/videodev2.h>
-#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include <media/videobuf-dma-contig.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <plat/dma.h>
-#include <plat/vram.h>
#include <plat/vrfb.h>
#include <video/omapdss.h>
#include "omap_voutlib.h"
#include "omap_voutdef.h"
+#include "omap_vout_vrfb.h"
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("OMAP Video for Linux Video out driver");
MODULE_LICENSE("GPL");
-
/* Driver Configuration macros */
#define VOUT_NAME "omap_vout"
@@ -65,31 +63,6 @@ enum omap_vout_channels {
OMAP_VIDEO2,
};
-enum dma_channel_state {
- DMA_CHAN_NOT_ALLOTED,
- DMA_CHAN_ALLOTED,
-};
-
-#define QQVGA_WIDTH 160
-#define QQVGA_HEIGHT 120
-
-/* Max Resolution supported by the driver */
-#define VID_MAX_WIDTH 1280 /* Largest width */
-#define VID_MAX_HEIGHT 720 /* Largest height */
-
-/* Mimimum requirement is 2x2 for DSS */
-#define VID_MIN_WIDTH 2
-#define VID_MIN_HEIGHT 2
-
-/* 2048 x 2048 is max res supported by OMAP display controller */
-#define MAX_PIXELS_PER_LINE 2048
-
-#define VRFB_TX_TIMEOUT 1000
-#define VRFB_NUM_BUFS 4
-
-/* Max buffer size tobe allocated during init */
-#define OMAP_VOUT_MAX_BUF_SIZE (VID_MAX_WIDTH*VID_MAX_HEIGHT*4)
-
static struct videobuf_queue_ops video_vbq_ops;
/* Variables configurable through module params*/
static u32 video1_numbuffers = 3;
@@ -129,7 +102,7 @@ module_param(debug, bool, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
/* list of image formats supported by OMAP2 video pipelines */
-const static struct v4l2_fmtdesc omap_formats[] = {
+static const struct v4l2_fmtdesc omap_formats[] = {
{
/* Note: V4L2 defines RGB565 as:
*
@@ -172,84 +145,6 @@ const static struct v4l2_fmtdesc omap_formats[] = {
#define NUM_OUTPUT_FORMATS (ARRAY_SIZE(omap_formats))
/*
- * Allocate buffers
- */
-static unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
-{
- u32 order, size;
- unsigned long virt_addr, addr;
-
- size = PAGE_ALIGN(buf_size);
- order = get_order(size);
- virt_addr = __get_free_pages(GFP_KERNEL | GFP_DMA, order);
- addr = virt_addr;
-
- if (virt_addr) {
- while (size > 0) {
- SetPageReserved(virt_to_page(addr));
- addr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- }
- *phys_addr = (u32) virt_to_phys((void *) virt_addr);
- return virt_addr;
-}
-
-/*
- * Free buffers
- */
-static void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
-{
- u32 order, size;
- unsigned long addr = virtaddr;
-
- size = PAGE_ALIGN(buf_size);
- order = get_order(size);
-
- while (size > 0) {
- ClearPageReserved(virt_to_page(addr));
- addr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- free_pages((unsigned long) virtaddr, order);
-}
-
-/*
- * Function for allocating video buffers
- */
-static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
- unsigned int *count, int startindex)
-{
- int i, j;
-
- for (i = 0; i < *count; i++) {
- if (!vout->smsshado_virt_addr[i]) {
- vout->smsshado_virt_addr[i] =
- omap_vout_alloc_buffer(vout->smsshado_size,
- &vout->smsshado_phy_addr[i]);
- }
- if (!vout->smsshado_virt_addr[i] && startindex != -1) {
- if (V4L2_MEMORY_MMAP == vout->memory && i >= startindex)
- break;
- }
- if (!vout->smsshado_virt_addr[i]) {
- for (j = 0; j < i; j++) {
- omap_vout_free_buffer(
- vout->smsshado_virt_addr[j],
- vout->smsshado_size);
- vout->smsshado_virt_addr[j] = 0;
- vout->smsshado_phy_addr[j] = 0;
- }
- *count = 0;
- return -ENOMEM;
- }
- memset((void *) vout->smsshado_virt_addr[i], 0,
- vout->smsshado_size);
- }
- return 0;
-}
-
-/*
* Try format
*/
static int omap_vout_try_format(struct v4l2_pix_format *pix)
@@ -342,73 +237,9 @@ static u32 omap_vout_uservirt_to_phys(u32 virtp)
}
/*
- * Wakes up the application once the DMA transfer to VRFB space is completed.
- */
-static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
-{
- struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
-
- t->tx_status = 1;
- wake_up_interruptible(&t->wait);
-}
-
-/*
- * Release the VRFB context once the module exits
- */
-static void omap_vout_release_vrfb(struct omap_vout_device *vout)
-{
- int i;
-
- for (i = 0; i < VRFB_NUM_BUFS; i++)
- omap_vrfb_release_ctx(&vout->vrfb_context[i]);
-
- if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
- vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
- omap_free_dma(vout->vrfb_dma_tx.dma_ch);
- }
-}
-
-/*
- * Return true if rotation is 90 or 270
- */
-static inline int rotate_90_or_270(const struct omap_vout_device *vout)
-{
- return (vout->rotation == dss_rotation_90_degree ||
- vout->rotation == dss_rotation_270_degree);
-}
-
-/*
- * Return true if rotation is enabled
- */
-static inline int rotation_enabled(const struct omap_vout_device *vout)
-{
- return vout->rotation || vout->mirror;
-}
-
-/*
- * Reverse the rotation degree if mirroring is enabled
- */
-static inline int calc_rotation(const struct omap_vout_device *vout)
-{
- if (!vout->mirror)
- return vout->rotation;
-
- switch (vout->rotation) {
- case dss_rotation_90_degree:
- return dss_rotation_270_degree;
- case dss_rotation_270_degree:
- return dss_rotation_90_degree;
- case dss_rotation_180_degree:
- return dss_rotation_0_degree;
- default:
- return dss_rotation_180_degree;
- }
-}
-
-/*
* Free the V4L2 buffers
*/
-static void omap_vout_free_buffers(struct omap_vout_device *vout)
+void omap_vout_free_buffers(struct omap_vout_device *vout)
{
int i, numbuffers;
@@ -425,52 +256,6 @@ static void omap_vout_free_buffers(struct omap_vout_device *vout)
}
/*
- * Free VRFB buffers
- */
-static void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
-{
- int j;
-
- for (j = 0; j < VRFB_NUM_BUFS; j++) {
- omap_vout_free_buffer(vout->smsshado_virt_addr[j],
- vout->smsshado_size);
- vout->smsshado_virt_addr[j] = 0;
- vout->smsshado_phy_addr[j] = 0;
- }
-}
-
-/*
- * Allocate the buffers for the VRFB space. Data is copied from V4L2
- * buffers to the VRFB buffers using the DMA engine.
- */
-static int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
- unsigned int *count, unsigned int startindex)
-{
- int i;
- bool yuv_mode;
-
- /* Allocate the VRFB buffers only if the buffers are not
- * allocated during init time.
- */
- if ((rotation_enabled(vout)) && !vout->vrfb_static_allocation)
- if (omap_vout_allocate_vrfb_buffers(vout, count, startindex))
- return -ENOMEM;
-
- if (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
- vout->dss_mode == OMAP_DSS_COLOR_UYVY)
- yuv_mode = true;
- else
- yuv_mode = false;
-
- for (i = 0; i < *count; i++)
- omap_vrfb_setup(&vout->vrfb_context[i],
- vout->smsshado_phy_addr[i], vout->pix.width,
- vout->pix.height, vout->bpp, yuv_mode);
-
- return 0;
-}
-
-/*
* Convert V4L2 rotation to DSS rotation
* V4L2 understand 0, 90, 180, 270.
* Convert to 0, 1, 2 and 3 respectively for DSS
@@ -499,124 +284,38 @@ static int v4l2_rot_to_dss_rot(int v4l2_rotation,
return ret;
}
-/*
- * Calculate the buffer offsets from which the streaming should
- * start. This offset calculation is mainly required because of
- * the VRFB 32 pixels alignment with rotation.
- */
static int omap_vout_calculate_offset(struct omap_vout_device *vout)
{
- struct omap_overlay *ovl;
- enum dss_rotation rotation;
struct omapvideo_info *ovid;
- bool mirroring = vout->mirror;
- struct omap_dss_device *cur_display;
struct v4l2_rect *crop = &vout->crop;
struct v4l2_pix_format *pix = &vout->pix;
int *cropped_offset = &vout->cropped_offset;
- int vr_ps = 1, ps = 2, temp_ps = 2;
- int offset = 0, ctop = 0, cleft = 0, line_length = 0;
+ int ps = 2, line_length = 0;
ovid = &vout->vid_info;
- ovl = ovid->overlays[0];
- /* get the display device attached to the overlay */
- if (!ovl->manager || !ovl->manager->device)
- return -1;
- cur_display = ovl->manager->device;
- rotation = calc_rotation(vout);
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ omap_vout_calculate_vrfb_offset(vout);
+ } else {
+ vout->line_length = line_length = pix->width;
- if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
- V4L2_PIX_FMT_UYVY == pix->pixelformat) {
- if (rotation_enabled(vout)) {
- /*
- * ps - Actual pixel size for YUYV/UYVY for
- * VRFB/Mirroring is 4 bytes
- * vr_ps - Virtually pixel size for YUYV/UYVY is
- * 2 bytes
- */
+ if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
+ V4L2_PIX_FMT_UYVY == pix->pixelformat)
+ ps = 2;
+ else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat)
ps = 4;
- vr_ps = 2;
- } else {
- ps = 2; /* otherwise the pixel size is 2 byte */
- }
- } else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
- ps = 4;
- } else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
- ps = 3;
- }
- vout->ps = ps;
- vout->vr_ps = vr_ps;
-
- if (rotation_enabled(vout)) {
- line_length = MAX_PIXELS_PER_LINE;
- ctop = (pix->height - crop->height) - crop->top;
- cleft = (pix->width - crop->width) - crop->left;
- } else {
- line_length = pix->width;
- }
- vout->line_length = line_length;
- switch (rotation) {
- case dss_rotation_90_degree:
- offset = vout->vrfb_context[0].yoffset *
- vout->vrfb_context[0].bytespp;
- temp_ps = ps / vr_ps;
- if (mirroring == 0) {
- *cropped_offset = offset + line_length *
- temp_ps * cleft + crop->top * temp_ps;
- } else {
- *cropped_offset = offset + line_length * temp_ps *
- cleft + crop->top * temp_ps + (line_length *
- ((crop->width / (vr_ps)) - 1) * ps);
- }
- break;
- case dss_rotation_180_degree:
- offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
- vout->vrfb_context[0].bytespp) +
- (vout->vrfb_context[0].xoffset *
- vout->vrfb_context[0].bytespp));
- if (mirroring == 0) {
- *cropped_offset = offset + (line_length * ps * ctop) +
- (cleft / vr_ps) * ps;
+ else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat)
+ ps = 3;
- } else {
- *cropped_offset = offset + (line_length * ps * ctop) +
- (cleft / vr_ps) * ps + (line_length *
- (crop->height - 1) * ps);
- }
- break;
- case dss_rotation_270_degree:
- offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
- vout->vrfb_context[0].bytespp;
- temp_ps = ps / vr_ps;
- if (mirroring == 0) {
- *cropped_offset = offset + line_length *
- temp_ps * crop->left + ctop * ps;
- } else {
- *cropped_offset = offset + line_length *
- temp_ps * crop->left + ctop * ps +
- (line_length * ((crop->width / vr_ps) - 1) *
- ps);
- }
- break;
- case dss_rotation_0_degree:
- if (mirroring == 0) {
- *cropped_offset = (line_length * ps) *
- crop->top + (crop->left / vr_ps) * ps;
- } else {
- *cropped_offset = (line_length * ps) *
- crop->top + (crop->left / vr_ps) * ps +
- (line_length * (crop->height - 1) * ps);
- }
- break;
- default:
- *cropped_offset = (line_length * ps * crop->top) /
- vr_ps + (crop->left * ps) / vr_ps +
- ((crop->width / vr_ps) - 1) * ps;
- break;
+ vout->ps = ps;
+
+ *cropped_offset = (line_length * ps) *
+ crop->top + crop->left * ps;
}
+
v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "%s Offset:%x\n",
- __func__, *cropped_offset);
+ __func__, vout->cropped_offset);
+
return 0;
}
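For the non-VRFB branch above the cropped offset is plain row-major arithmetic; a minimal worked sketch, assuming a hypothetical 640x480 YUYV frame cropped at left = 8, top = 16 (values are illustrative, not from the patch):

	/* Hypothetical values, for illustration only */
	int line_length = 640;   /* pixels per line               */
	int ps = 2;              /* bytes per pixel for YUYV/UYVY */
	int top = 16, left = 8;  /* crop origin                   */
	int cropped_offset = (line_length * ps) * top + left * ps;
	/* = 640 * 2 * 16 + 8 * 2 = 20496 bytes into the buffer */

The VRFB case cannot use this formula because of the 32-pixel line alignment used for rotation, which is why it is delegated to omap_vout_calculate_vrfb_offset().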
@@ -664,7 +363,7 @@ static int video_mode_to_dss_mode(struct omap_vout_device *vout)
/*
* Setup the overlay
*/
-int omapvid_setup_overlay(struct omap_vout_device *vout,
+static int omapvid_setup_overlay(struct omap_vout_device *vout,
struct omap_overlay *ovl, int posx, int posy, int outw,
int outh, u32 addr)
{
@@ -687,7 +386,7 @@ int omapvid_setup_overlay(struct omap_vout_device *vout,
/* Setup the input plane parameters according to
* rotation value selected.
*/
- if (rotate_90_or_270(vout)) {
+ if (is_rotation_90_or_270(vout)) {
cropheight = vout->crop.width;
cropwidth = vout->crop.height;
pixheight = vout->pix.width;
@@ -711,7 +410,7 @@ int omapvid_setup_overlay(struct omap_vout_device *vout,
info.out_width = outw;
info.out_height = outh;
info.global_alpha = vout->win.global_alpha;
- if (!rotation_enabled(vout)) {
+ if (!is_rotation_enabled(vout)) {
info.rotation = 0;
info.rotation_type = OMAP_DSS_ROT_DMA;
info.screen_width = pixwidth;
@@ -744,7 +443,7 @@ setup_ovl_err:
/*
* Initialize the overlay structure
*/
-int omapvid_init(struct omap_vout_device *vout, u32 addr)
+static int omapvid_init(struct omap_vout_device *vout, u32 addr)
{
int ret = 0, i;
struct v4l2_window *win;
@@ -809,7 +508,7 @@ omapvid_init_err:
/*
* Apply the changes set the go bit of DSS
*/
-int omapvid_apply_changes(struct omap_vout_device *vout)
+static int omapvid_apply_changes(struct omap_vout_device *vout)
{
int i;
struct omap_overlay *ovl;
@@ -825,7 +524,7 @@ int omapvid_apply_changes(struct omap_vout_device *vout)
return 0;
}
-void omap_vout_isr(void *arg, unsigned int irqstatus)
+static void omap_vout_isr(void *arg, unsigned int irqstatus)
{
int ret;
u32 addr, fid;
@@ -848,10 +547,20 @@ void omap_vout_isr(void *arg, unsigned int irqstatus)
spin_lock(&vout->vbq_lock);
do_gettimeofday(&timevalue);
- if (cur_display->type == OMAP_DISPLAY_TYPE_DPI) {
- if (!(irqstatus & DISPC_IRQ_VSYNC))
- goto vout_isr_err;
+ if (cur_display->type != OMAP_DISPLAY_TYPE_VENC) {
+ switch (cur_display->type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+ if (!(irqstatus & (DISPC_IRQ_VSYNC | DISPC_IRQ_VSYNC2)))
+ goto vout_isr_err;
+ break;
+ case OMAP_DISPLAY_TYPE_HDMI:
+ if (!(irqstatus & DISPC_IRQ_EVSYNC_EVEN))
+ goto vout_isr_err;
+ break;
+ default:
+ goto vout_isr_err;
+ }
if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
vout->cur_frm->ts = timevalue;
vout->cur_frm->state = VIDEOBUF_DONE;
@@ -875,7 +584,7 @@ void omap_vout_isr(void *arg, unsigned int irqstatus)
ret = omapvid_init(vout, addr);
if (ret)
printk(KERN_ERR VOUT_NAME
- "failed to set overlay info\n");
+ "failed to set overlay info\n");
/* Enable the pipeline and set the Go bit */
ret = omapvid_apply_changes(vout);
if (ret)
@@ -954,6 +663,7 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
int startindex = 0, i, j;
u32 phy_addr = 0, virt_addr = 0;
struct omap_vout_device *vout = q->priv_data;
+ struct omapvideo_info *ovid = &vout->vid_info;
if (!vout)
return -EINVAL;
@@ -966,13 +676,10 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
if (V4L2_MEMORY_MMAP == vout->memory && *count < startindex)
*count = startindex;
- if ((rotation_enabled(vout)) && *count > VRFB_NUM_BUFS)
- *count = VRFB_NUM_BUFS;
-
- /* If rotation is enabled, allocate memory for VRFB space also */
- if (rotation_enabled(vout))
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
if (omap_vout_vrfb_buffer_setup(vout, count, startindex))
return -ENOMEM;
+ }
if (V4L2_MEMORY_MMAP != vout->memory)
return 0;
@@ -982,14 +689,25 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
startindex = (vout->vid == OMAP_VIDEO1) ?
video1_numbuffers : video2_numbuffers;
+ /* Check the size of the buffer */
+ if (*size > vout->buffer_size) {
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "buffer allocation mismatch [%u] [%u]\n",
+ *size, vout->buffer_size);
+ return -ENOMEM;
+ }
+
for (i = startindex; i < *count; i++) {
vout->buffer_size = *size;
virt_addr = omap_vout_alloc_buffer(vout->buffer_size,
&phy_addr);
if (!virt_addr) {
- if (!rotation_enabled(vout))
+ if (ovid->rotation_type == VOUT_ROT_NONE) {
break;
+ } else {
+ if (!is_rotation_enabled(vout))
+ break;
/* Free the VRFB buffers if no space for V4L2 buffers */
for (j = i; j < *count; j++) {
omap_vout_free_buffer(
@@ -997,6 +715,7 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
vout->smsshado_size);
vout->smsshado_virt_addr[j] = 0;
vout->smsshado_phy_addr[j] = 0;
+ }
}
}
vout->buf_virt_addr[i] = virt_addr;
@@ -1009,9 +728,9 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
/*
* Free the V4L2 buffers additionally allocated than default
- * number of buffers and free all the VRFB buffers
+ * number of buffers
*/
-static void omap_vout_free_allbuffers(struct omap_vout_device *vout)
+static void omap_vout_free_extra_buffers(struct omap_vout_device *vout)
{
int num_buffers = 0, i;
@@ -1026,20 +745,6 @@ static void omap_vout_free_allbuffers(struct omap_vout_device *vout)
vout->buf_virt_addr[i] = 0;
vout->buf_phy_addr[i] = 0;
}
- /* Free the VRFB buffers only if they are allocated
- * during reqbufs. Don't free if init time allocated
- */
- if (!vout->vrfb_static_allocation) {
- for (i = 0; i < VRFB_NUM_BUFS; i++) {
- if (vout->smsshado_virt_addr[i]) {
- omap_vout_free_buffer(
- vout->smsshado_virt_addr[i],
- vout->smsshado_size);
- vout->smsshado_virt_addr[i] = 0;
- vout->smsshado_phy_addr[i] = 0;
- }
- }
- }
vout->buffer_allocated = num_buffers;
}
@@ -1051,16 +756,11 @@ static void omap_vout_free_allbuffers(struct omap_vout_device *vout)
* buffer into VRFB memory space before giving it to the DSS.
*/
static int omap_vout_buffer_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
+ struct videobuf_buffer *vb,
+ enum v4l2_field field)
{
- dma_addr_t dmabuf;
- struct vid_vrfb_dma *tx;
- enum dss_rotation rotation;
struct omap_vout_device *vout = q->priv_data;
- u32 dest_frame_index = 0, src_element_index = 0;
- u32 dest_element_index = 0, src_frame_index = 0;
- u32 elem_count = 0, frame_count = 0, pixsize = 2;
+ struct omapvideo_info *ovid = &vout->vid_info;
if (VIDEOBUF_NEEDS_INIT == vb->state) {
vb->width = vout->pix.width;
@@ -1079,66 +779,24 @@ static int omap_vout_buffer_prepare(struct videobuf_queue *q,
vout->queued_buf_addr[vb->i] = (u8 *)
omap_vout_uservirt_to_phys(vb->baddr);
} else {
- vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i];
- }
+ u32 addr, dma_addr;
+ unsigned long size;
- if (!rotation_enabled(vout))
- return 0;
+ addr = (unsigned long) vout->buf_virt_addr[vb->i];
+ size = (unsigned long) vb->size;
- dmabuf = vout->buf_phy_addr[vb->i];
- /* If rotation is enabled, copy input buffer into VRFB
- * memory space using DMA. We are copying input buffer
- * into VRFB memory space of desired angle and DSS will
- * read image VRFB memory for 0 degree angle
- */
- pixsize = vout->bpp * vout->vrfb_bpp;
- /*
- * DMA transfer in double index mode
- */
+ dma_addr = dma_map_single(vout->vid_dev->v4l2_dev.dev, (void *) addr,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(vout->vid_dev->v4l2_dev.dev, dma_addr))
+ v4l2_err(&vout->vid_dev->v4l2_dev, "dma_map_single failed\n");
- /* Frame index */
- dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
- (vout->pix.width * vout->bpp)) + 1;
-
- /* Source and destination parameters */
- src_element_index = 0;
- src_frame_index = 0;
- dest_element_index = 1;
- /* Number of elements per frame */
- elem_count = vout->pix.width * vout->bpp;
- frame_count = vout->pix.height;
- tx = &vout->vrfb_dma_tx;
- tx->tx_status = 0;
- omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
- (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
- tx->dev_id, 0x0);
- /* src_port required only for OMAP1 */
- omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- dmabuf, src_element_index, src_frame_index);
- /*set dma source burst mode for VRFB */
- omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
- rotation = calc_rotation(vout);
-
- /* dest_port required only for OMAP1 */
- omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
- vout->vrfb_context[vb->i].paddr[0], dest_element_index,
- dest_frame_index);
- /*set dma dest burst mode for VRFB */
- omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
- omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
-
- omap_start_dma(tx->dma_ch);
- interruptible_sleep_on_timeout(&tx->wait, VRFB_TX_TIMEOUT);
-
- if (tx->tx_status == 0) {
- omap_stop_dma(tx->dma_ch);
- return -EINVAL;
+ vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i];
}
- /* Store buffers physical address into an array. Addresses
- * from this array will be used to configure DSS */
- vout->queued_buf_addr[vb->i] = (u8 *)
- vout->vrfb_context[vb->i].paddr[rotation];
- return 0;
+
+ if (ovid->rotation_type == VOUT_ROT_VRFB)
+ return omap_vout_prepare_vrfb(vout, vb);
+ else
+ return 0;
}
/*
@@ -1228,6 +886,14 @@ static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
(vma->vm_pgoff << PAGE_SHIFT));
return -EINVAL;
}
+ /* Check the size of the buffer */
+ if (size > vout->buffer_size) {
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "insufficient memory [%lu] [%u]\n",
+ size, vout->buffer_size);
+ return -ENOMEM;
+ }
+
q->bufs[i]->baddr = vma->vm_start;
vma->vm_flags |= VM_RESERVED;
@@ -1282,7 +948,15 @@ static int omap_vout_release(struct file *file)
"Unable to apply changes\n");
/* Free all buffers */
- omap_vout_free_allbuffers(vout);
+ omap_vout_free_extra_buffers(vout);
+
+ /* Free the VRFB buffers only if they are allocated
+ * during reqbufs. Don't free if init time allocated
+ */
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ if (!vout->vrfb_static_allocation)
+ omap_vout_free_vrfb_buffers(vout);
+ }
videobuf_mmap_free(q);
/* Even if apply changes fails we should continue
@@ -1291,7 +965,7 @@ static int omap_vout_release(struct file *file)
u32 mask = 0;
mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN |
- DISPC_IRQ_EVSYNC_ODD;
+ DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_VSYNC2;
omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
vout->streaming = 0;
@@ -1367,10 +1041,7 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
struct v4l2_fmtdesc *fmt)
{
int index = fmt->index;
- enum v4l2_buf_type type = fmt->type;
- fmt->index = index;
- fmt->type = type;
if (index >= NUM_OUTPUT_FORMATS)
return -EINVAL;
@@ -1441,7 +1112,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
/* We dont support RGB24-packed mode if vrfb rotation
* is enabled*/
- if ((rotation_enabled(vout)) &&
+ if ((is_rotation_enabled(vout)) &&
f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
ret = -EINVAL;
goto s_fmt_vid_out_exit;
@@ -1449,7 +1120,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
/* get the framebuffer parameters */
- if (rotate_90_or_270(vout)) {
+ if (is_rotation_90_or_270(vout)) {
vout->fbuf.fmt.height = timing->x_res;
vout->fbuf.fmt.width = timing->y_res;
} else {
@@ -1539,10 +1210,7 @@ static int vidioc_enum_fmt_vid_overlay(struct file *file, void *fh,
struct v4l2_fmtdesc *fmt)
{
int index = fmt->index;
- enum v4l2_buf_type type = fmt->type;
- fmt->index = index;
- fmt->type = type;
if (index >= NUM_OUTPUT_FORMATS)
return -EINVAL;
@@ -1629,7 +1297,7 @@ static int vidioc_s_crop(struct file *file, void *fh, struct v4l2_crop *crop)
/* get the display device attached to the overlay */
timing = &ovl->manager->device->panel.timings;
- if (rotate_90_or_270(vout)) {
+ if (is_rotation_90_or_270(vout)) {
vout->fbuf.fmt.height = timing->x_res;
vout->fbuf.fmt.width = timing->y_res;
} else {
@@ -1709,9 +1377,17 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
switch (a->id) {
case V4L2_CID_ROTATE:
{
+ struct omapvideo_info *ovid;
int rotation = a->value;
+ ovid = &vout->vid_info;
+
mutex_lock(&vout->lock);
+ if (rotation && ovid->rotation_type == VOUT_ROT_NONE) {
+ mutex_unlock(&vout->lock);
+ ret = -ERANGE;
+ break;
+ }
if (rotation && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
mutex_unlock(&vout->lock);
@@ -1767,6 +1443,11 @@ static int vidioc_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
ovl = ovid->overlays[0];
mutex_lock(&vout->lock);
+ if (mirror && ovid->rotation_type == VOUT_ROT_NONE) {
+ mutex_unlock(&vout->lock);
+ ret = -ERANGE;
+ break;
+ }
if (mirror && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
mutex_unlock(&vout->lock);
@@ -1877,7 +1558,7 @@ static int vidioc_qbuf(struct file *file, void *fh,
}
}
- if ((rotation_enabled(vout)) &&
+ if ((is_rotation_enabled(vout)) &&
vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) {
v4l2_warn(&vout->vid_dev->v4l2_dev,
"DMA Channel not allocated for Rotation\n");
@@ -1892,15 +1573,28 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
struct omap_vout_device *vout = fh;
struct videobuf_queue *q = &vout->vbq;
+ int ret;
+ u32 addr;
+ unsigned long size;
+ struct videobuf_buffer *vb;
+
+ vb = q->bufs[b->index];
+
if (!vout->streaming)
return -EINVAL;
if (file->f_flags & O_NONBLOCK)
/* Call videobuf_dqbuf for non blocking mode */
- return videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
+ ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
else
/* Call videobuf_dqbuf for blocking mode */
- return videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
+ ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
+
+ addr = (unsigned long) vout->buf_phy_addr[vb->i];
+ size = (unsigned long) vb->size;
+ dma_unmap_single(vout->vid_dev->v4l2_dev.dev, addr,
+ size, DMA_TO_DEVICE);
+ return ret;
}
static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
@@ -1949,7 +1643,8 @@ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
addr = (unsigned long) vout->queued_buf_addr[vout->cur_frm->i]
+ vout->cropped_offset;
- mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
+ | DISPC_IRQ_VSYNC2;
omap_dispc_register_isr(omap_vout_isr, vout, mask);
@@ -1999,7 +1694,8 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
return -EINVAL;
vout->streaming = 0;
- mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD;
+ mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
+ | DISPC_IRQ_VSYNC2;
omap_dispc_unregister_isr(omap_vout_isr, vout, mask);
@@ -2212,7 +1908,8 @@ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
vout->mirror = 0;
vout->control[2].id = V4L2_CID_HFLIP;
vout->control[2].value = 0;
- vout->vrfb_bpp = 2;
+ if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
+ vout->vrfb_bpp = 2;
control[1].id = V4L2_CID_BG_COLOR;
control[1].value = 0;
@@ -2244,17 +1941,15 @@ static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
int vid_num)
{
u32 numbuffers;
- int ret = 0, i, j;
- int image_width, image_height;
- struct video_device *vfd;
+ int ret = 0, i;
+ struct omapvideo_info *ovid;
struct omap_vout_device *vout;
- int static_vrfb_allocation = 0, vrfb_num_bufs = VRFB_NUM_BUFS;
struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
struct omap2video_device *vid_dev =
container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
vout = vid_dev->vouts[vid_num];
- vfd = vout->vfd;
+ ovid = &vout->vid_info;
numbuffers = (vid_num == 0) ? video1_numbuffers : video2_numbuffers;
vout->buffer_size = (vid_num == 0) ? video1_bufsize : video2_bufsize;
@@ -2271,66 +1966,16 @@ static int __init omap_vout_setup_video_bufs(struct platform_device *pdev,
}
}
- for (i = 0; i < VRFB_NUM_BUFS; i++) {
- if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
- dev_info(&pdev->dev, ": VRFB allocation failed\n");
- for (j = 0; j < i; j++)
- omap_vrfb_release_ctx(&vout->vrfb_context[j]);
- ret = -ENOMEM;
- goto free_buffers;
- }
- }
vout->cropped_offset = 0;
- /* Calculate VRFB memory size */
- /* allocate for worst case size */
- image_width = VID_MAX_WIDTH / TILE_SIZE;
- if (VID_MAX_WIDTH % TILE_SIZE)
- image_width++;
-
- image_width = image_width * TILE_SIZE;
- image_height = VID_MAX_HEIGHT / TILE_SIZE;
-
- if (VID_MAX_HEIGHT % TILE_SIZE)
- image_height++;
-
- image_height = image_height * TILE_SIZE;
- vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);
-
- /*
- * Request and Initialize DMA, for DMA based VRFB transfer
- */
- vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
- vout->vrfb_dma_tx.dma_ch = -1;
- vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
- ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
- omap_vout_vrfb_dma_tx_callback,
- (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
- if (ret < 0) {
- vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
- dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
- " video%d\n", vfd->minor);
- }
- init_waitqueue_head(&vout->vrfb_dma_tx.wait);
-
- /* Allocate VRFB buffers if selected through bootargs */
- static_vrfb_allocation = (vid_num == 0) ?
- vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
-
- /* statically allocated the VRFB buffer is done through
- commands line aruments */
- if (static_vrfb_allocation) {
- if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
- ret = -ENOMEM;
- goto release_vrfb_ctx;
- }
- vout->vrfb_static_allocation = 1;
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ int static_vrfb_allocation = (vid_num == 0) ?
+ vid1_static_vrfb_alloc : vid2_static_vrfb_alloc;
+ ret = omap_vout_setup_vrfb_bufs(pdev, vid_num,
+ static_vrfb_allocation);
}
- return 0;
-release_vrfb_ctx:
- for (j = 0; j < VRFB_NUM_BUFS; j++)
- omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+ return ret;
free_buffers:
for (i = 0; i < numbuffers; i++) {
@@ -2373,6 +2018,10 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
vout->vid_info.num_overlays = 1;
vout->vid_info.id = k + 1;
+ /* Set VRFB as rotation_type for omap2 and omap3 */
+ if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ vout->vid_info.rotation_type = VOUT_ROT_VRFB;
+
/* Setup the default configuration for the video devices
*/
if (omap_vout_setup_video_data(vout) != 0) {
@@ -2391,7 +2040,7 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
/* Register the Video device with V4L2
*/
vfd = vout->vfd;
- if (video_register_device(vfd, VFL_TYPE_GRABBER, k + 1) < 0) {
+ if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) {
dev_err(&pdev->dev, ": Could not register "
"Video for Linux device\n");
vfd->minor = -1;
@@ -2406,7 +2055,8 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
goto success;
error2:
- omap_vout_release_vrfb(vout);
+ if (vout->vid_info.rotation_type == VOUT_ROT_VRFB)
+ omap_vout_release_vrfb(vout);
omap_vout_free_buffers(vout);
error1:
video_device_release(vfd);
@@ -2427,11 +2077,13 @@ success:
static void omap_vout_cleanup_device(struct omap_vout_device *vout)
{
struct video_device *vfd;
+ struct omapvideo_info *ovid;
if (!vout)
return;
vfd = vout->vfd;
+ ovid = &vout->vid_info;
if (vfd) {
if (!video_is_registered(vfd)) {
/*
@@ -2447,14 +2099,15 @@ static void omap_vout_cleanup_device(struct omap_vout_device *vout)
video_unregister_device(vfd);
}
}
-
- omap_vout_release_vrfb(vout);
+ if (ovid->rotation_type == VOUT_ROT_VRFB) {
+ omap_vout_release_vrfb(vout);
+ /* Free the VRFB buffer if allocated
+ * init time
+ */
+ if (vout->vrfb_static_allocation)
+ omap_vout_free_vrfb_buffers(vout);
+ }
omap_vout_free_buffers(vout);
- /* Free the VRFB buffer if allocated
- * init time
- */
- if (vout->vrfb_static_allocation)
- omap_vout_free_vrfb_buffers(vout);
kfree(vout);
}
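The dma_map_single()/dma_unmap_single() pairing added in omap_vout_buffer_prepare() and vidioc_dqbuf() above follows the standard streaming DMA-mapping pattern. A self-contained sketch of that pattern is below; the device pointer, buffer and length are placeholders, not the driver's own objects:

	#include <linux/dma-mapping.h>

	/* Illustrative only: hand a CPU-filled buffer to a device for reading. */
	static int example_map_for_device(struct device *dev, void *buf,
					  size_t len, dma_addr_t *handle)
	{
		*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *handle))
			return -ENOMEM;
		/* ...program the device with *handle and start the transfer... */
		return 0;
	}

	/* Unmap with the same length and direction once the device is done,
	 * before the CPU reuses the buffer. */
	static void example_unmap_after_device(struct device *dev,
					       dma_addr_t handle, size_t len)
	{
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	}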
diff --git a/drivers/media/video/omap/omap_vout_vrfb.c b/drivers/media/video/omap/omap_vout_vrfb.c
new file mode 100644
index 00000000000..ebebcac4922
--- /dev/null
+++ b/drivers/media/video/omap/omap_vout_vrfb.c
@@ -0,0 +1,390 @@
+/*
+ * omap_vout_vrfb.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+
+#include <media/videobuf-dma-contig.h>
+#include <media/v4l2-device.h>
+
+#include <plat/dma.h>
+#include <plat/vrfb.h>
+
+#include "omap_voutdef.h"
+#include "omap_voutlib.h"
+
+/*
+ * Function for allocating video buffers
+ */
+static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
+ unsigned int *count, int startindex)
+{
+ int i, j;
+
+ for (i = 0; i < *count; i++) {
+ if (!vout->smsshado_virt_addr[i]) {
+ vout->smsshado_virt_addr[i] =
+ omap_vout_alloc_buffer(vout->smsshado_size,
+ &vout->smsshado_phy_addr[i]);
+ }
+ if (!vout->smsshado_virt_addr[i] && startindex != -1) {
+ if (V4L2_MEMORY_MMAP == vout->memory && i >= startindex)
+ break;
+ }
+ if (!vout->smsshado_virt_addr[i]) {
+ for (j = 0; j < i; j++) {
+ omap_vout_free_buffer(
+ vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+ *count = 0;
+ return -ENOMEM;
+ }
+ memset((void *) vout->smsshado_virt_addr[i], 0,
+ vout->smsshado_size);
+ }
+ return 0;
+}
+
+/*
+ * Wakes up the application once the DMA transfer to VRFB space is completed.
+ */
+static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
+{
+ struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
+
+ t->tx_status = 1;
+ wake_up_interruptible(&t->wait);
+}
+
+/*
+ * Free VRFB buffers
+ */
+void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
+{
+ int j;
+
+ for (j = 0; j < VRFB_NUM_BUFS; j++) {
+ omap_vout_free_buffer(vout->smsshado_virt_addr[j],
+ vout->smsshado_size);
+ vout->smsshado_virt_addr[j] = 0;
+ vout->smsshado_phy_addr[j] = 0;
+ }
+}
+
+int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
+ u32 static_vrfb_allocation)
+{
+ int ret = 0, i, j;
+ struct omap_vout_device *vout;
+ struct video_device *vfd;
+ int image_width, image_height;
+ int vrfb_num_bufs = VRFB_NUM_BUFS;
+ struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
+ struct omap2video_device *vid_dev =
+ container_of(v4l2_dev, struct omap2video_device, v4l2_dev);
+
+ vout = vid_dev->vouts[vid_num];
+ vfd = vout->vfd;
+
+ for (i = 0; i < VRFB_NUM_BUFS; i++) {
+ if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
+ dev_info(&pdev->dev, ": VRFB allocation failed\n");
+ for (j = 0; j < i; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+ ret = -ENOMEM;
+ goto free_buffers;
+ }
+ }
+
+ /* Calculate VRFB memory size */
+ /* allocate for worst case size */
+ image_width = VID_MAX_WIDTH / TILE_SIZE;
+ if (VID_MAX_WIDTH % TILE_SIZE)
+ image_width++;
+
+ image_width = image_width * TILE_SIZE;
+ image_height = VID_MAX_HEIGHT / TILE_SIZE;
+
+ if (VID_MAX_HEIGHT % TILE_SIZE)
+ image_height++;
+
+ image_height = image_height * TILE_SIZE;
+ vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);
+
+ /*
+ * Request and Initialize DMA, for DMA based VRFB transfer
+ */
+ vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
+ vout->vrfb_dma_tx.dma_ch = -1;
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
+ ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
+ omap_vout_vrfb_dma_tx_callback,
+ (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
+ if (ret < 0) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
+ " video%d\n", vfd->minor);
+ }
+ init_waitqueue_head(&vout->vrfb_dma_tx.wait);
+
+	/* Static allocation of the VRFB buffers is done through
+	   command line arguments */
+ if (static_vrfb_allocation) {
+ if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
+ ret = -ENOMEM;
+ goto release_vrfb_ctx;
+ }
+ vout->vrfb_static_allocation = 1;
+ }
+ return 0;
+
+release_vrfb_ctx:
+ for (j = 0; j < VRFB_NUM_BUFS; j++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[j]);
+free_buffers:
+ omap_vout_free_buffers(vout);
+
+ return ret;
+}
+
+/*
+ * Release the VRFB context once the module exits
+ */
+void omap_vout_release_vrfb(struct omap_vout_device *vout)
+{
+ int i;
+
+ for (i = 0; i < VRFB_NUM_BUFS; i++)
+ omap_vrfb_release_ctx(&vout->vrfb_context[i]);
+
+ if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ omap_free_dma(vout->vrfb_dma_tx.dma_ch);
+ }
+}
+
+/*
+ * Allocate the buffers for the VRFB space. Data is copied from V4L2
+ * buffers to the VRFB buffers using the DMA engine.
+ */
+int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+ unsigned int *count, unsigned int startindex)
+{
+ int i;
+ bool yuv_mode;
+
+ if (!is_rotation_enabled(vout))
+ return 0;
+
+ /* If rotation is enabled, allocate memory for VRFB space also */
+ *count = *count > VRFB_NUM_BUFS ? VRFB_NUM_BUFS : *count;
+
+ /* Allocate the VRFB buffers only if the buffers are not
+ * allocated during init time.
+ */
+ if (!vout->vrfb_static_allocation)
+ if (omap_vout_allocate_vrfb_buffers(vout, count, startindex))
+ return -ENOMEM;
+
+ if (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
+ vout->dss_mode == OMAP_DSS_COLOR_UYVY)
+ yuv_mode = true;
+ else
+ yuv_mode = false;
+
+ for (i = 0; i < *count; i++)
+ omap_vrfb_setup(&vout->vrfb_context[i],
+ vout->smsshado_phy_addr[i], vout->pix.width,
+ vout->pix.height, vout->bpp, yuv_mode);
+
+ return 0;
+}
+
+int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+ struct videobuf_buffer *vb)
+{
+ dma_addr_t dmabuf;
+ struct vid_vrfb_dma *tx;
+ enum dss_rotation rotation;
+ u32 dest_frame_index = 0, src_element_index = 0;
+ u32 dest_element_index = 0, src_frame_index = 0;
+ u32 elem_count = 0, frame_count = 0, pixsize = 2;
+
+ if (!is_rotation_enabled(vout))
+ return 0;
+
+ dmabuf = vout->buf_phy_addr[vb->i];
+ /* If rotation is enabled, copy input buffer into VRFB
+ * memory space using DMA. We are copying input buffer
+ * into VRFB memory space of desired angle and DSS will
+ * read image VRFB memory for 0 degree angle
+ */
+ pixsize = vout->bpp * vout->vrfb_bpp;
+ /*
+ * DMA transfer in double index mode
+ */
+
+ /* Frame index */
+ dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
+ (vout->pix.width * vout->bpp)) + 1;
+
+ /* Source and destination parameters */
+ src_element_index = 0;
+ src_frame_index = 0;
+ dest_element_index = 1;
+ /* Number of elements per frame */
+ elem_count = vout->pix.width * vout->bpp;
+ frame_count = vout->pix.height;
+ tx = &vout->vrfb_dma_tx;
+ tx->tx_status = 0;
+ omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
+ (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
+ tx->dev_id, 0x0);
+ /* src_port required only for OMAP1 */
+ omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
+ dmabuf, src_element_index, src_frame_index);
+ /*set dma source burst mode for VRFB */
+ omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+ rotation = calc_rotation(vout);
+
+ /* dest_port required only for OMAP1 */
+ omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
+ vout->vrfb_context[vb->i].paddr[0], dest_element_index,
+ dest_frame_index);
+ /*set dma dest burst mode for VRFB */
+ omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
+ omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
+
+ omap_start_dma(tx->dma_ch);
+ interruptible_sleep_on_timeout(&tx->wait, VRFB_TX_TIMEOUT);
+
+ if (tx->tx_status == 0) {
+ omap_stop_dma(tx->dma_ch);
+ return -EINVAL;
+ }
+ /* Store buffers physical address into an array. Addresses
+ * from this array will be used to configure DSS */
+ vout->queued_buf_addr[vb->i] = (u8 *)
+ vout->vrfb_context[vb->i].paddr[rotation];
+ return 0;
+}
+
+/*
+ * Calculate the buffer offsets from which the streaming should
+ * start. This offset calculation is mainly required because of
+ * the VRFB 32 pixels alignment with rotation.
+ */
+void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout)
+{
+ enum dss_rotation rotation;
+ bool mirroring = vout->mirror;
+ struct v4l2_rect *crop = &vout->crop;
+ struct v4l2_pix_format *pix = &vout->pix;
+ int *cropped_offset = &vout->cropped_offset;
+ int vr_ps = 1, ps = 2, temp_ps = 2;
+ int offset = 0, ctop = 0, cleft = 0, line_length = 0;
+
+ rotation = calc_rotation(vout);
+
+ if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
+ V4L2_PIX_FMT_UYVY == pix->pixelformat) {
+ if (is_rotation_enabled(vout)) {
+ /*
+ * ps - Actual pixel size for YUYV/UYVY for
+ * VRFB/Mirroring is 4 bytes
+ * vr_ps - Virtual pixel size for YUYV/UYVY is
+ * 2 bytes
+ */
+ ps = 4;
+ vr_ps = 2;
+ } else {
+ ps = 2; /* otherwise the pixel size is 2 byte */
+ }
+ } else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
+ ps = 4;
+ } else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
+ ps = 3;
+ }
+ vout->ps = ps;
+ vout->vr_ps = vr_ps;
+
+ if (is_rotation_enabled(vout)) {
+ line_length = MAX_PIXELS_PER_LINE;
+ ctop = (pix->height - crop->height) - crop->top;
+ cleft = (pix->width - crop->width) - crop->left;
+ } else {
+ line_length = pix->width;
+ }
+ vout->line_length = line_length;
+ switch (rotation) {
+ case dss_rotation_90_degree:
+ offset = vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (mirroring == 0) {
+ *cropped_offset = offset + line_length *
+ temp_ps * cleft + crop->top * temp_ps;
+ } else {
+ *cropped_offset = offset + line_length * temp_ps *
+ cleft + crop->top * temp_ps + (line_length *
+ ((crop->width / (vr_ps)) - 1) * ps);
+ }
+ break;
+ case dss_rotation_180_degree:
+ offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
+ vout->vrfb_context[0].bytespp) +
+ (vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp));
+ if (mirroring == 0) {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps;
+
+ } else {
+ *cropped_offset = offset + (line_length * ps * ctop) +
+ (cleft / vr_ps) * ps + (line_length *
+ (crop->height - 1) * ps);
+ }
+ break;
+ case dss_rotation_270_degree:
+ offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
+ vout->vrfb_context[0].bytespp;
+ temp_ps = ps / vr_ps;
+ if (mirroring == 0) {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps;
+ } else {
+ *cropped_offset = offset + line_length *
+ temp_ps * crop->left + ctop * ps +
+ (line_length * ((crop->width / vr_ps) - 1) *
+ ps);
+ }
+ break;
+ case dss_rotation_0_degree:
+ if (mirroring == 0) {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps;
+ } else {
+ *cropped_offset = (line_length * ps) *
+ crop->top + (crop->left / vr_ps) * ps +
+ (line_length * (crop->height - 1) * ps);
+ }
+ break;
+ default:
+ *cropped_offset = (line_length * ps * crop->top) /
+ vr_ps + (crop->left * ps) / vr_ps +
+ ((crop->width / vr_ps) - 1) * ps;
+ break;
+ }
+}
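As a worked sketch of the double-indexed DMA parameters set up in omap_vout_prepare_vrfb() above, assuming a hypothetical 640x480 RGB565 source (bpp = 2, vrfb_bpp = 2) and MAX_PIXELS_PER_LINE = 2048:

	/* Hypothetical values, for illustration only */
	u32 bpp = 2, vrfb_bpp = 2, width = 640, height = 480;

	u32 pixsize = bpp * vrfb_bpp;      /* 4 bytes per VRFB pixel      */
	u32 elem_count = width * bpp;      /* 1280 bytes per source line  */
	u32 frame_count = height;          /* 480 lines                   */
	u32 dest_frame_index = (2048 * pixsize) - (width * bpp) + 1;
	                                   /* 8192 - 1280 + 1 = 6913      */
	/* A VRFB line is 2048 * 4 = 8192 bytes wide; after the 1280 bytes
	 * of one source line, the destination index skips the remaining
	 * padding so the next source line starts on the next VRFB line. */

DSS then reads the rotated image back out of the VRFB view at 0 degrees, as the comment in the function notes.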
diff --git a/drivers/media/video/omap/omap_vout_vrfb.h b/drivers/media/video/omap/omap_vout_vrfb.h
new file mode 100644
index 00000000000..ffde741e059
--- /dev/null
+++ b/drivers/media/video/omap/omap_vout_vrfb.h
@@ -0,0 +1,40 @@
+/*
+ * omap_vout_vrfb.h
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#ifndef OMAP_VOUT_VRFB_H
+#define OMAP_VOUT_VRFB_H
+
+#ifdef CONFIG_VIDEO_OMAP2_VOUT_VRFB
+void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout);
+int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
+ u32 static_vrfb_allocation);
+void omap_vout_release_vrfb(struct omap_vout_device *vout);
+int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+ unsigned int *count, unsigned int startindex);
+int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+ struct videobuf_buffer *vb);
+void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout);
+#else
+static inline void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout) { }
+static inline int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
+		u32 static_vrfb_allocation)
+		{ return 0; }
+static inline void omap_vout_release_vrfb(struct omap_vout_device *vout) { }
+static inline int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
+		unsigned int *count, unsigned int startindex)
+		{ return 0; }
+static inline int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+		struct videobuf_buffer *vb)
+		{ return 0; }
+static inline void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout) { }
+#endif
+
+#endif
diff --git a/drivers/media/video/omap/omap_voutdef.h b/drivers/media/video/omap/omap_voutdef.h
index 659497b8499..d793501cafc 100644
--- a/drivers/media/video/omap/omap_voutdef.h
+++ b/drivers/media/video/omap/omap_voutdef.h
@@ -12,6 +12,7 @@
#define OMAP_VOUTDEF_H
#include <video/omapdss.h>
+#include <plat/vrfb.h>
#define YUYV_BPP 2
#define RGB565_BPP 2
@@ -27,6 +28,31 @@
#define MAX_DISPLAYS 3
#define MAX_MANAGERS 3
+#define QQVGA_WIDTH 160
+#define QQVGA_HEIGHT 120
+
+/* Max Resolution supported by the driver */
+#define VID_MAX_WIDTH 1280 /* Largest width */
+#define VID_MAX_HEIGHT 720 /* Largest height */
+
+/* Minimum requirement is 2x2 for DSS */
+#define VID_MIN_WIDTH 2
+#define VID_MIN_HEIGHT 2
+
+/* 2048 x 2048 is max res supported by OMAP display controller */
+#define MAX_PIXELS_PER_LINE 2048
+
+#define VRFB_TX_TIMEOUT 1000
+#define VRFB_NUM_BUFS 4
+
+/* Max buffer size to be allocated during init */
+#define OMAP_VOUT_MAX_BUF_SIZE (VID_MAX_WIDTH*VID_MAX_HEIGHT*4)
+
+enum dma_channel_state {
+ DMA_CHAN_NOT_ALLOTED,
+ DMA_CHAN_ALLOTED,
+};
+
/* Enum for Rotation
* DSS understands rotation in 0, 1, 2, 3 context
* while V4L2 driver understands it as 0, 90, 180, 270
@@ -37,6 +63,18 @@ enum dss_rotation {
dss_rotation_180_degree = 2,
dss_rotation_270_degree = 3,
};
+
+/* Enum for choosing the rotation type for vout.
+ * DSS2 has no explicit "no rotation" option,
+ * while the V4L2 driver cannot support rotation
+ * at all when VRFB support is not built into
+ * the kernel.
+ */
+enum vout_rotaion_type {
+ VOUT_ROT_NONE = 0,
+ VOUT_ROT_VRFB = 1,
+};
+
/*
* This structure is used to store the DMA transfer parameters
* for VRFB hidden buffer
@@ -53,6 +91,7 @@ struct omapvideo_info {
int id;
int num_overlays;
struct omap_overlay *overlays[MAX_OVLS];
+ enum vout_rotaion_type rotation_type;
};
struct omap2video_device {
@@ -144,4 +183,43 @@ struct omap_vout_device {
int io_allowed;
};
+
+/*
+ * Return true if rotation is 90 or 270
+ */
+static inline int is_rotation_90_or_270(const struct omap_vout_device *vout)
+{
+ return (vout->rotation == dss_rotation_90_degree ||
+ vout->rotation == dss_rotation_270_degree);
+}
+
+/*
+ * Return true if rotation is enabled
+ */
+static inline int is_rotation_enabled(const struct omap_vout_device *vout)
+{
+ return vout->rotation || vout->mirror;
+}
+
+/*
+ * Reverse the rotation degree if mirroring is enabled
+ */
+static inline int calc_rotation(const struct omap_vout_device *vout)
+{
+ if (!vout->mirror)
+ return vout->rotation;
+
+ switch (vout->rotation) {
+ case dss_rotation_90_degree:
+ return dss_rotation_270_degree;
+ case dss_rotation_270_degree:
+ return dss_rotation_90_degree;
+ case dss_rotation_180_degree:
+ return dss_rotation_0_degree;
+ default:
+ return dss_rotation_180_degree;
+ }
+}
+
+void omap_vout_free_buffers(struct omap_vout_device *vout);
#endif /* ifndef OMAP_VOUTDEF_H */
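A compact illustration of how the calc_rotation() helper above folds mirroring into the rotation angle (assuming a throwaway, zero-initialised device struct):

	/* Illustrative check of the mirror folding done by calc_rotation() */
	struct omap_vout_device v = { 0 };

	v.mirror = 1;
	v.rotation = dss_rotation_90_degree;
	/* calc_rotation(&v) == dss_rotation_270_degree */
	v.rotation = dss_rotation_0_degree;
	/* calc_rotation(&v) == dss_rotation_180_degree */
	v.mirror = 0;
	/* calc_rotation(&v) now returns v.rotation unchanged */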
diff --git a/drivers/media/video/omap/omap_voutlib.c b/drivers/media/video/omap/omap_voutlib.c
index 2aa6a76c5e5..115408b9274 100644
--- a/drivers/media/video/omap/omap_voutlib.c
+++ b/drivers/media/video/omap/omap_voutlib.c
@@ -24,8 +24,12 @@
#include <linux/types.h>
#include <linux/videodev2.h>
+#include <linux/dma-mapping.h>
+
#include <plat/cpu.h>
+#include "omap_voutlib.h"
+
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("OMAP Video library");
MODULE_LICENSE("GPL");
@@ -193,7 +197,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
return -EINVAL;
if (cpu_is_omap24xx()) {
- if (crop->height != win->w.height) {
+ if (try_crop.height != win->w.height) {
/* If we're resizing vertically, we can't support a
* crop width wider than 768 pixels.
*/
@@ -202,7 +206,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
}
}
/* vertical resizing */
- vresize = (1024 * crop->height) / win->w.height;
+ vresize = (1024 * try_crop.height) / win->w.height;
if (cpu_is_omap24xx() && (vresize > 2048))
vresize = 2048;
else if (cpu_is_omap34xx() && (vresize > 4096))
@@ -221,7 +225,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
try_crop.height = 2;
}
/* horizontal resizing */
- hresize = (1024 * crop->width) / win->w.width;
+ hresize = (1024 * try_crop.width) / win->w.width;
if (cpu_is_omap24xx() && (hresize > 2048))
hresize = 2048;
else if (cpu_is_omap34xx() && (hresize > 4096))
@@ -291,3 +295,45 @@ void omap_vout_new_format(struct v4l2_pix_format *pix,
}
EXPORT_SYMBOL_GPL(omap_vout_new_format);
+/*
+ * Allocate buffers
+ */
+unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr)
+{
+ u32 order, size;
+ unsigned long virt_addr, addr;
+
+ size = PAGE_ALIGN(buf_size);
+ order = get_order(size);
+ virt_addr = __get_free_pages(GFP_KERNEL, order);
+ addr = virt_addr;
+
+ if (virt_addr) {
+ while (size > 0) {
+ SetPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ }
+ *phys_addr = (u32) virt_to_phys((void *) virt_addr);
+ return virt_addr;
+}
+
+/*
+ * Free buffers
+ */
+void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size)
+{
+ u32 order, size;
+ unsigned long addr = virtaddr;
+
+ size = PAGE_ALIGN(buf_size);
+ order = get_order(size);
+
+ while (size > 0) {
+ ClearPageReserved(virt_to_page(addr));
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ free_pages((unsigned long) virtaddr, order);
+}
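The allocator moved here rounds requests up to a power-of-two number of pages; a worked sketch assuming 4 KiB pages and the driver's largest buffer size:

	/* Hypothetical values, for illustration only */
	u32 buf_size = 1280 * 720 * 4;    /* 3,686,400 B (OMAP_VOUT_MAX_BUF_SIZE) */
	u32 size  = PAGE_ALIGN(buf_size); /* 3,686,400 B = exactly 900 pages      */
	u32 order = get_order(size);      /* 10, i.e. 2^10 = 1024 pages           */
	/* __get_free_pages() therefore returns a 4 MiB block for a ~3.5 MiB
	 * request; each page is SetPageReserved() so the range can later be
	 * mmap()ed by the application. */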
diff --git a/drivers/media/video/omap/omap_voutlib.h b/drivers/media/video/omap/omap_voutlib.h
index a60b16e8bfc..e51750a597e 100644
--- a/drivers/media/video/omap/omap_voutlib.h
+++ b/drivers/media/video/omap/omap_voutlib.h
@@ -12,23 +12,25 @@
#ifndef OMAP_VOUTLIB_H
#define OMAP_VOUTLIB_H
-extern void omap_vout_default_crop(struct v4l2_pix_format *pix,
+void omap_vout_default_crop(struct v4l2_pix_format *pix,
struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop);
-extern int omap_vout_new_crop(struct v4l2_pix_format *pix,
+int omap_vout_new_crop(struct v4l2_pix_format *pix,
struct v4l2_rect *crop, struct v4l2_window *win,
struct v4l2_framebuffer *fbuf,
const struct v4l2_rect *new_crop);
-extern int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
+int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
struct v4l2_window *new_win);
-extern int omap_vout_new_window(struct v4l2_rect *crop,
+int omap_vout_new_window(struct v4l2_rect *crop,
struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
struct v4l2_window *new_win);
-extern void omap_vout_new_format(struct v4l2_pix_format *pix,
+void omap_vout_new_format(struct v4l2_pix_format *pix,
struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
struct v4l2_window *win);
+unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr);
+void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size);
#endif /* #ifndef OMAP_VOUTLIB_H */
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
index e7cfc85b0a1..8a947e603ac 100644
--- a/drivers/media/video/omap1_camera.c
+++ b/drivers/media/video/omap1_camera.c
@@ -26,7 +26,6 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <media/omap1_camera.h>
#include <media/soc_camera.h>
@@ -38,7 +37,7 @@
#define DRIVER_NAME "omap1-camera"
-#define VERSION_CODE KERNEL_VERSION(0, 0, 1)
+#define DRIVER_VERSION "0.0.2"
/*
@@ -208,7 +207,7 @@ static int omap1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
struct soc_camera_device *icd = vq->priv_data;
int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
if (bytes_per_line < 0)
@@ -222,7 +221,7 @@ static int omap1_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
if (*size * *count > MAX_VIDEO_MEM * 1024 * 1024)
*count = (MAX_VIDEO_MEM * 1024 * 1024) / *size;
- dev_dbg(icd->dev.parent,
+ dev_dbg(icd->parent,
"%s: count=%d, size=%d\n", __func__, *count, *size);
return 0;
@@ -241,7 +240,7 @@ static void free_buffer(struct videobuf_queue *vq, struct omap1_cam_buf *buf,
videobuf_dma_contig_free(vq, vb);
} else {
struct soc_camera_device *icd = vq->priv_data;
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
videobuf_dma_unmap(dev, dma);
@@ -258,7 +257,7 @@ static int omap1_videobuf_prepare(struct videobuf_queue *vq,
struct omap1_cam_buf *buf = container_of(vb, struct omap1_cam_buf, vb);
int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
int ret;
@@ -490,7 +489,7 @@ static void omap1_videobuf_queue(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
struct omap1_cam_buf *buf;
u32 mode;
@@ -519,7 +518,7 @@ static void omap1_videobuf_queue(struct videobuf_queue *vq,
pcdev->active = buf;
pcdev->ready = NULL;
- dev_dbg(icd->dev.parent,
+ dev_dbg(icd->parent,
"%s: capture not active, setup FIFO, start DMA\n", __func__);
mode = CAM_READ_CACHE(pcdev, MODE) & ~THRESHOLD_MASK;
mode |= THRESHOLD_LEVEL(pcdev->vb_mode) << THRESHOLD_SHIFT;
@@ -543,8 +542,8 @@ static void omap1_videobuf_release(struct videobuf_queue *vq,
struct omap1_cam_buf *buf =
container_of(vb, struct omap1_cam_buf, vb);
struct soc_camera_device *icd = vq->priv_data;
- struct device *dev = icd->dev.parent;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct omap1_cam_dev *pcdev = ici->priv;
switch (vb->state) {
@@ -573,7 +572,7 @@ static void videobuf_done(struct omap1_cam_dev *pcdev,
{
struct omap1_cam_buf *buf = pcdev->active;
struct videobuf_buffer *vb;
- struct device *dev = pcdev->icd->dev.parent;
+ struct device *dev = pcdev->icd->parent;
if (WARN_ON(!buf)) {
suspend_capture(pcdev);
@@ -799,7 +798,7 @@ out:
static irqreturn_t cam_isr(int irq, void *data)
{
struct omap1_cam_dev *pcdev = data;
- struct device *dev = pcdev->icd->dev.parent;
+ struct device *dev = pcdev->icd->parent;
struct omap1_cam_buf *buf = pcdev->active;
u32 it_status;
unsigned long flags;
@@ -909,7 +908,7 @@ static void sensor_reset(struct omap1_cam_dev *pcdev, bool reset)
*/
static int omap1_cam_add_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
u32 ctrlclock;
@@ -952,14 +951,14 @@ static int omap1_cam_add_device(struct soc_camera_device *icd)
pcdev->icd = icd;
- dev_dbg(icd->dev.parent, "OMAP1 Camera driver attached to camera %d\n",
+ dev_dbg(icd->parent, "OMAP1 Camera driver attached to camera %d\n",
icd->devnum);
return 0;
}
static void omap1_cam_remove_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
u32 ctrlclock;
@@ -985,7 +984,7 @@ static void omap1_cam_remove_device(struct soc_camera_device *icd)
pcdev->icd = NULL;
- dev_dbg(icd->dev.parent,
+ dev_dbg(icd->parent,
"OMAP1 Camera driver detached from camera %d\n", icd->devnum);
}
@@ -1070,7 +1069,7 @@ static int omap1_cam_get_formats(struct soc_camera_device *icd,
unsigned int idx, struct soc_camera_format_xlate *xlate)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
int formats = 0, ret;
enum v4l2_mbus_pixelcode code;
const struct soc_mbus_pixelfmt *fmt;
@@ -1222,9 +1221,9 @@ static int omap1_cam_set_crop(struct soc_camera_device *icd,
struct v4l2_rect *rect = &crop->c;
const struct soc_camera_format_xlate *xlate = icd->current_fmt;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct omap1_cam_dev *pcdev = ici->priv;
- struct device *dev = icd->dev.parent;
struct v4l2_mbus_framefmt mf;
int ret;
@@ -1270,8 +1269,8 @@ static int omap1_cam_set_fmt(struct soc_camera_device *icd,
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate;
- struct device *dev = icd->dev.parent;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct omap1_cam_dev *pcdev = ici->priv;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_mbus_framefmt mf;
@@ -1326,7 +1325,7 @@ static int omap1_cam_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate) {
- dev_warn(icd->dev.parent, "Format %#x not found\n",
+ dev_warn(icd->parent, "Format %#x not found\n",
pix->pixelformat);
return -EINVAL;
}
@@ -1362,7 +1361,7 @@ static int omap1_cam_mmap_mapper(struct videobuf_queue *q,
struct vm_area_struct *vma)
{
struct soc_camera_device *icd = q->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
int ret;
@@ -1377,17 +1376,17 @@ static int omap1_cam_mmap_mapper(struct videobuf_queue *q,
static void omap1_cam_init_videobuf(struct videobuf_queue *q,
struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct omap1_cam_dev *pcdev = ici->priv;
if (!sg_mode)
videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops,
- icd->dev.parent, &pcdev->lock,
+ icd->parent, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
sizeof(struct omap1_cam_buf), icd, &icd->video_lock);
else
videobuf_queue_sg_init(q, &omap1_videobuf_ops,
- icd->dev.parent, &pcdev->lock,
+ icd->parent, &pcdev->lock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
sizeof(struct omap1_cam_buf), icd, &icd->video_lock);
@@ -1431,7 +1430,6 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card));
- cap->version = VERSION_CODE;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
@@ -1440,9 +1438,9 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
static int omap1_cam_set_bus_param(struct soc_camera_device *icd,
__u32 pixfmt)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct omap1_cam_dev *pcdev = ici->priv;
- struct device *dev = icd->dev.parent;
const struct soc_camera_format_xlate *xlate;
const struct soc_mbus_pixelfmt *fmt;
unsigned long camera_flags, common_flags;
@@ -1718,4 +1716,5 @@ MODULE_PARM_DESC(sg_mode, "videobuf mode, 0: dma-contig (default), 1: dma-sg");
MODULE_DESCRIPTION("OMAP1 Camera Interface driver");
MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRIVER_VERSION);
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index f6626e87dbc..eb97bff7116 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -31,7 +31,6 @@
#include <linux/interrupt.h>
#include <linux/videodev2.h>
#include <linux/pci.h> /* needed for videobufs */
-#include <linux/version.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -43,7 +42,7 @@
#include "omap24xxcam.h"
-#define OMAP24XXCAM_VERSION KERNEL_VERSION(0, 0, 0)
+#define OMAP24XXCAM_VERSION "0.0.1"
#define RESET_TIMEOUT_NS 10000
@@ -309,11 +308,11 @@ static int omap24xxcam_vbq_alloc_mmap_buffer(struct videobuf_buffer *vb)
order--;
/* try to allocate as many contiguous pages as possible */
- page = alloc_pages(GFP_KERNEL | GFP_DMA, order);
+ page = alloc_pages(GFP_KERNEL, order);
/* if allocation fails, try to allocate smaller amount */
while (page == NULL) {
order--;
- page = alloc_pages(GFP_KERNEL | GFP_DMA, order);
+ page = alloc_pages(GFP_KERNEL, order);
if (page == NULL && !order) {
err = -ENOMEM;
goto out;
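The two hunks above only drop GFP_DMA from the allocation mask; the fall-back logic itself is unchanged. Condensed into a standalone helper (the name is illustrative, not part of the driver), the pattern is:

	#include <linux/gfp.h>

	/* Ask for 2^order contiguous pages, halving the request until the
	 * allocator succeeds or we are down to a single page. */
	static struct page *alloc_contig_best_effort(unsigned int order)
	{
		struct page *page = alloc_pages(GFP_KERNEL, order);

		while (!page && order > 0)
			page = alloc_pages(GFP_KERNEL, --order);

		return page;	/* NULL only if even order 0 failed */
	}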
@@ -993,7 +992,6 @@ static int vidioc_querycap(struct file *file, void *fh,
strlcpy(cap->driver, CAM_NAME, sizeof(cap->driver));
strlcpy(cap->card, cam->vfd->name, sizeof(cap->card));
- cap->version = OMAP24XXCAM_VERSION;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
@@ -1768,14 +1766,13 @@ static int __devinit omap24xxcam_probe(struct platform_device *pdev)
dev_err(cam->dev, "no mem resource?\n");
goto err;
}
- if (!request_mem_region(mem->start, (mem->end - mem->start) + 1,
- pdev->name)) {
+ if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
dev_err(cam->dev,
"cannot reserve camera register I/O region\n");
goto err;
}
cam->mmio_base_phys = mem->start;
- cam->mmio_size = (mem->end - mem->start) + 1;
+ cam->mmio_size = resource_size(mem);
/* map the region */
cam->mmio_base = (unsigned long)
@@ -1889,6 +1886,7 @@ static void __exit omap24xxcam_cleanup(void)
MODULE_AUTHOR("Sakari Ailus <sakari.ailus@nokia.com>");
MODULE_DESCRIPTION("OMAP24xx Video for Linux camera driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION(OMAP24XXCAM_VERSION);
module_param(video_nr, int, 0);
MODULE_PARM_DESC(video_nr,
"Minor number for video device (-1 ==> auto assign)");
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index c9fd04ee70a..5cea2bbd701 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -1748,7 +1748,7 @@ static int isp_register_entities(struct isp_device *isp)
goto done;
/* Register external entities */
- for (subdevs = pdata->subdevs; subdevs->subdevs; ++subdevs) {
+ for (subdevs = pdata->subdevs; subdevs && subdevs->subdevs; ++subdevs) {
struct v4l2_subdev *sensor;
struct media_entity *input;
unsigned int flags;
@@ -2234,3 +2234,4 @@ module_exit(isp_cleanup);
MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("TI OMAP3 ISP driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION(ISP_VIDEO_DRIVER_VERSION);
diff --git a/drivers/media/video/omap3isp/isp.h b/drivers/media/video/omap3isp/isp.h
index 2620c405f5e..529e582ef94 100644
--- a/drivers/media/video/omap3isp/isp.h
+++ b/drivers/media/video/omap3isp/isp.h
@@ -139,6 +139,10 @@ struct isp_reg {
* 3 - CAMEXT[13:6] -> CAM[7:0]
* @clk_pol: Pixel clock polarity
* 0 - Non Inverted, 1 - Inverted
+ * @hs_pol: Horizontal synchronization polarity
+ * 0 - Active high, 1 - Active low
+ * @vs_pol: Vertical synchronization polarity
+ * 0 - Active high, 1 - Active low
* @bridge: CCDC Bridge input control
* ISPCTRL_PAR_BRIDGE_DISABLE - Disable
* ISPCTRL_PAR_BRIDGE_LENDIAN - Little endian
@@ -147,6 +151,8 @@ struct isp_reg {
struct isp_parallel_platform_data {
unsigned int data_lane_shift:2;
unsigned int clk_pol:1;
+ unsigned int hs_pol:1;
+ unsigned int vs_pol:1;
unsigned int bridge:4;
};
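For reference, a hypothetical board file would fill in the two new polarity bits alongside the existing fields roughly as below. The values are made up for illustration, and the sketch assumes the declarations from isp.h (and the ISPCTRL_PAR_BRIDGE_* constants referred to in the comment above) are in scope:

	static struct isp_parallel_platform_data board_isp_parallel_pdata = {
		.data_lane_shift = 0,
		.clk_pol  = 0,	/* non-inverted pixel clock */
		.hs_pol   = 0,	/* HSYNC active high */
		.vs_pol   = 1,	/* VSYNC active low */
		.bridge   = ISPCTRL_PAR_BRIDGE_DISABLE,	/* CCDC bridge disabled */
	};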
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index 39d501bda63..9d3459de04b 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -1148,6 +1148,8 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
omap3isp_configure_bridge(isp, ccdc->input, pdata, shift);
ccdc->syncif.datsz = depth_out;
+ ccdc->syncif.hdpol = pdata ? pdata->hs_pol : 0;
+ ccdc->syncif.vdpol = pdata ? pdata->vs_pol : 0;
ccdc_config_sync_if(ccdc, &ccdc->syncif);
/* CCDC_PAD_SINK */
@@ -1691,7 +1693,7 @@ static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
if (sub->type != V4L2_EVENT_OMAP3ISP_HS_VS)
return -EINVAL;
- return v4l2_event_subscribe(fh, sub);
+ return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS);
}
static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
@@ -2162,7 +2164,6 @@ static int ccdc_init_entities(struct isp_ccdc_device *ccdc)
sd->grp_id = 1 << 16; /* group ID for isp subdevs */
v4l2_set_subdevdata(sd, ccdc);
sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
- sd->nevents = OMAP3ISP_CCDC_NEVENTS;
pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
pads[CCDC_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;
@@ -2257,8 +2258,6 @@ int omap3isp_ccdc_init(struct isp_device *isp)
ccdc->syncif.fldout = 0;
ccdc->syncif.fldpol = 0;
ccdc->syncif.fldstat = 0;
- ccdc->syncif.hdpol = 0;
- ccdc->syncif.vdpol = 0;
ccdc->clamp.oblen = 0;
ccdc->clamp.dcsubval = 0;
diff --git a/drivers/media/video/omap3isp/ispccp2.c b/drivers/media/video/omap3isp/ispccp2.c
index 0e16cab8e08..ec9e395f333 100644
--- a/drivers/media/video/omap3isp/ispccp2.c
+++ b/drivers/media/video/omap3isp/ispccp2.c
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
#include "isp.h"
#include "ispreg.h"
@@ -163,6 +164,9 @@ static void ccp2_if_enable(struct isp_ccp2_device *ccp2, u8 enable)
struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
int i;
+ if (enable && ccp2->vdds_csib)
+ regulator_enable(ccp2->vdds_csib);
+
/* Enable/Disable all the LCx channels */
for (i = 0; i < CCP2_LCx_CHANS_NUM; i++)
isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(i),
@@ -186,6 +190,9 @@ static void ccp2_if_enable(struct isp_ccp2_device *ccp2, u8 enable)
ISPCCP2_LC01_IRQENABLE,
ISPCCP2_LC01_IRQSTATUS_LC0_FS_IRQ);
}
+
+ if (!enable && ccp2->vdds_csib)
+ regulator_disable(ccp2->vdds_csib);
}
/*
@@ -1137,6 +1144,9 @@ error:
*/
void omap3isp_ccp2_cleanup(struct isp_device *isp)
{
+ struct isp_ccp2_device *ccp2 = &isp->isp_ccp2;
+
+ regulator_put(ccp2->vdds_csib);
}
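Taken together, the ccp2 changes implement an "optional regulator" pattern: the rail is looked up once, a failed lookup is downgraded to NULL, every enable/disable is guarded, and the cleanup above hands the pointer to regulator_put() unconditionally, relying on it accepting a NULL argument. A minimal sketch of that pattern on its own, with illustrative names:

	#include <linux/err.h>
	#include <linux/kernel.h>
	#include <linux/regulator/consumer.h>

	static struct regulator *csib_reg;	/* NULL when the rail is absent */

	static void csib_power_init(struct device *dev)
	{
		csib_reg = regulator_get(dev, "vdds_csib");
		if (IS_ERR(csib_reg))
			csib_reg = NULL;	/* nothing to manage on this SoC */
	}

	static void csib_power(bool on)
	{
		if (!csib_reg)
			return;
		if (on) {
			if (regulator_enable(csib_reg))
				pr_warn("csib: failed to enable regulator\n");
		} else {
			regulator_disable(csib_reg);
		}
	}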
/*
@@ -1151,14 +1161,27 @@ int omap3isp_ccp2_init(struct isp_device *isp)
init_waitqueue_head(&ccp2->wait);
- /* On the OMAP36xx, the CCP2 uses the CSI PHY1 or PHY2, shared with
+ /*
+ * On the OMAP34xx the CSI1 receiver is operated in the CSIb IO
+ * complex, which is powered by vdds_csib power rail. Hence the
+ * request for the regulator.
+ *
+ * On the OMAP36xx, the CCP2 uses the CSI PHY1 or PHY2, shared with
* the CSI2c or CSI2a receivers. The PHY then needs to be explicitly
* configured.
*
* TODO: Don't hardcode the usage of PHY1 (shared with CSI2c).
*/
- if (isp->revision == ISP_REVISION_15_0)
+ if (isp->revision == ISP_REVISION_2_0) {
+ ccp2->vdds_csib = regulator_get(isp->dev, "vdds_csib");
+ if (IS_ERR(ccp2->vdds_csib)) {
+ dev_dbg(isp->dev,
+ "Could not get regulator vdds_csib\n");
+ ccp2->vdds_csib = NULL;
+ }
+ } else if (isp->revision == ISP_REVISION_15_0) {
ccp2->phy = &isp->isp_csiphy1;
+ }
ret = ccp2_init_entities(ccp2);
if (ret < 0)
diff --git a/drivers/media/video/omap3isp/ispccp2.h b/drivers/media/video/omap3isp/ispccp2.h
index 5505a86a9a7..6674e9de2cd 100644
--- a/drivers/media/video/omap3isp/ispccp2.h
+++ b/drivers/media/video/omap3isp/ispccp2.h
@@ -81,6 +81,7 @@ struct isp_ccp2_device {
struct isp_interface_mem_config mem_cfg;
struct isp_video video_in;
struct isp_csiphy *phy;
+ struct regulator *vdds_csib;
unsigned int error;
enum isp_pipeline_stream_state state;
wait_queue_head_t wait;
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index b44cb685236..808065948ac 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -1032,7 +1032,6 @@ static int isp_stat_init_entities(struct ispstat *stat, const char *name,
snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
subdev->grp_id = 1 << 16; /* group ID for isp subdevs */
subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
- subdev->nevents = STAT_NEVENTS;
v4l2_set_subdevdata(subdev, stat);
stat->pad.flags = MEDIA_PAD_FL_SINK;
@@ -1050,7 +1049,7 @@ int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
if (sub->type != stat->event_type)
return -EINVAL;
- return v4l2_event_subscribe(fh, sub);
+ return v4l2_event_subscribe(fh, sub, STAT_NEVENTS);
}
int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index 9cd8f1aa567..fd965adfd59 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -695,7 +695,6 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
strlcpy(cap->card, video->video.name, sizeof(cap->card));
strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
- cap->version = ISP_VIDEO_DRIVER_VERSION;
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
diff --git a/drivers/media/video/omap3isp/ispvideo.h b/drivers/media/video/omap3isp/ispvideo.h
index 911bea64e78..53160aa24e6 100644
--- a/drivers/media/video/omap3isp/ispvideo.h
+++ b/drivers/media/video/omap3isp/ispvideo.h
@@ -27,7 +27,6 @@
#define OMAP3_ISP_VIDEO_H
#include <linux/v4l2-mediabus.h>
-#include <linux/version.h>
#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
@@ -35,7 +34,7 @@
#include "ispqueue.h"
#define ISP_VIDEO_DRIVER_NAME "ispvideo"
-#define ISP_VIDEO_DRIVER_VERSION KERNEL_VERSION(0, 0, 1)
+#define ISP_VIDEO_DRIVER_VERSION "0.0.2"
struct isp_device;
struct isp_video;
diff --git a/drivers/media/video/ov2640.c b/drivers/media/video/ov2640.c
index 0cea0cf3667..9ce2fa037b9 100644
--- a/drivers/media/video/ov2640.c
+++ b/drivers/media/video/ov2640.c
@@ -1031,16 +1031,9 @@ static int ov2640_video_probe(struct soc_camera_device *icd,
const char *devname;
int ret;
- /*
- * we must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface) {
- dev_err(&client->dev, "Parent missing or invalid!\n");
- ret = -ENODEV;
- goto err;
- }
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/*
* check and show product ID and manufacturer ID
diff --git a/drivers/media/video/ov5642.c b/drivers/media/video/ov5642.c
new file mode 100644
index 00000000000..349a4ad3ccc
--- /dev/null
+++ b/drivers/media/video/ov5642.c
@@ -0,0 +1,1012 @@
+/*
+ * Driver for OV5642 CMOS Image Sensor from Omnivision
+ *
+ * Copyright (C) 2011, Bastian Hecht <hechtb@gmail.com>
+ *
+ * Based on Sony IMX074 Camera Driver
+ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * Based on Omnivision OV7670 Camera Driver
+ * Copyright (C) 2006-7 Jonathan Corbet <corbet@lwn.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/module.h>
+
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-subdev.h>
+
+/* OV5642 registers */
+#define REG_CHIP_ID_HIGH 0x300a
+#define REG_CHIP_ID_LOW 0x300b
+
+#define REG_WINDOW_START_X_HIGH 0x3800
+#define REG_WINDOW_START_X_LOW 0x3801
+#define REG_WINDOW_START_Y_HIGH 0x3802
+#define REG_WINDOW_START_Y_LOW 0x3803
+#define REG_WINDOW_WIDTH_HIGH 0x3804
+#define REG_WINDOW_WIDTH_LOW 0x3805
+#define REG_WINDOW_HEIGHT_HIGH 0x3806
+#define REG_WINDOW_HEIGHT_LOW 0x3807
+#define REG_OUT_WIDTH_HIGH 0x3808
+#define REG_OUT_WIDTH_LOW 0x3809
+#define REG_OUT_HEIGHT_HIGH 0x380a
+#define REG_OUT_HEIGHT_LOW 0x380b
+#define REG_OUT_TOTAL_WIDTH_HIGH 0x380c
+#define REG_OUT_TOTAL_WIDTH_LOW 0x380d
+#define REG_OUT_TOTAL_HEIGHT_HIGH 0x380e
+#define REG_OUT_TOTAL_HEIGHT_LOW 0x380f
+
+/*
+ * Define the standard resolution.
+ * Currently works only for output heights up to 720 lines,
+ * e.g. 320x240, 640x480, 800x600, 1280x720, 2048x720
+ */
+
+#define OV5642_WIDTH 1280
+#define OV5642_HEIGHT 720
+#define OV5642_TOTAL_WIDTH 3200
+#define OV5642_TOTAL_HEIGHT 2000
+#define OV5642_SENSOR_SIZE_X 2592
+#define OV5642_SENSOR_SIZE_Y 1944
+
+struct regval_list {
+ u16 reg_num;
+ u8 value;
+};
+
+static struct regval_list ov5642_default_regs_init[] = {
+ { 0x3103, 0x93 },
+ { 0x3008, 0x82 },
+ { 0x3017, 0x7f },
+ { 0x3018, 0xfc },
+ { 0x3810, 0xc2 },
+ { 0x3615, 0xf0 },
+ { 0x3000, 0x0 },
+ { 0x3001, 0x0 },
+ { 0x3002, 0x0 },
+ { 0x3003, 0x0 },
+ { 0x3004, 0xff },
+ { 0x3030, 0x2b },
+ { 0x3011, 0x8 },
+ { 0x3010, 0x10 },
+ { 0x3604, 0x60 },
+ { 0x3622, 0x60 },
+ { 0x3621, 0x9 },
+ { 0x3709, 0x0 },
+ { 0x4000, 0x21 },
+ { 0x401d, 0x22 },
+ { 0x3600, 0x54 },
+ { 0x3605, 0x4 },
+ { 0x3606, 0x3f },
+ { 0x3c01, 0x80 },
+ { 0x300d, 0x22 },
+ { 0x3623, 0x22 },
+ { 0x5000, 0x4f },
+ { 0x5020, 0x4 },
+ { 0x5181, 0x79 },
+ { 0x5182, 0x0 },
+ { 0x5185, 0x22 },
+ { 0x5197, 0x1 },
+ { 0x5500, 0xa },
+ { 0x5504, 0x0 },
+ { 0x5505, 0x7f },
+ { 0x5080, 0x8 },
+ { 0x300e, 0x18 },
+ { 0x4610, 0x0 },
+ { 0x471d, 0x5 },
+ { 0x4708, 0x6 },
+ { 0x370c, 0xa0 },
+ { 0x5687, 0x94 },
+ { 0x501f, 0x0 },
+ { 0x5000, 0x4f },
+ { 0x5001, 0xcf },
+ { 0x4300, 0x30 },
+ { 0x4300, 0x30 },
+ { 0x460b, 0x35 },
+ { 0x471d, 0x0 },
+ { 0x3002, 0xc },
+ { 0x3002, 0x0 },
+ { 0x4713, 0x3 },
+ { 0x471c, 0x50 },
+ { 0x4721, 0x2 },
+ { 0x4402, 0x90 },
+ { 0x460c, 0x22 },
+ { 0x3815, 0x44 },
+ { 0x3503, 0x7 },
+ { 0x3501, 0x73 },
+ { 0x3502, 0x80 },
+ { 0x350b, 0x0 },
+ { 0x3818, 0xc8 },
+ { 0x3824, 0x11 },
+ { 0x3a00, 0x78 },
+ { 0x3a1a, 0x4 },
+ { 0x3a13, 0x30 },
+ { 0x3a18, 0x0 },
+ { 0x3a19, 0x7c },
+ { 0x3a08, 0x12 },
+ { 0x3a09, 0xc0 },
+ { 0x3a0a, 0xf },
+ { 0x3a0b, 0xa0 },
+ { 0x350c, 0x7 },
+ { 0x350d, 0xd0 },
+ { 0x3a0d, 0x8 },
+ { 0x3a0e, 0x6 },
+ { 0x3500, 0x0 },
+ { 0x3501, 0x0 },
+ { 0x3502, 0x0 },
+ { 0x350a, 0x0 },
+ { 0x350b, 0x0 },
+ { 0x3503, 0x0 },
+ { 0x3a0f, 0x3c },
+ { 0x3a10, 0x32 },
+ { 0x3a1b, 0x3c },
+ { 0x3a1e, 0x32 },
+ { 0x3a11, 0x80 },
+ { 0x3a1f, 0x20 },
+ { 0x3030, 0x2b },
+ { 0x3a02, 0x0 },
+ { 0x3a03, 0x7d },
+ { 0x3a04, 0x0 },
+ { 0x3a14, 0x0 },
+ { 0x3a15, 0x7d },
+ { 0x3a16, 0x0 },
+ { 0x3a00, 0x78 },
+ { 0x3a08, 0x9 },
+ { 0x3a09, 0x60 },
+ { 0x3a0a, 0x7 },
+ { 0x3a0b, 0xd0 },
+ { 0x3a0d, 0x10 },
+ { 0x3a0e, 0xd },
+ { 0x4407, 0x4 },
+ { 0x5193, 0x70 },
+ { 0x589b, 0x0 },
+ { 0x589a, 0xc0 },
+ { 0x401e, 0x20 },
+ { 0x4001, 0x42 },
+ { 0x401c, 0x6 },
+ { 0x3825, 0xac },
+ { 0x3827, 0xc },
+ { 0x528a, 0x1 },
+ { 0x528b, 0x4 },
+ { 0x528c, 0x8 },
+ { 0x528d, 0x10 },
+ { 0x528e, 0x20 },
+ { 0x528f, 0x28 },
+ { 0x5290, 0x30 },
+ { 0x5292, 0x0 },
+ { 0x5293, 0x1 },
+ { 0x5294, 0x0 },
+ { 0x5295, 0x4 },
+ { 0x5296, 0x0 },
+ { 0x5297, 0x8 },
+ { 0x5298, 0x0 },
+ { 0x5299, 0x10 },
+ { 0x529a, 0x0 },
+ { 0x529b, 0x20 },
+ { 0x529c, 0x0 },
+ { 0x529d, 0x28 },
+ { 0x529e, 0x0 },
+ { 0x529f, 0x30 },
+ { 0x5282, 0x0 },
+ { 0x5300, 0x0 },
+ { 0x5301, 0x20 },
+ { 0x5302, 0x0 },
+ { 0x5303, 0x7c },
+ { 0x530c, 0x0 },
+ { 0x530d, 0xc },
+ { 0x530e, 0x20 },
+ { 0x530f, 0x80 },
+ { 0x5310, 0x20 },
+ { 0x5311, 0x80 },
+ { 0x5308, 0x20 },
+ { 0x5309, 0x40 },
+ { 0x5304, 0x0 },
+ { 0x5305, 0x30 },
+ { 0x5306, 0x0 },
+ { 0x5307, 0x80 },
+ { 0x5314, 0x8 },
+ { 0x5315, 0x20 },
+ { 0x5319, 0x30 },
+ { 0x5316, 0x10 },
+ { 0x5317, 0x0 },
+ { 0x5318, 0x2 },
+ { 0x5380, 0x1 },
+ { 0x5381, 0x0 },
+ { 0x5382, 0x0 },
+ { 0x5383, 0x4e },
+ { 0x5384, 0x0 },
+ { 0x5385, 0xf },
+ { 0x5386, 0x0 },
+ { 0x5387, 0x0 },
+ { 0x5388, 0x1 },
+ { 0x5389, 0x15 },
+ { 0x538a, 0x0 },
+ { 0x538b, 0x31 },
+ { 0x538c, 0x0 },
+ { 0x538d, 0x0 },
+ { 0x538e, 0x0 },
+ { 0x538f, 0xf },
+ { 0x5390, 0x0 },
+ { 0x5391, 0xab },
+ { 0x5392, 0x0 },
+ { 0x5393, 0xa2 },
+ { 0x5394, 0x8 },
+ { 0x5480, 0x14 },
+ { 0x5481, 0x21 },
+ { 0x5482, 0x36 },
+ { 0x5483, 0x57 },
+ { 0x5484, 0x65 },
+ { 0x5485, 0x71 },
+ { 0x5486, 0x7d },
+ { 0x5487, 0x87 },
+ { 0x5488, 0x91 },
+ { 0x5489, 0x9a },
+ { 0x548a, 0xaa },
+ { 0x548b, 0xb8 },
+ { 0x548c, 0xcd },
+ { 0x548d, 0xdd },
+ { 0x548e, 0xea },
+ { 0x548f, 0x1d },
+ { 0x5490, 0x5 },
+ { 0x5491, 0x0 },
+ { 0x5492, 0x4 },
+ { 0x5493, 0x20 },
+ { 0x5494, 0x3 },
+ { 0x5495, 0x60 },
+ { 0x5496, 0x2 },
+ { 0x5497, 0xb8 },
+ { 0x5498, 0x2 },
+ { 0x5499, 0x86 },
+ { 0x549a, 0x2 },
+ { 0x549b, 0x5b },
+ { 0x549c, 0x2 },
+ { 0x549d, 0x3b },
+ { 0x549e, 0x2 },
+ { 0x549f, 0x1c },
+ { 0x54a0, 0x2 },
+ { 0x54a1, 0x4 },
+ { 0x54a2, 0x1 },
+ { 0x54a3, 0xed },
+ { 0x54a4, 0x1 },
+ { 0x54a5, 0xc5 },
+ { 0x54a6, 0x1 },
+ { 0x54a7, 0xa5 },
+ { 0x54a8, 0x1 },
+ { 0x54a9, 0x6c },
+ { 0x54aa, 0x1 },
+ { 0x54ab, 0x41 },
+ { 0x54ac, 0x1 },
+ { 0x54ad, 0x20 },
+ { 0x54ae, 0x0 },
+ { 0x54af, 0x16 },
+ { 0x54b0, 0x1 },
+ { 0x54b1, 0x20 },
+ { 0x54b2, 0x0 },
+ { 0x54b3, 0x10 },
+ { 0x54b4, 0x0 },
+ { 0x54b5, 0xf0 },
+ { 0x54b6, 0x0 },
+ { 0x54b7, 0xdf },
+ { 0x5402, 0x3f },
+ { 0x5403, 0x0 },
+ { 0x3406, 0x0 },
+ { 0x5180, 0xff },
+ { 0x5181, 0x52 },
+ { 0x5182, 0x11 },
+ { 0x5183, 0x14 },
+ { 0x5184, 0x25 },
+ { 0x5185, 0x24 },
+ { 0x5186, 0x6 },
+ { 0x5187, 0x8 },
+ { 0x5188, 0x8 },
+ { 0x5189, 0x7c },
+ { 0x518a, 0x60 },
+ { 0x518b, 0xb2 },
+ { 0x518c, 0xb2 },
+ { 0x518d, 0x44 },
+ { 0x518e, 0x3d },
+ { 0x518f, 0x58 },
+ { 0x5190, 0x46 },
+ { 0x5191, 0xf8 },
+ { 0x5192, 0x4 },
+ { 0x5193, 0x70 },
+ { 0x5194, 0xf0 },
+ { 0x5195, 0xf0 },
+ { 0x5196, 0x3 },
+ { 0x5197, 0x1 },
+ { 0x5198, 0x4 },
+ { 0x5199, 0x12 },
+ { 0x519a, 0x4 },
+ { 0x519b, 0x0 },
+ { 0x519c, 0x6 },
+ { 0x519d, 0x82 },
+ { 0x519e, 0x0 },
+ { 0x5025, 0x80 },
+ { 0x3a0f, 0x38 },
+ { 0x3a10, 0x30 },
+ { 0x3a1b, 0x3a },
+ { 0x3a1e, 0x2e },
+ { 0x3a11, 0x60 },
+ { 0x3a1f, 0x10 },
+ { 0x5688, 0xa6 },
+ { 0x5689, 0x6a },
+ { 0x568a, 0xea },
+ { 0x568b, 0xae },
+ { 0x568c, 0xa6 },
+ { 0x568d, 0x6a },
+ { 0x568e, 0x62 },
+ { 0x568f, 0x26 },
+ { 0x5583, 0x40 },
+ { 0x5584, 0x40 },
+ { 0x5580, 0x2 },
+ { 0x5000, 0xcf },
+ { 0x5800, 0x27 },
+ { 0x5801, 0x19 },
+ { 0x5802, 0x12 },
+ { 0x5803, 0xf },
+ { 0x5804, 0x10 },
+ { 0x5805, 0x15 },
+ { 0x5806, 0x1e },
+ { 0x5807, 0x2f },
+ { 0x5808, 0x15 },
+ { 0x5809, 0xd },
+ { 0x580a, 0xa },
+ { 0x580b, 0x9 },
+ { 0x580c, 0xa },
+ { 0x580d, 0xc },
+ { 0x580e, 0x12 },
+ { 0x580f, 0x19 },
+ { 0x5810, 0xb },
+ { 0x5811, 0x7 },
+ { 0x5812, 0x4 },
+ { 0x5813, 0x3 },
+ { 0x5814, 0x3 },
+ { 0x5815, 0x6 },
+ { 0x5816, 0xa },
+ { 0x5817, 0xf },
+ { 0x5818, 0xa },
+ { 0x5819, 0x5 },
+ { 0x581a, 0x1 },
+ { 0x581b, 0x0 },
+ { 0x581c, 0x0 },
+ { 0x581d, 0x3 },
+ { 0x581e, 0x8 },
+ { 0x581f, 0xc },
+ { 0x5820, 0xa },
+ { 0x5821, 0x5 },
+ { 0x5822, 0x1 },
+ { 0x5823, 0x0 },
+ { 0x5824, 0x0 },
+ { 0x5825, 0x3 },
+ { 0x5826, 0x8 },
+ { 0x5827, 0xc },
+ { 0x5828, 0xe },
+ { 0x5829, 0x8 },
+ { 0x582a, 0x6 },
+ { 0x582b, 0x4 },
+ { 0x582c, 0x5 },
+ { 0x582d, 0x7 },
+ { 0x582e, 0xb },
+ { 0x582f, 0x12 },
+ { 0x5830, 0x18 },
+ { 0x5831, 0x10 },
+ { 0x5832, 0xc },
+ { 0x5833, 0xa },
+ { 0x5834, 0xb },
+ { 0x5835, 0xe },
+ { 0x5836, 0x15 },
+ { 0x5837, 0x19 },
+ { 0x5838, 0x32 },
+ { 0x5839, 0x1f },
+ { 0x583a, 0x18 },
+ { 0x583b, 0x16 },
+ { 0x583c, 0x17 },
+ { 0x583d, 0x1e },
+ { 0x583e, 0x26 },
+ { 0x583f, 0x53 },
+ { 0x5840, 0x10 },
+ { 0x5841, 0xf },
+ { 0x5842, 0xd },
+ { 0x5843, 0xc },
+ { 0x5844, 0xe },
+ { 0x5845, 0x9 },
+ { 0x5846, 0x11 },
+ { 0x5847, 0x10 },
+ { 0x5848, 0x10 },
+ { 0x5849, 0x10 },
+ { 0x584a, 0x10 },
+ { 0x584b, 0xe },
+ { 0x584c, 0x10 },
+ { 0x584d, 0x10 },
+ { 0x584e, 0x11 },
+ { 0x584f, 0x10 },
+ { 0x5850, 0xf },
+ { 0x5851, 0xc },
+ { 0x5852, 0xf },
+ { 0x5853, 0x10 },
+ { 0x5854, 0x10 },
+ { 0x5855, 0xf },
+ { 0x5856, 0xe },
+ { 0x5857, 0xb },
+ { 0x5858, 0x10 },
+ { 0x5859, 0xd },
+ { 0x585a, 0xd },
+ { 0x585b, 0xc },
+ { 0x585c, 0xc },
+ { 0x585d, 0xc },
+ { 0x585e, 0xb },
+ { 0x585f, 0xc },
+ { 0x5860, 0xc },
+ { 0x5861, 0xc },
+ { 0x5862, 0xd },
+ { 0x5863, 0x8 },
+ { 0x5864, 0x11 },
+ { 0x5865, 0x18 },
+ { 0x5866, 0x18 },
+ { 0x5867, 0x19 },
+ { 0x5868, 0x17 },
+ { 0x5869, 0x19 },
+ { 0x586a, 0x16 },
+ { 0x586b, 0x13 },
+ { 0x586c, 0x13 },
+ { 0x586d, 0x12 },
+ { 0x586e, 0x13 },
+ { 0x586f, 0x16 },
+ { 0x5870, 0x14 },
+ { 0x5871, 0x12 },
+ { 0x5872, 0x10 },
+ { 0x5873, 0x11 },
+ { 0x5874, 0x11 },
+ { 0x5875, 0x16 },
+ { 0x5876, 0x14 },
+ { 0x5877, 0x11 },
+ { 0x5878, 0x10 },
+ { 0x5879, 0xf },
+ { 0x587a, 0x10 },
+ { 0x587b, 0x14 },
+ { 0x587c, 0x13 },
+ { 0x587d, 0x12 },
+ { 0x587e, 0x11 },
+ { 0x587f, 0x11 },
+ { 0x5880, 0x12 },
+ { 0x5881, 0x15 },
+ { 0x5882, 0x14 },
+ { 0x5883, 0x15 },
+ { 0x5884, 0x15 },
+ { 0x5885, 0x15 },
+ { 0x5886, 0x13 },
+ { 0x5887, 0x17 },
+ { 0x3710, 0x10 },
+ { 0x3632, 0x51 },
+ { 0x3702, 0x10 },
+ { 0x3703, 0xb2 },
+ { 0x3704, 0x18 },
+ { 0x370b, 0x40 },
+ { 0x370d, 0x3 },
+ { 0x3631, 0x1 },
+ { 0x3632, 0x52 },
+ { 0x3606, 0x24 },
+ { 0x3620, 0x96 },
+ { 0x5785, 0x7 },
+ { 0x3a13, 0x30 },
+ { 0x3600, 0x52 },
+ { 0x3604, 0x48 },
+ { 0x3606, 0x1b },
+ { 0x370d, 0xb },
+ { 0x370f, 0xc0 },
+ { 0x3709, 0x1 },
+ { 0x3823, 0x0 },
+ { 0x5007, 0x0 },
+ { 0x5009, 0x0 },
+ { 0x5011, 0x0 },
+ { 0x5013, 0x0 },
+ { 0x519e, 0x0 },
+ { 0x5086, 0x0 },
+ { 0x5087, 0x0 },
+ { 0x5088, 0x0 },
+ { 0x5089, 0x0 },
+ { 0x302b, 0x0 },
+ { 0x3503, 0x7 },
+ { 0x3011, 0x8 },
+ { 0x350c, 0x2 },
+ { 0x350d, 0xe4 },
+ { 0x3621, 0xc9 },
+ { 0x370a, 0x81 },
+ { 0xffff, 0xff },
+};
+
+static struct regval_list ov5642_default_regs_finalise[] = {
+ { 0x3810, 0xc2 },
+ { 0x3818, 0xc9 },
+ { 0x381c, 0x10 },
+ { 0x381d, 0xa0 },
+ { 0x381e, 0x5 },
+ { 0x381f, 0xb0 },
+ { 0x3820, 0x0 },
+ { 0x3821, 0x0 },
+ { 0x3824, 0x11 },
+ { 0x3a08, 0x1b },
+ { 0x3a09, 0xc0 },
+ { 0x3a0a, 0x17 },
+ { 0x3a0b, 0x20 },
+ { 0x3a0d, 0x2 },
+ { 0x3a0e, 0x1 },
+ { 0x401c, 0x4 },
+ { 0x5682, 0x5 },
+ { 0x5683, 0x0 },
+ { 0x5686, 0x2 },
+ { 0x5687, 0xcc },
+ { 0x5001, 0x4f },
+ { 0x589b, 0x6 },
+ { 0x589a, 0xc5 },
+ { 0x3503, 0x0 },
+ { 0x460c, 0x20 },
+ { 0x460b, 0x37 },
+ { 0x471c, 0xd0 },
+ { 0x471d, 0x5 },
+ { 0x3815, 0x1 },
+ { 0x3818, 0xc1 },
+ { 0x501f, 0x0 },
+ { 0x5002, 0xe0 },
+ { 0x4300, 0x32 }, /* UYVY */
+ { 0x3002, 0x1c },
+ { 0x4800, 0x14 },
+ { 0x4801, 0xf },
+ { 0x3007, 0x3b },
+ { 0x300e, 0x4 },
+ { 0x4803, 0x50 },
+ { 0x3815, 0x1 },
+ { 0x4713, 0x2 },
+ { 0x4842, 0x1 },
+ { 0x300f, 0xe },
+ { 0x3003, 0x3 },
+ { 0x3003, 0x1 },
+ { 0xffff, 0xff },
+};
+
+struct ov5642_datafmt {
+ enum v4l2_mbus_pixelcode code;
+ enum v4l2_colorspace colorspace;
+};
+
+struct ov5642 {
+ struct v4l2_subdev subdev;
+ const struct ov5642_datafmt *fmt;
+};
+
+static const struct ov5642_datafmt ov5642_colour_fmts[] = {
+ {V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
+};
+
+static struct ov5642 *to_ov5642(const struct i2c_client *client)
+{
+ return container_of(i2c_get_clientdata(client), struct ov5642, subdev);
+}
+
+/* Find a data format by a pixel code in an array */
+static const struct ov5642_datafmt
+ *ov5642_find_datafmt(enum v4l2_mbus_pixelcode code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ov5642_colour_fmts); i++)
+ if (ov5642_colour_fmts[i].code == code)
+ return ov5642_colour_fmts + i;
+
+ return NULL;
+}
+
+static int reg_read(struct i2c_client *client, u16 reg, u8 *val)
+{
+ int ret;
+ /* We have 16-bit i2c addresses - care for endianness */
+ unsigned char data[2] = { reg >> 8, reg & 0xff };
+
+ ret = i2c_master_send(client, data, 2);
+ if (ret < 2) {
+ dev_err(&client->dev, "%s: i2c read error, reg: %x\n",
+ __func__, reg);
+ return ret < 0 ? ret : -EIO;
+ }
+
+ ret = i2c_master_recv(client, val, 1);
+ if (ret < 1) {
+ dev_err(&client->dev, "%s: i2c read error, reg: %x\n",
+ __func__, reg);
+ return ret < 0 ? ret : -EIO;
+ }
+ return 0;
+}
+
+static int reg_write(struct i2c_client *client, u16 reg, u8 val)
+{
+ int ret;
+ unsigned char data[3] = { reg >> 8, reg & 0xff, val };
+
+ ret = i2c_master_send(client, data, 3);
+ if (ret < 3) {
+ dev_err(&client->dev, "%s: i2c write error, reg: %x\n",
+ __func__, reg);
+ return ret < 0 ? ret : -EIO;
+ }
+
+ return 0;
+}
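A minimal usage sketch for the two helpers above (the wrapper name is illustrative; the 0x3008/0x82 soft-reset write is the same one that opens the init table below):

	static int ov5642_soft_reset(struct i2c_client *client)
	{
		/* The 16-bit register address goes out MSB first, so this
		 * puts the byte sequence { 0x30, 0x08, 0x82 } on the wire. */
		return reg_write(client, 0x3008, 0x82);
	}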
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int ov5642_get_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 val;
+
+ if (reg->reg & ~0xffff)
+ return -EINVAL;
+
+ reg->size = 1;
+
+ ret = reg_read(client, reg->reg, &val);
+ if (!ret)
+ reg->val = (__u64)val;
+
+ return ret;
+}
+
+static int ov5642_set_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->reg & ~0xffff || reg->val & ~0xff)
+ return -EINVAL;
+
+ return reg_write(client, reg->reg, reg->val);
+}
+#endif
+
+static int ov5642_write_array(struct i2c_client *client,
+ struct regval_list *vals)
+{
+ while (vals->reg_num != 0xffff || vals->value != 0xff) {
+ int ret = reg_write(client, vals->reg_num, vals->value);
+ if (ret < 0)
+ return ret;
+ vals++;
+ }
+ dev_dbg(&client->dev, "Register list loaded\n");
+ return 0;
+}
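Note that a register table is terminated by the { 0xffff, 0xff } pair and the loop above stops only when *both* fields match it, so a lone 0xff value (or a lone 0xffff address) elsewhere in a table is still written. Illustrative usage with a made-up table:

	static int ov5642_apply_extra_regs(struct i2c_client *client)
	{
		/* The register/value pairs below are hypothetical examples. */
		static struct regval_list extra_regs[] = {
			{ 0x3008, 0x02 },	/* e.g. leave software standby */
			{ 0xffff, 0xff },	/* sentinel: end of table */
		};

		return ov5642_write_array(client, extra_regs);
	}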
+
+static int ov5642_set_resolution(struct i2c_client *client)
+{
+ int ret;
+ u8 start_x_high = ((OV5642_SENSOR_SIZE_X - OV5642_WIDTH) / 2) >> 8;
+ u8 start_x_low = ((OV5642_SENSOR_SIZE_X - OV5642_WIDTH) / 2) & 0xff;
+ u8 start_y_high = ((OV5642_SENSOR_SIZE_Y - OV5642_HEIGHT) / 2) >> 8;
+ u8 start_y_low = ((OV5642_SENSOR_SIZE_Y - OV5642_HEIGHT) / 2) & 0xff;
+
+ u8 width_high = OV5642_WIDTH >> 8;
+ u8 width_low = OV5642_WIDTH & 0xff;
+ u8 height_high = OV5642_HEIGHT >> 8;
+ u8 height_low = OV5642_HEIGHT & 0xff;
+
+ u8 total_width_high = OV5642_TOTAL_WIDTH >> 8;
+ u8 total_width_low = OV5642_TOTAL_WIDTH & 0xff;
+ u8 total_height_high = OV5642_TOTAL_HEIGHT >> 8;
+ u8 total_height_low = OV5642_TOTAL_HEIGHT & 0xff;
+
+ ret = reg_write(client, REG_WINDOW_START_X_HIGH, start_x_high);
+ if (!ret)
+ ret = reg_write(client, REG_WINDOW_START_X_LOW, start_x_low);
+ if (!ret)
+ ret = reg_write(client, REG_WINDOW_START_Y_HIGH, start_y_high);
+ if (!ret)
+ ret = reg_write(client, REG_WINDOW_START_Y_LOW, start_y_low);
+
+ if (!ret)
+ ret = reg_write(client, REG_WINDOW_WIDTH_HIGH, width_high);
+ if (!ret)
+ ret = reg_write(client, REG_WINDOW_WIDTH_LOW, width_low);
+ if (!ret)
+ ret = reg_write(client, REG_WINDOW_HEIGHT_HIGH, height_high);
+ if (!ret)
+ ret = reg_write(client, REG_WINDOW_HEIGHT_LOW, height_low);
+
+ if (!ret)
+ ret = reg_write(client, REG_OUT_WIDTH_HIGH, width_high);
+ if (!ret)
+ ret = reg_write(client, REG_OUT_WIDTH_LOW, width_low);
+ if (!ret)
+ ret = reg_write(client, REG_OUT_HEIGHT_HIGH, height_high);
+ if (!ret)
+ ret = reg_write(client, REG_OUT_HEIGHT_LOW, height_low);
+
+ if (!ret)
+ ret = reg_write(client, REG_OUT_TOTAL_WIDTH_HIGH, total_width_high);
+ if (!ret)
+ ret = reg_write(client, REG_OUT_TOTAL_WIDTH_LOW, total_width_low);
+ if (!ret)
+ ret = reg_write(client, REG_OUT_TOTAL_HEIGHT_HIGH, total_height_high);
+ if (!ret)
+ ret = reg_write(client, REG_OUT_TOTAL_HEIGHT_LOW, total_height_low);
+
+ return ret;
+}
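With the fixed 1280x720 output window on the 2592x1944 pixel array defined above, the window registers end up with the following values (hand-derived from the code, shown only to make the high/low byte splitting concrete):

	/*
	 * start_x = (2592 - 1280) / 2 = 656 = 0x0290 -> 0x3800 = 0x02, 0x3801 = 0x90
	 * start_y = (1944 -  720) / 2 = 612 = 0x0264 -> 0x3802 = 0x02, 0x3803 = 0x64
	 * width   = 1280 = 0x0500           -> 0x3804 = 0x05, 0x3805 = 0x00
	 * height  =  720 = 0x02d0           -> 0x3806 = 0x02, 0x3807 = 0xd0
	 * total   = 3200 x 2000 = 0x0c80 x 0x07d0 -> 0x380c..0x380f
	 */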
+
+static int ov5642_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ const struct ov5642_datafmt *fmt = ov5642_find_datafmt(mf->code);
+
+ dev_dbg(sd->v4l2_dev->dev, "%s(%u) width: %u height: %u\n",
+ __func__, mf->code, mf->width, mf->height);
+
+ if (!fmt) {
+ mf->code = ov5642_colour_fmts[0].code;
+ mf->colorspace = ov5642_colour_fmts[0].colorspace;
+ }
+
+ mf->width = OV5642_WIDTH;
+ mf->height = OV5642_HEIGHT;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int ov5642_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5642 *priv = to_ov5642(client);
+
+ dev_dbg(sd->v4l2_dev->dev, "%s(%u)\n", __func__, mf->code);
+
+ /* MIPI CSI could have changed the format, double-check */
+ if (!ov5642_find_datafmt(mf->code))
+ return -EINVAL;
+
+ ov5642_try_fmt(sd, mf);
+
+ priv->fmt = ov5642_find_datafmt(mf->code);
+
+ ov5642_write_array(client, ov5642_default_regs_init);
+ ov5642_set_resolution(client);
+ ov5642_write_array(client, ov5642_default_regs_finalise);
+
+ return 0;
+}
+
+static int ov5642_g_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov5642 *priv = to_ov5642(client);
+
+ const struct ov5642_datafmt *fmt = priv->fmt;
+
+ mf->code = fmt->code;
+ mf->colorspace = fmt->colorspace;
+ mf->width = OV5642_WIDTH;
+ mf->height = OV5642_HEIGHT;
+ mf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int ov5642_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index >= ARRAY_SIZE(ov5642_colour_fmts))
+ return -EINVAL;
+
+ *code = ov5642_colour_fmts[index].code;
+ return 0;
+}
+
+static int ov5642_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
+ return -EINVAL;
+
+ if (id->match.addr != client->addr)
+ return -ENODEV;
+
+ id->ident = V4L2_IDENT_OV5642;
+ id->revision = 0;
+
+ return 0;
+}
+
+static int ov5642_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+ struct v4l2_rect *rect = &a->c;
+
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ rect->top = 0;
+ rect->left = 0;
+ rect->width = OV5642_WIDTH;
+ rect->height = OV5642_HEIGHT;
+
+ return 0;
+}
+
+static int ov5642_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+{
+ a->bounds.left = 0;
+ a->bounds.top = 0;
+ a->bounds.width = OV5642_WIDTH;
+ a->bounds.height = OV5642_HEIGHT;
+ a->defrect = a->bounds;
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops ov5642_subdev_video_ops = {
+ .s_mbus_fmt = ov5642_s_fmt,
+ .g_mbus_fmt = ov5642_g_fmt,
+ .try_mbus_fmt = ov5642_try_fmt,
+ .enum_mbus_fmt = ov5642_enum_fmt,
+ .g_crop = ov5642_g_crop,
+ .cropcap = ov5642_cropcap,
+};
+
+static struct v4l2_subdev_core_ops ov5642_subdev_core_ops = {
+ .g_chip_ident = ov5642_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = ov5642_get_register,
+ .s_register = ov5642_set_register,
+#endif
+};
+
+static struct v4l2_subdev_ops ov5642_subdev_ops = {
+ .core = &ov5642_subdev_core_ops,
+ .video = &ov5642_subdev_video_ops,
+};
+
+/*
+ * We have to provide soc-camera operations, but we don't have anything to say
+ * there. The MIPI CSI2 driver will provide .query_bus_param and .set_bus_param
+ */
+static unsigned long soc_ov5642_query_bus_param(struct soc_camera_device *icd)
+{
+ return 0;
+}
+
+static int soc_ov5642_set_bus_param(struct soc_camera_device *icd,
+ unsigned long flags)
+{
+ return -EINVAL;
+}
+
+static struct soc_camera_ops soc_ov5642_ops = {
+ .query_bus_param = soc_ov5642_query_bus_param,
+ .set_bus_param = soc_ov5642_set_bus_param,
+};
+
+static int ov5642_video_probe(struct soc_camera_device *icd,
+ struct i2c_client *client)
+{
+ int ret;
+ u8 id_high, id_low;
+ u16 id;
+
+ /* Read sensor Model ID */
+ ret = reg_read(client, REG_CHIP_ID_HIGH, &id_high);
+ if (ret < 0)
+ return ret;
+
+ id = id_high << 8;
+
+ ret = reg_read(client, REG_CHIP_ID_LOW, &id_low);
+ if (ret < 0)
+ return ret;
+
+ id |= id_low;
+
+ dev_info(&client->dev, "Chip ID 0x%04x detected\n", id);
+
+ if (id != 0x5642)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int ov5642_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct ov5642 *priv;
+ struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_link *icl;
+ int ret;
+
+ if (!icd) {
+ dev_err(&client->dev, "OV5642: missing soc-camera data!\n");
+ return -EINVAL;
+ }
+
+ icl = to_soc_camera_link(icd);
+ if (!icl) {
+ dev_err(&client->dev, "OV5642: missing platform data!\n");
+ return -EINVAL;
+ }
+
+ priv = kzalloc(sizeof(struct ov5642), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&priv->subdev, client, &ov5642_subdev_ops);
+
+ icd->ops = &soc_ov5642_ops;
+ priv->fmt = &ov5642_colour_fmts[0];
+
+ ret = ov5642_video_probe(icd, client);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ icd->ops = NULL;
+ kfree(priv);
+ return ret;
+}
+
+static int ov5642_remove(struct i2c_client *client)
+{
+ struct ov5642 *priv = to_ov5642(client);
+ struct soc_camera_device *icd = client->dev.platform_data;
+ struct soc_camera_link *icl = to_soc_camera_link(icd);
+
+ icd->ops = NULL;
+ if (icl->free_bus)
+ icl->free_bus(icl);
+ kfree(priv);
+
+ return 0;
+}
+
+static const struct i2c_device_id ov5642_id[] = {
+ { "ov5642", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ov5642_id);
+
+static struct i2c_driver ov5642_i2c_driver = {
+ .driver = {
+ .name = "ov5642",
+ },
+ .probe = ov5642_probe,
+ .remove = ov5642_remove,
+ .id_table = ov5642_id,
+};
+
+static int __init ov5642_mod_init(void)
+{
+ return i2c_add_driver(&ov5642_i2c_driver);
+}
+
+static void __exit ov5642_mod_exit(void)
+{
+ i2c_del_driver(&ov5642_i2c_driver);
+}
+
+module_init(ov5642_mod_init);
+module_exit(ov5642_mod_exit);
+
+MODULE_DESCRIPTION("Omnivision OV5642 Camera driver");
+MODULE_AUTHOR("Bastian Hecht <hechtb@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index d4e7c11553c..8aa05853128 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -19,8 +19,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-mediabus.h>
-
-#include "ov7670.h"
+#include <media/ov7670.h>
MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
MODULE_DESCRIPTION("A low-level driver for OmniVision ov7670 sensors");
diff --git a/drivers/media/video/ov7670.h b/drivers/media/video/ov7670.h
deleted file mode 100644
index b133bc12303..00000000000
--- a/drivers/media/video/ov7670.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * A V4L2 driver for OmniVision OV7670 cameras.
- *
- * Copyright 2010 One Laptop Per Child
- *
- * This file may be distributed under the terms of the GNU General
- * Public License, version 2.
- */
-
-#ifndef __OV7670_H
-#define __OV7670_H
-
-struct ov7670_config {
- int min_width; /* Filter out smaller sizes */
- int min_height; /* Filter out smaller sizes */
- int clock_speed; /* External clock speed (MHz) */
- bool use_smbus; /* Use smbus I/O instead of I2C */
-};
-
-#endif
diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c
index 48895ef863f..397870f076c 100644
--- a/drivers/media/video/ov772x.c
+++ b/drivers/media/video/ov772x.c
@@ -1032,13 +1032,9 @@ static int ov772x_video_probe(struct soc_camera_device *icd,
u8 pid, ver;
const char *devname;
- /*
- * We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
- return -ENODEV;
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/*
* check and show product ID and manufacturer ID
diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c
index 5173ac449dd..3681a6ff081 100644
--- a/drivers/media/video/ov9640.c
+++ b/drivers/media/video/ov9640.c
@@ -657,16 +657,9 @@ static int ov9640_video_probe(struct soc_camera_device *icd,
const char *devname;
int ret = 0;
- /*
- * We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface) {
- dev_err(&client->dev, "Parent missing or invalid!\n");
- ret = -ENODEV;
- goto err;
- }
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/*
* check and show product ID and manufacturer ID
diff --git a/drivers/media/video/ov9740.c b/drivers/media/video/ov9740.c
index 4d4ee4faca6..edd1ffcca30 100644
--- a/drivers/media/video/ov9740.c
+++ b/drivers/media/video/ov9740.c
@@ -44,12 +44,12 @@
#define OV9740_Y_ADDR_START_LO 0x0347
#define OV9740_X_ADDR_END_HI 0x0348
#define OV9740_X_ADDR_END_LO 0x0349
-#define OV9740_Y_ADDR_END_HI 0x034A
-#define OV9740_Y_ADDR_END_LO 0x034B
-#define OV9740_X_OUTPUT_SIZE_HI 0x034C
-#define OV9740_X_OUTPUT_SIZE_LO 0x034D
-#define OV9740_Y_OUTPUT_SIZE_HI 0x034E
-#define OV9740_Y_OUTPUT_SIZE_LO 0x034F
+#define OV9740_Y_ADDR_END_HI 0x034a
+#define OV9740_Y_ADDR_END_LO 0x034b
+#define OV9740_X_OUTPUT_SIZE_HI 0x034c
+#define OV9740_X_OUTPUT_SIZE_LO 0x034d
+#define OV9740_Y_OUTPUT_SIZE_HI 0x034e
+#define OV9740_Y_OUTPUT_SIZE_LO 0x034f
/* IO Control Registers */
#define OV9740_IO_CREL00 0x3002
@@ -68,6 +68,7 @@
#define OV9740_ANALOG_CTRL04 0x3604
#define OV9740_ANALOG_CTRL10 0x3610
#define OV9740_ANALOG_CTRL12 0x3612
+#define OV9740_ANALOG_CTRL15 0x3615
#define OV9740_ANALOG_CTRL20 0x3620
#define OV9740_ANALOG_CTRL21 0x3621
#define OV9740_ANALOG_CTRL22 0x3622
@@ -89,28 +90,28 @@
#define OV9740_TIMING_CTRL35 0x3835
/* Banding Filter */
-#define OV9740_AEC_MAXEXPO_60_H 0x3A02
-#define OV9740_AEC_MAXEXPO_60_L 0x3A03
-#define OV9740_AEC_B50_STEP_HI 0x3A08
-#define OV9740_AEC_B50_STEP_LO 0x3A09
-#define OV9740_AEC_B60_STEP_HI 0x3A0A
-#define OV9740_AEC_B60_STEP_LO 0x3A0B
-#define OV9740_AEC_CTRL0D 0x3A0D
-#define OV9740_AEC_CTRL0E 0x3A0E
-#define OV9740_AEC_MAXEXPO_50_H 0x3A14
-#define OV9740_AEC_MAXEXPO_50_L 0x3A15
+#define OV9740_AEC_MAXEXPO_60_H 0x3a02
+#define OV9740_AEC_MAXEXPO_60_L 0x3a03
+#define OV9740_AEC_B50_STEP_HI 0x3a08
+#define OV9740_AEC_B50_STEP_LO 0x3a09
+#define OV9740_AEC_B60_STEP_HI 0x3a0a
+#define OV9740_AEC_B60_STEP_LO 0x3a0b
+#define OV9740_AEC_CTRL0D 0x3a0d
+#define OV9740_AEC_CTRL0E 0x3a0e
+#define OV9740_AEC_MAXEXPO_50_H 0x3a14
+#define OV9740_AEC_MAXEXPO_50_L 0x3a15
/* AEC/AGC Control */
#define OV9740_AEC_ENABLE 0x3503
-#define OV9740_GAIN_CEILING_01 0x3A18
-#define OV9740_GAIN_CEILING_02 0x3A19
-#define OV9740_AEC_HI_THRESHOLD 0x3A11
-#define OV9740_AEC_3A1A 0x3A1A
-#define OV9740_AEC_CTRL1B_WPT2 0x3A1B
-#define OV9740_AEC_CTRL0F_WPT 0x3A0F
-#define OV9740_AEC_CTRL10_BPT 0x3A10
-#define OV9740_AEC_CTRL1E_BPT2 0x3A1E
-#define OV9740_AEC_LO_THRESHOLD 0x3A1F
+#define OV9740_GAIN_CEILING_01 0x3a18
+#define OV9740_GAIN_CEILING_02 0x3a19
+#define OV9740_AEC_HI_THRESHOLD 0x3a11
+#define OV9740_AEC_3A1A 0x3a1a
+#define OV9740_AEC_CTRL1B_WPT2 0x3a1b
+#define OV9740_AEC_CTRL0F_WPT 0x3a0f
+#define OV9740_AEC_CTRL10_BPT 0x3a10
+#define OV9740_AEC_CTRL1E_BPT2 0x3a1e
+#define OV9740_AEC_LO_THRESHOLD 0x3a1f
/* BLC Control */
#define OV9740_BLC_AUTO_ENABLE 0x4002
@@ -132,7 +133,7 @@
#define OV9740_VT_SYS_CLK_DIV 0x0303
#define OV9740_VT_PIX_CLK_DIV 0x0301
#define OV9740_PLL_CTRL3010 0x3010
-#define OV9740_VFIFO_CTRL00 0x460E
+#define OV9740_VFIFO_CTRL00 0x460e
/* ISP Control */
#define OV9740_ISP_CTRL00 0x5000
@@ -141,9 +142,9 @@
#define OV9740_ISP_CTRL05 0x5005
#define OV9740_ISP_CTRL12 0x5012
#define OV9740_ISP_CTRL19 0x5019
-#define OV9740_ISP_CTRL1A 0x501A
-#define OV9740_ISP_CTRL1E 0x501E
-#define OV9740_ISP_CTRL1F 0x501F
+#define OV9740_ISP_CTRL1A 0x501a
+#define OV9740_ISP_CTRL1E 0x501e
+#define OV9740_ISP_CTRL1F 0x501f
#define OV9740_ISP_CTRL20 0x5020
#define OV9740_ISP_CTRL21 0x5021
@@ -158,12 +159,12 @@
#define OV9740_AWB_ADV_CTRL04 0x5187
#define OV9740_AWB_ADV_CTRL05 0x5188
#define OV9740_AWB_ADV_CTRL06 0x5189
-#define OV9740_AWB_ADV_CTRL07 0x518A
-#define OV9740_AWB_ADV_CTRL08 0x518B
-#define OV9740_AWB_ADV_CTRL09 0x518C
-#define OV9740_AWB_ADV_CTRL10 0x518D
-#define OV9740_AWB_ADV_CTRL11 0x518E
-#define OV9740_AWB_CTRL0F 0x518F
+#define OV9740_AWB_ADV_CTRL07 0x518a
+#define OV9740_AWB_ADV_CTRL08 0x518b
+#define OV9740_AWB_ADV_CTRL09 0x518c
+#define OV9740_AWB_ADV_CTRL10 0x518d
+#define OV9740_AWB_ADV_CTRL11 0x518e
+#define OV9740_AWB_CTRL0F 0x518f
#define OV9740_AWB_CTRL10 0x5190
#define OV9740_AWB_CTRL11 0x5191
#define OV9740_AWB_CTRL12 0x5192
@@ -180,27 +181,8 @@
#define OV9740_MIPI_CTRL_3012 0x3012
#define OV9740_SC_CMMM_MIPI_CTR 0x3014
-/* supported resolutions */
-enum {
- OV9740_VGA,
- OV9740_720P,
-};
-
-struct ov9740_resolution {
- unsigned int width;
- unsigned int height;
-};
-
-static struct ov9740_resolution ov9740_resolutions[] = {
- [OV9740_VGA] = {
- .width = 640,
- .height = 480,
- },
- [OV9740_720P] = {
- .width = 1280,
- .height = 720,
- },
-};
+#define OV9740_MAX_WIDTH 1280
+#define OV9740_MAX_HEIGHT 720
/* Misc. structures */
struct ov9740_reg {
@@ -219,9 +201,16 @@ struct ov9740_priv {
bool flag_vflip;
bool flag_hflip;
+
+ /* For suspend/resume. */
+ struct v4l2_mbus_framefmt current_mf;
+ bool current_enable;
};
static const struct ov9740_reg ov9740_defaults[] = {
+ /* Software Reset */
+ { OV9740_SOFTWARE_RESET, 0x01 },
+
/* Banding Filter */
{ OV9740_AEC_B50_STEP_HI, 0x00 },
{ OV9740_AEC_B50_STEP_LO, 0xe8 },
@@ -241,36 +230,36 @@ static const struct ov9740_reg ov9740_defaults[] = {
/* Un-documented OV9740 registers */
{ 0x5800, 0x29 }, { 0x5801, 0x25 }, { 0x5802, 0x20 }, { 0x5803, 0x21 },
{ 0x5804, 0x26 }, { 0x5805, 0x2e }, { 0x5806, 0x11 }, { 0x5807, 0x0c },
- { 0x5808, 0x09 }, { 0x5809, 0x0a }, { 0x580A, 0x0e }, { 0x580B, 0x16 },
- { 0x580C, 0x06 }, { 0x580D, 0x02 }, { 0x580E, 0x00 }, { 0x580F, 0x00 },
+ { 0x5808, 0x09 }, { 0x5809, 0x0a }, { 0x580a, 0x0e }, { 0x580b, 0x16 },
+ { 0x580c, 0x06 }, { 0x580d, 0x02 }, { 0x580e, 0x00 }, { 0x580f, 0x00 },
{ 0x5810, 0x04 }, { 0x5811, 0x0a }, { 0x5812, 0x05 }, { 0x5813, 0x02 },
{ 0x5814, 0x00 }, { 0x5815, 0x00 }, { 0x5816, 0x03 }, { 0x5817, 0x09 },
- { 0x5818, 0x0f }, { 0x5819, 0x0a }, { 0x581A, 0x07 }, { 0x581B, 0x08 },
- { 0x581C, 0x0b }, { 0x581D, 0x14 }, { 0x581E, 0x28 }, { 0x581F, 0x23 },
+ { 0x5818, 0x0f }, { 0x5819, 0x0a }, { 0x581a, 0x07 }, { 0x581b, 0x08 },
+ { 0x581c, 0x0b }, { 0x581d, 0x14 }, { 0x581e, 0x28 }, { 0x581f, 0x23 },
{ 0x5820, 0x1d }, { 0x5821, 0x1e }, { 0x5822, 0x24 }, { 0x5823, 0x2a },
{ 0x5824, 0x4f }, { 0x5825, 0x6f }, { 0x5826, 0x5f }, { 0x5827, 0x7f },
- { 0x5828, 0x9f }, { 0x5829, 0x5f }, { 0x582A, 0x8f }, { 0x582B, 0x9e },
- { 0x582C, 0x8f }, { 0x582D, 0x9f }, { 0x582E, 0x4f }, { 0x582F, 0x87 },
+ { 0x5828, 0x9f }, { 0x5829, 0x5f }, { 0x582a, 0x8f }, { 0x582b, 0x9e },
+ { 0x582c, 0x8f }, { 0x582d, 0x9f }, { 0x582e, 0x4f }, { 0x582f, 0x87 },
{ 0x5830, 0x86 }, { 0x5831, 0x97 }, { 0x5832, 0xae }, { 0x5833, 0x3f },
{ 0x5834, 0x8e }, { 0x5835, 0x7c }, { 0x5836, 0x7e }, { 0x5837, 0xaf },
- { 0x5838, 0x8f }, { 0x5839, 0x8f }, { 0x583A, 0x9f }, { 0x583B, 0x7f },
- { 0x583C, 0x5f },
+ { 0x5838, 0x8f }, { 0x5839, 0x8f }, { 0x583a, 0x9f }, { 0x583b, 0x7f },
+ { 0x583c, 0x5f },
/* Y Gamma */
{ 0x5480, 0x07 }, { 0x5481, 0x18 }, { 0x5482, 0x2c }, { 0x5483, 0x4e },
{ 0x5484, 0x5e }, { 0x5485, 0x6b }, { 0x5486, 0x77 }, { 0x5487, 0x82 },
- { 0x5488, 0x8c }, { 0x5489, 0x95 }, { 0x548A, 0xa4 }, { 0x548B, 0xb1 },
- { 0x548C, 0xc6 }, { 0x548D, 0xd8 }, { 0x548E, 0xe9 },
+ { 0x5488, 0x8c }, { 0x5489, 0x95 }, { 0x548a, 0xa4 }, { 0x548b, 0xb1 },
+ { 0x548c, 0xc6 }, { 0x548d, 0xd8 }, { 0x548e, 0xe9 },
/* UV Gamma */
{ 0x5490, 0x0f }, { 0x5491, 0xff }, { 0x5492, 0x0d }, { 0x5493, 0x05 },
{ 0x5494, 0x07 }, { 0x5495, 0x1a }, { 0x5496, 0x04 }, { 0x5497, 0x01 },
- { 0x5498, 0x03 }, { 0x5499, 0x53 }, { 0x549A, 0x02 }, { 0x549B, 0xeb },
- { 0x549C, 0x02 }, { 0x549D, 0xa0 }, { 0x549E, 0x02 }, { 0x549F, 0x67 },
- { 0x54A0, 0x02 }, { 0x54A1, 0x3b }, { 0x54A2, 0x02 }, { 0x54A3, 0x18 },
- { 0x54A4, 0x01 }, { 0x54A5, 0xe7 }, { 0x54A6, 0x01 }, { 0x54A7, 0xc3 },
- { 0x54A8, 0x01 }, { 0x54A9, 0x94 }, { 0x54AA, 0x01 }, { 0x54AB, 0x72 },
- { 0x54AC, 0x01 }, { 0x54AD, 0x57 },
+ { 0x5498, 0x03 }, { 0x5499, 0x53 }, { 0x549a, 0x02 }, { 0x549b, 0xeb },
+ { 0x549c, 0x02 }, { 0x549d, 0xa0 }, { 0x549e, 0x02 }, { 0x549f, 0x67 },
+ { 0x54a0, 0x02 }, { 0x54a1, 0x3b }, { 0x54a2, 0x02 }, { 0x54a3, 0x18 },
+ { 0x54a4, 0x01 }, { 0x54a5, 0xe7 }, { 0x54a6, 0x01 }, { 0x54a7, 0xc3 },
+ { 0x54a8, 0x01 }, { 0x54a9, 0x94 }, { 0x54aa, 0x01 }, { 0x54ab, 0x72 },
+ { 0x54ac, 0x01 }, { 0x54ad, 0x57 },
/* AWB */
{ OV9740_AWB_CTRL00, 0xf0 },
@@ -296,18 +285,18 @@ static const struct ov9740_reg ov9740_defaults[] = {
{ OV9740_AWB_CTRL14, 0x00 },
/* CIP */
- { 0x530D, 0x12 },
+ { 0x530d, 0x12 },
/* CMX */
{ 0x5380, 0x01 }, { 0x5381, 0x00 }, { 0x5382, 0x00 }, { 0x5383, 0x17 },
{ 0x5384, 0x00 }, { 0x5385, 0x01 }, { 0x5386, 0x00 }, { 0x5387, 0x00 },
- { 0x5388, 0x00 }, { 0x5389, 0xe0 }, { 0x538A, 0x00 }, { 0x538B, 0x20 },
- { 0x538C, 0x00 }, { 0x538D, 0x00 }, { 0x538E, 0x00 }, { 0x538F, 0x16 },
+ { 0x5388, 0x00 }, { 0x5389, 0xe0 }, { 0x538a, 0x00 }, { 0x538b, 0x20 },
+ { 0x538c, 0x00 }, { 0x538d, 0x00 }, { 0x538e, 0x00 }, { 0x538f, 0x16 },
{ 0x5390, 0x00 }, { 0x5391, 0x9c }, { 0x5392, 0x00 }, { 0x5393, 0xa0 },
{ 0x5394, 0x18 },
/* 50/60 Detection */
- { 0x3C0A, 0x9c }, { 0x3C0B, 0x3f },
+ { 0x3c0a, 0x9c }, { 0x3c0b, 0x3f },
/* Output Select */
{ OV9740_IO_OUTPUT_SEL01, 0x00 },
@@ -333,6 +322,7 @@ static const struct ov9740_reg ov9740_defaults[] = {
{ OV9740_ANALOG_CTRL10, 0xa1 },
{ OV9740_ANALOG_CTRL12, 0x24 },
{ OV9740_ANALOG_CTRL22, 0x9f },
+ { OV9740_ANALOG_CTRL15, 0xf0 },
/* Sensor Control */
{ OV9740_SENSOR_CTRL03, 0x42 },
@@ -385,7 +375,7 @@ static const struct ov9740_reg ov9740_defaults[] = {
{ OV9740_LN_LENGTH_PCK_LO, 0x62 },
/* MIPI Control */
- { OV9740_MIPI_CTRL00, 0x44 },
+ { OV9740_MIPI_CTRL00, 0x44 }, /* 0x64 for discontinuous clk */
{ OV9740_MIPI_3837, 0x01 },
{ OV9740_MIPI_CTRL01, 0x0f },
{ OV9740_MIPI_CTRL03, 0x05 },
@@ -393,54 +383,9 @@ static const struct ov9740_reg ov9740_defaults[] = {
{ OV9740_VFIFO_RD_CTRL, 0x16 },
{ OV9740_MIPI_CTRL_3012, 0x70 },
{ OV9740_SC_CMMM_MIPI_CTR, 0x01 },
-};
-
-static const struct ov9740_reg ov9740_regs_vga[] = {
- { OV9740_X_ADDR_START_HI, 0x00 },
- { OV9740_X_ADDR_START_LO, 0xa0 },
- { OV9740_Y_ADDR_START_HI, 0x00 },
- { OV9740_Y_ADDR_START_LO, 0x00 },
- { OV9740_X_ADDR_END_HI, 0x04 },
- { OV9740_X_ADDR_END_LO, 0x63 },
- { OV9740_Y_ADDR_END_HI, 0x02 },
- { OV9740_Y_ADDR_END_LO, 0xd3 },
- { OV9740_X_OUTPUT_SIZE_HI, 0x02 },
- { OV9740_X_OUTPUT_SIZE_LO, 0x80 },
- { OV9740_Y_OUTPUT_SIZE_HI, 0x01 },
- { OV9740_Y_OUTPUT_SIZE_LO, 0xe0 },
- { OV9740_ISP_CTRL1E, 0x03 },
- { OV9740_ISP_CTRL1F, 0xc0 },
- { OV9740_ISP_CTRL20, 0x02 },
- { OV9740_ISP_CTRL21, 0xd0 },
- { OV9740_VFIFO_READ_START_HI, 0x01 },
- { OV9740_VFIFO_READ_START_LO, 0x40 },
- { OV9740_ISP_CTRL00, 0xff },
- { OV9740_ISP_CTRL01, 0xff },
- { OV9740_ISP_CTRL03, 0xff },
-};
-static const struct ov9740_reg ov9740_regs_720p[] = {
- { OV9740_X_ADDR_START_HI, 0x00 },
- { OV9740_X_ADDR_START_LO, 0x00 },
- { OV9740_Y_ADDR_START_HI, 0x00 },
- { OV9740_Y_ADDR_START_LO, 0x00 },
- { OV9740_X_ADDR_END_HI, 0x05 },
- { OV9740_X_ADDR_END_LO, 0x03 },
- { OV9740_Y_ADDR_END_HI, 0x02 },
- { OV9740_Y_ADDR_END_LO, 0xd3 },
- { OV9740_X_OUTPUT_SIZE_HI, 0x05 },
- { OV9740_X_OUTPUT_SIZE_LO, 0x00 },
- { OV9740_Y_OUTPUT_SIZE_HI, 0x02 },
- { OV9740_Y_OUTPUT_SIZE_LO, 0xd0 },
- { OV9740_ISP_CTRL1E, 0x05 },
- { OV9740_ISP_CTRL1F, 0x00 },
- { OV9740_ISP_CTRL20, 0x02 },
- { OV9740_ISP_CTRL21, 0xd0 },
- { OV9740_VFIFO_READ_START_HI, 0x02 },
- { OV9740_VFIFO_READ_START_LO, 0x30 },
- { OV9740_ISP_CTRL00, 0xff },
- { OV9740_ISP_CTRL01, 0xef },
- { OV9740_ISP_CTRL03, 0xff },
+ /* YUYV order */
+ { OV9740_ISP_CTRL19, 0x02 },
};
static enum v4l2_mbus_pixelcode ov9740_codes[] = {
@@ -537,7 +482,8 @@ static int ov9740_reg_rmw(struct i2c_client *client, u16 reg, u8 set, u8 unset)
ret = ov9740_reg_read(client, reg, &val);
if (ret < 0) {
dev_err(&client->dev,
- "[Read]-Modify-Write of register %02x failed!\n", reg);
+ "[Read]-Modify-Write of register 0x%04x failed!\n",
+ reg);
return ret;
}
@@ -547,7 +493,8 @@ static int ov9740_reg_rmw(struct i2c_client *client, u16 reg, u8 set, u8 unset)
ret = ov9740_reg_write(client, reg, val);
if (ret < 0) {
dev_err(&client->dev,
- "Read-Modify-[Write] of register %02x failed!\n", reg);
+ "Read-Modify-[Write] of register 0x%04x failed!\n",
+ reg);
return ret;
}
@@ -608,6 +555,8 @@ static int ov9740_s_stream(struct v4l2_subdev *sd, int enable)
0x00);
}
+ priv->current_enable = enable;
+
return ret;
}
@@ -630,126 +579,127 @@ static unsigned long ov9740_query_bus_param(struct soc_camera_device *icd)
return soc_camera_apply_sensor_flags(icl, flags);
}
-/* Get status of additional camera capabilities */
-static int ov9740_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct ov9740_priv *priv = to_ov9740(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- ctrl->value = priv->flag_vflip;
- break;
- case V4L2_CID_HFLIP:
- ctrl->value = priv->flag_hflip;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Set status of additional camera capabilities */
-static int ov9740_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct ov9740_priv *priv = to_ov9740(sd);
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- priv->flag_vflip = ctrl->value;
- break;
- case V4L2_CID_HFLIP:
- priv->flag_hflip = ctrl->value;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Get chip identification */
-static int ov9740_g_chip_ident(struct v4l2_subdev *sd,
- struct v4l2_dbg_chip_ident *id)
+/* select nearest higher resolution for capture */
+static void ov9740_res_roundup(u32 *width, u32 *height)
{
- struct ov9740_priv *priv = to_ov9740(sd);
+ /* Width must be a multiple of 4 pixels. */
+ *width = ALIGN(*width, 4);
- id->ident = priv->ident;
- id->revision = priv->revision;
+ /* Max resolution is 1280x720 (720p). */
+ if (*width > OV9740_MAX_WIDTH)
+ *width = OV9740_MAX_WIDTH;
- return 0;
+ if (*height > OV9740_MAX_HEIGHT)
+ *height = OV9740_MAX_HEIGHT;
}
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int ov9740_get_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
+/* Setup registers according to resolution and color encoding */
+static int ov9740_set_res(struct i2c_client *client, u32 width, u32 height)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u32 x_start;
+ u32 y_start;
+ u32 x_end;
+ u32 y_end;
+ bool scaling = 0;
+ u32 scale_input_x;
+ u32 scale_input_y;
int ret;
- u8 val;
-
- if (reg->reg & ~0xffff)
- return -EINVAL;
- reg->size = 2;
-
- ret = ov9740_reg_read(client, reg->reg, &val);
- if (ret)
- return ret;
-
- reg->val = (__u64)val;
+ if ((width != OV9740_MAX_WIDTH) || (height != OV9740_MAX_HEIGHT))
+ scaling = 1;
- return ret;
-}
-
-static int ov9740_set_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
+ /*
+ * Try to use as much of the sensor area as possible when supporting
+ * smaller resolutions. Depending on the aspect ratio of the
+ * chosen resolution, we can either use the full width of the sensor,
+ * or the full height of the sensor (or both if the aspect ratio is
+ * the same as 1280x720).
+ */
+ if ((OV9740_MAX_WIDTH * height) > (OV9740_MAX_HEIGHT * width)) {
+ scale_input_x = (OV9740_MAX_HEIGHT * width) / height;
+ scale_input_y = OV9740_MAX_HEIGHT;
+ } else {
+ scale_input_x = OV9740_MAX_WIDTH;
+ scale_input_y = (OV9740_MAX_WIDTH * height) / width;
+ }
- if (reg->reg & ~0xffff || reg->val & ~0xff)
- return -EINVAL;
+ /* These describe the area of the sensor to use. */
+ x_start = (OV9740_MAX_WIDTH - scale_input_x) / 2;
+ y_start = (OV9740_MAX_HEIGHT - scale_input_y) / 2;
+ x_end = x_start + scale_input_x - 1;
+ y_end = y_start + scale_input_y - 1;
- return ov9740_reg_write(client, reg->reg, reg->val);
-}
-#endif
+ ret = ov9740_reg_write(client, OV9740_X_ADDR_START_HI, x_start >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_X_ADDR_START_LO, x_start & 0xff);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_Y_ADDR_START_HI, y_start >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_Y_ADDR_START_LO, y_start & 0xff);
+ if (ret)
+ goto done;
-/* select nearest higher resolution for capture */
-static void ov9740_res_roundup(u32 *width, u32 *height)
-{
- int i;
+ ret = ov9740_reg_write(client, OV9740_X_ADDR_END_HI, x_end >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_X_ADDR_END_LO, x_end & 0xff);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_Y_ADDR_END_HI, y_end >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_Y_ADDR_END_LO, y_end & 0xff);
+ if (ret)
+ goto done;
- for (i = 0; i < ARRAY_SIZE(ov9740_resolutions); i++)
- if ((ov9740_resolutions[i].width >= *width) &&
- (ov9740_resolutions[i].height >= *height)) {
- *width = ov9740_resolutions[i].width;
- *height = ov9740_resolutions[i].height;
- return;
- }
+ ret = ov9740_reg_write(client, OV9740_X_OUTPUT_SIZE_HI, width >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_X_OUTPUT_SIZE_LO, width & 0xff);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_Y_OUTPUT_SIZE_HI, height >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_Y_OUTPUT_SIZE_LO, height & 0xff);
+ if (ret)
+ goto done;
- *width = ov9740_resolutions[OV9740_720P].width;
- *height = ov9740_resolutions[OV9740_720P].height;
-}
+ ret = ov9740_reg_write(client, OV9740_ISP_CTRL1E, scale_input_x >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_ISP_CTRL1F, scale_input_x & 0xff);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_ISP_CTRL20, scale_input_y >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_ISP_CTRL21, scale_input_y & 0xff);
+ if (ret)
+ goto done;
-/* Setup registers according to resolution and color encoding */
-static int ov9740_set_res(struct i2c_client *client, u32 width)
-{
- int ret;
+ ret = ov9740_reg_write(client, OV9740_VFIFO_READ_START_HI,
+ (scale_input_x - width) >> 8);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_VFIFO_READ_START_LO,
+ (scale_input_x - width) & 0xff);
+ if (ret)
+ goto done;
- /* select register configuration for given resolution */
- if (width == ov9740_resolutions[OV9740_VGA].width) {
- dev_dbg(&client->dev, "Setting image size to 640x480\n");
- ret = ov9740_reg_write_array(client, ov9740_regs_vga,
- ARRAY_SIZE(ov9740_regs_vga));
- } else if (width == ov9740_resolutions[OV9740_720P].width) {
- dev_dbg(&client->dev, "Setting image size to 1280x720\n");
- ret = ov9740_reg_write_array(client, ov9740_regs_720p,
- ARRAY_SIZE(ov9740_regs_720p));
- } else {
- dev_err(&client->dev, "Failed to select resolution!\n");
- return -EINVAL;
- }
+ ret = ov9740_reg_write(client, OV9740_ISP_CTRL00, 0xff);
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_ISP_CTRL01, 0xef |
+ (scaling << 4));
+ if (ret)
+ goto done;
+ ret = ov9740_reg_write(client, OV9740_ISP_CTRL03, 0xff);
+done:
return ret;
}
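A worked example for a 640x480 request, derived from the code above (not from the datasheet), may help follow the crop/scale arithmetic:

	/*
	 * 1280 * 480 > 720 * 640, so the full 720-line height is used and the
	 * width is cropped:
	 *   scale_input_x = 720 * 640 / 480 = 960, scale_input_y = 720
	 *   crop window:  x_start = (1280 - 960) / 2 = 160, x_end = 1119
	 *                 y_start = 0,                      y_end = 719
	 *   VFIFO read start = scale_input_x - width = 320 (0x0140), the same
	 *   value the deleted fixed VGA table used
	 *   ISP_CTRL01 = 0xef | (1 << 4) = 0xff, i.e. the scaler is enabled
	 */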
@@ -758,6 +708,7 @@ static int ov9740_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ov9740_priv *priv = to_ov9740(sd);
enum v4l2_colorspace cspace;
enum v4l2_mbus_pixelcode code = mf->code;
int ret;
@@ -777,13 +728,15 @@ static int ov9740_s_fmt(struct v4l2_subdev *sd,
if (ret < 0)
return ret;
- ret = ov9740_set_res(client, mf->width);
+ ret = ov9740_set_res(client, mf->width, mf->height);
if (ret < 0)
return ret;
mf->code = code;
mf->colorspace = cspace;
+ memcpy(&priv->current_mf, mf, sizeof(struct v4l2_mbus_framefmt));
+
return ret;
}
@@ -814,8 +767,8 @@ static int ov9740_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
{
a->bounds.left = 0;
a->bounds.top = 0;
- a->bounds.width = ov9740_resolutions[OV9740_720P].width;
- a->bounds.height = ov9740_resolutions[OV9740_720P].height;
+ a->bounds.width = OV9740_MAX_WIDTH;
+ a->bounds.height = OV9740_MAX_HEIGHT;
a->defrect = a->bounds;
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
a->pixelaspect.numerator = 1;
@@ -828,13 +781,115 @@ static int ov9740_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
{
a->c.left = 0;
a->c.top = 0;
- a->c.width = ov9740_resolutions[OV9740_720P].width;
- a->c.height = ov9740_resolutions[OV9740_720P].height;
+ a->c.width = OV9740_MAX_WIDTH;
+ a->c.height = OV9740_MAX_HEIGHT;
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
return 0;
}
+/* Get status of additional camera capabilities */
+static int ov9740_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct ov9740_priv *priv = to_ov9740(sd);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ ctrl->value = priv->flag_vflip;
+ break;
+ case V4L2_CID_HFLIP:
+ ctrl->value = priv->flag_hflip;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Set status of additional camera capabilities */
+static int ov9740_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+{
+ struct ov9740_priv *priv = to_ov9740(sd);
+
+ switch (ctrl->id) {
+ case V4L2_CID_VFLIP:
+ priv->flag_vflip = ctrl->value;
+ break;
+ case V4L2_CID_HFLIP:
+ priv->flag_hflip = ctrl->value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Get chip identification */
+static int ov9740_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct ov9740_priv *priv = to_ov9740(sd);
+
+ id->ident = priv->ident;
+ id->revision = priv->revision;
+
+ return 0;
+}
+
+static int ov9740_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ov9740_priv *priv = to_ov9740(sd);
+
+ if (!priv->current_enable)
+ return 0;
+
+ if (on) {
+ ov9740_s_fmt(sd, &priv->current_mf);
+ ov9740_s_stream(sd, priv->current_enable);
+ } else {
+ ov9740_s_stream(sd, 0);
+ priv->current_enable = true;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int ov9740_get_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ u8 val;
+
+ if (reg->reg & ~0xffff)
+ return -EINVAL;
+
+ reg->size = 2;
+
+ ret = ov9740_reg_read(client, reg->reg, &val);
+ if (ret)
+ return ret;
+
+ reg->val = (__u64)val;
+
+ return ret;
+}
+
+static int ov9740_set_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (reg->reg & ~0xffff || reg->val & ~0xff)
+ return -EINVAL;
+
+ return ov9740_reg_write(client, reg->reg, reg->val);
+}
+#endif
+
static int ov9740_video_probe(struct soc_camera_device *icd,
struct i2c_client *client)
{
@@ -843,16 +898,9 @@ static int ov9740_video_probe(struct soc_camera_device *icd,
u8 modelhi, modello;
int ret;
- /*
- * We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface) {
- dev_err(&client->dev, "Parent missing or invalid!\n");
- ret = -ENODEV;
- goto err;
- }
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/*
* check and show product ID and manufacturer ID
@@ -901,24 +949,24 @@ static struct soc_camera_ops ov9740_ops = {
.num_controls = ARRAY_SIZE(ov9740_controls),
};
+static struct v4l2_subdev_video_ops ov9740_video_ops = {
+ .s_stream = ov9740_s_stream,
+ .s_mbus_fmt = ov9740_s_fmt,
+ .try_mbus_fmt = ov9740_try_fmt,
+ .enum_mbus_fmt = ov9740_enum_fmt,
+ .cropcap = ov9740_cropcap,
+ .g_crop = ov9740_g_crop,
+};
+
static struct v4l2_subdev_core_ops ov9740_core_ops = {
.g_ctrl = ov9740_g_ctrl,
.s_ctrl = ov9740_s_ctrl,
.g_chip_ident = ov9740_g_chip_ident,
+ .s_power = ov9740_s_power,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov9740_get_register,
.s_register = ov9740_set_register,
#endif
-
-};
-
-static struct v4l2_subdev_video_ops ov9740_video_ops = {
- .s_stream = ov9740_s_stream,
- .s_mbus_fmt = ov9740_s_fmt,
- .try_mbus_fmt = ov9740_try_fmt,
- .enum_mbus_fmt = ov9740_enum_fmt,
- .cropcap = ov9740_cropcap,
- .g_crop = ov9740_g_crop,
};
static struct v4l2_subdev_ops ov9740_subdev_ops = {
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index 7551907f8c2..e753b5e4d2c 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -28,7 +28,6 @@
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/init.h>
-#include <linux/version.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <asm/io.h>
@@ -39,7 +38,7 @@
#include <media/v4l2-device.h>
MODULE_LICENSE("GPL");
-
+MODULE_VERSION("0.0.4");
#define MOTOROLA 1
#define PHILIPS2 2 /* SAA7191 */
@@ -678,7 +677,6 @@ static int pms_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, dev->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, "Mediavision PMS", sizeof(vcap->card));
strlcpy(vcap->bus_info, "ISA", sizeof(vcap->bus_info));
- vcap->version = KERNEL_VERSION(0, 0, 3);
vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
return 0;
}
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 9d0dd08f57f..e98d3821279 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -3046,6 +3046,8 @@ static void pvr2_subdev_update(struct pvr2_hdw *hdw)
if (hdw->input_dirty || hdw->audiomode_dirty || hdw->force_dirty) {
struct v4l2_tuner vt;
memset(&vt, 0, sizeof(vt));
+ vt.type = (hdw->input_val == PVR2_CVAL_INPUT_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
vt.audmode = hdw->audiomode_val;
v4l2_device_call_all(&hdw->v4l2_dev, 0, tuner, s_tuner, &vt);
}
@@ -5171,6 +5173,8 @@ void pvr2_hdw_status_poll(struct pvr2_hdw *hdw)
{
struct v4l2_tuner *vtp = &hdw->tuner_signal_info;
memset(vtp, 0, sizeof(*vtp));
+ vtp->type = (hdw->input_val == PVR2_CVAL_INPUT_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
hdw->tuner_signal_stale = 0;
/* Note: There apparently is no replacement for VIDIOC_CROPCAP
using v4l2-subdev - therefore we can't support that AT ALL right
diff --git a/drivers/media/video/pvrusb2/pvrusb2-main.c b/drivers/media/video/pvrusb2/pvrusb2-main.c
index 2254194aad5..c1d9bb61cd7 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-main.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-main.c
@@ -168,6 +168,7 @@ module_exit(pvr_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.9.1");
/*
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 38761142a4d..e27f8ab7696 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -91,7 +91,7 @@ static struct v4l2_capability pvr_capability ={
.driver = "pvrusb2",
.card = "Hauppauge WinTV pvr-usb2",
.bus_info = "usb",
- .version = KERNEL_VERSION(0, 9, 0),
+ .version = LINUX_VERSION_CODE,
.capabilities = (V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
V4L2_CAP_READWRITE),
@@ -369,11 +369,6 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
break;
}
- case VIDIOC_S_AUDIO:
- {
- ret = -EINVAL;
- break;
- }
case VIDIOC_G_TUNER:
{
struct v4l2_tuner *vt = (struct v4l2_tuner *)arg;
@@ -850,7 +845,7 @@ static long pvr2_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
#endif
default :
- ret = -EINVAL;
+ ret = -ENOTTY;
break;
}
diff --git a/drivers/media/video/pwc/Kconfig b/drivers/media/video/pwc/Kconfig
index 8da42e4f1ba..d63d0a85003 100644
--- a/drivers/media/video/pwc/Kconfig
+++ b/drivers/media/video/pwc/Kconfig
@@ -1,6 +1,7 @@
config USB_PWC
tristate "USB Philips Cameras"
depends on VIDEO_V4L2
+ select VIDEOBUF2_VMALLOC
---help---
Say Y or M here if you want to use one of these Philips & OEM
webcams:
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 1593f8deb81..3977addf3ba 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -3,6 +3,7 @@
video modes.
(C) 1999-2003 Nemosoft Unv.
(C) 2004-2006 Luc Saillard (luc@saillard.org)
+ (C) 2011 Hans de Goede <hdegoede@redhat.com>
NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
driver and thus may have bugs that are not present in the original version.
@@ -43,61 +44,12 @@
#include <asm/errno.h>
#include "pwc.h"
-#include "pwc-uncompress.h"
#include "pwc-kiara.h"
#include "pwc-timon.h"
#include "pwc-dec1.h"
#include "pwc-dec23.h"
-/* Request types: video */
-#define SET_LUM_CTL 0x01
-#define GET_LUM_CTL 0x02
-#define SET_CHROM_CTL 0x03
-#define GET_CHROM_CTL 0x04
-#define SET_STATUS_CTL 0x05
-#define GET_STATUS_CTL 0x06
-#define SET_EP_STREAM_CTL 0x07
-#define GET_EP_STREAM_CTL 0x08
-#define GET_XX_CTL 0x09
-#define SET_XX_CTL 0x0A
-#define GET_XY_CTL 0x0B
-#define SET_XY_CTL 0x0C
-#define SET_MPT_CTL 0x0D
-#define GET_MPT_CTL 0x0E
-
-/* Selectors for the Luminance controls [GS]ET_LUM_CTL */
-#define AGC_MODE_FORMATTER 0x2000
-#define PRESET_AGC_FORMATTER 0x2100
-#define SHUTTER_MODE_FORMATTER 0x2200
-#define PRESET_SHUTTER_FORMATTER 0x2300
-#define PRESET_CONTOUR_FORMATTER 0x2400
-#define AUTO_CONTOUR_FORMATTER 0x2500
-#define BACK_LIGHT_COMPENSATION_FORMATTER 0x2600
-#define CONTRAST_FORMATTER 0x2700
-#define DYNAMIC_NOISE_CONTROL_FORMATTER 0x2800
-#define FLICKERLESS_MODE_FORMATTER 0x2900
-#define AE_CONTROL_SPEED 0x2A00
-#define BRIGHTNESS_FORMATTER 0x2B00
-#define GAMMA_FORMATTER 0x2C00
-
-/* Selectors for the Chrominance controls [GS]ET_CHROM_CTL */
-#define WB_MODE_FORMATTER 0x1000
-#define AWB_CONTROL_SPEED_FORMATTER 0x1100
-#define AWB_CONTROL_DELAY_FORMATTER 0x1200
-#define PRESET_MANUAL_RED_GAIN_FORMATTER 0x1300
-#define PRESET_MANUAL_BLUE_GAIN_FORMATTER 0x1400
-#define COLOUR_MODE_FORMATTER 0x1500
-#define SATURATION_MODE_FORMATTER1 0x1600
-#define SATURATION_MODE_FORMATTER2 0x1700
-
-/* Selectors for the Status controls [GS]ET_STATUS_CTL */
-#define SAVE_USER_DEFAULTS_FORMATTER 0x0200
-#define RESTORE_USER_DEFAULTS_FORMATTER 0x0300
-#define RESTORE_FACTORY_DEFAULTS_FORMATTER 0x0400
-#define READ_AGC_FORMATTER 0x0500
-#define READ_SHUTTER_FORMATTER 0x0600
-#define READ_RED_GAIN_FORMATTER 0x0700
-#define READ_BLUE_GAIN_FORMATTER 0x0800
+/* Selectors for status controls used only in this file */
#define GET_STATUS_B00 0x0B00
#define SENSOR_TYPE_FORMATTER1 0x0C00
#define GET_STATUS_3000 0x3000
@@ -116,11 +68,6 @@
/* Formatters for the Video Endpoint controls [GS]ET_EP_STREAM_CTL */
#define VIDEO_OUTPUT_CONTROL_FORMATTER 0x0100
-/* Formatters for the motorized pan & tilt [GS]ET_MPT_CTL */
-#define PT_RELATIVE_CONTROL_FORMATTER 0x01
-#define PT_RESET_CONTROL_FORMATTER 0x02
-#define PT_STATUS_FORMATTER 0x03
-
static const char *size2name[PSZ_MAX] =
{
"subQCIF",
@@ -160,7 +107,7 @@ static void pwc_set_image_buffer_size(struct pwc_device *pdev);
/****************************************************************************/
static int _send_control_msg(struct pwc_device *pdev,
- u8 request, u16 value, int index, void *buf, int buflen, int timeout)
+ u8 request, u16 value, int index, void *buf, int buflen)
{
int rc;
void *kbuf = NULL;
@@ -177,7 +124,7 @@ static int _send_control_msg(struct pwc_device *pdev,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value,
index,
- kbuf, buflen, timeout);
+ kbuf, buflen, USB_CTRL_SET_TIMEOUT);
kfree(kbuf);
return rc;
@@ -197,9 +144,13 @@ static int recv_control_msg(struct pwc_device *pdev,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value,
pdev->vcinterface,
- kbuf, buflen, 500);
+ kbuf, buflen, USB_CTRL_GET_TIMEOUT);
memcpy(buf, kbuf, buflen);
kfree(kbuf);
+
+ if (rc < 0)
+ PWC_ERROR("recv_control_msg error %d req %02x val %04x\n",
+ rc, request, value);
return rc;
}
@@ -210,18 +161,16 @@ static inline int send_video_command(struct pwc_device *pdev,
SET_EP_STREAM_CTL,
VIDEO_OUTPUT_CONTROL_FORMATTER,
index,
- buf, buflen, 1000);
+ buf, buflen);
}
-static inline int send_control_msg(struct pwc_device *pdev,
+int send_control_msg(struct pwc_device *pdev,
u8 request, u16 value, void *buf, int buflen)
{
return _send_control_msg(pdev,
- request, value, pdev->vcinterface, buf, buflen, 500);
+ request, value, pdev->vcinterface, buf, buflen);
}
-
-
static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames)
{
unsigned char buf[3];
@@ -261,8 +210,11 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames)
PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret);
return ret;
}
- if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
- pwc_dec1_init(pdev->type, pdev->release, buf, pdev->decompress_data);
+ if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
+ ret = pwc_dec1_init(pdev, pdev->type, pdev->release, buf);
+ if (ret < 0)
+ return ret;
+ }
pdev->cmd_len = 3;
memcpy(pdev->cmd_buf, buf, 3);
@@ -321,8 +273,11 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames, i
if (ret < 0)
return ret;
- if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
- pwc_dec23_init(pdev, pdev->type, buf);
+ if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
+ ret = pwc_dec23_init(pdev, pdev->type, buf);
+ if (ret < 0)
+ return ret;
+ }
pdev->cmd_len = 13;
memcpy(pdev->cmd_buf, buf, 13);
@@ -394,8 +349,11 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, i
if (ret < 0)
return ret;
- if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
- pwc_dec23_init(pdev, pdev->type, buf);
+ if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
+ ret = pwc_dec23_init(pdev, pdev->type, buf);
+ if (ret < 0)
+ return ret;
+ }
pdev->cmd_len = 12;
memcpy(pdev->cmd_buf, buf, 12);
@@ -452,6 +410,7 @@ int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frame
}
pdev->view.x = width;
pdev->view.y = height;
+ pdev->vcompression = compression;
pdev->frame_total_size = pdev->frame_size + pdev->frame_header_size + pdev->frame_trailer_size;
pwc_set_image_buffer_size(pdev);
PWC_DEBUG_SIZE("Set viewport to %dx%d, image size is %dx%d.\n", width, height, pwc_image_sizes[size].x, pwc_image_sizes[size].y);
@@ -511,13 +470,9 @@ unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned i
return ret;
}
-#define BLACK_Y 0
-#define BLACK_U 128
-#define BLACK_V 128
-
static void pwc_set_image_buffer_size(struct pwc_device *pdev)
{
- int i, factor = 0;
+ int factor = 0;
/* for V4L2_PIX_FMT_YUV420 */
switch (pdev->pixfmt) {
@@ -541,442 +496,108 @@ static void pwc_set_image_buffer_size(struct pwc_device *pdev)
*/
pdev->offset.x = ((pdev->view.x - pdev->image.x) / 2) & 0xFFFC;
pdev->offset.y = ((pdev->view.y - pdev->image.y) / 2) & 0xFFFE;
-
- /* Fill buffers with black colors */
- for (i = 0; i < pwc_mbufs; i++) {
- unsigned char *p = pdev->image_data + pdev->images[i].offset;
- memset(p, BLACK_Y, pdev->view.x * pdev->view.y);
- p += pdev->view.x * pdev->view.y;
- memset(p, BLACK_U, pdev->view.x * pdev->view.y/4);
- p += pdev->view.x * pdev->view.y/4;
- memset(p, BLACK_V, pdev->view.x * pdev->view.y/4);
- }
}
-
-
-/* BRIGHTNESS */
-
-int pwc_get_brightness(struct pwc_device *pdev)
+int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
- char buf;
int ret;
+ u8 buf;
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, BRIGHTNESS_FORMATTER, &buf, sizeof(buf));
+ ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
if (ret < 0)
return ret;
- return buf;
-}
-int pwc_set_brightness(struct pwc_device *pdev, int value)
-{
- char buf;
-
- if (value < 0)
- value = 0;
- if (value > 0xffff)
- value = 0xffff;
- buf = (value >> 9) & 0x7f;
- return send_control_msg(pdev,
- SET_LUM_CTL, BRIGHTNESS_FORMATTER, &buf, sizeof(buf));
+ *data = buf;
+ return 0;
}
-/* CONTRAST */
-
-int pwc_get_contrast(struct pwc_device *pdev)
+int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
{
- char buf;
int ret;
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, CONTRAST_FORMATTER, &buf, sizeof(buf));
+ ret = send_control_msg(pdev, request, value, &data, sizeof(data));
if (ret < 0)
return ret;
- return buf;
-}
-int pwc_set_contrast(struct pwc_device *pdev, int value)
-{
- char buf;
-
- if (value < 0)
- value = 0;
- if (value > 0xffff)
- value = 0xffff;
- buf = (value >> 10) & 0x3f;
- return send_control_msg(pdev,
- SET_LUM_CTL, CONTRAST_FORMATTER, &buf, sizeof(buf));
+ return 0;
}
-/* GAMMA */
-
-int pwc_get_gamma(struct pwc_device *pdev)
+int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
- char buf;
int ret;
+ s8 buf;
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, GAMMA_FORMATTER, &buf, sizeof(buf));
+ ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
if (ret < 0)
return ret;
- return buf;
-}
-
-int pwc_set_gamma(struct pwc_device *pdev, int value)
-{
- char buf;
- if (value < 0)
- value = 0;
- if (value > 0xffff)
- value = 0xffff;
- buf = (value >> 11) & 0x1f;
- return send_control_msg(pdev,
- SET_LUM_CTL, GAMMA_FORMATTER, &buf, sizeof(buf));
-}
-
-
-/* SATURATION */
-
-/* return a value between [-100 , 100] */
-int pwc_get_saturation(struct pwc_device *pdev, int *value)
-{
- char buf;
- int ret, saturation_register;
-
- if (pdev->type < 675)
- return -EINVAL;
- if (pdev->type < 730)
- saturation_register = SATURATION_MODE_FORMATTER2;
- else
- saturation_register = SATURATION_MODE_FORMATTER1;
- ret = recv_control_msg(pdev,
- GET_CHROM_CTL, saturation_register, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *value = (signed)buf;
+ *data = buf;
return 0;
}
-/* @param value saturation color between [-100 , 100] */
-int pwc_set_saturation(struct pwc_device *pdev, int value)
+int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
{
- char buf;
- int saturation_register;
-
- if (pdev->type < 675)
- return -EINVAL;
- if (value < -100)
- value = -100;
- if (value > 100)
- value = 100;
- if (pdev->type < 730)
- saturation_register = SATURATION_MODE_FORMATTER2;
- else
- saturation_register = SATURATION_MODE_FORMATTER1;
- return send_control_msg(pdev,
- SET_CHROM_CTL, saturation_register, &buf, sizeof(buf));
-}
-
-/* AGC */
-
-int pwc_set_agc(struct pwc_device *pdev, int mode, int value)
-{
- char buf;
int ret;
+ u8 buf[2];
- if (mode)
- buf = 0x0; /* auto */
- else
- buf = 0xff; /* fixed */
-
- ret = send_control_msg(pdev,
- SET_LUM_CTL, AGC_MODE_FORMATTER, &buf, sizeof(buf));
-
- if (!mode && ret >= 0) {
- if (value < 0)
- value = 0;
- if (value > 0xffff)
- value = 0xffff;
- buf = (value >> 10) & 0x3F;
- ret = send_control_msg(pdev,
- SET_LUM_CTL, PRESET_AGC_FORMATTER, &buf, sizeof(buf));
- }
+ ret = recv_control_msg(pdev, request, value, buf, sizeof(buf));
if (ret < 0)
return ret;
+
+ *data = (buf[1] << 8) | buf[0];
return 0;
}
-int pwc_get_agc(struct pwc_device *pdev, int *value)
+int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data)
{
- unsigned char buf;
int ret;
+ u8 buf[2];
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, AGC_MODE_FORMATTER, &buf, sizeof(buf));
+ buf[0] = data & 0xff;
+ buf[1] = data >> 8;
+ ret = send_control_msg(pdev, request, value, buf, sizeof(buf));
if (ret < 0)
return ret;
- if (buf != 0) { /* fixed */
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, PRESET_AGC_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- if (buf > 0x3F)
- buf = 0x3F;
- *value = (buf << 10);
- }
- else { /* auto */
- ret = recv_control_msg(pdev,
- GET_STATUS_CTL, READ_AGC_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- /* Gah... this value ranges from 0x00 ... 0x9F */
- if (buf > 0x9F)
- buf = 0x9F;
- *value = -(48 + buf * 409);
- }
-
return 0;
}
-int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value)
-{
- char buf[2];
- int speed, ret;
-
-
- if (mode)
- buf[0] = 0x0; /* auto */
- else
- buf[0] = 0xff; /* fixed */
-
- ret = send_control_msg(pdev,
- SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, 1);
-
- if (!mode && ret >= 0) {
- if (value < 0)
- value = 0;
- if (value > 0xffff)
- value = 0xffff;
-
- if (DEVICE_USE_CODEC2(pdev->type)) {
- /* speed ranges from 0x0 to 0x290 (656) */
- speed = (value / 100);
- buf[1] = speed >> 8;
- buf[0] = speed & 0xff;
- } else if (DEVICE_USE_CODEC3(pdev->type)) {
- /* speed seems to range from 0x0 to 0xff */
- buf[1] = 0;
- buf[0] = value >> 8;
- }
-
- ret = send_control_msg(pdev,
- SET_LUM_CTL, PRESET_SHUTTER_FORMATTER,
- &buf, sizeof(buf));
- }
- return ret;
-}
-
-/* This function is not exported to v4l1, so output values between 0 -> 256 */
-int pwc_get_shutter_speed(struct pwc_device *pdev, int *value)
+int pwc_button_ctrl(struct pwc_device *pdev, u16 value)
{
- unsigned char buf[2];
int ret;
- ret = recv_control_msg(pdev,
- GET_STATUS_CTL, READ_SHUTTER_FORMATTER, &buf, sizeof(buf));
+ ret = send_control_msg(pdev, SET_STATUS_CTL, value, NULL, 0);
if (ret < 0)
return ret;
- *value = buf[0] + (buf[1] << 8);
- if (DEVICE_USE_CODEC2(pdev->type)) {
- /* speed ranges from 0x0 to 0x290 (656) */
- *value *= 256/656;
- } else if (DEVICE_USE_CODEC3(pdev->type)) {
- /* speed seems to range from 0x0 to 0xff */
- }
+
return 0;
}
-
/* POWER */
-
-int pwc_camera_power(struct pwc_device *pdev, int power)
+void pwc_camera_power(struct pwc_device *pdev, int power)
{
char buf;
+ int r;
+
+ if (!pdev->power_save)
+ return;
if (pdev->type < 675 || (pdev->type < 730 && pdev->release < 6))
- return 0; /* Not supported by Nala or Timon < release 6 */
+ return; /* Not supported by Nala or Timon < release 6 */
if (power)
buf = 0x00; /* active */
else
buf = 0xFF; /* power save */
- return send_control_msg(pdev,
+ r = send_control_msg(pdev,
SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER,
&buf, sizeof(buf));
-}
-
-
-
-/* private calls */
-
-int pwc_restore_user(struct pwc_device *pdev)
-{
- return send_control_msg(pdev,
- SET_STATUS_CTL, RESTORE_USER_DEFAULTS_FORMATTER, NULL, 0);
-}
-
-int pwc_save_user(struct pwc_device *pdev)
-{
- return send_control_msg(pdev,
- SET_STATUS_CTL, SAVE_USER_DEFAULTS_FORMATTER, NULL, 0);
-}
-
-int pwc_restore_factory(struct pwc_device *pdev)
-{
- return send_control_msg(pdev,
- SET_STATUS_CTL, RESTORE_FACTORY_DEFAULTS_FORMATTER, NULL, 0);
-}
-
- /* ************************************************* */
- /* Patch by Alvarado: (not in the original version */
-
- /*
- * the camera recognizes modes from 0 to 4:
- *
- * 00: indoor (incandescant lighting)
- * 01: outdoor (sunlight)
- * 02: fluorescent lighting
- * 03: manual
- * 04: auto
- */
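/*
 * Editor's note, not part of this patch: the mode values listed in the
 * removed comment above, as an illustrative enum (names are hypothetical;
 * the values are the camera's). The patch feeds the same mode through the
 * pdev->auto_white_balance v4l2 control instead of this custom helper.
 */
enum example_pwc_awb_mode {
	EXAMPLE_PWC_AWB_INDOOR		= 0,	/* incandescent lighting */
	EXAMPLE_PWC_AWB_OUTDOOR		= 1,	/* sunlight */
	EXAMPLE_PWC_AWB_FLUORESCENT	= 2,
	EXAMPLE_PWC_AWB_MANUAL		= 3,
	EXAMPLE_PWC_AWB_AUTO		= 4,
};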
-int pwc_set_awb(struct pwc_device *pdev, int mode)
-{
- char buf;
- int ret;
-
- if (mode < 0)
- mode = 0;
-
- if (mode > 4)
- mode = 4;
-
- buf = mode & 0x07; /* just the lowest three bits */
-
- ret = send_control_msg(pdev,
- SET_CHROM_CTL, WB_MODE_FORMATTER, &buf, sizeof(buf));
-
- if (ret < 0)
- return ret;
- return 0;
-}
-
-int pwc_get_awb(struct pwc_device *pdev)
-{
- unsigned char buf;
- int ret;
-
- ret = recv_control_msg(pdev,
- GET_CHROM_CTL, WB_MODE_FORMATTER, &buf, sizeof(buf));
-
- if (ret < 0)
- return ret;
- return buf;
-}
-
-int pwc_set_red_gain(struct pwc_device *pdev, int value)
-{
- unsigned char buf;
-
- if (value < 0)
- value = 0;
- if (value > 0xffff)
- value = 0xffff;
- /* only the msb is considered */
- buf = value >> 8;
- return send_control_msg(pdev,
- SET_CHROM_CTL, PRESET_MANUAL_RED_GAIN_FORMATTER,
- &buf, sizeof(buf));
-}
-
-int pwc_get_red_gain(struct pwc_device *pdev, int *value)
-{
- unsigned char buf;
- int ret;
-
- ret = recv_control_msg(pdev,
- GET_CHROM_CTL, PRESET_MANUAL_RED_GAIN_FORMATTER,
- &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *value = buf << 8;
- return 0;
-}
-
-
-int pwc_set_blue_gain(struct pwc_device *pdev, int value)
-{
- unsigned char buf;
-
- if (value < 0)
- value = 0;
- if (value > 0xffff)
- value = 0xffff;
- /* only the msb is considered */
- buf = value >> 8;
- return send_control_msg(pdev,
- SET_CHROM_CTL, PRESET_MANUAL_BLUE_GAIN_FORMATTER,
- &buf, sizeof(buf));
-}
-
-int pwc_get_blue_gain(struct pwc_device *pdev, int *value)
-{
- unsigned char buf;
- int ret;
-
- ret = recv_control_msg(pdev,
- GET_CHROM_CTL, PRESET_MANUAL_BLUE_GAIN_FORMATTER,
- &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *value = buf << 8;
- return 0;
-}
-
-/* The following two functions are different, since they only read the
- internal red/blue gains, which may be different from the manual
- gains set or read above.
- */
-static int pwc_read_red_gain(struct pwc_device *pdev, int *value)
-{
- unsigned char buf;
- int ret;
-
- ret = recv_control_msg(pdev,
- GET_STATUS_CTL, READ_RED_GAIN_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *value = buf << 8;
- return 0;
+ if (r < 0)
+ PWC_ERROR("Failed to power %s camera (%d)\n",
+ power ? "on" : "off", r);
}
-static int pwc_read_blue_gain(struct pwc_device *pdev, int *value)
-{
- unsigned char buf;
- int ret;
-
- ret = recv_control_msg(pdev,
- GET_STATUS_CTL, READ_BLUE_GAIN_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *value = buf << 8;
- return 0;
-}
-
-
static int pwc_set_wb_speed(struct pwc_device *pdev, int speed)
{
unsigned char buf;
@@ -1028,6 +649,7 @@ static int pwc_get_wb_delay(struct pwc_device *pdev, int *value)
int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
{
unsigned char buf[2];
+ int r;
if (pdev->type < 730)
return 0;
@@ -1045,8 +667,12 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
buf[0] = on_value;
buf[1] = off_value;
- return send_control_msg(pdev,
+ r = send_control_msg(pdev,
SET_STATUS_CTL, LED_FORMATTER, &buf, sizeof(buf));
+ if (r < 0)
+ PWC_ERROR("Failed to set LED on/off time (%d)\n", r);
+
+ return r;
}
static int pwc_get_leds(struct pwc_device *pdev, int *on_value, int *off_value)
@@ -1069,164 +695,6 @@ static int pwc_get_leds(struct pwc_device *pdev, int *on_value, int *off_value)
return 0;
}
-int pwc_set_contour(struct pwc_device *pdev, int contour)
-{
- unsigned char buf;
- int ret;
-
- if (contour < 0)
- buf = 0xff; /* auto contour on */
- else
- buf = 0x0; /* auto contour off */
- ret = send_control_msg(pdev,
- SET_LUM_CTL, AUTO_CONTOUR_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
-
- if (contour < 0)
- return 0;
- if (contour > 0xffff)
- contour = 0xffff;
-
- buf = (contour >> 10); /* contour preset is [0..3f] */
- ret = send_control_msg(pdev,
- SET_LUM_CTL, PRESET_CONTOUR_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- return 0;
-}
-
-int pwc_get_contour(struct pwc_device *pdev, int *contour)
-{
- unsigned char buf;
- int ret;
-
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, AUTO_CONTOUR_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
-
- if (buf == 0) {
- /* auto mode off, query current preset value */
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, PRESET_CONTOUR_FORMATTER,
- &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *contour = buf << 10;
- }
- else
- *contour = -1;
- return 0;
-}
-
-
-int pwc_set_backlight(struct pwc_device *pdev, int backlight)
-{
- unsigned char buf;
-
- if (backlight)
- buf = 0xff;
- else
- buf = 0x0;
- return send_control_msg(pdev,
- SET_LUM_CTL, BACK_LIGHT_COMPENSATION_FORMATTER,
- &buf, sizeof(buf));
-}
-
-int pwc_get_backlight(struct pwc_device *pdev, int *backlight)
-{
- int ret;
- unsigned char buf;
-
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, BACK_LIGHT_COMPENSATION_FORMATTER,
- &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *backlight = !!buf;
- return 0;
-}
-
-int pwc_set_colour_mode(struct pwc_device *pdev, int colour)
-{
- unsigned char buf;
-
- if (colour)
- buf = 0xff;
- else
- buf = 0x0;
- return send_control_msg(pdev,
- SET_CHROM_CTL, COLOUR_MODE_FORMATTER, &buf, sizeof(buf));
-}
-
-int pwc_get_colour_mode(struct pwc_device *pdev, int *colour)
-{
- int ret;
- unsigned char buf;
-
- ret = recv_control_msg(pdev,
- GET_CHROM_CTL, COLOUR_MODE_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *colour = !!buf;
- return 0;
-}
-
-
-int pwc_set_flicker(struct pwc_device *pdev, int flicker)
-{
- unsigned char buf;
-
- if (flicker)
- buf = 0xff;
- else
- buf = 0x0;
- return send_control_msg(pdev,
- SET_LUM_CTL, FLICKERLESS_MODE_FORMATTER, &buf, sizeof(buf));
-}
-
-int pwc_get_flicker(struct pwc_device *pdev, int *flicker)
-{
- int ret;
- unsigned char buf;
-
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, FLICKERLESS_MODE_FORMATTER, &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *flicker = !!buf;
- return 0;
-}
-
-int pwc_set_dynamic_noise(struct pwc_device *pdev, int noise)
-{
- unsigned char buf;
-
- if (noise < 0)
- noise = 0;
- if (noise > 3)
- noise = 3;
- buf = noise;
- return send_control_msg(pdev,
- SET_LUM_CTL, DYNAMIC_NOISE_CONTROL_FORMATTER,
- &buf, sizeof(buf));
-}
-
-int pwc_get_dynamic_noise(struct pwc_device *pdev, int *noise)
-{
- int ret;
- unsigned char buf;
-
- ret = recv_control_msg(pdev,
- GET_LUM_CTL, DYNAMIC_NOISE_CONTROL_FORMATTER,
- &buf, sizeof(buf));
- if (ret < 0)
- return ret;
- *noise = buf;
- return 0;
-}
-
static int _pwc_mpt_reset(struct pwc_device *pdev, int flags)
{
unsigned char buf;
@@ -1309,7 +777,7 @@ static int pwc_mpt_get_status(struct pwc_device *pdev, struct pwc_mpt_status *st
return 0;
}
-
+#ifdef CONFIG_USB_PWC_DEBUG
int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
{
unsigned char buf;
@@ -1332,7 +800,7 @@ int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
*sensor = buf;
return 0;
}
-
+#endif
/* End of Add-Ons */
/* ************************************************* */
@@ -1356,37 +824,41 @@ int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
/* copy local variable to arg */
#define ARG_OUT(ARG_name) /* nothing */
+/*
+ * Our ctrls use native values, but the old custom pwc ioctl interface expects
+ * values from 0 - 65535; define two helper functions to scale between them. */
+static int pwc_ioctl_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+ return v4l2_ctrl_g_ctrl(ctrl) * 65535 / ctrl->maximum;
+}
+
+static int pwc_ioctl_s_ctrl(struct v4l2_ctrl *ctrl, int val)
+{
+ return v4l2_ctrl_s_ctrl(ctrl, val * ctrl->maximum / 65535);
+}
+
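/*
 * Editor's sketch, not part of this patch: the scaling performed by the two
 * helpers above, shown as a standalone userspace program. The control
 * maximum of 127 is only an assumed example value; in the driver the real
 * maximum comes from the registered v4l2_ctrl.
 */
#include <stdio.h>

int main(void)
{
	const int ctrl_max = 127;	/* assumed native control maximum */
	const int user_val = 32768;	/* mid-scale value from the old ioctl API */

	int native = user_val * ctrl_max / 65535;	/* pwc_ioctl_s_ctrl direction */
	int back   = native * 65535 / ctrl_max;		/* pwc_ioctl_g_ctrl direction */

	/* Prints: native = 63, back on the 0-65535 scale = 32509 */
	printf("native = %d, back on the 0-65535 scale = %d\n", native, back);
	return 0;
}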
long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
{
long ret = 0;
switch(cmd) {
case VIDIOCPWCRUSER:
- {
- if (pwc_restore_user(pdev))
- ret = -EINVAL;
+ ret = pwc_button_ctrl(pdev, RESTORE_USER_DEFAULTS_FORMATTER);
break;
- }
case VIDIOCPWCSUSER:
- {
- if (pwc_save_user(pdev))
- ret = -EINVAL;
+ ret = pwc_button_ctrl(pdev, SAVE_USER_DEFAULTS_FORMATTER);
break;
- }
case VIDIOCPWCFACTORY:
- {
- if (pwc_restore_factory(pdev))
- ret = -EINVAL;
+ ret = pwc_button_ctrl(pdev, RESTORE_FACTORY_DEFAULTS_FORMATTER);
break;
- }
case VIDIOCPWCSCQUAL:
{
ARG_DEF(int, qual)
- if (pdev->iso_init) {
+ if (vb2_is_streaming(&pdev->vb_queue)) {
ret = -EBUSY;
break;
}
@@ -1396,8 +868,6 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
ret = -EINVAL;
else
ret = pwc_set_video_mode(pdev, pdev->view.x, pdev->view.y, pdev->vframes, ARGR(qual), pdev->vsnapshot);
- if (ret >= 0)
- pdev->vcompression = ARGR(qual);
break;
}
@@ -1414,7 +884,7 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
{
ARG_DEF(struct pwc_probe, probe)
- strcpy(ARGR(probe).name, pdev->vdev->name);
+ strcpy(ARGR(probe).name, pdev->vdev.name);
ARGR(probe).type = pdev->type;
ARG_OUT(probe)
break;
@@ -1432,71 +902,59 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
case VIDIOCPWCSAGC:
{
ARG_DEF(int, agc)
-
ARG_IN(agc)
- if (pwc_set_agc(pdev, ARGR(agc) < 0 ? 1 : 0, ARGR(agc)))
- ret = -EINVAL;
+ ret = v4l2_ctrl_s_ctrl(pdev->autogain, ARGR(agc) < 0);
+ if (ret == 0 && ARGR(agc) >= 0)
+ ret = pwc_ioctl_s_ctrl(pdev->gain, ARGR(agc));
break;
}
case VIDIOCPWCGAGC:
{
ARG_DEF(int, agc)
-
- if (pwc_get_agc(pdev, ARGA(agc)))
- ret = -EINVAL;
+ if (v4l2_ctrl_g_ctrl(pdev->autogain))
+ ARGR(agc) = -1;
+ else
+ ARGR(agc) = pwc_ioctl_g_ctrl(pdev->gain);
ARG_OUT(agc)
break;
}
case VIDIOCPWCSSHUTTER:
{
- ARG_DEF(int, shutter_speed)
-
- ARG_IN(shutter_speed)
- ret = pwc_set_shutter_speed(pdev, ARGR(shutter_speed) < 0 ? 1 : 0, ARGR(shutter_speed));
+ ARG_DEF(int, shutter)
+ ARG_IN(shutter)
+ ret = v4l2_ctrl_s_ctrl(pdev->exposure_auto,
+ /* Menu idx 0 = auto, idx 1 = manual */
+ ARGR(shutter) >= 0);
+ if (ret == 0 && ARGR(shutter) >= 0)
+ ret = pwc_ioctl_s_ctrl(pdev->exposure, ARGR(shutter));
break;
}
case VIDIOCPWCSAWB:
{
ARG_DEF(struct pwc_whitebalance, wb)
-
ARG_IN(wb)
- ret = pwc_set_awb(pdev, ARGR(wb).mode);
- if (ret >= 0 && ARGR(wb).mode == PWC_WB_MANUAL) {
- pwc_set_red_gain(pdev, ARGR(wb).manual_red);
- pwc_set_blue_gain(pdev, ARGR(wb).manual_blue);
- }
+ ret = v4l2_ctrl_s_ctrl(pdev->auto_white_balance,
+ ARGR(wb).mode);
+ if (ret == 0 && ARGR(wb).mode == PWC_WB_MANUAL)
+ ret = pwc_ioctl_s_ctrl(pdev->red_balance,
+ ARGR(wb).manual_red);
+ if (ret == 0 && ARGR(wb).mode == PWC_WB_MANUAL)
+ ret = pwc_ioctl_s_ctrl(pdev->blue_balance,
+ ARGR(wb).manual_blue);
break;
}
case VIDIOCPWCGAWB:
{
ARG_DEF(struct pwc_whitebalance, wb)
-
- memset(ARGA(wb), 0, sizeof(struct pwc_whitebalance));
- ARGR(wb).mode = pwc_get_awb(pdev);
- if (ARGR(wb).mode < 0)
- ret = -EINVAL;
- else {
- if (ARGR(wb).mode == PWC_WB_MANUAL) {
- ret = pwc_get_red_gain(pdev, &ARGR(wb).manual_red);
- if (ret < 0)
- break;
- ret = pwc_get_blue_gain(pdev, &ARGR(wb).manual_blue);
- if (ret < 0)
- break;
- }
- if (ARGR(wb).mode == PWC_WB_AUTO) {
- ret = pwc_read_red_gain(pdev, &ARGR(wb).read_red);
- if (ret < 0)
- break;
- ret = pwc_read_blue_gain(pdev, &ARGR(wb).read_blue);
- if (ret < 0)
- break;
- }
- }
+ ARGR(wb).mode = v4l2_ctrl_g_ctrl(pdev->auto_white_balance);
+ ARGR(wb).manual_red = ARGR(wb).read_red =
+ pwc_ioctl_g_ctrl(pdev->red_balance);
+ ARGR(wb).manual_blue = ARGR(wb).read_blue =
+ pwc_ioctl_g_ctrl(pdev->blue_balance);
ARG_OUT(wb)
break;
}
@@ -1550,17 +1008,20 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
case VIDIOCPWCSCONTOUR:
{
ARG_DEF(int, contour)
-
ARG_IN(contour)
- ret = pwc_set_contour(pdev, ARGR(contour));
+ ret = v4l2_ctrl_s_ctrl(pdev->autocontour, ARGR(contour) < 0);
+ if (ret == 0 && ARGR(contour) >= 0)
+ ret = pwc_ioctl_s_ctrl(pdev->contour, ARGR(contour));
break;
}
case VIDIOCPWCGCONTOUR:
{
ARG_DEF(int, contour)
-
- ret = pwc_get_contour(pdev, ARGA(contour));
+ if (v4l2_ctrl_g_ctrl(pdev->autocontour))
+ ARGR(contour) = -1;
+ else
+ ARGR(contour) = pwc_ioctl_g_ctrl(pdev->contour);
ARG_OUT(contour)
break;
}
@@ -1568,17 +1029,15 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
case VIDIOCPWCSBACKLIGHT:
{
ARG_DEF(int, backlight)
-
ARG_IN(backlight)
- ret = pwc_set_backlight(pdev, ARGR(backlight));
+ ret = v4l2_ctrl_s_ctrl(pdev->backlight, ARGR(backlight));
break;
}
case VIDIOCPWCGBACKLIGHT:
{
ARG_DEF(int, backlight)
-
- ret = pwc_get_backlight(pdev, ARGA(backlight));
+ ARGR(backlight) = v4l2_ctrl_g_ctrl(pdev->backlight);
ARG_OUT(backlight)
break;
}
@@ -1586,17 +1045,15 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
case VIDIOCPWCSFLICKER:
{
ARG_DEF(int, flicker)
-
ARG_IN(flicker)
- ret = pwc_set_flicker(pdev, ARGR(flicker));
+ ret = v4l2_ctrl_s_ctrl(pdev->flicker, ARGR(flicker));
break;
}
case VIDIOCPWCGFLICKER:
{
ARG_DEF(int, flicker)
-
- ret = pwc_get_flicker(pdev, ARGA(flicker));
+ ARGR(flicker) = v4l2_ctrl_g_ctrl(pdev->flicker);
ARG_OUT(flicker)
break;
}
@@ -1604,17 +1061,15 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
case VIDIOCPWCSDYNNOISE:
{
ARG_DEF(int, dynnoise)
-
ARG_IN(dynnoise)
- ret = pwc_set_dynamic_noise(pdev, ARGR(dynnoise));
+ ret = v4l2_ctrl_s_ctrl(pdev->noise_reduction, ARGR(dynnoise));
break;
}
case VIDIOCPWCGDYNNOISE:
{
ARG_DEF(int, dynnoise)
-
- ret = pwc_get_dynamic_noise(pdev, ARGA(dynnoise));
+ ARGR(dynnoise) = v4l2_ctrl_g_ctrl(pdev->noise_reduction);
ARG_OUT(dynnoise);
break;
}
diff --git a/drivers/media/video/pwc/pwc-dec1.c b/drivers/media/video/pwc/pwc-dec1.c
index c29593f589e..be0e02cb487 100644
--- a/drivers/media/video/pwc/pwc-dec1.c
+++ b/drivers/media/video/pwc/pwc-dec1.c
@@ -22,29 +22,19 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
-
-
#include "pwc-dec1.h"
-
-void pwc_dec1_init(int type, int release, void *buffer, void *table)
+int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer)
{
+ struct pwc_dec1_private *pdec;
-}
-
-void pwc_dec1_exit(void)
-{
+ if (pwc->decompress_data == NULL) {
+ pdec = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL);
+ if (pdec == NULL)
+ return -ENOMEM;
+ pwc->decompress_data = pdec;
+ }
+ pdec = pwc->decompress_data;
-
-
-}
-
-int pwc_dec1_alloc(struct pwc_device *pwc)
-{
- pwc->decompress_data = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL);
- if (pwc->decompress_data == NULL)
- return -ENOMEM;
return 0;
}
-
diff --git a/drivers/media/video/pwc/pwc-dec1.h b/drivers/media/video/pwc/pwc-dec1.h
index 8b62ddcc5c7..a57d8601080 100644
--- a/drivers/media/video/pwc/pwc-dec1.h
+++ b/drivers/media/video/pwc/pwc-dec1.h
@@ -22,8 +22,6 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
-
#ifndef PWC_DEC1_H
#define PWC_DEC1_H
@@ -32,12 +30,8 @@
struct pwc_dec1_private
{
int version;
-
};
-int pwc_dec1_alloc(struct pwc_device *pwc);
-void pwc_dec1_init(int type, int release, void *buffer, void *private_data);
-void pwc_dec1_exit(void);
+int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer);
#endif
-
diff --git a/drivers/media/video/pwc/pwc-dec23.c b/drivers/media/video/pwc/pwc-dec23.c
index 0c801b8f3ec..06a4e877ba4 100644
--- a/drivers/media/video/pwc/pwc-dec23.c
+++ b/drivers/media/video/pwc/pwc-dec23.c
@@ -916,27 +916,5 @@ void pwc_dec23_decompress(const struct pwc_device *pwc,
pout_planar_v += pwc->view.x;
}
-
}
-
}
-
-void pwc_dec23_exit(void)
-{
- /* Do nothing */
-
-}
-
-/**
- * Allocate a private structure used by lookup table.
- * You must call kfree() to free the memory allocated.
- */
-int pwc_dec23_alloc(struct pwc_device *pwc)
-{
- pwc->decompress_data = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL);
- if (pwc->decompress_data == NULL)
- return -ENOMEM;
- return 0;
-}
-
-/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
diff --git a/drivers/media/video/pwc/pwc-dec23.h b/drivers/media/video/pwc/pwc-dec23.h
index 1c55298ad15..a0ac4f3dff8 100644
--- a/drivers/media/video/pwc/pwc-dec23.h
+++ b/drivers/media/video/pwc/pwc-dec23.h
@@ -49,19 +49,9 @@ struct pwc_dec23_private
};
-
-int pwc_dec23_alloc(struct pwc_device *pwc);
int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd);
-void pwc_dec23_exit(void);
void pwc_dec23_decompress(const struct pwc_device *pwc,
const void *src,
void *dst,
int flags);
-
-
-
#endif
-
-
-/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
-
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 356cd42b593..51ca3589b1b 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -2,6 +2,7 @@
USB and Video4Linux interface part.
(C) 1999-2004 Nemosoft Unv.
(C) 2004-2006 Luc Saillard (luc@saillard.org)
+ (C) 2011 Hans de Goede <hdegoede@redhat.com>
NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
driver and thus may have bugs that are not present in the original version.
@@ -40,7 +41,7 @@
Oh yes, convention: to distinguish between all the various pointers to
device-structures, I use these names for the pointer variables:
udev: struct usb_device *
- vdev: struct video_device *
+ vdev: struct video_device (member of pwc_device)
pdev: struct pwc_device *
*/
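/*
 * Editor's sketch, not part of this patch: what "vdev: struct video_device
 * (member of pwc_device)" means in practice. In the kernel the embedded
 * structure is mapped back to its container with container_of(); the names
 * below are illustrative stand-ins, not the driver's own.
 */
#include <assert.h>
#include <stddef.h>

struct video_device_example {			/* stand-in for struct video_device */
	const char *name;
};

struct pwc_device_example {			/* stand-in for struct pwc_device */
	struct video_device_example vdev;	/* embedded, no longer a pointer */
	int type;
};

#define example_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct pwc_device_example pdev = {
		.vdev = { .name = "Philips Webcam" },
		.type = 740,
	};
	struct video_device_example *vdev = &pdev.vdev;

	/* A release callback receiving vdev can recover the owning device. */
	assert(example_container_of(vdev, struct pwc_device_example, vdev) == &pdev);
	return 0;
}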
@@ -74,7 +75,6 @@
#include "pwc-timon.h"
#include "pwc-dec23.h"
#include "pwc-dec1.h"
-#include "pwc-uncompress.h"
/* Function prototypes and driver templates */
@@ -116,6 +116,7 @@ MODULE_DEVICE_TABLE(usb, pwc_device_table);
static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id *id);
static void usb_pwc_disconnect(struct usb_interface *intf);
+static void pwc_isoc_cleanup(struct pwc_device *pdev);
static struct usb_driver pwc_driver = {
.name = "Philips webcam", /* name */
@@ -127,14 +128,11 @@ static struct usb_driver pwc_driver = {
#define MAX_DEV_HINTS 20
#define MAX_ISOC_ERRORS 20
-static int default_size = PSZ_QCIF;
static int default_fps = 10;
-static int default_fbufs = 3; /* Default number of frame buffers */
- int pwc_mbufs = 2; /* Default number of mmap() buffers */
#ifdef CONFIG_USB_PWC_DEBUG
int pwc_trace = PWC_DEBUG_LEVEL;
#endif
-static int power_save;
+static int power_save = -1;
static int led_on = 100, led_off; /* defaults to LED that is on while in use */
static int pwc_preferred_compression = 1; /* 0..3 = uncompressed..high */
static struct {
@@ -152,6 +150,7 @@ static ssize_t pwc_video_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
static unsigned int pwc_video_poll(struct file *file, poll_table *wait);
static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma);
+static void pwc_video_release(struct video_device *vfd);
static const struct v4l2_file_operations pwc_fops = {
.owner = THIS_MODULE,
@@ -164,427 +163,28 @@ static const struct v4l2_file_operations pwc_fops = {
};
static struct video_device pwc_template = {
.name = "Philips Webcam", /* Filled in later */
- .release = video_device_release,
+ .release = pwc_video_release,
.fops = &pwc_fops,
+ .ioctl_ops = &pwc_ioctl_ops,
};
/***************************************************************************/
-
-/* Okay, this is some magic that I worked out and the reasoning behind it...
-
- The biggest problem with any USB device is of course: "what to do
- when the user unplugs the device while it is in use by an application?"
- We have several options:
- 1) Curse them with the 7 plagues when they do (requires divine intervention)
- 2) Tell them not to (won't work: they'll do it anyway)
- 3) Oops the kernel (this will have a negative effect on a user's uptime)
- 4) Do something sensible.
-
- Of course, we go for option 4.
-
- It happens that this device will be linked to two times, once from
- usb_device and once from the video_device in their respective 'private'
- pointers. This is done when the device is probed() and all initialization
- succeeded. The pwc_device struct links back to both structures.
-
- When a device is unplugged while in use it will be removed from the
- list of known USB devices; I also de-register it as a V4L device, but
- unfortunately I can't free the memory since the struct is still in use
- by the file descriptor. This free-ing is then deferend until the first
- opportunity. Crude, but it works.
-
- A small 'advantage' is that if a user unplugs the cam and plugs it back
- in, it should get assigned the same video device minor, but unfortunately
- it's non-trivial to re-link the cam back to the video device... (that
- would surely be magic! :))
-*/
-
-/***************************************************************************/
/* Private functions */
-/* Here we want the physical address of the memory.
- * This is used when initializing the contents of the area.
- */
-
-
-
-static void *pwc_rvmalloc(unsigned long size)
-{
- void * mem;
- unsigned long adr;
-
- mem=vmalloc_32(size);
- if (!mem)
- return NULL;
-
- memset(mem, 0, size); /* Clear the ram out, no junk to the user */
- adr=(unsigned long) mem;
- while (size > 0)
- {
- SetPageReserved(vmalloc_to_page((void *)adr));
- adr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- return mem;
-}
-
-static void pwc_rvfree(void * mem, unsigned long size)
-{
- unsigned long adr;
-
- if (!mem)
- return;
-
- adr=(unsigned long) mem;
- while ((long) size > 0)
- {
- ClearPageReserved(vmalloc_to_page((void *)adr));
- adr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- vfree(mem);
-}
-
-
-
-
-static int pwc_allocate_buffers(struct pwc_device *pdev)
-{
- int i, err;
- void *kbuf;
-
- PWC_DEBUG_MEMORY(">> pwc_allocate_buffers(pdev = 0x%p)\n", pdev);
-
- if (pdev == NULL)
- return -ENXIO;
-
- /* Allocate Isochronuous pipe buffers */
- for (i = 0; i < MAX_ISO_BUFS; i++) {
- if (pdev->sbuf[i].data == NULL) {
- kbuf = kzalloc(ISO_BUFFER_SIZE, GFP_KERNEL);
- if (kbuf == NULL) {
- PWC_ERROR("Failed to allocate iso buffer %d.\n", i);
- return -ENOMEM;
- }
- PWC_DEBUG_MEMORY("Allocated iso buffer at %p.\n", kbuf);
- pdev->sbuf[i].data = kbuf;
- }
- }
-
- /* Allocate frame buffer structure */
- if (pdev->fbuf == NULL) {
- kbuf = kzalloc(default_fbufs * sizeof(struct pwc_frame_buf), GFP_KERNEL);
- if (kbuf == NULL) {
- PWC_ERROR("Failed to allocate frame buffer structure.\n");
- return -ENOMEM;
- }
- PWC_DEBUG_MEMORY("Allocated frame buffer structure at %p.\n", kbuf);
- pdev->fbuf = kbuf;
- }
-
- /* create frame buffers, and make circular ring */
- for (i = 0; i < default_fbufs; i++) {
- if (pdev->fbuf[i].data == NULL) {
- kbuf = vzalloc(PWC_FRAME_SIZE); /* need vmalloc since frame buffer > 128K */
- if (kbuf == NULL) {
- PWC_ERROR("Failed to allocate frame buffer %d.\n", i);
- return -ENOMEM;
- }
- PWC_DEBUG_MEMORY("Allocated frame buffer %d at %p.\n", i, kbuf);
- pdev->fbuf[i].data = kbuf;
- }
- }
-
- /* Allocate decompressor table space */
- if (DEVICE_USE_CODEC1(pdev->type))
- err = pwc_dec1_alloc(pdev);
- else
- err = pwc_dec23_alloc(pdev);
-
- if (err) {
- PWC_ERROR("Failed to allocate decompress table.\n");
- return err;
- }
-
- /* Allocate image buffer; double buffer for mmap() */
- kbuf = pwc_rvmalloc(pwc_mbufs * pdev->len_per_image);
- if (kbuf == NULL) {
- PWC_ERROR("Failed to allocate image buffer(s). needed (%d)\n",
- pwc_mbufs * pdev->len_per_image);
- return -ENOMEM;
- }
- PWC_DEBUG_MEMORY("Allocated image buffer at %p.\n", kbuf);
- pdev->image_data = kbuf;
- for (i = 0; i < pwc_mbufs; i++) {
- pdev->images[i].offset = i * pdev->len_per_image;
- pdev->images[i].vma_use_count = 0;
- }
- for (; i < MAX_IMAGES; i++) {
- pdev->images[i].offset = 0;
- }
-
- kbuf = NULL;
-
- PWC_DEBUG_MEMORY("<< pwc_allocate_buffers()\n");
- return 0;
-}
-
-static void pwc_free_buffers(struct pwc_device *pdev)
-{
- int i;
-
- PWC_DEBUG_MEMORY("Entering free_buffers(%p).\n", pdev);
-
- if (pdev == NULL)
- return;
- /* Release Iso-pipe buffers */
- for (i = 0; i < MAX_ISO_BUFS; i++)
- if (pdev->sbuf[i].data != NULL) {
- PWC_DEBUG_MEMORY("Freeing ISO buffer at %p.\n", pdev->sbuf[i].data);
- kfree(pdev->sbuf[i].data);
- pdev->sbuf[i].data = NULL;
- }
-
- /* The same for frame buffers */
- if (pdev->fbuf != NULL) {
- for (i = 0; i < default_fbufs; i++) {
- if (pdev->fbuf[i].data != NULL) {
- PWC_DEBUG_MEMORY("Freeing frame buffer %d at %p.\n", i, pdev->fbuf[i].data);
- vfree(pdev->fbuf[i].data);
- pdev->fbuf[i].data = NULL;
- }
- }
- kfree(pdev->fbuf);
- pdev->fbuf = NULL;
- }
-
- /* Intermediate decompression buffer & tables */
- if (pdev->decompress_data != NULL) {
- PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n", pdev->decompress_data);
- kfree(pdev->decompress_data);
- pdev->decompress_data = NULL;
- }
-
- /* Release image buffers */
- if (pdev->image_data != NULL) {
- PWC_DEBUG_MEMORY("Freeing image buffer at %p.\n", pdev->image_data);
- pwc_rvfree(pdev->image_data, pwc_mbufs * pdev->len_per_image);
- }
- pdev->image_data = NULL;
-
- PWC_DEBUG_MEMORY("Leaving free_buffers().\n");
-}
-
-/* The frame & image buffer mess.
-
- Yes, this is a mess. Well, it used to be simple, but alas... In this
- module, 3 buffers schemes are used to get the data from the USB bus to
- the user program. The first scheme involves the ISO buffers (called thus
- since they transport ISO data from the USB controller), and not really
- interesting. Suffices to say the data from this buffer is quickly
- gathered in an interrupt handler (pwc_isoc_handler) and placed into the
- frame buffer.
-
- The frame buffer is the second scheme, and is the central element here.
- It collects the data from a single frame from the camera (hence, the
- name). Frames are delimited by the USB camera with a short USB packet,
- so that's easy to detect. The frame buffers form a list that is filled
- by the camera+USB controller and drained by the user process through
- either read() or mmap().
-
- The image buffer is the third scheme, in which frames are decompressed
- and converted into planar format. For mmap() there is more than
- one image buffer available.
-
- The frame buffers provide the image buffering. In case the user process
- is a bit slow, this introduces lag and some undesired side-effects.
- The problem arises when the frame buffer is full. I used to drop the last
- frame, which makes the data in the queue stale very quickly. But dropping
- the frame at the head of the queue proved to be a litte bit more difficult.
- I tried a circular linked scheme, but this introduced more problems than
- it solved.
-
- Because filling and draining are completely asynchronous processes, this
- requires some fiddling with pointers and mutexes.
-
- Eventually, I came up with a system with 2 lists: an 'empty' frame list
- and a 'full' frame list:
- * Initially, all frame buffers but one are on the 'empty' list; the one
- remaining buffer is our initial fill frame.
- * If a frame is needed for filling, we try to take it from the 'empty'
- list, unless that list is empty, in which case we take the buffer at
- the head of the 'full' list.
- * When our fill buffer has been filled, it is appended to the 'full'
- list.
- * If a frame is needed by read() or mmap(), it is taken from the head of
- the 'full' list, handled, and then appended to the 'empty' list. If no
- buffer is present on the 'full' list, we wait.
- The advantage is that the buffer that is currently being decompressed/
- converted, is on neither list, and thus not in our way (any other scheme
- I tried had the problem of old data lingering in the queue).
-
- Whatever strategy you choose, it always remains a tradeoff: with more
- frame buffers the chances of a missed frame are reduced. On the other
- hand, on slower machines it introduces lag because the queue will
- always be full.
- */
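/*
 * Editor's sketch, not part of this patch: the two-list bookkeeping the
 * removed comment above describes, reduced to its core. Locking and the
 * surrounding driver state are omitted and the names are illustrative;
 * the patch itself replaces this scheme with videobuf2 queues.
 */
struct example_frame {
	struct example_frame *next;
};

struct example_frame_lists {
	struct example_frame *empty;		/* frames ready to be filled */
	struct example_frame *full;		/* frames holding complete images */
	struct example_frame *full_tail;
	struct example_frame *fill;		/* frame currently being filled */
};

/*
 * Finish the current fill frame and pick the next one: prefer the empty
 * list, otherwise steal the oldest full frame (which drops a frame).
 * Returns 1 if a frame was dropped, 0 otherwise, -1 if no frame exists.
 */
static int example_next_fill_frame(struct example_frame_lists *l)
{
	int dropped = 0;

	if (l->fill) {				/* append the finished frame to 'full' */
		if (!l->full) {
			l->full = l->fill;
			l->full_tail = l->fill;
		} else {
			l->full_tail->next = l->fill;
			l->full_tail = l->fill;
		}
	}
	if (l->empty) {				/* easy case: reuse an empty frame */
		l->fill = l->empty;
		l->empty = l->empty->next;
	} else if (l->full) {			/* steal the head of 'full' */
		l->fill = l->full;
		l->full = l->full->next;
		dropped = 1;
	} else {
		return -1;			/* neither list has a frame */
	}
	l->fill->next = NULL;
	return dropped;
}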
-
-/**
- \brief Find next frame buffer to fill. Take from empty or full list, whichever comes first.
- */
-static int pwc_next_fill_frame(struct pwc_device *pdev)
-{
- int ret;
- unsigned long flags;
-
- ret = 0;
- spin_lock_irqsave(&pdev->ptrlock, flags);
- if (pdev->fill_frame != NULL) {
- /* append to 'full' list */
- if (pdev->full_frames == NULL) {
- pdev->full_frames = pdev->fill_frame;
- pdev->full_frames_tail = pdev->full_frames;
- }
- else {
- pdev->full_frames_tail->next = pdev->fill_frame;
- pdev->full_frames_tail = pdev->fill_frame;
- }
- }
- if (pdev->empty_frames != NULL) {
- /* We have empty frames available. That's easy */
- pdev->fill_frame = pdev->empty_frames;
- pdev->empty_frames = pdev->empty_frames->next;
- }
- else {
- /* Hmm. Take it from the full list */
- /* sanity check */
- if (pdev->full_frames == NULL) {
- PWC_ERROR("Neither empty or full frames available!\n");
- spin_unlock_irqrestore(&pdev->ptrlock, flags);
- return -EINVAL;
- }
- pdev->fill_frame = pdev->full_frames;
- pdev->full_frames = pdev->full_frames->next;
- ret = 1;
- }
- pdev->fill_frame->next = NULL;
- spin_unlock_irqrestore(&pdev->ptrlock, flags);
- return ret;
-}
-
-
-/**
- \brief Reset all buffers, pointers and lists, except for the image_used[] buffer.
-
- If the image_used[] buffer is cleared too, mmap()/VIDIOCSYNC will run into trouble.
- */
-static void pwc_reset_buffers(struct pwc_device *pdev)
-{
- int i;
- unsigned long flags;
-
- PWC_DEBUG_MEMORY(">> %s __enter__\n", __func__);
-
- spin_lock_irqsave(&pdev->ptrlock, flags);
- pdev->full_frames = NULL;
- pdev->full_frames_tail = NULL;
- for (i = 0; i < default_fbufs; i++) {
- pdev->fbuf[i].filled = 0;
- if (i > 0)
- pdev->fbuf[i].next = &pdev->fbuf[i - 1];
- else
- pdev->fbuf->next = NULL;
- }
- pdev->empty_frames = &pdev->fbuf[default_fbufs - 1];
- pdev->empty_frames_tail = pdev->fbuf;
- pdev->read_frame = NULL;
- pdev->fill_frame = pdev->empty_frames;
- pdev->empty_frames = pdev->empty_frames->next;
-
- pdev->image_read_pos = 0;
- pdev->fill_image = 0;
- spin_unlock_irqrestore(&pdev->ptrlock, flags);
-
- PWC_DEBUG_MEMORY("<< %s __leaving__\n", __func__);
-}
-
-
-/**
- \brief Do all the handling for getting one frame: get pointer, decompress, advance pointers.
- */
-int pwc_handle_frame(struct pwc_device *pdev)
-{
- int ret = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&pdev->ptrlock, flags);
- /* First grab our read_frame; this is removed from all lists, so
- we can release the lock after this without problems */
- if (pdev->read_frame != NULL) {
- /* This can't theoretically happen */
- PWC_ERROR("Huh? Read frame still in use?\n");
- spin_unlock_irqrestore(&pdev->ptrlock, flags);
- return ret;
- }
-
-
- if (pdev->full_frames == NULL) {
- PWC_ERROR("Woops. No frames ready.\n");
- }
- else {
- pdev->read_frame = pdev->full_frames;
- pdev->full_frames = pdev->full_frames->next;
- pdev->read_frame->next = NULL;
- }
-
- if (pdev->read_frame != NULL) {
- /* Decompression is a lengthy process, so it's outside of the lock.
- This gives the isoc_handler the opportunity to fill more frames
- in the mean time.
- */
- spin_unlock_irqrestore(&pdev->ptrlock, flags);
- ret = pwc_decompress(pdev);
- spin_lock_irqsave(&pdev->ptrlock, flags);
-
- /* We're done with read_buffer, tack it to the end of the empty buffer list */
- if (pdev->empty_frames == NULL) {
- pdev->empty_frames = pdev->read_frame;
- pdev->empty_frames_tail = pdev->empty_frames;
- }
- else {
- pdev->empty_frames_tail->next = pdev->read_frame;
- pdev->empty_frames_tail = pdev->read_frame;
- }
- pdev->read_frame = NULL;
- }
- spin_unlock_irqrestore(&pdev->ptrlock, flags);
- return ret;
-}
-
-/**
- \brief Advance pointers of image buffer (after each user request)
-*/
-void pwc_next_image(struct pwc_device *pdev)
+struct pwc_frame_buf *pwc_get_next_fill_buf(struct pwc_device *pdev)
{
- pdev->image_used[pdev->fill_image] = 0;
- pdev->fill_image = (pdev->fill_image + 1) % pwc_mbufs;
-}
-
-/**
- * Print debug information when a frame is discarded because all of our buffer
- * is full
- */
-static void pwc_frame_dumped(struct pwc_device *pdev)
-{
- pdev->vframes_dumped++;
- if (pdev->vframe_count < FRAME_LOWMARK)
- return;
-
- if (pdev->vframes_dumped < 20)
- PWC_DEBUG_FLOW("Dumping frame %d\n", pdev->vframe_count);
- else if (pdev->vframes_dumped == 20)
- PWC_DEBUG_FLOW("Dumping frame %d (last message)\n",
- pdev->vframe_count);
+ unsigned long flags = 0;
+ struct pwc_frame_buf *buf = NULL;
+
+ spin_lock_irqsave(&pdev->queued_bufs_lock, flags);
+ if (list_empty(&pdev->queued_bufs))
+ goto leave;
+
+ buf = list_entry(pdev->queued_bufs.next, struct pwc_frame_buf, list);
+ list_del(&buf->list);
+leave:
+ spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
+ return buf;
}
static void pwc_snapshot_button(struct pwc_device *pdev, int down)
@@ -604,9 +204,9 @@ static void pwc_snapshot_button(struct pwc_device *pdev, int down)
#endif
}
-static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_buf *fbuf)
+static void pwc_frame_complete(struct pwc_device *pdev)
{
- int awake = 0;
+ struct pwc_frame_buf *fbuf = pdev->fill_buf;
/* The ToUCam Fun CMOS sensor causes the firmware to send 2 or 3 bogus
frames on the USB wire after an exposure change. This condition is
@@ -618,7 +218,6 @@ static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_
if (ptr[1] == 1 && ptr[0] & 0x10) {
PWC_TRACE("Hyundai CMOS sensor bug. Dropping frame.\n");
pdev->drop_frames += 2;
- pdev->vframes_error++;
}
if ((ptr[0] ^ pdev->vmirror) & 0x01) {
pwc_snapshot_button(pdev, ptr[0] & 0x01);
@@ -641,8 +240,7 @@ static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_
*/
if (fbuf->filled == 4)
pdev->drop_frames++;
- }
- else if (pdev->type == 740 || pdev->type == 720) {
+ } else if (pdev->type == 740 || pdev->type == 720) {
unsigned char *ptr = (unsigned char *)fbuf->data;
if ((ptr[0] ^ pdev->vmirror) & 0x01) {
pwc_snapshot_button(pdev, ptr[0] & 0x01);
@@ -650,33 +248,23 @@ static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_
pdev->vmirror = ptr[0] & 0x03;
}
- /* In case we were instructed to drop the frame, do so silently.
- The buffer pointers are not updated either (but the counters are reset below).
- */
- if (pdev->drop_frames > 0)
+ /* In case we were instructed to drop the frame, do so silently. */
+ if (pdev->drop_frames > 0) {
pdev->drop_frames--;
- else {
+ } else {
/* Check for underflow first */
if (fbuf->filled < pdev->frame_total_size) {
PWC_DEBUG_FLOW("Frame buffer underflow (%d bytes);"
" discarded.\n", fbuf->filled);
- pdev->vframes_error++;
- }
- else {
- /* Send only once per EOF */
- awake = 1; /* delay wake_ups */
-
- /* Find our next frame to fill. This will always succeed, since we
- * nick a frame from either empty or full list, but if we had to
- * take it from the full list, it means a frame got dropped.
- */
- if (pwc_next_fill_frame(pdev))
- pwc_frame_dumped(pdev);
-
+ } else {
+ fbuf->vb.v4l2_buf.field = V4L2_FIELD_NONE;
+ fbuf->vb.v4l2_buf.sequence = pdev->vframe_count;
+ vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+ pdev->fill_buf = NULL;
+ pdev->vsync = 0;
}
} /* !drop_frames */
pdev->vframe_count++;
- return awake;
}
/* This gets called for the Isochronous pipe (video). This is done in
@@ -684,24 +272,20 @@ static int pwc_rcv_short_packet(struct pwc_device *pdev, const struct pwc_frame_
*/
static void pwc_isoc_handler(struct urb *urb)
{
- struct pwc_device *pdev;
+ struct pwc_device *pdev = (struct pwc_device *)urb->context;
int i, fst, flen;
- int awake;
- struct pwc_frame_buf *fbuf;
- unsigned char *fillptr = NULL, *iso_buf = NULL;
-
- awake = 0;
- pdev = (struct pwc_device *)urb->context;
- if (pdev == NULL) {
- PWC_ERROR("isoc_handler() called with NULL device?!\n");
- return;
- }
+ unsigned char *iso_buf = NULL;
- if (urb->status == -ENOENT || urb->status == -ECONNRESET) {
+ if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN) {
PWC_DEBUG_OPEN("URB (%p) unlinked %ssynchronuously.\n", urb, urb->status == -ENOENT ? "" : "a");
return;
}
- if (urb->status != -EINPROGRESS && urb->status != 0) {
+
+ if (pdev->fill_buf == NULL)
+ pdev->fill_buf = pwc_get_next_fill_buf(pdev);
+
+ if (urb->status != 0) {
const char *errmsg;
errmsg = "Unknown";
@@ -713,29 +297,21 @@ static void pwc_isoc_handler(struct urb *urb)
case -EILSEQ: errmsg = "CRC/Timeout (could be anything)"; break;
case -ETIME: errmsg = "Device does not respond"; break;
}
- PWC_DEBUG_FLOW("pwc_isoc_handler() called with status %d [%s].\n", urb->status, errmsg);
- /* Give up after a number of contiguous errors on the USB bus.
- Appearantly something is wrong so we simulate an unplug event.
- */
+ PWC_ERROR("pwc_isoc_handler() called with status %d [%s].\n",
+ urb->status, errmsg);
+ /* Give up after a number of contiguous errors */
if (++pdev->visoc_errors > MAX_ISOC_ERRORS)
{
- PWC_INFO("Too many ISOC errors, bailing out.\n");
- pdev->error_status = EIO;
- awake = 1;
- wake_up_interruptible(&pdev->frameq);
+ PWC_ERROR("Too many ISOC errors, bailing out.\n");
+ if (pdev->fill_buf) {
+ vb2_buffer_done(&pdev->fill_buf->vb,
+ VB2_BUF_STATE_ERROR);
+ pdev->fill_buf = NULL;
+ }
}
- goto handler_end; // ugly, but practical
- }
-
- fbuf = pdev->fill_frame;
- if (fbuf == NULL) {
- PWC_ERROR("pwc_isoc_handler without valid fill frame.\n");
- awake = 1;
+ pdev->vsync = 0; /* Drop the current frame */
goto handler_end;
}
- else {
- fillptr = fbuf->data + fbuf->filled;
- }
/* Reset ISOC error counter. We did get here, after all. */
pdev->visoc_errors = 0;
@@ -749,89 +325,73 @@ static void pwc_isoc_handler(struct urb *urb)
fst = urb->iso_frame_desc[i].status;
flen = urb->iso_frame_desc[i].actual_length;
iso_buf = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
- if (fst == 0) {
- if (flen > 0) { /* if valid data... */
- if (pdev->vsync > 0) { /* ...and we are not sync-hunting... */
- pdev->vsync = 2;
-
- /* ...copy data to frame buffer, if possible */
- if (flen + fbuf->filled > pdev->frame_total_size) {
- PWC_DEBUG_FLOW("Frame buffer overflow (flen = %d, frame_total_size = %d).\n", flen, pdev->frame_total_size);
- pdev->vsync = 0; /* Hmm, let's wait for an EOF (end-of-frame) */
- pdev->vframes_error++;
- }
- else {
- memmove(fillptr, iso_buf, flen);
- fillptr += flen;
- }
- }
+ if (fst != 0) {
+ PWC_ERROR("Iso frame %d has error %d\n", i, fst);
+ continue;
+ }
+ if (flen > 0 && pdev->vsync) {
+ struct pwc_frame_buf *fbuf = pdev->fill_buf;
+
+ if (pdev->vsync == 1) {
+ do_gettimeofday(&fbuf->vb.v4l2_buf.timestamp);
+ pdev->vsync = 2;
+ }
+
+ if (flen + fbuf->filled > pdev->frame_total_size) {
+ PWC_ERROR("Frame overflow (%d > %d)\n",
+ flen + fbuf->filled,
+ pdev->frame_total_size);
+ pdev->vsync = 0; /* Let's wait for an EOF */
+ } else {
+ memcpy(fbuf->data + fbuf->filled, iso_buf,
+ flen);
fbuf->filled += flen;
- } /* ..flen > 0 */
-
- if (flen < pdev->vlast_packet_size) {
- /* Shorter packet... We probably have the end of an image-frame;
- wake up read() process and let select()/poll() do something.
- Decompression is done in user time over there.
- */
- if (pdev->vsync == 2) {
- if (pwc_rcv_short_packet(pdev, fbuf)) {
- awake = 1;
- fbuf = pdev->fill_frame;
- }
- }
- fbuf->filled = 0;
- fillptr = fbuf->data;
+ }
+ }
+ if (flen < pdev->vlast_packet_size) {
+ /* Shorter packet... end of frame */
+ if (pdev->vsync == 2)
+ pwc_frame_complete(pdev);
+ if (pdev->fill_buf == NULL)
+ pdev->fill_buf = pwc_get_next_fill_buf(pdev);
+ if (pdev->fill_buf) {
+ pdev->fill_buf->filled = 0;
pdev->vsync = 1;
}
-
- pdev->vlast_packet_size = flen;
- } /* ..status == 0 */
- else {
- /* This is normally not interesting to the user, unless
- * you are really debugging something, default = 0 */
- static int iso_error;
- iso_error++;
- if (iso_error < 20)
- PWC_DEBUG_FLOW("Iso frame %d of USB has error %d\n", i, fst);
}
+ pdev->vlast_packet_size = flen;
}
handler_end:
- if (awake)
- wake_up_interruptible(&pdev->frameq);
-
- urb->dev = pdev->udev;
i = usb_submit_urb(urb, GFP_ATOMIC);
if (i != 0)
PWC_ERROR("Error (%d) re-submitting urb in pwc_isoc_handler.\n", i);
}
-
-int pwc_isoc_init(struct pwc_device *pdev)
+static int pwc_isoc_init(struct pwc_device *pdev)
{
struct usb_device *udev;
struct urb *urb;
int i, j, ret;
-
struct usb_interface *intf;
struct usb_host_interface *idesc = NULL;
- if (pdev == NULL)
- return -EFAULT;
if (pdev->iso_init)
return 0;
+
pdev->vsync = 0;
+ pdev->vlast_packet_size = 0;
+ pdev->fill_buf = NULL;
+ pdev->vframe_count = 0;
+ pdev->visoc_errors = 0;
udev = pdev->udev;
/* Get the current alternate interface, adjust packet size */
- if (!udev->actconfig)
- return -EFAULT;
intf = usb_ifnum_to_if(udev, 0);
if (intf)
idesc = usb_altnum_to_altsetting(intf, pdev->valternate);
-
if (!idesc)
- return -EFAULT;
+ return -EIO;
/* Search video endpoint */
pdev->vmax_packet_size = -1;
@@ -854,34 +414,32 @@ int pwc_isoc_init(struct pwc_device *pdev)
if (ret < 0)
return ret;
+ /* Allocate and init isochronous urbs */
for (i = 0; i < MAX_ISO_BUFS; i++) {
urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
if (urb == NULL) {
PWC_ERROR("Failed to allocate urb %d\n", i);
- ret = -ENOMEM;
- break;
+ pdev->iso_init = 1;
+ pwc_isoc_cleanup(pdev);
+ return -ENOMEM;
}
- pdev->sbuf[i].urb = urb;
+ pdev->urbs[i] = urb;
PWC_DEBUG_MEMORY("Allocated URB at 0x%p\n", urb);
- }
- if (ret) {
- /* De-allocate in reverse order */
- while (i--) {
- usb_free_urb(pdev->sbuf[i].urb);
- pdev->sbuf[i].urb = NULL;
- }
- return ret;
- }
-
- /* init URB structure */
- for (i = 0; i < MAX_ISO_BUFS; i++) {
- urb = pdev->sbuf[i].urb;
urb->interval = 1; // devik
urb->dev = udev;
urb->pipe = usb_rcvisocpipe(udev, pdev->vendpoint);
- urb->transfer_flags = URB_ISO_ASAP;
- urb->transfer_buffer = pdev->sbuf[i].data;
+ urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_buffer = usb_alloc_coherent(udev,
+ ISO_BUFFER_SIZE,
+ GFP_KERNEL,
+ &urb->transfer_dma);
+ if (urb->transfer_buffer == NULL) {
+ PWC_ERROR("Failed to allocate urb buffer %d\n", i);
+ pdev->iso_init = 1;
+ pwc_isoc_cleanup(pdev);
+ return -ENOMEM;
+ }
urb->transfer_buffer_length = ISO_BUFFER_SIZE;
urb->complete = pwc_isoc_handler;
urb->context = pdev;
@@ -895,14 +453,14 @@ int pwc_isoc_init(struct pwc_device *pdev)
/* link */
for (i = 0; i < MAX_ISO_BUFS; i++) {
- ret = usb_submit_urb(pdev->sbuf[i].urb, GFP_KERNEL);
+ ret = usb_submit_urb(pdev->urbs[i], GFP_KERNEL);
if (ret) {
PWC_ERROR("isoc_init() submit_urb %d failed with error %d\n", i, ret);
pdev->iso_init = 1;
pwc_isoc_cleanup(pdev);
return ret;
}
- PWC_DEBUG_MEMORY("URB 0x%p submitted.\n", pdev->sbuf[i].urb);
+ PWC_DEBUG_MEMORY("URB 0x%p submitted.\n", pdev->urbs[i]);
}
/* All is done... */
@@ -917,12 +475,9 @@ static void pwc_iso_stop(struct pwc_device *pdev)
/* Unlinking ISOC buffers one by one */
for (i = 0; i < MAX_ISO_BUFS; i++) {
- struct urb *urb;
-
- urb = pdev->sbuf[i].urb;
- if (urb) {
- PWC_DEBUG_MEMORY("Unlinking URB %p\n", urb);
- usb_kill_urb(urb);
+ if (pdev->urbs[i]) {
+ PWC_DEBUG_MEMORY("Unlinking URB %p\n", pdev->urbs[i]);
+ usb_kill_urb(pdev->urbs[i]);
}
}
}
@@ -933,40 +488,51 @@ static void pwc_iso_free(struct pwc_device *pdev)
/* Freeing ISOC buffers one by one */
for (i = 0; i < MAX_ISO_BUFS; i++) {
- struct urb *urb;
-
- urb = pdev->sbuf[i].urb;
- if (urb) {
+ if (pdev->urbs[i]) {
PWC_DEBUG_MEMORY("Freeing URB\n");
- usb_free_urb(urb);
- pdev->sbuf[i].urb = NULL;
+ if (pdev->urbs[i]->transfer_buffer) {
+ usb_free_coherent(pdev->udev,
+ pdev->urbs[i]->transfer_buffer_length,
+ pdev->urbs[i]->transfer_buffer,
+ pdev->urbs[i]->transfer_dma);
+ }
+ usb_free_urb(pdev->urbs[i]);
+ pdev->urbs[i] = NULL;
}
}
}
-void pwc_isoc_cleanup(struct pwc_device *pdev)
+static void pwc_isoc_cleanup(struct pwc_device *pdev)
{
PWC_DEBUG_OPEN(">> pwc_isoc_cleanup()\n");
- if (pdev == NULL)
- return;
+
if (pdev->iso_init == 0)
return;
pwc_iso_stop(pdev);
pwc_iso_free(pdev);
-
- /* Stop camera, but only if we are sure the camera is still there (unplug
- is signalled by EPIPE)
- */
- if (pdev->error_status != EPIPE) {
- PWC_DEBUG_OPEN("Setting alternate interface 0.\n");
- usb_set_interface(pdev->udev, 0, 0);
- }
+ usb_set_interface(pdev->udev, 0, 0);
pdev->iso_init = 0;
PWC_DEBUG_OPEN("<< pwc_isoc_cleanup()\n");
}
+/*
+ * Release all queued buffers; no need to take queued_bufs_lock, since all
+ * iso urbs have been killed when we're called so pwc_isoc_handler won't run.
+ */
+static void pwc_cleanup_queued_bufs(struct pwc_device *pdev)
+{
+ while (!list_empty(&pdev->queued_bufs)) {
+ struct pwc_frame_buf *buf;
+
+ buf = list_entry(pdev->queued_bufs.next, struct pwc_frame_buf,
+ list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+}
+
/*********
* sysfs
*********/
@@ -1016,16 +582,15 @@ static ssize_t show_snapshot_button_status(struct device *class_dev,
static DEVICE_ATTR(button, S_IRUGO | S_IWUSR, show_snapshot_button_status,
NULL);
-static int pwc_create_sysfs_files(struct video_device *vdev)
+static int pwc_create_sysfs_files(struct pwc_device *pdev)
{
- struct pwc_device *pdev = video_get_drvdata(vdev);
int rc;
- rc = device_create_file(&vdev->dev, &dev_attr_button);
+ rc = device_create_file(&pdev->vdev.dev, &dev_attr_button);
if (rc)
goto err;
if (pdev->features & FEATURE_MOTOR_PANTILT) {
- rc = device_create_file(&vdev->dev, &dev_attr_pan_tilt);
+ rc = device_create_file(&pdev->vdev.dev, &dev_attr_pan_tilt);
if (rc)
goto err_button;
}
@@ -1033,19 +598,17 @@ static int pwc_create_sysfs_files(struct video_device *vdev)
return 0;
err_button:
- device_remove_file(&vdev->dev, &dev_attr_button);
+ device_remove_file(&pdev->vdev.dev, &dev_attr_button);
err:
PWC_ERROR("Could not create sysfs files.\n");
return rc;
}
-static void pwc_remove_sysfs_files(struct video_device *vdev)
+static void pwc_remove_sysfs_files(struct pwc_device *pdev)
{
- struct pwc_device *pdev = video_get_drvdata(vdev);
-
if (pdev->features & FEATURE_MOTOR_PANTILT)
- device_remove_file(&vdev->dev, &dev_attr_pan_tilt);
- device_remove_file(&vdev->dev, &dev_attr_button);
+ device_remove_file(&pdev->vdev.dev, &dev_attr_pan_tilt);
+ device_remove_file(&pdev->vdev.dev, &dev_attr_button);
}
#ifdef CONFIG_USB_PWC_DEBUG
@@ -1083,353 +646,235 @@ static const char *pwc_sensor_type_to_string(unsigned int sensor_type)
static int pwc_video_open(struct file *file)
{
- int i, ret;
struct video_device *vdev = video_devdata(file);
struct pwc_device *pdev;
PWC_DEBUG_OPEN(">> video_open called(vdev = 0x%p).\n", vdev);
pdev = video_get_drvdata(vdev);
- BUG_ON(!pdev);
- if (pdev->vopen) {
- PWC_DEBUG_OPEN("I'm busy, someone is using the device.\n");
- return -EBUSY;
- }
+ if (!pdev->udev)
+ return -ENODEV;
- pwc_construct(pdev); /* set min/max sizes correct */
- if (!pdev->usb_init) {
- PWC_DEBUG_OPEN("Doing first time initialization.\n");
- pdev->usb_init = 1;
+ file->private_data = vdev;
+ PWC_DEBUG_OPEN("<< video_open() returns 0.\n");
+ return 0;
+}
- /* Query sensor type */
- ret = pwc_get_cmos_sensor(pdev, &i);
- if (ret >= 0)
- {
- PWC_DEBUG_OPEN("This %s camera is equipped with a %s (%d).\n",
- pdev->vdev->name,
- pwc_sensor_type_to_string(i), i);
- }
- }
+static void pwc_video_release(struct video_device *vfd)
+{
+ struct pwc_device *pdev = container_of(vfd, struct pwc_device, vdev);
+ int hint;
+
+ /* search device_hint[] table if we occupy a slot, by any chance */
+ for (hint = 0; hint < MAX_DEV_HINTS; hint++)
+ if (device_hint[hint].pdev == pdev)
+ device_hint[hint].pdev = NULL;
- /* Turn on camera */
- if (power_save) {
- i = pwc_camera_power(pdev, 1);
- if (i < 0)
- PWC_DEBUG_OPEN("Failed to restore power to the camera! (%d)\n", i);
+ /* Free intermediate decompression buffer & tables */
+ if (pdev->decompress_data != NULL) {
+ PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n",
+ pdev->decompress_data);
+ kfree(pdev->decompress_data);
+ pdev->decompress_data = NULL;
}
- /* Set LED on/off time */
- if (pwc_set_leds(pdev, led_on, led_off) < 0)
- PWC_DEBUG_OPEN("Failed to set LED on/off time.\n");
+ v4l2_ctrl_handler_free(&pdev->ctrl_handler);
- /* So far, so good. Allocate memory. */
- i = pwc_allocate_buffers(pdev);
- if (i < 0) {
- PWC_DEBUG_OPEN("Failed to allocate buffers memory.\n");
- pwc_free_buffers(pdev);
- return i;
- }
+ kfree(pdev);
+}
- /* Reset buffers & parameters */
- pwc_reset_buffers(pdev);
- for (i = 0; i < pwc_mbufs; i++)
- pdev->image_used[i] = 0;
- pdev->vframe_count = 0;
- pdev->vframes_dumped = 0;
- pdev->vframes_error = 0;
- pdev->visoc_errors = 0;
- pdev->error_status = 0;
- pwc_construct(pdev); /* set min/max sizes correct */
+static int pwc_video_close(struct file *file)
+{
+ struct video_device *vdev = file->private_data;
+ struct pwc_device *pdev;
- /* Set some defaults */
- pdev->vsnapshot = 0;
+ PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev);
- /* Set video size, first try the last used video size
- (or the default one); if that fails try QCIF/10 or QSIF/10;
- it that fails too, give up.
- */
- i = pwc_set_video_mode(pdev, pwc_image_sizes[pdev->vsize].x, pwc_image_sizes[pdev->vsize].y, pdev->vframes, pdev->vcompression, 0);
- if (i) {
- unsigned int default_resolution;
- PWC_DEBUG_OPEN("First attempt at set_video_mode failed.\n");
- if (pdev->type>= 730)
- default_resolution = PSZ_QSIF;
- else
- default_resolution = PSZ_QCIF;
-
- i = pwc_set_video_mode(pdev,
- pwc_image_sizes[default_resolution].x,
- pwc_image_sizes[default_resolution].y,
- 10,
- pdev->vcompression,
- 0);
- }
- if (i) {
- PWC_DEBUG_OPEN("Second attempt at set_video_mode failed.\n");
- pwc_free_buffers(pdev);
- return i;
+ pdev = video_get_drvdata(vdev);
+ if (pdev->capt_file == file) {
+ vb2_queue_release(&pdev->vb_queue);
+ pdev->capt_file = NULL;
}
- /* Initialize the webcam to sane value */
- pwc_set_brightness(pdev, 0x7fff);
- pwc_set_agc(pdev, 1, 0);
-
- pdev->vopen++;
- file->private_data = vdev;
- PWC_DEBUG_OPEN("<< video_open() returns 0.\n");
+ PWC_DEBUG_OPEN("<< video_close()\n");
return 0;
}
+static ssize_t pwc_video_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct video_device *vdev = file->private_data;
+ struct pwc_device *pdev = video_get_drvdata(vdev);
+
+ if (!pdev->udev)
+ return -ENODEV;
+
+ if (pdev->capt_file != NULL &&
+ pdev->capt_file != file)
+ return -EBUSY;
+
+ pdev->capt_file = file;
+
+ return vb2_read(&pdev->vb_queue, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
+}
-static void pwc_cleanup(struct pwc_device *pdev)
+static unsigned int pwc_video_poll(struct file *file, poll_table *wait)
{
- pwc_remove_sysfs_files(pdev->vdev);
- video_unregister_device(pdev->vdev);
+ struct video_device *vdev = file->private_data;
+ struct pwc_device *pdev = video_get_drvdata(vdev);
-#ifdef CONFIG_USB_PWC_INPUT_EVDEV
- if (pdev->button_dev)
- input_unregister_device(pdev->button_dev);
-#endif
+ if (!pdev->udev)
+ return POLL_ERR;
- kfree(pdev);
+ return vb2_poll(&pdev->vb_queue, file, wait);
}
-/* Note that all cleanup is done in the reverse order as in _open */
-static int pwc_video_close(struct file *file)
+static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma)
{
struct video_device *vdev = file->private_data;
- struct pwc_device *pdev;
- int i, hint;
+ struct pwc_device *pdev = video_get_drvdata(vdev);
- PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev);
+ if (pdev->capt_file != file)
+ return -EBUSY;
- pdev = video_get_drvdata(vdev);
- if (pdev->vopen == 0)
- PWC_DEBUG_MODULE("video_close() called on closed device?\n");
+ return vb2_mmap(&pdev->vb_queue, vma);
+}
- /* Dump statistics, but only if a reasonable amount of frames were
- processed (to prevent endless log-entries in case of snap-shot
- programs)
- */
- if (pdev->vframe_count > 20)
- PWC_DEBUG_MODULE("Closing video device: %d frames received, dumped %d frames, %d frames with errors.\n", pdev->vframe_count, pdev->vframes_dumped, pdev->vframes_error);
+/***************************************************************************/
+/* Videobuf2 operations */
- if (DEVICE_USE_CODEC1(pdev->type))
- pwc_dec1_exit();
- else
- pwc_dec23_exit();
+static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned long sizes[],
+ void *alloc_ctxs[])
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vq);
- pwc_isoc_cleanup(pdev);
- pwc_free_buffers(pdev);
-
- /* Turn off LEDS and power down camera, but only when not unplugged */
- if (!pdev->unplugged) {
- /* Turn LEDs off */
- if (pwc_set_leds(pdev, 0, 0) < 0)
- PWC_DEBUG_MODULE("Failed to set LED on/off time.\n");
- if (power_save) {
- i = pwc_camera_power(pdev, 0);
- if (i < 0)
- PWC_ERROR("Failed to power down camera (%d)\n", i);
- }
- pdev->vopen--;
- PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen);
- } else {
- pwc_cleanup(pdev);
- /* search device_hint[] table if we occupy a slot, by any chance */
- for (hint = 0; hint < MAX_DEV_HINTS; hint++)
- if (device_hint[hint].pdev == pdev)
- device_hint[hint].pdev = NULL;
- }
+ if (*nbuffers < MIN_FRAMES)
+ *nbuffers = MIN_FRAMES;
+ else if (*nbuffers > MAX_FRAMES)
+ *nbuffers = MAX_FRAMES;
+
+ *nplanes = 1;
+
+ sizes[0] = PAGE_ALIGN((pdev->abs_max.x * pdev->abs_max.y * 3) / 2);
return 0;
}
-/*
- * FIXME: what about two parallel reads ????
- * ANSWER: Not supported. You can't open the device more than once,
- despite what the V4L1 interface says. First, I don't see
- the need, second there's no mechanism of alerting the
- 2nd/3rd/... process of events like changing image size.
- And I don't see the point of blocking that for the
- 2nd/3rd/... process.
- In multi-threaded environments reading parallel from any
- device is tricky anyhow.
- */
-
-static ssize_t pwc_video_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static int buffer_init(struct vb2_buffer *vb)
{
- struct video_device *vdev = file->private_data;
- struct pwc_device *pdev;
- int noblock = file->f_flags & O_NONBLOCK;
- DECLARE_WAITQUEUE(wait, current);
- int bytes_to_read, rv = 0;
- void *image_buffer_addr;
-
- PWC_DEBUG_READ("pwc_video_read(vdev=0x%p, buf=%p, count=%zd) called.\n",
- vdev, buf, count);
- if (vdev == NULL)
- return -EFAULT;
- pdev = video_get_drvdata(vdev);
- if (pdev == NULL)
- return -EFAULT;
+ struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
- if (pdev->error_status) {
- rv = -pdev->error_status; /* Something happened, report what. */
- goto err_out;
- }
+ /* need vmalloc since frame buffer > 128K */
+ buf->data = vzalloc(PWC_FRAME_SIZE);
+ if (buf->data == NULL)
+ return -ENOMEM;
- /* Start the stream (if not already started) */
- rv = pwc_isoc_init(pdev);
- if (rv)
- goto err_out;
-
- /* In case we're doing partial reads, we don't have to wait for a frame */
- if (pdev->image_read_pos == 0) {
- /* Do wait queueing according to the (doc)book */
- add_wait_queue(&pdev->frameq, &wait);
- while (pdev->full_frames == NULL) {
- /* Check for unplugged/etc. here */
- if (pdev->error_status) {
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
- rv = -pdev->error_status ;
- goto err_out;
- }
- if (noblock) {
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
- rv = -EWOULDBLOCK;
- goto err_out;
- }
- if (signal_pending(current)) {
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
- rv = -ERESTARTSYS;
- goto err_out;
- }
- mutex_unlock(&pdev->modlock);
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&pdev->modlock);
- }
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
+ return 0;
+}
- /* Decompress and release frame */
- if (pwc_handle_frame(pdev)) {
- rv = -EFAULT;
- goto err_out;
- }
- }
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
- PWC_DEBUG_READ("Copying data to user space.\n");
- if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
- bytes_to_read = pdev->frame_size + sizeof(struct pwc_raw_frame);
- else
- bytes_to_read = pdev->view.size;
-
- /* copy bytes to user space; we allow for partial reads */
- if (count + pdev->image_read_pos > bytes_to_read)
- count = bytes_to_read - pdev->image_read_pos;
- image_buffer_addr = pdev->image_data;
- image_buffer_addr += pdev->images[pdev->fill_image].offset;
- image_buffer_addr += pdev->image_read_pos;
- if (copy_to_user(buf, image_buffer_addr, count)) {
- rv = -EFAULT;
- goto err_out;
- }
- pdev->image_read_pos += count;
- if (pdev->image_read_pos >= bytes_to_read) { /* All data has been read */
- pdev->image_read_pos = 0;
- pwc_next_image(pdev);
- }
- return count;
-err_out:
- return rv;
+ /* Don't allow queueing new buffers after device disconnection */
+ if (!pdev->udev)
+ return -ENODEV;
+
+ return 0;
}
-static unsigned int pwc_video_poll(struct file *file, poll_table *wait)
+static int buffer_finish(struct vb2_buffer *vb)
{
- struct video_device *vdev = file->private_data;
- struct pwc_device *pdev;
- int ret;
+ struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
+ struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
- if (vdev == NULL)
- return -EFAULT;
- pdev = video_get_drvdata(vdev);
- if (pdev == NULL)
- return -EFAULT;
+ /*
+ * Application has called dqbuf and is getting back a buffer we've
+ * filled; take the pwc data we've stored in buf->data and decompress
+ * it into a usable format, storing the result in the vb2_buffer
+ */
+ return pwc_decompress(pdev, buf);
+}
- /* Start the stream (if not already started) */
- ret = pwc_isoc_init(pdev);
- if (ret)
- return ret;
+static void buffer_cleanup(struct vb2_buffer *vb)
+{
+ struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
+
+ vfree(buf->data);
+}
- poll_wait(file, &pdev->frameq, wait);
- if (pdev->error_status)
- return POLLERR;
- if (pdev->full_frames != NULL) /* we have frames waiting */
- return (POLLIN | POLLRDNORM);
+static void buffer_queue(struct vb2_buffer *vb)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue);
+ struct pwc_frame_buf *buf = container_of(vb, struct pwc_frame_buf, vb);
+ unsigned long flags = 0;
- return 0;
+ spin_lock_irqsave(&pdev->queued_bufs_lock, flags);
+ list_add_tail(&buf->list, &pdev->queued_bufs);
+ spin_unlock_irqrestore(&pdev->queued_bufs_lock, flags);
}
-static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma)
+static int start_streaming(struct vb2_queue *vq)
{
- struct video_device *vdev = file->private_data;
- struct pwc_device *pdev;
- unsigned long start;
- unsigned long size;
- unsigned long page, pos = 0;
- int index;
+ struct pwc_device *pdev = vb2_get_drv_priv(vq);
- PWC_DEBUG_MEMORY(">> %s\n", __func__);
- pdev = video_get_drvdata(vdev);
- size = vma->vm_end - vma->vm_start;
- start = vma->vm_start;
+ if (!pdev->udev)
+ return -ENODEV;
- /* Find the idx buffer for this mapping */
- for (index = 0; index < pwc_mbufs; index++) {
- pos = pdev->images[index].offset;
- if ((pos>>PAGE_SHIFT) == vma->vm_pgoff)
- break;
+ /* Turn on camera and set LEDs on */
+ pwc_camera_power(pdev, 1);
+ if (pdev->power_save) {
+ /* Restore video mode */
+ pwc_set_video_mode(pdev, pdev->view.x, pdev->view.y,
+ pdev->vframes, pdev->vcompression,
+ pdev->vsnapshot);
}
- if (index == MAX_IMAGES)
- return -EINVAL;
- if (index == 0) {
- /*
- * Special case for v4l1. In v4l1, we map only one big buffer,
- * but in v4l2 each buffer is mapped
- */
- unsigned long total_size;
- total_size = pwc_mbufs * pdev->len_per_image;
- if (size != pdev->len_per_image && size != total_size) {
- PWC_ERROR("Wrong size (%lu) needed to be len_per_image=%d or total_size=%lu\n",
- size, pdev->len_per_image, total_size);
- return -EINVAL;
- }
- } else if (size > pdev->len_per_image)
- return -EINVAL;
-
- vma->vm_flags |= VM_IO; /* from 2.6.9-acX */
-
- pos += (unsigned long)pdev->image_data;
- while (size > 0) {
- page = vmalloc_to_pfn((void *)pos);
- if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
- return -EAGAIN;
- start += PAGE_SIZE;
- pos += PAGE_SIZE;
- if (size > PAGE_SIZE)
- size -= PAGE_SIZE;
- else
- size = 0;
+ pwc_set_leds(pdev, led_on, led_off);
+
+ return pwc_isoc_init(pdev);
+}
+
+static int stop_streaming(struct vb2_queue *vq)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vq);
+
+ if (pdev->udev) {
+ pwc_set_leds(pdev, 0, 0);
+ pwc_camera_power(pdev, 0);
+ pwc_isoc_cleanup(pdev);
}
+ pwc_cleanup_queued_bufs(pdev);
+
return 0;
}
+static void pwc_lock(struct vb2_queue *vq)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vq);
+ mutex_lock(&pdev->modlock);
+}
+
+static void pwc_unlock(struct vb2_queue *vq)
+{
+ struct pwc_device *pdev = vb2_get_drv_priv(vq);
+ mutex_unlock(&pdev->modlock);
+}
+
+static struct vb2_ops pwc_vb_queue_ops = {
+ .queue_setup = queue_setup,
+ .buf_init = buffer_init,
+ .buf_prepare = buffer_prepare,
+ .buf_finish = buffer_finish,
+ .buf_cleanup = buffer_cleanup,
+ .buf_queue = buffer_queue,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = pwc_unlock,
+ .wait_finish = pwc_lock,
+};
+
/***************************************************************************/
/* USB functions */
@@ -1445,6 +890,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
int hint, rc;
int features = 0;
int video_nr = -1; /* default: use next available device */
+ int my_power_save = power_save;
char serial_number[30], *name;
vendor_id = le16_to_cpu(udev->descriptor.idVendor);
@@ -1552,6 +998,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
PWC_INFO("Logitech QuickCam 4000 Pro USB webcam detected.\n");
name = "Logitech QuickCam Pro 4000";
type_id = 740; /* CCD sensor */
+ if (my_power_save == -1)
+ my_power_save = 1;
break;
case 0x08b3:
PWC_INFO("Logitech QuickCam Zoom USB webcam detected.\n");
@@ -1562,12 +1010,15 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
PWC_INFO("Logitech QuickCam Zoom (new model) USB webcam detected.\n");
name = "Logitech QuickCam Zoom";
type_id = 740; /* CCD sensor */
- power_save = 1;
+ if (my_power_save == -1)
+ my_power_save = 1;
break;
case 0x08b5:
PWC_INFO("Logitech QuickCam Orbit/Sphere USB webcam detected.\n");
name = "Logitech QuickCam Orbit";
type_id = 740; /* CCD sensor */
+ if (my_power_save == -1)
+ my_power_save = 1;
features |= FEATURE_MOTOR_PANTILT;
break;
case 0x08b6:
@@ -1622,6 +1073,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
PWC_INFO("Creative Labs Webcam 5 detected.\n");
name = "Creative Labs Webcam 5";
type_id = 730;
+ if (my_power_save == -1)
+ my_power_save = 1;
break;
case 0x4011:
PWC_INFO("Creative Labs Webcam Pro Ex detected.\n");
@@ -1679,6 +1132,9 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
else
  return -ENODEV; /* Not any of the known types; but the list keeps growing. */
+ if (my_power_save == -1)
+ my_power_save = 0;
+
memset(serial_number, 0, 30);
usb_string(udev, udev->descriptor.iSerialNumber, serial_number, 29);
PWC_DEBUG_PROBE("Device serial number is %s\n", serial_number);
@@ -1693,7 +1149,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
return -ENOMEM;
}
pdev->type = type_id;
- pdev->vsize = default_size;
pdev->vframes = default_fps;
strcpy(pdev->serial, serial_number);
pdev->features = features;
@@ -1707,27 +1162,33 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->angle_range.tilt_min = -3000;
pdev->angle_range.tilt_max = 2500;
}
+ pwc_construct(pdev); /* set min/max sizes correctly */
mutex_init(&pdev->modlock);
- spin_lock_init(&pdev->ptrlock);
+ mutex_init(&pdev->udevlock);
+ spin_lock_init(&pdev->queued_bufs_lock);
+ INIT_LIST_HEAD(&pdev->queued_bufs);
pdev->udev = udev;
- init_waitqueue_head(&pdev->frameq);
pdev->vcompression = pwc_preferred_compression;
-
- /* Allocate video_device structure */
- pdev->vdev = video_device_alloc();
- if (!pdev->vdev) {
- PWC_ERROR("Err, cannot allocate video_device struture. Failing probe.");
- rc = -ENOMEM;
- goto err_free_mem;
- }
- memcpy(pdev->vdev, &pwc_template, sizeof(pwc_template));
- pdev->vdev->parent = &intf->dev;
- pdev->vdev->lock = &pdev->modlock;
- pdev->vdev->ioctl_ops = &pwc_ioctl_ops;
- strcpy(pdev->vdev->name, name);
- video_set_drvdata(pdev->vdev, pdev);
+ pdev->power_save = my_power_save;
+
+ /* Init videobuf2 queue structure */
+ memset(&pdev->vb_queue, 0, sizeof(pdev->vb_queue));
+ pdev->vb_queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ pdev->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ pdev->vb_queue.drv_priv = pdev;
+ pdev->vb_queue.buf_struct_size = sizeof(struct pwc_frame_buf);
+ pdev->vb_queue.ops = &pwc_vb_queue_ops;
+ pdev->vb_queue.mem_ops = &vb2_vmalloc_memops;
+ vb2_queue_init(&pdev->vb_queue);
+
+ /* Init video_device structure */
+ memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
+ pdev->vdev.parent = &intf->dev;
+ pdev->vdev.lock = &pdev->modlock;
+ strcpy(pdev->vdev.name, name);
+ video_set_drvdata(&pdev->vdev, pdev);
pdev->release = le16_to_cpu(udev->descriptor.bcdDevice);
PWC_DEBUG_PROBE("Release: %04x\n", pdev->release);
@@ -1746,8 +1207,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
}
}
- pdev->vdev->release = video_device_release;
-
/* occupy slot */
if (hint < MAX_DEV_HINTS)
device_hint[hint].pdev = pdev;
@@ -1755,20 +1214,46 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
PWC_DEBUG_PROBE("probe() function returning struct at 0x%p.\n", pdev);
usb_set_intfdata(intf, pdev);
+#ifdef CONFIG_USB_PWC_DEBUG
+ /* Query sensor type */
+ if (pwc_get_cmos_sensor(pdev, &rc) >= 0) {
+ PWC_DEBUG_OPEN("This %s camera is equipped with a %s (%d).\n",
+ pdev->vdev.name,
+ pwc_sensor_type_to_string(rc), rc);
+ }
+#endif
+
/* Set the leds off */
pwc_set_leds(pdev, 0, 0);
+
+ /* Set up initial video mode */
+ rc = pwc_set_video_mode(pdev, pdev->view_max.x, pdev->view_max.y,
+ pdev->vframes, pdev->vcompression, 0);
+ if (rc)
+ goto err_free_mem;
+
+ /* Register controls (and read default values from camera) */
+ rc = pwc_init_controls(pdev);
+ if (rc) {
+ PWC_ERROR("Failed to register v4l2 controls (%d).\n", rc);
+ goto err_free_mem;
+ }
+
+ pdev->vdev.ctrl_handler = &pdev->ctrl_handler;
+
+ /* And powerdown the camera until streaming starts */
pwc_camera_power(pdev, 0);
- rc = video_register_device(pdev->vdev, VFL_TYPE_GRABBER, video_nr);
+ rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr);
if (rc < 0) {
PWC_ERROR("Failed to register as video device (%d).\n", rc);
- goto err_video_release;
+ goto err_free_controls;
}
- rc = pwc_create_sysfs_files(pdev->vdev);
+ rc = pwc_create_sysfs_files(pdev);
if (rc)
goto err_video_unreg;
- PWC_INFO("Registered as %s.\n", video_device_node_name(pdev->vdev));
+ PWC_INFO("Registered as %s.\n", video_device_node_name(&pdev->vdev));
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
/* register webcam snapshot button input device */
@@ -1776,7 +1261,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
if (!pdev->button_dev) {
PWC_ERROR("Err, insufficient memory for webcam snapshot button device.");
rc = -ENOMEM;
- pwc_remove_sysfs_files(pdev->vdev);
+ pwc_remove_sysfs_files(pdev);
goto err_video_unreg;
}
@@ -1794,7 +1279,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
if (rc) {
input_free_device(pdev->button_dev);
pdev->button_dev = NULL;
- pwc_remove_sysfs_files(pdev->vdev);
+ pwc_remove_sysfs_files(pdev);
goto err_video_unreg;
}
#endif
@@ -1804,11 +1289,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
err_video_unreg:
if (hint < MAX_DEV_HINTS)
device_hint[hint].pdev = NULL;
- video_unregister_device(pdev->vdev);
- pdev->vdev = NULL; /* So we don't try to release it below */
-err_video_release:
- video_device_release(pdev->vdev);
+ video_unregister_device(&pdev->vdev);
+err_free_controls:
+ v4l2_ctrl_handler_free(&pdev->ctrl_handler);
err_free_mem:
+ usb_set_intfdata(intf, NULL);
kfree(pdev);
return rc;
}
@@ -1816,50 +1301,27 @@ err_free_mem:
/* The user yanked out the cable... */
static void usb_pwc_disconnect(struct usb_interface *intf)
{
- struct pwc_device *pdev;
- int hint;
+ struct pwc_device *pdev = usb_get_intfdata(intf);
- pdev = usb_get_intfdata (intf);
+ mutex_lock(&pdev->udevlock);
mutex_lock(&pdev->modlock);
- usb_set_intfdata (intf, NULL);
- if (pdev == NULL) {
- PWC_ERROR("pwc_disconnect() Called without private pointer.\n");
- goto disconnect_out;
- }
- if (pdev->udev == NULL) {
- PWC_ERROR("pwc_disconnect() already called for %p\n", pdev);
- goto disconnect_out;
- }
- if (pdev->udev != interface_to_usbdev(intf)) {
- PWC_ERROR("pwc_disconnect() Woops: pointer mismatch udev/pdev.\n");
- goto disconnect_out;
- }
-
- /* We got unplugged; this is signalled by an EPIPE error code */
- if (pdev->vopen) {
- PWC_INFO("Disconnected while webcam is in use!\n");
- pdev->error_status = EPIPE;
- }
- /* Alert waiting processes */
- wake_up_interruptible(&pdev->frameq);
- /* Wait until device is closed */
- if (pdev->vopen) {
- pdev->unplugged = 1;
- pwc_iso_stop(pdev);
- } else {
- /* Device is closed, so we can safely unregister it */
- PWC_DEBUG_PROBE("Unregistering video device in disconnect().\n");
-
-disconnect_out:
- /* search device_hint[] table if we occupy a slot, by any chance */
- for (hint = 0; hint < MAX_DEV_HINTS; hint++)
- if (device_hint[hint].pdev == pdev)
- device_hint[hint].pdev = NULL;
- }
+ usb_set_intfdata(intf, NULL);
+ /* No need to keep the urbs around after disconnection */
+ pwc_isoc_cleanup(pdev);
+ pwc_cleanup_queued_bufs(pdev);
+ pdev->udev = NULL;
mutex_unlock(&pdev->modlock);
- pwc_cleanup(pdev);
+ mutex_unlock(&pdev->udevlock);
+
+ pwc_remove_sysfs_files(pdev);
+ video_unregister_device(&pdev->vdev);
+
+#ifdef CONFIG_USB_PWC_INPUT_EVDEV
+ if (pdev->button_dev)
+ input_unregister_device(pdev->button_dev);
+#endif
}
@@ -1867,36 +1329,27 @@ disconnect_out:
* Initialization code & module stuff
*/
-static char *size;
static int fps;
-static int fbufs;
-static int mbufs;
static int compression = -1;
static int leds[2] = { -1, -1 };
static unsigned int leds_nargs;
static char *dev_hint[MAX_DEV_HINTS];
static unsigned int dev_hint_nargs;
-module_param(size, charp, 0444);
module_param(fps, int, 0444);
-module_param(fbufs, int, 0444);
-module_param(mbufs, int, 0444);
#ifdef CONFIG_USB_PWC_DEBUG
module_param_named(trace, pwc_trace, int, 0644);
#endif
-module_param(power_save, int, 0444);
+module_param(power_save, int, 0644);
module_param(compression, int, 0444);
module_param_array(leds, int, &leds_nargs, 0444);
module_param_array(dev_hint, charp, &dev_hint_nargs, 0444);
-MODULE_PARM_DESC(size, "Initial image size. One of sqcif, qsif, qcif, sif, cif, vga");
MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30");
-MODULE_PARM_DESC(fbufs, "Number of internal frame buffers to reserve");
-MODULE_PARM_DESC(mbufs, "Number of external (mmap()ed) image buffers");
#ifdef CONFIG_USB_PWC_DEBUG
MODULE_PARM_DESC(trace, "For debugging purposes");
#endif
-MODULE_PARM_DESC(power_save, "Turn power save feature in camera on or off");
+MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off");
MODULE_PARM_DESC(compression, "Preferred compression quality. Range 0 (uncompressed) to 3 (high compression)");
MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");
MODULE_PARM_DESC(dev_hint, "Device node hints");
@@ -1909,14 +1362,19 @@ MODULE_VERSION( PWC_VERSION );
static int __init usb_pwc_init(void)
{
- int i, sz;
- char *sizenames[PSZ_MAX] = { "sqcif", "qsif", "qcif", "sif", "cif", "vga" };
+ int i;
+#ifdef CONFIG_USB_PWC_DEBUG
PWC_INFO("Philips webcam module version " PWC_VERSION " loaded.\n");
PWC_INFO("Supports Philips PCA645/646, PCVC675/680/690, PCVC720[40]/730/740/750 & PCVC830/840.\n");
PWC_INFO("Also supports the Askey VC010, various Logitech Quickcams, Samsung MPC-C10 and MPC-C30,\n");
PWC_INFO("the Creative WebCam 5 & Pro Ex, SOTEC Afina Eye and Visionite VCS-UC300 and VCS-UM100.\n");
+ if (pwc_trace >= 0) {
+ PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace);
+ }
+#endif
+
if (fps) {
if (fps < 4 || fps > 30) {
PWC_ERROR("Framerate out of bounds (4-30).\n");
@@ -1926,41 +1384,6 @@ static int __init usb_pwc_init(void)
PWC_DEBUG_MODULE("Default framerate set to %d.\n", default_fps);
}
- if (size) {
- /* string; try matching with array */
- for (sz = 0; sz < PSZ_MAX; sz++) {
- if (!strcmp(sizenames[sz], size)) { /* Found! */
- default_size = sz;
- break;
- }
- }
- if (sz == PSZ_MAX) {
- PWC_ERROR("Size not recognized; try size=[sqcif | qsif | qcif | sif | cif | vga].\n");
- return -EINVAL;
- }
- PWC_DEBUG_MODULE("Default image size set to %s [%dx%d].\n", sizenames[default_size], pwc_image_sizes[default_size].x, pwc_image_sizes[default_size].y);
- }
- if (mbufs) {
- if (mbufs < 1 || mbufs > MAX_IMAGES) {
- PWC_ERROR("Illegal number of mmap() buffers; use a number between 1 and %d.\n", MAX_IMAGES);
- return -EINVAL;
- }
- pwc_mbufs = mbufs;
- PWC_DEBUG_MODULE("Number of image buffers set to %d.\n", pwc_mbufs);
- }
- if (fbufs) {
- if (fbufs < 2 || fbufs > MAX_FRAMES) {
- PWC_ERROR("Illegal number of frame buffers; use a number between 2 and %d.\n", MAX_FRAMES);
- return -EINVAL;
- }
- default_fbufs = fbufs;
- PWC_DEBUG_MODULE("Number of frame buffers set to %d.\n", default_fbufs);
- }
-#ifdef CONFIG_USB_PWC_DEBUG
- if (pwc_trace >= 0) {
- PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace);
- }
-#endif
if (compression >= 0) {
if (compression > 3) {
PWC_ERROR("Invalid compression setting; use a number between 0 (uncompressed) and 3 (high).\n");
@@ -1969,8 +1392,6 @@ static int __init usb_pwc_init(void)
pwc_preferred_compression = compression;
PWC_DEBUG_MODULE("Preferred compression set to %d.\n", pwc_preferred_compression);
}
- if (power_save)
- PWC_DEBUG_MODULE("Enabling power save on open/close.\n");
if (leds[0] >= 0)
led_on = leds[0];
if (leds[1] >= 0)
diff --git a/drivers/media/video/pwc/pwc-ioctl.h b/drivers/media/video/pwc/pwc-ioctl.h
deleted file mode 100644
index 8c0cae7b3da..00000000000
--- a/drivers/media/video/pwc/pwc-ioctl.h
+++ /dev/null
@@ -1,323 +0,0 @@
-#ifndef PWC_IOCTL_H
-#define PWC_IOCTL_H
-
-/* (C) 2001-2004 Nemosoft Unv.
- (C) 2004-2006 Luc Saillard (luc@saillard.org)
-
- NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
- driver and thus may have bugs that are not present in the original version.
- Please send bug reports and support requests to <luc@saillard.org>.
- The decompression routines have been implemented by reverse-engineering the
- Nemosoft binary pwcx module. Caveat emptor.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-/* This is pwc-ioctl.h belonging to PWC 10.0.10
- It contains structures and defines to communicate from user space
- directly to the driver.
- */
-
-/*
- Changes
- 2001/08/03 Alvarado Added ioctl constants to access methods for
- changing white balance and red/blue gains
- 2002/12/15 G. H. Fernandez-Toribio VIDIOCGREALSIZE
- 2003/12/13 Nemosft Unv. Some modifications to make interfacing to
- PWCX easier
- */
-
-/* These are private ioctl() commands, specific for the Philips webcams.
- They contain functions not found in other webcams, and settings not
- specified in the Video4Linux API.
-
- The #define names are built up like follows:
- VIDIOC VIDeo IOCtl prefix
- PWC Philps WebCam
- G optional: Get
- S optional: Set
- ... the function
- */
-
-#include <linux/types.h>
-#include <linux/version.h>
-
- /* Enumeration of image sizes */
-#define PSZ_SQCIF 0x00
-#define PSZ_QSIF 0x01
-#define PSZ_QCIF 0x02
-#define PSZ_SIF 0x03
-#define PSZ_CIF 0x04
-#define PSZ_VGA 0x05
-#define PSZ_MAX 6
-
-
-/* The frame rate is encoded in the video_window.flags parameter using
- the upper 16 bits, since some flags are defined nowadays. The following
- defines provide a mask and shift to filter out this value.
- This value can also be passing using the private flag when using v4l2 and
- VIDIOC_S_FMT ioctl.
-
- In 'Snapshot' mode the camera freezes its automatic exposure and colour
- balance controls.
- */
-#define PWC_FPS_SHIFT 16
-#define PWC_FPS_MASK 0x00FF0000
-#define PWC_FPS_FRMASK 0x003F0000
-#define PWC_FPS_SNAPSHOT 0x00400000
-#define PWC_QLT_MASK 0x03000000
-#define PWC_QLT_SHIFT 24
-
-
-/* structure for transferring x & y coordinates */
-struct pwc_coord
-{
- int x, y; /* guess what */
- int size; /* size, or offset */
-};
-
-
-/* Used with VIDIOCPWCPROBE */
-struct pwc_probe
-{
- char name[32];
- int type;
-};
-
-struct pwc_serial
-{
- char serial[30]; /* String with serial number. Contains terminating 0 */
-};
-
-/* pwc_whitebalance.mode values */
-#define PWC_WB_INDOOR 0
-#define PWC_WB_OUTDOOR 1
-#define PWC_WB_FL 2
-#define PWC_WB_MANUAL 3
-#define PWC_WB_AUTO 4
-
-/* Used with VIDIOCPWC[SG]AWB (Auto White Balance).
- Set mode to one of the PWC_WB_* values above.
- *red and *blue are the respective gains of these colour components inside
- the camera; range 0..65535
- When 'mode' == PWC_WB_MANUAL, 'manual_red' and 'manual_blue' are set or read;
- otherwise undefined.
- 'read_red' and 'read_blue' are read-only.
-*/
-struct pwc_whitebalance
-{
- int mode;
- int manual_red, manual_blue; /* R/W */
- int read_red, read_blue; /* R/O */
-};
-
-/*
- 'control_speed' and 'control_delay' are used in automatic whitebalance mode,
- and tell the camera how fast it should react to changes in lighting, and
- with how much delay. Valid values are 0..65535.
-*/
-struct pwc_wb_speed
-{
- int control_speed;
- int control_delay;
-
-};
-
-/* Used with VIDIOCPWC[SG]LED */
-struct pwc_leds
-{
- int led_on; /* Led on-time; range = 0..25000 */
- int led_off; /* Led off-time; range = 0..25000 */
-};
-
-/* Image size (used with GREALSIZE) */
-struct pwc_imagesize
-{
- int width;
- int height;
-};
-
-/* Defines and structures for Motorized Pan & Tilt */
-#define PWC_MPT_PAN 0x01
-#define PWC_MPT_TILT 0x02
-#define PWC_MPT_TIMEOUT 0x04 /* for status */
-
-/* Set angles; when absolute != 0, the angle is absolute and the
- driver calculates the relative offset for you. This can only
- be used with VIDIOCPWCSANGLE; VIDIOCPWCGANGLE always returns
- absolute angles.
- */
-struct pwc_mpt_angles
-{
- int absolute; /* write-only */
- int pan; /* degrees * 100 */
- int tilt; /* degress * 100 */
-};
-
-/* Range of angles of the camera, both horizontally and vertically.
- */
-struct pwc_mpt_range
-{
- int pan_min, pan_max; /* degrees * 100 */
- int tilt_min, tilt_max;
-};
-
-struct pwc_mpt_status
-{
- int status;
- int time_pan;
- int time_tilt;
-};
-
-
-/* This is used for out-of-kernel decompression. With it, you can get
- all the necessary information to initialize and use the decompressor
- routines in standalone applications.
- */
-struct pwc_video_command
-{
- int type; /* camera type (645, 675, 730, etc.) */
- int release; /* release number */
-
- int size; /* one of PSZ_* */
- int alternate;
- int command_len; /* length of USB video command */
- unsigned char command_buf[13]; /* Actual USB video command */
- int bandlength; /* >0 = compressed */
- int frame_size; /* Size of one (un)compressed frame */
-};
-
-/* Flags for PWCX subroutines. Not all modules honour all flags. */
-#define PWCX_FLAG_PLANAR 0x0001
-#define PWCX_FLAG_BAYER 0x0008
-
-
-/* IOCTL definitions */
-
- /* Restore user settings */
-#define VIDIOCPWCRUSER _IO('v', 192)
- /* Save user settings */
-#define VIDIOCPWCSUSER _IO('v', 193)
- /* Restore factory settings */
-#define VIDIOCPWCFACTORY _IO('v', 194)
-
- /* You can manipulate the compression factor. A compression preference of 0
- means use uncompressed modes when available; 1 is low compression, 2 is
- medium and 3 is high compression preferred. Of course, the higher the
- compression, the lower the bandwidth used but more chance of artefacts
- in the image. The driver automatically chooses a higher compression when
- the preferred mode is not available.
- */
- /* Set preferred compression quality (0 = uncompressed, 3 = highest compression) */
-#define VIDIOCPWCSCQUAL _IOW('v', 195, int)
- /* Get preferred compression quality */
-#define VIDIOCPWCGCQUAL _IOR('v', 195, int)
-
-
-/* Retrieve serial number of camera */
-#define VIDIOCPWCGSERIAL _IOR('v', 198, struct pwc_serial)
-
- /* This is a probe function; since so many devices are supported, it
- becomes difficult to include all the names in programs that want to
- check for the enhanced Philips stuff. So in stead, try this PROBE;
- it returns a structure with the original name, and the corresponding
- Philips type.
- To use, fill the structure with zeroes, call PROBE and if that succeeds,
- compare the name with that returned from VIDIOCGCAP; they should be the
- same. If so, you can be assured it is a Philips (OEM) cam and the type
- is valid.
- */
-#define VIDIOCPWCPROBE _IOR('v', 199, struct pwc_probe)
-
- /* Set AGC (Automatic Gain Control); int < 0 = auto, 0..65535 = fixed */
-#define VIDIOCPWCSAGC _IOW('v', 200, int)
- /* Get AGC; int < 0 = auto; >= 0 = fixed, range 0..65535 */
-#define VIDIOCPWCGAGC _IOR('v', 200, int)
- /* Set shutter speed; int < 0 = auto; >= 0 = fixed, range 0..65535 */
-#define VIDIOCPWCSSHUTTER _IOW('v', 201, int)
-
- /* Color compensation (Auto White Balance) */
-#define VIDIOCPWCSAWB _IOW('v', 202, struct pwc_whitebalance)
-#define VIDIOCPWCGAWB _IOR('v', 202, struct pwc_whitebalance)
-
- /* Auto WB speed */
-#define VIDIOCPWCSAWBSPEED _IOW('v', 203, struct pwc_wb_speed)
-#define VIDIOCPWCGAWBSPEED _IOR('v', 203, struct pwc_wb_speed)
-
- /* LEDs on/off/blink; int range 0..65535 */
-#define VIDIOCPWCSLED _IOW('v', 205, struct pwc_leds)
-#define VIDIOCPWCGLED _IOR('v', 205, struct pwc_leds)
-
- /* Contour (sharpness); int < 0 = auto, 0..65536 = fixed */
-#define VIDIOCPWCSCONTOUR _IOW('v', 206, int)
-#define VIDIOCPWCGCONTOUR _IOR('v', 206, int)
-
- /* Backlight compensation; 0 = off, otherwise on */
-#define VIDIOCPWCSBACKLIGHT _IOW('v', 207, int)
-#define VIDIOCPWCGBACKLIGHT _IOR('v', 207, int)
-
- /* Flickerless mode; = 0 off, otherwise on */
-#define VIDIOCPWCSFLICKER _IOW('v', 208, int)
-#define VIDIOCPWCGFLICKER _IOR('v', 208, int)
-
- /* Dynamic noise reduction; 0 off, 3 = high noise reduction */
-#define VIDIOCPWCSDYNNOISE _IOW('v', 209, int)
-#define VIDIOCPWCGDYNNOISE _IOR('v', 209, int)
-
- /* Real image size as used by the camera; tells you whether or not there's a gray border around the image */
-#define VIDIOCPWCGREALSIZE _IOR('v', 210, struct pwc_imagesize)
-
- /* Motorized pan & tilt functions */
-#define VIDIOCPWCMPTRESET _IOW('v', 211, int)
-#define VIDIOCPWCMPTGRANGE _IOR('v', 211, struct pwc_mpt_range)
-#define VIDIOCPWCMPTSANGLE _IOW('v', 212, struct pwc_mpt_angles)
-#define VIDIOCPWCMPTGANGLE _IOR('v', 212, struct pwc_mpt_angles)
-#define VIDIOCPWCMPTSTATUS _IOR('v', 213, struct pwc_mpt_status)
-
- /* Get the USB set-video command; needed for initializing libpwcx */
-#define VIDIOCPWCGVIDCMD _IOR('v', 215, struct pwc_video_command)
-struct pwc_table_init_buffer {
- int len;
- char *buffer;
-
-};
-#define VIDIOCPWCGVIDTABLE _IOR('v', 216, struct pwc_table_init_buffer)
-
-/*
- * This is private command used when communicating with v4l2.
- * In the future all private ioctl will be remove/replace to
- * use interface offer by v4l2.
- */
-
-#define V4L2_CID_PRIVATE_SAVE_USER (V4L2_CID_PRIVATE_BASE + 0)
-#define V4L2_CID_PRIVATE_RESTORE_USER (V4L2_CID_PRIVATE_BASE + 1)
-#define V4L2_CID_PRIVATE_RESTORE_FACTORY (V4L2_CID_PRIVATE_BASE + 2)
-#define V4L2_CID_PRIVATE_COLOUR_MODE (V4L2_CID_PRIVATE_BASE + 3)
-#define V4L2_CID_PRIVATE_AUTOCONTOUR (V4L2_CID_PRIVATE_BASE + 4)
-#define V4L2_CID_PRIVATE_CONTOUR (V4L2_CID_PRIVATE_BASE + 5)
-#define V4L2_CID_PRIVATE_BACKLIGHT (V4L2_CID_PRIVATE_BASE + 6)
-#define V4L2_CID_PRIVATE_FLICKERLESS (V4L2_CID_PRIVATE_BASE + 7)
-#define V4L2_CID_PRIVATE_NOISE_REDUCTION (V4L2_CID_PRIVATE_BASE + 8)
-
-struct pwc_raw_frame {
- __le16 type; /* type of the webcam */
- __le16 vbandlength; /* Size of 4lines compressed (used by the decompressor) */
- __u8 cmd[4]; /* the four byte of the command (in case of nala,
- only the first 3 bytes is filled) */
- __u8 rawframe[0]; /* frame_size = H/4*vbandlength */
-} __attribute__ ((packed));
-
-
-#endif
diff --git a/drivers/media/video/pwc/pwc-kiara.c b/drivers/media/video/pwc/pwc-kiara.c
index f4ae83c0cf2..e5f4fd81712 100644
--- a/drivers/media/video/pwc/pwc-kiara.c
+++ b/drivers/media/video/pwc/pwc-kiara.c
@@ -40,7 +40,6 @@
#include "pwc-kiara.h"
-#include "pwc-uncompress.h"
const unsigned int Kiara_fps_vector[PWC_FPS_MAX_KIARA] = { 5, 10, 15, 20, 25, 30 };
diff --git a/drivers/media/video/pwc/pwc-misc.c b/drivers/media/video/pwc/pwc-misc.c
index 6af5bb53835..0b031336eab 100644
--- a/drivers/media/video/pwc/pwc-misc.c
+++ b/drivers/media/video/pwc/pwc-misc.c
@@ -126,8 +126,4 @@ void pwc_construct(struct pwc_device *pdev)
pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
pdev->view_min.size = pdev->view_min.x * pdev->view_min.y;
pdev->view_max.size = pdev->view_max.x * pdev->view_max.y;
- /* length of image, in YUV format; always allocate enough memory. */
- pdev->len_per_image = PAGE_ALIGN((pdev->abs_max.x * pdev->abs_max.y * 3) / 2);
}
-
-
diff --git a/drivers/media/video/pwc/pwc-uncompress.c b/drivers/media/video/pwc/pwc-uncompress.c
index 3b73f295f03..51265092bd3 100644
--- a/drivers/media/video/pwc/pwc-uncompress.c
+++ b/drivers/media/video/pwc/pwc-uncompress.c
@@ -30,26 +30,17 @@
#include <asm/types.h>
#include "pwc.h"
-#include "pwc-uncompress.h"
#include "pwc-dec1.h"
#include "pwc-dec23.h"
-int pwc_decompress(struct pwc_device *pdev)
+int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf)
{
- struct pwc_frame_buf *fbuf;
int n, line, col, stride;
void *yuv, *image;
u16 *src;
u16 *dsty, *dstu, *dstv;
- if (pdev == NULL)
- return -EFAULT;
-
- fbuf = pdev->read_frame;
- if (fbuf == NULL)
- return -EFAULT;
- image = pdev->image_data;
- image += pdev->images[pdev->fill_image].offset;
+ image = vb2_plane_vaddr(&fbuf->vb, 0);
yuv = fbuf->data + pdev->frame_header_size; /* Skip header */
@@ -64,9 +55,13 @@ int pwc_decompress(struct pwc_device *pdev)
* determine this using the type of the webcam */
memcpy(raw_frame->cmd, pdev->cmd_buf, 4);
memcpy(raw_frame+1, yuv, pdev->frame_size);
+ vb2_set_plane_payload(&fbuf->vb, 0,
+ pdev->frame_size + sizeof(struct pwc_raw_frame));
return 0;
}
+ vb2_set_plane_payload(&fbuf->vb, 0, pdev->view.size);
+
if (pdev->vbandlength == 0) {
/* Uncompressed mode.
* We copy the data into the output buffer, using the viewport
diff --git a/drivers/media/video/pwc/pwc-uncompress.h b/drivers/media/video/pwc/pwc-uncompress.h
deleted file mode 100644
index 43028e74e9e..00000000000
--- a/drivers/media/video/pwc/pwc-uncompress.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* (C) 1999-2003 Nemosoft Unv.
- (C) 2004-2006 Luc Saillard (luc@saillard.org)
-
- NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
- driver and thus may have bugs that are not present in the original version.
- Please send bug reports and support requests to <luc@saillard.org>.
- The decompression routines have been implemented by reverse-engineering the
- Nemosoft binary pwcx module. Caveat emptor.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-/* This file is the bridge between the kernel module and the plugin; it
- describes the structures and datatypes used in both modules. Any
- significant change should be reflected by increasing the
- pwc_decompressor_version major number.
- */
-#ifndef PWC_UNCOMPRESS_H
-#define PWC_UNCOMPRESS_H
-
-
-#include <media/pwc-ioctl.h>
-
-/* from pwc-dec.h */
-#define PWCX_FLAG_PLANAR 0x0001
-/* */
-
-#endif
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index f85c51249c7..e9a0e94b999 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -2,6 +2,7 @@
USB and Video4Linux interface part.
(C) 1999-2004 Nemosoft Unv.
(C) 2004-2006 Luc Saillard (luc@saillard.org)
+ (C) 2011 Hans de Goede <hdegoede@redhat.com>
NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
driver and thus may have bugs that are not present in the original version.
@@ -31,184 +32,330 @@
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
#include <asm/io.h>
#include "pwc.h"
-static struct v4l2_queryctrl pwc_controls[] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 128,
- .step = 1,
- .default_value = 64,
- },
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 64,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = -100,
- .maximum = 100,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_GAMMA,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gamma",
- .minimum = 0,
- .maximum = 32,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Gain",
- .minimum = 0,
- .maximum = 256,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Gain",
- .minimum = 0,
- .maximum = 256,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto White Balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Shutter Speed (Exposure)",
- .minimum = 0,
- .maximum = 256,
- .step = 1,
- .default_value = 200,
- },
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain Enabled",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- },
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain Level",
- .minimum = 0,
- .maximum = 256,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_SAVE_USER,
- .type = V4L2_CTRL_TYPE_BUTTON,
- .name = "Save User Settings",
- .minimum = 0,
- .maximum = 0,
- .step = 0,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_RESTORE_USER,
- .type = V4L2_CTRL_TYPE_BUTTON,
- .name = "Restore User Settings",
- .minimum = 0,
- .maximum = 0,
- .step = 0,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_RESTORE_FACTORY,
- .type = V4L2_CTRL_TYPE_BUTTON,
- .name = "Restore Factory Settings",
- .minimum = 0,
- .maximum = 0,
- .step = 0,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_COLOUR_MODE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Colour mode",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_AUTOCONTOUR,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto contour",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_CONTOUR,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contour",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_BACKLIGHT,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Backlight compensation",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_FLICKERLESS,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Flickerless",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- {
- .id = V4L2_CID_PRIVATE_NOISE_REDUCTION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Noise reduction",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
- .default_value = 0,
- },
+#define PWC_CID_CUSTOM(ctrl) ((V4L2_CID_USER_BASE | 0xf000) + custom_ ## ctrl)
+
+static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl);
+static int pwc_s_ctrl(struct v4l2_ctrl *ctrl);
+
+static const struct v4l2_ctrl_ops pwc_ctrl_ops = {
+ .g_volatile_ctrl = pwc_g_volatile_ctrl,
+ .s_ctrl = pwc_s_ctrl,
+};
+
+enum { awb_indoor, awb_outdoor, awb_fl, awb_manual, awb_auto };
+enum { custom_autocontour, custom_contour, custom_noise_reduction,
+ custom_save_user, custom_restore_user, custom_restore_factory };
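As an aside (not part of the patch), the driver-private control IDs produced by PWC_CID_CUSTOM() are just offsets into the enum above on top of the user control base; a worked expansion, for clarity:

/* Illustration only, assuming the macro and enum above:
 *   PWC_CID_CUSTOM(contour)
 *     -> ((V4L2_CID_USER_BASE | 0xf000) + custom_contour)
 *     -> (V4L2_CID_USER_BASE | 0xf000) + 1
 * so every custom control gets a unique ID above the user base.
 */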
+
+const char * const pwc_auto_whitebal_qmenu[] = {
+ "Indoor (Incandescant Lighting) Mode",
+ "Outdoor (Sunlight) Mode",
+ "Indoor (Fluorescent Lighting) Mode",
+ "Manual Mode",
+ "Auto Mode",
+ NULL
+};
+
+static const struct v4l2_ctrl_config pwc_auto_white_balance_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = V4L2_CID_AUTO_WHITE_BALANCE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = awb_auto,
+ .qmenu = pwc_auto_whitebal_qmenu,
+};
+
+static const struct v4l2_ctrl_config pwc_autocontour_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = PWC_CID_CUSTOM(autocontour),
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Auto contour",
+ .min = 0,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config pwc_contour_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = PWC_CID_CUSTOM(contour),
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contour",
+ .min = 0,
+ .max = 63,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config pwc_backlight_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = V4L2_CID_BACKLIGHT_COMPENSATION,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config pwc_flicker_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = V4L2_CID_BAND_STOP_FILTER,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config pwc_noise_reduction_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = PWC_CID_CUSTOM(noise_reduction),
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Dynamic Noise Reduction",
+ .min = 0,
+ .max = 3,
+ .step = 1,
+};
+
+static const struct v4l2_ctrl_config pwc_save_user_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = PWC_CID_CUSTOM(save_user),
+ .type = V4L2_CTRL_TYPE_BUTTON,
+ .name = "Save User Settings",
};
+static const struct v4l2_ctrl_config pwc_restore_user_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = PWC_CID_CUSTOM(restore_user),
+ .type = V4L2_CTRL_TYPE_BUTTON,
+ .name = "Restore User Settings",
+};
+
+static const struct v4l2_ctrl_config pwc_restore_factory_cfg = {
+ .ops = &pwc_ctrl_ops,
+ .id = PWC_CID_CUSTOM(restore_factory),
+ .type = V4L2_CTRL_TYPE_BUTTON,
+ .name = "Restore Factory Settings",
+};
+
+int pwc_init_controls(struct pwc_device *pdev)
+{
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl_config cfg;
+ int r, def;
+
+ hdl = &pdev->ctrl_handler;
+ r = v4l2_ctrl_handler_init(hdl, 20);
+ if (r)
+ return r;
+
+ /* Brightness, contrast, saturation, gamma */
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, BRIGHTNESS_FORMATTER, &def);
+ if (r || def > 127)
+ def = 63;
+ pdev->brightness = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 127, 1, def);
+
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, CONTRAST_FORMATTER, &def);
+ if (r || def > 63)
+ def = 31;
+ pdev->contrast = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 63, 1, def);
+
+ if (pdev->type >= 675) {
+ if (pdev->type < 730)
+ pdev->saturation_fmt = SATURATION_MODE_FORMATTER2;
+ else
+ pdev->saturation_fmt = SATURATION_MODE_FORMATTER1;
+ r = pwc_get_s8_ctrl(pdev, GET_CHROM_CTL, pdev->saturation_fmt,
+ &def);
+ if (r || def < -100 || def > 100)
+ def = 0;
+ pdev->saturation = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_SATURATION, -100, 100, 1, def);
+ }
+
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, GAMMA_FORMATTER, &def);
+ if (r || def > 31)
+ def = 15;
+ pdev->gamma = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_GAMMA, 0, 31, 1, def);
+
+ /* auto white balance, red gain, blue gain */
+ r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL, WB_MODE_FORMATTER, &def);
+ if (r || def > awb_auto)
+ def = awb_auto;
+ cfg = pwc_auto_white_balance_cfg;
+ cfg.name = v4l2_ctrl_get_name(cfg.id);
+ cfg.def = def;
+ pdev->auto_white_balance = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+ /* check auto controls to avoid NULL deref in v4l2_ctrl_auto_cluster */
+ if (!pdev->auto_white_balance)
+ return hdl->error;
+
+ r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL,
+ PRESET_MANUAL_RED_GAIN_FORMATTER, &def);
+ if (r)
+ def = 127;
+ pdev->red_balance = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_RED_BALANCE, 0, 255, 1, def);
+
+ r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL,
+ PRESET_MANUAL_BLUE_GAIN_FORMATTER, &def);
+ if (r)
+ def = 127;
+ pdev->blue_balance = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, 0, 255, 1, def);
+
+ v4l2_ctrl_auto_cluster(3, &pdev->auto_white_balance, awb_manual,
+ pdev->auto_white_balance->cur.val == awb_auto);
+
+ /* autogain, gain */
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, AGC_MODE_FORMATTER, &def);
+ if (r || (def != 0 && def != 0xff))
+ def = 0;
+ /* Note: a register value of 0 means auto gain is on */
+ pdev->autogain = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, def == 0);
+ if (!pdev->autogain)
+ return hdl->error;
+
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, PRESET_AGC_FORMATTER, &def);
+ if (r || def > 63)
+ def = 31;
+ pdev->gain = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_GAIN, 0, 63, 1, def);
+
+ /* auto exposure, exposure */
+ if (DEVICE_USE_CODEC2(pdev->type)) {
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, SHUTTER_MODE_FORMATTER,
+ &def);
+ if (r || (def != 0 && def != 0xff))
+ def = 0;
+ /*
+ * def = 0 auto, def = ff manual
+ * menu idx 0 = auto, idx 1 = manual
+ */
+ pdev->exposure_auto = v4l2_ctrl_new_std_menu(hdl,
+ &pwc_ctrl_ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ 1, 0, def != 0);
+ if (!pdev->exposure_auto)
+ return hdl->error;
+
+ /* GET_LUM_CTL, PRESET_SHUTTER_FORMATTER is unreliable */
+ r = pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
+ READ_SHUTTER_FORMATTER, &def);
+ if (r || def > 655)
+ def = 655;
+ pdev->exposure = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 655, 1, def);
+ /* CODEC2: separate auto gain & auto exposure */
+ v4l2_ctrl_auto_cluster(2, &pdev->autogain, 0, true);
+ v4l2_ctrl_auto_cluster(2, &pdev->exposure_auto,
+ V4L2_EXPOSURE_MANUAL, true);
+ } else if (DEVICE_USE_CODEC3(pdev->type)) {
+ /* GET_LUM_CTL, PRESET_SHUTTER_FORMATTER is unreliable */
+ r = pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
+ READ_SHUTTER_FORMATTER, &def);
+ if (r || def > 255)
+ def = 255;
+ pdev->exposure = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 255, 1, def);
+ /* CODEC3: both gain and exposure controlled by autogain */
+ pdev->autogain_expo_cluster[0] = pdev->autogain;
+ pdev->autogain_expo_cluster[1] = pdev->gain;
+ pdev->autogain_expo_cluster[2] = pdev->exposure;
+ v4l2_ctrl_auto_cluster(3, pdev->autogain_expo_cluster,
+ 0, true);
+ }
+
+ /* color / bw setting */
+ r = pwc_get_u8_ctrl(pdev, GET_CHROM_CTL, COLOUR_MODE_FORMATTER,
+ &def);
+ if (r || (def != 0 && def != 0xff))
+ def = 0xff;
+ /* def = 0 bw, def = ff color, menu idx 0 = color, idx 1 = bw */
+ pdev->colorfx = v4l2_ctrl_new_std_menu(hdl, &pwc_ctrl_ops,
+ V4L2_CID_COLORFX, 1, 0, def == 0);
+
+ /* autocontour, contour */
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, AUTO_CONTOUR_FORMATTER, &def);
+ if (r || (def != 0 && def != 0xff))
+ def = 0;
+ cfg = pwc_autocontour_cfg;
+ cfg.def = def == 0;
+ pdev->autocontour = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+ if (!pdev->autocontour)
+ return hdl->error;
+
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL, PRESET_CONTOUR_FORMATTER, &def);
+ if (r || def > 63)
+ def = 31;
+ cfg = pwc_contour_cfg;
+ cfg.def = def;
+ pdev->contour = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+
+ v4l2_ctrl_auto_cluster(2, &pdev->autocontour, 0, false);
+
+ /* backlight */
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL,
+ BACK_LIGHT_COMPENSATION_FORMATTER, &def);
+ if (r || (def != 0 && def != 0xff))
+ def = 0;
+ cfg = pwc_backlight_cfg;
+ cfg.name = v4l2_ctrl_get_name(cfg.id);
+ cfg.def = def == 0;
+ pdev->backlight = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+
+ /* flicker reduction */
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL,
+ FLICKERLESS_MODE_FORMATTER, &def);
+ if (r || (def != 0 && def != 0xff))
+ def = 0;
+ cfg = pwc_flicker_cfg;
+ cfg.name = v4l2_ctrl_get_name(cfg.id);
+ cfg.def = def == 0;
+ pdev->flicker = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+
+ /* Dynamic noise reduction */
+ r = pwc_get_u8_ctrl(pdev, GET_LUM_CTL,
+ DYNAMIC_NOISE_CONTROL_FORMATTER, &def);
+ if (r || def > 3)
+ def = 2;
+ cfg = pwc_noise_reduction_cfg;
+ cfg.def = def;
+ pdev->noise_reduction = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+
+ /* Save / Restore User / Factory Settings */
+ pdev->save_user = v4l2_ctrl_new_custom(hdl, &pwc_save_user_cfg, NULL);
+ pdev->restore_user = v4l2_ctrl_new_custom(hdl, &pwc_restore_user_cfg,
+ NULL);
+ if (pdev->restore_user)
+ pdev->restore_user->flags = V4L2_CTRL_FLAG_UPDATE;
+ pdev->restore_factory = v4l2_ctrl_new_custom(hdl,
+ &pwc_restore_factory_cfg,
+ NULL);
+ if (pdev->restore_factory)
+ pdev->restore_factory->flags = V4L2_CTRL_FLAG_UPDATE;
+
+ if (!pdev->features & FEATURE_MOTOR_PANTILT)
+ return hdl->error;
+
+ /* Motor pan / tilt / reset */
+ pdev->motor_pan = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_PAN_RELATIVE, -4480, 4480, 64, 0);
+ if (!pdev->motor_pan)
+ return hdl->error;
+ pdev->motor_tilt = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_TILT_RELATIVE, -1920, 1920, 64, 0);
+ pdev->motor_pan_reset = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_PAN_RESET, 0, 0, 0, 0);
+ pdev->motor_tilt_reset = v4l2_ctrl_new_std(hdl, &pwc_ctrl_ops,
+ V4L2_CID_TILT_RESET, 0, 0, 0, 0);
+ v4l2_ctrl_cluster(4, &pdev->motor_pan);
+
+ return hdl->error;
+}
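A rough sketch (not part of the patch) of how a handler built by pwc_init_controls() is typically wired into the video node and initialised; the call sites live elsewhere in the driver, so the function name and error handling here are assumptions:

/* Sketch under assumptions: pdev->vdev and pdev->ctrl_handler as above. */
static int pwc_register_controls_sketch(struct pwc_device *pdev)
{
	int r = pwc_init_controls(pdev);

	if (r) {
		v4l2_ctrl_handler_free(&pdev->ctrl_handler);
		return r;
	}
	/* Expose the controls through the video node. */
	pdev->vdev.ctrl_handler = &pdev->ctrl_handler;
	/* Push the initial (default) values out to the hardware. */
	return v4l2_ctrl_handler_setup(&pdev->ctrl_handler);
}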
static void pwc_vidioc_fill_fmt(const struct pwc_device *pdev, struct v4l2_format *f)
{
@@ -284,10 +431,21 @@ static int pwc_vidioc_try_fmt(struct pwc_device *pdev, struct v4l2_format *f)
}
/* ioctl(VIDIOC_SET_FMT) */
-static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f)
+
+static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
{
+ struct pwc_device *pdev = video_drvdata(file);
int ret, fps, snapshot, compression, pixelformat;
+ if (!pdev->udev)
+ return -ENODEV;
+
+ if (pdev->capt_file != NULL &&
+ pdev->capt_file != file)
+ return -EBUSY;
+
+ pdev->capt_file = file;
+
ret = pwc_vidioc_try_fmt(pdev, f);
if (ret<0)
return ret;
@@ -309,7 +467,7 @@ static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f)
pixelformat != V4L2_PIX_FMT_PWC2)
return -EINVAL;
- if (pdev->iso_init)
+ if (vb2_is_streaming(&pdev->vb_queue))
return -EBUSY;
PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d "
@@ -343,13 +501,14 @@ static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f)
static int pwc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
- struct video_device *vdev = video_devdata(file);
struct pwc_device *pdev = video_drvdata(file);
+ if (!pdev->udev)
+ return -ENODEV;
+
strcpy(cap->driver, PWC_NAME);
- strlcpy(cap->card, vdev->name, sizeof(cap->card));
+ strlcpy(cap->card, pdev->vdev.name, sizeof(cap->card));
usb_make_path(pdev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = PWC_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING |
@@ -377,255 +536,396 @@ static int pwc_s_input(struct file *file, void *fh, unsigned int i)
return i ? -EINVAL : 0;
}
-static int pwc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *c)
+static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
- int i, idx;
- u32 id;
-
- id = c->id;
- if (id & V4L2_CTRL_FLAG_NEXT_CTRL) {
- id &= V4L2_CTRL_ID_MASK;
- id++;
- idx = -1;
- for (i = 0; i < ARRAY_SIZE(pwc_controls); i++) {
- if (pwc_controls[i].id < id)
- continue;
- if (idx >= 0
- && pwc_controls[i].id > pwc_controls[idx].id)
- continue;
- idx = i;
+ struct pwc_device *pdev =
+ container_of(ctrl->handler, struct pwc_device, ctrl_handler);
+ int ret = 0;
+
+ /*
+ * Sometimes it can take quite long for the pwc to complete usb control
+ * transfers, so release the modlock to give another streaming
+ * process / thread the chance to continue with a dqbuf.
+ */
+ mutex_unlock(&pdev->modlock);
+
+ /*
+ * Take the udev-lock to protect against the disconnect handler
+ * completing and setting pdev->udev to NULL underneath us. Other code
+ * does not need to do this since it is protected by the modlock.
+ */
+ mutex_lock(&pdev->udevlock);
+
+ if (!pdev->udev) {
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ if (pdev->color_bal_valid && time_before(jiffies,
+ pdev->last_color_bal_update + HZ / 4)) {
+ pdev->red_balance->val = pdev->last_red_balance;
+ pdev->blue_balance->val = pdev->last_blue_balance;
+ break;
}
- if (idx < 0)
- return -EINVAL;
- memcpy(c, &pwc_controls[idx], sizeof pwc_controls[0]);
- return 0;
+ ret = pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
+ READ_RED_GAIN_FORMATTER,
+ &pdev->red_balance->val);
+ if (ret)
+ break;
+ ret = pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
+ READ_BLUE_GAIN_FORMATTER,
+ &pdev->blue_balance->val);
+ if (ret)
+ break;
+ pdev->last_red_balance = pdev->red_balance->val;
+ pdev->last_blue_balance = pdev->blue_balance->val;
+ pdev->last_color_bal_update = jiffies;
+ pdev->color_bal_valid = true;
+ break;
+ case V4L2_CID_AUTOGAIN:
+ if (pdev->gain_valid && time_before(jiffies,
+ pdev->last_gain_update + HZ / 4)) {
+ pdev->gain->val = pdev->last_gain;
+ break;
+ }
+ ret = pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
+ READ_AGC_FORMATTER, &pdev->gain->val);
+ if (ret)
+ break;
+ pdev->last_gain = pdev->gain->val;
+ pdev->last_gain_update = jiffies;
+ pdev->gain_valid = true;
+ if (!DEVICE_USE_CODEC3(pdev->type))
+ break;
+ /* Fall through for CODEC3 where autogain also controls expo */
+ case V4L2_CID_EXPOSURE_AUTO:
+ if (pdev->exposure_valid && time_before(jiffies,
+ pdev->last_exposure_update + HZ / 4)) {
+ pdev->exposure->val = pdev->last_exposure;
+ break;
+ }
+ ret = pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
+ READ_SHUTTER_FORMATTER,
+ &pdev->exposure->val);
+ if (ret)
+ break;
+ pdev->last_exposure = pdev->exposure->val;
+ pdev->last_exposure_update = jiffies;
+ pdev->exposure_valid = true;
+ break;
+ default:
+ ret = -EINVAL;
}
- for (i = 0; i < sizeof(pwc_controls) / sizeof(struct v4l2_queryctrl); i++) {
- if (pwc_controls[i].id == c->id) {
- PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYCTRL) found\n");
- memcpy(c, &pwc_controls[i], sizeof(struct v4l2_queryctrl));
- return 0;
+
+ if (ret)
+ PWC_ERROR("g_ctrl %s error %d\n", ctrl->name, ret);
+
+leave:
+ mutex_unlock(&pdev->udevlock);
+ mutex_lock(&pdev->modlock);
+ return ret;
+}
+
+static int pwc_set_awb(struct pwc_device *pdev)
+{
+ int ret = 0;
+
+ if (pdev->auto_white_balance->is_new) {
+ ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
+ WB_MODE_FORMATTER,
+ pdev->auto_white_balance->val);
+ if (ret)
+ return ret;
+
+ /* Update val when coming from auto or going to a preset */
+ if (pdev->red_balance->is_volatile ||
+ pdev->auto_white_balance->val == awb_indoor ||
+ pdev->auto_white_balance->val == awb_outdoor ||
+ pdev->auto_white_balance->val == awb_fl) {
+ if (!pdev->red_balance->is_new)
+ pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
+ READ_RED_GAIN_FORMATTER,
+ &pdev->red_balance->val);
+ if (!pdev->blue_balance->is_new)
+ pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
+ READ_BLUE_GAIN_FORMATTER,
+ &pdev->blue_balance->val);
+ }
+ if (pdev->auto_white_balance->val == awb_auto) {
+ pdev->red_balance->is_volatile = true;
+ pdev->blue_balance->is_volatile = true;
+ pdev->color_bal_valid = false; /* Force cache update */
+ } else {
+ pdev->red_balance->is_volatile = false;
+ pdev->blue_balance->is_volatile = false;
}
}
- return -EINVAL;
+
+ if (ret == 0 && pdev->red_balance->is_new) {
+ if (pdev->auto_white_balance->val != awb_manual)
+ return -EBUSY;
+ ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
+ PRESET_MANUAL_RED_GAIN_FORMATTER,
+ pdev->red_balance->val);
+ }
+
+ if (ret == 0 && pdev->blue_balance->is_new) {
+ if (pdev->auto_white_balance->val != awb_manual)
+ return -EBUSY;
+ ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
+ PRESET_MANUAL_BLUE_GAIN_FORMATTER,
+ pdev->blue_balance->val);
+ }
+ return ret;
}
-static int pwc_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
+/* For CODEC2 models which have separate autogain and auto exposure */
+static int pwc_set_autogain(struct pwc_device *pdev)
{
- struct pwc_device *pdev = video_drvdata(file);
- int ret;
+ int ret = 0;
+
+ if (pdev->autogain->is_new) {
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ AGC_MODE_FORMATTER,
+ pdev->autogain->val ? 0 : 0xff);
+ if (ret)
+ return ret;
+ if (pdev->autogain->val)
+ pdev->gain_valid = false; /* Force cache update */
+ else if (!pdev->gain->is_new)
+ pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
+ READ_AGC_FORMATTER,
+ &pdev->gain->val);
+ }
+ if (ret == 0 && pdev->gain->is_new) {
+ if (pdev->autogain->val)
+ return -EBUSY;
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ PRESET_AGC_FORMATTER,
+ pdev->gain->val);
+ }
+ return ret;
+}
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS:
- c->value = pwc_get_brightness(pdev);
- if (c->value < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_CONTRAST:
- c->value = pwc_get_contrast(pdev);
- if (c->value < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_SATURATION:
- ret = pwc_get_saturation(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_GAMMA:
- c->value = pwc_get_gamma(pdev);
- if (c->value < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_RED_BALANCE:
- ret = pwc_get_red_gain(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- c->value >>= 8;
- return 0;
- case V4L2_CID_BLUE_BALANCE:
- ret = pwc_get_blue_gain(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- c->value >>= 8;
- return 0;
- case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = pwc_get_awb(pdev);
- if (ret < 0)
- return -EINVAL;
- c->value = (ret == PWC_WB_MANUAL) ? 0 : 1;
- return 0;
- case V4L2_CID_GAIN:
- ret = pwc_get_agc(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- c->value >>= 8;
- return 0;
- case V4L2_CID_AUTOGAIN:
- ret = pwc_get_agc(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- c->value = (c->value < 0) ? 1 : 0;
- return 0;
- case V4L2_CID_EXPOSURE:
- ret = pwc_get_shutter_speed(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_COLOUR_MODE:
- ret = pwc_get_colour_mode(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_AUTOCONTOUR:
- ret = pwc_get_contour(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- c->value = (c->value == -1 ? 1 : 0);
- return 0;
- case V4L2_CID_PRIVATE_CONTOUR:
- ret = pwc_get_contour(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- c->value >>= 10;
- return 0;
- case V4L2_CID_PRIVATE_BACKLIGHT:
- ret = pwc_get_backlight(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_FLICKERLESS:
- ret = pwc_get_flicker(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- c->value = (c->value ? 1 : 0);
- return 0;
- case V4L2_CID_PRIVATE_NOISE_REDUCTION:
- ret = pwc_get_dynamic_noise(pdev, &c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
+/* For CODEC2 models which have separate autogain and auto exposure */
+static int pwc_set_exposure_auto(struct pwc_device *pdev)
+{
+ int ret = 0;
+ int is_auto = pdev->exposure_auto->val == V4L2_EXPOSURE_AUTO;
+
+ if (pdev->exposure_auto->is_new) {
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ SHUTTER_MODE_FORMATTER,
+ is_auto ? 0 : 0xff);
+ if (ret)
+ return ret;
+ if (is_auto)
+ pdev->exposure_valid = false; /* Force cache update */
+ else if (!pdev->exposure->is_new)
+ pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
+ READ_SHUTTER_FORMATTER,
+ &pdev->exposure->val);
+ }
+ if (ret == 0 && pdev->exposure->is_new) {
+ if (is_auto)
+ return -EBUSY;
+ ret = pwc_set_u16_ctrl(pdev, SET_LUM_CTL,
+ PRESET_SHUTTER_FORMATTER,
+ pdev->exposure->val);
+ }
+ return ret;
+}
- case V4L2_CID_PRIVATE_SAVE_USER:
- case V4L2_CID_PRIVATE_RESTORE_USER:
- case V4L2_CID_PRIVATE_RESTORE_FACTORY:
- return -EINVAL;
+/* For CODEC3 models which have autogain controlling both gain and exposure */
+static int pwc_set_autogain_expo(struct pwc_device *pdev)
+{
+ int ret = 0;
+
+ if (pdev->autogain->is_new) {
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ AGC_MODE_FORMATTER,
+ pdev->autogain->val ? 0 : 0xff);
+ if (ret)
+ return ret;
+ if (pdev->autogain->val) {
+ pdev->gain_valid = false; /* Force cache update */
+ pdev->exposure_valid = false; /* Force cache update */
+ } else {
+ if (!pdev->gain->is_new)
+ pwc_get_u8_ctrl(pdev, GET_STATUS_CTL,
+ READ_AGC_FORMATTER,
+ &pdev->gain->val);
+ if (!pdev->exposure->is_new)
+ pwc_get_u16_ctrl(pdev, GET_STATUS_CTL,
+ READ_SHUTTER_FORMATTER,
+ &pdev->exposure->val);
+ }
}
- return -EINVAL;
+ if (ret == 0 && pdev->gain->is_new) {
+ if (pdev->autogain->val)
+ return -EBUSY;
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ PRESET_AGC_FORMATTER,
+ pdev->gain->val);
+ }
+ if (ret == 0 && pdev->exposure->is_new) {
+ if (pdev->autogain->val)
+ return -EBUSY;
+ ret = pwc_set_u16_ctrl(pdev, SET_LUM_CTL,
+ PRESET_SHUTTER_FORMATTER,
+ pdev->exposure->val);
+ }
+ return ret;
}
-static int pwc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
+static int pwc_set_motor(struct pwc_device *pdev)
{
- struct pwc_device *pdev = video_drvdata(file);
int ret;
+ u8 buf[4];
+
+ buf[0] = 0;
+ if (pdev->motor_pan_reset->is_new)
+ buf[0] |= 0x01;
+ if (pdev->motor_tilt_reset->is_new)
+ buf[0] |= 0x02;
+ if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) {
+ ret = send_control_msg(pdev, SET_MPT_CTL,
+ PT_RESET_CONTROL_FORMATTER, buf, 1);
+ if (ret < 0)
+ return ret;
+ }
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS:
- c->value <<= 9;
- ret = pwc_set_brightness(pdev, c->value);
+ memset(buf, 0, sizeof(buf));
+ if (pdev->motor_pan->is_new) {
+ buf[0] = pdev->motor_pan->val & 0xFF;
+ buf[1] = (pdev->motor_pan->val >> 8);
+ }
+ if (pdev->motor_tilt->is_new) {
+ buf[2] = pdev->motor_tilt->val & 0xFF;
+ buf[3] = (pdev->motor_tilt->val >> 8);
+ }
+ if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) {
+ ret = send_control_msg(pdev, SET_MPT_CTL,
+ PT_RELATIVE_CONTROL_FORMATTER,
+ buf, sizeof(buf));
if (ret < 0)
- return -EINVAL;
- return 0;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pwc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct pwc_device *pdev =
+ container_of(ctrl->handler, struct pwc_device, ctrl_handler);
+ int ret = 0;
+
+ /* See the comments on locking in pwc_g_volatile_ctrl */
+ mutex_unlock(&pdev->modlock);
+ mutex_lock(&pdev->udevlock);
+
+ if (!pdev->udev) {
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ BRIGHTNESS_FORMATTER, ctrl->val);
+ break;
case V4L2_CID_CONTRAST:
- c->value <<= 10;
- ret = pwc_set_contrast(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ CONTRAST_FORMATTER, ctrl->val);
+ break;
case V4L2_CID_SATURATION:
- ret = pwc_set_saturation(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
+ ret = pwc_set_s8_ctrl(pdev, SET_CHROM_CTL,
+ pdev->saturation_fmt, ctrl->val);
+ break;
case V4L2_CID_GAMMA:
- c->value <<= 11;
- ret = pwc_set_gamma(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_RED_BALANCE:
- c->value <<= 8;
- ret = pwc_set_red_gain(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_BLUE_BALANCE:
- c->value <<= 8;
- ret = pwc_set_blue_gain(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ GAMMA_FORMATTER, ctrl->val);
+ break;
case V4L2_CID_AUTO_WHITE_BALANCE:
- c->value = (c->value == 0) ? PWC_WB_MANUAL : PWC_WB_AUTO;
- ret = pwc_set_awb(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_EXPOSURE:
- c->value <<= 8;
- ret = pwc_set_shutter_speed(pdev, c->value ? 0 : 1, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
+ ret = pwc_set_awb(pdev);
+ break;
case V4L2_CID_AUTOGAIN:
- /* autogain off means nothing without a gain */
- if (c->value == 0)
- return 0;
- ret = pwc_set_agc(pdev, c->value, 0);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_GAIN:
- c->value <<= 8;
- ret = pwc_set_agc(pdev, 0, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_SAVE_USER:
- if (pwc_save_user(pdev))
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_RESTORE_USER:
- if (pwc_restore_user(pdev))
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_RESTORE_FACTORY:
- if (pwc_restore_factory(pdev))
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_COLOUR_MODE:
- ret = pwc_set_colour_mode(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_AUTOCONTOUR:
- c->value = (c->value == 1) ? -1 : 0;
- ret = pwc_set_contour(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_CONTOUR:
- c->value <<= 10;
- ret = pwc_set_contour(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_BACKLIGHT:
- ret = pwc_set_backlight(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
- case V4L2_CID_PRIVATE_FLICKERLESS:
- ret = pwc_set_flicker(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- case V4L2_CID_PRIVATE_NOISE_REDUCTION:
- ret = pwc_set_dynamic_noise(pdev, c->value);
- if (ret < 0)
- return -EINVAL;
- return 0;
-
+ if (DEVICE_USE_CODEC2(pdev->type))
+ ret = pwc_set_autogain(pdev);
+ else if (DEVICE_USE_CODEC3(pdev->type))
+ ret = pwc_set_autogain_expo(pdev);
+ else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ if (DEVICE_USE_CODEC2(pdev->type))
+ ret = pwc_set_exposure_auto(pdev);
+ else
+ ret = -EINVAL;
+ break;
+ case V4L2_CID_COLORFX:
+ ret = pwc_set_u8_ctrl(pdev, SET_CHROM_CTL,
+ COLOUR_MODE_FORMATTER,
+ ctrl->val ? 0 : 0xff);
+ break;
+ case PWC_CID_CUSTOM(autocontour):
+ if (pdev->autocontour->is_new) {
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ AUTO_CONTOUR_FORMATTER,
+ pdev->autocontour->val ? 0 : 0xff);
+ }
+ if (ret == 0 && pdev->contour->is_new) {
+ if (pdev->autocontour->val) {
+ ret = -EBUSY;
+ break;
+ }
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ PRESET_CONTOUR_FORMATTER,
+ pdev->contour->val);
+ }
+ break;
+ case V4L2_CID_BACKLIGHT_COMPENSATION:
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ BACK_LIGHT_COMPENSATION_FORMATTER,
+ ctrl->val ? 0 : 0xff);
+ break;
+ case V4L2_CID_BAND_STOP_FILTER:
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ FLICKERLESS_MODE_FORMATTER,
+ ctrl->val ? 0 : 0xff);
+ break;
+ case PWC_CID_CUSTOM(noise_reduction):
+ ret = pwc_set_u8_ctrl(pdev, SET_LUM_CTL,
+ DYNAMIC_NOISE_CONTROL_FORMATTER,
+ ctrl->val);
+ break;
+ case PWC_CID_CUSTOM(save_user):
+ ret = pwc_button_ctrl(pdev, SAVE_USER_DEFAULTS_FORMATTER);
+ break;
+ case PWC_CID_CUSTOM(restore_user):
+ ret = pwc_button_ctrl(pdev, RESTORE_USER_DEFAULTS_FORMATTER);
+ break;
+ case PWC_CID_CUSTOM(restore_factory):
+ ret = pwc_button_ctrl(pdev,
+ RESTORE_FACTORY_DEFAULTS_FORMATTER);
+ break;
+ case V4L2_CID_PAN_RELATIVE:
+ ret = pwc_set_motor(pdev);
+ break;
+ default:
+ ret = -EINVAL;
}
- return -EINVAL;
+
+ if (ret)
+ PWC_ERROR("s_ctrl %s error %d\n", ctrl->name, ret);
+
+leave:
+ mutex_unlock(&pdev->udevlock);
+ mutex_lock(&pdev->modlock);
+ return ret;
}
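For reference, the controls registered above are reached through the ordinary V4L2 control ioctls once the handler is attached to the video node; a minimal userspace illustration (not driver code, and /dev/video0 is an assumption):

/* Userspace illustration only. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_gamma(int value)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_GAMMA,	/* handled by pwc_s_ctrl() above */
		.value = value,		/* 0..31, as registered in pwc_init_controls() */
	};
	int ret, fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return -1;
	ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	close(fd);
	return ret;
}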
static int pwc_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
@@ -667,157 +967,77 @@ static int pwc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *
return pwc_vidioc_try_fmt(pdev, f);
}
-static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
+static int pwc_reqbufs(struct file *file, void *fh,
+ struct v4l2_requestbuffers *rb)
{
struct pwc_device *pdev = video_drvdata(file);
- return pwc_vidioc_set_fmt(pdev, f);
-}
-
-static int pwc_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
-{
- int nbuffers;
+ if (pdev->capt_file != NULL &&
+ pdev->capt_file != file)
+ return -EBUSY;
- PWC_DEBUG_IOCTL("ioctl(VIDIOC_REQBUFS) count=%d\n", rb->count);
- if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- if (rb->memory != V4L2_MEMORY_MMAP)
- return -EINVAL;
+ pdev->capt_file = file;
- nbuffers = rb->count;
- if (nbuffers < 2)
- nbuffers = 2;
- else if (nbuffers > pwc_mbufs)
- nbuffers = pwc_mbufs;
- /* Force to use our # of buffers */
- rb->count = pwc_mbufs;
- return 0;
+ return vb2_reqbufs(&pdev->vb_queue, rb);
}
static int pwc_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
struct pwc_device *pdev = video_drvdata(file);
- int index;
- PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYBUF) index=%d\n", buf->index);
- if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYBUF) Bad type\n");
- return -EINVAL;
- }
- index = buf->index;
- if (index < 0 || index >= pwc_mbufs) {
- PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYBUF) Bad index %d\n", buf->index);
- return -EINVAL;
- }
-
- buf->m.offset = index * pdev->len_per_image;
- if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
- buf->bytesused = pdev->frame_size + sizeof(struct pwc_raw_frame);
- else
- buf->bytesused = pdev->view.size;
- buf->field = V4L2_FIELD_NONE;
- buf->memory = V4L2_MEMORY_MMAP;
- /*buf->flags = V4L2_BUF_FLAG_MAPPED;*/
- buf->length = pdev->len_per_image;
-
- PWC_DEBUG_READ("VIDIOC_QUERYBUF: index=%d\n", buf->index);
- PWC_DEBUG_READ("VIDIOC_QUERYBUF: m.offset=%d\n", buf->m.offset);
- PWC_DEBUG_READ("VIDIOC_QUERYBUF: bytesused=%d\n", buf->bytesused);
-
- return 0;
+ return vb2_querybuf(&pdev->vb_queue, buf);
}
static int pwc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
- PWC_DEBUG_IOCTL("ioctl(VIDIOC_QBUF) index=%d\n", buf->index);
- if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- if (buf->memory != V4L2_MEMORY_MMAP)
- return -EINVAL;
- if (buf->index >= pwc_mbufs)
- return -EINVAL;
+ struct pwc_device *pdev = video_drvdata(file);
- buf->flags |= V4L2_BUF_FLAG_QUEUED;
- buf->flags &= ~V4L2_BUF_FLAG_DONE;
+ if (!pdev->udev)
+ return -ENODEV;
- return 0;
+ if (pdev->capt_file != file)
+ return -EBUSY;
+
+ return vb2_qbuf(&pdev->vb_queue, buf);
}
static int pwc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
- DECLARE_WAITQUEUE(wait, current);
struct pwc_device *pdev = video_drvdata(file);
- int ret;
- PWC_DEBUG_IOCTL("ioctl(VIDIOC_DQBUF)\n");
+ if (!pdev->udev)
+ return -ENODEV;
- if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- add_wait_queue(&pdev->frameq, &wait);
- while (pdev->full_frames == NULL) {
- if (pdev->error_status) {
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
- return -pdev->error_status;
- }
-
- if (signal_pending(current)) {
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
- return -ERESTARTSYS;
- }
- mutex_unlock(&pdev->modlock);
- schedule();
- set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&pdev->modlock);
- }
- remove_wait_queue(&pdev->frameq, &wait);
- set_current_state(TASK_RUNNING);
-
- PWC_DEBUG_IOCTL("VIDIOC_DQBUF: frame ready.\n");
- /* Decompress data in pdev->images[pdev->fill_image] */
- ret = pwc_handle_frame(pdev);
- if (ret)
- return -EFAULT;
- PWC_DEBUG_IOCTL("VIDIOC_DQBUF: after pwc_handle_frame\n");
-
- buf->index = pdev->fill_image;
- if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
- buf->bytesused = pdev->frame_size + sizeof(struct pwc_raw_frame);
- else
- buf->bytesused = pdev->view.size;
- buf->flags = V4L2_BUF_FLAG_MAPPED;
- buf->field = V4L2_FIELD_NONE;
- do_gettimeofday(&buf->timestamp);
- buf->sequence = 0;
- buf->memory = V4L2_MEMORY_MMAP;
- buf->m.offset = pdev->fill_image * pdev->len_per_image;
- buf->length = pdev->len_per_image;
- pwc_next_image(pdev);
-
- PWC_DEBUG_IOCTL("VIDIOC_DQBUF: buf->index=%d\n", buf->index);
- PWC_DEBUG_IOCTL("VIDIOC_DQBUF: buf->length=%d\n", buf->length);
- PWC_DEBUG_IOCTL("VIDIOC_DQBUF: m.offset=%d\n", buf->m.offset);
- PWC_DEBUG_IOCTL("VIDIOC_DQBUF: bytesused=%d\n", buf->bytesused);
- PWC_DEBUG_IOCTL("VIDIOC_DQBUF: leaving\n");
- return 0;
+ if (pdev->capt_file != file)
+ return -EBUSY;
+ return vb2_dqbuf(&pdev->vb_queue, buf, file->f_flags & O_NONBLOCK);
}
static int pwc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
{
struct pwc_device *pdev = video_drvdata(file);
- return pwc_isoc_init(pdev);
+ if (!pdev->udev)
+ return -ENODEV;
+
+ if (pdev->capt_file != file)
+ return -EBUSY;
+
+ return vb2_streamon(&pdev->vb_queue, i);
}
static int pwc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
{
struct pwc_device *pdev = video_drvdata(file);
- pwc_isoc_cleanup(pdev);
- return 0;
+ if (!pdev->udev)
+ return -ENODEV;
+
+ if (pdev->capt_file != file)
+ return -EBUSY;
+
+ return vb2_streamoff(&pdev->vb_queue, i);
}
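The capt_file ownership test repeated through the buffer and streaming ioctls above could, as a possible refactor (an assumption, not something this patch does), be written once as a helper; the streaming ioctls only check ownership, while s_fmt and reqbufs also claim it:

/* Sketch only, not part of the patch. Caller holds pdev->modlock. */
static int pwc_claim_capture(struct pwc_device *pdev, struct file *file)
{
	if (pdev->capt_file != NULL && pdev->capt_file != file)
		return -EBUSY;	/* another fd already owns the capture queue */
	pdev->capt_file = file;
	return 0;
}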
static int pwc_enum_framesizes(struct file *file, void *fh,
@@ -896,9 +1116,6 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = {
.vidioc_g_fmt_vid_cap = pwc_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = pwc_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = pwc_try_fmt_vid_cap,
- .vidioc_queryctrl = pwc_queryctrl,
- .vidioc_g_ctrl = pwc_g_ctrl,
- .vidioc_s_ctrl = pwc_s_ctrl,
.vidioc_reqbufs = pwc_reqbufs,
.vidioc_querybuf = pwc_querybuf,
.vidioc_qbuf = pwc_qbuf,
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index e947766337d..0e4e2d7b787 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -29,7 +29,6 @@
#include <linux/usb.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
-#include <linux/version.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -37,19 +36,16 @@
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-vmalloc.h>
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
#include <linux/input.h>
#endif
-#include "pwc-uncompress.h"
#include <media/pwc-ioctl.h>
/* Version block */
-#define PWC_MAJOR 10
-#define PWC_MINOR 0
-#define PWC_EXTRAMINOR 12
-#define PWC_VERSION_CODE KERNEL_VERSION(PWC_MAJOR,PWC_MINOR,PWC_EXTRAMINOR)
-#define PWC_VERSION "10.0.14"
+#define PWC_VERSION "10.0.15"
#define PWC_NAME "pwc"
#define PFX PWC_NAME ": "
@@ -81,9 +77,9 @@
#define PWC_DEBUG_LEVEL (PWC_DEBUG_LEVEL_MODULE)
#define PWC_DEBUG(level, fmt, args...) do {\
- if ((PWC_DEBUG_LEVEL_ ##level) & pwc_trace) \
- printk(KERN_DEBUG PFX fmt, ##args); \
- } while(0)
+ if ((PWC_DEBUG_LEVEL_ ##level) & pwc_trace) \
+ printk(KERN_DEBUG PFX fmt, ##args); \
+ } while (0)
#define PWC_ERROR(fmt, args...) printk(KERN_ERR PFX fmt, ##args)
#define PWC_WARNING(fmt, args...) printk(KERN_WARNING PFX fmt, ##args)
@@ -110,25 +106,21 @@
#define FEATURE_CODEC1 0x0002
#define FEATURE_CODEC2 0x0004
-/* Turn certain features on/off */
-#define PWC_INT_PIPE 0
-
/* Ignore errors in the first N frames, to allow for startup delays */
#define FRAME_LOWMARK 5
/* Size and number of buffers for the ISO pipe. */
-#define MAX_ISO_BUFS 2
+#define MAX_ISO_BUFS 3
#define ISO_FRAMES_PER_DESC 10
#define ISO_MAX_FRAME_SIZE 960
#define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE)
-/* Frame buffers: contains compressed or uncompressed video data. */
-#define MAX_FRAMES 5
/* Maximum size after decompression is 640x480 YUV data, 1.5 * 640 * 480 */
#define PWC_FRAME_SIZE (460800 + TOUCAM_HEADER_SIZE + TOUCAM_TRAILER_SIZE)
-/* Absolute maximum number of buffers available for mmap() */
-#define MAX_IMAGES 10
+/* Absolute minimum and maximum number of buffers available for mmap() */
+#define MIN_FRAMES 2
+#define MAX_FRAMES 16
/* Some macros to quickly find the type of a webcam */
#define DEVICE_USE_CODEC1(x) ((x)<675)
@@ -136,149 +128,221 @@
#define DEVICE_USE_CODEC3(x) ((x)>=700)
#define DEVICE_USE_CODEC23(x) ((x)>=675)
-/* The following structures were based on cpia.h. Why reinvent the wheel? :-) */
-struct pwc_iso_buf
-{
- void *data;
- int length;
- int read;
- struct urb *urb;
-};
+/* from pwc-dec.h */
+#define PWCX_FLAG_PLANAR 0x0001
+
+/* Request types: video */
+#define SET_LUM_CTL 0x01
+#define GET_LUM_CTL 0x02
+#define SET_CHROM_CTL 0x03
+#define GET_CHROM_CTL 0x04
+#define SET_STATUS_CTL 0x05
+#define GET_STATUS_CTL 0x06
+#define SET_EP_STREAM_CTL 0x07
+#define GET_EP_STREAM_CTL 0x08
+#define GET_XX_CTL 0x09
+#define SET_XX_CTL 0x0A
+#define GET_XY_CTL 0x0B
+#define SET_XY_CTL 0x0C
+#define SET_MPT_CTL 0x0D
+#define GET_MPT_CTL 0x0E
+
+/* Selectors for the Luminance controls [GS]ET_LUM_CTL */
+#define AGC_MODE_FORMATTER 0x2000
+#define PRESET_AGC_FORMATTER 0x2100
+#define SHUTTER_MODE_FORMATTER 0x2200
+#define PRESET_SHUTTER_FORMATTER 0x2300
+#define PRESET_CONTOUR_FORMATTER 0x2400
+#define AUTO_CONTOUR_FORMATTER 0x2500
+#define BACK_LIGHT_COMPENSATION_FORMATTER 0x2600
+#define CONTRAST_FORMATTER 0x2700
+#define DYNAMIC_NOISE_CONTROL_FORMATTER 0x2800
+#define FLICKERLESS_MODE_FORMATTER 0x2900
+#define AE_CONTROL_SPEED 0x2A00
+#define BRIGHTNESS_FORMATTER 0x2B00
+#define GAMMA_FORMATTER 0x2C00
+
+/* Selectors for the Chrominance controls [GS]ET_CHROM_CTL */
+#define WB_MODE_FORMATTER 0x1000
+#define AWB_CONTROL_SPEED_FORMATTER 0x1100
+#define AWB_CONTROL_DELAY_FORMATTER 0x1200
+#define PRESET_MANUAL_RED_GAIN_FORMATTER 0x1300
+#define PRESET_MANUAL_BLUE_GAIN_FORMATTER 0x1400
+#define COLOUR_MODE_FORMATTER 0x1500
+#define SATURATION_MODE_FORMATTER1 0x1600
+#define SATURATION_MODE_FORMATTER2 0x1700
+
+/* Selectors for the Status controls [GS]ET_STATUS_CTL */
+#define SAVE_USER_DEFAULTS_FORMATTER 0x0200
+#define RESTORE_USER_DEFAULTS_FORMATTER 0x0300
+#define RESTORE_FACTORY_DEFAULTS_FORMATTER 0x0400
+#define READ_AGC_FORMATTER 0x0500
+#define READ_SHUTTER_FORMATTER 0x0600
+#define READ_RED_GAIN_FORMATTER 0x0700
+#define READ_BLUE_GAIN_FORMATTER 0x0800
+
+/* Formatters for the motorized pan & tilt [GS]ET_MPT_CTL */
+#define PT_RELATIVE_CONTROL_FORMATTER 0x01
+#define PT_RESET_CONTROL_FORMATTER 0x02
+#define PT_STATUS_FORMATTER 0x03
/* intermediate buffers with raw data from the USB cam */
struct pwc_frame_buf
{
- void *data;
- volatile int filled; /* number of bytes filled */
- struct pwc_frame_buf *next; /* list */
-};
-
-/* additionnal informations used when dealing image between kernel and userland */
-struct pwc_imgbuf
-{
- unsigned long offset; /* offset of this buffer in the big array of image_data */
- int vma_use_count; /* count the number of time this memory is mapped */
+ struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */
+ struct list_head list;
+ void *data;
+ int filled; /* number of bytes filled */
};
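Because struct vb2_buffer is the first member, the driver can always recover its own buffer wrapper from the pointer handed back by the vb2 core; the usual idiom, written out as an illustrative helper (the name is hypothetical, the driver open-codes this):

/* Sketch: recover the driver buffer from the embedded vb2_buffer. */
static inline struct pwc_frame_buf *to_pwc_frame_buf(struct vb2_buffer *vb)
{
	return container_of(vb, struct pwc_frame_buf, vb);
}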
struct pwc_device
{
- struct video_device *vdev;
-
- /* Pointer to our usb_device */
- struct usb_device *udev;
-
- int type; /* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */
- int release; /* release number */
- int features; /* feature bits */
- char serial[30]; /* serial number (string) */
- int error_status; /* set when something goes wrong with the cam (unplugged, USB errors) */
- int usb_init; /* set when the cam has been initialized over USB */
-
- /*** Video data ***/
- int vopen; /* flag */
- int vendpoint; /* video isoc endpoint */
- int vcinterface; /* video control interface */
- int valternate; /* alternate interface needed */
- int vframes, vsize; /* frames-per-second & size (see PSZ_*) */
- int pixfmt; /* pixelformat: V4L2_PIX_FMT_YUV420 or raw: _PWC1, _PWC2 */
- int vframe_count; /* received frames */
- int vframes_dumped; /* counter for dumped frames */
- int vframes_error; /* frames received in error */
- int vmax_packet_size; /* USB maxpacket size */
- int vlast_packet_size; /* for frame synchronisation */
- int visoc_errors; /* number of contiguous ISOC errors */
- int vcompression; /* desired compression factor */
- int vbandlength; /* compressed band length; 0 is uncompressed */
- char vsnapshot; /* snapshot mode */
- char vsync; /* used by isoc handler */
- char vmirror; /* for ToUCaM series */
- char unplugged;
-
- int cmd_len;
- unsigned char cmd_buf[13];
-
- /* The image acquisition requires 3 to 4 steps:
- 1. data is gathered in short packets from the USB controller
- 2. data is synchronized and packed into a frame buffer
- 3a. in case data is compressed, decompress it directly into image buffer
- 3b. in case data is uncompressed, copy into image buffer with viewport
- 4. data is transferred to the user process
-
- Note that MAX_ISO_BUFS != MAX_FRAMES != MAX_IMAGES....
- We have in effect a back-to-back-double-buffer system.
- */
- /* 1: isoc */
- struct pwc_iso_buf sbuf[MAX_ISO_BUFS];
- char iso_init;
-
- /* 2: frame */
- struct pwc_frame_buf *fbuf; /* all frames */
- struct pwc_frame_buf *empty_frames, *empty_frames_tail; /* all empty frames */
- struct pwc_frame_buf *full_frames, *full_frames_tail; /* all filled frames */
- struct pwc_frame_buf *fill_frame; /* frame currently being filled */
- struct pwc_frame_buf *read_frame; /* frame currently read by user process */
- int frame_header_size, frame_trailer_size;
- int frame_size;
- int frame_total_size; /* including header & trailer */
- int drop_frames;
-
- /* 3: decompression */
- void *decompress_data; /* private data for decompression engine */
-
- /* 4: image */
- /* We have an 'image' and a 'view', where 'image' is the fixed-size image
- as delivered by the camera, and 'view' is the size requested by the
- program. The camera image is centered in this viewport, laced with
- a gray or black border. view_min <= image <= view <= view_max;
- */
- int image_mask; /* bitmask of supported sizes */
- struct pwc_coord view_min, view_max; /* minimum and maximum viewable sizes */
- struct pwc_coord abs_max; /* maximum supported size with compression */
- struct pwc_coord image, view; /* image and viewport size */
- struct pwc_coord offset; /* offset within the viewport */
-
- void *image_data; /* total buffer, which is subdivided into ... */
- struct pwc_imgbuf images[MAX_IMAGES];/* ...several images... */
- int fill_image; /* ...which are rotated. */
- int len_per_image; /* length per image */
- int image_read_pos; /* In case we read data in pieces, keep track of were we are in the imagebuffer */
- int image_used[MAX_IMAGES]; /* For MCAPTURE and SYNC */
-
- struct mutex modlock; /* to prevent races in video_open(), etc */
- spinlock_t ptrlock; /* for manipulating the buffer pointers */
-
- /*** motorized pan/tilt feature */
- struct pwc_mpt_range angle_range;
- int pan_angle; /* in degrees * 100 */
- int tilt_angle; /* absolute angle; 0,0 is home position */
- int snapshot_button_status; /* set to 1 when the user push the button, reset to 0 when this value is read */
+ struct video_device vdev;
+ struct mutex modlock;
+
+ /* Pointer to our usb_device, may be NULL after unplug */
+ struct usb_device *udev;
+ /* Protects the setting of udev to NULL by our disconnect handler */
+ struct mutex udevlock;
+
+ /* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */
+ int type;
+ int release; /* release number */
+ int features; /* feature bits */
+ char serial[30]; /* serial number (string) */
+
+ /*** Video data ***/
+ struct file *capt_file; /* file doing video capture */
+ int vendpoint; /* video isoc endpoint */
+ int vcinterface; /* video control interface */
+ int valternate; /* alternate interface needed */
+ int vframes, vsize; /* frames-per-second & size (see PSZ_*) */
+ int pixfmt; /* pixelformat: V4L2_PIX_FMT_YUV420 or _PWCX */
+ int vframe_count; /* received frames */
+ int vmax_packet_size; /* USB maxpacket size */
+ int vlast_packet_size; /* for frame synchronisation */
+ int visoc_errors; /* number of contiguous ISOC errors */
+ int vcompression; /* desired compression factor */
+ int vbandlength; /* compressed band length; 0 is uncompressed */
+ char vsnapshot; /* snapshot mode */
+ char vsync; /* used by isoc handler */
+ char vmirror; /* for ToUCaM series */
+ char power_save; /* Do powersaving for this cam */
+
+ int cmd_len;
+ unsigned char cmd_buf[13];
+
+ struct urb *urbs[MAX_ISO_BUFS];
+ char iso_init;
+
+ /* videobuf2 queue and queued buffers list */
+ struct vb2_queue vb_queue;
+ struct list_head queued_bufs;
+ spinlock_t queued_bufs_lock;
+
+ /*
+ * Frame currently being filled, this only gets touched by the
+ * isoc urb complete handler, and by stream start / stop since
+ * start / stop touch it before / after starting / killing the urbs
+ * no locking is needed around this
+ */
+ struct pwc_frame_buf *fill_buf;
+
+ int frame_header_size, frame_trailer_size;
+ int frame_size;
+ int frame_total_size; /* including header & trailer */
+ int drop_frames;
+
+ void *decompress_data; /* private data for decompression engine */
+
+ /*
+ * We have an 'image' and a 'view', where 'image' is the fixed-size img
+ * as delivered by the camera, and 'view' is the size requested by the
+ * program. The camera image is centered in this viewport, laced with
+ * a gray or black border. view_min <= image <= view <= view_max;
+ */
+ int image_mask; /* supported sizes */
+ struct pwc_coord view_min, view_max; /* minimum and maximum view */
+ struct pwc_coord abs_max; /* maximum supported size */
+ struct pwc_coord image, view; /* image and viewport size */
+ struct pwc_coord offset; /* offset of the viewport */
+
+ /*** motorized pan/tilt feature */
+ struct pwc_mpt_range angle_range;
+ int pan_angle; /* in degrees * 100 */
+ int tilt_angle; /* absolute angle; 0,0 is home */
+
+ /*
+ * Set to 1 when the user pushes the button, reset to 0
+ * when this value is read from sysfs.
+ */
+ int snapshot_button_status;
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
- struct input_dev *button_dev; /* webcam snapshot button input */
- char button_phys[64];
+ struct input_dev *button_dev; /* webcam snapshot button input */
+ char button_phys[64];
#endif
- /*** Misc. data ***/
- wait_queue_head_t frameq; /* When waiting for a frame to finish... */
-#if PWC_INT_PIPE
- void *usb_int_handler; /* for the interrupt endpoint */
-#endif
+ /* controls */
+ struct v4l2_ctrl_handler ctrl_handler;
+ u16 saturation_fmt;
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *gamma;
+ struct {
+ /* awb / red-blue balance cluster */
+ struct v4l2_ctrl *auto_white_balance;
+ struct v4l2_ctrl *red_balance;
+ struct v4l2_ctrl *blue_balance;
+ /* usb ctrl transfers are slow, so we cache things */
+ int color_bal_valid;
+ unsigned long last_color_bal_update; /* In jiffies */
+ s32 last_red_balance;
+ s32 last_blue_balance;
+ };
+ struct {
+ /* autogain / gain cluster */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *gain;
+ int gain_valid;
+ unsigned long last_gain_update; /* In jiffies */
+ s32 last_gain;
+ };
+ struct {
+ /* exposure_auto / exposure cluster */
+ struct v4l2_ctrl *exposure_auto;
+ struct v4l2_ctrl *exposure;
+ int exposure_valid;
+ unsigned long last_exposure_update; /* In jiffies */
+ s32 last_exposure;
+ };
+ struct v4l2_ctrl *colorfx;
+ struct {
+ /* autocontour/contour cluster */
+ struct v4l2_ctrl *autocontour;
+ struct v4l2_ctrl *contour;
+ };
+ struct v4l2_ctrl *backlight;
+ struct v4l2_ctrl *flicker;
+ struct v4l2_ctrl *noise_reduction;
+ struct v4l2_ctrl *save_user;
+ struct v4l2_ctrl *restore_user;
+ struct v4l2_ctrl *restore_factory;
+ struct {
+ /* motor control cluster */
+ struct v4l2_ctrl *motor_pan;
+ struct v4l2_ctrl *motor_tilt;
+ struct v4l2_ctrl *motor_pan_reset;
+ struct v4l2_ctrl *motor_tilt_reset;
+ };
+ /* CODEC3 models have both gain and exposure controlled by autogain */
+ struct v4l2_ctrl *autogain_expo_cluster[3];
};
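The *_valid / last_*_update pairs above implement a small read cache for the slow USB control reads; the test used by the volatile control handlers, written out once for clarity (this helper is hypothetical, the driver open-codes it per control):

/* Hypothetical helper: a cached register value is reused for up to HZ/4,
 * i.e. a quarter of a second, before it is re-read from the camera. */
#include <linux/jiffies.h>

static inline bool pwc_cache_is_fresh(int valid, unsigned long last_update)
{
	return valid && time_before(jiffies, last_update + HZ / 4);
}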
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/* Global variables */
#ifdef CONFIG_USB_PWC_DEBUG
extern int pwc_trace;
#endif
-extern int pwc_mbufs;
-
-/** functions in pwc-if.c */
-int pwc_handle_frame(struct pwc_device *pdev);
-void pwc_next_image(struct pwc_device *pdev);
-int pwc_isoc_init(struct pwc_device *pdev);
-void pwc_isoc_cleanup(struct pwc_device *pdev);
/** Functions in pwc-misc.c */
/* sizes in pixels */
@@ -291,50 +355,25 @@ void pwc_construct(struct pwc_device *pdev);
/* Request a certain video mode. Returns < 0 if not possible */
extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frames, int compression, int snapshot);
extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size);
-/* Calculate the number of bytes per image (not frame) */
extern int pwc_mpt_reset(struct pwc_device *pdev, int flags);
extern int pwc_mpt_set_angle(struct pwc_device *pdev, int pan, int tilt);
-
-/* Various controls; should be obvious. Value 0..65535, or < 0 on error */
-extern int pwc_get_brightness(struct pwc_device *pdev);
-extern int pwc_set_brightness(struct pwc_device *pdev, int value);
-extern int pwc_get_contrast(struct pwc_device *pdev);
-extern int pwc_set_contrast(struct pwc_device *pdev, int value);
-extern int pwc_get_gamma(struct pwc_device *pdev);
-extern int pwc_set_gamma(struct pwc_device *pdev, int value);
-extern int pwc_get_saturation(struct pwc_device *pdev, int *value);
-extern int pwc_set_saturation(struct pwc_device *pdev, int value);
extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value);
extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor);
-extern int pwc_restore_user(struct pwc_device *pdev);
-extern int pwc_save_user(struct pwc_device *pdev);
-extern int pwc_restore_factory(struct pwc_device *pdev);
-
-/* exported for use by v4l2 controls */
-extern int pwc_get_red_gain(struct pwc_device *pdev, int *value);
-extern int pwc_set_red_gain(struct pwc_device *pdev, int value);
-extern int pwc_get_blue_gain(struct pwc_device *pdev, int *value);
-extern int pwc_set_blue_gain(struct pwc_device *pdev, int value);
-extern int pwc_get_awb(struct pwc_device *pdev);
-extern int pwc_set_awb(struct pwc_device *pdev, int mode);
-extern int pwc_set_agc(struct pwc_device *pdev, int mode, int value);
-extern int pwc_get_agc(struct pwc_device *pdev, int *value);
-extern int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value);
-extern int pwc_get_shutter_speed(struct pwc_device *pdev, int *value);
-
-extern int pwc_set_colour_mode(struct pwc_device *pdev, int colour);
-extern int pwc_get_colour_mode(struct pwc_device *pdev, int *colour);
-extern int pwc_set_contour(struct pwc_device *pdev, int contour);
-extern int pwc_get_contour(struct pwc_device *pdev, int *contour);
-extern int pwc_set_backlight(struct pwc_device *pdev, int backlight);
-extern int pwc_get_backlight(struct pwc_device *pdev, int *backlight);
-extern int pwc_set_flicker(struct pwc_device *pdev, int flicker);
-extern int pwc_get_flicker(struct pwc_device *pdev, int *flicker);
-extern int pwc_set_dynamic_noise(struct pwc_device *pdev, int noise);
-extern int pwc_get_dynamic_noise(struct pwc_device *pdev, int *noise);
+extern int send_control_msg(struct pwc_device *pdev,
+ u8 request, u16 value, void *buf, int buflen);
+
+/* Control get / set helpers */
+int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data);
+int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data);
+int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data);
+#define pwc_set_s8_ctrl pwc_set_u8_ctrl
+int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *dat);
+int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data);
+int pwc_button_ctrl(struct pwc_device *pdev, u16 value);
+int pwc_init_controls(struct pwc_device *pdev);
/* Power down or up the camera; not supported by all models */
-extern int pwc_camera_power(struct pwc_device *pdev, int power);
+extern void pwc_camera_power(struct pwc_device *pdev, int power);
/* Private ioctl()s; see pwc-ioctl.h */
extern long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg);
@@ -343,12 +382,6 @@ extern const struct v4l2_ioctl_ops pwc_ioctl_ops;
/** pwc-uncompress.c */
/* Expand frame to image, possibly including decompression. Uses read_frame and fill_image */
-extern int pwc_decompress(struct pwc_device *pdev);
-
-#ifdef __cplusplus
-}
-#endif
-
+int pwc_decompress(struct pwc_device *pdev, struct pwc_frame_buf *fbuf);
#endif
-/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
diff --git a/drivers/media/video/pxa_camera.c b/drivers/media/video/pxa_camera.c
index b42bfa5ccdf..d07df22a5ec 100644
--- a/drivers/media/video/pxa_camera.c
+++ b/drivers/media/video/pxa_camera.c
@@ -22,7 +22,6 @@
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/time.h>
-#include <linux/version.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
@@ -40,7 +39,7 @@
#include <mach/dma.h>
#include <mach/camera.h>
-#define PXA_CAM_VERSION_CODE KERNEL_VERSION(0, 0, 5)
+#define PXA_CAM_VERSION "0.0.6"
#define PXA_CAM_DRV_NAME "pxa27x-camera"
/* Camera Interface */
@@ -247,7 +246,7 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
if (bytes_per_line < 0)
return bytes_per_line;
- dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size);
+ dev_dbg(icd->parent, "count=%d, size=%d\n", *count, *size);
*size = bytes_per_line * icd->user_height;
@@ -262,13 +261,13 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
{
struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
int i;
BUG_ON(in_interrupt());
- dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
&buf->vb, buf->vb.baddr, buf->vb.bsize);
/*
@@ -429,7 +428,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
struct videobuf_buffer *vb, enum v4l2_field field)
{
struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
struct device *dev = pcdev->soc_host.v4l2_dev.dev;
struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
@@ -636,11 +635,11 @@ static void pxa_videobuf_queue(struct videobuf_queue *vq,
struct videobuf_buffer *vb)
{
struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
- dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d active=%p\n",
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d active=%p\n",
__func__, vb, vb->baddr, vb->bsize, pcdev->active);
list_add_tail(&vb->queue, &pcdev->capture);
@@ -658,7 +657,7 @@ static void pxa_videobuf_release(struct videobuf_queue *vq,
struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
#ifdef DEBUG
struct soc_camera_device *icd = vq->priv_data;
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
@@ -843,7 +842,7 @@ static struct videobuf_queue_ops pxa_videobuf_ops = {
static void pxa_camera_init_videobuf(struct videobuf_queue *q,
struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
/*
@@ -972,7 +971,7 @@ static irqreturn_t pxa_camera_irq(int irq, void *data)
*/
static int pxa_camera_add_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
if (pcdev->icd)
@@ -982,7 +981,7 @@ static int pxa_camera_add_device(struct soc_camera_device *icd)
pcdev->icd = icd;
- dev_info(icd->dev.parent, "PXA Camera driver attached to camera %d\n",
+ dev_info(icd->parent, "PXA Camera driver attached to camera %d\n",
icd->devnum);
return 0;
@@ -991,12 +990,12 @@ static int pxa_camera_add_device(struct soc_camera_device *icd)
/* Called with .video_lock held */
static void pxa_camera_remove_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
BUG_ON(icd != pcdev->icd);
- dev_info(icd->dev.parent, "PXA Camera driver detached from camera %d\n",
+ dev_info(icd->parent, "PXA Camera driver detached from camera %d\n",
icd->devnum);
/* disable capture, disable interrupts */
@@ -1057,7 +1056,7 @@ static int test_platform_param(struct pxa_camera_dev *pcdev,
static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
unsigned long flags, __u32 pixfmt)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
unsigned long dw, bpp;
@@ -1152,7 +1151,7 @@ static void pxa_camera_setup_cicr(struct soc_camera_device *icd,
static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
unsigned long bus_flags, camera_flags, common_flags;
int ret;
@@ -1210,7 +1209,7 @@ static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt)
static int pxa_camera_try_bus_param(struct soc_camera_device *icd,
unsigned char buswidth)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct pxa_camera_dev *pcdev = ici->priv;
unsigned long bus_flags, camera_flags;
int ret = test_platform_param(pcdev, buswidth, &bus_flags);
@@ -1247,7 +1246,7 @@ static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int id
struct soc_camera_format_xlate *xlate)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
int formats = 0, ret;
struct pxa_cam *cam;
enum v4l2_mbus_pixelcode code;
@@ -1335,9 +1334,9 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
struct v4l2_crop *a)
{
struct v4l2_rect *rect = &a->c;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct pxa_camera_dev *pcdev = ici->priv;
- struct device *dev = icd->dev.parent;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
struct soc_camera_sense sense = {
.master_clock = pcdev->mclk,
@@ -1379,7 +1378,7 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
return ret;
if (pxa_camera_check_frame(mf.width, mf.height)) {
- dev_warn(icd->dev.parent,
+ dev_warn(icd->parent,
"Inconsistent state. Use S_FMT to repair\n");
return -EINVAL;
}
@@ -1406,9 +1405,9 @@ static int pxa_camera_set_crop(struct soc_camera_device *icd,
static int pxa_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct pxa_camera_dev *pcdev = ici->priv;
- struct device *dev = icd->dev.parent;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
const struct soc_camera_format_xlate *xlate = NULL;
struct soc_camera_sense sense = {
@@ -1485,7 +1484,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt);
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
return -EINVAL;
}
@@ -1499,16 +1498,11 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
&pix->height, 32, 2048, 0,
pixfmt == V4L2_PIX_FMT_YUV422P ? 4 : 0);
- pix->bytesperline = soc_mbus_bytes_per_line(pix->width,
- xlate->host_fmt);
- if (pix->bytesperline < 0)
- return pix->bytesperline;
- pix->sizeimage = pix->height * pix->bytesperline;
-
/* limit to sensor capabilities */
mf.width = pix->width;
mf.height = pix->height;
- mf.field = pix->field;
+ /* Only progressive video supported so far */
+ mf.field = V4L2_FIELD_NONE;
mf.colorspace = pix->colorspace;
mf.code = xlate->code;
@@ -1527,7 +1521,7 @@ static int pxa_camera_try_fmt(struct soc_camera_device *icd,
break;
default:
/* TODO: support interlaced at least in pass-through mode */
- dev_err(icd->dev.parent, "Field type %d unsupported.\n",
+ dev_err(icd->parent, "Field type %d unsupported.\n",
mf.field);
return -EINVAL;
}
@@ -1578,15 +1572,14 @@ static int pxa_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
- cap->version = PXA_CAM_VERSION_CODE;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
}
-static int pxa_camera_suspend(struct soc_camera_device *icd, pm_message_t state)
+static int pxa_camera_suspend(struct device *dev)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct pxa_camera_dev *pcdev = ici->priv;
int i = 0, ret = 0;
@@ -1596,15 +1589,19 @@ static int pxa_camera_suspend(struct soc_camera_device *icd, pm_message_t state)
pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR3);
pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR4);
- if ((pcdev->icd) && (pcdev->icd->ops->suspend))
- ret = pcdev->icd->ops->suspend(pcdev->icd, state);
+ if (pcdev->icd) {
+ struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->icd);
+ ret = v4l2_subdev_call(sd, core, s_power, 0);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ }
return ret;
}
-static int pxa_camera_resume(struct soc_camera_device *icd)
+static int pxa_camera_resume(struct device *dev)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct pxa_camera_dev *pcdev = ici->priv;
int i = 0, ret = 0;
@@ -1618,8 +1615,12 @@ static int pxa_camera_resume(struct soc_camera_device *icd)
__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR3);
__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR4);
- if ((pcdev->icd) && (pcdev->icd->ops->resume))
- ret = pcdev->icd->ops->resume(pcdev->icd);
+ if (pcdev->icd) {
+ struct v4l2_subdev *sd = soc_camera_to_subdev(pcdev->icd);
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret == -ENOIOCTLCMD)
+ ret = 0;
+ }
/* Restart frame capture if active buffer exists */
if (!ret && pcdev->active)
@@ -1632,8 +1633,6 @@ static struct soc_camera_host_ops pxa_soc_camera_host_ops = {
.owner = THIS_MODULE,
.add = pxa_camera_add_device,
.remove = pxa_camera_remove_device,
- .suspend = pxa_camera_suspend,
- .resume = pxa_camera_resume,
.set_crop = pxa_camera_set_crop,
.get_formats = pxa_camera_get_formats,
.put_formats = pxa_camera_put_formats,
@@ -1818,9 +1817,15 @@ static int __devexit pxa_camera_remove(struct platform_device *pdev)
return 0;
}
+static struct dev_pm_ops pxa_camera_pm = {
+ .suspend = pxa_camera_suspend,
+ .resume = pxa_camera_resume,
+};
+
static struct platform_driver pxa_camera_driver = {
.driver = {
.name = PXA_CAM_DRV_NAME,
+ .pm = &pxa_camera_pm,
},
.probe = pxa_camera_probe,
.remove = __devexit_p(pxa_camera_remove),
@@ -1843,4 +1848,5 @@ module_exit(pxa_camera_exit);
MODULE_DESCRIPTION("PXA27x SoC Camera Host driver");
MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
MODULE_LICENSE("GPL");
+MODULE_VERSION(PXA_CAM_VERSION);
MODULE_ALIAS("platform:" PXA_CAM_DRV_NAME);
diff --git a/drivers/media/video/rj54n1cb0c.c b/drivers/media/video/rj54n1cb0c.c
index 57e11b6f19f..847ccc067e8 100644
--- a/drivers/media/video/rj54n1cb0c.c
+++ b/drivers/media/video/rj54n1cb0c.c
@@ -1364,10 +1364,9 @@ static int rj54n1_video_probe(struct soc_camera_device *icd,
int data1, data2;
int ret;
- /* This could be a BUG_ON() or a WARN_ON(), or remove it completely */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
- return -ENODEV;
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/* Read out the chip version register */
data1 = reg_read(client, RJ54N1_DEV_CODE);
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 5b9dce85645..803c9c82e49 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -16,15 +16,10 @@
* Example maximum bandwidth utilization:
*
* -full size, color mode YUYV or YUV422P: 2 channels at once
- *
* -full or half size Grey scale: all 4 channels at once
- *
* -half size, color mode YUYV or YUV422P: all 4 channels at once
- *
* -full size, color mode YUYV or YUV422P 1/2 frame rate: all 4 channels
* at once.
- * (TODO: Incorporate videodev2 frame rate(FR) enumeration,
- * which is currently experimental.)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -47,7 +42,6 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
-#include <linux/version.h>
#include <linux/mm.h>
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-common.h>
@@ -56,12 +50,7 @@
#include <linux/vmalloc.h>
#include <linux/usb.h>
-#define S2255_MAJOR_VERSION 1
-#define S2255_MINOR_VERSION 21
-#define S2255_RELEASE 0
-#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \
- S2255_MINOR_VERSION, \
- S2255_RELEASE)
+#define S2255_VERSION "1.22.1"
#define FIRMWARE_FILE_NAME "f2255usb.bin"
/* default JPEG quality */
@@ -126,7 +115,7 @@
#define MASK_COLOR 0x000000ff
#define MASK_JPG_QUALITY 0x0000ff00
#define MASK_INPUT_TYPE 0x000f0000
-/* frame decimation. Not implemented by V4L yet(experimental in V4L) */
+/* frame decimation. */
#define FDEC_1 1 /* capture every frame. default */
#define FDEC_2 2 /* capture every 2nd frame */
#define FDEC_3 3 /* capture every 3rd frame */
@@ -312,9 +301,9 @@ struct s2255_fh {
};
/* current cypress EEPROM firmware version */
-#define S2255_CUR_USB_FWVER ((3 << 8) | 11)
+#define S2255_CUR_USB_FWVER ((3 << 8) | 12)
/* current DSP FW version */
-#define S2255_CUR_DSP_FWVER 10102
+#define S2255_CUR_DSP_FWVER 10104
/* Need DSP version 5+ for video status feature */
#define S2255_MIN_DSP_STATUS 5
#define S2255_MIN_DSP_COLORFILTER 8
@@ -502,7 +491,7 @@ static void planar422p_to_yuv_packed(const unsigned char *in,
static void s2255_reset_dsppower(struct s2255_dev *dev)
{
- s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b01, NULL, 0, 1);
+ s2255_vendor_req(dev, 0x40, 0x0000, 0x0001, NULL, 0, 1);
msleep(10);
s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1);
msleep(600);
@@ -856,7 +845,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->driver, "s2255", sizeof(cap->driver));
strlcpy(cap->card, "s2255", sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->version = S2255_VERSION;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
}
@@ -1984,9 +1972,8 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
video_device_node_name(&channel->vdev));
}
- printk(KERN_INFO "Sensoray 2255 V4L driver Revision: %d.%d\n",
- S2255_MAJOR_VERSION,
- S2255_MINOR_VERSION);
+ printk(KERN_INFO "Sensoray 2255 V4L driver Revision: %s\n",
+ S2255_VERSION);
/* if no channels registered, return error and probe will fail*/
if (atomic_read(&dev->num_channels) == 0) {
v4l2_device_unregister(&dev->v4l2_dev);
@@ -2302,15 +2289,12 @@ static int s2255_board_init(struct s2255_dev *dev)
/* query the firmware */
fw_ver = s2255_get_fx2fw(dev);
- printk(KERN_INFO "2255 usb firmware version %d.%d\n",
+ printk(KERN_INFO "s2255: usb firmware version %d.%d\n",
(fw_ver >> 8) & 0xff,
fw_ver & 0xff);
if (fw_ver < S2255_CUR_USB_FWVER)
- dev_err(&dev->udev->dev,
- "usb firmware not up to date %d.%d\n",
- (fw_ver >> 8) & 0xff,
- fw_ver & 0xff);
+ printk(KERN_INFO "s2255: newer USB firmware available\n");
for (j = 0; j < MAX_CHANNELS; j++) {
struct s2255_channel *channel = &dev->channel[j];
@@ -2721,3 +2705,4 @@ module_exit(usb_s2255_exit);
MODULE_DESCRIPTION("Sensoray 2255 Video for Linux driver");
MODULE_AUTHOR("Dean Anderson (Sensoray Company Inc.)");
MODULE_LICENSE("GPL");
+MODULE_VERSION(S2255_VERSION);
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index d142b40ea64..0d730e55605 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -1,7 +1,7 @@
/*
- * Samsung S5P SoC series camera interface (camera capture) driver
+ * Samsung S5P/EXYNOS4 SoC series camera interface (camera capture) driver
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd
+ * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
@@ -262,12 +261,7 @@ static unsigned int get_plane_size(struct fimc_frame *fr, unsigned int plane)
{
if (!fr || plane >= fr->fmt->memplanes)
return 0;
-
- dbg("%s: w: %d. h: %d. depth[%d]: %d",
- __func__, fr->width, fr->height, plane, fr->fmt->depth[plane]);
-
return fr->f_width * fr->f_height * fr->fmt->depth[plane] / 8;
-
}
static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
@@ -283,24 +277,14 @@ static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
*num_planes = fmt->memplanes;
- dbg("%s, buffer count=%d, plane count=%d",
- __func__, *num_buffers, *num_planes);
-
for (i = 0; i < fmt->memplanes; i++) {
sizes[i] = get_plane_size(&ctx->d_frame, i);
- dbg("plane: %u, plane_size: %lu", i, sizes[i]);
allocators[i] = ctx->fimc_dev->alloc_ctx;
}
return 0;
}
-static int buffer_init(struct vb2_buffer *vb)
-{
- /* TODO: */
- return 0;
-}
-
static int buffer_prepare(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
@@ -380,7 +364,6 @@ static struct vb2_ops fimc_capture_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
- .buf_init = buffer_init,
.wait_prepare = fimc_unlock,
.wait_finish = fimc_lock,
.start_streaming = start_streaming,
@@ -467,7 +450,6 @@ static int fimc_vidioc_querycap_capture(struct file *file, void *priv,
strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
- cap->version = KERNEL_VERSION(1, 0, 0);
cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VIDEO_CAPTURE_MPLANE;
@@ -903,6 +885,7 @@ err_vd_reg:
err_v4l2_reg:
v4l2_device_unregister(v4l2_dev);
err_info:
+ kfree(ctx);
dev_err(&fimc->pdev->dev, "failed to install\n");
return ret;
}
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index dc91a8511af..aa550666cc0 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -1,9 +1,8 @@
/*
- * S5P camera interface (video postprocessor) driver
+ * Samsung S5P/EXYNOS4 SoC series camera interface (video postprocessor) driver
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd
- *
- * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
+ * Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
@@ -13,7 +12,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
@@ -42,7 +40,6 @@ static struct fimc_fmt fimc_formats[] = {
.color = S5P_FIMC_RGB565,
.memplanes = 1,
.colplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_BE,
.flags = FMT_FLAGS_M2M,
}, {
.name = "BGR666",
@@ -232,11 +229,7 @@ static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
return 0;
}
}
-
*shift = 0, *ratio = 1;
-
- dbg("s: %d, t: %d, shift: %d, ratio: %d",
- src, tar, *shift, *ratio);
return 0;
}
@@ -268,10 +261,8 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
err("invalid source size: %d x %d", sx, sy);
return -EINVAL;
}
-
sc->real_width = sx;
sc->real_height = sy;
- dbg("sx= %d, sy= %d, tx= %d, ty= %d", sx, sy, tx, ty);
ret = fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor);
if (ret)
@@ -711,22 +702,18 @@ static int fimc_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
f = ctx_get_frame(ctx, vq->type);
if (IS_ERR(f))
return PTR_ERR(f);
-
/*
* Return number of non-contiguous planes (plane buffers)
* depending on the configured color format.
*/
- if (f->fmt)
- *num_planes = f->fmt->memplanes;
+ if (!f->fmt)
+ return -EINVAL;
+ *num_planes = f->fmt->memplanes;
for (i = 0; i < f->fmt->memplanes; i++) {
- sizes[i] = (f->width * f->height * f->fmt->depth[i]) >> 3;
+ sizes[i] = (f->f_width * f->f_height * f->fmt->depth[i]) / 8;
allocators[i] = ctx->fimc_dev->alloc_ctx;
}
-
- if (*num_buffers == 0)
- *num_buffers = 1;
-
return 0;
}
@@ -786,7 +773,6 @@ static int fimc_m2m_querycap(struct file *file, void *priv,
strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
- cap->version = KERNEL_VERSION(1, 0, 0);
cap->capabilities = V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
@@ -852,7 +838,7 @@ struct fimc_fmt *find_format(struct v4l2_format *f, unsigned int mask)
for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
fmt = &fimc_formats[i];
- if (fmt->fourcc == f->fmt.pix.pixelformat &&
+ if (fmt->fourcc == f->fmt.pix_mp.pixelformat &&
(fmt->flags & mask))
break;
}
@@ -1949,3 +1935,4 @@ module_exit(fimc_exit);
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
MODULE_DESCRIPTION("S5P FIMC camera host interface/video postprocessor driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.1");
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index 3beb1e5320c..1f70772daaf 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -1,7 +1,5 @@
/*
- * Copyright (c) 2010 Samsung Electronics
- *
- * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -135,9 +133,10 @@ enum fimc_color_fmt {
* @name: format description
* @fourcc: the fourcc code for this format, 0 if not applicable
* @color: the corresponding fimc_color_fmt
- * @depth: per plane driver's private 'number of bits per pixel'
* @memplanes: number of physically non-contiguous data planes
* @colplanes: number of physically contiguous data planes
+ * @depth: per plane driver's private 'number of bits per pixel'
+ * @flags: flags indicating which operation mode format applies to
*/
struct fimc_fmt {
enum v4l2_mbus_pixelcode mbus_code;
@@ -171,7 +170,7 @@ struct fimc_dma_offset {
};
/**
- * struct fimc_effect - the configuration data for the "Arbitrary" image effect
+ * struct fimc_effect - color effect information
* @type: effect type
* @pat_cb: cr value when type is "arbitrary"
* @pat_cr: cr value when type is "arbitrary"
@@ -184,7 +183,6 @@ struct fimc_effect {
/**
* struct fimc_scaler - the configuration data for FIMC internal scaler
- *
* @scaleup_h: flag indicating scaling up horizontally
* @scaleup_v: flag indicating scaling up vertically
* @copy_mode: flag indicating transparent DMA transfer (no scaling
@@ -220,7 +218,6 @@ struct fimc_scaler {
/**
* struct fimc_addr - the FIMC physical address set for DMA
- *
* @y: luminance plane physical address
* @cb: Cb plane physical address
* @cr: Cr plane physical address
@@ -234,6 +231,7 @@ struct fimc_addr {
/**
* struct fimc_vid_buffer - the driver's video buffer
* @vb: v4l videobuf buffer
+ * @list: linked list structure for buffer queue
* @paddr: precalculated physical address set
* @index: buffer index for the output DMA engine
*/
@@ -254,11 +252,10 @@ struct fimc_vid_buffer {
* @offs_v: image vertical pixel offset
* @width: image pixel width
* @height: image pixel height
- * @paddr: image frame buffer physical addresses
- * @buf_cnt: number of buffers depending on a color format
* @payload: image size in bytes (w x h x bpp)
- * @color: color format
+ * @paddr: image frame buffer physical addresses
* @dma_offset: DMA offset in bytes
+ * @fmt: fimc color format pointer
*/
struct fimc_frame {
u32 f_width;
@@ -390,21 +387,22 @@ struct fimc_ctx;
/**
* struct fimc_dev - abstraction for FIMC entity
- *
* @slock: the spinlock protecting this data structure
* @lock: the mutex protecting this data structure
* @pdev: pointer to the FIMC platform device
* @pdata: pointer to the device platform data
+ * @variant: the IP variant information
* @id: FIMC device index (0..FIMC_MAX_DEVS)
* @num_clocks: the number of clocks managed by this device instance
- * @clock[]: the clocks required for FIMC operation
+ * @clock: clocks required for FIMC operation
* @regs: the mapped hardware registers
* @regs_res: the resource claimed for IO registers
- * @irq: interrupt number of the FIMC subdevice
- * @irq_queue:
+ * @irq: FIMC interrupt number
+ * @irq_queue: interrupt handler waitqueue
* @m2m: memory-to-memory V4L2 device information
* @vid_cap: camera capture device information
* @state: flags used to synchronize m2m and capture mode operation
+ * @alloc_ctx: videobuf2 memory allocator context
*/
struct fimc_dev {
spinlock_t slock;
@@ -427,8 +425,7 @@ struct fimc_dev {
/**
* fimc_ctx - the device context data
- *
- * @lock: mutex protecting this data structure
+ * @slock: spinlock protecting this data structure
* @s_frame: source frame properties
* @d_frame: destination frame properties
* @out_order_1p: output 1-plane YCBCR order
diff --git a/drivers/media/video/s5p-mfc/Makefile b/drivers/media/video/s5p-mfc/Makefile
new file mode 100644
index 00000000000..d0663409af0
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o
+s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o s5p_mfc_opr.o
+s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o
+s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_cmd.o
+s5p-mfc-y += s5p_mfc_pm.o s5p_mfc_shm.o
diff --git a/drivers/media/video/s5p-mfc/regs-mfc.h b/drivers/media/video/s5p-mfc/regs-mfc.h
new file mode 100644
index 00000000000..053a8a872fd
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/regs-mfc.h
@@ -0,0 +1,413 @@
+/*
+ * Register definition file for Samsung MFC V5.1 Interface (FIMV) driver
+ *
+ * Kamil Debski, Copyright (c) 2010 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef _REGS_FIMV_H
+#define _REGS_FIMV_H
+
+#define S5P_FIMV_REG_SIZE (S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR)
+#define S5P_FIMV_REG_COUNT ((S5P_FIMV_END_ADDR - S5P_FIMV_START_ADDR) / 4)
+
+/* Number of bits that the buffer address should be shifted for particular
+ * MFC buffers. */
+#define S5P_FIMV_START_ADDR 0x0000
+#define S5P_FIMV_END_ADDR 0xe008
+
+#define S5P_FIMV_SW_RESET 0x0000
+#define S5P_FIMV_RISC_HOST_INT 0x0008
+
+/* Command from HOST to RISC */
+#define S5P_FIMV_HOST2RISC_CMD 0x0030
+#define S5P_FIMV_HOST2RISC_ARG1 0x0034
+#define S5P_FIMV_HOST2RISC_ARG2 0x0038
+#define S5P_FIMV_HOST2RISC_ARG3 0x003c
+#define S5P_FIMV_HOST2RISC_ARG4 0x0040
+
+/* Command from RISC to HOST */
+#define S5P_FIMV_RISC2HOST_CMD 0x0044
+#define S5P_FIMV_RISC2HOST_CMD_MASK 0x1FFFF
+#define S5P_FIMV_RISC2HOST_ARG1 0x0048
+#define S5P_FIMV_RISC2HOST_ARG2 0x004c
+#define S5P_FIMV_RISC2HOST_ARG3 0x0050
+#define S5P_FIMV_RISC2HOST_ARG4 0x0054
+
+#define S5P_FIMV_FW_VERSION 0x0058
+#define S5P_FIMV_SYS_MEM_SZ 0x005c
+#define S5P_FIMV_FW_STATUS 0x0080
+
+/* Memory controller register */
+#define S5P_FIMV_MC_DRAMBASE_ADR_A 0x0508
+#define S5P_FIMV_MC_DRAMBASE_ADR_B 0x050c
+#define S5P_FIMV_MC_STATUS 0x0510
+
+/* Common register */
+#define S5P_FIMV_COMMON_BASE_A 0x0600
+#define S5P_FIMV_COMMON_BASE_B 0x0700
+
+/* Decoder */
+#define S5P_FIMV_DEC_CHROMA_ADR (S5P_FIMV_COMMON_BASE_A)
+#define S5P_FIMV_DEC_LUMA_ADR (S5P_FIMV_COMMON_BASE_B)
+
+/* H.264 decoding */
+#define S5P_FIMV_H264_VERT_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
+ /* vertical neighbor motion vector */
+#define S5P_FIMV_H264_NB_IP_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
+ /* neighbor pixels for intra pred */
+#define S5P_FIMV_H264_MV_ADR (S5P_FIMV_COMMON_BASE_B + 0x80)
+ /* H264 motion vector */
+
+/* MPEG4 decoding */
+#define S5P_FIMV_MPEG4_NB_DCAC_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
+ /* neighbor AC/DC coeff. */
+#define S5P_FIMV_MPEG4_UP_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
+ /* upper neighbor motion vector */
+#define S5P_FIMV_MPEG4_SA_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x94)
+ /* subseq. anchor motion vector */
+#define S5P_FIMV_MPEG4_OT_LINE_ADR (S5P_FIMV_COMMON_BASE_A + 0x98)
+ /* overlap transform line */
+#define S5P_FIMV_MPEG4_SP_ADR (S5P_FIMV_COMMON_BASE_A + 0xa8)
+ /* syntax parser */
+
+/* H.263 decoding */
+#define S5P_FIMV_H263_NB_DCAC_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
+#define S5P_FIMV_H263_UP_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
+#define S5P_FIMV_H263_SA_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x94)
+#define S5P_FIMV_H263_OT_LINE_ADR (S5P_FIMV_COMMON_BASE_A + 0x98)
+
+/* VC-1 decoding */
+#define S5P_FIMV_VC1_NB_DCAC_ADR (S5P_FIMV_COMMON_BASE_A + 0x8c)
+#define S5P_FIMV_VC1_UP_NB_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x90)
+#define S5P_FIMV_VC1_SA_MV_ADR (S5P_FIMV_COMMON_BASE_A + 0x94)
+#define S5P_FIMV_VC1_OT_LINE_ADR (S5P_FIMV_COMMON_BASE_A + 0x98)
+#define S5P_FIMV_VC1_BITPLANE3_ADR (S5P_FIMV_COMMON_BASE_A + 0x9c)
+ /* bitplane3 */
+#define S5P_FIMV_VC1_BITPLANE2_ADR (S5P_FIMV_COMMON_BASE_A + 0xa0)
+ /* bitplane2 */
+#define S5P_FIMV_VC1_BITPLANE1_ADR (S5P_FIMV_COMMON_BASE_A + 0xa4)
+ /* bitplane1 */
+
+/* Encoder */
+#define S5P_FIMV_ENC_REF0_LUMA_ADR (S5P_FIMV_COMMON_BASE_A + 0x1c)
+#define S5P_FIMV_ENC_REF1_LUMA_ADR (S5P_FIMV_COMMON_BASE_A + 0x20)
+ /* reconstructed luma */
+#define S5P_FIMV_ENC_REF0_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B)
+#define S5P_FIMV_ENC_REF1_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x04)
+ /* reconstructed chroma */
+#define S5P_FIMV_ENC_REF2_LUMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x10)
+#define S5P_FIMV_ENC_REF2_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x08)
+#define S5P_FIMV_ENC_REF3_LUMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x14)
+#define S5P_FIMV_ENC_REF3_CHROMA_ADR (S5P_FIMV_COMMON_BASE_B + 0x0c)
+
+/* H.264 encoding */
+#define S5P_FIMV_H264_UP_MV_ADR (S5P_FIMV_COMMON_BASE_A)
+ /* upper motion vector */
+#define S5P_FIMV_H264_NBOR_INFO_ADR (S5P_FIMV_COMMON_BASE_A + 0x04)
+ /* entropy engine's neighbor info. */
+#define S5P_FIMV_H264_UP_INTRA_MD_ADR (S5P_FIMV_COMMON_BASE_A + 0x08)
+ /* upper intra MD */
+#define S5P_FIMV_H264_COZERO_FLAG_ADR (S5P_FIMV_COMMON_BASE_A + 0x10)
+ /* direct cozero flag */
+#define S5P_FIMV_H264_UP_INTRA_PRED_ADR (S5P_FIMV_COMMON_BASE_B + 0x40)
+ /* upper intra PRED */
+
+/* H.263 encoding */
+#define S5P_FIMV_H263_UP_MV_ADR (S5P_FIMV_COMMON_BASE_A)
+ /* upper motion vector */
+#define S5P_FIMV_H263_ACDC_COEF_ADR (S5P_FIMV_COMMON_BASE_A + 0x04)
+ /* upper Q coeff. */
+
+/* MPEG4 encoding */
+#define S5P_FIMV_MPEG4_UP_MV_ADR (S5P_FIMV_COMMON_BASE_A)
+ /* upper motion vector */
+#define S5P_FIMV_MPEG4_ACDC_COEF_ADR (S5P_FIMV_COMMON_BASE_A + 0x04)
+ /* upper Q coeff. */
+#define S5P_FIMV_MPEG4_COZERO_FLAG_ADR (S5P_FIMV_COMMON_BASE_A + 0x10)
+ /* direct cozero flag */
+
+#define S5P_FIMV_ENC_REF_B_LUMA_ADR 0x062c /* ref B Luma addr */
+#define S5P_FIMV_ENC_REF_B_CHROMA_ADR 0x0630 /* ref B Chroma addr */
+
+#define S5P_FIMV_ENC_CUR_LUMA_ADR 0x0718 /* current Luma addr */
+#define S5P_FIMV_ENC_CUR_CHROMA_ADR 0x071C /* current Chroma addr */
+
+/* Codec common register */
+#define S5P_FIMV_ENC_HSIZE_PX 0x0818 /* frame width at encoder */
+#define S5P_FIMV_ENC_VSIZE_PX 0x081c /* frame height at encoder */
+#define S5P_FIMV_ENC_PROFILE 0x0830 /* profile register */
+#define S5P_FIMV_ENC_PROFILE_H264_MAIN 0
+#define S5P_FIMV_ENC_PROFILE_H264_HIGH 1
+#define S5P_FIMV_ENC_PROFILE_H264_BASELINE 2
+#define S5P_FIMV_ENC_PROFILE_MPEG4_SIMPLE 0
+#define S5P_FIMV_ENC_PROFILE_MPEG4_ADVANCED_SIMPLE 1
+#define S5P_FIMV_ENC_PIC_STRUCT 0x083c /* picture field/frame flag */
+#define S5P_FIMV_ENC_LF_CTRL 0x0848 /* loop filter control */
+#define S5P_FIMV_ENC_ALPHA_OFF 0x084c /* loop filter alpha offset */
+#define S5P_FIMV_ENC_BETA_OFF 0x0850 /* loop filter beta offset */
+#define S5P_FIMV_MR_BUSIF_CTRL 0x0854 /* hidden, bus interface ctrl */
+#define S5P_FIMV_ENC_PXL_CACHE_CTRL 0x0a00 /* pixel cache control */
+
+/* Channel & stream interface register */
+#define S5P_FIMV_SI_RTN_CHID 0x2000 /* Return CH inst ID register */
+#define S5P_FIMV_SI_CH0_INST_ID 0x2040 /* codec instance ID */
+#define S5P_FIMV_SI_CH1_INST_ID 0x2080 /* codec instance ID */
+/* Decoder */
+#define S5P_FIMV_SI_VRESOL 0x2004 /* vertical res of decoder */
+#define S5P_FIMV_SI_HRESOL 0x2008 /* horizontal res of decoder */
+#define S5P_FIMV_SI_BUF_NUMBER 0x200c /* number of frames in the
+ decoded pic */
+#define S5P_FIMV_SI_DISPLAY_Y_ADR 0x2010 /* luma addr of displayed pic */
+#define S5P_FIMV_SI_DISPLAY_C_ADR 0x2014 /* chroma addr of displayed pic */
+#define S5P_FIMV_SI_CONSUMED_BYTES 0x2018 /* Consumed number of bytes to
+ decode a frame */
+#define S5P_FIMV_SI_DISPLAY_STATUS 0x201c /* status of decoded picture */
+
+#define S5P_FIMV_SI_CH0_SB_ST_ADR 0x2044 /* start addr of stream buf */
+#define S5P_FIMV_SI_CH0_SB_FRM_SIZE 0x2048 /* size of stream buf */
+#define S5P_FIMV_SI_CH0_DESC_ADR 0x204c /* addr of descriptor buf */
+#define S5P_FIMV_SI_CH0_CPB_SIZE 0x2058 /* max size of coded pic. buf */
+#define S5P_FIMV_SI_CH0_DESC_SIZE 0x205c /* max size of descriptor buf */
+
+#define S5P_FIMV_SI_CH1_SB_ST_ADR 0x2084 /* start addr of stream buf */
+#define S5P_FIMV_SI_CH1_SB_FRM_SIZE 0x2088 /* size of stream buf */
+#define S5P_FIMV_SI_CH1_DESC_ADR 0x208c /* addr of descriptor buf */
+#define S5P_FIMV_SI_CH1_CPB_SIZE 0x2098 /* max size of coded pic. buf */
+#define S5P_FIMV_SI_CH1_DESC_SIZE 0x209c /* max size of descriptor buf */
+
+#define S5P_FIMV_CRC_LUMA0 0x2030 /* luma crc data per frame
+ (top field) */
+#define S5P_FIMV_CRC_CHROMA0 0x2034 /* chroma crc data per frame
+ (top field) */
+#define S5P_FIMV_CRC_LUMA1 0x2038 /* luma crc data per bottom
+ field */
+#define S5P_FIMV_CRC_CHROMA1 0x203c /* chroma crc data per bottom
+ field */
+
+/* Display status */
+#define S5P_FIMV_DEC_STATUS_DECODING_ONLY 0
+#define S5P_FIMV_DEC_STATUS_DECODING_DISPLAY 1
+#define S5P_FIMV_DEC_STATUS_DISPLAY_ONLY 2
+#define S5P_FIMV_DEC_STATUS_DECODING_EMPTY 3
+#define S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK 7
+#define S5P_FIMV_DEC_STATUS_PROGRESSIVE (0<<3)
+#define S5P_FIMV_DEC_STATUS_INTERLACE (1<<3)
+#define S5P_FIMV_DEC_STATUS_INTERLACE_MASK (1<<3)
+#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_TWO (0<<4)
+#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_FOUR (1<<4)
+#define S5P_FIMV_DEC_STATUS_CRC_NUMBER_MASK (1<<4)
+#define S5P_FIMV_DEC_STATUS_CRC_GENERATED (1<<5)
+#define S5P_FIMV_DEC_STATUS_CRC_NOT_GENERATED (0<<5)
+#define S5P_FIMV_DEC_STATUS_CRC_MASK (1<<5)
+
+#define S5P_FIMV_DEC_STATUS_RESOLUTION_MASK (3<<4)
+#define S5P_FIMV_DEC_STATUS_RESOLUTION_INC (1<<4)
+#define S5P_FIMV_DEC_STATUS_RESOLUTION_DEC (2<<4)
+
+/* Decode frame address */
+#define S5P_FIMV_DECODE_Y_ADR 0x2024
+#define S5P_FIMV_DECODE_C_ADR 0x2028
+
+/* Decoded frame type */
+#define S5P_FIMV_DECODE_FRAME_TYPE 0x2020
+#define S5P_FIMV_DECODE_FRAME_MASK 7
+
+#define S5P_FIMV_DECODE_FRAME_SKIPPED 0
+#define S5P_FIMV_DECODE_FRAME_I_FRAME 1
+#define S5P_FIMV_DECODE_FRAME_P_FRAME 2
+#define S5P_FIMV_DECODE_FRAME_B_FRAME 3
+#define S5P_FIMV_DECODE_FRAME_OTHER_FRAME 4
+
+/* Sizes of buffers required for decoding */
+#define S5P_FIMV_DEC_NB_IP_SIZE (32 * 1024)
+#define S5P_FIMV_DEC_VERT_NB_MV_SIZE (16 * 1024)
+#define S5P_FIMV_DEC_NB_DCAC_SIZE (16 * 1024)
+#define S5P_FIMV_DEC_UPNB_MV_SIZE (68 * 1024)
+#define S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE (136 * 1024)
+#define S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE (32 * 1024)
+#define S5P_FIMV_DEC_VC1_BITPLANE_SIZE (2 * 1024)
+#define S5P_FIMV_DEC_STX_PARSER_SIZE (68 * 1024)
+
+#define S5P_FIMV_DEC_BUF_ALIGN (8 * 1024)
+#define S5P_FIMV_ENC_BUF_ALIGN (8 * 1024)
+#define S5P_FIMV_NV12M_HALIGN 16
+#define S5P_FIMV_NV12M_LVALIGN 16
+#define S5P_FIMV_NV12M_CVALIGN 8
+#define S5P_FIMV_NV12MT_HALIGN 128
+#define S5P_FIMV_NV12MT_VALIGN 32
+#define S5P_FIMV_NV12M_SALIGN 2048
+#define S5P_FIMV_NV12MT_SALIGN 8192
+
+/* Sizes of buffers required for encoding */
+#define S5P_FIMV_ENC_UPMV_SIZE 0x10000
+#define S5P_FIMV_ENC_COLFLG_SIZE 0x10000
+#define S5P_FIMV_ENC_INTRAMD_SIZE 0x10000
+#define S5P_FIMV_ENC_INTRAPRED_SIZE 0x4000
+#define S5P_FIMV_ENC_NBORINFO_SIZE 0x10000
+#define S5P_FIMV_ENC_ACDCCOEF_SIZE 0x10000
+
+/* Encoder */
+#define S5P_FIMV_ENC_SI_STRM_SIZE 0x2004 /* stream size */
+#define S5P_FIMV_ENC_SI_PIC_CNT 0x2008 /* picture count */
+#define S5P_FIMV_ENC_SI_WRITE_PTR 0x200c /* write pointer */
+#define S5P_FIMV_ENC_SI_SLICE_TYPE 0x2010 /* slice type(I/P/B/IDR) */
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_NON_CODED 0
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_I 1
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_P 2
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_B 3
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_SKIPPED 4
+#define S5P_FIMV_ENC_SI_SLICE_TYPE_OTHERS 5
+#define S5P_FIMV_ENCODED_Y_ADDR 0x2014 /* the addr of the encoded
+ luma pic */
+#define S5P_FIMV_ENCODED_C_ADDR 0x2018 /* the addr of the encoded
+ chroma pic */
+
+#define S5P_FIMV_ENC_SI_CH0_SB_ADR 0x2044 /* addr of stream buf */
+#define S5P_FIMV_ENC_SI_CH0_SB_SIZE 0x204c /* size of stream buf */
+#define S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR 0x2050 /* current Luma addr */
+#define S5P_FIMV_ENC_SI_CH0_CUR_C_ADR 0x2054 /* current Chroma addr */
+#define S5P_FIMV_ENC_SI_CH0_FRAME_INS 0x2058 /* frame insertion */
+
+#define S5P_FIMV_ENC_SI_CH1_SB_ADR 0x2084 /* addr of stream buf */
+#define S5P_FIMV_ENC_SI_CH1_SB_SIZE 0x208c /* size of stream buf */
+#define S5P_FIMV_ENC_SI_CH1_CUR_Y_ADR 0x2090 /* current Luma addr */
+#define S5P_FIMV_ENC_SI_CH1_CUR_C_ADR 0x2094 /* current Chroma addr */
+#define S5P_FIMV_ENC_SI_CH1_FRAME_INS 0x2098 /* frame insertion */
+
+#define S5P_FIMV_ENC_PIC_TYPE_CTRL 0xc504 /* pic type level control */
+#define S5P_FIMV_ENC_B_RECON_WRITE_ON 0xc508 /* B frame recon write ctrl */
+#define S5P_FIMV_ENC_MSLICE_CTRL 0xc50c /* multi slice control */
+#define S5P_FIMV_ENC_MSLICE_MB 0xc510 /* MB number in the one slice */
+#define S5P_FIMV_ENC_MSLICE_BIT 0xc514 /* bit count for one slice */
+#define S5P_FIMV_ENC_CIR_CTRL 0xc518 /* number of intra refresh MB */
+#define S5P_FIMV_ENC_MAP_FOR_CUR 0xc51c /* linear or tiled mode */
+#define S5P_FIMV_ENC_PADDING_CTRL 0xc520 /* padding control */
+
+#define S5P_FIMV_ENC_RC_CONFIG 0xc5a0 /* RC config */
+#define S5P_FIMV_ENC_RC_BIT_RATE 0xc5a8 /* bit rate */
+#define S5P_FIMV_ENC_RC_QBOUND 0xc5ac /* max/min QP */
+#define S5P_FIMV_ENC_RC_RPARA 0xc5b0 /* rate control reaction coeff */
+#define S5P_FIMV_ENC_RC_MB_CTRL 0xc5b4 /* MB adaptive scaling */
+
+/* Encoder for H264 only */
+#define S5P_FIMV_ENC_H264_ENTROPY_MODE 0xd004 /* CAVLC or CABAC */
+#define S5P_FIMV_ENC_H264_ALPHA_OFF 0xd008 /* loop filter alpha offset */
+#define S5P_FIMV_ENC_H264_BETA_OFF 0xd00c /* loop filter beta offset */
+#define S5P_FIMV_ENC_H264_NUM_OF_REF 0xd010 /* number of reference for P/B */
+#define S5P_FIMV_ENC_H264_TRANS_FLAG 0xd034 /* 8x8 transform flag in PPS &
+ high profile */
+
+#define S5P_FIMV_ENC_RC_FRAME_RATE 0xd0d0 /* frame rate */
+
+/* Encoder for MPEG4 only */
+#define S5P_FIMV_ENC_MPEG4_QUART_PXL 0xe008 /* qpel interpolation ctrl */
+
+/* Additional */
+#define S5P_FIMV_SI_CH0_DPB_CONF_CTRL 0x2068 /* DPB Config Control Register */
+#define S5P_FIMV_SLICE_INT_MASK 1
+#define S5P_FIMV_SLICE_INT_SHIFT 31
+#define S5P_FIMV_DDELAY_ENA_SHIFT 30
+#define S5P_FIMV_DDELAY_VAL_MASK 0xff
+#define S5P_FIMV_DDELAY_VAL_SHIFT 16
+#define S5P_FIMV_DPB_COUNT_MASK 0xffff
+#define S5P_FIMV_DPB_FLUSH_MASK 1
+#define S5P_FIMV_DPB_FLUSH_SHIFT 14
+
+
+#define S5P_FIMV_SI_CH0_RELEASE_BUF 0x2060 /* DPB release buffer register */
+#define S5P_FIMV_SI_CH0_HOST_WR_ADR 0x2064 /* address of shared memory */
+
+/* Codec numbers */
+#define S5P_FIMV_CODEC_NONE -1
+
+#define S5P_FIMV_CODEC_H264_DEC 0
+#define S5P_FIMV_CODEC_VC1_DEC 1
+#define S5P_FIMV_CODEC_MPEG4_DEC 2
+#define S5P_FIMV_CODEC_MPEG2_DEC 3
+#define S5P_FIMV_CODEC_H263_DEC 4
+#define S5P_FIMV_CODEC_VC1RCV_DEC 5
+
+#define S5P_FIMV_CODEC_H264_ENC 16
+#define S5P_FIMV_CODEC_MPEG4_ENC 17
+#define S5P_FIMV_CODEC_H263_ENC 18
+
+/* Channel Control Register */
+#define S5P_FIMV_CH_SEQ_HEADER 1
+#define S5P_FIMV_CH_FRAME_START 2
+#define S5P_FIMV_CH_LAST_FRAME 3
+#define S5P_FIMV_CH_INIT_BUFS 4
+#define S5P_FIMV_CH_FRAME_START_REALLOC 5
+#define S5P_FIMV_CH_MASK 7
+#define S5P_FIMV_CH_SHIFT 16
+
+
+/* Host to RISC command */
+#define S5P_FIMV_H2R_CMD_EMPTY 0
+#define S5P_FIMV_H2R_CMD_OPEN_INSTANCE 1
+#define S5P_FIMV_H2R_CMD_CLOSE_INSTANCE 2
+#define S5P_FIMV_H2R_CMD_SYS_INIT 3
+#define S5P_FIMV_H2R_CMD_FLUSH 4
+#define S5P_FIMV_H2R_CMD_SLEEP 5
+#define S5P_FIMV_H2R_CMD_WAKEUP 6
+
+#define S5P_FIMV_R2H_CMD_EMPTY 0
+#define S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET 1
+#define S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET 2
+#define S5P_FIMV_R2H_CMD_RSV_RET 3
+#define S5P_FIMV_R2H_CMD_SEQ_DONE_RET 4
+#define S5P_FIMV_R2H_CMD_FRAME_DONE_RET 5
+#define S5P_FIMV_R2H_CMD_SLICE_DONE_RET 6
+#define S5P_FIMV_R2H_CMD_ENC_COMPLETE_RET 7
+#define S5P_FIMV_R2H_CMD_SYS_INIT_RET 8
+#define S5P_FIMV_R2H_CMD_FW_STATUS_RET 9
+#define S5P_FIMV_R2H_CMD_SLEEP_RET 10
+#define S5P_FIMV_R2H_CMD_WAKEUP_RET 11
+#define S5P_FIMV_R2H_CMD_FLUSH_RET 12
+#define S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET 15
+#define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16
+#define S5P_FIMV_R2H_CMD_ERR_RET 32
+
+/* Error handling defines */
+#define S5P_FIMV_ERR_WARNINGS_START 145
+#define S5P_FIMV_ERR_DEC_MASK 0xFFFF
+#define S5P_FIMV_ERR_DEC_SHIFT 0
+#define S5P_FIMV_ERR_DSPL_MASK 0xFFFF0000
+#define S5P_FIMV_ERR_DSPL_SHIFT 16
+
+/* Shared memory registers' offsets */
+
+/* An offset of the start position in the stream when
+ * the start position is not aligned */
+#define S5P_FIMV_SHARED_CROP_INFO_H 0x0020
+#define S5P_FIMV_SHARED_CROP_LEFT_MASK 0xFFFF
+#define S5P_FIMV_SHARED_CROP_LEFT_SHIFT 0
+#define S5P_FIMV_SHARED_CROP_RIGHT_MASK 0xFFFF0000
+#define S5P_FIMV_SHARED_CROP_RIGHT_SHIFT 16
+#define S5P_FIMV_SHARED_CROP_INFO_V 0x0024
+#define S5P_FIMV_SHARED_CROP_TOP_MASK 0xFFFF
+#define S5P_FIMV_SHARED_CROP_TOP_SHIFT 0
+#define S5P_FIMV_SHARED_CROP_BOTTOM_MASK 0xFFFF0000
+#define S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT 16
+#define S5P_FIMV_SHARED_SET_FRAME_TAG 0x0004
+#define S5P_FIMV_SHARED_GET_FRAME_TAG_TOP 0x0008
+#define S5P_FIMV_SHARED_GET_FRAME_TAG_BOT 0x000C
+#define S5P_FIMV_SHARED_START_BYTE_NUM 0x0018
+#define S5P_FIMV_SHARED_RC_VOP_TIMING 0x0030
+#define S5P_FIMV_SHARED_LUMA_DPB_SIZE 0x0064
+#define S5P_FIMV_SHARED_CHROMA_DPB_SIZE 0x0068
+#define S5P_FIMV_SHARED_MV_SIZE 0x006C
+#define S5P_FIMV_SHARED_PIC_TIME_TOP 0x0010
+#define S5P_FIMV_SHARED_PIC_TIME_BOTTOM 0x0014
+#define S5P_FIMV_SHARED_EXT_ENC_CONTROL 0x0028
+#define S5P_FIMV_SHARED_P_B_FRAME_QP 0x0070
+#define S5P_FIMV_SHARED_ASPECT_RATIO_IDC 0x0074
+#define S5P_FIMV_SHARED_EXTENDED_SAR 0x0078
+#define S5P_FIMV_SHARED_H264_I_PERIOD 0x009C
+#define S5P_FIMV_SHARED_RC_CONTROL_CONFIG 0x00A0
+
+#endif /* _REGS_FIMV_H */
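The two crop words above each pack a pair of 16-bit fields into one 32-bit value via the mask/shift pairs defined here. A minimal sketch (not part of the patch; the helper name is hypothetical) of how the horizontal word could be unpacked, assuming crop_h already holds the raw value read from offset S5P_FIMV_SHARED_CROP_INFO_H in the shared memory area:

/* Hedged sketch: unpack the horizontal crop word into its two 16-bit fields.
 * u16/u32 come from <linux/types.h>; the function name is illustrative only. */
static inline void s5p_fimv_unpack_crop_h(u32 crop_h, u16 *left, u16 *right)
{
	*left  = (crop_h & S5P_FIMV_SHARED_CROP_LEFT_MASK)
			>> S5P_FIMV_SHARED_CROP_LEFT_SHIFT;
	*right = (crop_h & S5P_FIMV_SHARED_CROP_RIGHT_MASK)
			>> S5P_FIMV_SHARED_CROP_RIGHT_SHIFT;
}

The vertical word at S5P_FIMV_SHARED_CROP_INFO_V would be split the same way with the TOP/BOTTOM masks.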
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc.c b/drivers/media/video/s5p-mfc/s5p_mfc.c
new file mode 100644
index 00000000000..7dc7eab58b3
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc.c
@@ -0,0 +1,1274 @@
+/*
+ * Samsung S5P Multi Format Codec v 5.1
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/videobuf2-core.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_ctrl.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_dec.h"
+#include "s5p_mfc_enc.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_pm.h"
+#include "s5p_mfc_shm.h"
+
+#define S5P_MFC_NAME "s5p-mfc"
+#define S5P_MFC_DEC_NAME "s5p-mfc-dec"
+#define S5P_MFC_ENC_NAME "s5p-mfc-enc"
+
+int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");
+
+/* Helper functions for interrupt processing */
+/* Remove from hw execution round robin */
+static void clear_work_bit(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ spin_lock(&dev->condlock);
+ clear_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock(&dev->condlock);
+}
+
+/* Wake up context wait_queue */
+static void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason,
+ unsigned int err)
+{
+ ctx->int_cond = 1;
+ ctx->int_type = reason;
+ ctx->int_err = err;
+ wake_up(&ctx->queue);
+}
+
+/* Wake up device wait_queue */
+static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
+ unsigned int err)
+{
+ dev->int_cond = 1;
+ dev->int_type = reason;
+ dev->int_err = err;
+ wake_up(&dev->queue);
+}
+
+void s5p_mfc_watchdog(unsigned long arg)
+{
+ struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;
+
+ if (test_bit(0, &dev->hw_lock))
+ atomic_inc(&dev->watchdog_cnt);
+ if (atomic_read(&dev->watchdog_cnt) >= MFC_WATCHDOG_CNT) {
+ /* This means that hw is busy and no interrupts have been
+ * generated by hw for MFC_WATCHDOG_CNT consecutive runs of
+ * this watchdog timer. This usually means a serious hw
+ * error. Now it is time to kill all instances and
+ * reset the MFC. */
+ mfc_err("Time out during waiting for HW\n");
+ queue_work(dev->watchdog_workqueue, &dev->watchdog_work);
+ }
+ dev->watchdog_timer.expires = jiffies +
+ msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
+ add_timer(&dev->watchdog_timer);
+}
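The watchdog above only rearms itself; the one-time setup of the timer lives in probe code that is outside this hunk. A minimal sketch of that setup under the timer API of this kernel generation, assuming dev has already been allocated (field names match the function above; the placement in probe is an assumption):

/* Hedged sketch: one-time watchdog initialisation, assumed to be done in
 * probe (not shown in this diff). */
init_timer(&dev->watchdog_timer);
dev->watchdog_timer.data = (unsigned long)dev;	/* delivered as 'arg' above */
dev->watchdog_timer.function = s5p_mfc_watchdog;
atomic_set(&dev->watchdog_cnt, 0);
/* The timer is first armed when the first instance is opened; see the
 * add_timer() call in s5p_mfc_open() below. */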
+
+static void s5p_mfc_watchdog_worker(struct work_struct *work)
+{
+ struct s5p_mfc_dev *dev;
+ struct s5p_mfc_ctx *ctx;
+ unsigned long flags;
+ int mutex_locked;
+ int i, ret;
+
+ dev = container_of(work, struct s5p_mfc_dev, watchdog_work);
+
+ mfc_err("Driver timeout error handling\n");
+ /* Lock the mutex that protects open and release.
+ * This is necessary as they may load and unload firmware. */
+ mutex_locked = mutex_trylock(&dev->mfc_mutex);
+ if (!mutex_locked)
+ mfc_err("Error: some instance may be closing/opening\n");
+ spin_lock_irqsave(&dev->irqlock, flags);
+
+ s5p_mfc_clock_off();
+
+ for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
+ ctx = dev->ctx[i];
+ if (!ctx)
+ continue;
+ ctx->state = MFCINST_ERROR;
+ s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+ s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+ clear_work_bit(ctx);
+ wake_up_ctx(ctx, S5P_FIMV_R2H_CMD_ERR_RET, 0);
+ }
+ clear_bit(0, &dev->hw_lock);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ /* Double check if there is at least one instance running.
+ * If no instance is in memory then no firmware should be present */
+ if (dev->num_inst > 0) {
+ ret = s5p_mfc_reload_firmware(dev);
+ if (ret) {
+ mfc_err("Failed to reload FW\n");
+ goto unlock;
+ }
+ s5p_mfc_clock_on();
+ ret = s5p_mfc_init_hw(dev);
+ if (ret)
+ mfc_err("Failed to reinit FW\n");
+ }
+unlock:
+ if (mutex_locked)
+ mutex_unlock(&dev->mfc_mutex);
+}
+
+static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (!vdev) {
+ mfc_err("failed to get video_device");
+ return MFCNODE_INVALID;
+ }
+ if (vdev->index == 0)
+ return MFCNODE_DECODER;
+ else if (vdev->index == 1)
+ return MFCNODE_ENCODER;
+ return MFCNODE_INVALID;
+}
+
+static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
+{
+ mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
+ mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
+ mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID);
+}
+
+static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_buf *dst_buf;
+
+ ctx->state = MFCINST_FINISHED;
+ ctx->sequence++;
+ while (!list_empty(&ctx->dst_queue)) {
+ dst_buf = list_entry(ctx->dst_queue.next,
+ struct s5p_mfc_buf, list);
+ mfc_debug(2, "Cleaning up buffer: %d\n",
+ dst_buf->b->v4l2_buf.index);
+ vb2_set_plane_payload(dst_buf->b, 0, 0);
+ vb2_set_plane_payload(dst_buf->b, 1, 0);
+ list_del(&dst_buf->list);
+ ctx->dst_queue_cnt--;
+ dst_buf->b->v4l2_buf.sequence = (ctx->sequence++);
+
+ if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) ==
+ s5p_mfc_read_shm(ctx, PIC_TIME_BOT))
+ dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
+ else
+ dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;
+
+ ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index);
+ vb2_buffer_done(dst_buf->b, VB2_BUF_STATE_DONE);
+ }
+}
+
+static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_buf, *src_buf;
+ size_t dec_y_addr = s5p_mfc_get_dec_y_adr();
+ unsigned int frame_type = s5p_mfc_get_frame_type();
+
+ /* Copy timestamp / timecode from decoded src to dst and set
+ appropriate flags */
+ src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
+ if (vb2_dma_contig_plane_paddr(dst_buf->b, 0) == dec_y_addr) {
+ memcpy(&dst_buf->b->v4l2_buf.timecode,
+ &src_buf->b->v4l2_buf.timecode,
+ sizeof(struct v4l2_timecode));
+ memcpy(&dst_buf->b->v4l2_buf.timestamp,
+ &src_buf->b->v4l2_buf.timestamp,
+ sizeof(struct timeval));
+ switch (frame_type) {
+ case S5P_FIMV_DECODE_FRAME_I_FRAME:
+ dst_buf->b->v4l2_buf.flags |=
+ V4L2_BUF_FLAG_KEYFRAME;
+ break;
+ case S5P_FIMV_DECODE_FRAME_P_FRAME:
+ dst_buf->b->v4l2_buf.flags |=
+ V4L2_BUF_FLAG_PFRAME;
+ break;
+ case S5P_FIMV_DECODE_FRAME_B_FRAME:
+ dst_buf->b->v4l2_buf.flags |=
+ V4L2_BUF_FLAG_BFRAME;
+ break;
+ }
+ break;
+ }
+ }
+}
+
+static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_buf;
+ size_t dspl_y_addr = s5p_mfc_get_dspl_y_adr();
+ unsigned int frame_type = s5p_mfc_get_frame_type();
+ unsigned int index;
+
+ /* If frame is same as previous then skip and do not dequeue */
+ if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
+ if (!ctx->after_packed_pb)
+ ctx->sequence++;
+ ctx->after_packed_pb = 0;
+ return;
+ }
+ ctx->sequence++;
+ /* The MFC returns the address of the buffer; now we have to
+ * check which videobuf it corresponds to */
+ list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
+ /* Check if this is the buffer we're looking for */
+ if (vb2_dma_contig_plane_paddr(dst_buf->b, 0) == dspl_y_addr) {
+ list_del(&dst_buf->list);
+ ctx->dst_queue_cnt--;
+ dst_buf->b->v4l2_buf.sequence = ctx->sequence;
+ if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) ==
+ s5p_mfc_read_shm(ctx, PIC_TIME_BOT))
+ dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
+ else
+ dst_buf->b->v4l2_buf.field =
+ V4L2_FIELD_INTERLACED;
+ vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size);
+ vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size);
+ clear_bit(dst_buf->b->v4l2_buf.index,
+ &ctx->dec_dst_flag);
+
+ vb2_buffer_done(dst_buf->b,
+ err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+
+ index = dst_buf->b->v4l2_buf.index;
+ break;
+ }
+ }
+}
+
+/* Handle frame decoding interrupt */
+static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
+ unsigned int reason, unsigned int err)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned int dst_frame_status;
+ struct s5p_mfc_buf *src_buf;
+ unsigned long flags;
+ unsigned int res_change;
+
+ unsigned int index;
+
+ dst_frame_status = s5p_mfc_get_dspl_status()
+ & S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
+ res_change = s5p_mfc_get_dspl_status()
+ & S5P_FIMV_DEC_STATUS_RESOLUTION_MASK;
+ mfc_debug(2, "Frame Status: %x\n", dst_frame_status);
+ if (ctx->state == MFCINST_RES_CHANGE_INIT)
+ ctx->state = MFCINST_RES_CHANGE_FLUSH;
+ if (res_change) {
+ ctx->state = MFCINST_RES_CHANGE_INIT;
+ s5p_mfc_clear_int_flags(dev);
+ wake_up_ctx(ctx, reason, err);
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+ s5p_mfc_clock_off();
+ s5p_mfc_try_run(dev);
+ return;
+ }
+ if (ctx->dpb_flush_flag)
+ ctx->dpb_flush_flag = 0;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ /* All frames remaining in the buffer have been extracted */
+ if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) {
+ if (ctx->state == MFCINST_RES_CHANGE_FLUSH) {
+ s5p_mfc_handle_frame_all_extracted(ctx);
+ ctx->state = MFCINST_RES_CHANGE_END;
+ goto leave_handle_frame;
+ } else {
+ s5p_mfc_handle_frame_all_extracted(ctx);
+ }
+ }
+
+ if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY ||
+ dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_ONLY)
+ s5p_mfc_handle_frame_copy_time(ctx);
+
+ /* A frame has been decoded and is in the buffer */
+ if (dst_frame_status == S5P_FIMV_DEC_STATUS_DISPLAY_ONLY ||
+ dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY) {
+ s5p_mfc_handle_frame_new(ctx, err);
+ } else {
+ mfc_debug(2, "No frame decode\n");
+ }
+ /* Mark source buffer as complete */
+ if (dst_frame_status != S5P_FIMV_DEC_STATUS_DISPLAY_ONLY
+ && !list_empty(&ctx->src_queue)) {
+ src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
+ list);
+ ctx->consumed_stream += s5p_mfc_get_consumed_stream();
+ if (ctx->codec_mode != S5P_FIMV_CODEC_H264_DEC &&
+ s5p_mfc_get_frame_type() == S5P_FIMV_DECODE_FRAME_P_FRAME
+ && ctx->consumed_stream + STUFF_BYTE <
+ src_buf->b->v4l2_planes[0].bytesused) {
+ /* Run MFC again on the same buffer */
+ mfc_debug(2, "Running again the same buffer\n");
+ ctx->after_packed_pb = 1;
+ } else {
+ index = src_buf->b->v4l2_buf.index;
+ mfc_debug(2, "MFC needs next buffer\n");
+ ctx->consumed_stream = 0;
+ list_del(&src_buf->list);
+ ctx->src_queue_cnt--;
+ if (s5p_mfc_err_dec(err) > 0)
+ vb2_buffer_done(src_buf->b, VB2_BUF_STATE_ERROR);
+ else
+ vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE);
+ }
+ }
+leave_handle_frame:
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
+ || ctx->dst_queue_cnt < ctx->dpb_count)
+ clear_work_bit(ctx);
+ s5p_mfc_clear_int_flags(dev);
+ wake_up_ctx(ctx, reason, err);
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+ s5p_mfc_clock_off();
+ s5p_mfc_try_run(dev);
+}
+
+/* Error handling for interrupt */
+static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
+ unsigned int reason, unsigned int err)
+{
+ struct s5p_mfc_dev *dev;
+ unsigned long flags;
+
+ /* If no context is available then all necessary
+ * processing has been done. */
+ if (ctx == 0)
+ return;
+
+ dev = ctx->dev;
+ mfc_err("Interrupt Error: %08x\n", err);
+ s5p_mfc_clear_int_flags(dev);
+ wake_up_dev(dev, reason, err);
+
+ /* Error recovery is dependent on the state of context */
+ switch (ctx->state) {
+ case MFCINST_INIT:
+ /* This error had to happen while acquiring the instance */
+ case MFCINST_GOT_INST:
+ /* This error had to happen while parsing the header */
+ case MFCINST_HEAD_PARSED:
+ /* This error had to happen while setting dst buffers */
+ case MFCINST_RETURN_INST:
+ /* This error had to happen while releasing instance */
+ clear_work_bit(ctx);
+ wake_up_ctx(ctx, reason, err);
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+ s5p_mfc_clock_off();
+ ctx->state = MFCINST_ERROR;
+ break;
+ case MFCINST_FINISHING:
+ case MFCINST_FINISHED:
+ case MFCINST_RUNNING:
+ /* It is highly probable that an error occurred
+ * while decoding a frame */
+ clear_work_bit(ctx);
+ ctx->state = MFCINST_ERROR;
+ /* Mark all dst buffers as having an error */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+ /* Mark all src buffers as having an error */
+ s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+ s5p_mfc_clock_off();
+ break;
+ default:
+ mfc_err("Encountered an error interrupt which had not been handled\n");
+ break;
+ }
+ return;
+}
+
+/* Header parsing interrupt handling */
+static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
+ unsigned int reason, unsigned int err)
+{
+ struct s5p_mfc_dev *dev;
+ unsigned int guard_width, guard_height;
+
+ if (ctx == 0)
+ return;
+ dev = ctx->dev;
+ if (ctx->c_ops->post_seq_start) {
+ if (ctx->c_ops->post_seq_start(ctx))
+ mfc_err("post_seq_start() failed\n");
+ } else {
+ ctx->img_width = s5p_mfc_get_img_width();
+ ctx->img_height = s5p_mfc_get_img_height();
+
+ ctx->buf_width = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12MT_HALIGN);
+ ctx->buf_height = ALIGN(ctx->img_height,
+ S5P_FIMV_NV12MT_VALIGN);
+ mfc_debug(2, "SEQ Done: Movie dimensions %dx%d, "
+ "buffer dimensions: %dx%d\n", ctx->img_width,
+ ctx->img_height, ctx->buf_width,
+ ctx->buf_height);
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
+ ctx->luma_size = ALIGN(ctx->buf_width *
+ ctx->buf_height, S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->chroma_size = ALIGN(ctx->buf_width *
+ ALIGN((ctx->img_height >> 1),
+ S5P_FIMV_NV12MT_VALIGN),
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->mv_size = ALIGN(ctx->buf_width *
+ ALIGN((ctx->buf_height >> 2),
+ S5P_FIMV_NV12MT_VALIGN),
+ S5P_FIMV_DEC_BUF_ALIGN);
+ } else {
+ guard_width = ALIGN(ctx->img_width + 24,
+ S5P_FIMV_NV12MT_HALIGN);
+ guard_height = ALIGN(ctx->img_height + 16,
+ S5P_FIMV_NV12MT_VALIGN);
+ ctx->luma_size = ALIGN(guard_width *
+ guard_height, S5P_FIMV_DEC_BUF_ALIGN);
+ guard_width = ALIGN(ctx->img_width + 16,
+ S5P_FIMV_NV12MT_HALIGN);
+ guard_height = ALIGN((ctx->img_height >> 1) + 4,
+ S5P_FIMV_NV12MT_VALIGN);
+ ctx->chroma_size = ALIGN(guard_width *
+ guard_height, S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->mv_size = 0;
+ }
+ ctx->dpb_count = s5p_mfc_get_dpb_count();
+ if (ctx->img_width == 0 || ctx->img_height == 0)
+ ctx->state = MFCINST_ERROR;
+ else
+ ctx->state = MFCINST_HEAD_PARSED;
+ }
+ s5p_mfc_clear_int_flags(dev);
+ clear_work_bit(ctx);
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+ s5p_mfc_clock_off();
+ s5p_mfc_try_run(dev);
+ wake_up_ctx(ctx, reason, err);
+}
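A hedged worked example of the H.264 sizing branch above (not part of the patch), for a 1920x1080 stream, where ALIGN(x, a) rounds x up to the next multiple of a:

/*
 * buf_width   = ALIGN(1920, S5P_FIMV_NV12MT_HALIGN = 128)          = 1920
 * buf_height  = ALIGN(1080, S5P_FIMV_NV12MT_VALIGN = 32)           = 1088
 * luma_size   = ALIGN(1920 * 1088, S5P_FIMV_DEC_BUF_ALIGN = 8192)  = 2088960
 * chroma_size = ALIGN(1920 * ALIGN(540, 32), 8192)                 = 1048576
 * mv_size     = ALIGN(1920 * ALIGN(272, 32), 8192)                 = 557056
 */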
+
+/* Header parsing interrupt handling */
+static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
+ unsigned int reason, unsigned int err)
+{
+ struct s5p_mfc_buf *src_buf;
+ struct s5p_mfc_dev *dev;
+ unsigned long flags;
+
+ if (ctx == 0)
+ return;
+ dev = ctx->dev;
+ s5p_mfc_clear_int_flags(dev);
+ ctx->int_type = reason;
+ ctx->int_err = err;
+ ctx->int_cond = 1;
+ spin_lock(&dev->condlock);
+ clear_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock(&dev->condlock);
+ if (err == 0) {
+ ctx->state = MFCINST_RUNNING;
+ if (!ctx->dpb_flush_flag) {
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (!list_empty(&ctx->src_queue)) {
+ src_buf = list_entry(ctx->src_queue.next,
+ struct s5p_mfc_buf, list);
+ list_del(&src_buf->list);
+ ctx->src_queue_cnt--;
+ vb2_buffer_done(src_buf->b,
+ VB2_BUF_STATE_DONE);
+ }
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ } else {
+ ctx->dpb_flush_flag = 0;
+ }
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+
+ s5p_mfc_clock_off();
+
+ wake_up(&ctx->queue);
+ s5p_mfc_try_run(dev);
+ } else {
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+
+ s5p_mfc_clock_off();
+
+ wake_up(&ctx->queue);
+ }
+}
+
+/* Interrupt processing */
+static irqreturn_t s5p_mfc_irq(int irq, void *priv)
+{
+ struct s5p_mfc_dev *dev = priv;
+ struct s5p_mfc_ctx *ctx;
+ unsigned int reason;
+ unsigned int err;
+
+ mfc_debug_enter();
+ /* Reset the timeout watchdog */
+ atomic_set(&dev->watchdog_cnt, 0);
+ ctx = dev->ctx[dev->curr_ctx];
+ /* Get the reason of interrupt and the error code */
+ reason = s5p_mfc_get_int_reason();
+ err = s5p_mfc_get_int_err();
+ mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
+ switch (reason) {
+ case S5P_FIMV_R2H_CMD_ERR_RET:
+ /* An error has occurred */
+ if (ctx->state == MFCINST_RUNNING &&
+ s5p_mfc_err_dec(err) >= S5P_FIMV_ERR_WARNINGS_START)
+ s5p_mfc_handle_frame(ctx, reason, err);
+ else
+ s5p_mfc_handle_error(ctx, reason, err);
+ clear_bit(0, &dev->enter_suspend);
+ break;
+
+ case S5P_FIMV_R2H_CMD_SLICE_DONE_RET:
+ case S5P_FIMV_R2H_CMD_FRAME_DONE_RET:
+ if (ctx->c_ops->post_frame_start) {
+ if (ctx->c_ops->post_frame_start(ctx))
+ mfc_err("post_frame_start() failed\n");
+ s5p_mfc_clear_int_flags(dev);
+ wake_up_ctx(ctx, reason, err);
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ BUG();
+ s5p_mfc_clock_off();
+ s5p_mfc_try_run(dev);
+ } else {
+ s5p_mfc_handle_frame(ctx, reason, err);
+ }
+ break;
+
+ case S5P_FIMV_R2H_CMD_SEQ_DONE_RET:
+ s5p_mfc_handle_seq_done(ctx, reason, err);
+ break;
+
+ case S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET:
+ ctx->inst_no = s5p_mfc_get_inst_no();
+ ctx->state = MFCINST_GOT_INST;
+ clear_work_bit(ctx);
+ wake_up(&ctx->queue);
+ goto irq_cleanup_hw;
+
+ case S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET:
+ clear_work_bit(ctx);
+ ctx->state = MFCINST_FREE;
+ wake_up(&ctx->queue);
+ goto irq_cleanup_hw;
+
+ case S5P_FIMV_R2H_CMD_SYS_INIT_RET:
+ case S5P_FIMV_R2H_CMD_FW_STATUS_RET:
+ case S5P_FIMV_R2H_CMD_SLEEP_RET:
+ case S5P_FIMV_R2H_CMD_WAKEUP_RET:
+ if (ctx)
+ clear_work_bit(ctx);
+ s5p_mfc_clear_int_flags(dev);
+ wake_up_dev(dev, reason, err);
+ clear_bit(0, &dev->hw_lock);
+ clear_bit(0, &dev->enter_suspend);
+ break;
+
+ case S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET:
+ s5p_mfc_handle_init_buffers(ctx, reason, err);
+ break;
+ default:
+ mfc_debug(2, "Unknown int reason\n");
+ s5p_mfc_clear_int_flags(dev);
+ }
+ mfc_debug_leave();
+ return IRQ_HANDLED;
+irq_cleanup_hw:
+ s5p_mfc_clear_int_flags(dev);
+ ctx->int_type = reason;
+ ctx->int_err = err;
+ ctx->int_cond = 1;
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ mfc_err("Failed to unlock hw\n");
+
+ s5p_mfc_clock_off();
+
+ s5p_mfc_try_run(dev);
+ mfc_debug(2, "Exit via irq_cleanup_hw\n");
+ return IRQ_HANDLED;
+}
+
+/* Open an MFC node */
+static int s5p_mfc_open(struct file *file)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_ctx *ctx = NULL;
+ struct vb2_queue *q;
+ unsigned long flags;
+ int ret = 0;
+
+ mfc_debug_enter();
+ dev->num_inst++; /* It is guarded by mfc_mutex in vfd */
+ /* Allocate memory for context */
+ ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+ if (!ctx) {
+ mfc_err("Not enough memory\n");
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+ ctx->dev = dev;
+ INIT_LIST_HEAD(&ctx->src_queue);
+ INIT_LIST_HEAD(&ctx->dst_queue);
+ ctx->src_queue_cnt = 0;
+ ctx->dst_queue_cnt = 0;
+ /* Get context number */
+ ctx->num = 0;
+ while (dev->ctx[ctx->num]) {
+ ctx->num++;
+ if (ctx->num >= MFC_NUM_CONTEXTS) {
+ mfc_err("Too many open contexts\n");
+ ret = -EBUSY;
+ goto err_no_ctx;
+ }
+ }
+ /* Mark context as idle */
+ spin_lock_irqsave(&dev->condlock, flags);
+ clear_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ dev->ctx[ctx->num] = ctx;
+ if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ ctx->type = MFCINST_DECODER;
+ ctx->c_ops = get_dec_codec_ops();
+ /* Setup ctrl handler */
+ ret = s5p_mfc_dec_ctrls_setup(ctx);
+ if (ret) {
+ mfc_err("Failed to setup mfc controls\n");
+ goto err_ctrls_setup;
+ }
+ } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ ctx->type = MFCINST_ENCODER;
+ ctx->c_ops = get_enc_codec_ops();
+ /* only for encoder */
+ INIT_LIST_HEAD(&ctx->ref_queue);
+ ctx->ref_queue_cnt = 0;
+ /* Setup ctrl handler */
+ ret = s5p_mfc_enc_ctrls_setup(ctx);
+ if (ret) {
+ mfc_err("Failed to setup mfc controls\n");
+ goto err_ctrls_setup;
+ }
+ } else {
+ ret = -ENOENT;
+ goto err_bad_node;
+ }
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ ctx->inst_no = -1;
+ /* Load firmware if this is the first instance */
+ if (dev->num_inst == 1) {
+ dev->watchdog_timer.expires = jiffies +
+ msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
+ add_timer(&dev->watchdog_timer);
+ ret = s5p_mfc_power_on();
+ if (ret < 0) {
+ mfc_err("power on failed\n");
+ goto err_pwr_enable;
+ }
+ s5p_mfc_clock_on();
+ ret = s5p_mfc_alloc_and_load_firmware(dev);
+ if (ret)
+ goto err_alloc_fw;
+ /* Init the FW */
+ ret = s5p_mfc_init_hw(dev);
+ if (ret)
+ goto err_init_hw;
+ s5p_mfc_clock_off();
+ }
+ /* Init videobuf2 queue for CAPTURE */
+ q = &ctx->vq_dst;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->drv_priv = &ctx->fh;
+ if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ q->io_modes = VB2_MMAP;
+ q->ops = get_dec_queue_ops();
+ } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = get_enc_queue_ops();
+ } else {
+ ret = -ENOENT;
+ goto err_queue_init;
+ }
+ q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
+ ret = vb2_queue_init(q);
+ if (ret) {
+ mfc_err("Failed to initialize videobuf2 queue(capture)\n");
+ goto err_queue_init;
+ }
+ /* Init videobuf2 queue for OUTPUT */
+ q = &ctx->vq_src;
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ q->io_modes = VB2_MMAP;
+ q->drv_priv = &ctx->fh;
+ if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ q->io_modes = VB2_MMAP;
+ q->ops = get_dec_queue_ops();
+ } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->ops = get_enc_queue_ops();
+ } else {
+ ret = -ENOENT;
+ goto err_queue_init;
+ }
+ q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
+ ret = vb2_queue_init(q);
+ if (ret) {
+ mfc_err("Failed to initialize videobuf2 queue(output)\n");
+ goto err_queue_init;
+ }
+ init_waitqueue_head(&ctx->queue);
+ mfc_debug_leave();
+ return ret;
+	/* Deinit when a failure occurred */
+err_queue_init:
+err_init_hw:
+ s5p_mfc_release_firmware(dev);
+err_alloc_fw:
+ dev->ctx[ctx->num] = 0;
+ del_timer_sync(&dev->watchdog_timer);
+ s5p_mfc_clock_off();
+err_pwr_enable:
+ if (dev->num_inst == 1) {
+ if (s5p_mfc_power_off() < 0)
+ mfc_err("power off failed\n");
+ s5p_mfc_release_firmware(dev);
+ }
+err_ctrls_setup:
+ s5p_mfc_dec_ctrls_delete(ctx);
+err_bad_node:
+err_no_ctx:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+err_alloc:
+ dev->num_inst--;
+ mfc_debug_leave();
+ return ret;
+}
+
+/* Release MFC context */
+static int s5p_mfc_release(struct file *file)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+
+ mfc_debug_enter();
+ s5p_mfc_clock_on();
+ vb2_queue_release(&ctx->vq_src);
+ vb2_queue_release(&ctx->vq_dst);
+ /* Mark context as idle */
+ spin_lock_irqsave(&dev->condlock, flags);
+ clear_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+	/* If the instance was initialised then
+	 * return the instance and free resources */
+ if (ctx->inst_no != MFC_NO_INSTANCE_SET) {
+ mfc_debug(2, "Has to free instance\n");
+ ctx->state = MFCINST_RETURN_INST;
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_try_run(dev);
+		/* Wait until instance is returned or timeout occurred */
+ if (s5p_mfc_wait_for_done_ctx
+ (ctx, S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET, 0)) {
+ s5p_mfc_clock_off();
+ mfc_err("Err returning instance\n");
+ }
+ mfc_debug(2, "After free instance\n");
+ /* Free resources */
+ s5p_mfc_release_codec_buffers(ctx);
+ s5p_mfc_release_instance_buffer(ctx);
+ if (ctx->type == MFCINST_DECODER)
+ s5p_mfc_release_dec_desc_buffer(ctx);
+
+ ctx->inst_no = MFC_NO_INSTANCE_SET;
+ }
+	/* If this context was the last one running on the hardware,
+	 * release the hardware lock */
+ if (dev->curr_ctx == ctx->num)
+ clear_bit(0, &dev->hw_lock);
+ dev->num_inst--;
+ if (dev->num_inst == 0) {
+ mfc_debug(2, "Last instance - release firmware\n");
+		/* Reset the hardware before releasing the firmware */
+ s5p_mfc_reset(dev);
+ s5p_mfc_release_firmware(dev);
+ del_timer_sync(&dev->watchdog_timer);
+ if (s5p_mfc_power_off() < 0)
+ mfc_err("Power off failed\n");
+ }
+ mfc_debug(2, "Shutting down clock\n");
+ s5p_mfc_clock_off();
+ dev->ctx[ctx->num] = 0;
+ s5p_mfc_dec_ctrls_delete(ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ mfc_debug_leave();
+ return 0;
+}
+
+/* Poll */
+static unsigned int s5p_mfc_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct vb2_queue *src_q, *dst_q;
+ struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
+ unsigned int rc = 0;
+ unsigned long flags;
+
+ src_q = &ctx->vq_src;
+ dst_q = &ctx->vq_dst;
+ /*
+	 * There has to be at least one buffer queued on each queued_list,
+	 * which means the buffer is either already in the driver or waiting
+	 * for the driver to claim it and start processing.
+ */
+ if ((!src_q->streaming || list_empty(&src_q->queued_list))
+ && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
+ rc = POLLERR;
+ goto end;
+ }
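+	/*
+	 * mfc_mutex is the video_device lock (vfd->lock) taken by the v4l2
+	 * core around this file operation; release it while sleeping in
+	 * poll_wait() so that other threads can still queue and dequeue
+	 * buffers.
+	 */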
+ mutex_unlock(&dev->mfc_mutex);
+ poll_wait(file, &src_q->done_wq, wait);
+ poll_wait(file, &dst_q->done_wq, wait);
+ mutex_lock(&dev->mfc_mutex);
+ spin_lock_irqsave(&src_q->done_lock, flags);
+ if (!list_empty(&src_q->done_list))
+ src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
+ done_entry);
+ if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
+ || src_vb->state == VB2_BUF_STATE_ERROR))
+ rc |= POLLOUT | POLLWRNORM;
+ spin_unlock_irqrestore(&src_q->done_lock, flags);
+ spin_lock_irqsave(&dst_q->done_lock, flags);
+ if (!list_empty(&dst_q->done_list))
+ dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
+ done_entry);
+ if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
+ || dst_vb->state == VB2_BUF_STATE_ERROR))
+ rc |= POLLIN | POLLRDNORM;
+ spin_unlock_irqrestore(&dst_q->done_lock, flags);
+end:
+ return rc;
+}
+
+/* Mmap */
+static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ int ret;
+ if (offset < DST_QUEUE_OFF_BASE) {
+		mfc_debug(2, "mmapping source\n");
+ ret = vb2_mmap(&ctx->vq_src, vma);
+ } else { /* capture */
+		mfc_debug(2, "mmapping destination\n");
+ vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
+ ret = vb2_mmap(&ctx->vq_dst, vma);
+ }
+ return ret;
+}
+
+/* v4l2 ops */
+static const struct v4l2_file_operations s5p_mfc_fops = {
+ .owner = THIS_MODULE,
+ .open = s5p_mfc_open,
+ .release = s5p_mfc_release,
+ .poll = s5p_mfc_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = s5p_mfc_mmap,
+};
+
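+/* Helper for device_find_child(), used below to look up the "s5p-mfc-l" and
+ * "s5p-mfc-r" memory bank child devices, which are presumably registered by
+ * the platform code. */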
+static int match_child(struct device *dev, void *data)
+{
+ if (!dev_name(dev))
+ return 0;
+ return !strcmp(dev_name(dev), (char *)data);
+}
+
+
+/* MFC probe function */
+static int __devinit s5p_mfc_probe(struct platform_device *pdev)
+{
+ struct s5p_mfc_dev *dev;
+ struct video_device *vfd;
+ struct resource *res;
+ int ret;
+
+ pr_debug("%s++\n", __func__);
+ dev = kzalloc(sizeof *dev, GFP_KERNEL);
+ if (!dev) {
+ dev_err(&pdev->dev, "Not enough memory for MFC device\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&dev->irqlock);
+ spin_lock_init(&dev->condlock);
+ dev->plat_dev = pdev;
+ if (!dev->plat_dev) {
+ dev_err(&pdev->dev, "No platform data specified\n");
+ ret = -ENODEV;
+ goto err_dev;
+ }
+
+ ret = s5p_mfc_init_pm(dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get mfc clock source\n");
+ goto err_clk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get memory region resource\n");
+ ret = -ENOENT;
+ goto err_res;
+ }
+
+ dev->mfc_mem = request_mem_region(res->start, resource_size(res),
+ pdev->name);
+ if (dev->mfc_mem == NULL) {
+ dev_err(&pdev->dev, "failed to get memory region\n");
+ ret = -ENOENT;
+ goto err_mem_reg;
+ }
+ dev->regs_base = ioremap(dev->mfc_mem->start, resource_size(dev->mfc_mem));
+ if (dev->regs_base == NULL) {
+ dev_err(&pdev->dev, "failed to ioremap address region\n");
+ ret = -ENOENT;
+ goto err_ioremap;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ ret = -ENOENT;
+ goto err_get_res;
+ }
+ dev->irq = res->start;
+ ret = request_irq(dev->irq, s5p_mfc_irq, IRQF_DISABLED, pdev->name,
+ dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
+ goto err_req_irq;
+ }
+
+ dev->mem_dev_l = device_find_child(&dev->plat_dev->dev, "s5p-mfc-l",
+ match_child);
+ if (!dev->mem_dev_l) {
+ mfc_err("Mem child (L) device get failed\n");
+ ret = -ENODEV;
+ goto err_find_child;
+ }
+ dev->mem_dev_r = device_find_child(&dev->plat_dev->dev, "s5p-mfc-r",
+ match_child);
+ if (!dev->mem_dev_r) {
+ mfc_err("Mem child (R) device get failed\n");
+ ret = -ENODEV;
+ goto err_find_child;
+ }
+
+ dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
+ if (IS_ERR_OR_NULL(dev->alloc_ctx[0])) {
+ ret = PTR_ERR(dev->alloc_ctx[0]);
+ goto err_mem_init_ctx_0;
+ }
+ dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
+ if (IS_ERR_OR_NULL(dev->alloc_ctx[1])) {
+ ret = PTR_ERR(dev->alloc_ctx[1]);
+ goto err_mem_init_ctx_1;
+ }
+
+ mutex_init(&dev->mfc_mutex);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret)
+ goto err_v4l2_dev_reg;
+ init_waitqueue_head(&dev->queue);
+
+ /* decoder */
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto err_dec_alloc;
+ }
+	vfd->fops = &s5p_mfc_fops;
+	vfd->ioctl_ops = get_dec_v4l2_ioctl_ops();
+	vfd->release = video_device_release;
+ vfd->lock = &dev->mfc_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
+ dev->vfd_dec = vfd;
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ video_device_release(vfd);
+ goto err_dec_reg;
+ }
+ v4l2_info(&dev->v4l2_dev,
+ "decoder registered as /dev/video%d\n", vfd->num);
+ video_set_drvdata(vfd, dev);
+
+ /* encoder */
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto err_enc_alloc;
+ }
+	vfd->fops = &s5p_mfc_fops;
+	vfd->ioctl_ops = get_enc_v4l2_ioctl_ops();
+	vfd->release = video_device_release;
+ vfd->lock = &dev->mfc_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
+ dev->vfd_enc = vfd;
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ video_device_release(vfd);
+ goto err_enc_reg;
+ }
+ v4l2_info(&dev->v4l2_dev,
+ "encoder registered as /dev/video%d\n", vfd->num);
+ video_set_drvdata(vfd, dev);
+ platform_set_drvdata(pdev, dev);
+
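+	/*
+	 * Watchdog setup: the timer is armed when the first instance is
+	 * opened, the interrupt handler resets watchdog_cnt on every
+	 * interrupt, and once the counter reaches MFC_WATCHDOG_CNT the
+	 * worker presumably treats the hardware as locked up (see
+	 * s5p_mfc_watchdog_worker()).
+	 */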
+ dev->hw_lock = 0;
+ dev->watchdog_workqueue = create_singlethread_workqueue(S5P_MFC_NAME);
+ INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
+ atomic_set(&dev->watchdog_cnt, 0);
+ init_timer(&dev->watchdog_timer);
+ dev->watchdog_timer.data = (unsigned long)dev;
+ dev->watchdog_timer.function = s5p_mfc_watchdog;
+
+ pr_debug("%s--\n", __func__);
+ return 0;
+
+/* Deinit MFC if probe had failed */
+err_enc_reg:
+ video_device_release(dev->vfd_enc);
+err_enc_alloc:
+ video_unregister_device(dev->vfd_dec);
+err_dec_reg:
+ video_device_release(dev->vfd_dec);
+err_dec_alloc:
+ v4l2_device_unregister(&dev->v4l2_dev);
+err_v4l2_dev_reg:
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
+err_mem_init_ctx_1:
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
+err_mem_init_ctx_0:
+err_find_child:
+ free_irq(dev->irq, dev);
+err_req_irq:
+err_get_res:
+ iounmap(dev->regs_base);
+ dev->regs_base = NULL;
+err_ioremap:
+ release_resource(dev->mfc_mem);
+ kfree(dev->mfc_mem);
+err_mem_reg:
+err_res:
+ s5p_mfc_final_pm(dev);
+err_clk:
+err_dev:
+ kfree(dev);
+ pr_debug("%s-- with error\n", __func__);
+ return ret;
+
+}
+
+/* Remove the driver */
+static int __devexit s5p_mfc_remove(struct platform_device *pdev)
+{
+ struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);
+
+ v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);
+
+ del_timer_sync(&dev->watchdog_timer);
+ flush_workqueue(dev->watchdog_workqueue);
+ destroy_workqueue(dev->watchdog_workqueue);
+
+ video_unregister_device(dev->vfd_enc);
+ video_unregister_device(dev->vfd_dec);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
+ vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
+
+ free_irq(dev->irq, dev);
+ iounmap(dev->regs_base);
+ if (dev->mfc_mem) {
+ release_resource(dev->mfc_mem);
+ kfree(dev->mfc_mem);
+ dev->mfc_mem = NULL;
+ }
+ s5p_mfc_final_pm(dev);
+ kfree(dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int s5p_mfc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+ int ret;
+
+ if (m_dev->num_inst == 0)
+ return 0;
+	if (test_and_set_bit(0, &m_dev->enter_suspend) != 0) {
+ mfc_err("Error: going to suspend for a second time\n");
+ return -EIO;
+ }
+
+	/* Check if we're processing, then wait if necessary. */
+ while (test_and_set_bit(0, &m_dev->hw_lock) != 0) {
+ /* Try and lock the HW */
+ /* Wait on the interrupt waitqueue */
+ ret = wait_event_interruptible_timeout(m_dev->queue,
+ m_dev->int_cond || m_dev->ctx[m_dev->curr_ctx]->int_cond,
+ msecs_to_jiffies(MFC_INT_TIMEOUT));
+
+ if (ret == 0) {
+ mfc_err("Waiting for hardware to finish timed out\n");
+ return -EIO;
+ }
+ }
+	return s5p_mfc_sleep(m_dev);
+}
+
+static int s5p_mfc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+
+ if (m_dev->num_inst == 0)
+ return 0;
+ return s5p_mfc_wakeup(m_dev);
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int s5p_mfc_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+
+ atomic_set(&m_dev->pm.power, 0);
+ return 0;
+}
+
+static int s5p_mfc_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
+ int pre_power;
+
+ if (!m_dev->alloc_ctx)
+ return 0;
+ pre_power = atomic_read(&m_dev->pm.power);
+ atomic_set(&m_dev->pm.power, 1);
+ return 0;
+}
+#endif
+
+/* Power management */
+static const struct dev_pm_ops s5p_mfc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
+ SET_RUNTIME_PM_OPS(s5p_mfc_runtime_suspend, s5p_mfc_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver s5p_mfc_pdrv = {
+ .probe = s5p_mfc_probe,
+ .remove = __devexit_p(s5p_mfc_remove),
+ .driver = {
+ .name = S5P_MFC_NAME,
+ .owner = THIS_MODULE,
+ .pm = &s5p_mfc_pm_ops
+ },
+};
+
+static char banner[] __initdata =
+ "S5P MFC V4L2 Driver, (C) 2011 Samsung Electronics\n";
+
+static int __init s5p_mfc_init(void)
+{
+ int ret;
+
+ pr_info("%s", banner);
+ ret = platform_driver_register(&s5p_mfc_pdrv);
+ if (ret)
+		pr_err("Platform driver registration failed.\n");
+ return ret;
+}
+
+static void __exit s5p_mfc_exit(void)
+{
+ platform_driver_unregister(&s5p_mfc_pdrv);
+}
+
+module_init(s5p_mfc_init);
+module_exit(s5p_mfc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
+MODULE_DESCRIPTION("Samsung S5P Multi Format Codec V4L2 driver");
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_cmd.c b/drivers/media/video/s5p-mfc/s5p_mfc_cmd.c
new file mode 100644
index 00000000000..f0665ed1a52
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_cmd.c
@@ -0,0 +1,120 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_cmd.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "regs-mfc.h"
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+
+/* This function is used to send a command to the MFC */
+static int s5p_mfc_cmd_host2risc(struct s5p_mfc_dev *dev, int cmd,
+ struct s5p_mfc_cmd_args *args)
+{
+ int cur_cmd;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
+ /* wait until host to risc command register becomes 'H2R_CMD_EMPTY' */
+ do {
+ if (time_after(jiffies, timeout)) {
+ mfc_err("Timeout while waiting for hardware\n");
+ return -EIO;
+ }
+ cur_cmd = mfc_read(dev, S5P_FIMV_HOST2RISC_CMD);
+ } while (cur_cmd != S5P_FIMV_H2R_CMD_EMPTY);
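+	/*
+	 * Load the four argument registers before the command register;
+	 * writing HOST2RISC_CMD is what makes the firmware pick the
+	 * command up, so the arguments have to be in place by then.
+	 */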
+ mfc_write(dev, args->arg[0], S5P_FIMV_HOST2RISC_ARG1);
+ mfc_write(dev, args->arg[1], S5P_FIMV_HOST2RISC_ARG2);
+ mfc_write(dev, args->arg[2], S5P_FIMV_HOST2RISC_ARG3);
+ mfc_write(dev, args->arg[3], S5P_FIMV_HOST2RISC_ARG4);
+ /* Issue the command */
+ mfc_write(dev, cmd, S5P_FIMV_HOST2RISC_CMD);
+ return 0;
+}
+
+/* Initialize the MFC */
+int s5p_mfc_sys_init_cmd(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_cmd_args h2r_args;
+
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ h2r_args.arg[0] = dev->fw_size;
+ return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_SYS_INIT, &h2r_args);
+}
+
+/* Suspend the MFC hardware */
+int s5p_mfc_sleep_cmd(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_cmd_args h2r_args;
+
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_SLEEP, &h2r_args);
+}
+
+/* Wake up the MFC hardware */
+int s5p_mfc_wakeup_cmd(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_cmd_args h2r_args;
+
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ return s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_WAKEUP, &h2r_args);
+}
+
+
+int s5p_mfc_open_inst_cmd(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_cmd_args h2r_args;
+ int ret;
+
+ /* Preparing decoding - getting instance number */
+ mfc_debug(2, "Getting instance number (codec: %d)\n", ctx->codec_mode);
+ dev->curr_ctx = ctx->num;
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ h2r_args.arg[0] = ctx->codec_mode;
+ h2r_args.arg[1] = 0; /* no crc & no pixelcache */
+ h2r_args.arg[2] = ctx->ctx_ofs;
+ h2r_args.arg[3] = ctx->ctx_size;
+ ret = s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE,
+ &h2r_args);
+ if (ret) {
+ mfc_err("Failed to create a new instance\n");
+ ctx->state = MFCINST_ERROR;
+ }
+ return ret;
+}
+
+int s5p_mfc_close_inst_cmd(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_cmd_args h2r_args;
+ int ret;
+
+ if (ctx->state == MFCINST_FREE) {
+ mfc_err("Instance already returned\n");
+ ctx->state = MFCINST_ERROR;
+ return -EINVAL;
+ }
+ /* Closing decoding instance */
+ mfc_debug(2, "Returning instance number %d\n", ctx->inst_no);
+ dev->curr_ctx = ctx->num;
+ memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
+ h2r_args.arg[0] = ctx->inst_no;
+ ret = s5p_mfc_cmd_host2risc(dev, S5P_FIMV_H2R_CMD_CLOSE_INSTANCE,
+ &h2r_args);
+ if (ret) {
+ mfc_err("Failed to return an instance\n");
+ ctx->state = MFCINST_ERROR;
+ return -EINVAL;
+ }
+ return 0;
+}
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_cmd.h b/drivers/media/video/s5p-mfc/s5p_mfc_cmd.h
new file mode 100644
index 00000000000..5ceebfe6131
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_cmd.h
@@ -0,0 +1,30 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_cmd.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_CMD_H_
+#define S5P_MFC_CMD_H_
+
+#include "s5p_mfc_common.h"
+
+#define MAX_H2R_ARG 4
+
+struct s5p_mfc_cmd_args {
+ unsigned int arg[MAX_H2R_ARG];
+};
+
+int s5p_mfc_sys_init_cmd(struct s5p_mfc_dev *dev);
+int s5p_mfc_sleep_cmd(struct s5p_mfc_dev *dev);
+int s5p_mfc_wakeup_cmd(struct s5p_mfc_dev *dev);
+int s5p_mfc_open_inst_cmd(struct s5p_mfc_ctx *ctx);
+int s5p_mfc_close_inst_cmd(struct s5p_mfc_ctx *ctx);
+
+#endif /* S5P_MFC_CMD_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_common.h b/drivers/media/video/s5p-mfc/s5p_mfc_common.h
new file mode 100644
index 00000000000..91146fa622e
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_common.h
@@ -0,0 +1,572 @@
+/*
+ * Samsung S5P Multi Format Codec v 5.0
+ *
+ * This file contains definitions of enums and structs used by the codec
+ * driver.
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#ifndef S5P_MFC_COMMON_H_
+#define S5P_MFC_COMMON_H_
+
+#include "regs-mfc.h"
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+
+/* Definitions related to MFC memory */
+
+/* Offset base used to differentiate between CAPTURE and OUTPUT
+ * while mmapping */
+#define DST_QUEUE_OFF_BASE (TASK_SIZE / 2)
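+/* The base is presumably added to the offsets reported for CAPTURE buffers
+ * (e.g. by the querybuf handlers) so that s5p_mfc_mmap() can tell the two
+ * queues apart; the mmap handler strips it again before calling vb2_mmap()
+ * on the CAPTURE queue. */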
+
+/* Offset used by the hardware to store addresses */
+#define MFC_OFFSET_SHIFT 11
+
+#define FIRMWARE_ALIGN 0x20000 /* 128KB */
+#define MFC_H264_CTX_BUF_SIZE 0x96000 /* 600KB per H264 instance */
+#define MFC_CTX_BUF_SIZE 0x2800 /* 10KB per instance */
+#define DESC_BUF_SIZE 0x20000 /* 128KB for DESC buffer */
+#define SHARED_BUF_SIZE 0x2000 /* 8KB for shared buffer */
+
+#define DEF_CPB_SIZE		0x40000		/* 256KB */
+
+#define MFC_BANK1_ALLOC_CTX 0
+#define MFC_BANK2_ALLOC_CTX 1
+
+#define MFC_BANK1_ALIGN_ORDER 13
+#define MFC_BANK2_ALIGN_ORDER 13
+#define MFC_BASE_ALIGN_ORDER 17
+
+#include <media/videobuf2-dma-contig.h>
+
+static inline dma_addr_t s5p_mfc_mem_cookie(void *a, void *b)
+{
+ /* Same functionality as the vb2_dma_contig_plane_paddr */
+ dma_addr_t *paddr = vb2_dma_contig_memops.cookie(b);
+
+ return *paddr;
+}
+
+/* MFC definitions */
+#define MFC_MAX_EXTRA_DPB 5
+#define MFC_MAX_BUFFERS 32
+#define MFC_NUM_CONTEXTS 4
+/* Interrupt timeout */
+#define MFC_INT_TIMEOUT 2000
+/* Busy wait timeout */
+#define MFC_BW_TIMEOUT 500
+/* Watchdog interval */
+#define MFC_WATCHDOG_INTERVAL 1000
+/* Number of watchdog executions after which a lockup is assumed */
+#define MFC_WATCHDOG_CNT 10
+#define MFC_NO_INSTANCE_SET -1
+#define MFC_ENC_CAP_PLANE_COUNT 1
+#define MFC_ENC_OUT_PLANE_COUNT 2
+#define STUFF_BYTE 4
+#define MFC_MAX_CTRLS 64
+
+#define mfc_read(dev, offset) readl(dev->regs_base + (offset))
+#define mfc_write(dev, data, offset) writel((data), dev->regs_base + \
+ (offset))
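+/* Register accessors used throughout the driver; for example
+ * mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET) in s5p_mfc_init_hw() releases
+ * the reset signal to the RISC core. */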
+
+/**
+ * enum s5p_mfc_fmt_type - type of the pixelformat
+ */
+enum s5p_mfc_fmt_type {
+ MFC_FMT_DEC,
+ MFC_FMT_ENC,
+ MFC_FMT_RAW,
+};
+
+/**
+ * enum s5p_mfc_node_type - The type of an MFC device node.
+ */
+enum s5p_mfc_node_type {
+ MFCNODE_INVALID = -1,
+ MFCNODE_DECODER = 0,
+ MFCNODE_ENCODER = 1,
+};
+
+/**
+ * enum s5p_mfc_inst_type - The type of an MFC instance.
+ */
+enum s5p_mfc_inst_type {
+ MFCINST_INVALID,
+ MFCINST_DECODER,
+ MFCINST_ENCODER,
+};
+
+/**
+ * enum s5p_mfc_inst_state - The state of an MFC instance.
+ */
+enum s5p_mfc_inst_state {
+ MFCINST_FREE = 0,
+ MFCINST_INIT = 100,
+ MFCINST_GOT_INST,
+ MFCINST_HEAD_PARSED,
+ MFCINST_BUFS_SET,
+ MFCINST_RUNNING,
+ MFCINST_FINISHING,
+ MFCINST_FINISHED,
+ MFCINST_RETURN_INST,
+ MFCINST_ERROR,
+ MFCINST_ABORT,
+ MFCINST_RES_CHANGE_INIT,
+ MFCINST_RES_CHANGE_FLUSH,
+ MFCINST_RES_CHANGE_END,
+};
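+/* Note: several checks rely on the numeric ordering of these states, e.g.
+ * "state >= MFCINST_HEAD_PARSED && state < MFCINST_ABORT" in the decoder
+ * g_fmt handler, so the relative order should be preserved. */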
+
+/**
+ * enum s5p_mfc_queue_state - The state of buffer queue.
+ */
+enum s5p_mfc_queue_state {
+ QUEUE_FREE,
+ QUEUE_BUFS_REQUESTED,
+ QUEUE_BUFS_QUERIED,
+ QUEUE_BUFS_MMAPED,
+};
+
+/**
+ * enum s5p_mfc_decode_arg - type of frame decoding
+ */
+enum s5p_mfc_decode_arg {
+ MFC_DEC_FRAME,
+ MFC_DEC_LAST_FRAME,
+ MFC_DEC_RES_CHANGE,
+};
+
+struct s5p_mfc_ctx;
+
+/**
+ * struct s5p_mfc_buf - MFC buffer
+ */
+struct s5p_mfc_buf {
+ struct list_head list;
+ struct vb2_buffer *b;
+ union {
+ struct {
+ size_t luma;
+ size_t chroma;
+ } raw;
+ size_t stream;
+ } cookie;
+ int used;
+};
+
+/**
+ * struct s5p_mfc_pm - power management data structure
+ */
+struct s5p_mfc_pm {
+ struct clk *clock;
+ struct clk *clock_gate;
+ atomic_t power;
+ struct device *device;
+};
+
+/**
+ * struct s5p_mfc_dev - The struct containing driver internal parameters.
+ *
+ * @v4l2_dev: v4l2_device
+ * @vfd_dec: video device for decoding
+ * @vfd_enc: video device for encoding
+ * @plat_dev: platform device
+ * @mem_dev_l: child device of the left memory bank (0)
+ * @mem_dev_r: child device of the right memory bank (1)
+ * @regs_base: base address of the MFC hw registers
+ * @irq: irq resource
+ * @mfc_mem: MFC registers memory resource
+ * @dec_ctrl_handler: control framework handler for decoding
+ * @enc_ctrl_handler: control framework handler for encoding
+ * @pm: power management control
+ * @num_inst:		counter of active MFC instances
+ * @irqlock: lock for operations on videobuf2 queues
+ * @condlock: lock for changing/checking if a context is ready to be
+ * processed
+ * @mfc_mutex: lock for video_device
+ * @int_cond: variable used by the waitqueue
+ * @int_type: type of last interrupt
+ * @int_err: error number for last interrupt
+ * @queue: waitqueue for waiting for completion of device commands
+ * @fw_size: size of firmware
+ * @bank1:		address of the beginning of bank 1 memory
+ * @bank2:		address of the beginning of bank 2 memory
+ * @hw_lock: used for hardware locking
+ * @ctx: array of driver contexts
+ * @curr_ctx: number of the currently running context
+ * @ctx_work_bits: used to mark which contexts are waiting for hardware
+ * @watchdog_cnt: counter for the watchdog
+ * @watchdog_workqueue: workqueue for the watchdog
+ * @watchdog_work: worker for the watchdog
+ * @alloc_ctx: videobuf2 allocator contexts for two memory banks
+ * @enter_suspend: flag set when entering suspend
+ *
+ */
+struct s5p_mfc_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd_dec;
+ struct video_device *vfd_enc;
+ struct platform_device *plat_dev;
+ struct device *mem_dev_l;
+ struct device *mem_dev_r;
+ void __iomem *regs_base;
+ int irq;
+ struct resource *mfc_mem;
+ struct v4l2_ctrl_handler dec_ctrl_handler;
+ struct v4l2_ctrl_handler enc_ctrl_handler;
+ struct s5p_mfc_pm pm;
+ int num_inst;
+ spinlock_t irqlock; /* lock when operating on videobuf2 queues */
+ spinlock_t condlock; /* lock when changing/checking if a context is
+ ready to be processed */
+ struct mutex mfc_mutex; /* video_device lock */
+ int int_cond;
+ int int_type;
+ unsigned int int_err;
+ wait_queue_head_t queue;
+ size_t fw_size;
+ size_t bank1;
+ size_t bank2;
+ unsigned long hw_lock;
+ struct s5p_mfc_ctx *ctx[MFC_NUM_CONTEXTS];
+ int curr_ctx;
+ unsigned long ctx_work_bits;
+ atomic_t watchdog_cnt;
+ struct timer_list watchdog_timer;
+ struct workqueue_struct *watchdog_workqueue;
+ struct work_struct watchdog_work;
+ void *alloc_ctx[2];
+ unsigned long enter_suspend;
+};
+
+/**
+ * struct s5p_mfc_h264_enc_params - encoding parameters for h264
+ */
+struct s5p_mfc_h264_enc_params {
+ enum v4l2_mpeg_video_h264_profile profile;
+ enum v4l2_mpeg_video_h264_loop_filter_mode loop_filter_mode;
+ s8 loop_filter_alpha;
+ s8 loop_filter_beta;
+ enum v4l2_mpeg_video_h264_entropy_mode entropy_mode;
+ u8 max_ref_pic;
+ u8 num_ref_pic_4p;
+ int _8x8_transform;
+ int rc_mb;
+ int rc_mb_dark;
+ int rc_mb_smooth;
+ int rc_mb_static;
+ int rc_mb_activity;
+ int vui_sar;
+ u8 vui_sar_idc;
+ u16 vui_ext_sar_width;
+ u16 vui_ext_sar_height;
+ int open_gop;
+ u16 open_gop_size;
+ u8 rc_frame_qp;
+ u8 rc_min_qp;
+ u8 rc_max_qp;
+ u8 rc_p_frame_qp;
+ u8 rc_b_frame_qp;
+ enum v4l2_mpeg_video_h264_level level_v4l2;
+ int level;
+ u16 cpb_size;
+};
+
+/**
+ * struct s5p_mfc_mpeg4_enc_params - encoding parameters for h263 and mpeg4
+ */
+struct s5p_mfc_mpeg4_enc_params {
+ /* MPEG4 Only */
+ enum v4l2_mpeg_video_mpeg4_profile profile;
+ int quarter_pixel;
+ /* Common for MPEG4, H263 */
+ u16 vop_time_res;
+ u16 vop_frm_delta;
+ u8 rc_frame_qp;
+ u8 rc_min_qp;
+ u8 rc_max_qp;
+ u8 rc_p_frame_qp;
+ u8 rc_b_frame_qp;
+ enum v4l2_mpeg_video_mpeg4_level level_v4l2;
+ int level;
+};
+
+/**
+ * struct s5p_mfc_enc_params - general encoding parameters
+ */
+struct s5p_mfc_enc_params {
+ u16 width;
+ u16 height;
+
+ u16 gop_size;
+ enum v4l2_mpeg_video_multi_slice_mode slice_mode;
+ u16 slice_mb;
+ u32 slice_bit;
+ u16 intra_refresh_mb;
+ int pad;
+ u8 pad_luma;
+ u8 pad_cb;
+ u8 pad_cr;
+ int rc_frame;
+ u32 rc_bitrate;
+ u16 rc_reaction_coeff;
+ u16 vbv_size;
+
+ enum v4l2_mpeg_video_header_mode seq_hdr_mode;
+ enum v4l2_mpeg_mfc51_video_frame_skip_mode frame_skip_mode;
+ int fixed_target_bit;
+
+ u8 num_b_frame;
+ u32 rc_framerate_num;
+ u32 rc_framerate_denom;
+ int interlace;
+
+ union {
+ struct s5p_mfc_h264_enc_params h264;
+ struct s5p_mfc_mpeg4_enc_params mpeg4;
+ } codec;
+
+};
+
+/**
+ * struct s5p_mfc_codec_ops - codec ops, used by encoding
+ */
+struct s5p_mfc_codec_ops {
+ /* initialization routines */
+ int (*pre_seq_start) (struct s5p_mfc_ctx *ctx);
+ int (*post_seq_start) (struct s5p_mfc_ctx *ctx);
+ /* execution routines */
+ int (*pre_frame_start) (struct s5p_mfc_ctx *ctx);
+ int (*post_frame_start) (struct s5p_mfc_ctx *ctx);
+};
+
+#define call_cop(c, op, args...) \
+ (((c)->c_ops->op) ? \
+ ((c)->c_ops->op(args)) : 0)
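+/* Invokes a codec hook only when the codec provides one, e.g.
+ * call_cop(ctx, post_seq_start, ctx); a missing hook evaluates to 0, so
+ * callers can treat every hook as optional. */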
+
+/**
+ * struct s5p_mfc_ctx - This struct contains the instance context
+ *
+ * @dev: pointer to the s5p_mfc_dev of the device
+ * @fh: struct v4l2_fh
+ * @num: number of the context that this structure describes
+ * @int_cond: variable used by the waitqueue
+ * @int_type: type of the last interrupt
+ * @int_err: error number received from MFC hw in the interrupt
+ * @queue: waitqueue that can be used to wait for this context to
+ * finish
+ * @src_fmt: source pixelformat information
+ * @dst_fmt: destination pixelformat information
+ * @vq_src: vb2 queue for source buffers
+ * @vq_dst: vb2 queue for destination buffers
+ * @src_queue: driver internal queue for source buffers
+ * @dst_queue: driver internal queue for destination buffers
+ * @src_queue_cnt: number of buffers queued on the source internal queue
+ * @dst_queue_cnt: number of buffers queued on the dest internal queue
+ * @type: type of the instance - decoder or encoder
+ * @state: state of the context
+ * @inst_no: number of hw instance associated with the context
+ * @img_width: width of the image that is decoded or encoded
+ * @img_height: height of the image that is decoded or encoded
+ * @buf_width: width of the buffer for processed image
+ * @buf_height: height of the buffer for processed image
+ * @luma_size: size of a luma plane
+ * @chroma_size: size of a chroma plane
+ * @mv_size: size of a motion vectors buffer
+ * @consumed_stream: number of bytes that have been used so far from the
+ * decoding buffer
+ * @dpb_flush_flag:	flag used to indicate that DPB buffers are being
+ * flushed
+ * @bank1_buf: handle to memory allocated for temporary buffers from
+ * memory bank 1
+ * @bank1_phys: address of the temporary buffers from memory bank 1
+ * @bank1_size: size of the memory allocated for temporary buffers from
+ * memory bank 1
+ * @bank2_buf: handle to memory allocated for temporary buffers from
+ * memory bank 2
+ * @bank2_phys: address of the temporary buffers from memory bank 2
+ * @bank2_size: size of the memory allocated for temporary buffers from
+ * memory bank 2
+ * @capture_state: state of the capture buffers queue
+ * @output_state: state of the output buffers queue
+ * @src_bufs: information on allocated source buffers
+ * @dst_bufs: information on allocated destination buffers
+ * @sequence: counter for the sequence number for v4l2
+ * @dec_dst_flag: flags for buffers queued in the hardware
+ * @dec_src_buf_size: size of the buffer for source buffers in decoding
+ * @codec_mode: number of codec mode used by MFC hw
+ * @slice_interface: slice interface flag
+ * @loop_filter_mpeg4: loop filter for MPEG4 flag
+ * @display_delay: value of the display delay for H264
+ * @display_delay_enable: display delay for H264 enable flag
+ * @after_packed_pb: flag used to track buffer when stream is in
+ * Packed PB format
+ * @dpb_count: count of the DPB buffers required by MFC hw
+ * @total_dpb_count: count of DPB buffers with additional buffers
+ * requested by the application
+ * @ctx_buf: handle to the memory associated with this context
+ * @ctx_phys: address of the memory associated with this context
+ * @ctx_size: size of the memory associated with this context
+ * @desc_buf: description buffer for decoding handle
+ * @desc_phys: description buffer for decoding address
+ * @shm_alloc: handle for the shared memory buffer
+ * @shm: virtual address for the shared memory buffer
+ * @shm_ofs: address offset for shared memory
+ * @enc_params: encoding parameters for MFC
+ * @enc_dst_buf_size: size of the buffers for encoder output
+ * @frame_type: used to force the type of the next encoded frame
+ * @ref_queue: list of the reference buffers for encoding
+ * @ref_queue_cnt: number of the buffers in the reference list
+ * @c_ops: ops for encoding
+ * @ctrls: array of controls, used when adding controls to the
+ * v4l2 control framework
+ * @ctrl_handler: handler for v4l2 framework
+ */
+struct s5p_mfc_ctx {
+ struct s5p_mfc_dev *dev;
+ struct v4l2_fh fh;
+
+ int num;
+
+ int int_cond;
+ int int_type;
+ unsigned int int_err;
+ wait_queue_head_t queue;
+
+ struct s5p_mfc_fmt *src_fmt;
+ struct s5p_mfc_fmt *dst_fmt;
+
+ struct vb2_queue vq_src;
+ struct vb2_queue vq_dst;
+
+ struct list_head src_queue;
+ struct list_head dst_queue;
+
+ unsigned int src_queue_cnt;
+ unsigned int dst_queue_cnt;
+
+ enum s5p_mfc_inst_type type;
+ enum s5p_mfc_inst_state state;
+ int inst_no;
+
+ /* Image parameters */
+ int img_width;
+ int img_height;
+ int buf_width;
+ int buf_height;
+
+ int luma_size;
+ int chroma_size;
+ int mv_size;
+
+ unsigned long consumed_stream;
+
+ unsigned int dpb_flush_flag;
+
+ /* Buffers */
+ void *bank1_buf;
+ size_t bank1_phys;
+ size_t bank1_size;
+
+ void *bank2_buf;
+ size_t bank2_phys;
+ size_t bank2_size;
+
+ enum s5p_mfc_queue_state capture_state;
+ enum s5p_mfc_queue_state output_state;
+
+ struct s5p_mfc_buf src_bufs[MFC_MAX_BUFFERS];
+ int src_bufs_cnt;
+ struct s5p_mfc_buf dst_bufs[MFC_MAX_BUFFERS];
+ int dst_bufs_cnt;
+
+ unsigned int sequence;
+ unsigned long dec_dst_flag;
+ size_t dec_src_buf_size;
+
+ /* Control values */
+ int codec_mode;
+ int slice_interface;
+ int loop_filter_mpeg4;
+ int display_delay;
+ int display_delay_enable;
+ int after_packed_pb;
+
+ int dpb_count;
+ int total_dpb_count;
+
+ /* Buffers */
+ void *ctx_buf;
+ size_t ctx_phys;
+ size_t ctx_ofs;
+ size_t ctx_size;
+
+ void *desc_buf;
+ size_t desc_phys;
+
+
+ void *shm_alloc;
+ void *shm;
+ size_t shm_ofs;
+
+ struct s5p_mfc_enc_params enc_params;
+
+ size_t enc_dst_buf_size;
+
+ enum v4l2_mpeg_mfc51_video_force_frame_type force_frame_type;
+
+ struct list_head ref_queue;
+ unsigned int ref_queue_cnt;
+
+ struct s5p_mfc_codec_ops *c_ops;
+
+ struct v4l2_ctrl *ctrls[MFC_MAX_CTRLS];
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+/*
+ * struct s5p_mfc_fmt - structure used to store information about pixelformats
+ * used by the MFC
+ */
+struct s5p_mfc_fmt {
+ char *name;
+ u32 fourcc;
+ u32 codec_mode;
+ enum s5p_mfc_fmt_type type;
+ u32 num_planes;
+};
+
+/**
+ * struct mfc_control - structure used to store information about MFC controls
+ * it is used to initialize the control framework.
+ */
+struct mfc_control {
+ __u32 id;
+ enum v4l2_ctrl_type type;
+ __u8 name[32]; /* Whatever */
+ __s32 minimum; /* Note signedness */
+ __s32 maximum;
+ __s32 step;
+ __u32 menu_skip_mask;
+ __s32 default_value;
+ __u32 flags;
+ __u32 reserved[2];
+ __u8 is_volatile;
+};
+
+
+#define fh_to_ctx(__fh) container_of(__fh, struct s5p_mfc_ctx, fh)
+#define ctrl_to_ctx(__ctrl) \
+ container_of((__ctrl)->handler, struct s5p_mfc_ctx, ctrl_handler)
+
+#endif /* S5P_MFC_COMMON_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
new file mode 100644
index 00000000000..5f4da80051b
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
@@ -0,0 +1,343 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_pm.h"
+
+static void *s5p_mfc_bitproc_buf;
+static size_t s5p_mfc_bitproc_phys;
+static unsigned char *s5p_mfc_bitproc_virt;
+
+/* Allocate and load firmware */
+int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev)
+{
+ struct firmware *fw_blob;
+ size_t bank2_base_phys;
+ void *b_base;
+ int err;
+
+	/* Firmware has to be present as a separate file or compiled
+	 * into the kernel. */
+ mfc_debug_enter();
+ err = request_firmware((const struct firmware **)&fw_blob,
+ "s5pc110-mfc.fw", dev->v4l2_dev.dev);
+ if (err != 0) {
+		mfc_err("Firmware is not present in the /lib/firmware directory nor compiled into the kernel\n");
+ return -EINVAL;
+ }
+ dev->fw_size = ALIGN(fw_blob->size, FIRMWARE_ALIGN);
+ if (s5p_mfc_bitproc_buf) {
+ mfc_err("Attempting to allocate firmware when it seems that it is already loaded\n");
+ release_firmware(fw_blob);
+ return -ENOMEM;
+ }
+ s5p_mfc_bitproc_buf = vb2_dma_contig_memops.alloc(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], dev->fw_size);
+ if (IS_ERR(s5p_mfc_bitproc_buf)) {
+ s5p_mfc_bitproc_buf = 0;
+ mfc_err("Allocating bitprocessor buffer failed\n");
+ release_firmware(fw_blob);
+ return -ENOMEM;
+ }
+ s5p_mfc_bitproc_phys = s5p_mfc_mem_cookie(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], s5p_mfc_bitproc_buf);
+ if (s5p_mfc_bitproc_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
+ mfc_err("The base memory for bank 1 is not aligned to 128KB\n");
+ vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+ s5p_mfc_bitproc_phys = 0;
+ s5p_mfc_bitproc_buf = 0;
+ release_firmware(fw_blob);
+ return -EIO;
+ }
+ s5p_mfc_bitproc_virt = vb2_dma_contig_memops.vaddr(s5p_mfc_bitproc_buf);
+ if (!s5p_mfc_bitproc_virt) {
+ mfc_err("Bitprocessor memory remap failed\n");
+ vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+ s5p_mfc_bitproc_phys = 0;
+ s5p_mfc_bitproc_buf = 0;
+ release_firmware(fw_blob);
+ return -EIO;
+ }
+ dev->bank1 = s5p_mfc_bitproc_phys;
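+	/*
+	 * The firmware sits at the start of memory bank 1; dev->bank1 and
+	 * dev->bank2 are later written into the MC_DRAMBASE_ADR_A/B
+	 * registers (see s5p_mfc_init_memctrl()), and buffer addresses
+	 * handed to the hardware are presumably expressed relative to
+	 * these bases.
+	 */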
+ b_base = vb2_dma_contig_memops.alloc(
+ dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], 1 << MFC_BANK2_ALIGN_ORDER);
+ if (IS_ERR(b_base)) {
+ vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+ s5p_mfc_bitproc_phys = 0;
+ s5p_mfc_bitproc_buf = 0;
+ mfc_err("Allocating bank2 base failed\n");
+ release_firmware(fw_blob);
+ return -ENOMEM;
+ }
+ bank2_base_phys = s5p_mfc_mem_cookie(
+ dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], b_base);
+ vb2_dma_contig_memops.put(b_base);
+ if (bank2_base_phys & ((1 << MFC_BASE_ALIGN_ORDER) - 1)) {
+ mfc_err("The base memory for bank 2 is not aligned to 128KB\n");
+ vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+ s5p_mfc_bitproc_phys = 0;
+ s5p_mfc_bitproc_buf = 0;
+ release_firmware(fw_blob);
+ return -EIO;
+ }
+ dev->bank2 = bank2_base_phys;
+ memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
+ wmb();
+ release_firmware(fw_blob);
+ mfc_debug_leave();
+ return 0;
+}
+
+/* Reload firmware to MFC */
+int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev)
+{
+ struct firmware *fw_blob;
+ int err;
+
+	/* Firmware has to be present as a separate file or compiled
+	 * into the kernel. */
+ mfc_debug_enter();
+ err = request_firmware((const struct firmware **)&fw_blob,
+ "s5pc110-mfc.fw", dev->v4l2_dev.dev);
+ if (err != 0) {
+		mfc_err("Firmware is not present in the /lib/firmware directory nor compiled into the kernel\n");
+ return -EINVAL;
+ }
+ if (fw_blob->size > dev->fw_size) {
+ mfc_err("MFC firmware is too big to be loaded\n");
+ release_firmware(fw_blob);
+ return -ENOMEM;
+ }
+ if (s5p_mfc_bitproc_buf == 0 || s5p_mfc_bitproc_phys == 0) {
+ mfc_err("MFC firmware is not allocated or was not mapped correctly\n");
+ release_firmware(fw_blob);
+ return -EINVAL;
+ }
+ memcpy(s5p_mfc_bitproc_virt, fw_blob->data, fw_blob->size);
+ wmb();
+ release_firmware(fw_blob);
+ mfc_debug_leave();
+ return 0;
+}
+
+/* Release firmware memory */
+int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev)
+{
+ /* Before calling this function one has to make sure
+ * that MFC is no longer processing */
+ if (!s5p_mfc_bitproc_buf)
+ return -EINVAL;
+ vb2_dma_contig_memops.put(s5p_mfc_bitproc_buf);
+ s5p_mfc_bitproc_virt = 0;
+ s5p_mfc_bitproc_phys = 0;
+ s5p_mfc_bitproc_buf = 0;
+ return 0;
+}
+
+/* Reset the device */
+int s5p_mfc_reset(struct s5p_mfc_dev *dev)
+{
+ unsigned int mc_status;
+ unsigned long timeout;
+
+ mfc_debug_enter();
+ /* Stop procedure */
+ /* reset RISC */
+ mfc_write(dev, 0x3f6, S5P_FIMV_SW_RESET);
+ /* All reset except for MC */
+ mfc_write(dev, 0x3e2, S5P_FIMV_SW_RESET);
+ mdelay(10);
+
+ timeout = jiffies + msecs_to_jiffies(MFC_BW_TIMEOUT);
+ /* Check MC status */
+ do {
+ if (time_after(jiffies, timeout)) {
+ mfc_err("Timeout while resetting MFC\n");
+ return -EIO;
+ }
+
+ mc_status = mfc_read(dev, S5P_FIMV_MC_STATUS);
+
+ } while (mc_status & 0x3);
+
+ mfc_write(dev, 0x0, S5P_FIMV_SW_RESET);
+ mfc_write(dev, 0x3fe, S5P_FIMV_SW_RESET);
+ mfc_debug_leave();
+ return 0;
+}
+
+static inline void s5p_mfc_init_memctrl(struct s5p_mfc_dev *dev)
+{
+ mfc_write(dev, dev->bank1, S5P_FIMV_MC_DRAMBASE_ADR_A);
+ mfc_write(dev, dev->bank2, S5P_FIMV_MC_DRAMBASE_ADR_B);
+ mfc_debug(2, "Bank1: %08x, Bank2: %08x\n", dev->bank1, dev->bank2);
+}
+
+static inline void s5p_mfc_clear_cmds(struct s5p_mfc_dev *dev)
+{
+ mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH0_INST_ID);
+ mfc_write(dev, 0xffffffff, S5P_FIMV_SI_CH1_INST_ID);
+ mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
+ mfc_write(dev, 0, S5P_FIMV_HOST2RISC_CMD);
+}
+
+/* Initialize hardware */
+int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
+{
+ unsigned int ver;
+ int ret;
+
+ mfc_debug_enter();
+ if (!s5p_mfc_bitproc_buf)
+ return -EINVAL;
+
+ /* 0. MFC reset */
+ mfc_debug(2, "MFC reset..\n");
+ s5p_mfc_clock_on();
+ ret = s5p_mfc_reset(dev);
+ if (ret) {
+ mfc_err("Failed to reset MFC - timeout\n");
+ return ret;
+ }
+ mfc_debug(2, "Done MFC reset..\n");
+ /* 1. Set DRAM base Addr */
+ s5p_mfc_init_memctrl(dev);
+ /* 2. Initialize registers of channel I/F */
+ s5p_mfc_clear_cmds(dev);
+ /* 3. Release reset signal to the RISC */
+ s5p_mfc_clean_dev_int_flags(dev);
+ mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
+ mfc_debug(2, "Will now wait for completion of firmware transfer\n");
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_FW_STATUS_RET)) {
+ mfc_err("Failed to load firmware\n");
+ s5p_mfc_reset(dev);
+ s5p_mfc_clock_off();
+ return -EIO;
+ }
+ s5p_mfc_clean_dev_int_flags(dev);
+ /* 4. Initialize firmware */
+ ret = s5p_mfc_sys_init_cmd(dev);
+ if (ret) {
+ mfc_err("Failed to send command to MFC - timeout\n");
+ s5p_mfc_reset(dev);
+ s5p_mfc_clock_off();
+ return ret;
+ }
+ mfc_debug(2, "Ok, now will write a command to init the system\n");
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_SYS_INIT_RET)) {
+		mfc_err("Failed to init the firmware\n");
+ s5p_mfc_reset(dev);
+ s5p_mfc_clock_off();
+ return -EIO;
+ }
+ dev->int_cond = 0;
+ if (dev->int_err != 0 || dev->int_type !=
+ S5P_FIMV_R2H_CMD_SYS_INIT_RET) {
+ /* Failure. */
+ mfc_err("Failed to init firmware - error: %d int: %d\n",
+ dev->int_err, dev->int_type);
+ s5p_mfc_reset(dev);
+ s5p_mfc_clock_off();
+ return -EIO;
+ }
+ ver = mfc_read(dev, S5P_FIMV_FW_VERSION);
+ mfc_debug(2, "MFC F/W version : %02xyy, %02xmm, %02xdd\n",
+ (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
+ s5p_mfc_clock_off();
+ mfc_debug_leave();
+ return 0;
+}
+
+
+int s5p_mfc_sleep(struct s5p_mfc_dev *dev)
+{
+ int ret;
+
+ mfc_debug_enter();
+ s5p_mfc_clock_on();
+ s5p_mfc_clean_dev_int_flags(dev);
+ ret = s5p_mfc_sleep_cmd(dev);
+ if (ret) {
+ mfc_err("Failed to send command to MFC - timeout\n");
+ return ret;
+ }
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_SLEEP_RET)) {
+ mfc_err("Failed to sleep\n");
+ return -EIO;
+ }
+ s5p_mfc_clock_off();
+ dev->int_cond = 0;
+ if (dev->int_err != 0 || dev->int_type !=
+ S5P_FIMV_R2H_CMD_SLEEP_RET) {
+ /* Failure. */
+ mfc_err("Failed to sleep - error: %d int: %d\n", dev->int_err,
+ dev->int_type);
+ return -EIO;
+ }
+ mfc_debug_leave();
+ return ret;
+}
+
+int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
+{
+ int ret;
+
+ mfc_debug_enter();
+ /* 0. MFC reset */
+ mfc_debug(2, "MFC reset..\n");
+ s5p_mfc_clock_on();
+ ret = s5p_mfc_reset(dev);
+ if (ret) {
+ mfc_err("Failed to reset MFC - timeout\n");
+ return ret;
+ }
+ mfc_debug(2, "Done MFC reset..\n");
+ /* 1. Set DRAM base Addr */
+ s5p_mfc_init_memctrl(dev);
+ /* 2. Initialize registers of channel I/F */
+ s5p_mfc_clear_cmds(dev);
+ s5p_mfc_clean_dev_int_flags(dev);
+ /* 3. Initialize firmware */
+ ret = s5p_mfc_wakeup_cmd(dev);
+ if (ret) {
+ mfc_err("Failed to send command to MFC - timeout\n");
+ return ret;
+ }
+ /* 4. Release reset signal to the RISC */
+ mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
+ mfc_debug(2, "Ok, now will write a command to wakeup the system\n");
+ if (s5p_mfc_wait_for_done_dev(dev, S5P_FIMV_R2H_CMD_WAKEUP_RET)) {
+		mfc_err("Failed to wake up MFC\n");
+ return -EIO;
+ }
+ s5p_mfc_clock_off();
+ dev->int_cond = 0;
+ if (dev->int_err != 0 || dev->int_type !=
+ S5P_FIMV_R2H_CMD_WAKEUP_RET) {
+ /* Failure. */
+ mfc_err("Failed to wakeup - error: %d int: %d\n", dev->int_err,
+ dev->int_type);
+ return -EIO;
+ }
+ mfc_debug_leave();
+ return 0;
+}
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h
new file mode 100644
index 00000000000..61dc23b7ee5
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h
@@ -0,0 +1,29 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_ctrl.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_CTRL_H
+#define S5P_MFC_CTRL_H
+
+#include "s5p_mfc_common.h"
+
+int s5p_mfc_release_firmware(struct s5p_mfc_dev *dev);
+int s5p_mfc_alloc_and_load_firmware(struct s5p_mfc_dev *dev);
+int s5p_mfc_reload_firmware(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_init_hw(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_sleep(struct s5p_mfc_dev *dev);
+int s5p_mfc_wakeup(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_reset(struct s5p_mfc_dev *dev);
+
+#endif /* S5P_MFC_CTRL_H */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_debug.h b/drivers/media/video/s5p-mfc/s5p_mfc_debug.h
new file mode 100644
index 00000000000..ecb8616a492
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_debug.h
@@ -0,0 +1,48 @@
+/*
+ * drivers/media/video/s5p-mfc/s5p_mfc_debug.h
+ *
+ * Header file for Samsung MFC (Multi Format Codec - FIMV) driver
+ * This file contains debug macros
+ *
+ * Kamil Debski, Copyright (c) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S5P_MFC_DEBUG_H_
+#define S5P_MFC_DEBUG_H_
+
+#define DEBUG
+
+#ifdef DEBUG
+extern int debug;
+
+#define mfc_debug(level, fmt, args...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG "%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+#else
+#define mfc_debug(level, fmt, args...)
+#endif
+
+#define mfc_debug_enter() mfc_debug(5, "enter")
+#define mfc_debug_leave() mfc_debug(5, "leave")
+
+#define mfc_err(fmt, args...) \
+ do { \
+ printk(KERN_ERR "%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
+#define mfc_info(fmt, args...) \
+ do { \
+ printk(KERN_INFO "%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
+#endif /* S5P_MFC_DEBUG_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
new file mode 100644
index 00000000000..b2c5052a9c4
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -0,0 +1,1036 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * Kamil Debski, <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_dec.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_pm.h"
+#include "s5p_mfc_shm.h"
+
+static struct s5p_mfc_fmt formats[] = {
+ {
+ .name = "4:2:0 2 Planes 64x32 Tiles",
+ .fourcc = V4L2_PIX_FMT_NV12MT,
+ .codec_mode = S5P_FIMV_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ },
+ {
+ .name = "4:2:0 2 Planes",
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = S5P_FIMV_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ },
+ {
+ .name = "H264 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H264,
+ .codec_mode = S5P_FIMV_CODEC_H264_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .name = "H263 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H263,
+ .codec_mode = S5P_FIMV_CODEC_H263_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .name = "MPEG1 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_MPEG1,
+ .codec_mode = S5P_FIMV_CODEC_MPEG2_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .name = "MPEG2 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_MPEG2,
+ .codec_mode = S5P_FIMV_CODEC_MPEG2_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .name = "MPEG4 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_MPEG4,
+ .codec_mode = S5P_FIMV_CODEC_MPEG4_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .name = "XviD Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_XVID,
+ .codec_mode = S5P_FIMV_CODEC_MPEG4_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .name = "VC1 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
+ .codec_mode = S5P_FIMV_CODEC_VC1_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .name = "VC1 RCV Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
+ .codec_mode = S5P_FIMV_CODEC_VC1RCV_DEC,
+ .type = MFC_FMT_DEC,
+ .num_planes = 1,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
+/* Find selected format description */
+static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
+ formats[i].type == t)
+ return &formats[i];
+ }
+ return NULL;
+}
+
+static struct mfc_control controls[] = {
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H264 Display Delay",
+ .minimum = 0,
+ .maximum = 16383,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Display Delay Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Mpeg4 Loop Filter Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Slice Interface Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Minimum number of cap bufs",
+ .minimum = 1,
+ .maximum = 32,
+ .step = 1,
+ .default_value = 1,
+ .is_volatile = 1,
+ },
+};
+
+#define NUM_CTRLS ARRAY_SIZE(controls)
+
+/* Check whether a context should be run on hardware */
+static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
+{
+ /* Context is to parse header */
+ if (ctx->src_queue_cnt >= 1 && ctx->state == MFCINST_GOT_INST)
+ return 1;
+ /* Context is to decode a frame */
+ if (ctx->src_queue_cnt >= 1 &&
+ ctx->state == MFCINST_RUNNING &&
+ ctx->dst_queue_cnt >= ctx->dpb_count)
+ return 1;
+ /* Context is to return last frame */
+ if (ctx->state == MFCINST_FINISHING &&
+ ctx->dst_queue_cnt >= ctx->dpb_count)
+ return 1;
+ /* Context is to set buffers */
+ if (ctx->src_queue_cnt >= 1 &&
+ ctx->state == MFCINST_HEAD_PARSED &&
+ ctx->capture_state == QUEUE_BUFS_MMAPED)
+ return 1;
+ /* Resolution change */
+ if ((ctx->state == MFCINST_RES_CHANGE_INIT ||
+ ctx->state == MFCINST_RES_CHANGE_FLUSH) &&
+ ctx->dst_queue_cnt >= ctx->dpb_count)
+ return 1;
+ if (ctx->state == MFCINST_RES_CHANGE_END &&
+ ctx->src_queue_cnt >= 1)
+ return 1;
+ mfc_debug(2, "ctx is not ready\n");
+ return 0;
+}
+
+static struct s5p_mfc_codec_ops decoder_codec_ops = {
+ .pre_seq_start = NULL,
+ .post_seq_start = NULL,
+ .pre_frame_start = NULL,
+ .post_frame_start = NULL,
+};
+
+/* Query capabilities of the device */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+
+ strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
+ strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->version = KERNEL_VERSION(1, 0, 0);
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+/* Enumerate format */
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool mplane, bool out)
+{
+ struct s5p_mfc_fmt *fmt;
+ int i, j = 0;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (mplane && formats[i].num_planes == 1)
+ continue;
+ else if (!mplane && formats[i].num_planes > 1)
+ continue;
+ if (out && formats[i].type != MFC_FMT_DEC)
+ continue;
+ else if (!out && formats[i].type != MFC_FMT_RAW)
+ continue;
+
+ if (j == f->index)
+ break;
+ ++j;
+ }
+ if (i == ARRAY_SIZE(formats))
+ return -EINVAL;
+ fmt = &formats[i];
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, false, false);
+}
+
+static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, true, false);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, false, true);
+}
+
+static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, true, true);
+}
+
+/* Get format */
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_mp;
+
+ mfc_debug_enter();
+ pix_mp = &f->fmt.pix_mp;
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ (ctx->state == MFCINST_GOT_INST || ctx->state ==
+ MFCINST_RES_CHANGE_END)) {
+ /* If the MFC is parsing the header,
+ * wait until it is finished */
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_SEQ_DONE_RET,
+ 0);
+ }
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ ctx->state >= MFCINST_HEAD_PARSED &&
+ ctx->state < MFCINST_ABORT) {
+ /* This is run on CAPTURE (decode output) */
+ /* Width and height are set to the dimensions
+ of the movie, the buffer is bigger and
+ further processing stages should crop to this
+ rectangle. */
+ pix_mp->width = ctx->buf_width;
+ pix_mp->height = ctx->buf_height;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->num_planes = 2;
+ /* Set pixelformat to the format in which MFC
+ outputs the decoded frame */
+ pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT;
+ pix_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+ pix_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+ pix_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+ pix_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* This is run on OUTPUT
+ The buffer contains compressed image
+ so width and height have no meaning */
+ pix_mp->width = 0;
+ pix_mp->height = 0;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->plane_fmt[0].bytesperline = ctx->dec_src_buf_size;
+ pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size;
+ pix_mp->pixelformat = ctx->src_fmt->fourcc;
+ pix_mp->num_planes = ctx->src_fmt->num_planes;
+ } else {
+ mfc_err("Format could not be read\n");
+ mfc_debug(2, "%s-- with error\n", __func__);
+ return -EINVAL;
+ }
+ mfc_debug_leave();
+ return 0;
+}
+
+/* Try format */
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_fmt *fmt;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ mfc_err("This node supports decoding only\n");
+ return -EINVAL;
+ }
+ fmt = find_format(f, MFC_FMT_DEC);
+ if (!fmt) {
+ mfc_err("Unsupported format\n");
+ return -EINVAL;
+ }
+ if (fmt->type != MFC_FMT_DEC) {
+ mfc_err("\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Set format */
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret = 0;
+ struct s5p_mfc_fmt *fmt;
+ struct v4l2_pix_format_mplane *pix_mp;
+
+ mfc_debug_enter();
+ ret = vidioc_try_fmt(file, priv, f);
+ pix_mp = &f->fmt.pix_mp;
+ if (ret)
+ return ret;
+ if (ctx->vq_src.streaming || ctx->vq_dst.streaming) {
+ v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+ fmt = find_format(f, MFC_FMT_DEC);
+ if (!fmt || fmt->codec_mode == S5P_FIMV_CODEC_NONE) {
+ mfc_err("Unknown codec\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (fmt->type != MFC_FMT_DEC) {
+ mfc_err("Wrong format selected, you should choose "
+ "format for decoding\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ ctx->src_fmt = fmt;
+ ctx->codec_mode = fmt->codec_mode;
+ mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
+ pix_mp->height = 0;
+ pix_mp->width = 0;
+ if (pix_mp->plane_fmt[0].sizeimage)
+ ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
+ else
+ pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
+ DEF_CPB_SIZE;
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ ctx->state = MFCINST_INIT;
+out:
+ mfc_debug_leave();
+ return ret;
+}
+
+/* Request buffers */
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret = 0;
+ unsigned long flags;
+
+ if (reqbufs->memory != V4L2_MEMORY_MMAP) {
+ mfc_err("Only V4L2_MEMORY_MAP is supported\n");
+ return -EINVAL;
+ }
+ if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* Can only request buffers after an instance has been opened.*/
+ if (ctx->state == MFCINST_INIT) {
+ ctx->src_bufs_cnt = 0;
+ if (reqbufs->count == 0) {
+ mfc_debug(2, "Freeing buffers\n");
+ s5p_mfc_clock_on();
+ ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
+ s5p_mfc_clock_off();
+ return ret;
+ }
+ /* Decoding */
+ if (ctx->output_state != QUEUE_FREE) {
+ mfc_err("Bufs have already been requested\n");
+ return -EINVAL;
+ }
+ s5p_mfc_clock_on();
+ ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
+ s5p_mfc_clock_off();
+ if (ret) {
+ mfc_err("vb2_reqbufs on output failed\n");
+ return ret;
+ }
+ mfc_debug(2, "vb2_reqbufs: %d\n", ret);
+ ctx->output_state = QUEUE_BUFS_REQUESTED;
+ }
+ } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ ctx->dst_bufs_cnt = 0;
+ if (reqbufs->count == 0) {
+ mfc_debug(2, "Freeing buffers\n");
+ s5p_mfc_clock_on();
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ s5p_mfc_clock_off();
+ return ret;
+ }
+ if (ctx->capture_state != QUEUE_FREE) {
+ mfc_err("Bufs have already been requested\n");
+ return -EINVAL;
+ }
+ ctx->capture_state = QUEUE_BUFS_REQUESTED;
+ s5p_mfc_clock_on();
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ s5p_mfc_clock_off();
+ if (ret) {
+ mfc_err("vb2_reqbufs on capture failed\n");
+ return ret;
+ }
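+ /* The decoder needs at least dpb_count capture buffers; if fewer
+ * were allocated, request zero buffers to free them again and
+ * report the failure */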
+ if (reqbufs->count < ctx->dpb_count) {
+ mfc_err("Not enough buffers allocated\n");
+ reqbufs->count = 0;
+ s5p_mfc_clock_on();
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ s5p_mfc_clock_off();
+ return -ENOMEM;
+ }
+ ctx->total_dpb_count = reqbufs->count;
+ ret = s5p_mfc_alloc_codec_buffers(ctx);
+ if (ret) {
+ mfc_err("Failed to allocate decoding buffers\n");
+ reqbufs->count = 0;
+ s5p_mfc_clock_on();
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ s5p_mfc_clock_off();
+ return -ENOMEM;
+ }
+ if (ctx->dst_bufs_cnt == ctx->total_dpb_count) {
+ ctx->capture_state = QUEUE_BUFS_MMAPED;
+ } else {
+ mfc_err("Not all buffers passed to buf_init\n");
+ reqbufs->count = 0;
+ s5p_mfc_clock_on();
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ s5p_mfc_release_codec_buffers(ctx);
+ s5p_mfc_clock_off();
+ return -ENOMEM;
+ }
+ if (s5p_mfc_ctx_ready(ctx)) {
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ }
+ s5p_mfc_try_run(dev);
+ s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET, 0);
+ }
+ return ret;
+}
+
+/* Query buffer */
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+ int i;
+
+ if (buf->memory != V4L2_MEMORY_MMAP) {
+ mfc_err("Only mmaped buffers can be used\n");
+ return -EINVAL;
+ }
+ mfc_debug(2, "State: %d, buf->type: %d\n", ctx->state, buf->type);
+ if (ctx->state == MFCINST_INIT &&
+ buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = vb2_querybuf(&ctx->vq_src, buf);
+ } else if (ctx->state == MFCINST_RUNNING &&
+ buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ ret = vb2_querybuf(&ctx->vq_dst, buf);
+ for (i = 0; i < buf->length; i++)
+ buf->m.planes[i].m.mem_offset += DST_QUEUE_OFF_BASE;
+ } else {
+ mfc_err("vidioc_querybuf called in an inappropriate state\n");
+ ret = -EINVAL;
+ }
+ mfc_debug_leave();
+ return ret;
+}
+
+/* Queue a buffer */
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MFCINST_ERROR) {
+ mfc_err("Call on QBUF after unrecoverable error\n");
+ return -EIO;
+ }
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_qbuf(&ctx->vq_src, buf);
+ else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_qbuf(&ctx->vq_dst, buf);
+ return -EINVAL;
+}
+
+/* Dequeue a buffer */
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MFCINST_ERROR) {
+ mfc_err("Call on DQBUF after unrecoverable error\n");
+ return -EIO;
+ }
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK);
+ else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK);
+ return -EINVAL;
+}
+
+/* Stream on */
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ mfc_debug_enter();
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+
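+ /* If the instance has not been opened yet, allocate the instance
+ * and temporary buffers and open a decoder instance on the hardware */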
+ if (ctx->state == MFCINST_INIT) {
+ ctx->dst_bufs_cnt = 0;
+ ctx->src_bufs_cnt = 0;
+ ctx->capture_state = QUEUE_FREE;
+ ctx->output_state = QUEUE_FREE;
+ s5p_mfc_alloc_instance_buffer(ctx);
+ s5p_mfc_alloc_dec_temp_buffers(ctx);
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_try_run(dev);
+
+ if (s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET, 0)) {
+ /* Error or timeout */
+ mfc_err("Error getting instance from hardware\n");
+ s5p_mfc_release_instance_buffer(ctx);
+ s5p_mfc_release_dec_desc_buffer(ctx);
+ return -EIO;
+ }
+ mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
+ }
+ ret = vb2_streamon(&ctx->vq_src, type);
+ }
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ ret = vb2_streamon(&ctx->vq_dst, type);
+ mfc_debug_leave();
+ return ret;
+}
+
+/* Stream off, which is equivalent to a pause */
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_streamoff(&ctx->vq_src, type);
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_streamoff(&ctx->vq_dst, type);
+ return -EINVAL;
+}
+
+/* Set controls - v4l2 control framework */
+static int s5p_mfc_dec_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY:
+ ctx->loop_filter_mpeg4 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE:
+ ctx->display_delay_enable = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ ctx->display_delay = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
+ ctx->slice_interface = ctrl->val;
+ break;
+ default:
+ mfc_err("Invalid control 0x%08x\n", ctrl->id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ if (ctx->state >= MFCINST_HEAD_PARSED &&
+ ctx->state < MFCINST_ABORT) {
+ ctrl->val = ctx->dpb_count;
+ break;
+ } else if (ctx->state != MFCINST_INIT) {
+ v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
+ return -EINVAL;
+ }
+ /* Should wait for the header to be parsed */
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_FIMV_R2H_CMD_SEQ_DONE_RET, 0);
+ if (ctx->state >= MFCINST_HEAD_PARSED &&
+ ctx->state < MFCINST_ABORT) {
+ ctrl->val = ctx->dpb_count;
+ } else {
+ v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
+ return -EINVAL;
+ }
+ break;
+ }
+ return 0;
+}
+
+
+static const struct v4l2_ctrl_ops s5p_mfc_dec_ctrl_ops = {
+ .s_ctrl = s5p_mfc_dec_s_ctrl,
+ .g_volatile_ctrl = s5p_mfc_dec_g_v_ctrl,
+};
+
+/* Get cropping information */
+static int vidioc_g_crop(struct file *file, void *priv,
+ struct v4l2_crop *cr)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ u32 left, right, top, bottom;
+
+ if (ctx->state != MFCINST_HEAD_PARSED &&
+ ctx->state != MFCINST_RUNNING && ctx->state != MFCINST_FINISHING
+ && ctx->state != MFCINST_FINISHED) {
+ mfc_err("Cannont set crop\n");
+ return -EINVAL;
+ }
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_H264) {
+ left = s5p_mfc_read_shm(ctx, CROP_INFO_H);
+ right = left >> S5P_FIMV_SHARED_CROP_RIGHT_SHIFT;
+ left = left & S5P_FIMV_SHARED_CROP_LEFT_MASK;
+ top = s5p_mfc_read_shm(ctx, CROP_INFO_V);
+ bottom = top >> S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT;
+ top = top & S5P_FIMV_SHARED_CROP_TOP_MASK;
+ cr->c.left = left;
+ cr->c.top = top;
+ cr->c.width = ctx->img_width - left - right;
+ cr->c.height = ctx->img_height - top - bottom;
+ mfc_debug(2, "Cropping info [h264]: l=%d t=%d "
+ "w=%d h=%d (r=%d b=%d fw=%d fh=%d\n", left, top,
+ cr->c.width, cr->c.height, right, bottom,
+ ctx->buf_width, ctx->buf_height);
+ } else {
+ cr->c.left = 0;
+ cr->c.top = 0;
+ cr->c.width = ctx->img_width;
+ cr->c.height = ctx->img_height;
+ mfc_debug(2, "Cropping info: w=%d h=%d fw=%d "
+ "fh=%d\n", cr->c.width, cr->c.height, ctx->buf_width,
+ ctx->buf_height);
+ }
+ return 0;
+}
+
+/* v4l2_ioctl_ops */
+static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_g_crop = vidioc_g_crop,
+};
+
+static int s5p_mfc_queue_setup(struct vb2_queue *vq, unsigned int *buf_count,
+ unsigned int *plane_count, unsigned long psize[],
+ void *allocators[])
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+
+ /* Video output for decoding (source)
+ * this can be set after getting an instance */
+ if (ctx->state == MFCINST_INIT &&
+ vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* A single plane is required for input */
+ *plane_count = 1;
+ if (*buf_count < 1)
+ *buf_count = 1;
+ if (*buf_count > MFC_MAX_BUFFERS)
+ *buf_count = MFC_MAX_BUFFERS;
+ /* Video capture for decoding (destination)
+ * this can be set after the header was parsed */
+ } else if (ctx->state == MFCINST_HEAD_PARSED &&
+ vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ /* Output plane count is 2 - one for Y and one for CbCr */
+ *plane_count = 2;
+ /* Setup buffer count */
+ if (*buf_count < ctx->dpb_count)
+ *buf_count = ctx->dpb_count;
+ if (*buf_count > ctx->dpb_count + MFC_MAX_EXTRA_DPB)
+ *buf_count = ctx->dpb_count + MFC_MAX_EXTRA_DPB;
+ if (*buf_count > MFC_MAX_BUFFERS)
+ *buf_count = MFC_MAX_BUFFERS;
+ } else {
+ mfc_err("State seems invalid. State = %d, vq->type = %d\n",
+ ctx->state, vq->type);
+ return -EINVAL;
+ }
+ mfc_debug(2, "Buffer count=%d, plane count=%d\n",
+ *buf_count, *plane_count);
+ if (ctx->state == MFCINST_HEAD_PARSED &&
+ vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ psize[0] = ctx->luma_size;
+ psize[1] = ctx->chroma_size;
+ allocators[0] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
+ allocators[1] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ ctx->state == MFCINST_INIT) {
+ psize[0] = ctx->dec_src_buf_size;
+ allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+ } else {
+ mfc_err("This video node is dedicated to decoding. Decoding not initalised\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void s5p_mfc_unlock(struct vb2_queue *q)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mutex_unlock(&dev->mfc_mutex);
+}
+
+static void s5p_mfc_lock(struct vb2_queue *q)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mutex_lock(&dev->mfc_mutex);
+}
+
+static int s5p_mfc_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ unsigned int i;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (ctx->capture_state == QUEUE_BUFS_MMAPED)
+ return 0;
+ for (i = 0; i <= ctx->src_fmt->num_planes ; i++) {
+ if (IS_ERR_OR_NULL(ERR_PTR(
+ vb2_dma_contig_plane_paddr(vb, i)))) {
+ mfc_err("Plane mem not allocated\n");
+ return -EINVAL;
+ }
+ }
+ if (vb2_plane_size(vb, 0) < ctx->luma_size ||
+ vb2_plane_size(vb, 1) < ctx->chroma_size) {
+ mfc_err("Plane buffer (CAPTURE) is too small\n");
+ return -EINVAL;
+ }
+ i = vb->v4l2_buf.index;
+ ctx->dst_bufs[i].b = vb;
+ ctx->dst_bufs[i].cookie.raw.luma =
+ vb2_dma_contig_plane_paddr(vb, 0);
+ ctx->dst_bufs[i].cookie.raw.chroma =
+ vb2_dma_contig_plane_paddr(vb, 1);
+ ctx->dst_bufs_cnt++;
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (IS_ERR_OR_NULL(ERR_PTR(
+ vb2_dma_contig_plane_paddr(vb, 0)))) {
+ mfc_err("Plane memory not allocated\n");
+ return -EINVAL;
+ }
+ if (vb2_plane_size(vb, 0) < ctx->dec_src_buf_size) {
+ mfc_err("Plane buffer (OUTPUT) is too small\n");
+ return -EINVAL;
+ }
+
+ i = vb->v4l2_buf.index;
+ ctx->src_bufs[i].b = vb;
+ ctx->src_bufs[i].cookie.stream =
+ vb2_dma_contig_plane_paddr(vb, 0);
+ ctx->src_bufs_cnt++;
+ } else {
+ mfc_err("s5p_mfc_buf_init: unknown queue type\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_start_streaming(struct vb2_queue *q)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+ if (ctx->state == MFCINST_FINISHING ||
+ ctx->state == MFCINST_FINISHED)
+ ctx->state = MFCINST_RUNNING;
+ /* If context is ready then schedule it to run */
+ if (s5p_mfc_ctx_ready(ctx)) {
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ }
+ s5p_mfc_try_run(dev);
+ return 0;
+}
+
+static int s5p_mfc_stop_streaming(struct vb2_queue *q)
+{
+ unsigned long flags;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ int aborted = 0;
+
+ if ((ctx->state == MFCINST_FINISHING ||
+ ctx->state == MFCINST_RUNNING) &&
+ dev->curr_ctx == ctx->num && dev->hw_lock) {
+ ctx->state = MFCINST_ABORT;
+ s5p_mfc_wait_for_done_ctx(ctx,
+ S5P_FIMV_R2H_CMD_FRAME_DONE_RET, 0);
+ aborted = 1;
+ }
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+ INIT_LIST_HEAD(&ctx->dst_queue);
+ ctx->dst_queue_cnt = 0;
+ ctx->dpb_flush_flag = 1;
+ ctx->dec_dst_flag = 0;
+ }
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+ INIT_LIST_HEAD(&ctx->src_queue);
+ ctx->src_queue_cnt = 0;
+ }
+ if (aborted)
+ ctx->state = MFCINST_RUNNING;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return 0;
+}
+
+
+static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ struct s5p_mfc_buf *mfc_buf;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index];
+ mfc_buf->used = 0;
+ spin_lock_irqsave(&dev->irqlock, flags);
+ list_add_tail(&mfc_buf->list, &ctx->src_queue);
+ ctx->src_queue_cnt++;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index];
+ mfc_buf->used = 0;
+ /* Mark destination as available for use by MFC */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ set_bit(vb->v4l2_buf.index, &ctx->dec_dst_flag);
+ list_add_tail(&mfc_buf->list, &ctx->dst_queue);
+ ctx->dst_queue_cnt++;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ } else {
+ mfc_err("Unsupported buffer type (%d)\n", vq->type);
+ }
+ if (s5p_mfc_ctx_ready(ctx)) {
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ }
+ s5p_mfc_try_run(dev);
+}
+
+static struct vb2_ops s5p_mfc_dec_qops = {
+ .queue_setup = s5p_mfc_queue_setup,
+ .wait_prepare = s5p_mfc_unlock,
+ .wait_finish = s5p_mfc_lock,
+ .buf_init = s5p_mfc_buf_init,
+ .start_streaming = s5p_mfc_start_streaming,
+ .stop_streaming = s5p_mfc_stop_streaming,
+ .buf_queue = s5p_mfc_buf_queue,
+};
+
+struct s5p_mfc_codec_ops *get_dec_codec_ops(void)
+{
+ return &decoder_codec_ops;
+}
+
+struct vb2_ops *get_dec_queue_ops(void)
+{
+ return &s5p_mfc_dec_qops;
+}
+
+const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void)
+{
+ return &s5p_mfc_dec_ioctl_ops;
+}
+
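+/* MFC 5.1 private controls sit in the MPEG control class and are marked
+ * driver-private, so they have to be registered as custom controls */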
+#define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_MPEG) \
+ && V4L2_CTRL_DRIVER_PRIV(x))
+
+int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx)
+{
+ struct v4l2_ctrl_config cfg;
+ int i;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS);
+ if (ctx->ctrl_handler.error) {
+ mfc_err("v4l2_ctrl_handler_init failed\n");
+ return ctx->ctrl_handler.error;
+ }
+
+ for (i = 0; i < NUM_CTRLS; i++) {
+ if (IS_MFC51_PRIV(controls[i].id)) {
+ cfg.ops = &s5p_mfc_dec_ctrl_ops;
+ cfg.id = controls[i].id;
+ cfg.min = controls[i].minimum;
+ cfg.max = controls[i].maximum;
+ cfg.def = controls[i].default_value;
+ cfg.name = controls[i].name;
+ cfg.type = controls[i].type;
+
+ cfg.step = controls[i].step;
+ cfg.menu_skip_mask = 0;
+
+ ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+ &cfg, NULL);
+ } else {
+ ctx->ctrls[i] = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &s5p_mfc_dec_ctrl_ops,
+ controls[i].id, controls[i].minimum,
+ controls[i].maximum, controls[i].step,
+ controls[i].default_value);
+ }
+ if (ctx->ctrl_handler.error) {
+ mfc_err("Adding control (%d) failed\n", i);
+ return ctx->ctrl_handler.error;
+ }
+ if (controls[i].is_volatile && ctx->ctrls[i])
+ ctx->ctrls[i]->is_volatile = 1;
+ }
+ return 0;
+}
+
+void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx)
+{
+ int i;
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ for (i = 0; i < NUM_CTRLS; i++)
+ ctx->ctrls[i] = NULL;
+}
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.h b/drivers/media/video/s5p-mfc/s5p_mfc_dec.h
new file mode 100644
index 00000000000..fb8b215db0e
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.h
@@ -0,0 +1,23 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_dec.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_DEC_H_
+#define S5P_MFC_DEC_H_
+
+struct s5p_mfc_codec_ops *get_dec_codec_ops(void);
+struct vb2_ops *get_dec_queue_ops(void);
+const struct v4l2_ioctl_ops *get_dec_v4l2_ioctl_ops(void);
+struct s5p_mfc_fmt *get_dec_def_fmt(bool src);
+int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx);
+
+#endif /* S5P_MFC_DEC_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
new file mode 100644
index 00000000000..fee094a14f4
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -0,0 +1,1829 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Jeongtae Park <jtp.park@samsung.com>
+ * Kamil Debski <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/version.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_enc.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+
+static struct s5p_mfc_fmt formats[] = {
+ {
+ .name = "4:2:0 2 Planes 64x32 Tiles",
+ .fourcc = V4L2_PIX_FMT_NV12MT,
+ .codec_mode = S5P_FIMV_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ },
+ {
+ .name = "4:2:0 2 Planes",
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = S5P_FIMV_CODEC_NONE,
+ .type = MFC_FMT_RAW,
+ .num_planes = 2,
+ },
+ {
+ .name = "H264 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H264,
+ .codec_mode = S5P_FIMV_CODEC_H264_ENC,
+ .type = MFC_FMT_ENC,
+ .num_planes = 1,
+ },
+ {
+ .name = "MPEG4 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_MPEG4,
+ .codec_mode = S5P_FIMV_CODEC_MPEG4_ENC,
+ .type = MFC_FMT_ENC,
+ .num_planes = 1,
+ },
+ {
+ .name = "H264 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_H263,
+ .codec_mode = S5P_FIMV_CODEC_H263_ENC,
+ .type = MFC_FMT_ENC,
+ .num_planes = 1,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(formats)
+static struct s5p_mfc_fmt *find_format(struct v4l2_format *f, unsigned int t)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FORMATS; i++) {
+ if (formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
+ formats[i].type == t)
+ return &formats[i];
+ }
+ return NULL;
+}
+
+static struct mfc_control controls[] = {
+ {
+ .id = V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
+ .maximum = V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES,
+ .default_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1900,
+ .maximum = (1 << 30) - 1,
+ .step = 1,
+ .default_value = 1900,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_PADDING,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Padding Control Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Padding Color YUV Value",
+ .minimum = 0,
+ .maximum = (1 << 25) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_BITRATE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 1,
+ .maximum = (1 << 30) - 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Rate Control Reaction Coeff.",
+ .minimum = 1,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Force frame type",
+ .minimum = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED,
+ .maximum = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED,
+ .default_value = V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VBV_SIZE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+ .maximum = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .default_value = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Frame Skip Enable",
+ .minimum = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .maximum = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
+ .menu_skip_mask = 0,
+ .default_value = V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "Fixed Target Bit Enable",
+ .minimum = 0,
+ .maximum = 1,
+ .default_value = 0,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_B_FRAMES,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 2,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .maximum = V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
+ .default_value = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .menu_skip_mask = ~(
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)
+ ),
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .maximum = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
+ .default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .menu_skip_mask = ~(
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_1)
+ ),
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
+ .maximum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
+ .default_value = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED,
+ .maximum = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
+ .default_value = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = -6,
+ .maximum = 6,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = -6,
+ .maximum = 6,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .maximum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ .default_value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "The Number of Ref. Pic for P",
+ .minimum = 1,
+ .maximum = 2,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_MIN_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_MAX_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 I-Frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_MIN_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 Minimum QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_MAX_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 Maximum QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 P frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "H263 B frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 I-Frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 Minimum QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 Maximum QP value",
+ .minimum = 0,
+ .maximum = 51,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 P frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "MPEG4 B frame QP value",
+ .minimum = 1,
+ .maximum = 31,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Dark Reg Adaptive RC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Smooth Reg Adaptive RC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Static Reg Adaptive RC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .name = "H264 Activity Reg Adaptive RC",
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
+ .maximum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED,
+ .default_value = 0,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_GOP_CLOSURE,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 1,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_H264_I_PERIOD,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
+ .maximum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE,
+ .default_value = 0,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG4_QPEL,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+};
+
+#define NUM_CTRLS ARRAY_SIZE(controls)
+static const char * const *mfc51_get_menu(u32 id)
+{
+ static const char * const mfc51_video_frame_skip[] = {
+ "Disabled",
+ "Level Limit",
+ "VBV/CPB Limit",
+ NULL,
+ };
+ static const char * const mfc51_video_force_frame[] = {
+ "Disabled",
+ "I Frame",
+ "Not Coded",
+ NULL,
+ };
+ switch (id) {
+ case V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE:
+ return mfc51_video_frame_skip;
+ case V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE:
+ return mfc51_video_force_frame;
+ }
+ return NULL;
+}
+
+static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
+{
+ mfc_debug(2, "src=%d, dst=%d, state=%d\n",
+ ctx->src_queue_cnt, ctx->dst_queue_cnt, ctx->state);
+ /* context is ready to make header */
+ if (ctx->state == MFCINST_GOT_INST && ctx->dst_queue_cnt >= 1)
+ return 1;
+ /* context is ready to encode a frame */
+ if (ctx->state == MFCINST_RUNNING &&
+ ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
+ return 1;
+ /* context is ready to encode the remaining frames */
+ if (ctx->state == MFCINST_FINISHING &&
+ ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
+ return 1;
+ mfc_debug(2, "ctx is not ready\n");
+ return 0;
+}
+
+static void cleanup_ref_queue(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_buf *mb_entry;
+ unsigned long mb_y_addr, mb_c_addr;
+
+ /* move buffers in ref queue to src queue */
+ while (!list_empty(&ctx->ref_queue)) {
+ mb_entry = list_entry((&ctx->ref_queue)->next,
+ struct s5p_mfc_buf, list);
+ mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
+ mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
+ list_del(&mb_entry->list);
+ ctx->ref_queue_cnt--;
+ list_add_tail(&mb_entry->list, &ctx->src_queue);
+ ctx->src_queue_cnt++;
+ }
+ mfc_debug(2, "enc src count: %d, enc ref count: %d\n",
+ ctx->src_queue_cnt, ctx->ref_queue_cnt);
+ INIT_LIST_HEAD(&ctx->ref_queue);
+ ctx->ref_queue_cnt = 0;
+}
+
+static int enc_pre_seq_start(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_mb;
+ unsigned long dst_addr;
+ unsigned int dst_size;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+ dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_size = vb2_plane_size(dst_mb->b, 0);
+ s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return 0;
+}
+
+static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_buf *dst_mb;
+ unsigned long flags;
+
+ if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) {
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dst_mb = list_entry(ctx->dst_queue.next,
+ struct s5p_mfc_buf, list);
+ list_del(&dst_mb->list);
+ ctx->dst_queue_cnt--;
+ vb2_set_plane_payload(dst_mb->b, 0,
+ s5p_mfc_get_enc_strm_size());
+ vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ }
+ ctx->state = MFCINST_RUNNING;
+ if (s5p_mfc_ctx_ready(ctx)) {
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ }
+ s5p_mfc_try_run(dev);
+ return 0;
+}
+
+static int enc_pre_frame_start(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *dst_mb;
+ struct s5p_mfc_buf *src_mb;
+ unsigned long flags;
+ unsigned long src_y_addr, src_c_addr, dst_addr;
+ unsigned int dst_size;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ src_y_addr = vb2_dma_contig_plane_paddr(src_mb->b, 0);
+ src_c_addr = vb2_dma_contig_plane_paddr(src_mb->b, 1);
+ s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr, src_c_addr);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+ dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_size = vb2_plane_size(dst_mb->b, 0);
+ s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+
+ return 0;
+}
+
+static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *mb_entry;
+ unsigned long enc_y_addr, enc_c_addr;
+ unsigned long mb_y_addr, mb_c_addr;
+ int slice_type;
+ unsigned int strm_size;
+ unsigned long flags;
+
+ slice_type = s5p_mfc_get_enc_slice_type();
+ strm_size = s5p_mfc_get_enc_strm_size();
+ mfc_debug(2, "Encoded slice type: %d", slice_type);
+ mfc_debug(2, "Encoded stream size: %d", strm_size);
+ mfc_debug(2, "Display order: %d",
+ mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT));
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (slice_type >= 0) {
+ s5p_mfc_get_enc_frame_buffer(ctx, &enc_y_addr, &enc_c_addr);
+ list_for_each_entry(mb_entry, &ctx->src_queue, list) {
+ mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
+ mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
+ if ((enc_y_addr == mb_y_addr) &&
+ (enc_c_addr == mb_c_addr)) {
+ list_del(&mb_entry->list);
+ ctx->src_queue_cnt--;
+ vb2_buffer_done(mb_entry->b,
+ VB2_BUF_STATE_DONE);
+ break;
+ }
+ }
+ list_for_each_entry(mb_entry, &ctx->ref_queue, list) {
+ mb_y_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 0);
+ mb_c_addr = vb2_dma_contig_plane_paddr(mb_entry->b, 1);
+ if ((enc_y_addr == mb_y_addr) &&
+ (enc_c_addr == mb_c_addr)) {
+ list_del(&mb_entry->list);
+ ctx->ref_queue_cnt--;
+ vb2_buffer_done(mb_entry->b,
+ VB2_BUF_STATE_DONE);
+ break;
+ }
+ }
+ }
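+ /* A consumed source buffer is parked on the reference queue; it is
+ * returned to userspace once the hardware reports it as encoded */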
+ if ((ctx->src_queue_cnt > 0) && (ctx->state == MFCINST_RUNNING)) {
+ mb_entry = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
+ list);
+ if (mb_entry->used) {
+ list_del(&mb_entry->list);
+ ctx->src_queue_cnt--;
+ list_add_tail(&mb_entry->list, &ctx->ref_queue);
+ ctx->ref_queue_cnt++;
+ }
+ mfc_debug(2, "enc src count: %d, enc ref count: %d\n",
+ ctx->src_queue_cnt, ctx->ref_queue_cnt);
+ }
+ if (strm_size > 0) {
+ /* at least one destination buffer always exists */
+ mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf,
+ list);
+ list_del(&mb_entry->list);
+ ctx->dst_queue_cnt--;
+ switch (slice_type) {
+ case S5P_FIMV_ENC_SI_SLICE_TYPE_I:
+ mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ break;
+ case S5P_FIMV_ENC_SI_SLICE_TYPE_P:
+ mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+ break;
+ case S5P_FIMV_ENC_SI_SLICE_TYPE_B:
+ mb_entry->b->v4l2_buf.flags |= V4L2_BUF_FLAG_BFRAME;
+ break;
+ }
+ vb2_set_plane_payload(mb_entry->b, 0, strm_size);
+ vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE);
+ }
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0)) {
+ spin_lock(&dev->condlock);
+ clear_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock(&dev->condlock);
+ }
+ return 0;
+}
+
+static struct s5p_mfc_codec_ops encoder_codec_ops = {
+ .pre_seq_start = enc_pre_seq_start,
+ .post_seq_start = enc_post_seq_start,
+ .pre_frame_start = enc_pre_frame_start,
+ .post_frame_start = enc_post_frame_start,
+};
+
+/* Query capabilities of the device */
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+
+ strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
+ strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->version = KERNEL_VERSION(1, 0, 0);
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
+ | V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool mplane, bool out)
+{
+ struct s5p_mfc_fmt *fmt;
+ int i, j = 0;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (mplane && formats[i].num_planes == 1)
+ continue;
+ else if (!mplane && formats[i].num_planes > 1)
+ continue;
+ if (out && formats[i].type != MFC_FMT_RAW)
+ continue;
+ else if (!out && formats[i].type != MFC_FMT_ENC)
+ continue;
+ if (j == f->index) {
+ fmt = &formats[i];
+ strlcpy(f->description, fmt->name,
+ sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+ ++j;
+ }
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, false, false);
+}
+
+static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, true, false);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, false, true);
+}
+
+static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(f, true, true);
+}
+
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+
+ mfc_debug(2, "f->type = %d ctx->state = %d\n", f->type, ctx->state);
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ /* This is run on CAPTURE (encoder destination) */
+ pix_fmt_mp->width = 0;
+ pix_fmt_mp->height = 0;
+ pix_fmt_mp->field = V4L2_FIELD_NONE;
+ pix_fmt_mp->pixelformat = ctx->dst_fmt->fourcc;
+ pix_fmt_mp->num_planes = ctx->dst_fmt->num_planes;
+
+ pix_fmt_mp->plane_fmt[0].bytesperline = ctx->enc_dst_buf_size;
+ pix_fmt_mp->plane_fmt[0].sizeimage = ctx->enc_dst_buf_size;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* This is run on OUTPUT (encoder source) */
+ pix_fmt_mp->width = ctx->img_width;
+ pix_fmt_mp->height = ctx->img_height;
+
+ pix_fmt_mp->field = V4L2_FIELD_NONE;
+ pix_fmt_mp->pixelformat = ctx->src_fmt->fourcc;
+ pix_fmt_mp->num_planes = ctx->src_fmt->num_planes;
+
+ pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+ pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+ } else {
+ mfc_err("invalid buf type\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_fmt *fmt;
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ fmt = find_format(f, MFC_FMT_ENC);
+ if (!fmt) {
+ mfc_err("failed to try output format\n");
+ return -EINVAL;
+ }
+
+ if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
+ mfc_err("must be set encoding output size\n");
+ return -EINVAL;
+ }
+
+ pix_fmt_mp->plane_fmt[0].bytesperline =
+ pix_fmt_mp->plane_fmt[0].sizeimage;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ fmt = find_format(f, MFC_FMT_RAW);
+ if (!fmt) {
+ mfc_err("failed to try output format\n");
+ return -EINVAL;
+ }
+
+ if (fmt->num_planes != pix_fmt_mp->num_planes) {
+ mfc_err("failed to try output format\n");
+ return -EINVAL;
+ }
+ } else {
+ mfc_err("invalid buf type\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_fmt *fmt;
+ struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ unsigned long flags;
+ int ret = 0;
+
+ ret = vidioc_try_fmt(file, priv, f);
+ if (ret)
+ return ret;
+ if (ctx->vq_src.streaming || ctx->vq_dst.streaming) {
+ v4l2_err(&dev->v4l2_dev, "%s queue busy\n", __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ fmt = find_format(f, MFC_FMT_ENC);
+ if (!fmt) {
+ mfc_err("failed to set capture format\n");
+ return -EINVAL;
+ }
+ ctx->state = MFCINST_INIT;
+ ctx->dst_fmt = fmt;
+ ctx->codec_mode = ctx->dst_fmt->codec_mode;
+ ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage;
+ pix_fmt_mp->plane_fmt[0].bytesperline = 0;
+ ctx->dst_bufs_cnt = 0;
+ ctx->capture_state = QUEUE_FREE;
+ s5p_mfc_alloc_instance_buffer(ctx);
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_try_run(dev);
+ if (s5p_mfc_wait_for_done_ctx(ctx, \
+ S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET, 1)) {
+ /* Error or timeout */
+ mfc_err("Error getting instance from hardware\n");
+ s5p_mfc_release_instance_buffer(ctx);
+ ret = -EIO;
+ goto out;
+ }
+ mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ fmt = find_format(f, MFC_FMT_RAW);
+ if (!fmt) {
+ mfc_err("failed to set output format\n");
+ return -EINVAL;
+ }
+ if (fmt->num_planes != pix_fmt_mp->num_planes) {
+ mfc_err("failed to set output format\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ ctx->src_fmt = fmt;
+ ctx->img_width = pix_fmt_mp->width;
+ ctx->img_height = pix_fmt_mp->height;
+ mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode);
+ mfc_debug(2, "fmt - w: %d, h: %d, ctx - w: %d, h: %d\n",
+ pix_fmt_mp->width, pix_fmt_mp->height,
+ ctx->img_width, ctx->img_height);
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) {
+ ctx->buf_width = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12M_HALIGN);
+ ctx->luma_size = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12M_HALIGN) * ALIGN(ctx->img_height,
+ S5P_FIMV_NV12M_LVALIGN);
+ ctx->chroma_size = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12M_HALIGN) * ALIGN((ctx->img_height
+ >> 1), S5P_FIMV_NV12M_CVALIGN);
+
+ ctx->luma_size = ALIGN(ctx->luma_size,
+ S5P_FIMV_NV12M_SALIGN);
+ ctx->chroma_size = ALIGN(ctx->chroma_size,
+ S5P_FIMV_NV12M_SALIGN);
+
+ pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+ pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+ pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+
+ } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT) {
+ ctx->buf_width = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12MT_HALIGN);
+ ctx->luma_size = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12MT_HALIGN) * ALIGN(ctx->img_height,
+ S5P_FIMV_NV12MT_VALIGN);
+ ctx->chroma_size = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12MT_HALIGN) * ALIGN((ctx->img_height
+ >> 1), S5P_FIMV_NV12MT_VALIGN);
+ ctx->luma_size = ALIGN(ctx->luma_size,
+ S5P_FIMV_NV12MT_SALIGN);
+ ctx->chroma_size = ALIGN(ctx->chroma_size,
+ S5P_FIMV_NV12MT_SALIGN);
+
+ pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size;
+ pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width;
+ pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size;
+ pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width;
+ }
+ ctx->src_bufs_cnt = 0;
+ ctx->output_state = QUEUE_FREE;
+ } else {
+ mfc_err("invalid buf type\n");
+ return -EINVAL;
+ }
+out:
+ mfc_debug_leave();
+ return ret;
+}
+
+static int vidioc_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *reqbufs)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret = 0;
+
+ /* if memory is not mmap or userptr return error */
+ if ((reqbufs->memory != V4L2_MEMORY_MMAP) &&
+ (reqbufs->memory != V4L2_MEMORY_USERPTR))
+ return -EINVAL;
+ if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (ctx->capture_state != QUEUE_FREE) {
+ mfc_err("invalid capture state: %d\n",
+ ctx->capture_state);
+ return -EINVAL;
+ }
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ if (ret != 0) {
+ mfc_err("error in vb2_reqbufs() for E(D)\n");
+ return ret;
+ }
+ ctx->capture_state = QUEUE_BUFS_REQUESTED;
+ ret = s5p_mfc_alloc_codec_buffers(ctx);
+ if (ret) {
+ mfc_err("Failed to allocate encoding buffers\n");
+ reqbufs->count = 0;
+ ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+ return -ENOMEM;
+ }
+ } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (ctx->output_state != QUEUE_FREE) {
+ mfc_err("invalid output state: %d\n",
+ ctx->output_state);
+ return -EINVAL;
+ }
+ ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
+ if (ret != 0) {
+ mfc_err("error in vb2_reqbufs() for E(S)\n");
+ return ret;
+ }
+ ctx->output_state = QUEUE_BUFS_REQUESTED;
+ } else {
+ mfc_err("invalid buf type\n");
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static int vidioc_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ int ret = 0;
+
+ /* if memory is not mmap or userptr return error */
+ if ((buf->memory != V4L2_MEMORY_MMAP) &&
+ (buf->memory != V4L2_MEMORY_USERPTR))
+ return -EINVAL;
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (ctx->state != MFCINST_GOT_INST) {
+ mfc_err("invalid context state: %d\n", ctx->state);
+ return -EINVAL;
+ }
+ ret = vb2_querybuf(&ctx->vq_dst, buf);
+ if (ret != 0) {
+ mfc_err("error in vb2_querybuf() for E(D)\n");
+ return ret;
+ }
+ buf->m.planes[0].m.mem_offset += DST_QUEUE_OFF_BASE;
+ } else if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = vb2_querybuf(&ctx->vq_src, buf);
+ if (ret != 0) {
+ mfc_err("error in vb2_querybuf() for E(S)\n");
+ return ret;
+ }
+ } else {
+ mfc_err("invalid buf type\n");
+ return -EINVAL;
+ }
+ return ret;
+}
+
+/* Queue a buffer */
+static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MFCINST_ERROR) {
+ mfc_err("Call on QBUF after unrecoverable error\n");
+ return -EIO;
+ }
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_qbuf(&ctx->vq_src, buf);
+ else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_qbuf(&ctx->vq_dst, buf);
+ return -EINVAL;
+}
+
+/* Dequeue a buffer */
+static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (ctx->state == MFCINST_ERROR) {
+ mfc_err("Call on DQBUF after unrecoverable error\n");
+ return -EIO;
+ }
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK);
+ else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK);
+ return -EINVAL;
+}
+
+/* Stream on */
+static int vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_streamon(&ctx->vq_src, type);
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_streamon(&ctx->vq_dst, type);
+ return -EINVAL;
+}
+
+/* Stream off, which is equivalent to a pause */
+static int vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return vb2_streamoff(&ctx->vq_src, type);
+ else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return vb2_streamoff(&ctx->vq_dst, type);
+ return -EINVAL;
+}
+
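+/*
+ * Map the V4L2 H.264 level enum onto the numeric level value (level * 10,
+ * with level 1b encoded as 9) that is later written to the profile/level
+ * register.
+ */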
+static inline int h264_level(enum v4l2_mpeg_video_h264_level lvl)
+{
+ static unsigned int t[V4L2_MPEG_VIDEO_H264_LEVEL_4_0 + 1] = {
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1_0 */ 10,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1B */ 9,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1_1 */ 11,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1_2 */ 12,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_1_3 */ 13,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_2_0 */ 20,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_2_1 */ 21,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_2_2 */ 22,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_3_0 */ 30,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_3_1 */ 31,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_3_2 */ 32,
+ /* V4L2_MPEG_VIDEO_H264_LEVEL_4_0 */ 40,
+ };
+ return t[lvl];
+}
+
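+/* Map the V4L2 MPEG-4 level enum onto the MFC numeric level code. */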
+static inline int mpeg4_level(enum v4l2_mpeg_video_mpeg4_level lvl)
+{
+ static unsigned int t[V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 + 1] = {
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_0 */ 0,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B */ 9,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_1 */ 1,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_2 */ 2,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_3 */ 3,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B */ 7,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_4 */ 4,
+ /* V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 */ 5,
+ };
+ return t[lvl];
+}
+
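+/*
+ * Map the V4L2 VUI SAR enum onto the H.264 aspect_ratio_idc code
+ * (255 selects Extended_SAR).
+ */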
+static inline int vui_sar_idc(enum v4l2_mpeg_video_h264_vui_sar_idc sar)
+{
+ static unsigned int t[V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED + 1] = {
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED */ 0,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1 */ 1,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11 */ 2,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11 */ 3,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11 */ 4,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33 */ 5,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11 */ 6,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11 */ 7,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11 */ 8,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33 */ 9,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11 */ 10,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11 */ 11,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33 */ 12,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99 */ 13,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3 */ 14,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2 */ 15,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1 */ 16,
+ /* V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED */ 255,
+ };
+ return t[sar];
+}
+
+static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ p->gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ p->slice_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+ p->slice_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+ p->slice_bit = ctrl->val * 8;
+ break;
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
+ p->intra_refresh_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_PADDING:
+ p->pad = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV:
+ p->pad_luma = (ctrl->val >> 16) & 0xff;
+ p->pad_cb = (ctrl->val >> 8) & 0xff;
+ p->pad_cr = (ctrl->val >> 0) & 0xff;
+ break;
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ p->rc_frame = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ p->rc_bitrate = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF:
+ p->rc_reaction_coeff = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE:
+ ctx->force_frame_type = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VBV_SIZE:
+ p->vbv_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:
+ p->codec.h264.cpb_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ p->seq_hdr_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE:
+ p->frame_skip_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT:
+ p->fixed_target_bit = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ p->num_b_frame = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ p->codec.h264.profile =
+ S5P_FIMV_ENC_PROFILE_H264_MAIN;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ p->codec.h264.profile =
+ S5P_FIMV_ENC_PROFILE_H264_HIGH;
+ break;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ p->codec.h264.profile =
+ S5P_FIMV_ENC_PROFILE_H264_BASELINE;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ p->codec.h264.level_v4l2 = ctrl->val;
+ p->codec.h264.level = h264_level(ctrl->val);
+ if (p->codec.h264.level < 0) {
+ mfc_err("Level number is wrong\n");
+ ret = p->codec.h264.level;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ p->codec.mpeg4.level_v4l2 = ctrl->val;
+ p->codec.mpeg4.level = mpeg4_level(ctrl->val);
+ if (p->codec.mpeg4.level < 0) {
+ mfc_err("Level number is wrong\n");
+ ret = p->codec.mpeg4.level;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ p->codec.h264.loop_filter_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
+ p->codec.h264.loop_filter_alpha = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
+ p->codec.h264.loop_filter_beta = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ p->codec.h264.entropy_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P:
+ p->codec.h264.num_ref_pic_4p = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ p->codec.h264._8x8_transform = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+ p->codec.h264.rc_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ p->codec.h264.rc_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ p->codec.h264.rc_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ p->codec.h264.rc_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+ p->codec.h264.rc_p_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
+ p->codec.h264.rc_b_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP:
+ p->codec.mpeg4.rc_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_MIN_QP:
+ p->codec.mpeg4.rc_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_MAX_QP:
+ p->codec.mpeg4.rc_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:
+ p->codec.mpeg4.rc_p_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP:
+ case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:
+ p->codec.mpeg4.rc_b_frame_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK:
+ p->codec.h264.rc_mb_dark = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH:
+ p->codec.h264.rc_mb_smooth = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC:
+ p->codec.h264.rc_mb_static = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY:
+ p->codec.h264.rc_mb_activity = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+ p->codec.h264.vui_sar = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ p->codec.h264.vui_sar_idc = vui_sar_idc(ctrl->val);
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH:
+ p->codec.h264.vui_ext_sar_width = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT:
+ p->codec.h264.vui_ext_sar_height = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
+ p->codec.h264.open_gop = !ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
+ p->codec.h264.open_gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE:
+ p->codec.mpeg4.profile =
+ S5P_FIMV_ENC_PROFILE_MPEG4_SIMPLE;
+ break;
+ case V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE:
+ p->codec.mpeg4.profile =
+ S5P_FIMV_ENC_PROFILE_MPEG4_ADVANCED_SIMPLE;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
+ p->codec.mpeg4.quarter_pixel = ctrl->val;
+ break;
+ default:
+ v4l2_err(&dev->v4l2_dev, "Invalid control, id=%d, val=%d\n",
+ ctrl->id, ctrl->val);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops s5p_mfc_enc_ctrl_ops = {
+ .s_ctrl = s5p_mfc_enc_s_ctrl,
+};
+
+int vidioc_s_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+ if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ctx->enc_params.rc_framerate_num =
+ a->parm.output.timeperframe.denominator;
+ ctx->enc_params.rc_framerate_denom =
+ a->parm.output.timeperframe.numerator;
+ } else {
+ mfc_err("Setting FPS is only possible for the output queue\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int vidioc_g_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+
+	if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ a->parm.output.timeperframe.denominator =
+ ctx->enc_params.rc_framerate_num;
+ a->parm.output.timeperframe.numerator =
+ ctx->enc_params.rc_framerate_denom;
+ } else {
+		mfc_err("Getting FPS is only possible for the output queue\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops s5p_mfc_enc_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_s_parm = vidioc_s_parm,
+ .vidioc_g_parm = vidioc_g_parm,
+};
+
+static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
+{
+ int i;
+
+ if (!fmt)
+ return -EINVAL;
+ if (fmt->num_planes != vb->num_planes) {
+ mfc_err("invalid plane number for the format\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < fmt->num_planes; i++) {
+ if (!vb2_dma_contig_plane_paddr(vb, i)) {
+ mfc_err("failed to get plane cookie\n");
+ return -EINVAL;
+ }
+ mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx",
+ vb->v4l2_buf.index, i,
+ vb2_dma_contig_plane_paddr(vb, i));
+ }
+ return 0;
+}
+
+static int s5p_mfc_queue_setup(struct vb2_queue *vq,
+ unsigned int *buf_count, unsigned int *plane_count,
+ unsigned long psize[], void *allocators[])
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+
+ if (ctx->state != MFCINST_GOT_INST) {
+		mfc_err("invalid state: %d\n", ctx->state);
+ return -EINVAL;
+ }
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (ctx->dst_fmt)
+ *plane_count = ctx->dst_fmt->num_planes;
+ else
+ *plane_count = MFC_ENC_CAP_PLANE_COUNT;
+ if (*buf_count < 1)
+ *buf_count = 1;
+ if (*buf_count > MFC_MAX_BUFFERS)
+ *buf_count = MFC_MAX_BUFFERS;
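+		/* Encoded bitstream buffers come from memory bank 1 */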
+ psize[0] = ctx->enc_dst_buf_size;
+ allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (ctx->src_fmt)
+ *plane_count = ctx->src_fmt->num_planes;
+ else
+ *plane_count = MFC_ENC_OUT_PLANE_COUNT;
+
+ if (*buf_count < 1)
+ *buf_count = 1;
+ if (*buf_count > MFC_MAX_BUFFERS)
+ *buf_count = MFC_MAX_BUFFERS;
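+		/* Raw frames carry separate luma/chroma planes from bank 2 */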
+ psize[0] = ctx->luma_size;
+ psize[1] = ctx->chroma_size;
+ allocators[0] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
+ allocators[1] = ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX];
+ } else {
+		mfc_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void s5p_mfc_unlock(struct vb2_queue *q)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mutex_unlock(&dev->mfc_mutex);
+}
+
+static void s5p_mfc_lock(struct vb2_queue *q)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mutex_lock(&dev->mfc_mutex);
+}
+
+static int s5p_mfc_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ unsigned int i;
+ int ret;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ ret = check_vb_with_fmt(ctx->dst_fmt, vb);
+ if (ret < 0)
+ return ret;
+ i = vb->v4l2_buf.index;
+ ctx->dst_bufs[i].b = vb;
+ ctx->dst_bufs[i].cookie.stream =
+ vb2_dma_contig_plane_paddr(vb, 0);
+ ctx->dst_bufs_cnt++;
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = check_vb_with_fmt(ctx->src_fmt, vb);
+ if (ret < 0)
+ return ret;
+ i = vb->v4l2_buf.index;
+ ctx->src_bufs[i].b = vb;
+ ctx->src_bufs[i].cookie.raw.luma =
+ vb2_dma_contig_plane_paddr(vb, 0);
+ ctx->src_bufs[i].cookie.raw.chroma =
+ vb2_dma_contig_plane_paddr(vb, 1);
+ ctx->src_bufs_cnt++;
+ } else {
+		mfc_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ int ret;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ ret = check_vb_with_fmt(ctx->dst_fmt, vb);
+ if (ret < 0)
+ return ret;
+ mfc_debug(2, "plane size: %ld, dst size: %d\n",
+ vb2_plane_size(vb, 0), ctx->enc_dst_buf_size);
+ if (vb2_plane_size(vb, 0) < ctx->enc_dst_buf_size) {
+ mfc_err("plane size is too small for capture\n");
+ return -EINVAL;
+ }
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ ret = check_vb_with_fmt(ctx->src_fmt, vb);
+ if (ret < 0)
+ return ret;
+ mfc_debug(2, "plane size: %ld, luma size: %d\n",
+ vb2_plane_size(vb, 0), ctx->luma_size);
+ mfc_debug(2, "plane size: %ld, chroma size: %d\n",
+ vb2_plane_size(vb, 1), ctx->chroma_size);
+ if (vb2_plane_size(vb, 0) < ctx->luma_size ||
+ vb2_plane_size(vb, 1) < ctx->chroma_size) {
+ mfc_err("plane size is too small for output\n");
+ return -EINVAL;
+ }
+ } else {
+		mfc_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_start_streaming(struct vb2_queue *q)
+{
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+	/* If context is ready then schedule it to run */
+ if (s5p_mfc_ctx_ready(ctx)) {
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ }
+ s5p_mfc_try_run(dev);
+ return 0;
+}
+
+static int s5p_mfc_stop_streaming(struct vb2_queue *q)
+{
+ unsigned long flags;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+
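+	/* If the hardware is busy with this context, abort and wait for it */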
+ if ((ctx->state == MFCINST_FINISHING ||
+ ctx->state == MFCINST_RUNNING) &&
+ dev->curr_ctx == ctx->num && dev->hw_lock) {
+ ctx->state = MFCINST_ABORT;
+ s5p_mfc_wait_for_done_ctx(ctx, S5P_FIMV_R2H_CMD_FRAME_DONE_RET,
+ 0);
+ }
+ ctx->state = MFCINST_FINISHED;
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
+ INIT_LIST_HEAD(&ctx->dst_queue);
+ ctx->dst_queue_cnt = 0;
+ }
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ cleanup_ref_queue(ctx);
+ s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
+ INIT_LIST_HEAD(&ctx->src_queue);
+ ctx->src_queue_cnt = 0;
+ }
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return 0;
+}
+
+static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ struct s5p_mfc_buf *mfc_buf;
+
+ if (ctx->state == MFCINST_ERROR) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ cleanup_ref_queue(ctx);
+ return;
+ }
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index];
+ mfc_buf->used = 0;
+ /* Mark destination as available for use by MFC */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ list_add_tail(&mfc_buf->list, &ctx->dst_queue);
+ ctx->dst_queue_cnt++;
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ } else if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index];
+ mfc_buf->used = 0;
+ spin_lock_irqsave(&dev->irqlock, flags);
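+		/* A zero-sized output buffer marks the end of the stream */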
+ if (vb->v4l2_planes[0].bytesused == 0) {
+ mfc_debug(1, "change state to FINISHING\n");
+ ctx->state = MFCINST_FINISHING;
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ cleanup_ref_queue(ctx);
+ } else {
+ list_add_tail(&mfc_buf->list, &ctx->src_queue);
+ ctx->src_queue_cnt++;
+ }
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ } else {
+ mfc_err("unsupported buffer type (%d)\n", vq->type);
+ }
+ if (s5p_mfc_ctx_ready(ctx)) {
+ spin_lock_irqsave(&dev->condlock, flags);
+ set_bit(ctx->num, &dev->ctx_work_bits);
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ }
+ s5p_mfc_try_run(dev);
+}
+
+static struct vb2_ops s5p_mfc_enc_qops = {
+ .queue_setup = s5p_mfc_queue_setup,
+ .wait_prepare = s5p_mfc_unlock,
+ .wait_finish = s5p_mfc_lock,
+ .buf_init = s5p_mfc_buf_init,
+ .buf_prepare = s5p_mfc_buf_prepare,
+ .start_streaming = s5p_mfc_start_streaming,
+ .stop_streaming = s5p_mfc_stop_streaming,
+ .buf_queue = s5p_mfc_buf_queue,
+};
+
+struct s5p_mfc_codec_ops *get_enc_codec_ops(void)
+{
+ return &encoder_codec_ops;
+}
+
+struct vb2_ops *get_enc_queue_ops(void)
+{
+ return &s5p_mfc_enc_qops;
+}
+
+const struct v4l2_ioctl_ops *get_enc_v4l2_ioctl_ops(void)
+{
+ return &s5p_mfc_enc_ioctl_ops;
+}
+
+#define IS_MFC51_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_MPEG) \
+ && V4L2_CTRL_DRIVER_PRIV(x))
+
+int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
+{
+ struct v4l2_ctrl_config cfg;
+ int i;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS);
+ if (ctx->ctrl_handler.error) {
+ mfc_err("v4l2_ctrl_handler_init failed\n");
+ return ctx->ctrl_handler.error;
+ }
+ for (i = 0; i < NUM_CTRLS; i++) {
+ if (IS_MFC51_PRIV(controls[i].id)) {
+ cfg.ops = &s5p_mfc_enc_ctrl_ops;
+ cfg.id = controls[i].id;
+ cfg.min = controls[i].minimum;
+ cfg.max = controls[i].maximum;
+ cfg.def = controls[i].default_value;
+ cfg.name = controls[i].name;
+ cfg.type = controls[i].type;
+ cfg.flags = 0;
+
+ if (cfg.type == V4L2_CTRL_TYPE_MENU) {
+ cfg.step = 0;
+				cfg.menu_skip_mask = 0;
+ cfg.qmenu = mfc51_get_menu(cfg.id);
+ } else {
+ cfg.step = controls[i].step;
+ cfg.menu_skip_mask = 0;
+ }
+ ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+ &cfg, NULL);
+ } else {
+ if (controls[i].type == V4L2_CTRL_TYPE_MENU) {
+ ctx->ctrls[i] = v4l2_ctrl_new_std_menu(
+ &ctx->ctrl_handler,
+ &s5p_mfc_enc_ctrl_ops, controls[i].id,
+ controls[i].maximum, 0,
+ controls[i].default_value);
+ } else {
+ ctx->ctrls[i] = v4l2_ctrl_new_std(
+ &ctx->ctrl_handler,
+ &s5p_mfc_enc_ctrl_ops, controls[i].id,
+ controls[i].minimum,
+ controls[i].maximum, controls[i].step,
+ controls[i].default_value);
+ }
+ }
+ if (ctx->ctrl_handler.error) {
+ mfc_err("Adding control (%d) failed\n", i);
+ return ctx->ctrl_handler.error;
+ }
+ if (controls[i].is_volatile && ctx->ctrls[i])
+ ctx->ctrls[i]->is_volatile = 1;
+ }
+ return 0;
+}
+
+void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx)
+{
+ int i;
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ for (i = 0; i < NUM_CTRLS; i++)
+ ctx->ctrls[i] = NULL;
+}
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.h b/drivers/media/video/s5p-mfc/s5p_mfc_enc.h
new file mode 100644
index 00000000000..405bdd3ee08
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.h
@@ -0,0 +1,23 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_enc.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_ENC_H_
+#define S5P_MFC_ENC_H_
+
+struct s5p_mfc_codec_ops *get_enc_codec_ops(void);
+struct vb2_ops *get_enc_queue_ops(void);
+const struct v4l2_ioctl_ops *get_enc_v4l2_ioctl_ops(void);
+struct s5p_mfc_fmt *get_enc_def_fmt(bool src);
+int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx);
+
+#endif /* S5P_MFC_ENC_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_intr.c b/drivers/media/video/s5p-mfc/s5p_mfc_intr.c
new file mode 100644
index 00000000000..8f2f8bf4da7
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_intr.c
@@ -0,0 +1,92 @@
+/*
+ * drivers/media/video/samsung/mfc5/s5p_mfc_intr.c
+ *
+ * C file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * This file contains functions used to wait for command completion.
+ *
+ * Kamil Debski, Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include "regs-mfc.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_intr.h"
+
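+/*
+ * Wait until the hardware signals completion of @command on the device queue.
+ * Returns 0 on success and 1 on timeout, pending signal or error interrupt.
+ */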
+int s5p_mfc_wait_for_done_dev(struct s5p_mfc_dev *dev, int command)
+{
+ int ret;
+
+ ret = wait_event_interruptible_timeout(dev->queue,
+ (dev->int_cond && (dev->int_type == command
+ || dev->int_type == S5P_FIMV_R2H_CMD_ERR_RET)),
+ msecs_to_jiffies(MFC_INT_TIMEOUT));
+ if (ret == 0) {
+ mfc_err("Interrupt (dev->int_type:%d, command:%d) timed out\n",
+ dev->int_type, command);
+ return 1;
+ } else if (ret == -ERESTARTSYS) {
+ mfc_err("Interrupted by a signal\n");
+ return 1;
+ }
+ mfc_debug(1, "Finished waiting (dev->int_type:%d, command: %d)\n",
+ dev->int_type, command);
+ if (dev->int_type == S5P_FIMV_R2H_CMD_ERR_RET)
+ return 1;
+ return 0;
+}
+
+void s5p_mfc_clean_dev_int_flags(struct s5p_mfc_dev *dev)
+{
+ dev->int_cond = 0;
+ dev->int_type = 0;
+ dev->int_err = 0;
+}
+
+int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx,
+ int command, int interrupt)
+{
+ int ret;
+
+ if (interrupt) {
+ ret = wait_event_interruptible_timeout(ctx->queue,
+ (ctx->int_cond && (ctx->int_type == command
+ || ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)),
+ msecs_to_jiffies(MFC_INT_TIMEOUT));
+ } else {
+ ret = wait_event_timeout(ctx->queue,
+ (ctx->int_cond && (ctx->int_type == command
+ || ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)),
+ msecs_to_jiffies(MFC_INT_TIMEOUT));
+ }
+ if (ret == 0) {
+ mfc_err("Interrupt (ctx->int_type:%d, command:%d) timed out\n",
+ ctx->int_type, command);
+ return 1;
+ } else if (ret == -ERESTARTSYS) {
+ mfc_err("Interrupted by a signal\n");
+ return 1;
+ }
+ mfc_debug(1, "Finished waiting (ctx->int_type:%d, command: %d)\n",
+ ctx->int_type, command);
+ if (ctx->int_type == S5P_FIMV_R2H_CMD_ERR_RET)
+ return 1;
+ return 0;
+}
+
+void s5p_mfc_clean_ctx_int_flags(struct s5p_mfc_ctx *ctx)
+{
+ ctx->int_cond = 0;
+ ctx->int_type = 0;
+ ctx->int_err = 0;
+}
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_intr.h b/drivers/media/video/s5p-mfc/s5p_mfc_intr.h
new file mode 100644
index 00000000000..122d7732f74
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_intr.h
@@ -0,0 +1,26 @@
+/*
+ * drivers/media/video/samsung/mfc5/s5p_mfc_intr.h
+ *
+ * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * It contains waiting functions declarations.
+ *
+ * Kamil Debski, Copyright (C) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S5P_MFC_INTR_H_
+#define S5P_MFC_INTR_H_
+
+#include "s5p_mfc_common.h"
+
+int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx,
+ int command, int interrupt);
+int s5p_mfc_wait_for_done_dev(struct s5p_mfc_dev *dev, int command);
+void s5p_mfc_clean_ctx_int_flags(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_clean_dev_int_flags(struct s5p_mfc_dev *dev);
+
+#endif /* S5P_MFC_INTR_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_opr.c b/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
new file mode 100644
index 00000000000..7b239168c19
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_opr.c
@@ -0,0 +1,1397 @@
+/*
+ * drivers/media/video/samsung/mfc5/s5p_mfc_opr.c
+ *
+ * Samsung MFC (Multi Function Codec - FIMV) driver
+ * This file contains hw related functions.
+ *
+ * Kamil Debski, Copyright (c) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "regs-mfc.h"
+#include "s5p_mfc_cmd.h"
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_ctrl.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_intr.h"
+#include "s5p_mfc_opr.h"
+#include "s5p_mfc_pm.h"
+#include "s5p_mfc_shm.h"
+#include <asm/cacheflush.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
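+/* Convert a bank 1/2 bus address into the offset written to MFC registers */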
+#define OFFSETA(x) (((x) - dev->bank1) >> MFC_OFFSET_SHIFT)
+#define OFFSETB(x) (((x) - dev->bank2) >> MFC_OFFSET_SHIFT)
+
+/* Allocate temporary buffers for decoding */
+int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx)
+{
+ void *desc_virt;
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ ctx->desc_buf = vb2_dma_contig_memops.alloc(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], DESC_BUF_SIZE);
+	if (IS_ERR(ctx->desc_buf)) {
+ ctx->desc_buf = 0;
+ mfc_err("Allocating DESC buffer failed\n");
+ return -ENOMEM;
+ }
+ ctx->desc_phys = s5p_mfc_mem_cookie(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->desc_buf);
+ BUG_ON(ctx->desc_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ desc_virt = vb2_dma_contig_memops.vaddr(ctx->desc_buf);
+ if (desc_virt == NULL) {
+ vb2_dma_contig_memops.put(ctx->desc_buf);
+ ctx->desc_phys = 0;
+ ctx->desc_buf = 0;
+ mfc_err("Remapping DESC buffer failed\n");
+ return -ENOMEM;
+ }
+ memset(desc_virt, 0, DESC_BUF_SIZE);
+ wmb();
+ return 0;
+}
+
+/* Release temporary buffers for decoding */
+void s5p_mfc_release_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
+{
+ if (ctx->desc_phys) {
+ vb2_dma_contig_memops.put(ctx->desc_buf);
+ ctx->desc_phys = 0;
+ ctx->desc_buf = 0;
+ }
+}
+
+/* Allocate codec buffers */
+int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned int enc_ref_y_size = 0;
+ unsigned int enc_ref_c_size = 0;
+ unsigned int guard_width, guard_height;
+
+ if (ctx->type == MFCINST_DECODER) {
+ mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
+ ctx->luma_size, ctx->chroma_size, ctx->mv_size);
+		mfc_debug(2, "Total bufs: %d\n", ctx->total_dpb_count);
+ } else if (ctx->type == MFCINST_ENCODER) {
+ enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
+ * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
+
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC) {
+ enc_ref_c_size = ALIGN(ctx->img_width,
+ S5P_FIMV_NV12MT_HALIGN)
+ * ALIGN(ctx->img_height >> 1,
+ S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_c_size = ALIGN(enc_ref_c_size,
+ S5P_FIMV_NV12MT_SALIGN);
+ } else {
+ guard_width = ALIGN(ctx->img_width + 16,
+ S5P_FIMV_NV12MT_HALIGN);
+ guard_height = ALIGN((ctx->img_height >> 1) + 4,
+ S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_c_size = ALIGN(guard_width * guard_height,
+ S5P_FIMV_NV12MT_SALIGN);
+ }
+ mfc_debug(2, "recon luma size: %d chroma size: %d\n",
+ enc_ref_y_size, enc_ref_c_size);
+ } else {
+ return -EINVAL;
+ }
+ /* Codecs have different memory requirements */
+ switch (ctx->codec_mode) {
+ case S5P_FIMV_CODEC_H264_DEC:
+ ctx->bank1_size =
+ ALIGN(S5P_FIMV_DEC_NB_IP_SIZE +
+ S5P_FIMV_DEC_VERT_NB_MV_SIZE,
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->bank2_size = ctx->total_dpb_count * ctx->mv_size;
+ break;
+ case S5P_FIMV_CODEC_MPEG4_DEC:
+ ctx->bank1_size =
+ ALIGN(S5P_FIMV_DEC_NB_DCAC_SIZE +
+ S5P_FIMV_DEC_UPNB_MV_SIZE +
+ S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
+ S5P_FIMV_DEC_STX_PARSER_SIZE +
+ S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE,
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->bank2_size = 0;
+ break;
+ case S5P_FIMV_CODEC_VC1RCV_DEC:
+ case S5P_FIMV_CODEC_VC1_DEC:
+ ctx->bank1_size =
+ ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
+ S5P_FIMV_DEC_UPNB_MV_SIZE +
+ S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
+ S5P_FIMV_DEC_NB_DCAC_SIZE +
+ 3 * S5P_FIMV_DEC_VC1_BITPLANE_SIZE,
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->bank2_size = 0;
+ break;
+ case S5P_FIMV_CODEC_MPEG2_DEC:
+ ctx->bank1_size = 0;
+ ctx->bank2_size = 0;
+ break;
+ case S5P_FIMV_CODEC_H263_DEC:
+ ctx->bank1_size =
+ ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
+ S5P_FIMV_DEC_UPNB_MV_SIZE +
+ S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
+ S5P_FIMV_DEC_NB_DCAC_SIZE,
+ S5P_FIMV_DEC_BUF_ALIGN);
+ ctx->bank2_size = 0;
+ break;
+ case S5P_FIMV_CODEC_H264_ENC:
+ ctx->bank1_size = (enc_ref_y_size * 2) +
+ S5P_FIMV_ENC_UPMV_SIZE +
+ S5P_FIMV_ENC_COLFLG_SIZE +
+ S5P_FIMV_ENC_INTRAMD_SIZE +
+ S5P_FIMV_ENC_NBORINFO_SIZE;
+ ctx->bank2_size = (enc_ref_y_size * 2) +
+ (enc_ref_c_size * 4) +
+ S5P_FIMV_ENC_INTRAPRED_SIZE;
+ break;
+ case S5P_FIMV_CODEC_MPEG4_ENC:
+ ctx->bank1_size = (enc_ref_y_size * 2) +
+ S5P_FIMV_ENC_UPMV_SIZE +
+ S5P_FIMV_ENC_COLFLG_SIZE +
+ S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ ctx->bank2_size = (enc_ref_y_size * 2) +
+ (enc_ref_c_size * 4);
+ break;
+ case S5P_FIMV_CODEC_H263_ENC:
+ ctx->bank1_size = (enc_ref_y_size * 2) +
+ S5P_FIMV_ENC_UPMV_SIZE +
+ S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ ctx->bank2_size = (enc_ref_y_size * 2) +
+ (enc_ref_c_size * 4);
+ break;
+ default:
+ break;
+ }
+ /* Allocate only if memory from bank 1 is necessary */
+ if (ctx->bank1_size > 0) {
+ ctx->bank1_buf = vb2_dma_contig_memops.alloc(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
+ if (IS_ERR(ctx->bank1_buf)) {
+ ctx->bank1_buf = 0;
+			mfc_err("Buf alloc for decoding failed (port A)\n");
+ return -ENOMEM;
+ }
+ ctx->bank1_phys = s5p_mfc_mem_cookie(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_buf);
+ BUG_ON(ctx->bank1_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ }
+ /* Allocate only if memory from bank 2 is necessary */
+ if (ctx->bank2_size > 0) {
+ ctx->bank2_buf = vb2_dma_contig_memops.alloc(
+ dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_size);
+ if (IS_ERR(ctx->bank2_buf)) {
+ ctx->bank2_buf = 0;
+ mfc_err("Buf alloc for decoding failed (port B)\n");
+ return -ENOMEM;
+ }
+ ctx->bank2_phys = s5p_mfc_mem_cookie(
+ dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_buf);
+ BUG_ON(ctx->bank2_phys & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
+ }
+ return 0;
+}
+
+/* Release buffers allocated for codec */
+void s5p_mfc_release_codec_buffers(struct s5p_mfc_ctx *ctx)
+{
+ if (ctx->bank1_buf) {
+ vb2_dma_contig_memops.put(ctx->bank1_buf);
+ ctx->bank1_buf = 0;
+ ctx->bank1_phys = 0;
+ ctx->bank1_size = 0;
+ }
+ if (ctx->bank2_buf) {
+ vb2_dma_contig_memops.put(ctx->bank2_buf);
+ ctx->bank2_buf = 0;
+ ctx->bank2_phys = 0;
+ ctx->bank2_size = 0;
+ }
+}
+
+/* Allocate memory for instance data buffer */
+int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx)
+{
+ void *context_virt;
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC ||
+ ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC)
+ ctx->ctx_size = MFC_H264_CTX_BUF_SIZE;
+ else
+ ctx->ctx_size = MFC_CTX_BUF_SIZE;
+ ctx->ctx_buf = vb2_dma_contig_memops.alloc(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx_size);
+ if (IS_ERR(ctx->ctx_buf)) {
+ mfc_err("Allocating context buffer failed\n");
+ ctx->ctx_phys = 0;
+ ctx->ctx_buf = 0;
+ return -ENOMEM;
+ }
+ ctx->ctx_phys = s5p_mfc_mem_cookie(
+ dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx_buf);
+ BUG_ON(ctx->ctx_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ ctx->ctx_ofs = OFFSETA(ctx->ctx_phys);
+ context_virt = vb2_dma_contig_memops.vaddr(ctx->ctx_buf);
+ if (context_virt == NULL) {
+ mfc_err("Remapping instance buffer failed\n");
+ vb2_dma_contig_memops.put(ctx->ctx_buf);
+ ctx->ctx_phys = 0;
+ ctx->ctx_buf = 0;
+ return -ENOMEM;
+ }
+ /* Zero content of the allocated memory */
+ memset(context_virt, 0, ctx->ctx_size);
+ wmb();
+ if (s5p_mfc_init_shm(ctx) < 0) {
+ vb2_dma_contig_memops.put(ctx->ctx_buf);
+ ctx->ctx_phys = 0;
+ ctx->ctx_buf = 0;
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* Release instance buffer */
+void s5p_mfc_release_instance_buffer(struct s5p_mfc_ctx *ctx)
+{
+ if (ctx->ctx_buf) {
+ vb2_dma_contig_memops.put(ctx->ctx_buf);
+ ctx->ctx_phys = 0;
+ ctx->ctx_buf = 0;
+ }
+ if (ctx->shm_alloc) {
+ vb2_dma_contig_memops.put(ctx->shm_alloc);
+ ctx->shm_alloc = 0;
+ ctx->shm = 0;
+ }
+}
+
+/* Set registers for decoding temporary buffers */
+void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mfc_write(dev, OFFSETA(ctx->desc_phys), S5P_FIMV_SI_CH0_DESC_ADR);
+ mfc_write(dev, DESC_BUF_SIZE, S5P_FIMV_SI_CH0_DESC_SIZE);
+}
+
+/* Set registers for shared buffer */
+void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ mfc_write(dev, ctx->shm_ofs, S5P_FIMV_SI_CH0_HOST_WR_ADR);
+}
+
+/* Set registers for decoding stream buffer */
+int s5p_mfc_set_dec_stream_buffer(struct s5p_mfc_ctx *ctx, int buf_addr,
+ unsigned int start_num_byte, unsigned int buf_size)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mfc_write(dev, OFFSETA(buf_addr), S5P_FIMV_SI_CH0_SB_ST_ADR);
+ mfc_write(dev, ctx->dec_src_buf_size, S5P_FIMV_SI_CH0_CPB_SIZE);
+ mfc_write(dev, buf_size, S5P_FIMV_SI_CH0_SB_FRM_SIZE);
+ s5p_mfc_write_shm(ctx, start_num_byte, START_BYTE_NUM);
+ return 0;
+}
+
+/* Set decoding frame buffer */
+int s5p_mfc_set_dec_frame_buffer(struct s5p_mfc_ctx *ctx)
+{
+ unsigned int frame_size, i;
+ unsigned int frame_size_ch, frame_size_mv;
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned int dpb;
+ size_t buf_addr1, buf_addr2;
+ int buf_size1, buf_size2;
+
+ buf_addr1 = ctx->bank1_phys;
+ buf_size1 = ctx->bank1_size;
+ buf_addr2 = ctx->bank2_phys;
+ buf_size2 = ctx->bank2_size;
+ dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
+ ~S5P_FIMV_DPB_COUNT_MASK;
+ mfc_write(dev, ctx->total_dpb_count | dpb,
+ S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
+ s5p_mfc_set_shared_buffer(ctx);
+ switch (ctx->codec_mode) {
+ case S5P_FIMV_CODEC_H264_DEC:
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_H264_VERT_NB_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_VERT_NB_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_VERT_NB_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_NB_IP_ADR);
+ buf_addr1 += S5P_FIMV_DEC_NB_IP_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_NB_IP_SIZE;
+ break;
+ case S5P_FIMV_CODEC_MPEG4_DEC:
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_NB_DCAC_ADR);
+ buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_NB_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SA_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SP_ADR);
+ buf_addr1 += S5P_FIMV_DEC_STX_PARSER_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_STX_PARSER_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_OT_LINE_ADR);
+ buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ break;
+ case S5P_FIMV_CODEC_H263_DEC:
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_OT_LINE_ADR);
+ buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_NB_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_SA_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_NB_DCAC_ADR);
+ buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
+ break;
+ case S5P_FIMV_CODEC_VC1_DEC:
+ case S5P_FIMV_CODEC_VC1RCV_DEC:
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_NB_DCAC_ADR);
+ buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_OT_LINE_ADR);
+ buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_UP_NB_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_SA_MV_ADR);
+ buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE3_ADR);
+ buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE2_ADR);
+ buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE1_ADR);
+ buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
+ break;
+ case S5P_FIMV_CODEC_MPEG2_DEC:
+ break;
+ default:
+ mfc_err("Unknown codec for decoding (%x)\n",
+ ctx->codec_mode);
+ return -EINVAL;
+ }
+ frame_size = ctx->luma_size;
+ frame_size_ch = ctx->chroma_size;
+ frame_size_mv = ctx->mv_size;
+ mfc_debug(2, "Frm size: %d ch: %d mv: %d\n", frame_size, frame_size_ch,
+ frame_size_mv);
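+	/* Program luma/chroma (and H.264 MV) addresses for each DPB entry */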
+ for (i = 0; i < ctx->total_dpb_count; i++) {
+ /* Bank2 */
+ mfc_debug(2, "Luma %d: %x\n", i,
+ ctx->dst_bufs[i].cookie.raw.luma);
+ mfc_write(dev, OFFSETB(ctx->dst_bufs[i].cookie.raw.luma),
+ S5P_FIMV_DEC_LUMA_ADR + i * 4);
+ mfc_debug(2, "\tChroma %d: %x\n", i,
+ ctx->dst_bufs[i].cookie.raw.chroma);
+ mfc_write(dev, OFFSETA(ctx->dst_bufs[i].cookie.raw.chroma),
+ S5P_FIMV_DEC_CHROMA_ADR + i * 4);
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
+ mfc_debug(2, "\tBuf2: %x, size: %d\n",
+ buf_addr2, buf_size2);
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_H264_MV_ADR + i * 4);
+ buf_addr2 += frame_size_mv;
+ buf_size2 -= frame_size_mv;
+ }
+ }
+ mfc_debug(2, "Buf1: %u, buf_size1: %d\n", buf_addr1, buf_size1);
+ mfc_debug(2, "Buf 1/2 size after: %d/%d (frames %d)\n",
+ buf_size1, buf_size2, ctx->total_dpb_count);
+ if (buf_size1 < 0 || buf_size2 < 0) {
+ mfc_debug(2, "Not enough memory has been allocated\n");
+ return -ENOMEM;
+ }
+ s5p_mfc_write_shm(ctx, frame_size, ALLOC_LUMA_DPB_SIZE);
+ s5p_mfc_write_shm(ctx, frame_size_ch, ALLOC_CHROMA_DPB_SIZE);
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC)
+ s5p_mfc_write_shm(ctx, frame_size_mv, ALLOC_MV_SIZE);
+ mfc_write(dev, ((S5P_FIMV_CH_INIT_BUFS & S5P_FIMV_CH_MASK)
+ << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
+ S5P_FIMV_SI_CH0_INST_ID);
+ return 0;
+}
+
+/* Set registers for encoding stream buffer */
+int s5p_mfc_set_enc_stream_buffer(struct s5p_mfc_ctx *ctx,
+ unsigned long addr, unsigned int size)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mfc_write(dev, OFFSETA(addr), S5P_FIMV_ENC_SI_CH0_SB_ADR);
+ mfc_write(dev, size, S5P_FIMV_ENC_SI_CH0_SB_SIZE);
+ return 0;
+}
+
+void s5p_mfc_set_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
+ unsigned long y_addr, unsigned long c_addr)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mfc_write(dev, OFFSETB(y_addr), S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR);
+ mfc_write(dev, OFFSETB(c_addr), S5P_FIMV_ENC_SI_CH0_CUR_C_ADR);
+}
+
+void s5p_mfc_get_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
+ unsigned long *y_addr, unsigned long *c_addr)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ *y_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_Y_ADDR)
+ << MFC_OFFSET_SHIFT);
+ *c_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_C_ADDR)
+ << MFC_OFFSET_SHIFT);
+}
+
+/* Set encoding ref & codec buffer */
+int s5p_mfc_set_enc_ref_buffer(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ size_t buf_addr1, buf_addr2;
+ size_t buf_size1, buf_size2;
+ unsigned int enc_ref_y_size, enc_ref_c_size;
+ unsigned int guard_width, guard_height;
+ int i;
+
+ buf_addr1 = ctx->bank1_phys;
+ buf_size1 = ctx->bank1_size;
+ buf_addr2 = ctx->bank2_phys;
+ buf_size2 = ctx->bank2_size;
+ enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
+ * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC) {
+ enc_ref_c_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
+ * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_c_size = ALIGN(enc_ref_c_size, S5P_FIMV_NV12MT_SALIGN);
+ } else {
+ guard_width = ALIGN(ctx->img_width + 16,
+ S5P_FIMV_NV12MT_HALIGN);
+ guard_height = ALIGN((ctx->img_height >> 1) + 4,
+ S5P_FIMV_NV12MT_VALIGN);
+ enc_ref_c_size = ALIGN(guard_width * guard_height,
+ S5P_FIMV_NV12MT_SALIGN);
+ }
+ mfc_debug(2, "buf_size1: %d, buf_size2: %d\n", buf_size1, buf_size2);
+ switch (ctx->codec_mode) {
+ case S5P_FIMV_CODEC_H264_ENC:
+ for (i = 0; i < 2; i++) {
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
+ buf_addr1 += enc_ref_y_size;
+ buf_size1 -= enc_ref_y_size;
+
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_y_size;
+ buf_size2 -= enc_ref_y_size;
+ }
+ for (i = 0; i < 4; i++) {
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_c_size;
+ buf_size2 -= enc_ref_c_size;
+ }
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_UP_MV_ADR);
+ buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_H264_COZERO_FLAG_ADR);
+ buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_H264_UP_INTRA_MD_ADR);
+ buf_addr1 += S5P_FIMV_ENC_INTRAMD_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_INTRAMD_SIZE;
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_H264_UP_INTRA_PRED_ADR);
+ buf_addr2 += S5P_FIMV_ENC_INTRAPRED_SIZE;
+ buf_size2 -= S5P_FIMV_ENC_INTRAPRED_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_H264_NBOR_INFO_ADR);
+ buf_addr1 += S5P_FIMV_ENC_NBORINFO_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_NBORINFO_SIZE;
+ mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
+ buf_size1, buf_size2);
+ break;
+ case S5P_FIMV_CODEC_MPEG4_ENC:
+ for (i = 0; i < 2; i++) {
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
+ buf_addr1 += enc_ref_y_size;
+ buf_size1 -= enc_ref_y_size;
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_y_size;
+ buf_size2 -= enc_ref_y_size;
+ }
+ for (i = 0; i < 4; i++) {
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_c_size;
+ buf_size2 -= enc_ref_c_size;
+ }
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_MV_ADR);
+ buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_MPEG4_COZERO_FLAG_ADR);
+ buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_MPEG4_ACDC_COEF_ADR);
+ buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
+ buf_size1, buf_size2);
+ break;
+ case S5P_FIMV_CODEC_H263_ENC:
+ for (i = 0; i < 2; i++) {
+ mfc_write(dev, OFFSETA(buf_addr1),
+ S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
+ buf_addr1 += enc_ref_y_size;
+ buf_size1 -= enc_ref_y_size;
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_y_size;
+ buf_size2 -= enc_ref_y_size;
+ }
+ for (i = 0; i < 4; i++) {
+ mfc_write(dev, OFFSETB(buf_addr2),
+ S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
+ buf_addr2 += enc_ref_c_size;
+ buf_size2 -= enc_ref_c_size;
+ }
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_MV_ADR);
+ buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
+ mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_ACDC_COEF_ADR);
+ buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
+ mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
+ buf_size1, buf_size2);
+ break;
+ default:
+ mfc_err("Unknown codec set for encoding: %d\n",
+ ctx->codec_mode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ unsigned int reg;
+ unsigned int shm;
+
+ /* width */
+ mfc_write(dev, ctx->img_width, S5P_FIMV_ENC_HSIZE_PX);
+ /* height */
+ mfc_write(dev, ctx->img_height, S5P_FIMV_ENC_VSIZE_PX);
+ /* pictype : enable, IDR period */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ reg |= (1 << 18);
+ reg &= ~(0xFFFF);
+ reg |= p->gop_size;
+ mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ mfc_write(dev, 0, S5P_FIMV_ENC_B_RECON_WRITE_ON);
+ /* multi-slice control */
+ /* multi-slice MB number or bit size */
+ mfc_write(dev, p->slice_mode, S5P_FIMV_ENC_MSLICE_CTRL);
+ if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
+ mfc_write(dev, p->slice_mb, S5P_FIMV_ENC_MSLICE_MB);
+ } else if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
+ mfc_write(dev, p->slice_bit, S5P_FIMV_ENC_MSLICE_BIT);
+ } else {
+ mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_MB);
+ mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_BIT);
+ }
+ /* cyclic intra refresh */
+ mfc_write(dev, p->intra_refresh_mb, S5P_FIMV_ENC_CIR_CTRL);
+ /* memory structure cur. frame */
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
+ mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
+ else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
+ mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
+ /* padding control & value */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PADDING_CTRL);
+ if (p->pad) {
+ /** enable */
+ reg |= (1 << 31);
+ /** cr value */
+ reg &= ~(0xFF << 16);
+ reg |= (p->pad_cr << 16);
+ /** cb value */
+ reg &= ~(0xFF << 8);
+ reg |= (p->pad_cb << 8);
+ /** y value */
+ reg &= ~(0xFF);
+ reg |= (p->pad_luma);
+ } else {
+ /** disable & all value clear */
+ reg = 0;
+ }
+ mfc_write(dev, reg, S5P_FIMV_ENC_PADDING_CTRL);
+ /* rate control config. */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+ /** frame-level rate control */
+ reg &= ~(0x1 << 9);
+ reg |= (p->rc_frame << 9);
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+ /* bit rate */
+ if (p->rc_frame)
+ mfc_write(dev, p->rc_bitrate,
+ S5P_FIMV_ENC_RC_BIT_RATE);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_RC_BIT_RATE);
+ /* reaction coefficient */
+ if (p->rc_frame)
+ mfc_write(dev, p->rc_reaction_coeff, S5P_FIMV_ENC_RC_RPARA);
+ shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+ /* seq header ctrl */
+ shm &= ~(0x1 << 3);
+ shm |= (p->seq_hdr_mode << 3);
+ /* frame skip mode */
+ shm &= ~(0x3 << 1);
+ shm |= (p->frame_skip_mode << 1);
+ s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+ /* fixed target bit */
+ s5p_mfc_write_shm(ctx, p->fixed_target_bit, RC_CONTROL_CONFIG);
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_h264_enc_params *p_264 = &p->codec.h264;
+ unsigned int reg;
+ unsigned int shm;
+
+ s5p_mfc_set_enc_params(ctx);
+ /* pictype : number of B */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ /* num_b_frame - 0 ~ 2 */
+ reg &= ~(0x3 << 16);
+ reg |= (p->num_b_frame << 16);
+ mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ /* profile & level */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
+ /* level */
+ reg &= ~(0xFF << 8);
+ reg |= (p_264->level << 8);
+ /* profile - 0 ~ 2 */
+ reg &= ~(0x3F);
+ reg |= p_264->profile;
+ mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
+ /* interlace */
+ mfc_write(dev, p->interlace, S5P_FIMV_ENC_PIC_STRUCT);
+ /* height */
+ if (p->interlace)
+ mfc_write(dev, ctx->img_height >> 1, S5P_FIMV_ENC_VSIZE_PX);
+ /* loopfilter ctrl */
+ mfc_write(dev, p_264->loop_filter_mode, S5P_FIMV_ENC_LF_CTRL);
+ /* loopfilter alpha offset */
+ if (p_264->loop_filter_alpha < 0) {
+ reg = 0x10;
+ reg |= (0xFF - p_264->loop_filter_alpha) + 1;
+ } else {
+ reg = 0x00;
+ reg |= (p_264->loop_filter_alpha & 0xF);
+ }
+ mfc_write(dev, reg, S5P_FIMV_ENC_ALPHA_OFF);
+ /* loopfilter beta offset */
+ if (p_264->loop_filter_beta < 0) {
+ reg = 0x10;
+ reg |= (0xFF - p_264->loop_filter_beta) + 1;
+ } else {
+ reg = 0x00;
+ reg |= (p_264->loop_filter_beta & 0xF);
+ }
+ mfc_write(dev, reg, S5P_FIMV_ENC_BETA_OFF);
+ /* entropy coding mode */
+ if (p_264->entropy_mode == V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
+ mfc_write(dev, 1, S5P_FIMV_ENC_H264_ENTROPY_MODE);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_H264_ENTROPY_MODE);
+ /* number of ref. picture */
+ reg = mfc_read(dev, S5P_FIMV_ENC_H264_NUM_OF_REF);
+ /* num of ref. pictures of P */
+ reg &= ~(0x3 << 5);
+ reg |= (p_264->num_ref_pic_4p << 5);
+ /* max number of ref. pictures */
+ reg &= ~(0x1F);
+ reg |= p_264->max_ref_pic;
+ mfc_write(dev, reg, S5P_FIMV_ENC_H264_NUM_OF_REF);
+ /* 8x8 transform enable */
+ mfc_write(dev, p_264->_8x8_transform, S5P_FIMV_ENC_H264_TRANS_FLAG);
+ /* rate control config. */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+ /* macroblock level rate control */
+ reg &= ~(0x1 << 8);
+ reg |= (p_264->rc_mb << 8);
+ /* frame QP */
+ reg &= ~(0x3F);
+ reg |= p_264->rc_frame_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+ /* frame rate */
+ if (p->rc_frame && p->rc_framerate_denom)
+ mfc_write(dev, p->rc_framerate_num * 1000
+ / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
+ /* max & min value of QP */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
+ /* max QP */
+ reg &= ~(0x3F << 8);
+ reg |= (p_264->rc_max_qp << 8);
+ /* min QP */
+ reg &= ~(0x3F);
+ reg |= p_264->rc_min_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
+ /* macroblock adaptive scaling features */
+ if (p_264->rc_mb) {
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_MB_CTRL);
+ /* dark region */
+ reg &= ~(0x1 << 3);
+ reg |= (p_264->rc_mb_dark << 3);
+ /* smooth region */
+ reg &= ~(0x1 << 2);
+ reg |= (p_264->rc_mb_smooth << 2);
+ /* static region */
+ reg &= ~(0x1 << 1);
+ reg |= (p_264->rc_mb_static << 1);
+ /* high activity region */
+ reg &= ~(0x1);
+ reg |= p_264->rc_mb_activity;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_MB_CTRL);
+ }
+ if (!p->rc_frame &&
+ !p_264->rc_mb) {
+ shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
+ shm &= ~(0xFFF);
+ shm |= ((p_264->rc_b_frame_qp & 0x3F) << 6);
+ shm |= (p_264->rc_p_frame_qp & 0x3F);
+ s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
+ }
+ /* extended encoder ctrl */
+ shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+ /* AR VUI control */
+ shm &= ~(0x1 << 15);
+	shm |= (p_264->vui_sar << 15);
+ s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+ if (p_264->vui_sar) {
+		/* aspect ratio IDC */
+ shm = s5p_mfc_read_shm(ctx, SAMPLE_ASPECT_RATIO_IDC);
+ shm &= ~(0xFF);
+ shm |= p_264->vui_sar_idc;
+ s5p_mfc_write_shm(ctx, shm, SAMPLE_ASPECT_RATIO_IDC);
+ if (p_264->vui_sar_idc == 0xFF) {
+ /* sample AR info */
+ shm = s5p_mfc_read_shm(ctx, EXTENDED_SAR);
+ shm &= ~(0xFFFFFFFF);
+ shm |= p_264->vui_ext_sar_width << 16;
+ shm |= p_264->vui_ext_sar_height;
+ s5p_mfc_write_shm(ctx, shm, EXTENDED_SAR);
+ }
+ }
+ /* intra picture period for H.264 */
+ shm = s5p_mfc_read_shm(ctx, H264_I_PERIOD);
+ /* control */
+ shm &= ~(0x1 << 16);
+ shm |= (p_264->open_gop << 16);
+ /* value */
+ if (p_264->open_gop) {
+ shm &= ~(0xFFFF);
+ shm |= p_264->open_gop_size;
+ }
+ s5p_mfc_write_shm(ctx, shm, H264_I_PERIOD);
+ /* extended encoder ctrl */
+ shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+ /* vbv buffer size */
+ if (p->frame_skip_mode ==
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+ shm &= ~(0xFFFF << 16);
+ shm |= (p_264->cpb_size << 16);
+ }
+ s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
+ unsigned int reg;
+ unsigned int shm;
+ unsigned int framerate;
+
+ s5p_mfc_set_enc_params(ctx);
+ /* pictype : number of B */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ /* num_b_frame - 0 ~ 2 */
+ reg &= ~(0x3 << 16);
+ reg |= (p->num_b_frame << 16);
+ mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
+ /* profile & level */
+ reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
+ /* level */
+ reg &= ~(0xFF << 8);
+ reg |= (p_mpeg4->level << 8);
+ /* profile - 0 ~ 2 */
+ reg &= ~(0x3F);
+ reg |= p_mpeg4->profile;
+ mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
+ /* quarter_pixel */
+ mfc_write(dev, p_mpeg4->quarter_pixel, S5P_FIMV_ENC_MPEG4_QUART_PXL);
+ /* qp */
+ if (!p->rc_frame) {
+ shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
+ shm &= ~(0xFFF);
+ shm |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 6);
+ shm |= (p_mpeg4->rc_p_frame_qp & 0x3F);
+ s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
+ }
+ /* frame rate */
+ if (p->rc_frame) {
+ if (p->rc_framerate_denom > 0) {
+ framerate = p->rc_framerate_num * 1000 /
+ p->rc_framerate_denom;
+ mfc_write(dev, framerate,
+ S5P_FIMV_ENC_RC_FRAME_RATE);
+ shm = s5p_mfc_read_shm(ctx, RC_VOP_TIMING);
+ shm &= ~(0xFFFFFFFF);
+ shm |= (1 << 31);
+ shm |= ((p->rc_framerate_num & 0x7FFF) << 16);
+ shm |= (p->rc_framerate_denom & 0xFFFF);
+ s5p_mfc_write_shm(ctx, shm, RC_VOP_TIMING);
+ }
+ } else {
+ mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
+ }
+ /* rate control config. */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+ /* frame QP */
+ reg &= ~(0x3F);
+ reg |= p_mpeg4->rc_frame_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+ /* max & min value of QP */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
+ /* max QP */
+ reg &= ~(0x3F << 8);
+ reg |= (p_mpeg4->rc_max_qp << 8);
+ /* min QP */
+ reg &= ~(0x3F);
+ reg |= p_mpeg4->rc_min_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
+ /* extended encoder ctrl */
+ shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+ /* vbv buffer size */
+ if (p->frame_skip_mode ==
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+ shm &= ~(0xFFFF << 16);
+ shm |= (p->vbv_size << 16);
+ }
+ s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+ return 0;
+}
+
+static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
+ unsigned int reg;
+ unsigned int shm;
+
+ s5p_mfc_set_enc_params(ctx);
+ /* qp */
+ if (!p->rc_frame) {
+ shm = s5p_mfc_read_shm(ctx, P_B_FRAME_QP);
+ shm &= ~(0xFFF);
+ shm |= (p_h263->rc_p_frame_qp & 0x3F);
+ s5p_mfc_write_shm(ctx, shm, P_B_FRAME_QP);
+ }
+ /* frame rate */
+ if (p->rc_frame && p->rc_framerate_denom)
+ mfc_write(dev, p->rc_framerate_num * 1000
+ / p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
+ /* rate control config. */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
+ /* frame QP */
+ reg &= ~(0x3F);
+ reg |= p_h263->rc_frame_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
+ /* max & min value of QP */
+ reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
+ /* max QP */
+ reg &= ~(0x3F << 8);
+ reg |= (p_h263->rc_max_qp << 8);
+ /* min QP */
+ reg &= ~(0x3F);
+ reg |= p_h263->rc_min_qp;
+ mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
+ /* extended encoder ctrl */
+ shm = s5p_mfc_read_shm(ctx, EXT_ENC_CONTROL);
+ /* vbv buffer size */
+ if (p->frame_skip_mode ==
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+ shm &= ~(0xFFFF << 16);
+ shm |= (p->vbv_size << 16);
+ }
+ s5p_mfc_write_shm(ctx, shm, EXT_ENC_CONTROL);
+ return 0;
+}
+
+/* Initialize decoding */
+int s5p_mfc_init_decode(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ s5p_mfc_set_shared_buffer(ctx);
+ /* Set up the loop filter; for decoding this is only valid for MPEG4 */
+ if (ctx->codec_mode == S5P_FIMV_CODEC_MPEG4_DEC)
+ mfc_write(dev, ctx->loop_filter_mpeg4, S5P_FIMV_ENC_LF_CTRL);
+ else
+ mfc_write(dev, 0, S5P_FIMV_ENC_LF_CTRL);
+ mfc_write(dev, ((ctx->slice_interface & S5P_FIMV_SLICE_INT_MASK) <<
+ S5P_FIMV_SLICE_INT_SHIFT) | (ctx->display_delay_enable <<
+ S5P_FIMV_DDELAY_ENA_SHIFT) | ((ctx->display_delay &
+ S5P_FIMV_DDELAY_VAL_MASK) << S5P_FIMV_DDELAY_VAL_SHIFT),
+ S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
+ mfc_write(dev,
+ ((S5P_FIMV_CH_SEQ_HEADER & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT)
+ | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+ return 0;
+}
+
+static void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned int dpb;
+
+ if (flush)
+ dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) | (
+ S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
+ else
+ dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
+ ~(S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
+ mfc_write(dev, dpb, S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
+}
+
+/* Decode a single frame */
+int s5p_mfc_decode_one_frame(struct s5p_mfc_ctx *ctx,
+ enum s5p_mfc_decode_arg last_frame)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ mfc_write(dev, ctx->dec_dst_flag, S5P_FIMV_SI_CH0_RELEASE_BUF);
+ s5p_mfc_set_shared_buffer(ctx);
+ s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag);
+ /* Issue different commands to the instance depending on whether
+ * it is the last frame or not. */
+ switch (last_frame) {
+ case MFC_DEC_FRAME:
+ mfc_write(dev, ((S5P_FIMV_CH_FRAME_START & S5P_FIMV_CH_MASK) <<
+ S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+ break;
+ case MFC_DEC_LAST_FRAME:
+ mfc_write(dev, ((S5P_FIMV_CH_LAST_FRAME & S5P_FIMV_CH_MASK) <<
+ S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+ break;
+ case MFC_DEC_RES_CHANGE:
+ mfc_write(dev, ((S5P_FIMV_CH_FRAME_START_REALLOC &
+ S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
+ S5P_FIMV_SI_CH0_INST_ID);
+ break;
+ }
+ mfc_debug(2, "Decoding a usual frame\n");
+ return 0;
+}
+
+int s5p_mfc_init_encode(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ if (ctx->codec_mode == S5P_FIMV_CODEC_H264_ENC)
+ s5p_mfc_set_enc_params_h264(ctx);
+ else if (ctx->codec_mode == S5P_FIMV_CODEC_MPEG4_ENC)
+ s5p_mfc_set_enc_params_mpeg4(ctx);
+ else if (ctx->codec_mode == S5P_FIMV_CODEC_H263_ENC)
+ s5p_mfc_set_enc_params_h263(ctx);
+ else {
+ mfc_err("Unknown codec for encoding (%x)\n",
+ ctx->codec_mode);
+ return -EINVAL;
+ }
+ s5p_mfc_set_shared_buffer(ctx);
+ mfc_write(dev, ((S5P_FIMV_CH_SEQ_HEADER << 16) & 0x70000) |
+ (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+ return 0;
+}
+
+/* Encode a single frame */
+int s5p_mfc_encode_one_frame(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ /* memory structure cur. frame */
+ if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
+ mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
+ else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
+ mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
+ s5p_mfc_set_shared_buffer(ctx);
+ mfc_write(dev, (S5P_FIMV_CH_FRAME_START << 16 & 0x70000) |
+ (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
+ return 0;
+}
+
+static int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
+{
+ unsigned long flags;
+ int new_ctx;
+ int cnt;
+
+ spin_lock_irqsave(&dev->condlock, flags);
+ new_ctx = (dev->curr_ctx + 1) % MFC_NUM_CONTEXTS;
+ cnt = 0;
+ while (!test_bit(new_ctx, &dev->ctx_work_bits)) {
+ new_ctx = (new_ctx + 1) % MFC_NUM_CONTEXTS;
+ if (++cnt > MFC_NUM_CONTEXTS) {
+ /* No contexts to run */
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ return -EAGAIN;
+ }
+ }
+ spin_unlock_irqrestore(&dev->condlock, flags);
+ return new_ctx;
+}
+
+static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+
+ s5p_mfc_set_dec_stream_buffer(ctx, 0, 0, 0);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_decode_one_frame(ctx, MFC_DEC_RES_CHANGE);
+}
+
+static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_buf *temp_vb;
+ unsigned long flags;
+ unsigned int index;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ /* Frames are being decoded */
+ if (list_empty(&ctx->src_queue)) {
+ mfc_debug(2, "No src buffers\n");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return -EAGAIN;
+ }
+ /* Get the next source buffer */
+ temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ temp_vb->used = 1;
+ s5p_mfc_set_dec_stream_buffer(ctx,
+ vb2_dma_contig_plane_paddr(temp_vb->b, 0), ctx->consumed_stream,
+ temp_vb->b->v4l2_planes[0].bytesused);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ index = temp_vb->b->v4l2_buf.index;
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
+ last_frame = MFC_DEC_LAST_FRAME;
+ mfc_debug(2, "Setting ctx->state to FINISHING\n");
+ ctx->state = MFCINST_FINISHING;
+ }
+ s5p_mfc_decode_one_frame(ctx, last_frame);
+ return 0;
+}
+
+static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ struct s5p_mfc_buf *dst_mb;
+ struct s5p_mfc_buf *src_mb;
+ unsigned long src_y_addr, src_c_addr, dst_addr;
+ unsigned int dst_size;
+
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (list_empty(&ctx->src_queue)) {
+ mfc_debug(2, "no src buffers\n");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return -EAGAIN;
+ }
+ if (list_empty(&ctx->dst_queue)) {
+ mfc_debug(2, "no dst buffers\n");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return -EAGAIN;
+ }
+ src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ src_mb->used = 1;
+ src_y_addr = vb2_dma_contig_plane_paddr(src_mb->b, 0);
+ src_c_addr = vb2_dma_contig_plane_paddr(src_mb->b, 1);
+ s5p_mfc_set_enc_frame_buffer(ctx, src_y_addr, src_c_addr);
+ dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+ dst_mb->used = 1;
+ dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_size = vb2_plane_size(dst_mb->b, 0);
+ s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_encode_one_frame(ctx);
+ return 0;
+}
+
+static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ struct s5p_mfc_buf *temp_vb;
+
+ /* Initializing decoding - parsing header */
+ spin_lock_irqsave(&dev->irqlock, flags);
+ mfc_debug(2, "Preparing to init decoding\n");
+ temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ s5p_mfc_set_dec_desc_buffer(ctx);
+ mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
+ s5p_mfc_set_dec_stream_buffer(ctx,
+ vb2_dma_contig_plane_paddr(temp_vb->b, 0),
+ 0, temp_vb->b->v4l2_planes[0].bytesused);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_init_decode(ctx);
+}
+
+static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ struct s5p_mfc_buf *dst_mb;
+ unsigned long dst_addr;
+ unsigned int dst_size;
+
+ s5p_mfc_set_enc_ref_buffer(ctx);
+ spin_lock_irqsave(&dev->irqlock, flags);
+ dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
+ dst_addr = vb2_dma_contig_plane_paddr(dst_mb->b, 0);
+ dst_size = vb2_plane_size(dst_mb->b, 0);
+ s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ s5p_mfc_init_encode(ctx);
+}
+
+static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ unsigned long flags;
+ struct s5p_mfc_buf *temp_vb;
+ int ret;
+
+ /*
+ * The header was parsed, now start processing.
+ * First set the output frame buffers.
+ */
+ if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
+ mfc_err("It seems that not all destionation buffers were "
+ "mmaped\nMFC requires that all destination are mmaped "
+ "before starting processing\n");
+ return -EAGAIN;
+ }
+ spin_lock_irqsave(&dev->irqlock, flags);
+ if (list_empty(&ctx->src_queue)) {
+ mfc_err("Header has been deallocated in the middle of"
+ " initialization\n");
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ return -EIO;
+ }
+ temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
+ mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
+ s5p_mfc_set_dec_stream_buffer(ctx,
+ vb2_dma_contig_plane_paddr(temp_vb->b, 0),
+ 0, temp_vb->b->v4l2_planes[0].bytesused);
+ spin_unlock_irqrestore(&dev->irqlock, flags);
+ dev->curr_ctx = ctx->num;
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ ret = s5p_mfc_set_dec_frame_buffer(ctx);
+ if (ret) {
+ mfc_err("Failed to alloc frame mem\n");
+ ctx->state = MFCINST_ERROR;
+ }
+ return ret;
+}
+
+/* Try running an operation on hardware */
+void s5p_mfc_try_run(struct s5p_mfc_dev *dev)
+{
+ struct s5p_mfc_ctx *ctx;
+ int new_ctx;
+ unsigned int ret = 0;
+
+ if (test_bit(0, &dev->enter_suspend)) {
+ mfc_debug(1, "Entering suspend so do not schedule any jobs\n");
+ return;
+ }
+ /* Check whether hardware is not running */
+ if (test_and_set_bit(0, &dev->hw_lock) != 0) {
+ /* This is perfectly ok, the scheduled ctx should wait */
+ mfc_debug(1, "Couldn't lock HW\n");
+ return;
+ }
+ /* Choose the context to run */
+ new_ctx = s5p_mfc_get_new_ctx(dev);
+ if (new_ctx < 0) {
+ /* No contexts to run */
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0) {
+ mfc_err("Failed to unlock hardware\n");
+ return;
+ }
+ mfc_debug(1, "No ctx is scheduled to be run\n");
+ return;
+ }
+ ctx = dev->ctx[new_ctx];
+ /* Got context to run in ctx */
+ /*
+ * Last frame has already been sent to MFC.
+ * Now obtaining frames from MFC buffer
+ */
+ s5p_mfc_clock_on();
+ if (ctx->type == MFCINST_DECODER) {
+ s5p_mfc_set_dec_desc_buffer(ctx);
+ switch (ctx->state) {
+ case MFCINST_FINISHING:
+ s5p_mfc_run_dec_frame(ctx, MFC_DEC_LAST_FRAME);
+ break;
+ case MFCINST_RUNNING:
+ ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
+ break;
+ case MFCINST_INIT:
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ ret = s5p_mfc_open_inst_cmd(ctx);
+ break;
+ case MFCINST_RETURN_INST:
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ ret = s5p_mfc_close_inst_cmd(ctx);
+ break;
+ case MFCINST_GOT_INST:
+ s5p_mfc_run_init_dec(ctx);
+ break;
+ case MFCINST_HEAD_PARSED:
+ ret = s5p_mfc_run_init_dec_buffers(ctx);
+ mfc_debug(1, "head parsed\n");
+ break;
+ case MFCINST_RES_CHANGE_INIT:
+ s5p_mfc_run_res_change(ctx);
+ break;
+ case MFCINST_RES_CHANGE_FLUSH:
+ s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
+ break;
+ case MFCINST_RES_CHANGE_END:
+ mfc_debug(2, "Finished remaining frames after resolution change\n");
+ ctx->capture_state = QUEUE_FREE;
+ mfc_debug(2, "Will re-init the codec\n");
+ s5p_mfc_run_init_dec(ctx);
+ break;
+ default:
+ ret = -EAGAIN;
+ }
+ } else if (ctx->type == MFCINST_ENCODER) {
+ switch (ctx->state) {
+ case MFCINST_FINISHING:
+ case MFCINST_RUNNING:
+ ret = s5p_mfc_run_enc_frame(ctx);
+ break;
+ case MFCINST_INIT:
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ ret = s5p_mfc_open_inst_cmd(ctx);
+ break;
+ case MFCINST_RETURN_INST:
+ s5p_mfc_clean_ctx_int_flags(ctx);
+ ret = s5p_mfc_close_inst_cmd(ctx);
+ break;
+ case MFCINST_GOT_INST:
+ s5p_mfc_run_init_enc(ctx);
+ break;
+ default:
+ ret = -EAGAIN;
+ }
+ } else {
+ mfc_err("Invalid context type: %d\n", ctx->type);
+ ret = -EAGAIN;
+ }
+
+ if (ret) {
+ /* Free hardware lock */
+ if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+ mfc_err("Failed to unlock hardware\n");
+
+ /* This is indeed important: since no operation has been
+ * scheduled, reduce the clock count here, because no one
+ * else will ever do it; no interrupt related to this
+ * try_run will ever come from the hardware. */
+ s5p_mfc_clock_off();
+ }
+}
+
+
+void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq)
+{
+ struct s5p_mfc_buf *b;
+ int i;
+
+ while (!list_empty(lh)) {
+ b = list_entry(lh->next, struct s5p_mfc_buf, list);
+ for (i = 0; i < b->b->num_planes; i++)
+ vb2_set_plane_payload(b->b, i, 0);
+ vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR);
+ list_del(&b->list);
+ }
+}
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_opr.h b/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
new file mode 100644
index 00000000000..db83836e6a9
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
@@ -0,0 +1,91 @@
+/*
+ * drivers/media/video/samsung/mfc5/s5p_mfc_opr.h
+ *
+ * Header file for Samsung MFC (Multi Function Codec - FIMV) driver
+ * Contains declarations of hw related functions.
+ *
+ * Kamil Debski, Copyright (C) 2011 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef S5P_MFC_OPR_H_
+#define S5P_MFC_OPR_H_
+
+#include "s5p_mfc_common.h"
+
+int s5p_mfc_init_decode(struct s5p_mfc_ctx *ctx);
+int s5p_mfc_init_encode(struct s5p_mfc_ctx *mfc_ctx);
+
+/* Decoding functions */
+int s5p_mfc_set_dec_frame_buffer(struct s5p_mfc_ctx *ctx);
+int s5p_mfc_set_dec_stream_buffer(struct s5p_mfc_ctx *ctx, int buf_addr,
+ unsigned int start_num_byte,
+ unsigned int buf_size);
+
+/* Encoding functions */
+void s5p_mfc_set_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
+ unsigned long y_addr, unsigned long c_addr);
+int s5p_mfc_set_enc_stream_buffer(struct s5p_mfc_ctx *ctx,
+ unsigned long addr, unsigned int size);
+void s5p_mfc_get_enc_frame_buffer(struct s5p_mfc_ctx *ctx,
+ unsigned long *y_addr, unsigned long *c_addr);
+int s5p_mfc_set_enc_ref_buffer(struct s5p_mfc_ctx *mfc_ctx);
+
+int s5p_mfc_decode_one_frame(struct s5p_mfc_ctx *ctx,
+ enum s5p_mfc_decode_arg last_frame);
+int s5p_mfc_encode_one_frame(struct s5p_mfc_ctx *mfc_ctx);
+
+/* Memory allocation */
+int s5p_mfc_alloc_dec_temp_buffers(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_release_dec_desc_buffer(struct s5p_mfc_ctx *ctx);
+
+int s5p_mfc_alloc_codec_buffers(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_release_codec_buffers(struct s5p_mfc_ctx *ctx);
+
+int s5p_mfc_alloc_instance_buffer(struct s5p_mfc_ctx *ctx);
+void s5p_mfc_release_instance_buffer(struct s5p_mfc_ctx *ctx);
+
+void s5p_mfc_try_run(struct s5p_mfc_dev *dev);
+void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq);
+
+#define s5p_mfc_get_dspl_y_adr() (readl(dev->regs_base + \
+ S5P_FIMV_SI_DISPLAY_Y_ADR) << \
+ MFC_OFFSET_SHIFT)
+#define s5p_mfc_get_dec_y_adr() (readl(dev->regs_base + \
+ S5P_FIMV_SI_DISPLAY_Y_ADR) << \
+ MFC_OFFSET_SHIFT)
+#define s5p_mfc_get_dspl_status() readl(dev->regs_base + \
+ S5P_FIMV_SI_DISPLAY_STATUS)
+#define s5p_mfc_get_frame_type() (readl(dev->regs_base + \
+ S5P_FIMV_DECODE_FRAME_TYPE) \
+ & S5P_FIMV_DECODE_FRAME_MASK)
+#define s5p_mfc_get_consumed_stream() readl(dev->regs_base + \
+ S5P_FIMV_SI_CONSUMED_BYTES)
+#define s5p_mfc_get_int_reason() (readl(dev->regs_base + \
+ S5P_FIMV_RISC2HOST_CMD) & \
+ S5P_FIMV_RISC2HOST_CMD_MASK)
+#define s5p_mfc_get_int_err() readl(dev->regs_base + \
+ S5P_FIMV_RISC2HOST_ARG2)
+#define s5p_mfc_err_dec(x) (((x) & S5P_FIMV_ERR_DEC_MASK) >> \
+ S5P_FIMV_ERR_DEC_SHIFT)
+#define s5p_mfc_err_dspl(x) (((x) & S5P_FIMV_ERR_DSPL_MASK) >> \
+ S5P_FIMV_ERR_DSPL_SHIFT)
+#define s5p_mfc_get_img_width() readl(dev->regs_base + \
+ S5P_FIMV_SI_HRESOL)
+#define s5p_mfc_get_img_height() readl(dev->regs_base + \
+ S5P_FIMV_SI_VRESOL)
+#define s5p_mfc_get_dpb_count() readl(dev->regs_base + \
+ S5P_FIMV_SI_BUF_NUMBER)
+#define s5p_mfc_get_inst_no() readl(dev->regs_base + \
+ S5P_FIMV_RISC2HOST_ARG1)
+#define s5p_mfc_get_enc_strm_size() readl(dev->regs_base + \
+ S5P_FIMV_ENC_SI_STRM_SIZE)
+#define s5p_mfc_get_enc_slice_type() readl(dev->regs_base + \
+ S5P_FIMV_ENC_SI_SLICE_TYPE)
+
+#endif /* S5P_MFC_OPR_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_pm.c b/drivers/media/video/s5p-mfc/s5p_mfc_pm.c
new file mode 100644
index 00000000000..f6a3035c4fb
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_pm.c
@@ -0,0 +1,117 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_pm.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+#include "s5p_mfc_pm.h"
+
+#define MFC_CLKNAME "sclk_mfc"
+#define MFC_GATE_CLK_NAME "mfc"
+
+#define CLK_DEBUG
+
+static struct s5p_mfc_pm *pm;
+static struct s5p_mfc_dev *p_dev;
+
+#ifdef CLK_DEBUG
+atomic_t clk_ref;
+#endif
+
+int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
+{
+ int ret = 0;
+
+ pm = &dev->pm;
+ p_dev = dev;
+ pm->clock_gate = clk_get(&dev->plat_dev->dev, MFC_GATE_CLK_NAME);
+ if (IS_ERR(pm->clock_gate)) {
+ mfc_err("Failed to get clock-gating control\n");
+ ret = -ENOENT;
+ goto err_g_ip_clk;
+ }
+ pm->clock = clk_get(&dev->plat_dev->dev, MFC_CLKNAME);
+ if (IS_ERR(pm->clock)) {
+ mfc_err("Failed to get MFC clock\n");
+ ret = -ENOENT;
+ goto err_g_ip_clk_2;
+ }
+ atomic_set(&pm->power, 0);
+#ifdef CONFIG_PM_RUNTIME
+ pm->device = &dev->plat_dev->dev;
+ pm_runtime_enable(pm->device);
+#endif
+#ifdef CLK_DEBUG
+ atomic_set(&clk_ref, 0);
+#endif
+ return 0;
+err_g_ip_clk_2:
+ clk_put(pm->clock_gate);
+err_g_ip_clk:
+ return ret;
+}
+
+void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
+{
+ clk_put(pm->clock_gate);
+ clk_put(pm->clock);
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_disable(pm->device);
+#endif
+}
+
+int s5p_mfc_clock_on(void)
+{
+ int ret;
+#ifdef CLK_DEBUG
+ atomic_inc(&clk_ref);
+ mfc_debug(3, "+ %d", atomic_read(&clk_ref));
+#endif
+ ret = clk_enable(pm->clock_gate);
+ return ret;
+}
+
+void s5p_mfc_clock_off(void)
+{
+#ifdef CLK_DEBUG
+ atomic_dec(&clk_ref);
+ mfc_debug(3, "- %d", atomic_read(&clk_ref));
+#endif
+ clk_disable(pm->clock_gate);
+}
+
+int s5p_mfc_power_on(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ return pm_runtime_get_sync(pm->device);
+#else
+ atomic_set(&pm->power, 1);
+ return 0;
+#endif
+}
+
+int s5p_mfc_power_off(void)
+{
+#ifdef CONFIG_PM_RUNTIME
+ return pm_runtime_put_sync(pm->device);
+#else
+ atomic_set(&pm->power, 0);
+ return 0;
+#endif
+}
+
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_pm.h b/drivers/media/video/s5p-mfc/s5p_mfc_pm.h
new file mode 100644
index 00000000000..5107914f27e
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_pm.h
@@ -0,0 +1,24 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_pm.h
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_PM_H_
+#define S5P_MFC_PM_H_
+
+int s5p_mfc_init_pm(struct s5p_mfc_dev *dev);
+void s5p_mfc_final_pm(struct s5p_mfc_dev *dev);
+
+int s5p_mfc_clock_on(void);
+void s5p_mfc_clock_off(void);
+int s5p_mfc_power_on(void);
+int s5p_mfc_power_off(void);
+
+#endif /* S5P_MFC_PM_H_ */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_shm.c b/drivers/media/video/s5p-mfc/s5p_mfc_shm.c
new file mode 100644
index 00000000000..91fdbac8c37
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_shm.c
@@ -0,0 +1,47 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_shm.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifdef CONFIG_ARCH_EXYNOS4
+#include <linux/dma-mapping.h>
+#endif
+#include <linux/io.h>
+#include "s5p_mfc_common.h"
+#include "s5p_mfc_debug.h"
+
+int s5p_mfc_init_shm(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ void *shm_alloc_ctx = dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
+
+ ctx->shm_alloc = vb2_dma_contig_memops.alloc(shm_alloc_ctx,
+ SHARED_BUF_SIZE);
+ if (IS_ERR(ctx->shm_alloc)) {
+ mfc_err("failed to allocate shared memory\n");
+ return PTR_ERR(ctx->shm_alloc);
+ }
+ /* shm_ofs only keeps the offset from base (port a) */
+ ctx->shm_ofs = s5p_mfc_mem_cookie(shm_alloc_ctx, ctx->shm_alloc)
+ - dev->bank1;
+ BUG_ON(ctx->shm_ofs & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
+ ctx->shm = vb2_dma_contig_memops.vaddr(ctx->shm_alloc);
+ if (!ctx->shm) {
+ vb2_dma_contig_memops.put(ctx->shm_alloc);
+ ctx->shm_ofs = 0;
+ ctx->shm_alloc = NULL;
+ mfc_err("failed to virt addr of shared memory\n");
+ return -ENOMEM;
+ }
+ memset((void *)ctx->shm, 0, SHARED_BUF_SIZE);
+ wmb();
+ return 0;
+}
+
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_shm.h b/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
new file mode 100644
index 00000000000..764eac6bcc4
--- /dev/null
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
@@ -0,0 +1,91 @@
+/*
+ * linux/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef S5P_MFC_SHM_H_
+#define S5P_MFC_SHM_H_
+
+enum MFC_SHM_OFS
+{
+ EXTENEDED_DECODE_STATUS = 0x00, /* D */
+ SET_FRAME_TAG = 0x04, /* D */
+ GET_FRAME_TAG_TOP = 0x08, /* D */
+ GET_FRAME_TAG_BOT = 0x0C, /* D */
+ PIC_TIME_TOP = 0x10, /* D */
+ PIC_TIME_BOT = 0x14, /* D */
+ START_BYTE_NUM = 0x18, /* D */
+
+ CROP_INFO_H = 0x20, /* D */
+ CROP_INFO_V = 0x24, /* D */
+ EXT_ENC_CONTROL = 0x28, /* E */
+ ENC_PARAM_CHANGE = 0x2C, /* E */
+ RC_VOP_TIMING = 0x30, /* E, MPEG4 */
+ HEC_PERIOD = 0x34, /* E, MPEG4 */
+ METADATA_ENABLE = 0x38, /* C */
+ METADATA_STATUS = 0x3C, /* C */
+ METADATA_DISPLAY_INDEX = 0x40, /* C */
+ EXT_METADATA_START_ADDR = 0x44, /* C */
+ PUT_EXTRADATA = 0x48, /* C */
+ EXTRADATA_ADDR = 0x4C, /* C */
+
+ ALLOC_LUMA_DPB_SIZE = 0x64, /* D */
+ ALLOC_CHROMA_DPB_SIZE = 0x68, /* D */
+ ALLOC_MV_SIZE = 0x6C, /* D */
+ P_B_FRAME_QP = 0x70, /* E */
+ SAMPLE_ASPECT_RATIO_IDC = 0x74, /* E, H.264, depends on
+ ASPECT_RATIO_VUI_ENABLE in EXT_ENC_CONTROL */
+ EXTENDED_SAR = 0x78, /* E, H.264, depends on
+ ASPECT_RATIO_VUI_ENABLE in EXT_ENC_CONTROL */
+ DISP_PIC_PROFILE = 0x7C, /* D */
+ FLUSH_CMD_TYPE = 0x80, /* C */
+ FLUSH_CMD_INBUF1 = 0x84, /* C */
+ FLUSH_CMD_INBUF2 = 0x88, /* C */
+ FLUSH_CMD_OUTBUF = 0x8C, /* E */
+ NEW_RC_BIT_RATE = 0x90, /* E, same format as RC_BIT_RATE(0xC5A8),
+ depends on RC_BIT_RATE_CHANGE in ENC_PARAM_CHANGE */
+ NEW_RC_FRAME_RATE = 0x94, /* E, same format as RC_FRAME_RATE(0xD0D0),
+ depends on RC_FRAME_RATE_CHANGE in ENC_PARAM_CHANGE */
+ NEW_I_PERIOD = 0x98, /* E, same format as I_FRM_CTRL(0xC504),
+ depends on I_PERIOD_CHANGE in ENC_PARAM_CHANGE */
+ H264_I_PERIOD = 0x9C, /* E, H.264, open GOP */
+ RC_CONTROL_CONFIG = 0xA0, /* E */
+ BATCH_INPUT_ADDR = 0xA4, /* E */
+ BATCH_OUTPUT_ADDR = 0xA8, /* E */
+ BATCH_OUTPUT_SIZE = 0xAC, /* E */
+ MIN_LUMA_DPB_SIZE = 0xB0, /* D */
+ DEVICE_FORMAT_ID = 0xB4, /* C */
+ H264_POC_TYPE = 0xB8, /* D */
+ MIN_CHROMA_DPB_SIZE = 0xBC, /* D */
+ DISP_PIC_FRAME_TYPE = 0xC0, /* D */
+ FREE_LUMA_DPB = 0xC4, /* D, VC1 MPEG4 */
+ ASPECT_RATIO_INFO = 0xC8, /* D, MPEG4 */
+ EXTENDED_PAR = 0xCC, /* D, MPEG4 */
+ DBG_HISTORY_INPUT0 = 0xD0, /* C */
+ DBG_HISTORY_INPUT1 = 0xD4, /* C */
+ DBG_HISTORY_OUTPUT = 0xD8, /* C */
+ HIERARCHICAL_P_QP = 0xE0, /* E, H.264 */
+};
+
+int s5p_mfc_init_shm(struct s5p_mfc_ctx *ctx);
+
+#define s5p_mfc_write_shm(ctx, x, ofs) \
+ do { \
+ writel(x, (ctx->shm + ofs)); \
+ wmb(); \
+ } while (0)
+
+static inline u32 s5p_mfc_read_shm(struct s5p_mfc_ctx *ctx, unsigned int ofs)
+{
+ rmb();
+ return readl(ctx->shm + ofs);
+}
+
+#endif /* S5P_MFC_SHM_H_ */
diff --git a/drivers/media/video/s5p-tv/Kconfig b/drivers/media/video/s5p-tv/Kconfig
new file mode 100644
index 00000000000..9c37dee7bc5
--- /dev/null
+++ b/drivers/media/video/s5p-tv/Kconfig
@@ -0,0 +1,76 @@
+# drivers/media/video/s5p-tv/Kconfig
+#
+# Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+# http://www.samsung.com/
+# Tomasz Stanislawski <t.stanislaws@samsung.com>
+#
+# Licensed under GPL
+
+config VIDEO_SAMSUNG_S5P_TV
+ bool "Samsung TV driver for S5P platform (experimental)"
+ depends on PLAT_S5P
+ depends on EXPERIMENTAL
+ default n
+ ---help---
+ Say Y here to enable selecting the TV output devices for
+ the Samsung S5P platform.
+
+if VIDEO_SAMSUNG_S5P_TV
+
+config VIDEO_SAMSUNG_S5P_HDMI
+ tristate "Samsung HDMI Driver"
+ depends on VIDEO_V4L2
+ depends on VIDEO_SAMSUNG_S5P_TV
+ select VIDEO_SAMSUNG_S5P_HDMIPHY
+ help
+ Say Y here if you want support for the HDMI output
+ interface in Samsung S5P SoCs. The driver can be compiled
+ as a module. It is an auxiliary driver that exposes a V4L2
+ subdev for use by other drivers. This driver requires the
+ hdmiphy driver to work correctly.
+
+config VIDEO_SAMSUNG_S5P_HDMI_DEBUG
+ bool "Enable debug for HDMI Driver"
+ depends on VIDEO_SAMSUNG_S5P_HDMI
+ default n
+ help
+ Enables debugging for HDMI driver.
+
+config VIDEO_SAMSUNG_S5P_HDMIPHY
+ tristate "Samsung HDMIPHY Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && I2C
+ depends on VIDEO_SAMSUNG_S5P_TV
+ help
+ Say Y here if you want support for the physical HDMI
+ interface in Samsung S5P SoCs. The driver can be compiled
+ as a module. It is an I2C driver that exposes a V4L2
+ subdev for use by other drivers.
+
+config VIDEO_SAMSUNG_S5P_SDO
+ tristate "Samsung Analog TV Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_SAMSUNG_S5P_TV
+ help
+ Say Y here if you want support for the analog TV output
+ interface in Samsung S5P SoCs. The driver can be compiled
+ as a module. It is an auxiliary driver that exposes a V4L2
+ subdev for use by other drivers. This driver requires the
+ hdmiphy driver to work correctly.
+
+config VIDEO_SAMSUNG_S5P_MIXER
+ tristate "Samsung Mixer and Video Processor Driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_SAMSUNG_S5P_TV
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Say Y here if you want support for the Mixer in Samsung S5P SoCs.
+ This device produces image data for one of the output interfaces.
+
+config VIDEO_SAMSUNG_S5P_MIXER_DEBUG
+ bool "Enable debug for Mixer Driver"
+ depends on VIDEO_SAMSUNG_S5P_MIXER
+ default n
+ help
+ Enables debugging for Mixer driver.
+
+endif # VIDEO_SAMSUNG_S5P_TV
diff --git a/drivers/media/video/s5p-tv/Makefile b/drivers/media/video/s5p-tv/Makefile
new file mode 100644
index 00000000000..37e4c17663b
--- /dev/null
+++ b/drivers/media/video/s5p-tv/Makefile
@@ -0,0 +1,17 @@
+# drivers/media/video/samsung/tvout/Makefile
+#
+# Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+# http://www.samsung.com/
+# Tomasz Stanislawski <t.stanislaws@samsung.com>
+#
+# Licensed under GPL
+
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_HDMIPHY) += s5p-hdmiphy.o
+s5p-hdmiphy-y += hdmiphy_drv.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_HDMI) += s5p-hdmi.o
+s5p-hdmi-y += hdmi_drv.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_SDO) += s5p-sdo.o
+s5p-sdo-y += sdo_drv.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MIXER) += s5p-mixer.o
+s5p-mixer-y += mixer_drv.o mixer_video.o mixer_reg.o mixer_grp_layer.o mixer_vp_layer.o
+
diff --git a/drivers/media/video/s5p-tv/hdmi_drv.c b/drivers/media/video/s5p-tv/hdmi_drv.c
new file mode 100644
index 00000000000..06d6663f459
--- /dev/null
+++ b/drivers/media/video/s5p-tv/hdmi_drv.c
@@ -0,0 +1,1042 @@
+/*
+ * Samsung HDMI interface driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#ifdef CONFIG_VIDEO_SAMSUNG_S5P_HDMI_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/bug.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+
+#include "regs-hdmi.h"
+
+MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
+MODULE_DESCRIPTION("Samsung HDMI");
+MODULE_LICENSE("GPL");
+
+/* default preset configured on probe */
+#define HDMI_DEFAULT_PRESET V4L2_DV_1080P60
+
+struct hdmi_resources {
+ struct clk *hdmi;
+ struct clk *sclk_hdmi;
+ struct clk *sclk_pixel;
+ struct clk *sclk_hdmiphy;
+ struct clk *hdmiphy;
+ struct regulator_bulk_data *regul_bulk;
+ int regul_count;
+};
+
+struct hdmi_device {
+ /** base address of HDMI registers */
+ void __iomem *regs;
+ /** HDMI interrupt */
+ unsigned int irq;
+ /** pointer to device parent */
+ struct device *dev;
+ /** subdev generated by HDMI device */
+ struct v4l2_subdev sd;
+ /** V4L2 device structure */
+ struct v4l2_device v4l2_dev;
+ /** subdev of HDMIPHY interface */
+ struct v4l2_subdev *phy_sd;
+ /** configuration of current graphic mode */
+ const struct hdmi_preset_conf *cur_conf;
+ /** current preset */
+ u32 cur_preset;
+ /** other resources */
+ struct hdmi_resources res;
+};
+
+struct hdmi_driver_data {
+ int hdmiphy_bus;
+};
+
+struct hdmi_tg_regs {
+ u8 cmd;
+ u8 h_fsz_l;
+ u8 h_fsz_h;
+ u8 hact_st_l;
+ u8 hact_st_h;
+ u8 hact_sz_l;
+ u8 hact_sz_h;
+ u8 v_fsz_l;
+ u8 v_fsz_h;
+ u8 vsync_l;
+ u8 vsync_h;
+ u8 vsync2_l;
+ u8 vsync2_h;
+ u8 vact_st_l;
+ u8 vact_st_h;
+ u8 vact_sz_l;
+ u8 vact_sz_h;
+ u8 field_chg_l;
+ u8 field_chg_h;
+ u8 vact_st2_l;
+ u8 vact_st2_h;
+ u8 vsync_top_hdmi_l;
+ u8 vsync_top_hdmi_h;
+ u8 vsync_bot_hdmi_l;
+ u8 vsync_bot_hdmi_h;
+ u8 field_top_hdmi_l;
+ u8 field_top_hdmi_h;
+ u8 field_bot_hdmi_l;
+ u8 field_bot_hdmi_h;
+};
+
+struct hdmi_core_regs {
+ u8 h_blank[2];
+ u8 v_blank[3];
+ u8 h_v_line[3];
+ u8 vsync_pol[1];
+ u8 int_pro_mode[1];
+ u8 v_blank_f[3];
+ u8 h_sync_gen[3];
+ u8 v_sync_gen1[3];
+ u8 v_sync_gen2[3];
+ u8 v_sync_gen3[3];
+};
+
+struct hdmi_preset_conf {
+ struct hdmi_core_regs core;
+ struct hdmi_tg_regs tg;
+ struct v4l2_mbus_framefmt mbus_fmt;
+};
+
+/* I2C module and id for HDMIPHY */
+static struct i2c_board_info hdmiphy_info = {
+ I2C_BOARD_INFO("hdmiphy", 0x38),
+};
+
+static struct hdmi_driver_data hdmi_driver_data[] = {
+ { .hdmiphy_bus = 3 },
+ { .hdmiphy_bus = 8 },
+};
+
+static struct platform_device_id hdmi_driver_types[] = {
+ {
+ .name = "s5pv210-hdmi",
+ .driver_data = (unsigned long)&hdmi_driver_data[0],
+ }, {
+ .name = "exynos4-hdmi",
+ .driver_data = (unsigned long)&hdmi_driver_data[1],
+ }, {
+ /* end node */
+ }
+};
+
+static const struct v4l2_subdev_ops hdmi_sd_ops;
+
+static struct hdmi_device *sd_to_hdmi_dev(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct hdmi_device, sd);
+}
+
+static inline
+void hdmi_write(struct hdmi_device *hdev, u32 reg_id, u32 value)
+{
+ writel(value, hdev->regs + reg_id);
+}
+
+static inline
+void hdmi_write_mask(struct hdmi_device *hdev, u32 reg_id, u32 value, u32 mask)
+{
+ u32 old = readl(hdev->regs + reg_id);
+ value = (value & mask) | (old & ~mask);
+ writel(value, hdev->regs + reg_id);
+}
+
+static inline
+void hdmi_writeb(struct hdmi_device *hdev, u32 reg_id, u8 value)
+{
+ writeb(value, hdev->regs + reg_id);
+}
+
+static inline u32 hdmi_read(struct hdmi_device *hdev, u32 reg_id)
+{
+ return readl(hdev->regs + reg_id);
+}
+
+static irqreturn_t hdmi_irq_handler(int irq, void *dev_data)
+{
+ struct hdmi_device *hdev = dev_data;
+ u32 intc_flag;
+
+ (void)irq;
+ intc_flag = hdmi_read(hdev, HDMI_INTC_FLAG);
+ /* clearing flags for HPD plug/unplug */
+ if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
+ printk(KERN_INFO "unplugged\n");
+ hdmi_write_mask(hdev, HDMI_INTC_FLAG, ~0,
+ HDMI_INTC_FLAG_HPD_UNPLUG);
+ }
+ if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
+ printk(KERN_INFO "plugged\n");
+ hdmi_write_mask(hdev, HDMI_INTC_FLAG, ~0,
+ HDMI_INTC_FLAG_HPD_PLUG);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void hdmi_reg_init(struct hdmi_device *hdev)
+{
+ /* enable HPD interrupts */
+ hdmi_write_mask(hdev, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
+ HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+ /* choose HDMI mode */
+ hdmi_write_mask(hdev, HDMI_MODE_SEL,
+ HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
+ /* disable bluescreen */
+ hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
+ /* choose bluescreen (fecal) color */
+ hdmi_writeb(hdev, HDMI_BLUE_SCREEN_0, 0x12);
+ hdmi_writeb(hdev, HDMI_BLUE_SCREEN_1, 0x34);
+ hdmi_writeb(hdev, HDMI_BLUE_SCREEN_2, 0x56);
+ /* enable AVI packet every vsync, fixes purple line problem */
+ hdmi_writeb(hdev, HDMI_AVI_CON, 0x02);
+ /* force YUV444; see CEA-861-D, table 7 for more detail */
+ hdmi_writeb(hdev, HDMI_AVI_BYTE(0), 2 << 5);
+ hdmi_write_mask(hdev, HDMI_CON_1, 2, 3 << 5);
+}
+
+static void hdmi_timing_apply(struct hdmi_device *hdev,
+ const struct hdmi_preset_conf *conf)
+{
+ const struct hdmi_core_regs *core = &conf->core;
+ const struct hdmi_tg_regs *tg = &conf->tg;
+
+ /* setting core registers */
+ hdmi_writeb(hdev, HDMI_H_BLANK_0, core->h_blank[0]);
+ hdmi_writeb(hdev, HDMI_H_BLANK_1, core->h_blank[1]);
+ hdmi_writeb(hdev, HDMI_V_BLANK_0, core->v_blank[0]);
+ hdmi_writeb(hdev, HDMI_V_BLANK_1, core->v_blank[1]);
+ hdmi_writeb(hdev, HDMI_V_BLANK_2, core->v_blank[2]);
+ hdmi_writeb(hdev, HDMI_H_V_LINE_0, core->h_v_line[0]);
+ hdmi_writeb(hdev, HDMI_H_V_LINE_1, core->h_v_line[1]);
+ hdmi_writeb(hdev, HDMI_H_V_LINE_2, core->h_v_line[2]);
+ hdmi_writeb(hdev, HDMI_VSYNC_POL, core->vsync_pol[0]);
+ hdmi_writeb(hdev, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
+ hdmi_writeb(hdev, HDMI_V_BLANK_F_0, core->v_blank_f[0]);
+ hdmi_writeb(hdev, HDMI_V_BLANK_F_1, core->v_blank_f[1]);
+ hdmi_writeb(hdev, HDMI_V_BLANK_F_2, core->v_blank_f[2]);
+ hdmi_writeb(hdev, HDMI_H_SYNC_GEN_0, core->h_sync_gen[0]);
+ hdmi_writeb(hdev, HDMI_H_SYNC_GEN_1, core->h_sync_gen[1]);
+ hdmi_writeb(hdev, HDMI_H_SYNC_GEN_2, core->h_sync_gen[2]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
+ hdmi_writeb(hdev, HDMI_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+ /* Timing generator registers */
+ hdmi_writeb(hdev, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
+ hdmi_writeb(hdev, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
+ hdmi_writeb(hdev, HDMI_TG_HACT_ST_L, tg->hact_st_l);
+ hdmi_writeb(hdev, HDMI_TG_HACT_ST_H, tg->hact_st_h);
+ hdmi_writeb(hdev, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
+ hdmi_writeb(hdev, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
+ hdmi_writeb(hdev, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
+ hdmi_writeb(hdev, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC_L, tg->vsync_l);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC_H, tg->vsync_h);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC2_L, tg->vsync2_l);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC2_H, tg->vsync2_h);
+ hdmi_writeb(hdev, HDMI_TG_VACT_ST_L, tg->vact_st_l);
+ hdmi_writeb(hdev, HDMI_TG_VACT_ST_H, tg->vact_st_h);
+ hdmi_writeb(hdev, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
+ hdmi_writeb(hdev, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
+ hdmi_writeb(hdev, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
+ hdmi_writeb(hdev, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
+ hdmi_writeb(hdev, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
+ hdmi_writeb(hdev, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
+ hdmi_writeb(hdev, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
+ hdmi_writeb(hdev, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
+ hdmi_writeb(hdev, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
+ hdmi_writeb(hdev, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
+ hdmi_writeb(hdev, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
+}
+
+static int hdmi_conf_apply(struct hdmi_device *hdmi_dev)
+{
+ struct device *dev = hdmi_dev->dev;
+ const struct hdmi_preset_conf *conf = hdmi_dev->cur_conf;
+ struct v4l2_dv_preset preset;
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* reset hdmiphy */
+ hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
+ mdelay(10);
+ hdmi_write_mask(hdmi_dev, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
+ mdelay(10);
+
+ /* configure presets */
+ preset.preset = hdmi_dev->cur_preset;
+ ret = v4l2_subdev_call(hdmi_dev->phy_sd, video, s_dv_preset, &preset);
+ if (ret) {
+ dev_err(dev, "failed to set preset (%u)\n", preset.preset);
+ return ret;
+ }
+
+ /* resetting HDMI core */
+ hdmi_write_mask(hdmi_dev, HDMI_CORE_RSTOUT, 0, HDMI_CORE_SW_RSTOUT);
+ mdelay(10);
+ hdmi_write_mask(hdmi_dev, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT);
+ mdelay(10);
+
+ hdmi_reg_init(hdmi_dev);
+
+ /* setting core registers */
+ hdmi_timing_apply(hdmi_dev, conf);
+
+ return 0;
+}
+
+static void hdmi_dumpregs(struct hdmi_device *hdev, char *prefix)
+{
+#define DUMPREG(reg_id) \
+ dev_dbg(hdev->dev, "%s:" #reg_id " = %08x\n", prefix, \
+ readl(hdev->regs + reg_id))
+
+ dev_dbg(hdev->dev, "%s: ---- CONTROL REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_INTC_FLAG);
+ DUMPREG(HDMI_INTC_CON);
+ DUMPREG(HDMI_HPD_STATUS);
+ DUMPREG(HDMI_PHY_RSTOUT);
+ DUMPREG(HDMI_PHY_VPLL);
+ DUMPREG(HDMI_PHY_CMU);
+ DUMPREG(HDMI_CORE_RSTOUT);
+
+ dev_dbg(hdev->dev, "%s: ---- CORE REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_CON_0);
+ DUMPREG(HDMI_CON_1);
+ DUMPREG(HDMI_CON_2);
+ DUMPREG(HDMI_SYS_STATUS);
+ DUMPREG(HDMI_PHY_STATUS);
+ DUMPREG(HDMI_STATUS_EN);
+ DUMPREG(HDMI_HPD);
+ DUMPREG(HDMI_MODE_SEL);
+ DUMPREG(HDMI_HPD_GEN);
+ DUMPREG(HDMI_DC_CONTROL);
+ DUMPREG(HDMI_VIDEO_PATTERN_GEN);
+
+ dev_dbg(hdev->dev, "%s: ---- CORE SYNC REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_H_BLANK_0);
+ DUMPREG(HDMI_H_BLANK_1);
+ DUMPREG(HDMI_V_BLANK_0);
+ DUMPREG(HDMI_V_BLANK_1);
+ DUMPREG(HDMI_V_BLANK_2);
+ DUMPREG(HDMI_H_V_LINE_0);
+ DUMPREG(HDMI_H_V_LINE_1);
+ DUMPREG(HDMI_H_V_LINE_2);
+ DUMPREG(HDMI_VSYNC_POL);
+ DUMPREG(HDMI_INT_PRO_MODE);
+ DUMPREG(HDMI_V_BLANK_F_0);
+ DUMPREG(HDMI_V_BLANK_F_1);
+ DUMPREG(HDMI_V_BLANK_F_2);
+ DUMPREG(HDMI_H_SYNC_GEN_0);
+ DUMPREG(HDMI_H_SYNC_GEN_1);
+ DUMPREG(HDMI_H_SYNC_GEN_2);
+ DUMPREG(HDMI_V_SYNC_GEN_1_0);
+ DUMPREG(HDMI_V_SYNC_GEN_1_1);
+ DUMPREG(HDMI_V_SYNC_GEN_1_2);
+ DUMPREG(HDMI_V_SYNC_GEN_2_0);
+ DUMPREG(HDMI_V_SYNC_GEN_2_1);
+ DUMPREG(HDMI_V_SYNC_GEN_2_2);
+ DUMPREG(HDMI_V_SYNC_GEN_3_0);
+ DUMPREG(HDMI_V_SYNC_GEN_3_1);
+ DUMPREG(HDMI_V_SYNC_GEN_3_2);
+
+ dev_dbg(hdev->dev, "%s: ---- TG REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_TG_CMD);
+ DUMPREG(HDMI_TG_H_FSZ_L);
+ DUMPREG(HDMI_TG_H_FSZ_H);
+ DUMPREG(HDMI_TG_HACT_ST_L);
+ DUMPREG(HDMI_TG_HACT_ST_H);
+ DUMPREG(HDMI_TG_HACT_SZ_L);
+ DUMPREG(HDMI_TG_HACT_SZ_H);
+ DUMPREG(HDMI_TG_V_FSZ_L);
+ DUMPREG(HDMI_TG_V_FSZ_H);
+ DUMPREG(HDMI_TG_VSYNC_L);
+ DUMPREG(HDMI_TG_VSYNC_H);
+ DUMPREG(HDMI_TG_VSYNC2_L);
+ DUMPREG(HDMI_TG_VSYNC2_H);
+ DUMPREG(HDMI_TG_VACT_ST_L);
+ DUMPREG(HDMI_TG_VACT_ST_H);
+ DUMPREG(HDMI_TG_VACT_SZ_L);
+ DUMPREG(HDMI_TG_VACT_SZ_H);
+ DUMPREG(HDMI_TG_FIELD_CHG_L);
+ DUMPREG(HDMI_TG_FIELD_CHG_H);
+ DUMPREG(HDMI_TG_VACT_ST2_L);
+ DUMPREG(HDMI_TG_VACT_ST2_H);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
+#undef DUMPREG
+}
+
+static const struct hdmi_preset_conf hdmi_conf_480p = {
+ .core = {
+ .h_blank = {0x8a, 0x00},
+ .v_blank = {0x0d, 0x6a, 0x01},
+ .h_v_line = {0x0d, 0xa2, 0x35},
+ .vsync_pol = {0x01},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00},
+ .h_sync_gen = {0x0e, 0x30, 0x11},
+ .v_sync_gen1 = {0x0f, 0x90, 0x00},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x5a, 0x03, /* h_fsz */
+ 0x8a, 0x00, 0xd0, 0x02, /* hact */
+ 0x0d, 0x02, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0xe0, 0x01, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+ .mbus_fmt = {
+ .width = 720,
+ .height = 480,
+ .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
+ .field = V4L2_FIELD_NONE,
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_720p60 = {
+ .core = {
+ .h_blank = {0x72, 0x01},
+ .v_blank = {0xee, 0xf2, 0x00},
+ .h_v_line = {0xee, 0x22, 0x67},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x6c, 0x50, 0x02},
+ .v_sync_gen1 = {0x0a, 0x50, 0x00},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x72, 0x06, /* h_fsz */
+ 0x72, 0x01, 0x00, 0x05, /* hact */
+ 0xee, 0x02, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x1e, 0x00, 0xd0, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+ .mbus_fmt = {
+ .width = 1280,
+ .height = 720,
+ .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
+ .field = V4L2_FIELD_NONE,
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
+ .core = {
+ .h_blank = {0xd0, 0x02},
+ .v_blank = {0x65, 0x6c, 0x01},
+ .h_v_line = {0x65, 0x04, 0xa5},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x0e, 0xea, 0x08},
+ .v_sync_gen1 = {0x09, 0x40, 0x00},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x18, 0x01, 0x80, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+ .mbus_fmt = {
+ .width = 1920,
+ .height = 1080,
+ .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
+ .field = V4L2_FIELD_NONE,
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
+ .core = {
+ .h_blank = {0x18, 0x01},
+ .v_blank = {0x65, 0x6c, 0x01},
+ .h_v_line = {0x65, 0x84, 0x89},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x56, 0x08, 0x02},
+ .v_sync_gen1 = {0x09, 0x40, 0x00},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x18, 0x01, 0x80, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+ .mbus_fmt = {
+ .width = 1920,
+ .height = 1080,
+ .code = V4L2_MBUS_FMT_FIXED, /* means RGB888 */
+ .field = V4L2_FIELD_NONE,
+ },
+};
+
+static const struct {
+ u32 preset;
+ const struct hdmi_preset_conf *conf;
+} hdmi_conf[] = {
+ { V4L2_DV_480P59_94, &hdmi_conf_480p },
+ { V4L2_DV_720P59_94, &hdmi_conf_720p60 },
+ { V4L2_DV_1080P50, &hdmi_conf_1080p50 },
+ { V4L2_DV_1080P30, &hdmi_conf_1080p60 },
+ { V4L2_DV_1080P60, &hdmi_conf_1080p60 },
+};
+
+static const struct hdmi_preset_conf *hdmi_preset2conf(u32 preset)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_conf); ++i)
+ if (hdmi_conf[i].preset == preset)
+ return hdmi_conf[i].conf;
+ return NULL;
+}
+
+static int hdmi_streamon(struct hdmi_device *hdev)
+{
+ struct device *dev = hdev->dev;
+ struct hdmi_resources *res = &hdev->res;
+ int ret, tries;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ ret = v4l2_subdev_call(hdev->phy_sd, video, s_stream, 1);
+ if (ret)
+ return ret;
+
+ /* waiting for HDMIPHY's PLL to get to steady state */
+ for (tries = 100; tries; --tries) {
+ u32 val = hdmi_read(hdev, HDMI_PHY_STATUS);
+ if (val & HDMI_PHY_STATUS_READY)
+ break;
+ mdelay(1);
+ }
+ /* steady state not achieved */
+ if (tries == 0) {
+ dev_err(dev, "hdmiphy's pll could not reach steady state.\n");
+ v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
+ hdmi_dumpregs(hdev, "s_stream");
+ return -EIO;
+ }
+
+ /* hdmiphy clock is used for HDMI in streaming mode */
+ clk_disable(res->sclk_hdmi);
+ clk_set_parent(res->sclk_hdmi, res->sclk_hdmiphy);
+ clk_enable(res->sclk_hdmi);
+
+ /* enable HDMI and timing generator */
+ hdmi_write_mask(hdev, HDMI_CON_0, ~0, HDMI_EN);
+ hdmi_write_mask(hdev, HDMI_TG_CMD, ~0, HDMI_TG_EN);
+ hdmi_dumpregs(hdev, "streamon");
+ return 0;
+}
+
+static int hdmi_streamoff(struct hdmi_device *hdev)
+{
+ struct device *dev = hdev->dev;
+ struct hdmi_resources *res = &hdev->res;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ hdmi_write_mask(hdev, HDMI_CON_0, 0, HDMI_EN);
+ hdmi_write_mask(hdev, HDMI_TG_CMD, 0, HDMI_TG_EN);
+
+ /* pixel(vpll) clock is used for HDMI in config mode */
+ clk_disable(res->sclk_hdmi);
+ clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
+ clk_enable(res->sclk_hdmi);
+
+ v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
+
+ hdmi_dumpregs(hdev, "streamoff");
+ return 0;
+}
+
+static int hdmi_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+ struct device *dev = hdev->dev;
+
+ dev_dbg(dev, "%s(%d)\n", __func__, enable);
+ if (enable)
+ return hdmi_streamon(hdev);
+ return hdmi_streamoff(hdev);
+}
+
+static void hdmi_resource_poweron(struct hdmi_resources *res)
+{
+ /* turn HDMI power on */
+ regulator_bulk_enable(res->regul_count, res->regul_bulk);
+ /* power-on hdmi physical interface */
+ clk_enable(res->hdmiphy);
+ /* use VPP as parent clock; HDMIPHY is not working yet */
+ clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
+ /* turn clocks on */
+ clk_enable(res->sclk_hdmi);
+}
+
+static void hdmi_resource_poweroff(struct hdmi_resources *res)
+{
+ /* turn clocks off */
+ clk_disable(res->sclk_hdmi);
+ /* power-off hdmiphy */
+ clk_disable(res->hdmiphy);
+ /* turn HDMI power off */
+ regulator_bulk_disable(res->regul_count, res->regul_bulk);
+}
+
+static int hdmi_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+ int ret;
+
+ if (on)
+ ret = pm_runtime_get_sync(hdev->dev);
+ else
+ ret = pm_runtime_put_sync(hdev->dev);
+ /* only values < 0 indicate errors */
+ return IS_ERR_VALUE(ret) ? ret : 0;
+}
+
+static int hdmi_s_dv_preset(struct v4l2_subdev *sd,
+ struct v4l2_dv_preset *preset)
+{
+ struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+ struct device *dev = hdev->dev;
+ const struct hdmi_preset_conf *conf;
+
+ conf = hdmi_preset2conf(preset->preset);
+ if (conf == NULL) {
+ dev_err(dev, "preset (%u) not supported\n", preset->preset);
+ return -EINVAL;
+ }
+ hdev->cur_conf = conf;
+ hdev->cur_preset = preset->preset;
+ return 0;
+}
+
+static int hdmi_g_dv_preset(struct v4l2_subdev *sd,
+ struct v4l2_dv_preset *preset)
+{
+ memset(preset, 0, sizeof(*preset));
+ preset->preset = sd_to_hdmi_dev(sd)->cur_preset;
+ return 0;
+}
+
+static int hdmi_g_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+ struct device *dev = hdev->dev;
+
+ dev_dbg(dev, "%s\n", __func__);
+ if (!hdev->cur_conf)
+ return -EINVAL;
+ *fmt = hdev->cur_conf->mbus_fmt;
+ return 0;
+}
+
+static int hdmi_enum_dv_presets(struct v4l2_subdev *sd,
+ struct v4l2_dv_enum_preset *preset)
+{
+ if (preset->index >= ARRAY_SIZE(hdmi_conf))
+ return -EINVAL;
+ return v4l_fill_dv_preset_info(hdmi_conf[preset->index].preset, preset);
+}
+
+static const struct v4l2_subdev_core_ops hdmi_sd_core_ops = {
+ .s_power = hdmi_s_power,
+};
+
+static const struct v4l2_subdev_video_ops hdmi_sd_video_ops = {
+ .s_dv_preset = hdmi_s_dv_preset,
+ .g_dv_preset = hdmi_g_dv_preset,
+ .enum_dv_presets = hdmi_enum_dv_presets,
+ .g_mbus_fmt = hdmi_g_mbus_fmt,
+ .s_stream = hdmi_s_stream,
+};
+
+static const struct v4l2_subdev_ops hdmi_sd_ops = {
+ .core = &hdmi_sd_core_ops,
+ .video = &hdmi_sd_video_ops,
+};
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+
+ dev_dbg(dev, "%s\n", __func__);
+ hdmi_resource_poweroff(&hdev->res);
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct hdmi_device *hdev = sd_to_hdmi_dev(sd);
+ int ret = 0;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ hdmi_resource_poweron(&hdev->res);
+
+ ret = hdmi_conf_apply(hdev);
+ if (ret)
+ goto fail;
+
+ dev_dbg(dev, "poweron succeed\n");
+
+ return 0;
+
+fail:
+ hdmi_resource_poweroff(&hdev->res);
+ dev_err(dev, "poweron failed\n");
+
+ return ret;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ .runtime_suspend = hdmi_runtime_suspend,
+ .runtime_resume = hdmi_runtime_resume,
+};
+
+static void hdmi_resources_cleanup(struct hdmi_device *hdev)
+{
+ struct hdmi_resources *res = &hdev->res;
+
+ dev_dbg(hdev->dev, "HDMI resource cleanup\n");
+ /* put clocks, power */
+ if (res->regul_count)
+ regulator_bulk_free(res->regul_count, res->regul_bulk);
+ /* kfree is NULL-safe */
+ kfree(res->regul_bulk);
+ if (!IS_ERR_OR_NULL(res->hdmiphy))
+ clk_put(res->hdmiphy);
+ if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
+ clk_put(res->sclk_hdmiphy);
+ if (!IS_ERR_OR_NULL(res->sclk_pixel))
+ clk_put(res->sclk_pixel);
+ if (!IS_ERR_OR_NULL(res->sclk_hdmi))
+ clk_put(res->sclk_hdmi);
+ if (!IS_ERR_OR_NULL(res->hdmi))
+ clk_put(res->hdmi);
+ memset(res, 0, sizeof *res);
+}
+
+static int hdmi_resources_init(struct hdmi_device *hdev)
+{
+ struct device *dev = hdev->dev;
+ struct hdmi_resources *res = &hdev->res;
+ static char *supply[] = {
+ "hdmi-en",
+ "vdd",
+ "vdd_osc",
+ "vdd_pll",
+ };
+ int i, ret;
+
+ dev_dbg(dev, "HDMI resource init\n");
+
+ memset(res, 0, sizeof *res);
+ /* get clocks, power */
+
+ res->hdmi = clk_get(dev, "hdmi");
+ if (IS_ERR_OR_NULL(res->hdmi)) {
+ dev_err(dev, "failed to get clock 'hdmi'\n");
+ goto fail;
+ }
+ res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+ dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
+ goto fail;
+ }
+ res->sclk_pixel = clk_get(dev, "sclk_pixel");
+ if (IS_ERR_OR_NULL(res->sclk_pixel)) {
+ dev_err(dev, "failed to get clock 'sclk_pixel'\n");
+ goto fail;
+ }
+ res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+ if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
+ dev_err(dev, "failed to get clock 'sclk_hdmiphy'\n");
+ goto fail;
+ }
+ res->hdmiphy = clk_get(dev, "hdmiphy");
+ if (IS_ERR_OR_NULL(res->hdmiphy)) {
+ dev_err(dev, "failed to get clock 'hdmiphy'\n");
+ goto fail;
+ }
+ res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+ sizeof res->regul_bulk[0], GFP_KERNEL);
+ if (!res->regul_bulk) {
+ dev_err(dev, "failed to get memory for regulators\n");
+ goto fail;
+ }
+ for (i = 0; i < ARRAY_SIZE(supply); ++i) {
+ res->regul_bulk[i].supply = supply[i];
+ res->regul_bulk[i].consumer = NULL;
+ }
+
+ ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+ if (ret) {
+ dev_err(dev, "failed to get regulators\n");
+ goto fail;
+ }
+ res->regul_count = ARRAY_SIZE(supply);
+
+ return 0;
+fail:
+ dev_err(dev, "HDMI resource init - failed\n");
+ hdmi_resources_cleanup(hdev);
+ return -ENODEV;
+}
+
+static int __devinit hdmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct i2c_adapter *phy_adapter;
+ struct v4l2_subdev *sd;
+ struct hdmi_device *hdmi_dev = NULL;
+ struct hdmi_driver_data *drv_data;
+ int ret;
+
+ dev_dbg(dev, "probe start\n");
+
+ hdmi_dev = kzalloc(sizeof(*hdmi_dev), GFP_KERNEL);
+ if (!hdmi_dev) {
+ dev_err(dev, "out of memory\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ hdmi_dev->dev = dev;
+
+ ret = hdmi_resources_init(hdmi_dev);
+ if (ret)
+ goto fail_hdev;
+
+ /* mapping HDMI registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "get memory resource failed.\n");
+ ret = -ENXIO;
+ goto fail_init;
+ }
+
+ hdmi_dev->regs = ioremap(res->start, resource_size(res));
+ if (hdmi_dev->regs == NULL) {
+ dev_err(dev, "register mapping failed.\n");
+ ret = -ENXIO;
+ goto fail_hdev;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(dev, "get interrupt resource failed.\n");
+ ret = -ENXIO;
+ goto fail_regs;
+ }
+
+ ret = request_irq(res->start, hdmi_irq_handler, 0, "hdmi", hdmi_dev);
+ if (ret) {
+ dev_err(dev, "request interrupt failed.\n");
+ goto fail_regs;
+ }
+ hdmi_dev->irq = res->start;
+
+ /* setting v4l2 name to prevent WARN_ON in v4l2_device_register */
+ strlcpy(hdmi_dev->v4l2_dev.name, dev_name(dev),
+ sizeof(hdmi_dev->v4l2_dev.name));
+ /* passing NULL owner prevents driver from erasing drvdata */
+ ret = v4l2_device_register(NULL, &hdmi_dev->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "could not register v4l2 device.\n");
+ goto fail_irq;
+ }
+
+ drv_data = (struct hdmi_driver_data *)
+ platform_get_device_id(pdev)->driver_data;
+ phy_adapter = i2c_get_adapter(drv_data->hdmiphy_bus);
+ if (phy_adapter == NULL) {
+ dev_err(dev, "adapter request failed\n");
+ ret = -ENXIO;
+ goto fail_vdev;
+ }
+
+ hdmi_dev->phy_sd = v4l2_i2c_new_subdev_board(&hdmi_dev->v4l2_dev,
+ phy_adapter, &hdmiphy_info, NULL);
+ /* whether this failed or not, the adapter is no longer needed */
+ i2c_put_adapter(phy_adapter);
+ if (hdmi_dev->phy_sd == NULL) {
+ dev_err(dev, "missing subdev for hdmiphy\n");
+ ret = -ENODEV;
+ goto fail_vdev;
+ }
+
+ clk_enable(hdmi_dev->res.hdmi);
+
+ pm_runtime_enable(dev);
+
+ sd = &hdmi_dev->sd;
+ v4l2_subdev_init(sd, &hdmi_sd_ops);
+ sd->owner = THIS_MODULE;
+
+ strlcpy(sd->name, "s5p-hdmi", sizeof sd->name);
+ hdmi_dev->cur_preset = HDMI_DEFAULT_PRESET;
+ /* FIXME: missing failure handling if the preset is not supported */
+ hdmi_dev->cur_conf = hdmi_preset2conf(hdmi_dev->cur_preset);
+
+ /* store the subdev for calls that only have access to struct device */
+ dev_set_drvdata(dev, sd);
+
+ dev_info(dev, "probe sucessful\n");
+
+ return 0;
+
+fail_vdev:
+ v4l2_device_unregister(&hdmi_dev->v4l2_dev);
+
+fail_irq:
+ free_irq(hdmi_dev->irq, hdmi_dev);
+
+fail_regs:
+ iounmap(hdmi_dev->regs);
+
+fail_init:
+ hdmi_resources_cleanup(hdmi_dev);
+
+fail_hdev:
+ kfree(hdmi_dev);
+
+fail:
+ dev_err(dev, "probe failed\n");
+ return ret;
+}
+
+static int __devexit hdmi_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct hdmi_device *hdmi_dev = sd_to_hdmi_dev(sd);
+
+ pm_runtime_disable(dev);
+ clk_disable(hdmi_dev->res.hdmi);
+ v4l2_device_unregister(&hdmi_dev->v4l2_dev);
+ disable_irq(hdmi_dev->irq);
+ free_irq(hdmi_dev->irq, hdmi_dev);
+ iounmap(hdmi_dev->regs);
+ hdmi_resources_cleanup(hdmi_dev);
+ kfree(hdmi_dev);
+ dev_info(dev, "remove sucessful\n");
+
+ return 0;
+}
+
+static struct platform_driver hdmi_driver __refdata = {
+ .probe = hdmi_probe,
+ .remove = __devexit_p(hdmi_remove),
+ .id_table = hdmi_driver_types,
+ .driver = {
+ .name = "s5p-hdmi",
+ .owner = THIS_MODULE,
+ .pm = &hdmi_pm_ops,
+ }
+};
+
+/* D R I V E R I N I T I A L I Z A T I O N */
+
+static int __init hdmi_init(void)
+{
+ int ret;
+ static const char banner[] __initdata = KERN_INFO \
+ "Samsung HDMI output driver, "
+ "(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
+ printk(banner);
+
+ ret = platform_driver_register(&hdmi_driver);
+ if (ret)
+ printk(KERN_ERR "HDMI platform driver register failed\n");
+
+ return ret;
+}
+module_init(hdmi_init);
+
+static void __exit hdmi_exit(void)
+{
+ platform_driver_unregister(&hdmi_driver);
+}
+module_exit(hdmi_exit);
+
+
diff --git a/drivers/media/video/s5p-tv/hdmiphy_drv.c b/drivers/media/video/s5p-tv/hdmiphy_drv.c
new file mode 100644
index 00000000000..6693f4aff10
--- /dev/null
+++ b/drivers/media/video/s5p-tv/hdmiphy_drv.c
@@ -0,0 +1,188 @@
+/*
+ * Samsung HDMI Physical interface driver
+ *
+ * Copyright (C) 2010-2011 Samsung Electronics Co.Ltd
+ * Author: Tomasz Stanislawski <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+
+#include <media/v4l2-subdev.h>
+
+MODULE_AUTHOR("Tomasz Stanislawski <t.stanislaws@samsung.com>");
+MODULE_DESCRIPTION("Samsung HDMI Physical interface driver");
+MODULE_LICENSE("GPL");
+
+struct hdmiphy_conf {
+ u32 preset;
+ const u8 *data;
+};
+
+static const u8 hdmiphy_conf27[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x51, 0xDf, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xe3, 0x26, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf74_175[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
+ 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf74_25[32] = {
+ 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
+ 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xe0,
+ 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf148_5[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
+ 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
+};
+
+static const struct hdmiphy_conf hdmiphy_conf[] = {
+ { V4L2_DV_480P59_94, hdmiphy_conf27 },
+ { V4L2_DV_1080P30, hdmiphy_conf74_175 },
+ { V4L2_DV_720P59_94, hdmiphy_conf74_175 },
+ { V4L2_DV_720P60, hdmiphy_conf74_25 },
+ { V4L2_DV_1080P50, hdmiphy_conf148_5 },
+ { V4L2_DV_1080P60, hdmiphy_conf148_5 },
+};
+
+static const u8 *hdmiphy_preset2conf(u32 preset)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(hdmiphy_conf); ++i)
+ if (hdmiphy_conf[i].preset == preset)
+ return hdmiphy_conf[i].data;
+ return NULL;
+}
+
+static int hdmiphy_s_power(struct v4l2_subdev *sd, int on)
+{
+ /* to be implemented */
+ return 0;
+}
+
+static int hdmiphy_s_dv_preset(struct v4l2_subdev *sd,
+ struct v4l2_dv_preset *preset)
+{
+ const u8 *data;
+ u8 buffer[32];
+ int ret;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct device *dev = &client->dev;
+
+ dev_info(dev, "s_dv_preset(preset = %d)\n", preset->preset);
+ data = hdmiphy_preset2conf(preset->preset);
+ if (!data) {
+ dev_err(dev, "format not supported\n");
+ return -EINVAL;
+ }
+
+ /* storing configuration to the device */
+ memcpy(buffer, data, 32);
+ ret = i2c_master_send(client, buffer, 32);
+ if (ret != 32) {
+ dev_err(dev, "failed to configure HDMIPHY via I2C\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hdmiphy_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct device *dev = &client->dev;
+ u8 buffer[2];
+ int ret;
+
+ dev_info(dev, "s_stream(%d)\n", enable);
+ /* switch between configuration and operation mode */
+ buffer[0] = 0x1f;
+ buffer[1] = enable ? 0x80 : 0x00;
+
+ ret = i2c_master_send(client, buffer, 2);
+ if (ret != 2) {
+ dev_err(dev, "stream (%d) failed\n", enable);
+ return -EIO;
+ }
+ return 0;
+}
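+
+/*
+ * Illustrative sketch, not part of the original patch: a bridge driver
+ * (e.g. s5p-hdmi above) is expected to drive this subdev roughly as below.
+ * The function and parameter names here are made up for illustration.
+ */
+#if 0
+static int hdmiphy_example_start(struct v4l2_subdev *phy_sd, u32 preset_id)
+{
+ struct v4l2_dv_preset preset = { .preset = preset_id };
+ int ret;
+
+ /* load the 32-byte configuration that matches the preset */
+ ret = v4l2_subdev_call(phy_sd, video, s_dv_preset, &preset);
+ if (ret)
+ return ret;
+ /* switch the PHY from configuration to operation mode */
+ return v4l2_subdev_call(phy_sd, video, s_stream, 1);
+}
+#endif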
+
+static const struct v4l2_subdev_core_ops hdmiphy_core_ops = {
+ .s_power = hdmiphy_s_power,
+};
+
+static const struct v4l2_subdev_video_ops hdmiphy_video_ops = {
+ .s_dv_preset = hdmiphy_s_dv_preset,
+ .s_stream = hdmiphy_s_stream,
+};
+
+static const struct v4l2_subdev_ops hdmiphy_ops = {
+ .core = &hdmiphy_core_ops,
+ .video = &hdmiphy_video_ops,
+};
+
+static int __devinit hdmiphy_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ static struct v4l2_subdev sd;
+
+ v4l2_i2c_subdev_init(&sd, client, &hdmiphy_ops);
+ dev_info(&client->dev, "probe successful\n");
+ return 0;
+}
+
+static int __devexit hdmiphy_remove(struct i2c_client *client)
+{
+ dev_info(&client->dev, "remove successful\n");
+ return 0;
+}
+
+static const struct i2c_device_id hdmiphy_id[] = {
+ { "hdmiphy", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, hdmiphy_id);
+
+static struct i2c_driver hdmiphy_driver = {
+ .driver = {
+ .name = "s5p-hdmiphy",
+ .owner = THIS_MODULE,
+ },
+ .probe = hdmiphy_probe,
+ .remove = __devexit_p(hdmiphy_remove),
+ .id_table = hdmiphy_id,
+};
+
+static int __init hdmiphy_init(void)
+{
+ return i2c_add_driver(&hdmiphy_driver);
+}
+module_init(hdmiphy_init);
+
+static void __exit hdmiphy_exit(void)
+{
+ i2c_del_driver(&hdmiphy_driver);
+}
+module_exit(hdmiphy_exit);
diff --git a/drivers/media/video/s5p-tv/mixer.h b/drivers/media/video/s5p-tv/mixer.h
new file mode 100644
index 00000000000..e2242243f63
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer.h
@@ -0,0 +1,354 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version
+ */
+
+#ifndef SAMSUNG_MIXER_H
+#define SAMSUNG_MIXER_H
+
+#ifdef CONFIG_VIDEO_SAMSUNG_S5P_MIXER_DEBUG
+ #define DEBUG
+#endif
+
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-core.h>
+
+#include "regs-mixer.h"
+
+/** maximum number of output interfaces */
+#define MXR_MAX_OUTPUTS 2
+/** maximum number of input interfaces (layers) */
+#define MXR_MAX_LAYERS 3
+#define MXR_DRIVER_NAME "s5p-mixer"
+/** maximum number of planes for every layer */
+#define MXR_MAX_PLANES 2
+
+#define MXR_ENABLE 1
+#define MXR_DISABLE 0
+
+/** description of a macroblock for packed formats */
+struct mxr_block {
+ /** horizontal number of pixels in macroblock */
+ unsigned int width;
+ /** vertical number of pixels in macroblock */
+ unsigned int height;
+ /** size of block in bytes */
+ unsigned int size;
+};
+
+/** description of supported format */
+struct mxr_format {
+ /** format name/mnemonic */
+ const char *name;
+ /** fourcc identifier */
+ u32 fourcc;
+ /** colorspace identifier */
+ enum v4l2_colorspace colorspace;
+ /** number of planes in image data */
+ int num_planes;
+ /** description of block for each plane */
+ struct mxr_block plane[MXR_MAX_PLANES];
+ /** number of subframes in image data */
+ int num_subframes;
+ /** specifies to which subframe a given plane belongs */
+ int plane2subframe[MXR_MAX_PLANES];
+ /** internal code, driver dependent */
+ unsigned long cookie;
+};
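+
+/*
+ * Illustrative example, not part of the original patch: a two-plane format
+ * such as NV12 kept in a single buffer could be described as below; both
+ * planes map to subframe 0 through plane2subframe. The cookie value is a
+ * placeholder, not a real hardware code.
+ */
+#if 0
+static const struct mxr_format example_fmt_nv12 = {
+ .name = "NV12",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .num_planes = 2,
+ .plane = {
+ { .width = 1, .height = 1, .size = 1 }, /* luma: 1 byte per pixel */
+ { .width = 2, .height = 2, .size = 2 }, /* chroma: 2 bytes per 2x2 block */
+ },
+ .num_subframes = 1,
+ .plane2subframe = { 0, 0 },
+ .cookie = 0, /* placeholder */
+};
+#endif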
+
+/** description of crop configuration for image */
+struct mxr_crop {
+ /** width of layer in pixels */
+ unsigned int full_width;
+ /** height of layer in pixels */
+ unsigned int full_height;
+ /** horizontal offset of first pixel to be displayed */
+ unsigned int x_offset;
+ /** vertical offset of first pixel to be displayed */
+ unsigned int y_offset;
+ /** width of displayed data in pixels */
+ unsigned int width;
+ /** height of displayed data in pixels */
+ unsigned int height;
+ /** indicate which fields are present in buffer */
+ unsigned int field;
+};
+
+/** description of transformation from source to destination image */
+struct mxr_geometry {
+ /** cropping for source image */
+ struct mxr_crop src;
+ /** cropping for destination image */
+ struct mxr_crop dst;
+ /** layer-dependent description of horizontal scaling */
+ unsigned int x_ratio;
+ /** layer-dependent description of vertical scaling */
+ unsigned int y_ratio;
+};
+
+/** instance of a buffer */
+struct mxr_buffer {
+ /** common v4l buffer stuff -- must be first */
+ struct vb2_buffer vb;
+ /** node for layer's lists */
+ struct list_head list;
+};
+
+
+/** internal states of layer */
+enum mxr_layer_state {
+ /** layer is not shown */
+ MXR_LAYER_IDLE = 0,
+ /** state between STREAMON and hardware start */
+ MXR_LAYER_STREAMING_START,
+ /** layer is shown */
+ MXR_LAYER_STREAMING,
+ /** state before STREAMOFF is finished */
+ MXR_LAYER_STREAMING_FINISH,
+};
+
+/** forward declarations */
+struct mxr_device;
+struct mxr_layer;
+
+/** callbacks for layer operations */
+struct mxr_layer_ops {
+ /* TODO: try to port it to subdev API */
+ /** handler for resource release function */
+ void (*release)(struct mxr_layer *);
+ /** setting buffer to HW */
+ void (*buffer_set)(struct mxr_layer *, struct mxr_buffer *);
+ /** setting format and geometry in HW */
+ void (*format_set)(struct mxr_layer *);
+ /** streaming stop/start */
+ void (*stream_set)(struct mxr_layer *, int);
+ /** adjusting geometry */
+ void (*fix_geometry)(struct mxr_layer *);
+};
+
+/** layer instance, a single window and content displayed on output */
+struct mxr_layer {
+ /** parent mixer device */
+ struct mxr_device *mdev;
+ /** layer index (unique identifier) */
+ int idx;
+ /** callbacks for layer methods */
+ struct mxr_layer_ops ops;
+ /** format array */
+ const struct mxr_format **fmt_array;
+ /** size of format array */
+ unsigned long fmt_array_size;
+
+ /** lock for protection of list and state fields */
+ spinlock_t enq_slock;
+ /** list for enqueued buffers */
+ struct list_head enq_list;
+ /** buffer currently owned by hardware in temporary registers */
+ struct mxr_buffer *update_buf;
+ /** buffer currently owned by hardware in shadow registers */
+ struct mxr_buffer *shadow_buf;
+ /** state of layer IDLE/STREAMING */
+ enum mxr_layer_state state;
+
+ /** mutex for protection of fields below */
+ struct mutex mutex;
+ /** handler for video node */
+ struct video_device vfd;
+ /** queue for output buffers */
+ struct vb2_queue vb_queue;
+ /** current image format */
+ const struct mxr_format *fmt;
+ /** current geometry of image */
+ struct mxr_geometry geo;
+};
+
+/** description of the mixer's output interface */
+struct mxr_output {
+ /** name of output */
+ char name[32];
+ /** output subdev */
+ struct v4l2_subdev *sd;
+ /** cookie used for configuration of registers */
+ int cookie;
+};
+
+/** specify source of output subdevs */
+struct mxr_output_conf {
+ /** name of output (connector) */
+ char *output_name;
+ /** name of module that generates output subdev */
+ char *module_name;
+ /** cookie needed by the mixer HW */
+ int cookie;
+};
+
+struct clk;
+struct regulator;
+
+/** auxiliary resources used by the mixer */
+struct mxr_resources {
+ /** interrupt index */
+ int irq;
+ /** pointer to Mixer registers */
+ void __iomem *mxr_regs;
+ /** pointer to Video Processor registers */
+ void __iomem *vp_regs;
+ /** other resources; should be used under mxr_device.mutex */
+ struct clk *mixer;
+ struct clk *vp;
+ struct clk *sclk_mixer;
+ struct clk *sclk_hdmi;
+ struct clk *sclk_dac;
+};
+
+/* event flags used */
+enum mxr_devide_flags {
+ MXR_EVENT_VSYNC = 0,
+};
+
+/** driver instance */
+struct mxr_device {
+ /** master device */
+ struct device *dev;
+ /** state of each layer */
+ struct mxr_layer *layer[MXR_MAX_LAYERS];
+ /** state of each output */
+ struct mxr_output *output[MXR_MAX_OUTPUTS];
+ /** number of registered outputs */
+ int output_cnt;
+
+ /* video resources */
+
+ /** V4L2 device */
+ struct v4l2_device v4l2_dev;
+ /** context of allocator */
+ void *alloc_ctx;
+ /** event wait queue */
+ wait_queue_head_t event_queue;
+ /** state flags */
+ unsigned long event_flags;
+
+ /** spinlock for protection of registers */
+ spinlock_t reg_slock;
+
+ /** mutex for protection of fields below */
+ struct mutex mutex;
+ /** number of entities dependent on output configuration */
+ int n_output;
+ /** number of users that do streaming */
+ int n_streamer;
+ /** index of current output */
+ int current_output;
+ /** auxiliary resources used by the mixer */
+ struct mxr_resources res;
+};
+
+/** transform device structure into mixer device */
+static inline struct mxr_device *to_mdev(struct device *dev)
+{
+ struct v4l2_device *vdev = dev_get_drvdata(dev);
+ return container_of(vdev, struct mxr_device, v4l2_dev);
+}
+
+/** get current output data, should be called under mdev's mutex */
+static inline struct mxr_output *to_output(struct mxr_device *mdev)
+{
+ return mdev->output[mdev->current_output];
+}
+
+/** get current output subdev, should be called under mdev's mutex */
+static inline struct v4l2_subdev *to_outsd(struct mxr_device *mdev)
+{
+ struct mxr_output *out = to_output(mdev);
+ return out ? out->sd : NULL;
+}
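+
+/*
+ * Illustrative sketch, not part of the original patch: the helpers above are
+ * meant to be used under mdev's mutex, e.g. to resolve the current output
+ * and call into its subdev (compare mxr_output_get() in mixer_drv.c).
+ */
+#if 0
+static inline void example_output_power(struct mxr_device *mdev, int on)
+{
+ mutex_lock(&mdev->mutex);
+ v4l2_subdev_call(to_outsd(mdev), core, s_power, on);
+ mutex_unlock(&mdev->mutex);
+}
+#endif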
+
+/** forward declaration for mixer platform data */
+struct mxr_platform_data;
+
+/** acquiring common video resources */
+int __devinit mxr_acquire_video(struct mxr_device *mdev,
+ struct mxr_output_conf *output_conf, int output_count);
+
+/** releasing common video resources */
+void __devexit mxr_release_video(struct mxr_device *mdev);
+
+struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx);
+struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx);
+struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
+ int idx, char *name, struct mxr_layer_ops *ops);
+
+void mxr_base_layer_release(struct mxr_layer *layer);
+void mxr_layer_release(struct mxr_layer *layer);
+
+int mxr_base_layer_register(struct mxr_layer *layer);
+void mxr_base_layer_unregister(struct mxr_layer *layer);
+
+unsigned long mxr_get_plane_size(const struct mxr_block *blk,
+ unsigned int width, unsigned int height);
+
+/** adds new consumer for mixer's power */
+int __must_check mxr_power_get(struct mxr_device *mdev);
+/** removes consumer for mixer's power */
+void mxr_power_put(struct mxr_device *mdev);
+/** add new client for output configuration */
+void mxr_output_get(struct mxr_device *mdev);
+/** removes a client for output configuration */
+void mxr_output_put(struct mxr_device *mdev);
+/** add new client for streaming */
+void mxr_streamer_get(struct mxr_device *mdev);
+/** removes a client for streaming */
+void mxr_streamer_put(struct mxr_device *mdev);
+/** returns format of data delivered to current output */
+void mxr_get_mbus_fmt(struct mxr_device *mdev,
+ struct v4l2_mbus_framefmt *mbus_fmt);
+
+/* Debug */
+
+#define mxr_err(mdev, fmt, ...) dev_err(mdev->dev, fmt, ##__VA_ARGS__)
+#define mxr_warn(mdev, fmt, ...) dev_warn(mdev->dev, fmt, ##__VA_ARGS__)
+#define mxr_info(mdev, fmt, ...) dev_info(mdev->dev, fmt, ##__VA_ARGS__)
+
+#ifdef CONFIG_VIDEO_SAMSUNG_S5P_MIXER_DEBUG
+ #define mxr_dbg(mdev, fmt, ...) dev_dbg(mdev->dev, fmt, ##__VA_ARGS__)
+#else
+ #define mxr_dbg(mdev, fmt, ...) do { (void) mdev; } while (0)
+#endif
+
+/* accessing Mixer's and Video Processor's registers */
+
+void mxr_vsync_set_update(struct mxr_device *mdev, int en);
+void mxr_reg_reset(struct mxr_device *mdev);
+irqreturn_t mxr_irq_handler(int irq, void *dev_data);
+void mxr_reg_s_output(struct mxr_device *mdev, int cookie);
+void mxr_reg_streamon(struct mxr_device *mdev);
+void mxr_reg_streamoff(struct mxr_device *mdev);
+int mxr_reg_wait4vsync(struct mxr_device *mdev);
+void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
+ struct v4l2_mbus_framefmt *fmt);
+void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en);
+void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr);
+void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
+ const struct mxr_format *fmt, const struct mxr_geometry *geo);
+
+void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en);
+void mxr_reg_vp_buffer(struct mxr_device *mdev,
+ dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2]);
+void mxr_reg_vp_format(struct mxr_device *mdev,
+ const struct mxr_format *fmt, const struct mxr_geometry *geo);
+void mxr_reg_dump(struct mxr_device *mdev);
+
+#endif /* SAMSUNG_MIXER_H */
+
diff --git a/drivers/media/video/s5p-tv/mixer_drv.c b/drivers/media/video/s5p-tv/mixer_drv.c
new file mode 100644
index 00000000000..00643094b22
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_drv.c
@@ -0,0 +1,487 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version
+ */
+
+#include "mixer.h"
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+
+MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
+MODULE_DESCRIPTION("Samsung MIXER");
+MODULE_LICENSE("GPL");
+
+/* --------- DRIVER PARAMETERS ---------- */
+
+static struct mxr_output_conf mxr_output_conf[] = {
+ {
+ .output_name = "S5P HDMI connector",
+ .module_name = "s5p-hdmi",
+ .cookie = 1,
+ },
+ {
+ .output_name = "S5P SDO connector",
+ .module_name = "s5p-sdo",
+ .cookie = 0,
+ },
+};
+
+void mxr_get_mbus_fmt(struct mxr_device *mdev,
+ struct v4l2_mbus_framefmt *mbus_fmt)
+{
+ struct v4l2_subdev *sd;
+ int ret;
+
+ mutex_lock(&mdev->mutex);
+ sd = to_outsd(mdev);
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, mbus_fmt);
+ WARN(ret, "failed to get mbus_fmt for output %s\n", sd->name);
+ mutex_unlock(&mdev->mutex);
+}
+
+void mxr_streamer_get(struct mxr_device *mdev)
+{
+ mutex_lock(&mdev->mutex);
+ ++mdev->n_streamer;
+ mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);
+ if (mdev->n_streamer == 1) {
+ struct v4l2_subdev *sd = to_outsd(mdev);
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct mxr_resources *res = &mdev->res;
+ int ret;
+
+ if (to_output(mdev)->cookie == 0)
+ clk_set_parent(res->sclk_mixer, res->sclk_dac);
+ else
+ clk_set_parent(res->sclk_mixer, res->sclk_hdmi);
+ mxr_reg_s_output(mdev, to_output(mdev)->cookie);
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mbus_fmt);
+ WARN(ret, "failed to get mbus_fmt for output %s\n", sd->name);
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ WARN(ret, "starting stream failed for output %s\n", sd->name);
+
+ mxr_reg_set_mbus_fmt(mdev, &mbus_fmt);
+ mxr_reg_streamon(mdev);
+ ret = mxr_reg_wait4vsync(mdev);
+ WARN(ret, "failed to get vsync (%d) from output\n", ret);
+ }
+ mutex_unlock(&mdev->mutex);
+ mxr_reg_dump(mdev);
+ /* FIXME: what to do when streaming fails? */
+}
+
+void mxr_streamer_put(struct mxr_device *mdev)
+{
+ mutex_lock(&mdev->mutex);
+ --mdev->n_streamer;
+ mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_streamer);
+ if (mdev->n_streamer == 0) {
+ int ret;
+ struct v4l2_subdev *sd = to_outsd(mdev);
+
+ mxr_reg_streamoff(mdev);
+ /* vsync applies Mixer setup */
+ ret = mxr_reg_wait4vsync(mdev);
+ WARN(ret, "failed to get vsync (%d) from output\n", ret);
+ ret = v4l2_subdev_call(sd, video, s_stream, 0);
+ WARN(ret, "stopping stream failed for output %s\n", sd->name);
+ }
+ WARN(mdev->n_streamer < 0, "negative number of streamers (%d)\n",
+ mdev->n_streamer);
+ mutex_unlock(&mdev->mutex);
+ mxr_reg_dump(mdev);
+}
+
+void mxr_output_get(struct mxr_device *mdev)
+{
+ mutex_lock(&mdev->mutex);
+ ++mdev->n_output;
+ mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_output);
+ /* turn on auxiliary driver */
+ if (mdev->n_output == 1)
+ v4l2_subdev_call(to_outsd(mdev), core, s_power, 1);
+ mutex_unlock(&mdev->mutex);
+}
+
+void mxr_output_put(struct mxr_device *mdev)
+{
+ mutex_lock(&mdev->mutex);
+ --mdev->n_output;
+ mxr_dbg(mdev, "%s(%d)\n", __func__, mdev->n_output);
+ /* turn off auxiliary driver */
+ if (mdev->n_output == 0)
+ v4l2_subdev_call(to_outsd(mdev), core, s_power, 0);
+ WARN(mdev->n_output < 0, "negative number of output users (%d)\n",
+ mdev->n_output);
+ mutex_unlock(&mdev->mutex);
+}
+
+int mxr_power_get(struct mxr_device *mdev)
+{
+ int ret = pm_runtime_get_sync(mdev->dev);
+
+ /* returning 1 means that power is already enabled,
+ * so zero (success) must be returned */
+ if (IS_ERR_VALUE(ret))
+ return ret;
+ return 0;
+}
+
+void mxr_power_put(struct mxr_device *mdev)
+{
+ pm_runtime_put_sync(mdev->dev);
+}
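+
+/*
+ * Illustrative sketch, not part of the original patch: the get/put helpers
+ * above are meant to be paired by the streaming path, roughly as below; the
+ * real start/stop streaming callbacks live in the video and layer code, so
+ * the names here are only an assumed shape.
+ */
+#if 0
+static int example_stream_start(struct mxr_device *mdev)
+{
+ int ret = mxr_power_get(mdev); /* powers the domain via runtime PM */
+ if (ret)
+ return ret;
+ mxr_streamer_get(mdev); /* first streamer starts the mixer */
+ return 0;
+}
+
+static void example_stream_stop(struct mxr_device *mdev)
+{
+ mxr_streamer_put(mdev); /* last streamer stops the mixer */
+ mxr_power_put(mdev);
+}
+#endif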
+
+/* --------- RESOURCE MANAGEMENT -------------*/
+
+static int __devinit mxr_acquire_plat_resources(struct mxr_device *mdev,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
+ if (res == NULL) {
+ mxr_err(mdev, "get memory resource failed.\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ mdev->res.mxr_regs = ioremap(res->start, resource_size(res));
+ if (mdev->res.mxr_regs == NULL) {
+ mxr_err(mdev, "register mapping failed.\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
+ if (res == NULL) {
+ mxr_err(mdev, "get memory resource failed.\n");
+ ret = -ENXIO;
+ goto fail_mxr_regs;
+ }
+
+ mdev->res.vp_regs = ioremap(res->start, resource_size(res));
+ if (mdev->res.vp_regs == NULL) {
+ mxr_err(mdev, "register mapping failed.\n");
+ ret = -ENXIO;
+ goto fail_mxr_regs;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
+ if (res == NULL) {
+ mxr_err(mdev, "get interrupt resource failed.\n");
+ ret = -ENXIO;
+ goto fail_vp_regs;
+ }
+
+ ret = request_irq(res->start, mxr_irq_handler, 0, "s5p-mixer", mdev);
+ if (ret) {
+ mxr_err(mdev, "request interrupt failed.\n");
+ goto fail_vp_regs;
+ }
+ mdev->res.irq = res->start;
+
+ return 0;
+
+fail_vp_regs:
+ iounmap(mdev->res.vp_regs);
+
+fail_mxr_regs:
+ iounmap(mdev->res.mxr_regs);
+
+fail:
+ return ret;
+}
+
+static void mxr_release_plat_resources(struct mxr_device *mdev)
+{
+ free_irq(mdev->res.irq, mdev);
+ iounmap(mdev->res.vp_regs);
+ iounmap(mdev->res.mxr_regs);
+}
+
+static void mxr_release_clocks(struct mxr_device *mdev)
+{
+ struct mxr_resources *res = &mdev->res;
+
+ if (!IS_ERR_OR_NULL(res->sclk_dac))
+ clk_put(res->sclk_dac);
+ if (!IS_ERR_OR_NULL(res->sclk_hdmi))
+ clk_put(res->sclk_hdmi);
+ if (!IS_ERR_OR_NULL(res->sclk_mixer))
+ clk_put(res->sclk_mixer);
+ if (!IS_ERR_OR_NULL(res->vp))
+ clk_put(res->vp);
+ if (!IS_ERR_OR_NULL(res->mixer))
+ clk_put(res->mixer);
+}
+
+static int mxr_acquire_clocks(struct mxr_device *mdev)
+{
+ struct mxr_resources *res = &mdev->res;
+ struct device *dev = mdev->dev;
+
+ res->mixer = clk_get(dev, "mixer");
+ if (IS_ERR_OR_NULL(res->mixer)) {
+ mxr_err(mdev, "failed to get clock 'mixer'\n");
+ goto fail;
+ }
+ res->vp = clk_get(dev, "vp");
+ if (IS_ERR_OR_NULL(res->vp)) {
+ mxr_err(mdev, "failed to get clock 'vp'\n");
+ goto fail;
+ }
+ res->sclk_mixer = clk_get(dev, "sclk_mixer");
+ if (IS_ERR_OR_NULL(res->sclk_mixer)) {
+ mxr_err(mdev, "failed to get clock 'sclk_mixer'\n");
+ goto fail;
+ }
+ res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+ mxr_err(mdev, "failed to get clock 'sclk_hdmi'\n");
+ goto fail;
+ }
+ res->sclk_dac = clk_get(dev, "sclk_dac");
+ if (IS_ERR_OR_NULL(res->sclk_dac)) {
+ mxr_err(mdev, "failed to get clock 'sclk_dac'\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ mxr_release_clocks(mdev);
+ return -ENODEV;
+}
+
+static int __devinit mxr_acquire_resources(struct mxr_device *mdev,
+ struct platform_device *pdev)
+{
+ int ret;
+ ret = mxr_acquire_plat_resources(mdev, pdev);
+
+ if (ret)
+ goto fail;
+
+ ret = mxr_acquire_clocks(mdev);
+ if (ret)
+ goto fail_plat;
+
+ mxr_info(mdev, "resources acquired\n");
+ return 0;
+
+fail_plat:
+ mxr_release_plat_resources(mdev);
+fail:
+ mxr_err(mdev, "resources acquire failed\n");
+ return ret;
+}
+
+static void mxr_release_resources(struct mxr_device *mdev)
+{
+ mxr_release_clocks(mdev);
+ mxr_release_plat_resources(mdev);
+ memset(&mdev->res, 0, sizeof mdev->res);
+}
+
+static void mxr_release_layers(struct mxr_device *mdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mdev->layer); ++i)
+ if (mdev->layer[i])
+ mxr_layer_release(mdev->layer[i]);
+}
+
+static int __devinit mxr_acquire_layers(struct mxr_device *mdev,
+ struct mxr_platform_data *pdata)
+{
+ mdev->layer[0] = mxr_graph_layer_create(mdev, 0);
+ mdev->layer[1] = mxr_graph_layer_create(mdev, 1);
+ mdev->layer[2] = mxr_vp_layer_create(mdev, 0);
+
+ if (!mdev->layer[0] || !mdev->layer[1] || !mdev->layer[2]) {
+ mxr_err(mdev, "failed to acquire layers\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ mxr_release_layers(mdev);
+ return -ENODEV;
+}
+
+/* ---------- POWER MANAGEMENT ----------- */
+
+static int mxr_runtime_resume(struct device *dev)
+{
+ struct mxr_device *mdev = to_mdev(dev);
+ struct mxr_resources *res = &mdev->res;
+
+ mxr_dbg(mdev, "resume - start\n");
+ mutex_lock(&mdev->mutex);
+ /* turn clocks on */
+ clk_enable(res->mixer);
+ clk_enable(res->vp);
+ clk_enable(res->sclk_mixer);
+ /* apply default configuration */
+ mxr_reg_reset(mdev);
+ mxr_dbg(mdev, "resume - finished\n");
+
+ mutex_unlock(&mdev->mutex);
+ return 0;
+}
+
+static int mxr_runtime_suspend(struct device *dev)
+{
+ struct mxr_device *mdev = to_mdev(dev);
+ struct mxr_resources *res = &mdev->res;
+ mxr_dbg(mdev, "suspend - start\n");
+ mutex_lock(&mdev->mutex);
+ /* turn clocks off */
+ clk_disable(res->sclk_mixer);
+ clk_disable(res->vp);
+ clk_disable(res->mixer);
+ mutex_unlock(&mdev->mutex);
+ mxr_dbg(mdev, "suspend - finished\n");
+ return 0;
+}
+
+static const struct dev_pm_ops mxr_pm_ops = {
+ .runtime_suspend = mxr_runtime_suspend,
+ .runtime_resume = mxr_runtime_resume,
+};
+
+/* --------- DRIVER INITIALIZATION ---------- */
+
+static int __devinit mxr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mxr_platform_data *pdata = dev->platform_data;
+ struct mxr_device *mdev;
+ int ret;
+
+ /* mdev does not exist yet so no mxr_dbg is used */
+ dev_info(dev, "probe start\n");
+
+ mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
+ if (!mdev) {
+ mxr_err(mdev, "not enough memory.\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* setup pointer to master device */
+ mdev->dev = dev;
+
+ mutex_init(&mdev->mutex);
+ spin_lock_init(&mdev->reg_slock);
+ init_waitqueue_head(&mdev->event_queue);
+
+ /* acquire resources: regs, irqs, clocks, regulators */
+ ret = mxr_acquire_resources(mdev, pdev);
+ if (ret)
+ goto fail_mem;
+
+ /* configure resources for video output */
+ ret = mxr_acquire_video(mdev, mxr_output_conf,
+ ARRAY_SIZE(mxr_output_conf));
+ if (ret)
+ goto fail_resources;
+
+ /* configure layers */
+ ret = mxr_acquire_layers(mdev, pdata);
+ if (ret)
+ goto fail_video;
+
+ pm_runtime_enable(dev);
+
+ mxr_info(mdev, "probe successful\n");
+ return 0;
+
+fail_video:
+ mxr_release_video(mdev);
+
+fail_resources:
+ mxr_release_resources(mdev);
+
+fail_mem:
+ kfree(mdev);
+
+fail:
+ dev_info(dev, "probe failed\n");
+ return ret;
+}
+
+static int __devexit mxr_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mxr_device *mdev = to_mdev(dev);
+
+ pm_runtime_disable(dev);
+
+ mxr_release_layers(mdev);
+ mxr_release_video(mdev);
+ mxr_release_resources(mdev);
+
+ kfree(mdev);
+
+ dev_info(dev, "remove sucessful\n");
+ return 0;
+}
+
+static struct platform_driver mxr_driver __refdata = {
+ .probe = mxr_probe,
+ .remove = __devexit_p(mxr_remove),
+ .driver = {
+ .name = MXR_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &mxr_pm_ops,
+ }
+};
+
+static int __init mxr_init(void)
+{
+ int i, ret;
+ static const char banner[] __initdata = KERN_INFO
+ "Samsung TV Mixer driver, "
+ "(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
+ printk(banner);
+
+ /* Loading auxiliary modules */
+ for (i = 0; i < ARRAY_SIZE(mxr_output_conf); ++i)
+ request_module(mxr_output_conf[i].module_name);
+
+ ret = platform_driver_register(&mxr_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "registration of MIXER driver failed\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+module_init(mxr_init);
+
+static void __exit mxr_exit(void)
+{
+ platform_driver_unregister(&mxr_driver);
+}
+module_exit(mxr_exit);
diff --git a/drivers/media/video/s5p-tv/mixer_grp_layer.c b/drivers/media/video/s5p-tv/mixer_grp_layer.c
new file mode 100644
index 00000000000..58f0ba49580
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_grp_layer.c
@@ -0,0 +1,185 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version
+ */
+
+#include "mixer.h"
+
+#include <media/videobuf2-dma-contig.h>
+
+/* FORMAT DEFINITIONS */
+
+static const struct mxr_format mxr_fb_fmt_rgb565 = {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .num_planes = 1,
+ .plane = {
+ { .width = 1, .height = 1, .size = 2 },
+ },
+ .num_subframes = 1,
+ .cookie = 4,
+};
+
+static const struct mxr_format mxr_fb_fmt_argb1555 = {
+ .name = "ARGB1555",
+ .num_planes = 1,
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .plane = {
+ { .width = 1, .height = 1, .size = 2 },
+ },
+ .num_subframes = 1,
+ .cookie = 5,
+};
+
+static const struct mxr_format mxr_fb_fmt_argb4444 = {
+ .name = "ARGB4444",
+ .num_planes = 1,
+ .fourcc = V4L2_PIX_FMT_RGB444,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .plane = {
+ { .width = 1, .height = 1, .size = 2 },
+ },
+ .num_subframes = 1,
+ .cookie = 6,
+};
+
+static const struct mxr_format mxr_fb_fmt_argb8888 = {
+ .name = "ARGB8888",
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .num_planes = 1,
+ .plane = {
+ { .width = 1, .height = 1, .size = 4 },
+ },
+ .num_subframes = 1,
+ .cookie = 7,
+};
+
+static const struct mxr_format *mxr_graph_format[] = {
+ &mxr_fb_fmt_rgb565,
+ &mxr_fb_fmt_argb1555,
+ &mxr_fb_fmt_argb4444,
+ &mxr_fb_fmt_argb8888,
+};
+
+/* AUXILIARY CALLBACKS */
+
+static void mxr_graph_layer_release(struct mxr_layer *layer)
+{
+ mxr_base_layer_unregister(layer);
+ mxr_base_layer_release(layer);
+}
+
+static void mxr_graph_buffer_set(struct mxr_layer *layer,
+ struct mxr_buffer *buf)
+{
+ dma_addr_t addr = 0;
+
+ if (buf)
+ addr = vb2_dma_contig_plane_paddr(&buf->vb, 0);
+ mxr_reg_graph_buffer(layer->mdev, layer->idx, addr);
+}
+
+static void mxr_graph_stream_set(struct mxr_layer *layer, int en)
+{
+ mxr_reg_graph_layer_stream(layer->mdev, layer->idx, en);
+}
+
+static void mxr_graph_format_set(struct mxr_layer *layer)
+{
+ mxr_reg_graph_format(layer->mdev, layer->idx,
+ layer->fmt, &layer->geo);
+}
+
+static void mxr_graph_fix_geometry(struct mxr_layer *layer)
+{
+ struct mxr_geometry *geo = &layer->geo;
+
+ /* limit to boundary size */
+ geo->src.full_width = clamp_val(geo->src.full_width, 1, 32767);
+ geo->src.full_height = clamp_val(geo->src.full_height, 1, 2047);
+ geo->src.width = clamp_val(geo->src.width, 1, geo->src.full_width);
+ geo->src.width = min(geo->src.width, 2047U);
+ /* cropping along the Y axis is not possible */
+ geo->src.y_offset = min(geo->src.y_offset, geo->src.full_height - 1);
+ geo->src.height = geo->src.full_height - geo->src.y_offset;
+ /* limiting the offset */
+ geo->src.x_offset = min(geo->src.x_offset,
+ geo->src.full_width - geo->src.width);
+
+ /* setting position in output */
+ geo->dst.width = min(geo->dst.width, geo->dst.full_width);
+ geo->dst.height = min(geo->dst.height, geo->dst.full_height);
+
+ /* Mixer supports only 1x and 2x scaling */
+ if (geo->dst.width >= 2 * geo->src.width) {
+ geo->x_ratio = 1;
+ geo->dst.width = 2 * geo->src.width;
+ } else {
+ geo->x_ratio = 0;
+ geo->dst.width = geo->src.width;
+ }
+
+ if (geo->dst.height >= 2 * geo->src.height) {
+ geo->y_ratio = 1;
+ geo->dst.height = 2 * geo->src.height;
+ } else {
+ geo->y_ratio = 0;
+ geo->dst.height = geo->src.height;
+ }
+
+ geo->dst.x_offset = min(geo->dst.x_offset,
+ geo->dst.full_width - geo->dst.width);
+ geo->dst.y_offset = min(geo->dst.y_offset,
+ geo->dst.full_height - geo->dst.height);
+}
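+
+/*
+ * Worked example, not part of the original patch: with a 720x480 source and
+ * a 1920x1080 display the code above selects 2x scaling on both axes
+ * (dst >= 2 * src), so the window becomes 1440x960 with x_ratio = y_ratio = 1;
+ * the offsets are then clamped so the window stays inside the output.
+ */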
+
+/* PUBLIC API */
+
+struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
+{
+ struct mxr_layer *layer;
+ int ret;
+ struct mxr_layer_ops ops = {
+ .release = mxr_graph_layer_release,
+ .buffer_set = mxr_graph_buffer_set,
+ .stream_set = mxr_graph_stream_set,
+ .format_set = mxr_graph_format_set,
+ .fix_geometry = mxr_graph_fix_geometry,
+ };
+ char name[32];
+
+ sprintf(name, "graph%d", idx);
+
+ layer = mxr_base_layer_create(mdev, idx, name, &ops);
+ if (layer == NULL) {
+ mxr_err(mdev, "failed to initialize layer(%d) base\n", idx);
+ goto fail;
+ }
+
+ layer->fmt_array = mxr_graph_format;
+ layer->fmt_array_size = ARRAY_SIZE(mxr_graph_format);
+
+ ret = mxr_base_layer_register(layer);
+ if (ret)
+ goto fail_layer;
+
+ return layer;
+
+fail_layer:
+ mxr_base_layer_release(layer);
+
+fail:
+ return NULL;
+}
+
diff --git a/drivers/media/video/s5p-tv/mixer_reg.c b/drivers/media/video/s5p-tv/mixer_reg.c
new file mode 100644
index 00000000000..38dac672aa1
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_reg.c
@@ -0,0 +1,541 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version
+ */
+
+#include "mixer.h"
+#include "regs-mixer.h"
+#include "regs-vp.h"
+
+#include <linux/delay.h>
+
+/* Register access subroutines */
+
+static inline u32 vp_read(struct mxr_device *mdev, u32 reg_id)
+{
+ return readl(mdev->res.vp_regs + reg_id);
+}
+
+static inline void vp_write(struct mxr_device *mdev, u32 reg_id, u32 val)
+{
+ writel(val, mdev->res.vp_regs + reg_id);
+}
+
+static inline void vp_write_mask(struct mxr_device *mdev, u32 reg_id,
+ u32 val, u32 mask)
+{
+ u32 old = vp_read(mdev, reg_id);
+
+ val = (val & mask) | (old & ~mask);
+ writel(val, mdev->res.vp_regs + reg_id);
+}
+
+static inline u32 mxr_read(struct mxr_device *mdev, u32 reg_id)
+{
+ return readl(mdev->res.mxr_regs + reg_id);
+}
+
+static inline void mxr_write(struct mxr_device *mdev, u32 reg_id, u32 val)
+{
+ writel(val, mdev->res.mxr_regs + reg_id);
+}
+
+static inline void mxr_write_mask(struct mxr_device *mdev, u32 reg_id,
+ u32 val, u32 mask)
+{
+ u32 old = mxr_read(mdev, reg_id);
+
+ val = (val & mask) | (old & ~mask);
+ writel(val, mdev->res.mxr_regs + reg_id);
+}
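+
+/*
+ * Illustrative note, not part of the original patch: the *_write_mask
+ * helpers do a read-modify-write so only the bits selected by the mask
+ * change; e.g. mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN)
+ * sets the run bit while leaving the burst and sync settings of MXR_STATUS
+ * untouched (see mxr_reg_streamon() below).
+ */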
+
+void mxr_vsync_set_update(struct mxr_device *mdev, int en)
+{
+ /* block update on vsync */
+ mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0,
+ MXR_STATUS_SYNC_ENABLE);
+ vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0);
+}
+
+static void __mxr_reg_vp_reset(struct mxr_device *mdev)
+{
+ int tries = 100;
+
+ vp_write(mdev, VP_SRESET, VP_SRESET_PROCESSING);
+ for (tries = 100; tries; --tries) {
+ /* waiting until VP_SRESET_PROCESSING is 0 */
+ if (~vp_read(mdev, VP_SRESET) & VP_SRESET_PROCESSING)
+ break;
+ mdelay(10);
+ }
+ WARN(tries == 0, "failed to reset Video Processor\n");
+}
+
+static void mxr_reg_vp_default_filter(struct mxr_device *mdev);
+
+void mxr_reg_reset(struct mxr_device *mdev)
+{
+ unsigned long flags;
+ u32 val; /* value stored to register */
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+ /* set output to YUV444 mode */
+ mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_YUV444);
+
+ /* 16 beat burst in DMA */
+ mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
+ MXR_STATUS_BURST_MASK);
+
+ /* setting default layer priority: layer1 > video > layer0
+ * because typical usage scenario would be
+ * layer0 - framebuffer
+ * video - video overlay
+ * layer1 - OSD
+ */
+ val = MXR_LAYER_CFG_GRP0_VAL(1);
+ val |= MXR_LAYER_CFG_VP_VAL(2);
+ val |= MXR_LAYER_CFG_GRP1_VAL(3);
+ mxr_write(mdev, MXR_LAYER_CFG, val);
+
+ /* use dark gray background color */
+ mxr_write(mdev, MXR_BG_COLOR0, 0x808080);
+ mxr_write(mdev, MXR_BG_COLOR1, 0x808080);
+ mxr_write(mdev, MXR_BG_COLOR2, 0x808080);
+
+ /* setting graphical layers */
+
+ val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* color keying disabled */
+ val |= MXR_GRP_CFG_BLEND_PRE_MUL; /* premul mode */
+ val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
+
+ /* the same configuration for both layers */
+ mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
+ mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);
+
+ /* configuration of Video Processor Registers */
+ __mxr_reg_vp_reset(mdev);
+ mxr_reg_vp_default_filter(mdev);
+
+ /* enable all interrupts */
+ mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);
+
+ mxr_vsync_set_update(mdev, MXR_ENABLE);
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
+ const struct mxr_format *fmt, const struct mxr_geometry *geo)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+ /* setup format */
+ mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx),
+ MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK);
+
+ /* setup geometry */
+ mxr_write(mdev, MXR_GRAPHIC_SPAN(idx), geo->src.full_width);
+ val = MXR_GRP_WH_WIDTH(geo->src.width);
+ val |= MXR_GRP_WH_HEIGHT(geo->src.height);
+ val |= MXR_GRP_WH_H_SCALE(geo->x_ratio);
+ val |= MXR_GRP_WH_V_SCALE(geo->y_ratio);
+ mxr_write(mdev, MXR_GRAPHIC_WH(idx), val);
+
+ /* setup offsets in source image */
+ val = MXR_GRP_SXY_SX(geo->src.x_offset);
+ val |= MXR_GRP_SXY_SY(geo->src.y_offset);
+ mxr_write(mdev, MXR_GRAPHIC_SXY(idx), val);
+
+ /* setup offsets in display image */
+ val = MXR_GRP_DXY_DX(geo->dst.x_offset);
+ val |= MXR_GRP_DXY_DY(geo->dst.y_offset);
+ mxr_write(mdev, MXR_GRAPHIC_DXY(idx), val);
+
+ mxr_vsync_set_update(mdev, MXR_ENABLE);
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_vp_format(struct mxr_device *mdev,
+ const struct mxr_format *fmt, const struct mxr_geometry *geo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+ vp_write_mask(mdev, VP_MODE, fmt->cookie, VP_MODE_FMT_MASK);
+
+ /* setting size of input image */
+ vp_write(mdev, VP_IMG_SIZE_Y, VP_IMG_HSIZE(geo->src.full_width) |
+ VP_IMG_VSIZE(geo->src.full_height));
+ /* chroma height has to be halved to avoid chroma distortions */
+ vp_write(mdev, VP_IMG_SIZE_C, VP_IMG_HSIZE(geo->src.full_width) |
+ VP_IMG_VSIZE(geo->src.full_height / 2));
+
+ vp_write(mdev, VP_SRC_WIDTH, geo->src.width);
+ vp_write(mdev, VP_SRC_HEIGHT, geo->src.height);
+ vp_write(mdev, VP_SRC_H_POSITION,
+ VP_SRC_H_POSITION_VAL(geo->src.x_offset));
+ vp_write(mdev, VP_SRC_V_POSITION, geo->src.y_offset);
+
+ vp_write(mdev, VP_DST_WIDTH, geo->dst.width);
+ vp_write(mdev, VP_DST_H_POSITION, geo->dst.x_offset);
+ if (geo->dst.field == V4L2_FIELD_INTERLACED) {
+ vp_write(mdev, VP_DST_HEIGHT, geo->dst.height / 2);
+ vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset / 2);
+ } else {
+ vp_write(mdev, VP_DST_HEIGHT, geo->dst.height);
+ vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset);
+ }
+
+ vp_write(mdev, VP_H_RATIO, geo->x_ratio);
+ vp_write(mdev, VP_V_RATIO, geo->y_ratio);
+
+ vp_write(mdev, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
+
+ mxr_vsync_set_update(mdev, MXR_ENABLE);
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+
+}
+
+void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
+{
+ u32 val = addr ? ~0 : 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+ if (idx == 0)
+ mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
+ else
+ mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
+ mxr_write(mdev, MXR_GRAPHIC_BASE(idx), addr);
+
+ mxr_vsync_set_update(mdev, MXR_ENABLE);
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_vp_buffer(struct mxr_device *mdev,
+ dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2])
+{
+ u32 val = luma_addr[0] ? ~0 : 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+ mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_VP_ENABLE);
+ vp_write_mask(mdev, VP_ENABLE, val, VP_ENABLE_ON);
+ /* TODO: fix tiled mode */
+ vp_write(mdev, VP_TOP_Y_PTR, luma_addr[0]);
+ vp_write(mdev, VP_TOP_C_PTR, chroma_addr[0]);
+ vp_write(mdev, VP_BOT_Y_PTR, luma_addr[1]);
+ vp_write(mdev, VP_BOT_C_PTR, chroma_addr[1]);
+
+ mxr_vsync_set_update(mdev, MXR_ENABLE);
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+static void mxr_irq_layer_handle(struct mxr_layer *layer)
+{
+ struct list_head *head;
+ struct mxr_buffer *done;
+
+ /* skip non-existing layer */
+ if (layer == NULL)
+ return;
+
+ head = &layer->enq_list;
+ spin_lock(&layer->enq_slock);
+ if (layer->state == MXR_LAYER_IDLE)
+ goto done;
+
+ done = layer->shadow_buf;
+ layer->shadow_buf = layer->update_buf;
+
+ if (list_empty(head)) {
+ if (layer->state != MXR_LAYER_STREAMING)
+ layer->update_buf = NULL;
+ } else {
+ struct mxr_buffer *next;
+ next = list_first_entry(head, struct mxr_buffer, list);
+ list_del(&next->list);
+ layer->update_buf = next;
+ }
+
+ layer->ops.buffer_set(layer, layer->update_buf);
+
+ if (done && done != layer->shadow_buf)
+ vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
+
+done:
+ spin_unlock(&layer->enq_slock);
+}
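+
+/*
+ * Illustrative sketch, not part of the original patch: the producer side of
+ * the queue consumed above is expected to append buffers under the same
+ * spinlock, roughly as a videobuf2 buf_queue callback would; the real
+ * callback is not in this file, so the shape below is only an assumption.
+ */
+#if 0
+static void example_buf_queue(struct mxr_layer *layer, struct mxr_buffer *buf)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&layer->enq_slock, flags);
+ list_add_tail(&buf->list, &layer->enq_list);
+ spin_unlock_irqrestore(&layer->enq_slock, flags);
+}
+#endif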
+
+irqreturn_t mxr_irq_handler(int irq, void *dev_data)
+{
+ struct mxr_device *mdev = dev_data;
+ u32 i, val;
+
+ spin_lock(&mdev->reg_slock);
+ val = mxr_read(mdev, MXR_INT_STATUS);
+
+ /* wake up process waiting for VSYNC */
+ if (val & MXR_INT_STATUS_VSYNC) {
+ set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
+ wake_up(&mdev->event_queue);
+ }
+
+ /* clear interrupts */
+ if (~val & MXR_INT_EN_VSYNC) {
+ /* the vsync interrupt uses different bits for read and clear */
+ val &= ~MXR_INT_EN_VSYNC;
+ val |= MXR_INT_CLEAR_VSYNC;
+ }
+ mxr_write(mdev, MXR_INT_STATUS, val);
+
+ spin_unlock(&mdev->reg_slock);
+ /* bail out if this was not a vsync event */
+ if (~val & MXR_INT_CLEAR_VSYNC)
+ return IRQ_HANDLED;
+ for (i = 0; i < MXR_MAX_LAYERS; ++i)
+ mxr_irq_layer_handle(mdev->layer[i]);
+ return IRQ_HANDLED;
+}
+
+void mxr_reg_s_output(struct mxr_device *mdev, int cookie)
+{
+ u32 val;
+
+ val = cookie == 0 ? MXR_CFG_DST_SDO : MXR_CFG_DST_HDMI;
+ mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_DST_MASK);
+}
+
+void mxr_reg_streamon(struct mxr_device *mdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ /* single write -> no need to block vsync update */
+
+ /* start MIXER */
+ mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
+
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_streamoff(struct mxr_device *mdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ /* single write -> no need to block vsync update */
+
+ /* stop MIXER */
+ mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
+
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+int mxr_reg_wait4vsync(struct mxr_device *mdev)
+{
+ int ret;
+
+ clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
+ /* TODO: consider adding interruptible */
+ ret = wait_event_timeout(mdev->event_queue,
+ test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
+ msecs_to_jiffies(1000));
+ if (ret > 0)
+ return 0;
+ if (ret < 0)
+ return ret;
+ mxr_warn(mdev, "no vsync detected - timeout\n");
+ return -ETIME;
+}
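+
+/*
+ * Illustrative note, not part of the original patch: callers pair this with
+ * the stream controls above, e.g. mxr_reg_streamon() followed by
+ * mxr_reg_wait4vsync() so that the shadowed configuration is latched before
+ * streaming is reported as started (see mxr_streamer_get() in mixer_drv.c).
+ */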
+
+void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ u32 val = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->reg_slock, flags);
+ mxr_vsync_set_update(mdev, MXR_DISABLE);
+
+ /* choosing between interlace and progressive mode */
+ if (fmt->field == V4L2_FIELD_INTERLACED)
+ val |= MXR_CFG_SCAN_INTERLACE;
+ else
+ val |= MXR_CFG_SCAN_PROGRASSIVE;
+
+ /* choosing the proper HD or SD mode */
+ if (fmt->height == 480)
+ val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
+ else if (fmt->height == 576)
+ val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
+ else if (fmt->height == 720)
+ val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+ else if (fmt->height == 1080)
+ val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
+ else
+ WARN(1, "unrecognized mbus height %u!\n", fmt->height);
+
+ mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK);
+
+ val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
+ vp_write_mask(mdev, VP_MODE, val,
+ VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);
+
+ mxr_vsync_set_update(mdev, MXR_ENABLE);
+ spin_unlock_irqrestore(&mdev->reg_slock, flags);
+}
+
+void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en)
+{
+ /* no extra actions need to be done */
+}
+
+void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en)
+{
+ /* no extra actions need to be done */
+}
+
+static const u8 filter_y_horiz_tap8[] = {
+ 0, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 0, 0, 0,
+ 0, 2, 4, 5, 6, 6, 6, 6,
+ 6, 5, 5, 4, 3, 2, 1, 1,
+ 0, -6, -12, -16, -18, -20, -21, -20,
+ -20, -18, -16, -13, -10, -8, -5, -2,
+ 127, 126, 125, 121, 114, 107, 99, 89,
+ 79, 68, 57, 46, 35, 25, 16, 8,
+};
+
+static const u8 filter_y_vert_tap4[] = {
+ 0, -3, -6, -8, -8, -8, -8, -7,
+ -6, -5, -4, -3, -2, -1, -1, 0,
+ 127, 126, 124, 118, 111, 102, 92, 81,
+ 70, 59, 48, 37, 27, 19, 11, 5,
+ 0, 5, 11, 19, 27, 37, 48, 59,
+ 70, 81, 92, 102, 111, 118, 124, 126,
+ 0, 0, -1, -1, -2, -3, -4, -5,
+ -6, -7, -8, -8, -8, -8, -6, -3,
+};
+
+static const u8 filter_cr_horiz_tap4[] = {
+ 0, -3, -6, -8, -8, -8, -8, -7,
+ -6, -5, -4, -3, -2, -1, -1, 0,
+ 127, 126, 124, 118, 111, 102, 92, 81,
+ 70, 59, 48, 37, 27, 19, 11, 5,
+};
+
+static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
+ int reg_id, const u8 *data, unsigned int size)
+{
+ /* assure 4-byte alignment */
+ BUG_ON(size & 3);
+ for (; size; size -= 4, reg_id += 4, data += 4) {
+ u32 val = (data[0] << 24) | (data[1] << 16) |
+ (data[2] << 8) | data[3];
+ vp_write(mdev, reg_id, val);
+ }
+}
+
+static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
+{
+ mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
+ filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
+ mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
+ filter_y_vert_tap4, sizeof filter_y_vert_tap4);
+ mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
+ filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
+}
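+
+/*
+ * Worked example, not part of the original patch: mxr_reg_vp_filter_set()
+ * packs four coefficients per 32-bit register, data[0] in the top byte, so
+ * the 32-entry filter_cr_horiz_tap4 table above fills 8 consecutive
+ * registers starting at VP_POLY4_C0_LL.
+ */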
+
+static void mxr_reg_mxr_dump(struct mxr_device *mdev)
+{
+#define DUMPREG(reg_id) \
+do { \
+ mxr_dbg(mdev, #reg_id " = %08x\n", \
+ (u32)readl(mdev->res.mxr_regs + reg_id)); \
+} while (0)
+
+ DUMPREG(MXR_STATUS);
+ DUMPREG(MXR_CFG);
+ DUMPREG(MXR_INT_EN);
+ DUMPREG(MXR_INT_STATUS);
+
+ DUMPREG(MXR_LAYER_CFG);
+ DUMPREG(MXR_VIDEO_CFG);
+
+ DUMPREG(MXR_GRAPHIC0_CFG);
+ DUMPREG(MXR_GRAPHIC0_BASE);
+ DUMPREG(MXR_GRAPHIC0_SPAN);
+ DUMPREG(MXR_GRAPHIC0_WH);
+ DUMPREG(MXR_GRAPHIC0_SXY);
+ DUMPREG(MXR_GRAPHIC0_DXY);
+
+ DUMPREG(MXR_GRAPHIC1_CFG);
+ DUMPREG(MXR_GRAPHIC1_BASE);
+ DUMPREG(MXR_GRAPHIC1_SPAN);
+ DUMPREG(MXR_GRAPHIC1_WH);
+ DUMPREG(MXR_GRAPHIC1_SXY);
+ DUMPREG(MXR_GRAPHIC1_DXY);
+#undef DUMPREG
+}
+
+static void mxr_reg_vp_dump(struct mxr_device *mdev)
+{
+#define DUMPREG(reg_id) \
+do { \
+ mxr_dbg(mdev, #reg_id " = %08x\n", \
+ (u32) readl(mdev->res.vp_regs + reg_id)); \
+} while (0)
+
+
+ DUMPREG(VP_ENABLE);
+ DUMPREG(VP_SRESET);
+ DUMPREG(VP_SHADOW_UPDATE);
+ DUMPREG(VP_FIELD_ID);
+ DUMPREG(VP_MODE);
+ DUMPREG(VP_IMG_SIZE_Y);
+ DUMPREG(VP_IMG_SIZE_C);
+ DUMPREG(VP_PER_RATE_CTRL);
+ DUMPREG(VP_TOP_Y_PTR);
+ DUMPREG(VP_BOT_Y_PTR);
+ DUMPREG(VP_TOP_C_PTR);
+ DUMPREG(VP_BOT_C_PTR);
+ DUMPREG(VP_ENDIAN_MODE);
+ DUMPREG(VP_SRC_H_POSITION);
+ DUMPREG(VP_SRC_V_POSITION);
+ DUMPREG(VP_SRC_WIDTH);
+ DUMPREG(VP_SRC_HEIGHT);
+ DUMPREG(VP_DST_H_POSITION);
+ DUMPREG(VP_DST_V_POSITION);
+ DUMPREG(VP_DST_WIDTH);
+ DUMPREG(VP_DST_HEIGHT);
+ DUMPREG(VP_H_RATIO);
+ DUMPREG(VP_V_RATIO);
+
+#undef DUMPREG
+}
+
+void mxr_reg_dump(struct mxr_device *mdev)
+{
+ mxr_reg_mxr_dump(mdev);
+ mxr_reg_vp_dump(mdev);
+}
+
diff --git a/drivers/media/video/s5p-tv/mixer_video.c b/drivers/media/video/s5p-tv/mixer_video.c
new file mode 100644
index 00000000000..43ac22f35bc
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_video.c
@@ -0,0 +1,1006 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, either version 2 of the License,
+ * or (at your option) any later version
+ */
+
+#include "mixer.h"
+
+#include <media/v4l2-ioctl.h>
+#include <linux/videodev2.h>
+#include <linux/mm.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <media/videobuf2-dma-contig.h>
+
+static int find_reg_callback(struct device *dev, void *p)
+{
+ struct v4l2_subdev **sd = p;
+
+ *sd = dev_get_drvdata(dev);
+ /* non-zero value stops iteration */
+ return 1;
+}
+
+static struct v4l2_subdev *find_and_register_subdev(
+ struct mxr_device *mdev, char *module_name)
+{
+ struct device_driver *drv;
+ struct v4l2_subdev *sd = NULL;
+ int ret;
+
+ /* TODO: add waiting until probe is finished */
+ drv = driver_find(module_name, &platform_bus_type);
+ if (!drv) {
+ mxr_warn(mdev, "module %s is missing\n", module_name);
+ return NULL;
+ }
+ /* driver refcnt is increased, it is safe to iterate over devices */
+ ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
+ /* ret == 0 means that find_reg_callback was never executed */
+ if (sd == NULL) {
+ mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
+ goto done;
+ }
+ /* v4l2_device_register_subdev detects if sd is NULL */
+ ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
+ if (ret) {
+ mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
+ sd = NULL;
+ }
+
+done:
+ put_driver(drv);
+ return sd;
+}
+
+int __devinit mxr_acquire_video(struct mxr_device *mdev,
+ struct mxr_output_conf *output_conf, int output_count)
+{
+ struct device *dev = mdev->dev;
+ struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
+ int i;
+ int ret = 0;
+ struct v4l2_subdev *sd;
+
+ strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
+ /* prepare context for V4L2 device */
+ ret = v4l2_device_register(dev, v4l2_dev);
+ if (ret) {
+ mxr_err(mdev, "could not register v4l2 device.\n");
+ goto fail;
+ }
+
+ mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
+ if (IS_ERR_OR_NULL(mdev->alloc_ctx)) {
+ mxr_err(mdev, "could not acquire vb2 allocator\n");
+ goto fail_v4l2_dev;
+ }
+
+ /* registering outputs */
+ mdev->output_cnt = 0;
+ for (i = 0; i < output_count; ++i) {
+ struct mxr_output_conf *conf = &output_conf[i];
+ struct mxr_output *out;
+
+ sd = find_and_register_subdev(mdev, conf->module_name);
+ /* trying to register next output */
+ if (sd == NULL)
+ continue;
+ out = kzalloc(sizeof *out, GFP_KERNEL);
+ if (out == NULL) {
+ mxr_err(mdev, "no memory for '%s'\n",
+ conf->output_name);
+ ret = -ENOMEM;
+ /* registered subdevs are removed in fail_v4l2_dev */
+ goto fail_output;
+ }
+ strlcpy(out->name, conf->output_name, sizeof(out->name));
+ out->sd = sd;
+ out->cookie = conf->cookie;
+ mdev->output[mdev->output_cnt++] = out;
+ mxr_info(mdev, "added output '%s' from module '%s'\n",
+ conf->output_name, conf->module_name);
+ /* checking if maximal number of outputs is reached */
+ if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
+ break;
+ }
+
+ if (mdev->output_cnt == 0) {
+ mxr_err(mdev, "failed to register any output\n");
+ ret = -ENODEV;
+ /* skipping fail_output because there is nothing to free */
+ goto fail_vb2_allocator;
+ }
+
+ return 0;
+
+fail_output:
+ /* kfree is NULL-safe */
+ for (i = 0; i < mdev->output_cnt; ++i)
+ kfree(mdev->output[i]);
+ memset(mdev->output, 0, sizeof mdev->output);
+
+fail_vb2_allocator:
+ /* freeing allocator context */
+ vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
+
+fail_v4l2_dev:
+ /* NOTE: this automatically unregisters all subdevs */
+ v4l2_device_unregister(v4l2_dev);
+
+fail:
+ return ret;
+}
+
+void __devexit mxr_release_video(struct mxr_device *mdev)
+{
+ int i;
+
+ /* kfree is NULL-safe */
+ for (i = 0; i < mdev->output_cnt; ++i)
+ kfree(mdev->output[i]);
+
+ vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
+ v4l2_device_unregister(&mdev->v4l2_dev);
+}
+
+static int mxr_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+
+ strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof cap->driver);
+ strlcpy(cap->card, layer->vfd.name, sizeof cap->card);
+ sprintf(cap->bus_info, "%d", layer->idx);
+ cap->version = KERNEL_VERSION(0, 1, 0);
+ cap->capabilities = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+
+ return 0;
+}
+
+/* Geometry handling */
+static void mxr_layer_geo_fix(struct mxr_layer *layer)
+{
+ struct mxr_device *mdev = layer->mdev;
+ struct v4l2_mbus_framefmt mbus_fmt;
+
+ /* TODO: add some dirty flag to avoid unnecessary adjustments */
+ mxr_get_mbus_fmt(mdev, &mbus_fmt);
+ layer->geo.dst.full_width = mbus_fmt.width;
+ layer->geo.dst.full_height = mbus_fmt.height;
+ layer->geo.dst.field = mbus_fmt.field;
+ layer->ops.fix_geometry(layer);
+}
+
+static void mxr_layer_default_geo(struct mxr_layer *layer)
+{
+ struct mxr_device *mdev = layer->mdev;
+ struct v4l2_mbus_framefmt mbus_fmt;
+
+ memset(&layer->geo, 0, sizeof layer->geo);
+
+ mxr_get_mbus_fmt(mdev, &mbus_fmt);
+
+ layer->geo.dst.full_width = mbus_fmt.width;
+ layer->geo.dst.full_height = mbus_fmt.height;
+ layer->geo.dst.width = layer->geo.dst.full_width;
+ layer->geo.dst.height = layer->geo.dst.full_height;
+ layer->geo.dst.field = mbus_fmt.field;
+
+ layer->geo.src.full_width = mbus_fmt.width;
+ layer->geo.src.full_height = mbus_fmt.height;
+ layer->geo.src.width = layer->geo.src.full_width;
+ layer->geo.src.height = layer->geo.src.full_height;
+
+ layer->ops.fix_geometry(layer);
+}
+
+static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
+{
+ mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
+ geo->src.full_width, geo->src.full_height);
+ mxr_dbg(mdev, "src.size = (%u, %u)\n",
+ geo->src.width, geo->src.height);
+ mxr_dbg(mdev, "src.offset = (%u, %u)\n",
+ geo->src.x_offset, geo->src.y_offset);
+ mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
+ geo->dst.full_width, geo->dst.full_height);
+ mxr_dbg(mdev, "dst.size = (%u, %u)\n",
+ geo->dst.width, geo->dst.height);
+ mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
+ geo->dst.x_offset, geo->dst.y_offset);
+ mxr_dbg(mdev, "ratio = (%u, %u)\n",
+ geo->x_ratio, geo->y_ratio);
+}
+
+
+static const struct mxr_format *find_format_by_fourcc(
+ struct mxr_layer *layer, unsigned long fourcc);
+static const struct mxr_format *find_format_by_index(
+ struct mxr_layer *layer, unsigned long index);
+
+static int mxr_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ const struct mxr_format *fmt;
+
+ mxr_dbg(mdev, "%s\n", __func__);
+ fmt = find_format_by_index(layer, f->index);
+ if (fmt == NULL)
+ return -EINVAL;
+
+ strlcpy(f->description, fmt->name, sizeof(f->description));
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int mxr_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ const struct mxr_format *fmt;
+ struct v4l2_pix_format_mplane *pix;
+ struct mxr_device *mdev = layer->mdev;
+ struct mxr_geometry *geo = &layer->geo;
+
+ mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
+
+ pix = &f->fmt.pix_mp;
+ fmt = find_format_by_fourcc(layer, pix->pixelformat);
+ if (fmt == NULL) {
+ mxr_warn(mdev, "not recognized fourcc: %08x\n",
+ pix->pixelformat);
+ return -EINVAL;
+ }
+ layer->fmt = fmt;
+ geo->src.full_width = pix->width;
+ geo->src.width = pix->width;
+ geo->src.full_height = pix->height;
+ geo->src.height = pix->height;
+ /* ensure geometry consistency */
+ mxr_layer_geo_fix(layer);
+ mxr_dbg(mdev, "width=%u height=%u span=%u\n",
+ geo->src.width, geo->src.height, geo->src.full_width);
+
+ return 0;
+}
+
+static unsigned int divup(unsigned int dividend, unsigned int divisor)
+{
+ return (dividend + divisor - 1) / divisor;
+}
+
+unsigned long mxr_get_plane_size(const struct mxr_block *blk,
+ unsigned int width, unsigned int height)
+{
+ unsigned int bl_width = divup(width, blk->width);
+ unsigned int bl_height = divup(height, blk->height);
+
+ return bl_width * bl_height * blk->size;
+}
+
+static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
+ const struct mxr_format *fmt, u32 width, u32 height)
+{
+ int i;
+
+ memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
+ for (i = 0; i < fmt->num_planes; ++i) {
+ struct v4l2_plane_pix_format *plane = planes
+ + fmt->plane2subframe[i];
+ const struct mxr_block *blk = &fmt->plane[i];
+ u32 bl_width = divup(width, blk->width);
+ u32 bl_height = divup(height, blk->height);
+ u32 sizeimage = bl_width * bl_height * blk->size;
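+ /* a block spans blk->height lines, so one line of the subframe
+  * takes blk->size / blk->height bytes per block column */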
+ u16 bytesperline = bl_width * blk->size / blk->height;
+
+ plane->sizeimage += sizeimage;
+ plane->bytesperline = max(plane->bytesperline, bytesperline);
+ }
+}
+
+static int mxr_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+
+ pix->width = layer->geo.src.full_width;
+ pix->height = layer->geo.src.full_height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = layer->fmt->fourcc;
+ pix->colorspace = layer->fmt->colorspace;
+ mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);
+
+ return 0;
+}
+
+static inline struct mxr_crop *choose_crop_by_type(struct mxr_geometry *geo,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return &geo->dst;
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ return &geo->src;
+ default:
+ return NULL;
+ }
+}
+
+static int mxr_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_crop *crop;
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ crop = choose_crop_by_type(&layer->geo, a->type);
+ if (crop == NULL)
+ return -EINVAL;
+ mxr_layer_geo_fix(layer);
+ a->c.left = crop->x_offset;
+ a->c.top = crop->y_offset;
+ a->c.width = crop->width;
+ a->c.height = crop->height;
+ return 0;
+}
+
+static int mxr_s_crop(struct file *file, void *fh, struct v4l2_crop *a)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_crop *crop;
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ crop = choose_crop_by_type(&layer->geo, a->type);
+ if (crop == NULL)
+ return -EINVAL;
+ crop->x_offset = a->c.left;
+ crop->y_offset = a->c.top;
+ crop->width = a->c.width;
+ crop->height = a->c.height;
+ mxr_layer_geo_fix(layer);
+ return 0;
+}
+
+static int mxr_cropcap(struct file *file, void *fh, struct v4l2_cropcap *a)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_crop *crop;
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ crop = choose_crop_by_type(&layer->geo, a->type);
+ if (crop == NULL)
+ return -EINVAL;
+ mxr_layer_geo_fix(layer);
+ a->bounds.left = 0;
+ a->bounds.top = 0;
+ a->bounds.width = crop->full_width;
+ a->bounds.height = crop->full_height;
+ a->defrect = a->bounds;
+ /* setting pixel aspect to 1/1 */
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+ return 0;
+}
+
+static int mxr_enum_dv_presets(struct file *file, void *fh,
+ struct v4l2_dv_enum_preset *preset)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ int ret;
+
+ /* lock protects from changing sd_out */
+ mutex_lock(&mdev->mutex);
+ ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_presets, preset);
+ mutex_unlock(&mdev->mutex);
+
+ return ret ? -EINVAL : 0;
+}
+
+static int mxr_s_dv_preset(struct file *file, void *fh,
+ struct v4l2_dv_preset *preset)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ int ret;
+
+ /* lock protects from changing sd_out */
+ mutex_lock(&mdev->mutex);
+
+ /* preset change cannot be done while there is an entity
+  * dependent on the output configuration
+ */
+ if (mdev->n_output > 0) {
+ mutex_unlock(&mdev->mutex);
+ return -EBUSY;
+ }
+
+ ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_preset, preset);
+
+ mutex_unlock(&mdev->mutex);
+
+ /* any failure should return EINVAL according to V4L2 doc */
+ return ret ? -EINVAL : 0;
+}
+
+static int mxr_g_dv_preset(struct file *file, void *fh,
+ struct v4l2_dv_preset *preset)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ int ret;
+
+ /* lock protects from changing sd_out */
+ mutex_lock(&mdev->mutex);
+ ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_preset, preset);
+ mutex_unlock(&mdev->mutex);
+
+ return ret ? -EINVAL : 0;
+}
+
+static int mxr_s_std(struct file *file, void *fh, v4l2_std_id *norm)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ int ret;
+
+ /* lock protects from changing sd_out */
+ mutex_lock(&mdev->mutex);
+
+ /* standard change cannot be done while there is an entity
+  * dependent on the output configuration
+ */
+ if (mdev->n_output > 0) {
+ mutex_unlock(&mdev->mutex);
+ return -EBUSY;
+ }
+
+ ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, *norm);
+
+ mutex_unlock(&mdev->mutex);
+
+ return ret ? -EINVAL : 0;
+}
+
+static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ int ret;
+
+ /* lock protects from changing sd_out */
+ mutex_lock(&mdev->mutex);
+ ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
+ mutex_unlock(&mdev->mutex);
+
+ return ret ? -EINVAL : 0;
+}
+
+static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ struct mxr_output *out;
+ struct v4l2_subdev *sd;
+
+ if (a->index >= mdev->output_cnt)
+ return -EINVAL;
+ out = mdev->output[a->index];
+ BUG_ON(out == NULL);
+ sd = out->sd;
+ strlcpy(a->name, out->name, sizeof(a->name));
+
+ /* try to obtain supported tv norms */
+ v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
+ a->capabilities = 0;
+ if (sd->ops->video && sd->ops->video->s_dv_preset)
+ a->capabilities |= V4L2_OUT_CAP_PRESETS;
+ if (sd->ops->video && sd->ops->video->s_std_output)
+ a->capabilities |= V4L2_OUT_CAP_STD;
+ a->type = V4L2_OUTPUT_TYPE_ANALOG;
+
+ return 0;
+}
+
+static int mxr_s_output(struct file *file, void *fh, unsigned int i)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ int ret = 0;
+
+ if (i >= mdev->output_cnt || mdev->output[i] == NULL)
+ return -EINVAL;
+
+ mutex_lock(&mdev->mutex);
+ if (mdev->n_output > 0) {
+ ret = -EBUSY;
+ goto done;
+ }
+ mdev->current_output = i;
+ vfd->tvnorms = 0;
+ v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
+ &vfd->tvnorms);
+ mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);
+
+done:
+ mutex_unlock(&mdev->mutex);
+ return ret;
+}
+
+static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+
+ mutex_lock(&mdev->mutex);
+ *p = mdev->current_output;
+ mutex_unlock(&mdev->mutex);
+
+ return 0;
+}
+
+static int mxr_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ return vb2_reqbufs(&layer->vb_queue, p);
+}
+
+static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ return vb2_querybuf(&layer->vb_queue, p);
+}
+
+static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
+ return vb2_qbuf(&layer->vb_queue, p);
+}
+
+static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
+}
+
+static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ return vb2_streamon(&layer->vb_queue, i);
+}
+
+static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ return vb2_streamoff(&layer->vb_queue, i);
+}
+
+static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
+ .vidioc_querycap = mxr_querycap,
+ /* format handling */
+ .vidioc_enum_fmt_vid_out = mxr_enum_fmt,
+ .vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
+ .vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
+ /* buffer control */
+ .vidioc_reqbufs = mxr_reqbufs,
+ .vidioc_querybuf = mxr_querybuf,
+ .vidioc_qbuf = mxr_qbuf,
+ .vidioc_dqbuf = mxr_dqbuf,
+ /* Streaming control */
+ .vidioc_streamon = mxr_streamon,
+ .vidioc_streamoff = mxr_streamoff,
+ /* Preset functions */
+ .vidioc_enum_dv_presets = mxr_enum_dv_presets,
+ .vidioc_s_dv_preset = mxr_s_dv_preset,
+ .vidioc_g_dv_preset = mxr_g_dv_preset,
+ /* analog TV standard functions */
+ .vidioc_s_std = mxr_s_std,
+ .vidioc_g_std = mxr_g_std,
+ /* Output handling */
+ .vidioc_enum_output = mxr_enum_output,
+ .vidioc_s_output = mxr_s_output,
+ .vidioc_g_output = mxr_g_output,
+ /* Crop ioctls */
+ .vidioc_g_crop = mxr_g_crop,
+ .vidioc_s_crop = mxr_s_crop,
+ .vidioc_cropcap = mxr_cropcap,
+};
+
+static int mxr_video_open(struct file *file)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+ struct mxr_device *mdev = layer->mdev;
+ int ret = 0;
+
+ mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
+ /* make sure the device probe has finished */
+ wait_for_device_probe();
+ /* creating context for file descriptor */
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ mxr_err(mdev, "v4l2_fh_open failed\n");
+ return ret;
+ }
+
+ /* nothing more to do if the layer is already initialized */
+ if (!v4l2_fh_is_singular_file(file))
+ return 0;
+
+ /* FIXME: should power be enabled on open? */
+ ret = mxr_power_get(mdev);
+ if (ret) {
+ mxr_err(mdev, "power on failed\n");
+ goto fail_fh_open;
+ }
+
+ ret = vb2_queue_init(&layer->vb_queue);
+ if (ret != 0) {
+ mxr_err(mdev, "failed to initialize vb2 queue\n");
+ goto fail_power;
+ }
+ /* set default format, first on the list */
+ layer->fmt = layer->fmt_array[0];
+ /* setup default geometry */
+ mxr_layer_default_geo(layer);
+
+ return 0;
+
+fail_power:
+ mxr_power_put(mdev);
+
+fail_fh_open:
+ v4l2_fh_release(file);
+
+ return ret;
+}
+
+static unsigned int
+mxr_video_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+
+ return vb2_poll(&layer->vb_queue, file, wait);
+}
+
+static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+
+ return vb2_mmap(&layer->vb_queue, vma);
+}
+
+static int mxr_video_release(struct file *file)
+{
+ struct mxr_layer *layer = video_drvdata(file);
+
+ mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
+ if (v4l2_fh_is_singular_file(file)) {
+ vb2_queue_release(&layer->vb_queue);
+ mxr_power_put(layer->mdev);
+ }
+ v4l2_fh_release(file);
+ return 0;
+}
+
+static const struct v4l2_file_operations mxr_fops = {
+ .owner = THIS_MODULE,
+ .open = mxr_video_open,
+ .poll = mxr_video_poll,
+ .mmap = mxr_video_mmap,
+ .release = mxr_video_release,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned long sizes[],
+ void *alloc_ctxs[])
+{
+ struct mxr_layer *layer = vb2_get_drv_priv(vq);
+ const struct mxr_format *fmt = layer->fmt;
+ int i;
+ struct mxr_device *mdev = layer->mdev;
+ struct v4l2_plane_pix_format planes[3];
+
+ mxr_dbg(mdev, "%s\n", __func__);
+ /* checking if format was configured */
+ if (fmt == NULL)
+ return -EINVAL;
+ mxr_dbg(mdev, "fmt = %s\n", fmt->name);
+ mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
+ layer->geo.src.full_height);
+
+ *nplanes = fmt->num_subframes;
+ for (i = 0; i < fmt->num_subframes; ++i) {
+ alloc_ctxs[i] = layer->mdev->alloc_ctx;
+ sizes[i] = PAGE_ALIGN(planes[i].sizeimage);
+ mxr_dbg(mdev, "size[%d] = %08lx\n", i, sizes[i]);
+ }
+
+ if (*nbuffers == 0)
+ *nbuffers = 1;
+
+ return 0;
+}
+
+static void buf_queue(struct vb2_buffer *vb)
+{
+ struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
+ struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
+ struct mxr_device *mdev = layer->mdev;
+ unsigned long flags;
+ int must_start = 0;
+
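+ /* the first buffer queued after streamon actually starts the hardware */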
+ spin_lock_irqsave(&layer->enq_slock, flags);
+ if (layer->state == MXR_LAYER_STREAMING_START) {
+ layer->state = MXR_LAYER_STREAMING;
+ must_start = 1;
+ }
+ list_add_tail(&buffer->list, &layer->enq_list);
+ spin_unlock_irqrestore(&layer->enq_slock, flags);
+ if (must_start) {
+ layer->ops.stream_set(layer, MXR_ENABLE);
+ mxr_streamer_get(mdev);
+ }
+
+ mxr_dbg(mdev, "queuing buffer\n");
+}
+
+static void wait_lock(struct vb2_queue *vq)
+{
+ struct mxr_layer *layer = vb2_get_drv_priv(vq);
+
+ mxr_dbg(layer->mdev, "%s\n", __func__);
+ mutex_lock(&layer->mutex);
+}
+
+static void wait_unlock(struct vb2_queue *vq)
+{
+ struct mxr_layer *layer = vb2_get_drv_priv(vq);
+
+ mxr_dbg(layer->mdev, "%s\n", __func__);
+ mutex_unlock(&layer->mutex);
+}
+
+static int start_streaming(struct vb2_queue *vq)
+{
+ struct mxr_layer *layer = vb2_get_drv_priv(vq);
+ struct mxr_device *mdev = layer->mdev;
+ unsigned long flags;
+
+ mxr_dbg(mdev, "%s\n", __func__);
+ /* block any changes in output configuration */
+ mxr_output_get(mdev);
+
+ /* update the layer's geometry */
+ mxr_layer_geo_fix(layer);
+ mxr_geometry_dump(mdev, &layer->geo);
+
+ layer->ops.format_set(layer);
+ /* enabling layer in hardware */
+ spin_lock_irqsave(&layer->enq_slock, flags);
+ layer->state = MXR_LAYER_STREAMING_START;
+ spin_unlock_irqrestore(&layer->enq_slock, flags);
+
+ return 0;
+}
+
+static void mxr_watchdog(unsigned long arg)
+{
+ struct mxr_layer *layer = (struct mxr_layer *) arg;
+ struct mxr_device *mdev = layer->mdev;
+ unsigned long flags;
+
+ mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);
+
+ spin_lock_irqsave(&layer->enq_slock, flags);
+
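+ /* if both pointers refer to the same buffer, release it only once */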
+ if (layer->update_buf == layer->shadow_buf)
+ layer->update_buf = NULL;
+ if (layer->update_buf) {
+ vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
+ layer->update_buf = NULL;
+ }
+ if (layer->shadow_buf) {
+ vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
+ layer->shadow_buf = NULL;
+ }
+ spin_unlock_irqrestore(&layer->enq_slock, flags);
+}
+
+static int stop_streaming(struct vb2_queue *vq)
+{
+ struct mxr_layer *layer = vb2_get_drv_priv(vq);
+ struct mxr_device *mdev = layer->mdev;
+ unsigned long flags;
+ struct timer_list watchdog;
+ struct mxr_buffer *buf, *buf_tmp;
+
+ mxr_dbg(mdev, "%s\n", __func__);
+
+ spin_lock_irqsave(&layer->enq_slock, flags);
+
+ /* reset list */
+ layer->state = MXR_LAYER_STREAMING_FINISH;
+
+ /* mark all queued buffers as done */
+ list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ spin_unlock_irqrestore(&layer->enq_slock, flags);
+
+ /* give the hardware 1 second to complete the last buffers */
+ setup_timer_on_stack(&watchdog, mxr_watchdog,
+ (unsigned long)layer);
+ mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));
+
+ /* wait until all buffers reach the done state */
+ vb2_wait_for_all_buffers(vq);
+
+ /* stop timer if all synchronization is done */
+ del_timer_sync(&watchdog);
+ destroy_timer_on_stack(&watchdog);
+
+ /* stopping hardware */
+ spin_lock_irqsave(&layer->enq_slock, flags);
+ layer->state = MXR_LAYER_IDLE;
+ spin_unlock_irqrestore(&layer->enq_slock, flags);
+
+ /* disabling layer in hardware */
+ layer->ops.stream_set(layer, MXR_DISABLE);
+ /* remove one streamer */
+ mxr_streamer_put(mdev);
+ /* allow changes in output configuration */
+ mxr_output_put(mdev);
+ return 0;
+}
+
+static struct vb2_ops mxr_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_queue = buf_queue,
+ .wait_prepare = wait_unlock,
+ .wait_finish = wait_lock,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+};
+
+/* FIXME: try to move this function into mxr_base_layer_create */
+int mxr_base_layer_register(struct mxr_layer *layer)
+{
+ struct mxr_device *mdev = layer->mdev;
+ int ret;
+
+ ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ mxr_err(mdev, "failed to register video device\n");
+ else
+ mxr_info(mdev, "registered layer %s as /dev/video%d\n",
+ layer->vfd.name, layer->vfd.num);
+ return ret;
+}
+
+void mxr_base_layer_unregister(struct mxr_layer *layer)
+{
+ video_unregister_device(&layer->vfd);
+}
+
+void mxr_layer_release(struct mxr_layer *layer)
+{
+ if (layer->ops.release)
+ layer->ops.release(layer);
+}
+
+void mxr_base_layer_release(struct mxr_layer *layer)
+{
+ kfree(layer);
+}
+
+static void mxr_vfd_release(struct video_device *vdev)
+{
+ printk(KERN_INFO "video device release\n");
+}
+
+struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
+ int idx, char *name, struct mxr_layer_ops *ops)
+{
+ struct mxr_layer *layer;
+
+ layer = kzalloc(sizeof *layer, GFP_KERNEL);
+ if (layer == NULL) {
+ mxr_err(mdev, "not enough memory for layer.\n");
+ goto fail;
+ }
+
+ layer->mdev = mdev;
+ layer->idx = idx;
+ layer->ops = *ops;
+
+ spin_lock_init(&layer->enq_slock);
+ INIT_LIST_HEAD(&layer->enq_list);
+ mutex_init(&layer->mutex);
+
+ layer->vfd = (struct video_device) {
+ .minor = -1,
+ .release = mxr_vfd_release,
+ .fops = &mxr_fops,
+ .ioctl_ops = &mxr_ioctl_ops,
+ };
+ strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
+ /* let framework control PRIORITY */
+ set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);
+
+ video_set_drvdata(&layer->vfd, layer);
+ layer->vfd.lock = &layer->mutex;
+ layer->vfd.v4l2_dev = &mdev->v4l2_dev;
+
+ layer->vb_queue = (struct vb2_queue) {
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ .io_modes = VB2_MMAP | VB2_USERPTR,
+ .drv_priv = layer,
+ .buf_struct_size = sizeof(struct mxr_buffer),
+ .ops = &mxr_video_qops,
+ .mem_ops = &vb2_dma_contig_memops,
+ };
+
+ return layer;
+
+fail:
+ return NULL;
+}
+
+static const struct mxr_format *find_format_by_fourcc(
+ struct mxr_layer *layer, unsigned long fourcc)
+{
+ int i;
+
+ for (i = 0; i < layer->fmt_array_size; ++i)
+ if (layer->fmt_array[i]->fourcc == fourcc)
+ return layer->fmt_array[i];
+ return NULL;
+}
+
+static const struct mxr_format *find_format_by_index(
+ struct mxr_layer *layer, unsigned long index)
+{
+ if (index >= layer->fmt_array_size)
+ return NULL;
+ return layer->fmt_array[index];
+}
+
diff --git a/drivers/media/video/s5p-tv/mixer_vp_layer.c b/drivers/media/video/s5p-tv/mixer_vp_layer.c
new file mode 100644
index 00000000000..6950ed8ac1a
--- /dev/null
+++ b/drivers/media/video/s5p-tv/mixer_vp_layer.c
@@ -0,0 +1,211 @@
+/*
+ * Samsung TV Mixer driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include "mixer.h"
+
+#include "regs-vp.h"
+
+#include <media/videobuf2-dma-contig.h>
+
+/* FORMAT DEFINITIONS */
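+/*
+ * Each .plane entry describes the block unit used for size computations:
+ * a block of .width x .height pixels occupies .size bytes.
+ */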
+static const struct mxr_format mxr_fmt_nv12 = {
+ .name = "NV12",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .num_planes = 2,
+ .plane = {
+ { .width = 1, .height = 1, .size = 1 },
+ { .width = 2, .height = 2, .size = 2 },
+ },
+ .num_subframes = 1,
+ .cookie = VP_MODE_NV12 | VP_MODE_MEM_LINEAR,
+};
+
+static const struct mxr_format mxr_fmt_nv21 = {
+ .name = "NV21",
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .num_planes = 2,
+ .plane = {
+ { .width = 1, .height = 1, .size = 1 },
+ { .width = 2, .height = 2, .size = 2 },
+ },
+ .num_subframes = 1,
+ .cookie = VP_MODE_NV21 | VP_MODE_MEM_LINEAR,
+};
+
+static const struct mxr_format mxr_fmt_nv12m = {
+ .name = "NV12 (mplane)",
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .num_planes = 2,
+ .plane = {
+ { .width = 1, .height = 1, .size = 1 },
+ { .width = 2, .height = 2, .size = 2 },
+ },
+ .num_subframes = 2,
+ .plane2subframe = {0, 1},
+ .cookie = VP_MODE_NV12 | VP_MODE_MEM_LINEAR,
+};
+
+static const struct mxr_format mxr_fmt_nv12mt = {
+ .name = "NV12 tiled (mplane)",
+ .fourcc = V4L2_PIX_FMT_NV12MT,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .num_planes = 2,
+ .plane = {
+ { .width = 128, .height = 32, .size = 4096 },
+ { .width = 128, .height = 32, .size = 2048 },
+ },
+ .num_subframes = 2,
+ .plane2subframe = {0, 1},
+ .cookie = VP_MODE_NV12 | VP_MODE_MEM_TILED,
+};
+
+static const struct mxr_format *mxr_video_format[] = {
+ &mxr_fmt_nv12,
+ &mxr_fmt_nv21,
+ &mxr_fmt_nv12m,
+ &mxr_fmt_nv12mt,
+};
+
+/* AUXILIARY CALLBACKS */
+
+static void mxr_vp_layer_release(struct mxr_layer *layer)
+{
+ mxr_base_layer_unregister(layer);
+ mxr_base_layer_release(layer);
+}
+
+static void mxr_vp_buffer_set(struct mxr_layer *layer,
+ struct mxr_buffer *buf)
+{
+ dma_addr_t luma_addr[2] = {0, 0};
+ dma_addr_t chroma_addr[2] = {0, 0};
+
+ if (buf == NULL) {
+ mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
+ return;
+ }
+ luma_addr[0] = vb2_dma_contig_plane_paddr(&buf->vb, 0);
+ if (layer->fmt->num_subframes == 2) {
+ chroma_addr[0] = vb2_dma_contig_plane_paddr(&buf->vb, 1);
+ } else {
+ /* FIXME: mxr_get_plane_size performs an integer division,
+  * which is slow and should not be done in interrupt context */
+ chroma_addr[0] = luma_addr[0] + mxr_get_plane_size(
+ &layer->fmt->plane[0], layer->geo.src.full_width,
+ layer->geo.src.full_height);
+ }
+ if (layer->fmt->cookie & VP_MODE_MEM_TILED) {
+ luma_addr[1] = luma_addr[0] + 0x40;
+ chroma_addr[1] = chroma_addr[0] + 0x40;
+ } else {
+ luma_addr[1] = luma_addr[0] + layer->geo.src.full_width;
+ chroma_addr[1] = chroma_addr[0];
+ }
+ mxr_reg_vp_buffer(layer->mdev, luma_addr, chroma_addr);
+}
+
+static void mxr_vp_stream_set(struct mxr_layer *layer, int en)
+{
+ mxr_reg_vp_layer_stream(layer->mdev, en);
+}
+
+static void mxr_vp_format_set(struct mxr_layer *layer)
+{
+ mxr_reg_vp_format(layer->mdev, layer->fmt, &layer->geo);
+}
+
+static void mxr_vp_fix_geometry(struct mxr_layer *layer)
+{
+ struct mxr_geometry *geo = &layer->geo;
+
+ /* align horizontal size to 8 pixels */
+ geo->src.full_width = ALIGN(geo->src.full_width, 8);
+ /* limit to boundary size */
+ geo->src.full_width = clamp_val(geo->src.full_width, 8, 8192);
+ geo->src.full_height = clamp_val(geo->src.full_height, 1, 8192);
+ geo->src.width = clamp_val(geo->src.width, 32, geo->src.full_width);
+ geo->src.width = min(geo->src.width, 2047U);
+ geo->src.height = clamp_val(geo->src.height, 4, geo->src.full_height);
+ geo->src.height = min(geo->src.height, 2047U);
+
+ /* setting size of output window */
+ geo->dst.width = clamp_val(geo->dst.width, 8, geo->dst.full_width);
+ geo->dst.height = clamp_val(geo->dst.height, 1, geo->dst.full_height);
+
+ /* ensure that scaling is in range 1/4x to 16x */
+ if (geo->src.width >= 4 * geo->dst.width)
+ geo->src.width = 4 * geo->dst.width;
+ if (geo->dst.width >= 16 * geo->src.width)
+ geo->dst.width = 16 * geo->src.width;
+ if (geo->src.height >= 4 * geo->dst.height)
+ geo->src.height = 4 * geo->dst.height;
+ if (geo->dst.height >= 16 * geo->src.height)
+ geo->dst.height = 16 * geo->src.height;
+
+ /* setting scaling ratio */
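+ /* ratios are 16.16 fixed point; values above 0x10000 mean downscaling */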
+ geo->x_ratio = (geo->src.width << 16) / geo->dst.width;
+ geo->y_ratio = (geo->src.height << 16) / geo->dst.height;
+
+ /* adjust offsets */
+ geo->src.x_offset = min(geo->src.x_offset,
+ geo->src.full_width - geo->src.width);
+ geo->src.y_offset = min(geo->src.y_offset,
+ geo->src.full_height - geo->src.height);
+ geo->dst.x_offset = min(geo->dst.x_offset,
+ geo->dst.full_width - geo->dst.width);
+ geo->dst.y_offset = min(geo->dst.y_offset,
+ geo->dst.full_height - geo->dst.height);
+}
+
+/* PUBLIC API */
+
+struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
+{
+ struct mxr_layer *layer;
+ int ret;
+ struct mxr_layer_ops ops = {
+ .release = mxr_vp_layer_release,
+ .buffer_set = mxr_vp_buffer_set,
+ .stream_set = mxr_vp_stream_set,
+ .format_set = mxr_vp_format_set,
+ .fix_geometry = mxr_vp_fix_geometry,
+ };
+ char name[32];
+
+ sprintf(name, "video%d", idx);
+
+ layer = mxr_base_layer_create(mdev, idx, name, &ops);
+ if (layer == NULL) {
+ mxr_err(mdev, "failed to initialize layer(%d) base\n", idx);
+ goto fail;
+ }
+
+ layer->fmt_array = mxr_video_format;
+ layer->fmt_array_size = ARRAY_SIZE(mxr_video_format);
+
+ ret = mxr_base_layer_register(layer);
+ if (ret)
+ goto fail_layer;
+
+ return layer;
+
+fail_layer:
+ mxr_base_layer_release(layer);
+
+fail:
+ return NULL;
+}
+
diff --git a/drivers/media/video/s5p-tv/regs-hdmi.h b/drivers/media/video/s5p-tv/regs-hdmi.h
new file mode 100644
index 00000000000..ac93ad6f2bc
--- /dev/null
+++ b/drivers/media/video/s5p-tv/regs-hdmi.h
@@ -0,0 +1,141 @@
+/* linux/arch/arm/mach-exynos4/include/mach/regs-hdmi.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * HDMI register header file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef SAMSUNG_REGS_HDMI_H
+#define SAMSUNG_REGS_HDMI_H
+
+/*
+ * Register part
+*/
+
+#define HDMI_CTRL_BASE(x) ((x) + 0x00000000)
+#define HDMI_CORE_BASE(x) ((x) + 0x00010000)
+#define HDMI_TG_BASE(x) ((x) + 0x00050000)
+
+/* Control registers */
+#define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000)
+#define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004)
+#define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C)
+#define HDMI_PHY_RSTOUT HDMI_CTRL_BASE(0x0014)
+#define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0018)
+#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x001C)
+#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0020)
+
+/* Core registers */
+#define HDMI_CON_0 HDMI_CORE_BASE(0x0000)
+#define HDMI_CON_1 HDMI_CORE_BASE(0x0004)
+#define HDMI_CON_2 HDMI_CORE_BASE(0x0008)
+#define HDMI_SYS_STATUS HDMI_CORE_BASE(0x0010)
+#define HDMI_PHY_STATUS HDMI_CORE_BASE(0x0014)
+#define HDMI_STATUS_EN HDMI_CORE_BASE(0x0020)
+#define HDMI_HPD HDMI_CORE_BASE(0x0030)
+#define HDMI_MODE_SEL HDMI_CORE_BASE(0x0040)
+#define HDMI_BLUE_SCREEN_0 HDMI_CORE_BASE(0x0050)
+#define HDMI_BLUE_SCREEN_1 HDMI_CORE_BASE(0x0054)
+#define HDMI_BLUE_SCREEN_2 HDMI_CORE_BASE(0x0058)
+#define HDMI_H_BLANK_0 HDMI_CORE_BASE(0x00A0)
+#define HDMI_H_BLANK_1 HDMI_CORE_BASE(0x00A4)
+#define HDMI_V_BLANK_0 HDMI_CORE_BASE(0x00B0)
+#define HDMI_V_BLANK_1 HDMI_CORE_BASE(0x00B4)
+#define HDMI_V_BLANK_2 HDMI_CORE_BASE(0x00B8)
+#define HDMI_H_V_LINE_0 HDMI_CORE_BASE(0x00C0)
+#define HDMI_H_V_LINE_1 HDMI_CORE_BASE(0x00C4)
+#define HDMI_H_V_LINE_2 HDMI_CORE_BASE(0x00C8)
+#define HDMI_VSYNC_POL HDMI_CORE_BASE(0x00E4)
+#define HDMI_INT_PRO_MODE HDMI_CORE_BASE(0x00E8)
+#define HDMI_V_BLANK_F_0 HDMI_CORE_BASE(0x0110)
+#define HDMI_V_BLANK_F_1 HDMI_CORE_BASE(0x0114)
+#define HDMI_V_BLANK_F_2 HDMI_CORE_BASE(0x0118)
+#define HDMI_H_SYNC_GEN_0 HDMI_CORE_BASE(0x0120)
+#define HDMI_H_SYNC_GEN_1 HDMI_CORE_BASE(0x0124)
+#define HDMI_H_SYNC_GEN_2 HDMI_CORE_BASE(0x0128)
+#define HDMI_V_SYNC_GEN_1_0 HDMI_CORE_BASE(0x0130)
+#define HDMI_V_SYNC_GEN_1_1 HDMI_CORE_BASE(0x0134)
+#define HDMI_V_SYNC_GEN_1_2 HDMI_CORE_BASE(0x0138)
+#define HDMI_V_SYNC_GEN_2_0 HDMI_CORE_BASE(0x0140)
+#define HDMI_V_SYNC_GEN_2_1 HDMI_CORE_BASE(0x0144)
+#define HDMI_V_SYNC_GEN_2_2 HDMI_CORE_BASE(0x0148)
+#define HDMI_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150)
+#define HDMI_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154)
+#define HDMI_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158)
+#define HDMI_AVI_CON HDMI_CORE_BASE(0x0300)
+#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n))
+#define HDMI_DC_CONTROL HDMI_CORE_BASE(0x05C0)
+#define HDMI_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x05C4)
+#define HDMI_HPD_GEN HDMI_CORE_BASE(0x05C8)
+
+/* Timing generator registers */
+#define HDMI_TG_CMD HDMI_TG_BASE(0x0000)
+#define HDMI_TG_H_FSZ_L HDMI_TG_BASE(0x0018)
+#define HDMI_TG_H_FSZ_H HDMI_TG_BASE(0x001C)
+#define HDMI_TG_HACT_ST_L HDMI_TG_BASE(0x0020)
+#define HDMI_TG_HACT_ST_H HDMI_TG_BASE(0x0024)
+#define HDMI_TG_HACT_SZ_L HDMI_TG_BASE(0x0028)
+#define HDMI_TG_HACT_SZ_H HDMI_TG_BASE(0x002C)
+#define HDMI_TG_V_FSZ_L HDMI_TG_BASE(0x0030)
+#define HDMI_TG_V_FSZ_H HDMI_TG_BASE(0x0034)
+#define HDMI_TG_VSYNC_L HDMI_TG_BASE(0x0038)
+#define HDMI_TG_VSYNC_H HDMI_TG_BASE(0x003C)
+#define HDMI_TG_VSYNC2_L HDMI_TG_BASE(0x0040)
+#define HDMI_TG_VSYNC2_H HDMI_TG_BASE(0x0044)
+#define HDMI_TG_VACT_ST_L HDMI_TG_BASE(0x0048)
+#define HDMI_TG_VACT_ST_H HDMI_TG_BASE(0x004C)
+#define HDMI_TG_VACT_SZ_L HDMI_TG_BASE(0x0050)
+#define HDMI_TG_VACT_SZ_H HDMI_TG_BASE(0x0054)
+#define HDMI_TG_FIELD_CHG_L HDMI_TG_BASE(0x0058)
+#define HDMI_TG_FIELD_CHG_H HDMI_TG_BASE(0x005C)
+#define HDMI_TG_VACT_ST2_L HDMI_TG_BASE(0x0060)
+#define HDMI_TG_VACT_ST2_H HDMI_TG_BASE(0x0064)
+#define HDMI_TG_VSYNC_TOP_HDMI_L HDMI_TG_BASE(0x0078)
+#define HDMI_TG_VSYNC_TOP_HDMI_H HDMI_TG_BASE(0x007C)
+#define HDMI_TG_VSYNC_BOT_HDMI_L HDMI_TG_BASE(0x0080)
+#define HDMI_TG_VSYNC_BOT_HDMI_H HDMI_TG_BASE(0x0084)
+#define HDMI_TG_FIELD_TOP_HDMI_L HDMI_TG_BASE(0x0088)
+#define HDMI_TG_FIELD_TOP_HDMI_H HDMI_TG_BASE(0x008C)
+#define HDMI_TG_FIELD_BOT_HDMI_L HDMI_TG_BASE(0x0090)
+#define HDMI_TG_FIELD_BOT_HDMI_H HDMI_TG_BASE(0x0094)
+
+/*
+ * Bit definition part
+ */
+
+/* HDMI_INTC_CON */
+#define HDMI_INTC_EN_GLOBAL (1 << 6)
+#define HDMI_INTC_EN_HPD_PLUG (1 << 3)
+#define HDMI_INTC_EN_HPD_UNPLUG (1 << 2)
+
+/* HDMI_INTC_FLAG */
+#define HDMI_INTC_FLAG_HPD_PLUG (1 << 3)
+#define HDMI_INTC_FLAG_HPD_UNPLUG (1 << 2)
+
+/* HDMI_PHY_RSTOUT */
+#define HDMI_PHY_SW_RSTOUT (1 << 0)
+
+/* HDMI_CORE_RSTOUT */
+#define HDMI_CORE_SW_RSTOUT (1 << 0)
+
+/* HDMI_CON_0 */
+#define HDMI_BLUE_SCR_EN (1 << 5)
+#define HDMI_EN (1 << 0)
+
+/* HDMI_PHY_STATUS */
+#define HDMI_PHY_STATUS_READY (1 << 0)
+
+/* HDMI_MODE_SEL */
+#define HDMI_MODE_HDMI_EN (1 << 1)
+#define HDMI_MODE_DVI_EN (1 << 0)
+#define HDMI_MODE_MASK (3 << 0)
+
+/* HDMI_TG_CMD */
+#define HDMI_TG_EN (1 << 0)
+
+#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/media/video/s5p-tv/regs-mixer.h b/drivers/media/video/s5p-tv/regs-mixer.h
new file mode 100644
index 00000000000..3c8442609c1
--- /dev/null
+++ b/drivers/media/video/s5p-tv/regs-mixer.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Mixer register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#ifndef SAMSUNG_REGS_MIXER_H
+#define SAMSUNG_REGS_MIXER_H
+
+/*
+ * Register part
+ */
+#define MXR_STATUS 0x0000
+#define MXR_CFG 0x0004
+#define MXR_INT_EN 0x0008
+#define MXR_INT_STATUS 0x000C
+#define MXR_LAYER_CFG 0x0010
+#define MXR_VIDEO_CFG 0x0014
+#define MXR_GRAPHIC0_CFG 0x0020
+#define MXR_GRAPHIC0_BASE 0x0024
+#define MXR_GRAPHIC0_SPAN 0x0028
+#define MXR_GRAPHIC0_SXY 0x002C
+#define MXR_GRAPHIC0_WH 0x0030
+#define MXR_GRAPHIC0_DXY 0x0034
+#define MXR_GRAPHIC0_BLANK 0x0038
+#define MXR_GRAPHIC1_CFG 0x0040
+#define MXR_GRAPHIC1_BASE 0x0044
+#define MXR_GRAPHIC1_SPAN 0x0048
+#define MXR_GRAPHIC1_SXY 0x004C
+#define MXR_GRAPHIC1_WH 0x0050
+#define MXR_GRAPHIC1_DXY 0x0054
+#define MXR_GRAPHIC1_BLANK 0x0058
+#define MXR_BG_CFG 0x0060
+#define MXR_BG_COLOR0 0x0064
+#define MXR_BG_COLOR1 0x0068
+#define MXR_BG_COLOR2 0x006C
+
+/* for parametrized access to layer registers */
+#define MXR_GRAPHIC_CFG(i) (0x0020 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE(i) (0x0024 + (i) * 0x20)
+#define MXR_GRAPHIC_SPAN(i) (0x0028 + (i) * 0x20)
+#define MXR_GRAPHIC_SXY(i) (0x002C + (i) * 0x20)
+#define MXR_GRAPHIC_WH(i) (0x0030 + (i) * 0x20)
+#define MXR_GRAPHIC_DXY(i) (0x0034 + (i) * 0x20)
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+#define MXR_MASK(high_bit, low_bit) \
+ (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
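+/* e.g. MXR_MASK(3, 1) == 0x0000000e */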
+
+#define MXR_MASK_VAL(val, high_bit, low_bit) \
+ (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
+
+/* bits for MXR_STATUS */
+#define MXR_STATUS_16_BURST (1 << 7)
+#define MXR_STATUS_BURST_MASK (1 << 7)
+#define MXR_STATUS_SYNC_ENABLE (1 << 2)
+#define MXR_STATUS_REG_RUN (1 << 0)
+
+/* bits for MXR_CFG */
+#define MXR_CFG_OUT_YUV444 (0 << 8)
+#define MXR_CFG_OUT_RGB888 (1 << 8)
+#define MXR_CFG_DST_SDO (0 << 7)
+#define MXR_CFG_DST_HDMI (1 << 7)
+#define MXR_CFG_DST_MASK (1 << 7)
+#define MXR_CFG_SCAN_HD_720 (0 << 6)
+#define MXR_CFG_SCAN_HD_1080 (1 << 6)
+#define MXR_CFG_GRP1_ENABLE (1 << 5)
+#define MXR_CFG_GRP0_ENABLE (1 << 4)
+#define MXR_CFG_VP_ENABLE (1 << 3)
+#define MXR_CFG_SCAN_INTERLACE (0 << 2)
+#define MXR_CFG_SCAN_PROGRASSIVE (1 << 2)
+#define MXR_CFG_SCAN_NTSC (0 << 1)
+#define MXR_CFG_SCAN_PAL (1 << 1)
+#define MXR_CFG_SCAN_SD (0 << 0)
+#define MXR_CFG_SCAN_HD (1 << 0)
+#define MXR_CFG_SCAN_MASK 0x47
+
+/* bits for MXR_GRAPHICn_CFG */
+#define MXR_GRP_CFG_COLOR_KEY_DISABLE (1 << 21)
+#define MXR_GRP_CFG_BLEND_PRE_MUL (1 << 20)
+#define MXR_GRP_CFG_FORMAT_VAL(x) MXR_MASK_VAL(x, 11, 8)
+#define MXR_GRP_CFG_FORMAT_MASK MXR_GRP_CFG_FORMAT_VAL(~0)
+#define MXR_GRP_CFG_ALPHA_VAL(x) MXR_MASK_VAL(x, 7, 0)
+
+/* bits for MXR_GRAPHICn_WH */
+#define MXR_GRP_WH_H_SCALE(x) MXR_MASK_VAL(x, 28, 28)
+#define MXR_GRP_WH_V_SCALE(x) MXR_MASK_VAL(x, 12, 12)
+#define MXR_GRP_WH_WIDTH(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_WH_HEIGHT(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_SXY */
+#define MXR_GRP_SXY_SX(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_SXY_SY(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_DXY */
+#define MXR_GRP_DXY_DX(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_DXY_DY(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_INT_EN */
+#define MXR_INT_EN_VSYNC (1 << 11)
+#define MXR_INT_EN_ALL (0x0f << 8)
+
+/* bit for MXR_INT_STATUS */
+#define MXR_INT_CLEAR_VSYNC (1 << 11)
+#define MXR_INT_STATUS_VSYNC (1 << 0)
+
+/* bit for MXR_LAYER_CFG */
+#define MXR_LAYER_CFG_GRP1_VAL(x) MXR_MASK_VAL(x, 11, 8)
+#define MXR_LAYER_CFG_GRP0_VAL(x) MXR_MASK_VAL(x, 7, 4)
+#define MXR_LAYER_CFG_VP_VAL(x) MXR_MASK_VAL(x, 3, 0)
+
+#endif /* SAMSUNG_REGS_MIXER_H */
+
diff --git a/drivers/media/video/s5p-tv/regs-sdo.h b/drivers/media/video/s5p-tv/regs-sdo.h
new file mode 100644
index 00000000000..7f7c2b8ac14
--- /dev/null
+++ b/drivers/media/video/s5p-tv/regs-sdo.h
@@ -0,0 +1,63 @@
+/* drivers/media/video/s5p-tv/regs-sdo.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * SDO register description file
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_SDO_H
+#define SAMSUNG_REGS_SDO_H
+
+/*
+ * Register part
+ */
+
+#define SDO_CLKCON 0x0000
+#define SDO_CONFIG 0x0008
+#define SDO_VBI 0x0014
+#define SDO_DAC 0x003C
+#define SDO_CCCON 0x0180
+#define SDO_IRQ 0x0280
+#define SDO_IRQMASK 0x0284
+#define SDO_VERSION 0x03D8
+
+/*
+ * Bit definition part
+ */
+
+/* SDO Clock Control Register (SDO_CLKCON) */
+#define SDO_TVOUT_SW_RESET (1 << 4)
+#define SDO_TVOUT_CLOCK_READY (1 << 1)
+#define SDO_TVOUT_CLOCK_ON (1 << 0)
+
+/* SDO Video Standard Configuration Register (SDO_CONFIG) */
+#define SDO_PROGRESSIVE (1 << 4)
+#define SDO_NTSC_M 0
+#define SDO_PAL_M 1
+#define SDO_PAL_BGHID 2
+#define SDO_PAL_N 3
+#define SDO_PAL_NC 4
+#define SDO_NTSC_443 8
+#define SDO_PAL_60 9
+#define SDO_STANDARD_MASK 0xf
+
+/* SDO VBI Configuration Register (SDO_VBI) */
+#define SDO_CVBS_WSS_INS (1 << 14)
+#define SDO_CVBS_CLOSED_CAPTION_MASK (3 << 12)
+
+/* SDO DAC Configuration Register (SDO_DAC) */
+#define SDO_POWER_ON_DAC (1 << 0)
+
+/* SDO Color Compensation On/Off Control (SDO_CCCON) */
+#define SDO_COMPENSATION_BHS_ADJ_OFF (1 << 4)
+#define SDO_COMPENSATION_CVBS_COMP_OFF (1 << 0)
+
+/* SDO Interrupt Request Register (SDO_IRQ) */
+#define SDO_VSYNC_IRQ_PEND (1 << 0)
+
+#endif /* SAMSUNG_REGS_SDO_H */
diff --git a/drivers/media/video/s5p-tv/regs-vp.h b/drivers/media/video/s5p-tv/regs-vp.h
new file mode 100644
index 00000000000..6c63984e11e
--- /dev/null
+++ b/drivers/media/video/s5p-tv/regs-vp.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Video processor register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_VP_H
+#define SAMSUNG_REGS_VP_H
+
+/*
+ * Register part
+ */
+
+#define VP_ENABLE 0x0000
+#define VP_SRESET 0x0004
+#define VP_SHADOW_UPDATE 0x0008
+#define VP_FIELD_ID 0x000C
+#define VP_MODE 0x0010
+#define VP_IMG_SIZE_Y 0x0014
+#define VP_IMG_SIZE_C 0x0018
+#define VP_PER_RATE_CTRL 0x001C
+#define VP_TOP_Y_PTR 0x0028
+#define VP_BOT_Y_PTR 0x002C
+#define VP_TOP_C_PTR 0x0030
+#define VP_BOT_C_PTR 0x0034
+#define VP_ENDIAN_MODE 0x03CC
+#define VP_SRC_H_POSITION 0x0044
+#define VP_SRC_V_POSITION 0x0048
+#define VP_SRC_WIDTH 0x004C
+#define VP_SRC_HEIGHT 0x0050
+#define VP_DST_H_POSITION 0x0054
+#define VP_DST_V_POSITION 0x0058
+#define VP_DST_WIDTH 0x005C
+#define VP_DST_HEIGHT 0x0060
+#define VP_H_RATIO 0x0064
+#define VP_V_RATIO 0x0068
+#define VP_POLY8_Y0_LL 0x006C
+#define VP_POLY4_Y0_LL 0x00EC
+#define VP_POLY4_C0_LL 0x012C
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+
+#define VP_MASK(high_bit, low_bit) \
+ (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define VP_MASK_VAL(val, high_bit, low_bit) \
+ (((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
+
+/* VP_ENABLE */
+#define VP_ENABLE_ON (1 << 0)
+
+/* VP_SRESET */
+#define VP_SRESET_PROCESSING (1 << 0)
+
+/* VP_SHADOW_UPDATE */
+#define VP_SHADOW_UPDATE_ENABLE (1 << 0)
+
+/* VP_MODE */
+#define VP_MODE_NV12 (0 << 6)
+#define VP_MODE_NV21 (1 << 6)
+#define VP_MODE_LINE_SKIP (1 << 5)
+#define VP_MODE_MEM_LINEAR (0 << 4)
+#define VP_MODE_MEM_TILED (1 << 4)
+#define VP_MODE_FMT_MASK (5 << 4)
+#define VP_MODE_FIELD_ID_AUTO_TOGGLING (1 << 2)
+#define VP_MODE_2D_IPC (1 << 1)
+
+/* VP_IMG_SIZE_Y */
+/* VP_IMG_SIZE_C */
+#define VP_IMG_HSIZE(x) VP_MASK_VAL(x, 29, 16)
+#define VP_IMG_VSIZE(x) VP_MASK_VAL(x, 13, 0)
+
+/* VP_SRC_H_POSITION */
+#define VP_SRC_H_POSITION_VAL(x) VP_MASK_VAL(x, 14, 4)
+
+/* VP_ENDIAN_MODE */
+#define VP_ENDIAN_MODE_LITTLE (1 << 0)
+
+#endif /* SAMSUNG_REGS_VP_H */
diff --git a/drivers/media/video/s5p-tv/sdo_drv.c b/drivers/media/video/s5p-tv/sdo_drv.c
new file mode 100644
index 00000000000..4dddd6bd635
--- /dev/null
+++ b/drivers/media/video/s5p-tv/sdo_drv.c
@@ -0,0 +1,479 @@
+/*
+ * Samsung Standard Definition Output (SDO) driver
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ *
+ * Tomasz Stanislawski, <t.stanislaws@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "regs-sdo.h"
+
+MODULE_AUTHOR("Tomasz Stanislawski, <t.stanislaws@samsung.com>");
+MODULE_DESCRIPTION("Samsung Standard Definition Output (SDO)");
+MODULE_LICENSE("GPL");
+
+#define SDO_DEFAULT_STD V4L2_STD_PAL
+
+struct sdo_format {
+ v4l2_std_id id;
+ /* all modes are 720 pixels wide */
+ unsigned int height;
+ unsigned int cookie;
+};
+
+struct sdo_device {
+ /** pointer to device parent */
+ struct device *dev;
+ /** base address of SDO registers */
+ void __iomem *regs;
+ /** SDO interrupt */
+ unsigned int irq;
+ /** DAC source clock */
+ struct clk *sclk_dac;
+ /** DAC clock */
+ struct clk *dac;
+ /** DAC physical interface */
+ struct clk *dacphy;
+ /** clock for control of VPLL */
+ struct clk *fout_vpll;
+ /** regulator for SDO IP power */
+ struct regulator *vdac;
+ /** regulator for SDO plug detection */
+ struct regulator *vdet;
+ /** subdev used as device interface */
+ struct v4l2_subdev sd;
+ /** current format */
+ const struct sdo_format *fmt;
+};
+
+static inline struct sdo_device *sd_to_sdev(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct sdo_device, sd);
+}
+
+static inline
+void sdo_write_mask(struct sdo_device *sdev, u32 reg_id, u32 value, u32 mask)
+{
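+ /* read-modify-write: bits selected by mask come from value,
+  * the remaining bits keep their current contents */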
+ u32 old = readl(sdev->regs + reg_id);
+ value = (value & mask) | (old & ~mask);
+ writel(value, sdev->regs + reg_id);
+}
+
+static inline
+void sdo_write(struct sdo_device *sdev, u32 reg_id, u32 value)
+{
+ writel(value, sdev->regs + reg_id);
+}
+
+static inline
+u32 sdo_read(struct sdo_device *sdev, u32 reg_id)
+{
+ return readl(sdev->regs + reg_id);
+}
+
+static irqreturn_t sdo_irq_handler(int irq, void *dev_data)
+{
+ struct sdo_device *sdev = dev_data;
+
+ /* clear interrupt */
+ sdo_write_mask(sdev, SDO_IRQ, ~0, SDO_VSYNC_IRQ_PEND);
+ return IRQ_HANDLED;
+}
+
+static void sdo_reg_debug(struct sdo_device *sdev)
+{
+#define DBGREG(reg_id) \
+ dev_info(sdev->dev, #reg_id " = %08x\n", \
+ sdo_read(sdev, reg_id))
+
+ DBGREG(SDO_CLKCON);
+ DBGREG(SDO_CONFIG);
+ DBGREG(SDO_VBI);
+ DBGREG(SDO_DAC);
+ DBGREG(SDO_IRQ);
+ DBGREG(SDO_IRQMASK);
+ DBGREG(SDO_VERSION);
+}
+
+static const struct sdo_format sdo_format[] = {
+ { V4L2_STD_PAL_N, .height = 576, .cookie = SDO_PAL_N },
+ { V4L2_STD_PAL_Nc, .height = 576, .cookie = SDO_PAL_NC },
+ { V4L2_STD_PAL_M, .height = 480, .cookie = SDO_PAL_M },
+ { V4L2_STD_PAL_60, .height = 480, .cookie = SDO_PAL_60 },
+ { V4L2_STD_NTSC_443, .height = 480, .cookie = SDO_NTSC_443 },
+ { V4L2_STD_PAL, .height = 576, .cookie = SDO_PAL_BGHID },
+ { V4L2_STD_NTSC_M, .height = 480, .cookie = SDO_NTSC_M },
+};
+
+static const struct sdo_format *sdo_find_format(v4l2_std_id id)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(sdo_format); ++i)
+ if (sdo_format[i].id & id)
+ return &sdo_format[i];
+ return NULL;
+}
+
+static int sdo_g_tvnorms_output(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ *std = V4L2_STD_NTSC_M | V4L2_STD_PAL_M | V4L2_STD_PAL |
+ V4L2_STD_PAL_N | V4L2_STD_PAL_Nc |
+ V4L2_STD_NTSC_443 | V4L2_STD_PAL_60;
+ return 0;
+}
+
+static int sdo_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ struct sdo_device *sdev = sd_to_sdev(sd);
+ const struct sdo_format *fmt;
+ fmt = sdo_find_format(std);
+ if (fmt == NULL)
+ return -EINVAL;
+ sdev->fmt = fmt;
+ return 0;
+}
+
+static int sdo_g_std_output(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ *std = sd_to_sdev(sd)->fmt->id;
+ return 0;
+}
+
+static int sdo_g_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct sdo_device *sdev = sd_to_sdev(sd);
+
+ if (!sdev->fmt)
+ return -ENXIO;
+ /* all modes are 720 pixels wide */
+ fmt->width = 720;
+ fmt->height = sdev->fmt->height;
+ fmt->code = V4L2_MBUS_FMT_FIXED;
+ fmt->field = V4L2_FIELD_INTERLACED;
+ return 0;
+}
+
+static int sdo_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct sdo_device *sdev = sd_to_sdev(sd);
+ struct device *dev = sdev->dev;
+ int ret;
+
+ dev_info(dev, "sdo_s_power(%d)\n", on);
+
+ if (on)
+ ret = pm_runtime_get_sync(dev);
+ else
+ ret = pm_runtime_put_sync(dev);
+
+ /* only values < 0 indicate errors */
+ return IS_ERR_VALUE(ret) ? ret : 0;
+}
+
+static int sdo_streamon(struct sdo_device *sdev)
+{
+ /* set proper clock for Timing Generator */
+ clk_set_rate(sdev->fout_vpll, 54000000);
+ dev_info(sdev->dev, "fout_vpll.rate = %lu\n",
+ clk_get_rate(sdev->fout_vpll));
+ /* enable clock in SDO */
+ sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_CLOCK_ON);
+ clk_enable(sdev->dacphy);
+ /* enable DAC */
+ sdo_write_mask(sdev, SDO_DAC, ~0, SDO_POWER_ON_DAC);
+ sdo_reg_debug(sdev);
+ return 0;
+}
+
+static int sdo_streamoff(struct sdo_device *sdev)
+{
+ int tries;
+
+ sdo_write_mask(sdev, SDO_DAC, 0, SDO_POWER_ON_DAC);
+ clk_disable(sdev->dacphy);
+ sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_CLOCK_ON);
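+ /* wait up to 100 ms for the TV-out clock to report ready */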
+ for (tries = 100; tries; --tries) {
+ if (sdo_read(sdev, SDO_CLKCON) & SDO_TVOUT_CLOCK_READY)
+ break;
+ mdelay(1);
+ }
+ if (tries == 0)
+ dev_err(sdev->dev, "failed to stop streaming\n");
+ return tries ? 0 : -EIO;
+}
+
+static int sdo_s_stream(struct v4l2_subdev *sd, int on)
+{
+ struct sdo_device *sdev = sd_to_sdev(sd);
+ return on ? sdo_streamon(sdev) : sdo_streamoff(sdev);
+}
+
+static const struct v4l2_subdev_core_ops sdo_sd_core_ops = {
+ .s_power = sdo_s_power,
+};
+
+static const struct v4l2_subdev_video_ops sdo_sd_video_ops = {
+ .s_std_output = sdo_s_std_output,
+ .g_std_output = sdo_g_std_output,
+ .g_tvnorms_output = sdo_g_tvnorms_output,
+ .g_mbus_fmt = sdo_g_mbus_fmt,
+ .s_stream = sdo_s_stream,
+};
+
+static const struct v4l2_subdev_ops sdo_sd_ops = {
+ .core = &sdo_sd_core_ops,
+ .video = &sdo_sd_video_ops,
+};
+
+static int sdo_runtime_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct sdo_device *sdev = sd_to_sdev(sd);
+
+ dev_info(dev, "suspend\n");
+ regulator_disable(sdev->vdet);
+ regulator_disable(sdev->vdac);
+ clk_disable(sdev->sclk_dac);
+ return 0;
+}
+
+static int sdo_runtime_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct sdo_device *sdev = sd_to_sdev(sd);
+
+ dev_info(dev, "resume\n");
+ clk_enable(sdev->sclk_dac);
+ regulator_enable(sdev->vdac);
+ regulator_enable(sdev->vdet);
+
+ /* software reset */
+ sdo_write_mask(sdev, SDO_CLKCON, ~0, SDO_TVOUT_SW_RESET);
+ mdelay(10);
+ sdo_write_mask(sdev, SDO_CLKCON, 0, SDO_TVOUT_SW_RESET);
+
+ /* setting TV mode */
+ sdo_write_mask(sdev, SDO_CONFIG, sdev->fmt->cookie, SDO_STANDARD_MASK);
+ /* XXX: forcing interlaced mode using undocumented bit */
+ sdo_write_mask(sdev, SDO_CONFIG, 0, SDO_PROGRESSIVE);
+ /* turn all VBI off */
+ sdo_write_mask(sdev, SDO_VBI, 0, SDO_CVBS_WSS_INS |
+ SDO_CVBS_CLOSED_CAPTION_MASK);
+ /* turn all post processing off */
+ sdo_write_mask(sdev, SDO_CCCON, ~0, SDO_COMPENSATION_BHS_ADJ_OFF |
+ SDO_COMPENSATION_CVBS_COMP_OFF);
+ sdo_reg_debug(sdev);
+ return 0;
+}
+
+static const struct dev_pm_ops sdo_pm_ops = {
+ .runtime_suspend = sdo_runtime_suspend,
+ .runtime_resume = sdo_runtime_resume,
+};
+
+static int __devinit sdo_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sdo_device *sdev;
+ struct resource *res;
+ int ret = 0;
+ struct clk *sclk_vpll;
+
+ dev_info(dev, "probe start\n");
+ sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
+ if (!sdev) {
+ dev_err(dev, "not enough memory.\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ sdev->dev = dev;
+
+ /* mapping registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "get memory resource failed.\n");
+ ret = -ENXIO;
+ goto fail_sdev;
+ }
+
+ sdev->regs = ioremap(res->start, resource_size(res));
+ if (sdev->regs == NULL) {
+ dev_err(dev, "register mapping failed.\n");
+ ret = -ENXIO;
+ goto fail_sdev;
+ }
+
+ /* acquiring interrupt */
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(dev, "get interrupt resource failed.\n");
+ ret = -ENXIO;
+ goto fail_regs;
+ }
+ ret = request_irq(res->start, sdo_irq_handler, 0, "s5p-sdo", sdev);
+ if (ret) {
+ dev_err(dev, "request interrupt failed.\n");
+ goto fail_regs;
+ }
+ sdev->irq = res->start;
+
+ /* acquire clocks */
+ sdev->sclk_dac = clk_get(dev, "sclk_dac");
+ if (IS_ERR_OR_NULL(sdev->sclk_dac)) {
+ dev_err(dev, "failed to get clock 'sclk_dac'\n");
+ ret = -ENXIO;
+ goto fail_irq;
+ }
+ sdev->dac = clk_get(dev, "dac");
+ if (IS_ERR_OR_NULL(sdev->dac)) {
+ dev_err(dev, "failed to get clock 'dac'\n");
+ ret = -ENXIO;
+ goto fail_sclk_dac;
+ }
+ sdev->dacphy = clk_get(dev, "dacphy");
+ if (IS_ERR_OR_NULL(sdev->dacphy)) {
+ dev_err(dev, "failed to get clock 'dacphy'\n");
+ ret = -ENXIO;
+ goto fail_dac;
+ }
+ sclk_vpll = clk_get(dev, "sclk_vpll");
+ if (IS_ERR_OR_NULL(sclk_vpll)) {
+ dev_err(dev, "failed to get clock 'sclk_vpll'\n");
+ ret = -ENXIO;
+ goto fail_dacphy;
+ }
+ clk_set_parent(sdev->sclk_dac, sclk_vpll);
+ clk_put(sclk_vpll);
+ sdev->fout_vpll = clk_get(dev, "fout_vpll");
+ if (IS_ERR_OR_NULL(sdev->fout_vpll)) {
+ dev_err(dev, "failed to get clock 'fout_vpll'\n");
+ goto fail_dacphy;
+ }
+ dev_info(dev, "fout_vpll.rate = %lu\n", clk_get_rate(sclk_vpll));
+
+ /* acquire regulator */
+ sdev->vdac = regulator_get(dev, "vdd33a_dac");
+ if (IS_ERR_OR_NULL(sdev->vdac)) {
+ dev_err(dev, "failed to get regulator 'vdac'\n");
+ goto fail_fout_vpll;
+ }
+ sdev->vdet = regulator_get(dev, "vdet");
+ if (IS_ERR_OR_NULL(sdev->vdet)) {
+ dev_err(dev, "failed to get regulator 'vdet'\n");
+ goto fail_vdac;
+ }
+
+ /* enable gate for dac clock, because mixer uses it */
+ clk_enable(sdev->dac);
+
+ /* configure power management */
+ pm_runtime_enable(dev);
+
+ /* configuration of interface subdevice */
+ v4l2_subdev_init(&sdev->sd, &sdo_sd_ops);
+ sdev->sd.owner = THIS_MODULE;
+ strlcpy(sdev->sd.name, "s5p-sdo", sizeof sdev->sd.name);
+
+ /* set default format */
+ sdev->fmt = sdo_find_format(SDO_DEFAULT_STD);
+ BUG_ON(sdev->fmt == NULL);
+
+ /* keep the subdev in the device's driver data for use by other drivers */
+ dev_set_drvdata(dev, &sdev->sd);
+
+ dev_info(dev, "probe succeeded\n");
+ return 0;
+
+fail_vdac:
+ regulator_put(sdev->vdac);
+fail_fout_vpll:
+ clk_put(sdev->fout_vpll);
+fail_dacphy:
+ clk_put(sdev->dacphy);
+fail_dac:
+ clk_put(sdev->dac);
+fail_sclk_dac:
+ clk_put(sdev->sclk_dac);
+fail_irq:
+ free_irq(sdev->irq, sdev);
+fail_regs:
+ iounmap(sdev->regs);
+fail_sdev:
+ kfree(sdev);
+fail:
+ dev_info(dev, "probe failed\n");
+ return ret;
+}
+
+static int __devexit sdo_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(&pdev->dev);
+ struct sdo_device *sdev = sd_to_sdev(sd);
+
+ pm_runtime_disable(&pdev->dev);
+ clk_disable(sdev->dac);
+ regulator_put(sdev->vdet);
+ regulator_put(sdev->vdac);
+ clk_put(sdev->fout_vpll);
+ clk_put(sdev->dacphy);
+ clk_put(sdev->dac);
+ clk_put(sdev->sclk_dac);
+ free_irq(sdev->irq, sdev);
+ iounmap(sdev->regs);
+ kfree(sdev);
+
+ dev_info(&pdev->dev, "remove successful\n");
+ return 0;
+}
+
+static struct platform_driver sdo_driver __refdata = {
+ .probe = sdo_probe,
+ .remove = __devexit_p(sdo_remove),
+ .driver = {
+ .name = "s5p-sdo",
+ .owner = THIS_MODULE,
+ .pm = &sdo_pm_ops,
+ }
+};
+
+static int __init sdo_init(void)
+{
+ int ret;
+ static const char banner[] __initdata = KERN_INFO \
+ "Samsung Standard Definition Output (SDO) driver, "
+ "(c) 2010-2011 Samsung Electronics Co., Ltd.\n";
+ printk(banner);
+
+ ret = platform_driver_register(&sdo_driver);
+ if (ret)
+ printk(KERN_ERR "SDO platform driver register failed\n");
+
+ return ret;
+}
+module_init(sdo_init);
+
+static void __exit sdo_exit(void)
+{
+ platform_driver_unregister(&sdo_driver);
+}
+module_exit(sdo_exit);
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index 0db90922ee9..f2ae405c74a 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -757,8 +757,8 @@ static int saa711x_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_CHROMA_AGC:
/* chroma gain cluster */
- if (state->agc->cur.val)
- state->gain->cur.val =
+ if (state->agc->val)
+ state->gain->val =
saa711x_read(sd, R_0F_CHROMA_GAIN_CNTL) & 0x7f;
break;
}
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index e2062b240e3..0f9fb99adeb 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -4951,8 +4951,9 @@ struct saa7134_board saa7134_boards[] = {
.audio_clock = 0x00187de7,
.tuner_type = TUNER_XC2028,
.radio_type = UNSET,
- .tuner_addr = ADDR_UNSET,
+ .tuner_addr = 0x61,
.radio_addr = ADDR_UNSET,
+ .mpeg = SAA7134_MPEG_DVB,
.inputs = {{
.name = name_tv,
.vmux = 3,
@@ -6992,6 +6993,11 @@ static int saa7134_xc2028_callback(struct saa7134_dev *dev,
msleep(10);
saa7134_set_gpio(dev, 18, 1);
break;
+ case SAA7134_BOARD_VIDEOMATE_T750:
+ saa7134_set_gpio(dev, 20, 0);
+ msleep(10);
+ saa7134_set_gpio(dev, 20, 1);
+ break;
}
return 0;
}
@@ -7451,6 +7457,11 @@ int saa7134_board_init1(struct saa7134_dev *dev)
saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x0e050000, 0x0c050000);
saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x0e050000, 0x0c050000);
break;
+ case SAA7134_BOARD_VIDEOMATE_T750:
+ /* enable the analog tuner */
+ saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, 0x00008000, 0x00008000);
+ saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, 0x00008000, 0x00008000);
+ break;
}
return 0;
}
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index f9be737ba6f..ca65cda3e10 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -39,6 +39,8 @@
MODULE_DESCRIPTION("v4l2 driver module for saa7130/34 based TV cards");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
+MODULE_VERSION(SAA7134_VERSION);
+
/* ------------------------------------------------------------------ */
@@ -1332,14 +1334,8 @@ static struct pci_driver saa7134_pci_driver = {
static int __init saa7134_init(void)
{
INIT_LIST_HEAD(&saa7134_devlist);
- printk(KERN_INFO "saa7130/34: v4l2 driver version %d.%d.%d loaded\n",
- (SAA7134_VERSION_CODE >> 16) & 0xff,
- (SAA7134_VERSION_CODE >> 8) & 0xff,
- SAA7134_VERSION_CODE & 0xff);
-#ifdef SNAPSHOT
- printk(KERN_INFO "saa7130/34: snapshot date %04d-%02d-%02d\n",
- SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
-#endif
+ printk(KERN_INFO "saa7130/34: v4l2 driver version %s loaded\n",
+ SAA7134_VERSION);
return pci_register_driver(&saa7134_pci_driver);
}
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 996a206c6d7..1e4ef166988 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -56,6 +56,7 @@
#include "lgs8gxx.h"
#include "zl10353.h"
+#include "qt1010.h"
#include "zl10036.h"
#include "zl10039.h"
@@ -939,6 +940,18 @@ static struct zl10353_config behold_x7_config = {
.disable_i2c_gate_ctrl = 1,
};
+static struct zl10353_config videomate_t750_zl10353_config = {
+ .demod_address = 0x0f,
+ .no_tuner = 1,
+ .parallel_ts = 1,
+ .disable_i2c_gate_ctrl = 1,
+};
+
+static struct qt1010_config videomate_t750_qt1010_config = {
+ .i2c_address = 0x62
+};
+
+
/* ==================================================================
* tda10086 based DVB-S cards, helper functions
*/
@@ -1650,6 +1663,18 @@ static int dvb_init(struct saa7134_dev *dev)
__func__);
break;
+ case SAA7134_BOARD_VIDEOMATE_T750:
+ fe0->dvb.frontend = dvb_attach(zl10353_attach,
+ &videomate_t750_zl10353_config,
+ &dev->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ if (dvb_attach(qt1010_attach,
+ fe0->dvb.frontend,
+ &dev->i2c_adap,
+ &videomate_t750_qt1010_config) == NULL)
+ wprintk("error attaching QT1010\n");
+ }
+ break;
case SAA7134_BOARD_ZOLID_HYBRID_PCI:
fe0->dvb.frontend = dvb_attach(tda10048_attach,
&zolid_tda10048_config,
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 18294db38a0..dde361a9194 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -172,7 +172,6 @@ static int empress_querycap(struct file *file, void *priv,
strlcpy(cap->card, saa7134_boards[dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->version = SAA7134_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index ff6c0e97563..d4ee24bf692 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -963,7 +963,7 @@ static int saa7134_raw_decode_irq(struct saa7134_dev *dev)
* to work with other protocols.
*/
if (!ir->active) {
- timeout = jiffies + jiffies_to_msecs(15);
+ timeout = jiffies + msecs_to_jiffies(15);
mod_timer(&ir->timer, timeout);
ir->active = true;
}
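
The hunk above swaps jiffies_to_msecs() for msecs_to_jiffies(): the timeout is a jiffies value, so the 15 ms delay has to be converted into jiffies, not the other way round. A minimal sketch of the corrected pattern (the timer and the 15 ms value are placeholders):

#include <linux/jiffies.h>
#include <linux/timer.h>

/*
 * Arm an already set up timer 15 ms from now. Using jiffies_to_msecs(15)
 * here would treat 15 as a jiffies count and return milliseconds, giving
 * an HZ-dependent and generally much longer delay.
 */
static void arm_idle_timer(struct timer_list *timer)
{
	mod_timer(timer, jiffies + msecs_to_jiffies(15));
}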
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 776ba2dd7f9..9cf7914f6f9 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1810,7 +1810,6 @@ static int saa7134_querycap(struct file *file, void *priv,
strlcpy(cap->card, saa7134_boards[dev->board].name,
sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->version = SAA7134_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VBI_CAPTURE |
@@ -2307,7 +2306,6 @@ static int radio_querycap(struct file *file, void *priv,
strcpy(cap->driver, "saa7134");
strlcpy(cap->card, saa7134_boards[dev->board].name, sizeof(cap->card));
sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
- cap->version = SAA7134_VERSION_CODE;
cap->capabilities = V4L2_CAP_TUNER;
return 0;
}
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index 28eb1039832..bc8d6bba8ee 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -19,8 +19,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/version.h>
-#define SAA7134_VERSION_CODE KERNEL_VERSION(0, 2, 16)
+#define SAA7134_VERSION "0, 2, 17"
#include <linux/pci.h>
#include <linux/i2c.h>
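
Together with dropping <linux/version.h> and the KERNEL_VERSION()-encoded SAA7134_VERSION_CODE (and the cap->version assignments removed elsewhere in this series), the driver version becomes a plain string exported via MODULE_VERSION(). A minimal sketch of that convention, with a hypothetical module:

#include <linux/module.h>

#define MYDRV_VERSION "0.2.17"	/* hypothetical version string */

MODULE_DESCRIPTION("example module using a string version");
MODULE_LICENSE("GPL");
MODULE_VERSION(MYDRV_VERSION);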
diff --git a/drivers/media/video/saa7164/saa7164-encoder.c b/drivers/media/video/saa7164/saa7164-encoder.c
index 400364569c8..2fd38a01887 100644
--- a/drivers/media/video/saa7164/saa7164-encoder.c
+++ b/drivers/media/video/saa7164/saa7164-encoder.c
@@ -1246,7 +1246,6 @@ static unsigned int fops_poll(struct file *file, poll_table *wait)
struct saa7164_encoder_fh *fh =
(struct saa7164_encoder_fh *)file->private_data;
struct saa7164_port *port = fh->port;
- struct saa7164_user_buffer *ubuf;
unsigned int mask = 0;
port->last_poll_msecs_diff = port->last_poll_msecs;
@@ -1278,10 +1277,7 @@ static unsigned int fops_poll(struct file *file, poll_table *wait)
}
/* Pull the first buffer from the used list */
- ubuf = list_first_entry(&port->list_buf_used.list,
- struct saa7164_user_buffer, list);
-
- if (ubuf)
+ if (!list_empty(&port->list_buf_used.list))
mask |= POLLIN | POLLRDNORM;
return mask;
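
The poll fix above relies on the fact that list_first_entry() never returns NULL: on an empty list it simply computes a bogus pointer from the list head, so the old "if (ubuf)" check could never fail. A minimal sketch of the corrected check, with hypothetical names:

#include <linux/list.h>
#include <linux/poll.h>

/*
 * Only report readability when the used-buffer list really holds an entry;
 * list_first_entry() may only be called once list_empty() says it is safe.
 */
static unsigned int report_readable(struct list_head *used_list)
{
	unsigned int mask = 0;

	if (!list_empty(used_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}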
diff --git a/drivers/media/video/saa7164/saa7164-vbi.c b/drivers/media/video/saa7164/saa7164-vbi.c
index bc1fcedba87..e2e03415871 100644
--- a/drivers/media/video/saa7164/saa7164-vbi.c
+++ b/drivers/media/video/saa7164/saa7164-vbi.c
@@ -1192,7 +1192,6 @@ static unsigned int fops_poll(struct file *file, poll_table *wait)
{
struct saa7164_vbi_fh *fh = (struct saa7164_vbi_fh *)file->private_data;
struct saa7164_port *port = fh->port;
- struct saa7164_user_buffer *ubuf;
unsigned int mask = 0;
port->last_poll_msecs_diff = port->last_poll_msecs;
@@ -1224,10 +1223,7 @@ static unsigned int fops_poll(struct file *file, poll_table *wait)
}
/* Pull the first buffer from the used list */
- ubuf = list_first_entry(&port->list_buf_used.list,
- struct saa7164_user_buffer, list);
-
- if (ubuf)
+ if (!list_empty(&port->list_buf_used.list))
mask |= POLLIN | POLLRDNORM;
return mask;
diff --git a/drivers/media/video/saa7164/saa7164.h b/drivers/media/video/saa7164/saa7164.h
index 16745d2fb34..6678bf1e781 100644
--- a/drivers/media/video/saa7164/saa7164.h
+++ b/drivers/media/video/saa7164/saa7164.h
@@ -48,7 +48,6 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/kdev_t.h>
-#include <linux/version.h>
#include <linux/mutex.h>
#include <linux/crc32.h>
#include <linux/kthread.h>
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 3ae5c9c58cb..e54089802b6 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -27,7 +27,6 @@
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/time.h>
-#include <linux/version.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
@@ -39,6 +38,7 @@
#include <media/v4l2-dev.h>
#include <media/soc_camera.h>
#include <media/sh_mobile_ceu.h>
+#include <media/sh_mobile_csi2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-mediabus.h>
#include <media/soc_mediabus.h>
@@ -96,6 +96,7 @@ struct sh_mobile_ceu_buffer {
struct sh_mobile_ceu_dev {
struct soc_camera_host ici;
struct soc_camera_device *icd;
+ struct platform_device *csi2_pdev;
unsigned int irq;
void __iomem *base;
@@ -205,7 +206,7 @@ static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
if (2 != success) {
- dev_warn(&icd->dev, "soft reset time out\n");
+ dev_warn(icd->pdev, "soft reset time out\n");
return -EIO;
}
@@ -220,7 +221,7 @@ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
unsigned long sizes[], void *alloc_ctxs[])
{
struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
icd->current_fmt->host_fmt);
@@ -242,7 +243,7 @@ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
*count = pcdev->video_limit / PAGE_ALIGN(sizes[0]);
}
- dev_dbg(icd->dev.parent, "count=%d, size=%lu\n", *count, sizes[0]);
+ dev_dbg(icd->parent, "count=%d, size=%lu\n", *count, sizes[0]);
return 0;
}
@@ -351,7 +352,7 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
buf = to_ceu_vb(vb);
- dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
/* Added list head initialization on alloc */
@@ -371,7 +372,7 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
size = icd->user_height * bytes_per_line;
if (vb2_plane_size(vb, 0) < size) {
- dev_err(icd->dev.parent, "Buffer too small (%lu < %lu)\n",
+ dev_err(icd->parent, "Buffer too small (%lu < %lu)\n",
vb2_plane_size(vb, 0), size);
return -ENOBUFS;
}
@@ -384,11 +385,11 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
{
struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
- dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
spin_lock_irq(&pcdev->lock);
@@ -409,7 +410,7 @@ static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
{
struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
@@ -421,8 +422,12 @@ static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
pcdev->active = NULL;
}
- /* Doesn't hurt also if the list is empty */
- list_del_init(&buf->queue);
+ /*
+ * Doesn't hurt if the list is empty, but it does hurt if queuing the
+ * buffer failed and .buf_init() hasn't been called
+ */
+ if (buf->queue.next)
+ list_del_init(&buf->queue);
spin_unlock_irq(&pcdev->lock);
}
@@ -437,7 +442,7 @@ static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
static int sh_mobile_ceu_stop_streaming(struct vb2_queue *q)
{
struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct list_head *buf_head, *tmp;
@@ -499,25 +504,48 @@ out:
return IRQ_HANDLED;
}
+static struct v4l2_subdev *find_csi2(struct sh_mobile_ceu_dev *pcdev)
+{
+ struct v4l2_subdev *sd;
+
+ if (!pcdev->csi2_pdev)
+ return NULL;
+
+ v4l2_device_for_each_subdev(sd, &pcdev->ici.v4l2_dev)
+ if (&pcdev->csi2_pdev->dev == v4l2_get_subdevdata(sd))
+ return sd;
+
+ return NULL;
+}
+
/* Called with .video_lock held */
static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct v4l2_subdev *csi2_sd;
int ret;
if (pcdev->icd)
return -EBUSY;
- dev_info(icd->dev.parent,
+ dev_info(icd->parent,
"SuperH Mobile CEU driver attached to camera %d\n",
icd->devnum);
pm_runtime_get_sync(ici->v4l2_dev.dev);
ret = sh_mobile_ceu_soft_reset(pcdev);
- if (!ret)
+
+ csi2_sd = find_csi2(pcdev);
+
+ ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
+ if (ret != -ENODEV && ret != -ENOIOCTLCMD && ret < 0) {
+ pm_runtime_put_sync(ici->v4l2_dev.dev);
+ } else {
pcdev->icd = icd;
+ ret = 0;
+ }
return ret;
}
@@ -525,11 +553,13 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
/* Called with .video_lock held */
static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
+ struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
BUG_ON(icd != pcdev->icd);
+ v4l2_subdev_call(csi2_sd, core, s_power, 0);
/* disable capture, disable interrupts */
ceu_write(pcdev, CEIER, 0);
sh_mobile_ceu_soft_reset(pcdev);
@@ -545,7 +575,7 @@ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
pm_runtime_put_sync(ici->v4l2_dev.dev);
- dev_info(icd->dev.parent,
+ dev_info(icd->parent,
"SuperH Mobile CEU driver detached from camera %d\n",
icd->devnum);
@@ -585,14 +615,14 @@ static u16 calc_scale(unsigned int src, unsigned int *dst)
/* rect is guaranteed to not exceed the scaled camera rectangle */
static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct sh_mobile_ceu_dev *pcdev = ici->priv;
unsigned int height, width, cdwdr_width, in_width, in_height;
unsigned int left_offset, top_offset;
u32 camor;
- dev_geo(icd->dev.parent, "Crop %ux%u@%u:%u\n",
+ dev_geo(icd->parent, "Crop %ux%u@%u:%u\n",
icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top);
left_offset = cam->ceu_left;
@@ -641,7 +671,7 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
}
/* CSI2 special configuration */
- if (pcdev->pdata->csi2_dev) {
+ if (pcdev->pdata->csi2) {
in_width = ((in_width - 2) * 2);
left_offset *= 2;
}
@@ -649,7 +679,7 @@ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
/* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
camor = left_offset | (top_offset << 16);
- dev_geo(icd->dev.parent,
+ dev_geo(icd->parent,
"CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor,
(in_height << 16) | in_width, (height << 16) | width,
cdwdr_width);
@@ -697,7 +727,7 @@ static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
__u32 pixfmt)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
int ret;
unsigned long camera_flags, common_flags, value;
@@ -783,7 +813,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
value |= pcdev->is_16bit ? 1 << 12 : 0;
/* CSI2 mode */
- if (pcdev->pdata->csi2_dev)
+ if (pcdev->pdata->csi2)
value |= 3 << 12;
ceu_write(pcdev, CAMCR, value);
@@ -806,7 +836,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
sh_mobile_ceu_set_rect(icd);
mdelay(1);
- dev_geo(icd->dev.parent, "CFLCR 0x%x\n", pcdev->cflcr);
+ dev_geo(icd->parent, "CFLCR 0x%x\n", pcdev->cflcr);
ceu_write(pcdev, CFLCR, pcdev->cflcr);
/*
@@ -829,7 +859,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
ceu_write(pcdev, CDOCR, value);
ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */
- dev_dbg(icd->dev.parent, "S_FMT successful for %c%c%c%c %ux%u\n",
+ dev_dbg(icd->parent, "S_FMT successful for %c%c%c%c %ux%u\n",
pixfmt & 0xff, (pixfmt >> 8) & 0xff,
(pixfmt >> 16) & 0xff, (pixfmt >> 24) & 0xff,
icd->user_width, icd->user_height);
@@ -843,7 +873,7 @@ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
unsigned char buswidth)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
unsigned long camera_flags, common_flags;
@@ -901,7 +931,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
struct soc_camera_format_xlate *xlate)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
struct soc_camera_host *ici = to_soc_camera_host(dev);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
int ret, k, n;
@@ -921,7 +951,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
return 0;
}
- if (!pcdev->pdata->csi2_dev) {
+ if (!pcdev->pdata->csi2) {
ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
if (ret < 0)
return 0;
@@ -1244,7 +1274,7 @@ static int client_s_fmt(struct soc_camera_device *icd,
{
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h;
unsigned int max_width, max_height;
struct v4l2_cropcap cap;
@@ -1313,7 +1343,7 @@ static int client_scale(struct soc_camera_device *icd,
bool ceu_can_scale)
{
struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
struct v4l2_mbus_framefmt mf_tmp = *mf;
unsigned int scale_h, scale_v;
int ret;
@@ -1363,13 +1393,13 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
struct v4l2_crop *a)
{
struct v4l2_rect *rect = &a->c;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct v4l2_crop cam_crop;
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_rect *cam_rect = &cam_crop.c;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct device *dev = icd->dev.parent;
struct v4l2_mbus_framefmt mf;
unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v,
out_width, out_height;
@@ -1511,7 +1541,7 @@ static void calculate_client_output(struct soc_camera_device *icd,
struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf)
{
struct sh_mobile_ceu_cam *cam = icd->host_priv;
- struct device *dev = icd->dev.parent;
+ struct device *dev = icd->parent;
struct v4l2_rect *cam_subrect = &cam->subrect;
unsigned int scale_v, scale_h;
@@ -1555,12 +1585,12 @@ static void calculate_client_output(struct soc_camera_device *icd,
static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct device *dev = icd->parent;
+ struct soc_camera_host *ici = to_soc_camera_host(dev);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
struct sh_mobile_ceu_cam *cam = icd->host_priv;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_mbus_framefmt mf;
- struct device *dev = icd->dev.parent;
__u32 pixfmt = pix->pixelformat;
const struct soc_camera_format_xlate *xlate;
/* Keep Compiler Happy */
@@ -1684,12 +1714,12 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
int width, height;
int ret;
- dev_geo(icd->dev.parent, "TRY_FMT(pix=0x%x, %ux%u)\n",
+ dev_geo(icd->parent, "TRY_FMT(pix=0x%x, %ux%u)\n",
pixfmt, pix->width, pix->height);
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
if (!xlate) {
- dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt);
+ dev_warn(icd->parent, "Format %x not found\n", pixfmt);
return -EINVAL;
}
@@ -1701,11 +1731,6 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
width = pix->width;
height = pix->height;
- pix->bytesperline = soc_mbus_bytes_per_line(width, xlate->host_fmt);
- if ((int)pix->bytesperline < 0)
- return pix->bytesperline;
- pix->sizeimage = height * pix->bytesperline;
-
/* limit to sensor capabilities */
mf.width = pix->width;
mf.height = pix->height;
@@ -1741,7 +1766,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
try_mbus_fmt, &mf);
if (ret < 0) {
/* Shouldn't actually happen... */
- dev_err(icd->dev.parent,
+ dev_err(icd->parent,
"FIXME: client try_fmt() = %d\n", ret);
return ret;
}
@@ -1753,7 +1778,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
pix->height = height;
}
- dev_geo(icd->dev.parent, "%s(): return %d, fmt 0x%x, %ux%u\n",
+ dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n",
__func__, ret, pix->pixelformat, pix->width, pix->height);
return ret;
@@ -1763,7 +1788,7 @@ static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
struct v4l2_crop *a)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
u32 out_width = icd->user_width, out_height = icd->user_height;
int ret;
@@ -1775,13 +1800,13 @@ static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
/* Stop the client */
ret = v4l2_subdev_call(sd, video, s_stream, 0);
if (ret < 0)
- dev_warn(icd->dev.parent,
+ dev_warn(icd->parent,
"Client failed to stop the stream: %d\n", ret);
else
/* Do the crop, if it fails, there's nothing more we can do */
sh_mobile_ceu_set_crop(icd, a);
- dev_geo(icd->dev.parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height);
+ dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height);
if (icd->user_width != out_width || icd->user_height != out_height) {
struct v4l2_format f = {
@@ -1827,7 +1852,6 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
struct v4l2_capability *cap)
{
strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
- cap->version = KERNEL_VERSION(0, 0, 5);
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
return 0;
}
@@ -1848,7 +1872,7 @@ static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
static int sh_mobile_ceu_get_ctrl(struct soc_camera_device *icd,
struct v4l2_control *ctrl)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
u32 val;
@@ -1864,7 +1888,7 @@ static int sh_mobile_ceu_get_ctrl(struct soc_camera_device *icd,
static int sh_mobile_ceu_set_ctrl(struct soc_camera_device *icd,
struct v4l2_control *ctrl)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct sh_mobile_ceu_dev *pcdev = ici->priv;
switch (ctrl->id) {
@@ -1950,7 +1974,7 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
.completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion),
.notifier.notifier_call = bus_notify,
};
- struct device *csi2;
+ struct sh_mobile_ceu_companion *csi2;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
@@ -2023,26 +2047,61 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
pcdev->ici.drv_name = dev_name(&pdev->dev);
pcdev->ici.ops = &sh_mobile_ceu_host_ops;
+ pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(pcdev->alloc_ctx)) {
+ err = PTR_ERR(pcdev->alloc_ctx);
+ goto exit_free_clk;
+ }
+
+ err = soc_camera_host_register(&pcdev->ici);
+ if (err)
+ goto exit_free_ctx;
+
/* CSI2 interfacing */
- csi2 = pcdev->pdata->csi2_dev;
+ csi2 = pcdev->pdata->csi2;
if (csi2) {
- wait.dev = csi2;
+ struct platform_device *csi2_pdev =
+ platform_device_alloc("sh-mobile-csi2", csi2->id);
+ struct sh_csi2_pdata *csi2_pdata = csi2->platform_data;
+
+ if (!csi2_pdev) {
+ err = -ENOMEM;
+ goto exit_host_unregister;
+ }
+
+ pcdev->csi2_pdev = csi2_pdev;
+
+ err = platform_device_add_data(csi2_pdev, csi2_pdata, sizeof(*csi2_pdata));
+ if (err < 0)
+ goto exit_pdev_put;
+
+ csi2_pdata = csi2_pdev->dev.platform_data;
+ csi2_pdata->v4l2_dev = &pcdev->ici.v4l2_dev;
+
+ csi2_pdev->resource = csi2->resource;
+ csi2_pdev->num_resources = csi2->num_resources;
+
+ err = platform_device_add(csi2_pdev);
+ if (err < 0)
+ goto exit_pdev_put;
+
+ wait.dev = &csi2_pdev->dev;
err = bus_register_notifier(&platform_bus_type, &wait.notifier);
if (err < 0)
- goto exit_free_clk;
+ goto exit_pdev_unregister;
/*
* From this point the driver module will not unload, until
* we complete the completion.
*/
- if (!csi2->driver) {
+ if (!csi2_pdev->dev.driver) {
complete(&wait.completion);
/* Either too late, or probing failed */
bus_unregister_notifier(&platform_bus_type, &wait.notifier);
err = -ENXIO;
- goto exit_free_clk;
+ goto exit_pdev_unregister;
}
/*
@@ -2051,34 +2110,28 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
* the "owner" is safe!
*/
- err = try_module_get(csi2->driver->owner);
+ err = try_module_get(csi2_pdev->dev.driver->owner);
/* Let notifier complete, if it has been locked */
complete(&wait.completion);
bus_unregister_notifier(&platform_bus_type, &wait.notifier);
if (!err) {
err = -ENODEV;
- goto exit_free_clk;
+ goto exit_pdev_unregister;
}
}
- pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(pcdev->alloc_ctx)) {
- err = PTR_ERR(pcdev->alloc_ctx);
- goto exit_module_put;
- }
-
- err = soc_camera_host_register(&pcdev->ici);
- if (err)
- goto exit_free_ctx;
-
return 0;
+exit_pdev_unregister:
+ platform_device_del(pcdev->csi2_pdev);
+exit_pdev_put:
+ pcdev->csi2_pdev->resource = NULL;
+ platform_device_put(pcdev->csi2_pdev);
+exit_host_unregister:
+ soc_camera_host_unregister(&pcdev->ici);
exit_free_ctx:
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
-exit_module_put:
- if (csi2 && csi2->driver)
- module_put(csi2->driver->owner);
exit_free_clk:
pm_runtime_disable(&pdev->dev);
free_irq(pcdev->irq, pcdev);
@@ -2098,7 +2151,7 @@ static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev)
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct sh_mobile_ceu_dev *pcdev = container_of(soc_host,
struct sh_mobile_ceu_dev, ici);
- struct device *csi2 = pcdev->pdata->csi2_dev;
+ struct platform_device *csi2_pdev = pcdev->csi2_pdev;
soc_camera_host_unregister(soc_host);
pm_runtime_disable(&pdev->dev);
@@ -2107,8 +2160,13 @@ static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev)
dma_release_declared_memory(&pdev->dev);
iounmap(pcdev->base);
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
- if (csi2 && csi2->driver)
- module_put(csi2->driver->owner);
+ if (csi2_pdev && csi2_pdev->dev.driver) {
+ struct module *csi2_drv = csi2_pdev->dev.driver->owner;
+ platform_device_del(csi2_pdev);
+ csi2_pdev->resource = NULL;
+ platform_device_put(csi2_pdev);
+ module_put(csi2_drv);
+ }
kfree(pcdev);
return 0;
@@ -2158,4 +2216,5 @@ module_exit(sh_mobile_ceu_exit);
MODULE_DESCRIPTION("SuperH Mobile CEU driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.6");
MODULE_ALIAS("platform:sh_mobile_ceu");
diff --git a/drivers/media/video/sh_mobile_csi2.c b/drivers/media/video/sh_mobile_csi2.c
index 98b87481fa9..2893a0134c7 100644
--- a/drivers/media/video/sh_mobile_csi2.c
+++ b/drivers/media/video/sh_mobile_csi2.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <media/sh_mobile_ceu.h>
#include <media/sh_mobile_csi2.h>
#include <media/soc_camera.h>
#include <media/v4l2-common.h>
@@ -33,7 +34,6 @@
struct sh_csi2 {
struct v4l2_subdev subdev;
struct list_head list;
- struct notifier_block notifier;
unsigned int irq;
void __iomem *base;
struct platform_device *pdev;
@@ -132,13 +132,6 @@ static struct v4l2_subdev_video_ops sh_csi2_subdev_video_ops = {
.try_mbus_fmt = sh_csi2_try_fmt,
};
-static struct v4l2_subdev_core_ops sh_csi2_subdev_core_ops;
-
-static struct v4l2_subdev_ops sh_csi2_subdev_ops = {
- .core = &sh_csi2_subdev_core_ops,
- .video = &sh_csi2_subdev_video_ops,
-};
-
static void sh_csi2_hwinit(struct sh_csi2 *priv)
{
struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
@@ -186,65 +179,84 @@ static unsigned long sh_csi2_query_bus_param(struct soc_camera_device *icd)
return soc_camera_apply_sensor_flags(icl, flags);
}
-static int sh_csi2_notify(struct notifier_block *nb,
- unsigned long action, void *data)
+static int sh_csi2_client_connect(struct sh_csi2 *priv)
{
- struct device *dev = data;
- struct soc_camera_device *icd = to_soc_camera_dev(dev);
- struct v4l2_device *v4l2_dev = dev_get_drvdata(dev->parent);
- struct sh_csi2 *priv =
- container_of(nb, struct sh_csi2, notifier);
struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
- int ret, i;
+ struct v4l2_subdev *sd, *csi2_sd = &priv->subdev;
+ struct soc_camera_device *icd = NULL;
+ struct device *dev = v4l2_get_subdevdata(&priv->subdev);
+ int i;
+
+ v4l2_device_for_each_subdev(sd, csi2_sd->v4l2_dev)
+ if (sd->grp_id) {
+ icd = (struct soc_camera_device *)sd->grp_id;
+ break;
+ }
+
+ if (!icd)
+ return -EINVAL;
for (i = 0; i < pdata->num_clients; i++)
if (&pdata->clients[i].pdev->dev == icd->pdev)
break;
- dev_dbg(dev, "%s(%p): action = %lu, found #%d\n", __func__, dev, action, i);
+ dev_dbg(dev, "%s(%p): found #%d\n", __func__, dev, i);
if (i == pdata->num_clients)
- return NOTIFY_DONE;
+ return -ENODEV;
- switch (action) {
- case BUS_NOTIFY_BOUND_DRIVER:
- snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s%s",
- dev_name(v4l2_dev->dev), ".mipi-csi");
- priv->subdev.grp_id = (long)icd;
- ret = v4l2_device_register_subdev(v4l2_dev, &priv->subdev);
- dev_dbg(dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret);
- if (ret < 0)
- return NOTIFY_DONE;
+ priv->client = pdata->clients + i;
- priv->client = pdata->clients + i;
+ priv->set_bus_param = icd->ops->set_bus_param;
+ priv->query_bus_param = icd->ops->query_bus_param;
+ icd->ops->set_bus_param = sh_csi2_set_bus_param;
+ icd->ops->query_bus_param = sh_csi2_query_bus_param;
- priv->set_bus_param = icd->ops->set_bus_param;
- priv->query_bus_param = icd->ops->query_bus_param;
- icd->ops->set_bus_param = sh_csi2_set_bus_param;
- icd->ops->query_bus_param = sh_csi2_query_bus_param;
+ csi2_sd->grp_id = (long)icd;
- pm_runtime_get_sync(v4l2_get_subdevdata(&priv->subdev));
+ pm_runtime_get_sync(dev);
- sh_csi2_hwinit(priv);
- break;
- case BUS_NOTIFY_UNBIND_DRIVER:
- priv->client = NULL;
+ sh_csi2_hwinit(priv);
- /* Driver is about to be unbound */
- icd->ops->set_bus_param = priv->set_bus_param;
- icd->ops->query_bus_param = priv->query_bus_param;
- priv->set_bus_param = NULL;
- priv->query_bus_param = NULL;
+ return 0;
+}
- v4l2_device_unregister_subdev(&priv->subdev);
+static void sh_csi2_client_disconnect(struct sh_csi2 *priv)
+{
+ struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
- pm_runtime_put(v4l2_get_subdevdata(&priv->subdev));
- break;
- }
+ priv->client = NULL;
+ priv->subdev.grp_id = 0;
- return NOTIFY_OK;
+ /* Driver is about to be unbound */
+ icd->ops->set_bus_param = priv->set_bus_param;
+ icd->ops->query_bus_param = priv->query_bus_param;
+ priv->set_bus_param = NULL;
+ priv->query_bus_param = NULL;
+
+ pm_runtime_put(v4l2_get_subdevdata(&priv->subdev));
}
+static int sh_csi2_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
+
+ if (on)
+ return sh_csi2_client_connect(priv);
+
+ sh_csi2_client_disconnect(priv);
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops sh_csi2_subdev_core_ops = {
+ .s_power = sh_csi2_s_power,
+};
+
+static struct v4l2_subdev_ops sh_csi2_subdev_ops = {
+ .core = &sh_csi2_subdev_core_ops,
+ .video = &sh_csi2_subdev_video_ops,
+};
+
static __devinit int sh_csi2_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -274,14 +286,6 @@ static __devinit int sh_csi2_probe(struct platform_device *pdev)
return -ENOMEM;
priv->irq = irq;
- priv->notifier.notifier_call = sh_csi2_notify;
-
- /* We MUST attach after the MIPI sensor */
- ret = bus_register_notifier(&soc_camera_bus_type, &priv->notifier);
- if (ret < 0) {
- dev_err(&pdev->dev, "CSI2 cannot register notifier\n");
- goto ernotify;
- }
if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
dev_err(&pdev->dev, "CSI2 register region already claimed\n");
@@ -297,11 +301,17 @@ static __devinit int sh_csi2_probe(struct platform_device *pdev)
}
priv->pdev = pdev;
+ platform_set_drvdata(pdev, priv);
v4l2_subdev_init(&priv->subdev, &sh_csi2_subdev_ops);
v4l2_set_subdevdata(&priv->subdev, &pdev->dev);
- platform_set_drvdata(pdev, priv);
+ snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.mipi-csi",
+ dev_name(pdata->v4l2_dev->dev));
+ ret = v4l2_device_register_subdev(pdata->v4l2_dev, &priv->subdev);
+ dev_dbg(&pdev->dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret);
+ if (ret < 0)
+ goto esdreg;
pm_runtime_enable(&pdev->dev);
@@ -309,11 +319,11 @@ static __devinit int sh_csi2_probe(struct platform_device *pdev)
return 0;
+esdreg:
+ iounmap(priv->base);
eremap:
release_mem_region(res->start, resource_size(res));
ereqreg:
- bus_unregister_notifier(&soc_camera_bus_type, &priv->notifier);
-ernotify:
kfree(priv);
return ret;
@@ -324,7 +334,7 @@ static __devexit int sh_csi2_remove(struct platform_device *pdev)
struct sh_csi2 *priv = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bus_unregister_notifier(&soc_camera_bus_type, &priv->notifier);
+ v4l2_device_unregister_subdev(&priv->subdev);
pm_runtime_disable(&pdev->dev);
iounmap(priv->base);
release_mem_region(res->start, resource_size(res));
@@ -335,8 +345,9 @@ static __devexit int sh_csi2_remove(struct platform_device *pdev)
}
static struct platform_driver __refdata sh_csi2_pdrv = {
- .remove = __devexit_p(sh_csi2_remove),
- .driver = {
+ .remove = __devexit_p(sh_csi2_remove),
+ .probe = sh_csi2_probe,
+ .driver = {
.name = "sh-mobile-csi2",
.owner = THIS_MODULE,
},
@@ -344,7 +355,7 @@ static struct platform_driver __refdata sh_csi2_pdrv = {
static int __init sh_csi2_init(void)
{
- return platform_driver_probe(&sh_csi2_pdrv, sh_csi2_probe);
+ return platform_driver_register(&sh_csi2_pdrv);
}
static void __exit sh_csi2_exit(void)
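
The last hunks switch sh-mobile-csi2 from platform_driver_probe() to an ordinary .probe member plus platform_driver_register(); that is needed once the device can be created after the driver is already loaded, since platform_driver_probe() only binds devices present at registration time. A minimal sketch of that registration style, names hypothetical:

#include <linux/module.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	return 0;	/* map resources, register subdevices, ... */
}

static int my_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver my_pdrv = {
	.probe	= my_probe,
	.remove	= my_remove,
	.driver	= {
		.name	= "my-late-device",
		.owner	= THIS_MODULE,
	},
};

static int __init my_init(void)
{
	/* unlike platform_driver_probe(), this also binds devices added later */
	return platform_driver_register(&my_pdrv);
}
module_init(my_init);

static void __exit my_exit(void)
{
	platform_driver_unregister(&my_pdrv);
}
module_exit(my_exit);

MODULE_LICENSE("GPL");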
diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c
index 07cf0c6c7c1..6a729879d89 100644
--- a/drivers/media/video/sh_vou.c
+++ b/drivers/media/video/sh_vou.c
@@ -19,7 +19,6 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <media/sh_vou.h>
@@ -393,7 +392,6 @@ static int sh_vou_querycap(struct file *file, void *priv,
dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);
strlcpy(cap->card, "SuperH VOU", sizeof(cap->card));
- cap->version = KERNEL_VERSION(0, 1, 0);
cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
return 0;
}
@@ -1490,4 +1488,5 @@ module_exit(sh_vou_exit);
MODULE_DESCRIPTION("SuperH VOU driver");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1.0");
MODULE_ALIAS("platform:sh-vou");
diff --git a/drivers/media/video/sn9c102/sn9c102.h b/drivers/media/video/sn9c102/sn9c102.h
index cbfc44433b9..22ea211ab54 100644
--- a/drivers/media/video/sn9c102/sn9c102.h
+++ b/drivers/media/video/sn9c102/sn9c102.h
@@ -21,7 +21,6 @@
#ifndef _SN9C102_H_
#define _SN9C102_H_
-#include <linux/version.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 0e07c493e6f..16cb07c5c27 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -33,6 +33,7 @@
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
+#include <linux/version.h>
#include <linux/page-flags.h>
#include <asm/byteorder.h>
#include <asm/page.h>
@@ -47,8 +48,7 @@
#define SN9C102_MODULE_AUTHOR "(C) 2004-2007 Luca Risolia"
#define SN9C102_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>"
#define SN9C102_MODULE_LICENSE "GPL"
-#define SN9C102_MODULE_VERSION "1:1.47pre49"
-#define SN9C102_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 47)
+#define SN9C102_MODULE_VERSION "1:1.48"
/*****************************************************************************/
@@ -2158,7 +2158,7 @@ sn9c102_vidioc_querycap(struct sn9c102_device* cam, void __user * arg)
{
struct v4l2_capability cap = {
.driver = "sn9c102",
- .version = SN9C102_MODULE_VERSION_CODE,
+ .version = LINUX_VERSION_CODE,
.capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING,
};
@@ -3187,16 +3187,8 @@ static long sn9c102_ioctl_v4l2(struct file *filp,
case VIDIOC_S_AUDIO:
return sn9c102_vidioc_s_audio(cam, arg);
- case VIDIOC_G_STD:
- case VIDIOC_S_STD:
- case VIDIOC_QUERYSTD:
- case VIDIOC_ENUMSTD:
- case VIDIOC_QUERYMENU:
- case VIDIOC_ENUM_FRAMEINTERVALS:
- return -EINVAL;
-
default:
- return -EINVAL;
+ return -ENOTTY;
}
}
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 4e4d4122d9a..5bdfe7e16bc 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -60,14 +60,14 @@ static int soc_camera_power_set(struct soc_camera_device *icd,
ret = regulator_bulk_enable(icl->num_regulators,
icl->regulators);
if (ret < 0) {
- dev_err(&icd->dev, "Cannot enable regulators\n");
+ dev_err(icd->pdev, "Cannot enable regulators\n");
return ret;
}
if (icl->power)
ret = icl->power(icd->pdev, power_on);
if (ret < 0) {
- dev_err(&icd->dev,
+ dev_err(icd->pdev,
"Platform failed to power-on the camera.\n");
regulator_bulk_disable(icl->num_regulators,
@@ -79,7 +79,7 @@ static int soc_camera_power_set(struct soc_camera_device *icd,
if (icl->power)
ret = icl->power(icd->pdev, 0);
if (ret < 0) {
- dev_err(&icd->dev,
+ dev_err(icd->pdev,
"Platform failed to power-off the camera.\n");
return ret;
}
@@ -87,7 +87,7 @@ static int soc_camera_power_set(struct soc_camera_device *icd,
ret = regulator_bulk_disable(icl->num_regulators,
icl->regulators);
if (ret < 0) {
- dev_err(&icd->dev, "Cannot disable regulators\n");
+ dev_err(icd->pdev, "Cannot disable regulators\n");
return ret;
}
}
@@ -147,11 +147,11 @@ EXPORT_SYMBOL(soc_camera_apply_sensor_flags);
static int soc_camera_try_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct v4l2_pix_format *pix = &f->fmt.pix;
int ret;
- dev_dbg(&icd->dev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
+ dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
pixfmtstr(pix->pixelformat), pix->width, pix->height);
pix->bytesperline = 0;
@@ -199,22 +199,15 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
static int soc_camera_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
- struct soc_camera_device *icd = file->private_data;
- int ret = 0;
-
if (inp->index != 0)
return -EINVAL;
- if (icd->ops->enum_input)
- ret = icd->ops->enum_input(icd, inp);
- else {
- /* default is camera */
- inp->type = V4L2_INPUT_TYPE_CAMERA;
- inp->std = V4L2_STD_UNKNOWN;
- strcpy(inp->name, "Camera");
- }
+ /* default is camera */
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = V4L2_STD_UNKNOWN;
+ strcpy(inp->name, "Camera");
- return ret;
+ return 0;
}
static int soc_camera_g_input(struct file *file, void *priv, unsigned int *i)
@@ -244,7 +237,7 @@ static int soc_camera_enum_fsizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
return ici->ops->enum_fsizes(icd, fsize);
}
@@ -254,7 +247,7 @@ static int soc_camera_reqbufs(struct file *file, void *priv,
{
int ret;
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
@@ -281,7 +274,7 @@ static int soc_camera_querybuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
@@ -295,7 +288,7 @@ static int soc_camera_qbuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
@@ -312,7 +305,7 @@ static int soc_camera_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *p)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
@@ -329,7 +322,7 @@ static int soc_camera_dqbuf(struct file *file, void *priv,
static int soc_camera_init_user_formats(struct soc_camera_device *icd)
{
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
unsigned int i, fmts = 0, raw_fmts = 0;
int ret;
enum v4l2_mbus_pixelcode code;
@@ -363,7 +356,7 @@ static int soc_camera_init_user_formats(struct soc_camera_device *icd)
if (!icd->user_formats)
return -ENOMEM;
- dev_dbg(&icd->dev, "Found %d supported formats.\n", fmts);
+ dev_dbg(icd->pdev, "Found %d supported formats.\n", fmts);
/* Second pass - actually fill data formats */
fmts = 0;
@@ -395,7 +388,7 @@ egfmt:
/* Always entered with .video_lock held */
static void soc_camera_free_user_formats(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
if (ici->ops->put_formats)
ici->ops->put_formats(icd);
@@ -409,11 +402,11 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd)
static int soc_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct v4l2_pix_format *pix = &f->fmt.pix;
int ret;
- dev_dbg(&icd->dev, "S_FMT(%c%c%c%c, %ux%u)\n",
+ dev_dbg(icd->pdev, "S_FMT(%c%c%c%c, %ux%u)\n",
pixfmtstr(pix->pixelformat), pix->width, pix->height);
/* We always call try_fmt() before set_fmt() or set_crop() */
@@ -426,7 +419,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
return ret;
} else if (!icd->current_fmt ||
icd->current_fmt->host_fmt->fourcc != pix->pixelformat) {
- dev_err(&icd->dev,
+ dev_err(icd->pdev,
"Host driver hasn't set up current format correctly!\n");
return -EINVAL;
}
@@ -440,7 +433,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
if (ici->ops->init_videobuf)
icd->vb_vidq.field = pix->field;
- dev_dbg(&icd->dev, "set width: %d height: %d\n",
+ dev_dbg(icd->pdev, "set width: %d height: %d\n",
icd->user_width, icd->user_height);
/* set physical bus parameters */
@@ -450,9 +443,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
static int soc_camera_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
- struct soc_camera_device *icd = container_of(vdev->parent,
- struct soc_camera_device,
- dev);
+ struct soc_camera_device *icd = dev_get_drvdata(vdev->parent);
struct soc_camera_link *icl = to_soc_camera_link(icd);
struct soc_camera_host *ici;
int ret;
@@ -461,10 +452,10 @@ static int soc_camera_open(struct file *file)
/* No device driver attached */
return -ENODEV;
- ici = to_soc_camera_host(icd->dev.parent);
+ ici = to_soc_camera_host(icd->parent);
if (!try_module_get(ici->ops->owner)) {
- dev_err(&icd->dev, "Couldn't lock capture bus driver.\n");
+ dev_err(icd->pdev, "Couldn't lock capture bus driver.\n");
return -EINVAL;
}
@@ -495,7 +486,7 @@ static int soc_camera_open(struct file *file)
ret = ici->ops->add(icd);
if (ret < 0) {
- dev_err(&icd->dev, "Couldn't activate the camera: %d\n", ret);
+ dev_err(icd->pdev, "Couldn't activate the camera: %d\n", ret);
goto eiciadd;
}
@@ -524,7 +515,7 @@ static int soc_camera_open(struct file *file)
}
file->private_data = icd;
- dev_dbg(&icd->dev, "camera device open\n");
+ dev_dbg(icd->pdev, "camera device open\n");
return 0;
@@ -549,7 +540,7 @@ epower:
static int soc_camera_close(struct file *file)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
icd->use_count--;
if (!icd->use_count) {
@@ -570,7 +561,7 @@ static int soc_camera_close(struct file *file)
module_put(ici->ops->owner);
- dev_dbg(&icd->dev, "camera device close\n");
+ dev_dbg(icd->pdev, "camera device close\n");
return 0;
}
@@ -581,7 +572,7 @@ static ssize_t soc_camera_read(struct file *file, char __user *buf,
struct soc_camera_device *icd = file->private_data;
int err = -EINVAL;
- dev_err(&icd->dev, "camera device read not implemented\n");
+ dev_err(icd->pdev, "camera device read not implemented\n");
return err;
}
@@ -589,10 +580,10 @@ static ssize_t soc_camera_read(struct file *file, char __user *buf,
static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
int err;
- dev_dbg(&icd->dev, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
+ dev_dbg(icd->pdev, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
if (icd->streamer != file)
return -EBUSY;
@@ -602,7 +593,7 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
else
err = vb2_mmap(&icd->vb2_vidq, vma);
- dev_dbg(&icd->dev, "vma start=0x%08lx, size=%ld, ret=%d\n",
+ dev_dbg(icd->pdev, "vma start=0x%08lx, size=%ld, ret=%d\n",
(unsigned long)vma->vm_start,
(unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
err);
@@ -613,13 +604,13 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
if (icd->streamer != file)
return -EBUSY;
if (ici->ops->init_videobuf && list_empty(&icd->vb_vidq.stream)) {
- dev_err(&icd->dev, "Trying to poll with no queued buffers!\n");
+ dev_err(icd->pdev, "Trying to poll with no queued buffers!\n");
return POLLERR;
}
@@ -659,15 +650,15 @@ static int soc_camera_s_fmt_vid_cap(struct file *file, void *priv,
WARN_ON(priv != file->private_data);
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- dev_warn(&icd->dev, "Wrong buf-type %d\n", f->type);
+ dev_warn(icd->pdev, "Wrong buf-type %d\n", f->type);
return -EINVAL;
}
if (icd->streamer && icd->streamer != file)
return -EBUSY;
- if (is_streaming(to_soc_camera_host(icd->dev.parent), icd)) {
- dev_err(&icd->dev, "S_FMT denied: queue initialised\n");
+ if (is_streaming(to_soc_camera_host(icd->parent), icd)) {
+ dev_err(icd->pdev, "S_FMT denied: queue initialised\n");
return -EBUSY;
}
@@ -716,7 +707,7 @@ static int soc_camera_g_fmt_vid_cap(struct file *file, void *priv,
pix->field = icd->field;
pix->pixelformat = icd->current_fmt->host_fmt->fourcc;
pix->colorspace = icd->colorspace;
- dev_dbg(&icd->dev, "current_fmt->fourcc: 0x%08x\n",
+ dev_dbg(icd->pdev, "current_fmt->fourcc: 0x%08x\n",
icd->current_fmt->host_fmt->fourcc);
return 0;
}
@@ -725,7 +716,7 @@ static int soc_camera_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
@@ -737,7 +728,7 @@ static int soc_camera_streamon(struct file *file, void *priv,
enum v4l2_buf_type i)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
int ret;
@@ -766,7 +757,7 @@ static int soc_camera_streamoff(struct file *file, void *priv,
{
struct soc_camera_device *icd = file->private_data;
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
WARN_ON(priv != file->private_data);
@@ -794,7 +785,7 @@ static int soc_camera_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
int i;
WARN_ON(priv != file->private_data);
@@ -825,7 +816,7 @@ static int soc_camera_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
int ret;
@@ -844,7 +835,7 @@ static int soc_camera_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
int ret;
@@ -863,7 +854,7 @@ static int soc_camera_cropcap(struct file *file, void *fh,
struct v4l2_cropcap *a)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
return ici->ops->cropcap(icd, a);
}
@@ -872,7 +863,7 @@ static int soc_camera_g_crop(struct file *file, void *fh,
struct v4l2_crop *a)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
int ret;
ret = ici->ops->get_crop(icd, a);
@@ -889,7 +880,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
struct v4l2_crop *a)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct v4l2_rect *rect = &a->c;
struct v4l2_crop current_crop;
int ret;
@@ -897,7 +888,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- dev_dbg(&icd->dev, "S_CROP(%ux%u@%u:%u)\n",
+ dev_dbg(icd->pdev, "S_CROP(%ux%u@%u:%u)\n",
rect->width, rect->height, rect->left, rect->top);
/* If get_crop fails, we'll let host and / or client drivers decide */
@@ -905,7 +896,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
/* Prohibit window size change with initialised buffers */
if (ret < 0) {
- dev_err(&icd->dev,
+ dev_err(icd->pdev,
"S_CROP denied: getting current crop failed\n");
} else if ((a->c.width == current_crop.c.width &&
a->c.height == current_crop.c.height) ||
@@ -915,7 +906,7 @@ static int soc_camera_s_crop(struct file *file, void *fh,
} else if (ici->ops->set_livecrop) {
ret = ici->ops->set_livecrop(icd, a);
} else {
- dev_err(&icd->dev,
+ dev_err(icd->pdev,
"S_CROP denied: queue initialised and sizes differ\n");
ret = -EBUSY;
}
@@ -927,7 +918,7 @@ static int soc_camera_g_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
if (ici->ops->get_parm)
return ici->ops->get_parm(icd, a);
@@ -939,7 +930,7 @@ static int soc_camera_s_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
struct soc_camera_device *icd = file->private_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
if (ici->ops->set_parm)
return ici->ops->set_parm(icd, a);
@@ -976,6 +967,8 @@ static int soc_camera_s_register(struct file *file, void *fh,
}
#endif
+static int soc_camera_probe(struct soc_camera_device *icd);
+
/* So far this function cannot fail */
static void scan_add_host(struct soc_camera_host *ici)
{
@@ -986,15 +979,9 @@ static void scan_add_host(struct soc_camera_host *ici)
list_for_each_entry(icd, &devices, list) {
if (icd->iface == ici->nr) {
int ret;
- icd->dev.parent = ici->v4l2_dev.dev;
- dev_set_name(&icd->dev, "%u-%u", icd->iface,
- icd->devnum);
- ret = device_register(&icd->dev);
- if (ret < 0) {
- icd->dev.parent = NULL;
- dev_err(&icd->dev,
- "Cannot register device: %d\n", ret);
- }
+
+ icd->parent = ici->v4l2_dev.dev;
+ ret = soc_camera_probe(icd);
}
}
@@ -1006,12 +993,12 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd,
struct soc_camera_link *icl)
{
struct i2c_client *client;
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct i2c_adapter *adap = i2c_get_adapter(icl->i2c_adapter_id);
struct v4l2_subdev *subdev;
if (!adap) {
- dev_err(&icd->dev, "Cannot get I2C adapter #%d. No driver?\n",
+ dev_err(icd->pdev, "Cannot get I2C adapter #%d. No driver?\n",
icl->i2c_adapter_id);
goto ei2cga;
}
@@ -1026,7 +1013,7 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd,
client = v4l2_get_subdevdata(subdev);
/* Use to_i2c_client(dev) to recover the i2c client */
- dev_set_drvdata(&icd->dev, &client->dev);
+ icd->control = &client->dev;
return 0;
ei2cnd:
@@ -1040,7 +1027,8 @@ static void soc_camera_free_i2c(struct soc_camera_device *icd)
struct i2c_client *client =
to_i2c_client(to_soc_camera_control(icd));
struct i2c_adapter *adap = client->adapter;
- dev_set_drvdata(&icd->dev, NULL);
+
+ icd->control = NULL;
v4l2_device_unregister_subdev(i2c_get_clientdata(client));
i2c_unregister_device(client);
i2c_put_adapter(adap);
@@ -1053,17 +1041,16 @@ static void soc_camera_free_i2c(struct soc_camera_device *icd)
static int soc_camera_video_start(struct soc_camera_device *icd);
static int video_dev_create(struct soc_camera_device *icd);
/* Called during host-driver probe */
-static int soc_camera_probe(struct device *dev)
+static int soc_camera_probe(struct soc_camera_device *icd)
{
- struct soc_camera_device *icd = to_soc_camera_dev(dev);
- struct soc_camera_host *ici = to_soc_camera_host(dev->parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct soc_camera_link *icl = to_soc_camera_link(icd);
struct device *control = NULL;
struct v4l2_subdev *sd;
struct v4l2_mbus_framefmt mf;
int ret;
- dev_info(dev, "Probing %s\n", dev_name(dev));
+ dev_info(icd->pdev, "Probing %s\n", dev_name(icd->pdev));
ret = regulator_bulk_get(icd->pdev, icl->num_regulators,
icl->regulators);
@@ -1099,7 +1086,7 @@ static int soc_camera_probe(struct device *dev)
if (icl->module_name)
ret = request_module(icl->module_name);
- ret = icl->add_device(icl, &icd->dev);
+ ret = icl->add_device(icd);
if (ret < 0)
goto eadddev;
@@ -1110,7 +1097,7 @@ static int soc_camera_probe(struct device *dev)
control = to_soc_camera_control(icd);
if (!control || !control->driver || !dev_get_drvdata(control) ||
!try_module_get(control->driver->owner)) {
- icl->del_device(icl);
+ icl->del_device(icd);
goto enodrv;
}
}
@@ -1125,8 +1112,6 @@ static int soc_camera_probe(struct device *dev)
icd->field = V4L2_FIELD_ANY;
- icd->vdev->lock = &icd->video_lock;
-
/*
* ..._video_start() will create a device node, video_register_device()
* itself is protected against concurrent open() calls, but we also have
@@ -1146,11 +1131,6 @@ static int soc_camera_probe(struct device *dev)
icd->field = mf.field;
}
- /* Do we have to sysfs_remove_link() before device_unregister()? */
- if (sysfs_create_link(&icd->dev.kobj, &to_soc_camera_control(icd)->kobj,
- "control"))
- dev_warn(&icd->dev, "Failed creating the control symlink\n");
-
ici->ops->remove(icd);
soc_camera_power_set(icd, icl, 0);
@@ -1166,7 +1146,7 @@ eiufmt:
if (icl->board_info) {
soc_camera_free_i2c(icd);
} else {
- icl->del_device(icl);
+ icl->del_device(icd);
module_put(control->driver->owner);
}
enodrv:
@@ -1186,13 +1166,12 @@ ereg:
* This is called on device_unregister, which only means we have to disconnect
* from the host, but not remove ourselves from the device list
*/
-static int soc_camera_remove(struct device *dev)
+static int soc_camera_remove(struct soc_camera_device *icd)
{
- struct soc_camera_device *icd = to_soc_camera_dev(dev);
struct soc_camera_link *icl = to_soc_camera_link(icd);
struct video_device *vdev = icd->vdev;
- BUG_ON(!dev->parent);
+ BUG_ON(!icd->parent);
if (vdev) {
video_unregister_device(vdev);
@@ -1202,10 +1181,9 @@ static int soc_camera_remove(struct device *dev)
if (icl->board_info) {
soc_camera_free_i2c(icd);
} else {
- struct device_driver *drv = to_soc_camera_control(icd) ?
- to_soc_camera_control(icd)->driver : NULL;
+ struct device_driver *drv = to_soc_camera_control(icd)->driver;
if (drv) {
- icl->del_device(icl);
+ icl->del_device(icd);
module_put(drv->owner);
}
}
@@ -1216,49 +1194,6 @@ static int soc_camera_remove(struct device *dev)
return 0;
}
-static int soc_camera_suspend(struct device *dev, pm_message_t state)
-{
- struct soc_camera_device *icd = to_soc_camera_dev(dev);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
- int ret = 0;
-
- if (ici->ops->suspend)
- ret = ici->ops->suspend(icd, state);
-
- return ret;
-}
-
-static int soc_camera_resume(struct device *dev)
-{
- struct soc_camera_device *icd = to_soc_camera_dev(dev);
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
- int ret = 0;
-
- if (ici->ops->resume)
- ret = ici->ops->resume(icd);
-
- return ret;
-}
-
-struct bus_type soc_camera_bus_type = {
- .name = "soc-camera",
- .probe = soc_camera_probe,
- .remove = soc_camera_remove,
- .suspend = soc_camera_suspend,
- .resume = soc_camera_resume,
-};
-EXPORT_SYMBOL_GPL(soc_camera_bus_type);
-
-static struct device_driver ic_drv = {
- .name = "camera",
- .bus = &soc_camera_bus_type,
- .owner = THIS_MODULE,
-};
-
-static void dummy_release(struct device *dev)
-{
-}
-
static int default_cropcap(struct soc_camera_device *icd,
struct v4l2_cropcap *a)
{
@@ -1317,13 +1252,6 @@ static int default_enum_fsizes(struct soc_camera_device *icd,
return 0;
}
-static void soc_camera_device_init(struct device *dev, void *pdata)
-{
- dev->platform_data = pdata;
- dev->bus = &soc_camera_bus_type;
- dev->release = dummy_release;
-}
-
int soc_camera_host_register(struct soc_camera_host *ici)
{
struct soc_camera_host *ix;
@@ -1389,24 +1317,9 @@ void soc_camera_host_unregister(struct soc_camera_host *ici)
mutex_lock(&list_lock);
list_del(&ici->list);
-
- list_for_each_entry(icd, &devices, list) {
- if (icd->iface == ici->nr) {
- void *pdata = icd->dev.platform_data;
- /* The bus->remove will be called */
- device_unregister(&icd->dev);
- /*
- * Not before device_unregister(), .remove
- * needs parent to call ici->ops->remove().
- * If the host module is loaded again, device_register()
- * would complain "already initialised," since 2.6.32
- * this is also needed to prevent use-after-free of the
- * device private data.
- */
- memset(&icd->dev, 0, sizeof(icd->dev));
- soc_camera_device_init(&icd->dev, pdata);
- }
- }
+ list_for_each_entry(icd, &devices, list)
+ if (icd->iface == ici->nr && to_soc_camera_control(icd))
+ soc_camera_remove(icd);
mutex_unlock(&list_lock);
@@ -1448,11 +1361,6 @@ static int soc_camera_device_register(struct soc_camera_device *icd)
return 0;
}
-static void soc_camera_device_unregister(struct soc_camera_device *icd)
-{
- list_del(&icd->list);
-}
-
static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
.vidioc_querycap = soc_camera_querycap,
.vidioc_g_fmt_vid_cap = soc_camera_g_fmt_vid_cap,
@@ -1487,7 +1395,7 @@ static const struct v4l2_ioctl_ops soc_camera_ioctl_ops = {
static int video_dev_create(struct soc_camera_device *icd)
{
- struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct video_device *vdev = video_device_alloc();
if (!vdev)
@@ -1495,12 +1403,13 @@ static int video_dev_create(struct soc_camera_device *icd)
strlcpy(vdev->name, ici->drv_name, sizeof(vdev->name));
- vdev->parent = &icd->dev;
+ vdev->parent = icd->pdev;
vdev->current_norm = V4L2_STD_UNKNOWN;
vdev->fops = &soc_camera_fops;
vdev->ioctl_ops = &soc_camera_ioctl_ops;
vdev->release = video_device_release;
vdev->tvnorms = V4L2_STD_UNKNOWN;
+ vdev->lock = &icd->video_lock;
icd->vdev = vdev;
@@ -1515,7 +1424,7 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
const struct device_type *type = icd->vdev->dev.type;
int ret;
- if (!icd->dev.parent)
+ if (!icd->parent)
return -ENODEV;
if (!icd->ops ||
@@ -1525,7 +1434,7 @@ static int soc_camera_video_start(struct soc_camera_device *icd)
ret = video_register_device(icd->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
- dev_err(&icd->dev, "video_register_device failed: %d\n", ret);
+ dev_err(icd->pdev, "video_register_device failed: %d\n", ret);
return ret;
}
@@ -1549,6 +1458,7 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
return -ENOMEM;
icd->iface = icl->bus_id;
+ icd->link = icl;
icd->pdev = &pdev->dev;
platform_set_drvdata(pdev, icd);
@@ -1556,8 +1466,6 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
if (ret < 0)
goto escdevreg;
- soc_camera_device_init(&icd->dev, icl);
-
icd->user_width = DEFAULT_WIDTH;
icd->user_height = DEFAULT_HEIGHT;
@@ -1581,7 +1489,7 @@ static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
if (!icd)
return -EINVAL;
- soc_camera_device_unregister(icd);
+ list_del(&icd->list);
kfree(icd);
@@ -1598,31 +1506,12 @@ static struct platform_driver __refdata soc_camera_pdrv = {
static int __init soc_camera_init(void)
{
- int ret = bus_register(&soc_camera_bus_type);
- if (ret)
- return ret;
- ret = driver_register(&ic_drv);
- if (ret)
- goto edrvr;
-
- ret = platform_driver_probe(&soc_camera_pdrv, soc_camera_pdrv_probe);
- if (ret)
- goto epdr;
-
- return 0;
-
-epdr:
- driver_unregister(&ic_drv);
-edrvr:
- bus_unregister(&soc_camera_bus_type);
- return ret;
+ return platform_driver_probe(&soc_camera_pdrv, soc_camera_pdrv_probe);
}
static void __exit soc_camera_exit(void)
{
platform_driver_unregister(&soc_camera_pdrv);
- driver_unregister(&ic_drv);
- bus_unregister(&soc_camera_bus_type);
}
module_init(soc_camera_init);
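/*
 * Callback sketch implied by the calls above (illustrative, not part of this
 * patch): struct soc_camera_link's add_device()/del_device() hooks now take
 * the soc_camera_device itself instead of the link, so board code follows the
 * shape
 *
 *	static int mycam_add_device(struct soc_camera_device *icd)
 *	{
 *		... enable clocks / power for the sensor behind icd ...
 *		return 0;
 *	}
 *
 * mycam_add_device() is a hypothetical board hook used only to show the new
 * prototype.
 */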
diff --git a/drivers/media/video/soc_camera_platform.c b/drivers/media/video/soc_camera_platform.c
index bf406e89c99..8069cd6bc5e 100644
--- a/drivers/media/video/soc_camera_platform.c
+++ b/drivers/media/video/soc_camera_platform.c
@@ -146,7 +146,7 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
if (!p)
return -EINVAL;
- if (!p->dev) {
+ if (!p->icd) {
dev_err(&pdev->dev,
"Platform has not set soc_camera_device pointer!\n");
return -EINVAL;
@@ -156,16 +156,16 @@ static int soc_camera_platform_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- icd = to_soc_camera_dev(p->dev);
+ icd = p->icd;
/* soc-camera convention: control's drvdata points to the subdev */
platform_set_drvdata(pdev, &priv->subdev);
/* Set the control device reference */
- dev_set_drvdata(&icd->dev, &pdev->dev);
+ icd->control = &pdev->dev;
icd->ops = &soc_camera_platform_ops;
- ici = to_soc_camera_host(icd->dev.parent);
+ ici = to_soc_camera_host(icd->parent);
v4l2_subdev_init(&priv->subdev, &platform_subdev_ops);
v4l2_set_subdevdata(&priv->subdev, p);
@@ -188,7 +188,7 @@ static int soc_camera_platform_remove(struct platform_device *pdev)
{
struct soc_camera_platform_priv *priv = get_priv(pdev);
struct soc_camera_platform_info *p = pdev->dev.platform_data;
- struct soc_camera_device *icd = to_soc_camera_dev(p->dev);
+ struct soc_camera_device *icd = p->icd;
v4l2_device_unregister_subdev(&priv->subdev);
icd->ops = NULL;
diff --git a/drivers/media/video/sr030pc30.c b/drivers/media/video/sr030pc30.c
index c901721a1db..8afb0e8a2e0 100644
--- a/drivers/media/video/sr030pc30.c
+++ b/drivers/media/video/sr030pc30.c
@@ -726,8 +726,10 @@ static int sr030pc30_s_power(struct v4l2_subdev *sd, int on)
const struct sr030pc30_platform_data *pdata = info->pdata;
int ret;
- if (WARN(pdata == NULL, "No platform data!\n"))
- return -ENOMEM;
+ if (pdata == NULL) {
+ WARN(1, "No platform data!\n");
+ return -EINVAL;
+ }
/*
* Put sensor into power sleep mode before switching off
@@ -746,6 +748,7 @@ static int sr030pc30_s_power(struct v4l2_subdev *sd, int on)
if (on) {
ret = sr030pc30_base_config(sd);
} else {
+ ret = 0;
info->curr_win = NULL;
info->curr_fmt = NULL;
}
diff --git a/drivers/media/video/tda7432.c b/drivers/media/video/tda7432.c
index 3941f954daf..bd218545da9 100644
--- a/drivers/media/video/tda7432.c
+++ b/drivers/media/video/tda7432.c
@@ -49,10 +49,11 @@ static int maxvol;
static int loudness; /* disable loudness by default */
static int debug; /* insmod parameter */
module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Set debugging level from 0 to 3. Default is off(0).");
module_param(loudness, int, S_IRUGO);
-MODULE_PARM_DESC(maxvol,"Set maximium volume to +20db (0), default is 0db(1)");
+MODULE_PARM_DESC(loudness, "Turn loudness on(1) else off(0). Default is off(0).");
module_param(maxvol, int, S_IRUGO | S_IWUSR);
-
+MODULE_PARM_DESC(maxvol, "Set maximum volume to +20dB(0) else +0dB(1). Default is +20dB(0).");
/* Structure of address and subaddresses for the tda7432 */
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
index fc611ebeb82..84cd1b65b76 100644
--- a/drivers/media/video/timblogiw.c
+++ b/drivers/media/video/timblogiw.c
@@ -20,7 +20,6 @@
* Timberdale FPGA LogiWin Video In
*/
-#include <linux/version.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
diff --git a/drivers/media/video/tlg2300/pd-common.h b/drivers/media/video/tlg2300/pd-common.h
index 46066bdc73f..56564e6aaac 100644
--- a/drivers/media/video/tlg2300/pd-common.h
+++ b/drivers/media/video/tlg2300/pd-common.h
@@ -1,7 +1,6 @@
#ifndef PD_COMMON_H
#define PD_COMMON_H
-#include <linux/version.h>
#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/list.h>
diff --git a/drivers/media/video/tlg2300/pd-dvb.c b/drivers/media/video/tlg2300/pd-dvb.c
index edd78f8b1ba..d0da11ae19d 100644
--- a/drivers/media/video/tlg2300/pd-dvb.c
+++ b/drivers/media/video/tlg2300/pd-dvb.c
@@ -7,7 +7,7 @@
#include "vendorcmds.h"
#include <linux/sched.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
static void dvb_urb_cleanup(struct pd_dvb_adapter *pd_dvb);
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index 99c81a9a4f4..129f135d5a5 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -531,3 +531,4 @@ module_exit(poseidon_exit);
MODULE_AUTHOR("Telegent Systems");
MODULE_DESCRIPTION("For tlg2300-based USB device ");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.0.2");
diff --git a/drivers/media/video/tlg2300/pd-radio.c b/drivers/media/video/tlg2300/pd-radio.c
index fae84c2a0c3..4fad1dfb92c 100644
--- a/drivers/media/video/tlg2300/pd-radio.c
+++ b/drivers/media/video/tlg2300/pd-radio.c
@@ -6,7 +6,6 @@
#include <linux/usb.h>
#include <linux/i2c.h>
#include <media/v4l2-dev.h>
-#include <linux/version.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <media/v4l2-ioctl.h>
@@ -149,7 +148,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "tele-radio", sizeof(v->driver));
strlcpy(v->card, "Telegent Poseidon", sizeof(v->card));
usb_make_path(p->udev, v->bus_info, sizeof(v->bus_info));
- v->version = KERNEL_VERSION(0, 0, 1);
v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
return 0;
}
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 9363ed91a4c..11cc980b0cd 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -39,6 +39,7 @@
#include "tda9887.h"
#include "xc5000.h"
#include "tda18271.h"
+#include "xc4000.h"
#define UNSET (-1U)
@@ -391,6 +392,23 @@ static void set_type(struct i2c_client *c, unsigned int type,
tune_now = 0;
break;
}
+ case TUNER_XC4000:
+ {
+ struct xc4000_config xc4000_cfg = {
+ .i2c_address = t->i2c->addr,
+ /* FIXME: the correct parameters will be set */
+ /* only when the digital dvb_attach() occurs */
+ .default_pm = 0,
+ .dvb_amplitude = 0,
+ .set_smoothedcvbs = 0,
+ .if_khz = 0
+ };
+ if (!dvb_attach(xc4000_attach,
+ &t->fe, t->i2c->adapter, &xc4000_cfg))
+ goto attach_failed;
+ tune_now = 0;
+ break;
+ }
default:
if (!dvb_attach(simple_tuner_attach, &t->fe,
t->i2c->adapter, t->i2c->addr, t->type))
@@ -714,29 +732,34 @@ static int tuner_remove(struct i2c_client *client)
* returns 0.
* This function is needed for boards that have a separate tuner for
* radio (like devices with tea5767).
+ * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to
+ * select a TV frequency. So, t_mode = T_ANALOG_TV could actually
+ * be used to represent a Digital TV too.
*/
static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode)
{
- if ((1 << mode & t->mode_mask) == 0)
+ int t_mode;
+ if (mode == V4L2_TUNER_RADIO)
+ t_mode = T_RADIO;
+ else
+ t_mode = T_ANALOG_TV;
+
+ if ((t_mode & t->mode_mask) == 0)
return -EINVAL;
return 0;
}
/**
- * set_mode_freq - Switch tuner to other mode.
- * @client: struct i2c_client pointer
+ * set_mode - Switch tuner to other mode.
* @t: a pointer to the module's internal struct_tuner
* @mode: enum v4l2_type (radio or TV)
- * @freq: frequency to set (0 means to use the previous one)
*
* If tuner doesn't support the needed mode (radio or TV), prints a
* debug message and returns -EINVAL, changing its state to standby.
- * Otherwise, changes the state and sets frequency to the last value, if
- * the tuner can sleep or if it supports both Radio and TV.
+ * Otherwise, changes the mode and returns 0.
*/
-static int set_mode_freq(struct i2c_client *client, struct tuner *t,
- enum v4l2_tuner_type mode, unsigned int freq)
+static int set_mode(struct tuner *t, enum v4l2_tuner_type mode)
{
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
@@ -752,17 +775,27 @@ static int set_mode_freq(struct i2c_client *client, struct tuner *t,
t->mode = mode;
tuner_dbg("Changing to mode %d\n", mode);
}
+ return 0;
+}
+
+/**
+ * set_freq - Set the tuner to the desired frequency.
+ * @t: a pointer to the module's internal struct_tuner
+ * @freq: frequency to set (0 means to use the current frequency)
+ */
+static void set_freq(struct tuner *t, unsigned int freq)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
+
if (t->mode == V4L2_TUNER_RADIO) {
- if (freq)
- t->radio_freq = freq;
- set_radio_freq(client, t->radio_freq);
+ if (!freq)
+ freq = t->radio_freq;
+ set_radio_freq(client, freq);
} else {
- if (freq)
- t->tv_freq = freq;
- set_tv_freq(client, t->tv_freq);
+ if (!freq)
+ freq = t->tv_freq;
+ set_tv_freq(client, freq);
}
-
- return 0;
}
/*
@@ -817,7 +850,8 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
/**
* tuner_fixup_std - force a given video standard variant
*
- * @t: tuner internal struct
+ * @t: tuner internal struct
+ * @std: TV standard
*
* A few devices or drivers have problem to detect some standard variations.
* On other operational systems, the drivers generally have a per-country
@@ -827,57 +861,39 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
* to distinguish all video standard variations, a modprobe parameter can
* be used to force a video standard match.
*/
-static int tuner_fixup_std(struct tuner *t)
+static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
{
- if ((t->std & V4L2_STD_PAL) == V4L2_STD_PAL) {
+ if (pal[0] != '-' && (std & V4L2_STD_PAL) == V4L2_STD_PAL) {
switch (pal[0]) {
case '6':
- tuner_dbg("insmod fixup: PAL => PAL-60\n");
- t->std = V4L2_STD_PAL_60;
- break;
+ return V4L2_STD_PAL_60;
case 'b':
case 'B':
case 'g':
case 'G':
- tuner_dbg("insmod fixup: PAL => PAL-BG\n");
- t->std = V4L2_STD_PAL_BG;
- break;
+ return V4L2_STD_PAL_BG;
case 'i':
case 'I':
- tuner_dbg("insmod fixup: PAL => PAL-I\n");
- t->std = V4L2_STD_PAL_I;
- break;
+ return V4L2_STD_PAL_I;
case 'd':
case 'D':
case 'k':
case 'K':
- tuner_dbg("insmod fixup: PAL => PAL-DK\n");
- t->std = V4L2_STD_PAL_DK;
- break;
+ return V4L2_STD_PAL_DK;
case 'M':
case 'm':
- tuner_dbg("insmod fixup: PAL => PAL-M\n");
- t->std = V4L2_STD_PAL_M;
- break;
+ return V4L2_STD_PAL_M;
case 'N':
case 'n':
- if (pal[1] == 'c' || pal[1] == 'C') {
- tuner_dbg("insmod fixup: PAL => PAL-Nc\n");
- t->std = V4L2_STD_PAL_Nc;
- } else {
- tuner_dbg("insmod fixup: PAL => PAL-N\n");
- t->std = V4L2_STD_PAL_N;
- }
- break;
- case '-':
- /* default parameter, do nothing */
- break;
+ if (pal[1] == 'c' || pal[1] == 'C')
+ return V4L2_STD_PAL_Nc;
+ return V4L2_STD_PAL_N;
default:
tuner_warn("pal= argument not recognised\n");
break;
}
}
- if ((t->std & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
+ if (secam[0] != '-' && (std & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
switch (secam[0]) {
case 'b':
case 'B':
@@ -885,63 +901,42 @@ static int tuner_fixup_std(struct tuner *t)
case 'G':
case 'h':
case 'H':
- tuner_dbg("insmod fixup: SECAM => SECAM-BGH\n");
- t->std = V4L2_STD_SECAM_B |
- V4L2_STD_SECAM_G |
- V4L2_STD_SECAM_H;
- break;
+ return V4L2_STD_SECAM_B |
+ V4L2_STD_SECAM_G |
+ V4L2_STD_SECAM_H;
case 'd':
case 'D':
case 'k':
case 'K':
- tuner_dbg("insmod fixup: SECAM => SECAM-DK\n");
- t->std = V4L2_STD_SECAM_DK;
- break;
+ return V4L2_STD_SECAM_DK;
case 'l':
case 'L':
- if ((secam[1] == 'C') || (secam[1] == 'c')) {
- tuner_dbg("insmod fixup: SECAM => SECAM-L'\n");
- t->std = V4L2_STD_SECAM_LC;
- } else {
- tuner_dbg("insmod fixup: SECAM => SECAM-L\n");
- t->std = V4L2_STD_SECAM_L;
- }
- break;
- case '-':
- /* default parameter, do nothing */
- break;
+ if ((secam[1] == 'C') || (secam[1] == 'c'))
+ return V4L2_STD_SECAM_LC;
+ return V4L2_STD_SECAM_L;
default:
tuner_warn("secam= argument not recognised\n");
break;
}
}
- if ((t->std & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
+ if (ntsc[0] != '-' && (std & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
switch (ntsc[0]) {
case 'm':
case 'M':
- tuner_dbg("insmod fixup: NTSC => NTSC-M\n");
- t->std = V4L2_STD_NTSC_M;
- break;
+ return V4L2_STD_NTSC_M;
case 'j':
case 'J':
- tuner_dbg("insmod fixup: NTSC => NTSC_M_JP\n");
- t->std = V4L2_STD_NTSC_M_JP;
- break;
+ return V4L2_STD_NTSC_M_JP;
case 'k':
case 'K':
- tuner_dbg("insmod fixup: NTSC => NTSC_M_KR\n");
- t->std = V4L2_STD_NTSC_M_KR;
- break;
- case '-':
- /* default parameter, do nothing */
- break;
+ return V4L2_STD_NTSC_M_KR;
default:
tuner_info("ntsc= argument not recognised\n");
break;
}
}
- return 0;
+ return std;
}
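/*
 * Worked example (sketch): with the module parameter pal=M loaded, a call
 * such as
 *
 *	t->std = tuner_fixup_std(t, V4L2_STD_PAL);
 *
 * evaluates to V4L2_STD_PAL_M; the helper now returns the fixed-up standard
 * instead of writing t->std itself, so callers apply the result explicitly.
 */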
/*
@@ -1016,7 +1011,7 @@ static void tuner_status(struct dvb_frontend *fe)
case V4L2_TUNER_RADIO:
p = "radio";
break;
- case V4L2_TUNER_DIGITAL_TV:
+ case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */
p = "digital TV";
break;
case V4L2_TUNER_ANALOG_TV:
@@ -1058,10 +1053,9 @@ static void tuner_status(struct dvb_frontend *fe)
static int tuner_s_radio(struct v4l2_subdev *sd)
{
struct tuner *t = to_tuner(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (set_mode_freq(client, t, V4L2_TUNER_RADIO, 0) == -EINVAL)
- return 0;
+ if (set_mode(t, V4L2_TUNER_RADIO) == 0)
+ set_freq(t, 0);
return 0;
}
@@ -1072,16 +1066,20 @@ static int tuner_s_radio(struct v4l2_subdev *sd)
/**
* tuner_s_power - controls the power state of the tuner
* @sd: pointer to struct v4l2_subdev
- * @on: a zero value puts the tuner to sleep
+ * @on: a zero value puts the tuner to sleep, non-zero wakes it up
*/
static int tuner_s_power(struct v4l2_subdev *sd, int on)
{
struct tuner *t = to_tuner(sd);
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
- /* FIXME: Why this function don't wake the tuner if on != 0 ? */
- if (on)
+ if (on) {
+ if (t->standby && set_mode(t, t->mode) == 0) {
+ tuner_dbg("Waking up tuner\n");
+ set_freq(t, 0);
+ }
return 0;
+ }
tuner_dbg("Putting tuner to sleep\n");
t->standby = true;
@@ -1093,28 +1091,36 @@ static int tuner_s_power(struct v4l2_subdev *sd, int on)
static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct tuner *t = to_tuner(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (set_mode_freq(client, t, V4L2_TUNER_ANALOG_TV, 0) == -EINVAL)
+ if (set_mode(t, V4L2_TUNER_ANALOG_TV))
return 0;
- t->std = std;
- tuner_fixup_std(t);
-
+ t->std = tuner_fixup_std(t, std);
+ if (t->std != std)
+ tuner_dbg("Fixup standard %llx to %llx\n", std, t->std);
+ set_freq(t, 0);
return 0;
}
static int tuner_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
{
struct tuner *t = to_tuner(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (set_mode_freq(client, t, f->type, f->frequency) == -EINVAL)
- return 0;
+ if (set_mode(t, f->type) == 0)
+ set_freq(t, f->frequency);
return 0;
}
+/**
+ * tuner_g_frequency - Get the tuned frequency for the tuner
+ * @sd: pointer to struct v4l2_subdev
+ * @f: pointer to struct v4l2_frequency
+ *
+ * At return, the structure f will be filled with tuner frequency
+ * if the tuner matches the f->type.
+ * Note: f->type should be initialized before calling it.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
{
struct tuner *t = to_tuner(sd);
@@ -1122,8 +1128,7 @@ static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
if (check_mode(t, f->type) == -EINVAL)
return 0;
- f->type = t->mode;
- if (fe_tuner_ops->get_frequency && !t->standby) {
+ if (f->type == t->mode && fe_tuner_ops->get_frequency && !t->standby) {
u32 abs_freq;
fe_tuner_ops->get_frequency(&t->fe, &abs_freq);
@@ -1131,12 +1136,22 @@ static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
DIV_ROUND_CLOSEST(abs_freq * 2, 125) :
DIV_ROUND_CLOSEST(abs_freq, 62500);
} else {
- f->frequency = (V4L2_TUNER_RADIO == t->mode) ?
+ f->frequency = (V4L2_TUNER_RADIO == f->type) ?
t->radio_freq : t->tv_freq;
}
return 0;
}
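/*
 * Caller sketch (illustrative): a bridge driver querying the TV frequency is
 * expected to fill in the type field first, e.g.
 *
 *	struct v4l2_frequency f = { .type = V4L2_TUNER_ANALOG_TV };
 *	int err = v4l2_subdev_call(sd, tuner, g_frequency, &f);
 *
 * With the change above, a tuner that also supports TV but is currently in
 * radio mode reports the stored TV frequency for this request instead of
 * overwriting f->type with its own mode.
 */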
+/**
+ * tuner_g_tuner - Fill in tuner information
+ * @sd: pointer to struct v4l2_subdev
+ * @vt: pointer to struct v4l2_tuner
+ *
+ * At return, the structure vt will be filled with tuner information
+ * if the tuner matches vt->type.
+ * Note: vt->type should be initialized before calling it.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
struct tuner *t = to_tuner(sd);
@@ -1145,48 +1160,57 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
if (check_mode(t, vt->type) == -EINVAL)
return 0;
- vt->type = t->mode;
- if (analog_ops->get_afc)
+ if (vt->type == t->mode && analog_ops->get_afc)
vt->afc = analog_ops->get_afc(&t->fe);
- if (t->mode == V4L2_TUNER_ANALOG_TV)
- vt->capability |= V4L2_TUNER_CAP_NORM;
if (t->mode != V4L2_TUNER_RADIO) {
+ vt->capability |= V4L2_TUNER_CAP_NORM;
vt->rangelow = tv_range[0] * 16;
vt->rangehigh = tv_range[1] * 16;
return 0;
}
/* radio mode */
- vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
- if (fe_tuner_ops->get_status) {
- u32 tuner_status;
-
- fe_tuner_ops->get_status(&t->fe, &tuner_status);
- vt->rxsubchans =
- (tuner_status & TUNER_STATUS_STEREO) ?
- V4L2_TUNER_SUB_STEREO :
- V4L2_TUNER_SUB_MONO;
+ if (vt->type == t->mode) {
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
+ if (fe_tuner_ops->get_status) {
+ u32 tuner_status;
+
+ fe_tuner_ops->get_status(&t->fe, &tuner_status);
+ vt->rxsubchans =
+ (tuner_status & TUNER_STATUS_STEREO) ?
+ V4L2_TUNER_SUB_STEREO :
+ V4L2_TUNER_SUB_MONO;
+ }
+ if (analog_ops->has_signal)
+ vt->signal = analog_ops->has_signal(&t->fe);
+ vt->audmode = t->audmode;
}
- if (analog_ops->has_signal)
- vt->signal = analog_ops->has_signal(&t->fe);
vt->capability |= V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
- vt->audmode = t->audmode;
vt->rangelow = radio_range[0] * 16000;
vt->rangehigh = radio_range[1] * 16000;
return 0;
}
+/**
+ * tuner_s_tuner - Set the tuner's audio mode
+ * @sd: pointer to struct v4l2_subdev
+ * @vt: pointer to struct v4l2_tuner
+ *
+ * Sets the audio mode if the tuner matches vt->type.
+ * Note: vt->type should be initialized before calling it.
+ * This is done by either video_ioctl2 or by the bridge driver.
+ */
static int tuner_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
struct tuner *t = to_tuner(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (set_mode_freq(client, t, vt->type, 0) == -EINVAL)
+ if (set_mode(t, vt->type))
return 0;
if (t->mode == V4L2_TUNER_RADIO)
t->audmode = vt->audmode;
+ set_freq(t, 0);
return 0;
}
@@ -1221,7 +1245,8 @@ static int tuner_resume(struct i2c_client *c)
tuner_dbg("resume\n");
if (!t->standby)
- set_mode_freq(c, t, t->type, 0);
+ if (set_mode(t, t->mode) == 0)
+ set_freq(t, 0);
return 0;
}
diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
index 0347bbe3645..742482e3001 100644
--- a/drivers/media/video/tw9910.c
+++ b/drivers/media/video/tw9910.c
@@ -552,16 +552,6 @@ static int tw9910_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
return ret;
}
-static int tw9910_enum_input(struct soc_camera_device *icd,
- struct v4l2_input *inp)
-{
- inp->type = V4L2_INPUT_TYPE_TUNER;
- inp->std = V4L2_STD_UNKNOWN;
- strcpy(inp->name, "Video");
-
- return 0;
-}
-
static int tw9910_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
{
@@ -846,13 +836,9 @@ static int tw9910_video_probe(struct soc_camera_device *icd,
struct tw9910_priv *priv = to_tw9910(client);
s32 id;
- /*
- * We must have a parent by now. And it cannot be a wrong one.
- * So this entire test is completely redundant.
- */
- if (!icd->dev.parent ||
- to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
- return -ENODEV;
+ /* We must have a parent by now. And it cannot be a wrong one. */
+ BUG_ON(!icd->parent ||
+ to_soc_camera_host(icd->parent)->nr != icd->iface);
/*
* tw9910 only use 8 or 16 bit bus width
@@ -891,7 +877,6 @@ static int tw9910_video_probe(struct soc_camera_device *icd,
static struct soc_camera_ops tw9910_ops = {
.set_bus_param = tw9910_set_bus_param,
.query_bus_param = tw9910_query_bus_param,
- .enum_input = tw9910_enum_input,
};
static struct v4l2_subdev_core_ops tw9910_subdev_core_ops = {
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index ea8ea8a48df..5a74f5e07d7 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -45,7 +45,6 @@
*
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
@@ -77,15 +76,7 @@
#define DRIVER_ALIAS "USBVision"
#define DRIVER_DESC "USBVision USB Video Device Driver for Linux"
#define DRIVER_LICENSE "GPL"
-#define USBVISION_DRIVER_VERSION_MAJOR 0
-#define USBVISION_DRIVER_VERSION_MINOR 9
-#define USBVISION_DRIVER_VERSION_PATCHLEVEL 10
-#define USBVISION_DRIVER_VERSION KERNEL_VERSION(USBVISION_DRIVER_VERSION_MAJOR,\
-USBVISION_DRIVER_VERSION_MINOR,\
-USBVISION_DRIVER_VERSION_PATCHLEVEL)
-#define USBVISION_VERSION_STRING __stringify(USBVISION_DRIVER_VERSION_MAJOR) \
-"." __stringify(USBVISION_DRIVER_VERSION_MINOR) \
-"." __stringify(USBVISION_DRIVER_VERSION_PATCHLEVEL)
+#define USBVISION_VERSION_STRING "0.9.11"
#define ENABLE_HEXDUMP 0 /* Enable if you need it */
@@ -516,7 +507,6 @@ static int vidioc_querycap(struct file *file, void *priv,
usbvision_device_data[usbvision->dev_model].model_string,
sizeof(vc->card));
usb_make_path(usbvision->dev, vc->bus_info, sizeof(vc->bus_info));
- vc->version = USBVISION_DRIVER_VERSION;
vc->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_AUDIO |
V4L2_CAP_READWRITE |
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index a4db26fa2f5..10c2364f3e8 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -20,7 +20,7 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "uvcvideo.h"
@@ -1664,8 +1664,8 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
return -EINVAL;
}
- /* Search for the matching (GUID/CS) control in the given device */
- list_for_each_entry(entity, &dev->entities, list) {
+ /* Search for the matching (GUID/CS) control on the current chain */
+ list_for_each_entry(entity, &chain->entities, chain) {
unsigned int i;
if (UVC_ENTITY_TYPE(entity) != UVC_VC_EXTENSION_UNIT ||
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index b6eae48d7fb..d29f9c2d085 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -31,6 +31,7 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
+#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/unaligned.h>
@@ -1857,7 +1858,7 @@ static int uvc_probe(struct usb_interface *intf,
sizeof(dev->mdev.serial));
strcpy(dev->mdev.bus_info, udev->devpath);
dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
- dev->mdev.driver_version = DRIVER_VERSION_NUMBER;
+ dev->mdev.driver_version = LINUX_VERSION_CODE;
if (media_device_register(&dev->mdev) < 0)
goto error;
@@ -2130,6 +2131,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_MINMAX
| UVC_QUIRK_BUILTIN_ISIGHT },
+ /* Foxlink ("HP Webcam" on HP Mini 5103) */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x05c8,
+ .idProduct = 0x0403,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_FIX_BANDWIDTH },
/* Genesys Logic USB 2.0 PC Camera */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
index c3ab0c813be..48fea373c25 100644
--- a/drivers/media/video/uvc/uvc_entity.c
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -27,14 +27,20 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
struct uvc_entity *entity)
{
const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
- struct uvc_entity *remote;
+ struct media_entity *sink;
unsigned int i;
- u8 remote_pad;
- int ret = 0;
+ int ret;
+
+ sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
+ ? (entity->vdev ? &entity->vdev->entity : NULL)
+ : &entity->subdev.entity;
+ if (sink == NULL)
+ return 0;
for (i = 0; i < entity->num_pads; ++i) {
struct media_entity *source;
- struct media_entity *sink;
+ struct uvc_entity *remote;
+ u8 remote_pad;
if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK))
continue;
@@ -43,10 +49,11 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
if (remote == NULL)
return -EINVAL;
- source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
- ? &remote->vdev->entity : &remote->subdev.entity;
- sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
- ? &entity->vdev->entity : &entity->subdev.entity;
+ source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING)
+ ? (remote->vdev ? &remote->vdev->entity : NULL)
+ : &remote->subdev.entity;
+ if (source == NULL)
+ continue;
remote_pad = remote->num_pads - 1;
ret = media_entity_create_link(source, remote_pad,
@@ -55,11 +62,10 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
return ret;
}
- if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
- ret = v4l2_device_register_subdev(&chain->dev->vdev,
- &entity->subdev);
+ if (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
+ return 0;
- return ret;
+ return v4l2_device_register_subdev(&chain->dev->vdev, &entity->subdev);
}
static struct v4l2_subdev_ops uvc_subdev_ops = {
@@ -84,9 +90,11 @@ static int uvc_mc_init_entity(struct uvc_entity *entity)
ret = media_entity_init(&entity->subdev.entity,
entity->num_pads, entity->pads, 0);
- } else
+ } else if (entity->vdev != NULL) {
ret = media_entity_init(&entity->vdev->entity,
entity->num_pads, entity->pads, 0);
+ } else
+ ret = 0;
return ret;
}
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 109a06384a8..677691c4450 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -19,7 +19,7 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "uvcvideo.h"
@@ -104,6 +104,8 @@ static int __uvc_free_buffers(struct uvc_video_queue *queue)
}
if (queue->count) {
+ uvc_queue_cancel(queue, 0);
+ INIT_LIST_HEAD(&queue->mainqueue);
vfree(queue->mem);
queue->count = 0;
}
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 543a80395b7..ea71d5f1f6d 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -21,7 +21,7 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/wait.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
@@ -83,7 +83,7 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
default:
uvc_trace(UVC_TRACE_CONTROL, "Unsupported V4L2 control type "
"%u.\n", xmap->v4l2_type);
- ret = -EINVAL;
+ ret = -ENOTTY;
goto done;
}
@@ -571,7 +571,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
strlcpy(cap->card, vdev->name, sizeof cap->card);
usb_make_path(stream->dev->udev,
cap->bus_info, sizeof(cap->bus_info));
- cap->version = DRIVER_VERSION_NUMBER;
+ cap->version = LINUX_VERSION_CODE;
if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE
| V4L2_CAP_STREAMING;
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index fc766b9f24c..8244167c891 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -19,7 +19,7 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <media/v4l2-common.h>
@@ -1255,8 +1255,10 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
/* Commit the streaming parameters. */
ret = uvc_commit_video(stream, &stream->ctrl);
- if (ret < 0)
+ if (ret < 0) {
+ uvc_queue_enable(&stream->queue, 0);
return ret;
+ }
return uvc_init_video(stream, GFP_KERNEL);
}
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 20107fd3574..df32a43ca86 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -183,8 +183,7 @@ struct uvc_xu_control {
* Driver specific constants.
*/
-#define DRIVER_VERSION_NUMBER KERNEL_VERSION(1, 1, 0)
-#define DRIVER_VERSION "v1.1.0"
+#define DRIVER_VERSION "1.1.1"
/* Number of isochronous URBs. */
#define UVC_URBS 5
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 06b9f9f8201..5c6100fb407 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -105,6 +105,9 @@ int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
menu_items[ctrl->value][0] == '\0')
return -EINVAL;
}
+ if (qctrl->type == V4L2_CTRL_TYPE_BITMASK &&
+ (ctrl->value & ~qctrl->maximum))
+ return -ERANGE;
return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_check);
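/*
 * Worked example (illustrative): for a V4L2_CTRL_TYPE_BITMASK control whose
 * maximum is 0x7 (bits 0-2 valid), a requested value of 0x9 sets bit 3, so
 * 0x9 & ~0x7 == 0x8 != 0 and the new check above rejects it with -ERANGE,
 * while a value of 0x5 passes.
 */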
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index 7c2694738b3..61979b70f38 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -662,6 +662,32 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
return 0;
}
+struct v4l2_event32 {
+ __u32 type;
+ union {
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct compat_timespec timestamp;
+ __u32 id;
+ __u32 reserved[8];
+};
+
+static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up)
+{
+ if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) ||
+ put_user(kp->type, &up->type) ||
+ copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
+ put_user(kp->pending, &up->pending) ||
+ put_user(kp->sequence, &up->sequence) ||
+ put_compat_timespec(&kp->timestamp, &up->timestamp) ||
+ put_user(kp->id, &up->id) ||
+ copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
+ return -EFAULT;
+ return 0;
+}
+
#define VIDIOC_G_FMT32 _IOWR('V', 4, struct v4l2_format32)
#define VIDIOC_S_FMT32 _IOWR('V', 5, struct v4l2_format32)
#define VIDIOC_QUERYBUF32 _IOWR('V', 9, struct v4l2_buffer32)
@@ -675,6 +701,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
#define VIDIOC_G_EXT_CTRLS32 _IOWR('V', 71, struct v4l2_ext_controls32)
#define VIDIOC_S_EXT_CTRLS32 _IOWR('V', 72, struct v4l2_ext_controls32)
#define VIDIOC_TRY_EXT_CTRLS32 _IOWR('V', 73, struct v4l2_ext_controls32)
+#define VIDIOC_DQEVENT32 _IOR ('V', 89, struct v4l2_event32)
#define VIDIOC_OVERLAY32 _IOW ('V', 14, s32)
#define VIDIOC_STREAMON32 _IOW ('V', 18, s32)
@@ -693,6 +720,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
struct v4l2_input v2i;
struct v4l2_standard v2s;
struct v4l2_ext_controls v2ecs;
+ struct v4l2_event v2ev;
unsigned long vx;
int vi;
} karg;
@@ -715,6 +743,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
case VIDIOC_G_EXT_CTRLS32: cmd = VIDIOC_G_EXT_CTRLS; break;
case VIDIOC_S_EXT_CTRLS32: cmd = VIDIOC_S_EXT_CTRLS; break;
case VIDIOC_TRY_EXT_CTRLS32: cmd = VIDIOC_TRY_EXT_CTRLS; break;
+ case VIDIOC_DQEVENT32: cmd = VIDIOC_DQEVENT; break;
case VIDIOC_OVERLAY32: cmd = VIDIOC_OVERLAY; break;
case VIDIOC_STREAMON32: cmd = VIDIOC_STREAMON; break;
case VIDIOC_STREAMOFF32: cmd = VIDIOC_STREAMOFF; break;
@@ -778,6 +807,9 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
err = get_v4l2_ext_controls32(&karg.v2ecs, up);
compatible_arg = 0;
break;
+ case VIDIOC_DQEVENT:
+ compatible_arg = 0;
+ break;
}
if (err)
return err;
@@ -818,6 +850,10 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
err = put_v4l2_framebuffer32(&karg.v2fb, up);
break;
+ case VIDIOC_DQEVENT:
+ err = put_v4l2_event32(&karg.v2ev, up);
+ break;
+
case VIDIOC_G_FMT:
case VIDIOC_S_FMT:
case VIDIOC_TRY_FMT:
@@ -920,6 +956,7 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_S_DV_TIMINGS:
case VIDIOC_G_DV_TIMINGS:
case VIDIOC_DQEVENT:
+ case VIDIOC_DQEVENT32:
case VIDIOC_SUBSCRIBE_EVENT:
case VIDIOC_UNSUBSCRIBE_EVENT:
ret = do_video_ioctl(file, cmd, arg);
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 2412f08527a..06b6014d4fb 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -23,17 +23,39 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-dev.h>
+#define has_op(master, op) \
+ (master->ops && master->ops->op)
+#define call_op(master, op) \
+ (has_op(master, op) ? master->ops->op(master) : 0)
+
/* Internal temporary helper struct, one for each v4l2_ext_control */
-struct ctrl_helper {
+struct v4l2_ctrl_helper {
+ /* Pointer to the control reference of the master control */
+ struct v4l2_ctrl_ref *mref;
/* The control corresponding to the v4l2_ext_control ID field. */
struct v4l2_ctrl *ctrl;
- /* Used internally to mark whether this control was already
- processed. */
- bool handled;
+ /* v4l2_ext_control index of the next control belonging to the
+ same cluster, or 0 if there isn't any. */
+ u32 next;
};
+/* Small helper function to determine if the autocluster is set to manual
+ mode. In that case the is_volatile flag should be ignored. */
+static bool is_cur_manual(const struct v4l2_ctrl *master)
+{
+ return master->is_auto && master->cur.val == master->manual_mode_value;
+}
+
+/* Same as above, but this checks against the new value instead of the
+ current value. */
+static bool is_new_manual(const struct v4l2_ctrl *master)
+{
+ return master->is_auto && master->val == master->manual_mode_value;
+}
+
/* Returns NULL or a character pointer array containing the menu for
the given control ID. The pointer array ends with a NULL pointer.
An empty string signifies a menu entry that is invalid. This allows
@@ -181,7 +203,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
};
static const char * const mpeg_stream_vbi_fmt[] = {
"No VBI",
- "Private packet, IVTV format",
+ "Private Packet, IVTV Format",
NULL
};
static const char * const camera_power_line_frequency[] = {
@@ -204,18 +226,130 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Negative",
"Emboss",
"Sketch",
- "Sky blue",
- "Grass green",
- "Skin whiten",
+ "Sky Blue",
+ "Grass Green",
+ "Skin Whiten",
"Vivid",
NULL
};
static const char * const tune_preemphasis[] = {
- "No preemphasis",
+ "No Preemphasis",
"50 useconds",
"75 useconds",
NULL,
};
+ static const char * const header_mode[] = {
+ "Separate Buffer",
+ "Joined With 1st Frame",
+ NULL,
+ };
+ static const char * const multi_slice[] = {
+ "Single",
+ "Max Macroblocks",
+ "Max Bytes",
+ NULL,
+ };
+ static const char * const entropy_mode[] = {
+ "CAVLC",
+ "CABAC",
+ NULL,
+ };
+ static const char * const mpeg_h264_level[] = {
+ "1",
+ "1b",
+ "1.1",
+ "1.2",
+ "1.3",
+ "2",
+ "2.1",
+ "2.2",
+ "3",
+ "3.1",
+ "3.2",
+ "4",
+ "4.1",
+ "4.2",
+ "5",
+ "5.1",
+ NULL,
+ };
+ static const char * const h264_loop_filter[] = {
+ "Enabled",
+ "Disabled",
+ "Disabled at Slice Boundary",
+ NULL,
+ };
+ static const char * const h264_profile[] = {
+ "Baseline",
+ "Constrained Baseline",
+ "Main",
+ "Extended",
+ "High",
+ "High 10",
+ "High 422",
+ "High 444 Predictive",
+ "High 10 Intra",
+ "High 422 Intra",
+ "High 444 Intra",
+ "CAVLC 444 Intra",
+ "Scalable Baseline",
+ "Scalable High",
+ "Scalable High Intra",
+ "Multiview High",
+ NULL,
+ };
+ static const char * const vui_sar_idc[] = {
+ "Unspecified",
+ "1:1",
+ "12:11",
+ "10:11",
+ "16:11",
+ "40:33",
+ "24:11",
+ "20:11",
+ "32:11",
+ "80:33",
+ "18:11",
+ "15:11",
+ "64:33",
+ "160:99",
+ "4:3",
+ "3:2",
+ "2:1",
+ "Extended SAR",
+ NULL,
+ };
+ static const char * const mpeg_mpeg4_level[] = {
+ "0",
+ "0b",
+ "1",
+ "2",
+ "3",
+ "3b",
+ "4",
+ "5",
+ NULL,
+ };
+ static const char * const mpeg4_profile[] = {
+ "Simple",
+ "Advanced Simple",
+ "Core",
+ "Simple Scalable",
+ "Advanced Coding Efficiency",
+ NULL,
+ };
+
+ static const char * const flash_led_mode[] = {
+ "Off",
+ "Flash",
+ "Torch",
+ NULL,
+ };
+ static const char * const flash_strobe_source[] = {
+ "Software",
+ "External",
+ NULL,
+ };
switch (id) {
case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
@@ -256,6 +390,28 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return colorfx;
case V4L2_CID_TUNE_PREEMPHASIS:
return tune_preemphasis;
+ case V4L2_CID_FLASH_LED_MODE:
+ return flash_led_mode;
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ return flash_strobe_source;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ return header_mode;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ return multi_slice;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ return entropy_mode;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ return mpeg_h264_level;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ return h264_loop_filter;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ return h264_profile;
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ return vui_sar_idc;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ return mpeg_mpeg4_level;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ return mpeg4_profile;
default:
return NULL;
}
@@ -307,6 +463,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_CHROMA_GAIN: return "Chroma Gain";
case V4L2_CID_ILLUMINATORS_1: return "Illuminator 1";
case V4L2_CID_ILLUMINATORS_2: return "Illuminator 2";
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: return "Minimum Number of Capture Buffers";
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: return "Minimum Number of Output Buffers";
/* MPEG controls */
/* Keep the order of the 'case's the same as in videodev2.h! */
@@ -343,6 +501,48 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return "Video Temporal Decimation";
case V4L2_CID_MPEG_VIDEO_MUTE: return "Video Mute";
case V4L2_CID_MPEG_VIDEO_MUTE_YUV: return "Video Mute YUV";
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE: return "Decoder Slice Interface";
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: return "MPEG4 Loop Filter Enable";
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: return "The Number of Intra Refresh MBs";
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE: return "Frame Level Rate Control Enable";
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE: return "H264 MB Level Rate Control";
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE: return "Sequence Header Mode";
+ case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC: return "The Max Number of Reference Pictures";
+ case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP: return "H263 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP: return "H263 P frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP: return "H263 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_MIN_QP: return "H263 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H263_MAX_QP: return "H263 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: return "H264 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: return "H264 P frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: return "H264 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: return "H264 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: return "H264 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM: return "H264 8x8 Transform Enable";
+ case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE: return "H264 CPB Buffer Size";
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: return "H264 Entropy Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: return "H264 I Period";
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL: return "H264 Level";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: return "H264 Loop Filter Alpha Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: return "H264 Loop Filter Beta Offset";
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: return "H264 Loop Filter Mode";
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE: return "H264 Profile";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT: return "Vertical Size of SAR";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH: return "Horizontal Size of SAR";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE: return "Aspect Ratio VUI Enable";
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC: return "VUI Aspect Ratio IDC";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B frame QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP: return "MPEG4 Minimum QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP: return "MPEG4 Maximum QP Value";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: return "MPEG4 Level";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: return "MPEG4 Profile";
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL: return "Quarter Pixel Search Enable";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: return "The Maximum Bytes Per Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: return "The Number of MB in a Slice";
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: return "The Slice Partitioning Method";
+ case V4L2_CID_MPEG_VIDEO_VBV_SIZE: return "VBV Buffer Size";
/* CAMERA controls */
/* Keep the order of the 'case's the same as in videodev2.h! */
@@ -389,6 +589,21 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_TUNE_POWER_LEVEL: return "Tune Power Level";
case V4L2_CID_TUNE_ANTENNA_CAPACITOR: return "Tune Antenna Capacitor";
+ /* Flash controls */
+ case V4L2_CID_FLASH_CLASS: return "Flash controls";
+ case V4L2_CID_FLASH_LED_MODE: return "LED mode";
+ case V4L2_CID_FLASH_STROBE_SOURCE: return "Strobe source";
+ case V4L2_CID_FLASH_STROBE: return "Strobe";
+ case V4L2_CID_FLASH_STROBE_STOP: return "Stop strobe";
+ case V4L2_CID_FLASH_STROBE_STATUS: return "Strobe status";
+ case V4L2_CID_FLASH_TIMEOUT: return "Strobe timeout";
+ case V4L2_CID_FLASH_INTENSITY: return "Intensity, flash mode";
+ case V4L2_CID_FLASH_TORCH_INTENSITY: return "Intensity, torch mode";
+ case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, indicator";
+ case V4L2_CID_FLASH_FAULT: return "Faults";
+ case V4L2_CID_FLASH_CHARGE: return "Charge";
+ case V4L2_CID_FLASH_READY: return "Ready to strobe";
+
default:
return NULL;
}
@@ -423,12 +638,24 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_PILOT_TONE_ENABLED:
case V4L2_CID_ILLUMINATORS_1:
case V4L2_CID_ILLUMINATORS_2:
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ case V4L2_CID_FLASH_CHARGE:
+ case V4L2_CID_FLASH_READY:
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
*type = V4L2_CTRL_TYPE_BOOLEAN;
*min = 0;
*max = *step = 1;
break;
case V4L2_CID_PAN_RESET:
case V4L2_CID_TILT_RESET:
+ case V4L2_CID_FLASH_STROBE:
+ case V4L2_CID_FLASH_STROBE_STOP:
*type = V4L2_CTRL_TYPE_BUTTON;
*flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
*min = *max = *step = *def = 0;
@@ -452,6 +679,17 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_EXPOSURE_AUTO:
case V4L2_CID_COLORFX:
case V4L2_CID_TUNE_PREEMPHASIS:
+ case V4L2_CID_FLASH_LED_MODE:
+ case V4L2_CID_FLASH_STROBE_SOURCE:
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
*type = V4L2_CTRL_TYPE_MENU;
break;
case V4L2_CID_RDS_TX_PS_NAME:
@@ -462,6 +700,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_CAMERA_CLASS:
case V4L2_CID_MPEG_CLASS:
case V4L2_CID_FM_TX_CLASS:
+ case V4L2_CID_FLASH_CLASS:
*type = V4L2_CTRL_TYPE_CTRL_CLASS;
/* You can neither read not write these */
*flags |= V4L2_CTRL_FLAG_READ_ONLY | V4L2_CTRL_FLAG_WRITE_ONLY;
@@ -474,6 +713,14 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
/* Max is calculated as RGB888 that is 2^24 */
*max = 0xFFFFFF;
break;
+ case V4L2_CID_FLASH_FAULT:
+ *type = V4L2_CTRL_TYPE_BITMASK;
+ break;
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+ *type = V4L2_CTRL_TYPE_INTEGER;
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
@@ -519,6 +766,10 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_ZOOM_RELATIVE:
*flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
break;
+ case V4L2_CID_FLASH_STROBE_STATUS:
+ case V4L2_CID_FLASH_READY:
+ *flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ break;
}
}
EXPORT_SYMBOL(v4l2_ctrl_fill);
@@ -537,6 +788,42 @@ static bool type_is_int(const struct v4l2_ctrl *ctrl)
}
}
+static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
+{
+ memset(ev->reserved, 0, sizeof(ev->reserved));
+ ev->type = V4L2_EVENT_CTRL;
+ ev->id = ctrl->id;
+ ev->u.ctrl.changes = changes;
+ ev->u.ctrl.type = ctrl->type;
+ ev->u.ctrl.flags = ctrl->flags;
+ if (ctrl->type == V4L2_CTRL_TYPE_STRING)
+ ev->u.ctrl.value64 = 0;
+ else
+ ev->u.ctrl.value64 = ctrl->cur.val64;
+ ev->u.ctrl.minimum = ctrl->minimum;
+ ev->u.ctrl.maximum = ctrl->maximum;
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU)
+ ev->u.ctrl.step = 1;
+ else
+ ev->u.ctrl.step = ctrl->step;
+ ev->u.ctrl.default_value = ctrl->default_value;
+}
+
+static void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes)
+{
+ struct v4l2_event ev;
+ struct v4l2_subscribed_event *sev;
+
+ if (list_empty(&ctrl->ev_subs))
+ return;
+ fill_event(&ev, ctrl, changes);
+
+ list_for_each_entry(sev, &ctrl->ev_subs, node)
+ if (sev->fh && (sev->fh != fh ||
+ (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK)))
+ v4l2_event_queue_fh(sev->fh, &ev);
+}
+
/* Helper function: copy the current control value back to the caller */
static int cur_to_user(struct v4l2_ext_control *c,
struct v4l2_ctrl *ctrl)
@@ -624,22 +911,45 @@ static int new_to_user(struct v4l2_ext_control *c,
}
/* Copy the new value to the current value. */
-static void new_to_cur(struct v4l2_ctrl *ctrl)
+static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl,
+ bool update_inactive)
{
+ bool changed = false;
+
if (ctrl == NULL)
return;
switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_BUTTON:
+ changed = true;
+ break;
case V4L2_CTRL_TYPE_STRING:
/* strings are always 0-terminated */
+ changed = strcmp(ctrl->string, ctrl->cur.string);
strcpy(ctrl->cur.string, ctrl->string);
break;
case V4L2_CTRL_TYPE_INTEGER64:
+ changed = ctrl->val64 != ctrl->cur.val64;
ctrl->cur.val64 = ctrl->val64;
break;
default:
+ changed = ctrl->val != ctrl->cur.val;
ctrl->cur.val = ctrl->val;
break;
}
+ if (update_inactive) {
+ ctrl->flags &= ~V4L2_CTRL_FLAG_INACTIVE;
+ if (!is_cur_manual(ctrl->cluster[0]))
+ ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
+ }
+ if (changed || update_inactive) {
+ /* If a control was changed that was not one of the controls
+ modified by the application, then send the event to all. */
+ if (!ctrl->is_new)
+ fh = NULL;
+ send_event(fh, ctrl,
+ (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) |
+ (update_inactive ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
+ }
}
/* Copy the current value to the new value */
@@ -692,13 +1002,11 @@ static int cluster_changed(struct v4l2_ctrl *master)
return diff;
}
-/* Validate a new control */
-static int validate_new(struct v4l2_ctrl *ctrl)
+/* Validate integer-type control */
+static int validate_new_int(const struct v4l2_ctrl *ctrl, s32 *pval)
{
- s32 val = ctrl->val;
- char *s = ctrl->string;
+ s32 val = *pval;
u32 offset;
- size_t len;
switch (ctrl->type) {
case V4L2_CTRL_TYPE_INTEGER:
@@ -711,11 +1019,11 @@ static int validate_new(struct v4l2_ctrl *ctrl)
offset = val - ctrl->minimum;
offset = ctrl->step * (offset / ctrl->step);
val = ctrl->minimum + offset;
- ctrl->val = val;
+ *pval = val;
return 0;
case V4L2_CTRL_TYPE_BOOLEAN:
- ctrl->val = !!ctrl->val;
+ *pval = !!val;
return 0;
case V4L2_CTRL_TYPE_MENU:
@@ -726,11 +1034,35 @@ static int validate_new(struct v4l2_ctrl *ctrl)
return -EINVAL;
return 0;
+ case V4L2_CTRL_TYPE_BITMASK:
+ *pval &= ctrl->maximum;
+ return 0;
+
case V4L2_CTRL_TYPE_BUTTON:
case V4L2_CTRL_TYPE_CTRL_CLASS:
- ctrl->val64 = 0;
+ *pval = 0;
return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Validate a new control */
+static int validate_new(const struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c)
+{
+ char *s = c->string;
+ size_t len;
+
+ switch (ctrl->type) {
+ case V4L2_CTRL_TYPE_INTEGER:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_BITMASK:
+ case V4L2_CTRL_TYPE_BUTTON:
+ case V4L2_CTRL_TYPE_CTRL_CLASS:
+ return validate_new_int(ctrl, &c->value);
+
case V4L2_CTRL_TYPE_INTEGER64:
return 0;
@@ -780,6 +1112,7 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
{
struct v4l2_ctrl_ref *ref, *next_ref;
struct v4l2_ctrl *ctrl, *next_ctrl;
+ struct v4l2_subscribed_event *sev, *next_sev;
if (hdl == NULL || hdl->buckets == NULL)
return;
@@ -793,6 +1126,8 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
/* Free all controls owned by the handler */
list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) {
list_del(&ctrl->node);
+ list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
+ list_del(&sev->node);
kfree(ctrl);
}
kfree(hdl->buckets);
@@ -962,13 +1297,17 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
/* Sanity checks */
if (id == 0 || name == NULL || id >= V4L2_CID_PRIVATE_BASE ||
- max < min ||
(type == V4L2_CTRL_TYPE_INTEGER && step == 0) ||
+ (type == V4L2_CTRL_TYPE_BITMASK && max == 0) ||
(type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) ||
(type == V4L2_CTRL_TYPE_STRING && max == 0)) {
handler_set_err(hdl, -ERANGE);
return NULL;
}
+ if (type != V4L2_CTRL_TYPE_BITMASK && max < min) {
+ handler_set_err(hdl, -ERANGE);
+ return NULL;
+ }
if ((type == V4L2_CTRL_TYPE_INTEGER ||
type == V4L2_CTRL_TYPE_MENU ||
type == V4L2_CTRL_TYPE_BOOLEAN) &&
@@ -976,6 +1315,10 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
handler_set_err(hdl, -ERANGE);
return NULL;
}
+ if (type == V4L2_CTRL_TYPE_BITMASK && ((def & ~max) || min || step)) {
+ handler_set_err(hdl, -ERANGE);
+ return NULL;
+ }
if (type == V4L2_CTRL_TYPE_BUTTON)
flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
@@ -991,6 +1334,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
}
INIT_LIST_HEAD(&ctrl->node);
+ INIT_LIST_HEAD(&ctrl->ev_subs);
ctrl->handler = hdl;
ctrl->ops = ops;
ctrl->id = id;
@@ -1132,6 +1476,9 @@ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
/* Skip handler-private controls. */
if (ctrl->is_private)
continue;
+ /* And control classes */
+ if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
+ continue;
ret = handler_new_ref(hdl, ctrl);
if (ret)
break;
@@ -1147,7 +1494,7 @@ void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
int i;
/* The first control is the master control and it must not be NULL */
- BUG_ON(controls[0] == NULL);
+ BUG_ON(ncontrols == 0 || controls[0] == NULL);
for (i = 0; i < ncontrols; i++) {
if (controls[i]) {
@@ -1158,18 +1505,47 @@ void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls)
}
EXPORT_SYMBOL(v4l2_ctrl_cluster);
+void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls,
+ u8 manual_val, bool set_volatile)
+{
+ struct v4l2_ctrl *master = controls[0];
+ u32 flag;
+ int i;
+
+ v4l2_ctrl_cluster(ncontrols, controls);
+ WARN_ON(ncontrols <= 1);
+ WARN_ON(manual_val < master->minimum || manual_val > master->maximum);
+ master->is_auto = true;
+ master->manual_mode_value = manual_val;
+ master->flags |= V4L2_CTRL_FLAG_UPDATE;
+ flag = is_cur_manual(master) ? 0 : V4L2_CTRL_FLAG_INACTIVE;
+
+ for (i = 1; i < ncontrols; i++)
+ if (controls[i]) {
+ controls[i]->is_volatile = set_volatile;
+ controls[i]->flags |= flag;
+ }
+}
+EXPORT_SYMBOL(v4l2_ctrl_auto_cluster);
+
/* Activate/deactivate a control. */
void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active)
{
+ /* invert since the actual flag is called 'inactive' */
+ bool inactive = !active;
+ bool old;
+
if (ctrl == NULL)
return;
- if (!active)
+ if (inactive)
/* set V4L2_CTRL_FLAG_INACTIVE */
- set_bit(4, &ctrl->flags);
+ old = test_and_set_bit(4, &ctrl->flags);
else
/* clear V4L2_CTRL_FLAG_INACTIVE */
- clear_bit(4, &ctrl->flags);
+ old = test_and_clear_bit(4, &ctrl->flags);
+ if (old != inactive)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
}
EXPORT_SYMBOL(v4l2_ctrl_activate);
@@ -1181,15 +1557,21 @@ EXPORT_SYMBOL(v4l2_ctrl_activate);
these controls. */
void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
{
+ bool old;
+
if (ctrl == NULL)
return;
+ v4l2_ctrl_lock(ctrl);
if (grabbed)
/* set V4L2_CTRL_FLAG_GRABBED */
- set_bit(1, &ctrl->flags);
+ old = test_and_set_bit(1, &ctrl->flags);
else
/* clear V4L2_CTRL_FLAG_GRABBED */
- clear_bit(1, &ctrl->flags);
+ old = test_and_clear_bit(1, &ctrl->flags);
+ if (old != grabbed)
+ send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
+ v4l2_ctrl_unlock(ctrl);
}
EXPORT_SYMBOL(v4l2_ctrl_grab);
@@ -1217,6 +1599,9 @@ static void log_ctrl(const struct v4l2_ctrl *ctrl,
case V4L2_CTRL_TYPE_MENU:
printk(KERN_CONT "%s", ctrl->qmenu[ctrl->cur.val]);
break;
+ case V4L2_CTRL_TYPE_BITMASK:
+ printk(KERN_CONT "0x%08x", ctrl->cur.val);
+ break;
case V4L2_CTRL_TYPE_INTEGER64:
printk(KERN_CONT "%lld", ctrl->cur.val64);
break;
@@ -1277,26 +1662,21 @@ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
int i;
/* Skip if this control was already handled by a cluster. */
- if (ctrl->done)
+ /* Skip button controls and read-only controls. */
+ if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
+ (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
continue;
for (i = 0; i < master->ncontrols; i++) {
if (master->cluster[i]) {
cur_to_new(master->cluster[i]);
master->cluster[i]->is_new = 1;
+ master->cluster[i]->done = true;
}
}
-
- /* Skip button controls and read-only controls. */
- if (ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
- (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
- continue;
- ret = master->ops->s_ctrl(master);
+ ret = call_op(master, s_ctrl);
if (ret)
break;
- for (i = 0; i < master->ncontrols; i++)
- if (master->cluster[i])
- master->cluster[i]->done = true;
}
mutex_unlock(&hdl->lock);
return ret;
@@ -1447,18 +1827,19 @@ EXPORT_SYMBOL(v4l2_subdev_querymenu);
Find the controls in the control array and do some basic checks. */
static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
struct v4l2_ext_controls *cs,
- struct ctrl_helper *helpers,
- bool try)
+ struct v4l2_ctrl_helper *helpers)
{
+ struct v4l2_ctrl_helper *h;
+ bool have_clusters = false;
u32 i;
- for (i = 0; i < cs->count; i++) {
+ for (i = 0, h = helpers; i < cs->count; i++, h++) {
struct v4l2_ext_control *c = &cs->controls[i];
+ struct v4l2_ctrl_ref *ref;
struct v4l2_ctrl *ctrl;
u32 id = c->id & V4L2_CTRL_ID_MASK;
- if (try)
- cs->error_idx = i;
+ cs->error_idx = i;
if (cs->ctrl_class && V4L2_CTRL_ID2CLASS(id) != cs->ctrl_class)
return -EINVAL;
@@ -1467,53 +1848,59 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
extended controls */
if (id >= V4L2_CID_PRIVATE_BASE)
return -EINVAL;
- ctrl = v4l2_ctrl_find(hdl, id);
- if (ctrl == NULL)
+ ref = find_ref_lock(hdl, id);
+ if (ref == NULL)
return -EINVAL;
+ ctrl = ref->ctrl;
if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED)
return -EINVAL;
- helpers[i].ctrl = ctrl;
- helpers[i].handled = false;
+ if (ctrl->cluster[0]->ncontrols > 1)
+ have_clusters = true;
+ if (ctrl->cluster[0] != ctrl)
+ ref = find_ref_lock(hdl, ctrl->cluster[0]->id);
+ /* Store the ref to the master control of the cluster */
+ h->mref = ref;
+ h->ctrl = ctrl;
+ /* Initially set next to 0, meaning that there is no other
+ control in this helper array belonging to the same
+ cluster */
+ h->next = 0;
}
- return 0;
-}
-typedef int (*cluster_func)(struct v4l2_ext_control *c,
- struct v4l2_ctrl *ctrl);
+ /* We are done if there were no controls that belong to a multi-
+ control cluster. */
+ if (!have_clusters)
+ return 0;
-/* Walk over all controls in v4l2_ext_controls belonging to the same cluster
- and call the provided function. */
-static int cluster_walk(unsigned from,
- struct v4l2_ext_controls *cs,
- struct ctrl_helper *helpers,
- cluster_func f)
-{
- struct v4l2_ctrl **cluster = helpers[from].ctrl->cluster;
- int ret = 0;
- int i;
+ /* The code below figures out in O(n) time which controls in the list
+ belong to the same cluster. */
- /* Find any controls from the same cluster and call the function */
- for (i = from; !ret && i < cs->count; i++) {
- struct v4l2_ctrl *ctrl = helpers[i].ctrl;
+ /* This has to be done with the handler lock taken. */
+ mutex_lock(&hdl->lock);
- if (!helpers[i].handled && ctrl->cluster == cluster)
- ret = f(&cs->controls[i], ctrl);
+ /* First zero the helper field in the master control references */
+ for (i = 0; i < cs->count; i++)
+ helpers[i].mref->helper = 0;
+ for (i = 0, h = helpers; i < cs->count; i++, h++) {
+ struct v4l2_ctrl_ref *mref = h->mref;
+
+ /* If the mref->helper is set, then it points to an earlier
+ helper that belongs to the same cluster. */
+ if (mref->helper) {
+ /* Set the next field of mref->helper to the current
+ index: this means that that earlier helper now
+ points to the next helper in the same cluster. */
+ mref->helper->next = i;
+ /* mref should be set only for the first helper in the
+ cluster, clear the others. */
+ h->mref = NULL;
+ }
+ /* Point the mref helper to the current helper struct. */
+ mref->helper = h;
}
- return ret;
-}
-
-static void cluster_done(unsigned from,
- struct v4l2_ext_controls *cs,
- struct ctrl_helper *helpers)
-{
- struct v4l2_ctrl **cluster = helpers[from].ctrl->cluster;
- int i;
-
- /* Find any controls from the same cluster and mark them as handled */
- for (i = from; i < cs->count; i++)
- if (helpers[i].ctrl->cluster == cluster)
- helpers[i].handled = true;
+ mutex_unlock(&hdl->lock);
+ return 0;
}
/* Handles the corner case where cs->count == 0. It checks whether the
@@ -1531,10 +1918,10 @@ static int class_check(struct v4l2_ctrl_handler *hdl, u32 ctrl_class)
/* Get extended controls. Allocates the helpers array if needed. */
int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
{
- struct ctrl_helper helper[4];
- struct ctrl_helper *helpers = helper;
+ struct v4l2_ctrl_helper helper[4];
+ struct v4l2_ctrl_helper *helpers = helper;
int ret;
- int i;
+ int i, j;
cs->error_idx = cs->count;
cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class);
@@ -1551,30 +1938,46 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
return -ENOMEM;
}
- ret = prepare_ext_ctrls(hdl, cs, helpers, false);
+ ret = prepare_ext_ctrls(hdl, cs, helpers);
+ cs->error_idx = cs->count;
for (i = 0; !ret && i < cs->count; i++)
if (helpers[i].ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
ret = -EACCES;
for (i = 0; !ret && i < cs->count; i++) {
- struct v4l2_ctrl *ctrl = helpers[i].ctrl;
- struct v4l2_ctrl *master = ctrl->cluster[0];
+ int (*ctrl_to_user)(struct v4l2_ext_control *c,
+ struct v4l2_ctrl *ctrl) = cur_to_user;
+ struct v4l2_ctrl *master;
- if (helpers[i].handled)
+ if (helpers[i].mref == NULL)
continue;
+ master = helpers[i].mref->ctrl;
cs->error_idx = i;
v4l2_ctrl_lock(master);
- /* g_volatile_ctrl will update the current control values */
- if (ctrl->is_volatile && master->ops->g_volatile_ctrl)
- ret = master->ops->g_volatile_ctrl(master);
- /* If OK, then copy the current control values to the caller */
- if (!ret)
- ret = cluster_walk(i, cs, helpers, cur_to_user);
+
+ /* g_volatile_ctrl will update the new control values */
+ if (has_op(master, g_volatile_ctrl) && !is_cur_manual(master)) {
+ for (j = 0; j < master->ncontrols; j++)
+ cur_to_new(master->cluster[j]);
+ ret = call_op(master, g_volatile_ctrl);
+ ctrl_to_user = new_to_user;
+ }
+ /* If OK, then copy the current (for non-volatile controls)
+ or the new (for volatile controls) control values to the
+ caller */
+ if (!ret) {
+ u32 idx = i;
+
+ do {
+ ret = ctrl_to_user(cs->controls + idx,
+ helpers[idx].ctrl);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+ }
v4l2_ctrl_unlock(master);
- cluster_done(i, cs, helpers);
}
if (cs->count > ARRAY_SIZE(helper))
@@ -1594,15 +1997,21 @@ static int get_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
{
struct v4l2_ctrl *master = ctrl->cluster[0];
int ret = 0;
+ int i;
if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
return -EACCES;
v4l2_ctrl_lock(master);
/* g_volatile_ctrl will update the current control values */
- if (ctrl->is_volatile && master->ops->g_volatile_ctrl)
- ret = master->ops->g_volatile_ctrl(master);
- *val = ctrl->cur.val;
+ if (ctrl->is_volatile && !is_cur_manual(master)) {
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ ret = call_op(master, g_volatile_ctrl);
+ *val = ctrl->val;
+ } else {
+ *val = ctrl->cur.val;
+ }
v4l2_ctrl_unlock(master);
return ret;
}
@@ -1638,72 +2047,61 @@ EXPORT_SYMBOL(v4l2_ctrl_g_ctrl);
/* Core function that calls try/s_ctrl and ensures that the new value is
copied to the current value on a set.
Must be called with ctrl->handler->lock held. */
-static int try_or_set_control_cluster(struct v4l2_ctrl *master, bool set)
+static int try_or_set_cluster(struct v4l2_fh *fh,
+ struct v4l2_ctrl *master, bool set)
{
- bool try = !set;
- int ret = 0;
+ bool update_flag;
+ int ret;
int i;
/* Go through the cluster and either validate the new value or
(if no new value was set), copy the current value to the new
value, ensuring a consistent view for the control ops when
called. */
- for (i = 0; !ret && i < master->ncontrols; i++) {
+ for (i = 0; i < master->ncontrols; i++) {
struct v4l2_ctrl *ctrl = master->cluster[i];
if (ctrl == NULL)
continue;
- if (ctrl->is_new) {
- /* Double check this: it may have changed since the
- last check in try_or_set_ext_ctrls(). */
- if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
- return -EBUSY;
-
- /* Validate if required */
- if (!set)
- ret = validate_new(ctrl);
+ if (!ctrl->is_new) {
+ cur_to_new(ctrl);
continue;
}
- /* No new value was set, so copy the current and force
- a call to try_ctrl later, since the values for the cluster
- may now have changed and the end result might be invalid. */
- try = true;
- cur_to_new(ctrl);
+ /* Check again: it may have changed since the
+ previous check in try_or_set_ext_ctrls(). */
+ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
+ return -EBUSY;
}
- /* For larger clusters you have to call try_ctrl again to
- verify that the controls are still valid after the
- 'cur_to_new' above. */
- if (!ret && master->ops->try_ctrl && try)
- ret = master->ops->try_ctrl(master);
+ ret = call_op(master, try_ctrl);
/* Don't set if there is no change */
- if (!ret && set && cluster_changed(master)) {
- ret = master->ops->s_ctrl(master);
- /* If OK, then make the new values permanent. */
- if (!ret)
- for (i = 0; i < master->ncontrols; i++)
- new_to_cur(master->cluster[i]);
- }
- return ret;
+ if (ret || !set || !cluster_changed(master))
+ return ret;
+ ret = call_op(master, s_ctrl);
+ if (ret)
+ return ret;
+
+ /* If OK, then make the new values permanent. */
+ update_flag = is_cur_manual(master) != is_new_manual(master);
+ for (i = 0; i < master->ncontrols; i++)
+ new_to_cur(fh, master->cluster[i], update_flag && i > 0);
+ return 0;
}
-/* Try or set controls. */
-static int try_or_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs,
- struct ctrl_helper *helpers,
- bool set)
+/* Validate controls. */
+static int validate_ctrls(struct v4l2_ext_controls *cs,
+ struct v4l2_ctrl_helper *helpers, bool set)
{
- unsigned i, j;
+ unsigned i;
int ret = 0;
cs->error_idx = cs->count;
for (i = 0; i < cs->count; i++) {
struct v4l2_ctrl *ctrl = helpers[i].ctrl;
- if (!set)
- cs->error_idx = i;
+ cs->error_idx = i;
if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
return -EACCES;
@@ -1715,50 +2113,22 @@ static int try_or_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
best-effort to avoid that. */
if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
return -EBUSY;
+ ret = validate_new(ctrl, &cs->controls[i]);
+ if (ret)
+ return ret;
}
-
- for (i = 0; !ret && i < cs->count; i++) {
- struct v4l2_ctrl *ctrl = helpers[i].ctrl;
- struct v4l2_ctrl *master = ctrl->cluster[0];
-
- cs->error_idx = i;
-
- if (helpers[i].handled)
- continue;
-
- v4l2_ctrl_lock(ctrl);
-
- /* Reset the 'is_new' flags of the cluster */
- for (j = 0; j < master->ncontrols; j++)
- if (master->cluster[j])
- master->cluster[j]->is_new = 0;
-
- /* Copy the new caller-supplied control values.
- user_to_new() sets 'is_new' to 1. */
- ret = cluster_walk(i, cs, helpers, user_to_new);
-
- if (!ret)
- ret = try_or_set_control_cluster(master, set);
-
- /* Copy the new values back to userspace. */
- if (!ret)
- ret = cluster_walk(i, cs, helpers, new_to_user);
-
- v4l2_ctrl_unlock(ctrl);
- cluster_done(i, cs, helpers);
- }
- return ret;
+ return 0;
}
/* Try or try-and-set controls */
-static int try_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
+static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
struct v4l2_ext_controls *cs,
bool set)
{
- struct ctrl_helper helper[4];
- struct ctrl_helper *helpers = helper;
+ struct v4l2_ctrl_helper helper[4];
+ struct v4l2_ctrl_helper *helpers = helper;
+ unsigned i, j;
int ret;
- int i;
cs->error_idx = cs->count;
cs->ctrl_class = V4L2_CTRL_ID2CLASS(cs->ctrl_class);
@@ -1774,25 +2144,49 @@ static int try_set_ext_ctrls(struct v4l2_ctrl_handler *hdl,
if (!helpers)
return -ENOMEM;
}
- ret = prepare_ext_ctrls(hdl, cs, helpers, !set);
- if (ret)
- goto free;
-
- /* First 'try' all controls and abort on error */
- ret = try_or_set_ext_ctrls(hdl, cs, helpers, false);
- /* If this is a 'set' operation and the initial 'try' failed,
- then set error_idx to count to tell the application that no
- controls changed value yet. */
- if (set)
+ ret = prepare_ext_ctrls(hdl, cs, helpers);
+ if (!ret)
+ ret = validate_ctrls(cs, helpers, set);
+ if (ret && set)
cs->error_idx = cs->count;
- if (!ret && set) {
- /* Reset 'handled' state */
- for (i = 0; i < cs->count; i++)
- helpers[i].handled = false;
- ret = try_or_set_ext_ctrls(hdl, cs, helpers, true);
+ for (i = 0; !ret && i < cs->count; i++) {
+ struct v4l2_ctrl *master;
+ u32 idx = i;
+
+ if (helpers[i].mref == NULL)
+ continue;
+
+ cs->error_idx = i;
+ master = helpers[i].mref->ctrl;
+ v4l2_ctrl_lock(master);
+
+ /* Reset the 'is_new' flags of the cluster */
+ for (j = 0; j < master->ncontrols; j++)
+ if (master->cluster[j])
+ master->cluster[j]->is_new = 0;
+
+ /* Copy the new caller-supplied control values.
+ user_to_new() sets 'is_new' to 1. */
+ do {
+ ret = user_to_new(cs->controls + idx, helpers[idx].ctrl);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+
+ if (!ret)
+ ret = try_or_set_cluster(fh, master, set);
+
+ /* Copy the new values back to userspace. */
+ if (!ret) {
+ idx = i;
+ do {
+ ret = new_to_user(cs->controls + idx,
+ helpers[idx].ctrl);
+ idx = helpers[idx].next;
+ } while (!ret && idx);
+ }
+ v4l2_ctrl_unlock(master);
}
-free:
if (cs->count > ARRAY_SIZE(helper))
kfree(helpers);
return ret;
@@ -1800,37 +2194,39 @@ free:
int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(hdl, cs, false);
+ return try_set_ext_ctrls(NULL, hdl, cs, false);
}
EXPORT_SYMBOL(v4l2_try_ext_ctrls);
-int v4l2_s_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
+int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(hdl, cs, true);
+ return try_set_ext_ctrls(fh, hdl, cs, true);
}
EXPORT_SYMBOL(v4l2_s_ext_ctrls);
int v4l2_subdev_try_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(sd->ctrl_handler, cs, false);
+ return try_set_ext_ctrls(NULL, sd->ctrl_handler, cs, false);
}
EXPORT_SYMBOL(v4l2_subdev_try_ext_ctrls);
int v4l2_subdev_s_ext_ctrls(struct v4l2_subdev *sd, struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(sd->ctrl_handler, cs, true);
+ return try_set_ext_ctrls(NULL, sd->ctrl_handler, cs, true);
}
EXPORT_SYMBOL(v4l2_subdev_s_ext_ctrls);
/* Helper function for VIDIOC_S_CTRL compatibility */
-static int set_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
+static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, s32 *val)
{
struct v4l2_ctrl *master = ctrl->cluster[0];
int ret;
int i;
- if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
- return -EACCES;
+ ret = validate_new_int(ctrl, val);
+ if (ret)
+ return ret;
v4l2_ctrl_lock(ctrl);
@@ -1841,28 +2237,30 @@ static int set_ctrl(struct v4l2_ctrl *ctrl, s32 *val)
ctrl->val = *val;
ctrl->is_new = 1;
- ret = try_or_set_control_cluster(master, false);
- if (!ret)
- ret = try_or_set_control_cluster(master, true);
+ ret = try_or_set_cluster(fh, master, true);
*val = ctrl->cur.val;
v4l2_ctrl_unlock(ctrl);
return ret;
}
-int v4l2_s_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
+int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
+ struct v4l2_control *control)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
if (ctrl == NULL || !type_is_int(ctrl))
return -EINVAL;
- return set_ctrl(ctrl, &control->value);
+ if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+ return -EACCES;
+
+ return set_ctrl(fh, ctrl, &control->value);
}
EXPORT_SYMBOL(v4l2_s_ctrl);
int v4l2_subdev_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *control)
{
- return v4l2_s_ctrl(sd->ctrl_handler, control);
+ return v4l2_s_ctrl(NULL, sd->ctrl_handler, control);
}
EXPORT_SYMBOL(v4l2_subdev_s_ctrl);
@@ -1870,6 +2268,34 @@ int v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val)
{
/* It's a driver bug if this happens. */
WARN_ON(!type_is_int(ctrl));
- return set_ctrl(ctrl, &val);
+ return set_ctrl(NULL, ctrl, &val);
}
EXPORT_SYMBOL(v4l2_ctrl_s_ctrl);
+
+void v4l2_ctrl_add_event(struct v4l2_ctrl *ctrl,
+ struct v4l2_subscribed_event *sev)
+{
+ v4l2_ctrl_lock(ctrl);
+ list_add_tail(&sev->node, &ctrl->ev_subs);
+ if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS &&
+ (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL)) {
+ struct v4l2_event ev;
+ u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
+
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY))
+ changes |= V4L2_EVENT_CTRL_CH_VALUE;
+ fill_event(&ev, ctrl, changes);
+ v4l2_event_queue_fh(sev->fh, &ev);
+ }
+ v4l2_ctrl_unlock(ctrl);
+}
+EXPORT_SYMBOL(v4l2_ctrl_add_event);
+
+void v4l2_ctrl_del_event(struct v4l2_ctrl *ctrl,
+ struct v4l2_subscribed_event *sev)
+{
+ v4l2_ctrl_lock(ctrl);
+ list_del(&sev->node);
+ v4l2_ctrl_unlock(ctrl);
+}
+EXPORT_SYMBOL(v4l2_ctrl_del_event);
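
For reference, here is a minimal standalone sketch of the O(n) cluster-grouping pass that prepare_ext_ctrls() implements above. The struct names below are simplified stand-ins for v4l2_ctrl_ref and v4l2_ctrl_helper, not the real kernel types; only the chaining logic is the same.

	struct helper;

	/* Stand-in for v4l2_ctrl_ref: points back at the most recent
	 * helper seen for its cluster master. */
	struct ref {
		struct helper *helper;
	};

	/* Stand-in for v4l2_ctrl_helper: mref is kept only on the first
	 * helper of a cluster, next chains helpers of the same cluster
	 * (0 means end of chain). */
	struct helper {
		struct ref *mref;
		unsigned next;
	};

	static void link_clusters(struct helper *h, unsigned count)
	{
		unsigned i;

		/* First zero the helper back-pointer of every master ref. */
		for (i = 0; i < count; i++)
			h[i].mref->helper = 0;

		for (i = 0; i < count; i++) {
			struct ref *mref = h[i].mref;

			if (mref->helper) {
				/* An earlier helper already belongs to this
				 * cluster: chain it to the current index and
				 * keep mref only on that first helper. */
				mref->helper->next = i;
				h[i].mref = 0;
			}
			mref->helper = &h[i];
		}
	}

	/* Each cluster is then walked exactly like the do/while loops above:
	 *	idx = i;
	 *	do { handle(&h[idx]); idx = h[idx].next; } while (idx);
	 */
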
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 19d5ae29378..06f14008b34 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -167,6 +167,12 @@ static void v4l2_device_release(struct device *cd)
mutex_unlock(&videodev_lock);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
+ vdev->vfl_type != VFL_TYPE_SUBDEV)
+ media_device_unregister_entity(&vdev->entity);
+#endif
+
/* Release video_device and perform other
cleanups as needed. */
vdev->release(vdev);
@@ -389,9 +395,6 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
static int v4l2_open(struct inode *inode, struct file *filp)
{
struct video_device *vdev;
-#if defined(CONFIG_MEDIA_CONTROLLER)
- struct media_entity *entity = NULL;
-#endif
int ret = 0;
/* Check if the video device is available */
@@ -405,17 +408,6 @@ static int v4l2_open(struct inode *inode, struct file *filp)
/* and increase the device refcount */
video_get(vdev);
mutex_unlock(&videodev_lock);
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
- vdev->vfl_type != VFL_TYPE_SUBDEV) {
- entity = media_entity_get(&vdev->entity);
- if (!entity) {
- ret = -EBUSY;
- video_put(vdev);
- return ret;
- }
- }
-#endif
if (vdev->fops->open) {
if (vdev->lock && mutex_lock_interruptible(vdev->lock)) {
ret = -ERESTARTSYS;
@@ -431,14 +423,8 @@ static int v4l2_open(struct inode *inode, struct file *filp)
err:
/* decrease the refcount in case of an error */
- if (ret) {
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
- vdev->vfl_type != VFL_TYPE_SUBDEV)
- media_entity_put(entity);
-#endif
+ if (ret)
video_put(vdev);
- }
return ret;
}
@@ -455,11 +441,6 @@ static int v4l2_release(struct inode *inode, struct file *filp)
if (vdev->lock)
mutex_unlock(vdev->lock);
}
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
- vdev->vfl_type != VFL_TYPE_SUBDEV)
- media_entity_put(&vdev->entity);
-#endif
/* decrease the refcount unconditionally since the release()
return value is ignored. */
video_put(vdev);
@@ -754,12 +735,6 @@ void video_unregister_device(struct video_device *vdev)
if (!vdev || !video_is_registered(vdev))
return;
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
- vdev->vfl_type != VFL_TYPE_SUBDEV)
- media_device_unregister_entity(&vdev->entity);
-#endif
-
mutex_lock(&videodev_lock);
/* This must be in a critical section to prevent a race with v4l2_open.
* Once this bit has been cleared video_get may never be called again.
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 4aae501f02d..c72856c4143 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -209,6 +209,7 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
vdev->v4l2_dev = v4l2_dev;
vdev->fops = &v4l2_subdev_fops;
vdev->release = video_device_release_empty;
+ vdev->ctrl_handler = sd->ctrl_handler;
err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
sd->owner);
if (err < 0)
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c
index 69fd343d477..53b190cf225 100644
--- a/drivers/media/video/v4l2-event.c
+++ b/drivers/media/video/v4l2-event.c
@@ -25,100 +25,39 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
+#include <media/v4l2-ctrls.h>
#include <linux/sched.h>
#include <linux/slab.h>
-int v4l2_event_init(struct v4l2_fh *fh)
+static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
- fh->events = kzalloc(sizeof(*fh->events), GFP_KERNEL);
- if (fh->events == NULL)
- return -ENOMEM;
-
- init_waitqueue_head(&fh->events->wait);
-
- INIT_LIST_HEAD(&fh->events->free);
- INIT_LIST_HEAD(&fh->events->available);
- INIT_LIST_HEAD(&fh->events->subscribed);
-
- fh->events->sequence = -1;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(v4l2_event_init);
-
-int v4l2_event_alloc(struct v4l2_fh *fh, unsigned int n)
-{
- struct v4l2_events *events = fh->events;
- unsigned long flags;
-
- if (!events) {
- WARN_ON(1);
- return -ENOMEM;
- }
-
- while (events->nallocated < n) {
- struct v4l2_kevent *kev;
-
- kev = kzalloc(sizeof(*kev), GFP_KERNEL);
- if (kev == NULL)
- return -ENOMEM;
-
- spin_lock_irqsave(&fh->vdev->fh_lock, flags);
- list_add_tail(&kev->list, &events->free);
- events->nallocated++;
- spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(v4l2_event_alloc);
-
-#define list_kfree(list, type, member) \
- while (!list_empty(list)) { \
- type *hi; \
- hi = list_first_entry(list, type, member); \
- list_del(&hi->member); \
- kfree(hi); \
- }
-
-void v4l2_event_free(struct v4l2_fh *fh)
-{
- struct v4l2_events *events = fh->events;
-
- if (!events)
- return;
-
- list_kfree(&events->free, struct v4l2_kevent, list);
- list_kfree(&events->available, struct v4l2_kevent, list);
- list_kfree(&events->subscribed, struct v4l2_subscribed_event, list);
-
- kfree(events);
- fh->events = NULL;
+ idx += sev->first;
+ return idx >= sev->elems ? idx - sev->elems : idx;
}
-EXPORT_SYMBOL_GPL(v4l2_event_free);
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
- struct v4l2_events *events = fh->events;
struct v4l2_kevent *kev;
unsigned long flags;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
- if (list_empty(&events->available)) {
+ if (list_empty(&fh->available)) {
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
return -ENOENT;
}
- WARN_ON(events->navailable == 0);
+ WARN_ON(fh->navailable == 0);
- kev = list_first_entry(&events->available, struct v4l2_kevent, list);
- list_move(&kev->list, &events->free);
- events->navailable--;
+ kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
+ list_del(&kev->list);
+ fh->navailable--;
- kev->event.pending = events->navailable;
+ kev->event.pending = fh->navailable;
*event = kev->event;
+ kev->sev->first = sev_pos(kev->sev, 1);
+ kev->sev->in_use--;
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
@@ -128,7 +67,6 @@ static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
int nonblocking)
{
- struct v4l2_events *events = fh->events;
int ret;
if (nonblocking)
@@ -139,8 +77,8 @@ int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
mutex_unlock(fh->vdev->lock);
do {
- ret = wait_event_interruptible(events->wait,
- events->navailable != 0);
+ ret = wait_event_interruptible(fh->wait,
+ fh->navailable != 0);
if (ret < 0)
break;
@@ -154,23 +92,72 @@ int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
-/* Caller must hold fh->event->lock! */
+/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
- struct v4l2_fh *fh, u32 type)
+ struct v4l2_fh *fh, u32 type, u32 id)
{
- struct v4l2_events *events = fh->events;
struct v4l2_subscribed_event *sev;
assert_spin_locked(&fh->vdev->fh_lock);
- list_for_each_entry(sev, &events->subscribed, list) {
- if (sev->type == type)
+ list_for_each_entry(sev, &fh->subscribed, list)
+ if (sev->type == type && sev->id == id)
return sev;
- }
return NULL;
}
+static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
+ const struct timespec *ts)
+{
+ struct v4l2_subscribed_event *sev;
+ struct v4l2_kevent *kev;
+ bool copy_payload = true;
+
+ /* Are we subscribed? */
+ sev = v4l2_event_subscribed(fh, ev->type, ev->id);
+ if (sev == NULL)
+ return;
+
+ /* Increase event sequence number on fh. */
+ fh->sequence++;
+
+ /* Do we have any free events? */
+ if (sev->in_use == sev->elems) {
+ /* no, remove the oldest one */
+ kev = sev->events + sev_pos(sev, 0);
+ list_del(&kev->list);
+ sev->in_use--;
+ sev->first = sev_pos(sev, 1);
+ fh->navailable--;
+ if (sev->elems == 1) {
+ if (sev->replace) {
+ sev->replace(&kev->event, ev);
+ copy_payload = false;
+ }
+ } else if (sev->merge) {
+ struct v4l2_kevent *second_oldest =
+ sev->events + sev_pos(sev, 0);
+ sev->merge(&kev->event, &second_oldest->event);
+ }
+ }
+
+ /* Take one and fill it. */
+ kev = sev->events + sev_pos(sev, sev->in_use);
+ kev->event.type = ev->type;
+ if (copy_payload)
+ kev->event.u = ev->u;
+ kev->event.id = ev->id;
+ kev->event.timestamp = *ts;
+ kev->event.sequence = fh->sequence;
+ sev->in_use++;
+ list_add_tail(&kev->list, &fh->available);
+
+ fh->navailable++;
+
+ wake_up_all(&fh->wait);
+}
+
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
struct v4l2_fh *fh;
@@ -181,81 +168,95 @@ void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
spin_lock_irqsave(&vdev->fh_lock, flags);
- list_for_each_entry(fh, &vdev->fh_list, list) {
- struct v4l2_events *events = fh->events;
- struct v4l2_kevent *kev;
+ list_for_each_entry(fh, &vdev->fh_list, list)
+ __v4l2_event_queue_fh(fh, ev, &timestamp);
- /* Are we subscribed? */
- if (!v4l2_event_subscribed(fh, ev->type))
- continue;
+ spin_unlock_irqrestore(&vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_queue);
- /* Increase event sequence number on fh. */
- events->sequence++;
+void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
+{
+ unsigned long flags;
+ struct timespec timestamp;
- /* Do we have any free events? */
- if (list_empty(&events->free))
- continue;
+ ktime_get_ts(&timestamp);
- /* Take one and fill it. */
- kev = list_first_entry(&events->free, struct v4l2_kevent, list);
- kev->event.type = ev->type;
- kev->event.u = ev->u;
- kev->event.timestamp = timestamp;
- kev->event.sequence = events->sequence;
- list_move_tail(&kev->list, &events->available);
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ __v4l2_event_queue_fh(fh, ev, &timestamp);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
- events->navailable++;
+int v4l2_event_pending(struct v4l2_fh *fh)
+{
+ return fh->navailable;
+}
+EXPORT_SYMBOL_GPL(v4l2_event_pending);
- wake_up_all(&events->wait);
- }
+static void ctrls_replace(struct v4l2_event *old, const struct v4l2_event *new)
+{
+ u32 old_changes = old->u.ctrl.changes;
- spin_unlock_irqrestore(&vdev->fh_lock, flags);
+ old->u.ctrl = new->u.ctrl;
+ old->u.ctrl.changes |= old_changes;
}
-EXPORT_SYMBOL_GPL(v4l2_event_queue);
-int v4l2_event_pending(struct v4l2_fh *fh)
+static void ctrls_merge(const struct v4l2_event *old, struct v4l2_event *new)
{
- return fh->events->navailable;
+ new->u.ctrl.changes |= old->u.ctrl.changes;
}
-EXPORT_SYMBOL_GPL(v4l2_event_pending);
int v4l2_event_subscribe(struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
+ struct v4l2_event_subscription *sub, unsigned elems)
{
- struct v4l2_events *events = fh->events;
- struct v4l2_subscribed_event *sev;
+ struct v4l2_subscribed_event *sev, *found_ev;
+ struct v4l2_ctrl *ctrl = NULL;
unsigned long flags;
-
- if (fh->events == NULL) {
- WARN_ON(1);
- return -ENOMEM;
+ unsigned i;
+
+ if (elems < 1)
+ elems = 1;
+ if (sub->type == V4L2_EVENT_CTRL) {
+ ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id);
+ if (ctrl == NULL)
+ return -EINVAL;
}
- sev = kmalloc(sizeof(*sev), GFP_KERNEL);
+ sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
if (!sev)
return -ENOMEM;
-
- spin_lock_irqsave(&fh->vdev->fh_lock, flags);
-
- if (v4l2_event_subscribed(fh, sub->type) == NULL) {
- INIT_LIST_HEAD(&sev->list);
- sev->type = sub->type;
-
- list_add(&sev->list, &events->subscribed);
- sev = NULL;
+ for (i = 0; i < elems; i++)
+ sev->events[i].sev = sev;
+ sev->type = sub->type;
+ sev->id = sub->id;
+ sev->flags = sub->flags;
+ sev->fh = fh;
+ sev->elems = elems;
+ if (ctrl) {
+ sev->replace = ctrls_replace;
+ sev->merge = ctrls_merge;
}
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
+ if (!found_ev)
+ list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- kfree(sev);
+ /* v4l2_ctrl_add_event uses a mutex, so do this outside the spin lock */
+ if (found_ev)
+ kfree(sev);
+ else if (ctrl)
+ v4l2_ctrl_add_event(ctrl, sev);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
-static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
+void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
- struct v4l2_events *events = fh->events;
+ struct v4l2_event_subscription sub;
struct v4l2_subscribed_event *sev;
unsigned long flags;
@@ -263,15 +264,18 @@ static void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
sev = NULL;
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
- if (!list_empty(&events->subscribed)) {
- sev = list_first_entry(&events->subscribed,
- struct v4l2_subscribed_event, list);
- list_del(&sev->list);
+ if (!list_empty(&fh->subscribed)) {
+ sev = list_first_entry(&fh->subscribed,
+ struct v4l2_subscribed_event, list);
+ sub.type = sev->type;
+ sub.id = sev->id;
}
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- kfree(sev);
+ if (sev)
+ v4l2_event_unsubscribe(fh, &sub);
} while (sev);
}
+EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
@@ -286,11 +290,19 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
- sev = v4l2_event_subscribed(fh, sub->type);
- if (sev != NULL)
+ sev = v4l2_event_subscribed(fh, sub->type, sub->id);
+ if (sev != NULL) {
list_del(&sev->list);
+ sev->fh = NULL;
+ }
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ if (sev && sev->type == V4L2_EVENT_CTRL) {
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(fh->ctrl_handler, sev->id);
+
+ if (ctrl)
+ v4l2_ctrl_del_event(ctrl, sev);
+ }
kfree(sev);
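
The event rework above replaces the shared free/available lists with a small ring of v4l2_kevent entries stored inside each subscription: sev_pos() maps a logical offset onto that ring and, when the ring is full, the oldest event is dropped (or merged/replaced for control events). A compilable toy model of just that indexing; apart from the sev_pos()-style arithmetic, the names here are invented for the example.

	#include <stdio.h>

	#define ELEMS 4

	struct ring {
		unsigned first;		/* index of the oldest stored event */
		unsigned in_use;	/* how many slots are filled */
		unsigned elems;		/* capacity, like sev->elems */
		int ev[ELEMS];		/* payload stand-in for v4l2_kevent */
	};

	static unsigned sev_pos(const struct ring *r, unsigned idx)
	{
		idx += r->first;
		return idx >= r->elems ? idx - r->elems : idx;
	}

	static void queue(struct ring *r, int v)
	{
		if (r->in_use == r->elems) {	/* full: drop the oldest */
			r->first = sev_pos(r, 1);
			r->in_use--;
		}
		r->ev[sev_pos(r, r->in_use)] = v;
		r->in_use++;
	}

	int main(void)
	{
		struct ring r = { .elems = ELEMS };
		unsigned i;

		for (i = 0; i < 6; i++)
			queue(&r, i);
		for (i = 0; i < r.in_use; i++)	/* prints 2 3 4 5: 0 and 1 were dropped */
			printf("%d\n", r.ev[sev_pos(&r, i)]);
		return 0;
	}
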
diff --git a/drivers/media/video/v4l2-fh.c b/drivers/media/video/v4l2-fh.c
index 717f71e6370..122822d2b8b 100644
--- a/drivers/media/video/v4l2-fh.c
+++ b/drivers/media/video/v4l2-fh.c
@@ -29,23 +29,18 @@
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
-int v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
+void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
{
fh->vdev = vdev;
+ /* Inherit from video_device. May be overridden by the driver. */
+ fh->ctrl_handler = vdev->ctrl_handler;
INIT_LIST_HEAD(&fh->list);
set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags);
fh->prio = V4L2_PRIORITY_UNSET;
-
- /*
- * fh->events only needs to be initialized if the driver
- * supports the VIDIOC_SUBSCRIBE_EVENT ioctl.
- */
- if (vdev->ioctl_ops && vdev->ioctl_ops->vidioc_subscribe_event)
- return v4l2_event_init(fh);
-
- fh->events = NULL;
-
- return 0;
+ init_waitqueue_head(&fh->wait);
+ INIT_LIST_HEAD(&fh->available);
+ INIT_LIST_HEAD(&fh->subscribed);
+ fh->sequence = -1;
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);
@@ -91,10 +86,8 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
{
if (fh->vdev == NULL)
return;
-
+ v4l2_event_unsubscribe_all(fh);
fh->vdev = NULL;
-
- v4l2_event_free(fh);
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
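
Since v4l2_fh_init() no longer allocates anything it cannot fail, and the file handle now inherits vdev->ctrl_handler by itself. A driver that keeps nothing but the fh in its file handle can therefore open like this (hypothetical "foo" driver, sketch only), or simply point .open at v4l2_fh_open as vivi does below:

	static int foo_open(struct file *file)
	{
		struct video_device *vdev = video_devdata(file);
		struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);

		if (fh == NULL)
			return -ENOMEM;
		v4l2_fh_init(fh, vdev);		/* void now, no error path */
		file->private_data = fh;
		v4l2_fh_add(fh);
		return 0;
	}
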
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 506edcc2dde..002ce136344 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/version.h>
#include <linux/videodev2.h>
@@ -542,12 +543,12 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_fh *vfh = NULL;
struct v4l2_format f_copy;
int use_fh_prio = 0;
- long ret = -EINVAL;
+ long ret = -ENOTTY;
if (ops == NULL) {
printk(KERN_WARNING "videodev: \"%s\" has no ioctl_ops.\n",
vfd->name);
- return -EINVAL;
+ return ret;
}
if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
@@ -605,6 +606,7 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_querycap)
break;
+ cap->version = LINUX_VERSION_CODE;
ret = ops->vidioc_querycap(file, fh, cap);
if (!ret)
dbgarg(cmd, "driver=%s, card=%s, bus=%s, "
@@ -1418,7 +1420,9 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_queryctrl *p = arg;
- if (vfd->ctrl_handler)
+ if (vfh && vfh->ctrl_handler)
+ ret = v4l2_queryctrl(vfh->ctrl_handler, p);
+ else if (vfd->ctrl_handler)
ret = v4l2_queryctrl(vfd->ctrl_handler, p);
else if (ops->vidioc_queryctrl)
ret = ops->vidioc_queryctrl(file, fh, p);
@@ -1438,7 +1442,9 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_control *p = arg;
- if (vfd->ctrl_handler)
+ if (vfh && vfh->ctrl_handler)
+ ret = v4l2_g_ctrl(vfh->ctrl_handler, p);
+ else if (vfd->ctrl_handler)
ret = v4l2_g_ctrl(vfd->ctrl_handler, p);
else if (ops->vidioc_g_ctrl)
ret = ops->vidioc_g_ctrl(file, fh, p);
@@ -1470,14 +1476,18 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
- if (!vfd->ctrl_handler &&
+ if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
!ops->vidioc_s_ctrl && !ops->vidioc_s_ext_ctrls)
break;
dbgarg(cmd, "id=0x%x, value=%d\n", p->id, p->value);
+ if (vfh && vfh->ctrl_handler) {
+ ret = v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
+ break;
+ }
if (vfd->ctrl_handler) {
- ret = v4l2_s_ctrl(vfd->ctrl_handler, p);
+ ret = v4l2_s_ctrl(NULL, vfd->ctrl_handler, p);
break;
}
if (ops->vidioc_s_ctrl) {
@@ -1501,7 +1511,9 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_ext_controls *p = arg;
p->error_idx = p->count;
- if (vfd->ctrl_handler)
+ if (vfh && vfh->ctrl_handler)
+ ret = v4l2_g_ext_ctrls(vfh->ctrl_handler, p);
+ else if (vfd->ctrl_handler)
ret = v4l2_g_ext_ctrls(vfd->ctrl_handler, p);
else if (ops->vidioc_g_ext_ctrls && check_ext_ctrls(p, 0))
ret = ops->vidioc_g_ext_ctrls(file, fh, p);
@@ -1515,11 +1527,14 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_ext_controls *p = arg;
p->error_idx = p->count;
- if (!vfd->ctrl_handler && !ops->vidioc_s_ext_ctrls)
+ if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
+ !ops->vidioc_s_ext_ctrls)
break;
v4l_print_ext_ctrls(cmd, vfd, p, 1);
- if (vfd->ctrl_handler)
- ret = v4l2_s_ext_ctrls(vfd->ctrl_handler, p);
+ if (vfh && vfh->ctrl_handler)
+ ret = v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p);
+ else if (vfd->ctrl_handler)
+ ret = v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, p);
else if (check_ext_ctrls(p, 0))
ret = ops->vidioc_s_ext_ctrls(file, fh, p);
break;
@@ -1529,10 +1544,13 @@ static long __video_do_ioctl(struct file *file,
struct v4l2_ext_controls *p = arg;
p->error_idx = p->count;
- if (!vfd->ctrl_handler && !ops->vidioc_try_ext_ctrls)
+ if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
+ !ops->vidioc_try_ext_ctrls)
break;
v4l_print_ext_ctrls(cmd, vfd, p, 1);
- if (vfd->ctrl_handler)
+ if (vfh && vfh->ctrl_handler)
+ ret = v4l2_try_ext_ctrls(vfh->ctrl_handler, p);
+ else if (vfd->ctrl_handler)
ret = v4l2_try_ext_ctrls(vfd->ctrl_handler, p);
else if (check_ext_ctrls(p, 0))
ret = ops->vidioc_try_ext_ctrls(file, fh, p);
@@ -1542,7 +1560,9 @@ static long __video_do_ioctl(struct file *file,
{
struct v4l2_querymenu *p = arg;
- if (vfd->ctrl_handler)
+ if (vfh && vfh->ctrl_handler)
+ ret = v4l2_querymenu(vfh->ctrl_handler, p);
+ else if (vfd->ctrl_handler)
ret = v4l2_querymenu(vfd->ctrl_handler, p);
else if (ops->vidioc_querymenu)
ret = ops->vidioc_querymenu(file, fh, p);
@@ -1822,6 +1842,8 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_g_tuner)
break;
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
ret = ops->vidioc_g_tuner(file, fh, p);
if (!ret)
dbgarg(cmd, "index=%d, name=%s, type=%d, "
@@ -1840,6 +1862,8 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_tuner)
break;
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd, "index=%d, name=%s, type=%d, "
"capability=0x%x, rangelow=%d, "
"rangehigh=%d, signal=%d, afc=%d, "
@@ -1858,6 +1882,8 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_g_frequency)
break;
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
ret = ops->vidioc_g_frequency(file, fh, p);
if (!ret)
dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
@@ -1940,13 +1966,19 @@ static long __video_do_ioctl(struct file *file,
case VIDIOC_S_HW_FREQ_SEEK:
{
struct v4l2_hw_freq_seek *p = arg;
+ enum v4l2_tuner_type type;
if (!ops->vidioc_s_hw_freq_seek)
break;
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd,
- "tuner=%d, type=%d, seek_upward=%d, wrap_around=%d\n",
- p->tuner, p->type, p->seek_upward, p->wrap_around);
- ret = ops->vidioc_s_hw_freq_seek(file, fh, p);
+ "tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u\n",
+ p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing);
+ if (p->type != type)
+ ret = -EINVAL;
+ else
+ ret = ops->vidioc_s_hw_freq_seek(file, fh, p);
break;
}
case VIDIOC_ENUM_FRAMESIZES:
@@ -2264,7 +2296,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
break;
}
*user_ptr = (void __user *)buf->m.planes;
- *kernel_ptr = (void **)&buf->m.planes;
+ *kernel_ptr = (void *)&buf->m.planes;
*array_size = sizeof(struct v4l2_plane) * buf->length;
ret = 1;
}
@@ -2278,7 +2310,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
if (ctrls->count != 0) {
*user_ptr = (void __user *)ctrls->controls;
- *kernel_ptr = (void **)&ctrls->controls;
+ *kernel_ptr = (void *)&ctrls->controls;
*array_size = sizeof(struct v4l2_ext_control)
* ctrls->count;
ret = 1;
diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c
index 812729ebf09..b7967c9dc4a 100644
--- a/drivers/media/video/v4l2-subdev.c
+++ b/drivers/media/video/v4l2-subdev.c
@@ -75,20 +75,7 @@ static int subdev_open(struct file *file)
return ret;
}
- ret = v4l2_fh_init(&subdev_fh->vfh, vdev);
- if (ret)
- goto err;
-
- if (sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS) {
- ret = v4l2_event_init(&subdev_fh->vfh);
- if (ret)
- goto err;
-
- ret = v4l2_event_alloc(&subdev_fh->vfh, sd->nevents);
- if (ret)
- goto err;
- }
-
+ v4l2_fh_init(&subdev_fh->vfh, vdev);
v4l2_fh_add(&subdev_fh->vfh);
file->private_data = &subdev_fh->vfh;
#if defined(CONFIG_MEDIA_CONTROLLER)
@@ -155,25 +142,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
switch (cmd) {
case VIDIOC_QUERYCTRL:
- return v4l2_queryctrl(sd->ctrl_handler, arg);
+ return v4l2_queryctrl(vfh->ctrl_handler, arg);
case VIDIOC_QUERYMENU:
- return v4l2_querymenu(sd->ctrl_handler, arg);
+ return v4l2_querymenu(vfh->ctrl_handler, arg);
case VIDIOC_G_CTRL:
- return v4l2_g_ctrl(sd->ctrl_handler, arg);
+ return v4l2_g_ctrl(vfh->ctrl_handler, arg);
case VIDIOC_S_CTRL:
- return v4l2_s_ctrl(sd->ctrl_handler, arg);
+ return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);
case VIDIOC_G_EXT_CTRLS:
- return v4l2_g_ext_ctrls(sd->ctrl_handler, arg);
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler, arg);
case VIDIOC_S_EXT_CTRLS:
- return v4l2_s_ext_ctrls(sd->ctrl_handler, arg);
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, arg);
case VIDIOC_TRY_EXT_CTRLS:
- return v4l2_try_ext_ctrls(sd->ctrl_handler, arg);
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler, arg);
case VIDIOC_DQEVENT:
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
@@ -297,7 +284,7 @@ static unsigned int subdev_poll(struct file *file, poll_table *wait)
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
return POLLERR;
- poll_wait(file, &fh->events->wait, wait);
+ poll_wait(file, &fh->wait, wait);
if (v4l2_event_pending(fh))
return POLLPRI;
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index ddb8f4b46c0..f300deafd26 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -108,8 +108,9 @@ static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
if (PageHighMem(pages[0]))
/* DMA to highmem pages might not work */
goto highmem;
- sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset);
- size -= PAGE_SIZE - offset;
+ sg_set_page(&sglist[0], pages[0],
+ min_t(size_t, PAGE_SIZE - offset, size), offset);
+ size -= min_t(size_t, PAGE_SIZE - offset, size);
for (i = 1; i < nr_pages; i++) {
if (NULL == pages[i])
goto nopage;
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 6ba1461d51e..3015e600094 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -492,13 +492,6 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
return -EINVAL;
}
- /*
- * If the same number of buffers and memory access method is requested
- * then return immediately.
- */
- if (q->memory == req->memory && req->count == q->num_buffers)
- return 0;
-
if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
/*
* We already have buffers allocated, so first check if they
@@ -539,9 +532,9 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
/* Finally, allocate buffers and video memory */
ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes,
plane_sizes);
- if (ret < 0) {
- dprintk(1, "Memory allocation failed with error: %d\n", ret);
- return ret;
+ if (ret == 0) {
+ dprintk(1, "Memory allocation failed\n");
+ return -ENOMEM;
}
/*
@@ -1196,6 +1189,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* has not already dequeued before initiating cancel.
*/
INIT_LIST_HEAD(&q->done_list);
+ atomic_set(&q->queued_count, 0);
wake_up_all(&q->done_wq);
/*
diff --git a/drivers/media/video/videobuf2-dma-sg.c b/drivers/media/video/videobuf2-dma-sg.c
index b2d9485aac7..065f468faf8 100644
--- a/drivers/media/video/videobuf2-dma-sg.c
+++ b/drivers/media/video/videobuf2-dma-sg.c
@@ -48,12 +48,10 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
buf->sg_desc.size = size;
buf->sg_desc.num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- buf->sg_desc.sglist = vmalloc(buf->sg_desc.num_pages *
+ buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
sizeof(*buf->sg_desc.sglist));
if (!buf->sg_desc.sglist)
goto fail_sglist_alloc;
- memset(buf->sg_desc.sglist, 0, buf->sg_desc.num_pages *
- sizeof(*buf->sg_desc.sglist));
sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
@@ -62,7 +60,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
goto fail_pages_array_alloc;
for (i = 0; i < buf->sg_desc.num_pages; ++i) {
- buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
if (NULL == buf->pages[i])
goto fail_pages_alloc;
sg_set_page(&buf->sg_desc.sglist[i],
@@ -136,13 +134,11 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
buf->sg_desc.num_pages = last - first + 1;
- buf->sg_desc.sglist = vmalloc(
+ buf->sg_desc.sglist = vzalloc(
buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
if (!buf->sg_desc.sglist)
goto userptr_fail_sglist_alloc;
- memset(buf->sg_desc.sglist, 0,
- buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
diff --git a/drivers/media/video/videobuf2-memops.c b/drivers/media/video/videobuf2-memops.c
index 5370a3a7ee2..569eeb3dfd5 100644
--- a/drivers/media/video/videobuf2-memops.c
+++ b/drivers/media/video/videobuf2-memops.c
@@ -18,7 +18,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/file.h>
-#include <linux/slab.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
@@ -177,7 +176,7 @@ int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr,
vma->vm_ops->open(vma);
- printk(KERN_DEBUG "%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
+ pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n",
__func__, paddr, vma->vm_start, size);
return 0;
@@ -195,7 +194,7 @@ static void vb2_common_vm_open(struct vm_area_struct *vma)
{
struct vb2_vmarea_handler *h = vma->vm_private_data;
- printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n",
+ pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
__func__, h, atomic_read(h->refcount), vma->vm_start,
vma->vm_end);
@@ -213,7 +212,7 @@ static void vb2_common_vm_close(struct vm_area_struct *vma)
{
struct vb2_vmarea_handler *h = vma->vm_private_data;
- printk(KERN_DEBUG "%s: %p, refcount: %d, vma: %08lx-%08lx\n",
+ pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
__func__, h, atomic_read(h->refcount), vma->vm_start,
vma->vm_end);
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index d63e9d97849..52a0a3736c8 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -36,7 +36,6 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/time.h>
-#include <linux/version.h>
#include <linux/kmod.h>
#include <linux/i2c.h>
@@ -61,8 +60,7 @@
// #define VINO_DEBUG
// #define VINO_DEBUG_INT
-#define VINO_MODULE_VERSION "0.0.6"
-#define VINO_VERSION_CODE KERNEL_VERSION(0, 0, 6)
+#define VINO_MODULE_VERSION "0.0.7"
MODULE_DESCRIPTION("SGI VINO Video4Linux2 driver");
MODULE_VERSION(VINO_MODULE_VERSION);
@@ -2934,7 +2932,6 @@ static int vino_querycap(struct file *file, void *__fh,
strcpy(cap->driver, vino_driver_name);
strcpy(cap->card, vino_driver_description);
strcpy(cap->bus_info, vino_bus_name);
- cap->version = VINO_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING;
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 2238a613d66..a848bd2af97 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -22,7 +22,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/font.h>
-#include <linux/version.h>
#include <linux/mutex.h>
#include <linux/videodev2.h>
#include <linux/kthread.h>
@@ -32,6 +31,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-common.h>
#define VIVI_MODULE_NAME "vivi"
@@ -44,15 +44,12 @@
#define MAX_WIDTH 1920
#define MAX_HEIGHT 1200
-#define VIVI_MAJOR_VERSION 0
-#define VIVI_MINOR_VERSION 8
-#define VIVI_RELEASE 0
-#define VIVI_VERSION \
- KERNEL_VERSION(VIVI_MAJOR_VERSION, VIVI_MINOR_VERSION, VIVI_RELEASE)
+#define VIVI_VERSION "0.8.1"
MODULE_DESCRIPTION("Video Technology Magazine Virtual Video Capture Board");
MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(VIVI_VERSION);
static unsigned video_nr = -1;
module_param(video_nr, uint, 0644);
@@ -167,6 +164,11 @@ struct vivi_dev {
struct v4l2_ctrl *contrast;
struct v4l2_ctrl *saturation;
struct v4l2_ctrl *hue;
+ struct {
+ /* autogain/gain cluster */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *gain;
+ };
struct v4l2_ctrl *volume;
struct v4l2_ctrl *button;
struct v4l2_ctrl *boolean;
@@ -174,6 +176,7 @@ struct vivi_dev {
struct v4l2_ctrl *int64;
struct v4l2_ctrl *menu;
struct v4l2_ctrl *string;
+ struct v4l2_ctrl *bitmask;
spinlock_t slock;
struct mutex mutex;
@@ -457,6 +460,7 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
unsigned ms;
char str[100];
int h, line = 1;
+ s32 gain;
if (!vbuf)
return;
@@ -479,6 +483,7 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
dev->width, dev->height, dev->input);
gen_text(dev, vbuf, line++ * 16, 16, str);
+ gain = v4l2_ctrl_g_ctrl(dev->gain);
mutex_lock(&dev->ctrl_handler.lock);
snprintf(str, sizeof(str), " brightness %3d, contrast %3d, saturation %3d, hue %d ",
dev->brightness->cur.val,
@@ -486,11 +491,13 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
dev->saturation->cur.val,
dev->hue->cur.val);
gen_text(dev, vbuf, line++ * 16, 16, str);
- snprintf(str, sizeof(str), " volume %3d ", dev->volume->cur.val);
+ snprintf(str, sizeof(str), " autogain %d, gain %3d, volume %3d ",
+ dev->autogain->cur.val, gain, dev->volume->cur.val);
gen_text(dev, vbuf, line++ * 16, 16, str);
- snprintf(str, sizeof(str), " int32 %d, int64 %lld ",
+ snprintf(str, sizeof(str), " int32 %d, int64 %lld, bitmask %08x ",
dev->int32->cur.val,
- dev->int64->cur.val64);
+ dev->int64->cur.val64,
+ dev->bitmask->cur.val);
gen_text(dev, vbuf, line++ * 16, 16, str);
snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ",
dev->boolean->cur.val,
@@ -524,11 +531,13 @@ static void vivi_thread_tick(struct vivi_dev *dev)
spin_lock_irqsave(&dev->slock, flags);
if (list_empty(&dma_q->active)) {
dprintk(dev, 1, "No active queue to serve\n");
- goto unlock;
+ spin_unlock_irqrestore(&dev->slock, flags);
+ return;
}
buf = list_entry(dma_q->active.next, struct vivi_buffer, list);
list_del(&buf->list);
+ spin_unlock_irqrestore(&dev->slock, flags);
do_gettimeofday(&buf->vb.v4l2_buf.timestamp);
@@ -538,8 +547,6 @@ static void vivi_thread_tick(struct vivi_dev *dev)
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
dprintk(dev, 2, "[%p/%d] done\n", buf, buf->vb.v4l2_buf.index);
-unlock:
- spin_unlock_irqrestore(&dev->slock, flags);
}
#define frames_to_ms(frames) \
@@ -812,7 +819,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strcpy(cap->driver, "vivi");
strcpy(cap->card, "vivi");
strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));
- cap->version = VIVI_VERSION;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | \
V4L2_CAP_READWRITE;
return 0;
@@ -975,14 +981,37 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
if (i >= NUM_INPUTS)
return -EINVAL;
+ if (i == dev->input)
+ return 0;
+
dev->input = i;
precalculate_bars(dev);
precalculate_line(dev);
return 0;
}
+static int vidioc_subscribe_event(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_CTRL:
+ return v4l2_event_subscribe(fh, sub, 0);
+ default:
+ return -EINVAL;
+ }
+}
+
/* --- controls ---------------------------------------------- */
+static int vivi_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vivi_dev *dev = container_of(ctrl->handler, struct vivi_dev, ctrl_handler);
+
+ if (ctrl == dev->autogain)
+ dev->gain->val = jiffies & 0xff;
+ return 0;
+}
+
static int vivi_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct vivi_dev *dev = container_of(ctrl->handler, struct vivi_dev, ctrl_handler);
@@ -1010,10 +1039,17 @@ static unsigned int
vivi_poll(struct file *file, struct poll_table_struct *wait)
{
struct vivi_dev *dev = video_drvdata(file);
+ struct v4l2_fh *fh = file->private_data;
struct vb2_queue *q = &dev->vb_vidq;
+ unsigned int res;
dprintk(dev, 1, "%s\n", __func__);
- return vb2_poll(q, file, wait);
+ res = vb2_poll(q, file, wait);
+ if (v4l2_event_pending(fh))
+ res |= POLLPRI;
+ else
+ poll_wait(file, &fh->wait, wait);
+ return res;
}
static int vivi_close(struct file *file)
@@ -1045,6 +1081,7 @@ static int vivi_mmap(struct file *file, struct vm_area_struct *vma)
}
static const struct v4l2_ctrl_ops vivi_ctrl_ops = {
+ .g_volatile_ctrl = vivi_g_volatile_ctrl,
.s_ctrl = vivi_s_ctrl,
};
@@ -1117,9 +1154,20 @@ static const struct v4l2_ctrl_config vivi_ctrl_string = {
.step = 1,
};
+static const struct v4l2_ctrl_config vivi_ctrl_bitmask = {
+ .ops = &vivi_ctrl_ops,
+ .id = VIVI_CID_CUSTOM_BASE + 6,
+ .name = "Bitmask",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .def = 0x80002000,
+ .min = 0,
+ .max = 0x80402010,
+ .step = 0,
+};
+
static const struct v4l2_file_operations vivi_fops = {
.owner = THIS_MODULE,
- .open = v4l2_fh_open,
+ .open = v4l2_fh_open,
.release = vivi_close,
.read = vivi_read,
.poll = vivi_poll,
@@ -1143,6 +1191,8 @@ static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
.vidioc_s_input = vidioc_s_input,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
+ .vidioc_subscribe_event = vidioc_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct video_device vivi_template = {
@@ -1213,16 +1263,22 @@ static int __init vivi_create_instance(int inst)
V4L2_CID_SATURATION, 0, 255, 1, 127);
dev->hue = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
V4L2_CID_HUE, -128, 127, 1, 0);
+ dev->autogain = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ dev->gain = v4l2_ctrl_new_std(hdl, &vivi_ctrl_ops,
+ V4L2_CID_GAIN, 0, 255, 1, 100);
dev->button = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_button, NULL);
dev->int32 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int32, NULL);
dev->int64 = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_int64, NULL);
dev->boolean = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_boolean, NULL);
dev->menu = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_menu, NULL);
dev->string = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_string, NULL);
+ dev->bitmask = v4l2_ctrl_new_custom(hdl, &vivi_ctrl_bitmask, NULL);
if (hdl->error) {
ret = hdl->error;
goto unreg_dev;
}
+ v4l2_ctrl_auto_cluster(2, &dev->autogain, 0, true);
dev->v4l2_dev.ctrl_handler = hdl;
/* initialize locks */
@@ -1325,9 +1381,8 @@ static int __init vivi_init(void)
}
printk(KERN_INFO "Video Technology Magazine Virtual Video "
- "Capture Board ver %u.%u.%u successfully loaded.\n",
- (VIVI_VERSION >> 16) & 0xFF, (VIVI_VERSION >> 8) & 0xFF,
- VIVI_VERSION & 0xFF);
+ "Capture Board ver %s successfully loaded.\n",
+ VIVI_VERSION);
/* n_devs will reflect the actual number of allocated devices */
n_devs = i;
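
With vivi wired up as above (per-fh control handler, vidioc_subscribe_event and POLLPRI signalling), userspace can watch a control change. A rough sketch, assuming a vivi node at /dev/video0 and the V4L2 control-event uAPI added by this series:

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_event_subscription sub;
		struct v4l2_event ev;
		struct pollfd pfd;
		int fd = open("/dev/video0", O_RDWR);

		if (fd < 0)
			return 1;
		memset(&sub, 0, sizeof(sub));
		sub.type = V4L2_EVENT_CTRL;
		sub.id = V4L2_CID_BRIGHTNESS;
		if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
			return 1;

		pfd.fd = fd;
		pfd.events = POLLPRI;	/* pending events are signalled via POLLPRI */
		while (poll(&pfd, 1, -1) > 0) {
			if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0)
				break;
			if (ev.u.ctrl.changes & V4L2_EVENT_CTRL_CH_VALUE)
				printf("brightness -> %d\n", ev.u.ctrl.value);
		}
		return 0;
	}
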
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index fa35639d0c1..453dbbd1e6e 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -57,7 +57,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <media/v4l2-common.h>
@@ -127,7 +126,7 @@ struct w9966 {
MODULE_AUTHOR("Jakob Kemi <jakob.kemi@post.utfors.se>");
MODULE_DESCRIPTION("Winbond w9966cf WebCam driver (0.32)");
MODULE_LICENSE("GPL");
-
+MODULE_VERSION("0.33.1");
#ifdef MODULE
static const char *pardev[] = {[0 ... W9966_MAXCAMS] = ""};
@@ -568,7 +567,6 @@ static int cam_querycap(struct file *file, void *priv,
strlcpy(vcap->driver, cam->v4l2_dev.name, sizeof(vcap->driver));
strlcpy(vcap->card, W9966_DRIVERNAME, sizeof(vcap->card));
strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
- vcap->version = KERNEL_VERSION(0, 33, 0);
vcap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
return 0;
}
diff --git a/drivers/media/video/zoran/zoran.h b/drivers/media/video/zoran/zoran.h
index f3f64001492..d7166afc255 100644
--- a/drivers/media/video/zoran/zoran.h
+++ b/drivers/media/video/zoran/zoran.h
@@ -41,10 +41,6 @@ struct zoran_sync {
};
-#define MAJOR_VERSION 0 /* driver major version */
-#define MINOR_VERSION 10 /* driver minor version */
-#define RELEASE_VERSION 0 /* release version */
-
#define ZORAN_NAME "ZORAN" /* name of the device */
#define ZR_DEVNAME(zr) ((zr)->name)
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index 79b04ac0f1a..c3602d6cd48 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -123,9 +123,12 @@ int zr36067_debug = 1;
module_param_named(debug, zr36067_debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-5)");
+#define ZORAN_VERSION "0.10.1"
+
MODULE_DESCRIPTION("Zoran-36057/36067 JPEG codec driver");
MODULE_AUTHOR("Serguei Miridonov");
MODULE_LICENSE("GPL");
+MODULE_VERSION(ZORAN_VERSION);
#define ZR_DEVICE(subven, subdev, data) { \
.vendor = PCI_VENDOR_ID_ZORAN, .device = PCI_DEVICE_ID_ZORAN_36057, \
@@ -1459,8 +1462,8 @@ static int __init zoran_init(void)
{
int res;
- printk(KERN_INFO "Zoran MJPEG board driver version %d.%d.%d\n",
- MAJOR_VERSION, MINOR_VERSION, RELEASE_VERSION);
+ printk(KERN_INFO "Zoran MJPEG board driver version %s\n",
+ ZORAN_VERSION);
/* check the parameters we have been given, adjust if necessary */
if (v4l_nbufs < 2)
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 2771d818406..d4d05d2ace6 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -44,7 +44,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
@@ -1538,8 +1537,6 @@ static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability
strncpy(cap->driver, "zoran", sizeof(cap->driver)-1);
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
pci_name(zr->pci_dev));
- cap->version = KERNEL_VERSION(MAJOR_VERSION, MINOR_VERSION,
- RELEASE_VERSION);
cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OVERLAY;
return 0;
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index 7dfb01e9930..c492846c1c5 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -29,7 +29,6 @@
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/vmalloc.h>
@@ -42,8 +41,7 @@
/* Version Information */
-#define DRIVER_VERSION "v0.73"
-#define ZR364XX_VERSION_CODE KERNEL_VERSION(0, 7, 3)
+#define DRIVER_VERSION "0.7.4"
#define DRIVER_AUTHOR "Antoine Jacquet, http://royale.zerezo.com/"
#define DRIVER_DESC "Zoran 364xx"
@@ -744,7 +742,6 @@ static int zr364xx_vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, cam->udev->product, sizeof(cap->card));
strlcpy(cap->bus_info, dev_name(&cam->udev->dev),
sizeof(cap->bus_info));
- cap->version = ZR364XX_VERSION_CODE;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
@@ -1721,3 +1718,4 @@ module_exit(zr364xx_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index a1d4ee6671b..ce61a576976 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -827,7 +827,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
* DID_SOFT_ERROR is set.
*/
if (ioc->bus_type == SPI) {
- if (pScsiReq->CDB[0] == READ_6 ||
+ if ((pScsiReq->CDB[0] == READ_6 && ((pScsiReq->CDB[1] & 0x02) == 0)) ||
pScsiReq->CDB[0] == READ_10 ||
pScsiReq->CDB[0] == READ_12 ||
pScsiReq->CDB[0] == READ_16 ||
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 74fbe56321f..c8ed7b63fdf 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -59,7 +59,7 @@
#include <asm/dma.h>
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 090d2a3a654..a8c08f332da 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -681,11 +681,11 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
if (root && allocate_resource(root, res, sb->desired_mem_size, sb->desired_mem_size, sb->desired_mem_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */
NULL, NULL) >= 0) {
c->mem_alloc = 1;
- sb->current_mem_size = 1 + res->end - res->start;
+ sb->current_mem_size = resource_size(res);
sb->current_mem_base = res->start;
osm_info("%s: allocated %llu bytes of PCI memory at "
"0x%016llX.\n", c->name,
- (unsigned long long)(1 + res->end - res->start),
+ (unsigned long long)resource_size(res),
(unsigned long long)res->start);
}
}
@@ -703,11 +703,11 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
if (root && allocate_resource(root, res, sb->desired_io_size, sb->desired_io_size, sb->desired_io_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */
NULL, NULL) >= 0) {
c->io_alloc = 1;
- sb->current_io_size = 1 + res->end - res->start;
+ sb->current_io_size = resource_size(res);
sb->current_mem_base = res->start;
osm_info("%s: allocated %llu bytes of PCI I/O at "
"0x%016llX.\n", c->name,
- (unsigned long long)(1 + res->end - res->start),
+ (unsigned long long)resource_size(res),
(unsigned long long)res->start);
}
}
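
The two i2o hunks above replace the open-coded size arithmetic with resource_size(), the standard helper from <linux/ioport.h>; for reference, the helper is simply:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

Using the helper avoids the easy-to-miss off-by-one when the "+ 1" is forgotten.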
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 0f09c057e79..21574bdf485 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -171,6 +171,37 @@ config MFD_TPS6586X
This driver can also be built as a module. If so, the module
will be called tps6586x.
+config MFD_TPS65910
+ bool "TPS65910 Power Management chip"
+ depends on I2C=y && GPIOLIB
+ select MFD_CORE
+ select GPIO_TPS65910
+ help
+ If you say yes here you get support for the TPS65910 series of
+ Power Management chips.
+
+config MFD_TPS65912
+ bool
+ depends on GPIOLIB
+
+config MFD_TPS65912_I2C
+ bool "TPS95612 Power Management chip with I2C"
+ select MFD_CORE
+ select MFD_TPS65912
+ depends on I2C=y && GPIOLIB
+ help
+ If you say yes here you get support for the TPS65912 series of
+ PM chips with I2C interface.
+
+config MFD_TPS65912_SPI
+ bool "TPS65912 Power Management chip with SPI"
+ select MFD_CORE
+ select MFD_TPS65912
+ depends on SPI_MASTER && GPIOLIB
+ help
+ If you say yes here you get support for the TPS65912 series of
+ PM chips with SPI interface.
+
config MENELAUS
bool "Texas Instruments TWL92330/Menelaus PM chip"
depends on I2C=y && ARCH_OMAP2
@@ -218,7 +249,7 @@ config TWL4030_POWER
and load scripts controlling which resources are switched off/on
or reset when a sleep, wakeup or warm reset event occurs.
-config TWL4030_CODEC
+config MFD_TWL4030_AUDIO
bool
depends on TWL4030_CORE
select MFD_CORE
@@ -233,6 +264,12 @@ config TWL6030_PWM
Say yes here if you want support for TWL6030 PWM.
This is used to control charging LED brightness.
+config TWL6040_CORE
+ bool
+ depends on TWL4030_CORE && GENERIC_HARDIRQS
+ select MFD_CORE
+ default n
+
config MFD_STMPE
bool "Support STMicroelectronics STMPE"
depends on I2C=y && GENERIC_HARDIRQS
@@ -656,8 +693,9 @@ config MFD_JANZ_CMODIO
CAN and GPIO controllers.
config MFD_JZ4740_ADC
- tristate "Support for the JZ4740 SoC ADC core"
+ bool "Support for the JZ4740 SoC ADC core"
select MFD_CORE
+ select GENERIC_IRQ_CHIP
depends on MACH_JZ4740
help
Say yes here if you want support for the ADC unit in the JZ4740 SoC.
@@ -719,14 +757,18 @@ config MFD_PM8XXX_IRQ
This is required to use certain other PM 8xxx features, such as GPIO
and MPP.
-config MFD_TPS65910
- bool "TPS65910 Power Management chip"
- depends on I2C=y && GPIOLIB
+config TPS65911_COMPARATOR
+ tristate
+
+config MFD_AAT2870_CORE
+ bool "Support for the AnalogicTech AAT2870"
select MFD_CORE
- select GPIO_TPS65910
+ depends on I2C=y && GPIOLIB
help
- if you say yes here you get support for the TPS65910 series of
- Power Management chips.
+ If you say yes here you get support for the AAT2870.
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the
+ functionality of the device.
endif # MFD_SUPPORT
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index efe3cc33ed9..c58020303d1 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
obj-$(CONFIG_MFD_WM8400) += wm8400-core.o
wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o
+wm831x-objs += wm831x-auxadc.o
obj-$(CONFIG_MFD_WM831X) += wm831x.o
obj-$(CONFIG_MFD_WM831X_I2C) += wm831x-i2c.o
obj-$(CONFIG_MFD_WM831X_SPI) += wm831x-spi.o
@@ -35,13 +36,19 @@ obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o
obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_TPS6507X) += tps6507x.o
+obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
+tps65912-objs := tps65912-core.o tps65912-irq.o
+obj-$(CONFIG_MFD_TPS65912) += tps65912.o
+obj-$(CONFIG_MFD_TPS65912_I2C) += tps65912-i2c.o
+obj-$(CONFIG_MFD_TPS65912_SPI) += tps65912-spi.o
obj-$(CONFIG_MENELAUS) += menelaus.o
obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o
obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
-obj-$(CONFIG_TWL4030_CODEC) += twl4030-codec.o
+obj-$(CONFIG_MFD_TWL4030_AUDIO) += twl4030-audio.o
obj-$(CONFIG_TWL6030_PWM) += twl6030-pwm.o
+obj-$(CONFIG_TWL6040_CORE) += twl6040-core.o twl6040-irq.o
obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
@@ -93,4 +100,5 @@ obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o
obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o
obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o
obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
-obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
+obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
+obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
new file mode 100644
index 00000000000..345dc658ef0
--- /dev/null
+++ b/drivers/mfd/aat2870-core.c
@@ -0,0 +1,535 @@
+/*
+ * linux/drivers/mfd/aat2870-core.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Author: Jin Park <jinyoungp@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/aat2870.h>
+#include <linux/regulator/machine.h>
+
+static struct aat2870_register aat2870_regs[AAT2870_REG_NUM] = {
+ /* readable, writeable, value */
+ { 0, 1, 0x00 }, /* 0x00 AAT2870_BL_CH_EN */
+ { 0, 1, 0x16 }, /* 0x01 AAT2870_BLM */
+ { 0, 1, 0x16 }, /* 0x02 AAT2870_BLS */
+ { 0, 1, 0x56 }, /* 0x03 AAT2870_BL1 */
+ { 0, 1, 0x56 }, /* 0x04 AAT2870_BL2 */
+ { 0, 1, 0x56 }, /* 0x05 AAT2870_BL3 */
+ { 0, 1, 0x56 }, /* 0x06 AAT2870_BL4 */
+ { 0, 1, 0x56 }, /* 0x07 AAT2870_BL5 */
+ { 0, 1, 0x56 }, /* 0x08 AAT2870_BL6 */
+ { 0, 1, 0x56 }, /* 0x09 AAT2870_BL7 */
+ { 0, 1, 0x56 }, /* 0x0A AAT2870_BL8 */
+ { 0, 1, 0x00 }, /* 0x0B AAT2870_FLR */
+ { 0, 1, 0x03 }, /* 0x0C AAT2870_FM */
+ { 0, 1, 0x03 }, /* 0x0D AAT2870_FS */
+ { 0, 1, 0x10 }, /* 0x0E AAT2870_ALS_CFG0 */
+ { 0, 1, 0x06 }, /* 0x0F AAT2870_ALS_CFG1 */
+ { 0, 1, 0x00 }, /* 0x10 AAT2870_ALS_CFG2 */
+ { 1, 0, 0x00 }, /* 0x11 AAT2870_AMB */
+ { 0, 1, 0x00 }, /* 0x12 AAT2870_ALS0 */
+ { 0, 1, 0x00 }, /* 0x13 AAT2870_ALS1 */
+ { 0, 1, 0x00 }, /* 0x14 AAT2870_ALS2 */
+ { 0, 1, 0x00 }, /* 0x15 AAT2870_ALS3 */
+ { 0, 1, 0x00 }, /* 0x16 AAT2870_ALS4 */
+ { 0, 1, 0x00 }, /* 0x17 AAT2870_ALS5 */
+ { 0, 1, 0x00 }, /* 0x18 AAT2870_ALS6 */
+ { 0, 1, 0x00 }, /* 0x19 AAT2870_ALS7 */
+ { 0, 1, 0x00 }, /* 0x1A AAT2870_ALS8 */
+ { 0, 1, 0x00 }, /* 0x1B AAT2870_ALS9 */
+ { 0, 1, 0x00 }, /* 0x1C AAT2870_ALSA */
+ { 0, 1, 0x00 }, /* 0x1D AAT2870_ALSB */
+ { 0, 1, 0x00 }, /* 0x1E AAT2870_ALSC */
+ { 0, 1, 0x00 }, /* 0x1F AAT2870_ALSD */
+ { 0, 1, 0x00 }, /* 0x20 AAT2870_ALSE */
+ { 0, 1, 0x00 }, /* 0x21 AAT2870_ALSF */
+ { 0, 1, 0x00 }, /* 0x22 AAT2870_SUB_SET */
+ { 0, 1, 0x00 }, /* 0x23 AAT2870_SUB_CTRL */
+ { 0, 1, 0x00 }, /* 0x24 AAT2870_LDO_AB */
+ { 0, 1, 0x00 }, /* 0x25 AAT2870_LDO_CD */
+ { 0, 1, 0x00 }, /* 0x26 AAT2870_LDO_EN */
+};
+
+static struct mfd_cell aat2870_devs[] = {
+ {
+ .name = "aat2870-backlight",
+ .id = AAT2870_ID_BL,
+ .pdata_size = sizeof(struct aat2870_bl_platform_data),
+ },
+ {
+ .name = "aat2870-regulator",
+ .id = AAT2870_ID_LDOA,
+ .pdata_size = sizeof(struct regulator_init_data),
+ },
+ {
+ .name = "aat2870-regulator",
+ .id = AAT2870_ID_LDOB,
+ .pdata_size = sizeof(struct regulator_init_data),
+ },
+ {
+ .name = "aat2870-regulator",
+ .id = AAT2870_ID_LDOC,
+ .pdata_size = sizeof(struct regulator_init_data),
+ },
+ {
+ .name = "aat2870-regulator",
+ .id = AAT2870_ID_LDOD,
+ .pdata_size = sizeof(struct regulator_init_data),
+ },
+};
+
+static int __aat2870_read(struct aat2870_data *aat2870, u8 addr, u8 *val)
+{
+ int ret;
+
+ if (addr >= AAT2870_REG_NUM) {
+ dev_err(aat2870->dev, "Invalid address, 0x%02x\n", addr);
+ return -EINVAL;
+ }
+
+ if (!aat2870->reg_cache[addr].readable) {
+ *val = aat2870->reg_cache[addr].value;
+ goto out;
+ }
+
+ ret = i2c_master_send(aat2870->client, &addr, 1);
+ if (ret < 0)
+ return ret;
+ if (ret != 1)
+ return -EIO;
+
+ ret = i2c_master_recv(aat2870->client, val, 1);
+ if (ret < 0)
+ return ret;
+ if (ret != 1)
+ return -EIO;
+
+out:
+ dev_dbg(aat2870->dev, "read: addr=0x%02x, val=0x%02x\n", addr, *val);
+ return 0;
+}
+
+static int __aat2870_write(struct aat2870_data *aat2870, u8 addr, u8 val)
+{
+ u8 msg[2];
+ int ret;
+
+ if (addr >= AAT2870_REG_NUM) {
+ dev_err(aat2870->dev, "Invalid address, 0x%02x\n", addr);
+ return -EINVAL;
+ }
+
+ if (!aat2870->reg_cache[addr].writeable) {
+ dev_err(aat2870->dev, "Address 0x%02x is not writeable\n",
+ addr);
+ return -EINVAL;
+ }
+
+ msg[0] = addr;
+ msg[1] = val;
+ ret = i2c_master_send(aat2870->client, msg, 2);
+ if (ret < 0)
+ return ret;
+ if (ret != 2)
+ return -EIO;
+
+ aat2870->reg_cache[addr].value = val;
+
+ dev_dbg(aat2870->dev, "write: addr=0x%02x, val=0x%02x\n", addr, val);
+ return 0;
+}
+
+static int aat2870_read(struct aat2870_data *aat2870, u8 addr, u8 *val)
+{
+ int ret;
+
+ mutex_lock(&aat2870->io_lock);
+ ret = __aat2870_read(aat2870, addr, val);
+ mutex_unlock(&aat2870->io_lock);
+
+ return ret;
+}
+
+static int aat2870_write(struct aat2870_data *aat2870, u8 addr, u8 val)
+{
+ int ret;
+
+ mutex_lock(&aat2870->io_lock);
+ ret = __aat2870_write(aat2870, addr, val);
+ mutex_unlock(&aat2870->io_lock);
+
+ return ret;
+}
+
+static int aat2870_update(struct aat2870_data *aat2870, u8 addr, u8 mask,
+ u8 val)
+{
+ int change;
+ u8 old_val, new_val;
+ int ret;
+
+ mutex_lock(&aat2870->io_lock);
+
+ ret = __aat2870_read(aat2870, addr, &old_val);
+ if (ret)
+ goto out_unlock;
+
+ new_val = (old_val & ~mask) | (val & mask);
+ change = old_val != new_val;
+ if (change)
+ ret = __aat2870_write(aat2870, addr, new_val);
+
+out_unlock:
+ mutex_unlock(&aat2870->io_lock);
+
+ return ret;
+}
+
+static inline void aat2870_enable(struct aat2870_data *aat2870)
+{
+ if (aat2870->en_pin >= 0)
+ gpio_set_value(aat2870->en_pin, 1);
+
+ aat2870->is_enable = 1;
+}
+
+static inline void aat2870_disable(struct aat2870_data *aat2870)
+{
+ if (aat2870->en_pin >= 0)
+ gpio_set_value(aat2870->en_pin, 0);
+
+ aat2870->is_enable = 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t aat2870_dump_reg(struct aat2870_data *aat2870, char *buf)
+{
+ u8 addr, val;
+ ssize_t count = 0;
+ int ret;
+
+ count += sprintf(buf, "aat2870 registers\n");
+ for (addr = 0; addr < AAT2870_REG_NUM; addr++) {
+ count += sprintf(buf + count, "0x%02x: ", addr);
+ if (count >= PAGE_SIZE - 1)
+ break;
+
+ ret = aat2870->read(aat2870, addr, &val);
+ if (ret == 0)
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "0x%02x", val);
+ else
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "<read fail: %d>", ret);
+
+ if (count >= PAGE_SIZE - 1)
+ break;
+
+ count += snprintf(buf + count, PAGE_SIZE - count, "\n");
+ if (count >= PAGE_SIZE - 1)
+ break;
+ }
+
+ /* Truncate count; min() would cause a warning */
+ if (count >= PAGE_SIZE)
+ count = PAGE_SIZE - 1;
+
+ return count;
+}
+
+static int aat2870_reg_open_file(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t aat2870_reg_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct aat2870_data *aat2870 = file->private_data;
+ char *buf;
+ ssize_t ret;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = aat2870_dump_reg(aat2870, buf);
+ if (ret >= 0)
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t aat2870_reg_write_file(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct aat2870_data *aat2870 = file->private_data;
+ char buf[32];
+ int buf_size;
+ char *start = buf;
+ unsigned long addr, val;
+ int ret;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size)) {
+ dev_err(aat2870->dev, "Failed to copy from user\n");
+ return -EFAULT;
+ }
+ buf[buf_size] = 0;
+
+ while (*start == ' ')
+ start++;
+
+ addr = simple_strtoul(start, &start, 16);
+ if (addr >= AAT2870_REG_NUM) {
+ dev_err(aat2870->dev, "Invalid address, 0x%lx\n", addr);
+ return -EINVAL;
+ }
+
+ while (*start == ' ')
+ start++;
+
+ if (strict_strtoul(start, 16, &val))
+ return -EINVAL;
+
+ ret = aat2870->write(aat2870, (u8)addr, (u8)val);
+ if (ret)
+ return ret;
+
+ return buf_size;
+}
+
+static const struct file_operations aat2870_reg_fops = {
+ .open = aat2870_reg_open_file,
+ .read = aat2870_reg_read_file,
+ .write = aat2870_reg_write_file,
+};
+
+static void aat2870_init_debugfs(struct aat2870_data *aat2870)
+{
+ aat2870->dentry_root = debugfs_create_dir("aat2870", NULL);
+ if (!aat2870->dentry_root) {
+ dev_warn(aat2870->dev,
+ "Failed to create debugfs root directory\n");
+ return;
+ }
+
+ aat2870->dentry_reg = debugfs_create_file("regs", 0644,
+ aat2870->dentry_root,
+ aat2870, &aat2870_reg_fops);
+ if (!aat2870->dentry_reg)
+ dev_warn(aat2870->dev,
+ "Failed to create debugfs register file\n");
+}
+
+static void aat2870_uninit_debugfs(struct aat2870_data *aat2870)
+{
+ debugfs_remove_recursive(aat2870->dentry_root);
+}
+#else
+static inline void aat2870_init_debugfs(struct aat2870_data *aat2870)
+{
+}
+
+static inline void aat2870_uninit_debugfs(struct aat2870_data *aat2870)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int aat2870_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct aat2870_platform_data *pdata = client->dev.platform_data;
+ struct aat2870_data *aat2870;
+ int i, j;
+ int ret = 0;
+
+ aat2870 = kzalloc(sizeof(struct aat2870_data), GFP_KERNEL);
+ if (!aat2870) {
+ dev_err(&client->dev,
+ "Failed to allocate memory for aat2870\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ aat2870->dev = &client->dev;
+ dev_set_drvdata(aat2870->dev, aat2870);
+
+ aat2870->client = client;
+ i2c_set_clientdata(client, aat2870);
+
+ aat2870->reg_cache = aat2870_regs;
+
+ if (pdata->en_pin < 0)
+ aat2870->en_pin = -1;
+ else
+ aat2870->en_pin = pdata->en_pin;
+
+ aat2870->init = pdata->init;
+ aat2870->uninit = pdata->uninit;
+ aat2870->read = aat2870_read;
+ aat2870->write = aat2870_write;
+ aat2870->update = aat2870_update;
+
+ mutex_init(&aat2870->io_lock);
+
+ if (aat2870->init)
+ aat2870->init(aat2870);
+
+ if (aat2870->en_pin >= 0) {
+ ret = gpio_request(aat2870->en_pin, "aat2870-en");
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to request GPIO %d\n", aat2870->en_pin);
+ goto out_kfree;
+ }
+ gpio_direction_output(aat2870->en_pin, 1);
+ }
+
+ aat2870_enable(aat2870);
+
+ for (i = 0; i < pdata->num_subdevs; i++) {
+ for (j = 0; j < ARRAY_SIZE(aat2870_devs); j++) {
+ if ((pdata->subdevs[i].id == aat2870_devs[j].id) &&
+ !strcmp(pdata->subdevs[i].name,
+ aat2870_devs[j].name)) {
+ aat2870_devs[j].platform_data =
+ pdata->subdevs[i].platform_data;
+ break;
+ }
+ }
+ }
+
+ ret = mfd_add_devices(aat2870->dev, 0, aat2870_devs,
+ ARRAY_SIZE(aat2870_devs), NULL, 0);
+ if (ret != 0) {
+ dev_err(aat2870->dev, "Failed to add subdev: %d\n", ret);
+ goto out_disable;
+ }
+
+ aat2870_init_debugfs(aat2870);
+
+ return 0;
+
+out_disable:
+ aat2870_disable(aat2870);
+ if (aat2870->en_pin >= 0)
+ gpio_free(aat2870->en_pin);
+out_kfree:
+ kfree(aat2870);
+out:
+ return ret;
+}
+
+static int aat2870_i2c_remove(struct i2c_client *client)
+{
+ struct aat2870_data *aat2870 = i2c_get_clientdata(client);
+
+ aat2870_uninit_debugfs(aat2870);
+
+ mfd_remove_devices(aat2870->dev);
+ aat2870_disable(aat2870);
+ if (aat2870->en_pin >= 0)
+ gpio_free(aat2870->en_pin);
+ if (aat2870->uninit)
+ aat2870->uninit(aat2870);
+ kfree(aat2870);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int aat2870_i2c_suspend(struct i2c_client *client, pm_message_t state)
+{
+ struct aat2870_data *aat2870 = i2c_get_clientdata(client);
+
+ aat2870_disable(aat2870);
+
+ return 0;
+}
+
+static int aat2870_i2c_resume(struct i2c_client *client)
+{
+ struct aat2870_data *aat2870 = i2c_get_clientdata(client);
+ struct aat2870_register *reg = NULL;
+ int i;
+
+ aat2870_enable(aat2870);
+
+ /* restore registers */
+ for (i = 0; i < AAT2870_REG_NUM; i++) {
+ reg = &aat2870->reg_cache[i];
+ if (reg->writeable)
+ aat2870->write(aat2870, i, reg->value);
+ }
+
+ return 0;
+}
+#else
+#define aat2870_i2c_suspend NULL
+#define aat2870_i2c_resume NULL
+#endif /* CONFIG_PM */
+
+static struct i2c_device_id aat2870_i2c_id_table[] = {
+ { "aat2870", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, aat2870_i2c_id_table);
+
+static struct i2c_driver aat2870_i2c_driver = {
+ .driver = {
+ .name = "aat2870",
+ .owner = THIS_MODULE,
+ },
+ .probe = aat2870_i2c_probe,
+ .remove = aat2870_i2c_remove,
+ .suspend = aat2870_i2c_suspend,
+ .resume = aat2870_i2c_resume,
+ .id_table = aat2870_i2c_id_table,
+};
+
+static int __init aat2870_init(void)
+{
+ return i2c_add_driver(&aat2870_i2c_driver);
+}
+subsys_initcall(aat2870_init);
+
+static void __exit aat2870_exit(void)
+{
+ i2c_del_driver(&aat2870_i2c_driver);
+}
+module_exit(aat2870_exit);
+
+MODULE_DESCRIPTION("Core support for the AnalogicTech AAT2870");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index 3d7dce671b9..56ba1943c91 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -879,20 +879,13 @@ static ssize_t ab3550_bank_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_bank;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_bank);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_bank);
if (err)
- return -EINVAL;
+ return err;
if (user_bank >= AB3550_NUM_BANKS) {
dev_err(&ab->i2c_client[0]->dev,
@@ -902,7 +895,7 @@ static ssize_t ab3550_bank_write(struct file *file,
ab->debug_bank = user_bank;
- return buf_size;
+ return count;
}
static int ab3550_address_print(struct seq_file *s, void *p)
@@ -923,27 +916,21 @@ static ssize_t ab3550_address_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_address;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_address);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_address);
if (err)
- return -EINVAL;
+ return err;
+
if (user_address > 0xff) {
dev_err(&ab->i2c_client[0]->dev,
"debugfs error input > 0xff\n");
return -EINVAL;
}
ab->debug_address = user_address;
- return buf_size;
+ return count;
}
static int ab3550_val_print(struct seq_file *s, void *p)
@@ -971,21 +958,15 @@ static ssize_t ab3550_val_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_val;
int err;
u8 regvalue;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf)-1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_val);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_val);
if (err)
- return -EINVAL;
+ return err;
+
if (user_val > 0xff) {
dev_err(&ab->i2c_client[0]->dev,
"debugfs error input > 0xff\n");
@@ -1002,7 +983,7 @@ static ssize_t ab3550_val_write(struct file *file,
if (err)
return -EINVAL;
- return buf_size;
+ return count;
}
static const struct file_operations ab3550_bank_fops = {
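
The debugfs write handlers above (and the matching ones in ab8500-debugfs.c further down) collapse the copy_from_user()/manual-termination/strict_strtoul() sequence into one call, so the pattern reduces to:

	unsigned long val;
	int err;

	err = kstrtoul_from_user(user_buf, count, 0, &val);
	if (err)
		return err;

kstrtoul_from_user() bounds the copy itself, guarantees NUL termination and propagates the parser's error code, which is why the handlers can also drop the local 32-byte buffer and return count instead of a truncated buf_size.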
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index fc0c1af1566..387705e494b 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -363,7 +363,7 @@ static void ab8500_irq_remove(struct ab8500 *ab8500)
}
}
-static struct resource ab8500_gpio_resources[] = {
+static struct resource __devinitdata ab8500_gpio_resources[] = {
{
.name = "GPIO_INT6",
.start = AB8500_INT_GPIO6R,
@@ -372,7 +372,7 @@ static struct resource ab8500_gpio_resources[] = {
}
};
-static struct resource ab8500_gpadc_resources[] = {
+static struct resource __devinitdata ab8500_gpadc_resources[] = {
{
.name = "HW_CONV_END",
.start = AB8500_INT_GP_HW_ADC_CONV_END,
@@ -387,7 +387,7 @@ static struct resource ab8500_gpadc_resources[] = {
},
};
-static struct resource ab8500_rtc_resources[] = {
+static struct resource __devinitdata ab8500_rtc_resources[] = {
{
.name = "60S",
.start = AB8500_INT_RTC_60S,
@@ -402,7 +402,7 @@ static struct resource ab8500_rtc_resources[] = {
},
};
-static struct resource ab8500_poweronkey_db_resources[] = {
+static struct resource __devinitdata ab8500_poweronkey_db_resources[] = {
{
.name = "ONKEY_DBF",
.start = AB8500_INT_PON_KEY1DB_F,
@@ -417,20 +417,47 @@ static struct resource ab8500_poweronkey_db_resources[] = {
},
};
-static struct resource ab8500_bm_resources[] = {
+static struct resource __devinitdata ab8500_av_acc_detect_resources[] = {
{
- .name = "MAIN_EXT_CH_NOT_OK",
- .start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
- .end = AB8500_INT_MAIN_EXT_CH_NOT_OK,
- .flags = IORESOURCE_IRQ,
+ .name = "ACC_DETECT_1DB_F",
+ .start = AB8500_INT_ACC_DETECT_1DB_F,
+ .end = AB8500_INT_ACC_DETECT_1DB_F,
+ .flags = IORESOURCE_IRQ,
},
{
- .name = "BATT_OVV",
- .start = AB8500_INT_BATT_OVV,
- .end = AB8500_INT_BATT_OVV,
- .flags = IORESOURCE_IRQ,
+ .name = "ACC_DETECT_1DB_R",
+ .start = AB8500_INT_ACC_DETECT_1DB_R,
+ .end = AB8500_INT_ACC_DETECT_1DB_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "ACC_DETECT_21DB_F",
+ .start = AB8500_INT_ACC_DETECT_21DB_F,
+ .end = AB8500_INT_ACC_DETECT_21DB_F,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "ACC_DETECT_21DB_R",
+ .start = AB8500_INT_ACC_DETECT_21DB_R,
+ .end = AB8500_INT_ACC_DETECT_21DB_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "ACC_DETECT_22DB_F",
+ .start = AB8500_INT_ACC_DETECT_22DB_F,
+ .end = AB8500_INT_ACC_DETECT_22DB_F,
+ .flags = IORESOURCE_IRQ,
},
{
+ .name = "ACC_DETECT_22DB_R",
+ .start = AB8500_INT_ACC_DETECT_22DB_R,
+ .end = AB8500_INT_ACC_DETECT_22DB_R,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource __devinitdata ab8500_charger_resources[] = {
+ {
.name = "MAIN_CH_UNPLUG_DET",
.start = AB8500_INT_MAIN_CH_UNPLUG_DET,
.end = AB8500_INT_MAIN_CH_UNPLUG_DET,
@@ -443,27 +470,27 @@ static struct resource ab8500_bm_resources[] = {
.flags = IORESOURCE_IRQ,
},
{
- .name = "VBUS_DET_F",
- .start = AB8500_INT_VBUS_DET_F,
- .end = AB8500_INT_VBUS_DET_F,
- .flags = IORESOURCE_IRQ,
- },
- {
.name = "VBUS_DET_R",
.start = AB8500_INT_VBUS_DET_R,
.end = AB8500_INT_VBUS_DET_R,
.flags = IORESOURCE_IRQ,
},
{
- .name = "BAT_CTRL_INDB",
- .start = AB8500_INT_BAT_CTRL_INDB,
- .end = AB8500_INT_BAT_CTRL_INDB,
+ .name = "VBUS_DET_F",
+ .start = AB8500_INT_VBUS_DET_F,
+ .end = AB8500_INT_VBUS_DET_F,
.flags = IORESOURCE_IRQ,
},
{
- .name = "CH_WD_EXP",
- .start = AB8500_INT_CH_WD_EXP,
- .end = AB8500_INT_CH_WD_EXP,
+ .name = "USB_LINK_STATUS",
+ .start = AB8500_INT_USB_LINK_STATUS,
+ .end = AB8500_INT_USB_LINK_STATUS,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_CHARGE_DET_DONE",
+ .start = AB8500_INT_USB_CHG_DET_DONE,
+ .end = AB8500_INT_USB_CHG_DET_DONE,
.flags = IORESOURCE_IRQ,
},
{
@@ -473,21 +500,60 @@ static struct resource ab8500_bm_resources[] = {
.flags = IORESOURCE_IRQ,
},
{
- .name = "NCONV_ACCU",
- .start = AB8500_INT_CCN_CONV_ACC,
- .end = AB8500_INT_CCN_CONV_ACC,
+ .name = "USB_CH_TH_PROT_R",
+ .start = AB8500_INT_USB_CH_TH_PROT_R,
+ .end = AB8500_INT_USB_CH_TH_PROT_R,
.flags = IORESOURCE_IRQ,
},
{
- .name = "LOW_BAT_F",
- .start = AB8500_INT_LOW_BAT_F,
- .end = AB8500_INT_LOW_BAT_F,
+ .name = "USB_CH_TH_PROT_F",
+ .start = AB8500_INT_USB_CH_TH_PROT_F,
+ .end = AB8500_INT_USB_CH_TH_PROT_F,
.flags = IORESOURCE_IRQ,
},
{
- .name = "LOW_BAT_R",
- .start = AB8500_INT_LOW_BAT_R,
- .end = AB8500_INT_LOW_BAT_R,
+ .name = "MAIN_EXT_CH_NOT_OK",
+ .start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
+ .end = AB8500_INT_MAIN_EXT_CH_NOT_OK,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "MAIN_CH_TH_PROT_R",
+ .start = AB8500_INT_MAIN_CH_TH_PROT_R,
+ .end = AB8500_INT_MAIN_CH_TH_PROT_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "MAIN_CH_TH_PROT_F",
+ .start = AB8500_INT_MAIN_CH_TH_PROT_F,
+ .end = AB8500_INT_MAIN_CH_TH_PROT_F,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_CHARGER_NOT_OKR",
+ .start = AB8500_INT_USB_CHARGER_NOT_OK,
+ .end = AB8500_INT_USB_CHARGER_NOT_OK,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_CHARGER_NOT_OKF",
+ .start = AB8500_INT_USB_CHARGER_NOT_OKF,
+ .end = AB8500_INT_USB_CHARGER_NOT_OKF,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "CH_WD_EXP",
+ .start = AB8500_INT_CH_WD_EXP,
+ .end = AB8500_INT_CH_WD_EXP,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource __devinitdata ab8500_btemp_resources[] = {
+ {
+ .name = "BAT_CTRL_INDB",
+ .start = AB8500_INT_BAT_CTRL_INDB,
+ .end = AB8500_INT_BAT_CTRL_INDB,
.flags = IORESOURCE_IRQ,
},
{
@@ -503,38 +569,55 @@ static struct resource ab8500_bm_resources[] = {
.flags = IORESOURCE_IRQ,
},
{
- .name = "USB_CHARGER_NOT_OKR",
- .start = AB8500_INT_USB_CHARGER_NOT_OK,
- .end = AB8500_INT_USB_CHARGER_NOT_OK,
+ .name = "BTEMP_LOW_MEDIUM",
+ .start = AB8500_INT_BTEMP_LOW_MEDIUM,
+ .end = AB8500_INT_BTEMP_LOW_MEDIUM,
.flags = IORESOURCE_IRQ,
},
{
- .name = "USB_CHARGE_DET_DONE",
- .start = AB8500_INT_USB_CHG_DET_DONE,
- .end = AB8500_INT_USB_CHG_DET_DONE,
+ .name = "BTEMP_MEDIUM_HIGH",
+ .start = AB8500_INT_BTEMP_MEDIUM_HIGH,
+ .end = AB8500_INT_BTEMP_MEDIUM_HIGH,
.flags = IORESOURCE_IRQ,
},
+};
+
+static struct resource __devinitdata ab8500_fg_resources[] = {
{
- .name = "USB_CH_TH_PROT_R",
- .start = AB8500_INT_USB_CH_TH_PROT_R,
- .end = AB8500_INT_USB_CH_TH_PROT_R,
+ .name = "NCONV_ACCU",
+ .start = AB8500_INT_CCN_CONV_ACC,
+ .end = AB8500_INT_CCN_CONV_ACC,
.flags = IORESOURCE_IRQ,
},
{
- .name = "MAIN_CH_TH_PROT_R",
- .start = AB8500_INT_MAIN_CH_TH_PROT_R,
- .end = AB8500_INT_MAIN_CH_TH_PROT_R,
+ .name = "BATT_OVV",
+ .start = AB8500_INT_BATT_OVV,
+ .end = AB8500_INT_BATT_OVV,
.flags = IORESOURCE_IRQ,
},
{
- .name = "USB_CHARGER_NOT_OKF",
- .start = AB8500_INT_USB_CHARGER_NOT_OKF,
- .end = AB8500_INT_USB_CHARGER_NOT_OKF,
+ .name = "LOW_BAT_F",
+ .start = AB8500_INT_LOW_BAT_F,
+ .end = AB8500_INT_LOW_BAT_F,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "LOW_BAT_R",
+ .start = AB8500_INT_LOW_BAT_R,
+ .end = AB8500_INT_LOW_BAT_R,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "CC_INT_CALIB",
+ .start = AB8500_INT_CC_INT_CALIB,
+ .end = AB8500_INT_CC_INT_CALIB,
.flags = IORESOURCE_IRQ,
},
};
-static struct resource ab8500_debug_resources[] = {
+static struct resource __devinitdata ab8500_chargalg_resources[] = {};
+
+static struct resource __devinitdata ab8500_debug_resources[] = {
{
.name = "IRQ_FIRST",
.start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
@@ -549,7 +632,7 @@ static struct resource ab8500_debug_resources[] = {
},
};
-static struct resource ab8500_usb_resources[] = {
+static struct resource __devinitdata ab8500_usb_resources[] = {
{
.name = "ID_WAKEUP_R",
.start = AB8500_INT_ID_WAKEUP_R,
@@ -580,9 +663,21 @@ static struct resource ab8500_usb_resources[] = {
.end = AB8500_INT_USB_LINK_STATUS,
.flags = IORESOURCE_IRQ,
},
+ {
+ .name = "USB_ADP_PROBE_PLUG",
+ .start = AB8500_INT_ADP_PROBE_PLUG,
+ .end = AB8500_INT_ADP_PROBE_PLUG,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "USB_ADP_PROBE_UNPLUG",
+ .start = AB8500_INT_ADP_PROBE_UNPLUG,
+ .end = AB8500_INT_ADP_PROBE_UNPLUG,
+ .flags = IORESOURCE_IRQ,
+ },
};
-static struct resource ab8500_temp_resources[] = {
+static struct resource __devinitdata ab8500_temp_resources[] = {
{
.name = "AB8500_TEMP_WARM",
.start = AB8500_INT_TEMP_WARM,
@@ -591,7 +686,7 @@ static struct resource ab8500_temp_resources[] = {
},
};
-static struct mfd_cell ab8500_devs[] = {
+static struct mfd_cell __devinitdata ab8500_devs[] = {
#ifdef CONFIG_DEBUG_FS
{
.name = "ab8500-debug",
@@ -621,11 +716,33 @@ static struct mfd_cell ab8500_devs[] = {
.resources = ab8500_rtc_resources,
},
{
- .name = "ab8500-bm",
- .num_resources = ARRAY_SIZE(ab8500_bm_resources),
- .resources = ab8500_bm_resources,
+ .name = "ab8500-charger",
+ .num_resources = ARRAY_SIZE(ab8500_charger_resources),
+ .resources = ab8500_charger_resources,
+ },
+ {
+ .name = "ab8500-btemp",
+ .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
+ .resources = ab8500_btemp_resources,
+ },
+ {
+ .name = "ab8500-fg",
+ .num_resources = ARRAY_SIZE(ab8500_fg_resources),
+ .resources = ab8500_fg_resources,
+ },
+ {
+ .name = "ab8500-chargalg",
+ .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
+ .resources = ab8500_chargalg_resources,
+ },
+ {
+ .name = "ab8500-acc-det",
+ .num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
+ .resources = ab8500_av_acc_detect_resources,
+ },
+ {
+ .name = "ab8500-codec",
},
- { .name = "ab8500-codec", },
{
.name = "ab8500-usb",
.num_resources = ARRAY_SIZE(ab8500_usb_resources),
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 64748e42ac0..64bdeeb1c11 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -419,20 +419,13 @@ static ssize_t ab8500_bank_write(struct file *file,
size_t count, loff_t *ppos)
{
struct device *dev = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_bank;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_bank);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_bank);
if (err)
- return -EINVAL;
+ return err;
if (user_bank >= AB8500_NUM_BANKS) {
dev_err(dev, "debugfs error input > number of banks\n");
@@ -441,7 +434,7 @@ static ssize_t ab8500_bank_write(struct file *file,
debug_bank = user_bank;
- return buf_size;
+ return count;
}
static int ab8500_address_print(struct seq_file *s, void *p)
@@ -459,26 +452,20 @@ static ssize_t ab8500_address_write(struct file *file,
size_t count, loff_t *ppos)
{
struct device *dev = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_address;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf) - 1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_address);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_address);
if (err)
- return -EINVAL;
+ return err;
+
if (user_address > 0xff) {
dev_err(dev, "debugfs error input > 0xff\n");
return -EINVAL;
}
debug_address = user_address;
- return buf_size;
+ return count;
}
static int ab8500_val_print(struct seq_file *s, void *p)
@@ -509,20 +496,14 @@ static ssize_t ab8500_val_write(struct file *file,
size_t count, loff_t *ppos)
{
struct device *dev = ((struct seq_file *)(file->private_data))->private;
- char buf[32];
- int buf_size;
unsigned long user_val;
int err;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf)-1));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size] = 0;
-
- err = strict_strtoul(buf, 0, &user_val);
+ err = kstrtoul_from_user(user_buf, count, 0, &user_val);
if (err)
- return -EINVAL;
+ return err;
+
if (user_val > 0xff) {
dev_err(dev, "debugfs error input > 0xff\n");
return -EINVAL;
@@ -534,7 +515,7 @@ static ssize_t ab8500_val_write(struct file *file,
return -EINVAL;
}
- return buf_size;
+ return count;
}
static const struct file_operations ab8500_bank_fops = {
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index c27fd1fc3b8..c71ae09430c 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -619,6 +619,7 @@ static void asic3_clk_disable(struct asic3 *asic, struct asic3_clk *clk)
/* MFD cells (SPI, PWM, LED, DS1WM, MMC) */
static struct ds1wm_driver_data ds1wm_pdata = {
.active_high = 1,
+ .reset_recover_delay = 1,
};
static struct resource ds1wm_resources[] = {
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index 2808bd125d1..04c7093d649 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -99,6 +99,7 @@ static int ds1wm_disable(struct platform_device *pdev)
static struct ds1wm_driver_data ds1wm_pdata = {
.active_high = 0,
+ .reset_recover_delay = 1,
};
static struct resource ds1wm_resources[] __initdata = {
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index a0bd0cf05af..21131c7b0f1 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -56,7 +56,7 @@ struct jz4740_adc {
void __iomem *base;
int irq;
- int irq_base;
+ struct irq_chip_generic *gc;
struct clk *clk;
atomic_t clk_ref;
@@ -64,63 +64,17 @@ struct jz4740_adc {
spinlock_t lock;
};
-static inline void jz4740_adc_irq_set_masked(struct jz4740_adc *adc, int irq,
- bool masked)
-{
- unsigned long flags;
- uint8_t val;
-
- irq -= adc->irq_base;
-
- spin_lock_irqsave(&adc->lock, flags);
-
- val = readb(adc->base + JZ_REG_ADC_CTRL);
- if (masked)
- val |= BIT(irq);
- else
- val &= ~BIT(irq);
- writeb(val, adc->base + JZ_REG_ADC_CTRL);
-
- spin_unlock_irqrestore(&adc->lock, flags);
-}
-
-static void jz4740_adc_irq_mask(struct irq_data *data)
-{
- struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
- jz4740_adc_irq_set_masked(adc, data->irq, true);
-}
-
-static void jz4740_adc_irq_unmask(struct irq_data *data)
-{
- struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
- jz4740_adc_irq_set_masked(adc, data->irq, false);
-}
-
-static void jz4740_adc_irq_ack(struct irq_data *data)
-{
- struct jz4740_adc *adc = irq_data_get_irq_chip_data(data);
- unsigned int irq = data->irq - adc->irq_base;
- writeb(BIT(irq), adc->base + JZ_REG_ADC_STATUS);
-}
-
-static struct irq_chip jz4740_adc_irq_chip = {
- .name = "jz4740-adc",
- .irq_mask = jz4740_adc_irq_mask,
- .irq_unmask = jz4740_adc_irq_unmask,
- .irq_ack = jz4740_adc_irq_ack,
-};
-
static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc)
{
- struct jz4740_adc *adc = irq_desc_get_handler_data(desc);
+ struct irq_chip_generic *gc = irq_desc_get_handler_data(desc);
uint8_t status;
unsigned int i;
- status = readb(adc->base + JZ_REG_ADC_STATUS);
+ status = readb(gc->reg_base + JZ_REG_ADC_STATUS);
for (i = 0; i < 5; ++i) {
if (status & BIT(i))
- generic_handle_irq(adc->irq_base + i);
+ generic_handle_irq(gc->irq_base + i);
}
}
@@ -249,10 +203,12 @@ const struct mfd_cell jz4740_adc_cells[] = {
static int __devinit jz4740_adc_probe(struct platform_device *pdev)
{
- int ret;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
struct jz4740_adc *adc;
struct resource *mem_base;
- int irq;
+ int ret;
+ int irq_base;
adc = kmalloc(sizeof(*adc), GFP_KERNEL);
if (!adc) {
@@ -267,9 +223,9 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
goto err_free;
}
- adc->irq_base = platform_get_irq(pdev, 1);
- if (adc->irq_base < 0) {
- ret = adc->irq_base;
+ irq_base = platform_get_irq(pdev, 1);
+ if (irq_base < 0) {
+ ret = irq_base;
dev_err(&pdev->dev, "Failed to get irq base: %d\n", ret);
goto err_free;
}
@@ -309,20 +265,28 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, adc);
- for (irq = adc->irq_base; irq < adc->irq_base + 5; ++irq) {
- irq_set_chip_data(irq, adc);
- irq_set_chip_and_handler(irq, &jz4740_adc_irq_chip,
- handle_level_irq);
- }
+ gc = irq_alloc_generic_chip("INTC", 1, irq_base, adc->base,
+ handle_level_irq);
+
+ ct = gc->chip_types;
+ ct->regs.mask = JZ_REG_ADC_CTRL;
+ ct->regs.ack = JZ_REG_ADC_STATUS;
+ ct->chip.irq_mask = irq_gc_mask_set_bit;
+ ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+ ct->chip.irq_ack = irq_gc_ack;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
+
+ adc->gc = gc;
- irq_set_handler_data(adc->irq, adc);
+ irq_set_handler_data(adc->irq, gc);
irq_set_chained_handler(adc->irq, jz4740_adc_irq_demux);
writeb(0x00, adc->base + JZ_REG_ADC_ENABLE);
writeb(0xff, adc->base + JZ_REG_ADC_CTRL);
ret = mfd_add_devices(&pdev->dev, 0, jz4740_adc_cells,
- ARRAY_SIZE(jz4740_adc_cells), mem_base, adc->irq_base);
+ ARRAY_SIZE(jz4740_adc_cells), mem_base, irq_base);
if (ret < 0)
goto err_clk_put;
@@ -347,6 +311,8 @@ static int __devexit jz4740_adc_remove(struct platform_device *pdev)
mfd_remove_devices(&pdev->dev);
+ irq_remove_generic_chip(adc->gc, IRQ_MSK(5), IRQ_NOPROBE | IRQ_LEVEL, 0);
+ kfree(adc->gc);
irq_set_handler_data(adc->irq, NULL);
irq_set_chained_handler(adc->irq, NULL);
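
The jz4740-adc rework above replaces the hand-rolled mask/unmask/ack callbacks with a generic irq chip. Condensed, with the register semantics spelled out (the offsets are device specific):

	gc = irq_alloc_generic_chip("INTC", 1, irq_base, adc->base, handle_level_irq);
	ct = gc->chip_types;
	ct->regs.mask = JZ_REG_ADC_CTRL;	/* a set bit in CTRL masks that line */
	ct->regs.ack  = JZ_REG_ADC_STATUS;	/* acked by writing the bit to STATUS */
	ct->chip.irq_mask   = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_ack    = irq_gc_ack;
	irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);

IRQ_MSK(5) covers the five demuxed lines handled in jz4740_adc_irq_demux(), and the remove path tears the same setup down with irq_remove_generic_chip() plus kfree() of the chip.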
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index ea3f52c07ef..ea1169b0477 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -37,6 +37,9 @@
#define GPIOBASE 0x44
#define GPIO_IO_SIZE 64
+#define WDTBASE 0x84
+#define WDT_IO_SIZE 64
+
static struct resource smbus_sch_resource = {
.flags = IORESOURCE_IO,
};
@@ -59,6 +62,18 @@ static struct mfd_cell lpc_sch_cells[] = {
},
};
+static struct resource wdt_sch_resource = {
+ .flags = IORESOURCE_IO,
+};
+
+static struct mfd_cell tunnelcreek_cells[] = {
+ {
+ .name = "tunnelcreek_wdt",
+ .num_resources = 1,
+ .resources = &wdt_sch_resource,
+ },
+};
+
static struct pci_device_id lpc_sch_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
@@ -72,6 +87,7 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
unsigned int base_addr_cfg;
unsigned short base_addr;
int i;
+ int ret;
pci_read_config_dword(dev, SMBASE, &base_addr_cfg);
if (!(base_addr_cfg & (1 << 31))) {
@@ -104,8 +120,39 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
for (i=0; i < ARRAY_SIZE(lpc_sch_cells); i++)
lpc_sch_cells[i].id = id->device;
- return mfd_add_devices(&dev->dev, 0,
+ ret = mfd_add_devices(&dev->dev, 0,
lpc_sch_cells, ARRAY_SIZE(lpc_sch_cells), NULL, 0);
+ if (ret)
+ goto out_dev;
+
+ if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC) {
+ pci_read_config_dword(dev, WDTBASE, &base_addr_cfg);
+ if (!(base_addr_cfg & (1 << 31))) {
+ dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n");
+ ret = -ENODEV;
+ goto out_dev;
+ }
+ base_addr = (unsigned short)base_addr_cfg;
+ if (base_addr == 0) {
+ dev_err(&dev->dev, "I/O space for WDT uninitialized\n");
+ ret = -ENODEV;
+ goto out_dev;
+ }
+
+ wdt_sch_resource.start = base_addr;
+ wdt_sch_resource.end = base_addr + WDT_IO_SIZE - 1;
+
+ for (i = 0; i < ARRAY_SIZE(tunnelcreek_cells); i++)
+ tunnelcreek_cells[i].id = id->device;
+
+ ret = mfd_add_devices(&dev->dev, 0, tunnelcreek_cells,
+ ARRAY_SIZE(tunnelcreek_cells), NULL, 0);
+ }
+
+ return ret;
+out_dev:
+ mfd_remove_devices(&dev->dev);
+ return ret;
}
static void __devexit lpc_sch_remove(struct pci_dev *dev)
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index 638bf7e4d3b..09274cf7c33 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -58,8 +58,6 @@ static struct i2c_client *get_i2c(struct max8997_dev *max8997,
default:
return ERR_PTR(-EINVAL);
}
-
- return ERR_PTR(-EINVAL);
}
struct max8997_irq_data {
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 9ec7570f5b8..de4096aee24 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -39,6 +39,8 @@ static struct mfd_cell max8998_devs[] = {
.name = "max8998-pmic",
}, {
.name = "max8998-rtc",
+ }, {
+ .name = "max8998-battery",
},
};
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 855219526cc..29601e7d606 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -26,7 +26,6 @@
#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <plat/usb.h>
-#include <linux/pm_runtime.h>
#define USBHS_DRIVER_NAME "usbhs-omap"
#define OMAP_EHCI_DEVICE "ehci-omap"
@@ -147,6 +146,9 @@
struct usbhs_hcd_omap {
+ struct clk *usbhost_ick;
+ struct clk *usbhost_hs_fck;
+ struct clk *usbhost_fs_fck;
struct clk *xclk60mhsp1_ck;
struct clk *xclk60mhsp2_ck;
struct clk *utmi_p1_fck;
@@ -156,6 +158,8 @@ struct usbhs_hcd_omap {
struct clk *usbhost_p2_fck;
struct clk *usbtll_p2_fck;
struct clk *init_60m_fclk;
+ struct clk *usbtll_fck;
+ struct clk *usbtll_ick;
void __iomem *uhh_base;
void __iomem *tll_base;
@@ -349,13 +353,46 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
omap->platdata.ehci_data = pdata->ehci_data;
omap->platdata.ohci_data = pdata->ohci_data;
- pm_runtime_enable(&pdev->dev);
+ omap->usbhost_ick = clk_get(dev, "usbhost_ick");
+ if (IS_ERR(omap->usbhost_ick)) {
+ ret = PTR_ERR(omap->usbhost_ick);
+ dev_err(dev, "usbhost_ick failed error:%d\n", ret);
+ goto err_end;
+ }
+
+ omap->usbhost_hs_fck = clk_get(dev, "hs_fck");
+ if (IS_ERR(omap->usbhost_hs_fck)) {
+ ret = PTR_ERR(omap->usbhost_hs_fck);
+ dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret);
+ goto err_usbhost_ick;
+ }
+
+ omap->usbhost_fs_fck = clk_get(dev, "fs_fck");
+ if (IS_ERR(omap->usbhost_fs_fck)) {
+ ret = PTR_ERR(omap->usbhost_fs_fck);
+ dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret);
+ goto err_usbhost_hs_fck;
+ }
+
+ omap->usbtll_fck = clk_get(dev, "usbtll_fck");
+ if (IS_ERR(omap->usbtll_fck)) {
+ ret = PTR_ERR(omap->usbtll_fck);
+ dev_err(dev, "usbtll_fck failed error:%d\n", ret);
+ goto err_usbhost_fs_fck;
+ }
+
+ omap->usbtll_ick = clk_get(dev, "usbtll_ick");
+ if (IS_ERR(omap->usbtll_ick)) {
+ ret = PTR_ERR(omap->usbtll_ick);
+ dev_err(dev, "usbtll_ick failed error:%d\n", ret);
+ goto err_usbtll_fck;
+ }
omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
if (IS_ERR(omap->utmi_p1_fck)) {
ret = PTR_ERR(omap->utmi_p1_fck);
dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
- goto err_end;
+ goto err_usbtll_ick;
}
omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
@@ -485,8 +522,22 @@ err_xclk60mhsp1_ck:
err_utmi_p1_fck:
clk_put(omap->utmi_p1_fck);
+err_usbtll_ick:
+ clk_put(omap->usbtll_ick);
+
+err_usbtll_fck:
+ clk_put(omap->usbtll_fck);
+
+err_usbhost_fs_fck:
+ clk_put(omap->usbhost_fs_fck);
+
+err_usbhost_hs_fck:
+ clk_put(omap->usbhost_hs_fck);
+
+err_usbhost_ick:
+ clk_put(omap->usbhost_ick);
+
err_end:
- pm_runtime_disable(&pdev->dev);
kfree(omap);
end_probe:
@@ -520,7 +571,11 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
clk_put(omap->utmi_p2_fck);
clk_put(omap->xclk60mhsp1_ck);
clk_put(omap->utmi_p1_fck);
- pm_runtime_disable(&pdev->dev);
+ clk_put(omap->usbtll_ick);
+ clk_put(omap->usbtll_fck);
+ clk_put(omap->usbhost_fs_fck);
+ clk_put(omap->usbhost_hs_fck);
+ clk_put(omap->usbhost_ick);
kfree(omap);
return 0;
@@ -640,6 +695,7 @@ static int usbhs_enable(struct device *dev)
struct usbhs_omap_platform_data *pdata = &omap->platdata;
unsigned long flags = 0;
int ret = 0;
+ unsigned long timeout;
unsigned reg;
dev_dbg(dev, "starting TI HSUSB Controller\n");
@@ -652,7 +708,11 @@ static int usbhs_enable(struct device *dev)
if (omap->count > 0)
goto end_count;
- pm_runtime_get_sync(dev);
+ clk_enable(omap->usbhost_ick);
+ clk_enable(omap->usbhost_hs_fck);
+ clk_enable(omap->usbhost_fs_fck);
+ clk_enable(omap->usbtll_fck);
+ clk_enable(omap->usbtll_ick);
if (pdata->ehci_data->phy_reset) {
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
@@ -676,6 +736,50 @@ static int usbhs_enable(struct device *dev)
omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
+ /* perform TLL soft reset, and wait until reset is complete */
+ usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+ OMAP_USBTLL_SYSCONFIG_SOFTRESET);
+
+ /* Wait for TLL reset to complete */
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+ & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(dev, "operation timed out\n");
+ ret = -EINVAL;
+ goto err_tll;
+ }
+ }
+
+ dev_dbg(dev, "TLL RESET DONE\n");
+
+ /* (1<<3) = no idle mode only for initial debugging */
+ usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+ OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
+ OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
+ OMAP_USBTLL_SYSCONFIG_AUTOIDLE);
+
+ /* Put UHH in NoIdle/NoStandby mode */
+ reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
+ if (is_omap_usbhs_rev1(omap)) {
+ reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
+ | OMAP_UHH_SYSCONFIG_SIDLEMODE
+ | OMAP_UHH_SYSCONFIG_CACTIVITY
+ | OMAP_UHH_SYSCONFIG_MIDLEMODE);
+ reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+
+
+ } else if (is_omap_usbhs_rev2(omap)) {
+ reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
+ reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
+ reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
+ reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
+ }
+
+ usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+
reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
/* setup ULPI bypass and burst configurations */
reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
@@ -815,8 +919,6 @@ end_count:
return 0;
err_tll:
- pm_runtime_put_sync(dev);
- spin_unlock_irqrestore(&omap->lock, flags);
if (pdata->ehci_data->phy_reset) {
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
gpio_free(pdata->ehci_data->reset_gpio_port[0]);
@@ -824,6 +926,13 @@ err_tll:
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
gpio_free(pdata->ehci_data->reset_gpio_port[1]);
}
+
+ clk_disable(omap->usbtll_ick);
+ clk_disable(omap->usbtll_fck);
+ clk_disable(omap->usbhost_fs_fck);
+ clk_disable(omap->usbhost_hs_fck);
+ clk_disable(omap->usbhost_ick);
+ spin_unlock_irqrestore(&omap->lock, flags);
return ret;
}
@@ -889,14 +998,18 @@ static void usbhs_disable(struct device *dev)
if (is_omap_usbhs_rev2(omap)) {
if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_enable(omap->usbtll_p1_fck);
+ clk_disable(omap->usbtll_p1_fck);
if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_enable(omap->usbtll_p2_fck);
+ clk_disable(omap->usbtll_p2_fck);
clk_disable(omap->utmi_p2_fck);
clk_disable(omap->utmi_p1_fck);
}
- pm_runtime_put_sync(dev);
+ clk_disable(omap->usbtll_ick);
+ clk_disable(omap->usbtll_fck);
+ clk_disable(omap->usbhost_fs_fck);
+ clk_disable(omap->usbhost_hs_fck);
+ clk_disable(omap->usbhost_ick);
/* The gpio_free might sleep; so unlock the spinlock */
spin_unlock_irqrestore(&omap->lock, flags);
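
The omap-usb-host changes above drop the runtime-PM calls and return to explicit clk_get()/clk_enable()/clk_disable()/clk_put() on the host and TLL clocks, with the usual stacked-label unwind on probe failure. The idiom in miniature, with illustrative clock names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_clocks(struct device *dev, struct clk **a, struct clk **b)
{
	int ret;

	*a = clk_get(dev, "a_ck");
	if (IS_ERR(*a))
		return PTR_ERR(*a);

	*b = clk_get(dev, "b_ck");
	if (IS_ERR(*b)) {
		ret = PTR_ERR(*b);
		goto err_put_a;		/* release in reverse acquisition order */
	}
	return 0;

err_put_a:
	clk_put(*a);
	return ret;
}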
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 7ab7746631d..2963689cf45 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -228,7 +228,7 @@ int stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length,
EXPORT_SYMBOL_GPL(stmpe_block_write);
/**
- * stmpe_set_altfunc: set the alternate function for STMPE pins
+ * stmpe_set_altfunc() - set the alternate function for STMPE pins
* @stmpe: Device to configure
* @pins: Bitmask of pins to affect
* @block: block to enable alternate functions for
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index 0dbdc4e8cd7..e4ee3895658 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -42,6 +42,7 @@ struct stmpe_variant_block {
* @id_mask: bits valid in CHIPID register for comparison with id_val
* @num_gpios: number of GPIOS
* @af_bits: number of bits used to specify the alternate function
+ * @regs: variant specific registers.
* @blocks: list of blocks present on this device
* @num_blocks: number of blocks present on this device
* @num_irqs: number of internal IRQs available on this device
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index ad715bf49ca..71bc835324d 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -177,7 +177,7 @@ static int __devinit tc6387xb_probe(struct platform_device *dev)
if (ret)
goto err_resource;
- tc6387xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1);
+ tc6387xb->scr = ioremap(rscr->start, resource_size(rscr));
if (!tc6387xb->scr) {
ret = -ENOMEM;
goto err_ioremap;
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 69272e4e345..696879e2eef 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -287,12 +287,8 @@ static __devinitdata struct i2c_board_info timberdale_saa7706_i2c_board_info = {
static __devinitdata struct timb_radio_platform_data
timberdale_radio_platform_data = {
.i2c_adapter = 0,
- .tuner = {
- .info = &timberdale_tef6868_i2c_board_info
- },
- .dsp = {
- .info = &timberdale_saa7706_i2c_board_info
- }
+ .tuner = &timberdale_tef6868_i2c_board_info,
+ .dsp = &timberdale_saa7706_i2c_board_info
};
static const __devinitconst struct resource timberdale_video_resources[] = {
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 2229e66d80d..6f5b8cf2f65 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -147,12 +147,11 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
if (init_data == NULL)
return -ENOMEM;
- init_data->irq = pmic_plat_data->irq;
- init_data->irq_base = pmic_plat_data->irq;
-
tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL);
- if (tps65910 == NULL)
+ if (tps65910 == NULL) {
+ kfree(init_data);
return -ENOMEM;
+ }
i2c_set_clientdata(i2c, tps65910);
tps65910->dev = &i2c->dev;
@@ -168,17 +167,22 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
if (ret < 0)
goto err;
+ init_data->irq = pmic_plat_data->irq;
+ init_data->irq_base = pmic_plat_data->irq;
+
tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
ret = tps65910_irq_init(tps65910, init_data->irq, init_data);
if (ret < 0)
goto err;
+ kfree(init_data);
return ret;
err:
mfd_remove_devices(tps65910->dev);
kfree(tps65910);
+ kfree(init_data);
return ret;
}
@@ -187,6 +191,7 @@ static int tps65910_i2c_remove(struct i2c_client *i2c)
struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
mfd_remove_devices(tps65910->dev);
+ tps65910_irq_exit(tps65910);
kfree(tps65910);
return 0;
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
index 3d2dc56a3d4..e7ff783aa31 100644
--- a/drivers/mfd/tps65911-comparator.c
+++ b/drivers/mfd/tps65911-comparator.c
@@ -125,7 +125,7 @@ static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL);
static __devinit int tps65911_comparator_probe(struct platform_device *pdev)
{
struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
- struct tps65910_platform_data *pdata = dev_get_platdata(tps65910->dev);
+ struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
int ret;
ret = comp_threshold_set(tps65910, COMP1, pdata->vmbch_threshold);
@@ -157,6 +157,8 @@ static __devexit int tps65911_comparator_remove(struct platform_device *pdev)
struct tps65910 *tps65910;
tps65910 = dev_get_drvdata(pdev->dev.parent);
+ device_remove_file(&pdev->dev, &dev_attr_comp2_threshold);
+ device_remove_file(&pdev->dev, &dev_attr_comp1_threshold);
return 0;
}
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
new file mode 100644
index 00000000000..955bc00e4b2
--- /dev/null
+++ b/drivers/mfd/tps65912-core.c
@@ -0,0 +1,177 @@
+/*
+ * tps65912-core.c -- TI TPS65912x
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65912.h>
+
+static struct mfd_cell tps65912s[] = {
+ {
+ .name = "tps65912-pmic",
+ },
+};
+
+int tps65912_set_bits(struct tps65912 *tps65912, u8 reg, u8 mask)
+{
+ u8 data;
+ int err;
+
+ mutex_lock(&tps65912->io_mutex);
+
+ err = tps65912->read(tps65912, reg, 1, &data);
+ if (err) {
+ dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
+ goto out;
+ }
+
+ data |= mask;
+ err = tps65912->write(tps65912, reg, 1, &data);
+ if (err)
+ dev_err(tps65912->dev, "Write to reg 0x%x failed\n", reg);
+
+out:
+ mutex_unlock(&tps65912->io_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tps65912_set_bits);
+
+int tps65912_clear_bits(struct tps65912 *tps65912, u8 reg, u8 mask)
+{
+ u8 data;
+ int err;
+
+ mutex_lock(&tps65912->io_mutex);
+ err = tps65912->read(tps65912, reg, 1, &data);
+ if (err) {
+ dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
+ goto out;
+ }
+
+ data &= ~mask;
+ err = tps65912->write(tps65912, reg, 1, &data);
+ if (err)
+ dev_err(tps65912->dev, "Write to reg 0x%x failed\n", reg);
+
+out:
+ mutex_unlock(&tps65912->io_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tps65912_clear_bits);
+
+static inline int tps65912_read(struct tps65912 *tps65912, u8 reg)
+{
+ u8 val;
+ int err;
+
+ err = tps65912->read(tps65912, reg, 1, &val);
+ if (err < 0)
+ return err;
+
+ return val;
+}
+
+static inline int tps65912_write(struct tps65912 *tps65912, u8 reg, u8 val)
+{
+ return tps65912->write(tps65912, reg, 1, &val);
+}
+
+int tps65912_reg_read(struct tps65912 *tps65912, u8 reg)
+{
+ int data;
+
+ mutex_lock(&tps65912->io_mutex);
+
+ data = tps65912_read(tps65912, reg);
+ if (data < 0)
+ dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
+
+ mutex_unlock(&tps65912->io_mutex);
+ return data;
+}
+EXPORT_SYMBOL_GPL(tps65912_reg_read);
+
+int tps65912_reg_write(struct tps65912 *tps65912, u8 reg, u8 val)
+{
+ int err;
+
+ mutex_lock(&tps65912->io_mutex);
+
+ err = tps65912_write(tps65912, reg, val);
+ if (err < 0)
+ dev_err(tps65912->dev, "Write for reg 0x%x failed\n", reg);
+
+ mutex_unlock(&tps65912->io_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(tps65912_reg_write);
+
+int tps65912_device_init(struct tps65912 *tps65912)
+{
+ struct tps65912_board *pmic_plat_data = tps65912->dev->platform_data;
+ struct tps65912_platform_data *init_data;
+ int ret, dcdc_avs, value;
+
+ init_data = kzalloc(sizeof(struct tps65912_platform_data), GFP_KERNEL);
+ if (init_data == NULL)
+ return -ENOMEM;
+
+ init_data->irq = pmic_plat_data->irq;
+ init_data->irq_base = pmic_plat_data->irq;
+
+ mutex_init(&tps65912->io_mutex);
+ dev_set_drvdata(tps65912->dev, tps65912);
+
+ dcdc_avs = (pmic_plat_data->is_dcdc1_avs << 0 |
+ pmic_plat_data->is_dcdc2_avs << 1 |
+ pmic_plat_data->is_dcdc3_avs << 2 |
+ pmic_plat_data->is_dcdc4_avs << 3);
+ if (dcdc_avs) {
+ tps65912->read(tps65912, TPS65912_I2C_SPI_CFG, 1, &value);
+ dcdc_avs |= value;
+ tps65912->write(tps65912, TPS65912_I2C_SPI_CFG, 1, &dcdc_avs);
+ }
+
+ ret = mfd_add_devices(tps65912->dev, -1,
+ tps65912s, ARRAY_SIZE(tps65912s),
+ NULL, 0);
+ if (ret < 0)
+ goto err;
+
+ ret = tps65912_irq_init(tps65912, init_data->irq, init_data);
+ if (ret < 0)
+ goto err;
+
+ return ret;
+
+err:
+ kfree(init_data);
+ mfd_remove_devices(tps65912->dev);
+ kfree(tps65912);
+ return ret;
+}
+
+void tps65912_device_exit(struct tps65912 *tps65912)
+{
+ mfd_remove_devices(tps65912->dev);
+ kfree(tps65912);
+}
+
+MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS65912x chip family multi-function driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
new file mode 100644
index 00000000000..c041f2c3d2b
--- /dev/null
+++ b/drivers/mfd/tps65912-i2c.c
@@ -0,0 +1,139 @@
+/*
+ * tps65912-i2c.c -- I2C access for TI TPS65912x PMIC
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65912.h>
+
+static int tps65912_i2c_read(struct tps65912 *tps65912, u8 reg,
+ int bytes, void *dest)
+{
+ struct i2c_client *i2c = tps65912->control_data;
+ struct i2c_msg xfer[2];
+ int ret;
+
+ /* Write register */
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = 1;
+ xfer[0].buf = &reg;
+
+ /* Read data */
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = bytes;
+ xfer[1].buf = dest;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret == 2)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+ return ret;
+}
+
+static int tps65912_i2c_write(struct tps65912 *tps65912, u8 reg,
+ int bytes, void *src)
+{
+ struct i2c_client *i2c = tps65912->control_data;
+ /* we add 1 byte for device register */
+ u8 msg[TPS6591X_MAX_REGISTER + 1];
+ int ret;
+
+ if (bytes > TPS6591X_MAX_REGISTER)
+ return -EINVAL;
+
+ msg[0] = reg;
+ memcpy(&msg[1], src, bytes);
+
+ ret = i2c_master_send(i2c, msg, bytes + 1);
+ if (ret < 0)
+ return ret;
+ if (ret != bytes + 1)
+ return -EIO;
+
+ return 0;
+}
+
+static int tps65912_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tps65912 *tps65912;
+
+ tps65912 = kzalloc(sizeof(struct tps65912), GFP_KERNEL);
+ if (tps65912 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, tps65912);
+ tps65912->dev = &i2c->dev;
+ tps65912->control_data = i2c;
+ tps65912->read = tps65912_i2c_read;
+ tps65912->write = tps65912_i2c_write;
+
+ return tps65912_device_init(tps65912);
+}
+
+static int tps65912_i2c_remove(struct i2c_client *i2c)
+{
+ struct tps65912 *tps65912 = i2c_get_clientdata(i2c);
+
+ tps65912_device_exit(tps65912);
+
+ return 0;
+}
+
+static const struct i2c_device_id tps65912_i2c_id[] = {
+ {"tps65912", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tps65912_i2c_id);
+
+static struct i2c_driver tps65912_i2c_driver = {
+ .driver = {
+ .name = "tps65912",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65912_i2c_probe,
+ .remove = tps65912_i2c_remove,
+ .id_table = tps65912_i2c_id,
+};
+
+static int __init tps65912_i2c_init(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&tps65912_i2c_driver);
+ if (ret != 0)
+ pr_err("Failed to register TPS65912 I2C driver: %d\n", ret);
+
+ return ret;
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(tps65912_i2c_init);
+
+static void __exit tps65912_i2c_exit(void)
+{
+ i2c_del_driver(&tps65912_i2c_driver);
+}
+module_exit(tps65912_i2c_exit);
+
+MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS6591x chip family multi-function driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps65912-irq.c b/drivers/mfd/tps65912-irq.c
new file mode 100644
index 00000000000..d360a83a273
--- /dev/null
+++ b/drivers/mfd/tps65912-irq.c
@@ -0,0 +1,224 @@
+/*
+ * tps65912-irq.c -- TI TPS65912x
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65912.h>
+
+static inline int irq_to_tps65912_irq(struct tps65912 *tps65912,
+ int irq)
+{
+ return irq - tps65912->irq_base;
+}
+
+/*
+ * This is a threaded IRQ handler, so it can access I2C/SPI. Since the
+ * IRQ handler explicitly clears the IRQ it handles, the IRQ line
+ * will be reasserted and the physical IRQ will be handled again if
+ * another interrupt is asserted while we run - in the normal course
+ * of events this is a rare occurrence so we save I2C/SPI reads. We're
+ * also assuming that it's rare to get lots of interrupts firing
+ * simultaneously so try to minimise I/O.
+ */
+static irqreturn_t tps65912_irq(int irq, void *irq_data)
+{
+ struct tps65912 *tps65912 = irq_data;
+ u32 irq_sts;
+ u32 irq_mask;
+ u8 reg;
+ int i;
+
+
+ tps65912->read(tps65912, TPS65912_INT_STS, 1, &reg);
+ irq_sts = reg;
+ tps65912->read(tps65912, TPS65912_INT_STS2, 1, &reg);
+ irq_sts |= reg << 8;
+ tps65912->read(tps65912, TPS65912_INT_STS3, 1, &reg);
+ irq_sts |= reg << 16;
+ tps65912->read(tps65912, TPS65912_INT_STS4, 1, &reg);
+ irq_sts |= reg << 24;
+
+ tps65912->read(tps65912, TPS65912_INT_MSK, 1, &reg);
+ irq_mask = reg;
+ tps65912->read(tps65912, TPS65912_INT_MSK2, 1, &reg);
+ irq_mask |= reg << 8;
+ tps65912->read(tps65912, TPS65912_INT_MSK3, 1, &reg);
+ irq_mask |= reg << 16;
+ tps65912->read(tps65912, TPS65912_INT_MSK4, 1, &reg);
+ irq_mask |= reg << 24;
+
+ irq_sts &= ~irq_mask;
+ if (!irq_sts)
+ return IRQ_NONE;
+
+ for (i = 0; i < tps65912->irq_num; i++) {
+ if (!(irq_sts & (1 << i)))
+ continue;
+
+ handle_nested_irq(tps65912->irq_base + i);
+ }
+
+ /* Write the STS register back to clear IRQs we handled */
+ reg = irq_sts & 0xFF;
+ irq_sts >>= 8;
+ if (reg)
+ tps65912->write(tps65912, TPS65912_INT_STS, 1, &reg);
+ reg = irq_sts & 0xFF;
+ irq_sts >>= 8;
+ if (reg)
+ tps65912->write(tps65912, TPS65912_INT_STS2, 1, &reg);
+ reg = irq_sts & 0xFF;
+ irq_sts >>= 8;
+ if (reg)
+ tps65912->write(tps65912, TPS65912_INT_STS3, 1, &reg);
+ reg = irq_sts & 0xFF;
+ if (reg)
+ tps65912->write(tps65912, TPS65912_INT_STS4, 1, &reg);
+
+ return IRQ_HANDLED;
+}
+
+static void tps65912_irq_lock(struct irq_data *data)
+{
+ struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&tps65912->irq_lock);
+}
+
+static void tps65912_irq_sync_unlock(struct irq_data *data)
+{
+ struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
+ u32 reg_mask;
+ u8 reg;
+
+ tps65912->read(tps65912, TPS65912_INT_MSK, 1, &reg);
+ reg_mask = reg;
+ tps65912->read(tps65912, TPS65912_INT_MSK2, 1, &reg);
+ reg_mask |= reg << 8;
+ tps65912->read(tps65912, TPS65912_INT_MSK3, 1, &reg);
+ reg_mask |= reg << 16;
+ tps65912->read(tps65912, TPS65912_INT_MSK4, 1, &reg);
+ reg_mask |= reg << 24;
+
+ if (tps65912->irq_mask != reg_mask) {
+ reg = tps65912->irq_mask & 0xFF;
+ tps65912->write(tps65912, TPS65912_INT_MSK, 1, &reg);
+ reg = tps65912->irq_mask >> 8 & 0xFF;
+ tps65912->write(tps65912, TPS65912_INT_MSK2, 1, &reg);
+ reg = tps65912->irq_mask >> 16 & 0xFF;
+ tps65912->write(tps65912, TPS65912_INT_MSK3, 1, &reg);
+ reg = tps65912->irq_mask >> 24 & 0xFF;
+ tps65912->write(tps65912, TPS65912_INT_MSK4, 1, &reg);
+ }
+
+ mutex_unlock(&tps65912->irq_lock);
+}
+
+static void tps65912_irq_enable(struct irq_data *data)
+{
+ struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
+
+ tps65912->irq_mask &= ~(1 << irq_to_tps65912_irq(tps65912, data->irq));
+}
+
+static void tps65912_irq_disable(struct irq_data *data)
+{
+ struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
+
+ tps65912->irq_mask |= (1 << irq_to_tps65912_irq(tps65912, data->irq));
+}
+
+static struct irq_chip tps65912_irq_chip = {
+ .name = "tps65912",
+ .irq_bus_lock = tps65912_irq_lock,
+ .irq_bus_sync_unlock = tps65912_irq_sync_unlock,
+ .irq_disable = tps65912_irq_disable,
+ .irq_enable = tps65912_irq_enable,
+};
+
+int tps65912_irq_init(struct tps65912 *tps65912, int irq,
+ struct tps65912_platform_data *pdata)
+{
+ int ret, cur_irq;
+ int flags = IRQF_ONESHOT;
+ u8 reg;
+
+ if (!irq) {
+ dev_warn(tps65912->dev, "No interrupt support, no core IRQ\n");
+ return 0;
+ }
+
+ if (!pdata || !pdata->irq_base) {
+ dev_warn(tps65912->dev, "No interrupt support, no IRQ base\n");
+ return 0;
+ }
+
+ /* Clear unattended interrupts */
+ tps65912->read(tps65912, TPS65912_INT_STS, 1, &reg);
+ tps65912->write(tps65912, TPS65912_INT_STS, 1, &reg);
+ tps65912->read(tps65912, TPS65912_INT_STS2, 1, &reg);
+ tps65912->write(tps65912, TPS65912_INT_STS2, 1, &reg);
+ tps65912->read(tps65912, TPS65912_INT_STS3, 1, &reg);
+ tps65912->write(tps65912, TPS65912_INT_STS3, 1, &reg);
+ tps65912->read(tps65912, TPS65912_INT_STS4, 1, &reg);
+ tps65912->write(tps65912, TPS65912_INT_STS4, 1, &reg);
+
+ /* Mask top level interrupts */
+ tps65912->irq_mask = 0xFFFFFFFF;
+
+ mutex_init(&tps65912->irq_lock);
+ tps65912->chip_irq = irq;
+ tps65912->irq_base = pdata->irq_base;
+
+ tps65912->irq_num = TPS65912_NUM_IRQ;
+
+ /* Register with genirq */
+ for (cur_irq = tps65912->irq_base;
+ cur_irq < tps65912->irq_num + tps65912->irq_base;
+ cur_irq++) {
+ irq_set_chip_data(cur_irq, tps65912);
+ irq_set_chip_and_handler(cur_irq, &tps65912_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(cur_irq, 1);
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(irq, NULL, tps65912_irq, flags,
+ "tps65912", tps65912);
+
+ irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+ if (ret != 0)
+ dev_err(tps65912->dev, "Failed to request IRQ: %d\n", ret);
+
+ return ret;
+}
+
+int tps65912_irq_exit(struct tps65912 *tps65912)
+{
+ free_irq(tps65912->chip_irq, tps65912);
+ return 0;
+}
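
For illustration only, not part of the patch: the comment in tps65912_irq() above explains that the handler folds four 8-bit STS/MSK registers into one 32-bit word, drops masked sources, dispatches nested IRQs, and acknowledges only the bytes that still had pending bits. A small userspace sketch of that flow, with made-up register contents:

/* Hypothetical demo of the status/mask folding and selective ack. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t sts[4] = { 0x05, 0x00, 0x10, 0x80 };   /* INT_STS .. INT_STS4 */
	uint8_t msk[4] = { 0x04, 0xff, 0x00, 0x00 };   /* INT_MSK .. INT_MSK4 */
	uint32_t irq_sts = 0, irq_mask = 0;
	int i;

	for (i = 0; i < 4; i++) {
		irq_sts  |= (uint32_t)sts[i] << (8 * i);
		irq_mask |= (uint32_t)msk[i] << (8 * i);
	}

	irq_sts &= ~irq_mask;                 /* drop masked sources */
	if (!irq_sts) {
		puts("nothing to do (IRQ_NONE)");
		return 0;
	}

	for (i = 0; i < 32; i++)
		if (irq_sts & (1u << i))
			printf("dispatch nested irq %d\n", i);

	/* ack: write back only the bytes that still have bits set */
	for (i = 0; i < 4; i++) {
		uint8_t reg = (irq_sts >> (8 * i)) & 0xff;
		if (reg)
			printf("write 0x%02x back to INT_STS%d\n", reg, i + 1);
	}
	return 0;
}
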
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
new file mode 100644
index 00000000000..6d71e0d2574
--- /dev/null
+++ b/drivers/mfd/tps65912-spi.c
@@ -0,0 +1,142 @@
+/*
+ * tps65912-spi.c -- SPI access for TI TPS65912x PMIC
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65912.h>
+
+static int tps65912_spi_write(struct tps65912 *tps65912, u8 addr,
+ int bytes, void *src)
+{
+ struct spi_device *spi = tps65912->control_data;
+ u8 *data = (u8 *) src;
+ int ret;
+ /* bit 23 is the read/write bit */
+ unsigned long spi_data = 1 << 23 | addr << 15 | *data;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ u32 tx_buf, rx_buf;
+
+ tx_buf = spi_data;
+ rx_buf = 0;
+
+ xfer.tx_buf = &tx_buf;
+ xfer.rx_buf = NULL;
+ xfer.len = sizeof(unsigned long);
+ xfer.bits_per_word = 24;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ ret = spi_sync(spi, &msg);
+ return ret;
+}
+
+static int tps65912_spi_read(struct tps65912 *tps65912, u8 addr,
+ int bytes, void *dest)
+{
+ struct spi_device *spi = tps65912->control_data;
+ /* bit 23 is the read/write bit */
+ unsigned long spi_data = 0 << 23 | addr << 15;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ int ret;
+ u8 *data = (u8 *) dest;
+ u32 tx_buf, rx_buf;
+
+ tx_buf = spi_data;
+ rx_buf = 0;
+
+ xfer.tx_buf = &tx_buf;
+ xfer.rx_buf = &rx_buf;
+ xfer.len = sizeof(unsigned long);
+ xfer.bits_per_word = 24;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ if (spi == NULL)
+ return 0;
+
+ ret = spi_sync(spi, &msg);
+ if (ret == 0)
+ *data = (u8) (rx_buf & 0xFF);
+ return ret;
+}
+
+static int __devinit tps65912_spi_probe(struct spi_device *spi)
+{
+ struct tps65912 *tps65912;
+
+ tps65912 = kzalloc(sizeof(struct tps65912), GFP_KERNEL);
+ if (tps65912 == NULL)
+ return -ENOMEM;
+
+ tps65912->dev = &spi->dev;
+ tps65912->control_data = spi;
+ tps65912->read = tps65912_spi_read;
+ tps65912->write = tps65912_spi_write;
+
+ spi_set_drvdata(spi, tps65912);
+
+ return tps65912_device_init(tps65912);
+}
+
+static int __devexit tps65912_spi_remove(struct spi_device *spi)
+{
+ struct tps65912 *tps65912 = spi_get_drvdata(spi);
+
+ tps65912_device_exit(tps65912);
+
+ return 0;
+}
+
+static struct spi_driver tps65912_spi_driver = {
+ .driver = {
+ .name = "tps65912",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65912_spi_probe,
+ .remove = __devexit_p(tps65912_spi_remove),
+};
+
+static int __init tps65912_spi_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&tps65912_spi_driver);
+ if (ret != 0)
+ pr_err("Failed to register TPS65912 SPI driver: %d\n", ret);
+
+ return ret;
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(tps65912_spi_init);
+
+static void __exit tps65912_spi_exit(void)
+{
+ spi_unregister_driver(&tps65912_spi_driver);
+}
+module_exit(tps65912_spi_exit);
+
+MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("SPI support for TPS65912 chip family mfd");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index b8f2a4e7f6e..01ecfeee652 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -110,7 +110,7 @@
#endif
#if defined(CONFIG_TWL4030_CODEC) || defined(CONFIG_TWL4030_CODEC_MODULE) ||\
- defined(CONFIG_SND_SOC_TWL6040) || defined(CONFIG_SND_SOC_TWL6040_MODULE)
+ defined(CONFIG_TWL6040_CORE) || defined(CONFIG_TWL6040_CORE_MODULE)
#define twl_has_codec() true
#else
#define twl_has_codec() false
@@ -815,20 +815,19 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
return PTR_ERR(child);
}
- if (twl_has_codec() && pdata->codec && twl_class_is_4030()) {
+ if (twl_has_codec() && pdata->audio && twl_class_is_4030()) {
sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
child = add_child(sub_chip_id, "twl4030-audio",
- pdata->codec, sizeof(*pdata->codec),
+ pdata->audio, sizeof(*pdata->audio),
false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
}
- /* Phoenix codec driver is probed directly atm */
- if (twl_has_codec() && pdata->codec && twl_class_is_6030()) {
+ if (twl_has_codec() && pdata->audio && twl_class_is_6030()) {
sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
- child = add_child(sub_chip_id, "twl6040-codec",
- pdata->codec, sizeof(*pdata->codec),
+ child = add_child(sub_chip_id, "twl6040",
+ pdata->audio, sizeof(*pdata->audio),
false, 0, 0);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -1284,6 +1283,8 @@ static const struct i2c_device_id twl_ids[] = {
{ "tps65950", 0 }, /* catalog version of twl5030 */
{ "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */
{ "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */
+ { "tps65921", TPS_SUBSET }, /* fewer LDOs; no codec, no LED
+ and vibrator. Charger in USB module */
{ "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */
{ "twl6025", TWL6030_CLASS | TWL6025_SUBCLASS }, /* "Phoenix lite" */
{ /* end of list */ },
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
new file mode 100644
index 00000000000..ae51ab5d0e5
--- /dev/null
+++ b/drivers/mfd/twl4030-audio.c
@@ -0,0 +1,277 @@
+/*
+ * MFD driver for twl4030 audio submodule, which contains an audio codec, and
+ * the vibra control.
+ *
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * Copyright: (C) 2009 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/i2c/twl.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/twl4030-audio.h>
+
+#define TWL4030_AUDIO_CELLS 2
+
+static struct platform_device *twl4030_audio_dev;
+
+struct twl4030_audio_resource {
+ int request_count;
+ u8 reg;
+ u8 mask;
+};
+
+struct twl4030_audio {
+ unsigned int audio_mclk;
+ struct mutex mutex;
+ struct twl4030_audio_resource resource[TWL4030_AUDIO_RES_MAX];
+ struct mfd_cell cells[TWL4030_AUDIO_CELLS];
+};
+
+/*
+ * Modify the resource, the function returns the content of the register
+ * after the modification.
+ */
+static int twl4030_audio_set_resource(enum twl4030_audio_res id, int enable)
+{
+ struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
+ u8 val;
+
+ twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
+ audio->resource[id].reg);
+
+ if (enable)
+ val |= audio->resource[id].mask;
+ else
+ val &= ~audio->resource[id].mask;
+
+ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
+ val, audio->resource[id].reg);
+
+ return val;
+}
+
+static inline int twl4030_audio_get_resource(enum twl4030_audio_res id)
+{
+ struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
+ u8 val;
+
+ twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
+ audio->resource[id].reg);
+
+ return val;
+}
+
+/*
+ * Enable the resource.
+ * The function returns with error or the content of the register
+ */
+int twl4030_audio_enable_resource(enum twl4030_audio_res id)
+{
+ struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
+ int val;
+
+ if (id >= TWL4030_AUDIO_RES_MAX) {
+ dev_err(&twl4030_audio_dev->dev,
+ "Invalid resource ID (%u)\n", id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&audio->mutex);
+ if (!audio->resource[id].request_count)
+ /* Resource was disabled, enable it */
+ val = twl4030_audio_set_resource(id, 1);
+ else
+ val = twl4030_audio_get_resource(id);
+
+ audio->resource[id].request_count++;
+ mutex_unlock(&audio->mutex);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(twl4030_audio_enable_resource);
+
+/*
+ * Disable the resource.
+ * The function returns with error or the content of the register
+ */
+int twl4030_audio_disable_resource(unsigned id)
+{
+ struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
+ int val;
+
+ if (id >= TWL4030_AUDIO_RES_MAX) {
+ dev_err(&twl4030_audio_dev->dev,
+ "Invalid resource ID (%u)\n", id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&audio->mutex);
+ if (!audio->resource[id].request_count) {
+ dev_err(&twl4030_audio_dev->dev,
+ "Resource has been disabled already (%u)\n", id);
+ mutex_unlock(&audio->mutex);
+ return -EPERM;
+ }
+ audio->resource[id].request_count--;
+
+ if (!audio->resource[id].request_count)
+ /* Resource can be disabled now */
+ val = twl4030_audio_set_resource(id, 0);
+ else
+ val = twl4030_audio_get_resource(id);
+
+ mutex_unlock(&audio->mutex);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(twl4030_audio_disable_resource);
+
+unsigned int twl4030_audio_get_mclk(void)
+{
+ struct twl4030_audio *audio = platform_get_drvdata(twl4030_audio_dev);
+
+ return audio->audio_mclk;
+}
+EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
+
+static int __devinit twl4030_audio_probe(struct platform_device *pdev)
+{
+ struct twl4030_audio *audio;
+ struct twl4030_audio_data *pdata = pdev->dev.platform_data;
+ struct mfd_cell *cell = NULL;
+ int ret, childs = 0;
+ u8 val;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "Platform data is missing\n");
+ return -EINVAL;
+ }
+
+ /* Configure APLL_INFREQ and disable APLL if enabled */
+ val = 0;
+ switch (pdata->audio_mclk) {
+ case 19200000:
+ val |= TWL4030_APLL_INFREQ_19200KHZ;
+ break;
+ case 26000000:
+ val |= TWL4030_APLL_INFREQ_26000KHZ;
+ break;
+ case 38400000:
+ val |= TWL4030_APLL_INFREQ_38400KHZ;
+ break;
+ default:
+ dev_err(&pdev->dev, "Invalid audio_mclk\n");
+ return -EINVAL;
+ }
+ twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
+ val, TWL4030_REG_APLL_CTL);
+
+ audio = kzalloc(sizeof(struct twl4030_audio), GFP_KERNEL);
+ if (!audio)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, audio);
+
+ twl4030_audio_dev = pdev;
+ mutex_init(&audio->mutex);
+ audio->audio_mclk = pdata->audio_mclk;
+
+ /* Codec power */
+ audio->resource[TWL4030_AUDIO_RES_POWER].reg = TWL4030_REG_CODEC_MODE;
+ audio->resource[TWL4030_AUDIO_RES_POWER].mask = TWL4030_CODECPDZ;
+
+ /* PLL */
+ audio->resource[TWL4030_AUDIO_RES_APLL].reg = TWL4030_REG_APLL_CTL;
+ audio->resource[TWL4030_AUDIO_RES_APLL].mask = TWL4030_APLL_EN;
+
+ if (pdata->codec) {
+ cell = &audio->cells[childs];
+ cell->name = "twl4030-codec";
+ cell->platform_data = pdata->codec;
+ cell->pdata_size = sizeof(*pdata->codec);
+ childs++;
+ }
+ if (pdata->vibra) {
+ cell = &audio->cells[childs];
+ cell->name = "twl4030-vibra";
+ cell->platform_data = pdata->vibra;
+ cell->pdata_size = sizeof(*pdata->vibra);
+ childs++;
+ }
+
+ if (childs)
+ ret = mfd_add_devices(&pdev->dev, pdev->id, audio->cells,
+ childs, NULL, 0);
+ else {
+ dev_err(&pdev->dev, "No platform data found for childs\n");
+ ret = -ENODEV;
+ }
+
+ if (!ret)
+ return 0;
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(audio);
+ twl4030_audio_dev = NULL;
+ return ret;
+}
+
+static int __devexit twl4030_audio_remove(struct platform_device *pdev)
+{
+ struct twl4030_audio *audio = platform_get_drvdata(pdev);
+
+ mfd_remove_devices(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+ kfree(audio);
+ twl4030_audio_dev = NULL;
+
+ return 0;
+}
+
+MODULE_ALIAS("platform:twl4030-audio");
+
+static struct platform_driver twl4030_audio_driver = {
+ .probe = twl4030_audio_probe,
+ .remove = __devexit_p(twl4030_audio_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "twl4030-audio",
+ },
+};
+
+static int __devinit twl4030_audio_init(void)
+{
+ return platform_driver_register(&twl4030_audio_driver);
+}
+module_init(twl4030_audio_init);
+
+static void __devexit twl4030_audio_exit(void)
+{
+ platform_driver_unregister(&twl4030_audio_driver);
+}
+module_exit(twl4030_audio_exit);
+
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl4030-codec.c b/drivers/mfd/twl4030-codec.c
deleted file mode 100644
index 2bf4136464c..00000000000
--- a/drivers/mfd/twl4030-codec.c
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * MFD driver for twl4030 codec submodule
- *
- * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * Copyright: (C) 2009 Nokia Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/platform_device.h>
-#include <linux/i2c/twl.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/twl4030-codec.h>
-
-#define TWL4030_CODEC_CELLS 2
-
-static struct platform_device *twl4030_codec_dev;
-
-struct twl4030_codec_resource {
- int request_count;
- u8 reg;
- u8 mask;
-};
-
-struct twl4030_codec {
- unsigned int audio_mclk;
- struct mutex mutex;
- struct twl4030_codec_resource resource[TWL4030_CODEC_RES_MAX];
- struct mfd_cell cells[TWL4030_CODEC_CELLS];
-};
-
-/*
- * Modify the resource, the function returns the content of the register
- * after the modification.
- */
-static int twl4030_codec_set_resource(enum twl4030_codec_res id, int enable)
-{
- struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
- u8 val;
-
- twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
- codec->resource[id].reg);
-
- if (enable)
- val |= codec->resource[id].mask;
- else
- val &= ~codec->resource[id].mask;
-
- twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
- val, codec->resource[id].reg);
-
- return val;
-}
-
-static inline int twl4030_codec_get_resource(enum twl4030_codec_res id)
-{
- struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
- u8 val;
-
- twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &val,
- codec->resource[id].reg);
-
- return val;
-}
-
-/*
- * Enable the resource.
- * The function returns with error or the content of the register
- */
-int twl4030_codec_enable_resource(enum twl4030_codec_res id)
-{
- struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
- int val;
-
- if (id >= TWL4030_CODEC_RES_MAX) {
- dev_err(&twl4030_codec_dev->dev,
- "Invalid resource ID (%u)\n", id);
- return -EINVAL;
- }
-
- mutex_lock(&codec->mutex);
- if (!codec->resource[id].request_count)
- /* Resource was disabled, enable it */
- val = twl4030_codec_set_resource(id, 1);
- else
- val = twl4030_codec_get_resource(id);
-
- codec->resource[id].request_count++;
- mutex_unlock(&codec->mutex);
-
- return val;
-}
-EXPORT_SYMBOL_GPL(twl4030_codec_enable_resource);
-
-/*
- * Disable the resource.
- * The function returns with error or the content of the register
- */
-int twl4030_codec_disable_resource(unsigned id)
-{
- struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
- int val;
-
- if (id >= TWL4030_CODEC_RES_MAX) {
- dev_err(&twl4030_codec_dev->dev,
- "Invalid resource ID (%u)\n", id);
- return -EINVAL;
- }
-
- mutex_lock(&codec->mutex);
- if (!codec->resource[id].request_count) {
- dev_err(&twl4030_codec_dev->dev,
- "Resource has been disabled already (%u)\n", id);
- mutex_unlock(&codec->mutex);
- return -EPERM;
- }
- codec->resource[id].request_count--;
-
- if (!codec->resource[id].request_count)
- /* Resource can be disabled now */
- val = twl4030_codec_set_resource(id, 0);
- else
- val = twl4030_codec_get_resource(id);
-
- mutex_unlock(&codec->mutex);
-
- return val;
-}
-EXPORT_SYMBOL_GPL(twl4030_codec_disable_resource);
-
-unsigned int twl4030_codec_get_mclk(void)
-{
- struct twl4030_codec *codec = platform_get_drvdata(twl4030_codec_dev);
-
- return codec->audio_mclk;
-}
-EXPORT_SYMBOL_GPL(twl4030_codec_get_mclk);
-
-static int __devinit twl4030_codec_probe(struct platform_device *pdev)
-{
- struct twl4030_codec *codec;
- struct twl4030_codec_data *pdata = pdev->dev.platform_data;
- struct mfd_cell *cell = NULL;
- int ret, childs = 0;
- u8 val;
-
- if (!pdata) {
- dev_err(&pdev->dev, "Platform data is missing\n");
- return -EINVAL;
- }
-
- /* Configure APLL_INFREQ and disable APLL if enabled */
- val = 0;
- switch (pdata->audio_mclk) {
- case 19200000:
- val |= TWL4030_APLL_INFREQ_19200KHZ;
- break;
- case 26000000:
- val |= TWL4030_APLL_INFREQ_26000KHZ;
- break;
- case 38400000:
- val |= TWL4030_APLL_INFREQ_38400KHZ;
- break;
- default:
- dev_err(&pdev->dev, "Invalid audio_mclk\n");
- return -EINVAL;
- }
- twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
- val, TWL4030_REG_APLL_CTL);
-
- codec = kzalloc(sizeof(struct twl4030_codec), GFP_KERNEL);
- if (!codec)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, codec);
-
- twl4030_codec_dev = pdev;
- mutex_init(&codec->mutex);
- codec->audio_mclk = pdata->audio_mclk;
-
- /* Codec power */
- codec->resource[TWL4030_CODEC_RES_POWER].reg = TWL4030_REG_CODEC_MODE;
- codec->resource[TWL4030_CODEC_RES_POWER].mask = TWL4030_CODECPDZ;
-
- /* PLL */
- codec->resource[TWL4030_CODEC_RES_APLL].reg = TWL4030_REG_APLL_CTL;
- codec->resource[TWL4030_CODEC_RES_APLL].mask = TWL4030_APLL_EN;
-
- if (pdata->audio) {
- cell = &codec->cells[childs];
- cell->name = "twl4030-codec";
- cell->platform_data = pdata->audio;
- cell->pdata_size = sizeof(*pdata->audio);
- childs++;
- }
- if (pdata->vibra) {
- cell = &codec->cells[childs];
- cell->name = "twl4030-vibra";
- cell->platform_data = pdata->vibra;
- cell->pdata_size = sizeof(*pdata->vibra);
- childs++;
- }
-
- if (childs)
- ret = mfd_add_devices(&pdev->dev, pdev->id, codec->cells,
- childs, NULL, 0);
- else {
- dev_err(&pdev->dev, "No platform data found for childs\n");
- ret = -ENODEV;
- }
-
- if (!ret)
- return 0;
-
- platform_set_drvdata(pdev, NULL);
- kfree(codec);
- twl4030_codec_dev = NULL;
- return ret;
-}
-
-static int __devexit twl4030_codec_remove(struct platform_device *pdev)
-{
- struct twl4030_codec *codec = platform_get_drvdata(pdev);
-
- mfd_remove_devices(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
- kfree(codec);
- twl4030_codec_dev = NULL;
-
- return 0;
-}
-
-MODULE_ALIAS("platform:twl4030-audio");
-
-static struct platform_driver twl4030_codec_driver = {
- .probe = twl4030_codec_probe,
- .remove = __devexit_p(twl4030_codec_remove),
- .driver = {
- .owner = THIS_MODULE,
- .name = "twl4030-audio",
- },
-};
-
-static int __devinit twl4030_codec_init(void)
-{
- return platform_driver_register(&twl4030_codec_driver);
-}
-module_init(twl4030_codec_init);
-
-static void __devexit twl4030_codec_exit(void)
-{
- platform_driver_unregister(&twl4030_codec_driver);
-}
-module_exit(twl4030_codec_exit);
-
-MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index 3941ddcf15f..b5d598c3aa7 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -530,13 +530,13 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
if (ret) {
dev_err(twl4030_madc->dev,
"unable to write sel register 0x%X\n", method->sel + 1);
- return ret;
+ goto out;
}
ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->sel);
if (ret) {
dev_err(twl4030_madc->dev,
"unable to write sel register 0x%X\n", method->sel + 1);
- return ret;
+ goto out;
}
/* Select averaging for all channels if do_avg is set */
if (req->do_avg) {
@@ -546,7 +546,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
dev_err(twl4030_madc->dev,
"unable to write avg register 0x%X\n",
method->avg + 1);
- return ret;
+ goto out;
}
ret = twl_i2c_write_u8(TWL4030_MODULE_MADC,
ch_lsb, method->avg);
@@ -554,7 +554,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req)
dev_err(twl4030_madc->dev,
"unable to write sel reg 0x%X\n",
method->sel + 1);
- return ret;
+ goto out;
}
}
if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) {
diff --git a/drivers/mfd/twl6030-pwm.c b/drivers/mfd/twl6030-pwm.c
index 5d25bdc7842..e8fee147678 100644
--- a/drivers/mfd/twl6030-pwm.c
+++ b/drivers/mfd/twl6030-pwm.c
@@ -161,3 +161,5 @@ void pwm_free(struct pwm_device *pwm)
kfree(pwm);
}
EXPORT_SYMBOL(pwm_free);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
new file mode 100644
index 00000000000..24d436c2fe4
--- /dev/null
+++ b/drivers/mfd/twl6040-core.c
@@ -0,0 +1,620 @@
+/*
+ * MFD driver for TWL6040 audio device
+ *
+ * Authors: Misael Lopez Cruz <misael.lopez@ti.com>
+ * Jorge Eduardo Candelaria <jorge.candelaria@ti.com>
+ * Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * Copyright: (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/i2c/twl.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/twl6040.h>
+
+static struct platform_device *twl6040_dev;
+
+int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
+{
+ int ret;
+ u8 val = 0;
+
+ mutex_lock(&twl6040->io_mutex);
+ ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
+ if (ret < 0) {
+ mutex_unlock(&twl6040->io_mutex);
+ return ret;
+ }
+ mutex_unlock(&twl6040->io_mutex);
+
+ return val;
+}
+EXPORT_SYMBOL(twl6040_reg_read);
+
+int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg, u8 val)
+{
+ int ret;
+
+ mutex_lock(&twl6040->io_mutex);
+ ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
+ mutex_unlock(&twl6040->io_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_reg_write);
+
+int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
+{
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl6040->io_mutex);
+ ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
+ if (ret)
+ goto out;
+
+ val |= mask;
+ ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
+out:
+ mutex_unlock(&twl6040->io_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_set_bits);
+
+int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
+{
+ int ret;
+ u8 val;
+
+ mutex_lock(&twl6040->io_mutex);
+ ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
+ if (ret)
+ goto out;
+
+ val &= ~mask;
+ ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
+out:
+ mutex_unlock(&twl6040->io_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_clear_bits);
+
+/* twl6040 codec manual power-up sequence */
+static int twl6040_power_up(struct twl6040 *twl6040)
+{
+ u8 ldoctl, ncpctl, lppllctl;
+ int ret;
+
+ /* enable high-side LDO, reference system and internal oscillator */
+ ldoctl = TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ return ret;
+ usleep_range(10000, 10500);
+
+ /* enable negative charge pump */
+ ncpctl = TWL6040_NCPENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl);
+ if (ret)
+ goto ncp_err;
+ usleep_range(1000, 1500);
+
+ /* enable low-side LDO */
+ ldoctl |= TWL6040_LSLDOENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ goto lsldo_err;
+ usleep_range(1000, 1500);
+
+ /* enable low-power PLL */
+ lppllctl = TWL6040_LPLLENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+ if (ret)
+ goto lppll_err;
+ usleep_range(5000, 5500);
+
+ /* disable internal oscillator */
+ ldoctl &= ~TWL6040_OSCENA;
+ ret = twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ if (ret)
+ goto osc_err;
+
+ return 0;
+
+osc_err:
+ lppllctl &= ~TWL6040_LPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+lppll_err:
+ ldoctl &= ~TWL6040_LSLDOENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+lsldo_err:
+ ncpctl &= ~TWL6040_NCPENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl);
+ncp_err:
+ ldoctl &= ~(TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA);
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+
+ return ret;
+}
+
+/* twl6040 manual power-down sequence */
+static void twl6040_power_down(struct twl6040 *twl6040)
+{
+ u8 ncpctl, ldoctl, lppllctl;
+
+ ncpctl = twl6040_reg_read(twl6040, TWL6040_REG_NCPCTL);
+ ldoctl = twl6040_reg_read(twl6040, TWL6040_REG_LDOCTL);
+ lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL);
+
+ /* enable internal oscillator */
+ ldoctl |= TWL6040_OSCENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+ usleep_range(1000, 1500);
+
+ /* disable low-power PLL */
+ lppllctl &= ~TWL6040_LPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+
+ /* disable low-side LDO */
+ ldoctl &= ~TWL6040_LSLDOENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+
+ /* disable negative charge pump */
+ ncpctl &= ~TWL6040_NCPENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_NCPCTL, ncpctl);
+
+ /* disable high-side LDO, reference system and internal oscillator */
+ ldoctl &= ~(TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA);
+ twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
+}
+
+static irqreturn_t twl6040_naudint_handler(int irq, void *data)
+{
+ struct twl6040 *twl6040 = data;
+ u8 intid, status;
+
+ intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+
+ if (intid & TWL6040_READYINT)
+ complete(&twl6040->ready);
+
+ if (intid & TWL6040_THINT) {
+ status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
+ if (status & TWL6040_TSHUTDET) {
+ dev_warn(&twl6040_dev->dev,
+ "Thermal shutdown, powering-off");
+ twl6040_power(twl6040, 0);
+ } else {
+ dev_warn(&twl6040_dev->dev,
+ "Leaving thermal shutdown, powering-on");
+ twl6040_power(twl6040, 1);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int twl6040_power_up_completion(struct twl6040 *twl6040,
+ int naudint)
+{
+ int time_left;
+ u8 intid;
+
+ time_left = wait_for_completion_timeout(&twl6040->ready,
+ msecs_to_jiffies(144));
+ if (!time_left) {
+ intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+ if (!(intid & TWL6040_READYINT)) {
+ dev_err(&twl6040_dev->dev,
+ "timeout waiting for READYINT\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+int twl6040_power(struct twl6040 *twl6040, int on)
+{
+ int audpwron = twl6040->audpwron;
+ int naudint = twl6040->irq;
+ int ret = 0;
+
+ mutex_lock(&twl6040->mutex);
+
+ if (on) {
+ /* already powered-up */
+ if (twl6040->power_count++)
+ goto out;
+
+ if (gpio_is_valid(audpwron)) {
+ /* use AUDPWRON line */
+ gpio_set_value(audpwron, 1);
+ /* wait for power-up completion */
+ ret = twl6040_power_up_completion(twl6040, naudint);
+ if (ret) {
+ dev_err(&twl6040_dev->dev,
+ "automatic power-down failed\n");
+ twl6040->power_count = 0;
+ goto out;
+ }
+ } else {
+ /* use manual power-up sequence */
+ ret = twl6040_power_up(twl6040);
+ if (ret) {
+ dev_err(&twl6040_dev->dev,
+ "manual power-up failed\n");
+ twl6040->power_count = 0;
+ goto out;
+ }
+ }
+ /* Default PLL configuration after power up */
+ twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
+ twl6040->sysclk = 19200000;
+ } else {
+ /* already powered-down */
+ if (!twl6040->power_count) {
+ dev_err(&twl6040_dev->dev,
+ "device is already powered-off\n");
+ ret = -EPERM;
+ goto out;
+ }
+
+ if (--twl6040->power_count)
+ goto out;
+
+ if (gpio_is_valid(audpwron)) {
+ /* use AUDPWRON line */
+ gpio_set_value(audpwron, 0);
+
+ /* power-down sequence latency */
+ usleep_range(500, 700);
+ } else {
+ /* use manual power-down sequence */
+ twl6040_power_down(twl6040);
+ }
+ twl6040->sysclk = 0;
+ }
+
+out:
+ mutex_unlock(&twl6040->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_power);
+
+int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
+ unsigned int freq_in, unsigned int freq_out)
+{
+ u8 hppllctl, lppllctl;
+ int ret = 0;
+
+ mutex_lock(&twl6040->mutex);
+
+ hppllctl = twl6040_reg_read(twl6040, TWL6040_REG_HPPLLCTL);
+ lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL);
+
+ switch (pll_id) {
+ case TWL6040_SYSCLK_SEL_LPPLL:
+ /* low-power PLL divider */
+ switch (freq_out) {
+ case 17640000:
+ lppllctl |= TWL6040_LPLLFIN;
+ break;
+ case 19200000:
+ lppllctl &= ~TWL6040_LPLLFIN;
+ break;
+ default:
+ dev_err(&twl6040_dev->dev,
+ "freq_out %d not supported\n", freq_out);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+
+ switch (freq_in) {
+ case 32768:
+ lppllctl |= TWL6040_LPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
+ lppllctl);
+ mdelay(5);
+ lppllctl &= ~TWL6040_HPLLSEL;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
+ lppllctl);
+ hppllctl &= ~TWL6040_HPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL,
+ hppllctl);
+ break;
+ default:
+ dev_err(&twl6040_dev->dev,
+ "freq_in %d not supported\n", freq_in);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+ break;
+ case TWL6040_SYSCLK_SEL_HPPLL:
+ /* high-performance PLL can provide only 19.2 MHz */
+ if (freq_out != 19200000) {
+ dev_err(&twl6040_dev->dev,
+ "freq_out %d not supported\n", freq_out);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+
+ hppllctl &= ~TWL6040_MCLK_MSK;
+
+ switch (freq_in) {
+ case 12000000:
+ /* PLL enabled, active mode */
+ hppllctl |= TWL6040_MCLK_12000KHZ |
+ TWL6040_HPLLENA;
+ break;
+ case 19200000:
+ /*
+ * PLL disabled
+ * (enable PLL if MCLK jitter quality
+ * doesn't meet specification)
+ */
+ hppllctl |= TWL6040_MCLK_19200KHZ;
+ break;
+ case 26000000:
+ /* PLL enabled, active mode */
+ hppllctl |= TWL6040_MCLK_26000KHZ |
+ TWL6040_HPLLENA;
+ break;
+ case 38400000:
+ /* PLL enabled, active mode */
+ hppllctl |= TWL6040_MCLK_38400KHZ |
+ TWL6040_HPLLENA;
+ break;
+ default:
+ dev_err(&twl6040_dev->dev,
+ "freq_in %d not supported\n", freq_in);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+
+ /* enable clock slicer to ensure input waveform is square */
+ hppllctl |= TWL6040_HPLLSQRENA;
+
+ twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL, hppllctl);
+ usleep_range(500, 700);
+ lppllctl |= TWL6040_HPLLSEL;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+ lppllctl &= ~TWL6040_LPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+ break;
+ default:
+ dev_err(&twl6040_dev->dev, "unknown pll id %d\n", pll_id);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+
+ twl6040->sysclk = freq_out;
+ twl6040->pll = pll_id;
+
+pll_out:
+ mutex_unlock(&twl6040->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(twl6040_set_pll);
+
+int twl6040_get_pll(struct twl6040 *twl6040)
+{
+ if (twl6040->power_count)
+ return twl6040->pll;
+ else
+ return -ENODEV;
+}
+EXPORT_SYMBOL(twl6040_get_pll);
+
+unsigned int twl6040_get_sysclk(struct twl6040 *twl6040)
+{
+ return twl6040->sysclk;
+}
+EXPORT_SYMBOL(twl6040_get_sysclk);
+
+static struct resource twl6040_vibra_rsrc[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource twl6040_codec_rsrc[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static int __devinit twl6040_probe(struct platform_device *pdev)
+{
+ struct twl4030_audio_data *pdata = pdev->dev.platform_data;
+ struct twl6040 *twl6040;
+ struct mfd_cell *cell = NULL;
+ int ret, children = 0;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "Platform data is missing\n");
+ return -EINVAL;
+ }
+
+ /* In order to operate correctly we need valid interrupt config */
+ if (!pdata->naudint_irq || !pdata->irq_base) {
+ dev_err(&pdev->dev, "Invalid IRQ configuration\n");
+ return -EINVAL;
+ }
+
+ twl6040 = kzalloc(sizeof(struct twl6040), GFP_KERNEL);
+ if (!twl6040)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, twl6040);
+
+ twl6040_dev = pdev;
+ twl6040->dev = &pdev->dev;
+ twl6040->audpwron = pdata->audpwron_gpio;
+ twl6040->irq = pdata->naudint_irq;
+ twl6040->irq_base = pdata->irq_base;
+
+ mutex_init(&twl6040->mutex);
+ mutex_init(&twl6040->io_mutex);
+ init_completion(&twl6040->ready);
+
+ twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
+
+ if (gpio_is_valid(twl6040->audpwron)) {
+ ret = gpio_request(twl6040->audpwron, "audpwron");
+ if (ret)
+ goto gpio1_err;
+
+ ret = gpio_direction_output(twl6040->audpwron, 0);
+ if (ret)
+ goto gpio2_err;
+ }
+
+ /* ERRATA: Automatic power-up is not possible in ES1.0 */
+ if (twl6040->rev == TWL6040_REV_ES1_0)
+ twl6040->audpwron = -EINVAL;
+
+ /* codec interrupt */
+ ret = twl6040_irq_init(twl6040);
+ if (ret)
+ goto gpio2_err;
+
+ ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY,
+ NULL, twl6040_naudint_handler, 0,
+ "twl6040_irq_ready", twl6040);
+ if (ret) {
+ dev_err(twl6040->dev, "READY IRQ request failed: %d\n",
+ ret);
+ goto irq_err;
+ }
+
+ /* dual-access registers controlled by I2C only */
+ twl6040_set_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_I2CSEL);
+
+ if (pdata->codec) {
+ int irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
+
+ cell = &twl6040->cells[children];
+ cell->name = "twl6040-codec";
+ twl6040_codec_rsrc[0].start = irq;
+ twl6040_codec_rsrc[0].end = irq;
+ cell->resources = twl6040_codec_rsrc;
+ cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
+ cell->platform_data = pdata->codec;
+ cell->pdata_size = sizeof(*pdata->codec);
+ children++;
+ }
+
+ if (pdata->vibra) {
+ int irq = twl6040->irq_base + TWL6040_IRQ_VIB;
+
+ cell = &twl6040->cells[children];
+ cell->name = "twl6040-vibra";
+ twl6040_vibra_rsrc[0].start = irq;
+ twl6040_vibra_rsrc[0].end = irq;
+ cell->resources = twl6040_vibra_rsrc;
+ cell->num_resources = ARRAY_SIZE(twl6040_vibra_rsrc);
+
+ cell->platform_data = pdata->vibra;
+ cell->pdata_size = sizeof(*pdata->vibra);
+ children++;
+ }
+
+ if (children) {
+ ret = mfd_add_devices(&pdev->dev, pdev->id, twl6040->cells,
+ children, NULL, 0);
+ if (ret)
+ goto mfd_err;
+ } else {
+ dev_err(&pdev->dev, "No platform data found for children\n");
+ ret = -ENODEV;
+ goto mfd_err;
+ }
+
+ return 0;
+
+mfd_err:
+ free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
+irq_err:
+ twl6040_irq_exit(twl6040);
+gpio2_err:
+ if (gpio_is_valid(twl6040->audpwron))
+ gpio_free(twl6040->audpwron);
+gpio1_err:
+ platform_set_drvdata(pdev, NULL);
+ kfree(twl6040);
+ twl6040_dev = NULL;
+ return ret;
+}
+
+static int __devexit twl6040_remove(struct platform_device *pdev)
+{
+ struct twl6040 *twl6040 = platform_get_drvdata(pdev);
+
+ if (twl6040->power_count)
+ twl6040_power(twl6040, 0);
+
+ if (gpio_is_valid(twl6040->audpwron))
+ gpio_free(twl6040->audpwron);
+
+ free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
+ twl6040_irq_exit(twl6040);
+
+ mfd_remove_devices(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+ kfree(twl6040);
+ twl6040_dev = NULL;
+
+ return 0;
+}
+
+static struct platform_driver twl6040_driver = {
+ .probe = twl6040_probe,
+ .remove = __devexit_p(twl6040_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "twl6040",
+ },
+};
+
+static int __devinit twl6040_init(void)
+{
+ return platform_driver_register(&twl6040_driver);
+}
+module_init(twl6040_init);
+
+static void __devexit twl6040_exit(void)
+{
+ platform_driver_unregister(&twl6040_driver);
+}
+
+module_exit(twl6040_exit);
+
+MODULE_DESCRIPTION("TWL6040 MFD");
+MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
+MODULE_AUTHOR("Jorge Eduardo Candelaria <jorge.candelaria@ti.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:twl6040");
diff --git a/drivers/mfd/twl6040-irq.c b/drivers/mfd/twl6040-irq.c
new file mode 100644
index 00000000000..b3f8ddaa28a
--- /dev/null
+++ b/drivers/mfd/twl6040-irq.c
@@ -0,0 +1,191 @@
+/*
+ * Interrupt controller support for TWL6040
+ *
+ * Author: Misael Lopez Cruz <misael.lopez@ti.com>
+ *
+ * Copyright: (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/twl6040.h>
+
+struct twl6040_irq_data {
+ int mask;
+ int status;
+};
+
+static struct twl6040_irq_data twl6040_irqs[] = {
+ {
+ .mask = TWL6040_THMSK,
+ .status = TWL6040_THINT,
+ },
+ {
+ .mask = TWL6040_PLUGMSK,
+ .status = TWL6040_PLUGINT | TWL6040_UNPLUGINT,
+ },
+ {
+ .mask = TWL6040_HOOKMSK,
+ .status = TWL6040_HOOKINT,
+ },
+ {
+ .mask = TWL6040_HFMSK,
+ .status = TWL6040_HFINT,
+ },
+ {
+ .mask = TWL6040_VIBMSK,
+ .status = TWL6040_VIBINT,
+ },
+ {
+ .mask = TWL6040_READYMSK,
+ .status = TWL6040_READYINT,
+ },
+};
+
+static inline
+struct twl6040_irq_data *irq_to_twl6040_irq(struct twl6040 *twl6040,
+ int irq)
+{
+ return &twl6040_irqs[irq - twl6040->irq_base];
+}
+
+static void twl6040_irq_lock(struct irq_data *data)
+{
+ struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&twl6040->irq_mutex);
+}
+
+static void twl6040_irq_sync_unlock(struct irq_data *data)
+{
+ struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
+
+ /* write back to hardware any change in irq mask */
+ if (twl6040->irq_masks_cur != twl6040->irq_masks_cache) {
+ twl6040->irq_masks_cache = twl6040->irq_masks_cur;
+ twl6040_reg_write(twl6040, TWL6040_REG_INTMR,
+ twl6040->irq_masks_cur);
+ }
+
+ mutex_unlock(&twl6040->irq_mutex);
+}
+
+static void twl6040_irq_enable(struct irq_data *data)
+{
+ struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
+ struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040,
+ data->irq);
+
+ twl6040->irq_masks_cur &= ~irq_data->mask;
+}
+
+static void twl6040_irq_disable(struct irq_data *data)
+{
+ struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
+ struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040,
+ data->irq);
+
+ twl6040->irq_masks_cur |= irq_data->mask;
+}
+
+static struct irq_chip twl6040_irq_chip = {
+ .name = "twl6040",
+ .irq_bus_lock = twl6040_irq_lock,
+ .irq_bus_sync_unlock = twl6040_irq_sync_unlock,
+ .irq_enable = twl6040_irq_enable,
+ .irq_disable = twl6040_irq_disable,
+};
+
+static irqreturn_t twl6040_irq_thread(int irq, void *data)
+{
+ struct twl6040 *twl6040 = data;
+ u8 intid;
+ int i;
+
+ intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+
+ /* apply masking and report (backwards to handle READYINT first) */
+ for (i = ARRAY_SIZE(twl6040_irqs) - 1; i >= 0; i--) {
+ if (twl6040->irq_masks_cur & twl6040_irqs[i].mask)
+ intid &= ~twl6040_irqs[i].status;
+ if (intid & twl6040_irqs[i].status)
+ handle_nested_irq(twl6040->irq_base + i);
+ }
+
+ /* ack unmasked irqs */
+ twl6040_reg_write(twl6040, TWL6040_REG_INTID, intid);
+
+ return IRQ_HANDLED;
+}
+
+int twl6040_irq_init(struct twl6040 *twl6040)
+{
+ int cur_irq, ret;
+ u8 val;
+
+ mutex_init(&twl6040->irq_mutex);
+
+ /* mask the individual interrupt sources */
+ twl6040->irq_masks_cur = TWL6040_ALLINT_MSK;
+ twl6040->irq_masks_cache = TWL6040_ALLINT_MSK;
+ twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK);
+
+ /* Register them with genirq */
+ for (cur_irq = twl6040->irq_base;
+ cur_irq < twl6040->irq_base + ARRAY_SIZE(twl6040_irqs);
+ cur_irq++) {
+ irq_set_chip_data(cur_irq, twl6040);
+ irq_set_chip_and_handler(cur_irq, &twl6040_irq_chip,
+ handle_level_irq);
+ irq_set_nested_thread(cur_irq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set it noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(twl6040->irq, NULL, twl6040_irq_thread,
+ IRQF_ONESHOT, "twl6040", twl6040);
+ if (ret) {
+ dev_err(twl6040->dev, "failed to request IRQ %d: %d\n",
+ twl6040->irq, ret);
+ return ret;
+ }
+
+ /* reset interrupts */
+ val = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+
+ /* interrupts cleared on write */
+ twl6040_clear_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_INTCLRMODE);
+
+ return 0;
+}
+EXPORT_SYMBOL(twl6040_irq_init);
+
+void twl6040_irq_exit(struct twl6040 *twl6040)
+{
+ free_irq(twl6040->irq, twl6040);
+}
+EXPORT_SYMBOL(twl6040_irq_exit);
diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
new file mode 100644
index 00000000000..87210954a06
--- /dev/null
+++ b/drivers/mfd/wm831x-auxadc.c
@@ -0,0 +1,299 @@
+/*
+ * wm831x-auxadc.c -- AUXADC for Wolfson WM831x PMICs
+ *
+ * Copyright 2009-2011 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mfd/core.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include <linux/mfd/wm831x/core.h>
+#include <linux/mfd/wm831x/pdata.h>
+#include <linux/mfd/wm831x/irq.h>
+#include <linux/mfd/wm831x/auxadc.h>
+#include <linux/mfd/wm831x/otp.h>
+#include <linux/mfd/wm831x/regulator.h>
+
+struct wm831x_auxadc_req {
+ struct list_head list;
+ enum wm831x_auxadc input;
+ int val;
+ struct completion done;
+};
+
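+/* Interrupt-driven read: queue a request, start a conversion on the
+ * requested source if one is not already active and wait for the AUXADC
+ * data interrupt to complete it. */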
+static int wm831x_auxadc_read_irq(struct wm831x *wm831x,
+ enum wm831x_auxadc input)
+{
+ struct wm831x_auxadc_req *req;
+ int ret;
+ bool ena = false;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ init_completion(&req->done);
+ req->input = input;
+ req->val = -ETIMEDOUT;
+
+ mutex_lock(&wm831x->auxadc_lock);
+
+ /* Enqueue the request */
+ list_add(&req->list, &wm831x->auxadc_pending);
+
+ ena = !wm831x->auxadc_active;
+
+ if (ena) {
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
+ WM831X_AUX_ENA, WM831X_AUX_ENA);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ /* Enable the conversion if not already running */
+ if (!(wm831x->auxadc_active & (1 << input))) {
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_SOURCE,
+ 1 << input, 1 << input);
+ if (ret != 0) {
+ dev_err(wm831x->dev,
+ "Failed to set AUXADC source: %d\n", ret);
+ goto out;
+ }
+
+ wm831x->auxadc_active |= 1 << input;
+ }
+
+ /* We convert at the fastest rate possible */
+ if (ena) {
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
+ WM831X_AUX_CVT_ENA |
+ WM831X_AUX_RATE_MASK,
+ WM831X_AUX_CVT_ENA |
+ WM831X_AUX_RATE_MASK);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to start AUXADC: %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ mutex_unlock(&wm831x->auxadc_lock);
+
+ /* Wait for an interrupt */
+ wait_for_completion_timeout(&req->done, msecs_to_jiffies(500));
+
+ mutex_lock(&wm831x->auxadc_lock);
+
+ list_del(&req->list);
+ ret = req->val;
+
+out:
+ mutex_unlock(&wm831x->auxadc_lock);
+
+ kfree(req);
+
+ return ret;
+}
+
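+/* AUXADC data interrupt: read the result, disable the completed source and
+ * complete every pending request that was waiting for it. */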
+static irqreturn_t wm831x_auxadc_irq(int irq, void *irq_data)
+{
+ struct wm831x *wm831x = irq_data;
+ struct wm831x_auxadc_req *req;
+ int ret, input, val;
+
+ ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
+ if (ret < 0) {
+ dev_err(wm831x->dev,
+ "Failed to read AUXADC data: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ input = ((ret & WM831X_AUX_DATA_SRC_MASK)
+ >> WM831X_AUX_DATA_SRC_SHIFT) - 1;
+
+ if (input == 14)
+ input = WM831X_AUX_CAL;
+
+ val = ret & WM831X_AUX_DATA_MASK;
+
+ mutex_lock(&wm831x->auxadc_lock);
+
+ /* Disable this conversion, we're about to complete all users */
+ wm831x_set_bits(wm831x, WM831X_AUXADC_SOURCE,
+ 1 << input, 0);
+ wm831x->auxadc_active &= ~(1 << input);
+
+ /* Turn off the entire converter if idle */
+ if (!wm831x->auxadc_active)
+ wm831x_reg_write(wm831x, WM831X_AUXADC_CONTROL, 0);
+
+ /* Wake up any threads waiting for this request */
+ list_for_each_entry(req, &wm831x->auxadc_pending, list) {
+ if (req->input == input) {
+ req->val = val;
+ complete(&req->done);
+ }
+ }
+
+ mutex_unlock(&wm831x->auxadc_lock);
+
+ return IRQ_HANDLED;
+}
+
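+/* Polled read: start a single conversion and poll the interrupt status
+ * register for completion. */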
+static int wm831x_auxadc_read_polled(struct wm831x *wm831x,
+ enum wm831x_auxadc input)
+{
+ int ret, src, timeout;
+
+ mutex_lock(&wm831x->auxadc_lock);
+
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
+ WM831X_AUX_ENA, WM831X_AUX_ENA);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret);
+ goto out;
+ }
+
+ /* We force a single source at present */
+ src = input;
+ ret = wm831x_reg_write(wm831x, WM831X_AUXADC_SOURCE,
+ 1 << src);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret);
+ goto out;
+ }
+
+ ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
+ WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret);
+ goto disable;
+ }
+
+ /* Poll the interrupt status register for completion since
+ * we are not using the AUXADC data interrupt */
+ timeout = 5;
+ while (timeout) {
+ msleep(1);
+
+ ret = wm831x_reg_read(wm831x,
+ WM831X_INTERRUPT_STATUS_1);
+ if (ret < 0) {
+ dev_err(wm831x->dev,
+ "ISR 1 read failed: %d\n", ret);
+ goto disable;
+ }
+
+ /* Did it complete? */
+ if (ret & WM831X_AUXADC_DATA_EINT) {
+ wm831x_reg_write(wm831x,
+ WM831X_INTERRUPT_STATUS_1,
+ WM831X_AUXADC_DATA_EINT);
+ break;
+ } else {
+ dev_err(wm831x->dev,
+ "AUXADC conversion timeout\n");
+ ret = -EBUSY;
+ goto disable;
+ }
+ }
+
+ ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
+ if (ret < 0) {
+ dev_err(wm831x->dev,
+ "Failed to read AUXADC data: %d\n", ret);
+ goto disable;
+ }
+
+ src = ((ret & WM831X_AUX_DATA_SRC_MASK)
+ >> WM831X_AUX_DATA_SRC_SHIFT) - 1;
+
+ if (src == 14)
+ src = WM831X_AUX_CAL;
+
+ if (src != input) {
+ dev_err(wm831x->dev, "Data from source %d not %d\n",
+ src, input);
+ ret = -EINVAL;
+ } else {
+ ret &= WM831X_AUX_DATA_MASK;
+ }
+
+disable:
+ wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, 0);
+out:
+ mutex_unlock(&wm831x->auxadc_lock);
+ return ret;
+}
+
+/**
+ * wm831x_auxadc_read: Read a value from the WM831x AUXADC
+ *
+ * @wm831x: Device to read from.
+ * @input: AUXADC input to read.
+ */
+int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
+{
+ return wm831x->auxadc_read(wm831x, input);
+}
+EXPORT_SYMBOL_GPL(wm831x_auxadc_read);
+
+/**
+ * wm831x_auxadc_read_uv: Read a voltage from the WM831x AUXADC
+ *
+ * @wm831x: Device to read from.
+ * @input: AUXADC input to read.
+ */
+int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input)
+{
+ int ret;
+
+ ret = wm831x_auxadc_read(wm831x, input);
+ if (ret < 0)
+ return ret;
+
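+ /* Scale from ADC counts to microvolts (1465uV per LSB) */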
+ ret *= 1465;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wm831x_auxadc_read_uv);
+
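+/* Use the interrupt-driven implementation when the AUXADC IRQ can be
+ * requested, otherwise fall back to polling. */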
+void wm831x_auxadc_init(struct wm831x *wm831x)
+{
+ int ret;
+
+ mutex_init(&wm831x->auxadc_lock);
+ INIT_LIST_HEAD(&wm831x->auxadc_pending);
+
+ if (wm831x->irq && wm831x->irq_base) {
+ wm831x->auxadc_read = wm831x_auxadc_read_irq;
+
+ ret = request_threaded_irq(wm831x->irq_base +
+ WM831X_IRQ_AUXADC_DATA,
+ NULL, wm831x_auxadc_irq, 0,
+ "auxadc", wm831x);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "AUXADC IRQ request failed: %d\n",
+ ret);
+ wm831x->auxadc_read = NULL;
+ }
+ }
+
+ if (!wm831x->auxadc_read)
+ wm831x->auxadc_read = wm831x_auxadc_read_polled;
+}
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 265f75fc6a2..282e76ab678 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -295,7 +295,7 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg,
goto out;
r &= ~mask;
- r |= val;
+ r |= val & mask;
ret = wm831x_write(wm831x, reg, 2, &r);
@@ -306,146 +306,6 @@ out:
}
EXPORT_SYMBOL_GPL(wm831x_set_bits);
-/**
- * wm831x_auxadc_read: Read a value from the WM831x AUXADC
- *
- * @wm831x: Device to read from.
- * @input: AUXADC input to read.
- */
-int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input)
-{
- int ret, src, irq_masked, timeout;
-
- /* Are we using the interrupt? */
- irq_masked = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1_MASK);
- irq_masked &= WM831X_AUXADC_DATA_EINT;
-
- mutex_lock(&wm831x->auxadc_lock);
-
- ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
- WM831X_AUX_ENA, WM831X_AUX_ENA);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to enable AUXADC: %d\n", ret);
- goto out;
- }
-
- /* We force a single source at present */
- src = input;
- ret = wm831x_reg_write(wm831x, WM831X_AUXADC_SOURCE,
- 1 << src);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to set AUXADC source: %d\n", ret);
- goto out;
- }
-
- /* Clear any notification from a very late arriving interrupt */
- try_wait_for_completion(&wm831x->auxadc_done);
-
- ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL,
- WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to start AUXADC: %d\n", ret);
- goto disable;
- }
-
- if (irq_masked) {
- /* If we're not using interrupts then poll the
- * interrupt status register */
- timeout = 5;
- while (timeout) {
- msleep(1);
-
- ret = wm831x_reg_read(wm831x,
- WM831X_INTERRUPT_STATUS_1);
- if (ret < 0) {
- dev_err(wm831x->dev,
- "ISR 1 read failed: %d\n", ret);
- goto disable;
- }
-
- /* Did it complete? */
- if (ret & WM831X_AUXADC_DATA_EINT) {
- wm831x_reg_write(wm831x,
- WM831X_INTERRUPT_STATUS_1,
- WM831X_AUXADC_DATA_EINT);
- break;
- } else {
- dev_err(wm831x->dev,
- "AUXADC conversion timeout\n");
- ret = -EBUSY;
- goto disable;
- }
- }
- } else {
- /* If we are using interrupts then wait for the
- * interrupt to complete. Use an extremely long
- * timeout to handle situations with heavy load where
- * the notification of the interrupt may be delayed by
- * threaded IRQ handling. */
- if (!wait_for_completion_timeout(&wm831x->auxadc_done,
- msecs_to_jiffies(500))) {
- dev_err(wm831x->dev, "Timed out waiting for AUXADC\n");
- ret = -EBUSY;
- goto disable;
- }
- }
-
- ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to read AUXADC data: %d\n", ret);
- } else {
- src = ((ret & WM831X_AUX_DATA_SRC_MASK)
- >> WM831X_AUX_DATA_SRC_SHIFT) - 1;
-
- if (src == 14)
- src = WM831X_AUX_CAL;
-
- if (src != input) {
- dev_err(wm831x->dev, "Data from source %d not %d\n",
- src, input);
- ret = -EINVAL;
- } else {
- ret &= WM831X_AUX_DATA_MASK;
- }
- }
-
-disable:
- wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_ENA, 0);
-out:
- mutex_unlock(&wm831x->auxadc_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(wm831x_auxadc_read);
-
-static irqreturn_t wm831x_auxadc_irq(int irq, void *irq_data)
-{
- struct wm831x *wm831x = irq_data;
-
- complete(&wm831x->auxadc_done);
-
- return IRQ_HANDLED;
-}
-
-/**
- * wm831x_auxadc_read_uv: Read a voltage from the WM831x AUXADC
- *
- * @wm831x: Device to read from.
- * @input: AUXADC input to read.
- */
-int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input)
-{
- int ret;
-
- ret = wm831x_auxadc_read(wm831x, input);
- if (ret < 0)
- return ret;
-
- ret *= 1465;
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(wm831x_auxadc_read_uv);
-
static struct resource wm831x_dcdc1_resources[] = {
{
.start = WM831X_DC1_CONTROL_1,
@@ -872,6 +732,9 @@ static struct mfd_cell wm8310_devs[] = {
.resources = wm831x_dcdc4_resources,
},
{
+ .name = "wm831x-clk",
+ },
+ {
.name = "wm831x-epe",
.id = 1,
},
@@ -976,11 +839,6 @@ static struct mfd_cell wm8310_devs[] = {
.resources = wm831x_power_resources,
},
{
- .name = "wm831x-rtc",
- .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
- .resources = wm831x_rtc_resources,
- },
- {
.name = "wm831x-status",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1028,6 +886,9 @@ static struct mfd_cell wm8311_devs[] = {
.resources = wm831x_dcdc4_resources,
},
{
+ .name = "wm831x-clk",
+ },
+ {
.name = "wm831x-epe",
.id = 1,
},
@@ -1108,11 +969,6 @@ static struct mfd_cell wm8311_devs[] = {
.resources = wm831x_power_resources,
},
{
- .name = "wm831x-rtc",
- .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
- .resources = wm831x_rtc_resources,
- },
- {
.name = "wm831x-status",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1125,11 +981,6 @@ static struct mfd_cell wm8311_devs[] = {
.resources = wm831x_status2_resources,
},
{
- .name = "wm831x-touch",
- .num_resources = ARRAY_SIZE(wm831x_touch_resources),
- .resources = wm831x_touch_resources,
- },
- {
.name = "wm831x-watchdog",
.num_resources = ARRAY_SIZE(wm831x_wdt_resources),
.resources = wm831x_wdt_resources,
@@ -1165,6 +1016,9 @@ static struct mfd_cell wm8312_devs[] = {
.resources = wm831x_dcdc4_resources,
},
{
+ .name = "wm831x-clk",
+ },
+ {
.name = "wm831x-epe",
.id = 1,
},
@@ -1269,11 +1123,6 @@ static struct mfd_cell wm8312_devs[] = {
.resources = wm831x_power_resources,
},
{
- .name = "wm831x-rtc",
- .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
- .resources = wm831x_rtc_resources,
- },
- {
.name = "wm831x-status",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1286,11 +1135,6 @@ static struct mfd_cell wm8312_devs[] = {
.resources = wm831x_status2_resources,
},
{
- .name = "wm831x-touch",
- .num_resources = ARRAY_SIZE(wm831x_touch_resources),
- .resources = wm831x_touch_resources,
- },
- {
.name = "wm831x-watchdog",
.num_resources = ARRAY_SIZE(wm831x_wdt_resources),
.resources = wm831x_wdt_resources,
@@ -1326,6 +1170,9 @@ static struct mfd_cell wm8320_devs[] = {
.resources = wm8320_dcdc4_buck_resources,
},
{
+ .name = "wm831x-clk",
+ },
+ {
.name = "wm831x-gpio",
.num_resources = ARRAY_SIZE(wm831x_gpio_resources),
.resources = wm831x_gpio_resources,
@@ -1405,11 +1252,6 @@ static struct mfd_cell wm8320_devs[] = {
.resources = wm831x_on_resources,
},
{
- .name = "wm831x-rtc",
- .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
- .resources = wm831x_rtc_resources,
- },
- {
.name = "wm831x-status",
.id = 1,
.num_resources = ARRAY_SIZE(wm831x_status1_resources),
@@ -1428,6 +1270,22 @@ static struct mfd_cell wm8320_devs[] = {
},
};
+static struct mfd_cell touch_devs[] = {
+ {
+ .name = "wm831x-touch",
+ .num_resources = ARRAY_SIZE(wm831x_touch_resources),
+ .resources = wm831x_touch_resources,
+ },
+};
+
+static struct mfd_cell rtc_devs[] = {
+ {
+ .name = "wm831x-rtc",
+ .num_resources = ARRAY_SIZE(wm831x_rtc_resources),
+ .resources = wm831x_rtc_resources,
+ },
+};
+
static struct mfd_cell backlight_devs[] = {
{
.name = "wm831x-backlight",
@@ -1440,14 +1298,12 @@ static struct mfd_cell backlight_devs[] = {
int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
{
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int rev;
+ int rev, wm831x_num;
enum wm831x_parent parent;
int ret, i;
mutex_init(&wm831x->io_lock);
mutex_init(&wm831x->key_lock);
- mutex_init(&wm831x->auxadc_lock);
- init_completion(&wm831x->auxadc_done);
dev_set_drvdata(wm831x->dev, wm831x);
ret = wm831x_reg_read(wm831x, WM831X_PARENT_ID);
@@ -1592,45 +1448,51 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
}
}
+ /* Multiply by 10 as we have many subdevices of the same type */
+ if (pdata && pdata->wm831x_num)
+ wm831x_num = pdata->wm831x_num * 10;
+ else
+ wm831x_num = -1;
+
ret = wm831x_irq_init(wm831x, irq);
if (ret != 0)
goto err;
- if (wm831x->irq_base) {
- ret = request_threaded_irq(wm831x->irq_base +
- WM831X_IRQ_AUXADC_DATA,
- NULL, wm831x_auxadc_irq, 0,
- "auxadc", wm831x);
- if (ret < 0)
- dev_err(wm831x->dev, "AUXADC IRQ request failed: %d\n",
- ret);
- }
+ wm831x_auxadc_init(wm831x);
/* The core device is up, instantiate the subdevices. */
switch (parent) {
case WM8310:
- ret = mfd_add_devices(wm831x->dev, -1,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8310_devs, ARRAY_SIZE(wm8310_devs),
NULL, wm831x->irq_base);
break;
case WM8311:
- ret = mfd_add_devices(wm831x->dev, -1,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8311_devs, ARRAY_SIZE(wm8311_devs),
NULL, wm831x->irq_base);
+ if (!pdata || !pdata->disable_touch)
+ mfd_add_devices(wm831x->dev, wm831x_num,
+ touch_devs, ARRAY_SIZE(touch_devs),
+ NULL, wm831x->irq_base);
break;
case WM8312:
- ret = mfd_add_devices(wm831x->dev, -1,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8312_devs, ARRAY_SIZE(wm8312_devs),
NULL, wm831x->irq_base);
+ if (!pdata || !pdata->disable_touch)
+ mfd_add_devices(wm831x->dev, wm831x_num,
+ touch_devs, ARRAY_SIZE(touch_devs),
+ NULL, wm831x->irq_base);
break;
case WM8320:
case WM8321:
case WM8325:
case WM8326:
- ret = mfd_add_devices(wm831x->dev, -1,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8320_devs, ARRAY_SIZE(wm8320_devs),
NULL, wm831x->irq_base);
break;
@@ -1645,9 +1507,30 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
goto err_irq;
}
+ /* The RTC can only be used if the 32.768kHz crystal is
+ * enabled; this can't be controlled by software at runtime.
+ */
+ ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to read clock status: %d\n", ret);
+ goto err_irq;
+ }
+
+ if (ret & WM831X_XTAL_ENA) {
+ ret = mfd_add_devices(wm831x->dev, wm831x_num,
+ rtc_devs, ARRAY_SIZE(rtc_devs),
+ NULL, wm831x->irq_base);
+ if (ret != 0) {
+ dev_err(wm831x->dev, "Failed to add RTC: %d\n", ret);
+ goto err_irq;
+ }
+ } else {
+ dev_info(wm831x->dev, "32.768kHz clock disabled, no RTC\n");
+ }
+
if (pdata && pdata->backlight) {
/* Treat errors as non-critical */
- ret = mfd_add_devices(wm831x->dev, -1, backlight_devs,
+ ret = mfd_add_devices(wm831x->dev, wm831x_num, backlight_devs,
ARRAY_SIZE(backlight_devs), NULL,
wm831x->irq_base);
if (ret < 0)
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 42b928ec891..ada1835a545 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -348,6 +348,15 @@ static void wm831x_irq_sync_unlock(struct irq_data *data)
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
int i;
+ for (i = 0; i < ARRAY_SIZE(wm831x->gpio_update); i++) {
+ if (wm831x->gpio_update[i]) {
+ wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + i,
+ WM831X_GPN_INT_MODE | WM831X_GPN_POL,
+ wm831x->gpio_update[i]);
+ wm831x->gpio_update[i] = 0;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
/* If there's been a change in the mask write it back
* to the hardware. */
@@ -387,7 +396,7 @@ static void wm831x_irq_disable(struct irq_data *data)
static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
- int val, irq;
+ int irq;
irq = data->irq - wm831x->irq_base;
@@ -399,22 +408,30 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
return -EINVAL;
}
+ /* Rebase the IRQ into the GPIO range so we've got a sensible array
+ * index.
+ */
+ irq -= WM831X_IRQ_GPIO_1;
+
+ /* We set the high bit to flag that we need an update; don't
+ * do the update here as we can be called with the bus lock
+ * held.
+ */
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
- val = WM831X_GPN_INT_MODE;
+ wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_INT_MODE;
break;
case IRQ_TYPE_EDGE_RISING:
- val = WM831X_GPN_POL;
+ wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
break;
case IRQ_TYPE_EDGE_FALLING:
- val = 0;
+ wm831x->gpio_update[irq] = 0x10000;
break;
default:
return -EINVAL;
}
- return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + irq,
- WM831X_GPN_INT_MODE | WM831X_GPN_POL, val);
+ return 0;
}
static struct irq_chip wm831x_irq_chip = {
@@ -432,7 +449,7 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
{
struct wm831x *wm831x = data;
unsigned int i;
- int primary;
+ int primary, status_addr;
int status_regs[WM831X_NUM_IRQ_REGS] = { 0 };
int read[WM831X_NUM_IRQ_REGS] = { 0 };
int *status;
@@ -467,8 +484,9 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
/* Hopefully there should only be one register to read
* each time otherwise we ought to do a block read. */
if (!read[offset]) {
- *status = wm831x_reg_read(wm831x,
- irq_data_to_status_reg(&wm831x_irqs[i]));
+ status_addr = irq_data_to_status_reg(&wm831x_irqs[i]);
+
+ *status = wm831x_reg_read(wm831x, status_addr);
if (*status < 0) {
dev_err(wm831x->dev,
"Failed to read IRQ status: %d\n",
@@ -477,26 +495,21 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
}
read[offset] = 1;
+
+ /* Ignore any bits that we think are masked */
+ *status &= ~wm831x->irq_masks_cur[offset];
+
+ /* Acknowledge now so we don't miss
+ * notifications while we handle them.
+ */
+ wm831x_reg_write(wm831x, status_addr, *status);
}
- /* Report it if it isn't masked, or forget the status. */
- if ((*status & ~wm831x->irq_masks_cur[offset])
- & wm831x_irqs[i].mask)
+ if (*status & wm831x_irqs[i].mask)
handle_nested_irq(wm831x->irq_base + i);
- else
- *status &= ~wm831x_irqs[i].mask;
}
out:
- /* Touchscreen interrupts are handled specially in the driver */
- status_regs[0] &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
-
- for (i = 0; i < ARRAY_SIZE(status_regs); i++) {
- if (status_regs[i])
- wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1 + i,
- status_regs[i]);
- }
-
return IRQ_HANDLED;
}
@@ -515,13 +528,22 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
0xffff);
}
- if (!pdata || !pdata->irq_base) {
- dev_err(wm831x->dev,
- "No interrupt base specified, no interrupts\n");
+ /* Try to dynamically allocate IRQs if no base is specified */
+ if (!pdata || !pdata->irq_base)
+ wm831x->irq_base = -1;
+ else
+ wm831x->irq_base = pdata->irq_base;
+
+ wm831x->irq_base = irq_alloc_descs(wm831x->irq_base, 0,
+ WM831X_NUM_IRQS, 0);
+ if (wm831x->irq_base < 0) {
+ dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
+ wm831x->irq_base);
+ wm831x->irq_base = 0;
return 0;
}
- if (pdata->irq_cmos)
+ if (pdata && pdata->irq_cmos)
i = 0;
else
i = WM831X_IRQ_OD;
@@ -541,7 +563,6 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
}
wm831x->irq = irq;
- wm831x->irq_base = pdata->irq_base;
/* Register them with genirq */
for (cur_irq = wm831x->irq_base;
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index ed4b22a167b..8a1fafd0bf7 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -473,17 +473,13 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
{
int ret, cur_irq, i;
int flags = IRQF_ONESHOT;
+ int irq_base = -1;
if (!irq) {
dev_warn(wm8350->dev, "No interrupt support, no core IRQ\n");
return 0;
}
- if (!pdata || !pdata->irq_base) {
- dev_warn(wm8350->dev, "No interrupt support, no IRQ base\n");
- return 0;
- }
-
/* Mask top level interrupts */
wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0xFFFF);
@@ -502,7 +498,17 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
wm8350->chip_irq = irq;
wm8350->irq_base = pdata->irq_base;
- if (pdata->irq_high) {
+ if (pdata && pdata->irq_base > 0)
+ irq_base = pdata->irq_base;
+
+ wm8350->irq_base = irq_alloc_descs(irq_base, 0, ARRAY_SIZE(wm8350_irqs), 0);
+ if (wm8350->irq_base < 0) {
+ dev_warn(wm8350->dev, "Allocating irqs failed with %d\n",
+ wm8350->irq_base);
+ return 0;
+ }
+
+ if (pdata && pdata->irq_high) {
flags |= IRQF_TRIGGER_HIGH;
wm8350_set_bits(wm8350, WM8350_SYSTEM_CONTROL_1,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index e198d40292e..96479c9b172 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -316,7 +316,7 @@ static int wm8994_suspend(struct device *dev)
static int wm8994_resume(struct device *dev)
{
struct wm8994 *wm8994 = dev_get_drvdata(dev);
- int ret;
+ int ret, i;
/* We may have lied to the PM core about suspending */
if (!wm8994->suspended)
@@ -329,10 +329,16 @@ static int wm8994_resume(struct device *dev)
return ret;
}
- ret = wm8994_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK,
- WM8994_NUM_IRQ_REGS * 2, &wm8994->irq_masks_cur);
- if (ret < 0)
- dev_err(dev, "Failed to restore interrupt masks: %d\n", ret);
+ /* Write one register at a time as we use the CPU-side cache,
+ * which stores the values in native endian.
+ */
+ for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
+ ret = wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK
+ + i, wm8994->irq_masks_cur[i]);
+ if (ret < 0)
+ dev_err(dev, "Failed to restore interrupt masks: %d\n",
+ ret);
+ }
ret = wm8994_write(wm8994, WM8994_LDO_1, WM8994_NUM_LDO_REGS * 2,
&wm8994->ldo_regs);
@@ -403,7 +409,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
default:
BUG();
- return -EINVAL;
+ goto err;
}
wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
@@ -425,7 +431,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
default:
BUG();
- return -EINVAL;
+ goto err;
}
ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
@@ -476,13 +482,18 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
goto err_enable;
}
- switch (ret) {
- case 0:
- case 1:
- if (wm8994->type == WM8994)
+ switch (wm8994->type) {
+ case WM8994:
+ switch (ret) {
+ case 0:
+ case 1:
dev_warn(wm8994->dev,
"revision %c not fully supported\n",
'A' + ret);
+ break;
+ default:
+ break;
+ }
break;
default:
break;
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 71c6e8f9aed..d682f7bd112 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -231,12 +231,6 @@ static irqreturn_t wm8994_irq_thread(int irq, void *data)
status[i] &= ~wm8994->irq_masks_cur[i];
}
- /* Report */
- for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
- if (status[wm8994_irqs[i].reg - 1] & wm8994_irqs[i].mask)
- handle_nested_irq(wm8994->irq_base + i);
- }
-
/* Ack any unmasked IRQs */
for (i = 0; i < ARRAY_SIZE(status); i++) {
if (status[i])
@@ -244,6 +238,12 @@ static irqreturn_t wm8994_irq_thread(int irq, void *data)
status[i]);
}
+ /* Report */
+ for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
+ if (status[wm8994_irqs[i].reg - 1] & wm8994_irqs[i].mask)
+ handle_nested_irq(wm8994->irq_base + i);
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 4e349cd98bc..0a4d86c6c4a 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -245,8 +245,7 @@ config SGI_XP
config CS5535_MFGPT
tristate "CS5535/CS5536 Geode Multi-Function General Purpose Timer (MFGPT) support"
- depends on PCI
- depends on X86
+ depends on PCI && X86 && MFD_CS5535
default n
help
This driver provides access to MFGPT functionality for other
@@ -490,6 +489,15 @@ config PCH_PHUB
To compile this driver as a module, choose M here: the module will
be called pch_phub.
+config USB_SWITCH_FSA9480
+ tristate "FSA9480 USB Switch"
+ depends on I2C
+ help
+ The FSA9480 is a USB port accessory detector and switch.
+ It is fully controlled using I2C and enables USB data,
+ stereo and mono audio, video, microphone and UART data to
+ share a common connector port.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 5f03172cc0b..8f3efb68a14 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_IBM_ASM) += ibmasm/
obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o
obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
-0bj-$(CONFIG_INTEL_MID_PTI) += pti.o
+obj-$(CONFIG_INTEL_MID_PTI) += pti.o
obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
@@ -46,3 +46,4 @@ obj-y += ti-st/
obj-$(CONFIG_AB8500_PWM) += ab8500-pwm.o
obj-y += lis3lv02d/
obj-y += carma/
+obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 4afffe610f9..769a4e8e10d 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -95,7 +95,7 @@ static int __init ssc_probe(struct platform_device *pdev)
}
ssc->pdev = pdev;
- ssc->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ ssc->regs = ioremap(regs->start, resource_size(regs));
if (!ssc->regs) {
dev_dbg(&pdev->dev, "ioremap failed\n");
retval = -EINVAL;
diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c
index 0f3fb4f03bd..28f5aaa19d4 100644
--- a/drivers/misc/atmel_pwm.c
+++ b/drivers/misc/atmel_pwm.c
@@ -329,7 +329,7 @@ static int __init pwm_probe(struct platform_device *pdev)
p->pdev = pdev;
p->mask = *mp;
p->irq = irq;
- p->base = ioremap(r->start, r->end - r->start + 1);
+ p->base = ioremap(r->start, resource_size(r));
if (!p->base)
goto fail;
p->clk = clk_get(&pdev->dev, "pwm_clk");
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
index efec4139c3f..68cd05b6d82 100644
--- a/drivers/misc/cb710/core.c
+++ b/drivers/misc/cb710/core.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg);
static int __devinit cb710_pci_configure(struct pci_dev *pdev)
{
unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
- struct pci_dev *pdev0 = pci_get_slot(pdev->bus, devfn);
+ struct pci_dev *pdev0;
u32 val;
cb710_pci_update_config_reg(pdev, 0x48,
@@ -43,6 +43,7 @@ static int __devinit cb710_pci_configure(struct pci_dev *pdev)
if (val & 0x80000000)
return 0;
+ pdev0 = pci_get_slot(pdev->bus, devfn);
if (!pdev0)
return -ENODEV;
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
index d019746551f..2a40d0efdff 100644
--- a/drivers/misc/cb710/sgbuf2.c
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -47,7 +47,7 @@ static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter)
static inline bool needs_unaligned_copy(const void *ptr)
{
-#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
return false;
#else
return ((ptr - NULL) & 3) != 0;
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 9118613af32..26cf12ca7f5 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -70,4 +70,29 @@ config EEPROM_93CX6
If unsure, say N.
+config EEPROM_93XX46
+ tristate "Microwire EEPROM 93XX46 support"
+ depends on SPI && SYSFS
+ help
+ Driver for the microwire EEPROM chipsets 93xx46x. The driver
+ supports both read and write commands and also the command to
+ erase the whole EEPROM.
+
+ This driver can also be built as a module. If so, the module
+ will be called eeprom_93xx46.
+
+ If unsure, say N.
+
+config EEPROM_DIGSY_MTC_CFG
+ bool "DigsyMTC display configuration EEPROMs device"
+ depends on PPC_MPC5200_GPIO && GPIOLIB && SPI_GPIO
+ help
+ This option enables access to the display configuration EEPROMs
+ on the digsy_mtc board. You also have to select the Microwire
+ EEPROM 93XX46 driver. sysfs entries will be created for the
+ EEPROM, allowing you to read/write the configuration data or to
+ erase the whole EEPROM.
+
+ If unsure, say N.
+
endmenu
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index df3d68ffa9d..fc1e81d2926 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -3,3 +3,5 @@ obj-$(CONFIG_EEPROM_AT25) += at25.o
obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o
obj-$(CONFIG_EEPROM_MAX6875) += max6875.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
+obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
+obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c
new file mode 100644
index 00000000000..66d9e1baeae
--- /dev/null
+++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c
@@ -0,0 +1,85 @@
+/*
+ * EEPROMs access control driver for display configuration EEPROMs
+ * on DigsyMTC board.
+ *
+ * (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_gpio.h>
+#include <linux/eeprom_93xx46.h>
+
+#define GPIO_EEPROM_CLK 216
+#define GPIO_EEPROM_CS 210
+#define GPIO_EEPROM_DI 217
+#define GPIO_EEPROM_DO 249
+#define GPIO_EEPROM_OE 255
+#define EE_SPI_BUS_NUM 1
+
+static void digsy_mtc_op_prepare(void *p)
+{
+ /* enable */
+ gpio_set_value(GPIO_EEPROM_OE, 0);
+}
+
+static void digsy_mtc_op_finish(void *p)
+{
+ /* disable */
+ gpio_set_value(GPIO_EEPROM_OE, 1);
+}
+
+struct eeprom_93xx46_platform_data digsy_mtc_eeprom_data = {
+ .flags = EE_ADDR8,
+ .prepare = digsy_mtc_op_prepare,
+ .finish = digsy_mtc_op_finish,
+};
+
+static struct spi_gpio_platform_data eeprom_spi_gpio_data = {
+ .sck = GPIO_EEPROM_CLK,
+ .mosi = GPIO_EEPROM_DI,
+ .miso = GPIO_EEPROM_DO,
+ .num_chipselect = 1,
+};
+
+static struct platform_device digsy_mtc_eeprom = {
+ .name = "spi_gpio",
+ .id = EE_SPI_BUS_NUM,
+ .dev = {
+ .platform_data = &eeprom_spi_gpio_data,
+ },
+};
+
+static struct spi_board_info digsy_mtc_eeprom_info[] __initdata = {
+ {
+ .modalias = "93xx46",
+ .max_speed_hz = 1000000,
+ .bus_num = EE_SPI_BUS_NUM,
+ .chip_select = 0,
+ .mode = SPI_MODE_0,
+ .controller_data = (void *)GPIO_EEPROM_CS,
+ .platform_data = &digsy_mtc_eeprom_data,
+ },
+};
+
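+/* Claim the EEPROM output-enable GPIO, then register the GPIO bit-banged
+ * SPI bus and the 93xx46 board info. */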
+static int __init digsy_mtc_eeprom_devices_init(void)
+{
+ int ret;
+
+ ret = gpio_request_one(GPIO_EEPROM_OE, GPIOF_OUT_INIT_HIGH,
+ "93xx46 EEPROMs OE");
+ if (ret) {
+ pr_err("can't request gpio %d\n", GPIO_EEPROM_OE);
+ return ret;
+ }
+ spi_register_board_info(digsy_mtc_eeprom_info,
+ ARRAY_SIZE(digsy_mtc_eeprom_info));
+ return platform_device_register(&digsy_mtc_eeprom);
+}
+device_initcall(digsy_mtc_eeprom_devices_init);
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
new file mode 100644
index 00000000000..0c7ebb1e19e
--- /dev/null
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -0,0 +1,410 @@
+/*
+ * Driver for 93xx46 EEPROMs
+ *
+ * (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/eeprom_93xx46.h>
+
+#define OP_START 0x4
+#define OP_WRITE (OP_START | 0x1)
+#define OP_READ (OP_START | 0x2)
+#define ADDR_EWDS 0x00
+#define ADDR_ERAL 0x20
+#define ADDR_EWEN 0x30
+
+struct eeprom_93xx46_dev {
+ struct spi_device *spi;
+ struct eeprom_93xx46_platform_data *pdata;
+ struct bin_attribute bin;
+ struct mutex lock;
+ int addrlen;
+};
+
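+/* sysfs read: send an OP_READ command for the requested offset and clock
+ * the data back in a second transfer. */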
+static ssize_t
+eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct eeprom_93xx46_dev *edev;
+ struct device *dev;
+ struct spi_message m;
+ struct spi_transfer t[2];
+ int bits, ret;
+ u16 cmd_addr;
+
+ dev = container_of(kobj, struct device, kobj);
+ edev = dev_get_drvdata(dev);
+
+ if (unlikely(off >= edev->bin.size))
+ return 0;
+ if ((off + count) > edev->bin.size)
+ count = edev->bin.size - off;
+ if (unlikely(!count))
+ return count;
+
+ cmd_addr = OP_READ << edev->addrlen;
+
+ if (edev->addrlen == 7) {
+ cmd_addr |= off & 0x7f;
+ bits = 10;
+ } else {
+ cmd_addr |= off & 0x3f;
+ bits = 9;
+ }
+
+ dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
+ cmd_addr, edev->spi->max_speed_hz);
+
+ spi_message_init(&m);
+ memset(t, 0, sizeof(t));
+
+ t[0].tx_buf = (char *)&cmd_addr;
+ t[0].len = 2;
+ t[0].bits_per_word = bits;
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = count;
+ t[1].bits_per_word = 8;
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&edev->lock);
+
+ if (edev->pdata->prepare)
+ edev->pdata->prepare(edev);
+
+ ret = spi_sync(edev->spi, &m);
+ /* have to wait at least Tcsl ns */
+ ndelay(250);
+ if (ret) {
+ dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
+ count, (int)off, ret);
+ }
+
+ if (edev->pdata->finish)
+ edev->pdata->finish(edev);
+
+ mutex_unlock(&edev->lock);
+ return ret ? : count;
+}
+
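+/* Enable or disable erase/write by issuing the EWEN/EWDS address command. */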
+static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
+{
+ struct spi_message m;
+ struct spi_transfer t;
+ int bits, ret;
+ u16 cmd_addr;
+
+ cmd_addr = OP_START << edev->addrlen;
+ if (edev->addrlen == 7) {
+ cmd_addr |= (is_on ? ADDR_EWEN : ADDR_EWDS) << 1;
+ bits = 10;
+ } else {
+ cmd_addr |= (is_on ? ADDR_EWEN : ADDR_EWDS);
+ bits = 9;
+ }
+
+ dev_dbg(&edev->spi->dev, "ew cmd 0x%04x\n", cmd_addr);
+
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(t));
+
+ t.tx_buf = &cmd_addr;
+ t.len = 2;
+ t.bits_per_word = bits;
+ spi_message_add_tail(&t, &m);
+
+ mutex_lock(&edev->lock);
+
+ if (edev->pdata->prepare)
+ edev->pdata->prepare(edev);
+
+ ret = spi_sync(edev->spi, &m);
+ /* have to wait at least Tcsl ns */
+ ndelay(250);
+ if (ret)
+ dev_err(&edev->spi->dev, "erase/write %sable error %d\n",
+ is_on ? "en" : "dis", ret);
+
+ if (edev->pdata->finish)
+ edev->pdata->finish(edev);
+
+ mutex_unlock(&edev->lock);
+ return ret;
+}
+
+static ssize_t
+eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
+ const char *buf, unsigned off)
+{
+ struct spi_message m;
+ struct spi_transfer t[2];
+ int bits, data_len, ret;
+ u16 cmd_addr;
+
+ cmd_addr = OP_WRITE << edev->addrlen;
+
+ if (edev->addrlen == 7) {
+ cmd_addr |= off & 0x7f;
+ bits = 10;
+ data_len = 1;
+ } else {
+ cmd_addr |= off & 0x3f;
+ bits = 9;
+ data_len = 2;
+ }
+
+ dev_dbg(&edev->spi->dev, "write cmd 0x%x\n", cmd_addr);
+
+ spi_message_init(&m);
+ memset(t, 0, sizeof(t));
+
+ t[0].tx_buf = (char *)&cmd_addr;
+ t[0].len = 2;
+ t[0].bits_per_word = bits;
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = data_len;
+ t[1].bits_per_word = 8;
+ spi_message_add_tail(&t[1], &m);
+
+ ret = spi_sync(edev->spi, &m);
+ /* have to wait program cycle time Twc ms */
+ mdelay(6);
+ return ret;
+}
+
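+/* sysfs write: enable erase/write, program the data word by word and then
+ * disable erase/write again. */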
+static ssize_t
+eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct eeprom_93xx46_dev *edev;
+ struct device *dev;
+ int i, ret, step = 1;
+
+ dev = container_of(kobj, struct device, kobj);
+ edev = dev_get_drvdata(dev);
+
+ if (unlikely(off >= edev->bin.size))
+ return 0;
+ if ((off + count) > edev->bin.size)
+ count = edev->bin.size - off;
+ if (unlikely(!count))
+ return count;
+
+ /* only write an even number of bytes on 16-bit devices */
+ if (edev->addrlen == 6) {
+ step = 2;
+ count &= ~1;
+ }
+
+ /* erase/write enable */
+ ret = eeprom_93xx46_ew(edev, 1);
+ if (ret)
+ return ret;
+
+ mutex_lock(&edev->lock);
+
+ if (edev->pdata->prepare)
+ edev->pdata->prepare(edev);
+
+ for (i = 0; i < count; i += step) {
+ ret = eeprom_93xx46_write_word(edev, &buf[i], off + i);
+ if (ret) {
+ dev_err(&edev->spi->dev, "write failed at %d: %d\n",
+ (int)off + i, ret);
+ break;
+ }
+ }
+
+ if (edev->pdata->finish)
+ edev->pdata->finish(edev);
+
+ mutex_unlock(&edev->lock);
+
+ /* erase/write disable */
+ eeprom_93xx46_ew(edev, 0);
+ return ret ? : count;
+}
+
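+/* Erase the whole EEPROM with the ERAL command; the caller must have
+ * enabled erase/write first. */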
+static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
+{
+ struct eeprom_93xx46_platform_data *pd = edev->pdata;
+ struct spi_message m;
+ struct spi_transfer t;
+ int bits, ret;
+ u16 cmd_addr;
+
+ cmd_addr = OP_START << edev->addrlen;
+ if (edev->addrlen == 7) {
+ cmd_addr |= ADDR_ERAL << 1;
+ bits = 10;
+ } else {
+ cmd_addr |= ADDR_ERAL;
+ bits = 9;
+ }
+
+ spi_message_init(&m);
+ memset(&t, 0, sizeof(t));
+
+ t.tx_buf = &cmd_addr;
+ t.len = 2;
+ t.bits_per_word = bits;
+ spi_message_add_tail(&t, &m);
+
+ mutex_lock(&edev->lock);
+
+ if (edev->pdata->prepare)
+ edev->pdata->prepare(edev);
+
+ ret = spi_sync(edev->spi, &m);
+ if (ret)
+ dev_err(&edev->spi->dev, "erase error %d\n", ret);
+ /* have to wait erase cycle time Tec ms */
+ mdelay(6);
+
+ if (pd->finish)
+ pd->finish(edev);
+
+ mutex_unlock(&edev->lock);
+ return ret;
+}
+
+static ssize_t eeprom_93xx46_store_erase(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct eeprom_93xx46_dev *edev = dev_get_drvdata(dev);
+ int erase = 0, ret;
+
+ sscanf(buf, "%d", &erase);
+ if (erase) {
+ ret = eeprom_93xx46_ew(edev, 1);
+ if (ret)
+ return ret;
+ ret = eeprom_93xx46_eral(edev);
+ if (ret)
+ return ret;
+ ret = eeprom_93xx46_ew(edev, 0);
+ if (ret)
+ return ret;
+ }
+ return count;
+}
+static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase);
+
+static int __devinit eeprom_93xx46_probe(struct spi_device *spi)
+{
+ struct eeprom_93xx46_platform_data *pd;
+ struct eeprom_93xx46_dev *edev;
+ int err;
+
+ pd = spi->dev.platform_data;
+ if (!pd) {
+ dev_err(&spi->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ return -ENOMEM;
+
+ if (pd->flags & EE_ADDR8)
+ edev->addrlen = 7;
+ else if (pd->flags & EE_ADDR16)
+ edev->addrlen = 6;
+ else {
+ dev_err(&spi->dev, "unspecified address type\n");
+ err = -EINVAL;
+ goto fail;
+ }
+
+ mutex_init(&edev->lock);
+
+ edev->spi = spi_dev_get(spi);
+ edev->pdata = pd;
+
+ sysfs_bin_attr_init(&edev->bin);
+ edev->bin.attr.name = "eeprom";
+ edev->bin.attr.mode = S_IRUSR;
+ edev->bin.read = eeprom_93xx46_bin_read;
+ edev->bin.size = 128;
+ if (!(pd->flags & EE_READONLY)) {
+ edev->bin.write = eeprom_93xx46_bin_write;
+ edev->bin.attr.mode |= S_IWUSR;
+ }
+
+ err = sysfs_create_bin_file(&spi->dev.kobj, &edev->bin);
+ if (err)
+ goto fail;
+
+ dev_info(&spi->dev, "%d-bit eeprom %s\n",
+ (pd->flags & EE_ADDR8) ? 8 : 16,
+ (pd->flags & EE_READONLY) ? "(readonly)" : "");
+
+ if (!(pd->flags & EE_READONLY)) {
+ if (device_create_file(&spi->dev, &dev_attr_erase))
+ dev_err(&spi->dev, "can't create erase interface\n");
+ }
+
+ dev_set_drvdata(&spi->dev, edev);
+ return 0;
+fail:
+ kfree(edev);
+ return err;
+}
+
+static int __devexit eeprom_93xx46_remove(struct spi_device *spi)
+{
+ struct eeprom_93xx46_dev *edev = dev_get_drvdata(&spi->dev);
+
+ if (!(edev->pdata->flags & EE_READONLY))
+ device_remove_file(&spi->dev, &dev_attr_erase);
+
+ sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin);
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(edev);
+ return 0;
+}
+
+static struct spi_driver eeprom_93xx46_driver = {
+ .driver = {
+ .name = "93xx46",
+ .owner = THIS_MODULE,
+ },
+ .probe = eeprom_93xx46_probe,
+ .remove = __devexit_p(eeprom_93xx46_remove),
+};
+
+static int __init eeprom_93xx46_init(void)
+{
+ return spi_register_driver(&eeprom_93xx46_driver);
+}
+module_init(eeprom_93xx46_init);
+
+static void __exit eeprom_93xx46_exit(void)
+{
+ spi_unregister_driver(&eeprom_93xx46_driver);
+}
+module_exit(eeprom_93xx46_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs");
+MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
+MODULE_ALIAS("spi:93xx46");
diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c
new file mode 100644
index 00000000000..5325a7e70dc
--- /dev/null
+++ b/drivers/misc/fsa9480.c
@@ -0,0 +1,557 @@
+/*
+ * fsa9480.c - FSA9480 micro USB switch device driver
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * Minkyu Kang <mk7.kang@samsung.com>
+ * Wonguk Jeong <wonguk.jeong@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/platform_data/fsa9480.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+/* FSA9480 I2C registers */
+#define FSA9480_REG_DEVID 0x01
+#define FSA9480_REG_CTRL 0x02
+#define FSA9480_REG_INT1 0x03
+#define FSA9480_REG_INT2 0x04
+#define FSA9480_REG_INT1_MASK 0x05
+#define FSA9480_REG_INT2_MASK 0x06
+#define FSA9480_REG_ADC 0x07
+#define FSA9480_REG_TIMING1 0x08
+#define FSA9480_REG_TIMING2 0x09
+#define FSA9480_REG_DEV_T1 0x0a
+#define FSA9480_REG_DEV_T2 0x0b
+#define FSA9480_REG_BTN1 0x0c
+#define FSA9480_REG_BTN2 0x0d
+#define FSA9480_REG_CK 0x0e
+#define FSA9480_REG_CK_INT1 0x0f
+#define FSA9480_REG_CK_INT2 0x10
+#define FSA9480_REG_CK_INTMASK1 0x11
+#define FSA9480_REG_CK_INTMASK2 0x12
+#define FSA9480_REG_MANSW1 0x13
+#define FSA9480_REG_MANSW2 0x14
+
+/* Control */
+#define CON_SWITCH_OPEN (1 << 4)
+#define CON_RAW_DATA (1 << 3)
+#define CON_MANUAL_SW (1 << 2)
+#define CON_WAIT (1 << 1)
+#define CON_INT_MASK (1 << 0)
+#define CON_MASK (CON_SWITCH_OPEN | CON_RAW_DATA | \
+ CON_MANUAL_SW | CON_WAIT)
+
+/* Device Type 1 */
+#define DEV_USB_OTG (1 << 7)
+#define DEV_DEDICATED_CHG (1 << 6)
+#define DEV_USB_CHG (1 << 5)
+#define DEV_CAR_KIT (1 << 4)
+#define DEV_UART (1 << 3)
+#define DEV_USB (1 << 2)
+#define DEV_AUDIO_2 (1 << 1)
+#define DEV_AUDIO_1 (1 << 0)
+
+#define DEV_T1_USB_MASK (DEV_USB_OTG | DEV_USB)
+#define DEV_T1_UART_MASK (DEV_UART)
+#define DEV_T1_CHARGER_MASK (DEV_DEDICATED_CHG | DEV_USB_CHG)
+
+/* Device Type 2 */
+#define DEV_AV (1 << 6)
+#define DEV_TTY (1 << 5)
+#define DEV_PPD (1 << 4)
+#define DEV_JIG_UART_OFF (1 << 3)
+#define DEV_JIG_UART_ON (1 << 2)
+#define DEV_JIG_USB_OFF (1 << 1)
+#define DEV_JIG_USB_ON (1 << 0)
+
+#define DEV_T2_USB_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON)
+#define DEV_T2_UART_MASK (DEV_JIG_UART_OFF | DEV_JIG_UART_ON)
+#define DEV_T2_JIG_MASK (DEV_JIG_USB_OFF | DEV_JIG_USB_ON | \
+ DEV_JIG_UART_OFF | DEV_JIG_UART_ON)
+
+/*
+ * Manual Switch
+ * D- [7:5] / D+ [4:2]
+ * 000: Open all / 001: USB / 010: AUDIO / 011: UART / 100: V_AUDIO
+ */
+#define SW_VAUDIO ((4 << 5) | (4 << 2))
+#define SW_UART ((3 << 5) | (3 << 2))
+#define SW_AUDIO ((2 << 5) | (2 << 2))
+#define SW_DHOST ((1 << 5) | (1 << 2))
+#define SW_AUTO ((0 << 5) | (0 << 2))
+
+/* Interrupt 1 */
+#define INT_DETACH (1 << 1)
+#define INT_ATTACH (1 << 0)
+
+struct fsa9480_usbsw {
+ struct i2c_client *client;
+ struct fsa9480_platform_data *pdata;
+ int dev1;
+ int dev2;
+ int mansw;
+};
+
+static struct fsa9480_usbsw *chip;
+
+static int fsa9480_write_reg(struct i2c_client *client,
+ int reg, int value)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, reg, value);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int fsa9480_read_reg(struct i2c_client *client, int reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static int fsa9480_read_irq(struct i2c_client *client, int *value)
+{
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(client,
+ FSA9480_REG_INT1, 2, (u8 *)value);
+ *value &= 0xffff;
+
+ if (ret < 0)
+ dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+ return ret;
+}
+
+static void fsa9480_set_switch(const char *buf)
+{
+ struct fsa9480_usbsw *usbsw = chip;
+ struct i2c_client *client = usbsw->client;
+ unsigned int value;
+ unsigned int path = 0;
+
+ value = fsa9480_read_reg(client, FSA9480_REG_CTRL);
+
+ if (!strncmp(buf, "VAUDIO", 6)) {
+ path = SW_VAUDIO;
+ value &= ~CON_MANUAL_SW;
+ } else if (!strncmp(buf, "UART", 4)) {
+ path = SW_UART;
+ value &= ~CON_MANUAL_SW;
+ } else if (!strncmp(buf, "AUDIO", 5)) {
+ path = SW_AUDIO;
+ value &= ~CON_MANUAL_SW;
+ } else if (!strncmp(buf, "DHOST", 5)) {
+ path = SW_DHOST;
+ value &= ~CON_MANUAL_SW;
+ } else if (!strncmp(buf, "AUTO", 4)) {
+ path = SW_AUTO;
+ value |= CON_MANUAL_SW;
+ } else {
+ printk(KERN_ERR "Wrong command\n");
+ return;
+ }
+
+ usbsw->mansw = path;
+ fsa9480_write_reg(client, FSA9480_REG_MANSW1, path);
+ fsa9480_write_reg(client, FSA9480_REG_CTRL, value);
+}
+
+static ssize_t fsa9480_get_switch(char *buf)
+{
+ struct fsa9480_usbsw *usbsw = chip;
+ struct i2c_client *client = usbsw->client;
+ unsigned int value;
+
+ value = fsa9480_read_reg(client, FSA9480_REG_MANSW1);
+
+ if (value == SW_VAUDIO)
+ return sprintf(buf, "VAUDIO\n");
+ else if (value == SW_UART)
+ return sprintf(buf, "UART\n");
+ else if (value == SW_AUDIO)
+ return sprintf(buf, "AUDIO\n");
+ else if (value == SW_DHOST)
+ return sprintf(buf, "DHOST\n");
+ else if (value == SW_AUTO)
+ return sprintf(buf, "AUTO\n");
+ else
+ return sprintf(buf, "%x", value);
+}
+
+static ssize_t fsa9480_show_device(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fsa9480_usbsw *usbsw = dev_get_drvdata(dev);
+ struct i2c_client *client = usbsw->client;
+ int dev1, dev2;
+
+ dev1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1);
+ dev2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2);
+
+ if (!dev1 && !dev2)
+ return sprintf(buf, "NONE\n");
+
+ /* USB */
+ if (dev1 & DEV_T1_USB_MASK || dev2 & DEV_T2_USB_MASK)
+ return sprintf(buf, "USB\n");
+
+ /* UART */
+ if (dev1 & DEV_T1_UART_MASK || dev2 & DEV_T2_UART_MASK)
+ return sprintf(buf, "UART\n");
+
+ /* CHARGER */
+ if (dev1 & DEV_T1_CHARGER_MASK)
+ return sprintf(buf, "CHARGER\n");
+
+ /* JIG */
+ if (dev2 & DEV_T2_JIG_MASK)
+ return sprintf(buf, "JIG\n");
+
+ return sprintf(buf, "UNKNOWN\n");
+}
+
+static ssize_t fsa9480_show_manualsw(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return fsa9480_get_switch(buf);
+
+}
+
+static ssize_t fsa9480_set_manualsw(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ fsa9480_set_switch(buf);
+
+ return count;
+}
+
+static DEVICE_ATTR(device, S_IRUGO, fsa9480_show_device, NULL);
+static DEVICE_ATTR(switch, S_IRUGO | S_IWUSR,
+ fsa9480_show_manualsw, fsa9480_set_manualsw);
+
+static struct attribute *fsa9480_attributes[] = {
+ &dev_attr_device.attr,
+ &dev_attr_switch.attr,
+ NULL
+};
+
+static const struct attribute_group fsa9480_group = {
+ .attrs = fsa9480_attributes,
+};
+
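+/* Decode the device type registers and call the platform attach/detach
+ * callbacks for USB, UART, charger and JIG accessories. */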
+static void fsa9480_detect_dev(struct fsa9480_usbsw *usbsw, int intr)
+{
+ int val1, val2, ctrl;
+ struct fsa9480_platform_data *pdata = usbsw->pdata;
+ struct i2c_client *client = usbsw->client;
+
+ val1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1);
+ val2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2);
+ ctrl = fsa9480_read_reg(client, FSA9480_REG_CTRL);
+
+ dev_info(&client->dev, "intr: 0x%x, dev1: 0x%x, dev2: 0x%x\n",
+ intr, val1, val2);
+
+ if (!intr)
+ goto out;
+
+ if (intr & INT_ATTACH) { /* Attached */
+ /* USB */
+ if (val1 & DEV_T1_USB_MASK || val2 & DEV_T2_USB_MASK) {
+ if (pdata->usb_cb)
+ pdata->usb_cb(FSA9480_ATTACHED);
+
+ if (usbsw->mansw) {
+ fsa9480_write_reg(client,
+ FSA9480_REG_MANSW1, usbsw->mansw);
+ }
+ }
+
+ /* UART */
+ if (val1 & DEV_T1_UART_MASK || val2 & DEV_T2_UART_MASK) {
+ if (pdata->uart_cb)
+ pdata->uart_cb(FSA9480_ATTACHED);
+
+ if (!(ctrl & CON_MANUAL_SW)) {
+ fsa9480_write_reg(client,
+ FSA9480_REG_MANSW1, SW_UART);
+ }
+ }
+
+ /* CHARGER */
+ if (val1 & DEV_T1_CHARGER_MASK) {
+ if (pdata->charger_cb)
+ pdata->charger_cb(FSA9480_ATTACHED);
+ }
+
+ /* JIG */
+ if (val2 & DEV_T2_JIG_MASK) {
+ if (pdata->jig_cb)
+ pdata->jig_cb(FSA9480_ATTACHED);
+ }
+ } else if (intr & INT_DETACH) { /* Detached */
+ /* USB */
+ if (usbsw->dev1 & DEV_T1_USB_MASK ||
+ usbsw->dev2 & DEV_T2_USB_MASK) {
+ if (pdata->usb_cb)
+ pdata->usb_cb(FSA9480_DETACHED);
+ }
+
+ /* UART */
+ if (usbsw->dev1 & DEV_T1_UART_MASK ||
+ usbsw->dev2 & DEV_T2_UART_MASK) {
+ if (pdata->uart_cb)
+ pdata->uart_cb(FSA9480_DETACHED);
+ }
+
+ /* CHARGER */
+ if (usbsw->dev1 & DEV_T1_CHARGER_MASK) {
+ if (pdata->charger_cb)
+ pdata->charger_cb(FSA9480_DETACHED);
+ }
+
+ /* JIG */
+ if (usbsw->dev2 & DEV_T2_JIG_MASK) {
+ if (pdata->jig_cb)
+ pdata->jig_cb(FSA9480_DETACHED);
+ }
+ }
+
+ usbsw->dev1 = val1;
+ usbsw->dev2 = val2;
+
+out:
+ ctrl &= ~CON_INT_MASK;
+ fsa9480_write_reg(client, FSA9480_REG_CTRL, ctrl);
+}
+
+static irqreturn_t fsa9480_irq_handler(int irq, void *data)
+{
+ struct fsa9480_usbsw *usbsw = data;
+ struct i2c_client *client = usbsw->client;
+ int intr;
+
+ /* clear interrupt */
+ fsa9480_read_irq(client, &intr);
+
+ /* device detection */
+ fsa9480_detect_dev(usbsw, intr);
+
+ return IRQ_HANDLED;
+}
+
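+/* Clear any pending interrupt, unmask attach/detach events and request the
+ * threaded IRQ if the client has one. */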
+static int fsa9480_irq_init(struct fsa9480_usbsw *usbsw)
+{
+ struct fsa9480_platform_data *pdata = usbsw->pdata;
+ struct i2c_client *client = usbsw->client;
+ int ret;
+ int intr;
+ unsigned int ctrl = CON_MASK;
+
+ /* clear interrupt */
+ fsa9480_read_irq(client, &intr);
+
+ /* unmask interrupt (attach/detach only) */
+ fsa9480_write_reg(client, FSA9480_REG_INT1_MASK, 0xfc);
+ fsa9480_write_reg(client, FSA9480_REG_INT2_MASK, 0x1f);
+
+ usbsw->mansw = fsa9480_read_reg(client, FSA9480_REG_MANSW1);
+
+ if (usbsw->mansw)
+ ctrl &= ~CON_MANUAL_SW; /* Manual Switching Mode */
+
+ fsa9480_write_reg(client, FSA9480_REG_CTRL, ctrl);
+
+ if (pdata && pdata->cfg_gpio)
+ pdata->cfg_gpio();
+
+ if (client->irq) {
+ ret = request_threaded_irq(client->irq, NULL,
+ fsa9480_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "fsa9480 micro USB", usbsw);
+ if (ret) {
+ dev_err(&client->dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ device_init_wakeup(&client->dev, pdata->wakeup);
+ }
+
+ return 0;
+}
+
+static int __devinit fsa9480_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct fsa9480_usbsw *usbsw;
+ int ret = 0;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ usbsw = kzalloc(sizeof(struct fsa9480_usbsw), GFP_KERNEL);
+ if (!usbsw) {
+ dev_err(&client->dev, "failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ usbsw->client = client;
+ usbsw->pdata = client->dev.platform_data;
+
+ chip = usbsw;
+
+ i2c_set_clientdata(client, usbsw);
+
+ ret = fsa9480_irq_init(usbsw);
+ if (ret)
+ goto fail1;
+
+ ret = sysfs_create_group(&client->dev.kobj, &fsa9480_group);
+ if (ret) {
+ dev_err(&client->dev,
+ "failed to create fsa9480 attribute group\n");
+ goto fail2;
+ }
+
+ /* ADC Detect Time: 500ms */
+ fsa9480_write_reg(client, FSA9480_REG_TIMING1, 0x6);
+
+ if (chip->pdata->reset_cb)
+ chip->pdata->reset_cb();
+
+ /* device detection */
+ fsa9480_detect_dev(usbsw, INT_ATTACH);
+
+ pm_runtime_set_active(&client->dev);
+
+ return 0;
+
+fail2:
+ if (client->irq)
+ free_irq(client->irq, NULL);
+fail1:
+ i2c_set_clientdata(client, NULL);
+ kfree(usbsw);
+ return ret;
+}
+
+static int __devexit fsa9480_remove(struct i2c_client *client)
+{
+ struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
+ if (client->irq)
+ free_irq(client->irq, NULL);
+ i2c_set_clientdata(client, NULL);
+
+ sysfs_remove_group(&client->dev.kobj, &fsa9480_group);
+ device_init_wakeup(&client->dev, 0);
+ kfree(usbsw);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int fsa9480_suspend(struct i2c_client *client, pm_message_t state)
+{
+ struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
+ struct fsa9480_platform_data *pdata = usbsw->pdata;
+
+ if (device_may_wakeup(&client->dev) && client->irq)
+ enable_irq_wake(client->irq);
+
+ if (pdata->usb_power)
+ pdata->usb_power(0);
+
+ return 0;
+}
+
+static int fsa9480_resume(struct i2c_client *client)
+{
+ struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client);
+ int dev1, dev2;
+
+ if (device_may_wakeup(&client->dev) && client->irq)
+ disable_irq_wake(client->irq);
+
+ /*
+ * Clear pending interrupts. Note that fsa9480_detect_dev() does
+ * what the interrupt handler does, so we don't miss a pending
+ * event and we re-enable the interrupt if there is one.
+ */
+ fsa9480_read_reg(client, FSA9480_REG_INT1);
+ fsa9480_read_reg(client, FSA9480_REG_INT2);
+
+ dev1 = fsa9480_read_reg(client, FSA9480_REG_DEV_T1);
+ dev2 = fsa9480_read_reg(client, FSA9480_REG_DEV_T2);
+
+ /* device detection */
+ fsa9480_detect_dev(usbsw, (dev1 || dev2) ? INT_ATTACH : INT_DETACH);
+
+ return 0;
+}
+
+#else
+
+#define fsa9480_suspend NULL
+#define fsa9480_resume NULL
+
+#endif /* CONFIG_PM */
+
+static const struct i2c_device_id fsa9480_id[] = {
+ {"fsa9480", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, fsa9480_id);
+
+static struct i2c_driver fsa9480_i2c_driver = {
+ .driver = {
+ .name = "fsa9480",
+ },
+ .probe = fsa9480_probe,
+ .remove = __devexit_p(fsa9480_remove),
+ .resume = fsa9480_resume,
+ .suspend = fsa9480_suspend,
+ .id_table = fsa9480_id,
+};
+
+static int __init fsa9480_init(void)
+{
+ return i2c_add_driver(&fsa9480_i2c_driver);
+}
+module_init(fsa9480_init);
+
+static void __exit fsa9480_exit(void)
+{
+ i2c_del_driver(&fsa9480_i2c_driver);
+}
+module_exit(fsa9480_exit);
+
+MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>");
+MODULE_DESCRIPTION("FSA9480 USB Switch driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 668d41e594a..df03dd3bd0e 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -270,7 +270,7 @@ ioc4_variant(struct ioc4_driver_data *idd)
return IOC4_VARIANT_PCI_RT;
}
-static void __devinit
+static void
ioc4_load_modules(struct work_struct *work)
{
request_module("sgiioc4");
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 81d7fa4ec0d..150cd7061b8 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -120,6 +120,7 @@ static int recur_count = REC_NUM_DEFAULT;
static enum cname cpoint = CN_INVALID;
static enum ctype cptype = CT_NONE;
static int count = DEFAULT_COUNT;
+static DEFINE_SPINLOCK(count_lock);
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
@@ -230,11 +231,14 @@ static const char *cp_name_to_str(enum cname name)
static int lkdtm_parse_commandline(void)
{
int i;
+ unsigned long flags;
if (cpoint_count < 1 || recur_count < 1)
return -EINVAL;
+ spin_lock_irqsave(&count_lock, flags);
count = cpoint_count;
+ spin_unlock_irqrestore(&count_lock, flags);
/* No special parameters */
if (!cpoint_type && !cpoint_name)
@@ -349,6 +353,9 @@ static void lkdtm_do_action(enum ctype which)
static void lkdtm_handler(void)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&count_lock, flags);
count--;
printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n",
cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
@@ -357,6 +364,7 @@ static void lkdtm_handler(void)
lkdtm_do_action(cptype);
count = cpoint_count;
}
+ spin_unlock_irqrestore(&count_lock, flags);
}
static int lkdtm_register_cpoint(enum cname which)
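The lkdtm hunk above serializes access to the shared 'count' state because lkdtm_handler() can run from a crash-point hook (possibly in interrupt context) while the parameter-parsing path rewrites the counter. A minimal sketch of the same spinlock idiom, with hypothetical names:

	#include <linux/spinlock.h>
	#include <linux/kernel.h>

	static DEFINE_SPINLOCK(budget_lock);	/* protects crash_budget */
	static int crash_budget = 10;

	/* May be called from interrupt context, like lkdtm_handler() above. */
	static void budget_consume(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&budget_lock, flags);
		if (--crash_budget == 0)
			crash_budget = 10;	/* re-arm, as the handler above does */
		spin_unlock_irqrestore(&budget_lock, flags);
	}

	/* Called from process context when the parameters change. */
	static void budget_reset(int new_budget)
	{
		unsigned long flags;

		spin_lock_irqsave(&budget_lock, flags);
		crash_budget = new_budget;
		spin_unlock_irqrestore(&budget_lock, flags);
	}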
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 5fe79df4483..0fd7e77bee2 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -686,6 +686,8 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
}
if (id->driver_data == 1) { /* EG20T PCH */
+ const char *board_name;
+
retval = sysfs_create_file(&pdev->dev.kobj,
&dev_attr_pch_mac.attr);
if (retval)
@@ -701,7 +703,8 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
CLKCFG_CANCLK_MASK);
/* quirk for CM-iTC board */
- if (strstr(dmi_get_system_info(DMI_BOARD_NAME), "CM-iTC"))
+ board_name = dmi_get_system_info(DMI_BOARD_NAME);
+ if (board_name && strstr(board_name, "CM-iTC"))
pch_phub_read_modify_write_reg(chip,
(unsigned int)CLKCFG_REG_OFFSET,
CLKCFG_UART_48MHZ | CLKCFG_BAUDDIV |
@@ -732,6 +735,8 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
* Device8(GbE)
*/
iowrite32(0x000a0000, chip->pch_phub_base_address + 0x14);
+ /* set the interrupt delay value */
+ iowrite32(0x25, chip->pch_phub_base_address + 0x140);
chip->pch_opt_rom_start_address =\
PCH_PHUB_ROM_START_ADDR_ML7223;
chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
@@ -749,8 +754,6 @@ static int __devinit pch_phub_probe(struct pci_dev *pdev,
* Device6(SATA 2):f
*/
iowrite32(0x0000ffa0, chip->pch_phub_base_address + 0x14);
- /* set the interrupt delay value */
- iowrite32(0x25, chip->pch_phub_base_address + 0x140);
chip->pch_opt_rom_start_address =\
PCH_PHUB_ROM_START_ADDR_ML7223;
chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
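The CM-iTC quirk fix above exists because dmi_get_system_info() returns NULL when the requested DMI field is absent, which would crash strstr(). A minimal sketch of the same defensive lookup (the helper name is hypothetical):

	#include <linux/dmi.h>
	#include <linux/string.h>
	#include <linux/types.h>

	static bool board_needs_quirk(const char *substr)
	{
		const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);

		/* DMI data may be missing entirely; never pass NULL to strstr(). */
		return board_name && strstr(board_name, substr);
	}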
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index b05db55c8c8..21b28fc6d91 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -26,7 +26,7 @@
#include <linux/sched.h>
#include <linux/mutex.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#define PHANTOM_VERSION "n0.9.8"
diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c
index bb6f9255c17..8653bd0b1a3 100644
--- a/drivers/misc/pti.c
+++ b/drivers/misc/pti.c
@@ -146,45 +146,54 @@ static void pti_write_to_aperture(struct pti_masterchannel *mc,
/**
* pti_control_frame_built_and_sent()- control frame build and send function.
*
- * @mc: The master / channel structure on which the function
- * built a control frame.
+ * @mc: The master / channel structure on which the function
+ * built a control frame.
+ * @thread_name: The thread name associated with the master / channel or
+ * 'NULL' if using the 'current' global variable.
*
* To be able to post process the PTI contents on host side, a control frame
* is added before sending any PTI content. So the host side knows on
* each PTI frame the name of the thread using a dedicated master / channel.
- * The thread name is retrieved from the 'current' global variable.
+ * The thread name is retrieved from the 'current' global variable when
+ * 'thread_name' is NULL; otherwise it is taken from the 'thread_name' parameter.
* This function builds this frame and sends it to a master ID CONTROL_ID.
* The overhead is only 32 bytes since the driver only writes to HW
* in 32 byte chunks.
*/
-
-static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc)
+static void pti_control_frame_built_and_sent(struct pti_masterchannel *mc,
+ const char *thread_name)
{
struct pti_masterchannel mccontrol = {.master = CONTROL_ID,
.channel = 0};
+ const char *thread_name_p;
const char *control_format = "%3d %3d %s";
u8 control_frame[CONTROL_FRAME_LEN];
- /*
- * Since we access the comm member in current's task_struct,
- * we only need to be as large as what 'comm' in that
- * structure is.
- */
- char comm[TASK_COMM_LEN];
+ if (!thread_name) {
+ /*
+ * Since we access the comm member in current's task_struct,
+ * we only need to be as large as what 'comm' in that
+ * structure is.
+ */
+ char comm[TASK_COMM_LEN];
- if (!in_interrupt())
- get_task_comm(comm, current);
- else
- strncpy(comm, "Interrupt", TASK_COMM_LEN);
+ if (!in_interrupt())
+ get_task_comm(comm, current);
+ else
+ strncpy(comm, "Interrupt", TASK_COMM_LEN);
- /* Absolutely ensure our buffer is zero terminated. */
- comm[TASK_COMM_LEN-1] = 0;
+ /* Absolutely ensure our buffer is zero terminated. */
+ comm[TASK_COMM_LEN-1] = 0;
+ thread_name_p = comm;
+ } else {
+ thread_name_p = thread_name;
+ }
mccontrol.channel = pti_control_channel;
pti_control_channel = (pti_control_channel + 1) & 0x7f;
snprintf(control_frame, CONTROL_FRAME_LEN, control_format, mc->master,
- mc->channel, comm);
+ mc->channel, thread_name_p);
pti_write_to_aperture(&mccontrol, control_frame, strlen(control_frame));
}
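To make the frame layout described in the kerneldoc above concrete: each control frame is just "master channel thread-name" rendered with the "%3d %3d %s" format, padded out to the 32-byte write granularity. A tiny illustration (the 32-byte buffer size is an assumption standing in for CONTROL_FRAME_LEN):

	#include <linux/kernel.h>

	static void show_control_frame_layout(void)
	{
		char frame[32];	/* assumed size; the driver uses CONTROL_FRAME_LEN */

		/* master 72, channel 5, originating thread "modem_tty" (hypothetical) */
		snprintf(frame, sizeof(frame), "%3d %3d %s", 72, 5, "modem_tty");

		pr_info("control frame: '%s'\n", frame);	/* ' 72   5 modem_tty' */
	}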
@@ -206,18 +215,20 @@ static void pti_write_full_frame_to_aperture(struct pti_masterchannel *mc,
const unsigned char *buf,
int len)
{
- pti_control_frame_built_and_sent(mc);
+ pti_control_frame_built_and_sent(mc, NULL);
pti_write_to_aperture(mc, (u8 *)buf, len);
}
/**
* get_id()- Allocate a master and channel ID.
*
- * @id_array: an array of bits representing what channel
- * id's are allocated for writing.
- * @max_ids: The max amount of available write IDs to use.
- * @base_id: The starting SW channel ID, based on the Intel
- * PTI arch.
+ * @id_array: an array of bits representing what channel
+ * id's are allocated for writing.
+ * @max_ids: The max amount of available write IDs to use.
+ * @base_id: The starting SW channel ID, based on the Intel
+ * PTI arch.
+ * @thread_name: The thread name associated with the master / channel or
+ * 'NULL' if using the 'current' global variable.
*
* Returns:
* pti_masterchannel struct with master, channel ID address
@@ -227,7 +238,10 @@ static void pti_write_full_frame_to_aperture(struct pti_masterchannel *mc,
* channel id. The bit is one if the id is taken and 0 if free. For
* every master there are 128 channel id's.
*/
-static struct pti_masterchannel *get_id(u8 *id_array, int max_ids, int base_id)
+static struct pti_masterchannel *get_id(u8 *id_array,
+ int max_ids,
+ int base_id,
+ const char *thread_name)
{
struct pti_masterchannel *mc;
int i, j, mask;
@@ -257,7 +271,7 @@ static struct pti_masterchannel *get_id(u8 *id_array, int max_ids, int base_id)
mc->master = base_id;
mc->channel = ((i & 0xf)<<3) + j;
/* write new master Id / channel Id allocation to channel control */
- pti_control_frame_built_and_sent(mc);
+ pti_control_frame_built_and_sent(mc, thread_name);
return mc;
}
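The get_id() kerneldoc above describes the allocator's bookkeeping: one bit per channel, 128 channels per master, kept in an array of bytes. A standalone sketch of such a bit-scan; the scan order is an assumption, and only the (byte, bit) to channel mapping ((i & 0xf) << 3) + j is taken from the code above:

	#include <linux/types.h>

	/* Find a free channel in a 16-byte bitmap (16 * 8 = 128 channels),
	 * mark it taken and return its number, or -1 if all are in use. */
	static int alloc_channel(u8 *id_array, int max_ids)
	{
		int i, j;

		for (i = 0; i < max_ids; i++) {
			for (j = 0; j < 8; j++) {
				u8 mask = 0x80 >> j;	/* assumed MSB-first scan */

				if (!(id_array[i] & mask)) {
					id_array[i] |= mask;
					return ((i & 0xf) << 3) + j;
				}
			}
		}
		return -1;	/* every channel of this master is taken */
	}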
@@ -273,18 +287,22 @@ static struct pti_masterchannel *get_id(u8 *id_array, int max_ids, int base_id)
* a master, channel ID address
* to write to PTI HW.
*
- * @type: 0- request Application master, channel aperture ID write address.
- * 1- request OS master, channel aperture ID write
- * address.
- * 2- request Modem master, channel aperture ID
- * write address.
- * Other values, error.
+ * @type: 0- request Application master, channel aperture ID
+ * write address.
+ * 1- request OS master, channel aperture ID write
+ * address.
+ * 2- request Modem master, channel aperture ID
+ * write address.
+ * Other values, error.
+ * @thread_name: The thread name associated with the master / channel or
+ * 'NULL' if using the 'current' global variable.
*
* Returns:
* pti_masterchannel struct
* 0 for error
*/
-struct pti_masterchannel *pti_request_masterchannel(u8 type)
+struct pti_masterchannel *pti_request_masterchannel(u8 type,
+ const char *thread_name)
{
struct pti_masterchannel *mc;
@@ -293,15 +311,18 @@ struct pti_masterchannel *pti_request_masterchannel(u8 type)
switch (type) {
case 0:
- mc = get_id(drv_data->ia_app, MAX_APP_IDS, APP_BASE_ID);
+ mc = get_id(drv_data->ia_app, MAX_APP_IDS,
+ APP_BASE_ID, thread_name);
break;
case 1:
- mc = get_id(drv_data->ia_os, MAX_OS_IDS, OS_BASE_ID);
+ mc = get_id(drv_data->ia_os, MAX_OS_IDS,
+ OS_BASE_ID, thread_name);
break;
case 2:
- mc = get_id(drv_data->ia_modem, MAX_MODEM_IDS, MODEM_BASE_ID);
+ mc = get_id(drv_data->ia_modem, MAX_MODEM_IDS,
+ MODEM_BASE_ID, thread_name);
break;
default:
mc = NULL;
@@ -317,7 +338,8 @@ EXPORT_SYMBOL_GPL(pti_request_masterchannel);
* a master, channel ID address
* used to write to PTI HW.
*
- * @mc: master, channel apeture ID address to be released.
+ * @mc: master, channel aperture ID address to be released. This
+ * will de-allocate the structure via kfree().
*/
void pti_release_masterchannel(struct pti_masterchannel *mc)
{
@@ -444,9 +466,9 @@ static void pti_tty_driver_close(struct tty_struct *tty, struct file *filp)
}
/**
- * pti_tty_intstall()- Used to set up specific master-channels
- * to tty ports for organizational purposes when
- * tracing viewed from debuging tools.
+ * pti_tty_install()- Used to set up specific master-channels
+ * to tty ports for organizational purposes when
+ * tracing is viewed from debugging tools.
*
* @driver: tty driver information.
* @tty: tty struct containing pti information.
@@ -471,12 +493,14 @@ static int pti_tty_install(struct tty_driver *driver, struct tty_struct *tty)
return -ENOMEM;
if (idx == PTITTY_MINOR_START)
- pti_tty_data->mc = pti_request_masterchannel(0);
+ pti_tty_data->mc = pti_request_masterchannel(0, NULL);
else
- pti_tty_data->mc = pti_request_masterchannel(2);
+ pti_tty_data->mc = pti_request_masterchannel(2, NULL);
- if (pti_tty_data->mc == NULL)
+ if (pti_tty_data->mc == NULL) {
+ kfree(pti_tty_data);
return -ENXIO;
+ }
tty->driver_data = pti_tty_data;
}
@@ -495,7 +519,7 @@ static void pti_tty_cleanup(struct tty_struct *tty)
if (pti_tty_data == NULL)
return;
pti_release_masterchannel(pti_tty_data->mc);
- kfree(tty->driver_data);
+ kfree(pti_tty_data);
tty->driver_data = NULL;
}
@@ -560,7 +584,7 @@ static int pti_char_open(struct inode *inode, struct file *filp)
* before assigning the value to filp->private_data.
* Slightly easier to debug if this driver needs debugging.
*/
- mc = pti_request_masterchannel(0);
+ mc = pti_request_masterchannel(0, NULL);
if (mc == NULL)
return -ENOMEM;
filp->private_data = mc;
@@ -581,7 +605,7 @@ static int pti_char_open(struct inode *inode, struct file *filp)
static int pti_char_release(struct inode *inode, struct file *filp)
{
pti_release_masterchannel(filp->private_data);
- kfree(filp->private_data);
+ filp->private_data = NULL;
return 0;
}
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index f91f82eabda..54c91ffe4a9 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -605,7 +605,7 @@ long st_unregister(struct st_proto_s *proto)
pr_debug("%s: %d ", __func__, proto->chnl_id);
st_kim_ref(&st_gdata, 0);
- if (proto->chnl_id >= ST_MAX_CHANNELS) {
+ if (!st_gdata || proto->chnl_id >= ST_MAX_CHANNELS) {
pr_err(" chnl_id %d not supported", proto->chnl_id);
return -EPROTONOSUPPORT;
}
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 5da93ee6f6b..38fd2f04c07 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -245,9 +245,9 @@ void skip_change_remote_baud(unsigned char **ptr, long *len)
pr_err("invalid action after change remote baud command");
} else {
*ptr = *ptr + sizeof(struct bts_action) +
- ((struct bts_action *)nxt_action)->size;
+ ((struct bts_action *)cur_action)->size;
*len = *len - (sizeof(struct bts_action) +
- ((struct bts_action *)nxt_action)->size);
+ ((struct bts_action *)cur_action)->size);
/* warn user on not commenting these in firmware */
pr_warn("skipping the wait event of change remote baud");
}
@@ -604,6 +604,10 @@ void st_kim_ref(struct st_data_s **core_data, int id)
struct kim_data_s *kim_gdata;
/* get kim_gdata reference from platform device */
pdev = st_get_plat_device(id);
+ if (!pdev) {
+ *core_data = NULL;
+ return;
+ }
kim_gdata = dev_get_drvdata(&pdev->dev);
*core_data = kim_gdata->core_data;
}
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 6df5a55da11..053d36caf95 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -45,7 +45,7 @@
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.2.1.2-k");
+MODULE_VERSION("1.2.1.3-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
@@ -215,7 +215,6 @@ struct vmballoon {
};
static struct vmballoon balloon;
-static struct workqueue_struct *vmballoon_wq;
/*
* Send "start" command to the host, communicating supported version
@@ -674,7 +673,12 @@ static void vmballoon_work(struct work_struct *work)
vmballoon_deflate(b);
}
- queue_delayed_work(vmballoon_wq, dwork, round_jiffies_relative(HZ));
+ /*
+ * We are using a freezable workqueue so that balloon operations are
+ * stopped while the system transitions to/from sleep/hibernation.
+ */
+ queue_delayed_work(system_freezable_wq,
+ dwork, round_jiffies_relative(HZ));
}
/*
@@ -785,12 +789,6 @@ static int __init vmballoon_init(void)
if (x86_hyper != &x86_hyper_vmware)
return -ENODEV;
- vmballoon_wq = create_freezable_workqueue("vmmemctl");
- if (!vmballoon_wq) {
- pr_err("failed to create workqueue\n");
- return -ENOMEM;
- }
-
INIT_LIST_HEAD(&balloon.pages);
INIT_LIST_HEAD(&balloon.refused_pages);
@@ -805,34 +803,27 @@ static int __init vmballoon_init(void)
*/
if (!vmballoon_send_start(&balloon)) {
pr_err("failed to send start command to the host\n");
- error = -EIO;
- goto fail;
+ return -EIO;
}
if (!vmballoon_send_guest_id(&balloon)) {
pr_err("failed to send guest ID to the host\n");
- error = -EIO;
- goto fail;
+ return -EIO;
}
error = vmballoon_debugfs_init(&balloon);
if (error)
- goto fail;
+ return error;
- queue_delayed_work(vmballoon_wq, &balloon.dwork, 0);
+ queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
return 0;
-
-fail:
- destroy_workqueue(vmballoon_wq);
- return error;
}
module_init(vmballoon_init);
static void __exit vmballoon_exit(void)
{
cancel_delayed_work_sync(&balloon.dwork);
- destroy_workqueue(vmballoon_wq);
vmballoon_debugfs_exit(&balloon);
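The vmw_balloon change above drops the driver-private workqueue in favour of system_freezable_wq, so the periodic balloon work is parked automatically while the system suspends or hibernates. A minimal sketch of that pattern with a hypothetical work function:

	#include <linux/workqueue.h>
	#include <linux/timer.h>
	#include <linux/jiffies.h>
	#include <linux/module.h>

	static void poll_fn(struct work_struct *work);
	static DECLARE_DELAYED_WORK(poll_work, poll_fn);

	static void poll_fn(struct work_struct *work)
	{
		/* ... do the periodic work ... */

		/* Re-arm on the shared freezable workqueue: no private wq is
		 * needed, and the work is frozen across suspend/hibernate. */
		queue_delayed_work(system_freezable_wq, &poll_work,
				   round_jiffies_relative(HZ));
	}

	static int __init poll_init(void)
	{
		queue_delayed_work(system_freezable_wq, &poll_work, 0);
		return 0;
	}

	static void __exit poll_exit(void)
	{
		cancel_delayed_work_sync(&poll_work);
	}

	module_init(poll_init);
	module_exit(poll_exit);
	MODULE_LICENSE("GPL");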
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 71da5641e25..1ff5486213f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -106,6 +106,16 @@ struct mmc_blk_data {
static DEFINE_MUTEX(open_lock);
+enum mmc_blk_status {
+ MMC_BLK_SUCCESS = 0,
+ MMC_BLK_PARTIAL,
+ MMC_BLK_RETRY,
+ MMC_BLK_RETRY_SINGLE,
+ MMC_BLK_DATA_ERR,
+ MMC_BLK_CMD_ERR,
+ MMC_BLK_ABORT,
+};
+
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
@@ -427,14 +437,6 @@ static const struct block_device_operations mmc_bdops = {
#endif
};
-struct mmc_blk_request {
- struct mmc_request mrq;
- struct mmc_command sbc;
- struct mmc_command cmd;
- struct mmc_command stop;
- struct mmc_data data;
-};
-
static inline int mmc_blk_part_switch(struct mmc_card *card,
struct mmc_blk_data *md)
{
@@ -525,7 +527,20 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
return result;
}
-static u32 get_card_status(struct mmc_card *card, struct request *req)
+static int send_stop(struct mmc_card *card, u32 *status)
+{
+ struct mmc_command cmd = {0};
+ int err;
+
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 5);
+ if (err == 0)
+ *status = cmd.resp[0];
+ return err;
+}
+
+static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
struct mmc_command cmd = {0};
int err;
@@ -534,11 +549,141 @@ static u32 get_card_status(struct mmc_card *card, struct request *req)
if (!mmc_host_is_spi(card->host))
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ err = mmc_wait_for_cmd(card->host, &cmd, retries);
+ if (err == 0)
+ *status = cmd.resp[0];
+ return err;
+}
+
+#define ERR_RETRY 2
+#define ERR_ABORT 1
+#define ERR_CONTINUE 0
+
+static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
+ bool status_valid, u32 status)
+{
+ switch (error) {
+ case -EILSEQ:
+ /* response crc error, retry the r/w cmd */
+ pr_err("%s: %s sending %s command, card status %#x\n",
+ req->rq_disk->disk_name, "response CRC error",
+ name, status);
+ return ERR_RETRY;
+
+ case -ETIMEDOUT:
+ pr_err("%s: %s sending %s command, card status %#x\n",
+ req->rq_disk->disk_name, "timed out", name, status);
+
+ /* If the status cmd initially failed, retry the r/w cmd */
+ if (!status_valid)
+ return ERR_RETRY;
+
+ /*
+ * If it was an r/w cmd CRC error, or an illegal command
+ * (e.g. issued in the wrong state), then retry - we should
+ * have corrected the state problem above.
+ */
+ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+ return ERR_RETRY;
+
+ /* Otherwise abort the command */
+ return ERR_ABORT;
+
+ default:
+ /* We don't understand the error code the driver gave us */
+ pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
+ req->rq_disk->disk_name, error, status);
+ return ERR_ABORT;
+ }
+}
+
+/*
+ * Initial r/w and stop cmd error recovery.
+ * We don't know whether the card received the r/w cmd or not, so try to
+ * restore things back to a sane state. Essentially, we do this as follows:
+ * - Obtain card status. If the first attempt to obtain card status fails,
+ * the status word will reflect the failed status cmd, not the failed
+ * r/w cmd. If we fail to obtain card status, it suggests we can no
+ * longer communicate with the card.
+ * - Check the card state. If the card received the cmd but there was a
+ * transient problem with the response, it might still be in a data transfer
+ * mode. Try to send it a stop command. If this fails, we can't recover.
+ * - If the r/w cmd failed due to a response CRC error, it was probably
+ * transient, so retry the cmd.
+ * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
+ * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
+ * illegal cmd, retry.
+ * Otherwise we don't understand what happened, so abort.
+ */
+static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+ struct mmc_blk_request *brq)
+{
+ bool prev_cmd_status_valid = true;
+ u32 status, stop_status = 0;
+ int err, retry;
+
+ /*
+ * Try to get card status which indicates both the card state
+ * and why there was no response. If the first attempt fails,
+ * we can't be sure the returned status is for the r/w command.
+ */
+ for (retry = 2; retry >= 0; retry--) {
+ err = get_card_status(card, &status, 0);
+ if (!err)
+ break;
+
+ prev_cmd_status_valid = false;
+ pr_err("%s: error %d sending status command, %sing\n",
+ req->rq_disk->disk_name, err, retry ? "retry" : "abort");
+ }
+
+ /* We couldn't get a response from the card. Give up. */
if (err)
- printk(KERN_ERR "%s: error %d sending status command",
- req->rq_disk->disk_name, err);
- return cmd.resp[0];
+ return ERR_ABORT;
+
+ /*
+ * Check the current card state. If it is in some data transfer
+ * mode, tell it to stop (and hopefully transition back to TRAN.)
+ */
+ if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
+ R1_CURRENT_STATE(status) == R1_STATE_RCV) {
+ err = send_stop(card, &stop_status);
+ if (err)
+ pr_err("%s: error %d sending stop command\n",
+ req->rq_disk->disk_name, err);
+
+ /*
+ * If the stop cmd also timed out, the card is probably
+ * not present, so abort. Other errors are bad news too.
+ */
+ if (err)
+ return ERR_ABORT;
+ }
+
+ /* Check for set block count errors */
+ if (brq->sbc.error)
+ return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
+ prev_cmd_status_valid, status);
+
+ /* Check for r/w command errors */
+ if (brq->cmd.error)
+ return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
+ prev_cmd_status_valid, status);
+
+ /* Now for stop errors. These aren't fatal to the transfer. */
+ pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq->stop.error,
+ brq->cmd.resp[0], status);
+
+ /*
+ * Substitute in our own stop status as this will give the error
+ * state which happened during the execution of the r/w command.
+ */
+ if (stop_status) {
+ brq->stop.resp[0] = stop_status;
+ brq->stop.error = 0;
+ }
+ return ERR_CONTINUE;
}
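The recovery logic above branches on the card state encoded in the R1 status word: the stop command is only needed if the card is still in a data-transfer state. For reference, a small sketch of how that field decodes, using the R1_* helpers from <linux/mmc/mmc.h>:

	#include <linux/types.h>
	#include <linux/mmc/mmc.h>

	/* R1 status: bits 12:9 carry the current card state. */
	static const char *card_state_name(u32 status)
	{
		switch (R1_CURRENT_STATE(status)) {
		case R1_STATE_TRAN: return "tran";	/* idle, ready for a new cmd */
		case R1_STATE_DATA: return "data";	/* sending data to the host  */
		case R1_STATE_RCV:  return "rcv";	/* receiving data from host  */
		case R1_STATE_PRG:  return "prg";	/* programming a write       */
		default:            return "other";
		}
	}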
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
@@ -669,240 +814,324 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
}
}
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+#define CMD_ERRORS \
+ (R1_OUT_OF_RANGE | /* Command argument out of range */ \
+ R1_ADDRESS_ERROR | /* Misaligned address */ \
+ R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
+ R1_WP_VIOLATION | /* Tried to write to protected block */ \
+ R1_CC_ERROR | /* Card controller error */ \
+ R1_ERROR) /* General/unknown error */
+
+static int mmc_blk_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
{
- struct mmc_blk_data *md = mq->data;
- struct mmc_card *card = md->queue.card;
- struct mmc_blk_request brq;
- int ret = 1, disable_multi = 0;
+ enum mmc_blk_status ret = MMC_BLK_SUCCESS;
+ struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct mmc_blk_request *brq = &mq_mrq->brq;
+ struct request *req = mq_mrq->req;
/*
- * Reliable writes are used to implement Forced Unit Access and
- * REQ_META accesses, and are supported only on MMCs.
+ * sbc.error indicates a problem with the set block count
+ * command. No data will have been transferred.
+ *
+ * cmd.error indicates a problem with the r/w command. No
+ * data will have been transferred.
+ *
+ * stop.error indicates a problem with the stop command. Data
+ * may have been transferred, or may still be transferring.
*/
- bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
- (req->cmd_flags & REQ_META)) &&
- (rq_data_dir(req) == WRITE) &&
- (md->flags & MMC_BLK_REL_WR);
+ if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
+ switch (mmc_blk_cmd_recovery(card, req, brq)) {
+ case ERR_RETRY:
+ return MMC_BLK_RETRY;
+ case ERR_ABORT:
+ return MMC_BLK_ABORT;
+ case ERR_CONTINUE:
+ break;
+ }
+ }
- do {
- struct mmc_command cmd = {0};
- u32 readcmd, writecmd, status = 0;
-
- memset(&brq, 0, sizeof(struct mmc_blk_request));
- brq.mrq.cmd = &brq.cmd;
- brq.mrq.data = &brq.data;
-
- brq.cmd.arg = blk_rq_pos(req);
- if (!mmc_card_blockaddr(card))
- brq.cmd.arg <<= 9;
- brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- brq.data.blksz = 512;
- brq.stop.opcode = MMC_STOP_TRANSMISSION;
- brq.stop.arg = 0;
- brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- brq.data.blocks = blk_rq_sectors(req);
+ /*
+ * Check for errors relating to the execution of the
+ * initial command - such as address errors. No data
+ * has been transferred.
+ */
+ if (brq->cmd.resp[0] & CMD_ERRORS) {
+ pr_err("%s: r/w command failed, status = %#x\n",
+ req->rq_disk->disk_name, brq->cmd.resp[0]);
+ return MMC_BLK_ABORT;
+ }
- /*
- * The block layer doesn't support all sector count
- * restrictions, so we need to be prepared for too big
- * requests.
- */
- if (brq.data.blocks > card->host->max_blk_count)
- brq.data.blocks = card->host->max_blk_count;
+ /*
+ * Everything else is either success, or a data error of some
+ * kind. If it was a write, we may have transitioned to
+ * program mode, which we have to wait for to complete.
+ */
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+ u32 status;
+ do {
+ int err = get_card_status(card, &status, 5);
+ if (err) {
+ printk(KERN_ERR "%s: error %d requesting status\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_CMD_ERR;
+ }
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+ * indication and the card state.
+ */
+ } while (!(status & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+ }
- /*
- * After a read error, we redo the request one sector at a time
- * in order to accurately determine which sectors can be read
- * successfully.
- */
- if (disable_multi && brq.data.blocks > 1)
- brq.data.blocks = 1;
+ if (brq->data.error) {
+ pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq->data.error,
+ (unsigned)blk_rq_pos(req),
+ (unsigned)blk_rq_sectors(req),
+ brq->cmd.resp[0], brq->stop.resp[0]);
- if (brq.data.blocks > 1 || do_rel_wr) {
- /* SPI multiblock writes terminate using a special
- * token, not a STOP_TRANSMISSION request.
- */
- if (!mmc_host_is_spi(card->host) ||
- rq_data_dir(req) == READ)
- brq.mrq.stop = &brq.stop;
- readcmd = MMC_READ_MULTIPLE_BLOCK;
- writecmd = MMC_WRITE_MULTIPLE_BLOCK;
- } else {
- brq.mrq.stop = NULL;
- readcmd = MMC_READ_SINGLE_BLOCK;
- writecmd = MMC_WRITE_BLOCK;
- }
if (rq_data_dir(req) == READ) {
- brq.cmd.opcode = readcmd;
- brq.data.flags |= MMC_DATA_READ;
+ if (brq->data.blocks > 1) {
+ /* Redo read one sector at a time */
+ pr_warning("%s: retrying using single block read\n",
+ req->rq_disk->disk_name);
+ return MMC_BLK_RETRY_SINGLE;
+ }
+ return MMC_BLK_DATA_ERR;
} else {
- brq.cmd.opcode = writecmd;
- brq.data.flags |= MMC_DATA_WRITE;
+ return MMC_BLK_CMD_ERR;
}
+ }
- if (do_rel_wr)
- mmc_apply_rel_rw(&brq, card, req);
+ if (ret == MMC_BLK_SUCCESS &&
+ blk_rq_bytes(req) != brq->data.bytes_xfered)
+ ret = MMC_BLK_PARTIAL;
- /*
- * Pre-defined multi-block transfers are preferable to
- * open ended-ones (and necessary for reliable writes).
- * However, it is not sufficient to just send CMD23,
- * and avoid the final CMD12, as on an error condition
- * CMD12 (stop) needs to be sent anyway. This, coupled
- * with Auto-CMD23 enhancements provided by some
- * hosts, means that the complexity of dealing
- * with this is best left to the host. If CMD23 is
- * supported by card and host, we'll fill sbc in and let
- * the host deal with handling it correctly. This means
- * that for hosts that don't expose MMC_CAP_CMD23, no
- * change of behavior will be observed.
- *
- * N.B: Some MMC cards experience perf degradation.
- * We'll avoid using CMD23-bounded multiblock writes for
- * these, while retaining features like reliable writes.
- */
+ return ret;
+}
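After a write, the code above polls the card until R1_READY_FOR_DATA is set and the state field has left PRG, checking both because some cards mishandle one of the two indications. A hedged sketch of that loop as a standalone helper; unlike the in-tree loop it adds a timeout for illustration, and it reuses the get_card_status() helper introduced above:

	#include <linux/mmc/card.h>
	#include <linux/mmc/mmc.h>
	#include <linux/jiffies.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	/* Hypothetical helper: wait until the card is back in transfer state. */
	static int wait_card_ready(struct mmc_card *card, unsigned long timeout_ms)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
		u32 status;
		int err;

		do {
			err = get_card_status(card, &status, 5);
			if (err)
				return err;

			/* Check both the busy bit and the state field. */
			if ((status & R1_READY_FOR_DATA) &&
			    R1_CURRENT_STATE(status) != R1_STATE_PRG)
				return 0;

			usleep_range(1000, 2000);
		} while (time_before(jiffies, deadline));

		return -ETIMEDOUT;
	}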
- if ((md->flags & MMC_BLK_CMD23) &&
- mmc_op_multi(brq.cmd.opcode) &&
- (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
- brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
- brq.sbc.arg = brq.data.blocks |
- (do_rel_wr ? (1 << 31) : 0);
- brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
- brq.mrq.sbc = &brq.sbc;
- }
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ int disable_multi,
+ struct mmc_queue *mq)
+{
+ u32 readcmd, writecmd;
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+ struct mmc_blk_data *md = mq->data;
- mmc_set_data_timeout(&brq.data, card);
+ /*
+ * Reliable writes are used to implement Forced Unit Access and
+ * REQ_META accesses, and are supported only on MMCs.
+ */
+ bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+ (req->cmd_flags & REQ_META)) &&
+ (rq_data_dir(req) == WRITE) &&
+ (md->flags & MMC_BLK_REL_WR);
- brq.data.sg = mq->sg;
- brq.data.sg_len = mmc_queue_map_sg(mq);
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
- /*
- * Adjust the sg list so it is the same size as the
- * request.
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+ brq->data.blksz = 512;
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ brq->data.blocks = blk_rq_sectors(req);
+
+ /*
+ * The block layer doesn't support all sector count
+ * restrictions, so we need to be prepared for too big
+ * requests.
+ */
+ if (brq->data.blocks > card->host->max_blk_count)
+ brq->data.blocks = card->host->max_blk_count;
+
+ /*
+ * After a read error, we redo the request one sector at a time
+ * in order to accurately determine which sectors can be read
+ * successfully.
+ */
+ if (disable_multi && brq->data.blocks > 1)
+ brq->data.blocks = 1;
+
+ if (brq->data.blocks > 1 || do_rel_wr) {
+ /* SPI multiblock writes terminate using a special
+ * token, not a STOP_TRANSMISSION request.
*/
- if (brq.data.blocks != blk_rq_sectors(req)) {
- int i, data_size = brq.data.blocks << 9;
- struct scatterlist *sg;
-
- for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
- data_size -= sg->length;
- if (data_size <= 0) {
- sg->length += data_size;
- i++;
- break;
- }
- }
- brq.data.sg_len = i;
- }
+ if (!mmc_host_is_spi(card->host) ||
+ rq_data_dir(req) == READ)
+ brq->mrq.stop = &brq->stop;
+ readcmd = MMC_READ_MULTIPLE_BLOCK;
+ writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+ } else {
+ brq->mrq.stop = NULL;
+ readcmd = MMC_READ_SINGLE_BLOCK;
+ writecmd = MMC_WRITE_BLOCK;
+ }
+ if (rq_data_dir(req) == READ) {
+ brq->cmd.opcode = readcmd;
+ brq->data.flags |= MMC_DATA_READ;
+ } else {
+ brq->cmd.opcode = writecmd;
+ brq->data.flags |= MMC_DATA_WRITE;
+ }
- mmc_queue_bounce_pre(mq);
+ if (do_rel_wr)
+ mmc_apply_rel_rw(brq, card, req);
- mmc_wait_for_req(card->host, &brq.mrq);
+ /*
+ * Pre-defined multi-block transfers are preferable to
+ * open-ended ones (and necessary for reliable writes).
+ * However, it is not sufficient to just send CMD23,
+ * and avoid the final CMD12, as on an error condition
+ * CMD12 (stop) needs to be sent anyway. This, coupled
+ * with Auto-CMD23 enhancements provided by some
+ * hosts, means that the complexity of dealing
+ * with this is best left to the host. If CMD23 is
+ * supported by card and host, we'll fill sbc in and let
+ * the host deal with handling it correctly. This means
+ * that for hosts that don't expose MMC_CAP_CMD23, no
+ * change of behavior will be observed.
+ *
+ * N.B: Some MMC cards experience perf degradation.
+ * We'll avoid using CMD23-bounded multiblock writes for
+ * these, while retaining features like reliable writes.
+ */
- mmc_queue_bounce_post(mq);
+ if ((md->flags & MMC_BLK_CMD23) &&
+ mmc_op_multi(brq->cmd.opcode) &&
+ (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = brq->data.blocks |
+ (do_rel_wr ? (1 << 31) : 0);
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ brq->mrq.sbc = &brq->sbc;
+ }
- /*
- * Check for errors here, but don't jump to cmd_err
- * until later as we need to wait for the card to leave
- * programming mode even when things go wrong.
- */
- if (brq.sbc.error || brq.cmd.error ||
- brq.data.error || brq.stop.error) {
- if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
- /* Redo read one sector at a time */
- printk(KERN_WARNING "%s: retrying using single "
- "block read\n", req->rq_disk->disk_name);
- disable_multi = 1;
- continue;
- }
- status = get_card_status(card, req);
- }
+ mmc_set_data_timeout(&brq->data, card);
- if (brq.sbc.error) {
- printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
- "command, response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.sbc.error,
- brq.sbc.resp[0], status);
- }
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
- if (brq.cmd.error) {
- printk(KERN_ERR "%s: error %d sending read/write "
- "command, response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.cmd.error,
- brq.cmd.resp[0], status);
+ /*
+ * Adjust the sg list so it is the same size as the
+ * request.
+ */
+ if (brq->data.blocks != blk_rq_sectors(req)) {
+ int i, data_size = brq->data.blocks << 9;
+ struct scatterlist *sg;
+
+ for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+ data_size -= sg->length;
+ if (data_size <= 0) {
+ sg->length += data_size;
+ i++;
+ break;
+ }
}
+ brq->data.sg_len = i;
+ }
- if (brq.data.error) {
- if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
- /* 'Stop' response contains card status */
- status = brq.mrq.stop->resp[0];
- printk(KERN_ERR "%s: error %d transferring data,"
- " sector %u, nr %u, card status %#x\n",
- req->rq_disk->disk_name, brq.data.error,
- (unsigned)blk_rq_pos(req),
- (unsigned)blk_rq_sectors(req), status);
- }
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_err_check;
- if (brq.stop.error) {
- printk(KERN_ERR "%s: error %d sending stop command, "
- "response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.stop.error,
- brq.stop.resp[0], status);
- }
+ mmc_queue_bounce_pre(mqrq);
+}
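mmc_blk_rw_rq_prep() above may clamp brq->data.blocks (host limits, single-block retries) after the sg list has already been mapped, so it trims the list back to the request's real byte count. The trimming step in isolation, as a sketch:

	#include <linux/scatterlist.h>

	/* Shrink a mapped sg list so it covers exactly 'bytes' bytes: shorten
	 * the entry that crosses the budget and drop the rest by returning a
	 * smaller sg_len. Mirrors the adjustment done in the prep code above. */
	static unsigned int sg_trim_to_bytes(struct scatterlist *sgl,
					     unsigned int sg_len, int bytes)
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(sgl, sg, sg_len, i) {
			bytes -= sg->length;
			if (bytes <= 0) {
				sg->length += bytes;	/* bytes is <= 0 here */
				return i + 1;		/* new sg_len */
			}
		}
		return sg_len;				/* budget exceeds the list */
	}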
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
- do {
- int err;
-
- cmd.opcode = MMC_SEND_STATUS;
- cmd.arg = card->rca << 16;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 5);
- if (err) {
- printk(KERN_ERR "%s: error %d requesting status\n",
- req->rq_disk->disk_name, err);
- goto cmd_err;
- }
- /*
- * Some cards mishandle the status bits,
- * so make sure to check both the busy
- * indication and the card state.
- */
- } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(cmd.resp[0]) == 7));
-
-#if 0
- if (cmd.resp[0] & ~0x00000900)
- printk(KERN_ERR "%s: status = %08x\n",
- req->rq_disk->disk_name, cmd.resp[0]);
- if (mmc_decode_status(cmd.resp))
- goto cmd_err;
-#endif
- }
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
+ int ret = 1, disable_multi = 0, retry = 0;
+ enum mmc_blk_status status;
+ struct mmc_queue_req *mq_rq;
+ struct request *req;
+ struct mmc_async_req *areq;
+
+ if (!rqc && !mq->mqrq_prev->req)
+ return 0;
- if (brq.cmd.error || brq.stop.error || brq.data.error) {
- if (rq_data_dir(req) == READ) {
+ do {
+ if (rqc) {
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ areq = &mq->mqrq_cur->mmc_active;
+ } else
+ areq = NULL;
+ areq = mmc_start_req(card->host, areq, (int *) &status);
+ if (!areq)
+ return 0;
+
+ mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+ brq = &mq_rq->brq;
+ req = mq_rq->req;
+ mmc_queue_bounce_post(mq_rq);
+
+ switch (status) {
+ case MMC_BLK_SUCCESS:
+ case MMC_BLK_PARTIAL:
+ /*
+ * A block was successfully transferred.
+ */
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0,
+ brq->data.bytes_xfered);
+ spin_unlock_irq(&md->lock);
+ if (status == MMC_BLK_SUCCESS && ret) {
/*
- * After an error, we redo I/O one sector at a
- * time, so we only reach here after trying to
- * read a single sector.
+ * blk_end_request() has returned non-zero even
+ * though all data was transferred and the host
+ * reported no errors.
+ * If this happens it is a bug.
*/
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, -EIO, brq.data.blksz);
- spin_unlock_irq(&md->lock);
- continue;
+ printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
+ __func__, blk_rq_bytes(req),
+ brq->data.bytes_xfered);
+ rqc = NULL;
+ goto cmd_abort;
}
+ break;
+ case MMC_BLK_CMD_ERR:
goto cmd_err;
+ case MMC_BLK_RETRY_SINGLE:
+ disable_multi = 1;
+ break;
+ case MMC_BLK_RETRY:
+ if (retry++ < 5)
+ break;
+ case MMC_BLK_ABORT:
+ goto cmd_abort;
+ case MMC_BLK_DATA_ERR:
+ /*
+ * After an error, we redo I/O one sector at a
+ * time, so we only reach here after trying to
+ * read a single sector.
+ */
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, -EIO,
+ brq->data.blksz);
+ spin_unlock_irq(&md->lock);
+ if (!ret)
+ goto start_new_req;
+ break;
}
- /*
- * A block was successfully transferred.
- */
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
- spin_unlock_irq(&md->lock);
+ if (ret) {
+ /*
+ * In case of an incomplete request,
+ * prepare it again and resend.
+ */
+ mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
+ mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ }
} while (ret);
return 1;
@@ -927,15 +1156,22 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
}
} else {
spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+ ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
spin_unlock_irq(&md->lock);
}
+ cmd_abort:
spin_lock_irq(&md->lock);
while (ret)
ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
spin_unlock_irq(&md->lock);
+ start_new_req:
+ if (rqc) {
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
+ }
+
return 0;
}
@@ -945,26 +1181,37 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
- mmc_claim_host(card->host);
+ if (req && !mq->mqrq_prev->req)
+ /* claim host only for the first request */
+ mmc_claim_host(card->host);
+
ret = mmc_blk_part_switch(card, md);
if (ret) {
ret = 0;
goto out;
}
- if (req->cmd_flags & REQ_DISCARD) {
+ if (req && req->cmd_flags & REQ_DISCARD) {
+ /* complete ongoing async transfer before issuing discard */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
if (req->cmd_flags & REQ_SECURE)
ret = mmc_blk_issue_secdiscard_rq(mq, req);
else
ret = mmc_blk_issue_discard_rq(mq, req);
- } else if (req->cmd_flags & REQ_FLUSH) {
+ } else if (req && req->cmd_flags & REQ_FLUSH) {
+ /* complete ongoing async transfer before issuing flush */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
ret = mmc_blk_issue_flush(mq, req);
} else {
ret = mmc_blk_issue_rw_rq(mq, req);
}
out:
- mmc_release_host(card->host);
+ if (!req)
+ /* release host only when there are no more requests */
+ mmc_release_host(card->host);
return ret;
}
@@ -1024,7 +1271,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
INIT_LIST_HEAD(&md->part);
md->usage = 1;
- ret = mmc_init_queue(&md->queue, card, &md->lock);
+ ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
if (ret)
goto err_putdisk;
@@ -1297,6 +1544,9 @@ static void mmc_blk_remove(struct mmc_card *card)
struct mmc_blk_data *md = mmc_get_drvdata(card);
mmc_blk_remove_parts(card, md);
+ mmc_claim_host(card->host);
+ mmc_blk_part_switch(card, md);
+ mmc_release_host(card->host);
mmc_blk_remove_req(md);
mmc_set_drvdata(card, NULL);
}
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 233cdfae92f..2bf229acd3b 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -148,6 +148,27 @@ struct mmc_test_card {
struct mmc_test_general_result *gr;
};
+enum mmc_test_prep_media {
+ MMC_TEST_PREP_NONE = 0,
+ MMC_TEST_PREP_WRITE_FULL = 1 << 0,
+ MMC_TEST_PREP_ERASE = 1 << 1,
+};
+
+struct mmc_test_multiple_rw {
+ unsigned int *sg_len;
+ unsigned int *bs;
+ unsigned int len;
+ unsigned int size;
+ bool do_write;
+ bool do_nonblock_req;
+ enum mmc_test_prep_media prepare;
+};
+
+struct mmc_test_async_req {
+ struct mmc_async_req areq;
+ struct mmc_test_card *test;
+};
+
/*******************************************************************/
/* General helper functions */
/*******************************************************************/
@@ -203,7 +224,7 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
static int mmc_test_busy(struct mmc_command *cmd)
{
return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(cmd->resp[0]) == 7);
+ (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}
/*
@@ -367,21 +388,26 @@ out_free:
* Map memory into a scatterlist. Optionally allow the same memory to be
* mapped more than once.
*/
-static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
+static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
struct scatterlist *sglist, int repeat,
unsigned int max_segs, unsigned int max_seg_sz,
- unsigned int *sg_len)
+ unsigned int *sg_len, int min_sg_len)
{
struct scatterlist *sg = NULL;
unsigned int i;
+ unsigned long sz = size;
sg_init_table(sglist, max_segs);
+ if (min_sg_len > max_segs)
+ min_sg_len = max_segs;
*sg_len = 0;
do {
for (i = 0; i < mem->cnt; i++) {
unsigned long len = PAGE_SIZE << mem->arr[i].order;
+ if (min_sg_len && (size / min_sg_len < len))
+ len = ALIGN(size / min_sg_len, 512);
if (len > sz)
len = sz;
if (len > max_seg_sz)
@@ -554,11 +580,12 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
"%lu.%09lu seconds (%u kB/s, %u KiB/s, "
- "%u.%02u IOPS)\n",
+ "%u.%02u IOPS, sg_len %d)\n",
mmc_hostname(test->card->host), count, sectors, count,
sectors >> 1, (sectors & 1 ? ".5" : ""),
(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
- rate / 1000, rate / 1024, iops / 100, iops % 100);
+ rate / 1000, rate / 1024, iops / 100, iops % 100,
+ test->area.sg_len);
mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
@@ -661,7 +688,7 @@ static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
* Checks that a normal transfer didn't have any errors
*/
static int mmc_test_check_result(struct mmc_test_card *test,
- struct mmc_request *mrq)
+ struct mmc_request *mrq)
{
int ret;
@@ -685,6 +712,17 @@ static int mmc_test_check_result(struct mmc_test_card *test,
return ret;
}
+static int mmc_test_check_result_async(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ struct mmc_test_async_req *test_async =
+ container_of(areq, struct mmc_test_async_req, areq);
+
+ mmc_test_wait_busy(test_async->test);
+
+ return mmc_test_check_result(test_async->test, areq->mrq);
+}
+
/*
* Checks that a "short transfer" behaved as expected
*/
@@ -720,6 +758,85 @@ static int mmc_test_check_broken_result(struct mmc_test_card *test,
}
/*
+ * Tests nonblock transfer with certain parameters
+ */
+static void mmc_test_nonblock_reset(struct mmc_request *mrq,
+ struct mmc_command *cmd,
+ struct mmc_command *stop,
+ struct mmc_data *data)
+{
+ memset(mrq, 0, sizeof(struct mmc_request));
+ memset(cmd, 0, sizeof(struct mmc_command));
+ memset(data, 0, sizeof(struct mmc_data));
+ memset(stop, 0, sizeof(struct mmc_command));
+
+ mrq->cmd = cmd;
+ mrq->data = data;
+ mrq->stop = stop;
+}
+static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
+ struct scatterlist *sg, unsigned sg_len,
+ unsigned dev_addr, unsigned blocks,
+ unsigned blksz, int write, int count)
+{
+ struct mmc_request mrq1;
+ struct mmc_command cmd1;
+ struct mmc_command stop1;
+ struct mmc_data data1;
+
+ struct mmc_request mrq2;
+ struct mmc_command cmd2;
+ struct mmc_command stop2;
+ struct mmc_data data2;
+
+ struct mmc_test_async_req test_areq[2];
+ struct mmc_async_req *done_areq;
+ struct mmc_async_req *cur_areq = &test_areq[0].areq;
+ struct mmc_async_req *other_areq = &test_areq[1].areq;
+ int i;
+ int ret;
+
+ test_areq[0].test = test;
+ test_areq[1].test = test;
+
+ mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
+ mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
+
+ cur_areq->mrq = &mrq1;
+ cur_areq->err_check = mmc_test_check_result_async;
+ other_areq->mrq = &mrq2;
+ other_areq->err_check = mmc_test_check_result_async;
+
+ for (i = 0; i < count; i++) {
+ mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
+ blocks, blksz, write);
+ done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
+
+ if (ret || (!done_areq && i > 0))
+ goto err;
+
+ if (done_areq) {
+ if (done_areq->mrq == &mrq2)
+ mmc_test_nonblock_reset(&mrq2, &cmd2,
+ &stop2, &data2);
+ else
+ mmc_test_nonblock_reset(&mrq1, &cmd1,
+ &stop1, &data1);
+ }
+ done_areq = cur_areq;
+ cur_areq = other_areq;
+ other_areq = done_areq;
+ dev_addr += blocks;
+ }
+
+ done_areq = mmc_start_req(test->card->host, NULL, &ret);
+
+ return ret;
+err:
+ return ret;
+}
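mmc_test_nonblock_transfer() above exercises the new mmc_start_req() pipeline: each call starts the next request and returns the previously issued one once it has completed and passed its err_check callback, and a final call with NULL simply drains the last outstanding request. A compressed sketch of that calling convention (prepare_req() is hypothetical):

	#include <linux/mmc/host.h>
	#include <linux/mmc/core.h>

	/* prepare_req() is hypothetical: it would fill an mmc_async_req much
	 * like mmc_test_nonblock_reset()/mmc_test_prepare_mrq() do above. */
	static struct mmc_async_req *prepare_req(int i);

	static int pipelined_io(struct mmc_host *host, int count)
	{
		int err = 0, i;

		for (i = 0; i < count; i++) {
			/* Starts the new request; the previously issued one is
			 * returned after completing and passing err_check(). */
			mmc_start_req(host, prepare_req(i), &err);
			if (err)
				return err;
		}

		/* NULL starts nothing new; it just waits for the last request. */
		mmc_start_req(host, NULL, &err);
		return err;
	}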
+
+/*
* Tests a basic transfer with certain parameters
*/
static int mmc_test_simple_transfer(struct mmc_test_card *test,
@@ -1302,7 +1419,7 @@ static int mmc_test_no_highmem(struct mmc_test_card *test)
* Map sz bytes so that it can be transferred.
*/
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
- int max_scatter)
+ int max_scatter, int min_sg_len)
{
struct mmc_test_area *t = &test->area;
int err;
@@ -1315,7 +1432,7 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
&t->sg_len);
} else {
err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
- t->max_seg_sz, &t->sg_len);
+ t->max_seg_sz, &t->sg_len, min_sg_len);
}
if (err)
printk(KERN_INFO "%s: Failed to map sg list\n",
@@ -1336,14 +1453,17 @@ static int mmc_test_area_transfer(struct mmc_test_card *test,
}
/*
- * Map and transfer bytes.
+ * Map and transfer bytes for multiple transfers.
*/
-static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
- unsigned int dev_addr, int write, int max_scatter,
- int timed)
+static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
+ unsigned int dev_addr, int write,
+ int max_scatter, int timed, int count,
+ bool nonblock, int min_sg_len)
{
struct timespec ts1, ts2;
- int ret;
+ int ret = 0;
+ int i;
+ struct mmc_test_area *t = &test->area;
/*
* In the case of a maximally scattered transfer, the maximum transfer
@@ -1361,14 +1481,21 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
sz = max_tfr;
}
- ret = mmc_test_area_map(test, sz, max_scatter);
+ ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
if (ret)
return ret;
if (timed)
getnstimeofday(&ts1);
+ if (nonblock)
+ ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
+ dev_addr, t->blocks, 512, write, count);
+ else
+ for (i = 0; i < count && ret == 0; i++) {
+ ret = mmc_test_area_transfer(test, dev_addr, write);
+ dev_addr += sz >> 9;
+ }
- ret = mmc_test_area_transfer(test, dev_addr, write);
if (ret)
return ret;
@@ -1376,11 +1503,19 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
getnstimeofday(&ts2);
if (timed)
- mmc_test_print_rate(test, sz, &ts1, &ts2);
+ mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
return 0;
}
+static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
+ unsigned int dev_addr, int write, int max_scatter,
+ int timed)
+{
+ return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
+ timed, 1, false, 0);
+}
+
/*
* Write the test area entirely.
*/
@@ -1954,6 +2089,245 @@ static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
return mmc_test_large_seq_perf(test, 1);
}
+static int mmc_test_rw_multiple(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *tdata,
+ unsigned int reqsize, unsigned int size,
+ int min_sg_len)
+{
+ unsigned int dev_addr;
+ struct mmc_test_area *t = &test->area;
+ int ret = 0;
+
+ /* Set up test area */
+ if (size > mmc_test_capacity(test->card) / 2 * 512)
+ size = mmc_test_capacity(test->card) / 2 * 512;
+ if (reqsize > t->max_tfr)
+ reqsize = t->max_tfr;
+ dev_addr = mmc_test_capacity(test->card) / 4;
+ if ((dev_addr & 0xffff0000))
+ dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
+ else
+ dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
+ if (!dev_addr)
+ goto err;
+
+ if (reqsize > size)
+ return 0;
+
+ /* prepare test area */
+ if (mmc_can_erase(test->card) &&
+ tdata->prepare & MMC_TEST_PREP_ERASE) {
+ ret = mmc_erase(test->card, dev_addr,
+ size / 512, MMC_SECURE_ERASE_ARG);
+ if (ret)
+ ret = mmc_erase(test->card, dev_addr,
+ size / 512, MMC_ERASE_ARG);
+ if (ret)
+ goto err;
+ }
+
+ /* Run test */
+ ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
+ tdata->do_write, 0, 1, size / reqsize,
+ tdata->do_nonblock_req, min_sg_len);
+ if (ret)
+ goto err;
+
+ return ret;
+ err:
+ printk(KERN_INFO "[%s] error\n", __func__);
+ return ret;
+}
+
+static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *rw)
+{
+ int ret = 0;
+ int i;
+ void *pre_req = test->card->host->ops->pre_req;
+ void *post_req = test->card->host->ops->post_req;
+
+ if (rw->do_nonblock_req &&
+ ((!pre_req && post_req) || (pre_req && !post_req))) {
+ printk(KERN_INFO "error: only one of pre/post is defined\n");
+ return -EINVAL;
+ }
+
+ for (i = 0 ; i < rw->len && ret == 0; i++) {
+ ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *rw)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0 ; i < rw->len && ret == 0; i++) {
+ ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
+ rw->sg_len[i]);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Multiple blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = true,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+};
+
+/*
+ * Multiple non-blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = true,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = false,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = false,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = true,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+};
+
+/*
+ * Multiple non-blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = true,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = false,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = false,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
static const struct mmc_test_case mmc_test_cases[] = {
{
.name = "Basic write (no data verification)",
@@ -2221,6 +2595,61 @@ static const struct mmc_test_case mmc_test_cases[] = {
.cleanup = mmc_test_area_cleanup,
},
+ {
+ .name = "Write performance with blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_write_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance with non-blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_write_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance with blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_read_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance with non-blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_read_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_wr_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance non-blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_wr_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_r_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance non-blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_r_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
};
static DEFINE_MUTEX(mmc_test_lock);
@@ -2445,7 +2874,33 @@ static const struct file_operations mmc_test_fops_test = {
.release = single_release,
};
-static void mmc_test_free_file_test(struct mmc_card *card)
+static int mtf_testlist_show(struct seq_file *sf, void *data)
+{
+ int i;
+
+ mutex_lock(&mmc_test_lock);
+
+ for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
+ seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
+
+ mutex_unlock(&mmc_test_lock);
+
+ return 0;
+}
+
+static int mtf_testlist_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtf_testlist_show, inode->i_private);
+}
+
+static const struct file_operations mmc_test_fops_testlist = {
+ .open = mtf_testlist_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
struct mmc_test_dbgfs_file *df, *dfs;
@@ -2462,23 +2917,21 @@ static void mmc_test_free_file_test(struct mmc_card *card)
mutex_unlock(&mmc_test_lock);
}
-static int mmc_test_register_file_test(struct mmc_card *card)
+static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
+ const char *name, mode_t mode, const struct file_operations *fops)
{
struct dentry *file = NULL;
struct mmc_test_dbgfs_file *df;
- int ret = 0;
-
- mutex_lock(&mmc_test_lock);
if (card->debugfs_root)
- file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
- card->debugfs_root, card, &mmc_test_fops_test);
+ file = debugfs_create_file(name, mode, card->debugfs_root,
+ card, fops);
if (IS_ERR_OR_NULL(file)) {
dev_err(&card->dev,
- "Can't create file. Perhaps debugfs is disabled.\n");
- ret = -ENODEV;
- goto err;
+ "Can't create %s. Perhaps debugfs is disabled.\n",
+ name);
+ return -ENODEV;
}
df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
@@ -2486,14 +2939,31 @@ static int mmc_test_register_file_test(struct mmc_card *card)
debugfs_remove(file);
dev_err(&card->dev,
"Can't allocate memory for internal usage.\n");
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
df->card = card;
df->file = file;
list_add(&df->link, &mmc_test_file_test);
+ return 0;
+}
+
+static int mmc_test_register_dbgfs_file(struct mmc_card *card)
+{
+ int ret;
+
+ mutex_lock(&mmc_test_lock);
+
+ ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
+ &mmc_test_fops_test);
+ if (ret)
+ goto err;
+
+ ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
+ &mmc_test_fops_testlist);
+ if (ret)
+ goto err;
err:
mutex_unlock(&mmc_test_lock);
@@ -2508,7 +2978,7 @@ static int mmc_test_probe(struct mmc_card *card)
if (!mmc_card_mmc(card) && !mmc_card_sd(card))
return -ENODEV;
- ret = mmc_test_register_file_test(card);
+ ret = mmc_test_register_dbgfs_file(card);
if (ret)
return ret;
@@ -2520,7 +2990,7 @@ static int mmc_test_probe(struct mmc_card *card)
static void mmc_test_remove(struct mmc_card *card)
{
mmc_test_free_result(card);
- mmc_test_free_file_test(card);
+ mmc_test_free_dbgfs_file(card);
}
static struct mmc_driver mmc_driver = {
@@ -2540,7 +3010,7 @@ static void __exit mmc_test_exit(void)
{
/* Clear stalled data if card is still plugged */
mmc_test_free_result(NULL);
- mmc_test_free_file_test(NULL);
+ mmc_test_free_dbgfs_file(NULL);
mmc_unregister_driver(&mmc_driver);
}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index c07322c2658..45fb362e3f0 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -52,14 +52,18 @@ static int mmc_queue_thread(void *d)
down(&mq->thread_sem);
do {
struct request *req = NULL;
+ struct mmc_queue_req *tmp;
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
req = blk_fetch_request(q);
- mq->req = req;
+ mq->mqrq_cur->req = req;
spin_unlock_irq(q->queue_lock);
- if (!req) {
+ if (req || mq->mqrq_prev->req) {
+ set_current_state(TASK_RUNNING);
+ mq->issue_fn(mq, req);
+ } else {
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
break;
@@ -67,11 +71,14 @@ static int mmc_queue_thread(void *d)
up(&mq->thread_sem);
schedule();
down(&mq->thread_sem);
- continue;
}
- set_current_state(TASK_RUNNING);
- mq->issue_fn(mq, req);
+ /* Current request becomes previous request and vice versa. */
+ mq->mqrq_prev->brq.mrq.data = NULL;
+ mq->mqrq_prev->req = NULL;
+ tmp = mq->mqrq_prev;
+ mq->mqrq_prev = mq->mqrq_cur;
+ mq->mqrq_cur = tmp;
} while (1);
up(&mq->thread_sem);
@@ -97,23 +104,63 @@ static void mmc_request(struct request_queue *q)
return;
}
- if (!mq->req)
+ if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
wake_up_process(mq->thread);
}
+struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+{
+ struct scatterlist *sg;
+
+ sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+ if (!sg)
+ *err = -ENOMEM;
+ else {
+ *err = 0;
+ sg_init_table(sg, sg_len);
+ }
+
+ return sg;
+}
+
+static void mmc_queue_setup_discard(struct request_queue *q,
+ struct mmc_card *card)
+{
+ unsigned max_discard;
+
+ max_discard = mmc_calc_max_discard(card);
+ if (!max_discard)
+ return;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ q->limits.max_discard_sectors = max_discard;
+ if (card->erased_byte == 0)
+ q->limits.discard_zeroes_data = 1;
+ q->limits.discard_granularity = card->pref_erase << 9;
+ /* granularity must not be greater than max. discard */
+ if (card->pref_erase > max_discard)
+ q->limits.discard_granularity = 0;
+ if (mmc_can_secure_erase_trim(card))
+ queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
* @card: mmc card to attach this queue
* @lock: queue lock
+ * @subname: partition subname
*
* Initialise a MMC card request queue.
*/
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock, const char *subname)
{
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
int ret;
+ struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = *mmc_dev(host)->dma_mask;
@@ -123,26 +170,16 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
if (!mq->queue)
return -ENOMEM;
+ memset(mqrq_cur, 0, sizeof(*mqrq_cur));
+ memset(mqrq_prev, 0, sizeof(*mqrq_prev));
+ mq->mqrq_cur = mqrq_cur;
+ mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
- mq->req = NULL;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
- if (mmc_can_erase(card)) {
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
- mq->queue->limits.max_discard_sectors = UINT_MAX;
- if (card->erased_byte == 0)
- mq->queue->limits.discard_zeroes_data = 1;
- if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
- mq->queue->limits.discard_granularity =
- card->erase_size << 9;
- mq->queue->limits.discard_alignment =
- card->erase_size << 9;
- }
- if (mmc_can_secure_erase_trim(card))
- queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
- mq->queue);
- }
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
#ifdef CONFIG_MMC_BLOCK_BOUNCE
if (host->max_segs == 1) {
@@ -158,59 +195,70 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
bouncesz = host->max_blk_count * 512;
if (bouncesz > 512) {
- mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
- if (!mq->bounce_buf) {
+ mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mqrq_cur->bounce_buf) {
+ printk(KERN_WARNING "%s: unable to "
+ "allocate bounce cur buffer\n",
+ mmc_card_name(card));
+ }
+ mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mqrq_prev->bounce_buf) {
printk(KERN_WARNING "%s: unable to "
- "allocate bounce buffer\n",
+ "allocate bounce prev buffer\n",
mmc_card_name(card));
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
}
}
- if (mq->bounce_buf) {
+ if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
blk_queue_max_segments(mq->queue, bouncesz / 512);
blk_queue_max_segment_size(mq->queue, bouncesz);
- mq->sg = kmalloc(sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!mq->sg) {
- ret = -ENOMEM;
+ mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->sg, 1);
- mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
- bouncesz / 512, GFP_KERNEL);
- if (!mq->bounce_sg) {
- ret = -ENOMEM;
+ mqrq_cur->bounce_sg =
+ mmc_alloc_sg(bouncesz / 512, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_prev->bounce_sg =
+ mmc_alloc_sg(bouncesz / 512, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->bounce_sg, bouncesz / 512);
}
}
#endif
- if (!mq->bounce_buf) {
+ if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- mq->sg = kmalloc(sizeof(struct scatterlist) *
- host->max_segs, GFP_KERNEL);
- if (!mq->sg) {
- ret = -ENOMEM;
+ mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->sg, host->max_segs);
}
sema_init(&mq->thread_sem, 1);
- mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d",
- host->index);
+ mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
+ host->index, subname ? subname : "");
if (IS_ERR(mq->thread)) {
ret = PTR_ERR(mq->thread);
@@ -219,16 +267,22 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
return 0;
free_bounce_sg:
- if (mq->bounce_sg)
- kfree(mq->bounce_sg);
- mq->bounce_sg = NULL;
+ kfree(mqrq_cur->bounce_sg);
+ mqrq_cur->bounce_sg = NULL;
+ kfree(mqrq_prev->bounce_sg);
+ mqrq_prev->bounce_sg = NULL;
+
cleanup_queue:
- if (mq->sg)
- kfree(mq->sg);
- mq->sg = NULL;
- if (mq->bounce_buf)
- kfree(mq->bounce_buf);
- mq->bounce_buf = NULL;
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
+
+ kfree(mqrq_prev->sg);
+ mqrq_prev->sg = NULL;
+ kfree(mqrq_prev->bounce_buf);
+ mqrq_prev->bounce_buf = NULL;
+
blk_cleanup_queue(mq->queue);
return ret;
}
@@ -237,6 +291,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
+ struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+ struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
/* Make sure the queue isn't suspended, as that will deadlock */
mmc_queue_resume(mq);
@@ -250,16 +306,23 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- if (mq->bounce_sg)
- kfree(mq->bounce_sg);
- mq->bounce_sg = NULL;
+ kfree(mqrq_cur->bounce_sg);
+ mqrq_cur->bounce_sg = NULL;
- kfree(mq->sg);
- mq->sg = NULL;
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
- if (mq->bounce_buf)
- kfree(mq->bounce_buf);
- mq->bounce_buf = NULL;
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
+
+ kfree(mqrq_prev->bounce_sg);
+ mqrq_prev->bounce_sg = NULL;
+
+ kfree(mqrq_prev->sg);
+ mqrq_prev->sg = NULL;
+
+ kfree(mqrq_prev->bounce_buf);
+ mqrq_prev->bounce_buf = NULL;
mq->card = NULL;
}
@@ -312,27 +375,27 @@ void mmc_queue_resume(struct mmc_queue *mq)
/*
* Prepare the sg list(s) to be handed of to the host driver
*/
-unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
int i;
- if (!mq->bounce_buf)
- return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
+ if (!mqrq->bounce_buf)
+ return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
- BUG_ON(!mq->bounce_sg);
+ BUG_ON(!mqrq->bounce_sg);
- sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
+ sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
- mq->bounce_sg_len = sg_len;
+ mqrq->bounce_sg_len = sg_len;
buflen = 0;
- for_each_sg(mq->bounce_sg, sg, sg_len, i)
+ for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
buflen += sg->length;
- sg_init_one(mq->sg, mq->bounce_buf, buflen);
+ sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
return 1;
}
@@ -341,31 +404,30 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
* If writing, bounce the data to the buffer before the request
* is sent to the host driver
*/
-void mmc_queue_bounce_pre(struct mmc_queue *mq)
+void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
- if (!mq->bounce_buf)
+ if (!mqrq->bounce_buf)
return;
- if (rq_data_dir(mq->req) != WRITE)
+ if (rq_data_dir(mqrq->req) != WRITE)
return;
- sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
- mq->bounce_buf, mq->sg[0].length);
+ sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+ mqrq->bounce_buf, mqrq->sg[0].length);
}
/*
* If reading, bounce the data from the buffer after the request
* has been handled by the host driver
*/
-void mmc_queue_bounce_post(struct mmc_queue *mq)
+void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
- if (!mq->bounce_buf)
+ if (!mqrq->bounce_buf)
return;
- if (rq_data_dir(mq->req) != READ)
+ if (rq_data_dir(mqrq->req) != READ)
return;
- sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
- mq->bounce_buf, mq->sg[0].length);
+ sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+ mqrq->bounce_buf, mqrq->sg[0].length);
}
-
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 64e66e0d499..d2a1eb4b9f9 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -4,28 +4,46 @@
struct request;
struct task_struct;
+struct mmc_blk_request {
+ struct mmc_request mrq;
+ struct mmc_command sbc;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+};
+
+struct mmc_queue_req {
+ struct request *req;
+ struct mmc_blk_request brq;
+ struct scatterlist *sg;
+ char *bounce_buf;
+ struct scatterlist *bounce_sg;
+ unsigned int bounce_sg_len;
+ struct mmc_async_req mmc_active;
+};
+
struct mmc_queue {
struct mmc_card *card;
struct task_struct *thread;
struct semaphore thread_sem;
unsigned int flags;
- struct request *req;
int (*issue_fn)(struct mmc_queue *, struct request *);
void *data;
struct request_queue *queue;
- struct scatterlist *sg;
- char *bounce_buf;
- struct scatterlist *bounce_sg;
- unsigned int bounce_sg_len;
+ struct mmc_queue_req mqrq[2];
+ struct mmc_queue_req *mqrq_cur;
+ struct mmc_queue_req *mqrq_prev;
};
-extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
+extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
+ const char *);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
-extern unsigned int mmc_queue_map_sg(struct mmc_queue *);
-extern void mmc_queue_bounce_pre(struct mmc_queue *);
-extern void mmc_queue_bounce_post(struct mmc_queue *);
+extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
+ struct mmc_queue_req *);
+extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
+extern void mmc_queue_bounce_post(struct mmc_queue_req *);
#endif
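A hedged sketch of how the re-typed helpers above are intended to be driven by the block driver; the real issue path lives in block.c, outside this hunk, and the function name and ordering comments here are illustrative only:

static void example_issue(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct mmc_blk_request *brq = &mqrq->brq;

	/* map and, if needed, bounce this particular queue request */
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
	mmc_queue_bounce_pre(mqrq);

	/* ... hand brq->mrq to the host; once the transfer completes: */
	mmc_queue_bounce_post(mqrq);
}

With two mmc_queue_req slots per queue, one request can be prepared like this while the other is still in flight.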
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 68091dda3f3..91a0a7460eb 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -23,6 +23,7 @@
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -198,9 +199,109 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
static void mmc_wait_done(struct mmc_request *mrq)
{
- complete(mrq->done_data);
+ complete(&mrq->completion);
}
+static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ init_completion(&mrq->completion);
+ mrq->done = mmc_wait_done;
+ mmc_start_request(host, mrq);
+}
+
+static void mmc_wait_for_req_done(struct mmc_host *host,
+ struct mmc_request *mrq)
+{
+ wait_for_completion(&mrq->completion);
+}
+
+/**
+ * mmc_pre_req - Prepare for a new request
+ * @host: MMC host to prepare command
+ * @mrq: MMC request to prepare for
+ * @is_first_req: true if there is no previously started request
+ *                that may run in parallel to this call, otherwise false
+ *
+ * mmc_pre_req() is called prior to mmc_start_req() to let the
+ * host prepare for the new request. Preparation of a request may be
+ * performed while another request is running on the host.
+ */
+static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ if (host->ops->pre_req)
+ host->ops->pre_req(host, mrq, is_first_req);
+}
+
+/**
+ * mmc_post_req - Post process a completed request
+ * @host: MMC host to post process command
+ * @mrq: MMC request to post process for
+ * @err: Error, if non-zero, clean up any resources made in pre_req
+ *
+ * Let the host post process a completed request. Post processing of
+ * a request may be performed while another request is running.
+ */
+static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
+ int err)
+{
+ if (host->ops->post_req)
+ host->ops->post_req(host, mrq, err);
+}
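A hedged host-side sketch of these two hooks (not from this patch): a driver could use them to map and unmap the data buffers for DMA outside the critical path. The function names are hypothetical; the signatures follow the host->ops->pre_req()/post_req() call sites above, <linux/dma-mapping.h> is assumed, and a real driver would also record the mapped sg count, which is omitted here.

static void example_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			    bool is_first_req)
{
	struct mmc_data *data = mrq->data;

	/* map while the previous transfer may still be running */
	if (data)
		dma_map_sg(mmc_dev(host), data->sg, data->sg_len,
			   data->flags & MMC_DATA_READ ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}

static void example_post_req(struct mmc_host *host, struct mmc_request *mrq,
			     int err)
{
	struct mmc_data *data = mrq->data;

	/* undo the pre_req mapping once the request has completed */
	if (data)
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     data->flags & MMC_DATA_READ ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
}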
+
+/**
+ * mmc_start_req - start a non-blocking request
+ * @host: MMC host to start command
+ * @areq: async request to start
+ * @error: out parameter; returns 0 for success, otherwise non-zero
+ *
+ * Start a new MMC custom command request for a host.
+ * If there is an ongoing async request, wait for completion
+ * of that request, then start the new one and return.
+ * Does not wait for the new request to complete.
+ *
+ * Returns the completed request, or NULL if none completed.
+ * Waits for an ongoing request (previously started) to complete and
+ * returns the completed request. If there is no ongoing request, NULL
+ * is returned without waiting. NULL is not an error condition.
+ */
+struct mmc_async_req *mmc_start_req(struct mmc_host *host,
+ struct mmc_async_req *areq, int *error)
+{
+ int err = 0;
+ struct mmc_async_req *data = host->areq;
+
+ /* Prepare a new request */
+ if (areq)
+ mmc_pre_req(host, areq->mrq, !host->areq);
+
+ if (host->areq) {
+ mmc_wait_for_req_done(host, host->areq->mrq);
+ err = host->areq->err_check(host->card, host->areq);
+ if (err) {
+ mmc_post_req(host, host->areq->mrq, 0);
+ if (areq)
+ mmc_post_req(host, areq->mrq, -EINVAL);
+
+ host->areq = NULL;
+ goto out;
+ }
+ }
+
+ if (areq)
+ __mmc_start_req(host, areq->mrq);
+
+ if (host->areq)
+ mmc_post_req(host, host->areq->mrq, 0);
+
+ host->areq = areq;
+ out:
+ if (error)
+ *error = err;
+ return data;
+}
+EXPORT_SYMBOL(mmc_start_req);
+
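A minimal caller sketch of the pipelining pattern this API enables (not part of the patch; the helper name and the debug message are illustrative). It relies only on what the code above guarantees: mmc_start_req() reaps the previously started request, if any, and kicks off the new one without waiting for it.

static int example_issue_async(struct mmc_host *host,
			       struct mmc_async_req *next)
{
	struct mmc_async_req *prev;
	int error;

	/* waits for the ongoing request (if any), then starts 'next' */
	prev = mmc_start_req(host, next, &error);
	if (prev)
		pr_debug("%s: previous async request done, err %d\n",
			 mmc_hostname(host), error);

	return error;
}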
/**
* mmc_wait_for_req - start a request and wait for completion
* @host: MMC host to start command
@@ -212,16 +313,9 @@ static void mmc_wait_done(struct mmc_request *mrq)
*/
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
- DECLARE_COMPLETION_ONSTACK(complete);
-
- mrq->done_data = &complete;
- mrq->done = mmc_wait_done;
-
- mmc_start_request(host, mrq);
-
- wait_for_completion(&complete);
+ __mmc_start_req(host, mrq);
+ mmc_wait_for_req_done(host, mrq);
}
-
EXPORT_SYMBOL(mmc_wait_for_req);
/**
@@ -1245,7 +1339,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
*/
timeout_clks <<= 1;
timeout_us += (timeout_clks * 1000) /
- (card->host->ios.clock / 1000);
+ (mmc_host_clk_rate(card->host) / 1000);
erase_timeout = timeout_us / 1000;
@@ -1408,7 +1502,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
goto out;
}
} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
- R1_CURRENT_STATE(cmd.resp[0]) == 7);
+ R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
out:
return err;
}
@@ -1516,6 +1610,82 @@ int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
+static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
+ unsigned int arg)
+{
+ struct mmc_host *host = card->host;
+ unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
+ unsigned int last_timeout = 0;
+
+ if (card->erase_shift)
+ max_qty = UINT_MAX >> card->erase_shift;
+ else if (mmc_card_sd(card))
+ max_qty = UINT_MAX;
+ else
+ max_qty = UINT_MAX / card->erase_size;
+
+ /* Find the largest qty with an OK timeout */
+ do {
+ y = 0;
+ for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
+ timeout = mmc_erase_timeout(card, arg, qty + x);
+ if (timeout > host->max_discard_to)
+ break;
+ if (timeout < last_timeout)
+ break;
+ last_timeout = timeout;
+ y = x;
+ }
+ qty += y;
+ } while (y);
+
+ if (!qty)
+ return 0;
+
+ if (qty == 1)
+ return 1;
+
+ /* Convert qty to sectors */
+ if (card->erase_shift)
+ max_discard = --qty << card->erase_shift;
+ else if (mmc_card_sd(card))
+ max_discard = qty;
+ else
+ max_discard = --qty * card->erase_size;
+
+ return max_discard;
+}
+
+unsigned int mmc_calc_max_discard(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ unsigned int max_discard, max_trim;
+
+ if (!host->max_discard_to)
+ return UINT_MAX;
+
+ /*
+ * Without erase_group_def set, MMC erase timeout depends on clock
+ * frequency, which can change. In that case, the best choice is
+ * just the preferred erase size.
+ */
+ if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
+ return card->pref_erase;
+
+ max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
+ if (mmc_can_trim(card)) {
+ max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
+ if (max_trim < max_discard)
+ max_discard = max_trim;
+ } else if (max_discard < card->erase_size) {
+ max_discard = 0;
+ }
+ pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
+ mmc_hostname(host), max_discard, host->max_discard_to);
+ return max_discard;
+}
+EXPORT_SYMBOL(mmc_calc_max_discard);
+
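For context, a hedged sketch of the host-side knob this routine depends on: a host driver that knows its maximum data timeout can set max_discard_to (in milliseconds) at probe time, and mmc_calc_max_discard() then sizes discards to fit within it; without it the function simply returns UINT_MAX, as above. The function name and the 250 ms value below are made up for illustration.

static void example_host_caps(struct mmc_host *mmc)
{
	/* cap discard/erase commands to what the controller can time out */
	mmc->max_discard_to = 250;	/* ms, illustrative value */
}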
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
struct mmc_command cmd = {0};
@@ -1663,6 +1833,10 @@ int mmc_power_save_host(struct mmc_host *host)
{
int ret = 0;
+#ifdef CONFIG_MMC_DEBUG
+ pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
+#endif
+
mmc_bus_get(host);
if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
@@ -1685,6 +1859,10 @@ int mmc_power_restore_host(struct mmc_host *host)
{
int ret;
+#ifdef CONFIG_MMC_DEBUG
+ pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
+#endif
+
mmc_bus_get(host);
if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 2a7e43bc796..5700b1cbdfe 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -247,25 +247,29 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
return 0;
/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
+ card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
if (card->csd.structure == 3) {
- int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
- if (ext_csd_struct > 2) {
+ if (card->ext_csd.raw_ext_csd_structure > 2) {
printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
"version %d\n", mmc_hostname(card->host),
- ext_csd_struct);
+ card->ext_csd.raw_ext_csd_structure);
err = -EINVAL;
goto out;
}
}
card->ext_csd.rev = ext_csd[EXT_CSD_REV];
- if (card->ext_csd.rev > 5) {
+ if (card->ext_csd.rev > 6) {
printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
mmc_hostname(card->host), card->ext_csd.rev);
err = -EINVAL;
goto out;
}
+ card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
+ card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
+ card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
+ card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
if (card->ext_csd.rev >= 2) {
card->ext_csd.sectors =
ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
@@ -277,7 +281,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
mmc_card_set_blockaddr(card);
}
-
+ card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
EXT_CSD_CARD_TYPE_26:
@@ -307,6 +311,11 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
mmc_hostname(card->host));
}
+ card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
+ card->ext_csd.raw_erase_timeout_mult =
+ ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
+ card->ext_csd.raw_hc_erase_grp_size =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
if (card->ext_csd.rev >= 3) {
u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
@@ -334,6 +343,16 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
}
+ card->ext_csd.raw_hc_erase_gap_size =
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
+ card->ext_csd.raw_sec_trim_mult =
+ ext_csd[EXT_CSD_SEC_TRIM_MULT];
+ card->ext_csd.raw_sec_erase_mult =
+ ext_csd[EXT_CSD_SEC_ERASE_MULT];
+ card->ext_csd.raw_sec_feature_support =
+ ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+ card->ext_csd.raw_trim_mult =
+ ext_csd[EXT_CSD_TRIM_MULT];
if (card->ext_csd.rev >= 4) {
/*
* Enhanced area feature support -- check whether the eMMC
@@ -341,7 +360,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
* area offset and size to user by adding sysfs interface.
*/
if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
- (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+ (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
u8 hc_erase_grp_sz =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
u8 hc_wp_grp_sz =
@@ -401,17 +420,17 @@ static inline void mmc_free_ext_csd(u8 *ext_csd)
}
-static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
- unsigned bus_width)
+static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
u8 *bw_ext_csd;
int err;
+ if (bus_width == MMC_BUS_WIDTH_1)
+ return 0;
+
err = mmc_get_ext_csd(card, &bw_ext_csd);
- if (err)
- return err;
- if ((ext_csd == NULL || bw_ext_csd == NULL)) {
+ if (err || bw_ext_csd == NULL) {
if (bus_width != MMC_BUS_WIDTH_1)
err = -EINVAL;
goto out;
@@ -421,35 +440,40 @@ static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
goto out;
/* only compare read only fields */
- err = (!(ext_csd[EXT_CSD_PARTITION_SUPPORT] ==
+ err = (!(card->ext_csd.raw_partition_support ==
bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
- (ext_csd[EXT_CSD_ERASED_MEM_CONT] ==
+ (card->ext_csd.raw_erased_mem_count ==
bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
- (ext_csd[EXT_CSD_REV] ==
+ (card->ext_csd.rev ==
bw_ext_csd[EXT_CSD_REV]) &&
- (ext_csd[EXT_CSD_STRUCTURE] ==
+ (card->ext_csd.raw_ext_csd_structure ==
bw_ext_csd[EXT_CSD_STRUCTURE]) &&
- (ext_csd[EXT_CSD_CARD_TYPE] ==
+ (card->ext_csd.raw_card_type ==
bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
- (ext_csd[EXT_CSD_S_A_TIMEOUT] ==
+ (card->ext_csd.raw_s_a_timeout ==
bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
- (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
+ (card->ext_csd.raw_hc_erase_gap_size ==
bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
- (ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT] ==
+ (card->ext_csd.raw_erase_timeout_mult ==
bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
- (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
+ (card->ext_csd.raw_hc_erase_grp_size ==
bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
- (ext_csd[EXT_CSD_SEC_TRIM_MULT] ==
+ (card->ext_csd.raw_sec_trim_mult ==
bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
- (ext_csd[EXT_CSD_SEC_ERASE_MULT] ==
+ (card->ext_csd.raw_sec_erase_mult ==
bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
- (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] ==
+ (card->ext_csd.raw_sec_feature_support ==
bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
- (ext_csd[EXT_CSD_TRIM_MULT] ==
+ (card->ext_csd.raw_trim_mult ==
bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
- memcmp(&ext_csd[EXT_CSD_SEC_CNT],
- &bw_ext_csd[EXT_CSD_SEC_CNT],
- 4) != 0);
+ (card->ext_csd.raw_sectors[0] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
+ (card->ext_csd.raw_sectors[1] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
+ (card->ext_csd.raw_sectors[2] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
+ (card->ext_csd.raw_sectors[3] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 3]));
if (err)
err = -EINVAL;
@@ -770,7 +794,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
err = mmc_compare_ext_csds(card,
- ext_csd,
bus_width);
else
err = mmc_bus_test(card, bus_width);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 845ce7c533b..770c3d06f5d 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -407,7 +407,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
break;
if (mmc_host_is_spi(card->host))
break;
- } while (R1_CURRENT_STATE(status) == 7);
+ } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
if (mmc_host_is_spi(card->host)) {
if (status & R1_SPI_ILLEGAL_COMMAND)
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index ff2774128aa..633975ff2bb 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -409,52 +409,62 @@ out:
static int sd_select_driver_type(struct mmc_card *card, u8 *status)
{
- int host_drv_type = 0, card_drv_type = 0;
+ int host_drv_type = SD_DRIVER_TYPE_B;
+ int card_drv_type = SD_DRIVER_TYPE_B;
+ int drive_strength;
int err;
/*
* If the host doesn't support any of the Driver Types A,C or D,
- * default Driver Type B is used.
+ * or there is no board-specific handler, then the default Driver
+ * Type B is used.
*/
if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C
| MMC_CAP_DRIVER_TYPE_D)))
return 0;
- if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) {
- host_drv_type = MMC_SET_DRIVER_TYPE_A;
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
- card_drv_type = MMC_SET_DRIVER_TYPE_A;
- else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
- card_drv_type = MMC_SET_DRIVER_TYPE_B;
- else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
- card_drv_type = MMC_SET_DRIVER_TYPE_C;
- } else if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) {
- host_drv_type = MMC_SET_DRIVER_TYPE_C;
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
- card_drv_type = MMC_SET_DRIVER_TYPE_C;
- } else if (!(card->host->caps & MMC_CAP_DRIVER_TYPE_D)) {
- /*
- * If we are here, that means only the default driver type
- * B is supported by the host.
- */
- host_drv_type = MMC_SET_DRIVER_TYPE_B;
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
- card_drv_type = MMC_SET_DRIVER_TYPE_B;
- else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
- card_drv_type = MMC_SET_DRIVER_TYPE_C;
- }
+ if (!card->host->ops->select_drive_strength)
+ return 0;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
+ host_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
+ host_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
+ host_drv_type |= SD_DRIVER_TYPE_D;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
+ card_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
+ card_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
+ card_drv_type |= SD_DRIVER_TYPE_D;
+
+ /*
+ * The drive strength that the hardware can support
+ * depends on the board design. Pass the appropriate
+ * information and let the hardware-specific code
+ * return what is possible given the options.
+ */
+ drive_strength = card->host->ops->select_drive_strength(
+ card->sw_caps.uhs_max_dtr,
+ host_drv_type, card_drv_type);
- err = mmc_sd_switch(card, 1, 2, card_drv_type, status);
+ err = mmc_sd_switch(card, 1, 2, drive_strength, status);
if (err)
return err;
- if ((status[15] & 0xF) != card_drv_type) {
- printk(KERN_WARNING "%s: Problem setting driver strength!\n",
+ if ((status[15] & 0xF) != drive_strength) {
+ printk(KERN_WARNING "%s: Problem setting drive strength!\n",
mmc_hostname(card->host));
return 0;
}
- mmc_set_driver_type(card->host, host_drv_type);
+ mmc_set_driver_type(card->host, drive_strength);
return 0;
}
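A hedged sketch of the board-specific ->select_drive_strength() callback this code now consults; the name, threshold and policy are hypothetical, but the argument order matches the call above and the return value is one of the MMC_SET_DRIVER_TYPE_* group values passed on to mmc_sd_switch().

static int example_select_drive_strength(unsigned int max_dtr,
					 int host_drv, int card_drv)
{
	/* prefer driver type A for fast UHS rates when both sides allow it */
	if ((host_drv & card_drv & SD_DRIVER_TYPE_A) &&
	    max_dtr >= 100000000)	/* illustrative threshold */
		return MMC_SET_DRIVER_TYPE_A;

	return MMC_SET_DRIVER_TYPE_B;	/* the default, group value 0 */
}

A host driver would wire such a callback up through its struct mmc_host_ops alongside its other callbacks.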
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 4d0c15bfa51..262fff01917 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -691,15 +691,54 @@ static int mmc_sdio_resume(struct mmc_host *host)
static int mmc_sdio_power_restore(struct mmc_host *host)
{
int ret;
+ u32 ocr;
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
+
+ /*
+ * Reset the card by performing the same steps that are taken by
+ * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
+ *
+ * sdio_reset() is technically not needed. Having just powered up the
+ * hardware, it should already be in reset state. However, some
+ * platforms (such as SD8686 on OLPC) do not instantly cut power,
+ * meaning that a reset is required when restoring power soon after
+ * powering off. It is harmless in other cases.
+ *
+ * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
+ * is not necessary for non-removable cards. However, it is required
+ * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
+ * harmless in other situations.
+ *
+ * With these steps taken, mmc_select_voltage() is also required to
+ * restore the correct voltage setting of the card.
+ */
+ sdio_reset(host);
+ mmc_go_idle(host);
+ mmc_send_if_cond(host, host->ocr_avail);
+
+ ret = mmc_send_io_op_cond(host, 0, &ocr);
+ if (ret)
+ goto out;
+
+ if (host->ocr_avail_sdio)
+ host->ocr_avail = host->ocr_avail_sdio;
+
+ host->ocr = mmc_select_voltage(host, ocr & ~0x7F);
+ if (!host->ocr) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = mmc_sdio_init_card(host, host->ocr, host->card,
mmc_card_keep_power(host));
if (!ret && host->sdio_irqs)
mmc_signal_sdio_irq(host);
+
+out:
mmc_release_host(host);
return ret;
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d29b9c36919..e4e6822d09e 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -167,11 +167,8 @@ static int sdio_bus_remove(struct device *dev)
int ret = 0;
/* Make sure card is powered before invoking ->remove() */
- if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) {
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- goto out;
- }
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_get_sync(dev);
drv->remove(func);
@@ -189,9 +186,8 @@ static int sdio_bus_remove(struct device *dev)
/* Then undo the runtime PM settings in sdio_bus_probe() */
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
- pm_runtime_put_noidle(dev);
+ pm_runtime_put_sync(dev);
-out:
return ret;
}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 56dbf3f6ad0..8c87096531e 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -81,28 +81,32 @@ config MMC_RICOH_MMC
If unsure, say Y.
-config MMC_SDHCI_OF
- tristate "SDHCI support on OpenFirmware platforms"
- depends on MMC_SDHCI && OF
+config MMC_SDHCI_PLTFM
+ tristate "SDHCI platform and OF driver helper"
+ depends on MMC_SDHCI
help
- This selects the OF support for Secure Digital Host Controller
- Interfaces.
+ This selects the common helper functions for Secure Digital
+ Host Controller Interface based platform and OF drivers.
+
+ If you have a controller with this interface, say Y or M here.
If unsure, say N.
config MMC_SDHCI_OF_ESDHC
- bool "SDHCI OF support for the Freescale eSDHC controller"
- depends on MMC_SDHCI_OF
+ tristate "SDHCI OF support for the Freescale eSDHC controller"
+ depends on MMC_SDHCI_PLTFM
depends on PPC_OF
select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
help
This selects the Freescale eSDHC controller support.
+ If you have a controller with this interface, say Y or M here.
+
If unsure, say N.
config MMC_SDHCI_OF_HLWD
- bool "SDHCI OF support for the Nintendo Wii SDHCI controllers"
- depends on MMC_SDHCI_OF
+ tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers"
+ depends on MMC_SDHCI_PLTFM
depends on PPC_OF
select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
help
@@ -110,40 +114,36 @@ config MMC_SDHCI_OF_HLWD
found in the "Hollywood" chipset of the Nintendo Wii video game
console.
- If unsure, say N.
-
-config MMC_SDHCI_PLTFM
- tristate "SDHCI support on the platform specific bus"
- depends on MMC_SDHCI
- help
- This selects the platform specific bus support for Secure Digital Host
- Controller Interface.
-
If you have a controller with this interface, say Y or M here.
If unsure, say N.
config MMC_SDHCI_CNS3XXX
- bool "SDHCI support on the Cavium Networks CNS3xxx SoC"
+ tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"
depends on ARCH_CNS3XXX
depends on MMC_SDHCI_PLTFM
help
This selects the SDHCI support for CNS3xxx System-on-Chip devices.
+ If you have a controller with this interface, say Y or M here.
+
If unsure, say N.
config MMC_SDHCI_ESDHC_IMX
- bool "SDHCI platform support for the Freescale eSDHC i.MX controller"
- depends on MMC_SDHCI_PLTFM && (ARCH_MX25 || ARCH_MX35 || ARCH_MX5)
+ tristate "SDHCI platform support for the Freescale eSDHC i.MX controller"
+ depends on ARCH_MX25 || ARCH_MX35 || ARCH_MX5
+ depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
help
This selects the Freescale eSDHC controller support on the platform
bus, found on platforms like mx35/51.
+ If you have a controller with this interface, say Y or M here.
+
If unsure, say N.
config MMC_SDHCI_DOVE
- bool "SDHCI support on Marvell's Dove SoC"
+ tristate "SDHCI support on Marvell's Dove SoC"
depends on ARCH_DOVE
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
@@ -151,11 +151,14 @@ config MMC_SDHCI_DOVE
This selects the Secure Digital Host Controller Interface in
Marvell's Dove SoC.
+ If you have a controller with this interface, say Y or M here.
+
If unsure, say N.
config MMC_SDHCI_TEGRA
- bool "SDHCI platform support for the Tegra SD/MMC Controller"
- depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
+ tristate "SDHCI platform support for the Tegra SD/MMC Controller"
+ depends on ARCH_TEGRA
+ depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
help
This selects the Tegra SD/MMC controller. If you have a Tegra
@@ -178,14 +181,28 @@ config MMC_SDHCI_S3C
If unsure, say N.
-config MMC_SDHCI_PXA
- tristate "Marvell PXA168/PXA910/MMP2 SD Host Controller support"
- depends on ARCH_PXA || ARCH_MMP
+config MMC_SDHCI_PXAV3
+ tristate "Marvell MMP2 SD Host Controller support (PXAV3)"
+ depends on CLKDEV_LOOKUP
select MMC_SDHCI
- select MMC_SDHCI_IO_ACCESSORS
+ select MMC_SDHCI_PLTFM
+ default CPU_MMP2
+ help
+ This selects the Marvell(R) PXAV3 SD Host Controller.
+ If you have a MMP2 platform with SD Host Controller
+ and a card slot, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_PXAV2
+ tristate "Marvell PXA9XX SD Host Controller support (PXAV2)"
+ depends on CLKDEV_LOOKUP
+ select MMC_SDHCI
+ select MMC_SDHCI_PLTFM
+ default CPU_PXA910
help
- This selects the Marvell(R) PXA168/PXA910/MMP2 SD Host Controller.
- If you have a PXA168/PXA910/MMP2 platform with SD Host Controller
+ This selects the Marvell(R) PXAV2 SD Host Controller.
+ If you have a PXA9XX platform with SD Host Controller
and a card slot, say Y or M here.
If unsure, say N.
@@ -281,13 +298,12 @@ config MMC_ATMELMCI
endchoice
config MMC_ATMELMCI_DMA
- bool "Atmel MCI DMA support (EXPERIMENTAL)"
- depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE && EXPERIMENTAL
+ bool "Atmel MCI DMA support"
+ depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE
help
Say Y here to have the Atmel MCI driver use a DMA engine to
do data transfers and thus increase the throughput and
- reduce the CPU utilization. Note that this is highly
- experimental and may cause the driver to lock up.
+ reduce the CPU utilization.
If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 58a5cf73d6e..b4b83f302e3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -9,7 +9,8 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
-obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o
+obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
+obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -31,9 +32,7 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
tmio_mmc_core-y := tmio_mmc_pio.o
-ifneq ($(CONFIG_MMC_SDHI),n)
-tmio_mmc_core-y += tmio_mmc_dma.o
-endif
+tmio_mmc_core-$(subst m,y,$(CONFIG_MMC_SDHI)) += tmio_mmc_dma.o
obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
@@ -44,17 +43,13 @@ obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_VUB300) += vub300.o
obj-$(CONFIG_MMC_USHC) += ushc.o
-obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o
-sdhci-platform-y := sdhci-pltfm.o
-sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
-sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
-sdhci-platform-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
-sdhci-platform-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
-
-obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
-sdhci-of-y := sdhci-of-core.o
-sdhci-of-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
-sdhci-of-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
+obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
+obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
+obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
+obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
+obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
+obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
+obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index d3e6a962f42..a4aa3af86fe 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -77,7 +77,8 @@
#include <mach/board.h>
#include <mach/cpu.h>
-#include <mach/at91_mci.h>
+
+#include "at91_mci.h"
#define DRIVER_NAME "at91_mci"
diff --git a/drivers/mmc/host/at91_mci.h b/drivers/mmc/host/at91_mci.h
new file mode 100644
index 00000000000..eec3a6b1c2b
--- /dev/null
+++ b/drivers/mmc/host/at91_mci.h
@@ -0,0 +1,115 @@
+/*
+ * drivers/mmc/host/at91_mci.h
+ *
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * MultiMedia Card Interface (MCI) registers.
+ * Based on AT91RM9200 datasheet revision F.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91_MCI_H
+#define AT91_MCI_H
+
+#define AT91_MCI_CR 0x00 /* Control Register */
+#define AT91_MCI_MCIEN (1 << 0) /* Multi-Media Interface Enable */
+#define AT91_MCI_MCIDIS (1 << 1) /* Multi-Media Interface Disable */
+#define AT91_MCI_PWSEN (1 << 2) /* Power Save Mode Enable */
+#define AT91_MCI_PWSDIS (1 << 3) /* Power Save Mode Disable */
+#define AT91_MCI_SWRST (1 << 7) /* Software Reset */
+
+#define AT91_MCI_MR 0x04 /* Mode Register */
+#define AT91_MCI_CLKDIV (0xff << 0) /* Clock Divider */
+#define AT91_MCI_PWSDIV (7 << 8) /* Power Saving Divider */
+#define AT91_MCI_RDPROOF (1 << 11) /* Read Proof Enable [SAM926[03] only] */
+#define AT91_MCI_WRPROOF (1 << 12) /* Write Proof Enable [SAM926[03] only] */
+#define AT91_MCI_PDCFBYTE (1 << 13) /* PDC Force Byte Transfer [SAM926[03] only] */
+#define AT91_MCI_PDCPADV (1 << 14) /* PDC Padding Value */
+#define AT91_MCI_PDCMODE (1 << 15) /* PDC-orientated Mode */
+#define AT91_MCI_BLKLEN (0xfff << 18) /* Data Block Length */
+
+#define AT91_MCI_DTOR 0x08 /* Data Timeout Register */
+#define AT91_MCI_DTOCYC (0xf << 0) /* Data Timeout Cycle Number */
+#define AT91_MCI_DTOMUL (7 << 4) /* Data Timeout Multiplier */
+#define AT91_MCI_DTOMUL_1 (0 << 4)
+#define AT91_MCI_DTOMUL_16 (1 << 4)
+#define AT91_MCI_DTOMUL_128 (2 << 4)
+#define AT91_MCI_DTOMUL_256 (3 << 4)
+#define AT91_MCI_DTOMUL_1K (4 << 4)
+#define AT91_MCI_DTOMUL_4K (5 << 4)
+#define AT91_MCI_DTOMUL_64K (6 << 4)
+#define AT91_MCI_DTOMUL_1M (7 << 4)
+
+#define AT91_MCI_SDCR 0x0c /* SD Card Register */
+#define AT91_MCI_SDCSEL (3 << 0) /* SD Card Selector */
+#define AT91_MCI_SDCBUS (1 << 7) /* 1-bit or 4-bit bus */
+
+#define AT91_MCI_ARGR 0x10 /* Argument Register */
+
+#define AT91_MCI_CMDR 0x14 /* Command Register */
+#define AT91_MCI_CMDNB (0x3f << 0) /* Command Number */
+#define AT91_MCI_RSPTYP (3 << 6) /* Response Type */
+#define AT91_MCI_RSPTYP_NONE (0 << 6)
+#define AT91_MCI_RSPTYP_48 (1 << 6)
+#define AT91_MCI_RSPTYP_136 (2 << 6)
+#define AT91_MCI_SPCMD (7 << 8) /* Special Command */
+#define AT91_MCI_SPCMD_NONE (0 << 8)
+#define AT91_MCI_SPCMD_INIT (1 << 8)
+#define AT91_MCI_SPCMD_SYNC (2 << 8)
+#define AT91_MCI_SPCMD_ICMD (4 << 8)
+#define AT91_MCI_SPCMD_IRESP (5 << 8)
+#define AT91_MCI_OPDCMD (1 << 11) /* Open Drain Command */
+#define AT91_MCI_MAXLAT (1 << 12) /* Max Latency for Command to Response */
+#define AT91_MCI_TRCMD (3 << 16) /* Transfer Command */
+#define AT91_MCI_TRCMD_NONE (0 << 16)
+#define AT91_MCI_TRCMD_START (1 << 16)
+#define AT91_MCI_TRCMD_STOP (2 << 16)
+#define AT91_MCI_TRDIR (1 << 18) /* Transfer Direction */
+#define AT91_MCI_TRTYP (3 << 19) /* Transfer Type */
+#define AT91_MCI_TRTYP_BLOCK (0 << 19)
+#define AT91_MCI_TRTYP_MULTIPLE (1 << 19)
+#define AT91_MCI_TRTYP_STREAM (2 << 19)
+#define AT91_MCI_TRTYP_SDIO_BYTE (4 << 19)
+#define AT91_MCI_TRTYP_SDIO_BLOCK (5 << 19)
+
+#define AT91_MCI_BLKR 0x18 /* Block Register */
+#define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */
+#define AT91_MCI_BLKR_BLKLEN(n) ((0xffff & (n)) << 16) /* Block length */
+
+#define AT91_MCI_RSPR(n) (0x20 + ((n) * 4)) /* Response Registers 0-3 */
+#define AT91_MCR_RDR 0x30 /* Receive Data Register */
+#define AT91_MCR_TDR 0x34 /* Transmit Data Register */
+
+#define AT91_MCI_SR 0x40 /* Status Register */
+#define AT91_MCI_CMDRDY (1 << 0) /* Command Ready */
+#define AT91_MCI_RXRDY (1 << 1) /* Receiver Ready */
+#define AT91_MCI_TXRDY (1 << 2) /* Transmit Ready */
+#define AT91_MCI_BLKE (1 << 3) /* Data Block Ended */
+#define AT91_MCI_DTIP (1 << 4) /* Data Transfer in Progress */
+#define AT91_MCI_NOTBUSY (1 << 5) /* Data Not Busy */
+#define AT91_MCI_ENDRX (1 << 6) /* End of RX Buffer */
+#define AT91_MCI_ENDTX (1 << 7) /* End of TX Buffer */
+#define AT91_MCI_SDIOIRQA (1 << 8) /* SDIO Interrupt for Slot A */
+#define AT91_MCI_SDIOIRQB (1 << 9) /* SDIO Interrupt for Slot B */
+#define AT91_MCI_RXBUFF (1 << 14) /* RX Buffer Full */
+#define AT91_MCI_TXBUFE (1 << 15) /* TX Buffer Empty */
+#define AT91_MCI_RINDE (1 << 16) /* Response Index Error */
+#define AT91_MCI_RDIRE (1 << 17) /* Response Direction Error */
+#define AT91_MCI_RCRCE (1 << 18) /* Response CRC Error */
+#define AT91_MCI_RENDE (1 << 19) /* Response End Bit Error */
+#define AT91_MCI_RTOE (1 << 20) /* Response Time-out Error */
+#define AT91_MCI_DCRCE (1 << 21) /* Data CRC Error */
+#define AT91_MCI_DTOE (1 << 22) /* Data Time-out Error */
+#define AT91_MCI_OVRE (1 << 30) /* Overrun */
+#define AT91_MCI_UNRE (1 << 31) /* Underrun */
+
+#define AT91_MCI_IER 0x44 /* Interrupt Enable Register */
+#define AT91_MCI_IDR 0x48 /* Interrupt Disable Register */
+#define AT91_MCI_IMR 0x4c /* Interrupt Mask Register */
+
+#endif
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index aa8039f473c..fa8cae1d700 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -203,6 +203,7 @@ struct atmel_mci_slot {
#define ATMCI_CARD_PRESENT 0
#define ATMCI_CARD_NEED_INIT 1
#define ATMCI_SHUTDOWN 2
+#define ATMCI_SUSPENDED 3
int detect_pin;
int wp_pin;
@@ -1878,10 +1879,72 @@ static int __exit atmci_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int atmci_suspend(struct device *dev)
+{
+ struct atmel_mci *host = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+ struct atmel_mci_slot *slot = host->slot[i];
+ int ret;
+
+ if (!slot)
+ continue;
+ ret = mmc_suspend_host(slot->mmc);
+ if (ret < 0) {
+ while (--i >= 0) {
+ slot = host->slot[i];
+ if (slot
+ && test_bit(ATMCI_SUSPENDED, &slot->flags)) {
+ mmc_resume_host(host->slot[i]->mmc);
+ clear_bit(ATMCI_SUSPENDED, &slot->flags);
+ }
+ }
+ return ret;
+ } else {
+ set_bit(ATMCI_SUSPENDED, &slot->flags);
+ }
+ }
+
+ return 0;
+}
+
+static int atmci_resume(struct device *dev)
+{
+ struct atmel_mci *host = dev_get_drvdata(dev);
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+ struct atmel_mci_slot *slot = host->slot[i];
+ int err;
+
+ if (!slot)
+ continue;
+ if (!test_bit(ATMCI_SUSPENDED, &slot->flags))
+ continue;
+ err = mmc_resume_host(slot->mmc);
+ if (err < 0)
+ ret = err;
+ else
+ clear_bit(ATMCI_SUSPENDED, &slot->flags);
+ }
+
+ return ret;
+}
+static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume);
+#define ATMCI_PM_OPS (&atmci_pm)
+#else
+#define ATMCI_PM_OPS NULL
+#endif
+
static struct platform_driver atmci_driver = {
.remove = __exit_p(atmci_remove),
.driver = {
.name = "atmel_mci",
+ .pm = ATMCI_PM_OPS,
},
};
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 66dcddb9c20..ff0f714b012 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -33,6 +33,7 @@
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
#include "dw_mmc.h"
@@ -61,7 +62,7 @@ struct idmac_desc {
u32 des1; /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
- ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
+ ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
u32 des2; /* buffer 1 physical address */
@@ -100,6 +101,8 @@ struct dw_mci_slot {
int last_detect_state;
};
+static struct workqueue_struct *dw_mci_card_workqueue;
+
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
@@ -284,7 +287,7 @@ static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
- if (host->use_dma) {
+ if (host->using_dma) {
host->dma_ops->stop(host);
host->dma_ops->cleanup(host);
} else {
@@ -432,6 +435,8 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
unsigned int i, direction, sg_len;
u32 temp;
+ host->using_dma = 0;
+
/* If we don't have a channel, we can't do DMA */
if (!host->use_dma)
return -ENODEV;
@@ -451,6 +456,8 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
return -EINVAL;
}
+ host->using_dma = 1;
+
if (data->flags & MMC_DATA_READ)
direction = DMA_FROM_DEVICE;
else
@@ -489,14 +496,18 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
host->sg = NULL;
host->data = data;
+ if (data->flags & MMC_DATA_READ)
+ host->dir_status = DW_MCI_RECV_STATUS;
+ else
+ host->dir_status = DW_MCI_SEND_STATUS;
+
if (dw_mci_submit_data_dma(host, data)) {
host->sg = data->sg;
host->pio_offset = 0;
- if (data->flags & MMC_DATA_READ)
- host->dir_status = DW_MCI_RECV_STATUS;
- else
- host->dir_status = DW_MCI_SEND_STATUS;
+ host->part_buf_start = 0;
+ host->part_buf_count = 0;
+ mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
temp = mci_readl(host, INTMASK);
temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
mci_writel(host, INTMASK, temp);
@@ -574,7 +585,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
}
/* Set the current slot bus width */
- mci_writel(host, CTYPE, slot->ctype);
+ mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
static void dw_mci_start_request(struct dw_mci *host,
@@ -624,13 +635,13 @@ static void dw_mci_start_request(struct dw_mci *host,
host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}
+/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
struct mmc_request *mrq)
{
dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
host->state);
- spin_lock_bh(&host->lock);
slot->mrq = mrq;
if (host->state == STATE_IDLE) {
@@ -639,8 +650,6 @@ static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
} else {
list_add_tail(&slot->queue_node, &host->queue);
}
-
- spin_unlock_bh(&host->lock);
}
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -650,14 +659,23 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
WARN_ON(slot->mrq);
+ /*
+ * The check for card presence and queueing of the request must be
+ * atomic, otherwise the card could be removed in between and the
+ * request wouldn't fail until another card was inserted.
+ */
+ spin_lock_bh(&host->lock);
+
if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
+ spin_unlock_bh(&host->lock);
mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq);
return;
}
- /* We don't support multiple blocks of weird lengths. */
dw_mci_queue_request(host, slot, mrq);
+
+ spin_unlock_bh(&host->lock);
}
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -681,7 +699,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
/* DDR mode set */
- if (ios->ddr) {
+ if (ios->timing == MMC_TIMING_UHS_DDR50) {
regs = mci_readl(slot->host, UHS_REG);
regs |= (0x1 << slot->id) << 16;
mci_writel(slot->host, UHS_REG, regs);
@@ -831,7 +849,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
struct mmc_command *cmd;
enum dw_mci_state state;
enum dw_mci_state prev_state;
- u32 status;
+ u32 status, ctrl;
spin_lock(&host->lock);
@@ -891,13 +909,19 @@ static void dw_mci_tasklet_func(unsigned long priv)
if (status & DW_MCI_DATA_ERROR_FLAGS) {
if (status & SDMMC_INT_DTO) {
- dev_err(&host->pdev->dev,
- "data timeout error\n");
data->error = -ETIMEDOUT;
} else if (status & SDMMC_INT_DCRC) {
- dev_err(&host->pdev->dev,
- "data CRC error\n");
data->error = -EILSEQ;
+ } else if (status & SDMMC_INT_EBE &&
+ host->dir_status ==
+ DW_MCI_SEND_STATUS) {
+ /*
+ * No data CRC status was returned.
+ * The number of bytes transferred will
+ * be exaggerated in PIO mode.
+ */
+ data->bytes_xfered = 0;
+ data->error = -ETIMEDOUT;
} else {
dev_err(&host->pdev->dev,
"data FIFO error "
@@ -905,6 +929,16 @@ static void dw_mci_tasklet_func(unsigned long priv)
status);
data->error = -EIO;
}
+ /*
+ * After an error, there may be data lingering
+ * in the FIFO, so reset it. Doing so
+ * generates a block interrupt, which is why the
+ * scatter-gather pointer is set to NULL first.
+ */
+ host->sg = NULL;
+ ctrl = mci_readl(host, CTRL);
+ ctrl |= SDMMC_CTRL_FIFO_RESET;
+ mci_writel(host, CTRL, ctrl);
} else {
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
@@ -946,84 +980,278 @@ unlock:
}
-static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
+/* push final bytes to part_buf, only use during push */
+static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
- u16 *pdata = (u16 *)buf;
+ memcpy((void *)&host->part_buf, buf, cnt);
+ host->part_buf_count = cnt;
+}
- WARN_ON(cnt % 2 != 0);
+/* append bytes to part_buf, only use during push */
+static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
+{
+ cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
+ memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
+ host->part_buf_count += cnt;
+ return cnt;
+}
- cnt = cnt >> 1;
- while (cnt > 0) {
- mci_writew(host, DATA, *pdata++);
- cnt--;
+/* pull first bytes from part_buf, only use during pull */
+static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
+{
+ cnt = min(cnt, (int)host->part_buf_count);
+ if (cnt) {
+ memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
+ cnt);
+ host->part_buf_count -= cnt;
+ host->part_buf_start += cnt;
}
+ return cnt;
}
-static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
+/* pull final bytes from the part_buf, assuming it's just been filled */
+static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
- u16 *pdata = (u16 *)buf;
+ memcpy(buf, &host->part_buf, cnt);
+ host->part_buf_start = cnt;
+ host->part_buf_count = (1 << host->data_shift) - cnt;
+}
- WARN_ON(cnt % 2 != 0);
+static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
+{
+ /* try and push anything in the part_buf */
+ if (unlikely(host->part_buf_count)) {
+ int len = dw_mci_push_part_bytes(host, buf, cnt);
+ buf += len;
+ cnt -= len;
+ if (!sg_next(host->sg) || host->part_buf_count == 2) {
+ mci_writew(host, DATA, host->part_buf16);
+ host->part_buf_count = 0;
+ }
+ }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x1)) {
+ while (cnt >= 2) {
+ u16 aligned_buf[64];
+ int len = min(cnt & -2, (int)sizeof(aligned_buf));
+ int items = len >> 1;
+ int i;
+ /* memcpy from input buffer into aligned buffer */
+ memcpy(aligned_buf, buf, len);
+ buf += len;
+ cnt -= len;
+ /* push data from aligned buffer into fifo */
+ for (i = 0; i < items; ++i)
+ mci_writew(host, DATA, aligned_buf[i]);
+ }
+ } else
+#endif
+ {
+ u16 *pdata = buf;
+ for (; cnt >= 2; cnt -= 2)
+ mci_writew(host, DATA, *pdata++);
+ buf = pdata;
+ }
+ /* put anything remaining in the part_buf */
+ if (cnt) {
+ dw_mci_set_part_bytes(host, buf, cnt);
+ if (!sg_next(host->sg))
+ mci_writew(host, DATA, host->part_buf16);
+ }
+}
- cnt = cnt >> 1;
- while (cnt > 0) {
- *pdata++ = mci_readw(host, DATA);
- cnt--;
+static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x1)) {
+ while (cnt >= 2) {
+ /* pull data from fifo into aligned buffer */
+ u16 aligned_buf[64];
+ int len = min(cnt & -2, (int)sizeof(aligned_buf));
+ int items = len >> 1;
+ int i;
+ for (i = 0; i < items; ++i)
+ aligned_buf[i] = mci_readw(host, DATA);
+ /* memcpy from aligned buffer into output buffer */
+ memcpy(buf, aligned_buf, len);
+ buf += len;
+ cnt -= len;
+ }
+ } else
+#endif
+ {
+ u16 *pdata = buf;
+ for (; cnt >= 2; cnt -= 2)
+ *pdata++ = mci_readw(host, DATA);
+ buf = pdata;
+ }
+ if (cnt) {
+ host->part_buf16 = mci_readw(host, DATA);
+ dw_mci_pull_final_bytes(host, buf, cnt);
}
}
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
- u32 *pdata = (u32 *)buf;
-
- WARN_ON(cnt % 4 != 0);
- WARN_ON((unsigned long)pdata & 0x3);
-
- cnt = cnt >> 2;
- while (cnt > 0) {
- mci_writel(host, DATA, *pdata++);
- cnt--;
+ /* try and push anything in the part_buf */
+ if (unlikely(host->part_buf_count)) {
+ int len = dw_mci_push_part_bytes(host, buf, cnt);
+ buf += len;
+ cnt -= len;
+ if (!sg_next(host->sg) || host->part_buf_count == 4) {
+ mci_writel(host, DATA, host->part_buf32);
+ host->part_buf_count = 0;
+ }
+ }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x3)) {
+ while (cnt >= 4) {
+ u32 aligned_buf[32];
+ int len = min(cnt & -4, (int)sizeof(aligned_buf));
+ int items = len >> 2;
+ int i;
+ /* memcpy from input buffer into aligned buffer */
+ memcpy(aligned_buf, buf, len);
+ buf += len;
+ cnt -= len;
+ /* push data from aligned buffer into fifo */
+ for (i = 0; i < items; ++i)
+ mci_writel(host, DATA, aligned_buf[i]);
+ }
+ } else
+#endif
+ {
+ u32 *pdata = buf;
+ for (; cnt >= 4; cnt -= 4)
+ mci_writel(host, DATA, *pdata++);
+ buf = pdata;
+ }
+ /* put anything remaining in the part_buf */
+ if (cnt) {
+ dw_mci_set_part_bytes(host, buf, cnt);
+ if (!sg_next(host->sg))
+ mci_writel(host, DATA, host->part_buf32);
}
}
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
- u32 *pdata = (u32 *)buf;
-
- WARN_ON(cnt % 4 != 0);
- WARN_ON((unsigned long)pdata & 0x3);
-
- cnt = cnt >> 2;
- while (cnt > 0) {
- *pdata++ = mci_readl(host, DATA);
- cnt--;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x3)) {
+ while (cnt >= 4) {
+ /* pull data from fifo into aligned buffer */
+ u32 aligned_buf[32];
+ int len = min(cnt & -4, (int)sizeof(aligned_buf));
+ int items = len >> 2;
+ int i;
+ for (i = 0; i < items; ++i)
+ aligned_buf[i] = mci_readl(host, DATA);
+ /* memcpy from aligned buffer into output buffer */
+ memcpy(buf, aligned_buf, len);
+ buf += len;
+ cnt -= len;
+ }
+ } else
+#endif
+ {
+ u32 *pdata = buf;
+ for (; cnt >= 4; cnt -= 4)
+ *pdata++ = mci_readl(host, DATA);
+ buf = pdata;
+ }
+ if (cnt) {
+ host->part_buf32 = mci_readl(host, DATA);
+ dw_mci_pull_final_bytes(host, buf, cnt);
}
}
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
- u64 *pdata = (u64 *)buf;
-
- WARN_ON(cnt % 8 != 0);
-
- cnt = cnt >> 3;
- while (cnt > 0) {
- mci_writeq(host, DATA, *pdata++);
- cnt--;
+ /* try and push anything in the part_buf */
+ if (unlikely(host->part_buf_count)) {
+ int len = dw_mci_push_part_bytes(host, buf, cnt);
+ buf += len;
+ cnt -= len;
+ if (!sg_next(host->sg) || host->part_buf_count == 8) {
+ mci_writeq(host, DATA, host->part_buf);
+ host->part_buf_count = 0;
+ }
+ }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x7)) {
+ while (cnt >= 8) {
+ u64 aligned_buf[16];
+ int len = min(cnt & -8, (int)sizeof(aligned_buf));
+ int items = len >> 3;
+ int i;
+ /* memcpy from input buffer into aligned buffer */
+ memcpy(aligned_buf, buf, len);
+ buf += len;
+ cnt -= len;
+ /* push data from aligned buffer into fifo */
+ for (i = 0; i < items; ++i)
+ mci_writeq(host, DATA, aligned_buf[i]);
+ }
+ } else
+#endif
+ {
+ u64 *pdata = buf;
+ for (; cnt >= 8; cnt -= 8)
+ mci_writeq(host, DATA, *pdata++);
+ buf = pdata;
+ }
+ /* put anything remaining in the part_buf */
+ if (cnt) {
+ dw_mci_set_part_bytes(host, buf, cnt);
+ if (!sg_next(host->sg))
+ mci_writeq(host, DATA, host->part_buf);
}
}
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
- u64 *pdata = (u64 *)buf;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x7)) {
+ while (cnt >= 8) {
+ /* pull data from fifo into aligned buffer */
+ u64 aligned_buf[16];
+ int len = min(cnt & -8, (int)sizeof(aligned_buf));
+ int items = len >> 3;
+ int i;
+ for (i = 0; i < items; ++i)
+ aligned_buf[i] = mci_readq(host, DATA);
+ /* memcpy from aligned buffer into output buffer */
+ memcpy(buf, aligned_buf, len);
+ buf += len;
+ cnt -= len;
+ }
+ } else
+#endif
+ {
+ u64 *pdata = buf;
+ for (; cnt >= 8; cnt -= 8)
+ *pdata++ = mci_readq(host, DATA);
+ buf = pdata;
+ }
+ if (cnt) {
+ host->part_buf = mci_readq(host, DATA);
+ dw_mci_pull_final_bytes(host, buf, cnt);
+ }
+}
- WARN_ON(cnt % 8 != 0);
+static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
+{
+ int len;
- cnt = cnt >> 3;
- while (cnt > 0) {
- *pdata++ = mci_readq(host, DATA);
- cnt--;
- }
+ /* get remaining partial bytes */
+ len = dw_mci_pull_part_bytes(host, buf, cnt);
+ if (unlikely(len == cnt))
+ return;
+ buf += len;
+ cnt -= len;
+
+ /* get the rest of the data */
+ host->pull_data(host, buf, cnt);
}
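
The part_buf helpers above exist because a scatterlist segment need not be a multiple of the FIFO word size, nor naturally aligned: leftover bytes from one segment are parked in host->part_buf and combined with the first bytes of the next segment before a complete word is pushed to (or pulled from) the DATA register, while misaligned buffers are bounced through a small aligned array when the architecture cannot do efficient unaligned accesses. The stand-alone sketch below shows only the partial-word bookkeeping for the 32-bit push case; fifo_write() is a made-up stand-in for mci_writel(), and the real driver additionally flushes a short final word when the last segment of the transfer ends.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct pio_state {
		uint32_t part_buf;	/* bytes carried over from the previous call */
		int part_count;		/* number of valid bytes in part_buf */
	};

	/* stand-in for the memory-mapped DATA register write */
	static void fifo_write(uint32_t word)
	{
		printf("FIFO <- %08x\n", word);
	}

	static void push_data32(struct pio_state *s, const void *buf, int cnt)
	{
		const uint8_t *p = buf;

		/* first, top up a word started by the previous call */
		while (s->part_count && cnt) {
			((uint8_t *)&s->part_buf)[s->part_count++] = *p++;
			cnt--;
			if (s->part_count == 4) {
				fifo_write(s->part_buf);
				s->part_count = 0;
			}
		}
		/* whole words go straight out; memcpy tolerates an unaligned p */
		for (; cnt >= 4; cnt -= 4, p += 4) {
			uint32_t w;

			memcpy(&w, p, 4);
			fifo_write(w);
		}
		/* park any tail bytes for the next segment */
		if (cnt) {
			memcpy(&s->part_buf, p, cnt);
			s->part_count = cnt;
		}
	}

	int main(void)
	{
		struct pio_state s = { 0, 0 };
		uint8_t seg1[5] = { 1, 2, 3, 4, 5 };	/* not a multiple of 4 */
		uint8_t seg2[3] = { 6, 7, 8 };		/* completes the word */

		push_data32(&s, seg1, sizeof(seg1));
		push_data32(&s, seg2, sizeof(seg2));
		return 0;
	}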
static void dw_mci_read_data_pio(struct dw_mci *host)
@@ -1037,9 +1265,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
unsigned int nbytes = 0, len;
do {
- len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
+ len = host->part_buf_count +
+ (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
if (offset + len <= sg->length) {
- host->pull_data(host, (void *)(buf + offset), len);
+ dw_mci_pull_data(host, (void *)(buf + offset), len);
offset += len;
nbytes += len;
@@ -1055,8 +1284,8 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
}
} else {
unsigned int remaining = sg->length - offset;
- host->pull_data(host, (void *)(buf + offset),
- remaining);
+ dw_mci_pull_data(host, (void *)(buf + offset),
+ remaining);
nbytes += remaining;
flush_dcache_page(sg_page(sg));
@@ -1066,7 +1295,7 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
offset = len - remaining;
buf = sg_virt(sg);
- host->pull_data(host, buf, offset);
+ dw_mci_pull_data(host, buf, offset);
nbytes += offset;
}
@@ -1083,7 +1312,6 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
return;
}
} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
- len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
host->pio_offset = offset;
data->bytes_xfered += nbytes;
return;
@@ -1105,8 +1333,9 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
unsigned int nbytes = 0, len;
do {
- len = SDMMC_FIFO_SZ -
- (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
+ len = ((host->fifo_depth -
+ SDMMC_GET_FCNT(mci_readl(host, STATUS))) << shift)
+ - host->part_buf_count;
if (offset + len <= sg->length) {
host->push_data(host, (void *)(buf + offset), len);
@@ -1151,10 +1380,8 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
return;
}
} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
-
host->pio_offset = offset;
data->bytes_xfered += nbytes;
-
return;
done:
@@ -1202,7 +1429,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
host->cmd_status = status;
smp_wmb();
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
}
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
@@ -1211,7 +1437,9 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
host->data_status = status;
smp_wmb();
set_bit(EVENT_DATA_ERROR, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC |
+ SDMMC_INT_SBE | SDMMC_INT_EBE)))
+ tasklet_schedule(&host->tasklet);
}
if (pending & SDMMC_INT_DATA_OVER) {
@@ -1229,13 +1457,13 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & SDMMC_INT_RXDR) {
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
- if (host->sg)
+ if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
dw_mci_read_data_pio(host);
}
if (pending & SDMMC_INT_TXDR) {
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
- if (host->sg)
+ if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
dw_mci_write_data_pio(host);
}
@@ -1246,7 +1474,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & SDMMC_INT_CD) {
mci_writel(host, RINTSTS, SDMMC_INT_CD);
- tasklet_schedule(&host->card_tasklet);
+ queue_work(dw_mci_card_workqueue, &host->card_work);
}
} while (pass_count++ < 5);
@@ -1265,9 +1493,9 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void dw_mci_tasklet_card(unsigned long data)
+static void dw_mci_work_routine_card(struct work_struct *work)
{
- struct dw_mci *host = (struct dw_mci *)data;
+ struct dw_mci *host = container_of(work, struct dw_mci, card_work);
int i;
for (i = 0; i < host->num_slots; i++) {
@@ -1279,22 +1507,21 @@ static void dw_mci_tasklet_card(unsigned long data)
present = dw_mci_get_cd(mmc);
while (present != slot->last_detect_state) {
- spin_lock(&host->lock);
-
dev_dbg(&slot->mmc->class_dev, "card %s\n",
present ? "inserted" : "removed");
+ /* Power up slot (before spin_lock, may sleep) */
+ if (present != 0 && host->pdata->setpower)
+ host->pdata->setpower(slot->id, mmc->ocr_avail);
+
+ spin_lock_bh(&host->lock);
+
/* Card change detected */
slot->last_detect_state = present;
- /* Power up slot */
- if (present != 0) {
- if (host->pdata->setpower)
- host->pdata->setpower(slot->id,
- mmc->ocr_avail);
-
+ /* Mark card as present if applicable */
+ if (present != 0)
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
- }
/* Clean up queue if present */
mrq = slot->mrq;
@@ -1344,8 +1571,6 @@ static void dw_mci_tasklet_card(unsigned long data)
/* Power down slot */
if (present == 0) {
- if (host->pdata->setpower)
- host->pdata->setpower(slot->id, 0);
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
/*
@@ -1367,7 +1592,12 @@ static void dw_mci_tasklet_card(unsigned long data)
}
- spin_unlock(&host->lock);
+ spin_unlock_bh(&host->lock);
+
+ /* Power down slot (after spin_unlock, may sleep) */
+ if (present == 0 && host->pdata->setpower)
+ host->pdata->setpower(slot->id, 0);
+
present = dw_mci_get_cd(mmc);
}
@@ -1416,7 +1646,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
mmc->caps |= MMC_CAP_4_BIT_DATA;
if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
- mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
#ifdef CONFIG_MMC_DW_IDMAC
mmc->max_segs = host->ring_size;
@@ -1467,7 +1697,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
* Card may have been plugged in prior to boot so we
* need to run the detect tasklet
*/
- tasklet_schedule(&host->card_tasklet);
+ queue_work(dw_mci_card_workqueue, &host->card_work);
return 0;
}
@@ -1595,7 +1825,7 @@ static int dw_mci_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&host->queue);
ret = -ENOMEM;
- host->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ host->regs = ioremap(regs->start, resource_size(regs));
if (!host->regs)
goto err_freehost;
@@ -1645,8 +1875,19 @@ static int dw_mci_probe(struct platform_device *pdev)
* FIFO threshold settings RxMark = fifo_size / 2 - 1,
* Tx Mark = fifo_size / 2 DMA Size = 8
*/
- fifo_size = mci_readl(host, FIFOTH);
- fifo_size = (fifo_size >> 16) & 0x7ff;
+ if (!host->pdata->fifo_depth) {
+ /*
+ * Power-on value of RX_WMark is FIFO_DEPTH-1, but it may have
+ * been overwritten by the bootloader (just as we are about to
+ * overwrite it here). If you know the FIFO depth of your
+ * hardware, put it in the platform data instead.
+ */
+ fifo_size = mci_readl(host, FIFOTH);
+ fifo_size = 1 + ((fifo_size >> 16) & 0x7ff);
+ } else {
+ fifo_size = host->pdata->fifo_depth;
+ }
+ host->fifo_depth = fifo_size;
host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
((fifo_size/2) << 0));
mci_writel(host, FIFOTH, host->fifoth_val);
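
The FIFOTH programming above packs three fields into one register: the DMA multiple-transaction size (value 0x2, i.e. 8 transfers) at bit 28, RX_WMark = fifo_depth/2 - 1 at bit 16 and TX_WMark = fifo_depth/2 at bit 0. A small stand-alone check of that arithmetic, with the field positions taken from the expression above rather than from the databook:

	#include <stdio.h>

	static unsigned int dw_mci_fifoth(unsigned int fifo_depth)
	{
		return (0x2u << 28) |			/* DMA multiple-transaction size = 8 */
		       ((fifo_depth / 2 - 1) << 16) |	/* RX watermark */
		       ((fifo_depth / 2) << 0);		/* TX watermark */
	}

	int main(void)
	{
		/* a 32-word FIFO gives RX_WMark = 15, TX_WMark = 16 -> 0x200f0010 */
		printf("0x%08x\n", dw_mci_fifoth(32));
		return 0;
	}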
@@ -1656,12 +1897,15 @@ static int dw_mci_probe(struct platform_device *pdev)
mci_writel(host, CLKSRC, 0);
tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
- tasklet_init(&host->card_tasklet,
- dw_mci_tasklet_card, (unsigned long)host);
+ dw_mci_card_workqueue = alloc_workqueue("dw-mci-card",
+ WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
+ if (!dw_mci_card_workqueue)
+ goto err_dmaunmap;
+ INIT_WORK(&host->card_work, dw_mci_work_routine_card);
ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
if (ret)
- goto err_dmaunmap;
+ goto err_workqueue;
platform_set_drvdata(pdev, host);
@@ -1690,7 +1934,9 @@ static int dw_mci_probe(struct platform_device *pdev)
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
dev_info(&pdev->dev, "DW MMC controller at irq %d, "
- "%d bit host data width\n", irq, width);
+ "%d bit host data width, "
+ "%u deep fifo\n",
+ irq, width, fifo_size);
if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
@@ -1705,6 +1951,9 @@ err_init_slot:
}
free_irq(irq, host);
+err_workqueue:
+ destroy_workqueue(dw_mci_card_workqueue);
+
err_dmaunmap:
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
@@ -1744,6 +1993,7 @@ static int __exit dw_mci_remove(struct platform_device *pdev)
mci_writel(host, CLKSRC, 0);
free_irq(platform_get_irq(pdev, 0), host);
+ destroy_workqueue(dw_mci_card_workqueue);
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
if (host->use_dma && host->dma_ops->exit)
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 23c662af561..027d3773539 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -118,7 +118,6 @@
#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
/* Status register defines */
#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF)
-#define SDMMC_FIFO_SZ 32
/* Internal DMAC interrupt defines */
#define SDMMC_IDMAC_INT_AI BIT(9)
#define SDMMC_IDMAC_INT_NI BIT(8)
@@ -134,22 +133,22 @@
/* Register access macros */
#define mci_readl(dev, reg) \
- __raw_readl(dev->regs + SDMMC_##reg)
+ __raw_readl((dev)->regs + SDMMC_##reg)
#define mci_writel(dev, reg, value) \
- __raw_writel((value), dev->regs + SDMMC_##reg)
+ __raw_writel((value), (dev)->regs + SDMMC_##reg)
/* 16-bit FIFO access macros */
#define mci_readw(dev, reg) \
- __raw_readw(dev->regs + SDMMC_##reg)
+ __raw_readw((dev)->regs + SDMMC_##reg)
#define mci_writew(dev, reg, value) \
- __raw_writew((value), dev->regs + SDMMC_##reg)
+ __raw_writew((value), (dev)->regs + SDMMC_##reg)
/* 64-bit FIFO access macros */
#ifdef readq
#define mci_readq(dev, reg) \
- __raw_readq(dev->regs + SDMMC_##reg)
+ __raw_readq((dev)->regs + SDMMC_##reg)
#define mci_writeq(dev, reg, value) \
- __raw_writeq((value), dev->regs + SDMMC_##reg)
+ __raw_writeq((value), (dev)->regs + SDMMC_##reg)
#else
/*
* Dummy readq implementation for architectures that don't define it.
@@ -160,9 +159,9 @@
* rest of the code free from ifdefs.
*/
#define mci_readq(dev, reg) \
- (*(volatile u64 __force *)(dev->regs + SDMMC_##reg))
+ (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg))
#define mci_writeq(dev, reg, value) \
- (*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value)
+ (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg) = (value))
#endif
#endif /* _DW_MMC_H_ */
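
The dw_mmc.h hunk above only adds parentheses around the dev macro argument; that matters as soon as the argument is an expression rather than a plain identifier, because -> binds more tightly than most operators a caller might use. A minimal illustration (the struct name is invented for the sketch):

	struct dw { volatile unsigned int *regs; };

	#define BAD_READL(dev, off)	(*(volatile unsigned int *)(dev->regs + (off)))
	#define GOOD_READL(dev, off)	(*(volatile unsigned int *)((dev)->regs + (off)))

	/*
	 * With a plain identifier both forms behave the same, but with an
	 * expression argument the unparenthesised macro mis-binds:
	 *
	 *   BAD_READL(flag ? a : b, 1)  expands around  (flag ? a : b->regs + (1))
	 *   GOOD_READL(flag ? a : b, 1) expands around  ((flag ? a : b)->regs + (1))
	 */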
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 7721de942c6..56e9a416826 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -226,6 +226,9 @@ static void __devinit mmci_dma_setup(struct mmci_host *host)
return;
}
+ /* initialize pre request cookie */
+ host->next_data.cookie = 1;
+
/* Try to acquire a generic DMA engine slave channel */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -335,7 +338,8 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
dir = DMA_FROM_DEVICE;
}
- dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+ if (!data->host_cookie)
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
/*
* Use of DMA with scatter-gather is impossible.
@@ -353,7 +357,8 @@ static void mmci_dma_data_error(struct mmci_host *host)
dmaengine_terminate_all(host->dma_current);
}
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+ struct mmci_host_next *next)
{
struct variant_data *variant = host->variant;
struct dma_slave_config conf = {
@@ -364,13 +369,20 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
};
- struct mmc_data *data = host->data;
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *desc;
int nr_sg;
- host->dma_current = NULL;
+ /* Check if next job is already prepared */
+ if (data->host_cookie && !next &&
+ host->dma_current && host->dma_desc_current)
+ return 0;
+
+ if (!next) {
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
+ }
if (data->flags & MMC_DATA_READ) {
conf.direction = DMA_FROM_DEVICE;
@@ -385,7 +397,7 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
return -EINVAL;
/* If less than or equal to the fifo size, don't bother with DMA */
- if (host->size <= variant->fifosize)
+ if (data->blksz * data->blocks <= variant->fifosize)
return -EINVAL;
device = chan->device;
@@ -399,14 +411,38 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
if (!desc)
goto unmap_exit;
- /* Okay, go for it. */
- host->dma_current = chan;
+ if (next) {
+ next->dma_chan = chan;
+ next->dma_desc = desc;
+ } else {
+ host->dma_current = chan;
+ host->dma_desc_current = desc;
+ }
+
+ return 0;
+
+ unmap_exit:
+ if (!next)
+ dmaengine_terminate_all(chan);
+ dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ return -ENOMEM;
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ int ret;
+ struct mmc_data *data = host->data;
+ ret = mmci_dma_prep_data(host, host->data, NULL);
+ if (ret)
+ return ret;
+
+ /* Okay, go for it. */
dev_vdbg(mmc_dev(host->mmc),
"Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
data->sg_len, data->blksz, data->blocks, data->flags);
- dmaengine_submit(desc);
- dma_async_issue_pending(chan);
+ dmaengine_submit(host->dma_desc_current);
+ dma_async_issue_pending(host->dma_current);
datactrl |= MCI_DPSM_DMAENABLE;
@@ -421,14 +457,90 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
host->base + MMCIMASK0);
return 0;
+}
-unmap_exit:
- dmaengine_terminate_all(chan);
- dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
- return -ENOMEM;
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+ struct mmci_host_next *next = &host->next_data;
+
+ if (data->host_cookie && data->host_cookie != next->cookie) {
+ printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
+ " host->next_data.cookie %d\n",
+ __func__, data->host_cookie, host->next_data.cookie);
+ data->host_cookie = 0;
+ }
+
+ if (!data->host_cookie)
+ return;
+
+ host->dma_desc_current = next->dma_desc;
+ host->dma_current = next->dma_chan;
+
+ next->dma_desc = NULL;
+ next->dma_chan = NULL;
+}
+
+static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+ struct mmci_host_next *nd = &host->next_data;
+
+ if (!data)
+ return;
+
+ if (data->host_cookie) {
+ data->host_cookie = 0;
+ return;
+ }
+
+ /* if configured for DMA */
+ if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
+ ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
+ if (mmci_dma_prep_data(host, data, nd))
+ data->host_cookie = 0;
+ else
+ data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
+ }
+}
+
+static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+ struct dma_chan *chan;
+ enum dma_data_direction dir;
+
+ if (!data)
+ return;
+
+ if (data->flags & MMC_DATA_READ) {
+ dir = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ dir = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+ /* if configured for DMA */
+ if (chan) {
+ if (err)
+ dmaengine_terminate_all(chan);
+ if (err || data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, dir);
+ mrq->data->host_cookie = 0;
+ }
}
+
#else
/* Blank functions if the DMA engine is not available */
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}
@@ -449,6 +561,10 @@ static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datac
{
return -ENOSYS;
}
+
+#define mmci_pre_request NULL
+#define mmci_post_request NULL
+
#endif
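
mmci_pre_request()/mmci_post_request() plug into the mmc core's asynchronous request preparation: while one request is on the bus the core hands the next one to .pre_req, the host maps its scatterlist and tags data->host_cookie with a monotonically increasing positive cookie, and .request later reuses that work only if the cookie still matches host->next_data.cookie; .post_req undoes the mapping. The toy model below captures just the cookie bookkeeping; prepare_buffer(), issue_transfer() and the struct names are invented for the sketch and are not the mmc API.

	#include <stdio.h>

	struct data { int host_cookie; int prepared; };
	struct next_slot { int cookie; int have_prepared; };

	static void prepare_buffer(struct data *d) { d->prepared = 1; }
	static void issue_transfer(struct data *d) { printf("issue (prepared=%d)\n", d->prepared); }

	static void pre_req(struct next_slot *next, struct data *d)
	{
		prepare_buffer(d);			/* e.g. dma_map_sg() */
		if (++next->cookie <= 0)		/* keep the cookie positive on wrap */
			next->cookie = 1;
		d->host_cookie = next->cookie;		/* mark as prepared in advance */
		next->have_prepared = 1;
	}

	static void request(struct next_slot *next, struct data *d)
	{
		if (d->host_cookie && d->host_cookie == next->cookie && next->have_prepared)
			next->have_prepared = 0;	/* reuse the early preparation */
		else
			prepare_buffer(d);		/* stale or missing: prepare now */
		issue_transfer(d);
	}

	static void post_req(struct data *d)
	{
		d->prepared = 0;			/* e.g. dma_unmap_sg() */
		d->host_cookie = 0;
	}

	int main(void)
	{
		struct next_slot next = { 0, 0 };
		struct data a = { 0, 0 }, b = { 0, 0 };

		pre_req(&next, &b);	/* prepare b while a is "in flight" */
		request(&next, &a);	/* a was never pre-prepared */
		post_req(&a);
		request(&next, &b);	/* b reuses the pre_req work */
		post_req(&b);
		return 0;
	}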
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
@@ -582,6 +698,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
data->error = -EILSEQ;
} else if (status & MCI_DATATIMEOUT) {
data->error = -ETIMEDOUT;
+ } else if (status & MCI_STARTBITERR) {
+ data->error = -ECOMM;
} else if (status & MCI_TXUNDERRUN) {
data->error = -EIO;
} else if (status & MCI_RXOVERRUN) {
@@ -870,6 +988,9 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->mrq = mrq;
+ if (mrq->data)
+ mmci_get_next_data(host, mrq->data);
+
if (mrq->data && mrq->data->flags & MMC_DATA_READ)
mmci_start_data(host, mrq->data);
@@ -984,6 +1105,8 @@ static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
static const struct mmc_host_ops mmci_ops = {
.request = mmci_request,
+ .pre_req = mmci_pre_request,
+ .post_req = mmci_post_request,
.set_ios = mmci_set_ios,
.get_ro = mmci_get_ro,
.get_cd = mmci_get_cd,
@@ -1061,7 +1184,15 @@ static int __devinit mmci_probe(struct amba_device *dev,
}
mmc->ops = &mmci_ops;
- mmc->f_min = (host->mclk + 511) / 512;
+ /*
+ * The ARM and ST versions of the block have slightly different
+ * clock divider equations, so the largest division ratio (and
+ * hence the minimum bus frequency) differs too.
+ */
+ if (variant->st_clkdiv)
+ mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
+ else
+ mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
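
The two divisors follow from the different clock equations of the two blocks; assuming the equations used elsewhere in this driver (f = mclk / (2 * (clkdiv + 1)) for the ARM variant, f = mclk / (clkdiv + 2) for the ST one, each with an 8-bit clkdiv field), the largest division ratio is 512 or 257 respectively, which is exactly what f_min is computed from:

	ARM variant: f = mclk / (2 * (clkdiv + 1)),  clkdiv <= 255  ->  largest ratio 512
	ST variant:  f = mclk / (clkdiv + 2),        clkdiv <= 255  ->  largest ratio 257

	e.g. mclk = 100 MHz:
	  f_min(ARM) = DIV_ROUND_UP(100000000, 512) = 195313 Hz
	  f_min(ST)  = DIV_ROUND_UP(100000000, 257) = 389106 Hz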
/*
* If the platform data supplies a maximum operating
* frequency, this takes precedence. Else, we fall back
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index bb32e21c09d..79e4143ab9d 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -86,6 +86,7 @@
#define MCI_CMDRESPEND (1 << 6)
#define MCI_CMDSENT (1 << 7)
#define MCI_DATAEND (1 << 8)
+#define MCI_STARTBITERR (1 << 9)
#define MCI_DATABLOCKEND (1 << 10)
#define MCI_CMDACTIVE (1 << 11)
#define MCI_TXACTIVE (1 << 12)
@@ -112,6 +113,7 @@
#define MCI_CMDRESPENDCLR (1 << 6)
#define MCI_CMDSENTCLR (1 << 7)
#define MCI_DATAENDCLR (1 << 8)
+#define MCI_STARTBITERRCLR (1 << 9)
#define MCI_DATABLOCKENDCLR (1 << 10)
/* Extended status bits for the ST Micro variants */
#define MCI_ST_SDIOITC (1 << 22)
@@ -127,6 +129,7 @@
#define MCI_CMDRESPENDMASK (1 << 6)
#define MCI_CMDSENTMASK (1 << 7)
#define MCI_DATAENDMASK (1 << 8)
+#define MCI_STARTBITERRMASK (1 << 9)
#define MCI_DATABLOCKENDMASK (1 << 10)
#define MCI_CMDACTIVEMASK (1 << 11)
#define MCI_TXACTIVEMASK (1 << 12)
@@ -150,7 +153,7 @@
#define MCI_IRQENABLE \
(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
- MCI_CMDRESPENDMASK|MCI_CMDSENTMASK)
+ MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_STARTBITERRMASK)
/* These interrupts are directed to IRQ1 when two IRQ lines are available */
#define MCI_IRQ1MASK \
@@ -163,6 +166,12 @@ struct clk;
struct variant_data;
struct dma_chan;
+struct mmci_host_next {
+ struct dma_async_tx_descriptor *dma_desc;
+ struct dma_chan *dma_chan;
+ s32 cookie;
+};
+
struct mmci_host {
phys_addr_t phybase;
void __iomem *base;
@@ -200,6 +209,8 @@ struct mmci_host {
struct dma_chan *dma_current;
struct dma_chan *dma_rx_channel;
struct dma_chan *dma_tx_channel;
+ struct dma_async_tx_descriptor *dma_desc_current;
+ struct mmci_host_next next_data;
#define dma_inprogress(host) ((host)->dma_current)
#else
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index cc20e025932..14aa213b00d 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -715,13 +715,13 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
int burstlen, ret;
/*
- * use burstlen of 64 in 4 bit mode (--> reg value 0)
- * use burstlen of 16 in 1 bit mode (--> reg value 16)
+ * use burstlen of 64 (16 words) in 4 bit mode (--> reg value 0)
+ * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)
*/
if (ios->bus_width == MMC_BUS_WIDTH_4)
- burstlen = 64;
- else
burstlen = 16;
+ else
+ burstlen = 4;
if (mxcmci_use_dma(host) && burstlen != host->burstlen) {
host->burstlen = burstlen;
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 99d39a6a103..d513d47364d 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -564,40 +564,38 @@ static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
{
- unsigned int ssp_rate, bit_rate;
- u32 div1, div2;
+ unsigned int ssp_clk, ssp_sck;
+ u32 clock_divide, clock_rate;
u32 val;
- ssp_rate = clk_get_rate(host->clk);
+ ssp_clk = clk_get_rate(host->clk);
- for (div1 = 2; div1 < 254; div1 += 2) {
- div2 = ssp_rate / rate / div1;
- if (div2 < 0x100)
+ for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) {
+ clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide);
+ clock_rate = (clock_rate > 0) ? clock_rate - 1 : 0;
+ if (clock_rate <= 255)
break;
}
- if (div1 >= 254) {
+ if (clock_divide > 254) {
dev_err(mmc_dev(host->mmc),
"%s: cannot set clock to %d\n", __func__, rate);
return;
}
- if (div2 == 0)
- bit_rate = ssp_rate / div1;
- else
- bit_rate = ssp_rate / div1 / div2;
+ ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
val = readl(host->base + HW_SSP_TIMING);
val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
- val |= BF_SSP(div1, TIMING_CLOCK_DIVIDE);
- val |= BF_SSP(div2 - 1, TIMING_CLOCK_RATE);
+ val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
+ val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
writel(val, host->base + HW_SSP_TIMING);
- host->clk_rate = bit_rate;
+ host->clk_rate = ssp_sck;
dev_dbg(mmc_dev(host->mmc),
- "%s: div1 %d, div2 %d, ssp %d, bit %d, rate %d\n",
- __func__, div1, div2, ssp_rate, bit_rate, rate);
+ "%s: clock_divide %d, clock_rate %d, ssp_clk %d, rate_actual %d, rate_requested %d\n",
+ __func__, clock_divide, clock_rate, ssp_clk, ssp_sck, rate);
}
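
The rewritten divider search walks even values of CLOCK_DIVIDE and, for each, derives the largest CLOCK_RATE that still meets the requested frequency, so the resulting SCK never overshoots the request. A stand-alone version of the same loop, with 96 MHz used purely as an example SSP clock:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	static unsigned int pick_sck(unsigned int ssp_clk, unsigned int rate)
	{
		unsigned int divide, rate_field;

		for (divide = 2; divide <= 254; divide += 2) {
			rate_field = DIV_ROUND_UP(ssp_clk, rate * divide);
			rate_field = rate_field ? rate_field - 1 : 0;
			if (rate_field <= 255)
				return ssp_clk / divide / (1 + rate_field);
		}
		return 0;	/* no valid divider setting */
	}

	int main(void)
	{
		printf("%u\n", pick_sck(96000000, 400000));	/* 96 MHz / 2 / 120 = 400000 */
		printf("%u\n", pick_sck(96000000, 25000000));	/* 96 MHz / 2 / 2 = 24000000 */
		return 0;
	}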
static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index e2aecb7f1d5..ab66f2454dc 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -25,6 +25,11 @@
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
+/* For archs that don't support NO_IRQ (such as mips), provide a dummy value */
+#ifndef NO_IRQ
+#define NO_IRQ 0
+#endif
+
MODULE_LICENSE("GPL");
enum {
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 5b2e2155b41..21e4a799df4 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
@@ -33,6 +34,7 @@
#include <linux/semaphore.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <plat/dma.h>
#include <mach/hardware.h>
#include <plat/board.h>
@@ -116,15 +118,13 @@
#define OMAP_MMC4_DEVID 3
#define OMAP_MMC5_DEVID 4
+#define MMC_AUTOSUSPEND_DELAY 100
#define MMC_TIMEOUT_MS 20
#define OMAP_MMC_MASTER_CLOCK 96000000
+#define OMAP_MMC_MIN_CLOCK 400000
+#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
-/* Timeouts for entering power saving states on inactivity, msec */
-#define OMAP_MMC_DISABLED_TIMEOUT 100
-#define OMAP_MMC_SLEEP_TIMEOUT 1000
-#define OMAP_MMC_OFF_TIMEOUT 8000
-
/*
* One controller can have multiple slots, like on some omap boards using
* omap.c controller driver. Luckily this is not currently done on any known
@@ -141,6 +141,11 @@
#define OMAP_HSMMC_WRITE(base, reg, val) \
__raw_writel((val), (base) + OMAP_HSMMC_##reg)
+struct omap_hsmmc_next {
+ unsigned int dma_len;
+ s32 cookie;
+};
+
struct omap_hsmmc_host {
struct device *dev;
struct mmc_host *mmc;
@@ -148,7 +153,6 @@ struct omap_hsmmc_host {
struct mmc_command *cmd;
struct mmc_data *data;
struct clk *fclk;
- struct clk *iclk;
struct clk *dbclk;
/*
* vcc == configured supply
@@ -184,6 +188,7 @@ struct omap_hsmmc_host {
int reqs_blocked;
int use_reg;
int req_in_progress;
+ struct omap_hsmmc_next next_data;
struct omap_mmc_platform_data *pdata;
};
@@ -429,7 +434,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
return -EINVAL;
}
}
- mmc_slot(host).ocr_mask = mmc_regulator_get_ocrmask(reg);
/* Allow an aux regulator */
reg = regulator_get(host->dev, "vmmc_aux");
@@ -549,6 +553,15 @@ static void omap_hsmmc_gpio_free(struct omap_mmc_platform_data *pdata)
}
/*
+ * Start clock to the card
+ */
+static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host)
+{
+ OMAP_HSMMC_WRITE(host->base, SYSCTL,
+ OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
+}
+
+/*
* Stop clock to the card
*/
static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
@@ -585,6 +598,81 @@ static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
}
+/* Calculate divisor for the given clock frequency */
+static u16 calc_divisor(struct mmc_ios *ios)
+{
+ u16 dsor = 0;
+
+ if (ios->clock) {
+ dsor = DIV_ROUND_UP(OMAP_MMC_MASTER_CLOCK, ios->clock);
+ if (dsor > 250)
+ dsor = 250;
+ }
+
+ return dsor;
+}
+
+static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
+{
+ struct mmc_ios *ios = &host->mmc->ios;
+ unsigned long regval;
+ unsigned long timeout;
+
+ dev_dbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);
+
+ omap_hsmmc_stop_clock(host);
+
+ regval = OMAP_HSMMC_READ(host->base, SYSCTL);
+ regval = regval & ~(CLKD_MASK | DTO_MASK);
+ regval = regval | (calc_divisor(ios) << 6) | (DTO << 16);
+ OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
+ OMAP_HSMMC_WRITE(host->base, SYSCTL,
+ OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
+
+ /* Wait till the ICS bit is set */
+ timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
+ while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
+ && time_before(jiffies, timeout))
+ cpu_relax();
+
+ omap_hsmmc_start_clock(host);
+}
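
calc_divisor() rounds the divisor up, so the card clock always comes out at or below the requested frequency, and the historical cap of 250 is kept. With the 96 MHz reference clock defined above:

	ios->clock = 400 kHz  ->  dsor = DIV_ROUND_UP(96000000, 400000)   = 240  ->  96 MHz / 240 = 400 kHz
	ios->clock = 52 MHz   ->  dsor = DIV_ROUND_UP(96000000, 52000000) =   2  ->  96 MHz / 2   =  48 MHz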
+
+static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
+{
+ struct mmc_ios *ios = &host->mmc->ios;
+ u32 con;
+
+ con = OMAP_HSMMC_READ(host->base, CON);
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_8:
+ OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
+ break;
+ case MMC_BUS_WIDTH_4:
+ OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
+ break;
+ case MMC_BUS_WIDTH_1:
+ OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
+ break;
+ }
+}
+
+static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
+{
+ struct mmc_ios *ios = &host->mmc->ios;
+ u32 con;
+
+ con = OMAP_HSMMC_READ(host->base, CON);
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ OMAP_HSMMC_WRITE(host->base, CON, con | OD);
+ else
+ OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
+}
+
#ifdef CONFIG_PM
/*
@@ -596,8 +684,7 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
struct mmc_ios *ios = &host->mmc->ios;
struct omap_mmc_platform_data *pdata = host->pdata;
int context_loss = 0;
- u32 hctl, capa, con;
- u16 dsor = 0;
+ u32 hctl, capa;
unsigned long timeout;
if (pdata->get_context_loss_count) {
@@ -659,54 +746,12 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
if (host->power_mode == MMC_POWER_OFF)
goto out;
- con = OMAP_HSMMC_READ(host->base, CON);
- switch (ios->bus_width) {
- case MMC_BUS_WIDTH_8:
- OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
- break;
- case MMC_BUS_WIDTH_4:
- OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
- OMAP_HSMMC_WRITE(host->base, HCTL,
- OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
- break;
- case MMC_BUS_WIDTH_1:
- OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
- OMAP_HSMMC_WRITE(host->base, HCTL,
- OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
- break;
- }
-
- if (ios->clock) {
- dsor = OMAP_MMC_MASTER_CLOCK / ios->clock;
- if (dsor < 1)
- dsor = 1;
-
- if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock)
- dsor++;
-
- if (dsor > 250)
- dsor = 250;
- }
-
- OMAP_HSMMC_WRITE(host->base, SYSCTL,
- OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
- OMAP_HSMMC_WRITE(host->base, SYSCTL, (dsor << 6) | (DTO << 16));
- OMAP_HSMMC_WRITE(host->base, SYSCTL,
- OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
+ omap_hsmmc_set_bus_width(host);
- timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
- while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
- && time_before(jiffies, timeout))
- ;
+ omap_hsmmc_set_clock(host);
- OMAP_HSMMC_WRITE(host->base, SYSCTL,
- OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
+ omap_hsmmc_set_bus_mode(host);
- con = OMAP_HSMMC_READ(host->base, CON);
- if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
- OMAP_HSMMC_WRITE(host->base, CON, con | OD);
- else
- OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
out:
host->context_loss = context_loss;
@@ -962,7 +1007,8 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
spin_unlock(&host->irq_lock);
if (host->use_dma && dma_ch != -1) {
- dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len,
+ dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
+ host->data->sg_len,
omap_hsmmc_get_dma_dir(host, host->data));
omap_free_dma(dma_ch);
}
@@ -973,14 +1019,14 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
* Readable error output
*/
#ifdef CONFIG_MMC_DEBUG
-static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status)
+static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
{
/* --- means reserved bit without definition at documentation */
static const char *omap_hsmmc_status_bits[] = {
- "CC", "TC", "BGE", "---", "BWR", "BRR", "---", "---", "CIRQ",
- "OBI", "---", "---", "---", "---", "---", "ERRI", "CTO", "CCRC",
- "CEB", "CIE", "DTO", "DCRC", "DEB", "---", "ACE", "---",
- "---", "---", "---", "CERR", "CERR", "BADA", "---", "---", "---"
+ "CC" , "TC" , "BGE", "---", "BWR" , "BRR" , "---" , "---" ,
+ "CIRQ", "OBI" , "---", "---", "---" , "---" , "---" , "ERRI",
+ "CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" ,
+ "ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---"
};
char res[256];
char *buf = res;
@@ -997,6 +1043,11 @@ static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status)
dev_dbg(mmc_dev(host->mmc), "%s\n", res);
}
+#else
+static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
+ u32 status)
+{
+}
#endif /* CONFIG_MMC_DEBUG */
/*
@@ -1055,9 +1106,7 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
if (status & ERR) {
-#ifdef CONFIG_MMC_DEBUG
- omap_hsmmc_report_irq(host, status);
-#endif
+ omap_hsmmc_dbg_report_irq(host, status);
if ((status & CMD_TIMEOUT) ||
(status & CMD_CRC)) {
if (host->cmd) {
@@ -1155,8 +1204,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
int ret;
/* Disable the clocks */
- clk_disable(host->fclk);
- clk_disable(host->iclk);
+ pm_runtime_put_sync(host->dev);
if (host->got_dbclk)
clk_disable(host->dbclk);
@@ -1167,8 +1215,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
if (!ret)
ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
vdd);
- clk_enable(host->iclk);
- clk_enable(host->fclk);
+ pm_runtime_get_sync(host->dev);
if (host->got_dbclk)
clk_enable(host->dbclk);
@@ -1322,7 +1369,7 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
{
struct omap_hsmmc_host *host = cb_data;
- struct mmc_data *data = host->mrq->data;
+ struct mmc_data *data;
int dma_ch, req_in_progress;
if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
@@ -1337,6 +1384,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
return;
}
+ data = host->mrq->data;
host->dma_sg_idx++;
if (host->dma_sg_idx < host->dma_len) {
/* Fire up the next transfer. */
@@ -1346,8 +1394,9 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
return;
}
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
- omap_hsmmc_get_dma_dir(host, data));
+ if (!data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
req_in_progress = host->req_in_progress;
dma_ch = host->dma_ch;
@@ -1365,6 +1414,45 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
}
}
+static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
+ struct mmc_data *data,
+ struct omap_hsmmc_next *next)
+{
+ int dma_len;
+
+ if (!next && data->host_cookie &&
+ data->host_cookie != host->next_data.cookie) {
+ printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
+ " host->next_data.cookie %d\n",
+ __func__, data->host_cookie, host->next_data.cookie);
+ data->host_cookie = 0;
+ }
+
+ /* Check if next job is already prepared */
+ if (next ||
+ (!next && data->host_cookie != host->next_data.cookie)) {
+ dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
+
+ } else {
+ dma_len = host->next_data.dma_len;
+ host->next_data.dma_len = 0;
+ }
+
+ if (dma_len == 0)
+ return -EINVAL;
+
+ if (next) {
+ next->dma_len = dma_len;
+ data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
+ } else
+ host->dma_len = dma_len;
+
+ return 0;
+}
+
/*
* Routine to configure and start DMA for the MMC card
*/
@@ -1398,9 +1486,10 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
mmc_hostname(host->mmc), ret);
return ret;
}
+ ret = omap_hsmmc_pre_dma_transfer(host, data, NULL);
+ if (ret)
+ return ret;
- host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, omap_hsmmc_get_dma_dir(host, data));
host->dma_ch = dma_ch;
host->dma_sg_idx = 0;
@@ -1480,6 +1569,35 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
return 0;
}
+static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (host->use_dma) {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
+ data->host_cookie = 0;
+ }
+}
+
+static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+ if (mrq->data->host_cookie) {
+ mrq->data->host_cookie = 0;
+ return;
+ }
+
+ if (host->use_dma)
+ if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
+ &host->next_data))
+ mrq->data->host_cookie = 0;
+}
+
/*
* Request function. for read/write operation
*/
@@ -1528,13 +1646,9 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
- u16 dsor = 0;
- unsigned long regval;
- unsigned long timeout;
- u32 con;
int do_send_init_stream = 0;
- mmc_host_enable(host->mmc);
+ pm_runtime_get_sync(host->dev);
if (ios->power_mode != host->power_mode) {
switch (ios->power_mode) {
@@ -1557,22 +1671,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* FIXME: set registers based only on changes to ios */
- con = OMAP_HSMMC_READ(host->base, CON);
- switch (mmc->ios.bus_width) {
- case MMC_BUS_WIDTH_8:
- OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
- break;
- case MMC_BUS_WIDTH_4:
- OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
- OMAP_HSMMC_WRITE(host->base, HCTL,
- OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
- break;
- case MMC_BUS_WIDTH_1:
- OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
- OMAP_HSMMC_WRITE(host->base, HCTL,
- OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
- break;
- }
+ omap_hsmmc_set_bus_width(host);
if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
/* Only MMC1 can interface at 3V without some flavor
@@ -1592,47 +1691,14 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
- if (ios->clock) {
- dsor = OMAP_MMC_MASTER_CLOCK / ios->clock;
- if (dsor < 1)
- dsor = 1;
-
- if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock)
- dsor++;
-
- if (dsor > 250)
- dsor = 250;
- }
- omap_hsmmc_stop_clock(host);
- regval = OMAP_HSMMC_READ(host->base, SYSCTL);
- regval = regval & ~(CLKD_MASK);
- regval = regval | (dsor << 6) | (DTO << 16);
- OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
- OMAP_HSMMC_WRITE(host->base, SYSCTL,
- OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
-
- /* Wait till the ICS bit is set */
- timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
- while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
- && time_before(jiffies, timeout))
- msleep(1);
-
- OMAP_HSMMC_WRITE(host->base, SYSCTL,
- OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
+ omap_hsmmc_set_clock(host);
if (do_send_init_stream)
send_init_stream(host);
- con = OMAP_HSMMC_READ(host->base, CON);
- if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
- OMAP_HSMMC_WRITE(host->base, CON, con | OD);
- else
- OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
+ omap_hsmmc_set_bus_mode(host);
- if (host->power_mode == MMC_POWER_OFF)
- mmc_host_disable(host->mmc);
- else
- mmc_host_lazy_disable(host->mmc);
+ pm_runtime_put_autosuspend(host->dev);
}
static int omap_hsmmc_get_cd(struct mmc_host *mmc)
@@ -1688,230 +1754,12 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
set_sd_bus_power(host);
}
-/*
- * Dynamic power saving handling, FSM:
- * ENABLED -> DISABLED -> CARDSLEEP / REGSLEEP -> OFF
- * ^___________| | |
- * |______________________|______________________|
- *
- * ENABLED: mmc host is fully functional
- * DISABLED: fclk is off
- * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep
- * REGSLEEP: fclk is off, voltage regulator is asleep
- * OFF: fclk is off, voltage regulator is off
- *
- * Transition handlers return the timeout for the next state transition
- * or negative error.
- */
-
-enum {ENABLED = 0, DISABLED, CARDSLEEP, REGSLEEP, OFF};
-
-/* Handler for [ENABLED -> DISABLED] transition */
-static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host)
-{
- omap_hsmmc_context_save(host);
- clk_disable(host->fclk);
- host->dpm_state = DISABLED;
-
- dev_dbg(mmc_dev(host->mmc), "ENABLED -> DISABLED\n");
-
- if (host->power_mode == MMC_POWER_OFF)
- return 0;
-
- return OMAP_MMC_SLEEP_TIMEOUT;
-}
-
-/* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */
-static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host)
-{
- int err, new_state;
-
- if (!mmc_try_claim_host(host->mmc))
- return 0;
-
- clk_enable(host->fclk);
- omap_hsmmc_context_restore(host);
- if (mmc_card_can_sleep(host->mmc)) {
- err = mmc_card_sleep(host->mmc);
- if (err < 0) {
- clk_disable(host->fclk);
- mmc_release_host(host->mmc);
- return err;
- }
- new_state = CARDSLEEP;
- } else {
- new_state = REGSLEEP;
- }
- if (mmc_slot(host).set_sleep)
- mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0,
- new_state == CARDSLEEP);
- /* FIXME: turn off bus power and perhaps interrupts too */
- clk_disable(host->fclk);
- host->dpm_state = new_state;
-
- mmc_release_host(host->mmc);
-
- dev_dbg(mmc_dev(host->mmc), "DISABLED -> %s\n",
- host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
-
- if (mmc_slot(host).no_off)
- return 0;
-
- if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
- mmc_slot(host).card_detect ||
- (mmc_slot(host).get_cover_state &&
- mmc_slot(host).get_cover_state(host->dev, host->slot_id)))
- return OMAP_MMC_OFF_TIMEOUT;
-
- return 0;
-}
-
-/* Handler for [REGSLEEP / CARDSLEEP -> OFF] transition */
-static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host)
-{
- if (!mmc_try_claim_host(host->mmc))
- return 0;
-
- if (mmc_slot(host).no_off)
- return 0;
-
- if (!((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
- mmc_slot(host).card_detect ||
- (mmc_slot(host).get_cover_state &&
- mmc_slot(host).get_cover_state(host->dev, host->slot_id)))) {
- mmc_release_host(host->mmc);
- return 0;
- }
-
- mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
- host->vdd = 0;
- host->power_mode = MMC_POWER_OFF;
-
- dev_dbg(mmc_dev(host->mmc), "%s -> OFF\n",
- host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
-
- host->dpm_state = OFF;
-
- mmc_release_host(host->mmc);
-
- return 0;
-}
-
-/* Handler for [DISABLED -> ENABLED] transition */
-static int omap_hsmmc_disabled_to_enabled(struct omap_hsmmc_host *host)
-{
- int err;
-
- err = clk_enable(host->fclk);
- if (err < 0)
- return err;
-
- omap_hsmmc_context_restore(host);
- host->dpm_state = ENABLED;
-
- dev_dbg(mmc_dev(host->mmc), "DISABLED -> ENABLED\n");
-
- return 0;
-}
-
-/* Handler for [SLEEP -> ENABLED] transition */
-static int omap_hsmmc_sleep_to_enabled(struct omap_hsmmc_host *host)
-{
- if (!mmc_try_claim_host(host->mmc))
- return 0;
-
- clk_enable(host->fclk);
- omap_hsmmc_context_restore(host);
- if (mmc_slot(host).set_sleep)
- mmc_slot(host).set_sleep(host->dev, host->slot_id, 0,
- host->vdd, host->dpm_state == CARDSLEEP);
- if (mmc_card_can_sleep(host->mmc))
- mmc_card_awake(host->mmc);
-
- dev_dbg(mmc_dev(host->mmc), "%s -> ENABLED\n",
- host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
-
- host->dpm_state = ENABLED;
-
- mmc_release_host(host->mmc);
-
- return 0;
-}
-
-/* Handler for [OFF -> ENABLED] transition */
-static int omap_hsmmc_off_to_enabled(struct omap_hsmmc_host *host)
-{
- clk_enable(host->fclk);
-
- omap_hsmmc_context_restore(host);
- omap_hsmmc_conf_bus_power(host);
- mmc_power_restore_host(host->mmc);
-
- host->dpm_state = ENABLED;
-
- dev_dbg(mmc_dev(host->mmc), "OFF -> ENABLED\n");
-
- return 0;
-}
-
-/*
- * Bring MMC host to ENABLED from any other PM state.
- */
-static int omap_hsmmc_enable(struct mmc_host *mmc)
-{
- struct omap_hsmmc_host *host = mmc_priv(mmc);
-
- switch (host->dpm_state) {
- case DISABLED:
- return omap_hsmmc_disabled_to_enabled(host);
- case CARDSLEEP:
- case REGSLEEP:
- return omap_hsmmc_sleep_to_enabled(host);
- case OFF:
- return omap_hsmmc_off_to_enabled(host);
- default:
- dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n");
- return -EINVAL;
- }
-}
-
-/*
- * Bring MMC host in PM state (one level deeper).
- */
-static int omap_hsmmc_disable(struct mmc_host *mmc, int lazy)
-{
- struct omap_hsmmc_host *host = mmc_priv(mmc);
-
- switch (host->dpm_state) {
- case ENABLED: {
- int delay;
-
- delay = omap_hsmmc_enabled_to_disabled(host);
- if (lazy || delay < 0)
- return delay;
- return 0;
- }
- case DISABLED:
- return omap_hsmmc_disabled_to_sleep(host);
- case CARDSLEEP:
- case REGSLEEP:
- return omap_hsmmc_sleep_to_off(host);
- default:
- dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n");
- return -EINVAL;
- }
-}
-
static int omap_hsmmc_enable_fclk(struct mmc_host *mmc)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
- int err;
- err = clk_enable(host->fclk);
- if (err)
- return err;
- dev_dbg(mmc_dev(host->mmc), "mmc_fclk: enabled\n");
- omap_hsmmc_context_restore(host);
+ pm_runtime_get_sync(host->dev);
+
return 0;
}
@@ -1919,26 +1767,17 @@ static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, int lazy)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
- omap_hsmmc_context_save(host);
- clk_disable(host->fclk);
- dev_dbg(mmc_dev(host->mmc), "mmc_fclk: disabled\n");
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
+
return 0;
}
static const struct mmc_host_ops omap_hsmmc_ops = {
.enable = omap_hsmmc_enable_fclk,
.disable = omap_hsmmc_disable_fclk,
- .request = omap_hsmmc_request,
- .set_ios = omap_hsmmc_set_ios,
- .get_cd = omap_hsmmc_get_cd,
- .get_ro = omap_hsmmc_get_ro,
- .init_card = omap_hsmmc_init_card,
- /* NYET -- enable_sdio_irq */
-};
-
-static const struct mmc_host_ops omap_hsmmc_ps_ops = {
- .enable = omap_hsmmc_enable,
- .disable = omap_hsmmc_disable,
+ .post_req = omap_hsmmc_post_req,
+ .pre_req = omap_hsmmc_pre_req,
.request = omap_hsmmc_request,
.set_ios = omap_hsmmc_set_ios,
.get_cd = omap_hsmmc_get_cd,
@@ -1968,15 +1807,12 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
host->dpm_state, mmc->nesting_cnt,
host->context_loss, context_loss);
- if (host->suspended || host->dpm_state == OFF) {
+ if (host->suspended) {
seq_printf(s, "host suspended, can't read registers\n");
return 0;
}
- if (clk_enable(host->fclk) != 0) {
- seq_printf(s, "can't read the regs\n");
- return 0;
- }
+ pm_runtime_get_sync(host->dev);
seq_printf(s, "SYSCONFIG:\t0x%08x\n",
OMAP_HSMMC_READ(host->base, SYSCONFIG));
@@ -1993,7 +1829,8 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
seq_printf(s, "CAPA:\t\t0x%08x\n",
OMAP_HSMMC_READ(host->base, CAPA));
- clk_disable(host->fclk);
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
return 0;
}
@@ -2077,14 +1914,12 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
host->mapbase = res->start;
host->base = ioremap(host->mapbase, SZ_4K);
host->power_mode = MMC_POWER_OFF;
+ host->next_data.cookie = 1;
platform_set_drvdata(pdev, host);
INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect);
- if (mmc_slot(host).power_saving)
- mmc->ops = &omap_hsmmc_ps_ops;
- else
- mmc->ops = &omap_hsmmc_ops;
+ mmc->ops = &omap_hsmmc_ops;
/*
* If regulator_disable can only put vcc_aux to sleep then there is
@@ -2093,44 +1928,26 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
if (mmc_slot(host).vcc_aux_disable_is_sleep)
mmc_slot(host).no_off = 1;
- mmc->f_min = 400000;
- mmc->f_max = 52000000;
+ mmc->f_min = OMAP_MMC_MIN_CLOCK;
+ mmc->f_max = OMAP_MMC_MAX_CLOCK;
spin_lock_init(&host->irq_lock);
- host->iclk = clk_get(&pdev->dev, "ick");
- if (IS_ERR(host->iclk)) {
- ret = PTR_ERR(host->iclk);
- host->iclk = NULL;
- goto err1;
- }
host->fclk = clk_get(&pdev->dev, "fck");
if (IS_ERR(host->fclk)) {
ret = PTR_ERR(host->fclk);
host->fclk = NULL;
- clk_put(host->iclk);
goto err1;
}
omap_hsmmc_context_save(host);
mmc->caps |= MMC_CAP_DISABLE;
- mmc_set_disable_delay(mmc, OMAP_MMC_DISABLED_TIMEOUT);
- /* we start off in DISABLED state */
- host->dpm_state = DISABLED;
- if (clk_enable(host->iclk) != 0) {
- clk_put(host->iclk);
- clk_put(host->fclk);
- goto err1;
- }
-
- if (mmc_host_enable(host->mmc) != 0) {
- clk_disable(host->iclk);
- clk_put(host->iclk);
- clk_put(host->fclk);
- goto err1;
- }
+ pm_runtime_enable(host->dev);
+ pm_runtime_get_sync(host->dev);
+ pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(host->dev);
if (cpu_is_omap2430()) {
host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
@@ -2240,8 +2057,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_disable_irq(host);
- mmc_host_lazy_disable(host->mmc);
-
omap_hsmmc_protect_card(host);
mmc_add_host(mmc);
@@ -2259,6 +2074,8 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
}
omap_hsmmc_debugfs(mmc);
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
return 0;
@@ -2274,10 +2091,9 @@ err_reg:
err_irq_cd_init:
free_irq(host->irq, host);
err_irq:
- mmc_host_disable(host->mmc);
- clk_disable(host->iclk);
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
clk_put(host->fclk);
- clk_put(host->iclk);
if (host->got_dbclk) {
clk_disable(host->dbclk);
clk_put(host->dbclk);
@@ -2299,7 +2115,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
struct resource *res;
if (host) {
- mmc_host_enable(host->mmc);
+ pm_runtime_get_sync(host->dev);
mmc_remove_host(host->mmc);
if (host->use_reg)
omap_hsmmc_reg_put(host);
@@ -2310,10 +2126,9 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
free_irq(mmc_slot(host).card_detect_irq, host);
flush_work_sync(&host->mmc_carddetect_work);
- mmc_host_disable(host->mmc);
- clk_disable(host->iclk);
+ pm_runtime_put_sync(host->dev);
+ pm_runtime_disable(host->dev);
clk_put(host->fclk);
- clk_put(host->iclk);
if (host->got_dbclk) {
clk_disable(host->dbclk);
clk_put(host->dbclk);
@@ -2343,6 +2158,7 @@ static int omap_hsmmc_suspend(struct device *dev)
return 0;
if (host) {
+ pm_runtime_get_sync(host->dev);
host->suspended = 1;
if (host->pdata->suspend) {
ret = host->pdata->suspend(&pdev->dev,
@@ -2357,13 +2173,11 @@ static int omap_hsmmc_suspend(struct device *dev)
}
cancel_work_sync(&host->mmc_carddetect_work);
ret = mmc_suspend_host(host->mmc);
- mmc_host_enable(host->mmc);
+
if (ret == 0) {
omap_hsmmc_disable_irq(host);
OMAP_HSMMC_WRITE(host->base, HCTL,
OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
- mmc_host_disable(host->mmc);
- clk_disable(host->iclk);
if (host->got_dbclk)
clk_disable(host->dbclk);
} else {
@@ -2375,9 +2189,8 @@ static int omap_hsmmc_suspend(struct device *dev)
dev_dbg(mmc_dev(host->mmc),
"Unmask interrupt failed\n");
}
- mmc_host_disable(host->mmc);
}
-
+ pm_runtime_put_sync(host->dev);
}
return ret;
}
@@ -2393,14 +2206,7 @@ static int omap_hsmmc_resume(struct device *dev)
return 0;
if (host) {
- ret = clk_enable(host->iclk);
- if (ret)
- goto clk_en_err;
-
- if (mmc_host_enable(host->mmc) != 0) {
- clk_disable(host->iclk);
- goto clk_en_err;
- }
+ pm_runtime_get_sync(host->dev);
if (host->got_dbclk)
clk_enable(host->dbclk);
@@ -2421,15 +2227,12 @@ static int omap_hsmmc_resume(struct device *dev)
if (ret == 0)
host->suspended = 0;
- mmc_host_lazy_disable(host->mmc);
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
}
return ret;
-clk_en_err:
- dev_dbg(mmc_dev(host->mmc),
- "Failed to enable MMC clocks during resume\n");
- return ret;
}
#else
@@ -2437,9 +2240,33 @@ clk_en_err:
#define omap_hsmmc_resume NULL
#endif
+static int omap_hsmmc_runtime_suspend(struct device *dev)
+{
+ struct omap_hsmmc_host *host;
+
+ host = platform_get_drvdata(to_platform_device(dev));
+ omap_hsmmc_context_save(host);
+ dev_dbg(mmc_dev(host->mmc), "disabled\n");
+
+ return 0;
+}
+
+static int omap_hsmmc_runtime_resume(struct device *dev)
+{
+ struct omap_hsmmc_host *host;
+
+ host = platform_get_drvdata(to_platform_device(dev));
+ omap_hsmmc_context_restore(host);
+ dev_dbg(mmc_dev(host->mmc), "enabled\n");
+
+ return 0;
+}
+
static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
.suspend = omap_hsmmc_suspend,
.resume = omap_hsmmc_resume,
+ .runtime_suspend = omap_hsmmc_runtime_suspend,
+ .runtime_resume = omap_hsmmc_runtime_resume,
};
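
The conversion above replaces the driver's private ENABLED/DISABLED/SLEEP/OFF state machine with runtime PM: every path that touches the controller brackets its register accesses with pm_runtime_get_sync() and pm_runtime_put_autosuspend(), and context save/restore now lives in the runtime callbacks, with a 100 ms autosuspend delay. A condensed sketch of the same pattern for a placeholder platform driver ("foo" is not omap_hsmmc; error handling and driver registration omitted):

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int foo_runtime_suspend(struct device *dev)
	{
		dev_dbg(dev, "context saved, clocks may now be gated\n");
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		dev_dbg(dev, "clocks back on, context restored\n");
		return 0;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		pm_runtime_enable(&pdev->dev);
		pm_runtime_get_sync(&pdev->dev);		/* powered for init */
		pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
		pm_runtime_use_autosuspend(&pdev->dev);
		/* ... register access ... */
		pm_runtime_mark_last_busy(&pdev->dev);
		pm_runtime_put_autosuspend(&pdev->dev);	/* idles ~100 ms later */
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		.runtime_suspend = foo_runtime_suspend,
		.runtime_resume  = foo_runtime_resume,
	};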
static struct platform_driver omap_hsmmc_driver = {
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 9ebd1d7759d..4b920b7621c 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -15,9 +15,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/sdhci-pltfm.h>
#include <mach/cns3xxx.h>
-#include "sdhci.h"
#include "sdhci-pltfm.h"
static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host)
@@ -86,7 +84,7 @@ static struct sdhci_ops sdhci_cns3xxx_ops = {
.set_clock = sdhci_cns3xxx_set_clock,
};
-struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
+static struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
.ops = &sdhci_cns3xxx_ops,
.quirks = SDHCI_QUIRK_BROKEN_DMA |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
@@ -95,3 +93,43 @@ struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_NONSTANDARD_CLOCK,
};
+
+static int __devinit sdhci_cns3xxx_probe(struct platform_device *pdev)
+{
+ return sdhci_pltfm_register(pdev, &sdhci_cns3xxx_pdata);
+}
+
+static int __devexit sdhci_cns3xxx_remove(struct platform_device *pdev)
+{
+ return sdhci_pltfm_unregister(pdev);
+}
+
+static struct platform_driver sdhci_cns3xxx_driver = {
+ .driver = {
+ .name = "sdhci-cns3xxx",
+ .owner = THIS_MODULE,
+ },
+ .probe = sdhci_cns3xxx_probe,
+ .remove = __devexit_p(sdhci_cns3xxx_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
+};
+
+static int __init sdhci_cns3xxx_init(void)
+{
+ return platform_driver_register(&sdhci_cns3xxx_driver);
+}
+module_init(sdhci_cns3xxx_init);
+
+static void __exit sdhci_cns3xxx_exit(void)
+{
+ platform_driver_unregister(&sdhci_cns3xxx_driver);
+}
+module_exit(sdhci_cns3xxx_exit);
+
+MODULE_DESCRIPTION("SDHCI driver for CNS3xxx");
+MODULE_AUTHOR("Scott Shu, "
+ "Anton Vorontsov <avorontsov@mvista.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 2aeef4ffed8..f2d29dca442 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -22,7 +22,6 @@
#include <linux/io.h>
#include <linux/mmc/host.h>
-#include "sdhci.h"
#include "sdhci-pltfm.h"
static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
@@ -61,10 +60,50 @@ static struct sdhci_ops sdhci_dove_ops = {
.read_l = sdhci_dove_readl,
};
-struct sdhci_pltfm_data sdhci_dove_pdata = {
+static struct sdhci_pltfm_data sdhci_dove_pdata = {
.ops = &sdhci_dove_ops,
.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
SDHCI_QUIRK_NO_BUSY_IRQ |
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_FORCE_DMA,
};
+
+static int __devinit sdhci_dove_probe(struct platform_device *pdev)
+{
+ return sdhci_pltfm_register(pdev, &sdhci_dove_pdata);
+}
+
+static int __devexit sdhci_dove_remove(struct platform_device *pdev)
+{
+ return sdhci_pltfm_unregister(pdev);
+}
+
+static struct platform_driver sdhci_dove_driver = {
+ .driver = {
+ .name = "sdhci-dove",
+ .owner = THIS_MODULE,
+ },
+ .probe = sdhci_dove_probe,
+ .remove = __devexit_p(sdhci_dove_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
+};
+
+static int __init sdhci_dove_init(void)
+{
+ return platform_driver_register(&sdhci_dove_driver);
+}
+module_init(sdhci_dove_init);
+
+static void __exit sdhci_dove_exit(void)
+{
+ platform_driver_unregister(&sdhci_dove_driver);
+}
+module_exit(sdhci_dove_exit);
+
+MODULE_DESCRIPTION("SDHCI driver for Dove");
+MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>, "
+ "Mike Rapoport <mike@compulab.co.il>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index a19967d0bfc..0e9780f5a4a 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -18,20 +18,20 @@
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/sdhci-pltfm.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
-#include <mach/hardware.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <mach/esdhc.h>
-#include "sdhci.h"
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
+#define SDHCI_CTRL_D3CD 0x08
/* VENDOR SPEC register */
#define SDHCI_VENDOR_SPEC 0xC0
#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
-#define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0)
/*
* The CMDTYPE of the CMD register (offset 0xE) should be set to
* "11" when the STOP CMD12 is issued on imx53 to abort one
@@ -45,11 +45,68 @@
*/
#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1)
+enum imx_esdhc_type {
+ IMX25_ESDHC,
+ IMX35_ESDHC,
+ IMX51_ESDHC,
+ IMX53_ESDHC,
+};
+
struct pltfm_imx_data {
int flags;
u32 scratchpad;
+ enum imx_esdhc_type devtype;
+ struct esdhc_platform_data boarddata;
};
+static struct platform_device_id imx_esdhc_devtype[] = {
+ {
+ .name = "sdhci-esdhc-imx25",
+ .driver_data = IMX25_ESDHC,
+ }, {
+ .name = "sdhci-esdhc-imx35",
+ .driver_data = IMX35_ESDHC,
+ }, {
+ .name = "sdhci-esdhc-imx51",
+ .driver_data = IMX51_ESDHC,
+ }, {
+ .name = "sdhci-esdhc-imx53",
+ .driver_data = IMX53_ESDHC,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype);
+
+static const struct of_device_id imx_esdhc_dt_ids[] = {
+ { .compatible = "fsl,imx25-esdhc", .data = &imx_esdhc_devtype[IMX25_ESDHC], },
+ { .compatible = "fsl,imx35-esdhc", .data = &imx_esdhc_devtype[IMX35_ESDHC], },
+ { .compatible = "fsl,imx51-esdhc", .data = &imx_esdhc_devtype[IMX51_ESDHC], },
+ { .compatible = "fsl,imx53-esdhc", .data = &imx_esdhc_devtype[IMX53_ESDHC], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
+
+static inline int is_imx25_esdhc(struct pltfm_imx_data *data)
+{
+ return data->devtype == IMX25_ESDHC;
+}
+
+static inline int is_imx35_esdhc(struct pltfm_imx_data *data)
+{
+ return data->devtype == IMX35_ESDHC;
+}
+
+static inline int is_imx51_esdhc(struct pltfm_imx_data *data)
+{
+ return data->devtype == IMX51_ESDHC;
+}
+
+static inline int is_imx53_esdhc(struct pltfm_imx_data *data)
+{
+ return data->devtype == IMX53_ESDHC;
+}
+
static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
{
void __iomem *base = host->ioaddr + (reg & ~0x3);
@@ -62,19 +119,16 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
- /* fake CARD_PRESENT flag on mx25/35 */
+ /* fake CARD_PRESENT flag */
u32 val = readl(host->ioaddr + reg);
if (unlikely((reg == SDHCI_PRESENT_STATE)
- && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) {
- struct esdhc_platform_data *boarddata =
- host->mmc->parent->platform_data;
-
- if (boarddata && gpio_is_valid(boarddata->cd_gpio)
- && gpio_get_value(boarddata->cd_gpio))
+ && gpio_is_valid(boarddata->cd_gpio))) {
+ if (gpio_get_value(boarddata->cd_gpio))
/* no card, if a valid gpio says so... */
- val &= SDHCI_CARD_PRESENT;
+ val &= ~SDHCI_CARD_PRESENT;
else
/* ... in all other cases assume card is present */
val |= SDHCI_CARD_PRESENT;
@@ -87,14 +141,33 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
-
- if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)
- && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP)))
- /*
- * these interrupts won't work with a custom card_detect gpio
- * (only applied to mx25/35)
- */
- val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+ u32 data;
+
+ if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
+ if (boarddata->cd_type == ESDHC_CD_GPIO)
+ /*
+ * These interrupts won't work with a custom
+ * card_detect gpio (only applied to mx25/35)
+ */
+ val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
+
+ if (val & SDHCI_INT_CARD_INT) {
+ /*
+			 * Clear and then set the D3CD bit to avoid missing a
+			 * card interrupt.  This works around an eSDHC
+			 * controller problem: toggling D3CD makes the eSDHC
+			 * re-sample the card interrupt line, so a card
+			 * interrupt that was lost is picked up again by the
+			 * two register writes below.
+ */
+ data = readl(host->ioaddr + SDHCI_HOST_CONTROL);
+ data &= ~SDHCI_CTRL_D3CD;
+ writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
+ data |= SDHCI_CTRL_D3CD;
+ writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
+ }
+ }
if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
&& (reg == SDHCI_INT_STATUS)
@@ -164,8 +237,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
*/
return;
case SDHCI_HOST_CONTROL:
- /* FSL messed up here, so we can just keep those two */
- new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS);
+ /* FSL messed up here, so we can just keep those three */
+ new_val = val & (SDHCI_CTRL_LED | \
+ SDHCI_CTRL_4BITBUS | \
+ SDHCI_CTRL_D3CD);
/* ensure the endianess */
new_val |= ESDHC_HOST_CONTROL_LE;
/* DMA mode bits are shifted */
@@ -175,6 +250,17 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
return;
}
esdhc_clrset_le(host, 0xff, val, reg);
+
+ /*
+	 * The eSDHC violates the SDHC spec, which says that a software
+	 * reset must not affect the card-detection circuit.  The eSDHC
+	 * clears SYSCTL register bits [0..2] during a software reset,
+	 * which stops the clocks the card-detection circuit relies on.
+	 * To work around this, turn those clocks back on so that card
+	 * detection keeps working.
+ */
+ if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1))
+ esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL);
}
static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
@@ -193,12 +279,22 @@ static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
{
- struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+
+ switch (boarddata->wp_type) {
+ case ESDHC_WP_GPIO:
+ if (gpio_is_valid(boarddata->wp_gpio))
+ return gpio_get_value(boarddata->wp_gpio);
+ case ESDHC_WP_CONTROLLER:
+ return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
+ SDHCI_WRITE_PROTECT);
+ case ESDHC_WP_NONE:
+ break;
+ }
- if (boarddata && gpio_is_valid(boarddata->wp_gpio))
- return gpio_get_value(boarddata->wp_gpio);
- else
- return -ENOSYS;
+ return -ENOSYS;
}
static struct sdhci_ops sdhci_esdhc_ops = {
@@ -210,6 +306,14 @@ static struct sdhci_ops sdhci_esdhc_ops = {
.set_clock = esdhc_set_clock,
.get_max_clock = esdhc_pltfm_get_max_clock,
.get_min_clock = esdhc_pltfm_get_min_clock,
+ .get_ro = esdhc_pltfm_get_ro,
+};
+
+static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA
+ | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
+ /* ADMA has issues. Might be fixable */
+ .ops = &sdhci_esdhc_ops,
};
static irqreturn_t cd_irq(int irq, void *data)
@@ -220,112 +324,228 @@ static irqreturn_t cd_irq(int irq, void *data)
return IRQ_HANDLED;
};
-static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata)
+#ifdef CONFIG_OF
+static int __devinit
+sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
+ struct esdhc_platform_data *boarddata)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ if (of_get_property(np, "fsl,card-wired", NULL))
+ boarddata->cd_type = ESDHC_CD_PERMANENT;
+
+ if (of_get_property(np, "fsl,cd-controller", NULL))
+ boarddata->cd_type = ESDHC_CD_CONTROLLER;
+
+ if (of_get_property(np, "fsl,wp-controller", NULL))
+ boarddata->wp_type = ESDHC_WP_CONTROLLER;
+
+ boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
+ if (gpio_is_valid(boarddata->cd_gpio))
+ boarddata->cd_type = ESDHC_CD_GPIO;
+
+ boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
+ if (gpio_is_valid(boarddata->wp_gpio))
+ boarddata->wp_type = ESDHC_WP_GPIO;
+
+ return 0;
+}
+#else
+static inline int
+sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
+ struct esdhc_platform_data *boarddata)
+{
+ return -ENODEV;
+}
+#endif
+
+static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(imx_esdhc_dt_ids, &pdev->dev);
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_host *host;
+ struct esdhc_platform_data *boarddata;
struct clk *clk;
int err;
struct pltfm_imx_data *imx_data;
+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+
+ imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL);
+ if (!imx_data) {
+ err = -ENOMEM;
+ goto err_imx_data;
+ }
+
+ if (of_id)
+ pdev->id_entry = of_id->data;
+ imx_data->devtype = pdev->id_entry->driver_data;
+ pltfm_host->priv = imx_data;
+
clk = clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(clk)) {
dev_err(mmc_dev(host->mmc), "clk err\n");
- return PTR_ERR(clk);
+ err = PTR_ERR(clk);
+ goto err_clk_get;
}
clk_enable(clk);
pltfm_host->clk = clk;
- imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL);
- if (!imx_data) {
- clk_disable(pltfm_host->clk);
- clk_put(pltfm_host->clk);
- return -ENOMEM;
- }
- pltfm_host->priv = imx_data;
-
- if (!cpu_is_mx25())
+ if (!is_imx25_esdhc(imx_data))
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
- if (cpu_is_mx25() || cpu_is_mx35()) {
+ if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data))
/* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
- /* write_protect can't be routed to controller, use gpio */
- sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro;
- }
- if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51()))
+ if (is_imx53_esdhc(imx_data))
imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
- if (boarddata) {
+ boarddata = &imx_data->boarddata;
+ if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
+ if (!host->mmc->parent->platform_data) {
+ dev_err(mmc_dev(host->mmc), "no board data!\n");
+ err = -EINVAL;
+ goto no_board_data;
+ }
+ imx_data->boarddata = *((struct esdhc_platform_data *)
+ host->mmc->parent->platform_data);
+ }
+
+ /* write_protect */
+ if (boarddata->wp_type == ESDHC_WP_GPIO) {
err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP");
if (err) {
dev_warn(mmc_dev(host->mmc),
- "no write-protect pin available!\n");
- boarddata->wp_gpio = err;
+ "no write-protect pin available!\n");
+ boarddata->wp_gpio = -EINVAL;
}
+ } else {
+ boarddata->wp_gpio = -EINVAL;
+ }
+ /* card_detect */
+ if (boarddata->cd_type != ESDHC_CD_GPIO)
+ boarddata->cd_gpio = -EINVAL;
+
+ switch (boarddata->cd_type) {
+ case ESDHC_CD_GPIO:
err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD");
if (err) {
- dev_warn(mmc_dev(host->mmc),
+ dev_err(mmc_dev(host->mmc),
"no card-detect pin available!\n");
goto no_card_detect_pin;
}
- /* i.MX5x has issues to be researched */
- if (!cpu_is_mx25() && !cpu_is_mx35())
- goto not_supported;
-
err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
mmc_hostname(host->mmc), host);
if (err) {
- dev_warn(mmc_dev(host->mmc), "request irq error\n");
+ dev_err(mmc_dev(host->mmc), "request irq error\n");
goto no_card_detect_irq;
}
+ /* fall through */
- imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP;
- /* Now we have a working card_detect again */
+ case ESDHC_CD_CONTROLLER:
+ /* we have a working card_detect back */
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ break;
+
+ case ESDHC_CD_PERMANENT:
+ host->mmc->caps = MMC_CAP_NONREMOVABLE;
+ break;
+
+ case ESDHC_CD_NONE:
+ break;
}
+ err = sdhci_add_host(host);
+ if (err)
+ goto err_add_host;
+
return 0;
- no_card_detect_irq:
- gpio_free(boarddata->cd_gpio);
- no_card_detect_pin:
- boarddata->cd_gpio = err;
- not_supported:
+err_add_host:
+ if (gpio_is_valid(boarddata->cd_gpio))
+ free_irq(gpio_to_irq(boarddata->cd_gpio), host);
+no_card_detect_irq:
+ if (gpio_is_valid(boarddata->cd_gpio))
+ gpio_free(boarddata->cd_gpio);
+ if (gpio_is_valid(boarddata->wp_gpio))
+ gpio_free(boarddata->wp_gpio);
+no_card_detect_pin:
+no_board_data:
+ clk_disable(pltfm_host->clk);
+ clk_put(pltfm_host->clk);
+err_clk_get:
kfree(imx_data);
- return 0;
+err_imx_data:
+ sdhci_pltfm_free(pdev);
+ return err;
}
-static void esdhc_pltfm_exit(struct sdhci_host *host)
+static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev)
{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+ int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+
+ sdhci_remove_host(host, dead);
- if (boarddata && gpio_is_valid(boarddata->wp_gpio))
+ if (gpio_is_valid(boarddata->wp_gpio))
gpio_free(boarddata->wp_gpio);
- if (boarddata && gpio_is_valid(boarddata->cd_gpio)) {
+ if (gpio_is_valid(boarddata->cd_gpio)) {
+ free_irq(gpio_to_irq(boarddata->cd_gpio), host);
gpio_free(boarddata->cd_gpio);
-
- if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION))
- free_irq(gpio_to_irq(boarddata->cd_gpio), host);
}
clk_disable(pltfm_host->clk);
clk_put(pltfm_host->clk);
kfree(imx_data);
+
+ sdhci_pltfm_free(pdev);
+
+ return 0;
}
-struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA
- | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
- /* ADMA has issues. Might be fixable */
- .ops = &sdhci_esdhc_ops,
- .init = esdhc_pltfm_init,
- .exit = esdhc_pltfm_exit,
+static struct platform_driver sdhci_esdhc_imx_driver = {
+ .driver = {
+ .name = "sdhci-esdhc-imx",
+ .owner = THIS_MODULE,
+ .of_match_table = imx_esdhc_dt_ids,
+ },
+ .id_table = imx_esdhc_devtype,
+ .probe = sdhci_esdhc_imx_probe,
+ .remove = __devexit_p(sdhci_esdhc_imx_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
};
+
+static int __init sdhci_esdhc_imx_init(void)
+{
+ return platform_driver_register(&sdhci_esdhc_imx_driver);
+}
+module_init(sdhci_esdhc_imx_init);
+
+static void __exit sdhci_esdhc_imx_exit(void)
+{
+ platform_driver_unregister(&sdhci_esdhc_imx_driver);
+}
+module_exit(sdhci_esdhc_imx_exit);
+
+MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC");
+MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
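
The esdhc_writel_le() change above works around the controller quirk by clearing and re-setting the D3CD bit whenever the card interrupt is enabled. Pulled out as a standalone helper purely for illustration (toggle_d3cd() is not a function in the patch; SDHCI_CTRL_D3CD and SDHCI_HOST_CONTROL are the constants used above):

    /* Illustrative sketch of the clear-then-set D3CD toggle from esdhc_writel_le(). */
    static void toggle_d3cd(void __iomem *ioaddr)
    {
    	u32 ctrl = readl(ioaddr + SDHCI_HOST_CONTROL);

    	ctrl &= ~SDHCI_CTRL_D3CD;			/* drop D3CD ... */
    	writel(ctrl, ioaddr + SDHCI_HOST_CONTROL);
    	ctrl |= SDHCI_CTRL_D3CD;			/* ... then raise it again */
    	writel(ctrl, ioaddr + SDHCI_HOST_CONTROL);
    	/* the 0 -> 1 transition makes the eSDHC re-sample the card interrupt line */
    }
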
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
deleted file mode 100644
index 60e4186a434..00000000000
--- a/drivers/mmc/host/sdhci-of-core.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * OpenFirmware bindings for Secure Digital Host Controller Interface.
- *
- * Copyright (c) 2007 Freescale Semiconductor, Inc.
- * Copyright (c) 2009 MontaVista Software, Inc.
- *
- * Authors: Xiaobo Xie <X.Xie@freescale.com>
- * Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/mmc/host.h>
-#ifdef CONFIG_PPC
-#include <asm/machdep.h>
-#endif
-#include "sdhci-of.h"
-#include "sdhci.h"
-
-#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
-
-/*
- * These accessors are designed for big endian hosts doing I/O to
- * little endian controllers incorporating a 32-bit hardware byte swapper.
- */
-
-u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg)
-{
- return in_be32(host->ioaddr + reg);
-}
-
-u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg)
-{
- return in_be16(host->ioaddr + (reg ^ 0x2));
-}
-
-u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg)
-{
- return in_8(host->ioaddr + (reg ^ 0x3));
-}
-
-void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg)
-{
- out_be32(host->ioaddr + reg, val);
-}
-
-void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg)
-{
- struct sdhci_of_host *of_host = sdhci_priv(host);
- int base = reg & ~0x3;
- int shift = (reg & 0x2) * 8;
-
- switch (reg) {
- case SDHCI_TRANSFER_MODE:
- /*
- * Postpone this write, we must do it together with a
- * command write that is down below.
- */
- of_host->xfer_mode_shadow = val;
- return;
- case SDHCI_COMMAND:
- sdhci_be32bs_writel(host, val << 16 | of_host->xfer_mode_shadow,
- SDHCI_TRANSFER_MODE);
- return;
- }
- clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift);
-}
-
-void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
-{
- int base = reg & ~0x3;
- int shift = (reg & 0x3) * 8;
-
- clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift);
-}
-#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */
-
-#ifdef CONFIG_PM
-
-static int sdhci_of_suspend(struct platform_device *ofdev, pm_message_t state)
-{
- struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
-
- return mmc_suspend_host(host->mmc);
-}
-
-static int sdhci_of_resume(struct platform_device *ofdev)
-{
- struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
-
- return mmc_resume_host(host->mmc);
-}
-
-#else
-
-#define sdhci_of_suspend NULL
-#define sdhci_of_resume NULL
-
-#endif
-
-static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
-{
- if (of_get_property(np, "sdhci,wp-inverted", NULL))
- return true;
-
- /* Old device trees don't have the wp-inverted property. */
-#ifdef CONFIG_PPC
- return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
-#else
- return false;
-#endif
-}
-
-static const struct of_device_id sdhci_of_match[];
-static int __devinit sdhci_of_probe(struct platform_device *ofdev)
-{
- const struct of_device_id *match;
- struct device_node *np = ofdev->dev.of_node;
- struct sdhci_of_data *sdhci_of_data;
- struct sdhci_host *host;
- struct sdhci_of_host *of_host;
- const __be32 *clk;
- int size;
- int ret;
-
- match = of_match_device(sdhci_of_match, &ofdev->dev);
- if (!match)
- return -EINVAL;
- sdhci_of_data = match->data;
-
- if (!of_device_is_available(np))
- return -ENODEV;
-
- host = sdhci_alloc_host(&ofdev->dev, sizeof(*of_host));
- if (IS_ERR(host))
- return -ENOMEM;
-
- of_host = sdhci_priv(host);
- dev_set_drvdata(&ofdev->dev, host);
-
- host->ioaddr = of_iomap(np, 0);
- if (!host->ioaddr) {
- ret = -ENOMEM;
- goto err_addr_map;
- }
-
- host->irq = irq_of_parse_and_map(np, 0);
- if (!host->irq) {
- ret = -EINVAL;
- goto err_no_irq;
- }
-
- host->hw_name = dev_name(&ofdev->dev);
- if (sdhci_of_data) {
- host->quirks = sdhci_of_data->quirks;
- host->ops = &sdhci_of_data->ops;
- }
-
- if (of_get_property(np, "sdhci,auto-cmd12", NULL))
- host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
-
-
- if (of_get_property(np, "sdhci,1-bit-only", NULL))
- host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
-
- if (sdhci_of_wp_inverted(np))
- host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
-
- clk = of_get_property(np, "clock-frequency", &size);
- if (clk && size == sizeof(*clk) && *clk)
- of_host->clock = be32_to_cpup(clk);
-
- ret = sdhci_add_host(host);
- if (ret)
- goto err_add_host;
-
- return 0;
-
-err_add_host:
- irq_dispose_mapping(host->irq);
-err_no_irq:
- iounmap(host->ioaddr);
-err_addr_map:
- sdhci_free_host(host);
- return ret;
-}
-
-static int __devexit sdhci_of_remove(struct platform_device *ofdev)
-{
- struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
-
- sdhci_remove_host(host, 0);
- sdhci_free_host(host);
- irq_dispose_mapping(host->irq);
- iounmap(host->ioaddr);
- return 0;
-}
-
-static const struct of_device_id sdhci_of_match[] = {
-#ifdef CONFIG_MMC_SDHCI_OF_ESDHC
- { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, },
- { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, },
- { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, },
-#endif
-#ifdef CONFIG_MMC_SDHCI_OF_HLWD
- { .compatible = "nintendo,hollywood-sdhci", .data = &sdhci_hlwd, },
-#endif
- { .compatible = "generic-sdhci", },
- {},
-};
-MODULE_DEVICE_TABLE(of, sdhci_of_match);
-
-static struct platform_driver sdhci_of_driver = {
- .driver = {
- .name = "sdhci-of",
- .owner = THIS_MODULE,
- .of_match_table = sdhci_of_match,
- },
- .probe = sdhci_of_probe,
- .remove = __devexit_p(sdhci_of_remove),
- .suspend = sdhci_of_suspend,
- .resume = sdhci_of_resume,
-};
-
-static int __init sdhci_of_init(void)
-{
- return platform_driver_register(&sdhci_of_driver);
-}
-module_init(sdhci_of_init);
-
-static void __exit sdhci_of_exit(void)
-{
- platform_driver_unregister(&sdhci_of_driver);
-}
-module_exit(sdhci_of_exit);
-
-MODULE_DESCRIPTION("Secure Digital Host Controller Interface OF driver");
-MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
- "Anton Vorontsov <avorontsov@ru.mvista.com>");
-MODULE_LICENSE("GPL");
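
The deleted sdhci-of-core.c above (its accessors reappear in sdhci-pltfm.h later in this patch) defers the 16-bit SDHCI_TRANSFER_MODE write and emits it together with SDHCI_COMMAND as a single 32-bit store, because the two registers share one 32-bit word on these byte-swapped controllers. A standalone demonstration of the packing, with arbitrary example register values:

    /* Demo of the xfer_mode_shadow packing; the two 16-bit values are made up. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint16_t xfer_mode_shadow = 0x0013;	/* stashed SDHCI_TRANSFER_MODE value */
    	uint16_t command          = 0x113a;	/* SDHCI_COMMAND value written later */

    	/* One 32-bit store at the TRANSFER_MODE offset carries both halves:
    	 * the command in the upper 16 bits, the transfer mode in the lower 16. */
    	uint32_t combined = (uint32_t)command << 16 | xfer_mode_shadow;

    	printf("combined 32-bit write: 0x%08x\n", combined);
    	return 0;
    }
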
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index ba40d6d035c..fe604df6501 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -16,8 +16,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mmc/host.h>
-#include "sdhci-of.h"
-#include "sdhci.h"
+#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
static u16 esdhc_readw(struct sdhci_host *host, int reg)
@@ -60,32 +59,83 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)
static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
{
- struct sdhci_of_host *of_host = sdhci_priv(host);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- return of_host->clock;
+ return pltfm_host->clock;
}
static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
{
- struct sdhci_of_host *of_host = sdhci_priv(host);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- return of_host->clock / 256 / 16;
+ return pltfm_host->clock / 256 / 16;
}
-struct sdhci_of_data sdhci_esdhc = {
+static struct sdhci_ops sdhci_esdhc_ops = {
+ .read_l = sdhci_be32bs_readl,
+ .read_w = esdhc_readw,
+ .read_b = sdhci_be32bs_readb,
+ .write_l = sdhci_be32bs_writel,
+ .write_w = esdhc_writew,
+ .write_b = esdhc_writeb,
+ .set_clock = esdhc_set_clock,
+ .enable_dma = esdhc_of_enable_dma,
+ .get_max_clock = esdhc_of_get_max_clock,
+ .get_min_clock = esdhc_of_get_min_clock,
+};
+
+static struct sdhci_pltfm_data sdhci_esdhc_pdata = {
/* card detection could be handled via GPIO */
.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
| SDHCI_QUIRK_NO_CARD_NO_RESET,
- .ops = {
- .read_l = sdhci_be32bs_readl,
- .read_w = esdhc_readw,
- .read_b = sdhci_be32bs_readb,
- .write_l = sdhci_be32bs_writel,
- .write_w = esdhc_writew,
- .write_b = esdhc_writeb,
- .set_clock = esdhc_set_clock,
- .enable_dma = esdhc_of_enable_dma,
- .get_max_clock = esdhc_of_get_max_clock,
- .get_min_clock = esdhc_of_get_min_clock,
+ .ops = &sdhci_esdhc_ops,
+};
+
+static int __devinit sdhci_esdhc_probe(struct platform_device *pdev)
+{
+ return sdhci_pltfm_register(pdev, &sdhci_esdhc_pdata);
+}
+
+static int __devexit sdhci_esdhc_remove(struct platform_device *pdev)
+{
+ return sdhci_pltfm_unregister(pdev);
+}
+
+static const struct of_device_id sdhci_esdhc_of_match[] = {
+ { .compatible = "fsl,mpc8379-esdhc" },
+ { .compatible = "fsl,mpc8536-esdhc" },
+ { .compatible = "fsl,esdhc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
+
+static struct platform_driver sdhci_esdhc_driver = {
+ .driver = {
+ .name = "sdhci-esdhc",
+ .owner = THIS_MODULE,
+ .of_match_table = sdhci_esdhc_of_match,
},
+ .probe = sdhci_esdhc_probe,
+ .remove = __devexit_p(sdhci_esdhc_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
};
+
+static int __init sdhci_esdhc_init(void)
+{
+ return platform_driver_register(&sdhci_esdhc_driver);
+}
+module_init(sdhci_esdhc_init);
+
+static void __exit sdhci_esdhc_exit(void)
+{
+ platform_driver_unregister(&sdhci_esdhc_driver);
+}
+module_exit(sdhci_esdhc_exit);
+
+MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
+MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
+ "Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL v2");
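
esdhc_of_get_min_clock() above derives the slowest achievable SD clock from the input clock using what appears to be the controller's largest prescaler/divisor combination (256 and 16). A quick arithmetic check, assuming a 100 MHz input clock (the 100 MHz figure is an example, not taken from the patch):

    /* Worked example for the clock / 256 / 16 formula above. */
    #include <stdio.h>

    int main(void)
    {
    	unsigned int clock = 100000000;		/* assumed pltfm_host->clock in Hz */
    	unsigned int min   = clock / 256 / 16;	/* same formula as esdhc_of_get_min_clock() */

    	printf("min SD clock = %u Hz\n", min);	/* 24414 Hz, roughly 24.4 kHz */
    	return 0;
    }
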
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 68ddb7546ae..735be131dca 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -21,8 +21,7 @@
#include <linux/delay.h>
#include <linux/mmc/host.h>
-#include "sdhci-of.h"
-#include "sdhci.h"
+#include "sdhci-pltfm.h"
/*
* Ops and quirks for the Nintendo Wii SDHCI controllers.
@@ -51,15 +50,63 @@ static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg)
udelay(SDHCI_HLWD_WRITE_DELAY);
}
-struct sdhci_of_data sdhci_hlwd = {
+static struct sdhci_ops sdhci_hlwd_ops = {
+ .read_l = sdhci_be32bs_readl,
+ .read_w = sdhci_be32bs_readw,
+ .read_b = sdhci_be32bs_readb,
+ .write_l = sdhci_hlwd_writel,
+ .write_w = sdhci_hlwd_writew,
+ .write_b = sdhci_hlwd_writeb,
+};
+
+static struct sdhci_pltfm_data sdhci_hlwd_pdata = {
.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE,
- .ops = {
- .read_l = sdhci_be32bs_readl,
- .read_w = sdhci_be32bs_readw,
- .read_b = sdhci_be32bs_readb,
- .write_l = sdhci_hlwd_writel,
- .write_w = sdhci_hlwd_writew,
- .write_b = sdhci_hlwd_writeb,
+ .ops = &sdhci_hlwd_ops,
+};
+
+static int __devinit sdhci_hlwd_probe(struct platform_device *pdev)
+{
+ return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata);
+}
+
+static int __devexit sdhci_hlwd_remove(struct platform_device *pdev)
+{
+ return sdhci_pltfm_unregister(pdev);
+}
+
+static const struct of_device_id sdhci_hlwd_of_match[] = {
+ { .compatible = "nintendo,hollywood-sdhci" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_hlwd_of_match);
+
+static struct platform_driver sdhci_hlwd_driver = {
+ .driver = {
+ .name = "sdhci-hlwd",
+ .owner = THIS_MODULE,
+ .of_match_table = sdhci_hlwd_of_match,
},
+ .probe = sdhci_hlwd_probe,
+ .remove = __devexit_p(sdhci_hlwd_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
};
+
+static int __init sdhci_hlwd_init(void)
+{
+ return platform_driver_register(&sdhci_hlwd_driver);
+}
+module_init(sdhci_hlwd_init);
+
+static void __exit sdhci_hlwd_exit(void)
+{
+ platform_driver_unregister(&sdhci_hlwd_driver);
+}
+module_exit(sdhci_hlwd_exit);
+
+MODULE_DESCRIPTION("Nintendo Wii SDHCI OF driver");
+MODULE_AUTHOR("The GameCube Linux Team, Albert Herranz");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-of.h b/drivers/mmc/host/sdhci-of.h
deleted file mode 100644
index ad09ad9915d..00000000000
--- a/drivers/mmc/host/sdhci-of.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * OpenFirmware bindings for Secure Digital Host Controller Interface.
- *
- * Copyright (c) 2007 Freescale Semiconductor, Inc.
- * Copyright (c) 2009 MontaVista Software, Inc.
- *
- * Authors: Xiaobo Xie <X.Xie@freescale.com>
- * Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-
-#ifndef __SDHCI_OF_H
-#define __SDHCI_OF_H
-
-#include <linux/types.h>
-#include "sdhci.h"
-
-struct sdhci_of_data {
- unsigned int quirks;
- struct sdhci_ops ops;
-};
-
-struct sdhci_of_host {
- unsigned int clock;
- u16 xfer_mode_shadow;
-};
-
-extern u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg);
-extern u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg);
-extern u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg);
-extern void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg);
-extern void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg);
-extern void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg);
-
-extern struct sdhci_of_data sdhci_esdhc;
-extern struct sdhci_of_data sdhci_hlwd;
-
-#endif /* __SDHCI_OF_H */
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 936bbca19c0..26c528648f3 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -143,6 +143,12 @@ static const struct sdhci_pci_fixes sdhci_cafe = {
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
};
+static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+ return 0;
+}
+
/*
* ADMA operation is disabled for Moorestown platform due to
* hardware bugs.
@@ -157,8 +163,15 @@ static int mrst_hc_probe(struct sdhci_pci_chip *chip)
return 0;
}
+static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+ return 0;
+}
+
static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
.quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
+ .probe_slot = mrst_hc_probe_slot,
};
static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
@@ -170,8 +183,13 @@ static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
};
-static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = {
+static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .probe_slot = mfd_emmc_probe_slot,
};
/* O2Micro extra registers */
@@ -682,7 +700,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.device = PCI_DEVICE_ID_INTEL_MFD_SDIO1,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
},
{
@@ -690,7 +708,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.device = PCI_DEVICE_ID_INTEL_MFD_SDIO2,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
},
{
@@ -698,7 +716,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.device = PCI_DEVICE_ID_INTEL_MFD_EMMC0,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
},
{
@@ -706,7 +724,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.device = PCI_DEVICE_ID_INTEL_MFD_EMMC1,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
},
{
@@ -789,8 +807,34 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
return 0;
}
+static int sdhci_pci_8bit_width(struct sdhci_host *host, int width)
+{
+ u8 ctrl;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+ switch (width) {
+ case MMC_BUS_WIDTH_8:
+ ctrl |= SDHCI_CTRL_8BITBUS;
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ break;
+ case MMC_BUS_WIDTH_4:
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ break;
+ default:
+ ctrl &= ~(SDHCI_CTRL_8BITBUS | SDHCI_CTRL_4BITBUS);
+ break;
+ }
+
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_pci_ops = {
.enable_dma = sdhci_pci_enable_dma,
+ .platform_8bit_width = sdhci_pci_8bit_width,
};
/*****************************************************************************\
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index dbab0407f4b..6414efeddca 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -2,6 +2,12 @@
* sdhci-pltfm.c Support for SDHCI platform devices
* Copyright (c) 2009 Intel Corporation
*
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -22,48 +28,67 @@
* Inspired by sdhci-pci.c, by Pierre Ossman
*/
-#include <linux/delay.h>
-#include <linux/highmem.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#ifdef CONFIG_PPC
+#include <asm/machdep.h>
+#endif
+#include "sdhci-pltfm.h"
-#include <linux/mmc/host.h>
+static struct sdhci_ops sdhci_pltfm_ops = {
+};
-#include <linux/io.h>
-#include <linux/mmc/sdhci-pltfm.h>
+#ifdef CONFIG_OF
+static bool sdhci_of_wp_inverted(struct device_node *np)
+{
+ if (of_get_property(np, "sdhci,wp-inverted", NULL))
+ return true;
-#include "sdhci.h"
-#include "sdhci-pltfm.h"
+ /* Old device trees don't have the wp-inverted property. */
+#ifdef CONFIG_PPC
+ return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
+#else
+ return false;
+#endif /* CONFIG_PPC */
+}
+
+void sdhci_get_of_property(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ const __be32 *clk;
+ int size;
-/*****************************************************************************\
- * *
- * SDHCI core callbacks *
- * *
-\*****************************************************************************/
+ if (of_device_is_available(np)) {
+ if (of_get_property(np, "sdhci,auto-cmd12", NULL))
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
-static struct sdhci_ops sdhci_pltfm_ops = {
-};
+ if (of_get_property(np, "sdhci,1-bit-only", NULL))
+ host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
-/*****************************************************************************\
- * *
- * Device probing/removal *
- * *
-\*****************************************************************************/
+ if (sdhci_of_wp_inverted(np))
+ host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
-static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
+ clk = of_get_property(np, "clock-frequency", &size);
+ if (clk && size == sizeof(*clk) && *clk)
+ pltfm_host->clock = be32_to_cpup(clk);
+ }
+}
+#else
+void sdhci_get_of_property(struct platform_device *pdev) {}
+#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(sdhci_get_of_property);
+
+struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
+ struct sdhci_pltfm_data *pdata)
{
- const struct platform_device_id *platid = platform_get_device_id(pdev);
- struct sdhci_pltfm_data *pdata;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
+ struct device_node *np = pdev->dev.of_node;
struct resource *iomem;
int ret;
- if (platid && platid->driver_data)
- pdata = (void *)platid->driver_data;
- else
- pdata = pdev->dev.platform_data;
-
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem) {
ret = -ENOMEM;
@@ -71,11 +96,10 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
}
if (resource_size(iomem) < 0x100)
- dev_err(&pdev->dev, "Invalid iomem size. You may "
- "experience problems.\n");
+ dev_err(&pdev->dev, "Invalid iomem size!\n");
/* Some PCI-based MFD need the parent here */
- if (pdev->dev.parent != &platform_bus)
+ if (pdev->dev.parent != &platform_bus && !np)
host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host));
else
host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host));
@@ -87,7 +111,7 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
- host->hw_name = "platform";
+ host->hw_name = dev_name(&pdev->dev);
if (pdata && pdata->ops)
host->ops = pdata->ops;
else
@@ -110,126 +134,95 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
goto err_remap;
}
- if (pdata && pdata->init) {
- ret = pdata->init(host, pdata);
- if (ret)
- goto err_plat_init;
- }
-
- ret = sdhci_add_host(host);
- if (ret)
- goto err_add_host;
-
platform_set_drvdata(pdev, host);
- return 0;
+ return host;
-err_add_host:
- if (pdata && pdata->exit)
- pdata->exit(host);
-err_plat_init:
- iounmap(host->ioaddr);
err_remap:
release_mem_region(iomem->start, resource_size(iomem));
err_request:
sdhci_free_host(host);
err:
- printk(KERN_ERR"Probing of sdhci-pltfm failed: %d\n", ret);
- return ret;
+ dev_err(&pdev->dev, "%s failed %d\n", __func__, ret);
+ return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_init);
-static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
+void sdhci_pltfm_free(struct platform_device *pdev)
{
- struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- int dead;
- u32 scratch;
-
- dead = 0;
- scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
- if (scratch == (u32)-1)
- dead = 1;
- sdhci_remove_host(host, dead);
- if (pdata && pdata->exit)
- pdata->exit(host);
iounmap(host->ioaddr);
release_mem_region(iomem->start, resource_size(iomem));
sdhci_free_host(host);
platform_set_drvdata(pdev, NULL);
+}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_free);
- return 0;
+int sdhci_pltfm_register(struct platform_device *pdev,
+ struct sdhci_pltfm_data *pdata)
+{
+ struct sdhci_host *host;
+ int ret = 0;
+
+ host = sdhci_pltfm_init(pdev, pdata);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ sdhci_get_of_property(pdev);
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ sdhci_pltfm_free(pdev);
+
+ return ret;
}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_register);
-static const struct platform_device_id sdhci_pltfm_ids[] = {
- { "sdhci", },
-#ifdef CONFIG_MMC_SDHCI_CNS3XXX
- { "sdhci-cns3xxx", (kernel_ulong_t)&sdhci_cns3xxx_pdata },
-#endif
-#ifdef CONFIG_MMC_SDHCI_ESDHC_IMX
- { "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata },
-#endif
-#ifdef CONFIG_MMC_SDHCI_DOVE
- { "sdhci-dove", (kernel_ulong_t)&sdhci_dove_pdata },
-#endif
-#ifdef CONFIG_MMC_SDHCI_TEGRA
- { "sdhci-tegra", (kernel_ulong_t)&sdhci_tegra_pdata },
-#endif
- { },
-};
-MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
+int sdhci_pltfm_unregister(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+
+ sdhci_remove_host(host, dead);
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
#ifdef CONFIG_PM
-static int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
+int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
{
struct sdhci_host *host = platform_get_drvdata(dev);
return sdhci_suspend_host(host, state);
}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
-static int sdhci_pltfm_resume(struct platform_device *dev)
+int sdhci_pltfm_resume(struct platform_device *dev)
{
struct sdhci_host *host = platform_get_drvdata(dev);
return sdhci_resume_host(host);
}
-#else
-#define sdhci_pltfm_suspend NULL
-#define sdhci_pltfm_resume NULL
+EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
#endif /* CONFIG_PM */
-static struct platform_driver sdhci_pltfm_driver = {
- .driver = {
- .name = "sdhci",
- .owner = THIS_MODULE,
- },
- .probe = sdhci_pltfm_probe,
- .remove = __devexit_p(sdhci_pltfm_remove),
- .id_table = sdhci_pltfm_ids,
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-};
-
-/*****************************************************************************\
- * *
- * Driver init/exit *
- * *
-\*****************************************************************************/
-
-static int __init sdhci_drv_init(void)
+static int __init sdhci_pltfm_drv_init(void)
{
- return platform_driver_register(&sdhci_pltfm_driver);
+ pr_info("sdhci-pltfm: SDHCI platform and OF driver helper\n");
+
+ return 0;
}
+module_init(sdhci_pltfm_drv_init);
-static void __exit sdhci_drv_exit(void)
+static void __exit sdhci_pltfm_drv_exit(void)
{
- platform_driver_unregister(&sdhci_pltfm_driver);
}
+module_exit(sdhci_pltfm_drv_exit);
-module_init(sdhci_drv_init);
-module_exit(sdhci_drv_exit);
-
-MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
-MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
+MODULE_DESCRIPTION("SDHCI platform and OF driver helper");
+MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
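
The reworked sdhci-pltfm.c above gives platform drivers two ways to come up: simple drivers call sdhci_pltfm_register(), which chains init, OF property parsing and sdhci_add_host() in one shot, while drivers that need extra setup (clocks, quirks, board data) call sdhci_pltfm_init(), do their own work, then call sdhci_add_host() themselves, as sdhci-esdhc-imx and sdhci-pxav2 do elsewhere in this patch. A minimal sketch of the second path; my_probe() and my_extra_setup() are illustrative names only:

    /* Sketch of the two-step sdhci_pltfm_init() / sdhci_add_host() path. */
    static int __devinit my_probe(struct platform_device *pdev)
    {
    	struct sdhci_host *host;
    	int ret;

    	host = sdhci_pltfm_init(pdev, NULL);	/* a real driver would pass its pdata */
    	if (IS_ERR(host))
    		return PTR_ERR(host);

    	ret = my_extra_setup(host);		/* clocks, quirks, board data, ... */
    	if (ret)
    		goto err;

    	ret = sdhci_add_host(host);		/* register with the MMC core */
    	if (ret)
    		goto err;

    	return 0;
    err:
    	sdhci_pltfm_free(pdev);			/* undoes sdhci_pltfm_init() */
    	return ret;
    }
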
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 2b37016ad0a..3a9fc3f4084 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -12,17 +12,95 @@
#define _DRIVERS_MMC_SDHCI_PLTFM_H
#include <linux/clk.h>
-#include <linux/types.h>
-#include <linux/mmc/sdhci-pltfm.h>
+#include <linux/platform_device.h>
+#include "sdhci.h"
+
+struct sdhci_pltfm_data {
+ struct sdhci_ops *ops;
+ unsigned int quirks;
+};
struct sdhci_pltfm_host {
struct clk *clk;
void *priv; /* to handle quirks across io-accessor calls */
+
+ /* migrate from sdhci_of_host */
+ unsigned int clock;
+ u16 xfer_mode_shadow;
};
-extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
-extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata;
-extern struct sdhci_pltfm_data sdhci_dove_pdata;
-extern struct sdhci_pltfm_data sdhci_tegra_pdata;
+#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+/*
+ * These accessors are designed for big endian hosts doing I/O to
+ * little endian controllers incorporating a 32-bit hardware byte swapper.
+ */
+static inline u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg)
+{
+ return in_be32(host->ioaddr + reg);
+}
+
+static inline u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg)
+{
+ return in_be16(host->ioaddr + (reg ^ 0x2));
+}
+
+static inline u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg)
+{
+ return in_8(host->ioaddr + (reg ^ 0x3));
+}
+
+static inline void sdhci_be32bs_writel(struct sdhci_host *host,
+ u32 val, int reg)
+{
+ out_be32(host->ioaddr + reg, val);
+}
+
+static inline void sdhci_be32bs_writew(struct sdhci_host *host,
+ u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int base = reg & ~0x3;
+ int shift = (reg & 0x2) * 8;
+
+ switch (reg) {
+ case SDHCI_TRANSFER_MODE:
+ /*
+ * Postpone this write, we must do it together with a
+ * command write that is down below.
+ */
+ pltfm_host->xfer_mode_shadow = val;
+ return;
+ case SDHCI_COMMAND:
+ sdhci_be32bs_writel(host,
+ val << 16 | pltfm_host->xfer_mode_shadow,
+ SDHCI_TRANSFER_MODE);
+ return;
+ }
+ clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift);
+}
+
+static inline void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ int base = reg & ~0x3;
+ int shift = (reg & 0x3) * 8;
+
+ clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift);
+}
+#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */
+
+extern void sdhci_get_of_property(struct platform_device *pdev);
+
+extern struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
+ struct sdhci_pltfm_data *pdata);
+extern void sdhci_pltfm_free(struct platform_device *pdev);
+
+extern int sdhci_pltfm_register(struct platform_device *pdev,
+ struct sdhci_pltfm_data *pdata);
+extern int sdhci_pltfm_unregister(struct platform_device *pdev);
+
+#ifdef CONFIG_PM
+extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state);
+extern int sdhci_pltfm_resume(struct platform_device *dev);
+#endif
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
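
The sdhci_be32bs_* helpers moved into this header assume a 32-bit hardware byte swapper between a big-endian CPU and the little-endian controller: a 16-bit register at offset reg is reached at reg ^ 0x2, a byte register at reg ^ 0x3, and sub-word writes become a read-modify-write of the aligned 32-bit word with a computed byte-lane shift. A standalone check of that offset math, using arbitrary example offsets:

    /* Demo of the byte-swapper offset math used by the helpers above. */
    #include <stdio.h>

    int main(void)
    {
    	int regs[] = { 0x04, 0x06, 0x0e, 0x2f };	/* example register offsets */
    	int i;

    	for (i = 0; i < 4; i++) {
    		int reg = regs[i];

    		printf("reg 0x%02x: 16-bit access at 0x%02x, 8-bit access at 0x%02x,\n"
    		       "          byte lane shift %2d within the 32-bit word at 0x%02x\n",
    		       reg, reg ^ 0x2, reg ^ 0x3, (reg & 0x3) * 8, reg & ~0x3);
    	}
    	return 0;
    }
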
diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c
deleted file mode 100644
index 089c9a68b7b..00000000000
--- a/drivers/mmc/host/sdhci-pxa.c
+++ /dev/null
@@ -1,303 +0,0 @@
-/* linux/drivers/mmc/host/sdhci-pxa.c
- *
- * Copyright (C) 2010 Marvell International Ltd.
- * Zhangfei Gao <zhangfei.gao@marvell.com>
- * Kevin Wang <dwang4@marvell.com>
- * Mingwei Wang <mwwang@marvell.com>
- * Philip Rakity <prakity@marvell.com>
- * Mark Brown <markb@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/* Supports:
- * SDHCI support for MMP2/PXA910/PXA168
- *
- * Refer to sdhci-s3c.c.
- */
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/mmc/host.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <plat/sdhci.h>
-#include "sdhci.h"
-
-#define DRIVER_NAME "sdhci-pxa"
-
-#define SD_FIFO_PARAM 0x104
-#define DIS_PAD_SD_CLK_GATE 0x400
-
-struct sdhci_pxa {
- struct sdhci_host *host;
- struct sdhci_pxa_platdata *pdata;
- struct clk *clk;
- struct resource *res;
-
- u8 clk_enable;
-};
-
-/*****************************************************************************\
- * *
- * SDHCI core callbacks *
- * *
-\*****************************************************************************/
-static void set_clock(struct sdhci_host *host, unsigned int clock)
-{
- struct sdhci_pxa *pxa = sdhci_priv(host);
- u32 tmp = 0;
-
- if (clock == 0) {
- if (pxa->clk_enable) {
- clk_disable(pxa->clk);
- pxa->clk_enable = 0;
- }
- } else {
- if (0 == pxa->clk_enable) {
- if (pxa->pdata->flags & PXA_FLAG_DISABLE_CLOCK_GATING) {
- tmp = readl(host->ioaddr + SD_FIFO_PARAM);
- tmp |= DIS_PAD_SD_CLK_GATE;
- writel(tmp, host->ioaddr + SD_FIFO_PARAM);
- }
- clk_enable(pxa->clk);
- pxa->clk_enable = 1;
- }
- }
-}
-
-static int set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
-{
- u16 ctrl_2;
-
- /*
- * Set V18_EN -- UHS modes do not work without this.
- * does not change signaling voltage
- */
- ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
-
- /* Select Bus Speed Mode for host */
- ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
- switch (uhs) {
- case MMC_TIMING_UHS_SDR12:
- ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
- break;
- case MMC_TIMING_UHS_SDR25:
- ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
- break;
- case MMC_TIMING_UHS_SDR50:
- ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180;
- break;
- case MMC_TIMING_UHS_SDR104:
- ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180;
- break;
- case MMC_TIMING_UHS_DDR50:
- ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180;
- break;
- }
-
- sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
- pr_debug("%s:%s uhs = %d, ctrl_2 = %04X\n",
- __func__, mmc_hostname(host->mmc), uhs, ctrl_2);
-
- return 0;
-}
-
-static struct sdhci_ops sdhci_pxa_ops = {
- .set_uhs_signaling = set_uhs_signaling,
- .set_clock = set_clock,
-};
-
-/*****************************************************************************\
- * *
- * Device probing/removal *
- * *
-\*****************************************************************************/
-
-static int __devinit sdhci_pxa_probe(struct platform_device *pdev)
-{
- struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
- struct device *dev = &pdev->dev;
- struct sdhci_host *host = NULL;
- struct resource *iomem = NULL;
- struct sdhci_pxa *pxa = NULL;
- int ret, irq;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no irq specified\n");
- return irq;
- }
-
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem) {
- dev_err(dev, "no memory specified\n");
- return -ENOENT;
- }
-
- host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pxa));
- if (IS_ERR(host)) {
- dev_err(dev, "failed to alloc host\n");
- return PTR_ERR(host);
- }
-
- pxa = sdhci_priv(host);
- pxa->host = host;
- pxa->pdata = pdata;
- pxa->clk_enable = 0;
-
- pxa->clk = clk_get(dev, "PXA-SDHCLK");
- if (IS_ERR(pxa->clk)) {
- dev_err(dev, "failed to get io clock\n");
- ret = PTR_ERR(pxa->clk);
- goto out;
- }
-
- pxa->res = request_mem_region(iomem->start, resource_size(iomem),
- mmc_hostname(host->mmc));
- if (!pxa->res) {
- dev_err(&pdev->dev, "cannot request region\n");
- ret = -EBUSY;
- goto out;
- }
-
- host->ioaddr = ioremap(iomem->start, resource_size(iomem));
- if (!host->ioaddr) {
- dev_err(&pdev->dev, "failed to remap registers\n");
- ret = -ENOMEM;
- goto out;
- }
-
- host->hw_name = "MMC";
- host->ops = &sdhci_pxa_ops;
- host->irq = irq;
- host->quirks = SDHCI_QUIRK_BROKEN_ADMA
- | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
- | SDHCI_QUIRK_32BIT_DMA_ADDR
- | SDHCI_QUIRK_32BIT_DMA_SIZE
- | SDHCI_QUIRK_32BIT_ADMA_SIZE
- | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
-
- if (pdata->quirks)
- host->quirks |= pdata->quirks;
-
- /* enable 1/8V DDR capable */
- host->mmc->caps |= MMC_CAP_1_8V_DDR;
-
- /* If slot design supports 8 bit data, indicate this to MMC. */
- if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
- host->mmc->caps |= MMC_CAP_8_BIT_DATA;
-
- ret = sdhci_add_host(host);
- if (ret) {
- dev_err(&pdev->dev, "failed to add host\n");
- goto out;
- }
-
- if (pxa->pdata->max_speed)
- host->mmc->f_max = pxa->pdata->max_speed;
-
- platform_set_drvdata(pdev, host);
-
- return 0;
-out:
- if (host) {
- clk_put(pxa->clk);
- if (host->ioaddr)
- iounmap(host->ioaddr);
- if (pxa->res)
- release_mem_region(pxa->res->start,
- resource_size(pxa->res));
- sdhci_free_host(host);
- }
-
- return ret;
-}
-
-static int __devexit sdhci_pxa_remove(struct platform_device *pdev)
-{
- struct sdhci_host *host = platform_get_drvdata(pdev);
- struct sdhci_pxa *pxa = sdhci_priv(host);
- int dead = 0;
- u32 scratch;
-
- if (host) {
- scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
- if (scratch == (u32)-1)
- dead = 1;
-
- sdhci_remove_host(host, dead);
-
- if (host->ioaddr)
- iounmap(host->ioaddr);
- if (pxa->res)
- release_mem_region(pxa->res->start,
- resource_size(pxa->res));
- if (pxa->clk_enable) {
- clk_disable(pxa->clk);
- pxa->clk_enable = 0;
- }
- clk_put(pxa->clk);
-
- sdhci_free_host(host);
- platform_set_drvdata(pdev, NULL);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int sdhci_pxa_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct sdhci_host *host = platform_get_drvdata(dev);
-
- return sdhci_suspend_host(host, state);
-}
-
-static int sdhci_pxa_resume(struct platform_device *dev)
-{
- struct sdhci_host *host = platform_get_drvdata(dev);
-
- return sdhci_resume_host(host);
-}
-#else
-#define sdhci_pxa_suspend NULL
-#define sdhci_pxa_resume NULL
-#endif
-
-static struct platform_driver sdhci_pxa_driver = {
- .probe = sdhci_pxa_probe,
- .remove = __devexit_p(sdhci_pxa_remove),
- .suspend = sdhci_pxa_suspend,
- .resume = sdhci_pxa_resume,
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-/*****************************************************************************\
- * *
- * Driver init/exit *
- * *
-\*****************************************************************************/
-
-static int __init sdhci_pxa_init(void)
-{
- return platform_driver_register(&sdhci_pxa_driver);
-}
-
-static void __exit sdhci_pxa_exit(void)
-{
- platform_driver_unregister(&sdhci_pxa_driver);
-}
-
-module_init(sdhci_pxa_init);
-module_exit(sdhci_pxa_exit);
-
-MODULE_DESCRIPTION("SDH controller driver for PXA168/PXA910/MMP2");
-MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
new file mode 100644
index 00000000000..38f58994f79
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Zhangfei Gao <zhangfei.gao@marvell.com>
+ * Kevin Wang <dwang4@marvell.com>
+ * Jun Nie <njun@marvell.com>
+ * Qiming Wu <wuqm@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_data/pxa_sdhci.h>
+#include <linux/slab.h>
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+#define SD_FIFO_PARAM 0xe0
+#define DIS_PAD_SD_CLK_GATE 0x0400 /* Turn on/off Dynamic SD Clock Gating */
+#define CLK_GATE_ON 0x0200 /* Disable/enable Clock Gate */
+#define CLK_GATE_CTL 0x0100 /* Clock Gate Control */
+#define CLK_GATE_SETTING_BITS (DIS_PAD_SD_CLK_GATE | \
+ CLK_GATE_ON | CLK_GATE_CTL)
+
+#define SD_CLOCK_BURST_SIZE_SETUP 0xe6
+#define SDCLK_SEL_SHIFT 8
+#define SDCLK_SEL_MASK 0x3
+#define SDCLK_DELAY_SHIFT 10
+#define SDCLK_DELAY_MASK 0x3c
+
+#define SD_CE_ATA_2 0xea
+#define MMC_CARD 0x1000
+#define MMC_WIDTH 0x0100
+
+static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask)
+{
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+
+ if (mask == SDHCI_RESET_ALL) {
+ u16 tmp = 0;
+
+ /*
+		 * Tune the timing of read data/command when CRC
+		 * errors happen; this has no performance impact.
+ */
+ if (pdata->clk_delay_sel == 1) {
+ tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
+
+ tmp &= ~(SDCLK_DELAY_MASK << SDCLK_DELAY_SHIFT);
+ tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK)
+ << SDCLK_DELAY_SHIFT;
+ tmp &= ~(SDCLK_SEL_MASK << SDCLK_SEL_SHIFT);
+ tmp |= (1 & SDCLK_SEL_MASK) << SDCLK_SEL_SHIFT;
+
+ writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
+ }
+
+ if (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING) {
+ tmp = readw(host->ioaddr + SD_FIFO_PARAM);
+ tmp &= ~CLK_GATE_SETTING_BITS;
+ writew(tmp, host->ioaddr + SD_FIFO_PARAM);
+ } else {
+ tmp = readw(host->ioaddr + SD_FIFO_PARAM);
+ tmp &= ~CLK_GATE_SETTING_BITS;
+ tmp |= CLK_GATE_SETTING_BITS;
+ writew(tmp, host->ioaddr + SD_FIFO_PARAM);
+ }
+ }
+}
+
+static int pxav2_mmc_set_width(struct sdhci_host *host, int width)
+{
+ u8 ctrl;
+ u16 tmp;
+
+ ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
+ tmp = readw(host->ioaddr + SD_CE_ATA_2);
+ if (width == MMC_BUS_WIDTH_8) {
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ tmp |= MMC_CARD | MMC_WIDTH;
+ } else {
+ tmp &= ~(MMC_CARD | MMC_WIDTH);
+ if (width == MMC_BUS_WIDTH_4)
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ else
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ }
+ writew(tmp, host->ioaddr + SD_CE_ATA_2);
+ writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
+
+ return 0;
+}
+
+static u32 pxav2_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk);
+}
+
+static struct sdhci_ops pxav2_sdhci_ops = {
+ .get_max_clock = pxav2_get_max_clock,
+ .platform_reset_exit = pxav2_set_private_registers,
+ .platform_8bit_width = pxav2_mmc_set_width,
+};
+
+static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host = NULL;
+ struct sdhci_pxa *pxa = NULL;
+ int ret;
+ struct clk *clk;
+
+ pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL);
+ if (!pxa)
+ return -ENOMEM;
+
+ host = sdhci_pltfm_init(pdev, NULL);
+ if (IS_ERR(host)) {
+ kfree(pxa);
+ return PTR_ERR(host);
+ }
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = pxa;
+
+ clk = clk_get(dev, "PXA-SDHCLK");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get io clock\n");
+ ret = PTR_ERR(clk);
+ goto err_clk_get;
+ }
+ pltfm_host->clk = clk;
+ clk_enable(clk);
+
+ host->quirks = SDHCI_QUIRK_BROKEN_ADMA
+ | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
+ | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
+
+ if (pdata) {
+ if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
+ /* on-chip device */
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+ }
+
+ /* If slot design supports 8 bit data, indicate this to MMC. */
+ if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
+ if (pdata->quirks)
+ host->quirks |= pdata->quirks;
+ if (pdata->host_caps)
+ host->mmc->caps |= pdata->host_caps;
+ if (pdata->pm_caps)
+ host->mmc->pm_caps |= pdata->pm_caps;
+ }
+
+ host->ops = &pxav2_sdhci_ops;
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add host\n");
+ goto err_add_host;
+ }
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+
+err_add_host:
+ clk_disable(clk);
+ clk_put(clk);
+err_clk_get:
+ sdhci_pltfm_free(pdev);
+ kfree(pxa);
+ return ret;
+}
+
+static int __devexit sdhci_pxav2_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_pxa *pxa = pltfm_host->priv;
+
+ sdhci_remove_host(host, 1);
+
+ clk_disable(pltfm_host->clk);
+ clk_put(pltfm_host->clk);
+ sdhci_pltfm_free(pdev);
+ kfree(pxa);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_pxav2_driver = {
+ .driver = {
+ .name = "sdhci-pxav2",
+ .owner = THIS_MODULE,
+ },
+ .probe = sdhci_pxav2_probe,
+ .remove = __devexit_p(sdhci_pxav2_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
+};
+static int __init sdhci_pxav2_init(void)
+{
+ return platform_driver_register(&sdhci_pxav2_driver);
+}
+
+static void __exit sdhci_pxav2_exit(void)
+{
+ platform_driver_unregister(&sdhci_pxav2_driver);
+}
+
+module_init(sdhci_pxav2_init);
+module_exit(sdhci_pxav2_exit);
+
+MODULE_DESCRIPTION("SDHCI driver for pxav2");
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
+
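For reference, a board file pairs the "sdhci-pxav2" driver above with a struct sdhci_pxa_platdata (from <linux/platform_data/pxa_sdhci.h>) describing the slot. The sketch below is illustrative only: the MMIO base, IRQ number and tuning value are placeholders, but the field and flag names are the ones consumed by sdhci_pxav2_probe() and pxav2_set_private_registers() in this patch.

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/platform_data/pxa_sdhci.h>

static struct sdhci_pxa_platdata board_sdh_pdata = {
	/* eMMC-style, always-present slot with dynamic clock gating */
	.flags            = PXA_FLAG_ENABLE_CLOCK_GATING | PXA_FLAG_CARD_PERMANENT,
	.clk_delay_sel    = 1,		/* take the tuned-delay path on reset exit */
	.clk_delay_cycles = 0x1f,	/* placeholder board-specific tuning value */
};

static struct resource board_sdh_resources[] = {
	{
		.start = 0xd4280000,	/* placeholder MMIO base */
		.end   = 0xd4280000 + 0xff,
		.flags = IORESOURCE_MEM,
	}, {
		.start = 39,		/* placeholder IRQ */
		.end   = 39,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device board_sdh_device = {
	.name          = "sdhci-pxav2",
	.id            = 0,
	.dev           = {
		.platform_data = &board_sdh_pdata,
	},
	.resource      = board_sdh_resources,
	.num_resources = ARRAY_SIZE(board_sdh_resources),
};

The probe also calls clk_get(dev, "PXA-SDHCLK"), so the platform clock tree is expected to provide a clock under that connection id for this device, and pxav2_set_private_registers() dereferences pdata unconditionally, so boards binding this driver are expected to supply platform data.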
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
new file mode 100644
index 00000000000..fc7e4a51562
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Zhangfei Gao <zhangfei.gao@marvell.com>
+ * Kevin Wang <dwang4@marvell.com>
+ * Mingwei Wang <mwwang@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ * Mark Brown <markb@marvell.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_data/pxa_sdhci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+#define SD_CLOCK_BURST_SIZE_SETUP 0x10A
+#define SDCLK_SEL 0x100
+#define SDCLK_DELAY_SHIFT 9
+#define SDCLK_DELAY_MASK 0x1f
+
+#define SD_CFG_FIFO_PARAM 0x100
+#define SDCFG_GEN_PAD_CLK_ON (1<<6)
+#define SDCFG_GEN_PAD_CLK_CNT_MASK 0xFF
+#define SDCFG_GEN_PAD_CLK_CNT_SHIFT 24
+
+#define SD_SPI_MODE 0x108
+#define SD_CE_ATA_1 0x10C
+
+#define SD_CE_ATA_2 0x10E
+#define SDCE_MISC_INT (1<<2)
+#define SDCE_MISC_INT_EN (1<<1)
+
+static void pxav3_set_private_registers(struct sdhci_host *host, u8 mask)
+{
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+
+ if (mask == SDHCI_RESET_ALL) {
+ /*
+ * tune the timing of read data/command when a CRC error happens;
+ * no performance impact
+ */
+ if (pdata && 0 != pdata->clk_delay_cycles) {
+ u16 tmp;
+
+ tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
+ tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK)
+ << SDCLK_DELAY_SHIFT;
+ tmp |= SDCLK_SEL;
+ writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
+ }
+ }
+}
+
+#define MAX_WAIT_COUNT 5
+static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_pxa *pxa = pltfm_host->priv;
+ u16 tmp;
+ int count;
+
+ if (pxa->power_mode == MMC_POWER_UP
+ && power_mode == MMC_POWER_ON) {
+
+ dev_dbg(mmc_dev(host->mmc),
+ "%s: slot->power_mode = %d,"
+ "ios->power_mode = %d\n",
+ __func__,
+ pxa->power_mode,
+ power_mode);
+
+ /* request notification when the 74 initialization clocks have been sent */
+ tmp = readw(host->ioaddr + SD_CE_ATA_2);
+ tmp |= SDCE_MISC_INT_EN;
+ writew(tmp, host->ioaddr + SD_CE_ATA_2);
+
+ /* start sending the 74 clocks */
+ tmp = readw(host->ioaddr + SD_CFG_FIFO_PARAM);
+ tmp |= SDCFG_GEN_PAD_CLK_ON;
+ writew(tmp, host->ioaddr + SD_CFG_FIFO_PARAM);
+
+ /* slowest speed is about 100 kHz, i.e. roughly 10 usec per clock */
+ udelay(740);
+ count = 0;
+
+ while (count++ < MAX_WAIT_COUNT) {
+ if ((readw(host->ioaddr + SD_CE_ATA_2)
+ & SDCE_MISC_INT) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (count == MAX_WAIT_COUNT)
+ dev_warn(mmc_dev(host->mmc), "74 clock interrupt not cleared\n");
+
+ /* clear the interrupt bit if posted */
+ tmp = readw(host->ioaddr + SD_CE_ATA_2);
+ tmp |= SDCE_MISC_INT;
+ writew(tmp, host->ioaddr + SD_CE_ATA_2);
+ }
+ pxa->power_mode = power_mode;
+}
+
+static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
+{
+ u16 ctrl_2;
+
+ /*
+ * Set V18_EN -- UHS modes do not work without this.
+ * This does not change the signaling voltage.
+ */
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ switch (uhs) {
+ case MMC_TIMING_UHS_SDR12:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180;
+ break;
+ }
+
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+ dev_dbg(mmc_dev(host->mmc),
+ "%s uhs = %d, ctrl_2 = %04X\n",
+ __func__, uhs, ctrl_2);
+
+ return 0;
+}
+
+static struct sdhci_ops pxav3_sdhci_ops = {
+ .platform_reset_exit = pxav3_set_private_registers,
+ .set_uhs_signaling = pxav3_set_uhs_signaling,
+ .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
+};
+
+static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host = NULL;
+ struct sdhci_pxa *pxa = NULL;
+ int ret;
+ struct clk *clk;
+
+ pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL);
+ if (!pxa)
+ return -ENOMEM;
+
+ host = sdhci_pltfm_init(pdev, NULL);
+ if (IS_ERR(host)) {
+ kfree(pxa);
+ return PTR_ERR(host);
+ }
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = pxa;
+
+ clk = clk_get(dev, "PXA-SDHCLK");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get io clock\n");
+ ret = PTR_ERR(clk);
+ goto err_clk_get;
+ }
+ pltfm_host->clk = clk;
+ clk_enable(clk);
+
+ host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
+ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
+ | SDHCI_QUIRK_32BIT_ADMA_SIZE;
+
+ /* enable 1.8V DDR capability */
+ host->mmc->caps |= MMC_CAP_1_8V_DDR;
+
+ if (pdata) {
+ if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
+ /* on-chip device */
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+ }
+
+ /* If slot design supports 8 bit data, indicate this to MMC. */
+ if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
+ if (pdata->quirks)
+ host->quirks |= pdata->quirks;
+ if (pdata->host_caps)
+ host->mmc->caps |= pdata->host_caps;
+ if (pdata->pm_caps)
+ host->mmc->pm_caps |= pdata->pm_caps;
+ }
+
+ host->ops = &pxav3_sdhci_ops;
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add host\n");
+ goto err_add_host;
+ }
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+
+err_add_host:
+ clk_disable(clk);
+ clk_put(clk);
+err_clk_get:
+ sdhci_pltfm_free(pdev);
+ kfree(pxa);
+ return ret;
+}
+
+static int __devexit sdhci_pxav3_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_pxa *pxa = pltfm_host->priv;
+
+ sdhci_remove_host(host, 1);
+
+ clk_disable(pltfm_host->clk);
+ clk_put(pltfm_host->clk);
+ sdhci_pltfm_free(pdev);
+ kfree(pxa);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_pxav3_driver = {
+ .driver = {
+ .name = "sdhci-pxav3",
+ .owner = THIS_MODULE,
+ },
+ .probe = sdhci_pxav3_probe,
+ .remove = __devexit_p(sdhci_pxav3_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
+};
+static int __init sdhci_pxav3_init(void)
+{
+ return platform_driver_register(&sdhci_pxav3_driver);
+}
+
+static void __exit sdhci_pxav3_exit(void)
+{
+ platform_driver_unregister(&sdhci_pxav3_driver);
+}
+
+module_init(sdhci_pxav3_init);
+module_exit(sdhci_pxav3_exit);
+
+MODULE_DESCRIPTION("SDHCI driver for pxav3");
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
+
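As a sanity check on the delays hard-coded in pxav3_gen_init_74_clocks() above: at the slowest initialization clock (about 100 kHz, i.e. roughly 10 us per cycle) the 74 required clocks take about 740 us, which matches the initial udelay(740), and the polling loop then allows at most another MAX_WAIT_COUNT x 10 us before warning. A minimal sketch of that budget, reusing the patch's constants:

/* Worked numbers behind udelay(740) + the MISC_INT polling loop above.
 * Illustrative only; the real duration depends on the actual bus clock. */
#define INIT_CLK_HZ	100000	/* slowest initialization clock, ~100 kHz */
#define INIT_CLOCKS	74	/* initialization clocks the card needs */
#define MAX_WAIT_COUNT	5	/* same bound as in the driver */

static unsigned int init_clocks_us(void)
{
	return INIT_CLOCKS * (1000000 / INIT_CLK_HZ);	/* 74 * 10 us = 740 us */
}

static unsigned int worst_case_wait_us(void)
{
	return init_clocks_us() + MAX_WAIT_COUNT * 10;	/* up to 790 us total */
}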
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 69e3ee321eb..2bd7bf4fece 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -19,6 +19,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include <linux/mmc/host.h>
@@ -502,6 +503,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
/* This host supports the Auto CMD12 */
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
+ /* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */
+ host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC;
+
if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
@@ -612,16 +616,14 @@ static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
{
struct sdhci_host *host = platform_get_drvdata(dev);
- sdhci_suspend_host(host, pm);
- return 0;
+ return sdhci_suspend_host(host, pm);
}
static int sdhci_s3c_resume(struct platform_device *dev)
{
struct sdhci_host *host = platform_get_drvdata(dev);
- sdhci_resume_host(host);
- return 0;
+ return sdhci_resume_host(host);
}
#else
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 343c97edba3..18b0bd31de7 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -24,7 +24,6 @@
#include <mach/gpio.h>
#include <mach/sdhci.h>
-#include "sdhci.h"
#include "sdhci-pltfm.h"
static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
@@ -116,20 +115,42 @@ static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
return 0;
}
+static struct sdhci_ops tegra_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
+ .read_l = tegra_sdhci_readl,
+ .read_w = tegra_sdhci_readw,
+ .write_l = tegra_sdhci_writel,
+ .platform_8bit_width = tegra_sdhci_8bit,
+};
+
+static struct sdhci_pltfm_data sdhci_tegra_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+ .ops = &tegra_sdhci_ops,
+};
-static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
- struct sdhci_pltfm_data *pdata)
+static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct sdhci_pltfm_host *pltfm_host;
struct tegra_sdhci_platform_data *plat;
+ struct sdhci_host *host;
struct clk *clk;
int rc;
+ host = sdhci_pltfm_init(pdev, &sdhci_tegra_pdata);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+
plat = pdev->dev.platform_data;
+
if (plat == NULL) {
dev_err(mmc_dev(host->mmc), "missing platform data\n");
- return -ENXIO;
+ rc = -ENXIO;
+ goto err_no_plat;
}
if (gpio_is_valid(plat->power_gpio)) {
@@ -137,7 +158,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate power gpio\n");
- goto out;
+ goto err_power_req;
}
tegra_gpio_enable(plat->power_gpio);
gpio_direction_output(plat->power_gpio, 1);
@@ -148,7 +169,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate cd gpio\n");
- goto out_power;
+ goto err_cd_req;
}
tegra_gpio_enable(plat->cd_gpio);
gpio_direction_input(plat->cd_gpio);
@@ -159,7 +180,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (rc) {
dev_err(mmc_dev(host->mmc), "request irq error\n");
- goto out_cd;
+ goto err_cd_irq_req;
}
}
@@ -169,7 +190,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate wp gpio\n");
- goto out_irq;
+ goto err_wp_req;
}
tegra_gpio_enable(plat->wp_gpio);
gpio_direction_input(plat->wp_gpio);
@@ -179,7 +200,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (IS_ERR(clk)) {
dev_err(mmc_dev(host->mmc), "clk err\n");
rc = PTR_ERR(clk);
- goto out_wp;
+ goto err_clk_get;
}
clk_enable(clk);
pltfm_host->clk = clk;
@@ -189,38 +210,47 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (plat->is_8bit)
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+ rc = sdhci_add_host(host);
+ if (rc)
+ goto err_add_host;
+
return 0;
-out_wp:
+err_add_host:
+ clk_disable(pltfm_host->clk);
+ clk_put(pltfm_host->clk);
+err_clk_get:
if (gpio_is_valid(plat->wp_gpio)) {
tegra_gpio_disable(plat->wp_gpio);
gpio_free(plat->wp_gpio);
}
-
-out_irq:
+err_wp_req:
if (gpio_is_valid(plat->cd_gpio))
free_irq(gpio_to_irq(plat->cd_gpio), host);
-out_cd:
+err_cd_irq_req:
if (gpio_is_valid(plat->cd_gpio)) {
tegra_gpio_disable(plat->cd_gpio);
gpio_free(plat->cd_gpio);
}
-
-out_power:
+err_cd_req:
if (gpio_is_valid(plat->power_gpio)) {
tegra_gpio_disable(plat->power_gpio);
gpio_free(plat->power_gpio);
}
-
-out:
+err_power_req:
+err_no_plat:
+ sdhci_pltfm_free(pdev);
return rc;
}
-static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
+static int __devexit sdhci_tegra_remove(struct platform_device *pdev)
{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
struct tegra_sdhci_platform_data *plat;
+ int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+
+ sdhci_remove_host(host, dead);
plat = pdev->dev.platform_data;
@@ -242,22 +272,37 @@ static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
clk_disable(pltfm_host->clk);
clk_put(pltfm_host->clk);
+
+ sdhci_pltfm_free(pdev);
+
+ return 0;
}
-static struct sdhci_ops tegra_sdhci_ops = {
- .get_ro = tegra_sdhci_get_ro,
- .read_l = tegra_sdhci_readl,
- .read_w = tegra_sdhci_readw,
- .write_l = tegra_sdhci_writel,
- .platform_8bit_width = tegra_sdhci_8bit,
+static struct platform_driver sdhci_tegra_driver = {
+ .driver = {
+ .name = "sdhci-tegra",
+ .owner = THIS_MODULE,
+ },
+ .probe = sdhci_tegra_probe,
+ .remove = __devexit_p(sdhci_tegra_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
};
-struct sdhci_pltfm_data sdhci_tegra_pdata = {
- .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
- SDHCI_QUIRK_SINGLE_POWER_WRITE |
- SDHCI_QUIRK_NO_HISPD_BIT |
- SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
- .ops = &tegra_sdhci_ops,
- .init = tegra_sdhci_pltfm_init,
- .exit = tegra_sdhci_pltfm_exit,
-};
+static int __init sdhci_tegra_init(void)
+{
+ return platform_driver_register(&sdhci_tegra_driver);
+}
+module_init(sdhci_tegra_init);
+
+static void __exit sdhci_tegra_exit(void)
+{
+ platform_driver_unregister(&sdhci_tegra_driver);
+}
+module_exit(sdhci_tegra_exit);
+
+MODULE_DESCRIPTION("SDHCI driver for Tegra");
+MODULE_AUTHOR(" Google, Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 58d5436ff64..0e02cc1df12 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -127,11 +127,15 @@ static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
- u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
+ u32 present, irqs;
if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
return;
+ present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT;
+ irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
+
if (enable)
sdhci_unmask_irqs(host, irqs);
else
@@ -624,12 +628,11 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
/* timeout in us */
if (!data)
target_timeout = cmd->cmd_timeout_ms * 1000;
- else
- target_timeout = data->timeout_ns / 1000 +
- data->timeout_clks / host->clock;
-
- if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
- host->timeout_clk = host->clock / 1000;
+ else {
+ target_timeout = data->timeout_ns / 1000;
+ if (host->clock)
+ target_timeout += data->timeout_clks / host->clock;
+ }
/*
* Figure out needed cycles.
@@ -641,7 +644,6 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
* =>
* (1) / (2) > 2^6
*/
- BUG_ON(!host->timeout_clk);
count = 0;
current_timeout = (1 << 13) * 1000 / host->timeout_clk;
while (current_timeout < target_timeout) {
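For context, the values this hunk computes feed the loop just below it: the controller's data timeout field selects 2^(13 + count) timeout-clock cycles, so count is the smallest exponent whose timeout covers target_timeout. A sketch of that search under the same unit conventions (timeout_clk in kHz, target_timeout in microseconds); the full function in sdhci.c additionally warns and clamps when no exponent is large enough:

#include <linux/types.h>

/* Illustrative only: mirrors the exponent search in sdhci_calc_timeout().
 * (1 << 13) * 1000 / timeout_clk is the shortest programmable timeout in
 * microseconds; each increment of count doubles it. */
static u8 sdhci_timeout_count(unsigned int target_timeout_us,
			      unsigned int timeout_clk_khz)
{
	unsigned int current_timeout = (1 << 13) * 1000 / timeout_clk_khz;
	u8 count = 0;

	while (current_timeout < target_timeout_us) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)	/* the timeout field is 4 bits wide */
			break;
	}
	return count;
}

For example, with a 48 MHz timeout clock (48000 kHz) the base timeout is 8192000 / 48000, roughly 170 us, so a 500 ms target settles on count = 12 (170 us x 2^12 is about 696 ms).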
@@ -1863,9 +1865,6 @@ static void sdhci_tasklet_finish(unsigned long param)
del_timer(&host->timer);
- if (host->version >= SDHCI_SPEC_300)
- del_timer(&host->tuning_timer);
-
mrq = host->mrq;
/*
@@ -2154,13 +2153,30 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
mmc_hostname(host->mmc), intmask);
if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+ u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT;
+
+ /*
+ * There is an observation on i.MX eSDHC: the INSERT bit is set again
+ * immediately after being cleared while a card is inserted. We have
+ * to mask the irq to prevent an interrupt storm that would freeze the
+ * system, and REMOVE behaves the same way.
+ *
+ * More testing is needed here to ensure this works on other
+ * platforms as well.
+ */
+ sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
+ SDHCI_INT_CARD_REMOVE);
+ sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
+ SDHCI_INT_CARD_INSERT);
+
sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
- SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+ SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+ intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
tasklet_schedule(&host->card_tasklet);
}
- intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
-
if (intmask & SDHCI_INT_CMD_MASK) {
sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
SDHCI_INT_STATUS);
@@ -2440,22 +2456,6 @@ int sdhci_add_host(struct sdhci_host *host)
host->max_clk = host->ops->get_max_clock(host);
}
- host->timeout_clk =
- (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
- if (host->timeout_clk == 0) {
- if (host->ops->get_timeout_clock) {
- host->timeout_clk = host->ops->get_timeout_clock(host);
- } else if (!(host->quirks &
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
- printk(KERN_ERR
- "%s: Hardware doesn't specify timeout clock "
- "frequency.\n", mmc_hostname(mmc));
- return -ENODEV;
- }
- }
- if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
- host->timeout_clk *= 1000;
-
/*
* In case of Host Controller v3.00, find out whether clock
* multiplier is supported.
@@ -2488,6 +2488,27 @@ int sdhci_add_host(struct sdhci_host *host)
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
+ host->timeout_clk =
+ (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
+ if (host->timeout_clk == 0) {
+ if (host->ops->get_timeout_clock) {
+ host->timeout_clk = host->ops->get_timeout_clock(host);
+ } else if (!(host->quirks &
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
+ printk(KERN_ERR
+ "%s: Hardware doesn't specify timeout clock "
+ "frequency.\n", mmc_hostname(mmc));
+ return -ENODEV;
+ }
+ }
+ if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
+ host->timeout_clk *= 1000;
+
+ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
+ host->timeout_clk = mmc->f_max / 1000;
+
+ mmc->max_discard_to = (1 << 27) / host->timeout_clk;
+
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
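The new max_discard_to assignment above tells the MMC core the longest data timeout the controller can express for erase/discard: the SDHCI data timeout tops out at 2^27 timeout-clock cycles, and since timeout_clk is kept in kHz at this point, dividing cycles by kHz yields milliseconds. A small worked example under those assumptions:

/* Illustrative arithmetic behind mmc->max_discard_to (assumes timeout_clk
 * is in kHz, as arranged earlier in sdhci_add_host(), so cycles / kHz = ms). */
static unsigned int sdhci_max_discard_ms(unsigned int timeout_clk_khz)
{
	return (1 << 27) / timeout_clk_khz;
}

/* Example: a 48 MHz timeout clock is 48000 kHz, giving
 * (1 << 27) / 48000 = 134217728 / 48000, roughly 2796 ms of discard budget. */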
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 14f8edbaa19..557886bee9c 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -175,6 +175,7 @@ struct sh_mmcif_host {
enum mmcif_state state;
spinlock_t lock;
bool power;
+ bool card_present;
/* DMA support */
struct dma_chan *chan_rx;
@@ -877,23 +878,23 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_unlock_irqrestore(&host->lock, flags);
if (ios->power_mode == MMC_POWER_UP) {
- if (p->set_pwr)
- p->set_pwr(host->pd, ios->power_mode);
- if (!host->power) {
+ if (!host->card_present) {
/* See if we also get DMA */
sh_mmcif_request_dma(host, host->pd->dev.platform_data);
- pm_runtime_get_sync(&host->pd->dev);
- host->power = true;
+ host->card_present = true;
}
} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
/* clock stop */
sh_mmcif_clock_control(host, 0);
if (ios->power_mode == MMC_POWER_OFF) {
- if (host->power) {
- pm_runtime_put(&host->pd->dev);
+ if (host->card_present) {
sh_mmcif_release_dma(host);
- host->power = false;
+ host->card_present = false;
}
+ }
+ if (host->power) {
+ pm_runtime_put(&host->pd->dev);
+ host->power = false;
if (p->down_pwr)
p->down_pwr(host->pd);
}
@@ -901,8 +902,16 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
return;
}
- if (ios->clock)
+ if (ios->clock) {
+ if (!host->power) {
+ if (p->set_pwr)
+ p->set_pwr(host->pd, ios->power_mode);
+ pm_runtime_get_sync(&host->pd->dev);
+ host->power = true;
+ sh_mmcif_sync_reset(host);
+ }
sh_mmcif_clock_control(host, ios->clock);
+ }
host->bus_width = ios->bus_width;
host->state = STATE_IDLE;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index b3654293017..774f6439d7c 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -26,6 +26,7 @@
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/sh_dma.h>
+#include <linux/delay.h>
#include "tmio_mmc.h"
@@ -55,6 +56,39 @@ static int sh_mobile_sdhi_get_cd(struct platform_device *pdev)
return -ENOSYS;
}
+static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
+{
+ int timeout = 1000;
+
+ while (--timeout && !(sd_ctrl_read16(host, CTL_STATUS2) & (1 << 13)))
+ udelay(1);
+
+ if (!timeout) {
+ dev_warn(host->pdata->dev, "timeout waiting for SD bus idle\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
+{
+ switch (addr)
+ {
+ case CTL_SD_CMD:
+ case CTL_STOP_INTERNAL_ACTION:
+ case CTL_XFER_BLK_COUNT:
+ case CTL_SD_CARD_CLK_CTL:
+ case CTL_SD_XFER_LEN:
+ case CTL_SD_MEM_CARD_OPT:
+ case CTL_TRANSACTION_CTL:
+ case CTL_DMA_ENABLE:
+ return sh_mobile_sdhi_wait_idle(host);
+ }
+
+ return 0;
+}
+
static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
{
struct sh_mobile_sdhi *priv;
@@ -86,13 +120,15 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->hclk = clk_get_rate(priv->clk);
mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
mmc_data->get_cd = sh_mobile_sdhi_get_cd;
+ if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
+ mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
if (p) {
mmc_data->flags = p->tmio_flags;
mmc_data->ocr_mask = p->tmio_ocr_mask;
mmc_data->capabilities |= p->tmio_caps;
- if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
+ if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
priv->param_tx.slave_id = p->dma_slave_tx;
priv->param_rx.slave_id = p->dma_slave_rx;
priv->dma_priv.chan_priv_tx = &priv->param_tx;
@@ -165,13 +201,14 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
p->pdata = NULL;
+ tmio_mmc_host_remove(host);
+
for (i = 0; i < 3; i++) {
irq = platform_get_irq(pdev, i);
if (irq >= 0)
free_irq(irq, host);
}
- tmio_mmc_host_remove(host);
clk_disable(priv->clk);
clk_put(priv->clk);
kfree(priv);
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 8d185de90d2..44a9668c4b7 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -27,7 +27,6 @@
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
{
const struct mfd_cell *cell = mfd_get_cell(dev);
- struct mmc_host *mmc = platform_get_drvdata(dev);
int ret;
ret = tmio_mmc_host_suspend(&dev->dev);
@@ -42,7 +41,6 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_mmc_resume(struct platform_device *dev)
{
const struct mfd_cell *cell = mfd_get_cell(dev);
- struct mmc_host *mmc = platform_get_drvdata(dev);
int ret = 0;
/* Tell the MFD core we are ready to be enabled */
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 8260bc2c34e..eeaf64391fb 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -18,8 +18,10 @@
#include <linux/highmem.h>
#include <linux/mmc/tmio.h>
+#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
+#include <linux/scatterlist.h>
/* Definitions for values the CTRL_SDIO_STATUS register can take. */
#define TMIO_SDIO_STAT_IOIRQ 0x0001
@@ -52,6 +54,8 @@ struct tmio_mmc_host {
void (*set_clk_div)(struct platform_device *host, int state);
int pm_error;
+ /* recognise system-wide suspend in runtime PM methods */
+ bool pm_global;
/* pio related stuff */
struct scatterlist *sg_ptr;
@@ -73,8 +77,11 @@ struct tmio_mmc_host {
/* Track lost interrupts */
struct delayed_work delayed_reset_work;
- spinlock_t lock;
+ struct work_struct done;
+
+ spinlock_t lock; /* protect host private data */
unsigned long last_req_ts;
+ struct mutex ios_lock; /* protect set_ios() context */
};
int tmio_mmc_host_probe(struct tmio_mmc_host **host,
@@ -103,6 +110,7 @@ static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data);
+void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable);
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
void tmio_mmc_release_dma(struct tmio_mmc_host *host);
#else
@@ -111,6 +119,10 @@ static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
{
}
+static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+{
+}
+
static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
@@ -134,4 +146,44 @@ int tmio_mmc_host_resume(struct device *dev);
int tmio_mmc_host_runtime_suspend(struct device *dev);
int tmio_mmc_host_runtime_resume(struct device *dev);
+static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
+{
+ return readw(host->ctl + (addr << host->bus_shift));
+}
+
+static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
+ u16 *buf, int count)
+{
+ readsw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+{
+ return readw(host->ctl + (addr << host->bus_shift)) |
+ readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+}
+
+static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
+{
+ /* If there is a hook and it returns non-zero then there
+ * is an error and the write should be skipped
+ */
+ if (host->pdata->write16_hook && host->pdata->write16_hook(host, addr))
+ return;
+ writew(val, host->ctl + (addr << host->bus_shift));
+}
+
+static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
+ u16 *buf, int count)
+{
+ writesw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+ writew(val, host->ctl + (addr << host->bus_shift));
+ writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+}
+
+
#endif
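The sd_ctrl_*() helpers moved into tmio_mmc.h above all scale the 16-bit register index by host->bus_shift before touching the MMIO window, and sd_ctrl_read32()/sd_ctrl_write32() assemble a 32-bit value from two adjacent 16-bit halves at addr and addr + 2. A minimal sketch of that address math (the shift values below are examples, not a statement about any particular SoC):

/* Byte offset used by the accessors above: register index scaled by the
 * host's bus_shift. Example values are illustrative. */
static inline unsigned long tmio_reg_offset(unsigned int addr,
					    unsigned int bus_shift)
{
	return (unsigned long)addr << bus_shift;
}

/*
 * bus_shift == 0: 16-bit registers packed back to back, so the byte
 *	offset equals the register index.
 * bus_shift == 1: each 16-bit register occupies a 32-bit slot, so the
 *	same index lands at twice the offset, and the 32-bit accessors
 *	still pick up the high half from (addr + 2) << bus_shift.
 */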
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 25f1ad6cbe0..86f259cdfcb 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -11,6 +11,7 @@
*/
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
@@ -22,11 +23,14 @@
#define TMIO_MMC_MIN_DMA_LEN 8
-static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
+ if (!host->chan_tx || !host->chan_rx)
+ return;
+
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
/* Switch DMA mode on or off - SuperH specific? */
- writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift));
+ sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
#endif
}
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index ad6347bb02d..1f16357e730 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -46,40 +46,6 @@
#include "tmio_mmc.h"
-static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
-{
- return readw(host->ctl + (addr << host->bus_shift));
-}
-
-static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
- u16 *buf, int count)
-{
- readsw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
-{
- return readw(host->ctl + (addr << host->bus_shift)) |
- readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
-}
-
-static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
-{
- writew(val, host->ctl + (addr << host->bus_shift));
-}
-
-static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
- u16 *buf, int count)
-{
- writesw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
-{
- writew(val, host->ctl + (addr << host->bus_shift));
- writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
-}
-
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
@@ -284,10 +250,16 @@ static void tmio_mmc_reset_work(struct work_struct *work)
/* called with host->lock held, interrupts disabled */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
- struct mmc_request *mrq = host->mrq;
+ struct mmc_request *mrq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
- if (!mrq)
+ mrq = host->mrq;
+ if (IS_ERR_OR_NULL(mrq)) {
+ spin_unlock_irqrestore(&host->lock, flags);
return;
+ }
host->cmd = NULL;
host->data = NULL;
@@ -296,11 +268,18 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
cancel_delayed_work(&host->delayed_reset_work);
host->mrq = NULL;
+ spin_unlock_irqrestore(&host->lock, flags);
- /* FIXME: mmc_request_done() can schedule! */
mmc_request_done(host->mmc, mrq);
}
+static void tmio_mmc_done_work(struct work_struct *work)
+{
+ struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
+ done);
+ tmio_mmc_finish_request(host);
+}
+
/* These are the bitmasks the tmio chip requires to implement the MMC response
* types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD 0x0040
@@ -467,7 +446,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
BUG();
}
- tmio_mmc_finish_request(host);
+ schedule_work(&host->done);
}
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
@@ -557,7 +536,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
tasklet_schedule(&host->dma_issue);
}
} else {
- tmio_mmc_finish_request(host);
+ schedule_work(&host->done);
}
out:
@@ -567,6 +546,7 @@ out:
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
+ struct mmc_host *mmc = host->mmc;
struct tmio_mmc_data *pdata = host->pdata;
unsigned int ireg, irq_mask, status;
unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
@@ -588,13 +568,13 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
if (sdio_ireg && !host->sdio_irq_enabled) {
pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
sdio_status, sdio_irq_mask, sdio_ireg);
- tmio_mmc_enable_sdio_irq(host->mmc, 0);
+ tmio_mmc_enable_sdio_irq(mmc, 0);
goto out;
}
- if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
+ if (mmc->caps & MMC_CAP_SDIO_IRQ &&
sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
- mmc_signal_sdio_irq(host->mmc);
+ mmc_signal_sdio_irq(mmc);
if (sdio_ireg)
goto out;
@@ -603,58 +583,49 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
pr_debug_status(status);
pr_debug_status(ireg);
- if (!ireg) {
- tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);
-
- pr_warning("tmio_mmc: Spurious irq, disabling! "
- "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
- pr_debug_status(status);
-
+ /* Card insert / remove attempts */
+ if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
+ tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
+ TMIO_STAT_CARD_REMOVE);
+ if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
+ ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
+ !work_pending(&mmc->detect.work))
+ mmc_detect_change(host->mmc, msecs_to_jiffies(100));
goto out;
}
- while (ireg) {
- /* Card insert / remove attempts */
- if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
- tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
- TMIO_STAT_CARD_REMOVE);
- mmc_detect_change(host->mmc, msecs_to_jiffies(100));
- }
-
- /* CRC and other errors */
-/* if (ireg & TMIO_STAT_ERR_IRQ)
- * handled |= tmio_error_irq(host, irq, stat);
+ /* CRC and other errors */
+/* if (ireg & TMIO_STAT_ERR_IRQ)
+ * handled |= tmio_error_irq(host, irq, stat);
*/
- /* Command completion */
- if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
- tmio_mmc_ack_mmc_irqs(host,
- TMIO_STAT_CMDRESPEND |
- TMIO_STAT_CMDTIMEOUT);
- tmio_mmc_cmd_irq(host, status);
- }
-
- /* Data transfer */
- if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
- tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
- tmio_mmc_pio_irq(host);
- }
-
- /* Data transfer completion */
- if (ireg & TMIO_STAT_DATAEND) {
- tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
- tmio_mmc_data_irq(host);
- }
+ /* Command completion */
+ if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
+ tmio_mmc_ack_mmc_irqs(host,
+ TMIO_STAT_CMDRESPEND |
+ TMIO_STAT_CMDTIMEOUT);
+ tmio_mmc_cmd_irq(host, status);
+ goto out;
+ }
- /* Check status - keep going until we've handled it all */
- status = sd_ctrl_read32(host, CTL_STATUS);
- irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
- ireg = status & TMIO_MASK_IRQ & ~irq_mask;
+ /* Data transfer */
+ if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
+ tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
+ tmio_mmc_pio_irq(host);
+ goto out;
+ }
- pr_debug("Status at end of loop: %08x\n", status);
- pr_debug_status(status);
+ /* Data transfer completion */
+ if (ireg & TMIO_STAT_DATAEND) {
+ tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
+ tmio_mmc_data_irq(host);
+ goto out;
}
- pr_debug("MMC IRQ end\n");
+
+ pr_warning("tmio_mmc: Spurious irq, disabling! "
+ "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
+ pr_debug_status(status);
+ tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);
out:
return IRQ_HANDLED;
@@ -749,6 +720,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct tmio_mmc_data *pdata = host->pdata;
unsigned long flags;
+ mutex_lock(&host->ios_lock);
+
spin_lock_irqsave(&host->lock, flags);
if (host->mrq) {
if (IS_ERR(host->mrq)) {
@@ -764,6 +737,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->mrq->cmd->opcode, host->last_req_ts, jiffies);
}
spin_unlock_irqrestore(&host->lock, flags);
+
+ mutex_unlock(&host->ios_lock);
return;
}
@@ -771,33 +746,30 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_unlock_irqrestore(&host->lock, flags);
- if (ios->clock)
- tmio_mmc_set_clock(host, ios->clock);
-
- /* Power sequence - OFF -> UP -> ON */
- if (ios->power_mode == MMC_POWER_UP) {
- if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && !pdata->power) {
+ /*
+ * pdata->power is false only if COLD_CD is available; otherwise it is
+ * false only for short intervals during probing or resuming
+ */
+ if (ios->power_mode == MMC_POWER_ON && ios->clock) {
+ if (!pdata->power) {
pm_runtime_get_sync(&host->pdev->dev);
pdata->power = true;
}
+ tmio_mmc_set_clock(host, ios->clock);
/* power up SD bus */
if (host->set_pwr)
host->set_pwr(host->pdev, 1);
- } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
- /* power down SD bus */
- if (ios->power_mode == MMC_POWER_OFF) {
- if (host->set_pwr)
- host->set_pwr(host->pdev, 0);
- if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
- pdata->power) {
- pdata->power = false;
- pm_runtime_put(&host->pdev->dev);
- }
- }
- tmio_mmc_clk_stop(host);
- } else {
/* start bus clock */
tmio_mmc_clk_start(host);
+ } else if (ios->power_mode != MMC_POWER_UP) {
+ if (host->set_pwr)
+ host->set_pwr(host->pdev, 0);
+ if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
+ pdata->power) {
+ pdata->power = false;
+ pm_runtime_put(&host->pdev->dev);
+ }
+ tmio_mmc_clk_stop(host);
}
switch (ios->bus_width) {
@@ -817,6 +789,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
current->comm, task_pid_nr(current),
ios->clock, ios->power_mode);
host->mrq = NULL;
+
+ mutex_unlock(&host->ios_lock);
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
@@ -824,8 +798,8 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = host->pdata;
- return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
- !(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
+ return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+ (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
static int tmio_mmc_get_cd(struct mmc_host *mmc)
@@ -913,16 +887,20 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
tmio_mmc_enable_sdio_irq(mmc, 0);
spin_lock_init(&_host->lock);
+ mutex_init(&_host->ios_lock);
/* Init delayed work for request timeouts */
INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
+ INIT_WORK(&_host->done, tmio_mmc_done_work);
/* See if we also get DMA */
tmio_mmc_request_dma(_host, pdata);
/* We have to keep the device powered for its card detection to work */
- if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD))
+ if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) {
+ pdata->power = true;
pm_runtime_get_noresume(&pdev->dev);
+ }
mmc_add_host(mmc);
@@ -963,6 +941,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
pm_runtime_get_sync(&pdev->dev);
mmc_remove_host(host->mmc);
+ cancel_work_sync(&host->done);
cancel_delayed_work_sync(&host->delayed_reset_work);
tmio_mmc_release_dma(host);
@@ -998,11 +977,16 @@ int tmio_mmc_host_resume(struct device *dev)
/* The MMC core will perform the complete set up */
host->pdata->power = false;
+ host->pm_global = true;
if (!host->pm_error)
pm_runtime_get_sync(dev);
- tmio_mmc_reset(mmc_priv(mmc));
- tmio_mmc_request_dma(host, host->pdata);
+ if (host->pm_global) {
+ /* Runtime PM resume callback didn't run */
+ tmio_mmc_reset(host);
+ tmio_mmc_enable_dma(host, true);
+ host->pm_global = false;
+ }
return mmc_resume_host(mmc);
}
@@ -1023,12 +1007,15 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
struct tmio_mmc_data *pdata = host->pdata;
tmio_mmc_reset(host);
+ tmio_mmc_enable_dma(host, true);
if (pdata->power) {
/* Only entered after a card-insert interrupt */
- tmio_mmc_set_ios(mmc, &mmc->ios);
+ if (!mmc->card)
+ tmio_mmc_set_ios(mmc, &mmc->ios);
mmc_detect_change(mmc, msecs_to_jiffies(100));
}
+ host->pm_global = false;
return 0;
}
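One design note on the tmio_mmc_pio.c changes above: the old code called tmio_mmc_finish_request() straight from the interrupt path behind a FIXME that mmc_request_done() can schedule, so completion is now deferred to the host->done work item, and finish_request takes host->lock and tolerates a NULL or error-valued mrq. A stripped-down sketch of that deferral pattern, with hypothetical driver_* names (the real work item is initialized with INIT_WORK(&_host->done, tmio_mmc_done_work) in tmio_mmc_host_probe()):

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct driver_host {
	spinlock_t lock;
	struct work_struct done;
	void *mrq;			/* in-flight request, if any */
};

static void driver_complete_request(void *mrq)
{
	/* stands in for mmc_request_done(), which can schedule */
}

static void driver_done_work(struct work_struct *work)
{
	struct driver_host *host = container_of(work, struct driver_host, done);
	unsigned long flags;
	void *mrq;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;		/* claim the request under the lock */
	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq)
		driver_complete_request(mrq);
}

/* called from the interrupt handler: never complete in hard IRQ context */
static void driver_irq_complete(struct driver_host *host)
{
	schedule_work(&host->done);
}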
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index cbb03305b77..e8f6e65183d 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -1625,8 +1625,8 @@ static void __vub300_command_response(struct vub300_mmc_host *vub300,
cmd->error = respretval;
} else if (cmd->error) {
/*
- * the error occured sending the command
- * or recieving the response
+ * the error occurred sending the command
+ * or receiving the response
*/
} else if (vub300->command_out_urb->status) {
vub300->usb_transport_fail = vub300->command_out_urb->status;
@@ -2096,7 +2096,7 @@ static struct mmc_host_ops vub300_mmc_ops = {
static int vub300_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{ /* NOT irq */
- struct vub300_mmc_host *vub300 = NULL;
+ struct vub300_mmc_host *vub300;
struct usb_host_interface *iface_desc;
struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface));
int i;
@@ -2118,23 +2118,20 @@ static int vub300_probe(struct usb_interface *interface,
command_out_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!command_out_urb) {
retval = -ENOMEM;
- dev_err(&vub300->udev->dev,
- "not enough memory for the command_out_urb\n");
+ dev_err(&udev->dev, "not enough memory for command_out_urb\n");
goto error0;
}
command_res_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!command_res_urb) {
retval = -ENOMEM;
- dev_err(&vub300->udev->dev,
- "not enough memory for the command_res_urb\n");
+ dev_err(&udev->dev, "not enough memory for command_res_urb\n");
goto error1;
}
/* this also allocates memory for our VUB300 mmc host device */
mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev);
if (!mmc) {
retval = -ENOMEM;
- dev_err(&vub300->udev->dev,
- "not enough memory for the mmc_host\n");
+ dev_err(&udev->dev, "not enough memory for the mmc_host\n");
goto error4;
}
/* MMC core transfer sizes tunable parameters */
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 1e2c430aaad..83e80c65d6e 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -5,7 +5,7 @@
*
* Copyright © 2009 Bluewater Systems Ltd
* Author: Andre Renaud <andre@bluewatersys.com>
- * Author: Ryan Mallon <ryan@bluewatersys.com>
+ * Author: Ryan Mallon
*
* Based on m25p80.c
*
@@ -498,5 +498,5 @@ module_exit(sst25l_exit);
MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips");
MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, "
- "Ryan Mallon <ryan@bluewatersys.com>");
+ "Ryan Mallon");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index d4297a97e10..67815eed2f0 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -142,7 +142,7 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
state->map.write = bfin_flash_write;
state->map.copy_to = bfin_flash_copy_to;
state->map.bankwidth = pdata->width;
- state->map.size = memory->end - memory->start + 1;
+ state->map.size = resource_size(memory);
state->map.virt = (void __iomem *)memory->start;
state->map.phys = memory->start;
state->map.map_priv_1 = (unsigned long)state;
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index c00b9175ba9..1594a802631 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -155,7 +155,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
if (!plat)
return -ENODEV;
- window_size = dev->resource->end - dev->resource->start + 1;
+ window_size = resource_size(dev->resource);
dev_info(&dev->dev, "Probe of IXP2000 flash(%d banks x %dMiB)\n",
ixp_data->nr_banks, ((u32)window_size >> 20));
@@ -194,16 +194,17 @@ static int ixp2000_flash_probe(struct platform_device *dev)
info->map.copy_to = ixp2000_flash_copy_to;
info->res = request_mem_region(dev->resource->start,
- dev->resource->end - dev->resource->start + 1,
- dev_name(&dev->dev));
+ resource_size(dev->resource),
+ dev_name(&dev->dev));
if (!info->res) {
dev_err(&dev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
goto Error;
}
- info->map.map_priv_1 = (unsigned long) ioremap(dev->resource->start,
- dev->resource->end - dev->resource->start + 1);
+ info->map.map_priv_1 =
+ (unsigned long)ioremap(dev->resource->start,
+ resource_size(dev->resource));
if (!info->map.map_priv_1) {
dev_err(&dev->dev, "Failed to ioremap flash region\n");
err = -EIO;
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index f59d62f74d4..7ae137d4b99 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -70,7 +70,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
info->map.name = (char *) flash->name;
info->map.bankwidth = flash->width;
info->map.phys = res->start;
- info->map.size = res->end - res->start + 1;
+ info->map.size = resource_size(res);
info->parts = flash->parts;
info->nr_parts = flash->nr_parts;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 3f92731a5b9..f1af2228a1b 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1192,7 +1192,7 @@ err_unregister_chdev:
static void __exit cleanup_mtdchar(void)
{
unregister_mtd_user(&mtdchar_notifier);
- mntput(mtd_inode_mnt);
+ kern_unmount(mtd_inode_mnt);
unregister_filesystem(&mtd_inodefs_type);
__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index b300705d41c..55da20ccc7a 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -22,6 +22,7 @@
*
*/
+#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -513,7 +514,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
host->io_phys = (dma_addr_t)mem->start;
- host->io_base = ioremap(mem->start, mem->end - mem->start + 1);
+ host->io_base = ioremap(mem->start, resource_size(mem));
if (host->io_base == NULL) {
printk(KERN_ERR "atmel_nand: ioremap failed\n");
res = -EIO;
@@ -547,7 +548,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
if (no_ecc)
nand_chip->ecc.mode = NAND_ECC_NONE;
if (hard_ecc && regs) {
- host->ecc = ioremap(regs->start, regs->end - regs->start + 1);
+ host->ecc = ioremap(regs->start, resource_size(regs));
if (host->ecc == NULL) {
printk(KERN_ERR "atmel_nand: ioremap failed\n");
res = -EIO;
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 9ec280738a9..8c569e454dc 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -380,7 +380,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
return -ENXIO;
/* map physical address */
- bcm_umi_io_base = ioremap(r->start, r->end - r->start + 1);
+ bcm_umi_io_base = ioremap(r->start, resource_size(r));
if (!bcm_umi_io_base) {
printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 0bb254c7d2b..33d8aad8bba 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -339,9 +339,9 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(FIR_OP_UA << FIR_OP1_SHIFT) |
(FIR_OP_RBW << FIR_OP2_SHIFT));
out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
- /* 5 bytes for manuf, device and exts */
- out_be32(&lbc->fbcr, 5);
- elbc_fcm_ctrl->read_bytes = 5;
+ /* nand_get_flash_type() reads all 8 bytes of the ID string */
+ out_be32(&lbc->fbcr, 8);
+ elbc_fcm_ctrl->read_bytes = 8;
elbc_fcm_ctrl->use_mdr = 1;
elbc_fcm_ctrl->mdr = 0;
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 2f7c930872f..eb1fbac63eb 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -713,7 +713,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
}
regs_paddr = res.start;
- regs_size = res.end - res.start + 1;
+ regs_size = resource_size(&res);
if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
dev_err(dev, "Error requesting memory region!\n");
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 65626c1c446..6c3fb5ab20f 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -953,10 +953,14 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (!ubi->peb_buf2)
goto out_free;
+ err = ubi_debugging_init_dev(ubi);
+ if (err)
+ goto out_free;
+
err = attach_by_scanning(ubi);
if (err) {
dbg_err("failed to attach by scanning, error %d", err);
- goto out_free;
+ goto out_debugging;
}
if (ubi->autoresize_vol_id != -1) {
@@ -969,12 +973,16 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (err)
goto out_detach;
+ err = ubi_debugfs_init_dev(ubi);
+ if (err)
+ goto out_uif;
+
ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
if (IS_ERR(ubi->bgt_thread)) {
err = PTR_ERR(ubi->bgt_thread);
ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
err);
- goto out_uif;
+ goto out_debugfs;
}
ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
@@ -1008,12 +1016,18 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
+out_debugfs:
+ ubi_debugfs_exit_dev(ubi);
out_uif:
+ get_device(&ubi->dev);
+ ubi_assert(ref);
uif_close(ubi);
out_detach:
ubi_wl_close(ubi);
free_internal_volumes(ubi);
vfree(ubi->vtbl);
+out_debugging:
+ ubi_debugging_exit_dev(ubi);
out_free:
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
@@ -1080,11 +1094,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
*/
get_device(&ubi->dev);
+ ubi_debugfs_exit_dev(ubi);
uif_close(ubi);
ubi_wl_close(ubi);
free_internal_volumes(ubi);
vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
+ ubi_debugging_exit_dev(ubi);
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
@@ -1199,6 +1215,11 @@ static int __init ubi_init(void)
if (!ubi_wl_entry_slab)
goto out_dev_unreg;
+ err = ubi_debugfs_init();
+ if (err)
+ goto out_slab;
+
+
/* Attach MTD devices */
for (i = 0; i < mtd_devs; i++) {
struct mtd_dev_param *p = &mtd_dev_param[i];
@@ -1247,6 +1268,8 @@ out_detach:
ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
mutex_unlock(&ubi_devices_mutex);
}
+ ubi_debugfs_exit();
+out_slab:
kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
misc_deregister(&ubi_ctrl_cdev);
@@ -1270,6 +1293,7 @@ static void __exit ubi_exit(void)
ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
mutex_unlock(&ubi_devices_mutex);
}
+ ubi_debugfs_exit();
kmem_cache_destroy(ubi_wl_entry_slab);
misc_deregister(&ubi_ctrl_cdev);
class_remove_file(ubi_class, &ubi_version);
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 191f3bb3c41..3320a50ba4f 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -189,12 +189,16 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
return new_offset;
}
-static int vol_cdev_fsync(struct file *file, int datasync)
+static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_device *ubi = desc->vol->ubi;
-
- return ubi_sync(ubi->ubi_num);
+ struct inode *inode = file->f_path.dentry->d_inode;
+ int err;
+ mutex_lock(&inode->i_mutex);
+ err = ubi_sync(ubi->ubi_num);
+ mutex_unlock(&inode->i_mutex);
+ return err;
}
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 2224cbe41dd..ab80c0debac 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -27,17 +27,9 @@
#ifdef CONFIG_MTD_UBI_DEBUG
#include "ubi.h"
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-unsigned int ubi_chk_flags;
-unsigned int ubi_tst_flags;
-
-module_param_named(debug_chks, ubi_chk_flags, uint, S_IRUGO | S_IWUSR);
-module_param_named(debug_tsts, ubi_chk_flags, uint, S_IRUGO | S_IWUSR);
-
-MODULE_PARM_DESC(debug_chks, "Debug check flags");
-MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
/**
* ubi_dbg_dump_ec_hdr - dump an erase counter header.
@@ -239,4 +231,261 @@ out:
return;
}
+/**
+ * ubi_debugging_init_dev - initialize debugging for an UBI device.
+ * @ubi: UBI device description object
+ *
+ * This function initializes debugging-related data for UBI device @ubi.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+int ubi_debugging_init_dev(struct ubi_device *ubi)
+{
+ ubi->dbg = kzalloc(sizeof(struct ubi_debug_info), GFP_KERNEL);
+ if (!ubi->dbg)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * ubi_debugging_exit_dev - free debugging data for an UBI device.
+ * @ubi: UBI device description object
+ */
+void ubi_debugging_exit_dev(struct ubi_device *ubi)
+{
+ kfree(ubi->dbg);
+}
+
+/*
+ * Root directory for UBI files in debugfs. Contains sub-directories
+ * with the files specific to particular UBI devices.
+ */
+static struct dentry *dfs_rootdir;
+
+/**
+ * ubi_debugfs_init - create UBI debugfs directory.
+ *
+ * Create UBI debugfs directory. Returns zero in case of success and a negative
+ * error code in case of failure.
+ */
+int ubi_debugfs_init(void)
+{
+ dfs_rootdir = debugfs_create_dir("ubi", NULL);
+ if (IS_ERR_OR_NULL(dfs_rootdir)) {
+ int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
+
+ ubi_err("cannot create \"ubi\" debugfs directory, error %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_debugfs_exit - remove UBI debugfs directory.
+ */
+void ubi_debugfs_exit(void)
+{
+ debugfs_remove(dfs_rootdir);
+}
+
+/* Read an UBI debugfs file */
+static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long ubi_num = (unsigned long)file->private_data;
+ struct dentry *dent = file->f_path.dentry;
+ struct ubi_device *ubi;
+ struct ubi_debug_info *d;
+ char buf[3];
+ int val;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ d = ubi->dbg;
+
+ if (dent == d->dfs_chk_gen)
+ val = d->chk_gen;
+ else if (dent == d->dfs_chk_io)
+ val = d->chk_io;
+ else if (dent == d->dfs_disable_bgt)
+ val = d->disable_bgt;
+ else if (dent == d->dfs_emulate_bitflips)
+ val = d->emulate_bitflips;
+ else if (dent == d->dfs_emulate_io_failures)
+ val = d->emulate_io_failures;
+ else {
+ count = -EINVAL;
+ goto out;
+ }
+
+ if (val)
+ buf[0] = '1';
+ else
+ buf[0] = '0';
+ buf[1] = '\n';
+ buf[2] = 0x00;
+
+ count = simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+
+out:
+ ubi_put_device(ubi);
+ return count;
+}
+
+/* Write an UBI debugfs file */
+static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long ubi_num = (unsigned long)file->private_data;
+ struct dentry *dent = file->f_path.dentry;
+ struct ubi_device *ubi;
+ struct ubi_debug_info *d;
+ size_t buf_size;
+ char buf[8];
+ int val;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ d = ubi->dbg;
+
+ buf_size = min_t(size_t, count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size)) {
+ count = -EFAULT;
+ goto out;
+ }
+
+ if (buf[0] == '1')
+ val = 1;
+ else if (buf[0] == '0')
+ val = 0;
+ else {
+ count = -EINVAL;
+ goto out;
+ }
+
+ if (dent == d->dfs_chk_gen)
+ d->chk_gen = val;
+ else if (dent == d->dfs_chk_io)
+ d->chk_io = val;
+ else if (dent == d->dfs_disable_bgt)
+ d->disable_bgt = val;
+ else if (dent == d->dfs_emulate_bitflips)
+ d->emulate_bitflips = val;
+ else if (dent == d->dfs_emulate_io_failures)
+ d->emulate_io_failures = val;
+ else
+ count = -EINVAL;
+
+out:
+ ubi_put_device(ubi);
+ return count;
+}
+
+static int default_open(struct inode *inode, struct file *file)
+{
+ if (inode->i_private)
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+/* File operations for all UBI debugfs files */
+static const struct file_operations dfs_fops = {
+ .read = dfs_file_read,
+ .write = dfs_file_write,
+ .open = default_open,
+ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * ubi_debugfs_init_dev - initialize debugfs for an UBI device.
+ * @ubi: UBI device description object
+ *
+ * This function creates all debugfs files for UBI device @ubi. Returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+int ubi_debugfs_init_dev(struct ubi_device *ubi)
+{
+ int err, n;
+ unsigned long ubi_num = ubi->ubi_num;
+ const char *fname;
+ struct dentry *dent;
+ struct ubi_debug_info *d = ubi->dbg;
+
+ n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
+ ubi->ubi_num);
+ if (n == UBI_DFS_DIR_LEN) {
+ /* The array size is too small */
+ fname = UBI_DFS_DIR_NAME;
+ dent = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ fname = d->dfs_dir_name;
+ dent = debugfs_create_dir(fname, dfs_rootdir);
+ if (IS_ERR_OR_NULL(dent))
+ goto out;
+ d->dfs_dir = dent;
+
+ fname = "chk_gen";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_gen = dent;
+
+ fname = "chk_io";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_io = dent;
+
+ fname = "tst_disable_bgt";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_disable_bgt = dent;
+
+ fname = "tst_emulate_bitflips";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_emulate_bitflips = dent;
+
+ fname = "tst_emulate_io_failures";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_emulate_io_failures = dent;
+
+ return 0;
+
+out_remove:
+ debugfs_remove_recursive(d->dfs_dir);
+out:
+ err = dent ? PTR_ERR(dent) : -ENODEV;
+ ubi_err("cannot create \"%s\" debugfs file or directory, error %d\n",
+ fname, err);
+ return err;
+}
+
+/**
+ * ubi_debugfs_exit_dev - free all debugfs files corresponding to device @ubi
+ * @ubi: UBI device description object
+ */
+void ubi_debugfs_exit_dev(struct ubi_device *ubi)
+{
+ debugfs_remove_recursive(ubi->dbg->dfs_dir);
+}
+
#endif /* CONFIG_MTD_UBI_DEBUG */
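The per-device knobs created above follow the stock debugfs open/read/write pattern: open() stashes i_private in file->private_data, read() reports "0\n" or "1\n", and write() accepts '0' or '1'. As a rough, self-contained sketch of that pattern under the same assumptions (a hypothetical demo module, not part of this patch; the names demo_flag, demo_fops and the "demo"/"flag" paths are made up for illustration):

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/fs.h>

static struct dentry *demo_dir;
static int demo_flag;

/* Remember which flag this file controls */
static int demo_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

/* Report the flag as "0\n" or "1\n" */
static ssize_t demo_read(struct file *file, char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	int *flag = file->private_data;
	char buf[3] = { *flag ? '1' : '0', '\n', 0 };

	return simple_read_from_buffer(ubuf, count, ppos, buf, 2);
}

/* Accept '0' or '1' as the first character written */
static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t count, loff_t *ppos)
{
	int *flag = file->private_data;
	char c;

	if (count < 1)
		return -EINVAL;
	if (get_user(c, ubuf))
		return -EFAULT;
	if (c != '0' && c != '1')
		return -EINVAL;
	*flag = c - '0';
	return count;
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.open = demo_open,
	.read = demo_read,
	.write = demo_write,
	.llseek = no_llseek,
};

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	if (IS_ERR_OR_NULL(demo_dir))
		return -ENODEV;
	debugfs_create_file("flag", S_IRUSR | S_IWUSR, demo_dir,
			    &demo_flag, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Writing '0' or '1' to the file flips the flag, which is how the chk_* and tst_* files above behave; the UBI code additionally routes through ubi_get_device()/ubi_put_device() so the device cannot disappear while the file is being accessed.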
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 3f1a09c5c43..65b5b76cc37 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -21,14 +21,6 @@
#ifndef __UBI_DEBUG_H__
#define __UBI_DEBUG_H__
-struct ubi_ec_hdr;
-struct ubi_vid_hdr;
-struct ubi_volume;
-struct ubi_vtbl_record;
-struct ubi_scan_volume;
-struct ubi_scan_leb;
-struct ubi_mkvol_req;
-
#ifdef CONFIG_MTD_UBI_DEBUG
#include <linux/random.h>
@@ -71,86 +63,103 @@ void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv);
void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
-
-extern unsigned int ubi_chk_flags;
-
-/*
- * Debugging check flags.
- *
- * UBI_CHK_GEN: general checks
- * UBI_CHK_IO: check writes and erases
- */
-enum {
- UBI_CHK_GEN = 0x1,
- UBI_CHK_IO = 0x2,
-};
-
int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
int offset, int len);
-
-extern unsigned int ubi_tst_flags;
+int ubi_debugging_init_dev(struct ubi_device *ubi);
+void ubi_debugging_exit_dev(struct ubi_device *ubi);
+int ubi_debugfs_init(void);
+void ubi_debugfs_exit(void);
+int ubi_debugfs_init_dev(struct ubi_device *ubi);
+void ubi_debugfs_exit_dev(struct ubi_device *ubi);
/*
- * Special testing flags.
+ * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
+ * + 2 for the number plus 1 for the trailing zero byte).
+ */
+#define UBI_DFS_DIR_NAME "ubi%d"
+#define UBI_DFS_DIR_LEN (3 + 2 + 1)
+
+/**
+ * struct ubi_debug_info - debugging information for an UBI device.
*
- * UBIFS_TST_DISABLE_BGT: disable the background thread
- * UBI_TST_EMULATE_BITFLIPS: emulate bit-flips
- * UBI_TST_EMULATE_WRITE_FAILURES: emulate write failures
- * UBI_TST_EMULATE_ERASE_FAILURES: emulate erase failures
+ * @chk_gen: if UBI general extra checks are enabled
+ * @chk_io: if UBI I/O extra checks are enabled
+ * @disable_bgt: disable the background task for testing purposes
+ * @emulate_bitflips: emulate bit-flips for testing purposes
+ * @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @dfs_dir_name: name of debugfs directory containing files of this UBI device
+ * @dfs_dir: direntry object of the UBI device debugfs directory
+ * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
+ * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_disable_bgt: debugfs knob to disable the background task
+ * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
+ * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
*/
-enum {
- UBI_TST_DISABLE_BGT = 0x1,
- UBI_TST_EMULATE_BITFLIPS = 0x2,
- UBI_TST_EMULATE_WRITE_FAILURES = 0x4,
- UBI_TST_EMULATE_ERASE_FAILURES = 0x8,
+struct ubi_debug_info {
+ unsigned int chk_gen:1;
+ unsigned int chk_io:1;
+ unsigned int disable_bgt:1;
+ unsigned int emulate_bitflips:1;
+ unsigned int emulate_io_failures:1;
+ char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_chk_gen;
+ struct dentry *dfs_chk_io;
+ struct dentry *dfs_disable_bgt;
+ struct dentry *dfs_emulate_bitflips;
+ struct dentry *dfs_emulate_io_failures;
};
/**
* ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * @ubi: UBI device description object
*
* Returns non-zero if the UBI background thread is disabled for testing
* purposes.
*/
-static inline int ubi_dbg_is_bgt_disabled(void)
+static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
{
- return ubi_tst_flags & UBI_TST_DISABLE_BGT;
+ return ubi->dbg->disable_bgt;
}
/**
* ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
+ * @ubi: UBI device description object
*
* Returns non-zero if a bit-flip should be emulated, otherwise returns zero.
*/
-static inline int ubi_dbg_is_bitflip(void)
+static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
- if (ubi_tst_flags & UBI_TST_EMULATE_BITFLIPS)
+ if (ubi->dbg->emulate_bitflips)
return !(random32() % 200);
return 0;
}
/**
* ubi_dbg_is_write_failure - if it is time to emulate a write failure.
+ * @ubi: UBI device description object
*
* Returns non-zero if a write failure should be emulated, otherwise returns
* zero.
*/
-static inline int ubi_dbg_is_write_failure(void)
+static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
- if (ubi_tst_flags & UBI_TST_EMULATE_WRITE_FAILURES)
+ if (ubi->dbg->emulate_io_failures)
return !(random32() % 500);
return 0;
}
/**
* ubi_dbg_is_erase_failure - if it's time to emulate an erase failure.
+ * @ubi: UBI device description object
*
* Returns non-zero if an erase failure should be emulated, otherwise returns
* zero.
*/
-static inline int ubi_dbg_is_erase_failure(void)
+static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
- if (ubi_tst_flags & UBI_TST_EMULATE_ERASE_FAILURES)
+ if (ubi->dbg->emulate_io_failures)
return !(random32() % 400);
return 0;
}
@@ -201,11 +210,6 @@ static inline void ubi_dbg_dump_flash(struct ubi_device *ubi,
static inline void
ubi_dbg_print_hex_dump(const char *l, const char *ps, int pt, int r,
int g, const void *b, size_t len, bool a) { return; }
-
-static inline int ubi_dbg_is_bgt_disabled(void) { return 0; }
-static inline int ubi_dbg_is_bitflip(void) { return 0; }
-static inline int ubi_dbg_is_write_failure(void) { return 0; }
-static inline int ubi_dbg_is_erase_failure(void) { return 0; }
static inline int ubi_dbg_check_all_ff(struct ubi_device *ubi,
int pnum, int offset,
int len) { return 0; }
@@ -213,5 +217,20 @@ static inline int ubi_dbg_check_write(struct ubi_device *ubi,
const void *buf, int pnum,
int offset, int len) { return 0; }
+static inline int ubi_debugging_init_dev(struct ubi_device *ubi) { return 0; }
+static inline void ubi_debugging_exit_dev(struct ubi_device *ubi) { return; }
+static inline int ubi_debugfs_init(void) { return 0; }
+static inline void ubi_debugfs_exit(void) { return; }
+static inline int ubi_debugfs_init_dev(struct ubi_device *ubi) { return 0; }
+static inline void ubi_debugfs_exit_dev(struct ubi_device *ubi) { return; }
+
+static inline int
+ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi) { return 0; }
+static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi) { return 0; }
+static inline int
+ubi_dbg_is_write_failure(const struct ubi_device *ubi) { return 0; }
+static inline int
+ubi_dbg_is_erase_failure(const struct ubi_device *ubi) { return 0; }
+
#endif /* !CONFIG_MTD_UBI_DEBUG */
#endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 8c1b1c7bc4a..6ba55c23587 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -212,7 +212,7 @@ retry:
} else {
ubi_assert(len == read);
- if (ubi_dbg_is_bitflip()) {
+ if (ubi_dbg_is_bitflip(ubi)) {
dbg_gen("bit-flip (emulated)");
err = UBI_IO_BITFLIPS;
}
@@ -281,7 +281,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
return err;
}
- if (ubi_dbg_is_write_failure()) {
+ if (ubi_dbg_is_write_failure(ubi)) {
dbg_err("cannot write %d bytes to PEB %d:%d "
"(emulated)", len, pnum, offset);
ubi_dbg_dump_stack();
@@ -396,7 +396,7 @@ retry:
if (err)
return err;
- if (ubi_dbg_is_erase_failure()) {
+ if (ubi_dbg_is_erase_failure(ubi)) {
dbg_err("cannot erase PEB %d (emulated)", pnum);
return -EIO;
}
@@ -1146,7 +1146,7 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
{
int err;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi->dbg->chk_io)
return 0;
err = ubi_io_is_bad(ubi, pnum);
@@ -1173,7 +1173,7 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi->dbg->chk_io)
return 0;
magic = be32_to_cpu(ec_hdr->magic);
@@ -1211,7 +1211,7 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
uint32_t crc, hdr_crc;
struct ubi_ec_hdr *ec_hdr;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi->dbg->chk_io)
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1255,7 +1255,7 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi->dbg->chk_io)
return 0;
magic = be32_to_cpu(vid_hdr->magic);
@@ -1296,7 +1296,7 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
struct ubi_vid_hdr *vid_hdr;
void *p;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi->dbg->chk_io)
return 0;
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -1348,7 +1348,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
void *buf1;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi->dbg->chk_io)
return 0;
buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
@@ -1412,7 +1412,7 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi->dbg->chk_io)
return 0;
buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 2135a53732f..a3a198f9b98 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -1347,7 +1347,7 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
struct ubi_scan_leb *seb, *last_seb;
uint8_t *buf;
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi->dbg->chk_gen)
return 0;
/*
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index c6c22295898..dc64c767fd2 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -44,7 +44,6 @@
#include "ubi-media.h"
#include "scan.h"
-#include "debug.h"
/* Maximum number of supported UBI devices */
#define UBI_MAX_DEVICES 32
@@ -390,6 +389,8 @@ struct ubi_wl_entry;
* @peb_buf2: another buffer of PEB size used for different purposes
* @buf_mutex: protects @peb_buf1 and @peb_buf2
* @ckvol_mutex: serializes static volume checking when opening
+ *
+ * @dbg: debugging information for this UBI device
*/
struct ubi_device {
struct cdev cdev;
@@ -472,8 +473,12 @@ struct ubi_device {
void *peb_buf2;
struct mutex buf_mutex;
struct mutex ckvol_mutex;
+
+ struct ubi_debug_info *dbg;
};
+#include "debug.h"
+
extern struct kmem_cache *ubi_wl_entry_slab;
extern const struct file_operations ubi_ctrl_cdev_operations;
extern const struct file_operations ubi_cdev_operations;
@@ -662,6 +667,7 @@ static inline void ubi_ro_mode(struct ubi_device *ubi)
if (!ubi->ro_mode) {
ubi->ro_mode = 1;
ubi_warn("switch to read-only mode");
+ ubi_dbg_dump_stack();
}
}
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 366eb70219a..97e093d1967 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -871,7 +871,7 @@ static int paranoid_check_volumes(struct ubi_device *ubi)
{
int i, err = 0;
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi->dbg->chk_gen)
return 0;
for (i = 0; i < ubi->vtbl_slots; i++) {
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index fd3bf770f51..4b50a3029b8 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -307,8 +307,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
{
int err, tries = 0;
static struct ubi_vid_hdr *vid_hdr;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *new_seb, *old_seb = NULL;
+ struct ubi_scan_leb *new_seb;
ubi_msg("create volume table (copy #%d)", copy + 1);
@@ -316,15 +315,6 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
if (!vid_hdr)
return -ENOMEM;
- /*
- * Check if there is a logical eraseblock which would have to contain
- * this volume table copy was found during scanning. It has to be wiped
- * out.
- */
- sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
- if (sv)
- old_seb = ubi_scan_find_seb(sv, copy);
-
retry:
new_seb = ubi_scan_get_free_peb(ubi, si);
if (IS_ERR(new_seb)) {
@@ -351,8 +341,8 @@ retry:
goto write_error;
/*
- * And add it to the scanning information. Don't delete the old
- * @old_seb as it will be deleted and freed in 'ubi_scan_add_used()'.
+ * And add it to the scanning information. Don't delete the old version
+ * of this LEB as it will be deleted and freed in 'ubi_scan_add_used()'.
*/
err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
vid_hdr, 0);
@@ -876,7 +866,7 @@ out_free:
*/
static void paranoid_vtbl_check(const struct ubi_device *ubi)
{
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi->dbg->chk_gen)
return;
if (vtbl_check(ubi, ubi->vtbl)) {
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index ff2c4956eef..42c684cf368 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1,4 +1,5 @@
/*
+ * @ubi: UBI device description object
* Copyright (c) International Business Machines Corp., 2006
*
* This program is free software; you can redistribute it and/or modify
@@ -163,12 +164,14 @@ struct ubi_work {
#ifdef CONFIG_MTD_UBI_DEBUG
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
-static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
+static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
struct rb_root *root);
-static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
+static int paranoid_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
-#define paranoid_check_in_wl_tree(e, root)
+#define paranoid_check_in_wl_tree(ubi, e, root)
#define paranoid_check_in_pq(ubi, e) 0
#endif
@@ -449,7 +452,7 @@ retry:
BUG();
}
- paranoid_check_in_wl_tree(e, &ubi->free);
+ paranoid_check_in_wl_tree(ubi, e, &ubi->free);
/*
* Move the physical eraseblock to the protection queue where it will
@@ -613,7 +616,7 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
list_add_tail(&wrk->list, &ubi->works);
ubi_assert(ubi->works_count >= 0);
ubi->works_count += 1;
- if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled())
+ if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
}
@@ -712,7 +715,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
e1->ec, e2->ec);
goto out_cancel;
}
- paranoid_check_in_wl_tree(e1, &ubi->used);
+ paranoid_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("move PEB %d EC %d to PEB %d EC %d",
e1->pnum, e1->ec, e2->pnum, e2->ec);
@@ -721,12 +724,12 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
scrubbing = 1;
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- paranoid_check_in_wl_tree(e1, &ubi->scrub);
+ paranoid_check_in_wl_tree(ubi, e1, &ubi->scrub);
rb_erase(&e1->u.rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
- paranoid_check_in_wl_tree(e2, &ubi->free);
+ paranoid_check_in_wl_tree(ubi, e2, &ubi->free);
rb_erase(&e2->u.rb, &ubi->free);
ubi->move_from = e1;
ubi->move_to = e2;
@@ -1169,13 +1172,13 @@ retry:
return 0;
} else {
if (in_wl_tree(e, &ubi->used)) {
- paranoid_check_in_wl_tree(e, &ubi->used);
+ paranoid_check_in_wl_tree(ubi, e, &ubi->used);
rb_erase(&e->u.rb, &ubi->used);
} else if (in_wl_tree(e, &ubi->scrub)) {
- paranoid_check_in_wl_tree(e, &ubi->scrub);
+ paranoid_check_in_wl_tree(ubi, e, &ubi->scrub);
rb_erase(&e->u.rb, &ubi->scrub);
} else if (in_wl_tree(e, &ubi->erroneous)) {
- paranoid_check_in_wl_tree(e, &ubi->erroneous);
+ paranoid_check_in_wl_tree(ubi, e, &ubi->erroneous);
rb_erase(&e->u.rb, &ubi->erroneous);
ubi->erroneous_peb_count -= 1;
ubi_assert(ubi->erroneous_peb_count >= 0);
@@ -1242,7 +1245,7 @@ retry:
}
if (in_wl_tree(e, &ubi->used)) {
- paranoid_check_in_wl_tree(e, &ubi->used);
+ paranoid_check_in_wl_tree(ubi, e, &ubi->used);
rb_erase(&e->u.rb, &ubi->used);
} else {
int err;
@@ -1364,7 +1367,7 @@ int ubi_thread(void *u)
spin_lock(&ubi->wl_lock);
if (list_empty(&ubi->works) || ubi->ro_mode ||
- !ubi->thread_enabled || ubi_dbg_is_bgt_disabled()) {
+ !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&ubi->wl_lock);
schedule();
@@ -1579,7 +1582,7 @@ static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
long long read_ec;
struct ubi_ec_hdr *ec_hdr;
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi->dbg->chk_gen)
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1609,16 +1612,18 @@ out_free:
/**
* paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
+ * @ubi: UBI device description object
* @e: the wear-leveling entry to check
* @root: the root of the tree
*
* This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
* is not.
*/
-static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
+static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
struct rb_root *root)
{
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi->dbg->chk_gen)
return 0;
if (in_wl_tree(e, root))
@@ -1638,12 +1643,13 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
*
* This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
*/
-static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
+static int paranoid_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
{
struct ubi_wl_entry *p;
int i;
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi->dbg->chk_gen)
return 0;
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 5b732988d49..84e68f1b9ad 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -49,6 +49,7 @@ static const char version[] =
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <asm/uaccess.h>
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 903bcb3ef5b..60b35fb5f52 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -594,7 +594,6 @@ static void lance_load_multicast (struct net_device *dev)
volatile struct lance_init_block *ib = lp->init_block;
volatile u16 *mcast_table = (u16 *)&ib->filter;
struct netdev_hw_addr *ha;
- char *addrs;
u32 crc;
/* set all multicast bits */
@@ -609,13 +608,7 @@ static void lance_load_multicast (struct net_device *dev)
/* Add addresses */
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
-
- /* multicast address? */
- if (!(*addrs & 1))
- continue;
-
- crc = ether_crc_le(6, addrs);
+ crc = ether_crc_le(6, ha->addr);
crc = crc >> 26;
mcast_table [crc >> 4] |= 1 << (crc & 0xf);
}
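The multicast hunk above (and the identical ones in a2065.c further down) drops the old '(*addrs & 1)' test because the dev->mc list walked by netdev_for_each_mc_addr() only carries multicast entries, so hashing ha->addr directly is sufficient. For reference, a small user-space sketch of the same ether_crc_le()-based filter arithmetic; the MAC address below is just an example, and ether_crc_le() is re-implemented here only so the snippet compiles outside the kernel:

#include <stdio.h>
#include <stdint.h>

/* Reflected CRC-32 with init ~0 and no final XOR, i.e. the kernel's
 * ether_crc_le()/crc32_le(~0, ...) as used by the LANCE drivers. */
static uint32_t ether_crc_le(int len, const uint8_t *p)
{
	uint32_t crc = 0xffffffff;
	int bit;

	while (len--) {
		crc ^= *p++;
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	/* Example multicast address (mDNS), for illustration only */
	uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint32_t crc = ether_crc_le(6, mac) >> 26;	/* top 6 CRC bits */

	/* Same indexing as the driver: 4 words of 16 filter bits each */
	printf("filter word %u, bit %u\n", crc >> 4, crc & 0xf);
	return 0;
}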
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 10c45051cae..cc4c210a91f 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -60,6 +60,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
@@ -77,17 +78,6 @@
#include <asm/irq.h>
#include <asm/uaccess.h>
-/* VLAN tagging feature enable/disable */
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define CP_VLAN_TAG_USED 1
-#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
- do { (tx_desc)->opts2 = cpu_to_le32(vlan_tag_value); } while (0)
-#else
-#define CP_VLAN_TAG_USED 0
-#define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
- do { (tx_desc)->opts2 = 0; } while (0)
-#endif
-
/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -355,9 +345,6 @@ struct cp_private {
unsigned rx_buf_sz;
unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
-#if CP_VLAN_TAG_USED
- struct vlan_group *vlgrp;
-#endif
dma_addr_t ring_dma;
struct mii_if_info mii_if;
@@ -422,24 +409,6 @@ static struct {
};
-#if CP_VLAN_TAG_USED
-static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct cp_private *cp = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&cp->lock, flags);
- cp->vlgrp = grp;
- if (grp)
- cp->cpcmd |= RxVlanOn;
- else
- cp->cpcmd &= ~RxVlanOn;
-
- cpw16(CpCmd, cp->cpcmd);
- spin_unlock_irqrestore(&cp->lock, flags);
-}
-#endif /* CP_VLAN_TAG_USED */
-
static inline void cp_set_rxbufsize (struct cp_private *cp)
{
unsigned int mtu = cp->dev->mtu;
@@ -454,18 +423,17 @@ static inline void cp_set_rxbufsize (struct cp_private *cp)
static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
struct cp_desc *desc)
{
+ u32 opts2 = le32_to_cpu(desc->opts2);
+
skb->protocol = eth_type_trans (skb, cp->dev);
cp->dev->stats.rx_packets++;
cp->dev->stats.rx_bytes += skb->len;
-#if CP_VLAN_TAG_USED
- if (cp->vlgrp && (desc->opts2 & cpu_to_le32(RxVlanTagged))) {
- vlan_hwaccel_receive_skb(skb, cp->vlgrp,
- swab16(le32_to_cpu(desc->opts2) & 0xffff));
- } else
-#endif
- netif_receive_skb(skb);
+ if (opts2 & RxVlanTagged)
+ __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
+
+ napi_gro_receive(&cp->napi, skb);
}
static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
@@ -729,6 +697,12 @@ static void cp_tx (struct cp_private *cp)
netif_wake_queue(cp->dev);
}
+static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
+{
+ return vlan_tx_tag_present(skb) ?
+ TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
+}
+
static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
struct net_device *dev)
{
@@ -736,9 +710,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
unsigned entry;
u32 eor, flags;
unsigned long intr_flags;
-#if CP_VLAN_TAG_USED
- u32 vlan_tag = 0;
-#endif
+ __le32 opts2;
int mss = 0;
spin_lock_irqsave(&cp->lock, intr_flags);
@@ -751,15 +723,12 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
-#if CP_VLAN_TAG_USED
- if (vlan_tx_tag_present(skb))
- vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb));
-#endif
-
entry = cp->tx_head;
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
mss = skb_shinfo(skb)->gso_size;
+ opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
+
if (skb_shinfo(skb)->nr_frags == 0) {
struct cp_desc *txd = &cp->tx_ring[entry];
u32 len;
@@ -767,7 +736,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
len = skb->len;
mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
- CP_VLAN_TX_TAG(txd, vlan_tag);
+ txd->opts2 = opts2;
txd->addr = cpu_to_le64(mapping);
wmb();
@@ -838,7 +807,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
ctrl |= LastFrag;
txd = &cp->tx_ring[entry];
- CP_VLAN_TX_TAG(txd, vlan_tag);
+ txd->opts2 = opts2;
txd->addr = cpu_to_le64(mapping);
wmb();
@@ -850,7 +819,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
}
txd = &cp->tx_ring[first_entry];
- CP_VLAN_TX_TAG(txd, vlan_tag);
+ txd->opts2 = opts2;
txd->addr = cpu_to_le64(first_mapping);
wmb();
@@ -1430,6 +1399,11 @@ static int cp_set_features(struct net_device *dev, u32 features)
else
cp->cpcmd &= ~RxChkSum;
+ if (features & NETIF_F_HW_VLAN_RX)
+ cp->cpcmd |= RxVlanOn;
+ else
+ cp->cpcmd &= ~RxVlanOn;
+
cpw16_f(CpCmd, cp->cpcmd);
spin_unlock_irqrestore(&cp->lock, flags);
@@ -1817,9 +1791,6 @@ static const struct net_device_ops cp_netdev_ops = {
.ndo_start_xmit = cp_start_xmit,
.ndo_tx_timeout = cp_tx_timeout,
.ndo_set_features = cp_set_features,
-#if CP_VLAN_TAG_USED
- .ndo_vlan_rx_register = cp_vlan_rx_register,
-#endif
#ifdef BROKEN
.ndo_change_mtu = cp_change_mtu,
#endif
@@ -1948,15 +1919,16 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &cp_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
-#if CP_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-#endif
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
/* disabled by default until verified */
- dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HIGHDMA;
dev->irq = pdev->irq;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 98517a37347..c2672c692d6 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -100,6 +100,7 @@
#include <linux/compiler.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
@@ -992,6 +993,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
* features
*/
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
+ dev->vlan_features = dev->features;
dev->irq = pdev->irq;
diff --git a/drivers/net/8390.h b/drivers/net/8390.h
index 3d9e8fb4fbe..58a12e4c78f 100644
--- a/drivers/net/8390.h
+++ b/drivers/net/8390.h
@@ -9,6 +9,7 @@
#include <linux/if_ether.h>
#include <linux/ioport.h>
+#include <linux/irqreturn.h>
#include <linux/skbuff.h>
#define TX_PAGES 12 /* Two Tx slots */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 19f04a34783..8d0314dbd94 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1934,13 +1934,6 @@ config DECLANCE
DEC (now Compaq) based on the AMD Lance chipset, including the
DEPCA series. (This chipset is better known via the NE2100 cards.)
-config 68360_ENET
- bool "Motorola 68360 ethernet controller"
- depends on M68360
- help
- Say Y here if you want to use the built-in ethernet controller of
- the Motorola 68360 processor.
-
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
@@ -2115,7 +2108,6 @@ config E1000
config E1000E
tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
- select CRC32
depends on PCI && (!SPARC32 || BROKEN)
select CRC32
---help---
@@ -2197,15 +2189,6 @@ config IGBVF
source "drivers/net/ixp2000/Kconfig"
-config MYRI_SBUS
- tristate "MyriCOM Gigabit Ethernet support"
- depends on SBUS
- help
- This driver supports MyriCOM Sbus gigabit Ethernet cards.
-
- To compile this driver as a module, choose M here: the module
- will be called myri_sbus. This is recommended.
-
config NS83820
tristate "National Semiconductor DP83820 support"
depends on PCI
@@ -2282,7 +2265,7 @@ config SIS190
will be called sis190. This is recommended.
config SKGE
- tristate "New SysKonnect GigaEthernet support"
+ tristate "Marvell Yukon Gigabit Ethernet support"
depends on PCI
select CRC32
---help---
@@ -2298,7 +2281,7 @@ config SKGE
Linksys EG1032/EG1064, 3Com 3C940/3C940B, SysKonnect SK-9871/9872.
It does not support the newer Yukon2 chipset: a separate driver,
- sky2, is provided for Yukon2-based adapters.
+ sky2, is provided for these adapters.
To compile this driver as a module, choose M here: the module
will be called skge. This is recommended.
@@ -2313,8 +2296,17 @@ config SKGE_DEBUG
If unsure, say N.
+config SKGE_GENESIS
+ bool "Support for older SysKonnect Genesis boards"
+ depends on SKGE
+ help
+ This enables support for the older and uncommon SysKonnect Genesis
+ chips, which support MII via an external transceiver, instead of
+ an internal one. Disabling this option will save some memory
+ by making code smaller. If unsure say Y.
+
config SKY2
- tristate "SysKonnect Yukon2 support"
+ tristate "Marvell Yukon 2 support"
depends on PCI
select CRC32
---help---
@@ -2324,7 +2316,7 @@ config SKY2
88E8053/88E8055/88E8061/88E8062, SysKonnect SK-9E21D/SK-9S21
There is companion driver for the older Marvell Yukon and
- Genesis based adapters: skge.
+ SysKonnect Genesis based adapters: skge.
To compile this driver as a module, choose M here: the module
will be called sky2. This is recommended.
@@ -2561,6 +2553,15 @@ config PCH_GBE
ML7223 is companion chip for Intel Atom E6xx series.
ML7223 is completely compatible for Intel EG20T PCH.
+config FTGMAC100
+ tristate "Faraday FTGMAC100 Gigabit Ethernet support"
+ depends on ARM
+ select PHYLIB
+ help
+ This driver supports the FTGMAC100 Gigabit Ethernet controller
+ from Faraday. It is used on Faraday A369, Andes AG102 and some
+ other ARM/NDS32 SoCs.
+
endif # NETDEV_1000
#
@@ -3416,7 +3417,8 @@ config NETCONSOLE
config NETCONSOLE_DYNAMIC
bool "Dynamic reconfiguration of logging targets"
- depends on NETCONSOLE && SYSFS && CONFIGFS_FS
+ depends on NETCONSOLE && SYSFS && CONFIGFS_FS && \
+ !(NETCONSOLE=y && CONFIGFS_FS=m)
help
This option enables the ability to dynamically reconfigure target
parameters (interface, IP addresses, port numbers, MAC addresses)
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 776a478e629..e1eca2ab505 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -59,7 +59,6 @@ obj-$(CONFIG_HAPPYMEAL) += sunhme.o
obj-$(CONFIG_SUNLANCE) += sunlance.o
obj-$(CONFIG_SUNQE) += sunqe.o
obj-$(CONFIG_SUNBMAC) += sunbmac.o
-obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o
obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o
obj-$(CONFIG_CASSINI) += cassini.o
obj-$(CONFIG_SUNVNET) += sunvnet.o
@@ -128,7 +127,6 @@ obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
endif
-obj-$(CONFIG_68360_ENET) += 68360enet.o
obj-$(CONFIG_WD80x3) += wd.o 8390.o
obj-$(CONFIG_EL2) += 3c503.o 8390p.o
obj-$(CONFIG_NE2000) += ne.o 8390p.o
@@ -148,6 +146,7 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
obj-$(CONFIG_NE_H8300) += ne-h8300.o
obj-$(CONFIG_AX88796) += ax88796.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_FTGMAC100) += ftgmac100.o
obj-$(CONFIG_FTMAC100) += ftmac100.o
obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
@@ -283,6 +282,7 @@ obj-$(CONFIG_USB_HSO) += usb/
obj-$(CONFIG_USB_USBNET) += usb/
obj-$(CONFIG_USB_ZD1201) += usb/
obj-$(CONFIG_USB_IPHETH) += usb/
+obj-$(CONFIG_USB_CDC_PHONET) += usb/
obj-$(CONFIG_WLAN) += wireless/
obj-$(CONFIG_NET_TULIP) += tulip/
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index deaa8bc16cf..e1e1b07d9b8 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -37,6 +37,11 @@
* both 10BASE-2 (thin coax) and AUI (DB-15) connectors
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/*#define DEBUG*/
+/*#define TEST_HITS*/
+
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -58,29 +63,22 @@
#include "a2065.h"
-
- /*
- * Transmit/Receive Ring Definitions
- */
+/* Transmit/Receive Ring Definitions */
#define LANCE_LOG_TX_BUFFERS (2)
#define LANCE_LOG_RX_BUFFERS (4)
-#define TX_RING_SIZE (1<<LANCE_LOG_TX_BUFFERS)
-#define RX_RING_SIZE (1<<LANCE_LOG_RX_BUFFERS)
+#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS)
-#define TX_RING_MOD_MASK (TX_RING_SIZE-1)
-#define RX_RING_MOD_MASK (RX_RING_SIZE-1)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
#define PKT_BUF_SIZE (1544)
#define RX_BUFF_SIZE PKT_BUF_SIZE
#define TX_BUFF_SIZE PKT_BUF_SIZE
-
- /*
- * Layout of the Lance's RAM Buffer
- */
-
+/* Layout of the Lance's RAM Buffer */
struct lance_init_block {
unsigned short mode; /* Pre-set mode (reg. 15) */
@@ -97,14 +95,11 @@ struct lance_init_block {
struct lance_rx_desc brx_ring[RX_RING_SIZE];
struct lance_tx_desc btx_ring[TX_RING_SIZE];
- char rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
- char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
+ char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
+ char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
};
-
- /*
- * Private Device Data
- */
+/* Private Device Data */
struct lance_private {
char *name;
@@ -129,21 +124,14 @@ struct lance_private {
struct timer_list multicast_timer;
};
-#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
- lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
- lp->tx_old - lp->tx_new-1)
-
-
#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
/* Load the CSR registers */
-static void load_csrs (struct lance_private *lp)
+static void load_csrs(struct lance_private *lp)
{
volatile struct lance_regs *ll = lp->ll;
volatile struct lance_init_block *aib = lp->lance_init_block;
- int leptr;
-
- leptr = LANCE_ADDR (aib);
+ int leptr = LANCE_ADDR(aib);
ll->rap = LE_CSR1;
ll->rdp = (leptr & 0xFFFF);
@@ -156,19 +144,16 @@ static void load_csrs (struct lance_private *lp)
ll->rap = LE_CSR0;
}
-#define ZERO 0
-
/* Setup the Lance Rx and Tx rings */
-static void lance_init_ring (struct net_device *dev)
+static void lance_init_ring(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
- volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ /* for LANCE_ADDR computations */
int leptr;
int i;
- aib = lp->lance_init_block;
-
/* Lock out other processes while setting up hardware */
netif_stop_queue(dev);
lp->rx_new = lp->tx_new = 0;
@@ -179,41 +164,38 @@ static void lance_init_ring (struct net_device *dev)
/* Copy the ethernet address to the lance init block
* Note that on the sparc you need to swap the ethernet address.
*/
- ib->phys_addr [0] = dev->dev_addr [1];
- ib->phys_addr [1] = dev->dev_addr [0];
- ib->phys_addr [2] = dev->dev_addr [3];
- ib->phys_addr [3] = dev->dev_addr [2];
- ib->phys_addr [4] = dev->dev_addr [5];
- ib->phys_addr [5] = dev->dev_addr [4];
-
- if (ZERO)
- printk(KERN_DEBUG "TX rings:\n");
+ ib->phys_addr[0] = dev->dev_addr[1];
+ ib->phys_addr[1] = dev->dev_addr[0];
+ ib->phys_addr[2] = dev->dev_addr[3];
+ ib->phys_addr[3] = dev->dev_addr[2];
+ ib->phys_addr[4] = dev->dev_addr[5];
+ ib->phys_addr[5] = dev->dev_addr[4];
/* Setup the Tx ring entries */
- for (i = 0; i <= (1<<lp->lance_log_tx_bufs); i++) {
+ netdev_dbg(dev, "TX rings:\n");
+ for (i = 0; i <= 1 << lp->lance_log_tx_bufs; i++) {
leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
- ib->btx_ring [i].tmd0 = leptr;
- ib->btx_ring [i].tmd1_hadr = leptr >> 16;
- ib->btx_ring [i].tmd1_bits = 0;
- ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
- ib->btx_ring [i].misc = 0;
- if (i < 3 && ZERO)
- printk(KERN_DEBUG "%d: 0x%8.8x\n", i, leptr);
+ ib->btx_ring[i].tmd0 = leptr;
+ ib->btx_ring[i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring[i].tmd1_bits = 0;
+ ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring[i].misc = 0;
+ if (i < 3)
+ netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
}
/* Setup the Rx ring entries */
- if (ZERO)
- printk(KERN_DEBUG "RX rings:\n");
- for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
+ netdev_dbg(dev, "RX rings:\n");
+ for (i = 0; i < 1 << lp->lance_log_rx_bufs; i++) {
leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
- ib->brx_ring [i].rmd0 = leptr;
- ib->brx_ring [i].rmd1_hadr = leptr >> 16;
- ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
- ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
- ib->brx_ring [i].mblength = 0;
- if (i < 3 && ZERO)
- printk(KERN_DEBUG "%d: 0x%8.8x\n", i, leptr);
+ ib->brx_ring[i].rmd0 = leptr;
+ ib->brx_ring[i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
+ ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring[i].mblength = 0;
+ if (i < 3)
+ netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
}
/* Setup the initialization block */
@@ -222,22 +204,20 @@ static void lance_init_ring (struct net_device *dev)
leptr = LANCE_ADDR(&aib->brx_ring);
ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
ib->rx_ptr = leptr;
- if (ZERO)
- printk(KERN_DEBUG "RX ptr: %8.8x\n", leptr);
+ netdev_dbg(dev, "RX ptr: %08x\n", leptr);
/* Setup tx descriptor pointer */
leptr = LANCE_ADDR(&aib->btx_ring);
ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
ib->tx_ptr = leptr;
- if (ZERO)
- printk(KERN_DEBUG "TX ptr: %8.8x\n", leptr);
+ netdev_dbg(dev, "TX ptr: %08x\n", leptr);
/* Clear the multicast filter */
- ib->filter [0] = 0;
- ib->filter [1] = 0;
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
}
-static int init_restart_lance (struct lance_private *lp)
+static int init_restart_lance(struct lance_private *lp)
{
volatile struct lance_regs *ll = lp->ll;
int i;
@@ -249,8 +229,7 @@ static int init_restart_lance (struct lance_private *lp)
for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
barrier();
if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
- printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n",
- i, ll->rdp);
+ pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp);
return -EIO;
}
@@ -261,7 +240,7 @@ static int init_restart_lance (struct lance_private *lp)
return 0;
}
-static int lance_rx (struct net_device *dev)
+static int lance_rx(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
@@ -271,22 +250,24 @@ static int lance_rx (struct net_device *dev)
#ifdef TEST_HITS
int i;
- printk(KERN_DEBUG "[");
+ char buf[RX_RING_SIZE + 1];
+
for (i = 0; i < RX_RING_SIZE; i++) {
+ char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN;
if (i == lp->rx_new)
- printk ("%s",
- ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
+ buf[i] = r1_own ? '_' : 'X';
else
- printk ("%s",
- ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
+ buf[i] = r1_own ? '.' : '1';
}
- printk ("]\n");
+ buf[RX_RING_SIZE] = 0;
+
+ pr_debug("RxRing TestHits: [%s]\n", buf);
#endif
- ll->rdp = LE_C0_RINT|LE_C0_INEA;
- for (rd = &ib->brx_ring [lp->rx_new];
+ ll->rdp = LE_C0_RINT | LE_C0_INEA;
+ for (rd = &ib->brx_ring[lp->rx_new];
!((bits = rd->rmd1_bits) & LE_R1_OWN);
- rd = &ib->brx_ring [lp->rx_new]) {
+ rd = &ib->brx_ring[lp->rx_new]) {
/* We got an incomplete frame? */
if ((bits & LE_R1_POK) != LE_R1_POK) {
@@ -297,18 +278,22 @@ static int lance_rx (struct net_device *dev)
/* Count only the end frame as a rx error,
* not the beginning
*/
- if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
- if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
- if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
- if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
- if (bits & LE_R1_EOP) dev->stats.rx_errors++;
+ if (bits & LE_R1_BUF)
+ dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC)
+ dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL)
+ dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA)
+ dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP)
+ dev->stats.rx_errors++;
} else {
int len = (rd->mblength & 0xfff) - 4;
- struct sk_buff *skb = dev_alloc_skb (len+2);
+ struct sk_buff *skb = dev_alloc_skb(len + 2);
if (!skb) {
- printk(KERN_WARNING "%s: Memory squeeze, "
- "deferring packet.\n", dev->name);
+ netdev_warn(dev, "Memory squeeze, deferring packet\n");
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
@@ -316,13 +301,13 @@ static int lance_rx (struct net_device *dev)
return 0;
}
- skb_reserve (skb, 2); /* 16 byte align */
- skb_put (skb, len); /* make room */
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
skb_copy_to_linear_data(skb,
- (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
- len);
- skb->protocol = eth_type_trans (skb, dev);
- netif_rx (skb);
+ (unsigned char *)&ib->rx_buf[lp->rx_new][0],
+ len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
}
@@ -335,7 +320,7 @@ static int lance_rx (struct net_device *dev)
return 0;
}
-static int lance_tx (struct net_device *dev)
+static int lance_tx(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
@@ -350,7 +335,7 @@ static int lance_tx (struct net_device *dev)
j = lp->tx_old;
for (i = j; i != lp->tx_new; i = j) {
- td = &ib->btx_ring [i];
+ td = &ib->btx_ring[i];
/* If we hit a packet not owned by us, stop */
if (td->tmd1_bits & LE_T1_OWN)
@@ -360,45 +345,44 @@ static int lance_tx (struct net_device *dev)
status = td->misc;
dev->stats.tx_errors++;
- if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
- if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
+ if (status & LE_T3_RTY)
+ dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL)
+ dev->stats.tx_window_errors++;
if (status & LE_T3_CLOS) {
dev->stats.tx_carrier_errors++;
if (lp->auto_select) {
lp->tpe = 1 - lp->tpe;
- printk(KERN_ERR "%s: Carrier Lost, "
- "trying %s\n", dev->name,
- lp->tpe?"TPE":"AUI");
+ netdev_err(dev, "Carrier Lost, trying %s\n",
+ lp->tpe ? "TPE" : "AUI");
/* Stop the lance */
ll->rap = LE_CSR0;
ll->rdp = LE_C0_STOP;
- lance_init_ring (dev);
- load_csrs (lp);
- init_restart_lance (lp);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
return 0;
}
}
- /* buffer errors and underflows turn off the transmitter */
- /* Restart the adapter */
- if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ /* buffer errors and underflows turn off
+ * the transmitter, so restart the adapter
+ */
+ if (status & (LE_T3_BUF | LE_T3_UFL)) {
dev->stats.tx_fifo_errors++;
- printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, "
- "restarting\n", dev->name);
+ netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n");
/* Stop the lance */
ll->rap = LE_CSR0;
ll->rdp = LE_C0_STOP;
- lance_init_ring (dev);
- load_csrs (lp);
- init_restart_lance (lp);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
return 0;
}
} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
- /*
- * So we don't count the packet more than once.
- */
+ /* So we don't count the packet more than once. */
td->tmd1_bits &= ~(LE_T1_POK);
/* One collision before packet was sent. */
@@ -419,17 +403,19 @@ static int lance_tx (struct net_device *dev)
return 0;
}
-static irqreturn_t lance_interrupt (int irq, void *dev_id)
+static int lance_tx_buffs_avail(struct lance_private *lp)
{
- struct net_device *dev;
- struct lance_private *lp;
- volatile struct lance_regs *ll;
- int csr0;
-
- dev = (struct net_device *) dev_id;
+ if (lp->tx_old <= lp->tx_new)
+ return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new;
+ return lp->tx_old - lp->tx_new - 1;
+}
- lp = netdev_priv(dev);
- ll = lp->ll;
+static irqreturn_t lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int csr0;
ll->rap = LE_CSR0; /* LANCE Controller Status */
csr0 = ll->rdp;
@@ -438,19 +424,19 @@ static irqreturn_t lance_interrupt (int irq, void *dev_id)
return IRQ_NONE; /* been generated by the Lance. */
/* Acknowledge all the interrupt sources ASAP */
- ll->rdp = csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|
+ ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT |
LE_C0_INIT);
- if ((csr0 & LE_C0_ERR)) {
+ if (csr0 & LE_C0_ERR) {
/* Clear the error condition */
- ll->rdp = LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA;
+ ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA;
}
if (csr0 & LE_C0_RINT)
- lance_rx (dev);
+ lance_rx(dev);
if (csr0 & LE_C0_TINT)
- lance_tx (dev);
+ lance_tx(dev);
/* Log misc errors. */
if (csr0 & LE_C0_BABL)
@@ -458,22 +444,22 @@ static irqreturn_t lance_interrupt (int irq, void *dev_id)
if (csr0 & LE_C0_MISS)
dev->stats.rx_errors++; /* Missed a Rx frame. */
if (csr0 & LE_C0_MERR) {
- printk(KERN_ERR "%s: Bus master arbitration failure, status "
- "%4.4x.\n", dev->name, csr0);
+ netdev_err(dev, "Bus master arbitration failure, status %04x\n",
+ csr0);
/* Restart the chip. */
ll->rdp = LE_C0_STRT;
}
- if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL > 0)
+ if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0)
netif_wake_queue(dev);
ll->rap = LE_CSR0;
- ll->rdp = LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|
- LE_C0_IDON|LE_C0_INEA;
+ ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR |
+ LE_C0_IDON | LE_C0_INEA);
return IRQ_HANDLED;
}
-static int lance_open (struct net_device *dev)
+static int lance_open(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
@@ -486,17 +472,18 @@ static int lance_open (struct net_device *dev)
/* Install the Interrupt handler */
ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED,
dev->name, dev);
- if (ret) return ret;
+ if (ret)
+ return ret;
- load_csrs (lp);
- lance_init_ring (dev);
+ load_csrs(lp);
+ lance_init_ring(dev);
netif_start_queue(dev);
- return init_restart_lance (lp);
+ return init_restart_lance(lp);
}
-static int lance_close (struct net_device *dev)
+static int lance_close(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
@@ -512,7 +499,7 @@ static int lance_close (struct net_device *dev)
return 0;
}
-static inline int lance_reset (struct net_device *dev)
+static inline int lance_reset(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
@@ -522,16 +509,15 @@ static inline int lance_reset (struct net_device *dev)
ll->rap = LE_CSR0;
ll->rdp = LE_C0_STOP;
- load_csrs (lp);
+ load_csrs(lp);
- lance_init_ring (dev);
+ lance_init_ring(dev);
dev->trans_start = jiffies; /* prevent tx timeout */
netif_start_queue(dev);
- status = init_restart_lance (lp);
-#ifdef DEBUG_DRIVER
- printk(KERN_DEBUG "Lance restart=%d\n", status);
-#endif
+ status = init_restart_lance(lp);
+ netdev_dbg(dev, "Lance restart=%d\n", status);
+
return status;
}
@@ -540,14 +526,13 @@ static void lance_tx_timeout(struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
- printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
- dev->name, ll->rdp);
+ netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp);
lance_reset(dev);
netif_wake_queue(dev);
}
-static netdev_tx_t lance_start_xmit (struct sk_buff *skb,
- struct net_device *dev)
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_regs *ll = lp->ll;
@@ -562,33 +547,33 @@ static netdev_tx_t lance_start_xmit (struct sk_buff *skb,
local_irq_save(flags);
- if (!TX_BUFFS_AVAIL){
+ if (!lance_tx_buffs_avail(lp)) {
local_irq_restore(flags);
return NETDEV_TX_LOCKED;
}
-#ifdef DEBUG_DRIVER
+#ifdef DEBUG
/* dump the packet */
print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
16, 1, skb->data, 64, true);
#endif
entry = lp->tx_new & lp->tx_ring_mod_mask;
- ib->btx_ring [entry].length = (-skblen) | 0xf000;
- ib->btx_ring [entry].misc = 0;
+ ib->btx_ring[entry].length = (-skblen) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
- skb_copy_from_linear_data(skb, (void *)&ib->tx_buf [entry][0], skblen);
+ skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
/* Now, give the packet to the lance */
- ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
dev->stats.tx_bytes += skblen;
- if (TX_BUFFS_AVAIL <= 0)
+ if (lance_tx_buffs_avail(lp) <= 0)
netif_stop_queue(dev);
/* Kick the lance: transmit now */
ll->rdp = LE_C0_INEA | LE_C0_TDMD;
- dev_kfree_skb (skb);
+ dev_kfree_skb(skb);
local_irq_restore(flags);
@@ -596,40 +581,33 @@ static netdev_tx_t lance_start_xmit (struct sk_buff *skb,
}
/* taken from the depca driver */
-static void lance_load_multicast (struct net_device *dev)
+static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
volatile u16 *mcast_table = (u16 *)&ib->filter;
struct netdev_hw_addr *ha;
- char *addrs;
u32 crc;
/* set all multicast bits */
- if (dev->flags & IFF_ALLMULTI){
- ib->filter [0] = 0xffffffff;
- ib->filter [1] = 0xffffffff;
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = 0xffffffff;
+ ib->filter[1] = 0xffffffff;
return;
}
/* clear the multicast filter */
- ib->filter [0] = 0;
- ib->filter [1] = 0;
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
/* Add addresses */
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
-
- /* multicast address? */
- if (!(*addrs & 1))
- continue;
-
- crc = ether_crc_le(6, addrs);
+ crc = ether_crc_le(6, ha->addr);
crc = crc >> 26;
- mcast_table [crc >> 4] |= 1 << (crc & 0xf);
+ mcast_table[crc >> 4] |= 1 << (crc & 0xf);
}
}
-static void lance_set_multicast (struct net_device *dev)
+static void lance_set_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
@@ -648,16 +626,16 @@ static void lance_set_multicast (struct net_device *dev)
ll->rap = LE_CSR0;
ll->rdp = LE_C0_STOP;
- lance_init_ring (dev);
+ lance_init_ring(dev);
if (dev->flags & IFF_PROMISC) {
ib->mode |= LE_MO_PROM;
} else {
ib->mode &= ~LE_MO_PROM;
- lance_load_multicast (dev);
+ lance_load_multicast(dev);
}
- load_csrs (lp);
- init_restart_lance (lp);
+ load_csrs(lp);
+ init_restart_lance(lp);
netif_wake_queue(dev);
}
@@ -697,14 +675,12 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
{
struct net_device *dev;
struct lance_private *priv;
- unsigned long board, base_addr, mem_start;
+ unsigned long board = z->resource.start;
+ unsigned long base_addr = board + A2065_LANCE;
+ unsigned long mem_start = board + A2065_RAM;
struct resource *r1, *r2;
int err;
- board = z->resource.start;
- base_addr = board+A2065_LANCE;
- mem_start = board+A2065_RAM;
-
r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
"Am7990");
if (!r1)
@@ -735,12 +711,12 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
dev->dev_addr[1] = 0x00;
dev->dev_addr[2] = 0x9f;
}
- dev->dev_addr[3] = (z->rom.er_SerialNumber>>16) & 0xff;
- dev->dev_addr[4] = (z->rom.er_SerialNumber>>8) & 0xff;
+ dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff;
+ dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff;
dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
dev->base_addr = ZTWO_VADDR(base_addr);
dev->mem_start = ZTWO_VADDR(mem_start);
- dev->mem_end = dev->mem_start+A2065_RAM_SIZE;
+ dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
priv->ll = (volatile struct lance_regs *)dev->base_addr;
priv->init_block = (struct lance_init_block *)dev->mem_start;
@@ -760,7 +736,7 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
init_timer(&priv->multicast_timer);
priv->multicast_timer.data = (unsigned long) dev;
priv->multicast_timer.function =
- (void (*)(unsigned long)) &lance_set_multicast;
+ (void (*)(unsigned long))lance_set_multicast;
err = register_netdev(dev);
if (err) {
@@ -771,8 +747,8 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
}
zorro_set_drvdata(z, dev);
- printk(KERN_INFO "%s: A2065 at 0x%08lx, Ethernet Address "
- "%pM\n", dev->name, board, dev->dev_addr);
+ netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n",
+ board, dev->dev_addr);
return 0;
}
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 5181e932211..f07b2e980fb 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -32,6 +32,7 @@ static const char version[] =
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index d7c1bfe4b6e..31798f5f5d0 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -69,10 +69,7 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
-
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#include <linux/if_vlan.h>
-#endif
#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
@@ -171,15 +168,6 @@ MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
#define BOARD_IDX_STATIC 0
#define BOARD_IDX_OVERFLOW -1
-#if (defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)) && \
- defined(NETIF_F_HW_VLAN_RX)
-#define ACENIC_DO_VLAN 1
-#define ACE_RCB_VLAN_FLAG RCB_FLG_VLAN_ASSIST
-#else
-#define ACENIC_DO_VLAN 0
-#define ACE_RCB_VLAN_FLAG 0
-#endif
-
#include "acenic.h"
/*
@@ -465,9 +453,6 @@ static const struct net_device_ops ace_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ace_set_mac_addr,
.ndo_change_mtu = ace_change_mtu,
-#if ACENIC_DO_VLAN
- .ndo_vlan_rx_register = ace_vlan_rx_register,
-#endif
};
static int __devinit acenic_probe_one(struct pci_dev *pdev,
@@ -491,9 +476,7 @@ static int __devinit acenic_probe_one(struct pci_dev *pdev,
ap->name = pci_name(pdev);
dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
-#if ACENIC_DO_VLAN
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-#endif
dev->watchdog_timeo = 5*HZ;
@@ -1248,7 +1231,7 @@ static int __devinit ace_init(struct net_device *dev)
set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
info->rx_std_ctrl.flags =
- RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;
+ RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
memset(ap->rx_std_ring, 0,
RX_STD_RING_ENTRIES * sizeof(struct rx_desc));
@@ -1264,7 +1247,7 @@ static int __devinit ace_init(struct net_device *dev)
(sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
info->rx_jumbo_ctrl.max_len = 0;
info->rx_jumbo_ctrl.flags =
- RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;
+ RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
memset(ap->rx_jumbo_ring, 0,
RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));
@@ -1286,7 +1269,7 @@ static int __devinit ace_init(struct net_device *dev)
RX_JUMBO_RING_ENTRIES))));
info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
info->rx_mini_ctrl.flags =
- RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|ACE_RCB_VLAN_FLAG;
+ RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|RCB_FLG_VLAN_ASSIST;
for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
ap->rx_mini_ring[i].flags =
@@ -1332,7 +1315,7 @@ static int __devinit ace_init(struct net_device *dev)
}
info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
- tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;
+ tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
/*
* The Tigon I does not like having the TX ring in host memory ;-(
@@ -1519,13 +1502,13 @@ static int __devinit ace_init(struct net_device *dev)
* firmware to wipe the ring without re-initializing it.
*/
if (!test_and_set_bit(0, &ap->std_refill_busy))
- ace_load_std_rx_ring(ap, RX_RING_SIZE);
+ ace_load_std_rx_ring(dev, RX_RING_SIZE);
else
printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
ap->name);
if (ap->version >= 2) {
if (!test_and_set_bit(0, &ap->mini_refill_busy))
- ace_load_mini_rx_ring(ap, RX_MINI_SIZE);
+ ace_load_mini_rx_ring(dev, RX_MINI_SIZE);
else
printk(KERN_ERR "%s: Someone is busy refilling "
"the RX mini ring\n", ap->name);
@@ -1601,9 +1584,10 @@ static void ace_watchdog(struct net_device *data)
}
-static void ace_tasklet(unsigned long dev)
+static void ace_tasklet(unsigned long arg)
{
- struct ace_private *ap = netdev_priv((struct net_device *)dev);
+ struct net_device *dev = (struct net_device *) arg;
+ struct ace_private *ap = netdev_priv(dev);
int cur_size;
cur_size = atomic_read(&ap->cur_rx_bufs);
@@ -1612,7 +1596,7 @@ static void ace_tasklet(unsigned long dev)
#ifdef DEBUG
printk("refilling buffers (current %i)\n", cur_size);
#endif
- ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
+ ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
}
if (ap->version >= 2) {
@@ -1623,7 +1607,7 @@ static void ace_tasklet(unsigned long dev)
printk("refilling mini buffers (current %i)\n",
cur_size);
#endif
- ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
+ ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size);
}
}
@@ -1633,7 +1617,7 @@ static void ace_tasklet(unsigned long dev)
#ifdef DEBUG
printk("refilling jumbo buffers (current %i)\n", cur_size);
#endif
- ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
+ ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
}
ap->tasklet_pending = 0;
}
@@ -1659,8 +1643,9 @@ static void ace_dump_trace(struct ace_private *ap)
* done only before the device is enabled, thus no interrupts are
* generated and by the interrupt handler/tasklet handler.
*/
-static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
+static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
{
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
@@ -1674,11 +1659,10 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
struct rx_desc *rd;
dma_addr_t mapping;
- skb = alloc_skb(ACE_STD_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_STD_BUFSIZE,
@@ -1722,8 +1706,9 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
}
-static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
+static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs)
{
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
@@ -1735,11 +1720,10 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
struct rx_desc *rd;
dma_addr_t mapping;
- skb = alloc_skb(ACE_MINI_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_MINI_BUFSIZE,
@@ -1779,8 +1763,9 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
* Load the jumbo rx ring, this may happen at any time if the MTU
* is changed to a value > 1500.
*/
-static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
+static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
{
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
@@ -1791,11 +1776,10 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
struct rx_desc *rd;
dma_addr_t mapping;
- skb = alloc_skb(ACE_JUMBO_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_JUMBO_BUFSIZE,
@@ -2038,12 +2022,9 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
}
/* send it up */
-#if ACENIC_DO_VLAN
- if (ap->vlgrp && (bd_flags & BD_FLG_VLAN_TAG)) {
- vlan_hwaccel_rx(skb, ap->vlgrp, retdesc->vlan);
- } else
-#endif
- netif_rx(skb);
+ if ((bd_flags & BD_FLG_VLAN_TAG))
+ __vlan_hwaccel_put_tag(skb, retdesc->vlan);
+ netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += retdesc->size;
@@ -2216,7 +2197,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
#ifdef DEBUG
printk("low on std buffers %i\n", cur_size);
#endif
- ace_load_std_rx_ring(ap,
+ ace_load_std_rx_ring(dev,
RX_RING_SIZE - cur_size);
} else
run_tasklet = 1;
@@ -2232,7 +2213,8 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
printk("low on mini buffers %i\n",
cur_size);
#endif
- ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
+ ace_load_mini_rx_ring(dev,
+ RX_MINI_SIZE - cur_size);
} else
run_tasklet = 1;
}
@@ -2248,7 +2230,8 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
printk("low on jumbo buffers %i\n",
cur_size);
#endif
- ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
+ ace_load_jumbo_rx_ring(dev,
+ RX_JUMBO_SIZE - cur_size);
} else
run_tasklet = 1;
}
@@ -2262,24 +2245,6 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-
-#if ACENIC_DO_VLAN
-static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct ace_private *ap = netdev_priv(dev);
- unsigned long flags;
-
- local_irq_save(flags);
- ace_mask_irq(dev);
-
- ap->vlgrp = grp;
-
- ace_unmask_irq(dev);
- local_irq_restore(flags);
-}
-#endif /* ACENIC_DO_VLAN */
-
-
static int ace_open(struct net_device *dev)
{
struct ace_private *ap = netdev_priv(dev);
@@ -2305,7 +2270,7 @@ static int ace_open(struct net_device *dev)
if (ap->jumbo &&
!test_and_set_bit(0, &ap->jumbo_refill_busy))
- ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
+ ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
if (dev->flags & IFF_PROMISC) {
cmd.evt = C_SET_PROMISC_MODE;
@@ -2449,16 +2414,12 @@ ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
writel(addr >> 32, &io->addr.addrhi);
writel(addr & 0xffffffff, &io->addr.addrlo);
writel(flagsize, &io->flagsize);
-#if ACENIC_DO_VLAN
writel(vlan_tag, &io->vlanres);
-#endif
} else {
desc->addr.addrhi = addr >> 32;
desc->addr.addrlo = addr;
desc->flagsize = flagsize;
-#if ACENIC_DO_VLAN
desc->vlanres = vlan_tag;
-#endif
}
}
@@ -2486,12 +2447,10 @@ restart:
flagsize = (skb->len << 16) | (BD_FLG_END);
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
-#if ACENIC_DO_VLAN
if (vlan_tx_tag_present(skb)) {
flagsize |= BD_FLG_VLAN_TAG;
vlan_tag = vlan_tx_tag_get(skb);
}
-#endif
desc = ap->tx_ring + idx;
idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
@@ -2509,12 +2468,10 @@ restart:
flagsize = (skb_headlen(skb) << 16);
if (skb->ip_summed == CHECKSUM_PARTIAL)
flagsize |= BD_FLG_TCP_UDP_SUM;
-#if ACENIC_DO_VLAN
if (vlan_tx_tag_present(skb)) {
flagsize |= BD_FLG_VLAN_TAG;
vlan_tag = vlan_tx_tag_get(skb);
}
-#endif
ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
@@ -2621,7 +2578,7 @@ static int ace_change_mtu(struct net_device *dev, int new_mtu)
"support\n", dev->name);
ap->jumbo = 1;
if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
- ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
+ ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
ace_set_rxtx_parms(dev, 1);
}
} else {
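
The acenic refill helpers now take the net_device instead of the private struct so they can call netdev_alloc_skb_ip_align(), which allocates the skb and reserves NET_IP_ALIGN in one step, replacing the old alloc_skb() + skb_reserve() pair. A minimal sketch of that refill pattern under assumed names (example_refill and EXAMPLE_BUFSIZE are hypothetical, not the driver's own):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/mm.h>

#define EXAMPLE_BUFSIZE 1536		/* hypothetical buffer size */

/* Post up to 'count' receive buffers; returns how many were posted. */
static int example_refill(struct net_device *dev, struct pci_dev *pdev,
			  struct sk_buff **ring, dma_addr_t *dma, int count)
{
	int posted;

	for (posted = 0; posted < count; posted++) {
		/* Allocates the skb and reserves NET_IP_ALIGN in one call. */
		struct sk_buff *skb =
			netdev_alloc_skb_ip_align(dev, EXAMPLE_BUFSIZE);

		if (!skb)
			break;			/* out of memory, retry later */

		dma[posted] = pci_map_page(pdev, virt_to_page(skb->data),
					   offset_in_page(skb->data),
					   EXAMPLE_BUFSIZE,
					   PCI_DMA_FROMDEVICE);
		ring[posted] = skb;	/* kept so the completion path can
					 * unmap it and hand it up later */
	}
	return posted;
}
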
diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h
index 0681da7e875..51c486cfbb8 100644
--- a/drivers/net/acenic.h
+++ b/drivers/net/acenic.h
@@ -1,5 +1,6 @@
#ifndef _ACENIC_H_
#define _ACENIC_H_
+#include <linux/interrupt.h>
/*
@@ -664,10 +665,6 @@ struct ace_private
struct rx_desc *rx_mini_ring;
struct rx_desc *rx_return_ring;
-#if ACENIC_DO_VLAN
- struct vlan_group *vlgrp;
-#endif
-
int tasklet_pending, jumbo;
struct tasklet_struct ace_tasklet;
@@ -769,9 +766,9 @@ static inline void ace_unmask_irq(struct net_device *dev)
* Prototypes
*/
static int ace_init(struct net_device *dev);
-static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs);
-static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs);
-static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs);
+static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs);
+static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs);
+static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs);
static irqreturn_t ace_interrupt(int irq, void *dev_id);
static int ace_load_firmware(struct net_device *dev);
static int ace_open(struct net_device *dev);
@@ -789,8 +786,5 @@ static void ace_free_descriptors(struct net_device *dev);
static void ace_init_cleanup(struct net_device *dev);
static struct net_device_stats *ace_get_stats(struct net_device *dev);
static int read_eeprom_byte(struct net_device *dev, unsigned long offset);
-#if ACENIC_DO_VLAN
-static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp);
-#endif
#endif /* _ACENIC_H_ */
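
With the prototypes changed as above, ace_tasklet() receives the net_device through the tasklet's data word and derives the private struct with netdev_priv(), which is what lets the refill helpers take a net_device. A hypothetical sketch of wiring a tasklet up that way (example_priv, example_tasklet and example_setup_tasklet are invented names; the dev is assumed to have been allocated with alloc_etherdev(sizeof(struct example_priv))):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
	struct tasklet_struct tasklet;
	/* ... device state ... */
};

static void example_tasklet(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;
	struct example_priv *priv = netdev_priv(dev);

	/* both dev and priv are now available for the refill work */
	(void)priv;
}

static void example_setup_tasklet(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* Pass the net_device, not the private struct, as the cookie. */
	tasklet_init(&priv->tasklet, example_tasklet, (unsigned long)dev);
}
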
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 241b185e656..78002ef9c0e 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -75,6 +75,7 @@ Revision History:
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -660,15 +661,6 @@ static void amd8111e_free_ring(struct amd8111e_priv* lp)
}
}
-#if AMD8111E_VLAN_TAG_USED
-/*
-This is the receive indication function for packets with vlan tag.
-*/
-static int amd8111e_vlan_rx(struct amd8111e_priv *lp, struct sk_buff *skb, u16 vlan_tag)
-{
- return vlan_hwaccel_receive_skb(skb, lp->vlgrp,vlan_tag);
-}
-#endif
/*
This function will free all the transmit skbs that are actually transmitted by the device. It will check the ownership of the skb before freeing the skb.
@@ -763,7 +755,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
#if AMD8111E_VLAN_TAG_USED
vtag = status & TT_MASK;
/*MAC will strip vlan tag*/
- if(lp->vlgrp != NULL && vtag !=0)
+ if (vtag != 0)
min_pkt_len =MIN_PKT_LEN - 4;
else
#endif
@@ -798,12 +790,12 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
skb->protocol = eth_type_trans(skb, dev);
#if AMD8111E_VLAN_TAG_USED
- if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
- amd8111e_vlan_rx(lp, skb,
- le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info));
- } else
+ if (vtag == TT_VLAN_TAGGED){
+ u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
+ __vlan_hwaccel_put_tag(skb, vlan_tag);
+ }
#endif
- netif_receive_skb(skb);
+ netif_receive_skb(skb);
/*COAL update rx coalescing parameters*/
lp->coal_conf.rx_packets++;
lp->coal_conf.rx_bytes += pkt_len;
@@ -1597,16 +1589,6 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
return err;
}
-#if AMD8111E_VLAN_TAG_USED
-static void amd8111e_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct amd8111e_priv *lp = netdev_priv(dev);
- spin_lock_irq(&lp->lock);
- lp->vlgrp = grp;
- spin_unlock_irq(&lp->lock);
-}
-#endif
-
static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
{
writel( VAL1|MPPLBA, lp->mmio + CMD3);
@@ -1821,9 +1803,6 @@ static const struct net_device_ops amd8111e_netdev_ops = {
.ndo_set_mac_address = amd8111e_set_mac_address,
.ndo_do_ioctl = amd8111e_ioctl,
.ndo_change_mtu = amd8111e_change_mtu,
-#if AMD8111E_VLAN_TAG_USED
- .ndo_vlan_rx_register = amd8111e_vlan_rx_register,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = amd8111e_poll,
#endif
@@ -1958,7 +1937,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
IPG_CONVERGE_JIFFIES;
lp->ipg_data.ipg = DEFAULT_IPG;
lp->ipg_data.ipg_state = CSTATE;
- };
+ }
/* display driver and device information */
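
Both acenic and amd8111e stop keeping a vlan_group of their own: instead of vlan_hwaccel_rx()/vlan_hwaccel_receive_skb(), the driver attaches the hardware-stripped tag to the skb with __vlan_hwaccel_put_tag() and hands every frame to the normal receive path, letting the VLAN core do the demux. A minimal sketch of that receive pattern, using the two-argument __vlan_hwaccel_put_tag() of this kernel generation (example_rx_one and its tagged/vlan_tci parameters are hypothetical; in a real driver they come from the hardware descriptor):

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>

static void example_rx_one(struct net_device *dev, struct sk_buff *skb,
			   bool tagged, u16 vlan_tci)
{
	unsigned int len = skb->len;

	skb->protocol = eth_type_trans(skb, dev);

	if (tagged)
		__vlan_hwaccel_put_tag(skb, vlan_tci);

	/* One delivery path for tagged and untagged frames alike. */
	netif_rx(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}
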
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h
index b5926af03a7..2ff2e7a12dd 100644
--- a/drivers/net/amd8111e.h
+++ b/drivers/net/amd8111e.h
@@ -783,9 +783,6 @@ struct amd8111e_priv{
struct net_device *next;
int mii;
struct mii_if_info mii_if;
-#if AMD8111E_VLAN_TAG_USED
- struct vlan_group *vlgrp;
-#endif
char opened;
unsigned int drv_rx_errors;
struct amd8111e_coalesce_conf coal_conf;
diff --git a/drivers/net/apne.c b/drivers/net/apne.c
index 2fe60f16810..547737340cb 100644
--- a/drivers/net/apne.c
+++ b/drivers/net/apne.c
@@ -36,6 +36,7 @@
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <asm/system.h>
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index e69eead12ec..34ffb542262 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -652,9 +652,9 @@ static int do_write(struct net_device *dev, void *cbuf, int cbuflen,
int ret;
if(i) {
- qels[i].cbuf = (unsigned char *) cbuf;
+ qels[i].cbuf = cbuf;
qels[i].cbuflen = cbuflen;
- qels[i].dbuf = (unsigned char *) dbuf;
+ qels[i].dbuf = dbuf;
qels[i].dbuflen = dbuflen;
qels[i].QWrite = 1;
qels[i].mailbox = i; /* this should be initted rather */
@@ -676,9 +676,9 @@ static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
int ret;
if(i) {
- qels[i].cbuf = (unsigned char *) cbuf;
+ qels[i].cbuf = cbuf;
qels[i].cbuflen = cbuflen;
- qels[i].dbuf = (unsigned char *) dbuf;
+ qels[i].dbuf = dbuf;
qels[i].dbuflen = dbuflen;
qels[i].QWrite = 0;
qels[i].mailbox = i; /* this should be initted rather */
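
The ltpc hunks drop explicit casts when assigning a void * parameter to an unsigned char * field: in C, void * converts implicitly to any object pointer type, so the casts were noise. A tiny stand-alone illustration (hypothetical names, compilable with any C compiler):

#include <stdio.h>

struct qel {
	unsigned char *cbuf;
	int cbuflen;
};

static void fill(struct qel *q, void *cbuf, int cbuflen)
{
	q->cbuf = cbuf;		/* no cast needed: void * converts implicitly */
	q->cbuflen = cbuflen;
}

int main(void)
{
	unsigned char data[4] = { 1, 2, 3, 4 };
	struct qel q;

	fill(&q, data, sizeof(data));
	printf("first byte: %u, len: %d\n", q.cbuf[0], q.cbuflen);
	return 0;
}
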
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 9efbbbae47c..25197b698dd 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -32,6 +32,7 @@
#include <linux/netdevice.h>
#include <linux/bootmem.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <asm/io.h>
#include <linux/arcdevice.h>
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 37272827ee5..45c61a2c5fb 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -34,6 +34,7 @@
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/bootmem.h>
#include <linux/arcdevice.h>
#include <linux/com20020.h>
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 48a1dbf01e6..d427493997b 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -34,6 +34,7 @@
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/arcdevice.h>
#include <linux/com20020.h>
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index c9e459400ff..7bfb91f3285 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -33,6 +33,7 @@
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/arcdevice.h>
#include <linux/com20020.h>
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index eb27976dab3..487d780ebbd 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -33,6 +33,7 @@
#include <linux/netdevice.h>
#include <linux/bootmem.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <asm/io.h>
#include <linux/arcdevice.h>
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index f3b46f71e29..b80fbe40aa0 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index b7f45cd756a..7ed78f40204 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -34,6 +34,9 @@
* - an MC68230 Parallel Interface/Timer configured as 2 parallel ports
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/*#define DEBUG*/
+
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
@@ -54,802 +57,734 @@
#include "ariadne.h"
-
#ifdef ARIADNE_DEBUG
int ariadne_debug = ARIADNE_DEBUG;
#else
int ariadne_debug = 1;
#endif
+/* Macros to Fix Endianness problems */
- /*
- * Macros to Fix Endianness problems
- */
-
- /* Swap the Bytes in a WORD */
-#define swapw(x) (((x>>8)&0x00ff)|((x<<8)&0xff00))
- /* Get the Low BYTE in a WORD */
-#define lowb(x) (x&0xff)
- /* Get the Swapped High WORD in a LONG */
-#define swhighw(x) ((((x)>>8)&0xff00)|(((x)>>24)&0x00ff))
- /* Get the Swapped Low WORD in a LONG */
-#define swloww(x) ((((x)<<8)&0xff00)|(((x)>>8)&0x00ff))
+/* Swap the Bytes in a WORD */
+#define swapw(x) (((x >> 8) & 0x00ff) | ((x << 8) & 0xff00))
+/* Get the Low BYTE in a WORD */
+#define lowb(x) (x & 0xff)
+/* Get the Swapped High WORD in a LONG */
+#define swhighw(x) ((((x) >> 8) & 0xff00) | (((x) >> 24) & 0x00ff))
+/* Get the Swapped Low WORD in a LONG */
+#define swloww(x) ((((x) << 8) & 0xff00) | (((x) >> 8) & 0x00ff))
-
- /*
- * Transmit/Receive Ring Definitions
- */
+/* Transmit/Receive Ring Definitions */
#define TX_RING_SIZE 5
#define RX_RING_SIZE 16
#define PKT_BUF_SIZE 1520
-
- /*
- * Private Device Data
- */
+/* Private Device Data */
struct ariadne_private {
- volatile struct TDRE *tx_ring[TX_RING_SIZE];
- volatile struct RDRE *rx_ring[RX_RING_SIZE];
- volatile u_short *tx_buff[TX_RING_SIZE];
- volatile u_short *rx_buff[RX_RING_SIZE];
- int cur_tx, cur_rx; /* The next free ring entry */
- int dirty_tx; /* The ring entries to be free()ed. */
- char tx_full;
+ volatile struct TDRE *tx_ring[TX_RING_SIZE];
+ volatile struct RDRE *rx_ring[RX_RING_SIZE];
+ volatile u_short *tx_buff[TX_RING_SIZE];
+ volatile u_short *rx_buff[RX_RING_SIZE];
+ int cur_tx, cur_rx; /* The next free ring entry */
+ int dirty_tx; /* The ring entries to be free()ed */
+ char tx_full;
};
-
- /*
- * Structure Created in the Ariadne's RAM Buffer
- */
+/* Structure Created in the Ariadne's RAM Buffer */
struct lancedata {
- struct TDRE tx_ring[TX_RING_SIZE];
- struct RDRE rx_ring[RX_RING_SIZE];
- u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE/sizeof(u_short)];
- u_short rx_buff[RX_RING_SIZE][PKT_BUF_SIZE/sizeof(u_short)];
+ struct TDRE tx_ring[TX_RING_SIZE];
+ struct RDRE rx_ring[RX_RING_SIZE];
+ u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
+ u_short rx_buff[RX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
};
-static int ariadne_open(struct net_device *dev);
-static void ariadne_init_ring(struct net_device *dev);
-static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static void ariadne_tx_timeout(struct net_device *dev);
-static int ariadne_rx(struct net_device *dev);
-static void ariadne_reset(struct net_device *dev);
-static irqreturn_t ariadne_interrupt(int irq, void *data);
-static int ariadne_close(struct net_device *dev);
-static struct net_device_stats *ariadne_get_stats(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-
-
static void memcpyw(volatile u_short *dest, u_short *src, int len)
{
- while (len >= 2) {
- *(dest++) = *(src++);
- len -= 2;
- }
- if (len == 1)
- *dest = (*(u_char *)src)<<8;
+ while (len >= 2) {
+ *(dest++) = *(src++);
+ len -= 2;
+ }
+ if (len == 1)
+ *dest = (*(u_char *)src) << 8;
}
+static void ariadne_init_ring(struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ volatile struct lancedata *lancedata = (struct lancedata *)dev->mem_start;
+ int i;
-static int __devinit ariadne_init_one(struct zorro_dev *z,
- const struct zorro_device_id *ent);
-static void __devexit ariadne_remove_one(struct zorro_dev *z);
-
-
-static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = {
- { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
- { 0 }
-};
-MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl);
+ netif_stop_queue(dev);
-static struct zorro_driver ariadne_driver = {
- .name = "ariadne",
- .id_table = ariadne_zorro_tbl,
- .probe = ariadne_init_one,
- .remove = __devexit_p(ariadne_remove_one),
-};
+ priv->tx_full = 0;
+ priv->cur_rx = priv->cur_tx = 0;
+ priv->dirty_tx = 0;
+
+ /* Set up TX Ring */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ volatile struct TDRE *t = &lancedata->tx_ring[i];
+ t->TMD0 = swloww(ARIADNE_RAM +
+ offsetof(struct lancedata, tx_buff[i]));
+ t->TMD1 = swhighw(ARIADNE_RAM +
+ offsetof(struct lancedata, tx_buff[i])) |
+ TF_STP | TF_ENP;
+ t->TMD2 = swapw((u_short)-PKT_BUF_SIZE);
+ t->TMD3 = 0;
+ priv->tx_ring[i] = &lancedata->tx_ring[i];
+ priv->tx_buff[i] = lancedata->tx_buff[i];
+ netdev_dbg(dev, "TX Entry %2d at %p, Buf at %p\n",
+ i, &lancedata->tx_ring[i], lancedata->tx_buff[i]);
+ }
-static const struct net_device_ops ariadne_netdev_ops = {
- .ndo_open = ariadne_open,
- .ndo_stop = ariadne_close,
- .ndo_start_xmit = ariadne_start_xmit,
- .ndo_tx_timeout = ariadne_tx_timeout,
- .ndo_get_stats = ariadne_get_stats,
- .ndo_set_multicast_list = set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
-};
+ /* Set up RX Ring */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ volatile struct RDRE *r = &lancedata->rx_ring[i];
+ r->RMD0 = swloww(ARIADNE_RAM +
+ offsetof(struct lancedata, rx_buff[i]));
+ r->RMD1 = swhighw(ARIADNE_RAM +
+ offsetof(struct lancedata, rx_buff[i])) |
+ RF_OWN;
+ r->RMD2 = swapw((u_short)-PKT_BUF_SIZE);
+ r->RMD3 = 0x0000;
+ priv->rx_ring[i] = &lancedata->rx_ring[i];
+ priv->rx_buff[i] = lancedata->rx_buff[i];
+ netdev_dbg(dev, "RX Entry %2d at %p, Buf at %p\n",
+ i, &lancedata->rx_ring[i], lancedata->rx_buff[i]);
+ }
+}
-static int __devinit ariadne_init_one(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+static int ariadne_rx(struct net_device *dev)
{
- unsigned long board = z->resource.start;
- unsigned long base_addr = board+ARIADNE_LANCE;
- unsigned long mem_start = board+ARIADNE_RAM;
- struct resource *r1, *r2;
- struct net_device *dev;
- struct ariadne_private *priv;
- int err;
-
- r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960");
- if (!r1)
- return -EBUSY;
- r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM");
- if (!r2) {
- release_mem_region(base_addr, sizeof(struct Am79C960));
- return -EBUSY;
- }
-
- dev = alloc_etherdev(sizeof(struct ariadne_private));
- if (dev == NULL) {
- release_mem_region(base_addr, sizeof(struct Am79C960));
- release_mem_region(mem_start, ARIADNE_RAM_SIZE);
- return -ENOMEM;
- }
-
- priv = netdev_priv(dev);
-
- r1->name = dev->name;
- r2->name = dev->name;
-
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x60;
- dev->dev_addr[2] = 0x30;
- dev->dev_addr[3] = (z->rom.er_SerialNumber>>16) & 0xff;
- dev->dev_addr[4] = (z->rom.er_SerialNumber>>8) & 0xff;
- dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
- dev->base_addr = ZTWO_VADDR(base_addr);
- dev->mem_start = ZTWO_VADDR(mem_start);
- dev->mem_end = dev->mem_start+ARIADNE_RAM_SIZE;
-
- dev->netdev_ops = &ariadne_netdev_ops;
- dev->watchdog_timeo = 5*HZ;
-
- err = register_netdev(dev);
- if (err) {
- release_mem_region(base_addr, sizeof(struct Am79C960));
- release_mem_region(mem_start, ARIADNE_RAM_SIZE);
- free_netdev(dev);
- return err;
- }
- zorro_set_drvdata(z, dev);
+ struct ariadne_private *priv = netdev_priv(dev);
+ int entry = priv->cur_rx % RX_RING_SIZE;
+ int i;
- printk(KERN_INFO "%s: Ariadne at 0x%08lx, Ethernet Address %pM\n",
- dev->name, board, dev->dev_addr);
+ /* If we own the next entry, it's a new packet. Send it up */
+ while (!(lowb(priv->rx_ring[entry]->RMD1) & RF_OWN)) {
+ int status = lowb(priv->rx_ring[entry]->RMD1);
+
+ if (status != (RF_STP | RF_ENP)) { /* There was an error */
+ /* There is a tricky error noted by
+ * John Murphy <murf@perftech.com> to Russ Nelson:
+ * Even with full-sized buffers it's possible for a
+ * jabber packet to use two buffers, with only the
+ * last correctly noting the error
+ */
+ /* Only count a general error at the end of a packet */
+ if (status & RF_ENP)
+ dev->stats.rx_errors++;
+ if (status & RF_FRAM)
+ dev->stats.rx_frame_errors++;
+ if (status & RF_OFLO)
+ dev->stats.rx_over_errors++;
+ if (status & RF_CRC)
+ dev->stats.rx_crc_errors++;
+ if (status & RF_BUFF)
+ dev->stats.rx_fifo_errors++;
+ priv->rx_ring[entry]->RMD1 &= 0xff00 | RF_STP | RF_ENP;
+ } else {
+ /* Malloc up new buffer, compatible with net-3 */
+ short pkt_len = swapw(priv->rx_ring[entry]->RMD3);
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL) {
+ netdev_warn(dev, "Memory squeeze, deferring packet\n");
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
+ break;
+
+ if (i > RX_RING_SIZE - 2) {
+ dev->stats.rx_dropped++;
+ priv->rx_ring[entry]->RMD1 |= RF_OWN;
+ priv->cur_rx++;
+ }
+ break;
+ }
+
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, pkt_len); /* Make room */
+ skb_copy_to_linear_data(skb,
+ (const void *)priv->rx_buff[entry],
+ pkt_len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n",
+ ((u_short *)skb->data)[6],
+ skb->data + 6, skb->data,
+ (int)skb->data, (int)skb->len);
+
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
- return 0;
-}
+ priv->rx_ring[entry]->RMD1 |= RF_OWN;
+ entry = (++priv->cur_rx) % RX_RING_SIZE;
+ }
+ priv->cur_rx = priv->cur_rx % RX_RING_SIZE;
-static int ariadne_open(struct net_device *dev)
-{
- volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
- u_short in;
- u_long version;
- int i;
-
- /* Reset the LANCE */
- in = lance->Reset;
-
- /* Stop the LANCE */
- lance->RAP = CSR0; /* PCnet-ISA Controller Status */
- lance->RDP = STOP;
-
- /* Check the LANCE version */
- lance->RAP = CSR88; /* Chip ID */
- version = swapw(lance->RDP);
- lance->RAP = CSR89; /* Chip ID */
- version |= swapw(lance->RDP)<<16;
- if ((version & 0x00000fff) != 0x00000003) {
- printk(KERN_WARNING "ariadne_open: Couldn't find AMD Ethernet Chip\n");
- return -EAGAIN;
- }
- if ((version & 0x0ffff000) != 0x00003000) {
- printk(KERN_WARNING "ariadne_open: Couldn't find Am79C960 (Wrong part "
- "number = %ld)\n", (version & 0x0ffff000)>>12);
- return -EAGAIN;
- }
-#if 0
- printk(KERN_DEBUG "ariadne_open: Am79C960 (PCnet-ISA) Revision %ld\n",
- (version & 0xf0000000)>>28);
-#endif
+ /* We should check that at least two ring entries are free.
+ * If not, we should free one and mark stats->rx_dropped++
+ */
- ariadne_init_ring(dev);
-
- /* Miscellaneous Stuff */
- lance->RAP = CSR3; /* Interrupt Masks and Deferral Control */
- lance->RDP = 0x0000;
- lance->RAP = CSR4; /* Test and Features Control */
- lance->RDP = DPOLL|APAD_XMT|MFCOM|RCVCCOM|TXSTRTM|JABM;
-
- /* Set the Multicast Table */
- lance->RAP = CSR8; /* Logical Address Filter, LADRF[15:0] */
- lance->RDP = 0x0000;
- lance->RAP = CSR9; /* Logical Address Filter, LADRF[31:16] */
- lance->RDP = 0x0000;
- lance->RAP = CSR10; /* Logical Address Filter, LADRF[47:32] */
- lance->RDP = 0x0000;
- lance->RAP = CSR11; /* Logical Address Filter, LADRF[63:48] */
- lance->RDP = 0x0000;
-
- /* Set the Ethernet Hardware Address */
- lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[0];
- lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[1];
- lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[2];
-
- /* Set the Init Block Mode */
- lance->RAP = CSR15; /* Mode Register */
- lance->RDP = 0x0000;
-
- /* Set the Transmit Descriptor Ring Pointer */
- lance->RAP = CSR30; /* Base Address of Transmit Ring */
- lance->RDP = swloww(ARIADNE_RAM+offsetof(struct lancedata, tx_ring));
- lance->RAP = CSR31; /* Base Address of transmit Ring */
- lance->RDP = swhighw(ARIADNE_RAM+offsetof(struct lancedata, tx_ring));
-
- /* Set the Receive Descriptor Ring Pointer */
- lance->RAP = CSR24; /* Base Address of Receive Ring */
- lance->RDP = swloww(ARIADNE_RAM+offsetof(struct lancedata, rx_ring));
- lance->RAP = CSR25; /* Base Address of Receive Ring */
- lance->RDP = swhighw(ARIADNE_RAM+offsetof(struct lancedata, rx_ring));
-
- /* Set the Number of RX and TX Ring Entries */
- lance->RAP = CSR76; /* Receive Ring Length */
- lance->RDP = swapw(((u_short)-RX_RING_SIZE));
- lance->RAP = CSR78; /* Transmit Ring Length */
- lance->RDP = swapw(((u_short)-TX_RING_SIZE));
-
- /* Enable Media Interface Port Auto Select (10BASE-2/10BASE-T) */
- lance->RAP = ISACSR2; /* Miscellaneous Configuration */
- lance->IDP = ASEL;
-
- /* LED Control */
- lance->RAP = ISACSR5; /* LED1 Status */
- lance->IDP = PSE|XMTE;
- lance->RAP = ISACSR6; /* LED2 Status */
- lance->IDP = PSE|COLE;
- lance->RAP = ISACSR7; /* LED3 Status */
- lance->IDP = PSE|RCVE;
-
- netif_start_queue(dev);
-
- i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, IRQF_SHARED,
- dev->name, dev);
- if (i) return i;
-
- lance->RAP = CSR0; /* PCnet-ISA Controller Status */
- lance->RDP = INEA|STRT;
-
- return 0;
+ return 0;
}
-
-static void ariadne_init_ring(struct net_device *dev)
+static irqreturn_t ariadne_interrupt(int irq, void *data)
{
- struct ariadne_private *priv = netdev_priv(dev);
- volatile struct lancedata *lancedata = (struct lancedata *)dev->mem_start;
- int i;
-
- netif_stop_queue(dev);
-
- priv->tx_full = 0;
- priv->cur_rx = priv->cur_tx = 0;
- priv->dirty_tx = 0;
-
- /* Set up TX Ring */
- for (i = 0; i < TX_RING_SIZE; i++) {
- volatile struct TDRE *t = &lancedata->tx_ring[i];
- t->TMD0 = swloww(ARIADNE_RAM+offsetof(struct lancedata, tx_buff[i]));
- t->TMD1 = swhighw(ARIADNE_RAM+offsetof(struct lancedata, tx_buff[i])) |
- TF_STP | TF_ENP;
- t->TMD2 = swapw((u_short)-PKT_BUF_SIZE);
- t->TMD3 = 0;
- priv->tx_ring[i] = &lancedata->tx_ring[i];
- priv->tx_buff[i] = lancedata->tx_buff[i];
-#if 0
- printk(KERN_DEBUG "TX Entry %2d at %p, Buf at %p\n", i,
- &lancedata->tx_ring[i], lancedata->tx_buff[i]);
-#endif
- }
-
- /* Set up RX Ring */
- for (i = 0; i < RX_RING_SIZE; i++) {
- volatile struct RDRE *r = &lancedata->rx_ring[i];
- r->RMD0 = swloww(ARIADNE_RAM+offsetof(struct lancedata, rx_buff[i]));
- r->RMD1 = swhighw(ARIADNE_RAM+offsetof(struct lancedata, rx_buff[i])) |
- RF_OWN;
- r->RMD2 = swapw((u_short)-PKT_BUF_SIZE);
- r->RMD3 = 0x0000;
- priv->rx_ring[i] = &lancedata->rx_ring[i];
- priv->rx_buff[i] = lancedata->rx_buff[i];
-#if 0
- printk(KERN_DEBUG "RX Entry %2d at %p, Buf at %p\n", i,
- &lancedata->rx_ring[i], lancedata->rx_buff[i]);
+ struct net_device *dev = (struct net_device *)data;
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ struct ariadne_private *priv;
+ int csr0, boguscnt;
+ int handled = 0;
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+
+ if (!(lance->RDP & INTR)) /* Check if any interrupt has been */
+ return IRQ_NONE; /* generated by the board */
+
+ priv = netdev_priv(dev);
+
+ boguscnt = 10;
+ while ((csr0 = lance->RDP) & (ERR | RINT | TINT) && --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP */
+ lance->RDP = csr0 & ~(INEA | TDMD | STOP | STRT | INIT);
+
+#ifdef DEBUG
+ if (ariadne_debug > 5) {
+ netdev_dbg(dev, "interrupt csr0=%#02x new csr=%#02x [",
+ csr0, lance->RDP);
+ if (csr0 & INTR)
+ pr_cont(" INTR");
+ if (csr0 & INEA)
+ pr_cont(" INEA");
+ if (csr0 & RXON)
+ pr_cont(" RXON");
+ if (csr0 & TXON)
+ pr_cont(" TXON");
+ if (csr0 & TDMD)
+ pr_cont(" TDMD");
+ if (csr0 & STOP)
+ pr_cont(" STOP");
+ if (csr0 & STRT)
+ pr_cont(" STRT");
+ if (csr0 & INIT)
+ pr_cont(" INIT");
+ if (csr0 & ERR)
+ pr_cont(" ERR");
+ if (csr0 & BABL)
+ pr_cont(" BABL");
+ if (csr0 & CERR)
+ pr_cont(" CERR");
+ if (csr0 & MISS)
+ pr_cont(" MISS");
+ if (csr0 & MERR)
+ pr_cont(" MERR");
+ if (csr0 & RINT)
+ pr_cont(" RINT");
+ if (csr0 & TINT)
+ pr_cont(" TINT");
+ if (csr0 & IDON)
+ pr_cont(" IDON");
+ pr_cont(" ]\n");
+ }
#endif
- }
-}
+ if (csr0 & RINT) { /* Rx interrupt */
+ handled = 1;
+ ariadne_rx(dev);
+ }
-static int ariadne_close(struct net_device *dev)
-{
- volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
+ if (csr0 & TINT) { /* Tx-done interrupt */
+ int dirty_tx = priv->dirty_tx;
+
+ handled = 1;
+ while (dirty_tx < priv->cur_tx) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = lowb(priv->tx_ring[entry]->TMD1);
+
+ if (status & TF_OWN)
+ break; /* It still hasn't been Txed */
+
+ priv->tx_ring[entry]->TMD1 &= 0xff00;
+
+ if (status & TF_ERR) {
+ /* There was a major error, log it */
+ int err_status = priv->tx_ring[entry]->TMD3;
+ dev->stats.tx_errors++;
+ if (err_status & EF_RTRY)
+ dev->stats.tx_aborted_errors++;
+ if (err_status & EF_LCAR)
+ dev->stats.tx_carrier_errors++;
+ if (err_status & EF_LCOL)
+ dev->stats.tx_window_errors++;
+ if (err_status & EF_UFLO) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ dev->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ netdev_err(dev, "Tx FIFO error! Status %04x\n",
+ csr0);
+ /* Restart the chip */
+ lance->RDP = STRT;
+ }
+ } else {
+ if (status & (TF_MORE | TF_ONE))
+ dev->stats.collisions++;
+ dev->stats.tx_packets++;
+ }
+ dirty_tx++;
+ }
- netif_stop_queue(dev);
+#ifndef final_version
+ if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ netdev_err(dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
+ dirty_tx, priv->cur_tx,
+ priv->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
- lance->RAP = CSR112; /* Missed Frame Count */
- dev->stats.rx_missed_errors = swapw(lance->RDP);
- lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ if (priv->tx_full && netif_queue_stopped(dev) &&
+ dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full */
+ priv->tx_full = 0;
+ netif_wake_queue(dev);
+ }
- if (ariadne_debug > 1) {
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, lance->RDP);
- printk(KERN_DEBUG "%s: %lu packets missed\n", dev->name,
- dev->stats.rx_missed_errors);
- }
+ priv->dirty_tx = dirty_tx;
+ }
- /* We stop the LANCE here -- it occasionally polls memory if we don't. */
- lance->RDP = STOP;
+ /* Log misc errors */
+ if (csr0 & BABL) {
+ handled = 1;
+ dev->stats.tx_errors++; /* Tx babble */
+ }
+ if (csr0 & MISS) {
+ handled = 1;
+ dev->stats.rx_errors++; /* Missed a Rx frame */
+ }
+ if (csr0 & MERR) {
+ handled = 1;
+ netdev_err(dev, "Bus master arbitration failure, status %04x\n",
+ csr0);
+ /* Restart the chip */
+ lance->RDP = STRT;
+ }
+ }
- free_irq(IRQ_AMIGA_PORTS, dev);
+ /* Clear any other interrupt, and set interrupt enable */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | BABL | CERR | MISS | MERR | IDON;
- return 0;
-}
+ if (ariadne_debug > 4)
+ netdev_dbg(dev, "exiting interrupt, csr%d=%#04x\n",
+ lance->RAP, lance->RDP);
+ return IRQ_RETVAL(handled);
+}
-static inline void ariadne_reset(struct net_device *dev)
+static int ariadne_open(struct net_device *dev)
{
- volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
-
- lance->RAP = CSR0; /* PCnet-ISA Controller Status */
- lance->RDP = STOP;
- ariadne_init_ring(dev);
- lance->RDP = INEA|STRT;
- netif_start_queue(dev);
-}
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ u_short in;
+ u_long version;
+ int i;
+ /* Reset the LANCE */
+ in = lance->Reset;
+
+ /* Stop the LANCE */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP;
+
+ /* Check the LANCE version */
+ lance->RAP = CSR88; /* Chip ID */
+ version = swapw(lance->RDP);
+ lance->RAP = CSR89; /* Chip ID */
+ version |= swapw(lance->RDP) << 16;
+ if ((version & 0x00000fff) != 0x00000003) {
+ pr_warn("Couldn't find AMD Ethernet Chip\n");
+ return -EAGAIN;
+ }
+ if ((version & 0x0ffff000) != 0x00003000) {
+ pr_warn("Couldn't find Am79C960 (Wrong part number = %ld)\n",
+ (version & 0x0ffff000) >> 12);
+ return -EAGAIN;
+ }
-static irqreturn_t ariadne_interrupt(int irq, void *data)
-{
- struct net_device *dev = (struct net_device *)data;
- volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
- struct ariadne_private *priv;
- int csr0, boguscnt;
- int handled = 0;
+ netdev_dbg(dev, "Am79C960 (PCnet-ISA) Revision %ld\n",
+ (version & 0xf0000000) >> 28);
- lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ ariadne_init_ring(dev);
- if (!(lance->RDP & INTR)) /* Check if any interrupt has been */
- return IRQ_NONE; /* generated by the board. */
+ /* Miscellaneous Stuff */
+ lance->RAP = CSR3; /* Interrupt Masks and Deferral Control */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR4; /* Test and Features Control */
+ lance->RDP = DPOLL | APAD_XMT | MFCOM | RCVCCOM | TXSTRTM | JABM;
- priv = netdev_priv(dev);
+ /* Set the Multicast Table */
+ lance->RAP = CSR8; /* Logical Address Filter, LADRF[15:0] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR9; /* Logical Address Filter, LADRF[31:16] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR10; /* Logical Address Filter, LADRF[47:32] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR11; /* Logical Address Filter, LADRF[63:48] */
+ lance->RDP = 0x0000;
- boguscnt = 10;
- while ((csr0 = lance->RDP) & (ERR|RINT|TINT) && --boguscnt >= 0) {
- /* Acknowledge all of the current interrupt sources ASAP. */
- lance->RDP = csr0 & ~(INEA|TDMD|STOP|STRT|INIT);
+ /* Set the Ethernet Hardware Address */
+ lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[0];
+ lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[1];
+ lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[2];
-#if 0
- if (ariadne_debug > 5) {
- printk(KERN_DEBUG "%s: interrupt csr0=%#2.2x new csr=%#2.2x.",
- dev->name, csr0, lance->RDP);
- printk("[");
- if (csr0 & INTR)
- printk(" INTR");
- if (csr0 & INEA)
- printk(" INEA");
- if (csr0 & RXON)
- printk(" RXON");
- if (csr0 & TXON)
- printk(" TXON");
- if (csr0 & TDMD)
- printk(" TDMD");
- if (csr0 & STOP)
- printk(" STOP");
- if (csr0 & STRT)
- printk(" STRT");
- if (csr0 & INIT)
- printk(" INIT");
- if (csr0 & ERR)
- printk(" ERR");
- if (csr0 & BABL)
- printk(" BABL");
- if (csr0 & CERR)
- printk(" CERR");
- if (csr0 & MISS)
- printk(" MISS");
- if (csr0 & MERR)
- printk(" MERR");
- if (csr0 & RINT)
- printk(" RINT");
- if (csr0 & TINT)
- printk(" TINT");
- if (csr0 & IDON)
- printk(" IDON");
- printk(" ]\n");
- }
-#endif
+ /* Set the Init Block Mode */
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = 0x0000;
- if (csr0 & RINT) { /* Rx interrupt */
- handled = 1;
- ariadne_rx(dev);
- }
+ /* Set the Transmit Descriptor Ring Pointer */
+ lance->RAP = CSR30; /* Base Address of Transmit Ring */
+ lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));
+ lance->RAP = CSR31; /* Base Address of transmit Ring */
+ lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));
+
+ /* Set the Receive Descriptor Ring Pointer */
+ lance->RAP = CSR24; /* Base Address of Receive Ring */
+ lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));
+ lance->RAP = CSR25; /* Base Address of Receive Ring */
+ lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));
+
+ /* Set the Number of RX and TX Ring Entries */
+ lance->RAP = CSR76; /* Receive Ring Length */
+ lance->RDP = swapw(((u_short)-RX_RING_SIZE));
+ lance->RAP = CSR78; /* Transmit Ring Length */
+ lance->RDP = swapw(((u_short)-TX_RING_SIZE));
+
+ /* Enable Media Interface Port Auto Select (10BASE-2/10BASE-T) */
+ lance->RAP = ISACSR2; /* Miscellaneous Configuration */
+ lance->IDP = ASEL;
+
+ /* LED Control */
+ lance->RAP = ISACSR5; /* LED1 Status */
+ lance->IDP = PSE|XMTE;
+ lance->RAP = ISACSR6; /* LED2 Status */
+ lance->IDP = PSE|COLE;
+ lance->RAP = ISACSR7; /* LED3 Status */
+ lance->IDP = PSE|RCVE;
+
+ netif_start_queue(dev);
+
+ i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (i)
+ return i;
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | STRT;
+
+ return 0;
+}
- if (csr0 & TINT) { /* Tx-done interrupt */
- int dirty_tx = priv->dirty_tx;
-
- handled = 1;
- while (dirty_tx < priv->cur_tx) {
- int entry = dirty_tx % TX_RING_SIZE;
- int status = lowb(priv->tx_ring[entry]->TMD1);
-
- if (status & TF_OWN)
- break; /* It still hasn't been Txed */
-
- priv->tx_ring[entry]->TMD1 &= 0xff00;
-
- if (status & TF_ERR) {
- /* There was an major error, log it. */
- int err_status = priv->tx_ring[entry]->TMD3;
- dev->stats.tx_errors++;
- if (err_status & EF_RTRY)
- dev->stats.tx_aborted_errors++;
- if (err_status & EF_LCAR)
- dev->stats.tx_carrier_errors++;
- if (err_status & EF_LCOL)
- dev->stats.tx_window_errors++;
- if (err_status & EF_UFLO) {
- /* Ackk! On FIFO errors the Tx unit is turned off! */
- dev->stats.tx_fifo_errors++;
- /* Remove this verbosity later! */
- printk(KERN_ERR "%s: Tx FIFO error! Status %4.4x.\n",
- dev->name, csr0);
- /* Restart the chip. */
- lance->RDP = STRT;
- }
- } else {
- if (status & (TF_MORE|TF_ONE))
- dev->stats.collisions++;
- dev->stats.tx_packets++;
- }
- dirty_tx++;
- }
+static int ariadne_close(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
-#ifndef final_version
- if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) {
- printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d, "
- "full=%d.\n", dirty_tx, priv->cur_tx, priv->tx_full);
- dirty_tx += TX_RING_SIZE;
- }
-#endif
+ netif_stop_queue(dev);
- if (priv->tx_full && netif_queue_stopped(dev) &&
- dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) {
- /* The ring is no longer full. */
- priv->tx_full = 0;
- netif_wake_queue(dev);
- }
+ lance->RAP = CSR112; /* Missed Frame Count */
+ dev->stats.rx_missed_errors = swapw(lance->RDP);
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
- priv->dirty_tx = dirty_tx;
+ if (ariadne_debug > 1) {
+ netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
+ lance->RDP);
+ netdev_dbg(dev, "%lu packets missed\n",
+ dev->stats.rx_missed_errors);
}
- /* Log misc errors. */
- if (csr0 & BABL) {
- handled = 1;
- dev->stats.tx_errors++; /* Tx babble. */
- }
- if (csr0 & MISS) {
- handled = 1;
- dev->stats.rx_errors++; /* Missed a Rx frame. */
- }
- if (csr0 & MERR) {
- handled = 1;
- printk(KERN_ERR "%s: Bus master arbitration failure, status "
- "%4.4x.\n", dev->name, csr0);
- /* Restart the chip. */
- lance->RDP = STRT;
- }
- }
+ /* We stop the LANCE here -- it occasionally polls memory if we don't */
+ lance->RDP = STOP;
- /* Clear any other interrupt, and set interrupt enable. */
- lance->RAP = CSR0; /* PCnet-ISA Controller Status */
- lance->RDP = INEA|BABL|CERR|MISS|MERR|IDON;
+ free_irq(IRQ_AMIGA_PORTS, dev);
-#if 0
- if (ariadne_debug > 4)
- printk(KERN_DEBUG "%s: exiting interrupt, csr%d=%#4.4x.\n", dev->name,
- lance->RAP, lance->RDP);
-#endif
- return IRQ_RETVAL(handled);
+ return 0;
}
+static inline void ariadne_reset(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP;
+ ariadne_init_ring(dev);
+ lance->RDP = INEA | STRT;
+ netif_start_queue(dev);
+}
static void ariadne_tx_timeout(struct net_device *dev)
{
- volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
- printk(KERN_ERR "%s: transmit timed out, status %4.4x, resetting.\n",
- dev->name, lance->RDP);
- ariadne_reset(dev);
- netif_wake_queue(dev);
+ netdev_err(dev, "transmit timed out, status %04x, resetting\n",
+ lance->RDP);
+ ariadne_reset(dev);
+ netif_wake_queue(dev);
}
-
static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct ariadne_private *priv = netdev_priv(dev);
- volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
- int entry;
- unsigned long flags;
- int len = skb->len;
-
-#if 0
- if (ariadne_debug > 3) {
- lance->RAP = CSR0; /* PCnet-ISA Controller Status */
- printk(KERN_DEBUG "%s: ariadne_start_xmit() called, csr0 %4.4x.\n",
- dev->name, lance->RDP);
- lance->RDP = 0x0000;
- }
-#endif
-
- /* FIXME: is the 79C960 new enough to do its own padding right ? */
- if (skb->len < ETH_ZLEN)
- {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- len = ETH_ZLEN;
- }
-
- /* Fill in a Tx ring entry */
+ struct ariadne_private *priv = netdev_priv(dev);
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ int entry;
+ unsigned long flags;
+ int len = skb->len;
#if 0
-{
- printk(KERN_DEBUG "TX pkt type 0x%04x from %pM to %pM "
- " data 0x%08x len %d\n",
- ((u_short *)skb->data)[6],
- skb->data + 6, skb->data,
- (int)skb->data, (int)skb->len);
-}
+ if (ariadne_debug > 3) {
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ netdev_dbg(dev, "%s: csr0 %04x\n", __func__, lance->RDP);
+ lance->RDP = 0x0000;
+ }
#endif
- local_irq_save(flags);
-
- entry = priv->cur_tx % TX_RING_SIZE;
-
- /* Caution: the write order is important here, set the base address with
- the "ownership" bits last. */
-
- priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len);
- priv->tx_ring[entry]->TMD3 = 0x0000;
- memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len);
-
-#if 0
- {
- int i, len;
-
- len = skb->len > 64 ? 64 : skb->len;
- len >>= 1;
- for (i = 0; i < len; i += 8) {
- int j;
- printk(KERN_DEBUG "%04x:", i);
- for (j = 0; (j < 8) && ((i+j) < len); j++) {
- if (!(j & 1))
- printk(" ");
- printk("%04x", priv->tx_buff[entry][i+j]);
- }
- printk("\n");
+ /* FIXME: is the 79C960 new enough to do its own padding right ? */
+ if (skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ len = ETH_ZLEN;
}
- }
-#endif
- priv->tx_ring[entry]->TMD1 = (priv->tx_ring[entry]->TMD1&0xff00)|TF_OWN|TF_STP|TF_ENP;
+ /* Fill in a Tx ring entry */
- dev_kfree_skb(skb);
+ netdev_dbg(dev, "TX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n",
+ ((u_short *)skb->data)[6],
+ skb->data + 6, skb->data,
+ (int)skb->data, (int)skb->len);
- priv->cur_tx++;
- if ((priv->cur_tx >= TX_RING_SIZE) && (priv->dirty_tx >= TX_RING_SIZE)) {
+ local_irq_save(flags);
-#if 0
- printk(KERN_DEBUG "*** Subtracting TX_RING_SIZE from cur_tx (%d) and "
- "dirty_tx (%d)\n", priv->cur_tx, priv->dirty_tx);
-#endif
+ entry = priv->cur_tx % TX_RING_SIZE;
- priv->cur_tx -= TX_RING_SIZE;
- priv->dirty_tx -= TX_RING_SIZE;
- }
- dev->stats.tx_bytes += len;
+ /* Caution: the write order is important here, set the base address with
+ the "ownership" bits last */
- /* Trigger an immediate send poll. */
- lance->RAP = CSR0; /* PCnet-ISA Controller Status */
- lance->RDP = INEA|TDMD;
+ priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len);
+ priv->tx_ring[entry]->TMD3 = 0x0000;
+ memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len);
- if (lowb(priv->tx_ring[(entry+1) % TX_RING_SIZE]->TMD1) != 0) {
- netif_stop_queue(dev);
- priv->tx_full = 1;
- }
- local_irq_restore(flags);
+#ifdef DEBUG
+ print_hex_dump(KERN_DEBUG, "tx_buff: ", DUMP_PREFIX_OFFSET, 16, 1,
+ (void *)priv->tx_buff[entry],
+ skb->len > 64 ? 64 : skb->len, true);
+#endif
- return NETDEV_TX_OK;
-}
+ priv->tx_ring[entry]->TMD1 = (priv->tx_ring[entry]->TMD1 & 0xff00)
+ | TF_OWN | TF_STP | TF_ENP;
+ dev_kfree_skb(skb);
-static int ariadne_rx(struct net_device *dev)
-{
- struct ariadne_private *priv = netdev_priv(dev);
- int entry = priv->cur_rx % RX_RING_SIZE;
- int i;
-
- /* If we own the next entry, it's a new packet. Send it up. */
- while (!(lowb(priv->rx_ring[entry]->RMD1) & RF_OWN)) {
- int status = lowb(priv->rx_ring[entry]->RMD1);
-
- if (status != (RF_STP|RF_ENP)) { /* There was an error. */
- /* There is a tricky error noted by John Murphy,
- <murf@perftech.com> to Russ Nelson: Even with full-sized
- buffers it's possible for a jabber packet to use two
- buffers, with only the last correctly noting the error. */
- if (status & RF_ENP)
- /* Only count a general error at the end of a packet.*/
- dev->stats.rx_errors++;
- if (status & RF_FRAM)
- dev->stats.rx_frame_errors++;
- if (status & RF_OFLO)
- dev->stats.rx_over_errors++;
- if (status & RF_CRC)
- dev->stats.rx_crc_errors++;
- if (status & RF_BUFF)
- dev->stats.rx_fifo_errors++;
- priv->rx_ring[entry]->RMD1 &= 0xff00|RF_STP|RF_ENP;
- } else {
- /* Malloc up new buffer, compatible with net-3. */
- short pkt_len = swapw(priv->rx_ring[entry]->RMD3);
- struct sk_buff *skb;
-
- skb = dev_alloc_skb(pkt_len+2);
- if (skb == NULL) {
- printk(KERN_WARNING "%s: Memory squeeze, deferring packet.\n",
- dev->name);
- for (i = 0; i < RX_RING_SIZE; i++)
- if (lowb(priv->rx_ring[(entry+i) % RX_RING_SIZE]->RMD1) & RF_OWN)
- break;
-
- if (i > RX_RING_SIZE-2) {
- dev->stats.rx_dropped++;
- priv->rx_ring[entry]->RMD1 |= RF_OWN;
- priv->cur_rx++;
- }
- break;
- }
+ priv->cur_tx++;
+ if ((priv->cur_tx >= TX_RING_SIZE) &&
+ (priv->dirty_tx >= TX_RING_SIZE)) {
+ netdev_dbg(dev, "*** Subtracting TX_RING_SIZE from cur_tx (%d) and dirty_tx (%d)\n",
+ priv->cur_tx, priv->dirty_tx);
- skb_reserve(skb,2); /* 16 byte align */
- skb_put(skb,pkt_len); /* Make room */
- skb_copy_to_linear_data(skb, (char *)priv->rx_buff[entry], pkt_len);
- skb->protocol=eth_type_trans(skb,dev);
-#if 0
-{
- printk(KERN_DEBUG "RX pkt type 0x%04x from ",
- ((u_short *)skb->data)[6]);
- {
- u_char *ptr = &((u_char *)skb->data)[6];
- printk("%pM", ptr);
- }
- printk(" to ");
- {
- u_char *ptr = (u_char *)skb->data;
- printk("%pM", ptr);
- }
- printk(" data 0x%08x len %d\n", (int)skb->data, (int)skb->len);
-}
-#endif
-
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
+ priv->cur_tx -= TX_RING_SIZE;
+ priv->dirty_tx -= TX_RING_SIZE;
}
+ dev->stats.tx_bytes += len;
- priv->rx_ring[entry]->RMD1 |= RF_OWN;
- entry = (++priv->cur_rx) % RX_RING_SIZE;
- }
+ /* Trigger an immediate send poll */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | TDMD;
- priv->cur_rx = priv->cur_rx % RX_RING_SIZE;
-
- /* We should check that at least two ring entries are free. If not,
- we should free one and mark stats->rx_dropped++. */
+ if (lowb(priv->tx_ring[(entry + 1) % TX_RING_SIZE]->TMD1) != 0) {
+ netif_stop_queue(dev);
+ priv->tx_full = 1;
+ }
+ local_irq_restore(flags);
- return 0;
+ return NETDEV_TX_OK;
}
-
static struct net_device_stats *ariadne_get_stats(struct net_device *dev)
{
- volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
- short saved_addr;
- unsigned long flags;
-
- local_irq_save(flags);
- saved_addr = lance->RAP;
- lance->RAP = CSR112; /* Missed Frame Count */
- dev->stats.rx_missed_errors = swapw(lance->RDP);
- lance->RAP = saved_addr;
- local_irq_restore(flags);
-
- return &dev->stats;
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ short saved_addr;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ saved_addr = lance->RAP;
+ lance->RAP = CSR112; /* Missed Frame Count */
+ dev->stats.rx_missed_errors = swapw(lance->RDP);
+ lance->RAP = saved_addr;
+ local_irq_restore(flags);
+
+ return &dev->stats;
}
-
/* Set or clear the multicast filter for this adaptor.
- num_addrs == -1 Promiscuous mode, receive all packets
- num_addrs == 0 Normal mode, clear multicast list
- num_addrs > 0 Multicast mode, receive normal and MC packets, and do
- best-effort filtering.
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ * num_addrs == 0 Normal mode, clear multicast list
+ * num_addrs > 0 Multicast mode, receive normal and MC packets,
+ * and do best-effort filtering.
*/
static void set_multicast_list(struct net_device *dev)
{
- volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
- if (!netif_running(dev))
- return;
+ if (!netif_running(dev))
+ return;
- netif_stop_queue(dev);
+ netif_stop_queue(dev);
- /* We take the simple way out and always enable promiscuous mode. */
- lance->RAP = CSR0; /* PCnet-ISA Controller Status */
- lance->RDP = STOP; /* Temporarily stop the lance. */
- ariadne_init_ring(dev);
+ /* We take the simple way out and always enable promiscuous mode */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP; /* Temporarily stop the lance */
+ ariadne_init_ring(dev);
- if (dev->flags & IFF_PROMISC) {
- lance->RAP = CSR15; /* Mode Register */
- lance->RDP = PROM; /* Set promiscuous mode */
- } else {
- short multicast_table[4];
- int num_addrs = netdev_mc_count(dev);
- int i;
- /* We don't use the multicast table, but rely on upper-layer filtering. */
- memset(multicast_table, (num_addrs == 0) ? 0 : -1,
- sizeof(multicast_table));
- for (i = 0; i < 4; i++) {
- lance->RAP = CSR8+(i<<8); /* Logical Address Filter */
- lance->RDP = swapw(multicast_table[i]);
+ if (dev->flags & IFF_PROMISC) {
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = PROM; /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int num_addrs = netdev_mc_count(dev);
+ int i;
+ /* We don't use the multicast table,
+ * but rely on upper-layer filtering
+ */
+ memset(multicast_table, (num_addrs == 0) ? 0 : -1,
+ sizeof(multicast_table));
+ for (i = 0; i < 4; i++) {
+ lance->RAP = CSR8 + (i << 8);
+ /* Logical Address Filter */
+ lance->RDP = swapw(multicast_table[i]);
+ }
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = 0x0000; /* Unset promiscuous mode */
}
- lance->RAP = CSR15; /* Mode Register */
- lance->RDP = 0x0000; /* Unset promiscuous mode */
- }
- lance->RAP = CSR0; /* PCnet-ISA Controller Status */
- lance->RDP = INEA|STRT|IDON; /* Resume normal operation. */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | STRT | IDON;/* Resume normal operation */
- netif_wake_queue(dev);
+ netif_wake_queue(dev);
}
static void __devexit ariadne_remove_one(struct zorro_dev *z)
{
- struct net_device *dev = zorro_get_drvdata(z);
+ struct net_device *dev = zorro_get_drvdata(z);
- unregister_netdev(dev);
- release_mem_region(ZTWO_PADDR(dev->base_addr), sizeof(struct Am79C960));
- release_mem_region(ZTWO_PADDR(dev->mem_start), ARIADNE_RAM_SIZE);
- free_netdev(dev);
+ unregister_netdev(dev);
+ release_mem_region(ZTWO_PADDR(dev->base_addr), sizeof(struct Am79C960));
+ release_mem_region(ZTWO_PADDR(dev->mem_start), ARIADNE_RAM_SIZE);
+ free_netdev(dev);
}
+static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = {
+ { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl);
+
+static const struct net_device_ops ariadne_netdev_ops = {
+ .ndo_open = ariadne_open,
+ .ndo_stop = ariadne_close,
+ .ndo_start_xmit = ariadne_start_xmit,
+ .ndo_tx_timeout = ariadne_tx_timeout,
+ .ndo_get_stats = ariadne_get_stats,
+ .ndo_set_multicast_list = set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int __devinit ariadne_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ unsigned long board = z->resource.start;
+ unsigned long base_addr = board + ARIADNE_LANCE;
+ unsigned long mem_start = board + ARIADNE_RAM;
+ struct resource *r1, *r2;
+ struct net_device *dev;
+ struct ariadne_private *priv;
+ int err;
+
+ r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960");
+ if (!r1)
+ return -EBUSY;
+ r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM");
+ if (!r2) {
+ release_mem_region(base_addr, sizeof(struct Am79C960));
+ return -EBUSY;
+ }
+
+ dev = alloc_etherdev(sizeof(struct ariadne_private));
+ if (dev == NULL) {
+ release_mem_region(base_addr, sizeof(struct Am79C960));
+ release_mem_region(mem_start, ARIADNE_RAM_SIZE);
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(dev);
+
+ r1->name = dev->name;
+ r2->name = dev->name;
+
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x60;
+ dev->dev_addr[2] = 0x30;
+ dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff;
+ dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff;
+ dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
+ dev->base_addr = ZTWO_VADDR(base_addr);
+ dev->mem_start = ZTWO_VADDR(mem_start);
+ dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;
+
+ dev->netdev_ops = &ariadne_netdev_ops;
+ dev->watchdog_timeo = 5 * HZ;
+
+ err = register_netdev(dev);
+ if (err) {
+ release_mem_region(base_addr, sizeof(struct Am79C960));
+ release_mem_region(mem_start, ARIADNE_RAM_SIZE);
+ free_netdev(dev);
+ return err;
+ }
+ zorro_set_drvdata(z, dev);
+
+ netdev_info(dev, "Ariadne at 0x%08lx, Ethernet Address %pM\n",
+ board, dev->dev_addr);
+
+ return 0;
+}
+
+static struct zorro_driver ariadne_driver = {
+ .name = "ariadne",
+ .id_table = ariadne_zorro_tbl,
+ .probe = ariadne_init_one,
+ .remove = __devexit_p(ariadne_remove_one),
+};
+
static int __init ariadne_init_module(void)
{
- return zorro_register_driver(&ariadne_driver);
+ return zorro_register_driver(&ariadne_driver);
}
static void __exit ariadne_cleanup_module(void)
{
- zorro_unregister_driver(&ariadne_driver);
+ zorro_unregister_driver(&ariadne_driver);
}
module_init(ariadne_init_module);
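The probe routine above builds the board's Ethernet address from the fixed Village Tronic OUI 00:60:30 followed by the low 24 bits of the Zorro ROM serial number. A minimal stand-alone sketch of that derivation (illustrative only, not part of the patch; the serial value below is made up):

#include <stdint.h>
#include <stdio.h>

/* Ariadne MAC layout: fixed OUI, then serial bits 23..16, 15..8, 7..0. */
static void ariadne_mac_from_serial(uint32_t serial, uint8_t mac[6])
{
	mac[0] = 0x00;
	mac[1] = 0x60;
	mac[2] = 0x30;
	mac[3] = (serial >> 16) & 0xff;
	mac[4] = (serial >> 8) & 0xff;
	mac[5] = serial & 0xff;
}

int main(void)
{
	uint8_t mac[6];

	ariadne_mac_from_serial(0x00123456, mac);	/* example serial only */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}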
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 7b3e23f3891..52fe21e1e2c 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -199,17 +199,15 @@ am79c961_ramtest(struct net_device *dev, unsigned int val)
static void am79c961_mc_hash(char *addr, u16 *hash)
{
- if (addr[0] & 0x01) {
- int idx, bit;
- u32 crc;
+ int idx, bit;
+ u32 crc;
- crc = ether_crc_le(ETH_ALEN, addr);
+ crc = ether_crc_le(ETH_ALEN, addr);
- idx = crc >> 30;
- bit = (crc >> 26) & 15;
+ idx = crc >> 30;
+ bit = (crc >> 26) & 15;
- hash[idx] |= 1 << bit;
- }
+ hash[idx] |= 1 << bit;
}
static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
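The simplified hash helper above places each multicast address into a 64-bit logical address filter stored as four 16-bit words: the top two CRC bits select the word, the next four bits select the bit within it. A stand-alone illustration of that split (userspace C; the CRC value is an arbitrary example, not computed with the kernel's ether_crc_le()):

#include <stdint.h>
#include <stdio.h>

/* LANCE logical-address-filter position from a 32-bit Ethernet CRC:
 * word index = CRC bits 31..30, bit index = CRC bits 29..26.
 */
static void laf_position(uint32_t crc, unsigned int *idx, unsigned int *bit)
{
	*idx = crc >> 30;
	*bit = (crc >> 26) & 15;
}

int main(void)
{
	uint16_t hash[4] = { 0, 0, 0, 0 };
	unsigned int idx, bit;

	laf_position(0xdeadbeef, &idx, &bit);	/* example CRC only */
	hash[idx] |= 1u << bit;
	printf("word %u, bit %u -> 0x%04x\n", idx, bit, hash[idx]);
	return 0;
}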
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index e07b314ed8f..29dc43523ce 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 0b46b8ea0e8..4317af8d2f0 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -19,6 +19,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index a7b0caa1817..c827a6097d0 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -16,11 +16,13 @@
* Vincent Sanders <vince@simtec.co.uk>
*/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 925929d764c..ca70e16b6e2 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -22,8 +22,8 @@
#ifndef _ATL1C_H_
#define _ATL1C_H_
-#include <linux/version.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -555,7 +555,6 @@ struct atl1c_smb {
struct atl1c_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
- struct vlan_group *vlgrp;
struct napi_struct napi;
struct atl1c_hw hw;
struct atl1c_hw_stats hw_stats;
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 1269ba5d6e5..97224421840 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -411,29 +411,29 @@ static void atl1c_set_multi(struct net_device *netdev)
}
}
-static void atl1c_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp)
+static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
+{
+ if (features & NETIF_F_HW_VLAN_RX) {
+ /* enable VLAN tag insert/strip */
+ *mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
+ } else {
+ /* disable VLAN tag insert/strip */
+ *mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
+ }
+}
+
+static void atl1c_vlan_mode(struct net_device *netdev, u32 features)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
u32 mac_ctrl_data = 0;
if (netif_msg_pktdata(adapter))
- dev_dbg(&pdev->dev, "atl1c_vlan_rx_register\n");
+ dev_dbg(&pdev->dev, "atl1c_vlan_mode\n");
atl1c_irq_disable(adapter);
-
- adapter->vlgrp = grp;
AT_READ_REG(&adapter->hw, REG_MAC_CTRL, &mac_ctrl_data);
-
- if (grp) {
- /* enable VLAN tag insert/strip */
- mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
- } else {
- /* disable VLAN tag insert/strip */
- mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
- }
-
+ __atl1c_vlan_mode(features, &mac_ctrl_data);
AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
atl1c_irq_enable(adapter);
}
@@ -443,9 +443,10 @@ static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
if (netif_msg_pktdata(adapter))
- dev_dbg(&pdev->dev, "atl1c_restore_vlan !");
- atl1c_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+ dev_dbg(&pdev->dev, "atl1c_restore_vlan\n");
+ atl1c_vlan_mode(adapter->netdev, adapter->netdev->features);
}
+
/*
* atl1c_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
@@ -483,12 +484,31 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
if (netdev->mtu > MAX_TSO_FRAME_SIZE)
features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
return features;
}
+static int atl1c_set_features(struct net_device *netdev, u32 features)
+{
+ u32 changed = netdev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ atl1c_vlan_mode(netdev, features);
+
+ return 0;
+}
+
/*
* atl1c_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
@@ -1433,8 +1453,7 @@ static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
mac_ctrl_data |= ((hw->preamble_len & MAC_CTRL_PRMLEN_MASK) <<
MAC_CTRL_PRMLEN_SHIFT);
- if (adapter->vlgrp)
- mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
+ __atl1c_vlan_mode(netdev->features, &mac_ctrl_data);
mac_ctrl_data |= MAC_CTRL_BC_EN;
if (netdev->flags & IFF_PROMISC)
@@ -1878,14 +1897,14 @@ rrs_checked:
skb_put(skb, length - ETH_FCS_LEN);
skb->protocol = eth_type_trans(skb, netdev);
atl1c_rx_checksum(adapter, skb, rrs);
- if (unlikely(adapter->vlgrp) && rrs->word3 & RRS_VLAN_INS) {
+ if (rrs->word3 & RRS_VLAN_INS) {
u16 vlan;
AT_TAG_TO_VLAN(rrs->vlan_tag, vlan);
vlan = le16_to_cpu(vlan);
- vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vlan);
- } else
- netif_receive_skb(skb);
+ __vlan_hwaccel_put_tag(skb, vlan);
+ }
+ netif_receive_skb(skb);
(*work_done)++;
count++;
@@ -2507,8 +2526,7 @@ static int atl1c_suspend(struct device *dev)
/* clear phy interrupt */
atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data);
/* Config MAC Ctrl register */
- if (adapter->vlgrp)
- mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
+ __atl1c_vlan_mode(netdev->features, &mac_ctrl_data);
/* magic packet maybe Broadcast&multicast&Unicast frame */
if (wufc & AT_WUFC_MAG)
@@ -2581,14 +2599,14 @@ static const struct net_device_ops atl1c_netdev_ops = {
.ndo_stop = atl1c_close,
.ndo_validate_addr = eth_validate_addr,
.ndo_start_xmit = atl1c_xmit_frame,
- .ndo_set_mac_address = atl1c_set_mac_addr,
+ .ndo_set_mac_address = atl1c_set_mac_addr,
.ndo_set_multicast_list = atl1c_set_multi,
.ndo_change_mtu = atl1c_change_mtu,
.ndo_fix_features = atl1c_fix_features,
+ .ndo_set_features = atl1c_set_features,
.ndo_do_ioctl = atl1c_ioctl,
.ndo_tx_timeout = atl1c_tx_timeout,
.ndo_get_stats = atl1c_get_stats,
- .ndo_vlan_rx_register = atl1c_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = atl1c_netpoll,
#endif
@@ -2607,11 +2625,11 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
/* TODO: add when ready */
netdev->hw_features = NETIF_F_SG |
NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
NETIF_F_TSO |
NETIF_F_TSO6;
netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_RX;
+ NETIF_F_HW_VLAN_TX;
return 0;
}
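The receive-path change above recurs in atl1e, atl1 and atl2 below: instead of handing tagged frames to a vlan_group via vlan_hwaccel_receive_skb(), the stripped tag is attached to the skb and every frame takes the normal receive path. A condensed sketch of that pattern (kernel context assumed; only the two VLAN/netif calls are real APIs, the function name and parameters are placeholders):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Hypothetical per-frame delivery helper: attach the hardware-stripped
 * VLAN tag, if present, then let the stack demultiplex the frame.
 */
static void example_rx_deliver(struct sk_buff *skb, bool tagged, u16 vlan_tag)
{
	if (tagged)
		__vlan_hwaccel_put_tag(skb, vlan_tag);	/* two-arg form, as in this tree */
	netif_receive_skb(skb);
}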
diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/atl1e/atl1e.h
index 490d3b38e0c..829b5ad71d0 100644
--- a/drivers/net/atl1e/atl1e.h
+++ b/drivers/net/atl1e/atl1e.h
@@ -23,8 +23,8 @@
#ifndef _ATL1E_H_
#define _ATL1E_H_
-#include <linux/version.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -433,7 +433,6 @@ struct atl1e_rx_ring {
struct atl1e_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
- struct vlan_group *vlgrp;
struct napi_struct napi;
struct mii_if_info mii; /* MII interface info */
struct atl1e_hw hw;
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 86a91228313..d8d411998fa 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -313,8 +313,18 @@ static void atl1e_set_multi(struct net_device *netdev)
}
}
-static void atl1e_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp)
+static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
+{
+ if (features & NETIF_F_HW_VLAN_RX) {
+ /* enable VLAN tag insert/strip */
+ *mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
+ } else {
+ /* disable VLAN tag insert/strip */
+ *mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
+ }
+}
+
+static void atl1e_vlan_mode(struct net_device *netdev, u32 features)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
u32 mac_ctrl_data = 0;
@@ -322,18 +332,8 @@ static void atl1e_vlan_rx_register(struct net_device *netdev,
netdev_dbg(adapter->netdev, "%s\n", __func__);
atl1e_irq_disable(adapter);
-
- adapter->vlgrp = grp;
mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
-
- if (grp) {
- /* enable VLAN tag insert/strip */
- mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
- } else {
- /* disable VLAN tag insert/strip */
- mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
- }
-
+ __atl1e_vlan_mode(features, &mac_ctrl_data);
AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
atl1e_irq_enable(adapter);
}
@@ -341,8 +341,9 @@ static void atl1e_vlan_rx_register(struct net_device *netdev,
static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
{
netdev_dbg(adapter->netdev, "%s\n", __func__);
- atl1e_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+ atl1e_vlan_mode(adapter->netdev, adapter->netdev->features);
}
+
/*
* atl1e_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
@@ -369,6 +370,30 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
return 0;
}
+static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int atl1e_set_features(struct net_device *netdev, u32 features)
+{
+ u32 changed = netdev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ atl1e_vlan_mode(netdev, features);
+
+ return 0;
+}
+
/*
* atl1e_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
@@ -800,8 +825,7 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
/* Init TPD Ring */
tx_ring->dma = roundup(adapter->ring_dma, 8);
offset = tx_ring->dma - adapter->ring_dma;
- tx_ring->desc = (struct atl1e_tpd_desc *)
- (adapter->ring_vir_addr + offset);
+ tx_ring->desc = adapter->ring_vir_addr + offset;
size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
if (tx_ring->tx_buffer == NULL) {
@@ -827,7 +851,7 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
/* Init CMB dma address */
tx_ring->cmb_dma = adapter->ring_dma + offset;
- tx_ring->cmb = (u32 *)(adapter->ring_vir_addr + offset);
+ tx_ring->cmb = adapter->ring_vir_addr + offset;
offset += sizeof(u32);
for (i = 0; i < adapter->num_rx_queues; i++) {
@@ -1040,8 +1064,7 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
value |= (((u32)adapter->hw.preamble_len &
MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
- if (adapter->vlgrp)
- value |= MAC_CTRL_RMV_VLAN;
+ __atl1e_vlan_mode(netdev->features, &value);
value |= MAC_CTRL_BC_EN;
if (netdev->flags & IFF_PROMISC)
@@ -1424,19 +1447,16 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
skb->protocol = eth_type_trans(skb, netdev);
atl1e_rx_checksum(adapter, skb, prrs);
- if (unlikely(adapter->vlgrp &&
- (prrs->pkt_flag & RRS_IS_VLAN_TAG))) {
+ if (prrs->pkt_flag & RRS_IS_VLAN_TAG) {
u16 vlan_tag = (prrs->vtag >> 4) |
((prrs->vtag & 7) << 13) |
((prrs->vtag & 8) << 9);
netdev_dbg(netdev,
"RXD VLAN TAG<RRD>=0x%04x\n",
prrs->vtag);
- vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
- vlan_tag);
- } else {
- netif_receive_skb(skb);
+ __vlan_hwaccel_put_tag(skb, vlan_tag);
}
+ netif_receive_skb(skb);
skip_pkt:
/* skip current packet whether it's ok or not. */
@@ -1812,7 +1832,7 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
tpd = atl1e_get_tpd(adapter);
- if (unlikely(vlan_tx_tag_present(skb))) {
+ if (vlan_tx_tag_present(skb)) {
u16 vlan_tag = vlan_tx_tag_get(skb);
u16 atl1e_vlan_tag;
@@ -2094,8 +2114,7 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
MAC_CTRL_PRMLEN_MASK) <<
MAC_CTRL_PRMLEN_SHIFT);
- if (adapter->vlgrp)
- mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
+ __atl1e_vlan_mode(netdev->features, &mac_ctrl_data);
/* magic packet maybe Broadcast&multicast&Unicast frame */
if (wufc & AT_WUFC_MAG)
@@ -2196,10 +2215,11 @@ static const struct net_device_ops atl1e_netdev_ops = {
.ndo_set_multicast_list = atl1e_set_multi,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = atl1e_set_mac_addr,
+ .ndo_fix_features = atl1e_fix_features,
+ .ndo_set_features = atl1e_set_features,
.ndo_change_mtu = atl1e_change_mtu,
.ndo_do_ioctl = atl1e_ioctl,
.ndo_tx_timeout = atl1e_tx_timeout,
- .ndo_vlan_rx_register = atl1e_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = atl1e_netpoll,
#endif
@@ -2218,9 +2238,9 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
atl1e_set_ethtool_ops(netdev);
netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
- NETIF_F_HW_VLAN_TX;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_RX | NETIF_F_LLTX;
+ NETIF_F_HW_VLAN_RX;
+ netdev->features = netdev->hw_features | NETIF_F_LLTX |
+ NETIF_F_HW_VLAN_TX;
return 0;
}
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index cd5789ff372..97e6954304e 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -44,7 +44,7 @@
* SMP torture testing
*/
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include <linux/compiler.h>
@@ -1285,8 +1285,7 @@ static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
value |= (((u32) adapter->hw.preamble_len
& MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
/* vlan */
- if (adapter->vlgrp)
- value |= MAC_CTRL_RMV_VLAN;
+ __atlx_vlan_mode(netdev->features, &value);
/* rx checksum
if (adapter->rx_csum)
value |= MAC_CTRL_RX_CHKSUM_EN;
@@ -2023,13 +2022,14 @@ rrd_ok:
atl1_rx_checksum(adapter, rrd, skb);
skb->protocol = eth_type_trans(skb, adapter->netdev);
- if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
+ if (rrd->pkt_flg & PACKET_FLAG_VLAN_INS) {
u16 vlan_tag = (rrd->vlan_tag >> 4) |
((rrd->vlan_tag & 7) << 13) |
((rrd->vlan_tag & 8) << 9);
- vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
- } else
- netif_rx(skb);
+
+ __vlan_hwaccel_put_tag(skb, vlan_tag);
+ }
+ netif_rx(skb);
/* let protocol layer free skb */
buffer_info->skb = NULL;
@@ -2783,8 +2783,7 @@ static int atl1_suspend(struct device *dev)
ctrl |= MAC_CTRL_DUPLX;
ctrl |= (((u32)adapter->hw.preamble_len &
MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
- if (adapter->vlgrp)
- ctrl |= MAC_CTRL_RMV_VLAN;
+ __atlx_vlan_mode(netdev->features, &ctrl);
if (wufc & ATLX_WUFC_MAG)
ctrl |= MAC_CTRL_BC_EN;
iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
@@ -2874,9 +2873,10 @@ static const struct net_device_ops atl1_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = atl1_set_mac,
.ndo_change_mtu = atl1_change_mtu,
+ .ndo_fix_features = atlx_fix_features,
+ .ndo_set_features = atlx_set_features,
.ndo_do_ioctl = atlx_ioctl,
.ndo_tx_timeout = atlx_tx_timeout,
- .ndo_vlan_rx_register = atlx_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = atl1_poll_controller,
#endif
@@ -2984,7 +2984,8 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_SG;
netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
- netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO;
+ netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_VLAN_RX;
/* is this valid? see atl1_setup_mac_ctrl() */
netdev->features |= NETIF_F_RXCSUM;
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 68de8cbfb3e..109d6da8be9 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -753,7 +753,6 @@ struct atl1_adapter {
struct pci_dev *pdev;
struct atl1_sft_stats soft_stats;
- struct vlan_group *vlgrp;
u32 rx_buffer_len;
u32 wol;
u16 link_speed;
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 16249e9b6b9..d4f7dda3972 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -20,7 +20,7 @@
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
@@ -311,8 +311,7 @@ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
adapter->txd_dma = adapter->ring_dma ;
offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0;
adapter->txd_dma += offset;
- adapter->txd_ring = (struct tx_pkt_header *) (adapter->ring_vir_addr +
- offset);
+ adapter->txd_ring = adapter->ring_vir_addr + offset;
/* Init TXS Ring */
adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size;
@@ -362,36 +361,59 @@ static inline void atl2_irq_disable(struct atl2_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-#ifdef NETIF_F_HW_VLAN_TX
-static void atl2_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp)
+static void __atl2_vlan_mode(u32 features, u32 *ctrl)
+{
+ if (features & NETIF_F_HW_VLAN_RX) {
+ /* enable VLAN tag insert/strip */
+ *ctrl |= MAC_CTRL_RMV_VLAN;
+ } else {
+ /* disable VLAN tag insert/strip */
+ *ctrl &= ~MAC_CTRL_RMV_VLAN;
+ }
+}
+
+static void atl2_vlan_mode(struct net_device *netdev, u32 features)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
u32 ctrl;
atl2_irq_disable(adapter);
- adapter->vlgrp = grp;
- if (grp) {
- /* enable VLAN tag insert/strip */
- ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
- ctrl |= MAC_CTRL_RMV_VLAN;
- ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
- } else {
- /* disable VLAN tag insert/strip */
- ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
- ctrl &= ~MAC_CTRL_RMV_VLAN;
- ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
- }
+ ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
+ __atl2_vlan_mode(features, &ctrl);
+ ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
atl2_irq_enable(adapter);
}
static void atl2_restore_vlan(struct atl2_adapter *adapter)
{
- atl2_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+ atl2_vlan_mode(adapter->netdev, adapter->netdev->features);
+}
+
+static u32 atl2_fix_features(struct net_device *netdev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int atl2_set_features(struct net_device *netdev, u32 features)
+{
+ u32 changed = netdev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ atl2_vlan_mode(netdev, features);
+
+ return 0;
}
-#endif
static void atl2_intr_rx(struct atl2_adapter *adapter)
{
@@ -425,14 +447,13 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
memcpy(skb->data, rxd->packet, rx_size);
skb_put(skb, rx_size);
skb->protocol = eth_type_trans(skb, netdev);
-#ifdef NETIF_F_HW_VLAN_TX
- if (adapter->vlgrp && (rxd->status.vlan)) {
+ if (rxd->status.vlan) {
u16 vlan_tag = (rxd->status.vtag>>4) |
((rxd->status.vtag&7) << 13) |
((rxd->status.vtag&8) << 9);
- vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
- } else
-#endif
+
+ __vlan_hwaccel_put_tag(skb, vlan_tag);
+ }
netif_rx(skb);
netdev->stats.rx_bytes += rx_size;
netdev->stats.rx_packets++;
@@ -705,9 +726,7 @@ static int atl2_open(struct net_device *netdev)
atl2_set_multi(netdev);
init_ring_ptrs(adapter);
-#ifdef NETIF_F_HW_VLAN_TX
atl2_restore_vlan(adapter);
-#endif
if (atl2_configure(adapter)) {
err = -EIO;
@@ -1083,9 +1102,7 @@ static int atl2_up(struct atl2_adapter *adapter)
atl2_set_multi(netdev);
init_ring_ptrs(adapter);
-#ifdef NETIF_F_HW_VLAN_TX
atl2_restore_vlan(adapter);
-#endif
if (atl2_configure(adapter)) {
err = -EIO;
@@ -1146,8 +1163,7 @@ static void atl2_setup_mac_ctrl(struct atl2_adapter *adapter)
MAC_CTRL_PRMLEN_SHIFT);
/* vlan */
- if (adapter->vlgrp)
- value |= MAC_CTRL_RMV_VLAN;
+ __atl2_vlan_mode(netdev->features, &value);
/* filter mode */
value |= MAC_CTRL_BC_EN;
@@ -1313,9 +1329,10 @@ static const struct net_device_ops atl2_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = atl2_set_mac,
.ndo_change_mtu = atl2_change_mtu,
+ .ndo_fix_features = atl2_fix_features,
+ .ndo_set_features = atl2_set_features,
.ndo_do_ioctl = atl2_ioctl,
.ndo_tx_timeout = atl2_tx_timeout,
- .ndo_vlan_rx_register = atl2_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = atl2_poll_controller,
#endif
@@ -1411,7 +1428,7 @@ static int __devinit atl2_probe(struct pci_dev *pdev,
err = -EIO;
- netdev->hw_features = NETIF_F_SG;
+ netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_RX;
netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
/* Init PHY as early as possible due to power saving issue */
diff --git a/drivers/net/atlx/atl2.h b/drivers/net/atlx/atl2.h
index 927e4de6474..bf9016ebdd9 100644
--- a/drivers/net/atlx/atl2.h
+++ b/drivers/net/atlx/atl2.h
@@ -25,7 +25,7 @@
#ifndef _ATL2_H_
#define _ATL2_H_
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/netdevice.h>
#ifndef _ATL2_HW_H_
@@ -453,9 +453,6 @@ struct atl2_adapter {
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
-#ifdef NETIF_F_HW_VLAN_TX
- struct vlan_group *vlgrp;
-#endif
u32 wol;
u16 link_speed;
u16 link_duplex;
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index afb7f7dd1bb..aabcf4b5745 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -211,8 +211,18 @@ static void atlx_link_chg_task(struct work_struct *work)
spin_unlock_irqrestore(&adapter->lock, flags);
}
-static void atlx_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp)
+static void __atlx_vlan_mode(u32 features, u32 *ctrl)
+{
+ if (features & NETIF_F_HW_VLAN_RX) {
+ /* enable VLAN tag insert/strip */
+ *ctrl |= MAC_CTRL_RMV_VLAN;
+ } else {
+ /* disable VLAN tag insert/strip */
+ *ctrl &= ~MAC_CTRL_RMV_VLAN;
+ }
+}
+
+static void atlx_vlan_mode(struct net_device *netdev, u32 features)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
@@ -220,27 +230,40 @@ static void atlx_vlan_rx_register(struct net_device *netdev,
spin_lock_irqsave(&adapter->lock, flags);
/* atlx_irq_disable(adapter); FIXME: confirm/remove */
- adapter->vlgrp = grp;
-
- if (grp) {
- /* enable VLAN tag insert/strip */
- ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
- ctrl |= MAC_CTRL_RMV_VLAN;
- iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
- } else {
- /* disable VLAN tag insert/strip */
- ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
- ctrl &= ~MAC_CTRL_RMV_VLAN;
- iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
- }
-
+ ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
+ __atlx_vlan_mode(features, &ctrl);
+ iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
/* atlx_irq_enable(adapter); FIXME */
spin_unlock_irqrestore(&adapter->lock, flags);
}
static void atlx_restore_vlan(struct atlx_adapter *adapter)
{
- atlx_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+ atlx_vlan_mode(adapter->netdev, adapter->netdev->features);
+}
+
+static u32 atlx_fix_features(struct net_device *netdev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int atlx_set_features(struct net_device *netdev, u32 features)
+{
+ u32 changed = netdev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ atlx_vlan_mode(netdev, features);
+
+ return 0;
}
#endif /* ATLX_C */
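atl1c, atl1e, atl2 and atlx all install the same pair of callbacks: ndo_fix_features keeps NETIF_F_HW_VLAN_TX mirrored to NETIF_F_HW_VLAN_RX because the MAC has a single insert/strip control bit, and ndo_set_features reprograms the hardware only when the RX flag actually toggled. A generic, hedged sketch of that pair (the foo_* names are placeholders, not from the patch; the feature flag names are those used in this tree):

#include <linux/netdevice.h>

/* Placeholder for the per-driver helper that rewrites MAC_CTRL_RMV_VLAN. */
static void foo_vlan_mode(struct net_device *netdev, u32 features)
{
}

/* The hardware cannot enable RX and TX VLAN acceleration separately,
 * so keep the TX flag in lock-step with the RX flag.
 */
static u32 foo_fix_features(struct net_device *netdev, u32 features)
{
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;
	return features;
}

/* Touch the MAC only when the RX acceleration bit really changed. */
static int foo_set_features(struct net_device *netdev, u32 features)
{
	u32 changed = netdev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX)
		foo_vlan_mode(netdev, features);
	return 0;
}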
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index a69331e06b8..41ea84e3f69 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -5,7 +5,7 @@
* Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
* Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
* Copyright (C) 2006 Broadcom Corporation.
- * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de>
+ * Copyright (C) 2007 Michael Buesch <m@bues.ch>
*
* Distribute under GPL.
*/
@@ -25,6 +25,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
@@ -38,6 +39,7 @@
#define DRV_MODULE_NAME "b44"
#define DRV_MODULE_VERSION "2.0"
+#define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver"
#define B44_DEF_MSG_ENABLE \
(NETIF_MSG_DRV | \
@@ -90,11 +92,8 @@
#define B44_ETHIPV6UDP_HLEN 62
#define B44_ETHIPV4UDP_HLEN 42
-static char version[] __devinitdata =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";
-
MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
-MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -609,7 +608,7 @@ static void b44_tx(struct b44 *bp)
skb->len,
DMA_TO_DEVICE);
rp->skb = NULL;
- dev_kfree_skb_irq(skb);
+ dev_kfree_skb(skb);
}
bp->tx_cons = cons;
@@ -2129,16 +2128,13 @@ static const struct net_device_ops b44_netdev_ops = {
static int __devinit b44_init_one(struct ssb_device *sdev,
const struct ssb_device_id *ent)
{
- static int b44_version_printed = 0;
struct net_device *dev;
struct b44 *bp;
int err;
instance++;
- if (b44_version_printed++ == 0)
- pr_info("%s", version);
-
+ pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
dev = alloc_etherdev(sizeof(*bp));
if (!dev) {
@@ -2224,8 +2220,7 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
if (b44_phy_reset(bp) < 0)
bp->phy_addr = B44_PHY_ADDR_NO_PHY;
- netdev_info(dev, "Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
- dev->dev_addr);
+ netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
return 0;
@@ -2335,7 +2330,7 @@ static struct ssb_driver b44_ssb_driver = {
.resume = b44_resume,
};
-static inline int b44_pci_init(void)
+static inline int __init b44_pci_init(void)
{
int err = 0;
#ifdef CONFIG_B44_PCI
@@ -2344,7 +2339,7 @@ static inline int b44_pci_init(void)
return err;
}
-static inline void b44_pci_exit(void)
+static inline void __exit b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
ssb_pcihost_unregister(&b44_pci_driver);
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index f1573d492e9..1d9b9858067 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -18,6 +18,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
@@ -1646,7 +1647,7 @@ static int __devinit bcm_enet_probe(struct platform_device *pdev)
if (ret)
goto out;
- iomem_size = res_mem->end - res_mem->start + 1;
+ iomem_size = resource_size(res_mem);
if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
ret = -EBUSY;
goto out;
@@ -1861,7 +1862,7 @@ static int __devexit bcm_enet_remove(struct platform_device *pdev)
/* release device resources */
iounmap(priv->base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
/* disable hw block clocks */
if (priv->phy_clk) {
@@ -1897,7 +1898,7 @@ static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- iomem_size = res->end - res->start + 1;
+ iomem_size = resource_size(res);
if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
return -EBUSY;
@@ -1915,7 +1916,7 @@ static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
iounmap(bcm_enet_shared_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
return 0;
}
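The bcm63xx hunks replace the open-coded length res->end - res->start + 1 with resource_size(), which returns the inclusive size of a struct resource. A one-line illustration (kernel context assumed; the helper name is a placeholder):

#include <linux/ioport.h>

/* resource_size(r) == r->end - r->start + 1 */
static resource_size_t example_region_len(const struct resource *r)
{
	return resource_size(r);
}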
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index a7db870d164..c85768cd1b1 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -20,7 +20,6 @@
#include <linux/pci.h>
#include <linux/etherdevice.h>
-#include <linux/version.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
@@ -87,6 +86,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
+#define MAX_TX_QS 8
#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RX + TX */
#define BE_NAPI_WEIGHT 64
#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
@@ -170,7 +170,6 @@ struct be_tx_stats {
u32 be_tx_reqs; /* number of TX requests initiated */
u32 be_tx_stops; /* number of times TX Q was stopped */
u32 be_tx_wrbs; /* number of tx WRBs used */
- u32 be_tx_events; /* number of tx completion events */
u32 be_tx_compl; /* number of tx completion entries processed */
ulong be_tx_jiffies;
u64 be_tx_bytes;
@@ -184,6 +183,7 @@ struct be_tx_obj {
struct be_queue_info cq;
/* Remember the skbs that were transmitted */
struct sk_buff *sent_skb_list[TX_Q_LEN];
+ struct be_tx_stats stats;
};
/* Struct to remember the pages posted for rx frags */
@@ -199,6 +199,7 @@ struct be_rx_stats {
u32 rx_polls; /* number of times NAPI called poll function */
u32 rx_events; /* number of ucast rx completion events */
u32 rx_compl; /* number of rx completion entries processed */
+ ulong rx_dropped; /* number of skb allocation errors */
ulong rx_jiffies;
u64 rx_bytes;
u64 rx_bytes_prev;
@@ -319,8 +320,8 @@ struct be_adapter {
/* TX Rings */
struct be_eq_obj tx_eq;
- struct be_tx_obj tx_obj;
- struct be_tx_stats tx_stats;
+ struct be_tx_obj tx_obj[MAX_TX_QS];
+ u8 num_tx_qs;
u32 cache_line_break[8];
@@ -332,7 +333,6 @@ struct be_adapter {
u8 eq_next_idx;
struct be_drv_stats drv_stats;
- struct vlan_group *vlan_grp;
u16 vlans_added;
u16 max_vlans; /* Number of vlans supported */
u8 vlan_tag[VLAN_N_VID];
@@ -391,7 +391,7 @@ struct be_adapter {
extern const struct ethtool_ops be_ethtool_ops;
#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
-#define tx_stats(adapter) (&adapter->tx_stats)
+#define tx_stats(txo) (&txo->stats)
#define rx_stats(rxo) (&rxo->stats)
#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
@@ -405,6 +405,10 @@ extern const struct ethtool_ops be_ethtool_ops;
for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
i++, rxo++)
+#define for_all_tx_queues(adapter, txo, i) \
+ for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
+ i++, txo++)
+
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
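be.h turns the single tx_obj into an array of MAX_TX_QS objects carrying their own be_tx_stats, and adds for_all_tx_queues() to walk them. A hedged sketch of how the be_main.c hunks further down aggregate TX counters with that macro (assumes the driver's be.h definitions above; the helper name is a placeholder):

#include "be.h"

/* Sum per-queue TX counters the way netdev_stats_update() does after
 * this series; relies on the for_all_tx_queues()/tx_stats() macros
 * declared in be.h above.
 */
static void example_sum_tx_stats(struct be_adapter *adapter,
				 unsigned long *pkts, unsigned long *bytes)
{
	struct be_tx_obj *txo;
	int i;

	*pkts = 0;
	*bytes = 0;
	for_all_tx_queues(adapter, txo, i) {
		*pkts += tx_stats(txo)->be_tx_pkts;
		*bytes += tx_stats(txo)->be_tx_bytes;
	}
}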
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 81654ae16c6..054fa67bc4e 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -106,14 +106,24 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
netdev_stats_update(adapter);
adapter->stats_cmd_sent = false;
}
- } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
- (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
- extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
- CQE_STATUS_EXTD_MASK;
- dev_warn(&adapter->pdev->dev,
- "Error in cmd completion - opcode %d, compl %d, extd %d\n",
- compl->tag0, compl_status, extd_status);
+ } else {
+ if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
+ compl_status == MCC_STATUS_ILLEGAL_REQUEST)
+ goto done;
+
+ if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
+ dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
+ "permitted to execute this cmd (opcode %d)\n",
+ compl->tag0);
+ } else {
+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+ CQE_STATUS_EXTD_MASK;
+ dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
+ "status %d, extd-status %d\n",
+ compl->tag0, compl_status, extd_status);
+ }
}
+done:
return compl_status;
}
@@ -799,12 +809,12 @@ static u32 be_encoded_q_len(int q_len)
return len_encoded;
}
-int be_cmd_mccq_create(struct be_adapter *adapter,
+int be_cmd_mccq_ext_create(struct be_adapter *adapter,
struct be_queue_info *mccq,
struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
- struct be_cmd_req_mcc_create *req;
+ struct be_cmd_req_mcc_ext_create *req;
struct be_dma_mem *q_mem = &mccq->dma_mem;
void *ctxt;
int status;
@@ -859,6 +869,67 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
return status;
}
+int be_cmd_mccq_org_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mcc_create *req;
+ struct be_dma_mem *q_mem = &mccq->dma_mem;
+ void *ctxt;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+ req = embedded_payload(wrb);
+ ctxt = &req->context;
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_MCC_CREATE);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+
+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+
+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+ mccq->id = le16_to_cpu(resp->id);
+ mccq->created = true;
+ }
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+int be_cmd_mccq_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ int status;
+
+ status = be_cmd_mccq_ext_create(adapter, mccq, cq);
+ if (status && !lancer_chip(adapter)) {
+ dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
+ "or newer to avoid conflicting priorities between NIC "
+ "and FCoE traffic");
+ status = be_cmd_mccq_org_create(adapter, mccq, cq);
+ }
+ return status;
+}
+
int be_cmd_txq_create(struct be_adapter *adapter,
struct be_queue_info *txq,
struct be_queue_info *cq)
@@ -913,7 +984,7 @@ int be_cmd_txq_create(struct be_adapter *adapter,
return status;
}
-/* Uses mbox */
+/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
@@ -923,10 +994,13 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
- wrb = wrb_from_mbox(adapter);
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
@@ -943,7 +1017,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
req->max_frame_size = cpu_to_le16(max_frame_size);
req->rss_queue = cpu_to_le32(rss);
- status = be_mbox_notify_wait(adapter);
+ status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
rxq->id = le16_to_cpu(resp->id);
@@ -951,8 +1025,8 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
*rss_id = resp->rss_id;
}
- mutex_unlock(&adapter->mbox_lock);
-
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1007,9 +1081,40 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
req->id = cpu_to_le16(q->id);
status = be_mbox_notify_wait(adapter);
+ if (!status)
+ q->created = false;
mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
+/* Uses MCC */
+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_q_destroy *req;
+ int status;
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
+ sizeof(*req));
+ req->id = cpu_to_le16(q->id);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status)
+ q->created = false;
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2273,8 +2378,7 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
status = be_mbox_notify_wait(adapter);
if (!status) {
- attribs = (struct mgmt_controller_attrib *)( attribs_cmd.va +
- sizeof(struct be_cmd_resp_hdr));
+ attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
adapter->hba_port_num = attribs->hba_attribs.phy_port;
}
@@ -2286,7 +2390,7 @@ err:
}
/* Uses mbox */
-int be_cmd_check_native_mode(struct be_adapter *adapter)
+int be_cmd_req_native_mode(struct be_adapter *adapter)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_func_cap *req;
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 8148cc66cbe..8e4d48824fe 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -51,17 +51,12 @@ struct be_mcc_wrb {
/* Completion Status */
enum {
- MCC_STATUS_SUCCESS = 0x0,
-/* The client does not have sufficient privileges to execute the command */
- MCC_STATUS_INSUFFICIENT_PRIVILEGES = 0x1,
-/* A parameter in the command was invalid. */
- MCC_STATUS_INVALID_PARAMETER = 0x2,
-/* There are insufficient chip resources to execute the command */
- MCC_STATUS_INSUFFICIENT_RESOURCES = 0x3,
-/* The command is completing because the queue was getting flushed */
- MCC_STATUS_QUEUE_FLUSHING = 0x4,
-/* The command is completing with a DMA error */
- MCC_STATUS_DMA_FAILED = 0x5,
+ MCC_STATUS_SUCCESS = 0,
+ MCC_STATUS_FAILED = 1,
+ MCC_STATUS_ILLEGAL_REQUEST = 2,
+ MCC_STATUS_ILLEGAL_FIELD = 3,
+ MCC_STATUS_INSUFFICIENT_BUFFER = 4,
+ MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
MCC_STATUS_NOT_SUPPORTED = 66
};
@@ -434,6 +429,14 @@ struct be_cmd_req_mcc_create {
struct be_cmd_req_hdr hdr;
u16 num_pages;
u16 cq_id;
+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_req_mcc_ext_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 cq_id;
u32 async_event_bitmap[1];
u8 context[sizeof(struct amap_mcc_context_be) / 8];
struct phys_addr pages[8];
@@ -1479,6 +1482,8 @@ extern int be_cmd_rxq_create(struct be_adapter *adapter,
u32 rss, u8 *rss_id);
extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int type);
+extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
+ struct be_queue_info *q);
extern int be_cmd_link_status_query(struct be_adapter *adapter,
bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom);
extern int be_cmd_reset(struct be_adapter *adapter);
@@ -1540,7 +1545,7 @@ extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
extern void be_detect_dump_ue(struct be_adapter *adapter);
extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
-extern int be_cmd_check_native_mode(struct be_adapter *adapter);
+extern int be_cmd_req_native_mode(struct be_adapter *adapter);
extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index facfe3ca5c4..7fd8130d86e 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -52,12 +52,7 @@ static const struct be_ethtool_stat et_stats[] = {
{NETSTAT_INFO(tx_errors)},
{NETSTAT_INFO(rx_dropped)},
{NETSTAT_INFO(tx_dropped)},
- {DRVSTAT_TX_INFO(be_tx_rate)},
- {DRVSTAT_TX_INFO(be_tx_reqs)},
- {DRVSTAT_TX_INFO(be_tx_wrbs)},
- {DRVSTAT_TX_INFO(be_tx_stops)},
- {DRVSTAT_TX_INFO(be_tx_events)},
- {DRVSTAT_TX_INFO(be_tx_compl)},
+ {DRVSTAT_INFO(be_tx_events)},
{DRVSTAT_INFO(rx_crc_errors)},
{DRVSTAT_INFO(rx_alignment_symbol_errors)},
{DRVSTAT_INFO(rx_pause_frames)},
@@ -107,10 +102,21 @@ static const struct be_ethtool_stat et_rx_stats[] = {
{DRVSTAT_RX_INFO(rx_compl)},
{DRVSTAT_RX_INFO(rx_mcast_pkts)},
{DRVSTAT_RX_INFO(rx_post_fail)},
+ {DRVSTAT_RX_INFO(rx_dropped)},
{ERXSTAT_INFO(rx_drops_no_fragments)}
};
#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
+/* Stats related to multi TX queues */
+static const struct be_ethtool_stat et_tx_stats[] = {
+ {DRVSTAT_TX_INFO(be_tx_rate)},
+ {DRVSTAT_TX_INFO(be_tx_reqs)},
+ {DRVSTAT_TX_INFO(be_tx_wrbs)},
+ {DRVSTAT_TX_INFO(be_tx_stops)},
+ {DRVSTAT_TX_INFO(be_tx_compl)}
+};
+#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
+
static const char et_self_tests[][ETH_GSTRING_LEN] = {
"MAC Loopback test",
"PHY Loopback test",
@@ -253,17 +259,15 @@ be_get_ethtool_stats(struct net_device *netdev,
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
void *p = NULL;
- int i, j;
+ int i, j, base;
for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
switch (et_stats[i].type) {
case NETSTAT:
p = &netdev->stats;
break;
- case DRVSTAT_TX:
- p = &adapter->tx_stats;
- break;
case DRVSTAT:
p = &adapter->drv_stats;
break;
@@ -274,6 +278,7 @@ be_get_ethtool_stats(struct net_device *netdev,
*(u64 *)p: *(u32 *)p;
}
+ base = ETHTOOL_STATS_NUM;
for_all_rx_queues(adapter, rxo, j) {
for (i = 0; i < ETHTOOL_RXSTATS_NUM; i++) {
switch (et_rx_stats[i].type) {
@@ -285,11 +290,21 @@ be_get_ethtool_stats(struct net_device *netdev,
rxo->q.id;
break;
}
- data[ETHTOOL_STATS_NUM + j * ETHTOOL_RXSTATS_NUM + i] =
+ data[base + j * ETHTOOL_RXSTATS_NUM + i] =
(et_rx_stats[i].size == sizeof(u64)) ?
*(u64 *)p: *(u32 *)p;
}
}
+
+ base = ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM;
+ for_all_tx_queues(adapter, txo, j) {
+ for (i = 0; i < ETHTOOL_TXSTATS_NUM; i++) {
+ p = (u8 *)&txo->stats + et_tx_stats[i].offset;
+ data[base + j * ETHTOOL_TXSTATS_NUM + i] =
+ (et_tx_stats[i].size == sizeof(u64)) ?
+ *(u64 *)p: *(u32 *)p;
+ }
+ }
}
static void
@@ -312,6 +327,13 @@ be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
data += ETH_GSTRING_LEN;
}
}
+ for (i = 0; i < adapter->num_tx_qs; i++) {
+ for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
+ sprintf(data, "txq%d: %s", i,
+ et_tx_stats[j].desc);
+ data += ETH_GSTRING_LEN;
+ }
+ }
break;
case ETH_SS_TEST:
for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
@@ -331,7 +353,8 @@ static int be_get_sset_count(struct net_device *netdev, int stringset)
return ETHTOOL_TESTS_NUM;
case ETH_SS_STATS:
return ETHTOOL_STATS_NUM +
- adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM;
+ adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
+ adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
default:
return -EINVAL;
}
@@ -386,7 +409,7 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
status = be_cmd_get_phy_info(adapter, &phy_cmd);
if (!status) {
- resp = (struct be_cmd_resp_get_phy_info *) phy_cmd.va;
+ resp = phy_cmd.va;
intf_type = le16_to_cpu(resp->interface_type);
switch (intf_type) {
@@ -457,10 +480,10 @@ be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
struct be_adapter *adapter = netdev_priv(netdev);
ring->rx_max_pending = adapter->rx_obj[0].q.len;
- ring->tx_max_pending = adapter->tx_obj.q.len;
+ ring->tx_max_pending = adapter->tx_obj[0].q.len;
ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
- ring->tx_pending = atomic_read(&adapter->tx_obj.q.used);
+ ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
}
static void
@@ -690,7 +713,7 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
if (!status) {
- resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
+ resp = eeprom_cmd.va;
memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
}
dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
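The ethtool changes above lay the stats buffer out as the global stats first, then ETHTOOL_RXSTATS_NUM entries per RX queue, then ETHTOOL_TXSTATS_NUM entries per TX queue, which is also how be_get_sset_count() sizes it. A small stand-alone illustration of that indexing (the counts below are example values, not the driver's real table sizes):

#include <stdio.h>

/* Flattened position of TX stat i of queue j: global stats, then all
 * per-RX-queue stats, then the per-TX-queue stats.
 */
static int txstat_index(int nstats, int nrxq, int nrxstats,
			int ntxstats, int j, int i)
{
	return nstats + nrxq * nrxstats + j * ntxstats + i;
}

int main(void)
{
	/* Example sizes only; the real ETHTOOL_*_NUM values come from the
	 * et_stats/et_rx_stats/et_tx_stats tables.
	 */
	int nstats = 30, nrxstats = 10, ntxstats = 5, nrxq = 5, ntxq = 8;

	printf("sset count = %d\n",
	       nstats + nrxq * nrxstats + ntxq * ntxstats);
	printf("txq 2, stat 3 -> data[%d]\n",
	       txstat_index(nstats, nrxq, nrxstats, ntxstats, 2, 3));
	return 0;
}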
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index a485f7fdaf3..c411bb1845f 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -33,10 +33,6 @@ module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
-static bool multi_rxq = true;
-module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
-
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -48,7 +44,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
-static char *ue_status_low_desc[] = {
+static const char * const ue_status_low_desc[] = {
"CEV",
"CTX",
"DBUF",
@@ -83,7 +79,7 @@ static char *ue_status_low_desc[] = {
"MPU_INTPEND"
};
/* UE Status High CSR */
-static char *ue_status_hi_desc[] = {
+static const char * const ue_status_hi_desc[] = {
"LPCMEMHOST",
"MGMT_MAC",
"PCS0ONLINE",
@@ -107,7 +103,7 @@ static char *ue_status_hi_desc[] = {
"HOST7",
"HOST8",
"HOST9",
- "NETC"
+ "NETC",
"Unknown",
"Unknown",
"Unknown",
@@ -362,8 +358,8 @@ static void populate_lancer_stats(struct be_adapter *adapter)
drvs->rx_priority_pause_frames = 0;
drvs->pmem_fifo_overflow_drop = 0;
drvs->rx_pause_frames =
- make_64bit_val(pport_stats->rx_pause_frames_lo,
- pport_stats->rx_pause_frames_hi);
+ make_64bit_val(pport_stats->rx_pause_frames_hi,
+ pport_stats->rx_pause_frames_lo);
drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
pport_stats->rx_crc_errors_lo);
drvs->rx_control_frames =
@@ -427,31 +423,40 @@ void netdev_stats_update(struct be_adapter *adapter)
struct be_drv_stats *drvs = &adapter->drv_stats;
struct net_device_stats *dev_stats = &adapter->netdev->stats;
struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
+ unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
int i;
- memset(dev_stats, 0, sizeof(*dev_stats));
for_all_rx_queues(adapter, rxo, i) {
- dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
- dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
- dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
+ pkts += rx_stats(rxo)->rx_pkts;
+ bytes += rx_stats(rxo)->rx_bytes;
+ mcast += rx_stats(rxo)->rx_mcast_pkts;
+ drops += rx_stats(rxo)->rx_dropped;
/* no space in linux buffers: best possible approximation */
if (adapter->generation == BE_GEN3) {
if (!(lancer_chip(adapter))) {
- struct be_erx_stats_v1 *erx_stats =
+ struct be_erx_stats_v1 *erx =
be_erx_stats_from_cmd(adapter);
- dev_stats->rx_dropped +=
- erx_stats->rx_drops_no_fragments[rxo->q.id];
+ drops += erx->rx_drops_no_fragments[rxo->q.id];
}
} else {
- struct be_erx_stats_v0 *erx_stats =
+ struct be_erx_stats_v0 *erx =
be_erx_stats_from_cmd(adapter);
- dev_stats->rx_dropped +=
- erx_stats->rx_drops_no_fragments[rxo->q.id];
+ drops += erx->rx_drops_no_fragments[rxo->q.id];
}
}
+ dev_stats->rx_packets = pkts;
+ dev_stats->rx_bytes = bytes;
+ dev_stats->multicast = mcast;
+ dev_stats->rx_dropped = drops;
- dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
- dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
+ pkts = bytes = 0;
+ for_all_tx_queues(adapter, txo, i) {
+ pkts += tx_stats(txo)->be_tx_pkts;
+ bytes += tx_stats(txo)->be_tx_bytes;
+ }
+ dev_stats->tx_packets = pkts;
+ dev_stats->tx_bytes = bytes;
/* bad pkts received */
dev_stats->rx_errors = drvs->rx_crc_errors +
@@ -554,9 +559,9 @@ static u32 be_calc_rate(u64 bytes, unsigned long ticks)
return rate;
}
-static void be_tx_rate_update(struct be_adapter *adapter)
+static void be_tx_rate_update(struct be_tx_obj *txo)
{
- struct be_tx_stats *stats = tx_stats(adapter);
+ struct be_tx_stats *stats = tx_stats(txo);
ulong now = jiffies;
/* Wrapped around? */
@@ -575,10 +580,11 @@ static void be_tx_rate_update(struct be_adapter *adapter)
}
}
-static void be_tx_stats_update(struct be_adapter *adapter,
+static void be_tx_stats_update(struct be_tx_obj *txo,
u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
- struct be_tx_stats *stats = tx_stats(adapter);
+ struct be_tx_stats *stats = tx_stats(txo);
+
stats->be_tx_reqs++;
stats->be_tx_wrbs += wrb_cnt;
stats->be_tx_bytes += copied;
@@ -648,7 +654,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
}
- if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
+ if (vlan_tx_tag_present(skb)) {
AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
vlan_tag = vlan_tx_tag_get(skb);
vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
@@ -682,14 +688,13 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
}
}
-static int make_tx_wrbs(struct be_adapter *adapter,
+static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
dma_addr_t busaddr;
int i, copied = 0;
struct device *dev = &adapter->pdev->dev;
struct sk_buff *first_skb = skb;
- struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_eth_wrb *wrb;
struct be_eth_hdr_wrb *hdr;
bool map_single = false;
@@ -753,19 +758,19 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_tx_obj *tx_obj = &adapter->tx_obj;
- struct be_queue_info *txq = &tx_obj->q;
+ struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
+ struct be_queue_info *txq = &txo->q;
u32 wrb_cnt = 0, copied = 0;
u32 start = txq->head;
bool dummy_wrb, stopped = false;
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
- copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
+ copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
if (copied) {
/* record the sent skb in the sent_skb table */
- BUG_ON(tx_obj->sent_skb_list[start]);
- tx_obj->sent_skb_list[start] = skb;
+ BUG_ON(txo->sent_skb_list[start]);
+ txo->sent_skb_list[start] = skb;
/* Ensure txq has space for the next skb; Else stop the queue
* *BEFORE* ringing the tx doorbell, so that we serialze the
@@ -774,13 +779,13 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
atomic_add(wrb_cnt, &txq->used);
if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
txq->len) {
- netif_stop_queue(netdev);
+ netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
stopped = true;
}
be_txq_notify(adapter, txq->id, wrb_cnt);
- be_tx_stats_update(adapter, wrb_cnt, copied,
+ be_tx_stats_update(txo, wrb_cnt, copied,
skb_shinfo(skb)->gso_segs, stopped);
} else {
txq->head = start;
@@ -842,13 +847,6 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
return status;
}
-static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- adapter->vlan_grp = grp;
-}
-
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -867,7 +865,6 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
struct be_adapter *adapter = netdev_priv(netdev);
adapter->vlans_added--;
- vlan_group_set_device(adapter->vlan_grp, vid, NULL);
if (!be_physfn(adapter))
return;
@@ -1177,8 +1174,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
if (unlikely(!skb)) {
- if (net_ratelimit())
- dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
+ rxo->stats.rx_dropped++;
be_rx_compl_discard(adapter, rxo, rxcp);
return;
}
@@ -1196,16 +1192,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb->rxhash = rxcp->rss_hash;
- if (unlikely(rxcp->vlanf)) {
- if (!adapter->vlan_grp || adapter->vlans_added == 0) {
- kfree_skb(skb);
- return;
- }
- vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
- rxcp->vlan_tag);
- } else {
- netif_receive_skb(skb);
- }
+ if (unlikely(rxcp->vlanf))
+ __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
+
+ netif_receive_skb(skb);
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
@@ -1259,11 +1249,10 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
if (adapter->netdev->features & NETIF_F_RXHASH)
skb->rxhash = rxcp->rss_hash;
- if (likely(!rxcp->vlanf))
- napi_gro_frags(&eq_obj->napi);
- else
- vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
- rxcp->vlan_tag);
+ if (unlikely(rxcp->vlanf))
+ __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
+
+ napi_gro_frags(&eq_obj->napi);
}
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
@@ -1459,11 +1448,12 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
return txcp;
}
-static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
+static u16 be_tx_compl_process(struct be_adapter *adapter,
+ struct be_tx_obj *txo, u16 last_index)
{
- struct be_queue_info *txq = &adapter->tx_obj.q;
+ struct be_queue_info *txq = &txo->q;
struct be_eth_wrb *wrb;
- struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
+ struct sk_buff **sent_skbs = txo->sent_skb_list;
struct sk_buff *sent_skb;
u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
bool unmap_skb_hdr = true;
@@ -1504,7 +1494,8 @@ static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
}
static int event_handle(struct be_adapter *adapter,
- struct be_eq_obj *eq_obj)
+ struct be_eq_obj *eq_obj,
+ bool rearm)
{
struct be_eq_entry *eqe;
u16 num = 0;
@@ -1517,7 +1508,10 @@ static int event_handle(struct be_adapter *adapter,
/* Deal with any spurious interrupts that come
* without events
*/
- be_eq_notify(adapter, eq_obj->q.id, true, true, num);
+ if (!num)
+ rearm = true;
+
+ be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
if (num)
napi_schedule(&eq_obj->napi);
@@ -1563,15 +1557,17 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
memset(page_info, 0, sizeof(*page_info));
}
BUG_ON(atomic_read(&rxq->used));
+ rxq->tail = rxq->head = 0;
}
-static void be_tx_compl_clean(struct be_adapter *adapter)
+static void be_tx_compl_clean(struct be_adapter *adapter,
+ struct be_tx_obj *txo)
{
- struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
- struct be_queue_info *txq = &adapter->tx_obj.q;
+ struct be_queue_info *tx_cq = &txo->cq;
+ struct be_queue_info *txq = &txo->q;
struct be_eth_tx_compl *txcp;
u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
- struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
+ struct sk_buff **sent_skbs = txo->sent_skb_list;
struct sk_buff *sent_skb;
bool dummy_wrb;
@@ -1580,7 +1576,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
while ((txcp = be_tx_compl_get(tx_cq))) {
end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
wrb_index, txcp);
- num_wrbs += be_tx_compl_process(adapter, end_idx);
+ num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
cmpl++;
}
if (cmpl) {
@@ -1607,7 +1603,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
index_adv(&end_idx,
wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
txq->len);
- num_wrbs = be_tx_compl_process(adapter, end_idx);
+ num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
atomic_sub(num_wrbs, &txq->used);
}
}
@@ -1666,16 +1662,20 @@ err:
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
struct be_queue_info *q;
+ struct be_tx_obj *txo;
+ u8 i;
- q = &adapter->tx_obj.q;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
- be_queue_free(adapter, q);
+ for_all_tx_queues(adapter, txo, i) {
+ q = &txo->q;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
+ be_queue_free(adapter, q);
- q = &adapter->tx_obj.cq;
- if (q->created)
- be_cmd_q_destroy(adapter, q, QTYPE_CQ);
- be_queue_free(adapter, q);
+ q = &txo->cq;
+ if (q->created)
+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+ }
/* Clear any residual events */
be_eq_clean(adapter, &adapter->tx_eq);
@@ -1686,56 +1686,48 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
be_queue_free(adapter, q);
}
+/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
struct be_queue_info *eq, *q, *cq;
+ struct be_tx_obj *txo;
+ u8 i;
adapter->tx_eq.max_eqd = 0;
adapter->tx_eq.min_eqd = 0;
adapter->tx_eq.cur_eqd = 96;
adapter->tx_eq.enable_aic = false;
- /* Alloc Tx Event queue */
+
eq = &adapter->tx_eq.q;
- if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
+ if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
+ sizeof(struct be_eq_entry)))
return -1;
- /* Ask BE to create Tx Event queue */
if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
- goto tx_eq_free;
-
+ goto err;
adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
-
- /* Alloc TX eth compl queue */
- cq = &adapter->tx_obj.cq;
- if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
+ for_all_tx_queues(adapter, txo, i) {
+ cq = &txo->cq;
+ if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
sizeof(struct be_eth_tx_compl)))
- goto tx_eq_destroy;
+ goto err;
- /* Ask BE to create Tx eth compl queue */
- if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
- goto tx_cq_free;
+ if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
+ goto err;
- /* Alloc TX eth queue */
- q = &adapter->tx_obj.q;
- if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
- goto tx_cq_destroy;
+ q = &txo->q;
+ if (be_queue_alloc(adapter, q, TX_Q_LEN,
+ sizeof(struct be_eth_wrb)))
+ goto err;
- /* Ask BE to create Tx eth queue */
- if (be_cmd_txq_create(adapter, q, cq))
- goto tx_q_free;
+ if (be_cmd_txq_create(adapter, q, cq))
+ goto err;
+ }
return 0;
-tx_q_free:
- be_queue_free(adapter, q);
-tx_cq_destroy:
- be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
-tx_cq_free:
- be_queue_free(adapter, cq);
-tx_eq_destroy:
- be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
-tx_eq_free:
- be_queue_free(adapter, eq);
+err:
+ be_tx_queues_destroy(adapter);
return -1;
}
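The error unwinding collapses into a single err: label. That only works because the destroy path above skips anything never brought up — be_cmd_q_destroy() is gated on q->created, and (assuming be_queue_free() tolerates a ring that was never allocated, as the unified cleanup implies) freeing is safe for half-built state too. A compact sketch of the resulting idiom, with the per-step labels gone:

	for_all_tx_queues(adapter, txo, i) {
		if (be_queue_alloc(adapter, &txo->cq, TX_CQ_LEN,
				   sizeof(struct be_eth_tx_compl)))
			goto err;	/* leaving earlier queues half-built is fine */
		/* ... cq/txq creation as in the hunk above ... */
	}
	return 0;
err:
	be_tx_queues_destroy(adapter);	/* tears down only what got created */
	return -1;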
@@ -1746,36 +1738,23 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
int i;
for_all_rx_queues(adapter, rxo, i) {
- q = &rxo->q;
- if (q->created) {
- be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
- /* After the rxq is invalidated, wait for a grace time
- * of 1ms for all dma to end and the flush compl to
- * arrive
- */
- mdelay(1);
- be_rx_q_clean(adapter, rxo);
- }
- be_queue_free(adapter, q);
+ be_queue_free(adapter, &rxo->q);
q = &rxo->cq;
if (q->created)
be_cmd_q_destroy(adapter, q, QTYPE_CQ);
be_queue_free(adapter, q);
- /* Clear any residual events */
q = &rxo->rx_eq.q;
- if (q->created) {
- be_eq_clean(adapter, &rxo->rx_eq);
+ if (q->created)
be_cmd_q_destroy(adapter, q, QTYPE_EQ);
- }
be_queue_free(adapter, q);
}
}
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
- if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
+ if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
return 1 + MAX_RSS_QS; /* one default non-RSS queue */
} else {
@@ -1827,30 +1806,14 @@ static int be_rx_queues_create(struct be_adapter *adapter)
rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
if (rc)
goto err;
- /* Rx Q */
+
+ /* Rx Q - will be created in be_open() */
q = &rxo->q;
rc = be_queue_alloc(adapter, q, RX_Q_LEN,
sizeof(struct be_eth_rx_d));
if (rc)
goto err;
- rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
- BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
- (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
- if (rc)
- goto err;
- }
-
- if (be_multi_rxq(adapter)) {
- u8 rsstable[MAX_RSS_QS];
-
- for_all_rss_queues(adapter, rxo, i)
- rsstable[i] = rxo->rss_id;
-
- rc = be_cmd_rss_config(adapter, rsstable,
- adapter->num_rx_qs - 1);
- if (rc)
- goto err;
}
return 0;
@@ -1876,10 +1839,10 @@ static irqreturn_t be_intx(int irq, void *dev)
if (lancer_chip(adapter)) {
if (event_peek(&adapter->tx_eq))
- tx = event_handle(adapter, &adapter->tx_eq);
+ tx = event_handle(adapter, &adapter->tx_eq, false);
for_all_rx_queues(adapter, rxo, i) {
if (event_peek(&rxo->rx_eq))
- rx |= event_handle(adapter, &rxo->rx_eq);
+ rx |= event_handle(adapter, &rxo->rx_eq, true);
}
if (!(tx || rx))
@@ -1892,11 +1855,11 @@ static irqreturn_t be_intx(int irq, void *dev)
return IRQ_NONE;
if ((1 << adapter->tx_eq.eq_idx & isr))
- event_handle(adapter, &adapter->tx_eq);
+ event_handle(adapter, &adapter->tx_eq, false);
for_all_rx_queues(adapter, rxo, i) {
if ((1 << rxo->rx_eq.eq_idx & isr))
- event_handle(adapter, &rxo->rx_eq);
+ event_handle(adapter, &rxo->rx_eq, true);
}
}
@@ -1908,7 +1871,7 @@ static irqreturn_t be_msix_rx(int irq, void *dev)
struct be_rx_obj *rxo = dev;
struct be_adapter *adapter = rxo->adapter;
- event_handle(adapter, &rxo->rx_eq);
+ event_handle(adapter, &rxo->rx_eq, true);
return IRQ_HANDLED;
}
@@ -1917,7 +1880,7 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
struct be_adapter *adapter = dev;
- event_handle(adapter, &adapter->tx_eq);
+ event_handle(adapter, &adapter->tx_eq, false);
return IRQ_HANDLED;
}
@@ -1978,45 +1941,48 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
struct be_adapter *adapter =
container_of(tx_eq, struct be_adapter, tx_eq);
- struct be_queue_info *txq = &adapter->tx_obj.q;
- struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
+ struct be_tx_obj *txo;
struct be_eth_tx_compl *txcp;
- int tx_compl = 0, mcc_compl, status = 0;
- u16 end_idx, num_wrbs = 0;
+ int tx_compl, mcc_compl, status = 0;
+ u8 i;
+ u16 num_wrbs;
+
+ for_all_tx_queues(adapter, txo, i) {
+ tx_compl = 0;
+ num_wrbs = 0;
+ while ((txcp = be_tx_compl_get(&txo->cq))) {
+ num_wrbs += be_tx_compl_process(adapter, txo,
+ AMAP_GET_BITS(struct amap_eth_tx_compl,
+ wrb_index, txcp));
+ tx_compl++;
+ }
+ if (tx_compl) {
+ be_cq_notify(adapter, txo->cq.id, true, tx_compl);
- while ((txcp = be_tx_compl_get(tx_cq))) {
- end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
- wrb_index, txcp);
- num_wrbs += be_tx_compl_process(adapter, end_idx);
- tx_compl++;
+ atomic_sub(num_wrbs, &txo->q.used);
+
+ /* As Tx wrbs have been freed up, wake up netdev queue
+ * if it was stopped due to lack of tx wrbs. */
+ if (__netif_subqueue_stopped(adapter->netdev, i) &&
+ atomic_read(&txo->q.used) < txo->q.len / 2) {
+ netif_wake_subqueue(adapter->netdev, i);
+ }
+
+ adapter->drv_stats.be_tx_events++;
+ txo->stats.be_tx_compl += tx_compl;
+ }
}
mcc_compl = be_process_mcc(adapter, &status);
- napi_complete(napi);
-
if (mcc_compl) {
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
}
- if (tx_compl) {
- be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
-
- atomic_sub(num_wrbs, &txq->used);
-
- /* As Tx wrbs have been freed up, wake up netdev queue if
- * it was stopped due to lack of tx wrbs.
- */
- if (netif_queue_stopped(adapter->netdev) &&
- atomic_read(&txq->used) < txq->len / 2) {
- netif_wake_queue(adapter->netdev);
- }
-
- tx_stats(adapter)->be_tx_events++;
- tx_stats(adapter)->be_tx_compl += tx_compl;
- }
+ napi_complete(napi);
+ be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
return 1;
}
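With one netdev TX queue per be_tx_obj, flow control becomes per-subqueue: the poll routine above wakes subqueue i once its ring drains below half. A sketch of the transmit-side counterpart this assumes (the be_xmit() stop path is not part of this hunk, so the exact condition below is illustrative only):

	/* be_xmit(), assumed shape: stop only the subqueue that filled up */
	if (atomic_read(&txo->q.used) >= txo->q.len - wrb_cnt)
		netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));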
@@ -2065,6 +2031,7 @@ static void be_worker(struct work_struct *work)
struct be_adapter *adapter =
container_of(work, struct be_adapter, work.work);
struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
int i;
if (!adapter->ue_detected && !lancer_chip(adapter))
@@ -2092,7 +2059,9 @@ static void be_worker(struct work_struct *work)
else
be_cmd_get_stats(adapter, &adapter->stats_cmd);
}
- be_tx_rate_update(adapter);
+
+ for_all_tx_queues(adapter, txo, i)
+ be_tx_rate_update(txo);
for_all_rx_queues(adapter, rxo, i) {
be_rx_rate_update(rxo);
@@ -2290,10 +2259,36 @@ done:
adapter->isr_registered = false;
}
+static void be_rx_queues_clear(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+ struct be_rx_obj *rxo;
+ int i;
+
+ for_all_rx_queues(adapter, rxo, i) {
+ q = &rxo->q;
+ if (q->created) {
+ be_cmd_rxq_destroy(adapter, q);
+ /* After the rxq is invalidated, wait for a grace time
+ * of 1ms for all dma to end and the flush compl to
+ * arrive
+ */
+ mdelay(1);
+ be_rx_q_clean(adapter, rxo);
+ }
+
+ /* Clear any residual events */
+ q = &rxo->rx_eq.q;
+ if (q->created)
+ be_eq_clean(adapter, &rxo->rx_eq);
+ }
+}
+
static int be_close(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_rx_obj *rxo;
+ struct be_tx_obj *txo;
struct be_eq_obj *tx_eq = &adapter->tx_eq;
int vec, i;
@@ -2311,10 +2306,11 @@ static int be_close(struct net_device *netdev)
napi_disable(&tx_eq->napi);
if (lancer_chip(adapter)) {
- be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
for_all_rx_queues(adapter, rxo, i)
be_cq_notify(adapter, rxo->cq.id, false, 0);
+ for_all_tx_queues(adapter, txo, i)
+ be_cq_notify(adapter, txo->cq.id, false, 0);
}
if (msix_enabled(adapter)) {
@@ -2333,8 +2329,43 @@ static int be_close(struct net_device *netdev)
/* Wait for all pending tx completions to arrive so that
* all tx skbs are freed.
*/
- be_tx_compl_clean(adapter);
+ for_all_tx_queues(adapter, txo, i)
+ be_tx_compl_clean(adapter, txo);
+
+ be_rx_queues_clear(adapter);
+ return 0;
+}
+
+static int be_rx_queues_setup(struct be_adapter *adapter)
+{
+ struct be_rx_obj *rxo;
+ int rc, i;
+ u8 rsstable[MAX_RSS_QS];
+
+ for_all_rx_queues(adapter, rxo, i) {
+ rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
+ rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
+ adapter->if_handle,
+ (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
+ if (rc)
+ return rc;
+ }
+
+ if (be_multi_rxq(adapter)) {
+ for_all_rss_queues(adapter, rxo, i)
+ rsstable[i] = rxo->rss_id;
+ rc = be_cmd_rss_config(adapter, rsstable,
+ adapter->num_rx_qs - 1);
+ if (rc)
+ return rc;
+ }
+
+ /* First time posting */
+ for_all_rx_queues(adapter, rxo, i) {
+ be_post_rx_frags(rxo, GFP_KERNEL);
+ napi_enable(&rxo->rx_eq.napi);
+ }
return 0;
}
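Taken together with be_rx_queues_clear() above, the RX rings now follow an open/close lifecycle rather than a probe/remove one. A minimal sketch of the ordering these hunks imply:

	be_rx_queues_create(adapter);	/* probe: alloc rings, create EQs/CQs */
	be_open(netdev);		/* be_rx_queues_setup(): create RXQs, program RSS,
					   post frags, enable NAPI */
	be_close(netdev);		/* be_rx_queues_clear(): destroy RXQs, wait for DMA,
					   flush residual EQ events */
	be_rx_queues_destroy(adapter);	/* remove: destroy CQs/EQs, free memory */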
@@ -2348,10 +2379,10 @@ static int be_open(struct net_device *netdev)
u8 mac_speed;
u16 link_speed;
- for_all_rx_queues(adapter, rxo, i) {
- be_post_rx_frags(rxo, GFP_KERNEL);
- napi_enable(&rxo->rx_eq.napi);
- }
+ status = be_rx_queues_setup(adapter);
+ if (status)
+ goto err;
+
napi_enable(&tx_eq->napi);
be_irq_register(adapter);
@@ -2480,6 +2511,8 @@ static int be_setup(struct be_adapter *adapter)
int status;
u8 mac[ETH_ALEN];
+ be_cmd_req_native_mode(adapter);
+
cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST;
@@ -2539,6 +2572,9 @@ static int be_setup(struct be_adapter *adapter)
if (status != 0)
goto tx_qs_destroy;
+ /* Allow all priorities by default. A GRP5 evt may modify this */
+ adapter->vlan_prio_bmap = 0xff;
+
status = be_mcc_queues_create(adapter);
if (status != 0)
goto rx_qs_destroy;
@@ -2584,6 +2620,8 @@ static int be_clear(struct be_adapter *adapter)
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+ adapter->be3_native = 0;
+
/* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter);
return 0;
@@ -2901,7 +2939,6 @@ static struct net_device_ops be_netdev_ops = {
.ndo_set_mac_address = be_mac_addr_set,
.ndo_change_mtu = be_change_mtu,
.ndo_validate_addr = eth_validate_addr,
- .ndo_vlan_rx_register = be_vlan_register,
.ndo_vlan_rx_add_vid = be_vlan_add_vid,
.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
.ndo_set_vf_mac = be_set_vf_mac,
@@ -2925,12 +2962,9 @@ static void be_netdev_init(struct net_device *netdev)
netdev->features |= netdev->hw_features |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
- netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
+ netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- if (lancer_chip(adapter))
- netdev->vlan_features |= NETIF_F_TSO6;
-
netdev->flags |= IFF_MULTICAST;
/* Default settings for Rx and Tx flow control */
@@ -3185,7 +3219,16 @@ static int be_get_config(struct be_adapter *adapter)
if (status)
return status;
- be_cmd_check_native_mode(adapter);
+ if ((num_vfs && adapter->sriov_enabled) ||
+ (adapter->function_mode & 0x400) ||
+ lancer_chip(adapter) || !be_physfn(adapter)) {
+ adapter->num_tx_qs = 1;
+ netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_qs);
+ } else {
+ adapter->num_tx_qs = MAX_TX_QS;
+ }
+
return 0;
}
@@ -3288,7 +3331,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto disable_dev;
pci_set_master(pdev);
- netdev = alloc_etherdev(sizeof(struct be_adapter));
+ netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
if (netdev == NULL) {
status = -ENOMEM;
goto rel_reg;
@@ -3360,6 +3403,12 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto stats_clean;
+ /* The INTR bit may be set in the card when probed by a kdump kernel
+ * after a crash.
+ */
+ if (!lancer_chip(adapter))
+ be_intr_set(adapter, false);
+
be_msix_enable(adapter);
INIT_DELAYED_WORK(&adapter->work, be_worker);
@@ -3396,6 +3445,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
}
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
+
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index a1b8c8b8010..45e45e8d3d6 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -7,6 +7,7 @@
* May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
* dynamic procfs inode.
*/
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -1014,7 +1015,6 @@ static void bmac_set_multicast(struct net_device *dev)
static void bmac_set_multicast(struct net_device *dev)
{
struct netdev_hw_addr *ha;
- char *addrs;
int i;
unsigned short rx_cfg;
u32 crc;
@@ -1038,12 +1038,7 @@ static void bmac_set_multicast(struct net_device *dev)
for(i = 0; i < 4; i++) hash_table[i] = 0;
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
-
- if(!(*addrs & 1))
- continue;
-
- crc = ether_crc_le(6, addrs);
+ crc = ether_crc_le(6, ha->addr);
crc >>= 26;
hash_table[crc >> 4] |= 1 << (crc & 0xf);
}
diff --git a/drivers/net/bna/bfa_cee.c b/drivers/net/bna/bfa_cee.c
index f7b789a3b21..39e5ab9fde5 100644
--- a/drivers/net/bna/bfa_cee.c
+++ b/drivers/net/bna/bfa_cee.c
@@ -223,44 +223,56 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
}
/**
- * bfa_cee_hbfail()
+ * bfa_cee_notify()
*
* @brief CEE module heart-beat failure handler.
+ * @brief CEE module IOC event handler.
*
- * @param[in] Pointer to the CEE module data structure.
+ * @param[in] IOC event type
*
* @return void
*/
static void
-bfa_cee_hbfail(void *arg)
+bfa_cee_notify(void *arg, enum bfa_ioc_event event)
{
struct bfa_cee *cee;
cee = (struct bfa_cee *) arg;
- if (cee->get_attr_pending == true) {
- cee->get_attr_status = BFA_STATUS_FAILED;
- cee->get_attr_pending = false;
- if (cee->cbfn.get_attr_cbfn) {
- cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
- BFA_STATUS_FAILED);
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (cee->get_attr_pending == true) {
+ cee->get_attr_status = BFA_STATUS_FAILED;
+ cee->get_attr_pending = false;
+ if (cee->cbfn.get_attr_cbfn) {
+ cee->cbfn.get_attr_cbfn(
+ cee->cbfn.get_attr_cbarg,
+ BFA_STATUS_FAILED);
+ }
}
- }
- if (cee->get_stats_pending == true) {
- cee->get_stats_status = BFA_STATUS_FAILED;
- cee->get_stats_pending = false;
- if (cee->cbfn.get_stats_cbfn) {
- cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
- BFA_STATUS_FAILED);
+ if (cee->get_stats_pending == true) {
+ cee->get_stats_status = BFA_STATUS_FAILED;
+ cee->get_stats_pending = false;
+ if (cee->cbfn.get_stats_cbfn) {
+ cee->cbfn.get_stats_cbfn(
+ cee->cbfn.get_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
}
- }
- if (cee->reset_stats_pending == true) {
- cee->reset_stats_status = BFA_STATUS_FAILED;
- cee->reset_stats_pending = false;
- if (cee->cbfn.reset_stats_cbfn) {
- cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
- BFA_STATUS_FAILED);
+ if (cee->reset_stats_pending == true) {
+ cee->reset_stats_status = BFA_STATUS_FAILED;
+ cee->reset_stats_pending = false;
+ if (cee->cbfn.reset_stats_cbfn) {
+ cee->cbfn.reset_stats_cbfn(
+ cee->cbfn.reset_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
}
+ break;
+
+ default:
+ break;
}
}
@@ -286,6 +298,7 @@ bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
cee->ioc = ioc;
bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
- bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
- bfa_nw_ioc_hbfail_register(cee->ioc, &cee->hbfail);
+ bfa_q_qe_init(&cee->ioc_notify);
+ bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
+ bfa_nw_ioc_notify_register(cee->ioc, &cee->ioc_notify);
}
diff --git a/drivers/net/bna/bfa_cee.h b/drivers/net/bna/bfa_cee.h
index 20543d15b64..58d54e98d59 100644
--- a/drivers/net/bna/bfa_cee.h
+++ b/drivers/net/bna/bfa_cee.h
@@ -25,7 +25,6 @@
typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status);
typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status);
typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status);
-typedef void (*bfa_cee_hbfail_cbfn_t) (void *dev, enum bfa_status status);
struct bfa_cee_cbfn {
bfa_cee_get_attr_cbfn_t get_attr_cbfn;
@@ -45,7 +44,7 @@ struct bfa_cee {
enum bfa_status get_stats_status;
enum bfa_status reset_stats_status;
struct bfa_cee_cbfn cbfn;
- struct bfa_ioc_hbfail_notify hbfail;
+ struct bfa_ioc_notify ioc_notify;
struct bfa_cee_attr *attr;
struct bfa_cee_stats *stats;
struct bfa_dma attr_dma;
diff --git a/drivers/net/bna/bfa_sm.h b/drivers/net/bna/bfa_cs.h
index 46462c49b6f..3da1a946ccd 100644
--- a/drivers/net/bna/bfa_sm.h
+++ b/drivers/net/bna/bfa_cs.h
@@ -11,20 +11,24 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
/**
- * @file bfasm.h State machine defines
+ * @file bfa_cs.h BFA common services
*/
-#ifndef __BFA_SM_H__
-#define __BFA_SM_H__
+#ifndef __BFA_CS_H__
+#define __BFA_CS_H__
#include "cna.h"
+/**
+ * @ BFA state machine interfaces
+ */
+
typedef void (*bfa_sm_t)(void *sm, int event);
/**
@@ -33,7 +37,7 @@ typedef void (*bfa_sm_t)(void *sm, int event);
* otype - object type, eg. struct bfa_ioc
* etype - object type, eg. enum ioc_event
*/
-#define bfa_sm_state_decl(oc, st, otype, etype) \
+#define bfa_sm_state_decl(oc, st, otype, etype) \
static void oc ## _sm_ ## st(otype * fsm, etype event)
#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
@@ -49,7 +53,7 @@ struct bfa_sm_table {
int state; /*!< state machine encoding */
char *name; /*!< state name for display */
};
-#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
+#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
/**
* State machine with entry actions.
@@ -62,18 +66,18 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
* otype - object type, eg. struct bfa_ioc
* etype - object type, eg. enum ioc_event
*/
-#define bfa_fsm_state_decl(oc, st, otype, etype) \
- static void oc ## _sm_ ## st(otype * fsm, etype event); \
+#define bfa_fsm_state_decl(oc, st, otype, etype) \
+ static void oc ## _sm_ ## st(otype * fsm, etype event); \
static void oc ## _sm_ ## st ## _entry(otype * fsm)
-#define bfa_fsm_set_state(_fsm, _state) do { \
- (_fsm)->fsm = (bfa_fsm_t)(_state); \
- _state ## _entry(_fsm); \
+#define bfa_fsm_set_state(_fsm, _state) do { \
+ (_fsm)->fsm = (bfa_fsm_t)(_state); \
+ _state ## _entry(_fsm); \
} while (0)
#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
-#define bfa_fsm_cmp_state(_fsm, _state) \
+#define bfa_fsm_cmp_state(_fsm, _state) \
((_fsm)->fsm == (bfa_fsm_t)(_state))
static inline int
@@ -85,4 +89,52 @@ bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
i++;
return smt[i].state;
}
-#endif
+
+/**
+ * @ Generic wait counter.
+ */
+
+typedef void (*bfa_wc_resume_t) (void *cbarg);
+
+struct bfa_wc {
+ bfa_wc_resume_t wc_resume;
+ void *wc_cbarg;
+ int wc_count;
+};
+
+static inline void
+bfa_wc_up(struct bfa_wc *wc)
+{
+ wc->wc_count++;
+}
+
+static inline void
+bfa_wc_down(struct bfa_wc *wc)
+{
+ wc->wc_count--;
+ if (wc->wc_count == 0)
+ wc->wc_resume(wc->wc_cbarg);
+}
+
+/**
+ * Initialize a waiting counter.
+ */
+static inline void
+bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
+{
+ wc->wc_resume = wc_resume;
+ wc->wc_cbarg = wc_cbarg;
+ wc->wc_count = 0;
+ bfa_wc_up(wc);
+}
+
+/**
+ * Wait for counter to reach zero
+ */
+static inline void
+bfa_wc_wait(struct bfa_wc *wc)
+{
+ bfa_wc_down(wc);
+}
+
+#endif /* __BFA_CS_H__ */
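The wait counter moved here from bfa_wc.h is a tiny completion-style primitive: it starts at one, is bumped once per outstanding operation, and the resume callback fires when the final bfa_wc_down() brings the count back to zero. A minimal usage sketch (everything other than the bfa_wc_* API is a made-up placeholder):

	static void all_done(void *cbarg)
	{
		/* runs once every outstanding piece has completed */
	}

	struct bfa_wc wc;

	bfa_wc_init(&wc, all_done, ctx);	/* count starts at 1 */
	bfa_wc_up(&wc);				/* one up per async operation */
	start_async_op(ctx, &wc);		/* its completion calls bfa_wc_down(&wc) */
	bfa_wc_wait(&wc);			/* drop the initial ref; all_done() fires
						   when the count reaches zero */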
diff --git a/drivers/net/bna/bfa_defs.h b/drivers/net/bna/bfa_defs.h
index 2ea0dfe1ced..b080b3698f4 100644
--- a/drivers/net/bna/bfa_defs.h
+++ b/drivers/net/bna/bfa_defs.h
@@ -80,7 +80,7 @@ struct bfa_adapter_attr {
enum {
BFA_IOC_DRIVER_LEN = 16,
- BFA_IOC_CHIP_REV_LEN = 8,
+ BFA_IOC_CHIP_REV_LEN = 8,
};
/**
@@ -153,6 +153,7 @@ struct bfa_ioc_drv_stats {
u32 enable_reqs;
u32 disable_replies;
u32 enable_replies;
+ u32 rsvd;
};
/**
@@ -174,7 +175,7 @@ enum bfa_ioc_type {
*/
struct bfa_ioc_attr {
enum bfa_ioc_type ioc_type;
- enum bfa_ioc_state state; /*!< IOC state */
+ enum bfa_ioc_state state; /*!< IOC state */
struct bfa_adapter_attr adapter_attr; /*!< HBA attributes */
struct bfa_ioc_driver_attr driver_attr; /*!< driver attr */
struct bfa_ioc_pci_attr pci_attr;
diff --git a/drivers/net/bna/bfa_defs_mfg_comm.h b/drivers/net/bna/bfa_defs_mfg_comm.h
index fdd67761836..885ef3afdd4 100644
--- a/drivers/net/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/bna/bfa_defs_mfg_comm.h
@@ -192,14 +192,14 @@ do { \
* VPD vendor tag
*/
enum {
- BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
- BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
- BFA_MFG_VPD_HP = 2, /*!< vendor HP */
- BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */
- BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */
- BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */
- BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */
- BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
+ BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
+ BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
+ BFA_MFG_VPD_HP = 2, /*!< vendor HP */
+ BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */
+ BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */
+ BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */
+ BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */
+ BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
};
/**
@@ -212,8 +212,8 @@ struct bfa_mfg_vpd {
u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */
u8 chksum; /*!< u8 checksum */
u8 vendor; /*!< vendor */
- u8 len; /*!< vpd data length excluding header */
- u8 rsv;
+ u8 len; /*!< vpd data length excluding header */
+ u8 rsv;
u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
};
diff --git a/drivers/net/bna/bfa_defs_status.h b/drivers/net/bna/bfa_defs_status.h
index af951126375..7c5fe6c2e80 100644
--- a/drivers/net/bna/bfa_defs_status.h
+++ b/drivers/net/bna/bfa_defs_status.h
@@ -25,95 +25,95 @@
* comments are supported
*/
enum bfa_status {
- BFA_STATUS_OK = 0,
- BFA_STATUS_FAILED = 1,
- BFA_STATUS_EINVAL = 2,
- BFA_STATUS_ENOMEM = 3,
- BFA_STATUS_ENOSYS = 4,
- BFA_STATUS_ETIMER = 5,
- BFA_STATUS_EPROTOCOL = 6,
- BFA_STATUS_ENOFCPORTS = 7,
- BFA_STATUS_NOFLASH = 8,
- BFA_STATUS_BADFLASH = 9,
- BFA_STATUS_SFP_UNSUPP = 10,
+ BFA_STATUS_OK = 0,
+ BFA_STATUS_FAILED = 1,
+ BFA_STATUS_EINVAL = 2,
+ BFA_STATUS_ENOMEM = 3,
+ BFA_STATUS_ENOSYS = 4,
+ BFA_STATUS_ETIMER = 5,
+ BFA_STATUS_EPROTOCOL = 6,
+ BFA_STATUS_ENOFCPORTS = 7,
+ BFA_STATUS_NOFLASH = 8,
+ BFA_STATUS_BADFLASH = 9,
+ BFA_STATUS_SFP_UNSUPP = 10,
BFA_STATUS_UNKNOWN_VFID = 11,
BFA_STATUS_DATACORRUPTED = 12,
- BFA_STATUS_DEVBUSY = 13,
- BFA_STATUS_ABORTED = 14,
- BFA_STATUS_NODEV = 15,
- BFA_STATUS_HDMA_FAILED = 16,
+ BFA_STATUS_DEVBUSY = 13,
+ BFA_STATUS_ABORTED = 14,
+ BFA_STATUS_NODEV = 15,
+ BFA_STATUS_HDMA_FAILED = 16,
BFA_STATUS_FLASH_BAD_LEN = 17,
BFA_STATUS_UNKNOWN_LWWN = 18,
BFA_STATUS_UNKNOWN_RWWN = 19,
- BFA_STATUS_FCPT_LS_RJT = 20,
+ BFA_STATUS_FCPT_LS_RJT = 20,
BFA_STATUS_VPORT_EXISTS = 21,
- BFA_STATUS_VPORT_MAX = 22,
+ BFA_STATUS_VPORT_MAX = 22,
BFA_STATUS_UNSUPP_SPEED = 23,
- BFA_STATUS_INVLD_DFSZ = 24,
- BFA_STATUS_CNFG_FAILED = 25,
- BFA_STATUS_CMD_NOTSUPP = 26,
- BFA_STATUS_NO_ADAPTER = 27,
- BFA_STATUS_LINKDOWN = 28,
- BFA_STATUS_FABRIC_RJT = 29,
+ BFA_STATUS_INVLD_DFSZ = 24,
+ BFA_STATUS_CNFG_FAILED = 25,
+ BFA_STATUS_CMD_NOTSUPP = 26,
+ BFA_STATUS_NO_ADAPTER = 27,
+ BFA_STATUS_LINKDOWN = 28,
+ BFA_STATUS_FABRIC_RJT = 29,
BFA_STATUS_UNKNOWN_VWWN = 30,
BFA_STATUS_NSLOGIN_FAILED = 31,
- BFA_STATUS_NO_RPORTS = 32,
+ BFA_STATUS_NO_RPORTS = 32,
BFA_STATUS_NSQUERY_FAILED = 33,
BFA_STATUS_PORT_OFFLINE = 34,
BFA_STATUS_RPORT_OFFLINE = 35,
BFA_STATUS_TGTOPEN_FAILED = 36,
- BFA_STATUS_BAD_LUNS = 37,
- BFA_STATUS_IO_FAILURE = 38,
- BFA_STATUS_NO_FABRIC = 39,
- BFA_STATUS_EBADF = 40,
- BFA_STATUS_EINTR = 41,
- BFA_STATUS_EIO = 42,
- BFA_STATUS_ENOTTY = 43,
- BFA_STATUS_ENXIO = 44,
- BFA_STATUS_EFOPEN = 45,
+ BFA_STATUS_BAD_LUNS = 37,
+ BFA_STATUS_IO_FAILURE = 38,
+ BFA_STATUS_NO_FABRIC = 39,
+ BFA_STATUS_EBADF = 40,
+ BFA_STATUS_EINTR = 41,
+ BFA_STATUS_EIO = 42,
+ BFA_STATUS_ENOTTY = 43,
+ BFA_STATUS_ENXIO = 44,
+ BFA_STATUS_EFOPEN = 45,
BFA_STATUS_VPORT_WWN_BP = 46,
BFA_STATUS_PORT_NOT_DISABLED = 47,
- BFA_STATUS_BADFRMHDR = 48,
- BFA_STATUS_BADFRMSZ = 49,
- BFA_STATUS_MISSINGFRM = 50,
- BFA_STATUS_LINKTIMEOUT = 51,
+ BFA_STATUS_BADFRMHDR = 48,
+ BFA_STATUS_BADFRMSZ = 49,
+ BFA_STATUS_MISSINGFRM = 50,
+ BFA_STATUS_LINKTIMEOUT = 51,
BFA_STATUS_NO_FCPIM_NEXUS = 52,
BFA_STATUS_CHECKSUM_FAIL = 53,
- BFA_STATUS_GZME_FAILED = 54,
+ BFA_STATUS_GZME_FAILED = 54,
BFA_STATUS_SCSISTART_REQD = 55,
- BFA_STATUS_IOC_FAILURE = 56,
- BFA_STATUS_INVALID_WWN = 57,
- BFA_STATUS_MISMATCH = 58,
- BFA_STATUS_IOC_ENABLED = 59,
+ BFA_STATUS_IOC_FAILURE = 56,
+ BFA_STATUS_INVALID_WWN = 57,
+ BFA_STATUS_MISMATCH = 58,
+ BFA_STATUS_IOC_ENABLED = 59,
BFA_STATUS_ADAPTER_ENABLED = 60,
- BFA_STATUS_IOC_NON_OP = 61,
+ BFA_STATUS_IOC_NON_OP = 61,
BFA_STATUS_ADDR_MAP_FAILURE = 62,
- BFA_STATUS_SAME_NAME = 63,
- BFA_STATUS_PENDING = 64,
- BFA_STATUS_8G_SPD = 65,
- BFA_STATUS_4G_SPD = 66,
+ BFA_STATUS_SAME_NAME = 63,
+ BFA_STATUS_PENDING = 64,
+ BFA_STATUS_8G_SPD = 65,
+ BFA_STATUS_4G_SPD = 66,
BFA_STATUS_AD_IS_ENABLE = 67,
- BFA_STATUS_EINVAL_TOV = 68,
+ BFA_STATUS_EINVAL_TOV = 68,
BFA_STATUS_EINVAL_QDEPTH = 69,
BFA_STATUS_VERSION_FAIL = 70,
- BFA_STATUS_DIAG_BUSY = 71,
- BFA_STATUS_BEACON_ON = 72,
- BFA_STATUS_BEACON_OFF = 73,
- BFA_STATUS_LBEACON_ON = 74,
- BFA_STATUS_LBEACON_OFF = 75,
+ BFA_STATUS_DIAG_BUSY = 71,
+ BFA_STATUS_BEACON_ON = 72,
+ BFA_STATUS_BEACON_OFF = 73,
+ BFA_STATUS_LBEACON_ON = 74,
+ BFA_STATUS_LBEACON_OFF = 75,
BFA_STATUS_PORT_NOT_INITED = 76,
BFA_STATUS_RPSC_ENABLED = 77,
- BFA_STATUS_ENOFSAVE = 78,
- BFA_STATUS_BAD_FILE = 79,
- BFA_STATUS_RLIM_EN = 80,
- BFA_STATUS_RLIM_DIS = 81,
- BFA_STATUS_IOC_DISABLED = 82,
- BFA_STATUS_ADAPTER_DISABLED = 83,
- BFA_STATUS_BIOS_DISABLED = 84,
- BFA_STATUS_AUTH_ENABLED = 85,
- BFA_STATUS_AUTH_DISABLED = 86,
- BFA_STATUS_ERROR_TRL_ENABLED = 87,
- BFA_STATUS_ERROR_QOS_ENABLED = 88,
+ BFA_STATUS_ENOFSAVE = 78,
+ BFA_STATUS_BAD_FILE = 79,
+ BFA_STATUS_RLIM_EN = 80,
+ BFA_STATUS_RLIM_DIS = 81,
+ BFA_STATUS_IOC_DISABLED = 82,
+ BFA_STATUS_ADAPTER_DISABLED = 83,
+ BFA_STATUS_BIOS_DISABLED = 84,
+ BFA_STATUS_AUTH_ENABLED = 85,
+ BFA_STATUS_AUTH_DISABLED = 86,
+ BFA_STATUS_ERROR_TRL_ENABLED = 87,
+ BFA_STATUS_ERROR_QOS_ENABLED = 88,
BFA_STATUS_NO_SFP_DEV = 89,
BFA_STATUS_MEMTEST_FAILED = 90,
BFA_STATUS_INVALID_DEVID = 91,
@@ -190,7 +190,7 @@ enum bfa_status {
BFA_STATUS_FLASH_CKFAIL = 162,
BFA_STATUS_TRUNK_UNSUPP = 163,
BFA_STATUS_TRUNK_ENABLED = 164,
- BFA_STATUS_TRUNK_DISABLED = 165,
+ BFA_STATUS_TRUNK_DISABLED = 165,
BFA_STATUS_TRUNK_ERROR_TRL_ENABLED = 166,
BFA_STATUS_BOOT_CODE_UPDATED = 167,
BFA_STATUS_BOOT_VERSION = 168,
@@ -198,8 +198,8 @@ enum bfa_status {
BFA_STATUS_INVALID_CARDTYPE = 170,
BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 171,
BFA_STATUS_IM_VLAN_OVER_TEAM_DELETE_FAILED = 172,
- BFA_STATUS_ETHBOOT_ENABLED = 173,
- BFA_STATUS_ETHBOOT_DISABLED = 174,
+ BFA_STATUS_ETHBOOT_ENABLED = 173,
+ BFA_STATUS_ETHBOOT_DISABLED = 174,
BFA_STATUS_IOPROFILE_OFF = 175,
BFA_STATUS_NO_PORT_INSTANCE = 176,
BFA_STATUS_BOOT_CODE_TIMEDOUT = 177,
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index fcb9bb3169e..126b0aac9f9 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -58,6 +58,7 @@ static bool bfa_nw_auto_recover = true;
/*
* forward declarations
*/
+static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
@@ -68,9 +69,10 @@ static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
-static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
+static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
+static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
@@ -107,7 +109,7 @@ enum ioc_event {
IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
IOC_E_DISABLED = 7, /*!< f/w disabled */
IOC_E_INITFAILED = 8, /*!< failure notice by iocpf sm */
- IOC_E_PFAILED = 9, /*!< failure notice by iocpf sm */
+ IOC_E_PFFAILED = 9, /*!< failure notice by iocpf sm */
IOC_E_HBFAIL = 10, /*!< heartbeat failure */
IOC_E_HWERROR = 11, /*!< hardware error interrupt */
IOC_E_TIMEOUT = 12, /*!< timeout */
@@ -156,7 +158,7 @@ enum iocpf_event {
IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
IOCPF_E_STOP = 3, /*!< stop on driver detach */
- IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
+ IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */
IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */
IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */
@@ -239,7 +241,7 @@ bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -272,7 +274,7 @@ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -294,12 +296,12 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
break;
- case IOC_E_PFAILED:
+ case IOC_E_PFFAILED:
/* !!! fall through !!! */
case IOC_E_HWERROR:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
- if (event != IOC_E_PFAILED)
+ if (event != IOC_E_PFFAILED)
bfa_iocpf_initfail(ioc);
break;
@@ -316,7 +318,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -344,14 +346,14 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
break;
- case IOC_E_PFAILED:
+ case IOC_E_PFFAILED:
case IOC_E_HWERROR:
del_timer(&ioc->ioc_timer);
/* fall through */
case IOC_E_TIMEOUT:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
- if (event != IOC_E_PFAILED)
+ if (event != IOC_E_PFFAILED)
bfa_iocpf_getattrfail(ioc);
break;
@@ -364,7 +366,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -387,7 +389,7 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
- case IOC_E_PFAILED:
+ case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_ioc_hb_stop(ioc);
/* !!! fall through !!! */
@@ -398,12 +400,12 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
else
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
- if (event != IOC_E_PFAILED)
+ if (event != IOC_E_PFFAILED)
bfa_iocpf_fail(ioc);
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -434,7 +436,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -465,7 +467,7 @@ bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -485,13 +487,13 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
break;
- case IOC_E_PFAILED:
+ case IOC_E_PFFAILED:
case IOC_E_HWERROR:
/**
* Initialization retry failed.
*/
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
- if (event != IOC_E_PFAILED)
+ if (event != IOC_E_PFFAILED)
bfa_iocpf_initfail(ioc);
break;
@@ -512,7 +514,7 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -546,7 +548,7 @@ bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -579,7 +581,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(iocpf->ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -589,6 +591,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
+ bfa_ioc_hw_sem_init(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
}
@@ -631,7 +634,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -675,7 +678,7 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -714,7 +717,7 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -762,7 +765,7 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -813,7 +816,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -856,7 +859,7 @@ bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -898,7 +901,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -927,7 +930,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -937,6 +940,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
+ bfa_ioc_mbox_flush(iocpf->ioc);
bfa_ioc_pf_disabled(iocpf->ioc);
}
@@ -957,7 +961,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -1009,7 +1013,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -1038,7 +1042,7 @@ bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -1053,7 +1057,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
/**
* Flush any queued up mailbox requests.
*/
- bfa_ioc_mbox_hbfail(iocpf->ioc);
+ bfa_ioc_mbox_flush(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
}
@@ -1093,7 +1097,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -1115,7 +1119,7 @@ bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(iocpf->ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -1123,23 +1127,28 @@ bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
* BFA IOC private functions
*/
+/**
+ * Notify common modules registered for notification.
+ */
static void
-bfa_ioc_disable_comp(struct bfa_ioc *ioc)
+bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
+ struct bfa_ioc_notify *notify;
struct list_head *qe;
- struct bfa_ioc_hbfail_notify *notify;
-
- ioc->cbfn->disable_cbfn(ioc->bfa);
- /**
- * Notify common modules registered for notification.
- */
- list_for_each(qe, &ioc->hb_notify_q) {
- notify = (struct bfa_ioc_hbfail_notify *) qe;
- notify->cbfn(notify->cbarg);
+ list_for_each(qe, &ioc->notify_q) {
+ notify = (struct bfa_ioc_notify *)qe;
+ notify->cbfn(notify->cbarg, event);
}
}
+static void
+bfa_ioc_disable_comp(struct bfa_ioc *ioc)
+{
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
+}
+
bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
@@ -1169,6 +1178,29 @@ bfa_nw_ioc_sem_release(void __iomem *sem_reg)
}
static void
+bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_image_hdr fwhdr;
+ u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+ if (fwstate == BFI_IOC_UNINIT)
+ return;
+
+ bfa_nw_ioc_fwver_get(ioc, &fwhdr);
+
+ if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
+ return;
+
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+
+ /*
+ * Try to lock and then unlock the semaphore.
+ */
+ readl(ioc->ioc_regs.ioc_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+}
+
+static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
u32 r32;
@@ -1638,7 +1670,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
* Cleanup any pending requests.
*/
static void
-bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
+bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
struct bfa_mbox_cmd *cmd;
@@ -1650,17 +1682,11 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
- struct list_head *qe;
- struct bfa_ioc_hbfail_notify *notify;
-
/**
* Notify driver and common modules registered for notification.
*/
ioc->cbfn->hbfail_cbfn(ioc->bfa);
- list_for_each(qe, &ioc->hb_notify_q) {
- notify = (struct bfa_ioc_hbfail_notify *) qe;
- notify->cbfn(notify->cbarg);
- }
+ bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
}
static void
@@ -1684,7 +1710,7 @@ bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
- bfa_fsm_send_event(ioc, IOC_E_PFAILED);
+ bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}
static void
@@ -1839,7 +1865,7 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
ioc->iocpf.ioc = ioc;
bfa_ioc_mbox_attach(ioc);
- INIT_LIST_HEAD(&ioc->hb_notify_q);
+ INIT_LIST_HEAD(&ioc->notify_q);
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(ioc, IOC_E_RESET);
@@ -1969,6 +1995,8 @@ bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
* mailbox is free -- queue command to firmware
*/
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+
+ return;
}
/**
@@ -2001,18 +2029,30 @@ bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
+ bfa_ioc_stats(ioc, ioc_hbfails);
+ bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/**
+ * return true if IOC is disabled
+ */
+bool
+bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
+ bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
+}
+
+/**
* Add to IOC heartbeat failure notification queue. To be used by common
* modules such as cee, port, diag.
*/
void
-bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
- struct bfa_ioc_hbfail_notify *notify)
+bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
+ struct bfa_ioc_notify *notify)
{
- list_add_tail(&notify->qe, &ioc->hb_notify_q);
+ list_add_tail(&notify->qe, &ioc->notify_q);
}
#define BFA_MFG_NAME "Brocade"
@@ -2217,6 +2257,7 @@ bfa_ioc_recover(struct bfa_ioc *ioc)
{
pr_crit("Heart Beat of IOC has failed\n");
bfa_ioc_stats(ioc, ioc_hbfails);
+ bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
index bd48abee781..bda866ba6e9 100644
--- a/drivers/net/bna/bfa_ioc.h
+++ b/drivers/net/bna/bfa_ioc.h
@@ -19,7 +19,7 @@
#ifndef __BFA_IOC_H__
#define __BFA_IOC_H__
-#include "bfa_sm.h"
+#include "bfa_cs.h"
#include "bfi.h"
#include "cna.h"
@@ -97,9 +97,12 @@ struct bfa_ioc_regs {
/**
* IOC Mailbox structures
*/
+typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg);
struct bfa_mbox_cmd {
struct list_head qe;
- u32 msg[BFI_IOC_MSGSZ];
+ bfa_mbox_cmd_cbfn_t cbfn;
+ void *cbarg;
+ u32 msg[BFI_IOC_MSGSZ];
};
/**
@@ -130,6 +133,23 @@ struct bfa_ioc_cbfn {
};
/**
+ * IOC event notification mechanism.
+ */
+enum bfa_ioc_event {
+ BFA_IOC_E_ENABLED = 1,
+ BFA_IOC_E_DISABLED = 2,
+ BFA_IOC_E_FAILED = 3,
+};
+
+typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event);
+
+struct bfa_ioc_notify {
+ struct list_head qe;
+ bfa_ioc_notify_cbfn_t cbfn;
+ void *cbarg;
+};
+
+/**
* Heartbeat failure notification queue element.
*/
struct bfa_ioc_hbfail_notify {
@@ -141,7 +161,7 @@ struct bfa_ioc_hbfail_notify {
/**
* Initialize a heartbeat failure notification structure
*/
-#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \
+#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
(__notify)->cbfn = (__cbfn); \
(__notify)->cbarg = (__cbarg); \
} while (0)
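This generalizes the old heartbeat-failure list into an IOC event queue: a client fills in a bfa_ioc_notify element, registers it, and bfa_ioc_event_notify() walks the queue on disable/failure. A sketch mirroring what the bfa_cee.c hunk earlier in this patch does (the callback and context names here are placeholders):

	static void my_ioc_notify(void *cbarg, enum bfa_ioc_event event)
	{
		switch (event) {
		case BFA_IOC_E_DISABLED:
		case BFA_IOC_E_FAILED:
			/* fail any requests pending against the IOC */
			break;
		default:
			break;
		}
	}

	struct bfa_ioc_notify ioc_notify;

	bfa_ioc_notify_init(&ioc_notify, my_ioc_notify, my_ctx);
	bfa_nw_ioc_notify_register(ioc, &ioc_notify);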
@@ -155,25 +175,25 @@ struct bfa_iocpf {
struct bfa_ioc {
bfa_fsm_t fsm;
- struct bfa *bfa;
- struct bfa_pcidev pcidev;
- struct timer_list ioc_timer;
- struct timer_list iocpf_timer;
- struct timer_list sem_timer;
+ struct bfa *bfa;
+ struct bfa_pcidev pcidev;
+ struct timer_list ioc_timer;
+ struct timer_list iocpf_timer;
+ struct timer_list sem_timer;
struct timer_list hb_timer;
u32 hb_count;
- struct list_head hb_notify_q;
+ struct list_head notify_q;
void *dbg_fwsave;
int dbg_fwsave_len;
bool dbg_fwsave_once;
enum bfi_mclass ioc_mc;
- struct bfa_ioc_regs ioc_regs;
+ struct bfa_ioc_regs ioc_regs;
struct bfa_ioc_drv_stats stats;
bool fcmode;
bool ctdev;
bool cna;
bool pllinit;
- bool stats_busy; /*!< outstanding stats */
+ bool stats_busy; /*!< outstanding stats */
u8 port_id;
struct bfa_dma attr_dma;
@@ -217,9 +237,11 @@ struct bfa_ioc_hwif {
BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
+#define bfa_ioc_stats_hb_count(_ioc, _hb_count) \
+ ((_ioc)->stats.hb_count = (_hb_count))
#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
#define BFA_IOC_FWIMG_TYPE(__ioc) \
- (((__ioc)->ctdev) ? \
+ (((__ioc)->ctdev) ? \
(((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
BFI_IMAGE_CB_FC)
#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
@@ -263,9 +285,10 @@ void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
+bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
-void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
- struct bfa_ioc_hbfail_notify *notify);
+void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
+ struct bfa_ioc_notify *notify);
bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
void bfa_nw_ioc_sem_release(void __iomem *sem_reg);
void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc);
diff --git a/drivers/net/bna/bfa_wc.h b/drivers/net/bna/bfa_wc.h
deleted file mode 100644
index d0e4caee67b..00000000000
--- a/drivers/net/bna/bfa_wc.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-
-/**
- * @file bfa_wc.h Generic wait counter.
- */
-
-#ifndef __BFA_WC_H__
-#define __BFA_WC_H__
-
-typedef void (*bfa_wc_resume_t) (void *cbarg);
-
-struct bfa_wc {
- bfa_wc_resume_t wc_resume;
- void *wc_cbarg;
- int wc_count;
-};
-
-static inline void
-bfa_wc_up(struct bfa_wc *wc)
-{
- wc->wc_count++;
-}
-
-static inline void
-bfa_wc_down(struct bfa_wc *wc)
-{
- wc->wc_count--;
- if (wc->wc_count == 0)
- wc->wc_resume(wc->wc_cbarg);
-}
-
-/**
- * Initialize a waiting counter.
- */
-static inline void
-bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
-{
- wc->wc_resume = wc_resume;
- wc->wc_cbarg = wc_cbarg;
- wc->wc_count = 0;
- bfa_wc_up(wc);
-}
-
-/**
- * Wait for counter to reach zero
- */
-static inline void
-bfa_wc_wait(struct bfa_wc *wc)
-{
- bfa_wc_down(wc);
-}
-
-#endif
diff --git a/drivers/net/bna/bfi.h b/drivers/net/bna/bfi.h
index 6050379526f..088211c2724 100644
--- a/drivers/net/bna/bfi.h
+++ b/drivers/net/bna/bfi.h
@@ -51,13 +51,13 @@ struct bfi_mhdr {
};
#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
- (_mh).msg_class = (_mc); \
+ (_mh).msg_class = (_mc); \
(_mh).msg_id = (_op); \
(_mh).mtag.h2i.lpu_id = (_lpuid); \
} while (0)
#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
- (_mh).msg_class = (_mc); \
+ (_mh).msg_class = (_mc); \
(_mh).msg_id = (_op); \
(_mh).mtag.i2htok = (_i2htok); \
} while (0)
@@ -66,7 +66,7 @@ struct bfi_mhdr {
* Message opcodes: 0-127 to firmware, 128-255 to host
*/
#define BFI_I2H_OPCODE_BASE 128
-#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
+#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
/**
****************************************************************************
@@ -186,7 +186,7 @@ enum bfi_mclass {
#define BFI_BOOT_TYPE_OFF 8
#define BFI_BOOT_LOADER_OFF 12
-#define BFI_BOOT_TYPE_NORMAL 0
+#define BFI_BOOT_TYPE_NORMAL 0
#define BFI_BOOT_TYPE_FLASH 1
#define BFI_BOOT_TYPE_MEMTEST 2
@@ -211,9 +211,9 @@ enum bfi_ioc_h2i_msgs {
enum bfi_ioc_i2h_msgs {
BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
- BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
- BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
- BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
+ BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
+ BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
+ BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
};
@@ -289,6 +289,12 @@ struct bfi_ioc_image_hdr {
u32 md5sum[BFI_IOC_MD5SUM_SZ];
};
+enum bfi_fwboot_type {
+ BFI_FWBOOT_TYPE_NORMAL = 0,
+ BFI_FWBOOT_TYPE_FLASH = 1,
+ BFI_FWBOOT_TYPE_MEMTEST = 2,
+};
+
/**
* BFI_IOC_I2H_READY_EVENT message
*/
diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
index a287f89b028..21e9155d6e5 100644
--- a/drivers/net/bna/bna.h
+++ b/drivers/net/bna/bna.h
@@ -13,7 +13,7 @@
#ifndef __BNA_H__
#define __BNA_H__
-#include "bfa_wc.h"
+#include "bfa_cs.h"
#include "bfa_ioc.h"
#include "cna.h"
#include "bfi_ll.h"
@@ -88,7 +88,7 @@ do { \
} while (0)
#define containing_rec(addr, type, field) \
- ((type *)((unsigned char *)(addr) - \
+ ((type *)((unsigned char *)(addr) - \
(unsigned char *)(&((type *)0)->field)))
#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
@@ -101,8 +101,8 @@ do { \
{ \
unsigned int page_index; /* index within a page */ \
void *page_addr; \
- page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
- (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
+ page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
(_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
}
@@ -166,25 +166,25 @@ do { \
(((_q_ptr)->q.producer_index + (_num)) & \
((_q_ptr)->q.q_depth - 1))
-#define BNA_Q_CI_ADD(_q_ptr, _num) \
+#define BNA_Q_CI_ADD(_q_ptr, _num) \
(_q_ptr)->q.consumer_index = \
- (((_q_ptr)->q.consumer_index + (_num)) \
+ (((_q_ptr)->q.consumer_index + (_num)) \
& ((_q_ptr)->q.q_depth - 1))
#define BNA_Q_FREE_COUNT(_q_ptr) \
(BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
-#define BNA_Q_IN_USE_COUNT(_q_ptr) \
+#define BNA_Q_IN_USE_COUNT(_q_ptr) \
(BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
/* These macros build the data portion of the TxQ/RxQ doorbell */
-#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
+#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
#define BNA_DOORBELL_Q_STOP (0x40000000)
/* These macros build the data portion of the IB doorbell */
#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
(0x80000000 | ((_timeout) << 16) | (_events))
-#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
+#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
/* Set the coalescing timer for the given ib */
#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
index 53b14169e36..cb2594c564d 100644
--- a/drivers/net/bna/bna_ctrl.c
+++ b/drivers/net/bna/bna_ctrl.c
@@ -16,8 +16,7 @@
* www.brocade.com
*/
#include "bna.h"
-#include "bfa_sm.h"
-#include "bfa_wc.h"
+#include "bfa_cs.h"
static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
@@ -380,7 +379,7 @@ bna_llport_sm_stopped(struct bna_llport *llport,
break;
default:
- bfa_sm_fault(llport->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -409,7 +408,7 @@ bna_llport_sm_down(struct bna_llport *llport,
break;
default:
- bfa_sm_fault(llport->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -455,7 +454,7 @@ bna_llport_sm_up_resp_wait(struct bna_llport *llport,
break;
default:
- bfa_sm_fault(llport->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -497,7 +496,7 @@ bna_llport_sm_down_resp_wait(struct bna_llport *llport,
break;
default:
- bfa_sm_fault(llport->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -526,7 +525,7 @@ bna_llport_sm_up(struct bna_llport *llport,
break;
default:
- bfa_sm_fault(llport->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -563,7 +562,7 @@ bna_llport_sm_last_resp_wait(struct bna_llport *llport,
break;
default:
- bfa_sm_fault(llport->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -916,7 +915,7 @@ bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -956,7 +955,7 @@ bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1001,7 +1000,7 @@ bna_port_sm_pause_init_wait(struct bna_port *port,
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1022,7 +1021,7 @@ bna_port_sm_last_resp_wait(struct bna_port *port,
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1061,7 +1060,7 @@ bna_port_sm_started(struct bna_port *port,
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1086,7 +1085,7 @@ bna_port_sm_pause_cfg_wait(struct bna_port *port,
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1111,7 +1110,7 @@ bna_port_sm_rx_stop_wait(struct bna_port *port,
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1136,7 +1135,7 @@ bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1161,7 +1160,7 @@ bna_port_sm_chld_stop_wait(struct bna_port *port,
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1472,7 +1471,7 @@ bna_device_sm_stopped(struct bna_device *device,
break;
default:
- bfa_sm_fault(device->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1512,7 +1511,7 @@ bna_device_sm_ioc_ready_wait(struct bna_device *device,
break;
default:
- bfa_sm_fault(device->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1542,7 +1541,7 @@ bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
break;
default:
- bfa_sm_fault(device->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1568,7 +1567,7 @@ bna_device_sm_port_stop_wait(struct bna_device *device,
break;
default:
- bfa_sm_fault(device->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1589,7 +1588,7 @@ bna_device_sm_ioc_disable_wait(struct bna_device *device,
break;
default:
- bfa_sm_fault(device->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1622,7 +1621,7 @@ bna_device_sm_failed(struct bna_device *device,
break;
default:
- bfa_sm_fault(device->bna, event);
+ bfa_sm_fault(event);
}
}
diff --git a/drivers/net/bna/bna_hw.h b/drivers/net/bna/bna_hw.h
index 6cb89692f5c..cad233da843 100644
--- a/drivers/net/bna/bna_hw.h
+++ b/drivers/net/bna/bna_hw.h
@@ -67,7 +67,7 @@ static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
/**
* There are 2 free RIT segment pools:
- * Pool1: 192 segments of 1 RIT entry each
+ * Pool1: 192 segments of 1 RIT entry each
* Pool2: 1 segment of 64 RIT entry
*/
#define BFI_RIT_SEG_POOL1_SIZE 192
@@ -357,14 +357,14 @@ static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
* To clear set the value to 0.
* Range : 0x20 to 0x5c
*/
-#define PSS_SEM_LOCK_REG(_num) \
+#define PSS_SEM_LOCK_REG(_num) \
(PSS_BLK_REG_ADDR + 0x020 + ((_num) << 2))
/**
* PSS Semaphore Status Registers,
* corresponding to the lock registers above
*/
-#define PSS_SEM_STATUS_REG(_num) \
+#define PSS_SEM_STATUS_REG(_num) \
(PSS_BLK_REG_ADDR + 0x060 + ((_num) << 2))
/**
@@ -1044,7 +1044,7 @@ static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
__LPU12HOST_MBOX1_STATUS_BITS))
#define BNA_IS_MBOX_INTR(_intr_status) \
- ((_intr_status) & \
+ ((_intr_status) & \
(__LPU02HOST_MBOX0_STATUS_BITS | \
__LPU02HOST_MBOX1_STATUS_BITS | \
__LPU12HOST_MBOX0_STATUS_BITS | \
@@ -1070,11 +1070,11 @@ static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
__HALT_MASK_BITS)
#define BNA_IS_ERR_INTR(_intr_status) \
- ((_intr_status) & \
- (__EMC_ERROR_STATUS_BITS | \
- __LPU0_ERROR_STATUS_BITS | \
- __LPU1_ERROR_STATUS_BITS | \
- __PSS_ERROR_STATUS_BITS | \
+ ((_intr_status) & \
+ (__EMC_ERROR_STATUS_BITS | \
+ __LPU0_ERROR_STATUS_BITS | \
+ __LPU1_ERROR_STATUS_BITS | \
+ __PSS_ERROR_STATUS_BITS | \
__HALT_STATUS_BITS))
#define BNA_IS_MBOX_ERR_INTR(_intr_status) \
@@ -1087,9 +1087,9 @@ static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
#define BNA_INTR_STATUS_MBOX_CLR(_intr_status) \
do { \
(_intr_status) &= ~(__LPU02HOST_MBOX0_STATUS_BITS | \
- __LPU02HOST_MBOX1_STATUS_BITS | \
- __LPU12HOST_MBOX0_STATUS_BITS | \
- __LPU12HOST_MBOX1_STATUS_BITS); \
+ __LPU02HOST_MBOX1_STATUS_BITS | \
+ __LPU12HOST_MBOX0_STATUS_BITS | \
+ __LPU12HOST_MBOX1_STATUS_BITS); \
} while (0)
#define BNA_INTR_STATUS_ERR_CLR(_intr_status) \
@@ -1107,7 +1107,7 @@ do { \
writel(0xffffffff, (_bna)->regs.fn_int_mask);\
}
-#define bna_intx_enable(bna, new_mask) \
+#define bna_intx_enable(bna, new_mask) \
writel((new_mask), (bna)->regs.fn_int_mask)
#define bna_mbox_intr_disable(bna) \
@@ -1179,18 +1179,18 @@ do {\
#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
/* TxQ Entry Opcodes */
-#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
-#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
+#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
+#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
/* TxQ Entry Control Flags */
-#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
-#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
-#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
-#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
-#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
-#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
-#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
+#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
+#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
+#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
+#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
+#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
+#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
+#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
(((_hdr_size) << 10) | ((_offset) & 0x3FF))
@@ -1199,30 +1199,30 @@ do {\
* Completion Q defines
*/
/* CQ Entry Flags */
-#define BNA_CQ_EF_MAC_ERROR (1 << 0)
-#define BNA_CQ_EF_FCS_ERROR (1 << 1)
-#define BNA_CQ_EF_TOO_LONG (1 << 2)
-#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
+#define BNA_CQ_EF_MAC_ERROR (1 << 0)
+#define BNA_CQ_EF_FCS_ERROR (1 << 1)
+#define BNA_CQ_EF_TOO_LONG (1 << 2)
+#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
-#define BNA_CQ_EF_RSVD1 (1 << 4)
+#define BNA_CQ_EF_RSVD1 (1 << 4)
#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
#define BNA_CQ_EF_HDS_HEADER (1 << 7)
-#define BNA_CQ_EF_UDP (1 << 8)
-#define BNA_CQ_EF_TCP (1 << 9)
+#define BNA_CQ_EF_UDP (1 << 8)
+#define BNA_CQ_EF_TCP (1 << 9)
#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
-#define BNA_CQ_EF_IPV6 (1 << 11)
+#define BNA_CQ_EF_IPV6 (1 << 11)
-#define BNA_CQ_EF_IPV4 (1 << 12)
-#define BNA_CQ_EF_VLAN (1 << 13)
-#define BNA_CQ_EF_RSS (1 << 14)
-#define BNA_CQ_EF_RSVD2 (1 << 15)
+#define BNA_CQ_EF_IPV4 (1 << 12)
+#define BNA_CQ_EF_VLAN (1 << 13)
+#define BNA_CQ_EF_RSS (1 << 14)
+#define BNA_CQ_EF_RSVD2 (1 << 15)
#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
-#define BNA_CQ_EF_MCAST (1 << 17)
-#define BNA_CQ_EF_BCAST (1 << 18)
-#define BNA_CQ_EF_REMOTE (1 << 19)
+#define BNA_CQ_EF_MCAST (1 << 17)
+#define BNA_CQ_EF_BCAST (1 << 18)
+#define BNA_CQ_EF_REMOTE (1 << 19)
#define BNA_CQ_EF_LOCAL (1 << 20)
@@ -1257,10 +1257,10 @@ enum ib_flags {
};
enum rss_hash_type {
- BFI_RSS_T_V4_TCP = (1 << 11),
- BFI_RSS_T_V4_IP = (1 << 10),
- BFI_RSS_T_V6_TCP = (1 << 9),
- BFI_RSS_T_V6_IP = (1 << 8)
+ BFI_RSS_T_V4_TCP = (1 << 11),
+ BFI_RSS_T_V4_IP = (1 << 10),
+ BFI_RSS_T_V6_TCP = (1 << 9),
+ BFI_RSS_T_V6_IP = (1 << 8)
};
enum hds_header_type {
BNA_HDS_T_V4_TCP = (1 << 11),
@@ -1298,7 +1298,7 @@ struct bna_txq_mem {
u32 reserved2;
u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
/* 15:0 ->producer pointer (index?) */
- u32 entry_n_pg_size; /* 31:16->entry size */
+ u32 entry_n_pg_size; /* 31:16->entry size */
/* 15:0 ->page size */
u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
/* 23:16->Int Blk Offset */
@@ -1326,7 +1326,7 @@ struct bna_rxq_mem {
u32 sg_n_cq_n_cns_ptr; /* 31:28->reserved; 27:24->sg count */
/* 23:16->CQ; */
/* 15:0->consumer pointer(index?) */
- u32 buf_sz_n_q_state; /* 31:16->buffer size; 15:0-> Q state */
+ u32 buf_sz_n_q_state; /* 31:16->buffer size; 15:0-> Q state */
u32 next_qid; /* 17:10->next QId */
u32 reserved3;
u32 reserved4[4];
@@ -1426,8 +1426,8 @@ struct bna_dma_addr {
};
struct bna_txq_wi_vector {
- u16 reserved;
- u16 length; /* Only 14 LSB are valid */
+ u16 reserved;
+ u16 length; /* Only 14 LSB are valid */
struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
};
@@ -1465,7 +1465,7 @@ struct bna_txq_entry {
} hdr;
struct bna_txq_wi_vector vector[4];
};
-#define wi_hdr hdr.wi
+#define wi_hdr hdr.wi
#define wi_ext_hdr hdr.wi_ext
/* RxQ Entry Structure */
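
The CQ entry flag defines re-indented earlier in this header are plain one-bit masks, so completion flags can be combined and tested with ordinary bitwise operators. A small illustration (the flag combination is arbitrary, chosen only for the example):

	u32 flags = BNA_CQ_EF_IPV4 | BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK;
	bool vlan_tagged = !!(flags & BNA_CQ_EF_VLAN);		/* false here */
	bool csum_ok     = !!(flags & BNA_CQ_EF_L4_CKSUM_OK);	/* true here  */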
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
index 380085cc308..f0983c83244 100644
--- a/drivers/net/bna/bna_txrx.c
+++ b/drivers/net/bna/bna_txrx.c
@@ -16,7 +16,7 @@
* www.brocade.com
*/
#include "bna.h"
-#include "bfa_sm.h"
+#include "bfa_cs.h"
#include "bfi.h"
/**
@@ -569,7 +569,7 @@ bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
break;
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -627,7 +627,7 @@ bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
break;
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -678,7 +678,7 @@ bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
break;
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -724,7 +724,7 @@ bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
break;
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -734,7 +734,7 @@ bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
/**
* Note: Do not add rxf_clear_packet_filter here.
* It will overstep mbox when this transition happens:
- * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
+ * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
*/
}
@@ -761,7 +761,7 @@ bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
break;
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -771,7 +771,7 @@ bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
/**
* NOTE: Do not add rxf_disable here.
* It will overstep mbox when this transition happens:
- * start_wait -> stop_wait on RXF_E_STOP event
+ * start_wait -> stop_wait on RXF_E_STOP event
*/
}
@@ -815,7 +815,7 @@ bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
break;
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -851,7 +851,7 @@ bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
* any other event during these states
*/
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -887,7 +887,7 @@ bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
* any other event during these states
*/
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -907,7 +907,7 @@ bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
break;
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1898,7 +1898,7 @@ static void bna_rx_sm_stopped(struct bna_rx *rx,
/* no-op */
break;
default:
- bfa_sm_fault(rx->bna, event);
+ bfa_sm_fault(event);
break;
}
@@ -1946,7 +1946,7 @@ static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
bfa_fsm_set_state(rx, bna_rx_sm_started);
break;
default:
- bfa_sm_fault(rx->bna, event);
+ bfa_sm_fault(event);
break;
}
}
@@ -1981,7 +1981,7 @@ bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
break;
default:
- bfa_sm_fault(rx->bna, event);
+ bfa_sm_fault(event);
break;
}
}
@@ -2011,7 +2011,7 @@ bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
bna_rxf_fail(&rx->rxf);
break;
default:
- bfa_sm_fault(rx->bna, event);
+ bfa_sm_fault(event);
break;
}
@@ -2064,7 +2064,7 @@ bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
break;
default:
- bfa_sm_fault(rx->bna, event);
+ bfa_sm_fault(event);
break;
}
}
@@ -3216,7 +3216,7 @@ bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
break;
default:
- bfa_sm_fault(tx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -3261,7 +3261,7 @@ bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
break;
default:
- bfa_sm_fault(tx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -3294,7 +3294,7 @@ bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
break;
default:
- bfa_sm_fault(tx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -3335,7 +3335,7 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
break;
default:
- bfa_sm_fault(tx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -3355,7 +3355,7 @@ bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
break;
default:
- bfa_sm_fault(tx->bna, event);
+ bfa_sm_fault(event);
}
}
diff --git a/drivers/net/bna/bna_types.h b/drivers/net/bna/bna_types.h
index b9c134f7ad3..2f89cb23524 100644
--- a/drivers/net/bna/bna_types.h
+++ b/drivers/net/bna/bna_types.h
@@ -50,12 +50,12 @@ enum bna_status {
};
enum bna_cleanup_type {
- BNA_HARD_CLEANUP = 0,
- BNA_SOFT_CLEANUP = 1
+ BNA_HARD_CLEANUP = 0,
+ BNA_SOFT_CLEANUP = 1
};
enum bna_cb_status {
- BNA_CB_SUCCESS = 0,
+ BNA_CB_SUCCESS = 0,
BNA_CB_FAIL = 1,
BNA_CB_INTERRUPT = 2,
BNA_CB_BUSY = 3,
@@ -72,8 +72,8 @@ enum bna_res_type {
};
enum bna_mem_type {
- BNA_MEM_T_KVA = 1,
- BNA_MEM_T_DMA = 2
+ BNA_MEM_T_KVA = 1,
+ BNA_MEM_T_DMA = 2
};
enum bna_intr_type {
@@ -82,10 +82,10 @@ enum bna_intr_type {
};
enum bna_res_req_type {
- BNA_RES_MEM_T_COM = 0,
- BNA_RES_MEM_T_ATTR = 1,
- BNA_RES_MEM_T_FWTRC = 2,
- BNA_RES_MEM_T_STATS = 3,
+ BNA_RES_MEM_T_COM = 0,
+ BNA_RES_MEM_T_ATTR = 1,
+ BNA_RES_MEM_T_FWTRC = 2,
+ BNA_RES_MEM_T_STATS = 3,
BNA_RES_MEM_T_SWSTATS = 4,
BNA_RES_MEM_T_IBIDX = 5,
BNA_RES_MEM_T_IB_ARRAY = 6,
@@ -107,9 +107,9 @@ enum bna_res_req_type {
enum bna_tx_res_req_type {
BNA_TX_RES_MEM_T_TCB = 0,
BNA_TX_RES_MEM_T_UNMAPQ = 1,
- BNA_TX_RES_MEM_T_QPT = 2,
+ BNA_TX_RES_MEM_T_QPT = 2,
BNA_TX_RES_MEM_T_SWQPT = 3,
- BNA_TX_RES_MEM_T_PAGE = 4,
+ BNA_TX_RES_MEM_T_PAGE = 4,
BNA_TX_RES_INTR_T_TXCMPL = 5,
BNA_TX_RES_T_MAX,
};
@@ -158,14 +158,14 @@ enum bna_rx_type {
};
enum bna_rxp_type {
- BNA_RXP_SINGLE = 1,
- BNA_RXP_SLR = 2,
- BNA_RXP_HDS = 3
+ BNA_RXP_SINGLE = 1,
+ BNA_RXP_SLR = 2,
+ BNA_RXP_HDS = 3
};
enum bna_rxmode {
- BNA_RXMODE_PROMISC = 1,
- BNA_RXMODE_ALLMULTI = 2
+ BNA_RXMODE_PROMISC = 1,
+ BNA_RXMODE_ALLMULTI = 2
};
enum bna_rx_event {
@@ -202,7 +202,7 @@ enum bna_rxf_oper_state {
};
enum bna_rxf_flags {
- BNA_RXF_FL_STOP_PENDING = 0x01,
+ BNA_RXF_FL_STOP_PENDING = 0x01,
BNA_RXF_FL_FAILED = 0x02,
BNA_RXF_FL_RSS_CONFIG_PENDING = 0x04,
BNA_RXF_FL_OPERSTATE_CHANGED = 0x08,
@@ -244,11 +244,11 @@ enum bna_port_type {
enum bna_link_status {
BNA_LINK_DOWN = 0,
BNA_LINK_UP = 1,
- BNA_CEE_UP = 2
+ BNA_CEE_UP = 2
};
enum bna_llport_flags {
- BNA_LLPORT_F_ADMIN_UP = 1,
+ BNA_LLPORT_F_ADMIN_UP = 1,
BNA_LLPORT_F_PORT_ENABLED = 2,
BNA_LLPORT_F_RX_STARTED = 4
};
@@ -304,7 +304,7 @@ struct bna_mem_descr {
struct bna_mem_info {
enum bna_mem_type mem_type;
u32 len;
- u32 num;
+ u32 num;
u32 align_sz; /* 0/1 = no alignment */
struct bna_mem_descr *mdl;
void *cookie; /* For bnad to unmap dma later */
@@ -371,10 +371,10 @@ struct bna_mbox_qe {
struct list_head qe;
struct bfa_mbox_cmd cmd;
- u32 cmd_len;
+ u32 cmd_len;
/* Callback for port, tx, rx, rxf */
void (*cbfn)(void *arg, int status);
- void *cbarg;
+ void *cbarg;
};
struct bna_mbox_mod {
@@ -480,7 +480,7 @@ struct bna_ib_dbell {
/* Interrupt timer configuration */
struct bna_ib_config {
- u8 coalescing_timeo; /* Unit is 5usec. */
+ u8 coalescing_timeo; /* Unit is 5usec. */
int interpkt_count;
int interpkt_timeo;
@@ -576,8 +576,8 @@ struct bna_txq {
struct bna_tx *tx;
- u64 tx_packets;
- u64 tx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
};
/* TxF structure (hardware Tx Function) */
@@ -739,10 +739,10 @@ struct bna_rxq {
struct bna_rxp *rxp;
struct bna_rx *rx;
- u64 rx_packets;
+ u64 rx_packets;
u64 rx_bytes;
- u64 rx_packets_with_error;
- u64 rxbuf_alloc_failed;
+ u64 rx_packets_with_error;
+ u64 rxbuf_alloc_failed;
};
/* RxQ pair */
@@ -902,7 +902,7 @@ struct bna_rxf {
* callback for:
* bna_rxf_ucast_set()
* bna_rxf_{ucast/mcast}_add(),
- * bna_rxf_{ucast/mcast}_del(),
+ * bna_rxf_{ucast/mcast}_del(),
* bna_rxf_mode_set()
*/
void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx,
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 7d25a97d33f..8e35b2596f9 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -15,6 +15,7 @@
* All rights reserved
* www.brocade.com
*/
+#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
@@ -58,7 +59,7 @@ static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
#define BNAD_GET_MBOX_IRQ(_bnad) \
(((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
- ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
+ ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
((_bnad)->pcidev->irq))
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
@@ -110,10 +111,10 @@ static void
bnad_free_all_txbufs(struct bnad *bnad,
struct bna_tcb *tcb)
{
- u32 unmap_cons;
+ u32 unmap_cons;
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
struct bnad_skb_unmap *unmap_array;
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb = NULL;
int i;
unmap_array = unmap_q->unmap_array;
@@ -163,11 +164,11 @@ static u32
bnad_free_txbufs(struct bnad *bnad,
struct bna_tcb *tcb)
{
- u32 sent_packets = 0, sent_bytes = 0;
- u16 wis, unmap_cons, updated_hw_cons;
+ u32 sent_packets = 0, sent_bytes = 0;
+ u16 wis, unmap_cons, updated_hw_cons;
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
struct bnad_skb_unmap *unmap_array;
- struct sk_buff *skb;
+ struct sk_buff *skb;
int i;
/*
@@ -245,7 +246,7 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
struct bnad *bnad = (struct bnad *)bnad_ptr;
struct bna_tcb *tcb;
- u32 acked = 0;
+ u32 acked = 0;
int i, j;
for (i = 0; i < bnad->num_tx; i++) {
@@ -386,14 +387,12 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
wi_range);
}
- skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
- GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(bnad->netdev,
+ rcb->rxq->buffer_size);
if (unlikely(!skb)) {
BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
goto finishing;
}
- skb->dev = bnad->netdev;
- skb_reserve(skb, NET_IP_ALIGN);
unmap_array[unmap_prod].skb = skb;
dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
rcb->rxq->buffer_size,
@@ -516,24 +515,16 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
rcb->rxq->rx_bytes += skb->len;
skb->protocol = eth_type_trans(skb, bnad->netdev);
- if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
- struct bnad_rx_ctrl *rx_ctrl =
- (struct bnad_rx_ctrl *)ccb->ctrl;
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
- ntohs(cmpl->vlan_tag), skb);
- else
- vlan_hwaccel_receive_skb(skb,
- bnad->vlan_grp,
- ntohs(cmpl->vlan_tag));
-
- } else { /* Not VLAN tagged/stripped */
- struct bnad_rx_ctrl *rx_ctrl =
- (struct bnad_rx_ctrl *)ccb->ctrl;
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- napi_gro_receive(&rx_ctrl->napi, skb);
- else
- netif_receive_skb(skb);
+ if (flags & BNA_CQ_EF_VLAN)
+ __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
+
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ struct bnad_rx_ctrl *rx_ctrl;
+
+ rx_ctrl = (struct bnad_rx_ctrl *) ccb->ctrl;
+ napi_gro_receive(&rx_ctrl->napi, skb);
+ } else {
+ netif_receive_skb(skb);
}
next:
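
With the vlan_grp branches removed, the completion handler simply tags the skb when the hardware stripped a VLAN header and then hands it to GRO, falling back to netif_receive_skb() for packets without a verified checksum. Condensed from the hunk above:

	if (flags & BNA_CQ_EF_VLAN)
		__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));

	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ctrl->napi, skb);
	else
		netif_receive_skb(skb);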
@@ -1110,10 +1101,10 @@ static int
bnad_mbox_irq_alloc(struct bnad *bnad,
struct bna_intr_info *intr_info)
{
- int err = 0;
- unsigned long flags;
+ int err = 0;
+ unsigned long irq_flags, flags;
u32 irq;
- irq_handler_t irq_handler;
+ irq_handler_t irq_handler;
/* Mbox should use only 1 vector */
@@ -1124,19 +1115,18 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
spin_lock_irqsave(&bnad->bna_lock, flags);
if (bnad->cfg_flags & BNAD_CF_MSIX) {
irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
- irq = bnad->msix_table[bnad->msix_num - 1].vector;
- flags = 0;
+ irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
+ irq_flags = 0;
intr_info->intr_type = BNA_INTR_T_MSIX;
- intr_info->idl[0].vector = bnad->msix_num - 1;
+ intr_info->idl[0].vector = BNAD_MAILBOX_MSIX_INDEX;
} else {
irq_handler = (irq_handler_t)bnad_isr;
irq = bnad->pcidev->irq;
- flags = IRQF_SHARED;
+ irq_flags = IRQF_SHARED;
intr_info->intr_type = BNA_INTR_T_INTX;
- /* intr_info->idl.vector = 0 ? */
}
- spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
/*
@@ -1147,7 +1137,7 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
- err = request_irq(irq, irq_handler, flags,
+ err = request_irq(irq, irq_handler, irq_flags,
bnad->mbox_irq_name, bnad);
if (err) {
@@ -1188,11 +1178,12 @@ bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
switch (src) {
case BNAD_INTR_TX:
- vector_start = txrx_id;
+ vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
break;
case BNAD_INTR_RX:
- vector_start = bnad->num_tx * bnad->num_txq_per_tx +
+ vector_start = BNAD_MAILBOX_MSIX_VECTORS +
+ (bnad->num_tx * bnad->num_txq_per_tx) +
txrx_id;
break;
@@ -1213,11 +1204,11 @@ bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
switch (src) {
case BNAD_INTR_TX:
- intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
+ intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
break;
case BNAD_INTR_RX:
- intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
+ intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
break;
}
}
@@ -1456,7 +1447,7 @@ bnad_iocpf_sem_timeout(unsigned long data)
/*
* All timer routines use bnad->bna_lock to protect against
* the following race, which may occur in case of no locking:
- * Time CPU m CPU n
+ * Time CPU m CPU n
* 0 1 = test_bit
* 1 clear_bit
* 2 del_timer_sync
@@ -1921,7 +1912,7 @@ void
bnad_rx_coalescing_timeo_set(struct bnad *bnad)
{
struct bnad_rx_info *rx_info;
- int i;
+ int i;
for (i = 0; i < bnad->num_rx; i++) {
rx_info = &bnad->rx_info[i];
@@ -1984,19 +1975,14 @@ bnad_enable_default_bcast(struct bnad *bnad)
static void
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
{
- u16 vlan_id;
+ u16 vid;
unsigned long flags;
- if (!bnad->vlan_grp)
- return;
-
BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
- for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
- if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
- continue;
+ for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
spin_lock_irqsave(&bnad->bna_lock, flags);
- bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
+ bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
}
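
The driver now remembers active VLANs in a bitmap (active_vlans in struct bnad, added later in this patch) instead of a struct vlan_group. A minimal sketch of the pattern, with rx standing in for bnad->rx_info[rx_id].rx:

	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)] = { 0 };
	u16 vid;

	set_bit(42, active_vlans);		/* ndo_vlan_rx_add_vid  */
	clear_bit(42, active_vlans);		/* ndo_vlan_rx_kill_vid */

	for_each_set_bit(vid, active_vlans, VLAN_N_VID)	/* restore path */
		bna_rx_vlan_add(rx, vid);	/* rx is a placeholder here */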
@@ -2089,7 +2075,7 @@ bnad_mbox_irq_sync(struct bnad *bnad)
spin_lock_irqsave(&bnad->bna_lock, flags);
if (bnad->cfg_flags & BNAD_CF_MSIX)
- irq = bnad->msix_table[bnad->msix_num - 1].vector;
+ irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
else
irq = bnad->pcidev->irq;
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -2440,18 +2426,18 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct bnad *bnad = netdev_priv(netdev);
- u16 txq_prod, vlan_tag = 0;
- u32 unmap_prod, wis, wis_used, wi_range;
- u32 vectors, vect_id, i, acked;
+ u16 txq_prod, vlan_tag = 0;
+ u32 unmap_prod, wis, wis_used, wi_range;
+ u32 vectors, vect_id, i, acked;
u32 tx_id;
- int err;
+ int err;
struct bnad_tx_info *tx_info;
struct bna_tcb *tcb;
struct bnad_unmap_q *unmap_q;
- dma_addr_t dma_addr;
+ dma_addr_t dma_addr;
struct bna_txq_entry *txqent;
- bna_txq_wi_ctrl_flag_t flags;
+ bna_txq_wi_ctrl_flag_t flags;
if (unlikely
(skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
@@ -2799,17 +2785,6 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
}
static void
-bnad_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *vlan_grp)
-{
- struct bnad *bnad = netdev_priv(netdev);
-
- mutex_lock(&bnad->conf_mutex);
- bnad->vlan_grp = vlan_grp;
- mutex_unlock(&bnad->conf_mutex);
-}
-
-static void
bnad_vlan_rx_add_vid(struct net_device *netdev,
unsigned short vid)
{
@@ -2823,6 +2798,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
+ set_bit(vid, bnad->active_vlans);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
mutex_unlock(&bnad->conf_mutex);
@@ -2841,6 +2817,7 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
mutex_lock(&bnad->conf_mutex);
spin_lock_irqsave(&bnad->bna_lock, flags);
+ clear_bit(vid, bnad->active_vlans);
bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -2890,7 +2867,6 @@ static const struct net_device_ops bnad_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = bnad_set_mac_address,
.ndo_change_mtu = bnad_change_mtu,
- .ndo_vlan_rx_register = bnad_vlan_rx_register,
.ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3057,8 +3033,8 @@ static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pcidev_id)
{
- bool using_dac = false;
- int err;
+ bool using_dac = false;
+ int err;
struct bnad *bnad;
struct bna *bna;
struct net_device *netdev;
@@ -3090,7 +3066,7 @@ bnad_pci_probe(struct pci_dev *pdev,
/*
* PCI initialization
- * Output : using_dac = 1 for 64 bit DMA
+ * Output : using_dac = 1 for 64 bit DMA
* = 0 for 32 bit DMA
*/
err = bnad_pci_init(bnad, pdev, &using_dac);
@@ -3233,7 +3209,7 @@ bnad_pci_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
-static const struct pci_device_id bnad_pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
{
PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
PCI_DEVICE_ID_BROCADE_CT),
@@ -3256,7 +3232,8 @@ bnad_module_init(void)
{
int err;
- pr_info("Brocade 10G Ethernet driver\n");
+ pr_info("Brocade 10G Ethernet driver - version: %s\n",
+ BNAD_VERSION);
bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index ccdabad0a40..458eb30371b 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -24,6 +24,7 @@
#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/firmware.h>
+#include <linux/if_vlan.h>
/* Fix for IA64 */
#include <asm/checksum.h>
@@ -67,10 +68,13 @@ struct bnad_rx_ctrl {
#define BNAD_VERSION "2.3.2.3"
+#define BNAD_MAILBOX_MSIX_INDEX 0
#define BNAD_MAILBOX_MSIX_VECTORS 1
+#define BNAD_INTX_TX_IB_BITMASK 0x1
+#define BNAD_INTX_RX_IB_BITMASK 0x2
-#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
-#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
+#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
+#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
#define BNAD_MAX_Q_DEPTH 0x10000
#define BNAD_MIN_Q_DEPTH 0x200
@@ -101,12 +105,12 @@ enum bnad_intr_source {
enum bnad_link_state {
BNAD_LS_DOWN = 0,
- BNAD_LS_UP = 1
+ BNAD_LS_UP = 1
};
struct bnad_completion {
- struct completion ioc_comp;
- struct completion ucast_comp;
+ struct completion ioc_comp;
+ struct completion ucast_comp;
struct completion mcast_comp;
struct completion tx_comp;
struct completion rx_comp;
@@ -124,7 +128,7 @@ struct bnad_completion {
/* Tx Rx Control Stats */
struct bnad_drv_stats {
- u64 netif_queue_stop;
+ u64 netif_queue_stop;
u64 netif_queue_wakeup;
u64 netif_queue_stopped;
u64 tso4;
@@ -187,7 +191,7 @@ struct bnad_skb_unmap {
struct bnad_unmap_q {
u32 producer_index;
u32 consumer_index;
- u32 q_depth;
+ u32 q_depth;
/* This should be the last one */
struct bnad_skb_unmap unmap_array[1];
};
@@ -210,13 +214,13 @@ struct bnad_unmap_q {
#define BNAD_RF_RX_SHUTDOWN_DELAYED 7
struct bnad {
- struct net_device *netdev;
+ struct net_device *netdev;
/* Data path */
struct bnad_tx_info tx_info[BNAD_MAX_TXS];
struct bnad_rx_info rx_info[BNAD_MAX_RXS];
- struct vlan_group *vlan_grp;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
/*
* These q numbers are global only because
* they are used to calculate MSIx vectors.
@@ -244,7 +248,7 @@ struct bnad {
u32 cfg_flags;
unsigned long run_flags;
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev;
u64 mmio_start;
u64 mmio_len;
@@ -277,7 +281,7 @@ struct bnad {
struct bnad_diag *diag;
char adapter_name[BNAD_NAME_LEN];
- char port_name[BNAD_NAME_LEN];
+ char port_name[BNAD_NAME_LEN];
char mbox_irq_name[BNAD_NAME_LEN];
};
@@ -285,7 +289,7 @@ struct bnad {
* EXTERN VARIABLES
*/
extern struct firmware *bfi_fw;
-extern u32 bnad_rxqs_per_cq;
+extern u32 bnad_rxqs_per_cq;
/*
* EXTERN PROTOTYPES
@@ -331,7 +335,7 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
}
#define bnad_dim_timer_running(_bnad) \
- (((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) && \
+ (((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) && \
(test_bit(BNAD_RF_DIM_TIMER_RUNNING, &((_bnad)->run_flags))))
#endif /* __BNAD_H__ */
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 3330cd78da2..fea07f19a5d 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -295,7 +295,7 @@ get_regs(struct bnad *bnad, u32 * regs)
u32 reg_addr;
unsigned long flags;
-#define BNAD_GET_REG(addr) \
+#define BNAD_GET_REG(addr) \
do { \
if (regs) \
regs[num++] = readl(bnad->bar0 + (addr)); \
diff --git a/drivers/net/bna/cna.h b/drivers/net/bna/cna.h
index bbd39dc6597..a679e038747 100644
--- a/drivers/net/bna/cna.h
+++ b/drivers/net/bna/cna.h
@@ -19,7 +19,6 @@
#ifndef __CNA_H__
#define __CNA_H__
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
@@ -34,7 +33,7 @@
#include <linux/list.h>
-#define bfa_sm_fault(__mod, __event) do { \
+#define bfa_sm_fault(__event) do { \
pr_err("SM Assertion failure: %s: %d: event = %d", __FILE__, __LINE__, \
__event); \
} while (0)
@@ -74,7 +73,7 @@ typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
bfa_q_qe_init(*((struct list_head **) _qe)); \
} else { \
- *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
+ *((struct list_head **)(_qe)) = NULL; \
} \
}
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 57d3293c65b..4b2b57018a0 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.1.6"
-#define DRV_MODULE_RELDATE "Mar 7, 2011"
+#define DRV_MODULE_VERSION "2.1.11"
+#define DRV_MODULE_RELDATE "July 20, 2011"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw"
@@ -385,6 +385,9 @@ static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
if (cp->drv_state & CNIC_DRV_STATE_REGD)
return -EBUSY;
+ if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
+ return -ENODEV;
+
bp->cnic_data = data;
rcu_assign_pointer(bp->cnic_ops, ops);
@@ -416,6 +419,9 @@ struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
struct bnx2 *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ if (!cp->max_iscsi_conn)
+ return NULL;
+
cp->drv_owner = THIS_MODULE;
cp->chip_id = bp->chip_id;
cp->pdev = bp->pdev;
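
bnx2_register_cnic() and bnx2_cnic_probe() now bail out when no iSCSI connections are available; the limit itself is decoded from shared memory in bnx2_init_board() further down, roughly as follows (the constants are added to bnx2.h by this patch):

	u32 max_conn = 0;

	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
		max_conn = (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
			    BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;

	bp->cnic_eth_dev.max_iscsi_conn = max_conn;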
@@ -2440,6 +2446,48 @@ bnx2_set_phy_loopback(struct bnx2 *bp)
return 0;
}
+static void
+bnx2_dump_mcp_state(struct bnx2 *bp)
+{
+ struct net_device *dev = bp->dev;
+ u32 mcp_p0, mcp_p1;
+
+ netdev_err(dev, "<--- start MCP states dump --->\n");
+ if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+ mcp_p0 = BNX2_MCP_STATE_P0;
+ mcp_p1 = BNX2_MCP_STATE_P1;
+ } else {
+ mcp_p0 = BNX2_MCP_STATE_P0_5708;
+ mcp_p1 = BNX2_MCP_STATE_P1_5708;
+ }
+ netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
+ bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
+ netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
+ bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
+ bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
+ bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
+ netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
+ bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
+ bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
+ bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
+ netdev_err(dev, "DEBUG: shmem states:\n");
+ netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
+ bnx2_shmem_rd(bp, BNX2_DRV_MB),
+ bnx2_shmem_rd(bp, BNX2_FW_MB),
+ bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
+ pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
+ netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
+ bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
+ bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
+ pr_cont(" condition[%08x]\n",
+ bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
+ DP_SHMEM_LINE(bp, 0x3cc);
+ DP_SHMEM_LINE(bp, 0x3dc);
+ DP_SHMEM_LINE(bp, 0x3ec);
+ netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
+ netdev_err(dev, "<--- end MCP states dump --->\n");
+}
+
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
@@ -2468,13 +2516,14 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
/* If we timed out, inform the firmware that this is the case. */
if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
- if (!silent)
- pr_err("fw sync timeout, reset code = %x\n", msg_data);
-
msg_data &= ~BNX2_DRV_MSG_CODE;
msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
+ if (!silent) {
+ pr_err("fw sync timeout, reset code = %x\n", msg_data);
+ bnx2_dump_mcp_state(bp);
+ }
return -EBUSY;
}
@@ -6293,6 +6342,7 @@ static void
bnx2_reset_task(struct work_struct *work)
{
struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
+ int rc;
rtnl_lock();
if (!netif_running(bp->dev)) {
@@ -6302,7 +6352,14 @@ bnx2_reset_task(struct work_struct *work)
bnx2_netif_stop(bp, true);
- bnx2_init_nic(bp, 1);
+ rc = bnx2_init_nic(bp, 1);
+ if (rc) {
+ netdev_err(bp->dev, "failed to reset NIC, closing\n");
+ bnx2_napi_enable(bp);
+ dev_close(bp->dev);
+ rtnl_unlock();
+ return;
+ }
atomic_set(&bp->intr_sem, 1);
bnx2_netif_start(bp, true);
@@ -6313,7 +6370,7 @@ static void
bnx2_dump_state(struct bnx2 *bp)
{
struct net_device *dev = bp->dev;
- u32 mcp_p0, mcp_p1, val1, val2;
+ u32 val1, val2;
pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
@@ -6326,15 +6383,6 @@ bnx2_dump_state(struct bnx2 *bp)
REG_RD(bp, BNX2_EMAC_RX_STATUS));
netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
- mcp_p0 = BNX2_MCP_STATE_P0;
- mcp_p1 = BNX2_MCP_STATE_P1;
- } else {
- mcp_p0 = BNX2_MCP_STATE_P0_5708;
- mcp_p1 = BNX2_MCP_STATE_P1_5708;
- }
- netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
- bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
if (bp->flags & BNX2_FLAG_USING_MSIX)
@@ -6348,6 +6396,7 @@ bnx2_tx_timeout(struct net_device *dev)
struct bnx2 *bp = netdev_priv(dev);
bnx2_dump_state(bp);
+ bnx2_dump_mcp_state(bp);
/* This allows the netif to be shutdown gracefully before resetting */
schedule_work(&bp->reset_task);
@@ -6532,8 +6581,6 @@ bnx2_close(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
- cancel_work_sync(&bp->reset_task);
-
bnx2_disable_int_sync(bp);
bnx2_napi_disable(bp);
del_timer_sync(&bp->timer);
@@ -7908,9 +7955,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
if (CHIP_NUM(bp) == CHIP_NUM_5709) {
- if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
- dev_err(&pdev->dev,
- "Cannot find PCIE capability, aborting\n");
+ if (!pci_is_pcie(pdev)) {
+ dev_err(&pdev->dev, "Not PCIE, aborting\n");
rc = -EIO;
goto err_out_unmap;
}
@@ -8051,7 +8097,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->fw_version[j++] = ' ';
for (i = 0; i < 3 && j < 28; i++) {
reg = bnx2_reg_rd_ind(bp, addr + i * 4);
- reg = swab32(reg);
+ reg = be32_to_cpu(reg);
memcpy(&bp->fw_version[j], &reg, 4);
j += 4;
}
@@ -8177,6 +8223,12 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->timer.data = (unsigned long) bp;
bp->timer.function = bnx2_timer;
+#ifdef BCM_CNIC
+ if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
+ bp->cnic_eth_dev.max_iscsi_conn =
+ (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
+ BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
+#endif
pci_save_state(pdev);
return 0;
@@ -8358,6 +8410,7 @@ bnx2_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
del_timer_sync(&bp->timer);
+ cancel_work_sync(&bp->reset_task);
if (bp->mips_firmware)
release_firmware(bp->mips_firmware);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index bf371f6fe15..fc50d4267df 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -7368,6 +7368,21 @@ struct bnx2_rv2p_fw_file {
#define BNX2_RPHY_SERDES_LINK 0x374
#define BNX2_RPHY_COPPER_LINK 0x378
+#define BNX2_ISCSI_INITIATOR 0x3dc
+#define BNX2_ISCSI_INITIATOR_EN 0x00080000
+
+#define BNX2_ISCSI_MAX_CONN 0x3e4
+#define BNX2_ISCSI_MAX_CONN_MASK 0xffff0000
+#define BNX2_ISCSI_MAX_CONN_SHIFT 16
+
#define HOST_VIEW_SHMEM_BASE 0x167c00
+#define DP_SHMEM_LINE(bp, offset) \
+ netdev_err(bp->dev, "DEBUG: %08x: %08x %08x %08x %08x\n", \
+ offset, \
+ bnx2_shmem_rd(bp, offset), \
+ bnx2_shmem_rd(bp, offset + 4), \
+ bnx2_shmem_rd(bp, offset + 8), \
+ bnx2_shmem_rd(bp, offset + 12))
+
#endif
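
DP_SHMEM_LINE() prints one line of four consecutive shared-memory words; for example, the DP_SHMEM_LINE(bp, 0x3cc) call in bnx2_dump_mcp_state() expands to roughly:

	netdev_err(bp->dev, "DEBUG: %08x: %08x %08x %08x %08x\n",
		   0x3cc,
		   bnx2_shmem_rd(bp, 0x3cc),
		   bnx2_shmem_rd(bp, 0x3cc + 4),
		   bnx2_shmem_rd(bp, 0x3cc + 8),
		   bnx2_shmem_rd(bp, 0x3cc + 12));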
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index bb83a296127..48fbdd48f88 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_BNX2X) += bnx2x.o
-bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o
+bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 668a578c49e..c423504a755 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -14,6 +14,7 @@
#ifndef BNX2X_H
#define BNX2X_H
#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
#include <linux/types.h>
/* compilation time flags */
@@ -22,14 +23,10 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.62.12-0"
-#define DRV_MODULE_RELDATE "2011/03/20"
+#define DRV_MODULE_VERSION "1.70.00-0"
+#define DRV_MODULE_RELDATE "2011/06/13"
#define BNX2X_BC_VER 0x040200
-#define BNX2X_MULTI_QUEUE
-
-#define BNX2X_NEW_NAPI
-
#if defined(CONFIG_DCB)
#define BCM_DCBNL
#endif
@@ -47,11 +44,12 @@
#endif
#include <linux/mdio.h>
-#include <linux/pci.h>
+
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
+#include "bnx2x_sp.h"
#include "bnx2x_dcb.h"
#include "bnx2x_stats.h"
@@ -80,6 +78,12 @@ do { \
##__args); \
} while (0)
+#define DP_CONT(__mask, __fmt, __args...) \
+do { \
+ if (bp->msg_enable & (__mask)) \
+ pr_cont(__fmt, ##__args); \
+} while (0)
+
/* errors debug print */
#define BNX2X_DBG_ERR(__fmt, __args...) \
do { \
@@ -111,9 +115,12 @@ do { \
dev_info(&bp->pdev->dev, __fmt, ##__args); \
} while (0)
-void bnx2x_panic_dump(struct bnx2x *bp);
+#define BNX2X_MAC_FMT "%pM"
+#define BNX2X_MAC_PRN_LIST(mac) (mac)
+
#ifdef BNX2X_STOP_ON_ERROR
+void bnx2x_int_disable(struct bnx2x *bp);
#define bnx2x_panic() do { \
bp->panic = 1; \
BNX2X_ERR("driver assert\n"); \
@@ -233,22 +240,22 @@ void bnx2x_panic_dump(struct bnx2x *bp);
*
*/
/* iSCSI L2 */
-#define BNX2X_ISCSI_ETH_CL_ID 17
-#define BNX2X_ISCSI_ETH_CID 17
+#define BNX2X_ISCSI_ETH_CL_ID_IDX 1
+#define BNX2X_ISCSI_ETH_CID 49
/* FCoE L2 */
-#define BNX2X_FCOE_ETH_CL_ID 18
-#define BNX2X_FCOE_ETH_CID 18
+#define BNX2X_FCOE_ETH_CL_ID_IDX 2
+#define BNX2X_FCOE_ETH_CID 50
/** Additional rings budgeting */
#ifdef BCM_CNIC
-#define CNIC_CONTEXT_USE 1
-#define FCOE_CONTEXT_USE 1
+#define CNIC_PRESENT 1
+#define FCOE_PRESENT 1
#else
-#define CNIC_CONTEXT_USE 0
-#define FCOE_CONTEXT_USE 0
+#define CNIC_PRESENT 0
+#define FCOE_PRESENT 0
#endif /* BCM_CNIC */
-#define NONE_ETH_CONTEXT_USE (FCOE_CONTEXT_USE)
+#define NON_ETH_CONTEXT_USE (FCOE_PRESENT)
#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@@ -256,8 +263,35 @@ void bnx2x_panic_dump(struct bnx2x *bp);
#define SM_RX_ID 0
#define SM_TX_ID 1
-/* fast path */
+/* defines for multiple tx priority indices */
+#define FIRST_TX_ONLY_COS_INDEX 1
+#define FIRST_TX_COS_INDEX 0
+
+/* defines for decoding the fastpath index and the cos index out of the
+ * transmission queue index
+ */
+#define MAX_TXQS_PER_COS FP_SB_MAX_E1x
+
+#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS)
+#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS)
+
+/* rules for calculating the cids of tx-only connections */
+#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS)
+#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS)
+
+/* fp index inside class of service range */
+#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS)
+
+/*
+ * 0..15 eth cos0
+ * 16..31 eth cos1 if applicable
+ * 32..47 eth cos2 if applicable
+ * fcoe queue follows eth queues (16, 32, 48 depending on cos)
+ */
+#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos)
+#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp))
+/* fast path */
struct sw_rx_bd {
struct sk_buff *skb;
DEFINE_DMA_UNMAP_ADDR(mapping);
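
With MAX_TXQS_PER_COS defined as FP_SB_MAX_E1x (16, see further down in this header), the decode macros added in the hunk above split a transmission-queue index into a fastpath index and a class-of-service index. A worked illustration:

	/* txq_index 35 with 16 queues per COS */
	int fp_index = TXQ_TO_FP(35);			/* 35 % 16 == 3 */
	int cos      = TXQ_TO_COS(35);			/* 35 / 16 == 2 */
	int cid      = CID_COS_TO_TX_ONLY_CID(3, 2);	/* 3 + 2 * 16 == 35 */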
@@ -283,44 +317,73 @@ union db_prod {
/* MC hsi */
-#define BCM_PAGE_SHIFT 12
-#define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT)
-#define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1))
+#define BCM_PAGE_SHIFT 12
+#define BCM_PAGE_SIZE (1 << BCM_PAGE_SHIFT)
+#define BCM_PAGE_MASK (~(BCM_PAGE_SIZE - 1))
#define BCM_PAGE_ALIGN(addr) (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
-#define PAGES_PER_SGE_SHIFT 0
-#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
-#define SGE_PAGE_SIZE PAGE_SIZE
-#define SGE_PAGE_SHIFT PAGE_SHIFT
-#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define PAGES_PER_SGE_SHIFT 0
+#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
+#define SGE_PAGE_SIZE PAGE_SIZE
+#define SGE_PAGE_SHIFT PAGE_SHIFT
+#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
/* SGE ring related macros */
-#define NUM_RX_SGE_PAGES 2
+#define NUM_RX_SGE_PAGES 2
#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
-#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
+#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
/* RX_SGE_CNT is promised to be a power of 2 */
-#define RX_SGE_MASK (RX_SGE_CNT - 1)
-#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
-#define MAX_RX_SGE (NUM_RX_SGE - 1)
+#define RX_SGE_MASK (RX_SGE_CNT - 1)
+#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
+#define MAX_RX_SGE (NUM_RX_SGE - 1)
#define NEXT_SGE_IDX(x) ((((x) & RX_SGE_MASK) == \
(MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
-#define RX_SGE(x) ((x) & MAX_RX_SGE)
+#define RX_SGE(x) ((x) & MAX_RX_SGE)
+
+/* Manipulate a bit vector defined as an array of u64 */
-/* SGE producer mask related macros */
/* Number of bits in one sge_mask array element */
-#define RX_SGE_MASK_ELEM_SZ 64
-#define RX_SGE_MASK_ELEM_SHIFT 6
-#define RX_SGE_MASK_ELEM_MASK ((u64)RX_SGE_MASK_ELEM_SZ - 1)
+#define BIT_VEC64_ELEM_SZ 64
+#define BIT_VEC64_ELEM_SHIFT 6
+#define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1)
+
+
+#define __BIT_VEC64_SET_BIT(el, bit) \
+ do { \
+ el = ((el) | ((u64)0x1 << (bit))); \
+ } while (0)
+
+#define __BIT_VEC64_CLEAR_BIT(el, bit) \
+ do { \
+ el = ((el) & (~((u64)0x1 << (bit)))); \
+ } while (0)
+
+
+#define BIT_VEC64_SET_BIT(vec64, idx) \
+ __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+ (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
+ __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+ (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_TEST_BIT(vec64, idx) \
+ (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
+ ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
/* Creates a bitmask of all ones in less significant bits.
idx - index of the most significant bit in the created mask */
-#define RX_SGE_ONES_MASK(idx) \
- (((u64)0x1 << (((idx) & RX_SGE_MASK_ELEM_MASK) + 1)) - 1)
-#define RX_SGE_MASK_ELEM_ONE_MASK ((u64)(~0))
+#define BIT_VEC64_ONES_MASK(idx) \
+ (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
+#define BIT_VEC64_ELEM_ONE_MASK ((u64)(~0))
+
+/*******************************************************/
+
+
/* Number of u64 elements in SGE mask array */
#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
- RX_SGE_MASK_ELEM_SZ)
+ BIT_VEC64_ELEM_SZ)
#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
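
The SGE-mask helpers are generalized here into BIT_VEC64_* macros that operate on an array of u64. A minimal usage sketch:

	u64 vec[2] = { 0, 0 };			/* 128-bit vector */

	BIT_VEC64_SET_BIT(vec, 70);		/* sets bit 6 of vec[1] */
	if (BIT_VEC64_TEST_BIT(vec, 70))	/* -> 1 */
		BIT_VEC64_CLEAR_BIT(vec, 70);	/* clears it again */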
@@ -331,7 +394,53 @@ union host_hc_status_block {
struct host_hc_status_block_e2 *e2_sb;
};
+struct bnx2x_agg_info {
+ /*
+ * First aggregation buffer is an skb, the following - are pages.
+ * We will preallocate the skbs for each aggregation when
+ * we open the interface and will replace the BD at the consumer
+ * with this one when we receive the TPA_START CQE in order to
+ * keep the Rx BD ring consistent.
+ */
+ struct sw_rx_bd first_buf;
+ u8 tpa_state;
+#define BNX2X_TPA_START 1
+#define BNX2X_TPA_STOP 2
+#define BNX2X_TPA_ERROR 3
+ u8 placement_offset;
+ u16 parsing_flags;
+ u16 vlan_tag;
+ u16 len_on_bd;
+};
+
+#define Q_STATS_OFFSET32(stat_name) \
+ (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
+
+struct bnx2x_fp_txdata {
+
+ struct sw_tx_bd *tx_buf_ring;
+
+ union eth_tx_bd_types *tx_desc_ring;
+ dma_addr_t tx_desc_mapping;
+
+ u32 cid;
+
+ union db_prod tx_db;
+
+ u16 tx_pkt_prod;
+ u16 tx_pkt_cons;
+ u16 tx_bd_prod;
+ u16 tx_bd_cons;
+
+ unsigned long tx_pkt;
+
+ __le16 *tx_cons_sb;
+
+ int txq_index;
+};
+
struct bnx2x_fastpath {
+ struct bnx2x *bp; /* parent */
#define BNX2X_NAPI_WEIGHT 128
struct napi_struct napi;
@@ -346,10 +455,8 @@ struct bnx2x_fastpath {
dma_addr_t status_blk_mapping;
- struct sw_tx_bd *tx_buf_ring;
-
- union eth_tx_bd_types *tx_desc_ring;
- dma_addr_t tx_desc_mapping;
+ u8 max_cos; /* actual number of active tx coses */
+ struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS];
struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
@@ -366,32 +473,15 @@ struct bnx2x_fastpath {
u64 sge_mask[RX_SGE_MASK_LEN];
- int state;
-#define BNX2X_FP_STATE_CLOSED 0
-#define BNX2X_FP_STATE_IRQ 0x80000
-#define BNX2X_FP_STATE_OPENING 0x90000
-#define BNX2X_FP_STATE_OPEN 0xa0000
-#define BNX2X_FP_STATE_HALTING 0xb0000
-#define BNX2X_FP_STATE_HALTED 0xc0000
-#define BNX2X_FP_STATE_TERMINATING 0xd0000
-#define BNX2X_FP_STATE_TERMINATED 0xe0000
+ u32 cid;
+
+ __le16 fp_hc_idx;
u8 index; /* number in fp array */
u8 cl_id; /* eth client id */
u8 cl_qzone_id;
u8 fw_sb_id; /* status block number in FW */
u8 igu_sb_id; /* status block number in HW */
- u32 cid;
-
- union db_prod tx_db;
-
- u16 tx_pkt_prod;
- u16 tx_pkt_cons;
- u16 tx_bd_prod;
- u16 tx_bd_cons;
- __le16 *tx_cons_sb;
-
- __le16 fp_hc_idx;
u16 rx_bd_prod;
u16 rx_bd_cons;
@@ -401,24 +491,19 @@ struct bnx2x_fastpath {
/* The last maximal completed SGE */
u16 last_max_sge;
__le16 *rx_cons_sb;
-
- unsigned long tx_pkt,
- rx_pkt,
+ unsigned long rx_pkt,
rx_calls;
/* TPA related */
- struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
- u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
-#define BNX2X_TPA_START 1
-#define BNX2X_TPA_STOP 2
+ struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
u8 disable_tpa;
#ifdef BNX2X_STOP_ON_ERROR
u64 tpa_queue_used;
#endif
- struct tstorm_per_client_stats old_tclient;
- struct ustorm_per_client_stats old_uclient;
- struct xstorm_per_client_stats old_xclient;
+ struct tstorm_per_queue_stats old_tclient;
+ struct ustorm_per_queue_stats old_uclient;
+ struct xstorm_per_queue_stats old_xclient;
struct bnx2x_eth_q_stats eth_q_stats;
/* The size is calculated using the following:
@@ -427,7 +512,13 @@ struct bnx2x_fastpath {
4 (for the digits and to make it DWORD aligned) */
#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
char name[FP_NAME_SIZE];
- struct bnx2x *bp; /* parent */
+
+ /* MACs object */
+ struct bnx2x_vlan_mac_obj mac_obj;
+
+ /* Queue State object */
+ struct bnx2x_queue_sp_obj q_obj;
+
};
#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
@@ -435,11 +526,17 @@ struct bnx2x_fastpath {
/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
-#ifdef BCM_CNIC
-/* FCoE L2 `fastpath' is right after the eth entries */
+/* FCoE L2 `fastpath' entry is right after the eth entries */
#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
+#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
+ txdata[FIRST_TX_COS_INDEX].var)
+
+
+#define IS_ETH_FP(fp) (fp->index < \
+ BNX2X_NUM_ETH_QUEUES(fp->bp))
+#ifdef BCM_CNIC
#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
#else
@@ -449,77 +546,68 @@ struct bnx2x_fastpath {
/* MC hsi */
-#define MAX_FETCH_BD 13 /* HW max BDs per packet */
-#define RX_COPY_THRESH 92
+#define MAX_FETCH_BD 13 /* HW max BDs per packet */
+#define RX_COPY_THRESH 92
-#define NUM_TX_RINGS 16
+#define NUM_TX_RINGS 16
#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
-#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
-#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
-#define MAX_TX_BD (NUM_TX_BD - 1)
-#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
-#define INIT_JUMBO_TX_RING_SIZE MAX_TX_AVAIL
-#define INIT_TX_RING_SIZE MAX_TX_AVAIL
+#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
+#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
+#define MAX_TX_BD (NUM_TX_BD - 1)
+#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
(MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
-#define TX_BD(x) ((x) & MAX_TX_BD)
-#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
+#define TX_BD(x) ((x) & MAX_TX_BD)
+#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
-#define NUM_RX_RINGS 8
+#define NUM_RX_RINGS 8
#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
-#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2)
-#define RX_DESC_MASK (RX_DESC_CNT - 1)
-#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
-#define MAX_RX_BD (NUM_RX_BD - 1)
-#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
-#define MIN_RX_SIZE_TPA 72
-#define MIN_RX_SIZE_NONTPA 10
-#define INIT_JUMBO_RX_RING_SIZE MAX_RX_AVAIL
-#define INIT_RX_RING_SIZE MAX_RX_AVAIL
+#define MAX_RX_DESC_CNT (RX_DESC_CNT - 2)
+#define RX_DESC_MASK (RX_DESC_CNT - 1)
+#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
+#define MAX_RX_BD (NUM_RX_BD - 1)
+#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
+#define MIN_RX_AVAIL 128
+
+#define MIN_RX_SIZE_TPA_HW (CHIP_IS_E1(bp) ? \
+ ETH_MIN_RX_CQES_WITH_TPA_E1 : \
+ ETH_MIN_RX_CQES_WITH_TPA_E1H_E2)
+#define MIN_RX_SIZE_NONTPA_HW ETH_MIN_RX_CQES_WITHOUT_TPA
+#define MIN_RX_SIZE_TPA (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL))
+#define MIN_RX_SIZE_NONTPA (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\
+ MIN_RX_AVAIL))
+
#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
(MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
-#define RX_BD(x) ((x) & MAX_RX_BD)
+#define RX_BD(x) ((x) & MAX_RX_BD)
-/* As long as CQE is 4 times bigger than BD entry we have to allocate
- 4 times more pages for CQ ring in order to keep it balanced with
- BD ring */
-#define NUM_RCQ_RINGS (NUM_RX_RINGS * 4)
+/*
+ * As long as CQE is X times bigger than BD entry we have to allocate X times
+ * more pages for CQ ring in order to keep it balanced with BD ring
+ */
+#define CQE_BD_REL (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
+#define NUM_RCQ_RINGS (NUM_RX_RINGS * CQE_BD_REL)
#define RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
-#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1)
-#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
-#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
-#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
+#define MAX_RCQ_DESC_CNT (RCQ_DESC_CNT - 1)
+#define NUM_RCQ_BD (RCQ_DESC_CNT * NUM_RCQ_RINGS)
+#define MAX_RCQ_BD (NUM_RCQ_BD - 1)
+#define MAX_RCQ_AVAIL (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
#define NEXT_RCQ_IDX(x) ((((x) & MAX_RCQ_DESC_CNT) == \
(MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
-#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
+#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
/* This is needed for determining of last_max */
-#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
+#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
+#define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b))
-#define __SGE_MASK_SET_BIT(el, bit) \
- do { \
- el = ((el) | ((u64)0x1 << (bit))); \
- } while (0)
-
-#define __SGE_MASK_CLEAR_BIT(el, bit) \
- do { \
- el = ((el) & (~((u64)0x1 << (bit)))); \
- } while (0)
-
-#define SGE_MASK_SET_BIT(fp, idx) \
- __SGE_MASK_SET_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
- ((idx) & RX_SGE_MASK_ELEM_MASK))
-
-#define SGE_MASK_CLEAR_BIT(fp, idx) \
- __SGE_MASK_CLEAR_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
- ((idx) & RX_SGE_MASK_ELEM_MASK))
+#define BNX2X_SWCID_SHIFT 17
+#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1)
/* used on a CID received from the HW */
-#define SW_CID(x) (le32_to_cpu(x) & \
- (COMMON_RAMROD_ETH_RX_CQE_CID >> 7))
+#define SW_CID(x) (le32_to_cpu(x) & BNX2X_SWCID_MASK)
#define CQE_CMD(x) (le32_to_cpu(x) >> \
COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
@@ -529,6 +617,9 @@ struct bnx2x_fastpath {
#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
+#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
+#error "Min DB doorbell stride is 8"
+#endif
#define DPM_TRIGER_TYPE 0x40
#define DOORBELL(bp, cid, val) \
do { \
@@ -557,13 +648,11 @@ struct bnx2x_fastpath {
/* stuff added to make the code fit 80Col */
-
-#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
-
-#define TPA_TYPE_START ETH_FAST_PATH_RX_CQE_START_FLG
-#define TPA_TYPE_END ETH_FAST_PATH_RX_CQE_END_FLG
-#define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \
- (TPA_TYPE_START | TPA_TYPE_END))
+#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
+#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG)
+#define CQE_TYPE_STOP(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG)
+#define CQE_TYPE_SLOW(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD)
+#define CQE_TYPE_FAST(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH)
#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
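
One of the hunks above also replaces the hard-coded factor of 4 in NUM_RCQ_RINGS with CQE_BD_REL, computed from the structure sizes. Assuming the sizes implied by the removed comment (an 8-byte RX BD and a CQE four times as large), the result is unchanged:

	/* CQE_BD_REL    = sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd)
	 *               = 32 / 8 = 4			(assumed sizes)
	 * NUM_RCQ_RINGS = NUM_RX_RINGS * CQE_BD_REL
	 *               = 8 * 4 = 32			(NUM_RX_RINGS is 8 above)
	 */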
@@ -590,15 +679,38 @@ struct bnx2x_fastpath {
#define BNX2X_RX_SUM_FIX(cqe) \
BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
-#define U_SB_ETH_RX_CQ_INDEX 1
-#define U_SB_ETH_RX_BD_INDEX 2
-#define C_SB_ETH_TX_CQ_INDEX 5
+
+#define FP_USB_FUNC_OFF \
+ offsetof(struct cstorm_status_block_u, func)
+#define FP_CSB_FUNC_OFF \
+ offsetof(struct cstorm_status_block_c, func)
+
+#define HC_INDEX_TOE_RX_CQ_CONS 0 /* Formerly Ustorm TOE CQ index */
+ /* (HC_INDEX_U_TOE_RX_CQ_CONS) */
+#define HC_INDEX_ETH_RX_CQ_CONS 1 /* Formerly Ustorm ETH CQ index */
+ /* (HC_INDEX_U_ETH_RX_CQ_CONS) */
+#define HC_INDEX_ETH_RX_BD_CONS 2 /* Formerly Ustorm ETH BD index */
+ /* (HC_INDEX_U_ETH_RX_BD_CONS) */
+
+#define HC_INDEX_TOE_TX_CQ_CONS 4 /* Formerly Cstorm TOE CQ index */
+ /* (HC_INDEX_C_TOE_TX_CQ_CONS) */
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0 5 /* Formerly Cstorm ETH CQ index */
+ /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1 6 /* Formerly Cstorm ETH CQ index */
+ /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2 7 /* Formerly Cstorm ETH CQ index */
+ /* (HC_INDEX_C_ETH_TX_CQ_CONS) */
+
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
+
#define BNX2X_RX_SB_INDEX \
- (&fp->sb_index_values[U_SB_ETH_RX_CQ_INDEX])
+ (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
-#define BNX2X_TX_SB_INDEX \
- (&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX])
+#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0
+
+#define BNX2X_TX_SB_INDEX_COS0 \
+ (&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0])
/* end of fast path */
@@ -615,41 +727,74 @@ struct bnx2x_common {
#define CHIP_NUM_57711 0x164f
#define CHIP_NUM_57711E 0x1650
#define CHIP_NUM_57712 0x1662
-#define CHIP_NUM_57712E 0x1663
+#define CHIP_NUM_57712_MF 0x1663
+#define CHIP_NUM_57713 0x1651
+#define CHIP_NUM_57713E 0x1652
+#define CHIP_NUM_57800 0x168a
+#define CHIP_NUM_57800_MF 0x16a5
+#define CHIP_NUM_57810 0x168e
+#define CHIP_NUM_57810_MF 0x16ae
+#define CHIP_NUM_57840 0x168d
+#define CHIP_NUM_57840_MF 0x16ab
#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
-#define CHIP_IS_57712E(bp) (CHIP_NUM(bp) == CHIP_NUM_57712E)
+#define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF)
+#define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800)
+#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
+#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810)
+#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840)
+#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF)
#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
CHIP_IS_57711E(bp))
#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
- CHIP_IS_57712E(bp))
+ CHIP_IS_57712_MF(bp))
+#define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \
+ CHIP_IS_57800_MF(bp) || \
+ CHIP_IS_57810(bp) || \
+ CHIP_IS_57810_MF(bp) || \
+ CHIP_IS_57840(bp) || \
+ CHIP_IS_57840_MF(bp))
#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
-#define IS_E1H_OFFSET (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp))
-
-#define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000)
-#define CHIP_REV_Ax 0x00000000
+#define USES_WARPCORE(bp) (CHIP_IS_E3(bp))
+#define IS_E1H_OFFSET (!CHIP_IS_E1(bp))
+
+#define CHIP_REV_SHIFT 12
+#define CHIP_REV_MASK (0xF << CHIP_REV_SHIFT)
+#define CHIP_REV_VAL(bp) (bp->common.chip_id & CHIP_REV_MASK)
+#define CHIP_REV_Ax (0x0 << CHIP_REV_SHIFT)
+#define CHIP_REV_Bx (0x1 << CHIP_REV_SHIFT)
/* assume maximum 5 revisions */
-#define CHIP_REV_IS_SLOW(bp) (CHIP_REV(bp) > 0x00005000)
+#define CHIP_REV_IS_SLOW(bp) (CHIP_REV_VAL(bp) > 0x00005000)
/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
#define CHIP_REV_IS_EMUL(bp) ((CHIP_REV_IS_SLOW(bp)) && \
- !(CHIP_REV(bp) & 0x00001000))
+ !(CHIP_REV_VAL(bp) & 0x00001000))
/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
#define CHIP_REV_IS_FPGA(bp) ((CHIP_REV_IS_SLOW(bp)) && \
- (CHIP_REV(bp) & 0x00001000))
+ (CHIP_REV_VAL(bp) & 0x00001000))
#define CHIP_TIME(bp) ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
-#define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
+#define CHIP_REV_SIM(bp) (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\
+ (CHIP_REV_SHIFT + 1)) \
+ << CHIP_REV_SHIFT)
+#define CHIP_REV(bp) (CHIP_REV_IS_SLOW(bp) ? \
+ CHIP_REV_SIM(bp) :\
+ CHIP_REV_VAL(bp))
+#define CHIP_IS_E3B0(bp) (CHIP_IS_E3(bp) && \
+ (CHIP_REV(bp) == CHIP_REV_Bx))
+#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
+ (CHIP_REV(bp) == CHIP_REV_Ax))
int flash_size;
-#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
-#define NVRAM_TIMEOUT_COUNT 30000
-#define NVRAM_PAGE_SIZE 256
+#define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
+#define BNX2X_NVRAM_TIMEOUT_COUNT 30000
+#define BNX2X_NVRAM_PAGE_SIZE 256
u32 shmem_base;
u32 shmem2_base;
@@ -666,7 +811,7 @@ struct bnx2x_common {
#define INT_BLOCK_MODE_NORMAL 0
#define INT_BLOCK_MODE_BW_COMP 2
#define CHIP_INT_MODE_IS_NBC(bp) \
- (CHIP_IS_E2(bp) && \
+ (!CHIP_IS_E1x(bp) && \
!((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
@@ -712,19 +857,15 @@ struct bnx2x_port {
/* end of port */
-/* e1h Classification CAM line allocations */
-enum {
- CAM_ETH_LINE = 0,
- CAM_ISCSI_ETH_LINE,
- CAM_FIP_ETH_LINE,
- CAM_FIP_MCAST_LINE,
- CAM_MAX_PF_LINE = CAM_FIP_MCAST_LINE
-};
-/* number of MACs per function in NIG memory - used for SI mode */
-#define NIG_LLH_FUNC_MEM_SIZE 16
-/* number of entries in NIG_REG_LLHX_FUNC_MEM */
-#define NIG_LLH_FUNC_MEM_MAX_OFFSET 8
+#define STATS_OFFSET32(stat_name) \
+ (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
+/* slow path */
+
+/* slow path work-queue */
+extern struct workqueue_struct *bnx2x_wq;
+
+#define BNX2X_MAX_NUM_OF_VFS 64
#define BNX2X_VF_ID_INVALID 0xFF
/*
@@ -749,27 +890,10 @@ enum {
* L2 queue is supported. the cid for the FCoE L2 queue is always X.
*/
-#define FP_SB_MAX_E1x 16 /* fast-path interrupt contexts E1x */
-#define FP_SB_MAX_E2 16 /* fast-path interrupt contexts E2 */
-
-/*
- * cid_cnt paramter below refers to the value returned by
- * 'bnx2x_get_l2_cid_count()' routine
- */
-
-/*
- * The number of FP context allocated by the driver == max number of regular
- * L2 queues + 1 for the FCoE L2 queue
- */
-#define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE)
-
-/*
- * The number of FP-SB allocated by the driver == max number of regular L2
- * queues + 1 for the CNIC which also consumes an FP-SB
- */
-#define FP_SB_COUNT(cid_cnt) ((cid_cnt) - FCOE_CONTEXT_USE)
-#define NUM_IGU_SB_REQUIRED(cid_cnt) \
- (FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE)
+/* fast-path interrupt contexts E1x */
+#define FP_SB_MAX_E1x 16
+/* fast-path interrupt contexts E2 */
+#define FP_SB_MAX_E2 HC_SB_MAX_SB_E2
union cdu_context {
struct eth_context eth;
@@ -778,7 +902,7 @@ union cdu_context {
/* CDU host DB constants */
#define CDU_ILT_PAGE_SZ_HW 3
-#define CDU_ILT_PAGE_SZ (4096 << CDU_ILT_PAGE_SZ_HW) /* 32K */
+#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
#ifdef BCM_CNIC
@@ -788,38 +912,63 @@ union cdu_context {
#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
#endif
-#define QM_ILT_PAGE_SZ_HW 3
-#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 32K */
+#define QM_ILT_PAGE_SZ_HW 0
+#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
#define QM_CID_ROUND 1024
#ifdef BCM_CNIC
/* TM (timers) host DB constants */
-#define TM_ILT_PAGE_SZ_HW 2
-#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 16K */
+#define TM_ILT_PAGE_SZ_HW 0
+#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
#define TM_CONN_NUM 1024
#define TM_ILT_SZ (8 * TM_CONN_NUM)
#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
/* SRC (Searcher) host DB constants */
-#define SRC_ILT_PAGE_SZ_HW 3
-#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 32K */
+#define SRC_ILT_PAGE_SZ_HW 0
+#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */
#define SRC_HASH_BITS 10
#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */
#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM)
#define SRC_T2_SZ SRC_ILT_SZ
#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
+
#endif
-#define MAX_DMAE_C 8
+#define MAX_DMAE_C 8
/* DMA memory not used in fastpath */
struct bnx2x_slowpath {
- struct eth_stats_query fw_stats;
- struct mac_configuration_cmd mac_config;
- struct mac_configuration_cmd mcast_config;
- struct mac_configuration_cmd uc_mac_config;
- struct client_init_ramrod_data client_init_data;
+ union {
+ struct mac_configuration_cmd e1x;
+ struct eth_classify_rules_ramrod_data e2;
+ } mac_rdata;
+
+
+ union {
+ struct tstorm_eth_mac_filter_config e1x;
+ struct eth_filter_rules_ramrod_data e2;
+ } rx_mode_rdata;
+
+ union {
+ struct mac_configuration_cmd e1;
+ struct eth_multicast_rules_ramrod_data e2;
+ } mcast_rdata;
+
+ struct eth_rss_update_ramrod_data rss_rdata;
+
+ /* Queue State related ramrods are always sent under rtnl_lock */
+ union {
+ struct client_init_ramrod_data init_data;
+ struct client_update_ramrod_data update_data;
+ } q_rdata;
+
+ union {
+ struct function_start_data func_start;
+ /* pfc configuration for DCBX ramrod */
+ struct flow_control_configuration pfc_config;
+ } func_rdata;
/* used by dmae command executer */
struct dmae_command dmae[MAX_DMAE_C];
@@ -833,8 +982,6 @@ struct bnx2x_slowpath {
u32 wb_comp;
u32 wb_data[4];
- /* pfc configuration for DCBX ramrod */
- struct flow_control_configuration pfc_config;
};
#define bnx2x_sp(bp, var) (&bp->slowpath->var)
@@ -846,7 +993,7 @@ struct bnx2x_slowpath {
#define MAX_DYNAMIC_ATTN_GRPS 8
struct attn_route {
- u32 sig[5];
+ u32 sig[5];
};
struct iro {
@@ -866,13 +1013,15 @@ struct hw_context {
/* forward */
struct bnx2x_ilt;
-typedef enum {
+
+enum bnx2x_recovery_state {
BNX2X_RECOVERY_DONE,
BNX2X_RECOVERY_INIT,
BNX2X_RECOVERY_WAIT,
-} bnx2x_recovery_state_t;
+ BNX2X_RECOVERY_FAILED
+};
-/**
+/*
* Event queue (EQ or event ring) MC hsi
* NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2
*/
@@ -910,6 +1059,31 @@ enum {
BNX2X_LINK_REPORT_TX_FC_ON,
};
+enum {
+ BNX2X_PORT_QUERY_IDX,
+ BNX2X_PF_QUERY_IDX,
+ BNX2X_FIRST_QUEUE_QUERY_IDX,
+};
+
+struct bnx2x_fw_stats_req {
+ struct stats_query_header hdr;
+ struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+};
+
+struct bnx2x_fw_stats_data {
+ struct stats_counter storm_counters;
+ struct per_port_stats port;
+ struct per_pf_stats pf;
+ struct per_queue_stats queue_stats[1];
+};
+
+/* Public slow path states */
+enum {
+ BNX2X_SP_RTNL_SETUP_TC,
+ BNX2X_SP_RTNL_TX_TIMEOUT,
+};
+
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
@@ -919,19 +1093,28 @@ struct bnx2x {
void __iomem *doorbells;
u16 db_size;
+ u8 pf_num; /* absolute PF number */
+ u8 pfid; /* per-path PF number */
+ int base_fw_ndsb; /**/
+#define BP_PATH(bp) (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1))
+#define BP_PORT(bp) (bp->pfid & 1)
+#define BP_FUNC(bp) (bp->pfid)
+#define BP_ABS_FUNC(bp) (bp->pf_num)
+#define BP_E1HVN(bp) (bp->pfid >> 1)
+#define BP_VN(bp) (BP_E1HVN(bp)) /*remove when approved*/
+#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
+#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\
+ BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+
struct net_device *dev;
struct pci_dev *pdev;
- struct iro *iro_arr;
+ const struct iro *iro_arr;
#define IRO (bp->iro_arr)
- atomic_t intr_sem;
-
- bnx2x_recovery_state_t recovery_state;
+ enum bnx2x_recovery_state recovery_state;
int is_leader;
struct msix_entry *msix_table;
-#define INT_MODE_INTx 1
-#define INT_MODE_MSI 2
int tx_ring_size;
@@ -944,7 +1127,8 @@ struct bnx2x {
/* Max supported alignment is 256 (8 shift) */
#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
L1_CACHE_SHIFT : 8)
-#define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT)
+ /* FW uses 2 cache lines alignment for start packet and size */
+#define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT)
#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
struct host_sp_status_block *def_status_blk;
@@ -974,10 +1158,12 @@ struct bnx2x {
__le16 *eq_cons_sb;
atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
- /* Flags for marking that there is a STAT_QUERY or
- SET_MAC ramrod pending */
- int stats_pending;
- int set_mac_pending;
+
+
+ /* Counter for marking that there is a STAT_QUERY ramrod pending */
+ u16 stats_pending;
+ /* Counter for completed statistics ramrods */
+ u16 stats_comp;
/* End of fields used in the performance code paths */
@@ -985,54 +1171,35 @@ struct bnx2x {
int msg_enable;
u32 flags;
-#define PCIX_FLAG 1
-#define PCI_32BIT_FLAG 2
-#define ONE_PORT_FLAG 4
-#define NO_WOL_FLAG 8
-#define USING_DAC_FLAG 0x10
-#define USING_MSIX_FLAG 0x20
-#define USING_MSI_FLAG 0x40
-
-#define TPA_ENABLE_FLAG 0x80
-#define NO_MCP_FLAG 0x100
-#define DISABLE_MSI_FLAG 0x200
+#define PCIX_FLAG (1 << 0)
+#define PCI_32BIT_FLAG (1 << 1)
+#define ONE_PORT_FLAG (1 << 2)
+#define NO_WOL_FLAG (1 << 3)
+#define USING_DAC_FLAG (1 << 4)
+#define USING_MSIX_FLAG (1 << 5)
+#define USING_MSI_FLAG (1 << 6)
+#define DISABLE_MSI_FLAG (1 << 7)
+#define TPA_ENABLE_FLAG (1 << 8)
+#define NO_MCP_FLAG (1 << 9)
+
#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
-#define MF_FUNC_DIS 0x1000
-#define FCOE_MACS_SET 0x2000
-#define NO_FCOE_FLAG 0x4000
-#define NO_ISCSI_OOO_FLAG 0x8000
-#define NO_ISCSI_FLAG 0x10000
+#define MF_FUNC_DIS (1 << 11)
+#define OWN_CNIC_IRQ (1 << 12)
+#define NO_ISCSI_OOO_FLAG (1 << 13)
+#define NO_ISCSI_FLAG (1 << 14)
+#define NO_FCOE_FLAG (1 << 15)
-#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
-
- int pf_num; /* absolute PF number */
- int pfid; /* per-path PF number */
- int base_fw_ndsb;
-#define BP_PATH(bp) (!CHIP_IS_E2(bp) ? \
- 0 : (bp->pf_num & 1))
-#define BP_PORT(bp) (bp->pfid & 1)
-#define BP_FUNC(bp) (bp->pfid)
-#define BP_ABS_FUNC(bp) (bp->pf_num)
-#define BP_E1HVN(bp) (bp->pfid >> 1)
-#define BP_VN(bp) (CHIP_MODE_IS_4_PORT(bp) ? \
- 0 : BP_E1HVN(bp))
-#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
-#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\
- BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2 : 1))
-
-#ifdef BCM_CNIC
-#define BCM_CNIC_CID_START 16
-#define BCM_ISCSI_ETH_CL_ID 17
-#endif
+#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
int pm_cap;
- int pcie_cap;
int mrrs;
struct delayed_work sp_task;
- struct delayed_work reset_task;
+ struct delayed_work sp_rtnl_task;
+
+ struct delayed_work period_task;
struct timer_list timer;
int current_interval;
@@ -1052,9 +1219,9 @@ struct bnx2x {
struct cmng_struct_per_port cmng;
u32 vn_weight_sum;
-
u32 mf_config[E1HVN_MAX];
u32 mf2_config[E2_FUNC_MAX];
+ u32 path_has_ovlan; /* E3 */
u16 mf_ov;
u8 mf_mode;
#define IS_MF(bp) (bp->mf_mode != 0)
@@ -1079,33 +1246,24 @@ struct bnx2x {
u32 lin_cnt;
- int state;
+ u16 state;
#define BNX2X_STATE_CLOSED 0
#define BNX2X_STATE_OPENING_WAIT4_LOAD 0x1000
#define BNX2X_STATE_OPENING_WAIT4_PORT 0x2000
#define BNX2X_STATE_OPEN 0x3000
#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
-#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
-#define BNX2X_STATE_FUNC_STARTED 0x7000
+
#define BNX2X_STATE_DIAG 0xe000
#define BNX2X_STATE_ERROR 0xf000
int multi_mode;
+#define BNX2X_MAX_PRIORITY 8
+#define BNX2X_MAX_ENTRIES_PER_PRI 16
+#define BNX2X_MAX_COS 3
+#define BNX2X_MAX_TX_COS 2
int num_queues;
int disable_tpa;
- int int_mode;
- u32 *rx_indir_table;
-
- struct tstorm_eth_mac_filter_config mac_filters;
-#define BNX2X_ACCEPT_NONE 0x0000
-#define BNX2X_ACCEPT_UNICAST 0x0001
-#define BNX2X_ACCEPT_MULTICAST 0x0002
-#define BNX2X_ACCEPT_ALL_UNICAST 0x0004
-#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008
-#define BNX2X_ACCEPT_BROADCAST 0x0010
-#define BNX2X_ACCEPT_UNMATCHED_UCAST 0x0020
-#define BNX2X_PROMISCUOUS_MODE 0x10000
u32 rx_mode;
#define BNX2X_RX_MODE_NONE 0
@@ -1113,7 +1271,6 @@ struct bnx2x {
#define BNX2X_RX_MODE_ALLMULTI 2
#define BNX2X_RX_MODE_PROMISC 3
#define BNX2X_MAX_MULTICAST 64
-#define BNX2X_MAX_EMUL_MULTI 16
u8 igu_dsb_id;
u8 igu_base_sb;
@@ -1122,16 +1279,53 @@ struct bnx2x {
struct bnx2x_slowpath *slowpath;
dma_addr_t slowpath_mapping;
+
+ /* Total number of FW statistics requests */
+ u8 fw_stats_num;
+
+ /*
+ * This is a memory buffer that will contain both statistics
+ * ramrod request and data.
+ */
+ void *fw_stats;
+ dma_addr_t fw_stats_mapping;
+
+ /*
+ * FW statistics request shortcut (points at the
+ * beginning of fw_stats buffer).
+ */
+ struct bnx2x_fw_stats_req *fw_stats_req;
+ dma_addr_t fw_stats_req_mapping;
+ int fw_stats_req_sz;
+
+ /*
+ * FW statistics data shortcut (points at the beginning of
+ * fw_stats buffer + fw_stats_req_sz).
+ */
+ struct bnx2x_fw_stats_data *fw_stats_data;
+ dma_addr_t fw_stats_data_mapping;
+ int fw_stats_data_sz;
+
struct hw_context context;
struct bnx2x_ilt *ilt;
#define BP_ILT(bp) ((bp)->ilt)
-#define ILT_MAX_LINES 128
+#define ILT_MAX_LINES 256
+/*
+ * Maximum supported number of RSS queues: number of IGU SBs minus one that goes
+ * to CNIC.
+ */
+#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT)
- int l2_cid_count;
-#define L2_ILT_LINES(bp) (DIV_ROUND_UP((bp)->l2_cid_count, \
- ILT_PAGE_CIDS))
-#define BNX2X_DB_SIZE(bp) ((bp)->l2_cid_count * (1 << BNX2X_DB_SHIFT))
+/*
+ * Maximum CID count that might be required by the bnx2x:
+ * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
+ */
+#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
+ NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
+ ILT_PAGE_CIDS))
+#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
int qm_cid_count;
@@ -1148,16 +1342,18 @@ struct bnx2x {
struct cnic_eth_dev cnic_eth_dev;
union host_hc_status_block cnic_sb;
dma_addr_t cnic_sb_mapping;
-#define CNIC_SB_ID(bp) ((bp)->base_fw_ndsb + BP_L_ID(bp))
-#define CNIC_IGU_SB_ID(bp) ((bp)->igu_base_sb)
struct eth_spe *cnic_kwq;
struct eth_spe *cnic_kwq_prod;
struct eth_spe *cnic_kwq_cons;
struct eth_spe *cnic_kwq_last;
u16 cnic_kwq_pending;
u16 cnic_spq_pending;
- struct mutex cnic_mutex;
u8 fip_mac[ETH_ALEN];
+ struct mutex cnic_mutex;
+ struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
+
+ /* Start index of the "special" (CNIC related) L2 clients */
+ u8 cnic_base_cl_id;
#endif
int dmae_ready;
@@ -1194,6 +1390,8 @@ struct bnx2x {
u16 *init_ops_offsets;
/* Data blob - has 32 bit granularity */
u32 *init_data;
+ u32 init_mode_flags;
+#define INIT_MODE_FLAGS(bp) (bp->init_mode_flags)
/* Zipped PRAM blobs - raw data */
const u8 *tsem_int_table_data;
const u8 *tsem_pram_data;
@@ -1215,10 +1413,9 @@ struct bnx2x {
#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
+#define PHY_FW_VER_LEN 20
char fw_ver[32];
const struct firmware *firmware;
- /* LLDP params */
- struct bnx2x_config_lldp_params lldp_config_params;
/* DCB support on/off */
u16 dcb_state;
@@ -1235,59 +1432,56 @@ struct bnx2x {
bool dcbx_mode_uset;
struct bnx2x_config_dcbx_params dcbx_config_params;
-
struct bnx2x_dcbx_port_params dcbx_port_params;
int dcb_version;
- /* DCBX Negotiation results */
+ /* CAM credit pools */
+ struct bnx2x_credit_pool_obj macs_pool;
+
+ /* RX_MODE object */
+ struct bnx2x_rx_mode_obj rx_mode_obj;
+
+ /* MCAST object */
+ struct bnx2x_mcast_obj mcast_obj;
+
+ /* RSS configuration object */
+ struct bnx2x_rss_config_obj rss_conf_obj;
+
+ /* Function State controlling object */
+ struct bnx2x_func_sp_obj func_obj;
+
+ unsigned long sp_state;
+
+ /* operation indication for the sp_rtnl task */
+ unsigned long sp_rtnl_state;
+
+ /* DCBX Negotiation results */
struct dcbx_features dcbx_local_feat;
u32 dcbx_error;
+
#ifdef BCM_DCBNL
struct dcbx_features dcbx_remote_feat;
u32 dcbx_remote_flags;
#endif
u32 pending_max;
-};
-/**
- * Init queue/func interface
- */
-/* queue init flags */
-#define QUEUE_FLG_TPA 0x0001
-#define QUEUE_FLG_CACHE_ALIGN 0x0002
-#define QUEUE_FLG_STATS 0x0004
-#define QUEUE_FLG_OV 0x0008
-#define QUEUE_FLG_VLAN 0x0010
-#define QUEUE_FLG_COS 0x0020
-#define QUEUE_FLG_HC 0x0040
-#define QUEUE_FLG_DHC 0x0080
-#define QUEUE_FLG_OOO 0x0100
-
-#define QUEUE_DROP_IP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR
-#define QUEUE_DROP_TCP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR
-#define QUEUE_DROP_TTL0 TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0
-#define QUEUE_DROP_UDP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR
-
-
-
-/* rss capabilities */
-#define RSS_IPV4_CAP 0x0001
-#define RSS_IPV4_TCP_CAP 0x0002
-#define RSS_IPV6_CAP 0x0004
-#define RSS_IPV6_TCP_CAP 0x0008
+ /* multiple tx classes of service */
+ u8 max_cos;
-#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
-#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE)
+ /* priority to cos mapping */
+ u8 prio_to_cos[8];
+};
-/* ethtool statistics are displayed for all regular ethernet queues and the
- * fcoe L2 queue if not disabled
- */
-#define BNX2X_NUM_STAT_QUEUES(bp) (NO_FCOE(bp) ? BNX2X_NUM_ETH_QUEUES(bp) : \
- (BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE))
+/* Tx queues may be fewer than or equal to Rx queues */
+extern int num_queues;
+#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
+#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
-#define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE)
+#define BNX2X_MAX_QUEUES(bp) BNX2X_MAX_RSS_COUNT(bp)
+/* #define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1) */
#define RSS_IPV4_CAP_MASK \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
@@ -1302,107 +1496,15 @@ struct bnx2x {
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
/* func init flags */
-#define FUNC_FLG_STATS 0x0001
-#define FUNC_FLG_TPA 0x0002
-#define FUNC_FLG_SPQ 0x0004
-#define FUNC_FLG_LEADING 0x0008 /* PF only */
-
-struct rxq_pause_params {
- u16 bd_th_lo;
- u16 bd_th_hi;
- u16 rcq_th_lo;
- u16 rcq_th_hi;
- u16 sge_th_lo; /* valid iff QUEUE_FLG_TPA */
- u16 sge_th_hi; /* valid iff QUEUE_FLG_TPA */
- u16 pri_map;
-};
+#define FUNC_FLG_RSS 0x0001
+#define FUNC_FLG_STATS 0x0002
+/* removed FUNC_FLG_UNMATCHED 0x0004 */
+#define FUNC_FLG_TPA 0x0008
+#define FUNC_FLG_SPQ 0x0010
+#define FUNC_FLG_LEADING 0x0020 /* PF only */
-struct bnx2x_rxq_init_params {
- /* cxt*/
- struct eth_context *cxt;
-
- /* dma */
- dma_addr_t dscr_map;
- dma_addr_t sge_map;
- dma_addr_t rcq_map;
- dma_addr_t rcq_np_map;
-
- u16 flags;
- u16 drop_flags;
- u16 mtu;
- u16 buf_sz;
- u16 fw_sb_id;
- u16 cl_id;
- u16 spcl_id;
- u16 cl_qzone_id;
-
- /* valid iff QUEUE_FLG_STATS */
- u16 stat_id;
-
- /* valid iff QUEUE_FLG_TPA */
- u16 tpa_agg_sz;
- u16 sge_buf_sz;
- u16 max_sges_pkt;
-
- /* valid iff QUEUE_FLG_CACHE_ALIGN */
- u8 cache_line_log;
-
- u8 sb_cq_index;
- u32 cid;
-
- /* desired interrupts per sec. valid iff QUEUE_FLG_HC */
- u32 hc_rate;
-};
-
-struct bnx2x_txq_init_params {
- /* cxt*/
- struct eth_context *cxt;
-
- /* dma */
- dma_addr_t dscr_map;
-
- u16 flags;
- u16 fw_sb_id;
- u8 sb_cq_index;
- u8 cos; /* valid iff QUEUE_FLG_COS */
- u16 stat_id; /* valid iff QUEUE_FLG_STATS */
- u16 traffic_type;
- u32 cid;
- u16 hc_rate; /* desired interrupts per sec.*/
- /* valid iff QUEUE_FLG_HC */
-
-};
-
-struct bnx2x_client_ramrod_params {
- int *pstate;
- int state;
- u16 index;
- u16 cl_id;
- u32 cid;
- u8 poll;
-#define CLIENT_IS_FCOE 0x01
-#define CLIENT_IS_LEADING_RSS 0x02
- u8 flags;
-};
-
-struct bnx2x_client_init_params {
- struct rxq_pause_params pause;
- struct bnx2x_rxq_init_params rxq_params;
- struct bnx2x_txq_init_params txq_params;
- struct bnx2x_client_ramrod_params ramrod_params;
-};
-
-struct bnx2x_rss_params {
- int mode;
- u16 cap;
- u16 result_mask;
-};
struct bnx2x_func_init_params {
-
- /* rss */
- struct bnx2x_rss_params *rss; /* valid iff FUNC_FLG_RSS */
-
/* dma */
dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */
@@ -1414,42 +1516,40 @@ struct bnx2x_func_init_params {
};
#define for_each_eth_queue(bp, var) \
- for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
+ for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
#define for_each_nondefault_eth_queue(bp, var) \
- for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
-
-#define for_each_napi_queue(bp, var) \
- for (var = 0; \
- var < BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE; var++) \
- if (skip_queue(bp, var)) \
- continue; \
- else
+ for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
#define for_each_queue(bp, var) \
- for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
+ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
if (skip_queue(bp, var)) \
continue; \
else
+/* Skip forwarding FP */
#define for_each_rx_queue(bp, var) \
- for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
+ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
if (skip_rx_queue(bp, var)) \
continue; \
else
+/* Skip OOO FP */
#define for_each_tx_queue(bp, var) \
- for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
+ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
if (skip_tx_queue(bp, var)) \
continue; \
else
#define for_each_nondefault_queue(bp, var) \
- for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) \
+ for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
if (skip_queue(bp, var)) \
continue; \
else
+#define for_each_cos_in_tx_queue(fp, var) \
+ for ((var) = 0; (var) < (fp)->max_cos; (var)++)
+
/* skip rx queue
* if FCOE l2 support is disabled and this is the fcoe L2 queue
*/
@@ -1462,11 +1562,66 @@ struct bnx2x_func_init_params {
#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
-#define WAIT_RAMROD_POLL 0x01
-#define WAIT_RAMROD_COMMON 0x02
+
+
+/**
+ * bnx2x_set_mac_one - configure a single MAC address
+ *
+ * @bp: driver handle
+ * @mac: MAC to configure
+ * @obj: MAC object handle
+ * @set: if 'true' add a new MAC, otherwise - delete
+ * @mac_type: the type of the MAC to configure (e.g. ETH, UC list)
+ * @ramrod_flags: RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT)
+ *
+ * Configures one MAC according to provided parameters or continues the
+ * execution of previously scheduled commands if RAMROD_CONT is set in
+ * ramrod_flags.
+ *
+ * Returns zero if the operation has successfully completed, a positive value
+ * if the operation has been successfully scheduled and a negative value if the
+ * requested operation has failed.
+ */
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ int mac_type, unsigned long *ramrod_flags);
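
For illustration only, a minimal caller of the API above might look like the sketch below. Only the bnx2x_set_mac_one() signature and the RAMROD_COMP_WAIT flag appear elsewhere in this patch; the BNX2X_ETH_MAC constant (following the BNX2X_XXX_MAC naming mentioned in the next comment) and the helper name are assumptions.

/* Hedged sketch: add one ETH MAC and block until the ramrod completes.
 * BNX2X_ETH_MAC and the helper name are illustrative assumptions. */
static int example_add_eth_mac(struct bnx2x *bp, u8 *mac,
			       struct bnx2x_vlan_mac_obj *obj)
{
	unsigned long ramrod_flags = 0;

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);	/* wait for completion */
	return bnx2x_set_mac_one(bp, mac, obj, true,
				 BNX2X_ETH_MAC, &ramrod_flags);
}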
+/**
+ * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
+ *
+ * @bp: driver handle
+ * @mac_obj: MAC object handle
+ * @mac_type: type of the MACs to clear (BNX2X_XXX_MAC)
+ * @wait_for_comp: if 'true' block until completion
+ *
+ * Deletes all MACs of the specific type (e.g. ETH, UC list).
+ *
+ * Returns zero if the operation has successfully completed, a positive value
+ * if the operation has been successfully scheduled and a negative value if the
+ * requested operation has failed.
+ */
+int bnx2x_del_all_macs(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ int mac_type, bool wait_for_comp);
+
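A matching teardown sketch, with the same BNX2X_ETH_MAC assumption; BNX2X_ERR() is used here as it is elsewhere in the driver:

/* Hedged sketch: remove every ETH MAC held by a MAC object, waiting for
 * completion. BNX2X_ETH_MAC is assumed for illustration only. */
static void example_flush_eth_macs(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *obj)
{
	if (bnx2x_del_all_macs(bp, obj, BNX2X_ETH_MAC, true))
		BNX2X_ERR("Failed to delete ETH MACs\n");
}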
+/* Init Function API */
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
void bnx2x_read_mf_cfg(struct bnx2x *bp);
+
/* dmae */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
@@ -1477,22 +1632,12 @@ u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
bool with_comp, u8 comp_type);
-int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
-int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
-u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
- u32 data_hi, u32 data_lo, int common);
-
-/* Clears multicast and unicast list configuration in the chip. */
-void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
-void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
-void bnx2x_invalidate_uc_list(struct bnx2x *bp);
-
+ u32 data_hi, u32 data_lo, int cmd_type);
void bnx2x_update_coalesce(struct bnx2x *bp);
-int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
int wait)
@@ -1648,7 +1793,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
/* must be used on a CID before placing it on a HW ring */
#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
- (BP_E1HVN(bp) << 17) | (x))
+ (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \
+ (x))
#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
@@ -1718,12 +1864,14 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
- AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT)
+ AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
- AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR)
+ AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
#define HW_INTERRUT_ASSERT_SET_1 \
(AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
@@ -1736,17 +1884,22 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
-#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR |\
+#define HW_PRTY_ASSERT_SET_1 (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\
AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\
AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\
AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\
AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\
AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
- AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR)
+ AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
+ AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
#define HW_INTERRUT_ASSERT_SET_2 \
(AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
@@ -1758,6 +1911,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\
AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
@@ -1766,6 +1920,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
+
#define RSS_FLAGS(bp) \
(TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
@@ -1775,6 +1932,30 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
#define MULTI_MASK 0x7f
+
+#define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func)
+#define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func)
+#define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func)
+#define DEF_TSB_FUNC_OFF offsetof(struct tstorm_def_status_block, func)
+
+#define DEF_USB_IGU_INDEX_OFF \
+ offsetof(struct cstorm_def_status_block_u, igu_index)
+#define DEF_CSB_IGU_INDEX_OFF \
+ offsetof(struct cstorm_def_status_block_c, igu_index)
+#define DEF_XSB_IGU_INDEX_OFF \
+ offsetof(struct xstorm_def_status_block, igu_index)
+#define DEF_TSB_IGU_INDEX_OFF \
+ offsetof(struct tstorm_def_status_block, igu_index)
+
+#define DEF_USB_SEGMENT_OFF \
+ offsetof(struct cstorm_def_status_block_u, segment)
+#define DEF_CSB_SEGMENT_OFF \
+ offsetof(struct cstorm_def_status_block_c, segment)
+#define DEF_XSB_SEGMENT_OFF \
+ offsetof(struct xstorm_def_status_block, segment)
+#define DEF_TSB_SEGMENT_OFF \
+ offsetof(struct tstorm_def_status_block, segment)
+
#define BNX2X_SP_DSB_INDEX \
(&bp->def_status_blk->sp_sb.\
index_values[HC_SP_INDEX_ETH_DEF_CONS])
@@ -1786,7 +1967,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
} while (0)
#define GET_FLAG(value, mask) \
- (((value) &= (mask)) >> (mask##_SHIFT))
+ (((value) & (mask)) >> (mask##_SHIFT))
#define GET_FIELD(value, fname) \
(((value) & (fname##_MASK)) >> (fname##_SHIFT))
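
The GET_FLAG change above is a functional fix rather than a cleanup: the old '&=' form clobbered the caller's variable as a side effect. A small illustration, using a hypothetical EX_FIELD mask/shift pair in the driver's usual naming style:

#define EX_FIELD	0x00f0	/* hypothetical mask, illustration only */
#define EX_FIELD_SHIFT	4

u32 val = 0x00f3;
u32 fld = GET_FLAG(val, EX_FIELD);	/* fld == 0xf; val is still 0x00f3 */
/* With the old '&=' form, val would have been rewritten to 0x00f0 here, so a
 * later extraction of another field (e.g. the low nibble) from the same
 * variable would wrongly return 0. */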
@@ -1821,15 +2002,13 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define HC_SEG_ACCESS_ATTN 4
#define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/
-#ifdef BNX2X_MAIN
-#define BNX2X_EXTERN
-#else
-#define BNX2X_EXTERN extern
-#endif
-
-BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
-
-extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
-void bnx2x_push_indir_table(struct bnx2x *bp);
+static const u32 dmae_reg_go_c[] = {
+ DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
+ DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
+ DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+ DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};
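
The dmae_reg_go_c[] table above maps a DMAE command index to its GO (doorbell) register. A hedged sketch of how such a table is typically consumed; REG_WR() and the table come from the driver, while the helper name is illustrative:

/* Illustrative only: kick DMAE command block 'idx' by writing its GO register. */
static inline void example_dmae_go(struct bnx2x *bp, int idx)
{
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}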
+void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_notify_link_changed(struct bnx2x *bp);
#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 289044332ed..37e5790681a 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -17,16 +17,17 @@
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
-
#include "bnx2x_init.h"
+#include "bnx2x_sp.h"
+
-static int bnx2x_setup_irqs(struct bnx2x *bp);
/**
* bnx2x_bz_fp - zero content of the fastpath structure.
@@ -46,6 +47,26 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
/* Restore the NAPI object as it has been already initialized */
fp->napi = orig_napi;
+
+ fp->bp = bp;
+ fp->index = index;
+ if (IS_ETH_FP(fp))
+ fp->max_cos = bp->max_cos;
+ else
+ /* Special queues support only one CoS */
+ fp->max_cos = 1;
+
+ /*
+ * set the TPA flag for each queue. The TPA flag determines the queue's
+ * minimum size, so it must be set prior to queue memory allocation
+ */
+ fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
+
+#ifdef BCM_CNIC
+ /* We don't want TPA on an FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ fp->disable_tpa = 1;
+#endif
}
/**
@@ -71,13 +92,15 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
to_fp->napi = orig_napi;
}
+int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
+
/* free skb in the packet ring at pos idx
* return idx of last bd freed
*/
-static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
u16 idx)
{
- struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
+ struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
struct eth_tx_start_bd *tx_start_bd;
struct eth_tx_bd *tx_data_bd;
struct sk_buff *skb = tx_buf->skb;
@@ -87,15 +110,16 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* prefetch skb end pointer to speedup dev_kfree_skb() */
prefetch(&skb->end);
- DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
- idx, tx_buf, skb);
+ DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
+ txdata->txq_index, idx, tx_buf, skb);
/* unmap first bd */
DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
- tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
+ tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
+
nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
@@ -122,7 +146,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
while (nbd > 0) {
DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
- tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
+ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
if (--nbd)
@@ -138,20 +162,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return new_cons;
}
-int bnx2x_tx_int(struct bnx2x_fastpath *fp)
+int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
- struct bnx2x *bp = fp->bp;
struct netdev_queue *txq;
- u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
+ u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return -1;
#endif
- txq = netdev_get_tx_queue(bp->dev, fp->index);
- hw_cons = le16_to_cpu(*fp->tx_cons_sb);
- sw_cons = fp->tx_pkt_cons;
+ txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
+ hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
+ sw_cons = txdata->tx_pkt_cons;
while (sw_cons != hw_cons) {
u16 pkt_cons;
@@ -160,20 +183,23 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
" pkt_cons %u\n",
- fp->index, hw_cons, sw_cons, pkt_cons);
+ txdata->txq_index, hw_cons, sw_cons, pkt_cons);
- bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
+ bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
sw_cons++;
}
- fp->tx_pkt_cons = sw_cons;
- fp->tx_bd_cons = bd_cons;
+ txdata->tx_pkt_cons = sw_cons;
+ txdata->tx_bd_cons = bd_cons;
/* Need to make the tx_bd_cons update visible to start_xmit()
* before checking for netif_tx_queue_stopped(). Without the
* memory barrier, there is a small possibility that
* start_xmit() will miss it and cause the queue to be stopped
* forever.
+ * On the other hand we need an rmb() here to ensure the proper
+ * ordering of bit testing in the following
+ * netif_tx_queue_stopped(txq) call.
*/
smp_mb();
@@ -192,7 +218,7 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
if ((netif_tx_queue_stopped(txq)) &&
(bp->state == BNX2X_STATE_OPEN) &&
- (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
+ (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
netif_tx_wake_queue(txq);
__netif_tx_unlock(txq);
@@ -225,7 +251,7 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
/* First mark all used pages */
for (i = 0; i < sge_len; i++)
- SGE_MASK_CLEAR_BIT(fp,
+ BIT_VEC64_CLEAR_BIT(fp->sge_mask,
RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
@@ -237,8 +263,8 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
last_max = RX_SGE(fp->last_max_sge);
- last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
- first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
+ last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
+ first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
/* If ring is not full */
if (last_elem + 1 != first_elem)
@@ -249,8 +275,8 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
if (likely(fp->sge_mask[i]))
break;
- fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
- delta += RX_SGE_MASK_ELEM_SZ;
+ fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
+ delta += BIT_VEC64_ELEM_SZ;
}
if (delta > 0) {
@@ -265,33 +291,56 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
- struct sk_buff *skb, u16 cons, u16 prod)
+ struct sk_buff *skb, u16 cons, u16 prod,
+ struct eth_fast_path_rx_cqe *cqe)
{
struct bnx2x *bp = fp->bp;
struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
dma_addr_t mapping;
+ struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+ struct sw_rx_bd *first_buf = &tpa_info->first_buf;
- /* move empty skb from pool to prod and map it */
- prod_rx_buf->skb = fp->tpa_pool[queue].skb;
- mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
- fp->rx_buf_size, DMA_FROM_DEVICE);
- dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
-
- /* move partial skb from cons to pool (don't unmap yet) */
- fp->tpa_pool[queue] = *cons_rx_buf;
-
- /* mark bin state as start - print error if current state != stop */
- if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
+ /* print error if current state != stop */
+ if (tpa_info->tpa_state != BNX2X_TPA_STOP)
BNX2X_ERR("start of bin not in stop [%d]\n", queue);
- fp->tpa_state[queue] = BNX2X_TPA_START;
+ /* Try to map an empty skb from the aggregation info */
+ mapping = dma_map_single(&bp->pdev->dev,
+ first_buf->skb->data,
+ fp->rx_buf_size, DMA_FROM_DEVICE);
+ /*
+ * ...if it fails - move the skb from the consumer to the producer
+ * and set the current aggregation state as ERROR to drop it
+ * when TPA_STOP arrives.
+ */
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ /* Move the BD from the consumer to the producer */
+ bnx2x_reuse_rx_skb(fp, cons, prod);
+ tpa_info->tpa_state = BNX2X_TPA_ERROR;
+ return;
+ }
+
+ /* move empty skb from pool to prod */
+ prod_rx_buf->skb = first_buf->skb;
+ dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
/* point prod_bd to new skb */
prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+ /* move partial skb from cons to pool (don't unmap yet) */
+ *first_buf = *cons_rx_buf;
+
+ /* mark bin state as START */
+ tpa_info->parsing_flags =
+ le16_to_cpu(cqe->pars_flags.flags);
+ tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+ tpa_info->tpa_state = BNX2X_TPA_START;
+ tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
+ tpa_info->placement_offset = cqe->placement_offset;
+
#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
@@ -322,10 +371,17 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
u16 len_on_bd)
{
- /* TPA arrgregation won't have an IP options and TCP options
- * other than timestamp.
+ /*
+ * TPA aggregation won't have either IP options or TCP options
+ * other than timestamp or IPv6 extension headers.
*/
- u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
+ u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
+
+ if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
+ PRS_FLAG_OVERETH_IPV6)
+ hdrs_len += sizeof(struct ipv6hdr);
+ else /* IPv4 */
+ hdrs_len += sizeof(struct iphdr);
	/* Check if there was a TCP timestamp, if there is it will
@@ -340,30 +396,30 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
}
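
Worked example for the bnx2x_set_lro_mss() header-length change above, using the standard fixed header sizes (no IP options): IPv4 gives hdrs_len = ETH_HLEN + sizeof(struct tcphdr) + sizeof(struct iphdr) = 14 + 20 + 20 = 54 bytes, while IPv6 gives 14 + 20 + 40 = 74 bytes; the TCP timestamp option, when present, is accounted for separately by the existing timestamp check.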
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- struct sk_buff *skb,
- struct eth_fast_path_rx_cqe *fp_cqe,
- u16 cqe_idx, u16 parsing_flags)
+ u16 queue, struct sk_buff *skb,
+ struct eth_end_agg_rx_cqe *cqe,
+ u16 cqe_idx)
{
struct sw_rx_page *rx_pg, old_rx_pg;
- u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
u32 i, frag_len, frag_size, pages;
int err;
int j;
+ struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+ u16 len_on_bd = tpa_info->len_on_bd;
- frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
+ frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
/* This is needed in order to enable forwarding support */
if (frag_size)
- skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
- len_on_bd);
+ skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
+ tpa_info->parsing_flags, len_on_bd);
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
pages, cqe_idx);
- BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
- fp_cqe->pkt_len, len_on_bd);
+ BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
bnx2x_panic();
return -EINVAL;
}
@@ -371,8 +427,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Run through the SGL and compose the fragmented skb */
for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
- u16 sge_idx =
- RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
+ u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
/* FW gives the indices of the SGE as if the ring is an array
(meaning that "next" element will consume 2 indices) */
@@ -407,13 +462,28 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 queue, int pad, int len, union eth_rx_cqe *cqe,
+ u16 queue, struct eth_end_agg_rx_cqe *cqe,
u16 cqe_idx)
{
- struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
+ struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+ struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
+ u8 pad = tpa_info->placement_offset;
+ u16 len = tpa_info->len_on_bd;
struct sk_buff *skb = rx_buf->skb;
/* alloc new skb */
- struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+ struct sk_buff *new_skb;
+ u8 old_tpa_state = tpa_info->tpa_state;
+
+ tpa_info->tpa_state = BNX2X_TPA_STOP;
+
+ /* If there was an error during the handling of the TPA_START -
+ * drop this aggregation.
+ */
+ if (old_tpa_state == BNX2X_TPA_ERROR)
+ goto drop;
+
+ /* Try to allocate the new skb */
+ new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
@@ -422,11 +492,6 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
fp->rx_buf_size, DMA_FROM_DEVICE);
if (likely(new_skb)) {
- /* fix ip xsum and give it to the stack */
- /* (no need to map the new skb) */
- u16 parsing_flags =
- le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
-
prefetch(skb);
prefetch(((char *)(skb)) + L1_CACHE_BYTES);
@@ -446,21 +511,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
- {
- struct iphdr *iph;
-
- iph = (struct iphdr *)skb->data;
- iph->check = 0;
- iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
- }
-
- if (!bnx2x_fill_frag_skb(bp, fp, skb,
- &cqe->fast_path_cqe, cqe_idx,
- parsing_flags)) {
- if (parsing_flags & PARSING_FLAGS_VLAN)
- __vlan_hwaccel_put_tag(skb,
- le16_to_cpu(cqe->fast_path_cqe.
- vlan_tag));
+ if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
+ if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
+ __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
napi_gro_receive(&fp->napi, skb);
} else {
DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
@@ -470,16 +523,16 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* put new skb in bin */
- fp->tpa_pool[queue].skb = new_skb;
+ rx_buf->skb = new_skb;
- } else {
- /* else drop the packet and keep the buffer in the bin */
- DP(NETIF_MSG_RX_STATUS,
- "Failed to allocate new skb - dropping packet!\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ return;
}
- fp->tpa_state[queue] = BNX2X_TPA_STOP;
+drop:
+ /* drop the packet and keep the buffer in the bin */
+ DP(NETIF_MSG_RX_STATUS,
+ "Failed to allocate or map a new skb - dropping packet!\n");
+ fp->eth_q_stats.rx_skb_alloc_failed++;
}
/* Set Toeplitz hash value in the skb using the value from the
@@ -533,9 +586,16 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
struct sw_rx_bd *rx_buf = NULL;
struct sk_buff *skb;
union eth_rx_cqe *cqe;
+ struct eth_fast_path_rx_cqe *cqe_fp;
u8 cqe_fp_flags;
+ enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad;
+#ifdef BNX2X_STOP_ON_ERROR
+ if (unlikely(bp->panic))
+ return 0;
+#endif
+
comp_ring_cons = RCQ_BD(sw_comp_cons);
bd_prod = RX_BD(bd_prod);
bd_cons = RX_BD(bd_cons);
@@ -548,17 +608,18 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
PAGE_SIZE + 1));
cqe = &fp->rx_comp_ring[comp_ring_cons];
- cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+ cqe_fp = &cqe->fast_path_cqe;
+ cqe_fp_flags = cqe_fp->type_error_flags;
+ cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
" queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
- cqe_fp_flags, cqe->fast_path_cqe.status_flags,
- le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
- le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
- le16_to_cpu(cqe->fast_path_cqe.pkt_len));
+ cqe_fp_flags, cqe_fp->status_flags,
+ le32_to_cpu(cqe_fp->rss_hash_result),
+ le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
/* is this a slowpath msg? */
- if (unlikely(CQE_TYPE(cqe_fp_flags))) {
+ if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
bnx2x_sp_event(fp, cqe);
goto next_cqe;
@@ -567,61 +628,59 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
rx_buf = &fp->rx_buf_ring[bd_cons];
skb = rx_buf->skb;
prefetch(skb);
- len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
- pad = cqe->fast_path_cqe.placement_offset;
- /* - If CQE is marked both TPA_START and TPA_END it is
- * a non-TPA CQE.
- * - FP CQE will always have either TPA_START or/and
- * TPA_STOP flags set.
- */
- if ((!fp->disable_tpa) &&
- (TPA_TYPE(cqe_fp_flags) !=
- (TPA_TYPE_START | TPA_TYPE_END))) {
- u16 queue = cqe->fast_path_cqe.queue_index;
+ if (!CQE_TYPE_FAST(cqe_fp_type)) {
+#ifdef BNX2X_STOP_ON_ERROR
+ /* sanity check */
+ if (fp->disable_tpa &&
+ (CQE_TYPE_START(cqe_fp_type) ||
+ CQE_TYPE_STOP(cqe_fp_type)))
+ BNX2X_ERR("START/STOP packet while "
+ "disable_tpa type %x\n",
+ CQE_TYPE(cqe_fp_type));
+#endif
- if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
+ if (CQE_TYPE_START(cqe_fp_type)) {
+ u16 queue = cqe_fp->queue_index;
DP(NETIF_MSG_RX_STATUS,
"calling tpa_start on queue %d\n",
queue);
bnx2x_tpa_start(fp, queue, skb,
- bd_cons, bd_prod);
+ bd_cons, bd_prod,
+ cqe_fp);
- /* Set Toeplitz hash for an LRO skb */
+ /* Set Toeplitz hash for LRO skb */
bnx2x_set_skb_rxhash(bp, cqe, skb);
goto next_rx;
- } else { /* TPA_STOP */
+
+ } else {
+ u16 queue =
+ cqe->end_agg_cqe.queue_index;
DP(NETIF_MSG_RX_STATUS,
"calling tpa_stop on queue %d\n",
queue);
- if (!BNX2X_RX_SUM_FIX(cqe))
- BNX2X_ERR("STOP on none TCP "
- "data\n");
-
- /* This is a size of the linear data
- on this skb */
- len = le16_to_cpu(cqe->fast_path_cqe.
- len_on_bd);
- bnx2x_tpa_stop(bp, fp, queue, pad,
- len, cqe, comp_ring_cons);
+ bnx2x_tpa_stop(bp, fp, queue,
+ &cqe->end_agg_cqe,
+ comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
if (bp->panic)
return 0;
#endif
- bnx2x_update_sge_prod(fp,
- &cqe->fast_path_cqe);
+ bnx2x_update_sge_prod(fp, cqe_fp);
goto next_cqe;
}
}
-
- dma_sync_single_for_device(&bp->pdev->dev,
+ /* non TPA */
+ len = le16_to_cpu(cqe_fp->pkt_len);
+ pad = cqe_fp->placement_offset;
+ dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- pad + RX_COPY_THRESH,
- DMA_FROM_DEVICE);
+ pad + RX_COPY_THRESH,
+ DMA_FROM_DEVICE);
prefetch(((char *)(skb)) + L1_CACHE_BYTES);
/* is this an error packet? */
@@ -640,8 +699,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
(len <= RX_COPY_THRESH)) {
struct sk_buff *new_skb;
- new_skb = netdev_alloc_skb(bp->dev,
- len + pad);
+ new_skb = netdev_alloc_skb(bp->dev, len + pad);
if (new_skb == NULL) {
DP(NETIF_MSG_RX_ERR,
"ERROR packet dropped "
@@ -687,6 +745,7 @@ reuse_rx:
skb_checksum_none_assert(skb);
if (bp->dev->features & NETIF_F_RXCSUM) {
+
if (likely(BNX2X_RX_CSUM_OK(cqe)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
@@ -696,10 +755,10 @@ reuse_rx:
skb_record_rx_queue(skb, fp->index);
- if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
- PARSING_FLAGS_VLAN)
+ if (le16_to_cpu(cqe_fp->pars_flags.flags) &
+ PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb,
- le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
+ le16_to_cpu(cqe_fp->vlan_tag));
napi_gro_receive(&fp->napi, skb);
@@ -737,12 +796,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
struct bnx2x_fastpath *fp = fp_cookie;
struct bnx2x *bp = fp->bp;
-
- /* Return here if interrupt is disabled */
- if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
- DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
- return IRQ_HANDLED;
- }
+ u8 cos;
DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
"[fp %d fw_sd %d igusb %d]\n",
@@ -756,7 +810,10 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
/* Handle Rx and Tx according to MSI-X vector */
prefetch(fp->rx_cons_sb);
- prefetch(fp->tx_cons_sb);
+
+ for_each_cos_in_tx_queue(fp, cos)
+ prefetch(fp->txdata[cos].tx_cons_sb);
+
prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
@@ -931,7 +988,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
- ETH_MAX_AGGREGATION_QUEUES_E1H;
+ ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
u16 ring_prod;
int i, j;
@@ -943,11 +1000,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
"mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
if (!fp->disable_tpa) {
- /* Fill the per-aggregation pool */
+ /* Fill the per-aggregation pool */
for (i = 0; i < max_agg_queues; i++) {
- fp->tpa_pool[i].skb =
- netdev_alloc_skb(bp->dev, fp->rx_buf_size);
- if (!fp->tpa_pool[i].skb) {
+ struct bnx2x_agg_info *tpa_info =
+ &fp->tpa_info[i];
+ struct sw_rx_bd *first_buf =
+ &tpa_info->first_buf;
+
+ first_buf->skb = netdev_alloc_skb(bp->dev,
+ fp->rx_buf_size);
+ if (!first_buf->skb) {
BNX2X_ERR("Failed to allocate TPA "
"skb pool for queue[%d] - "
"disabling TPA on this "
@@ -956,10 +1018,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
fp->disable_tpa = 1;
break;
}
- dma_unmap_addr_set((struct sw_rx_bd *)
- &bp->fp->tpa_pool[i],
- mapping, 0);
- fp->tpa_state[i] = BNX2X_TPA_STOP;
+ dma_unmap_addr_set(first_buf, mapping, 0);
+ tpa_info->tpa_state = BNX2X_TPA_STOP;
}
/* "next page" elements initialization */
@@ -975,13 +1035,13 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
BNX2X_ERR("was only able to allocate "
"%d rx sges\n", i);
- BNX2X_ERR("disabling TPA for"
- " queue[%d]\n", j);
+ BNX2X_ERR("disabling TPA for "
+ "queue[%d]\n", j);
/* Cleanup already allocated elements */
- bnx2x_free_rx_sge_range(bp,
- fp, ring_prod);
- bnx2x_free_tpa_pool(bp,
- fp, max_agg_queues);
+ bnx2x_free_rx_sge_range(bp, fp,
+ ring_prod);
+ bnx2x_free_tpa_pool(bp, fp,
+ max_agg_queues);
fp->disable_tpa = 1;
ring_prod = 0;
break;
@@ -1009,7 +1069,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
if (j != 0)
continue;
- if (!CHIP_IS_E2(bp)) {
+ if (CHIP_IS_E1(bp)) {
REG_WR(bp, BAR_USTRORM_INTMEM +
USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
U64_LO(fp->rx_comp_mapping));
@@ -1023,17 +1083,22 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
int i;
+ u8 cos;
for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
- u16 bd_cons = fp->tx_bd_cons;
- u16 sw_prod = fp->tx_pkt_prod;
- u16 sw_cons = fp->tx_pkt_cons;
+ u16 bd_cons = txdata->tx_bd_cons;
+ u16 sw_prod = txdata->tx_pkt_prod;
+ u16 sw_cons = txdata->tx_pkt_cons;
- while (sw_cons != sw_prod) {
- bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
- sw_cons++;
+ while (sw_cons != sw_prod) {
+ bd_cons = bnx2x_free_tx_pkt(bp, txdata,
+ TX_BD(sw_cons));
+ sw_cons++;
+ }
}
}
}
@@ -1053,7 +1118,6 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
if (skb == NULL)
continue;
-
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
@@ -1075,7 +1139,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
if (!fp->disable_tpa)
bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
ETH_MAX_AGGREGATION_QUEUES_E1 :
- ETH_MAX_AGGREGATION_QUEUES_E1H);
+ ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
}
}
@@ -1102,30 +1166,43 @@ void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
}
}
-static void bnx2x_free_msix_irqs(struct bnx2x *bp)
+/**
+ * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
+ *
+ * @bp: driver handle
+ * @nvecs: number of vectors to be released
+ */
+static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
- int i, offset = 1;
+ int i, offset = 0;
- free_irq(bp->msix_table[0].vector, bp->dev);
+ if (nvecs == offset)
+ return;
+ free_irq(bp->msix_table[offset].vector, bp->dev);
DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
- bp->msix_table[0].vector);
-
+ bp->msix_table[offset].vector);
+ offset++;
#ifdef BCM_CNIC
+ if (nvecs == offset)
+ return;
offset++;
#endif
+
for_each_eth_queue(bp, i) {
- DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
- "state %x\n", i, bp->msix_table[i + offset].vector,
- bnx2x_fp(bp, i, state));
+ if (nvecs == offset)
+ return;
+ DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
+ "irq\n", i, bp->msix_table[offset].vector);
- free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
+ free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
}
}
void bnx2x_free_irq(struct bnx2x *bp)
{
if (bp->flags & USING_MSIX_FLAG)
- bnx2x_free_msix_irqs(bp);
+ bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
+ CNIC_PRESENT + 1);
else if (bp->flags & USING_MSI_FLAG)
free_irq(bp->pdev->irq, bp->dev);
else
@@ -1147,6 +1224,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
msix_vec++;
#endif
+ /* We need separate vectors for ETH queues only (not FCoE) */
for_each_eth_queue(bp, i) {
bp->msix_table[msix_vec].entry = msix_vec;
DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
@@ -1154,7 +1232,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
msix_vec++;
}
- req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
+ req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
@@ -1198,9 +1276,10 @@ int bnx2x_enable_msix(struct bnx2x *bp)
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
- int i, rc, offset = 1;
+ int i, rc, offset = 0;
- rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
+ rc = request_irq(bp->msix_table[offset++].vector,
+ bnx2x_msix_sp_int, 0,
bp->dev->name, bp->dev);
if (rc) {
BNX2X_ERR("request sp irq failed\n");
@@ -1218,17 +1297,17 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
rc = request_irq(bp->msix_table[offset].vector,
bnx2x_msix_fp_int, 0, fp->name, fp);
if (rc) {
- BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
- bnx2x_free_msix_irqs(bp);
+ BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
+ bp->msix_table[offset].vector, rc);
+ bnx2x_free_msix_irqs(bp, offset);
return -EBUSY;
}
offset++;
- fp->state = BNX2X_FP_STATE_IRQ;
}
i = BNX2X_NUM_ETH_QUEUES(bp);
- offset = 1 + CNIC_CONTEXT_USE;
+ offset = 1 + CNIC_PRESENT;
netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
" ... fp[%d] %d\n",
bp->msix_table[0].vector,
@@ -1264,42 +1343,56 @@ static int bnx2x_req_irq(struct bnx2x *bp)
rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
bp->dev->name, bp->dev);
- if (!rc)
- bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
-
return rc;
}
-static void bnx2x_napi_enable(struct bnx2x *bp)
+static inline int bnx2x_setup_irqs(struct bnx2x *bp)
+{
+ int rc = 0;
+ if (bp->flags & USING_MSIX_FLAG) {
+ rc = bnx2x_req_msix_irqs(bp);
+ if (rc)
+ return rc;
+ } else {
+ bnx2x_ack_int(bp);
+ rc = bnx2x_req_irq(bp);
+ if (rc) {
+ BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
+ return rc;
+ }
+ if (bp->flags & USING_MSI_FLAG) {
+ bp->dev->irq = bp->pdev->irq;
+ netdev_info(bp->dev, "using MSI IRQ %d\n",
+ bp->pdev->irq);
+ }
+ }
+
+ return 0;
+}
+
+static inline void bnx2x_napi_enable(struct bnx2x *bp)
{
int i;
- for_each_napi_queue(bp, i)
+ for_each_rx_queue(bp, i)
napi_enable(&bnx2x_fp(bp, i, napi));
}
-static void bnx2x_napi_disable(struct bnx2x *bp)
+static inline void bnx2x_napi_disable(struct bnx2x *bp)
{
int i;
- for_each_napi_queue(bp, i)
+ for_each_rx_queue(bp, i)
napi_disable(&bnx2x_fp(bp, i, napi));
}
void bnx2x_netif_start(struct bnx2x *bp)
{
- int intr_sem;
-
- intr_sem = atomic_dec_and_test(&bp->intr_sem);
- smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
-
- if (intr_sem) {
- if (netif_running(bp->dev)) {
- bnx2x_napi_enable(bp);
- bnx2x_int_enable(bp);
- if (bp->state == BNX2X_STATE_OPEN)
- netif_tx_wake_all_queues(bp->dev);
- }
+ if (netif_running(bp->dev)) {
+ bnx2x_napi_enable(bp);
+ bnx2x_int_enable(bp);
+ if (bp->state == BNX2X_STATE_OPEN)
+ netif_tx_wake_all_queues(bp->dev);
}
}
@@ -1307,16 +1400,14 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
bnx2x_int_disable_sync(bp, disable_hw);
bnx2x_napi_disable(bp);
- netif_tx_disable(bp->dev);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
-#ifdef BCM_CNIC
struct bnx2x *bp = netdev_priv(dev);
- if (NO_FCOE(bp))
- return skb_tx_hash(dev, skb);
- else {
+
+#ifdef BCM_CNIC
+ if (!NO_FCOE(bp)) {
struct ethhdr *hdr = (struct ethhdr *)skb->data;
u16 ether_type = ntohs(hdr->h_proto);
@@ -1330,13 +1421,11 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
/* If ethertype is FCoE or FIP - use FCoE ring */
if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
- return bnx2x_fcoe(bp, index);
+ return bnx2x_fcoe_tx(bp, txq_index);
}
#endif
- /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring
- */
- return __skb_tx_hash(dev, skb,
- dev->real_num_tx_queues - FCOE_CONTEXT_USE);
+ /* select a non-FCoE queue */
+ return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -1355,40 +1444,60 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
}
/* Add special queues */
- bp->num_queues += NONE_ETH_CONTEXT_USE;
+ bp->num_queues += NON_ETH_CONTEXT_USE;
}
-#ifdef BCM_CNIC
-static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
+/**
+ * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
+ *
+ * @bp: Driver handle
+ *
+ * We currently support at most 16 Tx queues for each CoS, thus we will
+ * allocate a multiple of 16 for ETH L2 rings according to the value of the
+ * bp->max_cos.
+ *
+ * If there is an FCoE L2 queue the appropriate Tx queue will have the next
+ * index after all ETH L2 indices.
+ *
+ * If the actual number of Tx queues (for each CoS) is less than 16 then there
+ * will be holes at the end of each group of 16 ETH L2 indices (0..15,
+ * 16..31,...) with indices that are not coupled with any real Tx queue.
+ *
+ * The proper configuration of skb->queue_mapping is handled by
+ * bnx2x_select_queue() and __skb_tx_hash().
+ *
+ * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
+ * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
+ */
+static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
+ int rc, tx, rx;
+
+ tx = MAX_TXQS_PER_COS * bp->max_cos;
+ rx = BNX2X_NUM_ETH_QUEUES(bp);
+
+/* account for fcoe queue */
+#ifdef BCM_CNIC
if (!NO_FCOE(bp)) {
- if (!IS_MF_SD(bp))
- bnx2x_set_fip_eth_mac_addr(bp, 1);
- bnx2x_set_all_enode_macs(bp, 1);
- bp->flags |= FCOE_MACS_SET;
+ rx += FCOE_PRESENT;
+ tx += FCOE_PRESENT;
}
-}
#endif
-static void bnx2x_release_firmware(struct bnx2x *bp)
-{
- kfree(bp->init_ops_offsets);
- kfree(bp->init_ops);
- kfree(bp->init_data);
- release_firmware(bp->firmware);
-}
-
-static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
-{
- int rc, num = bp->num_queues;
+ rc = netif_set_real_num_tx_queues(bp->dev, tx);
+ if (rc) {
+ BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
+ return rc;
+ }
+ rc = netif_set_real_num_rx_queues(bp->dev, rx);
+ if (rc) {
+ BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
+ return rc;
+ }
-#ifdef BCM_CNIC
- if (NO_FCOE(bp))
- num -= FCOE_CONTEXT_USE;
+ DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
+ tx, rx);
-#endif
- netif_set_real_num_tx_queues(bp->dev, num);
- rc = netif_set_real_num_rx_queues(bp->dev, num);
return rc;
}
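/*
 * Illustrative sketch (not part of the patch): models the flat Tx queue
 * index layout described in the kerneldoc above -- 16 Tx queue slots per
 * CoS, with the FCoE queue taking the next index after all ETH L2 indices.
 * The value 16 stands in for MAX_TXQS_PER_COS and the example queue counts
 * are assumptions; the driver does the reverse decode with its own
 * TXQ_TO_FP()/TXQ_TO_COS() macros.
 */
#include <stdio.h>

#define SLOTS_PER_COS	16	/* MAX_TXQS_PER_COS in the driver */

int main(void)
{
	int max_cos = 3, num_eth = 6;	/* example values, not from the patch */
	int cos, fp;

	/* (cos, fp) -> flat txq index; indices num_eth..15 of each group
	 * of 16 are the "holes" mentioned in the comment above. */
	for (cos = 0; cos < max_cos; cos++)
		for (fp = 0; fp < num_eth; fp++)
			printf("cos %d fp %d -> txq %d\n",
			       cos, fp, cos * SLOTS_PER_COS + fp);

	/* FCoE, if present, takes the next index after all ETH groups */
	printf("fcoe txq -> %d\n", SLOTS_PER_COS * max_cos);
	return 0;
}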
@@ -1409,27 +1518,198 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
*/
fp->rx_buf_size =
BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
- BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
else
fp->rx_buf_size =
- bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
- IP_HEADER_ALIGNMENT_PADDING;
+ bp->dev->mtu + ETH_OVREHEAD +
+ BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ }
+}
+
+static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
+{
+ int i;
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+ u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /*
+ * Prepare the initial contents of the indirection table if RSS is
+ * enabled
+ */
+ if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
+ for (i = 0; i < sizeof(ind_table); i++)
+ ind_table[i] =
+ bp->fp->cl_id + (i % num_eth_queues);
}
+
+ /*
+ * For 57710 and 57711 SEARCHER configuration (rss_keys) is
+ * per-port, so if explicit configuration is needed, do it only
+ * for a PMF.
+ *
+ * For 57712 and newer on the other hand it's a per-function
+ * configuration.
+ */
+ return bnx2x_config_rss_pf(bp, ind_table,
+ bp->port.pmf || !CHIP_IS_E1x(bp));
}
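/*
 * Illustrative sketch (not part of the patch): the round-robin fill of the
 * RSS indirection table performed by bnx2x_init_rss_pf() above.  The table
 * size (128) and the base client id are assumptions for the demo only; in
 * the driver the size is T_ETH_INDIRECTION_TABLE_SIZE and the base is
 * bp->fp->cl_id.
 */
#include <stdio.h>
#include <stdint.h>

#define IND_TABLE_SIZE 128

int main(void)
{
	uint8_t ind_table[IND_TABLE_SIZE];
	uint8_t base_cl_id = 16;	/* example base client id */
	int num_eth_queues = 4;		/* example ETH Rx queue count */
	int i;

	for (i = 0; i < IND_TABLE_SIZE; i++)
		ind_table[i] = base_cl_id + (i % num_eth_queues);

	/* each Rx queue ends up owning IND_TABLE_SIZE / num_eth_queues slots */
	printf("ind_table[0..7]:");
	for (i = 0; i < 8; i++)
		printf(" %u", (unsigned)ind_table[i]);
	printf("\n");
	return 0;
}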
+int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
+{
+ struct bnx2x_config_rss_params params = {0};
+ int i;
+
+ /* Although RSS is meaningless when there is a single HW queue, we
+ * still need it enabled in order to have the HW Rx hash generated.
+ *
+ * if (!is_eth_multi(bp))
+ * bp->multi_mode = ETH_RSS_MODE_DISABLED;
+ */
+
+ params.rss_obj = &bp->rss_conf_obj;
+
+ __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+ /* RSS mode */
+ switch (bp->multi_mode) {
+ case ETH_RSS_MODE_DISABLED:
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
+ break;
+ case ETH_RSS_MODE_REGULAR:
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+ break;
+ case ETH_RSS_MODE_VLAN_PRI:
+ __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
+ break;
+ case ETH_RSS_MODE_E1HOV_PRI:
+ __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
+ break;
+ case ETH_RSS_MODE_IP_DSCP:
+ __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
+ break;
+ default:
+ BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
+ return -EINVAL;
+ }
+
+ /* If RSS is enabled */
+ if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
+ /* RSS configuration */
+ __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+
+ /* Hash bits */
+ params.rss_result_mask = MULTI_MASK;
+
+ memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+
+ if (config_hash) {
+ /* RSS keys */
+ for (i = 0; i < sizeof(params.rss_key) / 4; i++)
+ params.rss_key[i] = random32();
+
+ __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
+ }
+ }
+
+ return bnx2x_config_rss(bp, &params);
+}
+
+static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
+{
+ struct bnx2x_func_state_params func_params = {0};
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_HW_INIT;
+
+ func_params.params.hw_init.load_phase = load_code;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+/*
+ * Cleans the objects that have internal lists without sending
+ * ramrods. Should be run when interrupts are disabled.
+ */
+static void bnx2x_squeeze_objects(struct bnx2x *bp)
+{
+ int rc;
+ unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+ struct bnx2x_mcast_ramrod_params rparam = {0};
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+
+ /***************** Cleanup MACs' object first *************************/
+
+ /* Wait for completion of requested */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ /* Perform a dry cleanup */
+ __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
+
+ /* Clean ETH primary MAC */
+ __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
+ rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc != 0)
+ BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
+
+ /* Cleanup UC list */
+ vlan_mac_flags = 0;
+ __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
+ rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc != 0)
+ BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
+
+ /***************** Now clean mcast object *****************************/
+ rparam.mcast_obj = &bp->mcast_obj;
+ __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
+
+ /* Add a DEL command... */
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0)
+ BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
+ "object: %d\n", rc);
+
+ /* ...and wait until all pending commands are cleared */
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ while (rc != 0) {
+ if (rc < 0) {
+ BNX2X_ERR("Failed to clean multi-cast object: %d\n",
+ rc);
+ return;
+ }
+
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ }
+}
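/*
 * Illustrative sketch (not part of the patch): the "queue a DEL command,
 * then poll with CONT until nothing is pending" pattern used by
 * bnx2x_squeeze_objects() above for the mcast object.  issue_mcast_cmd()
 * is a stand-in stub, not a driver function; like bnx2x_config_mcast() it
 * is assumed to return >0 while work is pending, 0 when done, <0 on error.
 */
#include <stdio.h>

static int issue_mcast_cmd(int cont)
{
	static int pending = 3;

	if (!cont)			/* "DEL": queue the delete request */
		return 0;
	return pending ? pending-- : 0;	/* "CONT": drain pending work */
}

int main(void)
{
	int rc = issue_mcast_cmd(0);		/* add a DEL command... */

	if (rc < 0)
		return 1;

	rc = issue_mcast_cmd(1);		/* ...then wait for the drain */
	while (rc != 0) {
		if (rc < 0)
			return 1;
		rc = issue_mcast_cmd(1);
	}
	printf("all pending mcast commands cleared\n");
	return 0;
}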
+
+#ifndef BNX2X_STOP_ON_ERROR
+#define LOAD_ERROR_EXIT(bp, label) \
+ do { \
+ (bp)->state = BNX2X_STATE_ERROR; \
+ goto label; \
+ } while (0)
+#else
+#define LOAD_ERROR_EXIT(bp, label) \
+ do { \
+ (bp)->state = BNX2X_STATE_ERROR; \
+ (bp)->panic = 1; \
+ return -EBUSY; \
+ } while (0)
+#endif
+
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
+ int port = BP_PORT(bp);
u32 load_code;
int i, rc;
- /* Set init arrays */
- rc = bnx2x_init_firmware(bp);
- if (rc) {
- BNX2X_ERR("Error loading firmware\n");
- return rc;
- }
-
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return -EPERM;
@@ -1447,24 +1727,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* must be called before memory allocation and HW init */
bnx2x_ilt_set_info(bp);
- /* zero fastpath structures preserving invariants like napi which are
- * allocated only once
+ /*
+ * Zero fastpath structures while preserving invariants such as napi
+ * (which is allocated only once), the fp index, max_cos and the bp
+ * pointer.  Also set fp->disable_tpa.
*/
for_each_queue(bp, i)
bnx2x_bz_fp(bp, i);
+
/* Set the receive queues buffer size */
bnx2x_set_rx_buf_size(bp);
- for_each_queue(bp, i)
- bnx2x_fp(bp, i, disable_tpa) =
- ((bp->flags & TPA_ENABLE_FLAG) == 0);
-
-#ifdef BCM_CNIC
- /* We don't want TPA on FCoE L2 ring */
- bnx2x_fcoe(bp, disable_tpa) = 1;
-#endif
-
if (bnx2x_alloc_mem(bp))
return -ENOMEM;
@@ -1475,31 +1749,36 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
rc = bnx2x_set_real_num_queues(bp);
if (rc) {
BNX2X_ERR("Unable to set real_num_queues\n");
- goto load_error0;
+ LOAD_ERROR_EXIT(bp, load_error0);
}
+ /* configure multi cos mappings in kernel.
+ * this configuration may be overridden by a multi-class queue discipline
+ * or by a dcbx negotiation result.
+ */
+ bnx2x_setup_tc(bp->dev, bp->max_cos);
+
bnx2x_napi_enable(bp);
/* Send LOAD_REQUEST command to MCP
- Returns the type of LOAD command:
- if it is the first port to be initialized
- common blocks should be initialized, otherwise - not
- */
+ * Returns the type of LOAD command:
+ * if it is the first port to be initialized,
+ * common blocks should be initialized, otherwise not
+ */
if (!BP_NOMCP(bp)) {
load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
if (!load_code) {
BNX2X_ERR("MCP response failure, aborting\n");
rc = -EBUSY;
- goto load_error1;
+ LOAD_ERROR_EXIT(bp, load_error1);
}
if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
rc = -EBUSY; /* other port in diagnostic mode */
- goto load_error1;
+ LOAD_ERROR_EXIT(bp, load_error1);
}
} else {
int path = BP_PATH(bp);
- int port = BP_PORT(bp);
DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
path, load_count[path][0], load_count[path][1],
@@ -1519,36 +1798,60 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
(load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
- (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
+ (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
bp->port.pmf = 1;
- else
+ /*
+ * We need the barrier to ensure the ordering between writing
+ * to bp->port.pmf here and reading it from
+ * bnx2x_periodic_task().
+ */
+ smp_mb();
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+ } else
bp->port.pmf = 0;
+
DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+ /* Init Function state controlling object */
+ bnx2x__init_func_obj(bp);
+
/* Initialize HW */
rc = bnx2x_init_hw(bp, load_code);
if (rc) {
BNX2X_ERR("HW init failed, aborting\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- goto load_error2;
+ LOAD_ERROR_EXIT(bp, load_error2);
}
/* Connect to IRQs */
rc = bnx2x_setup_irqs(bp);
if (rc) {
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- goto load_error2;
+ LOAD_ERROR_EXIT(bp, load_error2);
}
/* Setup NIC internals and enable interrupts */
bnx2x_nic_init(bp, load_code);
+ /* Init per-function objects */
+ bnx2x_init_bp_objs(bp);
+
if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
(load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
- (bp->common.shmem2_base))
- SHMEM2_WR(bp, dcc_support,
- (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
- SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+ (bp->common.shmem2_base)) {
+ if (SHMEM2_HAS(bp, dcc_support))
+ SHMEM2_WR(bp, dcc_support,
+ (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
+ SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+ }
+
+ bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+ rc = bnx2x_func_start(bp);
+ if (rc) {
+ BNX2X_ERR("Function start failed!\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
/* Send LOAD_DONE command to MCP */
if (!BP_NOMCP(bp)) {
@@ -1556,74 +1859,38 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (!load_code) {
BNX2X_ERR("MCP response failure, aborting\n");
rc = -EBUSY;
- goto load_error3;
+ LOAD_ERROR_EXIT(bp, load_error3);
}
}
- bnx2x_dcbx_init(bp);
-
- bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
-
- rc = bnx2x_func_start(bp);
- if (rc) {
- BNX2X_ERR("Function start failed!\n");
-#ifndef BNX2X_STOP_ON_ERROR
- goto load_error3;
-#else
- bp->panic = 1;
- return -EBUSY;
-#endif
- }
-
- rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
+ rc = bnx2x_setup_leading(bp);
if (rc) {
BNX2X_ERR("Setup leading failed!\n");
-#ifndef BNX2X_STOP_ON_ERROR
- goto load_error3;
-#else
- bp->panic = 1;
- return -EBUSY;
-#endif
- }
-
- if (!CHIP_IS_E1(bp) &&
- (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
- DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
- bp->flags |= MF_FUNC_DIS;
+ LOAD_ERROR_EXIT(bp, load_error3);
}
#ifdef BCM_CNIC
/* Enable Timer scan */
- REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
#endif
for_each_nondefault_queue(bp, i) {
- rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
if (rc)
-#ifdef BCM_CNIC
- goto load_error4;
-#else
- goto load_error3;
-#endif
+ LOAD_ERROR_EXIT(bp, load_error4);
}
+ rc = bnx2x_init_rss_pf(bp);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error4);
+
/* Now when Clients are configured we are ready to work */
bp->state = BNX2X_STATE_OPEN;
-#ifdef BCM_CNIC
- bnx2x_set_fcoe_eth_macs(bp);
-#endif
-
- bnx2x_set_eth_mac(bp, 1);
-
- /* Clear MC configuration */
- if (CHIP_IS_E1(bp))
- bnx2x_invalidate_e1_mc_list(bp);
- else
- bnx2x_invalidate_e1h_mc_list(bp);
-
- /* Clear UC lists configuration */
- bnx2x_invalidate_uc_list(bp);
+ /* Configure a ucast MAC */
+ rc = bnx2x_set_eth_mac(bp, true);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error4);
if (bp->pending_max) {
bnx2x_update_max_mf_config(bp, bp->pending_max);
@@ -1633,15 +1900,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->port.pmf)
bnx2x_initial_phy_init(bp, load_mode);
- /* Initialize Rx filtering */
+ /* Start fast path */
+
+ /* Initialize Rx filter. */
+ netif_addr_lock_bh(bp->dev);
bnx2x_set_rx_mode(bp->dev);
+ netif_addr_unlock_bh(bp->dev);
- /* Start fast path */
+ /* Start the Tx */
switch (load_mode) {
case LOAD_NORMAL:
/* Tx queue should be only reenabled */
netif_tx_wake_all_queues(bp->dev);
- /* Initialize the receive filter. */
break;
case LOAD_OPEN:
@@ -1670,18 +1940,28 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
#endif
bnx2x_inc_load_cnt(bp);
- bnx2x_release_firmware(bp);
+ /* Wait for all pending SP commands to complete */
+ if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
+ BNX2X_ERR("Timeout waiting for SP elements to complete\n");
+ bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+ return -EBUSY;
+ }
+ bnx2x_dcbx_init(bp);
return 0;
-#ifdef BCM_CNIC
+#ifndef BNX2X_STOP_ON_ERROR
load_error4:
+#ifdef BCM_CNIC
/* Disable Timer scan */
- REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
#endif
load_error3:
bnx2x_int_disable_sync(bp, 1);
+ /* Clean queueable objects */
+ bnx2x_squeeze_objects(bp);
+
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
for_each_rx_queue(bp, i)
@@ -1701,42 +1981,58 @@ load_error1:
load_error0:
bnx2x_free_mem(bp);
- bnx2x_release_firmware(bp);
-
return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
int i;
-
- if (bp->state == BNX2X_STATE_CLOSED) {
- /* Interface has been removed - nothing to recover */
+ bool global = false;
+
+ if ((bp->state == BNX2X_STATE_CLOSED) ||
+ (bp->state == BNX2X_STATE_ERROR)) {
+ /* We can get here if the driver has been unloaded
+ * during parity error recovery and is either waiting for a
+ * leader to complete or for other functions to unload and
+ * then ifdown has been issued. In this case we want to
+ * unload and let other functions complete a recovery
+ * process.
+ */
bp->recovery_state = BNX2X_RECOVERY_DONE;
bp->is_leader = 0;
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
- smp_wmb();
+ bnx2x_release_leader_lock(bp);
+ smp_mb();
+
+ DP(NETIF_MSG_HW, "Releasing a leadership...\n");
return -EINVAL;
}
+ /*
+ * It's important to set the bp->state to the value different from
+ * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
+ * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
+ */
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+ smp_mb();
+
+ /* Stop Tx */
+ bnx2x_tx_disable(bp);
+
#ifdef BCM_CNIC
bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
- bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
- /* Set "drop all" */
bp->rx_mode = BNX2X_RX_MODE_NONE;
- bnx2x_set_storm_rx_mode(bp);
-
- /* Stop Tx */
- bnx2x_tx_disable(bp);
del_timer_sync(&bp->timer);
- SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
- (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
+ /* Set ALWAYS_ALIVE bit in shmem */
+ bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+
+ bnx2x_drv_pulse(bp);
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -1744,13 +2040,38 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
if (unload_mode != UNLOAD_RECOVERY)
bnx2x_chip_cleanup(bp, unload_mode);
else {
- /* Disable HW interrupts, NAPI and Tx */
+ /* Send the UNLOAD_REQUEST to the MCP */
+ bnx2x_send_unload_req(bp, unload_mode);
+
+ /*
+ * Prevent transactions to the host from the functions on the
+ * engine that doesn't reset global blocks in case of global
+ * attention, once global blocks are reset and gates are opened
+ * (the engine whose leader will perform the recovery
+ * last).
+ */
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_pf_disable(bp);
+
+ /* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1);
/* Release IRQs */
bnx2x_free_irq(bp);
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp);
}
+ /*
+ * At this stage no more interrupts will arrive, so we may safely clean
+ * the queueable objects here in case they failed to get cleaned so far.
+ */
+ bnx2x_squeeze_objects(bp);
+
+ /* There should be no more pending SP commands at this stage */
+ bp->sp_state = 0;
+
bp->port.pmf = 0;
/* Free SKBs, SGEs, TPA pool and driver internals */
@@ -1762,17 +2083,24 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bp->state = BNX2X_STATE_CLOSED;
+ /* Check if there are pending parity attentions. If there are - set
+ * RECOVERY_IN_PROGRESS.
+ */
+ if (bnx2x_chk_parity_attn(bp, &global, false)) {
+ bnx2x_set_reset_in_progress(bp);
+
+ /* Set RESET_IS_GLOBAL if needed */
+ if (global)
+ bnx2x_set_reset_global(bp);
+ }
+
+
/* The last driver must disable a "close the gate" if there is no
* parity attention or "process kill" pending.
*/
- if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
- bnx2x_reset_is_done(bp))
+ if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
bnx2x_disable_close_the_gate(bp);
- /* Reset MCP mail box sequence if there is on going recovery */
- if (unload_mode == UNLOAD_RECOVERY)
- bp->fw_seq = 0;
-
return 0;
}
@@ -1834,6 +2162,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
int bnx2x_poll(struct napi_struct *napi, int budget)
{
int work_done = 0;
+ u8 cos;
struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
napi);
struct bnx2x *bp = fp->bp;
@@ -1846,8 +2175,10 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
}
#endif
- if (bnx2x_has_tx_work(fp))
- bnx2x_tx_int(fp);
+ for_each_cos_in_tx_queue(fp, cos)
+ if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
+ bnx2x_tx_int(bp, &fp->txdata[cos]);
+
if (bnx2x_has_rx_work(fp)) {
work_done += bnx2x_rx_int(fp, budget - work_done);
@@ -1909,7 +2240,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
* in Other Operating Systems(TM)
*/
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
- struct bnx2x_fastpath *fp,
+ struct bnx2x_fp_txdata *txdata,
struct sw_tx_bd *tx_buf,
struct eth_tx_start_bd **tx_bd, u16 hlen,
u16 bd_prod, int nbd)
@@ -1930,7 +2261,7 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
/* now get a new data BD
* (after the pbd) and fill it */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
- d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
+ d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
@@ -2148,6 +2479,22 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
sizeof(struct udphdr) - skb->data;
}
+static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
+{
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
+
+ if (xmit_type & XMIT_CSUM_V4)
+ tx_start_bd->bd_flags.as_bitfield |=
+ ETH_TX_BD_FLAGS_IP_CSUM;
+ else
+ tx_start_bd->bd_flags.as_bitfield |=
+ ETH_TX_BD_FLAGS_IPV6;
+
+ if (!(xmit_type & XMIT_CSUM_TCP))
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
+}
+
/**
* bnx2x_set_pbd_csum - update PBD with checksum and return header length
*
@@ -2210,16 +2557,18 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
+
struct bnx2x_fastpath *fp;
struct netdev_queue *txq;
+ struct bnx2x_fp_txdata *txdata;
struct sw_tx_bd *tx_buf;
- struct eth_tx_start_bd *tx_start_bd;
+ struct eth_tx_start_bd *tx_start_bd, *first_bd;
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
- int nbd, fp_index;
+ int nbd, txq_index, fp_index, txdata_index;
dma_addr_t mapping;
u32 xmit_type = bnx2x_xmit_type(bp, skb);
int i;
@@ -2233,12 +2582,43 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
#endif
- fp_index = skb_get_queue_mapping(skb);
- txq = netdev_get_tx_queue(dev, fp_index);
+ txq_index = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, txq_index);
+
+ BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
+
+ /* decode the fastpath index and the cos index from the txq */
+ fp_index = TXQ_TO_FP(txq_index);
+ txdata_index = TXQ_TO_COS(txq_index);
+#ifdef BCM_CNIC
+ /*
+ * Override the above for the FCoE queue:
+ * - FCoE fp entry is right after the ETH entries.
+ * - FCoE L2 queue uses bp->txdata[0] only.
+ */
+ if (unlikely(!NO_FCOE(bp) && (txq_index ==
+ bnx2x_fcoe_tx(bp, txq_index)))) {
+ fp_index = FCOE_IDX;
+ txdata_index = 0;
+ }
+#endif
+
+ /* enable this debug print to view the transmission queue being used
+ DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d",
+ txq_index, fp_index, txdata_index); */
+
+ /* locate the fastpath and the txdata */
fp = &bp->fp[fp_index];
+ txdata = &fp->txdata[txdata_index];
- if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
+ /* enable this debug print to view the transmission details
+ DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
+ " tx_data ptr %p fp pointer %p",
+ txdata->cid, fp_index, txdata_index, txdata, fp); */
+
+ if (unlikely(bnx2x_tx_avail(bp, txdata) <
+ (skb_shinfo(skb)->nr_frags + 3))) {
fp->eth_q_stats.driver_xoff++;
netif_tx_stop_queue(txq);
BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
@@ -2247,7 +2627,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
"protocol(%x,%x) gso type %x xmit_type %x\n",
- fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
+ txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
eth = (struct ethhdr *)skb->data;
@@ -2275,7 +2655,15 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
#endif
-
+ /* Map skb linear data for DMA */
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
+ "silently dropping this SKB\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
/*
Please read carefully. First we use one BD which we mark as start,
then we have a parsing info BD (used for TSO or xsum),
@@ -2285,12 +2673,19 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
And above all, all pdb sizes are in words - NOT DWORDS!
*/
- pkt_prod = fp->tx_pkt_prod++;
- bd_prod = TX_BD(fp->tx_bd_prod);
+ /* get current pkt produced now - advance it just before sending packet
+ * since mapping of pages may fail and cause packet to be dropped
+ */
+ pkt_prod = txdata->tx_pkt_prod;
+ bd_prod = TX_BD(txdata->tx_bd_prod);
- /* get a tx_buf and first BD */
- tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
- tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
+ /* get a tx_buf and first BD
+ * tx_start_bd may be changed during SPLIT,
+ * but first_bd will always stay first
+ */
+ tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
+ tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
+ first_bd = tx_start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
@@ -2300,13 +2695,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
/* remember the first BD of the packet */
- tx_buf->first_bd = fp->tx_bd_prod;
+ tx_buf->first_bd = txdata->tx_bd_prod;
tx_buf->skb = skb;
tx_buf->flags = 0;
DP(NETIF_MSG_TX_QUEUED,
"sending pkt %u @%p next_idx %u bd %u @%p\n",
- pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
+ pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
if (vlan_tx_tag_present(skb)) {
tx_start_bd->vlan_or_ethertype =
@@ -2319,31 +2714,33 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
- if (xmit_type & XMIT_CSUM) {
- tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
-
- if (xmit_type & XMIT_CSUM_V4)
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IP_CSUM;
- else
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IPV6;
-
- if (!(xmit_type & XMIT_CSUM_TCP))
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IS_UDP;
- }
+ if (xmit_type & XMIT_CSUM)
+ bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
- if (CHIP_IS_E2(bp)) {
- pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
+ if (!CHIP_IS_E1x(bp)) {
+ pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
/* Set PBD in checksum offload case */
if (xmit_type & XMIT_CSUM)
hlen = bnx2x_set_pbd_csum_e2(bp, skb,
&pbd_e2_parsing_data,
xmit_type);
+ if (IS_MF_SI(bp)) {
+ /*
+ * fill in the MAC addresses in the PBD - for local
+ * switching
+ */
+ bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
+ &pbd_e2->src_mac_addr_mid,
+ &pbd_e2->src_mac_addr_lo,
+ eth->h_source);
+ bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
+ &pbd_e2->dst_mac_addr_mid,
+ &pbd_e2->dst_mac_addr_lo,
+ eth->h_dest);
+ }
} else {
- pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
+ pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
/* Set PBD in checksum offload case */
if (xmit_type & XMIT_CSUM)
@@ -2351,15 +2748,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
- /* Map skb linear data for DMA */
- mapping = dma_map_single(&bp->pdev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
-
/* Setup the data pointer of the first BD of the packet */
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
- nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
- tx_start_bd->nbd = cpu_to_le16(nbd);
+ nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
pkt_size = tx_start_bd->nbytes;
@@ -2380,9 +2772,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
if (unlikely(skb_headlen(skb) > hlen))
- bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
- hlen, bd_prod, ++nbd);
- if (CHIP_IS_E2(bp))
+ bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
+ &tx_start_bd, hlen,
+ bd_prod, ++nbd);
+ if (!CHIP_IS_E1x(bp))
bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
xmit_type);
else
@@ -2401,19 +2794,35 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ mapping = dma_map_page(&bp->pdev->dev, frag->page,
+ frag->page_offset, frag->size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+
+ DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
+ "dropping packet...\n");
+
+ /* we need to unmap all buffers already mapped
+ * for this SKB;
+ * first_bd->nbd needs to be properly updated
+ * before the call to bnx2x_free_tx_pkt
+ */
+ first_bd->nbd = cpu_to_le16(nbd);
+ bnx2x_free_tx_pkt(bp, txdata,
+ TX_BD(txdata->tx_pkt_prod));
+ return NETDEV_TX_OK;
+ }
+
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
- tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
+ tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
if (total_pkt_bd == NULL)
- total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
-
- mapping = dma_map_page(&bp->pdev->dev, frag->page,
- frag->page_offset,
- frag->size, DMA_TO_DEVICE);
+ total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
tx_data_bd->nbytes = cpu_to_le16(frag->size);
le16_add_cpu(&pkt_size, frag->size);
+ nbd++;
DP(NETIF_MSG_TX_QUEUED,
"frag %d bd @%p addr (%x:%x) nbytes %d\n",
@@ -2423,6 +2832,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
+ /* update with actual num BDs */
+ first_bd->nbd = cpu_to_le16(nbd);
+
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
/* now send a tx doorbell, counting the next BD
@@ -2431,6 +2843,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (TX_BD_POFF(bd_prod) < nbd)
nbd++;
+ /* total_pkt_bytes should be set on the first data BD if
+ * it's not an LSO packet and there is more than one
+ * data BD. In this case pkt_size is limited by an MTU value.
+ * However, we prefer to set it for an LSO packet (while we don't
+ * have to) in order to save some CPU cycles in the non-LSO
+ * case, where we care much more about them.
+ */
if (total_pkt_bd != NULL)
total_pkt_bd->total_pkt_bytes = pkt_size;
@@ -2451,6 +2870,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
+ txdata->tx_pkt_prod++;
/*
* Make sure that the BD data is updated before updating the producer
* since FW might read the BD right after the producer is updated.
@@ -2460,16 +2880,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
wmb();
- fp->tx_db.data.prod += nbd;
+ txdata->tx_db.data.prod += nbd;
barrier();
- DOORBELL(bp, fp->cid, fp->tx_db.raw);
+ DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
mmiowb();
- fp->tx_bd_prod += nbd;
+ txdata->tx_bd_prod += nbd;
- if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
+ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
netif_tx_stop_queue(txq);
/* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -2478,34 +2898,110 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
smp_mb();
fp->eth_q_stats.driver_xoff++;
- if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
+ if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
netif_tx_wake_queue(txq);
}
- fp->tx_pkt++;
+ txdata->tx_pkt++;
return NETDEV_TX_OK;
}
+/**
+ * bnx2x_setup_tc - routine to configure net_device for multi tc
+ *
+ * @netdev: net device to configure
+ * @tc: number of traffic classes to enable
+ *
+ * callback connected to the ndo_setup_tc function pointer
+ */
+int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
+{
+ int cos, prio, count, offset;
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* setup tc must be called under rtnl lock */
+ ASSERT_RTNL();
+
+ /* no traffic classes requested. aborting */
+ if (!num_tc) {
+ netdev_reset_tc(dev);
+ return 0;
+ }
+
+ /* requested to support too many traffic classes */
+ if (num_tc > bp->max_cos) {
+ DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
+ " requested: %d. max supported is %d",
+ num_tc, bp->max_cos);
+ return -EINVAL;
+ }
+
+ /* declare amount of supported traffic classes */
+ if (netdev_set_num_tc(dev, num_tc)) {
+ DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes",
+ num_tc);
+ return -EINVAL;
+ }
+
+ /* configure priority to traffic class mapping */
+ for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
+ netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
+ DP(BNX2X_MSG_SP, "mapping priority %d to tc %d",
+ prio, bp->prio_to_cos[prio]);
+ }
+
+
+ /* Use this configuration to differentiate tc0 from other COSes
+ This can be used for ets or pfc, and save the effort of setting
+ up a multi-class queue disc or negotiating DCBX with a switch
+ netdev_set_prio_tc_map(dev, 0, 0);
+ DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0);
+ for (prio = 1; prio < 16; prio++) {
+ netdev_set_prio_tc_map(dev, prio, 1);
+ DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1);
+ } */
+
+ /* configure traffic class to transmission queue mapping */
+ for (cos = 0; cos < bp->max_cos; cos++) {
+ count = BNX2X_NUM_ETH_QUEUES(bp);
+ offset = cos * MAX_TXQS_PER_COS;
+ netdev_set_tc_queue(dev, cos, count, offset);
+ DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d",
+ cos, offset, count);
+ }
+
+ return 0;
+}
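/*
 * Illustrative sketch (not part of the patch): how the per-tc queue ranges
 * programmed by bnx2x_setup_tc() above turn a priority and a flow hash into
 * a Tx queue index.  offset/count mirror the netdev_set_tc_queue() arguments
 * (offset = cos * 16, count = number of ETH queues); the real hashing is done
 * by __skb_tx_hash() in the networking core, this only models the arithmetic.
 * The priority map and queue counts below are example values.
 */
#include <stdio.h>

#define SLOTS_PER_COS 16	/* MAX_TXQS_PER_COS in the driver */

int main(void)
{
	int num_eth = 6;				/* example ETH queue count */
	int prio_to_tc[8] = { 0, 0, 1, 1, 2, 2, 0, 0 };	/* example prio map */
	unsigned int prio, hash;

	for (prio = 0, hash = 12345; prio < 8; prio++, hash += 7) {
		int tc     = prio_to_tc[prio];
		int offset = tc * SLOTS_PER_COS;	/* netdev_set_tc_queue() offset */
		int count  = num_eth;			/* netdev_set_tc_queue() count  */
		int txq    = offset + (int)(hash % count);

		printf("prio %u hash %u -> tc %d txq %d\n", prio, hash, tc, txq);
	}
	return 0;
}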
+
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0;
if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
return -EINVAL;
+ if (netif_running(dev)) {
+ rc = bnx2x_set_eth_mac(bp, false);
+ if (rc)
+ return rc;
+ }
+
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
if (netif_running(dev))
- bnx2x_set_eth_mac(bp, 1);
+ rc = bnx2x_set_eth_mac(bp, true);
- return 0;
+ return rc;
}
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
struct bnx2x_fastpath *fp = &bp->fp[fp_index];
+ u8 cos;
/* Common */
#ifdef BCM_CNIC
@@ -2516,7 +3012,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
} else {
#endif
/* status blocks */
- if (CHIP_IS_E2(bp))
+ if (!CHIP_IS_E1x(bp))
BNX2X_PCI_FREE(sb->e2_sb,
bnx2x_fp(bp, fp_index,
status_blk_mapping),
@@ -2554,10 +3050,18 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
/* Tx */
if (!skip_tx_queue(bp, fp_index)) {
/* fastpath tx rings: tx_buf tx_desc */
- BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
- BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
- bnx2x_fp(bp, fp_index, tx_desc_mapping),
- sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+ DP(BNX2X_MSG_SP,
+ "freeing tx memory of fp %d cos %d cid %d",
+ fp_index, cos, txdata->cid);
+
+ BNX2X_FREE(txdata->tx_buf_ring);
+ BNX2X_PCI_FREE(txdata->tx_desc_ring,
+ txdata->tx_desc_mapping,
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ }
}
/* end of fastpath */
}
@@ -2572,7 +3076,7 @@ void bnx2x_free_fp_mem(struct bnx2x *bp)
static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
- if (CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
bnx2x_fp(bp, index, sb_index_values) =
(__le16 *)status_blk.e2_sb->sb.index_values;
bnx2x_fp(bp, index, sb_running_index) =
@@ -2590,26 +3094,24 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
union host_hc_status_block *sb;
struct bnx2x_fastpath *fp = &bp->fp[index];
int ring_size = 0;
+ u8 cos;
/* if rx_ring_size specified - use it */
int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
- MAX_RX_AVAIL/bp->num_queues;
+ MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
/* allocate at least number of buffers required by FW */
- rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+ rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
MIN_RX_SIZE_TPA,
rx_ring_size);
- bnx2x_fp(bp, index, bp) = bp;
- bnx2x_fp(bp, index, index) = index;
-
/* Common */
sb = &bnx2x_fp(bp, index, status_blk);
#ifdef BCM_CNIC
if (!IS_FCOE_IDX(index)) {
#endif
/* status blocks */
- if (CHIP_IS_E2(bp))
+ if (!CHIP_IS_E1x(bp))
BNX2X_PCI_ALLOC(sb->e2_sb,
&bnx2x_fp(bp, index, status_blk_mapping),
sizeof(struct host_hc_status_block_e2));
@@ -2620,16 +3122,29 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
#ifdef BCM_CNIC
}
#endif
- set_sb_shortcuts(bp, index);
+
+ /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
+ * set shortcuts for it.
+ */
+ if (!IS_FCOE_IDX(index))
+ set_sb_shortcuts(bp, index);
/* Tx */
if (!skip_tx_queue(bp, index)) {
/* fastpath tx rings: tx_buf tx_desc */
- BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+ DP(BNX2X_MSG_SP, "allocating tx memory of "
+ "fp %d cos %d",
+ index, cos);
+
+ BNX2X_ALLOC(txdata->tx_buf_ring,
sizeof(struct sw_tx_bd) * NUM_TX_BD);
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
- &bnx2x_fp(bp, index, tx_desc_mapping),
+ BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
+ &txdata->tx_desc_mapping,
sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ }
}
/* Rx */
@@ -2672,7 +3187,7 @@ alloc_mem_err:
index, ring_size);
/* FW will drop all packets if queue is not big enough,
* In these cases we disable the queue
- * Min size diferent for TPA and non-TPA queues
+ * Min size is different for OOO, TPA and non-TPA queues
*/
if (ring_size < (fp->disable_tpa ?
MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
@@ -2690,17 +3205,24 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
/**
* 1. Allocate FP for leading - fatal if error
* 2. {CNIC} Allocate FCoE FP - fatal if error
- * 3. Allocate RSS - fix number of queues if error
+ * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
+ * 4. Allocate RSS - fix number of queues if error
*/
/* leading */
if (bnx2x_alloc_fp_mem_at(bp, 0))
return -ENOMEM;
+
#ifdef BCM_CNIC
- /* FCoE */
- if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
- return -ENOMEM;
+ if (!NO_FCOE(bp))
+ /* FCoE */
+ if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+ /* we will fail the load process instead of marking
+ * NO_FCOE_FLAG
+ */
+ return -ENOMEM;
#endif
+
/* RSS */
for_each_nondefault_eth_queue(bp, i)
if (bnx2x_alloc_fp_mem_at(bp, i))
@@ -2718,7 +3240,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
* FCOE_IDX < FWD_IDX < OOO_IDX
*/
- /* move FCoE fp */
+ /* move FCoE fp even if NO_FCOE_FLAG is on */
bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
#endif
bp->num_queues -= delta;
@@ -2729,30 +3251,6 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
return 0;
}
-static int bnx2x_setup_irqs(struct bnx2x *bp)
-{
- int rc = 0;
- if (bp->flags & USING_MSIX_FLAG) {
- rc = bnx2x_req_msix_irqs(bp);
- if (rc)
- return rc;
- } else {
- bnx2x_ack_int(bp);
- rc = bnx2x_req_irq(bp);
- if (rc) {
- BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
- return rc;
- }
- if (bp->flags & USING_MSI_FLAG) {
- bp->dev->irq = bp->pdev->irq;
- netdev_info(bp->dev, "using MSI IRQ %d\n",
- bp->pdev->irq);
- }
- }
-
- return 0;
-}
-
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
kfree(bp->fp);
@@ -2765,16 +3263,23 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
struct bnx2x_fastpath *fp;
struct msix_entry *tbl;
struct bnx2x_ilt *ilt;
+ int msix_table_size = 0;
- /* fp array */
- fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
+ /*
+ * The biggest MSI-X table we might need is the maximum number of fast
+ * path IGU SBs plus the default SB (for the PF).
+ */
+ msix_table_size = bp->igu_sb_cnt + 1;
+
+ /* fp array: RSS plus CNIC related L2 queues */
+ fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
+ sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
bp->fp = fp;
/* msix table */
- tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
- GFP_KERNEL);
+ tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
if (!tbl)
goto alloc_err;
bp->msix_table = tbl;
@@ -2792,7 +3297,7 @@ alloc_err:
}
-static int bnx2x_reload_if_running(struct net_device *dev)
+int bnx2x_reload_if_running(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -2803,6 +3308,78 @@ static int bnx2x_reload_if_running(struct net_device *dev)
return bnx2x_nic_load(bp, LOAD_NORMAL);
}
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
+{
+ u32 sel_phy_idx = 0;
+ if (bp->link_params.num_phys <= 1)
+ return INT_PHY;
+
+ if (bp->link_vars.link_up) {
+ sel_phy_idx = EXT_PHY1;
+ /* In case link is SERDES, check if the EXT_PHY2 is the one */
+ if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
+ (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
+ sel_phy_idx = EXT_PHY2;
+ } else {
+
+ switch (bnx2x_phy_selection(&bp->link_params)) {
+ case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+ case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+ sel_phy_idx = EXT_PHY1;
+ break;
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+ case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+ sel_phy_idx = EXT_PHY2;
+ break;
+ }
+ }
+
+ return sel_phy_idx;
+
+}
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
+{
+ u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
+ /*
+ * The selected active PHY index always refers to the state after
+ * swapping (in case PHY swapping is enabled), so when swapping is
+ * enabled we need to reverse the configuration.
+ */
+
+ if (bp->link_params.multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+ if (sel_phy_idx == EXT_PHY1)
+ sel_phy_idx = EXT_PHY2;
+ else if (sel_phy_idx == EXT_PHY2)
+ sel_phy_idx = EXT_PHY1;
+ }
+ return LINK_CONFIG_IDX(sel_phy_idx);
+}
+
+#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ switch (type) {
+ case NETDEV_FCOE_WWNN:
+ *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
+ cp->fcoe_wwn_node_name_lo);
+ break;
+ case NETDEV_FCOE_WWPN:
+ *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
+ cp->fcoe_wwn_port_name_lo);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
@@ -2882,8 +3459,13 @@ void bnx2x_tx_timeout(struct net_device *dev)
if (!bp->panic)
bnx2x_panic();
#endif
+
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+
/* This allows the netif to be shutdown gracefully before resetting */
- schedule_delayed_work(&bp->reset_task, 0);
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -2954,3 +3536,57 @@ int bnx2x_resume(struct pci_dev *pdev)
return rc;
}
+
+
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+ u32 cid)
+{
+ /* ustorm cxt validation */
+ cxt->ustorm_ag_context.cdu_usage =
+ CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+ CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
+ /* xcontext validation */
+ cxt->xstorm_ag_context.cdu_reserved =
+ CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+ CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
+}
+
+static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+ u8 fw_sb_id, u8 sb_index,
+ u8 ticks)
+{
+
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
+ REG_WR8(bp, addr, ticks);
+ DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
+ port, fw_sb_id, sb_index, ticks);
+}
+
+static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+ u16 fw_sb_id, u8 sb_index,
+ u8 disable)
+{
+ u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
+ u16 flags = REG_RD16(bp, addr);
+ /* clear and set */
+ flags &= ~HC_INDEX_DATA_HC_ENABLED;
+ flags |= enable_flag;
+ REG_WR16(bp, addr, flags);
+ DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
+ port, fw_sb_id, sb_index, disable);
+}
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+ u8 sb_index, u8 disable, u16 usec)
+{
+ int port = BP_PORT(bp);
+ u8 ticks = usec / BNX2X_BTR;
+
+ storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
+
+ disable = disable ? 1 : (usec ? 0 : 1);
+ storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
+}
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 1a3545bd8a9..223bfeebc59 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -18,11 +18,15 @@
#define BNX2X_CMN_H
#include <linux/types.h>
+#include <linux/pci.h>
#include <linux/netdevice.h>
#include "bnx2x.h"
+/* This is used as a replacement for the MCP if it's not present */
+extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
+
extern int num_queues;
/************************ Macros ********************************/
@@ -61,6 +65,73 @@ extern int num_queues;
/*********************** Interfaces ****************************
* Functions that need to be implemented by each driver version
*/
+/* Init */
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp: driver handle
+ * @unload_mode: requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp);
+
+/**
+ * bnx2x_config_rss_pf - configure RSS parameters.
+ *
+ * @bp: driver handle
+ * @ind_table: indirection table to configure
+ * @config_hash: re-configure RSS hash keys configuration
+ */
+int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);
+
+/**
+ * bnx2x__init_func_obj - init function object
+ *
+ * @bp: driver handle
+ *
+ * Initializes the Function Object with the appropriate
+ * parameters which include a function slow path driver
+ * interface.
+ */
+void bnx2x__init_func_obj(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_queue - setup eth queue.
+ *
+ * @bp: driver handle
+ * @fp: pointer to the fastpath structure
+ * @leading: boolean
+ *
+ */
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool leading);
+
+/**
+ * bnx2x_setup_leading - bring up a leading eth queue.
+ *
+ * @bp: driver handle
+ */
+int bnx2x_setup_leading(struct bnx2x *bp);
+
+/**
+ * bnx2x_fw_command - send the MCP a request
+ *
+ * @bp: driver handle
+ * @command: request
+ * @param: request's parameter
+ *
+ * block until there is a reply
+ */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
/**
* bnx2x_initial_phy_init - initialize link parameters structure variables.
@@ -88,6 +159,32 @@ void bnx2x_link_set(struct bnx2x *bp);
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
/**
+ * bnx2x_drv_pulse - write driver pulse to shmem
+ *
+ * @bp: driver handle
+ *
+ * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
+ * in the shmem.
+ */
+void bnx2x_drv_pulse(struct bnx2x *bp);
+
+/**
+ * bnx2x_igu_ack_sb - update IGU with current SB value
+ *
+ * @bp: driver handle
+ * @igu_sb_id: SB id
+ * @segment: SB segment
+ * @index: SB index
+ * @op: SB operation
+ * @update: is HW update required
+ */
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+ u16 index, u8 op, u8 update);
+
+/* Disable transactions from chip to host */
+void bnx2x_pf_disable(struct bnx2x *bp);
+
+/**
* bnx2x__link_status_update - handles link status change.
*
* @bp: driver handle
@@ -165,21 +262,6 @@ void bnx2x_int_enable(struct bnx2x *bp);
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
/**
- * bnx2x_init_firmware - loads device firmware
- *
- * @bp: driver handle
- */
-int bnx2x_init_firmware(struct bnx2x *bp);
-
-/**
- * bnx2x_init_hw - init HW blocks according to current initialization stage.
- *
- * @bp: driver handle
- * @load_code: COMMON, PORT or FUNCTION
- */
-int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
-
-/**
* bnx2x_nic_init - init driver internals.
*
* @bp: driver handle
@@ -207,16 +289,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp);
void bnx2x_free_mem(struct bnx2x *bp);
/**
- * bnx2x_setup_client - setup eth client.
- *
- * @bp: driver handle
- * @fp: pointer to fastpath structure
- * @is_leading: boolean
- */
-int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- int is_leading);
-
-/**
* bnx2x_set_num_queues - set number of queues according to mode.
*
* @bp: driver handle
@@ -252,36 +324,21 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
/**
- * bnx2x_set_eth_mac - configure eth MAC address in the HW
- *
- * @bp: driver handle
- * @set: set or clear
- *
- * Configures according to the value in netdev->dev_addr.
- */
-void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
-
-#ifdef BCM_CNIC
-/**
- * bnx2x_set_fip_eth_mac_addr - Set/Clear FIP MAC(s)
+ * bnx2x_release_leader_lock - release recovery leader lock
*
* @bp: driver handle
- * @set: set or clear the CAM entry
- *
- * Used next enties in the CAM after the ETH MAC(s).
- * This function will wait until the ramdord completion returns.
- * Return 0 if cussess, -ENODEV if ramrod doesn't return.
*/
-int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);
+int bnx2x_release_leader_lock(struct bnx2x *bp);
/**
- * bnx2x_set_all_enode_macs - Set/Clear ALL_ENODE mcast MAC.
+ * bnx2x_set_eth_mac - configure eth MAC address in the HW
*
* @bp: driver handle
* @set: set or clear
+ *
+ * Configures according to the value in netdev->dev_addr.
*/
-int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
-#endif
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
/**
* bnx2x_set_rx_mode - set MAC filtering configurations.
@@ -289,6 +346,8 @@ int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
* @dev: netdevice
*
* called with netif_tx_lock from dev_mcast.c
+ * If bp->state is OPEN, should be called with
+ * netif_addr_lock_bh()
*/
void bnx2x_set_rx_mode(struct net_device *dev);
@@ -296,25 +355,38 @@ void bnx2x_set_rx_mode(struct net_device *dev);
* bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
*
* @bp: driver handle
+ *
+ * If bp->state is OPEN, should be called with
+ * netif_addr_lock_bh().
*/
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+/**
+ * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
+ *
+ * @bp: driver handle
+ * @cl_id: client id
+ * @rx_mode_flags: rx mode configuration
+ * @rx_accept_flags: rx accept configuration
+ * @tx_accept_flags: tx accept configuration (tx switch)
+ * @ramrod_flags: ramrod configuration
+ */
+void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags);
+
/* Parity errors related */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
-bool bnx2x_chk_parity_attn(struct bnx2x *bp);
-bool bnx2x_reset_is_done(struct bnx2x *bp);
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
+void bnx2x_set_reset_in_progress(struct bnx2x *bp);
+void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
/**
- * bnx2x_stats_handle - perform statistics handling according to event.
- *
- * @bp: driver handle
- * @event: bnx2x_stats_event
- */
-void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
-
-/**
* bnx2x_sp_event - handle ramrods completion.
*
* @fp: fastpath handle for the event
@@ -323,15 +395,6 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
/**
- * bnx2x_func_start - init function
- *
- * @bp: driver handle
- *
- * Must be called before sending CLIENT_SETUP for the first client.
- */
-int bnx2x_func_start(struct bnx2x *bp);
-
-/**
* bnx2x_ilt_set_info - prepare ILT configurations.
*
* @bp: driver handle
@@ -362,6 +425,10 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
* @value: new value
*/
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
+/* Error handling */
+void bnx2x_panic_dump(struct bnx2x *bp);
+
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
@@ -372,16 +439,25 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
+/* setup_tc callback */
+int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+/* reload helper */
+int bnx2x_reload_if_running(struct net_device *dev);
+
int bnx2x_change_mac_addr(struct net_device *dev, void *p);
/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
+void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
+
/* NAPI poll Tx part */
-int bnx2x_tx_int(struct bnx2x_fastpath *fp);
+int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
@@ -392,7 +468,6 @@ void bnx2x_free_irq(struct bnx2x *bp);
void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);
-
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
@@ -447,6 +522,17 @@ void bnx2x_free_mem_bp(struct bnx2x *bp);
*/
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
+#if defined(BCM_CNIC) && (defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE))
+/**
+ * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
+ *
+ * @dev: net_device
+ * @wwn: output buffer
+ * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
+ *
+ */
+int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
+#endif
u32 bnx2x_fix_features(struct net_device *dev, u32 features);
int bnx2x_set_features(struct net_device *dev, u32 features);
@@ -457,19 +543,20 @@ int bnx2x_set_features(struct net_device *dev, u32 features);
*/
void bnx2x_tx_timeout(struct net_device *dev);
+/*********************** Inlines **********************************/
+/*********************** Fast path ********************************/
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
barrier(); /* status block is written to by the chip */
fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}
-static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
- struct bnx2x_fastpath *fp,
- u16 bd_prod, u16 rx_comp_prod,
- u16 rx_sge_prod)
+static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, u16 bd_prod,
+ u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
{
struct ustorm_eth_rx_producers rx_prods = {0};
- int i;
+ u32 i;
/* Update producers */
rx_prods.bd_prod = bd_prod;
@@ -486,10 +573,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
*/
wmb();
- for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
- REG_WR(bp,
- BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
- ((u32 *)&rx_prods)[i]);
+ for (i = 0; i < sizeof(rx_prods)/4; i++)
+ REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);
mmiowb(); /* keep prod updates ordered */
@@ -519,7 +604,7 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
barrier();
}
-static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
+static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
u8 idu_sb_id, bool is_Pf)
{
u32 data, ctl, cnt = 100;
@@ -527,7 +612,7 @@ static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
u32 sb_bit = 1 << (idu_sb_id%32);
- u32 func_encode = BP_FUNC(bp) |
+ u32 func_encode = func |
((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
@@ -590,15 +675,6 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
barrier();
}
-static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
- u16 index, u8 op, u8 update)
-{
- u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
-
- bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
- igu_addr);
-}
-
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
u16 index, u8 op, u8 update)
{
@@ -653,21 +729,22 @@ static inline u16 bnx2x_ack_int(struct bnx2x *bp)
return bnx2x_igu_ack_int(bp);
}
-static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
+static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
/* Tell compiler that consumer and producer can change */
barrier();
- return fp->tx_pkt_prod != fp->tx_pkt_cons;
+ return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}
-static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
+static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata)
{
s16 used;
u16 prod;
u16 cons;
- prod = fp->tx_bd_prod;
- cons = fp->tx_bd_cons;
+ prod = txdata->tx_bd_prod;
+ cons = txdata->tx_bd_cons;
/* NUM_TX_RINGS = number of "next-page" entries
It will be used as a threshold */
@@ -675,21 +752,30 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
#ifdef BNX2X_STOP_ON_ERROR
WARN_ON(used < 0);
- WARN_ON(used > fp->bp->tx_ring_size);
- WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
+ WARN_ON(used > bp->tx_ring_size);
+ WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif
- return (s16)(fp->bp->tx_ring_size) - used;
+ return (s16)(bp->tx_ring_size) - used;
}
-static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
u16 hw_cons;
/* Tell compiler that status block fields can change */
barrier();
- hw_cons = le16_to_cpu(*fp->tx_cons_sb);
- return hw_cons != fp->tx_pkt_cons;
+ hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
+ return hw_cons != txdata->tx_pkt_cons;
+}
+
+static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+{
+ u8 cos;
+ for_each_cos_in_tx_queue(fp, cos)
+ if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
+ return true;
+ return false;
}
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
@@ -705,7 +791,7 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
}
/**
- * disables tx from stack point of view
+ * bnx2x_tx_disable - disables tx from stack point of view
*
* @bp: driver handle
*/
@@ -740,7 +826,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
int i;
/* Add NAPI objects */
- for_each_napi_queue(bp, i)
+ for_each_rx_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
@@ -749,7 +835,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
- for_each_napi_queue(bp, i)
+ for_each_rx_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
@@ -779,7 +865,7 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
int idx = RX_SGE_CNT * i - 1;
for (j = 0; j < 2; j++) {
- SGE_MASK_CLEAR_BIT(fp, idx);
+ BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
idx--;
}
}
@@ -789,7 +875,7 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
memset(fp->sge_mask, 0xff,
- (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
+ (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
/* Clear the two last indices in the page to 1:
these are the indices that correspond to the "next" element,
@@ -861,22 +947,69 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
u16 cons, u16 prod)
{
- struct bnx2x *bp = fp->bp;
struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
- dma_sync_single_for_device(&bp->pdev->dev,
- dma_unmap_addr(cons_rx_buf, mapping),
- RX_COPY_THRESH, DMA_FROM_DEVICE);
-
- prod_rx_buf->skb = cons_rx_buf->skb;
dma_unmap_addr_set(prod_rx_buf, mapping,
dma_unmap_addr(cons_rx_buf, mapping));
+ prod_rx_buf->skb = cons_rx_buf->skb;
*prod_bd = *cons_bd;
}
+/************************* Init ******************************************/
+
+/**
+ * bnx2x_func_start - init function
+ *
+ * @bp: driver handle
+ *
+ * Must be called before sending CLIENT_SETUP for the first client.
+ */
+static inline int bnx2x_func_start(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_start_params *start_params =
+ &func_params.params.start;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_START;
+
+ /* Function parameters */
+ start_params->mf_mode = bp->mf_mode;
+ start_params->sd_vlan_tag = bp->mf_ov;
+ if (CHIP_IS_E1x(bp))
+ start_params->network_cos_mode = OVERRIDE_COS;
+ else
+ start_params->network_cos_mode = STATIC_COS;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+
+/**
+ * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
+ *
+ * @fw_hi: pointer to upper part
+ * @fw_mid: pointer to middle part
+ * @fw_lo: pointer to lower part
+ * @mac: pointer to MAC address
+ */
+static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
+ u8 *mac)
+{
+ ((u8 *)fw_hi)[0] = mac[1];
+ ((u8 *)fw_hi)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lo)[0] = mac[5];
+ ((u8 *)fw_lo)[1] = mac[4];
+}
+
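The helper above packs a 6-byte MAC into three 16-bit words with the bytes of each pair swapped, which is the layout the firmware expects. Below is a minimal self-contained userspace C sketch of the same byte shuffling; the sample MAC and the printout are illustration only and not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Same byte order as the helper above: mac[0..5] -> hi/mid/lo words,
 * with the two bytes of each word swapped. */
static void set_fw_mac_addr(uint16_t *fw_hi, uint16_t *fw_mid,
			    uint16_t *fw_lo, const uint8_t *mac)
{
	((uint8_t *)fw_hi)[0]  = mac[1];
	((uint8_t *)fw_hi)[1]  = mac[0];
	((uint8_t *)fw_mid)[0] = mac[3];
	((uint8_t *)fw_mid)[1] = mac[2];
	((uint8_t *)fw_lo)[0]  = mac[5];
	((uint8_t *)fw_lo)[1]  = mac[4];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
	uint16_t hi, mid, lo;

	set_fw_mac_addr(&hi, &mid, &lo, mac);
	/* printed values depend on host endianness (0x0010 for hi on a
	 * little-endian host); the in-memory byte order is what matters */
	printf("hi=%04x mid=%04x lo=%04x\n", hi, mid, lo);
	return 0;
}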
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
struct bnx2x_fastpath *fp, int last)
{
@@ -895,57 +1028,58 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
int i;
for (i = 0; i < last; i++) {
- struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
- struct sk_buff *skb = rx_buf->skb;
+ struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
+ struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+ struct sk_buff *skb = first_buf->skb;
if (skb == NULL) {
DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
continue;
}
-
- if (fp->tpa_state[i] == BNX2X_TPA_START)
+ if (tpa_info->tpa_state == BNX2X_TPA_START)
dma_unmap_single(&bp->pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
+ dma_unmap_addr(first_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
-
dev_kfree_skb(skb);
- rx_buf->skb = NULL;
+ first_buf->skb = NULL;
}
}
-static inline void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp)
+static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
{
int i;
for (i = 1; i <= NUM_TX_RINGS; i++) {
struct eth_tx_next_bd *tx_next_bd =
- &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
+ &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
tx_next_bd->addr_hi =
- cpu_to_le32(U64_HI(fp->tx_desc_mapping +
+ cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
tx_next_bd->addr_lo =
- cpu_to_le32(U64_LO(fp->tx_desc_mapping +
+ cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
}
- SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
- fp->tx_db.data.zero_fill1 = 0;
- fp->tx_db.data.prod = 0;
+ SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
+ txdata->tx_db.data.zero_fill1 = 0;
+ txdata->tx_db.data.prod = 0;
- fp->tx_pkt_prod = 0;
- fp->tx_pkt_cons = 0;
- fp->tx_bd_prod = 0;
- fp->tx_bd_cons = 0;
- fp->tx_pkt = 0;
+ txdata->tx_pkt_prod = 0;
+ txdata->tx_pkt_cons = 0;
+ txdata->tx_bd_prod = 0;
+ txdata->tx_bd_cons = 0;
+ txdata->tx_pkt = 0;
}
static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
int i;
+ u8 cos;
for_each_tx_queue(bp, i)
- bnx2x_init_tx_ring_one(&bp->fp[i]);
+ for_each_cos_in_tx_queue(&bp->fp[i], cos)
+ bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
}
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
@@ -1038,31 +1172,220 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
return i - fp->eth_q_stats.rx_skb_alloc_failed;
}
+/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
+ * port.
+ */
+static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
+{
+ if (!CHIP_IS_E1x(fp->bp))
+ return fp->cl_id;
+ else
+ return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x;
+}
+
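To make the E1x branch above concrete, here is a tiny standalone sketch of the same arithmetic. The value of FP_SB_MAX_E1x is assumed for the example and is not taken from the driver headers; the point is only the per-port offset folded in on E1x chips.

#include <stdint.h>
#include <stdio.h>

/* assumed value, for illustration only */
#define FP_SB_MAX_E1X_ASSUMED	16

static uint8_t stats_id(int is_e1x, uint8_t cl_id, uint8_t port)
{
	if (!is_e1x)
		return cl_id;				/* global per path */
	return cl_id + port * FP_SB_MAX_E1X_ASSUMED;	/* fold in the port */
}

int main(void)
{
	printf("non-E1x cl_id 3        -> stats id %u\n", stats_id(0, 3, 1));
	printf("E1x     cl_id 3 port 1 -> stats id %u\n", stats_id(1, 3, 1));
	return 0;
}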
+static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
+ bnx2x_obj_type obj_type)
+{
+ struct bnx2x *bp = fp->bp;
+
+ /* Configure classification DBs */
+ bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
+ BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+ bnx2x_sp_mapping(bp, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &bp->sp_state, obj_type,
+ &bp->macs_pool);
+}
+
+/**
+ * bnx2x_get_path_func_num - get number of active functions
+ *
+ * @bp: driver handle
+ *
+ * Calculates the number of active (not hidden) functions on the
+ * current path.
+ */
+static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
+{
+ u8 func_num = 0, i;
+
+ /* 57710 has only one function per-port */
+ if (CHIP_IS_E1(bp))
+ return 1;
+
+	/* Calculate the number of functions enabled on the current
+ * PATH/PORT.
+ */
+ if (CHIP_REV_IS_SLOW(bp)) {
+ if (IS_MF(bp))
+ func_num = 4;
+ else
+ func_num = 2;
+ } else {
+ for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
+ u32 func_config =
+ MF_CFG_RD(bp,
+ func_mf_config[BP_PORT(bp) + 2 * i].
+ config);
+ func_num +=
+ ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
+ }
+ }
+
+ WARN_ON(!func_num);
+
+ return func_num;
+}
+
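The loop above counts the functions of the current path whose "hide" flag is clear in the MF configuration. A standalone sketch of that counting pattern, with an invented flag value and config table (neither is a driver constant):

#include <stdint.h>
#include <stdio.h>

#define FUNC_HIDE_FLAG	0x1	/* example flag, not the driver's value */
#define FUNCS_PER_PATH	4

static uint8_t count_active_funcs(const uint32_t *func_config)
{
	uint8_t i, num = 0;

	/* count entries whose hide bit is clear */
	for (i = 0; i < FUNCS_PER_PATH; i++)
		num += (func_config[i] & FUNC_HIDE_FLAG) ? 0 : 1;

	return num;
}

int main(void)
{
	const uint32_t cfg[FUNCS_PER_PATH] = { 0x0, 0x1, 0x0, 0x1 };

	printf("active functions: %u\n", count_active_funcs(cfg));
	return 0;
}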
+static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
+{
+ /* RX_MODE controlling object */
+ bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
+
+ /* multicast configuration controlling object */
+ bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
+ BP_FUNC(bp), BP_FUNC(bp),
+ bnx2x_sp(bp, mcast_rdata),
+ bnx2x_sp_mapping(bp, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
+ BNX2X_OBJ_TYPE_RX);
+
+ /* Setup CAM credit pools */
+ bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
+ bnx2x_get_path_func_num(bp));
+
+ /* RSS configuration object */
+ bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
+ bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
+ bnx2x_sp(bp, rss_rdata),
+ bnx2x_sp_mapping(bp, rss_rdata),
+ BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
+ BNX2X_OBJ_TYPE_RX);
+}
+
+static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
+{
+ if (CHIP_IS_E1x(fp->bp))
+ return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
+ else
+ return fp->cl_id;
+}
+
+static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+
+ if (!CHIP_IS_E1x(bp))
+ return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+ else
+ return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+}
+
+static inline void bnx2x_init_txdata(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
+ __le16 *tx_cons_sb)
+{
+ txdata->cid = cid;
+ txdata->txq_index = txq_index;
+ txdata->tx_cons_sb = tx_cons_sb;
+
+ DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d",
+ txdata->cid, txdata->txq_index);
+}
+
#ifdef BCM_CNIC
+static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
+{
+ return bp->cnic_base_cl_id + cl_idx +
+ (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE;
+}
+
+static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
+{
+
+ /* the 'first' id is allocated for the cnic */
+ return bp->base_fw_ndsb;
+}
+
+static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
+{
+ return bp->igu_base_sb;
+}
+
+
static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
- bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID +
- BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ unsigned long q_type = 0;
+
+ bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
+ BNX2X_FCOE_ETH_CL_ID_IDX);
+	/** Current BNX2X_FCOE_ETH_CID definition implies not more than
+ * 16 ETH clients per function when CNIC is enabled!
+ *
+ * Fix it ASAP!!!
+ */
bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
- bnx2x_fcoe(bp, bp) = bp;
- bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
- bnx2x_fcoe(bp, index) = FCOE_IDX;
bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
- bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;
+
+ bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
+ fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
+
+ DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)", fp->index);
+
/* qZone id equals to FW (per path) client id */
- bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) +
- BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
- ETH_MAX_RX_CLIENTS_E1H);
+ bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
/* init shortcut */
- bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ?
- USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) :
- USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id);
+ bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
+ bnx2x_rx_ustorm_prods_offset(fp);
+
+ /* Configure Queue State object */
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+ /* No multi-CoS for FCoE L2 client */
+ BUG_ON(fp->max_cos != 1);
+
+ bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
+ BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+ DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
+ "igu_sb %d\n",
+ fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
}
#endif
+static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata)
+{
+ int cnt = 1000;
+
+ while (bnx2x_has_tx_work_unload(txdata)) {
+ if (!cnt) {
+ BNX2X_ERR("timeout waiting for queue[%d]: "
+ "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
+ txdata->txq_index, txdata->tx_pkt_prod,
+ txdata->tx_pkt_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+ return -EBUSY;
+#else
+ break;
+#endif
+ }
+ cnt--;
+ usleep_range(1000, 1000);
+ }
+
+ return 0;
+}
+
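bnx2x_clean_tx_queue() above is a bounded polling loop: it rechecks the queue, sleeps about a millisecond per iteration, and reports a timeout after roughly a thousand tries. A minimal userspace sketch of that shape, with a stand-in predicate instead of real hardware state:

#include <stdio.h>
#include <unistd.h>

/* stand-in for the hardware condition; counts down so the demo terminates */
static int iterations_left = 5;

static int queue_has_work(void)
{
	return iterations_left-- > 0;
}

/* same shape as above: poll, sleep ~1 ms, give up after cnt tries */
static int clean_tx_queue(void)
{
	int cnt = 1000;

	while (queue_has_work()) {
		if (!cnt) {
			fprintf(stderr, "timeout waiting for tx queue\n");
			return -1;
		}
		cnt--;
		usleep(1000);
	}
	return 0;
}

int main(void)
{
	return clean_tx_queue() ? 1 : 0;
}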
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
+
static inline void __storm_memset_struct(struct bnx2x *bp,
u32 addr, size_t size, u32 *data)
{
@@ -1071,42 +1394,78 @@ static inline void __storm_memset_struct(struct bnx2x *bp,
REG_WR(bp, addr + (i * 4), data[i]);
}
-static inline void storm_memset_mac_filters(struct bnx2x *bp,
- struct tstorm_eth_mac_filter_config *mac_filters,
- u16 abs_fid)
+static inline void storm_memset_func_cfg(struct bnx2x *bp,
+ struct tstorm_eth_function_common_config *tcfg,
+ u16 abs_fid)
{
- size_t size = sizeof(struct tstorm_eth_mac_filter_config);
+ size_t size = sizeof(struct tstorm_eth_function_common_config);
u32 addr = BAR_TSTRORM_INTMEM +
- TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);
+ TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
- __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
+ __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}
static inline void storm_memset_cmng(struct bnx2x *bp,
struct cmng_struct_per_port *cmng,
u8 port)
{
- size_t size =
- sizeof(struct rate_shaping_vars_per_port) +
- sizeof(struct fairness_vars_per_port) +
- sizeof(struct safc_struct_per_port) +
- sizeof(struct pfc_struct_per_port);
+ size_t size = sizeof(struct cmng_struct_per_port);
u32 addr = BAR_XSTRORM_INTMEM +
XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
__storm_memset_struct(bp, addr, size, (u32 *)cmng);
+}
- addr += size + 4 /* SKIP DCB+LLFC */;
- size = sizeof(struct cmng_struct_per_port) -
- size /* written */ - 4 /*skipped*/;
+/**
+ * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
+ *
+ * @bp: driver handle
+ * @mask: bits that need to be cleared
+ */
+static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
+{
+ int tout = 5000; /* Wait for 5 secs tops */
+
+ while (tout--) {
+ smp_mb();
+ netif_addr_lock_bh(bp->dev);
+ if (!(bp->sp_state & mask)) {
+ netif_addr_unlock_bh(bp->dev);
+ return true;
+ }
+ netif_addr_unlock_bh(bp->dev);
+
+ usleep_range(1000, 1000);
+ }
+
+ smp_mb();
- __storm_memset_struct(bp, addr, size,
- (u32 *)(cmng->traffic_type_to_priority_cos));
+ netif_addr_lock_bh(bp->dev);
+ if (bp->sp_state & mask) {
+ BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, "
+ "mask 0x%lx\n", bp->sp_state, mask);
+ netif_addr_unlock_bh(bp->dev);
+ return false;
+ }
+ netif_addr_unlock_bh(bp->dev);
+
+ return true;
}
-/* HW Lock for shared dual port PHYs */
+/**
+ * bnx2x_set_ctx_validation - set CDU context validation values
+ *
+ * @bp: driver handle
+ * @cxt: context of the connection on the host memory
+ * @cid: SW CID of the connection to be configured
+ */
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+ u32 cid);
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+ u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index 410a49e571a..a1e004a82f7 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -19,20 +19,18 @@
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/errno.h>
-#ifdef BCM_DCBNL
-#include <linux/dcbnl.h>
-#endif
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"
-
/* forward declarations of dcbx related functions */
-static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
-static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
u32 *set_configuration_ets_pg,
u32 *pri_pg_tbl);
@@ -47,34 +45,56 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
struct cos_help_data *cos_data,
u32 *pg_pri_orginal_spread,
struct dcbx_ets_feature *ets);
-static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp);
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
+ struct bnx2x_func_tx_start_params*);
+
+/* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */
+static void bnx2x_read_data(struct bnx2x *bp, u32 *buff,
+ u32 addr, u32 len)
+{
+ int i;
+ for (i = 0; i < len; i += 4, buff++)
+ *buff = REG_RD(bp, addr + i);
+}
+static void bnx2x_write_data(struct bnx2x *bp, u32 *buff,
+ u32 addr, u32 len)
+{
+ int i;
+ for (i = 0; i < len; i += 4, buff++)
+ REG_WR(bp, addr + i, *buff);
+}
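The two helpers just introduced move a structure to or from internal memory four bytes at a time through the 32-bit register accessors. The sketch below mirrors that pattern against a fake register window; reg_rd()/reg_wr() here stand in for REG_RD/REG_WR and are not the driver's implementations.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t regs[64];	/* fake 32-bit register window */

static uint32_t reg_rd(uint32_t addr)             { return regs[addr / 4]; }
static void     reg_wr(uint32_t addr, uint32_t v) { regs[addr / 4] = v; }

/* move len bytes between a buffer and the window in 32-bit chunks;
 * len is a multiple of 4, as it is for the MIB structures above */
static void read_data(uint32_t *buff, uint32_t addr, uint32_t len)
{
	uint32_t i;

	for (i = 0; i < len; i += 4, buff++)
		*buff = reg_rd(addr + i);
}

static void write_data(const uint32_t *buff, uint32_t addr, uint32_t len)
{
	uint32_t i;

	for (i = 0; i < len; i += 4, buff++)
		reg_wr(addr + i, *buff);
}

int main(void)
{
	uint32_t out[4] = { 1, 2, 3, 4 }, in[4] = { 0 };

	write_data(out, 16, sizeof(out));
	read_data(in, 16, sizeof(in));
	printf("round trip ok: %d\n", memcmp(out, in, sizeof(out)) == 0);
	return 0;
}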
static void bnx2x_pfc_set(struct bnx2x *bp)
{
struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
u32 pri_bit, val = 0;
- u8 pri;
+ int i;
- /* Tx COS configuration */
- if (bp->dcbx_port_params.ets.cos_params[0].pauseable)
- pfc_params.rx_cos0_priority_mask =
- bp->dcbx_port_params.ets.cos_params[0].pri_bitmask;
- if (bp->dcbx_port_params.ets.cos_params[1].pauseable)
- pfc_params.rx_cos1_priority_mask =
- bp->dcbx_port_params.ets.cos_params[1].pri_bitmask;
+ pfc_params.num_of_rx_cos_priority_mask =
+ bp->dcbx_port_params.ets.num_of_cos;
+ /* Tx COS configuration */
+ for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
+ /*
+		 * We configure only the pauseable bits (non-pauseable aren't
+		 * configured at all); this is done to avoid false pauses from
+		 * the network
+ */
+ pfc_params.rx_cos_priority_mask[i] =
+ bp->dcbx_port_params.ets.cos_params[i].pri_bitmask
+ & DCBX_PFC_PRI_PAUSE_MASK(bp);
- /**
+ /*
* Rx COS configuration
* Changing PFC RX configuration .
* In RX COS0 will always be configured to lossy and COS1 to lossless
*/
- for (pri = 0 ; pri < MAX_PFC_PRIORITIES ; pri++) {
- pri_bit = 1 << pri;
+ for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
+ pri_bit = 1 << i;
if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
- val |= 1 << (pri * 4);
+ val |= 1 << (i * 4);
}
pfc_params.pkt_priority_to_cos = val;
@@ -200,7 +220,11 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n");
- if (app->enabled && !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) {
+ if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH))
+ DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n");
+
+ if (app->enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) {
bp->dcbx_port_params.app.enabled = true;
@@ -253,12 +277,11 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
/* Clean up old settings of ets on COS */
- for (i = 0; i < E2_NUM_OF_COS ; i++) {
-
+ for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) {
cos_params[i].pauseable = false;
- cos_params[i].strict = BNX2X_DCBX_COS_NOT_STRICT;
+ cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID;
cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
- cos_params[i].pri_bitmask = DCBX_PFC_PRI_GET_NON_PAUSE(bp, 0);
+ cos_params[i].pri_bitmask = 0;
}
if (bp->dcbx_port_params.app.enabled &&
@@ -296,7 +319,7 @@ static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n");
if (bp->dcbx_port_params.app.enabled &&
- !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR) &&
+ !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) &&
pfc->enabled) {
bp->dcbx_port_params.pfc.enabled = true;
bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
@@ -308,6 +331,32 @@ static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
}
}
+/* maps unmapped priorities to the same COS as L2 */
+static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
+{
+ int i;
+ u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */
+ u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+ u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW];
+ struct bnx2x_dcbx_cos_params *cos_params =
+ bp->dcbx_port_params.ets.cos_params;
+
+ /* get unmapped priorities by clearing mapped bits */
+ for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
+ unmapped &= ~(1 << ttp[i]);
+
+ /* find cos for nw prio and extend it with unmapped */
+ for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
+ if (cos_params[i].pri_bitmask & nw_prio) {
+ /* extend the bitmask with unmapped */
+ DP(NETIF_MSG_LINK,
+ "cos %d extended with 0x%08x", i, unmapped);
+ cos_params[i].pri_bitmask |= unmapped;
+ break;
+ }
+ }
+}
+
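The bit arithmetic in bnx2x_dcbx_map_nw() above starts from an all-ones priority mask, clears every priority a traffic type claims, and ORs what is left into the COS that already carries the network priority. A standalone sketch with example tables (the traffic-type and COS masks below are invented for the demo):

#include <stdint.h>
#include <stdio.h>

#define MAX_PRIORITIES	8
#define NUM_COS		3

int main(void)
{
	/* traffic type -> priority; index 0 plays the "network" role here */
	const uint32_t ttp[] = { 0, 3, 4 };
	uint32_t cos_pri_mask[NUM_COS] = { 0x09, 0x10, 0x00 };
	uint32_t unmapped = (1u << MAX_PRIORITIES) - 1;	/* all ones */
	uint32_t nw_prio = 1u << ttp[0];
	unsigned int i;

	/* clear the bits of every mapped priority */
	for (i = 0; i < sizeof(ttp) / sizeof(ttp[0]); i++)
		unmapped &= ~(1u << ttp[i]);

	/* extend the COS that carries the network priority */
	for (i = 0; i < NUM_COS; i++) {
		if (cos_pri_mask[i] & nw_prio) {
			cos_pri_mask[i] |= unmapped;
			break;
		}
	}

	printf("unmapped 0x%02x -> cos0 mask 0x%02x\n", unmapped,
	       cos_pri_mask[0]);
	return 0;
}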
static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
struct dcbx_features *features,
u32 error)
@@ -317,6 +366,8 @@ static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
+
+ bnx2x_dcbx_map_nw(bp);
}
#define DCBX_LOCAL_MIB_MAX_TRY_READ (100)
@@ -325,8 +376,8 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
u32 offset,
int read_mib_type)
{
- int max_try_read = 0, i;
- u32 *buff, mib_size, prefix_seq_num, suffix_seq_num;
+ int max_try_read = 0;
+ u32 mib_size, prefix_seq_num, suffix_seq_num;
struct lldp_remote_mib *remote_mib ;
struct lldp_local_mib *local_mib;
@@ -345,9 +396,7 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
offset += BP_PORT(bp) * mib_size;
do {
- buff = base_mib_addr;
- for (i = 0; i < mib_size; i += 4, buff++)
- *buff = REG_RD(bp, offset + i);
+ bnx2x_read_data(bp, base_mib_addr, offset, mib_size);
max_try_read++;
@@ -378,60 +427,50 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
{
- if (CHIP_IS_E2(bp)) {
- if (BP_PORT(bp)) {
- BNX2X_ERR("4 port mode is not supported");
- return;
- }
+ if (bp->dcbx_port_params.pfc.enabled &&
+ !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
+ /*
+ * 1. Fills up common PFC structures if required
+ * 2. Configure NIG, MAC and BRB via the elink
+ */
+ bnx2x_pfc_set(bp);
+ else
+ bnx2x_pfc_clear(bp);
+}
- if (bp->dcbx_port_params.pfc.enabled)
+static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {0};
- /* 1. Fills up common PFC structures if required.*/
- /* 2. Configure NIG, MAC and BRB via the elink:
- * elink must first check if BMAC is not in reset
- * and only then configures the BMAC
- * Or, configure EMAC.
- */
- bnx2x_pfc_set(bp);
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_TX_STOP;
- else
- bnx2x_pfc_clear(bp);
- }
+ DP(NETIF_MSG_LINK, "STOP TRAFFIC\n");
+ return bnx2x_func_state_change(bp, &func_params);
}
-static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
{
- DP(NETIF_MSG_LINK, "sending STOP TRAFFIC\n");
- bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
- 0 /* connectionless */,
- 0 /* dataHi is zero */,
- 0 /* dataLo is zero */,
- 1 /* common */);
-}
+ struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_tx_start_params *tx_params =
+ &func_params.params.tx_start;
-static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
-{
- bnx2x_pfc_fw_struct_e2(bp);
- DP(NETIF_MSG_LINK, "sending START TRAFFIC\n");
- bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC,
- 0, /* connectionless */
- U64_HI(bnx2x_sp_mapping(bp, pfc_config)),
- U64_LO(bnx2x_sp_mapping(bp, pfc_config)),
- 1 /* commmon */);
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_TX_START;
+
+ bnx2x_dcbx_fw_struct(bp, tx_params);
+
+ DP(NETIF_MSG_LINK, "START TRAFFIC\n");
+ return bnx2x_func_state_change(bp, &func_params);
}
-static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
+static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
{
struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
- u8 status = 0;
-
- bnx2x_ets_disabled(&bp->link_params);
-
- if (!ets->enabled)
- return;
+ int rc = 0;
- if ((ets->num_of_cos == 0) || (ets->num_of_cos > E2_NUM_OF_COS)) {
- BNX2X_ERR("illegal num of cos= %x", ets->num_of_cos);
+ if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) {
+ BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos);
return;
}
@@ -440,9 +479,9 @@ static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
return;
/* sanity */
- if (((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[0].strict) &&
+ if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) &&
(DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
- ((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[1].strict) &&
+ ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) &&
(DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
BNX2X_ERR("all COS should have at least bw_limit or strict"
"ets->cos_params[0].strict= %x"
@@ -474,17 +513,71 @@ static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
} else {
- if (ets->cos_params[0].strict == BNX2X_DCBX_COS_HIGH_STRICT)
- status = bnx2x_ets_strict(&bp->link_params, 0);
+ if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST)
+ rc = bnx2x_ets_strict(&bp->link_params, 0);
else if (ets->cos_params[1].strict
- == BNX2X_DCBX_COS_HIGH_STRICT)
- status = bnx2x_ets_strict(&bp->link_params, 1);
-
- if (status)
+ == BNX2X_DCBX_STRICT_COS_HIGHEST)
+ rc = bnx2x_ets_strict(&bp->link_params, 1);
+ if (rc)
BNX2X_ERR("update_ets_params failed\n");
}
}
+/*
+ * In E3B0 the configuration may have more than 2 COS.
+ */
+void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
+{
+ struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+ struct bnx2x_ets_params ets_params = { 0 };
+ u8 i;
+
+ ets_params.num_of_cos = ets->num_of_cos;
+
+ for (i = 0; i < ets->num_of_cos; i++) {
+ /* COS is SP */
+ if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) {
+ if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) {
+				BNX2X_ERR("COS can't be both BW and SP\n");
+ return;
+ }
+
+ ets_params.cos[i].state = bnx2x_cos_state_strict;
+ ets_params.cos[i].params.sp_params.pri =
+ ets->cos_params[i].strict;
+ } else { /* COS is BW */
+ if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) {
+				BNX2X_ERR("COS must be either BW or SP\n");
+ return;
+ }
+ ets_params.cos[i].state = bnx2x_cos_state_bw;
+ ets_params.cos[i].params.bw_params.bw =
+ (u8)ets->cos_params[i].bw_tbl;
+ }
+ }
+
+ /* Configure the ETS in HW */
+ if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars,
+ &ets_params)) {
+ BNX2X_ERR("bnx2x_ets_e3b0_config failed\n");
+ bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+ }
+}
+
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
+{
+ bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+
+ if (!bp->dcbx_port_params.ets.enabled ||
+ (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
+ return;
+
+ if (CHIP_IS_E3B0(bp))
+ bnx2x_dcbx_update_ets_config(bp);
+ else
+ bnx2x_dcbx_2cos_limit_update_ets_config(bp);
+}
+
#ifdef BCM_DCBNL
static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
{
@@ -527,6 +620,7 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
return -EINVAL;
}
+
rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
DCBX_READ_LOCAL_MIB);
@@ -563,15 +657,6 @@ u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
DCB_APP_IDTYPE_ETHTYPE;
}
-static inline
-void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp)
-{
- int i;
- for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
- bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &=
- ~DCBX_APP_ENTRY_VALID;
-}
-
int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
{
int i, err = 0;
@@ -597,32 +682,50 @@ int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
}
#endif
-void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
- switch (state) {
- case BNX2X_DCBX_STATE_NEG_RECEIVED:
-#ifdef BCM_CNIC
- if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
- struct cnic_ops *c_ops;
- struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
- bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
- cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
- cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
-
- rcu_read_lock();
- c_ops = rcu_dereference(bp->cnic_ops);
- if (c_ops) {
- bnx2x_cnic_notify(bp, CNIC_CTL_STOP_ISCSI_CMD);
- rcu_read_unlock();
- return;
+ if (SHMEM2_HAS(bp, drv_flags)) {
+ u32 drv_flags;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ drv_flags = SHMEM2_RD(bp, drv_flags);
+
+ if (set)
+ SET_FLAGS(drv_flags, flags);
+ else
+ RESET_FLAGS(drv_flags, flags);
+
+ SHMEM2_WR(bp, drv_flags, drv_flags);
+ DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
+ bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ }
+}
+
+static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
+{
+ u8 prio, cos;
+ for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) {
+ for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
+ if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
+ & (1 << prio)) {
+ bp->prio_to_cos[prio] = cos;
+ DP(NETIF_MSG_LINK,
+ "tx_mapping %d --> %d\n", prio, cos);
}
- rcu_read_unlock();
}
+ }
- /* fall through if no CNIC initialized */
- case BNX2X_DCBX_STATE_ISCSI_STOPPED:
-#endif
+ /* setup tc must be called under rtnl lock, but we can't take it here
+	 * as we are handling an attention on a work queue which must be
+	 * flushed in some rtnl-locked contexts (e.g. if down)
+ */
+ if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
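The mapping loop just added inverts the per-COS priority bitmasks into a priority-to-COS lookup table used later for tx queue selection. A standalone sketch of that inversion with example masks (the two masks below are illustrative, not negotiated values):

#include <stdint.h>
#include <stdio.h>

#define MAX_PRIORITY	8
#define NUM_COS		2

int main(void)
{
	const uint32_t cos_pri_mask[NUM_COS] = { 0x0f, 0xf0 };
	uint8_t prio_to_cos[MAX_PRIORITY] = { 0 };
	uint8_t prio, cos;

	/* for each COS, mark every priority found in its bitmask */
	for (cos = 0; cos < NUM_COS; cos++)
		for (prio = 0; prio < MAX_PRIORITY; prio++)
			if (cos_pri_mask[cos] & (1u << prio))
				prio_to_cos[prio] = cos;

	for (prio = 0; prio < MAX_PRIORITY; prio++)
		printf("prio %u -> cos %u\n", prio, prio_to_cos[prio]);
	return 0;
}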
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
+{
+ switch (state) {
+ case BNX2X_DCBX_STATE_NEG_RECEIVED:
{
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
#ifdef BCM_DCBNL
@@ -646,102 +749,53 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
bp->dcbx_error);
- if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
-#ifdef BCM_DCBNL
- /**
- * Add new app tlvs to dcbnl
- */
- bnx2x_dcbnl_update_applist(bp, false);
-#endif
- bnx2x_dcbx_stop_hw_tx(bp);
- return;
- }
- /* fall through */
+ /* mark DCBX result for PMF migration */
+ bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
#ifdef BCM_DCBNL
/**
- * Invalidate the local app tlvs if they are not added
- * to the dcbnl app list to avoid deleting them from
- * the list later on
+ * Add new app tlvs to dcbnl
*/
- bnx2x_dcbx_invalidate_local_apps(bp);
+ bnx2x_dcbnl_update_applist(bp, false);
#endif
+ bnx2x_dcbx_stop_hw_tx(bp);
+
+ /* reconfigure the netdevice with the results of the new
+ * dcbx negotiation.
+ */
+ bnx2x_dcbx_update_tc_mapping(bp);
+
+ return;
}
case BNX2X_DCBX_STATE_TX_PAUSED:
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
bnx2x_pfc_set_pfc(bp);
bnx2x_dcbx_update_ets_params(bp);
- if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
- bnx2x_dcbx_resume_hw_tx(bp);
- return;
- }
- /* fall through */
+ bnx2x_dcbx_resume_hw_tx(bp);
+ return;
case BNX2X_DCBX_STATE_TX_RELEASED:
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
- if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD)
- bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
-
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
+#ifdef BCM_DCBNL
+ /*
+ * Send a notification for the new negotiated parameters
+ */
+ dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
return;
default:
BNX2X_ERR("Unknown DCBX_STATE\n");
}
}
-
-#define LLDP_STATS_OFFSET(bp) (BP_PORT(bp)*\
- sizeof(struct lldp_dcbx_stat))
-
-/* calculate struct offset in array according to chip information */
-#define LLDP_PARAMS_OFFSET(bp) (BP_PORT(bp)*sizeof(struct lldp_params))
-
#define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \
BP_PORT(bp)*sizeof(struct lldp_admin_mib))
-static void bnx2x_dcbx_lldp_updated_params(struct bnx2x *bp,
- u32 dcbx_lldp_params_offset)
-{
- struct lldp_params lldp_params = {0};
- u32 i = 0, *buff = NULL;
- u32 offset = dcbx_lldp_params_offset + LLDP_PARAMS_OFFSET(bp);
-
- DP(NETIF_MSG_LINK, "lldp_offset 0x%x\n", offset);
-
- if ((bp->lldp_config_params.overwrite_settings ==
- BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE)) {
- /* Read the data first */
- buff = (u32 *)&lldp_params;
- for (i = 0; i < sizeof(struct lldp_params); i += 4, buff++)
- *buff = REG_RD(bp, (offset + i));
-
- lldp_params.msg_tx_hold =
- (u8)bp->lldp_config_params.msg_tx_hold;
- lldp_params.msg_fast_tx_interval =
- (u8)bp->lldp_config_params.msg_fast_tx;
- lldp_params.tx_crd_max =
- (u8)bp->lldp_config_params.tx_credit_max;
- lldp_params.msg_tx_interval =
- (u8)bp->lldp_config_params.msg_tx_interval;
- lldp_params.tx_fast =
- (u8)bp->lldp_config_params.tx_fast;
-
- /* Write the data.*/
- buff = (u32 *)&lldp_params;
- for (i = 0; i < sizeof(struct lldp_params); i += 4, buff++)
- REG_WR(bp, (offset + i) , *buff);
-
-
- } else if (BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
- bp->lldp_config_params.overwrite_settings)
- bp->lldp_config_params.overwrite_settings =
- BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID;
-}
-
static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
u32 dcbx_lldp_params_offset)
{
struct lldp_admin_mib admin_mib;
u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0;
- u32 *buff;
u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp);
/*shortcuts*/
@@ -749,18 +803,18 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params;
memset(&admin_mib, 0, sizeof(struct lldp_admin_mib));
- buff = (u32 *)&admin_mib;
+
/* Read the data first */
- for (i = 0; i < sizeof(struct lldp_admin_mib); i += 4, buff++)
- *buff = REG_RD(bp, (offset + i));
+ bnx2x_read_data(bp, (u32 *)&admin_mib, offset,
+ sizeof(struct lldp_admin_mib));
if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON)
SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
else
RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
- if ((BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
- dp->overwrite_settings)) {
+ if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) {
+
RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK);
admin_mib.ver_cfg_flags |=
(dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) &
@@ -856,19 +910,17 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
af->app.default_pri = (u8)dp->admin_default_priority;
- } else if (BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
- dp->overwrite_settings)
- dp->overwrite_settings = BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID;
+ }
/* Write the data. */
- buff = (u32 *)&admin_mib;
- for (i = 0; i < sizeof(struct lldp_admin_mib); i += 4, buff++)
- REG_WR(bp, (offset + i), *buff);
+ bnx2x_write_data(bp, (u32 *)&admin_mib, offset,
+ sizeof(struct lldp_admin_mib));
+
}
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
{
- if (CHIP_IS_E2(bp) && !CHIP_MODE_IS_4_PORT(bp)) {
+ if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
bp->dcb_state = dcb_on;
bp->dcbx_enabled = dcbx_enabled;
} else {
@@ -966,7 +1018,7 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
bp->dcb_state, bp->port.pmf);
- if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
+ if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
dcbx_lldp_params_offset =
SHMEM2_RD(bp, dcbx_lldp_params_offset);
@@ -974,56 +1026,21 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
dcbx_lldp_params_offset);
- if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
- bnx2x_dcbx_lldp_updated_params(bp,
- dcbx_lldp_params_offset);
+ bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
+ if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
bnx2x_dcbx_admin_mib_updated_params(bp,
dcbx_lldp_params_offset);
- /* set default configuration BC has */
- bnx2x_dcbx_set_params(bp,
- BNX2X_DCBX_STATE_NEG_RECEIVED);
-
+ /* Let HW start negotiation */
bnx2x_fw_command(bp,
DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
}
}
}
-
-void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp)
-{
- struct priority_cos pricos[MAX_PFC_TRAFFIC_TYPES];
- u32 i = 0, addr;
- memset(pricos, 0, sizeof(pricos));
- /* Default initialization */
- for (i = 0; i < MAX_PFC_TRAFFIC_TYPES; i++)
- pricos[i].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
-
- /* Store per port struct to internal memory */
- addr = BAR_XSTRORM_INTMEM +
- XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
- offsetof(struct cmng_struct_per_port,
- traffic_type_to_priority_cos);
- __storm_memset_struct(bp, addr, sizeof(pricos), (u32 *)pricos);
-
-
- /* LLFC disabled.*/
- REG_WR8(bp , BAR_XSTRORM_INTMEM +
- XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
- offsetof(struct cmng_struct_per_port, llfc_mode),
- LLFC_MODE_NONE);
-
- /* DCBX disabled.*/
- REG_WR8(bp , BAR_XSTRORM_INTMEM +
- XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
- offsetof(struct cmng_struct_per_port, dcb_enabled),
- DCB_DISABLED);
-}
-
static void
bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
- struct flow_control_configuration *pfc_fw_cfg)
+ struct bnx2x_func_tx_start_params *pfc_fw_cfg)
{
u8 pri = 0;
u8 cos = 0;
@@ -1171,7 +1188,7 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
/* If we join a group and one is strict
* then strict rules over the bw */
cos_data->data[entry].strict =
- BNX2X_DCBX_COS_HIGH_STRICT;
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
}
if ((0 == cos_data->data[0].pri_join_mask) &&
(0 == cos_data->data[1].pri_join_mask))
@@ -1183,7 +1200,7 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
#define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1))))
#endif
-static void bxn2x_dcbx_single_pg_to_cos_params(struct bnx2x *bp,
+static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
struct pg_help_data *pg_help_data,
struct cos_help_data *cos_data,
u32 pri_join_mask,
@@ -1263,14 +1280,16 @@ static void bxn2x_dcbx_single_pg_to_cos_params(struct bnx2x *bp,
if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) >
DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) {
cos_data->data[0].strict =
- BNX2X_DCBX_COS_HIGH_STRICT;
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
cos_data->data[1].strict =
- BNX2X_DCBX_COS_LOW_STRICT;
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+ BNX2X_DCBX_STRICT_COS_HIGHEST);
} else {
cos_data->data[0].strict =
- BNX2X_DCBX_COS_LOW_STRICT;
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+ BNX2X_DCBX_STRICT_COS_HIGHEST);
cos_data->data[1].strict =
- BNX2X_DCBX_COS_HIGH_STRICT;
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
}
/* Pauseable */
cos_data->data[0].pausable = true;
@@ -1306,13 +1325,16 @@ static void bxn2x_dcbx_single_pg_to_cos_params(struct bnx2x *bp,
* and that with the highest priority
* gets the highest strict priority in the arbiter.
*/
- cos_data->data[0].strict = BNX2X_DCBX_COS_LOW_STRICT;
- cos_data->data[1].strict = BNX2X_DCBX_COS_HIGH_STRICT;
+ cos_data->data[0].strict =
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+ BNX2X_DCBX_STRICT_COS_HIGHEST);
+ cos_data->data[1].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
}
}
}
-static void bnx2x_dcbx_two_pg_to_cos_params(
+static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
struct bnx2x *bp,
struct pg_help_data *pg_help_data,
struct dcbx_ets_feature *ets,
@@ -1322,7 +1344,7 @@ static void bnx2x_dcbx_two_pg_to_cos_params(
u8 num_of_dif_pri)
{
u8 i = 0;
- u8 pg[E2_NUM_OF_COS] = {0};
+ u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 };
/* If there are both pauseable and non-pauseable priorities,
* the pauseable priorities go to the first queue and
@@ -1378,16 +1400,68 @@ static void bnx2x_dcbx_two_pg_to_cos_params(
}
/* There can be only one strict pg */
- for (i = 0 ; i < E2_NUM_OF_COS; i++) {
+ for (i = 0 ; i < ARRAY_SIZE(pg); i++) {
if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
cos_data->data[i].cos_bw =
DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
else
- cos_data->data[i].strict = BNX2X_DCBX_COS_HIGH_STRICT;
+ cos_data->data[i].strict =
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
}
}
-static void bnx2x_dcbx_three_pg_to_cos_params(
+static int bnx2x_dcbx_join_pgs(
+ struct bnx2x *bp,
+ struct dcbx_ets_feature *ets,
+ struct pg_help_data *pg_help_data,
+ u8 required_num_of_pg)
+{
+ u8 entry_joined = pg_help_data->num_of_pg - 1;
+ u8 entry_removed = entry_joined + 1;
+ u8 pg_joined = 0;
+
+ if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data)
+ <= pg_help_data->num_of_pg) {
+
+ BNX2X_ERR("required_num_of_pg can't be zero\n");
+ return -EINVAL;
+ }
+
+ while (required_num_of_pg < pg_help_data->num_of_pg) {
+ entry_joined = pg_help_data->num_of_pg - 2;
+ entry_removed = entry_joined + 1;
+ /* protect index */
+ entry_removed %= ARRAY_SIZE(pg_help_data->data);
+
+ pg_help_data->data[entry_joined].pg_priority |=
+ pg_help_data->data[entry_removed].pg_priority;
+
+ pg_help_data->data[entry_joined].num_of_dif_pri +=
+ pg_help_data->data[entry_removed].num_of_dif_pri;
+
+ if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG ||
+ pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG)
+ /* Entries joined strict priority rules */
+ pg_help_data->data[entry_joined].pg =
+ DCBX_STRICT_PRI_PG;
+ else {
+			/* Entries can be joined - sum their BW */
+ pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl,
+ pg_help_data->data[entry_joined].pg) +
+ DCBX_PG_BW_GET(ets->pg_bw_tbl,
+ pg_help_data->data[entry_removed].pg);
+
+ DCBX_PG_BW_SET(ets->pg_bw_tbl,
+ pg_help_data->data[entry_joined].pg, pg_joined);
+ }
+ /* Joined the entries */
+ pg_help_data->num_of_pg--;
+ }
+
+ return 0;
+}
+
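bnx2x_dcbx_join_pgs() above repeatedly folds the last PG entry into the one before it: the priority masks are ORed, and if neither side is strict the bandwidth shares are summed. The sketch below shows one such join step with a simplified entry layout (the bandwidth lives directly in the entry here instead of in a separate pg_bw table, and STRICT_PG is just a marker value for the example).

#include <stdint.h>
#include <stdio.h>

#define STRICT_PG	15	/* marker, stands in for the strict-PG value */

struct pg_entry {
	uint32_t pg;		/* BW index or STRICT_PG */
	uint32_t pg_priority;	/* bitmask of priorities in this PG */
	uint8_t  bw;		/* bandwidth share, only if not strict */
};

/* fold the last entry into the one before it */
static void join_last_two(struct pg_entry *data, uint8_t *num_of_pg)
{
	uint8_t joined = *num_of_pg - 2, removed = joined + 1;

	data[joined].pg_priority |= data[removed].pg_priority;

	if (data[joined].pg == STRICT_PG || data[removed].pg == STRICT_PG)
		data[joined].pg = STRICT_PG;	/* strict wins */
	else
		data[joined].bw += data[removed].bw;

	(*num_of_pg)--;
}

int main(void)
{
	struct pg_entry pgs[4] = {
		{ 0, 0x03, 40 }, { 1, 0x0c, 30 }, { 2, 0x30, 20 }, { 3, 0xc0, 10 },
	};
	uint8_t num = 4;

	while (num > 3)
		join_last_two(pgs, &num);

	printf("pgs left %u, last mask 0x%02x bw %u\n",
	       num, pgs[num - 1].pg_priority, pgs[num - 1].bw);
	return 0;
}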
+static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
struct bnx2x *bp,
struct pg_help_data *pg_help_data,
struct dcbx_ets_feature *ets,
@@ -1459,102 +1533,271 @@ static void bnx2x_dcbx_three_pg_to_cos_params(
/* If we join a group and one is strict
* then strict rules over the bw */
cos_data->data[1].strict =
- BNX2X_DCBX_COS_HIGH_STRICT;
+ BNX2X_DCBX_STRICT_COS_HIGHEST;
}
}
}
}
-static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
struct pg_help_data *help_data,
struct dcbx_ets_feature *ets,
- u32 *pg_pri_orginal_spread)
+ struct cos_help_data *cos_data,
+ u32 *pg_pri_orginal_spread,
+ u32 pri_join_mask,
+ u8 num_of_dif_pri)
{
- struct cos_help_data cos_data ;
- u8 i = 0;
- u32 pri_join_mask = 0;
- u8 num_of_dif_pri = 0;
-
- memset(&cos_data, 0, sizeof(cos_data));
- /* Validate the pg value */
- for (i = 0; i < help_data->num_of_pg ; i++) {
- if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
- DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
- BNX2X_ERR("Invalid pg[%d] data %x\n", i,
- help_data->data[i].pg);
- pri_join_mask |= help_data->data[i].pg_priority;
- num_of_dif_pri += help_data->data[i].num_of_dif_pri;
- }
- /* default settings */
- cos_data.num_of_cos = 2;
- for (i = 0; i < E2_NUM_OF_COS ; i++) {
- cos_data.data[i].pri_join_mask = pri_join_mask;
- cos_data.data[i].pausable = false;
- cos_data.data[i].strict = BNX2X_DCBX_COS_NOT_STRICT;
- cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
- }
+ /* default E2 settings */
+ cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;
switch (help_data->num_of_pg) {
case 1:
-
- bxn2x_dcbx_single_pg_to_cos_params(
+ bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(
bp,
help_data,
- &cos_data,
+ cos_data,
pri_join_mask,
num_of_dif_pri);
break;
case 2:
- bnx2x_dcbx_two_pg_to_cos_params(
+ bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
bp,
help_data,
ets,
- &cos_data,
+ cos_data,
pg_pri_orginal_spread,
pri_join_mask,
num_of_dif_pri);
break;
case 3:
- bnx2x_dcbx_three_pg_to_cos_params(
+ bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
bp,
help_data,
ets,
- &cos_data,
+ cos_data,
pg_pri_orginal_spread,
pri_join_mask,
num_of_dif_pri);
-
break;
default:
BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
bnx2x_dcbx_ets_disabled_entry_data(bp,
- &cos_data, pri_join_mask);
+ cos_data, pri_join_mask);
+ }
+}
+
+static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u8 entry,
+ u8 num_spread_of_entries,
+ u8 strict_app_pris)
+{
+ u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST;
+ u8 num_of_app_pri = MAX_PFC_PRIORITIES;
+ u8 app_pri_bit = 0;
+
+ while (num_spread_of_entries && num_of_app_pri > 0) {
+ app_pri_bit = 1 << (num_of_app_pri - 1);
+ if (app_pri_bit & strict_app_pris) {
+ struct cos_entry_help_data *data = &cos_data->
+ data[entry];
+ num_spread_of_entries--;
+ if (num_spread_of_entries == 0) {
+				/* last entry: put all remaining priorities in it */
+ data->cos_bw = DCBX_INVALID_COS_BW;
+ data->strict = strict_pri;
+ data->pri_join_mask = strict_app_pris;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+ } else {
+ strict_app_pris &= ~app_pri_bit;
+
+ data->cos_bw = DCBX_INVALID_COS_BW;
+ data->strict = strict_pri;
+ data->pri_join_mask = app_pri_bit;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+ }
+
+ strict_pri =
+ BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri);
+ entry++;
+ }
+
+ num_of_app_pri--;
+ }
+
+ if (num_spread_of_entries)
+ return -EINVAL;
+
+ return 0;
+}
+
+static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
+ struct cos_help_data *cos_data,
+ u8 entry,
+ u8 num_spread_of_entries,
+ u8 strict_app_pris)
+{
+
+ if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
+ num_spread_of_entries,
+ strict_app_pris)) {
+ struct cos_entry_help_data *data = &cos_data->
+ data[entry];
+ /* Fill BW entry */
+ data->cos_bw = DCBX_INVALID_COS_BW;
+ data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST;
+ data->pri_join_mask = strict_app_pris;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+ return 1;
+ }
+
+ return num_spread_of_entries;
+}
+
+static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ struct cos_help_data *cos_data,
+ u32 pri_join_mask)
+
+{
+ u8 need_num_of_entries = 0;
+ u8 i = 0;
+ u8 entry = 0;
+
+ /*
+ * if the number of requested PG-s in CEE is greater than 3
+	 * then the results are undefined, since this is a violation
+ * of the standard.
+ */
+ if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) {
+ if (bnx2x_dcbx_join_pgs(bp, ets, help_data,
+ DCBX_COS_MAX_NUM_E3B0)) {
+			BNX2X_ERR("Unable to reduce the number of PGs - "
+				  "disabling ETS\n");
+ bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data,
+ pri_join_mask);
+ return;
+ }
+ }
+
+ for (i = 0 ; i < help_data->num_of_pg; i++) {
+ struct pg_entry_help_data *pg = &help_data->data[i];
+ if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+ struct cos_entry_help_data *data = &cos_data->
+ data[entry];
+ /* Fill BW entry */
+ data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg);
+ data->strict = BNX2X_DCBX_STRICT_INVALID;
+ data->pri_join_mask = pg->pg_priority;
+ data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+ data->pri_join_mask);
+
+ entry++;
+ } else {
+ need_num_of_entries = min_t(u8,
+ (u8)pg->num_of_dif_pri,
+ (u8)DCBX_COS_MAX_NUM_E3B0 -
+ help_data->num_of_pg + 1);
+ /*
+ * If there are still VOQ-s which have no associated PG,
+ * then associate these VOQ-s to PG15. These PG-s will
+ * be used for SP between priorities on PG15.
+ */
+ entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data,
+ entry, need_num_of_entries, pg->pg_priority);
+ }
}
+ /* the entry will represent the number of COSes used */
+ cos_data->num_of_cos = entry;
+}
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+ struct pg_help_data *help_data,
+ struct dcbx_ets_feature *ets,
+ u32 *pg_pri_orginal_spread)
+{
+ struct cos_help_data cos_data;
+ u8 i = 0;
+ u32 pri_join_mask = 0;
+ u8 num_of_dif_pri = 0;
+
+ memset(&cos_data, 0, sizeof(cos_data));
+
+ /* Validate the pg value */
+ for (i = 0; i < help_data->num_of_pg ; i++) {
+ if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
+ DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
+ BNX2X_ERR("Invalid pg[%d] data %x\n", i,
+ help_data->data[i].pg);
+ pri_join_mask |= help_data->data[i].pg_priority;
+ num_of_dif_pri += help_data->data[i].num_of_dif_pri;
+ }
+
+ /* defaults */
+ cos_data.num_of_cos = 1;
+ for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) {
+ cos_data.data[i].pri_join_mask = 0;
+ cos_data.data[i].pausable = false;
+ cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID;
+ cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
+ }
+
+ if (CHIP_IS_E3B0(bp))
+ bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets,
+ &cos_data, pri_join_mask);
+ else /* E2 + E3A0 */
+ bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp,
+ help_data, ets,
+ &cos_data,
+ pg_pri_orginal_spread,
+ pri_join_mask,
+ num_of_dif_pri);
+
for (i = 0; i < cos_data.num_of_cos ; i++) {
- struct bnx2x_dcbx_cos_params *params =
+ struct bnx2x_dcbx_cos_params *p =
&bp->dcbx_port_params.ets.cos_params[i];
- params->pauseable = cos_data.data[i].pausable;
- params->strict = cos_data.data[i].strict;
- params->bw_tbl = cos_data.data[i].cos_bw;
- if (params->pauseable) {
- params->pri_bitmask =
- DCBX_PFC_PRI_GET_PAUSE(bp,
- cos_data.data[i].pri_join_mask);
+ p->strict = cos_data.data[i].strict;
+ p->bw_tbl = cos_data.data[i].cos_bw;
+ p->pri_bitmask = cos_data.data[i].pri_join_mask;
+ p->pauseable = cos_data.data[i].pausable;
+
+ /* sanity */
+ if (p->bw_tbl != DCBX_INVALID_COS_BW ||
+ p->strict != BNX2X_DCBX_STRICT_INVALID) {
+ if (p->pri_bitmask == 0)
+ BNX2X_ERR("Invalid pri_bitmask for %d\n", i);
+
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) {
+
+ if (p->pauseable &&
+ DCBX_PFC_PRI_GET_NON_PAUSE(bp,
+ p->pri_bitmask) != 0)
+ BNX2X_ERR("Inconsistent config for "
+ "pausable COS %d\n", i);
+
+ if (!p->pauseable &&
+ DCBX_PFC_PRI_GET_PAUSE(bp,
+ p->pri_bitmask) != 0)
+ BNX2X_ERR("Inconsistent config for "
+ "nonpausable COS %d\n", i);
+ }
+ }
+
+ if (p->pauseable)
DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n",
i, cos_data.data[i].pri_join_mask);
- } else {
- params->pri_bitmask =
- DCBX_PFC_PRI_GET_NON_PAUSE(bp,
- cos_data.data[i].pri_join_mask);
+ else
DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask "
"0x%x\n",
i, cos_data.data[i].pri_join_mask);
- }
}
bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos ;
@@ -1574,30 +1817,26 @@ static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
}
}
-static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
+ struct bnx2x_func_tx_start_params *pfc_fw_cfg)
{
- struct flow_control_configuration *pfc_fw_cfg = NULL;
u16 pri_bit = 0;
u8 cos = 0, pri = 0;
struct priority_cos *tt2cos;
u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
- pfc_fw_cfg = (struct flow_control_configuration *)
- bnx2x_sp(bp, pfc_config);
- memset(pfc_fw_cfg, 0, sizeof(struct flow_control_configuration));
+ memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg));
+
+ /* to disable DCB - the structure must be zeroed */
+ if (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)
+ return;
/*shortcut*/
tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos;
/* Fw version should be incremented each update */
pfc_fw_cfg->dcb_version = ++bp->dcb_version;
- pfc_fw_cfg->dcb_enabled = DCB_ENABLED;
-
- /* Default initialization */
- for (pri = 0; pri < MAX_PFC_TRAFFIC_TYPES ; pri++) {
- tt2cos[pri].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
- tt2cos[pri].cos = 0;
- }
+ pfc_fw_cfg->dcb_enabled = 1;
/* Fill priority parameters */
for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
@@ -1605,14 +1844,37 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
pri_bit = 1 << tt2cos[pri].priority;
/* Fill COS parameters based on COS calculated to
- * make it more generally for future use */
+ * make it more general for future use */
for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
if (bp->dcbx_port_params.ets.cos_params[cos].
pri_bitmask & pri_bit)
tt2cos[pri].cos = cos;
}
+
+ /* we never want the FW to add a 0 vlan tag */
+ pfc_fw_cfg->dont_add_pri_0_en = 1;
+
bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
}
+
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
+{
+	/* if we need to synchronize DCBX result from prev PMF
+ * read it from shmem and update bp accordingly
+ */
+ if (SHMEM2_HAS(bp, drv_flags) &&
+ GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
+ /* Read neg results if dcbx is in the FW */
+ if (bnx2x_dcbx_read_shmem_neg_results(bp))
+ return;
+
+ bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+ bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+ bp->dcbx_error);
+ }
+}
+
/* DCB netlink */
#ifdef BCM_DCBNL
@@ -1879,10 +2141,12 @@ static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
if (bp->dcb_state) {
switch (tcid) {
case DCB_NUMTCS_ATTR_PG:
- *num = E2_NUM_OF_COS;
+ *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+ DCBX_COS_MAX_NUM_E2;
break;
case DCB_NUMTCS_ATTR_PFC:
- *num = E2_NUM_OF_COS;
+ *num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+ DCBX_COS_MAX_NUM_E2;
break;
default:
rval = -EINVAL;
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
index bed369d67e0..2c6a3bca6f2 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -27,22 +27,30 @@ struct bnx2x_dcbx_app_params {
u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
};
-#define E2_NUM_OF_COS 2
-#define BNX2X_DCBX_COS_NOT_STRICT 0
-#define BNX2X_DCBX_COS_LOW_STRICT 1
-#define BNX2X_DCBX_COS_HIGH_STRICT 2
+#define DCBX_COS_MAX_NUM_E2 DCBX_E2E3_MAX_NUM_COS
+/* bnx2x currently limits the number of supported COSes to 3, to be extended to 6 */
+#define BNX2X_MAX_COS_SUPPORT 3
+#define DCBX_COS_MAX_NUM_E3B0 BNX2X_MAX_COS_SUPPORT
+#define DCBX_COS_MAX_NUM BNX2X_MAX_COS_SUPPORT
struct bnx2x_dcbx_cos_params {
u32 bw_tbl;
u32 pri_bitmask;
+ /*
+ * strict priority: valid values are 0..5; 0 is highest priority.
+ * There can't be two COSes with the same priority.
+ */
u8 strict;
+#define BNX2X_DCBX_STRICT_INVALID DCBX_COS_MAX_NUM
+#define BNX2X_DCBX_STRICT_COS_HIGHEST 0
+#define BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(sp) ((sp) + 1)
u8 pauseable;
};
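The strict-priority encoding introduced above treats 0 as the highest priority, steps to the next lower one with BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(), and uses the max-COS count as the "invalid" sentinel. A tiny sketch of walking that range (the max-COS value of 3 mirrors BNX2X_MAX_COS_SUPPORT and is assumed here for the demo):

#include <stdio.h>

#define COS_MAX_NUM		3	/* assumed, mirrors BNX2X_MAX_COS_SUPPORT */
#define STRICT_INVALID		COS_MAX_NUM
#define STRICT_COS_HIGHEST	0
#define STRICT_NEXT_LOWER(sp)	((sp) + 1)

int main(void)
{
	int sp;

	/* walk from the highest strict priority down to the sentinel */
	for (sp = STRICT_COS_HIGHEST; sp < STRICT_INVALID;
	     sp = STRICT_NEXT_LOWER(sp))
		printf("cos strict priority %d\n", sp);
	return 0;
}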
struct bnx2x_dcbx_pg_params {
u32 enabled;
u8 num_of_cos; /* valid COS entries */
- struct bnx2x_dcbx_cos_params cos_params[E2_NUM_OF_COS];
+ struct bnx2x_dcbx_cos_params cos_params[DCBX_COS_MAX_NUM];
};
struct bnx2x_dcbx_pfc_params {
@@ -60,6 +68,8 @@ struct bnx2x_dcbx_port_params {
#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0
#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1
#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE)
+#define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\
+ (bp)->dcbx_port_params.ets.enabled)
struct bnx2x_config_lldp_params {
u32 overwrite_settings;
@@ -132,7 +142,7 @@ struct cos_entry_help_data {
};
struct cos_help_data {
- struct cos_entry_help_data data[E2_NUM_OF_COS];
+ struct cos_entry_help_data data[DCBX_COS_MAX_NUM];
u8 num_of_cos;
};
@@ -148,6 +158,8 @@ struct cos_help_data {
((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \
(DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
+#define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri) \
+ (0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri))
#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \
(pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
@@ -170,22 +182,18 @@ struct pg_help_data {
/* forward DCB/PFC related declarations */
struct bnx2x;
-void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp);
void bnx2x_dcbx_update(struct work_struct *work);
void bnx2x_dcbx_init_params(struct bnx2x *bp);
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
enum {
BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
-#ifdef BCM_CNIC
- BNX2X_DCBX_STATE_ISCSI_STOPPED,
-#endif
BNX2X_DCBX_STATE_TX_PAUSED,
BNX2X_DCBX_STATE_TX_RELEASED
};
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
-
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp);
/* DCB netlink */
#ifdef BCM_DCBNL
extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index fb3ff7c4d7c..b983825d0ee 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -25,34 +25,94 @@
/*definitions */
-#define XSTORM_WAITP_ADDR 0x2b8a80
-#define TSTORM_WAITP_ADDR 0x1b8a80
-#define USTORM_WAITP_ADDR 0x338a80
-#define CSTORM_WAITP_ADDR 0x238a80
-#define TSTORM_CAM_MODE 0x1B1440
+#define XSTORM_WAITP_ADDR 0x2b8a80
+#define TSTORM_WAITP_ADDR 0x1b8a80
+#define USTORM_WAITP_ADDR 0x338a80
+#define CSTORM_WAITP_ADDR 0x238a80
+#define TSTORM_CAM_MODE 0x1B1440
+
+#define MAX_TIMER_PENDING 200
+#define TIMER_SCAN_DONT_CARE 0xFF
+#define RI_E1 0x1
+#define RI_E1H 0x2
+#define RI_E2 0x4
+#define RI_E3 0x8
+#define RI_E3B0 0x10
+#define RI_ONLINE 0x100
+#define RI_OFFLINE 0x0
+#define RI_PATH0_DUMP 0x200
+#define RI_PATH1_DUMP 0x400
-#define MAX_TIMER_PENDING 200
-#define TIMER_SCAN_DONT_CARE 0xFF
-#define RI_E1 0x1
-#define RI_E1H 0x2
-#define RI_E2 0x4
-#define RI_ONLINE 0x100
-#define RI_PATH0_DUMP 0x200
-#define RI_PATH1_DUMP 0x400
-#define RI_E1_OFFLINE (RI_E1)
#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
-#define RI_E1H_OFFLINE (RI_E1H)
#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
-#define RI_E2_OFFLINE (RI_E2)
-#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
-#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
-#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
-#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
-#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
-#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
-#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
-#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
+#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
+#define RI_E1E2_ONLINE (RI_E1 | RI_E2 | RI_ONLINE)
+#define RI_E1HE2_ONLINE (RI_E1H | RI_E2 | RI_ONLINE)
+#define RI_E1E1HE2_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
+#define RI_E3_ONLINE (RI_E3 | RI_ONLINE)
+#define RI_E1E3_ONLINE (RI_E1 | RI_E3 | RI_ONLINE)
+#define RI_E1HE3_ONLINE (RI_E1H | RI_E3 | RI_ONLINE)
+#define RI_E1E1HE3_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_ONLINE)
+#define RI_E2E3_ONLINE (RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E1E2E3_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E1HE2E3_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E1E1HE2E3_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E3B0_ONLINE (RI_E3B0 | RI_ONLINE)
+#define RI_E1E3B0_ONLINE (RI_E1 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE3B0_ONLINE (RI_E1H | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE3B0_ONLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_ONLINE)
+#define RI_E2E3B0_ONLINE (RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E2E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE2E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE2E3B0_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E3E3B0_ONLINE (RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E3E3B0_ONLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE3E3B0_ONLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE3E3B0_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E2E3E3B0_ONLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E2E3E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE2E3E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE2E3E3B0_ONLINE \
+ (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1_OFFLINE (RI_E1 | RI_OFFLINE)
+#define RI_E1H_OFFLINE (RI_E1H | RI_OFFLINE)
+#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H | RI_OFFLINE)
+#define RI_E2_OFFLINE (RI_E2 | RI_OFFLINE)
+#define RI_E1E2_OFFLINE (RI_E1 | RI_E2 | RI_OFFLINE)
+#define RI_E1HE2_OFFLINE (RI_E1H | RI_E2 | RI_OFFLINE)
+#define RI_E1E1HE2_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_OFFLINE)
+#define RI_E3_OFFLINE (RI_E3 | RI_OFFLINE)
+#define RI_E1E3_OFFLINE (RI_E1 | RI_E3 | RI_OFFLINE)
+#define RI_E1HE3_OFFLINE (RI_E1H | RI_E3 | RI_OFFLINE)
+#define RI_E1E1HE3_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_OFFLINE)
+#define RI_E2E3_OFFLINE (RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E1E2E3_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E1HE2E3_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E1E1HE2E3_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E3B0_OFFLINE (RI_E3B0 | RI_OFFLINE)
+#define RI_E1E3B0_OFFLINE (RI_E1 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1HE3B0_OFFLINE (RI_E1H | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E1HE3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_OFFLINE)
+#define RI_E2E3B0_OFFLINE (RI_E2 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E2E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1HE2E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E1HE2E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
+#define RI_E3E3B0_OFFLINE (RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E3E3B0_OFFLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1HE3E3B0_OFFLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E1HE3E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E2E3E3B0_OFFLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E2E3E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1HE2E3E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E1HE2E3E3B0_OFFLINE \
+ (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_ALL_ONLINE RI_E1E1HE2E3E3B0_ONLINE
+#define RI_ALL_OFFLINE RI_E1E1HE2E3E3B0_OFFLINE
+
+#define DBG_DMP_TRACE_BUFFER_SIZE 0x800
+#define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \
+ ((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE)
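/*
 * Illustrative sketch only, not part of this patch: the RI_* values above
 * are bitmasks that combine a chip-family bit (RI_E1..RI_E3B0) with the
 * RI_ONLINE/RI_OFFLINE dump mode, so a dump routine can filter a table
 * entry roughly as below. The helper name and the chip_mask/online
 * parameters are hypothetical, and the entry->info field is assumed to
 * mirror the wreg_addr layout shown further down.
 */
static bool example_entry_applies(const struct reg_addr *entry,
				  u16 chip_mask, bool online)
{
	if (!(entry->info & chip_mask))			/* other chip family */
		return false;
	if (online && !(entry->info & RI_ONLINE))	/* offline-only entry */
		return false;
	return true;
}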
struct dump_sign {
u32 time_stamp;
@@ -86,628 +146,1011 @@ struct wreg_addr {
u16 info;
};
-#define REGS_COUNT 834
-static const struct reg_addr reg_addrs[REGS_COUNT] = {
- { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE },
- { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE },
- { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE },
- { 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE },
- { 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE },
- { 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE },
- { 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE },
- { 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE },
- { 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE },
- { 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE },
- { 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE },
- { 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE },
- { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE },
- { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE },
- { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE },
- { 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE },
- { 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE },
- { 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE },
- { 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE },
- { 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE },
- { 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE },
- { 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE },
- { 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE },
- { 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE },
- { 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE },
- { 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE },
- { 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE },
- { 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE },
- { 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
- { 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE },
- { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE },
- { 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE },
- { 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE },
- { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE },
- { 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE },
- { 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE },
- { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE },
- { 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE },
- { 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE },
- { 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE },
- { 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE },
- { 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE },
- { 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE },
- { 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE },
- { 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE },
- { 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE },
- { 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE },
- { 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE },
- { 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE },
- { 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE },
- { 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE },
- { 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE },
- { 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE },
- { 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE },
- { 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE },
- { 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE },
- { 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE },
- { 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE },
- { 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE },
- { 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE },
- { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE },
- { 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
- { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE },
- { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE },
- { 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE },
- { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE },
- { 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE },
- { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE },
- { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE },
- { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE },
- { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE },
- { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE },
- { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE },
- { 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE },
- { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE },
- { 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE },
- { 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE },
- { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE },
- { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE },
- { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE },
- { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE },
- { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE },
- { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE },
- { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE },
- { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE },
- { 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
- { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE },
- { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE },
- { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE },
- { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE },
- { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE },
- { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE },
- { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE },
- { 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE },
- { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE },
- { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE },
- { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE },
- { 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE },
- { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE },
- { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE },
- { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE },
- { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE },
- { 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE },
- { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE },
- { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE },
- { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE },
- { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE },
- { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE },
- { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE },
- { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE },
- { 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
- { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE },
- { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE },
- { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE },
- { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE },
- { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE },
- { 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE },
- { 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE },
- { 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE },
- { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE },
- { 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE },
- { 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE },
- { 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE },
- { 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE },
- { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE },
- { 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE },
- { 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE },
- { 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE },
- { 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE },
- { 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE },
- { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE },
- { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE },
- { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE },
- { 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE },
- { 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE },
- { 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE },
- { 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE },
- { 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE },
- { 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE },
- { 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE },
- { 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE },
- { 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE },
- { 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE },
- { 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE },
- { 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE },
- { 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE },
- { 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE },
- { 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE },
- { 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE },
- { 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE },
- { 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE },
- { 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE },
- { 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE },
- { 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
- { 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE },
- { 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE },
- { 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE },
- { 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE },
- { 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE },
- { 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE },
- { 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE },
- { 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE },
- { 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE },
- { 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE },
- { 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE },
- { 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE },
- { 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE },
- { 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE },
- { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE },
- { 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE },
- { 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE },
- { 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE },
- { 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE },
- { 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
- { 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE },
- { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE },
- { 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
- { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE },
- { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE },
- { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE },
- { 0x164238, 1, RI_ALL_ONLINE }, { 0x164240, 1, RI_ALL_ONLINE },
- { 0x164248, 1, RI_ALL_ONLINE }, { 0x164250, 1, RI_ALL_ONLINE },
- { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE },
- { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE },
- { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE },
- { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE },
- { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE },
- { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE },
- { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE },
- { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE },
- { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE },
- { 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE },
- { 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE },
- { 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE },
- { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE },
- { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE },
- { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE },
- { 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
- { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE },
- { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE },
- { 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE },
- { 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE },
- { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE },
- { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE },
- { 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE },
- { 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE },
- { 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE },
- { 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE },
- { 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE },
- { 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE },
- { 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE },
- { 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE },
- { 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE },
- { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE },
- { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE },
- { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE },
- { 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE },
- { 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE },
- { 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE },
- { 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE },
- { 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE },
- { 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE },
- { 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE },
- { 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE },
- { 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE },
- { 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE },
- { 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE },
- { 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
- { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
- { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE },
- { 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE },
- { 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
- { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE },
- { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE },
- { 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE },
- { 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE },
- { 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE },
- { 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE },
- { 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE },
- { 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE },
- { 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE },
- { 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE },
- { 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE },
- { 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
- { 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE },
- { 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE },
- { 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE },
- { 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE },
- { 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE },
- { 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE },
- { 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE },
- { 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE },
- { 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE },
- { 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE },
- { 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE },
- { 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE },
- { 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE },
- { 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE },
- { 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE },
- { 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE },
- { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE },
- { 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE },
- { 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE },
- { 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE },
- { 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE },
- { 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE },
- { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE },
- { 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE },
- { 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE},
- { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE },
- { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE },
- { 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE },
- { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE },
- { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE },
- { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE },
- { 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE },
- { 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE },
- { 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
- { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE },
- { 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE },
- { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
- { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
- { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE },
- { 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE },
- { 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE },
- { 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE },
- { 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE },
- { 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE },
- { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE },
- { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE },
- { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE },
- { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE },
- { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE },
- { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE },
- { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE },
- { 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE },
- { 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE },
- { 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE },
- { 0x238b00, 1, RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE },
- { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE },
- { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE },
- { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE },
- { 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE },
- { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE },
- { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE },
- { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE },
- { 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE },
- { 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE },
- { 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE },
- { 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE },
- { 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE },
- { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE },
- { 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE },
- { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE },
- { 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE },
- { 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE },
- { 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE },
- { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE },
- { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE },
- { 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE},
- { 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE },
- { 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE },
- { 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE },
- { 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE },
- { 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE },
- { 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE },
- { 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE },
- { 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE },
- { 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
- { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE },
- { 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE },
- { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
- { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE },
- { 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE },
- { 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE },
- { 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE },
- { 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE },
- { 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE },
- { 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE },
- { 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE },
- { 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE },
- { 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE },
- { 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE },
- { 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE },
- { 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE },
- { 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE },
- { 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE },
- { 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE },
- { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE },
- { 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE },
- { 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE },
- { 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE },
- { 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE },
- { 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE },
- { 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE },
- { 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE },
- { 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE },
- { 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE },
- { 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE },
- { 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE },
- { 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE },
- { 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE },
- { 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE },
- { 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
- { 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE },
- { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE },
- { 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE },
- { 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE },
- { 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE },
- { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE },
- { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE },
- { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE },
- { 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE },
- { 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE },
- { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE },
- { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE },
- { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE },
- { 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE },
- { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE },
- { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE },
- { 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE },
- { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE },
- { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE },
- { 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE },
- { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
- { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE },
- { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE },
- { 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE },
- { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE },
- { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE },
- { 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE },
- { 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE },
- { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE },
- { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE },
- { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE },
- { 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE },
- { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE },
- { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE },
- { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE },
- { 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
- { 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE },
- { 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE },
- { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE },
- { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE },
- { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE },
- { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE },
- { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE },
- { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE },
- { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE },
- { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE },
- { 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE },
- { 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE },
- { 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE },
- { 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE },
- { 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE },
-};
-
-#define IDLE_REGS_COUNT 237
-static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
- { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE },
- { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
- { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE },
- { 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE },
- { 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE },
- { 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE },
- { 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE },
- { 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE },
- { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE },
- { 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE },
- { 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE },
- { 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE },
- { 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE },
- { 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE },
- { 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE },
- { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE },
- { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE },
- { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE },
- { 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE },
- { 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE },
- { 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE },
- { 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE },
- { 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE },
- { 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE },
- { 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE },
- { 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE },
- { 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE },
- { 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE },
- { 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE },
- { 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE },
- { 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE },
- { 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE },
- { 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE },
- { 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE },
- { 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE },
- { 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, RI_ALL_ONLINE },
- { 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE },
- { 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE },
- { 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE },
- { 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE },
- { 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE },
- { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE },
- { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE },
- { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE },
- { 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE },
- { 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE },
- { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE },
- { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE },
- { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE },
- { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE },
- { 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE },
- { 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE },
- { 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE },
- { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE },
- { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE },
- { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE },
- { 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE },
- { 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE },
- { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE },
- { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE },
- { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE },
- { 0x120848, 1, RI_ALL_ONLINE }, { 0x120850, 1, RI_ALL_ONLINE },
- { 0x120858, 1, RI_ALL_ONLINE }, { 0x120860, 1, RI_ALL_ONLINE },
- { 0x120868, 1, RI_ALL_ONLINE }, { 0x120870, 1, RI_ALL_ONLINE },
- { 0x120878, 1, RI_ALL_ONLINE }, { 0x120880, 1, RI_ALL_ONLINE },
- { 0x120888, 1, RI_ALL_ONLINE }, { 0x120890, 1, RI_ALL_ONLINE },
- { 0x120898, 1, RI_ALL_ONLINE }, { 0x1208a0, 1, RI_ALL_ONLINE },
- { 0x1208a8, 1, RI_ALL_ONLINE }, { 0x1208b0, 1, RI_ALL_ONLINE },
- { 0x1208b8, 1, RI_ALL_ONLINE }, { 0x1208c0, 1, RI_ALL_ONLINE },
- { 0x1208c8, 1, RI_ALL_ONLINE }, { 0x1208d0, 1, RI_ALL_ONLINE },
- { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE },
- { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE },
- { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE },
- { 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE },
- { 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE },
- { 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE },
- { 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE },
- { 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE },
- { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE },
- { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE },
- { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE },
- { 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE },
- { 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE },
- { 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE },
- { 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE },
- { 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE },
- { 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE },
- { 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE },
- { 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE },
- { 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE },
- { 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE },
- { 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
- { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE },
- { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE },
- { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE },
- { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE },
- { 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE },
- { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE },
- { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE },
- { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE },
- { 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE },
- { 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE },
- { 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE },
- { 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE },
- { 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE },
- { 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE },
- { 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE },
- { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
- { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
- { 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE },
- { 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE },
- { 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE },
- { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
- { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
- { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE },
- { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE },
- { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
- { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
- { 0x3380c0, 1, RI_ALL_ONLINE }
-};
-
-#define WREGS_COUNT_E1 1
-static const u32 read_reg_e1_0[] = { 0x1b1000 };
-
-static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = {
- { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE }
-};
-
-#define WREGS_COUNT_E1H 1
-static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 };
-
-static const struct wreg_addr wreg_addrs_e1h[WREGS_COUNT_E1H] = {
- { 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE }
+static const struct reg_addr reg_addrs[] = {
+ { 0x2000, 341, RI_ALL_ONLINE },
+ { 0x2800, 103, RI_ALL_ONLINE },
+ { 0x3000, 287, RI_ALL_ONLINE },
+ { 0x3800, 331, RI_ALL_ONLINE },
+ { 0x8800, 6, RI_ALL_ONLINE },
+ { 0x8818, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x9000, 147, RI_E2E3E3B0_ONLINE },
+ { 0x924c, 1, RI_E2_ONLINE },
+ { 0x9250, 16, RI_E2E3E3B0_ONLINE },
+ { 0x9400, 33, RI_E2E3E3B0_ONLINE },
+ { 0x9484, 5, RI_E3E3B0_ONLINE },
+ { 0xa000, 27, RI_ALL_ONLINE },
+ { 0xa06c, 1, RI_E1E1H_ONLINE },
+ { 0xa070, 71, RI_ALL_ONLINE },
+ { 0xa18c, 4, RI_E1E1H_ONLINE },
+ { 0xa19c, 62, RI_ALL_ONLINE },
+ { 0xa294, 2, RI_E1E1H_ONLINE },
+ { 0xa29c, 2, RI_ALL_ONLINE },
+ { 0xa2a4, 2, RI_E1E1HE2_ONLINE },
+ { 0xa2ac, 52, RI_ALL_ONLINE },
+ { 0xa39c, 7, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa3b8, 2, RI_E3E3B0_ONLINE },
+ { 0xa3c0, 3, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa3d0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa3d8, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa3e0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa3e8, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa3f0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa3f8, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa400, 40, RI_ALL_ONLINE },
+ { 0xa4a0, 1, RI_E1E1HE2_ONLINE },
+ { 0xa4a4, 2, RI_ALL_ONLINE },
+ { 0xa4ac, 2, RI_E1E1H_ONLINE },
+ { 0xa4b4, 1, RI_E1E1HE2_ONLINE },
+ { 0xa4b8, 2, RI_E1E1H_ONLINE },
+ { 0xa4c0, 3, RI_ALL_ONLINE },
+ { 0xa4cc, 5, RI_E1E1H_ONLINE },
+ { 0xa4e0, 3, RI_ALL_ONLINE },
+ { 0xa4fc, 2, RI_ALL_ONLINE },
+ { 0xa504, 1, RI_E1E1H_ONLINE },
+ { 0xa508, 3, RI_ALL_ONLINE },
+ { 0xa518, 1, RI_ALL_ONLINE },
+ { 0xa520, 1, RI_ALL_ONLINE },
+ { 0xa528, 1, RI_ALL_ONLINE },
+ { 0xa530, 1, RI_ALL_ONLINE },
+ { 0xa538, 1, RI_ALL_ONLINE },
+ { 0xa540, 1, RI_ALL_ONLINE },
+ { 0xa548, 1, RI_E1E1H_ONLINE },
+ { 0xa550, 1, RI_E1E1H_ONLINE },
+ { 0xa558, 1, RI_E1E1H_ONLINE },
+ { 0xa560, 1, RI_E1E1H_ONLINE },
+ { 0xa568, 1, RI_E1E1H_ONLINE },
+ { 0xa570, 1, RI_ALL_ONLINE },
+ { 0xa580, 1, RI_ALL_ONLINE },
+ { 0xa590, 1, RI_ALL_ONLINE },
+ { 0xa5a0, 1, RI_E1E1HE2_ONLINE },
+ { 0xa5c0, 1, RI_ALL_ONLINE },
+ { 0xa5e0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa5e8, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa5f0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa5f8, 1, RI_E1HE2_ONLINE },
+ { 0xa5fc, 9, RI_E1HE2E3E3B0_ONLINE },
+ { 0xa620, 6, RI_E2E3E3B0_ONLINE },
+ { 0xa638, 20, RI_E2_ONLINE },
+ { 0xa688, 42, RI_E2E3E3B0_ONLINE },
+ { 0xa730, 1, RI_E2_ONLINE },
+ { 0xa734, 2, RI_E2E3E3B0_ONLINE },
+ { 0xa73c, 4, RI_E2_ONLINE },
+ { 0xa74c, 5, RI_E2E3E3B0_ONLINE },
+ { 0xa760, 5, RI_E2_ONLINE },
+ { 0xa774, 7, RI_E2E3E3B0_ONLINE },
+ { 0xa790, 15, RI_E2_ONLINE },
+ { 0xa7cc, 4, RI_E2E3E3B0_ONLINE },
+ { 0xa7e0, 6, RI_E3E3B0_ONLINE },
+ { 0xa800, 18, RI_E2_ONLINE },
+ { 0xa848, 33, RI_E2E3E3B0_ONLINE },
+ { 0xa8cc, 2, RI_E3E3B0_ONLINE },
+ { 0xa8d4, 4, RI_E2E3E3B0_ONLINE },
+ { 0xa8e4, 1, RI_E3E3B0_ONLINE },
+ { 0xa8e8, 1, RI_E2E3E3B0_ONLINE },
+ { 0xa8f0, 1, RI_E2E3E3B0_ONLINE },
+ { 0xa8f8, 30, RI_E3E3B0_ONLINE },
+ { 0xa974, 73, RI_E3E3B0_ONLINE },
+ { 0xac30, 1, RI_E3E3B0_ONLINE },
+ { 0xac40, 1, RI_E3E3B0_ONLINE },
+ { 0xac50, 1, RI_E3E3B0_ONLINE },
+ { 0xac60, 1, RI_E3B0_ONLINE },
+ { 0x10000, 9, RI_ALL_ONLINE },
+ { 0x10024, 1, RI_E1E1HE2_ONLINE },
+ { 0x10028, 5, RI_ALL_ONLINE },
+ { 0x1003c, 6, RI_E1E1HE2_ONLINE },
+ { 0x10054, 20, RI_ALL_ONLINE },
+ { 0x100a4, 4, RI_E1E1HE2_ONLINE },
+ { 0x100b4, 11, RI_ALL_ONLINE },
+ { 0x100e0, 4, RI_E1E1HE2_ONLINE },
+ { 0x100f0, 8, RI_ALL_ONLINE },
+ { 0x10110, 6, RI_E1E1HE2_ONLINE },
+ { 0x10128, 110, RI_ALL_ONLINE },
+ { 0x102e0, 4, RI_E1E1HE2_ONLINE },
+ { 0x102f0, 18, RI_ALL_ONLINE },
+ { 0x10338, 20, RI_E1E1HE2_ONLINE },
+ { 0x10388, 10, RI_ALL_ONLINE },
+ { 0x10400, 6, RI_E1E1HE2_ONLINE },
+ { 0x10418, 6, RI_ALL_ONLINE },
+ { 0x10430, 10, RI_E1E1HE2_ONLINE },
+ { 0x10458, 22, RI_ALL_ONLINE },
+ { 0x104b0, 12, RI_E1E1HE2_ONLINE },
+ { 0x104e0, 1, RI_ALL_ONLINE },
+ { 0x104e8, 2, RI_ALL_ONLINE },
+ { 0x104f4, 2, RI_ALL_ONLINE },
+ { 0x10500, 146, RI_ALL_ONLINE },
+ { 0x10750, 2, RI_E1E1HE2_ONLINE },
+ { 0x10760, 2, RI_E1E1HE2_ONLINE },
+ { 0x10770, 2, RI_E1E1HE2_ONLINE },
+ { 0x10780, 2, RI_E1E1HE2_ONLINE },
+ { 0x10790, 2, RI_ALL_ONLINE },
+ { 0x107a0, 2, RI_E1E1HE2_ONLINE },
+ { 0x107b0, 2, RI_E1E1HE2_ONLINE },
+ { 0x107c0, 2, RI_E1E1HE2_ONLINE },
+ { 0x107d0, 2, RI_E1E1HE2_ONLINE },
+ { 0x107e0, 2, RI_ALL_ONLINE },
+ { 0x10880, 2, RI_ALL_ONLINE },
+ { 0x10900, 2, RI_ALL_ONLINE },
+ { 0x16000, 1, RI_E1HE2_ONLINE },
+ { 0x16004, 25, RI_E1HE2E3E3B0_ONLINE },
+ { 0x16070, 8, RI_E1HE2E3E3B0_ONLINE },
+ { 0x16090, 4, RI_E1HE2E3_ONLINE },
+ { 0x160a0, 6, RI_E1HE2E3E3B0_ONLINE },
+ { 0x160c0, 7, RI_E1HE2E3E3B0_ONLINE },
+ { 0x160dc, 2, RI_E1HE2_ONLINE },
+ { 0x160e4, 10, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1610c, 2, RI_E1HE2_ONLINE },
+ { 0x16114, 6, RI_E1HE2E3E3B0_ONLINE },
+ { 0x16140, 48, RI_E1HE2E3E3B0_ONLINE },
+ { 0x16204, 5, RI_E1HE2E3E3B0_ONLINE },
+ { 0x18000, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x18008, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x18010, 35, RI_E2E3E3B0_ONLINE },
+ { 0x180a4, 2, RI_E2E3E3B0_ONLINE },
+ { 0x180c0, 9, RI_E2E3E3B0_ONLINE },
+ { 0x180e4, 1, RI_E2E3_ONLINE },
+ { 0x180e8, 2, RI_E2E3E3B0_ONLINE },
+ { 0x180f0, 1, RI_E2E3_ONLINE },
+ { 0x180f4, 79, RI_E2E3E3B0_ONLINE },
+ { 0x18230, 1, RI_E2E3_ONLINE },
+ { 0x18234, 2, RI_E2E3E3B0_ONLINE },
+ { 0x1823c, 1, RI_E2E3_ONLINE },
+ { 0x18240, 13, RI_E2E3E3B0_ONLINE },
+ { 0x18274, 1, RI_E2_ONLINE },
+ { 0x18278, 81, RI_E2E3E3B0_ONLINE },
+ { 0x18440, 63, RI_E2E3E3B0_ONLINE },
+ { 0x18570, 42, RI_E3E3B0_ONLINE },
+ { 0x18618, 25, RI_E3B0_ONLINE },
+ { 0x18680, 44, RI_E3B0_ONLINE },
+ { 0x18748, 12, RI_E3B0_ONLINE },
+ { 0x18788, 1, RI_E3B0_ONLINE },
+ { 0x1879c, 6, RI_E3B0_ONLINE },
+ { 0x187c4, 51, RI_E3B0_ONLINE },
+ { 0x18a00, 48, RI_E3B0_ONLINE },
+ { 0x20000, 24, RI_ALL_ONLINE },
+ { 0x20060, 8, RI_ALL_ONLINE },
+ { 0x20080, 94, RI_ALL_ONLINE },
+ { 0x201f8, 1, RI_E1E1H_ONLINE },
+ { 0x201fc, 1, RI_ALL_ONLINE },
+ { 0x20200, 1, RI_E1E1H_ONLINE },
+ { 0x20204, 1, RI_ALL_ONLINE },
+ { 0x20208, 1, RI_E1E1H_ONLINE },
+ { 0x2020c, 39, RI_ALL_ONLINE },
+ { 0x202c8, 1, RI_E2E3E3B0_ONLINE },
+ { 0x202d8, 4, RI_E2E3E3B0_ONLINE },
+ { 0x202f0, 1, RI_E3B0_ONLINE },
+ { 0x20400, 2, RI_ALL_ONLINE },
+ { 0x2040c, 8, RI_ALL_ONLINE },
+ { 0x2042c, 18, RI_E1HE2E3E3B0_ONLINE },
+ { 0x20480, 1, RI_ALL_ONLINE },
+ { 0x20500, 1, RI_ALL_ONLINE },
+ { 0x20600, 1, RI_ALL_ONLINE },
+ { 0x28000, 1, RI_ALL_ONLINE },
+ { 0x28004, 8191, RI_ALL_OFFLINE },
+ { 0x30000, 1, RI_ALL_ONLINE },
+ { 0x30004, 16383, RI_ALL_OFFLINE },
+ { 0x40000, 98, RI_ALL_ONLINE },
+ { 0x401a8, 8, RI_E1HE2E3E3B0_ONLINE },
+ { 0x401c8, 1, RI_E1H_ONLINE },
+ { 0x401cc, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x401d4, 2, RI_E2E3E3B0_ONLINE },
+ { 0x40200, 4, RI_ALL_ONLINE },
+ { 0x40220, 6, RI_E2E3E3B0_ONLINE },
+ { 0x40238, 8, RI_E2E3_ONLINE },
+ { 0x40258, 4, RI_E2E3E3B0_ONLINE },
+ { 0x40268, 2, RI_E3E3B0_ONLINE },
+ { 0x40270, 17, RI_E3B0_ONLINE },
+ { 0x40400, 43, RI_ALL_ONLINE },
+ { 0x404cc, 3, RI_E1HE2E3E3B0_ONLINE },
+ { 0x404e0, 1, RI_E2E3E3B0_ONLINE },
+ { 0x40500, 2, RI_ALL_ONLINE },
+ { 0x40510, 2, RI_ALL_ONLINE },
+ { 0x40520, 2, RI_ALL_ONLINE },
+ { 0x40530, 2, RI_ALL_ONLINE },
+ { 0x40540, 2, RI_ALL_ONLINE },
+ { 0x40550, 10, RI_E2E3E3B0_ONLINE },
+ { 0x40610, 2, RI_E2E3E3B0_ONLINE },
+ { 0x42000, 164, RI_ALL_ONLINE },
+ { 0x422c0, 4, RI_E2E3E3B0_ONLINE },
+ { 0x422d4, 5, RI_E1HE2E3E3B0_ONLINE },
+ { 0x422e8, 1, RI_E2E3E3B0_ONLINE },
+ { 0x42400, 49, RI_ALL_ONLINE },
+ { 0x424c8, 38, RI_ALL_ONLINE },
+ { 0x42568, 2, RI_ALL_ONLINE },
+ { 0x42640, 5, RI_E2E3E3B0_ONLINE },
+ { 0x42800, 1, RI_ALL_ONLINE },
+ { 0x50000, 1, RI_ALL_ONLINE },
+ { 0x50004, 19, RI_ALL_ONLINE },
+ { 0x50050, 8, RI_ALL_ONLINE },
+ { 0x50070, 88, RI_ALL_ONLINE },
+ { 0x501f0, 4, RI_E1HE2E3E3B0_ONLINE },
+ { 0x50200, 2, RI_ALL_ONLINE },
+ { 0x5020c, 7, RI_ALL_ONLINE },
+ { 0x50228, 6, RI_E1HE2E3E3B0_ONLINE },
+ { 0x50240, 1, RI_ALL_ONLINE },
+ { 0x50280, 1, RI_ALL_ONLINE },
+ { 0x50300, 1, RI_E2E3E3B0_ONLINE },
+ { 0x5030c, 1, RI_E2E3E3B0_ONLINE },
+ { 0x50318, 1, RI_E2E3E3B0_ONLINE },
+ { 0x5031c, 1, RI_E2E3E3B0_ONLINE },
+ { 0x50320, 2, RI_E2E3E3B0_ONLINE },
+ { 0x50330, 1, RI_E3B0_ONLINE },
+ { 0x52000, 1, RI_ALL_ONLINE },
+ { 0x54000, 1, RI_ALL_ONLINE },
+ { 0x54004, 3327, RI_ALL_OFFLINE },
+ { 0x58000, 1, RI_ALL_ONLINE },
+ { 0x58004, 8191, RI_E1E1H_OFFLINE },
+ { 0x60000, 26, RI_ALL_ONLINE },
+ { 0x60068, 8, RI_E1E1H_ONLINE },
+ { 0x60088, 12, RI_ALL_ONLINE },
+ { 0x600b8, 9, RI_E1E1H_ONLINE },
+ { 0x600dc, 1, RI_ALL_ONLINE },
+ { 0x600e0, 5, RI_E1E1H_ONLINE },
+ { 0x600f4, 1, RI_E1E1HE2_ONLINE },
+ { 0x600f8, 1, RI_E1E1H_ONLINE },
+ { 0x600fc, 8, RI_ALL_ONLINE },
+ { 0x6013c, 24, RI_E1H_ONLINE },
+ { 0x6019c, 2, RI_E2E3E3B0_ONLINE },
+ { 0x601ac, 18, RI_E2E3E3B0_ONLINE },
+ { 0x60200, 1, RI_ALL_ONLINE },
+ { 0x60204, 2, RI_ALL_OFFLINE },
+ { 0x60210, 13, RI_E2E3E3B0_ONLINE },
+ { 0x60244, 16, RI_E3B0_ONLINE },
+ { 0x61000, 1, RI_ALL_ONLINE },
+ { 0x61004, 511, RI_ALL_OFFLINE },
+ { 0x61800, 512, RI_E3E3B0_OFFLINE },
+ { 0x70000, 8, RI_ALL_ONLINE },
+ { 0x70020, 8184, RI_ALL_OFFLINE },
+ { 0x78000, 8192, RI_E3E3B0_OFFLINE },
+ { 0x85000, 3, RI_ALL_ONLINE },
+ { 0x8501c, 7, RI_ALL_ONLINE },
+ { 0x85048, 1, RI_ALL_ONLINE },
+ { 0x85200, 32, RI_ALL_ONLINE },
+ { 0xb0000, 16384, RI_E1H_ONLINE },
+ { 0xc1000, 7, RI_ALL_ONLINE },
+ { 0xc103c, 2, RI_E2E3E3B0_ONLINE },
+ { 0xc1800, 2, RI_ALL_ONLINE },
+ { 0xc2000, 164, RI_ALL_ONLINE },
+ { 0xc22c0, 5, RI_E2E3E3B0_ONLINE },
+ { 0xc22d8, 4, RI_E2E3E3B0_ONLINE },
+ { 0xc2400, 49, RI_ALL_ONLINE },
+ { 0xc24c8, 38, RI_ALL_ONLINE },
+ { 0xc2568, 2, RI_ALL_ONLINE },
+ { 0xc2600, 1, RI_ALL_ONLINE },
+ { 0xc4000, 165, RI_ALL_ONLINE },
+ { 0xc42d8, 2, RI_E2E3E3B0_ONLINE },
+ { 0xc42e0, 7, RI_E1HE2E3E3B0_ONLINE },
+ { 0xc42fc, 1, RI_E2E3E3B0_ONLINE },
+ { 0xc4400, 51, RI_ALL_ONLINE },
+ { 0xc44d0, 38, RI_ALL_ONLINE },
+ { 0xc4570, 2, RI_ALL_ONLINE },
+ { 0xc4578, 5, RI_E2E3E3B0_ONLINE },
+ { 0xc4600, 1, RI_ALL_ONLINE },
+ { 0xd0000, 19, RI_ALL_ONLINE },
+ { 0xd004c, 8, RI_ALL_ONLINE },
+ { 0xd006c, 91, RI_ALL_ONLINE },
+ { 0xd01fc, 1, RI_E2E3E3B0_ONLINE },
+ { 0xd0200, 2, RI_ALL_ONLINE },
+ { 0xd020c, 7, RI_ALL_ONLINE },
+ { 0xd0228, 18, RI_E1HE2E3E3B0_ONLINE },
+ { 0xd0280, 1, RI_ALL_ONLINE },
+ { 0xd0300, 1, RI_ALL_ONLINE },
+ { 0xd0400, 1, RI_ALL_ONLINE },
+ { 0xd0818, 1, RI_E3B0_ONLINE },
+ { 0xd4000, 1, RI_ALL_ONLINE },
+ { 0xd4004, 2559, RI_ALL_OFFLINE },
+ { 0xd8000, 1, RI_ALL_ONLINE },
+ { 0xd8004, 8191, RI_ALL_OFFLINE },
+ { 0xe0000, 21, RI_ALL_ONLINE },
+ { 0xe0054, 8, RI_ALL_ONLINE },
+ { 0xe0074, 49, RI_ALL_ONLINE },
+ { 0xe0138, 1, RI_E1E1H_ONLINE },
+ { 0xe013c, 35, RI_ALL_ONLINE },
+ { 0xe01f4, 1, RI_E2_ONLINE },
+ { 0xe01f8, 1, RI_E2E3E3B0_ONLINE },
+ { 0xe0200, 2, RI_ALL_ONLINE },
+ { 0xe020c, 8, RI_ALL_ONLINE },
+ { 0xe022c, 18, RI_E1HE2E3E3B0_ONLINE },
+ { 0xe0280, 1, RI_ALL_ONLINE },
+ { 0xe0300, 1, RI_ALL_ONLINE },
+ { 0xe0400, 1, RI_E3B0_ONLINE },
+ { 0xe1000, 1, RI_ALL_ONLINE },
+ { 0xe2000, 1, RI_ALL_ONLINE },
+ { 0xe2004, 2047, RI_ALL_OFFLINE },
+ { 0xf0000, 1, RI_ALL_ONLINE },
+ { 0xf0004, 16383, RI_ALL_OFFLINE },
+ { 0x101000, 12, RI_ALL_ONLINE },
+ { 0x101050, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x101054, 3, RI_E2E3E3B0_ONLINE },
+ { 0x101100, 1, RI_ALL_ONLINE },
+ { 0x101800, 8, RI_ALL_ONLINE },
+ { 0x102000, 18, RI_ALL_ONLINE },
+ { 0x102068, 6, RI_E2E3E3B0_ONLINE },
+ { 0x102080, 17, RI_ALL_ONLINE },
+ { 0x1020c8, 8, RI_E1H_ONLINE },
+ { 0x1020e8, 9, RI_E2E3E3B0_ONLINE },
+ { 0x102400, 1, RI_ALL_ONLINE },
+ { 0x103000, 26, RI_ALL_ONLINE },
+ { 0x103098, 5, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1030ac, 2, RI_E2E3E3B0_ONLINE },
+ { 0x1030b4, 1, RI_E2_ONLINE },
+ { 0x1030b8, 7, RI_E2E3E3B0_ONLINE },
+ { 0x1030d8, 8, RI_E2E3E3B0_ONLINE },
+ { 0x103400, 1, RI_E2E3E3B0_ONLINE },
+ { 0x103404, 135, RI_E2E3E3B0_OFFLINE },
+ { 0x103800, 8, RI_ALL_ONLINE },
+ { 0x104000, 63, RI_ALL_ONLINE },
+ { 0x10411c, 16, RI_E2E3E3B0_ONLINE },
+ { 0x104200, 17, RI_ALL_ONLINE },
+ { 0x104400, 64, RI_ALL_ONLINE },
+ { 0x104500, 192, RI_ALL_OFFLINE },
+ { 0x104800, 64, RI_ALL_ONLINE },
+ { 0x104900, 192, RI_ALL_OFFLINE },
+ { 0x105000, 256, RI_ALL_ONLINE },
+ { 0x105400, 768, RI_ALL_OFFLINE },
+ { 0x107000, 7, RI_E2E3E3B0_ONLINE },
+ { 0x10701c, 1, RI_E3E3B0_ONLINE },
+ { 0x108000, 33, RI_E1E1H_ONLINE },
+ { 0x1080ac, 5, RI_E1H_ONLINE },
+ { 0x108100, 5, RI_E1E1H_ONLINE },
+ { 0x108120, 5, RI_E1E1H_ONLINE },
+ { 0x108200, 74, RI_E1E1H_ONLINE },
+ { 0x108400, 74, RI_E1E1H_ONLINE },
+ { 0x108800, 152, RI_E1E1H_ONLINE },
+ { 0x110000, 111, RI_E2E3E3B0_ONLINE },
+ { 0x1101dc, 1, RI_E3E3B0_ONLINE },
+ { 0x110200, 4, RI_E2E3E3B0_ONLINE },
+ { 0x120000, 2, RI_ALL_ONLINE },
+ { 0x120008, 4, RI_ALL_ONLINE },
+ { 0x120018, 3, RI_ALL_ONLINE },
+ { 0x120024, 4, RI_ALL_ONLINE },
+ { 0x120034, 3, RI_ALL_ONLINE },
+ { 0x120040, 4, RI_ALL_ONLINE },
+ { 0x120050, 3, RI_ALL_ONLINE },
+ { 0x12005c, 4, RI_ALL_ONLINE },
+ { 0x12006c, 3, RI_ALL_ONLINE },
+ { 0x120078, 4, RI_ALL_ONLINE },
+ { 0x120088, 3, RI_ALL_ONLINE },
+ { 0x120094, 4, RI_ALL_ONLINE },
+ { 0x1200a4, 3, RI_ALL_ONLINE },
+ { 0x1200b0, 4, RI_ALL_ONLINE },
+ { 0x1200c0, 3, RI_ALL_ONLINE },
+ { 0x1200cc, 4, RI_ALL_ONLINE },
+ { 0x1200dc, 3, RI_ALL_ONLINE },
+ { 0x1200e8, 4, RI_ALL_ONLINE },
+ { 0x1200f8, 3, RI_ALL_ONLINE },
+ { 0x120104, 4, RI_ALL_ONLINE },
+ { 0x120114, 1, RI_ALL_ONLINE },
+ { 0x120118, 22, RI_ALL_ONLINE },
+ { 0x120170, 2, RI_E1E1H_ONLINE },
+ { 0x120178, 243, RI_ALL_ONLINE },
+ { 0x120544, 4, RI_E1E1H_ONLINE },
+ { 0x120554, 6, RI_ALL_ONLINE },
+ { 0x12059c, 6, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1205b4, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1205b8, 15, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1205f4, 1, RI_E1HE2_ONLINE },
+ { 0x1205f8, 4, RI_E2E3E3B0_ONLINE },
+ { 0x120618, 1, RI_E2E3E3B0_ONLINE },
+ { 0x12061c, 20, RI_E1HE2E3E3B0_ONLINE },
+ { 0x12066c, 11, RI_E1HE2E3E3B0_ONLINE },
+ { 0x120698, 3, RI_E2E3E3B0_ONLINE },
+ { 0x1206a4, 1, RI_E2_ONLINE },
+ { 0x1206a8, 1, RI_E2E3E3B0_ONLINE },
+ { 0x1206b0, 75, RI_E2E3E3B0_ONLINE },
+ { 0x1207dc, 1, RI_E2_ONLINE },
+ { 0x1207fc, 1, RI_E2E3E3B0_ONLINE },
+ { 0x12080c, 65, RI_ALL_ONLINE },
+ { 0x120910, 7, RI_E2E3E3B0_ONLINE },
+ { 0x120930, 9, RI_E2E3E3B0_ONLINE },
+ { 0x12095c, 37, RI_E3E3B0_ONLINE },
+ { 0x120a00, 2, RI_E1E1HE2_ONLINE },
+ { 0x120b00, 1, RI_E3E3B0_ONLINE },
+ { 0x122000, 2, RI_ALL_ONLINE },
+ { 0x122008, 2046, RI_E1_OFFLINE },
+ { 0x128000, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x128008, 6142, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x130000, 35, RI_E2E3E3B0_ONLINE },
+ { 0x130100, 29, RI_E2E3E3B0_ONLINE },
+ { 0x130180, 1, RI_E2E3E3B0_ONLINE },
+ { 0x130200, 1, RI_E2E3E3B0_ONLINE },
+ { 0x130280, 1, RI_E2E3E3B0_ONLINE },
+ { 0x130300, 5, RI_E2E3E3B0_ONLINE },
+ { 0x130380, 1, RI_E2E3E3B0_ONLINE },
+ { 0x130400, 1, RI_E2E3E3B0_ONLINE },
+ { 0x130480, 5, RI_E2E3E3B0_ONLINE },
+ { 0x130800, 72, RI_E2E3E3B0_ONLINE },
+ { 0x131000, 136, RI_E2E3E3B0_ONLINE },
+ { 0x132000, 148, RI_E2E3E3B0_ONLINE },
+ { 0x134000, 544, RI_E2E3E3B0_ONLINE },
+ { 0x140000, 1, RI_ALL_ONLINE },
+ { 0x140004, 9, RI_E1E1HE2E3_ONLINE },
+ { 0x140028, 8, RI_ALL_ONLINE },
+ { 0x140048, 10, RI_E1E1HE2E3_ONLINE },
+ { 0x140070, 1, RI_ALL_ONLINE },
+ { 0x140074, 10, RI_E1E1HE2E3_ONLINE },
+ { 0x14009c, 1, RI_ALL_ONLINE },
+ { 0x1400a0, 5, RI_E1E1HE2E3_ONLINE },
+ { 0x1400b4, 7, RI_ALL_ONLINE },
+ { 0x1400d0, 10, RI_E1E1HE2E3_ONLINE },
+ { 0x1400f8, 2, RI_ALL_ONLINE },
+ { 0x140100, 5, RI_E1E1H_ONLINE },
+ { 0x140114, 5, RI_E1E1HE2E3_ONLINE },
+ { 0x140128, 7, RI_ALL_ONLINE },
+ { 0x140144, 9, RI_E1E1HE2E3_ONLINE },
+ { 0x140168, 8, RI_ALL_ONLINE },
+ { 0x140188, 3, RI_E1E1HE2E3_ONLINE },
+ { 0x140194, 13, RI_ALL_ONLINE },
+ { 0x140200, 6, RI_E1E1HE2E3_ONLINE },
+ { 0x140220, 4, RI_E2E3_ONLINE },
+ { 0x140240, 4, RI_E2E3_ONLINE },
+ { 0x140260, 4, RI_E2E3_ONLINE },
+ { 0x140280, 4, RI_E2E3_ONLINE },
+ { 0x1402a0, 4, RI_E2E3_ONLINE },
+ { 0x1402c0, 4, RI_E2E3_ONLINE },
+ { 0x1402e0, 2, RI_E2E3_ONLINE },
+ { 0x1402e8, 2, RI_E2E3E3B0_ONLINE },
+ { 0x1402f0, 9, RI_E2E3_ONLINE },
+ { 0x140314, 44, RI_E3B0_ONLINE },
+ { 0x1403d0, 70, RI_E3B0_ONLINE },
+ { 0x144000, 4, RI_E1E1H_ONLINE },
+ { 0x148000, 4, RI_E1E1H_ONLINE },
+ { 0x14c000, 4, RI_E1E1H_ONLINE },
+ { 0x150000, 4, RI_E1E1H_ONLINE },
+ { 0x154000, 4, RI_E1E1H_ONLINE },
+ { 0x158000, 4, RI_E1E1H_ONLINE },
+ { 0x15c000, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x15c008, 5, RI_E1H_ONLINE },
+ { 0x15c020, 8, RI_E2E3E3B0_ONLINE },
+ { 0x15c040, 1, RI_E2E3_ONLINE },
+ { 0x15c044, 2, RI_E2E3E3B0_ONLINE },
+ { 0x15c04c, 8, RI_E2E3_ONLINE },
+ { 0x15c06c, 8, RI_E2E3E3B0_ONLINE },
+ { 0x15c090, 13, RI_E2E3E3B0_ONLINE },
+ { 0x15c0c8, 24, RI_E2E3E3B0_ONLINE },
+ { 0x15c128, 2, RI_E2E3_ONLINE },
+ { 0x15c130, 8, RI_E2E3E3B0_ONLINE },
+ { 0x15c150, 2, RI_E3E3B0_ONLINE },
+ { 0x15c158, 2, RI_E3_ONLINE },
+ { 0x15c160, 149, RI_E3B0_ONLINE },
+ { 0x161000, 7, RI_ALL_ONLINE },
+ { 0x16103c, 2, RI_E2E3E3B0_ONLINE },
+ { 0x161800, 2, RI_ALL_ONLINE },
+ { 0x162000, 54, RI_E3E3B0_ONLINE },
+ { 0x162200, 60, RI_E3E3B0_ONLINE },
+ { 0x162400, 54, RI_E3E3B0_ONLINE },
+ { 0x162600, 60, RI_E3E3B0_ONLINE },
+ { 0x162800, 54, RI_E3E3B0_ONLINE },
+ { 0x162a00, 60, RI_E3E3B0_ONLINE },
+ { 0x162c00, 54, RI_E3E3B0_ONLINE },
+ { 0x162e00, 60, RI_E3E3B0_ONLINE },
+ { 0x164000, 60, RI_ALL_ONLINE },
+ { 0x164110, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x164118, 15, RI_E2E3E3B0_ONLINE },
+ { 0x164200, 1, RI_ALL_ONLINE },
+ { 0x164208, 1, RI_ALL_ONLINE },
+ { 0x164210, 1, RI_ALL_ONLINE },
+ { 0x164218, 1, RI_ALL_ONLINE },
+ { 0x164220, 1, RI_ALL_ONLINE },
+ { 0x164228, 1, RI_ALL_ONLINE },
+ { 0x164230, 1, RI_ALL_ONLINE },
+ { 0x164238, 1, RI_ALL_ONLINE },
+ { 0x164240, 1, RI_ALL_ONLINE },
+ { 0x164248, 1, RI_ALL_ONLINE },
+ { 0x164250, 1, RI_ALL_ONLINE },
+ { 0x164258, 1, RI_ALL_ONLINE },
+ { 0x164260, 1, RI_ALL_ONLINE },
+ { 0x164270, 2, RI_ALL_ONLINE },
+ { 0x164280, 2, RI_ALL_ONLINE },
+ { 0x164800, 2, RI_ALL_ONLINE },
+ { 0x165000, 2, RI_ALL_ONLINE },
+ { 0x166000, 164, RI_ALL_ONLINE },
+ { 0x1662cc, 7, RI_E2E3E3B0_ONLINE },
+ { 0x166400, 49, RI_ALL_ONLINE },
+ { 0x1664c8, 38, RI_ALL_ONLINE },
+ { 0x166568, 2, RI_ALL_ONLINE },
+ { 0x166570, 5, RI_E2E3E3B0_ONLINE },
+ { 0x166800, 1, RI_ALL_ONLINE },
+ { 0x168000, 137, RI_ALL_ONLINE },
+ { 0x168224, 2, RI_E1E1H_ONLINE },
+ { 0x16822c, 29, RI_ALL_ONLINE },
+ { 0x1682a0, 12, RI_E1E1H_ONLINE },
+ { 0x1682d0, 12, RI_ALL_ONLINE },
+ { 0x168300, 2, RI_E1E1H_ONLINE },
+ { 0x168308, 68, RI_ALL_ONLINE },
+ { 0x168418, 2, RI_E1E1H_ONLINE },
+ { 0x168420, 6, RI_ALL_ONLINE },
+ { 0x168800, 19, RI_ALL_ONLINE },
+ { 0x168900, 1, RI_ALL_ONLINE },
+ { 0x168a00, 128, RI_ALL_ONLINE },
+ { 0x16a000, 1, RI_ALL_ONLINE },
+ { 0x16a004, 1535, RI_ALL_OFFLINE },
+ { 0x16c000, 1, RI_ALL_ONLINE },
+ { 0x16c004, 1535, RI_ALL_OFFLINE },
+ { 0x16e000, 16, RI_E1H_ONLINE },
+ { 0x16e040, 8, RI_E2E3E3B0_ONLINE },
+ { 0x16e100, 1, RI_E1H_ONLINE },
+ { 0x16e200, 2, RI_E1H_ONLINE },
+ { 0x16e400, 161, RI_E1H_ONLINE },
+ { 0x16e684, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x16e68c, 12, RI_E1H_ONLINE },
+ { 0x16e6bc, 4, RI_E1HE2E3E3B0_ONLINE },
+ { 0x16e6cc, 4, RI_E1H_ONLINE },
+ { 0x16e6e0, 2, RI_E2E3E3B0_ONLINE },
+ { 0x16e6e8, 5, RI_E2E3_ONLINE },
+ { 0x16e6fc, 5, RI_E2E3E3B0_ONLINE },
+ { 0x16e768, 17, RI_E2E3E3B0_ONLINE },
+ { 0x16e7ac, 12, RI_E3B0_ONLINE },
+ { 0x170000, 24, RI_ALL_ONLINE },
+ { 0x170060, 4, RI_E1E1H_ONLINE },
+ { 0x170070, 65, RI_ALL_ONLINE },
+ { 0x170194, 11, RI_E2E3E3B0_ONLINE },
+ { 0x1701c4, 1, RI_E2E3E3B0_ONLINE },
+ { 0x1701cc, 7, RI_E2E3E3B0_ONLINE },
+ { 0x1701e8, 1, RI_E3E3B0_ONLINE },
+ { 0x1701ec, 1, RI_E2E3E3B0_ONLINE },
+ { 0x1701f4, 1, RI_E2E3E3B0_ONLINE },
+ { 0x170200, 4, RI_ALL_ONLINE },
+ { 0x170214, 1, RI_ALL_ONLINE },
+ { 0x170218, 77, RI_E2E3E3B0_ONLINE },
+ { 0x170400, 64, RI_E2E3E3B0_ONLINE },
+ { 0x178000, 1, RI_ALL_ONLINE },
+ { 0x180000, 61, RI_ALL_ONLINE },
+ { 0x18013c, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x180200, 58, RI_ALL_ONLINE },
+ { 0x180340, 4, RI_ALL_ONLINE },
+ { 0x180380, 1, RI_E2E3E3B0_ONLINE },
+ { 0x180388, 1, RI_E2E3E3B0_ONLINE },
+ { 0x180390, 1, RI_E2E3E3B0_ONLINE },
+ { 0x180398, 1, RI_E2E3E3B0_ONLINE },
+ { 0x1803a0, 5, RI_E2E3E3B0_ONLINE },
+ { 0x1803b4, 2, RI_E3E3B0_ONLINE },
+ { 0x180400, 1, RI_ALL_ONLINE },
+ { 0x180404, 255, RI_E1E1H_OFFLINE },
+ { 0x181000, 4, RI_ALL_ONLINE },
+ { 0x181010, 1020, RI_ALL_OFFLINE },
+ { 0x182000, 4, RI_E3E3B0_ONLINE },
+ { 0x1a0000, 1, RI_ALL_ONLINE },
+ { 0x1a0004, 5631, RI_ALL_OFFLINE },
+ { 0x1a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x1a8000, 1, RI_ALL_ONLINE },
+ { 0x1a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x1b0000, 1, RI_ALL_ONLINE },
+ { 0x1b0004, 15, RI_E1H_OFFLINE },
+ { 0x1b0040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b0044, 239, RI_E1H_OFFLINE },
+ { 0x1b0400, 1, RI_ALL_ONLINE },
+ { 0x1b0404, 255, RI_E1H_OFFLINE },
+ { 0x1b0800, 1, RI_ALL_ONLINE },
+ { 0x1b0840, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b0c00, 1, RI_ALL_ONLINE },
+ { 0x1b1000, 1, RI_ALL_ONLINE },
+ { 0x1b1040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b1400, 1, RI_ALL_ONLINE },
+ { 0x1b1440, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b1480, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b1800, 128, RI_ALL_OFFLINE },
+ { 0x1b1c00, 128, RI_ALL_OFFLINE },
+ { 0x1b2000, 1, RI_ALL_ONLINE },
+ { 0x1b2400, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b2404, 5631, RI_E2E3E3B0_OFFLINE },
+ { 0x1b8000, 1, RI_ALL_ONLINE },
+ { 0x1b8040, 1, RI_ALL_ONLINE },
+ { 0x1b8080, 1, RI_ALL_ONLINE },
+ { 0x1b80c0, 1, RI_ALL_ONLINE },
+ { 0x1b8100, 1, RI_ALL_ONLINE },
+ { 0x1b8140, 1, RI_ALL_ONLINE },
+ { 0x1b8180, 1, RI_ALL_ONLINE },
+ { 0x1b81c0, 1, RI_ALL_ONLINE },
+ { 0x1b8200, 1, RI_ALL_ONLINE },
+ { 0x1b8240, 1, RI_ALL_ONLINE },
+ { 0x1b8280, 1, RI_ALL_ONLINE },
+ { 0x1b82c0, 1, RI_ALL_ONLINE },
+ { 0x1b8300, 1, RI_ALL_ONLINE },
+ { 0x1b8340, 1, RI_ALL_ONLINE },
+ { 0x1b8380, 1, RI_ALL_ONLINE },
+ { 0x1b83c0, 1, RI_ALL_ONLINE },
+ { 0x1b8400, 1, RI_ALL_ONLINE },
+ { 0x1b8440, 1, RI_ALL_ONLINE },
+ { 0x1b8480, 1, RI_ALL_ONLINE },
+ { 0x1b84c0, 1, RI_ALL_ONLINE },
+ { 0x1b8500, 1, RI_ALL_ONLINE },
+ { 0x1b8540, 1, RI_ALL_ONLINE },
+ { 0x1b8580, 1, RI_ALL_ONLINE },
+ { 0x1b85c0, 19, RI_E2E3E3B0_ONLINE },
+ { 0x1b8800, 1, RI_ALL_ONLINE },
+ { 0x1b8840, 1, RI_ALL_ONLINE },
+ { 0x1b8880, 1, RI_ALL_ONLINE },
+ { 0x1b88c0, 1, RI_ALL_ONLINE },
+ { 0x1b8900, 1, RI_ALL_ONLINE },
+ { 0x1b8940, 1, RI_ALL_ONLINE },
+ { 0x1b8980, 1, RI_ALL_ONLINE },
+ { 0x1b89c0, 1, RI_ALL_ONLINE },
+ { 0x1b8a00, 1, RI_ALL_ONLINE },
+ { 0x1b8a40, 1, RI_ALL_ONLINE },
+ { 0x1b8a80, 1, RI_ALL_ONLINE },
+ { 0x1b8ac0, 1, RI_ALL_ONLINE },
+ { 0x1b8b00, 1, RI_ALL_ONLINE },
+ { 0x1b8b40, 1, RI_ALL_ONLINE },
+ { 0x1b8b80, 1, RI_ALL_ONLINE },
+ { 0x1b8bc0, 1, RI_ALL_ONLINE },
+ { 0x1b8c00, 1, RI_ALL_ONLINE },
+ { 0x1b8c40, 1, RI_ALL_ONLINE },
+ { 0x1b8c80, 1, RI_ALL_ONLINE },
+ { 0x1b8cc0, 1, RI_ALL_ONLINE },
+ { 0x1b8cc4, 1, RI_E2E3E3B0_ONLINE },
+ { 0x1b8d00, 1, RI_ALL_ONLINE },
+ { 0x1b8d40, 1, RI_ALL_ONLINE },
+ { 0x1b8d80, 1, RI_ALL_ONLINE },
+ { 0x1b8dc0, 1, RI_ALL_ONLINE },
+ { 0x1b8e00, 1, RI_ALL_ONLINE },
+ { 0x1b8e40, 1, RI_ALL_ONLINE },
+ { 0x1b8e80, 1, RI_ALL_ONLINE },
+ { 0x1b8e84, 1, RI_E2E3E3B0_ONLINE },
+ { 0x1b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x1b8fc4, 2, RI_E2E3E3B0_ONLINE },
+ { 0x1b8fd0, 6, RI_E2E3E3B0_ONLINE },
+ { 0x1b8fe8, 2, RI_E3E3B0_ONLINE },
+ { 0x1b9000, 1, RI_E2E3E3B0_ONLINE },
+ { 0x1b9040, 3, RI_E2E3E3B0_ONLINE },
+ { 0x1b905c, 1, RI_E3E3B0_ONLINE },
+ { 0x1b9064, 1, RI_E3B0_ONLINE },
+ { 0x1b9080, 10, RI_E3B0_ONLINE },
+ { 0x1b9400, 14, RI_E2E3E3B0_ONLINE },
+ { 0x1b943c, 19, RI_E2E3E3B0_ONLINE },
+ { 0x1b9490, 10, RI_E2E3E3B0_ONLINE },
+ { 0x1c0000, 2, RI_ALL_ONLINE },
+ { 0x200000, 65, RI_ALL_ONLINE },
+ { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x200200, 58, RI_ALL_ONLINE },
+ { 0x200340, 4, RI_ALL_ONLINE },
+ { 0x200380, 1, RI_E2E3E3B0_ONLINE },
+ { 0x200388, 1, RI_E2E3E3B0_ONLINE },
+ { 0x200390, 1, RI_E2E3E3B0_ONLINE },
+ { 0x200398, 1, RI_E2E3E3B0_ONLINE },
+ { 0x2003a0, 1, RI_E2E3E3B0_ONLINE },
+ { 0x2003a8, 2, RI_E2E3E3B0_ONLINE },
+ { 0x200400, 1, RI_ALL_ONLINE },
+ { 0x200404, 255, RI_E1E1H_OFFLINE },
+ { 0x202000, 4, RI_ALL_ONLINE },
+ { 0x202010, 2044, RI_ALL_OFFLINE },
+ { 0x204000, 4, RI_E3E3B0_ONLINE },
+ { 0x220000, 1, RI_ALL_ONLINE },
+ { 0x220004, 5631, RI_ALL_OFFLINE },
+ { 0x225800, 2560, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x228000, 1, RI_ALL_ONLINE },
+ { 0x228004, 8191, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x230000, 1, RI_ALL_ONLINE },
+ { 0x230004, 15, RI_E1H_OFFLINE },
+ { 0x230040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x230044, 239, RI_E1H_OFFLINE },
+ { 0x230400, 1, RI_ALL_ONLINE },
+ { 0x230404, 255, RI_E1H_OFFLINE },
+ { 0x230800, 1, RI_ALL_ONLINE },
+ { 0x230840, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x230c00, 1, RI_ALL_ONLINE },
+ { 0x231000, 1, RI_ALL_ONLINE },
+ { 0x231040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x231400, 1, RI_ALL_ONLINE },
+ { 0x231440, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x231480, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2314c0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x231800, 128, RI_ALL_OFFLINE },
+ { 0x231c00, 128, RI_ALL_OFFLINE },
+ { 0x232000, 1, RI_ALL_ONLINE },
+ { 0x232400, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x232404, 5631, RI_E2E3E3B0_OFFLINE },
+ { 0x238000, 1, RI_ALL_ONLINE },
+ { 0x238040, 1, RI_ALL_ONLINE },
+ { 0x238080, 1, RI_ALL_ONLINE },
+ { 0x2380c0, 1, RI_ALL_ONLINE },
+ { 0x238100, 1, RI_ALL_ONLINE },
+ { 0x238140, 1, RI_ALL_ONLINE },
+ { 0x238180, 1, RI_ALL_ONLINE },
+ { 0x2381c0, 1, RI_ALL_ONLINE },
+ { 0x238200, 1, RI_ALL_ONLINE },
+ { 0x238240, 1, RI_ALL_ONLINE },
+ { 0x238280, 1, RI_ALL_ONLINE },
+ { 0x2382c0, 1, RI_ALL_ONLINE },
+ { 0x238300, 1, RI_ALL_ONLINE },
+ { 0x238340, 1, RI_ALL_ONLINE },
+ { 0x238380, 1, RI_ALL_ONLINE },
+ { 0x2383c0, 1, RI_ALL_ONLINE },
+ { 0x238400, 1, RI_ALL_ONLINE },
+ { 0x238440, 1, RI_ALL_ONLINE },
+ { 0x238480, 1, RI_ALL_ONLINE },
+ { 0x2384c0, 1, RI_ALL_ONLINE },
+ { 0x238500, 1, RI_ALL_ONLINE },
+ { 0x238540, 1, RI_ALL_ONLINE },
+ { 0x238580, 1, RI_ALL_ONLINE },
+ { 0x2385c0, 19, RI_E2E3E3B0_ONLINE },
+ { 0x238800, 1, RI_ALL_ONLINE },
+ { 0x238840, 1, RI_ALL_ONLINE },
+ { 0x238880, 1, RI_ALL_ONLINE },
+ { 0x2388c0, 1, RI_ALL_ONLINE },
+ { 0x238900, 1, RI_ALL_ONLINE },
+ { 0x238940, 1, RI_ALL_ONLINE },
+ { 0x238980, 1, RI_ALL_ONLINE },
+ { 0x2389c0, 1, RI_ALL_ONLINE },
+ { 0x238a00, 1, RI_ALL_ONLINE },
+ { 0x238a40, 1, RI_ALL_ONLINE },
+ { 0x238a80, 1, RI_ALL_ONLINE },
+ { 0x238ac0, 1, RI_ALL_ONLINE },
+ { 0x238b00, 1, RI_ALL_ONLINE },
+ { 0x238b40, 1, RI_ALL_ONLINE },
+ { 0x238b80, 1, RI_ALL_ONLINE },
+ { 0x238bc0, 1, RI_ALL_ONLINE },
+ { 0x238c00, 1, RI_ALL_ONLINE },
+ { 0x238c40, 1, RI_ALL_ONLINE },
+ { 0x238c80, 1, RI_ALL_ONLINE },
+ { 0x238cc0, 1, RI_ALL_ONLINE },
+ { 0x238cc4, 1, RI_E2E3E3B0_ONLINE },
+ { 0x238d00, 1, RI_ALL_ONLINE },
+ { 0x238d40, 1, RI_ALL_ONLINE },
+ { 0x238d80, 1, RI_ALL_ONLINE },
+ { 0x238dc0, 1, RI_ALL_ONLINE },
+ { 0x238e00, 1, RI_ALL_ONLINE },
+ { 0x238e40, 1, RI_ALL_ONLINE },
+ { 0x238e80, 1, RI_ALL_ONLINE },
+ { 0x238e84, 1, RI_E2E3E3B0_ONLINE },
+ { 0x238ec0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x238f00, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x238f40, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x238f80, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x238fc0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x238fc4, 2, RI_E2E3E3B0_ONLINE },
+ { 0x238fd0, 6, RI_E2E3E3B0_ONLINE },
+ { 0x238fe8, 2, RI_E3E3B0_ONLINE },
+ { 0x239000, 1, RI_E2E3E3B0_ONLINE },
+ { 0x239040, 3, RI_E2E3E3B0_ONLINE },
+ { 0x23905c, 1, RI_E3E3B0_ONLINE },
+ { 0x239064, 1, RI_E3B0_ONLINE },
+ { 0x239080, 10, RI_E3B0_ONLINE },
+ { 0x240000, 2, RI_ALL_ONLINE },
+ { 0x280000, 65, RI_ALL_ONLINE },
+ { 0x28014c, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x280200, 58, RI_ALL_ONLINE },
+ { 0x280340, 4, RI_ALL_ONLINE },
+ { 0x280380, 1, RI_E2E3E3B0_ONLINE },
+ { 0x280388, 1, RI_E2E3E3B0_ONLINE },
+ { 0x280390, 1, RI_E2E3E3B0_ONLINE },
+ { 0x280398, 1, RI_E2E3E3B0_ONLINE },
+ { 0x2803a0, 1, RI_E2E3E3B0_ONLINE },
+ { 0x2803a8, 2, RI_E2E3E3B0_ONLINE },
+ { 0x280400, 1, RI_ALL_ONLINE },
+ { 0x280404, 255, RI_E1E1H_OFFLINE },
+ { 0x282000, 4, RI_ALL_ONLINE },
+ { 0x282010, 2044, RI_ALL_OFFLINE },
+ { 0x284000, 4, RI_E3E3B0_ONLINE },
+ { 0x2a0000, 1, RI_ALL_ONLINE },
+ { 0x2a0004, 5631, RI_ALL_OFFLINE },
+ { 0x2a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x2a8000, 1, RI_ALL_ONLINE },
+ { 0x2a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x2b0000, 1, RI_ALL_ONLINE },
+ { 0x2b0004, 15, RI_E1H_OFFLINE },
+ { 0x2b0040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b0044, 239, RI_E1H_OFFLINE },
+ { 0x2b0400, 1, RI_ALL_ONLINE },
+ { 0x2b0404, 255, RI_E1H_OFFLINE },
+ { 0x2b0800, 1, RI_ALL_ONLINE },
+ { 0x2b0840, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b0c00, 1, RI_ALL_ONLINE },
+ { 0x2b1000, 1, RI_ALL_ONLINE },
+ { 0x2b1040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b1400, 1, RI_ALL_ONLINE },
+ { 0x2b1440, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b1480, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b1800, 128, RI_ALL_OFFLINE },
+ { 0x2b1c00, 128, RI_ALL_OFFLINE },
+ { 0x2b2000, 1, RI_ALL_ONLINE },
+ { 0x2b2400, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b2404, 5631, RI_E2E3E3B0_OFFLINE },
+ { 0x2b8000, 1, RI_ALL_ONLINE },
+ { 0x2b8040, 1, RI_ALL_ONLINE },
+ { 0x2b8080, 1, RI_ALL_ONLINE },
+ { 0x2b80c0, 1, RI_ALL_ONLINE },
+ { 0x2b8100, 1, RI_ALL_ONLINE },
+ { 0x2b8140, 1, RI_ALL_ONLINE },
+ { 0x2b8180, 1, RI_ALL_ONLINE },
+ { 0x2b81c0, 1, RI_ALL_ONLINE },
+ { 0x2b8200, 1, RI_ALL_ONLINE },
+ { 0x2b8240, 1, RI_ALL_ONLINE },
+ { 0x2b8280, 1, RI_ALL_ONLINE },
+ { 0x2b82c0, 1, RI_ALL_ONLINE },
+ { 0x2b8300, 1, RI_ALL_ONLINE },
+ { 0x2b8340, 1, RI_ALL_ONLINE },
+ { 0x2b8380, 1, RI_ALL_ONLINE },
+ { 0x2b83c0, 1, RI_ALL_ONLINE },
+ { 0x2b8400, 1, RI_ALL_ONLINE },
+ { 0x2b8440, 1, RI_ALL_ONLINE },
+ { 0x2b8480, 1, RI_ALL_ONLINE },
+ { 0x2b84c0, 1, RI_ALL_ONLINE },
+ { 0x2b8500, 1, RI_ALL_ONLINE },
+ { 0x2b8540, 1, RI_ALL_ONLINE },
+ { 0x2b8580, 1, RI_ALL_ONLINE },
+ { 0x2b85c0, 19, RI_E2E3E3B0_ONLINE },
+ { 0x2b8800, 1, RI_ALL_ONLINE },
+ { 0x2b8840, 1, RI_ALL_ONLINE },
+ { 0x2b8880, 1, RI_ALL_ONLINE },
+ { 0x2b88c0, 1, RI_ALL_ONLINE },
+ { 0x2b8900, 1, RI_ALL_ONLINE },
+ { 0x2b8940, 1, RI_ALL_ONLINE },
+ { 0x2b8980, 1, RI_ALL_ONLINE },
+ { 0x2b89c0, 1, RI_ALL_ONLINE },
+ { 0x2b8a00, 1, RI_ALL_ONLINE },
+ { 0x2b8a40, 1, RI_ALL_ONLINE },
+ { 0x2b8a80, 1, RI_ALL_ONLINE },
+ { 0x2b8ac0, 1, RI_ALL_ONLINE },
+ { 0x2b8b00, 1, RI_ALL_ONLINE },
+ { 0x2b8b40, 1, RI_ALL_ONLINE },
+ { 0x2b8b80, 1, RI_ALL_ONLINE },
+ { 0x2b8bc0, 1, RI_ALL_ONLINE },
+ { 0x2b8c00, 1, RI_ALL_ONLINE },
+ { 0x2b8c40, 1, RI_ALL_ONLINE },
+ { 0x2b8c80, 1, RI_ALL_ONLINE },
+ { 0x2b8cc0, 1, RI_ALL_ONLINE },
+ { 0x2b8cc4, 1, RI_E2E3E3B0_ONLINE },
+ { 0x2b8d00, 1, RI_ALL_ONLINE },
+ { 0x2b8d40, 1, RI_ALL_ONLINE },
+ { 0x2b8d80, 1, RI_ALL_ONLINE },
+ { 0x2b8dc0, 1, RI_ALL_ONLINE },
+ { 0x2b8e00, 1, RI_ALL_ONLINE },
+ { 0x2b8e40, 1, RI_ALL_ONLINE },
+ { 0x2b8e80, 1, RI_ALL_ONLINE },
+ { 0x2b8e84, 1, RI_E2E3E3B0_ONLINE },
+ { 0x2b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x2b8fc4, 2, RI_E2E3E3B0_ONLINE },
+ { 0x2b8fd0, 6, RI_E2E3E3B0_ONLINE },
+ { 0x2b8fe8, 2, RI_E3E3B0_ONLINE },
+ { 0x2b9000, 1, RI_E2E3E3B0_ONLINE },
+ { 0x2b9040, 3, RI_E2E3E3B0_ONLINE },
+ { 0x2b905c, 1, RI_E3E3B0_ONLINE },
+ { 0x2b9064, 1, RI_E3B0_ONLINE },
+ { 0x2b9080, 10, RI_E3B0_ONLINE },
+ { 0x2b9400, 14, RI_E2E3E3B0_ONLINE },
+ { 0x2b943c, 19, RI_E2E3E3B0_ONLINE },
+ { 0x2b9490, 10, RI_E2E3E3B0_ONLINE },
+ { 0x2c0000, 2, RI_ALL_ONLINE },
+ { 0x300000, 65, RI_ALL_ONLINE },
+ { 0x30014c, 2, RI_E1HE2E3E3B0_ONLINE },
+ { 0x300200, 58, RI_ALL_ONLINE },
+ { 0x300340, 4, RI_ALL_ONLINE },
+ { 0x300380, 1, RI_E2E3E3B0_ONLINE },
+ { 0x300388, 1, RI_E2E3E3B0_ONLINE },
+ { 0x300390, 1, RI_E2E3E3B0_ONLINE },
+ { 0x300398, 1, RI_E2E3E3B0_ONLINE },
+ { 0x3003a0, 1, RI_E2E3E3B0_ONLINE },
+ { 0x3003a8, 2, RI_E2E3E3B0_ONLINE },
+ { 0x300400, 1, RI_ALL_ONLINE },
+ { 0x300404, 255, RI_E1E1H_OFFLINE },
+ { 0x302000, 4, RI_ALL_ONLINE },
+ { 0x302010, 2044, RI_ALL_OFFLINE },
+ { 0x304000, 4, RI_E3E3B0_ONLINE },
+ { 0x320000, 1, RI_ALL_ONLINE },
+ { 0x320004, 5631, RI_ALL_OFFLINE },
+ { 0x325800, 2560, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x328000, 1, RI_ALL_ONLINE },
+ { 0x328004, 8191, RI_E1HE2E3E3B0_OFFLINE },
+ { 0x330000, 1, RI_ALL_ONLINE },
+ { 0x330004, 15, RI_E1H_OFFLINE },
+ { 0x330040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x330044, 239, RI_E1H_OFFLINE },
+ { 0x330400, 1, RI_ALL_ONLINE },
+ { 0x330404, 255, RI_E1H_OFFLINE },
+ { 0x330800, 1, RI_ALL_ONLINE },
+ { 0x330840, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x330c00, 1, RI_ALL_ONLINE },
+ { 0x331000, 1, RI_ALL_ONLINE },
+ { 0x331040, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x331400, 1, RI_ALL_ONLINE },
+ { 0x331440, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x331480, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x3314c0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x331800, 128, RI_ALL_OFFLINE },
+ { 0x331c00, 128, RI_ALL_OFFLINE },
+ { 0x332000, 1, RI_ALL_ONLINE },
+ { 0x332400, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x332404, 5631, RI_E2E3E3B0_OFFLINE },
+ { 0x338000, 1, RI_ALL_ONLINE },
+ { 0x338040, 1, RI_ALL_ONLINE },
+ { 0x338080, 1, RI_ALL_ONLINE },
+ { 0x3380c0, 1, RI_ALL_ONLINE },
+ { 0x338100, 1, RI_ALL_ONLINE },
+ { 0x338140, 1, RI_ALL_ONLINE },
+ { 0x338180, 1, RI_ALL_ONLINE },
+ { 0x3381c0, 1, RI_ALL_ONLINE },
+ { 0x338200, 1, RI_ALL_ONLINE },
+ { 0x338240, 1, RI_ALL_ONLINE },
+ { 0x338280, 1, RI_ALL_ONLINE },
+ { 0x3382c0, 1, RI_ALL_ONLINE },
+ { 0x338300, 1, RI_ALL_ONLINE },
+ { 0x338340, 1, RI_ALL_ONLINE },
+ { 0x338380, 1, RI_ALL_ONLINE },
+ { 0x3383c0, 1, RI_ALL_ONLINE },
+ { 0x338400, 1, RI_ALL_ONLINE },
+ { 0x338440, 1, RI_ALL_ONLINE },
+ { 0x338480, 1, RI_ALL_ONLINE },
+ { 0x3384c0, 1, RI_ALL_ONLINE },
+ { 0x338500, 1, RI_ALL_ONLINE },
+ { 0x338540, 1, RI_ALL_ONLINE },
+ { 0x338580, 1, RI_ALL_ONLINE },
+ { 0x3385c0, 19, RI_E2E3E3B0_ONLINE },
+ { 0x338800, 1, RI_ALL_ONLINE },
+ { 0x338840, 1, RI_ALL_ONLINE },
+ { 0x338880, 1, RI_ALL_ONLINE },
+ { 0x3388c0, 1, RI_ALL_ONLINE },
+ { 0x338900, 1, RI_ALL_ONLINE },
+ { 0x338940, 1, RI_ALL_ONLINE },
+ { 0x338980, 1, RI_ALL_ONLINE },
+ { 0x3389c0, 1, RI_ALL_ONLINE },
+ { 0x338a00, 1, RI_ALL_ONLINE },
+ { 0x338a40, 1, RI_ALL_ONLINE },
+ { 0x338a80, 1, RI_ALL_ONLINE },
+ { 0x338ac0, 1, RI_ALL_ONLINE },
+ { 0x338b00, 1, RI_ALL_ONLINE },
+ { 0x338b40, 1, RI_ALL_ONLINE },
+ { 0x338b80, 1, RI_ALL_ONLINE },
+ { 0x338bc0, 1, RI_ALL_ONLINE },
+ { 0x338c00, 1, RI_ALL_ONLINE },
+ { 0x338c40, 1, RI_ALL_ONLINE },
+ { 0x338c80, 1, RI_ALL_ONLINE },
+ { 0x338cc0, 1, RI_ALL_ONLINE },
+ { 0x338cc4, 1, RI_E2E3E3B0_ONLINE },
+ { 0x338d00, 1, RI_ALL_ONLINE },
+ { 0x338d40, 1, RI_ALL_ONLINE },
+ { 0x338d80, 1, RI_ALL_ONLINE },
+ { 0x338dc0, 1, RI_ALL_ONLINE },
+ { 0x338e00, 1, RI_ALL_ONLINE },
+ { 0x338e40, 1, RI_ALL_ONLINE },
+ { 0x338e80, 1, RI_ALL_ONLINE },
+ { 0x338e84, 1, RI_E2E3E3B0_ONLINE },
+ { 0x338ec0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x338f00, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x338f40, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x338f80, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x338fc0, 1, RI_E1HE2E3E3B0_ONLINE },
+ { 0x338fc4, 2, RI_E2E3E3B0_ONLINE },
+ { 0x338fd0, 6, RI_E2E3E3B0_ONLINE },
+ { 0x338fe8, 2, RI_E3E3B0_ONLINE },
+ { 0x339000, 1, RI_E2E3E3B0_ONLINE },
+ { 0x339040, 3, RI_E2E3E3B0_ONLINE },
+ { 0x33905c, 1, RI_E3E3B0_ONLINE },
+ { 0x339064, 1, RI_E3B0_ONLINE },
+ { 0x339080, 10, RI_E3B0_ONLINE },
+ { 0x340000, 2, RI_ALL_ONLINE },
};
+#define REGS_COUNT ARRAY_SIZE(reg_addrs)
-#define WREGS_COUNT_E2 1
-static const u32 read_reg_e2_0[] = { 0x1b1040, 0x1b1000 };
-
-static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = {
- { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
-};
-
-static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a };
-
-#define TIMER_REGS_COUNT_E1 2
-
-static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = {
- 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = {
- 0x1640d0, 0x1640d4 };
-
-#define TIMER_REGS_COUNT_E1H 2
+static const struct dump_sign dump_sign_all = { 0x4e23fde1, 0x70017, 0x3a };
-static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = {
- 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = {
- 0x1640d0, 0x1640d4 };
+static const u32 page_vals_e2[] = { 0, 128 };
+#define PAGE_MODE_VALUES_E2 ARRAY_SIZE(page_vals_e2)
-#define TIMER_REGS_COUNT_E2 2
+static const u32 page_write_regs_e2[] = { 328476 };
+#define PAGE_WRITE_REGS_E2 ARRAY_SIZE(page_write_regs_e2)
-static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = {
- 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = {
- 0x1640d0, 0x1640d4 };
-
-#define PAGE_MODE_VALUES_E1 0
-
-#define PAGE_READ_REGS_E1 0
-
-#define PAGE_WRITE_REGS_E1 0
-
-static const u32 page_vals_e1[] = { 0 };
-
-static const u32 page_write_regs_e1[] = { 0 };
-
-static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } };
-
-#define PAGE_MODE_VALUES_E1H 0
-
-#define PAGE_READ_REGS_E1H 0
-
-#define PAGE_WRITE_REGS_E1H 0
-
-static const u32 page_vals_e1h[] = { 0 };
-
-static const u32 page_write_regs_e1h[] = { 0 };
-
-static const struct reg_addr page_read_regs_e1h[] = {
- { 0x0, 0, RI_E1H_ONLINE } };
-
-#define PAGE_MODE_VALUES_E2 2
-
-#define PAGE_READ_REGS_E2 1
-
-#define PAGE_WRITE_REGS_E2 1
+static const struct reg_addr page_read_regs_e2[] = {
+ { 0x58000, 4608, RI_E2_ONLINE } };
+#define PAGE_READ_REGS_E2 ARRAY_SIZE(page_read_regs_e2)
-static const u32 page_vals_e2[PAGE_MODE_VALUES_E2] = { 0, 128 };
+static const u32 page_vals_e3[] = { 0, 128 };
+#define PAGE_MODE_VALUES_E3 ARRAY_SIZE(page_vals_e3)
-static const u32 page_write_regs_e2[PAGE_WRITE_REGS_E2] = { 328476 };
+static const u32 page_write_regs_e3[] = { 328476 };
+#define PAGE_WRITE_REGS_E3 ARRAY_SIZE(page_write_regs_e3)
-static const struct reg_addr page_read_regs_e2[PAGE_READ_REGS_E2] = {
- { 0x58000, 4608, RI_E2_ONLINE } };
+static const struct reg_addr page_read_regs_e3[] = {
+ { 0x58000, 4608, RI_E3E3B0_ONLINE } };
+#define PAGE_READ_REGS_E3 ARRAY_SIZE(page_read_regs_e3)
#endif /* BNX2X_DUMP_H */
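
The rebuilt bnx2x_dump.h above drops the hand-maintained *_COUNT macros and the per-chip timer/page stubs in favour of tables whose counts come from ARRAY_SIZE(), with an RI_* info word marking which chips a range is valid on. Roughly the same pattern in a standalone sketch (the struct layout, RI_* values and register ranges here are simplified stand-ins, not the driver's definitions):

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* hypothetical stand-ins for the driver's RI_* presence flags */
#define RI_E2_ONLINE 0x02
#define RI_E3_ONLINE 0x04

struct reg_addr {
    uint32_t addr;  /* first register of the range */
    uint32_t size;  /* number of 32-bit registers  */
    uint32_t info;  /* RI_* chip/online flags      */
};

static const struct reg_addr regs[] = {
    { 0x103400, 1,   RI_E2_ONLINE | RI_E3_ONLINE },
    { 0x105000, 256, RI_E3_ONLINE },
};
/* the count is derived from the table itself, never maintained by hand */
#define REGS_COUNT ARRAY_SIZE(regs)

int main(void)
{
    uint32_t online = RI_E2_ONLINE; /* pretend we run on an E2 */
    uint32_t words = 0;

    for (size_t i = 0; i < REGS_COUNT; i++)
        if (regs[i].info & online)
            words += regs[i].size;

    printf("%zu ranges, %u dump words on this chip\n",
           (size_t)REGS_COUNT, (unsigned)words);
    return 0;
}

Deriving REGS_COUNT from the table itself is what lets the later ethtool code loop over the array without any count macro to keep in sync.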
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 727fe89ff37..221863059da 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -25,6 +25,7 @@
#include "bnx2x_cmn.h"
#include "bnx2x_dump.h"
#include "bnx2x_init.h"
+#include "bnx2x_sp.h"
/* Note: in the format strings below %s is replaced by the queue-name which is
* either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -37,8 +38,6 @@ static const struct {
char string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[] = {
/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
- { Q_STATS_OFFSET32(error_bytes_received_hi),
- 8, "[%s]: rx_error_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
8, "[%s]: rx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
@@ -52,13 +51,18 @@ static const struct {
4, "[%s]: rx_skb_alloc_discard" },
{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
-/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
- { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+ { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
+/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8, "[%s]: tx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
8, "[%s]: tx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
- 8, "[%s]: tx_bcast_packets" }
+ 8, "[%s]: tx_bcast_packets" },
+ { Q_STATS_OFFSET32(total_tpa_aggregations_hi),
+ 8, "[%s]: tpa_aggregations" },
+ { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+ 8, "[%s]: tpa_aggregated_frames"},
+ { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"}
};
#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
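
bnx2x_q_stats_arr is a table of { offset, size, printf-style label } descriptors; per the note at the top of this file, the "%s" in each label is later expanded with the queue name when the ethtool string set is built. A rough, self-contained sketch of that table-plus-snprintf pattern (the statistic names, queue count and ETH_GSTRING_LEN value are made up for illustration):

#include <stdio.h>

#define ETH_GSTRING_LEN 32  /* assumed value, for illustration only */
#define NUM_QUEUES 2

/* hypothetical descriptor: statistic width and a printf-style label */
struct q_stat_desc {
    int size;
    char string[ETH_GSTRING_LEN];
};

static const struct q_stat_desc q_stats[] = {
    { 8, "[%s]: rx_bytes" },
    { 8, "[%s]: tx_bytes" },
};
#define NUM_Q_STATS (sizeof(q_stats) / sizeof(q_stats[0]))

int main(void)
{
    char buf[NUM_QUEUES * NUM_Q_STATS][ETH_GSTRING_LEN];
    char queue_name[8];

    for (int i = 0; i < NUM_QUEUES; i++) {
        snprintf(queue_name, sizeof(queue_name), "%d", i);
        for (size_t j = 0; j < NUM_Q_STATS; j++) {
            /* expand "%s" with the queue name, as bnx2x_get_strings() does */
            snprintf(buf[i * NUM_Q_STATS + j], ETH_GSTRING_LEN,
                     q_stats[j].string, queue_name);
            puts(buf[i * NUM_Q_STATS + j]);
        }
    }
    return 0;
}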
@@ -98,8 +102,8 @@ static const struct {
8, STATS_FLAGS_BOTH, "rx_discards" },
{ STATS_OFFSET32(mac_filter_discard),
4, STATS_FLAGS_PORT, "rx_filtered_packets" },
- { STATS_OFFSET32(xxoverflow_discard),
- 4, STATS_FLAGS_PORT, "rx_fw_discards" },
+ { STATS_OFFSET32(mf_tag_discard),
+ 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
{ STATS_OFFSET32(brb_drop_hi),
8, STATS_FLAGS_PORT, "rx_brb_discard" },
{ STATS_OFFSET32(brb_truncate_hi),
@@ -158,10 +162,43 @@ static const struct {
{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
{ STATS_OFFSET32(pause_frames_sent_hi),
- 8, STATS_FLAGS_PORT, "tx_pause_frames" }
+ 8, STATS_FLAGS_PORT, "tx_pause_frames" },
+ { STATS_OFFSET32(total_tpa_aggregations_hi),
+ 8, STATS_FLAGS_FUNC, "tpa_aggregations" },
+ { STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+ 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
+ { STATS_OFFSET32(total_tpa_bytes_hi),
+ 8, STATS_FLAGS_FUNC, "tpa_bytes"}
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
+static int bnx2x_get_port_type(struct bnx2x *bp)
+{
+ int port_type;
+ u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
+ switch (bp->link_params.phy[phy_idx].media_type) {
+ case ETH_PHY_SFP_FIBER:
+ case ETH_PHY_XFP_FIBER:
+ case ETH_PHY_KR:
+ case ETH_PHY_CX4:
+ port_type = PORT_FIBRE;
+ break;
+ case ETH_PHY_DA_TWINAX:
+ port_type = PORT_DA;
+ break;
+ case ETH_PHY_BASE_T:
+ port_type = PORT_TP;
+ break;
+ case ETH_PHY_NOT_PRESENT:
+ port_type = PORT_NONE;
+ break;
+ case ETH_PHY_UNSPECIFIED:
+ default:
+ port_type = PORT_OTHER;
+ break;
+ }
+ return port_type;
+}
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
@@ -188,12 +225,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (IS_MF(bp))
ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
- if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
- cmd->port = PORT_TP;
- else if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
- cmd->port = PORT_FIBRE;
- else
- BNX2X_ERR("XGXS PHY Failure detected\n");
+ cmd->port = bnx2x_get_port_type(bp);
cmd->phy_address = bp->mdio.prtad;
cmd->transceiver = XCVR_INTERNAL;
@@ -468,78 +500,179 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
+#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
+#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
+
+static inline bool bnx2x_is_reg_online(struct bnx2x *bp,
+ const struct reg_addr *reg_info)
+{
+ if (CHIP_IS_E1(bp))
+ return IS_E1_ONLINE(reg_info->info);
+ else if (CHIP_IS_E1H(bp))
+ return IS_E1H_ONLINE(reg_info->info);
+ else if (CHIP_IS_E2(bp))
+ return IS_E2_ONLINE(reg_info->info);
+ else if (CHIP_IS_E3A0(bp))
+ return IS_E3_ONLINE(reg_info->info);
+ else if (CHIP_IS_E3B0(bp))
+ return IS_E3B0_ONLINE(reg_info->info);
+ else
+ return false;
+}
+
+/******* Paged registers info selectors ********/
+static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return page_vals_e2;
+ else if (CHIP_IS_E3(bp))
+ return page_vals_e3;
+ else
+ return NULL;
+}
+
+static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return PAGE_MODE_VALUES_E2;
+ else if (CHIP_IS_E3(bp))
+ return PAGE_MODE_VALUES_E3;
+ else
+ return 0;
+}
+
+static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return page_write_regs_e2;
+ else if (CHIP_IS_E3(bp))
+ return page_write_regs_e3;
+ else
+ return NULL;
+}
+
+static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return PAGE_WRITE_REGS_E2;
+ else if (CHIP_IS_E3(bp))
+ return PAGE_WRITE_REGS_E3;
+ else
+ return 0;
+}
+
+static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return page_read_regs_e2;
+ else if (CHIP_IS_E3(bp))
+ return page_read_regs_e3;
+ else
+ return NULL;
+}
+
+static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
+{
+ if (CHIP_IS_E2(bp))
+ return PAGE_READ_REGS_E2;
+ else if (CHIP_IS_E3(bp))
+ return PAGE_READ_REGS_E3;
+ else
+ return 0;
+}
+
+static inline int __bnx2x_get_regs_len(struct bnx2x *bp)
+{
+ int num_pages = __bnx2x_get_page_reg_num(bp);
+ int page_write_num = __bnx2x_get_page_write_num(bp);
+ const struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp);
+ int page_read_num = __bnx2x_get_page_read_num(bp);
+ int regdump_len = 0;
+ int i, j, k;
+
+ for (i = 0; i < REGS_COUNT; i++)
+ if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
+ regdump_len += reg_addrs[i].size;
+
+ for (i = 0; i < num_pages; i++)
+ for (j = 0; j < page_write_num; j++)
+ for (k = 0; k < page_read_num; k++)
+ if (bnx2x_is_reg_online(bp, &page_read_addr[k]))
+ regdump_len += page_read_addr[k].size;
+
+ return regdump_len;
+}
static int bnx2x_get_regs_len(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
int regdump_len = 0;
- int i, j, k;
- if (CHIP_IS_E1(bp)) {
- for (i = 0; i < REGS_COUNT; i++)
- if (IS_E1_ONLINE(reg_addrs[i].info))
- regdump_len += reg_addrs[i].size;
-
- for (i = 0; i < WREGS_COUNT_E1; i++)
- if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
- regdump_len += wreg_addrs_e1[i].size *
- (1 + wreg_addrs_e1[i].read_regs_count);
-
- } else if (CHIP_IS_E1H(bp)) {
- for (i = 0; i < REGS_COUNT; i++)
- if (IS_E1H_ONLINE(reg_addrs[i].info))
- regdump_len += reg_addrs[i].size;
-
- for (i = 0; i < WREGS_COUNT_E1H; i++)
- if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
- regdump_len += wreg_addrs_e1h[i].size *
- (1 + wreg_addrs_e1h[i].read_regs_count);
- } else if (CHIP_IS_E2(bp)) {
- for (i = 0; i < REGS_COUNT; i++)
- if (IS_E2_ONLINE(reg_addrs[i].info))
- regdump_len += reg_addrs[i].size;
-
- for (i = 0; i < WREGS_COUNT_E2; i++)
- if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
- regdump_len += wreg_addrs_e2[i].size *
- (1 + wreg_addrs_e2[i].read_regs_count);
-
- for (i = 0; i < PAGE_MODE_VALUES_E2; i++)
- for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
- for (k = 0; k < PAGE_READ_REGS_E2; k++)
- if (IS_E2_ONLINE(page_read_regs_e2[k].
- info))
- regdump_len +=
- page_read_regs_e2[k].size;
- }
- }
+ regdump_len = __bnx2x_get_regs_len(bp);
regdump_len *= 4;
regdump_len += sizeof(struct dump_hdr);
return regdump_len;
}
-static inline void bnx2x_read_pages_regs_e2(struct bnx2x *bp, u32 *p)
+/**
+ * bnx2x_read_pages_regs - read "paged" registers
+ *
+ * @bp: device handle
+ * @p: output buffer
+ *
+ * Reads "paged" memories: memories that may only be read by first writing to a
+ * specific address ("write address") and then reading from a specific address
+ * ("read address"). There may be more than one write address per "page" and
+ * more than one read address per write address.
+ */
+static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
{
u32 i, j, k, n;
-
- for (i = 0; i < PAGE_MODE_VALUES_E2; i++) {
- for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
- REG_WR(bp, page_write_regs_e2[j], page_vals_e2[i]);
- for (k = 0; k < PAGE_READ_REGS_E2; k++)
- if (IS_E2_ONLINE(page_read_regs_e2[k].info))
+ /* addresses of the paged registers */
+ const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
+ /* number of paged registers */
+ int num_pages = __bnx2x_get_page_reg_num(bp);
+ /* write addresses */
+ const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
+ /* number of write addresses */
+ int write_num = __bnx2x_get_page_write_num(bp);
+ /* read addresses info */
+ const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
+ /* number of read addresses */
+ int read_num = __bnx2x_get_page_read_num(bp);
+
+ for (i = 0; i < num_pages; i++) {
+ for (j = 0; j < write_num; j++) {
+ REG_WR(bp, write_addr[j], page_addr[i]);
+ for (k = 0; k < read_num; k++)
+ if (bnx2x_is_reg_online(bp, &read_addr[k]))
for (n = 0; n <
- page_read_regs_e2[k].size; n++)
+ read_addr[k].size; n++)
*p++ = REG_RD(bp,
- page_read_regs_e2[k].addr + n*4);
+ read_addr[k].addr + n*4);
}
}
}
+static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+{
+ u32 i, j;
+
+ /* Read the regular registers */
+ for (i = 0; i < REGS_COUNT; i++)
+ if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
+ for (j = 0; j < reg_addrs[i].size; j++)
+ *p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
+
+ /* Read "paged" registers */
+ bnx2x_read_pages_regs(bp, p);
+}
+
static void bnx2x_get_regs(struct net_device *dev,
struct ethtool_regs *regs, void *_p)
{
- u32 *p = _p, i, j;
+ u32 *p = _p;
struct bnx2x *bp = netdev_priv(dev);
struct dump_hdr dump_hdr = {0};
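
The new paged-register helpers reduce to one triple loop, as the kernel-doc above describes: for every page value, write it to every write address, then read every online read range. A minimal runnable sketch of that access pattern, with REG_WR()/REG_RD() replaced by hypothetical stubs over an in-memory array and made-up addresses:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static uint32_t fake_bar[256]; /* stand-in for the device register BAR */
static void reg_wr(uint32_t addr, uint32_t val) { fake_bar[addr / 4] = val; }
static uint32_t reg_rd(uint32_t addr) { return fake_bar[addr / 4]; }

struct reg_addr { uint32_t addr; uint32_t size; };

static const uint32_t page_vals[]       = { 0, 128 }; /* page selector values */
static const uint32_t page_write_regs[] = { 0x40 };   /* where selectors go   */
static const struct reg_addr page_read_regs[] = { { 0x80, 8 } };

int main(void)
{
    uint32_t dump[64];
    size_t n = 0;

    for (size_t i = 0; i < ARRAY_SIZE(page_vals); i++)
        for (size_t j = 0; j < ARRAY_SIZE(page_write_regs); j++) {
            /* select the page, then read every register of every range */
            reg_wr(page_write_regs[j], page_vals[i]);
            for (size_t k = 0; k < ARRAY_SIZE(page_read_regs); k++)
                for (uint32_t r = 0; r < page_read_regs[k].size; r++)
                    dump[n++] = reg_rd(page_read_regs[k].addr + r * 4);
        }

    printf("dumped %zu paged register words, first = 0x%x\n",
           n, (unsigned)dump[0]);
    return 0;
}

The matching length calculation in __bnx2x_get_regs_len() is the same loop with the reads replaced by size accumulation.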
@@ -566,44 +699,21 @@ static void bnx2x_get_regs(struct net_device *dev,
dump_hdr.info = RI_E1_ONLINE;
else if (CHIP_IS_E1H(bp))
dump_hdr.info = RI_E1H_ONLINE;
- else if (CHIP_IS_E2(bp))
+ else if (!CHIP_IS_E1x(bp))
dump_hdr.info = RI_E2_ONLINE |
(BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
p += dump_hdr.hdr_size + 1;
- if (CHIP_IS_E1(bp)) {
- for (i = 0; i < REGS_COUNT; i++)
- if (IS_E1_ONLINE(reg_addrs[i].info))
- for (j = 0; j < reg_addrs[i].size; j++)
- *p++ = REG_RD(bp,
- reg_addrs[i].addr + j*4);
-
- } else if (CHIP_IS_E1H(bp)) {
- for (i = 0; i < REGS_COUNT; i++)
- if (IS_E1H_ONLINE(reg_addrs[i].info))
- for (j = 0; j < reg_addrs[i].size; j++)
- *p++ = REG_RD(bp,
- reg_addrs[i].addr + j*4);
-
- } else if (CHIP_IS_E2(bp)) {
- for (i = 0; i < REGS_COUNT; i++)
- if (IS_E2_ONLINE(reg_addrs[i].info))
- for (j = 0; j < reg_addrs[i].size; j++)
- *p++ = REG_RD(bp,
- reg_addrs[i].addr + j*4);
-
- bnx2x_read_pages_regs_e2(bp, p);
- }
+ /* Actually read the registers */
+ __bnx2x_get_regs(bp, p);
+
/* Re-enable parity attentions */
bnx2x_clear_blocks_parity(bp);
- if (CHIP_PARITY_ENABLED(bp))
- bnx2x_enable_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
}
-#define PHY_FW_VER_LEN 20
-
static void bnx2x_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -682,8 +792,12 @@ static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
struct bnx2x *bp = netdev_priv(dev);
- if (capable(CAP_NET_ADMIN))
+ if (capable(CAP_NET_ADMIN)) {
+ /* dump MCP trace */
+ if (level & BNX2X_MSG_MCP)
+ bnx2x_fw_dump_lvl(bp, KERN_INFO);
bp->msg_enable = level;
+ }
}
static int bnx2x_nway_reset(struct net_device *dev)
@@ -725,7 +839,7 @@ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
u32 val = 0;
/* adjust timeout for emulation/FPGA */
- count = NVRAM_TIMEOUT_COUNT;
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
if (CHIP_REV_IS_SLOW(bp))
count *= 100;
@@ -756,7 +870,7 @@ static int bnx2x_release_nvram_lock(struct bnx2x *bp)
u32 val = 0;
/* adjust timeout for emulation/FPGA */
- count = NVRAM_TIMEOUT_COUNT;
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
if (CHIP_REV_IS_SLOW(bp))
count *= 100;
@@ -824,7 +938,7 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
/* adjust timeout for emulation/FPGA */
- count = NVRAM_TIMEOUT_COUNT;
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
if (CHIP_REV_IS_SLOW(bp))
count *= 100;
@@ -947,7 +1061,7 @@ static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
/* adjust timeout for emulation/FPGA */
- count = NVRAM_TIMEOUT_COUNT;
+ count = BNX2X_NVRAM_TIMEOUT_COUNT;
if (CHIP_REV_IS_SLOW(bp))
count *= 100;
@@ -1051,9 +1165,9 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
while ((written_so_far < buf_size) && (rc == 0)) {
if (written_so_far == (buf_size - sizeof(u32)))
cmd_flags |= MCPR_NVM_COMMAND_LAST;
- else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
+ else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0)
cmd_flags |= MCPR_NVM_COMMAND_LAST;
- else if ((offset % NVRAM_PAGE_SIZE) == 0)
+ else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0)
cmd_flags |= MCPR_NVM_COMMAND_FIRST;
memcpy(&val, data_buf, 4);
@@ -1212,7 +1326,6 @@ static int bnx2x_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc = 0;
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
printk(KERN_ERR "Handling parity error recovery. Try again later\n");
@@ -1229,12 +1342,7 @@ static int bnx2x_set_ringparam(struct net_device *dev,
bp->rx_ring_size = ering->rx_pending;
bp->tx_ring_size = ering->tx_pending;
- if (netif_running(dev)) {
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- rc = bnx2x_nic_load(bp, LOAD_NORMAL);
- }
-
- return rc;
+ return bnx2x_reload_if_running(dev);
}
static void bnx2x_get_pauseparam(struct net_device *dev,
@@ -1313,60 +1421,129 @@ static const struct {
{ "idle check (online)" }
};
+enum {
+ BNX2X_CHIP_E1_OFST = 0,
+ BNX2X_CHIP_E1H_OFST,
+ BNX2X_CHIP_E2_OFST,
+ BNX2X_CHIP_E3_OFST,
+ BNX2X_CHIP_E3B0_OFST,
+ BNX2X_CHIP_MAX_OFST
+};
+
+#define BNX2X_CHIP_MASK_E1 (1 << BNX2X_CHIP_E1_OFST)
+#define BNX2X_CHIP_MASK_E1H (1 << BNX2X_CHIP_E1H_OFST)
+#define BNX2X_CHIP_MASK_E2 (1 << BNX2X_CHIP_E2_OFST)
+#define BNX2X_CHIP_MASK_E3 (1 << BNX2X_CHIP_E3_OFST)
+#define BNX2X_CHIP_MASK_E3B0 (1 << BNX2X_CHIP_E3B0_OFST)
+
+#define BNX2X_CHIP_MASK_ALL ((1 << BNX2X_CHIP_MAX_OFST) - 1)
+#define BNX2X_CHIP_MASK_E1X (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
+
static int bnx2x_test_registers(struct bnx2x *bp)
{
int idx, i, rc = -ENODEV;
- u32 wr_val = 0;
+ u32 wr_val = 0, hw;
int port = BP_PORT(bp);
static const struct {
+ u32 hw;
u32 offset0;
u32 offset1;
u32 mask;
} reg_tbl[] = {
-/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
- { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
- { HC_REG_AGG_INT_0, 4, 0x000003ff },
- { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
- { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
- { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
- { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
- { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
- { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
- { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
-/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
- { QM_REG_CONNNUM_0, 4, 0x000fffff },
- { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
- { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
- { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
- { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
- { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
- { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
- { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
- { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
-/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
- { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
- { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
- { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
- { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
- { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
- { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
- { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
- { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
- { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
-/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
- { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
- { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
- { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
- { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
- { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
- { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
-
- { 0xffffffff, 0, 0x00000000 }
+/* 0 */ { BNX2X_CHIP_MASK_ALL,
+ BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
+ { BNX2X_CHIP_MASK_ALL,
+ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
+ { BNX2X_CHIP_MASK_E1X,
+ HC_REG_AGG_INT_0, 4, 0x000003ff },
+ { BNX2X_CHIP_MASK_ALL,
+ PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
+ PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
+ { BNX2X_CHIP_MASK_E3B0,
+ PBF_REG_INIT_CRD_Q0, 4, 0x000007ff },
+ { BNX2X_CHIP_MASK_ALL,
+ PRS_REG_CID_PORT_0, 4, 0x00ffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
+/* 10 */ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
+ { BNX2X_CHIP_MASK_ALL,
+ QM_REG_CONNNUM_0, 4, 0x000fffff },
+ { BNX2X_CHIP_MASK_ALL,
+ TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ SRC_REG_KEYRSS0_0, 40, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ SRC_REG_KEYRSS0_7, 40, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
+ { BNX2X_CHIP_MASK_ALL,
+ XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
+/* 20 */ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
+/* 30 */ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
+ { BNX2X_CHIP_MASK_ALL,
+ NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
+ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+ NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
+
+ { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
};
if (!netif_running(bp->dev))
return rc;
+ if (CHIP_IS_E1(bp))
+ hw = BNX2X_CHIP_MASK_E1;
+ else if (CHIP_IS_E1H(bp))
+ hw = BNX2X_CHIP_MASK_E1H;
+ else if (CHIP_IS_E2(bp))
+ hw = BNX2X_CHIP_MASK_E2;
+ else if (CHIP_IS_E3B0(bp))
+ hw = BNX2X_CHIP_MASK_E3B0;
+ else /* e3 A0 */
+ hw = BNX2X_CHIP_MASK_E3;
+
/* Repeat the test twice:
First by writing 0x00000000, second by writing 0xffffffff */
for (idx = 0; idx < 2; idx++) {
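
The reworked reg_tbl[] gives every entry a chip mask, so bnx2x_test_registers() computes a single hw bit for the running chip and skips entries that don't carry it, instead of special-casing offsets such as HC_REG_AGG_INT_0. A small sketch of that filtering idea with invented chip names and offsets:

#include <stdio.h>
#include <stdint.h>

enum { CHIP_A_OFST, CHIP_B_OFST, CHIP_MAX_OFST };

#define CHIP_MASK_A   (1u << CHIP_A_OFST)
#define CHIP_MASK_B   (1u << CHIP_B_OFST)
#define CHIP_MASK_ALL ((1u << CHIP_MAX_OFST) - 1)

struct reg_test {
    uint32_t hw;      /* which chips this entry applies to */
    uint32_t offset;
    uint32_t mask;
};

static const struct reg_test reg_tbl[] = {
    { CHIP_MASK_ALL, 0x1000, 0x000003ff },
    { CHIP_MASK_A,   0x2000, 0x00000001 }, /* only tested on chip A */
    { CHIP_MASK_ALL, 0xffffffff, 0 },      /* terminator */
};

int main(void)
{
    uint32_t hw = CHIP_MASK_B; /* pretend we run on chip B */

    for (int i = 0; reg_tbl[i].offset != 0xffffffff; i++) {
        if (!(hw & reg_tbl[i].hw))
            continue; /* entry not valid on this chip */
        printf("would test offset 0x%x mask 0x%x\n",
               (unsigned)reg_tbl[i].offset, (unsigned)reg_tbl[i].mask);
    }
    return 0;
}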
@@ -1382,8 +1559,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
u32 offset, mask, save_val, val;
- if (CHIP_IS_E2(bp) &&
- reg_tbl[i].offset0 == HC_REG_AGG_INT_0)
+ if (!(hw & reg_tbl[i].hw))
continue;
offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
@@ -1400,7 +1576,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
/* verify value is as expected */
if ((val & mask) != (wr_val & mask)) {
- DP(NETIF_MSG_PROBE,
+ DP(NETIF_MSG_HW,
"offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
offset, val, wr_val, mask);
goto test_reg_exit;
@@ -1417,7 +1593,7 @@ test_reg_exit:
static int bnx2x_test_memory(struct bnx2x *bp)
{
int i, j, rc = -ENODEV;
- u32 val;
+ u32 val, index;
static const struct {
u32 offset;
int size;
@@ -1432,32 +1608,44 @@ static int bnx2x_test_memory(struct bnx2x *bp)
{ 0xffffffff, 0 }
};
+
static const struct {
char *name;
u32 offset;
- u32 e1_mask;
- u32 e1h_mask;
- u32 e2_mask;
+ u32 hw_mask[BNX2X_CHIP_MAX_OFST];
} prty_tbl[] = {
- { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0, 0 },
- { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2, 0 },
- { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0, 0 },
- { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0, 0 },
- { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0, 0 },
- { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0, 0 },
-
- { NULL, 0xffffffff, 0, 0, 0 }
+ { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS,
+ {0x3ffc0, 0, 0, 0} },
+ { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS,
+ {0x2, 0x2, 0, 0} },
+ { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
+ {0, 0, 0, 0} },
+ { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS,
+ {0x3ffc0, 0, 0, 0} },
+ { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS,
+ {0x3ffc0, 0, 0, 0} },
+ { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS,
+ {0x3ffc1, 0, 0, 0} },
+
+ { NULL, 0xffffffff, {0, 0, 0, 0} }
};
if (!netif_running(bp->dev))
return rc;
+ if (CHIP_IS_E1(bp))
+ index = BNX2X_CHIP_E1_OFST;
+ else if (CHIP_IS_E1H(bp))
+ index = BNX2X_CHIP_E1H_OFST;
+ else if (CHIP_IS_E2(bp))
+ index = BNX2X_CHIP_E2_OFST;
+ else /* e3 */
+ index = BNX2X_CHIP_E3_OFST;
+
/* pre-Check the parity status */
for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
val = REG_RD(bp, prty_tbl[i].offset);
- if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
- (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
- (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
+ if (val & ~(prty_tbl[i].hw_mask[index])) {
DP(NETIF_MSG_HW,
"%s is 0x%x\n", prty_tbl[i].name, val);
goto test_mem_exit;
@@ -1472,9 +1660,7 @@ static int bnx2x_test_memory(struct bnx2x *bp)
/* Check the parity status */
for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
val = REG_RD(bp, prty_tbl[i].offset);
- if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
- (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
- (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
+ if (val & ~(prty_tbl[i].hw_mask[index])) {
DP(NETIF_MSG_HW,
"%s is 0x%x\n", prty_tbl[i].name, val);
goto test_mem_exit;
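
prty_tbl[] takes the complementary approach: each entry carries an array of expected parity masks indexed by a per-chip offset, so the chained CHIP_IS_E1()/E1H()/E2() checks collapse into one lookup. A short sketch of the lookup, again with placeholder blocks and mask values:

#include <stdio.h>
#include <stdint.h>

enum { CHIP_A_OFST, CHIP_B_OFST, CHIP_MAX_OFST };

struct prty_entry {
    const char *name;
    uint32_t hw_mask[CHIP_MAX_OFST]; /* bits allowed to be set, per chip */
};

static const struct prty_entry prty_tbl[] = {
    { "BLOCK_X_PRTY_STS", { 0x3ffc0, 0 } },
    { NULL,               { 0, 0 } },
};

/* pretend parity-status register read */
static uint32_t reg_rd_status(int i) { return i == 0 ? 0x40 : 0; }

int main(void)
{
    unsigned int index = CHIP_A_OFST; /* selected once from CHIP_IS_*() */

    for (int i = 0; prty_tbl[i].name; i++) {
        uint32_t val = reg_rd_status(i);
        if (val & ~prty_tbl[i].hw_mask[index])
            printf("%s reports unexpected parity bits: 0x%x\n",
                   prty_tbl[i].name, (unsigned)val);
    }
    return 0;
}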
@@ -1491,28 +1677,33 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
{
int cnt = 1400;
- if (link_up)
+ if (link_up) {
while (bnx2x_link_test(bp, is_serdes) && cnt--)
- msleep(10);
+ msleep(20);
+
+ if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
+ DP(NETIF_MSG_LINK, "Timeout waiting for link up\n");
+ }
}
-static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
+static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
{
unsigned int pkt_size, num_pkts, i;
struct sk_buff *skb;
unsigned char *packet;
struct bnx2x_fastpath *fp_rx = &bp->fp[0];
struct bnx2x_fastpath *fp_tx = &bp->fp[0];
+ struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
u16 tx_start_idx, tx_idx;
u16 rx_start_idx, rx_idx;
- u16 pkt_prod, bd_prod;
+ u16 pkt_prod, bd_prod, rx_comp_cons;
struct sw_tx_bd *tx_buf;
struct eth_tx_start_bd *tx_start_bd;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
dma_addr_t mapping;
union eth_rx_cqe *cqe;
- u8 cqe_fp_flags;
+ u8 cqe_fp_flags, cqe_fp_type;
struct sw_rx_bd *rx_buf;
u16 len;
int rc = -ENODEV;
@@ -1524,7 +1715,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
return -EINVAL;
break;
case BNX2X_MAC_LOOPBACK:
- bp->link_params.loopback_mode = LOOPBACK_BMAC;
+ bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
+ LOOPBACK_XMAC : LOOPBACK_BMAC;
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
break;
default:
@@ -1545,22 +1737,28 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
for (i = ETH_HLEN; i < pkt_size; i++)
packet[i] = (unsigned char) (i & 0xff);
+ mapping = dma_map_single(&bp->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ rc = -ENOMEM;
+ dev_kfree_skb(skb);
+ BNX2X_ERR("Unable to map SKB\n");
+ goto test_loopback_exit;
+ }
/* send the loopback packet */
num_pkts = 0;
- tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
+ tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
- pkt_prod = fp_tx->tx_pkt_prod++;
- tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
- tx_buf->first_bd = fp_tx->tx_bd_prod;
+ pkt_prod = txdata->tx_pkt_prod++;
+ tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
+ tx_buf->first_bd = txdata->tx_bd_prod;
tx_buf->skb = skb;
tx_buf->flags = 0;
- bd_prod = TX_BD(fp_tx->tx_bd_prod);
- tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
- mapping = dma_map_single(&bp->pdev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
+ bd_prod = TX_BD(txdata->tx_bd_prod);
+ tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -1577,26 +1775,27 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
- pbd_e1x = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e1x;
- pbd_e2 = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e2;
+ pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+ pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
wmb();
- fp_tx->tx_db.data.prod += 2;
+ txdata->tx_db.data.prod += 2;
barrier();
- DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
+ DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
mmiowb();
+ barrier();
num_pkts++;
- fp_tx->tx_bd_prod += 2; /* start + pbd */
+ txdata->tx_bd_prod += 2; /* start + pbd */
udelay(100);
- tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
+ tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
if (tx_idx != tx_start_idx + num_pkts)
goto test_loopback_exit;
@@ -1610,7 +1809,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
* bnx2x_tx_int()), as both are taking netif_tx_lock().
*/
local_bh_disable();
- bnx2x_tx_int(fp_tx);
+ bnx2x_tx_int(bp, txdata);
local_bh_enable();
}
@@ -1618,9 +1817,11 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
if (rx_idx != rx_start_idx + num_pkts)
goto test_loopback_exit;
- cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
+ rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons);
+ cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)];
cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
- if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
+ cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
+ if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
goto test_loopback_rx_exit;
len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
@@ -1628,6 +1829,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
goto test_loopback_rx_exit;
rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
+ dma_sync_single_for_cpu(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ fp_rx->rx_buf_size, DMA_FROM_DEVICE);
skb = rx_buf->skb;
skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
for (i = ETH_HLEN; i < pkt_size; i++)
@@ -1653,7 +1857,7 @@ test_loopback_exit:
return rc;
}
-static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
+static int bnx2x_test_loopback(struct bnx2x *bp)
{
int rc = 0, res;
@@ -1666,13 +1870,13 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
bnx2x_netif_stop(bp, 1);
bnx2x_acquire_phy_lock(bp);
- res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
+ res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
if (res) {
DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
rc |= BNX2X_PHY_LOOPBACK_FAILED;
}
- res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
+ res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
if (res) {
DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
rc |= BNX2X_MAC_LOOPBACK_FAILED;
@@ -1744,39 +1948,20 @@ test_nvram_exit:
return rc;
}
+/* Send an EMPTY ramrod on the first queue */
static int bnx2x_test_intr(struct bnx2x *bp)
{
- struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
- int i, rc;
+ struct bnx2x_queue_state_params params = {0};
if (!netif_running(bp->dev))
return -ENODEV;
- config->hdr.length = 0;
- if (CHIP_IS_E1(bp))
- config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
- else
- config->hdr.offset = BP_FUNC(bp);
- config->hdr.client_id = bp->fp->cl_id;
- config->hdr.reserved1 = 0;
-
- bp->set_mac_pending = 1;
- smp_wmb();
- rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
- U64_HI(bnx2x_sp_mapping(bp, mac_config)),
- U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
- if (rc == 0) {
- for (i = 0; i < 10; i++) {
- if (!bp->set_mac_pending)
- break;
- smp_rmb();
- msleep_interruptible(10);
- }
- if (i == 10)
- rc = -ENODEV;
- }
+ params.q_obj = &bp->fp->q_obj;
+ params.cmd = BNX2X_Q_CMD_EMPTY;
- return rc;
+ __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+ return bnx2x_queue_state_change(bp, &params);
}
static void bnx2x_self_test(struct net_device *dev,
@@ -1815,7 +2000,7 @@ static void bnx2x_self_test(struct net_device *dev,
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
bnx2x_nic_load(bp, LOAD_DIAG);
/* wait until link state is restored */
- bnx2x_wait_for_link(bp, link_up, is_serdes);
+ bnx2x_wait_for_link(bp, 1, is_serdes);
if (bnx2x_test_registers(bp) != 0) {
buf[0] = 1;
@@ -1826,7 +2011,7 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
}
- buf[2] = bnx2x_test_loopback(bp, link_up);
+ buf[2] = bnx2x_test_loopback(bp);
if (buf[2] != 0)
etest->flags |= ETH_TEST_FL_FAILED;
@@ -1864,6 +2049,14 @@ static void bnx2x_self_test(struct net_device *dev,
#define IS_MF_MODE_STAT(bp) \
(IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
+/* ethtool statistics are displayed for all regular ethernet queues and the
+ * fcoe L2 queue if not disabled
+ */
+static inline int bnx2x_num_stat_queues(struct bnx2x *bp)
+{
+ return BNX2X_NUM_ETH_QUEUES(bp);
+}
+
static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1872,7 +2065,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
switch (stringset) {
case ETH_SS_STATS:
if (is_multi(bp)) {
- num_stats = BNX2X_NUM_STAT_QUEUES(bp) *
+ num_stats = bnx2x_num_stat_queues(bp) *
BNX2X_NUM_Q_STATS;
if (!IS_MF_MODE_STAT(bp))
num_stats += BNX2X_NUM_STATS;
@@ -1905,14 +2098,9 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
case ETH_SS_STATS:
if (is_multi(bp)) {
k = 0;
- for_each_napi_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
memset(queue_name, 0, sizeof(queue_name));
-
- if (IS_FCOE_IDX(i))
- sprintf(queue_name, "fcoe");
- else
- sprintf(queue_name, "%d", i);
-
+ sprintf(queue_name, "%d", i);
for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
snprintf(buf + (k + j)*ETH_GSTRING_LEN,
ETH_GSTRING_LEN,
@@ -1951,7 +2139,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
if (is_multi(bp)) {
k = 0;
- for_each_napi_queue(bp, i) {
+ for_each_eth_queue(bp, i) {
hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
if (bnx2x_q_stats_arr[j].size == 0) {
@@ -2069,14 +2257,30 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
size_t copy_size =
- min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
+ min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+ size_t i;
if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
return -EOPNOTSUPP;
- indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
- memcpy(indir->ring_index, bp->rx_indir_table,
- copy_size * sizeof(bp->rx_indir_table[0]));
+ /* Get the current configuration of the RSS indirection table */
+ bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
+
+ /*
+ * We can't use a memcpy(), as the internal storage of the
+ * indirection table is a u8 array while indir->ring_index
+ * points to an array of u32.
+ *
+ * The indirection table contains the FW Client IDs, so we need
+ * to align the returned table to the Client ID of the leading
+ * RSS queue.
+ */
+ for (i = 0; i < copy_size; i++)
+ indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
+
+ indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
+
return 0;
}
@@ -2085,21 +2289,33 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+ u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
return -EOPNOTSUPP;
- /* Validate size and indices */
- if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
+ /* validate the size */
+ if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
return -EINVAL;
- for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
- if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
+
+ for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+ /* validate the indices */
+ if (indir->ring_index[i] >= num_eth_queues)
return -EINVAL;
+ /*
+ * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
+ * as an internal storage of an indirection table is a u8 array
+ * while indir->ring_index points to an array of u32.
+ *
+ * Indirection table contains the FW Client IDs, so we need to
+ * align the received table to the Client ID of the leading RSS
+ * queue
+ */
+ ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
+ }
- memcpy(bp->rx_indir_table, indir->ring_index,
- indir->size * sizeof(bp->rx_indir_table[0]));
- bnx2x_push_indir_table(bp);
- return 0;
+ return bnx2x_config_rss_pf(bp, ind_table, false);
}
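
The comments in the two rxfh_indir hunks above describe the cl_id translation in words; the sketch below (illustration only, not part of the patch) shows the same round trip in one place. Kernel types (u8, u32, size_t) are assumed, and the leading client ID corresponds to bp->fp->cl_id in the hunks above.

/* Illustration only. The firmware-facing table is a u8 array of FW client
 * IDs, while ethtool works with u32 ring indices, so both directions are
 * per-element copies offset by the leading RSS queue's client ID.
 */
static void example_indir_to_ethtool(const u8 *fw_table, u32 *ring_index,
				     size_t size, u8 leading_cl_id)
{
	size_t i;

	for (i = 0; i < size; i++)
		ring_index[i] = fw_table[i] - leading_cl_id;	/* get path */
}

static void example_indir_to_fw(const u32 *ring_index, u8 *fw_table,
				size_t size, u8 leading_cl_id)
{
	size_t i;

	for (i = 0; i < size; i++)
		fw_table[i] = ring_index[i] + leading_cl_id;	/* set path */
}
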
static const struct ethtool_ops bnx2x_ethtool_ops = {
diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h
index 9fe367836a5..998652a1b85 100644
--- a/drivers/net/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x/bnx2x_fw_defs.h
@@ -10,249 +10,221 @@
#ifndef BNX2X_FW_DEFS_H
#define BNX2X_FW_DEFS_H
-#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[142].base)
+#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base)
#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
- (IRO[141].base + ((assertListEntry) * IRO[141].m1))
-#define CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
- (IRO[144].base + ((pfId) * IRO[144].m1))
+ (IRO[147].base + ((assertListEntry) * IRO[147].m1))
#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
- (IRO[149].base + (((pfId)>>1) * IRO[149].m1) + (((pfId)&1) * \
- IRO[149].m2))
+ (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
+ IRO[153].m2))
#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
- (IRO[150].base + (((pfId)>>1) * IRO[150].m1) + (((pfId)&1) * \
- IRO[150].m2))
+ (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
+ IRO[154].m2))
#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
- (IRO[156].base + ((funcId) * IRO[156].m1))
+ (IRO[159].base + ((funcId) * IRO[159].m1))
#define CSTORM_FUNC_EN_OFFSET(funcId) \
- (IRO[146].base + ((funcId) * IRO[146].m1))
-#define CSTORM_FUNCTION_MODE_OFFSET (IRO[153].base)
-#define CSTORM_IGU_MODE_OFFSET (IRO[154].base)
+ (IRO[149].base + ((funcId) * IRO[149].m1))
+#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[311].base + ((pfId) * IRO[311].m1))
+ (IRO[315].base + ((pfId) * IRO[315].m1))
#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
- (IRO[312].base + ((pfId) * IRO[312].m1))
- #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
- (IRO[304].base + ((pfId) * IRO[304].m1) + ((iscsiEqId) * \
- IRO[304].m2))
- #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[306].base + ((pfId) * IRO[306].m1) + ((iscsiEqId) * \
- IRO[306].m2))
- #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[305].base + ((pfId) * IRO[305].m1) + ((iscsiEqId) * \
- IRO[305].m2))
- #define \
- CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
- (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * \
- IRO[307].m2))
- #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
- (IRO[303].base + ((pfId) * IRO[303].m1) + ((iscsiEqId) * \
- IRO[303].m2))
- #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
- (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * \
- IRO[309].m2))
- #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
- (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * \
- IRO[308].m2))
+ (IRO[316].base + ((pfId) * IRO[316].m1))
+#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
+ (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
+ (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
+ (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
+ (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
+ (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+ (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
+ (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[310].base + ((pfId) * IRO[310].m1))
+ (IRO[314].base + ((pfId) * IRO[314].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[302].base + ((pfId) * IRO[302].m1))
+ (IRO[306].base + ((pfId) * IRO[306].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[301].base + ((pfId) * IRO[301].m1))
+ (IRO[305].base + ((pfId) * IRO[305].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[300].base + ((pfId) * IRO[300].m1))
-#define CSTORM_PATH_ID_OFFSET (IRO[159].base)
+ (IRO[304].base + ((pfId) * IRO[304].m1))
+#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[151].base + ((funcId) * IRO[151].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
- (IRO[137].base + ((pfId) * IRO[137].m1))
+ (IRO[142].base + ((pfId) * IRO[142].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
+ (IRO[143].base + ((pfId) * IRO[143].m1))
#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
- (IRO[136].base + ((pfId) * IRO[136].m1))
-#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[136].size)
+ (IRO[141].base + ((pfId) * IRO[141].m1))
+#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
- (IRO[138].base + ((pfId) * IRO[138].m1))
-#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[138].size)
-#define CSTORM_STATS_FLAGS_OFFSET(pfId) \
- (IRO[143].base + ((pfId) * IRO[143].m1))
+ (IRO[144].base + ((pfId) * IRO[144].m1))
+#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
+#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
+ (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
- (IRO[129].base + ((sbId) * IRO[129].m1))
+ (IRO[133].base + ((sbId) * IRO[133].m1))
+#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
+ (IRO[134].base + ((sbId) * IRO[134].m1))
+#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
+ (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
- (IRO[128].base + ((sbId) * IRO[128].m1))
-#define CSTORM_STATUS_BLOCK_SIZE (IRO[128].size)
-#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
(IRO[132].base + ((sbId) * IRO[132].m1))
-#define CSTORM_SYNC_BLOCK_SIZE (IRO[132].size)
+#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
+#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
+ (IRO[137].base + ((sbId) * IRO[137].m1))
+#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
- (IRO[151].base + ((vfId) * IRO[151].m1))
+ (IRO[155].base + ((vfId) * IRO[155].m1))
#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
- (IRO[152].base + ((vfId) * IRO[152].m1))
+ (IRO[156].base + ((vfId) * IRO[156].m1))
#define CSTORM_VF_TO_PF_OFFSET(funcId) \
- (IRO[147].base + ((funcId) * IRO[147].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[199].base)
+ (IRO[150].base + ((funcId) * IRO[150].m1))
+#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
- (IRO[198].base + ((pfId) * IRO[198].m1))
-#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[99].base)
+ (IRO[203].base + ((pfId) * IRO[203].m1))
+#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
- (IRO[98].base + ((assertListEntry) * IRO[98].m1))
- #define TSTORM_CLIENT_CONFIG_OFFSET(portId, clientId) \
- (IRO[197].base + ((portId) * IRO[197].m1) + ((clientId) * \
- IRO[197].m2))
-#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[104].base)
+ (IRO[101].base + ((assertListEntry) * IRO[101].m1))
+#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base)
#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
- (IRO[105].base)
-#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
- (IRO[96].base + ((pfId) * IRO[96].m1))
-#define TSTORM_FUNC_EN_OFFSET(funcId) \
- (IRO[101].base + ((funcId) * IRO[101].m1))
+ (IRO[108].base)
#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
- (IRO[195].base + ((pfId) * IRO[195].m1))
-#define TSTORM_FUNCTION_MODE_OFFSET (IRO[103].base)
-#define TSTORM_INDIRECTION_TABLE_OFFSET(pfId) \
- (IRO[91].base + ((pfId) * IRO[91].m1))
-#define TSTORM_INDIRECTION_TABLE_SIZE (IRO[91].size)
- #define \
- TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfId, iscsiConBufPblEntry) \
- (IRO[260].base + ((pfId) * IRO[260].m1) + ((iscsiConBufPblEntry) \
- * IRO[260].m2))
+ (IRO[201].base + ((pfId) * IRO[201].m1))
+#define TSTORM_FUNC_EN_OFFSET(funcId) \
+ (IRO[103].base + ((funcId) * IRO[103].m1))
#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[264].base + ((pfId) * IRO[264].m1))
+ (IRO[271].base + ((pfId) * IRO[271].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
- (IRO[265].base + ((pfId) * IRO[265].m1))
+ (IRO[272].base + ((pfId) * IRO[272].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
- (IRO[266].base + ((pfId) * IRO[266].m1))
+ (IRO[273].base + ((pfId) * IRO[273].m1))
#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
- (IRO[267].base + ((pfId) * IRO[267].m1))
+ (IRO[274].base + ((pfId) * IRO[274].m1))
#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[263].base + ((pfId) * IRO[263].m1))
+ (IRO[270].base + ((pfId) * IRO[270].m1))
#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[262].base + ((pfId) * IRO[262].m1))
+ (IRO[269].base + ((pfId) * IRO[269].m1))
#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[261].base + ((pfId) * IRO[261].m1))
+ (IRO[268].base + ((pfId) * IRO[268].m1))
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
- (IRO[259].base + ((pfId) * IRO[259].m1))
+ (IRO[267].base + ((pfId) * IRO[267].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
- (IRO[269].base + ((pfId) * IRO[269].m1))
+ (IRO[276].base + ((pfId) * IRO[276].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[256].base + ((pfId) * IRO[256].m1))
+ (IRO[263].base + ((pfId) * IRO[263].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
- (IRO[257].base + ((pfId) * IRO[257].m1))
+ (IRO[264].base + ((pfId) * IRO[264].m1))
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+ (IRO[265].base + ((pfId) * IRO[265].m1))
#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
- (IRO[258].base + ((pfId) * IRO[258].m1))
+ (IRO[266].base + ((pfId) * IRO[266].m1))
#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
- (IRO[196].base + ((pfId) * IRO[196].m1))
- #define TSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, tStatCntId) \
- (IRO[100].base + ((portId) * IRO[100].m1) + ((tStatCntId) * \
- IRO[100].m2))
-#define TSTORM_STATS_FLAGS_OFFSET(pfId) \
- (IRO[95].base + ((pfId) * IRO[95].m1))
+ (IRO[202].base + ((pfId) * IRO[202].m1))
+#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[105].base + ((funcId) * IRO[105].m1))
#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
- (IRO[211].base + ((pfId) * IRO[211].m1))
+ (IRO[216].base + ((pfId) * IRO[216].m1))
#define TSTORM_VF_TO_PF_OFFSET(funcId) \
- (IRO[102].base + ((funcId) * IRO[102].m1))
-#define USTORM_AGG_DATA_OFFSET (IRO[201].base)
-#define USTORM_AGG_DATA_SIZE (IRO[201].size)
-#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[170].base)
+ (IRO[104].base + ((funcId) * IRO[104].m1))
+#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
+#define USTORM_AGG_DATA_SIZE (IRO[206].size)
+#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
- (IRO[169].base + ((assertListEntry) * IRO[169].m1))
+ (IRO[176].base + ((assertListEntry) * IRO[176].m1))
+#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
+ (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
+ IRO[205].m2))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
- (IRO[178].base + ((portId) * IRO[178].m1))
-#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
- (IRO[172].base + ((pfId) * IRO[172].m1))
+ (IRO[183].base + ((portId) * IRO[183].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
- (IRO[313].base + ((pfId) * IRO[313].m1))
+ (IRO[317].base + ((pfId) * IRO[317].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
- (IRO[174].base + ((funcId) * IRO[174].m1))
-#define USTORM_FUNCTION_MODE_OFFSET (IRO[177].base)
+ (IRO[178].base + ((funcId) * IRO[178].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[277].base + ((pfId) * IRO[277].m1))
+ (IRO[281].base + ((pfId) * IRO[281].m1))
#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
- (IRO[278].base + ((pfId) * IRO[278].m1))
-#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
(IRO[282].base + ((pfId) * IRO[282].m1))
+#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+ (IRO[286].base + ((pfId) * IRO[286].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
- (IRO[279].base + ((pfId) * IRO[279].m1))
+ (IRO[283].base + ((pfId) * IRO[283].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[275].base + ((pfId) * IRO[275].m1))
+ (IRO[279].base + ((pfId) * IRO[279].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[274].base + ((pfId) * IRO[274].m1))
+ (IRO[278].base + ((pfId) * IRO[278].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[273].base + ((pfId) * IRO[273].m1))
+ (IRO[277].base + ((pfId) * IRO[277].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[276].base + ((pfId) * IRO[276].m1))
-#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
(IRO[280].base + ((pfId) * IRO[280].m1))
+#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
+ (IRO[284].base + ((pfId) * IRO[284].m1))
#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
- (IRO[281].base + ((pfId) * IRO[281].m1))
+ (IRO[285].base + ((pfId) * IRO[285].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
- (IRO[176].base + ((pfId) * IRO[176].m1))
- #define USTORM_PER_COUNTER_ID_STATS_OFFSET(portId, uStatCntId) \
- (IRO[173].base + ((portId) * IRO[173].m1) + ((uStatCntId) * \
- IRO[173].m2))
- #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
- (IRO[204].base + ((portId) * IRO[204].m1) + ((clientId) * \
- IRO[204].m2))
+ (IRO[182].base + ((pfId) * IRO[182].m1))
+#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[180].base + ((funcId) * IRO[180].m1))
+#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
+ (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \
+ IRO[209].m2))
#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
- (IRO[205].base + ((qzoneId) * IRO[205].m1))
-#define USTORM_STATS_FLAGS_OFFSET(pfId) \
- (IRO[171].base + ((pfId) * IRO[171].m1))
-#define USTORM_TPA_BTR_OFFSET (IRO[202].base)
-#define USTORM_TPA_BTR_SIZE (IRO[202].size)
+ (IRO[210].base + ((qzoneId) * IRO[210].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[207].base)
+#define USTORM_TPA_BTR_SIZE (IRO[207].size)
#define USTORM_VF_TO_PF_OFFSET(funcId) \
- (IRO[175].base + ((funcId) * IRO[175].m1))
-#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[59].base)
-#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[58].base)
-#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[54].base)
+ (IRO[179].base + ((funcId) * IRO[179].m1))
+#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
+#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
+#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base)
#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
- (IRO[53].base + ((assertListEntry) * IRO[53].m1))
+ (IRO[50].base + ((assertListEntry) * IRO[50].m1))
#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
- (IRO[47].base + ((portId) * IRO[47].m1))
-#define XSTORM_E1HOV_OFFSET(pfId) \
- (IRO[55].base + ((pfId) * IRO[55].m1))
-#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
- (IRO[45].base + ((pfId) * IRO[45].m1))
+ (IRO[43].base + ((portId) * IRO[43].m1))
#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
- (IRO[49].base + ((pfId) * IRO[49].m1))
+ (IRO[45].base + ((pfId) * IRO[45].m1))
#define XSTORM_FUNC_EN_OFFSET(funcId) \
- (IRO[51].base + ((funcId) * IRO[51].m1))
-#define XSTORM_FUNCTION_MODE_OFFSET (IRO[56].base)
+ (IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[290].base + ((pfId) * IRO[290].m1))
+ (IRO[294].base + ((pfId) * IRO[294].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
- (IRO[293].base + ((pfId) * IRO[293].m1))
+ (IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
- (IRO[294].base + ((pfId) * IRO[294].m1))
+ (IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
- (IRO[295].base + ((pfId) * IRO[295].m1))
+ (IRO[299].base + ((pfId) * IRO[299].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
- (IRO[296].base + ((pfId) * IRO[296].m1))
+ (IRO[300].base + ((pfId) * IRO[300].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
- (IRO[297].base + ((pfId) * IRO[297].m1))
+ (IRO[301].base + ((pfId) * IRO[301].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
- (IRO[298].base + ((pfId) * IRO[298].m1))
+ (IRO[302].base + ((pfId) * IRO[302].m1))
#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
- (IRO[299].base + ((pfId) * IRO[299].m1))
+ (IRO[303].base + ((pfId) * IRO[303].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[289].base + ((pfId) * IRO[289].m1))
+ (IRO[293].base + ((pfId) * IRO[293].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[288].base + ((pfId) * IRO[288].m1))
+ (IRO[292].base + ((pfId) * IRO[292].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[287].base + ((pfId) * IRO[287].m1))
+ (IRO[291].base + ((pfId) * IRO[291].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[292].base + ((pfId) * IRO[292].m1))
+ (IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
- (IRO[291].base + ((pfId) * IRO[291].m1))
+ (IRO[295].base + ((pfId) * IRO[295].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
- (IRO[286].base + ((pfId) * IRO[286].m1))
+ (IRO[290].base + ((pfId) * IRO[290].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[285].base + ((pfId) * IRO[285].m1))
+ (IRO[289].base + ((pfId) * IRO[289].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
- (IRO[284].base + ((pfId) * IRO[284].m1))
+ (IRO[288].base + ((pfId) * IRO[288].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
- (IRO[283].base + ((pfId) * IRO[283].m1))
-#define XSTORM_PATH_ID_OFFSET (IRO[65].base)
- #define XSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, xStatCntId) \
- (IRO[50].base + ((portId) * IRO[50].m1) + ((xStatCntId) * \
- IRO[50].m2))
+ (IRO[287].base + ((pfId) * IRO[287].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
- (IRO[48].base + ((pfId) * IRO[48].m1))
+ (IRO[44].base + ((pfId) * IRO[44].m1))
+#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+ (IRO[49].base + ((funcId) * IRO[49].m1))
#define XSTORM_SPQ_DATA_OFFSET(funcId) \
(IRO[32].base + ((funcId) * IRO[32].m1))
#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
@@ -260,42 +232,37 @@
(IRO[30].base + ((funcId) * IRO[30].m1))
#define XSTORM_SPQ_PROD_OFFSET(funcId) \
(IRO[31].base + ((funcId) * IRO[31].m1))
-#define XSTORM_STATS_FLAGS_OFFSET(pfId) \
- (IRO[43].base + ((pfId) * IRO[43].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
- (IRO[206].base + ((portId) * IRO[206].m1))
+ (IRO[211].base + ((portId) * IRO[211].m1))
#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
- (IRO[207].base + ((portId) * IRO[207].m1))
+ (IRO[212].base + ((portId) * IRO[212].m1))
#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
- (IRO[209].base + (((pfId)>>1) * IRO[209].m1) + (((pfId)&1) * \
- IRO[209].m2))
+ (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \
+ IRO[214].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
- (IRO[52].base + ((funcId) * IRO[52].m1))
+ (IRO[48].base + ((funcId) * IRO[48].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
-/* RSS hash types */
-#define DEFAULT_HASH_TYPE 0
-#define IPV4_HASH_TYPE 1
-#define TCP_IPV4_HASH_TYPE 2
-#define IPV6_HASH_TYPE 3
-#define TCP_IPV6_HASH_TYPE 4
-#define VLAN_PRI_HASH_TYPE 5
-#define E1HOV_PRI_HASH_TYPE 6
-#define DSCP_HASH_TYPE 7
+/**
+* This file defines HSI constants for the ETH flow
+*/
+#ifdef _EVEREST_MICROCODE
+#include "Microcode\Generated\DataTypes\eth_rx_bd.h"
+#include "Microcode\Generated\DataTypes\eth_tx_bd.h"
+#include "Microcode\Generated\DataTypes\eth_rx_cqe.h"
+#include "Microcode\Generated\DataTypes\eth_rx_sge.h"
+#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h"
+#endif
/* Ethernet Ring parameters */
#define X_ETH_LOCAL_RING_SIZE 13
-#define FIRST_BD_IN_PKT 0
+#define FIRST_BD_IN_PKT 0
#define PARSE_BD_INDEX 1
#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
#define U_ETH_NUM_OF_SGES_TO_FETCH 8
#define U_ETH_MAX_SGES_FOR_PACKET 3
-/*Tx params*/
-#define X_ETH_NO_VLAN 0
-#define X_ETH_OUTBAND_VLAN 1
-#define X_ETH_INBAND_VLAN 2
/* Rx ring params */
#define U_ETH_LOCAL_BD_RING_SIZE 8
#define U_ETH_LOCAL_SGE_RING_SIZE 10
@@ -311,79 +278,64 @@
#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
-#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
-#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
+#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1)
+#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1)
#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
#define U_ETH_UNDEFINED_Q 0xFF
-/* values of command IDs in the ramrod message */
-#define RAMROD_CMD_ID_ETH_UNUSED 0
-#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 1
-#define RAMROD_CMD_ID_ETH_UPDATE 2
-#define RAMROD_CMD_ID_ETH_HALT 3
-#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 4
-#define RAMROD_CMD_ID_ETH_ACTIVATE 5
-#define RAMROD_CMD_ID_ETH_DEACTIVATE 6
-#define RAMROD_CMD_ID_ETH_EMPTY 7
-#define RAMROD_CMD_ID_ETH_TERMINATE 8
-
-/* command values for set mac command */
-#define T_ETH_MAC_COMMAND_SET 0
-#define T_ETH_MAC_COMMAND_INVALIDATE 1
-
#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY 10
+#define ETH_NUM_OF_RSS_ENGINES_E2 72
+
+#define FILTER_RULES_COUNT 16
+#define MULTICAST_RULES_COUNT 16
+#define CLASSIFY_RULES_COUNT 16
/*The CRC32 seed, that is used for the hash(reduction) multicast address */
-#define T_ETH_CRC32_HASH_SEED 0x00000000
+#define ETH_CRC32_HASH_SEED 0x00000000
+
+#define ETH_CRC32_HASH_BIT_SIZE (8)
+#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1)
/* Maximal L2 clients supported */
#define ETH_MAX_RX_CLIENTS_E1 18
#define ETH_MAX_RX_CLIENTS_E1H 28
+#define ETH_MAX_RX_CLIENTS_E2 152
+
+/* Maximal statistics client Ids */
+#define MAX_STAT_COUNTER_ID_E1 36
+#define MAX_STAT_COUNTER_ID_E1H 56
+#define MAX_STAT_COUNTER_ID_E2 140
+
+#define MAX_MAC_CREDIT_E1 192 /* Per Chip */
+#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */
+#define MAX_MAC_CREDIT_E2 272 /* Per Path */
+#define MAX_VLAN_CREDIT_E1 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
-#define MAX_STAT_COUNTER_ID ETH_MAX_RX_CLIENTS_E1H
/* Maximal aggregation queues supported */
#define ETH_MAX_AGGREGATION_QUEUES_E1 32
-#define ETH_MAX_AGGREGATION_QUEUES_E1H 64
+#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
-/* ETH RSS modes */
-#define ETH_RSS_MODE_DISABLED 0
-#define ETH_RSS_MODE_REGULAR 1
-#define ETH_RSS_MODE_VLAN_PRI 2
-#define ETH_RSS_MODE_E1HOV_PRI 3
-#define ETH_RSS_MODE_IP_DSCP 4
-#define ETH_RSS_MODE_E2_INTEG 5
+#define ETH_NUM_OF_MCAST_BINS 256
+#define ETH_NUM_OF_MCAST_ENGINES_E2 72
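
(Side note, not part of the patch: ETH_CRC32_HASH_BIT_SIZE of 8 makes ETH_CRC32_HASH_MASK evaluate to (1 << 8) - 1 = 0xff, so the CRC32-based multicast hash reduction mentioned earlier in this hunk yields an 8-bit bin index, which lines up with ETH_NUM_OF_MCAST_BINS (256) defined just above.)
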
-/* ETH vlan filtering modes */
-#define ETH_VLAN_FILTER_ANY_VLAN 0 /* Don't filter by vlan */
-#define ETH_VLAN_FILTER_SPECIFIC_VLAN \
- 1 /* Only the vlan_id is allowed */
-#define ETH_VLAN_FILTER_CLASSIFY \
- 2 /* vlan will be added to CAM for classification */
+#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1 \
+ (ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
+ (ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)
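
(Quick arithmetic check of the new minimum-CQE formulas, not part of the patch: with MAX_RAMRODS_PER_PORT defined as 8 later in this header, ETH_MIN_RX_CQES_WITHOUT_TPA is 11, so ETH_MIN_RX_CQES_WITH_TPA_E1 = 32 + 11 = 43 and ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 = 64 + 11 = 75, using the aggregation-queue maxima defined above.)
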
-/* Fast path CQE selection */
-#define ETH_FP_CQE_REGULAR 0
-#define ETH_FP_CQE_SGL 1
-#define ETH_FP_CQE_RAW 2
+#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
/**
-* This file defines HSI constants common to all microcode flows
-*/
-
-/* Connection types */
-#define ETH_CONNECTION_TYPE 0
-#define TOE_CONNECTION_TYPE 1
-#define RDMA_CONNECTION_TYPE 2
-#define ISCSI_CONNECTION_TYPE 3
-#define FCOE_CONNECTION_TYPE 4
-#define RESERVED_CONNECTION_TYPE_0 5
-#define RESERVED_CONNECTION_TYPE_1 6
-#define RESERVED_CONNECTION_TYPE_2 7
-#define NONE_CONNECTION_TYPE 8
-
+ * This file defines HSI constants common to all microcode flows
+ */
#define PROTOCOL_STATE_BIT_OFFSET 6
@@ -391,25 +343,9 @@
#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
-/* values of command IDs in the ramrod message */
-#define RAMROD_CMD_ID_COMMON_FUNCTION_START 1
-#define RAMROD_CMD_ID_COMMON_FUNCTION_STOP 2
-#define RAMROD_CMD_ID_COMMON_CFC_DEL 3
-#define RAMROD_CMD_ID_COMMON_CFC_DEL_WB 4
-#define RAMROD_CMD_ID_COMMON_SET_MAC 5
-#define RAMROD_CMD_ID_COMMON_STAT_QUERY 6
-#define RAMROD_CMD_ID_COMMON_STOP_TRAFFIC 7
-#define RAMROD_CMD_ID_COMMON_START_TRAFFIC 8
-
/* microcode fixed page page size 4K (chains and ring segments) */
#define MC_PAGE_SIZE 4096
-
-/* Host coalescing constants */
-#define HC_IGU_BC_MODE 0
-#define HC_IGU_NBC_MODE 1
-/* Host coalescing constants. E1 includes E1H as well */
-
/* Number of indices per slow-path SB */
#define HC_SP_SB_MAX_INDICES 16
@@ -418,30 +354,17 @@
#define HC_SB_MAX_INDICES_E2 8
#define HC_SB_MAX_SB_E1X 32
-#define HC_SB_MAX_SB_E2 136
+#define HC_SB_MAX_SB_E2 136
#define HC_SP_SB_ID 0xde
-#define HC_REGULAR_SEGMENT 0
-#define HC_DEFAULT_SEGMENT 1
#define HC_SB_MAX_SM 2
#define HC_SB_MAX_DYNAMIC_INDICES 4
-#define HC_FUNCTION_DISABLED 0xff
-/* used by the driver to get the SB offset */
-#define USTORM_ID 0
-#define CSTORM_ID 1
-#define XSTORM_ID 2
-#define TSTORM_ID 3
-#define ATTENTION_ID 4
/* max number of slow path commands per port */
#define MAX_RAMRODS_PER_PORT 8
-/* values for RX ETH CQE type field */
-#define RX_ETH_CQE_TYPE_ETH_FASTPATH 0
-#define RX_ETH_CQE_TYPE_ETH_RAMROD 1
-
/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
@@ -451,7 +374,7 @@
#define XSEMI_CLK1_RESUL_CHIP (1e-3)
-#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6))
+#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
@@ -460,72 +383,28 @@
#define FW_LOG_LIST_SIZE 50
-#define NUM_OF_PROTOCOLS 4
#define NUM_OF_SAFC_BITS 16
#define MAX_COS_NUMBER 4
-
-#define FAIRNESS_COS_WRR_MODE 0
-#define FAIRNESS_COS_ETS_MODE 1
-
-
-/* Priority Flow Control (PFC) */
+#define MAX_TRAFFIC_TYPES 8
#define MAX_PFC_PRIORITIES 8
-#define MAX_PFC_TRAFFIC_TYPES 8
-
-/* Available Traffic Types for Link Layer Flow Control */
-#define LLFC_TRAFFIC_TYPE_NW 0
-#define LLFC_TRAFFIC_TYPE_FCOE 1
-#define LLFC_TRAFFIC_TYPE_ISCSI 2
- /***************** START OF E2 INTEGRATION \
- CODE***************************************/
-#define LLFC_TRAFFIC_TYPE_NW_COS1_E2INTEG 3
- /***************** END OF E2 INTEGRATION \
- CODE***************************************/
-#define LLFC_TRAFFIC_TYPE_MAX 4
/* used by array traffic_type_to_priority[] to mark traffic type \
that is not mapped to priority*/
#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
-#define LLFC_MODE_NONE 0
-#define LLFC_MODE_PFC 1
-#define LLFC_MODE_SAFC 2
-
-#define DCB_DISABLED 0
-#define DCB_ENABLED 1
-#define UNKNOWN_ADDRESS 0
-#define UNICAST_ADDRESS 1
-#define MULTICAST_ADDRESS 2
-#define BROADCAST_ADDRESS 3
+#define C_ERES_PER_PAGE \
+ (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
+#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
-#define SINGLE_FUNCTION 0
-#define MULTI_FUNCTION_SD 1
-#define MULTI_FUNCTION_SI 2
+#define STATS_QUERY_CMD_COUNT 16
-#define IP_V4 0
-#define IP_V6 1
+#define NIV_LIST_TABLE_SIZE 4096
+#define INVALID_VNIC_ID 0xFF
-#define C_ERES_PER_PAGE \
- (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
-#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
-#define EVENT_RING_OPCODE_VF_PF_CHANNEL 0
-#define EVENT_RING_OPCODE_FUNCTION_START 1
-#define EVENT_RING_OPCODE_FUNCTION_STOP 2
-#define EVENT_RING_OPCODE_CFC_DEL 3
-#define EVENT_RING_OPCODE_CFC_DEL_WB 4
-#define EVENT_RING_OPCODE_SET_MAC 5
-#define EVENT_RING_OPCODE_STAT_QUERY 6
-#define EVENT_RING_OPCODE_STOP_TRAFFIC 7
-#define EVENT_RING_OPCODE_START_TRAFFIC 8
-#define EVENT_RING_OPCODE_FORWARD_SETUP 9
-
-#define VF_PF_CHANNEL_STATE_READY 0
-#define VF_PF_CHANNEL_STATE_WAITING_FOR_ACK 1
-
-#define VF_PF_CHANNEL_STATE_MAX_NUMBER 2
+#define UNDEF_IRO 0x80000000
#endif /* BNX2X_FW_DEFS_H */
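
All of the renumbered *_OFFSET macros in this header follow the same IRO (indirection record) pattern: a firmware-supplied array entry gives a base offset plus one or two per-index strides. The sketch below is an illustration only; the struct name and field layout are assumptions mirroring how the macros use the fields, not the driver's real IRO definition.

/* Sketch of the addressing scheme behind the *_OFFSET / *_SIZE macros above.
 * Kernel types from <linux/types.h> are assumed.
 */
struct example_iro {
	u32 base;	/* base offset inside the STORM RAM */
	u16 m1;		/* stride for the first index (e.g. pfId, sbId) */
	u16 m2;		/* stride for the second index (e.g. iscsiEqId) */
	u16 size;	/* size of the addressed area, for the *_SIZE macros */
};

/* For example, the new CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) above
 * resolves to entry 307 of the IRO array:
 */
static u32 example_iscsi_eq_prod_offset(const struct example_iro *iro,
					u32 pfid, u32 eqid)
{
	return iro[307].base + pfid * iro[307].m1 + eqid * iro[307].m2;
}
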
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index cdf19fe7c7f..dc24de40e33 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -11,7 +11,7 @@
#include "bnx2x_fw_defs.h"
-#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
+#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
struct license_key {
u32 reserved[6];
@@ -33,201 +33,366 @@ struct license_key {
u32 reserved_b[4];
};
-#define PORT_0 0
-#define PORT_1 1
-#define PORT_MAX 2
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_MAX 2
/****************************************************************************
- * Shared HW configuration *
+ * Shared HW configuration *
****************************************************************************/
-struct shared_hw_cfg { /* NVRAM Offset */
+#define PIN_CFG_NA 0x00000000
+#define PIN_CFG_GPIO0_P0 0x00000001
+#define PIN_CFG_GPIO1_P0 0x00000002
+#define PIN_CFG_GPIO2_P0 0x00000003
+#define PIN_CFG_GPIO3_P0 0x00000004
+#define PIN_CFG_GPIO0_P1 0x00000005
+#define PIN_CFG_GPIO1_P1 0x00000006
+#define PIN_CFG_GPIO2_P1 0x00000007
+#define PIN_CFG_GPIO3_P1 0x00000008
+#define PIN_CFG_EPIO0 0x00000009
+#define PIN_CFG_EPIO1 0x0000000a
+#define PIN_CFG_EPIO2 0x0000000b
+#define PIN_CFG_EPIO3 0x0000000c
+#define PIN_CFG_EPIO4 0x0000000d
+#define PIN_CFG_EPIO5 0x0000000e
+#define PIN_CFG_EPIO6 0x0000000f
+#define PIN_CFG_EPIO7 0x00000010
+#define PIN_CFG_EPIO8 0x00000011
+#define PIN_CFG_EPIO9 0x00000012
+#define PIN_CFG_EPIO10 0x00000013
+#define PIN_CFG_EPIO11 0x00000014
+#define PIN_CFG_EPIO12 0x00000015
+#define PIN_CFG_EPIO13 0x00000016
+#define PIN_CFG_EPIO14 0x00000017
+#define PIN_CFG_EPIO15 0x00000018
+#define PIN_CFG_EPIO16 0x00000019
+#define PIN_CFG_EPIO17 0x0000001a
+#define PIN_CFG_EPIO18 0x0000001b
+#define PIN_CFG_EPIO19 0x0000001c
+#define PIN_CFG_EPIO20 0x0000001d
+#define PIN_CFG_EPIO21 0x0000001e
+#define PIN_CFG_EPIO22 0x0000001f
+#define PIN_CFG_EPIO23 0x00000020
+#define PIN_CFG_EPIO24 0x00000021
+#define PIN_CFG_EPIO25 0x00000022
+#define PIN_CFG_EPIO26 0x00000023
+#define PIN_CFG_EPIO27 0x00000024
+#define PIN_CFG_EPIO28 0x00000025
+#define PIN_CFG_EPIO29 0x00000026
+#define PIN_CFG_EPIO30 0x00000027
+#define PIN_CFG_EPIO31 0x00000028
+
+/* EPIO definition */
+#define EPIO_CFG_NA 0x00000000
+#define EPIO_CFG_EPIO0 0x00000001
+#define EPIO_CFG_EPIO1 0x00000002
+#define EPIO_CFG_EPIO2 0x00000003
+#define EPIO_CFG_EPIO3 0x00000004
+#define EPIO_CFG_EPIO4 0x00000005
+#define EPIO_CFG_EPIO5 0x00000006
+#define EPIO_CFG_EPIO6 0x00000007
+#define EPIO_CFG_EPIO7 0x00000008
+#define EPIO_CFG_EPIO8 0x00000009
+#define EPIO_CFG_EPIO9 0x0000000a
+#define EPIO_CFG_EPIO10 0x0000000b
+#define EPIO_CFG_EPIO11 0x0000000c
+#define EPIO_CFG_EPIO12 0x0000000d
+#define EPIO_CFG_EPIO13 0x0000000e
+#define EPIO_CFG_EPIO14 0x0000000f
+#define EPIO_CFG_EPIO15 0x00000010
+#define EPIO_CFG_EPIO16 0x00000011
+#define EPIO_CFG_EPIO17 0x00000012
+#define EPIO_CFG_EPIO18 0x00000013
+#define EPIO_CFG_EPIO19 0x00000014
+#define EPIO_CFG_EPIO20 0x00000015
+#define EPIO_CFG_EPIO21 0x00000016
+#define EPIO_CFG_EPIO22 0x00000017
+#define EPIO_CFG_EPIO23 0x00000018
+#define EPIO_CFG_EPIO24 0x00000019
+#define EPIO_CFG_EPIO25 0x0000001a
+#define EPIO_CFG_EPIO26 0x0000001b
+#define EPIO_CFG_EPIO27 0x0000001c
+#define EPIO_CFG_EPIO28 0x0000001d
+#define EPIO_CFG_EPIO29 0x0000001e
+#define EPIO_CFG_EPIO30 0x0000001f
+#define EPIO_CFG_EPIO31 0x00000020
+
+
+struct shared_hw_cfg { /* NVRAM Offset */
/* Up to 16 bytes of NULL-terminated string */
- u8 part_num[16]; /* 0x104 */
+ u8 part_num[16]; /* 0x104 */
+
+ u32 config; /* 0x114 */
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000
+ #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001
+ #define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002
- u32 config; /* 0x114 */
-#define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001
-#define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0
-#define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000
-#define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001
-#define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002
+ #define SHARED_HW_CFG_PORT_SWAP 0x00000004
-#define SHARED_HW_CFG_PORT_SWAP 0x00000004
+ #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008
-#define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008
+ #define SHARED_HW_CFG_PCIE_GEN3_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PCIE_GEN3_ENABLED 0x00000010
-#define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700
-#define SHARED_HW_CFG_MFW_SELECT_SHIFT 8
+ #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700
+ #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8
/* Whatever MFW found in NVM
(if multiple found, priority order is: NC-SI, UMP, IPMI) */
-#define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000
-#define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100
-#define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200
-#define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300
+ #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000
+ #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100
+ #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200
+ #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300
/* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI
(can only be used when an add-in board, not BMC, pulls-down SPIO4) */
-#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400
+ #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400
/* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI
(can only be used when an add-in board, not BMC, pulls-down SPIO4) */
-#define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500
+ #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500
/* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP
(can only be used when an add-in board, not BMC, pulls-down SPIO4) */
-#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600
-
-#define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000
-#define SHARED_HW_CFG_LED_MODE_SHIFT 16
-#define SHARED_HW_CFG_LED_MAC1 0x00000000
-#define SHARED_HW_CFG_LED_PHY1 0x00010000
-#define SHARED_HW_CFG_LED_PHY2 0x00020000
-#define SHARED_HW_CFG_LED_PHY3 0x00030000
-#define SHARED_HW_CFG_LED_MAC2 0x00040000
-#define SHARED_HW_CFG_LED_PHY4 0x00050000
-#define SHARED_HW_CFG_LED_PHY5 0x00060000
-#define SHARED_HW_CFG_LED_PHY6 0x00070000
-#define SHARED_HW_CFG_LED_MAC3 0x00080000
-#define SHARED_HW_CFG_LED_PHY7 0x00090000
-#define SHARED_HW_CFG_LED_PHY9 0x000a0000
-#define SHARED_HW_CFG_LED_PHY11 0x000b0000
-#define SHARED_HW_CFG_LED_MAC4 0x000c0000
-#define SHARED_HW_CFG_LED_PHY8 0x000d0000
-#define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
-
-
-#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
-#define SHARED_HW_CFG_AN_ENABLE_SHIFT 24
-#define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000
-#define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000
-#define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000
-#define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000
-#define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000
-#define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000
-
- u32 config2; /* 0x118 */
+ #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600
+
+ #define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000
+ #define SHARED_HW_CFG_LED_MODE_SHIFT 16
+ #define SHARED_HW_CFG_LED_MAC1 0x00000000
+ #define SHARED_HW_CFG_LED_PHY1 0x00010000
+ #define SHARED_HW_CFG_LED_PHY2 0x00020000
+ #define SHARED_HW_CFG_LED_PHY3 0x00030000
+ #define SHARED_HW_CFG_LED_MAC2 0x00040000
+ #define SHARED_HW_CFG_LED_PHY4 0x00050000
+ #define SHARED_HW_CFG_LED_PHY5 0x00060000
+ #define SHARED_HW_CFG_LED_PHY6 0x00070000
+ #define SHARED_HW_CFG_LED_MAC3 0x00080000
+ #define SHARED_HW_CFG_LED_PHY7 0x00090000
+ #define SHARED_HW_CFG_LED_PHY9 0x000a0000
+ #define SHARED_HW_CFG_LED_PHY11 0x000b0000
+ #define SHARED_HW_CFG_LED_MAC4 0x000c0000
+ #define SHARED_HW_CFG_LED_PHY8 0x000d0000
+ #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
+
+
+ #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
+ #define SHARED_HW_CFG_AN_ENABLE_SHIFT 24
+ #define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000
+ #define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000
+ #define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000
+ #define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000
+ #define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000
+ #define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000
+
+ #define SHARED_HW_CFG_SRIOV_MASK 0x40000000
+ #define SHARED_HW_CFG_SRIOV_DISABLED 0x00000000
+ #define SHARED_HW_CFG_SRIOV_ENABLED 0x40000000
+
+ #define SHARED_HW_CFG_ATC_MASK 0x80000000
+ #define SHARED_HW_CFG_ATC_DISABLED 0x00000000
+ #define SHARED_HW_CFG_ATC_ENABLED 0x80000000
+
+ u32 config2; /* 0x118 */
/* one time auto detect grace period (in sec) */
-#define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff
-#define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0
+ #define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff
+ #define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0
-#define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100
+ #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100
+ #define SHARED_HW_CFG_PCIE_GEN2_DISABLED 0x00000000
/* The default value for the core clock is 250MHz and it is
achieved by setting the clock change to 4 */
-#define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00
-#define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9
+ #define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00
+ #define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9
-#define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000
-#define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000
+ #define SHARED_HW_CFG_SMBUS_TIMING_MASK 0x00001000
+ #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000
+ #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000
-#define SHARED_HW_CFG_HIDE_PORT1 0x00002000
+ #define SHARED_HW_CFG_HIDE_PORT1 0x00002000
- /* The fan failure mechanism is usually related to the PHY type
- since the power consumption of the board is determined by the PHY.
- Currently, fan is required for most designs with SFX7101, BCM8727
- and BCM8481. If a fan is not required for a board which uses one
- of those PHYs, this field should be set to "Disabled". If a fan is
- required for a different PHY type, this option should be set to
- "Enabled".
- The fan failure indication is expected on
- SPIO5 */
-#define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000
-#define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19
-#define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000
-#define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000
-#define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000
-
- /* Set the MDC/MDIO access for the first external phy */
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000
-
- /* Set the MDC/MDIO access for the second external phy */
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000
-#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000
- u32 power_dissipated; /* 0x11c */
-#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000
-#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24
-
-#define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000
-#define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16
-#define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000
-#define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000
-#define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000
-#define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000
-
- u32 ump_nc_si_config; /* 0x120 */
-#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003
-#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0
-#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000
-#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001
-#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000
-#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002
-
-#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00
-#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8
-
-#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000
-#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16
-#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000
-#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000
-
- u32 board; /* 0x124 */
-#define SHARED_HW_CFG_BOARD_REV_MASK 0x00FF0000
-#define SHARED_HW_CFG_BOARD_REV_SHIFT 16
-
-#define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0F000000
-#define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24
-
-#define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xF0000000
-#define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28
-
- u32 reserved; /* 0x128 */
+ #define SHARED_HW_CFG_WOL_CAPABLE_MASK 0x00004000
+ #define SHARED_HW_CFG_WOL_CAPABLE_DISABLED 0x00000000
+ #define SHARED_HW_CFG_WOL_CAPABLE_ENABLED 0x00004000
+ /* Output low when PERST is asserted */
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK 0x00008000
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED 0x00000000
+ #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED 0x00008000
+
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK 0x00070000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT 16
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW 0x00000000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB 0x00010000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB 0x00020000
+ #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB 0x00030000
+
+ /* The fan failure mechanism is usually related to the PHY type
+ since the power consumption of the board is determined by the PHY.
+ Currently, fan is required for most designs with SFX7101, BCM8727
+ and BCM8481. If a fan is not required for a board which uses one
+ of those PHYs, this field should be set to "Disabled". If a fan is
+ required for a different PHY type, this option should be set to
+ "Enabled". The fan failure indication is expected on SPIO5 */
+ #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000
+ #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19
+ #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000
+ #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000
+
+ /* ASPM Power Management support */
+ #define SHARED_HW_CFG_ASPM_SUPPORT_MASK 0x00600000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT 21
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED 0x00000000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED 0x00200000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED 0x00400000
+ #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED 0x00600000
+
+ /* The value of PM_TL_IGNORE_REQS (bit0) in PCI register
+ tl_control_0 (register 0x2800) */
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK 0x00800000
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED 0x00800000
+
+ #define SHARED_HW_CFG_PORT_MODE_MASK 0x01000000
+ #define SHARED_HW_CFG_PORT_MODE_2 0x00000000
+ #define SHARED_HW_CFG_PORT_MODE_4 0x01000000
+
+ #define SHARED_HW_CFG_PATH_SWAP_MASK 0x02000000
+ #define SHARED_HW_CFG_PATH_SWAP_DISABLED 0x00000000
+ #define SHARED_HW_CFG_PATH_SWAP_ENABLED 0x02000000
+
+ /* Set the MDC/MDIO access for the first external phy */
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000
+
+ /* Set the MDC/MDIO access for the second external phy */
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000
+ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000
+
+
+ u32 power_dissipated; /* 0x11c */
+ #define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000
+ #define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16
+ #define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000
+ #define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000
+ #define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000
+ #define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000
+
+ #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000
+ #define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24
+
+ u32 ump_nc_si_config; /* 0x120 */
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000
+ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002
+
+ #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00
+ #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8
+
+ #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000
+ #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16
+ #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000
+ #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000
+
+ u32 board; /* 0x124 */
+ #define SHARED_HW_CFG_E3_I2C_MUX0_MASK 0x0000003F
+ #define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT 0
+ #define SHARED_HW_CFG_E3_I2C_MUX1_MASK 0x00000FC0
+ #define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT 6
+ /* Use the PIN_CFG_XXX defines on top */
+ #define SHARED_HW_CFG_BOARD_REV_MASK 0x00ff0000
+ #define SHARED_HW_CFG_BOARD_REV_SHIFT 16
+
+ #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0f000000
+ #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24
+
+ #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xf0000000
+ #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28
+
+ u32 wc_lane_config; /* 0x128 */
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT 0
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_32103210 0x00001b1b
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_32100123 0x00001be4
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_01233210 0x0000e41b
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_01230123 0x0000e4e4
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00
+ #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
+
+ /* TX lane Polarity swap */
+ #define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED 0x00010000
+ #define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED 0x00020000
+ #define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED 0x00040000
+ #define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED 0x00080000
+ /* TX lane Polarity swap */
+ #define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED 0x00100000
+ #define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED 0x00200000
+ #define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED 0x00400000
+ #define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED 0x00800000
+
+ /* Selects the port layout of the board */
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK 0x0F000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT 24
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01 0x00000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10 0x01000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123 0x02000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032 0x03000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301 0x04000000
+ #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210 0x05000000
};
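
The config words in shared_hw_cfg (and in the port/function structures that follow) all use the same MASK/SHIFT encoding. A minimal sketch of how such a field is typically read and written, using the MFW select field from above as the example (helper names are made up for illustration, not part of the patch):

/* Generic field access for the *_MASK / *_SHIFT pairs defined above. */
static u32 example_get_mfw_select(u32 config)
{
	return (config & SHARED_HW_CFG_MFW_SELECT_MASK) >>
	       SHARED_HW_CFG_MFW_SELECT_SHIFT;
}

static u32 example_set_mfw_select(u32 config, u32 val)
{
	config &= ~SHARED_HW_CFG_MFW_SELECT_MASK;
	config |= (val << SHARED_HW_CFG_MFW_SELECT_SHIFT) &
		  SHARED_HW_CFG_MFW_SELECT_MASK;
	return config;
}
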
/****************************************************************************
- * Port HW configuration *
+ * Port HW configuration *
****************************************************************************/
-struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
+struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u32 pci_id;
-#define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000
-#define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff
+ #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000
+ #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff
u32 pci_sub_id;
-#define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000
-#define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff
+ #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000
+ #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff
u32 power_dissipated;
-#define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000
-#define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24
-#define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000
-#define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16
-#define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00
-#define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8
-#define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff
-#define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0
+ #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff
+ #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0
+ #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00
+ #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8
+ #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000
+ #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16
+ #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000
+ #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24
u32 power_consumed;
-#define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000
-#define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24
-#define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000
-#define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16
-#define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00
-#define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8
-#define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff
-#define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0
+ #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff
+ #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0
+ #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00
+ #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8
+ #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000
+ #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16
+ #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000
+ #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24
u32 mac_upper;
-#define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff
-#define PORT_HW_CFG_UPPERMAC_SHIFT 0
+ #define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff
+ #define PORT_HW_CFG_UPPERMAC_SHIFT 0
u32 mac_lower;
u32 iscsi_mac_upper; /* Upper 16 bits are always zeroes */
@@ -237,642 +402,809 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u32 rdma_mac_lower;
u32 serdes_config;
-#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000FFFF
-#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0
-
-#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xFFFF0000
-#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
-
-
- u32 Reserved0[3]; /* 0x158 */
- /* Controls the TX laser of the SFP+ module */
- u32 sfp_ctrl; /* 0x164 */
-#define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
-#define PORT_HW_CFG_TX_LASER_SHIFT 0
-#define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
-#define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
-#define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
-#define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
-#define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
-
- /* Controls the fault module LED of the SFP+ */
-#define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
-#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
-#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
-#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
-#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
-#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
-#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
- u32 Reserved01[12]; /* 0x158 */
- /* for external PHY, or forced mode or during AN */
- u16 xgxs_config_rx[4]; /* 0x198 */
-
- u16 xgxs_config_tx[4]; /* 0x1A0 */
-
- u32 Reserved1[56]; /* 0x1A8 */
- u32 default_cfg; /* 0x288 */
-#define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
-#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
-#define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
-#define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
-#define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
-#define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
-
-#define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
-#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
-#define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
-#define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
-#define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
-#define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
-
-#define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
-#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
-#define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
-#define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
-#define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
-#define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
-
-#define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
-#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
-#define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
-#define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
-#define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
-#define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
+ #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000ffff
+ #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0
+
+ #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xffff0000
+ #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
+
+
+ /* Default values: 2P-64, 4P-32 */
+ u32 pf_config; /* 0x158 */
+ #define PORT_HW_CFG_PF_NUM_VF_MASK 0x0000007F
+ #define PORT_HW_CFG_PF_NUM_VF_SHIFT 0
+
+ /* Default values: 17 */
+ #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_MASK 0x00007F00
+ #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_SHIFT 8
+
+ #define PORT_HW_CFG_ENABLE_FLR_MASK 0x00010000
+ #define PORT_HW_CFG_FLR_ENABLED 0x00010000
+
+ u32 vf_config; /* 0x15C */
+ #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_MASK 0x0000007F
+ #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_SHIFT 0
+
+ #define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK 0xFFFF0000
+ #define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT 16
+
+ u32 mf_pci_id; /* 0x160 */
+ #define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK 0x0000FFFF
+ #define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT 0
+
+ /* Controls the TX laser of the SFP+ module */
+ u32 sfp_ctrl; /* 0x164 */
+ #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
+ #define PORT_HW_CFG_TX_LASER_SHIFT 0
+ #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
+ #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
+ #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
+ #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
+ #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
+
+ /* Controls the fault module LED of the SFP+ */
+ #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
+ #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
+ #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
+ #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
+
+ /* The output pin TX_DIS that controls the TX laser of the SFP+
+ module. Use the PIN_CFG_XXX defines on top */
+ u32 e3_sfp_ctrl; /* 0x168 */
+ #define PORT_HW_CFG_E3_TX_LASER_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_TX_LASER_SHIFT 0
+
+ /* The output pin for SFPP_TYPE which turns on the Fault module LED */
+ #define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK 0x0000FF00
+ #define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT 8
+
+ /* The input pin MOD_ABS that indicates whether SFP+ module is
+ present or not. Use the PIN_CFG_XXX defines on top */
+ #define PORT_HW_CFG_E3_MOD_ABS_MASK 0x00FF0000
+ #define PORT_HW_CFG_E3_MOD_ABS_SHIFT 16
+
+ /* The output pin PWRDIS_SFP_X which disable the power of the SFP+
+ module. Use the PIN_CFG_XXX defines on top */
+ #define PORT_HW_CFG_E3_PWR_DIS_MASK 0xFF000000
+ #define PORT_HW_CFG_E3_PWR_DIS_SHIFT 24
/*
- * When KR link is required to be set to force which is not
- * KR-compliant, this parameter determine what is the trigger for it.
- * When GPIO is selected, low input will force the speed. Currently
- * default speed is 1G. In the future, it may be widen to select the
- * forced speed in with another parameter. Note when force-1G is
- * enabled, it override option 56: Link Speed option.
+ * The input pin which signals module transmit fault. Use the
+ * PIN_CFG_XXX defines on top
*/
-#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
-#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
-#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
-#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
-#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
- /* Enable to determine with which GPIO to reset the external phy */
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
-#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
+ u32 e3_cmn_pin_cfg; /* 0x16C */
+ #define PORT_HW_CFG_E3_TX_FAULT_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_TX_FAULT_SHIFT 0
+
+	/* The output pin which resets the PHY. Use the PIN_CFG_XXX defines on
+ top */
+ #define PORT_HW_CFG_E3_PHY_RESET_MASK 0x0000FF00
+ #define PORT_HW_CFG_E3_PHY_RESET_SHIFT 8
+
+ /*
+ * The output pin which powers down the PHY. Use the PIN_CFG_XXX
+ * defines on top
+ */
+ #define PORT_HW_CFG_E3_PWR_DOWN_MASK 0x00FF0000
+ #define PORT_HW_CFG_E3_PWR_DOWN_SHIFT 16
+
+	/* The output pin values BSC_SEL which select the I2C for this port
+ in the I2C Mux */
+ #define PORT_HW_CFG_E3_I2C_MUX0_MASK 0x01000000
+ #define PORT_HW_CFG_E3_I2C_MUX1_MASK 0x02000000
+
+
+ /*
+	 * The input pin I_FAULT which indicates that over-current has occurred.
+ * Use the PIN_CFG_XXX defines on top
+ */
+ u32 e3_cmn_pin_cfg1; /* 0x170 */
+ #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF
+ #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0
+ u32 reserved0[7]; /* 0x174 */
+
+ u32 aeu_int_mask; /* 0x190 */
+
+ u32 media_type; /* 0x194 */
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0
+
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8
+
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000
+ #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16
+
+ /* 4 times 16 bits for all 4 lanes. In case external PHY is present
+ (not direct mode), those values will not take effect on the 4 XGXS
+ lanes. For some external PHYs (such as 8706 and 8726) the values
+	   will be used to configure the external PHY; in those cases, not
+	   all 4 values are needed. */
+ u16 xgxs_config_rx[4]; /* 0x198 */
+ u16 xgxs_config_tx[4]; /* 0x1A0 */
+
+ /* For storing FCOE mac on shared memory */
+ u32 fcoe_fip_mac_upper;
+ #define PORT_HW_CFG_FCOE_UPPERMAC_MASK 0x0000ffff
+ #define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT 0
+ u32 fcoe_fip_mac_lower;
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 Reserved1[49]; /* 0x1C0 */
+
+ /* Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default),
+ 84833 only */
+ u32 xgbt_phy_cfg; /* 0x284 */
+ #define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK 0x000000FF
+ #define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT 0
+
+ u32 default_cfg; /* 0x288 */
+ #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
+ #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
+ #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
+ #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
+ #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
+
+ #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
+ #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
+ #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
+ #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
+ #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
+
+ #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
+ #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
+ #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
+ #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
+ #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
+
+ #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
+ #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
+ #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
+ #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
+ #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
+ #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
+
+	/* When the KR link is required to be set to force mode, which is not
+	   KR-compliant, this parameter determines what the trigger for it is.
+	   When GPIO is selected, a low input will force the speed. Currently
+	   the default speed is 1G. In the future, it may be widened to select
+	   the forced speed with another parameter. Note that when force-1G is
+	   enabled, it overrides option 56: Link Speed option. */
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
+ #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
+ /* Enable to determine with which GPIO to reset the external phy */
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
+ #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
+
/* Enable BAM on KR */
-#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
-#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
-#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
-#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
+ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
/* Enable Common Mode Sense */
-#define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
-#define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
-#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
-#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
+ #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
+ #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
+ #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
+ #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
+
+ /* Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */
+ #define PORT_HW_CFG_RJ45_PR_SWP_MASK 0x00400000
+ #define PORT_HW_CFG_RJ45_PR_SWP_SHIFT 22
+ #define PORT_HW_CFG_RJ45_PR_SWP_DISABLED 0x00000000
+ #define PORT_HW_CFG_RJ45_PR_SWP_ENABLED 0x00400000
+
+ /* Determine the Serdes electrical interface */
+ #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000
+ #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24
+ #define PORT_HW_CFG_NET_SERDES_IF_SGMII 0x00000000
+ #define PORT_HW_CFG_NET_SERDES_IF_XFI 0x01000000
+ #define PORT_HW_CFG_NET_SERDES_IF_SFI 0x02000000
+ #define PORT_HW_CFG_NET_SERDES_IF_KR 0x03000000
+ #define PORT_HW_CFG_NET_SERDES_IF_DXGXS 0x04000000
+ #define PORT_HW_CFG_NET_SERDES_IF_KR2 0x05000000
+
u32 speed_capability_mask2; /* 0x28C */
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12G 0x00000080
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12_DOT_5G 0x00000100
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_13G 0x00000200
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_15G 0x00000400
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_16G 0x00000800
-
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12G 0x00800000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12_DOT_5G 0x01000000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_13G 0x02000000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_15G 0x04000000
-#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_16G 0x08000000
-
- /* In the case where two media types (e.g. copper and fiber) are
- present and electrically active at the same time, PHY Selection
- will determine which of the two PHYs will be designated as the
- Active PHY and used for a connection to the network. */
- u32 multi_phy_config; /* 0x290 */
-#define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007
-#define PORT_HW_CFG_PHY_SELECTION_SHIFT 0
-#define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000
-#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001
-#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002
-#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
-#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
-
- /* When enabled, all second phy nvram parameters will be swapped
- with the first phy parameters */
-#define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008
-#define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3
-#define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000
-#define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008
-
-
- /* Address of the second external phy */
- u32 external_phy_config2; /* 0x294 */
-#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF
-#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0
-
- /* The second XGXS external PHY type */
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
-#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
-
- /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as
- 8706, 8726 and 8727) not all 4 values are needed. */
- u16 xgxs_config2_rx[4]; /* 0x296 */
- u16 xgxs_config2_tx[4]; /* 0x2A0 */
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G 0x00000080
+
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000
+ #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G 0x00800000
+
+
+ /* In the case where two media types (e.g. copper and fiber) are
+ present and electrically active at the same time, PHY Selection
+ will determine which of the two PHYs will be designated as the
+ Active PHY and used for a connection to the network. */
+ u32 multi_phy_config; /* 0x290 */
+ #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007
+ #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0
+ #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000
+ #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001
+ #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002
+ #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
+ #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
+
+ /* When enabled, all second phy nvram parameters will be swapped
+ with the first phy parameters */
+ #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008
+ #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3
+ #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000
+ #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008
+
+
+ /* Address of the second external phy */
+ u32 external_phy_config2; /* 0x294 */
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0
+
+ /* The second XGXS external PHY type */
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE 0x00000e00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
+ #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
+
+
+ /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as
+ 8706, 8726 and 8727) not all 4 values are needed. */
+ u16 xgxs_config2_rx[4]; /* 0x296 */
+ u16 xgxs_config2_tx[4]; /* 0x2A0 */
u32 lane_config;
-#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff
-#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0
-
-#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff
-#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
-#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00
-#define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
-#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000
-#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14
- /* AN and forced */
-#define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b
- /* forced only */
-#define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4
- /* forced only */
-#define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
- /* forced only */
-#define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
- /* Indicate whether to swap the external phy polarity */
-#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000
-#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000
-#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff
+ #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0
+ /* AN and forced */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8
+ /* forced only */
+ #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4
+ #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff
+ #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
+ #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00
+ #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000
+ #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14
+
+ /* Indicate whether to swap the external phy polarity */
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000
+ #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000
+
u32 external_phy_config;
-#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000
-#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24
-#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000
-#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000
-#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000
-
-#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000
-#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16
-
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
-#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
-
-#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff
-#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0
+ #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff
+ #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0
+
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54640 0x00000c00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE 0x00000e00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
+ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
+
+ #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16
+
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000
+ #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000
u32 speed_capability_mask;
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_12G 0x00800000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_12_5G 0x01000000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_13G 0x02000000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_15G 0x04000000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_16G 0x08000000
-#define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000
-
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_12G 0x00000080
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_12_5G 0x00000100
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_13G 0x00000200
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_15G 0x00000400
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_16G 0x00000800
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000
-
- u32 reserved[2];
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G 0x00000080
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000
+
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G 0x00800000
+ #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000
+
+ /* A place to hold the original MAC address as a backup */
+ u32 backup_mac_upper; /* 0x2B4 */
+ u32 backup_mac_lower; /* 0x2B8 */
};
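
Every multi-bit field in the NVRAM structures above is paired with a _MASK/_SHIFT define, so consumers isolate the bits with the mask and shift them down to bit 0. A minimal sketch of that idiom for the media_type word, assuming the patched bnx2x_hsi.h is in scope and a host-endian copy of struct port_hw_cfg has already been read out of shared memory (the helper name is illustrative, not taken from the driver):

	static u32 port_media_type_phy0(const struct port_hw_cfg *cfg)
	{
		/* Bits 7:0 of media_type hold the PHY0 media type code. */
		return (cfg->media_type & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
		       PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT;
	}
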
/****************************************************************************
- * Shared Feature configuration *
+ * Shared Feature configuration *
****************************************************************************/
-struct shared_feat_cfg { /* NVRAM Offset */
+struct shared_feat_cfg { /* NVRAM Offset */
+
+ u32 config; /* 0x450 */
+ #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001
+
+ /* Use NVRAM values instead of HW default values */
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \
+ 0x00000002
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \
+ 0x00000000
+ #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \
+ 0x00000002
- u32 config; /* 0x450 */
-#define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK 0x00000008
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO 0x00000000
+ #define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM 0x00000008
- /* Use the values from options 47 and 48 instead of the HW default
- values */
-#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000
-#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002
+ #define SHARED_FEAT_CFG_NCSI_ID_MASK 0x00000030
+ #define SHARED_FEAT_CFG_NCSI_ID_SHIFT 4
-#define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700
-#define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8
-#define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000
-#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
-#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
-#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
+ /* Override the OTP back to single function mode. When using GPIO,
+ high means only SF, 0 is according to CLP configuration */
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
+ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
+
+ /* The interval in seconds between sending LLDP packets. Set to zero
+ to disable the feature */
+ #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK 0x00ff0000
+ #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT 16
+
+ /* The assigned device type ID for LLDP usage */
+ #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK 0xff000000
+ #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT 24
};
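
Single-choice fields such as the forced single-function mode are typically checked by masking and comparing against one of the named values rather than shifting. A sketch under the same assumptions (defines above in scope, kernel types available, helper name illustrative):

	static bool shared_feat_forces_sf(const struct shared_feat_cfg *cfg)
	{
		/* FORCED_SF is one of the values carried in the FORCE_SF_MODE field. */
		return (cfg->config & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
			SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF;
	}
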
/****************************************************************************
- * Port Feature configuration *
+ * Port Feature configuration *
****************************************************************************/
-struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
+struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
u32 config;
-#define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f
-#define PORT_FEATURE_BAR1_SIZE_SHIFT 0
-#define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000
-#define PORT_FEATURE_BAR1_SIZE_64K 0x00000001
-#define PORT_FEATURE_BAR1_SIZE_128K 0x00000002
-#define PORT_FEATURE_BAR1_SIZE_256K 0x00000003
-#define PORT_FEATURE_BAR1_SIZE_512K 0x00000004
-#define PORT_FEATURE_BAR1_SIZE_1M 0x00000005
-#define PORT_FEATURE_BAR1_SIZE_2M 0x00000006
-#define PORT_FEATURE_BAR1_SIZE_4M 0x00000007
-#define PORT_FEATURE_BAR1_SIZE_8M 0x00000008
-#define PORT_FEATURE_BAR1_SIZE_16M 0x00000009
-#define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a
-#define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b
-#define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c
-#define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d
-#define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e
-#define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f
-#define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0
-#define PORT_FEATURE_BAR2_SIZE_SHIFT 4
-#define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000
-#define PORT_FEATURE_BAR2_SIZE_64K 0x00000010
-#define PORT_FEATURE_BAR2_SIZE_128K 0x00000020
-#define PORT_FEATURE_BAR2_SIZE_256K 0x00000030
-#define PORT_FEATURE_BAR2_SIZE_512K 0x00000040
-#define PORT_FEATURE_BAR2_SIZE_1M 0x00000050
-#define PORT_FEATURE_BAR2_SIZE_2M 0x00000060
-#define PORT_FEATURE_BAR2_SIZE_4M 0x00000070
-#define PORT_FEATURE_BAR2_SIZE_8M 0x00000080
-#define PORT_FEATURE_BAR2_SIZE_16M 0x00000090
-#define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0
-#define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0
-#define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0
-#define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0
-#define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0
-#define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0
-#define PORT_FEATURE_EN_SIZE_MASK 0x07000000
-#define PORT_FEATURE_EN_SIZE_SHIFT 24
-#define PORT_FEATURE_WOL_ENABLED 0x01000000
-#define PORT_FEATURE_MBA_ENABLED 0x02000000
-#define PORT_FEATURE_MFW_ENABLED 0x04000000
-
- /* Reserved bits: 28-29 */
- /* Check the optic vendor via i2c against a list of approved modules
- in a separate nvram image */
-#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xE0000000
-#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29
-#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT 0x00000000
-#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER 0x20000000
-#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000
-#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000
-
+ #define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f
+ #define PORT_FEATURE_BAR1_SIZE_SHIFT 0
+ #define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000
+ #define PORT_FEATURE_BAR1_SIZE_64K 0x00000001
+ #define PORT_FEATURE_BAR1_SIZE_128K 0x00000002
+ #define PORT_FEATURE_BAR1_SIZE_256K 0x00000003
+ #define PORT_FEATURE_BAR1_SIZE_512K 0x00000004
+ #define PORT_FEATURE_BAR1_SIZE_1M 0x00000005
+ #define PORT_FEATURE_BAR1_SIZE_2M 0x00000006
+ #define PORT_FEATURE_BAR1_SIZE_4M 0x00000007
+ #define PORT_FEATURE_BAR1_SIZE_8M 0x00000008
+ #define PORT_FEATURE_BAR1_SIZE_16M 0x00000009
+ #define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a
+ #define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b
+ #define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c
+ #define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d
+ #define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e
+ #define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f
+ #define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0
+ #define PORT_FEATURE_BAR2_SIZE_SHIFT 4
+ #define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000
+ #define PORT_FEATURE_BAR2_SIZE_64K 0x00000010
+ #define PORT_FEATURE_BAR2_SIZE_128K 0x00000020
+ #define PORT_FEATURE_BAR2_SIZE_256K 0x00000030
+ #define PORT_FEATURE_BAR2_SIZE_512K 0x00000040
+ #define PORT_FEATURE_BAR2_SIZE_1M 0x00000050
+ #define PORT_FEATURE_BAR2_SIZE_2M 0x00000060
+ #define PORT_FEATURE_BAR2_SIZE_4M 0x00000070
+ #define PORT_FEATURE_BAR2_SIZE_8M 0x00000080
+ #define PORT_FEATURE_BAR2_SIZE_16M 0x00000090
+ #define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0
+ #define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0
+ #define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0
+ #define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0
+ #define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0
+ #define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0
+
+ #define PORT_FEAT_CFG_DCBX_MASK 0x00000100
+ #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100
+
+ #define PORT_FEAT_CFG_AUTOGREEN_MASK 0x00000200
+ #define PORT_FEAT_CFG_AUTOGREEN_SHIFT 9
+ #define PORT_FEAT_CFG_AUTOGREEN_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_AUTOGREEN_ENABLED 0x00000200
+
+ #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000
+ #define PORT_FEATURE_EN_SIZE_SHIFT 24
+ #define PORT_FEATURE_WOL_ENABLED 0x01000000
+ #define PORT_FEATURE_MBA_ENABLED 0x02000000
+ #define PORT_FEATURE_MFW_ENABLED 0x04000000
+
+ /* Advertise expansion ROM even if MBA is disabled */
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK 0x08000000
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED 0x08000000
+
+ /* Check the optic vendor via i2c against a list of approved modules
+ in a separate nvram image */
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xe0000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \
+ 0x00000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \
+ 0x20000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000
+ #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000
u32 wol_config;
/* Default is used when driver sets to "auto" mode */
-#define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003
-#define PORT_FEATURE_WOL_DEFAULT_SHIFT 0
-#define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000
-#define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001
-#define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002
-#define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003
-#define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004
-#define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008
-#define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010
+ #define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003
+ #define PORT_FEATURE_WOL_DEFAULT_SHIFT 0
+ #define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000
+ #define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001
+ #define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002
+ #define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003
+ #define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004
+ #define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008
+ #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010
u32 mba_config;
-#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000003
-#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0
-#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000
-#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001
-#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002
-#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003
-#define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100
-#define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200
-#define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400
-#define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000
-#define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000
-#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000
-#define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000
-#define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20
-#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000
-#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24
-#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000
-#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000
-#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000
-#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000
-#define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000
-#define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26
-#define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000
-#define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000
-#define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000
-#define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000
-#define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000
-#define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000
-#define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000
-#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000
-#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_KX4 0x20000000
-#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_KR 0x24000000
-#define PORT_FEATURE_MBA_LINK_SPEED_12GBPS 0x28000000
-#define PORT_FEATURE_MBA_LINK_SPEED_12_5GBPS 0x2c000000
-#define PORT_FEATURE_MBA_LINK_SPEED_13GBPS 0x30000000
-#define PORT_FEATURE_MBA_LINK_SPEED_15GBPS 0x34000000
-#define PORT_FEATURE_MBA_LINK_SPEED_16GBPS 0x38000000
-
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000007
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT 0x00000004
+ #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE 0x00000007
+
+ #define PORT_FEATURE_MBA_BOOT_RETRY_MASK 0x00000038
+ #define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT 3
+
+ #define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100
+ #define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200
+ #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400
+ #define PORT_FEATURE_MBA_HOTKEY_MASK 0x00000800
+ #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000
+ #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000
+ #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000
+ #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000
+ #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000
+ #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26
+ #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000
+ #define PORT_FEATURE_MBA_LINK_SPEED_20GBPS 0x20000000
u32 bmc_config;
-#define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000
-#define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001
+ #define PORT_FEATURE_BMC_LINK_OVERRIDE_MASK 0x00000001
+ #define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000
+ #define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001
u32 mba_vlan_cfg;
-#define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff
-#define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0
-#define PORT_FEATURE_MBA_VLAN_EN 0x00010000
+ #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff
+ #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0
+ #define PORT_FEATURE_MBA_VLAN_EN 0x00010000
u32 resource_cfg;
-#define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001
-#define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002
-#define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004
-#define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008
-#define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010
+ #define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001
+ #define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002
+ #define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004
+ #define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008
+ #define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010
u32 smbus_config;
- /* Obsolete */
-#define PORT_FEATURE_SMBUS_EN 0x00000001
-#define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe
-#define PORT_FEATURE_SMBUS_ADDR_SHIFT 1
-
- u32 reserved1;
+ #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe
+ #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1
+
+ u32 vf_config;
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK 0x0000000f
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT 0
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_4K 0x00000001
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_8K 0x00000002
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_16K 0x00000003
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_32K 0x00000004
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_64K 0x00000005
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_128K 0x00000006
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_256K 0x00000007
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_512K 0x00000008
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_1M 0x00000009
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_2M 0x0000000a
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_4M 0x0000000b
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_8M 0x0000000c
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_16M 0x0000000d
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_32M 0x0000000e
+ #define PORT_FEAT_CFG_VF_BAR2_SIZE_64M 0x0000000f
u32 link_config; /* Used as HW defaults for the driver */
-#define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000
-#define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24
- /* (forced) low speed switch (< 10G) */
-#define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000
- /* (forced) high speed switch (>= 10G) */
-#define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000
-#define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000
-#define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000
-
-#define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000
-#define PORT_FEATURE_LINK_SPEED_SHIFT 16
-#define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000
-#define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000
-#define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000
-#define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000
-#define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000
-#define PORT_FEATURE_LINK_SPEED_1G 0x00050000
-#define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000
-#define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000
-#define PORT_FEATURE_LINK_SPEED_10G_KX4 0x00080000
-#define PORT_FEATURE_LINK_SPEED_10G_KR 0x00090000
-#define PORT_FEATURE_LINK_SPEED_12G 0x000a0000
-#define PORT_FEATURE_LINK_SPEED_12_5G 0x000b0000
-#define PORT_FEATURE_LINK_SPEED_13G 0x000c0000
-#define PORT_FEATURE_LINK_SPEED_15G 0x000d0000
-#define PORT_FEATURE_LINK_SPEED_16G 0x000e0000
-
-#define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700
-#define PORT_FEATURE_FLOW_CONTROL_SHIFT 8
-#define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000
-#define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100
-#define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200
-#define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300
-#define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400
+ #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000
+ #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24
+ /* (forced) low speed switch (< 10G) */
+ #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000
+ /* (forced) high speed switch (>= 10G) */
+ #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000
+ #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000
+ #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000
+
+ #define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000
+ #define PORT_FEATURE_LINK_SPEED_SHIFT 16
+ #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000
+ #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000
+ #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000
+ #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000
+ #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000
+ #define PORT_FEATURE_LINK_SPEED_1G 0x00050000
+ #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000
+ #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000
+ #define PORT_FEATURE_LINK_SPEED_20G 0x00080000
+
+ #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700
+ #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8
+ #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000
+ #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100
+ #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200
+ #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300
+ #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400
/* The default for MCP link configuration,
- uses the same defines as link_config */
+ uses the same defines as link_config */
u32 mfw_wol_link_cfg;
+
/* The default for the driver of the second external phy,
- uses the same defines as link_config */
- u32 link_config2; /* 0x47C */
+ uses the same defines as link_config */
+ u32 link_config2; /* 0x47C */
/* The default for MCP of the second external phy,
- uses the same defines as link_config */
- u32 mfw_wol_link_cfg2; /* 0x480 */
+ uses the same defines as link_config */
+ u32 mfw_wol_link_cfg2; /* 0x480 */
- u32 Reserved2[17]; /* 0x484 */
+ u32 Reserved2[17]; /* 0x484 */
};
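
The link_config word above encodes the default requested speed as one of the PORT_FEATURE_LINK_SPEED_* codes. A sketch of mapping that code to a nominal rate in Mbps; the mapping and the subset of codes handled are illustrative only:

	static u32 port_default_link_speed_mbps(const struct port_feat_cfg *cfg)
	{
		switch (cfg->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			return 0;		/* autonegotiate */
		case PORT_FEATURE_LINK_SPEED_1G:
			return 1000;
		case PORT_FEATURE_LINK_SPEED_10G_CX4:
			return 10000;
		case PORT_FEATURE_LINK_SPEED_20G:
			return 20000;
		default:
			return 0;		/* remaining codes elided for brevity */
		}
	}
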
/****************************************************************************
- * Device Information *
+ * Device Information *
****************************************************************************/
-struct shm_dev_info { /* size */
+struct shm_dev_info { /* size */
u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */
- struct shared_hw_cfg shared_hw_config; /* 40 */
+ struct shared_hw_cfg shared_hw_config; /* 40 */
- struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */
+ struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */
- struct shared_feat_cfg shared_feature_config; /* 4 */
+ struct shared_feat_cfg shared_feature_config; /* 4 */
- struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */
+ struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */
};
-#define FUNC_0 0
-#define FUNC_1 1
-#define FUNC_2 2
-#define FUNC_3 3
-#define FUNC_4 4
-#define FUNC_5 5
-#define FUNC_6 6
-#define FUNC_7 7
-#define E1_FUNC_MAX 2
-#define E1H_FUNC_MAX 8
-#define E2_FUNC_MAX 4 /* per path */
-
-#define VN_0 0
-#define VN_1 1
-#define VN_2 2
-#define VN_3 3
-#define E1VN_MAX 1
-#define E1HVN_MAX 4
+#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
+ #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition."
+#endif
-#define E2_VF_MAX 64
+#define FUNC_0 0
+#define FUNC_1 1
+#define FUNC_2 2
+#define FUNC_3 3
+#define FUNC_4 4
+#define FUNC_5 5
+#define FUNC_6 6
+#define FUNC_7 7
+#define E1_FUNC_MAX 2
+#define E1H_FUNC_MAX 8
+#define E2_FUNC_MAX 4 /* per path */
+
+#define VN_0 0
+#define VN_1 1
+#define VN_2 2
+#define VN_3 3
+#define E1VN_MAX 1
+#define E1HVN_MAX 4
+
+#define E2_VF_MAX 64 /* HC_REG_VF_CONFIGURATION_SIZE */
/* This value (in milliseconds) determines the frequency of the driver
* issuing the PULSE message code. The firmware monitors this periodic
* pulse to determine when to switch to an OS-absent mode. */
-#define DRV_PULSE_PERIOD_MS 250
+#define DRV_PULSE_PERIOD_MS 250
/* This value (in milliseconds) determines how long the driver should
* wait for an acknowledgement from the firmware before timing out. Once
* the firmware has timed out, the driver will assume there is no firmware
* running and there won't be any firmware-driver synchronization during a
* driver reset. */
-#define FW_ACK_TIME_OUT_MS 5000
+#define FW_ACK_TIME_OUT_MS 5000
-#define FW_ACK_POLL_TIME_MS 1
+#define FW_ACK_POLL_TIME_MS 1
-#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
+#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
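
The comment above fixes the acknowledgement timeout at 5000 ms with a 1 ms poll interval, so FW_ACK_NUM_OF_POLL works out to 5000 iterations. A sketch of the resulting polling loop, assuming the kernel's msleep() and -ETIMEDOUT are available and with read_fw_ack() standing in for whatever mailbox-read helper the driver actually uses:

	static int wait_for_fw_ack(u32 expected, u32 (*read_fw_ack)(void))
	{
		int i;

		for (i = 0; i < FW_ACK_NUM_OF_POLL; i++) {	/* 5000 polls */
			if (read_fw_ack() == expected)
				return 0;
			msleep(FW_ACK_POLL_TIME_MS);		/* 1 ms each */
		}
		return -ETIMEDOUT;	/* FW_ACK_TIME_OUT_MS elapsed, assume no firmware */
	}
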
/* LED Blink rate that will achieve ~15.9Hz */
-#define LED_BLINK_RATE_VAL 480
+#define LED_BLINK_RATE_VAL 480
/****************************************************************************
- * Driver <-> FW Mailbox *
+ * Driver <-> FW Mailbox *
****************************************************************************/
struct drv_port_mb {
u32 link_status;
/* Driver should update this field on any link change event */
-#define LINK_STATUS_LINK_FLAG_MASK 0x00000001
-#define LINK_STATUS_LINK_UP 0x00000001
-#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
-#define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_12GTFD (11<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_12GXFD (11<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD (12<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD (12<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_13GTFD (13<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_13GXFD (13<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_15GTFD (14<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_15GXFD (14<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_16GTFD (15<<1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_16GXFD (15<<1)
-
-#define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020
-#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
-
-#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
-#define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080
-#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
-
-#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
-#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
-#define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800
-#define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000
-#define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000
-#define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000
-#define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000
-
-#define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000
-#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000
-
-#define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000
-#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000
-
-#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
-#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18)
-#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18)
-#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18)
-#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18)
-
-#define LINK_STATUS_SERDES_LINK 0x00100000
-
-#define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000
-#define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000
-#define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000
-#define LINK_STATUS_LINK_PARTNER_12GXFD_CAPABLE 0x01000000
-#define LINK_STATUS_LINK_PARTNER_12_5GXFD_CAPABLE 0x02000000
-#define LINK_STATUS_LINK_PARTNER_13GXFD_CAPABLE 0x04000000
-#define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE 0x08000000
-#define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE 0x10000000
+ #define LINK_STATUS_LINK_FLAG_MASK 0x00000001
+ #define LINK_STATUS_LINK_UP 0x00000001
+ #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
+ #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD (11<<1)
+ #define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD (11<<1)
+
+ #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020
+ #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+
+ #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+ #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080
+ #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+
+ #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+ #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+ #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800
+ #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000
+ #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000
+ #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000
+ #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000
+
+ #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000
+ #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000
+
+ #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000
+ #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000
+
+ #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+ #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18)
+ #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18)
+ #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18)
+ #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18)
+
+ #define LINK_STATUS_SERDES_LINK 0x00100000
+
+ #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000
+ #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000
+ #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000
+ #define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE 0x10000000
+
+ #define LINK_STATUS_PFC_ENABLED 0x20000000
+
+ #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
u32 port_stx;
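
A sketch of pulling the link state back out of the link_status word with the flag and field defines above, assuming a host-endian copy of the word has already been fetched from the port mailbox:

	static bool link_is_up(u32 link_status)
	{
		return (link_status & LINK_STATUS_LINK_FLAG_MASK) ==
		       LINK_STATUS_LINK_UP;
	}

	static u32 link_speed_duplex_code(u32 link_status)
	{
		/* Yields one of the LINK_STATUS_SPEED_AND_DUPLEX_* values (bits 4:1). */
		return link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK;
	}
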
@@ -887,138 +1219,159 @@ struct drv_port_mb {
struct drv_func_mb {
u32 drv_mb_header;
-#define DRV_MSG_CODE_MASK 0xffff0000
-#define DRV_MSG_CODE_LOAD_REQ 0x10000000
-#define DRV_MSG_CODE_LOAD_DONE 0x11000000
-#define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000
-#define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000
-#define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000
-#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
-#define DRV_MSG_CODE_DCC_OK 0x30000000
-#define DRV_MSG_CODE_DCC_FAILURE 0x31000000
-#define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000
-#define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000
-#define DRV_MSG_CODE_VALIDATE_KEY 0x70000000
-#define DRV_MSG_CODE_GET_CURR_KEY 0x80000000
-#define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
-#define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
-#define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
- /*
- * The optic module verification commands require bootcode
- * v5.0.6 or later
- */
-#define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
-#define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
+ #define DRV_MSG_CODE_MASK 0xffff0000
+ #define DRV_MSG_CODE_LOAD_REQ 0x10000000
+ #define DRV_MSG_CODE_LOAD_DONE 0x11000000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000
+ #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+ #define DRV_MSG_CODE_DCC_OK 0x30000000
+ #define DRV_MSG_CODE_DCC_FAILURE 0x31000000
+ #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000
+ #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000
+ #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000
+ #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000
+ #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
+ #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
+ #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
/*
- * The specific optic module verification command requires bootcode
- * v5.2.12 or later
+ * The optic module verification command requires bootcode
+ * v5.0.6 or later; the specific optic module verification command
+ * requires bootcode v5.2.12 or later.
*/
-#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
-#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
+ #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
+ #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
+ #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
+ #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
+ #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
-#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
-#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
-#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
-#define REQ_BC_VER_4_SET_MF_BW 0x00060202
-#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
-#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
-#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
-#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
-#define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
+ #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
+ #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
-#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+ #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+
+ #define DRV_MSG_CODE_SET_MF_BW 0xe0000000
+ #define REQ_BC_VER_4_SET_MF_BW 0x00060202
+ #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
+
+ #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000
+
+ #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
+ #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
+ #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
+ #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
+
+ #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 drv_mb_param;
+ #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000
+ #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000
u32 fw_mb_header;
-#define FW_MSG_CODE_MASK 0xffff0000
-#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
-#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
-#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
- /* Load common chip is supported from bc 6.0.0 */
-#define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000
-#define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
-#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
-#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000
-#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000
-#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
-#define FW_MSG_CODE_DCC_DONE 0x30100000
-#define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000
-#define FW_MSG_CODE_DIAG_REFUSE 0x50200000
-#define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000
-#define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000
-#define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000
-#define FW_MSG_CODE_GET_KEY_DONE 0x80100000
-#define FW_MSG_CODE_NO_KEY 0x80f00000
-#define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000
-#define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000
-#define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000
-#define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000
-#define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000
-#define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000
-#define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000
-#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
-#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
-
-#define FW_MSG_CODE_LIC_CHALLENGE 0xff010000
-#define FW_MSG_CODE_LIC_RESPONSE 0xff020000
-#define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000
-#define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
-
-#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+ #define FW_MSG_CODE_MASK 0xffff0000
+ #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
+ #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+ #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+ /* Load common chip is supported from bc 6.0.0 */
+ #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000
+ #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000
+
+ #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
+ #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+ #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
+ #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000
+ #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000
+ #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+ #define FW_MSG_CODE_DCC_DONE 0x30100000
+ #define FW_MSG_CODE_LLDP_DONE 0x40100000
+ #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000
+ #define FW_MSG_CODE_DIAG_REFUSE 0x50200000
+ #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000
+ #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000
+ #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000
+ #define FW_MSG_CODE_GET_KEY_DONE 0x80100000
+ #define FW_MSG_CODE_NO_KEY 0x80f00000
+ #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000
+ #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000
+ #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000
+ #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000
+ #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000
+ #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
+ #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+
+ #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
+ #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
+
+ #define FW_MSG_CODE_LINK_CHANGED_ACK 0x01100000
+
+ #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000
+ #define FW_MSG_CODE_LIC_RESPONSE 0xff020000
+ #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000
+ #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
+
+ #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 fw_mb_param;
u32 drv_pulse_mb;
-#define DRV_PULSE_SEQ_MASK 0x00007fff
-#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
- /* The system time is in the format of
- * (year-2001)*12*32 + month*32 + day. */
-#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
- /* Indicate to the firmware not to go into the
+ #define DRV_PULSE_SEQ_MASK 0x00007fff
+ #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+ /*
+ * The system time is in the format of
+ * (year-2001)*12*32 + month*32 + day.
+ */
+ #define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+ /*
+ * Indicate to the firmware not to go into the
 * OS-absent state when it is not getting the driver pulse.
- * This is used for debugging as well for PXE(MBA). */
+ * This is used for debugging as well as for PXE (MBA).
+ */
u32 mcp_pulse_mb;
-#define MCP_PULSE_SEQ_MASK 0x00007fff
-#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+ #define MCP_PULSE_SEQ_MASK 0x00007fff
+ #define MCP_PULSE_ALWAYS_ALIVE 0x00008000
/* Indicates to the driver not to assert due to lack
* of MCP response */
-#define MCP_EVENT_MASK 0xffff0000
-#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+ #define MCP_EVENT_MASK 0xffff0000
+ #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
u32 iscsi_boot_signature;
u32 iscsi_boot_block_offset;
u32 drv_status;
-#define DRV_STATUS_PMF 0x00000001
-#define DRV_STATUS_SET_MF_BW 0x00000004
-
-#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
-#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
-#define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200
-#define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400
-#define DRV_STATUS_DCC_RESERVED1 0x00000800
-#define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
-#define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
-#define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
-#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
+ #define DRV_STATUS_PMF 0x00000001
+ #define DRV_STATUS_VF_DISABLED 0x00000002
+ #define DRV_STATUS_SET_MF_BW 0x00000004
+ #define DRV_STATUS_LINK_EVENT 0x00000008
+
+ #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
+ #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
+ #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200
+ #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400
+ #define DRV_STATUS_DCC_RESERVED1 0x00000800
+ #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
+ #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
+
+ #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
+ #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
u32 virt_mac_upper;
-#define VIRT_MAC_SIGN_MASK 0xffff0000
-#define VIRT_MAC_SIGNATURE 0x564d0000
+ #define VIRT_MAC_SIGN_MASK 0xffff0000
+ #define VIRT_MAC_SIGNATURE 0x564d0000
u32 virt_mac_lower;
};
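The drv_pulse_mb system-time field documented in struct drv_func_mb above packs the date as (year-2001)*12*32 + month*32 + day into the upper 16 bits (DRV_PULSE_SYSTEM_TIME_MASK). A minimal sketch of that arithmetic, assuming a 0-based month and day; the helper names are illustrative, not part of this patch:

/* Illustrative only: pack/unpack the drv_pulse_mb system-time field. */
static inline u32 example_pulse_time_pack(u32 year, u32 month, u32 day)
{
	return ((year - 2001) * 12 * 32 + month * 32 + day) << 16;
}

static inline void example_pulse_time_unpack(u32 drv_pulse_mb,
					     u32 *year, u32 *month, u32 *day)
{
	u32 t = (drv_pulse_mb & DRV_PULSE_SYSTEM_TIME_MASK) >> 16;

	*day = t % 32;
	*month = (t / 32) % 12;
	*year = 2001 + t / (12 * 32);
}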
/****************************************************************************
- * Management firmware state *
+ * Management firmware state *
****************************************************************************/
/* Allocate 440 bytes for management firmware */
-#define MGMTFW_STATE_WORD_SIZE 110
+#define MGMTFW_STATE_WORD_SIZE 110
struct mgmtfw_state {
u32 opaque[MGMTFW_STATE_WORD_SIZE];
@@ -1026,25 +1379,25 @@ struct mgmtfw_state {
/****************************************************************************
- * Multi-Function configuration *
+ * Multi-Function configuration *
****************************************************************************/
struct shared_mf_cfg {
u32 clp_mb;
-#define SHARED_MF_CLP_SET_DEFAULT 0x00000000
+ #define SHARED_MF_CLP_SET_DEFAULT 0x00000000
/* set by CLP */
-#define SHARED_MF_CLP_EXIT 0x00000001
+ #define SHARED_MF_CLP_EXIT 0x00000001
/* set by MCP */
-#define SHARED_MF_CLP_EXIT_DONE 0x00010000
+ #define SHARED_MF_CLP_EXIT_DONE 0x00010000
};
struct port_mf_cfg {
- u32 dynamic_cfg; /* device control channel */
-#define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
-#define PORT_MF_CFG_E1HOV_TAG_SHIFT 0
-#define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK
+ u32 dynamic_cfg; /* device control channel */
+ #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
+ #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0
+ #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK
u32 reserved[3];
@@ -1055,57 +1408,58 @@ struct func_mf_cfg {
u32 config;
/* E/R/I/D */
/* function 0 of each port cannot be hidden */
-#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+ #define FUNC_MF_CFG_FUNC_HIDE 0x00000001
-#define FUNC_MF_CFG_PROTOCOL_MASK 0x00000007
-#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002
-#define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004
-#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006
-#define FUNC_MF_CFG_PROTOCOL_DEFAULT\
- FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA
+ #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000006
+ #define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000000
+ #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002
+ #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004
+ #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006
+ #define FUNC_MF_CFG_PROTOCOL_DEFAULT \
+ FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA
-#define FUNC_MF_CFG_FUNC_DISABLED 0x00000008
+ #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008
+ #define FUNC_MF_CFG_FUNC_DELETED 0x00000010
/* PRI */
/* 0 - low priority, 3 - high priority */
-#define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300
-#define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8
-#define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8
+ #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000
/* MINBW, MAXBW */
/* value range - 0..100, increments in 100Mbps */
-#define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000
-#define FUNC_MF_CFG_MIN_BW_SHIFT 16
-#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
-#define FUNC_MF_CFG_MAX_BW_MASK 0xff000000
-#define FUNC_MF_CFG_MAX_BW_SHIFT 24
-#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000
-
- u32 mac_upper; /* MAC */
-#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
-#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
-#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000
+ #define FUNC_MF_CFG_MIN_BW_SHIFT 16
+ #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+ #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000
+ #define FUNC_MF_CFG_MAX_BW_SHIFT 24
+ #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000
+
+ u32 mac_upper; /* MAC */
+ #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+ #define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+ #define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
u32 mac_lower;
-#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+ #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
u32 e1hov_tag; /* VNI */
-#define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
-#define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0
-#define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK
+ #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff
+ #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0
+ #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK
u32 reserved[2];
-
};
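The MINBW/MAXBW comment above places the per-function bandwidth limits in config bits 16-23 and 24-31, in 100Mbps units (0..100). A minimal extraction sketch; the helper name is illustrative, not part of this patch:

/* Illustrative only: read the per-function min/max bandwidth (100Mbps units). */
static inline void example_get_func_bw(u32 config, u32 *min_bw, u32 *max_bw)
{
	*min_bw = (config & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT;
	*max_bw = (config & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT;
}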
/* This structure is not applicable and should not be accessed on 57711 */
struct func_ext_cfg {
u32 func_cfg;
-#define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF
-#define MACP_FUNC_CFG_FLAGS_SHIFT 0
-#define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
-#define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
-#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
-#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
+ #define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF
+ #define MACP_FUNC_CFG_FLAGS_SHIFT 0
+ #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
+ #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
+ #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
+ #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
u32 iscsi_mac_addr_upper;
u32 iscsi_mac_addr_lower;
@@ -1120,73 +1474,99 @@ struct func_ext_cfg {
u32 fcoe_wwn_node_name_lower;
u32 preserve_data;
-#define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0)
-#define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1)
-#define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2)
-#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3)
-#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4)
+ #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0)
+ #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3)
+ #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4)
+ #define MF_FUNC_CFG_PRESERVE_TX_BW (1<<5)
};
struct mf_cfg {
- struct shared_mf_cfg shared_mf_config;
- struct port_mf_cfg port_mf_config[PORT_MAX];
- struct func_mf_cfg func_mf_config[E1H_FUNC_MAX];
-
- struct func_ext_cfg func_ext_config[E1H_FUNC_MAX];
-};
-
+ struct shared_mf_cfg shared_mf_config; /* 0x4 */
+ struct port_mf_cfg port_mf_config[PORT_MAX]; /* 0x10 * 2 = 0x20 */
+ /* for all chips, there are 8 mf functions */
+ struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */
+ /*
+ * Extended configuration per function - this array does not exist and
+ * should not be accessed on 57711
+ */
+ struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/
+}; /* 0x224 */
/****************************************************************************
- * Shared Memory Region *
+ * Shared Memory Region *
****************************************************************************/
-struct shmem_region { /* SharedMem Offset (size) */
+struct shmem_region { /* SharedMem Offset (size) */
- u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */
-#define SHR_MEM_FORMAT_REV_ID ('A'<<24)
-#define SHR_MEM_FORMAT_REV_MASK 0xff000000
+ u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */
+ #define SHR_MEM_FORMAT_REV_MASK 0xff000000
+ #define SHR_MEM_FORMAT_REV_ID ('A'<<24)
/* validity bits */
-#define SHR_MEM_VALIDITY_PCI_CFG 0x00100000
-#define SHR_MEM_VALIDITY_MB 0x00200000
-#define SHR_MEM_VALIDITY_DEV_INFO 0x00400000
-#define SHR_MEM_VALIDITY_RESERVED 0x00000007
+ #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000
+ #define SHR_MEM_VALIDITY_MB 0x00200000
+ #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000
+ #define SHR_MEM_VALIDITY_RESERVED 0x00000007
/* One licensing bit should be set */
-#define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
-#define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
-#define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
-#define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
+ #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
+ #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
+ #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
+ #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
/* Active MFW */
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
- struct shm_dev_info dev_info; /* 0x8 (0x438) */
+ struct shm_dev_info dev_info; /* 0x8 (0x438) */
- struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
+ struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
/* FW information (for internal FW use) */
- u32 fw_info_fio_offset; /* 0x4a8 (0x4) */
- struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
+ u32 fw_info_fio_offset; /* 0x4a8 (0x4) */
+ struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
+
+ struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
- struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
- struct drv_func_mb func_mb[]; /* 0x684
- (44*2/4/8=0x58/0xb0/0x160) */
+#ifdef BMAPI
+ /* This is a variable length array */
+ /* the number of functions depends on the chip type */
+ struct drv_func_mb func_mb[1]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#else
+ /* the number of functions depends on the chip type */
+ struct drv_func_mb func_mb[]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#endif /* BMAPI */
}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */
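Because func_mb[] is a variable-length array whose entry count depends on the chip (as the comments above note), a per-function mailbox address is computed from the struct layout rather than through a fixed-size member. A minimal sketch of that offset arithmetic, assuming offsetof() is available in this context; the helper name is illustrative, not part of this patch:

/* Illustrative only: shmem offset of a given function's driver mailbox. */
static inline u32 example_func_mb_offset(u32 func)
{
	return offsetof(struct shmem_region, func_mb) +
	       func * sizeof(struct drv_func_mb);
}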
+/****************************************************************************
+ * Shared Memory 2 Region *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way: */
+/* 8 bit: PF ack */
+/* 64 bit: VF ack */
+/* 8 bit: ios_dis_ack */
+/* In order to maintain endianness in the mailbox hsi, we want to keep */
+/* using u32. The fw must have the VF right after the PF since this is how */
+/* it accesses arrays (it always expects the VF to reside after the PF, */
+/* which makes the calculation much easier for it). */
+/* To satisfy both constraints and keep the struct small, the code will */
+/* abuse the structure defined here to achieve the actual partition */
+/* described above. */
+/****************************************************************************/
struct fw_flr_ack {
- u32 pf_ack;
- u32 vf_ack[1];
- u32 iov_dis_ack;
+ u32 pf_ack;
+ u32 vf_ack[1];
+ u32 iov_dis_ack;
};
struct fw_flr_mb {
- u32 aggint;
- u32 opgen_addr;
- struct fw_flr_ack ack;
+ u32 aggint;
+ u32 opgen_addr;
+ struct fw_flr_ack ack;
};
/**** SUPPORT FOR SHMEM ARRAYS ***
@@ -1210,36 +1590,36 @@ struct fw_flr_mb {
*
 * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
*
- * | | | |
- * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
- * | | | |
+ * | | | |
+ * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | | | |
*
* SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
*
- * | | | |
- * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
- * | | | |
+ * | | | |
+ * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
+ * | | | |
*
* SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
*
- * | | | |
- * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
- * | | | |
+ * | | | |
+ * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
+ * | | | |
*/
#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
(((i)%((fb)/(eb))) * (eb)))
-#define SHMEM_ARRAY_GET(a, i, eb, fb) \
+#define SHMEM_ARRAY_GET(a, i, eb, fb) \
((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
SHMEM_ARRAY_MASK(eb))
-#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
+#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
do { \
a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
- SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
- SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
} while (0)
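A minimal usage sketch for the array accessors above, reading and writing 4-bit elements stored 8 per u32 with the per-byte flip ordering (eb=4, fb=8) shown in the second diagram. It relies on the SHMEM_ARRAY_ENTRY/SHMEM_ARRAY_MASK helpers defined earlier in this header; the function names are illustrative, not part of this patch:

/* Illustrative only: 4-bit element access with per-byte flipping. */
static inline u8 example_shmem_nibble_get(const u32 *arr, int i)
{
	return SHMEM_ARRAY_GET(arr, i, 4, 8);
}

static inline void example_shmem_nibble_set(u32 *arr, int i, u8 val)
{
	SHMEM_ARRAY_SET(arr, i, 4, 8, val);
}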
@@ -1263,23 +1643,30 @@ do { \
#define ISCSI_APP_IDX 1
#define PREDEFINED_APP_IDX_MAX 2
+
+/* Big/Little endian have the same representation. */
struct dcbx_ets_feature {
+ /*
+ * For Admin MIB - is this feature supported by the
+ * driver | For Local MIB - should this feature be enabled.
+ */
u32 enabled;
u32 pg_bw_tbl[2];
u32 pri_pg_tbl[1];
};
+/* Driver structure in LE */
struct dcbx_pfc_feature {
#ifdef __BIG_ENDIAN
u8 pri_en_bitmap;
-#define DCBX_PFC_PRI_0 0x01
-#define DCBX_PFC_PRI_1 0x02
-#define DCBX_PFC_PRI_2 0x04
-#define DCBX_PFC_PRI_3 0x08
-#define DCBX_PFC_PRI_4 0x10
-#define DCBX_PFC_PRI_5 0x20
-#define DCBX_PFC_PRI_6 0x40
-#define DCBX_PFC_PRI_7 0x80
+ #define DCBX_PFC_PRI_0 0x01
+ #define DCBX_PFC_PRI_1 0x02
+ #define DCBX_PFC_PRI_2 0x04
+ #define DCBX_PFC_PRI_3 0x08
+ #define DCBX_PFC_PRI_4 0x10
+ #define DCBX_PFC_PRI_5 0x20
+ #define DCBX_PFC_PRI_6 0x40
+ #define DCBX_PFC_PRI_7 0x80
u8 pfc_caps;
u8 reserved;
u8 enabled;
@@ -1288,39 +1675,41 @@ struct dcbx_pfc_feature {
u8 reserved;
u8 pfc_caps;
u8 pri_en_bitmap;
-#define DCBX_PFC_PRI_0 0x01
-#define DCBX_PFC_PRI_1 0x02
-#define DCBX_PFC_PRI_2 0x04
-#define DCBX_PFC_PRI_3 0x08
-#define DCBX_PFC_PRI_4 0x10
-#define DCBX_PFC_PRI_5 0x20
-#define DCBX_PFC_PRI_6 0x40
-#define DCBX_PFC_PRI_7 0x80
+ #define DCBX_PFC_PRI_0 0x01
+ #define DCBX_PFC_PRI_1 0x02
+ #define DCBX_PFC_PRI_2 0x04
+ #define DCBX_PFC_PRI_3 0x08
+ #define DCBX_PFC_PRI_4 0x10
+ #define DCBX_PFC_PRI_5 0x20
+ #define DCBX_PFC_PRI_6 0x40
+ #define DCBX_PFC_PRI_7 0x80
#endif
};
struct dcbx_app_priority_entry {
#ifdef __BIG_ENDIAN
- u16 app_id;
- u8 pri_bitmap;
- u8 appBitfield;
-#define DCBX_APP_ENTRY_VALID 0x01
-#define DCBX_APP_ENTRY_SF_MASK 0x30
-#define DCBX_APP_ENTRY_SF_SHIFT 4
-#define DCBX_APP_SF_ETH_TYPE 0x10
-#define DCBX_APP_SF_PORT 0x20
+ u16 app_id;
+ u8 pri_bitmap;
+ u8 appBitfield;
+ #define DCBX_APP_ENTRY_VALID 0x01
+ #define DCBX_APP_ENTRY_SF_MASK 0x30
+ #define DCBX_APP_ENTRY_SF_SHIFT 4
+ #define DCBX_APP_SF_ETH_TYPE 0x10
+ #define DCBX_APP_SF_PORT 0x20
#elif defined(__LITTLE_ENDIAN)
u8 appBitfield;
-#define DCBX_APP_ENTRY_VALID 0x01
-#define DCBX_APP_ENTRY_SF_MASK 0x30
-#define DCBX_APP_ENTRY_SF_SHIFT 4
-#define DCBX_APP_SF_ETH_TYPE 0x10
-#define DCBX_APP_SF_PORT 0x20
- u8 pri_bitmap;
- u16 app_id;
+ #define DCBX_APP_ENTRY_VALID 0x01
+ #define DCBX_APP_ENTRY_SF_MASK 0x30
+ #define DCBX_APP_ENTRY_SF_SHIFT 4
+ #define DCBX_APP_SF_ETH_TYPE 0x10
+ #define DCBX_APP_SF_PORT 0x20
+ u8 pri_bitmap;
+ u16 app_id;
#endif
};
+
+/* FW structure in BE */
struct dcbx_app_priority_feature {
#ifdef __BIG_ENDIAN
u8 reserved;
@@ -1336,302 +1725,403 @@ struct dcbx_app_priority_feature {
struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
};
+/* FW structure in BE */
struct dcbx_features {
+ /* PG feature */
struct dcbx_ets_feature ets;
+ /* PFC feature */
struct dcbx_pfc_feature pfc;
+ /* APP feature */
struct dcbx_app_priority_feature app;
};
+/* LLDP protocol parameters */
+/* FW structure in BE */
struct lldp_params {
#ifdef __BIG_ENDIAN
- u8 msg_fast_tx_interval;
- u8 msg_tx_hold;
- u8 msg_tx_interval;
- u8 admin_status;
-#define LLDP_TX_ONLY 0x01
-#define LLDP_RX_ONLY 0x02
-#define LLDP_TX_RX 0x03
-#define LLDP_DISABLED 0x04
- u8 reserved1;
- u8 tx_fast;
- u8 tx_crd_max;
- u8 tx_crd;
+ u8 msg_fast_tx_interval;
+ u8 msg_tx_hold;
+ u8 msg_tx_interval;
+ u8 admin_status;
+ #define LLDP_TX_ONLY 0x01
+ #define LLDP_RX_ONLY 0x02
+ #define LLDP_TX_RX 0x03
+ #define LLDP_DISABLED 0x04
+ u8 reserved1;
+ u8 tx_fast;
+ u8 tx_crd_max;
+ u8 tx_crd;
#elif defined(__LITTLE_ENDIAN)
- u8 admin_status;
-#define LLDP_TX_ONLY 0x01
-#define LLDP_RX_ONLY 0x02
-#define LLDP_TX_RX 0x03
-#define LLDP_DISABLED 0x04
- u8 msg_tx_interval;
- u8 msg_tx_hold;
- u8 msg_fast_tx_interval;
- u8 tx_crd;
- u8 tx_crd_max;
- u8 tx_fast;
- u8 reserved1;
+ u8 admin_status;
+ #define LLDP_TX_ONLY 0x01
+ #define LLDP_RX_ONLY 0x02
+ #define LLDP_TX_RX 0x03
+ #define LLDP_DISABLED 0x04
+ u8 msg_tx_interval;
+ u8 msg_tx_hold;
+ u8 msg_fast_tx_interval;
+ u8 tx_crd;
+ u8 tx_crd_max;
+ u8 tx_fast;
+ u8 reserved1;
#endif
-#define REM_CHASSIS_ID_STAT_LEN 4
-#define REM_PORT_ID_STAT_LEN 4
+ #define REM_CHASSIS_ID_STAT_LEN 4
+ #define REM_PORT_ID_STAT_LEN 4
+ /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN];
+ /* Holds remote Port ID TLV header, subtype and 9B of payload. */
u32 peer_port_id[REM_PORT_ID_STAT_LEN];
};
struct lldp_dcbx_stat {
-#define LOCAL_CHASSIS_ID_STAT_LEN 2
-#define LOCAL_PORT_ID_STAT_LEN 2
+ #define LOCAL_CHASSIS_ID_STAT_LEN 2
+ #define LOCAL_PORT_ID_STAT_LEN 2
+ /* Holds local Chassis ID 8B payload of constant subtype 4. */
u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN];
+ /* Holds local Port ID 8B payload of constant subtype 3. */
u32 local_port_id[LOCAL_PORT_ID_STAT_LEN];
+ /* Number of DCBX frames transmitted. */
u32 num_tx_dcbx_pkts;
+ /* Number of DCBX frames received. */
u32 num_rx_dcbx_pkts;
};
+/* ADMIN MIB - DCBX local machine default configuration. */
struct lldp_admin_mib {
- u32 ver_cfg_flags;
-#define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001
-#define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002
-#define DCBX_APP_CONFIG_TX_ENABLED 0x00000004
-#define DCBX_ETS_RECO_TX_ENABLED 0x00000008
-#define DCBX_ETS_RECO_VALID 0x00000010
-#define DCBX_ETS_WILLING 0x00000020
-#define DCBX_PFC_WILLING 0x00000040
-#define DCBX_APP_WILLING 0x00000080
-#define DCBX_VERSION_CEE 0x00000100
-#define DCBX_VERSION_IEEE 0x00000200
-#define DCBX_DCBX_ENABLED 0x00000400
-#define DCBX_CEE_VERSION_MASK 0x0000f000
-#define DCBX_CEE_VERSION_SHIFT 12
-#define DCBX_CEE_MAX_VERSION_MASK 0x000f0000
-#define DCBX_CEE_MAX_VERSION_SHIFT 16
- struct dcbx_features features;
-};
-
+ u32 ver_cfg_flags;
+ #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001
+ #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002
+ #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004
+ #define DCBX_ETS_RECO_TX_ENABLED 0x00000008
+ #define DCBX_ETS_RECO_VALID 0x00000010
+ #define DCBX_ETS_WILLING 0x00000020
+ #define DCBX_PFC_WILLING 0x00000040
+ #define DCBX_APP_WILLING 0x00000080
+ #define DCBX_VERSION_CEE 0x00000100
+ #define DCBX_VERSION_IEEE 0x00000200
+ #define DCBX_DCBX_ENABLED 0x00000400
+ #define DCBX_CEE_VERSION_MASK 0x0000f000
+ #define DCBX_CEE_VERSION_SHIFT 12
+ #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000
+ #define DCBX_CEE_MAX_VERSION_SHIFT 16
+ struct dcbx_features features;
+};
+
+/* REMOTE MIB - remote machine DCBX configuration. */
struct lldp_remote_mib {
u32 prefix_seq_num;
u32 flags;
-#define DCBX_ETS_TLV_RX 0x00000001
-#define DCBX_PFC_TLV_RX 0x00000002
-#define DCBX_APP_TLV_RX 0x00000004
-#define DCBX_ETS_RX_ERROR 0x00000010
-#define DCBX_PFC_RX_ERROR 0x00000020
-#define DCBX_APP_RX_ERROR 0x00000040
-#define DCBX_ETS_REM_WILLING 0x00000100
-#define DCBX_PFC_REM_WILLING 0x00000200
-#define DCBX_APP_REM_WILLING 0x00000400
-#define DCBX_REMOTE_ETS_RECO_VALID 0x00001000
+ #define DCBX_ETS_TLV_RX 0x00000001
+ #define DCBX_PFC_TLV_RX 0x00000002
+ #define DCBX_APP_TLV_RX 0x00000004
+ #define DCBX_ETS_RX_ERROR 0x00000010
+ #define DCBX_PFC_RX_ERROR 0x00000020
+ #define DCBX_APP_RX_ERROR 0x00000040
+ #define DCBX_ETS_REM_WILLING 0x00000100
+ #define DCBX_PFC_REM_WILLING 0x00000200
+ #define DCBX_APP_REM_WILLING 0x00000400
+ #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000
+ #define DCBX_REMOTE_MIB_VALID 0x00002000
struct dcbx_features features;
u32 suffix_seq_num;
};
+/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */
struct lldp_local_mib {
u32 prefix_seq_num;
+ /* Indicates if there is a mismatch with the negotiation results. */
u32 error;
-#define DCBX_LOCAL_ETS_ERROR 0x00000001
-#define DCBX_LOCAL_PFC_ERROR 0x00000002
-#define DCBX_LOCAL_APP_ERROR 0x00000004
-#define DCBX_LOCAL_PFC_MISMATCH 0x00000010
-#define DCBX_LOCAL_APP_MISMATCH 0x00000020
+ #define DCBX_LOCAL_ETS_ERROR 0x00000001
+ #define DCBX_LOCAL_PFC_ERROR 0x00000002
+ #define DCBX_LOCAL_APP_ERROR 0x00000004
+ #define DCBX_LOCAL_PFC_MISMATCH 0x00000010
+ #define DCBX_LOCAL_APP_MISMATCH 0x00000020
+ #define DCBX_REMOTE_MIB_ERROR 0x00000040
struct dcbx_features features;
u32 suffix_seq_num;
};
/***END OF DCBX STRUCTURES DECLARATIONS***/
+struct ncsi_oem_fcoe_features {
+ u32 fcoe_features1;
+ #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF
+ #define FCOE_FEATURES1_IOS_PER_CONNECTION_OFFSET 0
+
+ #define FCOE_FEATURES1_LOGINS_PER_PORT_MASK 0xFFFF0000
+ #define FCOE_FEATURES1_LOGINS_PER_PORT_OFFSET 16
+
+ u32 fcoe_features2;
+ #define FCOE_FEATURES2_EXCHANGES_MASK 0x0000FFFF
+ #define FCOE_FEATURES2_EXCHANGES_OFFSET 0
+
+ #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_MASK 0xFFFF0000
+ #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_OFFSET 16
+
+ u32 fcoe_features3;
+ #define FCOE_FEATURES3_TARGETS_SUPPORTED_MASK 0x0000FFFF
+ #define FCOE_FEATURES3_TARGETS_SUPPORTED_OFFSET 0
+
+ #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_MASK 0xFFFF0000
+ #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_OFFSET 16
+
+ u32 fcoe_features4;
+ #define FCOE_FEATURES4_FEATURE_SETTINGS_MASK 0x0000000F
+ #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0
+};
+
+struct ncsi_oem_data {
+ u32 driver_version[4];
+ struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
+};
+
struct shmem2_region {
- u32 size;
-
- u32 dcc_support;
-#define SHMEM_DCC_SUPPORT_NONE 0x00000000
-#define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001
-#define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004
-#define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008
-#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
-#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
-#define SHMEM_DCC_SUPPORT_DEFAULT SHMEM_DCC_SUPPORT_NONE
- u32 ext_phy_fw_version2[PORT_MAX];
+ u32 size; /* 0x0000 */
+
+ u32 dcc_support; /* 0x0004 */
+ #define SHMEM_DCC_SUPPORT_NONE 0x00000000
+ #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001
+ #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004
+ #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008
+ #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
+ #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
+
+ u32 ext_phy_fw_version2[PORT_MAX]; /* 0x0008 */
/*
* For backwards compatibility, if the mf_cfg_addr does not exist
 * (the size field is smaller than 0xc) the mf_cfg resides at the
* end of struct shmem_region
- */
- u32 mf_cfg_addr;
-#define SHMEM_MF_CFG_ADDR_NONE 0x00000000
-
- struct fw_flr_mb flr_mb;
- u32 dcbx_lldp_params_offset;
-#define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
- u32 dcbx_neg_res_offset;
-#define SHMEM_DCBX_NEG_RES_NONE 0x00000000
- u32 dcbx_remote_mib_offset;
-#define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
+ */
+ u32 mf_cfg_addr; /* 0x0010 */
+ #define SHMEM_MF_CFG_ADDR_NONE 0x00000000
+
+ struct fw_flr_mb flr_mb; /* 0x0014 */
+ u32 dcbx_lldp_params_offset; /* 0x0028 */
+ #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
+ u32 dcbx_neg_res_offset; /* 0x002c */
+ #define SHMEM_DCBX_NEG_RES_NONE 0x00000000
+ u32 dcbx_remote_mib_offset; /* 0x0030 */
+ #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
/*
* The other shmemX_base_addr holds the other path's shmem address
* required for example in case of common phy init, or for path1 to know
* the address of mcp debug trace which is located in offset from shmem
* of path0
*/
- u32 other_shmem_base_addr;
- u32 other_shmem2_base_addr;
- u32 reserved1[E2_VF_MAX / 32];
- u32 reserved2[E2_FUNC_MAX][E2_VF_MAX / 32];
- u32 dcbx_lldp_dcbx_stat_offset;
-#define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
+ u32 other_shmem_base_addr; /* 0x0034 */
+ u32 other_shmem2_base_addr; /* 0x0038 */
+ /*
+ * mcp_vf_disabled is set by the MCP to inform the driver about VFs
+ * which were disabled/FLRed
+ */
+ u32 mcp_vf_disabled[E2_VF_MAX / 32]; /* 0x003c */
+
+ /*
+ * drv_ack_vf_disabled is set by the PF driver to ack handled disabled
+ * VFs
+ */
+ u32 drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */
+
+ u32 dcbx_lldp_dcbx_stat_offset; /* 0x0064 */
+ #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
+
+ /*
+ * The edebug_driver_if field is used to transfer messages from the edebug
+ * app to the driver through shmem2.
+ *
+ * message format:
+ * bits 0-2 - function number / instance of driver to perform request
+ * bits 3-5 - op code / is_ack?
+ * bits 6-63 - data
+ */
+ u32 edebug_driver_if[2]; /* 0x0068 */
+ #define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR 1
+ #define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR 2
+ #define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT 3
+
+ u32 nvm_retain_bitmap_addr; /* 0x0070 */
+
+ u32 reserved1; /* 0x0074 */
+
+ u32 reserved2[E2_FUNC_MAX];
+
+ u32 reserved3[E2_FUNC_MAX];/* 0x0088 */
+ u32 reserved4[E2_FUNC_MAX];/* 0x0098 */
+
+ u32 swim_base_addr; /* 0x0108 */
+ u32 swim_funcs;
+ u32 swim_main_cb;
+
+ u32 reserved5[2];
+
+ /* generic flags controlled by the driver */
+ u32 drv_flags;
+ #define DRV_FLAGS_DCB_CONFIGURED 0x1
+
+ /* pointer to extended dev_info shared data copied from nvm image */
+ u32 extended_dev_info_shared_addr;
+ u32 ncsi_oem_data_addr;
+
+ u32 ocsd_host_addr;
+ u32 ocbb_host_addr;
+ u32 ocsd_req_update_interval;
};
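The edebug_driver_if comment inside struct shmem2_region above documents a packed message: bits 0-2 function number, bits 3-5 op code, bits 6-63 data. A minimal decoding sketch, assuming edebug_driver_if[0] carries the low 32 bits of the message; the struct and helper names are illustrative, not part of this patch:

/* Illustrative only: unpack an edebug_driver_if message. */
struct example_edebug_msg {
	u8 func;	/* bits 0-2 */
	u8 op;		/* bits 3-5 */
	u64 data;	/* bits 6-63 */
};

static inline void example_edebug_decode(const u32 word[2],
					 struct example_edebug_msg *msg)
{
	u64 raw = ((u64)word[1] << 32) | word[0];

	msg->func = raw & 0x7;
	msg->op = (raw >> 3) & 0x7;
	msg->data = raw >> 6;
}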
struct emac_stats {
- u32 rx_stat_ifhcinoctets;
- u32 rx_stat_ifhcinbadoctets;
- u32 rx_stat_etherstatsfragments;
- u32 rx_stat_ifhcinucastpkts;
- u32 rx_stat_ifhcinmulticastpkts;
- u32 rx_stat_ifhcinbroadcastpkts;
- u32 rx_stat_dot3statsfcserrors;
- u32 rx_stat_dot3statsalignmenterrors;
- u32 rx_stat_dot3statscarriersenseerrors;
- u32 rx_stat_xonpauseframesreceived;
- u32 rx_stat_xoffpauseframesreceived;
- u32 rx_stat_maccontrolframesreceived;
- u32 rx_stat_xoffstateentered;
- u32 rx_stat_dot3statsframestoolong;
- u32 rx_stat_etherstatsjabbers;
- u32 rx_stat_etherstatsundersizepkts;
- u32 rx_stat_etherstatspkts64octets;
- u32 rx_stat_etherstatspkts65octetsto127octets;
- u32 rx_stat_etherstatspkts128octetsto255octets;
- u32 rx_stat_etherstatspkts256octetsto511octets;
- u32 rx_stat_etherstatspkts512octetsto1023octets;
- u32 rx_stat_etherstatspkts1024octetsto1522octets;
- u32 rx_stat_etherstatspktsover1522octets;
-
- u32 rx_stat_falsecarriererrors;
-
- u32 tx_stat_ifhcoutoctets;
- u32 tx_stat_ifhcoutbadoctets;
- u32 tx_stat_etherstatscollisions;
- u32 tx_stat_outxonsent;
- u32 tx_stat_outxoffsent;
- u32 tx_stat_flowcontroldone;
- u32 tx_stat_dot3statssinglecollisionframes;
- u32 tx_stat_dot3statsmultiplecollisionframes;
- u32 tx_stat_dot3statsdeferredtransmissions;
- u32 tx_stat_dot3statsexcessivecollisions;
- u32 tx_stat_dot3statslatecollisions;
- u32 tx_stat_ifhcoutucastpkts;
- u32 tx_stat_ifhcoutmulticastpkts;
- u32 tx_stat_ifhcoutbroadcastpkts;
- u32 tx_stat_etherstatspkts64octets;
- u32 tx_stat_etherstatspkts65octetsto127octets;
- u32 tx_stat_etherstatspkts128octetsto255octets;
- u32 tx_stat_etherstatspkts256octetsto511octets;
- u32 tx_stat_etherstatspkts512octetsto1023octets;
- u32 tx_stat_etherstatspkts1024octetsto1522octets;
- u32 tx_stat_etherstatspktsover1522octets;
- u32 tx_stat_dot3statsinternalmactransmiterrors;
+ u32 rx_stat_ifhcinoctets;
+ u32 rx_stat_ifhcinbadoctets;
+ u32 rx_stat_etherstatsfragments;
+ u32 rx_stat_ifhcinucastpkts;
+ u32 rx_stat_ifhcinmulticastpkts;
+ u32 rx_stat_ifhcinbroadcastpkts;
+ u32 rx_stat_dot3statsfcserrors;
+ u32 rx_stat_dot3statsalignmenterrors;
+ u32 rx_stat_dot3statscarriersenseerrors;
+ u32 rx_stat_xonpauseframesreceived;
+ u32 rx_stat_xoffpauseframesreceived;
+ u32 rx_stat_maccontrolframesreceived;
+ u32 rx_stat_xoffstateentered;
+ u32 rx_stat_dot3statsframestoolong;
+ u32 rx_stat_etherstatsjabbers;
+ u32 rx_stat_etherstatsundersizepkts;
+ u32 rx_stat_etherstatspkts64octets;
+ u32 rx_stat_etherstatspkts65octetsto127octets;
+ u32 rx_stat_etherstatspkts128octetsto255octets;
+ u32 rx_stat_etherstatspkts256octetsto511octets;
+ u32 rx_stat_etherstatspkts512octetsto1023octets;
+ u32 rx_stat_etherstatspkts1024octetsto1522octets;
+ u32 rx_stat_etherstatspktsover1522octets;
+
+ u32 rx_stat_falsecarriererrors;
+
+ u32 tx_stat_ifhcoutoctets;
+ u32 tx_stat_ifhcoutbadoctets;
+ u32 tx_stat_etherstatscollisions;
+ u32 tx_stat_outxonsent;
+ u32 tx_stat_outxoffsent;
+ u32 tx_stat_flowcontroldone;
+ u32 tx_stat_dot3statssinglecollisionframes;
+ u32 tx_stat_dot3statsmultiplecollisionframes;
+ u32 tx_stat_dot3statsdeferredtransmissions;
+ u32 tx_stat_dot3statsexcessivecollisions;
+ u32 tx_stat_dot3statslatecollisions;
+ u32 tx_stat_ifhcoutucastpkts;
+ u32 tx_stat_ifhcoutmulticastpkts;
+ u32 tx_stat_ifhcoutbroadcastpkts;
+ u32 tx_stat_etherstatspkts64octets;
+ u32 tx_stat_etherstatspkts65octetsto127octets;
+ u32 tx_stat_etherstatspkts128octetsto255octets;
+ u32 tx_stat_etherstatspkts256octetsto511octets;
+ u32 tx_stat_etherstatspkts512octetsto1023octets;
+ u32 tx_stat_etherstatspkts1024octetsto1522octets;
+ u32 tx_stat_etherstatspktsover1522octets;
+ u32 tx_stat_dot3statsinternalmactransmiterrors;
};
struct bmac1_stats {
- u32 tx_stat_gtpkt_lo;
- u32 tx_stat_gtpkt_hi;
- u32 tx_stat_gtxpf_lo;
- u32 tx_stat_gtxpf_hi;
- u32 tx_stat_gtfcs_lo;
- u32 tx_stat_gtfcs_hi;
- u32 tx_stat_gtmca_lo;
- u32 tx_stat_gtmca_hi;
- u32 tx_stat_gtbca_lo;
- u32 tx_stat_gtbca_hi;
- u32 tx_stat_gtfrg_lo;
- u32 tx_stat_gtfrg_hi;
- u32 tx_stat_gtovr_lo;
- u32 tx_stat_gtovr_hi;
- u32 tx_stat_gt64_lo;
- u32 tx_stat_gt64_hi;
- u32 tx_stat_gt127_lo;
- u32 tx_stat_gt127_hi;
- u32 tx_stat_gt255_lo;
- u32 tx_stat_gt255_hi;
- u32 tx_stat_gt511_lo;
- u32 tx_stat_gt511_hi;
- u32 tx_stat_gt1023_lo;
- u32 tx_stat_gt1023_hi;
- u32 tx_stat_gt1518_lo;
- u32 tx_stat_gt1518_hi;
- u32 tx_stat_gt2047_lo;
- u32 tx_stat_gt2047_hi;
- u32 tx_stat_gt4095_lo;
- u32 tx_stat_gt4095_hi;
- u32 tx_stat_gt9216_lo;
- u32 tx_stat_gt9216_hi;
- u32 tx_stat_gt16383_lo;
- u32 tx_stat_gt16383_hi;
- u32 tx_stat_gtmax_lo;
- u32 tx_stat_gtmax_hi;
- u32 tx_stat_gtufl_lo;
- u32 tx_stat_gtufl_hi;
- u32 tx_stat_gterr_lo;
- u32 tx_stat_gterr_hi;
- u32 tx_stat_gtbyt_lo;
- u32 tx_stat_gtbyt_hi;
-
- u32 rx_stat_gr64_lo;
- u32 rx_stat_gr64_hi;
- u32 rx_stat_gr127_lo;
- u32 rx_stat_gr127_hi;
- u32 rx_stat_gr255_lo;
- u32 rx_stat_gr255_hi;
- u32 rx_stat_gr511_lo;
- u32 rx_stat_gr511_hi;
- u32 rx_stat_gr1023_lo;
- u32 rx_stat_gr1023_hi;
- u32 rx_stat_gr1518_lo;
- u32 rx_stat_gr1518_hi;
- u32 rx_stat_gr2047_lo;
- u32 rx_stat_gr2047_hi;
- u32 rx_stat_gr4095_lo;
- u32 rx_stat_gr4095_hi;
- u32 rx_stat_gr9216_lo;
- u32 rx_stat_gr9216_hi;
- u32 rx_stat_gr16383_lo;
- u32 rx_stat_gr16383_hi;
- u32 rx_stat_grmax_lo;
- u32 rx_stat_grmax_hi;
- u32 rx_stat_grpkt_lo;
- u32 rx_stat_grpkt_hi;
- u32 rx_stat_grfcs_lo;
- u32 rx_stat_grfcs_hi;
- u32 rx_stat_grmca_lo;
- u32 rx_stat_grmca_hi;
- u32 rx_stat_grbca_lo;
- u32 rx_stat_grbca_hi;
- u32 rx_stat_grxcf_lo;
- u32 rx_stat_grxcf_hi;
- u32 rx_stat_grxpf_lo;
- u32 rx_stat_grxpf_hi;
- u32 rx_stat_grxuo_lo;
- u32 rx_stat_grxuo_hi;
- u32 rx_stat_grjbr_lo;
- u32 rx_stat_grjbr_hi;
- u32 rx_stat_grovr_lo;
- u32 rx_stat_grovr_hi;
- u32 rx_stat_grflr_lo;
- u32 rx_stat_grflr_hi;
- u32 rx_stat_grmeg_lo;
- u32 rx_stat_grmeg_hi;
- u32 rx_stat_grmeb_lo;
- u32 rx_stat_grmeb_hi;
- u32 rx_stat_grbyt_lo;
- u32 rx_stat_grbyt_hi;
- u32 rx_stat_grund_lo;
- u32 rx_stat_grund_hi;
- u32 rx_stat_grfrg_lo;
- u32 rx_stat_grfrg_hi;
- u32 rx_stat_grerb_lo;
- u32 rx_stat_grerb_hi;
- u32 rx_stat_grfre_lo;
- u32 rx_stat_grfre_hi;
- u32 rx_stat_gripj_lo;
- u32 rx_stat_gripj_hi;
+ u32 tx_stat_gtpkt_lo;
+ u32 tx_stat_gtpkt_hi;
+ u32 tx_stat_gtxpf_lo;
+ u32 tx_stat_gtxpf_hi;
+ u32 tx_stat_gtfcs_lo;
+ u32 tx_stat_gtfcs_hi;
+ u32 tx_stat_gtmca_lo;
+ u32 tx_stat_gtmca_hi;
+ u32 tx_stat_gtbca_lo;
+ u32 tx_stat_gtbca_hi;
+ u32 tx_stat_gtfrg_lo;
+ u32 tx_stat_gtfrg_hi;
+ u32 tx_stat_gtovr_lo;
+ u32 tx_stat_gtovr_hi;
+ u32 tx_stat_gt64_lo;
+ u32 tx_stat_gt64_hi;
+ u32 tx_stat_gt127_lo;
+ u32 tx_stat_gt127_hi;
+ u32 tx_stat_gt255_lo;
+ u32 tx_stat_gt255_hi;
+ u32 tx_stat_gt511_lo;
+ u32 tx_stat_gt511_hi;
+ u32 tx_stat_gt1023_lo;
+ u32 tx_stat_gt1023_hi;
+ u32 tx_stat_gt1518_lo;
+ u32 tx_stat_gt1518_hi;
+ u32 tx_stat_gt2047_lo;
+ u32 tx_stat_gt2047_hi;
+ u32 tx_stat_gt4095_lo;
+ u32 tx_stat_gt4095_hi;
+ u32 tx_stat_gt9216_lo;
+ u32 tx_stat_gt9216_hi;
+ u32 tx_stat_gt16383_lo;
+ u32 tx_stat_gt16383_hi;
+ u32 tx_stat_gtmax_lo;
+ u32 tx_stat_gtmax_hi;
+ u32 tx_stat_gtufl_lo;
+ u32 tx_stat_gtufl_hi;
+ u32 tx_stat_gterr_lo;
+ u32 tx_stat_gterr_hi;
+ u32 tx_stat_gtbyt_lo;
+ u32 tx_stat_gtbyt_hi;
+
+ u32 rx_stat_gr64_lo;
+ u32 rx_stat_gr64_hi;
+ u32 rx_stat_gr127_lo;
+ u32 rx_stat_gr127_hi;
+ u32 rx_stat_gr255_lo;
+ u32 rx_stat_gr255_hi;
+ u32 rx_stat_gr511_lo;
+ u32 rx_stat_gr511_hi;
+ u32 rx_stat_gr1023_lo;
+ u32 rx_stat_gr1023_hi;
+ u32 rx_stat_gr1518_lo;
+ u32 rx_stat_gr1518_hi;
+ u32 rx_stat_gr2047_lo;
+ u32 rx_stat_gr2047_hi;
+ u32 rx_stat_gr4095_lo;
+ u32 rx_stat_gr4095_hi;
+ u32 rx_stat_gr9216_lo;
+ u32 rx_stat_gr9216_hi;
+ u32 rx_stat_gr16383_lo;
+ u32 rx_stat_gr16383_hi;
+ u32 rx_stat_grmax_lo;
+ u32 rx_stat_grmax_hi;
+ u32 rx_stat_grpkt_lo;
+ u32 rx_stat_grpkt_hi;
+ u32 rx_stat_grfcs_lo;
+ u32 rx_stat_grfcs_hi;
+ u32 rx_stat_grmca_lo;
+ u32 rx_stat_grmca_hi;
+ u32 rx_stat_grbca_lo;
+ u32 rx_stat_grbca_hi;
+ u32 rx_stat_grxcf_lo;
+ u32 rx_stat_grxcf_hi;
+ u32 rx_stat_grxpf_lo;
+ u32 rx_stat_grxpf_hi;
+ u32 rx_stat_grxuo_lo;
+ u32 rx_stat_grxuo_hi;
+ u32 rx_stat_grjbr_lo;
+ u32 rx_stat_grjbr_hi;
+ u32 rx_stat_grovr_lo;
+ u32 rx_stat_grovr_hi;
+ u32 rx_stat_grflr_lo;
+ u32 rx_stat_grflr_hi;
+ u32 rx_stat_grmeg_lo;
+ u32 rx_stat_grmeg_hi;
+ u32 rx_stat_grmeb_lo;
+ u32 rx_stat_grmeb_hi;
+ u32 rx_stat_grbyt_lo;
+ u32 rx_stat_grbyt_hi;
+ u32 rx_stat_grund_lo;
+ u32 rx_stat_grund_hi;
+ u32 rx_stat_grfrg_lo;
+ u32 rx_stat_grfrg_hi;
+ u32 rx_stat_grerb_lo;
+ u32 rx_stat_grerb_hi;
+ u32 rx_stat_grfre_lo;
+ u32 rx_stat_grfre_hi;
+ u32 rx_stat_gripj_lo;
+ u32 rx_stat_gripj_hi;
};
struct bmac2_stats {
@@ -1750,187 +2240,316 @@ struct bmac2_stats {
u32 rx_stat_gripj_hi;
};
+struct mstat_stats {
+ struct {
+ /* NOTE: MSTAT on E3 has a bug where this register's contents are
+ * actually tx_gtxpok + tx_gtxpf + (possibly) tx_gtxpp
+ */
+ u32 tx_gtxpok_lo;
+ u32 tx_gtxpok_hi;
+ u32 tx_gtxpf_lo;
+ u32 tx_gtxpf_hi;
+ u32 tx_gtxpp_lo;
+ u32 tx_gtxpp_hi;
+ u32 tx_gtfcs_lo;
+ u32 tx_gtfcs_hi;
+ u32 tx_gtuca_lo;
+ u32 tx_gtuca_hi;
+ u32 tx_gtmca_lo;
+ u32 tx_gtmca_hi;
+ u32 tx_gtgca_lo;
+ u32 tx_gtgca_hi;
+ u32 tx_gtpkt_lo;
+ u32 tx_gtpkt_hi;
+ u32 tx_gt64_lo;
+ u32 tx_gt64_hi;
+ u32 tx_gt127_lo;
+ u32 tx_gt127_hi;
+ u32 tx_gt255_lo;
+ u32 tx_gt255_hi;
+ u32 tx_gt511_lo;
+ u32 tx_gt511_hi;
+ u32 tx_gt1023_lo;
+ u32 tx_gt1023_hi;
+ u32 tx_gt1518_lo;
+ u32 tx_gt1518_hi;
+ u32 tx_gt2047_lo;
+ u32 tx_gt2047_hi;
+ u32 tx_gt4095_lo;
+ u32 tx_gt4095_hi;
+ u32 tx_gt9216_lo;
+ u32 tx_gt9216_hi;
+ u32 tx_gt16383_lo;
+ u32 tx_gt16383_hi;
+ u32 tx_gtufl_lo;
+ u32 tx_gtufl_hi;
+ u32 tx_gterr_lo;
+ u32 tx_gterr_hi;
+ u32 tx_gtbyt_lo;
+ u32 tx_gtbyt_hi;
+ u32 tx_collisions_lo;
+ u32 tx_collisions_hi;
+ u32 tx_singlecollision_lo;
+ u32 tx_singlecollision_hi;
+ u32 tx_multiplecollisions_lo;
+ u32 tx_multiplecollisions_hi;
+ u32 tx_deferred_lo;
+ u32 tx_deferred_hi;
+ u32 tx_excessivecollisions_lo;
+ u32 tx_excessivecollisions_hi;
+ u32 tx_latecollisions_lo;
+ u32 tx_latecollisions_hi;
+ } stats_tx;
+
+ struct {
+ u32 rx_gr64_lo;
+ u32 rx_gr64_hi;
+ u32 rx_gr127_lo;
+ u32 rx_gr127_hi;
+ u32 rx_gr255_lo;
+ u32 rx_gr255_hi;
+ u32 rx_gr511_lo;
+ u32 rx_gr511_hi;
+ u32 rx_gr1023_lo;
+ u32 rx_gr1023_hi;
+ u32 rx_gr1518_lo;
+ u32 rx_gr1518_hi;
+ u32 rx_gr2047_lo;
+ u32 rx_gr2047_hi;
+ u32 rx_gr4095_lo;
+ u32 rx_gr4095_hi;
+ u32 rx_gr9216_lo;
+ u32 rx_gr9216_hi;
+ u32 rx_gr16383_lo;
+ u32 rx_gr16383_hi;
+ u32 rx_grpkt_lo;
+ u32 rx_grpkt_hi;
+ u32 rx_grfcs_lo;
+ u32 rx_grfcs_hi;
+ u32 rx_gruca_lo;
+ u32 rx_gruca_hi;
+ u32 rx_grmca_lo;
+ u32 rx_grmca_hi;
+ u32 rx_grbca_lo;
+ u32 rx_grbca_hi;
+ u32 rx_grxpf_lo;
+ u32 rx_grxpf_hi;
+ u32 rx_grxpp_lo;
+ u32 rx_grxpp_hi;
+ u32 rx_grxuo_lo;
+ u32 rx_grxuo_hi;
+ u32 rx_grovr_lo;
+ u32 rx_grovr_hi;
+ u32 rx_grxcf_lo;
+ u32 rx_grxcf_hi;
+ u32 rx_grflr_lo;
+ u32 rx_grflr_hi;
+ u32 rx_grpok_lo;
+ u32 rx_grpok_hi;
+ u32 rx_grbyt_lo;
+ u32 rx_grbyt_hi;
+ u32 rx_grund_lo;
+ u32 rx_grund_hi;
+ u32 rx_grfrg_lo;
+ u32 rx_grfrg_hi;
+ u32 rx_grerb_lo;
+ u32 rx_grerb_hi;
+ u32 rx_grfre_lo;
+ u32 rx_grfre_hi;
+
+ u32 rx_alignmenterrors_lo;
+ u32 rx_alignmenterrors_hi;
+ u32 rx_falsecarrier_lo;
+ u32 rx_falsecarrier_hi;
+ u32 rx_llfcmsgcnt_lo;
+ u32 rx_llfcmsgcnt_hi;
+ } stats_rx;
+};
+
union mac_stats {
- struct emac_stats emac_stats;
- struct bmac1_stats bmac1_stats;
- struct bmac2_stats bmac2_stats;
+ struct emac_stats emac_stats;
+ struct bmac1_stats bmac1_stats;
+ struct bmac2_stats bmac2_stats;
+ struct mstat_stats mstat_stats;
};
struct mac_stx {
- /* in_bad_octets */
- u32 rx_stat_ifhcinbadoctets_hi;
- u32 rx_stat_ifhcinbadoctets_lo;
-
- /* out_bad_octets */
- u32 tx_stat_ifhcoutbadoctets_hi;
- u32 tx_stat_ifhcoutbadoctets_lo;
-
- /* crc_receive_errors */
- u32 rx_stat_dot3statsfcserrors_hi;
- u32 rx_stat_dot3statsfcserrors_lo;
- /* alignment_errors */
- u32 rx_stat_dot3statsalignmenterrors_hi;
- u32 rx_stat_dot3statsalignmenterrors_lo;
- /* carrier_sense_errors */
- u32 rx_stat_dot3statscarriersenseerrors_hi;
- u32 rx_stat_dot3statscarriersenseerrors_lo;
- /* false_carrier_detections */
- u32 rx_stat_falsecarriererrors_hi;
- u32 rx_stat_falsecarriererrors_lo;
-
- /* runt_packets_received */
- u32 rx_stat_etherstatsundersizepkts_hi;
- u32 rx_stat_etherstatsundersizepkts_lo;
- /* jabber_packets_received */
- u32 rx_stat_dot3statsframestoolong_hi;
- u32 rx_stat_dot3statsframestoolong_lo;
-
- /* error_runt_packets_received */
- u32 rx_stat_etherstatsfragments_hi;
- u32 rx_stat_etherstatsfragments_lo;
- /* error_jabber_packets_received */
- u32 rx_stat_etherstatsjabbers_hi;
- u32 rx_stat_etherstatsjabbers_lo;
-
- /* control_frames_received */
- u32 rx_stat_maccontrolframesreceived_hi;
- u32 rx_stat_maccontrolframesreceived_lo;
- u32 rx_stat_bmac_xpf_hi;
- u32 rx_stat_bmac_xpf_lo;
- u32 rx_stat_bmac_xcf_hi;
- u32 rx_stat_bmac_xcf_lo;
-
- /* xoff_state_entered */
- u32 rx_stat_xoffstateentered_hi;
- u32 rx_stat_xoffstateentered_lo;
- /* pause_xon_frames_received */
- u32 rx_stat_xonpauseframesreceived_hi;
- u32 rx_stat_xonpauseframesreceived_lo;
- /* pause_xoff_frames_received */
- u32 rx_stat_xoffpauseframesreceived_hi;
- u32 rx_stat_xoffpauseframesreceived_lo;
- /* pause_xon_frames_transmitted */
- u32 tx_stat_outxonsent_hi;
- u32 tx_stat_outxonsent_lo;
- /* pause_xoff_frames_transmitted */
- u32 tx_stat_outxoffsent_hi;
- u32 tx_stat_outxoffsent_lo;
- /* flow_control_done */
- u32 tx_stat_flowcontroldone_hi;
- u32 tx_stat_flowcontroldone_lo;
-
- /* ether_stats_collisions */
- u32 tx_stat_etherstatscollisions_hi;
- u32 tx_stat_etherstatscollisions_lo;
- /* single_collision_transmit_frames */
- u32 tx_stat_dot3statssinglecollisionframes_hi;
- u32 tx_stat_dot3statssinglecollisionframes_lo;
- /* multiple_collision_transmit_frames */
- u32 tx_stat_dot3statsmultiplecollisionframes_hi;
- u32 tx_stat_dot3statsmultiplecollisionframes_lo;
- /* deferred_transmissions */
- u32 tx_stat_dot3statsdeferredtransmissions_hi;
- u32 tx_stat_dot3statsdeferredtransmissions_lo;
- /* excessive_collision_frames */
- u32 tx_stat_dot3statsexcessivecollisions_hi;
- u32 tx_stat_dot3statsexcessivecollisions_lo;
- /* late_collision_frames */
- u32 tx_stat_dot3statslatecollisions_hi;
- u32 tx_stat_dot3statslatecollisions_lo;
-
- /* frames_transmitted_64_bytes */
- u32 tx_stat_etherstatspkts64octets_hi;
- u32 tx_stat_etherstatspkts64octets_lo;
- /* frames_transmitted_65_127_bytes */
- u32 tx_stat_etherstatspkts65octetsto127octets_hi;
- u32 tx_stat_etherstatspkts65octetsto127octets_lo;
- /* frames_transmitted_128_255_bytes */
- u32 tx_stat_etherstatspkts128octetsto255octets_hi;
- u32 tx_stat_etherstatspkts128octetsto255octets_lo;
- /* frames_transmitted_256_511_bytes */
- u32 tx_stat_etherstatspkts256octetsto511octets_hi;
- u32 tx_stat_etherstatspkts256octetsto511octets_lo;
- /* frames_transmitted_512_1023_bytes */
- u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
- u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
- /* frames_transmitted_1024_1522_bytes */
- u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
- u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
- /* frames_transmitted_1523_9022_bytes */
- u32 tx_stat_etherstatspktsover1522octets_hi;
- u32 tx_stat_etherstatspktsover1522octets_lo;
- u32 tx_stat_bmac_2047_hi;
- u32 tx_stat_bmac_2047_lo;
- u32 tx_stat_bmac_4095_hi;
- u32 tx_stat_bmac_4095_lo;
- u32 tx_stat_bmac_9216_hi;
- u32 tx_stat_bmac_9216_lo;
- u32 tx_stat_bmac_16383_hi;
- u32 tx_stat_bmac_16383_lo;
-
- /* internal_mac_transmit_errors */
- u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
- u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
-
- /* if_out_discards */
- u32 tx_stat_bmac_ufl_hi;
- u32 tx_stat_bmac_ufl_lo;
-};
-
-
-#define MAC_STX_IDX_MAX 2
+ /* in_bad_octets */
+ u32 rx_stat_ifhcinbadoctets_hi;
+ u32 rx_stat_ifhcinbadoctets_lo;
+
+ /* out_bad_octets */
+ u32 tx_stat_ifhcoutbadoctets_hi;
+ u32 tx_stat_ifhcoutbadoctets_lo;
+
+ /* crc_receive_errors */
+ u32 rx_stat_dot3statsfcserrors_hi;
+ u32 rx_stat_dot3statsfcserrors_lo;
+ /* alignment_errors */
+ u32 rx_stat_dot3statsalignmenterrors_hi;
+ u32 rx_stat_dot3statsalignmenterrors_lo;
+ /* carrier_sense_errors */
+ u32 rx_stat_dot3statscarriersenseerrors_hi;
+ u32 rx_stat_dot3statscarriersenseerrors_lo;
+ /* false_carrier_detections */
+ u32 rx_stat_falsecarriererrors_hi;
+ u32 rx_stat_falsecarriererrors_lo;
+
+ /* runt_packets_received */
+ u32 rx_stat_etherstatsundersizepkts_hi;
+ u32 rx_stat_etherstatsundersizepkts_lo;
+ /* jabber_packets_received */
+ u32 rx_stat_dot3statsframestoolong_hi;
+ u32 rx_stat_dot3statsframestoolong_lo;
+
+ /* error_runt_packets_received */
+ u32 rx_stat_etherstatsfragments_hi;
+ u32 rx_stat_etherstatsfragments_lo;
+ /* error_jabber_packets_received */
+ u32 rx_stat_etherstatsjabbers_hi;
+ u32 rx_stat_etherstatsjabbers_lo;
+
+ /* control_frames_received */
+ u32 rx_stat_maccontrolframesreceived_hi;
+ u32 rx_stat_maccontrolframesreceived_lo;
+ u32 rx_stat_mac_xpf_hi;
+ u32 rx_stat_mac_xpf_lo;
+ u32 rx_stat_mac_xcf_hi;
+ u32 rx_stat_mac_xcf_lo;
+
+ /* xoff_state_entered */
+ u32 rx_stat_xoffstateentered_hi;
+ u32 rx_stat_xoffstateentered_lo;
+ /* pause_xon_frames_received */
+ u32 rx_stat_xonpauseframesreceived_hi;
+ u32 rx_stat_xonpauseframesreceived_lo;
+ /* pause_xoff_frames_received */
+ u32 rx_stat_xoffpauseframesreceived_hi;
+ u32 rx_stat_xoffpauseframesreceived_lo;
+ /* pause_xon_frames_transmitted */
+ u32 tx_stat_outxonsent_hi;
+ u32 tx_stat_outxonsent_lo;
+ /* pause_xoff_frames_transmitted */
+ u32 tx_stat_outxoffsent_hi;
+ u32 tx_stat_outxoffsent_lo;
+ /* flow_control_done */
+ u32 tx_stat_flowcontroldone_hi;
+ u32 tx_stat_flowcontroldone_lo;
+
+ /* ether_stats_collisions */
+ u32 tx_stat_etherstatscollisions_hi;
+ u32 tx_stat_etherstatscollisions_lo;
+ /* single_collision_transmit_frames */
+ u32 tx_stat_dot3statssinglecollisionframes_hi;
+ u32 tx_stat_dot3statssinglecollisionframes_lo;
+ /* multiple_collision_transmit_frames */
+ u32 tx_stat_dot3statsmultiplecollisionframes_hi;
+ u32 tx_stat_dot3statsmultiplecollisionframes_lo;
+ /* deferred_transmissions */
+ u32 tx_stat_dot3statsdeferredtransmissions_hi;
+ u32 tx_stat_dot3statsdeferredtransmissions_lo;
+ /* excessive_collision_frames */
+ u32 tx_stat_dot3statsexcessivecollisions_hi;
+ u32 tx_stat_dot3statsexcessivecollisions_lo;
+ /* late_collision_frames */
+ u32 tx_stat_dot3statslatecollisions_hi;
+ u32 tx_stat_dot3statslatecollisions_lo;
+
+ /* frames_transmitted_64_bytes */
+ u32 tx_stat_etherstatspkts64octets_hi;
+ u32 tx_stat_etherstatspkts64octets_lo;
+ /* frames_transmitted_65_127_bytes */
+ u32 tx_stat_etherstatspkts65octetsto127octets_hi;
+ u32 tx_stat_etherstatspkts65octetsto127octets_lo;
+ /* frames_transmitted_128_255_bytes */
+ u32 tx_stat_etherstatspkts128octetsto255octets_hi;
+ u32 tx_stat_etherstatspkts128octetsto255octets_lo;
+ /* frames_transmitted_256_511_bytes */
+ u32 tx_stat_etherstatspkts256octetsto511octets_hi;
+ u32 tx_stat_etherstatspkts256octetsto511octets_lo;
+ /* frames_transmitted_512_1023_bytes */
+ u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
+ u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
+ /* frames_transmitted_1024_1522_bytes */
+ u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
+ u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
+ /* frames_transmitted_1523_9022_bytes */
+ u32 tx_stat_etherstatspktsover1522octets_hi;
+ u32 tx_stat_etherstatspktsover1522octets_lo;
+ u32 tx_stat_mac_2047_hi;
+ u32 tx_stat_mac_2047_lo;
+ u32 tx_stat_mac_4095_hi;
+ u32 tx_stat_mac_4095_lo;
+ u32 tx_stat_mac_9216_hi;
+ u32 tx_stat_mac_9216_lo;
+ u32 tx_stat_mac_16383_hi;
+ u32 tx_stat_mac_16383_lo;
+
+ /* internal_mac_transmit_errors */
+ u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
+ u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
+
+ /* if_out_discards */
+ u32 tx_stat_mac_ufl_hi;
+ u32 tx_stat_mac_ufl_lo;
+};
+
+
+#define MAC_STX_IDX_MAX 2
struct host_port_stats {
- u32 host_port_stats_start;
+ u32 host_port_stats_start;
- struct mac_stx mac_stx[MAC_STX_IDX_MAX];
+ struct mac_stx mac_stx[MAC_STX_IDX_MAX];
- u32 brb_drop_hi;
- u32 brb_drop_lo;
+ u32 brb_drop_hi;
+ u32 brb_drop_lo;
- u32 host_port_stats_end;
+ u32 host_port_stats_end;
};
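The statistics blocks above keep 64-bit counters as _hi/_lo u32 pairs. Folding a pair into one value is a shift-and-or, sketched below with an illustrative helper name that is not part of this patch:

/* Illustrative only: fold a _hi/_lo counter pair into a 64-bit value. */
static inline u64 example_stats64(u32 hi, u32 lo)
{
	return ((u64)hi << 32) | lo;
}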
struct host_func_stats {
- u32 host_func_stats_start;
+ u32 host_func_stats_start;
- u32 total_bytes_received_hi;
- u32 total_bytes_received_lo;
+ u32 total_bytes_received_hi;
+ u32 total_bytes_received_lo;
- u32 total_bytes_transmitted_hi;
- u32 total_bytes_transmitted_lo;
+ u32 total_bytes_transmitted_hi;
+ u32 total_bytes_transmitted_lo;
- u32 total_unicast_packets_received_hi;
- u32 total_unicast_packets_received_lo;
+ u32 total_unicast_packets_received_hi;
+ u32 total_unicast_packets_received_lo;
- u32 total_multicast_packets_received_hi;
- u32 total_multicast_packets_received_lo;
+ u32 total_multicast_packets_received_hi;
+ u32 total_multicast_packets_received_lo;
- u32 total_broadcast_packets_received_hi;
- u32 total_broadcast_packets_received_lo;
+ u32 total_broadcast_packets_received_hi;
+ u32 total_broadcast_packets_received_lo;
- u32 total_unicast_packets_transmitted_hi;
- u32 total_unicast_packets_transmitted_lo;
+ u32 total_unicast_packets_transmitted_hi;
+ u32 total_unicast_packets_transmitted_lo;
- u32 total_multicast_packets_transmitted_hi;
- u32 total_multicast_packets_transmitted_lo;
+ u32 total_multicast_packets_transmitted_hi;
+ u32 total_multicast_packets_transmitted_lo;
- u32 total_broadcast_packets_transmitted_hi;
- u32 total_broadcast_packets_transmitted_lo;
+ u32 total_broadcast_packets_transmitted_hi;
+ u32 total_broadcast_packets_transmitted_lo;
- u32 valid_bytes_received_hi;
- u32 valid_bytes_received_lo;
+ u32 valid_bytes_received_hi;
+ u32 valid_bytes_received_lo;
- u32 host_func_stats_end;
+ u32 host_func_stats_end;
};
+/* VIC definitions */
+#define VICSTATST_UIF_INDEX 2
-#define BCM_5710_FW_MAJOR_VERSION 6
-#define BCM_5710_FW_MINOR_VERSION 2
-#define BCM_5710_FW_REVISION_VERSION 9
-#define BCM_5710_FW_ENGINEERING_VERSION 0
+#define BCM_5710_FW_MAJOR_VERSION 7
+#define BCM_5710_FW_MINOR_VERSION 0
+#define BCM_5710_FW_REVISION_VERSION 23
+#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -1948,6 +2567,115 @@ struct atten_sp_status_block {
/*
+ * The eth aggregative context of Cstorm
+ */
+struct cstorm_eth_ag_context {
+ u32 __reserved0[10];
+};
+
+
+/*
+ * dmae command structure
+ */
+struct dmae_command {
+ u32 opcode;
+#define DMAE_COMMAND_SRC (0x1<<0)
+#define DMAE_COMMAND_SRC_SHIFT 0
+#define DMAE_COMMAND_DST (0x3<<1)
+#define DMAE_COMMAND_DST_SHIFT 1
+#define DMAE_COMMAND_C_DST (0x1<<3)
+#define DMAE_COMMAND_C_DST_SHIFT 3
+#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4)
+#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5)
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6)
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6
+#define DMAE_COMMAND_ENDIANITY (0x3<<9)
+#define DMAE_COMMAND_ENDIANITY_SHIFT 9
+#define DMAE_COMMAND_PORT (0x1<<11)
+#define DMAE_COMMAND_PORT_SHIFT 11
+#define DMAE_COMMAND_CRC_RESET (0x1<<12)
+#define DMAE_COMMAND_CRC_RESET_SHIFT 12
+#define DMAE_COMMAND_SRC_RESET (0x1<<13)
+#define DMAE_COMMAND_SRC_RESET_SHIFT 13
+#define DMAE_COMMAND_DST_RESET (0x1<<14)
+#define DMAE_COMMAND_DST_RESET_SHIFT 14
+#define DMAE_COMMAND_E1HVN (0x3<<15)
+#define DMAE_COMMAND_E1HVN_SHIFT 15
+#define DMAE_COMMAND_DST_VN (0x3<<17)
+#define DMAE_COMMAND_DST_VN_SHIFT 17
+#define DMAE_COMMAND_C_FUNC (0x1<<19)
+#define DMAE_COMMAND_C_FUNC_SHIFT 19
+#define DMAE_COMMAND_ERR_POLICY (0x3<<20)
+#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
+#define DMAE_COMMAND_RESERVED0 (0x3FF<<22)
+#define DMAE_COMMAND_RESERVED0_SHIFT 22
+ u32 src_addr_lo;
+ u32 src_addr_hi;
+ u32 dst_addr_lo;
+ u32 dst_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0)
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6)
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7)
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8)
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14)
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15)
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+ u16 len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 len;
+ u16 opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0)
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6)
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7)
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8)
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14)
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15)
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+#endif
+ u32 comp_addr_lo;
+ u32 comp_addr_hi;
+ u32 comp_val;
+ u32 crc32;
+ u32 crc32_c;
+#if defined(__BIG_ENDIAN)
+ u16 crc16_c;
+ u16 crc16;
+#elif defined(__LITTLE_ENDIAN)
+ u16 crc16;
+ u16 crc16_c;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u16 crc_t10;
+#elif defined(__LITTLE_ENDIAN)
+ u16 crc_t10;
+ u16 reserved3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 xsum8;
+ u16 xsum16;
+#elif defined(__LITTLE_ENDIAN)
+ u16 xsum16;
+ u16 xsum8;
+#endif
+};
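
Every multi-bit sub-field of the opcode above is described by a (mask << shift) macro pair. Below is a minimal user-space sketch of that convention, with stdint typedefs standing in for the kernel's u32 and only two of the mask/shift pairs reproduced from the structure; the packed values are arbitrary examples, not values the firmware requires.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* mask/shift pairs reproduced from struct dmae_command above */
#define DMAE_COMMAND_DST                (0x3<<1)
#define DMAE_COMMAND_DST_SHIFT          1
#define DMAE_COMMAND_ENDIANITY          (0x3<<9)
#define DMAE_COMMAND_ENDIANITY_SHIFT    9

int main(void)
{
        u32 opcode = 0;

        /* pack two sub-fields; the values 2 and 3 are arbitrary examples */
        opcode |= (2u << DMAE_COMMAND_DST_SHIFT) & DMAE_COMMAND_DST;
        opcode |= (3u << DMAE_COMMAND_ENDIANITY_SHIFT) & DMAE_COMMAND_ENDIANITY;

        /* unpack one sub-field again */
        printf("dst = %u\n",
               (unsigned)((opcode & DMAE_COMMAND_DST) >> DMAE_COMMAND_DST_SHIFT));
        return 0;
}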
+
+
+/*
* common data for all protocols
*/
struct doorbell_hdr {
@@ -1963,33 +2691,29 @@ struct doorbell_hdr {
};
/*
- * doorbell message sent to the chip
- */
-struct doorbell {
-#if defined(__BIG_ENDIAN)
- u16 zero_fill2;
- u8 zero_fill1;
- struct doorbell_hdr header;
-#elif defined(__LITTLE_ENDIAN)
- struct doorbell_hdr header;
- u8 zero_fill1;
- u16 zero_fill2;
-#endif
-};
-
-
-/*
- * doorbell message sent to the chip
+ * Ethernet doorbell
*/
-struct doorbell_set_prod {
+struct eth_tx_doorbell {
#if defined(__BIG_ENDIAN)
- u16 prod;
- u8 zero_fill1;
- struct doorbell_hdr header;
+ u16 npackets;
+ u8 params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7)
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+ struct doorbell_hdr hdr;
#elif defined(__LITTLE_ENDIAN)
- struct doorbell_hdr header;
- u8 zero_fill1;
- u16 prod;
+ struct doorbell_hdr hdr;
+ u8 params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7)
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+ u16 npackets;
#endif
};
@@ -2000,7 +2724,7 @@ struct doorbell_set_prod {
struct hc_status_block_e1x {
__le16 index_values[HC_SB_MAX_INDICES_E1X];
__le16 running_index[HC_SB_MAX_SM];
- u32 rsrv;
+ __le32 rsrv[11];
};
/*
@@ -2017,7 +2741,7 @@ struct host_hc_status_block_e1x {
struct hc_status_block_e2 {
__le16 index_values[HC_SB_MAX_INDICES_E2];
__le16 running_index[HC_SB_MAX_SM];
- u32 reserved;
+ __le32 reserved[11];
};
/*
@@ -2138,6 +2862,16 @@ union igu_consprod_reg {
/*
+ * IGU control commands
+ */
+enum igu_ctrl_cmd {
+ IGU_CTRL_CMD_TYPE_RD,
+ IGU_CTRL_CMD_TYPE_WR,
+ MAX_IGU_CTRL_CMD
+};
+
+
+/*
* Control register for the IGU command register
*/
struct igu_ctrl_reg {
@@ -2156,6 +2890,29 @@ struct igu_ctrl_reg {
/*
+ * Igu interrupt command
+ */
+enum igu_int_cmd {
+ IGU_INT_ENABLE,
+ IGU_INT_DISABLE,
+ IGU_INT_NOP,
+ IGU_INT_NOP2,
+ MAX_IGU_INT_CMD
+};
+
+
+/*
+ * Igu segments
+ */
+enum igu_seg_access {
+ IGU_SEG_ACCESS_NORM,
+ IGU_SEG_ACCESS_DEF,
+ IGU_SEG_ACCESS_ATTN,
+ MAX_IGU_SEG_ACCESS
+};
+
+
+/*
* Parser parsing flags field
*/
struct parsing_flags {
@@ -2189,94 +2946,46 @@ struct parsing_flags {
};
-struct regpair {
- __le32 lo;
- __le32 hi;
+/*
+ * Parsing flags for TCP ACK type
+ */
+enum prs_flags_ack_type {
+ PRS_FLAG_PUREACK_PIGGY,
+ PRS_FLAG_PUREACK_PURE,
+ MAX_PRS_FLAGS_ACK_TYPE
};
/*
- * dmae command structure
+ * Parsing flags for Ethernet address type
*/
-struct dmae_command {
- u32 opcode;
-#define DMAE_COMMAND_SRC (0x1<<0)
-#define DMAE_COMMAND_SRC_SHIFT 0
-#define DMAE_COMMAND_DST (0x3<<1)
-#define DMAE_COMMAND_DST_SHIFT 1
-#define DMAE_COMMAND_C_DST (0x1<<3)
-#define DMAE_COMMAND_C_DST_SHIFT 3
-#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4)
-#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4
-#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5)
-#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5
-#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6)
-#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6
-#define DMAE_COMMAND_ENDIANITY (0x3<<9)
-#define DMAE_COMMAND_ENDIANITY_SHIFT 9
-#define DMAE_COMMAND_PORT (0x1<<11)
-#define DMAE_COMMAND_PORT_SHIFT 11
-#define DMAE_COMMAND_CRC_RESET (0x1<<12)
-#define DMAE_COMMAND_CRC_RESET_SHIFT 12
-#define DMAE_COMMAND_SRC_RESET (0x1<<13)
-#define DMAE_COMMAND_SRC_RESET_SHIFT 13
-#define DMAE_COMMAND_DST_RESET (0x1<<14)
-#define DMAE_COMMAND_DST_RESET_SHIFT 14
-#define DMAE_COMMAND_E1HVN (0x3<<15)
-#define DMAE_COMMAND_E1HVN_SHIFT 15
-#define DMAE_COMMAND_DST_VN (0x3<<17)
-#define DMAE_COMMAND_DST_VN_SHIFT 17
-#define DMAE_COMMAND_C_FUNC (0x1<<19)
-#define DMAE_COMMAND_C_FUNC_SHIFT 19
-#define DMAE_COMMAND_ERR_POLICY (0x3<<20)
-#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
-#define DMAE_COMMAND_RESERVED0 (0x3FF<<22)
-#define DMAE_COMMAND_RESERVED0_SHIFT 22
- u32 src_addr_lo;
- u32 src_addr_hi;
- u32 dst_addr_lo;
- u32 dst_addr_hi;
-#if defined(__BIG_ENDIAN)
- u16 reserved1;
- u16 len;
-#elif defined(__LITTLE_ENDIAN)
- u16 len;
- u16 reserved1;
-#endif
- u32 comp_addr_lo;
- u32 comp_addr_hi;
- u32 comp_val;
- u32 crc32;
- u32 crc32_c;
-#if defined(__BIG_ENDIAN)
- u16 crc16_c;
- u16 crc16;
-#elif defined(__LITTLE_ENDIAN)
- u16 crc16;
- u16 crc16_c;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 reserved3;
- u16 crc_t10;
-#elif defined(__LITTLE_ENDIAN)
- u16 crc_t10;
- u16 reserved3;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 xsum8;
- u16 xsum16;
-#elif defined(__LITTLE_ENDIAN)
- u16 xsum16;
- u16 xsum8;
-#endif
+enum prs_flags_eth_addr_type {
+ PRS_FLAG_ETHTYPE_NON_UNICAST,
+ PRS_FLAG_ETHTYPE_UNICAST,
+ MAX_PRS_FLAGS_ETH_ADDR_TYPE
};
-struct double_regpair {
- u32 regpair0_lo;
- u32 regpair0_hi;
- u32 regpair1_lo;
- u32 regpair1_hi;
+/*
+ * Parsing flags for over-ethernet protocol
+ */
+enum prs_flags_over_eth {
+ PRS_FLAG_OVERETH_UNKNOWN,
+ PRS_FLAG_OVERETH_IPV4,
+ PRS_FLAG_OVERETH_IPV6,
+ PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN,
+ MAX_PRS_FLAGS_OVER_ETH
+};
+
+
+/*
+ * Parsing flags for over-IP protocol
+ */
+enum prs_flags_over_ip {
+ PRS_FLAG_OVERIP_UNKNOWN,
+ PRS_FLAG_OVERIP_TCP,
+ PRS_FLAG_OVERIP_UDP,
+ MAX_PRS_FLAGS_OVER_IP
};
@@ -2297,54 +3006,23 @@ struct sdm_op_gen {
#define SDM_OP_GEN_RESERVED_SHIFT 17
};
-/*
- * The eth Rx Buffer Descriptor
- */
-struct eth_rx_bd {
- __le32 addr_lo;
- __le32 addr_hi;
-};
/*
- * The eth Rx SGE Descriptor
- */
-struct eth_rx_sge {
- __le32 addr_lo;
- __le32 addr_hi;
-};
-
-
-
-/*
- * The eth storm context of Ustorm
- */
-struct ustorm_eth_st_context {
- u32 reserved0[48];
-};
-
-/*
- * The eth storm context of Tstorm
+ * Timers connection context
*/
-struct tstorm_eth_st_context {
- u32 __reserved0[28];
+struct timers_block_context {
+ u32 __reserved_0;
+ u32 __reserved_1;
+ u32 __reserved_2;
+ u32 flags;
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
};
-/*
- * The eth aggregative context of Xstorm
- */
-struct xstorm_eth_ag_context {
- u32 reserved0;
-#if defined(__BIG_ENDIAN)
- u8 cdu_reserved;
- u8 reserved2;
- u16 reserved1;
-#elif defined(__LITTLE_ENDIAN)
- u16 reserved1;
- u8 reserved2;
- u8 cdu_reserved;
-#endif
- u32 reserved3[30];
-};
/*
* The eth aggregative context of Tstorm
@@ -2355,14 +3033,6 @@ struct tstorm_eth_ag_context {
/*
- * The eth aggregative context of Cstorm
- */
-struct cstorm_eth_ag_context {
- u32 __reserved0[10];
-};
-
-
-/*
* The eth aggregative context of Ustorm
*/
struct ustorm_eth_ag_context {
@@ -2379,229 +3049,81 @@ struct ustorm_eth_ag_context {
u32 __reserved3[6];
};
-/*
- * Timers connection context
- */
-struct timers_block_context {
- u32 __reserved_0;
- u32 __reserved_1;
- u32 __reserved_2;
- u32 flags;
-#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
-#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
-#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
-#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
-#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
-#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
-};
/*
- * structure for easy accessibility to assembler
- */
-struct eth_tx_bd_flags {
- u8 as_bitfield;
-#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0)
-#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0
-#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1)
-#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1
-#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2)
-#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2
-#define ETH_TX_BD_FLAGS_START_BD (0x1<<4)
-#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
-#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5)
-#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5
-#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6)
-#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
-#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7)
-#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7
-};
-
-/*
- * The eth Tx Buffer Descriptor
- */
-struct eth_tx_start_bd {
- __le32 addr_lo;
- __le32 addr_hi;
- __le16 nbd;
- __le16 nbytes;
- __le16 vlan_or_ethertype;
- struct eth_tx_bd_flags bd_flags;
- u8 general_data;
-#define ETH_TX_START_BD_HDR_NBDS (0x3F<<0)
-#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
-#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6)
-#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6
-};
-
-/*
- * Tx regular BD structure
- */
-struct eth_tx_bd {
- __le32 addr_lo;
- __le32 addr_hi;
- __le16 total_pkt_bytes;
- __le16 nbytes;
- u8 reserved[4];
-};
-
-/*
- * Tx parsing BD structure for ETH E1/E1h
- */
-struct eth_tx_parse_bd_e1x {
- u8 global_data;
-#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
-#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4)
-#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4
-#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
-#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
-#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6)
-#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6
-#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7)
-#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7
- u8 tcp_flags;
-#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
-#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
-#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1)
-#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1
-#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2)
-#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2
-#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3)
-#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3
-#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4)
-#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4
-#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5)
-#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5
-#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6)
-#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6
-#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
-#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
- u8 ip_hlen_w;
- s8 reserved;
- __le16 total_hlen_w;
- __le16 tcp_pseudo_csum;
- __le16 lso_mss;
- __le16 ip_id;
- __le32 tcp_send_seq;
-};
-
-/*
- * Tx parsing BD structure for ETH E2
+ * The eth aggregative context of Xstorm
*/
-struct eth_tx_parse_bd_e2 {
- __le16 dst_mac_addr_lo;
- __le16 dst_mac_addr_mid;
- __le16 dst_mac_addr_hi;
- __le16 src_mac_addr_lo;
- __le16 src_mac_addr_mid;
- __le16 src_mac_addr_hi;
- __le32 parsing_data;
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0)
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13)
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13
-#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17)
-#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17
-#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31)
-#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31
+struct xstorm_eth_ag_context {
+ u32 reserved0;
+#if defined(__BIG_ENDIAN)
+ u8 cdu_reserved;
+ u8 reserved2;
+ u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved1;
+ u8 reserved2;
+ u8 cdu_reserved;
+#endif
+ u32 reserved3[30];
};
-/*
- * The last BD in the BD memory will hold a pointer to the next BD memory
- */
-struct eth_tx_next_bd {
- __le32 addr_lo;
- __le32 addr_hi;
- u8 reserved[8];
-};
/*
- * union for 4 Bd types
+ * doorbell message sent to the chip
*/
-union eth_tx_bd_types {
- struct eth_tx_start_bd start_bd;
- struct eth_tx_bd reg_bd;
- struct eth_tx_parse_bd_e1x parse_bd_e1x;
- struct eth_tx_parse_bd_e2 parse_bd_e2;
- struct eth_tx_next_bd next_bd;
+struct doorbell {
+#if defined(__BIG_ENDIAN)
+ u16 zero_fill2;
+ u8 zero_fill1;
+ struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct doorbell_hdr header;
+ u8 zero_fill1;
+ u16 zero_fill2;
+#endif
};
/*
- * The eth storm context of Xstorm
+ * doorbell message sent to the chip
*/
-struct xstorm_eth_st_context {
- u32 reserved0[60];
+struct doorbell_set_prod {
+#if defined(__BIG_ENDIAN)
+ u16 prod;
+ u8 zero_fill1;
+ struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct doorbell_hdr header;
+ u8 zero_fill1;
+ u16 prod;
+#endif
};
-/*
- * The eth storm context of Cstorm
- */
-struct cstorm_eth_st_context {
- u32 __reserved0[4];
-};
-/*
- * Ethernet connection context
- */
-struct eth_context {
- struct ustorm_eth_st_context ustorm_st_context;
- struct tstorm_eth_st_context tstorm_st_context;
- struct xstorm_eth_ag_context xstorm_ag_context;
- struct tstorm_eth_ag_context tstorm_ag_context;
- struct cstorm_eth_ag_context cstorm_ag_context;
- struct ustorm_eth_ag_context ustorm_ag_context;
- struct timers_block_context timers_context;
- struct xstorm_eth_st_context xstorm_st_context;
- struct cstorm_eth_st_context cstorm_st_context;
+struct regpair {
+ __le32 lo;
+ __le32 hi;
};
/*
- * Ethernet doorbell
+ * Classify rule opcodes in E2/E3
*/
-struct eth_tx_doorbell {
-#if defined(__BIG_ENDIAN)
- u16 npackets;
- u8 params;
-#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
-#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
-#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
-#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
-#define ETH_TX_DOORBELL_SPARE (0x1<<7)
-#define ETH_TX_DOORBELL_SPARE_SHIFT 7
- struct doorbell_hdr hdr;
-#elif defined(__LITTLE_ENDIAN)
- struct doorbell_hdr hdr;
- u8 params;
-#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
-#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
-#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
-#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
-#define ETH_TX_DOORBELL_SPARE (0x1<<7)
-#define ETH_TX_DOORBELL_SPARE_SHIFT 7
- u16 npackets;
-#endif
+enum classify_rule {
+ CLASSIFY_RULE_OPCODE_MAC,
+ CLASSIFY_RULE_OPCODE_VLAN,
+ CLASSIFY_RULE_OPCODE_PAIR,
+ MAX_CLASSIFY_RULE
};
/*
- * client init fc data
+ * Classify rule types in E2/E3
*/
-struct client_init_fc_data {
- __le16 cqe_pause_thr_low;
- __le16 cqe_pause_thr_high;
- __le16 bd_pause_thr_low;
- __le16 bd_pause_thr_high;
- __le16 sge_pause_thr_low;
- __le16 sge_pause_thr_high;
- __le16 rx_cos_mask;
- u8 safc_group_num;
- u8 safc_group_en_flg;
- u8 traffic_type;
- u8 reserved0;
- __le16 reserved1;
- __le32 reserved2;
+enum classify_rule_action_type {
+ CLASSIFY_RULE_REMOVE,
+ CLASSIFY_RULE_ADD,
+ MAX_CLASSIFY_RULE_ACTION_TYPE
};
@@ -2615,8 +3137,12 @@ struct client_init_general_data {
u8 is_fcoe_flg;
u8 activate_flg;
u8 sp_client_id;
- __le16 reserved0;
- __le32 reserved1[2];
+ __le16 mtu;
+ u8 statistics_zero_flg;
+ u8 func_id;
+ u8 cos;
+ u8 traffic_type;
+ u32 reserved0;
};
@@ -2624,7 +3150,13 @@ struct client_init_general_data {
* client init rx data
*/
struct client_init_rx_data {
- u8 tpa_en_flg;
+ u8 tpa_en;
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0)
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1)
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2)
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2
u8 vmqueue_mode_en_flg;
u8 extra_data_over_sgl_en_flg;
u8 cache_line_alignment_log_size;
@@ -2639,17 +3171,46 @@ struct client_init_rx_data {
u8 outer_vlan_removal_enable_flg;
u8 status_block_id;
u8 rx_sb_index_number;
- u8 reserved0[3];
- __le16 bd_buff_size;
+ u8 reserved0;
+ u8 max_tpa_queues;
+ u8 silent_vlan_removal_flg;
+ __le16 max_bytes_on_bd;
__le16 sge_buff_size;
- __le16 mtu;
+ u8 approx_mcast_engine_id;
+ u8 rss_engine_id;
struct regpair bd_page_base;
struct regpair sge_page_base;
struct regpair cqe_page_base;
u8 is_leading_rss;
u8 is_approx_mcast;
__le16 max_agg_size;
- __le32 reserved2[3];
+ __le16 state;
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0)
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1)
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2)
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3)
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4)
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5)
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6)
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6
+#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7)
+#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7
+ __le16 cqe_pause_thr_low;
+ __le16 cqe_pause_thr_high;
+ __le16 bd_pause_thr_low;
+ __le16 bd_pause_thr_high;
+ __le16 sge_pause_thr_low;
+ __le16 sge_pause_thr_high;
+ __le16 rx_cos_mask;
+ __le16 silent_vlan_value;
+ __le16 silent_vlan_mask;
+ __le32 reserved6[2];
};
/*
@@ -2659,11 +3220,25 @@ struct client_init_tx_data {
u8 enforce_security_flg;
u8 tx_status_block_id;
u8 tx_sb_index_number;
- u8 reserved0;
- __le16 mtu;
- __le16 reserved1;
+ u8 tss_leading_client_id;
+ u8 tx_switching_flg;
+ u8 anti_spoofing_flg;
+ __le16 default_vlan;
struct regpair tx_bd_page_base;
- __le32 reserved2[2];
+ __le16 state;
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0)
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1)
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2)
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
+#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
+#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
+ u8 default_vlan_flg;
+ u8 reserved2;
+ __le32 reserved3;
};
/*
@@ -2673,7 +3248,146 @@ struct client_init_ramrod_data {
struct client_init_general_data general;
struct client_init_rx_data rx;
struct client_init_tx_data tx;
- struct client_init_fc_data fc;
+};
+
+
+/*
+ * client update ramrod data
+ */
+struct client_update_ramrod_data {
+ u8 client_id;
+ u8 func_id;
+ u8 inner_vlan_removal_enable_flg;
+ u8 inner_vlan_removal_change_flg;
+ u8 outer_vlan_removal_enable_flg;
+ u8 outer_vlan_removal_change_flg;
+ u8 anti_spoofing_enable_flg;
+ u8 anti_spoofing_change_flg;
+ u8 activate_flg;
+ u8 activate_change_flg;
+ __le16 default_vlan;
+ u8 default_vlan_enable_flg;
+ u8 default_vlan_change_flg;
+ __le16 silent_vlan_value;
+ __le16 silent_vlan_mask;
+ u8 silent_vlan_removal_flg;
+ u8 silent_vlan_change_flg;
+ __le32 echo;
+};
+
+
+/*
+ * The eth storm context of Cstorm
+ */
+struct cstorm_eth_st_context {
+ u32 __reserved0[4];
+};
+
+
+struct double_regpair {
+ u32 regpair0_lo;
+ u32 regpair0_hi;
+ u32 regpair1_lo;
+ u32 regpair1_hi;
+};
+
+
+/*
+ * Ethernet address types used in ethernet tx BDs
+ */
+enum eth_addr_type {
+ UNKNOWN_ADDRESS,
+ UNICAST_ADDRESS,
+ MULTICAST_ADDRESS,
+ BROADCAST_ADDRESS,
+ MAX_ETH_ADDR_TYPE
+};
+
+
+/*
+ * Command header for classification rules
+ */
+struct eth_classify_cmd_header {
+ u8 cmd_general_data;
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0)
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1)
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2)
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4)
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5)
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5
+ u8 func_id;
+ u8 client_id;
+ u8 reserved1;
+};
+
+
+/*
+ * header for eth classification config ramrod
+ */
+struct eth_classify_header {
+ u8 rule_cnt;
+ u8 reserved0;
+ __le16 reserved1;
+ __le32 echo;
+};
+
+
+/*
+ * Command for adding/removing a MAC classification rule
+ */
+struct eth_classify_mac_cmd {
+ struct eth_classify_cmd_header header;
+ __le32 reserved0;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 reserved1;
+};
+
+
+/*
+ * Command for adding/removing a MAC-VLAN pair classification rule
+ */
+struct eth_classify_pair_cmd {
+ struct eth_classify_cmd_header header;
+ __le32 reserved0;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan;
+};
+
+
+/*
+ * Command for adding/removing a VLAN classification rule
+ */
+struct eth_classify_vlan_cmd {
+ struct eth_classify_cmd_header header;
+ __le32 reserved0;
+ __le32 reserved1;
+ __le16 reserved2;
+ __le16 vlan;
+};
+
+/*
+ * union for eth classification rule
+ */
+union eth_classify_rule_cmd {
+ struct eth_classify_mac_cmd mac;
+ struct eth_classify_vlan_cmd vlan;
+ struct eth_classify_pair_cmd pair;
+};
+
+/*
+ * parameters for eth classification configuration ramrod
+ */
+struct eth_classify_rules_ramrod_data {
+ struct eth_classify_header header;
+ union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
};
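
As an illustration of how one entry of the rules array above might be filled for an add-MAC rule on the RX path, here is a user-space sketch. Assumptions: __le16/__le32 are typedef'd for a little-endian host, only the macros actually used are reproduced from the definitions above, the function/client IDs and the MAC address are hypothetical, and the high/mid/low split of the MAC bytes is an assumption of this sketch rather than something stated in this hunk.

#include <stdint.h>
#include <string.h>

typedef uint8_t u8;
typedef uint16_t __le16;        /* little-endian host assumed for this sketch */
typedef uint32_t __le32;

/* reproduced from the structures above */
struct eth_classify_cmd_header {
        u8 cmd_general_data;
#define ETH_CLASSIFY_CMD_HEADER_RX_CMD          (0x1<<0)
#define ETH_CLASSIFY_CMD_HEADER_OPCODE          (0x3<<2)
#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT    2
#define ETH_CLASSIFY_CMD_HEADER_IS_ADD          (0x1<<4)
        u8 func_id;
        u8 client_id;
        u8 reserved1;
};

struct eth_classify_mac_cmd {
        struct eth_classify_cmd_header header;
        __le32 reserved0;
        __le16 mac_lsb;
        __le16 mac_mid;
        __le16 mac_msb;
        __le16 reserved1;
};

/* first value of enum classify_rule above */
enum classify_rule { CLASSIFY_RULE_OPCODE_MAC };

/* build an "add MAC on the RX path" rule; IDs and byte split are illustrative */
static void fill_mac_add_rule(struct eth_classify_mac_cmd *cmd,
                              const u8 mac[6], u8 func_id, u8 client_id)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->header.cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
        cmd->header.cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
        cmd->header.cmd_general_data |=
                CLASSIFY_RULE_OPCODE_MAC << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT;
        cmd->header.func_id = func_id;
        cmd->header.client_id = client_id;

        /* assumed split: most significant 16-bit word of the MAC first */
        cmd->mac_msb = (mac[0] << 8) | mac[1];
        cmd->mac_mid = (mac[2] << 8) | mac[3];
        cmd->mac_lsb = (mac[4] << 8) | mac[5];
}

int main(void)
{
        static const u8 mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
        struct eth_classify_mac_cmd cmd;

        fill_mac_add_rule(&cmd, mac, 0, 0);     /* hypothetical func/client 0 */
        return 0;
}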
@@ -2681,8 +3395,45 @@ struct client_init_ramrod_data {
 * The data contains the client ID needed for the ramrod
*/
struct eth_common_ramrod_data {
- u32 client_id;
- u32 reserved1;
+ __le32 client_id;
+ __le32 reserved1;
+};
+
+
+/*
+ * The eth storm context of Ustorm
+ */
+struct ustorm_eth_st_context {
+ u32 reserved0[52];
+};
+
+/*
+ * The eth storm context of Tstorm
+ */
+struct tstorm_eth_st_context {
+ u32 __reserved0[28];
+};
+
+/*
+ * The eth storm context of Xstorm
+ */
+struct xstorm_eth_st_context {
+ u32 reserved0[60];
+};
+
+/*
+ * Ethernet connection context
+ */
+struct eth_context {
+ struct ustorm_eth_st_context ustorm_st_context;
+ struct tstorm_eth_st_context tstorm_st_context;
+ struct xstorm_eth_ag_context xstorm_ag_context;
+ struct tstorm_eth_ag_context tstorm_ag_context;
+ struct cstorm_eth_ag_context cstorm_ag_context;
+ struct ustorm_eth_ag_context ustorm_ag_context;
+ struct timers_block_context timers_context;
+ struct xstorm_eth_st_context xstorm_st_context;
+ struct cstorm_eth_st_context cstorm_st_context;
};
@@ -2695,24 +3446,47 @@ union eth_sgl_or_raw_data {
};
/*
+ * eth FP end aggregation CQE parameters struct
+ */
+struct eth_end_agg_rx_cqe {
+ u8 type_error_flags;
+#define ETH_END_AGG_RX_CQE_TYPE (0x3<<0)
+#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2)
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3)
+#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3
+ u8 reserved1;
+ u8 queue_index;
+ u8 reserved2;
+ __le32 timestamp_delta;
+ __le16 num_of_coalesced_segs;
+ __le16 pkt_len;
+ u8 pure_ack_count;
+ u8 reserved3;
+ __le16 reserved4;
+ union eth_sgl_or_raw_data sgl_or_raw_data;
+ __le32 reserved5[8];
+};
+
+
+/*
* regular eth FP CQE parameters struct
*/
struct eth_fast_path_rx_cqe {
u8 type_error_flags;
-#define ETH_FAST_PATH_RX_CQE_TYPE (0x1<<0)
+#define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0)
#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<1)
-#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 1
-#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<2)
-#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 2
-#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<3)
-#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 3
-#define ETH_FAST_PATH_RX_CQE_START_FLG (0x1<<4)
-#define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4
-#define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5)
-#define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5
-#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x3<<6)
-#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2)
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3)
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4)
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5)
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6)
+#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6
u8 status_flags;
#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
@@ -2726,39 +3500,108 @@ struct eth_fast_path_rx_cqe {
#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6
#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7)
#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7
- u8 placement_offset;
u8 queue_index;
+ u8 placement_offset;
__le32 rss_hash_result;
__le16 vlan_tag;
__le16 pkt_len;
__le16 len_on_bd;
struct parsing_flags pars_flags;
union eth_sgl_or_raw_data sgl_or_raw_data;
+ __le32 reserved1[8];
};
/*
- * The data for RSS setup ramrod
+ * Command for setting classification flags for a client
+ */
+struct eth_filter_rules_cmd {
+ u8 cmd_general_data;
+#define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0)
+#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1)
+#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2)
+#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2
+ u8 func_id;
+ u8 client_id;
+ u8 reserved1;
+ __le16 state;
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0)
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1)
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2)
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3)
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4)
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5)
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6)
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6
+#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7)
+#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7
+ __le16 reserved3;
+ struct regpair reserved4;
+};
+
+
+/*
+ * parameters for eth classification filters ramrod
+ */
+struct eth_filter_rules_ramrod_data {
+ struct eth_classify_header header;
+ struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT];
+};
+
+
+/*
+ * parameters for eth classification configuration ramrod
+ */
+struct eth_general_rules_ramrod_data {
+ struct eth_classify_header header;
+ union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The data for Halt ramrod
*/
struct eth_halt_ramrod_data {
- u32 client_id;
- u32 reserved0;
+ __le32 client_id;
+ __le32 reserved0;
};
+
/*
- * The data for statistics query ramrod
+ * Command for setting multicast classification for a client
*/
-struct common_query_ramrod_data {
-#if defined(__BIG_ENDIAN)
- u8 reserved0;
- u8 collect_port;
- u16 drv_counter;
-#elif defined(__LITTLE_ENDIAN)
- u16 drv_counter;
- u8 collect_port;
- u8 reserved0;
-#endif
- u32 ctr_id_vector;
+struct eth_multicast_rules_cmd {
+ u8 cmd_general_data;
+#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
+#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1)
+#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2)
+#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2
+#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3)
+#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3
+ u8 func_id;
+ u8 bin_id;
+ u8 engine_id;
+ __le32 reserved2;
+ struct regpair reserved3;
+};
+
+
+/*
+ * parameters for multicast classification ramrod
+ */
+struct eth_multicast_rules_ramrod_data {
+ struct eth_classify_header header;
+ struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
};
@@ -2779,16 +3622,86 @@ union eth_ramrod_data {
/*
+ * RSS Toeplitz hash type, as reported in the CQE
+ */
+enum eth_rss_hash_type {
+ DEFAULT_HASH_TYPE,
+ IPV4_HASH_TYPE,
+ TCP_IPV4_HASH_TYPE,
+ IPV6_HASH_TYPE,
+ TCP_IPV6_HASH_TYPE,
+ VLAN_PRI_HASH_TYPE,
+ E1HOV_PRI_HASH_TYPE,
+ DSCP_HASH_TYPE,
+ MAX_ETH_RSS_HASH_TYPE
+};
+
+
+/*
+ * Ethernet RSS mode
+ */
+enum eth_rss_mode {
+ ETH_RSS_MODE_DISABLED,
+ ETH_RSS_MODE_REGULAR,
+ ETH_RSS_MODE_VLAN_PRI,
+ ETH_RSS_MODE_E1HOV_PRI,
+ ETH_RSS_MODE_IP_DSCP,
+ MAX_ETH_RSS_MODE
+};
+
+
+/*
+ * parameters for RSS update ramrod (E2)
+ */
+struct eth_rss_update_ramrod_data {
+ u8 rss_engine_id;
+ u8 capabilities;
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6
+#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7)
+#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7
+ u8 rss_result_mask;
+ u8 rss_mode;
+ __le32 __reserved2;
+ u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ __le32 rss_key[T_ETH_RSS_KEY];
+ __le32 echo;
+ __le32 reserved3;
+};
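
A sketch of how the capabilities byte and rss_mode of this ramrod might be set up to hash IPv4 and TCP/IPv4 traffic while installing a new key. The macros and enum are reproduced from the definitions above; treating the fields in isolation rather than inside the full ramrod buffer is purely for illustration.

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

/* reproduced from struct eth_rss_update_ramrod_data above */
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY      (0x1<<0)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY  (0x1<<1)
#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY       (0x1<<6)

/* reproduced from enum eth_rss_mode above */
enum eth_rss_mode {
        ETH_RSS_MODE_DISABLED,
        ETH_RSS_MODE_REGULAR,
        ETH_RSS_MODE_VLAN_PRI,
        ETH_RSS_MODE_E1HOV_PRI,
        ETH_RSS_MODE_IP_DSCP,
        MAX_ETH_RSS_MODE
};

int main(void)
{
        u8 caps = 0;
        u8 rss_mode = ETH_RSS_MODE_REGULAR;

        /* hash IPv4 and TCP/IPv4 headers and push a fresh Toeplitz key */
        caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
        caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
        caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;

        printf("rss_mode=%u capabilities=0x%02x\n",
               (unsigned)rss_mode, (unsigned)caps);
        return 0;
}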
+
+
+/*
+ * The eth Rx Buffer Descriptor
+ */
+struct eth_rx_bd {
+ __le32 addr_lo;
+ __le32 addr_hi;
+};
+
+
+/*
* Eth Rx Cqe structure- general structure for ramrods
*/
struct common_ramrod_eth_rx_cqe {
u8 ramrod_type;
-#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x1<<0)
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0)
#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
-#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<1)
-#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 1
-#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x3F<<2)
-#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 2
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2)
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3)
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3
u8 conn_type;
__le16 reserved1;
__le32 conn_and_cmd_data;
@@ -2797,7 +3710,8 @@ struct common_ramrod_eth_rx_cqe {
#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24)
#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24
struct ramrod_data protocol_data;
- __le32 reserved2[4];
+ __le32 echo;
+ __le32 reserved2[11];
};
/*
@@ -2806,7 +3720,7 @@ struct common_ramrod_eth_rx_cqe {
struct eth_rx_cqe_next_page {
__le32 addr_lo;
__le32 addr_hi;
- __le32 reserved[6];
+ __le32 reserved[14];
};
/*
@@ -2816,6 +3730,38 @@ union eth_rx_cqe {
struct eth_fast_path_rx_cqe fast_path_cqe;
struct common_ramrod_eth_rx_cqe ramrod_cqe;
struct eth_rx_cqe_next_page next_page_cqe;
+ struct eth_end_agg_rx_cqe end_agg_cqe;
+};
+
+
+/*
+ * Values for RX ETH CQE type field
+ */
+enum eth_rx_cqe_type {
+ RX_ETH_CQE_TYPE_ETH_FASTPATH,
+ RX_ETH_CQE_TYPE_ETH_RAMROD,
+ RX_ETH_CQE_TYPE_ETH_START_AGG,
+ RX_ETH_CQE_TYPE_ETH_STOP_AGG,
+ MAX_ETH_RX_CQE_TYPE
+};
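
The widened two-bit TYPE field in the fast-path CQE now selects between the four values of this enum. A small sketch of decoding it (the mask and enum are reproduced from the definitions above; the example flags value is hypothetical):

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

/* reproduced from struct eth_fast_path_rx_cqe above */
#define ETH_FAST_PATH_RX_CQE_TYPE       (0x3<<0)
#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0

/* reproduced from enum eth_rx_cqe_type above */
enum eth_rx_cqe_type {
        RX_ETH_CQE_TYPE_ETH_FASTPATH,
        RX_ETH_CQE_TYPE_ETH_RAMROD,
        RX_ETH_CQE_TYPE_ETH_START_AGG,
        RX_ETH_CQE_TYPE_ETH_STOP_AGG,
        MAX_ETH_RX_CQE_TYPE
};

int main(void)
{
        u8 type_error_flags = 0x02;     /* hypothetical CQE flags byte */
        unsigned int type = (type_error_flags & ETH_FAST_PATH_RX_CQE_TYPE) >>
                            ETH_FAST_PATH_RX_CQE_TYPE_SHIFT;

        if (type == RX_ETH_CQE_TYPE_ETH_START_AGG)
                printf("start-of-aggregation CQE\n");
        else if (type == RX_ETH_CQE_TYPE_ETH_FASTPATH)
                printf("regular fast-path CQE\n");
        return 0;
}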
+
+
+/*
+ * Type of SGL/Raw field in ETH RX fast path CQE
+ */
+enum eth_rx_fp_sel {
+ ETH_FP_CQE_REGULAR,
+ ETH_FP_CQE_RAW,
+ MAX_ETH_RX_FP_SEL
+};
+
+
+/*
+ * The eth Rx SGE Descriptor
+ */
+struct eth_rx_sge {
+ __le32 addr_lo;
+ __le32 addr_hi;
};
@@ -2837,14 +3783,18 @@ struct spe_hdr {
};
/*
- * Ethernet slow path element
+ * specific data for ethernet slow path element
*/
union eth_specific_data {
u8 protocol_data[8];
+ struct regpair client_update_ramrod_data;
struct regpair client_init_ramrod_init_data;
struct eth_halt_ramrod_data halt_ramrod_data;
struct regpair update_data_addr;
struct eth_common_ramrod_data common_ramrod_data;
+ struct regpair classify_cfg_addr;
+ struct regpair filter_cfg_addr;
+ struct regpair mcast_cfg_addr;
};
/*
@@ -2857,94 +3807,202 @@ struct eth_spe {
/*
- * array of 13 bds as appears in the eth xstorm context
+ * Ethernet command ID for slow path elements
*/
-struct eth_tx_bds_array {
- union eth_tx_bd_types bds[13];
+enum eth_spqe_cmd_id {
+ RAMROD_CMD_ID_ETH_UNUSED,
+ RAMROD_CMD_ID_ETH_CLIENT_SETUP,
+ RAMROD_CMD_ID_ETH_HALT,
+ RAMROD_CMD_ID_ETH_FORWARD_SETUP,
+ RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP,
+ RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
+ RAMROD_CMD_ID_ETH_EMPTY,
+ RAMROD_CMD_ID_ETH_TERMINATE,
+ RAMROD_CMD_ID_ETH_TPA_UPDATE,
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES,
+ RAMROD_CMD_ID_ETH_FILTER_RULES,
+ RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+ RAMROD_CMD_ID_ETH_RSS_UPDATE,
+ RAMROD_CMD_ID_ETH_SET_MAC,
+ MAX_ETH_SPQE_CMD_ID
};
/*
- * Common configuration parameters per function in Tstorm
+ * eth tpa update command
*/
-struct tstorm_eth_function_common_config {
-#if defined(__BIG_ENDIAN)
- u8 reserved1;
- u8 rss_result_mask;
- u16 config_flags;
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9)
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9
-#elif defined(__LITTLE_ENDIAN)
- u16 config_flags;
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8)
-#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9)
-#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9
- u8 rss_result_mask;
- u8 reserved1;
-#endif
- u16 vlan_id[2];
+enum eth_tpa_update_command {
+ TPA_UPDATE_NONE_COMMAND,
+ TPA_UPDATE_ENABLE_COMMAND,
+ TPA_UPDATE_DISABLE_COMMAND,
+ MAX_ETH_TPA_UPDATE_COMMAND
};
+
/*
- * RSS idirection table update configuration
+ * Tx regular BD structure
*/
-struct rss_update_config {
-#if defined(__BIG_ENDIAN)
- u16 toe_rss_bitmap;
- u16 flags;
-#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0)
-#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0
-#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1)
-#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1
-#define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2)
-#define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2
-#elif defined(__LITTLE_ENDIAN)
- u16 flags;
-#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0)
-#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0
-#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1)
-#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1
-#define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2)
-#define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2
- u16 toe_rss_bitmap;
-#endif
- u32 reserved1;
+struct eth_tx_bd {
+ __le32 addr_lo;
+ __le32 addr_hi;
+ __le16 total_pkt_bytes;
+ __le16 nbytes;
+ u8 reserved[4];
+};
+
+
+/*
+ * structure for easy accessibility to assembler
+ */
+struct eth_tx_bd_flags {
+ u8 as_bitfield;
+#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0)
+#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0
+#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1)
+#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1
+#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2)
+#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2
+#define ETH_TX_BD_FLAGS_START_BD (0x1<<4)
+#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
+#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5)
+#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5
+#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6)
+#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
+#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7)
+#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7
+};
+
+/*
+ * The eth Tx Buffer Descriptor
+ */
+struct eth_tx_start_bd {
+ __le32 addr_lo;
+ __le32 addr_hi;
+ __le16 nbd;
+ __le16 nbytes;
+ __le16 vlan_or_ethertype;
+ struct eth_tx_bd_flags bd_flags;
+ u8 general_data;
+#define ETH_TX_START_BD_HDR_NBDS (0xF<<0)
+#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
+#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
+#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
+#define ETH_TX_START_BD_RESREVED (0x1<<5)
+#define ETH_TX_START_BD_RESREVED_SHIFT 5
+#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6)
+#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6
+};
+
+/*
+ * Tx parsing BD structure for ETH E1/E1h
+ */
+struct eth_tx_parse_bd_e1x {
+ u8 global_data;
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4)
+#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7
+ u8 tcp_flags;
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
+ u8 ip_hlen_w;
+ s8 reserved;
+ __le16 total_hlen_w;
+ __le16 tcp_pseudo_csum;
+ __le16 lso_mss;
+ __le16 ip_id;
+ __le32 tcp_send_seq;
+};
+
+/*
+ * Tx parsing BD structure for ETH E2
+ */
+struct eth_tx_parse_bd_e2 {
+ __le16 dst_mac_addr_lo;
+ __le16 dst_mac_addr_mid;
+ __le16 dst_mac_addr_hi;
+ __le16 src_mac_addr_lo;
+ __le16 src_mac_addr_mid;
+ __le16 src_mac_addr_hi;
+ __le32 parsing_data;
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13
+#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17)
+#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31)
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31
+};
+
+/*
+ * The last BD in the BD memory will hold a pointer to the next BD memory
+ */
+struct eth_tx_next_bd {
+ __le32 addr_lo;
+ __le32 addr_hi;
+ u8 reserved[8];
+};
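
The comment above describes how BD pages are chained: the final descriptor of each page is written as an eth_tx_next_bd carrying the physical address of the following page. A hedged user-space sketch of that link step, with __le32 typedef'd for a little-endian host and a made-up dma_addr_t stand-in; the helper name and the address value are illustrative only.

#include <stdint.h>
#include <string.h>

typedef uint8_t u8;
typedef uint32_t __le32;        /* little-endian host assumed for this sketch */
typedef uint64_t dma_addr_t;    /* stand-in for the kernel's DMA address type */

/* reproduced from struct eth_tx_next_bd above */
struct eth_tx_next_bd {
        __le32 addr_lo;
        __le32 addr_hi;
        u8 reserved[8];
};

/* write the last BD of a BD page so that it points at the next page */
static void tx_chain_link(struct eth_tx_next_bd *last, dma_addr_t next_page)
{
        last->addr_lo = (uint32_t)(next_page & 0xffffffffu);
        last->addr_hi = (uint32_t)(next_page >> 32);
        memset(last->reserved, 0, sizeof(last->reserved));
}

int main(void)
{
        struct eth_tx_next_bd bd;

        tx_chain_link(&bd, 0x123456789ULL);     /* hypothetical DMA address */
        return 0;
}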
+
+/*
+ * union for 4 Bd types
+ */
+union eth_tx_bd_types {
+ struct eth_tx_start_bd start_bd;
+ struct eth_tx_bd reg_bd;
+ struct eth_tx_parse_bd_e1x parse_bd_e1x;
+ struct eth_tx_parse_bd_e2 parse_bd_e2;
+ struct eth_tx_next_bd next_bd;
+};
+
+/*
+ * array of 13 bds as appears in the eth xstorm context
+ */
+struct eth_tx_bds_array {
+ union eth_tx_bd_types bds[13];
};
+
/*
- * parameters for eth update ramrod
+ * VLAN mode on TX BDs
*/
-struct eth_update_ramrod_data {
- struct tstorm_eth_function_common_config func_config;
- u8 indirectionTable[128];
- struct rss_update_config rss_config;
+enum eth_tx_vlan_type {
+ X_ETH_NO_VLAN,
+ X_ETH_OUTBAND_VLAN,
+ X_ETH_INBAND_VLAN,
+ X_ETH_FW_ADDED_VLAN,
+ MAX_ETH_TX_VLAN_TYPE
+};
+
+
+/*
+ * Ethernet VLAN filtering mode in E1x
+ */
+enum eth_vlan_filter_mode {
+ ETH_VLAN_FILTER_ANY_VLAN,
+ ETH_VLAN_FILTER_SPECIFIC_VLAN,
+ ETH_VLAN_FILTER_CLASSIFY,
+ MAX_ETH_VLAN_FILTER_MODE
};
@@ -2954,9 +4012,8 @@ struct eth_update_ramrod_data {
struct mac_configuration_hdr {
u8 length;
u8 offset;
- u16 client_id;
- u16 echo;
- u16 reserved1;
+ __le16 client_id;
+ __le32 echo;
};
/*
@@ -2981,8 +4038,8 @@ struct mac_configuration_entry {
#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6)
#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6
- u16 reserved0;
- u32 clients_bit_vector;
+ __le16 reserved0;
+ __le32 clients_bit_vector;
};
/*
@@ -2995,6 +4052,36 @@ struct mac_configuration_cmd {
/*
+ * Set-MAC command type (in E1x)
+ */
+enum set_mac_action_type {
+ T_ETH_MAC_COMMAND_INVALIDATE,
+ T_ETH_MAC_COMMAND_SET,
+ MAX_SET_MAC_ACTION_TYPE
+};
+
+
+/*
+ * tpa update ramrod data
+ */
+struct tpa_update_ramrod_data {
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 client_id;
+ u8 max_tpa_queues;
+ u8 max_sges_for_packet;
+ u8 complete_on_both_clients;
+ __le16 reserved1;
+ __le16 sge_buff_size;
+ __le16 max_agg_size;
+ __le32 sge_page_base_lo;
+ __le32 sge_page_base_hi;
+ __le16 sge_pause_thr_low;
+ __le16 sge_pause_thr_high;
+};
+
+
+/*
* approximate-match multicast filtering for E1H per function in Tstorm
*/
struct tstorm_eth_approximate_match_multicast_filtering {
@@ -3003,35 +4090,50 @@ struct tstorm_eth_approximate_match_multicast_filtering {
/*
+ * Common configuration parameters per function in Tstorm
+ */
+struct tstorm_eth_function_common_config {
+ __le16 config_flags;
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8)
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8
+ u8 rss_result_mask;
+ u8 reserved1;
+ __le16 vlan_id[2];
+};
+
+
+/*
* MAC filtering configuration parameters per port in Tstorm
*/
struct tstorm_eth_mac_filter_config {
- u32 ucast_drop_all;
- u32 ucast_accept_all;
- u32 mcast_drop_all;
- u32 mcast_accept_all;
- u32 bcast_drop_all;
- u32 bcast_accept_all;
- u32 vlan_filter[2];
- u32 unmatched_unicast;
- u32 reserved;
+ __le32 ucast_drop_all;
+ __le32 ucast_accept_all;
+ __le32 mcast_drop_all;
+ __le32 mcast_accept_all;
+ __le32 bcast_accept_all;
+ __le32 vlan_filter[2];
+ __le32 unmatched_unicast;
};
/*
- * common flag to indicate existence of TPA.
+ * tx only queue init ramrod data
*/
-struct tstorm_eth_tpa_exist {
-#if defined(__BIG_ENDIAN)
- u16 reserved1;
- u8 reserved0;
- u8 tpa_exist;
-#elif defined(__LITTLE_ENDIAN)
- u8 tpa_exist;
- u8 reserved0;
- u16 reserved1;
-#endif
- u32 reserved2;
+struct tx_queue_init_ramrod_data {
+ struct client_init_general_data general;
+ struct client_init_tx_data tx;
};
@@ -3061,10 +4163,8 @@ struct ustorm_eth_rx_producers {
*/
struct cfc_del_event_data {
u32 cid;
- u8 error;
- u8 reserved0;
- u16 reserved1;
- u32 reserved2;
+ u32 reserved0;
+ u32 reserved1;
};
@@ -3072,22 +4172,18 @@ struct cfc_del_event_data {
* per-port SAFC demo variables
*/
struct cmng_flags_per_port {
- u8 con_number[NUM_OF_PROTOCOLS];
u32 cmng_enables;
#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0)
#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0
#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1)
#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1
-#define CMNG_FLAGS_PER_PORT_FAIRNESS_PROTOCOL (0x1<<2)
-#define CMNG_FLAGS_PER_PORT_FAIRNESS_PROTOCOL_SHIFT 2
-#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL (0x1<<3)
-#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL_SHIFT 3
-#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<4)
-#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 4
-#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<5)
-#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 5
-#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x3FFFFFF<<6)
-#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 6
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3
+#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4)
+#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4
+ u32 __reserved1;
};
@@ -3106,6 +4202,7 @@ struct fairness_vars_per_port {
u32 upper_bound;
u32 fair_threshold;
u32 fairness_timeout;
+ u32 reserved0;
};
/*
@@ -3122,65 +4219,65 @@ struct safc_struct_per_port {
u16 __reserved1;
#endif
u8 cos_to_traffic_types[MAX_COS_NUMBER];
- u32 __reserved2;
u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
};
/*
- * per-port PFC variables
+ * Per-port congestion management variables
*/
-struct pfc_struct_per_port {
- u8 priority_to_traffic_types[MAX_PFC_PRIORITIES];
-#if defined(__BIG_ENDIAN)
- u16 pfc_pause_quanta_in_nanosec;
- u8 __reserved0;
- u8 priority_non_pausable_mask;
-#elif defined(__LITTLE_ENDIAN)
- u8 priority_non_pausable_mask;
- u8 __reserved0;
- u16 pfc_pause_quanta_in_nanosec;
-#endif
+struct cmng_struct_per_port {
+ struct rate_shaping_vars_per_port rs_vars;
+ struct fairness_vars_per_port fair_vars;
+ struct safc_struct_per_port safc_vars;
+ struct cmng_flags_per_port flags;
};
+
/*
- * Priority and cos
+ * Protocol-common command ID for slow path elements
*/
-struct priority_cos {
-#if defined(__BIG_ENDIAN)
- u16 reserved1;
- u8 cos;
- u8 priority;
-#elif defined(__LITTLE_ENDIAN)
- u8 priority;
- u8 cos;
- u16 reserved1;
-#endif
- u32 reserved2;
+enum common_spqe_cmd_id {
+ RAMROD_CMD_ID_COMMON_UNUSED,
+ RAMROD_CMD_ID_COMMON_FUNCTION_START,
+ RAMROD_CMD_ID_COMMON_FUNCTION_STOP,
+ RAMROD_CMD_ID_COMMON_CFC_DEL,
+ RAMROD_CMD_ID_COMMON_CFC_DEL_WB,
+ RAMROD_CMD_ID_COMMON_STAT_QUERY,
+ RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
+ RAMROD_CMD_ID_COMMON_START_TRAFFIC,
+ RAMROD_CMD_ID_COMMON_RESERVED1,
+ RAMROD_CMD_ID_COMMON_RESERVED2,
+ MAX_COMMON_SPQE_CMD_ID
};
+
/*
- * Per-port congestion management variables
+ * Per-protocol connection types
*/
-struct cmng_struct_per_port {
- struct rate_shaping_vars_per_port rs_vars;
- struct fairness_vars_per_port fair_vars;
- struct safc_struct_per_port safc_vars;
- struct pfc_struct_per_port pfc_vars;
-#if defined(__BIG_ENDIAN)
- u16 __reserved1;
- u8 dcb_enabled;
- u8 llfc_mode;
-#elif defined(__LITTLE_ENDIAN)
- u8 llfc_mode;
- u8 dcb_enabled;
- u16 __reserved1;
-#endif
- struct priority_cos
- traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES];
- struct cmng_flags_per_port flags;
+enum connection_type {
+ ETH_CONNECTION_TYPE,
+ TOE_CONNECTION_TYPE,
+ RDMA_CONNECTION_TYPE,
+ ISCSI_CONNECTION_TYPE,
+ FCOE_CONNECTION_TYPE,
+ RESERVED_CONNECTION_TYPE_0,
+ RESERVED_CONNECTION_TYPE_1,
+ RESERVED_CONNECTION_TYPE_2,
+ NONE_CONNECTION_TYPE,
+ MAX_CONNECTION_TYPE
};
+/*
+ * Cos modes
+ */
+enum cos_mode {
+ OVERRIDE_COS,
+ STATIC_COS,
+ FW_WRR,
+ MAX_COS_MODE
+};
+
/*
* Dynamic HC counters set by the driver
@@ -3197,126 +4294,174 @@ struct cstorm_queue_zone_data {
struct regpair reserved[2];
};
+
/*
- * Dynamic host coalescing init parameters
+ * Vf-PF channel data in cstorm ram (non-triggered zone)
*/
-struct dynamic_hc_config {
- u32 threshold[3];
- u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES];
- u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES];
- u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES];
- u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES];
- u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES];
+struct vf_pf_channel_zone_data {
+ u32 msg_addr_lo;
+ u32 msg_addr_hi;
};
-
/*
- * Protocol-common statistics collected by the Xstorm (per client)
+ * zone for VF non-triggered data
*/
-struct xstorm_per_client_stats {
- __le32 reserved0;
- __le32 unicast_pkts_sent;
- struct regpair unicast_bytes_sent;
- struct regpair multicast_bytes_sent;
- __le32 multicast_pkts_sent;
- __le32 broadcast_pkts_sent;
- struct regpair broadcast_bytes_sent;
- __le16 stats_counter;
- __le16 reserved1;
- __le32 reserved2;
+struct non_trigger_vf_zone {
+ struct vf_pf_channel_zone_data vf_pf_channel;
};
/*
- * Common statistics collected by the Xstorm (per port)
+ * Vf-PF channel trigger zone in cstorm ram
*/
-struct xstorm_common_stats {
- struct xstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
+struct vf_pf_channel_zone_trigger {
+ u8 addr_valid;
};
/*
- * Protocol-common statistics collected by the Tstorm (per port)
+ * zone that triggers the in-bound interrupt
*/
-struct tstorm_per_port_stats {
- __le32 mac_filter_discard;
- __le32 xxoverflow_discard;
- __le32 brb_truncate_discard;
- __le32 mac_discard;
+struct trigger_vf_zone {
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 reserved0;
+ struct vf_pf_channel_zone_trigger vf_pf_channel;
+#elif defined(__LITTLE_ENDIAN)
+ struct vf_pf_channel_zone_trigger vf_pf_channel;
+ u8 reserved0;
+ u16 reserved1;
+#endif
+ u32 reserved2;
};
/*
- * Protocol-common statistics collected by the Tstorm (per client)
+ * zone B per-VF data
*/
-struct tstorm_per_client_stats {
- struct regpair rcv_unicast_bytes;
- struct regpair rcv_broadcast_bytes;
- struct regpair rcv_multicast_bytes;
- struct regpair rcv_error_bytes;
- __le32 checksum_discard;
- __le32 packets_too_big_discard;
- __le32 rcv_unicast_pkts;
- __le32 rcv_broadcast_pkts;
- __le32 rcv_multicast_pkts;
- __le32 no_buff_discard;
- __le32 ttl0_discard;
- __le16 stats_counter;
- __le16 reserved0;
+struct cstorm_vf_zone_data {
+ struct non_trigger_vf_zone non_trigger;
+ struct trigger_vf_zone trigger;
};
+
/*
- * Protocol-common statistics collected by the Tstorm
+ * Dynamic host coalescing init parameters, per state machine
*/
-struct tstorm_common_stats {
- struct tstorm_per_port_stats port_statistics;
- struct tstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
+struct dynamic_hc_sm_config {
+ u32 threshold[3];
+ u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES];
+ u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES];
};
/*
- * Protocol-common statistics collected by the Ustorm (per client)
+ * Dynamic host coalescing init parameters
*/
-struct ustorm_per_client_stats {
- struct regpair ucast_no_buff_bytes;
- struct regpair mcast_no_buff_bytes;
- struct regpair bcast_no_buff_bytes;
- __le32 ucast_no_buff_pkts;
- __le32 mcast_no_buff_pkts;
- __le32 bcast_no_buff_pkts;
- __le16 stats_counter;
- __le16 reserved0;
+struct dynamic_hc_config {
+ struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM];
+};
+
+
+struct e2_integ_data {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0)
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1)
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2)
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3)
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4)
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5)
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+ u8 cos;
+ u8 voq;
+ u8 pbf_queue;
+#elif defined(__LITTLE_ENDIAN)
+ u8 pbf_queue;
+ u8 voq;
+ u8 cos;
+ u8 flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0)
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1)
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2)
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3)
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4)
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5)
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 reserved3;
+ u8 reserved2;
+ u8 ramEn;
+#elif defined(__LITTLE_ENDIAN)
+ u8 ramEn;
+ u8 reserved2;
+ u16 reserved3;
+#endif
};
+
/*
- * Protocol-common statistics collected by the Ustorm
+ * set mac event data
*/
-struct ustorm_common_stats {
- struct ustorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
+struct eth_event_data {
+ u32 echo;
+ u32 reserved0;
+ u32 reserved1;
};
+
/*
- * Eth statistics query structure for the eth_stats_query ramrod
+ * pf-vf event data
*/
-struct eth_stats_query {
- struct xstorm_common_stats xstorm_common;
- struct tstorm_common_stats tstorm_common;
- struct ustorm_common_stats ustorm_common;
+struct vf_pf_event_data {
+ u8 vf_id;
+ u8 reserved0;
+ u16 reserved1;
+ u32 msg_addr_lo;
+ u32 msg_addr_hi;
};
+/*
+ * VF FLR event data
+ */
+struct vf_flr_event_data {
+ u8 vf_id;
+ u8 reserved0;
+ u16 reserved1;
+ u32 reserved2;
+ u32 reserved3;
+};
/*
- * set mac event data
+ * malicious VF event data
*/
-struct set_mac_event_data {
- u16 echo;
- u16 reserved0;
- u32 reserved1;
+struct malicious_vf_event_data {
+ u8 vf_id;
+ u8 reserved0;
+ u16 reserved1;
u32 reserved2;
+ u32 reserved3;
};
/*
* union for all event ring message types
*/
union event_data {
- struct set_mac_event_data set_mac_event;
+ struct vf_pf_event_data vf_pf_event;
+ struct eth_event_data eth_event;
struct cfc_del_event_data cfc_del_event;
+ struct vf_flr_event_data vf_flr_event;
+ struct malicious_vf_event_data malicious_vf_event;
};
@@ -3343,7 +4488,7 @@ struct event_ring_data {
*/
struct event_ring_msg {
u8 opcode;
- u8 reserved0;
+ u8 error;
u16 reserved1;
union event_data data;
};
@@ -3366,32 +4511,82 @@ union event_ring_elem {
/*
+ * Common event ring opcodes
+ */
+enum event_ring_opcode {
+ EVENT_RING_OPCODE_VF_PF_CHANNEL,
+ EVENT_RING_OPCODE_FUNCTION_START,
+ EVENT_RING_OPCODE_FUNCTION_STOP,
+ EVENT_RING_OPCODE_CFC_DEL,
+ EVENT_RING_OPCODE_CFC_DEL_WB,
+ EVENT_RING_OPCODE_STAT_QUERY,
+ EVENT_RING_OPCODE_STOP_TRAFFIC,
+ EVENT_RING_OPCODE_START_TRAFFIC,
+ EVENT_RING_OPCODE_VF_FLR,
+ EVENT_RING_OPCODE_MALICIOUS_VF,
+ EVENT_RING_OPCODE_FORWARD_SETUP,
+ EVENT_RING_OPCODE_RSS_UPDATE_RULES,
+ EVENT_RING_OPCODE_RESERVED1,
+ EVENT_RING_OPCODE_RESERVED2,
+ EVENT_RING_OPCODE_SET_MAC,
+ EVENT_RING_OPCODE_CLASSIFICATION_RULES,
+ EVENT_RING_OPCODE_FILTERS_RULES,
+ EVENT_RING_OPCODE_MULTICAST_RULES,
+ MAX_EVENT_RING_OPCODE
+};
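For orientation only: a hedged sketch of how a consumer of the event ring might switch on event_ring_msg.opcode to pick the matching member of union event_data. The structs are re-declared here in simplified form and the opcode values and handler are hypothetical; this is not code from the patch.

#include <stdint.h>
#include <stdio.h>

/* Simplified re-declarations mirroring the layouts added above. */
struct vf_pf_event_data { uint8_t vf_id; uint8_t r0; uint16_t r1;
			  uint32_t msg_addr_lo; uint32_t msg_addr_hi; };
struct eth_event_data { uint32_t echo; uint32_t r0; uint32_t r1; };
struct vf_flr_event_data { uint8_t vf_id; };

union event_data {
	struct vf_pf_event_data vf_pf_event;
	struct eth_event_data eth_event;
	struct vf_flr_event_data vf_flr_event;
};

/* Hypothetical opcode values -- the real ones come from the enum above. */
enum { OPC_VF_PF_CHANNEL, OPC_VF_FLR, OPC_SET_MAC };

static void handle_event(unsigned opcode, const union event_data *d)
{
	switch (opcode) {
	case OPC_VF_PF_CHANNEL:	/* mailbox address posted by a VF */
		printf("vf-pf msg from VF %u at 0x%x:0x%x\n",
		       (unsigned)d->vf_pf_event.vf_id,
		       (unsigned)d->vf_pf_event.msg_addr_hi,
		       (unsigned)d->vf_pf_event.msg_addr_lo);
		break;
	case OPC_SET_MAC:	/* classification ramrod completion echo */
		printf("set-mac done, echo 0x%x\n",
		       (unsigned)d->eth_event.echo);
		break;
	case OPC_VF_FLR:	/* function level reset of a VF */
		printf("FLR for VF %u\n", (unsigned)d->vf_flr_event.vf_id);
		break;
	}
}

int main(void)
{
	union event_data d = { .vf_pf_event = {
		.vf_id = 3, .msg_addr_lo = 0x1000, .msg_addr_hi = 0 } };

	handle_event(OPC_VF_PF_CHANNEL, &d);
	return 0;
}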
+
+
+/*
+ * Modes for fairness algorithm
+ */
+enum fairness_mode {
+ FAIRNESS_COS_WRR_MODE,
+ FAIRNESS_COS_ETS_MODE,
+ MAX_FAIRNESS_MODE
+};
+
+
+/*
* per-vnic fairness variables
*/
struct fairness_vars_per_vn {
u32 cos_credit_delta[MAX_COS_NUMBER];
- u32 protocol_credit_delta[NUM_OF_PROTOCOLS];
u32 vn_credit_delta;
u32 __reserved0;
};
/*
+ * Priority and cos
+ */
+struct priority_cos {
+ u8 priority;
+ u8 cos;
+ __le16 reserved1;
+};
+
+/*
* The data for flow control configuration
*/
struct flow_control_configuration {
- struct priority_cos
- traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES];
-#if defined(__BIG_ENDIAN)
- u16 reserved1;
- u8 dcb_version;
- u8 dcb_enabled;
-#elif defined(__LITTLE_ENDIAN)
+ struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
u8 dcb_enabled;
u8 dcb_version;
- u16 reserved1;
-#endif
- u32 reserved2;
+ u8 dont_add_pri_0_en;
+ u8 reserved1;
+ __le32 reserved2;
+};
+
+
+/*
+ *
+ */
+struct function_start_data {
+ __le16 function_mode;
+ __le16 sd_vlan_tag;
+ u16 reserved;
+ u8 path_id;
+ u8 network_cos_mode;
};
@@ -3504,13 +4699,13 @@ struct hc_sb_data {
struct pci_entity p_func;
#if defined(__BIG_ENDIAN)
u8 rsrv0;
+ u8 state;
u8 dhc_qzone_id;
- u8 __dynamic_hc_level;
u8 same_igu_sb_1b;
#elif defined(__LITTLE_ENDIAN)
u8 same_igu_sb_1b;
- u8 __dynamic_hc_level;
u8 dhc_qzone_id;
+ u8 state;
u8 rsrv0;
#endif
struct regpair rsrv1[2];
@@ -3518,18 +4713,30 @@ struct hc_sb_data {
/*
+ * Segment types for host coalescing
+ */
+enum hc_segment {
+ HC_REGULAR_SEGMENT,
+ HC_DEFAULT_SEGMENT,
+ MAX_HC_SEGMENT
+};
+
+
+/*
* The fast-path status block meta-data
*/
struct hc_sp_status_block_data {
struct regpair host_sb_addr;
#if defined(__BIG_ENDIAN)
- u16 rsrv;
+ u8 rsrv1;
+ u8 state;
u8 igu_seg_id;
u8 igu_sb_id;
#elif defined(__LITTLE_ENDIAN)
u8 igu_sb_id;
u8 igu_seg_id;
- u16 rsrv;
+ u8 state;
+ u8 rsrv1;
#endif
struct pci_entity p_func;
};
@@ -3554,6 +4761,129 @@ struct hc_status_block_data_e2 {
/*
+ * IGU block operation modes (in Everest2)
+ */
+enum igu_mode {
+ HC_IGU_BC_MODE,
+ HC_IGU_NBC_MODE,
+ MAX_IGU_MODE
+};
+
+
+/*
+ * IP versions
+ */
+enum ip_ver {
+ IP_V4,
+ IP_V6,
+ MAX_IP_VER
+};
+
+
+/*
+ * Multi-function modes
+ */
+enum mf_mode {
+ SINGLE_FUNCTION,
+ MULTI_FUNCTION_SD,
+ MULTI_FUNCTION_SI,
+ MULTI_FUNCTION_RESERVED,
+ MAX_MF_MODE
+};
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per pf)
+ */
+struct tstorm_per_pf_stats {
+ struct regpair rcv_error_bytes;
+};
+
+/*
+ *
+ */
+struct per_pf_stats {
+ struct tstorm_per_pf_stats tstorm_pf_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per port)
+ */
+struct tstorm_per_port_stats {
+ __le32 mac_discard;
+ __le32 mac_filter_discard;
+ __le32 brb_truncate_discard;
+ __le32 mf_tag_discard;
+ __le32 packet_drop;
+ __le32 reserved;
+};
+
+/*
+ *
+ */
+struct per_port_stats {
+ struct tstorm_per_port_stats tstorm_port_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per client)
+ */
+struct tstorm_per_queue_stats {
+ struct regpair rcv_ucast_bytes;
+ __le32 rcv_ucast_pkts;
+ __le32 checksum_discard;
+ struct regpair rcv_bcast_bytes;
+ __le32 rcv_bcast_pkts;
+ __le32 pkts_too_big_discard;
+ struct regpair rcv_mcast_bytes;
+ __le32 rcv_mcast_pkts;
+ __le32 ttl0_discard;
+ __le16 no_buff_discard;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+/*
+ * Protocol-common statistics collected by the Ustorm (per client)
+ */
+struct ustorm_per_queue_stats {
+ struct regpair ucast_no_buff_bytes;
+ struct regpair mcast_no_buff_bytes;
+ struct regpair bcast_no_buff_bytes;
+ __le32 ucast_no_buff_pkts;
+ __le32 mcast_no_buff_pkts;
+ __le32 bcast_no_buff_pkts;
+ __le32 coalesced_pkts;
+ struct regpair coalesced_bytes;
+ __le32 coalesced_events;
+ __le32 coalesced_aborts;
+};
+
+/*
+ * Protocol-common statistics collected by the Xstorm (per client)
+ */
+struct xstorm_per_queue_stats {
+ struct regpair ucast_bytes_sent;
+ struct regpair mcast_bytes_sent;
+ struct regpair bcast_bytes_sent;
+ __le32 ucast_pkts_sent;
+ __le32 mcast_pkts_sent;
+ __le32 bcast_pkts_sent;
+ __le32 error_drop_pkts;
+};
+
+/*
+ *
+ */
+struct per_queue_stats {
+ struct tstorm_per_queue_stats tstorm_queue_statistics;
+ struct ustorm_per_queue_stats ustorm_queue_statistics;
+ struct xstorm_per_queue_stats xstorm_queue_statistics;
+};
+
+
+/*
* FW version stored in first line of pram
*/
struct pram_fw_version {
@@ -3582,7 +4912,6 @@ union protocol_common_specific_data {
u8 protocol_data[8];
struct regpair phy_address;
struct regpair mac_config_addr;
- struct common_query_ramrod_data query_ramrod_data;
};
/*
@@ -3613,7 +4942,6 @@ struct rate_shaping_counter {
* per-vnic rate shaping variables
*/
struct rate_shaping_vars_per_vn {
- struct rate_shaping_counter protocol_counters[NUM_OF_PROTOCOLS];
struct rate_shaping_counter vn_counter;
};
@@ -3628,39 +4956,100 @@ struct slow_path_element {
/*
- * eth/toe flags that indicate if to query
+ * Protocol-common statistics counter
*/
-struct stats_indication_flags {
- u32 collect_eth;
- u32 collect_toe;
+struct stats_counter {
+ __le16 xstats_counter;
+ __le16 reserved0;
+ __le32 reserved1;
+ __le16 tstats_counter;
+ __le16 reserved2;
+ __le32 reserved3;
+ __le16 ustats_counter;
+ __le16 reserved4;
+ __le32 reserved5;
+ __le16 cstats_counter;
+ __le16 reserved6;
+ __le32 reserved7;
};
/*
- * per-port PFC variables
+ *
*/
-struct storm_pfc_struct_per_port {
-#if defined(__BIG_ENDIAN)
- u16 mid_mac_addr;
- u16 msb_mac_addr;
-#elif defined(__LITTLE_ENDIAN)
- u16 msb_mac_addr;
- u16 mid_mac_addr;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 pfc_pause_quanta_in_nanosec;
- u16 lsb_mac_addr;
-#elif defined(__LITTLE_ENDIAN)
- u16 lsb_mac_addr;
- u16 pfc_pause_quanta_in_nanosec;
-#endif
+struct stats_query_entry {
+ u8 kind;
+ u8 index;
+ __le16 funcID;
+ __le32 reserved;
+ struct regpair address;
};
/*
- * Per-port congestion management variables
+ * statistic command
*/
-struct storm_cmng_struct_per_port {
- struct storm_pfc_struct_per_port pfc_vars;
+struct stats_query_cmd_group {
+ struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+};
+
+
+/*
+ * statistic command header
+ */
+struct stats_query_header {
+ u8 cmd_num;
+ u8 reserved0;
+ __le16 drv_stats_counter;
+ __le32 reserved1;
+ struct regpair stats_counters_addrs;
+};
+
+
+/*
+ * Types of statistics query entry
+ */
+enum stats_query_type {
+ STATS_TYPE_QUEUE,
+ STATS_TYPE_PORT,
+ STATS_TYPE_PF,
+ STATS_TYPE_TOE,
+ STATS_TYPE_FCOE,
+ MAX_STATS_QUERY_TYPE
+};
+
+
+/*
+ * Indicates the function status block state
+ */
+enum status_block_state {
+ SB_DISABLED,
+ SB_ENABLED,
+ SB_CLEANED,
+ MAX_STATUS_BLOCK_STATE
+};
+
+
+/*
+ * Storm IDs (including attentions for IGU related enums)
+ */
+enum storm_id {
+ USTORM_ID,
+ CSTORM_ID,
+ XSTORM_ID,
+ TSTORM_ID,
+ ATTENTION_ID,
+ MAX_STORM_ID
+};
+
+
+/*
+ * Traffic types used in ETS and flow control algorithms
+ */
+enum traffic_type {
+ LLFC_TRAFFIC_TYPE_NW,
+ LLFC_TRAFFIC_TYPE_FCOE,
+ LLFC_TRAFFIC_TYPE_ISCSI,
+ MAX_TRAFFIC_TYPE
};
@@ -3715,6 +5104,16 @@ struct vf_pf_channel_data {
/*
+ * State of VF-PF channel
+ */
+enum vf_pf_channel_state {
+ VF_PF_CHANNEL_STATE_READY,
+ VF_PF_CHANNEL_STATE_WAITING_FOR_ACK,
+ MAX_VF_PF_CHANNEL_STATE
+};
+
+
+/*
* zone A per-queue data
*/
struct xstorm_queue_zone_data {
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index d5399206f66..4d748e77d1a 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -15,98 +15,34 @@
#ifndef BNX2X_INIT_H
#define BNX2X_INIT_H
-/* RAM0 size in bytes */
-#define STORM_INTMEM_SIZE_E1 0x5800
-#define STORM_INTMEM_SIZE_E1H 0x10000
-#define STORM_INTMEM_SIZE(bp) ((CHIP_IS_E1(bp) ? STORM_INTMEM_SIZE_E1 : \
- STORM_INTMEM_SIZE_E1H) / 4)
-
-
/* Init operation types and structures */
-/* Common for both E1 and E1H */
-#define OP_RD 0x1 /* read single register */
-#define OP_WR 0x2 /* write single register */
-#define OP_IW 0x3 /* write single register using mailbox */
-#define OP_SW 0x4 /* copy a string to the device */
-#define OP_SI 0x5 /* copy a string using mailbox */
-#define OP_ZR 0x6 /* clear memory */
-#define OP_ZP 0x7 /* unzip then copy with DMAE */
-#define OP_WR_64 0x8 /* write 64 bit pattern */
-#define OP_WB 0x9 /* copy a string using DMAE */
-
-/* FPGA and EMUL specific operations */
-#define OP_WR_EMUL 0xa /* write single register on Emulation */
-#define OP_WR_FPGA 0xb /* write single register on FPGA */
-#define OP_WR_ASIC 0xc /* write single register on ASIC */
-
-/* Init stages */
-/* Never reorder stages !!! */
-#define COMMON_STAGE 0
-#define PORT0_STAGE 1
-#define PORT1_STAGE 2
-#define FUNC0_STAGE 3
-#define FUNC1_STAGE 4
-#define FUNC2_STAGE 5
-#define FUNC3_STAGE 6
-#define FUNC4_STAGE 7
-#define FUNC5_STAGE 8
-#define FUNC6_STAGE 9
-#define FUNC7_STAGE 10
-#define STAGE_IDX_MAX 11
-
-#define STAGE_START 0
-#define STAGE_END 1
-
-
-/* Indices of blocks */
-#define PRS_BLOCK 0
-#define SRCH_BLOCK 1
-#define TSDM_BLOCK 2
-#define TCM_BLOCK 3
-#define BRB1_BLOCK 4
-#define TSEM_BLOCK 5
-#define PXPCS_BLOCK 6
-#define EMAC0_BLOCK 7
-#define EMAC1_BLOCK 8
-#define DBU_BLOCK 9
-#define MISC_BLOCK 10
-#define DBG_BLOCK 11
-#define NIG_BLOCK 12
-#define MCP_BLOCK 13
-#define UPB_BLOCK 14
-#define CSDM_BLOCK 15
-#define USDM_BLOCK 16
-#define CCM_BLOCK 17
-#define UCM_BLOCK 18
-#define USEM_BLOCK 19
-#define CSEM_BLOCK 20
-#define XPB_BLOCK 21
-#define DQ_BLOCK 22
-#define TIMERS_BLOCK 23
-#define XSDM_BLOCK 24
-#define QM_BLOCK 25
-#define PBF_BLOCK 26
-#define XCM_BLOCK 27
-#define XSEM_BLOCK 28
-#define CDU_BLOCK 29
-#define DMAE_BLOCK 30
-#define PXP_BLOCK 31
-#define CFC_BLOCK 32
-#define HC_BLOCK 33
-#define PXP2_BLOCK 34
-#define MISC_AEU_BLOCK 35
-#define PGLUE_B_BLOCK 36
-#define IGU_BLOCK 37
-#define ATC_BLOCK 38
-#define QM_4PORT_BLOCK 39
-#define XSEM_4PORT_BLOCK 40
+enum {
+ OP_RD = 0x1, /* read a single register */
+ OP_WR, /* write a single register */
+ OP_SW, /* copy a string to the device */
+ OP_ZR, /* clear memory */
+ OP_ZP, /* unzip then copy with DMAE */
+ OP_WR_64, /* write 64 bit pattern */
+ OP_WB, /* copy a string using DMAE */
+ OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */
+ /* Skip the following ops if none of the init modes match */
+ OP_IF_MODE_OR,
+ /* Skip the following ops if any of the init modes don't match */
+ OP_IF_MODE_AND,
+ OP_MAX
+};
+enum {
+ STAGE_START,
+ STAGE_END,
+};
/* Returns the index of start or end of a specific block stage in ops array*/
#define BLOCK_OPS_IDX(block, stage, end) \
- (2*(((block)*STAGE_IDX_MAX) + (stage)) + (end))
+ (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
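To make the index arithmetic concrete, here is a small standalone example of the same computation, assuming NUM_OF_INIT_PHASES is 11 as in the init-phases enum added later in this file; the ops-offsets array stores a (start, end) pair per (block, phase), and the block/phase numbers used here are only illustrative.

#include <stdio.h>

/* Mirrors the macro above: two u16 entries (start, end) per (block, stage). */
#define NUM_OF_INIT_PHASES 11		/* PHASE_COMMON .. PHASE_PF7 */
#define BLOCK_OPS_IDX(block, stage, end) \
	(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))

#define STAGE_START 0
#define STAGE_END   1

int main(void)
{
	int block = 19;			/* e.g. BLOCK_QM in the enum below */
	int phase = 3;			/* e.g. PHASE_PF0 */

	/* 2*(19*11 + 3) + 0 = 424 and 425 */
	printf("start offset index: %d\n",
	       BLOCK_OPS_IDX(block, phase, STAGE_START));
	printf("end   offset index: %d\n",
	       BLOCK_OPS_IDX(block, phase, STAGE_END));
	return 0;
}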
+/* structs for the various opcodes */
struct raw_op {
u32 op:8;
u32 offset:24;
@@ -116,7 +52,7 @@ struct raw_op {
struct op_read {
u32 op:8;
u32 offset:24;
- u32 pad;
+ u32 val;
};
struct op_write {
@@ -125,15 +61,15 @@ struct op_write {
u32 val;
};
-struct op_string_write {
+struct op_arr_write {
u32 op:8;
u32 offset:24;
-#ifdef __LITTLE_ENDIAN
- u16 data_off;
- u16 data_len;
-#else /* __BIG_ENDIAN */
+#ifdef __BIG_ENDIAN
u16 data_len;
u16 data_off;
+#else /* __LITTLE_ENDIAN */
+ u16 data_off;
+ u16 data_len;
#endif
};
@@ -143,14 +79,209 @@ struct op_zero {
u32 len;
};
+struct op_if_mode {
+ u32 op:8;
+ u32 cmd_offset:24;
+ u32 mode_bit_map;
+};
+
+
union init_op {
struct op_read read;
struct op_write write;
- struct op_string_write str_wr;
+ struct op_arr_write arr_wr;
struct op_zero zero;
struct raw_op raw;
+ struct op_if_mode if_mode;
+};
+
+
+/* Init Phases */
+enum {
+ PHASE_COMMON,
+ PHASE_PORT0,
+ PHASE_PORT1,
+ PHASE_PF0,
+ PHASE_PF1,
+ PHASE_PF2,
+ PHASE_PF3,
+ PHASE_PF4,
+ PHASE_PF5,
+ PHASE_PF6,
+ PHASE_PF7,
+ NUM_OF_INIT_PHASES
};
+/* Init Modes */
+enum {
+ MODE_ASIC = 0x00000001,
+ MODE_FPGA = 0x00000002,
+ MODE_EMUL = 0x00000004,
+ MODE_E2 = 0x00000008,
+ MODE_E3 = 0x00000010,
+ MODE_PORT2 = 0x00000020,
+ MODE_PORT4 = 0x00000040,
+ MODE_SF = 0x00000080,
+ MODE_MF = 0x00000100,
+ MODE_MF_SD = 0x00000200,
+ MODE_MF_SI = 0x00000400,
+ MODE_MF_NIV = 0x00000800,
+ MODE_E3_A0 = 0x00001000,
+ MODE_E3_B0 = 0x00002000,
+ MODE_COS3 = 0x00004000,
+ MODE_COS6 = 0x00008000,
+ MODE_LITTLE_ENDIAN = 0x00010000,
+ MODE_BIG_ENDIAN = 0x00020000,
+};
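The mode flags are a plain bitmask, and the two conditional opcodes added above differ only in how the mask is tested: OP_IF_MODE_AND requires all bits of the map to be set, OP_IF_MODE_OR requires at least one. A small self-contained illustration of that test, using a few of the MODE_* values above:

#include <stdio.h>

#define MODE_ASIC	0x00000001
#define MODE_E2		0x00000008
#define MODE_PORT2	0x00000020
#define MODE_SF		0x00000080

/* OP_IF_MODE_AND keeps the block only if *all* bits in the map are set. */
static int if_mode_and_matches(unsigned flags, unsigned map)
{
	return (flags & map) == map;
}

/* OP_IF_MODE_OR keeps the block if *at least one* bit in the map is set. */
static int if_mode_or_matches(unsigned flags, unsigned map)
{
	return (flags & map) != 0;
}

int main(void)
{
	unsigned init_flags = MODE_ASIC | MODE_E2 | MODE_PORT2 | MODE_SF;

	printf("AND(E2|PORT2): %d\n",
	       if_mode_and_matches(init_flags, MODE_E2 | MODE_PORT2)); /* 1 */
	printf("AND(E2|0x40):  %d\n",
	       if_mode_and_matches(init_flags, MODE_E2 | 0x40));       /* 0 */
	printf("OR (E2|0x40):  %d\n",
	       if_mode_or_matches(init_flags, MODE_E2 | 0x40));        /* 1 */
	return 0;
}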
+
+/* Init Blocks */
+enum {
+ BLOCK_ATC,
+ BLOCK_BRB1,
+ BLOCK_CCM,
+ BLOCK_CDU,
+ BLOCK_CFC,
+ BLOCK_CSDM,
+ BLOCK_CSEM,
+ BLOCK_DBG,
+ BLOCK_DMAE,
+ BLOCK_DORQ,
+ BLOCK_HC,
+ BLOCK_IGU,
+ BLOCK_MISC,
+ BLOCK_NIG,
+ BLOCK_PBF,
+ BLOCK_PGLUE_B,
+ BLOCK_PRS,
+ BLOCK_PXP2,
+ BLOCK_PXP,
+ BLOCK_QM,
+ BLOCK_SRC,
+ BLOCK_TCM,
+ BLOCK_TM,
+ BLOCK_TSDM,
+ BLOCK_TSEM,
+ BLOCK_UCM,
+ BLOCK_UPB,
+ BLOCK_USDM,
+ BLOCK_USEM,
+ BLOCK_XCM,
+ BLOCK_XPB,
+ BLOCK_XSDM,
+ BLOCK_XSEM,
+ BLOCK_MISC_AEU,
+ NUM_OF_INIT_BLOCKS
+};
+
+/* QM queue numbers */
+#define BNX2X_ETH_Q 0
+#define BNX2X_TOE_Q 3
+#define BNX2X_TOE_ACK_Q 6
+#define BNX2X_ISCSI_Q 9
+#define BNX2X_ISCSI_ACK_Q 11
+#define BNX2X_FCOE_Q 10
+
+/* Vnics per mode */
+#define BNX2X_PORT2_MODE_NUM_VNICS 4
+#define BNX2X_PORT4_MODE_NUM_VNICS 2
+
+/* COS offset for port1 in E3 B0 4port mode */
+#define BNX2X_E3B0_PORT1_COS_OFFSET 3
+
+/* QM Register addresses */
+#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\
+ (QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
+#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\
+ (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
+#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\
+ (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))
+
+/* extracts the QM queue number for the specified port and vnic */
+#define BNX2X_PF_Q_NUM(q_num, port, vnic)\
+ ((((port) << 1) | (vnic)) * 16 + (q_num))
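A hedged, self-contained example of the queue arithmetic above: it computes the per-PF queue number and the VOQ mask register/bit it lands in. The QM_REG_* base addresses below are placeholders rather than the real register map, and the port/vnic/COS values are chosen only for illustration.

#include <stdio.h>

/* Placeholder base addresses -- illustrative only. */
#define QM_REG_QVOQIDX_0	0x168240
#define QM_REG_VOQQMASK_0_LSB	0x168348

#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num) \
	(QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num) \
	(QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
#define BNX2X_PF_Q_NUM(q_num, port, vnic) \
	((((port) << 1) | (vnic)) * 16 + (q_num))

int main(void)
{
	unsigned q_num = 10;	/* BNX2X_FCOE_Q */
	unsigned port = 1, vnic = 1, cos = 1;
	unsigned pf_q_num = BNX2X_PF_Q_NUM(q_num, port, vnic);

	/* ((1<<1)|1)*16 + 10 = 58 */
	printf("pf_q_num     = %u\n", pf_q_num);
	printf("VOQ idx reg  = 0x%x\n", BNX2X_Q_VOQ_REG_ADDR(pf_q_num));
	/* pf_q_num >> 5 selects the LSB/MSB half, pf_q_num & 0x1f the bit */
	printf("VOQ mask reg = 0x%x, bit %u\n",
	       BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num), pf_q_num & 0x1f);
	return 0;
}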
+
+
+/* Maps the specified queue to the specified COS */
+static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
+{
+ /* find current COS mapping */
+ u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4);
+
+ /* check if queue->COS mapping has changed */
+ if (curr_cos != new_cos) {
+ u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS;
+ u32 reg_addr, reg_bit_map, vnic;
+
+ /* update parameters for 4port mode */
+ if (INIT_MODE_FLAGS(bp) & MODE_PORT4) {
+ num_vnics = BNX2X_PORT4_MODE_NUM_VNICS;
+ if (BP_PORT(bp)) {
+ curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
+ new_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
+ }
+ }
+
+ /* change queue mapping for each VNIC */
+ for (vnic = 0; vnic < num_vnics; vnic++) {
+ u32 pf_q_num =
+ BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic);
+ u32 q_bit_map = 1 << (pf_q_num & 0x1f);
+
+ /* overwrite queue->VOQ mapping */
+ REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos);
+
+ /* clear queue bit from current COS bit map */
+ reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
+ reg_bit_map = REG_RD(bp, reg_addr);
+ REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map));
+
+ /* set queue bit in new COS bit map */
+ reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
+ reg_bit_map = REG_RD(bp, reg_addr);
+ REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
+
+ /* set/clear queue bit in command-queue bit map
+ (E2/E3A0 only, valid COS values are 0/1) */
+ if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
+ reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
+ reg_bit_map = REG_RD(bp, reg_addr);
+ q_bit_map = 1 << (2 * (pf_q_num & 0xf));
+ reg_bit_map = new_cos ?
+ (reg_bit_map | q_bit_map) :
+ (reg_bit_map & (~q_bit_map));
+ REG_WR(bp, reg_addr, reg_bit_map);
+ }
+ }
+ }
+}
+
+/* Configures the QM according to the specified per-traffic-type COSes */
+static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
+ struct priority_cos *traffic_cos)
+{
+ bnx2x_map_q_cos(bp, BNX2X_FCOE_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
+ bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+ bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+ if (mode != STATIC_COS) {
+ /* required only in backward compatible COS mode */
+ bnx2x_map_q_cos(bp, BNX2X_ETH_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ bnx2x_map_q_cos(bp, BNX2X_TOE_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q,
+ traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+ }
+}
+
+
+/* Returns the index of start or end of a specific block stage in ops array*/
+#define BLOCK_OPS_IDX(block, stage, end) \
+ (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
+
+
#define INITOP_SET 0 /* set the HW directly */
#define INITOP_CLEAR 1 /* clear the HW directly */
#define INITOP_INIT 2 /* set the init-value array */
@@ -195,25 +326,25 @@ struct src_ent {
/****************************************************************************
* Parity configuration
****************************************************************************/
-#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \
+#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
{ \
block##_REG_##block##_PRTY_MASK, \
block##_REG_##block##_PRTY_STS_CLR, \
- en_mask, {m1, m1h, m2}, #block \
+ en_mask, {m1, m1h, m2, m3}, #block \
}
-#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \
{ \
block##_REG_##block##_PRTY_MASK_0, \
block##_REG_##block##_PRTY_STS_CLR_0, \
- en_mask, {m1, m1h, m2}, #block"_0" \
+ en_mask, {m1, m1h, m2, m3}, #block"_0" \
}
-#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \
{ \
block##_REG_##block##_PRTY_MASK_1, \
block##_REG_##block##_PRTY_STS_CLR_1, \
- en_mask, {m1, m1h, m2}, #block"_1" \
+ en_mask, {m1, m1h, m2, m3}, #block"_1" \
}
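The BLOCK_PRTY_INFO* macros rely on token pasting to build the per-block register names and on stringification for the block name. A minimal sketch of the same pattern, with made-up register constants so the pasted names resolve (the real GRC offsets are not reproduced here):

#include <stdio.h>

/* Made-up register offsets, only so the pasted names resolve. */
#define FOO_REG_FOO_PRTY_MASK		0x1000
#define FOO_REG_FOO_PRTY_STS_CLR	0x1004

struct prty_info {
	unsigned mask_addr;
	unsigned sts_clr_addr;
	unsigned en_mask;
	unsigned reg_mask[4];		/* e1, e1h, e2, e3 */
	const char *name;
};

/* Same shape as BLOCK_PRTY_INFO above: block##_REG_##block##_... */
#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
	{ block##_REG_##block##_PRTY_MASK, \
	  block##_REG_##block##_PRTY_STS_CLR, \
	  en_mask, {m1, m1h, m2, m3}, #block }

static const struct prty_info table[] = {
	BLOCK_PRTY_INFO(FOO, 0x1, 0xf, 0xf, 0xf, 0x3f),
};

int main(void)
{
	printf("%s: mask reg 0x%x, e3 bits 0x%x\n",
	       table[0].name, table[0].mask_addr, table[0].reg_mask[3]);
	return 0;
}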
static const struct {
@@ -224,6 +355,7 @@ static const struct {
u32 e1; /* 57710 */
u32 e1h; /* 57711 */
u32 e2; /* 57712 */
+ u32 e3; /* 578xx */
} reg_mask; /* Register mask (all valid bits) */
char name[7]; /* Block's longest name is 6 characters long
* (name + suffix)
@@ -241,39 +373,56 @@ static const struct {
/* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
* want to handle "system kill" flow at the moment.
*/
- BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
- BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
- BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
- BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
- BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff),
- BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
- BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff),
- BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
+ BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff,
+ 0x7ffffff),
+ BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff),
+ BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0),
+ BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0),
+ BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0, 0xffffffff, 0xffffffff),
+ BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0, 0xff, 0xffff),
+ BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff),
+ BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3),
+ BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3),
{GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
- GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0,
- {0xf, 0xf, 0xf}, "UPB"},
+ GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
+ {0xf, 0xf, 0xf, 0xf}, "UPB"},
{GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
- {0xf, 0xf, 0xf}, "XPB"},
- BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
- BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
- BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf),
- BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
- BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
- BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
- BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
- BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
- BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
- BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
- BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
- BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
- BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f),
- BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
- BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f),
- BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
- BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f),
- BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
- BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f),
+ {0xf, 0xf, 0xf, 0xf}, "XPB"},
+ BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7),
+ BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f),
+ BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1),
+ BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf),
+ BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff),
+ BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff),
+ BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f),
+ BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+ BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+ BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
+ BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
+ BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
+ BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff),
+ BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
};
@@ -324,8 +473,10 @@ static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
return bnx2x_blocks_parity_data[idx].reg_mask.e1;
else if (CHIP_IS_E1H(bp))
return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
- else
+ else if (CHIP_IS_E2(bp))
return bnx2x_blocks_parity_data[idx].reg_mask.e2;
+ else /* CHIP_IS_E3 */
+ return bnx2x_blocks_parity_data[idx].reg_mask.e3;
}
static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index aafd0232393..7ec1724753a 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -15,13 +15,39 @@
#ifndef BNX2X_INIT_OPS_H
#define BNX2X_INIT_OPS_H
+
+#ifndef BP_ILT
+#define BP_ILT(bp) NULL
+#endif
+
+#ifndef BP_FUNC
+#define BP_FUNC(bp) 0
+#endif
+
+#ifndef BP_PORT
+#define BP_PORT(bp) 0
+#endif
+
+#ifndef BNX2X_ILT_FREE
+#define BNX2X_ILT_FREE(x, y, sz)
+#endif
+
+#ifndef BNX2X_ILT_ZALLOC
+#define BNX2X_ILT_ZALLOC(x, y, sz)
+#endif
+
+#ifndef ILOG2
+#define ILOG2(x) x
+#endif
+
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
-static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
- u32 addr, u32 len);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp,
+ dma_addr_t phys_addr, u32 addr,
+ u32 len);
-static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
- u32 len)
+static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr,
+ const u32 *data, u32 len)
{
u32 i;
@@ -29,24 +55,32 @@ static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
REG_WR(bp, addr + i*4, data[i]);
}
-static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
- u32 len)
+static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr,
+ const u32 *data, u32 len)
{
u32 i;
for (i = 0; i < len; i++)
- REG_WR_IND(bp, addr + i*4, data[i]);
+ bnx2x_reg_wr_ind(bp, addr + i*4, data[i]);
}
-static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
+static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
+ u8 wb)
{
if (bp->dmae_ready)
bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+ else if (wb)
+ /*
+ * Wide bus registers with no dmae need to be written
+ * using indirect write.
+ */
+ bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
else
bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
}
-static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill,
+ u32 len, u8 wb)
{
u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
u32 buf_len32 = buf_len/4;
@@ -57,12 +91,20 @@ static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
for (i = 0; i < len; i += buf_len32) {
u32 cur_len = min(buf_len32, len - i);
- bnx2x_write_big_buf(bp, addr + i*4, cur_len);
+ bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb);
}
}
-static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
- u32 len64)
+static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
+{
+ if (bp->dmae_ready)
+ bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+ else
+ bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+}
+
+static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
+ const u32 *data, u32 len64)
{
u32 buf_len32 = FW_BUF_SIZE/4;
u32 len = len64*2;
@@ -82,7 +124,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
for (i = 0; i < len; i += buf_len32) {
u32 cur_len = min(buf_len32, len - i);
- bnx2x_write_big_buf(bp, addr + i*4, cur_len);
+ bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len);
}
}
@@ -100,7 +142,8 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
#define IF_IS_PRAM_ADDR(base, addr) \
if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
-static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data)
+static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr,
+ const u8 *data)
{
IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
data = INIT_TSEM_INT_TABLE_DATA(bp);
@@ -129,31 +172,17 @@ static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data)
return data;
}
-static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
+static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
+ const u32 *data, u32 len)
{
if (bp->dmae_ready)
- bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+ VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
else
- bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
-}
-
-static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
- u32 len)
-{
- const u32 *old_data = data;
-
- data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data);
-
- if (bp->dmae_ready) {
- if (old_data != data)
- VIRT_WR_DMAE_LEN(bp, data, addr, len, 1);
- else
- VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
- } else
bnx2x_init_ind_wr(bp, addr, data, len);
}
-static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi)
+static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
+ u32 val_hi)
{
u32 wb_write[2];
@@ -161,8 +190,8 @@ static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi)
wb_write[1] = val_hi;
REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
}
-
-static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
+static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
+ u32 blob_off)
{
const u8 *data = NULL;
int rc;
@@ -186,39 +215,33 @@ static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
{
u16 op_start =
- INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_START)];
+ INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
+ STAGE_START)];
u16 op_end =
- INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_END)];
+ INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
+ STAGE_END)];
union init_op *op;
- int hw_wr;
- u32 i, op_type, addr, len;
+ u32 op_idx, op_type, addr, len;
const u32 *data, *data_base;
/* If empty block */
if (op_start == op_end)
return;
- if (CHIP_REV_IS_FPGA(bp))
- hw_wr = OP_WR_FPGA;
- else if (CHIP_REV_IS_EMUL(bp))
- hw_wr = OP_WR_EMUL;
- else
- hw_wr = OP_WR_ASIC;
-
data_base = INIT_DATA(bp);
- for (i = op_start; i < op_end; i++) {
-
- op = (union init_op *)&(INIT_OPS(bp)[i]);
+ for (op_idx = op_start; op_idx < op_end; op_idx++) {
- op_type = op->str_wr.op;
- addr = op->str_wr.offset;
- len = op->str_wr.data_len;
- data = data_base + op->str_wr.data_off;
-
- /* HW/EMUL specific */
- if ((op_type > OP_WB) && (op_type == hw_wr))
- op_type = OP_WR;
+ op = (union init_op *)&(INIT_OPS(bp)[op_idx]);
+ /* Get generic data */
+ op_type = op->raw.op;
+ addr = op->raw.offset;
+ /* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and
+ * OP_WR64 (we assume that op_arr_write and op_write have the
+ * same structure).
+ */
+ len = op->arr_wr.data_len;
+ data = data_base + op->arr_wr.data_off;
switch (op_type) {
case OP_RD:
@@ -233,21 +256,39 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
case OP_WB:
bnx2x_init_wr_wb(bp, addr, data, len);
break;
- case OP_SI:
- bnx2x_init_ind_wr(bp, addr, data, len);
- break;
case OP_ZR:
- bnx2x_init_fill(bp, addr, 0, op->zero.len);
+ bnx2x_init_fill(bp, addr, 0, op->zero.len, 0);
+ break;
+ case OP_WB_ZR:
+ bnx2x_init_fill(bp, addr, 0, op->zero.len, 1);
break;
case OP_ZP:
bnx2x_init_wr_zp(bp, addr, len,
- op->str_wr.data_off);
+ op->arr_wr.data_off);
break;
case OP_WR_64:
bnx2x_init_wr_64(bp, addr, data, len);
break;
+ case OP_IF_MODE_AND:
+ /* if any of the flags doesn't match, skip the
+ * conditional block.
+ */
+ if ((INIT_MODE_FLAGS(bp) &
+ op->if_mode.mode_bit_map) !=
+ op->if_mode.mode_bit_map)
+ op_idx += op->if_mode.cmd_offset;
+ break;
+ case OP_IF_MODE_OR:
+ /* if all the flags don't match, skip the conditional
+ * block.
+ */
+ if ((INIT_MODE_FLAGS(bp) &
+ op->if_mode.mode_bit_map) == 0)
+ op_idx += op->if_mode.cmd_offset;
+ break;
default:
- /* happens whenever an op is of a diff HW */
+ /* Should never get here! */
+
break;
}
}
@@ -417,7 +458,8 @@ static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
PXP2_REG_RQ_BW_WR_UBOUND30}
};
-static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
+static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order,
+ int w_order)
{
u32 val, i;
@@ -491,19 +533,21 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
- if (CHIP_IS_E2(bp))
+ if (CHIP_IS_E3(bp))
+ REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
+ else if (CHIP_IS_E2(bp))
REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
else
REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
- if (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1(bp)) {
/* MPS w_order optimal TH presently TH
* 128 0 0 2
* 256 1 1 3
* >=512 2 2 3
*/
/* DMAE is special */
- if (CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1H(bp)) {
/* E2 can use optimal TH */
val = w_order;
REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
@@ -557,8 +601,8 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
#define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
#define ILT_RANGE(f, l) (((l) << 10) | f)
-static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line,
- u32 size, u8 memop)
+static int bnx2x_ilt_line_mem_op(struct bnx2x *bp,
+ struct ilt_line *line, u32 size, u8 memop)
{
if (memop == ILT_MEMOP_FREE) {
BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
@@ -572,7 +616,8 @@ static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line,
}
-static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
+static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
+ u8 memop)
{
int i, rc;
struct bnx2x_ilt *ilt = BP_ILT(bp);
@@ -617,8 +662,8 @@ static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
}
-static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
- int idx, u8 initop)
+static void bnx2x_ilt_line_init_op(struct bnx2x *bp,
+ struct bnx2x_ilt *ilt, int idx, u8 initop)
{
dma_addr_t null_mapping;
int abs_idx = ilt->start_line + idx;
@@ -733,7 +778,7 @@ static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
}
static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
- u32 psz_reg, u8 initop)
+ u32 psz_reg, u8 initop)
{
struct bnx2x_ilt *ilt = BP_ILT(bp);
struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
@@ -848,7 +893,8 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
/* Initialize T2 */
for (i = 0; i < src_cid_count-1; i++)
- t2[i].next = (u64)(t2_mapping + (i+1)*sizeof(struct src_ent));
+ t2[i].next = (u64)(t2_mapping +
+ (i+1)*sizeof(struct src_ent));
/* tell the searcher where the T2 table is */
REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 076e11f5769..d45b1555a60 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -25,6 +25,8 @@
#include <linux/mutex.h>
#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+
/********************************************************/
#define ETH_HLEN 14
@@ -35,6 +37,13 @@
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
#define MDIO_ACCESS_TIMEOUT 1000
#define BMAC_CONTROL_RX_ENABLE 2
+#define WC_LANE_MAX 4
+#define I2C_SWITCH_WIDTH 2
+#define I2C_BSC0 0
+#define I2C_BSC1 1
+#define I2C_WA_RETRY_CNT 3
+#define MCPR_IMC_COMMAND_READ_OP 1
+#define MCPR_IMC_COMMAND_WRITE_OP 2
/***********************************************************/
/* Shortcut definitions */
@@ -103,16 +112,13 @@
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
#define GP_STATUS_10G_CX4 \
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
-#define GP_STATUS_12G_HIG \
- MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG
-#define GP_STATUS_12_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G
-#define GP_STATUS_13G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G
-#define GP_STATUS_15G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G
-#define GP_STATUS_16G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G
#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
#define GP_STATUS_10G_KX4 \
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
-
+#define GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR
+#define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
+#define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
+#define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
@@ -126,20 +132,10 @@
#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
-#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
-#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
-#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
-#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
-#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
-#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
-#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
-#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
-#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
-#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
-
-#define PHY_XGXS_FLAG 0x1
-#define PHY_SGMII_FLAG 0x2
-#define PHY_SERDES_FLAG 0x4
+#define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
+#define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
+
+
/* */
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
@@ -165,8 +161,104 @@
#define EDC_MODE_PASSIVE_DAC 0x0055
+/* BRB thresholds for E2*/
+#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170
+#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
+
+#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE 250
+#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
+
+#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE 10
+#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 90
+
+#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50
+#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250
+
+/* BRB thresholds for E3A0 */
+#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290
+#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
+
+#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE 410
+#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
+
+#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE 10
+#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 170
+
+#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50
+#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410
+
+
+/* BRB thresholds for E3B0 2 port mode*/
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
+
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE 1025
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
+
+#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
+#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 1025
+
+#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE 50
+#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE 1025
+
+/* only for E3B0*/
+#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR 1025
+#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR 1025
+
+/* Lossy + Lossless GUARANTEED == GUART */
+#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART 284
+/* Lossless +Lossless*/
+#define PFC_E3B0_2P_PAUSE_LB_GUART 236
+/* Lossy +Lossy*/
+#define PFC_E3B0_2P_NON_PAUSE_LB_GUART 342
+
+/* Lossy +Lossless*/
+#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART 284
+/* Lossless +Lossless*/
+#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART 236
+/* Lossy +Lossy*/
+#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART 336
+#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST 80
+
+#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART 0
+#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST 0
+
+/* BRB thresholds for E3B0 4 port mode */
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 304
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
+
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE 384
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0
+
+#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE 10
+#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 304
+
+#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50
+#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384
+
+
+/* only for E3B0*/
+#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304
+#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384
+#define PFC_E3B0_4P_LB_GUART 120
+
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
+
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
+
+#define DCBX_INVALID_COS (0xFF)
+
#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
#define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000)
+#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360)
+#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720)
+#define ETS_E3B0_PBF_MIN_W_VAL (10000)
+
+#define MAX_PACKET_SIZE (9700)
+#define WC_UC_TIMEOUT 100
+
/**********************************************************/
/* INTERFACE */
/**********************************************************/
@@ -202,14 +294,86 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
}
/******************************************************************/
+/* EPIO/GPIO section */
+/******************************************************************/
+static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en)
+{
+ u32 epio_mask, gp_oenable;
+ *en = 0;
+ /* Sanity check */
+ if (epio_pin > 31) {
+ DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin);
+ return;
+ }
+
+ epio_mask = 1 << epio_pin;
+ /* Configure this EPIO as an input so its level can be read */
+ gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
+ REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask);
+
+ *en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin;
+}
+static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en)
+{
+ u32 epio_mask, gp_output, gp_oenable;
+
+ /* Sanity check */
+ if (epio_pin > 31) {
+ DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin);
+ return;
+ }
+ DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en);
+ epio_mask = 1 << epio_pin;
+ /* Set the output value for this EPIO */
+ gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS);
+ if (en)
+ gp_output |= epio_mask;
+ else
+ gp_output &= ~epio_mask;
+
+ REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output);
+
+ /* Enable this EPIO as an output */
+ gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
+ REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask);
+}
+
+static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val)
+{
+ if (pin_cfg == PIN_CFG_NA)
+ return;
+ if (pin_cfg >= PIN_CFG_EPIO0) {
+ bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
+ } else {
+ u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+ u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+ bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port);
+ }
+}
+
+static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val)
+{
+ if (pin_cfg == PIN_CFG_NA)
+ return -EINVAL;
+ if (pin_cfg >= PIN_CFG_EPIO0) {
+ bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
+ } else {
+ u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+ u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+ *val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+ }
+ return 0;
+
+}
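The pin_cfg decoding above packs the GPIO number in the low two bits of the offset from PIN_CFG_GPIO0_P0 and the port in the next bits, with EPIO pins occupying a separate range starting at PIN_CFG_EPIO0. A small standalone illustration of that decode; the PIN_CFG_* values below are placeholders, not the real shared-memory encoding.

#include <stdio.h>

/* Placeholder encoding mirroring the decode logic above. */
#define PIN_CFG_NA		0
#define PIN_CFG_GPIO0_P0	1
#define PIN_CFG_EPIO0		9

static void decode_pin(unsigned pin_cfg)
{
	if (pin_cfg == PIN_CFG_NA) {
		printf("pin not configured\n");
	} else if (pin_cfg >= PIN_CFG_EPIO0) {
		printf("EPIO pin %u\n", pin_cfg - PIN_CFG_EPIO0);
	} else {
		unsigned gpio_num  = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
		unsigned gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;

		printf("GPIO %u on port %u\n", gpio_num, gpio_port);
	}
}

int main(void)
{
	decode_pin(PIN_CFG_GPIO0_P0 + 6);	/* GPIO 2 on port 1 */
	decode_pin(PIN_CFG_EPIO0 + 4);		/* EPIO pin 4 */
	return 0;
}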
+/******************************************************************/
/* ETS section */
/******************************************************************/
-void bnx2x_ets_disabled(struct link_params *params)
+static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
{
/* ETS disabled configuration*/
struct bnx2x *bp = params->bp;
- DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
+ DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
/*
* mapping between entry priority to client number (0,1,2 -debug and
@@ -262,7 +426,756 @@ void bnx2x_ets_disabled(struct link_params *params)
/* Defines the number of consecutive slots for the strict priority */
REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
}
+/******************************************************************************
+* Description:
+* Returns min_w_val, chosen according to the line speed.
+*.
+******************************************************************************/
+static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
+{
+ u32 min_w_val = 0;
+ /* Calculate min_w_val.*/
+ if (vars->link_up) {
+ if (SPEED_20000 == vars->line_speed)
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+ else
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
+ } else
+ min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+ /**
+ * If the link isn't up (e.g. a static configuration), the
+ * weight defaults to the 20Gbps value.
+ */
+ return min_w_val;
+}
+/******************************************************************************
+* Description:
+* Returns the credit upper bound derived from min_w_val.
+*.
+******************************************************************************/
+static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val)
+{
+ const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val),
+ MAX_PACKET_SIZE);
+ return credit_upper_bound;
+}
+/******************************************************************************
+* Description:
+* Set credit upper bound for NIG.
+*.
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
+ const struct link_params *params,
+ const u32 min_w_val)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u32 credit_upper_bound =
+ bnx2x_ets_get_credit_upper_bound(min_w_val);
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
+ NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
+
+ if (0 == port) {
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
+ credit_upper_bound);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
+ credit_upper_bound);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
+ credit_upper_bound);
+ }
+}
+/******************************************************************************
+* Description:
+* Returns the NIG ETS registers to their init values, except for
+* credit_upper_bound, which isn't used in this configuration (no WFQ is
+* enabled) and is configured according to spec.
+*.
+******************************************************************************/
+static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
+ const struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
+ /**
+ * Mapping between entry priority and client number (0,1,2 - debug and
+ * management clients, 3 - COS0 client, 4 - COS1, ... 8 - COS5)
+ * (HIGHEST), 4 bits per client number.
+ * TODO_ETS - Should be done by reset value or init tool
+ */
+ if (port) {
+ REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
+ REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
+ } else {
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
+ }
+ /**
+ * For strict priority entries defines the number of consecutive
+ * slots for the highest priority.
+ */
+ /* TODO_ETS - Should be done by reset value or init tool */
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
+ NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ /**
+ * mapping between the CREDIT_WEIGHT registers and actual client
+ * numbers
+ */
+ /* TODO_ETS - Should be done by reset value or init tool */
+ if (port) {
+ /*Port 1 has 6 COS*/
+ REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
+ REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
+ } else {
+ /*Port 0 has 9 COS*/
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
+ 0x43210876);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
+ }
+
+ /**
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 -
+ * COS0 entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
+ if (port)
+ REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
+ else
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
+ /* defines which entries (clients) are subjected to WFQ arbitration */
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+
+ /**
+ * Please note that the register addresses are not contiguous, so a
+ * for loop is not appropriate here. In 2 port mode only COS0-5 of
+ * port0 can be used; in 4 port mode only COS0-2 of port1 can be
+ * used. DEBUG0, DEBUG1 and MGMT are never used for WFQ.
+ */
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
+ if (0 == port) {
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
+ }
+
+ bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
+}
+/******************************************************************************
+* Description:
+* Set credit upper bound for PBF.
+*.
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
+ const struct link_params *params,
+ const u32 min_w_val)
+{
+ struct bnx2x *bp = params->bp;
+ const u32 credit_upper_bound =
+ bnx2x_ets_get_credit_upper_bound(min_w_val);
+ const u8 port = params->port;
+ u32 base_upper_bound = 0;
+ u8 max_cos = 0;
+ u8 i = 0;
+ /**
+ * In 2 port mode port0 has COS0-5 that can be used for WFQ. In 4
+ * port mode port1 has COS0-2 that can be used for WFQ.
+ */
+ if (0 == port) {
+ base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+ } else {
+ base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+ }
+
+ for (i = 0; i < max_cos; i++)
+ REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound);
+}
+
+/******************************************************************************
+* Description:
+* Returns the PBF ETS registers to their init values, except for
+* credit_upper_bound, which isn't used in this configuration (no WFQ is
+* enabled) and is configured according to spec.
+*.
+******************************************************************************/
+static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+ u8 i = 0;
+ u32 base_weight = 0;
+ u8 max_cos = 0;
+
+ /**
+ * Mapping between entry priority and client number (0 - COS0
+ * client, 2 - COS1, ... 5 - COS5) (HIGHEST), 4 bits per client num.
+ * TODO_ETS - Should be done by reset value or init tool
+ */
+ if (port)
+ /* 0x688 (|011|0 10|00 1|000) */
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , 0x688);
+ else
+ /* (10 1|100 |011|0 10|00 1|000) */
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , 0x2C688);
+
+ /* TODO_ETS - Should be done by reset value or init tool */
+ if (port)
+ /* 0x688 (|011|0 10|00 1|000)*/
+ REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
+ else
+ /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
+ REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
+ PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 , 0x100);
+
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , 0);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0);
+ /**
+ * In 2 port mode port0 has COS0-5 that can be used for WFQ.
+ * In 4 port mode port1 has COS0-2 that can be used for WFQ.
+ */
+ if (0 == port) {
+ base_weight = PBF_REG_COS0_WEIGHT_P0;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+ } else {
+ base_weight = PBF_REG_COS0_WEIGHT_P1;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+ }
+
+ for (i = 0; i < max_cos; i++)
+ REG_WR(bp, base_weight + (0x4 * i), 0);
+
+ bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+}
+/******************************************************************************
+* Description:
+* E3B0 disable basically returns the registers to their init values.
+*.
+******************************************************************************/
+static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
+ const struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (!CHIP_IS_E3B0(bp)) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_disabled the chip isn't E3B0"
+ "\n");
+ return -EINVAL;
+ }
+
+ bnx2x_ets_e3b0_nig_disabled(params, vars);
+
+ bnx2x_ets_e3b0_pbf_disabled(params);
+
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Disable basically returns the registers to their init values.
+*.
+******************************************************************************/
+int bnx2x_ets_disabled(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ int bnx2x_status = 0;
+
+ if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp)))
+ bnx2x_ets_e2e3a0_disabled(params);
+ else if (CHIP_IS_E3B0(bp))
+ bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
+ else {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
+ return -EINVAL;
+ }
+
+ return bnx2x_status;
+}
+
+/******************************************************************************
+* Description
+* Set the COS mapping to SP and BW; up to this point none of the COS
+* entries are set as SP or BW.
+******************************************************************************/
+static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
+ const struct bnx2x_ets_params *ets_params,
+ const u8 cos_sp_bitmap,
+ const u8 cos_bw_bitmap)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
+ const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
+ const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
+ const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 , pbf_cli_sp_bitmap);
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+ nig_cli_subject2wfq_bitmap);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
+ pbf_cli_subject2wfq_bitmap);
+
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
+* are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable.
+******************************************************************************/
+static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
+ const u8 cos_entry,
+ const u32 min_w_val_nig,
+ const u32 min_w_val_pbf,
+ const u16 total_bw,
+ const u8 bw,
+ const u8 port)
+{
+ u32 nig_reg_adress_crd_weight = 0;
+ u32 pbf_reg_adress_crd_weight = 0;
+ /* Calculate and set BW for this COS*/
+ const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
+ const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
+
+ switch (cos_entry) {
+ case 0:
+ nig_reg_adress_crd_weight =
+ (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+ break;
+ case 1:
+ nig_reg_adress_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+ break;
+ case 2:
+ nig_reg_adress_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+
+ pbf_reg_adress_crd_weight = (port) ?
+ PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+ break;
+ case 3:
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+ pbf_reg_adress_crd_weight =
+ PBF_REG_COS3_WEIGHT_P0;
+ break;
+ case 4:
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+ pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+ break;
+ case 5:
+ if (port)
+ return -EINVAL;
+ nig_reg_adress_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+ pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
+ break;
+ }
+
+ REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
+
+ REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf);
+
+ return 0;
+}
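The weight written per COS is a simple proportional share of min_w_val. A quick worked example under the same arithmetic, using the constants added earlier in this file and an illustrative 30% share:

#include <stdio.h>

#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS	1360
#define ETS_E3B0_PBF_MIN_W_VAL			10000

int main(void)
{
	unsigned total_bw = 100;	/* sum of all BW-class shares */
	unsigned bw = 30;		/* this COS gets 30% */

	/* Same formula as bnx2x_ets_e3b0_set_cos_bw() above. */
	unsigned cos_bw_nig =
		(bw * ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS) / total_bw;
	unsigned cos_bw_pbf = (bw * ETS_E3B0_PBF_MIN_W_VAL) / total_bw;

	printf("NIG credit weight: %u\n", cos_bw_nig);	/* 408 */
	printf("PBF credit weight: %u\n", cos_bw_pbf);	/* 3000 */
	return 0;
}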
+/******************************************************************************
+* Description:
+* Calculate the total BW. A value of 0 isn't legal.
+*.
+******************************************************************************/
+static int bnx2x_ets_e3b0_get_total_bw(
+ const struct link_params *params,
+ const struct bnx2x_ets_params *ets_params,
+ u16 *total_bw)
+{
+ struct bnx2x *bp = params->bp;
+ u8 cos_idx = 0;
+
+ *total_bw = 0 ;
+ /* Calculate total BW requested */
+ for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
+ if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
+
+ if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW "
+ "was set to 0\n");
+ return -EINVAL;
+ }
+ *total_bw +=
+ ets_params->cos[cos_idx].params.bw_params.bw;
+ }
+ }
+
+ /* Check that the total BW is valid */
+ if ((100 != *total_bw) || (0 == *total_bw)) {
+ if (0 == *total_bw) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW "
+ "shouldn't be 0\n");
+ return -EINVAL;
+ }
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW should be "
+ "100\n");
+ /**
+ * We can handle a case where the BW isn't 100; this can happen
+ * if the TCs are joined.
+ */
+ }
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Invalidate all the sp_pri_to_cos entries.
+******************************************************************************/
+static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
+{
+ u8 pri = 0;
+ for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
+ sp_pri_to_cos[pri] = DCBX_INVALID_COS;
+}
+/******************************************************************************
+* Description:
+* Validate and record the strict-priority to COS mapping in sp_pri_to_cos;
+* the NIG and PBF ARB_PRIORITY_CLIENT registers are programmed from it later.
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
+ u8 *sp_pri_to_cos, const u8 pri,
+ const u8 cos_entry)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+ if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+ "parameter: there can't be two COSes with "
+ "the same strict pri\n");
+ return -EINVAL;
+ }
+
+ if (pri > max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+ "parameter: illegal strict priority\n");
+ return -EINVAL;
+ }
+
+ sp_pri_to_cos[pri] = cos_entry;
+ return 0;
+
+}
+
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in
+* the sp_pri_cli register.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
+ const u8 pri_set,
+ const u8 pri_offset,
+ const u8 entry_size)
+{
+ u64 pri_cli_nig = 0;
+ pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size *
+ (pri_set + pri_offset));
+ return pri_cli_nig;
+}
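A worked example of the shift helper above with the NIG parameters (client offset 3, priority offset 3, 4-bit entries): placing COS 2 in strict-priority slot 1 contributes (2 + 3) << (4 * (1 + 3)) = 0x50000, which is later OR'd into the 0x210 seed that pre-assigns the MCP and debug clients to the first three slots:

	/* illustration only: COS 2 in strict-priority slot 1 on the NIG side */
	u64 val = bnx2x_e3b0_sp_get_pri_cli_reg_nig(2, 1);	/* == 0x50000 */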
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in the
+* sp_pri_cli register for NIG.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
+{
+ /* MCP, Dbg0 and Dbg1 always have the higher strict priorities */
+ const u8 nig_cos_offset = 3;
+ const u8 nig_pri_offset = 3;
+
+ return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
+ nig_pri_offset, 4);
+
+}
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in the
+* sp_pri_cli register for PBF.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
+{
+ const u8 pbf_cos_offset = 0;
+ const u8 pbf_pri_offset = 0;
+
+ return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
+ pbf_pri_offset, 3);
+
+}
+
+/******************************************************************************
+* Description:
+* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+* according to sp_pri_to_cos (i.e. which COS has the higher priority).
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
+ u8 *sp_pri_to_cos)
+{
+ struct bnx2x *bp = params->bp;
+ u8 i = 0;
+ const u8 port = params->port;
+ /* MCP, Dbg0 and Dbg1 always have the higher strict priorities */
+ u64 pri_cli_nig = 0x210;
+ u32 pri_cli_pbf = 0x0;
+ u8 pri_set = 0;
+ u8 pri_bitmask = 0;
+ const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+ u8 cos_bit_to_set = (1 << max_num_of_cos) - 1;
+
+ /* Set all the strict priority first */
+ for (i = 0; i < max_num_of_cos; i++) {
+ if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
+ if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+ "invalid cos entry\n");
+ return -EINVAL;
+ }
+
+ pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+ sp_pri_to_cos[i], pri_set);
+
+ pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+ sp_pri_to_cos[i], pri_set);
+ pri_bitmask = 1 << sp_pri_to_cos[i];
+ /* COS is used; remove it from the bitmap. */
+ if (0 == (pri_bitmask & cos_bit_to_set)) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+ "invalid There can't be two COS's with"
+ " the same strict pri\n");
+ return -EINVAL;
+ }
+ cos_bit_to_set &= ~pri_bitmask;
+ pri_set++;
+ }
+ }
+
+ /* Set all the non-strict priorities; i = COS */
+ for (i = 0; i < max_num_of_cos; i++) {
+ pri_bitmask = 1 << i;
+ /* Check if COS was already used for SP */
+ if (pri_bitmask & cos_bit_to_set) {
+ /* COS wasn't used for SP */
+ pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+ i, pri_set);
+
+ pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+ i, pri_set);
+ /* COS is used; remove it from the bitmap. */
+ cos_bit_to_set &= ~pri_bitmask;
+ pri_set++;
+ }
+ }
+
+ if (pri_set != max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all "
+ "entries were set\n");
+ return -EINVAL;
+ }
+
+ if (port) {
+ /* Only 6 usable clients*/
+ REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
+ (u32)pri_cli_nig);
+
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 , pri_cli_pbf);
+ } else {
+ /* Only 9 usable clients*/
+ const u32 pri_cli_nig_lsb = (u32) (pri_cli_nig);
+ const u32 pri_cli_nig_msb = (u32) ((pri_cli_nig >> 32) & 0xF);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
+ pri_cli_nig_lsb);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
+ pri_cli_nig_msb);
+
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 , pri_cli_pbf);
+ }
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Configure the COS to ETS according to BW and SP settings.
+******************************************************************************/
+int bnx2x_ets_e3b0_config(const struct link_params *params,
+ const struct link_vars *vars,
+ const struct bnx2x_ets_params *ets_params)
+{
+ struct bnx2x *bp = params->bp;
+ int bnx2x_status = 0;
+ const u8 port = params->port;
+ u16 total_bw = 0;
+ const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars);
+ const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+ u8 cos_bw_bitmap = 0;
+ u8 cos_sp_bitmap = 0;
+ u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0};
+ const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+ u8 cos_entry = 0;
+
+ if (!CHIP_IS_E3B0(bp)) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config: the chip isn't E3B0"
+ "\n");
+ return -EINVAL;
+ }
+
+ if (ets_params->num_of_cos > max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS "
+ "isn't supported\n");
+ return -EINVAL;
+ }
+
+ /* Prepare sp strict priority parameters*/
+ bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
+
+ /* Prepare BW parameters*/
+ bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
+ &total_bw);
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config get_total_bw failed "
+ "\n");
+ return -EINVAL;
+ }
+
+ /**
+ * Upper bound is set according to current link speed (min_w_val
+ * should be the same for upper bound and COS credit val).
+ */
+ bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
+ bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+
+
+ for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
+ if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
+ cos_bw_bitmap |= (1 << cos_entry);
+ /**
+ * The function also sets the BW in HW (not the mapping
+ * yet).
+ */
+ bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
+ bp, cos_entry, min_w_val_nig, min_w_val_pbf,
+ total_bw,
+ ets_params->cos[cos_entry].params.bw_params.bw,
+ port);
+ } else if (bnx2x_cos_state_strict ==
+ ets_params->cos[cos_entry].state){
+ cos_sp_bitmap |= (1 << cos_entry);
+
+ bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set(
+ params,
+ sp_pri_to_cos,
+ ets_params->cos[cos_entry].params.sp_params.pri,
+ cos_entry);
+
+ } else {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config cos state not"
+ " valid\n");
+ return -EINVAL;
+ }
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config set cos bw "
+ "failed\n");
+ return bnx2x_status;
+ }
+ }
+
+ /* Set SP register (which COS has higher priority) */
+ bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
+ sp_pri_to_cos);
+
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config set_pri_cli_reg "
+ "failed\n");
+ return bnx2x_status;
+ }
+
+ /* Set client mapping of BW and strict */
+ bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params,
+ cos_sp_bitmap,
+ cos_bw_bitmap);
+
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
+ return bnx2x_status;
+ }
+ return 0;
+}
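A minimal caller sketch for the routine above, assuming the bnx2x_ets_params layout it consumes (two bandwidth classes splitting 100% plus one strict class); params and vars are the caller's existing struct link_params / struct link_vars, and error handling is reduced to a debug print:

	struct bnx2x_ets_params ets = { 0 };

	ets.num_of_cos = 3;
	ets.cos[0].state = bnx2x_cos_state_bw;
	ets.cos[0].params.bw_params.bw = 60;	/* 60% of the link */
	ets.cos[1].state = bnx2x_cos_state_bw;
	ets.cos[1].params.bw_params.bw = 40;	/* 40% of the link */
	ets.cos[2].state = bnx2x_cos_state_strict;
	ets.cos[2].params.sp_params.pri = 0;	/* strict-priority slot 0 */

	if (bnx2x_ets_e3b0_config(params, vars, &ets))
		DP(NETIF_MSG_LINK, "ETS E3B0 configuration failed\n");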
static void bnx2x_ets_bw_limit_common(const struct link_params *params)
{
/* ETS disabled configuration */
@@ -342,7 +1255,7 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
}
-u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
+int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
{
/* ETS disabled configuration*/
struct bnx2x *bp = params->bp;
@@ -388,24 +1301,64 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
/* PFC section */
/******************************************************************/
-static void bnx2x_bmac2_get_pfc_stat(struct link_params *params,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2])
+static void bnx2x_update_pfc_xmac(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
{
- /* Read pfc statistic */
struct bnx2x *bp = params->bp;
- u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
- NIG_REG_INGRESS_BMAC0_MEM;
+ u32 xmac_base;
+ u32 pause_val, pfc0_val, pfc1_val;
- DP(NETIF_MSG_LINK, "pfc statistic read from BMAC\n");
+ /* XMAC base address */
+ xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
- REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_STAT_GTPP,
- pfc_frames_sent, 2);
+ /* Initialize pause and pfc registers */
+ pause_val = 0x18000;
+ pfc0_val = 0xFFFF8000;
+ pfc1_val = 0x2;
+
+ /* No PFC support */
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) {
+
+ /*
+ * RX flow control - Process pause frame in receive direction
+ */
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
- REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_STAT_GRPP,
- pfc_frames_received, 2);
+ /*
+ * TX flow control - Send pause packet when buffer is full
+ */
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
+ } else {/* PFC support */
+ pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
+ XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
+ XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
+ XMAC_PFC_CTRL_HI_REG_TX_PFC_EN;
+ }
+
+ /* Write pause and PFC registers */
+ REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+
+
+ /* Set MAC address for source TX Pause/PFC frames */
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO,
+ ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ (params->mac_addr[5])));
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI,
+ ((params->mac_addr[0] << 8) |
+ (params->mac_addr[1])));
+ udelay(30);
}
+
+
static void bnx2x_emac_get_pfc_stat(struct link_params *params,
u32 pfc_frames_sent[2],
u32 pfc_frames_received[2])
@@ -437,33 +1390,54 @@ static void bnx2x_emac_get_pfc_stat(struct link_params *params,
pfc_frames_sent[0] = val_xon + val_xoff;
}
+/* Read pfc statistic*/
void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
u32 pfc_frames_sent[2],
u32 pfc_frames_received[2])
{
/* Read pfc statistic */
struct bnx2x *bp = params->bp;
- u32 val = 0;
+
DP(NETIF_MSG_LINK, "pfc statistic\n");
if (!vars->link_up)
return;
- val = REG_RD(bp, MISC_REG_RESET_REG_2);
- if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
- == 0) {
- DP(NETIF_MSG_LINK, "About to read stats from EMAC\n");
+ if (MAC_TYPE_EMAC == vars->mac_type) {
+ DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
pfc_frames_received);
- } else {
- DP(NETIF_MSG_LINK, "About to read stats from BMAC\n");
- bnx2x_bmac2_get_pfc_stat(params, pfc_frames_sent,
- pfc_frames_received);
}
}
/******************************************************************/
/* MAC/PBF section */
/******************************************************************/
+static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
+{
+ u32 mode, emac_base;
+ /**
+ * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
+ * (a value of 49==0x31) and make sure that the AUTO poll is off
+ */
+
+ if (CHIP_IS_E2(bp))
+ emac_base = GRCBASE_EMAC0;
+ else
+ emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+ mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+ mode &= ~(EMAC_MDIO_MODE_AUTO_POLL |
+ EMAC_MDIO_MODE_CLOCK_CNT);
+ if (USES_WARPCORE(bp))
+ mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+ else
+ mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+
+ mode |= (EMAC_MDIO_MODE_CLAUSE_45);
+ REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode);
+
+ udelay(40);
+}
+
static void bnx2x_emac_init(struct link_params *params,
struct link_vars *vars)
{
@@ -495,7 +1469,7 @@ static void bnx2x_emac_init(struct link_params *params,
}
timeout--;
} while (val & EMAC_MODE_RESET);
-
+ bnx2x_set_mdio_clk(bp, params->chip_id, port);
/* Set mac address */
val = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
@@ -508,9 +1482,262 @@ static void bnx2x_emac_init(struct link_params *params,
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
}
-static u8 bnx2x_emac_enable(struct link_params *params,
+static void bnx2x_set_xumac_nig(struct link_params *params,
+ u16 tx_pause_en,
+ u8 enable)
+{
+ struct bnx2x *bp = params->bp;
+
+ REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN,
+ enable);
+ REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN,
+ enable);
+ REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN :
+ NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
+}
+
+static void bnx2x_umac_enable(struct link_params *params,
struct link_vars *vars, u8 lb)
{
+ u32 val;
+ u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+ struct bnx2x *bp = params->bp;
+ /* Reset UMAC */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+ usleep_range(1000, 1000);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+
+ DP(NETIF_MSG_LINK, "enabling UMAC\n");
+
+ /**
+ * This register determines on which events the MAC will assert
+ * error on the i/f to the NIG along w/ EOP.
+ */
+
+ /**
+ * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK +
+ * params->port*0x14, 0xfffff.
+ */
+ /* This register opens the gate for the UMAC despite its name */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+ val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN |
+ UMAC_COMMAND_CONFIG_REG_PAD_EN |
+ UMAC_COMMAND_CONFIG_REG_SW_RESET |
+ UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK;
+ switch (vars->line_speed) {
+ case SPEED_10:
+ val |= (0<<2);
+ break;
+ case SPEED_100:
+ val |= (1<<2);
+ break;
+ case SPEED_1000:
+ val |= (2<<2);
+ break;
+ case SPEED_2500:
+ val |= (3<<2);
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n",
+ vars->line_speed);
+ break;
+ }
+ if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE;
+
+ if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+ val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
+
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+ udelay(50);
+
+ /* Set MAC address for source TX Pause/PFC frames (under SW reset) */
+ REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
+ ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ (params->mac_addr[5])));
+ REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1,
+ ((params->mac_addr[0] << 8) |
+ (params->mac_addr[1])));
+
+ /* Enable RX and TX */
+ val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN;
+ val |= UMAC_COMMAND_CONFIG_REG_TX_ENA |
+ UMAC_COMMAND_CONFIG_REG_RX_ENA;
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+ udelay(50);
+
+ /* Remove SW Reset */
+ val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;
+
+ /* Check loopback mode */
+ if (lb)
+ val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
+ REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+
+ /*
+ * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+ * length used by the MAC receive logic to check frames.
+ */
+ REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+ bnx2x_set_xumac_nig(params,
+ ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+ vars->mac_type = MAC_TYPE_UMAC;
+
+}
+
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+ u32 port4mode_ovwr_val;
+ /* Check 4-port override enabled */
+ port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+ if (port4mode_ovwr_val & (1<<0)) {
+ /* Return 4-port mode override value */
+ return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+ }
+ /* Return 4-port mode from input pin */
+ return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
+
+/* Define the XMAC mode */
+static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed)
+{
+ u32 is_port4mode = bnx2x_is_4_port_mode(bp);
+
+ /**
+ * In 4-port mode the mode needs to be set only once, so if the XMAC
+ * is already out of reset, it means the mode has already been set,
+ * and it must not reset the XMAC again, since it controls both
+ * ports of the path.
+ **/
+
+ if (is_port4mode && (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ MISC_REGISTERS_RESET_REG_2_XMAC)) {
+ DP(NETIF_MSG_LINK, "XMAC already out of reset"
+ " in 4-port mode\n");
+ return;
+ }
+
+ /* Hard reset */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ MISC_REGISTERS_RESET_REG_2_XMAC);
+ usleep_range(1000, 1000);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ MISC_REGISTERS_RESET_REG_2_XMAC);
+ if (is_port4mode) {
+ DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
+
+ /* Set the number of ports on the system side to up to 2 */
+ REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+
+ /* Set the number of ports on the Warp Core to 10G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ } else {
+ /* Set the number of ports on the system side to 1 */
+ REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
+ if (max_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK, "Init XMAC to 10G x 1"
+ " port per path\n");
+ /* Set the number of ports on the Warp Core to 10G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ } else {
+ DP(NETIF_MSG_LINK, "Init XMAC to 20G x 2 ports"
+ " per path\n");
+ /* Set the number of ports on the Warp Core to 20G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1);
+ }
+ }
+ /* Soft reset */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+ usleep_range(1000, 1000);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+
+}
+
+static void bnx2x_xmac_disable(struct link_params *params)
+{
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ MISC_REGISTERS_RESET_REG_2_XMAC) {
+ /*
+ * Send an indication to change the state in the NIG back to XON.
+ * Clearing this bit enables the next setting of this bit to produce
+ * a rising edge.
+ */
+ pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+ (pfc_ctrl & ~(1<<1)));
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+ (pfc_ctrl | (1<<1)));
+ DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
+ usleep_range(1000, 1000);
+ bnx2x_set_xumac_nig(params, 0, 0);
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL,
+ XMAC_CTRL_REG_SOFT_RESET);
+ }
+}
+
+static int bnx2x_xmac_enable(struct link_params *params,
+ struct link_vars *vars, u8 lb)
+{
+ u32 val, xmac_base;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "enabling XMAC\n");
+
+ xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ bnx2x_xmac_init(bp, vars->line_speed);
+
+ /*
+ * This register determines on which events the MAC will assert
+ * error on the i/f to the NIG along w/ EOP.
+ */
+
+ /*
+ * This register tells the NIG whether to send traffic to UMAC
+ * or XMAC
+ */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
+
+ /* Set Max packet size */
+ REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
+
+ /* CRC append for Tx packets */
+ REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800);
+
+ /* update PFC */
+ bnx2x_update_pfc_xmac(params, vars, 0);
+
+ /* Enable TX and RX */
+ val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
+
+ /* Check loopback mode */
+ if (lb)
+ val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK;
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
+ bnx2x_set_xumac_nig(params,
+ ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+
+ vars->mac_type = MAC_TYPE_XMAC;
+
+ return 0;
+}
+static int bnx2x_emac_enable(struct link_params *params,
+ struct link_vars *vars, u8 lb)
+{
struct bnx2x *bp = params->bp;
u8 port = params->port;
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
@@ -518,6 +1745,10 @@ static u8 bnx2x_emac_enable(struct link_params *params,
DP(NETIF_MSG_LINK, "enabling EMAC\n");
+ /* Disable BMAC */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
/* enable emac and not bmac */
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
@@ -760,95 +1991,398 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
}
-static void bnx2x_update_pfc_brb(struct link_params *params,
- struct link_vars *vars,
- struct bnx2x_nig_brb_pfc_port_params *pfc_params)
+
+/* PFC BRB internal port configuration params */
+struct bnx2x_pfc_brb_threshold_val {
+ u32 pause_xoff;
+ u32 pause_xon;
+ u32 full_xoff;
+ u32 full_xon;
+};
+
+struct bnx2x_pfc_brb_e3b0_val {
+ u32 full_lb_xoff_th;
+ u32 full_lb_xon_threshold;
+ u32 lb_guarantied;
+ u32 mac_0_class_t_guarantied;
+ u32 mac_0_class_t_guarantied_hyst;
+ u32 mac_1_class_t_guarantied;
+ u32 mac_1_class_t_guarantied_hyst;
+};
+
+struct bnx2x_pfc_brb_th_val {
+ struct bnx2x_pfc_brb_threshold_val pauseable_th;
+ struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
+};
+static int bnx2x_pfc_brb_get_config_params(
+ struct link_params *params,
+ struct bnx2x_pfc_brb_th_val *config_val)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
+ if (CHIP_IS_E2(bp)) {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non-pauseable */
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ } else if (CHIP_IS_E3A0(bp)) {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non-pauseable */
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ } else if (CHIP_IS_E3B0(bp)) {
+ if (params->phy[INT_PHY].flags &
+ FLAGS_4_PORT_MODE) {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non-pauseable */
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ } else {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non-pauseable */
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ }
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+
+static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params,
+ struct bnx2x_pfc_brb_e3b0_val
+ *e3b0_val,
+ u32 cos0_pauseable,
+ u32 cos1_pauseable)
+{
+ if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) {
+ e3b0_val->full_lb_xoff_th =
+ PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+ e3b0_val->full_lb_xon_threshold =
+ PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_4P_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+ e3b0_val->mac_0_class_t_guarantied_hyst =
+ PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ e3b0_val->mac_1_class_t_guarantied =
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+ e3b0_val->mac_1_class_t_guarantied_hyst =
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
+ } else {
+ e3b0_val->full_lb_xoff_th =
+ PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
+ e3b0_val->full_lb_xon_threshold =
+ PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
+ e3b0_val->mac_0_class_t_guarantied_hyst =
+ PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ e3b0_val->mac_1_class_t_guarantied =
+ PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
+ e3b0_val->mac_1_class_t_guarantied_hyst =
+ PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
+
+ if (cos0_pauseable != cos1_pauseable) {
+ /* non-pauseable = lossy, pauseable = lossless */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
+ } else if (cos0_pauseable) {
+ /* Lossless + Lossless */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
+ } else {
+ /* Lossy + Lossy */
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_NON_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
+ }
+ }
+}
+static int bnx2x_update_pfc_brb(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_nig_brb_pfc_port_params
+ *pfc_params)
{
struct bnx2x *bp = params->bp;
+ struct bnx2x_pfc_brb_th_val config_val = { {0} };
+ struct bnx2x_pfc_brb_threshold_val *reg_th_config =
+ &config_val.pauseable_th;
+ struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
int set_pfc = params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED;
+ int bnx2x_status = 0;
+ u8 port = params->port;
/* default - pause configuration */
- u32 pause_xoff_th = PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE;
- u32 pause_xon_th = PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE;
- u32 full_xoff_th = PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE;
- u32 full_xon_th = PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE;
+ reg_th_config = &config_val.pauseable_th;
+ bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
+ if (0 != bnx2x_status)
+ return bnx2x_status;
if (set_pfc && pfc_params)
/* First COS */
- if (!pfc_params->cos0_pauseable) {
- pause_xoff_th =
- PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE;
- pause_xon_th =
- PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE;
- full_xoff_th =
- PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE;
- full_xon_th =
- PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
- }
+ if (!pfc_params->cos0_pauseable)
+ reg_th_config = &config_val.non_pauseable_th;
/*
* The number of free blocks below which the pause signal to class 0
* of MAC #n is asserted. n=0,1
*/
- REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
+ BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
+ reg_th_config->pause_xoff);
/*
* The number of free blocks above which the pause signal to class 0
* of MAC #n is de-asserted. n=0,1
*/
- REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
+ BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
/*
* The number of free blocks below which the full signal to class 0
* of MAC #n is asserted. n=0,1
*/
- REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
+ REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
+ BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
/*
* The number of free blocks above which the full signal to class 0
* of MAC #n is de-asserted. n=0,1
*/
- REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
+ REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
+ BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
if (set_pfc && pfc_params) {
/* Second COS */
- if (pfc_params->cos1_pauseable) {
- pause_xoff_th =
- PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE;
- pause_xon_th =
- PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE;
- full_xoff_th =
- PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE;
- full_xon_th =
- PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE;
- } else {
- pause_xoff_th =
- PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE;
- pause_xon_th =
- PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE;
- full_xoff_th =
- PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE;
- full_xon_th =
- PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
- }
+ if (pfc_params->cos1_pauseable)
+ reg_th_config = &config_val.pauseable_th;
+ else
+ reg_th_config = &config_val.non_pauseable_th;
/*
* The number of free blocks below which the pause signal to
* class 1 of MAC #n is asserted. n=0,1
- */
- REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
+ **/
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
+ BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
+ reg_th_config->pause_xoff);
/*
* The number of free blocks above which the pause signal to
* class 1 of MAC #n is de-asserted. n=0,1
*/
- REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
+ BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
+ reg_th_config->pause_xon);
/*
* The number of free blocks below which the full signal to
* class 1 of MAC #n is asserted. n=0,1
*/
- REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
+ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
+ BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
+ reg_th_config->full_xoff);
/*
* The number of free blocks above which the full signal to
* class 1 of MAC #n is de-asserted. n=0,1
*/
- REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
+ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
+ BRB1_REG_FULL_1_XON_THRESHOLD_0,
+ reg_th_config->full_xon);
+
+
+ if (CHIP_IS_E3B0(bp)) {
+ /* Should be done by the init tool */
+ /*
+ * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD
+ * reset value
+ * 944
+ */
+
+ /**
+ * The hysteresis on the guarantied buffer space for the LB port
+ * before signaling XON.
+ **/
+ REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80);
+
+ bnx2x_pfc_brb_get_e3b0_config_params(
+ params,
+ &e3b0_val,
+ pfc_params->cos0_pauseable,
+ pfc_params->cos1_pauseable);
+ /**
+ * The number of free blocks below which the full signal to the
+ * LB port is asserted.
+ */
+ REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
+ e3b0_val.full_lb_xoff_th);
+ /**
+ * The number of free blocks above which the full signal to the
+ * LB port is de-asserted.
+ */
+ REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
+ e3b0_val.full_lb_xon_threshold);
+ /**
+ * The number of blocks guarantied for the MAC #n port. n=0,1
+ */
+
+ /*The number of blocks guarantied for the LB port.*/
+ REG_WR(bp, BRB1_REG_LB_GUARANTIED,
+ e3b0_val.lb_guarantied);
+
+ /**
+ * The number of blocks guarantied for the MAC #n port.
+ */
+ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
+ 2 * e3b0_val.mac_0_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
+ 2 * e3b0_val.mac_1_class_t_guarantied);
+ /**
+ * The number of blocks guarantied for class #t in MAC0. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
+ e3b0_val.mac_0_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
+ e3b0_val.mac_0_class_t_guarantied);
+ /**
+ * The hysteresis on the guarantied buffer space for class in
+ * MAC0. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
+ e3b0_val.mac_0_class_t_guarantied_hyst);
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
+ e3b0_val.mac_0_class_t_guarantied_hyst);
+
+ /**
+ * The number of blocks guarantied for class #t in MAC1.t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
+ e3b0_val.mac_1_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
+ e3b0_val.mac_1_class_t_guarantied);
+ /**
+ * The hysteresis on the guarantied buffer space for class #t
+ * in MAC1. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
+ e3b0_val.mac_1_class_t_guarantied_hyst);
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
+ e3b0_val.mac_1_class_t_guarantied_hyst);
+
+ }
+
+ }
+
+ return bnx2x_status;
+}
+
+/******************************************************************************
+* Description:
+* This function is needed because the per-port NIG RX_COSx_PRIORITY_MASK
+* registers are not contiguous, so a base register + offset is not suitable.
+******************************************************************************/
+int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
+ u8 cos_entry,
+ u32 priority_mask, u8 port)
+{
+ u32 nig_reg_rx_priority_mask_add = 0;
+
+ switch (cos_entry) {
+ case 0:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS0_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS0_PRIORITY_MASK;
+ break;
+ case 1:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS1_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS1_PRIORITY_MASK;
+ break;
+ case 2:
+ nig_reg_rx_priority_mask_add = (port) ?
+ NIG_REG_P1_RX_COS2_PRIORITY_MASK :
+ NIG_REG_P0_RX_COS2_PRIORITY_MASK;
+ break;
+ case 3:
+ if (port)
+ return -EINVAL;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
+ break;
+ case 4:
+ if (port)
+ return -EINVAL;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
+ break;
+ case 5:
+ if (port)
+ return -EINVAL;
+ nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
+ break;
}
+
+ REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask);
+
+ return 0;
+}
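The priority_mask argument is, as the register names suggest, a bitmap of the received packet priorities steered to the given COS; a hedged usage sketch (the 0x01/0x06 split is made up for illustration, not a driver default):

	/* port 0: priority 0 -> COS 0, priorities 1 and 2 -> COS 1 */
	bnx2x_pfc_nig_rx_priority_mask(bp, 0, 0x01, 0);
	bnx2x_pfc_nig_rx_priority_mask(bp, 1, 0x06, 0);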
+static void bnx2x_update_mng(struct link_params *params, u32 link_status)
+{
+ struct bnx2x *bp = params->bp;
+
+ REG_WR(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[params->port].link_status), link_status);
}
static void bnx2x_update_pfc_nig(struct link_params *params,
@@ -858,9 +2392,9 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
u32 pkt_priority_to_cos = 0;
- u32 val;
struct bnx2x *bp = params->bp;
- int port = params->port;
+ u8 port = params->port;
+
int set_pfc = params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED;
DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
@@ -881,6 +2415,9 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
pause_enable = 0;
llfc_out_en = 0;
llfc_enable = 0;
+ if (CHIP_IS_E3(bp))
+ ppp_enable = 0;
+ else
ppp_enable = 1;
xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
@@ -899,6 +2436,9 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
xcm0_out_en = 1;
}
+ if (CHIP_IS_E3(bp))
+ REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN :
+ NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
@@ -920,30 +2460,13 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
/* HW PFC TX enable */
REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
- /* 0x2 = BMAC, 0x1= EMAC */
- switch (vars->mac_type) {
- case MAC_TYPE_EMAC:
- val = 1;
- break;
- case MAC_TYPE_BMAC:
- val = 0;
- break;
- default:
- val = 0;
- break;
- }
- REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT, val);
-
if (nig_params) {
+ u8 i = 0;
pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
- REG_WR(bp, port ? NIG_REG_P1_RX_COS0_PRIORITY_MASK :
- NIG_REG_P0_RX_COS0_PRIORITY_MASK,
- nig_params->rx_cos0_priority_mask);
-
- REG_WR(bp, port ? NIG_REG_P1_RX_COS1_PRIORITY_MASK :
- NIG_REG_P0_RX_COS1_PRIORITY_MASK,
- nig_params->rx_cos1_priority_mask);
+ for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
+ bnx2x_pfc_nig_rx_priority_mask(bp, i,
+ nig_params->rx_cos_priority_mask[i], port);
REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
@@ -958,8 +2481,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
pkt_priority_to_cos);
}
-
-void bnx2x_update_pfc(struct link_params *params,
+int bnx2x_update_pfc(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *pfc_params)
{
@@ -970,41 +2492,59 @@ void bnx2x_update_pfc(struct link_params *params,
*/
u32 val;
struct bnx2x *bp = params->bp;
+ int bnx2x_status = 0;
+ u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
+
+ if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+ vars->link_status |= LINK_STATUS_PFC_ENABLED;
+ else
+ vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
+
+ bnx2x_update_mng(params, vars->link_status);
/* update NIG params */
bnx2x_update_pfc_nig(params, vars, pfc_params);
/* update BRB params */
- bnx2x_update_pfc_brb(params, vars, pfc_params);
+ bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
+ if (0 != bnx2x_status)
+ return bnx2x_status;
if (!vars->link_up)
- return;
-
- val = REG_RD(bp, MISC_REG_RESET_REG_2);
- if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
- == 0) {
- DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
- bnx2x_emac_enable(params, vars, 0);
- return;
- }
+ return bnx2x_status;
DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
- if (CHIP_IS_E2(bp))
- bnx2x_update_pfc_bmac2(params, vars, 0);
- else
- bnx2x_update_pfc_bmac1(params, vars);
+ if (CHIP_IS_E3(bp))
+ bnx2x_update_pfc_xmac(params, vars, 0);
+ else {
+ val = REG_RD(bp, MISC_REG_RESET_REG_2);
+ if ((val &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
+ == 0) {
+ DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
+ bnx2x_emac_enable(params, vars, 0);
+ return bnx2x_status;
+ }
- val = 0;
- if ((params->feature_config_flags &
- FEATURE_CONFIG_PFC_ENABLED) ||
- (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
- val = 1;
- REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
+ if (CHIP_IS_E2(bp))
+ bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
+ else
+ bnx2x_update_pfc_bmac1(params, vars);
+
+ val = 0;
+ if ((params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED) ||
+ (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+ val = 1;
+ REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
+ }
+ return bnx2x_status;
}
-static u8 bnx2x_bmac1_enable(struct link_params *params,
- struct link_vars *vars,
- u8 is_lb)
+
+static int bnx2x_bmac1_enable(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -1066,9 +2606,9 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
return 0;
}
-static u8 bnx2x_bmac2_enable(struct link_params *params,
- struct link_vars *vars,
- u8 is_lb)
+static int bnx2x_bmac2_enable(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -1131,11 +2671,12 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
return 0;
}
-static u8 bnx2x_bmac_enable(struct link_params *params,
- struct link_vars *vars,
- u8 is_lb)
+static int bnx2x_bmac_enable(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
{
- u8 rc, port = params->port;
+ int rc = 0;
+ u8 port = params->port;
struct bnx2x *bp = params->bp;
u32 val;
/* reset and unreset the BigMac */
@@ -1173,16 +2714,6 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
return rc;
}
-
-static void bnx2x_update_mng(struct link_params *params, u32 link_status)
-{
- struct bnx2x *bp = params->bp;
-
- REG_WR(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[params->port].link_status), link_status);
-}
-
static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
{
u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
@@ -1218,8 +2749,8 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
}
}
-static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
- u32 line_speed)
+static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
+ u32 line_speed)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -1269,18 +2800,6 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
case SPEED_10000:
init_crd = thresh + 553 - 22;
break;
-
- case SPEED_12000:
- init_crd = thresh + 664 - 22;
- break;
-
- case SPEED_13000:
- init_crd = thresh + 742 - 22;
- break;
-
- case SPEED_16000:
- init_crd = thresh + 778 - 22;
- break;
default:
DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
line_speed);
@@ -1349,31 +2868,23 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp,
}
/******************************************************************/
-/* CL45 access functions */
+/* CL22 access functions */
/******************************************************************/
-static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 val)
+static int bnx2x_cl22_write(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 reg, u16 val)
{
- u32 tmp, saved_mode;
- u8 i, rc = 0;
- /*
- * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
- * (a value of 49==0x31) and make sure that the AUTO poll is off
- */
-
- saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
- tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL |
- EMAC_MDIO_MODE_CLOCK_CNT);
- tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
- (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
- REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
- REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
- udelay(40);
+ u32 tmp, mode;
+ u8 i;
+ int rc = 0;
+ /* Switch to CL22 */
+ mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+ mode & ~EMAC_MDIO_MODE_CLAUSE_45);
/* address */
-
- tmp = ((phy->addr << 21) | (devad << 16) | reg |
- EMAC_MDIO_COMM_COMMAND_ADDRESS |
+ tmp = ((phy->addr << 21) | (reg << 16) | val |
+ EMAC_MDIO_COMM_COMMAND_WRITE_22 |
EMAC_MDIO_COMM_START_BUSY);
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
@@ -1388,58 +2899,63 @@ static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "write phy register failed\n");
- netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
- } else {
- /* data */
- tmp = ((phy->addr << 21) | (devad << 16) | val |
- EMAC_MDIO_COMM_COMMAND_WRITE_45 |
- EMAC_MDIO_COMM_START_BUSY);
- REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+ }
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
+ return rc;
+}
- for (i = 0; i < 50; i++) {
- udelay(10);
+static int bnx2x_cl22_read(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 reg, u16 *ret_val)
+{
+ u32 val, mode;
+ u16 i;
+ int rc = 0;
- tmp = REG_RD(bp, phy->mdio_ctrl +
- EMAC_REG_EMAC_MDIO_COMM);
- if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
- udelay(5);
- break;
- }
- }
- if (tmp & EMAC_MDIO_COMM_START_BUSY) {
- DP(NETIF_MSG_LINK, "write phy register failed\n");
- netdev_err(bp->dev, "MDC/MDIO access timeout\n");
- rc = -EFAULT;
+ /* Switch to CL22 */
+ mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+ mode & ~EMAC_MDIO_MODE_CLAUSE_45);
+
+ /* address */
+ val = ((phy->addr << 21) | (reg << 16) |
+ EMAC_MDIO_COMM_COMMAND_READ_22 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+ *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
+ udelay(5);
+ break;
}
}
+ if (val & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "read phy register failed\n");
- /* Restore the saved mode */
- REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
-
+ *ret_val = 0;
+ rc = -EFAULT;
+ }
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
return rc;
}
-static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 *ret_val)
+/******************************************************************/
+/* CL45 access functions */
+/******************************************************************/
+static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val)
{
- u32 val, saved_mode;
+ u32 val;
u16 i;
- u8 rc = 0;
- /*
- * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
- * (a value of 49==0x31) and make sure that the AUTO poll is off
- */
-
- saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
- val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
- EMAC_MDIO_MODE_CLOCK_CNT));
- val |= (EMAC_MDIO_MODE_CLAUSE_45 |
- (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
- REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
- REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
- udelay(40);
-
+ int rc = 0;
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
/* address */
val = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
@@ -1460,7 +2976,6 @@ static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
netdev_err(bp->dev, "MDC/MDIO access timeout\n");
*ret_val = 0;
rc = -EFAULT;
-
} else {
/* data */
val = ((phy->addr << 21) | (devad << 16) |
@@ -1485,15 +3000,222 @@ static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
rc = -EFAULT;
}
}
+ /* Work around for E3 A0 */
+ if (phy->flags & FLAGS_MDC_MDIO_WA) {
+ phy->flags ^= FLAGS_DUMMY_READ;
+ if (phy->flags & FLAGS_DUMMY_READ) {
+ u16 temp_val;
+ bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
+ }
+ }
+
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+ return rc;
+}
+
+static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val)
+{
+ u32 tmp;
+ u8 i;
+ int rc = 0;
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+
+ /* address */
+
+ tmp = ((phy->addr << 21) | (devad << 16) | reg |
+ EMAC_MDIO_COMM_COMMAND_ADDRESS |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+ rc = -EFAULT;
+
+ } else {
+ /* data */
+ tmp = ((phy->addr << 21) | (devad << 16) | val |
+ EMAC_MDIO_COMM_COMMAND_WRITE_45 |
+ EMAC_MDIO_COMM_START_BUSY);
+ REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+ for (i = 0; i < 50; i++) {
+ udelay(10);
+
+ tmp = REG_RD(bp, phy->mdio_ctrl +
+ EMAC_REG_EMAC_MDIO_COMM);
+ if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+ udelay(5);
+ break;
+ }
+ }
+ if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+ DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+ rc = -EFAULT;
+ }
+ }
+ /* Work around for E3 A0 */
+ if (phy->flags & FLAGS_MDC_MDIO_WA) {
+ phy->flags ^= FLAGS_DUMMY_READ;
+ if (phy->flags & FLAGS_DUMMY_READ) {
+ u16 temp_val;
+ bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
+ }
+ }
+ if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+ bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+ EMAC_MDIO_STATUS_10MB);
+ return rc;
+}
+
+
+/******************************************************************/
+/* BSC access functions from E3 */
+/******************************************************************/
+static void bnx2x_bsc_module_sel(struct link_params *params)
+{
+ int idx;
+ u32 board_cfg, sfp_ctrl;
+ u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH];
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ /* Read I2C output PINs */
+ board_cfg = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.shared_hw_config.board));
+ i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK;
+ i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >>
+ SHARED_HW_CFG_E3_I2C_MUX1_SHIFT;
+
+ /* Read I2C output value */
+ sfp_ctrl = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg));
+ i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0;
+ i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0;
+ DP(NETIF_MSG_LINK, "Setting BSC switch\n");
+ for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++)
+ bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]);
+}
+
+static int bnx2x_bsc_read(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 sl_devid,
+ u16 sl_addr,
+ u8 lc_addr,
+ u8 xfer_cnt,
+ u32 *data_array)
+{
+ u32 val, i;
+ int rc = 0;
+ struct bnx2x *bp = params->bp;
- /* Restore the saved mode */
- REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
+ if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
+ DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
+ return -EINVAL;
+ }
+ if (xfer_cnt > 16) {
+ DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
+ xfer_cnt);
+ return -EINVAL;
+ }
+ bnx2x_bsc_module_sel(params);
+
+ xfer_cnt = 16 - lc_addr;
+
+ /* enable the engine */
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ val |= MCPR_IMC_COMMAND_ENABLE;
+ REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* program slave device ID */
+ val = (sl_devid << 16) | sl_addr;
+ REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
+
+ /* start xfer with 0 byte to update the address pointer ???*/
+ val = (MCPR_IMC_COMMAND_ENABLE) |
+ (MCPR_IMC_COMMAND_WRITE_OP <<
+ MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+ (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
+ REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* poll for completion */
+ i = 0;
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+ udelay(10);
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ if (i++ > 1000) {
+ DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n",
+ i);
+ rc = -EFAULT;
+ break;
+ }
+ }
+ if (rc == -EFAULT)
+ return rc;
+
+ /* start xfer with read op */
+ val = (MCPR_IMC_COMMAND_ENABLE) |
+ (MCPR_IMC_COMMAND_READ_OP <<
+ MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+ (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) |
+ (xfer_cnt);
+ REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+ /* poll for completion */
+ i = 0;
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+ udelay(10);
+ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+ if (i++ > 1000) {
+ DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i);
+ rc = -EFAULT;
+ break;
+ }
+ }
+ if (rc == -EFAULT)
+ return rc;
+
+ for (i = (lc_addr >> 2); i < 4; i++) {
+ data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4));
+#ifdef __BIG_ENDIAN
+ data_array[i] = ((data_array[i] & 0x000000ff) << 24) |
+ ((data_array[i] & 0x0000ff00) << 8) |
+ ((data_array[i] & 0x00ff0000) >> 8) |
+ ((data_array[i] & 0xff000000) >> 24);
+#endif
+ }
return rc;
}
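The #ifdef __BIG_ENDIAN block above is an open-coded 32-bit byte swap of each data word; with <linux/swab.h> the same transformation could be expressed more compactly (a sketch, not a change the patch makes):

	#ifdef __BIG_ENDIAN
		data_array[i] = swab32(data_array[i]);	/* swap the four bytes */
	#endif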
-u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
- u8 devad, u16 reg, u16 *ret_val)
+static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 or_val)
+{
+ u16 val;
+ bnx2x_cl45_read(bp, phy, devad, reg, &val);
+ bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
+}
+
+int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 *ret_val)
{
u8 phy_index;
/*
@@ -1510,8 +3232,8 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
return -EINVAL;
}
-u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
- u8 devad, u16 reg, u16 val)
+int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 val)
{
u8 phy_index;
/*
@@ -1527,9 +3249,62 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
}
return -EINVAL;
}
+static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u8 lane = 0;
+ struct bnx2x *bp = params->bp;
+ u32 path_swap, path_swap_ovr;
+ u8 path, port;
+
+ path = BP_PATH(bp);
+ port = params->port;
+
+ if (bnx2x_is_4_port_mode(bp)) {
+ u32 port_swap, port_swap_ovr;
+
+ /*figure out path swap value */
+ path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
+ if (path_swap_ovr & 0x1)
+ path_swap = (path_swap_ovr & 0x2);
+ else
+ path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP);
+
+ if (path_swap)
+ path = path ^ 1;
+
+ /*figure out port swap value */
+ port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
+ if (port_swap_ovr & 0x1)
+ port_swap = (port_swap_ovr & 0x2);
+ else
+ port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP);
-static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
- struct bnx2x_phy *phy)
+ if (port_swap)
+ port = port ^ 1;
+
+ lane = (port<<1) + path;
+ } else { /* two port mode - no port swap */
+
+ /*figure out path swap value */
+ path_swap_ovr =
+ REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
+ if (path_swap_ovr & 0x1) {
+ path_swap = (path_swap_ovr & 0x2);
+ } else {
+ path_swap =
+ REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP);
+ }
+ if (path_swap)
+ path = path ^ 1;
+
+ lane = path << 1 ;
+ }
+ return lane;
+}
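A quick check of the lane arithmetic above, assuming no path or port swap overrides are asserted:

	/*
	 * 4-port mode, path 0, port 1: lane = (1 << 1) + 0 = 2
	 * 2-port mode, path 1:         lane = 1 << 1       = 2
	 */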
+
+static void bnx2x_set_aer_mmd(struct link_params *params,
+ struct bnx2x_phy *phy)
{
u32 ser_lane;
u16 offset, aer_val;
@@ -1538,20 +3313,28 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
- offset = phy->addr + ser_lane;
- if (CHIP_IS_E2(bp))
+ offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
+ (phy->addr + ser_lane) : 0;
+
+ if (USES_WARPCORE(bp)) {
+ aer_val = bnx2x_get_warpcore_lane(phy, params);
+ /*
+ * In Dual-lane mode, two lanes are joined together,
+ * so in order to configure them, the AER broadcast method is
+ * used here.
+ * 0x200 is the broadcast address for lanes 0,1
+ * 0x201 is the broadcast address for lanes 2,3
+ */
+ if (phy->flags & FLAGS_WC_DUAL_MODE)
+ aer_val = (aer_val >> 1) | 0x200;
+ } else if (CHIP_IS_E2(bp))
aer_val = 0x3800 + offset - 1;
else
aer_val = 0x3800 + offset;
+ DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, aer_val);
-}
-static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
- struct bnx2x_phy *phy)
-{
- CL22_WR_OVER_CL45(bp, phy,
- MDIO_REG_BANK_AER_BLOCK,
- MDIO_AER_BLOCK_AER_REG, 0x3800);
+
}
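
The dual-lane broadcast value computed above folds the Warpcore lane into one of the two broadcast addresses named in the comment: (lane >> 1) | 0x200 maps lanes 0/1 to 0x200 and lanes 2/3 to 0x201. A tiny sketch of that mapping (illustrative only):

#include <stdint.h>
#include <stdio.h>

static uint16_t wc_dual_mode_aer(uint16_t lane)
{
	/* lanes 0,1 -> 0x200 ; lanes 2,3 -> 0x201, per the comment above */
	return (lane >> 1) | 0x200;
}

int main(void)
{
	uint16_t lane;

	for (lane = 0; lane < 4; lane++)
		printf("lane %u -> AER 0x%03x\n", lane, wc_dual_mode_aer(lane));
	return 0;
}
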
/******************************************************************/
@@ -1611,19 +3394,981 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
params->phy[INT_PHY].def_md_devad);
}
+static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
+ struct link_params *params, u16 *ieee_fc)
+{
+ struct bnx2x *bp = params->bp;
+ *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
+	/*
+	 * Resolve pause mode and advertisement.
+	 * Please refer to Table 28B-3 of the 802.3ab-1999 spec.
+	 */
+
+ switch (phy->req_flow_ctrl) {
+ case BNX2X_FLOW_CTRL_AUTO:
+ if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ else
+ *ieee_fc |=
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ break;
+
+ case BNX2X_FLOW_CTRL_TX:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ break;
+
+ case BNX2X_FLOW_CTRL_RX:
+ case BNX2X_FLOW_CTRL_BOTH:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ break;
+
+ case BNX2X_FLOW_CTRL_NONE:
+ default:
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+ break;
+ }
+ DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
+}
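
The switch above reduces the requested flow-control mode to one of three CL37 pause advertisements (none, asymmetric, or both), per Table 28B-3. A condensed sketch of that decision follows; the MDIO_COMBO_* bit values are not visible in this hunk, so string labels stand in for them:

#include <stdio.h>

/* Illustrative only: mirrors the req_flow_ctrl -> CL37 pause advertisement
 * mapping above. The real code ORs the corresponding
 * MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_* bit into *ieee_fc. */
enum fc_req { FC_AUTO, FC_TX, FC_RX, FC_BOTH, FC_NONE };

static const char *cl37_pause_adv(enum fc_req req, enum fc_req auto_adv)
{
	switch (req) {
	case FC_AUTO:
		return (auto_adv == FC_BOTH) ? "PAUSE_BOTH" : "PAUSE_ASYMMETRIC";
	case FC_TX:
		return "PAUSE_ASYMMETRIC";
	case FC_RX:
	case FC_BOTH:
		return "PAUSE_BOTH";
	case FC_NONE:
	default:
		return "PAUSE_NONE";
	}
}

int main(void)
{
	printf("%s\n", cl37_pause_adv(FC_AUTO, FC_BOTH));	/* PAUSE_BOTH */
	return 0;
}
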
+
+static void set_phy_vars(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 actual_phy_idx, phy_index, link_cfg_idx;
+ u8 phy_config_swapped = params->multi_phy_config &
+ PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+ for (phy_index = INT_PHY; phy_index < params->num_phys;
+ phy_index++) {
+ link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+ actual_phy_idx = phy_index;
+ if (phy_config_swapped) {
+ if (phy_index == EXT_PHY1)
+ actual_phy_idx = EXT_PHY2;
+ else if (phy_index == EXT_PHY2)
+ actual_phy_idx = EXT_PHY1;
+ }
+ params->phy[actual_phy_idx].req_flow_ctrl =
+ params->req_flow_ctrl[link_cfg_idx];
+
+ params->phy[actual_phy_idx].req_line_speed =
+ params->req_line_speed[link_cfg_idx];
+
+ params->phy[actual_phy_idx].speed_cap_mask =
+ params->speed_cap_mask[link_cfg_idx];
+
+ params->phy[actual_phy_idx].req_duplex =
+ params->req_duplex[link_cfg_idx];
+
+ if (params->req_line_speed[link_cfg_idx] ==
+ SPEED_AUTO_NEG)
+ vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+
+ DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
+ " speed_cap_mask %x\n",
+ params->phy[actual_phy_idx].req_flow_ctrl,
+ params->phy[actual_phy_idx].req_line_speed,
+ params->phy[actual_phy_idx].speed_cap_mask);
+ }
+}
+
+static void bnx2x_ext_phy_set_pause(struct link_params *params,
+ struct bnx2x_phy *phy,
+ struct link_vars *vars)
+{
+ u16 val;
+ struct bnx2x *bp = params->bp;
+	/* Read-modify-write the pause advertising bits */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
+
+ val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
+
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+ val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+ }
+ if ((vars->ieee_fc &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+ val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+ }
+	DP(NETIF_MSG_LINK, "Ext phy AN advertise 0x%x\n", val);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
+}
+
+static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
+{ /* LD LP */
+ switch (pause_result) { /* ASYM P ASYM P */
+ case 0xb: /* 1 0 1 1 */
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
+ break;
+
+ case 0xe: /* 1 1 1 0 */
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
+ break;
+
+ case 0x5: /* 0 1 0 1 */
+ case 0x7: /* 0 1 1 1 */
+ case 0xd: /* 1 1 0 1 */
+ case 0xf: /* 1 1 1 1 */
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+ break;
+
+ default:
+ break;
+ }
+ if (pause_result & (1<<0))
+ vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
+ if (pause_result & (1<<1))
+ vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
+}
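
For reference, the 4-bit pause_result decoded above is laid out, MSB to LSB, as {LD asym, LD pause, LP asym, LP pause}, matching the column comment. A standalone sketch of the same resolution table (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* pause_result layout, MSB..LSB: LD_ASYM LD_PAUSE LP_ASYM LP_PAUSE */
static const char *resolve(uint8_t pause_result)
{
	switch (pause_result) {
	case 0xb:			/* 1 0 1 1 -> local TX only */
		return "TX";
	case 0xe:			/* 1 1 1 0 -> local RX only */
		return "RX";
	case 0x5: case 0x7:
	case 0xd: case 0xf:		/* symmetric pause on both sides */
		return "BOTH";
	default:
		return "NONE";
	}
}

int main(void)
{
	uint8_t r;

	for (r = 0; r < 0x10; r++)
		printf("0x%x -> %s\n", r, resolve(r));
	return 0;
}
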
+
+static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 ld_pause; /* local */
+ u16 lp_pause; /* link partner */
+ u16 pause_result;
+ u8 ret = 0;
+ /* read twice */
+
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+ vars->flow_ctrl = phy->req_flow_ctrl;
+ else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ vars->flow_ctrl = params->req_fc_auto_adv;
+ else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ ret = 1;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
+ bnx2x_cl22_read(bp, phy,
+ 0x4, &ld_pause);
+ bnx2x_cl22_read(bp, phy,
+ 0x5, &lp_pause);
+ } else {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ }
+ pause_result = (ld_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
+ pause_result |= (lp_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
+ DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
+ pause_result);
+ bnx2x_pause_resolve(vars, pause_result);
+ }
+ return ret;
+}
+/******************************************************************/
+/* Warpcore section */
+/******************************************************************/
+/* The init_internal_warpcore should mirror the xgxs,
+ * i.e. reset the lane (if needed), set aer for the
+ * init configuration, and set/clear SGMII flag. Internal
+ * phy init is done purely in phy_init stage.
+ */
+static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars) {
+ u16 val16 = 0, lane, bam37 = 0;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
+ /* Check adding advertisement for 1G KX */
+ if (((vars->line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (vars->line_speed == SPEED_1000)) {
+ u16 sd_digital;
+ val16 |= (1<<5);
+
+ /* Enable CL37 1G Parallel Detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (sd_digital | 0x1));
+
+		DP(NETIF_MSG_LINK, "Advertise 1G\n");
+ }
+ if (((vars->line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+ (vars->line_speed == SPEED_10000)) {
+ /* Check adding advertisement for 10G KR */
+ val16 |= (1<<7);
+ /* Enable 10G Parallel Detect */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
+
+		DP(NETIF_MSG_LINK, "Advertise 10G\n");
+ }
+
+ /* Set Transmit PMD settings */
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+ (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+ (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
+ 0x03f0);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL,
+ 0x03f0);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x383f);
+
+ /* Advertised speeds */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
+
+ /* Enable CL37 BAM */
+ if (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1);
+ DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+ }
+
+ /* Advertise pause */
+ bnx2x_ext_phy_set_pause(params, phy, vars);
+
+ /* Enable Autoneg */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000);
+
+ /* Over 1G - AN local device user page 1 */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, &val16);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
+}
+
+static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+
+ /* Disable Autoneg */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00);
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL3_UP1, 0x1);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, 0xa);
+
+ /* Disable CL36 PCS Tx */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0);
+
+ /* Double Wide Single Data Rate @ pll rate */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF);
+
+	/* Leave CL72 training enabled; it is needed for KR */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
+ 0x2);
+
+ /* Leave CL72 enabled */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ val | 0x3800);
+
+ /* Set speed via PMA/PMD register */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
+
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
+
+	/* Enable encoded forced speed */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
+
+	/* Turn on TX scrambling (payload only) in the 64/66 scrambler */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX66_CONTROL, 0x9);
+
+	/* Turn on RX scrambling (payload only) in the 64/66 scrambler */
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, 0xF9);
+
+	/* Set and clear loopback to force a reset of the 64/66 decoder */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
+
+}
+
+static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 is_xfi)
+{
+ struct bnx2x *bp = params->bp;
+ u16 misc1_val, tap_val, tx_driver_val, lane, val;
+ /* Hold rxSeqStart */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000));
+
+ /* Hold tx_fifo_reset */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1));
+
+ /* Disable CL73 AN */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+
+ /* Disable 100FX Enable and Auto-Detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL1, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
+
+ /* Disable 100FX Idle detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, (val | 0x0080));
+
+ /* Set Block address to Remote PHY & Clear forced_speed[5] */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F));
+
+ /* Turn off auto-detect & fiber mode */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ (val & 0xFFEE));
+
+ /* Set filter_force_link, disable_false_link and parallel_detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ ((val | 0x0006) & 0xFFFE));
+
+ /* Set XFI / SFI */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val);
+
+ misc1_val &= ~(0x1f);
+
+ if (is_xfi) {
+ misc1_val |= 0x5;
+ tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+ (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+ (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
+ tx_driver_val =
+ ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+ (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+
+ } else {
+ misc1_val |= 0x9;
+ tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+ (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+ (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
+ tx_driver_val =
+ ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+ }
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
+
+ /* Set Transmit PMD settings */
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX_FIR_TAP,
+ tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ tx_driver_val);
+
+ /* Enable fiber mode, enable and invert sig_det */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd);
+
+ /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080);
+
+ /* 10G XFI Full Duplex */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
+
+ /* Release tx_fifo_reset */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE);
+
+ /* Release rxSeqStart */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF));
+}
+
+static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
+{
+	DP(NETIF_MSG_LINK, "KR2 is still not supported!\n");
+}
+
+static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 lane)
+{
+ /* Rx0 anaRxControl1G */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90);
+
+ /* Rx2 anaRxControl1G */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW0, 0xE070);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW1, 0xC0D0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW2, 0xA0B0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW3, 0x8090);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0);
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0);
+
+ /* Serdes Digital Misc1 */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008);
+
+ /* Serdes Digital4 Misc3 */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8088);
+
+ /* Set Transmit PMD settings */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX_FIR_TAP,
+ ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+ (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+ (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
+ MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+ (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+}
+
+static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 fiber_mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16, digctrl_kx1, digctrl_kx2;
+ u8 lane;
+
+ lane = bnx2x_get_warpcore_lane(phy, params);
+
+ /* Clear XFI clock comp in non-10G single lane mode. */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
+
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ /* SGMII Autoneg */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+ val16 | 0x1000);
+ DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
+ } else {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ val16 &= 0xcfbf;
+ switch (phy->req_line_speed) {
+ case SPEED_10:
+ break;
+ case SPEED_100:
+ val16 |= 0x2000;
+ break;
+ case SPEED_1000:
+ val16 |= 0x0040;
+ break;
+ default:
+			DP(NETIF_MSG_LINK, "Speed not supported: 0x%x\n",
+			   phy->req_line_speed);
+ return;
+ }
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ val16 |= 0x0100;
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16);
+
+ DP(NETIF_MSG_LINK, "set SGMII force speed %d\n",
+ phy->req_line_speed);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ DP(NETIF_MSG_LINK, " (readback) %x\n", val16);
+ }
+
+ /* SGMII Slave mode and disable signal detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1);
+ if (fiber_mode)
+ digctrl_kx1 = 1;
+ else
+ digctrl_kx1 &= 0xff4a;
+
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ digctrl_kx1);
+
+ /* Turn off parallel detect */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (digctrl_kx2 & ~(1<<2)));
+
+ /* Re-enable parallel detect */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ (digctrl_kx2 | (1<<2)));
+
+ /* Enable autodet */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ (digctrl_kx1 | 0x10));
+}
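
The forced-speed bits written above follow the usual Clause 22 MII control layout, assuming MDIO_WC_REG_COMBO_IEEE0_MIICTRL mirrors it: 0x2000 selects 100M, 0x0040 selects 1G, 0x0100 is full duplex, and 0x1000 (used in the autoneg branch) is AN enable. A small sketch of the forced-speed encoding, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the forced-speed branch above: val &= 0xcfbf clears AN enable
 * (0x1000) and the speed-select bits (0x2000, 0x0040), then the new speed
 * and duplex bits are ORed in. Illustrative only. */
static uint16_t sgmii_force_speed(uint16_t miictrl, int speed, int full_duplex)
{
	miictrl &= 0xcfbf;
	if (speed == 100)
		miictrl |= 0x2000;
	else if (speed == 1000)
		miictrl |= 0x0040;
	/* speed == 10: both speed-select bits stay 0 */
	if (full_duplex)
		miictrl |= 0x0100;
	return miictrl;
}

int main(void)
{
	printf("0x%04x\n", sgmii_force_speed(0x1140, 1000, 1)); /* -> 0x0140 */
	return 0;
}
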
+
+static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 reset)
+{
+ u16 val;
+	/* Assert or release the per-lane soft reset */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, &val);
+ if (reset)
+ val |= 0xC000;
+ else
+ val &= 0x3FFF;
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, val);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6, &val);
+}
+
+/* Clear SFI/XFI link settings registers */
+static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 lane)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16;
+
+ /* Set XFI clock comp as default. */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13));
+
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL1, 0x014a);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, 0x0800);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8008);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000);
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX_FIR_TAP, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+}
+
+static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
+ u32 chip_id,
+ u32 shmem_base, u8 port,
+ u8 *gpio_num, u8 *gpio_port)
+{
+ u32 cfg_pin;
+ *gpio_num = 0;
+ *gpio_port = 0;
+ if (CHIP_IS_E3(bp)) {
+ cfg_pin = (REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_MOD_ABS_MASK) >>
+ PORT_HW_CFG_E3_MOD_ABS_SHIFT;
+
+		/*
+		 * Should not happen. This function is called upon an
+		 * interrupt triggered by a GPIO (since EPIO can only
+		 * generate interrupts to the MCP).
+		 * So if this function was called and none of the GPIOs is
+		 * configured, something has gone badly wrong.
+		 */
+ if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
+ (cfg_pin > PIN_CFG_GPIO3_P1)) {
+ DP(NETIF_MSG_LINK, "ERROR: Invalid cfg pin %x for "
+ "module detect indication\n",
+ cfg_pin);
+ return -EINVAL;
+ }
+
+ *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3;
+ *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2;
+ } else {
+ *gpio_num = MISC_REGISTERS_GPIO_3;
+ *gpio_port = port;
+ }
+ DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port);
+ return 0;
+}
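
Assuming PIN_CFG_GPIO0_P0 .. PIN_CFG_GPIO3_P1 are eight consecutive pin-config values (which the range check above implies), the decode is simply offset & 0x3 for the GPIO number and offset >> 2 for the port. A worked sketch with a placeholder base value:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical base value; only the offset from it matters for the decode. */
#define PIN_CFG_GPIO0_P0 0

static void decode_mod_abs(uint32_t cfg_pin, uint8_t *gpio_num, uint8_t *gpio_port)
{
	uint32_t off = cfg_pin - PIN_CFG_GPIO0_P0;

	*gpio_num  = off & 0x3;	/* GPIO0..GPIO3 */
	*gpio_port = off >> 2;	/* port 0 for offsets 0-3, port 1 for 4-7 */
}

int main(void)
{
	uint8_t num, port;

	decode_mod_abs(PIN_CFG_GPIO0_P0 + 6, &num, &port);
	printf("GPIO%u_P%u\n", num, port);	/* -> GPIO2_P1 */
	return 0;
}
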
+
+static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 gpio_num, gpio_port;
+ u32 gpio_val;
+ if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id,
+ params->shmem_base, params->port,
+ &gpio_num, &gpio_port) != 0)
+ return 0;
+ gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+
+ /* Call the handling function in case module is detected */
+ if (gpio_val == 0)
+ return 1;
+ else
+ return 0;
+}
+
+static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 serdes_net_if;
+ u8 fiber_mode;
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
+ serdes_net_if = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_NET_SERDES_IF_MASK);
+ DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, "
+ "serdes_net_if = 0x%x\n",
+ vars->line_speed, serdes_net_if);
+ bnx2x_set_aer_mmd(params, phy);
+
+ vars->phy_flags |= PHY_XGXS_FLAG;
+ if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
+ (phy->req_line_speed &&
+ ((phy->req_line_speed == SPEED_100) ||
+ (phy->req_line_speed == SPEED_10)))) {
+ vars->phy_flags |= PHY_SGMII_FLAG;
+ DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 0);
+ } else {
+ switch (serdes_net_if) {
+ case PORT_HW_CFG_NET_SERDES_IF_KR:
+ /* Enable KR Auto Neg */
+ if (params->loopback_mode == LOOPBACK_NONE)
+ bnx2x_warpcore_enable_AN_KR(phy, params, vars);
+ else {
+ DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
+ bnx2x_warpcore_set_10G_KR(phy, params, vars);
+ }
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_XFI:
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ if (vars->line_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK, "Setting 10G XFI\n");
+ bnx2x_warpcore_set_10G_XFI(phy, params, 1);
+ } else {
+ if (SINGLE_MEDIA_DIRECT(params)) {
+ DP(NETIF_MSG_LINK, "1G Fiber\n");
+ fiber_mode = 1;
+ } else {
+ DP(NETIF_MSG_LINK, "10/100/1G SGMII\n");
+ fiber_mode = 0;
+ }
+ bnx2x_warpcore_set_sgmii_speed(phy,
+ params,
+ fiber_mode);
+ }
+
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_SFI:
+
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ if (vars->line_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
+ bnx2x_warpcore_set_10G_XFI(phy, params, 0);
+ } else if (vars->line_speed == SPEED_1000) {
+ DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 1);
+ }
+ /* Issue Module detection */
+ if (bnx2x_is_sfp_module_plugged(phy, params))
+ bnx2x_sfp_module_detection(phy, params);
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+ if (vars->line_speed != SPEED_20000) {
+ DP(NETIF_MSG_LINK, "Speed not supported yet\n");
+ return;
+ }
+ DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n");
+ bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane);
+ /* Issue Module detection */
+
+ bnx2x_sfp_module_detection(phy, params);
+ break;
+
+ case PORT_HW_CFG_NET_SERDES_IF_KR2:
+ if (vars->line_speed != SPEED_20000) {
+ DP(NETIF_MSG_LINK, "Speed not supported yet\n");
+ return;
+ }
+ DP(NETIF_MSG_LINK, "Setting 20G KR2\n");
+ bnx2x_warpcore_set_20G_KR2(bp, phy);
+ break;
+
+ default:
+ DP(NETIF_MSG_LINK, "Unsupported Serdes Net Interface "
+ "0x%x\n", serdes_net_if);
+ return;
+ }
+ }
+
+ /* Take lane out of reset after configuration is finished */
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+ DP(NETIF_MSG_LINK, "Exit config init\n");
+}
+
+static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 tx_en)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin;
+ u8 port = params->port;
+
+ cfg_pin = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_TX_LASER_MASK;
+	/* Drive the pin to !tx_en since this pin is DISABLE_TX_LASER */
+ DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
+	/* For 20G, the expected pin to be used is 3 pins after the current one */
+
+ bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
+ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
+ bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
+}
+
+static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16;
+ bnx2x_sfp_e3_set_transmitter(params, phy, 0);
+ bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+ bnx2x_set_aer_mmd(params, phy);
+ /* Global register */
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+
+ /* Clear loopback settings (if any) */
+ /* 10G & 20G */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 &
+ 0xBFFF);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe);
+
+ /* Update those 1-copy registers */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ /* Enable 1G MDIO (1-copy) */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ val16 & ~0x10);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+ val16 & 0xff00);
+
+}
+
+static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val16;
+ u32 lane;
+ DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
+ params->loopback_mode, phy->req_line_speed);
+
+ if (phy->req_line_speed < SPEED_10000) {
+ /* 10/100/1000 */
+
+ /* Update those 1-copy registers */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ /* Enable 1G MDIO (1-copy) */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ val16 | 0x10);
+ /* Set 1G loopback based on lane (1-copy) */
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+ val16 | (1<<lane));
+
+ /* Switch back to 4-copy registers */
+ bnx2x_set_aer_mmd(params, phy);
+ /* Global loopback, not recommended. */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
+ 0x4000);
+ } else {
+ /* 10G & 20G */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
+ 0x4000);
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
+		lane = path << 1;
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1);
+ }
+}
+
void bnx2x_link_status_update(struct link_params *params,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u8 link_10g;
+ u8 link_10g_plus;
u8 port = params->port;
+ u32 sync_offset, media_types;
+ /* Update PHY configuration */
+ set_phy_vars(params, vars);
vars->link_status = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
port_mb[port].link_status));
vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
+ vars->phy_flags = PHY_XGXS_FLAG;
+ if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+ vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
if (vars->link_up) {
DP(NETIF_MSG_LINK, "phy link up\n");
@@ -1664,27 +4409,9 @@ void bnx2x_link_status_update(struct link_params *params,
case LINK_10GTFD:
vars->line_speed = SPEED_10000;
break;
-
- case LINK_12GTFD:
- vars->line_speed = SPEED_12000;
- break;
-
- case LINK_12_5GTFD:
- vars->line_speed = SPEED_12500;
- break;
-
- case LINK_13GTFD:
- vars->line_speed = SPEED_13000;
+ case LINK_20GTFD:
+ vars->line_speed = SPEED_20000;
break;
-
- case LINK_15GTFD:
- vars->line_speed = SPEED_15000;
- break;
-
- case LINK_16GTFD:
- vars->line_speed = SPEED_16000;
- break;
-
default:
break;
}
@@ -1705,19 +4432,24 @@ void bnx2x_link_status_update(struct link_params *params,
} else {
vars->phy_flags &= ~PHY_SGMII_FLAG;
}
-
+ if (vars->line_speed &&
+ USES_WARPCORE(bp) &&
+ (vars->line_speed == SPEED_1000))
+ vars->phy_flags |= PHY_SGMII_FLAG;
/* anything 10 and over uses the bmac */
- link_10g = ((vars->line_speed == SPEED_10000) ||
- (vars->line_speed == SPEED_12000) ||
- (vars->line_speed == SPEED_12500) ||
- (vars->line_speed == SPEED_13000) ||
- (vars->line_speed == SPEED_15000) ||
- (vars->line_speed == SPEED_16000));
- if (link_10g)
- vars->mac_type = MAC_TYPE_BMAC;
- else
- vars->mac_type = MAC_TYPE_EMAC;
+ link_10g_plus = (vars->line_speed >= SPEED_10000);
+ if (link_10g_plus) {
+ if (USES_WARPCORE(bp))
+ vars->mac_type = MAC_TYPE_XMAC;
+ else
+ vars->mac_type = MAC_TYPE_BMAC;
+ } else {
+ if (USES_WARPCORE(bp))
+ vars->mac_type = MAC_TYPE_UMAC;
+ else
+ vars->mac_type = MAC_TYPE_EMAC;
+ }
} else { /* link down */
DP(NETIF_MSG_LINK, "phy link down\n");
@@ -1729,10 +4461,44 @@ void bnx2x_link_status_update(struct link_params *params,
/* indicate no mac active */
vars->mac_type = MAC_TYPE_NONE;
+ if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
}
- DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n",
- vars->link_status, vars->phy_link_up);
+ /* Sync media type */
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].media_type);
+ media_types = REG_RD(bp, sync_offset);
+
+ params->phy[INT_PHY].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT;
+ params->phy[EXT_PHY1].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT;
+ params->phy[EXT_PHY2].media_type =
+ (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
+ PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT;
+ DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types);
+
+ /* Sync AEU offset */
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].aeu_int_mask);
+
+ vars->aeu_int_mask = REG_RD(bp, sync_offset);
+
+ /* Sync PFC status */
+ if (vars->link_status & LINK_STATUS_PFC_ENABLED)
+ params->feature_config_flags |=
+ FEATURE_CONFIG_PFC_ENABLED;
+ else
+ params->feature_config_flags &=
+ ~FEATURE_CONFIG_PFC_ENABLED;
+
+ DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n",
+ vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
vars->line_speed, vars->duplex, vars->flow_ctrl);
}
@@ -1759,9 +4525,9 @@ static void bnx2x_set_master_ln(struct link_params *params,
(new_master_ln | ser_lane));
}
-static u8 bnx2x_reset_unicore(struct link_params *params,
- struct bnx2x_phy *phy,
- u8 set_serdes)
+static int bnx2x_reset_unicore(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 set_serdes)
{
struct bnx2x *bp = params->bp;
u16 mii_control;
@@ -2048,9 +4814,6 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
if (vars->line_speed == SPEED_10000)
reg_val |=
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
- if (vars->line_speed == SPEED_13000)
- reg_val |=
- MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
}
CL22_WR_OVER_CL45(bp, phy,
@@ -2059,8 +4822,8 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
}
-static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
- struct link_params *params)
+static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
@@ -2081,44 +4844,9 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
MDIO_OVER_1G_UP3, 0x400);
}
-static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
- struct link_params *params, u16 *ieee_fc)
-{
- struct bnx2x *bp = params->bp;
- *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
- /*
- * Resolve pause mode and advertisement.
- * Please refer to Table 28B-3 of the 802.3ab-1999 spec
- */
-
- switch (phy->req_flow_ctrl) {
- case BNX2X_FLOW_CTRL_AUTO:
- if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
- *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
- else
- *ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
- break;
- case BNX2X_FLOW_CTRL_TX:
- *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
- break;
-
- case BNX2X_FLOW_CTRL_RX:
- case BNX2X_FLOW_CTRL_BOTH:
- *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
- break;
-
- case BNX2X_FLOW_CTRL_NONE:
- default:
- *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
- break;
- }
- DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
-}
-
-static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
- struct link_params *params,
- u16 ieee_fc)
+static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 ieee_fc)
{
struct bnx2x *bp = params->bp;
u16 val;
@@ -2252,35 +4980,8 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
* link management
*/
-static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
-{ /* LD LP */
- switch (pause_result) { /* ASYM P ASYM P */
- case 0xb: /* 1 0 1 1 */
- vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
- break;
-
- case 0xe: /* 1 1 1 0 */
- vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
- break;
-
- case 0x5: /* 0 1 0 1 */
- case 0x7: /* 0 1 1 1 */
- case 0xd: /* 1 1 0 1 */
- case 0xf: /* 1 1 1 1 */
- vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
- break;
-
- default:
- break;
- }
- if (pause_result & (1<<0))
- vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
- if (pause_result & (1<<1))
- vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
-}
-
-static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
- struct link_params *params)
+static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 pd_10g, status2_1000x;
@@ -2383,7 +5084,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
- u16 rx_status, ustat_val, cl37_fsm_recieved;
+ u16 rx_status, ustat_val, cl37_fsm_received;
DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
/* Step 1: Make sure signal is detected */
CL22_RD_OVER_CL45(bp, phy,
@@ -2421,15 +5122,15 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_REMOTE_PHY,
MDIO_REMOTE_PHY_MISC_RX_STATUS,
- &cl37_fsm_recieved);
- if ((cl37_fsm_recieved &
+ &cl37_fsm_received);
+ if ((cl37_fsm_received &
(MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
(MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
DP(NETIF_MSG_LINK, "No CL37 FSM were received. "
"misc_rx_status(0x8330) = 0x%x\n",
- cl37_fsm_recieved);
+ cl37_fsm_received);
return;
}
/*
@@ -2462,45 +5163,25 @@ static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
vars->link_status |=
LINK_STATUS_PARALLEL_DETECTION_USED;
}
-
-static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
+static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars,
+ u16 is_link_up,
+ u16 speed_mask,
+ u16 is_duplex)
{
struct bnx2x *bp = params->bp;
- u16 new_line_speed, gp_status;
- u8 rc = 0;
-
- /* Read gp_status */
- CL22_RD_OVER_CL45(bp, phy,
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
-
if (phy->req_line_speed == SPEED_AUTO_NEG)
vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
- if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
- DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n",
- gp_status);
+ if (is_link_up) {
+ DP(NETIF_MSG_LINK, "phy link up\n");
vars->phy_link_up = 1;
vars->link_status |= LINK_STATUS_LINK_UP;
- if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
- vars->duplex = DUPLEX_FULL;
- else
- vars->duplex = DUPLEX_HALF;
-
- if (SINGLE_MEDIA_DIRECT(params)) {
- bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
- if (phy->req_line_speed == SPEED_AUTO_NEG)
- bnx2x_xgxs_an_resolve(phy, params, vars,
- gp_status);
- }
-
- switch (gp_status & GP_STATUS_SPEED_MASK) {
+ switch (speed_mask) {
case GP_STATUS_10M:
- new_line_speed = SPEED_10;
+ vars->line_speed = SPEED_10;
if (vars->duplex == DUPLEX_FULL)
vars->link_status |= LINK_10TFD;
else
@@ -2508,7 +5189,7 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
break;
case GP_STATUS_100M:
- new_line_speed = SPEED_100;
+ vars->line_speed = SPEED_100;
if (vars->duplex == DUPLEX_FULL)
vars->link_status |= LINK_100TXFD;
else
@@ -2517,7 +5198,7 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
case GP_STATUS_1G:
case GP_STATUS_1G_KX:
- new_line_speed = SPEED_1000;
+ vars->line_speed = SPEED_1000;
if (vars->duplex == DUPLEX_FULL)
vars->link_status |= LINK_1000TFD;
else
@@ -2525,7 +5206,7 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
break;
case GP_STATUS_2_5G:
- new_line_speed = SPEED_2500;
+ vars->line_speed = SPEED_2500;
if (vars->duplex == DUPLEX_FULL)
vars->link_status |= LINK_2500TFD;
else
@@ -2536,50 +5217,28 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
case GP_STATUS_6G:
DP(NETIF_MSG_LINK,
"link speed unsupported gp_status 0x%x\n",
- gp_status);
+ speed_mask);
return -EINVAL;
case GP_STATUS_10G_KX4:
case GP_STATUS_10G_HIG:
case GP_STATUS_10G_CX4:
- new_line_speed = SPEED_10000;
+ case GP_STATUS_10G_KR:
+ case GP_STATUS_10G_SFI:
+ case GP_STATUS_10G_XFI:
+ vars->line_speed = SPEED_10000;
vars->link_status |= LINK_10GTFD;
break;
-
- case GP_STATUS_12G_HIG:
- new_line_speed = SPEED_12000;
- vars->link_status |= LINK_12GTFD;
- break;
-
- case GP_STATUS_12_5G:
- new_line_speed = SPEED_12500;
- vars->link_status |= LINK_12_5GTFD;
+ case GP_STATUS_20G_DXGXS:
+ vars->line_speed = SPEED_20000;
+ vars->link_status |= LINK_20GTFD;
break;
-
- case GP_STATUS_13G:
- new_line_speed = SPEED_13000;
- vars->link_status |= LINK_13GTFD;
- break;
-
- case GP_STATUS_15G:
- new_line_speed = SPEED_15000;
- vars->link_status |= LINK_15GTFD;
- break;
-
- case GP_STATUS_16G:
- new_line_speed = SPEED_16000;
- vars->link_status |= LINK_16GTFD;
- break;
-
default:
DP(NETIF_MSG_LINK,
"link speed unsupported gp_status 0x%x\n",
- gp_status);
+ speed_mask);
return -EINVAL;
}
-
- vars->line_speed = new_line_speed;
-
} else { /* link_down */
DP(NETIF_MSG_LINK, "phy link down\n");
@@ -2588,7 +5247,47 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
vars->mac_type = MAC_TYPE_NONE;
+ }
+ DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n",
+ vars->phy_link_up, vars->line_speed);
+ return 0;
+}
+
+static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+
+ struct bnx2x *bp = params->bp;
+ u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
+ int rc = 0;
+
+ /* Read gp_status */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
+ duplex = DUPLEX_FULL;
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)
+ link_up = 1;
+ speed_mask = gp_status & GP_STATUS_SPEED_MASK;
+ DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n",
+ gp_status, link_up, speed_mask);
+ rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask,
+ duplex);
+ if (rc == -EINVAL)
+ return rc;
+
+ if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
+ if (SINGLE_MEDIA_DIRECT(params)) {
+ bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ bnx2x_xgxs_an_resolve(phy, params, vars,
+ gp_status);
+ }
+ } else { /* link_down */
if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
SINGLE_MEDIA_DIRECT(params)) {
/* Check signal is detected */
@@ -2596,13 +5295,86 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
}
}
- DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n",
- gp_status, vars->phy_link_up, vars->line_speed);
DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
}
+static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+
+ struct bnx2x *bp = params->bp;
+
+ u8 lane;
+ u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
+ int rc = 0;
+ lane = bnx2x_get_warpcore_lane(phy, params);
+ /* Read gp_status */
+ if (phy->req_line_speed > SPEED_10000) {
+ u16 temp_link_up;
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ 1, &temp_link_up);
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ 1, &link_up);
+ DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n",
+ temp_link_up, link_up);
+ link_up &= (1<<2);
+ if (link_up)
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ } else {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1);
+ DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1);
+ /* Check for either KR or generic link up. */
+ gp_status1 = ((gp_status1 >> 8) & 0xf) |
+ ((gp_status1 >> 12) & 0xf);
+ link_up = gp_status1 & (1 << lane);
+ if (link_up && SINGLE_MEDIA_DIRECT(params)) {
+ u16 pd, gp_status4;
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ /* Check Autoneg complete */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_4,
+ &gp_status4);
+ if (gp_status4 & ((1<<12)<<lane))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+ /* Check parallel detect used */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_PAR_DET_10G_STATUS,
+ &pd);
+ if (pd & (1<<15))
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+ }
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+ }
+
+ if (lane < 2) {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed);
+ } else {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed);
+ }
+ DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed);
+
+ if ((lane & 1) == 0)
+ gp_speed <<= 8;
+ gp_speed &= 0x3f00;
+
+ rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
+ duplex);
+
+ DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
+ vars->duplex, vars->flow_ctrl, vars->link_status);
+ return rc;
+}
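
In the per-lane speed read above, GP_2_2 serves lanes 0/1 and GP_2_3 serves lanes 2/3; the even lane's byte is shifted up so the speed field always lands in bits 13:8 before being masked with 0x3f00, presumably so it lines up with the GP_STATUS_* codes that bnx2x_get_link_speed_duplex() expects. A compact sketch of that normalisation (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Each GP2_STATUS_GP_2_{2,3} register carries the speed field for two
 * lanes: the even lane in its low byte, the odd lane in its high byte. */
static uint16_t wc_speed_mask(uint8_t lane, uint16_t gp_2_2, uint16_t gp_2_3)
{
	uint16_t gp_speed = (lane < 2) ? gp_2_2 : gp_2_3;

	if ((lane & 1) == 0)
		gp_speed <<= 8;		/* move the even lane's byte up */
	return gp_speed & 0x3f00;	/* normalised speed field, bits 13:8 */
}

int main(void)
{
	/* lane 2, speed field 0x13 in the low byte of GP_2_3 -> 0x1300 */
	printf("0x%04x\n", wc_speed_mask(2, 0x0000, 0x0013));
	return 0;
}
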
static void bnx2x_set_gmii_tx_driver(struct link_params *params)
{
struct bnx2x *bp = params->bp;
@@ -2642,8 +5414,8 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
}
}
-static u8 bnx2x_emac_program(struct link_params *params,
- struct link_vars *vars)
+static int bnx2x_emac_program(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -2713,9 +5485,9 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
}
}
-static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
@@ -2742,11 +5514,11 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "not SGMII, AN\n");
/* AN enabled */
- bnx2x_set_brcm_cl37_advertisment(phy, params);
+ bnx2x_set_brcm_cl37_advertisement(phy, params);
/* program duplex & pause advertisement (for aneg) */
- bnx2x_set_ieee_aneg_advertisment(phy, params,
- vars->ieee_fc);
+ bnx2x_set_ieee_aneg_advertisement(phy, params,
+ vars->ieee_fc);
/* enable autoneg */
bnx2x_set_autoneg(phy, params, vars, enable_cl73);
@@ -2762,29 +5534,12 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
}
}
-static u8 bnx2x_init_serdes(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
-{
- u8 rc;
- vars->phy_flags |= PHY_SGMII_FLAG;
- bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
- bnx2x_set_aer_mmd_serdes(params->bp, phy);
- rc = bnx2x_reset_unicore(params, phy, 1);
- /* reset the SerDes and wait for reset bit return low */
- if (rc != 0)
- return rc;
- bnx2x_set_aer_mmd_serdes(params->bp, phy);
-
- return rc;
-}
-
-static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
+static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
- u8 rc;
- vars->phy_flags = PHY_XGXS_FLAG;
+ int rc;
+ vars->phy_flags |= PHY_XGXS_FLAG;
if ((phy->req_line_speed &&
((phy->req_line_speed == SPEED_100) ||
(phy->req_line_speed == SPEED_10))) ||
@@ -2792,26 +5547,28 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
(phy->speed_cap_mask >=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
(phy->speed_cap_mask <
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
- ))
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD))
vars->phy_flags |= PHY_SGMII_FLAG;
else
vars->phy_flags &= ~PHY_SGMII_FLAG;
bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
- bnx2x_set_aer_mmd_xgxs(params, phy);
- bnx2x_set_master_ln(params, phy);
+ bnx2x_set_aer_mmd(params, phy);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ bnx2x_set_master_ln(params, phy);
rc = bnx2x_reset_unicore(params, phy, 0);
/* reset the SerDes and wait for reset bit return low */
if (rc != 0)
return rc;
- bnx2x_set_aer_mmd_xgxs(params, phy);
-
+ bnx2x_set_aer_mmd(params, phy);
/* setting the masterLn_def again after the reset */
- bnx2x_set_master_ln(params, phy);
- bnx2x_set_swap_lanes(params, phy);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
+ bnx2x_set_master_ln(params, phy);
+ bnx2x_set_swap_lanes(params, phy);
+ }
return rc;
}
@@ -2823,8 +5580,13 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
u16 cnt, ctrl;
/* Wait for soft reset to get cleared up to 1 sec */
for (cnt = 0; cnt < 1000; cnt++) {
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, &ctrl);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+ bnx2x_cl22_read(bp, phy,
+ MDIO_PMA_REG_CTRL, &ctrl);
+ else
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, &ctrl);
if (!(ctrl & (1<<15)))
break;
msleep(1);
@@ -2845,7 +5607,11 @@ static void bnx2x_link_int_enable(struct link_params *params)
struct bnx2x *bp = params->bp;
/* Setting the status to report on link up for either XGXS or SerDes */
- if (params->switch_cfg == SWITCH_CFG_10G) {
+ if (CHIP_IS_E3(bp)) {
+ mask = NIG_MASK_XGXS0_LINK_STATUS;
+ if (!(SINGLE_MEDIA_DIRECT(params)))
+ mask |= NIG_MASK_MI_INT;
+ } else if (params->switch_cfg == SWITCH_CFG_10G) {
mask = (NIG_MASK_XGXS0_LINK10G |
NIG_MASK_XGXS0_LINK_STATUS);
DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
@@ -2918,11 +5684,11 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
}
static void bnx2x_link_int_ack(struct link_params *params,
- struct link_vars *vars, u8 is_10g)
+ struct link_vars *vars, u8 is_10g_plus)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
-
+ u32 mask;
/*
* First reset all status we assume only one line will be
* change at a time
@@ -2932,47 +5698,34 @@ static void bnx2x_link_int_ack(struct link_params *params,
NIG_STATUS_XGXS0_LINK_STATUS |
NIG_STATUS_SERDES0_LINK_STATUS));
if (vars->phy_link_up) {
- if (is_10g) {
- /*
- * Disable the 10G link interrupt by writing 1 to the
- * status register
- */
- DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
- bnx2x_bits_en(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- NIG_STATUS_XGXS0_LINK10G);
-
- } else if (params->switch_cfg == SWITCH_CFG_10G) {
- /*
- * Disable the link interrupt by writing 1 to the
- * relevant lane in the status register
- */
- u32 ser_lane = ((params->lane_config &
+ if (USES_WARPCORE(bp))
+ mask = NIG_STATUS_XGXS0_LINK_STATUS;
+ else {
+ if (is_10g_plus)
+ mask = NIG_STATUS_XGXS0_LINK10G;
+ else if (params->switch_cfg == SWITCH_CFG_10G) {
+ /*
+ * Disable the link interrupt by writing 1 to
+ * the relevant lane in the status register
+ */
+ u32 ser_lane =
+ ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
-
- DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n",
- vars->line_speed);
- bnx2x_bits_en(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- ((1 << ser_lane) <<
- NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
-
- } else { /* SerDes */
- DP(NETIF_MSG_LINK, "SerDes phy link up\n");
- /*
- * Disable the link interrupt by writing 1 to the status
- * register
- */
- bnx2x_bits_en(bp,
- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- NIG_STATUS_SERDES0_LINK_STATUS);
+ mask = ((1 << ser_lane) <<
+ NIG_STATUS_XGXS0_LINK_STATUS_SIZE);
+ } else
+ mask = NIG_STATUS_SERDES0_LINK_STATUS;
}
-
+ DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n",
+ mask);
+ bnx2x_bits_en(bp,
+ NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+ mask);
}
}
-static u8 bnx2x_format_ver(u32 num, u8 *str, u16 *len)
+static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
{
u8 *str_ptr = str;
u32 mask = 0xf0000000;
@@ -3011,19 +5764,19 @@ static u8 bnx2x_format_ver(u32 num, u8 *str, u16 *len)
}
-static u8 bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
str[0] = '\0';
(*len)--;
return 0;
}
-u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
- u8 *version, u16 len)
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
+ u8 *version, u16 len)
{
struct bnx2x *bp;
u32 spirom_ver = 0;
- u8 status = 0;
+ int status = 0;
u8 *ver_p = version;
u16 remain_len = len;
if (version == NULL || params == NULL)
@@ -3065,15 +5818,18 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
if (phy->req_line_speed != SPEED_1000) {
- u32 md_devad;
+ u32 md_devad = 0;
DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
- /* change the uni_phy_addr in the nig */
- md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
- port*0x18));
+ if (!CHIP_IS_E3(bp)) {
+ /* change the uni_phy_addr in the nig */
+ md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
+ port*0x18));
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+ 0x5);
+ }
bnx2x_cl45_write(bp, phy,
5,
@@ -3088,10 +5844,13 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
0x6041);
msleep(200);
/* set aer mmd back */
- bnx2x_set_aer_mmd_xgxs(params, phy);
+ bnx2x_set_aer_mmd(params, phy);
- /* and md_devad */
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
+ if (!CHIP_IS_E3(bp)) {
+ /* and md_devad */
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+ md_devad);
+ }
} else {
u16 mii_ctrl;
DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
@@ -3107,12 +5866,13 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
}
}
-u8 bnx2x_set_led(struct link_params *params,
- struct link_vars *vars, u8 mode, u32 speed)
+int bnx2x_set_led(struct link_params *params,
+ struct link_vars *vars, u8 mode, u32 speed)
{
u8 port = params->port;
u16 hw_led_mode = params->hw_led_mode;
- u8 rc = 0, phy_idx;
+ int rc = 0;
+ u8 phy_idx;
u32 tmp;
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
struct bnx2x *bp = params->bp;
@@ -3146,8 +5906,10 @@ u8 bnx2x_set_led(struct link_params *params,
if (!vars->link_up)
break;
case LED_MODE_ON:
- if (params->phy[EXT_PHY1].type ==
- PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
+ if (((params->phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
+ (params->phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
CHIP_IS_E2(bp) && params->num_phys == 2) {
/*
* This is a work-around for E2+8727 Configurations
@@ -3160,18 +5922,30 @@ u8 bnx2x_set_led(struct link_params *params,
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
EMAC_WR(bp, EMAC_REG_EMAC_LED,
(tmp | EMAC_LED_OVERRIDE));
- return rc;
+ /*
+ * return here without enabling traffic
+				 * LED blink and setting rate in ON mode.
+ * In oper mode, enabling LED blink
+ * and setting rate is needed.
+ */
+ if (mode == LED_MODE_ON)
+ return rc;
}
} else if (SINGLE_MEDIA_DIRECT(params)) {
/*
* This is a work-around for HW issue found when link
* is up in CL73
*/
- REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
- } else {
+ if (CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp) ||
+ (mode == LED_MODE_ON))
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+ else
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+ hw_led_mode);
+ } else
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
- }
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
/* Set blinking rate to ~15.9Hz */
@@ -3214,21 +5988,49 @@ u8 bnx2x_set_led(struct link_params *params,
* This function comes to reflect the actual link state read DIRECTLY from the
* HW
*/
-u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
- u8 is_serdes)
+int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+ u8 is_serdes)
{
struct bnx2x *bp = params->bp;
u16 gp_status = 0, phy_index = 0;
u8 ext_phy_link_up = 0, serdes_phy_type;
struct link_vars temp_vars;
-
- CL22_RD_OVER_CL45(bp, &params->phy[INT_PHY],
+ struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
+
+ if (CHIP_IS_E3(bp)) {
+ u16 link_up;
+ if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)]
+ > SPEED_10000) {
+ /* Check 20G link */
+ bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+ 1, &link_up);
+ bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+ 1, &link_up);
+ link_up &= (1<<2);
+ } else {
+ /* Check 10G link and below */
+ u8 lane = bnx2x_get_warpcore_lane(int_phy, params);
+ bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1,
+ &gp_status);
+ gp_status = ((gp_status >> 8) & 0xf) |
+ ((gp_status >> 12) & 0xf);
+ link_up = gp_status & (1 << lane);
+ }
+ if (!link_up)
+ return -ESRCH;
+ } else {
+ CL22_RD_OVER_CL45(bp, int_phy,
MDIO_REG_BANK_GP_STATUS,
MDIO_GP_STATUS_TOP_AN_STATUS1,
&gp_status);
/* link is up only if both local phy and external phy are up */
if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
return -ESRCH;
+ }
+ /* In XGXS loopback mode, do not check external PHY */
+ if (params->loopback_mode == LOOPBACK_XGXS)
+ return 0;
switch (params->num_phys) {
case 1:
@@ -3245,7 +6047,9 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
serdes_phy_type = ((params->phy[phy_index].media_type ==
ETH_PHY_SFP_FIBER) ||
(params->phy[phy_index].media_type ==
- ETH_PHY_XFP_FIBER));
+ ETH_PHY_XFP_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_DA_TWINAX));
if (is_serdes != serdes_phy_type)
continue;
@@ -3263,10 +6067,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
return -ESRCH;
}
-static u8 bnx2x_link_initialize(struct link_params *params,
- struct link_vars *vars)
+static int bnx2x_link_initialize(struct link_params *params,
+ struct link_vars *vars)
{
- u8 rc = 0;
+ int rc = 0;
u8 phy_index, non_ext_phy;
struct bnx2x *bp = params->bp;
/*
@@ -3282,12 +6086,8 @@ static u8 bnx2x_link_initialize(struct link_params *params,
* (no external phys), or this board has an external phy which requires
* the internal phy to be initialized first.
*/
-
- if (params->phy[INT_PHY].config_init)
- params->phy[INT_PHY].config_init(
- &params->phy[INT_PHY],
- params, vars);
-
+ if (!USES_WARPCORE(bp))
+ bnx2x_prepare_xgxs(&params->phy[INT_PHY], params, vars);
/* init ext phy and enable link state int */
non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
(params->loopback_mode == LOOPBACK_XGXS));
@@ -3296,13 +6096,22 @@ static u8 bnx2x_link_initialize(struct link_params *params,
(params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
(params->loopback_mode == LOOPBACK_EXT_PHY)) {
struct bnx2x_phy *phy = &params->phy[INT_PHY];
- if (vars->line_speed == SPEED_AUTO_NEG)
+ if (vars->line_speed == SPEED_AUTO_NEG &&
+ (CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp)))
bnx2x_set_parallel_detection(phy, params);
- bnx2x_init_internal_phy(phy, params, vars);
+ if (params->phy[INT_PHY].config_init)
+ params->phy[INT_PHY].config_init(phy,
+ params,
+ vars);
}
/* Init external phy*/
- if (!non_ext_phy)
+ if (non_ext_phy) {
+ if (params->phy[INT_PHY].supported &
+ SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+ } else {
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
/*
@@ -3311,17 +6120,22 @@ static u8 bnx2x_link_initialize(struct link_params *params,
* need to initialize the first phy, since they are
* connected.
*/
+ if (params->phy[phy_index].supported &
+ SUPPORTED_FIBRE)
+ vars->link_status |= LINK_STATUS_SERDES_LINK;
+
if (phy_index == EXT_PHY2 &&
(bnx2x_phy_selection(params) ==
PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
- DP(NETIF_MSG_LINK, "Ignoring second phy\n");
+ DP(NETIF_MSG_LINK, "Not initializing"
+ " second phy\n");
continue;
}
params->phy[phy_index].config_init(
&params->phy[phy_index],
params, vars);
}
-
+ }
/* Reset the interrupt indication after phy was initialized */
bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
params->port*4,
@@ -3329,6 +6143,7 @@ static u8 bnx2x_link_initialize(struct link_params *params,
NIG_STATUS_XGXS0_LINK_STATUS |
NIG_STATUS_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
+ bnx2x_update_mng(params, vars->link_status);
return rc;
}
@@ -3359,20 +6174,26 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "reset external PHY\n");
}
-static u8 bnx2x_update_link_down(struct link_params *params,
- struct link_vars *vars)
+static int bnx2x_update_link_down(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
-
+ vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
/* indicate no mac active */
vars->mac_type = MAC_TYPE_NONE;
/* update shared memory */
- vars->link_status = 0;
+ vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
+ LINK_STATUS_LINK_UP |
+ LINK_STATUS_PHYSICAL_LINK_FLAG |
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
+ LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
+ LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
+ LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK);
vars->line_speed = 0;
bnx2x_update_mng(params, vars->link_status);
@@ -3380,26 +6201,35 @@ static u8 bnx2x_update_link_down(struct link_params *params,
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
/* disable emac */
- REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
msleep(10);
-
- /* reset BigMac */
- bnx2x_bmac_rx_disable(bp, params->port);
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ /* reset BigMac/Xmac */
+ if (CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp)) {
+ bnx2x_bmac_rx_disable(bp, params->port);
+ REG_WR(bp, GRCBASE_MISC +
+ MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ }
+ if (CHIP_IS_E3(bp))
+ bnx2x_xmac_disable(params);
+
return 0;
}
-static u8 bnx2x_update_link_up(struct link_params *params,
- struct link_vars *vars,
- u8 link_10g)
+static int bnx2x_update_link_up(struct link_params *params,
+ struct link_vars *vars,
+ u8 link_10g)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
- u8 rc = 0;
+ int rc = 0;
- vars->link_status |= LINK_STATUS_LINK_UP;
+ vars->link_status |= (LINK_STATUS_LINK_UP |
+ LINK_STATUS_PHYSICAL_LINK_FLAG);
+ vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
vars->link_status |=
@@ -3408,25 +6238,48 @@ static u8 bnx2x_update_link_up(struct link_params *params,
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
vars->link_status |=
LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
-
- if (link_10g) {
- bnx2x_bmac_enable(params, vars, 0);
+ if (USES_WARPCORE(bp)) {
+ if (link_10g) {
+ if (bnx2x_xmac_enable(params, vars, 0) ==
+ -ESRCH) {
+ DP(NETIF_MSG_LINK, "Found errors on XMAC\n");
+ vars->link_up = 0;
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ }
+ } else
+ bnx2x_umac_enable(params, vars, 0);
bnx2x_set_led(params, vars,
- LED_MODE_OPER, SPEED_10000);
- } else {
- rc = bnx2x_emac_program(params, vars);
+ LED_MODE_OPER, vars->line_speed);
+ }
+ if ((CHIP_IS_E1x(bp) ||
+ CHIP_IS_E2(bp))) {
+ if (link_10g) {
+ if (bnx2x_bmac_enable(params, vars, 0) ==
+ -ESRCH) {
+ DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
+ vars->link_up = 0;
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ }
- bnx2x_emac_enable(params, vars, 0);
+ bnx2x_set_led(params, vars,
+ LED_MODE_OPER, SPEED_10000);
+ } else {
+ rc = bnx2x_emac_program(params, vars);
+ bnx2x_emac_enable(params, vars, 0);
- /* AN complete? */
- if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
- && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
- SINGLE_MEDIA_DIRECT(params))
- bnx2x_set_gmii_tx_driver(params);
+ /* AN complete? */
+ if ((vars->link_status &
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
+ && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
+ SINGLE_MEDIA_DIRECT(params))
+ bnx2x_set_gmii_tx_driver(params);
+ }
}
/* PBF - link up */
- if (!(CHIP_IS_E2(bp)))
+ if (CHIP_IS_E1x(bp))
rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
vars->line_speed);
@@ -3451,17 +6304,18 @@ static u8 bnx2x_update_link_up(struct link_params *params,
* external phy needs to be up, and at least one of the 2
* external phy links must be up.
*/
-u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
+int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
struct link_vars phy_vars[MAX_PHYS];
u8 port = params->port;
- u8 link_10g, phy_index;
- u8 ext_phy_link_up = 0, cur_link_up, rc = 0;
+ u8 link_10g_plus, phy_index;
+ u8 ext_phy_link_up = 0, cur_link_up;
+ int rc = 0;
u8 is_mi_int = 0;
u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
u8 active_external_phy = INT_PHY;
- vars->link_status = 0;
+ vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
for (phy_index = INT_PHY; phy_index < params->num_phys;
phy_index++) {
phy_vars[phy_index].flow_ctrl = 0;
@@ -3470,8 +6324,12 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
phy_vars[phy_index].duplex = DUPLEX_FULL;
phy_vars[phy_index].phy_link_up = 0;
phy_vars[phy_index].link_up = 0;
+ phy_vars[phy_index].fault_detected = 0;
}
+ if (USES_WARPCORE(bp))
+ bnx2x_set_aer_mmd(params, &params->phy[INT_PHY]);
+
DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
port, (vars->phy_flags & PHY_XGXS_FLAG),
REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
@@ -3488,13 +6346,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
/* disable emac */
- REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
/*
* Step 1:
* Check external link change only for external phys, and apply
* priority selection between them in case the link on both phys
- * is up. Note that the instead of the common vars, a temporary
+ * is up. Note that instead of the common vars, a temporary
* vars argument is used since each phy may have different link/
* speed/duplex result
*/
@@ -3601,6 +6460,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
if (params->phy[active_external_phy].supported &
SUPPORTED_FIBRE)
vars->link_status |= LINK_STATUS_SERDES_LINK;
+ else
+ vars->link_status &= ~LINK_STATUS_SERDES_LINK;
DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
active_external_phy);
}
@@ -3640,14 +6501,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
}
/* anything 10 and over uses the bmac */
- link_10g = ((vars->line_speed == SPEED_10000) ||
- (vars->line_speed == SPEED_12000) ||
- (vars->line_speed == SPEED_12500) ||
- (vars->line_speed == SPEED_13000) ||
- (vars->line_speed == SPEED_15000) ||
- (vars->line_speed == SPEED_16000));
+ link_10g_plus = (vars->line_speed >= SPEED_10000);
- bnx2x_link_int_ack(params, vars, link_10g);
+ bnx2x_link_int_ack(params, vars, link_10g_plus);
/*
* In case external phy link is up, and internal link is down
@@ -3671,21 +6527,24 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
vars->phy_flags |= PHY_SGMII_FLAG;
else
vars->phy_flags &= ~PHY_SGMII_FLAG;
- bnx2x_init_internal_phy(&params->phy[INT_PHY],
- params,
+
+ if (params->phy[INT_PHY].config_init)
+ params->phy[INT_PHY].config_init(
+ &params->phy[INT_PHY], params,
vars);
}
}
/*
* Link is up only if both local phy and external phy (in case of
- * non-direct board) are up
+ * non-direct board) are up and no fault detected on active PHY.
*/
vars->link_up = (vars->phy_link_up &&
(ext_phy_link_up ||
- SINGLE_MEDIA_DIRECT(params)));
+ SINGLE_MEDIA_DIRECT(params)) &&
+ (phy_vars[active_external_phy].fault_detected == 0));
if (vars->link_up)
- rc = bnx2x_update_link_up(params, vars, link_10g);
+ rc = bnx2x_update_link_up(params, vars, link_10g_plus);
else
rc = bnx2x_update_link_down(params, vars);
@@ -3729,69 +6588,6 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
phy->ver_addr);
}
-static void bnx2x_ext_phy_set_pause(struct link_params *params,
- struct bnx2x_phy *phy,
- struct link_vars *vars)
-{
- u16 val;
- struct bnx2x *bp = params->bp;
- /* read modify write pause advertizing */
- bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
-
- val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
-
- /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
- bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
- if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
- val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
- }
- if ((vars->ieee_fc &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
- val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
- }
- DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val);
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
-}
-
-static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
-{
- struct bnx2x *bp = params->bp;
- u16 ld_pause; /* local */
- u16 lp_pause; /* link partner */
- u16 pause_result;
- u8 ret = 0;
- /* read twice */
-
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-
- if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
- vars->flow_ctrl = phy->req_flow_ctrl;
- else if (phy->req_line_speed != SPEED_AUTO_NEG)
- vars->flow_ctrl = params->req_fc_auto_adv;
- else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
- ret = 1;
- bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV_PAUSE, &ld_pause);
- bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
- pause_result = (ld_pause &
- MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
- pause_result |= (lp_pause &
- MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
- DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
- pause_result);
- bnx2x_pause_resolve(vars, pause_result);
- }
- return ret;
-}
-
static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp,
struct bnx2x_phy *phy,
struct link_vars *vars)
@@ -3845,13 +6641,13 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
pause_result);
}
}
-static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
- struct bnx2x_phy *phy,
- u8 port)
+static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u8 port)
{
u32 count = 0;
u16 fw_ver1, fw_msgout;
- u8 rc = 0;
+ int rc = 0;
/* Boot port from external ROM */
/* EDC grst */
@@ -3926,7 +6722,7 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
/******************************************************************/
/* BCM8073 PHY SECTION */
/******************************************************************/
-static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
+static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
{
/* This is required only for 8073A1, version 102 */
u16 val;
@@ -3952,7 +6748,7 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
return 1;
}
-static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
+static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
{
u16 val, cnt, cnt1 ;
@@ -4059,9 +6855,9 @@ static void bnx2x_8073_set_pause_cl37(struct link_params *params,
msleep(500);
}
-static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 val = 0, tmp1;
@@ -4081,9 +6877,9 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
/* enable LASI */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, (1<<2));
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x0004);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004);
bnx2x_8073_set_pause_cl37(params, phy, vars);
@@ -4091,7 +6887,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
@@ -4225,7 +7021,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
u16 an1000_status = 0;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
@@ -4241,7 +7037,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
/* Check the LASI */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
@@ -4367,9 +7163,9 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
/******************************************************************/
/* BCM8705 PHY SECTION */
/******************************************************************/
-static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "init 8705\n");
@@ -4430,6 +7226,30 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
/******************************************************************/
/* SFP+ module Section */
/******************************************************************/
+static void bnx2x_set_disable_pmd_transmit(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 pmd_dis)
+{
+ struct bnx2x *bp = params->bp;
+ /*
+ * Disable transmitter only for bootcodes which can enable it afterwards
+ * (for D3 link)
+ */
+ if (pmd_dis) {
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED)
+ DP(NETIF_MSG_LINK, "Disabling PMD transmitter\n");
+ else {
+ DP(NETIF_MSG_LINK, "NOT disabling PMD transmitter\n");
+ return;
+ }
+ } else
+ DP(NETIF_MSG_LINK, "Enabling PMD transmitter\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_DISABLE, pmd_dis);
+}
+
static u8 bnx2x_get_gpio_port(struct link_params *params)
{
u8 gpio_port;
@@ -4443,9 +7263,10 @@ static u8 bnx2x_get_gpio_port(struct link_params *params)
swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
return gpio_port ^ (swap_val && swap_override);
}
-static void bnx2x_sfp_set_transmitter(struct link_params *params,
- struct bnx2x_phy *phy,
- u8 tx_en)
+
+static void bnx2x_sfp_e1e2_set_transmitter(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 tx_en)
{
u16 val;
u8 port = params->port;
@@ -4500,9 +7321,21 @@ static void bnx2x_sfp_set_transmitter(struct link_params *params,
}
}
-static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 tx_en)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", tx_en);
+ if (CHIP_IS_E3(bp))
+ bnx2x_sfp_e3_set_transmitter(params, phy, tx_en);
+ else
+ bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en);
+}
+
+static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 addr, u8 byte_cnt, u8 *o_buf)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
@@ -4566,9 +7399,45 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
-static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 addr, u8 byte_cnt,
+ u8 *o_buf)
+{
+ int rc = 0;
+ u8 i, j = 0, cnt = 0;
+ u32 data_array[4];
+ u16 addr32;
+ struct bnx2x *bp = params->bp;
+ /*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:"
+ " addr %d, cnt %d\n",
+ addr, byte_cnt);*/
+ if (byte_cnt > 16) {
+ DP(NETIF_MSG_LINK, "Reading from eeprom is"
+ " is limited to 16 bytes\n");
+ return -EINVAL;
+ }
+
+ /* 4 byte aligned address */
+ addr32 = addr & (~0x3);
+ do {
+ rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
+ data_array);
+ } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
+
+ if (rc == 0) {
+ for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) {
+ o_buf[j] = *((u8 *)data_array + i);
+ j++;
+ }
+ }
+
+ return rc;
+}
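/*
 * Editor's note (illustrative sketch, not part of the patch): the warpcore
 * EEPROM helper above rounds the requested address down to a 4-byte boundary,
 * reads whole 32-bit words via bnx2x_bsc_read(), and then copies only the
 * requested window into o_buf. The index math, shown standalone for a
 * hypothetical read of 6 bytes starting at offset 0x3 (u8/u16/u32 are the
 * usual kernel typedefs; the function name is made up for illustration):
 */
static void sfp_eeprom_copy_example(u32 *data_array, u8 *o_buf)
{
	u16 addr = 0x3, byte_cnt = 6;
	u16 addr32 = addr & (~0x3);	/* 0x0: word-aligned start of the read */
	u8 i, j = 0;

	/* Skip the (addr - addr32) leading bytes of the aligned buffer */
	for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++)
		o_buf[j++] = *((u8 *)data_array + i);
	/* o_buf[0..5] now holds the EEPROM bytes at offsets 0x3..0x8 */
}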
+
+static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 addr, u8 byte_cnt, u8 *o_buf)
{
struct bnx2x *bp = params->bp;
u16 val, i;
@@ -4653,27 +7522,39 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf)
+int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf)
{
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
- return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
- else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
- return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
- return -EINVAL;
+ int rc = -EINVAL;
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
+ byte_cnt, o_buf);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
+ byte_cnt, o_buf);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
+ byte_cnt, o_buf);
+ break;
+ }
+ return rc;
}
-static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
- struct link_params *params,
- u16 *edc_mode)
+static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u16 *edc_mode)
{
struct bnx2x *bp = params->bp;
+ u32 sync_offset = 0, phy_idx, media_types;
u8 val, check_limiting_mode = 0;
*edc_mode = EDC_MODE_LIMITING;
+ phy->media_type = ETH_PHY_UNSPECIFIED;
/* First check for copper cable */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
@@ -4688,7 +7569,7 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
case SFP_EEPROM_CON_TYPE_VAL_COPPER:
{
u8 copper_module_type;
-
+ phy->media_type = ETH_PHY_DA_TWINAX;
/*
* Check if it's an active cable (includes SFP+ module)
* or a passive cable
@@ -4697,8 +7578,7 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
params,
SFP_EEPROM_FC_TX_TECH_ADDR,
1,
- &copper_module_type) !=
- 0) {
+ &copper_module_type) != 0) {
DP(NETIF_MSG_LINK,
"Failed to read copper-cable-type"
" from SFP+ EEPROM\n");
@@ -4723,6 +7603,7 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
break;
}
case SFP_EEPROM_CON_TYPE_VAL_LC:
+ phy->media_type = ETH_PHY_SFP_FIBER;
DP(NETIF_MSG_LINK, "Optic module detected\n");
check_limiting_mode = 1;
break;
@@ -4731,7 +7612,22 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
val);
return -EINVAL;
}
-
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].media_type);
+ media_types = REG_RD(bp, sync_offset);
+ /* Update media type for non-PMF sync */
+ for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+ if (&(params->phy[phy_idx]) == phy) {
+ media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+ media_types |= ((phy->media_type &
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+ break;
+ }
+ }
+ REG_WR(bp, sync_offset, media_types);
if (check_limiting_mode) {
u8 options[SFP_EEPROM_OPTIONS_SIZE];
if (bnx2x_read_sfp_module_eeprom(phy,
@@ -4755,8 +7651,8 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
* This function reads the relevant field from the module (SFP+) and verifies
* that it is compliant with this board
*/
-static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
- struct link_params *params)
+static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u32 val, cmd;
@@ -4825,8 +7721,8 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
return -EINVAL;
}
-static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
- struct link_params *params)
+static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
+ struct link_params *params)
{
u8 val;
@@ -4858,8 +7754,8 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
* In the GPIO register, bit 4 is use to determine if the GPIOs are
* operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
* output
- * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
- * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
+ * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0
+ * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1
* where the 1st bit is the over-current(only input), and 2nd bit is
* for power( only output )
*
@@ -4868,15 +7764,14 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
*/
if (phy->flags & FLAGS_NOC)
return;
- if (!(phy->flags &
- FLAGS_NOC) && is_power_up)
+ if (is_power_up)
val = (1<<4);
else
/*
* Set GPIO control to OUTPUT, and set the power bit
* according to is_power_up
*/
- val = ((!(is_power_up)) << 1);
+ val = (1<<1);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
@@ -4884,9 +7779,9 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
val);
}
-static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
- struct bnx2x_phy *phy,
- u16 edc_mode)
+static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
{
u16 cur_limiting_mode;
@@ -4934,9 +7829,9 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
return 0;
}
-static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
- struct bnx2x_phy *phy,
- u16 edc_mode)
+static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
{
u16 phy_identifier;
u16 rom_ver2_val;
@@ -4989,7 +7884,7 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
}
}
-static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+static void bnx2x_set_e1e2_module_fault_led(struct link_params *params,
u8 gpio_mode)
{
struct bnx2x *bp = params->bp;
@@ -5021,12 +7916,149 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
}
}
-static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
- struct link_params *params)
+static void bnx2x_set_e3_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ u32 pin_cfg;
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ pin_cfg = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >>
+ PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT;
+ DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n",
+ gpio_mode, pin_cfg);
+ bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode);
+}
+
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
+ if (CHIP_IS_E3(bp)) {
+ /*
+ * Low ==> SFP+ module is supported
+ * High ==> SFP+ module is not on the approved vendor list
+ */
+ bnx2x_set_e3_module_fault_led(params, gpio_mode);
+ } else
+ bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
+}
+
+static void bnx2x_warpcore_power_module(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 power)
+{
+ u32 pin_cfg;
+ struct bnx2x *bp = params->bp;
+
+ pin_cfg = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+ PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+ if (pin_cfg == PIN_CFG_NA)
+ return;
+ DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+ power, pin_cfg);
+ /*
+ * Low ==> corresponding SFP+ module is powered
+ * high ==> the SFP+ module is powered down
+ */
+ bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
+
+static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ bnx2x_warpcore_power_module(params, phy, 0);
+}
+
+static void bnx2x_power_sfp_module(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 power)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power);
+
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ bnx2x_8727_power_module(params->bp, phy, power);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ bnx2x_warpcore_power_module(params, phy, power);
+ break;
+ default:
+ break;
+ }
+}
+static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
+ u16 val = 0;
+ u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+ struct bnx2x *bp = params->bp;
+
+ u8 lane = bnx2x_get_warpcore_lane(phy, params);
+ /* This is a global register which controls all lanes */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+ val &= ~(0xf << (lane << 2));
+
+ switch (edc_mode) {
+ case EDC_MODE_LINEAR:
+ case EDC_MODE_LIMITING:
+ mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+ break;
+ case EDC_MODE_PASSIVE_DAC:
+ mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
+ break;
+ default:
+ break;
+ }
+
+ val |= (mode << (lane << 2));
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val);
+ /* A must read */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+
+ /* Restart microcode to re-read the new mode */
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+
+}
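/*
 * Editor's note (illustrative sketch, not part of the patch): the firmware
 * mode register written above packs one 4-bit mode per warpcore lane, so the
 * code clears and rewrites only the nibble belonging to the current lane.
 * Worked standalone for a hypothetical lane index and mode value (the
 * function name and values are made up for illustration):
 */
static u16 wc_lane_fw_mode_example(u16 reg_val)
{
	u8 lane = 2;	/* hypothetical lane index (0..3) */
	u16 mode = 0x9;	/* hypothetical 4-bit firmware mode value */

	reg_val &= ~(0xf << (lane << 2));	/* clear this lane's nibble, bits 8..11 */
	reg_val |= (mode << (lane << 2));	/* write the new mode into bits 8..11 */
	return reg_val;
}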
+
+static void bnx2x_set_limiting_mode(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u16 edc_mode)
+{
+ switch (phy->type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+ bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+ bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode);
+ break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+ bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode);
+ break;
+ }
+}
+
+int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params)
{
struct bnx2x *bp = params->bp;
u16 edc_mode;
- u8 rc = 0;
+ int rc = 0;
u32 val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
@@ -5034,7 +8066,8 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
params->port);
-
+ /* Power up module */
+ bnx2x_power_sfp_module(params, phy, 1);
if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
return -EINVAL;
@@ -5046,12 +8079,11 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
bnx2x_set_sfp_module_fault_led(params,
MISC_REGISTERS_GPIO_HIGH);
- if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
- ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
- PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
- /* Shutdown SFP+ module */
+ /* Check whether the SFP+ module needs to be powered down */
+ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) {
DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
- bnx2x_8727_power_module(bp, phy, 0);
+ bnx2x_power_sfp_module(params, phy, 0);
return rc;
}
} else {
@@ -5059,18 +8091,12 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
}
- /* power up the SFP module */
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
- bnx2x_8727_power_module(bp, phy, 1);
-
/*
* Check and set limiting mode / LRM mode on 8726. On 8727 it
* is done automatically
*/
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
- bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
- else
- bnx2x_8727_set_limiting_mode(bp, phy, edc_mode);
+ bnx2x_set_limiting_mode(params, phy, edc_mode);
+
/*
* Enable transmit for this module if the module is approved, or
* if unapproved modules should also enable the Tx laser
@@ -5088,23 +8114,33 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
void bnx2x_handle_module_detect_int(struct link_params *params)
{
struct bnx2x *bp = params->bp;
- struct bnx2x_phy *phy = &params->phy[EXT_PHY1];
+ struct bnx2x_phy *phy;
u32 gpio_val;
- u8 port = params->port;
+ u8 gpio_num, gpio_port;
+ if (CHIP_IS_E3(bp))
+ phy = &params->phy[INT_PHY];
+ else
+ phy = &params->phy[EXT_PHY1];
+
+ if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base,
+ params->port, &gpio_num, &gpio_port) ==
+ -EINVAL) {
+ DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n");
+ return;
+ }
/* Set valid module led off */
bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
/* Get current gpio val reflecting module plugged in / out*/
- gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
+ gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
/* Call the handling function in case module is detected */
if (gpio_val == 0) {
-
- bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
+ bnx2x_power_sfp_module(params, phy, 1);
+ bnx2x_set_gpio_int(bp, gpio_num,
MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
- port);
-
+ gpio_port);
if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
bnx2x_sfp_module_detection(phy, params);
else
@@ -5114,21 +8150,45 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].
config));
-
- bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
+ bnx2x_set_gpio_int(bp, gpio_num,
MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
- port);
+ gpio_port);
/*
* Module was plugged out.
* Disable transmit for this module
*/
- if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
- PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
+ phy->media_type = ETH_PHY_NOT_PRESENT;
+ if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+ PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) ||
+ CHIP_IS_E3(bp))
bnx2x_sfp_set_transmitter(params, phy, 0);
}
}
/******************************************************************/
+/* Used by 8706 and 8727 */
+/******************************************************************/
+static void bnx2x_sfp_mask_fault(struct bnx2x *bp,
+ struct bnx2x_phy *phy,
+ u16 alarm_status_offset,
+ u16 alarm_ctrl_offset)
+{
+ u16 alarm_status, val;
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, alarm_status_offset,
+ &alarm_status);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, alarm_status_offset,
+ &alarm_status);
+ /* Mask or enable the fault event. */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val);
+ if (alarm_status & (1<<0))
+ val &= ~(1<<0);
+ else
+ val |= (1<<0);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val);
+}
+/******************************************************************/
/* common BCM8706/BCM8726 PHY SECTION */
/******************************************************************/
static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
@@ -5141,12 +8201,16 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
/* Clear RX Alarm*/
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+ bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
+ MDIO_PMA_LASI_TXCTRL);
+
/* clear LASI indication*/
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2);
bnx2x_cl45_read(bp, phy,
@@ -5173,6 +8237,17 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
bnx2x_ext_phy_resolve_fc(phy, params, vars);
vars->duplex = DUPLEX_FULL;
}
+
+ /* Capture 10G link fault. Read twice to clear stale value. */
+ if (vars->line_speed == SPEED_10000) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+ if (val1 & (1<<0))
+ vars->fault_detected = 1;
+ }
+
return link_up;
}
@@ -5186,6 +8261,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
u32 tx_en_mode;
u16 cnt, val, tmp1;
struct bnx2x *bp = params->bp;
+
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
@@ -5228,7 +8304,11 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+ 0);
+ /* Arm LASI for link and Tx fault. */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3);
} else {
/* Force 1Gbps using autoneg with 1G advertisement */
@@ -5251,10 +8331,10 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
0x0400);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
0x0004);
}
bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
@@ -5281,9 +8361,9 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
return 0;
}
-static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static int bnx2x_8706_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
return bnx2x_8706_8726_read_status(phy, params, vars);
}
@@ -5358,13 +8438,11 @@ static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
}
-static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u32 val;
- u32 swap_val, swap_override, aeu_gpio_mask, offset;
DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
@@ -5387,9 +8465,9 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x5);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
0x400);
} else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
@@ -5415,14 +8493,14 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
* change
*/
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
0x400);
} else { /* Default 10G. Set only LASI control */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1);
}
/* Set TX PreEmphasis if needed */
@@ -5443,30 +8521,6 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
phy->tx_preemphasis[1]);
}
- /* Set GPIO3 to trigger SFP+ module insertion/removal */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
-
- /* The GPIO should be swapped if the swap register is set and active */
- swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
- swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
-
- /* Select function upon port-swap configuration */
- if (params->port == 0) {
- offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
- aeu_gpio_mask = (swap_val && swap_override) ?
- AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
- AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
- } else {
- offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
- aeu_gpio_mask = (swap_val && swap_override) ?
- AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
- AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
- }
- val = REG_RD(bp, offset);
- /* add GPIO3 to group */
- val |= aeu_gpio_mask;
- REG_WR(bp, offset, val);
return 0;
}
@@ -5548,9 +8602,9 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
-static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
u32 tx_en_mode;
u16 tmp1, val, mod_abs, tmp2;
@@ -5561,16 +8615,19 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_wait_reset_complete(bp, phy, params);
rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
- lasi_ctrl_val = 0x0004;
+ /* Should be 0x6 to enable XS on Tx side. */
+ lasi_ctrl_val = 0x0006;
DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
/* enable LASI */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
rx_alarm_ctrl_val);
-
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+ 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
/*
* Initially configure MOD_ABS to interrupt when module is
@@ -5590,6 +8647,9 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+ /* Enable/Disable PHY transmitter output */
+ bnx2x_set_disable_pmd_transmit(params, phy, 0);
+
/* Make MOD_ABS give interrupt on change */
bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
&val);
@@ -5612,7 +8672,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
/* Set option 1G speed */
if (phy->req_line_speed == SPEED_1000) {
@@ -5730,7 +8790,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
/* Module is absent */
DP(NETIF_MSG_LINK, "MOD_ABS indication "
"show module is absent\n");
-
+ phy->media_type = ETH_PHY_NOT_PRESENT;
/*
* 1. Set mod_abs to detect next module
* presence event
@@ -5752,7 +8812,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
*/
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
} else {
/* Module is present */
@@ -5781,7 +8841,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
*/
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
@@ -5805,26 +8865,29 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
- u8 link_up = 0;
+ u8 link_up = 0, oc_port = params->port;
u16 link_status = 0;
u16 rx_alarm_status, lasi_ctrl, val1;
/* If PHY is not initialized, do not check link status */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
&lasi_ctrl);
if (!lasi_ctrl)
return 0;
- /* Check the LASI */
+ /* Check the LASI on Rx */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT,
&rx_alarm_status);
vars->line_speed = 0;
DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status);
+ bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
+ MDIO_PMA_LASI_TXCTRL);
+
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
@@ -5843,8 +8906,10 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
&val1);
if ((val1 & (1<<8)) == 0) {
+ if (!CHIP_IS_E1x(bp))
+ oc_port = BP_PATH(bp) + (params->port << 1);
DP(NETIF_MSG_LINK, "8727 Power fault has been detected"
- " on port %d\n", params->port);
+ " on port %d\n", oc_port);
netdev_err(bp->dev, "Error: Power fault on Port %d has"
" been detected and the power to "
"that SFP+ module has been removed"
@@ -5852,11 +8917,11 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
" Please remove the SFP+ module and"
" restart the system to clear this"
" error.\n",
- params->port);
+ oc_port);
/* Disable all RX_ALARMs except for mod_abs */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
+ MDIO_PMA_LASI_RXCTRL, (1<<5));
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
@@ -5869,7 +8934,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
/* Clear RX alarm */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+ MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
return 0;
}
} /* Over current check */
@@ -5879,7 +8944,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
bnx2x_8727_handle_mod_abs(phy, params);
/* Enable all mod_abs and link detection bits */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
((1<<5) | (1<<2)));
}
DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
@@ -5915,6 +8980,20 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "port %x: External link is down\n",
params->port);
}
+
+ /* Capture 10G link fault. */
+ if (vars->line_speed == SPEED_10000) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+ MDIO_PMA_LASI_TXSTAT, &val1);
+
+ if (val1 & (1<<0)) {
+ vars->fault_detected = 1;
+ }
+ }
+
if (link_up) {
bnx2x_ext_phy_resolve_fc(phy, params, vars);
vars->duplex = DUPLEX_FULL;
@@ -5945,10 +9024,14 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
+
+ /* Enable/Disable PHY transmitter output */
+ bnx2x_set_disable_pmd_transmit(params, phy, 1);
+
/* Disable Transmitter */
bnx2x_sfp_set_transmitter(params, phy, 0);
/* Clear LASI */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0);
}
@@ -5958,111 +9041,106 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
struct link_params *params)
{
- u16 val, fw_ver1, fw_ver2, cnt, adj;
+ u16 val, fw_ver1, fw_ver2, cnt;
+ u8 port;
struct bnx2x *bp = params->bp;
- adj = 0;
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
- adj = -1;
+ port = params->port;
/* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
/* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
if (val & 1)
break;
udelay(5);
}
if (cnt == 100) {
DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
- bnx2x_save_spirom_version(bp, params->port, 0,
+ bnx2x_save_spirom_version(bp, port, 0,
phy->ver_addr);
return;
}
/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
if (val & 1)
break;
udelay(5);
}
if (cnt == 100) {
DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
- bnx2x_save_spirom_version(bp, params->port, 0,
+ bnx2x_save_spirom_version(bp, port, 0,
phy->ver_addr);
return;
}
/* lower 16 bits of the register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
/* upper 16 bits of register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
- bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
+ bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
phy->ver_addr);
}
static void bnx2x_848xx_set_led(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- u16 val, adj;
-
- adj = 0;
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
- adj = -1;
+ u16 val;
/* PHYC_CTL_LED_CTL */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val);
+ MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
val &= 0xFE00;
val |= 0x0092;
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val);
+ MDIO_PMA_REG_8481_LINK_SIGNAL, val);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK + adj,
+ MDIO_PMA_REG_8481_LED1_MASK,
0x80);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK + adj,
+ MDIO_PMA_REG_8481_LED2_MASK,
0x18);
/* Select activity source by Tx and Rx, as suggested by PHY AE */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK + adj,
+ MDIO_PMA_REG_8481_LED3_MASK,
0x0006);
/* Select the closest activity blink rate to that in 10/100/1000 */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_BLINK + adj,
+ MDIO_PMA_REG_8481_LED3_BLINK,
0);
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val);
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val);
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
/* 'Interrupt Mask' */
bnx2x_cl45_write(bp, phy,
@@ -6070,12 +9148,19 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
0xFFFB, 0xFFFD);
}
-static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val;
+ u16 tmp_req_line_speed;
+
+ tmp_req_line_speed = phy->req_line_speed;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ if (phy->req_line_speed == SPEED_10000)
+ phy->req_line_speed = SPEED_AUTO_NEG;
+
/*
* This phy uses the NIG latch mechanism since link indication
* arrives through its LED4 and not via its LASI signal, so we
@@ -6122,11 +9207,14 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
an_1000_val);
- /* set 10 speed advertisement */
+ /* set 100 speed advertisement */
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) &&
+ (phy->supported &
+ (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full)))) {
an_10_100_val |= (1<<7);
/* Enable autoneg and restart autoneg for legacy speeds */
autoneg_val |= (1<<9 | 1<<12);
@@ -6137,9 +9225,12 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
}
/* set 10 speed advertisement */
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) &&
+ (phy->supported &
+ (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full)))) {
an_10_100_val |= (1<<5);
autoneg_val |= (1<<9 | 1<<12);
if (phy->req_duplex == DUPLEX_FULL)
@@ -6148,7 +9239,10 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
}
/* Only 10/100 are allowed to work in FORCE mode */
- if (phy->req_line_speed == SPEED_100) {
+ if ((phy->req_line_speed == SPEED_100) &&
+ (phy->supported &
+ (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full))) {
autoneg_val |= (1<<13);
/* Enabled AUTO-MDIX when autoneg is disabled */
bnx2x_cl45_write(bp, phy,
@@ -6156,7 +9250,10 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
(1<<15 | 1<<9 | 7<<0));
DP(NETIF_MSG_LINK, "Setting 100M force\n");
}
- if (phy->req_line_speed == SPEED_10) {
+ if ((phy->req_line_speed == SPEED_10) &&
+ (phy->supported &
+ (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full))) {
/* Enabled AUTO-MDIX when autoneg is disabled */
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
@@ -6171,7 +9268,13 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
if (phy->req_duplex == DUPLEX_FULL)
autoneg_val |= (1<<8);
- bnx2x_cl45_write(bp, phy,
+ /*
+ * Always write this if this is not 84833.
+ * For 84833, write it only when it's a forced speed.
+ */
+ if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ ((autoneg_val & (1<<12)) == 0))
+ bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
@@ -6179,28 +9282,29 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
(phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
(phy->req_line_speed == SPEED_10000)) {
- DP(NETIF_MSG_LINK, "Advertising 10G\n");
- /* Restart autoneg for 10G*/
+ DP(NETIF_MSG_LINK, "Advertising 10G\n");
+ /* Restart autoneg for 10G*/
- bnx2x_cl45_write(bp, phy,
+ bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
0x3200);
- } else if (phy->req_line_speed != SPEED_10 &&
- phy->req_line_speed != SPEED_100) {
+ } else
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
1);
- }
+
/* Save spirom version */
bnx2x_save_848xx_spirom_version(phy, params);
+ phy->req_line_speed = tmp_req_line_speed;
+
return 0;
}
-static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
/* Restore normal power mode*/
@@ -6215,33 +9319,200 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
return bnx2x_848xx_cmn_config_init(phy, params, vars);
}
-static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+
+#define PHY84833_HDSHK_WAIT 300
+static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 idx;
+ u32 pair_swap;
+ u16 val;
+ u16 data;
+ struct bnx2x *bp = params->bp;
+ /* Do pair swap */
+
+ /* Check for configuration. */
+ pair_swap = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
+ PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
+
+ if (pair_swap == 0)
+ return 0;
+
+ data = (u16)pair_swap;
+
+ /* Write CMD_OPEN_OVERRIDE to STATUS reg */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2,
+ PHY84833_CMD_OPEN_OVERRIDE);
+ for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+ if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+ break;
+ msleep(1);
+ }
+ if (idx >= PHY84833_HDSHK_WAIT) {
+ DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n");
+ return -EINVAL;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG4,
+ data);
+ /* Issue pair swap command */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG0,
+ PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE);
+ for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+ if ((val == PHY84833_CMD_COMPLETE_PASS) ||
+ (val == PHY84833_CMD_COMPLETE_ERROR))
+ break;
+ msleep(1);
+ }
+ if ((idx >= PHY84833_HDSHK_WAIT) ||
+ (val == PHY84833_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "Pairswap: override failed.\n");
+ return -EINVAL;
+ }
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2,
+ PHY84833_CMD_CLEAR_COMPLETE);
+ DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data);
+ return 0;
+}
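
Both the pair-swap sequence above and the AutogrEEEn setup later in this patch drive the 84833 firmware through the same scratch-register mailbox: poll SCRATCH_REG2 until the firmware reports it is open for commands (the pair-swap path additionally requests CMD_OPEN_OVERRIDE first), write the argument and the command code, poll the same register for a pass/error result, then clear it. The following is a minimal standalone C sketch of that handshake; the register indices, status values and the mdio_read16()/mdio_write16() stubs are illustrative placeholders, not the driver's MDIO_84833_*/PHY84833_* definitions or its bnx2x_cl45_* accessors.

#include <stdint.h>
#include <unistd.h>

/* Illustrative stand-ins for the 84833 scratch-register protocol. */
#define CMD_OPEN_FOR_CMDS	0x0020
#define CMD_COMPLETE_PASS	0x0040
#define CMD_COMPLETE_ERROR	0x0080
#define CMD_CLEAR_COMPLETE	0x0000
#define HDSHK_WAIT		300	/* ~300 x 1 ms, as in the patch */

#define REG_CMD		0	/* command code (SCRATCH_REG0 in the driver) */
#define REG_STATUS	2	/* handshake/status (SCRATCH_REG2) */
#define REG_ARG		4	/* command argument (SCRATCH_REG4) */

/* Fake register file; the real driver goes through bnx2x_cl45_read/write
 * on MDIO_CTL_DEVAD. The stub "firmware" completes a command instantly. */
static uint16_t regs[8] = { [REG_STATUS] = CMD_OPEN_FOR_CMDS };

static uint16_t mdio_read16(int reg)
{
	return regs[reg];
}

static void mdio_write16(int reg, uint16_t val)
{
	regs[reg] = val;
	if (reg == REG_CMD)
		regs[REG_STATUS] = CMD_COMPLETE_PASS;
}

/* One firmware command, mirroring the pair-swap/AutogrEEEn handshake.
 * Returns 0 on success, -1 on timeout or firmware error. */
static int fw_mailbox_cmd(uint16_t cmd, uint16_t arg)
{
	int i;
	uint16_t val = 0;

	/* 1. Wait until the firmware reports the mailbox is open. */
	for (i = 0; i < HDSHK_WAIT; i++) {
		val = mdio_read16(REG_STATUS);
		if (val == CMD_OPEN_FOR_CMDS)
			break;
		usleep(1000);
	}
	if (i >= HDSHK_WAIT)
		return -1;

	/* 2. Write the argument, then post the command code. */
	mdio_write16(REG_ARG, arg);
	mdio_write16(REG_CMD, cmd);

	/* 3. Poll for completion and check the result. */
	for (i = 0; i < HDSHK_WAIT; i++) {
		val = mdio_read16(REG_STATUS);
		if (val == CMD_COMPLETE_PASS || val == CMD_COMPLETE_ERROR)
			break;
		usleep(1000);
	}
	if (i >= HDSHK_WAIT || val == CMD_COMPLETE_ERROR)
		return -1;

	/* 4. Hand the mailbox back to the firmware. */
	mdio_write16(REG_STATUS, CMD_CLEAR_COMPLETE);
	return 0;
}

int main(void)
{
	return fw_mailbox_cmd(0x0001, 0x0000) ? 1 : 0;
}
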
+
+
+static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 chip_id)
+{
+ u32 reset_pin[2];
+ u32 idx;
+ u8 reset_gpios;
+ if (CHIP_IS_E3(bp)) {
+ /* Assume that these will be GPIOs, not EPIOs. */
+ for (idx = 0; idx < 2; idx++) {
+ /* Map config param to register bit. */
+ reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[0].e3_cmn_pin_cfg));
+ reset_pin[idx] = (reset_pin[idx] &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+ reset_pin[idx] -= PIN_CFG_GPIO0_P0;
+ reset_pin[idx] = (1 << reset_pin[idx]);
+ }
+ reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
+ } else {
+ /* E2: read from a different place in shmem. */
+ for (idx = 0; idx < 2; idx++) {
+ reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[0].default_cfg));
+ reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK;
+ reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0;
+ reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT;
+ reset_pin[idx] = (1 << reset_pin[idx]);
+ }
+ reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
+ }
+
+ return reset_gpios;
+}
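
bnx2x_84833_get_reset_gpios() converts each path's shared-memory pin configuration into a one-hot GPIO bit and ORs the two paths into the single mask that the reset helpers then pulse. Below is a standalone sketch of that conversion, mirroring the E3 branch; the mask, shift and base-pin constants are illustrative stand-ins for the PORT_HW_CFG_*/PIN_CFG_* values defined in the driver headers.

#include <stdint.h>
#include <assert.h>

#define PHY_RESET_MASK		0x000000FFu	/* stands in for ..._E3_PHY_RESET_MASK */
#define PHY_RESET_SHIFT		0		/* stands in for ..._E3_PHY_RESET_SHIFT */
#define PIN_CFG_GPIO0_P0	8		/* first GPIO value in the pin-cfg enum */

/* Map one e3_cmn_pin_cfg word to a one-hot GPIO bit. */
static uint8_t reset_pin_to_bit(uint32_t pin_cfg)
{
	uint32_t pin = (pin_cfg & PHY_RESET_MASK) >> PHY_RESET_SHIFT;

	pin -= PIN_CFG_GPIO0_P0;	/* GPIO0_P0 -> bit 0, GPIO1_P0 -> bit 1, ... */
	return (uint8_t)(1u << pin);
}

int main(void)
{
	/* Path 0 resets via GPIO1, path 1 via GPIO3 -> combined mask 0b1010. */
	uint8_t gpios = reset_pin_to_bit(PIN_CFG_GPIO0_P0 + 1) |
			reset_pin_to_bit(PIN_CFG_GPIO0_P0 + 3);

	assert(gpios == 0x0A);
	return 0;
}
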
+
+static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u8 reset_gpios;
+ u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ other_shmem_base_addr));
+
+ u32 shmem_base_path[2];
+ shmem_base_path[0] = params->shmem_base;
+ shmem_base_path[1] = other_shmem_base_addr;
+
+ reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path,
+ params->chip_id);
+
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ udelay(10);
+ DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n",
+ reset_gpios);
+
+ return 0;
+}
+
+static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 chip_id)
+{
+ u8 reset_gpios;
+
+ reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
+
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ udelay(10);
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ msleep(800);
+ DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
+ reset_gpios);
+
+ return 0;
+}
+
+#define PHY84833_CONSTANT_LATENCY 1193
+static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 port, initialize = 1;
- u16 val, adj;
+ u16 val;
u16 temp;
- u32 actual_phy_selection, cms_enable;
- u8 rc = 0;
-
- /* This is just for MDIO_CTL_REG_84823_MEDIA register. */
- adj = 0;
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
- adj = 3;
+ u32 actual_phy_selection, cms_enable, idx;
+ int rc = 0;
msleep(1);
- if (CHIP_IS_E2(bp))
+
+ if (!(CHIP_IS_E1(bp)))
port = BP_PATH(bp);
else
port = params->port;
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH,
- port);
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+ } else {
+ /* MDIO reset */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, 0x8000);
+ /* Bring PHY out of super isolate mode */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+ val &= ~MDIO_84833_SUPER_ISOLATE;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+ }
+
bnx2x_wait_reset_complete(bp, phy, params);
+
/* Wait for GPHY to come out of reset */
msleep(50);
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ bnx2x_84833_pair_swap_cfg(phy, params, vars);
+
/*
* BCM84823 requires that XGXS links up first @ 10G for normal behavior
*/
@@ -6254,14 +9525,20 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* Set dual-media configuration according to configuration */
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_CTL_REG_84823_MEDIA + adj, &val);
+ MDIO_CTL_REG_84823_MEDIA, &val);
val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
- val |= MDIO_CTL_REG_84823_CTRL_MAC_XFI |
- MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L;
+
+ if (CHIP_IS_E3(bp)) {
+ val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+ MDIO_CTL_REG_84823_MEDIA_LINE_MASK);
+ } else {
+ val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI |
+ MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L);
+ }
actual_phy_selection = bnx2x_phy_selection(params);
@@ -6287,28 +9564,90 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_CTL_REG_84823_MEDIA + adj, val);
+ MDIO_CTL_REG_84823_MEDIA, val);
DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
params->multi_phy_config, val);
+ /* AutogrEEEn */
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
+ /* Ensure that f/w is ready */
+ for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+ if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+ break;
+ usleep_range(1000, 1000);
+ }
+ if (idx >= PHY84833_HDSHK_WAIT) {
+ DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n");
+ return -EINVAL;
+ }
+
+ /* Select EEE mode */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG3,
+ 0x2);
+
+ /* Set Idle and Latency */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG4,
+ PHY84833_CONSTANT_LATENCY + 1);
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_DATA3_REG,
+ PHY84833_CONSTANT_LATENCY + 1);
+
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_DATA4_REG,
+ PHY84833_CONSTANT_LATENCY);
+
+ /* Send EEE instruction to command register */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG0,
+ PHY84833_DIAG_CMD_SET_EEE_MODE);
+
+ /* Ensure that the command has completed */
+ for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+ if ((val == PHY84833_CMD_COMPLETE_PASS) ||
+ (val == PHY84833_CMD_COMPLETE_ERROR))
+ break;
+ usleep_range(1000, 1000);
+ }
+ if ((idx >= PHY84833_HDSHK_WAIT) ||
+ (val == PHY84833_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n");
+ return -EINVAL;
+ }
+
+ /* Reset command handler */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_SCRATCH_REG2,
+ PHY84833_CMD_CLEAR_COMPLETE);
+ }
+
if (initialize)
rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
else
bnx2x_save_848xx_spirom_version(phy, params);
- cms_enable = REG_RD(bp, params->shmem_base +
+ /* 84833 PHY has a better feature and doesn't need to support this. */
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+ cms_enable = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
dev_info.port_hw_config[params->port].default_cfg)) &
PORT_HW_CFG_ENABLE_CMS_MASK;
- bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
- if (cms_enable)
- val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
- else
- val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_CTL_REG_84823_USER_CTRL_REG, val);
-
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+ if (cms_enable)
+ val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ else
+ val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+ }
return rc;
}
@@ -6318,20 +9657,16 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 val, val1, val2, adj;
+ u16 val, val1, val2;
u8 link_up = 0;
- /* Reg offset adjustment for 84833 */
- adj = 0;
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
- adj = -1;
/* Check 10G-BaseT link status */
/* Check PMD signal ok */
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, 0xFFFA, &val1);
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
&val2);
DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
@@ -6403,9 +9738,10 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
return link_up;
}
-static u8 bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
+
+static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
{
- u8 status = 0;
+ int status = 0;
u32 spirom_ver;
spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
status = bnx2x_format_ver(spirom_ver, str, len);
@@ -6435,13 +9771,25 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u8 port;
- if (CHIP_IS_E2(bp))
+ u16 val16;
+
+ if (!(CHIP_IS_E1(bp)))
port = BP_PATH(bp);
else
port = params->port;
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- port);
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
+ } else {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_CTL_DEVAD,
+ 0x400f, &val16);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, 0x800);
+ }
}
static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
@@ -6449,11 +9797,17 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 val;
+ u8 port;
+
+ if (!(CHIP_IS_E1(bp)))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
switch (mode) {
case LED_MODE_OFF:
- DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", params->port);
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port);
if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
SHARED_HW_CFG_LED_EXTPHY1) {
@@ -6489,7 +9843,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
case LED_MODE_FRONT_PANEL_OFF:
DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n",
- params->port);
+ port);
if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
SHARED_HW_CFG_LED_EXTPHY1) {
@@ -6524,7 +9878,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
break;
case LED_MODE_ON:
- DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", params->port);
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port);
if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
SHARED_HW_CFG_LED_EXTPHY1) {
@@ -6571,7 +9925,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
case LED_MODE_OPER:
- DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", params->port);
+ DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port);
if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
SHARED_HW_CFG_LED_EXTPHY1) {
@@ -6633,7 +9987,395 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
}
break;
}
+
+ /*
+ * This is a workaround for E3+84833 until autoneg
+ * restart is fixed in f/w
+ */
+ if (CHIP_IS_E3(bp)) {
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_GP2_STATUS_GP_2_1, &val);
+ }
+}
+
+/******************************************************************/
+/* 54618SE PHY SECTION */
+/******************************************************************/
+static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port;
+ u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp;
+ u32 cfg_pin;
+
+ DP(NETIF_MSG_LINK, "54618SE cfg init\n");
+ usleep_range(1000, 1000);
+
+ /* This works with E3 only, no need to check the chip
+ before determining the port. */
+ port = params->port;
+
+ cfg_pin = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+ /* Drive pin high to bring the GPHY out of reset. */
+ bnx2x_set_cfg_pin(bp, cfg_pin, 1);
+
+ /* wait for GPHY to reset */
+ msleep(50);
+
+ /* reset phy */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_PMA_REG_CTRL, 0x8000);
+ bnx2x_wait_reset_complete(bp, phy, params);
+
+ /* wait for GPHY to reset */
+ msleep(50);
+
+ /* Configure LED4: set to INTR (0x6). */
+ /* Accessing shadow register 0xe. */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL2);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp &= ~(0xf << 4);
+ temp |= (0x6 << 4);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ /* Configure INTR based on link status change. */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_INTR_MASK,
+ ~MDIO_REG_INTR_MASK_LINK_STATUS);
+
+ /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_AUTO_DET_MED);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD;
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+
+ /* Set up fc */
+ /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+ bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+ fc_val = 0;
+ if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC)
+ fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+
+ if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+ fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+
+ /* read all advertisement */
+ bnx2x_cl22_read(bp, phy,
+ 0x09,
+ &an_1000_val);
+
+ bnx2x_cl22_read(bp, phy,
+ 0x04,
+ &an_10_100_val);
+
+ bnx2x_cl22_read(bp, phy,
+ MDIO_PMA_REG_CTRL,
+ &autoneg_val);
+
+ /* Disable forced speed */
+ autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+ an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) |
+ (1<<11));
+
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
+ an_1000_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_1000_val |= (1<<9);
+ DP(NETIF_MSG_LINK, "Advertising 1G\n");
+ } else
+ an_1000_val &= ~((1<<8) | (1<<9));
+
+ bnx2x_cl22_write(bp, phy,
+ 0x09,
+ an_1000_val);
+ bnx2x_cl22_read(bp, phy,
+ 0x09,
+ &an_1000_val);
+
+ /* set 100 speed advertisement */
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
+ an_10_100_val |= (1<<7);
+ /* Enable autoneg and restart autoneg for legacy speeds */
+ autoneg_val |= (1<<9 | 1<<12);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_10_100_val |= (1<<8);
+ DP(NETIF_MSG_LINK, "Advertising 100M\n");
+ }
+
+ /* set 10 speed advertisement */
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ if (phy->req_duplex == DUPLEX_FULL)
+ an_10_100_val |= (1<<6);
+ DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ }
+
+ /* Only 10/100 are allowed to work in FORCE mode */
+ if (phy->req_line_speed == SPEED_100) {
+ autoneg_val |= (1<<13);
+ /* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl22_write(bp, phy,
+ 0x18,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 100M force\n");
+ }
+ if (phy->req_line_speed == SPEED_10) {
+ /* Enable AUTO-MDIX when autoneg is disabled */
+ bnx2x_cl22_write(bp, phy,
+ 0x18,
+ (1<<15 | 1<<9 | 7<<0));
+ DP(NETIF_MSG_LINK, "Setting 10M force\n");
+ }
+
+ /* Check if we should turn on Auto-GrEEEn */
+ bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp);
+ if (temp == MDIO_REG_GPHY_ID_54618SE) {
+ if (params->feature_config_flags &
+ FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
+ temp = 6;
+ DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
+ } else {
+ temp = 0;
+ DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n");
+ }
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_DATA_REG,
+ MDIO_REG_GPHY_EEE_ADV);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_ADDR_REG,
+ (0x1 << 14) | MDIO_AN_DEVAD);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_DATA_REG,
+ temp);
+ }
+
+ bnx2x_cl22_write(bp, phy,
+ 0x04,
+ an_10_100_val | fc_val);
+
+ if (phy->req_duplex == DUPLEX_FULL)
+ autoneg_val |= (1<<8);
+
+ bnx2x_cl22_write(bp, phy,
+ MDIO_PMA_REG_CTRL, autoneg_val);
+
+ return 0;
+}
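
The 54618SE legacy-speed setup above programs standard Clause 22 registers with raw shifts: the control register (addressed as MDIO_PMA_REG_CTRL in the cl22 calls) uses bits 12/9 for autoneg enable/restart, bits 13/6 for a forced 100M/1G speed and bit 8 for duplex; register 0x04 advertises 10/100 half/full in bits 5-8; register 0x09 advertises 1000 half/full in bits 8/9. These match the usual IEEE 802.3 Clause 22 bit positions; the compile-checked sketch below only gives them names (the macro names are mine, not the driver's).

#include <stdint.h>
#include <assert.h>

/* Standard Clause 22 MII bit positions; names are illustrative. */
#define BMCR_SPEED_1000		(1 << 6)	/* speed select MSB */
#define BMCR_FULL_DUPLEX	(1 << 8)
#define BMCR_ANRESTART		(1 << 9)
#define BMCR_ANENABLE		(1 << 12)
#define BMCR_SPEED_100		(1 << 13)	/* speed select LSB */

#define ANAR_10_HALF		(1 << 5)	/* register 0x04 */
#define ANAR_10_FULL		(1 << 6)
#define ANAR_100_HALF		(1 << 7)
#define ANAR_100_FULL		(1 << 8)

#define GBCR_1000_HALF		(1 << 8)	/* register 0x09 */
#define GBCR_1000_FULL		(1 << 9)

int main(void)
{
	/* Advertise 1G (half + full, as with a full-duplex request) and
	 * enable/restart autoneg, matching the shifts used in the patch. */
	uint16_t ctrl = 0, an_1000 = 0;

	an_1000 |= GBCR_1000_HALF | GBCR_1000_FULL;
	ctrl |= BMCR_ANENABLE | BMCR_ANRESTART;

	assert(an_1000 == ((1 << 8) | (1 << 9)));
	assert(ctrl == ((1 << 9) | (1 << 12)));
	return 0;
}
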
+
+static void bnx2x_54618se_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "54618SE set link led (mode=%x)\n", mode);
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ case LED_MODE_OPER:
+ case LED_MODE_ON:
+ default:
+ break;
+ }
+ return;
+}
+
+static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin;
+ u8 port;
+
+ /*
+ * In case of no EPIO routed to reset the GPHY, put it
+ * in low power mode.
+ */
+ bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
+ /*
+ * This works with E3 only, no need to check the chip
+ * before determining the port.
+ */
+ port = params->port;
+ cfg_pin = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+ PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+ /* Drive pin low to put GPHY in reset. */
+ bnx2x_set_cfg_pin(bp, cfg_pin, 0);
+}
+
+static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ u8 link_up = 0;
+ u16 legacy_status, legacy_speed;
+
+ /* Get speed operation status */
+ bnx2x_cl22_read(bp, phy,
+ 0x19,
+ &legacy_status);
+ DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
+
+ /* Read status to clear the PHY interrupt. */
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_INTR_STATUS,
+ &val);
+
+ link_up = ((legacy_status & (1<<2)) == (1<<2));
+
+ if (link_up) {
+ legacy_speed = (legacy_status & (7<<8));
+ if (legacy_speed == (7<<8)) {
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ } else if (legacy_speed == (6<<8)) {
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_HALF;
+ } else if (legacy_speed == (5<<8)) {
+ vars->line_speed = SPEED_100;
+ vars->duplex = DUPLEX_FULL;
+ }
+ /* Omitting 100Base-T4 for now */
+ else if (legacy_speed == (3<<8)) {
+ vars->line_speed = SPEED_100;
+ vars->duplex = DUPLEX_HALF;
+ } else if (legacy_speed == (2<<8)) {
+ vars->line_speed = SPEED_10;
+ vars->duplex = DUPLEX_FULL;
+ } else if (legacy_speed == (1<<8)) {
+ vars->line_speed = SPEED_10;
+ vars->duplex = DUPLEX_HALF;
+ } else /* Should not happen */
+ vars->line_speed = 0;
+
+ DP(NETIF_MSG_LINK, "Link is up in %dMbps,"
+ " is_duplex_full= %d\n", vars->line_speed,
+ (vars->duplex == DUPLEX_FULL));
+
+ /* Check legacy speed AN resolution */
+ bnx2x_cl22_read(bp, phy,
+ 0x01,
+ &val);
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+ bnx2x_cl22_read(bp, phy,
+ 0x06,
+ &val);
+ if ((val & (1<<0)) == 0)
+ vars->link_status |=
+ LINK_STATUS_PARALLEL_DETECTION_USED;
+
+ DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n",
+ vars->line_speed);
+
+ /* Report whether EEE is resolved. */
+ bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val);
+ if (val == MDIO_REG_GPHY_ID_54618SE) {
+ if (vars->link_status &
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
+ val = 0;
+ else {
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_ADDR_REG,
+ MDIO_AN_DEVAD);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_DATA_REG,
+ MDIO_REG_GPHY_EEE_RESOLVED);
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_CL45_ADDR_REG,
+ (0x1 << 14) | MDIO_AN_DEVAD);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_CL45_DATA_REG,
+ &val);
+ }
+ DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val);
+ }
+
+ bnx2x_ext_phy_resolve_fc(phy, params, vars);
+ }
+ return link_up;
}
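
bnx2x_54618se_read_status() above treats bit 2 of Clause 22 register 0x19 as link-up and bits 10:8 as the resolved speed/duplex code. A table-driven sketch of the same decode, with the encoding taken from the if/else chain above rather than from a datasheet:

#include <stdio.h>
#include <stdint.h>

struct speed_duplex { int speed_mbps; int full_duplex; };

/* Index = (reg 0x19 >> 8) & 7. Entry 4 (100BASE-T4) is left at 0/0,
 * which the driver skips as well. */
static const struct speed_duplex hcd_map[8] = {
	[1] = {   10, 0 },
	[2] = {   10, 1 },
	[3] = {  100, 0 },
	[5] = {  100, 1 },
	[6] = { 1000, 0 },
	[7] = { 1000, 1 },
};

int main(void)
{
	uint16_t legacy_status = (7 << 8) | (1 << 2);	/* link up, 1000 FD */
	int link_up = !!(legacy_status & (1 << 2));	/* bit 2 = link status */
	struct speed_duplex sd = hcd_map[(legacy_status >> 8) & 7];

	printf("link=%d speed=%dMbps duplex=%s\n",
	       link_up, sd.speed_mbps, sd.full_duplex ? "full" : "half");
	return 0;
}
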
+
+static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 val;
+ u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+
+ DP(NETIF_MSG_LINK, "2PMA/PMD ext_phy_loopback: 54618se\n");
+
+ /* Enable master/slave manual mode and set to master */
+ /* mii write 9 [bits set 11 12] */
+ bnx2x_cl22_write(bp, phy, 0x09, 3<<11);
+
+ /* forced 1G and disable autoneg */
+ /* set val [mii read 0] */
+ /* set val [expr $val & [bits clear 6 12 13]] */
+ /* set val [expr $val | [bits set 6 8]] */
+ /* mii write 0 $val */
+ bnx2x_cl22_read(bp, phy, 0x00, &val);
+ val &= ~((1<<6) | (1<<12) | (1<<13));
+ val |= (1<<6) | (1<<8);
+ bnx2x_cl22_write(bp, phy, 0x00, val);
+
+ /* Set external loopback and Tx using 6dB coding */
+ /* mii write 0x18 7 */
+ /* set val [mii read 0x18] */
+ /* mii write 0x18 [expr $val | [bits set 10 15]] */
+ bnx2x_cl22_write(bp, phy, 0x18, 7);
+ bnx2x_cl22_read(bp, phy, 0x18, &val);
+ bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15));
+
+ /* This register opens the gate for the UMAC despite its name */
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+ /*
+ * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+ * length used by the MAC receive logic to check frames.
+ */
+ REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+}
+
/******************************************************************/
/* SFX7101 PHY SECTION */
/******************************************************************/
@@ -6646,9 +10388,9 @@ static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
}
-static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
u16 fw_ver1, fw_ver2, val;
struct bnx2x *bp = params->bp;
@@ -6662,7 +10404,7 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
@@ -6694,9 +10436,9 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
u8 link_up;
u16 val1, val2;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+ MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
val2, val1);
bnx2x_cl45_read(bp, phy,
@@ -6721,8 +10463,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
return link_up;
}
-
-static u8 bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
if (*len < 5)
return -EINVAL;
@@ -6800,9 +10541,8 @@ static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
static struct bnx2x_phy phy_null = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
.addr = 0,
- .flags = FLAGS_INIT_XGXS_FIRST,
.def_md_devad = 0,
- .reserved = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -6827,9 +10567,8 @@ static struct bnx2x_phy phy_null = {
static struct bnx2x_phy phy_serdes = {
.type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
.addr = 0xff,
- .flags = 0,
.def_md_devad = 0,
- .reserved = 0,
+ .flags = 0,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -6843,14 +10582,14 @@ static struct bnx2x_phy phy_serdes = {
SUPPORTED_Autoneg |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
- .media_type = ETH_PHY_UNSPECIFIED,
+ .media_type = ETH_PHY_BASE_T,
.ver_addr = 0,
.req_flow_ctrl = 0,
.req_line_speed = 0,
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_init_serdes,
+ .config_init = (config_init_t)bnx2x_xgxs_config_init,
.read_status = (read_status_t)bnx2x_link_settings_status,
.link_reset = (link_reset_t)bnx2x_int_link_reset,
.config_loopback = (config_loopback_t)NULL,
@@ -6863,9 +10602,8 @@ static struct bnx2x_phy phy_serdes = {
static struct bnx2x_phy phy_xgxs = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
.addr = 0xff,
- .flags = 0,
.def_md_devad = 0,
- .reserved = 0,
+ .flags = 0,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -6880,14 +10618,14 @@ static struct bnx2x_phy phy_xgxs = {
SUPPORTED_Autoneg |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
- .media_type = ETH_PHY_UNSPECIFIED,
+ .media_type = ETH_PHY_CX4,
.ver_addr = 0,
.req_flow_ctrl = 0,
.req_line_speed = 0,
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t)bnx2x_init_xgxs,
+ .config_init = (config_init_t)bnx2x_xgxs_config_init,
.read_status = (read_status_t)bnx2x_link_settings_status,
.link_reset = (link_reset_t)bnx2x_int_link_reset,
.config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
@@ -6896,13 +10634,50 @@ static struct bnx2x_phy phy_xgxs = {
.set_link_led = (set_link_led_t)NULL,
.phy_specific_func = (phy_specific_func_t)NULL
};
+static struct bnx2x_phy phy_warpcore = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = (FLAGS_HW_LOCK_REQUIRED |
+ FLAGS_TX_ERROR_CHECK),
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_UNSPECIFIED,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ /* req_duplex = */0,
+ /* rsrv = */0,
+ .config_init = (config_init_t)bnx2x_warpcore_config_init,
+ .read_status = (read_status_t)bnx2x_warpcore_read_status,
+ .link_reset = (link_reset_t)bnx2x_warpcore_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset,
+ .set_link_led = (set_link_led_t)NULL,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
static struct bnx2x_phy phy_7101 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
.addr = 0xff,
- .flags = FLAGS_FAN_FAILURE_DET_REQ,
.def_md_devad = 0,
- .reserved = 0,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -6930,9 +10705,8 @@ static struct bnx2x_phy phy_7101 = {
static struct bnx2x_phy phy_8073 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
.addr = 0xff,
- .flags = FLAGS_HW_LOCK_REQUIRED,
.def_md_devad = 0,
- .reserved = 0,
+ .flags = FLAGS_HW_LOCK_REQUIRED,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -6943,7 +10717,7 @@ static struct bnx2x_phy phy_8073 = {
SUPPORTED_Autoneg |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
- .media_type = ETH_PHY_UNSPECIFIED,
+ .media_type = ETH_PHY_KR,
.ver_addr = 0,
.req_flow_ctrl = 0,
.req_line_speed = 0,
@@ -6962,9 +10736,8 @@ static struct bnx2x_phy phy_8073 = {
static struct bnx2x_phy phy_8705 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
.addr = 0xff,
- .flags = FLAGS_INIT_XGXS_FIRST,
.def_md_devad = 0,
- .reserved = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -6991,9 +10764,9 @@ static struct bnx2x_phy phy_8705 = {
static struct bnx2x_phy phy_8706 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
.addr = 0xff,
- .flags = FLAGS_INIT_XGXS_FIRST,
.def_md_devad = 0,
- .reserved = 0,
+ .flags = (FLAGS_INIT_XGXS_FIRST |
+ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -7022,10 +10795,10 @@ static struct bnx2x_phy phy_8706 = {
static struct bnx2x_phy phy_8726 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
.addr = 0xff,
- .flags = (FLAGS_HW_LOCK_REQUIRED |
- FLAGS_INIT_XGXS_FIRST),
.def_md_devad = 0,
- .reserved = 0,
+ .flags = (FLAGS_HW_LOCK_REQUIRED |
+ FLAGS_INIT_XGXS_FIRST |
+ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -7035,7 +10808,7 @@ static struct bnx2x_phy phy_8726 = {
SUPPORTED_FIBRE |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
- .media_type = ETH_PHY_SFP_FIBER,
+ .media_type = ETH_PHY_NOT_PRESENT,
.ver_addr = 0,
.req_flow_ctrl = 0,
.req_line_speed = 0,
@@ -7055,9 +10828,9 @@ static struct bnx2x_phy phy_8726 = {
static struct bnx2x_phy phy_8727 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
.addr = 0xff,
- .flags = FLAGS_FAN_FAILURE_DET_REQ,
.def_md_devad = 0,
- .reserved = 0,
+ .flags = (FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_TX_ERROR_CHECK),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -7066,7 +10839,7 @@ static struct bnx2x_phy phy_8727 = {
SUPPORTED_FIBRE |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
- .media_type = ETH_PHY_SFP_FIBER,
+ .media_type = ETH_PHY_NOT_PRESENT,
.ver_addr = 0,
.req_flow_ctrl = 0,
.req_line_speed = 0,
@@ -7085,10 +10858,9 @@ static struct bnx2x_phy phy_8727 = {
static struct bnx2x_phy phy_8481 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
.addr = 0xff,
+ .def_md_devad = 0,
.flags = FLAGS_FAN_FAILURE_DET_REQ |
FLAGS_REARM_LATCH_SIGNAL,
- .def_md_devad = 0,
- .reserved = 0,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -7122,10 +10894,9 @@ static struct bnx2x_phy phy_8481 = {
static struct bnx2x_phy phy_84823 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
.addr = 0xff,
+ .def_md_devad = 0,
.flags = FLAGS_FAN_FAILURE_DET_REQ |
FLAGS_REARM_LATCH_SIGNAL,
- .def_md_devad = 0,
- .reserved = 0,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -7159,16 +10930,13 @@ static struct bnx2x_phy phy_84823 = {
static struct bnx2x_phy phy_84833 = {
.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
.addr = 0xff,
+ .def_md_devad = 0,
.flags = FLAGS_FAN_FAILURE_DET_REQ |
FLAGS_REARM_LATCH_SIGNAL,
- .def_md_devad = 0,
- .reserved = 0,
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
- .supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
+ .supported = (SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full |
SUPPORTED_10000baseT_Full |
@@ -7188,11 +10956,44 @@ static struct bnx2x_phy phy_84833 = {
.link_reset = (link_reset_t)bnx2x_848x3_link_reset,
.config_loopback = (config_loopback_t)NULL,
.format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
- .hw_reset = (hw_reset_t)NULL,
+ .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
.set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
.phy_specific_func = (phy_specific_func_t)NULL
};
+static struct bnx2x_phy phy_54618se = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
+ .addr = 0xff,
+ .def_md_devad = 0,
+ .flags = FLAGS_INIT_XGXS_FIRST,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ /* req_duplex = */0,
+ /* rsrv = */0,
+ .config_init = (config_init_t)bnx2x_54618se_config_init,
+ .read_status = (read_status_t)bnx2x_54618se_read_status,
+ .link_reset = (link_reset_t)bnx2x_54618se_link_reset,
+ .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
+ .format_fw_ver = (format_fw_ver_t)NULL,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_54618se_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
/*****************************************************************/
/* */
/* Populate the phy accordingly. Main function: bnx2x_populate_phy */
@@ -7259,8 +11060,8 @@ static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
return ext_phy_config;
}
-static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
- struct bnx2x_phy *phy)
+static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
+ struct bnx2x_phy *phy)
{
u32 phy_addr;
u32 chip_id;
@@ -7269,22 +11070,107 @@ static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
dev_info.port_feature_config[port].link_config)) &
PORT_FEATURE_CONNECTED_SWITCH_MASK);
chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
- switch (switch_cfg) {
- case SWITCH_CFG_1G:
- phy_addr = REG_RD(bp,
- NIG_REG_SERDES0_CTRL_PHY_ADDR +
- port * 0x10);
- *phy = phy_serdes;
- break;
- case SWITCH_CFG_10G:
+ DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id);
+ if (USES_WARPCORE(bp)) {
+ u32 serdes_net_if;
phy_addr = REG_RD(bp,
- NIG_REG_XGXS0_CTRL_PHY_ADDR +
- port * 0x18);
- *phy = phy_xgxs;
- break;
- default:
- DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
- return -EINVAL;
+ MISC_REG_WC0_CTRL_PHY_ADDR);
+ *phy = phy_warpcore;
+ if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3)
+ phy->flags |= FLAGS_4_PORT_MODE;
+ else
+ phy->flags &= ~FLAGS_4_PORT_MODE;
+ /* Check Dual mode */
+ serdes_net_if = (REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[port].default_cfg)) &
+ PORT_HW_CFG_NET_SERDES_IF_MASK);
+ /*
+ * Set the appropriate supported and flags indications per
+ * interface type of the chip
+ */
+ switch (serdes_net_if) {
+ case PORT_HW_CFG_NET_SERDES_IF_SGMII:
+ phy->supported &= (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phy->media_type = ETH_PHY_BASE_T;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_XFI:
+ phy->media_type = ETH_PHY_XFP_FIBER;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_SFI:
+ phy->supported &= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ phy->media_type = ETH_PHY_SFP_FIBER;
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_KR:
+ phy->media_type = ETH_PHY_KR;
+ phy->supported &= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+ phy->media_type = ETH_PHY_KR;
+ phy->flags |= FLAGS_WC_DUAL_MODE;
+ phy->supported &= (SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ case PORT_HW_CFG_NET_SERDES_IF_KR2:
+ phy->media_type = ETH_PHY_KR;
+ phy->flags |= FLAGS_WC_DUAL_MODE;
+ phy->supported &= (SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n",
+ serdes_net_if);
+ break;
+ }
+
+ /*
+ * Enable MDC/MDIO work-around for E3 A0 since free running MDC
+ * was not set as expected. For B0, ECO will be enabled so there
+ * won't be an issue there
+ */
+ if (CHIP_REV(bp) == CHIP_REV_Ax)
+ phy->flags |= FLAGS_MDC_MDIO_WA;
+ else
+ phy->flags |= FLAGS_MDC_MDIO_WA_B0;
+ } else {
+ switch (switch_cfg) {
+ case SWITCH_CFG_1G:
+ phy_addr = REG_RD(bp,
+ NIG_REG_SERDES0_CTRL_PHY_ADDR +
+ port * 0x10);
+ *phy = phy_serdes;
+ break;
+ case SWITCH_CFG_10G:
+ phy_addr = REG_RD(bp,
+ NIG_REG_XGXS0_CTRL_PHY_ADDR +
+ port * 0x18);
+ *phy = phy_xgxs;
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
+ return -EINVAL;
+ }
}
phy->addr = (u8)phy_addr;
phy->mdio_ctrl = bnx2x_get_emac_base(bp,
@@ -7302,12 +11188,12 @@ static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
return 0;
}
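
With the warpcore path, bnx2x_populate_int_phy() now starts from the full phy_warpcore.supported superset and ANDs it down per serdes_net_if instead of rebuilding each mask from scratch. A standalone sketch of that shape; the capability bits and interface names are illustrative placeholders for the ethtool SUPPORTED_* flags and the PORT_HW_CFG_NET_SERDES_IF_* values.

#include <stdint.h>
#include <assert.h>

#define CAP_10M_100M	(1u << 0)
#define CAP_1G		(1u << 1)
#define CAP_10G		(1u << 2)
#define CAP_20G		(1u << 3)
#define CAP_AUTONEG	(1u << 4)
#define CAP_PAUSE	(1u << 5)

enum serdes_if { IF_SGMII, IF_SFI, IF_KR, IF_KR2 };

/* Start from the superset and mask it down per interface type, the same
 * shape as the serdes_net_if switch in bnx2x_populate_int_phy(). */
static uint32_t narrow_supported(uint32_t supported, enum serdes_if netif)
{
	switch (netif) {
	case IF_SGMII:
		return supported & (CAP_10M_100M | CAP_1G |
				    CAP_AUTONEG | CAP_PAUSE);
	case IF_SFI:
		return supported & (CAP_1G | CAP_10G | CAP_PAUSE);
	case IF_KR:
		return supported & (CAP_1G | CAP_10G |
				    CAP_AUTONEG | CAP_PAUSE);
	case IF_KR2:
		return supported & (CAP_20G | CAP_PAUSE);
	}
	return supported;	/* unknown type: leave the mask untouched */
}

int main(void)
{
	uint32_t all = CAP_10M_100M | CAP_1G | CAP_10G | CAP_20G |
		       CAP_AUTONEG | CAP_PAUSE;

	assert(narrow_supported(all, IF_SFI) == (CAP_1G | CAP_10G | CAP_PAUSE));
	return 0;
}
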
-static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
- u8 phy_index,
- u32 shmem_base,
- u32 shmem2_base,
- u8 port,
- struct bnx2x_phy *phy)
+static int bnx2x_populate_ext_phy(struct bnx2x *bp,
+ u8 phy_index,
+ u32 shmem_base,
+ u32 shmem2_base,
+ u8 port,
+ struct bnx2x_phy *phy)
{
u32 ext_phy_config, phy_type, config2;
u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
@@ -7336,6 +11222,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
*phy = phy_8727;
phy->flags |= FLAGS_NOC;
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
*phy = phy_8727;
@@ -7349,6 +11236,9 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
*phy = phy_84833;
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
+ *phy = phy_54618se;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
*phy = phy_7101;
break;
@@ -7410,10 +11300,10 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
return 0;
}
-static u8 bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
- u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
+static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
+ u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
{
- u8 status = 0;
+ int status = 0;
phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
if (phy_index == INT_PHY)
return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
@@ -7527,10 +11417,10 @@ u32 bnx2x_phy_selection(struct link_params *params)
}
-u8 bnx2x_phy_probe(struct link_params *params)
+int bnx2x_phy_probe(struct link_params *params)
{
u8 phy_index, actual_phy_idx, link_cfg_idx;
- u32 phy_config_swapped;
+ u32 phy_config_swapped, sync_offset, media_types;
struct bnx2x *bp = params->bp;
struct bnx2x_phy *phy;
params->num_phys = 0;
@@ -7567,6 +11457,26 @@ u8 bnx2x_phy_probe(struct link_params *params)
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
break;
+ sync_offset = params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].media_type);
+ media_types = REG_RD(bp, sync_offset);
+
+ /*
+ * Update media type for non-PMF sync only for the first time.
+ * In case the media type changes afterwards, it will be updated
+ * using the update_status function
+ */
+ if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+ actual_phy_idx))) == 0) {
+ media_types |= ((phy->media_type &
+ PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+ (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+ actual_phy_idx));
+ }
+ REG_WR(bp, sync_offset, media_types);
+
bnx2x_phy_def_cfg(params, phy, phy_index);
params->num_phys++;
}
@@ -7575,77 +11485,10 @@ u8 bnx2x_phy_probe(struct link_params *params)
return 0;
}
-static void set_phy_vars(struct link_params *params)
-{
- struct bnx2x *bp = params->bp;
- u8 actual_phy_idx, phy_index, link_cfg_idx;
- u8 phy_config_swapped = params->multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED;
- for (phy_index = INT_PHY; phy_index < params->num_phys;
- phy_index++) {
- link_cfg_idx = LINK_CONFIG_IDX(phy_index);
- actual_phy_idx = phy_index;
- if (phy_config_swapped) {
- if (phy_index == EXT_PHY1)
- actual_phy_idx = EXT_PHY2;
- else if (phy_index == EXT_PHY2)
- actual_phy_idx = EXT_PHY1;
- }
- params->phy[actual_phy_idx].req_flow_ctrl =
- params->req_flow_ctrl[link_cfg_idx];
-
- params->phy[actual_phy_idx].req_line_speed =
- params->req_line_speed[link_cfg_idx];
-
- params->phy[actual_phy_idx].speed_cap_mask =
- params->speed_cap_mask[link_cfg_idx];
-
- params->phy[actual_phy_idx].req_duplex =
- params->req_duplex[link_cfg_idx];
-
- DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
- " speed_cap_mask %x\n",
- params->phy[actual_phy_idx].req_flow_ctrl,
- params->phy[actual_phy_idx].req_line_speed,
- params->phy[actual_phy_idx].speed_cap_mask);
- }
-}
-
-u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
+void bnx2x_init_bmac_loopback(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- DP(NETIF_MSG_LINK, "Phy Initialization started\n");
- DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
- params->req_line_speed[0], params->req_flow_ctrl[0]);
- DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
- params->req_line_speed[1], params->req_flow_ctrl[1]);
- vars->link_status = 0;
- vars->phy_link_up = 0;
- vars->link_up = 0;
- vars->line_speed = 0;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->mac_type = MAC_TYPE_NONE;
- vars->phy_flags = 0;
-
- /* disable attentions */
- bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
- (NIG_MASK_XGXS0_LINK_STATUS |
- NIG_MASK_XGXS0_LINK10G |
- NIG_MASK_SERDES0_LINK_STATUS |
- NIG_MASK_MI_INT));
-
- bnx2x_emac_init(params, vars);
-
- if (params->num_phys == 0) {
- DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
- return -EINVAL;
- }
- set_phy_vars(params);
-
- DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
- if (params->loopback_mode == LOOPBACK_BMAC) {
-
vars->link_up = 1;
vars->line_speed = SPEED_10000;
vars->duplex = DUPLEX_FULL;
@@ -7660,9 +11503,12 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
bnx2x_bmac_enable(params, vars, 1);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
- } else if (params->loopback_mode == LOOPBACK_EMAC) {
-
+void bnx2x_init_emac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
vars->link_up = 1;
vars->line_speed = SPEED_1000;
vars->duplex = DUPLEX_FULL;
@@ -7676,29 +11522,80 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
bnx2x_emac_enable(params, vars, 1);
bnx2x_emac_program(params, vars);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_xmac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ if (!params->req_line_speed[0])
+ vars->line_speed = SPEED_10000;
+ else
+ vars->line_speed = params->req_line_speed[0];
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_XMAC;
+ vars->phy_flags = PHY_XGXS_FLAG;
+ /*
+ * Set WC to loopback mode since link is required to provide clock
+ * to the XMAC in 20G mode
+ */
+ bnx2x_set_aer_mmd(params, &params->phy[0]);
+ bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
+ params->phy[INT_PHY].config_loopback(
+ &params->phy[INT_PHY],
+ params);
- } else if ((params->loopback_mode == LOOPBACK_XGXS) ||
- (params->loopback_mode == LOOPBACK_EXT_PHY)) {
+ bnx2x_xmac_enable(params, vars, 1);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+void bnx2x_init_umac_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ vars->link_up = 1;
+ vars->line_speed = SPEED_1000;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_UMAC;
+ vars->phy_flags = PHY_XGXS_FLAG;
+ bnx2x_umac_enable(params, vars, 1);
+
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_xgxs_loopback(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
vars->link_up = 1;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
vars->duplex = DUPLEX_FULL;
- if (params->req_line_speed[0] == SPEED_1000) {
+ if (params->req_line_speed[0] == SPEED_1000)
vars->line_speed = SPEED_1000;
- vars->mac_type = MAC_TYPE_EMAC;
- } else {
+ else
vars->line_speed = SPEED_10000;
- vars->mac_type = MAC_TYPE_BMAC;
- }
+ if (!USES_WARPCORE(bp))
bnx2x_xgxs_deassert(params);
- bnx2x_link_initialize(params, vars);
+ bnx2x_link_initialize(params, vars);
- if (params->req_line_speed[0] == SPEED_1000) {
+ if (params->req_line_speed[0] == SPEED_1000) {
+ if (USES_WARPCORE(bp))
+ bnx2x_umac_enable(params, vars, 0);
+ else {
bnx2x_emac_program(params, vars);
bnx2x_emac_enable(params, vars, 0);
- } else
+ }
+ } else {
+ if (USES_WARPCORE(bp))
+ bnx2x_xmac_enable(params, vars, 0);
+ else
bnx2x_bmac_enable(params, vars, 0);
+ }
+
if (params->loopback_mode == LOOPBACK_XGXS) {
/* set 10G XGXS loopback */
params->phy[INT_PHY].config_loopback(
@@ -7718,24 +11615,76 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
}
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
- bnx2x_set_led(params, vars,
- LED_MODE_OPER, vars->line_speed);
- } else
- /* No loopback */
- {
- if (params->switch_cfg == SWITCH_CFG_10G)
- bnx2x_xgxs_deassert(params);
- else
- bnx2x_serdes_deassert(bp, params->port);
+ bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
+}
+
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "Phy Initialization started\n");
+ DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
+ params->req_line_speed[0], params->req_flow_ctrl[0]);
+ DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
+ params->req_line_speed[1], params->req_flow_ctrl[1]);
+ vars->link_status = 0;
+ vars->phy_link_up = 0;
+ vars->link_up = 0;
+ vars->line_speed = 0;
+ vars->duplex = DUPLEX_FULL;
+ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ vars->mac_type = MAC_TYPE_NONE;
+ vars->phy_flags = 0;
+
+ /* disable attentions */
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ bnx2x_emac_init(params, vars);
+ if (params->num_phys == 0) {
+ DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
+ return -EINVAL;
+ }
+ set_phy_vars(params, vars);
+
+ DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
+ switch (params->loopback_mode) {
+ case LOOPBACK_BMAC:
+ bnx2x_init_bmac_loopback(params, vars);
+ break;
+ case LOOPBACK_EMAC:
+ bnx2x_init_emac_loopback(params, vars);
+ break;
+ case LOOPBACK_XMAC:
+ bnx2x_init_xmac_loopback(params, vars);
+ break;
+ case LOOPBACK_UMAC:
+ bnx2x_init_umac_loopback(params, vars);
+ break;
+ case LOOPBACK_XGXS:
+ case LOOPBACK_EXT_PHY:
+ bnx2x_init_xgxs_loopback(params, vars);
+ break;
+ default:
+ if (!CHIP_IS_E3(bp)) {
+ if (params->switch_cfg == SWITCH_CFG_10G)
+ bnx2x_xgxs_deassert(params);
+ else
+ bnx2x_serdes_deassert(bp, params->port);
+ }
bnx2x_link_initialize(params, vars);
msleep(30);
bnx2x_link_int_enable(params);
+ break;
}
return 0;
}
-u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
- u8 reset_ext_phy)
+
+int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+ u8 reset_ext_phy)
{
struct bnx2x *bp = params->bp;
u8 phy_index, port = params->port, clear_latch_ind = 0;
@@ -7753,14 +11702,19 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
/* disable nig egress interface */
- REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
- REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+ if (!CHIP_IS_E3(bp)) {
+ REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+ }
/* Stop BigMac rx */
- bnx2x_bmac_rx_disable(bp, port);
-
+ if (!CHIP_IS_E3(bp))
+ bnx2x_bmac_rx_disable(bp, port);
+ else
+ bnx2x_xmac_disable(params);
/* disable emac */
- REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
msleep(10);
/* The PHY reset is controlled by GPIO 1
@@ -7770,12 +11724,16 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
if (reset_ext_phy) {
+ bnx2x_set_mdio_clk(bp, params->chip_id, port);
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
- if (params->phy[phy_index].link_reset)
+ if (params->phy[phy_index].link_reset) {
+ bnx2x_set_aer_mmd(params,
+ &params->phy[phy_index]);
params->phy[phy_index].link_reset(
&params->phy[phy_index],
params);
+ }
if (params->phy[phy_index].flags &
FLAGS_REARM_LATCH_SIGNAL)
clear_latch_ind = 1;
@@ -7796,21 +11754,22 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
/* disable nig ingress interface */
- REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
- REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
- REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
- REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+ if (!CHIP_IS_E3(bp)) {
+ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
+ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
+ }
vars->link_up = 0;
+ vars->phy_flags = 0;
return 0;
}
/****************************************************************************/
/* Common function */
/****************************************************************************/
-static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
- u32 shmem_base_path[],
- u32 shmem2_base_path[], u8 phy_index,
- u32 chip_id)
+static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
{
struct bnx2x_phy phy[PORT_MAX];
struct bnx2x_phy *phy_blk[PORT_MAX];
@@ -7826,14 +11785,14 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u32 shmem_base, shmem2_base;
/* In E2, the same phy is used for port0 of the two paths */
- if (CHIP_IS_E2(bp)) {
- shmem_base = shmem_base_path[port];
- shmem2_base = shmem2_base_path[port];
- port_of_path = 0;
- } else {
+ if (CHIP_IS_E1x(bp)) {
shmem_base = shmem_base_path[0];
shmem2_base = shmem2_base_path[0];
port_of_path = port;
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ port_of_path = 0;
}
/* Extract the ext phy address for the port */
@@ -7877,10 +11836,10 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
/* PART2 - Download firmware to both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
- if (CHIP_IS_E2(bp))
- port_of_path = 0;
- else
+ if (CHIP_IS_E1x(bp))
port_of_path = port;
+ else
+ port_of_path = 0;
DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
phy_blk[port]->addr);
@@ -7933,10 +11892,10 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
}
return 0;
}
-static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
- u32 shmem_base_path[],
- u32 shmem2_base_path[], u8 phy_index,
- u32 chip_id)
+static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
{
u32 val;
s8 port;
@@ -7954,12 +11913,12 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
u32 shmem_base, shmem2_base;
/* In E2, the same phy is used for port0 of the two paths */
- if (CHIP_IS_E2(bp)) {
- shmem_base = shmem_base_path[port];
- shmem2_base = shmem2_base_path[port];
- } else {
+ if (CHIP_IS_E1x(bp)) {
shmem_base = shmem_base_path[0];
shmem2_base = shmem2_base_path[0];
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
}
/* Extract the ext phy address for the port */
if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
@@ -8027,10 +11986,11 @@ static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
break;
}
}
-static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
- u32 shmem_base_path[],
- u32 shmem2_base_path[], u8 phy_index,
- u32 chip_id)
+
+static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 chip_id)
{
s8 port, reset_gpio;
u32 swap_val, swap_override;
@@ -8067,14 +12027,14 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
u32 shmem_base, shmem2_base;
/* In E2, the same phy is used for port0 of the two paths */
- if (CHIP_IS_E2(bp)) {
- shmem_base = shmem_base_path[port];
- shmem2_base = shmem2_base_path[port];
- port_of_path = 0;
- } else {
+ if (CHIP_IS_E1x(bp)) {
shmem_base = shmem_base_path[0];
shmem2_base = shmem2_base_path[0];
port_of_path = port;
+ } else {
+ shmem_base = shmem_base_path[port];
+ shmem2_base = shmem2_base_path[port];
+ port_of_path = 0;
}
/* Extract the ext phy address for the port */
@@ -8109,25 +12069,29 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
}
/* PART2 - Download firmware to both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
- if (CHIP_IS_E2(bp))
- port_of_path = 0;
- else
+ if (CHIP_IS_E1x(bp))
port_of_path = port;
+ else
+ port_of_path = 0;
DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
phy_blk[port]->addr);
if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
port_of_path))
return -EINVAL;
+ /* Disable PHY transmitter output */
+ bnx2x_cl45_write(bp, phy_blk[port],
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_DISABLE, 1);
}
return 0;
}
-static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
- u32 shmem2_base_path[], u8 phy_index,
- u32 ext_phy_type, u32 chip_id)
+static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
+ u32 shmem2_base_path[], u8 phy_index,
+ u32 ext_phy_type, u32 chip_id)
{
- u8 rc = 0;
+ int rc = 0;
switch (ext_phy_type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
@@ -8135,7 +12099,7 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
shmem2_base_path,
phy_index, chip_id);
break;
-
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
@@ -8152,6 +12116,13 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
shmem2_base_path,
phy_index, chip_id);
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+ /*
+ * GPIO3's are linked, and so both need to be toggled
+ * to obtain the required 2us pulse.
+ */
+ rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
rc = -EINVAL;
break;
@@ -8169,15 +12140,21 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
return rc;
}
-u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
- u32 shmem2_base_path[], u32 chip_id)
+int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+ u32 shmem2_base_path[], u32 chip_id)
{
- u8 rc = 0;
- u32 phy_ver;
- u8 phy_index;
+ int rc = 0;
+ u32 phy_ver, val;
+ u8 phy_index = 0;
u32 ext_phy_type, ext_phy_config;
+ bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
+ bnx2x_set_mdio_clk(bp, chip_id, PORT_1);
DP(NETIF_MSG_LINK, "Begin common phy init\n");
-
+ if (CHIP_IS_E3(bp)) {
+ /* Enable EPIO */
+ val = REG_RD(bp, MISC_REG_GEN_PURP_HWG);
+ REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1);
+ }
/* Check if common init was already done */
phy_ver = REG_RD(bp, shmem_base_path[0] +
offsetof(struct shmem_region,
@@ -8203,6 +12180,167 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
return rc;
}
+static void bnx2x_check_over_curr(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin;
+ u8 port = params->port;
+ u32 pin_val;
+
+ cfg_pin = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) &
+ PORT_HW_CFG_E3_OVER_CURRENT_MASK) >>
+ PORT_HW_CFG_E3_OVER_CURRENT_SHIFT;
+
+ /* Ignore check if no external input PIN available */
+ if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0)
+ return;
+
+ if (!pin_val) {
+ if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) {
+ netdev_err(bp->dev, "Error: Power fault on Port %d has"
+ " been detected and the power to "
+ "that SFP+ module has been removed"
+ " to prevent failure of the card."
+ " Please remove the SFP+ module and"
+ " restart the system to clear this"
+ " error.\n",
+ params->port);
+ vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
+ }
+ } else
+ vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
+}
+
+static void bnx2x_analyze_link_error(struct link_params *params,
+ struct link_vars *vars, u32 lss_status)
+{
+ struct bnx2x *bp = params->bp;
+ /* Compare new value with previous value */
+ u8 led_mode;
+ u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
+
+ if ((lss_status ^ half_open_conn) == 0)
+ return;
+
+ /* If values differ */
+ DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
+ half_open_conn, lss_status);
+
+ /*
+ * a. Update shmem->link_status accordingly
+ * b. Update link_vars->link_up
+ */
+ if (lss_status) {
+ DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
+ vars->link_status &= ~LINK_STATUS_LINK_UP;
+ vars->link_up = 0;
+ vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ /*
+ * Set LED mode to off since the PHY doesn't know about these
+ * errors
+ */
+ led_mode = LED_MODE_OFF;
+ } else {
+ DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
+ vars->link_status |= LINK_STATUS_LINK_UP;
+ vars->link_up = 1;
+ vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+ led_mode = LED_MODE_OPER;
+ }
+ /* Update the LED according to the link state */
+ bnx2x_set_led(params, vars, led_mode, SPEED_10000);
+
+ /* Update link status in the shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+
+ /* C. Trigger General Attention */
+ vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
+ bnx2x_notify_link_changed(bp);
+}
+
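
For reference, the change-detection at the top of bnx2x_analyze_link_error() above boils down to XOR-ing the freshly sampled LSS status with the cached PHY_HALF_OPEN_CONN_FLAG and bailing out when they agree. A minimal user-space sketch of that idiom (fault_state_changed() and its flag handling are illustrative only, not driver code):

#include <stdio.h>

/* Illustrative flag bit; the driver uses PHY_HALF_OPEN_CONN_FLAG (1<<3). */
#define HALF_OPEN_FLAG (1 << 3)

/* Return 1 when the fault indication changed, updating the cached flags. */
static int fault_state_changed(unsigned int *flags, unsigned int lss_status)
{
	unsigned int cached = (*flags & HALF_OPEN_FLAG) ? 1 : 0;

	if ((lss_status ^ cached) == 0)
		return 0;			/* no change, nothing to do */

	if (lss_status)
		*flags |= HALF_OPEN_FLAG;	/* remote fault detected */
	else
		*flags &= ~HALF_OPEN_FLAG;	/* remote fault cleared */
	return 1;
}

int main(void)
{
	unsigned int flags = 0;

	printf("%d\n", fault_state_changed(&flags, 1));	/* 1: fault raised  */
	printf("%d\n", fault_state_changed(&flags, 1));	/* 0: still faulted */
	printf("%d\n", fault_state_changed(&flags, 0));	/* 1: fault cleared */
	return 0;
}
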
+/******************************************************************************
+* Description:
+* This function checks for a half-opened connection change indication.
+* When such a change occurs, it calls bnx2x_analyze_link_error() to check
+* whether Remote Fault is set or cleared. Reception of a remote fault
+* status message in the MAC indicates that the peer's MAC has detected
+* a fault, for example, due to a break in the TX side of the fiber.
+*
+******************************************************************************/
+static void bnx2x_check_half_open_conn(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 lss_status = 0;
+ u32 mac_base;
+ /* In case link status is physically up @ 10G do */
+ if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+ return;
+
+ if (CHIP_IS_E3(bp) &&
+ (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_XMAC))) {
+ /* Check E3 XMAC */
+ /*
+ * Note that link speed cannot be queried here, since it may be
+ * zero while link is down. In case UMAC is active, LSS will
+ * simply not be set
+ */
+ mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ /* Clear stick bits (Requires rising edge) */
+ REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+ REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+ XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+ if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
+ lss_status = 1;
+
+ bnx2x_analyze_link_error(params, vars, lss_status);
+ } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
+ /* Check E1X / E2 BMAC */
+ u32 lss_status_reg;
+ u32 wb_data[2];
+ mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM;
+ /* Read BIGMAC_REGISTER_RX_LSS_STATUS */
+ if (CHIP_IS_E2(bp))
+ lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT;
+ else
+ lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS;
+
+ REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
+ lss_status = (wb_data[0] > 0);
+
+ bnx2x_analyze_link_error(params, vars, lss_status);
+ }
+}
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u16 phy_idx;
+ if (!params) {
+ DP(NETIF_MSG_LINK, "Uninitialized params !\n");
+ return;
+ }
+
+ for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+ if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+ bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
+ bnx2x_check_half_open_conn(params, vars);
+ break;
+ }
+ }
+
+ if (CHIP_IS_E3(bp))
+ bnx2x_check_over_curr(params, vars);
+}
+
u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
{
u8 phy_index;
@@ -8245,7 +12383,15 @@ u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
void bnx2x_hw_reset_phy(struct link_params *params)
{
u8 phy_index;
- for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ struct bnx2x *bp = params->bp;
+ bnx2x_update_mng(params, 0);
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
+
+ for (phy_index = INT_PHY; phy_index < MAX_PHYS;
phy_index++) {
if (params->phy[phy_index].hw_reset) {
params->phy[phy_index].hw_reset(
@@ -8255,3 +12401,72 @@ void bnx2x_hw_reset_phy(struct link_params *params)
}
}
}
+
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+ u32 chip_id, u32 shmem_base, u32 shmem2_base,
+ u8 port)
+{
+ u8 gpio_num = 0xff, gpio_port = 0xff, phy_index;
+ u32 val;
+ u32 offset, aeu_mask, swap_val, swap_override, sync_offset;
+ if (CHIP_IS_E3(bp)) {
+ if (bnx2x_get_mod_abs_int_cfg(bp, chip_id,
+ shmem_base,
+ port,
+ &gpio_num,
+ &gpio_port) != 0)
+ return;
+ } else {
+ struct bnx2x_phy phy;
+ for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+ phy_index++) {
+ if (bnx2x_populate_phy(bp, phy_index, shmem_base,
+ shmem2_base, port, &phy)
+ != 0) {
+ DP(NETIF_MSG_LINK, "populate phy failed\n");
+ return;
+ }
+ if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
+ gpio_num = MISC_REGISTERS_GPIO_3;
+ gpio_port = port;
+ break;
+ }
+ }
+ }
+
+ if (gpio_num == 0xff)
+ return;
+
+ /* Set GPIO3 to trigger SFP+ module insertion/removal */
+ bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port);
+
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ gpio_port ^= (swap_val && swap_override);
+
+ vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 <<
+ (gpio_num + (gpio_port << 2));
+
+ sync_offset = shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].aeu_int_mask);
+ REG_WR(bp, sync_offset, vars->aeu_int_mask);
+
+ DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n",
+ gpio_num, gpio_port, vars->aeu_int_mask);
+
+ if (port == 0)
+ offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+ else
+ offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
+
+ /* Open appropriate AEU for interrupts */
+ aeu_mask = REG_RD(bp, offset);
+ aeu_mask |= vars->aeu_int_mask;
+ REG_WR(bp, offset, aeu_mask);
+
+ /* Enable the GPIO to trigger interrupt */
+ val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+ val |= 1 << (gpio_num + (gpio_port << 2));
+ REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+}
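
The AEU mask and the GPIO event-enable bit above are both derived from the same bit offset, gpio_num + (gpio_port << 2), i.e. four GPIO bits per port. A small standalone sketch of that arithmetic, assuming MISC_REGISTERS_GPIO_3 evaluates to 3 (the AEU base masks themselves are not reproduced here):

#include <stdio.h>

/* Bit offset used both for the AEU mask shift and MISC_REG_GPIO_EVENT_EN:
 * four GPIO bits per port, so bit = gpio_num + 4 * gpio_port.
 */
static unsigned int mod_abs_bit(unsigned int gpio_num, unsigned int gpio_port)
{
	return gpio_num + (gpio_port << 2);
}

int main(void)
{
	/* GPIO3 (the non-E3 default for the BCM8726 case) on each port */
	printf("port0 -> bit %u\n", mod_abs_bit(3, 0));	/* 3 */
	printf("port1 -> bit %u\n", mod_abs_bit(3, 1));	/* 7 */
	return 0;
}
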
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 92f36b6950d..c12db6da213 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -33,12 +33,13 @@
#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
+#define NET_SERDES_IF_XFI 1
+#define NET_SERDES_IF_SFI 2
+#define NET_SERDES_IF_KR 3
+#define NET_SERDES_IF_DXGXS 4
+
#define SPEED_AUTO_NEG 0
-#define SPEED_12000 12000
-#define SPEED_12500 12500
-#define SPEED_13000 13000
-#define SPEED_15000 15000
-#define SPEED_16000 16000
+#define SPEED_20000 20000
#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
@@ -46,6 +47,12 @@
#define SFP_EEPROM_VENDOR_OUI_SIZE 3
#define SFP_EEPROM_PART_NO_ADDR 0x28
#define SFP_EEPROM_PART_NO_SIZE 16
+#define SFP_EEPROM_REVISION_ADDR 0x38
+#define SFP_EEPROM_REVISION_SIZE 4
+#define SFP_EEPROM_SERIAL_ADDR 0x44
+#define SFP_EEPROM_SERIAL_SIZE 16
+#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
+#define SFP_EEPROM_DATE_SIZE 6
#define PWR_FLT_ERR_MSG_LEN 250
#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -62,25 +69,26 @@
#define SINGLE_MEDIA(params) (params->num_phys == 2)
/* Dual Media board contains two external phy with different media */
#define DUAL_MEDIA(params) (params->num_phys == 3)
+
+#define FW_PARAM_PHY_ADDR_MASK 0x000000FF
+#define FW_PARAM_PHY_TYPE_MASK 0x0000FF00
+#define FW_PARAM_MDIO_CTRL_MASK 0xFFFF0000
#define FW_PARAM_MDIO_CTRL_OFFSET 16
+#define FW_PARAM_PHY_ADDR(fw_param) (fw_param & \
+ FW_PARAM_PHY_ADDR_MASK)
+#define FW_PARAM_PHY_TYPE(fw_param) (fw_param & \
+ FW_PARAM_PHY_TYPE_MASK)
+#define FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \
+ FW_PARAM_MDIO_CTRL_MASK) >> \
+ FW_PARAM_MDIO_CTRL_OFFSET)
#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
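
These macros pack the PHY address (bits 0-7), PHY type (bits 8-15) and MDIO control (bits 16-31) into one firmware parameter word. A self-contained sketch that reuses the definitions above to round-trip an example value (the numbers in main() are illustrative, not taken from any board configuration):

#include <stdio.h>
#include <stdint.h>

#define FW_PARAM_PHY_ADDR_MASK		0x000000FF
#define FW_PARAM_PHY_TYPE_MASK		0x0000FF00
#define FW_PARAM_MDIO_CTRL_MASK		0xFFFF0000
#define FW_PARAM_MDIO_CTRL_OFFSET	16
#define FW_PARAM_PHY_ADDR(fw_param)	((fw_param) & FW_PARAM_PHY_ADDR_MASK)
#define FW_PARAM_PHY_TYPE(fw_param)	((fw_param) & FW_PARAM_PHY_TYPE_MASK)
#define FW_PARAM_MDIO_CTRL(fw_param)	(((fw_param) & FW_PARAM_MDIO_CTRL_MASK) >> \
					 FW_PARAM_MDIO_CTRL_OFFSET)
#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
	((phy_addr) | (phy_type) | ((mdio_access) << FW_PARAM_MDIO_CTRL_OFFSET))

int main(void)
{
	/* Example values only: address 0x1b, type field 0x0800, MDIO ctrl 0x2f60 */
	uint32_t fw_param = FW_PARAM_SET(0x1bu, 0x0800u, 0x2f60u);

	printf("fw_param  = 0x%08x\n", fw_param);		/* 0x2f60081b */
	printf("phy addr  = 0x%02x\n", FW_PARAM_PHY_ADDR(fw_param));
	printf("phy type  = 0x%04x\n", FW_PARAM_PHY_TYPE(fw_param));
	printf("mdio ctrl = 0x%04x\n", FW_PARAM_MDIO_CTRL(fw_param));
	return 0;
}
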
-#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE 170
-#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE 0
-
-#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE 250
-#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE 0
-
-#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE 10
-#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE 90
-
-#define PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE 50
-#define PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE 250
#define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170
#define PFC_BRB_FULL_LB_XON_THRESHOLD 250
+#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
/***********************************************************/
/* Structs */
/***********************************************************/
@@ -121,8 +129,8 @@ struct bnx2x_phy {
/* Loaded during init */
u8 addr;
-
- u8 flags;
+ u8 def_md_devad;
+ u16 flags;
/* Require HW lock */
#define FLAGS_HW_LOCK_REQUIRED (1<<0)
/* No Over-Current detection */
@@ -131,11 +139,15 @@ struct bnx2x_phy {
#define FLAGS_FAN_FAILURE_DET_REQ (1<<2)
/* Initialize first the XGXS and only then the phy itself */
#define FLAGS_INIT_XGXS_FIRST (1<<3)
+#define FLAGS_WC_DUAL_MODE (1<<4)
+#define FLAGS_4_PORT_MODE (1<<5)
#define FLAGS_REARM_LATCH_SIGNAL (1<<6)
#define FLAGS_SFP_NOT_APPROVED (1<<7)
+#define FLAGS_MDC_MDIO_WA (1<<8)
+#define FLAGS_DUMMY_READ (1<<9)
+#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
+#define FLAGS_TX_ERROR_CHECK (1<<12)
- u8 def_md_devad;
- u8 reserved;
/* preemphasis values for the rx side */
u16 rx_preemphasis[4];
@@ -153,6 +165,8 @@ struct bnx2x_phy {
#define ETH_PHY_XFP_FIBER 0x2
#define ETH_PHY_DA_TWINAX 0x3
#define ETH_PHY_BASE_T 0x4
+#define ETH_PHY_KR 0xf0
+#define ETH_PHY_CX4 0xf1
#define ETH_PHY_NOT_PRESENT 0xff
/* The address in which version is located*/
@@ -238,6 +252,8 @@ struct link_params {
#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
+#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
+#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
/* Will be populated during common init */
struct bnx2x_phy phy[MAX_PHYS];
@@ -257,11 +273,18 @@ struct link_params {
/* Output parameters */
struct link_vars {
u8 phy_flags;
+#define PHY_XGXS_FLAG (1<<0)
+#define PHY_SGMII_FLAG (1<<1)
+#define PHY_PHYSICAL_LINK_FLAG (1<<2)
+#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
+#define PHY_OVER_CURRENT_FLAG (1<<4)
u8 mac_type;
#define MAC_TYPE_NONE 0
#define MAC_TYPE_EMAC 1
#define MAC_TYPE_BMAC 2
+#define MAC_TYPE_UMAC 3
+#define MAC_TYPE_XMAC 4
u8 phy_link_up; /* internal phy link indication */
u8 link_up;
@@ -274,45 +297,52 @@ struct link_vars {
/* The same definitions as the shmem parameter */
u32 link_status;
+ u8 fault_detected;
+ u8 rsrv1;
+ u16 periodic_flags;
+#define PERIODIC_FLAGS_LINK_EVENT 0x0001
+
+ u32 aeu_int_mask;
};
/***********************************************************/
/* Functions */
/***********************************************************/
-u8 bnx2x_phy_init(struct link_params *input, struct link_vars *output);
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
/* Reset the link. Should be called when driver or interface goes down
Before calling phy firmware upgrade, the reset_ext_phy should be set
to 0 */
-u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
- u8 reset_ext_phy);
+int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+ u8 reset_ext_phy);
/* bnx2x_link_update should be called upon link interrupt */
-u8 bnx2x_link_update(struct link_params *input, struct link_vars *output);
+int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
/* use the following phy functions to read/write from external_phy
In order to use it to read/write internal phy registers, use
DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
the register */
-u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
- u8 devad, u16 reg, u16 *ret_val);
+int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 *ret_val);
+
+int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+ u8 devad, u16 reg, u16 val);
-u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
- u8 devad, u16 reg, u16 val);
/* Reads the link_status from the shmem,
and update the link vars accordingly */
void bnx2x_link_status_update(struct link_params *input,
struct link_vars *output);
/* returns string representing the fw_version of the external phy */
-u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
- u8 *version, u16 len);
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
+ u8 *version, u16 len);
/* Set/Unset the led
Basically, the CLC takes care of the led for the link, but in case one needs
to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
blink the led, and LED_MODE_OFF to set the led off.*/
-u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
- u8 mode, u32 speed);
+int bnx2x_set_led(struct link_params *params,
+ struct link_vars *vars, u8 mode, u32 speed);
#define LED_MODE_OFF 0
#define LED_MODE_ON 1
#define LED_MODE_OPER 2
@@ -324,12 +354,12 @@ void bnx2x_handle_module_detect_int(struct link_params *params);
/* Get the actual link status. In case it returns 0, link is up,
otherwise link is down*/
-u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars,
- u8 is_serdes);
+int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+ u8 is_serdes);
/* One-time initialization for external phy after power up */
-u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
- u32 shmem2_base_path[], u32 chip_id);
+int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+ u32 shmem2_base_path[], u32 chip_id);
/* Reset the external PHY using GPIO */
void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
@@ -338,9 +368,9 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf);
+int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf);
void bnx2x_hw_reset_phy(struct link_params *params);
@@ -352,11 +382,28 @@ u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
u32 bnx2x_phy_selection(struct link_params *params);
/* Probe the phys on board, and populate them in "params" */
-u8 bnx2x_phy_probe(struct link_params *params);
+int bnx2x_phy_probe(struct link_params *params);
+
/* Checks if fan failure detection is required on one of the phys on board */
u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base, u8 port);
+
+
+/* DCBX structs */
+
+/* Number of maximum COS per chip */
+#define DCBX_E2E3_MAX_NUM_COS (2)
+#define DCBX_E3B0_MAX_NUM_COS_PORT0 (6)
+#define DCBX_E3B0_MAX_NUM_COS_PORT1 (3)
+#define DCBX_E3B0_MAX_NUM_COS ( \
+ MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \
+ DCBX_E3B0_MAX_NUM_COS_PORT1))
+
+#define DCBX_MAX_NUM_COS ( \
+ MAXVAL(DCBX_E3B0_MAX_NUM_COS, \
+ DCBX_E2E3_MAX_NUM_COS))
+
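
With the values above, DCBX_E3B0_MAX_NUM_COS resolves to MAXVAL(6, 3) = 6 and DCBX_MAX_NUM_COS to MAXVAL(6, 2) = 6. A trivial standalone check of that arithmetic:

#include <stdio.h>

#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))

#define DCBX_E2E3_MAX_NUM_COS		(2)
#define DCBX_E3B0_MAX_NUM_COS_PORT0	(6)
#define DCBX_E3B0_MAX_NUM_COS_PORT1	(3)
#define DCBX_E3B0_MAX_NUM_COS \
	(MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, DCBX_E3B0_MAX_NUM_COS_PORT1))
#define DCBX_MAX_NUM_COS \
	(MAXVAL(DCBX_E3B0_MAX_NUM_COS, DCBX_E2E3_MAX_NUM_COS))

int main(void)
{
	printf("E3B0 max COS : %d\n", DCBX_E3B0_MAX_NUM_COS);	/* 6 */
	printf("overall max  : %d\n", DCBX_MAX_NUM_COS);	/* 6 */
	return 0;
}
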
/* PFC port configuration params */
struct bnx2x_nig_brb_pfc_port_params {
/* NIG */
@@ -364,8 +411,8 @@ struct bnx2x_nig_brb_pfc_port_params {
u32 llfc_out_en;
u32 llfc_enable;
u32 pkt_priority_to_cos;
- u32 rx_cos0_priority_mask;
- u32 rx_cos1_priority_mask;
+ u8 num_of_rx_cos_priority_mask;
+ u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
u32 llfc_high_priority_classes;
u32 llfc_low_priority_classes;
/* BRB */
@@ -373,27 +420,74 @@ struct bnx2x_nig_brb_pfc_port_params {
u32 cos1_pauseable;
};
+
+/* ETS port configuration params */
+struct bnx2x_ets_bw_params {
+ u8 bw;
+};
+
+struct bnx2x_ets_sp_params {
+ /**
+ * Valid values are 0 - 5, where 0 is the highest strict priority.
+ * No two COSes may have the same priority.
+ */
+ u8 pri;
+};
+
+enum bnx2x_cos_state {
+ bnx2x_cos_state_strict = 0,
+ bnx2x_cos_state_bw = 1,
+};
+
+struct bnx2x_ets_cos_params {
+ enum bnx2x_cos_state state;
+ union {
+ struct bnx2x_ets_bw_params bw_params;
+ struct bnx2x_ets_sp_params sp_params;
+ } params;
+};
+
+struct bnx2x_ets_params {
+ u8 num_of_cos; /* Number of valid COS entries*/
+ struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
+};
+
/**
* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
* when link is already up
*/
-void bnx2x_update_pfc(struct link_params *params,
+int bnx2x_update_pfc(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *pfc_params);
/* Used to configure the ETS to disable */
-void bnx2x_ets_disabled(struct link_params *params);
+int bnx2x_ets_disabled(struct link_params *params,
+ struct link_vars *vars);
/* Used to configure the ETS to BW limited */
void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
const u32 cos1_bw);
/* Used to configure the ETS to strict */
-u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
+int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
+
+/* Configure the COS to ETS according to BW and SP settings.*/
+int bnx2x_ets_e3b0_config(const struct link_params *params,
+ const struct link_vars *vars,
+ const struct bnx2x_ets_params *ets_params);
/* Read pfc statistic*/
void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
u32 pfc_frames_sent[2],
u32 pfc_frames_received[2]);
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+ u32 chip_id, u32 shmem_base, u32 shmem2_base,
+ u8 port);
+
+int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+ struct link_params *params);
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
+
#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 4b70311a11e..f74582a22c6 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -39,6 +39,7 @@
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
+#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
@@ -49,13 +50,14 @@
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>
+#include <linux/vmalloc.h>
-#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"
+#include "bnx2x_sp.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
@@ -73,12 +75,14 @@
#define TX_TIMEOUT (5*HZ)
static char version[] __devinitdata =
- "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
+ "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
- "BCM57710/57711/57711E/57712/57712E Driver");
+ "BCM57710/57711/57711E/"
+ "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
+ "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
@@ -99,9 +103,11 @@ static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
+#define INT_MODE_INTx 1
+#define INT_MODE_MSI 2
static int int_mode;
module_param(int_mode, int, 0);
-MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
static int dropless_fc;
@@ -120,37 +126,87 @@ static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
-static struct workqueue_struct *bnx2x_wq;
-#ifdef BCM_CNIC
-static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
-#endif
+
+struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
BCM57710 = 0,
- BCM57711 = 1,
- BCM57711E = 2,
- BCM57712 = 3,
- BCM57712E = 4
+ BCM57711,
+ BCM57711E,
+ BCM57712,
+ BCM57712_MF,
+ BCM57800,
+ BCM57800_MF,
+ BCM57810,
+ BCM57810_MF,
+ BCM57840,
+ BCM57840_MF
};
/* indexed by board_type, above */
static struct {
char *name;
} board_info[] __devinitdata = {
- { "Broadcom NetXtreme II BCM57710 XGb" },
- { "Broadcom NetXtreme II BCM57711 XGb" },
- { "Broadcom NetXtreme II BCM57711E XGb" },
- { "Broadcom NetXtreme II BCM57712 XGb" },
- { "Broadcom NetXtreme II BCM57712E XGb" }
+ { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
+ { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
+ { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
+ { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
+ { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
+ { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
+ { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 10/20 Gigabit "
+ "Ethernet Multi Function"}
};
+#ifndef PCI_DEVICE_ID_NX2_57710
+#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711
+#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711E
+#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712
+#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_MF
+#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800
+#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_MF
+#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810
+#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_MF
+#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840
+#define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MF
+#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
+#endif
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
- { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
{ 0 }
};
@@ -167,48 +223,6 @@ static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
REG_WR(bp, addr + 4, U64_HI(mapping));
}
-static inline void __storm_memset_fill(struct bnx2x *bp,
- u32 addr, size_t size, u32 val)
-{
- int i;
- for (i = 0; i < size/4; i++)
- REG_WR(bp, addr + (i * 4), val);
-}
-
-static inline void storm_memset_ustats_zero(struct bnx2x *bp,
- u8 port, u16 stat_id)
-{
- size_t size = sizeof(struct ustorm_per_client_stats);
-
- u32 addr = BAR_USTRORM_INTMEM +
- USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
-
- __storm_memset_fill(bp, addr, size, 0);
-}
-
-static inline void storm_memset_tstats_zero(struct bnx2x *bp,
- u8 port, u16 stat_id)
-{
- size_t size = sizeof(struct tstorm_per_client_stats);
-
- u32 addr = BAR_TSTRORM_INTMEM +
- TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
-
- __storm_memset_fill(bp, addr, size, 0);
-}
-
-static inline void storm_memset_xstats_zero(struct bnx2x *bp,
- u8 port, u16 stat_id)
-{
- size_t size = sizeof(struct xstorm_per_client_stats);
-
- u32 addr = BAR_XSTRORM_INTMEM +
- XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
-
- __storm_memset_fill(bp, addr, size, 0);
-}
-
-
static inline void storm_memset_spq_addr(struct bnx2x *bp,
dma_addr_t mapping, u16 abs_fid)
{
@@ -218,103 +232,6 @@ static inline void storm_memset_spq_addr(struct bnx2x *bp,
__storm_memset_dma_mapping(bp, addr, mapping);
}
-static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
-{
- REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
-}
-
-static inline void storm_memset_func_cfg(struct bnx2x *bp,
- struct tstorm_eth_function_common_config *tcfg,
- u16 abs_fid)
-{
- size_t size = sizeof(struct tstorm_eth_function_common_config);
-
- u32 addr = BAR_TSTRORM_INTMEM +
- TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
-
- __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
-}
-
-static inline void storm_memset_xstats_flags(struct bnx2x *bp,
- struct stats_indication_flags *flags,
- u16 abs_fid)
-{
- size_t size = sizeof(struct stats_indication_flags);
-
- u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
-
- __storm_memset_struct(bp, addr, size, (u32 *)flags);
-}
-
-static inline void storm_memset_tstats_flags(struct bnx2x *bp,
- struct stats_indication_flags *flags,
- u16 abs_fid)
-{
- size_t size = sizeof(struct stats_indication_flags);
-
- u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
-
- __storm_memset_struct(bp, addr, size, (u32 *)flags);
-}
-
-static inline void storm_memset_ustats_flags(struct bnx2x *bp,
- struct stats_indication_flags *flags,
- u16 abs_fid)
-{
- size_t size = sizeof(struct stats_indication_flags);
-
- u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
-
- __storm_memset_struct(bp, addr, size, (u32 *)flags);
-}
-
-static inline void storm_memset_cstats_flags(struct bnx2x *bp,
- struct stats_indication_flags *flags,
- u16 abs_fid)
-{
- size_t size = sizeof(struct stats_indication_flags);
-
- u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
-
- __storm_memset_struct(bp, addr, size, (u32 *)flags);
-}
-
-static inline void storm_memset_xstats_addr(struct bnx2x *bp,
- dma_addr_t mapping, u16 abs_fid)
-{
- u32 addr = BAR_XSTRORM_INTMEM +
- XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
-
- __storm_memset_dma_mapping(bp, addr, mapping);
-}
-
-static inline void storm_memset_tstats_addr(struct bnx2x *bp,
- dma_addr_t mapping, u16 abs_fid)
-{
- u32 addr = BAR_TSTRORM_INTMEM +
- TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
-
- __storm_memset_dma_mapping(bp, addr, mapping);
-}
-
-static inline void storm_memset_ustats_addr(struct bnx2x *bp,
- dma_addr_t mapping, u16 abs_fid)
-{
- u32 addr = BAR_USTRORM_INTMEM +
- USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
-
- __storm_memset_dma_mapping(bp, addr, mapping);
-}
-
-static inline void storm_memset_cstats_addr(struct bnx2x *bp,
- dma_addr_t mapping, u16 abs_fid)
-{
- u32 addr = BAR_CSTRORM_INTMEM +
- CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
-
- __storm_memset_dma_mapping(bp, addr, mapping);
-}
-
static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
u16 pf_id)
{
@@ -359,45 +276,6 @@ static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
REG_WR16(bp, addr, eq_prod);
}
-static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
- u16 fw_sb_id, u8 sb_index,
- u8 ticks)
-{
-
- int index_offset = CHIP_IS_E2(bp) ?
- offsetof(struct hc_status_block_data_e2, index_data) :
- offsetof(struct hc_status_block_data_e1x, index_data);
- u32 addr = BAR_CSTRORM_INTMEM +
- CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
- index_offset +
- sizeof(struct hc_index_data)*sb_index +
- offsetof(struct hc_index_data, timeout);
- REG_WR8(bp, addr, ticks);
- DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
- port, fw_sb_id, sb_index, ticks);
-}
-static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
- u16 fw_sb_id, u8 sb_index,
- u8 disable)
-{
- u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
- int index_offset = CHIP_IS_E2(bp) ?
- offsetof(struct hc_status_block_data_e2, index_data) :
- offsetof(struct hc_status_block_data_e1x, index_data);
- u32 addr = BAR_CSTRORM_INTMEM +
- CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
- index_offset +
- sizeof(struct hc_index_data)*sb_index +
- offsetof(struct hc_index_data, flags);
- u16 flags = REG_RD16(bp, addr);
- /* clear and set */
- flags &= ~HC_INDEX_DATA_HC_ENABLED;
- flags |= enable_flag;
- REG_WR16(bp, addr, flags);
- DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
- port, fw_sb_id, sb_index, disable);
-}
-
/* used only at init
* locking is done by mcp
*/
@@ -491,13 +369,6 @@ static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
}
-const u32 dmae_reg_go_c[] = {
- DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
- DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
- DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
- DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
-};
-
/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
@@ -578,7 +449,11 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
- /* lock the dmae channel */
+ /*
+ * Lock the dmae channel. Disable BHs to prevent a deadlock,
+ * since this code is called both from syscall context and
+ * from the ndo_set_rx_mode() flow, which may run in BH context.
+ */
spin_lock_bh(&bp->dmae_lock);
/* reset completion */
@@ -833,9 +708,9 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
return rc;
}
-static void bnx2x_fw_dump(struct bnx2x *bp)
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
- u32 addr;
+ u32 addr, val;
u32 mark, offset;
__be32 data[9];
int word;
@@ -844,6 +719,14 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
BNX2X_ERR("NO MCP - can not dump\n");
return;
}
+ netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff));
+
+ val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
+ if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
+ printk("%s" "MCP PC at 0x%x\n", lvl, val);
if (BP_PATH(bp) == 0)
trace_shmem_base = bp->common.shmem_base;
@@ -853,9 +736,9 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
mark = REG_RD(bp, addr);
mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+ ((mark + 0x3) & ~0x3) - 0x08000000;
- pr_err("begin fw dump (mark 0x%x)\n", mark);
+ printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
- pr_err("");
+ printk("%s", lvl);
for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
@@ -868,7 +751,12 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
data[8] = 0x0;
pr_cont("%s", (char *)data);
}
- pr_err("end of fw dump\n");
+ printk("%s" "end of fw dump\n", lvl);
+}
+
+static inline void bnx2x_fw_dump(struct bnx2x *bp)
+{
+ bnx2x_fw_dump_lvl(bp, KERN_ERR);
}
void bnx2x_panic_dump(struct bnx2x *bp)
@@ -879,6 +767,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
u16 start = 0, end = 0;
+ u8 cos;
#endif
bp->stats_state = STATS_STATE_DISABLED;
@@ -889,9 +778,9 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* Indices */
/* Common */
BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
- " spq_prod_idx(0x%x)\n",
- bp->def_idx, bp->def_att_idx,
- bp->attn_state, bp->spq_prod_idx);
+ " spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+ bp->def_idx, bp->def_att_idx, bp->attn_state,
+ bp->spq_prod_idx, bp->stats_counter);
BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
bp->def_status_blk->atten_status_block.attn_bits,
bp->def_status_blk->atten_status_block.attn_bits_ack,
@@ -908,15 +797,17 @@ void bnx2x_panic_dump(struct bnx2x *bp)
CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
i*sizeof(u32));
- pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
+ pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) "
"pf_id(0x%x) vnic_id(0x%x) "
- "vf_id(0x%x) vf_valid (0x%x)\n",
+ "vf_id(0x%x) vf_valid (0x%x) "
+ "state(0x%x)\n",
sp_sb_data.igu_sb_id,
sp_sb_data.igu_seg_id,
sp_sb_data.p_func.pf_id,
sp_sb_data.p_func.vnic_id,
sp_sb_data.p_func.vf_id,
- sp_sb_data.p_func.vf_valid);
+ sp_sb_data.p_func.vf_valid,
+ sp_sb_data.state);
for_each_eth_queue(bp, i) {
@@ -925,15 +816,16 @@ void bnx2x_panic_dump(struct bnx2x *bp)
struct hc_status_block_data_e2 sb_data_e2;
struct hc_status_block_data_e1x sb_data_e1x;
struct hc_status_block_sm *hc_sm_p =
- CHIP_IS_E2(bp) ?
- sb_data_e2.common.state_machine :
- sb_data_e1x.common.state_machine;
+ CHIP_IS_E1x(bp) ?
+ sb_data_e1x.common.state_machine :
+ sb_data_e2.common.state_machine;
struct hc_index_data *hc_index_p =
- CHIP_IS_E2(bp) ?
- sb_data_e2.index_data :
- sb_data_e1x.index_data;
- int data_size;
+ CHIP_IS_E1x(bp) ?
+ sb_data_e1x.index_data :
+ sb_data_e2.index_data;
+ u8 data_size, cos;
u32 *sb_data_p;
+ struct bnx2x_fp_txdata txdata;
/* Rx */
BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
@@ -948,14 +840,20 @@ void bnx2x_panic_dump(struct bnx2x *bp)
le16_to_cpu(fp->fp_hc_idx));
/* Tx */
- BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
- " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
- " *tx_cons_sb(0x%x)\n",
- i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
- fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
+ for_each_cos_in_tx_queue(fp, cos) {
+ txdata = fp->txdata[cos];
+ BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
+ " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
+ " *tx_cons_sb(0x%x)\n",
+ i, txdata.tx_pkt_prod,
+ txdata.tx_pkt_cons, txdata.tx_bd_prod,
+ txdata.tx_bd_cons,
+ le16_to_cpu(*txdata.tx_cons_sb));
+ }
- loop = CHIP_IS_E2(bp) ?
- HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
+ loop = CHIP_IS_E1x(bp) ?
+ HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
/* host sb data */
@@ -975,35 +873,39 @@ void bnx2x_panic_dump(struct bnx2x *bp)
fp->sb_index_values[j],
(j == loop - 1) ? ")" : " ");
/* fw sb data */
- data_size = CHIP_IS_E2(bp) ?
- sizeof(struct hc_status_block_data_e2) :
- sizeof(struct hc_status_block_data_e1x);
+ data_size = CHIP_IS_E1x(bp) ?
+ sizeof(struct hc_status_block_data_e1x) :
+ sizeof(struct hc_status_block_data_e2);
data_size /= sizeof(u32);
- sb_data_p = CHIP_IS_E2(bp) ?
- (u32 *)&sb_data_e2 :
- (u32 *)&sb_data_e1x;
+ sb_data_p = CHIP_IS_E1x(bp) ?
+ (u32 *)&sb_data_e1x :
+ (u32 *)&sb_data_e2;
/* copy sb data in here */
for (j = 0; j < data_size; j++)
*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
j * sizeof(u32));
- if (CHIP_IS_E2(bp)) {
- pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
- "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
+ if (!CHIP_IS_E1x(bp)) {
+ pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) "
+ "vnic_id(0x%x) same_igu_sb_1b(0x%x) "
+ "state(0x%x)\n",
sb_data_e2.common.p_func.pf_id,
sb_data_e2.common.p_func.vf_id,
sb_data_e2.common.p_func.vf_valid,
sb_data_e2.common.p_func.vnic_id,
- sb_data_e2.common.same_igu_sb_1b);
+ sb_data_e2.common.same_igu_sb_1b,
+ sb_data_e2.common.state);
} else {
- pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
- "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
+ pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) "
+ "vnic_id(0x%x) same_igu_sb_1b(0x%x) "
+ "state(0x%x)\n",
sb_data_e1x.common.p_func.pf_id,
sb_data_e1x.common.p_func.vf_id,
sb_data_e1x.common.p_func.vf_valid,
sb_data_e1x.common.p_func.vnic_id,
- sb_data_e1x.common.same_igu_sb_1b);
+ sb_data_e1x.common.same_igu_sb_1b,
+ sb_data_e1x.common.state);
}
/* SB_SMs data */
@@ -1067,23 +969,31 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* Tx */
for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ for_each_cos_in_tx_queue(fp, cos) {
+ struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+ start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
+ end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
+ for (j = start; j != end; j = TX_BD(j + 1)) {
+ struct sw_tx_bd *sw_bd =
+ &txdata->tx_buf_ring[j];
+
+ BNX2X_ERR("fp%d: txdata %d, "
+ "packet[%x]=[%p,%x]\n",
+ i, cos, j, sw_bd->skb,
+ sw_bd->first_bd);
+ }
- start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
- end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
- for (j = start; j != end; j = TX_BD(j + 1)) {
- struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
-
- BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
- i, j, sw_bd->skb, sw_bd->first_bd);
- }
-
- start = TX_BD(fp->tx_bd_cons - 10);
- end = TX_BD(fp->tx_bd_cons + 254);
- for (j = start; j != end; j = TX_BD(j + 1)) {
- u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
+ start = TX_BD(txdata->tx_bd_cons - 10);
+ end = TX_BD(txdata->tx_bd_cons + 254);
+ for (j = start; j != end; j = TX_BD(j + 1)) {
+ u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
- BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
- i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
+ BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]="
+ "[%x:%x:%x:%x]\n",
+ i, cos, j, tx_bd[0], tx_bd[1],
+ tx_bd[2], tx_bd[3]);
+ }
}
}
#endif
@@ -1092,6 +1002,373 @@ void bnx2x_panic_dump(struct bnx2x *bp)
BNX2X_ERR("end crash dump -----------------\n");
}
+/*
+ * FLR Support for E2
+ *
+ * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
+ * initialization.
+ */
+#define FLR_WAIT_USEC 10000 /* 10 milliseconds */
+#define FLR_WAIT_INTERAVAL 50 /* usec */
+#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */
+
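
FLR_POLL_CNT therefore works out to 10000 / 50 = 200 polls of 50 us each (~10 ms on ASIC), and bnx2x_flr_clnup_poll_count() below scales that by 2000 for emulation and 120 for FPGA platforms. A standalone sketch of just that arithmetic, with the CHIP_REV_IS_* predicates replaced by plain flags:

#include <stdio.h>

#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
#define FLR_WAIT_INTERAVAL	50	/* usec per poll */
#define FLR_POLL_CNT		(FLR_WAIT_USEC / FLR_WAIT_INTERAVAL)

/* Scale factors mirrored from bnx2x_flr_clnup_poll_count() */
static unsigned int flr_poll_count(int is_emul, int is_fpga)
{
	if (is_emul)
		return FLR_POLL_CNT * 2000;
	if (is_fpga)
		return FLR_POLL_CNT * 120;
	return FLR_POLL_CNT;
}

int main(void)
{
	printf("ASIC : %u polls (~%u us)\n", flr_poll_count(0, 0),
	       flr_poll_count(0, 0) * FLR_WAIT_INTERAVAL);	/* 200, 10000 us */
	printf("FPGA : %u polls\n", flr_poll_count(0, 1));	/* 24000 */
	printf("EMUL : %u polls\n", flr_poll_count(1, 0));	/* 400000 */
	return 0;
}
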
+struct pbf_pN_buf_regs {
+ int pN;
+ u32 init_crd;
+ u32 crd;
+ u32 crd_freed;
+};
+
+struct pbf_pN_cmd_regs {
+ int pN;
+ u32 lines_occup;
+ u32 lines_freed;
+};
+
+static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
+ struct pbf_pN_buf_regs *regs,
+ u32 poll_count)
+{
+ u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
+ u32 cur_cnt = poll_count;
+
+ crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
+ crd = crd_start = REG_RD(bp, regs->crd);
+ init_crd = REG_RD(bp, regs->init_crd);
+
+ DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
+ DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
+ DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
+
+ while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
+ (init_crd - crd_start))) {
+ if (cur_cnt--) {
+ udelay(FLR_WAIT_INTERAVAL);
+ crd = REG_RD(bp, regs->crd);
+ crd_freed = REG_RD(bp, regs->crd_freed);
+ } else {
+ DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
+ regs->pN);
+ DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
+ regs->pN, crd);
+ DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
+ regs->pN, crd_freed);
+ break;
+ }
+ }
+ DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
+ poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+}
+
+static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
+ struct pbf_pN_cmd_regs *regs,
+ u32 poll_count)
+{
+ u32 occup, to_free, freed, freed_start;
+ u32 cur_cnt = poll_count;
+
+ occup = to_free = REG_RD(bp, regs->lines_occup);
+ freed = freed_start = REG_RD(bp, regs->lines_freed);
+
+ DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
+ DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
+
+ while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
+ if (cur_cnt--) {
+ udelay(FLR_WAIT_INTERAVAL);
+ occup = REG_RD(bp, regs->lines_occup);
+ freed = REG_RD(bp, regs->lines_freed);
+ } else {
+ DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
+ regs->pN);
+ DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
+ regs->pN, occup);
+ DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
+ regs->pN, freed);
+ break;
+ }
+ }
+ DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
+ poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+}
+
+static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
+ u32 expected, u32 poll_count)
+{
+ u32 cur_cnt = poll_count;
+ u32 val;
+
+ while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
+ udelay(FLR_WAIT_INTERAVAL);
+
+ return val;
+}
+
+static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt)
+{
+ u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
+ if (val != 0) {
+ BNX2X_ERR("%s usage count=%d\n", msg, val);
+ return 1;
+ }
+ return 0;
+}
+
+static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
+{
+ /* adjust polling timeout */
+ if (CHIP_REV_IS_EMUL(bp))
+ return FLR_POLL_CNT * 2000;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ return FLR_POLL_CNT * 120;
+
+ return FLR_POLL_CNT;
+}
+
+static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
+{
+ struct pbf_pN_cmd_regs cmd_regs[] = {
+ {0, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_Q0 :
+ PBF_REG_P0_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_Q0 :
+ PBF_REG_P0_TQ_LINES_FREED_CNT},
+ {1, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_Q1 :
+ PBF_REG_P1_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_Q1 :
+ PBF_REG_P1_TQ_LINES_FREED_CNT},
+ {4, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_OCCUPANCY_LB_Q :
+ PBF_REG_P4_TQ_OCCUPANCY,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
+ PBF_REG_P4_TQ_LINES_FREED_CNT}
+ };
+
+ struct pbf_pN_buf_regs buf_regs[] = {
+ {0, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_Q0 :
+ PBF_REG_P0_INIT_CRD,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_Q0 :
+ PBF_REG_P0_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
+ PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
+ {1, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_Q1 :
+ PBF_REG_P1_INIT_CRD,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_Q1 :
+ PBF_REG_P1_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
+ PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
+ {4, (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INIT_CRD_LB_Q :
+ PBF_REG_P4_INIT_CRD,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_CREDIT_LB_Q :
+ PBF_REG_P4_CREDIT,
+ (CHIP_IS_E3B0(bp)) ?
+ PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
+ PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
+ };
+
+ int i;
+
+ /* Verify the command queues are flushed P0, P1, P4 */
+ for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
+ bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
+
+
+ /* Verify the transmission buffers are flushed P0, P1, P4 */
+ for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
+ bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
+}
+
+#define OP_GEN_PARAM(param) \
+ (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
+
+#define OP_GEN_TYPE(type) \
+ (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
+
+#define OP_GEN_AGG_VECT(index) \
+ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
+
+
+static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
+ u32 poll_cnt)
+{
+ struct sdm_op_gen op_gen = {0};
+
+ u32 comp_addr = BAR_CSTRORM_INTMEM +
+ CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
+ int ret = 0;
+
+ if (REG_RD(bp, comp_addr)) {
+ BNX2X_ERR("Cleanup complete is not 0\n");
+ return 1;
+ }
+
+ op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
+ op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
+ op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
+ op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
+
+ DP(BNX2X_MSG_SP, "FW Final cleanup\n");
+ REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
+
+ if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
+ BNX2X_ERR("FW final cleanup did not succeed\n");
+ ret = 1;
+ }
+ /* Zero the completion for the next FLR */
+ REG_WR(bp, comp_addr, 0);
+
+ return ret;
+}
+
+static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+{
+ int pos;
+ u16 status;
+
+ pos = pci_pcie_cap(dev);
+ if (!pos)
+ return false;
+
+ pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+ return status & PCI_EXP_DEVSTA_TRPND;
+}
+
+/* PF FLR specific routines */
+static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
+{
+
+ /* wait for CFC PF usage-counter to zero (includes all the VFs) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ CFC_REG_NUM_LCIDS_INSIDE_PF,
+ "CFC PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+
+ /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ DORQ_REG_PF_USAGE_CNT,
+ "DQ PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
+ "QM PF usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
+ "Timers VNIC usage counter timed out",
+ poll_cnt))
+ return 1;
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
+ "Timers NUM_SCANS usage counter timed out",
+ poll_cnt))
+ return 1;
+
+ /* Wait DMAE PF usage counter to zero */
+ if (bnx2x_flr_clnup_poll_hw_counter(bp,
+ dmae_reg_go_c[INIT_DMAE_C(bp)],
+ "DMAE dommand register timed out",
+ poll_cnt))
+ return 1;
+
+ return 0;
+}
+
+static void bnx2x_hw_enable_status(struct bnx2x *bp)
+{
+ u32 val;
+
+ val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
+ DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
+
+ val = REG_RD(bp, PBF_REG_DISABLE_PF);
+ DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
+
+ val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
+ DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
+
+ val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
+ val);
+}
+
+static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
+{
+ u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+ DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
+
+ /* Re-enable PF target read access */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+ /* Poll HW usage counters */
+ if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
+ return -EBUSY;
+
+ /* Zero the igu 'trailing edge' and 'leading edge' */
+
+ /* Send the FW cleanup command */
+ if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
+ return -EBUSY;
+
+ /* ATC cleanup */
+
+ /* Verify TX hw is flushed */
+ bnx2x_tx_hw_flushed(bp, poll_cnt);
+
+ /* Wait 100ms (not adjusted according to platform) */
+ msleep(100);
+
+ /* Verify no pending pci transactions */
+ if (bnx2x_is_pcie_pending(bp->pdev))
+ BNX2X_ERR("PCIE Transactions still pending\n");
+
+ /* Debug */
+ bnx2x_hw_enable_status(bp);
+
+ /*
+ * Master enable - Due to WB DMAE writes performed before this
+ * register is re-initialized as part of the regular function init
+ */
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+ return 0;
+}
+
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
int port = BP_PORT(bp);
@@ -1272,7 +1549,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
-static void bnx2x_int_disable(struct bnx2x *bp)
+void bnx2x_int_disable(struct bnx2x *bp)
{
if (bp->common.int_block == INT_BLOCK_HC)
bnx2x_hc_int_disable(bp);
@@ -1285,10 +1562,6 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
int i, offset;
- /* disable interrupt handling */
- atomic_inc(&bp->intr_sem);
- smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
-
if (disable_hw)
/* prevent the HW from sending interrupts */
bnx2x_int_disable(bp);
@@ -1301,12 +1574,13 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
offset++;
#endif
for_each_eth_queue(bp, i)
- synchronize_irq(bp->msix_table[i + offset].vector);
+ synchronize_irq(bp->msix_table[offset++].vector);
} else
synchronize_irq(bp->pdev->irq);
/* make sure sp_task is not running */
cancel_delayed_work(&bp->sp_task);
+ cancel_delayed_work(&bp->period_task);
flush_workqueue(bnx2x_wq);
}
@@ -1350,59 +1624,129 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
return false;
}
+/**
+ * bnx2x_get_leader_lock_resource - get the recovery leader resource id
+ *
+ * @bp: driver handle
+ *
+ * Returns the recovery leader resource id according to the engine this function
+ * belongs to. Currently only 2 engines are supported.
+ */
+static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
+{
+ if (BP_PATH(bp))
+ return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
+ else
+ return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
+}
+
+/**
+ * bnx2x_trylock_leader_lock - try to acquire a leader lock.
+ *
+ * @bp: driver handle
+ *
+ * Tries to acquire a leader lock for the current engine.
+ */
+static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
+{
+ return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
#ifdef BCM_CNIC
-static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
#endif
-void bnx2x_sp_event(struct bnx2x_fastpath *fp,
- union eth_rx_cqe *rr_cqe)
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
struct bnx2x *bp = fp->bp;
int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+ enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
+ struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
DP(BNX2X_MSG_SP,
"fp %d cid %d got ramrod #%d state is %x type is %d\n",
fp->index, cid, command, bp->state,
rr_cqe->ramrod_cqe.ramrod_type);
- switch (command | fp->state) {
- case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
- DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
- fp->state = BNX2X_FP_STATE_OPEN;
+ switch (command) {
+ case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
+ DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
+ drv_cmd = BNX2X_Q_CMD_UPDATE;
break;
- case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
- DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
- fp->state = BNX2X_FP_STATE_HALTED;
+ case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_SETUP;
break;
- case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
- DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
- fp->state = BNX2X_FP_STATE_TERMINATED;
+ case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
+ DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
break;
- default:
- BNX2X_ERR("unexpected MC reply (%d) "
- "fp[%d] state is %x\n",
- command, fp->index, fp->state);
+ case (RAMROD_CMD_ID_ETH_HALT):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_HALT;
break;
+
+ case (RAMROD_CMD_ID_ETH_TERMINATE):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_TERMINATE;
+ break;
+
+ case (RAMROD_CMD_ID_ETH_EMPTY):
+ DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
+ drv_cmd = BNX2X_Q_CMD_EMPTY;
+ break;
+
+ default:
+ BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
+ command, fp->index);
+ return;
}
+ if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
+ q_obj->complete_cmd(bp, q_obj, drv_cmd))
+ /* q_obj->complete_cmd() failure means that this was
+ * an unexpected completion.
+ *
+ * In this case we don't want to increase the bp->spq_left
+ * because apparently we haven't sent this command in the
+ * first place.
+ */
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#else
+ return;
+#endif
+
smp_mb__before_atomic_inc();
atomic_inc(&bp->cq_spq_left);
- /* push the change in fp->state and towards the memory */
- smp_wmb();
+ /* push the change in bp->cq_spq_left towards the memory */
+ smp_mb__after_atomic_inc();
+
+ DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
return;
}
+void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
+{
+ u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
+
+ bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
+ start);
+}
+
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
struct bnx2x *bp = netdev_priv(dev_instance);
u16 status = bnx2x_ack_int(bp);
u16 mask;
int i;
+ u8 cos;
/* Return here if interrupt is shared and it's not for us */
if (unlikely(status == 0)) {
@@ -1411,12 +1755,6 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
}
DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
- /* Return here if interrupt is disabled */
- if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
- DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
- return IRQ_HANDLED;
- }
-
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
return IRQ_HANDLED;
@@ -1425,11 +1763,12 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
+ mask = 0x2 << (fp->index + CNIC_PRESENT);
if (status & mask) {
- /* Handle Rx and Tx according to SB id */
+ /* Handle Rx or Tx according to SB id */
prefetch(fp->rx_cons_sb);
- prefetch(fp->tx_cons_sb);
+ for_each_cos_in_tx_queue(fp, cos)
+ prefetch(fp->txdata[cos].tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
status &= ~mask;
@@ -1441,11 +1780,13 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
if (status & (mask | 0x1)) {
struct cnic_ops *c_ops = NULL;
- rcu_read_lock();
- c_ops = rcu_dereference(bp->cnic_ops);
- if (c_ops)
- c_ops->cnic_handler(bp->cnic_data, NULL);
- rcu_read_unlock();
+ if (likely(bp->state == BNX2X_STATE_OPEN)) {
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops)
+ c_ops->cnic_handler(bp->cnic_data, NULL);
+ rcu_read_unlock();
+ }
status &= ~mask;
}
@@ -1466,9 +1807,6 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
return IRQ_HANDLED;
}
-/* end of fast path */
-
-
/* Link */
/*
@@ -1520,6 +1858,11 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
return -EAGAIN;
}
+int bnx2x_release_leader_lock(struct bnx2x *bp)
+{
+ return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
u32 lock_status;
@@ -1640,6 +1983,53 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
return 0;
}
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
+{
+ u32 gpio_reg = 0;
+ int rc = 0;
+
+ /* Any port swapping should be handled by caller. */
+
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+ /* read GPIO and mask except the float bits */
+ gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
+ gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
+
+ switch (mode) {
+ case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
+ /* set CLR */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
+ /* set SET */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
+ break;
+
+ case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+ DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
+ /* set FLOAT */
+ gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+ break;
+
+ default:
+ BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc == 0)
+ REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+ return rc;
+}
+
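As an aside, a minimal usage sketch for the new bnx2x_set_mult_gpio() helper (illustrative, not part of this patch): it assumes the caller has already resolved any port swapping and that 'pins' is a bit mask with one bit per GPIO number, as the FLOAT/CLR/SET shifts above imply; the GPIO numbers and function name are hypothetical.

/* Illustrative sketch: drive hypothetical GPIOs 2 and 3 low, then float them. */
static void example_toggle_mult_gpio(struct bnx2x *bp)
{
	u8 pins = (1 << 2) | (1 << 3);	/* one bit per GPIO, per the shifts above */

	if (bnx2x_set_mult_gpio(bp, pins, MISC_REGISTERS_GPIO_OUTPUT_LOW))
		BNX2X_ERR("failed to drive GPIOs 0x%x low\n", pins);

	/* release the pins back to input (Hi-Z) when done */
	bnx2x_set_mult_gpio(bp, pins, MISC_REGISTERS_GPIO_INPUT_HI_Z);
}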
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
/* The GPIO should be swapped if swap register is set and active */
@@ -1732,45 +2122,6 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
return 0;
}
-int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
-{
- u32 sel_phy_idx = 0;
- if (bp->link_vars.link_up) {
- sel_phy_idx = EXT_PHY1;
- /* In case link is SERDES, check if the EXT_PHY2 is the one */
- if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
- (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
- sel_phy_idx = EXT_PHY2;
- } else {
-
- switch (bnx2x_phy_selection(&bp->link_params)) {
- case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
- case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
- case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
- sel_phy_idx = EXT_PHY1;
- break;
- case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
- case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
- sel_phy_idx = EXT_PHY2;
- break;
- }
- }
- /*
- * The selected actived PHY is always after swapping (in case PHY
- * swapping is enabled). So when swapping is enabled, we need to reverse
- * the configuration
- */
-
- if (bp->link_params.multi_phy_config &
- PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
- if (sel_phy_idx == EXT_PHY1)
- sel_phy_idx = EXT_PHY2;
- else if (sel_phy_idx == EXT_PHY2)
- sel_phy_idx = EXT_PHY1;
- }
- return LINK_CONFIG_IDX(sel_phy_idx);
-}
-
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
@@ -1803,10 +2154,12 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
u8 rc;
int cfx_idx = bnx2x_get_link_cfg_idx(bp);
u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
- /* Initialize link parameters structure variables */
- /* It is recommended to turn off RX FC for jumbo frames
- for better performance */
- if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
+ /*
+ * Initialize link parameters structure variables.
+ * It is recommended to turn off RX FC for jumbo frames
+ * for better performance.
+ */
+ if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
else
bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
@@ -1814,8 +2167,18 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
bnx2x_acquire_phy_lock(bp);
if (load_mode == LOAD_DIAG) {
- bp->link_params.loopback_mode = LOOPBACK_XGXS;
- bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
+ struct link_params *lp = &bp->link_params;
+ lp->loopback_mode = LOOPBACK_XGXS;
+ /* do PHY loopback at 10G speed, if possible */
+ if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
+ if (lp->speed_cap_mask[cfx_idx] &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+ lp->req_line_speed[cfx_idx] =
+ SPEED_10000;
+ else
+ lp->req_line_speed[cfx_idx] =
+ SPEED_1000;
+ }
}
rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
@@ -1827,7 +2190,8 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
bnx2x_link_report(bp);
- }
+ } else
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
return rc;
}
@@ -1941,8 +2305,12 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
bp->vn_weight_sum += vn_min_rate;
}
- /* ... only if all min rates are zeros - disable fairness */
- if (all_zero) {
+ /* if ETS or all min rates are zeros - disable fairness */
+ if (BNX2X_IS_ETS_ENABLED(bp)) {
+ bp->cmng.flags.cmng_enables &=
+ ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+ DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
+ } else if (all_zero) {
bp->cmng.flags.cmng_enables &=
~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
@@ -2143,11 +2511,11 @@ static void bnx2x_link_attn(struct bnx2x *bp)
pause_enabled);
}
- if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
+ if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
struct host_port_stats *pstats;
pstats = bnx2x_sp(bp, port_stats);
- /* reset old bmac stats */
+ /* reset old mac stats */
memset(&(pstats->mac_stx[0]), 0,
sizeof(struct mac_stx));
}
@@ -2197,12 +2565,23 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
bp->port.pmf = 1;
DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+ /*
+ * We need the mb() to ensure the ordering between writing to
+ * bp->port.pmf here and reading it from bnx2x_periodic_task().
+ */
+ smp_mb();
+
+ /* queue a periodic task */
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+
+ bnx2x_dcbx_pmf_update(bp);
+
/* enable nig attention */
val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
if (bp->common.int_block == INT_BLOCK_HC) {
REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
- } else if (CHIP_IS_E2(bp)) {
+ } else if (!CHIP_IS_E1x(bp)) {
REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
}
@@ -2232,7 +2611,8 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
- DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
+ DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
+ (command | seq), param);
do {
/* let the FW do it's magic ... */
@@ -2263,182 +2643,118 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
{
#ifdef BCM_CNIC
- if (IS_FCOE_FP(fp) && IS_MF(bp))
+ /* Statistics are not supported for CNIC Clients at the moment */
+ if (IS_FCOE_FP(fp))
return false;
#endif
return true;
}
-/* must be called under rtnl_lock */
-static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
- u32 mask = (1 << cl_id);
-
- /* initial seeting is BNX2X_ACCEPT_NONE */
- u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
- u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
- u8 unmatched_unicast = 0;
-
- if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
- unmatched_unicast = 1;
-
- if (filters & BNX2X_PROMISCUOUS_MODE) {
- /* promiscious - accept all, drop none */
- drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
- accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
- if (IS_MF_SI(bp)) {
- /*
- * SI mode defines to accept in promiscuos mode
- * only unmatched packets
- */
- unmatched_unicast = 1;
- accp_all_ucast = 0;
- }
- }
- if (filters & BNX2X_ACCEPT_UNICAST) {
- /* accept matched ucast */
- drop_all_ucast = 0;
- }
- if (filters & BNX2X_ACCEPT_MULTICAST)
- /* accept matched mcast */
- drop_all_mcast = 0;
+ if (CHIP_IS_E1x(bp)) {
+ struct tstorm_eth_function_common_config tcfg = {0};
- if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
- /* accept all mcast */
- drop_all_ucast = 0;
- accp_all_ucast = 1;
- }
- if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
- /* accept all mcast */
- drop_all_mcast = 0;
- accp_all_mcast = 1;
+ storm_memset_func_cfg(bp, &tcfg, p->func_id);
}
- if (filters & BNX2X_ACCEPT_BROADCAST) {
- /* accept (all) bcast */
- drop_all_bcast = 0;
- accp_all_bcast = 1;
- }
-
- bp->mac_filters.ucast_drop_all = drop_all_ucast ?
- bp->mac_filters.ucast_drop_all | mask :
- bp->mac_filters.ucast_drop_all & ~mask;
-
- bp->mac_filters.mcast_drop_all = drop_all_mcast ?
- bp->mac_filters.mcast_drop_all | mask :
- bp->mac_filters.mcast_drop_all & ~mask;
-
- bp->mac_filters.bcast_drop_all = drop_all_bcast ?
- bp->mac_filters.bcast_drop_all | mask :
- bp->mac_filters.bcast_drop_all & ~mask;
- bp->mac_filters.ucast_accept_all = accp_all_ucast ?
- bp->mac_filters.ucast_accept_all | mask :
- bp->mac_filters.ucast_accept_all & ~mask;
-
- bp->mac_filters.mcast_accept_all = accp_all_mcast ?
- bp->mac_filters.mcast_accept_all | mask :
- bp->mac_filters.mcast_accept_all & ~mask;
-
- bp->mac_filters.bcast_accept_all = accp_all_bcast ?
- bp->mac_filters.bcast_accept_all | mask :
- bp->mac_filters.bcast_accept_all & ~mask;
+ /* Enable the function in the FW */
+ storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
+ storm_memset_func_en(bp, p->func_id, 1);
- bp->mac_filters.unmatched_unicast = unmatched_unicast ?
- bp->mac_filters.unmatched_unicast | mask :
- bp->mac_filters.unmatched_unicast & ~mask;
+ /* spq */
+ if (p->func_flgs & FUNC_FLG_SPQ) {
+ storm_memset_spq_addr(bp, p->spq_map, p->func_id);
+ REG_WR(bp, XSEM_REG_FAST_MEMORY +
+ XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
+ }
}
-static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+/**
+ * bnx2x_get_common_flags - Return flags common to Tx-only and regular queues
+ *
+ * @bp: device handle
+ * @fp: queue handle
+ * @zero_stats: TRUE if statistics zeroing is needed
+ *
+ * Return the flags that are common to both the Tx-only and regular connections.
+ */
+static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ bool zero_stats)
{
- struct tstorm_eth_function_common_config tcfg = {0};
- u16 rss_flgs;
-
- /* tpa */
- if (p->func_flgs & FUNC_FLG_TPA)
- tcfg.config_flags |=
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
-
- /* set rss flags */
- rss_flgs = (p->rss->mode <<
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
-
- if (p->rss->cap & RSS_IPV4_CAP)
- rss_flgs |= RSS_IPV4_CAP_MASK;
- if (p->rss->cap & RSS_IPV4_TCP_CAP)
- rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
- if (p->rss->cap & RSS_IPV6_CAP)
- rss_flgs |= RSS_IPV6_CAP_MASK;
- if (p->rss->cap & RSS_IPV6_TCP_CAP)
- rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
+ unsigned long flags = 0;
- tcfg.config_flags |= rss_flgs;
- tcfg.rss_result_mask = p->rss->result_mask;
+ /* PF driver will always initialize the Queue to an ACTIVE state */
+ __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
- storm_memset_func_cfg(bp, &tcfg, p->func_id);
-
- /* Enable the function in the FW */
- storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
- storm_memset_func_en(bp, p->func_id, 1);
+ /* tx only connections collect statistics (on the same index as the
+ * parent connection). The statistics are zeroed when the parent
+ * connection is initialized.
+ */
+ if (stat_counter_valid(bp, fp)) {
+ __set_bit(BNX2X_Q_FLG_STATS, &flags);
+ if (zero_stats)
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ }
- /* statistics */
- if (p->func_flgs & FUNC_FLG_STATS) {
- struct stats_indication_flags stats_flags = {0};
- stats_flags.collect_eth = 1;
+ return flags;
+}
- storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
- storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
+static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ bool leading)
+{
+ unsigned long flags = 0;
- storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
- storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
+ /* calculate other queue flags */
+ if (IS_MF_SD(bp))
+ __set_bit(BNX2X_Q_FLG_OV, &flags);
- storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
- storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
+ if (IS_FCOE_FP(fp))
+ __set_bit(BNX2X_Q_FLG_FCOE, &flags);
- storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
- storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
+ if (!fp->disable_tpa) {
+ __set_bit(BNX2X_Q_FLG_TPA, &flags);
+ __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
}
- /* spq */
- if (p->func_flgs & FUNC_FLG_SPQ) {
- storm_memset_spq_addr(bp, p->spq_map, p->func_id);
- REG_WR(bp, XSEM_REG_FAST_MEMORY +
- XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
+ if (leading) {
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
+ __set_bit(BNX2X_Q_FLG_MCAST, &flags);
}
-}
-static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
- struct bnx2x_fastpath *fp)
-{
- u16 flags = 0;
+ /* Always set HW VLAN stripping */
+ __set_bit(BNX2X_Q_FLG_VLAN, &flags);
- /* calculate queue flags */
- flags |= QUEUE_FLG_CACHE_ALIGN;
- flags |= QUEUE_FLG_HC;
- flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
- flags |= QUEUE_FLG_VLAN;
- DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
+ return flags | bnx2x_get_common_flags(bp, fp, true);
+}
- if (!fp->disable_tpa)
- flags |= QUEUE_FLG_TPA;
+static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
+ u8 cos)
+{
+ gen_init->stat_id = bnx2x_stats_id(fp);
+ gen_init->spcl_id = fp->cl_id;
- flags = stat_counter_valid(bp, fp) ?
- (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
+ /* Always use mini-jumbo MTU for FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+ else
+ gen_init->mtu = bp->dev->mtu;
- return flags;
+ gen_init->cos = cos;
}
-static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
+static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
- struct bnx2x_rxq_init_params *rxq_init)
+ struct bnx2x_rxq_setup_params *rxq_init)
{
- u16 max_sge = 0;
+ u8 max_sge = 0;
u16 sge_sz = 0;
u16 tpa_agg_size = 0;
- /* calculate queue flags */
- u16 flags = bnx2x_get_cl_flags(bp, fp);
-
if (!fp->disable_tpa) {
pause->sge_th_hi = 250;
pause->sge_th_lo = 150;
@@ -2459,80 +2775,74 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
pause->bd_th_lo = 250;
pause->rcq_th_hi = 350;
pause->rcq_th_lo = 250;
- pause->sge_th_hi = 0;
- pause->sge_th_lo = 0;
+
pause->pri_map = 1;
}
/* rxq setup */
- rxq_init->flags = flags;
- rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
rxq_init->dscr_map = fp->rx_desc_mapping;
rxq_init->sge_map = fp->rx_sge_mapping;
rxq_init->rcq_map = fp->rx_comp_mapping;
rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
- /* Always use mini-jumbo MTU for FCoE L2 ring */
- if (IS_FCOE_FP(fp))
- rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
- else
- rxq_init->mtu = bp->dev->mtu;
+ /* This should be the maximum number of data bytes that may be
+ * placed on the BD (not including padding).
+ */
+ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
+ IP_HEADER_ALIGNMENT_PADDING;
- rxq_init->buf_sz = fp->rx_buf_size;
rxq_init->cl_qzone_id = fp->cl_qzone_id;
- rxq_init->cl_id = fp->cl_id;
- rxq_init->spcl_id = fp->cl_id;
- rxq_init->stat_id = fp->cl_id;
rxq_init->tpa_agg_sz = tpa_agg_size;
rxq_init->sge_buf_sz = sge_sz;
rxq_init->max_sges_pkt = max_sge;
+ rxq_init->rss_engine_id = BP_FUNC(bp);
+
+ /* Maximum number of simultaneous TPA aggregations for this Queue.
+ *
+ * For PF Clients it should be the maximum available number.
+ * VF driver(s) may want to define it to a smaller value.
+ */
+ rxq_init->max_tpa_queues =
+ (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
+ ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+
rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
rxq_init->fw_sb_id = fp->fw_sb_id;
if (IS_FCOE_FP(fp))
rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
else
- rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
-
- rxq_init->cid = HW_CID(bp, fp->cid);
-
- rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
+ rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
}
-static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
+static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
+ u8 cos)
{
- u16 flags = bnx2x_get_cl_flags(bp, fp);
-
- txq_init->flags = flags;
- txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
- txq_init->dscr_map = fp->tx_desc_mapping;
- txq_init->stat_id = fp->cl_id;
- txq_init->cid = HW_CID(bp, fp->cid);
- txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
+ txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
+ txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
txq_init->fw_sb_id = fp->fw_sb_id;
+ /*
+ * set the tss leading client id for TX classification ==
+ * leading RSS client id
+ */
+ txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
+
if (IS_FCOE_FP(fp)) {
txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
}
-
- txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
}
static void bnx2x_pf_init(struct bnx2x *bp)
{
struct bnx2x_func_init_params func_init = {0};
- struct bnx2x_rss_params rss = {0};
struct event_ring_data eq_data = { {0} };
u16 flags;
- /* pf specific setups */
- if (!CHIP_IS_E1(bp))
- storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
-
- if (CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
/* reset IGU PF statistics: MSIX + ATTN */
/* PF */
REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
@@ -2550,27 +2860,14 @@ static void bnx2x_pf_init(struct bnx2x *bp)
/* function setup flags */
flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
- if (CHIP_IS_E1x(bp))
- flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
- else
- flags |= FUNC_FLG_TPA;
-
- /* function setup */
-
- /**
- * Although RSS is meaningless when there is a single HW queue we
- * still need it enabled in order to have HW Rx hash generated.
+ /* This flag is relevant for E1x only.
+ * E2 doesn't have a TPA configuration at the function level.
*/
- rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
- RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
- rss.mode = bp->multi_mode;
- rss.result_mask = MULTI_MASK;
- func_init.rss = &rss;
+ flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
func_init.func_flgs = flags;
func_init.pf_id = BP_FUNC(bp);
func_init.func_id = BP_FUNC(bp);
- func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
func_init.spq_map = bp->spq_mapping;
func_init.spq_prod = bp->spq_prod_idx;
@@ -2579,11 +2876,11 @@ static void bnx2x_pf_init(struct bnx2x *bp)
memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
/*
- Congestion management values depend on the link rate
- There is no active link so initial link rate is set to 10 Gbps.
- When the link comes up The congestion management values are
- re-calculated according to the actual link rate.
- */
+ * Congestion management values depend on the link rate.
+ * There is no active link, so the initial link rate is set to 10 Gbps.
+ * When the link comes up, the congestion management values are
+ * re-calculated according to the actual link rate.
+ */
bp->link_vars.line_speed = SPEED_10000;
bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
@@ -2591,10 +2888,6 @@ static void bnx2x_pf_init(struct bnx2x *bp)
if (bp->port.pmf)
storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
- /* no rx until link is up */
- bp->rx_mode = BNX2X_RX_MODE_NONE;
- bnx2x_set_storm_rx_mode(bp);
-
/* init Event Queue */
eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
@@ -2609,11 +2902,9 @@ static void bnx2x_e1h_disable(struct bnx2x *bp)
{
int port = BP_PORT(bp);
- netif_tx_disable(bp->dev);
+ bnx2x_tx_disable(bp);
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
-
- netif_carrier_off(bp->dev);
}
static void bnx2x_e1h_enable(struct bnx2x *bp)
@@ -2708,20 +2999,60 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
- /* Make sure that BD data is updated before writing the producer */
- wmb();
+ /*
+ * Make sure that BD data is updated before writing the producer:
+ * BD data is written to the memory, the producer is read from the
+ * memory, thus we need a full memory barrier to ensure the ordering.
+ */
+ mb();
REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
bp->spq_prod_idx);
mmiowb();
}
-/* the slow path queue is odd since completions arrive on the fastpath ring */
+/**
+ * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
+ *
+ * @cmd: command to check
+ * @cmd_type: command type
+ */
+static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
+{
+ if ((cmd_type == NONE_CONNECTION_TYPE) ||
+ (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
+ (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
+ (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
+ (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
+ return true;
+ else
+ return false;
+
+}
+
+
+/**
+ * bnx2x_sp_post - place a single command on an SP ring
+ *
+ * @bp: driver handle
+ * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
+ * @cid: SW CID the command is related to
+ * @data_hi: command private data address (high 32 bits)
+ * @data_lo: command private data address (low 32 bits)
+ * @cmd_type: command type (e.g. NONE, ETH)
+ *
+ * SP data is handled as if it's always an address pair, thus data fields are
+ * not swapped to little endian in upper functions. Instead this function swaps
+ * data as if it's two u32 fields.
+ */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
- u32 data_hi, u32 data_lo, int common)
+ u32 data_hi, u32 data_lo, int cmd_type)
{
struct eth_spe *spe;
u16 type;
+ bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -2751,17 +3082,7 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
HW_CID(bp, cid));
- if (common)
- /* Common ramrods:
- * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
- * TRAFFIC_STOP, TRAFFIC_START
- */
- type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
- & SPE_HDR_CONN_TYPE;
- else
- /* ETH ramrods: SETUP, HALT */
- type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
- & SPE_HDR_CONN_TYPE;
+ type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
SPE_HDR_FUNCTION_ID);
@@ -2771,25 +3092,23 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
- /* stats ramrod has it's own slot on the spq */
- if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
- /* It's ok if the actual decrement is issued towards the memory
- * somewhere between the spin_lock and spin_unlock. Thus no
- * more explict memory barrier is needed.
- */
- if (common)
- atomic_dec(&bp->eq_spq_left);
- else
- atomic_dec(&bp->cq_spq_left);
- }
+ /*
+ * It's ok if the actual decrement is issued towards the memory
+ * somewhere between the spin_lock and spin_unlock. Thus no
+ * more explicit memory barrier is needed.
+ */
+ if (common)
+ atomic_dec(&bp->eq_spq_left);
+ else
+ atomic_dec(&bp->cq_spq_left);
DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
- "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
- "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
+ "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) "
+ "type(0x%x) left (CQ, EQ) (%x,%x)\n",
bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
(u32)(U64_LO(bp->spq_mapping) +
- (void *)bp->spq_prod_bd - (void *)bp->spq), command,
+ (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
HW_CID(bp, cid), data_hi, data_lo, type,
atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
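For orientation, an illustrative caller of the reworked bnx2x_sp_post() (not part of this patch): the 64-bit ramrod data address is split with U64_HI()/U64_LO(), and a connection type such as ETH_CONNECTION_TYPE or NONE_CONNECTION_TYPE is passed instead of the old 'common' flag; the wrapper name is hypothetical.

/* Illustrative sketch: post a ramrod whose private data lives at 'mapping'. */
static int example_post_ramrod(struct bnx2x *bp, int command, int cid,
			       dma_addr_t mapping, int cmd_type)
{
	/* bnx2x_sp_post() derives the old "common" behaviour from cmd_type
	 * via bnx2x_is_contextless_ramrod().
	 */
	return bnx2x_sp_post(bp, command, cid,
			     U64_HI(mapping), U64_LO(mapping), cmd_type);
}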
@@ -2892,9 +3211,15 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
/* save nig interrupt mask */
nig_mask = REG_RD(bp, nig_int_mask_addr);
- REG_WR(bp, nig_int_mask_addr, 0);
- bnx2x_link_attn(bp);
+ /* If nig_mask is not set, no need to call the update
+ * function.
+ */
+ if (nig_mask) {
+ REG_WR(bp, nig_int_mask_addr, 0);
+
+ bnx2x_link_attn(bp);
+ }
/* handle unicore attn? */
}
@@ -2999,8 +3324,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
bnx2x_fan_failure(bp);
}
- if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
- AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
+ if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
bnx2x_acquire_phy_lock(bp);
bnx2x_handle_module_detect_int(&bp->link_params);
bnx2x_release_phy_lock(bp);
@@ -3063,13 +3387,13 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
}
if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
-
val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
- BNX2X_ERR("PXP hw attention 0x%x\n", val);
+ BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
/* RQ_USDMDP_FIFO_OVERFLOW */
if (val & 0x18000)
BNX2X_ERR("FATAL error from PXP\n");
- if (CHIP_IS_E2(bp)) {
+
+ if (!CHIP_IS_E1x(bp)) {
val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
}
@@ -3117,20 +3441,31 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
- /* Always call it here: bnx2x_link_report() will
- * prevent the link indication duplication.
- */
- bnx2x__link_status_update(bp);
-
if (bp->port.pmf &&
(val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
bp->dcbx_enabled > 0)
/* start dcbx state machine */
bnx2x_dcbx_set_params(bp,
BNX2X_DCBX_STATE_NEG_RECEIVED);
+ if (bp->link_vars.periodic_flags &
+ PERIODIC_FLAGS_LINK_EVENT) {
+ /* sync with link */
+ bnx2x_acquire_phy_lock(bp);
+ bp->link_vars.periodic_flags &=
+ ~PERIODIC_FLAGS_LINK_EVENT;
+ bnx2x_release_phy_lock(bp);
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+ bnx2x_link_report(bp);
+ }
+ /* Always call it here: bnx2x_link_report() will
+ * prevent the link indication duplication.
+ */
+ bnx2x__link_status_update(bp);
} else if (attn & BNX2X_MC_ASSERT_BITS) {
BNX2X_ERR("MC assert!\n");
+ bnx2x_mc_assert(bp);
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
@@ -3163,72 +3498,185 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
}
}
-#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
-#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
-#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
-#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
-#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
+/*
+ * Bits map:
+ * 0-7 - Engine0 load counter.
+ * 8-15 - Engine1 load counter.
+ * 16 - Engine0 RESET_IN_PROGRESS bit.
+ * 17 - Engine1 RESET_IN_PROGRESS bit.
+ * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function
+ * on the engine
+ * 19 - Engine1 ONE_IS_LOADED.
+ * 20 - Chip reset flow bit. When set, a non-leader must wait for both engines'
+ * leaders to complete (i.e. check both RESET_IN_PROGRESS bits, not just
+ * the one belonging to its engine).
+ *
+ */
+#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
+
+#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
+#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
+#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
+#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
+#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
+#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
+#define BNX2X_GLOBAL_RESET_BIT 0x00040000
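To make the bit map above concrete, a small decode sketch (illustrative, not part of this patch) that uses only the masks defined here; the helper name is hypothetical.

/* Illustrative sketch: decode the recovery register for engine 0 or 1. */
static void example_decode_recovery_reg(u32 val, int engine)
{
	u32 cnt_mask = engine ? BNX2X_PATH1_LOAD_CNT_MASK :
				BNX2X_PATH0_LOAD_CNT_MASK;
	u32 cnt_shift = engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
				 BNX2X_PATH0_LOAD_CNT_SHIFT;
	u32 rst_bit = engine ? BNX2X_PATH1_RST_IN_PROG_BIT :
			       BNX2X_PATH0_RST_IN_PROG_BIT;

	pr_info("engine %d: load_cnt=%u reset_in_progress=%d global_reset=%d\n",
		engine,
		(val & cnt_mask) >> cnt_shift,		/* per-engine load counter */
		!!(val & rst_bit),			/* per-engine RESET_IN_PROGRESS */
		!!(val & BNX2X_GLOBAL_RESET_BIT));	/* global reset flag */
}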
/*
+ * Set the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+void bnx2x_set_reset_global(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
+ barrier();
+ mmiowb();
+}
+
+/*
+ * Clear the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
+ barrier();
+ mmiowb();
+}
+
+/*
+ * Checks the GLOBAL_RESET bit.
+ *
* should be run under rtnl lock
*/
+static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+ return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
+}
+
+/*
+ * Clear RESET_IN_PROGRESS bit for the current engine.
+ *
+ * Should be run under rtnl lock
+ */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
- val &= ~(1 << RESET_DONE_FLAG_SHIFT);
- REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 bit = BP_PATH(bp) ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ /* Clear the bit */
+ val &= ~bit;
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
barrier();
mmiowb();
}
/*
+ * Set RESET_IN_PROGRESS for the current engine.
+ *
* should be run under rtnl lock
*/
-static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
- val |= (1 << 16);
- REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 bit = BP_PATH(bp) ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ /* Set the bit */
+ val |= bit;
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
barrier();
mmiowb();
}
/*
+ * Checks the RESET_IN_PROGRESS bit for the given engine.
* should be run under rtnl lock
*/
-bool bnx2x_reset_is_done(struct bnx2x *bp)
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
{
- u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
- DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
- return (val & RESET_DONE_FLAG_MASK) ? false : true;
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 bit = engine ?
+ BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+ /* return false if bit is set */
+ return (val & bit) ? false : true;
}
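A hedged usage sketch of the per-engine query (not part of this patch): a non-leader might poll the other engine until its reset completes; the polling loop, bound and interval are only illustrative.

/* Illustrative sketch: bounded wait for the given engine's reset to finish. */
static bool example_wait_reset_done(struct bnx2x *bp, int engine)
{
	int cnt = 1000;	/* arbitrary bound for the sketch */

	while (cnt--) {
		if (bnx2x_reset_is_done(bp, engine))
			return true;
		msleep(10);	/* arbitrary poll interval */
	}
	return false;
}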
/*
+ * Increment the load counter for the current engine.
+ *
* should be run under rtnl lock
*/
-inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
+void bnx2x_inc_load_cnt(struct bnx2x *bp)
{
- u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK;
+ u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT;
DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
- val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
- REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+ /* get the current counter value */
+ val1 = (val & mask) >> shift;
+
+ /* increment... */
+ val1++;
+
+ /* clear the old value */
+ val &= ~mask;
+
+ /* set the new one */
+ val |= ((val1 << shift) & mask);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
barrier();
mmiowb();
}
-/*
- * should be run under rtnl lock
+/**
+ * bnx2x_dec_load_cnt - decrement the load counter
+ *
+ * @bp: driver handle
+ *
+ * Should be run under rtnl lock.
+ * Decrements the load counter for the current engine. Returns
+ * the new counter value.
*/
u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
{
- u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+ u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK;
+ u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT;
DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
- val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
- REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+ /* get the current counter value */
+ val1 = (val & mask) >> shift;
+
+ /* decrement... */
+ val1--;
+
+ /* clear the old value */
+ val &= ~mask;
+
+ /* set the new one */
+ val |= ((val1 << shift) & mask);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
barrier();
mmiowb();
@@ -3236,17 +3684,39 @@ u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
}
/*
+ * Read the load counter for the current engine.
+ *
* should be run under rtnl lock
*/
-static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
{
- return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
+ u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK);
+ u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+ BNX2X_PATH0_LOAD_CNT_SHIFT);
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val);
+
+ val = (val & mask) >> shift;
+
+ DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val);
+
+ return val;
}
+/*
+ * Reset the load counter for the current engine.
+ *
+ * should be run under rtnl lock
+ */
static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
- REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
+ u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+ BNX2X_PATH0_LOAD_CNT_MASK);
+
+ REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
}
static inline void _print_next_block(int idx, const char *blk)
@@ -3256,7 +3726,8 @@ static inline void _print_next_block(int idx, const char *blk)
pr_cont("%s", blk);
}
-static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
+static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
+ bool print)
{
int i = 0;
u32 cur_bit = 0;
@@ -3265,19 +3736,33 @@ static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
if (sig & cur_bit) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
- _print_next_block(par_num++, "BRB");
+ if (print)
+ _print_next_block(par_num++, "BRB");
break;
case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
- _print_next_block(par_num++, "PARSER");
+ if (print)
+ _print_next_block(par_num++, "PARSER");
break;
case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
- _print_next_block(par_num++, "TSDM");
+ if (print)
+ _print_next_block(par_num++, "TSDM");
break;
case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
- _print_next_block(par_num++, "SEARCHER");
+ if (print)
+ _print_next_block(par_num++,
+ "SEARCHER");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "TCM");
break;
case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
- _print_next_block(par_num++, "TSEMI");
+ if (print)
+ _print_next_block(par_num++, "TSEMI");
+ break;
+ case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "XPB");
break;
}
@@ -3289,7 +3774,8 @@ static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
return par_num;
}
-static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
+static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
+ bool *global, bool print)
{
int i = 0;
u32 cur_bit = 0;
@@ -3297,38 +3783,72 @@ static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
cur_bit = ((u32)0x1 << i);
if (sig & cur_bit) {
switch (cur_bit) {
- case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
- _print_next_block(par_num++, "PBCLIENT");
+ case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "PBF");
break;
case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
- _print_next_block(par_num++, "QM");
+ if (print)
+ _print_next_block(par_num++, "QM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "TM");
break;
case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
- _print_next_block(par_num++, "XSDM");
+ if (print)
+ _print_next_block(par_num++, "XSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "XCM");
break;
case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
- _print_next_block(par_num++, "XSEMI");
+ if (print)
+ _print_next_block(par_num++, "XSEMI");
break;
case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
- _print_next_block(par_num++, "DOORBELLQ");
+ if (print)
+ _print_next_block(par_num++,
+ "DOORBELLQ");
+ break;
+ case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "NIG");
break;
case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
- _print_next_block(par_num++, "VAUX PCI CORE");
+ if (print)
+ _print_next_block(par_num++,
+ "VAUX PCI CORE");
+ *global = true;
break;
case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
- _print_next_block(par_num++, "DEBUG");
+ if (print)
+ _print_next_block(par_num++, "DEBUG");
break;
case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
- _print_next_block(par_num++, "USDM");
+ if (print)
+ _print_next_block(par_num++, "USDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "UCM");
break;
case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
- _print_next_block(par_num++, "USEMI");
+ if (print)
+ _print_next_block(par_num++, "USEMI");
break;
case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
- _print_next_block(par_num++, "UPB");
+ if (print)
+ _print_next_block(par_num++, "UPB");
break;
case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
- _print_next_block(par_num++, "CSDM");
+ if (print)
+ _print_next_block(par_num++, "CSDM");
+ break;
+ case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "CCM");
break;
}
@@ -3340,7 +3860,8 @@ static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
return par_num;
}
-static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
+static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
+ bool print)
{
int i = 0;
u32 cur_bit = 0;
@@ -3349,26 +3870,37 @@ static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
if (sig & cur_bit) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
- _print_next_block(par_num++, "CSEMI");
+ if (print)
+ _print_next_block(par_num++, "CSEMI");
break;
case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
- _print_next_block(par_num++, "PXP");
+ if (print)
+ _print_next_block(par_num++, "PXP");
break;
case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
- _print_next_block(par_num++,
+ if (print)
+ _print_next_block(par_num++,
"PXPPCICLOCKCLIENT");
break;
case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
- _print_next_block(par_num++, "CFC");
+ if (print)
+ _print_next_block(par_num++, "CFC");
break;
case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
- _print_next_block(par_num++, "CDU");
+ if (print)
+ _print_next_block(par_num++, "CDU");
+ break;
+ case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "DMAE");
break;
case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
- _print_next_block(par_num++, "IGU");
+ if (print)
+ _print_next_block(par_num++, "IGU");
break;
case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
- _print_next_block(par_num++, "MISC");
+ if (print)
+ _print_next_block(par_num++, "MISC");
break;
}
@@ -3380,7 +3912,8 @@ static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
return par_num;
}
-static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
+static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
+ bool *global, bool print)
{
int i = 0;
u32 cur_bit = 0;
@@ -3389,16 +3922,54 @@ static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
if (sig & cur_bit) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
- _print_next_block(par_num++, "MCP ROM");
+ if (print)
+ _print_next_block(par_num++, "MCP ROM");
+ *global = true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
- _print_next_block(par_num++, "MCP UMP RX");
+ if (print)
+ _print_next_block(par_num++,
+ "MCP UMP RX");
+ *global = true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
- _print_next_block(par_num++, "MCP UMP TX");
+ if (print)
+ _print_next_block(par_num++,
+ "MCP UMP TX");
+ *global = true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
- _print_next_block(par_num++, "MCP SCPAD");
+ if (print)
+ _print_next_block(par_num++,
+ "MCP SCPAD");
+ *global = true;
+ break;
+ }
+
+ /* Clear the bit */
+ sig &= ~cur_bit;
+ }
+ }
+
+ return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
+ bool print)
+{
+ int i = 0;
+ u32 cur_bit = 0;
+ for (i = 0; sig; i++) {
+ cur_bit = ((u32)0x1 << i);
+ if (sig & cur_bit) {
+ switch (cur_bit) {
+ case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "PGLUE_B");
+ break;
+ case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+ if (print)
+ _print_next_block(par_num++, "ATC");
break;
}
@@ -3410,38 +3981,55 @@ static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
return par_num;
}
-static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
- u32 sig2, u32 sig3)
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+ u32 *sig)
{
- if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
- (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
+ if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
+ (sig[1] & HW_PRTY_ASSERT_SET_1) ||
+ (sig[2] & HW_PRTY_ASSERT_SET_2) ||
+ (sig[3] & HW_PRTY_ASSERT_SET_3) ||
+ (sig[4] & HW_PRTY_ASSERT_SET_4)) {
int par_num = 0;
DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
- "[0]:0x%08x [1]:0x%08x "
- "[2]:0x%08x [3]:0x%08x\n",
- sig0 & HW_PRTY_ASSERT_SET_0,
- sig1 & HW_PRTY_ASSERT_SET_1,
- sig2 & HW_PRTY_ASSERT_SET_2,
- sig3 & HW_PRTY_ASSERT_SET_3);
- printk(KERN_ERR"%s: Parity errors detected in blocks: ",
- bp->dev->name);
- par_num = bnx2x_print_blocks_with_parity0(
- sig0 & HW_PRTY_ASSERT_SET_0, par_num);
- par_num = bnx2x_print_blocks_with_parity1(
- sig1 & HW_PRTY_ASSERT_SET_1, par_num);
- par_num = bnx2x_print_blocks_with_parity2(
- sig2 & HW_PRTY_ASSERT_SET_2, par_num);
- par_num = bnx2x_print_blocks_with_parity3(
- sig3 & HW_PRTY_ASSERT_SET_3, par_num);
- printk("\n");
+ "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x "
+ "[4]:0x%08x\n",
+ sig[0] & HW_PRTY_ASSERT_SET_0,
+ sig[1] & HW_PRTY_ASSERT_SET_1,
+ sig[2] & HW_PRTY_ASSERT_SET_2,
+ sig[3] & HW_PRTY_ASSERT_SET_3,
+ sig[4] & HW_PRTY_ASSERT_SET_4);
+ if (print)
+ netdev_err(bp->dev,
+ "Parity errors detected in blocks: ");
+ par_num = bnx2x_check_blocks_with_parity0(
+ sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
+ par_num = bnx2x_check_blocks_with_parity1(
+ sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
+ par_num = bnx2x_check_blocks_with_parity2(
+ sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
+ par_num = bnx2x_check_blocks_with_parity3(
+ sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
+ par_num = bnx2x_check_blocks_with_parity4(
+ sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
+
+ if (print)
+ pr_cont("\n");
+
return true;
} else
return false;
}
-bool bnx2x_chk_parity_attn(struct bnx2x *bp)
+/**
+ * bnx2x_chk_parity_attn - checks for parity attentions.
+ *
+ * @bp: driver handle
+ * @global: true if there was a global attention
+ * @print: show parity attention in syslog
+ */
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
{
- struct attn_route attn;
+ struct attn_route attn = { {0} };
int port = BP_PORT(bp);
attn.sig[0] = REG_RD(bp,
@@ -3457,8 +4045,12 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp)
MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
port*4);
- return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
- attn.sig[3]);
+ if (!CHIP_IS_E1x(bp))
+ attn.sig[4] = REG_RD(bp,
+ MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
+ port*4);
+
+ return bnx2x_parity_attn(bp, global, print, attn.sig);
}
@@ -3537,21 +4129,25 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
u32 reg_addr;
u32 val;
u32 aeu_mask;
+ bool global = false;
/* need to take HW lock because MCP or other port might also
try to handle this event */
bnx2x_acquire_alr(bp);
- if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
+ if (bnx2x_chk_parity_attn(bp, &global, true)) {
+#ifndef BNX2X_STOP_ON_ERROR
bp->recovery_state = BNX2X_RECOVERY_INIT;
- bnx2x_set_reset_in_progress(bp);
- schedule_delayed_work(&bp->reset_task, 0);
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
/* Disable HW interrupts */
bnx2x_int_disable(bp);
- bnx2x_release_alr(bp);
/* In case of parity errors don't handle attentions so that
* other function would "see" parity errors.
*/
+#else
+ bnx2x_panic();
+#endif
+ bnx2x_release_alr(bp);
return;
}
@@ -3559,7 +4155,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
- if (CHIP_IS_E2(bp))
+ if (!CHIP_IS_E1x(bp))
attn.sig[4] =
REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
else
@@ -3655,6 +4251,15 @@ static void bnx2x_attn_int(struct bnx2x *bp)
bnx2x_attn_int_deasserted(bp, deasserted);
}
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+ u16 index, u8 op, u8 update)
+{
+ u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
+
+ bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
+ igu_addr);
+}
+
static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
/* No memory barriers */
@@ -3666,6 +4271,8 @@ static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
union event_ring_elem *elem)
{
+ u8 err = elem->message.error;
+
if (!bp->cnic_eth_dev.starting_cid ||
(cid < bp->cnic_eth_dev.starting_cid &&
cid != bp->cnic_eth_dev.iscsi_l2_cid))
@@ -3673,16 +4280,123 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
- if (unlikely(elem->message.data.cfc_del_event.error)) {
+ if (unlikely(err)) {
+
BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
cid);
bnx2x_panic_dump(bp);
}
- bnx2x_cnic_cfc_comp(bp, cid);
+ bnx2x_cnic_cfc_comp(bp, cid, err);
return 0;
}
#endif
+static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
+{
+ struct bnx2x_mcast_ramrod_params rparam;
+ int rc;
+
+ memset(&rparam, 0, sizeof(rparam));
+
+ rparam.mcast_obj = &bp->mcast_obj;
+
+ netif_addr_lock_bh(bp->dev);
+
+ /* Clear pending state for the last command */
+ bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
+
+ /* If there are pending mcast commands - send them */
+ if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send pending mcast commands: %d\n",
+ rc);
+ }
+
+ netif_addr_unlock_bh(bp->dev);
+}
+
+static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
+ union event_ring_elem *elem)
+{
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+ u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj;
+
+ /* Always push next commands out, don't wait here */
+ __set_bit(RAMROD_CONT, &ramrod_flags);
+
+ switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ case BNX2X_FILTER_MAC_PENDING:
+#ifdef BCM_CNIC
+ if (cid == BNX2X_ISCSI_ETH_CID)
+ vlan_mac_obj = &bp->iscsi_l2_mac_obj;
+ else
+#endif
+ vlan_mac_obj = &bp->fp[cid].mac_obj;
+
+ break;
+
+ case BNX2X_FILTER_MCAST_PENDING:
+ /* This is only relevant for 57710 where multicast MACs are
+ * configured as unicast MACs using the same ramrod.
+ */
+ bnx2x_handle_mcast_eqe(bp);
+ return;
+ default:
+ BNX2X_ERR("Unsupported classification command: %d\n",
+ elem->message.data.eth_event.echo);
+ return;
+ }
+
+ rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
+
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+ else if (rc > 0)
+ DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
+
+}
+
+#ifdef BCM_CNIC
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
+#endif
+
+static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
+{
+ netif_addr_lock_bh(bp->dev);
+
+ clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+ /* Send rx_mode command again if was requested */
+ if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
+ bnx2x_set_storm_rx_mode(bp);
+#ifdef BCM_CNIC
+ else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+ &bp->sp_state))
+ bnx2x_set_iscsi_eth_rx_mode(bp, true);
+ else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+ &bp->sp_state))
+ bnx2x_set_iscsi_eth_rx_mode(bp, false);
+#endif
+
+ netif_addr_unlock_bh(bp->dev);
+}
+
+static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
+ struct bnx2x *bp, u32 cid)
+{
+ DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
+#ifdef BCM_CNIC
+ if (cid == BNX2X_FCOE_ETH_CID)
+ return &bnx2x_fcoe(bp, q_obj);
+ else
+#endif
+ return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
+}
+
static void bnx2x_eq_int(struct bnx2x *bp)
{
u16 hw_cons, sw_cons, sw_prod;
@@ -3690,6 +4404,9 @@ static void bnx2x_eq_int(struct bnx2x *bp)
u32 cid;
u8 opcode;
int spqe_cnt = 0;
+ struct bnx2x_queue_sp_obj *q_obj;
+ struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
+ struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
hw_cons = le16_to_cpu(*bp->eq_cons_sb);
@@ -3708,7 +4425,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
sw_cons = bp->eq_cons;
sw_prod = bp->eq_prod;
- DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
+ DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
for (; sw_cons != hw_cons;
@@ -3724,9 +4441,10 @@ static void bnx2x_eq_int(struct bnx2x *bp)
/* handle eq element */
switch (opcode) {
case EVENT_RING_OPCODE_STAT_QUERY:
- DP(NETIF_MSG_TIMER, "got statistics comp event\n");
+ DP(NETIF_MSG_TIMER, "got statistics comp event %d\n",
+ bp->stats_comp++);
/* nothing to do with stats comp */
- continue;
+ goto next_spqe;
case EVENT_RING_OPCODE_CFC_DEL:
/* handle according to cid range */
@@ -3734,60 +4452,100 @@ static void bnx2x_eq_int(struct bnx2x *bp)
* we may want to verify here that the bp state is
* HALTING
*/
- DP(NETIF_MSG_IFDOWN,
+ DP(BNX2X_MSG_SP,
"got delete ramrod for MULTI[%d]\n", cid);
#ifdef BCM_CNIC
if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
goto next_spqe;
- if (cid == BNX2X_FCOE_ETH_CID)
- bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
- else
#endif
- bnx2x_fp(bp, cid, state) =
- BNX2X_FP_STATE_CLOSED;
+ q_obj = bnx2x_cid_to_q_obj(bp, cid);
+
+ if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
+ break;
+
+
goto next_spqe;
case EVENT_RING_OPCODE_STOP_TRAFFIC:
- DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
+ DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n");
+ if (f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_TX_STOP))
+ break;
bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
goto next_spqe;
+
case EVENT_RING_OPCODE_START_TRAFFIC:
- DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
+ DP(BNX2X_MSG_SP, "got START TRAFFIC\n");
+ if (f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_TX_START))
+ break;
bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
goto next_spqe;
+ case EVENT_RING_OPCODE_FUNCTION_START:
+ DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n");
+ if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
+ break;
+
+ goto next_spqe;
+
+ case EVENT_RING_OPCODE_FUNCTION_STOP:
+ DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n");
+ if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
+ break;
+
+ goto next_spqe;
}
switch (opcode | bp->state) {
- case (EVENT_RING_OPCODE_FUNCTION_START |
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
BNX2X_STATE_OPENING_WAIT4_PORT):
- DP(NETIF_MSG_IFUP, "got setup ramrod\n");
- bp->state = BNX2X_STATE_FUNC_STARTED;
+ cid = elem->message.data.eth_event.echo &
+ BNX2X_SWCID_MASK;
+ DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
+ cid);
+ rss_raw->clear_pending(rss_raw);
break;
- case (EVENT_RING_OPCODE_FUNCTION_STOP |
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_SET_MAC |
BNX2X_STATE_CLOSING_WAIT4_HALT):
- DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
- bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
+ bnx2x_handle_classification_eqe(bp, elem);
break;
- case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
- case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
- DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
- if (elem->message.data.set_mac_event.echo)
- bp->set_mac_pending = 0;
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_MULTICAST_RULES |
+ BNX2X_STATE_CLOSING_WAIT4_HALT):
+ DP(BNX2X_MSG_SP, "got mcast ramrod\n");
+ bnx2x_handle_mcast_eqe(bp);
break;
- case (EVENT_RING_OPCODE_SET_MAC |
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
+ BNX2X_STATE_OPEN):
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
+ BNX2X_STATE_DIAG):
+ case (EVENT_RING_OPCODE_FILTERS_RULES |
BNX2X_STATE_CLOSING_WAIT4_HALT):
- DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
- if (elem->message.data.set_mac_event.echo)
- bp->set_mac_pending = 0;
+ DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
+ bnx2x_handle_rx_mode_eqe(bp);
break;
default:
/* unknown event log error and continue */
- BNX2X_ERR("Unknown EQ event %d\n",
- elem->message.opcode);
+ BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
+ elem->message.opcode, bp->state);
}
next_spqe:
spqe_cnt++;
@@ -3810,12 +4568,6 @@ static void bnx2x_sp_task(struct work_struct *work)
struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
u16 status;
- /* Return here if interrupt is disabled */
- if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
- DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
- return;
- }
-
status = bnx2x_update_dsb_idx(bp);
/* if (status == 0) */
/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
@@ -3834,8 +4586,15 @@ static void bnx2x_sp_task(struct work_struct *work)
struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
if ((!NO_FCOE(bp)) &&
- (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
+ (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ /*
+ * Prevent local bottom-halves from running as
+ * we are going to change the local NAPI list.
+ */
+ local_bh_disable();
napi_schedule(&bnx2x_fcoe(bp, napi));
+ local_bh_enable();
+ }
#endif
/* Handle EQ completions */
bnx2x_eq_int(bp);
@@ -3859,12 +4618,6 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
struct net_device *dev = dev_instance;
struct bnx2x *bp = netdev_priv(dev);
- /* Return here if interrupt is disabled */
- if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
- DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
- return IRQ_HANDLED;
- }
-
bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
IGU_INT_DISABLE, 0);
@@ -3891,20 +4644,27 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
/* end of slow path */
+
+void bnx2x_drv_pulse(struct bnx2x *bp)
+{
+ SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
+ bp->fw_drv_pulse_wr_seq);
+}
+
+
static void bnx2x_timer(unsigned long data)
{
+ u8 cos;
struct bnx2x *bp = (struct bnx2x *) data;
if (!netif_running(bp->dev))
return;
- if (atomic_read(&bp->intr_sem) != 0)
- goto timer_restart;
-
if (poll) {
struct bnx2x_fastpath *fp = &bp->fp[0];
- bnx2x_tx_int(fp);
+ for_each_cos_in_tx_queue(fp, cos)
+ bnx2x_tx_int(bp, &fp->txdata[cos]);
bnx2x_rx_int(fp, 1000);
}
@@ -3917,7 +4677,7 @@ static void bnx2x_timer(unsigned long data)
bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
/* TBD - add SYSTEM_TIME */
drv_pulse = bp->fw_drv_pulse_wr_seq;
- SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
+ bnx2x_drv_pulse(bp);
mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
MCP_PULSE_SEQ_MASK);
@@ -3935,7 +4695,6 @@ static void bnx2x_timer(unsigned long data)
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
-timer_restart:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
@@ -3981,18 +4740,16 @@ static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
struct hc_status_block_data_e1x sb_data_e1x;
/* disable the function first */
- if (CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
- sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
- sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
+ sb_data_e2.common.state = SB_DISABLED;
sb_data_e2.common.p_func.vf_valid = false;
sb_data_p = (u32 *)&sb_data_e2;
data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
} else {
memset(&sb_data_e1x, 0,
sizeof(struct hc_status_block_data_e1x));
- sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
- sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
+ sb_data_e1x.common.state = SB_DISABLED;
sb_data_e1x.common.p_func.vf_valid = false;
sb_data_p = (u32 *)&sb_data_e1x;
data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
@@ -4026,8 +4783,7 @@ static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
struct hc_sp_status_block_data sp_sb_data;
memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
- sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
- sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
+ sp_sb_data.state = SB_DISABLED;
sp_sb_data.p_func.vf_valid = false;
bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
@@ -4070,8 +4826,9 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
bnx2x_zero_fp_sb(bp, fw_sb_id);
- if (CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+ sb_data_e2.common.state = SB_ENABLED;
sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
sb_data_e2.common.p_func.vf_id = vfid;
sb_data_e2.common.p_func.vf_valid = vf_valid;
@@ -4085,6 +4842,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
} else {
memset(&sb_data_e1x, 0,
sizeof(struct hc_status_block_data_e1x));
+ sb_data_e1x.common.state = SB_ENABLED;
sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
sb_data_e1x.common.p_func.vf_id = 0xff;
sb_data_e1x.common.p_func.vf_valid = false;
@@ -4108,25 +4866,20 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
-static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
- u8 sb_index, u8 disable, u16 usec)
-{
- int port = BP_PORT(bp);
- u8 ticks = usec / BNX2X_BTR;
-
- storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
-
- disable = disable ? 1 : (usec ? 0 : 1);
- storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
-}
-
-static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
+static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
u16 tx_usec, u16 rx_usec)
{
- bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
false, rx_usec);
- bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
- false, tx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
+ tx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
+ tx_usec);
+ bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+ HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
+ tx_usec);
}
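
For context on the coalescing rewrite above: the removed bnx2x_update_coalesce_sb_index() converted a microsecond timeout into host-coalescing ticks and derived the disable flag. A small standalone sketch of that arithmetic, with the tick resolution assumed for the example:

/*
 * Sketch only: usec -> HC ticks conversion as done by the removed helper.
 * BTR is an assumed tick resolution, not the driver's BNX2X_BTR value.
 */
#include <stdio.h>

#define BTR 4

struct hc_index_cfg {
        unsigned int ticks;
        unsigned int disable;
};

static struct hc_index_cfg hc_cfg_from_usec(unsigned int usec, int disable)
{
        struct hc_index_cfg cfg;

        cfg.ticks = usec / BTR;
        /* an explicit disable, or a zero timeout, turns coalescing off */
        cfg.disable = disable ? 1 : (usec ? 0 : 1);
        return cfg;
}

int main(void)
{
        struct hc_index_cfg rx = hc_cfg_from_usec(25, 0);
        struct hc_index_cfg off = hc_cfg_from_usec(0, 0);

        printf("rx:  ticks=%u disable=%u\n", rx.ticks, rx.disable);
        printf("off: ticks=%u disable=%u\n", off.ticks, off.disable);
        return 0;
}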
static void bnx2x_init_def_sb(struct bnx2x *bp)
@@ -4167,7 +4920,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
bp->attn_group[index].sig[sindex] =
REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
- if (CHIP_IS_E2(bp))
+ if (!CHIP_IS_E1x(bp))
/*
* enable5 is separate from the rest of the registers,
* and therefore the address skip is 4
@@ -4185,7 +4938,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
REG_WR(bp, reg_offset, U64_LO(section));
REG_WR(bp, reg_offset + 4, U64_HI(section));
- } else if (CHIP_IS_E2(bp)) {
+ } else if (!CHIP_IS_E1x(bp)) {
REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
}
@@ -4195,6 +4948,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
bnx2x_zero_sp_sb(bp);
+ sp_sb_data.state = SB_ENABLED;
sp_sb_data.host_sb_addr.lo = U64_LO(section);
sp_sb_data.host_sb_addr.hi = U64_HI(section);
sp_sb_data.igu_sb_id = igu_sp_sb_index;
@@ -4205,9 +4959,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
- bp->stats_pending = 0;
- bp->set_mac_pending = 0;
-
bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
@@ -4253,146 +5004,129 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}
-void bnx2x_push_indir_table(struct bnx2x *bp)
+
+/* called with netif_addr_lock_bh() */
+void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags)
{
- int func = BP_FUNC(bp);
- int i;
+ struct bnx2x_rx_mode_ramrod_params ramrod_param;
+ int rc;
- if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
- return;
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
- for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
- bp->fp->cl_id + bp->rx_indir_table[i]);
-}
+ /* Prepare ramrod parameters */
+ ramrod_param.cid = 0;
+ ramrod_param.cl_id = cl_id;
+ ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
+ ramrod_param.func_id = BP_FUNC(bp);
-static void bnx2x_init_ind_table(struct bnx2x *bp)
-{
- int i;
+ ramrod_param.pstate = &bp->sp_state;
+ ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
+
+ ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
+ ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
+
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
- for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
- bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.rx_mode_flags = rx_mode_flags;
- bnx2x_push_indir_table(bp);
+ ramrod_param.rx_accept_flags = rx_accept_flags;
+ ramrod_param.tx_accept_flags = tx_accept_flags;
+
+ rc = bnx2x_config_rx_mode(bp, &ramrod_param);
+ if (rc < 0) {
+ BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
+ return;
+ }
}
+/* called with netif_addr_lock_bh() */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
- int mode = bp->rx_mode;
- int port = BP_PORT(bp);
- u16 cl_id;
- u32 def_q_filters = 0;
+ unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+ unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
- /* All but management unicast packets should pass to the host as well */
- u32 llh_mask =
- NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
- NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
- NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
- NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
-
- switch (mode) {
- case BNX2X_RX_MODE_NONE: /* no Rx */
- def_q_filters = BNX2X_ACCEPT_NONE;
#ifdef BCM_CNIC
- if (!NO_FCOE(bp)) {
- cl_id = bnx2x_fcoe(bp, cl_id);
- bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
- }
-#endif
- break;
+ if (!NO_FCOE(bp))
- case BNX2X_RX_MODE_NORMAL:
- def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
- BNX2X_ACCEPT_MULTICAST;
-#ifdef BCM_CNIC
- if (!NO_FCOE(bp)) {
- cl_id = bnx2x_fcoe(bp, cl_id);
- bnx2x_rxq_set_mac_filters(bp, cl_id,
- BNX2X_ACCEPT_UNICAST |
- BNX2X_ACCEPT_MULTICAST);
- }
+ /* Configure rx_mode of FCoE Queue */
+ __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
#endif
- break;
- case BNX2X_RX_MODE_ALLMULTI:
- def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
- BNX2X_ACCEPT_ALL_MULTICAST;
-#ifdef BCM_CNIC
+ switch (bp->rx_mode) {
+ case BNX2X_RX_MODE_NONE:
/*
- * Prevent duplication of multicast packets by configuring FCoE
- * L2 Client to receive only matched unicast frames.
+ * 'drop all' supersedes any accept flags that may have been
+ * passed to the function.
*/
- if (!NO_FCOE(bp)) {
- cl_id = bnx2x_fcoe(bp, cl_id);
- bnx2x_rxq_set_mac_filters(bp, cl_id,
- BNX2X_ACCEPT_UNICAST);
- }
-#endif
break;
+ case BNX2X_RX_MODE_NORMAL:
+ __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+ break;
+ case BNX2X_RX_MODE_ALLMULTI:
+ __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ break;
case BNX2X_RX_MODE_PROMISC:
- def_q_filters |= BNX2X_PROMISCUOUS_MODE;
-#ifdef BCM_CNIC
- /*
- * Prevent packets duplication by configuring DROP_ALL for FCoE
- * L2 Client.
+ /* According to the definition of SI mode, an iface in promisc mode
+ * should receive matched and unmatched (in resolution of port)
+ * unicast packets.
*/
- if (!NO_FCOE(bp)) {
- cl_id = bnx2x_fcoe(bp, cl_id);
- bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
- }
-#endif
- /* pass management unicast packets as well */
- llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
- break;
+ __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+ /* internal switching mode */
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+ if (IS_MF_SI(bp))
+ __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
+ else
+ __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
- default:
- BNX2X_ERR("BAD rx mode (%d)\n", mode);
break;
+ default:
+ BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
+ return;
}
- cl_id = BP_L_ID(bp);
- bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
-
- REG_WR(bp,
- (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
- NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
+ if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
+ }
- DP(NETIF_MSG_IFUP, "rx mode %d\n"
- "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
- "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
- "unmatched_ucast 0x%x\n", mode,
- bp->mac_filters.ucast_drop_all,
- bp->mac_filters.mcast_drop_all,
- bp->mac_filters.bcast_drop_all,
- bp->mac_filters.ucast_accept_all,
- bp->mac_filters.mcast_accept_all,
- bp->mac_filters.bcast_accept_all,
- bp->mac_filters.unmatched_unicast
- );
+ __set_bit(RAMROD_RX, &ramrod_flags);
+ __set_bit(RAMROD_TX, &ramrod_flags);
- storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
+ bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
+ tx_accept_flags, ramrod_flags);
}
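
The rewritten bnx2x_set_storm_rx_mode() above no longer programs MAC filters directly; it accumulates accept flags for the Rx and Tx (internal switching) directions into bitmasks and hands them to the ramrod helper. A minimal sketch of that accumulation pattern; the flag bit numbers below are invented for the example, the driver uses its own enum:

/*
 * Sketch only: building rx/tx accept-flag bitmasks for promiscuous mode.
 * Bit positions are illustrative, not the driver's BNX2X_ACCEPT_* values.
 */
#include <stdio.h>

enum accept_flag {
        ACCEPT_UNICAST,
        ACCEPT_MULTICAST,
        ACCEPT_ALL_MULTICAST,
        ACCEPT_BROADCAST,
        ACCEPT_UNMATCHED,
        ACCEPT_ANY_VLAN,
};

#define SET_FLAG(mask, bit) ((mask) |= 1UL << (bit))

int main(void)
{
        unsigned long rx_accept = 0, tx_accept = 0;
        int promisc = 1;

        if (promisc) {
                /* promiscuous Rx: matched and unmatched unicast plus the rest */
                SET_FLAG(rx_accept, ACCEPT_UNMATCHED);
                SET_FLAG(rx_accept, ACCEPT_UNICAST);
                SET_FLAG(rx_accept, ACCEPT_ALL_MULTICAST);
                SET_FLAG(rx_accept, ACCEPT_BROADCAST);

                /* internal switching path forwards multicast and broadcast */
                SET_FLAG(tx_accept, ACCEPT_ALL_MULTICAST);
                SET_FLAG(tx_accept, ACCEPT_BROADCAST);
        }

        /* any mode other than 'none' also passes every VLAN */
        SET_FLAG(rx_accept, ACCEPT_ANY_VLAN);
        SET_FLAG(tx_accept, ACCEPT_ANY_VLAN);

        printf("rx_accept=0x%lx tx_accept=0x%lx\n", rx_accept, tx_accept);
        return 0;
}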
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
int i;
- if (!CHIP_IS_E1(bp)) {
-
- /* xstorm needs to know whether to add ovlan to packets or not,
- * in switch-independent we'll write 0 to here... */
- REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
- bp->mf_mode);
- REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
- bp->mf_mode);
- REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
- bp->mf_mode);
- REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
- bp->mf_mode);
- }
-
if (IS_MF_SI(bp))
/*
* In switch independent mode, the TSTORM needs to accept
@@ -4401,25 +5135,22 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
*/
REG_WR8(bp, BAR_TSTRORM_INTMEM +
TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
+ else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
/* Zero this manually as its initialization is
currently missing in the initTool */
for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
REG_WR(bp, BAR_USTRORM_INTMEM +
USTORM_AGG_DATA_OFFSET + i * 4, 0);
- if (CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
CHIP_INT_MODE_IS_BC(bp) ?
HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
}
}
-static void bnx2x_init_internal_port(struct bnx2x *bp)
-{
- /* port */
- bnx2x_dcb_init_intmem_pfc(bp);
-}
-
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
switch (load_code) {
@@ -4429,7 +5160,7 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
/* no break */
case FW_MSG_CODE_DRV_LOAD_PORT:
- bnx2x_init_internal_port(bp);
+ /* nothing to do */
/* no break */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
@@ -4443,31 +5174,70 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
}
}
-static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
+static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
{
- struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+ return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
+}
- fp->state = BNX2X_FP_STATE_CLOSED;
+static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
+{
+ return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
+}
+
+static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
+{
+ if (CHIP_IS_E1x(fp->bp))
+ return BP_L_ID(fp->bp) + fp->index;
+ else /* We want Client ID to be the same as IGU SB ID for 57712 */
+ return bnx2x_fp_igu_sb_id(fp);
+}
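
The three helpers just added derive the per-queue IDs: the IGU and FW status-block ids are a base plus the queue index (skipping the slot reserved for CNIC), and on the newer chips the client id simply aliases the IGU status-block id. A small sketch of that arithmetic, with base values invented for the demonstration:

/*
 * Sketch only: per-fastpath id derivation as in bnx2x_fp_igu_sb_id(),
 * bnx2x_fp_fw_sb_id() and bnx2x_fp_cl_id().  Base values are made up.
 */
#include <stdio.h>

#define CNIC_SLOT 1 /* assumed: one status block reserved for CNIC */

struct fp_ids {
        unsigned int cl_id, fw_sb_id, igu_sb_id;
};

static struct fp_ids fp_ids_calc(int index, int igu_base_sb,
                                 int base_fw_ndsb, int is_e1x, int l_id)
{
        struct fp_ids ids;

        ids.igu_sb_id = igu_base_sb + index + CNIC_SLOT;
        ids.fw_sb_id = base_fw_ndsb + index + CNIC_SLOT;
        /* older chips: per-function client id; newer: alias the IGU SB id */
        ids.cl_id = is_e1x ? (unsigned int)(l_id + index) : ids.igu_sb_id;
        return ids;
}

int main(void)
{
        struct fp_ids q2 = fp_ids_calc(2, 16, 16, 0, 0);

        printf("queue 2: cl_id=%u fw_sb=%u igu_sb=%u\n",
               q2.cl_id, q2.fw_sb_id, q2.igu_sb_id);
        return 0;
}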
+
+static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+ u8 cos;
+ unsigned long q_type = 0;
+ u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
fp->cid = fp_idx;
- fp->cl_id = BP_L_ID(bp) + fp_idx;
- fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
- fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
+ fp->cl_id = bnx2x_fp_cl_id(fp);
+ fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
+ fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
/* qZone id equals to FW (per path) client id */
- fp->cl_qzone_id = fp->cl_id +
- BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
- ETH_MAX_RX_CLIENTS_E1H);
+ fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
+
/* init shortcut */
- fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
- USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
- USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+ fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
/* Setup SB indices */
fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
- fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
+
+ /* Configure Queue State object */
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
+
+ /* init tx data */
+ for_each_cos_in_tx_queue(fp, cos) {
+ bnx2x_init_txdata(bp, &fp->txdata[cos],
+ CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
+ FP_COS_TO_TXQ(fp, cos),
+ BNX2X_TX_SB_INDEX_BASE + cos);
+ cids[cos] = fp->txdata[cos].cid;
+ }
+
+ bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
+ BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+ /**
+ * Configure classification DBs: Always enable Tx switching
+ */
+ bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
"cl_id %d fw_sb %d igu_sb %d\n",
- fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
+ fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
fp->igu_sb_id);
bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
fp->fw_sb_id, fp->igu_sb_id);
@@ -4480,17 +5250,21 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
int i;
for_each_eth_queue(bp, i)
- bnx2x_init_fp_sb(bp, i);
+ bnx2x_init_eth_fp(bp, i);
#ifdef BCM_CNIC
if (!NO_FCOE(bp))
bnx2x_init_fcoe_fp(bp);
bnx2x_init_sb(bp, bp->cnic_sb_mapping,
BNX2X_VF_ID_INVALID, false,
- CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
+ bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
#endif
+ /* Initialize MOD_ABS interrupts */
+ bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
+ bp->common.shmem_base, bp->common.shmem2_base,
+ BP_PORT(bp));
/* ensure status block indices were read */
rmb();
@@ -4502,12 +5276,8 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
bnx2x_init_eq_ring(bp);
bnx2x_init_internal(bp, load_code);
bnx2x_pf_init(bp);
- bnx2x_init_ind_table(bp);
bnx2x_stats_init(bp);
- /* At this point, we are ready for interrupts */
- atomic_set(&bp->intr_sem, 0);
-
/* flush all before enabling interrupts */
mb();
mmiowb();
@@ -4537,8 +5307,7 @@ static int bnx2x_gunzip_init(struct bnx2x *bp)
if (bp->strm == NULL)
goto gunzip_nomem2;
- bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
- GFP_KERNEL);
+ bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
if (bp->strm->workspace == NULL)
goto gunzip_nomem3;
@@ -4562,7 +5331,7 @@ gunzip_nomem1:
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
if (bp->strm) {
- kfree(bp->strm->workspace);
+ vfree(bp->strm->workspace);
kfree(bp->strm);
bp->strm = NULL;
}
@@ -4711,8 +5480,8 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
msleep(50);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
msleep(50);
- bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
DP(NETIF_MSG_HW, "part2\n");
@@ -4776,8 +5545,8 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
msleep(50);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
msleep(50);
- bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
#ifndef BCM_CNIC
/* set NIC mode */
REG_WR(bp, PRS_REG_NIC_MODE, 1);
@@ -4797,7 +5566,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
{
REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
- if (CHIP_IS_E2(bp))
+ if (!CHIP_IS_E1x(bp))
REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
else
REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
@@ -4831,7 +5600,7 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
if (CHIP_REV_IS_FPGA(bp))
REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
- else if (CHIP_IS_E2(bp))
+ else if (!CHIP_IS_E1x(bp))
REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
(PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
@@ -4844,7 +5613,11 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
-/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
+
+ if (!CHIP_IS_E1x(bp))
+ /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
+ REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
+
REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
@@ -4853,10 +5626,24 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
static void bnx2x_reset_common(struct bnx2x *bp)
{
+ u32 val = 0x1400;
+
/* reset_common */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
0xd3ffff7f);
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
+
+ if (CHIP_IS_E3(bp)) {
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
+}
+
+static void bnx2x_setup_dmae(struct bnx2x *bp)
+{
+ bp->dmae_ready = 0;
+ spin_lock_init(&bp->dmae_lock);
}
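
The bnx2x_reset_common() change above builds the reset-register value from a base mask and, on E3 only, the two MSTAT block bits. A trivial sketch of that conditional composition; the bit positions are placeholders, not the real register layout:

/*
 * Sketch only: conditional composition of the RESET_REG_2 clear mask.
 * Bit positions are assumptions, not the real MISC_REGISTERS_* values.
 */
#include <stdio.h>

#define RESET_REG_2_BASE   0x1400u
#define RESET_REG_2_MSTAT0 (1u << 24)
#define RESET_REG_2_MSTAT1 (1u << 25)

static unsigned int reset_reg_2_mask(int is_e3)
{
        unsigned int val = RESET_REG_2_BASE;

        if (is_e3)
                val |= RESET_REG_2_MSTAT0 | RESET_REG_2_MSTAT1;
        return val;
}

int main(void)
{
        printf("E2: 0x%x  E3: 0x%x\n", reset_reg_2_mask(0), reset_reg_2_mask(1));
        return 0;
}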
static void bnx2x_init_pxp(struct bnx2x *bp)
@@ -4865,7 +5652,7 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
int r_order, w_order;
pci_read_config_word(bp->pdev,
- bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
+ pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
if (bp->mrrs == -1)
@@ -4973,7 +5760,7 @@ static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
}
-static void bnx2x_pf_disable(struct bnx2x *bp)
+void bnx2x_pf_disable(struct bnx2x *bp)
{
u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
val &= ~IGU_PF_CONF_FUNC_EN;
@@ -4983,22 +5770,56 @@ static void bnx2x_pf_disable(struct bnx2x *bp)
REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}
-static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
+static inline void bnx2x__common_init_phy(struct bnx2x *bp)
{
- u32 val, i;
+ u32 shmem_base[2], shmem2_base[2];
+ shmem_base[0] = bp->common.shmem_base;
+ shmem2_base[0] = bp->common.shmem2_base;
+ if (!CHIP_IS_E1x(bp)) {
+ shmem_base[1] =
+ SHMEM2_RD(bp, other_shmem_base_addr);
+ shmem2_base[1] =
+ SHMEM2_RD(bp, other_shmem2_base_addr);
+ }
+ bnx2x_acquire_phy_lock(bp);
+ bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
+ bp->common.chip_id);
+ bnx2x_release_phy_lock(bp);
+}
+
+/**
+ * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
+ *
+ * @bp: driver handle
+ */
+static int bnx2x_init_hw_common(struct bnx2x *bp)
+{
+ u32 val;
DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
+ /*
+ * take the UNDI lock to protect undi_unload flow from accessing
+ * registers while we're resetting the chip
+ */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+
bnx2x_reset_common(bp);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
- bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
- if (!CHIP_IS_E1(bp))
- REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
+ val = 0xfffc;
+ if (CHIP_IS_E3(bp)) {
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
+
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+
+ bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
- if (CHIP_IS_E2(bp)) {
- u8 fid;
+ if (!CHIP_IS_E1x(bp)) {
+ u8 abs_func_id;
/**
* In 4-port mode or 2-port mode we need to turn off master-enable
@@ -5007,29 +5828,30 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
* for all functions on the given path, this means 0,2,4,6 for
* path 0 and 1,3,5,7 for path 1
*/
- for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
- if (fid == BP_ABS_FUNC(bp)) {
+ for (abs_func_id = BP_PATH(bp);
+ abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
+ if (abs_func_id == BP_ABS_FUNC(bp)) {
REG_WR(bp,
PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
1);
continue;
}
- bnx2x_pretend_func(bp, fid);
+ bnx2x_pretend_func(bp, abs_func_id);
/* clear pf enable */
bnx2x_pf_disable(bp);
bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
}
- bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
if (CHIP_IS_E1(bp)) {
/* enable HW interrupt from PXP on USDM overflow
bit 16 on INT_MASK_0 */
REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
}
- bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
bnx2x_init_pxp(bp);
#ifdef __BIG_ENDIAN
@@ -5072,7 +5894,69 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
* This needs to be done by the first PF that is loaded in a path
* (i.e. common phase)
*/
- if (CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
+/* In E2 there is a bug in the timers block that can cause function 6 / 7
+ * (i.e. vnic3) to start even if it is marked as "scan-off".
+ * This occurs when a different function (func2,3) is being marked
+ * as "scan-off". A real-life scenario, for example, is a driver being
+ * loaded and unloaded while func6,7 are down. This will cause the timer to access
+ * the ilt, translate to a logical address and send a request to read/write.
+ * Since the ilt for the function that is down is not valid, this will cause
+ * a translation error which is unrecoverable.
+ * The Workaround is intended to make sure that when this happens nothing fatal
+ * will occur. The workaround:
+ * 1. First PF driver which loads on a path will:
+ * a. After taking the chip out of reset, by using pretend,
+ * it will write "0" to the following registers of
+ * the other vnics.
+ * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
+ * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
+ * And for itself it will write '1' to
+ * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
+ * dmae-operations (writing to pram for example.)
+ * note: can be done only for functions 6,7 but it is cleaner this
+ * way.
+ * b. Write zero+valid to the entire ILT.
+ * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
+ * VNIC3 (of that port). The range allocated will be the
+ * entire ILT. This is needed to prevent ILT range error.
+ * 2. Any PF driver load flow:
+ * a. ILT update with the physical addresses of the allocated
+ * logical pages.
+ * b. Wait 20msec. - note that this timeout is needed to make
+ * sure there are no requests in one of the PXP internal
+ * queues with "old" ILT addresses.
+ * c. PF enable in the PGLC.
+ * d. Clear the was_error of the PF in the PGLC. (could have
+ * occurred while the driver was down)
+ * e. PF enable in the CFC (WEAK + STRONG)
+ * f. Timers scan enable
+ * 3. PF driver unload flow:
+ * a. Clear the Timers scan_en.
+ * b. Polling for scan_on=0 for that PF.
+ * c. Clear the PF enable bit in the PXP.
+ * d. Clear the PF enable in the CFC (WEAK + STRONG)
+ * e. Write zero+valid to all ILT entries (The valid bit must
+ * stay set)
+ * f. If this is VNIC 3 of a port then also init
+ * first_timers_ilt_entry to zero and last_timers_ilt_entry
+ * to the last entry in the ILT.
+ *
+ * Notes:
+ * Currently the PF error in the PGLC is non-recoverable.
+ * In the future there will be a recovery routine for this error.
+ * Currently attention is masked.
+ * Having an MCP lock on the load/unload process does not guarantee that
+ * there is no Timer disable during Func6/7 enable. This is because the
+ * Timers scan is currently being cleared by the MCP on FLR.
+ * Step 2.d can be done only for PF6/7 and the driver can also check if
+ * there is error before clearing it. But the flow above is simpler and
+ * more general.
+ * All ILT entries are written by zero+valid and not just PF6/7
+ * ILT entries since in the future the ILT entries allocation for
+ * PF-s might be dynamic.
+ */
struct ilt_client_info ilt_cli;
struct bnx2x_ilt ilt;
memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
@@ -5086,7 +5970,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
/* Step 1: set zeroes to all ilt page entries with valid bit on
* Step 2: set the timers first/last ilt entry to point
* to the entire range to prevent ILT range error for 3rd/4th
- * vnic (this code assumes existence of the vnic)
+ * vnic (this code assumes existence of the vnic)
*
* both steps performed by call to bnx2x_ilt_client_init_op()
* with dummy TM client
@@ -5107,12 +5991,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
- if (CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
- bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
- bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
/* let the HW do its magic ... */
do {
@@ -5126,26 +6010,27 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
}
}
- bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
/* clean the DMAE memory */
bp->dmae_ready = 1;
- bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
+ bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
+
+ bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
+
+ bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
- bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
- bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
- if (CHIP_MODE_IS_4_PORT(bp))
- bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
/* QM queues pointers table */
bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
@@ -5155,57 +6040,51 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
REG_WR(bp, QM_REG_SOFT_RESET, 0);
#ifdef BCM_CNIC
- bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
#endif
- bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
-
- if (!CHIP_REV_IS_SLOW(bp)) {
+ if (!CHIP_REV_IS_SLOW(bp))
/* enable hw interrupt from doorbell Q */
REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
- }
- bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
- if (CHIP_MODE_IS_4_PORT(bp)) {
- REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
- REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
- }
+ bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
- bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
-#ifndef BCM_CNIC
- /* set NIC mode */
- REG_WR(bp, PRS_REG_NIC_MODE, 1);
-#endif
+
if (!CHIP_IS_E1(bp))
- REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
+ REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
- if (CHIP_IS_E2(bp)) {
- /* Bit-map indicating which L2 hdrs may appear after the
- basic Ethernet header */
- int has_ovlan = IS_MF_SD(bp);
- REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
- REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
- }
+ if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp))
+ /* Bit-map indicating which L2 hdrs may appear
+ * after the basic Ethernet header
+ */
+ REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
+ bp->path_has_ovlan ? 7 : 6);
- bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
- bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
- bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
- bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
- bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
+ if (!CHIP_IS_E1x(bp)) {
+ /* reset VFC memories */
+ REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+ VFC_MEMORIES_RST_REG_CAM_RST |
+ VFC_MEMORIES_RST_REG_RAM_RST);
+ REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+ VFC_MEMORIES_RST_REG_CAM_RST |
+ VFC_MEMORIES_RST_REG_RAM_RST);
- bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
+ msleep(20);
+ }
- if (CHIP_MODE_IS_4_PORT(bp))
- bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
/* sync semi rtc */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
@@ -5213,21 +6092,18 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
0x80000000);
- bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
- if (CHIP_IS_E2(bp)) {
- int has_ovlan = IS_MF_SD(bp);
- REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
- REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
- }
+ if (!CHIP_IS_E1x(bp))
+ REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
+ bp->path_has_ovlan ? 7 : 6);
REG_WR(bp, SRC_REG_SOFT_RST, 1);
- for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
- REG_WR(bp, i, random32());
- bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
+
#ifdef BCM_CNIC
REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
@@ -5248,11 +6124,11 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
"of cdu_context(%ld)\n",
(long)sizeof(union cdu_context));
- bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
val = (4 << 24) + (0 << 12) + 1024;
REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
- bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
/* enable context validation interrupt from CFC */
REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
@@ -5260,20 +6136,19 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
/* set the thresholds to prevent CFC/CDU race */
REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
- bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
- if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
+ if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
- bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
- bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
/* Reset PCIE errors for debug */
REG_WR(bp, 0x2814, 0xffffffff);
REG_WR(bp, 0x3820, 0xffffffff);
- if (CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
(PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
@@ -5287,21 +6162,15 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
}
- bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
- bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
-
- bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
+ bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
if (!CHIP_IS_E1(bp)) {
- REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
- REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
- }
- if (CHIP_IS_E2(bp)) {
- /* Bit-map indicating which L2 hdrs may appear after the
- basic Ethernet header */
- REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
+ /* in E3 this is done in the per-port section */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
}
+ if (CHIP_IS_E1H(bp))
+ /* not applicable for E2 (and above ...) */
+ REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
if (CHIP_REV_IS_SLOW(bp))
msleep(200);
@@ -5343,127 +6212,136 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
bnx2x_enable_blocks_attention(bp);
- if (CHIP_PARITY_ENABLED(bp))
- bnx2x_enable_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
if (!BP_NOMCP(bp)) {
- /* In E2 2-PORT mode, same ext phy is used for the two paths */
- if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
- CHIP_IS_E1x(bp)) {
- u32 shmem_base[2], shmem2_base[2];
- shmem_base[0] = bp->common.shmem_base;
- shmem2_base[0] = bp->common.shmem2_base;
- if (CHIP_IS_E2(bp)) {
- shmem_base[1] =
- SHMEM2_RD(bp, other_shmem_base_addr);
- shmem2_base[1] =
- SHMEM2_RD(bp, other_shmem2_base_addr);
- }
- bnx2x_acquire_phy_lock(bp);
- bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
- bp->common.chip_id);
- bnx2x_release_phy_lock(bp);
- }
+ if (CHIP_IS_E1x(bp))
+ bnx2x__common_init_phy(bp);
} else
BNX2X_ERR("Bootcode is missing - can not initialize link\n");
return 0;
}
+/**
+ * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
+ *
+ * @bp: driver handle
+ */
+static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
+{
+ int rc = bnx2x_init_hw_common(bp);
+
+ if (rc)
+ return rc;
+
+ /* In E2 2-PORT mode, same ext phy is used for the two paths */
+ if (!BP_NOMCP(bp))
+ bnx2x__common_init_phy(bp);
+
+ return 0;
+}
+
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
int port = BP_PORT(bp);
- int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
+ int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
u32 low, high;
u32 val;
+ bnx2x__link_reset(bp);
+
DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
- bnx2x_init_block(bp, PXP_BLOCK, init_stage);
- bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
+ bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
/* Timers bug workaround: disables the pf_master bit in pglue at
* common phase, we need to enable it here before any dmae access are
* attempted. Therefore we manually added the enable-master to the
* port phase (it also happens in the function phase)
*/
- if (CHIP_IS_E2(bp))
+ if (!CHIP_IS_E1x(bp))
REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
- bnx2x_init_block(bp, TCM_BLOCK, init_stage);
- bnx2x_init_block(bp, UCM_BLOCK, init_stage);
- bnx2x_init_block(bp, CCM_BLOCK, init_stage);
- bnx2x_init_block(bp, XCM_BLOCK, init_stage);
+ bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+ bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
+ bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XCM, init_phase);
/* QM cid (connection) count */
bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
#ifdef BCM_CNIC
- bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
+ bnx2x_init_block(bp, BLOCK_TM, init_phase);
REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
- bnx2x_init_block(bp, DQ_BLOCK, init_stage);
-
- if (CHIP_MODE_IS_4_PORT(bp))
- bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
+ bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
- bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
- if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
- /* no pause for emulation and FPGA */
- low = 0;
- high = 513;
- } else {
- if (IS_MF(bp))
- low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
- else if (bp->dev->mtu > 4096) {
- if (bp->flags & ONE_PORT_FLAG)
- low = 160;
- else {
- val = bp->dev->mtu;
- /* (24*1024 + val*4)/256 */
- low = 96 + (val/64) +
- ((val % 64) ? 1 : 0);
- }
- } else
- low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
- high = low + 56; /* 14*1024/256 */
- }
+ bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+
+ if (IS_MF(bp))
+ low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
+ else if (bp->dev->mtu > 4096) {
+ if (bp->flags & ONE_PORT_FLAG)
+ low = 160;
+ else {
+ val = bp->dev->mtu;
+ /* (24*1024 + val*4)/256 */
+ low = 96 + (val/64) +
+ ((val % 64) ? 1 : 0);
+ }
+ } else
+ low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
+ high = low + 56; /* 14*1024/256 */
REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
}
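
The BRB pause-threshold block kept in the hunk above expresses thresholds in 256-byte units: 'low' is derived from the MTU for jumbo frames and 'high' always sits 14 KiB (56 units) above it. A standalone sketch of that math:

/*
 * Sketch only: BRB pause threshold computation as in bnx2x_init_hw_port().
 */
#include <stdio.h>

static void brb_thresholds(int mtu, int one_port, int mf,
                           unsigned int *low, unsigned int *high)
{
        if (mf)
                *low = one_port ? 160 : 246;
        else if (mtu > 4096)
                /* (24*1024 + mtu*4) / 256, rounded up */
                *low = 96 + mtu / 64 + (mtu % 64 ? 1 : 0);
        else
                *low = one_port ? 80 : 160;

        *high = *low + 56; /* 14*1024 / 256 */
}

int main(void)
{
        unsigned int low, high;

        brb_thresholds(9000, 0, 0, &low, &high);
        printf("mtu 9000: low=%u high=%u\n", low, high);
        return 0;
}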
- if (CHIP_MODE_IS_4_PORT(bp)) {
- REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
- REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
- REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
- BRB1_REG_MAC_GUARANTIED_0), 40);
- }
+ if (CHIP_MODE_IS_4_PORT(bp))
+ REG_WR(bp, (BP_PORT(bp) ?
+ BRB1_REG_MAC_GUARANTIED_1 :
+ BRB1_REG_MAC_GUARANTIED_0), 40);
- bnx2x_init_block(bp, PRS_BLOCK, init_stage);
- bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
- bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
- bnx2x_init_block(bp, USDM_BLOCK, init_stage);
- bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
+ bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+ if (CHIP_IS_E3B0(bp))
+ /* Ovlan exists only if we are in multi-function +
+ * switch-dependent mode, in switch-independent there
+ * switch-dependent mode; in switch-independent mode there
+ * are no ovlan headers
+ REG_WR(bp, BP_PORT(bp) ?
+ PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+ PRS_REG_HDRS_AFTER_BASIC_PORT_0,
+ (bp->path_has_ovlan ? 7 : 6));
- bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
- bnx2x_init_block(bp, USEM_BLOCK, init_stage);
- bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
- bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
- if (CHIP_MODE_IS_4_PORT(bp))
- bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
+ bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
- bnx2x_init_block(bp, UPB_BLOCK, init_stage);
- bnx2x_init_block(bp, XPB_BLOCK, init_stage);
+ bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_XPB, init_phase);
- bnx2x_init_block(bp, PBF_BLOCK, init_stage);
+ bnx2x_init_block(bp, BLOCK_PBF, init_phase);
- if (!CHIP_IS_E2(bp)) {
+ if (CHIP_IS_E1x(bp)) {
/* configure PBF to work without PAUSE mtu 9000 */
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
@@ -5479,20 +6357,20 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
}
#ifdef BCM_CNIC
- bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
+ bnx2x_init_block(bp, BLOCK_SRC, init_phase);
#endif
- bnx2x_init_block(bp, CDU_BLOCK, init_stage);
- bnx2x_init_block(bp, CFC_BLOCK, init_stage);
+ bnx2x_init_block(bp, BLOCK_CDU, init_phase);
+ bnx2x_init_block(bp, BLOCK_CFC, init_phase);
if (CHIP_IS_E1(bp)) {
REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
}
- bnx2x_init_block(bp, HC_BLOCK, init_stage);
+ bnx2x_init_block(bp, BLOCK_HC, init_phase);
- bnx2x_init_block(bp, IGU_BLOCK, init_stage);
+ bnx2x_init_block(bp, BLOCK_IGU, init_phase);
- bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
/* init aeu_mask_attn_func_0/1:
* - SF mode: bits 3-7 are masked. only bits 0-2 are in use
* - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
@@ -5502,22 +6380,31 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
val |= CHIP_IS_E1(bp) ? 0 : 0x10;
REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
- bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
- bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
- bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
- bnx2x_init_block(bp, DBU_BLOCK, init_stage);
- bnx2x_init_block(bp, DBG_BLOCK, init_stage);
+ bnx2x_init_block(bp, BLOCK_NIG, init_phase);
- bnx2x_init_block(bp, NIG_BLOCK, init_stage);
+ if (!CHIP_IS_E1x(bp)) {
+ /* Bit-map indicating which L2 hdrs may appear after the
+ * basic Ethernet header
+ */
+ REG_WR(bp, BP_PORT(bp) ?
+ NIG_REG_P1_HDRS_AFTER_BASIC :
+ NIG_REG_P0_HDRS_AFTER_BASIC,
+ IS_MF_SD(bp) ? 7 : 6);
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+ if (CHIP_IS_E3(bp))
+ REG_WR(bp, BP_PORT(bp) ?
+ NIG_REG_LLH1_MF_MODE :
+ NIG_REG_LLH_MF_MODE, IS_MF(bp));
+ }
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
if (!CHIP_IS_E1(bp)) {
/* 0x2 disable mf_ov, 0x1 enable */
REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
(IS_MF_SD(bp) ? 0x1 : 0x2));
- if (CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
val = 0;
switch (bp->mf_mode) {
case MULTI_FUNCTION_SD:
@@ -5538,17 +6425,16 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
}
}
- bnx2x_init_block(bp, MCP_BLOCK, init_stage);
- bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
- if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
- bp->common.shmem2_base, port)) {
+
+ /* If SPIO5 is set to generate interrupts, enable it for this port */
+ val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+ if (val & (1 << MISC_REGISTERS_SPIO_5)) {
u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
val = REG_RD(bp, reg_addr);
val |= AEU_INPUTS_ATTN_BITS_SPIO5;
REG_WR(bp, reg_addr, val);
}
- bnx2x__link_reset(bp);
return 0;
}
@@ -5567,7 +6453,7 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
- bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
+ bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
}
static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
@@ -5581,6 +6467,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
{
int port = BP_PORT(bp);
int func = BP_FUNC(bp);
+ int init_phase = PHASE_PF0 + func;
struct bnx2x_ilt *ilt = BP_ILT(bp);
u16 cdu_ilt_start;
u32 addr, val;
@@ -5589,6 +6476,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
+ /* FLR cleanup - hmmm */
+ if (!CHIP_IS_E1x(bp))
+ bnx2x_pf_flr_clnup(bp);
+
/* set MSI reconfigure capability */
if (bp->common.int_block == INT_BLOCK_HC) {
addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
@@ -5597,6 +6488,9 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
REG_WR(bp, addr, val);
}
+ bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+ bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
+
ilt = BP_ILT(bp);
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
@@ -5622,7 +6516,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif /* BCM_CNIC */
- if (CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
u32 pf_conf = IGU_PF_CONF_FUNC_EN;
/* Turn on a single ISR mode in IGU if driver is going to use
@@ -5649,58 +6543,55 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
bp->dmae_ready = 1;
- bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
- if (CHIP_IS_E2(bp))
+ if (!CHIP_IS_E1x(bp))
REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
- bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
-
- if (CHIP_IS_E2(bp)) {
- REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
- BP_PATH(bp));
- REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
- BP_PATH(bp));
- }
-
- if (CHIP_MODE_IS_4_PORT(bp))
- bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
-
- if (CHIP_IS_E2(bp))
+ bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+ bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+ bnx2x_init_block(bp, BLOCK_NIG, init_phase);
+ bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+ bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+ bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XCM, init_phase);
+ bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
+
+ if (!CHIP_IS_E1x(bp))
REG_WR(bp, QM_REG_PF_EN, 1);
- bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
-
- if (CHIP_MODE_IS_4_PORT(bp))
- bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
-
- bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
- if (CHIP_IS_E2(bp))
+ if (!CHIP_IS_E1x(bp)) {
+ REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+ }
+ bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+ bnx2x_init_block(bp, BLOCK_TM, init_phase);
+ bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+ bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+ bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+ bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+ bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+ bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+ if (!CHIP_IS_E1x(bp))
REG_WR(bp, PBF_REG_DISABLE_PF, 0);
- bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, BLOCK_CDU, init_phase);
- bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, BLOCK_CFC, init_phase);
- if (CHIP_IS_E2(bp))
+ if (!CHIP_IS_E1x(bp))
REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
if (IS_MF(bp)) {
@@ -5708,7 +6599,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
}
- bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
/* HC init per function */
if (bp->common.int_block == INT_BLOCK_HC) {
@@ -5718,21 +6609,21 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
}
- bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, BLOCK_HC, init_phase);
} else {
int num_segs, sb_idx, prod_offset;
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
- if (CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
}
- bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
+ bnx2x_init_block(bp, BLOCK_IGU, init_phase);
- if (CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
int dsb_idx = 0;
/**
* Producer memory:
@@ -5827,13 +6718,6 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
REG_WR(bp, 0x2114, 0xffffffff);
REG_WR(bp, 0x2120, 0xffffffff);
- bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
- bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
-
if (CHIP_IS_E1x(bp)) {
main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
main_mem_base = HC_REG_MAIN_MEMORY +
@@ -5859,65 +6743,26 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
REG_RD(bp, main_mem_prty_clr);
}
+#ifdef BNX2X_STOP_ON_ERROR
+ /* Enable STORMs SP logging */
+ REG_WR8(bp, BAR_USTRORM_INTMEM +
+ USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM +
+ TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+ REG_WR8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+#endif
+
bnx2x_phy_probe(&bp->link_params);
return 0;
}
-int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
-{
- int rc = 0;
-
- DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
- BP_ABS_FUNC(bp), load_code);
-
- bp->dmae_ready = 0;
- spin_lock_init(&bp->dmae_lock);
-
- switch (load_code) {
- case FW_MSG_CODE_DRV_LOAD_COMMON:
- case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
- rc = bnx2x_init_hw_common(bp, load_code);
- if (rc)
- goto init_hw_err;
- /* no break */
-
- case FW_MSG_CODE_DRV_LOAD_PORT:
- rc = bnx2x_init_hw_port(bp);
- if (rc)
- goto init_hw_err;
- /* no break */
-
- case FW_MSG_CODE_DRV_LOAD_FUNCTION:
- rc = bnx2x_init_hw_func(bp);
- if (rc)
- goto init_hw_err;
- break;
-
- default:
- BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
- break;
- }
-
- if (!BP_NOMCP(bp)) {
- int mb_idx = BP_FW_MB_IDX(bp);
-
- bp->fw_drv_pulse_wr_seq =
- (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
- DRV_PULSE_SEQ_MASK);
- DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
- }
-
-init_hw_err:
- bnx2x_gunzip_end(bp);
-
- return rc;
-}
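
The removed bnx2x_init_hw() above relied on switch fall-through so that a COMMON load also runs the PORT and FUNCTION phases (its role is now taken by the per-phase functions). A minimal sketch of that fall-through dispatch pattern, with phase names invented for the example:

/*
 * Sketch only: cascaded HW-init dispatch via switch fall-through.
 */
#include <stdio.h>

enum load_phase { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

static void init_hw(enum load_phase phase)
{
        switch (phase) {
        case LOAD_COMMON:
                printf("common phase\n");
                /* fall through: a common load also does port + function init */
        case LOAD_PORT:
                printf("port phase\n");
                /* fall through */
        case LOAD_FUNCTION:
                printf("function phase\n");
                break;
        }
}

int main(void)
{
        init_hw(LOAD_COMMON);   /* runs all three phases */
        init_hw(LOAD_FUNCTION); /* runs only the function phase */
        return 0;
}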
void bnx2x_free_mem(struct bnx2x *bp)
{
- bnx2x_gunzip_end(bp);
-
/* fastpath */
bnx2x_free_fp_mem(bp);
/* end of fastpath */
@@ -5925,6 +6770,9 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
sizeof(struct host_sp_status_block));
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
@@ -5936,7 +6784,7 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_FREE(bp->ilt->lines);
#ifdef BCM_CNIC
- if (CHIP_IS_E2(bp))
+ if (!CHIP_IS_E1x(bp))
BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
sizeof(struct host_hc_status_block_e2));
else
@@ -5950,18 +6798,67 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
+}
+
+static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+{
+ int num_groups;
+
+ /* number of eth_queues */
+ u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+
+ /* Total number of FW statistics requests =
+ * 1 for port stats + 1 for PF stats + num_eth_queues */
+ bp->fw_stats_num = 2 + num_queue_stats;
- BNX2X_FREE(bp->rx_indir_table);
+
+ /* Request is built from stats_query_header and an array of
+ * stats_query_cmd_group each of which contains
+ * STATS_QUERY_CMD_COUNT rules. The real number of requests is
+ * configured in the stats_query_header.
+ */
+ num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
+ (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
+
+ bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
+ num_groups * sizeof(struct stats_query_cmd_group);
+
+ * Data for statistics requests + stats_counter
+ *
+ * stats_counter holds per-STORM counters that are incremented
+ * when STORM has finished with the current request.
+ */
+ bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
+ sizeof(struct per_pf_stats) +
+ sizeof(struct per_queue_stats) * num_queue_stats +
+ sizeof(struct stats_counter);
+
+ BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+ /* Set shortcuts */
+ bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
+ bp->fw_stats_req_mapping = bp->fw_stats_mapping;
+
+ bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
+ ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
+
+ bp->fw_stats_data_mapping = bp->fw_stats_mapping +
+ bp->fw_stats_req_sz;
+ return 0;
+
+alloc_mem_err:
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ return -ENOMEM;
}
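
The sizing logic in bnx2x_alloc_fw_stats_mem() above issues one request per port, per PF and per ethernet queue, packs them into fixed-size command groups, and appends the reply data area. A standalone sketch of the group/size arithmetic; the group size and structure sizes below are placeholders, not the firmware's values:

/*
 * Sketch only: FW statistics request sizing.
 * CMDS_PER_GROUP and the byte sizes are assumptions for illustration.
 */
#include <stdio.h>
#include <stddef.h>

#define CMDS_PER_GROUP 16

static void fw_stats_sizes(int num_eth_queues, size_t hdr_sz, size_t group_sz,
                           size_t *req_sz, int *num_groups)
{
        int num_stats = 2 + num_eth_queues; /* port stats + PF stats + queues */

        *num_groups = num_stats / CMDS_PER_GROUP +
                      (num_stats % CMDS_PER_GROUP ? 1 : 0);
        *req_sz = hdr_sz + *num_groups * group_sz;
}

int main(void)
{
        size_t req_sz;
        int groups;

        fw_stats_sizes(18, 16, 64, &req_sz, &groups);
        printf("groups=%d request size=%zu bytes\n", groups, req_sz);
        return 0;
}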
int bnx2x_alloc_mem(struct bnx2x *bp)
{
- if (bnx2x_gunzip_init(bp))
- return -ENOMEM;
-
#ifdef BCM_CNIC
- if (CHIP_IS_E2(bp))
+ if (!CHIP_IS_E1x(bp))
+ /* size = the status block + ramrod buffers */
BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
sizeof(struct host_hc_status_block_e2));
else
@@ -5979,7 +6876,11 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
- bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
+ /* Allocated memory for FW statistics */
+ if (bnx2x_alloc_fw_stats_mem(bp))
+ goto alloc_mem_err;
+
+ bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
bp->context.size);
@@ -5996,8 +6897,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
- BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
- TSTORM_INDIRECTION_TABLE_SIZE);
/* fastpath */
/* need to be done at the end, since it's self adjusting to amount
@@ -6015,629 +6914,75 @@ alloc_mem_err:
/*
* Init service functions
*/
-static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
- int *state_p, int flags);
-
-int bnx2x_func_start(struct bnx2x *bp)
-{
- bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
-
- /* Wait for completion */
- return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
- WAIT_RAMROD_COMMON);
-}
-
-static int bnx2x_func_stop(struct bnx2x *bp)
-{
- bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
-
- /* Wait for completion */
- return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
- 0, &(bp->state), WAIT_RAMROD_COMMON);
-}
-
-/**
- * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips
- *
- * @bp: driver handle
- * @set: set or clear an entry (1 or 0)
- * @mac: pointer to a buffer containing a MAC
- * @cl_bit_vec: bit vector of clients to register a MAC for
- * @cam_offset: offset in a CAM to use
- * @is_bcast: is the set MAC a broadcast address (for E1 only)
- */
-static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
- u32 cl_bit_vec, u8 cam_offset,
- u8 is_bcast)
-{
- struct mac_configuration_cmd *config =
- (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
- int ramrod_flags = WAIT_RAMROD_COMMON;
-
- bp->set_mac_pending = 1;
-
- config->hdr.length = 1;
- config->hdr.offset = cam_offset;
- config->hdr.client_id = 0xff;
- /* Mark the single MAC configuration ramrod as opposed to a
- * UC/MC list configuration).
- */
- config->hdr.echo = 1;
-
- /* primary MAC */
- config->config_table[0].msb_mac_addr =
- swab16(*(u16 *)&mac[0]);
- config->config_table[0].middle_mac_addr =
- swab16(*(u16 *)&mac[2]);
- config->config_table[0].lsb_mac_addr =
- swab16(*(u16 *)&mac[4]);
- config->config_table[0].clients_bit_vector =
- cpu_to_le32(cl_bit_vec);
- config->config_table[0].vlan_id = 0;
- config->config_table[0].pf_id = BP_FUNC(bp);
- if (set)
- SET_FLAG(config->config_table[0].flags,
- MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
- T_ETH_MAC_COMMAND_SET);
- else
- SET_FLAG(config->config_table[0].flags,
- MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
- T_ETH_MAC_COMMAND_INVALIDATE);
-
- if (is_bcast)
- SET_FLAG(config->config_table[0].flags,
- MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
-
- DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
- (set ? "setting" : "clearing"),
- config->config_table[0].msb_mac_addr,
- config->config_table[0].middle_mac_addr,
- config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
-
- mb();
-
- bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
- U64_HI(bnx2x_sp_mapping(bp, mac_config)),
- U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
-
- /* Wait for a completion */
- bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
-}
-
-static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
- int *state_p, int flags)
-{
- /* can take a while if any port is running */
- int cnt = 5000;
- u8 poll = flags & WAIT_RAMROD_POLL;
- u8 common = flags & WAIT_RAMROD_COMMON;
-
- DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
- poll ? "polling" : "waiting", state, idx);
-
- might_sleep();
- while (cnt--) {
- if (poll) {
- if (common)
- bnx2x_eq_int(bp);
- else {
- bnx2x_rx_int(bp->fp, 10);
- /* if index is different from 0
- * the reply for some commands will
- * be on the non default queue
- */
- if (idx)
- bnx2x_rx_int(&bp->fp[idx], 10);
- }
- }
-
- mb(); /* state is changed by bnx2x_sp_event() */
- if (*state_p == state) {
-#ifdef BNX2X_STOP_ON_ERROR
- DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
-#endif
- return 0;
- }
-
- msleep(1);
-
- if (bp->panic)
- return -EIO;
- }
-
- /* timeout! */
- BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
- poll ? "polling" : "waiting", state, idx);
-#ifdef BNX2X_STOP_ON_ERROR
- bnx2x_panic();
-#endif
-
- return -EBUSY;
-}
-
-static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
-{
- if (CHIP_IS_E1H(bp))
- return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
- else if (CHIP_MODE_IS_4_PORT(bp))
- return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
- else
- return E2_FUNC_MAX * rel_offset + BP_VN(bp);
-}
-
-/**
- * LLH CAM line allocations: currently only iSCSI and ETH macs are
- * relevant. In addition, current implementation is tuned for a
- * single ETH MAC.
- */
-enum {
- LLH_CAM_ISCSI_ETH_LINE = 0,
- LLH_CAM_ETH_LINE,
- LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
-};
-
-static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
- int set,
- unsigned char *dev_addr,
- int index)
-{
- u32 wb_data[2];
- u32 mem_offset, ena_offset, mem_index;
- /**
- * indexes mapping:
- * 0..7 - goes to MEM
- * 8..15 - goes to MEM2
- */
-
- if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
- return;
-
- /* calculate memory start offset according to the mapping
- * and index in the memory */
- if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
- mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
- NIG_REG_LLH0_FUNC_MEM;
- ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
- NIG_REG_LLH0_FUNC_MEM_ENABLE;
- mem_index = index;
- } else {
- mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
- NIG_REG_P0_LLH_FUNC_MEM2;
- ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
- NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
- mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
- }
-
- if (set) {
- /* LLH_FUNC_MEM is a u64 WB register */
- mem_offset += 8*mem_index;
- wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
- (dev_addr[4] << 8) | dev_addr[5]);
- wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
-
- REG_WR_DMAE(bp, mem_offset, wb_data, 2);
- }
-
- /* enable/disable the entry */
- REG_WR(bp, ena_offset + 4*mem_index, set);
-
-}
-
-void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+ struct bnx2x_vlan_mac_obj *obj, bool set,
+ int mac_type, unsigned long *ramrod_flags)
{
- u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
- bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
+ int rc;
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
- /* networking MAC */
- bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
- (1 << bp->fp->cl_id), cam_offset , 0);
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
- bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
+ /* Fill general parameters */
+ ramrod_param.vlan_mac_obj = obj;
+ ramrod_param.ramrod_flags = *ramrod_flags;
- if (CHIP_IS_E1(bp)) {
- /* broadcast MAC */
- static const u8 bcast[ETH_ALEN] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
- };
- bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
- }
-}
+ /* Fill a user request section if needed */
+ if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+ memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
-static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
-{
- return CHIP_REV_IS_SLOW(bp) ?
- (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
- (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
-}
+ __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
-/* set mc list, do not wait as wait implies sleep and
- * set_rx_mode can be invoked from non-sleepable context.
- *
- * Instead we use the same ramrod data buffer each time we need
- * to configure a list of addresses, and use the fact that the
- * list of MACs is changed in an incremental way and that the
- * function is called under the netif_addr_lock. A temporary
- * inconsistent CAM configuration (possible in case of a very fast
- * sequence of add/del/add on the host side) will shortly be
- * restored by the handler of the last ramrod.
- */
-static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
-{
- int i = 0, old;
- struct net_device *dev = bp->dev;
- u8 offset = bnx2x_e1_cam_mc_offset(bp);
- struct netdev_hw_addr *ha;
- struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
- dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
-
- if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
- return -EINVAL;
-
- netdev_for_each_mc_addr(ha, dev) {
- /* copy mac */
- config_cmd->config_table[i].msb_mac_addr =
- swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
- config_cmd->config_table[i].middle_mac_addr =
- swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
- config_cmd->config_table[i].lsb_mac_addr =
- swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
-
- config_cmd->config_table[i].vlan_id = 0;
- config_cmd->config_table[i].pf_id = BP_FUNC(bp);
- config_cmd->config_table[i].clients_bit_vector =
- cpu_to_le32(1 << BP_L_ID(bp));
-
- SET_FLAG(config_cmd->config_table[i].flags,
- MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
- T_ETH_MAC_COMMAND_SET);
-
- DP(NETIF_MSG_IFUP,
- "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
- config_cmd->config_table[i].msb_mac_addr,
- config_cmd->config_table[i].middle_mac_addr,
- config_cmd->config_table[i].lsb_mac_addr);
- i++;
- }
- old = config_cmd->hdr.length;
- if (old > i) {
- for (; i < old; i++) {
- if (CAM_IS_INVALID(config_cmd->
- config_table[i])) {
- /* already invalidated */
- break;
- }
- /* invalidate */
- SET_FLAG(config_cmd->config_table[i].flags,
- MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
- T_ETH_MAC_COMMAND_INVALIDATE);
- }
+ /* Set the command: ADD or DEL */
+ if (set)
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ else
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
}
- wmb();
-
- config_cmd->hdr.length = i;
- config_cmd->hdr.offset = offset;
- config_cmd->hdr.client_id = 0xff;
- /* Mark that this ramrod doesn't use bp->set_mac_pending for
- * synchronization.
- */
- config_cmd->hdr.echo = 0;
-
- mb();
-
- return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
- U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
-}
-
-void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
-{
- int i;
- struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
- dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
- int ramrod_flags = WAIT_RAMROD_COMMON;
- u8 offset = bnx2x_e1_cam_mc_offset(bp);
-
- for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
- SET_FLAG(config_cmd->config_table[i].flags,
- MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
- T_ETH_MAC_COMMAND_INVALIDATE);
-
- wmb();
-
- config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
- config_cmd->hdr.offset = offset;
- config_cmd->hdr.client_id = 0xff;
- /* We'll wait for a completion this time... */
- config_cmd->hdr.echo = 1;
-
- bp->set_mac_pending = 1;
-
- mb();
-
- bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
- U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
-
- /* Wait for a completion */
- bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
- ramrod_flags);
-
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc < 0)
+ BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
+ return rc;
}
-/* Accept one or more multicasts */
-static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
+int bnx2x_del_all_macs(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ int mac_type, bool wait_for_comp)
{
- struct net_device *dev = bp->dev;
- struct netdev_hw_addr *ha;
- u32 mc_filter[MC_HASH_SIZE];
- u32 crc, bit, regidx;
- int i;
-
- memset(mc_filter, 0, 4 * MC_HASH_SIZE);
-
- netdev_for_each_mc_addr(ha, dev) {
- DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
- bnx2x_mc_addr(ha));
-
- crc = crc32c_le(0, bnx2x_mc_addr(ha),
- ETH_ALEN);
- bit = (crc >> 24) & 0xff;
- regidx = bit >> 5;
- bit &= 0x1f;
- mc_filter[regidx] |= (1 << bit);
- }
+ int rc;
+ unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
- for (i = 0; i < MC_HASH_SIZE; i++)
- REG_WR(bp, MC_HASH_OFFSET(bp, i),
- mc_filter[i]);
+ /* Wait for completion if requested */
+ if (wait_for_comp)
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- return 0;
-}
+ /* Set the mac type of addresses we want to clear */
+ __set_bit(mac_type, &vlan_mac_flags);
-void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
-{
- int i;
+ rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete MACs: %d\n", rc);
- for (i = 0; i < MC_HASH_SIZE; i++)
- REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+ return rc;
}
-#ifdef BCM_CNIC
-/**
- * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
- *
- * @bp: driver handle
- * @set: set or clear the CAM entry
- *
- * This function will wait until the ramdord completion returns.
- * Return 0 if success, -ENODEV if ramrod doesn't return.
- */
-static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
- u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
- bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
- u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
- BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
- u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
- u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
-
- /* Send a SET_MAC ramrod */
- bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
- cam_offset, 0);
+ unsigned long ramrod_flags = 0;
- bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
+ DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
- return 0;
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ /* Eth MAC is set on RSS leading client (fp[0]) */
+ return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
+ BNX2X_ETH_MAC, &ramrod_flags);
}
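/*
 * A self-contained model of the pattern used by bnx2x_set_mac_one() above:
 * zero a parameter block, copy the caller's ramrod flags, fill the user
 * request (MAC + ADD/DEL command) unless this is a continuation, and hand
 * everything to a single configuration routine. All types and flag values
 * here are simplified stand-ins, not the driver's structures.
 */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

enum { FLAG_COMP_WAIT = 0x1, FLAG_CONT = 0x2 };	/* RAMROD_* look-alikes */
enum mac_cmd { CMD_ADD, CMD_DEL };

struct mac_params {
	unsigned long flags;
	unsigned char mac[6];
	enum mac_cmd cmd;
};

static int config_mac(const struct mac_params *p)
{
	/* a real implementation would post a ramrod and maybe wait here */
	printf("%s %02x:..:%02x (flags %#lx)\n",
	       p->cmd == CMD_ADD ? "ADD" : "DEL", p->mac[0], p->mac[5], p->flags);
	return 0;
}

static int set_mac_one(const unsigned char *mac, bool set, unsigned long flags)
{
	struct mac_params p;

	memset(&p, 0, sizeof(p));
	p.flags = flags;
	if (!(flags & FLAG_CONT)) {	/* fill the user request section */
		memcpy(p.mac, mac, sizeof(p.mac));
		p.cmd = set ? CMD_ADD : CMD_DEL;
	}
	return config_mac(&p);
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

	return set_mac_one(mac, true, FLAG_COMP_WAIT);
}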
-/**
- * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s)
- *
- * @bp: driver handle
- * @set: set or clear the CAM entry
- *
- * This function will wait until the ramrod completion returns.
- * Returns 0 if success, -ENODEV if ramrod doesn't return.
- */
-int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
-{
- u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
- /**
- * CAM allocation for E1H
- * eth unicasts: by func number
- * iscsi: by func number
- * fip unicast: by func number
- * fip multicast: by func number
- */
- bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
- cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
-
- return 0;
-}
-
-int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
+int bnx2x_setup_leading(struct bnx2x *bp)
{
- u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
-
- /**
- * CAM allocation for E1H
- * eth unicasts: by func number
- * iscsi: by func number
- * fip unicast: by func number
- * fip multicast: by func number
- */
- bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
- bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
-
- return 0;
-}
-#endif
-
-static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
- struct bnx2x_client_init_params *params,
- u8 activate,
- struct client_init_ramrod_data *data)
-{
- /* Clear the buffer */
- memset(data, 0, sizeof(*data));
-
- /* general */
- data->general.client_id = params->rxq_params.cl_id;
- data->general.statistics_counter_id = params->rxq_params.stat_id;
- data->general.statistics_en_flg =
- (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
- data->general.is_fcoe_flg =
- (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
- data->general.activate_flg = activate;
- data->general.sp_client_id = params->rxq_params.spcl_id;
-
- /* Rx data */
- data->rx.tpa_en_flg =
- (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
- data->rx.vmqueue_mode_en_flg = 0;
- data->rx.cache_line_alignment_log_size =
- params->rxq_params.cache_line_log;
- data->rx.enable_dynamic_hc =
- (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
- data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
- data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
- data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
-
- /* We don't set drop flags */
- data->rx.drop_ip_cs_err_flg = 0;
- data->rx.drop_tcp_cs_err_flg = 0;
- data->rx.drop_ttl0_flg = 0;
- data->rx.drop_udp_cs_err_flg = 0;
-
- data->rx.inner_vlan_removal_enable_flg =
- (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
- data->rx.outer_vlan_removal_enable_flg =
- (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
- data->rx.status_block_id = params->rxq_params.fw_sb_id;
- data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
- data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
- data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
- data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
- data->rx.bd_page_base.lo =
- cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
- data->rx.bd_page_base.hi =
- cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
- data->rx.sge_page_base.lo =
- cpu_to_le32(U64_LO(params->rxq_params.sge_map));
- data->rx.sge_page_base.hi =
- cpu_to_le32(U64_HI(params->rxq_params.sge_map));
- data->rx.cqe_page_base.lo =
- cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
- data->rx.cqe_page_base.hi =
- cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
- data->rx.is_leading_rss =
- (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
- data->rx.is_approx_mcast = data->rx.is_leading_rss;
-
- /* Tx data */
- data->tx.enforce_security_flg = 0; /* VF specific */
- data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
- data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
- data->tx.mtu = 0; /* VF specific */
- data->tx.tx_bd_page_base.lo =
- cpu_to_le32(U64_LO(params->txq_params.dscr_map));
- data->tx.tx_bd_page_base.hi =
- cpu_to_le32(U64_HI(params->txq_params.dscr_map));
-
- /* flow control data */
- data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
- data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
- data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
- data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
- data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
- data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
- data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
-
- data->fc.safc_group_num = params->txq_params.cos;
- data->fc.safc_group_en_flg =
- (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
- data->fc.traffic_type =
- (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
- LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
-}
-
-static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
-{
- /* ustorm cxt validation */
- cxt->ustorm_ag_context.cdu_usage =
- CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
- ETH_CONNECTION_TYPE);
- /* xcontext validation */
- cxt->xstorm_ag_context.cdu_reserved =
- CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
- ETH_CONNECTION_TYPE);
-}
-
-static int bnx2x_setup_fw_client(struct bnx2x *bp,
- struct bnx2x_client_init_params *params,
- u8 activate,
- struct client_init_ramrod_data *data,
- dma_addr_t data_mapping)
-{
- u16 hc_usec;
- int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
- int ramrod_flags = 0, rc;
-
- /* HC and context validation values */
- hc_usec = params->txq_params.hc_rate ?
- 1000000 / params->txq_params.hc_rate : 0;
- bnx2x_update_coalesce_sb_index(bp,
- params->txq_params.fw_sb_id,
- params->txq_params.sb_cq_index,
- !(params->txq_params.flags & QUEUE_FLG_HC),
- hc_usec);
-
- *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
-
- hc_usec = params->rxq_params.hc_rate ?
- 1000000 / params->rxq_params.hc_rate : 0;
- bnx2x_update_coalesce_sb_index(bp,
- params->rxq_params.fw_sb_id,
- params->rxq_params.sb_cq_index,
- !(params->rxq_params.flags & QUEUE_FLG_HC),
- hc_usec);
-
- bnx2x_set_ctx_validation(params->rxq_params.cxt,
- params->rxq_params.cid);
-
- /* zero stats */
- if (params->txq_params.flags & QUEUE_FLG_STATS)
- storm_memset_xstats_zero(bp, BP_PORT(bp),
- params->txq_params.stat_id);
-
- if (params->rxq_params.flags & QUEUE_FLG_STATS) {
- storm_memset_ustats_zero(bp, BP_PORT(bp),
- params->rxq_params.stat_id);
- storm_memset_tstats_zero(bp, BP_PORT(bp),
- params->rxq_params.stat_id);
- }
-
- /* Fill the ramrod data */
- bnx2x_fill_cl_init_data(bp, params, activate, data);
-
- /* SETUP ramrod.
- *
- * bnx2x_sp_post() takes a spin_lock thus no other explict memory
- * barrier except from mmiowb() is needed to impose a
- * proper ordering of memory operations.
- */
- mmiowb();
-
-
- bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
- U64_HI(data_mapping), U64_LO(data_mapping), 0);
-
- /* Wait for completion */
- rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
- params->ramrod_params.index,
- params->ramrod_params.pstate,
- ramrod_flags);
- return rc;
+ return bnx2x_setup_queue(bp, &bp->fp[0], 1);
}
/**
@@ -6647,16 +6992,14 @@ static int bnx2x_setup_fw_client(struct bnx2x *bp,
*
* In case of MSI-X it will also try to enable MSI-X.
*/
-static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
+static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
- int rc = 0;
-
- switch (bp->int_mode) {
+ switch (int_mode) {
case INT_MODE_MSI:
bnx2x_enable_msi(bp);
/* falling through... */
case INT_MODE_INTx:
- bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
+ bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
break;
default:
@@ -6670,8 +7013,7 @@ static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
* so try to enable MSI-X with the requested number of fp's
* and fallback to MSI or legacy INTx with one fp
*/
- rc = bnx2x_enable_msix(bp);
- if (rc) {
+ if (bnx2x_enable_msix(bp)) {
/* failed to enable MSI-X */
if (bp->multi_mode)
DP(NETIF_MSG_IFUP,
@@ -6679,17 +7021,15 @@ static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
"enable MSI-X (%d), "
"set number of queues to %d\n",
bp->num_queues,
- 1 + NONE_ETH_CONTEXT_USE);
- bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
+ 1 + NON_ETH_CONTEXT_USE);
+ bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+ /* Try to enable MSI */
if (!(bp->flags & DISABLE_MSI_FLAG))
bnx2x_enable_msi(bp);
}
-
break;
}
-
- return rc;
}
/* must be called prior to any HW initializations */
@@ -6713,7 +7053,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
ilt_client->page_size = CDU_ILT_PAGE_SZ;
ilt_client->flags = ILT_CLIENT_SKIP_MEM;
ilt_client->start = line;
- line += L2_ILT_LINES(bp);
+ line += bnx2x_cid_ilt_lines(bp);
#ifdef BCM_CNIC
line += CNIC_ILT_LINES;
#endif
@@ -6793,92 +7133,258 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
#else
ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
+ BUG_ON(line > ILT_MAX_LINES);
+}
+
+/**
+ * bnx2x_pf_q_prep_init - prepare INIT transition parameters
+ *
+ * @bp: driver handle
+ * @fp: pointer to fastpath
+ * @init_params: pointer to parameters structure
+ *
+ * parameters configured:
+ * - HC configuration
+ * - Queue's CDU context
+ */
+static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
+{
+
+ u8 cos;
+ /* FCoE Queue uses Default SB, thus has no HC capabilities */
+ if (!IS_FCOE_FP(fp)) {
+ __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
+ __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
+
+ /* If HC is supported, enable host coalescing in the transition
+ * to INIT state.
+ */
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
+
+ /* HC rate */
+ init_params->rx.hc_rate = bp->rx_ticks ?
+ (1000000 / bp->rx_ticks) : 0;
+ init_params->tx.hc_rate = bp->tx_ticks ?
+ (1000000 / bp->tx_ticks) : 0;
+
+ /* FW SB ID */
+ init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
+ fp->fw_sb_id;
+
+ /*
+ * CQ index among the SB indices: the FCoE client uses the default
+ * SB, therefore it's different.
+ */
+ init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+ init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
+ }
+
+ /* set maximum number of COSs supported by this queue */
+ init_params->max_cos = fp->max_cos;
+
+ DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d",
+ fp->index, init_params->max_cos);
+
+ /* set the context pointers queue object */
+ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
+ init_params->cxts[cos] =
+ &bp->context.vcxt[fp->txdata[cos].cid].eth;
}
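/*
 * A tiny sketch of the hc_rate conversion used above: rx_ticks/tx_ticks look
 * like coalescing intervals in microseconds, so 1000000/ticks yields a
 * status-block update rate in events per second and 0 disables coalescing.
 * The helper name is illustrative only.
 */
#include <stdio.h>

static unsigned int hc_rate_from_usec(unsigned int ticks_usec)
{
	return ticks_usec ? 1000000u / ticks_usec : 0;	/* 0 = no coalescing */
}

int main(void)
{
	printf("50 usec -> %u/s, 0 usec -> %u/s\n",
	       hc_rate_from_usec(50), hc_rate_from_usec(0));
	return 0;
}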
-int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- int is_leading)
+int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct bnx2x_queue_state_params *q_params,
+ struct bnx2x_queue_setup_tx_only_params *tx_only_params,
+ int tx_index, bool leading)
{
- struct bnx2x_client_init_params params = { {0} };
+ memset(tx_only_params, 0, sizeof(*tx_only_params));
+
+ /* Set the command */
+ q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
+
+ /* Set tx-only QUEUE flags: don't zero statistics */
+ tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
+
+ /* choose the index of the cid to send the slow path on */
+ tx_only_params->cid_index = tx_index;
+
+ /* Set general TX_ONLY_SETUP parameters */
+ bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
+
+ /* Set Tx TX_ONLY_SETUP parameters */
+ bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
+
+ DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:"
+ "cos %d, primary cid %d, cid %d, "
+ "client id %d, sp-client id %d, flags %lx",
+ tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
+ q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
+ tx_only_params->gen_params.spcl_id, tx_only_params->flags);
+
+ /* send the ramrod */
+ return bnx2x_queue_state_change(bp, q_params);
+}
+
+
+/**
+ * bnx2x_setup_queue - setup queue
+ *
+ * @bp: driver handle
+ * @fp: pointer to fastpath
+ * @leading: is leading
+ *
+ * This function performs 2 steps in a Queue state machine
+ * actually: 1) RESET->INIT 2) INIT->SETUP
+ */
+
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool leading)
+{
+ struct bnx2x_queue_state_params q_params = {0};
+ struct bnx2x_queue_setup_params *setup_params =
+ &q_params.params.setup;
+ struct bnx2x_queue_setup_tx_only_params *tx_only_params =
+ &q_params.params.tx_only;
int rc;
+ u8 tx_index;
+
+ DP(BNX2X_MSG_SP, "setting up queue %d", fp->index);
/* reset IGU state skip FCoE L2 queue */
if (!IS_FCOE_FP(fp))
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
IGU_INT_ENABLE, 0);
- params.ramrod_params.pstate = &fp->state;
- params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
- params.ramrod_params.index = fp->index;
- params.ramrod_params.cid = fp->cid;
+ q_params.q_obj = &fp->q_obj;
+ /* We want to wait for completion in this context */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
-#ifdef BCM_CNIC
- if (IS_FCOE_FP(fp))
- params.ramrod_params.flags |= CLIENT_IS_FCOE;
+ /* Prepare the INIT parameters */
+ bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
-#endif
+ /* Set the command */
+ q_params.cmd = BNX2X_Q_CMD_INIT;
+
+ /* Change the state to INIT */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
+ return rc;
+ }
- if (is_leading)
- params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
+ DP(BNX2X_MSG_SP, "init complete");
- bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
- bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
+ /* Now move the Queue to the SETUP state... */
+ memset(setup_params, 0, sizeof(*setup_params));
- rc = bnx2x_setup_fw_client(bp, &params, 1,
- bnx2x_sp(bp, client_init_data),
- bnx2x_sp_mapping(bp, client_init_data));
- return rc;
-}
+ /* Set QUEUE flags */
+ setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
-static int bnx2x_stop_fw_client(struct bnx2x *bp,
- struct bnx2x_client_ramrod_params *p)
-{
- int rc;
+ /* Set general SETUP parameters */
+ bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
+ FIRST_TX_COS_INDEX);
- int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
+ bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
+ &setup_params->rxq_params);
- /* halt the connection */
- *p->pstate = BNX2X_FP_STATE_HALTING;
- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
- p->cl_id, 0);
+ bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
+ FIRST_TX_COS_INDEX);
- /* Wait for completion */
- rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
- p->pstate, poll_flag);
- if (rc) /* timeout */
- return rc;
+ /* Set the command */
+ q_params.cmd = BNX2X_Q_CMD_SETUP;
- *p->pstate = BNX2X_FP_STATE_TERMINATING;
- bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
- p->cl_id, 0);
- /* Wait for completion */
- rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
- p->pstate, poll_flag);
- if (rc) /* timeout */
+ /* Change the state to SETUP */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
return rc;
+ }
+ /* loop through the relevant tx-only indices */
+ for (tx_index = FIRST_TX_ONLY_COS_INDEX;
+ tx_index < fp->max_cos;
+ tx_index++) {
- /* delete cfc entry */
- bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
+ /* prepare and send tx-only ramrod*/
+ rc = bnx2x_setup_tx_only(bp, fp, &q_params,
+ tx_only_params, tx_index, leading);
+ if (rc) {
+ BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
+ fp->index, tx_index);
+ return rc;
+ }
+ }
- /* Wait for completion */
- rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
- p->pstate, WAIT_RAMROD_COMMON);
return rc;
}
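/*
 * A compact, stand-alone model of the command sequence bnx2x_setup_queue()
 * drives: RESET->INIT, INIT->SETUP for the first traffic class, then one
 * SETUP_TX_ONLY per additional class. The enum and helper names below are
 * simplified, not the driver's state-machine API.
 */
#include <stdio.h>

enum q_cmd { Q_INIT, Q_SETUP, Q_SETUP_TX_ONLY };

static int state_change(int qid, enum q_cmd cmd, int cos)
{
	static const char * const names[] = { "INIT", "SETUP", "SETUP_TX_ONLY" };

	printf("queue %d: %s (cos %d)\n", qid, names[cmd], cos);
	return 0;	/* a non-zero return would abort the whole setup */
}

static int setup_queue(int qid, int max_cos)
{
	int cos, rc;

	if ((rc = state_change(qid, Q_INIT, 0)))	/* RESET -> INIT */
		return rc;
	if ((rc = state_change(qid, Q_SETUP, 0)))	/* INIT -> SETUP */
		return rc;
	for (cos = 1; cos < max_cos; cos++)		/* extra tx-only classes */
		if ((rc = state_change(qid, Q_SETUP_TX_ONLY, cos)))
			return rc;
	return 0;
}

int main(void)
{
	return setup_queue(0, 3);
}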
-static int bnx2x_stop_client(struct bnx2x *bp, int index)
+static int bnx2x_stop_queue(struct bnx2x *bp, int index)
{
- struct bnx2x_client_ramrod_params client_stop = {0};
struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct bnx2x_fp_txdata *txdata;
+ struct bnx2x_queue_state_params q_params = {0};
+ int rc, tx_index;
+
+ DP(BNX2X_MSG_SP, "stopping queue %d cid %d", index, fp->cid);
+
+ q_params.q_obj = &fp->q_obj;
+ /* We want to wait for completion in this context */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
- client_stop.index = index;
- client_stop.cid = fp->cid;
- client_stop.cl_id = fp->cl_id;
- client_stop.pstate = &(fp->state);
- client_stop.poll = 0;
+ /* close tx-only connections */
+ for (tx_index = FIRST_TX_ONLY_COS_INDEX;
+ tx_index < fp->max_cos;
+ tx_index++){
- return bnx2x_stop_fw_client(bp, &client_stop);
+ /* ascertain this is a normal queue */
+ txdata = &fp->txdata[tx_index];
+
+ DP(BNX2X_MSG_SP, "stopping tx-only queue %d",
+ txdata->txq_index);
+
+ /* send halt terminate on tx-only connection */
+ q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+ memset(&q_params.params.terminate, 0,
+ sizeof(q_params.params.terminate));
+ q_params.params.terminate.cid_index = tx_index;
+
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+
+ /* send the cfc del ramrod on the tx-only connection */
+ q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+ memset(&q_params.params.cfc_del, 0,
+ sizeof(q_params.params.cfc_del));
+ q_params.params.cfc_del.cid_index = tx_index;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+ }
+ /* Stop the primary connection: */
+ /* ...halt the connection */
+ q_params.cmd = BNX2X_Q_CMD_HALT;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+
+ /* ...terminate the connection */
+ q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+ memset(&q_params.params.terminate, 0,
+ sizeof(q_params.params.terminate));
+ q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc)
+ return rc;
+ /* ...delete cfc entry */
+ q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+ memset(&q_params.params.cfc_del, 0,
+ sizeof(q_params.params.cfc_del));
+ q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
+ return bnx2x_queue_state_change(bp, &q_params);
}
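/*
 * The matching teardown modeled stand-alone: TERMINATE and CFC_DEL for each
 * tx-only connection, then HALT, TERMINATE and CFC_DEL for the primary one.
 * Command names are printed as strings purely for illustration.
 */
#include <stdio.h>

static int q_cmd(int qid, const char *cmd, int cid_index)
{
	printf("queue %d: %s (cid index %d)\n", qid, cmd, cid_index);
	return 0;	/* any error stops the teardown, as in the driver */
}

static int stop_queue(int qid, int max_cos)
{
	int cos, rc;

	for (cos = 1; cos < max_cos; cos++)
		if ((rc = q_cmd(qid, "TERMINATE", cos)) ||
		    (rc = q_cmd(qid, "CFC_DEL", cos)))
			return rc;

	if ((rc = q_cmd(qid, "HALT", 0)) ||
	    (rc = q_cmd(qid, "TERMINATE", 0)) ||
	    (rc = q_cmd(qid, "CFC_DEL", 0)))
		return rc;
	return 0;
}

int main(void)
{
	return stop_queue(0, 3);
}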
@@ -6887,12 +7393,6 @@ static void bnx2x_reset_func(struct bnx2x *bp)
int port = BP_PORT(bp);
int func = BP_FUNC(bp);
int i;
- int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
- (CHIP_IS_E2(bp) ?
- offsetof(struct hc_status_block_data_e2, common) :
- offsetof(struct hc_status_block_data_e1x, common));
- int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
- int pfid_offset = offsetof(struct pci_entity, pf_id);
/* Disable the function in the FW */
REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
@@ -6903,20 +7403,21 @@ static void bnx2x_reset_func(struct bnx2x *bp)
/* FP SBs */
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- REG_WR8(bp,
- BAR_CSTRORM_INTMEM +
- CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
- + pfunc_offset_fp + pfid_offset,
- HC_FUNCTION_DISABLED);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
+ SB_DISABLED);
}
+#ifdef BCM_CNIC
+ /* CNIC SB */
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
+ SB_DISABLED);
+#endif
/* SP SB */
- REG_WR8(bp,
- BAR_CSTRORM_INTMEM +
- CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
- pfunc_offset_sp + pfid_offset,
- HC_FUNCTION_DISABLED);
-
+ REG_WR8(bp, BAR_CSTRORM_INTMEM +
+ CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
+ SB_DISABLED);
for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
@@ -6950,7 +7451,7 @@ static void bnx2x_reset_func(struct bnx2x *bp)
/* Timers workaround bug for E2: if this is vnic-3,
* we need to set the entire ilt range for this timers.
*/
- if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
+ if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
struct ilt_client_info ilt_cli;
/* use dummy TM client */
memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
@@ -6962,7 +7463,7 @@ static void bnx2x_reset_func(struct bnx2x *bp)
}
/* this assumes that reset_port() called before reset_func()*/
- if (CHIP_IS_E2(bp))
+ if (!CHIP_IS_E1x(bp))
bnx2x_pf_disable(bp);
bp->dmae_ready = 0;
@@ -6973,6 +7474,9 @@ static void bnx2x_reset_port(struct bnx2x *bp)
int port = BP_PORT(bp);
u32 val;
+ /* Reset physical Link */
+ bnx2x__link_reset(bp);
+
REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
/* Do not rcv packets to BRB */
@@ -6994,92 +7498,66 @@ static void bnx2x_reset_port(struct bnx2x *bp)
/* TODO: Close Doorbell port? */
}
-static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
+static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
{
- DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
- BP_ABS_FUNC(bp), reset_code);
+ struct bnx2x_func_state_params func_params = {0};
- switch (reset_code) {
- case FW_MSG_CODE_DRV_UNLOAD_COMMON:
- bnx2x_reset_port(bp);
- bnx2x_reset_func(bp);
- bnx2x_reset_common(bp);
- break;
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
- case FW_MSG_CODE_DRV_UNLOAD_PORT:
- bnx2x_reset_port(bp);
- bnx2x_reset_func(bp);
- break;
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_HW_RESET;
- case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
- bnx2x_reset_func(bp);
- break;
+ func_params.params.hw_init.load_phase = load_code;
- default:
- BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
- break;
- }
+ return bnx2x_func_state_change(bp, &func_params);
}
-#ifdef BCM_CNIC
-static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
+static inline int bnx2x_func_stop(struct bnx2x *bp)
{
- if (bp->flags & FCOE_MACS_SET) {
- if (!IS_MF_SD(bp))
- bnx2x_set_fip_eth_mac_addr(bp, 0);
-
- bnx2x_set_all_enode_macs(bp, 0);
-
- bp->flags &= ~FCOE_MACS_SET;
- }
-}
-#endif
-
-void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
-{
- int port = BP_PORT(bp);
- u32 reset_code = 0;
- int i, cnt, rc;
-
- /* Wait until tx fastpath tasks complete */
- for_each_tx_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_func_state_params func_params = {0};
+ int rc;
- cnt = 1000;
- while (bnx2x_has_tx_work_unload(fp)) {
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_STOP;
- if (!cnt) {
- BNX2X_ERR("timeout waiting for queue[%d]\n",
- i);
+ /*
+ * Try to stop the function the 'good way'. If that fails (in case
+ * of a parity error during bnx2x_chip_cleanup()) and we are
+ * not in a debug mode, perform a state transaction in order to
+ * enable a further HW_RESET transaction.
+ */
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
#ifdef BNX2X_STOP_ON_ERROR
- bnx2x_panic();
- return -EBUSY;
+ return rc;
#else
- break;
+ BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry "
+ "transaction\n");
+ __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
+ return bnx2x_func_state_change(bp, &func_params);
#endif
- }
- cnt--;
- msleep(1);
- }
}
- /* Give HW time to discard old tx messages */
- msleep(1);
- bnx2x_set_eth_mac(bp, 0);
-
- bnx2x_invalidate_uc_list(bp);
-
- if (CHIP_IS_E1(bp))
- bnx2x_invalidate_e1_mc_list(bp);
- else {
- bnx2x_invalidate_e1h_mc_list(bp);
- REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
- }
+ return 0;
+}
-#ifdef BCM_CNIC
- bnx2x_del_fcoe_eth_macs(bp);
-#endif
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp: driver handle
+ * @unload_mode: requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
+{
+ u32 reset_code = 0;
+ int port = BP_PORT(bp);
+ /* Select the UNLOAD request mode */
if (unload_mode == UNLOAD_NORMAL)
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -7106,54 +7584,215 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
} else
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+ /* Send the request to the MCP */
+ if (!BP_NOMCP(bp))
+ reset_code = bnx2x_fw_command(bp, reset_code, 0);
+ else {
+ int path = BP_PATH(bp);
+
+ DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
+ "%d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ load_count[path][0]--;
+ load_count[path][1 + port]--;
+ DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
+ "%d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ if (load_count[path][0] == 0)
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
+ else if (load_count[path][1 + port] == 0)
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
+ else
+ reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
+ }
+
+ return reset_code;
+}
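/*
 * A stand-alone sketch of the NO-MCP fallback above: load_count[path] keeps
 * { total, port0, port1 } users, and the function leaving last on the path
 * (or on the port) gets the wider reset code. The enum values are stand-ins
 * for the FW_MSG_CODE_DRV_UNLOAD_* constants.
 */
#include <stdio.h>

enum { UNLOAD_FUNCTION, UNLOAD_PORT, UNLOAD_COMMON };

static int pick_reset_code(int load_count[3], int port)
{
	load_count[0]--;		/* one function less on this path */
	load_count[1 + port]--;		/* one function less on this port */

	if (load_count[0] == 0)
		return UNLOAD_COMMON;	/* last function on the path */
	else if (load_count[1 + port] == 0)
		return UNLOAD_PORT;	/* last function on this port */
	return UNLOAD_FUNCTION;
}

int main(void)
{
	int counts[3] = { 2, 1, 1 };	/* two functions loaded, one per port */

	printf("reset code: %d\n", pick_reset_code(counts, 0));
	return 0;
}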
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp)
+{
+ /* Report UNLOAD_DONE to MCP */
+ if (!BP_NOMCP(bp))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+}
+
+static inline int bnx2x_func_wait_started(struct bnx2x *bp)
+{
+ int tout = 50;
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+
+ if (!bp->port.pmf)
+ return 0;
+
+ /*
+ * (assumption: No Attention from MCP at this stage)
+ * PMF probably in the middle of TXdisable/enable transaction
+ * 1. Sync the IRQ for the default SB
+ * 2. Sync the SP queue - this guarantees us that attention handling started
+ * 3. Wait until the TXdisable/enable transaction completes
+ *
+ * 1+2 guarantee that if the DCBx attention was scheduled it has already
+ * changed the pending bit of the transaction from STARTED-->TX_STOPPED; if we
+ * already received the completion for the transaction the state is
+ * TX_STOPPED. The state will return to STARTED after completion of the
+ * TX_STOPPED-->STARTED transaction.
+ */
+
+ /* make sure default SB ISR is done */
+ if (msix)
+ synchronize_irq(bp->msix_table[0].vector);
+ else
+ synchronize_irq(bp->pdev->irq);
+
+ flush_workqueue(bnx2x_wq);
+
+ while (bnx2x_func_get_state(bp, &bp->func_obj) !=
+ BNX2X_F_STATE_STARTED && tout--)
+ msleep(20);
+
+ if (bnx2x_func_get_state(bp, &bp->func_obj) !=
+ BNX2X_F_STATE_STARTED) {
+#ifdef BNX2X_STOP_ON_ERROR
+ return -EBUSY;
+#else
+ /*
+ * Failed to complete the transaction in a "good way"
+ * Force both transactions with CLR bit
+ */
+ struct bnx2x_func_state_params func_params = {0};
+
+ DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! "
+ "Forcing STARTED-->TX_ST0PPED-->STARTED\n");
+
+ func_params.f_obj = &bp->func_obj;
+ __set_bit(RAMROD_DRV_CLR_ONLY,
+ &func_params.ramrod_flags);
+
+ /* STARTED-->TX_STOPPED */
+ func_params.cmd = BNX2X_F_CMD_TX_STOP;
+ bnx2x_func_state_change(bp, &func_params);
+
+ /* TX_STOPPED-->STARTED */
+ func_params.cmd = BNX2X_F_CMD_TX_START;
+ return bnx2x_func_state_change(bp, &func_params);
+#endif
+ }
+
+ return 0;
+}
+
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
+{
+ int port = BP_PORT(bp);
+ int i, rc = 0;
+ u8 cos;
+ struct bnx2x_mcast_ramrod_params rparam = {0};
+ u32 reset_code;
+
+ /* Wait until tx fastpath tasks complete */
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ for_each_cos_in_tx_queue(fp, cos)
+ rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
+#ifdef BNX2X_STOP_ON_ERROR
+ if (rc)
+ return;
+#endif
+ }
+
+ /* Give HW time to discard old tx messages */
+ usleep_range(1000, 1000);
+
+ /* Clean all ETH MACs */
+ rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
+
+ /* Clean up UC list */
+ rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+ true);
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: "
+ "%d\n", rc);
+
+ /* Disable LLH */
+ if (!CHIP_IS_E1(bp))
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+ /* Set "drop all" (stop Rx).
+ * We need to take a netif_addr_lock() here in order to prevent
+ * a race between the completion code and this code.
+ */
+ netif_addr_lock_bh(bp->dev);
+ /* Schedule the rx_mode command */
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+ set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ else
+ bnx2x_set_storm_rx_mode(bp);
+
+ /* Cleanup multicast configuration */
+ rparam.mcast_obj = &bp->mcast_obj;
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
+
+ netif_addr_unlock_bh(bp->dev);
+
+ /*
+ * Send the UNLOAD_REQUEST to the MCP. This will return if
+ * this function should perform FUNC, PORT or COMMON HW
+ * reset.
+ */
+ reset_code = bnx2x_send_unload_req(bp, unload_mode);
+
+ /*
+ * (assumption: No Attention from MCP at this stage)
+ * PMF probably in the middle of TXdisable/enable transaction
+ */
+ rc = bnx2x_func_wait_started(bp);
+ if (rc) {
+ BNX2X_ERR("bnx2x_func_wait_started failed\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ return;
+#endif
+ }
+
/* Close multi and leading connections
- Completions for ramrods are collected in a synchronous way */
+ * Completions for ramrods are collected in a synchronous way
+ */
for_each_queue(bp, i)
-
- if (bnx2x_stop_client(bp, i))
+ if (bnx2x_stop_queue(bp, i))
#ifdef BNX2X_STOP_ON_ERROR
return;
#else
goto unload_error;
#endif
+ /* If the SP settings didn't get completed so far - something
+ * has gone very wrong.
+ */
+ if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
+ BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
+#ifndef BNX2X_STOP_ON_ERROR
+unload_error:
+#endif
rc = bnx2x_func_stop(bp);
if (rc) {
BNX2X_ERR("Function stop failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
return;
-#else
- goto unload_error;
#endif
}
-#ifndef BNX2X_STOP_ON_ERROR
-unload_error:
-#endif
- if (!BP_NOMCP(bp))
- reset_code = bnx2x_fw_command(bp, reset_code, 0);
- else {
- DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
- "%d, %d, %d\n", BP_PATH(bp),
- load_count[BP_PATH(bp)][0],
- load_count[BP_PATH(bp)][1],
- load_count[BP_PATH(bp)][2]);
- load_count[BP_PATH(bp)][0]--;
- load_count[BP_PATH(bp)][1 + port]--;
- DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
- "%d, %d, %d\n", BP_PATH(bp),
- load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
- load_count[BP_PATH(bp)][2]);
- if (load_count[BP_PATH(bp)][0] == 0)
- reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
- else if (load_count[BP_PATH(bp)][1 + port] == 0)
- reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
- else
- reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
- }
-
- if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
- (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
- bnx2x__link_reset(bp);
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1);
@@ -7162,12 +7801,13 @@ unload_error:
bnx2x_free_irq(bp);
/* Reset the chip */
- bnx2x_reset_chip(bp, reset_code);
+ rc = bnx2x_reset_hw(bp, reset_code);
+ if (rc)
+ BNX2X_ERR("HW_RESET failed\n");
- /* Report UNLOAD_DONE to MCP */
- if (!BP_NOMCP(bp))
- bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp);
}
void bnx2x_disable_close_the_gate(struct bnx2x *bp)
@@ -7184,7 +7824,7 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp)
val = REG_RD(bp, addr);
val &= ~(0x300);
REG_WR(bp, addr, val);
- } else if (CHIP_IS_E1H(bp)) {
+ } else {
val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
@@ -7195,24 +7835,37 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp)
/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
- u32 val, addr;
+ u32 val;
/* Gates #2 and #4a are closed/opened for "not E1" only */
if (!CHIP_IS_E1(bp)) {
/* #4 */
- val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
- REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
- close ? (val | 0x1) : (val & (~(u32)1)));
+ REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
/* #2 */
- val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
- REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
- close ? (val | 0x1) : (val & (~(u32)1)));
+ REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
}
/* #3 */
- addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
- val = REG_RD(bp, addr);
- REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
+ if (CHIP_IS_E1x(bp)) {
+ /* Prevent interrupts from HC on both ports */
+ val = REG_RD(bp, HC_REG_CONFIG_1);
+ REG_WR(bp, HC_REG_CONFIG_1,
+ (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
+ (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
+
+ val = REG_RD(bp, HC_REG_CONFIG_0);
+ REG_WR(bp, HC_REG_CONFIG_0,
+ (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
+ (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
+ } else {
+ /* Prevent incoming interrupts in the IGU */
+ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
+ REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
+ (!close) ?
+ (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
+ (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
+ }
DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
close ? "closing" : "opening");
@@ -7330,7 +7983,6 @@ static void bnx2x_pxp_prep(struct bnx2x *bp)
if (!CHIP_IS_E1(bp)) {
REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
- REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
mmiowb();
}
}
@@ -7345,46 +7997,133 @@ static void bnx2x_pxp_prep(struct bnx2x *bp)
* - GRC
* - RBCN, RBCP
*/
-static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
+static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
{
u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
+ u32 global_bits2, stay_reset2;
+
+ /*
+ * Bits that have to be set in reset_mask2 if we want to reset 'global'
+ * (per chip) blocks.
+ */
+ global_bits2 =
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
+ /* Don't reset the following blocks */
not_reset_mask1 =
MISC_REGISTERS_RESET_REG_1_RST_HC |
MISC_REGISTERS_RESET_REG_1_RST_PXPV |
MISC_REGISTERS_RESET_REG_1_RST_PXP;
not_reset_mask2 =
- MISC_REGISTERS_RESET_REG_2_RST_MDIO |
+ MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
MISC_REGISTERS_RESET_REG_2_RST_RBCN |
MISC_REGISTERS_RESET_REG_2_RST_GRC |
MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
- MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
+ MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
+ MISC_REGISTERS_RESET_REG_2_RST_ATC |
+ MISC_REGISTERS_RESET_REG_2_PGLC;
+ /*
+ * Keep the following blocks in reset:
+ * - all xxMACs are handled by the bnx2x_link code.
+ */
+ stay_reset2 =
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
+ MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
+ MISC_REGISTERS_RESET_REG_2_UMAC0 |
+ MISC_REGISTERS_RESET_REG_2_UMAC1 |
+ MISC_REGISTERS_RESET_REG_2_XMAC |
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
+
+ /* Full reset masks according to the chip */
reset_mask1 = 0xffffffff;
if (CHIP_IS_E1(bp))
reset_mask2 = 0xffff;
- else
+ else if (CHIP_IS_E1H(bp))
reset_mask2 = 0x1ffff;
+ else if (CHIP_IS_E2(bp))
+ reset_mask2 = 0xfffff;
+ else /* CHIP_IS_E3 */
+ reset_mask2 = 0x3ffffff;
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
- reset_mask1 & (~not_reset_mask1));
+ /* Don't reset global blocks unless we need to */
+ if (!global)
+ reset_mask2 &= ~global_bits2;
+
+ /*
+ * In case of attention in the QM, we need to reset PXP
+ * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
+ * because otherwise QM reset would release 'close the gates' shortly
+ * before resetting the PXP, then the PSWRQ would send a write
+ * request to PGLUE. Then when PXP is reset, PGLUE would try to
+ * read the payload data from PSWWR, but PSWWR would not
+ * respond. The write queue in PGLUE would get stuck and DMAE commands
+ * would not return. Therefore it's important to reset the second
+ * reset register (containing the
+ * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
+ * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
+ * bit).
+ */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
reset_mask2 & (~not_reset_mask2));
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+ reset_mask1 & (~not_reset_mask1));
+
+ barrier();
+ mmiowb();
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ reset_mask2 & (~stay_reset2));
+
barrier();
mmiowb();
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
- REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
mmiowb();
}
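/*
 * A tiny sketch of the mask arithmetic above: start from the per-chip full
 * mask, drop the global bits when only a per-path reset is wanted, never
 * touch the not_reset blocks, and keep the MAC blocks (stay_reset) in reset
 * when re-asserting register 2. The bit values below are made up; the real
 * MISC_REGISTERS_RESET_REG_* definitions differ.
 */
#include <stdio.h>

int main(void)
{
	int global = 0;				/* per-path reset only */
	unsigned int reset_mask2     = 0xfffff;	/* e.g. an E2-like chip */
	unsigned int not_reset_mask2 = 0x00030;	/* blocks never reset here */
	unsigned int global_bits2    = 0x00c00;	/* MCP common core/CPU */
	unsigned int stay_reset2     = 0x0f000;	/* MACs left to the link code */

	if (!global)
		reset_mask2 &= ~global_bits2;

	/* ordering matters: REG_2 (PXP_RQ_RD_WR) is cleared before REG_1 (QM) */
	printf("REG_2_CLEAR <- %#x\n", reset_mask2 & ~not_reset_mask2);
	printf("REG_1_CLEAR <- reset_mask1 & ~not_reset_mask1\n");
	printf("REG_2_SET   <- %#x\n", reset_mask2 & ~stay_reset2);
	printf("REG_1_SET   <- reset_mask1\n");
	return 0;
}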
-static int bnx2x_process_kill(struct bnx2x *bp)
+/**
+ * bnx2x_er_poll_igu_vq - poll for the pending writes bit
+ *
+ * @bp: driver handle
+ *
+ * The bit should get cleared in no more than 1s. Returns 0 if the
+ * pending writes bit gets cleared.
+ */
+static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
+{
+ u32 cnt = 1000;
+ u32 pend_bits = 0;
+
+ do {
+ pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
+
+ if (pend_bits == 0)
+ break;
+
+ usleep_range(1000, 1000);
+ } while (cnt-- > 0);
+
+ if (cnt <= 0) {
+ BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
+ pend_bits);
+ return -EBUSY;
+ }
+
+ return 0;
+}
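/*
 * The polling helper above is a bounded busy-wait: re-read the pending-bits
 * register about once a millisecond and give up with -EBUSY after roughly a
 * second. A generic user-space sketch of the same pattern, with the register
 * read faked by a counter.
 */
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

static unsigned int fake_reads = 3;	/* pretend 3 reads still show work */

static unsigned int read_pending_bits(void)
{
	return fake_reads ? fake_reads-- : 0;
}

static int poll_pending(void)
{
	int cnt = 1000;

	do {
		if (read_pending_bits() == 0)
			return 0;
		usleep(1000);		/* ~1ms per poll, ~1s total budget */
	} while (cnt-- > 0);

	return -EBUSY;
}

int main(void)
{
	printf("poll result: %d\n", poll_pending());
	return 0;
}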
+
+static int bnx2x_process_kill(struct bnx2x *bp, bool global)
{
int cnt = 1000;
u32 val = 0;
@@ -7403,7 +8142,7 @@ static int bnx2x_process_kill(struct bnx2x *bp)
((port_is_idle_1 & 0x1) == 0x1) &&
(pgl_exp_rom2 == 0xffffffff))
break;
- msleep(1);
+ usleep_range(1000, 1000);
} while (cnt-- > 0);
if (cnt <= 0) {
@@ -7423,6 +8162,11 @@ static int bnx2x_process_kill(struct bnx2x *bp)
/* Close gates #2, #3 and #4 */
bnx2x_set_234_gates(bp, true);
+ /* Poll for IGU VQs for 57712 and newer chips */
+ if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
+ return -EAGAIN;
+
+
/* TBD: Indicate that "process kill" is in progress to MCP */
/* Clear "unprepared" bit */
@@ -7435,25 +8179,28 @@ static int bnx2x_process_kill(struct bnx2x *bp)
/* Wait for 1ms to empty GLUE and PCI-E core queues,
* PSWHST, GRC and PSWRD Tetris buffer.
*/
- msleep(1);
+ usleep_range(1000, 1000);
/* Prepare to chip reset: */
/* MCP */
- bnx2x_reset_mcp_prep(bp, &val);
+ if (global)
+ bnx2x_reset_mcp_prep(bp, &val);
/* PXP */
bnx2x_pxp_prep(bp);
barrier();
/* reset the chip */
- bnx2x_process_kill_chip_reset(bp);
+ bnx2x_process_kill_chip_reset(bp, global);
barrier();
/* Recover after reset: */
/* MCP */
- if (bnx2x_reset_mcp_comp(bp, val))
+ if (global && bnx2x_reset_mcp_comp(bp, val))
return -EAGAIN;
+ /* TBD: Add resetting the NO_MCP mode DB here */
+
/* PXP */
bnx2x_pxp_prep(bp);
@@ -7466,43 +8213,85 @@ static int bnx2x_process_kill(struct bnx2x *bp)
return 0;
}
-static int bnx2x_leader_reset(struct bnx2x *bp)
+int bnx2x_leader_reset(struct bnx2x *bp)
{
int rc = 0;
+ bool global = bnx2x_reset_is_global(bp);
+
/* Try to recover after the failure */
- if (bnx2x_process_kill(bp)) {
- printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
- bp->dev->name);
+ if (bnx2x_process_kill(bp, global)) {
+ netdev_err(bp->dev, "Something bad had happen on engine %d! "
+ "Aii!\n", BP_PATH(bp));
rc = -EAGAIN;
goto exit_leader_reset;
}
- /* Clear "reset is in progress" bit and update the driver state */
+ /*
+ * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
+ * state.
+ */
bnx2x_set_reset_done(bp);
- bp->recovery_state = BNX2X_RECOVERY_DONE;
+ if (global)
+ bnx2x_clear_reset_global(bp);
exit_leader_reset:
bp->is_leader = 0;
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
- smp_wmb();
+ bnx2x_release_leader_lock(bp);
+ smp_mb();
return rc;
}
-/* Assumption: runs under rtnl lock. This together with the fact
- * that it's called only from bnx2x_reset_task() ensure that it
+static inline void bnx2x_recovery_failed(struct bnx2x *bp)
+{
+ netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
+
+ /* Disconnect this device */
+ netif_device_detach(bp->dev);
+
+ /*
+ * Block ifup for all functions on this engine until "process kill"
+ * or power cycle.
+ */
+ bnx2x_set_reset_in_progress(bp);
+
+ /* Shut down the power */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+ smp_mb();
+}
+
+/*
+ * Assumption: runs under rtnl lock. This together with the fact
+ * that it's called only from bnx2x_sp_rtnl() ensure that it
* will never be called when netif_running(bp->dev) is false.
*/
static void bnx2x_parity_recover(struct bnx2x *bp)
{
+ bool global = false;
+
DP(NETIF_MSG_HW, "Handling parity\n");
while (1) {
switch (bp->recovery_state) {
case BNX2X_RECOVERY_INIT:
DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+ bnx2x_chk_parity_attn(bp, &global, false);
+
/* Try to get a LEADER_LOCK HW lock */
- if (bnx2x_trylock_hw_lock(bp,
- HW_LOCK_RESOURCE_RESERVED_08))
+ if (bnx2x_trylock_leader_lock(bp)) {
+ bnx2x_set_reset_in_progress(bp);
+ /*
+ * Check if there is a global attention and if
+ * there was a global attention, set the global
+ * reset bit.
+ */
+
+ if (global)
+ bnx2x_set_reset_global(bp);
+
bp->is_leader = 1;
+ }
/* Stop the driver */
/* If interface has been removed - break */
@@ -7510,21 +8299,47 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
return;
bp->recovery_state = BNX2X_RECOVERY_WAIT;
- /* Ensure "is_leader" and "recovery_state"
- * update values are seen on other CPUs
+
+ /*
+ * Reset MCP command sequence number and MCP mail box
+ * sequence as we are going to reset the MCP.
+ */
+ if (global) {
+ bp->fw_seq = 0;
+ bp->fw_drv_pulse_wr_seq = 0;
+ }
+
+ /* Ensure "is_leader", MCP command sequence and
+ * "recovery_state" update values are seen on other
+ * CPUs.
*/
- smp_wmb();
+ smp_mb();
break;
case BNX2X_RECOVERY_WAIT:
DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
if (bp->is_leader) {
- u32 load_counter = bnx2x_get_load_cnt(bp);
- if (load_counter) {
+ int other_engine = BP_PATH(bp) ? 0 : 1;
+ u32 other_load_counter =
+ bnx2x_get_load_cnt(bp, other_engine);
+ u32 load_counter =
+ bnx2x_get_load_cnt(bp, BP_PATH(bp));
+ global = bnx2x_reset_is_global(bp);
+
+ /*
+ * In case of a parity in a global block, let
+ * the first leader that performs a
+ * leader_reset() reset the global blocks in
+ * order to clear global attentions. Otherwise
+ * the gates will remain closed for that
+ * engine.
+ */
+ if (load_counter ||
+ (global && other_load_counter)) {
/* Wait until all other functions get
* down.
*/
- schedule_delayed_work(&bp->reset_task,
+ schedule_delayed_work(&bp->sp_rtnl_task,
HZ/10);
return;
} else {
@@ -7533,37 +8348,27 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
* normal. In any case it's an exit
* point for a leader.
*/
- if (bnx2x_leader_reset(bp) ||
- bnx2x_nic_load(bp, LOAD_NORMAL)) {
- printk(KERN_ERR"%s: Recovery "
- "has failed. Power cycle is "
- "needed.\n", bp->dev->name);
- /* Disconnect this device */
- netif_device_detach(bp->dev);
- /* Block ifup for all function
- * of this ASIC until
- * "process kill" or power
- * cycle.
- */
- bnx2x_set_reset_in_progress(bp);
- /* Shut down the power */
- bnx2x_set_power_state(bp,
- PCI_D3hot);
+ if (bnx2x_leader_reset(bp)) {
+ bnx2x_recovery_failed(bp);
return;
}
- return;
+ /* If we are here, it means that the
+ * leader has succeeded and doesn't
+ * want to be a leader any more. Try
+ * to continue as a non-leader.
+ */
+ break;
}
} else { /* non-leader */
- if (!bnx2x_reset_is_done(bp)) {
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
/* Try to get a LEADER_LOCK HW lock as
* long as a former leader may have
* been unloaded by the user or
* released a leadership by another
* reason.
*/
- if (bnx2x_trylock_hw_lock(bp,
- HW_LOCK_RESOURCE_RESERVED_08)) {
+ if (bnx2x_trylock_leader_lock(bp)) {
/* I'm a leader now! Restart a
* switch case.
*/
@@ -7571,18 +8376,30 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
break;
}
- schedule_delayed_work(&bp->reset_task,
+ schedule_delayed_work(&bp->sp_rtnl_task,
HZ/10);
return;
- } else { /* A leader has completed
- * the "process kill". It's an exit
- * point for a non-leader.
- */
- bnx2x_nic_load(bp, LOAD_NORMAL);
- bp->recovery_state =
- BNX2X_RECOVERY_DONE;
- smp_wmb();
+ } else {
+ /*
+ * If there was a global attention, wait
+ * for it to be cleared.
+ */
+ if (bnx2x_reset_is_global(bp)) {
+ schedule_delayed_work(
+ &bp->sp_rtnl_task,
+ HZ/10);
+ return;
+ }
+
+ if (bnx2x_nic_load(bp, LOAD_NORMAL))
+ bnx2x_recovery_failed(bp);
+ else {
+ bp->recovery_state =
+ BNX2X_RECOVERY_DONE;
+ smp_mb();
+ }
+
return;
}
}
@@ -7595,35 +8412,92 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
* scheduled on a general queue in order to prevent a dead lock.
*/
-static void bnx2x_reset_task(struct work_struct *work)
+static void bnx2x_sp_rtnl_task(struct work_struct *work)
{
- struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
-
-#ifdef BNX2X_STOP_ON_ERROR
- BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
- " so reset not done to allow debug dump,\n"
- KERN_ERR " you will need to reboot when done\n");
- return;
-#endif
+ struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
rtnl_lock();
if (!netif_running(bp->dev))
- goto reset_task_exit;
+ goto sp_rtnl_exit;
+
+ /* if stop on error is defined no recovery flows should be executed */
+#ifdef BNX2X_STOP_ON_ERROR
+ BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined "
+ "so reset not done to allow debug dump,\n"
+ "you will need to reboot when done\n");
+ goto sp_rtnl_not_reset;
+#endif
+
+ if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
+ /*
+ * Clear all pending SP commands as we are going to reset the
+ * function anyway.
+ */
+ bp->sp_rtnl_state = 0;
+ smp_mb();
- if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
bnx2x_parity_recover(bp);
- else {
+
+ goto sp_rtnl_exit;
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
+ /*
+ * Clear all pending SP commands as we are going to reset the
+ * function anyway.
+ */
+ bp->sp_rtnl_state = 0;
+ smp_mb();
+
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
bnx2x_nic_load(bp, LOAD_NORMAL);
+
+ goto sp_rtnl_exit;
}
+#ifdef BNX2X_STOP_ON_ERROR
+sp_rtnl_not_reset:
+#endif
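+ /* Handle a deferred traffic-class (multi-CoS) setup request */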
+ if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
+ bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
-reset_task_exit:
+sp_rtnl_exit:
rtnl_unlock();
}
/* end of nic load/unload */
+static void bnx2x_period_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
+
+ if (!netif_running(bp->dev))
+ goto period_task_exit;
+
+ if (CHIP_REV_IS_SLOW(bp)) {
+ BNX2X_ERR("period task called on emulation, ignoring\n");
+ goto period_task_exit;
+ }
+
+ bnx2x_acquire_phy_lock(bp);
+ /*
+ * The barrier is needed to ensure the ordering between the writing to
+ * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
+ * the reading here.
+ */
+ smp_mb();
+ if (bp->port.pmf) {
+ bnx2x_period_func(&bp->link_params, &bp->link_vars);
+
+ /* Re-queue task in 1 sec */
+ queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
+ }
+
+ bnx2x_release_phy_lock(bp);
+period_task_exit:
+ return;
+}
+
/*
* Init service functions
*/
@@ -7681,8 +8555,8 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
/* save our pf_num */
int orig_pf_num = bp->pf_num;
- u32 swap_en;
- u32 swap_val;
+ int port;
+ u32 swap_en, swap_val, value;
/* clear the UNDI indication */
REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
@@ -7717,21 +8591,19 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
bnx2x_undi_int_disable(bp);
+ port = BP_PORT(bp);
/* close input traffic and wait for it */
/* Do not rcv packets to BRB */
- REG_WR(bp,
- (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
- NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
+ REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
+ NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
/* Do not direct rcv packets that are not for MCP to
* the BRB */
- REG_WR(bp,
- (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
- NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+ REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+ NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
/* clear AEU */
- REG_WR(bp,
- (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
- MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
+ REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+ MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
msleep(10);
/* save NIG port swap info */
@@ -7741,9 +8613,17 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
REG_WR(bp,
GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
0xd3ffffff);
+
+ value = 0x1400;
+ if (CHIP_IS_E3(bp)) {
+ value |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+ value |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+ }
+
REG_WR(bp,
GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
- 0x1403);
+ value);
+
/* take the NIG out of reset and restore swap values */
REG_WR(bp,
GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
@@ -7784,7 +8664,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
/* Set doorbell size */
bp->db_size = (1 << BNX2X_DB_SHIFT);
- if (CHIP_IS_E2(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
if ((val & 1) == 0)
val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
@@ -7804,16 +8684,6 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->pfid = bp->pf_num; /* 0..7 */
}
- /*
- * set base FW non-default (fast path) status block id, this value is
- * used to initialize the fw_sb_id saved on the fp/queue structure to
- * determine the id used by the FW.
- */
- if (CHIP_IS_E1x(bp))
- bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
- else /* E2 */
- bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
-
bp->link_params.chip_id = bp->common.chip_id;
BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
@@ -7825,13 +8695,15 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
}
val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
- bp->common.flash_size = (NVRAM_1MB_SIZE <<
+ bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
(val & MCPR_NVM_CFG4_FLASH_SIZE));
BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
bp->common.flash_size, bp->common.flash_size);
bnx2x_init_shmem(bp);
+
bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
MISC_REG_GENERIC_CR_1 :
MISC_REG_GENERIC_CR_0));
@@ -7880,6 +8752,10 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
+ bp->link_params.feature_config_flags |=
+ (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
+ FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+
pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
@@ -7904,14 +8780,11 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
int vn = BP_E1HVN(bp);
int igu_sb_id;
u32 val;
- u8 fid;
+ u8 fid, igu_sb_cnt = 0;
bp->igu_base_sb = 0xff;
- bp->igu_sb_cnt = 0;
if (CHIP_INT_MODE_IS_BC(bp)) {
- bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
- NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
-
+ igu_sb_cnt = bp->igu_sb_cnt;
bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
FP_SB_MAX_E1x;
@@ -7937,13 +8810,21 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
else {
if (bp->igu_base_sb == 0xff)
bp->igu_base_sb = igu_sb_id;
- bp->igu_sb_cnt++;
+ igu_sb_cnt++;
}
}
}
- bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
- NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
- if (bp->igu_sb_cnt == 0)
+
+#ifdef CONFIG_PCI_MSI
+ /*
+ * It's expected that the number of CAM entries for this function is equal
+ * to the number evaluated based on the MSI-X table size. We want a
+ * harsh warning if these values are different!
+ */
+ WARN_ON(bp->igu_sb_cnt != igu_sb_cnt);
+#endif
+
+ if (igu_sb_cnt == 0)
BNX2X_ERR("CAM configuration error\n");
}
@@ -7991,24 +8872,25 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
return;
}
- switch (switch_cfg) {
- case SWITCH_CFG_1G:
- bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
- port*0x10);
- BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
- break;
-
- case SWITCH_CFG_10G:
- bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
- port*0x18);
- BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
- break;
-
- default:
- BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
- bp->port.link_config[0]);
- return;
+ if (CHIP_IS_E3(bp))
+ bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
+ else {
+ switch (switch_cfg) {
+ case SWITCH_CFG_1G:
+ bp->port.phy_addr = REG_RD(
+ bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
+ break;
+ case SWITCH_CFG_10G:
+ bp->port.phy_addr = REG_RD(
+ bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
+ break;
+ default:
+ BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
+ bp->port.link_config[0]);
+ return;
+ }
}
+ BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
/* mask what we support according to speed_cap_mask per configuration */
for (idx = 0; idx < cfg_size; idx++) {
if (!(bp->link_params.speed_cap_mask[idx] &
@@ -8089,7 +8971,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_10baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERROR("NVRAM config error. "
+ BNX2X_ERR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
link_config,
@@ -8108,7 +8990,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_10baseT_Half |
ADVERTISED_TP);
} else {
- BNX2X_ERROR("NVRAM config error. "
+ BNX2X_ERR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
link_config,
@@ -8126,7 +9008,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_100baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERROR("NVRAM config error. "
+ BNX2X_ERR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
link_config,
@@ -8146,7 +9028,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_100baseT_Half |
ADVERTISED_TP);
} else {
- BNX2X_ERROR("NVRAM config error. "
+ BNX2X_ERR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
link_config,
@@ -8164,7 +9046,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_1000baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERROR("NVRAM config error. "
+ BNX2X_ERR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
link_config,
@@ -8182,7 +9064,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_2500baseX_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERROR("NVRAM config error. "
+ BNX2X_ERR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
link_config,
@@ -8192,8 +9074,6 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
break;
case PORT_FEATURE_LINK_SPEED_10G_CX4:
- case PORT_FEATURE_LINK_SPEED_10G_KX4:
- case PORT_FEATURE_LINK_SPEED_10G_KR:
if (bp->port.supported[idx] &
SUPPORTED_10000baseT_Full) {
bp->link_params.req_line_speed[idx] =
@@ -8202,7 +9082,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_10000baseT_Full |
ADVERTISED_FIBRE);
} else {
- BNX2X_ERROR("NVRAM config error. "
+ BNX2X_ERR("NVRAM config error. "
"Invalid link_config 0x%x"
" speed_cap_mask 0x%x\n",
link_config,
@@ -8210,11 +9090,14 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
return;
}
break;
+ case PORT_FEATURE_LINK_SPEED_20G:
+ bp->link_params.req_line_speed[idx] = SPEED_20000;
+ break;
default:
- BNX2X_ERROR("NVRAM config error. "
- "BAD link speed link_config 0x%x\n",
- link_config);
+ BNX2X_ERR("NVRAM config error. "
+ "BAD link speed link_config 0x%x\n",
+ link_config);
bp->link_params.req_line_speed[idx] =
SPEED_AUTO_NEG;
bp->port.advertising[idx] =
@@ -8325,10 +9208,13 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
#ifdef BCM_CNIC
static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
{
+ int port = BP_PORT(bp);
+ int func = BP_ABS_FUNC(bp);
+
u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
- drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
+ drv_lic_key[port].max_iscsi_conn);
u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
- drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
+ drv_lic_key[port].max_fcoe_conn);
/* Get the number of maximum allowed iSCSI and FCoE connections */
bp->cnic_eth_dev.max_iscsi_conn =
@@ -8339,11 +9225,59 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+ /* Read the WWN: */
+ if (!IS_MF(bp)) {
+ /* Port info */
+ bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_port_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_port_name_lower);
+
+ /* Node info */
+ bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_node_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].
+ fcoe_wwn_node_name_lower);
+ } else if (!IS_MF_SD(bp)) {
+ u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+
+ /*
+ * Read the WWN info only if the FCoE feature is enabled for
+ * this function.
+ */
+ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+ /* Port info */
+ bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_port_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_port_name_lower);
+
+ /* Node info */
+ bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_node_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+ MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_wwn_node_name_lower);
+ }
+ }
+
BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
bp->cnic_eth_dev.max_iscsi_conn,
bp->cnic_eth_dev.max_fcoe_conn);
- /* If mamimum allowed number of connections is zero -
+ /*
+ * If maximum allowed number of connections is zero -
* disable the feature.
*/
if (!bp->cnic_eth_dev.max_iscsi_conn)
@@ -8364,6 +9298,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
u8 *fip_mac = bp->fip_mac;
#endif
+ /* Zero primary MAC configuration */
+ memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
if (BP_NOMCP(bp)) {
BNX2X_ERROR("warning: random MAC workaround active\n");
random_ether_addr(bp->dev->dev_addr);
@@ -8385,9 +9322,10 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
iscsi_mac_addr_upper);
val = MF_CFG_RD(bp, func_ext_config[func].
iscsi_mac_addr_lower);
- BNX2X_DEV_INFO("Read iSCSI MAC: "
- "0x%x:0x%04x\n", val2, val);
bnx2x_set_mac_buf(iscsi_mac, val, val2);
+ BNX2X_DEV_INFO("Read iSCSI MAC: "
+ BNX2X_MAC_FMT"\n",
+ BNX2X_MAC_PRN_LIST(iscsi_mac));
} else
bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
@@ -8396,9 +9334,10 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
fcoe_mac_addr_upper);
val = MF_CFG_RD(bp, func_ext_config[func].
fcoe_mac_addr_lower);
- BNX2X_DEV_INFO("Read FCoE MAC to "
- "0x%x:0x%04x\n", val2, val);
bnx2x_set_mac_buf(fip_mac, val, val2);
+ BNX2X_DEV_INFO("Read FCoE L2 MAC to "
+ BNX2X_MAC_FMT"\n",
+ BNX2X_MAC_PRN_LIST(fip_mac));
} else
bp->flags |= NO_FCOE_FLAG;
@@ -8416,6 +9355,12 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
val = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_lower);
bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+ val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ fcoe_fip_mac_upper);
+ val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+ fcoe_fip_mac_lower);
+ bnx2x_set_mac_buf(fip_mac, val, val2);
#endif
}
@@ -8423,13 +9368,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
#ifdef BCM_CNIC
- /* Set the FCoE MAC in modes other then MF_SI */
- if (!CHIP_IS_E1x(bp)) {
- if (IS_MF_SD(bp))
- memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
- else if (!IS_MF(bp))
- memcpy(fip_mac, iscsi_mac, ETH_ALEN);
- }
+ /* Set the FCoE MAC in MF_SD mode */
+ if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp))
+ memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
/* Disable iSCSI if MAC configuration is
* invalid.
@@ -8447,6 +9388,13 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
memset(bp->fip_mac, 0, ETH_ALEN);
}
#endif
+
+ if (!is_valid_ether_addr(bp->dev->dev_addr))
+ dev_err(&bp->pdev->dev,
+ "bad Ethernet MAC address configuration: "
+ BNX2X_MAC_FMT", change it manually before bringing up "
+ "the appropriate network interface\n",
+ BNX2X_MAC_PRN_LIST(bp->dev->dev_addr));
}
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
@@ -8458,27 +9406,66 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bnx2x_get_common_hwinfo(bp);
+ /*
+ * initialize IGU parameters
+ */
if (CHIP_IS_E1x(bp)) {
bp->common.int_block = INT_BLOCK_HC;
bp->igu_dsb_id = DEF_SB_IGU_ID;
bp->igu_base_sb = 0;
- bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
- NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
} else {
bp->common.int_block = INT_BLOCK_IGU;
val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
- DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
+ int tout = 5000;
+
+ BNX2X_DEV_INFO("FORCING Normal Mode\n");
+
+ val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
+ REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
+ REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
+
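+ /* Wait up to ~5 seconds for the IGU memories reset to complete */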
+ while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+ tout--;
+ usleep_range(1000, 1000);
+ }
+
+ if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+ dev_err(&bp->pdev->dev,
+ "FORCING Normal Mode failed!!!\n");
+ return -EPERM;
+ }
+ }
+
+ if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+ BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
} else
- DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
+ BNX2X_DEV_INFO("IGU Normal Mode\n");
bnx2x_get_igu_cam_info(bp);
}
- DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
- bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
+
+ /*
+ * set base FW non-default (fast path) status block id, this value is
+ * used to initialize the fw_sb_id saved on the fp/queue structure to
+ * determine the id used by the FW.
+ */
+ if (CHIP_IS_E1x(bp))
+ bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
+ else /*
+ * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
+ * the same queue are indicated on the same IGU SB). So we prefer
+ * FW and IGU SBs to be the same value.
+ */
+ bp->base_fw_ndsb = bp->igu_base_sb;
+
+ BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
+ "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
+ bp->igu_sb_cnt, bp->base_fw_ndsb);
/*
* Initialize MF configuration
@@ -8489,10 +9476,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
vn = BP_E1HVN(bp);
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
- DP(NETIF_MSG_PROBE,
- "shmem2base 0x%x, size %d, mfcfg offset %d\n",
- bp->common.shmem2_base, SHMEM2_RD(bp, size),
- (u32)offsetof(struct shmem2_region, mf_cfg_addr));
+ BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
+ bp->common.shmem2_base, SHMEM2_RD(bp, size),
+ (u32)offsetof(struct shmem2_region, mf_cfg_addr));
+
if (SHMEM2_HAS(bp, mf_cfg_addr))
bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
else
@@ -8523,8 +9510,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_config[vn] = MF_CFG_RD(bp,
func_mf_config[func].config);
} else
- DP(NETIF_MSG_PROBE, "illegal MAC "
- "address for SI\n");
+ BNX2X_DEV_INFO("illegal MAC address "
+ "for SI\n");
break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
/* get OV configuration */
@@ -8537,14 +9524,12 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_config[vn] = MF_CFG_RD(bp,
func_mf_config[func].config);
} else
- DP(NETIF_MSG_PROBE, "illegal OV for "
- "SD\n");
+ BNX2X_DEV_INFO("illegal OV for SD\n");
break;
default:
/* Unknown configuration: reset mf_config */
bp->mf_config[vn] = 0;
- DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
- val);
+ BNX2X_DEV_INFO("unkown MF mode 0x%x\n", val);
}
}
@@ -8557,13 +9542,16 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
FUNC_MF_CFG_E1HOV_TAG_MASK;
if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
bp->mf_ov = val;
- BNX2X_DEV_INFO("MF OV for func %d is %d"
- " (0x%04x)\n", func,
- bp->mf_ov, bp->mf_ov);
+ bp->path_has_ovlan = true;
+
+ BNX2X_DEV_INFO("MF OV for func %d is %d "
+ "(0x%04x)\n", func, bp->mf_ov,
+ bp->mf_ov);
} else {
- BNX2X_ERR("No valid MF OV for func %d,"
- " aborting\n", func);
- rc = -EPERM;
+ dev_err(&bp->pdev->dev,
+ "No valid MF OV for func %d, "
+ "aborting\n", func);
+ return -EPERM;
}
break;
case MULTI_FUNCTION_SI:
@@ -8572,31 +9560,40 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
break;
default:
if (vn) {
- BNX2X_ERR("VN %d in single function mode,"
- " aborting\n", vn);
- rc = -EPERM;
+ dev_err(&bp->pdev->dev,
+ "VN %d is in a single function mode, "
+ "aborting\n", vn);
+ return -EPERM;
}
break;
}
+ /* Check if the other port on the path needs ovlan:
+ * since the MF configuration is shared between ports,
+ * the only possible mixed modes are
+ * {SF, SI}, {SF, SD}, {SD, SF} and {SI, SF}.
+ */
+ if (CHIP_MODE_IS_4_PORT(bp) &&
+ !bp->path_has_ovlan &&
+ !IS_MF(bp) &&
+ bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+ u8 other_port = !BP_PORT(bp);
+ u8 other_func = BP_PATH(bp) + 2*other_port;
+ val = MF_CFG_RD(bp,
+ func_mf_config[other_func].e1hov_tag);
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
+ bp->path_has_ovlan = true;
+ }
}
/* adjust igu_sb_cnt to MF for E1x */
if (CHIP_IS_E1x(bp) && IS_MF(bp))
bp->igu_sb_cnt /= E1HVN_MAX;
- /*
- * adjust E2 sb count: to be removed when FW will support
- * more then 16 L2 clients
- */
-#define MAX_L2_CLIENTS 16
- if (CHIP_IS_E2(bp))
- bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
- MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
+ /* port info */
+ bnx2x_get_port_hwinfo(bp);
if (!BP_NOMCP(bp)) {
- bnx2x_get_port_hwinfo(bp);
-
bp->fw_seq =
(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK);
@@ -8610,6 +9607,16 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bnx2x_get_cnic_info(bp);
#endif
+ /* Get current FW pulse sequence */
+ if (!BP_NOMCP(bp)) {
+ int mb_idx = BP_FW_MB_IDX(bp);
+
+ bp->fw_drv_pulse_wr_seq =
+ (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK);
+ BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+ }
+
return rc;
}
@@ -8677,16 +9684,59 @@ out_not_found:
return;
}
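+/* Summarize the chip type/revision, port mode, MF mode and host endianness
+ * into the INIT_MODE_FLAGS() bitmap.
+ */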
+static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
+{
+ u32 flags = 0;
+
+ if (CHIP_REV_IS_FPGA(bp))
+ SET_FLAGS(flags, MODE_FPGA);
+ else if (CHIP_REV_IS_EMUL(bp))
+ SET_FLAGS(flags, MODE_EMUL);
+ else
+ SET_FLAGS(flags, MODE_ASIC);
+
+ if (CHIP_MODE_IS_4_PORT(bp))
+ SET_FLAGS(flags, MODE_PORT4);
+ else
+ SET_FLAGS(flags, MODE_PORT2);
+
+ if (CHIP_IS_E2(bp))
+ SET_FLAGS(flags, MODE_E2);
+ else if (CHIP_IS_E3(bp)) {
+ SET_FLAGS(flags, MODE_E3);
+ if (CHIP_REV(bp) == CHIP_REV_Ax)
+ SET_FLAGS(flags, MODE_E3_A0);
+ else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
+ SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
+ }
+
+ if (IS_MF(bp)) {
+ SET_FLAGS(flags, MODE_MF);
+ switch (bp->mf_mode) {
+ case MULTI_FUNCTION_SD:
+ SET_FLAGS(flags, MODE_MF_SD);
+ break;
+ case MULTI_FUNCTION_SI:
+ SET_FLAGS(flags, MODE_MF_SI);
+ break;
+ }
+ } else
+ SET_FLAGS(flags, MODE_SF);
+
+#if defined(__LITTLE_ENDIAN)
+ SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
+#else /*(__BIG_ENDIAN)*/
+ SET_FLAGS(flags, MODE_BIG_ENDIAN);
+#endif
+ INIT_MODE_FLAGS(bp) = flags;
+}
+
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
int func;
int timer_interval;
int rc;
- /* Disable interrupt handling until HW is initialized */
- atomic_set(&bp->intr_sem, 1);
- smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
-
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
spin_lock_init(&bp->stats_lock);
@@ -8695,12 +9745,17 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
#endif
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
- INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
-
+ INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
+ INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
rc = bnx2x_get_hwinfo(bp);
+ if (rc)
+ return rc;
- if (!rc)
- rc = bnx2x_alloc_mem_bp(bp);
+ bnx2x_set_modes_bitmap(bp);
+
+ rc = bnx2x_alloc_mem_bp(bp);
+ if (rc)
+ return rc;
bnx2x_read_fwinfo(bp);
@@ -8718,7 +9773,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
"must load devices in order!\n");
bp->multi_mode = multi_mode;
- bp->int_mode = int_mode;
/* Set TPA flags */
if (disable_tpa) {
@@ -8754,6 +9808,21 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
bnx2x_dcbx_init_params(bp);
+#ifdef BCM_CNIC
+ if (CHIP_IS_E1x(bp))
+ bp->cnic_base_cl_id = FP_SB_MAX_E1x;
+ else
+ bp->cnic_base_cl_id = FP_SB_MAX_E2;
+#endif
+
+ /* multiple tx priority */
+ if (CHIP_IS_E1x(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
+ if (CHIP_IS_E3B0(bp))
+ bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+
return rc;
}
@@ -8762,49 +9831,70 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
* General service functions
****************************************************************************/
+/*
+ * net_device service functions
+ */
+
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
+ bool global = false;
+ int other_engine = BP_PATH(bp) ? 0 : 1;
+ u32 other_load_counter, load_counter;
netif_carrier_off(dev);
bnx2x_set_power_state(bp, PCI_D0);
- if (!bnx2x_reset_is_done(bp)) {
+ other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
+ load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
+
+ /*
+ * If a parity error occurred during the unload, then attentions
+ * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
+ * want the first function loaded on the current engine to
+ * complete the recovery.
+ */
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+ bnx2x_chk_parity_attn(bp, &global, true))
do {
- /* Reset MCP mail box sequence if there is on going
- * recovery
+ /*
+ * If there are attentions and they are in global
+ * blocks, set the GLOBAL_RESET bit regardless of whether
+ * it will be this function that will complete the
+ * recovery or not.
*/
- bp->fw_seq = 0;
+ if (global)
+ bnx2x_set_reset_global(bp);
- /* If it's the first function to load and reset done
- * is still not cleared it may mean that. We don't
- * check the attention state here because it may have
- * already been cleared by a "common" reset but we
- * shell proceed with "process kill" anyway.
+ /*
+ * Only the first function on the current engine should
+ * try to recover in open. In case of attentions in
+ * global blocks only the first in the chip should try
+ * to recover.
*/
- if ((bnx2x_get_load_cnt(bp) == 0) &&
- bnx2x_trylock_hw_lock(bp,
- HW_LOCK_RESOURCE_RESERVED_08) &&
- (!bnx2x_leader_reset(bp))) {
- DP(NETIF_MSG_HW, "Recovered in open\n");
+ if ((!load_counter &&
+ (!global || !other_load_counter)) &&
+ bnx2x_trylock_leader_lock(bp) &&
+ !bnx2x_leader_reset(bp)) {
+ netdev_info(bp->dev, "Recovered in open\n");
break;
}
+ /* recovery has failed... */
bnx2x_set_power_state(bp, PCI_D3hot);
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
- printk(KERN_ERR"%s: Recovery flow hasn't been properly"
+ netdev_err(bp->dev, "Recovery flow hasn't been properly"
" completed yet. Try again later. If u still see this"
" message after a few retries then power cycle is"
- " required.\n", bp->dev->name);
+ " required.\n");
return -EAGAIN;
} while (0);
- }
bp->recovery_state = BNX2X_RECOVERY_DONE;
-
return bnx2x_nic_load(bp, LOAD_OPEN);
}
@@ -8815,198 +9905,126 @@ static int bnx2x_close(struct net_device *dev)
/* Unload the driver, release IRQs */
bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+
+ /* Power off */
bnx2x_set_power_state(bp, PCI_D3hot);
return 0;
}
-#define E1_MAX_UC_LIST 29
-#define E1H_MAX_UC_LIST 30
-#define E2_MAX_UC_LIST 14
-static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
+static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p)
{
- if (CHIP_IS_E1(bp))
- return E1_MAX_UC_LIST;
- else if (CHIP_IS_E1H(bp))
- return E1H_MAX_UC_LIST;
- else
- return E2_MAX_UC_LIST;
-}
+ int mc_count = netdev_mc_count(bp->dev);
+ struct bnx2x_mcast_list_elem *mc_mac =
+ kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
+ struct netdev_hw_addr *ha;
+ if (!mc_mac)
+ return -ENOMEM;
-static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
-{
- if (CHIP_IS_E1(bp))
- /* CAM Entries for Port0:
- * 0 - prim ETH MAC
- * 1 - BCAST MAC
- * 2 - iSCSI L2 ring ETH MAC
- * 3-31 - UC MACs
- *
- * Port1 entries are allocated the same way starting from
- * entry 32.
- */
- return 3 + 32 * BP_PORT(bp);
- else if (CHIP_IS_E1H(bp)) {
- /* CAM Entries:
- * 0-7 - prim ETH MAC for each function
- * 8-15 - iSCSI L2 ring ETH MAC for each function
- * 16 till 255 UC MAC lists for each function
- *
- * Remark: There is no FCoE support for E1H, thus FCoE related
- * MACs are not considered.
- */
- return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
- bnx2x_max_uc_list(bp) * BP_FUNC(bp);
- } else {
- /* CAM Entries (there is a separate CAM per engine):
- * 0-4 - prim ETH MAC for each function
- * 4-7 - iSCSI L2 ring ETH MAC for each function
- * 8-11 - FIP ucast L2 MAC for each function
- * 12-15 - ALL_ENODE_MACS mcast MAC for each function
- * 16 till 71 UC MAC lists for each function
- */
- u8 func_idx =
- (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
+ INIT_LIST_HEAD(&p->mcast_list);
- return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
- bnx2x_max_uc_list(bp) * func_idx;
+ netdev_for_each_mc_addr(ha, bp->dev) {
+ mc_mac->mac = bnx2x_mc_addr(ha);
+ list_add_tail(&mc_mac->link, &p->mcast_list);
+ mc_mac++;
}
+
+ p->mcast_list_len = mc_count;
+
+ return 0;
}
-/* set uc list, do not wait as wait implies sleep and
- * set_rx_mode can be invoked from non-sleepable context.
+static inline void bnx2x_free_mcast_macs_list(
+ struct bnx2x_mcast_ramrod_params *p)
+{
+ struct bnx2x_mcast_list_elem *mc_mac =
+ list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
+ link);
+
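+ /* All list elements were allocated as a single array in
+ * bnx2x_init_mcast_macs_list(), so freeing the first entry
+ * releases the whole list.
+ */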
+ WARN_ON(!mc_mac);
+ kfree(mc_mac);
+}
+
+/**
+ * bnx2x_set_uc_list - configure a new unicast MACs list.
*
- * Instead we use the same ramrod data buffer each time we need
- * to configure a list of addresses, and use the fact that the
- * list of MACs is changed in an incremental way and that the
- * function is called under the netif_addr_lock. A temporary
- * inconsistent CAM configuration (possible in case of very fast
- * sequence of add/del/add on the host side) will shortly be
- * restored by the handler of the last ramrod.
+ * @bp: driver handle
+ *
+ * We will use zero (0) as a MAC type for these MACs.
*/
-static int bnx2x_set_uc_list(struct bnx2x *bp)
+static inline int bnx2x_set_uc_list(struct bnx2x *bp)
{
- int i = 0, old;
+ int rc;
struct net_device *dev = bp->dev;
- u8 offset = bnx2x_uc_list_cam_offset(bp);
struct netdev_hw_addr *ha;
- struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
- dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+ unsigned long ramrod_flags = 0;
- if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
- return -EINVAL;
+ /* First schedule a cleanup up of old configuration */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
+ return rc;
+ }
netdev_for_each_uc_addr(ha, dev) {
- /* copy mac */
- config_cmd->config_table[i].msb_mac_addr =
- swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
- config_cmd->config_table[i].middle_mac_addr =
- swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
- config_cmd->config_table[i].lsb_mac_addr =
- swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
-
- config_cmd->config_table[i].vlan_id = 0;
- config_cmd->config_table[i].pf_id = BP_FUNC(bp);
- config_cmd->config_table[i].clients_bit_vector =
- cpu_to_le32(1 << BP_L_ID(bp));
-
- SET_FLAG(config_cmd->config_table[i].flags,
- MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
- T_ETH_MAC_COMMAND_SET);
-
- DP(NETIF_MSG_IFUP,
- "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
- config_cmd->config_table[i].msb_mac_addr,
- config_cmd->config_table[i].middle_mac_addr,
- config_cmd->config_table[i].lsb_mac_addr);
-
- i++;
-
- /* Set uc MAC in NIG */
- bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
- LLH_CAM_ETH_LINE + i);
- }
- old = config_cmd->hdr.length;
- if (old > i) {
- for (; i < old; i++) {
- if (CAM_IS_INVALID(config_cmd->
- config_table[i])) {
- /* already invalidated */
- break;
- }
- /* invalidate */
- SET_FLAG(config_cmd->config_table[i].flags,
- MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
- T_ETH_MAC_COMMAND_INVALIDATE);
+ rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
+ BNX2X_UC_LIST_MAC, &ramrod_flags);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to schedule ADD operations: %d\n",
+ rc);
+ return rc;
}
}
- wmb();
-
- config_cmd->hdr.length = i;
- config_cmd->hdr.offset = offset;
- config_cmd->hdr.client_id = 0xff;
- /* Mark that this ramrod doesn't use bp->set_mac_pending for
- * synchronization.
- */
- config_cmd->hdr.echo = 0;
-
- mb();
-
- return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
- U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
-
+ /* Execute the pending commands */
+ __set_bit(RAMROD_CONT, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
+ BNX2X_UC_LIST_MAC, &ramrod_flags);
}
-void bnx2x_invalidate_uc_list(struct bnx2x *bp)
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
{
- int i;
- struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
- dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
- int ramrod_flags = WAIT_RAMROD_COMMON;
- u8 offset = bnx2x_uc_list_cam_offset(bp);
- u8 max_list_size = bnx2x_max_uc_list(bp);
-
- for (i = 0; i < max_list_size; i++) {
- SET_FLAG(config_cmd->config_table[i].flags,
- MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
- T_ETH_MAC_COMMAND_INVALIDATE);
- bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
- }
-
- wmb();
+ struct net_device *dev = bp->dev;
+ struct bnx2x_mcast_ramrod_params rparam = {0};
+ int rc = 0;
- config_cmd->hdr.length = max_list_size;
- config_cmd->hdr.offset = offset;
- config_cmd->hdr.client_id = 0xff;
- /* We'll wait for a completion this time... */
- config_cmd->hdr.echo = 1;
+ rparam.mcast_obj = &bp->mcast_obj;
- bp->set_mac_pending = 1;
+ /* first, clear all configured multicast MACs */
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to clear multicast "
+ "configuration: %d\n", rc);
+ return rc;
+ }
- mb();
+ /* then, configure a new MACs list */
+ if (netdev_mc_count(dev)) {
+ rc = bnx2x_init_mcast_macs_list(bp, &rparam);
+ if (rc) {
+ BNX2X_ERR("Failed to create multicast MACs "
+ "list: %d\n", rc);
+ return rc;
+ }
- bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
- U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+ /* Now add the new MACs */
+ rc = bnx2x_config_mcast(bp, &rparam,
+ BNX2X_MCAST_CMD_ADD);
+ if (rc < 0)
+ BNX2X_ERR("Failed to set a new multicast "
+ "configuration: %d\n", rc);
- /* Wait for a completion */
- bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
- ramrod_flags);
+ bnx2x_free_mcast_macs_list(&rparam);
+ }
+ return rc;
}
-static inline int bnx2x_set_mc_list(struct bnx2x *bp)
-{
- /* some multicasts */
- if (CHIP_IS_E1(bp)) {
- return bnx2x_set_e1_mc_list(bp);
- } else { /* E1H and newer */
- return bnx2x_set_e1h_mc_list(bp);
- }
-}
-/* called with netif_tx_lock from dev_mcast.c */
+/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
void bnx2x_set_rx_mode(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -9017,23 +10035,31 @@ void bnx2x_set_rx_mode(struct net_device *dev)
return;
}
- DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+ DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
if (dev->flags & IFF_PROMISC)
rx_mode = BNX2X_RX_MODE_PROMISC;
- else if (dev->flags & IFF_ALLMULTI)
+ else if ((dev->flags & IFF_ALLMULTI) ||
+ ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp)))
rx_mode = BNX2X_RX_MODE_ALLMULTI;
else {
/* some multicasts */
- if (bnx2x_set_mc_list(bp))
+ if (bnx2x_set_mc_list(bp) < 0)
rx_mode = BNX2X_RX_MODE_ALLMULTI;
- /* some unicasts */
- if (bnx2x_set_uc_list(bp))
+ if (bnx2x_set_uc_list(bp) < 0)
rx_mode = BNX2X_RX_MODE_PROMISC;
}
bp->rx_mode = rx_mode;
+
+ /* Schedule the rx_mode command */
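+ /* If a previous rx_mode command is still pending, just mark that a
+ * new one is needed; its completion path re-issues the configuration.
+ */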
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
+ set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ return;
+ }
+
bnx2x_set_storm_rx_mode(bp);
}
@@ -9122,10 +10148,35 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2x,
#endif
+ .ndo_setup_tc = bnx2x_setup_tc,
+
+#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+ .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
+#endif
};
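+/* Prefer 64-bit DMA addressing (DAC); fall back to a 32-bit mask if the
+ * platform does not support it.
+ */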
+static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
+{
+ struct device *dev = &bp->pdev->dev;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
+ bp->flags |= USING_DAC_FLAG;
+ if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
+ dev_err(dev, "dma_set_coherent_mask failed, "
+ "aborting\n");
+ return -EIO;
+ }
+ } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+ dev_err(dev, "System does not support DMA, aborting\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
- struct net_device *dev)
+ struct net_device *dev,
+ unsigned long board_type)
{
struct bnx2x *bp;
int rc;
@@ -9179,29 +10230,15 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
goto err_out_release;
}
- bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (bp->pcie_cap == 0) {
- dev_err(&bp->pdev->dev,
- "Cannot find PCI Express capability, aborting\n");
+ if (!pci_is_pcie(pdev)) {
+ dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
rc = -EIO;
goto err_out_release;
}
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
- bp->flags |= USING_DAC_FLAG;
- if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
- dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
- " failed, aborting\n");
- rc = -EIO;
- goto err_out_release;
- }
-
- } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
- dev_err(&bp->pdev->dev,
- "System does not support DMA, aborting\n");
- rc = -EIO;
+ rc = bnx2x_set_coherency_mask(bp);
+ if (rc)
goto err_out_release;
- }
dev->mem_start = pci_resource_start(pdev, 0);
dev->base_addr = dev->mem_start;
@@ -9217,25 +10254,31 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
goto err_out_release;
}
- bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
- min_t(u64, BNX2X_DB_SIZE(bp),
- pci_resource_len(pdev, 2)));
- if (!bp->doorbells) {
- dev_err(&bp->pdev->dev,
- "Cannot map doorbell space, aborting\n");
- rc = -ENOMEM;
- goto err_out_unmap;
- }
-
bnx2x_set_power_state(bp, PCI_D0);
/* clean indirect addresses */
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
PCICFG_VENDOR_ID_OFFSET);
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
+ /* Clean the following indirect addresses for all functions since it
+ * is not used by the driver.
+ */
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+
+ /*
+ * Enable internal target-read (in case we are probed after PF FLR).
+ * Must be done prior to any BAR read access. Only for 57712 and up
+ */
+ if (board_type != BCM57710 &&
+ board_type != BCM57711 &&
+ board_type != BCM57711E)
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
/* Reset the load counter */
bnx2x_clear_load_cnt(bp);
@@ -9273,16 +10316,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
return 0;
-err_out_unmap:
- if (bp->regview) {
- iounmap(bp->regview);
- bp->regview = NULL;
- }
- if (bp->doorbells) {
- iounmap(bp->doorbells);
- bp->doorbells = NULL;
- }
-
err_out_release:
if (atomic_read(&pdev->enable_cnt) == 1)
pci_release_regions(pdev);
@@ -9451,7 +10484,7 @@ int bnx2x_init_firmware(struct bnx2x *bp)
fw_file_name = FW_FILE_NAME_E1;
else if (CHIP_IS_E1H(bp))
fw_file_name = FW_FILE_NAME_E1H;
- else if (CHIP_IS_E2(bp))
+ else if (!CHIP_IS_E1x(bp))
fw_file_name = FW_FILE_NAME_E2;
else {
BNX2X_ERR("Unsupported chip revision\n");
@@ -9519,9 +10552,47 @@ request_firmware_exit:
return rc;
}
-static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
+static void bnx2x_release_firmware(struct bnx2x *bp)
{
- int cid_count = L2_FP_COUNT(l2_cid_count);
+ kfree(bp->init_ops_offsets);
+ kfree(bp->init_ops);
+ kfree(bp->init_data);
+ release_firmware(bp->firmware);
+}
+
+
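+/* HW init/reset and firmware callbacks registered with the function object
+ * in bnx2x__init_func_obj() below.
+ */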
+static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
+ .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
+ .init_hw_cmn = bnx2x_init_hw_common,
+ .init_hw_port = bnx2x_init_hw_port,
+ .init_hw_func = bnx2x_init_hw_func,
+
+ .reset_hw_cmn = bnx2x_reset_common,
+ .reset_hw_port = bnx2x_reset_port,
+ .reset_hw_func = bnx2x_reset_func,
+
+ .gunzip_init = bnx2x_gunzip_init,
+ .gunzip_end = bnx2x_gunzip_end,
+
+ .init_fw = bnx2x_init_firmware,
+ .release_fw = bnx2x_release_firmware,
+};
+
+void bnx2x__init_func_obj(struct bnx2x *bp)
+{
+ /* Prepare DMAE related driver resources */
+ bnx2x_setup_dmae(bp);
+
+ bnx2x_init_func_obj(bp, &bp->func_obj,
+ bnx2x_sp(bp, func_rdata),
+ bnx2x_sp_mapping(bp, func_rdata),
+ &bnx2x_func_sp_drv);
+}
+
+/* must be called after sriov-enable */
+static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
+{
+ int cid_count = BNX2X_L2_CID_COUNT(bp);
#ifdef BCM_CNIC
cid_count += CNIC_CID_MAX;
@@ -9529,24 +10600,74 @@ static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
return roundup(cid_count, QM_CID_ROUND);
}
+/**
+ * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
+ *
+ * @pdev: pci device
+ *
+ */
+static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
+{
+ int pos;
+ u16 control;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+
+ /*
+ * If MSI-X is not supported - return number of SBs needed to support
+ * one fast path queue: one FP queue + SB for CNIC
+ */
+ if (!pos)
+ return 1 + CNIC_PRESENT;
+
+ /*
+ * The value in the PCI configuration space is the index of the last
+ * entry, namely one less than the actual size of the table, which is
+ * exactly what we want to return from this function: number of all SBs
+ * without the default SB.
+ */
+ pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+ return control & PCI_MSIX_FLAGS_QSIZE;
+}
+
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *dev = NULL;
struct bnx2x *bp;
int pcie_width, pcie_speed;
- int rc, cid_count;
+ int rc, max_non_def_sbs;
+ int rx_count, tx_count, rss_count;
+ /*
+ * An estimated maximum supported CoS number according to the chip
+ * version.
+ * We will try to roughly estimate the maximum number of CoSes this chip
+ * may support in order to minimize the memory allocated for Tx
+ * netdev_queue's. This number will be accurately calculated during the
+ * initialization of bp->max_cos based on the chip versions AND chip
+ * revision in the bnx2x_init_bp().
+ */
+ u8 max_cos_est = 0;
switch (ent->driver_data) {
case BCM57710:
case BCM57711:
case BCM57711E:
- cid_count = FP_SB_MAX_E1x;
+ max_cos_est = BNX2X_MULTI_TX_COS_E1X;
break;
case BCM57712:
- case BCM57712E:
- cid_count = FP_SB_MAX_E2;
+ case BCM57712_MF:
+ max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
+ break;
+
+ case BCM57800:
+ case BCM57800_MF:
+ case BCM57810:
+ case BCM57810_MF:
+ case BCM57840:
+ case BCM57840_MF:
+ max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
break;
default:
@@ -9555,38 +10676,77 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
return -ENODEV;
}
- cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
+
+ /* !!! FIXME !!!
+ * Do not allow the maximum SB count to grow above 16
+ * since Special CIDs start from 16*BNX2X_MULTI_TX_COS=48.
+ * We will use the FP_SB_MAX_E1x macro for this matter.
+ */
+ max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
+
+ WARN_ON(!max_non_def_sbs);
+
+ /* Maximum number of RSS queues: one IGU SB goes to CNIC */
+ rss_count = max_non_def_sbs - CNIC_PRESENT;
+
+ /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
+ rx_count = rss_count + FCOE_PRESENT;
+
+ /*
+ * Maximum number of netdev Tx queues:
+ * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
+ */
+ tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
/* dev zeroed in init_etherdev */
- dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
+ dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
if (!dev) {
dev_err(&pdev->dev, "Cannot allocate net device\n");
return -ENOMEM;
}
bp = netdev_priv(dev);
- bp->msg_enable = debug;
- pci_set_drvdata(pdev, dev);
+ DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n",
+ tx_count, rx_count);
- bp->l2_cid_count = cid_count;
+ bp->igu_sb_cnt = max_non_def_sbs;
+ bp->msg_enable = debug;
+ pci_set_drvdata(pdev, dev);
- rc = bnx2x_init_dev(pdev, dev);
+ rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
if (rc < 0) {
free_netdev(dev);
return rc;
}
+ DP(NETIF_MSG_DRV, "max_non_def_sbs %d", max_non_def_sbs);
+
rc = bnx2x_init_bp(bp);
if (rc)
goto init_one_exit;
+ /*
+ * Map doorbells here as we need the real value of bp->max_cos, which
+ * is initialized in bnx2x_init_bp().
+ */
+ bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+ min_t(u64, BNX2X_DB_SIZE(bp),
+ pci_resource_len(pdev, 2)));
+ if (!bp->doorbells) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbell space, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_exit;
+ }
+
/* calc qm_cid_count */
- bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
+ bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
#ifdef BCM_CNIC
- /* disable FCOE L2 queue for E1x*/
- if (CHIP_IS_E1x(bp))
+ /* disable FCOE L2 queue for E1x and E3*/
+ if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
bp->flags |= NO_FCOE_FLAG;
#endif
@@ -9686,7 +10846,7 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
bnx2x_set_power_state(bp, PCI_D3hot);
/* Make sure RESET task is not scheduled before continuing */
- cancel_delayed_work_sync(&bp->reset_task);
+ cancel_delayed_work_sync(&bp->sp_rtnl_task);
if (bp->regview)
iounmap(bp->regview);
@@ -9713,12 +10873,17 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bp->rx_mode = BNX2X_RX_MODE_NONE;
+#ifdef BCM_CNIC
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+ /* Stop Tx */
+ bnx2x_tx_disable(bp);
+
bnx2x_netif_stop(bp, 0);
- netif_carrier_off(bp->dev);
del_timer_sync(&bp->timer);
- bp->stats_state = STATS_STATE_DISABLED;
- DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
/* Release IRQs */
bnx2x_free_irq(bp);
@@ -9733,6 +10898,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bp->state = BNX2X_STATE_CLOSED;
+ netif_carrier_off(bp->dev);
+
return 0;
}
@@ -9845,8 +11012,8 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
struct bnx2x *bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- printk(KERN_ERR "Handling parity error recovery. "
- "Try again later\n");
+ netdev_err(bp->dev, "Handling parity error recovery. "
+ "Try again later\n");
return;
}
@@ -9905,10 +11072,33 @@ static void __exit bnx2x_cleanup(void)
destroy_workqueue(bnx2x_wq);
}
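+/* Signal a link change by raising the per-function general attention #12. */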
+void bnx2x_notify_link_changed(struct bnx2x *bp)
+{
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
+}
+
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
#ifdef BCM_CNIC
+/**
+ * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
+ *
+ * @bp: driver handle
+ *
+ * This function will wait until the ramrod completion returns.
+ * Return 0 if success, -ENODEV if ramrod doesn't return.
+ */
+static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
+{
+ unsigned long ramrod_flags = 0;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
+ &bp->iscsi_l2_mac_obj, true,
+ BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
+}
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
@@ -9929,23 +11119,22 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
& SPE_HDR_CONN_TYPE) >>
SPE_HDR_CONN_TYPE_SHIFT;
+ u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
+ >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
/* Set validation for iSCSI L2 client before sending SETUP
* ramrod
*/
if (type == ETH_CONNECTION_TYPE) {
- u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
- hdr.conn_and_cmd_data) >>
- SPE_HDR_CMD_ID_SHIFT) & 0xff;
-
if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
- bnx2x_set_ctx_validation(&bp->context.
- vcxt[BNX2X_ISCSI_ETH_CID].eth,
- HW_CID(bp, BNX2X_ISCSI_ETH_CID));
+ bnx2x_set_ctx_validation(bp, &bp->context.
+ vcxt[BNX2X_ISCSI_ETH_CID].eth,
+ BNX2X_ISCSI_ETH_CID);
}
- /* There may be not more than 8 L2 and not more than 8 L5 SPEs
- * We also check that the number of outstanding
+ /*
+ * There may be no more than 8 L2 and no more than 8 L5 SPEs
+ * in the air. We also check that the number of outstanding
* COMMON ramrods is not more than the EQ and SPQ can
* accommodate.
*/
@@ -10071,18 +11260,61 @@ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
return bnx2x_cnic_ctl_send(bp, &ctl);
}
-static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
{
- struct cnic_ctl_info ctl;
+ struct cnic_ctl_info ctl = {0};
/* first we tell CNIC and only then we count this as a completion */
ctl.cmd = CNIC_CTL_COMPLETION_CMD;
ctl.data.comp.cid = cid;
+ ctl.data.comp.error = err;
bnx2x_cnic_ctl_send_bh(bp, &ctl);
bnx2x_cnic_sp_post(bp, 0);
}
+
+/* Called with netif_addr_lock_bh() taken.
+ * Sets an rx_mode config for an iSCSI ETH client.
+ * Doesn't block.
+ * Completion should be checked outside.
+ */
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
+{
+ unsigned long accept_flags = 0, ramrod_flags = 0;
+ u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+ int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
+
+ if (start) {
+ /* Start accepting on the iSCSI L2 ring. Accept all multicasts
+ * because it's the only way for the UIO Queue to accept
+ * them: in non-promiscuous mode only one Queue per
+ * function will receive multicast packets (the leading one in
+ * our case).
+ */
+ __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ /* Clear STOP_PENDING bit if START is requested */
+ clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
+
+ sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
+ } else
+ /* Clear START_PENDING bit if STOP is requested */
+ clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
+
+ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+ set_bit(sched_state, &bp->sp_state);
+ else {
+ __set_bit(RAMROD_RX, &ramrod_flags);
+ bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
+ ramrod_flags);
+ }
+}
+
+
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -10106,45 +11338,65 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
/* rtnl_lock is held. */
case DRV_CTL_START_L2_CMD: {
- u32 cli = ctl->data.ring.client_id;
-
- /* Clear FCoE FIP and ALL ENODE MACs addresses first */
- bnx2x_del_fcoe_eth_macs(bp);
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ unsigned long sp_bits = 0;
+
+ /* Configure the iSCSI classification object */
+ bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
+ cp->iscsi_l2_client_id,
+ cp->iscsi_l2_cid, BP_FUNC(bp),
+ bnx2x_sp(bp, mac_rdata),
+ bnx2x_sp_mapping(bp, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &bp->sp_state, BNX2X_OBJ_TYPE_RX,
+ &bp->macs_pool);
/* Set iSCSI MAC address */
- bnx2x_set_iscsi_eth_mac_addr(bp, 1);
+ rc = bnx2x_set_iscsi_eth_mac_addr(bp);
+ if (rc)
+ break;
mmiowb();
barrier();
- /* Start accepting on iSCSI L2 ring. Accept all multicasts
- * because it's the only way for UIO Client to accept
- * multicasts (in non-promiscuous mode only one Client per
- * function will receive multicast packets (leading in our
- * case).
- */
- bnx2x_rxq_set_mac_filters(bp, cli,
- BNX2X_ACCEPT_UNICAST |
- BNX2X_ACCEPT_BROADCAST |
- BNX2X_ACCEPT_ALL_MULTICAST);
- storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
+ /* Start accepting on iSCSI L2 ring */
+
+ netif_addr_lock_bh(dev);
+ bnx2x_set_iscsi_eth_rx_mode(bp, true);
+ netif_addr_unlock_bh(dev);
+
+ /* bits to wait on */
+ __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+ __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
+
+ if (!bnx2x_wait_sp_comp(bp, sp_bits))
+ BNX2X_ERR("rx_mode completion timed out!\n");
break;
}
/* rtnl_lock is held. */
case DRV_CTL_STOP_L2_CMD: {
- u32 cli = ctl->data.ring.client_id;
+ unsigned long sp_bits = 0;
/* Stop accepting on iSCSI L2 ring */
- bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
- storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
+ netif_addr_lock_bh(dev);
+ bnx2x_set_iscsi_eth_rx_mode(bp, false);
+ netif_addr_unlock_bh(dev);
+
+ /* bits to wait on */
+ __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+ __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
+
+ if (!bnx2x_wait_sp_comp(bp, sp_bits))
+ BNX2X_ERR("rx_mode completion timed out!\n");
mmiowb();
barrier();
/* Unset iSCSI L2 MAC */
- bnx2x_set_iscsi_eth_mac_addr(bp, 0);
+ rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
+ BNX2X_ISCSI_ETH_MAC, true);
break;
}
case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
@@ -10156,11 +11408,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
break;
}
- case DRV_CTL_ISCSI_STOPPED_CMD: {
- bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED);
- break;
- }
-
default:
BNX2X_ERR("unknown command %x\n", ctl->cmd);
rc = -EINVAL;
@@ -10181,13 +11428,13 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
}
- if (CHIP_IS_E2(bp))
+ if (!CHIP_IS_E1x(bp))
cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
else
cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
- cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
- cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
+ cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
+ cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
cp->irq_arr[1].status_blk = bp->def_status_blk;
cp->irq_arr[1].status_blk_num = DEF_SB_ID;
cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
@@ -10204,9 +11451,6 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
if (ops == NULL)
return -EINVAL;
- if (atomic_read(&bp->intr_sem) != 0)
- return -EBUSY;
-
bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!bp->cnic_kwq)
return -ENOMEM;
@@ -10221,7 +11465,7 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
bp->cnic_data = data;
cp->num_irq = 0;
- cp->drv_state = CNIC_DRV_STATE_REGD;
+ cp->drv_state |= CNIC_DRV_STATE_REGD;
cp->iro_arr = bp->iro_arr;
bnx2x_setup_cnic_irq_info(bp);
@@ -10275,8 +11519,8 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
cp->drv_register_cnic = bnx2x_register_cnic;
cp->drv_unregister_cnic = bnx2x_unregister_cnic;
cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
- cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
- BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
+ cp->iscsi_l2_client_id =
+ bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
if (NO_ISCSI_OOO(bp))
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 86bba25d2d3..40266c14e6d 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -32,7 +32,11 @@
/* [R 1] ATC initalization done */
#define ATC_REG_ATC_INIT_DONE 0x1100bc
/* [RC 6] Interrupt register #0 read clear */
-#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
+#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
+/* [RW 5] Parity mask register #0 read/write */
+#define ATC_REG_ATC_PRTY_MASK 0x1101d8
+/* [RC 5] Parity register #0 read clear */
+#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0
/* [RW 19] Interrupt mask register #0 read/write */
#define BRB1_REG_BRB1_INT_MASK 0x60128
/* [R 19] Interrupt register #0 read */
@@ -54,16 +58,20 @@
/* [RW 10] The number of free blocks below which the full signal to class 0
* is asserted */
#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0
-/* [RW 10] The number of free blocks above which the full signal to class 0
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_1 0x60230
+/* [RW 11] The number of free blocks above which the full signal to class 0
* is de-asserted */
#define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4
-/* [RW 10] The number of free blocks below which the full signal to class 1
+#define BRB1_REG_FULL_0_XON_THRESHOLD_1 0x60234
+/* [RW 11] The number of free blocks below which the full signal to class 1
* is asserted */
#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8
-/* [RW 10] The number of free blocks above which the full signal to class 1
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_1 0x60238
+/* [RW 11] The number of free blocks above which the full signal to class 1
* is de-asserted */
#define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc
-/* [RW 10] The number of free blocks below which the full signal to the LB
+#define BRB1_REG_FULL_1_XON_THRESHOLD_1 0x6023c
+/* [RW 11] The number of free blocks below which the full signal to the LB
* port is asserted */
#define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0
/* [RW 10] The number of free blocks above which the full signal to the LB
@@ -75,15 +83,49 @@
/* [RW 10] The number of free blocks below which the High_llfc signal to
interface #n is asserted. */
#define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c
-/* [RW 23] LL RAM data. */
-#define BRB1_REG_LL_RAM 0x61000
+/* [RW 11] The number of blocks guarantied for the LB port */
+#define BRB1_REG_LB_GUARANTIED 0x601ec
+/* [RW 11] The hysteresis on the guarantied buffer space for the Lb port
+ * before signaling XON. */
+#define BRB1_REG_LB_GUARANTIED_HYST 0x60264
+/* [RW 24] LL RAM data. */
+#define BRB1_REG_LL_RAM 0x61000
/* [RW 10] The number of free blocks above which the Low_llfc signal to
interface #n is de-asserted. */
#define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c
/* [RW 10] The number of free blocks below which the Low_llfc signal to
interface #n is asserted. */
#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c
-/* [RW 10] The number of blocks guarantied for the MAC port */
+/* [RW 11] The number of blocks guarantied for class 0 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED 0x60244
+/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST 0x60254
+/* [RW 11] The number of blocks guarantied for class 1 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED 0x60248
+/* [RW 11] The hysteresis on the guarantied buffer space for class 1in MAC 0
+ * before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST 0x60258
+/* [RW 11] The number of blocks guarantied for class 0in MAC1.The register
+ * is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED 0x6024c
+/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST 0x6025c
+/* [RW 11] The number of blocks guarantied for class 1 in MAC 1. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED 0x60250
+/* [RW 11] The hysteresis on the guarantied buffer space for class 1 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST 0x60260
+/* [RW 11] The number of blocks guarantied for the MAC port. The register is
+ * applicable only when per_class_guaranty_mode is reset. */
#define BRB1_REG_MAC_GUARANTIED_0 0x601e8
#define BRB1_REG_MAC_GUARANTIED_1 0x60240
/* [R 24] The number of full blocks. */
@@ -100,15 +142,19 @@
/* [RW 10] The number of free blocks below which the pause signal to class 0
* is asserted */
#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0
-/* [RW 10] The number of free blocks above which the pause signal to class 0
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 0x60220
+/* [RW 11] The number of free blocks above which the pause signal to class 0
* is de-asserted */
#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4
-/* [RW 10] The number of free blocks below which the pause signal to class 1
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_1 0x60224
+/* [RW 11] The number of free blocks below which the pause signal to class 1
* is asserted */
#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8
-/* [RW 10] The number of free blocks above which the pause signal to class 1
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 0x60228
+/* [RW 11] The number of free blocks above which the pause signal to class 1
* is de-asserted */
#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_1 0x6022c
/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */
#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
@@ -325,7 +371,7 @@
mechanism. The fields are: [5:0] - message length; [12:6] - message
pointer; 18:13] - next pointer. */
#define CCM_REG_XX_DESCR_TABLE 0xd0300
-#define CCM_REG_XX_DESCR_TABLE_SIZE 36
+#define CCM_REG_XX_DESCR_TABLE_SIZE 24
/* [R 7] Used to read the value of XX protection Free counter. */
#define CCM_REG_XX_FREE 0xd0184
/* [RW 6] Initial value for the credit counter; responsible for fulfilling
@@ -422,6 +468,7 @@
#define CFC_REG_NUM_LCIDS_ALLOC 0x104020
/* [R 9] Number of Arriving LCIDs in Link List Block */
#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004
+#define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120
/* [R 9] Number of Leaving LCIDs in Link List Block */
#define CFC_REG_NUM_LCIDS_LEAVING 0x104018
#define CFC_REG_WEAK_ENABLE_PF 0x104124
@@ -783,6 +830,7 @@
/* [RW 3] The number of simultaneous outstanding requests to Context Fetch
Interface. */
#define DORQ_REG_OUTST_REQ 0x17003c
+#define DORQ_REG_PF_USAGE_CNT 0x1701d0
#define DORQ_REG_REGN 0x170038
/* [R 4] Current value of response A counter credit. Initial credit is
configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
@@ -802,10 +850,12 @@
/* [RW 28] TCM Header when both ULP and TCP context is loaded. */
#define DORQ_REG_SHRT_CMHEAD 0x170054
#define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4)
+#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 (0x1<<0)
#define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3)
#define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7)
#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2)
-#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
+#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
+#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0)
#define HC_REG_AGG_INT_0 0x108050
#define HC_REG_AGG_INT_1 0x108054
#define HC_REG_ATTN_BIT 0x108120
@@ -844,6 +894,7 @@
#define HC_REG_VQID_0 0x108008
#define HC_REG_VQID_1 0x10800c
#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1)
+#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE (0x1<<0)
#define IGU_REG_ATTENTION_ACK_BITS 0x130108
/* [R 4] Debug: attn_fsm */
#define IGU_REG_ATTN_FSM 0x130054
@@ -933,6 +984,14 @@
* clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */
#define IGU_REG_WRITE_DONE_PENDING 0x130480
#define MCP_A_REG_MCPR_SCRATCH 0x3a0000
+#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c
+#define MCP_REG_MCPR_GP_INPUTS 0x800c0
+#define MCP_REG_MCPR_GP_OENABLE 0x800c8
+#define MCP_REG_MCPR_GP_OUTPUTS 0x800c4
+#define MCP_REG_MCPR_IMC_COMMAND 0x85900
+#define MCP_REG_MCPR_IMC_DATAREG0 0x85920
+#define MCP_REG_MCPR_IMC_SLAVE_CONTROL 0x85904
#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424
#define MCP_REG_MCPR_NVM_ADDR 0x8640c
#define MCP_REG_MCPR_NVM_CFG4 0x8642c
@@ -1429,11 +1488,37 @@
/* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0
only. */
#define MISC_REG_E1HMF_MODE 0xa5f8
+/* [R 1] Status of four port mode path swap input pin. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP 0xa75c
+/* [RW 2] 4 port path swap overwrite.[0] - Overwrite control; if it is 0 -
+ the path_swap output is equal to 4 port mode path swap input pin; if it
+ is 1 - the path_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR 0xa738
+/* [R 1] Status of 4 port mode port swap input pin. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP 0xa754
+/* [RW 2] 4 port port swap overwrite.[0] - Overwrite control; if it is 0 -
+ the port_swap output is equal to 4 port mode port swap input pin; if it
+ is 1 - the port_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the port_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR 0xa734
/* [RW 32] Debug only: spare RW register reset by core reset */
#define MISC_REG_GENERIC_CR_0 0xa460
#define MISC_REG_GENERIC_CR_1 0xa464
/* [RW 32] Debug only: spare RW register reset by por reset */
#define MISC_REG_GENERIC_POR_1 0xa474
+/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to
+ use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO
+ can not be configured as an output. Each output has its output enable in
+ the MCP register space; but this bit needs to be set to make use of that.
+ Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When
+ set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON.
+ When set to 0 - vTMON is in reset. Bit[6]: setting this bit will change
+ the i/o to an output and will drive the TimeSync output. Bit[31:7]:
+ spare. Global register. Reset by hard reset. */
+#define MISC_REG_GEN_PURP_HWG 0xa9a0
/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
these bits is written as a '1'; the corresponding SPIO bit will turn off
it's drivers and become an input. This is the reset state of all GPIO
@@ -1636,6 +1721,14 @@
in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
timer 8 */
#define MISC_REG_SW_TIMER_VAL 0xa5c0
+/* [R 1] Status of two port mode path swap input pin. */
+#define MISC_REG_TWO_PORT_PATH_SWAP 0xa758
+/* [RW 2] 2 port swap overwrite.[0] - Overwrite control; if it is 0 - the
+ path_swap output is equal to 2 port mode path swap input pin; if it is 1
+ - the path_swap output is equal to bit[1] of this register; [1] -
+ Overwrite value. If bit[0] of this register is 1 this is the value that
+ receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR 0xa72c
/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
loaded; 0-prepare; -unprepare */
#define MISC_REG_UNPREPARED 0xa424
@@ -1644,6 +1737,36 @@
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
+/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or
+ * not it is the recipient of the message on the MDIO interface. The value
+ * is compared to the value on ctrl_md_devad. Drives output
+ * misc_xgxs0_phy_addr. Global register. */
+#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc
+/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
+ side. This should be less than or equal to phy_port_mode; if some of the
+ ports are not used. This enables reduction of frequency on the core side.
+ This is a strap input for the XMAC_MP core. 00 - Single Port Mode; 01 -
+ Dual Port Mode; 10 - Tri Port Mode; 11 - Quad Port Mode. This is a strap
+ input for the XMAC_MP core; and should be changed only while reset is
+ held low. Reset on Hard reset. */
+#define MISC_REG_XMAC_CORE_PORT_MODE 0xa964
+/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp
+ Core. This is a strap input for the XMAC_MP core. 00 - Single Port Mode;
+ 01 - Dual Port Mode; 1x - Quad Port Mode; This is a strap input for the
+ XMAC_MP core; and should be changed only while reset is held low. Reset
+ on Hard reset. */
+#define MISC_REG_XMAC_PHY_PORT_MODE 0xa960
+/* [RW 32] 1 [47] Packet Size = 64 Write to this register write bits 31:0.
+ * Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_RX_STAT_GR64_LO 0x200
+/* [RW 32] 1 [00] Tx Good Packet Count Write to this register write bits
+ * 31:0. Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_TX_STAT_GTXPOK_LO 0
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0)
#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0)
#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
@@ -1837,6 +1960,10 @@
#define NIG_REG_LLH1_FUNC_MEM 0x161c0
#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+ * sending it to the BRB or calculating WoL on it. This bit controls port 1
+ * only. The legacy llh_multi_function_mode bit controls port 0. */
+#define NIG_REG_LLH1_MF_MODE 0x18614
/* [RW 8] init credit counter for port1 in LLH */
#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
#define NIG_REG_LLH1_XCM_MASK 0x10134
@@ -1858,11 +1985,25 @@
/* [R 32] Interrupt register #0 read */
#define NIG_REG_NIG_INT_STS_0 0x103b0
#define NIG_REG_NIG_INT_STS_1 0x103c0
+/* [R 32] Legacy E1 and E1H location for parity error mask register. */
+#define NIG_REG_NIG_PRTY_MASK 0x103dc
+/* [RW 32] Parity mask register #0 read/write */
+#define NIG_REG_NIG_PRTY_MASK_0 0x183c8
+#define NIG_REG_NIG_PRTY_MASK_1 0x183d8
/* [R 32] Legacy E1 and E1H location for parity error status register. */
#define NIG_REG_NIG_PRTY_STS 0x103d0
/* [R 32] Parity register #0 read */
#define NIG_REG_NIG_PRTY_STS_0 0x183bc
#define NIG_REG_NIG_PRTY_STS_1 0x183cc
+/* [R 32] Legacy E1 and E1H location for parity error status clear register. */
+#define NIG_REG_NIG_PRTY_STS_CLR 0x103d4
+/* [RC 32] Parity register #0 read clear */
+#define NIG_REG_NIG_PRTY_STS_CLR_0 0x183c0
+#define NIG_REG_NIG_PRTY_STS_CLR_1 0x183d0
+#define MCPR_IMC_COMMAND_ENABLE (1L<<31)
+#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16
+#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28
+#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8
/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
* Ethernet header. */
#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038
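
Editor's note: the MCPR_IMC_COMMAND_* shift macros added in this hunk describe the field layout of the IMC command register (MCP_REG_MCPR_IMC_COMMAND, added earlier). A hedged sketch of composing a command word from them; the opcode values, the 8-bit status width and the absence of any busy polling are assumptions, only the shift/enable macros come from the definitions above:

/* Sketch only -- not the driver's actual IMC access routine. */
static u32 example_imc_command(struct bnx2x *bp, u32 opcode, u32 xfer_addr)
{
	u32 cmd = MCPR_IMC_COMMAND_ENABLE |
		  (opcode << MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
		  (xfer_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT);

	REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, cmd);

	/* Completion status as reported back in the command register
	 * (assumed to be 8 bits wide at the documented bit position). */
	return (REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND) >>
		MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0xff;
}
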
@@ -1872,6 +2013,12 @@
#define NIG_REG_P0_HWPFC_ENABLE 0x18078
#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480
#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440
+/* [RW 1] Input enable for RX MAC interface. */
+#define NIG_REG_P0_MAC_IN_EN 0x185ac
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P0_MAC_OUT_EN 0x185b0
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P0_MAC_PAUSE_OUT_EN 0x185b4
/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
* future expansion) each priorty is to be mapped to. Bits 3:0 specify the
* COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
@@ -1888,11 +2035,52 @@
* than one bit may be set; allowing multiple priorities to be mapped to one
* COS. */
#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A
+ * priority is mapped to COS 3 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A
+ * priority is mapped to COS 4 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A
+ * priority is mapped to COS 5 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc
+/* [R 1] RX FIFO for receiving data from MAC is empty. */
/* [RW 15] Specify which of the credit registers the client is to be mapped
* to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
* clients that are not subject to WFQ credit blocking - their
* specifications here are not used. */
#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x18688
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x1868c
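
Editor's note: as the comments spell out, the CLIENT_CREDIT_MAP2 value is 36 bits wide (4 bits for each of the 9 ETS clients), so it is programmed through an LSB register holding bits 31:0 and an MSB register holding bits 35:32. A small sketch of writing the pair; the helper name is illustrative, the register names and split come from the comments above:

/* Sketch only. */
static void example_set_p0_credit_map2(struct bnx2x *bp, u64 map36)
{
	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
	       (u32)(map36 & 0xffffffff));          /* bits 31:0  */
	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB,
	       (u32)(map36 >> 32) & 0xf);            /* bits 35:32 */
}

/* The documented reset default 0x543210876 therefore splits into
 * LSB = 0x43210876 and MSB = 0x5. */
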
/* [RW 5] Specify whether the client competes directly in the strict
* priority arbiter. The bits are mapped according to client ID (client IDs
* are defined in tx_arb_priority_client). Default value is set to enable
@@ -1907,10 +2095,24 @@
* reach. */
#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c
#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 0x18114
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 0x18118
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 0x1811c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 0x186a0
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 0x186a4
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 0x186a8
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 0x186ac
/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
* when it is time to increment. */
#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8
#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 0x18100
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 0x18104
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 0x18108
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 0x18690
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 0x18694
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 0x18698
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 0x1869c
/* [RW 12] Specify the number of strict priority arbitration slots between
* two round-robin arbitration slots to avoid starvation. A value of 0 means
* no strict priority cycles - the strict priority with anti-starvation
@@ -1925,8 +2127,36 @@
* for management at priority 0; debug traffic at priorities 1 and 2; COS0
* traffic at priority 3; and COS1 traffic at priority 4. */
#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c
#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0
#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460
+/* [RW 32] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35-32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB 0x18680
+/* [RW 4] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35-32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
+#define NIG_REG_P1_MAC_IN_EN 0x185c0
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P1_MAC_OUT_EN 0x185c4
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P1_MAC_PAUSE_OUT_EN 0x185c8
/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
* future expansion) each priorty is to be mapped to. Bits 3:0 specify the
* COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
@@ -1943,6 +2173,105 @@
* than one bit may be set; allowing multiple priorities to be mapped to one
* COS. */
#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8
+/* [R 1] RX FIFO for receiving data from MAC is empty. */
+#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c
+/* [R 1] TLLH FIFO is empty. */
+#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. Note also that there are
+ * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only
+ * credit registers 0-5 are valid. This register should be configured
+ * appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x186e8
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. Note also that there are
+ * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only
+ * credit registers 0-5 are valid. This register should be configured
+ * appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x186ec
+/* [RW 9] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic.
+ * Default value is set to enable strict priorities for all clients. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT 0x18234
+/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client2): 0-management; 1-debug traffic from this port;
+ * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2
+ * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is
+ * 0 for not using WFQ credit blocking. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x18238
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 0x18258
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 0x1825c
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 0x18260
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 0x18264
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 0x18268
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 0x186f4
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 0x18244
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 0x18248
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 0x1824c
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 0x18250
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 0x18254
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 0x186f0
+/* [RW 12] Specify the number of strict priority arbitration slots between
+ two round-robin arbitration slots to avoid starvation. A value of 0 means
+ no strict priority cycles - the strict priority with anti-starvation
+ arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS 0x18240
+/* [RW 32] Specify the client number to be assigned to each priority of the
+ strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+ value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ client; bits [35-32] are for priority 8 client. The clients are assigned
+ the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ accommodate the 9 input clients to ETS arbiter. Note that this register
+ is the same as the one for port 0, except that port 1 only has COS 0-2
+ traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB 0x186e0
+/* [RW 4] Specify the client number to be assigned to each priority of the
+ strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+ value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ client; bits [35-32] are for priority 8 client. The clients are assigned
+ the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ accommodate the 9 input clients to ETS arbiter. Note that this register
+ is the same as the one for port 0, except that port 1 only has COS 0-2
+ traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4
+/* [R 1] TX FIFO for transmitting data to MAC is empty. */
+#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594
+/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
+ forwarded to the host. */
+#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8
+/* [RW 32] Specify the upper bound that credit register 0 is allowed to
+ * reach. */
/* [RW 1] Pause enable for port0. This register may get 1 only when
~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
port */
@@ -2026,12 +2355,45 @@
#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
#define PBF_REG_COS0_UPPER_BOUND 0x15c05c
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 0. */
+#define PBF_REG_COS0_UPPER_BOUND_P0 0x15c2cc
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 1. */
+#define PBF_REG_COS0_UPPER_BOUND_P1 0x15c2e4
/* [RW 31] The weight of COS0 in the ETS command arbiter. */
#define PBF_REG_COS0_WEIGHT 0x15c054
+/* [RW 31] The weight of COS0 in port 0 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P0 0x15c2a8
+/* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P1 0x15c2c0
/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
#define PBF_REG_COS1_UPPER_BOUND 0x15c060
/* [RW 31] The weight of COS1 in the ETS command arbiter. */
#define PBF_REG_COS1_WEIGHT 0x15c058
+/* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P0 0x15c2ac
+/* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P1 0x15c2c4
+/* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P0 0x15c2b0
+/* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P1 0x15c2c8
+/* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */
+#define PBF_REG_COS3_WEIGHT_P0 0x15c2b4
+/* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */
+#define PBF_REG_COS4_WEIGHT_P0 0x15c2b8
+/* [RW 31] The weight of COS5 in port 0 ETS command arbiter. */
+#define PBF_REG_COS5_WEIGHT_P0 0x15c2bc
+/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_LB_Q 0x140338
+/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q0 0x14033c
+/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q1 0x140340
/* [RW 1] Disable processing further tasks from port 0 (after ending the
current task in process). */
#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
@@ -2042,6 +2404,52 @@
current task in process). */
#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
#define PBF_REG_DISABLE_PF 0x1402e8
+/* [RW 18] For port 0: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is dont care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 0x15c288
+/* [RW 9] For port 1: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is dont care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 0x15c28c
+/* [RW 6] For port 0: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 0x15c278
+/* [RW 3] For port 1: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 0x15c27c
+/* [RW 6] For port 0: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 0x15c280
+/* [RW 3] For port 0: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 0x15c284
+/* [RW 16] For port 0: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 0x15c2a0
+/* [RW 16] For port 1: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 0x15c2a4
+/* [RW 18] For port 0: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest; to which the RR output is connected to (this is
+ * not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 0x15c270
+/* [RW 9] For port 1: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest; to which the RR output is connected to (this is
+ * not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 0x15c274
/* [RW 1] Indicates that ETS is performed between the COSes in the command
* arbiter. If reset strict priority w/ anti-starvation will be performed
* w/o WFQ. */
@@ -2049,14 +2457,25 @@
/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
* Ethernet header. */
#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
-/* [RW 1] Indicates which COS is conncted to the highest priority in the
- * command arbiter. */
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8
+/* [R 1] Removed for E3 B0 - Indicates which COS is conncted to the highest
+ * priority in the command arbiter. */
#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
#define PBF_REG_IF_ENABLE_REG 0x140044
/* [RW 1] Init bit. When set the initial credits are copied to the credit
registers (except the port credits). Should be set and then reset after
the configuration of the block has ended. */
#define PBF_REG_INIT 0x140000
+/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_LB_Q 0x15c248
+/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q0 0x15c230
+/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q1 0x15c234
/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is
copied to the credit register. Should be set and then reset after the
configuration of the port has ended. */
@@ -2069,6 +2488,15 @@
copied to the credit register. Should be set and then reset after the
configuration of the port has ended. */
#define PBF_REG_INIT_P4 0x14000c
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * the LB queue. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * queue 0. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * queue 1. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c
/* [RW 1] Enable for mac interface 0. */
#define PBF_REG_MAC_IF0_ENABLE 0x140030
/* [RW 1] Enable for mac interface 1. */
@@ -2089,24 +2517,49 @@
/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte
lines. */
#define PBF_REG_P0_INIT_CRD 0x1400d0
-/* [RW 1] Indication that pause is enabled for port 0. */
-#define PBF_REG_P0_PAUSE_ENABLE 0x140014
-/* [R 8] Number of tasks in port 0 task queue. */
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * port 0. Reset upon init. */
+#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308
+/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */
+#define PBF_REG_P0_PAUSE_ENABLE 0x140014
+/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */
#define PBF_REG_P0_TASK_CNT 0x140204
-/* [R 11] Current credit for port 1 in the tx port buffers in 16 byte lines. */
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 0. Reset upon init. */
+#define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0
+/* [R 12] Number of 8 bytes lines occupied in the task queue of port 0. */
+#define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc
+/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
#define PBF_REG_P1_CREDIT 0x140208
-/* [RW 11] Initial credit for port 1 in the tx port buffers in 16 byte
- lines. */
+/* [R 11] Removed for E3 B0 - Initial credit for port 0 in the tx port
+ * buffers in 16 byte lines. */
#define PBF_REG_P1_INIT_CRD 0x1400d4
-/* [R 8] Number of tasks in port 1 task queue. */
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * port 1. Reset upon init. */
+#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c
+/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */
#define PBF_REG_P1_TASK_CNT 0x14020c
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 1. Reset upon init. */
+#define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4
+/* [R 12] Number of 8 bytes lines occupied in the task queue of port 1. */
+#define PBF_REG_P1_TQ_OCCUPANCY 0x140300
/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */
#define PBF_REG_P4_CREDIT 0x140210
/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte
lines. */
#define PBF_REG_P4_INIT_CRD 0x1400e0
-/* [R 8] Number of tasks in port 4 task queue. */
+/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for
+ * port 4. Reset upon init. */
+#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310
+/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */
#define PBF_REG_P4_TASK_CNT 0x140214
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 4. Reset upon init. */
+#define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8
+/* [R 12] Number of 8 bytes lines occupied in the task queue of port 4. */
+#define PBF_REG_P4_TQ_OCCUPANCY 0x140304
/* [RW 5] Interrupt mask register #0 read/write */
#define PBF_REG_PBF_INT_MASK 0x1401d4
/* [R 5] Interrupt register #0 read */
@@ -2115,6 +2568,27 @@
#define PBF_REG_PBF_PRTY_MASK 0x1401e4
/* [RC 20] Parity register #0 read clear */
#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PBF_REG_TAG_ETHERTYPE_0 0x15c090
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PBF_REG_TAG_LEN_0 0x15c09c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task
+ * queue. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the task
+ * queue 0. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390
+/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1.
+ * Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394
+/* [R 13] Number of 8 bytes lines occupied in the task queue of the LB
+ * queue. */
+#define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8
+/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 0. */
+#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac
+/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */
+#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0
#define PB_REG_CONTROL 0
/* [RW 2] Interrupt mask register #0 read/write */
#define PB_REG_PB_INT_MASK 0x28
@@ -2206,8 +2680,12 @@
#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
/* [RC 9] Interrupt register #0 read clear */
#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c
+/* [RW 2] Parity mask register #0 read/write */
+#define PGLUE_B_REG_PGLUE_B_PRTY_MASK 0x92b4
/* [R 2] Parity register #0 read */
#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8
+/* [RC 2] Parity register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR 0x92ac
/* [R 13] Details of first request received with error. [2:0] - PFID. [3] -
* VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion
* Timeout of a User Tx non-posted request. 1 - unsupported request. 2 -
@@ -2444,10 +2922,24 @@
/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
* Ethernet header. */
#define PRS_REG_HDRS_AFTER_BASIC 0x40238
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header for port 0 packets. */
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290
+/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PRS_REG_HDRS_AFTER_TAG_0 0x40248
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for
+ * port 0 packets */
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0
/* [RW 4] The increment value to send in the CFC load request message */
#define PRS_REG_INC_VALUE 0x40048
/* [RW 6] Bit-map indicating which headers must appear in the packet */
#define PRS_REG_MUST_HAVE_HDRS 0x40254
+/* [RW 6] Bit-map indicating which headers must appear in the packet for
+ * port 0 packets */
+#define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c
+#define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac
#define PRS_REG_NIC_MODE 0x40138
/* [RW 8] The 8-bit event ID for cases where there is no match on the
connection. Used in packet start message to TCM. */
@@ -2496,6 +2988,11 @@
#define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158
/* [R 4] debug only: SRC current credit. Transaction based. */
#define PRS_REG_SRC_CURRENT_CREDIT 0x4016c
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PRS_REG_TAG_ETHERTYPE_0 0x401d4
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PRS_REG_TAG_LEN_0 0x4022c
/* [R 8] debug only: TCM current credit. Cycle based. */
#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
/* [R 8] debug only: TSDM current credit. Transaction based. */
@@ -2510,11 +3007,27 @@
/* [R 6] Debug only: Number of used entries in the data FIFO */
#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
/* [R 7] Debug only: Number of used entries in the header FIFO */
-#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
-#define PXP2_REG_PGL_ADDR_88_F0 0x120534
-#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
-#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
-#define PXP2_REG_PGL_ADDR_94_F0 0x120540
+#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478
+#define PXP2_REG_PGL_ADDR_88_F0 0x120534
+/* [R 32] GRC address for configuration access to PCIE config address 0x88.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that's in t this register */
+#define PXP2_REG_PGL_ADDR_88_F1 0x120544
+#define PXP2_REG_PGL_ADDR_8C_F0 0x120538
+/* [R 32] GRC address for configuration access to PCIE config address 0x8c.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that's in t this register */
+#define PXP2_REG_PGL_ADDR_8C_F1 0x120548
+#define PXP2_REG_PGL_ADDR_90_F0 0x12053c
+/* [R 32] GRC address for configuration access to PCIE config address 0x90.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that's in t this register */
+#define PXP2_REG_PGL_ADDR_90_F1 0x12054c
+#define PXP2_REG_PGL_ADDR_94_F0 0x120540
+/* [R 32] GRC address for configuration access to PCIE config address 0x94.
+ * any write to this PCIE address will cause a GRC write access to the
+ * address that's in t this register */
+#define PXP2_REG_PGL_ADDR_94_F1 0x120550
#define PXP2_REG_PGL_CONTROL0 0x120490
#define PXP2_REG_PGL_CONTROL1 0x120514
#define PXP2_REG_PGL_DEBUG 0x120520
@@ -3080,6 +3593,7 @@
#define QM_REG_BYTECREDITAFULLTHR 0x168094
/* [RW 4] The initial credit for interface */
#define QM_REG_CMINITCRD_0 0x1680cc
+#define QM_REG_BYTECRDCMDQ_0 0x16e6e8
#define QM_REG_CMINITCRD_1 0x1680d0
#define QM_REG_CMINITCRD_2 0x1680d4
#define QM_REG_CMINITCRD_3 0x1680d8
@@ -3170,7 +3684,10 @@
/* [RW 2] The PCI attributes field used in the PCI request. */
#define QM_REG_PCIREQAT 0x168054
#define QM_REG_PF_EN 0x16e70c
-/* [R 16] The byte credit of port 0 */
+/* [R 24] The number of tasks stored in the QM for the PF. only even
+ * functions are valid in E2 (odd I registers will be hard wired to 0) */
+#define QM_REG_PF_USG_CNT_0 0x16e040
+/* [R 16] NOT USED */
#define QM_REG_PORT0BYTECRD 0x168300
/* [R 16] The byte credit of port 1 */
#define QM_REG_PORT1BYTECRD 0x168304
@@ -3725,7 +4242,7 @@
mechanism. The fields are: [5:0] - length of the message; 15:6] - message
pointer; 20:16] - next pointer. */
#define TCM_REG_XX_DESCR_TABLE 0x50280
-#define TCM_REG_XX_DESCR_TABLE_SIZE 32
+#define TCM_REG_XX_DESCR_TABLE_SIZE 29
/* [R 6] Use to read the value of XX protection Free counter. */
#define TCM_REG_XX_FREE 0x50178
/* [RW 6] Initial value for the credit counter; responsible for fulfilling
@@ -3782,6 +4299,8 @@
#define TM_REG_LIN0_LOGIC_ADDR 0x164240
/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */
#define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048
+/* [ST 16] Linear0 Number of scans counter. */
+#define TM_REG_LIN0_NUM_SCANS 0x1640a0
/* [WB 64] Linear0 phy address. */
#define TM_REG_LIN0_PHY_ADDR 0x164270
/* [RW 1] Linear0 physical address valid. */
@@ -3789,6 +4308,7 @@
#define TM_REG_LIN0_SCAN_ON 0x1640d0
/* [RW 24] Linear0 array scan timeout. */
#define TM_REG_LIN0_SCAN_TIME 0x16403c
+#define TM_REG_LIN0_VNIC_UC 0x164128
/* [RW 32] Linear1 logic address. */
#define TM_REG_LIN1_LOGIC_ADDR 0x164250
/* [WB 64] Linear1 phy address. */
@@ -4175,6 +4695,8 @@
#define UCM_REG_UCM_INT_MASK 0xe01d4
/* [R 11] Interrupt register #0 read */
#define UCM_REG_UCM_INT_STS 0xe01c8
+/* [RW 27] Parity mask register #0 read/write */
+#define UCM_REG_UCM_PRTY_MASK 0xe01e4
/* [R 27] Parity register #0 read */
#define UCM_REG_UCM_PRTY_STS 0xe01d8
/* [RC 27] Parity register #0 read clear */
@@ -4248,7 +4770,7 @@
mechanism. The fields are:[5:0] - message length; 14:6] - message
pointer; 19:15] - next pointer. */
#define UCM_REG_XX_DESCR_TABLE 0xe0280
-#define UCM_REG_XX_DESCR_TABLE_SIZE 32
+#define UCM_REG_XX_DESCR_TABLE_SIZE 27
/* [R 6] Use to read the XX protection Free counter. */
#define UCM_REG_XX_FREE 0xe016c
/* [RW 6] Initial value for the credit counter; responsible for fulfilling
@@ -4265,6 +4787,25 @@
The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] -
header pointer. */
#define UCM_REG_XX_TABLE 0xe0300
+#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28)
+#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15)
+#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24)
+#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5)
+#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8)
+#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4)
+#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1)
+#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13)
+#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0)
+#define UMAC_REG_COMMAND_CONFIG 0x8
+/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers
+ * to bit 17 of the MAC address etc. */
+#define UMAC_REG_MAC_ADDR0 0xc
+/* [RW 16] Register Bit 0 refers to Bit 0 of the MAC address; Register Bit 1
+ * refers to Bit 1 of the MAC address etc. Bits 16 to 31 are reserved. */
+#define UMAC_REG_MAC_ADDR1 0x10
+/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive
+ * logic to check frames. */
+#define UMAC_REG_MAXFR 0x14
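
Editor's note: per the comments, UMAC_REG_MAC_ADDR0 carries MAC address bits 47:16 and UMAC_REG_MAC_ADDR1 carries bits 15:0 (its upper half is reserved). A sketch of packing a six-byte address accordingly, assuming mac[0] is the most significant byte and that umac_base is the GRC base of the UMAC instance being programmed (both assumptions; only the bit layout comes from the comments above):

/* Sketch only. */
static void example_umac_set_addr(struct bnx2x *bp, u32 umac_base,
				  const u8 *mac, u16 max_frame)
{
	u32 hi = ((u32)mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
	u32 lo = (mac[4] << 8) | mac[5];

	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, hi);  /* MAC bits 47:16 */
	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1, lo);  /* MAC bits 15:0  */
	REG_WR(bp, umac_base + UMAC_REG_MAXFR, max_frame);
}
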
/* [RW 8] The event id for aggregated interrupt 0 */
#define USDM_REG_AGG_INT_EVENT_0 0xc4038
#define USDM_REG_AGG_INT_EVENT_1 0xc403c
@@ -4696,8 +5237,13 @@
#define XCM_REG_XCM_INT_MASK 0x202b4
/* [R 14] Interrupt register #0 read */
#define XCM_REG_XCM_INT_STS 0x202a8
+/* [RW 30] Parity mask register #0 read/write */
+#define XCM_REG_XCM_PRTY_MASK 0x202c4
/* [R 30] Parity register #0 read */
#define XCM_REG_XCM_PRTY_STS 0x202b8
+/* [RC 30] Parity register #0 read clear */
+#define XCM_REG_XCM_PRTY_STS_CLR 0x202bc
+
/* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS
REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
Is used to determine the number of the AG context REG-pairs written back;
@@ -4772,6 +5318,34 @@
#define XCM_REG_XX_MSG_NUM 0x20428
/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
#define XCM_REG_XX_OVFL_EVNT_ID 0x20058
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0)
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1)
+#define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3)
+#define XMAC_CTRL_REG_RX_EN (0x1<<1)
+#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
+#define XMAC_CTRL_REG_TX_EN (0x1<<0)
+#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18)
+#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17)
+#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0)
+#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3)
+#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4)
+#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN (0x1<<5)
+#define XMAC_REG_CLEAR_RX_LSS_STATUS 0x60
+#define XMAC_REG_CTRL 0
+/* [RW 16] Upper 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC
+ * packets transmitted by the MAC */
+#define XMAC_REG_CTRL_SA_HI 0x2c
+/* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC
+ * packets transmitted by the MAC */
+#define XMAC_REG_CTRL_SA_LO 0x28
+#define XMAC_REG_PAUSE_CTRL 0x68
+#define XMAC_REG_PFC_CTRL 0x70
+#define XMAC_REG_PFC_CTRL_HI 0x74
+#define XMAC_REG_RX_LSS_STATUS 0x58
+/* [RW 14] Maximum packet size in receive direction; exclusive of preamble &
+ * CRC in strip mode */
+#define XMAC_REG_RX_MAX_SIZE 0x40
+#define XMAC_REG_TX_CTRL 0x20
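
Editor's note: a hedged sketch of how the XMAC control bits above might be combined to enable the MAC with PAUSE handling; xmac_base stands for the (assumed) GRC base of the XMAC instance, and the ordering is illustrative only:

/* Sketch only -- not the driver's XMAC bring-up sequence. */
static void example_xmac_enable(struct bnx2x *bp, u32 xmac_base, u16 max_size)
{
	u32 ctrl;

	REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, max_size);
	REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL,
	       XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN |
	       XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN);

	ctrl = REG_RD(bp, xmac_base + XMAC_REG_CTRL);
	ctrl &= ~XMAC_CTRL_REG_SOFT_RESET;                   /* leave soft reset */
	ctrl |= XMAC_CTRL_REG_RX_EN | XMAC_CTRL_REG_TX_EN;   /* enable both paths */
	REG_WR(bp, xmac_base + XMAC_REG_CTRL, ctrl);
}
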
/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
The fields are:[4:0] - tail pointer; 9:5] - Link List size; 14:10] -
header pointer. */
@@ -4846,6 +5420,10 @@
#define XSDM_REG_NUM_OF_Q9_CMD 0x166268
/* [RW 13] The start address in the internal RAM for queue counters */
#define XSDM_REG_Q_COUNTER_START_ADDR 0x166010
+/* [W 17] Generate an operation after completion; bit-16 is
+ * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and
+ * bits 4:0 are the T124Param[4:0] */
+#define XSDM_REG_OPERATION_GEN 0x1664c4
/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548
/* [R 1] parser fifo empty in sdm_sync block */
@@ -5019,6 +5597,7 @@
#define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3)
#define BIGMAC_REGISTER_RX_CONTROL (0x21<<3)
#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3)
+#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3)
#define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3)
#define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3)
#define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3)
@@ -5034,6 +5613,7 @@
#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3)
#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3)
#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3)
+#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3)
#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3)
#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3)
#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3)
@@ -5052,14 +5632,17 @@
#define EMAC_LED_OVERRIDE (1L<<0)
#define EMAC_LED_TRAFFIC (1L<<6)
#define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26)
#define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26)
#define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26)
#define EMAC_MDIO_COMM_DATA (0xffffL<<0)
#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31)
-#define EMAC_MDIO_MODE_CLOCK_CNT (0x3fL<<16)
+#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16)
#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
+#define EMAC_MDIO_STATUS_10MB (1L<<1)
#define EMAC_MODE_25G_MODE (1L<<5)
#define EMAC_MODE_HALF_DUPLEX (1L<<1)
#define EMAC_MODE_PORT_GMII (2L<<2)
@@ -5070,6 +5653,7 @@
#define EMAC_REG_EMAC_MAC_MATCH 0x10
#define EMAC_REG_EMAC_MDIO_COMM 0xac
#define EMAC_REG_EMAC_MDIO_MODE 0xb4
+#define EMAC_REG_EMAC_MDIO_STATUS 0xb0
#define EMAC_REG_EMAC_MODE 0x0
#define EMAC_REG_EMAC_RX_MODE 0xc8
#define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c
@@ -5128,16 +5712,30 @@
#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
#define MISC_REGISTERS_RESET_REG_1_SET 0x584
#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
+#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24)
+#define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1<<25)
+#define MISC_REGISTERS_RESET_REG_2_PGLC (0x1<<19)
+#define MISC_REGISTERS_RESET_REG_2_RST_ATC (0x1<<17)
#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1<<2)
#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1<<3)
#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7)
#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13)
#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11)
+#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO (0x1<<13)
#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
#define MISC_REGISTERS_RESET_REG_2_SET 0x594
+#define MISC_REGISTERS_RESET_REG_2_UMAC0 (0x1<<20)
+#define MISC_REGISTERS_RESET_REG_2_UMAC1 (0x1<<21)
+#define MISC_REGISTERS_RESET_REG_2_XMAC (0x1<<22)
+#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT (0x1<<23)
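
Editor's note: the new RESET_REG_2 bits cover the E2/E3 MAC blocks (UMAC0/1, XMAC). A sketch of cycling the XMAC block reset through the SET/CLEAR registers, assuming GRCBASE_MISC is the MISC block base and the usual convention that writing a bit to ..._2_CLEAR asserts the block reset while ..._2_SET releases it (both assumptions):

/* Sketch only. */
static void example_toggle_xmac_reset(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       MISC_REGISTERS_RESET_REG_2_XMAC);
	usleep_range(1000, 2000);               /* arbitrary settle delay */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
	       MISC_REGISTERS_RESET_REG_2_XMAC);
}
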
#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2)
@@ -5160,74 +5758,86 @@
#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
#define MISC_REGISTERS_SPIO_SET_POS 8
+#define HW_LOCK_DRV_FLAGS 10
#define HW_LOCK_MAX_RESOURCE_VALUE 31
#define HW_LOCK_RESOURCE_GPIO 1
#define HW_LOCK_RESOURCE_MDIO 0
-#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
-#define HW_LOCK_RESOURCE_RESERVED_08 8
+#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
#define HW_LOCK_RESOURCE_SPIO 2
#define HW_LOCK_RESOURCE_UNDI 5
-#define PRS_FLAG_OVERETH_IPV4 1
-#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
-#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
-#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18)
-#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31)
-#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9)
-#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (1<<8)
-#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (1<<7)
-#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (1<<6)
-#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (1<<29)
-#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (1<<28)
-#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (1<<1)
-#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (1<<0)
-#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (1<<18)
-#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (1<<11)
-#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (1<<13)
-#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (1<<12)
-#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5)
-#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9)
-#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12)
-#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (1<<28)
-#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (1<<31)
-#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (1<<29)
-#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (1<<30)
-#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15)
-#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14)
-#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20)
-#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (1<<0)
-#define AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT (1<<31)
-#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2)
-#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3)
-#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (1<<3)
-#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (1<<2)
-#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (1<<5)
-#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (1<<4)
-#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (1<<3)
-#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (1<<2)
-#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (1<<22)
-#define AEU_INPUTS_ATTN_BITS_SPIO5 (1<<15)
-#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (1<<27)
-#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (1<<5)
-#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (1<<25)
-#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (1<<24)
-#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (1<<29)
-#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (1<<28)
-#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (1<<23)
-#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (1<<27)
-#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (1<<26)
-#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (1<<21)
-#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (1<<20)
-#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (1<<25)
-#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (1<<24)
-#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (1<<16)
-#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (1<<9)
-#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (1<<7)
-#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (1<<6)
-#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (1<<11)
-#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (1<<10)
+#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23)
+#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21)
+#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16)
+#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10)
+
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (0x1<<9)
+
#define RESERVED_GENERAL_ATTENTION_BIT_0 0
-#define EVEREST_GEN_ATTN_IN_USE_MASK 0x3ffe0
+#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0
#define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000
#define RESERVED_GENERAL_ATTENTION_BIT_6 6
@@ -5317,7 +5927,13 @@
#define GRCBASE_HC 0x108000
#define GRCBASE_PXP2 0x120000
#define GRCBASE_PBF 0x140000
+#define GRCBASE_UMAC0 0x160000
+#define GRCBASE_UMAC1 0x160400
#define GRCBASE_XPB 0x161000
+#define GRCBASE_MSTAT0 0x162000
+#define GRCBASE_MSTAT1 0x162800
+#define GRCBASE_XMAC0 0x163000
+#define GRCBASE_XMAC1 0x163800
#define GRCBASE_TIMERS 0x164000
#define GRCBASE_XSDM 0x166000
#define GRCBASE_QM 0x168000
@@ -5883,6 +6499,10 @@
#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00
#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00
#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00
#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
@@ -6032,15 +6652,11 @@ The other bits are reserved and should be zero*/
#define MDIO_PMA_REG_CTRL 0x0
#define MDIO_PMA_REG_STATUS 0x1
#define MDIO_PMA_REG_10G_CTRL2 0x7
+#define MDIO_PMA_REG_TX_DISABLE 0x0009
#define MDIO_PMA_REG_RX_SD 0xa
/*bcm*/
#define MDIO_PMA_REG_BCM_CTRL 0x0096
#define MDIO_PMA_REG_FEC_CTRL 0x00ab
-#define MDIO_PMA_REG_RX_ALARM_CTRL 0x9000
-#define MDIO_PMA_REG_LASI_CTRL 0x9002
-#define MDIO_PMA_REG_RX_ALARM 0x9003
-#define MDIO_PMA_REG_TX_ALARM 0x9004
-#define MDIO_PMA_REG_LASI_STATUS 0x9005
#define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800
#define MDIO_PMA_REG_DIGITAL_CTRL 0xc808
#define MDIO_PMA_REG_DIGITAL_STATUS 0xc809
@@ -6201,6 +6817,169 @@ The other bits are reserved and should be zero*/
#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
+/* BCM84833 only */
+#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
+#define MDIO_84833_SUPER_ISOLATE 0x8000
+/* These are mailbox register set used by 84833. */
+#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005
+#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006
+#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
+#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
+#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
+#define MDIO_84833_TOP_CFG_DATA3_REG 0x4011
+#define MDIO_84833_TOP_CFG_DATA4_REG 0x4012
+
+/* Mailbox command set used by 84833. */
+#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE 0x2
+/* Mailbox status set used by 84833. */
+#define PHY84833_CMD_RECEIVED 0x0001
+#define PHY84833_CMD_IN_PROGRESS 0x0002
+#define PHY84833_CMD_COMPLETE_PASS 0x0004
+#define PHY84833_CMD_COMPLETE_ERROR 0x0008
+#define PHY84833_CMD_OPEN_FOR_CMDS 0x0010
+#define PHY84833_CMD_SYSTEM_BOOT 0x0020
+#define PHY84833_CMD_NOT_OPEN_FOR_CMDS 0x0040
+#define PHY84833_CMD_CLEAR_COMPLETE 0x0080
+#define PHY84833_CMD_OPEN_OVERRIDE 0xa5a5
+
+
+/* 84833 F/W Feature Commands */
+#define PHY84833_DIAG_CMD_GET_EEE_MODE 0x27
+#define PHY84833_DIAG_CMD_SET_EEE_MODE 0x28
+
+/* Warpcore clause 45 addressing */
+#define MDIO_WC_DEVAD 0x3
+#define MDIO_WC_REG_IEEE0BLK_MIICNTL 0x0
+#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11
+#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96
+#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
+#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
+#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL0 0x8015
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL1 0x8016
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL2 0x8017
+#define MDIO_WC_REG_TX0_ANA_CTRL0 0x8061
+#define MDIO_WC_REG_TX1_ANA_CTRL0 0x8071
+#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081
+#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091
+#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET 0x0c
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK 0x7000
+#define MDIO_WC_REG_TX1_TX_DRIVER 0x8077
+#define MDIO_WC_REG_TX2_TX_DRIVER 0x8087
+#define MDIO_WC_REG_TX3_TX_DRIVER 0x8097
+#define MDIO_WC_REG_RX0_ANARXCONTROL1G 0x80b9
+#define MDIO_WC_REG_RX2_ANARXCONTROL1G 0x80d9
+#define MDIO_WC_REG_RX0_PCI_CTRL 0x80ba
+#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
+#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
+#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
+#define MDIO_WC_REG_XGXS_STATUS3 0x8129
+#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
+#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131
+#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141
+#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B
+#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169
+#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0
+#define MDIO_WC_REG_GP2_STATUS_GP_2_1 0x81d1
+#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2
+#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4
+#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE
+#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc
+#define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE
+#define MDIO_WC_REG_DSC_SMC 0x8213
+#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e
+#define MDIO_WC_REG_TX_FIR_TAP 0x82e2
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00
+#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3
+#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6
+#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7
+#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL 0x82e8
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL 0x82ec
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1 0x8300
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2 0x8301
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3 0x8302
+#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1 0x8304
+#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308
+#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309
+#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
+#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
+#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
+#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
+#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
+#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
+#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
+#define MDIO_WC_REG_TX66_CONTROL 0x83b0
+#define MDIO_WC_REG_RX66_CONTROL 0x83c0
+#define MDIO_WC_REG_RX66_SCW0 0x83c2
+#define MDIO_WC_REG_RX66_SCW1 0x83c3
+#define MDIO_WC_REG_RX66_SCW2 0x83c4
+#define MDIO_WC_REG_RX66_SCW3 0x83c5
+#define MDIO_WC_REG_RX66_SCW0_MASK 0x83c6
+#define MDIO_WC_REG_RX66_SCW1_MASK 0x83c7
+#define MDIO_WC_REG_RX66_SCW2_MASK 0x83c8
+#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9
+#define MDIO_WC_REG_FX100_CTRL1 0x8400
+#define MDIO_WC_REG_FX100_CTRL3 0x8402
+
+#define MDIO_WC_REG_MICROBLK_CMD 0xffc2
+#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5
+#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc
+
+#define MDIO_WC_REG_AERBLK_AER 0xffde
+#define MDIO_WC_REG_COMBO_IEEE0_MIICTRL 0xffe0
+#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1
+
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4
+
+#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141
+
+#define DIGITAL5_ACTUAL_SPEED_TX_MASK 0x003f
+
+/* 54618se */
+#define MDIO_REG_GPHY_PHYID_LSB 0x3
+#define MDIO_REG_GPHY_ID_54618SE 0x5cd5
+#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd
+#define MDIO_REG_GPHY_CL45_DATA_REG 0xe
+#define MDIO_REG_GPHY_EEE_ADV 0x3c
+#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
+#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
+#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_INTR_STATUS 0x1a
+#define MDIO_REG_INTR_MASK 0x1b
+#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
+#define MDIO_REG_GPHY_SHADOW 0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
+#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
+#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
+#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8)
+
#define IGU_FUNC_BASE 0x0400
#define IGU_ADDR_MSIX 0x0000
@@ -6217,11 +6996,6 @@ The other bits are reserved and should be zero*/
#define IGU_ADDR_MSI_ADDR_HI 0x0212
#define IGU_ADDR_MSI_DATA 0x0213
-#define IGU_INT_ENABLE 0
-#define IGU_INT_DISABLE 1
-#define IGU_INT_NOP 2
-#define IGU_INT_NOP2 3
-
#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0
#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1
#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2
@@ -6292,15 +7066,6 @@ The other bits are reserved and should be zero*/
#define IGU_BC_BASE_DSB_PROD 128
#define IGU_NORM_BASE_DSB_PROD 136
-#define IGU_CTRL_CMD_TYPE_WR\
- 1
-#define IGU_CTRL_CMD_TYPE_RD\
- 0
-
-#define IGU_SEG_ACCESS_NORM 0
-#define IGU_SEG_ACCESS_DEF 1
-#define IGU_SEG_ACCESS_ATTN 2
-
/* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \
[5:2] = 0; [1:0] = PF number) */
#define IGU_FID_ENCODE_IS_PF (0x1<<6)
diff --git a/drivers/net/bnx2x/bnx2x_sp.c b/drivers/net/bnx2x/bnx2x_sp.c
new file mode 100644
index 00000000000..df52f110c6c
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_sp.c
@@ -0,0 +1,5692 @@
+/* bnx2x_sp.c: Broadcom Everest network driver.
+ *
+ * Copyright 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32c.h>
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
+
+#define BNX2X_MAX_EMUL_MULTI 16
+
+/**** Exe Queue interfaces ****/
+
+/**
+ * bnx2x_exe_queue_init - init the Exe Queue object
+ *
+ * @o: pointer to the object
+ * @exe_len: length
+ * @owner: pointer to the owner
+ * @validate: validate function pointer
+ * @optimize: optimize function pointer
+ * @exec: execute function pointer
+ * @get: get function pointer
+ */
+static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
+ struct bnx2x_exe_queue_obj *o,
+ int exe_len,
+ union bnx2x_qable_obj *owner,
+ exe_q_validate validate,
+ exe_q_optimize optimize,
+ exe_q_execute exec,
+ exe_q_get get)
+{
+ memset(o, 0, sizeof(*o));
+
+ INIT_LIST_HEAD(&o->exe_queue);
+ INIT_LIST_HEAD(&o->pending_comp);
+
+ spin_lock_init(&o->lock);
+
+ o->exe_chunk_len = exe_len;
+ o->owner = owner;
+
+ /* Owner specific callbacks */
+ o->validate = validate;
+ o->optimize = optimize;
+ o->execute = exec;
+ o->get = get;
+
+ DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
+ "length of %d\n", exe_len);
+}
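+
+/*
+ * Illustration only: a minimal sketch of how an owner object might wire its
+ * callbacks into the execution queue at init time. The chunk length of 1 and
+ * the 'vlan_mac_obj'/'qable_obj' names are assumptions, not taken from this
+ * patch; the callbacks themselves are defined further down in this file.
+ */
+#if 0
+ bnx2x_exe_queue_init(bp, &vlan_mac_obj->exe_queue, 1, qable_obj,
+ bnx2x_validate_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_mac);
+#endif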
+
+static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
+ struct bnx2x_exeq_elem *elem)
+{
+ DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
+ kfree(elem);
+}
+
+static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
+{
+ struct bnx2x_exeq_elem *elem;
+ int cnt = 0;
+
+ spin_lock_bh(&o->lock);
+
+ list_for_each_entry(elem, &o->exe_queue, link)
+ cnt++;
+
+ spin_unlock_bh(&o->lock);
+
+ return cnt;
+}
+
+/**
+ * bnx2x_exe_queue_add - add a new element to the execution queue
+ *
+ * @bp: driver handle
+ * @o: queue
+ * @elem: new command element to add
+ * @restore: true - do not optimize the command
+ *
+ * If the element is optimized or is illegal, frees it.
+ */
+static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ bool restore)
+{
+ int rc;
+
+ spin_lock_bh(&o->lock);
+
+ if (!restore) {
+ /* Try to cancel this command by optimizing it away */
+ rc = o->optimize(bp, o->owner, elem);
+ if (rc)
+ goto free_and_exit;
+
+ /* Check if this request is ok */
+ rc = o->validate(bp, o->owner, elem);
+ if (rc) {
+ BNX2X_ERR("Preamble failed: %d\n", rc);
+ goto free_and_exit;
+ }
+ }
+
+ /* The command is legal - add it to the execution queue */
+ list_add_tail(&elem->link, &o->exe_queue);
+
+ spin_unlock_bh(&o->lock);
+
+ return 0;
+
+free_and_exit:
+ bnx2x_exe_queue_free_elem(bp, elem);
+
+ spin_unlock_bh(&o->lock);
+
+ return rc;
+}
+
+static inline void __bnx2x_exe_queue_reset_pending(
+ struct bnx2x *bp,
+ struct bnx2x_exe_queue_obj *o)
+{
+ struct bnx2x_exeq_elem *elem;
+
+ while (!list_empty(&o->pending_comp)) {
+ elem = list_first_entry(&o->pending_comp,
+ struct bnx2x_exeq_elem, link);
+
+ list_del(&elem->link);
+ bnx2x_exe_queue_free_elem(bp, elem);
+ }
+}
+
+static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
+ struct bnx2x_exe_queue_obj *o)
+{
+ spin_lock_bh(&o->lock);
+
+ __bnx2x_exe_queue_reset_pending(bp, o);
+
+ spin_unlock_bh(&o->lock);
+}
+
+/**
+ * bnx2x_exe_queue_step - execute one execution chunk atomically
+ *
+ * @bp: driver handle
+ * @o: queue
+ * @ramrod_flags: flags
+ *
+ * (Atomicity is ensured using the exe_queue->lock).
+ */
+static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
+ struct bnx2x_exe_queue_obj *o,
+ unsigned long *ramrod_flags)
+{
+ struct bnx2x_exeq_elem *elem, spacer;
+ int cur_len = 0, rc;
+
+ memset(&spacer, 0, sizeof(spacer));
+
+ spin_lock_bh(&o->lock);
+
+ /*
+ * Next step should not be performed until the current is finished,
+ * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
+ * properly clear object internals without sending any command to the FW
+ * which also implies there won't be any completion to clear the
+ * 'pending' list.
+ */
+ if (!list_empty(&o->pending_comp)) {
+ if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
+ DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
+ "resetting pending_comp\n");
+ __bnx2x_exe_queue_reset_pending(bp, o);
+ } else {
+ spin_unlock_bh(&o->lock);
+ return 1;
+ }
+ }
+
+ /*
+ * Run through the pending commands list and create a next
+ * execution chunk.
+ */
+ while (!list_empty(&o->exe_queue)) {
+ elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
+ link);
+ WARN_ON(!elem->cmd_len);
+
+ if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
+ cur_len += elem->cmd_len;
+ /*
+ * Prevent from both lists being empty when moving an
+ * element. This will allow the call of
+ * bnx2x_exe_queue_empty() without locking.
+ */
+ list_add_tail(&spacer.link, &o->pending_comp);
+ mb();
+ list_del(&elem->link);
+ list_add_tail(&elem->link, &o->pending_comp);
+ list_del(&spacer.link);
+ } else
+ break;
+ }
+
+ /* Sanity check */
+ if (!cur_len) {
+ spin_unlock_bh(&o->lock);
+ return 0;
+ }
+
+ rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
+ if (rc < 0)
+ /*
+ * In case of an error return the commands back to the queue
+ * and reset the pending_comp.
+ */
+ list_splice_init(&o->pending_comp, &o->exe_queue);
+ else if (!rc)
+ /*
+ * If zero is returned, means there are no outstanding pending
+ * completions and we may dismiss the pending list.
+ */
+ __bnx2x_exe_queue_reset_pending(bp, o);
+
+ spin_unlock_bh(&o->lock);
+ return rc;
+}
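+
+/*
+ * Illustration only: the intended driving pattern, roughly - queue a command
+ * and then kick one chunk. A positive return from the step means a previous
+ * chunk is still waiting for its completion. The local names used here are
+ * assumptions.
+ */
+#if 0
+ rc = bnx2x_exe_queue_add(bp, &o->exe_queue, elem, false);
+ if (!rc)
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
+#endif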
+
+static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
+{
+ bool empty = list_empty(&o->exe_queue);
+
+ /* Don't reorder!!! */
+ mb();
+
+ return empty && list_empty(&o->pending_comp);
+}
+
+static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
+ struct bnx2x *bp)
+{
+ DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
+ return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
+}
+
+/************************ raw_obj functions ***********************************/
+static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
+{
+ return !!test_bit(o->state, o->pstate);
+}
+
+static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(o->state, o->pstate);
+ smp_mb__after_clear_bit();
+}
+
+static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
+{
+ smp_mb__before_clear_bit();
+ set_bit(o->state, o->pstate);
+ smp_mb__after_clear_bit();
+}
+
+/**
+ * bnx2x_state_wait - wait until the given bit(state) is cleared
+ *
+ * @bp: device handle
+ * @state: state which is to be cleared
+ * @pstate: state buffer
+ *
+ */
+static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
+ unsigned long *pstate)
+{
+ /* can take a while if any port is running */
+ int cnt = 5000;
+
+
+ if (CHIP_REV_IS_EMUL(bp))
+ cnt *= 20;
+
+ DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
+
+ might_sleep();
+ while (cnt--) {
+ if (!test_bit(state, pstate)) {
+#ifdef BNX2X_STOP_ON_ERROR
+ DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
+#endif
+ return 0;
+ }
+
+ usleep_range(1000, 1000);
+
+ if (bp->panic)
+ return -EIO;
+ }
+
+ /* timeout! */
+ BNX2X_ERR("timeout waiting for state %d\n", state);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+
+ return -EBUSY;
+}
+
+static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
+{
+ return bnx2x_state_wait(bp, raw->state, raw->pstate);
+}
+
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/* credit handling callbacks */
+static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+ WARN_ON(!mp);
+
+ return mp->get_entry(mp, offset);
+}
+
+static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+ WARN_ON(!mp);
+
+ return mp->get(mp, 1);
+}
+
+static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
+{
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ WARN_ON(!vp);
+
+ return vp->get_entry(vp, offset);
+}
+
+static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ WARN_ON(!vp);
+
+ return vp->get(vp, 1);
+}
+
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ if (!mp->get(mp, 1))
+ return false;
+
+ if (!vp->get(vp, 1)) {
+ mp->put(mp, 1);
+ return false;
+ }
+
+ return true;
+}
+
+static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+ return mp->put_entry(mp, offset);
+}
+
+static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+ return mp->put(mp, 1);
+}
+
+static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ return vp->put_entry(vp, offset);
+}
+
+static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ return vp->put(vp, 1);
+}
+
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+ struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+ if (!mp->put(mp, 1))
+ return false;
+
+ if (!vp->put(vp, 1)) {
+ mp->get(mp, 1);
+ return false;
+ }
+
+ return true;
+}
+
+/* check_add() callbacks */
+static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ if (!is_valid_ether_addr(data->mac.mac))
+ return -EINVAL;
+
+ /* Check if a requested MAC already exists */
+ list_for_each_entry(pos, &o->head, link)
+ if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ return -EEXIST;
+
+ return 0;
+}
+
+static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ list_for_each_entry(pos, &o->head, link)
+ if (data->vlan.vlan == pos->u.vlan.vlan)
+ return -EEXIST;
+
+ return 0;
+}
+
+static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ list_for_each_entry(pos, &o->head, link)
+ if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+ (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+ ETH_ALEN)))
+ return -EEXIST;
+
+ return 0;
+}
+
+
+/* check_del() callbacks */
+static struct bnx2x_vlan_mac_registry_elem *
+ bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ list_for_each_entry(pos, &o->head, link)
+ if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ return pos;
+
+ return NULL;
+}
+
+static struct bnx2x_vlan_mac_registry_elem *
+ bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ list_for_each_entry(pos, &o->head, link)
+ if (data->vlan.vlan == pos->u.vlan.vlan)
+ return pos;
+
+ return NULL;
+}
+
+static struct bnx2x_vlan_mac_registry_elem *
+ bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+
+ list_for_each_entry(pos, &o->head, link)
+ if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+ (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+ ETH_ALEN)))
+ return pos;
+
+ return NULL;
+}
+
+/* check_move() callback */
+static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
+ struct bnx2x_vlan_mac_obj *dst_o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+ int rc;
+
+ /* Check if we can delete the requested configuration from the first
+ * object.
+ */
+ pos = src_o->check_del(src_o, data);
+
+ /* check if configuration can be added */
+ rc = dst_o->check_add(dst_o, data);
+
+ /* If this classification can not be added (is already set)
+ * or can't be deleted - return an error.
+ */
+ if (rc || !pos)
+ return false;
+
+ return true;
+}
+
+static bool bnx2x_check_move_always_err(
+ struct bnx2x_vlan_mac_obj *src_o,
+ struct bnx2x_vlan_mac_obj *dst_o,
+ union bnx2x_classification_ramrod_data *data)
+{
+ return false;
+}
+
+
+static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ u8 rx_tx_flag = 0;
+
+ if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
+ (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
+
+ if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
+ (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
+
+ return rx_tx_flag;
+}
+
+/* LLH CAM line allocations */
+enum {
+ LLH_CAM_ISCSI_ETH_LINE = 0,
+ LLH_CAM_ETH_LINE,
+ LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
+};
+
+static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+ bool add, unsigned char *dev_addr, int index)
+{
+ u32 wb_data[2];
+ u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
+ NIG_REG_LLH0_FUNC_MEM;
+
+ if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
+ return;
+
+ DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
+ (add ? "ADD" : "DELETE"), index);
+
+ if (add) {
+ /* LLH_FUNC_MEM is a u64 WB register */
+ reg_offset += 8*index;
+
+ wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
+ (dev_addr[4] << 8) | dev_addr[5]);
+ wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
+
+ REG_WR_DMAE(bp, reg_offset, wb_data, 2);
+ }
+
+ REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
+ NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
+}
+
+/**
+ * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
+ *
+ * @bp: device handle
+ * @o: queue for which we want to configure this rule
+ * @add: if true the command is an ADD command, DEL otherwise
+ * @opcode: CLASSIFY_RULE_OPCODE_XXX
+ * @hdr: pointer to a header to setup
+ *
+ */
+static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
+ struct eth_classify_cmd_header *hdr)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+
+ hdr->client_id = raw->cl_id;
+ hdr->func_id = raw->func_id;
+
+ /* Rx or/and Tx (internal switching) configuration ? */
+ hdr->cmd_general_data |=
+ bnx2x_vlan_mac_get_rx_tx_flag(o);
+
+ if (add)
+ hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
+
+ hdr->cmd_general_data |=
+ (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
+}
+
+/**
+ * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
+ *
+ * @cid: connection id
+ * @type: BNX2X_FILTER_XXX_PENDING
+ * @hdr: pointer to the header to setup
+ * @rule_cnt: number of rules configured in the ramrod data
+ *
+ * Currently we always configure one rule and set the echo field to contain a
+ * CID and an opcode type.
+ */
+static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
+ struct eth_classify_header *hdr, int rule_cnt)
+{
+ hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
+ hdr->rule_cnt = (u8)rule_cnt;
+}
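+
+/*
+ * Illustration only: the completion path can unpack the echo field back into
+ * the connection id and the pending-filter type, roughly as below; the local
+ * variable names are assumptions.
+ */
+#if 0
+ cid = hdr->echo & BNX2X_SWCID_MASK;
+ type = hdr->echo >> BNX2X_SWCID_SHIFT;
+#endif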
+
+
+/* hw_config() callbacks */
+static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem, int rule_idx,
+ int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct eth_classify_rules_ramrod_data *data =
+ (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+ int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
+ union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+ bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+ unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
+ u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
+
+ /*
+ * Set LLH CAM entry: currently only iSCSI and ETH MACs are
+ * relevant. In addition, the current implementation is tuned for a
+ * single ETH MAC.
+ *
+ * When a PF must configure multiple unicast ETH MACs in switch
+ * independent mode (NetQ, multiple netdev MACs, etc.), consider
+ * making better use of the 8 per-function MAC entries in the LLH
+ * register. There are also NIG_REG_P[01]_LLH_FUNC_MEM2 registers
+ * that bring the total number of CAM entries to 16.
+ *
+ * Currently we won't configure NIG for MACs other than a primary ETH
+ * MAC and iSCSI L2 MAC.
+ *
+ * If this MAC is moving from one Queue to another, no need to change
+ * NIG configuration.
+ */
+ if (cmd != BNX2X_VLAN_MAC_MOVE) {
+ if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
+ bnx2x_set_mac_in_nig(bp, add, mac,
+ LLH_CAM_ISCSI_ETH_LINE);
+ else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
+ bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
+ }
+
+ /* Reset the ramrod data buffer for the first rule */
+ if (rule_idx == 0)
+ memset(data, 0, sizeof(*data));
+
+ /* Setup a command header */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
+ &rule_entry->mac.header);
+
+ DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for "
+ "Queue %d\n", (add ? "add" : "delete"),
+ BNX2X_MAC_PRN_LIST(mac), raw->cl_id);
+
+ /* Set a MAC itself */
+ bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+ &rule_entry->mac.mac_mid,
+ &rule_entry->mac.mac_lsb, mac);
+
+ /* MOVE: Add a rule that will add this MAC to the target Queue */
+ if (cmd == BNX2X_VLAN_MAC_MOVE) {
+ rule_entry++;
+ rule_cnt++;
+
+ /* Setup ramrod data */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+ elem->cmd_data.vlan_mac.target_obj,
+ true, CLASSIFY_RULE_OPCODE_MAC,
+ &rule_entry->mac.header);
+
+ /* Set a MAC itself */
+ bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+ &rule_entry->mac.mac_mid,
+ &rule_entry->mac.mac_lsb, mac);
+ }
+
+ /* Set the ramrod data header */
+ /* TODO: take this to the higher level in order to prevent multiple
+ writing */
+ bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+ rule_cnt);
+}
+
+/**
+ * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
+ *
+ * @bp: device handle
+ * @o: queue
+ * @type: BNX2X_FILTER_XXX_PENDING
+ * @cam_offset: offset in cam memory
+ * @hdr: pointer to a header to setup
+ *
+ * E1/E1H
+ */
+static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
+ struct mac_configuration_hdr *hdr)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+
+ hdr->length = 1;
+ hdr->offset = (u8)cam_offset;
+ hdr->client_id = 0xff;
+ hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
+}
+
+static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
+ u16 vlan_id, struct mac_configuration_entry *cfg_entry)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+ u32 cl_bit_vec = (1 << r->cl_id);
+
+ cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
+ cfg_entry->pf_id = r->func_id;
+ cfg_entry->vlan_id = cpu_to_le16(vlan_id);
+
+ if (add) {
+ SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_SET);
+ SET_FLAG(cfg_entry->flags,
+ MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
+
+ /* Set a MAC in a ramrod data */
+ bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
+ &cfg_entry->middle_mac_addr,
+ &cfg_entry->lsb_mac_addr, mac);
+ } else
+ SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+}
+
+static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
+ u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
+{
+ struct mac_configuration_entry *cfg_entry = &config->config_table[0];
+ struct bnx2x_raw_obj *raw = &o->raw;
+
+ bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
+ &config->hdr);
+ bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
+ cfg_entry);
+
+ DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n",
+ (add ? "setting" : "clearing"),
+ BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset);
+}
+
+/**
+ * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
+ *
+ * @bp: device handle
+ * @o: bnx2x_vlan_mac_obj
+ * @elem: bnx2x_exeq_elem
+ * @rule_idx: rule_idx
+ * @cam_offset: cam_offset
+ */
+static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem, int rule_idx,
+ int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct mac_configuration_cmd *config =
+ (struct mac_configuration_cmd *)(raw->rdata);
+ /*
+ * 57710 and 57711 do not support MOVE command,
+ * so it's either ADD or DEL
+ */
+ bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+ true : false;
+
+ /* Reset the ramrod data buffer */
+ memset(config, 0, sizeof(*config));
+
+ bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
+ cam_offset, add,
+ elem->cmd_data.vlan_mac.u.mac.mac, 0,
+ ETH_VLAN_FILTER_ANY_VLAN, config);
+}
+
+static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem, int rule_idx,
+ int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct eth_classify_rules_ramrod_data *data =
+ (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+ int rule_cnt = rule_idx + 1;
+ union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+ int cmd = elem->cmd_data.vlan_mac.cmd;
+ bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+ u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
+
+ /* Reset the ramrod data buffer for the first rule */
+ if (rule_idx == 0)
+ memset(data, 0, sizeof(*data));
+
+ /* Set a rule header */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
+ &rule_entry->vlan.header);
+
+ DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
+ vlan);
+
+ /* Set a VLAN itself */
+ rule_entry->vlan.vlan = cpu_to_le16(vlan);
+
+ /* MOVE: Add a rule that will add this MAC to the target Queue */
+ if (cmd == BNX2X_VLAN_MAC_MOVE) {
+ rule_entry++;
+ rule_cnt++;
+
+ /* Setup ramrod data */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+ elem->cmd_data.vlan_mac.target_obj,
+ true, CLASSIFY_RULE_OPCODE_VLAN,
+ &rule_entry->vlan.header);
+
+ /* Set a VLAN itself */
+ rule_entry->vlan.vlan = cpu_to_le16(vlan);
+ }
+
+ /* Set the ramrod data header */
+ /* TODO: take this to the higher level in order to prevent multiple
+ writing */
+ bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+ rule_cnt);
+}
+
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ int rule_idx, int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct eth_classify_rules_ramrod_data *data =
+ (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+ int rule_cnt = rule_idx + 1;
+ union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+ int cmd = elem->cmd_data.vlan_mac.cmd;
+ bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+ u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+ u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+
+ /* Reset the ramrod data buffer for the first rule */
+ if (rule_idx == 0)
+ memset(data, 0, sizeof(*data));
+
+ /* Set a rule header */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+ &rule_entry->pair.header);
+
+ /* Set VLAN and MAC themselves */
+ rule_entry->pair.vlan = cpu_to_le16(vlan);
+ bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+ &rule_entry->pair.mac_mid,
+ &rule_entry->pair.mac_lsb, mac);
+
+ /* MOVE: Add a rule that will add this MAC to the target Queue */
+ if (cmd == BNX2X_VLAN_MAC_MOVE) {
+ rule_entry++;
+ rule_cnt++;
+
+ /* Setup ramrod data */
+ bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+ elem->cmd_data.vlan_mac.target_obj,
+ true, CLASSIFY_RULE_OPCODE_PAIR,
+ &rule_entry->pair.header);
+
+ /* Set a VLAN itself */
+ rule_entry->pair.vlan = cpu_to_le16(vlan);
+ bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+ &rule_entry->pair.mac_mid,
+ &rule_entry->pair.mac_lsb, mac);
+ }
+
+ /* Set the ramrod data header */
+ /* TODO: take this to the higher level in order to prevent multiple
+ writing */
+ bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+ rule_cnt);
+}
+
+/**
+ * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
+ *
+ * @bp: device handle
+ * @o: bnx2x_vlan_mac_obj
+ * @elem: bnx2x_exeq_elem
+ * @rule_idx: rule_idx
+ * @cam_offset: cam_offset
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ int rule_idx, int cam_offset)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct mac_configuration_cmd *config =
+ (struct mac_configuration_cmd *)(raw->rdata);
+ /*
+ * 57710 and 57711 do not support MOVE command,
+ * so it's either ADD or DEL
+ */
+ bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+ true : false;
+
+ /* Reset the ramrod data buffer */
+ memset(config, 0, sizeof(*config));
+
+ bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+ cam_offset, add,
+ elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+ elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+ ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
+#define list_next_entry(pos, member) \
+ list_entry((pos)->member.next, typeof(*(pos)), member)
+
+/**
+ * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
+ *
+ * @bp: device handle
+ * @p: command parameters
+ * @ppos: pointer to the cookie
+ *
+ * Reconfigures the next MAC/VLAN/VLAN-MAC element from the
+ * previously configured elements list.
+ *
+ * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
+ * is taken into account.
+ *
+ * The cookie should be given back in the next call to make the function
+ * handle the next element. If *ppos is set to NULL the iterator is restarted.
+ * If the returned *ppos == NULL, the last element has been handled.
+ *
+ */
+static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p,
+ struct bnx2x_vlan_mac_registry_elem **ppos)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+ struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+
+ /* If list is empty - there is nothing to do here */
+ if (list_empty(&o->head)) {
+ *ppos = NULL;
+ return 0;
+ }
+
+ /* make a step... */
+ if (*ppos == NULL)
+ *ppos = list_first_entry(&o->head,
+ struct bnx2x_vlan_mac_registry_elem,
+ link);
+ else
+ *ppos = list_next_entry(*ppos, link);
+
+ pos = *ppos;
+
+ /* If it's the last step - return NULL */
+ if (list_is_last(&pos->link, &o->head))
+ *ppos = NULL;
+
+ /* Prepare a 'user_req' */
+ memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
+
+ /* Set the command */
+ p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
+
+ /* Set vlan_mac_flags */
+ p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
+
+ /* Set a restore bit */
+ __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+ return bnx2x_config_vlan_mac(bp, p);
+}
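+
+/*
+ * Illustration only: walking the whole registry with the restore iterator,
+ * assuming 'p' has already been filled in by the caller.
+ */
+#if 0
+ struct bnx2x_vlan_mac_registry_elem *pos = NULL;
+
+ do {
+ rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
+ if (rc < 0)
+ return rc;
+ } while (pos);
+#endif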
+
+/*
+ * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
+ * pointer to an element with a specific criteria and NULL if such an element
+ * hasn't been found.
+ */
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem *pos;
+ struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
+
+ /* Check pending for execution commands */
+ list_for_each_entry(pos, &o->exe_queue, link)
+ if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
+ sizeof(*data)) &&
+ (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+ return pos;
+
+ return NULL;
+}
+
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem *pos;
+ struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
+
+ /* Check pending for execution commands */
+ list_for_each_entry(pos, &o->exe_queue, link)
+ if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
+ sizeof(*data)) &&
+ (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+ return pos;
+
+ return NULL;
+}
+
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+ struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem *pos;
+ struct bnx2x_vlan_mac_ramrod_data *data =
+ &elem->cmd_data.vlan_mac.u.vlan_mac;
+
+ /* Check pending for execution commands */
+ list_for_each_entry(pos, &o->exe_queue, link)
+ if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+ sizeof(*data)) &&
+ (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+ return pos;
+
+ return NULL;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
+ *
+ * @bp: device handle
+ * @qo: bnx2x_qable_obj
+ * @elem: bnx2x_exeq_elem
+ *
+ * Checks that the requested configuration can be added. If yes and if
+ * requested, consume CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ *
+ */
+static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+ int rc;
+
+ /* Check the registry */
+ rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
+ if (rc) {
+ DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
+ "current registry state\n");
+ return rc;
+ }
+
+ /*
+ * Check if there is a pending ADD command for this
+ * MAC/VLAN/VLAN-MAC. Return an error if there is.
+ */
+ if (exeq->get(exeq, elem)) {
+ DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
+ return -EEXIST;
+ }
+
+ /*
+ * TODO: Check the pending MOVE from other objects where this
+ * object is a destination object.
+ */
+
+ /* Consume the credit, unless we were asked not to */
+ if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ o->get_credit(o)))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
+ *
+ * @bp: device handle
+ * @qo: qable object to check
+ * @elem: element that needs to be deleted
+ *
+ * Checks that the requested configuration can be deleted. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+ struct bnx2x_vlan_mac_registry_elem *pos;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+ struct bnx2x_exeq_elem query_elem;
+
+ /* If this classification can not be deleted (doesn't exist)
+ * - return -EEXIST.
+ */
+ pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+ if (!pos) {
+ DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
+ "current registry state\n");
+ return -EEXIST;
+ }
+
+ /*
+ * Check if there are pending DEL or MOVE commands for this
+ * MAC/VLAN/VLAN-MAC. Return an error if so.
+ */
+ memcpy(&query_elem, elem, sizeof(query_elem));
+
+ /* Check for MOVE commands */
+ query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
+ if (exeq->get(exeq, &query_elem)) {
+ BNX2X_ERR("There is a pending MOVE command already\n");
+ return -EINVAL;
+ }
+
+ /* Check for DEL commands */
+ if (exeq->get(exeq, elem)) {
+ DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
+ return -EEXIST;
+ }
+
+ /* Return the credit to the credit pool, unless we were asked not to */
+ if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ o->put_credit(o))) {
+ BNX2X_ERR("Failed to return a credit\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
+ *
+ * @bp: device handle
+ * @qo: qable object to check (source)
+ * @elem: element that needs to be moved
+ *
+ * Checks that the requested configuration can be moved. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
+ struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
+ struct bnx2x_exeq_elem query_elem;
+ struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
+ struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
+
+ /*
+ * Check if we can perform this operation based on the current registry
+ * state.
+ */
+ if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
+ DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
+ "current registry state\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Check if there is an already pending DEL or MOVE command for the
+ * source object or ADD command for a destination object. Return an
+ * error if so.
+ */
+ memcpy(&query_elem, elem, sizeof(query_elem));
+
+ /* Check DEL on source */
+ query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+ if (src_exeq->get(src_exeq, &query_elem)) {
+ BNX2X_ERR("There is a pending DEL command on the source "
+ "queue already\n");
+ return -EINVAL;
+ }
+
+ /* Check MOVE on source */
+ if (src_exeq->get(src_exeq, elem)) {
+ DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
+ return -EEXIST;
+ }
+
+ /* Check ADD on destination */
+ query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+ if (dest_exeq->get(dest_exeq, &query_elem)) {
+ BNX2X_ERR("There is a pending ADD command on the "
+ "destination queue already\n");
+ return -EINVAL;
+ }
+
+ /* Consume the credit, unless we were asked not to */
+ if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ dest_o->get_credit(dest_o)))
+ return -EINVAL;
+
+ if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+ src_o->put_credit(src_o))) {
+ /* return the credit taken from dest... */
+ dest_o->put_credit(dest_o);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ switch (elem->cmd_data.vlan_mac.cmd) {
+ case BNX2X_VLAN_MAC_ADD:
+ return bnx2x_validate_vlan_mac_add(bp, qo, elem);
+ case BNX2X_VLAN_MAC_DEL:
+ return bnx2x_validate_vlan_mac_del(bp, qo, elem);
+ case BNX2X_VLAN_MAC_MOVE:
+ return bnx2x_validate_vlan_mac_move(bp, qo, elem);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
+ *
+ * @bp: device handle
+ * @o: bnx2x_vlan_mac_obj
+ *
+ */
+static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int cnt = 5000, rc;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+ struct bnx2x_raw_obj *raw = &o->raw;
+
+ while (cnt--) {
+ /* Wait for the current command to complete */
+ rc = raw->wait_comp(bp, raw);
+ if (rc)
+ return rc;
+
+ /* Wait until there are no pending commands */
+ if (!bnx2x_exe_queue_empty(exeq))
+ usleep_range(1000, 1000);
+ else
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/**
+ * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
+ *
+ * @bp: device handle
+ * @o: bnx2x_vlan_mac_obj
+ * @cqe: completion element
+ * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
+ *
+ */
+static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ union event_ring_elem *cqe,
+ unsigned long *ramrod_flags)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+ int rc;
+
+ /* Reset pending list */
+ bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+
+ /* Clear pending */
+ r->clear_pending(r);
+
+ /* If ramrod failed this is most likely a SW bug */
+ if (cqe->message.error)
+ return -EINVAL;
+
+ /* Run the next bulk of pending commands if requested */
+ if (test_bit(RAMROD_CONT, ramrod_flags)) {
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ if (rc < 0)
+ return rc;
+ }
+
+ /* If there is more work to do return PENDING */
+ if (!bnx2x_exe_queue_empty(&o->exe_queue))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
+ *
+ * @bp: device handle
+ * @qo: bnx2x_qable_obj
+ * @elem: bnx2x_exeq_elem
+ */
+static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ struct bnx2x_exeq_elem query, *pos;
+ struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+
+ memcpy(&query, elem, sizeof(query));
+
+ switch (elem->cmd_data.vlan_mac.cmd) {
+ case BNX2X_VLAN_MAC_ADD:
+ query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+ break;
+ case BNX2X_VLAN_MAC_DEL:
+ query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+ break;
+ default:
+ /* Don't handle anything other than ADD or DEL */
+ return 0;
+ }
+
+ /* If we found the appropriate element - delete it */
+ pos = exeq->get(exeq, &query);
+ if (pos) {
+
+ /* Return the credit of the optimized command */
+ if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
+ if ((query.cmd_data.vlan_mac.cmd ==
+ BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
+ BNX2X_ERR("Failed to return the credit for the "
+ "optimized ADD command\n");
+ return -EINVAL;
+ } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
+ BNX2X_ERR("Failed to recover the credit from "
+ "the optimized DEL command\n");
+ return -EINVAL;
+ }
+ }
+
+ DP(BNX2X_MSG_SP, "Optimizing %s command\n",
+ (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+ "ADD" : "DEL");
+
+ list_del(&pos->link);
+ bnx2x_exe_queue_free_elem(bp, pos);
+ return 1;
+ }
+
+ return 0;
+}
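+
+/*
+ * Illustration only: if an ADD for some MAC is still sitting in the execution
+ * queue and a DEL for the same MAC is pushed before RAMROD_EXEC is requested,
+ * the optimizer above drops the pending ADD so neither command reaches the FW.
+ * 'p', 'mac_obj' and 'addr' below are assumptions.
+ */
+#if 0
+ p.vlan_mac_obj = mac_obj;
+ p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
+ rc = bnx2x_config_vlan_mac(bp, &p); /* only queued */
+
+ p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+ rc = bnx2x_config_vlan_mac(bp, &p); /* cancels the queued ADD */
+#endif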
+
+/**
+ * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
+ *
+ * @bp: device handle
+ * @o: vlan_mac object to get the registry element for
+ * @elem: execution queue element holding the command
+ * @restore: true if this is a restore flow
+ * @re: output - the prepared registry element
+ *
+ * prepare a registry element according to the current command request.
+ */
+static inline int bnx2x_vlan_mac_get_registry_elem(
+ struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem,
+ bool restore,
+ struct bnx2x_vlan_mac_registry_elem **re)
+{
+ int cmd = elem->cmd_data.vlan_mac.cmd;
+ struct bnx2x_vlan_mac_registry_elem *reg_elem;
+
+ /* Allocate a new registry element if needed. */
+ if (!restore &&
+ ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
+ reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
+ if (!reg_elem)
+ return -ENOMEM;
+
+ /* Get a new CAM offset */
+ if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
+ /*
+ * This should never happen, because we have checked the
+ * CAM availability in the 'validate'.
+ */
+ WARN_ON(1);
+ kfree(reg_elem);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
+
+ /* Set a VLAN-MAC data */
+ memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
+ sizeof(reg_elem->u));
+
+ /* Copy the flags (needed for DEL and RESTORE flows) */
+ reg_elem->vlan_mac_flags =
+ elem->cmd_data.vlan_mac.vlan_mac_flags;
+ } else /* DEL, RESTORE */
+ reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+
+ *re = reg_elem;
+ return 0;
+}
+
+/**
+ * bnx2x_execute_vlan_mac - execute vlan mac command
+ *
+ * @bp: device handle
+ * @qo: qable object (the vlan_mac member is used)
+ * @exe_chunk: list of commands to execute
+ * @ramrod_flags: RAMROD_XXX flags
+ *
+ * go and send a ramrod!
+ */
+static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct list_head *exe_chunk,
+ unsigned long *ramrod_flags)
+{
+ struct bnx2x_exeq_elem *elem;
+ struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+ int rc, idx = 0;
+ bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
+ bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
+ struct bnx2x_vlan_mac_registry_elem *reg_elem;
+ int cmd;
+
+ /*
+ * If DRIVER_ONLY execution is requested, clean up the registry
+ * and exit. Otherwise send a ramrod to FW.
+ */
+ if (!drv_only) {
+ WARN_ON(r->check_pending(r));
+
+ /* Set pending */
+ r->set_pending(r);
+
+ /* Fill the ramrod data */
+ list_for_each_entry(elem, exe_chunk, link) {
+ cmd = elem->cmd_data.vlan_mac.cmd;
+ /*
+ * We will add to the target object in MOVE command, so
+ * change the object for a CAM search.
+ */
+ if (cmd == BNX2X_VLAN_MAC_MOVE)
+ cam_obj = elem->cmd_data.vlan_mac.target_obj;
+ else
+ cam_obj = o;
+
+ rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
+ elem, restore,
+ &reg_elem);
+ if (rc)
+ goto error_exit;
+
+ WARN_ON(!reg_elem);
+
+ /* Push a new entry into the registry */
+ if (!restore &&
+ ((cmd == BNX2X_VLAN_MAC_ADD) ||
+ (cmd == BNX2X_VLAN_MAC_MOVE)))
+ list_add(&reg_elem->link, &cam_obj->head);
+
+ /* Configure a single command in a ramrod data buffer */
+ o->set_one_rule(bp, o, elem, idx,
+ reg_elem->cam_offset);
+
+ /* MOVE command consumes 2 entries in the ramrod data */
+ if (cmd == BNX2X_VLAN_MAC_MOVE)
+ idx += 2;
+ else
+ idx++;
+ }
+
+ /*
+ * No need for an explicit memory barrier here: the only ordering
+ * we need is between writing the SPQ element and updating the SPQ
+ * producer, and bnx2x_sp_post() already issues a full memory
+ * barrier for that.
+ */
+
+ rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
+ U64_HI(r->rdata_mapping),
+ U64_LO(r->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+ if (rc)
+ goto error_exit;
+ }
+
+ /* Now, when we are done with the ramrod - clean up the registry */
+ list_for_each_entry(elem, exe_chunk, link) {
+ cmd = elem->cmd_data.vlan_mac.cmd;
+ if ((cmd == BNX2X_VLAN_MAC_DEL) ||
+ (cmd == BNX2X_VLAN_MAC_MOVE)) {
+ reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+
+ WARN_ON(!reg_elem);
+
+ o->put_cam_offset(o, reg_elem->cam_offset);
+ list_del(&reg_elem->link);
+ kfree(reg_elem);
+ }
+ }
+
+ if (!drv_only)
+ return 1;
+ else
+ return 0;
+
+error_exit:
+ r->clear_pending(r);
+
+ /* Cleanup a registry in case of a failure */
+ list_for_each_entry(elem, exe_chunk, link) {
+ cmd = elem->cmd_data.vlan_mac.cmd;
+
+ if (cmd == BNX2X_VLAN_MAC_MOVE)
+ cam_obj = elem->cmd_data.vlan_mac.target_obj;
+ else
+ cam_obj = o;
+
+ /* Delete all newly added above entries */
+ if (!restore &&
+ ((cmd == BNX2X_VLAN_MAC_ADD) ||
+ (cmd == BNX2X_VLAN_MAC_MOVE))) {
+ reg_elem = o->check_del(cam_obj,
+ &elem->cmd_data.vlan_mac.u);
+ if (reg_elem) {
+ list_del(&reg_elem->link);
+ kfree(reg_elem);
+ }
+ }
+ }
+
+ return rc;
+}
+
+static inline int bnx2x_vlan_mac_push_new_cmd(
+ struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p)
+{
+ struct bnx2x_exeq_elem *elem;
+ struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+ bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+ /* Allocate the execution queue element */
+ elem = bnx2x_exe_queue_alloc_elem(bp);
+ if (!elem)
+ return -ENOMEM;
+
+ /* Set the command 'length' */
+ switch (p->user_req.cmd) {
+ case BNX2X_VLAN_MAC_MOVE:
+ elem->cmd_len = 2;
+ break;
+ default:
+ elem->cmd_len = 1;
+ }
+
+ /* Fill the object specific info */
+ memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
+
+ /* Try to add a new command to the pending list */
+ return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
+}
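+
+/*
+ * Note on cmd_len above: a MOVE is accounted as two execution-queue
+ * slots because it consumes two entries in the ramrod data (see the
+ * idx += 2 handling in bnx2x_execute_vlan_mac()), while ADD and DEL
+ * each take a single entry.
+ */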
+
+/**
+ * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
+ *
+ * @bp: device handle
+ * @p: ramrod parameters describing the requested command
+ *
+ */
+int bnx2x_config_vlan_mac(
+ struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p)
+{
+ int rc = 0;
+ struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+ unsigned long *ramrod_flags = &p->ramrod_flags;
+ bool cont = test_bit(RAMROD_CONT, ramrod_flags);
+ struct bnx2x_raw_obj *raw = &o->raw;
+
+ /*
+ * Add new elements to the execution list for commands that require it.
+ */
+ if (!cont) {
+ rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
+ if (rc)
+ return rc;
+ }
+
+ /*
+ * If nothing more will be executed in this iteration, return a
+ * positive ("pending") value when there are still pending commands.
+ */
+ if (!bnx2x_exe_queue_empty(&o->exe_queue))
+ rc = 1;
+
+ if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
+ DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
+ "clearing a pending bit.\n");
+ raw->clear_pending(raw);
+ }
+
+ /* Execute commands if required */
+ if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
+ test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ if (rc < 0)
+ return rc;
+ }
+
+ /*
+ * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
+ * then the caller wants to wait until the last command is done.
+ */
+ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+ /*
+ * Wait maximum for the current exe_queue length iterations plus
+ * one (for the current pending command).
+ */
+ int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
+
+ while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
+ max_iterations--) {
+
+ /* Wait for the current command to complete */
+ rc = raw->wait_comp(bp, raw);
+ if (rc)
+ return rc;
+
+ /* Make a next step */
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
+ ramrod_flags);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+ }
+
+ return rc;
+}
+
+/**
+ * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
+ *
+ * @bp: device handle
+ * @o: vlan_mac object to delete the elements from
+ * @vlan_mac_flags: flags describing which elements to delete
+ * @ramrod_flags: execution flags to be used for this deletion
+ *
+ * Returns 0 if the last operation has completed successfully and there are
+ * no more elements left, a positive value if the last operation has completed
+ * successfully and there are more previously configured elements, and a
+ * negative value if the current operation has failed.
+ */
+static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long *vlan_mac_flags,
+ unsigned long *ramrod_flags)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos = NULL;
+ int rc = 0;
+ struct bnx2x_vlan_mac_ramrod_params p;
+ struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+ struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+
+ /* Clear pending commands first */
+
+ spin_lock_bh(&exeq->lock);
+
+ list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
+ if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
+ *vlan_mac_flags)
+ list_del(&exeq_pos->link);
+ }
+
+ spin_unlock_bh(&exeq->lock);
+
+ /* Prepare a command request */
+ memset(&p, 0, sizeof(p));
+ p.vlan_mac_obj = o;
+ p.ramrod_flags = *ramrod_flags;
+ p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+
+ /*
+ * Add all but the last VLAN-MAC to the execution queue without
+ * actually executing anything.
+ */
+ __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+ __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
+ __clear_bit(RAMROD_CONT, &p.ramrod_flags);
+
+ list_for_each_entry(pos, &o->head, link) {
+ if (pos->vlan_mac_flags == *vlan_mac_flags) {
+ p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
+ memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
+ rc = bnx2x_config_vlan_mac(bp, &p);
+ if (rc < 0) {
+ BNX2X_ERR("Failed to add a new DEL command\n");
+ return rc;
+ }
+ }
+ }
+
+ p.ramrod_flags = *ramrod_flags;
+ __set_bit(RAMROD_CONT, &p.ramrod_flags);
+
+ return bnx2x_config_vlan_mac(bp, &p);
+}
+
+static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
+ u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type)
+{
+ raw->func_id = func_id;
+ raw->cid = cid;
+ raw->cl_id = cl_id;
+ raw->rdata = rdata;
+ raw->rdata_mapping = rdata_mapping;
+ raw->state = state;
+ raw->pstate = pstate;
+ raw->obj_type = type;
+ raw->check_pending = bnx2x_raw_check_pending;
+ raw->clear_pending = bnx2x_raw_clear_pending;
+ raw->set_pending = bnx2x_raw_set_pending;
+ raw->wait_comp = bnx2x_raw_wait;
+}
+
+static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool)
+{
+ INIT_LIST_HEAD(&o->head);
+
+ o->macs_pool = macs_pool;
+ o->vlans_pool = vlans_pool;
+
+ o->delete_all = bnx2x_vlan_mac_del_all;
+ o->restore = bnx2x_vlan_mac_restore;
+ o->complete = bnx2x_complete_vlan_mac;
+ o->wait = bnx2x_wait_vlan_mac;
+
+ bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
+ state, pstate, type);
+}
+
+
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool)
+{
+ union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
+
+ bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type,
+ macs_pool, NULL);
+
+ /* CAM credit pool handling */
+ mac_obj->get_credit = bnx2x_get_credit_mac;
+ mac_obj->put_credit = bnx2x_put_credit_mac;
+ mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+ mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+ if (CHIP_IS_E1x(bp)) {
+ mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
+ mac_obj->check_del = bnx2x_check_mac_del;
+ mac_obj->check_add = bnx2x_check_mac_add;
+ mac_obj->check_move = bnx2x_check_move_always_err;
+ mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &mac_obj->exe_queue, 1, qable_obj,
+ bnx2x_validate_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_mac);
+ } else {
+ mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
+ mac_obj->check_del = bnx2x_check_mac_del;
+ mac_obj->check_add = bnx2x_check_mac_add;
+ mac_obj->check_move = bnx2x_check_move;
+ mac_obj->ramrod_cmd =
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
+ qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_mac);
+ }
+}
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *vlans_pool)
+{
+ union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
+
+ bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type, NULL,
+ vlans_pool);
+
+ vlan_obj->get_credit = bnx2x_get_credit_vlan;
+ vlan_obj->put_credit = bnx2x_put_credit_vlan;
+ vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
+ vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
+
+ if (CHIP_IS_E1x(bp)) {
+ BNX2X_ERR("VLAN objects are not supported on E1x chips\n");
+ BUG();
+ } else {
+ vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
+ vlan_obj->check_del = bnx2x_check_vlan_del;
+ vlan_obj->check_add = bnx2x_check_vlan_add;
+ vlan_obj->check_move = bnx2x_check_move;
+ vlan_obj->ramrod_cmd =
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
+ qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan);
+ }
+}
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool)
+{
+ union bnx2x_qable_obj *qable_obj =
+ (union bnx2x_qable_obj *)vlan_mac_obj;
+
+ bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type,
+ macs_pool, vlans_pool);
+
+ /* CAM pool handling */
+ vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+ vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+ /*
+ * CAM offset is relevant for 57710 and 57711 chips only which have a
+ * single CAM for both MACs and VLAN-MAC pairs. So the offset
+ * will be taken from MACs' pool object only.
+ */
+ vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+ vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+ if (CHIP_IS_E1(bp)) {
+ BNX2X_ERR("VLAN-MAC pairs are not supported on E1 chips\n");
+ BUG();
+ } else if (CHIP_IS_E1H(bp)) {
+ vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
+ vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
+ vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
+ vlan_mac_obj->check_move = bnx2x_check_move_always_err;
+ vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_mac_obj->exe_queue, 1, qable_obj,
+ bnx2x_validate_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan_mac);
+ } else {
+ vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
+ vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
+ vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
+ vlan_mac_obj->check_move = bnx2x_check_move;
+ vlan_mac_obj->ramrod_cmd =
+ RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+ /* Exe Queue */
+ bnx2x_exe_queue_init(bp,
+ &vlan_mac_obj->exe_queue,
+ CLASSIFY_RULES_COUNT,
+ qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_optimize_vlan_mac,
+ bnx2x_execute_vlan_mac,
+ bnx2x_exeq_get_vlan_mac);
+ }
+}
+
+/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+static inline void __storm_memset_mac_filters(struct bnx2x *bp,
+ struct tstorm_eth_mac_filter_config *mac_filters,
+ u16 pf_id)
+{
+ size_t size = sizeof(struct tstorm_eth_mac_filter_config);
+
+ u32 addr = BAR_TSTRORM_INTMEM +
+ TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
+
+ __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
+}
+
+static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ /* update the bp MAC filter structure */
+ u32 mask = (1 << p->cl_id);
+
+ struct tstorm_eth_mac_filter_config *mac_filters =
+ (struct tstorm_eth_mac_filter_config *)p->rdata;
+
+ /* the initial setting is drop-all */
+ u8 drop_all_ucast = 1, drop_all_mcast = 1;
+ u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
+ u8 unmatched_unicast = 0;
+
+ /* In E1x we only take the Rx accept flags into account since Tx
+ * switching isn't enabled. */
+ if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
+ /* accept matched ucast */
+ drop_all_ucast = 0;
+
+ if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
+ /* accept matched mcast */
+ drop_all_mcast = 0;
+
+ if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
+ /* accept all ucast */
+ drop_all_ucast = 0;
+ accp_all_ucast = 1;
+ }
+ if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
+ /* accept all mcast */
+ drop_all_mcast = 0;
+ accp_all_mcast = 1;
+ }
+ if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
+ /* accept (all) bcast */
+ accp_all_bcast = 1;
+ if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
+ /* accept unmatched unicasts */
+ unmatched_unicast = 1;
+
+ mac_filters->ucast_drop_all = drop_all_ucast ?
+ mac_filters->ucast_drop_all | mask :
+ mac_filters->ucast_drop_all & ~mask;
+
+ mac_filters->mcast_drop_all = drop_all_mcast ?
+ mac_filters->mcast_drop_all | mask :
+ mac_filters->mcast_drop_all & ~mask;
+
+ mac_filters->ucast_accept_all = accp_all_ucast ?
+ mac_filters->ucast_accept_all | mask :
+ mac_filters->ucast_accept_all & ~mask;
+
+ mac_filters->mcast_accept_all = accp_all_mcast ?
+ mac_filters->mcast_accept_all | mask :
+ mac_filters->mcast_accept_all & ~mask;
+
+ mac_filters->bcast_accept_all = accp_all_bcast ?
+ mac_filters->bcast_accept_all | mask :
+ mac_filters->bcast_accept_all & ~mask;
+
+ mac_filters->unmatched_unicast = unmatched_unicast ?
+ mac_filters->unmatched_unicast | mask :
+ mac_filters->unmatched_unicast & ~mask;
+
+ DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
+ "accp_mcast 0x%x\naccp_bcast 0x%x\n",
+ mac_filters->ucast_drop_all,
+ mac_filters->mcast_drop_all,
+ mac_filters->ucast_accept_all,
+ mac_filters->mcast_accept_all,
+ mac_filters->bcast_accept_all);
+
+ /* write the MAC filter structure*/
+ __storm_memset_mac_filters(bp, mac_filters, p->func_id);
+
+ /* The operation is completed */
+ clear_bit(p->state, p->pstate);
+ smp_mb__after_clear_bit();
+
+ return 0;
+}
+
+/* Setup ramrod data */
+static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
+ struct eth_classify_header *hdr,
+ u8 rule_cnt)
+{
+ hdr->echo = cid;
+ hdr->rule_cnt = rule_cnt;
+}
+
+static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
+ unsigned long accept_flags,
+ struct eth_filter_rules_cmd *cmd,
+ bool clear_accept_all)
+{
+ u16 state;
+
+ /* start with 'drop-all' */
+ state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
+ ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+ if (accept_flags) {
+ if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+
+ if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+ if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+ }
+
+ if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
+ state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+ }
+ if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
+ state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+
+ if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+ }
+ if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
+ state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+ }
+
+ /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
+ if (clear_accept_all) {
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+ }
+
+ cmd->state = cpu_to_le16(state);
+}
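+
+/*
+ * Example: with only BNX2X_ACCEPT_UNICAST and BNX2X_ACCEPT_BROADCAST set
+ * in accept_flags, the UCAST_DROP_ALL bit is cleared and
+ * BCAST_ACCEPT_ALL is added, while multicast stays in drop-all mode.
+ */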
+
+static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ struct eth_filter_rules_ramrod_data *data = p->rdata;
+ int rc;
+ u8 rule_idx = 0;
+
+ /* Reset the ramrod data buffer */
+ memset(data, 0, sizeof(*data));
+
+ /* Setup ramrod data */
+
+ /* Tx (internal switching) */
+ if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = p->cl_id;
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_TX_CMD;
+
+ bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
+ &(data->rules[rule_idx++]), false);
+ }
+
+ /* Rx */
+ if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = p->cl_id;
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_RX_CMD;
+
+ bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
+ &(data->rules[rule_idx++]), false);
+ }
+
+
+ /*
+ * If FCoE queue configuration has been requested, configure the Rx and
+ * internal switching modes for this queue in separate rules.
+ *
+ * The FCoE queue must never be set to ACCEPT_ALL packets of any sort:
+ * MCAST_ALL, UCAST_ALL, BCAST_ALL or UNMATCHED.
+ */
+ if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
+ /* Tx (internal switching) */
+ if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_TX_CMD;
+
+ bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
+ &(data->rules[rule_idx++]),
+ true);
+ }
+
+ /* Rx */
+ if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_RX_CMD;
+
+ bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
+ &(data->rules[rule_idx++]),
+ true);
+ }
+ }
+
+ /*
+ * Set the ramrod header (most importantly - number of rules to
+ * configure).
+ */
+ bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
+
+ DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
+ "tx_accept_flags 0x%lx\n",
+ data->header.rule_cnt, p->rx_accept_flags,
+ p->tx_accept_flags);
+
+ /*
+ * No need for an explicit memory barrier here: the only ordering
+ * we need is between writing the SPQ element and updating the SPQ
+ * producer, and bnx2x_sp_post() already issues a full memory
+ * barrier for that.
+ */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
+ U64_HI(p->rdata_mapping),
+ U64_LO(p->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+ if (rc)
+ return rc;
+
+ /* Ramrod completion is pending */
+ return 1;
+}
+
+static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ return bnx2x_state_wait(bp, p->state, p->pstate);
+}
+
+static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ /* Do nothing */
+ return 0;
+}
+
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ int rc;
+
+ /* Configure the new classification in the chip */
+ rc = p->rx_mode_obj->config_rx_mode(bp, p);
+ if (rc < 0)
+ return rc;
+
+ /* Wait for a ramrod completion if it was requested */
+ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+ rc = p->rx_mode_obj->wait_comp(bp, p);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+ struct bnx2x_rx_mode_obj *o)
+{
+ if (CHIP_IS_E1x(bp)) {
+ o->wait_comp = bnx2x_empty_rx_mode_wait;
+ o->config_rx_mode = bnx2x_set_rx_mode_e1x;
+ } else {
+ o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
+ o->config_rx_mode = bnx2x_set_rx_mode_e2;
+ }
+}
+
+/********************* Multicast verbs: SET, CLEAR ****************************/
+static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
+{
+ return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+}
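+
+/*
+ * Illustration (hypothetical value): if crc32c_le(0, mac, ETH_ALEN)
+ * returned 0xAB123456, the bin would be (0xAB123456 >> 24) & 0xff = 0xAB,
+ * i.e. the top CRC byte selects one of 256 approximate-match bins.
+ */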
+
+struct bnx2x_mcast_mac_elem {
+ struct list_head link;
+ u8 mac[ETH_ALEN];
+ u8 pad[2]; /* For a natural alignment of the following buffer */
+};
+
+struct bnx2x_pending_mcast_cmd {
+ struct list_head link;
+ int type; /* BNX2X_MCAST_CMD_X */
+ union {
+ struct list_head macs_head;
+ u32 macs_num; /* Needed for DEL command */
+ int next_bin; /* Needed for RESTORE flow with approx match */
+ } data;
+
+ bool done; /* set to true, when the command has been handled,
+ * practically used in 57712 handling only, where one pending
+ * command may be handled in a few operations. As long as for
+ * other chips every operation handling is completed in a
+ * single ramrod, there is no need to utilize this field.
+ */
+};
+
+static int bnx2x_mcast_wait(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o)
+{
+ if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
+ o->raw.wait_comp(bp, &o->raw))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ int total_sz;
+ struct bnx2x_pending_mcast_cmd *new_cmd;
+ struct bnx2x_mcast_mac_elem *cur_mac = NULL;
+ struct bnx2x_mcast_list_elem *pos;
+ int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
+ p->mcast_list_len : 0);
+
+ /* If the command is empty ("handle pending commands only"), break */
+ if (!p->mcast_list_len)
+ return 0;
+
+ total_sz = sizeof(*new_cmd) +
+ macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
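+
+ /* Layout note: the command and its MAC array live in a single
+ * allocation - the bnx2x_mcast_mac_elem entries start right after
+ * the bnx2x_pending_mcast_cmd structure (see the ADD case below).
+ */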
+
+ /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
+ new_cmd = kzalloc(total_sz, GFP_ATOMIC);
+
+ if (!new_cmd)
+ return -ENOMEM;
+
+ DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
+ "macs_list_len=%d\n", cmd, macs_list_len);
+
+ INIT_LIST_HEAD(&new_cmd->data.macs_head);
+
+ new_cmd->type = cmd;
+ new_cmd->done = false;
+
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ cur_mac = (struct bnx2x_mcast_mac_elem *)
+ ((u8 *)new_cmd + sizeof(*new_cmd));
+
+ /* Push the MACs of the current command into the pending command's
+ * MACs list: FIFO
+ */
+ list_for_each_entry(pos, &p->mcast_list, link) {
+ memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
+ list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
+ cur_mac++;
+ }
+
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ new_cmd->data.macs_num = p->mcast_list_len;
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ new_cmd->data.next_bin = 0;
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Push the new pending command to the tail of the pending list: FIFO */
+ list_add_tail(&new_cmd->link, &o->pending_cmds_head);
+
+ o->set_sched(o);
+
+ return 1;
+}
+
+/**
+ * bnx2x_mcast_get_next_bin - get the next set bin (index)
+ *
+ * @o: multicast object
+ * @last: index to start looking from (including)
+ *
+ * returns the next found (set) bin or a negative value if none is found.
+ */
+static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
+{
+ int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
+
+ for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
+ if (o->registry.aprox_match.vec[i])
+ for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
+ int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
+ if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
+ vec, cur_bit)) {
+ return cur_bit;
+ }
+ }
+ inner_start = 0;
+ }
+
+ /* None found */
+ return -1;
+}
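+
+/*
+ * Example: with last = 70 the scan starts at vector element
+ * 70 / 64 = 1, bit 70 % 64 = 6 (i.e. absolute bin 70), and resets
+ * inner_start to 0 for every subsequent element so later words are
+ * scanned in full.
+ */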
+
+/**
+ * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
+ *
+ * @o: multicast object
+ *
+ * returns the index of the found bin or -1 if none is found
+ */
+static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
+{
+ int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
+
+ if (cur_bit >= 0)
+ BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
+
+ return cur_bit;
+}
+
+static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ u8 rx_tx_flag = 0;
+
+ if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
+ (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
+
+ if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
+ (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+ rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
+
+ return rx_tx_flag;
+}
+
+static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, int idx,
+ union bnx2x_mcast_config_data *cfg_data,
+ int cmd)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(r->rdata);
+ u8 func_id = r->func_id;
+ u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
+ int bin;
+
+ if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+ rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
+
+ data->rules[idx].cmd_general_data |= rx_tx_add_flag;
+
+ /* Get a bin and update a bins' vector */
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
+ BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ /* If there were no more bins to clear
+ * (bnx2x_mcast_clear_first_bin() returns -1) then we would
+ * "clear" the invalid (0xff) bin.
+ * See bnx2x_mcast_validate_e2() for an explanation of when
+ * this may happen.
+ */
+ bin = bnx2x_mcast_clear_first_bin(o);
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ bin = cfg_data->bin;
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return;
+ }
+
+ DP(BNX2X_MSG_SP, "%s bin %d\n",
+ ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
+ "Setting" : "Clearing"), bin);
+
+ data->rules[idx].bin_id = (u8)bin;
+ data->rules[idx].func_id = func_id;
+ data->rules[idx].engine_id = o->engine_id;
+}
+
+/**
+ * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
+ *
+ * @bp: device handle
+ * @o: multicast object
+ * @start_bin: index in the registry to start from (including)
+ * @rdata_idx: index in the ramrod data to start from
+ *
+ * returns last handled bin index or -1 if all bins have been handled
+ */
+static inline int bnx2x_mcast_handle_restore_cmd_e2(
+ struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
+ int *rdata_idx)
+{
+ int cur_bin, cnt = *rdata_idx;
+ union bnx2x_mcast_config_data cfg_data = {0};
+
+ /* go through the registry and configure the bins from it */
+ for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
+ cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
+
+ cfg_data.bin = (u8)cur_bin;
+ o->set_one_rule(bp, o, cnt, &cfg_data,
+ BNX2X_MCAST_CMD_RESTORE);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
+
+ /* Break if we reached the maximum number
+ * of rules.
+ */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ *rdata_idx = cnt;
+
+ return cur_bin;
+}
+
+static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+ int *line_idx)
+{
+ struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
+ int cnt = *line_idx;
+ union bnx2x_mcast_config_data cfg_data = {0};
+
+ list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
+ link) {
+
+ cfg_data.mac = &pmac_pos->mac[0];
+ o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+ " mcast MAC\n",
+ BNX2X_MAC_PRN_LIST(pmac_pos->mac));
+
+ list_del(&pmac_pos->link);
+
+ /* Break if we reached the maximum number
+ * of rules.
+ */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ *line_idx = cnt;
+
+ /* if no more MACs to configure - we are done */
+ if (list_empty(&cmd_pos->data.macs_head))
+ cmd_pos->done = true;
+}
+
+static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+ int *line_idx)
+{
+ int cnt = *line_idx;
+
+ while (cmd_pos->data.macs_num) {
+ o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
+
+ cnt++;
+
+ cmd_pos->data.macs_num--;
+
+ DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
+ cmd_pos->data.macs_num, cnt);
+
+ /* Break if we reached the maximum
+ * number of rules.
+ */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ *line_idx = cnt;
+
+ /* If we cleared all bins - we are done */
+ if (!cmd_pos->data.macs_num)
+ cmd_pos->done = true;
+}
+
+static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+ int *line_idx)
+{
+ cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
+ line_idx);
+
+ if (cmd_pos->data.next_bin < 0)
+ /* If o->set_restore returned -1 we are done */
+ cmd_pos->done = true;
+ else
+ /* Start from the next bin next time */
+ cmd_pos->data.next_bin++;
+}
+
+static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p)
+{
+ struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
+ int cnt = 0;
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+ list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
+ link) {
+ switch (cmd_pos->type) {
+ case BNX2X_MCAST_CMD_ADD:
+ bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
+ &cnt);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
+ return -EINVAL;
+ }
+
+ /* If the command has been completed - remove it from the list
+ * and free the memory
+ */
+ if (cmd_pos->done) {
+ list_del(&cmd_pos->link);
+ kfree(cmd_pos);
+ }
+
+ /* Break if we reached the maximum number of rules */
+ if (cnt >= o->max_cmd_len)
+ break;
+ }
+
+ return cnt;
+}
+
+static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+ int *line_idx)
+{
+ struct bnx2x_mcast_list_elem *mlist_pos;
+ union bnx2x_mcast_config_data cfg_data = {0};
+ int cnt = *line_idx;
+
+ list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+ cfg_data.mac = mlist_pos->mac;
+ o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+ " mcast MAC\n",
+ BNX2X_MAC_PRN_LIST(mlist_pos->mac));
+ }
+
+ *line_idx = cnt;
+}
+
+static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+ int *line_idx)
+{
+ int cnt = *line_idx, i;
+
+ for (i = 0; i < p->mcast_list_len; i++) {
+ o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
+ p->mcast_list_len - i - 1);
+ }
+
+ *line_idx = cnt;
+}
+
+/**
+ * bnx2x_mcast_handle_current_cmd - handle the current command in the ramrod data
+ *
+ * @bp: device handle
+ * @p: multicast ramrod parameters
+ * @cmd: command to handle (ADD/DEL/RESTORE)
+ * @start_cnt: first line in the ramrod data that may be used
+ *
+ * This function is called iff there is enough room for the current command in
+ * the ramrod data.
+ * Returns the total number of lines filled in the ramrod data.
+ */
+static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p, int cmd,
+ int start_cnt)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ int cnt = start_cnt;
+
+ DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ bnx2x_mcast_hdl_add(bp, o, p, &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ bnx2x_mcast_hdl_del(bp, o, p, &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ o->hdl_restore(bp, o, 0, &cnt);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* The current command has been handled */
+ p->mcast_list_len = 0;
+
+ return cnt;
+}
+
+static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ int reg_sz = o->get_registry_size(o);
+
+ switch (cmd) {
+ /* DEL command deletes all currently configured MACs */
+ case BNX2X_MCAST_CMD_DEL:
+ o->set_registry_size(o, 0);
+ /* Don't break */
+
+ /* RESTORE command will restore the entire multicast configuration */
+ case BNX2X_MCAST_CMD_RESTORE:
+ /* Here we set the approximate amount of work to do, which may
+ * in fact be less: some MACs in postponed ADD command(s)
+ * scheduled before this command may fall into the same bin,
+ * so the actual number of bins set in the registry would be
+ * less than estimated here. See
+ * bnx2x_mcast_set_one_rule_e2() for further details.
+ */
+ p->mcast_list_len = reg_sz;
+ break;
+
+ case BNX2X_MCAST_CMD_ADD:
+ case BNX2X_MCAST_CMD_CONT:
+ /* Here we assume that all new MACs will fall into new bins.
+ * However we will correct the real registry size after we
+ * handle all pending commands.
+ */
+ o->set_registry_size(o, reg_sz + p->mcast_list_len);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+
+ }
+
+ /* Increase the total number of MACs pending to be configured */
+ o->total_pending_num += p->mcast_list_len;
+
+ return 0;
+}
+
+static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_bins)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+ o->set_registry_size(o, old_num_bins);
+ o->total_pending_num -= p->mcast_list_len;
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e2 - set the header values
+ *
+ * @bp: device handle
+ * @p: multicast ramrod parameters
+ * @len: number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ u8 len)
+{
+ struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(r->rdata);
+
+ data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+ data->header.rule_cnt = len;
+}
+
+/**
+ * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
+ *
+ * @bp: device handle
+ * @o: multicast object
+ *
+ * Recalculate the actual number of set bins in the registry using Brian
+ * Kernighan's algorithm: its execution complexity is proportional to the
+ * number of set bins.
+ *
+ * Returns 0 for compliance with bnx2x_mcast_refresh_registry_e1().
+ */
+static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o)
+{
+ int i, cnt = 0;
+ u64 elem;
+
+ for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
+ elem = o->registry.aprox_match.vec[i];
+ for (; elem; cnt++)
+ elem &= elem - 1;
+ }
+
+ o->set_registry_size(o, cnt);
+
+ return 0;
+}
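+
+/*
+ * Worked example of the bit-clearing loop above (Kernighan's method):
+ * for elem = 0b101100, successive "elem &= elem - 1" steps yield
+ * 0b101000 -> 0b100000 -> 0b000000, i.e. three iterations for three
+ * set bins, so only the set bits cost work.
+ */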
+
+static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
+ int cnt = 0, rc;
+
+ /* Reset the ramrod data buffer */
+ memset(data, 0, sizeof(*data));
+
+ cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
+
+ /* If there are no more pending commands - clear SCHEDULED state */
+ if (list_empty(&o->pending_cmds_head))
+ o->clear_sched(o);
+
+ /* The below may be true iff there was enough room in ramrod
+ * data for all pending commands and for the current
+ * command. Otherwise the current command would have been added
+ * to the pending commands and p->mcast_list_len would have been
+ * zeroed.
+ */
+ if (p->mcast_list_len > 0)
+ cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
+
+ /* We've pulled out some MACs - update the total number of
+ * outstanding.
+ */
+ o->total_pending_num -= cnt;
+
+ /* send a ramrod */
+ WARN_ON(o->total_pending_num < 0);
+ WARN_ON(cnt > o->max_cmd_len);
+
+ bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
+
+ /* Update a registry size if there are no more pending operations.
+ *
+ * We don't want to change the value of the registry size if there are
+ * pending operations because we want it to always be equal to the
+ * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
+ * set bins after the last requested operation in order to properly
+ * evaluate the size of the next DEL/RESTORE operation.
+ *
+ * Note that we update the registry itself during command(s) handling
+ * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
+ * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
+ * with a limited amount of update commands (per MAC/bin) and we don't
+ * know in this scope what the actual state of bins configuration is
+ * going to be after this ramrod.
+ */
+ if (!o->total_pending_num)
+ bnx2x_mcast_refresh_registry_e2(bp, o);
+
+ /*
+ * If CLEAR_ONLY was requested - don't send a ramrod and clear
+ * RAMROD_PENDING status immediately.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ raw->clear_pending(raw);
+ return 0;
+ } else {
+ /*
+ * No need for an explicit memory barrier here: the only ordering
+ * we need is between writing the SPQ element and updating the SPQ
+ * producer, and bnx2x_sp_post() already issues a full memory
+ * barrier for that.
+ */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+ raw->cid, U64_HI(raw->rdata_mapping),
+ U64_LO(raw->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+ if (rc)
+ return rc;
+
+ /* Ramrod completion is pending */
+ return 1;
+ }
+}
+
+static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ /* Mark, that there is a work to do */
+ if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+ p->mcast_list_len = 1;
+
+ return 0;
+}
+
+static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_bins)
+{
+ /* Do nothing */
+}
+
+#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
+do { \
+ (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
+} while (0)
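+
+/*
+ * Example: for bit 37 the macro sets bit (37 & 0x1f) = 5 in
+ * filter[37 >> 5] = filter[1], i.e. each u32 word of the MC hash
+ * covers 32 consecutive bins.
+ */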
+
+static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o,
+ struct bnx2x_mcast_ramrod_params *p,
+ u32 *mc_filter)
+{
+ struct bnx2x_mcast_list_elem *mlist_pos;
+ int bit;
+
+ list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+ bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
+ BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+
+ DP(BNX2X_MSG_SP, "About to configure "
+ BNX2X_MAC_FMT" mcast MAC, bin %d\n",
+ BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit);
+
+ /* bookkeeping... */
+ BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
+ bit);
+ }
+}
+
+static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+ u32 *mc_filter)
+{
+ int bit;
+
+ for (bit = bnx2x_mcast_get_next_bin(o, 0);
+ bit >= 0;
+ bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
+ BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+ DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
+ }
+}
+
+/* On 57711 we write the multicast MACs' approximate match
+ * table directly into the TSTORM internal RAM, so we don't
+ * need any tricks to make it work.
+ */
+static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ int i;
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+
+ /* If CLEAR_ONLY has been requested - only clear the registry;
+ * otherwise build and write the new multicast filter.
+ * In both cases clear the pending bit afterwards.
+ */
+ if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ u32 mc_filter[MC_HASH_SIZE] = {0};
+
+ /* Set the multicast filter bits before writing it into
+ * the internal memory.
+ */
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ DP(BNX2X_MSG_SP, "Invalidating multicast "
+ "MACs configuration\n");
+
+ /* clear the registry */
+ memset(o->registry.aprox_match.vec, 0,
+ sizeof(o->registry.aprox_match.vec));
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Set the mcast filter in the internal memory */
+ for (i = 0; i < MC_HASH_SIZE; i++)
+ REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
+ } else
+ /* clear the registry */
+ memset(o->registry.aprox_match.vec, 0,
+ sizeof(o->registry.aprox_match.vec));
+
+ /* We are done */
+ r->clear_pending(r);
+
+ return 0;
+}
+
+static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ int reg_sz = o->get_registry_size(o);
+
+ switch (cmd) {
+ /* DEL command deletes all currently configured MACs */
+ case BNX2X_MCAST_CMD_DEL:
+ o->set_registry_size(o, 0);
+ /* Don't break */
+
+ /* RESTORE command will restore the entire multicast configuration */
+ case BNX2X_MCAST_CMD_RESTORE:
+ p->mcast_list_len = reg_sz;
+ DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
+ cmd, p->mcast_list_len);
+ break;
+
+ case BNX2X_MCAST_CMD_ADD:
+ case BNX2X_MCAST_CMD_CONT:
+ /* Multicast MACs on 57710 are configured as unicast MACs and
+ * there is only a limited number of CAM entries for that
+ * matter.
+ */
+ if (p->mcast_list_len > o->max_cmd_len) {
+ BNX2X_ERR("Can't configure more than %d multicast MACs "
+ "on 57710\n", o->max_cmd_len);
+ return -EINVAL;
+ }
+ /* Every configured MAC should be cleared if a DEL command is
+ * called. Only the last ADD command is relevant as long as
+ * every ADD command overrides the previous configuration.
+ */
+ DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+ if (p->mcast_list_len > 0)
+ o->set_registry_size(o, p->mcast_list_len);
+
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+
+ }
+
+ /* We want to ensure that commands are executed one by one for 57710.
+ * Therefore each non-empty command will consume o->max_cmd_len.
+ */
+ if (p->mcast_list_len)
+ o->total_pending_num += o->max_cmd_len;
+
+ return 0;
+}
+
+static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_macs)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+ o->set_registry_size(o, old_num_macs);
+
+ /* If the current command hasn't been handled yet and we are
+ * here, it means that it's meant to be dropped and we have to
+ * update the number of outstanding MACs accordingly.
+ */
+ if (p->mcast_list_len)
+ o->total_pending_num -= o->max_cmd_len;
+}
+
+static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, int idx,
+ union bnx2x_mcast_config_data *cfg_data,
+ int cmd)
+{
+ struct bnx2x_raw_obj *r = &o->raw;
+ struct mac_configuration_cmd *data =
+ (struct mac_configuration_cmd *)(r->rdata);
+
+ /* copy mac */
+ if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
+ bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
+ &data->config_table[idx].middle_mac_addr,
+ &data->config_table[idx].lsb_mac_addr,
+ cfg_data->mac);
+
+ data->config_table[idx].vlan_id = 0;
+ data->config_table[idx].pf_id = r->func_id;
+ data->config_table[idx].clients_bit_vector =
+ cpu_to_le32(1 << r->cl_id);
+
+ SET_FLAG(data->config_table[idx].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_SET);
+ }
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
+ *
+ * @bp: device handle
+ * @p: multicast ramrod parameters
+ * @len: number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ u8 len)
+{
+ struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+ struct mac_configuration_cmd *data =
+ (struct mac_configuration_cmd *)(r->rdata);
+
+ u8 offset = (CHIP_REV_IS_SLOW(bp) ?
+ BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
+ BNX2X_MAX_MULTICAST*(1 + r->func_id));
+
+ data->hdr.offset = offset;
+ data->hdr.client_id = 0xff;
+ data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+ data->hdr.length = len;
+}
+
+/**
+ * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
+ *
+ * @bp: device handle
+ * @o: multicast object
+ * @start_idx: index in the registry to start from
+ * @rdata_idx: index in the ramrod data to start from
+ *
+ * The restore command for 57710 is, like all other commands, always a
+ * stand-alone command - start_idx and rdata_idx will always be 0. This
+ * function will always succeed.
+ * Returns -1 to comply with the 57712 variant.
+ */
+static inline int bnx2x_mcast_handle_restore_cmd_e1(
+ struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
+ int *rdata_idx)
+{
+ struct bnx2x_mcast_mac_elem *elem;
+ int i = 0;
+ union bnx2x_mcast_config_data cfg_data = {0};
+
+ /* go through the registry and configure the MACs from it. */
+ list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
+ cfg_data.mac = &elem->mac[0];
+ o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
+
+ i++;
+
+ DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+ " mcast MAC\n",
+ BNX2X_MAC_PRN_LIST(cfg_data.mac));
+ }
+
+ *rdata_idx = i;
+
+ return -1;
+}
+
+
+static inline int bnx2x_mcast_handle_pending_cmds_e1(
+ struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
+{
+ struct bnx2x_pending_mcast_cmd *cmd_pos;
+ struct bnx2x_mcast_mac_elem *pmac_pos;
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ union bnx2x_mcast_config_data cfg_data = {0};
+ int cnt = 0;
+
+
+ /* If nothing to be done - return */
+ if (list_empty(&o->pending_cmds_head))
+ return 0;
+
+ /* Handle the first command */
+ cmd_pos = list_first_entry(&o->pending_cmds_head,
+ struct bnx2x_pending_mcast_cmd, link);
+
+ switch (cmd_pos->type) {
+ case BNX2X_MCAST_CMD_ADD:
+ list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
+ cfg_data.mac = &pmac_pos->mac[0];
+ o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+ " mcast MAC\n",
+ BNX2X_MAC_PRN_LIST(pmac_pos->mac));
+ }
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ cnt = cmd_pos->data.macs_num;
+ DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ o->hdl_restore(bp, o, 0, &cnt);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
+ return -EINVAL;
+ }
+
+ list_del(&cmd_pos->link);
+ kfree(cmd_pos);
+
+ return cnt;
+}
+
+/**
+ * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
+ *
+ * @fw_hi: most significant 16 bits of the MAC in FW format
+ * @fw_mid: middle 16 bits of the MAC in FW format
+ * @fw_lo: least significant 16 bits of the MAC in FW format
+ * @mac: buffer to return the extracted MAC address
+ */
+static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+ __le16 *fw_lo, u8 *mac)
+{
+ mac[1] = ((u8 *)fw_hi)[0];
+ mac[0] = ((u8 *)fw_hi)[1];
+ mac[3] = ((u8 *)fw_mid)[0];
+ mac[2] = ((u8 *)fw_mid)[1];
+ mac[5] = ((u8 *)fw_lo)[0];
+ mac[4] = ((u8 *)fw_lo)[1];
+}
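+
+/*
+ * Sketch of the byte shuffling above (hypothetical address): if the FW
+ * halves hold {0x11, 0x00}, {0x33, 0x22}, {0x55, 0x44} in memory, the
+ * assignments rebuild mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
+ * i.e. each 16-bit half is byte-swapped back into the MAC array.
+ */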
+
+/**
+ * bnx2x_mcast_refresh_registry_e1 - update the exact match registry
+ *
+ * @bp: device handle
+ * @o: multicast object
+ *
+ * Check the first entry flag in the ramrod data to see if it was a DELETE or
+ * an ADD command and update the registry accordingly: if ADD - allocate memory
+ * and add the entries to the registry (list), if DELETE - clear the registry
+ * and free the memory.
+ */
+static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o)
+{
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct bnx2x_mcast_mac_elem *elem;
+ struct mac_configuration_cmd *data =
+ (struct mac_configuration_cmd *)(raw->rdata);
+
+ /* If first entry contains a SET bit - the command was ADD,
+ * otherwise - DEL_ALL
+ */
+ if (GET_FLAG(data->config_table[0].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
+ int i, len = data->hdr.length;
+
+ /* Break if it was a RESTORE command */
+ if (!list_empty(&o->registry.exact_match.macs))
+ return 0;
+
+ elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
+ if (!elem) {
+ BNX2X_ERR("Failed to allocate registry memory\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < len; i++, elem++) {
+ bnx2x_get_fw_mac_addr(
+ &data->config_table[i].msb_mac_addr,
+ &data->config_table[i].middle_mac_addr,
+ &data->config_table[i].lsb_mac_addr,
+ elem->mac);
+ DP(BNX2X_MSG_SP, "Adding registry entry for ["
+ BNX2X_MAC_FMT"]\n",
+ BNX2X_MAC_PRN_LIST(elem->mac));
+ list_add_tail(&elem->link,
+ &o->registry.exact_match.macs);
+ }
+ } else {
+ elem = list_first_entry(&o->registry.exact_match.macs,
+ struct bnx2x_mcast_mac_elem, link);
+ DP(BNX2X_MSG_SP, "Deleting a registry\n");
+ kfree(elem);
+ INIT_LIST_HEAD(&o->registry.exact_match.macs);
+ }
+
+ return 0;
+}
+
+static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ struct bnx2x_raw_obj *raw = &o->raw;
+ struct mac_configuration_cmd *data =
+ (struct mac_configuration_cmd *)(raw->rdata);
+ int cnt = 0, i, rc;
+
+ /* Reset the ramrod data buffer */
+ memset(data, 0, sizeof(*data));
+
+ /* First set all entries as invalid */
+ for (i = 0; i < o->max_cmd_len ; i++)
+ SET_FLAG(data->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+
+ /* Handle pending commands first */
+ cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
+
+ /* If there are no more pending commands - clear SCHEDULED state */
+ if (list_empty(&o->pending_cmds_head))
+ o->clear_sched(o);
+
+ /* The below may be true iff there were no pending commands */
+ if (!cnt)
+ cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
+
+ /* For 57710 every command has o->max_cmd_len length to ensure that
+ * commands are done one at a time.
+ */
+ o->total_pending_num -= o->max_cmd_len;
+
+ /* send a ramrod */
+
+ WARN_ON(cnt > o->max_cmd_len);
+
+ /* Set ramrod header (in particular, a number of entries to update) */
+ bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
+
+ /* Update the registry: its contents must always be up to date in
+ * order to be able to execute a RESTORE opcode. Here we use the
+ * fact that for 57710 we send one command at a time, hence we may
+ * take the registry update out of the command handling and do it
+ * in a simpler way here.
+ */
+ rc = bnx2x_mcast_refresh_registry_e1(bp, o);
+ if (rc)
+ return rc;
+
+ /*
+ * If CLEAR_ONLY was requested - don't send a ramrod and clear
+ * RAMROD_PENDING status immediately.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ raw->clear_pending(raw);
+ return 0;
+ } else {
+ /*
+ * No need for an explicit memory barrier here: the only ordering
+ * we need is between writing the SPQ element and updating the SPQ
+ * producer, and bnx2x_sp_post() already issues a full memory
+ * barrier for that.
+ */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
+ U64_HI(raw->rdata_mapping),
+ U64_LO(raw->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+ if (rc)
+ return rc;
+
+ /* Ramrod completion is pending */
+ return 1;
+ }
+}
+
+static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
+{
+ return o->registry.exact_match.num_macs_set;
+}
+
+static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
+{
+ return o->registry.aprox_match.num_bins_set;
+}
+
+static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
+ int n)
+{
+ o->registry.exact_match.num_macs_set = n;
+}
+
+static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
+ int n)
+{
+ o->registry.aprox_match.num_bins_set = n;
+}
+
+int bnx2x_config_mcast(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+ int rc = 0, old_reg_size;
+
+ /* This is needed to recover the number of currently configured
+ * mcast MACs in case of failure.
+ */
+ old_reg_size = o->get_registry_size(o);
+
+ /* Do some calculations and checks */
+ rc = o->validate(bp, p, cmd);
+ if (rc)
+ return rc;
+
+ /* Return if there is no work to do */
+ if ((!p->mcast_list_len) && (!o->check_sched(o)))
+ return 0;
+
+ DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
+ "o->max_cmd_len=%d\n", o->total_pending_num,
+ p->mcast_list_len, o->max_cmd_len);
+
+ /* Enqueue the current command to the pending list if we can't complete
+ * it in the current iteration
+ */
+ if (r->check_pending(r) ||
+ ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
+ rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
+ if (rc < 0)
+ goto error_exit1;
+
+ /* As long as the current command is in a command list we
+ * don't need to handle it separately.
+ */
+ p->mcast_list_len = 0;
+ }
+
+ if (!r->check_pending(r)) {
+
+ /* Set 'pending' state */
+ r->set_pending(r);
+
+ /* Configure the new classification in the chip */
+ rc = o->config_mcast(bp, p, cmd);
+ if (rc < 0)
+ goto error_exit2;
+
+ /* Wait for a ramrod completion if it was requested */
+ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+ rc = o->wait_comp(bp, o);
+ }
+
+ return rc;
+
+error_exit2:
+ r->clear_pending(r);
+
+error_exit1:
+ o->revert(bp, p, old_reg_size);
+
+ return rc;
+}
+
+static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(o->sched_state, o->raw.pstate);
+ smp_mb__after_clear_bit();
+}
+
+static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
+{
+ smp_mb__before_clear_bit();
+ set_bit(o->sched_state, o->raw.pstate);
+ smp_mb__after_clear_bit();
+}
+
+static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
+{
+ return !!test_bit(o->sched_state, o->raw.pstate);
+}
+
+static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
+{
+ return o->raw.check_pending(&o->raw) || o->check_sched(o);
+}
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *mcast_obj,
+ u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+ u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate, bnx2x_obj_type type)
+{
+ memset(mcast_obj, 0, sizeof(*mcast_obj));
+
+ bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
+ rdata, rdata_mapping, state, pstate, type);
+
+ mcast_obj->engine_id = engine_id;
+
+ INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
+
+ mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
+ mcast_obj->check_sched = bnx2x_mcast_check_sched;
+ mcast_obj->set_sched = bnx2x_mcast_set_sched;
+ mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
+
+ if (CHIP_IS_E1(bp)) {
+ mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
+ mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
+ mcast_obj->hdl_restore =
+ bnx2x_mcast_handle_restore_cmd_e1;
+ mcast_obj->check_pending = bnx2x_mcast_check_pending;
+
+ if (CHIP_REV_IS_SLOW(bp))
+ mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
+ else
+ mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
+
+ mcast_obj->wait_comp = bnx2x_mcast_wait;
+ mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
+ mcast_obj->validate = bnx2x_mcast_validate_e1;
+ mcast_obj->revert = bnx2x_mcast_revert_e1;
+ mcast_obj->get_registry_size =
+ bnx2x_mcast_get_registry_size_exact;
+ mcast_obj->set_registry_size =
+ bnx2x_mcast_set_registry_size_exact;
+
+ /* 57710 is the only chip that uses the exact match for mcast
+ * at the moment.
+ */
+ INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
+
+ } else if (CHIP_IS_E1H(bp)) {
+ mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
+ mcast_obj->enqueue_cmd = NULL;
+ mcast_obj->hdl_restore = NULL;
+ mcast_obj->check_pending = bnx2x_mcast_check_pending;
+
+ /* 57711 doesn't send a ramrod, so it has unlimited credit
+ * for one command.
+ */
+ mcast_obj->max_cmd_len = -1;
+ mcast_obj->wait_comp = bnx2x_mcast_wait;
+ mcast_obj->set_one_rule = NULL;
+ mcast_obj->validate = bnx2x_mcast_validate_e1h;
+ mcast_obj->revert = bnx2x_mcast_revert_e1h;
+ mcast_obj->get_registry_size =
+ bnx2x_mcast_get_registry_size_aprox;
+ mcast_obj->set_registry_size =
+ bnx2x_mcast_set_registry_size_aprox;
+ } else {
+ mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
+ mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
+ mcast_obj->hdl_restore =
+ bnx2x_mcast_handle_restore_cmd_e2;
+ mcast_obj->check_pending = bnx2x_mcast_check_pending;
+ /* TODO: There should be a proper HSI define for this number!!!
+ */
+ mcast_obj->max_cmd_len = 16;
+ mcast_obj->wait_comp = bnx2x_mcast_wait;
+ mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
+ mcast_obj->validate = bnx2x_mcast_validate_e2;
+ mcast_obj->revert = bnx2x_mcast_revert_e2;
+ mcast_obj->get_registry_size =
+ bnx2x_mcast_get_registry_size_aprox;
+ mcast_obj->set_registry_size =
+ bnx2x_mcast_set_registry_size_aprox;
+ }
+}
+
+/*************************** Credit handling **********************************/
+
+/**
+ * __atomic_add_ifless - add if the result is less than a given value.
+ *
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...if (v + a) is less than u.
+ *
+ * returns true if (v + a) was less than u, and false otherwise.
+ *
+ */
+static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
+{
+ int c, old;
+
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c + a >= u))
+ return false;
+
+ old = atomic_cmpxchg((v), c, c + a);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+
+ return true;
+}
+
+/**
+ * __atomic_dec_ifmoe - dec if the result is more than or equal to a given value.
+ *
+ * @v: pointer of type atomic_t
+ * @a: the amount to subtract from v...
+ * @u: ...if (v - a) is more than or equal to u.
+ *
+ * returns true if (v - a) was more than or equal to u, and false
+ * otherwise.
+ */
+static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
+{
+ int c, old;
+
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c - a < u))
+ return false;
+
+ old = atomic_cmpxchg((v), c, c - a);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+
+ return true;
+}
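+
+/* Worked example for the two helpers above (an illustrative sketch only,
+ * not code the driver runs): with a pool of 8 credits,
+ * __atomic_dec_ifmoe() refuses to drop below 0 and __atomic_add_ifless()
+ * refuses to grow past the pool size:
+ *
+ *	atomic_t credit = ATOMIC_INIT(8);
+ *
+ *	__atomic_dec_ifmoe(&credit, 3, 0);       // true,  credit == 5
+ *	__atomic_dec_ifmoe(&credit, 6, 0);       // false, credit unchanged
+ *	__atomic_add_ifless(&credit, 3, 8 + 1);  // true,  credit == 8
+ *	__atomic_add_ifless(&credit, 1, 8 + 1);  // false, pool already full
+ */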
+
+static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+ bool rc;
+
+ smp_mb();
+ rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
+ smp_mb();
+
+ return rc;
+}
+
+static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+ bool rc;
+
+ smp_mb();
+
+ /* Don't allow a refill if credit + cnt > pool_sz */
+ rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
+
+ smp_mb();
+
+ return rc;
+}
+
+static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
+{
+ int cur_credit;
+
+ smp_mb();
+ cur_credit = atomic_read(&o->credit);
+
+ return cur_credit;
+}
+
+static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
+ int cnt)
+{
+ return true;
+}
+
+static bool bnx2x_credit_pool_get_entry(
+ struct bnx2x_credit_pool_obj *o,
+ int *offset)
+{
+ int idx, vec, i;
+
+ *offset = -1;
+
+ /* Find "internal cam-offset" then add to base for this object... */
+ for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
+
+ /* Skip the current vector if there are no free entries in it */
+ if (!o->pool_mirror[vec])
+ continue;
+
+ /* If we've got here we are going to find a free entry */
+ for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
+ i < BIT_VEC64_ELEM_SZ; idx++, i++)
+
+ if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
+ /* Got one!! */
+ BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
+ *offset = o->base_pool_offset + idx;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool bnx2x_credit_pool_put_entry(
+ struct bnx2x_credit_pool_obj *o,
+ int offset)
+{
+ if (offset < o->base_pool_offset)
+ return false;
+
+ offset -= o->base_pool_offset;
+
+ if (offset >= o->pool_sz)
+ return false;
+
+ /* Return the entry to the pool */
+ BIT_VEC64_SET_BIT(o->pool_mirror, offset);
+
+ return true;
+}
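+
+/* Illustrative round trip for the two entry helpers above (a sketch only):
+ * get_entry() hands out an absolute CAM offset (base_pool_offset + bit
+ * index) and put_entry() takes the same absolute offset back:
+ *
+ *	int offset;
+ *
+ *	if (o->get_entry(o, &offset)) {
+ *		... program the CAM entry at 'offset' ...
+ *		o->put_entry(o, offset);	// release it again
+ *	}
+ */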
+
+static bool bnx2x_credit_pool_put_entry_always_true(
+ struct bnx2x_credit_pool_obj *o,
+ int offset)
+{
+ return true;
+}
+
+static bool bnx2x_credit_pool_get_entry_always_true(
+ struct bnx2x_credit_pool_obj *o,
+ int *offset)
+{
+ *offset = -1;
+ return true;
+}
+
+/**
+ * bnx2x_init_credit_pool - initialize credit pool internals.
+ *
+ * @p: credit pool object to initialize
+ * @base: Base entry in the CAM to use.
+ * @credit: pool size.
+ *
+ * If base is negative, no CAM entries handling will be performed.
+ * If credit is negative, pool operations will always succeed (unlimited pool).
+ */
+static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+ int base, int credit)
+{
+ /* Zero the object first */
+ memset(p, 0, sizeof(*p));
+
+ /* Set the table to all 1s */
+ memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
+
+ /* Init a pool as full */
+ atomic_set(&p->credit, credit);
+
+ /* The total pool size */
+ p->pool_sz = credit;
+
+ p->base_pool_offset = base;
+
+ /* Commit the change */
+ smp_mb();
+
+ p->check = bnx2x_credit_pool_check;
+
+ /* if pool credit is negative - disable the checks */
+ if (credit >= 0) {
+ p->put = bnx2x_credit_pool_put;
+ p->get = bnx2x_credit_pool_get;
+ p->put_entry = bnx2x_credit_pool_put_entry;
+ p->get_entry = bnx2x_credit_pool_get_entry;
+ } else {
+ p->put = bnx2x_credit_pool_always_true;
+ p->get = bnx2x_credit_pool_always_true;
+ p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+ p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+ }
+
+ /* If base is negative - disable entries handling */
+ if (base < 0) {
+ p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+ p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+ }
+}
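+
+/* Illustrative sketch of the three init flavours described above (the pool
+ * variables are hypothetical; only bnx2x_init_credit_pool() is real):
+ *
+ *	bnx2x_init_credit_pool(&pool_a, 0, 64);   // 64 credits, CAM entries handled
+ *	bnx2x_init_credit_pool(&pool_b, -1, 64);  // 64 credits, no CAM entry handling
+ *	bnx2x_init_credit_pool(&pool_c, 0, -1);   // unlimited: get()/put() always succeed
+ */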
+
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+ struct bnx2x_credit_pool_obj *p, u8 func_id,
+ u8 func_num)
+{
+/* TODO: this will be defined in consts as well... */
+#define BNX2X_CAM_SIZE_EMUL 5
+
+ int cam_sz;
+
+ if (CHIP_IS_E1(bp)) {
+ /* In E1, Multicast is saved in cam... */
+ if (!CHIP_REV_IS_SLOW(bp))
+ cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
+ else
+ cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
+
+ bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+
+ } else if (CHIP_IS_E1H(bp)) {
+ /* CAM credit is equally divided between all active functions
+ * on the PORT.
+ */
+ if (func_num > 0) {
+ if (!CHIP_REV_IS_SLOW(bp))
+ cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
+ else
+ cam_sz = BNX2X_CAM_SIZE_EMUL;
+ bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+ } else {
+ /* this should never happen! Block MAC operations. */
+ bnx2x_init_credit_pool(p, 0, 0);
+ }
+
+ } else {
+
+ /*
+ * CAM credit is equally divided between all active functions
+ * on the PATH.
+ */
+ if (func_num > 0) {
+ if (!CHIP_REV_IS_SLOW(bp))
+ cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+ else
+ cam_sz = BNX2X_CAM_SIZE_EMUL;
+
+ /*
+ * No need for CAM entries handling for 57712 and
+ * newer.
+ */
+ bnx2x_init_credit_pool(p, -1, cam_sz);
+ } else {
+ /* this should never happen! Block MAC operations. */
+ bnx2x_init_credit_pool(p, 0, 0);
+ }
+
+ }
+}
+
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+ struct bnx2x_credit_pool_obj *p,
+ u8 func_id,
+ u8 func_num)
+{
+ if (CHIP_IS_E1x(bp)) {
+ /*
+ * There is no VLAN credit in HW on 57710 and 57711; only
+ * MAC / MAC-VLAN can be set
+ */
+ bnx2x_init_credit_pool(p, 0, -1);
+ } else {
+ /*
+ * CAM credit is equally divided between all active functions
+ * on the PATH.
+ */
+ if (func_num > 0) {
+ int credit = MAX_VLAN_CREDIT_E2 / func_num;
+ bnx2x_init_credit_pool(p, func_id * credit, credit);
+ } else
+ /* this should never happen! Block VLAN operations. */
+ bnx2x_init_credit_pool(p, 0, 0);
+ }
+}
+
+/****************** RSS Configuration ******************/
+/**
+ * bnx2x_debug_print_ind_table - prints the indirection table configuration.
+ *
+ * @bp: driver handle
+ * @p: pointer to rss configuration
+ *
+ * Prints it when NETIF_MSG_IFUP debug level is configured.
+ */
+static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p)
+{
+ int i;
+
+ DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
+ DP(BNX2X_MSG_SP, "0x0000: ");
+ for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+ DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
+
+ /* Print 4 bytes in a line */
+ if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
+ (((i + 1) & 0x3) == 0)) {
+ DP_CONT(BNX2X_MSG_SP, "\n");
+ DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
+ }
+ }
+
+ DP_CONT(BNX2X_MSG_SP, "\n");
+}
+
+/**
+ * bnx2x_setup_rss - configure RSS
+ *
+ * @bp: device handle
+ * @p: rss configuration
+ *
+ * Sends an RSS UPDATE ramrod.
+ */
+static int bnx2x_setup_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p)
+{
+ struct bnx2x_rss_config_obj *o = p->rss_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+ struct eth_rss_update_ramrod_data *data =
+ (struct eth_rss_update_ramrod_data *)(r->rdata);
+ u8 rss_mode = 0;
+ int rc;
+
+ memset(data, 0, sizeof(*data));
+
+ DP(BNX2X_MSG_SP, "Configuring RSS\n");
+
+ /* Set an echo field */
+ data->echo = (r->cid & BNX2X_SWCID_MASK) |
+ (r->state << BNX2X_SWCID_SHIFT);
+
+ /* RSS mode */
+ if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_DISABLED;
+ else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_REGULAR;
+ else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_VLAN_PRI;
+ else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_E1HOV_PRI;
+ else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_IP_DSCP;
+
+ data->rss_mode = rss_mode;
+
+ DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
+
+ /* RSS capabilities */
+ if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+
+ /* Hashing mask */
+ data->rss_result_mask = p->rss_result_mask;
+
+ /* RSS engine ID */
+ data->rss_engine_id = o->engine_id;
+
+ DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
+
+ /* Indirection table */
+ memcpy(data->indirection_table, p->ind_table,
+ T_ETH_INDIRECTION_TABLE_SIZE);
+
+ /* Remember the last configuration */
+ memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+
+ /* Print the indirection table */
+ if (netif_msg_ifup(bp))
+ bnx2x_debug_print_ind_table(bp, p);
+
+ /* RSS keys */
+ if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
+ memcpy(&data->rss_key[0], &p->rss_key[0],
+ sizeof(data->rss_key));
+ data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
+ }
+
+ /*
+ * No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer, which involves a memory
+ * read; if that read is ever removed, a full memory barrier
+ * will be needed there (inside bnx2x_sp_post()).
+ */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
+ U64_HI(r->rdata_mapping),
+ U64_LO(r->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+
+ if (rc < 0)
+ return rc;
+
+ return 1;
+}
+
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+ u8 *ind_table)
+{
+ memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
+}
+
+int bnx2x_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p)
+{
+ int rc;
+ struct bnx2x_rss_config_obj *o = p->rss_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+
+ /* Do nothing if only driver cleanup was requested */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+ return 0;
+
+ r->set_pending(r);
+
+ rc = o->config_rss(bp, p);
+ if (rc < 0) {
+ r->clear_pending(r);
+ return rc;
+ }
+
+ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+ rc = r->wait_comp(bp, r);
+
+ return rc;
+}
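+
+/* Illustrative call sequence for bnx2x_config_rss() (a sketch; the 'params'
+ * variable and the chosen flags are assumptions, not part of this file):
+ *
+ *	struct bnx2x_config_rss_params params = {0};
+ *
+ *	params.rss_obj = rss_obj;		// an initialized bnx2x_rss_config_obj
+ *	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+ *	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+ *	params.rss_result_mask = MULTI_MASK;	// illustrative mask value
+ *	... fill params.ind_table[] (and params.rss_key[] with BNX2X_RSS_SET_SRCH) ...
+ *	rc = bnx2x_config_rss(bp, &params);
+ */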
+
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+ struct bnx2x_rss_config_obj *rss_obj,
+ u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+ void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ bnx2x_obj_type type)
+{
+ bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type);
+
+ rss_obj->engine_id = engine_id;
+ rss_obj->config_rss = bnx2x_setup_rss;
+}
+
+/********************** Queue state object ***********************************/
+
+/**
+ * bnx2x_queue_state_change - perform Queue state change transition
+ *
+ * @bp: device handle
+ * @params: parameters to perform the transition
+ *
+ * returns 0 in case of a successfully completed transition, a negative error
+ * code in case of failure, or a positive (EBUSY) value if a completion is
+ * still pending (possible only if RAMROD_COMP_WAIT is not set in
+ * params->ramrod_flags for asynchronous commands).
+ *
+ */
+int bnx2x_queue_state_change(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ int rc, pending_bit;
+ unsigned long *pending = &o->pending;
+
+ /* Check that the requested transition is legal */
+ if (o->check_transition(bp, o, params))
+ return -EINVAL;
+
+ /* Set "pending" bit */
+ pending_bit = o->set_pending(o, params);
+
+ /* Don't send a command if only driver cleanup was requested */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
+ o->complete_cmd(bp, o, pending_bit);
+ else {
+ /* Send a ramrod */
+ rc = o->send_cmd(bp, params);
+ if (rc) {
+ o->next_state = BNX2X_Q_STATE_MAX;
+ clear_bit(pending_bit, pending);
+ smp_mb__after_clear_bit();
+ return rc;
+ }
+
+ if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+ rc = o->wait_comp(bp, o, pending_bit);
+ if (rc)
+ return rc;
+
+ return 0;
+ }
+ }
+
+ return !!test_bit(pending_bit, pending);
+}
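+
+/* Illustrative usage of bnx2x_queue_state_change() (a sketch; the local
+ * variable names are assumptions):
+ *
+ *	struct bnx2x_queue_state_params q_params = {0};
+ *
+ *	q_params.q_obj = q_obj;		// an initialized bnx2x_queue_sp_obj
+ *	q_params.cmd = BNX2X_Q_CMD_HALT;
+ *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ *	rc = bnx2x_queue_state_change(bp, &q_params);
+ *	// rc == 0: completed, rc < 0: error, rc > 0: completion still pending
+ */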
+
+static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_state_params *params)
+{
+ enum bnx2x_queue_cmd cmd = params->cmd, bit;
+
+ /* ACTIVATE and DEACTIVATE commands are implemented on top of
+ * UPDATE command.
+ */
+ if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
+ (cmd == BNX2X_Q_CMD_DEACTIVATE))
+ bit = BNX2X_Q_CMD_UPDATE;
+ else
+ bit = cmd;
+
+ set_bit(bit, &obj->pending);
+ return bit;
+}
+
+static int bnx2x_queue_wait_comp(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ enum bnx2x_queue_cmd cmd)
+{
+ return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_queue_comp_cmd - complete the state change command.
+ *
+ * @bp: device handle
+ * @o: queue state object
+ * @cmd: command that has completed
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ enum bnx2x_queue_cmd cmd)
+{
+ unsigned long cur_pending = o->pending;
+
+ if (!test_and_clear_bit(cmd, &cur_pending)) {
+ BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
+ "pending 0x%lx, next_state %d\n", cmd,
+ o->cids[BNX2X_PRIMARY_CID_INDEX],
+ o->state, cur_pending, o->next_state);
+ return -EINVAL;
+ }
+
+ if (o->next_tx_only >= o->max_cos)
+ /* >= because tx only must always be smaller than cos since the
+ * primary connection supports COS 0
+ */
+ BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
+ o->next_tx_only, o->max_cos);
+
+ DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
+ "setting state to %d\n", cmd,
+ o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
+
+ if (o->next_tx_only) /* print num tx-only if any exist */
+ DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d",
+ o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
+
+ o->state = o->next_state;
+ o->num_tx_only = o->next_tx_only;
+ o->next_state = BNX2X_Q_STATE_MAX;
+
+ /* It's important that o->state and o->next_state are
+ * updated before o->pending.
+ */
+ wmb();
+
+ clear_bit(cmd, &o->pending);
+ smp_mb__after_clear_bit();
+
+ return 0;
+}
+
+static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *cmd_params,
+ struct client_init_ramrod_data *data)
+{
+ struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
+
+ /* Rx data */
+
+ /* IPv6 TPA supported for E2 and above only */
+ data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
+ CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
+}
+
+static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_general_setup_params *params,
+ struct client_init_general_data *gen_data,
+ unsigned long *flags)
+{
+ gen_data->client_id = o->cl_id;
+
+ if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
+ gen_data->statistics_counter_id =
+ params->stat_id;
+ gen_data->statistics_en_flg = 1;
+ gen_data->statistics_zero_flg =
+ test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
+ } else
+ gen_data->statistics_counter_id =
+ DISABLE_STATISTIC_COUNTER_ID_VALUE;
+
+ gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
+ gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
+ gen_data->sp_client_id = params->spcl_id;
+ gen_data->mtu = cpu_to_le16(params->mtu);
+ gen_data->func_id = o->func_id;
+
+ gen_data->cos = params->cos;
+
+ gen_data->traffic_type =
+ test_bit(BNX2X_Q_FLG_FCOE, flags) ?
+ LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
+
+ DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d",
+ gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
+}
+
+static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_txq_setup_params *params,
+ struct client_init_tx_data *tx_data,
+ unsigned long *flags)
+{
+ tx_data->enforce_security_flg =
+ test_bit(BNX2X_Q_FLG_TX_SEC, flags);
+ tx_data->default_vlan =
+ cpu_to_le16(params->default_vlan);
+ tx_data->default_vlan_flg =
+ test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
+ tx_data->tx_switching_flg =
+ test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
+ tx_data->anti_spoofing_flg =
+ test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
+ tx_data->tx_status_block_id = params->fw_sb_id;
+ tx_data->tx_sb_index_number = params->sb_cq_index;
+ tx_data->tss_leading_client_id = params->tss_leading_cl_id;
+
+ tx_data->tx_bd_page_base.lo =
+ cpu_to_le32(U64_LO(params->dscr_map));
+ tx_data->tx_bd_page_base.hi =
+ cpu_to_le32(U64_HI(params->dscr_map));
+
+ /* Don't configure any Tx switching mode during queue SETUP */
+ tx_data->state = 0;
+}
+
+static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
+ struct rxq_pause_params *params,
+ struct client_init_rx_data *rx_data)
+{
+ /* flow control data */
+ rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
+ rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
+ rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
+ rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
+ rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
+ rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
+ rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
+}
+
+static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_rxq_setup_params *params,
+ struct client_init_rx_data *rx_data,
+ unsigned long *flags)
+{
+ /* Rx data */
+ rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
+ CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
+ rx_data->vmqueue_mode_en_flg = 0;
+
+ rx_data->cache_line_alignment_log_size =
+ params->cache_line_log;
+ rx_data->enable_dynamic_hc =
+ test_bit(BNX2X_Q_FLG_DHC, flags);
+ rx_data->max_sges_for_packet = params->max_sges_pkt;
+ rx_data->client_qzone_id = params->cl_qzone_id;
+ rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
+
+ /* Always start in DROP_ALL mode */
+ rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
+ CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
+
+ /* We don't set drop flags */
+ rx_data->drop_ip_cs_err_flg = 0;
+ rx_data->drop_tcp_cs_err_flg = 0;
+ rx_data->drop_ttl0_flg = 0;
+ rx_data->drop_udp_cs_err_flg = 0;
+ rx_data->inner_vlan_removal_enable_flg =
+ test_bit(BNX2X_Q_FLG_VLAN, flags);
+ rx_data->outer_vlan_removal_enable_flg =
+ test_bit(BNX2X_Q_FLG_OV, flags);
+ rx_data->status_block_id = params->fw_sb_id;
+ rx_data->rx_sb_index_number = params->sb_cq_index;
+ rx_data->max_tpa_queues = params->max_tpa_queues;
+ rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
+ rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
+ rx_data->bd_page_base.lo =
+ cpu_to_le32(U64_LO(params->dscr_map));
+ rx_data->bd_page_base.hi =
+ cpu_to_le32(U64_HI(params->dscr_map));
+ rx_data->sge_page_base.lo =
+ cpu_to_le32(U64_LO(params->sge_map));
+ rx_data->sge_page_base.hi =
+ cpu_to_le32(U64_HI(params->sge_map));
+ rx_data->cqe_page_base.lo =
+ cpu_to_le32(U64_LO(params->rcq_map));
+ rx_data->cqe_page_base.hi =
+ cpu_to_le32(U64_HI(params->rcq_map));
+ rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
+
+ if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
+ rx_data->approx_mcast_engine_id = o->func_id;
+ rx_data->is_approx_mcast = 1;
+ }
+
+ rx_data->rss_engine_id = params->rss_engine_id;
+
+ /* silent vlan removal */
+ rx_data->silent_vlan_removal_flg =
+ test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
+ rx_data->silent_vlan_value =
+ cpu_to_le16(params->silent_removal_value);
+ rx_data->silent_vlan_mask =
+ cpu_to_le16(params->silent_removal_mask);
+}
+
+/* initialize the general, tx and rx parts of a queue object */
+static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *cmd_params,
+ struct client_init_ramrod_data *data)
+{
+ bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
+ &cmd_params->params.setup.gen_params,
+ &data->general,
+ &cmd_params->params.setup.flags);
+
+ bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
+ &cmd_params->params.setup.txq_params,
+ &data->tx,
+ &cmd_params->params.setup.flags);
+
+ bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
+ &cmd_params->params.setup.rxq_params,
+ &data->rx,
+ &cmd_params->params.setup.flags);
+
+ bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
+ &cmd_params->params.setup.pause_params,
+ &data->rx);
+}
+
+/* initialize the general and tx parts of a tx-only queue object */
+static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *cmd_params,
+ struct tx_queue_init_ramrod_data *data)
+{
+ bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
+ &cmd_params->params.tx_only.gen_params,
+ &data->general,
+ &cmd_params->params.tx_only.flags);
+
+ bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
+ &cmd_params->params.tx_only.txq_params,
+ &data->tx,
+ &cmd_params->params.tx_only.flags);
+
+ DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x", cmd_params->q_obj->cids[0],
+ data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
+}
+
+/**
+ * bnx2x_q_init - init HW/FW queue
+ *
+ * @bp: device handle
+ * @params: queue state parameters
+ *
+ * HW/FW initial Queue configuration:
+ * - HC: Rx and Tx
+ * - CDU context validation
+ *
+ */
+static inline int bnx2x_q_init(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct bnx2x_queue_init_params *init = &params->params.init;
+ u16 hc_usec;
+ u8 cos;
+
+ /* Tx HC configuration */
+ if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
+ test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
+ hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
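+ /* e.g. hc_rate == 8000 ints/sec gives hc_usec == 1000000 / 8000 == 125;
+ * hc_rate == 0 disables the timeout (hc_usec == 0)
+ */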
+
+ bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
+ init->tx.sb_cq_index,
+ !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
+ hc_usec);
+ }
+
+ /* Rx HC configuration */
+ if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
+ test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
+ hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
+
+ bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
+ init->rx.sb_cq_index,
+ !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
+ hc_usec);
+ }
+
+ /* Set CDU context validation values */
+ for (cos = 0; cos < o->max_cos; cos++) {
+ DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d",
+ o->cids[cos], cos);
+ DP(BNX2X_MSG_SP, "context pointer %p", init->cxts[cos]);
+ bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
+ }
+
+ /* As no ramrod is sent, complete the command immediately */
+ o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
+
+ mmiowb();
+ smp_mb();
+
+ return 0;
+}
+
+static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct client_init_ramrod_data *rdata =
+ (struct client_init_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+
+ /*
+ * No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer, which involves a memory
+ * read; if that read is ever removed, a full memory barrier
+ * will be needed there (inside bnx2x_sp_post()).
+ */
+
+ return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct client_init_ramrod_data *rdata =
+ (struct client_init_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+ bnx2x_q_fill_setup_data_e2(bp, params, rdata);
+
+ /*
+ * No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer, which involves a memory
+ * read; if that read is ever removed, a full memory barrier
+ * will be needed there (inside bnx2x_sp_post()).
+ */
+
+ return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct tx_queue_init_ramrod_data *rdata =
+ (struct tx_queue_init_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
+ struct bnx2x_queue_setup_tx_only_params *tx_only_params =
+ &params->params.tx_only;
+ u8 cid_index = tx_only_params->cid_index;
+
+ if (cid_index >= o->max_cos) {
+ BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+ o->cl_id, cid_index);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d",
+ tx_only_params->gen_params.cos,
+ tx_only_params->gen_params.spcl_id);
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_setup_tx_only(bp, params, rdata);
+
+ DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, "
+ "sp-client id %d, cos %d",
+ o->cids[cid_index],
+ rdata->general.client_id,
+ rdata->general.sp_client_id, rdata->general.cos);
+
+ /*
+ * No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer, which involves a memory
+ * read; if that read is ever removed, a full memory barrier
+ * will be needed there (inside bnx2x_sp_post()).
+ */
+
+ return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static void bnx2x_q_fill_update_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_update_params *params,
+ struct client_update_ramrod_data *data)
+{
+ /* Client ID of the client to update */
+ data->client_id = obj->cl_id;
+
+ /* Function ID of the client to update */
+ data->func_id = obj->func_id;
+
+ /* Default VLAN value */
+ data->default_vlan = cpu_to_le16(params->def_vlan);
+
+ /* Inner VLAN stripping */
+ data->inner_vlan_removal_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
+ data->inner_vlan_removal_change_flg =
+ test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+ &params->update_flags);
+
+ /* Outer VLAN stripping */
+ data->outer_vlan_removal_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
+ data->outer_vlan_removal_change_flg =
+ test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+ &params->update_flags);
+
+ /* Drop packets that have source MAC that doesn't belong to this
+ * Queue.
+ */
+ data->anti_spoofing_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
+ data->anti_spoofing_change_flg =
+ test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
+
+ /* Activate/Deactivate */
+ data->activate_flg =
+ test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
+ data->activate_change_flg =
+ test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
+
+ /* Enable default VLAN */
+ data->default_vlan_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
+ data->default_vlan_change_flg =
+ test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &params->update_flags);
+
+ /* silent vlan removal */
+ data->silent_vlan_change_flg =
+ test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &params->update_flags);
+ data->silent_vlan_removal_flg =
+ test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
+ data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
+ data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+}
+
+static inline int bnx2x_q_send_update(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct client_update_ramrod_data *rdata =
+ (struct client_update_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_queue_update_params *update_params =
+ &params->params.update;
+ u8 cid_index = update_params->cid_index;
+
+ if (cid_index >= o->max_cos) {
+ BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+ o->cl_id, cid_index);
+ return -EINVAL;
+ }
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_update_data(bp, o, update_params, rdata);
+
+ /*
+ * No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer, which involves a memory
+ * read; if that read is ever removed, a full memory barrier
+ * will be needed there (inside bnx2x_sp_post()).
+ */
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
+ o->cids[cid_index], U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+/**
+ * bnx2x_q_send_deactivate - send DEACTIVATE command
+ *
+ * @bp: device handle
+ * @params: queue state parameters
+ *
+ * implemented using the UPDATE command.
+ */
+static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_update_params *update = &params->params.update;
+
+ memset(update, 0, sizeof(*update));
+
+ __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+ return bnx2x_q_send_update(bp, params);
+}
+
+/**
+ * bnx2x_q_send_activate - send ACTIVATE command
+ *
+ * @bp: device handle
+ * @params: queue state parameters
+ *
+ * implemented using the UPDATE command.
+ */
+static inline int bnx2x_q_send_activate(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_update_params *update = &params->params.update;
+
+ memset(update, 0, sizeof(*update));
+
+ __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+ return bnx2x_q_send_update(bp, params);
+}
+
+static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ /* TODO: Not implemented yet. */
+ return -1;
+}
+
+static inline int bnx2x_q_send_halt(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
+ o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
+ ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ u8 cid_idx = params->params.cfc_del.cid_index;
+
+ if (cid_idx >= o->max_cos) {
+ BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+ o->cl_id, cid_idx);
+ return -EINVAL;
+ }
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
+ o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ u8 cid_index = params->params.terminate.cid_index;
+
+ if (cid_index >= o->max_cos) {
+ BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+ o->cl_id, cid_index);
+ return -EINVAL;
+ }
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
+ o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_empty(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
+ o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
+ ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ switch (params->cmd) {
+ case BNX2X_Q_CMD_INIT:
+ return bnx2x_q_init(bp, params);
+ case BNX2X_Q_CMD_SETUP_TX_ONLY:
+ return bnx2x_q_send_setup_tx_only(bp, params);
+ case BNX2X_Q_CMD_DEACTIVATE:
+ return bnx2x_q_send_deactivate(bp, params);
+ case BNX2X_Q_CMD_ACTIVATE:
+ return bnx2x_q_send_activate(bp, params);
+ case BNX2X_Q_CMD_UPDATE:
+ return bnx2x_q_send_update(bp, params);
+ case BNX2X_Q_CMD_UPDATE_TPA:
+ return bnx2x_q_send_update_tpa(bp, params);
+ case BNX2X_Q_CMD_HALT:
+ return bnx2x_q_send_halt(bp, params);
+ case BNX2X_Q_CMD_CFC_DEL:
+ return bnx2x_q_send_cfc_del(bp, params);
+ case BNX2X_Q_CMD_TERMINATE:
+ return bnx2x_q_send_terminate(bp, params);
+ case BNX2X_Q_CMD_EMPTY:
+ return bnx2x_q_send_empty(bp, params);
+ default:
+ BNX2X_ERR("Unknown command: %d\n", params->cmd);
+ return -EINVAL;
+ }
+}
+
+static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ switch (params->cmd) {
+ case BNX2X_Q_CMD_SETUP:
+ return bnx2x_q_send_setup_e1x(bp, params);
+ case BNX2X_Q_CMD_INIT:
+ case BNX2X_Q_CMD_SETUP_TX_ONLY:
+ case BNX2X_Q_CMD_DEACTIVATE:
+ case BNX2X_Q_CMD_ACTIVATE:
+ case BNX2X_Q_CMD_UPDATE:
+ case BNX2X_Q_CMD_UPDATE_TPA:
+ case BNX2X_Q_CMD_HALT:
+ case BNX2X_Q_CMD_CFC_DEL:
+ case BNX2X_Q_CMD_TERMINATE:
+ case BNX2X_Q_CMD_EMPTY:
+ return bnx2x_queue_send_cmd_cmn(bp, params);
+ default:
+ BNX2X_ERR("Unknown command: %d\n", params->cmd);
+ return -EINVAL;
+ }
+}
+
+static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ switch (params->cmd) {
+ case BNX2X_Q_CMD_SETUP:
+ return bnx2x_q_send_setup_e2(bp, params);
+ case BNX2X_Q_CMD_INIT:
+ case BNX2X_Q_CMD_SETUP_TX_ONLY:
+ case BNX2X_Q_CMD_DEACTIVATE:
+ case BNX2X_Q_CMD_ACTIVATE:
+ case BNX2X_Q_CMD_UPDATE:
+ case BNX2X_Q_CMD_UPDATE_TPA:
+ case BNX2X_Q_CMD_HALT:
+ case BNX2X_Q_CMD_CFC_DEL:
+ case BNX2X_Q_CMD_TERMINATE:
+ case BNX2X_Q_CMD_EMPTY:
+ return bnx2x_queue_send_cmd_cmn(bp, params);
+ default:
+ BNX2X_ERR("Unknown command: %d\n", params->cmd);
+ return -EINVAL;
+ }
+}
+
+/**
+ * bnx2x_queue_chk_transition - check state machine of a regular Queue
+ *
+ * @bp: device handle
+ * @o: queue state object
+ * @params: queue state parameters
+ *
+ * (for a regular, i.e. non-Forwarding, Queue)
+ * It both checks if the requested command is legal in a current
+ * state and, if it's legal, sets a `next_state' in the object
+ * that will be used in the completion flow to set the `state'
+ * of the object.
+ *
+ * returns 0 if a requested command is a legal transition,
+ * -EINVAL otherwise.
+ */
+static int bnx2x_queue_chk_transition(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_queue_state_params *params)
+{
+ enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
+ enum bnx2x_queue_cmd cmd = params->cmd;
+ struct bnx2x_queue_update_params *update_params =
+ &params->params.update;
+ u8 next_tx_only = o->num_tx_only;
+
+ /*
+ * Forget all commands pending for completion if a driver-only state
+ * transition has been requested.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ o->pending = 0;
+ o->next_state = BNX2X_Q_STATE_MAX;
+ }
+
+ /*
+ * Don't allow a next state transition if we are in the middle of
+ * the previous one.
+ */
+ if (o->pending)
+ return -EBUSY;
+
+ switch (state) {
+ case BNX2X_Q_STATE_RESET:
+ if (cmd == BNX2X_Q_CMD_INIT)
+ next_state = BNX2X_Q_STATE_INITIALIZED;
+
+ break;
+ case BNX2X_Q_STATE_INITIALIZED:
+ if (cmd == BNX2X_Q_CMD_SETUP) {
+ if (test_bit(BNX2X_Q_FLG_ACTIVE,
+ &params->params.setup.flags))
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ }
+
+ break;
+ case BNX2X_Q_STATE_ACTIVE:
+ if (cmd == BNX2X_Q_CMD_DEACTIVATE)
+ next_state = BNX2X_Q_STATE_INACTIVE;
+
+ else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+ (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+ next_state = BNX2X_Q_STATE_ACTIVE;
+
+ else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ next_tx_only = 1;
+ }
+
+ else if (cmd == BNX2X_Q_CMD_HALT)
+ next_state = BNX2X_Q_STATE_STOPPED;
+
+ else if (cmd == BNX2X_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags))
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ }
+
+ break;
+ case BNX2X_Q_STATE_MULTI_COS:
+ if (cmd == BNX2X_Q_CMD_TERMINATE)
+ next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
+
+ else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ next_tx_only = o->num_tx_only + 1;
+ }
+
+ else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+ (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+
+ else if (cmd == BNX2X_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags))
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ }
+
+ break;
+ case BNX2X_Q_STATE_MCOS_TERMINATED:
+ if (cmd == BNX2X_Q_CMD_CFC_DEL) {
+ next_tx_only = o->num_tx_only - 1;
+ if (next_tx_only == 0)
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ }
+
+ break;
+ case BNX2X_Q_STATE_INACTIVE:
+ if (cmd == BNX2X_Q_CMD_ACTIVATE)
+ next_state = BNX2X_Q_STATE_ACTIVE;
+
+ else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+ (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+ next_state = BNX2X_Q_STATE_INACTIVE;
+
+ else if (cmd == BNX2X_Q_CMD_HALT)
+ next_state = BNX2X_Q_STATE_STOPPED;
+
+ else if (cmd == BNX2X_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags)) {
+ if (o->num_tx_only == 0)
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ else /* tx only queues exist for this queue */
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ } else
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ }
+
+ break;
+ case BNX2X_Q_STATE_STOPPED:
+ if (cmd == BNX2X_Q_CMD_TERMINATE)
+ next_state = BNX2X_Q_STATE_TERMINATED;
+
+ break;
+ case BNX2X_Q_STATE_TERMINATED:
+ if (cmd == BNX2X_Q_CMD_CFC_DEL)
+ next_state = BNX2X_Q_STATE_RESET;
+
+ break;
+ default:
+ BNX2X_ERR("Illegal state: %d\n", state);
+ }
+
+ /* Transition is assured */
+ if (next_state != BNX2X_Q_STATE_MAX) {
+ DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
+ state, cmd, next_state);
+ o->next_state = next_state;
+ o->next_tx_only = next_tx_only;
+ return 0;
+ }
+
+ DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
+
+ return -EINVAL;
+}
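+
+/* Summary of the regular Queue state machine checked above (derived from the
+ * switch statement; UPDATE/EMPTY/UPDATE_TPA self-transitions omitted):
+ *
+ *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
+ *	INACTIVE --ACTIVATE--> ACTIVE,  ACTIVE --DEACTIVATE--> INACTIVE
+ *	ACTIVE/MULTI_COS --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
+ *	MCOS_TERMINATED --CFC_DEL--> ACTIVE or MULTI_COS (by remaining tx-only count)
+ *	ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED --CFC_DEL--> RESET
+ */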
+
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
+ void *rdata,
+ dma_addr_t rdata_mapping, unsigned long type)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
+ BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
+
+ memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
+ obj->max_cos = cid_cnt;
+ obj->cl_id = cl_id;
+ obj->func_id = func_id;
+ obj->rdata = rdata;
+ obj->rdata_mapping = rdata_mapping;
+ obj->type = type;
+ obj->next_state = BNX2X_Q_STATE_MAX;
+
+ if (CHIP_IS_E1x(bp))
+ obj->send_cmd = bnx2x_queue_send_cmd_e1x;
+ else
+ obj->send_cmd = bnx2x_queue_send_cmd_e2;
+
+ obj->check_transition = bnx2x_queue_chk_transition;
+
+ obj->complete_cmd = bnx2x_queue_comp_cmd;
+ obj->wait_comp = bnx2x_queue_wait_comp;
+ obj->set_pending = bnx2x_queue_set_pending;
+}
+
+void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ u32 cid, u8 index)
+{
+ obj->cids[index] = cid;
+}
+
+/********************** Function state object *********************************/
+enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o)
+{
+ /* in the middle of transaction - return INVALID state */
+ if (o->pending)
+ return BNX2X_F_STATE_MAX;
+
+ /*
+ * ensure the order of reading of o->pending and o->state:
+ * o->pending should be read first
+ */
+ rmb();
+
+ return o->state;
+}
+
+static int bnx2x_func_wait_comp(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd)
+{
+ return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_func_state_change_comp - complete the state machine transition
+ *
+ * @bp: device handle
+ * @o: function state object
+ * @cmd: command that has completed
+ *
+ * Called on state change transition. Completes the state
+ * machine transition only - no HW interaction.
+ */
+static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd)
+{
+ unsigned long cur_pending = o->pending;
+
+ if (!test_and_clear_bit(cmd, &cur_pending)) {
+ BNX2X_ERR("Bad MC reply %d for func %d in state %d "
+ "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
+ o->state, cur_pending, o->next_state);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to "
+ "%d\n", cmd, BP_FUNC(bp), o->next_state);
+
+ o->state = o->next_state;
+ o->next_state = BNX2X_F_STATE_MAX;
+
+ /* It's important that o->state and o->next_state are
+ * updated before o->pending.
+ */
+ wmb();
+
+ clear_bit(cmd, &o->pending);
+ smp_mb__after_clear_bit();
+
+ return 0;
+}
+
+/**
+ * bnx2x_func_comp_cmd - complete the state change command
+ *
+ * @bp: device handle
+ * @o: function state object
+ * @cmd: command that has completed
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_func_comp_cmd(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd)
+{
+ /* Complete the state machine part first, check if it's a
+ * legal completion.
+ */
+ int rc = bnx2x_func_state_change_comp(bp, o, cmd);
+ return rc;
+}
+
+/**
+ * bnx2x_func_chk_transition - check function state machine transition
+ *
+ * @bp: device handle
+ * @o: function state object
+ * @params: function state parameters
+ *
+ * It both checks if the requested command is legal in a current
+ * state and, if it's legal, sets a `next_state' in the object
+ * that will be used in the completion flow to set the `state'
+ * of the object.
+ *
+ * returns 0 if a requested command is a legal transition,
+ * -EINVAL otherwise.
+ */
+static int bnx2x_func_chk_transition(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ struct bnx2x_func_state_params *params)
+{
+ enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
+ enum bnx2x_func_cmd cmd = params->cmd;
+
+ /*
+ * Forget all commands pending for completion if a driver-only state
+ * transition has been requested.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ o->pending = 0;
+ o->next_state = BNX2X_F_STATE_MAX;
+ }
+
+ /*
+ * Don't allow a next state transition if we are in the middle of
+ * the previous one.
+ */
+ if (o->pending)
+ return -EBUSY;
+
+ switch (state) {
+ case BNX2X_F_STATE_RESET:
+ if (cmd == BNX2X_F_CMD_HW_INIT)
+ next_state = BNX2X_F_STATE_INITIALIZED;
+
+ break;
+ case BNX2X_F_STATE_INITIALIZED:
+ if (cmd == BNX2X_F_CMD_START)
+ next_state = BNX2X_F_STATE_STARTED;
+
+ else if (cmd == BNX2X_F_CMD_HW_RESET)
+ next_state = BNX2X_F_STATE_RESET;
+
+ break;
+ case BNX2X_F_STATE_STARTED:
+ if (cmd == BNX2X_F_CMD_STOP)
+ next_state = BNX2X_F_STATE_INITIALIZED;
+ else if (cmd == BNX2X_F_CMD_TX_STOP)
+ next_state = BNX2X_F_STATE_TX_STOPPED;
+
+ break;
+ case BNX2X_F_STATE_TX_STOPPED:
+ if (cmd == BNX2X_F_CMD_TX_START)
+ next_state = BNX2X_F_STATE_STARTED;
+
+ break;
+ default:
+ BNX2X_ERR("Unknown state: %d\n", state);
+ }
+
+ /* Transition is assured */
+ if (next_state != BNX2X_F_STATE_MAX) {
+ DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
+ state, cmd, next_state);
+ o->next_state = next_state;
+ return 0;
+ }
+
+ DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
+ state, cmd);
+
+ return -EINVAL;
+}
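+
+/* Summary of the function state machine checked above (derived from the
+ * switch statement):
+ *
+ *	RESET --HW_INIT--> INITIALIZED --START--> STARTED --STOP--> INITIALIZED
+ *	STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
+ *	INITIALIZED --HW_RESET--> RESET
+ */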
+
+/**
+ * bnx2x_func_init_func - performs HW init at function stage
+ *
+ * @bp: device handle
+ * @drv: driver-specific operations (struct bnx2x_func_sp_drv_ops)
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
+ * HW blocks.
+ */
+static inline int bnx2x_func_init_func(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ return drv->init_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_init_port - performs HW init at port stage
+ *
+ * @bp: device handle
+ * @drv: driver-specific operations (struct bnx2x_func_sp_drv_ops)
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
+ * FUNCTION-only HW blocks.
+ *
+ */
+static inline int bnx2x_func_init_port(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ int rc = drv->init_hw_port(bp);
+ if (rc)
+ return rc;
+
+ return bnx2x_func_init_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
+ *
+ * @bp: device handle
+ * @drv: driver-specific operations (struct bnx2x_func_sp_drv_ops)
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ int rc = drv->init_hw_cmn_chip(bp);
+ if (rc)
+ return rc;
+
+ return bnx2x_func_init_port(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn - performs HW init at common stage
+ *
+ * @bp: device handle
+ * @drv: driver-specific operations (struct bnx2x_func_sp_drv_ops)
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ int rc = drv->init_hw_cmn(bp);
+ if (rc)
+ return rc;
+
+ return bnx2x_func_init_port(bp, drv);
+}
+
+static int bnx2x_func_hw_init(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ u32 load_code = params->params.hw_init.load_phase;
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+ int rc = 0;
+
+ DP(BNX2X_MSG_SP, "function %d load_code %x\n",
+ BP_ABS_FUNC(bp), load_code);
+
+ /* Prepare buffers for unzipping the FW */
+ rc = drv->gunzip_init(bp);
+ if (rc)
+ return rc;
+
+ /* Prepare FW */
+ rc = drv->init_fw(bp);
+ if (rc) {
+ BNX2X_ERR("Error loading firmware\n");
+ goto fw_init_err;
+ }
+
+ /* Handle the beginning of COMMON_XXX phases separately... */
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+ rc = bnx2x_func_init_cmn_chip(bp, drv);
+ if (rc)
+ goto init_hw_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_COMMON:
+ rc = bnx2x_func_init_cmn(bp, drv);
+ if (rc)
+ goto init_hw_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ rc = bnx2x_func_init_port(bp, drv);
+ if (rc)
+ goto init_hw_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ rc = bnx2x_func_init_func(bp, drv);
+ if (rc)
+ goto init_hw_err;
+
+ break;
+ default:
+ BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+ rc = -EINVAL;
+ }
+
+init_hw_err:
+ drv->release_fw(bp);
+
+fw_init_err:
+ drv->gunzip_end(bp);
+
+ /* In case of success, complete the command immediately: no ramrods
+ * have been sent.
+ */
+ if (!rc)
+ o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
+
+ return rc;
+}
+
+/**
+ * bnx2x_func_reset_func - reset HW at function stage
+ *
+ * @bp: device handle
+ * @drv: driver-specific operations (struct bnx2x_func_sp_drv_ops)
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
+ * FUNCTION-only HW blocks.
+ */
+static inline void bnx2x_func_reset_func(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ drv->reset_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_reset_port - reset HW at port stage
+ *
+ * @bp: device handle
+ * @drv: driver-specific operations (struct bnx2x_func_sp_drv_ops)
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
+ * FUNCTION-only and PORT-only HW blocks.
+ *
+ * !!!IMPORTANT!!!
+ *
+ * It's important to call reset_port before reset_func() as the last thing
+ * reset_func does is pf_disable(), thus disabling PGLUE_B, which
+ * makes any DMAE transactions impossible.
+ */
+static inline void bnx2x_func_reset_port(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ drv->reset_hw_port(bp);
+ bnx2x_func_reset_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_reset_cmn - reset HW at common stage
+ *
+ * @bp: device handle
+ * @drv: driver-specific operations (struct bnx2x_func_sp_drv_ops)
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
+ * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
+ * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
+ */
+static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ bnx2x_func_reset_port(bp, drv);
+ drv->reset_hw_cmn(bp);
+}
+
+static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ u32 reset_phase = params->params.hw_reset.reset_phase;
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+
+ DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
+ reset_phase);
+
+ switch (reset_phase) {
+ case FW_MSG_CODE_DRV_UNLOAD_COMMON:
+ bnx2x_func_reset_cmn(bp, drv);
+ break;
+ case FW_MSG_CODE_DRV_UNLOAD_PORT:
+ bnx2x_func_reset_port(bp, drv);
+ break;
+ case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
+ bnx2x_func_reset_func(bp, drv);
+ break;
+ default:
+ BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
+ reset_phase);
+ break;
+ }
+
+ /* Complete the command immediately: no ramrods have been sent. */
+ o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
+
+ return 0;
+}
+
+static inline int bnx2x_func_send_start(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct function_start_data *rdata =
+ (struct function_start_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_func_start_params *start_params = &params->params.start;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ rdata->function_mode = cpu_to_le16(start_params->mf_mode);
+ rdata->sd_vlan_tag = start_params->sd_vlan_tag;
+ rdata->path_id = BP_PATH(bp);
+ rdata->network_cos_mode = start_params->network_cos_mode;
+
+ /*
+ * No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer, which involves a memory
+ * read; if that read is ever removed, a full memory barrier
+ * will be needed there (inside bnx2x_sp_post()).
+ */
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_stop(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
+ NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
+ NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct flow_control_configuration *rdata =
+ (struct flow_control_configuration *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_func_tx_start_params *tx_start_params =
+ &params->params.tx_start;
+ int i;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ rdata->dcb_enabled = tx_start_params->dcb_enabled;
+ rdata->dcb_version = tx_start_params->dcb_version;
+ rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
+
+ for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
+ rdata->traffic_type_to_priority_cos[i] =
+ tx_start_params->traffic_type_to_priority_cos[i];
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static int bnx2x_func_send_cmd(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ switch (params->cmd) {
+ case BNX2X_F_CMD_HW_INIT:
+ return bnx2x_func_hw_init(bp, params);
+ case BNX2X_F_CMD_START:
+ return bnx2x_func_send_start(bp, params);
+ case BNX2X_F_CMD_STOP:
+ return bnx2x_func_send_stop(bp, params);
+ case BNX2X_F_CMD_HW_RESET:
+ return bnx2x_func_hw_reset(bp, params);
+ case BNX2X_F_CMD_TX_STOP:
+ return bnx2x_func_send_tx_stop(bp, params);
+ case BNX2X_F_CMD_TX_START:
+ return bnx2x_func_send_tx_start(bp, params);
+ default:
+ BNX2X_ERR("Unknown command: %d\n", params->cmd);
+ return -EINVAL;
+ }
+}
+
+void bnx2x_init_func_obj(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *obj,
+ void *rdata, dma_addr_t rdata_mapping,
+ struct bnx2x_func_sp_drv_ops *drv_iface)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ mutex_init(&obj->one_pending_mutex);
+
+ obj->rdata = rdata;
+ obj->rdata_mapping = rdata_mapping;
+
+ obj->send_cmd = bnx2x_func_send_cmd;
+ obj->check_transition = bnx2x_func_chk_transition;
+ obj->complete_cmd = bnx2x_func_comp_cmd;
+ obj->wait_comp = bnx2x_func_wait_comp;
+
+ obj->drv = drv_iface;
+}
+
+/**
+ * bnx2x_func_state_change - perform Function state change transition
+ *
+ * @bp: device handle
+ * @params: parameters to perform the transition
+ *
+ * returns 0 in case of a successfully completed transition,
+ * a negative error code in case of failure, or a positive
+ * (EBUSY) value if a completion is still pending
+ * (possible only if RAMROD_COMP_WAIT is not set in
+ * params->ramrod_flags for asynchronous commands).
+ */
+int bnx2x_func_state_change(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ int rc;
+ enum bnx2x_func_cmd cmd = params->cmd;
+ unsigned long *pending = &o->pending;
+
+ mutex_lock(&o->one_pending_mutex);
+
+ /* Check that the requested transition is legal */
+ if (o->check_transition(bp, o, params)) {
+ mutex_unlock(&o->one_pending_mutex);
+ return -EINVAL;
+ }
+
+ /* Set "pending" bit */
+ set_bit(cmd, pending);
+
+ /* Don't send a command if only driver cleanup was requested */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ bnx2x_func_state_change_comp(bp, o, cmd);
+ mutex_unlock(&o->one_pending_mutex);
+ } else {
+ /* Send a ramrod */
+ rc = o->send_cmd(bp, params);
+
+ mutex_unlock(&o->one_pending_mutex);
+
+ if (rc) {
+ o->next_state = BNX2X_F_STATE_MAX;
+ clear_bit(cmd, pending);
+ smp_mb__after_clear_bit();
+ return rc;
+ }
+
+ if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+ rc = o->wait_comp(bp, o, cmd);
+ if (rc)
+ return rc;
+
+ return 0;
+ }
+ }
+
+ return !!test_bit(cmd, pending);
+}
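+
+/* Illustrative usage of bnx2x_func_state_change() (a sketch; the local
+ * variable names are assumptions):
+ *
+ *	struct bnx2x_func_state_params func_params = {0};
+ *
+ *	func_params.f_obj = f_obj;	// an initialized bnx2x_func_sp_obj
+ *	func_params.cmd = BNX2X_F_CMD_HW_INIT;
+ *	func_params.params.hw_init.load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
+ *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ *	rc = bnx2x_func_state_change(bp, &func_params);
+ */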
diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h
new file mode 100644
index 00000000000..9a517c2e9f1
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_sp.h
@@ -0,0 +1,1297 @@
+/* bnx2x_sp.h: Broadcom Everest network driver.
+ *
+ * Copyright 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
+#ifndef BNX2X_SP_VERBS
+#define BNX2X_SP_VERBS
+
+struct bnx2x;
+struct eth_context;
+
+/* Bits representing general command's configuration */
+enum {
+ RAMROD_TX,
+ RAMROD_RX,
+ /* Wait until all pending commands complete */
+ RAMROD_COMP_WAIT,
+ /* Don't send a ramrod, only update a registry */
+ RAMROD_DRV_CLR_ONLY,
+ /* Configure HW according to the current object state */
+ RAMROD_RESTORE,
+ /* Execute the next command now */
+ RAMROD_EXEC,
+ /*
+ * Don't add a new command and continue execution of postponed
+ * commands. If not set, a new command will be added to the
+ * pending commands list.
+ */
+ RAMROD_CONT,
+};
+
+typedef enum {
+ BNX2X_OBJ_TYPE_RX,
+ BNX2X_OBJ_TYPE_TX,
+ BNX2X_OBJ_TYPE_RX_TX,
+} bnx2x_obj_type;
+
+/* Filtering states */
+enum {
+ BNX2X_FILTER_MAC_PENDING,
+ BNX2X_FILTER_VLAN_PENDING,
+ BNX2X_FILTER_VLAN_MAC_PENDING,
+ BNX2X_FILTER_RX_MODE_PENDING,
+ BNX2X_FILTER_RX_MODE_SCHED,
+ BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+ BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+ BNX2X_FILTER_FCOE_ETH_START_SCHED,
+ BNX2X_FILTER_FCOE_ETH_STOP_SCHED,
+ BNX2X_FILTER_MCAST_PENDING,
+ BNX2X_FILTER_MCAST_SCHED,
+ BNX2X_FILTER_RSS_CONF_PENDING,
+};
+
+struct bnx2x_raw_obj {
+ u8 func_id;
+
+ /* Queue params */
+ u8 cl_id;
+ u32 cid;
+
+ /* Ramrod data buffer params */
+ void *rdata;
+ dma_addr_t rdata_mapping;
+
+ /* Ramrod state params */
+ int state; /* "ramrod is pending" state bit */
+ unsigned long *pstate; /* pointer to state buffer */
+
+ bnx2x_obj_type obj_type;
+
+ int (*wait_comp)(struct bnx2x *bp,
+ struct bnx2x_raw_obj *o);
+
+ bool (*check_pending)(struct bnx2x_raw_obj *o);
+ void (*clear_pending)(struct bnx2x_raw_obj *o);
+ void (*set_pending)(struct bnx2x_raw_obj *o);
+};
+
+/************************* VLAN-MAC commands related parameters ***************/
+struct bnx2x_mac_ramrod_data {
+ u8 mac[ETH_ALEN];
+};
+
+struct bnx2x_vlan_ramrod_data {
+ u16 vlan;
+};
+
+struct bnx2x_vlan_mac_ramrod_data {
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+};
+
+union bnx2x_classification_ramrod_data {
+ struct bnx2x_mac_ramrod_data mac;
+ struct bnx2x_vlan_ramrod_data vlan;
+ struct bnx2x_vlan_mac_ramrod_data vlan_mac;
+};
+
+/* VLAN_MAC commands */
+enum bnx2x_vlan_mac_cmd {
+ BNX2X_VLAN_MAC_ADD,
+ BNX2X_VLAN_MAC_DEL,
+ BNX2X_VLAN_MAC_MOVE,
+};
+
+struct bnx2x_vlan_mac_data {
+ /* Requested command: BNX2X_VLAN_MAC_XX */
+ enum bnx2x_vlan_mac_cmd cmd;
+ /*
+ * Used to contain the vlan_mac_flags bits related to the data
+ * from the ramrod parameters.
+ */
+ unsigned long vlan_mac_flags;
+
+ /* Needed for MOVE command */
+ struct bnx2x_vlan_mac_obj *target_obj;
+
+ union bnx2x_classification_ramrod_data u;
+};
+
+/*************************** Exe Queue obj ************************************/
+union bnx2x_exe_queue_cmd_data {
+ struct bnx2x_vlan_mac_data vlan_mac;
+
+ struct {
+ /* TODO */
+ } mcast;
+};
+
+struct bnx2x_exeq_elem {
+ struct list_head link;
+
+ /* Length of this element in the exe_chunk. */
+ int cmd_len;
+
+ union bnx2x_exe_queue_cmd_data cmd_data;
+};
+
+union bnx2x_qable_obj;
+
+union bnx2x_exeq_comp_elem {
+ union event_ring_elem *elem;
+};
+
+struct bnx2x_exe_queue_obj;
+
+typedef int (*exe_q_validate)(struct bnx2x *bp,
+ union bnx2x_qable_obj *o,
+ struct bnx2x_exeq_elem *elem);
+
+/**
+ * @return positive if the entry was optimized, 0 if not, negative
+ * in case of an error.
+ */
+typedef int (*exe_q_optimize)(struct bnx2x *bp,
+ union bnx2x_qable_obj *o,
+ struct bnx2x_exeq_elem *elem);
+typedef int (*exe_q_execute)(struct bnx2x *bp,
+ union bnx2x_qable_obj *o,
+ struct list_head *exe_chunk,
+ unsigned long *ramrod_flags);
+typedef struct bnx2x_exeq_elem *
+ (*exe_q_get)(struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem);
+
+struct bnx2x_exe_queue_obj {
+ /*
+ * Commands pending for execution.
+ */
+ struct list_head exe_queue;
+
+ /*
+ * Commands pending for a completion.
+ */
+ struct list_head pending_comp;
+
+ spinlock_t lock;
+
+ /* Maximum length of commands' list for one execution */
+ int exe_chunk_len;
+
+ union bnx2x_qable_obj *owner;
+
+ /****** Virtual functions ******/
+ /**
+ * Called before command execution for commands that are really
+ * going to be executed (after 'optimize').
+ *
+ * Must run under exe_queue->lock
+ */
+ exe_q_validate validate;
+
+
+ /**
+ * This will try to cancel the current pending commands list
+ * considering the new command.
+ *
+ * Must run under exe_queue->lock
+ */
+ exe_q_optimize optimize;
+
+ /**
+ * Run the next commands chunk (owner specific).
+ */
+ exe_q_execute execute;
+
+ /**
+ * Return the exe_queue element containing the specific command
+ * if any. Otherwise return NULL.
+ */
+ exe_q_get get;
+};
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/*
+ * Element in the VLAN_MAC registry list having all currently configured
+ * rules.
+ */
+struct bnx2x_vlan_mac_registry_elem {
+ struct list_head link;
+
+ /*
+ * Used to store the cam offset used for the mac/vlan/vlan-mac.
+ * Relevant for 57710 and 57711 only. VLANs and MACs share the
+ * same CAM for these chips.
+ */
+ int cam_offset;
+
+ /* Needed for DEL and RESTORE flows */
+ unsigned long vlan_mac_flags;
+
+ union bnx2x_classification_ramrod_data u;
+};
+
+/* Bits representing VLAN_MAC commands specific flags */
+enum {
+ BNX2X_UC_LIST_MAC,
+ BNX2X_ETH_MAC,
+ BNX2X_ISCSI_ETH_MAC,
+ BNX2X_NETQ_ETH_MAC,
+ BNX2X_DONT_CONSUME_CAM_CREDIT,
+ BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+};
+
+struct bnx2x_vlan_mac_ramrod_params {
+ /* Object to run the command from */
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj;
+
+ /* General command flags: COMP_WAIT, etc. */
+ unsigned long ramrod_flags;
+
+ /* Command specific configuration request */
+ struct bnx2x_vlan_mac_data user_req;
+};
+
+struct bnx2x_vlan_mac_obj {
+ struct bnx2x_raw_obj raw;
+
+ /* Bookkeeping list: will prevent the addition of already existing
+ * entries.
+ */
+ struct list_head head;
+
+ /* TODO: Add its initialization in the init functions */
+ struct bnx2x_exe_queue_obj exe_queue;
+
+ /* MACs credit pool */
+ struct bnx2x_credit_pool_obj *macs_pool;
+
+ /* VLANs credit pool */
+ struct bnx2x_credit_pool_obj *vlans_pool;
+
+ /* RAMROD command to be used */
+ int ramrod_cmd;
+
+ /**
+ * Checks if ADD-ramrod with the given params may be performed.
+ *
+ * @return zero if the element may be added
+ */
+
+ int (*check_add)(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data);
+
+ /**
+ * Checks if DEL-ramrod with the given params may be performed.
+ *
+ * @return the matching registry element if it may be deleted, NULL otherwise
+ */
+ struct bnx2x_vlan_mac_registry_elem *
+ (*check_del)(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data);
+
+ /**
+ * Checks if a MOVE-ramrod with the given params may be performed.
+ *
+ * @return true if the element may be moved
+ */
+ bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o,
+ struct bnx2x_vlan_mac_obj *dst_o,
+ union bnx2x_classification_ramrod_data *data);
+
+ /**
+ * Update the relevant credit object(s) (consume/return
+ * correspondingly).
+ */
+ bool (*get_credit)(struct bnx2x_vlan_mac_obj *o);
+ bool (*put_credit)(struct bnx2x_vlan_mac_obj *o);
+ bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset);
+ bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset);
+
+ /**
+ * Configures one rule in the ramrod data buffer.
+ */
+ void (*set_one_rule)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem, int rule_idx,
+ int cam_offset);
+
+ /**
+ * Delete all configured elements having the given
+ * vlan_mac_flags specification. Assumes there are no commands
+ * pending for execution. Will schedule all currently
+ * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
+ * specification for deletion and will use the given
+ * ramrod_flags for the last DEL operation.
+ *
+ * @param bp
+ * @param o
+ * @param ramrod_flags RAMROD_XX flags
+ *
+ * @return 0 if the last operation has completed successfully
+ * and there are no more elements left, positive value
+ * if there are commands pending for completion,
+ * negative value in case of failure.
+ */
+ int (*delete_all)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long *vlan_mac_flags,
+ unsigned long *ramrod_flags);
+
+ /**
+ * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
+ * configured elements list.
+ *
+ * @param bp
+ * @param p Command parameters (only the RAMROD_COMP_WAIT bit
+ * in ramrod_flags is taken into account)
+ * @param ppos a pointer to the cookie that should be given back in the
+ * next call to make function handle the next element. If
+ * *ppos is set to NULL it will restart the iterator.
+ * If returned *ppos == NULL this means that the last
+ * element has been handled.
+ *
+ * @return int
+ */
+ int (*restore)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p,
+ struct bnx2x_vlan_mac_registry_elem **ppos);
+
+ /**
+ * Should be called on a completion arrival.
+ *
+ * @param bp
+ * @param o
+ * @param cqe Completion element we are handling
+ * @param ramrod_flags if RAMROD_CONT is set the next bulk of
+ * pending commands will be executed.
+ * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
+ * may also be set if needed.
+ *
+ * @return 0 if there are no commands pending for execution or
+ * for completion. Positive value if there are commands
+ * pending for execution or for completion.
+ * Negative value in case of an error (including an
+ * error in the cqe).
+ */
+ int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+ union event_ring_elem *cqe,
+ unsigned long *ramrod_flags);
+
+ /**
+ * Wait for completion of all commands. Don't schedule new ones,
+ * just wait. It assumes that the completion code will schedule
+ * for new commands.
+ */
+ int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o);
+};
+
+/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+
+/* RX_MODE ramrod special flags: set in rx_mode_flags field in
+ * a bnx2x_rx_mode_ramrod_params.
+ */
+enum {
+ BNX2X_RX_MODE_FCOE_ETH,
+ BNX2X_RX_MODE_ISCSI_ETH,
+};
+
+enum {
+ BNX2X_ACCEPT_UNICAST,
+ BNX2X_ACCEPT_MULTICAST,
+ BNX2X_ACCEPT_ALL_UNICAST,
+ BNX2X_ACCEPT_ALL_MULTICAST,
+ BNX2X_ACCEPT_BROADCAST,
+ BNX2X_ACCEPT_UNMATCHED,
+ BNX2X_ACCEPT_ANY_VLAN
+};
+
+struct bnx2x_rx_mode_ramrod_params {
+ struct bnx2x_rx_mode_obj *rx_mode_obj;
+ unsigned long *pstate;
+ int state;
+ u8 cl_id;
+ u32 cid;
+ u8 func_id;
+ unsigned long ramrod_flags;
+ unsigned long rx_mode_flags;
+
+ /*
+ * rdata is either a pointer to eth_filter_rules_ramrod_data (e2) or to
+ * a tstorm_eth_mac_filter_config (e1x).
+ */
+ void *rdata;
+ dma_addr_t rdata_mapping;
+
+ /* Rx mode settings */
+ unsigned long rx_accept_flags;
+
+ /* internal switching settings */
+ unsigned long tx_accept_flags;
+};
+
+struct bnx2x_rx_mode_obj {
+ int (*config_rx_mode)(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p);
+
+ int (*wait_comp)(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p);
+};
+
+/********************** Set multicast group ***********************************/
+
+struct bnx2x_mcast_list_elem {
+ struct list_head link;
+ u8 *mac;
+};
+
+union bnx2x_mcast_config_data {
+ u8 *mac;
+ u8 bin; /* used in a RESTORE flow */
+};
+
+struct bnx2x_mcast_ramrod_params {
+ struct bnx2x_mcast_obj *mcast_obj;
+
+ /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
+ unsigned long ramrod_flags;
+
+ struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */
+ /** TODO:
+ * - rename it to macs_num.
+ * - Add a new command type for handling pending commands
+ * (remove "zero semantics").
+ *
+ * Length of mcast_list. If zero and ADD_CONT command - post
+ * pending commands.
+ */
+ int mcast_list_len;
+};
+
+enum {
+ BNX2X_MCAST_CMD_ADD,
+ BNX2X_MCAST_CMD_CONT,
+ BNX2X_MCAST_CMD_DEL,
+ BNX2X_MCAST_CMD_RESTORE,
+};
+
+struct bnx2x_mcast_obj {
+ struct bnx2x_raw_obj raw;
+
+ union {
+ struct {
+ #define BNX2X_MCAST_BINS_NUM 256
+ #define BNX2X_MCAST_VEC_SZ (BNX2X_MCAST_BINS_NUM / 64)
+ u64 vec[BNX2X_MCAST_VEC_SZ];
+
+ /** Number of BINs to clear. Should be updated
+ * immediately when a command arrives in order to
+ * properly create DEL commands.
+ */
+ int num_bins_set;
+ } aprox_match;
+
+ struct {
+ struct list_head macs;
+ int num_macs_set;
+ } exact_match;
+ } registry;
+
+ /* Pending commands */
+ struct list_head pending_cmds_head;
+
+ /* A state that is set in raw.pstate, when there are pending commands */
+ int sched_state;
+
+ /* Maximal number of mcast MACs configured in one command */
+ int max_cmd_len;
+
+ /* Total number of currently pending MACs to configure: both
+ * in the pending commands list and in the current command.
+ */
+ int total_pending_num;
+
+ u8 engine_id;
+
+ /**
+ * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above)
+ */
+ int (*config_mcast)(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+ /**
+ * Fills the ramrod data during the RESTORE flow.
+ *
+ * @param bp
+ * @param o
+ * @param start_idx Registry index to start from
+ * @param rdata_idx Index in the ramrod data to start from
+ *
+ * @return -1 if we handled the whole registry, or the index of the last
+ * handled registry element.
+ */
+ int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
+ int start_bin, int *rdata_idx);
+
+ int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
+ struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+ void (*set_one_rule)(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, int idx,
+ union bnx2x_mcast_config_data *cfg_data, int cmd);
+
+ /** Checks if there are more mcast MACs to be set or a previous
+ * command is still pending.
+ */
+ bool (*check_pending)(struct bnx2x_mcast_obj *o);
+
+ /**
+ * Set/Clear/Check SCHEDULED state of the object
+ */
+ void (*set_sched)(struct bnx2x_mcast_obj *o);
+ void (*clear_sched)(struct bnx2x_mcast_obj *o);
+ bool (*check_sched)(struct bnx2x_mcast_obj *o);
+
+ /* Wait until all pending commands complete */
+ int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o);
+
+ /**
+ * Handle the internal object counters needed for proper
+ * commands handling. Checks that the provided parameters are
+ * feasible.
+ */
+ int (*validate)(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+ /**
+ * Restore the values of internal counters in case of a failure.
+ */
+ void (*revert)(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_bins);
+
+ int (*get_registry_size)(struct bnx2x_mcast_obj *o);
+ void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n);
+};
+
+/*************************** Credit handling **********************************/
+struct bnx2x_credit_pool_obj {
+
+ /* Current amount of credit in the pool */
+ atomic_t credit;
+
+ /* Maximum allowed credit. put() will check against it. */
+ int pool_sz;
+
+ /*
+ * Allocate a pool table statically.
+ *
+ * Currently the maximum allowed size is MAX_MAC_CREDIT_E2 (272).
+ *
+ * The set bit in the table will mean that the entry is available.
+ */
+#define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64)
+ u64 pool_mirror[BNX2X_POOL_VEC_SIZE];
+
+ /* Base pool offset (initialized differently) */
+ int base_pool_offset;
+
+ /**
+ * Get the next free pool entry.
+ *
+ * @return true if there was a free entry in the pool
+ */
+ bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry);
+
+ /**
+ * Return the entry back to the pool.
+ *
+ * @return true if entry is legal and has been successfully
+ * returned to the pool.
+ */
+ bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry);
+
+ /**
+ * Get the requested amount of credit from the pool.
+ *
+ * @param cnt Amount of requested credit
+ * @return true if the operation is successful
+ */
+ bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+ /**
+ * Returns the credit to the pool.
+ *
+ * @param cnt Amount of credit to return
+ * @return true if the operation is successful
+ */
+ bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+ /**
+ * Reads the current amount of credit.
+ */
+ int (*check)(struct bnx2x_credit_pool_obj *o);
+};
+
+/*************************** RSS configuration ********************************/
+enum {
+ /* RSS_MODE bits are mutually exclusive */
+ BNX2X_RSS_MODE_DISABLED,
+ BNX2X_RSS_MODE_REGULAR,
+ BNX2X_RSS_MODE_VLAN_PRI,
+ BNX2X_RSS_MODE_E1HOV_PRI,
+ BNX2X_RSS_MODE_IP_DSCP,
+
+ BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
+
+ BNX2X_RSS_IPV4,
+ BNX2X_RSS_IPV4_TCP,
+ BNX2X_RSS_IPV6,
+ BNX2X_RSS_IPV6_TCP,
+};
+
+struct bnx2x_config_rss_params {
+ struct bnx2x_rss_config_obj *rss_obj;
+
+ /* may have RAMROD_COMP_WAIT set only */
+ unsigned long ramrod_flags;
+
+ /* BNX2X_RSS_X bits */
+ unsigned long rss_flags;
+
+ /* Number of hash bits to take into account */
+ u8 rss_result_mask;
+
+ /* Indirection table */
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+ /* RSS hash values */
+ u32 rss_key[10];
+
+ /* valid iff BNX2X_RSS_UPDATE_TOE is set */
+ u16 toe_rss_bitmap;
+};
+
+struct bnx2x_rss_config_obj {
+ struct bnx2x_raw_obj raw;
+
+ /* RSS engine to use */
+ u8 engine_id;
+
+ /* Last configured indirection table */
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+ int (*config_rss)(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p);
+};
+
+/*********************** Queue state update ***********************************/
+
+/* UPDATE command options */
+enum {
+ BNX2X_Q_UPDATE_IN_VLAN_REM,
+ BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+ BNX2X_Q_UPDATE_OUT_VLAN_REM,
+ BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+ BNX2X_Q_UPDATE_ANTI_SPOOF,
+ BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
+ BNX2X_Q_UPDATE_ACTIVATE,
+ BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ BNX2X_Q_UPDATE_SILENT_VLAN_REM
+};
+
+/* Allowed Queue states */
+enum bnx2x_q_state {
+ BNX2X_Q_STATE_RESET,
+ BNX2X_Q_STATE_INITIALIZED,
+ BNX2X_Q_STATE_ACTIVE,
+ BNX2X_Q_STATE_MULTI_COS,
+ BNX2X_Q_STATE_MCOS_TERMINATED,
+ BNX2X_Q_STATE_INACTIVE,
+ BNX2X_Q_STATE_STOPPED,
+ BNX2X_Q_STATE_TERMINATED,
+ BNX2X_Q_STATE_FLRED,
+ BNX2X_Q_STATE_MAX,
+};
+
+/* Allowed commands */
+enum bnx2x_queue_cmd {
+ BNX2X_Q_CMD_INIT,
+ BNX2X_Q_CMD_SETUP,
+ BNX2X_Q_CMD_SETUP_TX_ONLY,
+ BNX2X_Q_CMD_DEACTIVATE,
+ BNX2X_Q_CMD_ACTIVATE,
+ BNX2X_Q_CMD_UPDATE,
+ BNX2X_Q_CMD_UPDATE_TPA,
+ BNX2X_Q_CMD_HALT,
+ BNX2X_Q_CMD_CFC_DEL,
+ BNX2X_Q_CMD_TERMINATE,
+ BNX2X_Q_CMD_EMPTY,
+ BNX2X_Q_CMD_MAX,
+};
+
+/* queue SETUP + INIT flags */
+enum {
+ BNX2X_Q_FLG_TPA,
+ BNX2X_Q_FLG_TPA_IPV6,
+ BNX2X_Q_FLG_STATS,
+ BNX2X_Q_FLG_ZERO_STATS,
+ BNX2X_Q_FLG_ACTIVE,
+ BNX2X_Q_FLG_OV,
+ BNX2X_Q_FLG_VLAN,
+ BNX2X_Q_FLG_COS,
+ BNX2X_Q_FLG_HC,
+ BNX2X_Q_FLG_HC_EN,
+ BNX2X_Q_FLG_DHC,
+ BNX2X_Q_FLG_FCOE,
+ BNX2X_Q_FLG_LEADING_RSS,
+ BNX2X_Q_FLG_MCAST,
+ BNX2X_Q_FLG_DEF_VLAN,
+ BNX2X_Q_FLG_TX_SWITCH,
+ BNX2X_Q_FLG_TX_SEC,
+ BNX2X_Q_FLG_ANTI_SPOOF,
+ BNX2X_Q_FLG_SILENT_VLAN_REM
+};
+
+/* Queue type options: queue type may be a combination of the below. */
+enum bnx2x_q_type {
+ /** TODO: Consider moving both these flags into the init()
+ * ramrod params.
+ */
+ BNX2X_Q_TYPE_HAS_RX,
+ BNX2X_Q_TYPE_HAS_TX,
+};
+
+#define BNX2X_PRIMARY_CID_INDEX 0
+#define BNX2X_MULTI_TX_COS_E1X 1
+#define BNX2X_MULTI_TX_COS_E2_E3A0 2
+#define BNX2X_MULTI_TX_COS_E3B0 3
+#define BNX2X_MULTI_TX_COS BNX2X_MULTI_TX_COS_E3B0
+
+
+struct bnx2x_queue_init_params {
+ struct {
+ unsigned long flags;
+ u16 hc_rate;
+ u8 fw_sb_id;
+ u8 sb_cq_index;
+ } tx;
+
+ struct {
+ unsigned long flags;
+ u16 hc_rate;
+ u8 fw_sb_id;
+ u8 sb_cq_index;
+ } rx;
+
+ /* CID context in the host memory */
+ struct eth_context *cxts[BNX2X_MULTI_TX_COS];
+
+ /* maximum number of cos supported by hardware */
+ u8 max_cos;
+};
+
+struct bnx2x_queue_terminate_params {
+ /* index within the tx_only cids of this queue object */
+ u8 cid_index;
+};
+
+struct bnx2x_queue_cfc_del_params {
+ /* index within the tx_only cids of this queue object */
+ u8 cid_index;
+};
+
+struct bnx2x_queue_update_params {
+ unsigned long update_flags; /* BNX2X_Q_UPDATE_XX bits */
+ u16 def_vlan;
+ u16 silent_removal_value;
+ u16 silent_removal_mask;
+/* index within the tx_only cids of this queue object */
+ u8 cid_index;
+};
+
+struct rxq_pause_params {
+ u16 bd_th_lo;
+ u16 bd_th_hi;
+ u16 rcq_th_lo;
+ u16 rcq_th_hi;
+ u16 sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */
+ u16 sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */
+ u16 pri_map;
+};
+
+/* general */
+struct bnx2x_general_setup_params {
+ /* valid iff BNX2X_Q_FLG_STATS */
+ u8 stat_id;
+
+ u8 spcl_id;
+ u16 mtu;
+ u8 cos;
+};
+
+struct bnx2x_rxq_setup_params {
+ /* dma */
+ dma_addr_t dscr_map;
+ dma_addr_t sge_map;
+ dma_addr_t rcq_map;
+ dma_addr_t rcq_np_map;
+
+ u16 drop_flags;
+ u16 buf_sz;
+ u8 fw_sb_id;
+ u8 cl_qzone_id;
+
+ /* valid iff BNX2X_Q_FLG_TPA */
+ u16 tpa_agg_sz;
+ u16 sge_buf_sz;
+ u8 max_sges_pkt;
+ u8 max_tpa_queues;
+ u8 rss_engine_id;
+
+ u8 cache_line_log;
+
+ u8 sb_cq_index;
+
+ /* valid iff BNX2X_Q_FLG_SILENT_VLAN_REM */
+ u16 silent_removal_value;
+ u16 silent_removal_mask;
+};
+
+struct bnx2x_txq_setup_params {
+ /* dma */
+ dma_addr_t dscr_map;
+
+ u8 fw_sb_id;
+ u8 sb_cq_index;
+ u8 cos; /* valid iff BNX2X_Q_FLG_COS */
+ u16 traffic_type;
+ /* equals the leading rss client id, used for TX classification */
+ u8 tss_leading_cl_id;
+
+ /* valid iff BNX2X_Q_FLG_DEF_VLAN */
+ u16 default_vlan;
+};
+
+struct bnx2x_queue_setup_params {
+ struct bnx2x_general_setup_params gen_params;
+ struct bnx2x_txq_setup_params txq_params;
+ struct bnx2x_rxq_setup_params rxq_params;
+ struct rxq_pause_params pause_params;
+ unsigned long flags;
+};
+
+struct bnx2x_queue_setup_tx_only_params {
+ struct bnx2x_general_setup_params gen_params;
+ struct bnx2x_txq_setup_params txq_params;
+ unsigned long flags;
+ /* index within the tx_only cids of this queue object */
+ u8 cid_index;
+};
+
+struct bnx2x_queue_state_params {
+ struct bnx2x_queue_sp_obj *q_obj;
+
+ /* Current command */
+ enum bnx2x_queue_cmd cmd;
+
+ /* may have RAMROD_COMP_WAIT set only */
+ unsigned long ramrod_flags;
+
+ /* Params according to the current command */
+ union {
+ struct bnx2x_queue_update_params update;
+ struct bnx2x_queue_setup_params setup;
+ struct bnx2x_queue_init_params init;
+ struct bnx2x_queue_setup_tx_only_params tx_only;
+ struct bnx2x_queue_terminate_params terminate;
+ struct bnx2x_queue_cfc_del_params cfc_del;
+ } params;
+};
+
+struct bnx2x_queue_sp_obj {
+ u32 cids[BNX2X_MULTI_TX_COS];
+ u8 cl_id;
+ u8 func_id;
+
+ /*
+ * Number of traffic classes supported by the queue.
+ * The primary connection of the queue supports the first traffic
+ * class. Any further traffic class is supported by a tx-only
+ * connection.
+ *
+ * Therefore max_cos is also a number of valid entries in the cids
+ * array.
+ */
+ u8 max_cos;
+ u8 num_tx_only, next_tx_only;
+
+ enum bnx2x_q_state state, next_state;
+
+ /* bits from enum bnx2x_q_type */
+ unsigned long type;
+
+ /* BNX2X_Q_CMD_XX bits. This object implements "one
+ * pending" paradigm but for debug and tracing purposes it's
+ * more convenient to have different bits for different
+ * commands.
+ */
+ unsigned long pending;
+
+ /* Buffer to use as a ramrod data and its mapping */
+ void *rdata;
+ dma_addr_t rdata_mapping;
+
+ /**
+ * Performs one state change according to the given parameters.
+ *
+ * @return 0 in case of success and negative value otherwise.
+ */
+ int (*send_cmd)(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params);
+
+ /**
+ * Sets the pending bit according to the requested transition.
+ */
+ int (*set_pending)(struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_queue_state_params *params);
+
+ /**
+ * Checks that the requested state transition is legal.
+ */
+ int (*check_transition)(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_queue_state_params *params);
+
+ /**
+ * Completes the pending command.
+ */
+ int (*complete_cmd)(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ enum bnx2x_queue_cmd);
+
+ int (*wait_comp)(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ enum bnx2x_queue_cmd cmd);
+};
+
+/********************** Function state update *********************************/
+/* Allowed Function states */
+enum bnx2x_func_state {
+ BNX2X_F_STATE_RESET,
+ BNX2X_F_STATE_INITIALIZED,
+ BNX2X_F_STATE_STARTED,
+ BNX2X_F_STATE_TX_STOPPED,
+ BNX2X_F_STATE_MAX,
+};
+
+/* Allowed Function commands */
+enum bnx2x_func_cmd {
+ BNX2X_F_CMD_HW_INIT,
+ BNX2X_F_CMD_START,
+ BNX2X_F_CMD_STOP,
+ BNX2X_F_CMD_HW_RESET,
+ BNX2X_F_CMD_TX_STOP,
+ BNX2X_F_CMD_TX_START,
+ BNX2X_F_CMD_MAX,
+};
+
+struct bnx2x_func_hw_init_params {
+ /* A load phase returned by MCP.
+ *
+ * May be:
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+ * FW_MSG_CODE_DRV_LOAD_COMMON
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ */
+ u32 load_phase;
+};
+
+struct bnx2x_func_hw_reset_params {
+ /* A load phase returned by MCP.
+ *
+ * May be:
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+ * FW_MSG_CODE_DRV_LOAD_COMMON
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ */
+ u32 reset_phase;
+};
+
+struct bnx2x_func_start_params {
+ /* Multi Function mode:
+ * - Single Function
+ * - Switch Dependent
+ * - Switch Independent
+ */
+ u16 mf_mode;
+
+ /* Switch Dependent mode outer VLAN tag */
+ u16 sd_vlan_tag;
+
+ /* Function cos mode */
+ u8 network_cos_mode;
+};
+
+struct bnx2x_func_tx_start_params {
+ struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
+ u8 dcb_enabled;
+ u8 dcb_version;
+ u8 dont_add_pri_0_en;
+};
+
+struct bnx2x_func_state_params {
+ struct bnx2x_func_sp_obj *f_obj;
+
+ /* Current command */
+ enum bnx2x_func_cmd cmd;
+
+ /* may have RAMROD_COMP_WAIT set only */
+ unsigned long ramrod_flags;
+
+ /* Params according to the current command */
+ union {
+ struct bnx2x_func_hw_init_params hw_init;
+ struct bnx2x_func_hw_reset_params hw_reset;
+ struct bnx2x_func_start_params start;
+ struct bnx2x_func_tx_start_params tx_start;
+ } params;
+};
+
+struct bnx2x_func_sp_drv_ops {
+ /* Init tool + runtime initialization:
+ * - Common Chip
+ * - Common (per Path)
+ * - Port
+ * - Function phases
+ */
+ int (*init_hw_cmn_chip)(struct bnx2x *bp);
+ int (*init_hw_cmn)(struct bnx2x *bp);
+ int (*init_hw_port)(struct bnx2x *bp);
+ int (*init_hw_func)(struct bnx2x *bp);
+
+ /* Reset Function HW: Common, Port, Function phases. */
+ void (*reset_hw_cmn)(struct bnx2x *bp);
+ void (*reset_hw_port)(struct bnx2x *bp);
+ void (*reset_hw_func)(struct bnx2x *bp);
+
+ /* Init/Free GUNZIP resources */
+ int (*gunzip_init)(struct bnx2x *bp);
+ void (*gunzip_end)(struct bnx2x *bp);
+
+ /* Prepare/Release FW resources */
+ int (*init_fw)(struct bnx2x *bp);
+ void (*release_fw)(struct bnx2x *bp);
+};
+
+struct bnx2x_func_sp_obj {
+ enum bnx2x_func_state state, next_state;
+
+ /* BNX2X_FUNC_CMD_XX bits. This object implements "one
+ * pending" paradigm but for debug and tracing purposes it's
+ * more convenient to have different bits for different
+ * commands.
+ */
+ unsigned long pending;
+
+ /* Buffer to use as a ramrod data and its mapping */
+ void *rdata;
+ dma_addr_t rdata_mapping;
+
+ /* This mutex ensures that when the pending flag is taken, the next
+ * ramrod to be sent will be the one that set the pending bit
+ */
+ struct mutex one_pending_mutex;
+
+ /* Driver interface */
+ struct bnx2x_func_sp_drv_ops *drv;
+
+ /**
+ * Performs one state change according to the given parameters.
+ *
+ * @return 0 in case of success and negative value otherwise.
+ */
+ int (*send_cmd)(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params);
+
+ /**
+ * Checks that the requested state transition is legal.
+ */
+ int (*check_transition)(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ struct bnx2x_func_state_params *params);
+
+ /**
+ * Completes the pending command.
+ */
+ int (*complete_cmd)(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd);
+
+ int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd);
+};
+
+/********************** Interfaces ********************************************/
+/* Queueable objects set */
+union bnx2x_qable_obj {
+ struct bnx2x_vlan_mac_obj vlan_mac;
+};
+/************** Function state update *********/
+void bnx2x_init_func_obj(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *obj,
+ void *rdata, dma_addr_t rdata_mapping,
+ struct bnx2x_func_sp_drv_ops *drv_iface);
+
+int bnx2x_func_state_change(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params);
+
+enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o);
+/******************* Queue State **************/
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 *cids,
+ u8 cid_cnt, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, unsigned long type);
+
+int bnx2x_queue_state_change(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params);
+
+/********************* VLAN-MAC ****************/
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool);
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *vlans_pool);
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+ u8 cl_id, u32 cid, u8 func_id, void *rdata,
+ dma_addr_t rdata_mapping, int state,
+ unsigned long *pstate, bnx2x_obj_type type,
+ struct bnx2x_credit_pool_obj *macs_pool,
+ struct bnx2x_credit_pool_obj *vlans_pool);
+
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p);
+
+int bnx2x_vlan_mac_move(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p,
+ struct bnx2x_vlan_mac_obj *dest_o);
+
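/*
 * Illustrative sketch only -- not part of this patch. A typical user of the
 * classification interface above adds a single ETH MAC on an already
 * initialized mac_obj and waits for the ramrod completion. The helper name
 * below is hypothetical.
 */
static int example_add_eth_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *mac_obj,
			       const u8 *mac)
{
	struct bnx2x_vlan_mac_ramrod_params p = {NULL};

	p.vlan_mac_obj = mac_obj;
	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);

	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
	__set_bit(BNX2X_ETH_MAC, &p.user_req.vlan_mac_flags);
	memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);

	return bnx2x_config_vlan_mac(bp, &p);
}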
+/********************* RX MODE ****************/
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+ struct bnx2x_rx_mode_obj *o);
+
+/**
+ * Send an RX_MODE ramrod according to the provided parameters.
+ *
+ * @param bp
+ * @param p Command parameters
+ *
+ * @return 0 if the operation was successful and there are no pending completions,
+ * positive number if there are pending completions,
+ * negative if there were errors
+ */
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p);
+
+/****************** MULTICASTS ****************/
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *mcast_obj,
+ u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+ u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ bnx2x_obj_type type);
+
+/**
+ * Configure multicast MACs list. May configure a new list
+ * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
+ * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
+ * configuration, continue to execute the pending commands
+ * (BNX2X_MCAST_CMD_CONT).
+ *
+ * If a previous command is still pending or if the number of MACs to
+ * configure is more than the maximum number of MACs in one command,
+ * the current command will be enqueued to the tail of the
+ * pending commands list.
+ *
+ * @param bp
+ * @param p
+ * @param cmd command to execute: BNX2X_MCAST_CMD_X
+ *
+ * @return 0 if the operation was successful and there are no pending completions,
+ * negative if there were errors, positive if there are pending
+ * completions.
+ */
+int bnx2x_config_mcast(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p, int cmd);
+
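/*
 * Illustrative sketch only -- not part of this patch. Configure a single
 * multicast MAC and wait for completion; the helper name is hypothetical.
 * A real caller would typically allocate the list elements dynamically.
 */
static int example_add_one_mcast(struct bnx2x *bp,
				 struct bnx2x_mcast_obj *mcast_obj, u8 *mac)
{
	struct bnx2x_mcast_ramrod_params p = {NULL};
	struct bnx2x_mcast_list_elem elem;

	p.mcast_obj = mcast_obj;
	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);

	INIT_LIST_HEAD(&p.mcast_list);
	elem.mac = mac;
	list_add_tail(&elem.link, &p.mcast_list);
	p.mcast_list_len = 1;

	return bnx2x_config_mcast(bp, &p, BNX2X_MCAST_CMD_ADD);
}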
+/****************** CREDIT POOL ****************/
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+ struct bnx2x_credit_pool_obj *p, u8 func_id,
+ u8 func_num);
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+ struct bnx2x_credit_pool_obj *p, u8 func_id,
+ u8 func_num);
+
+
+/****************** RSS CONFIGURATION ****************/
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+ struct bnx2x_rss_config_obj *rss_obj,
+ u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+ void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ bnx2x_obj_type type);
+
+/**
+ * Updates RSS configuration according to provided parameters.
+ *
+ * @param bp
+ * @param p
+ *
+ * @return 0 in case of success
+ */
+int bnx2x_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p);
+
+/**
+ * Return the current ind_table configuration.
+ *
+ * @param bp
+ * @param ind_table buffer to fill with the current indirection
+ * table content. Should be at least
+ * T_ETH_INDIRECTION_TABLE_SIZE bytes long.
+ */
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+ u8 *ind_table);
+
+#endif /* BNX2X_SP_VERBS */
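
As a rough caller-side sketch of the RSS interface above (not part of the patch; the helper name, the 7-bit hash mask and the even indirection-table spread are assumptions):

	static int example_config_rss(struct bnx2x *bp,
				      struct bnx2x_rss_config_obj *rss_obj,
				      u8 num_rx_queues)
	{
		struct bnx2x_config_rss_params params = {NULL};
		int i;

		params.rss_obj = rss_obj;
		__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

		/* Regular RSS, hashing IPv4/IPv6 and their TCP 4-tuples */
		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

		/* Number of hash bits to use (assumed value) */
		params.rss_result_mask = 7;

		/* Spread the indirection table evenly across the RX queues */
		for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
			params.ind_table[i] = i % num_rx_queues;

		/* Filling rss_key[] with random data is omitted from this sketch */

		return bnx2x_config_rss(bp, &params);
	}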
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index e535bfa0894..771f6803b23 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -14,120 +14,11 @@
* Statistics and Link management by Yitchak Gertner
*
*/
-#include "bnx2x_cmn.h"
#include "bnx2x_stats.h"
+#include "bnx2x_cmn.h"
-/* Statistics */
-/****************************************************************************
-* Macros
-****************************************************************************/
-
-/* sum[hi:lo] += add[hi:lo] */
-#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
- do { \
- s_lo += a_lo; \
- s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
- } while (0)
-
-/* difference = minuend - subtrahend */
-#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
- do { \
- if (m_lo < s_lo) { \
- /* underflow */ \
- d_hi = m_hi - s_hi; \
- if (d_hi > 0) { \
- /* we can 'loan' 1 */ \
- d_hi--; \
- d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
- } else { \
- /* m_hi <= s_hi */ \
- d_hi = 0; \
- d_lo = 0; \
- } \
- } else { \
- /* m_lo >= s_lo */ \
- if (m_hi < s_hi) { \
- d_hi = 0; \
- d_lo = 0; \
- } else { \
- /* m_hi >= s_hi */ \
- d_hi = m_hi - s_hi; \
- d_lo = m_lo - s_lo; \
- } \
- } \
- } while (0)
-
-#define UPDATE_STAT64(s, t) \
- do { \
- DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
- diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
- pstats->mac_stx[0].t##_hi = new->s##_hi; \
- pstats->mac_stx[0].t##_lo = new->s##_lo; \
- ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
- pstats->mac_stx[1].t##_lo, diff.lo); \
- } while (0)
-
-#define UPDATE_STAT64_NIG(s, t) \
- do { \
- DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
- diff.lo, new->s##_lo, old->s##_lo); \
- ADD_64(estats->t##_hi, diff.hi, \
- estats->t##_lo, diff.lo); \
- } while (0)
-
-/* sum[hi:lo] += add */
-#define ADD_EXTEND_64(s_hi, s_lo, a) \
- do { \
- s_lo += a; \
- s_hi += (s_lo < a) ? 1 : 0; \
- } while (0)
-
-#define UPDATE_EXTEND_STAT(s) \
- do { \
- ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
- pstats->mac_stx[1].s##_lo, \
- new->s); \
- } while (0)
-
-#define UPDATE_EXTEND_TSTAT(s, t) \
- do { \
- diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
- old_tclient->s = tclient->s; \
- ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
- } while (0)
-
-#define UPDATE_EXTEND_USTAT(s, t) \
- do { \
- diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
- old_uclient->s = uclient->s; \
- ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
- } while (0)
-
-#define UPDATE_EXTEND_XSTAT(s, t) \
- do { \
- diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
- old_xclient->s = xclient->s; \
- ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
- } while (0)
-
-/* minuend -= subtrahend */
-#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
- do { \
- DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
- } while (0)
-
-/* minuend[hi:lo] -= subtrahend */
-#define SUB_EXTEND_64(m_hi, m_lo, s) \
- do { \
- SUB_64(m_hi, 0, m_lo, s); \
- } while (0)
-
-#define SUB_EXTEND_USTAT(s, t) \
- do { \
- diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
- SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
- } while (0)
+/* Statistics */
/*
* General service functions
@@ -149,12 +40,16 @@ static inline long bnx2x_hilo(u32 *hiref)
* Init service functions
*/
-
+/* Post the next statistics ramrod. Protect it with the spin lock in
+ * order to ensure the strict order between statistics ramrods
+ * (each ramrod has a sequence number passed in a
+ * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
+ * sent in order).
+ */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
if (!bp->stats_pending) {
- struct common_query_ramrod_data ramrod_data = {0};
- int i, rc;
+ int rc;
spin_lock_bh(&bp->stats_lock);
@@ -163,14 +58,19 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
return;
}
- ramrod_data.drv_counter = bp->stats_counter++;
- ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
- for_each_eth_queue(bp, i)
- ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
+ bp->fw_stats_req->hdr.drv_stats_counter =
+ cpu_to_le16(bp->stats_counter++);
+ DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n",
+ bp->fw_stats_req->hdr.drv_stats_counter);
+
+
+
+ /* send FW stats ramrod */
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
- ((u32 *)&ramrod_data)[1],
- ((u32 *)&ramrod_data)[0], 1);
+ U64_HI(bp->fw_stats_req_mapping),
+ U64_LO(bp->fw_stats_req_mapping),
+ NONE_CONNECTION_TYPE);
if (rc == 0)
bp->stats_pending = 1;
@@ -230,7 +130,7 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
break;
}
cnt--;
- msleep(1);
+ usleep_range(1000, 1000);
}
return 1;
}
@@ -338,69 +238,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
true, DMAE_COMP_GRC);
- if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
-
- mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
- NIG_REG_INGRESS_BMAC0_MEM);
-
- /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
- BIGMAC_REGISTER_TX_STAT_GTBYT */
- dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
- dmae->opcode = opcode;
- if (CHIP_IS_E1x(bp)) {
- dmae->src_addr_lo = (mac_addr +
- BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
- dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
- BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
- } else {
- dmae->src_addr_lo = (mac_addr +
- BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
- dmae->len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
- BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
- }
-
- dmae->src_addr_hi = 0;
- dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
- dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
- dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
- dmae->comp_addr_hi = 0;
- dmae->comp_val = 1;
-
- /* BIGMAC_REGISTER_RX_STAT_GR64 ..
- BIGMAC_REGISTER_RX_STAT_GRIPJ */
- dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
- dmae->opcode = opcode;
- dmae->src_addr_hi = 0;
- if (CHIP_IS_E1x(bp)) {
- dmae->src_addr_lo = (mac_addr +
- BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
- dmae->dst_addr_lo =
- U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
- offsetof(struct bmac1_stats, rx_stat_gr64_lo));
- dmae->dst_addr_hi =
- U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
- offsetof(struct bmac1_stats, rx_stat_gr64_lo));
- dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
- BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
- } else {
- dmae->src_addr_lo =
- (mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
- dmae->dst_addr_lo =
- U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
- offsetof(struct bmac2_stats, rx_stat_gr64_lo));
- dmae->dst_addr_hi =
- U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
- offsetof(struct bmac2_stats, rx_stat_gr64_lo));
- dmae->len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
- BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
- }
-
- dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
- dmae->comp_addr_hi = 0;
- dmae->comp_val = 1;
-
- } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
-
+ /* EMAC is special */
+ if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
@@ -445,46 +284,122 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
dmae->comp_val = 1;
+ } else {
+ u32 tx_src_addr_lo, rx_src_addr_lo;
+ u16 rx_len, tx_len;
+
+ /* configure the params according to MAC type */
+ switch (bp->link_vars.mac_type) {
+ case MAC_TYPE_BMAC:
+ mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
+ NIG_REG_INGRESS_BMAC0_MEM);
+
+ /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
+ BIGMAC_REGISTER_TX_STAT_GTBYT */
+ if (CHIP_IS_E1x(bp)) {
+ tx_src_addr_lo = (mac_addr +
+ BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+ tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
+ BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+ rx_src_addr_lo = (mac_addr +
+ BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+ rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
+ BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+ } else {
+ tx_src_addr_lo = (mac_addr +
+ BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+ tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
+ BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+ rx_src_addr_lo = (mac_addr +
+ BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+ rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
+ BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+ }
+ break;
+
+ case MAC_TYPE_UMAC: /* handled by MSTAT */
+ case MAC_TYPE_XMAC: /* handled by MSTAT */
+ default:
+ mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
+ tx_src_addr_lo = (mac_addr +
+ MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
+ rx_src_addr_lo = (mac_addr +
+ MSTAT_REG_RX_STAT_GR64_LO) >> 2;
+ tx_len = sizeof(bp->slowpath->
+ mac_stats.mstat_stats.stats_tx) >> 2;
+ rx_len = sizeof(bp->slowpath->
+ mac_stats.mstat_stats.stats_rx) >> 2;
+ break;
+ }
+
+ /* TX stats */
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = tx_src_addr_lo;
+ dmae->src_addr_hi = 0;
+ dmae->len = tx_len;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ /* RX stats */
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_hi = 0;
+ dmae->src_addr_lo = rx_src_addr_lo;
+ dmae->dst_addr_lo =
+ U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
+ dmae->dst_addr_hi =
+ U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
+ dmae->len = rx_len;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
}
/* NIG */
+ if (!CHIP_IS_E3(bp)) {
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
+ NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt0_lo));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt0_lo));
+ dmae->len = (2*sizeof(u32)) >> 2;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+
+ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+ dmae->opcode = opcode;
+ dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
+ NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
+ dmae->src_addr_hi = 0;
+ dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt1_lo));
+ dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+ offsetof(struct nig_stats, egress_mac_pkt1_lo));
+ dmae->len = (2*sizeof(u32)) >> 2;
+ dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+ dmae->comp_addr_hi = 0;
+ dmae->comp_val = 1;
+ }
+
dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
- dmae->opcode = opcode;
+ dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+ true, DMAE_COMP_PCI);
dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
NIG_REG_STAT0_BRB_DISCARD) >> 2;
dmae->src_addr_hi = 0;
dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
- dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
- dmae->comp_addr_hi = 0;
- dmae->comp_val = 1;
- dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
- dmae->opcode = opcode;
- dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
- NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
- dmae->src_addr_hi = 0;
- dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
- offsetof(struct nig_stats, egress_mac_pkt0_lo));
- dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
- offsetof(struct nig_stats, egress_mac_pkt0_lo));
- dmae->len = (2*sizeof(u32)) >> 2;
- dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
- dmae->comp_addr_hi = 0;
- dmae->comp_val = 1;
-
- dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
- dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
- true, DMAE_COMP_PCI);
- dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
- NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
- dmae->src_addr_hi = 0;
- dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
- offsetof(struct nig_stats, egress_mac_pkt1_lo));
- dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
- offsetof(struct nig_stats, egress_mac_pkt1_lo));
- dmae->len = (2*sizeof(u32)) >> 2;
dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_val = DMAE_COMP_VAL;
@@ -566,7 +481,8 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
- UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
+
UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
@@ -580,13 +496,13 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
tx_stat_etherstatspkts512octetsto1023octets);
UPDATE_STAT64(tx_stat_gt1518,
tx_stat_etherstatspkts1024octetsto1522octets);
- UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
- UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
- UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
- UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
+ UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+ UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+ UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+ UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
UPDATE_STAT64(tx_stat_gterr,
tx_stat_dot3statsinternalmactransmiterrors);
- UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
+ UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
} else {
struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
@@ -600,7 +516,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
- UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
+ UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
@@ -614,19 +530,96 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
tx_stat_etherstatspkts512octetsto1023octets);
UPDATE_STAT64(tx_stat_gt1518,
tx_stat_etherstatspkts1024octetsto1522octets);
- UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
- UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
- UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
- UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
+ UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+ UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+ UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+ UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
UPDATE_STAT64(tx_stat_gterr,
tx_stat_dot3statsinternalmactransmiterrors);
- UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
+ UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
}
estats->pause_frames_received_hi =
- pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
+ pstats->mac_stx[1].rx_stat_mac_xpf_hi;
estats->pause_frames_received_lo =
- pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
+ pstats->mac_stx[1].rx_stat_mac_xpf_lo;
+
+ estats->pause_frames_sent_hi =
+ pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+ estats->pause_frames_sent_lo =
+ pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+}
+
+static void bnx2x_mstat_stats_update(struct bnx2x *bp)
+{
+ struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+ struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);
+
+ ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
+ ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
+ ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
+ ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
+ ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
+ ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
+ ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
+ ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
+ ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
+ ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
+
+
+ ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
+ ADD_STAT64(stats_tx.tx_gt127,
+ tx_stat_etherstatspkts65octetsto127octets);
+ ADD_STAT64(stats_tx.tx_gt255,
+ tx_stat_etherstatspkts128octetsto255octets);
+ ADD_STAT64(stats_tx.tx_gt511,
+ tx_stat_etherstatspkts256octetsto511octets);
+ ADD_STAT64(stats_tx.tx_gt1023,
+ tx_stat_etherstatspkts512octetsto1023octets);
+ ADD_STAT64(stats_tx.tx_gt1518,
+ tx_stat_etherstatspkts1024octetsto1522octets);
+ ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
+
+ ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
+ ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
+ ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
+
+ ADD_STAT64(stats_tx.tx_gterr,
+ tx_stat_dot3statsinternalmactransmiterrors);
+ ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
+
+ ADD_64(estats->etherstatspkts1024octetsto1522octets_hi,
+ new->stats_tx.tx_gt1518_hi,
+ estats->etherstatspkts1024octetsto1522octets_lo,
+ new->stats_tx.tx_gt1518_lo);
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ new->stats_tx.tx_gt2047_hi,
+ estats->etherstatspktsover1522octets_lo,
+ new->stats_tx.tx_gt2047_lo);
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ new->stats_tx.tx_gt4095_hi,
+ estats->etherstatspktsover1522octets_lo,
+ new->stats_tx.tx_gt4095_lo);
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ new->stats_tx.tx_gt9216_hi,
+ estats->etherstatspktsover1522octets_lo,
+ new->stats_tx.tx_gt9216_lo);
+
+
+ ADD_64(estats->etherstatspktsover1522octets_hi,
+ new->stats_tx.tx_gt16383_hi,
+ estats->etherstatspktsover1522octets_lo,
+ new->stats_tx.tx_gt16383_lo);
+
+ estats->pause_frames_received_hi =
+ pstats->mac_stx[1].rx_stat_mac_xpf_hi;
+ estats->pause_frames_received_lo =
+ pstats->mac_stx[1].rx_stat_mac_xpf_lo;
estats->pause_frames_sent_hi =
pstats->mac_stx[1].tx_stat_outxoffsent_hi;
@@ -702,15 +695,26 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
u32 hi;
} diff;
- if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
+ switch (bp->link_vars.mac_type) {
+ case MAC_TYPE_BMAC:
bnx2x_bmac_stats_update(bp);
+ break;
- else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
+ case MAC_TYPE_EMAC:
bnx2x_emac_stats_update(bp);
+ break;
+
+ case MAC_TYPE_UMAC:
+ case MAC_TYPE_XMAC:
+ bnx2x_mstat_stats_update(bp);
+ break;
- else { /* unreached */
+ case MAC_TYPE_NONE: /* unreached */
BNX2X_ERR("stats updated by DMAE but no MAC active\n");
return -1;
+
+ default: /* unreached */
+ BNX2X_ERR("Unknown MAC type\n");
}
ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
@@ -718,9 +722,12 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
new->brb_truncate - old->brb_truncate);
- UPDATE_STAT64_NIG(egress_mac_pkt0,
+ if (!CHIP_IS_E3(bp)) {
+ UPDATE_STAT64_NIG(egress_mac_pkt0,
etherstatspkts1024octetsto1522octets);
- UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
+ UPDATE_STAT64_NIG(egress_mac_pkt1,
+ etherstatspktsover1522octets);
+ }
memcpy(old, new, sizeof(struct nig_stats));
@@ -746,11 +753,13 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
- struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
struct tstorm_per_port_stats *tport =
- &stats->tstorm_common.port_statistics;
+ &bp->fw_stats_data->port.tstorm_port_statistics;
+ struct tstorm_per_pf_stats *tfunc =
+ &bp->fw_stats_data->pf.tstorm_pf_statistics;
struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
int i;
u16 cur_stats_counter;
@@ -761,6 +770,35 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
cur_stats_counter = bp->stats_counter - 1;
spin_unlock_bh(&bp->stats_lock);
+ /* are storm stats valid? */
+ if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
+ " xstorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->xstats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS, "stats not updated by ustorm"
+ " ustorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->ustats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS, "stats not updated by cstorm"
+ " cstorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->cstats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
+ if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
+ DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
+ " tstorm counter (0x%x) != stats_counter (0x%x)\n",
+ le16_to_cpu(counters->tstats_counter), bp->stats_counter);
+ return -EAGAIN;
+ }
+
memcpy(&(fstats->total_bytes_received_hi),
&(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
sizeof(struct host_func_stats) - 2*sizeof(u32));
@@ -770,94 +808,84 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
estats->etherstatsoverrsizepkts_lo = 0;
estats->no_buff_discard_hi = 0;
estats->no_buff_discard_lo = 0;
+ estats->total_tpa_aggregations_hi = 0;
+ estats->total_tpa_aggregations_lo = 0;
+ estats->total_tpa_aggregated_frames_hi = 0;
+ estats->total_tpa_aggregated_frames_lo = 0;
+ estats->total_tpa_bytes_hi = 0;
+ estats->total_tpa_bytes_lo = 0;
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- int cl_id = fp->cl_id;
- struct tstorm_per_client_stats *tclient =
- &stats->tstorm_common.client_statistics[cl_id];
- struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
- struct ustorm_per_client_stats *uclient =
- &stats->ustorm_common.client_statistics[cl_id];
- struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
- struct xstorm_per_client_stats *xclient =
- &stats->xstorm_common.client_statistics[cl_id];
- struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
+ struct tstorm_per_queue_stats *tclient =
+ &bp->fw_stats_data->queue_stats[i].
+ tstorm_queue_statistics;
+ struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
+ struct ustorm_per_queue_stats *uclient =
+ &bp->fw_stats_data->queue_stats[i].
+ ustorm_queue_statistics;
+ struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
+ struct xstorm_per_queue_stats *xclient =
+ &bp->fw_stats_data->queue_stats[i].
+ xstorm_queue_statistics;
+ struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
u32 diff;
- /* are storm stats valid? */
- if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) {
- DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
- " xstorm counter (0x%x) != stats_counter (0x%x)\n",
- i, xclient->stats_counter, cur_stats_counter + 1);
- return -1;
- }
- if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) {
- DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
- " tstorm counter (0x%x) != stats_counter (0x%x)\n",
- i, tclient->stats_counter, cur_stats_counter + 1);
- return -2;
- }
- if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) {
- DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
- " ustorm counter (0x%x) != stats_counter (0x%x)\n",
- i, uclient->stats_counter, cur_stats_counter + 1);
- return -4;
- }
+ DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, "
+ "bcast_sent 0x%x mcast_sent 0x%x\n",
+ i, xclient->ucast_pkts_sent,
+ xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);
+
+ DP(BNX2X_MSG_STATS, "---------------\n");
+
+ qstats->total_broadcast_bytes_received_hi =
+ le32_to_cpu(tclient->rcv_bcast_bytes.hi);
+ qstats->total_broadcast_bytes_received_lo =
+ le32_to_cpu(tclient->rcv_bcast_bytes.lo);
+ qstats->total_multicast_bytes_received_hi =
+ le32_to_cpu(tclient->rcv_mcast_bytes.hi);
+ qstats->total_multicast_bytes_received_lo =
+ le32_to_cpu(tclient->rcv_mcast_bytes.lo);
+
+ qstats->total_unicast_bytes_received_hi =
+ le32_to_cpu(tclient->rcv_ucast_bytes.hi);
+ qstats->total_unicast_bytes_received_lo =
+ le32_to_cpu(tclient->rcv_ucast_bytes.lo);
+
+ /*
+ * sum all unicast/multicast/broadcast bytes
+ * into total_bytes_received
+ */
qstats->total_bytes_received_hi =
- le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
+ qstats->total_broadcast_bytes_received_hi;
qstats->total_bytes_received_lo =
- le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
+ qstats->total_broadcast_bytes_received_lo;
ADD_64(qstats->total_bytes_received_hi,
- le32_to_cpu(tclient->rcv_multicast_bytes.hi),
+ qstats->total_multicast_bytes_received_hi,
qstats->total_bytes_received_lo,
- le32_to_cpu(tclient->rcv_multicast_bytes.lo));
+ qstats->total_multicast_bytes_received_lo);
ADD_64(qstats->total_bytes_received_hi,
- le32_to_cpu(tclient->rcv_unicast_bytes.hi),
+ qstats->total_unicast_bytes_received_hi,
qstats->total_bytes_received_lo,
- le32_to_cpu(tclient->rcv_unicast_bytes.lo));
-
- SUB_64(qstats->total_bytes_received_hi,
- le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
- qstats->total_bytes_received_lo,
- le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
-
- SUB_64(qstats->total_bytes_received_hi,
- le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
- qstats->total_bytes_received_lo,
- le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
-
- SUB_64(qstats->total_bytes_received_hi,
- le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
- qstats->total_bytes_received_lo,
- le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
+ qstats->total_unicast_bytes_received_lo);
qstats->valid_bytes_received_hi =
qstats->total_bytes_received_hi;
qstats->valid_bytes_received_lo =
qstats->total_bytes_received_lo;
- qstats->error_bytes_received_hi =
- le32_to_cpu(tclient->rcv_error_bytes.hi);
- qstats->error_bytes_received_lo =
- le32_to_cpu(tclient->rcv_error_bytes.lo);
-
- ADD_64(qstats->total_bytes_received_hi,
- qstats->error_bytes_received_hi,
- qstats->total_bytes_received_lo,
- qstats->error_bytes_received_lo);
- UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
+ UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
total_unicast_packets_received);
- UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
+ UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
total_multicast_packets_received);
- UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
+ UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
total_broadcast_packets_received);
- UPDATE_EXTEND_TSTAT(packets_too_big_discard,
+ UPDATE_EXTEND_TSTAT(pkts_too_big_discard,
etherstatsoverrsizepkts);
UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
@@ -871,30 +899,78 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
+ qstats->total_broadcast_bytes_transmitted_hi =
+ le32_to_cpu(xclient->bcast_bytes_sent.hi);
+ qstats->total_broadcast_bytes_transmitted_lo =
+ le32_to_cpu(xclient->bcast_bytes_sent.lo);
+
+ qstats->total_multicast_bytes_transmitted_hi =
+ le32_to_cpu(xclient->mcast_bytes_sent.hi);
+ qstats->total_multicast_bytes_transmitted_lo =
+ le32_to_cpu(xclient->mcast_bytes_sent.lo);
+
+ qstats->total_unicast_bytes_transmitted_hi =
+ le32_to_cpu(xclient->ucast_bytes_sent.hi);
+ qstats->total_unicast_bytes_transmitted_lo =
+ le32_to_cpu(xclient->ucast_bytes_sent.lo);
+ /*
+ * sum all unicast/multicast/broadcast bytes
+ * into total_bytes_transmitted
+ */
qstats->total_bytes_transmitted_hi =
- le32_to_cpu(xclient->unicast_bytes_sent.hi);
+ qstats->total_unicast_bytes_transmitted_hi;
qstats->total_bytes_transmitted_lo =
- le32_to_cpu(xclient->unicast_bytes_sent.lo);
+ qstats->total_unicast_bytes_transmitted_lo;
ADD_64(qstats->total_bytes_transmitted_hi,
- le32_to_cpu(xclient->multicast_bytes_sent.hi),
+ qstats->total_broadcast_bytes_transmitted_hi,
qstats->total_bytes_transmitted_lo,
- le32_to_cpu(xclient->multicast_bytes_sent.lo));
+ qstats->total_broadcast_bytes_transmitted_lo);
ADD_64(qstats->total_bytes_transmitted_hi,
- le32_to_cpu(xclient->broadcast_bytes_sent.hi),
+ qstats->total_multicast_bytes_transmitted_hi,
qstats->total_bytes_transmitted_lo,
- le32_to_cpu(xclient->broadcast_bytes_sent.lo));
+ qstats->total_multicast_bytes_transmitted_lo);
- UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
+ UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
total_unicast_packets_transmitted);
- UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
+ UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
total_multicast_packets_transmitted);
- UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
+ UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
total_broadcast_packets_transmitted);
- old_tclient->checksum_discard = tclient->checksum_discard;
- old_tclient->ttl0_discard = tclient->ttl0_discard;
+ UPDATE_EXTEND_TSTAT(checksum_discard,
+ total_packets_received_checksum_discarded);
+ UPDATE_EXTEND_TSTAT(ttl0_discard,
+ total_packets_received_ttl0_discarded);
+
+ UPDATE_EXTEND_XSTAT(error_drop_pkts,
+ total_transmitted_dropped_packets_error);
+
+ /* TPA aggregations completed */
+ UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations);
+ /* Number of network frames aggregated by TPA */
+ UPDATE_EXTEND_USTAT(coalesced_pkts,
+ total_tpa_aggregated_frames);
+ /* Total number of bytes in completed TPA aggregations */
+ qstats->total_tpa_bytes_lo =
+ le32_to_cpu(uclient->coalesced_bytes.lo);
+ qstats->total_tpa_bytes_hi =
+ le32_to_cpu(uclient->coalesced_bytes.hi);
+
+ /* TPA stats per-function */
+ ADD_64(estats->total_tpa_aggregations_hi,
+ qstats->total_tpa_aggregations_hi,
+ estats->total_tpa_aggregations_lo,
+ qstats->total_tpa_aggregations_lo);
+ ADD_64(estats->total_tpa_aggregated_frames_hi,
+ qstats->total_tpa_aggregated_frames_hi,
+ estats->total_tpa_aggregated_frames_lo,
+ qstats->total_tpa_aggregated_frames_lo);
+ ADD_64(estats->total_tpa_bytes_hi,
+ qstats->total_tpa_bytes_hi,
+ estats->total_tpa_bytes_lo,
+ qstats->total_tpa_bytes_lo);
ADD_64(fstats->total_bytes_received_hi,
qstats->total_bytes_received_hi,
@@ -933,10 +1009,6 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
fstats->valid_bytes_received_lo,
qstats->valid_bytes_received_lo);
- ADD_64(estats->error_bytes_received_hi,
- qstats->error_bytes_received_hi,
- estats->error_bytes_received_lo,
- qstats->error_bytes_received_lo);
ADD_64(estats->etherstatsoverrsizepkts_hi,
qstats->etherstatsoverrsizepkts_hi,
estats->etherstatsoverrsizepkts_lo,
@@ -950,9 +1022,19 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
fstats->total_bytes_received_lo,
estats->rx_stat_ifhcinbadoctets_lo);
+ ADD_64(fstats->total_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ fstats->total_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
+
memcpy(estats, &(fstats->total_bytes_received_hi),
sizeof(struct host_func_stats) - 2*sizeof(u32));
+ ADD_64(estats->error_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->error_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
+
ADD_64(estats->etherstatsoverrsizepkts_hi,
estats->rx_stat_dot3statsframestoolong_hi,
estats->etherstatsoverrsizepkts_lo,
@@ -965,8 +1047,8 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
if (bp->port.pmf) {
estats->mac_filter_discard =
le32_to_cpu(tport->mac_filter_discard);
- estats->xxoverflow_discard =
- le32_to_cpu(tport->xxoverflow_discard);
+ estats->mf_tag_discard =
+ le32_to_cpu(tport->mf_tag_discard);
estats->brb_truncate_discard =
le32_to_cpu(tport->brb_truncate_discard);
estats->mac_discard = le32_to_cpu(tport->mac_discard);
@@ -1023,7 +1105,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
nstats->rx_frame_errors =
bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
- nstats->rx_missed_errors = estats->xxoverflow_discard;
+ nstats->rx_missed_errors = 0;
nstats->rx_errors = nstats->rx_length_errors +
nstats->rx_over_errors +
@@ -1065,10 +1147,27 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
}
}
+static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
+{
+ u32 val;
+
+ if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
+ val = SHMEM2_RD(bp, edebug_driver_if[1]);
+
+ if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
+ return true;
+ }
+
+ return false;
+}
+
static void bnx2x_stats_update(struct bnx2x *bp)
{
u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+ if (bnx2x_edebug_stats_stopped(bp))
+ return;
+
if (*stats_comp != DMAE_COMP_VAL)
return;
@@ -1086,10 +1185,9 @@ static void bnx2x_stats_update(struct bnx2x *bp)
if (netif_msg_timer(bp)) {
struct bnx2x_eth_stats *estats = &bp->eth_stats;
- int i;
+ int i, cos;
- printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
- bp->dev->name,
+ netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
estats->brb_drop_lo, estats->brb_truncate_lo);
for_each_eth_queue(bp, i) {
@@ -1108,20 +1206,32 @@ static void bnx2x_stats_update(struct bnx2x *bp)
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_fp_txdata *txdata;
struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
- struct netdev_queue *txq =
- netdev_get_tx_queue(bp->dev, i);
-
- printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
- " tx pkt(%lu) tx calls (%lu)"
- " %s (Xoff events %u)\n",
- fp->name, bnx2x_tx_avail(fp),
- le16_to_cpu(*fp->tx_cons_sb),
- bnx2x_hilo(&qstats->
- total_unicast_packets_transmitted_hi),
- fp->tx_pkt,
- (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
- qstats->driver_xoff);
+ struct netdev_queue *txq;
+
+ printk(KERN_DEBUG "%s: tx pkt(%lu) (Xoff events %u)",
+ fp->name, bnx2x_hilo(
+ &qstats->total_unicast_packets_transmitted_hi),
+ qstats->driver_xoff);
+
+ for_each_cos_in_tx_queue(fp, cos) {
+ txdata = &fp->txdata[cos];
+ txq = netdev_get_tx_queue(bp->dev,
+ FP_COS_TO_TXQ(fp, cos));
+
+ printk(KERN_DEBUG "%d: tx avail(%4u)"
+ " *tx_cons_sb(%u)"
+ " tx calls (%lu)"
+ " %s\n",
+ cos,
+ bnx2x_tx_avail(bp, txdata),
+ le16_to_cpu(*txdata->tx_cons_sb),
+ txdata->tx_pkt,
+ (netif_tx_queue_stopped(txq) ?
+ "Xoff" : "Xon")
+ );
+ }
}
}
@@ -1149,6 +1259,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
else
dmae->opcode = bnx2x_dmae_opcode_add_comp(
opcode, DMAE_COMP_PCI);
+
dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
@@ -1235,13 +1346,9 @@ static const struct {
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
enum bnx2x_stats_state state;
-
if (unlikely(bp->panic))
return;
-
bnx2x_stats_stm[bp->stats_state][event].action(bp);
-
- /* Protect a state change flow */
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
@@ -1297,7 +1404,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
func_stx = bp->func_stx;
for (vn = VN_0; vn < vn_max; vn++) {
- int mb_idx = !CHIP_IS_E2(bp) ? 2*vn + BP_PORT(bp) : vn;
+ int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn;
bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
bnx2x_func_stats_init(bp);
@@ -1339,12 +1446,97 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
bnx2x_stats_comp(bp);
}
+/**
+ * This function prepares the statistics ramrod data so that later we
+ * only have to increment the statistics counter and send the ramrod
+ * each time we need to.
+ *
+ * @param bp
+ */
+static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
+{
+ int i;
+ struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
+
+ dma_addr_t cur_data_offset;
+ struct stats_query_entry *cur_query_entry;
+
+ stats_hdr->cmd_num = bp->fw_stats_num;
+ stats_hdr->drv_stats_counter = 0;
+
+ /* storm_counters struct contains the counters of completed
+ * statistics requests per storm which are incremented by the FW
+ * each time it completes handling a statistics ramrod. We will
+ * check these counters in the timer handler and discard a
+ * stale (statistics) ramrod completion.
+ */
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, storm_counters);
+
+ stats_hdr->stats_counters_addrs.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ stats_hdr->stats_counters_addrs.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+
+ /* prepare for the first stats ramrod (will be completed with
+ * the counters equal to zero) - init counters to something different.
+ */
+ memset(&bp->fw_stats_data->storm_counters, 0xff,
+ sizeof(struct stats_counter));
+
+ /**** Port FW statistics data ****/
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, port);
+
+ cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_PORT;
+ /* For port query index is a DONT CARE */
+ cur_query_entry->index = BP_PORT(bp);
+ /* For port query funcID is a DONT CARE */
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+ /**** PF FW statistics data ****/
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, pf);
+
+ cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_PF;
+ /* For PF query index is a DONT CARE */
+ cur_query_entry->index = BP_PORT(bp);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+ /**** Clients' queries ****/
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+ for_each_eth_queue(bp, i) {
+ cur_query_entry =
+ &bp->fw_stats_req->
+ query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
+
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+
+ cur_data_offset += sizeof(struct per_queue_stats);
+ }
+}
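
As an aside, a hedged sketch of the addressing pattern used above: each query entry is given the DMA base address of the statistics buffer plus the offsetof() of the section the FW should write to, and the per-queue entries then stride through the queue_stats area. The structures, names and addresses below are made up for illustration and are not the driver's types.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout, loosely modelled on the fw_stats_data idea above. */
struct demo_counters { uint32_t x, u, c, t; };
struct demo_queue_stats { uint32_t rx_pkts, tx_pkts; };

struct demo_fw_stats_data {
	struct demo_counters storm_counters;
	struct demo_queue_stats queue_stats[4];
};

int main(void)
{
	uint64_t base = 0x1000;	/* pretend DMA (bus) address of the buffer */
	int i;

	printf("storm_counters at 0x%llx\n",
	       (unsigned long long)(base +
		offsetof(struct demo_fw_stats_data, storm_counters)));

	/* one address per queue query, advancing by the per-queue block size */
	for (i = 0; i < 4; i++)
		printf("queue %d stats at 0x%llx\n", i,
		       (unsigned long long)(base +
			offsetof(struct demo_fw_stats_data, queue_stats) +
			i * sizeof(struct demo_queue_stats)));
	return 0;
}
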
+
void bnx2x_stats_init(struct bnx2x *bp)
{
- int port = BP_PORT(bp);
+ int /*abs*/port = BP_PORT(bp);
int mb_idx = BP_FW_MB_IDX(bp);
int i;
- struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bp->stats_pending = 0;
bp->executer_idx = 0;
@@ -1362,45 +1554,35 @@ void bnx2x_stats_init(struct bnx2x *bp)
DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
bp->port.port_stx, bp->func_stx);
+ port = BP_PORT(bp);
/* port stats */
memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
bp->port.old_nig_stats.brb_discard =
REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
bp->port.old_nig_stats.brb_truncate =
REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
- REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
- &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
- REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
- &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+ if (!CHIP_IS_E3(bp)) {
+ REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
+ &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+ REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
+ &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+ }
/* function stats */
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- memset(&fp->old_tclient, 0,
- sizeof(struct tstorm_per_client_stats));
- memset(&fp->old_uclient, 0,
- sizeof(struct ustorm_per_client_stats));
- memset(&fp->old_xclient, 0,
- sizeof(struct xstorm_per_client_stats));
- memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
+ memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
+ memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
+ memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
+ memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
}
- /* FW stats are currently collected for ETH clients only */
- for_each_eth_queue(bp, i) {
- /* Set initial stats counter in the stats ramrod data to -1 */
- int cl_id = bp->fp[i].cl_id;
-
- stats->xstorm_common.client_statistics[cl_id].
- stats_counter = 0xffff;
- stats->ustorm_common.client_statistics[cl_id].
- stats_counter = 0xffff;
- stats->tstorm_common.client_statistics[cl_id].
- stats_counter = 0xffff;
- }
+ /* Prepare statistics ramrod data */
+ bnx2x_prep_fw_stats_req(bp);
- memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
- memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
+ memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+ memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
bp->stats_state = STATS_STATE_DISABLED;
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index 45d14d8bc1a..5d8ce2f6afe 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -14,48 +14,11 @@
* Statistics and Link management by Yitchak Gertner
*
*/
-
#ifndef BNX2X_STATS_H
#define BNX2X_STATS_H
#include <linux/types.h>
-struct bnx2x_eth_q_stats {
- u32 total_bytes_received_hi;
- u32 total_bytes_received_lo;
- u32 total_bytes_transmitted_hi;
- u32 total_bytes_transmitted_lo;
- u32 total_unicast_packets_received_hi;
- u32 total_unicast_packets_received_lo;
- u32 total_multicast_packets_received_hi;
- u32 total_multicast_packets_received_lo;
- u32 total_broadcast_packets_received_hi;
- u32 total_broadcast_packets_received_lo;
- u32 total_unicast_packets_transmitted_hi;
- u32 total_unicast_packets_transmitted_lo;
- u32 total_multicast_packets_transmitted_hi;
- u32 total_multicast_packets_transmitted_lo;
- u32 total_broadcast_packets_transmitted_hi;
- u32 total_broadcast_packets_transmitted_lo;
- u32 valid_bytes_received_hi;
- u32 valid_bytes_received_lo;
-
- u32 error_bytes_received_hi;
- u32 error_bytes_received_lo;
- u32 etherstatsoverrsizepkts_hi;
- u32 etherstatsoverrsizepkts_lo;
- u32 no_buff_discard_hi;
- u32 no_buff_discard_lo;
-
- u32 driver_xoff;
- u32 rx_err_discard_pkt;
- u32 rx_skb_alloc_failed;
- u32 hw_csum_err;
-};
-
-#define Q_STATS_OFFSET32(stat_name) \
- (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
-
struct nig_stats {
u32 brb_discard;
u32 brb_packet;
@@ -212,7 +175,7 @@ struct bnx2x_eth_stats {
u32 brb_truncate_lo;
u32 mac_filter_discard;
- u32 xxoverflow_discard;
+ u32 mf_tag_discard;
u32 brb_truncate_discard;
u32 mac_discard;
@@ -222,16 +185,197 @@ struct bnx2x_eth_stats {
u32 hw_csum_err;
u32 nig_timer_max;
+
+ /* TPA */
+ u32 total_tpa_aggregations_hi;
+ u32 total_tpa_aggregations_lo;
+ u32 total_tpa_aggregated_frames_hi;
+ u32 total_tpa_aggregated_frames_lo;
+ u32 total_tpa_bytes_hi;
+ u32 total_tpa_bytes_lo;
+};
+
+
+struct bnx2x_eth_q_stats {
+ u32 total_unicast_bytes_received_hi;
+ u32 total_unicast_bytes_received_lo;
+ u32 total_broadcast_bytes_received_hi;
+ u32 total_broadcast_bytes_received_lo;
+ u32 total_multicast_bytes_received_hi;
+ u32 total_multicast_bytes_received_lo;
+ u32 total_bytes_received_hi;
+ u32 total_bytes_received_lo;
+ u32 total_unicast_bytes_transmitted_hi;
+ u32 total_unicast_bytes_transmitted_lo;
+ u32 total_broadcast_bytes_transmitted_hi;
+ u32 total_broadcast_bytes_transmitted_lo;
+ u32 total_multicast_bytes_transmitted_hi;
+ u32 total_multicast_bytes_transmitted_lo;
+ u32 total_bytes_transmitted_hi;
+ u32 total_bytes_transmitted_lo;
+ u32 total_unicast_packets_received_hi;
+ u32 total_unicast_packets_received_lo;
+ u32 total_multicast_packets_received_hi;
+ u32 total_multicast_packets_received_lo;
+ u32 total_broadcast_packets_received_hi;
+ u32 total_broadcast_packets_received_lo;
+ u32 total_unicast_packets_transmitted_hi;
+ u32 total_unicast_packets_transmitted_lo;
+ u32 total_multicast_packets_transmitted_hi;
+ u32 total_multicast_packets_transmitted_lo;
+ u32 total_broadcast_packets_transmitted_hi;
+ u32 total_broadcast_packets_transmitted_lo;
+ u32 valid_bytes_received_hi;
+ u32 valid_bytes_received_lo;
+
+ u32 etherstatsoverrsizepkts_hi;
+ u32 etherstatsoverrsizepkts_lo;
+ u32 no_buff_discard_hi;
+ u32 no_buff_discard_lo;
+
+ u32 driver_xoff;
+ u32 rx_err_discard_pkt;
+ u32 rx_skb_alloc_failed;
+ u32 hw_csum_err;
+
+ u32 total_packets_received_checksum_discarded_hi;
+ u32 total_packets_received_checksum_discarded_lo;
+ u32 total_packets_received_ttl0_discarded_hi;
+ u32 total_packets_received_ttl0_discarded_lo;
+ u32 total_transmitted_dropped_packets_error_hi;
+ u32 total_transmitted_dropped_packets_error_lo;
+
+ /* TPA */
+ u32 total_tpa_aggregations_hi;
+ u32 total_tpa_aggregations_lo;
+ u32 total_tpa_aggregated_frames_hi;
+ u32 total_tpa_aggregated_frames_lo;
+ u32 total_tpa_bytes_hi;
+ u32 total_tpa_bytes_lo;
};
-#define STATS_OFFSET32(stat_name) \
- (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
+/****************************************************************************
+* Macros
+****************************************************************************/
+
+/* sum[hi:lo] += add[hi:lo] */
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+ do { \
+ s_lo += a_lo; \
+ s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
+ } while (0)
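
For illustration only (this snippet is not part of the patch): the ADD_64 carry rule re-implemented as a plain function and checked against ordinary 64-bit arithmetic; the values and names are invented for the test.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same carry logic as the ADD_64 macro above, written as a function. */
static void add_64(uint32_t *s_hi, uint32_t a_hi, uint32_t *s_lo, uint32_t a_lo)
{
	*s_lo += a_lo;
	*s_hi += a_hi + (*s_lo < a_lo ? 1 : 0);	/* carry out of the low word */
}

int main(void)
{
	uint32_t hi = 0x00000001, lo = 0xfffffff0;	/* value 0x1fffffff0 */
	uint64_t ref = ((uint64_t)hi << 32) | lo;

	add_64(&hi, 0, &lo, 0x20);			/* crosses the 32-bit boundary */
	ref += 0x20;

	assert((((uint64_t)hi << 32) | lo) == ref);
	printf("hi=0x%08x lo=0x%08x\n", hi, lo);	/* hi=0x00000002 lo=0x00000010 */
	return 0;
}
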
+
+/* difference = minuend - subtrahend */
+#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
+ do { \
+ if (m_lo < s_lo) { \
+ /* underflow */ \
+ d_hi = m_hi - s_hi; \
+ if (d_hi > 0) { \
+ /* we can 'loan' 1 */ \
+ d_hi--; \
+ d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+ } else { \
+ /* m_hi <= s_hi */ \
+ d_hi = 0; \
+ d_lo = 0; \
+ } \
+ } else { \
+ /* m_lo >= s_lo */ \
+ if (m_hi < s_hi) { \
+ d_hi = 0; \
+ d_lo = 0; \
+ } else { \
+ /* m_hi >= s_hi */ \
+ d_hi = m_hi - s_hi; \
+ d_lo = m_lo - s_lo; \
+ } \
+ } \
+ } while (0)
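
Also for illustration (a standalone test, not driver code): two DIFF_64 cases worth seeing side by side, a borrow from the high word, and a minuend smaller than the subtrahend, which the macro reports as zero instead of wrapping. The macro body is copied from the patch so the test exercises the same logic.

#include <assert.h>
#include <limits.h>
#include <stdint.h>

#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow in the low word */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

int main(void)
{
	uint32_t d_hi, d_lo;

	/* borrow from the high word: 0x200000005 - 0x10000000a == 0xfffffffb */
	DIFF_64(d_hi, 2u, 1u, d_lo, 0x00000005u, 0x0000000au);
	assert(d_hi == 0 && d_lo == 0xfffffffbu);

	/* minuend smaller than subtrahend: the result is reported as zero */
	DIFF_64(d_hi, 1u, 2u, d_lo, 0x00000005u, 0x00000001u);
	assert(d_hi == 0 && d_lo == 0);
	return 0;
}
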
+
+#define UPDATE_STAT64(s, t) \
+ do { \
+ DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
+ diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
+ pstats->mac_stx[0].t##_hi = new->s##_hi; \
+ pstats->mac_stx[0].t##_lo = new->s##_lo; \
+ ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
+ pstats->mac_stx[1].t##_lo, diff.lo); \
+ } while (0)
+
+#define UPDATE_STAT64_NIG(s, t) \
+ do { \
+ DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
+ diff.lo, new->s##_lo, old->s##_lo); \
+ ADD_64(estats->t##_hi, diff.hi, \
+ estats->t##_lo, diff.lo); \
+ } while (0)
+
+/* sum[hi:lo] += add */
+#define ADD_EXTEND_64(s_hi, s_lo, a) \
+ do { \
+ s_lo += a; \
+ s_hi += (s_lo < a) ? 1 : 0; \
+ } while (0)
+
+#define ADD_STAT64(diff, t) \
+ do { \
+ ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
+ pstats->mac_stx[1].t##_lo, new->diff##_lo); \
+ } while (0)
+
+#define UPDATE_EXTEND_STAT(s) \
+ do { \
+ ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
+ pstats->mac_stx[1].s##_lo, \
+ new->s); \
+ } while (0)
+
+#define UPDATE_EXTEND_TSTAT(s, t) \
+ do { \
+ diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
+ old_tclient->s = tclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
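
A hedged, self-contained sketch of the idea behind the UPDATE_EXTEND_* macros: the firmware counters are 32 bit and may wrap, so only the unsigned difference from the previously seen value is folded into the 64-bit hi/lo total. The struct and helper names below are illustrative, not driver structures.

#include <assert.h>
#include <stdint.h>

/* Illustrative only: accumulate a wrapping 32-bit counter into a 64-bit total. */
struct q_total {
	uint32_t hi;
	uint32_t lo;
};

static void extend_64(struct q_total *t, uint32_t add)
{
	t->lo += add;
	t->hi += (t->lo < add) ? 1 : 0;		/* same carry rule as ADD_EXTEND_64 */
}

static void update_extend(uint32_t fw_now, uint32_t *fw_old, struct q_total *t)
{
	uint32_t diff = fw_now - *fw_old;	/* unsigned math handles the 32-bit wrap */

	*fw_old = fw_now;
	extend_64(t, diff);
}

int main(void)
{
	struct q_total total = { 0, 0 };
	uint32_t old = 0xfffffff0;		/* last value read from firmware */

	update_extend(0x00000010, &old, &total);	/* counter wrapped: delta is 0x20 */
	assert(total.hi == 0 && total.lo == 0x20);
	return 0;
}
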
+
+#define UPDATE_EXTEND_USTAT(s, t) \
+ do { \
+ diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+ old_uclient->s = uclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+#define UPDATE_EXTEND_XSTAT(s, t) \
+ do { \
+ diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
+ old_xclient->s = xclient->s; \
+ ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
+/* minuend -= subtrahend */
+#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+ do { \
+ DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+ } while (0)
+
+/* minuend[hi:lo] -= subtrahend */
+#define SUB_EXTEND_64(m_hi, m_lo, s) \
+ do { \
+ SUB_64(m_hi, 0, m_lo, s); \
+ } while (0)
+
+#define SUB_EXTEND_USTAT(s, t) \
+ do { \
+ diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+ SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+ } while (0)
+
-/* Forward declaration */
+/* forward */
struct bnx2x;
void bnx2x_stats_init(struct bnx2x *bp);
-extern const u32 dmae_reg_go_c[];
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
#endif /* BNX2X_STATS_H */
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c7537abca4f..a047eb973e3 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -262,7 +262,7 @@ static inline u32 __get_agg_selection_mode(struct port *port)
if (bond == NULL)
return BOND_AD_STABLE;
- return BOND_AD_INFO(bond).agg_select_mode;
+ return bond->params.ad_select;
}
/**
@@ -1859,7 +1859,6 @@ static void ad_marker_response_received(struct bond_marker *marker,
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
{
BOND_AD_INFO(bond).agg_select_timer = timeout;
- BOND_AD_INFO(bond).agg_select_mode = bond->params.ad_select;
}
static u16 aggregator_identifier;
@@ -1868,11 +1867,10 @@ static u16 aggregator_identifier;
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
* @tick_resolution: tick duration (millisecond resolution)
- * @lacp_fast: boolean. whether fast periodic should be used
*
* Can be called only after the mac address of the bond is set.
*/
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fast)
+void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
{
// check that the bond is not initialized yet
if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr),
@@ -1880,7 +1878,6 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fas
aggregator_identifier = 0;
- BOND_AD_INFO(bond).lacp_fast = lacp_fast;
BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
@@ -1918,7 +1915,7 @@ int bond_3ad_bind_slave(struct slave *slave)
// port initialization
port = &(SLAVE_AD_INFO(slave).port);
- ad_initialize_port(port, BOND_AD_INFO(bond).lacp_fast);
+ ad_initialize_port(port, bond->params.lacp_fast);
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave).id;
@@ -2345,8 +2342,17 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
*/
int bond_3ad_set_carrier(struct bonding *bond)
{
- if (__get_active_agg(&(SLAVE_AD_INFO(bond->first_slave).aggregator))) {
- if (!netif_carrier_ok(bond->dev)) {
+ struct aggregator *active;
+
+ active = __get_active_agg(&(SLAVE_AD_INFO(bond->first_slave).aggregator));
+ if (active) {
+ /* are enough slaves available to consider link up? */
+ if (active->num_of_ports < bond->params.min_links) {
+ if (netif_carrier_ok(bond->dev)) {
+ netif_carrier_off(bond->dev);
+ return 1;
+ }
+ } else if (!netif_carrier_ok(bond->dev)) {
netif_carrier_on(bond->dev);
return 1;
}
@@ -2473,3 +2479,34 @@ void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
read_unlock(&bond->lock);
}
+
+/*
+ * When the lacp_rate parameter is modified via sysfs,
+ * update the actor_oper_port_state of each port.
+ *
+ * Hold slave->state_machine_lock so we can modify
+ * port->actor_oper_port_state no matter whether the
+ * bond is up or down.
+ */
+void bond_3ad_update_lacp_rate(struct bonding *bond)
+{
+ int i;
+ struct slave *slave;
+ struct port *port = NULL;
+ int lacp_fast;
+
+ read_lock(&bond->lock);
+ lacp_fast = bond->params.lacp_fast;
+
+ bond_for_each_slave(bond, slave, i) {
+ port = &(SLAVE_AD_INFO(slave).port);
+ __get_state_machine_lock(port);
+ if (lacp_fast)
+ port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
+ else
+ port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT;
+ __release_state_machine_lock(port);
+ }
+
+ read_unlock(&bond->lock);
+}
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 0ee3f1632c4..235b2cc58b2 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -253,11 +253,6 @@ struct ad_system {
struct ad_bond_info {
struct ad_system system; /* 802.3ad system structure */
u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
- u32 agg_select_mode; // Mode of selection of active aggregator(bandwidth/count)
- int lacp_fast; /* whether fast periodic tx should be
- * requested
- */
- struct timer_list ad_timer;
};
struct ad_slave_info {
@@ -269,7 +264,7 @@ struct ad_slave_info {
};
// ================= AD Exported functions to the main bonding code ==================
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fast);
+void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
int bond_3ad_bind_slave(struct slave *slave);
void bond_3ad_unbind_slave(struct slave *slave);
void bond_3ad_state_machine_handler(struct work_struct *);
@@ -282,5 +277,6 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
+void bond_3ad_update_lacp_rate(struct bonding *bond);
#endif //__BOND_3AD_H__
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 2df9276720a..7f8b20a34ee 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -635,7 +635,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
client_info->ntt = 0;
}
- if (bond->vlgrp) {
+ if (bond_vlan_used(bond)) {
if (!vlan_get_tag(skb, &client_info->vlan_id))
client_info->tag = 1;
}
@@ -847,7 +847,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
skb->priority = TC_PRIO_CONTROL;
skb->dev = slave->dev;
- if (bond->vlgrp) {
+ if (bond_vlan_used(bond)) {
struct vlan_entry *vlan;
vlan = bond_next_vlan(bond,
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 84fbd4ebd77..027a0ee7d85 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -183,10 +183,10 @@ static int bond_inet6addr_event(struct notifier_block *this,
}
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- if (!bond->vlgrp)
- continue;
- vlan_dev = vlan_group_get_device(bond->vlgrp,
- vlan->vlan_id);
+ rcu_read_lock();
+ vlan_dev = __vlan_find_dev_deep(bond->dev,
+ vlan->vlan_id);
+ rcu_read_unlock();
if (vlan_dev == event_dev) {
switch (event) {
case NETDEV_UP:
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index eafe44a528a..43f2ea54108 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -98,6 +98,7 @@ static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
+static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval = BOND_LINK_ARP_INTERV;
@@ -150,6 +151,9 @@ module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; "
"0 for stable (default), 1 for bandwidth, "
"2 for count");
+module_param(min_links, int, 0);
+MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");
+
module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
"0 for layer 2 (default), 1 for layer 3+4, "
@@ -329,16 +333,6 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
kfree(vlan);
- if (list_empty(&bond->vlan_list) &&
- (bond->slave_cnt == 0)) {
- /* Last VLAN removed and no slaves, so
- * restore block on adding VLANs. This will
- * be removed once new slaves that are not
- * VLAN challenged will be added.
- */
- bond->dev->features |= NETIF_F_VLAN_CHALLENGED;
- }
-
res = 0;
goto out;
}
@@ -414,9 +408,8 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
}
/*
- * In the following 3 functions, bond_vlan_rx_register(), bond_vlan_rx_add_vid
- * and bond_vlan_rx_kill_vid, We don't protect the slave list iteration with a
- * lock because:
+ * In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
+ * We don't protect the slave list iteration with a lock because:
* a. This operation is performed in IOCTL context,
* b. The operation is protected by the RTNL semaphore in the 8021q code,
* c. Holding a lock with BH disabled while directly calling a base driver
@@ -432,33 +425,6 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
*/
/**
- * bond_vlan_rx_register - Propagates registration to slaves
- * @bond_dev: bonding net device that got called
- * @grp: vlan group being registered
- */
-static void bond_vlan_rx_register(struct net_device *bond_dev,
- struct vlan_group *grp)
-{
- struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
- int i;
-
- write_lock_bh(&bond->lock);
- bond->vlgrp = grp;
- write_unlock_bh(&bond->lock);
-
- bond_for_each_slave(bond, slave, i) {
- struct net_device *slave_dev = slave->dev;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
- slave_ops->ndo_vlan_rx_register) {
- slave_ops->ndo_vlan_rx_register(slave_dev, grp);
- }
- }
-}
-
-/**
* bond_vlan_rx_add_vid - Propagates adding an id to slaves
* @bond_dev: bonding net device that got called
* @vid: vlan id being added
@@ -495,7 +461,6 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
- struct net_device *vlan_dev;
int i, res;
bond_for_each_slave(bond, slave, i) {
@@ -504,12 +469,7 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
slave_ops->ndo_vlan_rx_kill_vid) {
- /* Save and then restore vlan_dev in the grp array,
- * since the slave's driver might clear it.
- */
- vlan_dev = vlan_group_get_device(bond->vlgrp, vid);
slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid);
- vlan_group_set_device(bond->vlgrp, vid, vlan_dev);
}
}
@@ -525,13 +485,6 @@ static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *sla
struct vlan_entry *vlan;
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
- if (!bond->vlgrp)
- return;
-
- if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
- slave_ops->ndo_vlan_rx_register)
- slave_ops->ndo_vlan_rx_register(slave_dev, bond->vlgrp);
-
if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
!(slave_ops->ndo_vlan_rx_add_vid))
return;
@@ -545,30 +498,16 @@ static void bond_del_vlans_from_slave(struct bonding *bond,
{
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct vlan_entry *vlan;
- struct net_device *vlan_dev;
-
- if (!bond->vlgrp)
- return;
if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
!(slave_ops->ndo_vlan_rx_kill_vid))
- goto unreg;
+ return;
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
if (!vlan->vlan_id)
continue;
- /* Save and then restore vlan_dev in the grp array,
- * since the slave's driver might clear it.
- */
- vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
- vlan_group_set_device(bond->vlgrp, vlan->vlan_id, vlan_dev);
}
-
-unreg:
- if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
- slave_ops->ndo_vlan_rx_register)
- slave_ops->ndo_vlan_rx_register(slave_dev, NULL);
}
/*------------------------------- Link status -------------------------------*/
@@ -634,15 +573,8 @@ static int bond_update_speed_duplex(struct slave *slave)
return -1;
slave_speed = ethtool_cmd_speed(&etool);
- switch (slave_speed) {
- case SPEED_10:
- case SPEED_100:
- case SPEED_1000:
- case SPEED_10000:
- break;
- default:
+ if (slave_speed == 0 || slave_speed == ((__u32) -1))
return -1;
- }
switch (etool.duplex) {
case DUPLEX_FULL:
@@ -849,13 +781,13 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
__bond_resend_igmp_join_requests(bond->dev);
/* rejoin all groups on vlan devices */
- if (bond->vlgrp) {
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- vlan_dev = vlan_group_get_device(bond->vlgrp,
- vlan->vlan_id);
- if (vlan_dev)
- __bond_resend_igmp_join_requests(vlan_dev);
- }
+ list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+ rcu_read_lock();
+ vlan_dev = __vlan_find_dev_deep(bond->dev,
+ vlan->vlan_id);
+ rcu_read_unlock();
+ if (vlan_dev)
+ __bond_resend_igmp_join_requests(vlan_dev);
}
if (--bond->igmp_retrans > 0)
@@ -1428,9 +1360,9 @@ out:
return features;
}
-#define BOND_VLAN_FEATURES (NETIF_F_ALL_TX_OFFLOADS | \
- NETIF_F_SOFT_FEATURES | \
- NETIF_F_LRO)
+#define BOND_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_HIGHDMA | NETIF_F_LRO)
static void bond_compute_features(struct bonding *bond)
{
@@ -1570,7 +1502,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* no need to lock since we're protected by rtnl_lock */
if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
- if (bond->vlgrp) {
+ if (bond_vlan_used(bond)) {
pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
bond_dev->name, slave_dev->name, bond_dev->name);
return -EPERM;
@@ -1625,8 +1557,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_dev->type != ARPHRD_ETHER)
bond_setup_by_slave(bond_dev, slave_dev);
- else
+ else {
ether_setup(bond_dev);
+ bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ }
netdev_bonding_change(bond_dev,
NETDEV_POST_TYPE_CHANGE);
@@ -1856,8 +1790,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
- bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL,
- bond->params.lacp_fast);
+ bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
} else {
SLAVE_AD_INFO(new_slave).id =
SLAVE_AD_INFO(new_slave->prev).id + 1;
@@ -2079,7 +2012,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
*/
memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
- if (bond->vlgrp) {
+ if (bond_vlan_used(bond)) {
pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
bond_dev->name, bond_dev->name);
pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2261,7 +2194,7 @@ static int bond_release_all(struct net_device *bond_dev)
*/
memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
- if (bond->vlgrp) {
+ if (bond_vlan_used(bond)) {
pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
bond_dev->name, bond_dev->name);
pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2699,7 +2632,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
if (!targets[i])
break;
pr_debug("basa: target %x\n", targets[i]);
- if (!bond->vlgrp) {
+ if (!bond_vlan_used(bond)) {
pr_debug("basa: empty vlan: arp_send\n");
bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
bond->master_ip, 0);
@@ -2734,7 +2667,10 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
vlan_id = 0;
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
+ rcu_read_lock();
+ vlan_dev = __vlan_find_dev_deep(bond->dev,
+ vlan->vlan_id);
+ rcu_read_unlock();
if (vlan_dev == rt->dst.dev) {
vlan_id = vlan->vlan_id;
pr_debug("basa: vlan match on %s %d\n",
@@ -3395,9 +3331,8 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
}
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- if (!bond->vlgrp)
- continue;
- vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
+ vlan_dev = __vlan_find_dev_deep(bond->dev,
+ vlan->vlan_id);
if (vlan_dev == event_dev) {
switch (event) {
case NETDEV_UP:
@@ -3456,7 +3391,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
int layer4_xor = 0;
if (skb->protocol == htons(ETH_P_IP)) {
- if (!(iph->frag_off & htons(IP_MF|IP_OFFSET)) &&
+ if (!ip_is_fragment(iph) &&
(iph->protocol == IPPROTO_TCP ||
iph->protocol == IPPROTO_UDP)) {
layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1)));
@@ -3484,9 +3419,27 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
static int bond_open(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
+ int i;
bond->kill_timers = 0;
+ /* reset slave->backup and slave->inactive */
+ read_lock(&bond->lock);
+ if (bond->slave_cnt > 0) {
+ read_lock(&bond->curr_slave_lock);
+ bond_for_each_slave(bond, slave, i) {
+ if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
+ && (slave != bond->curr_active_slave)) {
+ bond_set_slave_inactive_flags(slave);
+ } else {
+ bond_set_slave_active_flags(slave);
+ }
+ }
+ read_unlock(&bond->curr_slave_lock);
+ }
+ read_unlock(&bond->lock);
+
INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
if (bond_is_lb(bond)) {
@@ -4349,10 +4302,9 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_do_ioctl = bond_do_ioctl,
.ndo_set_multicast_list = bond_set_multicast_list,
.ndo_change_mtu = bond_change_mtu,
- .ndo_set_mac_address = bond_set_mac_address,
+ .ndo_set_mac_address = bond_set_mac_address,
.ndo_neigh_setup = bond_neigh_setup,
- .ndo_vlan_rx_register = bond_vlan_rx_register,
- .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
+ .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_netpoll_setup = bond_netpoll_setup,
@@ -4398,7 +4350,7 @@ static void bond_setup(struct net_device *bond_dev)
bond_dev->tx_queue_len = 0;
bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
bond_dev->priv_flags |= IFF_BONDING;
- bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
/* At first, we block adding VLANs. That's the only way to
* prevent problems that occur when adding VLANs over an
@@ -4759,7 +4711,7 @@ static int bond_check_params(struct bond_params *params)
/* miimon and arp_interval not set, we need one so things
* work as expected, see bonding.txt for details
*/
- pr_warning("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
+ pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
}
if (primary && !USES_PRIMARY(bond_mode)) {
@@ -4816,6 +4768,7 @@ static int bond_check_params(struct bond_params *params)
params->tx_queues = tx_queues;
params->all_slaves_active = all_slaves_active;
params->resend_igmp = resend_igmp;
+ params->min_links = min_links;
if (primary) {
strncpy(params->primary, primary, IFNAMSIZ);
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index c97307ddd1c..95de93b9038 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -125,6 +125,7 @@ static void bond_info_show_master(struct seq_file *seq)
seq_puts(seq, "\n802.3ad info\n");
seq_printf(seq, "LACP rate: %s\n",
(bond->params.lacp_fast) ? "fast" : "slow");
+ seq_printf(seq, "Min links: %d\n", bond->params.min_links);
seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
ad_select_tbl[bond->params.ad_select].modename);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 88fcb25e554..2dfb4bf9008 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -804,6 +804,7 @@ static ssize_t bonding_store_lacp(struct device *d,
if ((new_value == 1) || (new_value == 0)) {
bond->params.lacp_fast = new_value;
+ bond_3ad_update_lacp_rate(bond);
pr_info("%s: Setting LACP rate to %s (%d).\n",
bond->dev->name, bond_lacp_tbl[new_value].modename,
new_value);
@@ -818,6 +819,38 @@ out:
static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
bonding_show_lacp, bonding_store_lacp);
+static ssize_t bonding_show_min_links(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+
+ return sprintf(buf, "%d\n", bond->params.min_links);
+}
+
+static ssize_t bonding_store_min_links(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bonding *bond = to_bond(d);
+ int ret;
+ unsigned int new_value;
+
+ ret = kstrtouint(buf, 0, &new_value);
+ if (ret < 0) {
+ pr_err("%s: Ignoring invalid min links value %s.\n",
+ bond->dev->name, buf);
+ return ret;
+ }
+
+ pr_info("%s: Setting min links value to %u\n",
+ bond->dev->name, new_value);
+ bond->params.min_links = new_value;
+ return count;
+}
+static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
+ bonding_show_min_links, bonding_store_min_links);
+
static ssize_t bonding_show_ad_select(struct device *d,
struct device_attribute *attr,
char *buf)
@@ -992,6 +1025,7 @@ static ssize_t bonding_store_primary(struct device *d,
int i;
struct slave *slave;
struct bonding *bond = to_bond(d);
+ char ifname[IFNAMSIZ];
if (!rtnl_trylock())
return restart_syscall();
@@ -1002,32 +1036,33 @@ static ssize_t bonding_store_primary(struct device *d,
if (!USES_PRIMARY(bond->params.mode)) {
pr_info("%s: Unable to set primary slave; %s is in mode %d\n",
bond->dev->name, bond->dev->name, bond->params.mode);
- } else {
- bond_for_each_slave(bond, slave, i) {
- if (strnicmp
- (slave->dev->name, buf,
- strlen(slave->dev->name)) == 0) {
- pr_info("%s: Setting %s as primary slave.\n",
- bond->dev->name, slave->dev->name);
- bond->primary_slave = slave;
- strcpy(bond->params.primary, slave->dev->name);
- bond_select_active_slave(bond);
- goto out;
- }
- }
+ goto out;
+ }
- /* if we got here, then we didn't match the name of any slave */
+ sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
- if (strlen(buf) == 0 || buf[0] == '\n') {
- pr_info("%s: Setting primary slave to None.\n",
- bond->dev->name);
- bond->primary_slave = NULL;
- bond_select_active_slave(bond);
- } else {
- pr_info("%s: Unable to set %.*s as primary slave as it is not a slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
+ /* check to see if we are clearing primary */
+ if (!strlen(ifname) || buf[0] == '\n') {
+ pr_info("%s: Setting primary slave to None.\n",
+ bond->dev->name);
+ bond->primary_slave = NULL;
+ bond_select_active_slave(bond);
+ goto out;
+ }
+
+ bond_for_each_slave(bond, slave, i) {
+ if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
+ pr_info("%s: Setting %s as primary slave.\n",
+ bond->dev->name, slave->dev->name);
+ bond->primary_slave = slave;
+ strcpy(bond->params.primary, slave->dev->name);
+ bond_select_active_slave(bond);
+ goto out;
}
}
+
+ pr_info("%s: Unable to set %.*s as primary slave.\n",
+ bond->dev->name, (int)strlen(buf) - 1, buf);
out:
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
@@ -1162,6 +1197,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
struct slave *old_active = NULL;
struct slave *new_active = NULL;
struct bonding *bond = to_bond(d);
+ char ifname[IFNAMSIZ];
if (!rtnl_trylock())
return restart_syscall();
@@ -1170,56 +1206,62 @@ static ssize_t bonding_store_active_slave(struct device *d,
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
- if (!USES_PRIMARY(bond->params.mode))
+ if (!USES_PRIMARY(bond->params.mode)) {
pr_info("%s: Unable to change active slave; %s is in mode %d\n",
bond->dev->name, bond->dev->name, bond->params.mode);
- else {
- bond_for_each_slave(bond, slave, i) {
- if (strnicmp
- (slave->dev->name, buf,
- strlen(slave->dev->name)) == 0) {
- old_active = bond->curr_active_slave;
- new_active = slave;
- if (new_active == old_active) {
- /* do nothing */
- pr_info("%s: %s is already the current active slave.\n",
+ goto out;
+ }
+
+ sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
+
+ /* check to see if we are clearing active */
+ if (!strlen(ifname) || buf[0] == '\n') {
+ pr_info("%s: Clearing current active slave.\n",
+ bond->dev->name);
+ bond->curr_active_slave = NULL;
+ bond_select_active_slave(bond);
+ goto out;
+ }
+
+ bond_for_each_slave(bond, slave, i) {
+ if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
+ old_active = bond->curr_active_slave;
+ new_active = slave;
+ if (new_active == old_active) {
+ /* do nothing */
+ pr_info("%s: %s is already the current"
+ " active slave.\n",
+ bond->dev->name,
+ slave->dev->name);
+ goto out;
+ }
+ else {
+ if ((new_active) &&
+ (old_active) &&
+ (new_active->link == BOND_LINK_UP) &&
+ IS_UP(new_active->dev)) {
+ pr_info("%s: Setting %s as active"
+ " slave.\n",
bond->dev->name,
slave->dev->name);
- goto out;
+ bond_change_active_slave(bond,
+ new_active);
}
else {
- if ((new_active) &&
- (old_active) &&
- (new_active->link == BOND_LINK_UP) &&
- IS_UP(new_active->dev)) {
- pr_info("%s: Setting %s as active slave.\n",
- bond->dev->name,
- slave->dev->name);
- bond_change_active_slave(bond, new_active);
- }
- else {
- pr_info("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
- bond->dev->name,
- slave->dev->name,
- slave->dev->name);
- }
- goto out;
+ pr_info("%s: Could not set %s as"
+ " active slave; either %s is"
+ " down or the link is down.\n",
+ bond->dev->name,
+ slave->dev->name,
+ slave->dev->name);
}
+ goto out;
}
}
-
- /* if we got here, then we didn't match the name of any slave */
-
- if (strlen(buf) == 0 || buf[0] == '\n') {
- pr_info("%s: Setting active slave to None.\n",
- bond->dev->name);
- bond->primary_slave = NULL;
- bond_select_active_slave(bond);
- } else {
- pr_info("%s: Unable to set %.*s as active slave as it is not a slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
- }
}
+
+ pr_info("%s: Unable to set %.*s as active slave.\n",
+ bond->dev->name, (int)strlen(buf) - 1, buf);
out:
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
@@ -1600,6 +1642,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_queue_id.attr,
&dev_attr_all_slaves_active.attr,
&dev_attr_resend_igmp.attr,
+ &dev_attr_min_links.attr,
NULL,
};
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index ea1d005be92..43526a2d275 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -147,6 +147,7 @@ struct bond_params {
int updelay;
int downdelay;
int lacp_fast;
+ unsigned int min_links;
int ad_select;
char primary[IFNAMSIZ];
int primary_reselect;
@@ -239,8 +240,6 @@ struct bonding {
struct alb_bond_info alb_info;
struct bond_params params;
struct list_head vlan_list;
- struct vlan_group *vlgrp;
- struct packet_type arp_mon_pt;
struct workqueue_struct *wq;
struct delayed_work mii_work;
struct delayed_work arp_work;
@@ -253,6 +252,11 @@ struct bonding {
#endif /* CONFIG_DEBUG_FS */
};
+static inline bool bond_vlan_used(struct bonding *bond)
+{
+ return !list_empty(&bond->vlan_list);
+}
+
#define bond_slave_get_rcu(dev) \
((struct slave *) rcu_dereference(dev->rx_handler_data))
diff --git a/drivers/net/bsd_comp.c b/drivers/net/bsd_comp.c
index 6e99d80ec40..a9b759add18 100644
--- a/drivers/net/bsd_comp.c
+++ b/drivers/net/bsd_comp.c
@@ -201,7 +201,7 @@ extern void ppp_unregister_compressor (struct compressor *cp);
#define LAST 255
#define MAXCODE(b) ((1 << (b)) - 1)
-#define BADCODEM1 MAXCODE(MAX_BSD_BITS);
+#define BADCODEM1 MAXCODE(MAX_BSD_BITS)
#define BSD_HASH(prefix,suffix,hshift) ((((unsigned long)(suffix))<<(hshift)) \
^ (unsigned long)(prefix))
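
For context on the one-character BADCODEM1 change above, here is an illustrative snippet (not taken from the driver) showing why the trailing semicolon matters: with the old definition, using the macro inside an expression expands to a stray ';' and fails to compile.

#include <assert.h>

#define MAX_BSD_BITS	9
#define MAXCODE(b)	((1 << (b)) - 1)
#define BADCODEM1	MAXCODE(MAX_BSD_BITS)	/* fixed form: no trailing ';' */

int main(void)
{
	/*
	 * With the old "#define BADCODEM1 MAXCODE(MAX_BSD_BITS);" the line
	 * below would expand to "assert(code > ((1 << 9) - 1);)" and not compile.
	 */
	int code = 600;

	assert(code > BADCODEM1);
	return 0;
}
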
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 09ed3f42d67..abf4d7a9dcc 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -38,3 +38,12 @@ config CAIF_SHM
default n
---help---
The CAIF shared memory protocol driver for the STE UX5500 platform.
+
+config CAIF_HSI
+ tristate "CAIF HSI transport driver"
+ depends on CAIF
+ default n
+ ---help---
+ The CAIF low-level driver for CAIF over HSI.
+ Be aware that if you enable this, you also need to
+ enable a low-level HSI driver.
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 9560b9d624b..91dff861560 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -10,3 +10,6 @@ obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
# Shared memory
caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
obj-$(CONFIG_CAIF_SHM) += caif_shm.o
+
+# HSI interface
+obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
new file mode 100644
index 00000000000..b41c2fced0a
--- /dev/null
+++ b/drivers/net/caif/caif_hsi.c
@@ -0,0 +1,1219 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author: Daniel Martensson / daniel.martensson@stericsson.com
+ * Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/if_arp.h>
+#include <linux/timer.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/caif_hsi.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
+MODULE_DESCRIPTION("CAIF HSI driver");
+
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
+ (((pow)-((x)&((pow)-1)))))
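
A hedged, standalone illustration of the helper above: PAD_POW2(x, pow) yields the number of bytes needed to round x up to the next multiple of pow (which must be a power of two), and zero when x is already aligned. The test values are arbitrary.

#include <assert.h>

/* Same expression as the driver's PAD_POW2 helper. */
#define PAD_POW2(x, pow) ((((x) & ((pow) - 1)) == 0) ? 0 : \
			  ((pow) - ((x) & ((pow) - 1))))

int main(void)
{
	assert(PAD_POW2(0, 4) == 0);	/* already aligned */
	assert(PAD_POW2(5, 4) == 3);	/* 5 -> 8 needs 3 bytes of padding */
	assert(PAD_POW2(8, 4) == 0);
	assert(PAD_POW2(13, 8) == 3);	/* 13 -> 16 */
	return 0;
}
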
+
+/*
+ * HSI padding options.
+ * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
+ */
+static int hsi_head_align = 4;
+module_param(hsi_head_align, int, S_IRUGO);
+MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");
+
+static int hsi_tail_align = 4;
+module_param(hsi_tail_align, int, S_IRUGO);
+MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");
+
+/*
+ * HSI link layer flowcontrol thresholds.
+ * Warning: A high threshold value might increase throughput but it will at
+ * the same time prevent channel prioritization and increase the risk of
+ * flooding the modem. The high threshold should be above the low.
+ */
+static int hsi_high_threshold = 100;
+module_param(hsi_high_threshold, int, S_IRUGO);
+MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");
+
+static int hsi_low_threshold = 50;
+module_param(hsi_low_threshold, int, S_IRUGO);
+MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");
+
+#define ON 1
+#define OFF 0
+
+/*
+ * Threshold values for the HSI packet queue. Flowcontrol will be asserted
+ * when the number of packets exceeds HIGH_WATER_MARK. It will not be
+ * de-asserted before the number of packets drops below LOW_WATER_MARK.
+ */
+#define LOW_WATER_MARK hsi_low_threshold
+#define HIGH_WATER_MARK hsi_high_threshold
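
To make the threshold comment above concrete, a minimal sketch of the hysteresis it describes, assuming a hypothetical helper (demo_flow_off is not a driver function): flow control is asserted once the queue length exceeds the high mark and released only after it falls back to or below the low mark, so the state does not toggle around a single value.

#include <assert.h>
#include <stdbool.h>

#define DEMO_LOW_WATER_MARK	50
#define DEMO_HIGH_WATER_MARK	100

/* Returns the new flow-off state for a given queue length (hypothetical helper). */
static bool demo_flow_off(bool flow_off, int qlen)
{
	if (!flow_off && qlen > DEMO_HIGH_WATER_MARK)
		return true;		/* assert flow off above the high mark */
	if (flow_off && qlen <= DEMO_LOW_WATER_MARK)
		return false;		/* release only at or below the low mark */
	return flow_off;		/* in between: keep the previous state */
}

int main(void)
{
	bool off = false;

	off = demo_flow_off(off, 101);	/* crosses the high mark */
	assert(off);
	off = demo_flow_off(off, 75);	/* between the marks: still off */
	assert(off);
	off = demo_flow_off(off, 40);	/* drops under the low mark */
	assert(!off);
	return 0;
}
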
+
+static LIST_HEAD(cfhsi_list);
+static spinlock_t cfhsi_list_lock;
+
+static void cfhsi_inactivity_tout(unsigned long arg)
+{
+ struct cfhsi *cfhsi = (struct cfhsi *)arg;
+
+ dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ __func__);
+
+ /* Schedule power down work queue. */
+ if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+ queue_work(cfhsi->wq, &cfhsi->wake_down_work);
+}
+
+static void cfhsi_abort_tx(struct cfhsi *cfhsi)
+{
+ struct sk_buff *skb;
+
+ for (;;) {
+ spin_lock_bh(&cfhsi->lock);
+ skb = skb_dequeue(&cfhsi->qhead);
+ if (!skb)
+ break;
+
+ cfhsi->ndev->stats.tx_errors++;
+ cfhsi->ndev->stats.tx_dropped++;
+ spin_unlock_bh(&cfhsi->lock);
+ kfree_skb(skb);
+ }
+ cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
+ if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+ mod_timer(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
+ spin_unlock_bh(&cfhsi->lock);
+}
+
+static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
+{
+ char buffer[32]; /* Any reasonable value */
+ size_t fifo_occupancy;
+ int ret;
+
+ dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ __func__);
+
+
+ ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
+ if (ret) {
+ dev_warn(&cfhsi->ndev->dev,
+ "%s: can't wake up HSI interface: %d.\n",
+ __func__, ret);
+ return ret;
+ }
+
+ do {
+ ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ &fifo_occupancy);
+ if (ret) {
+ dev_warn(&cfhsi->ndev->dev,
+ "%s: can't get FIFO occupancy: %d.\n",
+ __func__, ret);
+ break;
+ } else if (!fifo_occupancy)
+ /* No more data, exiting normally */
+ break;
+
+ fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
+ set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
+ ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
+ cfhsi->dev);
+ if (ret) {
+ clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
+ dev_warn(&cfhsi->ndev->dev,
+ "%s: can't read data: %d.\n",
+ __func__, ret);
+ break;
+ }
+
+ ret = 5 * HZ;
+ wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
+ !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
+
+ if (ret < 0) {
+ dev_warn(&cfhsi->ndev->dev,
+ "%s: can't wait for flush complete: %d.\n",
+ __func__, ret);
+ break;
+ } else if (!ret) {
+ ret = -ETIMEDOUT;
+ dev_warn(&cfhsi->ndev->dev,
+ "%s: timeout waiting for flush complete.\n",
+ __func__);
+ break;
+ }
+ } while (1);
+
+ cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+
+ return ret;
+}
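The checks above (and the same pattern in cfhsi_wake_up() and cfhsi_wake_down() below) rely on the standard return convention of wait_event_interruptible_timeout(), summarized here for reference:

/*
 * ret = wait_event_interruptible_timeout(wq, condition, timeout);
 *
 *   ret  > 0  - condition became true; ret is the remaining time in jiffies
 *   ret == 0  - the timeout elapsed and condition is still false
 *   ret  < 0  - the sleep was interrupted by a signal (-ERESTARTSYS)
 */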
+
+static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
+{
+ int nfrms = 0;
+ int pld_len = 0;
+ struct sk_buff *skb;
+ u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
+
+ skb = skb_dequeue(&cfhsi->qhead);
+ if (!skb)
+ return 0;
+
+ /* Check if we can embed a CAIF frame. */
+ if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
+ struct caif_payload_info *info;
+ int hpad = 0;
+ int tpad = 0;
+
+ /* Calculate needed head alignment and tail alignment. */
+ info = (struct caif_payload_info *)&skb->cb;
+
+ hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
+ tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+
+ /* Check if frame still fits with added alignment. */
+ if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
+ u8 *pemb = desc->emb_frm;
+ desc->offset = CFHSI_DESC_SHORT_SZ;
+ *pemb = (u8)(hpad - 1);
+ pemb += hpad;
+
+ /* Update network statistics. */
+ cfhsi->ndev->stats.tx_packets++;
+ cfhsi->ndev->stats.tx_bytes += skb->len;
+
+ /* Copy in embedded CAIF frame. */
+ skb_copy_bits(skb, 0, pemb, skb->len);
+ consume_skb(skb);
+ skb = NULL;
+ }
+ } else
+ /* Clear offset. */
+ desc->offset = 0;
+
+ /* Create payload CAIF frames. */
+ pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
+ while (nfrms < CFHSI_MAX_PKTS) {
+ struct caif_payload_info *info;
+ int hpad = 0;
+ int tpad = 0;
+
+ if (!skb)
+ skb = skb_dequeue(&cfhsi->qhead);
+
+ if (!skb)
+ break;
+
+ /* Calculate needed head alignment and tail alignment. */
+ info = (struct caif_payload_info *)&skb->cb;
+
+ hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
+ tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+
+ /* Fill in CAIF frame length in descriptor. */
+ desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
+
+ /* Fill head padding information. */
+ *pfrm = (u8)(hpad - 1);
+ pfrm += hpad;
+
+ /* Update network statistics. */
+ cfhsi->ndev->stats.tx_packets++;
+ cfhsi->ndev->stats.tx_bytes += skb->len;
+
+ /* Copy in CAIF frame. */
+ skb_copy_bits(skb, 0, pfrm, skb->len);
+
+ /* Update payload length. */
+ pld_len += desc->cffrm_len[nfrms];
+
+ /* Update frame pointer. */
+ pfrm += skb->len + tpad;
+ consume_skb(skb);
+ skb = NULL;
+
+ /* Update number of frames. */
+ nfrms++;
+ }
+
+ /* Unused length fields should be zero-filled (according to SPEC). */
+ while (nfrms < CFHSI_MAX_PKTS) {
+ desc->cffrm_len[nfrms] = 0x0000;
+ nfrms++;
+ }
+
+ /* Check if we can piggy-back another descriptor. */
+ skb = skb_peek(&cfhsi->qhead);
+ if (skb)
+ desc->header |= CFHSI_PIGGY_DESC;
+ else
+ desc->header &= ~CFHSI_PIGGY_DESC;
+
+ return CFHSI_DESC_SZ + pld_len;
+}
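cfhsi_tx_frm() thus emits a single descriptor followed by the padded payload frames; a rough sketch of the buffer layout it builds (field widths not to scale, the exact sizes are defined in <net/caif/caif_hsi.h>):

/*
 *  |<------------ CFHSI_DESC_SZ ------------>|<-------- pld_len -------->|
 *  +--------+--------+-------------+---------+------+-------+-----+------+
 *  | header | offset | cffrm_len[] | emb_frm | hpad | frm 0 | ... | tpad |
 *  +--------+--------+-------------+---------+------+-------+-----+------+
 *
 * The first byte of each frame's hpad-byte head padding stores (hpad - 1),
 * tpad tail-padding bytes keep the next frame aligned, and the function
 * returns CFHSI_DESC_SZ + pld_len as the total transfer length.
 */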
+
+static void cfhsi_tx_done_work(struct work_struct *work)
+{
+ struct cfhsi *cfhsi = NULL;
+ struct cfhsi_desc *desc = NULL;
+ int len = 0;
+ int res;
+
+ cfhsi = container_of(work, struct cfhsi, tx_done_work);
+ dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ __func__);
+
+ if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+ return;
+
+ desc = (struct cfhsi_desc *)cfhsi->tx_buf;
+
+ do {
+ /*
+ * Send flow on if flow off has been previously signalled
+ * and number of packets is below low water mark.
+ */
+ spin_lock_bh(&cfhsi->lock);
+ if (cfhsi->flow_off_sent &&
+ cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
+ cfhsi->cfdev.flowctrl) {
+
+ cfhsi->flow_off_sent = 0;
+ cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
+ }
+ spin_unlock_bh(&cfhsi->lock);
+
+ /* Create HSI frame. */
+ len = cfhsi_tx_frm(desc, cfhsi);
+ if (!len) {
+ cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
+ /* Start inactivity timer. */
+ mod_timer(&cfhsi->timer,
+ jiffies + CFHSI_INACTIVITY_TOUT);
+ break;
+ }
+
+ /* Set up new transfer. */
+ res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+ if (WARN_ON(res < 0)) {
+ dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+ __func__, res);
+ }
+ } while (res < 0);
+}
+
+static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
+{
+ struct cfhsi *cfhsi;
+
+ cfhsi = container_of(drv, struct cfhsi, drv);
+ dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ __func__);
+
+ if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+ return;
+
+ queue_work(cfhsi->wq, &cfhsi->tx_done_work);
+}
+
+static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
+{
+ int xfer_sz = 0;
+ int nfrms = 0;
+ u16 *plen = NULL;
+ u8 *pfrm = NULL;
+
+ if ((desc->header & ~CFHSI_PIGGY_DESC) ||
+ (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
+ dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
+ __func__);
+ return 0;
+ }
+
+ /* Check for embedded CAIF frame. */
+ if (desc->offset) {
+ struct sk_buff *skb;
+ u8 *dst = NULL;
+ int len = 0, retries = 0;
+ pfrm = ((u8 *)desc) + desc->offset;
+
+ /* Remove offset padding. */
+ pfrm += *pfrm + 1;
+
+ /* Read length of CAIF frame (little endian). */
+ len = *pfrm;
+ len |= ((*(pfrm+1)) << 8) & 0xFF00;
+ len += 2; /* Add FCS fields. */
+
+ /* Allocate SKB (GFP_KERNEL allocation; may sleep). */
+ skb = alloc_skb(len + 1, GFP_KERNEL);
+ while (!skb) {
+ retries++;
+ schedule_timeout(1);
+ skb = alloc_skb(len + 1, GFP_KERNEL);
+ if (skb) {
+ printk(KERN_WARNING "%s: slept for %u "
+ "before getting memory\n",
+ __func__, retries);
+ break;
+ }
+ if (retries > HZ) {
+ printk(KERN_ERR "%s: slept for 1HZ and "
+ "did not get memory\n",
+ __func__);
+ cfhsi->ndev->stats.rx_dropped++;
+ goto drop_frame;
+ }
+ }
+ caif_assert(skb != NULL);
+
+ dst = skb_put(skb, len);
+ memcpy(dst, pfrm, len);
+
+ skb->protocol = htons(ETH_P_CAIF);
+ skb_reset_mac_header(skb);
+ skb->dev = cfhsi->ndev;
+
+ /*
+ * We are called from an arch-specific platform device.
+ * Unfortunately we don't know what context we're
+ * running in.
+ */
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
+
+ /* Update network statistics. */
+ cfhsi->ndev->stats.rx_packets++;
+ cfhsi->ndev->stats.rx_bytes += len;
+ }
+
+drop_frame:
+ /* Calculate transfer length. */
+ plen = desc->cffrm_len;
+ while (nfrms < CFHSI_MAX_PKTS && *plen) {
+ xfer_sz += *plen;
+ plen++;
+ nfrms++;
+ }
+
+ /* Check for piggy-backed descriptor. */
+ if (desc->header & CFHSI_PIGGY_DESC)
+ xfer_sz += CFHSI_DESC_SZ;
+
+ if (xfer_sz % 4) {
+ dev_err(&cfhsi->ndev->dev,
+ "%s: Invalid payload len: %d, ignored.\n",
+ __func__, xfer_sz);
+ xfer_sz = 0;
+ }
+
+ return xfer_sz;
+}
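Both RX paths decode the 16-bit CAIF frame length the same way; a stand-alone sketch of that decode with made-up bytes:

#include <stdio.h>

int main(void)
{
	/* The first two bytes of a CAIF frame carry its length, LSB first. */
	unsigned char frm[] = { 0x2a, 0x01 };	/* 0x012a == 298 bytes */
	int len;

	len = frm[0];
	len |= (frm[1] << 8) & 0xFF00;
	len += 2;	/* the driver adds room for the FCS fields */

	printf("frame length incl. FCS: %d\n", len);	/* prints 300 */
	return 0;
}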
+
+static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
+{
+ int rx_sz = 0;
+ int nfrms = 0;
+ u16 *plen = NULL;
+ u8 *pfrm = NULL;
+
+ /* Sanity check header and offset. */
+ if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
+ (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
+ dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Set frame pointer to start of payload. */
+ pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
+ plen = desc->cffrm_len;
+ while (nfrms < CFHSI_MAX_PKTS && *plen) {
+ struct sk_buff *skb;
+ u8 *dst = NULL;
+ u8 *pcffrm = NULL;
+ int len = 0, retries = 0;
+
+ if (WARN_ON(desc->cffrm_len[nfrms] > CFHSI_MAX_PAYLOAD_SZ)) {
+ dev_err(&cfhsi->ndev->dev, "%s: Invalid payload.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* CAIF frame starts after head padding. */
+ pcffrm = pfrm + *pfrm + 1;
+
+ /* Read length of CAIF frame (little endian). */
+ len = *pcffrm;
+ len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
+ len += 2; /* Add FCS fields. */
+
+ /* Allocate SKB (GFP_KERNEL allocation; may sleep). */
+ skb = alloc_skb(len + 1, GFP_KERNEL);
+ while (!skb) {
+ retries++;
+ schedule_timeout(1);
+ skb = alloc_skb(len + 1, GFP_KERNEL);
+ if (skb) {
+ printk(KERN_WARNING "%s: slept for %u "
+ "before getting memory\n",
+ __func__, retries);
+ break;
+ }
+ if (retries > HZ) {
+ printk(KERN_ERR "%s: slept for 1HZ "
+ "and did not get memory\n",
+ __func__);
+ cfhsi->ndev->stats.rx_dropped++;
+ goto drop_frame;
+ }
+ }
+ caif_assert(skb != NULL);
+
+ dst = skb_put(skb, len);
+ memcpy(dst, pcffrm, len);
+
+ skb->protocol = htons(ETH_P_CAIF);
+ skb_reset_mac_header(skb);
+ skb->dev = cfhsi->ndev;
+
+ /*
+ * We're called from a platform device,
+ * and don't know the context we're running in.
+ */
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
+
+ /* Update network statistics. */
+ cfhsi->ndev->stats.rx_packets++;
+ cfhsi->ndev->stats.rx_bytes += len;
+
+drop_frame:
+ pfrm += *plen;
+ rx_sz += *plen;
+ plen++;
+ nfrms++;
+ }
+
+ return rx_sz;
+}
+
+static void cfhsi_rx_done_work(struct work_struct *work)
+{
+ int res;
+ int desc_pld_len = 0;
+ struct cfhsi *cfhsi = NULL;
+ struct cfhsi_desc *desc = NULL;
+
+ cfhsi = container_of(work, struct cfhsi, rx_done_work);
+ desc = (struct cfhsi_desc *)cfhsi->rx_buf;
+
+ dev_dbg(&cfhsi->ndev->dev, "%s: Kick timer if pending.\n",
+ __func__);
+
+ if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+ return;
+
+ /* Update inactivity timer if pending. */
+ mod_timer_pending(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT);
+
+ if (cfhsi->rx_state == CFHSI_RX_STATE_DESC) {
+ desc_pld_len = cfhsi_rx_desc(desc, cfhsi);
+ } else {
+ int pld_len;
+
+ pld_len = cfhsi_rx_pld(desc, cfhsi);
+
+ if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
+ struct cfhsi_desc *piggy_desc;
+ piggy_desc = (struct cfhsi_desc *)
+ (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
+ pld_len);
+
+ /* Extract piggy-backed descriptor. */
+ desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi);
+
+ /*
+ * Copy needed information from the piggy-backed
+ * descriptor to the descriptor in the start.
+ */
+ memcpy((u8 *)desc, (u8 *)piggy_desc,
+ CFHSI_DESC_SHORT_SZ);
+ }
+ }
+
+ if (desc_pld_len) {
+ cfhsi->rx_state = CFHSI_RX_STATE_PAYLOAD;
+ cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
+ cfhsi->rx_len = desc_pld_len;
+ } else {
+ cfhsi->rx_state = CFHSI_RX_STATE_DESC;
+ cfhsi->rx_ptr = cfhsi->rx_buf;
+ cfhsi->rx_len = CFHSI_DESC_SZ;
+ }
+ clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);
+
+ if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
+ /* Set up new transfer. */
+ dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
+ __func__);
+ res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len,
+ cfhsi->dev);
+ if (WARN_ON(res < 0)) {
+ dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
+ __func__, res);
+ cfhsi->ndev->stats.rx_errors++;
+ cfhsi->ndev->stats.rx_dropped++;
+ }
+ }
+}
+
+static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
+{
+ struct cfhsi *cfhsi;
+
+ cfhsi = container_of(drv, struct cfhsi, drv);
+ dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ __func__);
+
+ if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+ return;
+
+ set_bit(CFHSI_PENDING_RX, &cfhsi->bits);
+
+ if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
+ wake_up_interruptible(&cfhsi->flush_fifo_wait);
+ else
+ queue_work(cfhsi->wq, &cfhsi->rx_done_work);
+}
+
+static void cfhsi_wake_up(struct work_struct *work)
+{
+ struct cfhsi *cfhsi = NULL;
+ int res;
+ int len;
+ long ret;
+
+ cfhsi = container_of(work, struct cfhsi, wake_up_work);
+
+ if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+ return;
+
+ if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
+ /* This happens when wakeup is requested by
+ * both ends at the same time. */
+ clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+ return;
+ }
+
+ /* Activate wake line. */
+ cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
+
+ dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
+ __func__);
+
+ /* Wait for acknowledge. */
+ ret = CFHSI_WAKEUP_TOUT;
+ wait_event_interruptible_timeout(cfhsi->wake_up_wait,
+ test_bit(CFHSI_WAKE_UP_ACK,
+ &cfhsi->bits), ret);
+ if (unlikely(ret < 0)) {
+ /* Interrupted by signal. */
+ dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+ __func__, ret);
+ clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+ cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+ return;
+ } else if (!ret) {
+ /* Wakeup timeout */
+ dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
+ __func__);
+ clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+ cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+ return;
+ }
+ dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
+ __func__);
+
+ /* Clear power up bit. */
+ set_bit(CFHSI_AWAKE, &cfhsi->bits);
+ clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+
+ /* Resume read operation. */
+ if (!test_bit(CFHSI_PENDING_RX, &cfhsi->bits)) {
+ dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
+ __func__);
+ res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr,
+ cfhsi->rx_len, cfhsi->dev);
+ if (WARN_ON(res < 0)) {
+ dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
+ __func__, res);
+ }
+ }
+
+ /* Clear power up acknowledgment. */
+ clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
+
+ spin_lock_bh(&cfhsi->lock);
+
+ /* Resume transmit if queue is not empty. */
+ if (!skb_peek(&cfhsi->qhead)) {
+ dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
+ __func__);
+ /* Start inactivity timer. */
+ mod_timer(&cfhsi->timer,
+ jiffies + CFHSI_INACTIVITY_TOUT);
+ spin_unlock_bh(&cfhsi->lock);
+ return;
+ }
+
+ dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
+ __func__);
+
+ spin_unlock_bh(&cfhsi->lock);
+
+ /* Create HSI frame. */
+ len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);
+
+ if (likely(len > 0)) {
+ /* Set up new transfer. */
+ res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+ if (WARN_ON(res < 0)) {
+ dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+ __func__, res);
+ cfhsi_abort_tx(cfhsi);
+ }
+ } else {
+ dev_err(&cfhsi->ndev->dev,
+ "%s: Failed to create HSI frame: %d.\n",
+ __func__, len);
+ }
+}
+
+static void cfhsi_wake_down(struct work_struct *work)
+{
+ long ret;
+ struct cfhsi *cfhsi = NULL;
+ size_t fifo_occupancy;
+
+ cfhsi = container_of(work, struct cfhsi, wake_down_work);
+ dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ __func__);
+
+ if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+ return;
+
+ /* Check if there is something in FIFO. */
+ if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ &fifo_occupancy)))
+ fifo_occupancy = 0;
+
+ if (fifo_occupancy) {
+ dev_dbg(&cfhsi->ndev->dev,
+ "%s: %u words in RX FIFO, restart timer.\n",
+ __func__, (unsigned) fifo_occupancy);
+ spin_lock_bh(&cfhsi->lock);
+ mod_timer(&cfhsi->timer,
+ jiffies + CFHSI_INACTIVITY_TOUT);
+ spin_unlock_bh(&cfhsi->lock);
+ return;
+ }
+
+ /* Cancel pending RX requests */
+ cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
+
+ /* Deactivate wake line. */
+ cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+
+ /* Wait for acknowledge. */
+ ret = CFHSI_WAKEUP_TOUT;
+ ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
+ test_bit(CFHSI_WAKE_DOWN_ACK,
+ &cfhsi->bits),
+ ret);
+ if (ret < 0) {
+ /* Interrupted by signal. */
+ dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+ __func__, ret);
+ return;
+ } else if (!ret) {
+ /* Timeout */
+ dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
+ __func__);
+ }
+
+ /* Clear power down acknowledgment. */
+ clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
+ clear_bit(CFHSI_AWAKE, &cfhsi->bits);
+
+ /* Check if there is something in FIFO. */
+ if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ &fifo_occupancy)))
+ fifo_occupancy = 0;
+
+ if (fifo_occupancy) {
+ dev_dbg(&cfhsi->ndev->dev,
+ "%s: %u words in RX FIFO, wakeup forced.\n",
+ __func__, (unsigned) fifo_occupancy);
+ if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
+ queue_work(cfhsi->wq, &cfhsi->wake_up_work);
+ } else
+ dev_dbg(&cfhsi->ndev->dev, "%s: Done.\n",
+ __func__);
+}
+
+static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
+{
+ struct cfhsi *cfhsi = NULL;
+
+ cfhsi = container_of(drv, struct cfhsi, drv);
+ dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ __func__);
+
+ set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
+ wake_up_interruptible(&cfhsi->wake_up_wait);
+
+ if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
+ return;
+
+ /* Schedule wake up work queue if the peer initiates. */
+ if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
+ queue_work(cfhsi->wq, &cfhsi->wake_up_work);
+}
+
+static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
+{
+ struct cfhsi *cfhsi = NULL;
+
+ cfhsi = container_of(drv, struct cfhsi, drv);
+ dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ __func__);
+
+ /* Initiating low power is only permitted by the host (us). */
+ set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
+ wake_up_interruptible(&cfhsi->wake_down_wait);
+}
+
+static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct cfhsi *cfhsi = NULL;
+ int start_xfer = 0;
+ int timer_active;
+
+ if (!dev)
+ return -EINVAL;
+
+ cfhsi = netdev_priv(dev);
+
+ spin_lock_bh(&cfhsi->lock);
+
+ skb_queue_tail(&cfhsi->qhead, skb);
+
+ /* Sanity check; xmit should not be called after unregister_netdev */
+ if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
+ spin_unlock_bh(&cfhsi->lock);
+ cfhsi_abort_tx(cfhsi);
+ return -EINVAL;
+ }
+
+ /* Send flow off if number of packets is above high water mark. */
+ if (!cfhsi->flow_off_sent &&
+ cfhsi->qhead.qlen > cfhsi->q_high_mark &&
+ cfhsi->cfdev.flowctrl) {
+ cfhsi->flow_off_sent = 1;
+ cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
+ }
+
+ if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
+ cfhsi->tx_state = CFHSI_TX_STATE_XFER;
+ start_xfer = 1;
+ }
+
+ spin_unlock_bh(&cfhsi->lock);
+
+ if (!start_xfer)
+ return 0;
+
+ /* Delete inactivity timer if started. */
+#ifdef CONFIG_SMP
+ timer_active = del_timer_sync(&cfhsi->timer);
+#else
+ timer_active = del_timer(&cfhsi->timer);
+#endif /* CONFIG_SMP */
+
+ if (timer_active) {
+ struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
+ int len;
+ int res;
+
+ /* Create HSI frame. */
+ len = cfhsi_tx_frm(desc, cfhsi);
+ BUG_ON(!len);
+
+ /* Set up new transfer. */
+ res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+ if (WARN_ON(res < 0)) {
+ dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+ __func__, res);
+ cfhsi_abort_tx(cfhsi);
+ }
+ } else {
+ /* Schedule wake up work queue if we initiate. */
+ if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
+ queue_work(cfhsi->wq, &cfhsi->wake_up_work);
+ }
+
+ return 0;
+}
+
+static int cfhsi_open(struct net_device *dev)
+{
+ netif_wake_queue(dev);
+
+ return 0;
+}
+
+static int cfhsi_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+static const struct net_device_ops cfhsi_ops = {
+ .ndo_open = cfhsi_open,
+ .ndo_stop = cfhsi_close,
+ .ndo_start_xmit = cfhsi_xmit
+};
+
+static void cfhsi_setup(struct net_device *dev)
+{
+ struct cfhsi *cfhsi = netdev_priv(dev);
+ dev->features = 0;
+ dev->netdev_ops = &cfhsi_ops;
+ dev->type = ARPHRD_CAIF;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
+ dev->tx_queue_len = 0;
+ dev->destructor = free_netdev;
+ skb_queue_head_init(&cfhsi->qhead);
+ cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+ cfhsi->cfdev.use_frag = false;
+ cfhsi->cfdev.use_stx = false;
+ cfhsi->cfdev.use_fcs = false;
+ cfhsi->ndev = dev;
+}
+
+int cfhsi_probe(struct platform_device *pdev)
+{
+ struct cfhsi *cfhsi = NULL;
+ struct net_device *ndev;
+ struct cfhsi_dev *dev;
+ int res;
+
+ ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
+ if (!ndev) {
+ dev_err(&pdev->dev, "%s: alloc_netdev failed.\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ cfhsi = netdev_priv(ndev);
+ cfhsi->ndev = ndev;
+ cfhsi->pdev = pdev;
+
+ /* Initialize state variables. */
+ cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
+ cfhsi->rx_state = CFHSI_RX_STATE_DESC;
+
+ /* Set flow info */
+ cfhsi->flow_off_sent = 0;
+ cfhsi->q_low_mark = LOW_WATER_MARK;
+ cfhsi->q_high_mark = HIGH_WATER_MARK;
+
+ /* Assign the HSI device. */
+ dev = (struct cfhsi_dev *)pdev->dev.platform_data;
+ cfhsi->dev = dev;
+
+ /* Assign the driver to this HSI device. */
+ dev->drv = &cfhsi->drv;
+
+ /*
+ * Allocate a TX buffer with the size of one HSI packet descriptor
+ * and the necessary room for CAIF payload frames.
+ */
+ cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
+ if (!cfhsi->tx_buf) {
+ dev_err(&ndev->dev, "%s: Failed to allocate TX buffer.\n",
+ __func__);
+ res = -ENODEV;
+ goto err_alloc_tx;
+ }
+
+ /*
+ * Allocate a RX buffer with the size of two HSI packet descriptors and
+ * the necessary room for CAIF payload frames.
+ */
+ cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
+ if (!cfhsi->rx_buf) {
+ dev_err(&ndev->dev, "%s: Failed to allocate RX buffer.\n",
+ __func__);
+ res = -ENODEV;
+ goto err_alloc_rx;
+ }
+
+ /* Initialize receive variables. */
+ cfhsi->rx_ptr = cfhsi->rx_buf;
+ cfhsi->rx_len = CFHSI_DESC_SZ;
+
+ /* Initialize spin locks. */
+ spin_lock_init(&cfhsi->lock);
+
+ /* Set up the driver. */
+ cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
+ cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
+
+ /* Initialize the work queues. */
+ INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
+ INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
+ INIT_WORK(&cfhsi->rx_done_work, cfhsi_rx_done_work);
+ INIT_WORK(&cfhsi->tx_done_work, cfhsi_tx_done_work);
+
+ /* Clear all bit fields. */
+ clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
+ clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
+ clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
+ clear_bit(CFHSI_AWAKE, &cfhsi->bits);
+ clear_bit(CFHSI_PENDING_RX, &cfhsi->bits);
+
+ /* Create work thread. */
+ cfhsi->wq = create_singlethread_workqueue(pdev->name);
+ if (!cfhsi->wq) {
+ dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
+ __func__);
+ res = -ENODEV;
+ goto err_create_wq;
+ }
+
+ /* Initialize wait queues. */
+ init_waitqueue_head(&cfhsi->wake_up_wait);
+ init_waitqueue_head(&cfhsi->wake_down_wait);
+ init_waitqueue_head(&cfhsi->flush_fifo_wait);
+
+ /* Setup the inactivity timer. */
+ init_timer(&cfhsi->timer);
+ cfhsi->timer.data = (unsigned long)cfhsi;
+ cfhsi->timer.function = cfhsi_inactivity_tout;
+
+ /* Add CAIF HSI device to list. */
+ spin_lock(&cfhsi_list_lock);
+ list_add_tail(&cfhsi->list, &cfhsi_list);
+ spin_unlock(&cfhsi_list_lock);
+
+ /* Activate HSI interface. */
+ res = cfhsi->dev->cfhsi_up(cfhsi->dev);
+ if (res) {
+ dev_err(&cfhsi->ndev->dev,
+ "%s: can't activate HSI interface: %d.\n",
+ __func__, res);
+ goto err_activate;
+ }
+
+ /* Flush FIFO */
+ res = cfhsi_flush_fifo(cfhsi);
+ if (res) {
+ dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
+ __func__, res);
+ goto err_net_reg;
+ }
+
+ cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
+ cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;
+
+ /* Register network device. */
+ res = register_netdev(ndev);
+ if (res) {
+ dev_err(&ndev->dev, "%s: Registration error: %d.\n",
+ __func__, res);
+ goto err_net_reg;
+ }
+
+ netif_stop_queue(ndev);
+
+ return res;
+
+ err_net_reg:
+ cfhsi->dev->cfhsi_down(cfhsi->dev);
+ err_activate:
+ destroy_workqueue(cfhsi->wq);
+ err_create_wq:
+ kfree(cfhsi->rx_buf);
+ err_alloc_rx:
+ kfree(cfhsi->tx_buf);
+ err_alloc_tx:
+ free_netdev(ndev);
+
+ return res;
+}
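cfhsi_probe() takes everything it needs from platform_data, so the HSI controller side is expected to register a platform device named "cfhsi" that carries a struct cfhsi_dev. A hedged sketch of such a board-level hookup; the my_hsi_* callbacks are hypothetical placeholders whose real prototypes come from <net/caif/caif_hsi.h>:

#include <linux/platform_device.h>
#include <net/caif/caif_hsi.h>

/* my_hsi_* are placeholders for the real HSI controller callbacks. */
static struct cfhsi_dev my_cfhsi_dev = {
	.cfhsi_up		= my_hsi_up,
	.cfhsi_down		= my_hsi_down,
	.cfhsi_tx		= my_hsi_tx,
	.cfhsi_rx		= my_hsi_rx,
	.cfhsi_wake_up		= my_hsi_wake_up,
	.cfhsi_wake_down	= my_hsi_wake_down,
	.cfhsi_fifo_occupancy	= my_hsi_fifo_occupancy,
	.cfhsi_rx_cancel	= my_hsi_rx_cancel,
};

static struct platform_device my_cfhsi_pdev = {
	.name			= "cfhsi",	/* matches cfhsi_plat_drv.driver.name */
	.id			= 0,
	.dev.platform_data	= &my_cfhsi_dev,
};

/* Board init code would then call platform_device_register(&my_cfhsi_pdev). */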
+
+static void cfhsi_shutdown(struct cfhsi *cfhsi, bool remove_platform_dev)
+{
+ u8 *tx_buf, *rx_buf;
+
+ /* Stop TXing */
+ netif_tx_stop_all_queues(cfhsi->ndev);
+
+ /* Going to shut down the driver. */
+ set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
+
+ if (remove_platform_dev) {
+ /* Flush workqueue */
+ flush_workqueue(cfhsi->wq);
+
+ /* Unregister the platform device. */
+ platform_device_unregister(cfhsi->pdev);
+ }
+
+ /* Flush workqueue */
+ flush_workqueue(cfhsi->wq);
+
+ /* Delete timer if pending */
+#ifdef CONFIG_SMP
+ del_timer_sync(&cfhsi->timer);
+#else
+ del_timer(&cfhsi->timer);
+#endif /* CONFIG_SMP */
+
+ /* Cancel pending RX request (if any) */
+ cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
+
+ /* Flush again and destroy workqueue */
+ destroy_workqueue(cfhsi->wq);
+
+ /* Store buffers; they will be freed later. */
+ tx_buf = cfhsi->tx_buf;
+ rx_buf = cfhsi->rx_buf;
+
+ /* Flush transmit queues. */
+ cfhsi_abort_tx(cfhsi);
+
+ /* Deactivate interface */
+ cfhsi->dev->cfhsi_down(cfhsi->dev);
+
+ /* Finally unregister the network device. */
+ unregister_netdev(cfhsi->ndev);
+
+ /* Free buffers. */
+ kfree(tx_buf);
+ kfree(rx_buf);
+}
+
+int cfhsi_remove(struct platform_device *pdev)
+{
+ struct list_head *list_node;
+ struct list_head *n;
+ struct cfhsi *cfhsi = NULL;
+ struct cfhsi_dev *dev;
+
+ dev = (struct cfhsi_dev *)pdev->dev.platform_data;
+ spin_lock(&cfhsi_list_lock);
+ list_for_each_safe(list_node, n, &cfhsi_list) {
+ cfhsi = list_entry(list_node, struct cfhsi, list);
+ /* Find the corresponding device. */
+ if (cfhsi->dev == dev) {
+ /* Remove from list. */
+ list_del(list_node);
+ spin_unlock(&cfhsi_list_lock);
+
+ /* Shutdown driver. */
+ cfhsi_shutdown(cfhsi, false);
+
+ return 0;
+ }
+ }
+ spin_unlock(&cfhsi_list_lock);
+ return -ENODEV;
+}
+
+struct platform_driver cfhsi_plat_drv = {
+ .probe = cfhsi_probe,
+ .remove = cfhsi_remove,
+ .driver = {
+ .name = "cfhsi",
+ .owner = THIS_MODULE,
+ },
+};
+
+static void __exit cfhsi_exit_module(void)
+{
+ struct list_head *list_node;
+ struct list_head *n;
+ struct cfhsi *cfhsi = NULL;
+
+ spin_lock(&cfhsi_list_lock);
+ list_for_each_safe(list_node, n, &cfhsi_list) {
+ cfhsi = list_entry(list_node, struct cfhsi, list);
+
+ /* Remove from list. */
+ list_del(list_node);
+ spin_unlock(&cfhsi_list_lock);
+
+ /* Shutdown driver. */
+ cfhsi_shutdown(cfhsi, true);
+
+ spin_lock(&cfhsi_list_lock);
+ }
+ spin_unlock(&cfhsi_list_lock);
+
+ /* Unregister platform driver. */
+ platform_driver_unregister(&cfhsi_plat_drv);
+}
+
+static int __init cfhsi_init_module(void)
+{
+ int result;
+
+ /* Initialize spin lock. */
+ spin_lock_init(&cfhsi_list_lock);
+
+ /* Register platform driver. */
+ result = platform_driver_register(&cfhsi_plat_drv);
+ if (result) {
+ printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
+ result);
+ goto err_dev_register;
+ }
+
+ return result;
+
+ err_dev_register:
+ return result;
+}
+
+module_init(cfhsi_init_module);
+module_exit(cfhsi_exit_module);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 3df0c0f8b8b..23406e62c0b 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -4,8 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#include <linux/hardirq.h>
#include <linux/init.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
index 5f771ab712c..89d76b7b325 100644
--- a/drivers/net/caif/caif_shm_u5500.c
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -7,7 +7,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 731aa119377..d4b26fb24ed 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -134,7 +134,7 @@ int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
u32 avail_emptybuff = 0;
unsigned long flags = 0;
- pshm_drv = (struct shmdrv_layer *)priv;
+ pshm_drv = priv;
/* Check for received buffers. */
if (mbx_msg & SHM_FULL_MASK) {
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 57e63937381..0f8defc7330 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -5,7 +5,6 @@
* License terms: GNU General Public License (GPL) version 2.
*/
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index b009e03cda9..e139e133fc7 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -4,7 +4,6 @@
* Author: Daniel Martensson / Daniel.Martensson@stericsson.com
* License terms: GNU General Public License (GPL) version 2.
*/
-#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 1d699e3df54..f6c98fb4a51 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -4,7 +4,6 @@ menu "CAN Device Drivers"
config CAN_VCAN
tristate "Virtual Local CAN Interface (vcan)"
depends on CAN
- default N
---help---
Similar to the network loopback devices, vcan offers a
virtual local CAN interface.
@@ -15,7 +14,6 @@ config CAN_VCAN
config CAN_SLCAN
tristate "Serial / USB serial CAN Adaptors (slcan)"
depends on CAN
- default N
---help---
CAN driver for several 'low cost' CAN interfaces that are attached
via serial lines or via USB-to-serial adapters using the LAWICEL
@@ -36,7 +34,7 @@ config CAN_SLCAN
config CAN_DEV
tristate "Platform CAN drivers with Netlink support"
depends on CAN
- default Y
+ default y
---help---
Enables the common framework for platform CAN drivers with Netlink
support. This is the standard library for CAN drivers.
@@ -45,7 +43,7 @@ config CAN_DEV
config CAN_CALC_BITTIMING
bool "CAN bit-timing calculation"
depends on CAN_DEV
- default Y
+ default y
---help---
If enabled, CAN bit-timing parameters will be calculated for the
bit-rate specified via Netlink argument "bitrate" when the device
@@ -58,9 +56,10 @@ config CAN_CALC_BITTIMING
config CAN_AT91
tristate "Atmel AT91 onchip CAN controller"
- depends on CAN_DEV && ARCH_AT91SAM9263
+ depends on CAN_DEV && (ARCH_AT91SAM9263 || ARCH_AT91SAM9X5)
---help---
- This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
+ This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
+ and AT91SAM9X5 processors.
config CAN_TI_HECC
depends on CAN_DEV && ARCH_OMAP3
@@ -124,7 +123,6 @@ source "drivers/net/can/softing/Kconfig"
config CAN_DEBUG_DEVICES
bool "CAN devices debugging messages"
depends on CAN
- default N
---help---
Say Y here if you want the CAN device drivers to produce a bunch of
debug messages to the system log. Select this if you are having
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 74efb5a2ad4..121ede663e2 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -41,32 +41,7 @@
#include <mach/board.h>
-#define AT91_NAPI_WEIGHT 11
-
-/*
- * RX/TX Mailbox split
- * don't dare to touch
- */
-#define AT91_MB_RX_NUM 11
-#define AT91_MB_TX_SHIFT 2
-
-#define AT91_MB_RX_FIRST 1
-#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
-
-#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1)
-#define AT91_MB_RX_SPLIT 8
-#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1)
-#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \
- ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST))
-
-#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT)
-#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1)
-#define AT91_MB_TX_LAST (AT91_MB_TX_FIRST + AT91_MB_TX_NUM - 1)
-
-#define AT91_NEXT_PRIO_SHIFT (AT91_MB_TX_SHIFT)
-#define AT91_NEXT_PRIO_MASK (0xf << AT91_MB_TX_SHIFT)
-#define AT91_NEXT_MB_MASK (AT91_MB_TX_NUM - 1)
-#define AT91_NEXT_MASK ((AT91_MB_TX_NUM - 1) | AT91_NEXT_PRIO_MASK)
+#define AT91_MB_MASK(i) ((1 << (i)) - 1)
/* Common registers */
enum at91_reg {
@@ -128,12 +103,6 @@ enum at91_mb_mode {
};
/* Interrupt mask bits */
-#define AT91_IRQ_MB_RX ((1 << (AT91_MB_RX_LAST + 1)) \
- - (1 << AT91_MB_RX_FIRST))
-#define AT91_IRQ_MB_TX ((1 << (AT91_MB_TX_LAST + 1)) \
- - (1 << AT91_MB_TX_FIRST))
-#define AT91_IRQ_MB_ALL (AT91_IRQ_MB_RX | AT91_IRQ_MB_TX)
-
#define AT91_IRQ_ERRA (1 << 16)
#define AT91_IRQ_WARN (1 << 17)
#define AT91_IRQ_ERRP (1 << 18)
@@ -156,22 +125,51 @@ enum at91_mb_mode {
#define AT91_IRQ_ALL (0x1fffffff)
+enum at91_devtype {
+ AT91_DEVTYPE_SAM9263,
+ AT91_DEVTYPE_SAM9X5,
+};
+
+struct at91_devtype_data {
+ unsigned int rx_first;
+ unsigned int rx_split;
+ unsigned int rx_last;
+ unsigned int tx_shift;
+ enum at91_devtype type;
+};
+
struct at91_priv {
- struct can_priv can; /* must be the first member! */
- struct net_device *dev;
- struct napi_struct napi;
+ struct can_priv can; /* must be the first member! */
+ struct net_device *dev;
+ struct napi_struct napi;
- void __iomem *reg_base;
+ void __iomem *reg_base;
- u32 reg_sr;
- unsigned int tx_next;
- unsigned int tx_echo;
- unsigned int rx_next;
+ u32 reg_sr;
+ unsigned int tx_next;
+ unsigned int tx_echo;
+ unsigned int rx_next;
+ struct at91_devtype_data devtype_data;
- struct clk *clk;
- struct at91_can_data *pdata;
+ struct clk *clk;
+ struct at91_can_data *pdata;
- canid_t mb0_id;
+ canid_t mb0_id;
+};
+
+static const struct at91_devtype_data at91_devtype_data[] __devinitconst = {
+ [AT91_DEVTYPE_SAM9263] = {
+ .rx_first = 1,
+ .rx_split = 8,
+ .rx_last = 11,
+ .tx_shift = 2,
+ },
+ [AT91_DEVTYPE_SAM9X5] = {
+ .rx_first = 0,
+ .rx_split = 4,
+ .rx_last = 5,
+ .tx_shift = 1,
+ },
};
static struct can_bittiming_const at91_bittiming_const = {
@@ -186,19 +184,111 @@ static struct can_bittiming_const at91_bittiming_const = {
.brp_inc = 1,
};
-static inline int get_tx_next_mb(const struct at91_priv *priv)
+#define AT91_IS(_model) \
+static inline int at91_is_sam##_model(const struct at91_priv *priv) \
+{ \
+ return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \
+}
+
+AT91_IS(9263);
+AT91_IS(9X5);
+
+static inline unsigned int get_mb_rx_first(const struct at91_priv *priv)
+{
+ return priv->devtype_data.rx_first;
+}
+
+static inline unsigned int get_mb_rx_last(const struct at91_priv *priv)
+{
+ return priv->devtype_data.rx_last;
+}
+
+static inline unsigned int get_mb_rx_split(const struct at91_priv *priv)
+{
+ return priv->devtype_data.rx_split;
+}
+
+static inline unsigned int get_mb_rx_num(const struct at91_priv *priv)
+{
+ return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1;
+}
+
+static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv)
+{
+ return get_mb_rx_split(priv) - 1;
+}
+
+static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv)
+{
+ return AT91_MB_MASK(get_mb_rx_split(priv)) &
+ ~AT91_MB_MASK(get_mb_rx_first(priv));
+}
+
+static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv)
+{
+ return priv->devtype_data.tx_shift;
+}
+
+static inline unsigned int get_mb_tx_num(const struct at91_priv *priv)
+{
+ return 1 << get_mb_tx_shift(priv);
+}
+
+static inline unsigned int get_mb_tx_first(const struct at91_priv *priv)
+{
+ return get_mb_rx_last(priv) + 1;
+}
+
+static inline unsigned int get_mb_tx_last(const struct at91_priv *priv)
+{
+ return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1;
+}
+
+static inline unsigned int get_next_prio_shift(const struct at91_priv *priv)
+{
+ return get_mb_tx_shift(priv);
+}
+
+static inline unsigned int get_next_prio_mask(const struct at91_priv *priv)
+{
+ return 0xf << get_mb_tx_shift(priv);
+}
+
+static inline unsigned int get_next_mb_mask(const struct at91_priv *priv)
+{
+ return AT91_MB_MASK(get_mb_tx_shift(priv));
+}
+
+static inline unsigned int get_next_mask(const struct at91_priv *priv)
+{
+ return get_next_mb_mask(priv) | get_next_prio_mask(priv);
+}
+
+static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv)
{
- return (priv->tx_next & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+ return AT91_MB_MASK(get_mb_rx_last(priv) + 1) &
+ ~AT91_MB_MASK(get_mb_rx_first(priv));
}
-static inline int get_tx_next_prio(const struct at91_priv *priv)
+static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv)
{
- return (priv->tx_next >> AT91_NEXT_PRIO_SHIFT) & 0xf;
+ return AT91_MB_MASK(get_mb_tx_last(priv) + 1) &
+ ~AT91_MB_MASK(get_mb_tx_first(priv));
}
-static inline int get_tx_echo_mb(const struct at91_priv *priv)
+static inline unsigned int get_tx_next_mb(const struct at91_priv *priv)
{
- return (priv->tx_echo & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+ return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
+}
+
+static inline unsigned int get_tx_next_prio(const struct at91_priv *priv)
+{
+ return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf;
+}
+
+static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv)
+{
+ return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
}
static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
@@ -259,29 +349,29 @@ static void at91_setup_mailboxes(struct net_device *dev)
* overflow.
*/
reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
- for (i = 0; i < AT91_MB_RX_FIRST; i++) {
+ for (i = 0; i < get_mb_rx_first(priv); i++) {
set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
at91_write(priv, AT91_MID(i), reg_mid);
at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */
}
- for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
+ for (i = get_mb_rx_first(priv); i < get_mb_rx_last(priv); i++)
set_mb_mode(priv, i, AT91_MB_MODE_RX);
- set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
+ set_mb_mode(priv, get_mb_rx_last(priv), AT91_MB_MODE_RX_OVRWR);
/* reset acceptance mask and id register */
- for (i = AT91_MB_RX_FIRST; i <= AT91_MB_RX_LAST; i++) {
- at91_write(priv, AT91_MAM(i), 0x0 );
+ for (i = get_mb_rx_first(priv); i <= get_mb_rx_last(priv); i++) {
+ at91_write(priv, AT91_MAM(i), 0x0);
at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
}
/* The last 4 mailboxes are used for transmitting. */
- for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
+ for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++)
set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
/* Reset tx and rx helper pointers */
priv->tx_next = priv->tx_echo = 0;
- priv->rx_next = AT91_MB_RX_FIRST;
+ priv->rx_next = get_mb_rx_first(priv);
}
static int at91_set_bittiming(struct net_device *dev)
@@ -336,7 +426,7 @@ static void at91_chip_start(struct net_device *dev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
/* Enable interrupts */
- reg_ier = AT91_IRQ_MB_RX | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
+ reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
at91_write(priv, AT91_IER, reg_ier);
}
@@ -375,8 +465,8 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state)
* mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
* encode the mailbox number, the upper 4 bits the mailbox priority:
*
- * priv->tx_next = (prio << AT91_NEXT_PRIO_SHIFT) ||
- * (mb - AT91_MB_TX_FIRST);
+ * priv->tx_next = (prio << get_next_prio_shift(priv)) |
+ * (mb - get_mb_tx_first(priv));
*
*/
static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -417,7 +507,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
stats->tx_bytes += cf->can_dlc;
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
- can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
+ can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv));
/*
* we have to stop the queue and deliver all messages in case
@@ -430,7 +520,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
priv->tx_next++;
if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
AT91_MSR_MRDY) ||
- (priv->tx_next & AT91_NEXT_MASK) == 0)
+ (priv->tx_next & get_next_mask(priv)) == 0)
netif_stop_queue(dev);
/* Enable interrupt for this mailbox */
@@ -447,7 +537,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
static inline void at91_activate_rx_low(const struct at91_priv *priv)
{
- u32 mask = AT91_MB_RX_LOW_MASK;
+ u32 mask = get_mb_rx_low_mask(priv);
at91_write(priv, AT91_TCR, mask);
}
@@ -513,17 +603,19 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;
reg_msr = at91_read(priv, AT91_MSR(mb));
- if (reg_msr & AT91_MSR_MRTR)
- cf->can_id |= CAN_RTR_FLAG;
cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf);
- *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
- *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
+ if (reg_msr & AT91_MSR_MRTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ else {
+ *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
+ *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
+ }
/* allow RX of extended frames */
at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
- if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
+ if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI))
at91_rx_overflow_err(dev);
}
@@ -561,8 +653,9 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
*
* Theory of Operation:
*
- * 11 of the 16 mailboxes on the chip are reserved for RX. we split
- * them into 2 groups. The lower group holds 7 and upper 4 mailboxes.
+ * About 3/4 of the mailboxes (get_mb_rx_first()...get_mb_rx_last())
+ * on the chip are reserved for RX. We split them into 2 groups. The
+ * lower group ranges from get_mb_rx_first() to get_mb_rx_low_last().
*
* Like it or not, but the chip always saves a received CAN message
* into the first free mailbox it finds (starting with the
@@ -610,23 +703,23 @@ static int at91_poll_rx(struct net_device *dev, int quota)
unsigned int mb;
int received = 0;
- if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
- reg_sr & AT91_MB_RX_LOW_MASK)
+ if (priv->rx_next > get_mb_rx_low_last(priv) &&
+ reg_sr & get_mb_rx_low_mask(priv))
netdev_info(dev,
"order of incoming frames cannot be guaranteed\n");
again:
- for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next);
- mb < AT91_MB_RX_LAST + 1 && quota > 0;
+ for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next);
+ mb < get_mb_tx_first(priv) && quota > 0;
reg_sr = at91_read(priv, AT91_SR),
- mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) {
+ mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) {
at91_read_msg(dev, mb);
/* reactivate mailboxes */
- if (mb == AT91_MB_RX_LOW_LAST)
+ if (mb == get_mb_rx_low_last(priv))
/* all lower mailboxed, if just finished it */
at91_activate_rx_low(priv);
- else if (mb > AT91_MB_RX_LOW_LAST)
+ else if (mb > get_mb_rx_low_last(priv))
/* only the mailbox we read */
at91_activate_rx_mb(priv, mb);
@@ -635,9 +728,9 @@ static int at91_poll_rx(struct net_device *dev, int quota)
}
/* upper group completed, look again in lower */
- if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
- quota > 0 && mb > AT91_MB_RX_LAST) {
- priv->rx_next = AT91_MB_RX_FIRST;
+ if (priv->rx_next > get_mb_rx_low_last(priv) &&
+ quota > 0 && mb > get_mb_rx_last(priv)) {
+ priv->rx_next = get_mb_rx_first(priv);
goto again;
}
@@ -720,7 +813,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
u32 reg_sr = at91_read(priv, AT91_SR);
int work_done = 0;
- if (reg_sr & AT91_IRQ_MB_RX)
+ if (reg_sr & get_irq_mb_rx(priv))
work_done += at91_poll_rx(dev, quota - work_done);
/*
@@ -734,7 +827,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
if (work_done < quota) {
/* enable IRQs for frame errors and all mailboxes >= rx_next */
u32 reg_ier = AT91_IRQ_ERR_FRAME;
- reg_ier |= AT91_IRQ_MB_RX & ~AT91_MB_RX_MASK(priv->rx_next);
+ reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);
napi_complete(napi);
at91_write(priv, AT91_IER, reg_ier);
@@ -783,7 +876,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
if (likely(reg_msr & AT91_MSR_MRDY &&
~reg_msr & AT91_MSR_MABT)) {
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
- can_get_echo_skb(dev, mb - AT91_MB_TX_FIRST);
+ can_get_echo_skb(dev, mb - get_mb_tx_first(priv));
dev->stats.tx_packets++;
}
}
@@ -793,8 +886,8 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
* we get a TX int for the last can frame directly before a
* wrap around.
*/
- if ((priv->tx_next & AT91_NEXT_MASK) != 0 ||
- (priv->tx_echo & AT91_NEXT_MASK) == 0)
+ if ((priv->tx_next & get_next_mask(priv)) != 0 ||
+ (priv->tx_echo & get_next_mask(priv)) == 0)
netif_wake_queue(dev);
}
@@ -906,6 +999,29 @@ static void at91_irq_err_state(struct net_device *dev,
at91_write(priv, AT91_IER, reg_ier);
}
+static int at91_get_state_by_bec(const struct net_device *dev,
+ enum can_state *state)
+{
+ struct can_berr_counter bec;
+ int err;
+
+ err = at91_get_berr_counter(dev, &bec);
+ if (err)
+ return err;
+
+ if (bec.txerr < 96 && bec.rxerr < 96)
+ *state = CAN_STATE_ERROR_ACTIVE;
+ else if (bec.txerr < 128 && bec.rxerr < 128)
+ *state = CAN_STATE_ERROR_WARNING;
+ else if (bec.txerr < 256 && bec.rxerr < 256)
+ *state = CAN_STATE_ERROR_PASSIVE;
+ else
+ *state = CAN_STATE_BUS_OFF;
+
+ return 0;
+}
+
+
static void at91_irq_err(struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
@@ -913,21 +1029,28 @@ static void at91_irq_err(struct net_device *dev)
struct can_frame *cf;
enum can_state new_state;
u32 reg_sr;
+ int err;
- reg_sr = at91_read(priv, AT91_SR);
-
- /* we need to look at the unmasked reg_sr */
- if (unlikely(reg_sr & AT91_IRQ_BOFF))
- new_state = CAN_STATE_BUS_OFF;
- else if (unlikely(reg_sr & AT91_IRQ_ERRP))
- new_state = CAN_STATE_ERROR_PASSIVE;
- else if (unlikely(reg_sr & AT91_IRQ_WARN))
- new_state = CAN_STATE_ERROR_WARNING;
- else if (likely(reg_sr & AT91_IRQ_ERRA))
- new_state = CAN_STATE_ERROR_ACTIVE;
- else {
- netdev_err(dev, "BUG! hardware in undefined state\n");
- return;
+ if (at91_is_sam9263(priv)) {
+ reg_sr = at91_read(priv, AT91_SR);
+
+ /* we need to look at the unmasked reg_sr */
+ if (unlikely(reg_sr & AT91_IRQ_BOFF))
+ new_state = CAN_STATE_BUS_OFF;
+ else if (unlikely(reg_sr & AT91_IRQ_ERRP))
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (unlikely(reg_sr & AT91_IRQ_WARN))
+ new_state = CAN_STATE_ERROR_WARNING;
+ else if (likely(reg_sr & AT91_IRQ_ERRA))
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ else {
+ netdev_err(dev, "BUG! hardware in undefined state\n");
+ return;
+ }
+ } else {
+ err = at91_get_state_by_bec(dev, &new_state);
+ if (err)
+ return;
}
/* state hasn't changed */
@@ -968,19 +1091,19 @@ static irqreturn_t at91_irq(int irq, void *dev_id)
handled = IRQ_HANDLED;
/* Receive or error interrupt? -> napi */
- if (reg_sr & (AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME)) {
+ if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) {
/*
* The error bits are clear on read,
* save for later use.
*/
priv->reg_sr = reg_sr;
at91_write(priv, AT91_IDR,
- AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME);
+ get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME);
napi_schedule(&priv->napi);
}
/* Transmission complete interrupt */
- if (reg_sr & AT91_IRQ_MB_TX)
+ if (reg_sr & get_irq_mb_tx(priv))
at91_irq_tx(dev, reg_sr);
at91_irq_err(dev);
@@ -1123,6 +1246,8 @@ static struct attribute_group at91_sysfs_attr_group = {
static int __devinit at91_can_probe(struct platform_device *pdev)
{
+ const struct at91_devtype_data *devtype_data;
+ enum at91_devtype devtype;
struct net_device *dev;
struct at91_priv *priv;
struct resource *res;
@@ -1130,6 +1255,9 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
void __iomem *addr;
int err, irq;
+ devtype = pdev->id_entry->driver_data;
+ devtype_data = &at91_devtype_data[devtype];
+
clk = clk_get(&pdev->dev, "can_clk");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "no clock defined\n");
@@ -1157,7 +1285,8 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
goto exit_release;
}
- dev = alloc_candev(sizeof(struct at91_priv), AT91_MB_TX_NUM);
+ dev = alloc_candev(sizeof(struct at91_priv),
+ 1 << devtype_data->tx_shift);
if (!dev) {
err = -ENOMEM;
goto exit_iounmap;
@@ -1166,7 +1295,6 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
dev->netdev_ops = &at91_netdev_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
- dev->sysfs_groups[0] = &at91_sysfs_attr_group;
priv = netdev_priv(dev);
priv->can.clock.freq = clk_get_rate(clk);
@@ -1174,13 +1302,18 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
priv->can.do_set_mode = at91_set_mode;
priv->can.do_get_berr_counter = at91_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
- priv->reg_base = addr;
priv->dev = dev;
+ priv->reg_base = addr;
+ priv->devtype_data = *devtype_data;
+ priv->devtype_data.type = devtype;
priv->clk = clk;
priv->pdata = pdev->dev.platform_data;
priv->mb0_id = 0x7ff;
- netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
+ netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
+
+ if (at91_is_sam9263(priv))
+ dev->sysfs_groups[0] = &at91_sysfs_attr_group;
dev_set_drvdata(&pdev->dev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1230,13 +1363,26 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id at91_can_id_table[] = {
+ {
+ .name = "at91_can",
+ .driver_data = AT91_DEVTYPE_SAM9263,
+ }, {
+ .name = "at91sam9x5_can",
+ .driver_data = AT91_DEVTYPE_SAM9X5,
+ }, {
+ /* sentinel */
+ }
+};
+
static struct platform_driver at91_can_driver = {
- .probe = at91_can_probe,
- .remove = __devexit_p(at91_can_remove),
- .driver = {
- .name = KBUILD_MODNAME,
- .owner = THIS_MODULE,
+ .probe = at91_can_probe,
+ .remove = __devexit_p(at91_can_remove),
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
},
+ .id_table = at91_can_id_table,
};
static int __init at91_can_module_init(void)
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index b6e890d2836..a1c5abc38cd 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -79,8 +79,8 @@ static int bfin_can_set_bittiming(struct net_device *dev)
if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
timing |= SAM;
- bfin_write16(&reg->clock, clk);
- bfin_write16(&reg->timing, timing);
+ bfin_write(&reg->clock, clk);
+ bfin_write(&reg->timing, timing);
dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n",
clk, timing);
@@ -96,16 +96,16 @@ static void bfin_can_set_reset_mode(struct net_device *dev)
int i;
/* disable interrupts */
- bfin_write16(&reg->mbim1, 0);
- bfin_write16(&reg->mbim2, 0);
- bfin_write16(&reg->gim, 0);
+ bfin_write(&reg->mbim1, 0);
+ bfin_write(&reg->mbim2, 0);
+ bfin_write(&reg->gim, 0);
/* reset can and enter configuration mode */
- bfin_write16(&reg->control, SRS | CCR);
+ bfin_write(&reg->control, SRS | CCR);
SSYNC();
- bfin_write16(&reg->control, CCR);
+ bfin_write(&reg->control, CCR);
SSYNC();
- while (!(bfin_read16(&reg->control) & CCA)) {
+ while (!(bfin_read(&reg->control) & CCA)) {
udelay(10);
if (--timeout == 0) {
dev_err(dev->dev.parent,
@@ -119,33 +119,33 @@ static void bfin_can_set_reset_mode(struct net_device *dev)
* by writing to CAN Mailbox Configuration Registers 1 and 2
* For all bits: 0 - Mailbox disabled, 1 - Mailbox enabled
*/
- bfin_write16(&reg->mc1, 0);
- bfin_write16(&reg->mc2, 0);
+ bfin_write(&reg->mc1, 0);
+ bfin_write(&reg->mc2, 0);
/* Set Mailbox Direction */
- bfin_write16(&reg->md1, 0xFFFF); /* mailbox 1-16 are RX */
- bfin_write16(&reg->md2, 0); /* mailbox 17-32 are TX */
+ bfin_write(&reg->md1, 0xFFFF); /* mailbox 1-16 are RX */
+ bfin_write(&reg->md2, 0); /* mailbox 17-32 are TX */
/* RECEIVE_STD_CHL */
for (i = 0; i < 2; i++) {
- bfin_write16(&reg->chl[RECEIVE_STD_CHL + i].id0, 0);
- bfin_write16(&reg->chl[RECEIVE_STD_CHL + i].id1, AME);
- bfin_write16(&reg->chl[RECEIVE_STD_CHL + i].dlc, 0);
- bfin_write16(&reg->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF);
- bfin_write16(&reg->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF);
+ bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id0, 0);
+ bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id1, AME);
+ bfin_write(&reg->chl[RECEIVE_STD_CHL + i].dlc, 0);
+ bfin_write(&reg->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF);
+ bfin_write(&reg->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF);
}
/* RECEIVE_EXT_CHL */
for (i = 0; i < 2; i++) {
- bfin_write16(&reg->chl[RECEIVE_EXT_CHL + i].id0, 0);
- bfin_write16(&reg->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE);
- bfin_write16(&reg->chl[RECEIVE_EXT_CHL + i].dlc, 0);
- bfin_write16(&reg->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF);
- bfin_write16(&reg->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF);
+ bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id0, 0);
+ bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE);
+ bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].dlc, 0);
+ bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF);
+ bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF);
}
- bfin_write16(&reg->mc2, BIT(TRANSMIT_CHL - 16));
- bfin_write16(&reg->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
+ bfin_write(&reg->mc2, BIT(TRANSMIT_CHL - 16));
+ bfin_write(&reg->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
SSYNC();
priv->can.state = CAN_STATE_STOPPED;
@@ -160,9 +160,9 @@ static void bfin_can_set_normal_mode(struct net_device *dev)
/*
* leave configuration mode
*/
- bfin_write16(&reg->control, bfin_read16(&reg->control) & ~CCR);
+ bfin_write(&reg->control, bfin_read(&reg->control) & ~CCR);
- while (bfin_read16(&reg->status) & CCA) {
+ while (bfin_read(&reg->status) & CCA) {
udelay(10);
if (--timeout == 0) {
dev_err(dev->dev.parent,
@@ -174,25 +174,25 @@ static void bfin_can_set_normal_mode(struct net_device *dev)
/*
* clear _All_ tx and rx interrupts
*/
- bfin_write16(&reg->mbtif1, 0xFFFF);
- bfin_write16(&reg->mbtif2, 0xFFFF);
- bfin_write16(&reg->mbrif1, 0xFFFF);
- bfin_write16(&reg->mbrif2, 0xFFFF);
+ bfin_write(&reg->mbtif1, 0xFFFF);
+ bfin_write(&reg->mbtif2, 0xFFFF);
+ bfin_write(&reg->mbrif1, 0xFFFF);
+ bfin_write(&reg->mbrif2, 0xFFFF);
/*
* clear global interrupt status register
*/
- bfin_write16(&reg->gis, 0x7FF); /* overwrites with '1' */
+ bfin_write(&reg->gis, 0x7FF); /* overwrites with '1' */
/*
* Initialize Interrupts
* - set bits in the mailbox interrupt mask register
* - global interrupt mask
*/
- bfin_write16(&reg->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
- bfin_write16(&reg->mbim2, BIT(TRANSMIT_CHL - 16));
+ bfin_write(&reg->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
+ bfin_write(&reg->mbim2, BIT(TRANSMIT_CHL - 16));
- bfin_write16(&reg->gim, EPIM | BOIM | RMLIM);
+ bfin_write(&reg->gim, EPIM | BOIM | RMLIM);
SSYNC();
}
@@ -242,37 +242,28 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* fill id */
if (id & CAN_EFF_FLAG) {
- bfin_write16(&reg->chl[TRANSMIT_CHL].id0, id);
- if (id & CAN_RTR_FLAG)
- writew(((id & 0x1FFF0000) >> 16) | IDE | AME | RTR,
- &reg->chl[TRANSMIT_CHL].id1);
- else
- writew(((id & 0x1FFF0000) >> 16) | IDE | AME,
- &reg->chl[TRANSMIT_CHL].id1);
-
- } else {
- if (id & CAN_RTR_FLAG)
- writew((id << 2) | AME | RTR,
- &reg->chl[TRANSMIT_CHL].id1);
- else
- bfin_write16(&reg->chl[TRANSMIT_CHL].id1,
- (id << 2) | AME);
- }
+ bfin_write(&reg->chl[TRANSMIT_CHL].id0, id);
+ val = ((id & 0x1FFF0000) >> 16) | IDE;
+ } else
+ val = (id << 2);
+ if (id & CAN_RTR_FLAG)
+ val |= RTR;
+ bfin_write(&reg->chl[TRANSMIT_CHL].id1, val | AME);
/* fill payload */
for (i = 0; i < 8; i += 2) {
val = ((7 - i) < dlc ? (data[7 - i]) : 0) +
((6 - i) < dlc ? (data[6 - i] << 8) : 0);
- bfin_write16(&reg->chl[TRANSMIT_CHL].data[i], val);
+ bfin_write(&reg->chl[TRANSMIT_CHL].data[i], val);
}
/* fill data length code */
- bfin_write16(&reg->chl[TRANSMIT_CHL].dlc, dlc);
+ bfin_write(&reg->chl[TRANSMIT_CHL].dlc, dlc);
can_put_echo_skb(skb, dev, 0);
/* set transmit request */
- bfin_write16(&reg->trs2, BIT(TRANSMIT_CHL - 16));
+ bfin_write(&reg->trs2, BIT(TRANSMIT_CHL - 16));
return 0;
}
@@ -295,26 +286,26 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
/* get id */
if (isrc & BIT(RECEIVE_EXT_CHL)) {
/* extended frame format (EFF) */
- cf->can_id = ((bfin_read16(&reg->chl[RECEIVE_EXT_CHL].id1)
+ cf->can_id = ((bfin_read(&reg->chl[RECEIVE_EXT_CHL].id1)
& 0x1FFF) << 16)
- + bfin_read16(&reg->chl[RECEIVE_EXT_CHL].id0);
+ + bfin_read(&reg->chl[RECEIVE_EXT_CHL].id0);
cf->can_id |= CAN_EFF_FLAG;
obj = RECEIVE_EXT_CHL;
} else {
/* standard frame format (SFF) */
- cf->can_id = (bfin_read16(&reg->chl[RECEIVE_STD_CHL].id1)
+ cf->can_id = (bfin_read(&reg->chl[RECEIVE_STD_CHL].id1)
& 0x1ffc) >> 2;
obj = RECEIVE_STD_CHL;
}
- if (bfin_read16(&reg->chl[obj].id1) & RTR)
+ if (bfin_read(&reg->chl[obj].id1) & RTR)
cf->can_id |= CAN_RTR_FLAG;
/* get data length code */
- cf->can_dlc = get_can_dlc(bfin_read16(&reg->chl[obj].dlc) & 0xF);
+ cf->can_dlc = get_can_dlc(bfin_read(&reg->chl[obj].dlc) & 0xF);
/* get payload */
for (i = 0; i < 8; i += 2) {
- val = bfin_read16(&reg->chl[obj].data[i]);
+ val = bfin_read(&reg->chl[obj].data[i]);
cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0;
cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
}
@@ -368,7 +359,7 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
state == CAN_STATE_ERROR_PASSIVE)) {
- u16 cec = bfin_read16(&reg->cec);
+ u16 cec = bfin_read(&reg->cec);
u8 rxerr = cec;
u8 txerr = cec >> 8;
@@ -419,23 +410,23 @@ irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
struct net_device_stats *stats = &dev->stats;
u16 status, isrc;
- if ((irq == priv->tx_irq) && bfin_read16(&reg->mbtif2)) {
+ if ((irq == priv->tx_irq) && bfin_read(&reg->mbtif2)) {
/* transmission complete interrupt */
- bfin_write16(&reg->mbtif2, 0xFFFF);
+ bfin_write(&reg->mbtif2, 0xFFFF);
stats->tx_packets++;
- stats->tx_bytes += bfin_read16(&reg->chl[TRANSMIT_CHL].dlc);
+ stats->tx_bytes += bfin_read(&reg->chl[TRANSMIT_CHL].dlc);
can_get_echo_skb(dev, 0);
netif_wake_queue(dev);
- } else if ((irq == priv->rx_irq) && bfin_read16(&reg->mbrif1)) {
+ } else if ((irq == priv->rx_irq) && bfin_read(&reg->mbrif1)) {
/* receive interrupt */
- isrc = bfin_read16(&reg->mbrif1);
- bfin_write16(&reg->mbrif1, 0xFFFF);
+ isrc = bfin_read(&reg->mbrif1);
+ bfin_write(&reg->mbrif1, 0xFFFF);
bfin_can_rx(dev, isrc);
- } else if ((irq == priv->err_irq) && bfin_read16(&reg->gis)) {
+ } else if ((irq == priv->err_irq) && bfin_read(&reg->gis)) {
/* error interrupt */
- isrc = bfin_read16(&reg->gis);
- status = bfin_read16(&reg->esr);
- bfin_write16(&reg->gis, 0x7FF);
+ isrc = bfin_read(&reg->gis);
+ status = bfin_read(&reg->esr);
+ bfin_write(&reg->gis, 0x7FF);
bfin_can_err(dev, isrc, status);
} else {
return IRQ_NONE;
@@ -640,9 +631,9 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
if (netif_running(dev)) {
/* enter sleep mode */
- bfin_write16(&reg->control, bfin_read16(&reg->control) | SMR);
+ bfin_write(&reg->control, bfin_read(&reg->control) | SMR);
SSYNC();
- while (!(bfin_read16(&reg->intr) & SMACK)) {
+ while (!(bfin_read(&reg->intr) & SMACK)) {
udelay(10);
if (--timeout == 0) {
dev_err(dev->dev.parent,
@@ -663,7 +654,7 @@ static int bfin_can_resume(struct platform_device *pdev)
if (netif_running(dev)) {
/* leave sleep mode */
- bfin_write16(&reg->intr, 0);
+ bfin_write(&reg->intr, 0);
SSYNC();
}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 7e5cc0bd913..536bda072a1 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -26,7 +26,6 @@
*/
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -34,7 +33,6 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
-#include <linux/delay.h>
#include <linux/io.h>
#include <linux/can.h>
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index cc90824f2c9..0b5c6f8bdd3 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -20,7 +20,6 @@
*/
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -28,7 +27,6 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
-#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index d0f8c7e67e7..9bf1116e5b5 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -208,7 +208,7 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt)
return 0;
}
-int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt)
+static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt)
{
struct can_priv *priv = netdev_priv(dev);
int err;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index f1942cab35f..32778d56d33 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -22,6 +22,7 @@
#include <linux/can/error.h>
#include <linux/mfd/janz.h>
+#include <asm/io.h>
/* the DPM has 64k of memory, organized into 256x 256 byte pages */
#define DPM_NUM_PAGES 256
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 231385b8e08..c7f3d4ea116 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -408,7 +408,7 @@ static void plx_pci_del_card(struct pci_dev *pdev)
struct sja1000_priv *priv;
int i = 0;
- for (i = 0; i < card->channels; i++) {
+ for (i = 0; i < PLX_PCI_MAX_CHAN; i++) {
dev = card->net_dev[i];
if (!dev)
continue;
@@ -536,7 +536,6 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev, "Registering device failed "
"(err=%d)\n", err);
- free_sja1000dev(dev);
goto failure_cleanup;
}
@@ -549,6 +548,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
dev_err(&pdev->dev, "Channel #%d not detected\n",
i + 1);
free_sja1000dev(dev);
+ card->net_dev[i] = NULL;
}
}
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index de8e778f683..78bd4ecac14 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -47,6 +47,7 @@
#ifndef SJA1000_DEV_H
#define SJA1000_DEV_H
+#include <linux/irqreturn.h>
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 9793df6e345..cee6ba2b8b5 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -38,6 +38,7 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/can/dev.h>
#include <linux/of_platform.h>
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 1b49df6b247..4b70b7e8bde 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -56,6 +56,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/can.h>
static __initdata const char banner[] =
@@ -95,10 +96,6 @@ struct slcan {
unsigned long flags; /* Flag values/ mode etc */
#define SLF_INUSE 0 /* Channel in use */
#define SLF_ERROR 1 /* Parity, etc. error */
-
- unsigned char leased;
- dev_t line;
- pid_t pid;
};
static struct net_device **slcan_devs;
@@ -142,21 +139,6 @@ static struct net_device **slcan_devs;
* STANDARD SLCAN DECAPSULATION *
************************************************************************/
-static int asc2nibble(char c)
-{
-
- if ((c >= '0') && (c <= '9'))
- return c - '0';
-
- if ((c >= 'A') && (c <= 'F'))
- return c - 'A' + 10;
-
- if ((c >= 'a') && (c <= 'f'))
- return c - 'a' + 10;
-
- return 16; /* error */
-}
-
/* Send one completely decapsulated can_frame to the network layer */
static void slc_bump(struct slcan *sl)
{
@@ -195,18 +177,16 @@ static void slc_bump(struct slcan *sl)
*(u64 *) (&cf.data) = 0; /* clear payload */
for (i = 0, dlc_pos++; i < cf.can_dlc; i++) {
-
- tmp = asc2nibble(sl->rbuff[dlc_pos++]);
- if (tmp > 0x0F)
+ tmp = hex_to_bin(sl->rbuff[dlc_pos++]);
+ if (tmp < 0)
return;
cf.data[i] = (tmp << 4);
- tmp = asc2nibble(sl->rbuff[dlc_pos++]);
- if (tmp > 0x0F)
+ tmp = hex_to_bin(sl->rbuff[dlc_pos++]);
+ if (tmp < 0)
return;
cf.data[i] |= tmp;
}
-
skb = dev_alloc_skb(sizeof(struct can_frame));
if (!skb)
return;
@@ -217,7 +197,7 @@ static void slc_bump(struct slcan *sl)
skb->ip_summed = CHECKSUM_UNNECESSARY;
memcpy(skb_put(skb, sizeof(struct can_frame)),
&cf, sizeof(struct can_frame));
- netif_rx(skb);
+ netif_rx_ni(skb);
sl->dev->stats.rx_packets++;
sl->dev->stats.rx_bytes += cf.can_dlc;
@@ -462,7 +442,7 @@ static void slc_sync(void)
break;
sl = netdev_priv(dev);
- if (sl->tty || sl->leased)
+ if (sl->tty)
continue;
if (dev->flags & IFF_UP)
dev_close(dev);
@@ -473,12 +453,10 @@ static void slc_sync(void)
static struct slcan *slc_alloc(dev_t line)
{
int i;
+ char name[IFNAMSIZ];
struct net_device *dev = NULL;
struct slcan *sl;
- if (slcan_devs == NULL)
- return NULL; /* Master array missing ! */
-
for (i = 0; i < maxdev; i++) {
dev = slcan_devs[i];
if (dev == NULL)
@@ -490,25 +468,12 @@ static struct slcan *slc_alloc(dev_t line)
if (i >= maxdev)
return NULL;
- if (dev) {
- sl = netdev_priv(dev);
- if (test_bit(SLF_INUSE, &sl->flags)) {
- unregister_netdevice(dev);
- dev = NULL;
- slcan_devs[i] = NULL;
- }
- }
-
- if (!dev) {
- char name[IFNAMSIZ];
- sprintf(name, "slcan%d", i);
-
- dev = alloc_netdev(sizeof(*sl), name, slc_setup);
- if (!dev)
- return NULL;
- dev->base_addr = i;
- }
+ sprintf(name, "slcan%d", i);
+ dev = alloc_netdev(sizeof(*sl), name, slc_setup);
+ if (!dev)
+ return NULL;
+ dev->base_addr = i;
sl = netdev_priv(dev);
/* Initialize channel control data */
@@ -565,8 +530,6 @@ static int slcan_open(struct tty_struct *tty)
sl->tty = tty;
tty->disc_data = sl;
- sl->line = tty_devnum(tty);
- sl->pid = current->pid;
if (!test_bit(SLF_INUSE, &sl->flags)) {
/* Perform the low-level SLCAN initialization. */
@@ -617,8 +580,6 @@ static void slcan_close(struct tty_struct *tty)
tty->disc_data = NULL;
sl->tty = NULL;
- if (!sl->leased)
- sl->line = 0;
/* Flush network side */
unregister_netdev(sl->dev);
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index b520784fb19..31059617567 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -20,6 +20,7 @@
#include <linux/firmware.h>
#include <linux/sched.h>
#include <asm/div64.h>
+#include <asm/io.h>
#include "softing.h"
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 60a49e5a2a5..09a8b86cf1a 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -17,10 +17,10 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <asm/io.h>
#include "softing.h"
@@ -799,7 +799,7 @@ static __devinit int softing_pdev_probe(struct platform_device *pdev)
if (!pres)
goto platform_resource_failed;
card->dpram_phys = pres->start;
- card->dpram_size = pres->end - pres->start + 1;
+ card->dpram_size = resource_size(pres);
card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
if (!card->dpram) {
dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 22ce03e55b8..646c86bcc54 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -75,6 +75,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/pci.h>
@@ -97,7 +98,7 @@
#include <net/checksum.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index c26d863e169..5ccbed1784d 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -240,8 +240,6 @@ struct adapter {
struct work_struct ext_intr_handler_task;
struct adapter_params params;
- struct vlan_group *vlan_grp;
-
/* Terminator modules. */
struct sge *sge;
struct peespi *espi;
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index b422d83f534..3edbbc4c511 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -263,6 +263,8 @@ static int cxgb_open(struct net_device *dev)
if (!other_ports && adapter->params.stats_update_period)
schedule_mac_stats_update(adapter,
adapter->params.stats_update_period);
+
+ t1_vlan_mode(adapter, dev->features);
return 0;
}
@@ -849,19 +851,30 @@ static int t1_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-static void t1_vlan_rx_register(struct net_device *dev,
- struct vlan_group *grp)
+static u32 t1_fix_features(struct net_device *dev, u32 features)
{
- struct adapter *adapter = dev->ml_priv;
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
- spin_lock_irq(&adapter->async_lock);
- adapter->vlan_grp = grp;
- t1_set_vlan_accel(adapter, grp != NULL);
- spin_unlock_irq(&adapter->async_lock);
+ return features;
}
-#endif
+static int t1_set_features(struct net_device *dev, u32 features)
+{
+ u32 changed = dev->features ^ features;
+ struct adapter *adapter = dev->ml_priv;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ t1_vlan_mode(adapter, features);
+
+ return 0;
+}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void t1_netpoll(struct net_device *dev)
{
@@ -955,9 +968,8 @@ static const struct net_device_ops cxgb_netdev_ops = {
.ndo_do_ioctl = t1_ioctl,
.ndo_change_mtu = t1_change_mtu,
.ndo_set_mac_address = t1_set_mac_addr,
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
- .ndo_vlan_rx_register = t1_vlan_rx_register,
-#endif
+ .ndo_fix_features = t1_fix_features,
+ .ndo_set_features = t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = t1_netpoll,
#endif
@@ -1080,10 +1092,9 @@ static int __devinit init_one(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
if (vlan_tso_capable(adapter)) {
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
netdev->features |=
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-#endif
+ netdev->hw_features |= NETIF_F_HW_VLAN_RX;
/* T204: disable TSO */
if (!(is_T2(adapter)) || bi->port_number != 4) {
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 58380d24061..e9a03fffef1 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -742,13 +742,14 @@ static inline void setup_ring_params(struct adapter *adapter, u64 addr,
/*
* Enable/disable VLAN acceleration.
*/
-void t1_set_vlan_accel(struct adapter *adapter, int on_off)
+void t1_vlan_mode(struct adapter *adapter, u32 features)
{
struct sge *sge = adapter->sge;
- sge->sge_control &= ~F_VLAN_XTRACT;
- if (on_off)
+ if (features & NETIF_F_HW_VLAN_RX)
sge->sge_control |= F_VLAN_XTRACT;
+ else
+ sge->sge_control &= ~F_VLAN_XTRACT;
if (adapter->open_device_map) {
writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
readl(adapter->regs + A_SG_CONTROL); /* flush */
@@ -1397,12 +1398,11 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
} else
skb_checksum_none_assert(skb);
- if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
+ if (p->vlan_valid) {
st->vlan_xtract++;
- vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
- ntohs(p->vlan));
- } else
- netif_receive_skb(skb);
+ __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
+ }
+ netif_receive_skb(skb);
}
/*
@@ -1875,13 +1875,11 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
cpl->iff = dev->if_port;
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
if (vlan_tx_tag_present(skb)) {
cpl->vlan_valid = 1;
cpl->vlan = htons(vlan_tx_tag_get(skb));
st->vlan_insert++;
} else
-#endif
cpl->vlan_valid = 0;
send:
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
index 00cc37fc1f6..e03980bcdd6 100644
--- a/drivers/net/chelsio/sge.h
+++ b/drivers/net/chelsio/sge.h
@@ -79,7 +79,7 @@ irqreturn_t t1_interrupt(int irq, void *cookie);
int t1_poll(struct napi_struct *, int);
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
-void t1_set_vlan_accel(struct adapter *adapter, int on_off);
+void t1_vlan_mode(struct adapter *adapter, u32 features);
void t1_sge_start(struct sge *);
void t1_sge_stop(struct sge *);
int t1_sge_intr_error_handler(struct sge *);
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 11a92afdf98..94a2e541006 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1,6 +1,6 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2010 Broadcom Corporation
+ * Copyright (c) 2006-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,6 +28,7 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
+#include <linux/random.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
@@ -327,7 +328,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
msleep(100);
retry++;
}
- return 0;
+ return rc;
}
static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
@@ -605,11 +606,12 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
}
EXPORT_SYMBOL(cnic_unregister_driver);
-static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
+static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
+ u32 next)
{
id_tbl->start = start_id;
id_tbl->max = size;
- id_tbl->next = 0;
+ id_tbl->next = next;
spin_lock_init(&id_tbl->lock);
id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
if (!id_tbl->table)
@@ -835,7 +837,6 @@ static void cnic_free_resc(struct cnic_dev *dev)
cp->ctx_blks = 0;
cnic_free_dma(dev, &cp->gbl_buf_info);
- cnic_free_dma(dev, &cp->conn_buf_info);
cnic_free_dma(dev, &cp->kwq_info);
cnic_free_dma(dev, &cp->kwq_16_data_info);
cnic_free_dma(dev, &cp->kcq2.dma);
@@ -899,24 +900,56 @@ static int cnic_alloc_context(struct cnic_dev *dev)
return 0;
}
-static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
+static u16 cnic_bnx2_next_idx(u16 idx)
+{
+ return idx + 1;
+}
+
+static u16 cnic_bnx2_hw_idx(u16 idx)
+{
+ return idx;
+}
+
+static u16 cnic_bnx2x_next_idx(u16 idx)
{
- int err, i, is_bnx2 = 0;
+ idx++;
+ if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
+ idx++;
+
+ return idx;
+}
+
+static u16 cnic_bnx2x_hw_idx(u16 idx)
+{
+ if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
+ idx++;
+ return idx;
+}
+
+static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
+ bool use_pg_tbl)
+{
+ int err, i, use_page_tbl = 0;
struct kcqe **kcq;
- if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
- is_bnx2 = 1;
+ if (use_pg_tbl)
+ use_page_tbl = 1;
- err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
+ err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
if (err)
return err;
kcq = (struct kcqe **) info->dma.pg_arr;
info->kcq = kcq;
- if (is_bnx2)
+ info->next_idx = cnic_bnx2_next_idx;
+ info->hw_idx = cnic_bnx2_hw_idx;
+ if (use_pg_tbl)
return 0;
+ info->next_idx = cnic_bnx2x_next_idx;
+ info->hw_idx = cnic_bnx2x_hw_idx;
+
for (i = 0; i < KCQ_PAGE_CNT; i++) {
struct bnx2x_bd_chain_next *next =
(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
@@ -1059,7 +1092,7 @@ static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
goto error;
cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
- ret = cnic_alloc_kcq(dev, &cp->kcq1);
+ ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
if (ret)
goto error;
@@ -1139,25 +1172,17 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
cp->iro_arr = ethdev->iro_arr;
- cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
+ cp->max_cid_space = MAX_ISCSI_TBL_SZ;
cp->iscsi_start_cid = start_cid;
cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
- if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
cp->fcoe_init_cid = ethdev->fcoe_init_cid;
if (!cp->fcoe_init_cid)
cp->fcoe_init_cid = 0x10;
}
- if (start_cid < BNX2X_ISCSI_START_CID) {
- u32 delta = BNX2X_ISCSI_START_CID - start_cid;
-
- cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
- cp->fcoe_start_cid += delta;
- cp->max_cid_space += delta;
- }
-
cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
GFP_KERNEL);
if (!cp->iscsi_tbl)
@@ -1195,22 +1220,16 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
j++;
}
- ret = cnic_alloc_kcq(dev, &cp->kcq1);
+ ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
if (ret)
goto error;
- if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
- ret = cnic_alloc_kcq(dev, &cp->kcq2);
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
if (ret)
goto error;
}
- pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
- BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
- ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
- if (ret)
- goto error;
-
pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
if (ret)
@@ -1577,6 +1596,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
struct iscsi_context *ictx;
struct regpair context_addr;
int i, j, n = 2, n_max;
+ u8 port = CNIC_PORT(cp);
ctx->ctx_flags = 0;
if (!req2->num_additional_wqes)
@@ -1628,6 +1648,17 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
ictx->xstorm_st_context.iscsi.flags.flags |=
XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
+ ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
+ ETH_P_8021Q;
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
+ cp->port_mode == CHIP_2_PORT_MODE) {
+
+ port = 0;
+ }
+ ictx->xstorm_st_context.common.flags =
+ 1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
+ ictx->xstorm_st_context.common.flags =
+ port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
/* TSTORM requires the base address of RQ DB & not PTE */
@@ -1843,8 +1874,11 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
hw_cid, NONE_CONNECTION_TYPE, &l5_data);
- if (ret == 0)
+ if (ret == 0) {
wait_event(ctx->waitq, ctx->wait_cond);
+ if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
+ return -EBUSY;
+ }
return ret;
}
@@ -1879,8 +1913,10 @@ static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
skip_cfc_delete:
cnic_free_bnx2x_conn_resc(dev, l5_cid);
- atomic_dec(&cp->iscsi_conn);
- clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+ if (!ret) {
+ atomic_dec(&cp->iscsi_conn);
+ clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+ }
destroy_reply:
memset(&kcqe, 0, sizeof(kcqe));
@@ -1939,8 +1975,6 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
tstorm_buf->ka_interval = kwqe3->ka_interval;
tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
}
- tstorm_buf->rcv_buf = kwqe3->rcv_buf;
- tstorm_buf->snd_buf = kwqe3->snd_buf;
tstorm_buf->max_rt_time = 0xffffffff;
}
@@ -1969,15 +2003,14 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
mac[4]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
+ TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
+ TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
mac[2]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
- mac[1]);
+ TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
+ TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
mac[0]);
}
@@ -2156,7 +2189,7 @@ static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
memset(fcoe_stat, 0, sizeof(*fcoe_stat));
memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
- ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
FCOE_CONNECTION_TYPE, &l5_data);
return ret;
}
@@ -2201,12 +2234,9 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
- fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
- fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
- fcoe_init->eq_next_page_addr.lo =
- cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
- fcoe_init->eq_next_page_addr.hi =
- (u64) cp->kcq2.dma.pg_map_arr[1] >> 32;
+ fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
+ fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
+ fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
fcoe_init->sb_num = cp->status_blk_num;
fcoe_init->eq_prod = MAX_KCQ_IDX;
@@ -2214,7 +2244,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
cp->kcq2.sw_prod_idx = 0;
cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
- ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
FCOE_CONNECTION_TYPE, &l5_data);
*work = 3;
return ret;
@@ -2418,6 +2448,30 @@ static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
return ret;
}
+static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ u32 i;
+
+ for (i = start_cid; i < cp->max_cid_space; i++) {
+ struct cnic_context *ctx = &cp->ctx_tbl[i];
+ int j;
+
+ while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
+ msleep(10);
+
+ for (j = 0; j < 5; j++) {
+ if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+ break;
+ msleep(20);
+ }
+
+ if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+ netdev_warn(dev->netdev, "CID %x not deleted\n",
+ ctx->cid);
+ }
+}
+
static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct fcoe_kwqe_destroy *req;
@@ -2426,11 +2480,13 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
int ret;
u32 cid;
+ cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
+
req = (struct fcoe_kwqe_destroy *) kwqe;
cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
memset(&l5_data, 0, sizeof(l5_data));
- ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
+ ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
FCOE_CONNECTION_TYPE, &l5_data);
return ret;
}
@@ -2511,7 +2567,7 @@ static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EAGAIN; /* bnx2 is down */
- if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
+ if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
return -EINVAL;
for (i = 0; i < num_wqes; ) {
@@ -2651,32 +2707,6 @@ end:
cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}
-static u16 cnic_bnx2_next_idx(u16 idx)
-{
- return idx + 1;
-}
-
-static u16 cnic_bnx2_hw_idx(u16 idx)
-{
- return idx;
-}
-
-static u16 cnic_bnx2x_next_idx(u16 idx)
-{
- idx++;
- if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
- idx++;
-
- return idx;
-}
-
-static u16 cnic_bnx2x_hw_idx(u16 idx)
-{
- if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
- idx++;
- return idx;
-}
-
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -2687,12 +2717,12 @@ static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
i = ri = last = info->sw_prod_idx;
ri &= MAX_KCQ_IDX;
hw_prod = *info->hw_prod_idx_ptr;
- hw_prod = cp->hw_idx(hw_prod);
+ hw_prod = info->hw_idx(hw_prod);
while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
cp->completed_kcq[kcqe_cnt++] = kcqe;
- i = cp->next_idx(i);
+ i = info->next_idx(i);
ri = i & MAX_KCQ_IDX;
if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
last_cnt = kcqe_cnt;
@@ -2778,13 +2808,10 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
/* Tell compiler that status_blk fields can change. */
barrier();
- if (status_idx != *cp->kcq1.status_idx_ptr) {
- status_idx = (u16) *cp->kcq1.status_idx_ptr;
- /* status block index must be read first */
- rmb();
- cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
- } else
- break;
+ status_idx = (u16) *cp->kcq1.status_idx_ptr;
+ /* status block index must be read first */
+ rmb();
+ cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
}
CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
@@ -2908,8 +2935,6 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
/* Tell compiler that sblk fields can change. */
barrier();
- if (last_status == *info->status_idx_ptr)
- break;
last_status = *info->status_idx_ptr;
/* status block index must be read before reading the KCQ */
@@ -2933,7 +2958,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
CNIC_WR16(dev, cp->kcq1.io_addr,
cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
- if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
status_idx, IGU_INT_ENABLE, 1);
break;
@@ -3052,13 +3077,21 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
break;
}
case CNIC_CTL_COMPLETION_CMD: {
- u32 cid = BNX2X_SW_CID(info->data.comp.cid);
+ struct cnic_ctl_completion *comp = &info->data.comp;
+ u32 cid = BNX2X_SW_CID(comp->cid);
u32 l5_cid;
struct cnic_local *cp = dev->cnic_priv;
if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+ if (unlikely(comp->error)) {
+ set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
+ netdev_err(dev->netdev,
+ "CID %x CFC delete comp error %x\n",
+ cid, comp->error);
+ }
+
ctx->wait_cond = 1;
wake_up(&ctx->waitq);
}
@@ -3772,7 +3805,13 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
break;
case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
- cnic_cm_upcall(cp, csk, opcode);
+ /* after we already sent CLOSE_REQ */
+ if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
+ !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
+ csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
+ cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
+ else
+ cnic_cm_upcall(cp, csk, opcode);
break;
}
csk_put(csk);
@@ -3803,14 +3842,17 @@ static void cnic_cm_free_mem(struct cnic_dev *dev)
static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ u32 port_id;
cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
GFP_KERNEL);
if (!cp->csk_tbl)
return -ENOMEM;
+ port_id = random32();
+ port_id %= CNIC_LOCAL_PORT_RANGE;
if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
- CNIC_LOCAL_PORT_MIN)) {
+ CNIC_LOCAL_PORT_MIN, port_id)) {
cnic_cm_free_mem(dev);
return -ENOMEM;
}
@@ -3826,12 +3868,14 @@ static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
}
/* 1. If event opcode matches the expected event in csk->state
- * 2. If the expected event is CLOSE_COMP, we accept any event
+ * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
+ * event
* 3. If the expected event is 0, meaning the connection was never
* established, we accept the opcode from cm_abort.
*/
if (opcode == csk->state || csk->state == 0 ||
- csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) {
+ csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
+ csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
if (csk->state == 0)
csk->state = opcode;
@@ -3865,7 +3909,7 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
u32 seed;
- get_random_bytes(&seed, 4);
+ seed = random32();
cnic_ctx_wr(dev, 45, 0, seed);
return 0;
}
@@ -3912,7 +3956,6 @@ static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
- int i;
if (!cp->ctx_tbl)
return;
@@ -3920,16 +3963,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
if (!netif_running(dev->netdev))
return;
- for (i = 0; i < cp->max_cid_space; i++) {
- struct cnic_context *ctx = &cp->ctx_tbl[i];
-
- while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
- msleep(10);
-
- if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
- netdev_warn(dev->netdev, "CID %x not deleted\n",
- ctx->cid);
- }
+ cnic_bnx2x_delete_wait(dev, 0);
cancel_delayed_work(&cp->delete_task);
flush_workqueue(cnic_wq);
@@ -3992,6 +4026,7 @@ static void cnic_delete_task(struct work_struct *work)
for (i = 0; i < cp->max_cid_space; i++) {
struct cnic_context *ctx = &cp->ctx_tbl[i];
+ int err;
if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
!test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
@@ -4005,13 +4040,15 @@ static void cnic_delete_task(struct work_struct *work)
if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
continue;
- cnic_bnx2x_destroy_ramrod(dev, i);
+ err = cnic_bnx2x_destroy_ramrod(dev, i);
cnic_free_bnx2x_conn_resc(dev, i);
- if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
- atomic_dec(&cp->iscsi_conn);
+ if (!err) {
+ if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
+ atomic_dec(&cp->iscsi_conn);
- clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+ clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+ }
}
if (need_resched)
@@ -4218,14 +4255,6 @@ static void cnic_enable_bnx2_int(struct cnic_dev *dev)
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
-static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
-{
- u32 max_conn;
-
- max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
- dev->max_iscsi_conn = max_conn;
-}
-
static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -4291,7 +4320,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
cnic_ctx_wr(dev, cid_addr, offset1, val);
- txbd = (struct tx_bd *) udev->l2_ring;
+ txbd = udev->l2_ring;
buf_map = udev->l2_buf_map;
for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
@@ -4350,7 +4379,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
- rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
+ rxbd = udev->l2_ring + BCM_PAGE_SIZE;
for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
int n = (i % cp->l2_rx_ring_size) + 1;
@@ -4550,8 +4579,6 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
return err;
}
- cnic_get_bnx2_iscsi_info(dev);
-
return 0;
}
@@ -4617,7 +4644,7 @@ static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
offsetof(struct hc_status_block_data_e1x, index_data) +
sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
- offsetof(struct hc_index_data, timeout), 64 / 12);
+ offsetof(struct hc_index_data, timeout), 64 / 4);
cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}
@@ -4633,7 +4660,6 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
dma_addr_t buf_map, ring_map = udev->l2_ring_map;
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
- int port = CNIC_PORT(cp);
int i;
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 val;
@@ -4674,10 +4700,9 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
/* reset xstorm per client statistics */
if (cli < MAX_STAT_COUNTER_ID) {
- val = BAR_XSTRORM_INTMEM +
- XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
- for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
- CNIC_WR(dev, val + i * 4, 0);
+ data->general.statistics_zero_flg = 1;
+ data->general.statistics_en_flg = 1;
+ data->general.statistics_counter_id = cli;
}
cp->tx_cons_ptr =
@@ -4695,7 +4720,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
(udev->l2_ring + (2 * BCM_PAGE_SIZE));
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
- int port = CNIC_PORT(cp);
u32 cli = cp->ethdev->iscsi_l2_client_id;
int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
u32 val;
@@ -4703,10 +4727,10 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
/* General data */
data->general.client_id = cli;
- data->general.statistics_en_flg = 1;
- data->general.statistics_counter_id = cli;
data->general.activate_flg = 1;
data->general.sp_client_id = cli;
+ data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
+ data->general.func_id = cp->pfid;
for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
@@ -4740,23 +4764,12 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
data->rx.status_block_id = BNX2X_DEF_SB_ID;
data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
- data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);
- data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
+ data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
data->rx.outer_vlan_removal_enable_flg = 1;
-
- /* reset tstorm and ustorm per client statistics */
- if (cli < MAX_STAT_COUNTER_ID) {
- val = BAR_TSTRORM_INTMEM +
- TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
- for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
- CNIC_WR(dev, val + i * 4, 0);
-
- val = BAR_USTRORM_INTMEM +
- USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
- for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
- CNIC_WR(dev, val + i * 4, 0);
- }
+ data->rx.silent_vlan_removal_flg = 1;
+ data->rx.silent_vlan_value = 0;
+ data->rx.silent_vlan_mask = 0xffff;
cp->rx_cons_ptr =
&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
@@ -4772,7 +4785,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
cp->kcq1.sw_prod_idx = 0;
- if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
cp->kcq1.hw_prod_idx_ptr =
@@ -4788,7 +4801,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
&sb->sb.running_index[SM_RX_ID];
}
- if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
@@ -4805,10 +4818,12 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
- int func = CNIC_FUNC(cp), ret, i;
+ int func = CNIC_FUNC(cp), ret;
u32 pfid;
- if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ cp->port_mode = CHIP_PORT_MODE_NONE;
+
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
if (!(val & 1))
@@ -4816,25 +4831,28 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
else
val = (val >> 1) & 1;
- if (val)
+ if (val) {
+ cp->port_mode = CHIP_4_PORT_MODE;
cp->pfid = func >> 1;
- else
+ } else {
+ cp->port_mode = CHIP_2_PORT_MODE;
cp->pfid = func & 0x6;
+ }
} else {
cp->pfid = func;
}
pfid = cp->pfid;
ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
- cp->iscsi_start_cid);
+ cp->iscsi_start_cid, 0);
if (ret)
return -ENOMEM;
- if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
BNX2X_FCOE_NUM_CONNECTIONS,
- cp->fcoe_start_cid);
+ cp->fcoe_start_cid, 0);
if (ret)
return -ENOMEM;
@@ -4868,15 +4886,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
HC_INDEX_ISCSI_EQ_CONS);
- for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
- CNIC_WR(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
- cp->conn_buf_info.pgtbl[2 * i]);
- CNIC_WR(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
- cp->conn_buf_info.pgtbl[(2 * i) + 1]);
- }
-
CNIC_WR(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
@@ -4915,7 +4924,7 @@ static void cnic_init_rings(struct cnic_dev *dev)
struct client_init_ramrod_data *data;
union l5cm_specific_data l5_data;
struct ustorm_eth_rx_producers rx_prods = {0};
- u32 off, i;
+ u32 off, i, *cid_ptr;
rx_prods.bd_prod = 0;
rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
@@ -4924,7 +4933,7 @@ static void cnic_init_rings(struct cnic_dev *dev)
cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
off = BAR_USTRORM_INTMEM +
- (BNX2X_CHIP_IS_E2(cp->chip_id) ?
+ (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
@@ -4934,6 +4943,7 @@ static void cnic_init_rings(struct cnic_dev *dev)
set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
data = udev->l2_buf;
+ cid_ptr = udev->l2_buf + 12;
memset(data, 0, sizeof(*data));
@@ -4958,12 +4968,15 @@ static void cnic_init_rings(struct cnic_dev *dev)
"iSCSI CLIENT_SETUP did not complete\n");
cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
cnic_ring_ctl(dev, cid, cli, 1);
+ *cid_ptr = cid;
}
}
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_uio_dev *udev = cp->udev;
+ void *rx_ring;
if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
return;
@@ -4971,7 +4984,6 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
cnic_shutdown_bnx2_rx_ring(dev);
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
- struct cnic_local *cp = dev->cnic_priv;
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 cid = cp->ethdev->iscsi_l2_cid;
union l5cm_specific_data l5_data;
@@ -5001,6 +5013,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
msleep(10);
}
clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
+ rx_ring = udev->l2_ring + BCM_PAGE_SIZE;
+ memset(rx_ring, 0, BCM_PAGE_SIZE);
}
static int cnic_register_netdev(struct cnic_dev *dev)
@@ -5217,6 +5231,8 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
cdev->pcidev = pdev;
cp->chip_id = ethdev->chip_id;
+ cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+
cp->cnic_ops = &cnic_bnx2_ops;
cp->start_hw = cnic_start_bnx2_hw;
cp->stop_hw = cnic_stop_bnx2_hw;
@@ -5228,8 +5244,6 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
cp->enable_int = cnic_enable_bnx2_int;
cp->disable_int_sync = cnic_disable_bnx2_int_sync;
cp->close_conn = cnic_close_bnx2_conn;
- cp->next_idx = cnic_bnx2_next_idx;
- cp->hw_idx = cnic_bnx2_hw_idx;
return cdev;
cnic_err:
@@ -5274,7 +5288,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
- if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
!(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
@@ -5290,13 +5304,11 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cp->stop_cm = cnic_cm_stop_bnx2x_hw;
cp->enable_int = cnic_enable_bnx2x_int;
cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
- if (BNX2X_CHIP_IS_E2(cp->chip_id))
+ if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
cp->ack_int = cnic_ack_bnx2x_e2_msix;
else
cp->ack_int = cnic_ack_bnx2x_msix;
cp->close_conn = cnic_close_bnx2x_conn;
- cp->next_idx = cnic_bnx2x_next_idx;
- cp->hw_idx = cnic_bnx2x_hw_idx;
return cdev;
}
@@ -5322,6 +5334,27 @@ static struct cnic_dev *is_cnic_dev(struct net_device *dev)
return cdev;
}
+static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
+ u16 vlan_id)
+{
+ int if_type;
+
+ rcu_read_lock();
+ for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+ struct cnic_ulp_ops *ulp_ops;
+ void *ctx;
+
+ ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+ if (!ulp_ops || !ulp_ops->indicate_netevent)
+ continue;
+
+ ctx = cp->ulp_handle[if_type];
+
+ ulp_ops->indicate_netevent(ctx, event, vlan_id);
+ }
+ rcu_read_unlock();
+}
+
/**
* netdev event handler
*/
@@ -5330,12 +5363,11 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
{
struct net_device *netdev = ptr;
struct cnic_dev *dev;
- int if_type;
int new_dev = 0;
dev = cnic_from_netdev(netdev);
- if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
+ if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
/* Check for the hot-plug device */
dev = is_cnic_dev(netdev);
if (dev) {
@@ -5351,7 +5383,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
else if (event == NETDEV_UNREGISTER)
cnic_ulp_exit(dev);
- if (event == NETDEV_UP) {
+ if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
if (cnic_register_netdev(dev) != 0) {
cnic_put(dev);
goto done;
@@ -5360,20 +5392,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
cnic_ulp_start(dev);
}
- rcu_read_lock();
- for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
- struct cnic_ulp_ops *ulp_ops;
- void *ctx;
-
- ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
- if (!ulp_ops || !ulp_ops->indicate_netevent)
- continue;
-
- ctx = cp->ulp_handle[if_type];
-
- ulp_ops->indicate_netevent(ctx, event);
- }
- rcu_read_unlock();
+ cnic_rcv_netevent(cp, event, 0);
if (event == NETDEV_GOING_DOWN) {
cnic_ulp_stop(dev);
@@ -5389,6 +5408,19 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
goto done;
}
cnic_put(dev);
+ } else {
+ struct net_device *realdev;
+ u16 vid;
+
+ vid = cnic_get_vlan(netdev, &realdev);
+ if (realdev) {
+ dev = cnic_from_netdev(realdev);
+ if (dev) {
+ vid |= VLAN_TAG_PRESENT;
+ cnic_rcv_netevent(dev->cnic_priv, event, vid);
+ cnic_put(dev);
+ }
+ }
}
done:
return NOTIFY_DONE;
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 3367a6d3a77..7a2928f82d4 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -1,6 +1,6 @@
/* cnic.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2010 Broadcom Corporation
+ * Copyright (c) 2006-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -68,11 +68,6 @@
#define BNX2_PG_CTX_MAP 0x1a0034
#define BNX2_ISCSI_CTX_MAP 0x1a0074
-struct cnic_redirect_entry {
- struct dst_entry *old_dst;
- struct dst_entry *new_dst;
-};
-
#define MAX_COMPLETED_KCQE 64
#define MAX_CNIC_L5_CONTEXT 256
@@ -171,6 +166,7 @@ struct cnic_context {
unsigned long ctx_flags;
#define CTX_FL_OFFLD_START 0
#define CTX_FL_DELETE_WAIT 1
+#define CTX_FL_CID_ERROR 2
u8 ulp_proto_id;
union {
struct cnic_iscsi *iscsi;
@@ -185,6 +181,9 @@ struct kcq_info {
u16 sw_prod_idx;
u16 *status_idx_ptr;
u32 io_addr;
+
+ u16 (*next_idx)(u16);
+ u16 (*hw_idx)(u16);
};
struct iro {
@@ -242,7 +241,7 @@ struct cnic_local {
u16 rx_cons;
u16 tx_cons;
- struct iro *iro_arr;
+ const struct iro *iro_arr;
#define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr)
struct cnic_dma kwq_info;
@@ -283,7 +282,6 @@ struct cnic_local {
struct cnic_sock *csk_tbl;
struct cnic_id_tbl csk_port_tbl;
- struct cnic_dma conn_buf_info;
struct cnic_dma gbl_buf_info;
struct cnic_iscsi *iscsi_tbl;
@@ -317,6 +315,11 @@ struct cnic_local {
u32 chip_id;
int func;
u32 pfid;
+ u8 port_mode;
+#define CHIP_4_PORT_MODE 0
+#define CHIP_2_PORT_MODE 1
+#define CHIP_PORT_MODE_NONE 2
+
u32 shmem_base;
struct cnic_ops *cnic_ops;
@@ -332,8 +335,6 @@ struct cnic_local {
void (*disable_int_sync)(struct cnic_dev *);
void (*ack_int)(struct cnic_dev *);
void (*close_conn)(struct cnic_sock *, u32 opcode);
- u16 (*next_idx)(u16);
- u16 (*hw_idx)(u16);
};
struct bnx2x_bd_chain_next {
@@ -368,7 +369,6 @@ struct bnx2x_bd_chain_next {
#define BNX2X_ISCSI_MAX_PENDING_R2TS 4
#define BNX2X_ISCSI_R2TQE_SIZE 8
#define BNX2X_ISCSI_HQ_BD_SIZE 64
-#define BNX2X_ISCSI_CONN_BUF_SIZE 64
#define BNX2X_ISCSI_GLB_BUF_SIZE 64
#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff
#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff
@@ -384,6 +384,9 @@ struct bnx2x_bd_chain_next {
#define BNX2X_CHIP_NUM_57712E 0x1663
#define BNX2X_CHIP_NUM_57713 0x1651
#define BNX2X_CHIP_NUM_57713E 0x1652
+#define BNX2X_CHIP_NUM_57800 0x168a
+#define BNX2X_CHIP_NUM_57810 0x168e
+#define BNX2X_CHIP_NUM_57840 0x168d
#define BNX2X_CHIP_NUM(x) (x >> 16)
#define BNX2X_CHIP_IS_57710(x) \
@@ -402,9 +405,19 @@ struct bnx2x_bd_chain_next {
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713)
#define BNX2X_CHIP_IS_57713E(x) \
(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E)
+#define BNX2X_CHIP_IS_57800(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57800)
+#define BNX2X_CHIP_IS_57810(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57810)
+#define BNX2X_CHIP_IS_57840(x) \
+ (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57840)
#define BNX2X_CHIP_IS_E2(x) \
(BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
+#define BNX2X_CHIP_IS_E3(x) \
+ (BNX2X_CHIP_IS_57800(x) || BNX2X_CHIP_IS_57810(x) || \
+ BNX2X_CHIP_IS_57840(x))
+#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x))
#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id)
@@ -441,8 +454,8 @@ struct bnx2x_bd_chain_next {
#define CNIC_PORT(cp) ((cp)->pfid & 1)
#define CNIC_FUNC(cp) ((cp)->func)
-#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2(cp->chip_id) ? 0 :\
- (CNIC_FUNC(cp) & 1))
+#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
+ 0 : (CNIC_FUNC(cp) & 1))
#define CNIC_E1HVN(cp) ((cp)->pfid >> 1)
#define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \
@@ -451,10 +464,15 @@ struct bnx2x_bd_chain_next {
#define BNX2X_SW_CID(x) (x & 0x1ffff)
#define BNX2X_CL_QZONE_ID(cp, cli) \
- (cli + (CNIC_PORT(cp) * (BNX2X_CHIP_IS_E2(cp->chip_id) ?\
- ETH_MAX_RX_CLIENTS_E2 : \
- ETH_MAX_RX_CLIENTS_E1H)))
+ (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli : \
+ cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
+
+#ifndef MAX_STAT_COUNTER_ID
+#define MAX_STAT_COUNTER_ID \
+ (BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H : \
+ ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\
+ MAX_STAT_COUNTER_ID_E1))
+#endif
-#define TCP_TSTORM_OOO_DROP_AND_PROC_ACK (0<<4)
#endif
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
index fdbc0041560..e47d2107676 100644
--- a/drivers/net/cnic_defs.h
+++ b/drivers/net/cnic_defs.h
@@ -1,7 +1,7 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2010 Broadcom Corporation
+ * Copyright (c) 2006-2009 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -45,13 +45,13 @@
#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21)
-#define FCOE_RAMROD_CMD_ID_INIT (FCOE_KCQE_OPCODE_INIT_FUNC)
-#define FCOE_RAMROD_CMD_ID_DESTROY (FCOE_KCQE_OPCODE_DESTROY_FUNC)
+#define FCOE_RAMROD_CMD_ID_INIT_FUNC (FCOE_KCQE_OPCODE_INIT_FUNC)
+#define FCOE_RAMROD_CMD_ID_DESTROY_FUNC (FCOE_KCQE_OPCODE_DESTROY_FUNC)
+#define FCOE_RAMROD_CMD_ID_STAT_FUNC (FCOE_KCQE_OPCODE_STAT_FUNC)
#define FCOE_RAMROD_CMD_ID_OFFLOAD_CONN (FCOE_KCQE_OPCODE_OFFLOAD_CONN)
#define FCOE_RAMROD_CMD_ID_ENABLE_CONN (FCOE_KCQE_OPCODE_ENABLE_CONN)
#define FCOE_RAMROD_CMD_ID_DISABLE_CONN (FCOE_KCQE_OPCODE_DISABLE_CONN)
#define FCOE_RAMROD_CMD_ID_DESTROY_CONN (FCOE_KCQE_OPCODE_DESTROY_CONN)
-#define FCOE_RAMROD_CMD_ID_STAT (FCOE_KCQE_OPCODE_STAT_FUNC)
#define FCOE_RAMROD_CMD_ID_TERMINATE_CONN (0x81)
#define FCOE_KWQE_OPCODE_INIT1 (0)
@@ -641,20 +641,20 @@ struct cstorm_iscsi_ag_context {
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF (0x3<<14)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_SHIFT 14
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<14)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 14
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN (0x1<<19)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN_SHIFT 19
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN (0x1<<20)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN_SHIFT 20
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN (0x1<<21)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN_SHIFT 21
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN (0x1<<22)
-#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN_SHIFT 22
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<19)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 19
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN (0x1<<20)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN_SHIFT 20
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<21)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 21
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<22)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 22
#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
@@ -694,573 +694,667 @@ struct cstorm_iscsi_ag_context {
#endif
#if defined(__BIG_ENDIAN)
u16 __reserved64;
- u16 __cq_u_prod0;
+ u16 cq_u_prod;
#elif defined(__LITTLE_ENDIAN)
- u16 __cq_u_prod0;
+ u16 cq_u_prod;
u16 __reserved64;
#endif
u32 __cq_u_prod1;
#if defined(__BIG_ENDIAN)
u16 __agg_vars3;
- u16 __cq_u_prod2;
+ u16 cq_u_pend;
#elif defined(__LITTLE_ENDIAN)
- u16 __cq_u_prod2;
+ u16 cq_u_pend;
u16 __agg_vars3;
#endif
#if defined(__BIG_ENDIAN)
u16 __aux2_th;
- u16 __cq_u_prod3;
+ u16 aux2_val;
#elif defined(__LITTLE_ENDIAN)
- u16 __cq_u_prod3;
+ u16 aux2_val;
u16 __aux2_th;
#endif
};
/*
- * Parameters initialized during offloaded according to FLOGI/PLOGI/PRLI and used in FCoE context section
+ * The fcoe extra aggregative context section of Tstorm
*/
-struct ustorm_fcoe_params {
-#if defined(__BIG_ENDIAN)
- u16 fcoe_conn_id;
- u16 flags;
-#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
-#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
-#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
-#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
-#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
-#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
-#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
-#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
-#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
-#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
-#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
-#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
-#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
-#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
-#define USTORM_FCOE_PARAMS_B_C2_VALID (0x1<<7)
-#define USTORM_FCOE_PARAMS_B_C2_VALID_SHIFT 7
-#define USTORM_FCOE_PARAMS_B_ACK_0 (0x1<<8)
-#define USTORM_FCOE_PARAMS_B_ACK_0_SHIFT 8
-#define USTORM_FCOE_PARAMS_RSRV0 (0x7F<<9)
-#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 9
-#elif defined(__LITTLE_ENDIAN)
- u16 flags;
-#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
-#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
-#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
-#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
-#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
-#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
-#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
-#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
-#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
-#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
-#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
-#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
-#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
-#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
-#define USTORM_FCOE_PARAMS_B_C2_VALID (0x1<<7)
-#define USTORM_FCOE_PARAMS_B_C2_VALID_SHIFT 7
-#define USTORM_FCOE_PARAMS_B_ACK_0 (0x1<<8)
-#define USTORM_FCOE_PARAMS_B_ACK_0_SHIFT 8
-#define USTORM_FCOE_PARAMS_RSRV0 (0x7F<<9)
-#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 9
- u16 fcoe_conn_id;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 hc_csdm_byte_en;
- u8 func_id;
- u8 port_id;
- u8 vnic_id;
-#elif defined(__LITTLE_ENDIAN)
- u8 vnic_id;
- u8 port_id;
- u8 func_id;
- u8 hc_csdm_byte_en;
-#endif
+struct tstorm_fcoe_extra_ag_context_section {
+ u32 __agg_val1;
#if defined(__BIG_ENDIAN)
- u16 rx_total_conc_seqs;
- u16 rx_max_fc_pay_len;
+ u8 __tcp_agg_vars2;
+ u8 __agg_val3;
+ u16 __agg_val2;
#elif defined(__LITTLE_ENDIAN)
- u16 rx_max_fc_pay_len;
- u16 rx_total_conc_seqs;
+ u16 __agg_val2;
+ u8 __agg_val3;
+ u8 __tcp_agg_vars2;
#endif
#if defined(__BIG_ENDIAN)
- u16 ox_id;
- u16 rx_max_conc_seqs;
+ u16 __agg_val5;
+ u8 __agg_val6;
+ u8 __tcp_agg_vars3;
#elif defined(__LITTLE_ENDIAN)
- u16 rx_max_conc_seqs;
- u16 ox_id;
+ u8 __tcp_agg_vars3;
+ u8 __agg_val6;
+ u16 __agg_val5;
#endif
+ u32 __lcq_prod;
+ u32 rtt_seq;
+ u32 rtt_time;
+ u32 __reserved66;
+ u32 wnd_right_edge;
+ u32 tcp_agg_vars1;
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
+ u32 snd_max;
+ u32 __lcq_cons;
+ u32 __reserved2;
};
/*
- * FCoE 16-bits index structure
- */
-struct fcoe_idx16_fields {
- u16 fields;
-#define FCOE_IDX16_FIELDS_IDX (0x7FFF<<0)
-#define FCOE_IDX16_FIELDS_IDX_SHIFT 0
-#define FCOE_IDX16_FIELDS_MSB (0x1<<15)
-#define FCOE_IDX16_FIELDS_MSB_SHIFT 15
-};
-
-/*
- * FCoE 16-bits index union
- */
-union fcoe_idx16_field_union {
- struct fcoe_idx16_fields fields;
- u16 val;
-};
-
-/*
- * 4 regs size
+ * The fcoe aggregative context of Tstorm
*/
-struct fcoe_bd_ctx {
- u32 buf_addr_hi;
- u32 buf_addr_lo;
+struct tstorm_fcoe_ag_context {
#if defined(__BIG_ENDIAN)
- u16 rsrv0;
- u16 buf_len;
+ u16 ulp_credit;
+ u8 agg_vars1;
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+ u8 state;
#elif defined(__LITTLE_ENDIAN)
- u16 buf_len;
- u16 rsrv0;
+ u8 state;
+ u8 agg_vars1;
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+ u16 ulp_credit;
#endif
#if defined(__BIG_ENDIAN)
- u16 rsrv1;
- u16 flags;
+ u16 __agg_val4;
+ u16 agg_vars2;
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
#elif defined(__LITTLE_ENDIAN)
- u16 flags;
- u16 rsrv1;
+ u16 agg_vars2;
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+ u16 __agg_val4;
#endif
+ struct tstorm_fcoe_extra_ag_context_section __extra_section;
};
+
+
/*
- * Parameters required for placement according to SGL
+ * The tcp aggregative context section of Tstorm
*/
-struct ustorm_fcoe_data_place {
+struct tstorm_tcp_tcp_ag_context_section {
+ u32 __agg_val1;
#if defined(__BIG_ENDIAN)
- u16 cached_sge_off;
- u8 cached_num_sges;
- u8 cached_sge_idx;
+ u8 __tcp_agg_vars2;
+ u8 __agg_val3;
+ u16 __agg_val2;
#elif defined(__LITTLE_ENDIAN)
- u8 cached_sge_idx;
- u8 cached_num_sges;
- u16 cached_sge_off;
+ u16 __agg_val2;
+ u8 __agg_val3;
+ u8 __tcp_agg_vars2;
#endif
- struct fcoe_bd_ctx cached_sge[3];
-};
-
-struct fcoe_task_ctx_entry_txwr_rxrd {
#if defined(__BIG_ENDIAN)
- u16 verify_tx_seq;
- u8 init_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
- u8 tx_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
+ u16 __agg_val5;
+ u8 __agg_val6;
+ u8 __tcp_agg_vars3;
#elif defined(__LITTLE_ENDIAN)
- u8 tx_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
- u8 init_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
- u16 verify_tx_seq;
+ u8 __tcp_agg_vars3;
+ u8 __agg_val6;
+ u16 __agg_val5;
#endif
+ u32 snd_nxt;
+ u32 rtt_seq;
+ u32 rtt_time;
+ u32 __reserved66;
+ u32 wnd_right_edge;
+ u32 tcp_agg_vars1;
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
+ u32 snd_max;
+ u32 snd_una;
+ u32 __reserved2;
};
-struct fcoe_fcp_cmd_payload {
- u32 opaque[8];
-};
-
-struct fcoe_fc_hdr {
-#if defined(__BIG_ENDIAN)
- u8 cs_ctl;
- u8 s_id[3];
-#elif defined(__LITTLE_ENDIAN)
- u8 s_id[3];
- u8 cs_ctl;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 r_ctl;
- u8 d_id[3];
-#elif defined(__LITTLE_ENDIAN)
- u8 d_id[3];
- u8 r_ctl;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 seq_id;
- u8 df_ctl;
- u16 seq_cnt;
-#elif defined(__LITTLE_ENDIAN)
- u16 seq_cnt;
- u8 df_ctl;
- u8 seq_id;
-#endif
+/*
+ * The iscsi aggregative context of Tstorm
+ */
+struct tstorm_iscsi_ag_context {
#if defined(__BIG_ENDIAN)
- u8 type;
- u8 f_ctl[3];
+ u16 ulp_credit;
+ u8 agg_vars1;
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
+ u8 state;
#elif defined(__LITTLE_ENDIAN)
- u8 f_ctl[3];
- u8 type;
+ u8 state;
+ u8 agg_vars1;
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
+ u16 ulp_credit;
#endif
- u32 parameters;
#if defined(__BIG_ENDIAN)
- u16 ox_id;
- u16 rx_id;
+ u16 __agg_val4;
+ u16 agg_vars2;
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
#elif defined(__LITTLE_ENDIAN)
- u16 rx_id;
- u16 ox_id;
+ u16 agg_vars2;
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+ u16 __agg_val4;
#endif
+ struct tstorm_tcp_tcp_ag_context_section tcp;
};
-struct fcoe_fc_frame {
- struct fcoe_fc_hdr fc_hdr;
- u32 reserved0[2];
-};
-
-union fcoe_cmd_flow_info {
- struct fcoe_fcp_cmd_payload fcp_cmd_payload;
- struct fcoe_fc_frame mp_fc_frame;
-};
-
-struct fcoe_read_flow_info {
- struct fcoe_fc_hdr fc_data_in_hdr;
- u32 reserved[2];
-};
-
-struct fcoe_fcp_xfr_rdy_payload {
- u32 burst_len;
- u32 data_ro;
-};
-
-struct fcoe_write_flow_info {
- struct fcoe_fc_hdr fc_data_out_hdr;
- struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload;
-};
-
-struct fcoe_fcp_rsp_flags {
- u8 flags;
-#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
-#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
-#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
-#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
-#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
-#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
-#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
-#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
-#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
-#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
-#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
-#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
-};
-
-struct fcoe_fcp_rsp_payload {
- struct regpair reserved0;
- u32 fcp_resid;
-#if defined(__BIG_ENDIAN)
- u16 retry_delay_timer;
- struct fcoe_fcp_rsp_flags fcp_flags;
- u8 scsi_status_code;
-#elif defined(__LITTLE_ENDIAN)
- u8 scsi_status_code;
- struct fcoe_fcp_rsp_flags fcp_flags;
- u16 retry_delay_timer;
-#endif
- u32 fcp_rsp_len;
- u32 fcp_sns_len;
-};
-/*
- * Fixed size structure in order to plant it in Union structure
- */
-struct fcoe_fcp_rsp_union {
- struct fcoe_fcp_rsp_payload payload;
- struct regpair reserved0;
-};
/*
- * Fixed size structure in order to plant it in Union structure
+ * The fcoe aggregative context of Ustorm
*/
-struct fcoe_abts_rsp_union {
- u32 r_ctl;
- u32 abts_rsp_payload[7];
-};
-
-union fcoe_rsp_flow_info {
- struct fcoe_fcp_rsp_union fcp_rsp;
- struct fcoe_abts_rsp_union abts_rsp;
-};
-
-struct fcoe_cleanup_flow_info {
+struct ustorm_fcoe_ag_context {
#if defined(__BIG_ENDIAN)
- u16 reserved1;
- u16 task_id;
+ u8 __aux_counter_flags;
+ u8 agg_vars2;
+#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+ u8 agg_vars1;
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+ u8 state;
#elif defined(__LITTLE_ENDIAN)
- u16 task_id;
- u16 reserved1;
+ u8 state;
+ u8 agg_vars1;
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+ u8 agg_vars2;
+#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+ u8 __aux_counter_flags;
#endif
- u32 reserved2[7];
-};
-
-/*
- * 32 bytes used for general purposes
- */
-union fcoe_general_task_ctx {
- union fcoe_cmd_flow_info cmd_info;
- struct fcoe_read_flow_info read_info;
- struct fcoe_write_flow_info write_info;
- union fcoe_rsp_flow_info rsp_info;
- struct fcoe_cleanup_flow_info cleanup_info;
- u32 comp_info[8];
-};
-
-struct fcoe_s_stat_ctx {
- u8 flags;
-#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
-#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
-#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
-#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
-#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
-#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
-#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
-#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
-#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
-#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
-#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
-#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
-#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
-#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
-};
-
-/*
- * Common section. Both TX and RX processing might write and read from it in different flows
- */
-struct fcoe_task_ctx_entry_tx_rx_cmn {
- u32 data_2_trns;
- union fcoe_general_task_ctx general;
-#if defined(__BIG_ENDIAN)
- u16 tx_low_seq_cnt;
- struct fcoe_s_stat_ctx tx_s_stat;
- u8 tx_seq_id;
-#elif defined(__LITTLE_ENDIAN)
- u8 tx_seq_id;
- struct fcoe_s_stat_ctx tx_s_stat;
- u16 tx_low_seq_cnt;
-#endif
- u32 common_flags;
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29
-};
-
-struct fcoe_task_ctx_entry_rxwr_txrd {
-#if defined(__BIG_ENDIAN)
- u16 rx_id;
- u16 rx_flags;
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
-#elif defined(__LITTLE_ENDIAN)
- u16 rx_flags;
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
- u16 rx_id;
-#endif
-};
-
-struct fcoe_seq_ctx {
-#if defined(__BIG_ENDIAN)
- u16 low_seq_cnt;
- struct fcoe_s_stat_ctx s_stat;
- u8 seq_id;
+#if defined(__BIG_ENDIAN)
+ u8 cdu_usage;
+ u8 agg_misc2;
+ u16 pbf_tx_seq_ack;
#elif defined(__LITTLE_ENDIAN)
- u8 seq_id;
- struct fcoe_s_stat_ctx s_stat;
- u16 low_seq_cnt;
+ u16 pbf_tx_seq_ack;
+ u8 agg_misc2;
+ u8 cdu_usage;
#endif
+ u32 agg_misc4;
#if defined(__BIG_ENDIAN)
- u16 err_seq_cnt;
- u16 high_seq_cnt;
+ u8 agg_val3_th;
+ u8 agg_val3;
+ u16 agg_misc3;
#elif defined(__LITTLE_ENDIAN)
- u16 high_seq_cnt;
- u16 err_seq_cnt;
+ u16 agg_misc3;
+ u8 agg_val3;
+ u8 agg_val3_th;
#endif
- u32 low_exp_ro;
- u32 high_exp_ro;
-};
-
-struct fcoe_single_sge_ctx {
- struct regpair cur_buf_addr;
+ u32 expired_task_id;
+ u32 agg_misc4_th;
#if defined(__BIG_ENDIAN)
- u16 reserved0;
- u16 cur_buf_rem;
+ u16 cq_prod;
+ u16 cq_cons;
#elif defined(__LITTLE_ENDIAN)
- u16 cur_buf_rem;
- u16 reserved0;
+ u16 cq_cons;
+ u16 cq_prod;
#endif
-};
-
-struct fcoe_mul_sges_ctx {
- struct regpair cur_sge_addr;
#if defined(__BIG_ENDIAN)
- u8 sgl_size;
- u8 cur_sge_idx;
- u16 cur_sge_off;
+ u16 __reserved2;
+ u8 decision_rules;
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
+ u8 decision_rule_enable_bits;
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
#elif defined(__LITTLE_ENDIAN)
- u16 cur_sge_off;
- u8 cur_sge_idx;
- u8 sgl_size;
+ u8 decision_rule_enable_bits;
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+ u8 decision_rules;
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
+ u16 __reserved2;
#endif
};
-union fcoe_sgl_ctx {
- struct fcoe_single_sge_ctx single_sge;
- struct fcoe_mul_sges_ctx mul_sges;
-};
-
-struct fcoe_task_ctx_entry_rx_only {
- struct fcoe_seq_ctx seq_ctx;
- struct fcoe_seq_ctx ooo_seq_ctx;
- u32 rsrv3;
- union fcoe_sgl_ctx sgl_ctx;
-};
-
-struct ustorm_fcoe_task_ctx_entry_rd {
- struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
- struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
- struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
- struct fcoe_task_ctx_entry_rx_only rx_wr;
- u32 reserved;
-};
/*
- * Ustorm FCoE Storm Context
+ * The iscsi aggregative context of Ustorm
*/
-struct ustorm_fcoe_st_context {
- struct ustorm_fcoe_params fcoe_params;
- struct regpair task_addr;
- struct regpair cq_base_addr;
- struct regpair rq_pbl_base;
- struct regpair rq_cur_page_addr;
- struct regpair confq_pbl_base_addr;
- struct regpair conn_db_base;
- struct regpair xfrq_base_addr;
- struct regpair lcq_base_addr;
+struct ustorm_iscsi_ag_context {
#if defined(__BIG_ENDIAN)
- union fcoe_idx16_field_union rq_cons;
- union fcoe_idx16_field_union rq_prod;
+ u8 __aux_counter_flags;
+ u8 agg_vars2;
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+ u8 agg_vars1;
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+ u8 state;
#elif defined(__LITTLE_ENDIAN)
- union fcoe_idx16_field_union rq_prod;
- union fcoe_idx16_field_union rq_cons;
+ u8 state;
+ u8 agg_vars1;
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+ u8 agg_vars2;
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+ u8 __aux_counter_flags;
#endif
#if defined(__BIG_ENDIAN)
- u16 xfrq_prod;
- u16 cq_cons;
+ u8 cdu_usage;
+ u8 agg_misc2;
+ u16 __cq_local_comp_itt_val;
#elif defined(__LITTLE_ENDIAN)
- u16 cq_cons;
- u16 xfrq_prod;
+ u16 __cq_local_comp_itt_val;
+ u8 agg_misc2;
+ u8 cdu_usage;
#endif
+ u32 agg_misc4;
#if defined(__BIG_ENDIAN)
- u16 lcq_cons;
- u16 hc_cram_address;
+ u8 agg_val3_th;
+ u8 agg_val3;
+ u16 agg_misc3;
#elif defined(__LITTLE_ENDIAN)
- u16 hc_cram_address;
- u16 lcq_cons;
+ u16 agg_misc3;
+ u8 agg_val3;
+ u8 agg_val3_th;
#endif
+ u32 agg_val1;
+ u32 agg_misc4_th;
#if defined(__BIG_ENDIAN)
- u16 sq_xfrq_lcq_confq_size;
- u16 confq_prod;
+ u16 agg_val2_th;
+ u16 agg_val2;
#elif defined(__LITTLE_ENDIAN)
- u16 confq_prod;
- u16 sq_xfrq_lcq_confq_size;
+ u16 agg_val2;
+ u16 agg_val2_th;
#endif
#if defined(__BIG_ENDIAN)
- u8 hc_csdm_agg_int;
- u8 flags;
-#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG (0x1<<0)
-#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG_SHIFT 0
-#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG (0x1<<1)
-#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG_SHIFT 1
-#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG (0x1<<2)
-#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG_SHIFT 2
-#define USTORM_FCOE_ST_CONTEXT_RSRV1 (0x1F<<3)
-#define USTORM_FCOE_ST_CONTEXT_RSRV1_SHIFT 3
- u8 available_rqes;
- u8 sp_q_flush_cnt;
+ u16 __reserved2;
+ u8 decision_rules;
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
+ u8 decision_rule_enable_bits;
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
#elif defined(__LITTLE_ENDIAN)
- u8 sp_q_flush_cnt;
- u8 available_rqes;
- u8 flags;
-#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG (0x1<<0)
-#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG_SHIFT 0
-#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG (0x1<<1)
-#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG_SHIFT 1
-#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG (0x1<<2)
-#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG_SHIFT 2
-#define USTORM_FCOE_ST_CONTEXT_RSRV1 (0x1F<<3)
-#define USTORM_FCOE_ST_CONTEXT_RSRV1_SHIFT 3
- u8 hc_csdm_agg_int;
+ u8 decision_rule_enable_bits;
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+ u8 decision_rules;
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
+ u16 __reserved2;
#endif
- struct ustorm_fcoe_data_place data_place;
- struct ustorm_fcoe_task_ctx_entry_rd tce;
};
-/*
- * The FCoE non-aggregative context of Tstorm
- */
-struct tstorm_fcoe_st_context {
- struct regpair reserved0;
- struct regpair reserved1;
-};
/*
* The fcoe aggregative context section of Xstorm
@@ -1272,8 +1366,8 @@ struct xstorm_fcoe_extra_ag_context_section {
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF (0x3<<4)
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_SHIFT 4
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6)
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7)
@@ -1288,20 +1382,20 @@ struct xstorm_fcoe_extra_ag_context_section {
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF (0x3<<4)
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_SHIFT 4
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6)
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7)
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7
#endif
- u32 __task_addr_lo;
- u32 __task_addr_hi;
+ u32 snd_nxt;
+ u32 tx_wnd;
u32 __reserved55;
- u32 __tx_prods;
+ u32 local_adv_wnd;
#if defined(__BIG_ENDIAN)
u8 __agg_val8_th;
- u8 __agg_val8;
+ u8 __tx_dest;
u16 tcp_agg_vars2;
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0)
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0
@@ -1317,8 +1411,8 @@ struct xstorm_fcoe_extra_ag_context_section {
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6)
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN (0x1<<7)
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN_SHIFT 7
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8)
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
@@ -1327,8 +1421,8 @@ struct xstorm_fcoe_extra_ag_context_section {
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
-#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
-#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
#elif defined(__LITTLE_ENDIAN)
u16 tcp_agg_vars2;
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0)
@@ -1345,8 +1439,8 @@ struct xstorm_fcoe_extra_ag_context_section {
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6)
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN (0x1<<7)
-#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN_SHIFT 7
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8)
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
@@ -1355,9 +1449,9 @@ struct xstorm_fcoe_extra_ag_context_section {
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
-#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
-#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
- u8 __agg_val8;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+ u8 __tx_dest;
u8 __agg_val8_th;
#endif
u32 __sq_base_addr_lo;
@@ -1591,9 +1685,9 @@ struct xstorm_fcoe_ag_context {
#if defined(__BIG_ENDIAN)
u8 __reserved1;
u8 __agg_val6_th;
- u16 __confq_tx_prod;
+ u16 __agg_val9;
#elif defined(__LITTLE_ENDIAN)
- u16 __confq_tx_prod;
+ u16 __agg_val9;
u8 __agg_val6_th;
u8 __reserved1;
#endif
@@ -1605,16 +1699,16 @@ struct xstorm_fcoe_ag_context {
u16 confq_cons;
#endif
u32 agg_vars8;
-#define __XSTORM_FCOE_AG_CONTEXT_CACHE_WQE_IDX (0xFFFFFF<<0)
-#define __XSTORM_FCOE_AG_CONTEXT_CACHE_WQE_IDX_SHIFT 0
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2_SHIFT 0
#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3_SHIFT 24
#if defined(__BIG_ENDIAN)
- u16 ox_id;
+ u16 agg_misc0;
u16 sq_prod;
#elif defined(__LITTLE_ENDIAN)
u16 sq_prod;
- u16 ox_id;
+ u16 agg_misc0;
#endif
#if defined(__BIG_ENDIAN)
u8 agg_val3;
@@ -1628,332 +1722,1685 @@ struct xstorm_fcoe_ag_context {
u8 agg_val3;
#endif
#if defined(__BIG_ENDIAN)
- u16 __pbf_tx_seq_ack;
+ u16 __agg_misc1;
u16 agg_limit1;
#elif defined(__LITTLE_ENDIAN)
u16 agg_limit1;
- u16 __pbf_tx_seq_ack;
+ u16 __agg_misc1;
#endif
u32 completion_seq;
u32 confq_pbl_base_lo;
u32 confq_pbl_base_hi;
};
+
+
/*
- * The fcoe extra aggregative context section of Tstorm
+ * The tcp aggregative context section of Xstorm
*/
-struct tstorm_fcoe_extra_ag_context_section {
- u32 __agg_val1;
+struct xstorm_tcp_tcp_ag_context_section {
#if defined(__BIG_ENDIAN)
- u8 __tcp_agg_vars2;
- u8 __agg_val3;
- u16 __agg_val2;
+ u8 tcp_agg_vars1;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7
+ u8 __da_cnt;
+ u16 mss;
#elif defined(__LITTLE_ENDIAN)
- u16 __agg_val2;
- u8 __agg_val3;
- u8 __tcp_agg_vars2;
+ u16 mss;
+ u8 __da_cnt;
+ u8 tcp_agg_vars1;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7
#endif
+ u32 snd_nxt;
+ u32 tx_wnd;
+ u32 snd_una;
+ u32 local_adv_wnd;
#if defined(__BIG_ENDIAN)
- u16 __agg_val5;
- u8 __agg_val6;
+ u8 __agg_val8_th;
+ u8 __tx_dest;
+ u16 tcp_agg_vars2;
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+ u16 tcp_agg_vars2;
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+ u8 __tx_dest;
+ u8 __agg_val8_th;
+#endif
+ u32 ack_to_far_end;
+ u32 rto_timer;
+ u32 ka_timer;
+ u32 ts_to_echo;
+#if defined(__BIG_ENDIAN)
+ u16 __agg_val7_th;
+ u16 __agg_val7;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __agg_val7;
+ u16 __agg_val7_th;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 __tcp_agg_vars5;
+ u8 __tcp_agg_vars4;
u8 __tcp_agg_vars3;
+ u8 __force_pure_ack_cnt;
#elif defined(__LITTLE_ENDIAN)
+ u8 __force_pure_ack_cnt;
u8 __tcp_agg_vars3;
- u8 __agg_val6;
- u16 __agg_val5;
+ u8 __tcp_agg_vars4;
+ u8 __tcp_agg_vars5;
+#endif
+ u32 tcp_agg_vars6;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN (0x1<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG (0x1<<5)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF (0x3<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF (0x3<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_SHIFT 14
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF (0x3<<16)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF_SHIFT 16
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF (0x3<<18)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF_SHIFT 18
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF (0x3<<20)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF_SHIFT 20
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF (0x3<<22)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF_SHIFT 22
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF (0x3<<24)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF_SHIFT 24
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG (0x1<<26)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG_SHIFT 26
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71 (0x1<<27)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71_SHIFT 27
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY (0x1<<28)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY_SHIFT 28
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG (0x1<<29)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG_SHIFT 29
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG (0x1<<30)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG_SHIFT 30
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG (0x1<<31)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG_SHIFT 31
+#if defined(__BIG_ENDIAN)
+ u16 __agg_misc6;
+ u16 __tcp_agg_vars7;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __tcp_agg_vars7;
+ u16 __agg_misc6;
+#endif
+ u32 __agg_val10;
+ u32 __agg_val10_th;
+#if defined(__BIG_ENDIAN)
+ u16 __reserved3;
+ u8 __reserved2;
+ u8 __da_only_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u8 __da_only_cnt;
+ u8 __reserved2;
+ u16 __reserved3;
#endif
- u32 __lcq_prod;
- u32 rtt_seq;
- u32 rtt_time;
- u32 __reserved66;
- u32 wnd_right_edge;
- u32 tcp_agg_vars1;
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9)
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
-#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
-#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
- u32 snd_max;
- u32 __lcq_cons;
- u32 __reserved2;
};
/*
- * The fcoe aggregative context of Tstorm
+ * The iscsi aggregative context of Xstorm
*/
-struct tstorm_fcoe_ag_context {
+struct xstorm_iscsi_ag_context {
#if defined(__BIG_ENDIAN)
- u16 ulp_credit;
+ u16 agg_val1;
u8 agg_vars1;
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
-#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
u8 state;
#elif defined(__LITTLE_ENDIAN)
u8 state;
u8 agg_vars1;
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
-#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
- u16 ulp_credit;
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+ u16 agg_val1;
#endif
#if defined(__BIG_ENDIAN)
- u16 __agg_val4;
- u16 agg_vars2;
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
-#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
-#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
-#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
-#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
-#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
-#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
-#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
-#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+ u8 cdu_reserved;
+ u8 __agg_vars4;
+ u8 agg_vars3;
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+ u8 agg_vars2;
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
#elif defined(__LITTLE_ENDIAN)
- u16 agg_vars2;
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
-#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
-#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
-#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
-#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
-#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
-#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
-#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
-#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
-#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
-#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
- u16 __agg_val4;
+ u8 agg_vars2;
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+ u8 agg_vars3;
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+ u8 __agg_vars4;
+ u8 cdu_reserved;
#endif
- struct tstorm_fcoe_extra_ag_context_section __extra_section;
+ u32 more_to_send;
+#if defined(__BIG_ENDIAN)
+ u16 agg_vars5;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+ u16 sq_cons;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_cons;
+ u16 agg_vars5;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+#endif
+ struct xstorm_tcp_tcp_ag_context_section tcp;
+#if defined(__BIG_ENDIAN)
+ u16 agg_vars7;
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+ u8 agg_val3_th;
+ u8 agg_vars6;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_vars6;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+ u8 agg_val3_th;
+ u16 agg_vars7;
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_val11_th;
+ u16 __gen_data;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __gen_data;
+ u16 __agg_val11_th;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 __reserved1;
+ u8 __agg_val6_th;
+ u16 __agg_val9;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __agg_val9;
+ u8 __agg_val6_th;
+ u8 __reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 hq_prod;
+ u16 hq_cons;
+#elif defined(__LITTLE_ENDIAN)
+ u16 hq_cons;
+ u16 hq_prod;
+#endif
+ u32 agg_vars8;
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3_SHIFT 24
+#if defined(__BIG_ENDIAN)
+ u16 r2tq_prod;
+ u16 sq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_prod;
+ u16 r2tq_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 agg_val3;
+ u8 agg_val6;
+ u8 agg_val5_th;
+ u8 agg_val5;
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_val5;
+ u8 agg_val5_th;
+ u8 agg_val6;
+ u8 agg_val3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_misc1;
+ u16 agg_limit1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_limit1;
+ u16 __agg_misc1;
+#endif
+ u32 hq_cons_tcp_seq;
+ u32 exp_stat_sn;
+ u32 rst_seq_num;
};
+
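The agg_vars words in these aggregative contexts are meant to be accessed through their paired MASK/SHIFT defines rather than through C bitfields. A minimal sketch, not taken from the driver, of packing a physical queue number into agg_vars5 with the macros declared above:

/* Illustrative only: write PHYSICAL_QUEUE_NUM0 into agg_vars5 using the
 * mask/shift pair from xstorm_iscsi_ag_context. Assumes <linux/types.h>. */
static inline void xstorm_iscsi_set_pq0(struct xstorm_iscsi_ag_context *ag,
					u16 pq_num)
{
	ag->agg_vars5 &= ~XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0;
	ag->agg_vars5 |= (pq_num << XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT) &
			 XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0;
}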
/*
- * The fcoe aggregative context of Ustorm
+ * The L5cm aggregative context of XStorm
*/
-struct ustorm_fcoe_ag_context {
+struct xstorm_l5cm_ag_context {
#if defined(__BIG_ENDIAN)
- u8 __aux_counter_flags;
- u8 agg_vars2;
-#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
-#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
-#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
-#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+ u16 agg_val1;
u8 agg_vars1;
-#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
-#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
u8 state;
#elif defined(__LITTLE_ENDIAN)
u8 state;
u8 agg_vars1;
-#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
-#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
- u8 agg_vars2;
-#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
-#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
-#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
-#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
- u8 __aux_counter_flags;
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+ u16 agg_val1;
#endif
#if defined(__BIG_ENDIAN)
- u8 cdu_usage;
- u8 agg_misc2;
- u16 pbf_tx_seq_ack;
+ u8 cdu_reserved;
+ u8 __agg_vars4;
+ u8 agg_vars3;
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+ u8 agg_vars2;
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7
#elif defined(__LITTLE_ENDIAN)
- u16 pbf_tx_seq_ack;
- u8 agg_misc2;
- u8 cdu_usage;
+ u8 agg_vars2;
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7
+ u8 agg_vars3;
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+ u8 __agg_vars4;
+ u8 cdu_reserved;
#endif
- u32 agg_misc4;
+ u32 more_to_send;
+#if defined(__BIG_ENDIAN)
+ u16 agg_vars5;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+ u16 agg_val4_th;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_val4_th;
+ u16 agg_vars5;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+#endif
+ struct xstorm_tcp_tcp_ag_context_section tcp;
#if defined(__BIG_ENDIAN)
+ u16 agg_vars7;
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
u8 agg_val3_th;
+ u8 agg_vars6;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+ u8 agg_vars6;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+ u8 agg_val3_th;
+ u16 agg_vars7;
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 __agg_val11_th;
+ u16 __gen_data;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __gen_data;
+ u16 __agg_val11_th;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 __reserved1;
+ u8 __agg_val6_th;
+ u16 __agg_val9;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __agg_val9;
+ u8 __agg_val6_th;
+ u8 __reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 agg_val2_th;
+ u16 agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_val2;
+ u16 agg_val2_th;
+#endif
+ u32 agg_vars8;
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3_SHIFT 24
+#if defined(__BIG_ENDIAN)
+ u16 agg_misc0;
+ u16 agg_val4;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_val4;
+ u16 agg_misc0;
+#endif
+#if defined(__BIG_ENDIAN)
u8 agg_val3;
- u16 agg_misc3;
+ u8 agg_val6;
+ u8 agg_val5_th;
+ u8 agg_val5;
#elif defined(__LITTLE_ENDIAN)
- u16 agg_misc3;
+ u8 agg_val5;
+ u8 agg_val5_th;
+ u8 agg_val6;
u8 agg_val3;
- u8 agg_val3_th;
#endif
- u32 expired_task_id;
- u32 agg_misc4_th;
#if defined(__BIG_ENDIAN)
- u16 cq_prod;
+ u16 __agg_misc1;
+ u16 agg_limit1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 agg_limit1;
+ u16 __agg_misc1;
+#endif
+ u32 completion_seq;
+ u32 agg_misc4;
+ u32 rst_seq_num;
+};
+
+/*
+ * ABTS info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_abts_info {
+ __le16 aborted_task_id;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+
+/*
+ * Fixed-size structure so that it can be embedded in a union structure
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_abts_rsp_union {
+ u8 r_ctl;
+ u8 rsrv[3];
+ __le32 abts_rsp_payload[7];
+};
+
+
+/*
+ * 4 regs size $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_bd_ctx {
+ __le32 buf_addr_hi;
+ __le32 buf_addr_lo;
+ __le16 buf_len;
+ __le16 rsrv0;
+ __le16 flags;
+ __le16 rsrv1;
+};
+
+
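Each buffer descriptor splits a 64-bit DMA address across buf_addr_hi/buf_addr_lo, with every member little-endian per $$KEEP_ENDIANNESS$$. A hedged sketch of filling one from a mapped buffer, assuming the usual upper_32_bits()/lower_32_bits() and cpu_to_le*() kernel helpers:

/* Sketch only: populate a BD from an already-mapped buffer. */
static inline void fcoe_fill_bd(struct fcoe_bd_ctx *bd,
				dma_addr_t addr, u16 len)
{
	bd->buf_addr_hi = cpu_to_le32(upper_32_bits(addr));
	bd->buf_addr_lo = cpu_to_le32(lower_32_bits(addr));
	bd->buf_len = cpu_to_le16(len);
	bd->flags = cpu_to_le16(0);
}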
+/*
+ * FCoE cached sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cached_sge_ctx {
+ struct regpair cur_buf_addr;
+ __le16 cur_buf_rem;
+ __le16 second_buf_rem;
+ struct regpair second_buf_addr;
+};
+
+
+/*
+ * Cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cleanup_info {
+ __le16 cleaned_task_id;
+ __le16 rolled_tx_seq_cnt;
+ __le32 rolled_tx_data_offset;
+};
+
+
+/*
+ * Fcp RSP flags $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_flags {
+ u8 flags;
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
+};
+
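The bits above mirror the validity and residual indications carried in an FCP response. As an illustrative helper (not part of this patch), checking for a residual underrun reduces to a mask test:

/* Illustration only: true when the target reported a residual underrun. */
static inline bool fcoe_fcp_resid_under(const struct fcoe_fcp_rsp_flags *f)
{
	return (f->flags & FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER) != 0;
}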
+/*
+ * Fcp RSP payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_payload {
+ struct regpair reserved0;
+ __le32 fcp_resid;
+ u8 scsi_status_code;
+ struct fcoe_fcp_rsp_flags fcp_flags;
+ __le16 retry_delay_timer;
+ __le32 fcp_rsp_len;
+ __le32 fcp_sns_len;
+};
+
+/*
+ * Fixed-size structure so that it can be embedded in a union structure
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_union {
+ struct fcoe_fcp_rsp_payload payload;
+ struct regpair reserved0;
+};
+
+/*
+ * FC header $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_hdr {
+ u8 s_id[3];
+ u8 cs_ctl;
+ u8 d_id[3];
+ u8 r_ctl;
+ __le16 seq_cnt;
+ u8 df_ctl;
+ u8 seq_id;
+ u8 f_ctl[3];
+ u8 type;
+ __le32 parameters;
+ __le16 rx_id;
+ __le16 ox_id;
+};
+
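The layout above is the firmware's view of the FC header: s_id/d_id are raw 3-byte FC IDs while the wider members are little-endian. A sketch of filling the addressing and exchange identifiers, assuming memcpy() and cpu_to_le16() from the usual kernel headers:

/* Sketch only: copy addressing and exchange IDs into the firmware FC header. */
static inline void fcoe_fill_fc_hdr_ids(struct fcoe_fc_hdr *hdr,
					const u8 s_id[3], const u8 d_id[3],
					u16 ox_id, u16 rx_id)
{
	memcpy(hdr->s_id, s_id, 3);
	memcpy(hdr->d_id, d_id, 3);
	hdr->ox_id = cpu_to_le16(ox_id);
	hdr->rx_id = cpu_to_le16(rx_id);
}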
+/*
+ * FC header union $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mp_rsp_union {
+ struct fcoe_fc_hdr fc_hdr;
+ __le32 mp_payload_len;
+ __le32 rsrv;
+};
+
+/*
+ * Completion information $$KEEP_ENDIANNESS$$
+ */
+union fcoe_comp_flow_info {
+ struct fcoe_fcp_rsp_union fcp_rsp;
+ struct fcoe_abts_rsp_union abts_rsp;
+ struct fcoe_mp_rsp_union mp_rsp;
+ __le32 opaque[8];
+};
+
+
+/*
+ * External ABTS info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_abts_info {
+ __le32 rsrv0[6];
+ struct fcoe_abts_info ctx;
+};
+
+
+/*
+ * External cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_cleanup_info {
+ __le32 rsrv0[6];
+ struct fcoe_cleanup_info ctx;
+};
+
+
+/*
+ * Fcoe FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fw_tx_seq_ctx {
+ __le32 data_offset;
+ __le16 seq_cnt;
+ __le16 rsrv0;
+};
+
+/*
+ * Fcoe external FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_fw_tx_seq_ctx {
+ __le32 rsrv0[6];
+ struct fcoe_fw_tx_seq_ctx ctx;
+};
+
+
+/*
+ * FCoE multiple sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mul_sges_ctx {
+ struct regpair cur_sge_addr;
+ __le16 cur_sge_off;
+ u8 cur_sge_idx;
+ u8 sgl_size;
+};
+
+/*
+ * FCoE external multiple sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_mul_sges_ctx {
+ struct fcoe_mul_sges_ctx mul_sgl;
+ struct regpair rsrv0;
+};
+
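The multiple-SGE context keeps a cursor (cur_sge_idx/cur_sge_off) into an SGL of sgl_size entries. A minimal sketch of resetting that cursor to the head of a list, assuming struct regpair as defined elsewhere in this header family:

/* Sketch only: point the SGL cursor at entry 0 of a freshly built list. */
static inline void fcoe_init_mul_sges(struct fcoe_mul_sges_ctx *ctx,
				      struct regpair sgl_addr, u8 sgl_size)
{
	ctx->cur_sge_addr = sgl_addr;
	ctx->cur_sge_off = cpu_to_le16(0);
	ctx->cur_sge_idx = 0;
	ctx->sgl_size = sgl_size;
}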
+
+/*
+ * FCP CMD payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_cmd_payload {
+ __le32 opaque[8];
+};
+
+
+
+
+
+/*
+ * Fcp xfr rdy payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_xfr_rdy_payload {
+ __le32 burst_len;
+ __le32 data_ro;
+};
+
+
+/*
+ * FC frame $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_frame {
+ struct fcoe_fc_hdr fc_hdr;
+ __le32 reserved0[2];
+};
+
+
+
+
+/*
+ * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$
+ */
+union fcoe_kcqe_params {
+ __le32 reserved0[4];
+};
+
+/*
+ * FCoE KCQ CQE $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kcqe {
+ __le32 fcoe_conn_id;
+ __le32 completion_status;
+ __le32 fcoe_conn_context_id;
+ union fcoe_kcqe_params params;
+ __le16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+};
+
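A KCQE identifies its connection and packs the layer code and the ramrod-completion indication into the flags byte. Illustrative decode helpers built only from the defines above:

/* Illustration only: decode fields of a received KCQE. */
static inline u8 fcoe_kcqe_layer_code(const struct fcoe_kcqe *kcqe)
{
	return (kcqe->flags & FCOE_KCQE_LAYER_CODE) >>
	       FCOE_KCQE_LAYER_CODE_SHIFT;
}

static inline bool fcoe_kcqe_is_ramrod(const struct fcoe_kcqe *kcqe)
{
	return kcqe->flags & FCOE_KCQE_RAMROD_COMPLETION;
}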
+
+
+/*
+ * FCoE KWQE header $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_header {
+ u8 op_code;
+ u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+};
+
+/*
+ * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init1 {
+ __le16 num_tasks;
+ struct fcoe_kwqe_header hdr;
+ __le32 task_list_pbl_addr_lo;
+ __le32 task_list_pbl_addr_hi;
+ __le32 dummy_buffer_addr_lo;
+ __le32 dummy_buffer_addr_hi;
+ __le16 sq_num_wqes;
+ __le16 rq_num_wqes;
+ __le16 rq_buffer_log_size;
+ __le16 cq_num_wqes;
+ __le16 mtu;
+ u8 num_sessions_log;
+ u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+};
+
+/*
+ * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init2 {
+ u8 hsi_major_version;
+ u8 hsi_minor_version;
+ struct fcoe_kwqe_header hdr;
+ __le32 hash_tbl_pbl_addr_lo;
+ __le32 hash_tbl_pbl_addr_hi;
+ __le32 t2_hash_tbl_addr_lo;
+ __le32 t2_hash_tbl_addr_hi;
+ __le32 t2_ptr_hash_tbl_addr_lo;
+ __le32 t2_ptr_hash_tbl_addr_hi;
+ __le32 free_list_count;
+};
+
+/*
+ * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init3 {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ __le32 error_bit_map_lo;
+ __le32 error_bit_map_hi;
+ u8 perf_config;
+ u8 reserved21[3];
+ __le32 reserved2[4];
+};
+
+/*
+ * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload1 {
+ __le16 fcoe_conn_id;
+ struct fcoe_kwqe_header hdr;
+ __le32 sq_addr_lo;
+ __le32 sq_addr_hi;
+ __le32 rq_pbl_addr_lo;
+ __le32 rq_pbl_addr_hi;
+ __le32 rq_first_pbe_addr_lo;
+ __le32 rq_first_pbe_addr_hi;
+ __le16 rq_prod;
+ __le16 reserved0;
+};
+
+/*
+ * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload2 {
+ __le16 tx_max_fc_pay_len;
+ struct fcoe_kwqe_header hdr;
+ __le32 cq_addr_lo;
+ __le32 cq_addr_hi;
+ __le32 xferq_addr_lo;
+ __le32 xferq_addr_hi;
+ __le32 conn_db_addr_lo;
+ __le32 conn_db_addr_hi;
+ __le32 reserved1;
+};
+
+/*
+ * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload3 {
+ __le16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+ struct fcoe_kwqe_header hdr;
+ u8 s_id[3];
+ u8 tx_max_conc_seqs_c3;
+ u8 d_id[3];
+ u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+ __le32 reserved;
+ __le32 confq_first_pbe_addr_lo;
+ __le32 confq_first_pbe_addr_hi;
+ __le16 tx_total_conc_seqs;
+ __le16 rx_max_fc_pay_len;
+ __le16 rx_total_conc_seqs;
+ u8 rx_max_conc_seqs_c3;
+ u8 rx_open_seqs_exch_c3;
+};
+
+/*
+ * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload4 {
+ u8 e_d_tov_timer_val;
+ u8 reserved2;
+ struct fcoe_kwqe_header hdr;
+ u8 src_mac_addr_lo[2];
+ u8 src_mac_addr_mid[2];
+ u8 src_mac_addr_hi[2];
+ u8 dst_mac_addr_hi[2];
+ u8 dst_mac_addr_lo[2];
+ u8 dst_mac_addr_mid[2];
+ __le32 lcq_addr_lo;
+ __le32 lcq_addr_hi;
+ __le32 confq_pbl_base_addr_lo;
+ __le32 confq_pbl_base_addr_hi;
+};
+
+/*
+ * FCoE connection enable request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_enable_disable {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ u8 src_mac_addr_lo[2];
+ u8 src_mac_addr_mid[2];
+ u8 src_mac_addr_hi[2];
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+ u8 dst_mac_addr_lo[2];
+ u8 dst_mac_addr_mid[2];
+ u8 dst_mac_addr_hi[2];
+ __le16 reserved1;
+ u8 s_id[3];
+ u8 vlan_flag;
+ u8 d_id[3];
+ u8 reserved3;
+ __le32 context_id;
+ __le32 conn_id;
+ __le32 reserved4;
+};
+
+/*
+ * FCoE connection destroy request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_destroy {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ __le32 context_id;
+ __le32 conn_id;
+ __le32 reserved1[5];
+};
+
+/*
+ * FCoE destroy request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_destroy {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ __le32 reserved1[7];
+};
+
+/*
+ * FCoE statistics request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_stat {
+ __le16 reserved0;
+ struct fcoe_kwqe_header hdr;
+ __le32 stat_params_addr_lo;
+ __le32 stat_params_addr_hi;
+ __le32 reserved1[5];
+};
+
+/*
+ * FCoE KWQ WQE $$KEEP_ENDIANNESS$$
+ */
+union fcoe_kwqe {
+ struct fcoe_kwqe_init1 init1;
+ struct fcoe_kwqe_init2 init2;
+ struct fcoe_kwqe_init3 init3;
+ struct fcoe_kwqe_conn_offload1 conn_offload1;
+ struct fcoe_kwqe_conn_offload2 conn_offload2;
+ struct fcoe_kwqe_conn_offload3 conn_offload3;
+ struct fcoe_kwqe_conn_offload4 conn_offload4;
+ struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
+ struct fcoe_kwqe_conn_destroy conn_destroy;
+ struct fcoe_kwqe_destroy destroy;
+ struct fcoe_kwqe_stat statistics;
+};
+
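Every member of the KWQE union embeds a struct fcoe_kwqe_header, so a work-queue slot can be cleared and stamped generically before its opcode-specific fields are filled in. A hedged sketch; the opcode is caller-supplied here, not a constant from this header:

/* Sketch only: prepare a destroy KWQE; op_code is a placeholder argument. */
static inline void fcoe_prep_destroy_kwqe(union fcoe_kwqe *kwqe, u8 op_code)
{
	memset(kwqe, 0, sizeof(*kwqe));
	kwqe->destroy.hdr.op_code = op_code;
	kwqe->destroy.hdr.flags = 0;
}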
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * TX SGL context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_sgl_union_ctx {
+ struct fcoe_cached_sge_ctx cached_sge;
+ struct fcoe_ext_mul_sges_ctx sgl;
+ __le32 opaque[5];
+};
+
+/*
+ * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_read_flow_info {
+ union fcoe_sgl_union_ctx sgl_ctx;
+ __le32 rsrv0[3];
+};
+
+
+/*
+ * Fcoe stat context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_s_stat_ctx {
+ u8 flags;
+#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
+#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
+#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
+#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
+#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
+#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
+#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
+};
+
+/*
+ * Fcoe rx seq context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_rx_seq_ctx {
+ u8 seq_id;
+ struct fcoe_s_stat_ctx s_stat;
+ __le16 seq_cnt;
+ __le32 low_exp_ro;
+ __le32 high_exp_ro;
+};
+
+
+/*
+ * Fcoe rx_wr union context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_rx_wr_union_ctx {
+ struct fcoe_read_flow_info read_info;
+ union fcoe_comp_flow_info comp_info;
+ __le32 opaque[8];
+};
+
+
+
+/*
+ * FCoE SQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_sqe {
+ __le16 wqe;
+#define FCOE_SQE_TASK_ID (0x7FFF<<0)
+#define FCOE_SQE_TASK_ID_SHIFT 0
+#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
+};
+
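An SQ element is a single 16-bit word holding the task id and a producer toggle bit. A sketch of composing one, assuming cpu_to_le16():

/* Illustration only: build an SQE from a task id and the current toggle. */
static inline void fcoe_build_sqe(struct fcoe_sqe *sqe, u16 task_id, bool toggle)
{
	u16 wqe = task_id & FCOE_SQE_TASK_ID;

	if (toggle)
		wqe |= 1 << FCOE_SQE_TOGGLE_BIT_SHIFT;
	sqe->wqe = cpu_to_le16(wqe);
}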
+
+
+/*
+ * 14 regs $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_only {
+ union fcoe_sgl_union_ctx sgl_ctx;
+ __le32 rsrv0;
+};
+
+/*
+ * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$
+ */
+union fcoe_tx_wr_rx_rd_union_ctx {
+ struct fcoe_fc_frame tx_frame;
+ struct fcoe_fcp_cmd_payload fcp_cmd;
+ struct fcoe_ext_cleanup_info cleanup;
+ struct fcoe_ext_abts_info abts;
+ struct fcoe_ext_fw_tx_seq_ctx tx_seq;
+ __le32 opaque[8];
+};
+
+/*
+ * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd_const {
+ u8 init_flags;
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7
+ u8 tx_flags;
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7
+ __le16 rsrv3;
+ __le32 verify_tx_seq;
+};
+
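The constant TX-write/RX-read section records per-task attributes such as the task type in init_flags. An illustrative helper; the task-type encoding itself is defined elsewhere and only assumed here:

/* Sketch only: store a task type in the low bits of init_flags. */
static inline void fcoe_tce_set_task_type(struct fcoe_tce_tx_wr_rx_rd_const *c,
					  u8 task_type)
{
	c->init_flags &= ~FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE;
	c->init_flags |= (task_type << FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT) &
			 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE;
}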
+/*
+ * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd {
+ union fcoe_tx_wr_rx_rd_union_ctx union_ctx;
+ struct fcoe_tce_tx_wr_rx_rd_const const_ctx;
+};
+
+/*
+ * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_const {
+ __le32 data_2_trns;
+ __le32 init_flags;
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24
+};
+
+/*
+ * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_var {
+ __le16 rx_flags;
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME (0x1<<12)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15
+ __le16 rx_id;
+ struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy;
+};
+
+/*
+ * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd {
+ struct fcoe_tce_rx_wr_tx_rd_const const_ctx;
+ struct fcoe_tce_rx_wr_tx_rd_var var_ctx;
+};
+
+/*
+ * tce_rx_only $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_only {
+ struct fcoe_rx_seq_ctx rx_seq_ctx;
+ union fcoe_rx_wr_union_ctx union_ctx;
+};
+
+/*
+ * task_ctx_entry $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_task_ctx_entry {
+ struct fcoe_tce_tx_only txwr_only;
+ struct fcoe_tce_tx_wr_rx_rd txwr_rxrd;
+ struct fcoe_tce_rx_wr_tx_rd rxwr_txrd;
+ struct fcoe_tce_rx_only rxwr_only;
+};
+
+
+
+
+
+
+
+
+
+
+/*
+ * FCoE XFRQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_xfrqe {
+ __le16 wqe;
+#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
+#define FCOE_XFRQE_TASK_ID_SHIFT 0
+#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * Cached SGEs $$KEEP_ENDIANNESS$$
+ */
+struct common_fcoe_sgl {
+ struct fcoe_bd_ctx sge[3];
+};
+
+
+/*
+ * FCoE SQ\XFRQ element
+ */
+struct fcoe_cached_wqe {
+ struct fcoe_sqe sqe;
+ struct fcoe_xfrqe xfrqe;
+};
+
+
+/*
+ * FCoE connection enable\disable params passed by driver to FW in FCoE enable
+ * ramrod $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_conn_enable_disable_ramrod_params {
+ struct fcoe_kwqe_conn_enable_disable enable_disable_kwqe;
+};
+
+
+/*
+ * FCoE connection offload params passed by driver to FW in FCoE offload ramrod
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_conn_offload_ramrod_params {
+ struct fcoe_kwqe_conn_offload1 offload_kwqe1;
+ struct fcoe_kwqe_conn_offload2 offload_kwqe2;
+ struct fcoe_kwqe_conn_offload3 offload_kwqe3;
+ struct fcoe_kwqe_conn_offload4 offload_kwqe4;
+};
+
+
+struct ustorm_fcoe_mng_ctx {
+#if defined(__BIG_ENDIAN)
+ u8 mid_seq_proc_flag;
+ u8 tce_in_cam_flag;
+ u8 tce_on_ior_flag;
+ u8 en_cached_tce_flag;
+#elif defined(__LITTLE_ENDIAN)
+ u8 en_cached_tce_flag;
+ u8 tce_on_ior_flag;
+ u8 tce_in_cam_flag;
+ u8 mid_seq_proc_flag;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 tce_cam_addr;
+ u8 cached_conn_flag;
+ u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rsrv0;
+ u8 cached_conn_flag;
+ u8 tce_cam_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 dma_tce_ram_addr;
+ u16 tce_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tce_ram_addr;
+ u16 dma_tce_ram_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 ox_id;
+ u16 wr_done_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 wr_done_seq;
+ u16 ox_id;
+#endif
+ struct regpair task_addr;
+};
+
+/*
+ * Parameters initialized during offload according to FLOGI/PLOGI/PRLI and
+ * used in FCoE context section
+ */
+struct ustorm_fcoe_params {
+#if defined(__BIG_ENDIAN)
+ u16 fcoe_conn_id;
+ u16 flags;
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
+#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
+#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
+#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
+#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7)
+#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+ u16 flags;
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
+#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
+#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
+#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
+#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7)
+#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7
+ u16 fcoe_conn_id;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 hc_csdm_byte_en;
+ u8 func_id;
+ u8 port_id;
+ u8 vnic_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 vnic_id;
+ u8 port_id;
+ u8 func_id;
+ u8 hc_csdm_byte_en;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 rx_total_conc_seqs;
+ u16 rx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_max_fc_pay_len;
+ u16 rx_total_conc_seqs;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 task_pbe_idx_off;
+ u8 task_in_page_log_size;
+ u16 rx_max_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_max_conc_seqs;
+ u8 task_in_page_log_size;
+ u8 task_pbe_idx_off;
+#endif
+};
+
+/*
+ * FCoE 16-bits index structure
+ */
+struct fcoe_idx16_fields {
+ u16 fields;
+#define FCOE_IDX16_FIELDS_IDX (0x7FFF<<0)
+#define FCOE_IDX16_FIELDS_IDX_SHIFT 0
+#define FCOE_IDX16_FIELDS_MSB (0x1<<15)
+#define FCOE_IDX16_FIELDS_MSB_SHIFT 15
+};
+
+/*
+ * FCoE 16-bits index union
+ */
+union fcoe_idx16_field_union {
+ struct fcoe_idx16_fields fields;
+ u16 val;
+};
+
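The 16-bit index structure reserves the top bit as a wrap marker and the low 15 bits as the actual queue index, so splitting a raw producer or consumer value is just two mask operations, for example:

/* Illustration only: split a 16-bit queue index into index and MSB parts. */
static inline u16 fcoe_idx16_index(u16 val)
{
	return val & FCOE_IDX16_FIELDS_IDX;
}

static inline bool fcoe_idx16_msb(u16 val)
{
	return (val & FCOE_IDX16_FIELDS_MSB) != 0;
}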
+/*
+ * Parameters required for placement according to SGL
+ */
+struct ustorm_fcoe_data_place_mng {
+#if defined(__BIG_ENDIAN)
+ u16 sge_off;
+ u8 num_sges;
+ u8 sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+ u8 sge_idx;
+ u8 num_sges;
+ u16 sge_off;
+#endif
+};
+
+/*
+ * Parameters required for placement according to SGL
+ */
+struct ustorm_fcoe_data_place {
+ struct ustorm_fcoe_data_place_mng cached_mng;
+ struct fcoe_bd_ctx cached_sge[2];
+};
+
+/*
+ * TX processing shall write and RX processing shall read from this section
+ */
+union fcoe_u_tce_tx_wr_rx_rd_union {
+ struct fcoe_abts_info abts;
+ struct fcoe_cleanup_info cleanup;
+ struct fcoe_fw_tx_seq_ctx tx_seq_ctx;
+ u32 opaque[2];
+};
+
+/*
+ * TX processing shall write and RX processing shall read from this section
+ */
+struct fcoe_u_tce_tx_wr_rx_rd {
+ union fcoe_u_tce_tx_wr_rx_rd_union union_ctx;
+ struct fcoe_tce_tx_wr_rx_rd_const const_ctx;
+};
+
+struct ustorm_fcoe_tce {
+ struct fcoe_u_tce_tx_wr_rx_rd txwr_rxrd;
+ struct fcoe_tce_rx_wr_tx_rd rxwr_txrd;
+ struct fcoe_tce_rx_only rxwr;
+};
+
+struct ustorm_fcoe_cache_ctx {
+ u32 rsrv0;
+ struct ustorm_fcoe_data_place data_place;
+ struct ustorm_fcoe_tce tce;
+};
+
+/*
+ * Ustorm FCoE Storm Context
+ */
+struct ustorm_fcoe_st_context {
+ struct ustorm_fcoe_mng_ctx mng_ctx;
+ struct ustorm_fcoe_params fcoe_params;
+ struct regpair cq_base_addr;
+ struct regpair rq_pbl_base;
+ struct regpair rq_cur_page_addr;
+ struct regpair confq_pbl_base_addr;
+ struct regpair conn_db_base;
+ struct regpair xfrq_base_addr;
+ struct regpair lcq_base_addr;
+#if defined(__BIG_ENDIAN)
+ union fcoe_idx16_field_union rq_cons;
+ union fcoe_idx16_field_union rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ union fcoe_idx16_field_union rq_prod;
+ union fcoe_idx16_field_union rq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 xfrq_prod;
u16 cq_cons;
#elif defined(__LITTLE_ENDIAN)
u16 cq_cons;
- u16 cq_prod;
+ u16 xfrq_prod;
#endif
#if defined(__BIG_ENDIAN)
- u16 __reserved2;
- u8 decision_rules;
-#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
-#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
-#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
-#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
- u8 decision_rule_enable_bits;
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
-#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
-#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
-#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
-#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
-#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
-#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
-#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
-#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
-#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
-#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+ u16 lcq_cons;
+ u16 hc_cram_address;
#elif defined(__LITTLE_ENDIAN)
- u8 decision_rule_enable_bits;
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
-#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
-#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
-#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
-#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
-#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
-#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
-#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
-#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
-#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
-#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
-#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
-#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
- u8 decision_rules;
-#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
-#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
-#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
-#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
-#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
-#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
- u16 __reserved2;
+ u16 hc_cram_address;
+ u16 lcq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 sq_xfrq_lcq_confq_size;
+ u16 confq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 confq_prod;
+ u16 sq_xfrq_lcq_confq_size;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 hc_csdm_agg_int;
+ u8 rsrv2;
+ u8 available_rqes;
+ u8 sp_q_flush_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u8 sp_q_flush_cnt;
+ u8 available_rqes;
+ u8 rsrv2;
+ u8 hc_csdm_agg_int;
#endif
+#if defined(__BIG_ENDIAN)
+ u16 num_pend_tasks;
+ u16 pbf_ack_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+ u16 pbf_ack_ram_addr;
+ u16 num_pend_tasks;
+#endif
+ struct ustorm_fcoe_cache_ctx cache_ctx;
+};
+
+/*
+ * The FCoE non-aggregative context of Tstorm
+ */
+struct tstorm_fcoe_st_context {
+ struct regpair reserved0;
+ struct regpair reserved1;
};
/*
@@ -2023,86 +3470,106 @@ struct xstorm_fcoe_context_flags {
#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q_SHIFT 0
#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ (0x1<<2)
#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ_SHIFT 2
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_EXCHANGE_CLEANUP_DEFFERED (0x1<<3)
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_EXCHANGE_CLEANUP_DEFFERED_SHIFT 3
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ (0x1<<3)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ_SHIFT 3
#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT (0x1<<4)
#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT_SHIFT 4
#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE (0x1<<5)
#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE_SHIFT 5
#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE (0x1<<6)
#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE_SHIFT 6
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_ABTS_DEFFERED (0x1<<7)
-#define XSTORM_FCOE_CONTEXT_FLAGS_B_ABTS_DEFFERED_SHIFT 7
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN (0x1<<7)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN_SHIFT 7
};
-/*
- * FCoE SQ element
- */
-struct fcoe_sqe {
- u16 wqe;
-#define FCOE_SQE_TASK_ID (0x7FFF<<0)
-#define FCOE_SQE_TASK_ID_SHIFT 0
-#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
-#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
-};
-
-/*
- * FCoE XFRQ element
- */
-struct fcoe_xfrqe {
- u16 wqe;
-#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
-#define FCOE_XFRQE_TASK_ID_SHIFT 0
-#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
-#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
+struct xstorm_fcoe_tce {
+ struct fcoe_tce_tx_only txwr;
+ struct fcoe_tce_tx_wr_rx_rd txwr_rxrd;
};
/*
- * FCoE SQ\XFRQ element
+ * FCP_DATA parameters required for transmission
*/
-struct fcoe_cached_wqe {
+struct xstorm_fcoe_fcp_data {
+ u32 io_rem;
#if defined(__BIG_ENDIAN)
- struct fcoe_xfrqe xfrqe;
- struct fcoe_sqe sqe;
+ u16 cached_sge_off;
+ u8 cached_num_sges;
+ u8 cached_sge_idx;
#elif defined(__LITTLE_ENDIAN)
- struct fcoe_sqe sqe;
- struct fcoe_xfrqe xfrqe;
+ u8 cached_sge_idx;
+ u8 cached_num_sges;
+ u16 cached_sge_off;
+#endif
+ u32 buf_addr_hi_0;
+ u32 buf_addr_lo_0;
+#if defined(__BIG_ENDIAN)
+ u16 num_of_pending_tasks;
+ u16 buf_len_0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 buf_len_0;
+ u16 num_of_pending_tasks;
+#endif
+ u32 buf_addr_hi_1;
+ u32 buf_addr_lo_1;
+#if defined(__BIG_ENDIAN)
+ u16 task_pbe_idx_off;
+ u16 buf_len_1;
+#elif defined(__LITTLE_ENDIAN)
+ u16 buf_len_1;
+ u16 task_pbe_idx_off;
+#endif
+ u32 buf_addr_hi_2;
+ u32 buf_addr_lo_2;
+#if defined(__BIG_ENDIAN)
+ u16 ox_id;
+ u16 buf_len_2;
+#elif defined(__LITTLE_ENDIAN)
+ u16 buf_len_2;
+ u16 ox_id;
#endif
};
-struct fcoe_task_ctx_entry_tx_only {
- union fcoe_sgl_ctx sgl_ctx;
+/*
+ * vlan configuration
+ */
+struct xstorm_fcoe_vlan_conf {
+ u8 vlan_conf;
+#define XSTORM_FCOE_VLAN_CONF_PRIORITY (0x7<<0)
+#define XSTORM_FCOE_VLAN_CONF_PRIORITY_SHIFT 0
+#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG (0x1<<3)
+#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG_SHIFT 3
+#define XSTORM_FCOE_VLAN_CONF_RESERVED (0xF<<4)
+#define XSTORM_FCOE_VLAN_CONF_RESERVED_SHIFT 4
};
-struct xstorm_fcoe_task_ctx_entry_rd {
- struct fcoe_task_ctx_entry_tx_only tx_wr;
- struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
- struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
- struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
+/*
+ * FCoE 16-bits vlan structure
+ */
+struct fcoe_vlan_fields {
+ u16 fields;
+#define FCOE_VLAN_FIELDS_VID (0xFFF<<0)
+#define FCOE_VLAN_FIELDS_VID_SHIFT 0
+#define FCOE_VLAN_FIELDS_CLI (0x1<<12)
+#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
+#define FCOE_VLAN_FIELDS_PRI (0x7<<13)
+#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
};
/*
- * Cached SGEs
+ * FCoE 16-bits vlan union
*/
-struct common_fcoe_sgl {
- struct fcoe_bd_ctx sge[2];
+union fcoe_vlan_field_union {
+ struct fcoe_vlan_fields fields;
+ u16 val;
};
/*
- * FCP_DATA parameters required for transmission
+ * FCoE 16-bits vlan, vif union
*/
-struct xstorm_fcoe_fcp_data {
- u32 io_rem;
-#if defined(__BIG_ENDIAN)
- u16 cached_sge_off;
- u8 cached_num_sges;
- u8 cached_sge_idx;
-#elif defined(__LITTLE_ENDIAN)
- u8 cached_sge_idx;
- u8 cached_num_sges;
- u16 cached_sge_off;
-#endif
- struct common_fcoe_sgl cached_sgl;
+union fcoe_vlan_vif_field_union {
+ union fcoe_vlan_field_union vlan;
+ u16 vif;
};
/*
@@ -2110,18 +3577,18 @@ struct xstorm_fcoe_fcp_data {
*/
struct xstorm_fcoe_context_section {
#if defined(__BIG_ENDIAN)
- u8 vlan_flag;
+ u8 cs_ctl;
u8 s_id[3];
#elif defined(__LITTLE_ENDIAN)
u8 s_id[3];
- u8 vlan_flag;
+ u8 cs_ctl;
#endif
#if defined(__BIG_ENDIAN)
- u8 func_id;
+ u8 rctl;
u8 d_id[3];
#elif defined(__LITTLE_ENDIAN)
u8 d_id[3];
- u8 func_id;
+ u8 rctl;
#endif
#if defined(__BIG_ENDIAN)
u16 sq_xfrq_lcq_confq_size;
@@ -2133,56 +3600,84 @@ struct xstorm_fcoe_context_section {
u32 lcq_prod;
#if defined(__BIG_ENDIAN)
u8 port_id;
- u8 tx_max_conc_seqs_c3;
+ u8 func_id;
u8 seq_id;
struct xstorm_fcoe_context_flags tx_flags;
#elif defined(__LITTLE_ENDIAN)
struct xstorm_fcoe_context_flags tx_flags;
u8 seq_id;
- u8 tx_max_conc_seqs_c3;
+ u8 func_id;
u8 port_id;
#endif
#if defined(__BIG_ENDIAN)
- u16 verify_tx_seq;
+ u16 mtu;
u8 func_mode;
u8 vnic_id;
#elif defined(__LITTLE_ENDIAN)
u8 vnic_id;
u8 func_mode;
- u16 verify_tx_seq;
+ u16 mtu;
#endif
struct regpair confq_curr_page_addr;
struct fcoe_cached_wqe cached_wqe[8];
struct regpair lcq_base_addr;
- struct xstorm_fcoe_task_ctx_entry_rd tce;
+ struct xstorm_fcoe_tce tce;
struct xstorm_fcoe_fcp_data fcp_data;
#if defined(__BIG_ENDIAN)
+ u8 tx_max_conc_seqs_c3;
+ u8 vlan_flag;
+ u8 dcb_val;
+ u8 data_pb_cmd_size;
+#elif defined(__LITTLE_ENDIAN)
+ u8 data_pb_cmd_size;
+ u8 dcb_val;
+ u8 vlan_flag;
+ u8 tx_max_conc_seqs_c3;
+#endif
+#if defined(__BIG_ENDIAN)
u16 fcoe_tx_stat_params_ram_addr;
- u16 cmng_port_ram_addr;
+ u16 fcoe_tx_fc_seq_ram_addr;
#elif defined(__LITTLE_ENDIAN)
- u16 cmng_port_ram_addr;
+ u16 fcoe_tx_fc_seq_ram_addr;
u16 fcoe_tx_stat_params_ram_addr;
#endif
#if defined(__BIG_ENDIAN)
- u8 fcp_cmd_pb_cmd_size;
+ u8 fcp_cmd_line_credit;
u8 eth_hdr_size;
u16 pbf_addr;
#elif defined(__LITTLE_ENDIAN)
u16 pbf_addr;
u8 eth_hdr_size;
- u8 fcp_cmd_pb_cmd_size;
+ u8 fcp_cmd_line_credit;
#endif
#if defined(__BIG_ENDIAN)
- u8 reserved2[2];
+ union fcoe_vlan_vif_field_union multi_func_val;
+ u8 page_log_size;
+ struct xstorm_fcoe_vlan_conf orig_vlan_conf;
+#elif defined(__LITTLE_ENDIAN)
+ struct xstorm_fcoe_vlan_conf orig_vlan_conf;
+ u8 page_log_size;
+ union fcoe_vlan_vif_field_union multi_func_val;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 fcp_cmd_frame_size;
+ u16 pbf_addr_ff;
+#elif defined(__LITTLE_ENDIAN)
+ u16 pbf_addr_ff;
+ u16 fcp_cmd_frame_size;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 vlan_num;
u8 cos;
- u8 dcb_version;
+ u8 cache_xfrq_cons;
+ u8 cache_sq_cons;
#elif defined(__LITTLE_ENDIAN)
- u8 dcb_version;
+ u8 cache_sq_cons;
+ u8 cache_xfrq_cons;
u8 cos;
- u8 reserved2[2];
+ u8 vlan_num;
#endif
- u32 reserved3;
- struct regpair reserved4[2];
+ u32 verify_tx_seq;
};
/*
@@ -2207,6 +3702,181 @@ struct fcoe_context {
};
/*
+ * FCoE init params passed by driver to FW in FCoE init ramrod
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_init_ramrod_params {
+ struct fcoe_kwqe_init1 init_kwqe1;
+ struct fcoe_kwqe_init2 init_kwqe2;
+ struct fcoe_kwqe_init3 init_kwqe3;
+ struct regpair eq_pbl_base;
+ __le32 eq_pbl_size;
+ __le32 reserved2;
+ __le16 eq_prod;
+ __le16 sb_num;
+ u8 sb_id;
+ u8 reserved0;
+ __le16 reserved1;
+};
+
+/*
+ * FCoE statistics params buffer passed by driver to FW in FCoE statistics
+ * ramrod $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_stat_ramrod_params {
+ struct fcoe_kwqe_stat stat_kwqe;
+};
+
+/*
+ * CQ DB CQ producer and pending completion counter
+ */
+struct iscsi_cq_db_prod_pnd_cmpltn_cnt {
+#if defined(__BIG_ENDIAN)
+ u16 cntr;
+ u16 prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 prod;
+ u16 cntr;
+#endif
+};
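+
The mirrored __BIG_ENDIAN/__LITTLE_ENDIAN member order used here (and throughout this file) keeps each sub-field at the same bit offset of the host-order 32-bit context word on either kind of host. A small user-space sketch of that invariant, modeled on the structure above; it uses GCC's predefined __BYTE_ORDER__ only because __BIG_ENDIAN/__LITTLE_ENDIAN come from the kernel's byteorder headers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cq_db_prod_example {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__	/* kernel: #if defined(__BIG_ENDIAN) */
	uint16_t cntr;
	uint16_t prod;
#else						/* kernel: __LITTLE_ENDIAN */
	uint16_t prod;
	uint16_t cntr;
#endif
};

int main(void)
{
	struct cq_db_prod_example db = { .prod = 0x1234, .cntr = 0 };
	uint32_t word;

	memcpy(&word, &db, sizeof(word));
	/* The producer lands in the low 16 bits of the host-order 32-bit
	 * word on both big- and little-endian hosts. */
	printf("word = 0x%08x\n", (unsigned)word);	/* 0x00001234 */
	return 0;
}
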
+
+/*
+ * CQ DB pending completion ITT array
+ */
+struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr {
+ struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8];
+};
+
+/*
+ * Cstorm CQ sequence to notify array, updated by driver
+ */
+struct iscsi_cq_db_sqn_2_notify_arr {
+ u16 sqn[8];
+};
+
+/*
+ * Cstorm iSCSI Storm Context
+ */
+struct cstorm_iscsi_st_context {
+ struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr;
+ struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr;
+ struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr;
+ struct regpair hq_pbl_base;
+ struct regpair hq_curr_pbe;
+ struct regpair task_pbl_base;
+ struct regpair cq_db_base;
+#if defined(__BIG_ENDIAN)
+ u16 hq_bd_itt;
+ u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 iscsi_conn_id;
+ u16 hq_bd_itt;
+#endif
+ u32 hq_bd_data_segment_len;
+ u32 hq_bd_buffer_offset;
+#if defined(__BIG_ENDIAN)
+ u8 rsrv;
+ u8 cq_proc_en_bit_map;
+ u8 cq_pend_comp_itt_valid_bit_map;
+ u8 hq_bd_opcode;
+#elif defined(__LITTLE_ENDIAN)
+ u8 hq_bd_opcode;
+ u8 cq_pend_comp_itt_valid_bit_map;
+ u8 cq_proc_en_bit_map;
+ u8 rsrv;
+#endif
+ u32 hq_tcp_seq;
+#if defined(__BIG_ENDIAN)
+ u16 flags;
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
+ u16 hq_cons;
+#elif defined(__LITTLE_ENDIAN)
+ u16 hq_cons;
+ u16 flags;
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
+#endif
+ struct regpair rsrv1;
+};
+
+
+/*
+ * SCSI read/write SQ WQE
+ */
+struct iscsi_cmd_pdu_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+ u8 opcode;
+ u8 op_attr;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+ u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rsrv0;
+ u8 op_attr;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+ u8 opcode;
+#endif
+ u32 data_fields;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+ struct regpair lun;
+ u32 itt;
+ u32 expected_data_transfer_length;
+ u32 cmd_sn;
+ u32 exp_stat_sn;
+ u32 scsi_command_block[4];
+};
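+
A sketch of filling the op_attr flags byte of the header above for a final SCSI WRITE; the simple task-attribute code (1) follows the usual iSCSI encoding and is an assumption, not something this header defines:

#include <stdio.h>

#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES       (0x7<<0)
#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0
#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG       (0x1<<5)
#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG       (0x1<<7)

int main(void)
{
	unsigned char op_attr = 0;

	/* Simple task attribute (1), write direction, final PDU. */
	op_attr |= (1 << ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT) &
		   ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES;
	op_attr |= ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG;
	op_attr |= ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG;

	printf("op_attr = 0x%02x\n", op_attr);	/* 0xa1 */
	return 0;
}
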
+
+
+/*
+ * Buffer per connection, used in Tstorm
+ */
+struct iscsi_conn_buf {
+ struct regpair reserved[8];
+};
+
+
+/*
* iSCSI context region, used only in iSCSI
*/
struct ustorm_iscsi_rq_db {
@@ -2271,11 +3941,13 @@ struct ustorm_iscsi_placement_db {
u32 local_sge_1_address_hi;
u32 local_sge_1_address_lo;
#if defined(__BIG_ENDIAN)
- u16 reserved6;
+ u8 exp_padding_2b;
+ u8 nal_len_3b;
u16 local_sge_1_size;
#elif defined(__LITTLE_ENDIAN)
u16 local_sge_1_size;
- u16 reserved6;
+ u8 nal_len_3b;
+ u8 exp_padding_2b;
#endif
#if defined(__BIG_ENDIAN)
u8 sgl_size;
@@ -2300,12 +3972,8 @@ struct ustorm_iscsi_placement_db {
u32 nal;
#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE (0xFFFFFF<<0)
#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT 0
-#define USTORM_ISCSI_PLACEMENT_DB_EXP_PADDING_2B (0x3<<24)
-#define USTORM_ISCSI_PLACEMENT_DB_EXP_PADDING_2B_SHIFT 24
-#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0x7<<26)
-#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 26
-#define USTORM_ISCSI_PLACEMENT_DB_NAL_LEN_3B (0x7<<29)
-#define USTORM_ISCSI_PLACEMENT_DB_NAL_LEN_3B_SHIFT 29
+#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 24
};
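
The reworked nal word above now splits into a 24-bit remaining-SGE size and an 8-bit expected-digest field. A minimal sketch of extracting both with the (mask, shift) pairs; the U suffix on the 0xFF<<24 mask is added only so the expression is well defined in stand-alone C, and the sample value is arbitrary:

#include <stdint.h>
#include <stdio.h>

#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE        (0xFFFFFF<<0)
#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT  0
#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B       (0xFFU<<24)
#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 24

int main(void)
{
	uint32_t nal = 0x04000200;	/* example value */
	uint32_t rem_sge_size =
		(nal & USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE) >>
		USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT;
	uint32_t exp_digest =
		(nal & USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B) >>
		USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT;

	printf("rem_sge_size=%u exp_digest=%u\n",
	       (unsigned)rem_sge_size, (unsigned)exp_digest);	/* 512, 4 */
	return 0;
}
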
/*
@@ -2509,7 +4177,13 @@ struct tstorm_tcp_st_context_section {
u16 vlan_id;
u16 lsb_mac_address;
#endif
- u32 msb_mac_address;
+#if defined(__BIG_ENDIAN)
+ u16 msb_mac_address;
+ u16 mid_mac_address;
+#elif defined(__LITTLE_ENDIAN)
+ u16 mid_mac_address;
+ u16 msb_mac_address;
+#endif
u32 rightmost_received_seq;
};
@@ -2534,13 +4208,7 @@ struct iscsi_term_vars {
* iSCSI context region, used only in iSCSI
*/
struct tstorm_iscsi_st_context_section {
-#if defined(__BIG_ENDIAN)
- u16 rem_tcp_data_len;
- u16 brb_offset;
-#elif defined(__LITTLE_ENDIAN)
- u16 brb_offset;
- u16 rem_tcp_data_len;
-#endif
+ u32 nalPayload;
u32 b2nh;
#if defined(__BIG_ENDIAN)
u16 rq_cons;
@@ -2555,8 +4223,10 @@ struct tstorm_iscsi_st_context_section {
#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV (0x7<<5)
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV_SHIFT 5
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7
u8 hdr_bytes_2_fetch;
#elif defined(__LITTLE_ENDIAN)
u8 hdr_bytes_2_fetch;
@@ -2571,18 +4241,20 @@ struct tstorm_iscsi_st_context_section {
#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV (0x7<<5)
-#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV_SHIFT 5
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7
u16 rq_cons;
#endif
struct regpair rq_db_phy_addr;
#if defined(__BIG_ENDIAN)
struct iscsi_term_vars term_vars;
- u8 scratchpad_idx;
+ u8 rsrv1;
u16 iscsi_conn_id;
#elif defined(__LITTLE_ENDIAN)
u16 iscsi_conn_id;
- u8 scratchpad_idx;
+ u8 rsrv1;
struct iscsi_term_vars term_vars;
#endif
u32 process_nxt;
@@ -2597,724 +4269,6 @@ struct tstorm_iscsi_st_context {
};
/*
- * The tcp aggregative context section of Xstorm
- */
-struct xstorm_tcp_tcp_ag_context_section {
-#if defined(__BIG_ENDIAN)
- u8 __tcp_agg_vars1;
- u8 __da_cnt;
- u16 mss;
-#elif defined(__LITTLE_ENDIAN)
- u16 mss;
- u8 __da_cnt;
- u8 __tcp_agg_vars1;
-#endif
- u32 snd_nxt;
- u32 tx_wnd;
- u32 snd_una;
- u32 local_adv_wnd;
-#if defined(__BIG_ENDIAN)
- u8 __agg_val8_th;
- u8 __agg_val8;
- u16 tcp_agg_vars2;
-#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
-#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
-#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
-#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN (0x1<<7)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN_SHIFT 7
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
-#elif defined(__LITTLE_ENDIAN)
- u16 tcp_agg_vars2;
-#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
-#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
-#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
-#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN (0x1<<7)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN_SHIFT 7
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
- u8 __agg_val8;
- u8 __agg_val8_th;
-#endif
- u32 ack_to_far_end;
- u32 rto_timer;
- u32 ka_timer;
- u32 ts_to_echo;
-#if defined(__BIG_ENDIAN)
- u16 __agg_val7_th;
- u16 __agg_val7;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_val7;
- u16 __agg_val7_th;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 __tcp_agg_vars5;
- u8 __tcp_agg_vars4;
- u8 __tcp_agg_vars3;
- u8 __force_pure_ack_cnt;
-#elif defined(__LITTLE_ENDIAN)
- u8 __force_pure_ack_cnt;
- u8 __tcp_agg_vars3;
- u8 __tcp_agg_vars4;
- u8 __tcp_agg_vars5;
-#endif
- u32 tcp_agg_vars6;
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN (0x1<<0)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN_SHIFT 0
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_EN (0x1<<1)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_EN_SHIFT 1
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN (0x1<<2)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN_SHIFT 2
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<3)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 3
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG (0x1<<4)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG_SHIFT 4
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG (0x1<<5)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG_SHIFT 5
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF (0x3<<6)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF_SHIFT 6
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF (0x3<<8)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_SHIFT 8
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF (0x3<<10)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_SHIFT 10
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF (0x3<<12)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_SHIFT 12
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF (0x3<<14)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_SHIFT 14
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF (0x3<<16)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF_SHIFT 16
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF (0x3<<18)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF_SHIFT 18
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF (0x3<<20)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF_SHIFT 20
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF (0x3<<22)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF_SHIFT 22
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF (0x3<<24)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF_SHIFT 24
-#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG (0x1<<26)
-#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG_SHIFT 26
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71 (0x1<<27)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71_SHIFT 27
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY (0x1<<28)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY_SHIFT 28
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG (0x1<<29)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG_SHIFT 29
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG (0x1<<30)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG_SHIFT 30
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG (0x1<<31)
-#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG_SHIFT 31
-#if defined(__BIG_ENDIAN)
- u16 __agg_misc6;
- u16 __tcp_agg_vars7;
-#elif defined(__LITTLE_ENDIAN)
- u16 __tcp_agg_vars7;
- u16 __agg_misc6;
-#endif
- u32 __agg_val10;
- u32 __agg_val10_th;
-#if defined(__BIG_ENDIAN)
- u16 __reserved3;
- u8 __reserved2;
- u8 __da_only_cnt;
-#elif defined(__LITTLE_ENDIAN)
- u8 __da_only_cnt;
- u8 __reserved2;
- u16 __reserved3;
-#endif
-};
-
-/*
- * The iscsi aggregative context of Xstorm
- */
-struct xstorm_iscsi_ag_context {
-#if defined(__BIG_ENDIAN)
- u16 agg_val1;
- u8 agg_vars1;
-#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
-#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
-#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
-#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
-#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
-#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
- u8 state;
-#elif defined(__LITTLE_ENDIAN)
- u8 state;
- u8 agg_vars1;
-#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
-#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
-#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
-#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
-#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
-#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
- u16 agg_val1;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 cdu_reserved;
- u8 __agg_vars4;
- u8 agg_vars3;
-#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
-#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
-#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
-#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
- u8 agg_vars2;
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
-#elif defined(__LITTLE_ENDIAN)
- u8 agg_vars2;
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
-#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
- u8 agg_vars3;
-#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
-#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
-#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
-#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
- u8 __agg_vars4;
- u8 cdu_reserved;
-#endif
- u32 more_to_send;
-#if defined(__BIG_ENDIAN)
- u16 agg_vars5;
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
-#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
-#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
-#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
-#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
- u16 sq_cons;
-#elif defined(__LITTLE_ENDIAN)
- u16 sq_cons;
- u16 agg_vars5;
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
-#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
-#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
-#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
-#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
-#endif
- struct xstorm_tcp_tcp_ag_context_section tcp;
-#if defined(__BIG_ENDIAN)
- u16 agg_vars7;
-#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
-#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
-#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
-#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
-#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
-#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
-#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
-#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
-#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
-#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
- u8 agg_val3_th;
- u8 agg_vars6;
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
-#elif defined(__LITTLE_ENDIAN)
- u8 agg_vars6;
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
- u8 agg_val3_th;
- u16 agg_vars7;
-#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
-#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
-#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
-#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
-#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
-#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
-#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
-#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
-#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
-#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
-#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
-#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __agg_val11_th;
- u16 __gen_data;
-#elif defined(__LITTLE_ENDIAN)
- u16 __gen_data;
- u16 __agg_val11_th;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 __reserved1;
- u8 __agg_val6_th;
- u16 __agg_val9;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_val9;
- u8 __agg_val6_th;
- u8 __reserved1;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 hq_prod;
- u16 hq_cons;
-#elif defined(__LITTLE_ENDIAN)
- u16 hq_cons;
- u16 hq_prod;
-#endif
- u32 agg_vars8;
-#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
-#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2_SHIFT 0
-#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
-#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3_SHIFT 24
-#if defined(__BIG_ENDIAN)
- u16 r2tq_prod;
- u16 sq_prod;
-#elif defined(__LITTLE_ENDIAN)
- u16 sq_prod;
- u16 r2tq_prod;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 agg_val3;
- u8 agg_val6;
- u8 agg_val5_th;
- u8 agg_val5;
-#elif defined(__LITTLE_ENDIAN)
- u8 agg_val5;
- u8 agg_val5_th;
- u8 agg_val6;
- u8 agg_val3;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __agg_misc1;
- u16 agg_limit1;
-#elif defined(__LITTLE_ENDIAN)
- u16 agg_limit1;
- u16 __agg_misc1;
-#endif
- u32 hq_cons_tcp_seq;
- u32 exp_stat_sn;
- u32 rst_seq_num;
-};
-
-/*
- * The tcp aggregative context section of Tstorm
- */
-struct tstorm_tcp_tcp_ag_context_section {
- u32 __agg_val1;
-#if defined(__BIG_ENDIAN)
- u8 __tcp_agg_vars2;
- u8 __agg_val3;
- u16 __agg_val2;
-#elif defined(__LITTLE_ENDIAN)
- u16 __agg_val2;
- u8 __agg_val3;
- u8 __tcp_agg_vars2;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __agg_val5;
- u8 __agg_val6;
- u8 __tcp_agg_vars3;
-#elif defined(__LITTLE_ENDIAN)
- u8 __tcp_agg_vars3;
- u8 __agg_val6;
- u16 __agg_val5;
-#endif
- u32 snd_nxt;
- u32 rtt_seq;
- u32 rtt_time;
- u32 __reserved66;
- u32 wnd_right_edge;
- u32 tcp_agg_vars1;
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
-#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
-#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
- u32 snd_max;
- u32 snd_una;
- u32 __reserved2;
-};
-
-/*
- * The iscsi aggregative context of Tstorm
- */
-struct tstorm_iscsi_ag_context {
-#if defined(__BIG_ENDIAN)
- u16 ulp_credit;
- u8 agg_vars1;
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
-#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
-#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
- u8 state;
-#elif defined(__LITTLE_ENDIAN)
- u8 state;
- u8 agg_vars1;
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
-#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
-#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
- u16 ulp_credit;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __agg_val4;
- u16 agg_vars2;
-#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
-#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
-#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
-#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
-#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
-#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
-#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
-#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
-#elif defined(__LITTLE_ENDIAN)
- u16 agg_vars2;
-#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
-#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
-#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
-#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
-#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
-#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
-#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
-#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
-#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
-#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
-#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
-#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
- u16 __agg_val4;
-#endif
- struct tstorm_tcp_tcp_ag_context_section tcp;
-};
-
-/*
- * The iscsi aggregative context of Ustorm
- */
-struct ustorm_iscsi_ag_context {
-#if defined(__BIG_ENDIAN)
- u8 __aux_counter_flags;
- u8 agg_vars2;
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
-#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
-#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
- u8 agg_vars1;
-#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
- u8 state;
-#elif defined(__LITTLE_ENDIAN)
- u8 state;
- u8 agg_vars1;
-#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
-#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
-#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
- u8 agg_vars2;
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
-#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
-#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
- u8 __aux_counter_flags;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 cdu_usage;
- u8 agg_misc2;
- u16 __cq_local_comp_itt_val;
-#elif defined(__LITTLE_ENDIAN)
- u16 __cq_local_comp_itt_val;
- u8 agg_misc2;
- u8 cdu_usage;
-#endif
- u32 agg_misc4;
-#if defined(__BIG_ENDIAN)
- u8 agg_val3_th;
- u8 agg_val3;
- u16 agg_misc3;
-#elif defined(__LITTLE_ENDIAN)
- u16 agg_misc3;
- u8 agg_val3;
- u8 agg_val3_th;
-#endif
- u32 agg_val1;
- u32 agg_misc4_th;
-#if defined(__BIG_ENDIAN)
- u16 agg_val2_th;
- u16 agg_val2;
-#elif defined(__LITTLE_ENDIAN)
- u16 agg_val2;
- u16 agg_val2_th;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 __reserved2;
- u8 decision_rules;
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
-#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
-#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
- u8 decision_rule_enable_bits;
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
-#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
-#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
-#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
-#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
-#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
-#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
-#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
-#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
-#elif defined(__LITTLE_ENDIAN)
- u8 decision_rule_enable_bits;
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
-#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
-#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
-#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
-#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
-#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
-#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
-#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
-#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
-#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
-#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
-#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
-#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
- u8 decision_rules;
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
-#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
-#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
-#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
-#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
- u16 __reserved2;
-#endif
-};
-
-/*
* Ethernet context section, shared in TOE, RDMA and ISCSI
*/
struct xstorm_eth_context_section {
@@ -3509,7 +4463,27 @@ struct xstorm_tcp_context_section {
u16 window_scaling_factor;
u16 pseudo_csum;
#endif
- u32 reserved2;
+#if defined(__BIG_ENDIAN)
+ u16 reserved2;
+ u8 statistics_counter_id;
+ u8 statistics_params;
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2)
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2
+#elif defined(__LITTLE_ENDIAN)
+ u8 statistics_params;
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2)
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2
+ u8 statistics_counter_id;
+ u16 reserved2;
+#endif
u32 ts_time_diff;
u32 __next_timer_expir;
};
@@ -3522,29 +4496,31 @@ struct xstorm_common_context_section {
union xstorm_ip_context_section_types ip_union;
struct xstorm_tcp_context_section tcp;
#if defined(__BIG_ENDIAN)
- u16 reserved;
- u8 statistics_params;
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
-#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
-#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
-#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS (0x1<<7)
-#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS_SHIFT 7
+ u8 __dcb_val;
+ u8 flags;
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0)
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1)
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4)
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5)
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5
+ u8 reserved;
u8 ip_version_1b;
#elif defined(__LITTLE_ENDIAN)
u8 ip_version_1b;
- u8 statistics_params;
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
-#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
-#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
-#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
-#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS (0x1<<7)
-#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS_SHIFT 7
- u16 reserved;
+ u8 reserved;
+ u8 flags;
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0)
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1)
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4)
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5)
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5
+ u8 __dcb_val;
#endif
};
@@ -3682,99 +4658,6 @@ struct xstorm_iscsi_st_context {
};
/*
- * CQ DB CQ producer and pending completion counter
- */
-struct iscsi_cq_db_prod_pnd_cmpltn_cnt {
-#if defined(__BIG_ENDIAN)
- u16 cntr;
- u16 prod;
-#elif defined(__LITTLE_ENDIAN)
- u16 prod;
- u16 cntr;
-#endif
-};
-
-/*
- * CQ DB pending completion ITT array
- */
-struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr {
- struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8];
-};
-
-/*
- * Cstorm CQ sequence to notify array, updated by driver
- */
-struct iscsi_cq_db_sqn_2_notify_arr {
- u16 sqn[8];
-};
-
-/*
- * Cstorm iSCSI Storm Context
- */
-struct cstorm_iscsi_st_context {
- struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr;
- struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr;
- struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr;
- struct regpair hq_pbl_base;
- struct regpair hq_curr_pbe;
- struct regpair task_pbl_base;
- struct regpair cq_db_base;
-#if defined(__BIG_ENDIAN)
- u16 hq_bd_itt;
- u16 iscsi_conn_id;
-#elif defined(__LITTLE_ENDIAN)
- u16 iscsi_conn_id;
- u16 hq_bd_itt;
-#endif
- u32 hq_bd_data_segment_len;
- u32 hq_bd_buffer_offset;
-#if defined(__BIG_ENDIAN)
- u8 timer_entry_idx;
- u8 cq_proc_en_bit_map;
- u8 cq_pend_comp_itt_valid_bit_map;
- u8 hq_bd_opcode;
-#elif defined(__LITTLE_ENDIAN)
- u8 hq_bd_opcode;
- u8 cq_pend_comp_itt_valid_bit_map;
- u8 cq_proc_en_bit_map;
- u8 timer_entry_idx;
-#endif
- u32 hq_tcp_seq;
-#if defined(__BIG_ENDIAN)
- u16 flags;
-#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
-#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
-#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
-#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
-#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
-#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
- u16 hq_cons;
-#elif defined(__LITTLE_ENDIAN)
- u16 hq_cons;
- u16 flags;
-#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
-#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
-#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
-#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
-#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
-#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
-#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
-#endif
- struct regpair rsrv1;
-};
-
-/*
* Iscsi connection context
*/
struct iscsi_context {
@@ -3791,583 +4674,388 @@ struct iscsi_context {
struct cstorm_iscsi_st_context cstorm_st_context;
};
-/*
- * FCoE KCQ CQE parameters
- */
-union fcoe_kcqe_params {
- u32 reserved0[4];
-};
/*
- * FCoE KCQ CQE
+ * PDU header of an iSCSI DATA-OUT
*/
-struct fcoe_kcqe {
- u32 fcoe_conn_id;
- u32 completion_status;
- u32 fcoe_conn_context_id;
- union fcoe_kcqe_params params;
+struct iscsi_data_pdu_hdr_little_endian {
#if defined(__BIG_ENDIAN)
- u8 flags;
-#define FCOE_KCQE_RESERVED0 (0x7<<0)
-#define FCOE_KCQE_RESERVED0_SHIFT 0
-#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
-#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
-#define FCOE_KCQE_LAYER_CODE (0x7<<4)
-#define FCOE_KCQE_LAYER_CODE_SHIFT 4
-#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
-#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
- u8 op_code;
- u16 qe_self_seq;
+ u8 opcode;
+ u8 op_attr;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+ u16 rsrv0;
#elif defined(__LITTLE_ENDIAN)
- u16 qe_self_seq;
- u8 op_code;
- u8 flags;
-#define FCOE_KCQE_RESERVED0 (0x7<<0)
-#define FCOE_KCQE_RESERVED0_SHIFT 0
-#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
-#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
-#define FCOE_KCQE_LAYER_CODE (0x7<<4)
-#define FCOE_KCQE_LAYER_CODE_SHIFT 4
-#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
-#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+ u16 rsrv0;
+ u8 op_attr;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+ u8 opcode;
#endif
+ u32 data_fields;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+ struct regpair lun;
+ u32 itt;
+ u32 ttt;
+ u32 rsrv2;
+ u32 exp_stat_sn;
+ u32 rsrv3;
+ u32 data_sn;
+ u32 buffer_offset;
+ u32 rsrv4;
};
-/*
- * FCoE KWQE header
- */
-struct fcoe_kwqe_header {
-#if defined(__BIG_ENDIAN)
- u8 flags;
-#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
-#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
-#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
-#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
-#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
-#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
- u8 op_code;
-#elif defined(__LITTLE_ENDIAN)
- u8 op_code;
- u8 flags;
-#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
-#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
-#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
-#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
-#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
-#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
-#endif
-};
/*
- * FCoE firmware init request 1
+ * PDU header of an iSCSI login request
*/
-struct fcoe_kwqe_init1 {
+struct iscsi_login_req_hdr_little_endian {
#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 num_tasks;
-#elif defined(__LITTLE_ENDIAN)
- u16 num_tasks;
- struct fcoe_kwqe_header hdr;
-#endif
- u32 task_list_pbl_addr_lo;
- u32 task_list_pbl_addr_hi;
- u32 dummy_buffer_addr_lo;
- u32 dummy_buffer_addr_hi;
-#if defined(__BIG_ENDIAN)
- u16 rq_num_wqes;
- u16 sq_num_wqes;
-#elif defined(__LITTLE_ENDIAN)
- u16 sq_num_wqes;
- u16 rq_num_wqes;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 cq_num_wqes;
- u16 rq_buffer_log_size;
-#elif defined(__LITTLE_ENDIAN)
- u16 rq_buffer_log_size;
- u16 cq_num_wqes;
+ u8 opcode;
+ u8 op_attr;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7
+ u8 version_max;
+ u8 version_min;
+#elif defined(__LITTLE_ENDIAN)
+ u8 version_min;
+ u8 version_max;
+ u8 op_attr;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7
+ u8 opcode;
#endif
+ u32 data_fields;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+ u32 isid_lo;
#if defined(__BIG_ENDIAN)
- u8 flags;
-#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
-#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
-#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
-#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
-#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
-#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
- u8 num_sessions_log;
- u16 mtu;
+ u16 isid_hi;
+ u16 tsih;
#elif defined(__LITTLE_ENDIAN)
- u16 mtu;
- u8 num_sessions_log;
- u8 flags;
-#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
-#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
-#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
-#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
-#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
-#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+ u16 tsih;
+ u16 isid_hi;
#endif
-};
-
-/*
- * FCoE firmware init request 2
- */
-struct fcoe_kwqe_init2 {
+ u32 itt;
#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 reserved0;
+ u16 cid;
+ u16 rsrv1;
#elif defined(__LITTLE_ENDIAN)
- u16 reserved0;
- struct fcoe_kwqe_header hdr;
+ u16 rsrv1;
+ u16 cid;
#endif
- u32 hash_tbl_pbl_addr_lo;
- u32 hash_tbl_pbl_addr_hi;
- u32 t2_hash_tbl_addr_lo;
- u32 t2_hash_tbl_addr_hi;
- u32 t2_ptr_hash_tbl_addr_lo;
- u32 t2_ptr_hash_tbl_addr_hi;
- u32 free_list_count;
+ u32 cmd_sn;
+ u32 exp_stat_sn;
+ u32 rsrv2[4];
};
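The sub-byte flags in op_attr and the packed lengths in data_fields are accessed through the mask/shift macro pairs rather than C bitfields, which keeps the wire layout identical on both endiannesses. A minimal sketch of filling the stage bits and the data-segment length, assuming this header is included and the structure has been zeroed by the caller; the helper name and parameters are illustrative, not part of the driver:

	/* Sketch only: pack the login stage bits and the data segment length
	 * using the macros defined above. */
	static void example_fill_login_flags(struct iscsi_login_req_hdr_little_endian *hdr,
					     u8 csg, u8 nsg, bool transit,
					     u32 data_seg_len)
	{
		hdr->op_attr = ((csg << ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT) &
				ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG) |
			       ((nsg << ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT) &
				ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG);
		if (transit)
			hdr->op_attr |= ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT;

		/* data_fields packs the 24-bit data segment length with the
		 * 8-bit total AHS length (left at zero here). */
		hdr->data_fields = (data_seg_len <<
				    ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT) &
				   ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH;
	}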
/*
- * FCoE firmware init request 3
+ * PDU header of an iSCSI logout request
*/
-struct fcoe_kwqe_init3 {
+struct iscsi_logout_req_hdr_little_endian {
#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 reserved0;
+ u8 opcode;
+ u8 op_attr;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+ u16 rsrv0;
#elif defined(__LITTLE_ENDIAN)
- u16 reserved0;
- struct fcoe_kwqe_header hdr;
+ u16 rsrv0;
+ u8 op_attr;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+ u8 opcode;
#endif
- u32 error_bit_map_lo;
- u32 error_bit_map_hi;
+ u32 data_fields;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+ u32 rsrv2[2];
+ u32 itt;
#if defined(__BIG_ENDIAN)
- u8 reserved21[3];
- u8 cached_session_enable;
+ u16 cid;
+ u16 rsrv1;
#elif defined(__LITTLE_ENDIAN)
- u8 cached_session_enable;
- u8 reserved21[3];
+ u16 rsrv1;
+ u16 cid;
#endif
- u32 reserved2[4];
+ u32 cmd_sn;
+ u32 exp_stat_sn;
+ u32 rsrv3[4];
};
/*
- * FCoE connection offload request 1
+ * PDU header of an iSCSI TMF request
*/
-struct fcoe_kwqe_conn_offload1 {
-#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 fcoe_conn_id;
-#elif defined(__LITTLE_ENDIAN)
- u16 fcoe_conn_id;
- struct fcoe_kwqe_header hdr;
-#endif
- u32 sq_addr_lo;
- u32 sq_addr_hi;
- u32 rq_pbl_addr_lo;
- u32 rq_pbl_addr_hi;
- u32 rq_first_pbe_addr_lo;
- u32 rq_first_pbe_addr_hi;
+struct iscsi_tmf_req_hdr_little_endian {
#if defined(__BIG_ENDIAN)
- u16 reserved0;
- u16 rq_prod;
+ u8 opcode;
+ u8 op_attr;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+ u16 rsrv0;
#elif defined(__LITTLE_ENDIAN)
- u16 rq_prod;
- u16 reserved0;
+ u16 rsrv0;
+ u8 op_attr;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+ u8 opcode;
#endif
+ u32 data_fields;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+ struct regpair lun;
+ u32 itt;
+ u32 referenced_task_tag;
+ u32 cmd_sn;
+ u32 exp_stat_sn;
+ u32 ref_cmd_sn;
+ u32 exp_data_sn;
+ u32 rsrv2[2];
};
/*
- * FCoE connection offload request 2
+ * PDU header of an iSCSI Text request
*/
-struct fcoe_kwqe_conn_offload2 {
+struct iscsi_text_req_hdr_little_endian {
#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 tx_max_fc_pay_len;
+ u8 opcode;
+ u8 op_attr;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7
+ u16 rsrv0;
#elif defined(__LITTLE_ENDIAN)
- u16 tx_max_fc_pay_len;
- struct fcoe_kwqe_header hdr;
+ u16 rsrv0;
+ u8 op_attr;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7
+ u8 opcode;
#endif
- u32 cq_addr_lo;
- u32 cq_addr_hi;
- u32 xferq_addr_lo;
- u32 xferq_addr_hi;
- u32 conn_db_addr_lo;
- u32 conn_db_addr_hi;
- u32 reserved1;
+ u32 data_fields;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+ struct regpair lun;
+ u32 itt;
+ u32 ttt;
+ u32 cmd_sn;
+ u32 exp_stat_sn;
+ u32 rsrv3[4];
};
/*
- * FCoE connection offload request 3
+ * PDU header of an iSCSI Nop-Out
*/
-struct fcoe_kwqe_conn_offload3 {
+struct iscsi_nop_out_hdr_little_endian {
#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 vlan_tag;
-#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
-#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
-#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
-#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
-#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
-#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
-#elif defined(__LITTLE_ENDIAN)
- u16 vlan_tag;
-#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
-#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
-#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
-#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
-#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
-#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
- struct fcoe_kwqe_header hdr;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 tx_max_conc_seqs_c3;
- u8 s_id[3];
-#elif defined(__LITTLE_ENDIAN)
- u8 s_id[3];
- u8 tx_max_conc_seqs_c3;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 flags;
-#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
-#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
-#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
-#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
-#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
-#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
- u8 d_id[3];
-#elif defined(__LITTLE_ENDIAN)
- u8 d_id[3];
- u8 flags;
-#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
-#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
-#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
-#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
-#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
-#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
-#endif
- u32 reserved;
- u32 confq_first_pbe_addr_lo;
- u32 confq_first_pbe_addr_hi;
-#if defined(__BIG_ENDIAN)
- u16 rx_max_fc_pay_len;
- u16 tx_total_conc_seqs;
-#elif defined(__LITTLE_ENDIAN)
- u16 tx_total_conc_seqs;
- u16 rx_max_fc_pay_len;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 rx_open_seqs_exch_c3;
- u8 rx_max_conc_seqs_c3;
- u16 rx_total_conc_seqs;
+ u8 opcode;
+ u8 op_attr;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7
+ u16 rsrv0;
#elif defined(__LITTLE_ENDIAN)
- u16 rx_total_conc_seqs;
- u8 rx_max_conc_seqs_c3;
- u8 rx_open_seqs_exch_c3;
+ u16 rsrv0;
+ u8 op_attr;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7
+ u8 opcode;
#endif
+ u32 data_fields;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+ struct regpair lun;
+ u32 itt;
+ u32 ttt;
+ u32 cmd_sn;
+ u32 exp_stat_sn;
+ u32 rsrv3[4];
};
/*
- * FCoE connection offload request 4
+ * iSCSI PDU headers in little-endian form.
*/
-struct fcoe_kwqe_conn_offload4 {
-#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u8 reserved2;
- u8 e_d_tov_timer_val;
-#elif defined(__LITTLE_ENDIAN)
- u8 e_d_tov_timer_val;
- u8 reserved2;
- struct fcoe_kwqe_header hdr;
-#endif
- u8 src_mac_addr_lo32[4];
-#if defined(__BIG_ENDIAN)
- u8 dst_mac_addr_hi16[2];
- u8 src_mac_addr_hi16[2];
-#elif defined(__LITTLE_ENDIAN)
- u8 src_mac_addr_hi16[2];
- u8 dst_mac_addr_hi16[2];
-#endif
- u8 dst_mac_addr_lo32[4];
- u32 lcq_addr_lo;
- u32 lcq_addr_hi;
- u32 confq_pbl_base_addr_lo;
- u32 confq_pbl_base_addr_hi;
+union iscsi_pdu_headers_little_endian {
+ u32 fullHeaderSize[12];
+ struct iscsi_cmd_pdu_hdr_little_endian command_pdu_hdr;
+ struct iscsi_data_pdu_hdr_little_endian data_out_pdu_hdr;
+ struct iscsi_login_req_hdr_little_endian login_req_pdu_hdr;
+ struct iscsi_logout_req_hdr_little_endian logout_req_pdu_hdr;
+ struct iscsi_tmf_req_hdr_little_endian tmf_req_pdu_hdr;
+ struct iscsi_text_req_hdr_little_endian text_req_pdu_hdr;
+ struct iscsi_nop_out_hdr_little_endian nop_out_pdu_hdr;
};
-/*
- * FCoE connection enable request
- */
-struct fcoe_kwqe_conn_enable_disable {
-#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
- u16 reserved0;
- struct fcoe_kwqe_header hdr;
-#endif
- u8 src_mac_addr_lo32[4];
-#if defined(__BIG_ENDIAN)
- u16 vlan_tag;
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
- u8 src_mac_addr_hi16[2];
-#elif defined(__LITTLE_ENDIAN)
- u8 src_mac_addr_hi16[2];
- u16 vlan_tag;
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
-#endif
- u8 dst_mac_addr_lo32[4];
+struct iscsi_hq_bd {
+ union iscsi_pdu_headers_little_endian pdu_header;
#if defined(__BIG_ENDIAN)
u16 reserved1;
- u8 dst_mac_addr_hi16[2];
+ u16 lcl_cmp_flg;
#elif defined(__LITTLE_ENDIAN)
- u8 dst_mac_addr_hi16[2];
+ u16 lcl_cmp_flg;
u16 reserved1;
#endif
+ u32 sgl_base_lo;
+ u32 sgl_base_hi;
#if defined(__BIG_ENDIAN)
- u8 vlan_flag;
- u8 s_id[3];
-#elif defined(__LITTLE_ENDIAN)
- u8 s_id[3];
- u8 vlan_flag;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 reserved3;
- u8 d_id[3];
+ u8 sgl_size;
+ u8 sge_index;
+ u16 sge_offset;
#elif defined(__LITTLE_ENDIAN)
- u8 d_id[3];
- u8 reserved3;
+ u16 sge_offset;
+ u8 sge_index;
+ u8 sgl_size;
#endif
- u32 context_id;
- u32 conn_id;
- u32 reserved4;
};
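struct iscsi_hq_bd pairs one of the little-endian PDU headers above with the SGL that backs its data. A hedged sketch of populating such a descriptor for a Nop-Out, assuming the header has already been prepared and sgl_dma is the DMA address of the SGE list; the function is illustrative only:

	/* Sketch only: fill an HQ buffer descriptor from a prepared header
	 * and an SGL address. */
	static void example_fill_hq_bd(struct iscsi_hq_bd *bd,
				       const struct iscsi_nop_out_hdr_little_endian *hdr,
				       u64 sgl_dma, u8 num_sges)
	{
		bd->pdu_header.nop_out_pdu_hdr = *hdr;	/* select the union member */
		bd->lcl_cmp_flg = 0;
		bd->sgl_base_lo = (u32)sgl_dma;
		bd->sgl_base_hi = (u32)(sgl_dma >> 32);
		bd->sgl_size = num_sges;
		bd->sge_index = 0;			/* start from the first SGE */
		bd->sge_offset = 0;
	}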
-/*
- * FCoE connection destroy request
- */
-struct fcoe_kwqe_conn_destroy {
-#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
- u16 reserved0;
- struct fcoe_kwqe_header hdr;
-#endif
- u32 context_id;
- u32 conn_id;
- u32 reserved1[5];
-};
/*
- * FCoe destroy request
+ * CQE data for L2 OOO connection $$KEEP_ENDIANNESS$$
*/
-struct fcoe_kwqe_destroy {
-#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
- u16 reserved0;
- struct fcoe_kwqe_header hdr;
-#endif
- u32 reserved1[7];
+struct iscsi_l2_ooo_data {
+ __le32 iscsi_cid;
+ u8 drop_isle;
+ u8 drop_size;
+ u8 ooo_opcode;
+ u8 ooo_isle;
+ u8 reserved[8];
};
-/*
- * FCoe statistics request
- */
-struct fcoe_kwqe_stat {
-#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
- u16 reserved0;
- struct fcoe_kwqe_header hdr;
-#endif
- u32 stat_params_addr_lo;
- u32 stat_params_addr_hi;
- u32 reserved1[5];
-};
-/*
- * FCoE KWQ WQE
- */
-union fcoe_kwqe {
- struct fcoe_kwqe_init1 init1;
- struct fcoe_kwqe_init2 init2;
- struct fcoe_kwqe_init3 init3;
- struct fcoe_kwqe_conn_offload1 conn_offload1;
- struct fcoe_kwqe_conn_offload2 conn_offload2;
- struct fcoe_kwqe_conn_offload3 conn_offload3;
- struct fcoe_kwqe_conn_offload4 conn_offload4;
- struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
- struct fcoe_kwqe_conn_destroy conn_destroy;
- struct fcoe_kwqe_destroy destroy;
- struct fcoe_kwqe_stat statistics;
-};
-struct fcoe_task_ctx_entry {
- struct fcoe_task_ctx_entry_tx_only tx_wr_only;
- struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
- struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
- struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
- struct fcoe_task_ctx_entry_rx_only rx_wr_only;
- u32 reserved[4];
-};
-/*
- * FCoE connection enable\disable params passed by driver to FW in FCoE enable ramrod
- */
-struct fcoe_conn_enable_disable_ramrod_params {
- struct fcoe_kwqe_conn_enable_disable enable_disable_kwqe;
-};
-/*
- * FCoE connection offload params passed by driver to FW in FCoE offload ramrod
- */
-struct fcoe_conn_offload_ramrod_params {
- struct fcoe_kwqe_conn_offload1 offload_kwqe1;
- struct fcoe_kwqe_conn_offload2 offload_kwqe2;
- struct fcoe_kwqe_conn_offload3 offload_kwqe3;
- struct fcoe_kwqe_conn_offload4 offload_kwqe4;
+struct iscsi_task_context_entry_xuc_c_write_only {
+ u32 total_data_acked;
};
-/*
- * FCoE init params passed by driver to FW in FCoE init ramrod
- */
-struct fcoe_init_ramrod_params {
- struct fcoe_kwqe_init1 init_kwqe1;
- struct fcoe_kwqe_init2 init_kwqe2;
- struct fcoe_kwqe_init3 init_kwqe3;
- struct regpair eq_addr;
- struct regpair eq_next_page_addr;
+struct iscsi_task_context_r2t_table_entry {
+ u32 ttt;
+ u32 desired_data_len;
+};
+
+struct iscsi_task_context_entry_xuc_u_write_only {
+ u32 exp_r2t_sn;
+ struct iscsi_task_context_r2t_table_entry r2t_table[4];
#if defined(__BIG_ENDIAN)
- u16 sb_num;
- u16 eq_prod;
+ u16 data_in_count;
+ u8 cq_id;
+ u8 valid_1b;
#elif defined(__LITTLE_ENDIAN)
- u16 eq_prod;
- u16 sb_num;
+ u8 valid_1b;
+ u8 cq_id;
+ u16 data_in_count;
#endif
+};
+
+struct iscsi_task_context_entry_xuc {
+ struct iscsi_task_context_entry_xuc_c_write_only write_c;
+ u32 exp_data_transfer_len;
+ struct iscsi_task_context_entry_xuc_x_write_only write_x;
+ u32 lun_lo;
+ struct iscsi_task_context_entry_xuc_xu_write_both write_xu;
+ u32 lun_hi;
+ struct iscsi_task_context_entry_xuc_u_write_only write_u;
+};
+
+struct iscsi_task_context_entry_u {
+ u32 exp_r2t_buff_offset;
+ u32 rem_rcv_len;
+ u32 exp_data_sn;
+};
+
+struct iscsi_task_context_entry {
+ struct iscsi_task_context_entry_x tce_x;
#if defined(__BIG_ENDIAN)
- u16 reserved1;
- u8 reserved0;
- u8 sb_id;
+ u16 data_out_count;
+ u16 rsrv0;
#elif defined(__LITTLE_ENDIAN)
- u8 sb_id;
- u8 reserved0;
- u16 reserved1;
+ u16 rsrv0;
+ u16 data_out_count;
#endif
+ struct iscsi_task_context_entry_xuc tce_xuc;
+ struct iscsi_task_context_entry_u tce_u;
+ u32 rsrv1[7];
};
-/*
- * FCoE statistics params buffer passed by driver to FW in FCoE statistics ramrod
- */
-struct fcoe_stat_ramrod_params {
- struct fcoe_kwqe_stat stat_kwqe;
-};
-/*
- * FCoE 16-bits vlan structure
- */
-struct fcoe_vlan_fields {
- u16 fields;
-#define FCOE_VLAN_FIELDS_VID (0xFFF<<0)
-#define FCOE_VLAN_FIELDS_VID_SHIFT 0
-#define FCOE_VLAN_FIELDS_CLI (0x1<<12)
-#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
-#define FCOE_VLAN_FIELDS_PRI (0x7<<13)
-#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
-};
-/*
- * FCoE 16-bits vlan union
- */
-union fcoe_vlan_field_union {
- struct fcoe_vlan_fields fields;
- u16 val;
-};
-/*
- * Parameters used for Class 2 verifications
- */
-struct ustorm_fcoe_c2_params {
-#if defined(__BIG_ENDIAN)
- u16 e2e_credit;
- u16 con_seq;
-#elif defined(__LITTLE_ENDIAN)
- u16 con_seq;
- u16 e2e_credit;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 ackq_prod;
- u16 open_seq_per_exch;
-#elif defined(__LITTLE_ENDIAN)
- u16 open_seq_per_exch;
- u16 ackq_prod;
-#endif
- struct regpair ackq_pbl_base;
- struct regpair ackq_cur_seg;
-};
-/*
- * Parameters used for Class 2 verifications
- */
-struct xstorm_fcoe_c2_params {
-#if defined(__BIG_ENDIAN)
- u16 reserved0;
- u8 ackq_x_prod;
- u8 max_conc_seqs_c2;
-#elif defined(__LITTLE_ENDIAN)
- u8 max_conc_seqs_c2;
- u8 ackq_x_prod;
- u16 reserved0;
-#endif
- struct regpair ackq_pbl_base;
- struct regpair ackq_cur_seg;
+struct iscsi_task_context_entry_xuc_x_init_only {
+ struct regpair lun;
+ u32 exp_data_transfer_len;
};
-/*
- * Buffer per connection, used in Tstorm
- */
-struct iscsi_conn_buf {
- struct regpair reserved[8];
-};
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
/*
* ipv6 structure
@@ -4379,6 +5067,8 @@ struct ip_v6_addr {
u32 ip_addr_hi_hi;
};
+
+
/*
* l5cm- connection identification params
*/
@@ -4460,8 +5150,7 @@ struct l5cm_xstorm_conn_buffer {
* l5cm-tstorm connection buffer
*/
struct l5cm_tstorm_conn_buffer {
- u32 snd_buf;
- u32 rcv_buf;
+ u32 rsrv1[2];
#if defined(__BIG_ENDIAN)
u16 params;
#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0)
@@ -4493,6 +5182,72 @@ struct l5cm_active_conn_buffer {
struct l5cm_tstorm_conn_buffer tstorm_conn_buffer;
};
+
+
+/*
+ * The l5cm opaque buffer passed in the add-new-connection ramrod (passive side)
+ */
+struct l5cm_hash_input_string {
+ u32 __opaque1;
+#if defined(__BIG_ENDIAN)
+ u16 __opaque3;
+ u16 __opaque2;
+#elif defined(__LITTLE_ENDIAN)
+ u16 __opaque2;
+ u16 __opaque3;
+#endif
+ struct ip_v6_addr __opaque4;
+ struct ip_v6_addr __opaque5;
+ u32 __opaque6;
+ u32 __opaque7[5];
+};
+
+
+/*
+ * syn cookie component
+ */
+struct l5cm_syn_cookie_comp {
+ u32 __opaque;
+};
+
+/*
+ * data related to listeners of a TCP port
+ */
+struct l5cm_port_listener_data {
+ u8 params;
+#define L5CM_PORT_LISTENER_DATA_ENABLE (0x1<<0)
+#define L5CM_PORT_LISTENER_DATA_ENABLE_SHIFT 0
+#define L5CM_PORT_LISTENER_DATA_IP_INDEX (0xF<<1)
+#define L5CM_PORT_LISTENER_DATA_IP_INDEX_SHIFT 1
+#define L5CM_PORT_LISTENER_DATA_NET_FILTER (0x1<<5)
+#define L5CM_PORT_LISTENER_DATA_NET_FILTER_SHIFT 5
+#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE (0x1<<6)
+#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE_SHIFT 6
+#define L5CM_PORT_LISTENER_DATA_MPA_MODE (0x1<<7)
+#define L5CM_PORT_LISTENER_DATA_MPA_MODE_SHIFT 7
+};
+
+/*
+ * Opaque structure passed from U to X when final ack arrives
+ */
+struct l5cm_opaque_buf {
+ u32 __opaque1;
+ u32 __opaque2;
+ u32 __opaque3;
+ u32 __opaque4;
+ struct l5cm_syn_cookie_comp __opaque5;
+#if defined(__BIG_ENDIAN)
+ u16 rsrv2;
+ u8 rsrv;
+ struct l5cm_port_listener_data __opaque6;
+#elif defined(__LITTLE_ENDIAN)
+ struct l5cm_port_listener_data __opaque6;
+ u8 rsrv;
+ u16 rsrv2;
+#endif
+};
+
+
/*
* l5cm slow path element
*/
@@ -4501,6 +5256,109 @@ struct l5cm_packet_size {
u32 rsrv;
};
+
+/*
+ * The final-ack union structure in PCS entry after final ack arrived
+ */
+struct l5cm_pcse_ack {
+ struct l5cm_xstorm_conn_buffer tx_socket_params;
+ struct l5cm_opaque_buf opaque_buf;
+ struct l5cm_tstorm_conn_buffer rx_socket_params;
+};
+
+
+/*
+ * The syn union structure in PCS entry after syn arrived
+ */
+struct l5cm_pcse_syn {
+ struct l5cm_opaque_buf opaque_buf;
+ u32 rsrv[12];
+};
+
+
+/*
+ * pcs entry data for passive connections
+ */
+struct l5cm_pcs_attributes {
+#if defined(__BIG_ENDIAN)
+ u16 pcs_id;
+ u8 status;
+ u8 flags;
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0)
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1)
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2)
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3)
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4)
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5)
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6)
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6
+#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7)
+#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+ u8 flags;
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0)
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1)
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2)
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3)
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4)
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5)
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6)
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6
+#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7)
+#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7
+ u8 status;
+ u16 pcs_id;
+#endif
+};
+
+
+union l5cm_seg_params {
+ struct l5cm_pcse_syn syn_seg_params;
+ struct l5cm_pcse_ack ack_seg_params;
+};
+
+/*
+ * pcs entry data for passive connections
+ */
+struct l5cm_pcs_hdr {
+ struct l5cm_hash_input_string hash_input_string;
+ struct l5cm_conn_addr_params conn_addr_buf;
+ u32 cid;
+ u32 hash_result;
+ union l5cm_seg_params seg_params;
+ struct l5cm_pcs_attributes att;
+#if defined(__BIG_ENDIAN)
+ u16 rsrv;
+ u16 rx_seg_size;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_seg_size;
+ u16 rsrv;
+#endif
+};
+
+/*
+ * pcs entry for passive connections
+ */
+struct l5cm_pcs_entry {
+ struct l5cm_pcs_hdr hdr;
+ u8 rx_segment[1516];
+};
+
+
+
+
/*
* l5cm connection parameters
*/
@@ -4535,6 +5393,29 @@ struct l5cm_spe {
union l5cm_specific_data data;
};
+
+
+
+/*
+ * Termination variables
+ */
+struct l5cm_term_vars {
+ u8 BitMap;
+#define L5CM_TERM_VARS_TCP_STATE (0xF<<0)
+#define L5CM_TERM_VARS_TCP_STATE_SHIFT 0
+#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4)
+#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4
+#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5)
+#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5
+#define L5CM_TERM_VARS_TERM_ON_CHIP (0x1<<6)
+#define L5CM_TERM_VARS_TERM_ON_CHIP_SHIFT 6
+#define L5CM_TERM_VARS_RSRV (0x1<<7)
+#define L5CM_TERM_VARS_RSRV_SHIFT 7
+};
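The termination state fits in one packed byte, so readers go through the mask/shift pairs. A small decoding sketch, assuming linux/types.h for u8/bool; the function name is hypothetical:

	/* Sketch only: extract the 4-bit TCP state and the FIN flag from the
	 * packed termination byte. */
	static void example_decode_term_vars(const struct l5cm_term_vars *tv,
					     u8 *tcp_state, bool *fin_received)
	{
		*tcp_state = (tv->BitMap & L5CM_TERM_VARS_TCP_STATE) >>
			     L5CM_TERM_VARS_TCP_STATE_SHIFT;
		*fin_received = !!(tv->BitMap & L5CM_TERM_VARS_FIN_RECEIVED_SBIT);
	}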
+
+
+
+
/*
* Tstorm Tcp flags
*/
@@ -4550,6 +5431,7 @@ struct tstorm_l5cm_tcp_flags {
#define TSTORM_L5CM_TCP_FLAGS_RSRV1_SHIFT 14
};
+
/*
* Xstorm Tcp flags
*/
@@ -4565,4 +5447,38 @@ struct xstorm_l5cm_tcp_flags {
#define XSTORM_L5CM_TCP_FLAGS_RSRV_SHIFT 3
};
-#endif /* CNIC_DEFS_H */
+
+
+/*
+ * Out-of-order events
+ */
+enum tcp_ooo_event {
+ TCP_EVENT_ADD_PEN = 0,
+ TCP_EVENT_ADD_NEW_ISLE = 1,
+ TCP_EVENT_ADD_ISLE_RIGHT = 2,
+ TCP_EVENT_ADD_ISLE_LEFT = 3,
+ TCP_EVENT_JOIN = 4,
+ TCP_EVENT_NOP = 5,
+ MAX_TCP_OOO_EVENT
+};
+
+
+/*
+ * OOO support modes
+ */
+enum tcp_tstorm_ooo {
+ TCP_TSTORM_OOO_DROP_AND_PROC_ACK = 0,
+ TCP_TSTORM_OOO_SEND_PURE_ACK = 1,
+ TCP_TSTORM_OOO_SUPPORTED = 2,
+ MAX_TCP_TSTORM_OOO
+};
+
+
+
+
+
+
+
+
+
+#endif /* __5710_HSI_CNIC_LE__ */
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index fdd8e46a905..79443e0dbf9 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.2.14"
-#define CNIC_MODULE_RELDATE "Mar 30, 2011"
+#define CNIC_MODULE_VERSION "2.5.7"
+#define CNIC_MODULE_RELDATE "July 20, 2011"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -99,6 +99,8 @@ struct kcqe {
struct cnic_ctl_completion {
u32 cid;
+ u8 opcode;
+ u8 error;
};
struct cnic_ctl_info {
@@ -169,7 +171,7 @@ struct cnic_eth_dev {
struct pci_dev *pdev;
void __iomem *io_base;
void __iomem *io_base2;
- void *iro_arr;
+ const void *iro_arr;
u32 ctx_tbl_offset;
u32 ctx_tbl_len;
@@ -179,6 +181,11 @@ struct cnic_eth_dev {
u32 max_fcoe_conn;
u32 max_rdma_conn;
u32 fcoe_init_cid;
+ u32 fcoe_wwn_port_name_hi;
+ u32 fcoe_wwn_port_name_lo;
+ u32 fcoe_wwn_node_name_hi;
+ u32 fcoe_wwn_node_name_lo;
+
u16 iscsi_l2_client_id;
u16 iscsi_l2_cid;
u8 iscsi_mac[ETH_ALEN];
@@ -311,7 +318,7 @@ struct cnic_ulp_ops {
void (*cnic_stop)(void *ulp_ctx);
void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
u32 num_cqes);
- void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
+ void (*indicate_netevent)(void *ulp_ctx, unsigned long event, u16 vid);
void (*cm_connect_complete)(struct cnic_sock *);
void (*cm_close_complete)(struct cnic_sock *);
void (*cm_abort_complete)(struct cnic_sock *);
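The widened indicate_netevent hook now also carries the VLAN id of the device that raised the event, so a ULP can distinguish events on VLAN sub-interfaces from events on the base device. A hedged sketch of a callback matching the new prototype; the logging and the handling are illustrative, not how any in-tree ULP reacts:

	/* Sketch only: ULP-side handler for the three-argument prototype,
	 * suitable for wiring into struct cnic_ulp_ops .indicate_netevent. */
	static void example_indicate_netevent(void *ulp_ctx, unsigned long event,
					      u16 vid)
	{
		if (vid)
			pr_info("ulp %p: netdev event %lu on vlan %u\n",
				ulp_ctx, event, vid);
		else
			pr_info("ulp %p: netdev event %lu on base device\n",
				ulp_ctx, event);
	}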
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index fec939f8f65..e0638cb4b07 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
@@ -39,7 +40,7 @@
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/gpio.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 7300de5a142..8b395b53733 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -45,7 +45,6 @@
#include "t3cdev.h"
#include <asm/io.h>
-struct vlan_group;
struct adapter;
struct sge_qset;
struct port_info;
@@ -66,7 +65,6 @@ struct iscsi_config {
struct port_info {
struct adapter *adapter;
- struct vlan_group *vlan_grp;
struct sge_qset *qs;
u8 port_id;
u8 nqsets;
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 056ee8c831f..df01b634324 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -367,7 +367,6 @@ struct vpd_params {
struct pci_params {
unsigned int vpd_cap_addr;
- unsigned int pcie_cap_addr;
unsigned short speed;
unsigned char width;
unsigned char variant;
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 9081ce03714..93b41a7ac17 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2532,25 +2532,51 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
}
}
-static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+static void cxgb_vlan_mode(struct net_device *dev, u32 features)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- pi->vlan_grp = grp;
- if (adapter->params.rev > 0)
- t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
- else {
+ if (adapter->params.rev > 0) {
+ t3_set_vlan_accel(adapter, 1 << pi->port_id,
+ features & NETIF_F_HW_VLAN_RX);
+ } else {
/* single control for all ports */
- unsigned int i, have_vlans = 0;
+ unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
+
for_each_port(adapter, i)
- have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
+ have_vlans |=
+ adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
t3_set_vlan_accel(adapter, 1, have_vlans);
}
t3_synchronize_rx(adapter, pi);
}
+static u32 cxgb_fix_features(struct net_device *dev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int cxgb_set_features(struct net_device *dev, u32 features)
+{
+ u32 changed = dev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ cxgb_vlan_mode(dev, features);
+
+ return 0;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
@@ -3131,7 +3157,8 @@ static const struct net_device_ops cxgb_netdev_ops = {
.ndo_do_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
.ndo_set_mac_address = cxgb_set_mac_addr,
- .ndo_vlan_rx_register = vlan_rx_register,
+ .ndo_fix_features = cxgb_fix_features,
+ .ndo_set_features = cxgb_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
#endif
@@ -3263,9 +3290,8 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len - 1;
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_TSO | NETIF_F_RXCSUM;
- netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
+ netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
@@ -3329,6 +3355,9 @@ static int __devinit init_one(struct pci_dev *pdev,
err = sysfs_create_group(&adapter->port[0]->dev.kobj,
&cxgb3_attr_group);
+ for_each_port(adapter, i)
+ cxgb_vlan_mode(adapter->port[i], adapter->port[i]->features);
+
print_port_info(adapter, ai);
return 0;
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 862804f32b6..805076c54f1 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -34,7 +34,7 @@
#include <linux/slab.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
@@ -176,16 +176,13 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
int i;
for_each_port(adapter, i) {
- struct vlan_group *grp;
struct net_device *dev = adapter->port[i];
- const struct port_info *p = netdev_priv(dev);
if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
if (vlan && vlan != VLAN_VID_MASK) {
- grp = p->vlan_grp;
- dev = NULL;
- if (grp)
- dev = vlan_group_get_device(grp, vlan);
+ rcu_read_lock();
+ dev = __vlan_find_dev_deep(dev, vlan);
+ rcu_read_unlock();
} else if (netif_is_bond_slave(dev)) {
while (dev->master)
dev = dev->master;
@@ -567,7 +564,7 @@ static void t3_process_tid_release_list(struct work_struct *work)
while (td->tid_release_list) {
struct t3c_tid_entry *p = td->tid_release_list;
- td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
+ td->tid_release_list = p->ctx;
spin_unlock_bh(&td->tid_release_lock);
skb = alloc_skb(sizeof(struct cpl_tid_release),
@@ -971,7 +968,7 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
cxgb_redirect(nr->old, nr->new);
- cxgb_neigh_update(nr->new->neighbour);
+ cxgb_neigh_update(dst_get_neighbour(nr->new));
break;
}
default:
@@ -1116,8 +1113,8 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
struct l2t_entry *e;
struct t3c_tid_entry *te;
- olddev = old->neighbour->dev;
- newdev = new->neighbour->dev;
+ olddev = dst_get_neighbour(old)->dev;
+ newdev = dst_get_neighbour(new)->dev;
if (!is_offloading(olddev))
return;
if (!is_offloading(newdev)) {
@@ -1134,7 +1131,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
}
/* Add new L2T entry */
- e = t3_l2t_get(tdev, new->neighbour, newdev);
+ e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev);
if (!e) {
printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
__func__);
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
index fd3eb07e3f4..7a12d52ed4f 100644
--- a/drivers/net/cxgb3/l2t.h
+++ b/drivers/net/cxgb3/l2t.h
@@ -34,7 +34,7 @@
#include <linux/spinlock.h>
#include "t3cdev.h"
-#include <asm/atomic.h>
+#include <linux/atomic.h>
enum {
L2T_STATE_VALID, /* entry is up to date */
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 3f562ba2f0c..d6fa1777a34 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2026,30 +2026,13 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
skb_checksum_none_assert(skb);
- skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
-
- if (unlikely(p->vlan_valid)) {
- struct vlan_group *grp = pi->vlan_grp;
+ skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
+ if (p->vlan_valid) {
qs->port_stats[SGE_PSTAT_VLANEX]++;
- if (likely(grp))
- if (lro)
- vlan_gro_receive(&qs->napi, grp,
- ntohs(p->vlan), skb);
- else {
- if (unlikely(pi->iscsic.flags)) {
- unsigned short vtag = ntohs(p->vlan) &
- VLAN_VID_MASK;
- skb->dev = vlan_group_get_device(grp,
- vtag);
- cxgb3_process_iscsi_prov_pack(pi, skb);
- }
- __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
- rq->polling);
- }
- else
- dev_kfree_skb_any(skb);
- } else if (rq->polling) {
+ __vlan_hwaccel_put_tag(skb, ntohs(p->vlan));
+ }
+ if (rq->polling) {
if (lro)
napi_gro_receive(&qs->napi, skb);
else {
@@ -2145,16 +2128,10 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
if (!complete)
return;
- skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
-
- if (unlikely(cpl->vlan_valid)) {
- struct vlan_group *grp = pi->vlan_grp;
+ skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
- if (likely(grp != NULL)) {
- vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan));
- return;
- }
- }
+ if (cpl->vlan_valid)
+ __vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
napi_gro_frags(&qs->napi);
}
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index c688421da9c..44ac2f40b64 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -3290,22 +3290,20 @@ static void config_pcie(struct adapter *adap)
unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
pci_read_config_word(adap->pdev,
- adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
+ adap->pdev->pcie_cap + PCI_EXP_DEVCTL,
&val);
pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
pci_read_config_word(adap->pdev, 0x2, &devid);
if (devid == 0x37) {
pci_write_config_word(adap->pdev,
- adap->params.pci.pcie_cap_addr +
- PCI_EXP_DEVCTL,
+ adap->pdev->pcie_cap + PCI_EXP_DEVCTL,
val & ~PCI_EXP_DEVCTL_READRQ &
~PCI_EXP_DEVCTL_PAYLOAD);
pldsize = 0;
}
- pci_read_config_word(adap->pdev,
- adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
+ pci_read_config_word(adap->pdev, adap->pdev->pcie_cap + PCI_EXP_LNKCTL,
&val);
fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
@@ -3429,12 +3427,11 @@ static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
static unsigned short speed_map[] = { 33, 66, 100, 133 };
u32 pci_mode, pcie_cap;
- pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ pcie_cap = pci_pcie_cap(adapter->pdev);
if (pcie_cap) {
u16 val;
p->variant = PCI_VARIANT_PCIE;
- p->pcie_cap_addr = pcie_cap;
pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
&val);
p->width = (val >> 4) & 0x3f;
diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h
index be55e9ae74d..705713b5663 100644
--- a/drivers/net/cxgb3/t3cdev.h
+++ b/drivers/net/cxgb3/t3cdev.h
@@ -33,7 +33,7 @@
#define _T3CDEV_H_
#include <linux/list.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index bc9982a4c1f..223a7f72343 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -276,7 +276,6 @@ enum {
};
struct adapter;
-struct vlan_group;
struct sge_rspq;
struct port_info {
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 7e3cfbe89e3..c9957b7f17b 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -3704,7 +3704,7 @@ static int __devinit init_one(struct pci_dev *pdev,
if (err) {
dev_warn(&pdev->dev, "only %d net devices registered\n", i);
err = 0;
- };
+ }
if (cxgb4_debugfs_root) {
adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
index 1b48c017014..b1d39b8d141 100644
--- a/drivers/net/cxgb4/cxgb4_uld.h
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -38,7 +38,7 @@
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/* CPL message priority levels */
enum {
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h
index 7bd8f42378f..02b31d0c641 100644
--- a/drivers/net/cxgb4/l2t.h
+++ b/drivers/net/cxgb4/l2t.h
@@ -37,7 +37,7 @@
#include <linux/spinlock.h>
#include <linux/if_ether.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct adapter;
struct l2t_data;
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
index 4fd821aadc8..594334d5c71 100644
--- a/drivers/net/cxgb4vf/adapter.h
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -40,6 +40,7 @@
#ifndef __CXGB4VF_ADAPTER_H__
#define __CXGB4VF_ADAPTER_H__
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
@@ -91,7 +92,6 @@ struct sge_rspq;
*/
struct port_info {
struct adapter *adapter; /* our adapter */
- struct vlan_group *vlan_grp; /* out VLAN group */
u16 viid; /* virtual interface ID */
s16 xact_addr_filt; /* index of our MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index e71c08e547e..ec799139dfe 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -33,7 +33,6 @@
* SOFTWARE.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -210,18 +209,8 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
* ======================
*/
-/*
- * Record our new VLAN Group and enable/disable hardware VLAN Tag extraction
- * based on whether the specified VLAN Group pointer is NULL or not.
- */
-static void cxgb4vf_vlan_rx_register(struct net_device *dev,
- struct vlan_group *grp)
-{
- struct port_info *pi = netdev_priv(dev);
- pi->vlan_grp = grp;
- t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, grp != NULL, 0);
-}
+
/*
* Perform the MAC and PHY actions needed to enable a "port" (Virtual
@@ -234,9 +223,9 @@ static int link_start(struct net_device *dev)
/*
* We do not set address filters and promiscuity here, the stack does
- * that step explicitly.
+ * that step explicitly. Enable vlan accel.
*/
- ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, -1,
+ ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
true);
if (ret == 0) {
ret = t4vf_change_mac(pi->adapter, pi->viid,
@@ -1103,6 +1092,32 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
+static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int cxgb4vf_set_features(struct net_device *dev, u32 features)
+{
+ struct port_info *pi = netdev_priv(dev);
+ u32 changed = dev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
+ features & NETIF_F_HW_VLAN_TX, 0);
+
+ return 0;
+}
+
/*
* Change the devices MAC address.
*/
@@ -2422,7 +2437,6 @@ static int __devinit enable_msix(struct adapter *adapter)
return err;
}
-#ifdef HAVE_NET_DEVICE_OPS
static const struct net_device_ops cxgb4vf_netdev_ops = {
.ndo_open = cxgb4vf_open,
.ndo_stop = cxgb4vf_stop,
@@ -2433,12 +2447,12 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb4vf_do_ioctl,
.ndo_change_mtu = cxgb4vf_change_mtu,
- .ndo_vlan_rx_register = cxgb4vf_vlan_rx_register,
+ .ndo_fix_features = cxgb4vf_fix_features,
+ .ndo_set_features = cxgb4vf_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb4vf_poll_controller,
#endif
};
-#endif
/*
* "Probe" a device: initialize a device and construct all kernel and driver
@@ -2603,31 +2617,15 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
+ NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM;
netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_HIGHDMA;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_RX;
+ netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_TX;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
-#ifdef HAVE_NET_DEVICE_OPS
netdev->netdev_ops = &cxgb4vf_netdev_ops;
-#else
- netdev->vlan_rx_register = cxgb4vf_vlan_rx_register;
- netdev->open = cxgb4vf_open;
- netdev->stop = cxgb4vf_stop;
- netdev->hard_start_xmit = t4vf_eth_xmit;
- netdev->get_stats = cxgb4vf_get_stats;
- netdev->set_rx_mode = cxgb4vf_set_rxmode;
- netdev->do_ioctl = cxgb4vf_do_ioctl;
- netdev->change_mtu = cxgb4vf_change_mtu;
- netdev->set_mac_address = cxgb4vf_set_mac_addr;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- netdev->poll_controller = cxgb4vf_poll_controller;
-#endif
-#endif
SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);
/*
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index 5fd75fdaa63..cffb328c46c 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -1491,20 +1491,10 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
- if (unlikely(pkt->vlan_ex)) {
- struct port_info *pi = netdev_priv(rxq->rspq.netdev);
- struct vlan_group *grp = pi->vlan_grp;
-
- rxq->stats.vlan_ex++;
- if (likely(grp)) {
- ret = vlan_gro_frags(&rxq->rspq.napi, grp,
- be16_to_cpu(pkt->vlan));
- goto stats;
- }
- }
+ if (pkt->vlan_ex)
+ __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
ret = napi_gro_frags(&rxq->rspq.napi);
-stats:
if (ret == GRO_HELD)
rxq->stats.lro_pkts++;
else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
@@ -1525,7 +1515,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
const struct pkt_gl *gl)
{
struct sk_buff *skb;
- struct port_info *pi;
const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
bool csum_ok = pkt->csum_calc && !pkt->err_vec;
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
@@ -1553,7 +1542,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
__skb_pull(skb, PKTSHIFT);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
- pi = netdev_priv(skb->dev);
rxq->stats.pkts++;
if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
@@ -1569,20 +1557,12 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
} else
skb_checksum_none_assert(skb);
- /*
- * Deliver the packet to the stack.
- */
- if (unlikely(pkt->vlan_ex)) {
- struct vlan_group *grp = pi->vlan_grp;
-
+ if (pkt->vlan_ex) {
rxq->stats.vlan_ex++;
- if (likely(grp))
- vlan_hwaccel_receive_skb(skb, grp,
- be16_to_cpu(pkt->vlan));
- else
- dev_kfree_skb_any(skb);
- } else
- netif_receive_skb(skb);
+ __vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+ }
+
+ netif_receive_skb(skb);
return 0;
}
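This is the same conversion applied throughout the series: rather than looking up a struct vlan_group and delivering through the vlan_hwaccel helpers, the driver records the stripped tag on the skb and hands every packet to the ordinary receive path, leaving VLAN demultiplexing to the core. A standalone sketch of the reduced RX tail, assuming linux/if_vlan.h and linux/netdevice.h are included; the function is illustrative, not driver code:

	/* Sketch only: post-vlan_group RX delivery.  vlan_tci is the tag the
	 * NIC already stripped from the frame, if any. */
	static void example_deliver_rx_skb(struct napi_struct *napi,
					   struct sk_buff *skb,
					   bool vlan_stripped, u16 vlan_tci)
	{
		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, vlan_tci);	/* core demuxes to the VLAN device */
		napi_gro_receive(napi, skb);
	}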
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index 192db226ec7..fe3fd3dad6f 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -33,7 +33,6 @@
* SOFTWARE.
*/
-#include <linux/version.h>
#include <linux/pci.h>
#include "t4vf_common.h"
diff --git a/drivers/net/davinci_cpdma.c b/drivers/net/davinci_cpdma.c
index ae47f23ba93..dca9d3369cd 100644
--- a/drivers/net/davinci_cpdma.c
+++ b/drivers/net/davinci_cpdma.c
@@ -167,7 +167,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
} else {
pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
GFP_KERNEL);
- pool->iomap = (void __force __iomem *)pool->cpumap;
+ pool->iomap = pool->cpumap;
pool->hw_addr = pool->phys;
}
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index dcc4a170b0f..3f451e4d836 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -48,7 +48,6 @@
#include <linux/highmem.h>
#include <linux/proc_fs.h>
#include <linux/ctype.h>
-#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
@@ -1083,6 +1082,8 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
goto fail_tx;
}
+ skb_tx_timestamp(skb);
+
ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
GFP_KERNEL);
if (unlikely(ret_code != 0)) {
@@ -1489,14 +1490,14 @@ static void emac_adjust_link(struct net_device *ndev)
*/
static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
{
- dev_warn(&ndev->dev, "DaVinci EMAC: ioctl not supported\n");
+ struct emac_priv *priv = netdev_priv(ndev);
if (!(netif_running(ndev)))
return -EINVAL;
/* TODO: Add phy read and write and private statistics get feature */
- return -EOPNOTSUPP;
+ return phy_mii_ioctl(priv->phydev, ifrq, cmd);
}
static int match_first_device(struct device *dev, void *data)
@@ -1821,7 +1822,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
}
priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
- size = res->end - res->start + 1;
+ size = resource_size(res);
if (!request_mem_region(res->start, size, ndev->name)) {
dev_err(&pdev->dev, "failed request_mem_region() for regs\n");
rc = -ENXIO;
@@ -1926,7 +1927,7 @@ no_irq_res:
cpdma_ctlr_destroy(priv->dma);
no_dma:
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
iounmap(priv->remap_addr);
probe_quit:
@@ -1960,7 +1961,7 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
cpdma_chan_destroy(priv->rxchan);
cpdma_ctlr_destroy(priv->dma);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
unregister_netdev(ndev);
iounmap(priv->remap_addr);
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 219eb5ad5c1..d5598f6584a 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -326,15 +326,18 @@ static void load_csrs(struct lance_private *lp)
*/
static void cp_to_buf(const int type, void *to, const void *from, int len)
{
- unsigned short *tp, *fp, clen;
- unsigned char *rtp, *rfp;
+ unsigned short *tp;
+ const unsigned short *fp;
+ unsigned short clen;
+ unsigned char *rtp;
+ const unsigned char *rfp;
if (type == PMAD_LANCE) {
memcpy(to, from, len);
} else if (type == PMAX_LANCE) {
clen = len >> 1;
- tp = (unsigned short *) to;
- fp = (unsigned short *) from;
+ tp = to;
+ fp = from;
while (clen--) {
*tp++ = *fp++;
@@ -342,8 +345,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
}
clen = len & 1;
- rtp = (unsigned char *) tp;
- rfp = (unsigned char *) fp;
+ rtp = tp;
+ rfp = fp;
while (clen--) {
*rtp++ = *rfp++;
}
@@ -352,8 +355,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
* copy 16 Byte chunks
*/
clen = len >> 4;
- tp = (unsigned short *) to;
- fp = (unsigned short *) from;
+ tp = to;
+ fp = from;
while (clen--) {
*tp++ = *fp++;
*tp++ = *fp++;
@@ -382,15 +385,18 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
static void cp_from_buf(const int type, void *to, const void *from, int len)
{
- unsigned short *tp, *fp, clen;
- unsigned char *rtp, *rfp;
+ unsigned short *tp;
+ const unsigned short *fp;
+ unsigned short clen;
+ unsigned char *rtp;
+ const unsigned char *rfp;
if (type == PMAD_LANCE) {
memcpy(to, from, len);
} else if (type == PMAX_LANCE) {
clen = len >> 1;
- tp = (unsigned short *) to;
- fp = (unsigned short *) from;
+ tp = to;
+ fp = from;
while (clen--) {
*tp++ = *fp++;
fp++;
@@ -398,8 +404,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
clen = len & 1;
- rtp = (unsigned char *) tp;
- rfp = (unsigned char *) fp;
+ rtp = tp;
+ rfp = fp;
while (clen--) {
*rtp++ = *rfp++;
@@ -410,8 +416,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
* copy 16 Byte chunks
*/
clen = len >> 4;
- tp = (unsigned short *) to;
- fp = (unsigned short *) from;
+ tp = to;
+ fp = from;
while (clen--) {
*tp++ = *fp++;
*tp++ = *fp++;
@@ -940,7 +946,6 @@ static void lance_load_multicast(struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
struct netdev_hw_addr *ha;
- char *addrs;
u32 crc;
/* set all multicast bits */
@@ -959,13 +964,7 @@ static void lance_load_multicast(struct net_device *dev)
/* Add addresses */
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
-
- /* multicast address? */
- if (!(*addrs & 1))
- continue;
-
- crc = ether_crc_le(ETH_ALEN, addrs);
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
crc = crc >> 26;
*lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
}
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 8b0084d17c8..f2015a85197 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -708,11 +708,11 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
/* Tx & Rx descriptors (aligned to a quadword boundary) */
offset = (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN;
- lp->rx_ring = (struct depca_rx_desc __iomem *) (lp->sh_mem + offset);
+ lp->rx_ring = lp->sh_mem + offset;
lp->rx_ring_offset = offset;
offset += (sizeof(struct depca_rx_desc) * NUM_RX_DESC);
- lp->tx_ring = (struct depca_tx_desc __iomem *) (lp->sh_mem + offset);
+ lp->tx_ring = lp->sh_mem + offset;
lp->tx_ring_offset = offset;
offset += (sizeof(struct depca_tx_desc) * NUM_TX_DESC);
@@ -1073,13 +1073,13 @@ static int depca_rx(struct net_device *dev)
i = DEPCA_PKT_STAT_SZ;
}
}
- if (buf[0] & 0x01) { /* Multicast/Broadcast */
- if ((*(s16 *) & buf[0] == -1) && (*(s16 *) & buf[2] == -1) && (*(s16 *) & buf[4] == -1)) {
+ if (is_multicast_ether_addr(buf)) {
+ if (is_broadcast_ether_addr(buf)) {
lp->pktStats.broadcast++;
} else {
lp->pktStats.multicast++;
}
- } else if ((*(s16 *) & buf[0] == *(s16 *) & dev->dev_addr[0]) && (*(s16 *) & buf[2] == *(s16 *) & dev->dev_addr[2]) && (*(s16 *) & buf[4] == *(s16 *) & dev->dev_addr[4])) {
+ } else if (compare_ether_addr(buf, dev->dev_addr) == 0) {
lp->pktStats.unicast++;
}
@@ -1270,7 +1270,6 @@ static void SetMulticastFilter(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
struct netdev_hw_addr *ha;
- char *addrs;
int i, j, bit, byte;
u16 hashcode;
u32 crc;
@@ -1285,19 +1284,15 @@ static void SetMulticastFilter(struct net_device *dev)
}
/* Add multicast addresses */
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
- if ((*addrs & 0x01) == 1) { /* multicast address? */
- crc = ether_crc(ETH_ALEN, addrs);
- hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
- for (j = 0; j < 5; j++) { /* ... in reverse order. */
- hashcode = (hashcode << 1) | ((crc >>= 1) & 1);
- }
-
-
- byte = hashcode >> 3; /* bit[3-5] -> byte in filter */
- bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
- lp->init_block.mcast_table[byte] |= bit;
+ crc = ether_crc(ETH_ALEN, ha->addr);
+ hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
+ for (j = 0; j < 5; j++) { /* ... in reverse order. */
+ hashcode = (hashcode << 1) | ((crc >>= 1) & 1);
}
+
+ byte = hashcode >> 3; /* bit[3-5] -> byte in filter */
+ bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
+ lp->init_block.mcast_table[byte] |= bit;
}
}
}
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 23179dbcedd..ed73e4a9350 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -221,13 +221,13 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_iounmap;
- np->tx_ring = (struct netdev_desc *) ring_space;
+ np->tx_ring = ring_space;
np->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_tx;
- np->rx_ring = (struct netdev_desc *) ring_space;
+ np->rx_ring = ring_space;
np->rx_ring_dma = ring_dma;
/* Parse eeprom data */
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index ee597e676ee..8ef31dc4704 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -24,6 +24,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
@@ -534,21 +535,35 @@ static int dm9000_set_eeprom(struct net_device *dev,
board_info_t *dm = to_dm9000_board(dev);
int offset = ee->offset;
int len = ee->len;
- int i;
+ int done;
/* EEPROM access is aligned to two bytes */
- if ((len & 1) != 0 || (offset & 1) != 0)
- return -EINVAL;
-
if (dm->flags & DM9000_PLATF_NO_EEPROM)
return -ENOENT;
if (ee->magic != DM_EEPROM_MAGIC)
return -EINVAL;
- for (i = 0; i < len; i += 2)
- dm9000_write_eeprom(dm, (offset + i) / 2, data + i);
+ while (len > 0) {
+ if (len & 1 || offset & 1) {
+ int which = offset & 1;
+ u8 tmp[2];
+
+ dm9000_read_eeprom(dm, offset / 2, tmp);
+ tmp[which] = *data;
+ dm9000_write_eeprom(dm, offset / 2, tmp);
+
+ done = 1;
+ } else {
+ dm9000_write_eeprom(dm, offset / 2, data);
+ done = 2;
+ }
+
+ data += done;
+ offset += done;
+ len -= done;
+ }
return 0;
}
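The rewritten dm9000_set_eeprom no longer rejects odd offsets or lengths: when a write starts or ends off a 16-bit boundary it reads the containing EEPROM word, patches the single byte, and writes the word back, advancing one or two bytes per loop iteration. The same idea as a generic sketch, with hypothetical word accessors standing in for dm9000_read_eeprom()/dm9000_write_eeprom():

	/* Sketch only: byte-granular writes on top of 16-bit word accessors.
	 * read_word()/write_word() are hypothetical stand-ins. */
	static void example_write_bytes(int offset, const u8 *data, int len,
					void (*read_word)(int word, u8 buf[2]),
					void (*write_word)(int word, const u8 *buf))
	{
		while (len > 0) {
			int done;

			if ((len & 1) || (offset & 1)) {
				u8 tmp[2];

				/* odd edge: read the word, patch one byte,
				 * write it back */
				read_word(offset / 2, tmp);
				tmp[offset & 1] = *data;
				write_word(offset / 2, tmp);
				done = 1;
			} else {
				/* aligned and at least two bytes left:
				 * write a whole word directly */
				write_word(offset / 2, data);
				done = 2;
			}
			data += done;
			offset += done;
			len -= done;
		}
	}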
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 8318ea06cb6..c1063d1540c 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
@@ -587,6 +588,8 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
dnet_writel(bp, irq_enable, INTR_ENB);
}
+ skb_tx_timestamp(skb);
+
/* free the buffer */
dev_kfree_skb(skb);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index e336c7937f0..c1352c60c29 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -149,6 +149,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 8676899120c..24f41da8c4b 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -215,7 +215,7 @@ struct e1000_adapter {
struct timer_list tx_fifo_stall_timer;
struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
- struct vlan_group *vlgrp;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 mng_vlan_id;
u32 bd_number;
u32 rx_buffer_len;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index ec0fa426cce..5548d464261 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -290,69 +290,6 @@ static int e1000_set_pauseparam(struct net_device *netdev,
return retval;
}
-static u32 e1000_get_rx_csum(struct net_device *netdev)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- return adapter->rx_csum;
-}
-
-static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- adapter->rx_csum = data;
-
- if (netif_running(netdev))
- e1000_reinit_locked(adapter);
- else
- e1000_reset(adapter);
- return 0;
-}
-
-static u32 e1000_get_tx_csum(struct net_device *netdev)
-{
- return (netdev->features & NETIF_F_HW_CSUM) != 0;
-}
-
-static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- if (hw->mac_type < e1000_82543) {
- if (!data)
- return -EINVAL;
- return 0;
- }
-
- if (data)
- netdev->features |= NETIF_F_HW_CSUM;
- else
- netdev->features &= ~NETIF_F_HW_CSUM;
-
- return 0;
-}
-
-static int e1000_set_tso(struct net_device *netdev, u32 data)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- if ((hw->mac_type < e1000_82544) ||
- (hw->mac_type == e1000_82547))
- return data ? -EINVAL : 0;
-
- if (data)
- netdev->features |= NETIF_F_TSO;
- else
- netdev->features &= ~NETIF_F_TSO;
-
- netdev->features &= ~NETIF_F_TSO6;
-
- e_info(probe, "TSO is %s\n", data ? "Enabled" : "Disabled");
- adapter->tso_force = true;
- return 0;
-}
-
static u32 e1000_get_msglevel(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -901,6 +838,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
+ E1000_WRITE_FLUSH();
msleep(10);
/* Test each interrupt */
@@ -919,6 +857,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMC, mask);
ew32(ICS, mask);
+ E1000_WRITE_FLUSH();
msleep(10);
if (adapter->test_icr & mask) {
@@ -936,6 +875,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMS, mask);
ew32(ICS, mask);
+ E1000_WRITE_FLUSH();
msleep(10);
if (!(adapter->test_icr & mask)) {
@@ -953,6 +893,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMC, ~mask & 0x00007FFF);
ew32(ICS, ~mask & 0x00007FFF);
+ E1000_WRITE_FLUSH();
msleep(10);
if (adapter->test_icr) {
@@ -964,6 +905,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
+ E1000_WRITE_FLUSH();
msleep(10);
/* Unhook test interrupt handler */
@@ -1457,6 +1399,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
if (unlikely(++k == txdr->count)) k = 0;
}
ew32(TDT, k);
+ E1000_WRITE_FLUSH();
msleep(200);
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
@@ -1905,12 +1848,6 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.set_ringparam = e1000_set_ringparam,
.get_pauseparam = e1000_get_pauseparam,
.set_pauseparam = e1000_set_pauseparam,
- .get_rx_csum = e1000_get_rx_csum,
- .set_rx_csum = e1000_set_rx_csum,
- .get_tx_csum = e1000_get_tx_csum,
- .set_tx_csum = e1000_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
- .set_tso = e1000_set_tso,
.self_test = e1000_diag_test,
.get_strings = e1000_get_strings,
.set_phys_id = e1000_set_phys_id,
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 7501d977d99..8545c7aa93e 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -446,6 +446,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
/* Must reset the PHY before resetting the MAC */
if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
+ E1000_WRITE_FLUSH();
msleep(5);
}
@@ -3080,7 +3081,6 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
u32 ctrl, ctrl_ext;
u32 led_ctrl;
- s32 ret_val;
e_dbg("e1000_phy_hw_reset");
@@ -3126,11 +3126,7 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
}
/* Wait for FW to finish PHY configuration. */
- ret_val = e1000_get_phy_cfg_done(hw);
- if (ret_val != E1000_SUCCESS)
- return ret_val;
-
- return ret_val;
+ return e1000_get_phy_cfg_done(hw);
}
/**
@@ -3757,6 +3753,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(1);
}
@@ -3829,6 +3826,7 @@ static void e1000_release_eeprom(struct e1000_hw *hw)
eecd &= ~E1000_EECD_SK; /* Lower SCK */
ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
udelay(hw->eeprom.delay_usec);
} else if (hw->eeprom.type == e1000_eeprom_microwire) {
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 76e8af00d86..f97afda941d 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -30,6 +30,8 @@
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
/* Intel Media SOC GbE MDIO physical base address */
static unsigned long ce4100_gbe_mdio_base_phy;
@@ -166,7 +168,8 @@ static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
struct sk_buff *skb);
-static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
+static bool e1000_vlan_used(struct e1000_adapter *adapter);
+static void e1000_vlan_mode(struct net_device *netdev, u32 features);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
@@ -330,21 +333,24 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u16 vid = hw->mng_cookie.vlan_id;
u16 old_vid = adapter->mng_vlan_id;
- if (adapter->vlgrp) {
- if (!vlan_group_get_device(adapter->vlgrp, vid)) {
- if (hw->mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
- e1000_vlan_rx_add_vid(netdev, vid);
- adapter->mng_vlan_id = vid;
- } else
- adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
- (vid != old_vid) &&
- !vlan_group_get_device(adapter->vlgrp, old_vid))
- e1000_vlan_rx_kill_vid(netdev, old_vid);
- } else
+ if (!e1000_vlan_used(adapter))
+ return;
+
+ if (!test_bit(vid, adapter->active_vlans)) {
+ if (hw->mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
+ e1000_vlan_rx_add_vid(netdev, vid);
adapter->mng_vlan_id = vid;
+ } else {
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ }
+ if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
+ (vid != old_vid) &&
+ !test_bit(old_vid, adapter->active_vlans))
+ e1000_vlan_rx_kill_vid(netdev, old_vid);
+ } else {
+ adapter->mng_vlan_id = vid;
}
}
@@ -797,6 +803,41 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
}
}
+static u32 e1000_fix_features(struct net_device *netdev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int e1000_set_features(struct net_device *netdev, u32 features)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u32 changed = features ^ netdev->features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ e1000_vlan_mode(netdev, features);
+
+ if (!(changed & NETIF_F_RXCSUM))
+ return 0;
+
+ adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
+
+ if (netif_running(netdev))
+ e1000_reinit_locked(adapter);
+ else
+ e1000_reset(adapter);
+
+ return 0;
+}
+
static const struct net_device_ops e1000_netdev_ops = {
.ndo_open = e1000_open,
.ndo_stop = e1000_close,
@@ -804,17 +845,17 @@ static const struct net_device_ops e1000_netdev_ops = {
.ndo_get_stats = e1000_get_stats,
.ndo_set_rx_mode = e1000_set_rx_mode,
.ndo_set_mac_address = e1000_set_mac,
- .ndo_tx_timeout = e1000_tx_timeout,
+ .ndo_tx_timeout = e1000_tx_timeout,
.ndo_change_mtu = e1000_change_mtu,
.ndo_do_ioctl = e1000_ioctl,
.ndo_validate_addr = eth_validate_addr,
-
- .ndo_vlan_rx_register = e1000_vlan_rx_register,
.ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = e1000_netpoll,
#endif
+ .ndo_fix_features = e1000_fix_features,
+ .ndo_set_features = e1000_set_features,
};
/**
@@ -1016,16 +1057,19 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
}
if (hw->mac_type >= e1000_82543) {
- netdev->features = NETIF_F_SG |
+ netdev->hw_features = NETIF_F_SG |
NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_RX;
+ netdev->features = NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_FILTER;
}
if ((hw->mac_type >= e1000_82544) &&
(hw->mac_type != e1000_82547))
- netdev->features |= NETIF_F_TSO;
+ netdev->hw_features |= NETIF_F_TSO;
+
+ netdev->features |= netdev->hw_features;
+ netdev->hw_features |= NETIF_F_RXCSUM;
if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
@@ -1175,6 +1219,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (err)
goto err_register;
+ e1000_vlan_mode(netdev, netdev->features);
+
/* print bus type/speed/width info */
e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
@@ -1419,8 +1465,7 @@ static int e1000_close(struct net_device *netdev)
* the same ID is registered on the host OS (let 8021q kill it) */
if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
- !(adapter->vlgrp &&
- vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
+ !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
}
@@ -2211,7 +2256,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
else
rctl &= ~E1000_RCTL_MPE;
/* Enable VLAN filter if there is a VLAN */
- if (adapter->vlgrp)
+ if (e1000_vlan_used(adapter))
rctl |= E1000_RCTL_VFE;
}
@@ -2357,13 +2402,16 @@ bool e1000_has_link(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
bool link_active = false;
- /* get_link_status is set on LSC (link status) interrupt or
- * rx sequence error interrupt. get_link_status will stay
- * false until the e1000_check_for_link establishes link
- * for copper adapters ONLY
+ /* get_link_status is set on LSC (link status) interrupt or rx
+ * sequence error interrupt (except on intel ce4100).
+ * get_link_status will stay false until the
+ * e1000_check_for_link establishes link for copper adapters
+ * ONLY
*/
switch (hw->media_type) {
case e1000_media_type_copper:
+ if (hw->mac_type == e1000_ce4100)
+ hw->get_link_status = 1;
if (hw->get_link_status) {
e1000_check_for_link(hw);
link_active = !hw->get_link_status;
@@ -3158,7 +3206,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
}
}
- if (unlikely(vlan_tx_tag_present(skb))) {
+ if (vlan_tx_tag_present(skb)) {
tx_flags |= E1000_TX_FLAGS_VLAN;
tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
}
@@ -3713,12 +3761,12 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
{
skb->protocol = eth_type_trans(skb, adapter->netdev);
- if ((unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))))
- vlan_gro_receive(&adapter->napi, adapter->vlgrp,
- le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK,
- skb);
- else
- napi_gro_receive(&adapter->napi, skb);
+ if (status & E1000_RXD_STAT_VP) {
+ u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
+ napi_gro_receive(&adapter->napi, skb);
}
/**
@@ -4501,46 +4549,61 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
outl(value, port);
}
-static void e1000_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp)
+static bool e1000_vlan_used(struct e1000_adapter *adapter)
+{
+ u16 vid;
+
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ return true;
+ return false;
+}
+
+static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
+ bool filter_on)
{
- struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 ctrl, rctl;
+ u32 rctl;
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_disable(adapter);
- adapter->vlgrp = grp;
-
- if (grp) {
- /* enable VLAN tag insert/strip */
- ctrl = er32(CTRL);
- ctrl |= E1000_CTRL_VME;
- ew32(CTRL, ctrl);
+ if (filter_on) {
/* enable VLAN receive filtering */
rctl = er32(RCTL);
rctl &= ~E1000_RCTL_CFIEN;
- if (!(netdev->flags & IFF_PROMISC))
+ if (!(adapter->netdev->flags & IFF_PROMISC))
rctl |= E1000_RCTL_VFE;
ew32(RCTL, rctl);
e1000_update_mng_vlan(adapter);
} else {
- /* disable VLAN tag insert/strip */
- ctrl = er32(CTRL);
- ctrl &= ~E1000_CTRL_VME;
- ew32(CTRL, ctrl);
-
/* disable VLAN receive filtering */
rctl = er32(RCTL);
rctl &= ~E1000_RCTL_VFE;
ew32(RCTL, rctl);
+ }
- if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
- e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
- adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- }
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_enable(adapter);
+}
+
+static void e1000_vlan_mode(struct net_device *netdev, u32 features)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ e1000_irq_disable(adapter);
+
+ ctrl = er32(CTRL);
+ if (features & NETIF_F_HW_VLAN_RX) {
+ /* enable VLAN tag insert/strip */
+ ctrl |= E1000_CTRL_VME;
+ } else {
+ /* disable VLAN tag insert/strip */
+ ctrl &= ~E1000_CTRL_VME;
}
+ ew32(CTRL, ctrl);
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
@@ -4556,11 +4619,17 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
(vid == adapter->mng_vlan_id))
return;
+
+ if (!e1000_vlan_used(adapter))
+ e1000_vlan_filter_on_off(adapter, true);
+
/* add VID to filter table */
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
vfta |= (1 << (vid & 0x1F));
e1000_write_vfta(hw, index, vfta);
+
+ set_bit(vid, adapter->active_vlans);
}
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -4571,7 +4640,6 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_disable(adapter);
- vlan_group_set_device(adapter->vlgrp, vid, NULL);
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
@@ -4580,20 +4648,23 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
vfta &= ~(1 << (vid & 0x1F));
e1000_write_vfta(hw, index, vfta);
+
+ clear_bit(vid, adapter->active_vlans);
+
+ if (!e1000_vlan_used(adapter))
+ e1000_vlan_filter_on_off(adapter, false);
}
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
- e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+ u16 vid;
- if (adapter->vlgrp) {
- u16 vid;
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- if (!vlan_group_get_device(adapter->vlgrp, vid))
- continue;
- e1000_vlan_rx_add_vid(adapter->netdev, vid);
- }
- }
+ if (!e1000_vlan_used(adapter))
+ return;
+
+ e1000_vlan_filter_on_off(adapter, true);
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ e1000_vlan_rx_add_vid(adapter->netdev, vid);
}
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
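
A minimal sketch of the VLAN bookkeeping the e1000 hunks above switch to: instead of a struct vlan_group, active VIDs are tracked in a plain bitmap, and the driver only ever asks "is this VID active?" and "is any VID active?". The helpers below mirror test_bit()/set_bit()/for_each_set_bit() in spirit but are ordinary userspace C; the names are illustrative, not kernel API.

#include <stdbool.h>
#include <stdio.h>

#define VLAN_N_VID	4096
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

static void vid_set(unsigned int vid)
{
	active_vlans[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
}

static void vid_clear(unsigned int vid)
{
	active_vlans[vid / BITS_PER_LONG] &= ~(1UL << (vid % BITS_PER_LONG));
}

static bool vid_test(unsigned int vid)
{
	return active_vlans[vid / BITS_PER_LONG] & (1UL << (vid % BITS_PER_LONG));
}

/* analogue of e1000_vlan_used(): true if any VID bit is set */
static bool vlan_used(void)
{
	unsigned int i;

	for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
		if (active_vlans[i])
			return true;
	return false;
}

int main(void)
{
	vid_set(100);
	vid_set(200);
	printf("vid 100 active: %d, any active: %d\n", vid_test(100), vlan_used());
	vid_clear(100);
	vid_clear(200);
	printf("any active after clear: %d\n", vlan_used());
	return 0;
}
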
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 8295f219243..536b3a55c45 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -431,8 +431,6 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
adapter->flags &= ~FLAG_HAS_WOL;
break;
case e1000_82573:
- case e1000_82574:
- case e1000_82583:
if (pdev->device == E1000_DEV_ID_82573L) {
adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
adapter->max_hw_frame_size = DEFAULT_JUMBO;
@@ -2087,7 +2085,8 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_CHECK_PHY_HANG
- | FLAG2_DISABLE_ASPM_L0S,
+ | FLAG2_DISABLE_ASPM_L0S
+ | FLAG2_NO_DISABLE_RX,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@@ -2104,10 +2103,12 @@ struct e1000_info e1000_82583_info = {
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
+ | FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .flags2 = FLAG2_DISABLE_ASPM_L0S,
+ .flags2 = FLAG2_DISABLE_ASPM_L0S
+ | FLAG2_NO_DISABLE_RX,
.pba = 32,
- .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
+ .max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_bm,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 9549879e66a..8533ad7f355 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -104,6 +104,7 @@ struct e1000_info;
(((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
/* PHY Wakeup Registers and defines */
+#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
@@ -122,20 +123,21 @@ struct e1000_info;
#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
-#define HV_SCC_UPPER PHY_REG(778, 16) /* Single Collision Count */
-#define HV_SCC_LOWER PHY_REG(778, 17)
-#define HV_ECOL_UPPER PHY_REG(778, 18) /* Excessive Collision Count */
-#define HV_ECOL_LOWER PHY_REG(778, 19)
-#define HV_MCC_UPPER PHY_REG(778, 20) /* Multiple Collision Count */
-#define HV_MCC_LOWER PHY_REG(778, 21)
-#define HV_LATECOL_UPPER PHY_REG(778, 23) /* Late Collision Count */
-#define HV_LATECOL_LOWER PHY_REG(778, 24)
-#define HV_COLC_UPPER PHY_REG(778, 25) /* Collision Count */
-#define HV_COLC_LOWER PHY_REG(778, 26)
-#define HV_DC_UPPER PHY_REG(778, 27) /* Defer Count */
-#define HV_DC_LOWER PHY_REG(778, 28)
-#define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */
-#define HV_TNCRS_LOWER PHY_REG(778, 30)
+#define HV_STATS_PAGE 778
+#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */
+#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
+#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */
+#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
+#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */
+#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
+#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */
+#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
+#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */
+#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
+#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
+#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
+#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */
+#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
@@ -153,6 +155,9 @@ struct e1000_info;
#define HV_M_STATUS_SPEED_1000 0x0200
#define HV_M_STATUS_LINK_UP 0x0040
+#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
+#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
+
/* Time to wait before putting the device into D3 if there's no link (in ms). */
#define LINK_TIMEOUT 100
@@ -197,11 +202,6 @@ enum e1000_boards {
board_pch2lan,
};
-struct e1000_queue_stats {
- u64 packets;
- u64 bytes;
-};
-
struct e1000_ps_page {
struct page *page;
u64 dma; /* must be u64 - written to hw */
@@ -255,8 +255,6 @@ struct e1000_ring {
int set_itr;
struct sk_buff *rx_skb_top;
-
- struct e1000_queue_stats stats;
};
/* PHY register snapshot values */
@@ -339,7 +337,7 @@ struct e1000_adapter {
int *work_done, int work_to_do)
____cacheline_aligned_in_smp;
void (*alloc_rx_buf) (struct e1000_adapter *adapter,
- int cleaned_count);
+ int cleaned_count, gfp_t gfp);
struct e1000_ring *rx_ring;
u32 rx_int_delay;
@@ -458,6 +456,8 @@ struct e1000_info {
#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
#define FLAG2_DISABLE_AIM (1 << 8)
#define FLAG2_CHECK_PHY_HANG (1 << 9)
+#define FLAG2_NO_DISABLE_RX (1 << 10)
+#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -533,7 +533,8 @@ extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
-extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
+extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
+extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
@@ -584,6 +585,7 @@ extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
+extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
u16 *data);
@@ -604,6 +606,10 @@ extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
+ u16 *phy_reg);
+extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
+ u16 *phy_reg);
extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
@@ -624,9 +630,13 @@ extern s32 e1000e_check_downshift(struct e1000_hw *hw);
extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
u16 *data);
+extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data);
extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
u16 data);
+extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
+ u16 data);
extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index f4bbeb22f51..e4f42257c24 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -836,6 +836,7 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
struct e1000_mac_info *mac = &hw->mac;
u32 reg_data;
s32 ret_val;
+ u16 kum_reg_data;
u16 i;
e1000_initialize_hw_bits_80003es2lan(hw);
@@ -861,6 +862,13 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* Setup link and flow control */
ret_val = e1000e_setup_link(hw);
+ /* Disable IBIST slave mode (far-end loopback) */
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+ e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ kum_reg_data);
+
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
@@ -1305,6 +1313,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
udelay(2);
@@ -1339,6 +1348,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
udelay(2);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 859d0d3af6c..6a0526a59a8 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -29,6 +29,7 @@
/* ethtool support for e1000 */
#include <linux/netdevice.h>
+#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -963,6 +964,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
+ e1e_flush();
usleep_range(10000, 20000);
/* Test each interrupt */
@@ -995,6 +997,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMC, mask);
ew32(ICS, mask);
+ e1e_flush();
usleep_range(10000, 20000);
if (adapter->test_icr & mask) {
@@ -1013,6 +1016,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMS, mask);
ew32(ICS, mask);
+ e1e_flush();
usleep_range(10000, 20000);
if (!(adapter->test_icr & mask)) {
@@ -1031,6 +1035,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMC, ~mask & 0x00007FFF);
ew32(ICS, ~mask & 0x00007FFF);
+ e1e_flush();
usleep_range(10000, 20000);
if (adapter->test_icr) {
@@ -1042,6 +1047,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
+ e1e_flush();
usleep_range(10000, 20000);
/* Unhook test interrupt handler */
@@ -1200,7 +1206,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rx_ring->next_to_clean = 0;
rctl = er32(RCTL);
- ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
ew32(RDBAH, ((u64) rx_ring->dma >> 32));
ew32(RDLEN, rx_ring->size);
@@ -1275,6 +1282,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
E1000_CTRL_FD); /* Force Duplex to FULL */
ew32(CTRL, ctrl_reg);
+ e1e_flush();
udelay(500);
return 0;
@@ -1417,6 +1425,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
*/
#define E1000_SERDES_LB_ON 0x410
ew32(SCTL, E1000_SERDES_LB_ON);
+ e1e_flush();
usleep_range(10000, 20000);
return 0;
@@ -1512,6 +1521,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
hw->phy.media_type == e1000_media_type_internal_serdes) {
#define E1000_SERDES_LB_OFF 0x400
ew32(SCTL, E1000_SERDES_LB_OFF);
+ e1e_flush();
usleep_range(10000, 20000);
break;
}
@@ -1591,6 +1601,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
k = 0;
}
ew32(TDT, k);
+ e1e_flush();
msleep(200);
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 6c2fa8327f5..29670397079 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -246,6 +246,7 @@ enum e1e_registers {
#define BM_WUC_ENABLE_REG 17
#define BM_WUC_ENABLE_BIT (1 << 2)
#define BM_WUC_HOST_WU_BIT (1 << 4)
+#define BM_WUC_ME_WU_BIT (1 << 5)
#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
@@ -312,6 +313,7 @@ enum e1e_registers {
#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002
@@ -777,7 +779,21 @@ struct e1000_mac_operations {
s32 (*read_mac_addr)(struct e1000_hw *);
};
-/* Function pointers for the PHY. */
+/*
+ * When to use various PHY register access functions:
+ *
+ * Func Caller
+ * Function Does Does When to use
+ * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * X_reg L,P,A n/a for simple PHY reg accesses
+ * X_reg_locked P,A L for multiple accesses of different regs
+ * on different pages
+ * X_reg_page A L,P for multiple accesses of different regs
+ * on the same page
+ *
+ * Where X=[read|write], L=locking, P=sets page, A=register access
+ *
+ */
struct e1000_phy_operations {
s32 (*acquire)(struct e1000_hw *);
s32 (*cfg_on_link_up)(struct e1000_hw *);
@@ -788,14 +804,17 @@ struct e1000_phy_operations {
s32 (*get_cfg_done)(struct e1000_hw *hw);
s32 (*get_cable_length)(struct e1000_hw *);
s32 (*get_info)(struct e1000_hw *);
+ s32 (*set_page)(struct e1000_hw *, u16);
s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *);
void (*release)(struct e1000_hw *);
s32 (*reset)(struct e1000_hw *);
s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
s32 (*write_reg)(struct e1000_hw *, u32, u16);
s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg_page)(struct e1000_hw *, u32, u16);
void (*power_up)(struct e1000_hw *);
void (*power_down)(struct e1000_hw *);
};
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 3369d1f6a39..54add27c8f7 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -137,8 +137,9 @@
#define HV_PM_CTRL PHY_REG(770, 17)
/* PHY Low Power Idle Control */
-#define I82579_LPI_CTRL PHY_REG(772, 20)
-#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+#define I82579_LPI_CTRL PHY_REG(772, 20)
+#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
/* EMI Registers */
#define I82579_EMI_ADDR 0x10
@@ -163,6 +164,11 @@
#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
#define HV_KMRN_MDIO_SLOW 0x0400
+/* KMRN FIFO Control and Status */
+#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
+
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
@@ -275,6 +281,20 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
#define ew16flash(reg,val) __ew16flash(hw, (reg), (val))
#define ew32flash(reg,val) __ew32flash(hw, (reg), (val))
+static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
+ ew32(CTRL, ctrl);
+ e1e_flush();
+ udelay(10);
+ ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+ ew32(CTRL, ctrl);
+}
+
/**
* e1000_init_phy_params_pchlan - Initialize PHY function pointers
* @hw: pointer to the HW structure
@@ -284,18 +304,21 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- u32 ctrl, fwsm;
+ u32 fwsm;
s32 ret_val = 0;
phy->addr = 1;
phy->reset_delay_us = 100;
+ phy->ops.set_page = e1000_set_page_igp;
phy->ops.read_reg = e1000_read_phy_reg_hv;
phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
+ phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
phy->ops.write_reg = e1000_write_phy_reg_hv;
phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
+ phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
phy->ops.power_up = e1000_power_up_phy_copper;
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
@@ -308,13 +331,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
*/
fwsm = er32(FWSM);
if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) {
- ctrl = er32(CTRL);
- ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
- ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
- ew32(CTRL, ctrl);
- udelay(10);
- ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
- ew32(CTRL, ctrl);
+ e1000_toggle_lanphypc_value_ich8lan(hw);
msleep(50);
/*
@@ -646,6 +663,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val;
bool link;
+ u16 phy_reg;
/*
* We only want to go out to the PHY registers to see if Auto-Neg
@@ -678,16 +696,35 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
mac->get_link_status = false;
- if (hw->phy.type == e1000_phy_82578) {
- ret_val = e1000_link_stall_workaround_hv(hw);
- if (ret_val)
- goto out;
- }
-
- if (hw->mac.type == e1000_pch2lan) {
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
ret_val = e1000_k1_workaround_lv(hw);
if (ret_val)
goto out;
+ /* fall-thru */
+ case e1000_pchlan:
+ if (hw->phy.type == e1000_phy_82578) {
+ ret_val = e1000_link_stall_workaround_hv(hw);
+ if (ret_val)
+ goto out;
+ }
+
+ /*
+ * Workaround for PCHx parts in half-duplex:
+ * Set the number of preambles removed from the packet
+ * when it is passed from the PHY to the MAC to prevent
+ * the MAC from misinterpreting the packet type.
+ */
+ e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
+ phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
+
+ if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
+ phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
+
+ e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
+ break;
+ default:
+ break;
}
/*
@@ -777,6 +814,11 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
(adapter->hw.phy.type == e1000_phy_igp_3))
adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
+ /* Enable workaround for 82579 w/ ME enabled */
+ if ((adapter->hw.mac.type == e1000_pch2lan) &&
+ (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+ adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
+
/* Disable EEE by default until IEEE802.3az spec is finalized */
if (adapter->flags2 & FLAG2_HAS_EEE)
adapter->hw.dev_spec.ich8lan.eee_disable = true;
@@ -882,8 +924,13 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
u32 extcnf_ctrl;
extcnf_ctrl = er32(EXTCNF_CTRL);
- extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
- ew32(EXTCNF_CTRL, extcnf_ctrl);
+
+ if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
+ extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+ ew32(EXTCNF_CTRL, extcnf_ctrl);
+ } else {
+ e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
+ }
mutex_unlock(&swflag_mutex);
}
@@ -1215,9 +1262,11 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
ew32(CTRL, reg);
ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
+ e1e_flush();
udelay(20);
ew32(CTRL, ctrl_reg);
ew32(CTRL_EXT, ctrl_ext);
+ e1e_flush();
udelay(20);
out:
@@ -1337,7 +1386,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
return ret_val;
/* Preamble tuning for SSC */
- ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204);
+ ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
if (ret_val)
return ret_val;
}
@@ -1376,14 +1425,11 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
goto out;
- ret_val = hw->phy.ops.read_reg_locked(hw,
- PHY_REG(BM_PORT_CTRL_PAGE, 17),
- &phy_data);
+ ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
if (ret_val)
goto release;
- ret_val = hw->phy.ops.write_reg_locked(hw,
- PHY_REG(BM_PORT_CTRL_PAGE, 17),
- phy_data & 0x00FF);
+ ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
+ phy_data & 0x00FF);
release:
hw->phy.ops.release(hw);
out:
@@ -1397,17 +1443,36 @@ out:
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
{
u32 mac_reg;
- u16 i;
+ u16 i, phy_reg = 0;
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+ if (ret_val)
+ goto release;
/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
mac_reg = er32(RAL(i));
- e1e_wphy(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF));
- e1e_wphy(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
+ (u16)((mac_reg >> 16) & 0xFFFF));
+
mac_reg = er32(RAH(i));
- e1e_wphy(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF));
- e1e_wphy(hw, BM_RAR_CTRL(i), (u16)((mac_reg >> 16) & 0x8000));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
+ (u16)((mac_reg & E1000_RAH_AV)
+ >> 16));
}
+
+ e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+release:
+ hw->phy.ops.release(hw);
}
/**
@@ -1611,6 +1676,7 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
s32 ret_val = 0;
u16 status_reg = 0;
u32 mac_reg;
+ u16 phy_reg;
if (hw->mac.type != e1000_pch2lan)
goto out;
@@ -1625,12 +1691,19 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
mac_reg = er32(FEXTNVM4);
mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
- if (status_reg & HV_M_STATUS_SPEED_1000)
+ ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+ if (ret_val)
+ goto out;
+
+ if (status_reg & HV_M_STATUS_SPEED_1000) {
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
- else
+ phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+ } else {
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
-
+ phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+ }
ew32(FEXTNVM4, mac_reg);
+ ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
}
out:
@@ -1726,9 +1799,12 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
break;
}
- /* Dummy read to clear the phy wakeup bit after lcd reset */
- if (hw->mac.type >= e1000_pchlan)
- e1e_rphy(hw, BM_WUC, &reg);
+ /* Clear the host wakeup bit after lcd reset */
+ if (hw->mac.type >= e1000_pchlan) {
+ e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
+ reg &= ~BM_WUC_HOST_WU_BIT;
+ e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
+ }
/* Configure the LCD with the extended configuration region in NVM */
ret_val = e1000_sw_lcd_config_ich8lan(hw);
@@ -2100,8 +2176,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
ret_val = 0;
for (i = 0; i < words; i++) {
- if ((dev_spec->shadow_ram) &&
- (dev_spec->shadow_ram[offset+i].modified)) {
+ if (dev_spec->shadow_ram[offset+i].modified) {
data[i] = dev_spec->shadow_ram[offset+i].value;
} else {
ret_val = e1000_read_flash_word_ich8lan(hw,
@@ -3056,10 +3131,11 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ret_val = e1000_acquire_swflag_ich8lan(hw);
e_dbg("Issuing a global reset to ich8lan\n");
ew32(CTRL, (ctrl | E1000_CTRL_RST));
+ /* cannot issue a flush here because it hangs the hardware */
msleep(20);
if (!ret_val)
- e1000_release_swflag_ich8lan(hw);
+ mutex_unlock(&swflag_mutex);
if (ctrl & E1000_CTRL_PHY_RST) {
ret_val = hw->phy.ops.get_cfg_done(hw);
@@ -3127,11 +3203,13 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
/*
* The 82578 Rx buffer will stall if wakeup is enabled in host and
- * the ME. Reading the BM_WUC register will clear the host wakeup bit.
+ * the ME. Disable wakeup by clearing the host wakeup bit.
* Reset the phy after disabling host wakeup to reset the Rx buffer.
*/
if (hw->phy.type == e1000_phy_82578) {
- e1e_rphy(hw, BM_WUC, &i);
+ e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
+ i &= ~BM_WUC_HOST_WU_BIT;
+ e1e_wphy(hw, BM_PORT_GEN_CFG, i);
ret_val = e1000_phy_hw_reset_ich8lan(hw);
if (ret_val)
return ret_val;
@@ -3586,17 +3664,16 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
}
/**
- * e1000e_disable_gig_wol_ich8lan - disable gig during WoL
+ * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
* @hw: pointer to the HW structure
*
* During S0 to Sx transition, it is possible the link remains at gig
* instead of negotiating to a lower speed. Before going to Sx, set
* 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
- * to a lower speed.
- *
- * Should only be called for applicable parts.
+ * to a lower speed. For PCH and newer parts, the OEM bits PHY register
+ * (LED, GbE disable and LPLU configurations) also needs to be written.
**/
-void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
{
u32 phy_ctrl;
s32 ret_val;
@@ -3616,6 +3693,60 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
+ * @hw: pointer to the HW structure
+ *
+ * During Sx to S0 transitions on non-managed devices or managed devices
+ * on which PHY resets are not blocked, if the PHY registers cannot be
+ * accessed properly by the s/w toggle the LANPHYPC value to power cycle
+ * the PHY.
+ **/
+void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
+{
+ u32 fwsm;
+
+ if (hw->mac.type != e1000_pch2lan)
+ return;
+
+ fwsm = er32(FWSM);
+ if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) {
+ u16 phy_id1, phy_id2;
+ s32 ret_val;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ e_dbg("Failed to acquire PHY semaphore in resume\n");
+ return;
+ }
+
+ /* Test access to the PHY registers by reading the ID regs */
+ ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
+ if (ret_val)
+ goto release;
+ ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
+ if (ret_val)
+ goto release;
+
+ if (hw->phy.id == ((u32)(phy_id1 << 16) |
+ (u32)(phy_id2 & PHY_REVISION_MASK)))
+ goto release;
+
+ e1000_toggle_lanphypc_value_ich8lan(hw);
+
+ hw->phy.ops.release(hw);
+ msleep(50);
+ e1000_phy_hw_reset(hw);
+ msleep(50);
+ return;
+ }
+
+release:
+ hw->phy.ops.release(hw);
+
+ return;
+}
+
+/**
* e1000_cleanup_led_ich8lan - Restore the default LED operation
* @hw: pointer to the HW structure
*
@@ -3832,6 +3963,7 @@ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
u16 phy_data;
+ s32 ret_val;
e1000e_clear_hw_cntrs_base(hw);
@@ -3853,20 +3985,29 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82579) ||
(hw->phy.type == e1000_phy_82577)) {
- e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
- e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
- e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
- e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
- e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
- e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
- e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
- e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
- e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
- e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
- e1e_rphy(hw, HV_DC_UPPER, &phy_data);
- e1e_rphy(hw, HV_DC_LOWER, &phy_data);
- e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
- e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return;
+ ret_val = hw->phy.ops.set_page(hw,
+ HV_STATS_PAGE << IGP_PAGE_SHIFT);
+ if (ret_val)
+ goto release;
+ hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
+release:
+ hw->phy.ops.release(hw);
}
}
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index dd8ab05b559..0893ab107ad 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -56,7 +56,7 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
struct e1000_adapter *adapter = hw->adapter;
u16 pcie_link_status, cap_offset;
- cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ cap_offset = adapter->pdev->pcie_cap;
if (!cap_offset) {
bus->width = e1000_bus_width_unknown;
} else {
@@ -190,7 +190,8 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
/* Check for LOM (vs. NIC) or one of two valid mezzanine cards */
if (!((nvm_data & NVM_COMPAT_LOM) ||
(hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
- (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)))
+ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES)))
goto out;
ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
@@ -200,10 +201,10 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
goto out;
}
- if (nvm_alt_mac_addr_offset == 0xFFFF) {
+ if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+ (nvm_alt_mac_addr_offset == 0x0000))
/* There is no Alternate MAC Address */
goto out;
- }
if (hw->bus.func == E1000_FUNC_1)
nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
@@ -220,7 +221,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
}
/* if multicast bit is set, the alternate address will not be used */
- if (alt_mac_addr[0] & 0x01) {
+ if (is_multicast_ether_addr(alt_mac_addr)) {
e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
goto out;
}
@@ -1986,6 +1987,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
ew32(EECD, eecd);
+ e1e_flush();
udelay(1);
/*
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 3310c3d477d..2198e615f24 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -36,6 +36,7 @@
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
+#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
@@ -53,9 +54,9 @@
#include "e1000.h"
-#define DRV_EXTRAVERSION "-k2"
+#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "1.3.10" DRV_EXTRAVERSION
+#define DRV_VERSION "1.4.4" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -518,11 +519,68 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
}
/**
+ * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
+ * @hw: pointer to the HW structure
+ * @tail: address of tail descriptor register
+ * @i: value to write to tail descriptor register
+ *
+ * When updating the tail register, the ME could be accessing Host CSR
+ * registers at the same time. Normally, this is handled in h/w by an
+ * arbiter but on some parts there is a bug that acknowledges Host accesses
+ * later than it should which could result in the descriptor register to
+ * have an incorrect value. Workaround this by checking the FWSM register
+ * which has bit 24 set while ME is accessing Host CSR registers, wait
+ * if it is set and try again a number of times.
+ **/
+static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail,
+ unsigned int i)
+{
+ unsigned int j = 0;
+
+ while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
+ (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
+ udelay(50);
+
+ writel(i, tail);
+
+ if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
+ return E1000_ERR_SWFW_SYNC;
+
+ return 0;
+}
+
+static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
+{
+ u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (e1000e_update_tail_wa(hw, tail, i)) {
+ u32 rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ e_err("ME firmware caused invalid RDT - resetting\n");
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
+{
+ u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (e1000e_update_tail_wa(hw, tail, i)) {
+ u32 tctl = er32(TCTL);
+ ew32(TCTL, tctl & ~E1000_TCTL_EN);
+ e_err("ME firmware caused invalid TDT - resetting\n");
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+/**
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
* @adapter: address of board private structure
**/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
- int cleaned_count)
+ int cleaned_count, gfp_t gfp)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
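
A userspace model of the intent behind the e1000e_update_tail_wa() helper added in the hunk above: poll a "firmware is touching host CSRs" flag a bounded number of times, perform the tail write, and, if the poll budget ran out, read the register back to verify the write landed. device_busy(), reg_write(), reg_read() and POLL_COUNT are hypothetical stand-ins for the FWSM bit check, the MMIO accessors and E1000_ICH_FWSM_PCIM2PCI_COUNT; the 50us delay per poll and the reset scheduling are omitted.

#include <stdbool.h>
#include <stdio.h>

#define POLL_COUNT 2000

static unsigned int tail_reg;		/* simulated tail descriptor register */

static bool device_busy(void)		/* stands in for the FWSM arbiter bit */
{
	return false;			/* model an idle arbiter */
}

static void reg_write(unsigned int v) { tail_reg = v; }
static unsigned int reg_read(void)    { return tail_reg; }

static int update_tail_wa(unsigned int i)
{
	unsigned int tries = 0;

	/* wait (bounded) for the firmware arbiter window to close */
	while (tries < POLL_COUNT && device_busy())
		tries++;		/* the driver delays 50us per try */

	reg_write(i);

	/* if we gave up waiting, verify the write actually took effect */
	if (tries == POLL_COUNT && reg_read() != i)
		return -1;

	return 0;
}

int main(void)
{
	if (update_tail_wa(42))
		printf("tail write failed, would schedule a reset\n");
	else
		printf("tail is now %u\n", reg_read());
	return 0;
}
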
@@ -543,7 +601,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
goto map_skb;
}
- skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+ skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
if (!skb) {
/* Better luck next round */
adapter->alloc_rx_buff_failed++;
@@ -572,7 +630,10 @@ map_skb:
* such as IA-64).
*/
wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(adapter, i);
+ else
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
}
i++;
if (i == rx_ring->count)
@@ -588,7 +649,7 @@ map_skb:
* @adapter: address of board private structure
**/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
- int cleaned_count)
+ int cleaned_count, gfp_t gfp)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -614,7 +675,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
continue;
}
if (!ps_page->page) {
- ps_page->page = alloc_page(GFP_ATOMIC);
+ ps_page->page = alloc_page(gfp);
if (!ps_page->page) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
@@ -640,8 +701,9 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
cpu_to_le64(ps_page->dma);
}
- skb = netdev_alloc_skb_ip_align(netdev,
- adapter->rx_ps_bsize0);
+ skb = __netdev_alloc_skb_ip_align(netdev,
+ adapter->rx_ps_bsize0,
+ gfp);
if (!skb) {
adapter->alloc_rx_buff_failed++;
@@ -671,7 +733,11 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
* such as IA-64).
*/
wmb();
- writel(i << 1, adapter->hw.hw_addr + rx_ring->tail);
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(adapter, i << 1);
+ else
+ writel(i << 1,
+ adapter->hw.hw_addr + rx_ring->tail);
}
i++;
@@ -691,7 +757,7 @@ no_buffers:
**/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
- int cleaned_count)
+ int cleaned_count, gfp_t gfp)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -712,7 +778,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
goto check_page;
}
- skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+ skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
if (unlikely(!skb)) {
/* Better luck next round */
adapter->alloc_rx_buff_failed++;
@@ -723,7 +789,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
check_page:
/* allocate a new page if necessary */
if (!buffer_info->page) {
- buffer_info->page = alloc_page(GFP_ATOMIC);
+ buffer_info->page = alloc_page(gfp);
if (unlikely(!buffer_info->page)) {
adapter->alloc_rx_buff_failed++;
break;
@@ -754,7 +820,10 @@ check_page:
* applicable for weak-ordered memory model archs,
* such as IA-64). */
wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_rdt_wa(adapter, i);
+ else
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
}
}
@@ -887,7 +956,8 @@ next_desc:
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
- adapter->alloc_rx_buf(adapter, cleaned_count);
+ adapter->alloc_rx_buf(adapter, cleaned_count,
+ GFP_ATOMIC);
cleaned_count = 0;
}
@@ -899,7 +969,7 @@ next_desc:
cleaned_count = e1000_desc_unused(rx_ring);
if (cleaned_count)
- adapter->alloc_rx_buf(adapter, cleaned_count);
+ adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
@@ -1229,7 +1299,8 @@ next_desc:
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
- adapter->alloc_rx_buf(adapter, cleaned_count);
+ adapter->alloc_rx_buf(adapter, cleaned_count,
+ GFP_ATOMIC);
cleaned_count = 0;
}
@@ -1243,7 +1314,7 @@ next_desc:
cleaned_count = e1000_desc_unused(rx_ring);
if (cleaned_count)
- adapter->alloc_rx_buf(adapter, cleaned_count);
+ adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
@@ -1410,7 +1481,8 @@ next_desc:
/* return some buffers to hardware, one at a time is too slow */
if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
- adapter->alloc_rx_buf(adapter, cleaned_count);
+ adapter->alloc_rx_buf(adapter, cleaned_count,
+ GFP_ATOMIC);
cleaned_count = 0;
}
@@ -1422,7 +1494,7 @@ next_desc:
cleaned_count = e1000_desc_unused(rx_ring);
if (cleaned_count)
- adapter->alloc_rx_buf(adapter, cleaned_count);
+ adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
@@ -2910,7 +2982,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/* disable receives while setting up the descriptors */
rctl = er32(RCTL);
- ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
e1e_flush();
usleep_range(10000, 20000);
@@ -3104,7 +3177,8 @@ static void e1000_configure(struct e1000_adapter *adapter)
e1000_configure_tx(adapter);
e1000_setup_rctl(adapter);
e1000_configure_rx(adapter);
- adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
+ adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring),
+ GFP_KERNEL);
}
/**
@@ -3346,7 +3420,7 @@ int e1000e_up(struct e1000_adapter *adapter)
e1000_configure_msix(adapter);
e1000_irq_enable(adapter);
- netif_wake_queue(adapter->netdev);
+ netif_start_queue(adapter->netdev);
/* fire a link change interrupt to start the watchdog */
if (adapter->msix_entries)
@@ -3388,7 +3462,8 @@ void e1000e_down(struct e1000_adapter *adapter)
/* disable receives in the hardware */
rctl = er32(RCTL);
- ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
/* flush and sleep below */
netif_stop_queue(netdev);
@@ -3397,6 +3472,7 @@ void e1000e_down(struct e1000_adapter *adapter)
tctl = er32(TCTL);
tctl &= ~E1000_TCTL_EN;
ew32(TCTL, tctl);
+
/* flush both disables and wait for them to finish */
e1e_flush();
usleep_range(10000, 20000);
@@ -3413,17 +3489,16 @@ void e1000e_down(struct e1000_adapter *adapter)
e1000e_update_stats(adapter);
spin_unlock(&adapter->stats64_lock);
+ e1000e_flush_descriptors(adapter);
+ e1000_clean_tx_ring(adapter);
+ e1000_clean_rx_ring(adapter);
+
adapter->link_speed = 0;
adapter->link_duplex = 0;
if (!pci_channel_offline(adapter->pdev))
e1000e_reset(adapter);
- e1000e_flush_descriptors(adapter);
-
- e1000_clean_tx_ring(adapter);
- e1000_clean_rx_ring(adapter);
-
/*
* TODO: for power management, we could drop the link and
* pci_disable_device here.
@@ -3833,6 +3908,8 @@ static void e1000_update_phy_info(unsigned long data)
/**
* e1000e_update_phy_stats - Update the PHY statistics counters
* @adapter: board private structure
+ *
+ * Read/clear the upper 16-bit PHY registers and read/accumulate lower
**/
static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
{
@@ -3844,89 +3921,61 @@ static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
if (ret_val)
return;
- hw->phy.addr = 1;
-
-#define HV_PHY_STATS_PAGE 778
/*
* A page set is expensive so check if already on desired page.
* If not, set to the page with the PHY status registers.
*/
+ hw->phy.addr = 1;
ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
&phy_data);
if (ret_val)
goto release;
- if (phy_data != (HV_PHY_STATS_PAGE << IGP_PAGE_SHIFT)) {
- ret_val = e1000e_write_phy_reg_mdic(hw,
- IGP01E1000_PHY_PAGE_SELECT,
- (HV_PHY_STATS_PAGE <<
- IGP_PAGE_SHIFT));
+ if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
+ ret_val = hw->phy.ops.set_page(hw,
+ HV_STATS_PAGE << IGP_PAGE_SHIFT);
if (ret_val)
goto release;
}
- /* Read/clear the upper 16-bit registers and read/accumulate lower */
-
/* Single Collision Count */
- e1000e_read_phy_reg_mdic(hw, HV_SCC_UPPER & MAX_PHY_REG_ADDRESS,
- &phy_data);
- ret_val = e1000e_read_phy_reg_mdic(hw,
- HV_SCC_LOWER & MAX_PHY_REG_ADDRESS,
- &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
if (!ret_val)
adapter->stats.scc += phy_data;
/* Excessive Collision Count */
- e1000e_read_phy_reg_mdic(hw, HV_ECOL_UPPER & MAX_PHY_REG_ADDRESS,
- &phy_data);
- ret_val = e1000e_read_phy_reg_mdic(hw,
- HV_ECOL_LOWER & MAX_PHY_REG_ADDRESS,
- &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
if (!ret_val)
adapter->stats.ecol += phy_data;
/* Multiple Collision Count */
- e1000e_read_phy_reg_mdic(hw, HV_MCC_UPPER & MAX_PHY_REG_ADDRESS,
- &phy_data);
- ret_val = e1000e_read_phy_reg_mdic(hw,
- HV_MCC_LOWER & MAX_PHY_REG_ADDRESS,
- &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
if (!ret_val)
adapter->stats.mcc += phy_data;
/* Late Collision Count */
- e1000e_read_phy_reg_mdic(hw, HV_LATECOL_UPPER & MAX_PHY_REG_ADDRESS,
- &phy_data);
- ret_val = e1000e_read_phy_reg_mdic(hw,
- HV_LATECOL_LOWER &
- MAX_PHY_REG_ADDRESS,
- &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
if (!ret_val)
adapter->stats.latecol += phy_data;
/* Collision Count - also used for adaptive IFS */
- e1000e_read_phy_reg_mdic(hw, HV_COLC_UPPER & MAX_PHY_REG_ADDRESS,
- &phy_data);
- ret_val = e1000e_read_phy_reg_mdic(hw,
- HV_COLC_LOWER & MAX_PHY_REG_ADDRESS,
- &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
if (!ret_val)
hw->mac.collision_delta = phy_data;
/* Defer Count */
- e1000e_read_phy_reg_mdic(hw, HV_DC_UPPER & MAX_PHY_REG_ADDRESS,
- &phy_data);
- ret_val = e1000e_read_phy_reg_mdic(hw,
- HV_DC_LOWER & MAX_PHY_REG_ADDRESS,
- &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
if (!ret_val)
adapter->stats.dc += phy_data;
/* Transmit with no CRS */
- e1000e_read_phy_reg_mdic(hw, HV_TNCRS_UPPER & MAX_PHY_REG_ADDRESS,
- &phy_data);
- ret_val = e1000e_read_phy_reg_mdic(hw,
- HV_TNCRS_LOWER & MAX_PHY_REG_ADDRESS,
- &phy_data);
+ hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
+ ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
if (!ret_val)
adapter->stats.tncrs += phy_data;
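For reference, the rewritten statistics loop above follows a single pattern: read the upper 16-bit half of each counter (which latches/clears it) and, if the follow-up read succeeds, accumulate the lower half. A minimal userspace sketch of that pattern, with a stubbed read_reg() standing in for hw->phy.ops.read_reg_page() and placeholder register numbers:

/*
 * Minimal sketch of the read-upper/accumulate-lower pattern used above.
 * read_reg() is a hypothetical stand-in for hw->phy.ops.read_reg_page();
 * on real hardware the upper half clears on read.  Register numbers are
 * placeholders, not the HV_*_UPPER/LOWER values.
 */
#include <stdint.h>
#include <stdio.h>

static int read_reg(uint16_t reg, uint16_t *val)
{
	/* stub: pretend the lower half of every counter reads back as 3 */
	*val = (reg & 1) ? 3 : 0;	/* odd regs act as LOWER here */
	return 0;			/* 0 == success, as in the driver */
}

static void accumulate(uint16_t upper_reg, uint16_t lower_reg, uint64_t *stat)
{
	uint16_t val;

	read_reg(upper_reg, &val);		/* read/clear upper half */
	if (!read_reg(lower_reg, &val))		/* read lower half ... */
		*stat += val;			/* ... and accumulate it */
}

int main(void)
{
	uint64_t scc = 0;			/* Single Collision Count */

	accumulate(16, 17, &scc);		/* placeholder reg numbers */
	accumulate(16, 17, &scc);
	printf("scc = %llu\n", (unsigned long long)scc);
	return 0;
}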
@@ -4707,7 +4756,12 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
wmb();
tx_ring->next_to_use = i;
- writel(i, adapter->hw.hw_addr + tx_ring->tail);
+
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ e1000e_update_tdt_wa(adapter, i);
+ else
+ writel(i, adapter->hw.hw_addr + tx_ring->tail);
+
/*
* we need this if more than one processor can write to our tail
* at a time, it synchronizes IO on IA64/Altix systems
@@ -5154,21 +5208,34 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
{
struct e1000_hw *hw = &adapter->hw;
u32 i, mac_reg;
- u16 phy_reg;
+ u16 phy_reg, wuc_enable;
int retval = 0;
/* copy MAC RARs to PHY RARs */
e1000_copy_rx_addrs_to_phy_ich8lan(hw);
- /* copy MAC MTA to PHY MTA */
+ retval = hw->phy.ops.acquire(hw);
+ if (retval) {
+ e_err("Could not acquire PHY\n");
+ return retval;
+ }
+
+ /* Enable access to the Wakeup registers and set page to BM_WUC_PAGE */
+ retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+ if (retval)
+ goto out;
+
+ /* copy MAC MTA to PHY MTA - only needed for pchlan */
for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
- e1e_wphy(hw, BM_MTA(i), (u16)(mac_reg & 0xFFFF));
- e1e_wphy(hw, BM_MTA(i) + 1, (u16)((mac_reg >> 16) & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_MTA(i),
+ (u16)(mac_reg & 0xFFFF));
+ hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
+ (u16)((mac_reg >> 16) & 0xFFFF));
}
/* configure PHY Rx Control register */
- e1e_rphy(&adapter->hw, BM_RCTL, &phy_reg);
+ hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
mac_reg = er32(RCTL);
if (mac_reg & E1000_RCTL_UPE)
phy_reg |= BM_RCTL_UPE;
@@ -5185,31 +5252,19 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
mac_reg = er32(CTRL);
if (mac_reg & E1000_CTRL_RFCE)
phy_reg |= BM_RCTL_RFCE;
- e1e_wphy(&adapter->hw, BM_RCTL, phy_reg);
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
/* enable PHY wakeup in MAC register */
ew32(WUFC, wufc);
ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
/* configure and enable PHY wakeup in PHY registers */
- e1e_wphy(&adapter->hw, BM_WUFC, wufc);
- e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
/* activate PHY wakeup */
- retval = hw->phy.ops.acquire(hw);
- if (retval) {
- e_err("Could not acquire PHY\n");
- return retval;
- }
- e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
- (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
- retval = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
- if (retval) {
- e_err("Could not read PHY page 769\n");
- goto out;
- }
- phy_reg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
- retval = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+ wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
+ retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
if (retval)
e_err("Could not set PHY Host Wakeup bit\n");
out:
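The reworked e1000_init_phy_wakeup() now brackets all Wakeup-page writes between a single enable/disable pair instead of toggling BM_WUC_ENABLE_REG around every access. A self-contained sketch of that bracketing; every helper below is a stand-in, and the register numbers are illustrative (the two bit masks mirror BM_WUC_ENABLE_BIT and BM_WUC_HOST_WU_BIT as used in this patch):

/*
 * Sketch of the enable/write/disable bracketing: save the enable register,
 * batch the wakeup-page writes, then restore (and here, activate) it on
 * the way out.  Not the driver's real implementation.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t wuc_enable_reg;			/* fake BM_WUC_ENABLE_REG */

static int enable_wakeup_access(uint16_t *saved)
{
	*saved = wuc_enable_reg;		/* remember original value */
	wuc_enable_reg |= 0x0004;		/* assumed BM_WUC_ENABLE_BIT */
	return 0;
}

static int disable_wakeup_access(uint16_t *saved)
{
	wuc_enable_reg = *saved;		/* write back caller's value */
	return 0;
}

static void write_reg_page(uint16_t reg, uint16_t val)
{
	printf("wakeup-page write: reg %u = 0x%04x\n", reg, val);
}

int main(void)
{
	uint16_t wuc_enable;

	if (enable_wakeup_access(&wuc_enable))
		return 1;

	write_reg_page(1, 0x0001);	/* e.g. wake filter control */
	write_reg_page(2, 0x0002);	/* e.g. PME enable */

	/* as in the patch: OR in enable + host-wakeup bits before restore */
	wuc_enable |= 0x0004 | 0x0010;
	return disable_wakeup_access(&wuc_enable);
}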
@@ -5277,7 +5332,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
}
if (adapter->flags & FLAG_IS_ICH)
- e1000e_disable_gig_wol_ich8lan(&adapter->hw);
+ e1000_suspend_workarounds_ich8lan(&adapter->hw);
/* Allow time for pending master requests to run */
e1000e_disable_pcie_master(&adapter->hw);
@@ -5343,7 +5398,7 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
*/
if (adapter->flags & FLAG_IS_QUAD_PORT) {
struct pci_dev *us_dev = pdev->bus->self;
- int pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
+ int pos = pci_pcie_cap(us_dev);
u16 devctl;
pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
@@ -5428,6 +5483,9 @@ static int __e1000_resume(struct pci_dev *pdev)
return err;
}
+ if (hw->mac.type == e1000_pch2lan)
+ e1000_resume_workarounds_pchlan(&adapter->hw);
+
e1000e_power_up_phy(adapter);
/* report the system wakeup cause from S3/S4 */
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 484774c13c2..8666476cb9b 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -36,7 +36,7 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
static s32 e1000_wait_autoneg(struct e1000_hw *hw);
static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
- u16 *data, bool read);
+ u16 *data, bool read, bool page_set);
static u32 e1000_get_phy_addr_for_hv_page(u32 page);
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
u16 *data, bool read);
@@ -348,6 +348,24 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
}
/**
+ * e1000_set_page_igp - Set page as on IGP-like PHY(s)
+ * @hw: pointer to the HW structure
+ * @page: page to set (shifted left when necessary)
+ *
+ * Sets PHY page required for PHY register access. Assumes semaphore is
+ * already acquired. Note that this function sets phy.addr to 1 so the caller
+ * must set it appropriately (if necessary) after this function returns.
+ **/
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
+{
+ e_dbg("Setting page 0x%x\n", page);
+
+ hw->phy.addr = 1;
+
+ return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
+}
+
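Callers of the new e1000_set_page_igp() pass the page number pre-shifted by IGP_PAGE_SHIFT because, as the hunks above note, the PHY expects (page x 32). A tiny sketch of that computation, reusing the page number visible in this patch (treat the shift value as illustrative):

/*
 * Illustration of the "page is shifted left, PHY expects (page x 32)"
 * convention.  HV_STATS_PAGE (778) appears in the hunks above; the shift
 * of 5 simply encodes the x32 the comments describe.
 */
#include <stdio.h>

#define IGP_PAGE_SHIFT	5		/* page * 32 */
#define HV_STATS_PAGE	778

int main(void)
{
	unsigned int page_select = HV_STATS_PAGE << IGP_PAGE_SHIFT;

	/* 778 * 32 == 24896 == 0x6140 goes into the page-select register */
	printf("page-select value: %u (0x%x)\n", page_select, page_select);
	return 0;
}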
+/**
* __e1000e_read_phy_reg_igp - Read igp PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
@@ -519,6 +537,7 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
udelay(2);
@@ -591,6 +610,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
E1000_KMRNCTRLSTA_OFFSET) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
udelay(2);
@@ -2418,7 +2438,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
- false);
+ false, false);
goto out;
}
@@ -2477,7 +2497,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
- true);
+ true, false);
goto out;
}
@@ -2535,7 +2555,7 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
- true);
+ true, false);
goto out;
}
@@ -2579,7 +2599,7 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
- false);
+ false, false);
goto out;
}
@@ -2603,104 +2623,163 @@ out:
}
/**
- * e1000_access_phy_wakeup_reg_bm - Read BM PHY wakeup register
+ * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
* @hw: pointer to the HW structure
- * @offset: register offset to be read or written
- * @data: pointer to the data to read or write
- * @read: determines if operation is read or write
- *
- * Acquires semaphore, if necessary, then reads the PHY register at offset
- * and storing the retrieved information in data. Release any acquired
- * semaphores before exiting. Note that procedure to read the wakeup
- * registers are different. It works as such:
- * 1) Set page 769, register 17, bit 2 = 1
- * 2) Set page to 800 for host (801 if we were manageability)
- * 3) Write the address using the address opcode (0x11)
- * 4) Read or write the data using the data opcode (0x12)
- * 5) Restore 769_17.2 to its original value
+ * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
*
- * Assumes semaphore already acquired.
+ * Assumes semaphore already acquired and phy_reg points to a valid memory
+ * address to store contents of the BM_WUC_ENABLE_REG register.
**/
-static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
- u16 *data, bool read)
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
{
s32 ret_val;
- u16 reg = BM_PHY_REG_NUM(offset);
- u16 phy_reg = 0;
-
- /* Gig must be disabled for MDIO accesses to page 800 */
- if ((hw->mac.type == e1000_pchlan) &&
- (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
- e_dbg("Attempting to access page 800 while gig enabled.\n");
+ u16 temp;
- /* All operations in this function are phy address 1 */
+ /* All page select, port ctrl and wakeup registers use phy address 1 */
hw->phy.addr = 1;
- /* Set page 769 */
- e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
- (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
+ /* Select Port Control Registers page */
+ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+ if (ret_val) {
+ e_dbg("Could not set Port Control page\n");
+ goto out;
+ }
- ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg);
+ ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
if (ret_val) {
- e_dbg("Could not read PHY page 769\n");
+ e_dbg("Could not read PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
goto out;
}
- /* First clear bit 4 to avoid a power state change */
- phy_reg &= ~(BM_WUC_HOST_WU_BIT);
- ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+ /*
+ * Enable both PHY wakeup mode and Wakeup register page writes.
+ * Prevent a power state change by disabling ME and Host PHY wakeup.
+ */
+ temp = *phy_reg;
+ temp |= BM_WUC_ENABLE_BIT;
+ temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
+
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp);
if (ret_val) {
- e_dbg("Could not clear PHY page 769 bit 4\n");
+ e_dbg("Could not write PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
goto out;
}
- /* Write bit 2 = 1, and clear bit 4 to 769_17 */
- ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG,
- phy_reg | BM_WUC_ENABLE_BIT);
+ /* Select Host Wakeup Registers page */
+ ret_val = e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
+
+ /* caller now able to write registers on the Wakeup registers page */
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
+ * @hw: pointer to the HW structure
+ * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
+ *
+ * Restore BM_WUC_ENABLE_REG to its original value.
+ *
+ * Assumes semaphore already acquired and *phy_reg is the contents of the
+ * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
+ * caller.
+ **/
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+ s32 ret_val = 0;
+
+ /* Select Port Control Registers page */
+ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
if (ret_val) {
- e_dbg("Could not write PHY page 769 bit 2\n");
+ e_dbg("Could not set Port Control page\n");
goto out;
}
- /* Select page 800 */
- ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
- (BM_WUC_PAGE << IGP_PAGE_SHIFT));
+ /* Restore 769.17 to its original value */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg);
+ if (ret_val)
+ e_dbg("Could not restore PHY register %d.%d\n",
+ BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to read or write
+ * @read: determines if operation is read or write
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Read the PHY register at offset and store the retrieved information in
+ * data, or write data to PHY register at offset. Note the procedure to
+ * access the PHY wakeup registers is different than reading the other PHY
+ * registers. It works as such:
+ * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1
+ * 2) Set page to 800 for host (801 if we were manageability)
+ * 3) Write the address using the address opcode (0x11)
+ * 4) Read or write the data using the data opcode (0x12)
+ * 5) Restore 769.17.2 to its original value
+ *
+ * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and
+ * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm().
+ *
+ * Assumes semaphore is already acquired. When page_set==true, assumes
+ * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
+ * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_access_bm()).
+ **/
+static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read, bool page_set)
+{
+ s32 ret_val;
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 phy_reg = 0;
+
+ /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
+ if ((hw->mac.type == e1000_pchlan) &&
+ (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
+ e_dbg("Attempting to access page %d while gig enabled.\n",
+ page);
- /* Write the page 800 offset value using opcode 0x11 */
+ if (!page_set) {
+ /* Enable access to PHY wakeup registers */
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+ if (ret_val) {
+ e_dbg("Could not enable PHY wakeup reg access\n");
+ goto out;
+ }
+ }
+
+ e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg);
+
+ /* Write the Wakeup register page offset value using opcode 0x11 */
ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
if (ret_val) {
- e_dbg("Could not write address opcode to page 800\n");
+ e_dbg("Could not write address opcode to page %d\n", page);
goto out;
}
if (read) {
- /* Read the page 800 value using opcode 0x12 */
+ /* Read the Wakeup register page value using opcode 0x12 */
ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
data);
} else {
- /* Write the page 800 value using opcode 0x12 */
+ /* Write the Wakeup register page value using opcode 0x12 */
ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
*data);
}
if (ret_val) {
- e_dbg("Could not access data value from page 800\n");
+ e_dbg("Could not access PHY reg %d.%d\n", page, reg);
goto out;
}
- /*
- * Restore 769_17.2 to its original value
- * Set page 769
- */
- e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
- (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
-
- /* Clear 769_17.2 */
- ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
- if (ret_val) {
- e_dbg("Could not clear PHY page 769 bit 2\n");
- goto out;
- }
+ if (!page_set)
+ ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
out:
return ret_val;
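The five-step procedure documented above boils down to latching the target offset through the address opcode register (0x11) and then moving data through the data opcode register (0x12); steps 1, 2 and 5 are handled by the new enable/disable helpers. A compact userspace sketch of steps 3-4, with mdic_write()/mdic_read() as stand-ins for e1000e_write/read_phy_reg_mdic():

/*
 * Condensed model of the address-opcode / data-opcode access: the first
 * write latches the offset, subsequent data-opcode accesses hit that
 * offset.  The 32-entry array is just a bounded fake register file.
 */
#include <stdint.h>
#include <stdio.h>

#define BM_WUC_ADDRESS_OPCODE	0x11
#define BM_WUC_DATA_OPCODE	0x12

static uint16_t fake_regs[32];		/* pretend wakeup-page registers */
static uint16_t address_latch;

static int mdic_write(uint16_t reg, uint16_t val)
{
	if (reg == BM_WUC_ADDRESS_OPCODE)
		address_latch = val;			/* step 3 */
	else if (reg == BM_WUC_DATA_OPCODE)
		fake_regs[address_latch % 32] = val;	/* step 4, write */
	return 0;
}

static int mdic_read(uint16_t reg, uint16_t *val)
{
	if (reg == BM_WUC_DATA_OPCODE)
		*val = fake_regs[address_latch % 32];	/* step 4, read */
	return 0;
}

int main(void)
{
	uint16_t data;

	/* steps 1-2 (enable access, select page 800) elided in this sketch */
	mdic_write(BM_WUC_ADDRESS_OPCODE, 3);	/* target wakeup reg 3 */
	mdic_write(BM_WUC_DATA_OPCODE, 0xbeef);
	mdic_write(BM_WUC_ADDRESS_OPCODE, 3);
	mdic_read(BM_WUC_DATA_OPCODE, &data);
	printf("wakeup reg 3 = 0x%04x\n", data);	/* prints 0xbeef */
	return 0;
}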
@@ -2792,11 +2871,12 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
* semaphore before exiting.
**/
static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
- bool locked)
+ bool locked, bool page_set)
{
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
+ u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
@@ -2806,8 +2886,8 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
- ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
- data, true);
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true, page_set);
goto out;
}
@@ -2817,26 +2897,25 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
goto out;
}
- hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+ if (!page_set) {
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
- if (page == HV_INTC_FC_PAGE_START)
- page = 0;
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_set_page_igp(hw,
+ (page << IGP_PAGE_SHIFT));
- if (reg > MAX_PHY_MULTI_PAGE_REG) {
- u32 phy_addr = hw->phy.addr;
+ hw->phy.addr = phy_addr;
- hw->phy.addr = 1;
-
- /* Page is shifted left, PHY expects (page x 32) */
- ret_val = e1000e_write_phy_reg_mdic(hw,
- IGP01E1000_PHY_PAGE_SELECT,
- (page << IGP_PAGE_SHIFT));
- hw->phy.addr = phy_addr;
-
- if (ret_val)
- goto out;
+ if (ret_val)
+ goto out;
+ }
}
+ e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+ page << IGP_PAGE_SHIFT, reg);
+
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
out:
@@ -2858,7 +2937,7 @@ out:
**/
s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
{
- return __e1000_read_phy_reg_hv(hw, offset, data, false);
+ return __e1000_read_phy_reg_hv(hw, offset, data, false, false);
}
/**
@@ -2872,7 +2951,21 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
**/
s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
{
- return __e1000_read_phy_reg_hv(hw, offset, data, true);
+ return __e1000_read_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ * e1000_read_phy_reg_page_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the data to be read at the register offset
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired and page already set.
+ **/
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, true, true);
}
/**
@@ -2886,11 +2979,12 @@ s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
* at the offset. Release any acquired semaphores before exiting.
**/
static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
- bool locked)
+ bool locked, bool page_set)
{
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
+ u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
@@ -2900,8 +2994,8 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
- ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
- &data, false);
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+ false, page_set);
goto out;
}
@@ -2911,42 +3005,41 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
goto out;
}
- hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
-
- if (page == HV_INTC_FC_PAGE_START)
- page = 0;
-
- /*
- * Workaround MDIO accesses being disabled after entering IEEE Power
- * Down (whenever bit 11 of the PHY Control register is set)
- */
- if ((hw->phy.type == e1000_phy_82578) &&
- (hw->phy.revision >= 1) &&
- (hw->phy.addr == 2) &&
- ((MAX_PHY_REG_ADDRESS & reg) == 0) &&
- (data & (1 << 11))) {
- u16 data2 = 0x7EFF;
- ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3,
- &data2, false);
- if (ret_val)
- goto out;
- }
+ if (!page_set) {
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
- if (reg > MAX_PHY_MULTI_PAGE_REG) {
- u32 phy_addr = hw->phy.addr;
+ /*
+ * Workaround MDIO accesses being disabled after entering IEEE
+ * Power Down (when bit 11 of the PHY Control register is set)
+ */
+ if ((hw->phy.type == e1000_phy_82578) &&
+ (hw->phy.revision >= 1) &&
+ (hw->phy.addr == 2) &&
+ ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) {
+ u16 data2 = 0x7EFF;
+ ret_val = e1000_access_phy_debug_regs_hv(hw,
+ (1 << 6) | 0x3,
+ &data2, false);
+ if (ret_val)
+ goto out;
+ }
- hw->phy.addr = 1;
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_set_page_igp(hw,
+ (page << IGP_PAGE_SHIFT));
- /* Page is shifted left, PHY expects (page x 32) */
- ret_val = e1000e_write_phy_reg_mdic(hw,
- IGP01E1000_PHY_PAGE_SELECT,
- (page << IGP_PAGE_SHIFT));
- hw->phy.addr = phy_addr;
+ hw->phy.addr = phy_addr;
- if (ret_val)
- goto out;
+ if (ret_val)
+ goto out;
+ }
}
+ e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+ page << IGP_PAGE_SHIFT, reg);
+
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
@@ -2968,7 +3061,7 @@ out:
**/
s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
{
- return __e1000_write_phy_reg_hv(hw, offset, data, false);
+ return __e1000_write_phy_reg_hv(hw, offset, data, false, false);
}
/**
@@ -2982,7 +3075,21 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
**/
s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
{
- return __e1000_write_phy_reg_hv(hw, offset, data, true);
+ return __e1000_write_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ * e1000_write_phy_reg_page_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset. Assumes semaphore
+ * already acquired and page already set.
+ **/
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, true, true);
}
/**
@@ -3004,11 +3111,12 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
* @hw: pointer to the HW structure
* @offset: register offset to be read or written
* @data: pointer to the data to be read or written
- * @read: determines if operation is read or written
+ * @read: determines if operation is read or write
*
* Reads the PHY register at offset and stores the retrieved information
* in data. Assumes semaphore already acquired. Note that the procedure
- * to read these regs uses the address port and data port to read/write.
+ * to access these regs uses the address port and data port to read/write.
+ * These accesses are done with PHY address 2 and without using pages.
**/
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
u16 *data, bool read)
@@ -3028,7 +3136,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
/* masking with 0x3F to remove the page from offset */
ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
if (ret_val) {
- e_dbg("Could not write PHY the HV address register\n");
+ e_dbg("Could not write the Address Offset port register\n");
goto out;
}
@@ -3039,7 +3147,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
if (ret_val) {
- e_dbg("Could not read data value from HV data register\n");
+ e_dbg("Could not access the Data port register\n");
goto out;
}
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index 94ec973b2bd..d50a9998ae7 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -44,6 +44,7 @@ static const char version[] =
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/io.h>
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 48ee51bb9e5..a19228563ef 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -365,7 +365,7 @@ static int __init do_express_probe(struct net_device *dev)
dev->irq = mca_irqmap[(pos1>>4)&0x7];
/*
- * XXX: Transciever selection is done
+ * XXX: Transceiver selection is done
* differently on the MCA version.
* How to get it to select something
* other than external/AUI is currently
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 6c7257bd73f..7dd5e6a0d99 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -457,7 +457,6 @@ struct ehea_port {
struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
struct platform_device ofdev; /* Open Firmware Device */
struct ehea_mc_list *mc_list; /* Multicast MAC addresses */
- struct vlan_group *vgrp;
struct ehea_eq *qp_eq;
struct work_struct reset_task;
struct mutex port_lock;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 3fd5a240034..be2cb4ab8b4 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -683,24 +683,13 @@ static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
struct sk_buff *skb)
{
- int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
- pr->port->vgrp);
-
- if (skb->dev->features & NETIF_F_LRO) {
- if (vlan_extracted)
- lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
- pr->port->vgrp,
- cqe->vlan_tag,
- cqe);
- else
- lro_receive_skb(&pr->lro_mgr, skb, cqe);
- } else {
- if (vlan_extracted)
- vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
- cqe->vlan_tag);
- else
- netif_receive_skb(skb);
- }
+ if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
+ __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);
+
+ if (skb->dev->features & NETIF_F_LRO)
+ lro_receive_skb(&pr->lro_mgr, skb, cqe);
+ else
+ netif_receive_skb(skb);
}
static int ehea_proc_rwqes(struct net_device *dev,
@@ -2339,32 +2328,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void ehea_vlan_rx_register(struct net_device *dev,
- struct vlan_group *grp)
-{
- struct ehea_port *port = netdev_priv(dev);
- struct ehea_adapter *adapter = port->adapter;
- struct hcp_ehea_port_cb1 *cb1;
- u64 hret;
-
- port->vgrp = grp;
-
- cb1 = (void *)get_zeroed_page(GFP_KERNEL);
- if (!cb1) {
- pr_err("no mem for cb1\n");
- goto out;
- }
-
- hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
- H_PORT_CB1, H_PORT_CB1_ALL, cb1);
- if (hret != H_SUCCESS)
- pr_err("modify_ehea_port failed\n");
-
- free_page((unsigned long)cb1);
-out:
- return;
-}
-
static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct ehea_port *port = netdev_priv(dev);
@@ -2406,8 +2369,6 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
int index;
u64 hret;
- vlan_group_set_device(port->vgrp, vid, NULL);
-
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
pr_err("no mem for cb1\n");
@@ -3202,7 +3163,6 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = ehea_set_multicast_list,
.ndo_change_mtu = ehea_change_mtu,
- .ndo_vlan_rx_register = ehea_vlan_rx_register,
.ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
.ndo_tx_timeout = ehea_tx_watchdog,
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index cd44bb8017d..95b9f4fa811 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -331,7 +331,7 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
unsigned long flags;
spin_lock_irqsave(&eq->spinlock, flags);
- eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+ eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
spin_unlock_irqrestore(&eq->spinlock, flags);
return eqe;
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 38b351c7b97..ce76d9a8ca6 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.13"
+#define DRV_VERSION "2.1.1.24"
#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -74,6 +74,7 @@ struct enic {
struct vnic_dev *vdev;
struct timer_list notify_timer;
struct work_struct reset;
+ struct work_struct change_mtu_work;
struct msix_entry msix_entry[ENIC_INTR_MAX];
struct enic_msix_entry msix[ENIC_INTR_MAX];
u32 msg_enable;
@@ -93,7 +94,6 @@ struct enic {
____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
spinlock_t wq_lock[ENIC_WQ_MAX];
unsigned int wq_count;
- struct vlan_group *vlan_group;
u16 loop_enable;
u16 loop_tag;
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c
index 90687b14e60..fd6247b3c0e 100644
--- a/drivers/net/enic/enic_dev.c
+++ b/drivers/net/enic/enic_dev.c
@@ -166,6 +166,17 @@ int enic_dev_disable(struct enic *enic)
return err;
}
+int enic_dev_intr_coal_timer_info(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_intr_coal_timer_info(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
int enic_vnic_dev_deinit(struct enic *enic)
{
int err;
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h
index d5f68133762..ff8e87fdfc1 100644
--- a/drivers/net/enic/enic_dev.h
+++ b/drivers/net/enic/enic_dev.h
@@ -34,6 +34,7 @@ int enic_dev_hang_notify(struct enic *enic);
int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
int enic_dev_enable(struct enic *enic);
int enic_dev_disable(struct enic *enic);
+int enic_dev_intr_coal_timer_info(struct enic *enic);
int enic_vnic_dev_deinit(struct enic *enic);
int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp);
int enic_dev_deinit_done(struct enic *enic, int *status);
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 2f433fbfca0..67a27cd304d 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -23,6 +23,7 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -152,12 +153,12 @@ static inline unsigned int enic_legacy_notify_intr(void)
static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
{
- return rq;
+ return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
}
static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
{
- return enic->rq_count + wq;
+ return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}
static inline unsigned int enic_msix_err_intr(struct enic *enic)
@@ -283,12 +284,10 @@ static int enic_set_coalesce(struct net_device *netdev,
u32 rx_coalesce_usecs;
unsigned int i, intr;
- tx_coalesce_usecs = min_t(u32,
- INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
- ecmd->tx_coalesce_usecs);
- rx_coalesce_usecs = min_t(u32,
- INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
- ecmd->rx_coalesce_usecs);
+ tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
+ vnic_dev_get_intr_coal_timer_max(enic->vdev));
+ rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
+ vnic_dev_get_intr_coal_timer_max(enic->vdev));
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
@@ -297,26 +296,26 @@ static int enic_set_coalesce(struct net_device *netdev,
intr = enic_legacy_io_intr();
vnic_intr_coalescing_timer_set(&enic->intr[intr],
- INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ tx_coalesce_usecs);
break;
case VNIC_DEV_INTR_MODE_MSI:
if (tx_coalesce_usecs != rx_coalesce_usecs)
return -EINVAL;
vnic_intr_coalescing_timer_set(&enic->intr[0],
- INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ tx_coalesce_usecs);
break;
case VNIC_DEV_INTR_MODE_MSIX:
for (i = 0; i < enic->wq_count; i++) {
intr = enic_msix_wq_intr(enic, i);
vnic_intr_coalescing_timer_set(&enic->intr[intr],
- INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ tx_coalesce_usecs);
}
for (i = 0; i < enic->rq_count; i++) {
intr = enic_msix_rq_intr(enic, i);
vnic_intr_coalescing_timer_set(&enic->intr[intr],
- INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
+ rx_coalesce_usecs);
}
break;
@@ -423,11 +422,18 @@ static void enic_mtu_check(struct enic *enic)
if (mtu && mtu != enic->port_mtu) {
enic->port_mtu = mtu;
- if (mtu < netdev->mtu)
- netdev_warn(netdev,
- "interface MTU (%d) set higher "
- "than switch port MTU (%d)\n",
- netdev->mtu, mtu);
+ if (enic_is_dynamic(enic)) {
+ mtu = max_t(int, ENIC_MIN_MTU,
+ min_t(int, ENIC_MAX_MTU, mtu));
+ if (mtu != netdev->mtu)
+ schedule_work(&enic->change_mtu_work);
+ } else {
+ if (mtu < netdev->mtu)
+ netdev_warn(netdev,
+ "interface MTU (%d) set higher "
+ "than switch port MTU (%d)\n",
+ netdev->mtu, mtu);
+ }
}
}
@@ -793,10 +799,10 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
}
/* dev_base_lock rwlock held, nominally process context */
-static struct net_device_stats *enic_get_stats(struct net_device *netdev)
+static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *net_stats)
{
struct enic *enic = netdev_priv(netdev);
- struct net_device_stats *net_stats = &netdev->stats;
struct vnic_stats *stats;
enic_dev_stats_dump(enic, &stats);
@@ -1023,14 +1029,6 @@ static void enic_set_rx_mode(struct net_device *netdev)
}
}
-/* rtnl lock is held */
-static void enic_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *vlan_group)
-{
- struct enic *enic = netdev_priv(netdev);
- enic->vlan_group = vlan_group;
-}
-
/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
@@ -1258,24 +1256,13 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb->dev = netdev;
- if (enic->vlan_group && vlan_stripped &&
- (vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) {
-
- if (netdev->features & NETIF_F_GRO)
- vlan_gro_receive(&enic->napi[q_number],
- enic->vlan_group, vlan_tci, skb);
- else
- vlan_hwaccel_receive_skb(skb,
- enic->vlan_group, vlan_tci);
-
- } else {
+ if (vlan_stripped)
+ __vlan_hwaccel_put_tag(skb, vlan_tci);
- if (netdev->features & NETIF_F_GRO)
- napi_gro_receive(&enic->napi[q_number], skb);
- else
- netif_receive_skb(skb);
-
- }
+ if (netdev->features & NETIF_F_GRO)
+ napi_gro_receive(&enic->napi[q_number], skb);
+ else
+ netif_receive_skb(skb);
} else {
/* Buffer overflow
@@ -1560,7 +1547,7 @@ static void enic_notify_timer_start(struct enic *enic)
default:
/* Using intr for notification for INTx/MSI-X */
break;
- };
+ }
}
/* rtnl lock is held, process context */
@@ -1688,6 +1675,9 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
return -EINVAL;
+ if (enic_is_dynamic(enic))
+ return -EOPNOTSUPP;
+
if (running)
enic_stop(netdev);
@@ -1704,6 +1694,55 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+static void enic_change_mtu_work(struct work_struct *work)
+{
+ struct enic *enic = container_of(work, struct enic, change_mtu_work);
+ struct net_device *netdev = enic->netdev;
+ int new_mtu = vnic_dev_mtu(enic->vdev);
+ int err;
+ unsigned int i;
+
+ new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
+
+ rtnl_lock();
+
+ /* Stop RQ */
+ del_timer_sync(&enic->notify_timer);
+
+ for (i = 0; i < enic->rq_count; i++)
+ napi_disable(&enic->napi[i]);
+
+ vnic_intr_mask(&enic->intr[0]);
+ enic_synchronize_irqs(enic);
+ err = vnic_rq_disable(&enic->rq[0]);
+ if (err) {
+ netdev_err(netdev, "Unable to disable RQ.\n");
+ return;
+ }
+ vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
+ vnic_cq_clean(&enic->cq[0]);
+ vnic_intr_clean(&enic->intr[0]);
+
+ /* Fill RQ with new_mtu-sized buffers */
+ netdev->mtu = new_mtu;
+ vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+ /* Need at least one buffer on ring to get going */
+ if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
+ netdev_err(netdev, "Unable to alloc receive buffers.\n");
+ return;
+ }
+
+ /* Start RQ */
+ vnic_rq_enable(&enic->rq[0]);
+ napi_enable(&enic->napi[0]);
+ vnic_intr_unmask(&enic->intr[0]);
+ enic_notify_timer_start(enic);
+
+ rtnl_unlock();
+
+ netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
@@ -1718,8 +1757,12 @@ static void enic_poll_controller(struct net_device *netdev)
enic_isr_msix_rq(enic->msix_entry[intr].vector,
&enic->napi[i]);
}
- intr = enic_msix_wq_intr(enic, i);
- enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
+
+ for (i = 0; i < enic->wq_count; i++) {
+ intr = enic_msix_wq_intr(enic, i);
+ enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
+ }
+
break;
case VNIC_DEV_INTR_MODE_MSI:
enic_isr_msi(enic->pdev->irq, enic);
@@ -2057,13 +2100,12 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
.ndo_open = enic_open,
.ndo_stop = enic_stop,
.ndo_start_xmit = enic_hard_start_xmit,
- .ndo_get_stats = enic_get_stats,
+ .ndo_get_stats64 = enic_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = enic_set_rx_mode,
.ndo_set_multicast_list = enic_set_rx_mode,
.ndo_set_mac_address = enic_set_mac_address_dynamic,
.ndo_change_mtu = enic_change_mtu,
- .ndo_vlan_rx_register = enic_vlan_rx_register,
.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
.ndo_tx_timeout = enic_tx_timeout,
@@ -2079,13 +2121,12 @@ static const struct net_device_ops enic_netdev_ops = {
.ndo_open = enic_open,
.ndo_stop = enic_stop,
.ndo_start_xmit = enic_hard_start_xmit,
- .ndo_get_stats = enic_get_stats,
+ .ndo_get_stats64 = enic_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = enic_set_mac_address,
.ndo_set_rx_mode = enic_set_rx_mode,
.ndo_set_multicast_list = enic_set_rx_mode,
.ndo_change_mtu = enic_change_mtu,
- .ndo_vlan_rx_register = enic_vlan_rx_register,
.ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
.ndo_tx_timeout = enic_tx_timeout,
@@ -2112,6 +2153,14 @@ static int enic_dev_init(struct enic *enic)
unsigned int i;
int err;
+ /* Get interrupt coalesce timer info */
+ err = enic_dev_intr_coal_timer_info(enic);
+ if (err) {
+ dev_warn(dev, "Using default conversion factor for "
+ "interrupt coalesce timer\n");
+ vnic_dev_intr_coal_timer_info_default(enic->vdev);
+ }
+
/* Get vNIC configuration
*/
@@ -2345,6 +2394,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
enic->notify_timer.data = (unsigned long)enic;
INIT_WORK(&enic->reset, enic_reset);
+ INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
for (i = 0; i < enic->wq_count; i++)
spin_lock_init(&enic->wq_lock[i]);
@@ -2427,6 +2477,7 @@ static void __devexit enic_remove(struct pci_dev *pdev)
struct enic *enic = netdev_priv(netdev);
cancel_work_sync(&enic->reset);
+ cancel_work_sync(&enic->change_mtu_work);
unregister_netdev(netdev);
enic_dev_deinit(enic);
vnic_dev_close(enic->vdev);
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 6e5c6356e7d..4a35367de79 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -90,18 +90,30 @@ int enic_get_vnic_config(struct enic *enic)
max_t(u16, ENIC_MIN_MTU,
c->mtu));
- c->intr_timer_usec = min_t(u32,
- INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
- c->intr_timer_usec);
+ c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
+ vnic_dev_get_intr_coal_timer_max(enic->vdev));
dev_info(enic_get_dev(enic),
"vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
- dev_info(enic_get_dev(enic), "vNIC csum tx/rx %d/%d "
- "tso %d intr timer %d usec rss %d\n",
- ENIC_SETTING(enic, TXCSUM), ENIC_SETTING(enic, RXCSUM),
- ENIC_SETTING(enic, TSO),
- c->intr_timer_usec, ENIC_SETTING(enic, RSS));
+
+ dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
+ "tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
+ "loopback tag 0x%04x\n",
+ ENIC_SETTING(enic, TXCSUM) ? "yes" : "no",
+ ENIC_SETTING(enic, RXCSUM) ? "yes" : "no",
+ ENIC_SETTING(enic, TSO) ? "yes" : "no",
+ ENIC_SETTING(enic, LRO) ? "yes" : "no",
+ ENIC_SETTING(enic, RSS) ? "yes" : "no",
+ c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" :
+ c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" :
+ c->intr_mode == VENET_INTR_MODE_ANY ? "any" :
+ "unknown",
+ c->intr_timer_type == VENET_INTR_TYPE_MIN ? "min" :
+ c->intr_timer_type == VENET_INTR_TYPE_IDLE ? "idle" :
+ "unknown",
+ c->intr_timer_usec,
+ c->loop_tag);
return 0;
}
@@ -290,7 +302,7 @@ void enic_init_vnic_resources(struct enic *enic)
for (i = 0; i < enic->intr_count; i++) {
vnic_intr_init(&enic->intr[i],
- INTR_COALESCE_USEC_TO_HW(enic->config.intr_timer_usec),
+ enic->config.intr_timer_usec,
enic->config.intr_timer_type,
mask_on_assertion);
}
diff --git a/drivers/net/enic/vnic_cq.c b/drivers/net/enic/vnic_cq.c
index b86d6ef8dad..0daa1c7073c 100644
--- a/drivers/net/enic/vnic_cq.c
+++ b/drivers/net/enic/vnic_cq.c
@@ -74,6 +74,8 @@ void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
+
+ cq->interrupt_offset = interrupt_offset;
}
void vnic_cq_clean(struct vnic_cq *cq)
diff --git a/drivers/net/enic/vnic_cq.h b/drivers/net/enic/vnic_cq.h
index 552d3daf250..579315cbe80 100644
--- a/drivers/net/enic/vnic_cq.h
+++ b/drivers/net/enic/vnic_cq.h
@@ -57,6 +57,7 @@ struct vnic_cq {
struct vnic_dev_ring ring;
unsigned int to_clean;
unsigned int last_color;
+ unsigned int interrupt_offset;
};
static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 68f24ae860a..8c4c8cf486f 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -40,6 +40,12 @@ struct vnic_res {
unsigned int count;
};
+struct vnic_intr_coal_timer_info {
+ u32 mul;
+ u32 div;
+ u32 max_usec;
+};
+
struct vnic_dev {
void *priv;
struct pci_dev *pdev;
@@ -58,6 +64,7 @@ struct vnic_dev {
enum vnic_proxy_type proxy;
u32 proxy_index;
u64 args[VNIC_DEVCMD_NARGS];
+ struct vnic_intr_coal_timer_info intr_coal_timer_info;
};
#define VNIC_MAX_RES_HDR_SIZE \
@@ -794,6 +801,42 @@ int vnic_dev_deinit(struct vnic_dev *vdev)
return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
}
+void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
+{
+ /* Default: hardware intr coal timer is in units of 1.5 usecs */
+ vdev->intr_coal_timer_info.mul = 2;
+ vdev->intr_coal_timer_info.div = 3;
+ vdev->intr_coal_timer_info.max_usec =
+ vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
+}
+
+int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
+{
+ int wait = 1000;
+ int err;
+
+ memset(vdev->args, 0, sizeof(vdev->args));
+
+ err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);
+
+ /* Use defaults when firmware doesn't support the devcmd at all or
+ * supports it for only specific hardware
+ */
+ if ((err == ERR_ECMDUNKNOWN) ||
+ (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
+ pr_warning("Using default conversion factor for "
+ "interrupt coalesce timer\n");
+ vnic_dev_intr_coal_timer_info_default(vdev);
+ return 0;
+ }
+
+ vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
+ vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
+ vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];
+
+ return err;
+}
+
int vnic_dev_link_status(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
@@ -838,6 +881,23 @@ enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
return vdev->intr_mode;
}
+u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
+{
+ return (usec * vdev->intr_coal_timer_info.mul) /
+ vdev->intr_coal_timer_info.div;
+}
+
+u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
+{
+ return (hw_cycles * vdev->intr_coal_timer_info.div) /
+ vdev->intr_coal_timer_info.mul;
+}
+
+u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
+{
+ return vdev->intr_coal_timer_info.max_usec;
+}
+
void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index cf482a2c9dd..852b698fbe7 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -109,11 +109,16 @@ int vnic_dev_open(struct vnic_dev *vdev, int arg);
int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
int vnic_dev_init(struct vnic_dev *vdev, int arg);
int vnic_dev_deinit(struct vnic_dev *vdev);
+void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev);
+int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev);
int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode);
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
+u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec);
+u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles);
+u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev);
void vnic_dev_unregister(struct vnic_dev *vdev);
int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
u8 ig_vlan_rewrite_mode);
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index c5569bfb47a..8025e8808d6 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -318,6 +318,25 @@ enum vnic_devcmd_cmd {
* ERR_EINPROGRESS - command in a0 is still in progress
*/
CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49),
+
+ /*
+ * Returns interrupt coalescing timer conversion factors.
+ * After calling this devcmd, ENIC driver can convert
+ * interrupt coalescing timer in usec into CPU cycles as follows:
+ *
+ * intr_timer_cycles = intr_timer_usec * multiplier / divisor
+ *
+ * Interrupt coalescing timer in usecs can be obtained from
+ * CPU cycles as follows:
+ *
+ * intr_timer_usec = intr_timer_cycles * divisor / multiplier
+ *
+ * in: none
+ * out: (u32)a0 = multiplier
+ * (u32)a1 = divisor
+ * (u32)a2 = maximum timer value in usec
+ */
+ CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
};
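The conversion the comment describes is plain integer scaling. A standalone sketch using the default multiplier/divisor of 2/3 set up by vnic_dev_intr_coal_timer_info_default() above (hardware units of 1.5 us); the helper names are local to this sketch:

/*
 * usec <-> hardware-cycle conversion with the CMD_INTR_COAL_CONVERT
 * factors.  With mul=2, div=3 the round trip matches the old
 * INTR_COALESCE_USEC_TO_HW/HW_TO_USEC macros this patch removes.
 */
#include <stdint.h>
#include <stdio.h>

struct coal_info {
	uint32_t mul;
	uint32_t div;
};

static uint32_t usec_to_hw(const struct coal_info *c, uint32_t usec)
{
	return (usec * c->mul) / c->div;	/* intr_timer_cycles */
}

static uint32_t hw_to_usec(const struct coal_info *c, uint32_t hw)
{
	return (hw * c->div) / c->mul;		/* intr_timer_usec */
}

int main(void)
{
	struct coal_info def = { .mul = 2, .div = 3 };

	/* 0xffff hardware units back to usec gives the default max_usec */
	printf("max usec: %u\n", (unsigned)hw_to_usec(&def, 0xffff));
	printf("30 us -> %u hw units\n", (unsigned)usec_to_hw(&def, 30));
	printf("20 hw units -> %u us\n", (unsigned)hw_to_usec(&def, 20));
	return 0;
}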
/* CMD_ENABLE2 flags */
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index e8740e3704e..609542848e0 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -20,10 +20,6 @@
#ifndef _VNIC_ENIC_H_
#define _VNIC_ENIC_H_
-/* Hardware intr coalesce timer is in units of 1.5us */
-#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2/3)
-#define INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3/2)
-
/* Device-specific region: enet configuration */
struct vnic_enet_config {
u32 flags;
@@ -51,4 +47,11 @@ struct vnic_enet_config {
#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */
#define VENETF_LOOP 0x800 /* Loopback enabled */
+#define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */
+#define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */
+
+#define VENET_INTR_MODE_ANY 0 /* Try MSI-X, then MSI, then INTx */
+#define VENET_INTR_MODE_MSI 1 /* Try MSI then INTx */
+#define VENET_INTR_MODE_INTX 2 /* Try INTx only */
+
#endif /* _VNIC_ENIC_H_ */
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
index 3873771d75c..0ca107f7bc8 100644
--- a/drivers/net/enic/vnic_intr.c
+++ b/drivers/net/enic/vnic_intr.c
@@ -46,7 +46,7 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
return 0;
}
-void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion)
{
vnic_intr_coalescing_timer_set(intr, coalescing_timer);
@@ -56,9 +56,10 @@ void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
}
void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
- unsigned int coalescing_timer)
+ u32 coalescing_timer)
{
- iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+ iowrite32(vnic_dev_intr_coal_timer_usec_to_hw(intr->vdev,
+ coalescing_timer), &intr->ctrl->coalescing_timer);
}
void vnic_intr_clean(struct vnic_intr *intr)
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
index 09dc0b73ff4..2b163639229 100644
--- a/drivers/net/enic/vnic_intr.h
+++ b/drivers/net/enic/vnic_intr.h
@@ -24,8 +24,6 @@
#include "vnic_dev.h"
-#define VNIC_INTR_TIMER_MAX 0xffff
-
#define VNIC_INTR_TIMER_TYPE_ABS 0
#define VNIC_INTR_TIMER_TYPE_QUIET 1
@@ -104,10 +102,10 @@ static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
void vnic_intr_free(struct vnic_intr *intr);
int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
unsigned int index);
-void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
+void vnic_intr_init(struct vnic_intr *intr, u32 coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion);
void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
- unsigned int coalescing_timer);
+ u32 coalescing_timer);
void vnic_intr_clean(struct vnic_intr *intr);
#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index c353bf3113c..814c187d5f9 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -391,13 +391,13 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_iounmap;
- ep->tx_ring = (struct epic_tx_desc *)ring_space;
+ ep->tx_ring = ring_space;
ep->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_tx;
- ep->rx_ring = (struct epic_rx_desc *)ring_space;
+ ep->rx_ring = ring_space;
ep->rx_ring_dma = ring_dma;
if (dev->mem_start) {
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index 0ba5e7b9058..7a09575ecff 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -54,6 +54,7 @@ static const char version[] =
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index a83dd312c3a..8abbe1d8282 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -11,8 +11,10 @@
* Written by Thierry Reding <thierry.reding@avionic-design.de>
*/
+#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mii.h>
#include <linux/phy.h>
@@ -874,6 +876,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
spin_unlock_irq(&priv->lock);
+ skb_tx_timestamp(skb);
out:
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -965,7 +968,7 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
priv = netdev_priv(netdev);
priv->netdev = netdev;
priv->dma_alloc = 0;
- priv->io_region_size = mmio->end - mmio->start + 1;
+ priv->io_region_size = resource_size(mmio);
priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
resource_size(mmio));
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index b5f6173130f..05a5f71451a 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -1008,15 +1008,13 @@ static int ewrk3_rx(struct net_device *dev)
}
}
p = skb->data; /* Look at the dest addr */
- if (p[0] & 0x01) { /* Multicast/Broadcast */
- if ((*(s16 *) & p[0] == -1) && (*(s16 *) & p[2] == -1) && (*(s16 *) & p[4] == -1)) {
+ if (is_multicast_ether_addr(p)) {
+ if (is_broadcast_ether_addr(p)) {
lp->pktStats.broadcast++;
} else {
lp->pktStats.multicast++;
}
- } else if ((*(s16 *) & p[0] == *(s16 *) & dev->dev_addr[0]) &&
- (*(s16 *) & p[2] == *(s16 *) & dev->dev_addr[2]) &&
- (*(s16 *) & p[4] == *(s16 *) & dev->dev_addr[4])) {
+ } else if (compare_ether_addr(p, dev->dev_addr) == 0) {
lp->pktStats.unicast++;
}
lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
@@ -1171,7 +1169,7 @@ static void SetMulticastFilter(struct net_device *dev)
struct netdev_hw_addr *ha;
u_long iobase = dev->base_addr;
int i;
- char *addrs, bit, byte;
+ char bit, byte;
short __iomem *p = lp->mctbl;
u16 hashcode;
u32 crc;
@@ -1213,25 +1211,22 @@ static void SetMulticastFilter(struct net_device *dev)
/* Update table */
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
- if ((*addrs & 0x01) == 1) { /* multicast address? */
- crc = ether_crc_le(ETH_ALEN, addrs);
- hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
- byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
- bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
+ byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
+ bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
- if (lp->shmem_length == IO_ONLY) {
- u_char tmp;
+ if (lp->shmem_length == IO_ONLY) {
+ u_char tmp;
- outw(PAGE0_HTE + byte, EWRK3_PIR1);
- tmp = inb(EWRK3_DATA);
- tmp |= bit;
- outw(PAGE0_HTE + byte, EWRK3_PIR1);
- outb(tmp, EWRK3_DATA);
- } else {
- writeb(readb(lp->mctbl + byte) | bit, lp->mctbl + byte);
- }
+ outw(PAGE0_HTE + byte, EWRK3_PIR1);
+ tmp = inb(EWRK3_DATA);
+ tmp |= bit;
+ outw(PAGE0_HTE + byte, EWRK3_PIR1);
+ outb(tmp, EWRK3_DATA);
+ } else {
+ writeb(readb(lp->mctbl + byte) | bit, lp->mctbl + byte);
}
}
}
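The simplified multicast loop above derives the filter position purely from the CRC of the address: keep the 9 least-significant bits as the hashcode, use bits 3-8 as the byte index and bits 0-2 as the bit within that byte. A standalone sketch; the CRC routine is a generic bit-by-bit little-endian CRC-32 standing in for the kernel's ether_crc_le():

/*
 * Compute which byte/bit of the 512-bit hash filter a multicast MAC
 * address selects, mirroring the byte/bit math in SetMulticastFilter().
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t crc32_le(const uint8_t *data, int len)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		uint8_t byte = data[i];

		for (bit = 0; bit < 8; bit++, byte >>= 1) {
			if ((crc ^ byte) & 1)
				crc = (crc >> 1) ^ 0xedb88320;
			else
				crc >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = crc32_le(mac, 6);
	unsigned int hashcode = crc & ((1 << 9) - 1);	/* 9 LSBs of CRC */
	unsigned int byte = hashcode >> 3;	/* bits 3-8: byte in filter */
	unsigned int bit = 1u << (hashcode & 0x07); /* bits 0-2: bit in byte */

	printf("hashcode %u -> filter byte %u, mask 0x%02x\n",
	       hashcode, byte, bit);
	return 0;
}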
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index dd54abe2f71..fa8677c3238 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -566,7 +566,7 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
err = -ENOMEM;
goto err_out_free_dev;
}
- np->rx_ring = (struct fealnx_desc *)ring_space;
+ np->rx_ring = ring_space;
np->rx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
@@ -574,7 +574,7 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
err = -ENOMEM;
goto err_out_free_rx;
}
- np->tx_ring = (struct fealnx_desc *)ring_space;
+ np->tx_ring = ring_space;
np->tx_ring_dma = ring_dma;
/* find the connected MII xcvrs */
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 885d8baff7d..e8266ccf818 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -44,6 +44,10 @@
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
#include <asm/cacheflush.h>
@@ -66,17 +70,42 @@
#define FEC_QUIRK_ENET_MAC (1 << 0)
/* Controller needs driver to swap frame */
#define FEC_QUIRK_SWAP_FRAME (1 << 1)
+/* Controller uses gasket */
+#define FEC_QUIRK_USE_GASKET (1 << 2)
static struct platform_device_id fec_devtype[] = {
{
+ /* keep it for coldfire */
.name = DRIVER_NAME,
.driver_data = 0,
}, {
+ .name = "imx25-fec",
+ .driver_data = FEC_QUIRK_USE_GASKET,
+ }, {
+ .name = "imx27-fec",
+ .driver_data = 0,
+ }, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
- },
- { }
+ }, {
+ /* sentinel */
+ }
};
+MODULE_DEVICE_TABLE(platform, fec_devtype);
+
+enum imx_fec_type {
+ IMX25_FEC = 1, /* runs on i.mx25/50/53 */
+ IMX27_FEC, /* runs on i.mx27/35/51 */
+ IMX28_FEC,
+};
+
+static const struct of_device_id fec_dt_ids[] = {
+ { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
+ { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
+ { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fec_dt_ids);
static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
@@ -324,6 +353,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
fep->cur_tx = bdp;
+ skb_tx_timestamp(skb);
+
spin_unlock_irqrestore(&fep->hw_lock, flags);
return NETDEV_TX_OK;
@@ -425,7 +456,7 @@ fec_restart(struct net_device *ndev, int duplex)
} else {
#ifdef FEC_MIIGSK_ENR
- if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+ if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
/* disable the gasket and wait */
writel(0, fep->hwp + FEC_MIIGSK_ENR);
while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
@@ -434,8 +465,11 @@ fec_restart(struct net_device *ndev, int duplex)
/*
* configure the gasket:
* RMII, 50 MHz, no loopback, no echo
+ * MII, 25 MHz, no loopback, no echo
*/
- writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+ writel((fep->phy_interface == PHY_INTERFACE_MODE_RMII) ?
+ 1 : 0, fep->hwp + FEC_MIIGSK_CFGR);
+
/* re-enable the gasket */
writel(2, fep->hwp + FEC_MIIGSK_ENR);
@@ -650,7 +684,8 @@ fec_enet_rx(struct net_device *ndev)
skb_put(skb, pkt_len - 4); /* Make room */
skb_copy_to_linear_data(skb, data, pkt_len - 4);
skb->protocol = eth_type_trans(skb, ndev);
- netif_rx(skb);
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb);
}
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
@@ -731,8 +766,22 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
*/
iap = macaddr;
+#ifdef CONFIG_OF
/*
- * 2) from flash or fuse (via platform data)
+ * 2) from device tree data
+ */
+ if (!is_valid_ether_addr(iap)) {
+ struct device_node *np = fep->pdev->dev.of_node;
+ if (np) {
+ const char *mac = of_get_mac_address(np);
+ if (mac)
+ iap = (unsigned char *) mac;
+ }
+ }
+#endif
+
+ /*
+ * 3) from flash or fuse (via platform data)
*/
if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
@@ -745,7 +794,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
}
/*
- * 3) FEC mac registers set by bootloader
+ * 4) FEC mac registers set by bootloader
*/
if (!is_valid_ether_addr(iap)) {
*((unsigned long *) &tmpaddr[0]) =
@@ -1224,10 +1273,6 @@ static void set_multicast_list(struct net_device *ndev)
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
netdev_for_each_mc_addr(ha, ndev) {
- /* Only support group multicast for now */
- if (!(ha->addr[0] & 1))
- continue;
-
/* calculate crc32 value of mac address */
crc = 0xffffffff;
@@ -1355,6 +1400,52 @@ static int fec_enet_init(struct net_device *ndev)
return 0;
}
+#ifdef CONFIG_OF
+static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ if (np)
+ return of_get_phy_mode(np);
+
+ return -ENODEV;
+}
+
+static int __devinit fec_reset_phy(struct platform_device *pdev)
+{
+ int err, phy_reset;
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
+ err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset");
+ if (err) {
+ pr_warn("FEC: failed to get gpio phy-reset: %d\n", err);
+ return err;
+ }
+ msleep(1);
+ gpio_set_value(phy_reset, 1);
+
+ return 0;
+}
+#else /* CONFIG_OF */
+static inline int fec_get_phy_mode_dt(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+
+static inline int fec_reset_phy(struct platform_device *pdev)
+{
+ /*
+	 * In the platform-probe case, the PHY reset has already been
+	 * done by the machine/board setup code.
+ */
+ return 0;
+}
+#endif /* CONFIG_OF */
+
static int __devinit
fec_probe(struct platform_device *pdev)
{
@@ -1363,6 +1454,11 @@ fec_probe(struct platform_device *pdev)
struct net_device *ndev;
int i, irq, ret = 0;
struct resource *r;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(fec_dt_ids, &pdev->dev);
+ if (of_id)
+ pdev->id_entry = of_id->data;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r)
@@ -1394,9 +1490,18 @@ fec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
- pdata = pdev->dev.platform_data;
- if (pdata)
- fep->phy_interface = pdata->phy;
+ ret = fec_get_phy_mode_dt(pdev);
+ if (ret < 0) {
+ pdata = pdev->dev.platform_data;
+ if (pdata)
+ fep->phy_interface = pdata->phy;
+ else
+ fep->phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ fep->phy_interface = ret;
+ }
+
+ fec_reset_phy(pdev);
/* This device has up to three irqs on some platforms */
for (i = 0; i < 3; i++) {
@@ -1531,6 +1636,7 @@ static struct platform_driver fec_driver = {
#ifdef CONFIG_PM
.pm = &fec_pm_ops,
#endif
+ .of_match_table = fec_dt_ids,
},
.id_table = fec_devtype,
.probe = fec_probe,
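
The fec.c changes above wire one driver up to both legacy platform data and the device tree: an of_device_id table is matched in probe, the PHY mode comes from of_get_phy_mode(), and the MAC-address search order gains a device-tree step via of_get_mac_address(). A stripped-down sketch of that probe pattern follows, using the kernel APIs of this era; my_dt_ids, "vendor,my-ethernet" and my_probe() are hypothetical, not FEC code.

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/string.h>

static const struct of_device_id my_dt_ids[] = {
	{ .compatible = "vendor,my-ethernet" },
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id;
	unsigned char addr[ETH_ALEN] = { 0 };
	int phy_mode = PHY_INTERFACE_MODE_MII;	/* default, as in the patch */

	/* per-variant driver data is available when probed from the DT */
	of_id = of_match_device(my_dt_ids, &pdev->dev);

	if (np) {
		const char *mac = of_get_mac_address(np);
		int ret = of_get_phy_mode(np);

		if (ret >= 0)
			phy_mode = ret;
		if (mac)
			memcpy(addr, mac, ETH_ALEN);
	}

	if (!is_valid_ether_addr(addr))
		random_ether_addr(addr);	/* last-resort fallback */

	dev_info(&pdev->dev, "%s, phy mode %d, MAC %pM\n",
		 of_id ? "DT probe" : "platform probe", phy_mode, addr);
	return 0;
}
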
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 9f81b1ac130..cb4416e591f 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -14,6 +14,7 @@
*
*/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -22,6 +23,7 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/crc32.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
@@ -335,6 +337,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
bd->skb_pa = dma_map_single(dev->dev.parent, skb->data, skb->len,
DMA_TO_DEVICE);
+ skb_tx_timestamp(skb);
bcom_submit_next_buffer(priv->tx_dmatsk, skb);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -434,7 +437,8 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
length = status & BCOM_FEC_RX_BD_LEN_MASK;
skb_put(rskb, length - 4); /* length without CRC32 */
rskb->protocol = eth_type_trans(rskb, dev);
- netif_rx(rskb);
+		if (!skb_defer_rx_timestamp(rskb))
+ netif_rx(rskb);
spin_lock(&priv->lock);
}
@@ -867,10 +871,11 @@ static int __devinit mpc52xx_fec_probe(struct platform_device *op)
"Error while parsing device node resource\n" );
goto err_netdev;
}
- if ((mem.end - mem.start + 1) < sizeof(struct mpc52xx_fec)) {
+ if (resource_size(&mem) < sizeof(struct mpc52xx_fec)) {
printk(KERN_ERR DRIVER_NAME
- " - invalid resource size (%lx < %x), check mpc52xx_devices.c\n",
- (unsigned long)(mem.end - mem.start + 1), sizeof(struct mpc52xx_fec));
+ " - invalid resource size (%lx < %x), check mpc52xx_devices.c\n",
+ (unsigned long)resource_size(&mem),
+ sizeof(struct mpc52xx_fec));
rv = -EINVAL;
goto err_netdev;
}
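
The fec.c, fec_mpc52xx.c and fs_enet hunks in this range all add the same pair of software-timestamping hooks: skb_tx_timestamp() right before the frame is handed to the DMA engine, and skb_defer_rx_timestamp() on receive so a timestamping PHY can claim the packet before netif_rx(). A minimal sketch of where the calls sit, with my_tx_tail() and my_rx_deliver() as hypothetical helpers:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_tx_tail(struct sk_buff *skb)
{
	/* must run after the descriptor is filled, just before kicking DMA */
	skb_tx_timestamp(skb);
}

static void my_rx_deliver(struct sk_buff *skb)
{
	/* a PHY time stamper may consume the skb; otherwise deliver it */
	if (!skb_defer_rx_timestamp(skb))
		netif_rx(skb);
}
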
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 537b6957bb7..6d5fbd4d425 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -820,9 +820,6 @@ struct fe_priv {
struct nv_skb_map *tx_end_flip;
int tx_stop;
- /* vlan fields */
- struct vlan_group *vlangrp;
-
/* msi/msi-x fields */
u32 msi_flags;
struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
@@ -2766,17 +2763,20 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
prefetch(skb->data);
- if (likely(!np->vlangrp)) {
- napi_gro_receive(&np->napi, skb);
- } else {
- vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
- if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
- vlan_gro_receive(&np->napi, np->vlangrp,
- vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
- } else {
- napi_gro_receive(&np->napi, skb);
- }
+ vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
+
+ /*
+	 * We need to check NETIF_F_HW_VLAN_RX here.
+	 * Even if vlan rx accel is disabled,
+	 * NV_RX3_VLAN_TAG_PRESENT is pseudo-randomly set.
+ */
+ if (dev->features & NETIF_F_HW_VLAN_RX &&
+ vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
+ u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
+
+ __vlan_hwaccel_put_tag(skb, vid);
}
+ napi_gro_receive(&np->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
@@ -4484,6 +4484,27 @@ static u32 nv_fix_features(struct net_device *dev, u32 features)
return features;
}
+static void nv_vlan_mode(struct net_device *dev, u32 features)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ spin_lock_irq(&np->lock);
+
+ if (features & NETIF_F_HW_VLAN_RX)
+ np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
+ else
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
+
+ if (features & NETIF_F_HW_VLAN_TX)
+ np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
+ else
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
+
+ writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+
+ spin_unlock_irq(&np->lock);
+}
+
static int nv_set_features(struct net_device *dev, u32 features)
{
struct fe_priv *np = netdev_priv(dev);
@@ -4504,6 +4525,9 @@ static int nv_set_features(struct net_device *dev, u32 features)
spin_unlock_irq(&np->lock);
}
+ if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
+ nv_vlan_mode(dev, features);
+
return 0;
}
@@ -4879,29 +4903,6 @@ static const struct ethtool_ops ops = {
.self_test = nv_self_test,
};
-static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct fe_priv *np = get_nvpriv(dev);
-
- spin_lock_irq(&np->lock);
-
- /* save vlan group */
- np->vlangrp = grp;
-
- if (grp) {
- /* enable vlan on MAC */
- np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
- } else {
- /* disable vlan on MAC */
- np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
- np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
- }
-
- writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
-
- spin_unlock_irq(&np->lock);
-}
-
/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
@@ -5208,7 +5209,6 @@ static const struct net_device_ops nv_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = nv_set_mac_address,
.ndo_set_multicast_list = nv_set_multicast,
- .ndo_vlan_rx_register = nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = nv_poll_controller,
#endif
@@ -5226,7 +5226,6 @@ static const struct net_device_ops nv_netdev_ops_optimized = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = nv_set_mac_address,
.ndo_set_multicast_list = nv_set_multicast,
- .ndo_vlan_rx_register = nv_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = nv_poll_controller,
#endif
@@ -5339,15 +5338,16 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_RXCSUM;
- dev->features |= dev->hw_features;
}
np->vlanctl_bits = 0;
if (id->driver_data & DEV_HAS_VLAN) {
np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
- dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
+ dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
}
+ dev->features |= dev->hw_features;
+
np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
@@ -5615,6 +5615,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
goto out_error;
}
+ if (id->driver_data & DEV_HAS_VLAN)
+ nv_vlan_mode(dev, dev->features);
+
netif_carrier_off(dev);
dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
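
The forcedeth rework above replaces the old vlan_group/ndo_vlan_rx_register model with the feature-flag one: VLAN offload is advertised in hw_features, toggled from ndo_set_features() via nv_vlan_mode(), and on receive the tag is simply attached to the skb before GRO. A sketch of the receive side using the same era APIs; my_gro_deliver() is hypothetical, not forcedeth code.

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static void my_gro_deliver(struct napi_struct *napi, struct net_device *dev,
			   struct sk_buff *skb, bool tag_present, u16 vid)
{
	/* only attach the tag when VLAN RX acceleration is enabled */
	if ((dev->features & NETIF_F_HW_VLAN_RX) && tag_present)
		__vlan_hwaccel_put_tag(skb, vid);

	napi_gro_receive(napi, skb);
}
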
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 21abb5c01a5..329ef231a09 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -697,6 +697,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
sc |= BD_ENET_TX_PAD;
CBDS_SC(bdp, sc);
+ skb_tx_timestamp(skb);
+
(*fep->ops->tx_kickstart)(dev);
spin_unlock_irqrestore(&fep->tx_lock, flags);
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index ad297544071..b09270b5d0a 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -120,7 +120,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
if (ret)
return ret;
- if (res.end - res.start < 13)
+ if (resource_size(&res) <= 13)
return -ENODEV;
/* This should really encode the pin number as well, but all
@@ -139,7 +139,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
return -ENODEV;
mdc_pin = *data;
- bitbang->dir = ioremap(res.start, res.end - res.start + 1);
+ bitbang->dir = ioremap(res.start, resource_size(&res));
if (!bitbang->dir)
return -ENOMEM;
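
resource_size() from <linux/ioport.h> evaluates to end - start + 1, so the open-coded arithmetic in the mii-bitbang hunk above (and in the neighbouring mii-fec and fec_mpc52xx hunks) can be replaced without changing behaviour; note that "end - start < 13" and "resource_size() <= 13" are the same test. A one-line sketch, with region_fits() as a hypothetical helper:

#include <linux/ioport.h>

/* resource_size(r) is r->end - r->start + 1 */
static bool region_fits(const struct resource *res, resource_size_t need)
{
	return resource_size(res) >= need;
}
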
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index 6a2e150e75b..e0e9d6c35d8 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -136,7 +136,7 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
- fec->fecp = ioremap(res.start, res.end - res.start + 1);
+ fec->fecp = ioremap(res.start, resource_size(&res));
if (!fec->fecp)
goto out_fec;
diff --git a/drivers/net/ftgmac100.c b/drivers/net/ftgmac100.c
new file mode 100644
index 00000000000..54709af917e
--- /dev/null
+++ b/drivers/net/ftgmac100.c
@@ -0,0 +1,1365 @@
+/*
+ * Faraday FTGMAC100 Gigabit Ethernet
+ *
+ * (C) Copyright 2009-2011 Faraday Technology
+ * Po-Yu Chuang <ratbert@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <net/ip.h>
+
+#include "ftgmac100.h"
+
+#define DRV_NAME "ftgmac100"
+#define DRV_VERSION "0.7"
+
+#define RX_QUEUE_ENTRIES 256 /* must be power of 2 */
+#define TX_QUEUE_ENTRIES 512 /* must be power of 2 */
+
+#define MAX_PKT_SIZE 1518
+#define RX_BUF_SIZE PAGE_SIZE /* must be smaller than 0x3fff */
+
+/******************************************************************************
+ * private data
+ *****************************************************************************/
+struct ftgmac100_descs {
+ struct ftgmac100_rxdes rxdes[RX_QUEUE_ENTRIES];
+ struct ftgmac100_txdes txdes[TX_QUEUE_ENTRIES];
+};
+
+struct ftgmac100 {
+ struct resource *res;
+ void __iomem *base;
+ int irq;
+
+ struct ftgmac100_descs *descs;
+ dma_addr_t descs_dma_addr;
+
+ unsigned int rx_pointer;
+ unsigned int tx_clean_pointer;
+ unsigned int tx_pointer;
+ unsigned int tx_pending;
+
+ spinlock_t tx_lock;
+
+ struct net_device *netdev;
+ struct device *dev;
+ struct napi_struct napi;
+
+ struct mii_bus *mii_bus;
+ int phy_irq[PHY_MAX_ADDR];
+ struct phy_device *phydev;
+ int old_speed;
+};
+
+static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes, gfp_t gfp);
+
+/******************************************************************************
+ * internal functions (hardware register access)
+ *****************************************************************************/
+#define INT_MASK_ALL_ENABLED (FTGMAC100_INT_RPKT_LOST | \
+ FTGMAC100_INT_XPKT_ETH | \
+ FTGMAC100_INT_XPKT_LOST | \
+ FTGMAC100_INT_AHB_ERR | \
+ FTGMAC100_INT_PHYSTS_CHG | \
+ FTGMAC100_INT_RPKT_BUF | \
+ FTGMAC100_INT_NO_RXBUF)
+
+static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr)
+{
+ iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR);
+}
+
+static void ftgmac100_set_rx_buffer_size(struct ftgmac100 *priv,
+ unsigned int size)
+{
+ size = FTGMAC100_RBSR_SIZE(size);
+ iowrite32(size, priv->base + FTGMAC100_OFFSET_RBSR);
+}
+
+static void ftgmac100_set_normal_prio_tx_ring_base(struct ftgmac100 *priv,
+ dma_addr_t addr)
+{
+ iowrite32(addr, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
+}
+
+static void ftgmac100_txdma_normal_prio_start_polling(struct ftgmac100 *priv)
+{
+ iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
+}
+
+static int ftgmac100_reset_hw(struct ftgmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ int i;
+
+ /* NOTE: reset clears all registers */
+ iowrite32(FTGMAC100_MACCR_SW_RST, priv->base + FTGMAC100_OFFSET_MACCR);
+ for (i = 0; i < 5; i++) {
+ unsigned int maccr;
+
+ maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
+ if (!(maccr & FTGMAC100_MACCR_SW_RST))
+ return 0;
+
+ udelay(1000);
+ }
+
+ netdev_err(netdev, "software reset failed\n");
+ return -EIO;
+}
+
+static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac)
+{
+ unsigned int maddr = mac[0] << 8 | mac[1];
+ unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
+
+ iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR);
+ iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
+}
+
+static void ftgmac100_init_hw(struct ftgmac100 *priv)
+{
+ /* setup ring buffer base registers */
+ ftgmac100_set_rx_ring_base(priv,
+ priv->descs_dma_addr +
+ offsetof(struct ftgmac100_descs, rxdes));
+ ftgmac100_set_normal_prio_tx_ring_base(priv,
+ priv->descs_dma_addr +
+ offsetof(struct ftgmac100_descs, txdes));
+
+ ftgmac100_set_rx_buffer_size(priv, RX_BUF_SIZE);
+
+ iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1), priv->base + FTGMAC100_OFFSET_APTC);
+
+ ftgmac100_set_mac(priv, priv->netdev->dev_addr);
+}
+
+#define MACCR_ENABLE_ALL (FTGMAC100_MACCR_TXDMA_EN | \
+ FTGMAC100_MACCR_RXDMA_EN | \
+ FTGMAC100_MACCR_TXMAC_EN | \
+ FTGMAC100_MACCR_RXMAC_EN | \
+ FTGMAC100_MACCR_FULLDUP | \
+ FTGMAC100_MACCR_CRC_APD | \
+ FTGMAC100_MACCR_RX_RUNT | \
+ FTGMAC100_MACCR_RX_BROADPKT)
+
+static void ftgmac100_start_hw(struct ftgmac100 *priv, int speed)
+{
+ int maccr = MACCR_ENABLE_ALL;
+
+ switch (speed) {
+ default:
+ case 10:
+ break;
+
+ case 100:
+ maccr |= FTGMAC100_MACCR_FAST_MODE;
+ break;
+
+ case 1000:
+ maccr |= FTGMAC100_MACCR_GIGA_MODE;
+ break;
+ }
+
+ iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
+}
+
+static void ftgmac100_stop_hw(struct ftgmac100 *priv)
+{
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
+}
+
+/******************************************************************************
+ * internal functions (receive descriptor)
+ *****************************************************************************/
+static bool ftgmac100_rxdes_first_segment(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FRS);
+}
+
+static bool ftgmac100_rxdes_last_segment(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_LRS);
+}
+
+static bool ftgmac100_rxdes_packet_ready(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY);
+}
+
+static void ftgmac100_rxdes_set_dma_own(struct ftgmac100_rxdes *rxdes)
+{
+ /* clear status bits */
+ rxdes->rxdes0 &= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
+}
+
+static bool ftgmac100_rxdes_rx_error(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ERR);
+}
+
+static bool ftgmac100_rxdes_crc_error(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_CRC_ERR);
+}
+
+static bool ftgmac100_rxdes_frame_too_long(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_FTL);
+}
+
+static bool ftgmac100_rxdes_runt(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RUNT);
+}
+
+static bool ftgmac100_rxdes_odd_nibble(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RX_ODD_NB);
+}
+
+static unsigned int ftgmac100_rxdes_data_length(struct ftgmac100_rxdes *rxdes)
+{
+ return le32_to_cpu(rxdes->rxdes0) & FTGMAC100_RXDES0_VDBC;
+}
+
+static bool ftgmac100_rxdes_multicast(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_MULTICAST);
+}
+
+static void ftgmac100_rxdes_set_end_of_ring(struct ftgmac100_rxdes *rxdes)
+{
+ rxdes->rxdes0 |= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
+}
+
+static void ftgmac100_rxdes_set_dma_addr(struct ftgmac100_rxdes *rxdes,
+ dma_addr_t addr)
+{
+ rxdes->rxdes3 = cpu_to_le32(addr);
+}
+
+static dma_addr_t ftgmac100_rxdes_get_dma_addr(struct ftgmac100_rxdes *rxdes)
+{
+ return le32_to_cpu(rxdes->rxdes3);
+}
+
+static bool ftgmac100_rxdes_is_tcp(struct ftgmac100_rxdes *rxdes)
+{
+ return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) ==
+ cpu_to_le32(FTGMAC100_RXDES1_PROT_TCPIP);
+}
+
+static bool ftgmac100_rxdes_is_udp(struct ftgmac100_rxdes *rxdes)
+{
+ return (rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_PROT_MASK)) ==
+ cpu_to_le32(FTGMAC100_RXDES1_PROT_UDPIP);
+}
+
+static bool ftgmac100_rxdes_tcpcs_err(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_TCP_CHKSUM_ERR);
+}
+
+static bool ftgmac100_rxdes_udpcs_err(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_UDP_CHKSUM_ERR);
+}
+
+static bool ftgmac100_rxdes_ipcs_err(struct ftgmac100_rxdes *rxdes)
+{
+ return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_IP_CHKSUM_ERR);
+}
+
+/*
+ * rxdes2 is not used by the hardware; we use it to keep track of the page.
+ * Since the hardware never touches it, we can skip cpu_to_le32()/le32_to_cpu().
+ */
+static void ftgmac100_rxdes_set_page(struct ftgmac100_rxdes *rxdes, struct page *page)
+{
+ rxdes->rxdes2 = (unsigned int)page;
+}
+
+static struct page *ftgmac100_rxdes_get_page(struct ftgmac100_rxdes *rxdes)
+{
+ return (struct page *)rxdes->rxdes2;
+}
+
+/******************************************************************************
+ * internal functions (receive)
+ *****************************************************************************/
+static int ftgmac100_next_rx_pointer(int pointer)
+{
+ return (pointer + 1) & (RX_QUEUE_ENTRIES - 1);
+}
+
+static void ftgmac100_rx_pointer_advance(struct ftgmac100 *priv)
+{
+ priv->rx_pointer = ftgmac100_next_rx_pointer(priv->rx_pointer);
+}
+
+static struct ftgmac100_rxdes *ftgmac100_current_rxdes(struct ftgmac100 *priv)
+{
+ return &priv->descs->rxdes[priv->rx_pointer];
+}
+
+static struct ftgmac100_rxdes *
+ftgmac100_rx_locate_first_segment(struct ftgmac100 *priv)
+{
+ struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv);
+
+ while (ftgmac100_rxdes_packet_ready(rxdes)) {
+ if (ftgmac100_rxdes_first_segment(rxdes))
+ return rxdes;
+
+ ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rx_pointer_advance(priv);
+ rxdes = ftgmac100_current_rxdes(priv);
+ }
+
+ return NULL;
+}
+
+static bool ftgmac100_rx_packet_error(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
+{
+ struct net_device *netdev = priv->netdev;
+ bool error = false;
+
+ if (unlikely(ftgmac100_rxdes_rx_error(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx err\n");
+
+ netdev->stats.rx_errors++;
+ error = true;
+ }
+
+ if (unlikely(ftgmac100_rxdes_crc_error(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx crc err\n");
+
+ netdev->stats.rx_crc_errors++;
+ error = true;
+ } else if (unlikely(ftgmac100_rxdes_ipcs_err(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx IP checksum err\n");
+
+ error = true;
+ }
+
+ if (unlikely(ftgmac100_rxdes_frame_too_long(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx frame too long\n");
+
+ netdev->stats.rx_length_errors++;
+ error = true;
+ } else if (unlikely(ftgmac100_rxdes_runt(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx runt\n");
+
+ netdev->stats.rx_length_errors++;
+ error = true;
+ } else if (unlikely(ftgmac100_rxdes_odd_nibble(rxdes))) {
+ if (net_ratelimit())
+ netdev_info(netdev, "rx odd nibble\n");
+
+ netdev->stats.rx_length_errors++;
+ error = true;
+ }
+
+ return error;
+}
+
+static void ftgmac100_rx_drop_packet(struct ftgmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftgmac100_rxdes *rxdes = ftgmac100_current_rxdes(priv);
+ bool done = false;
+
+ if (net_ratelimit())
+ netdev_dbg(netdev, "drop packet %p\n", rxdes);
+
+ do {
+ if (ftgmac100_rxdes_last_segment(rxdes))
+ done = true;
+
+ ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rx_pointer_advance(priv);
+ rxdes = ftgmac100_current_rxdes(priv);
+ } while (!done && ftgmac100_rxdes_packet_ready(rxdes));
+
+ netdev->stats.rx_dropped++;
+}
+
+static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftgmac100_rxdes *rxdes;
+ struct sk_buff *skb;
+ bool done = false;
+
+ rxdes = ftgmac100_rx_locate_first_segment(priv);
+ if (!rxdes)
+ return false;
+
+ if (unlikely(ftgmac100_rx_packet_error(priv, rxdes))) {
+ ftgmac100_rx_drop_packet(priv);
+ return true;
+ }
+
+ /* start processing */
+ skb = netdev_alloc_skb_ip_align(netdev, 128);
+ if (unlikely(!skb)) {
+ if (net_ratelimit())
+ netdev_err(netdev, "rx skb alloc failed\n");
+
+ ftgmac100_rx_drop_packet(priv);
+ return true;
+ }
+
+ if (unlikely(ftgmac100_rxdes_multicast(rxdes)))
+ netdev->stats.multicast++;
+
+ /*
+	 * The hardware seems to compute checksums incorrectly on fragmented
+	 * packets, so be conservative here: on any HW checksum error, let
+	 * software verify the checksum again.
+ */
+ if ((ftgmac100_rxdes_is_tcp(rxdes) && !ftgmac100_rxdes_tcpcs_err(rxdes)) ||
+ (ftgmac100_rxdes_is_udp(rxdes) && !ftgmac100_rxdes_udpcs_err(rxdes)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ do {
+ dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
+ struct page *page = ftgmac100_rxdes_get_page(rxdes);
+ unsigned int size;
+
+ dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ size = ftgmac100_rxdes_data_length(rxdes);
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, size);
+
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += size;
+
+ if (ftgmac100_rxdes_last_segment(rxdes))
+ done = true;
+
+ ftgmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
+
+ ftgmac100_rx_pointer_advance(priv);
+ rxdes = ftgmac100_current_rxdes(priv);
+ } while (!done);
+
+ __pskb_pull_tail(skb, min(skb->len, 64U));
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += skb->len;
+
+ /* push packet to protocol stack */
+ napi_gro_receive(&priv->napi, skb);
+
+ (*processed)++;
+ return true;
+}
+
+/******************************************************************************
+ * internal functions (transmit descriptor)
+ *****************************************************************************/
+static void ftgmac100_txdes_reset(struct ftgmac100_txdes *txdes)
+{
+ /* clear all except end of ring bit */
+ txdes->txdes0 &= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
+ txdes->txdes1 = 0;
+ txdes->txdes2 = 0;
+ txdes->txdes3 = 0;
+}
+
+static bool ftgmac100_txdes_owned_by_dma(struct ftgmac100_txdes *txdes)
+{
+ return txdes->txdes0 & cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
+}
+
+static void ftgmac100_txdes_set_dma_own(struct ftgmac100_txdes *txdes)
+{
+ /*
+	 * Make sure the DMA own bit is not set before the other
+	 * descriptor fields have been written.
+ */
+ wmb();
+ txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
+}
+
+static void ftgmac100_txdes_set_end_of_ring(struct ftgmac100_txdes *txdes)
+{
+ txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
+}
+
+static void ftgmac100_txdes_set_first_segment(struct ftgmac100_txdes *txdes)
+{
+ txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_FTS);
+}
+
+static void ftgmac100_txdes_set_last_segment(struct ftgmac100_txdes *txdes)
+{
+ txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_LTS);
+}
+
+static void ftgmac100_txdes_set_buffer_size(struct ftgmac100_txdes *txdes,
+ unsigned int len)
+{
+ txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXBUF_SIZE(len));
+}
+
+static void ftgmac100_txdes_set_txint(struct ftgmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TXIC);
+}
+
+static void ftgmac100_txdes_set_tcpcs(struct ftgmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_TCP_CHKSUM);
+}
+
+static void ftgmac100_txdes_set_udpcs(struct ftgmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_UDP_CHKSUM);
+}
+
+static void ftgmac100_txdes_set_ipcs(struct ftgmac100_txdes *txdes)
+{
+ txdes->txdes1 |= cpu_to_le32(FTGMAC100_TXDES1_IP_CHKSUM);
+}
+
+static void ftgmac100_txdes_set_dma_addr(struct ftgmac100_txdes *txdes,
+ dma_addr_t addr)
+{
+ txdes->txdes3 = cpu_to_le32(addr);
+}
+
+static dma_addr_t ftgmac100_txdes_get_dma_addr(struct ftgmac100_txdes *txdes)
+{
+ return le32_to_cpu(txdes->txdes3);
+}
+
+/*
+ * txdes2 is not used by the hardware; we use it to keep track of the socket
+ * buffer. Since the hardware never touches it, we can skip
+ * cpu_to_le32()/le32_to_cpu().
+ */
+static void ftgmac100_txdes_set_skb(struct ftgmac100_txdes *txdes,
+ struct sk_buff *skb)
+{
+ txdes->txdes2 = (unsigned int)skb;
+}
+
+static struct sk_buff *ftgmac100_txdes_get_skb(struct ftgmac100_txdes *txdes)
+{
+ return (struct sk_buff *)txdes->txdes2;
+}
+
+/******************************************************************************
+ * internal functions (transmit)
+ *****************************************************************************/
+static int ftgmac100_next_tx_pointer(int pointer)
+{
+ return (pointer + 1) & (TX_QUEUE_ENTRIES - 1);
+}
+
+static void ftgmac100_tx_pointer_advance(struct ftgmac100 *priv)
+{
+ priv->tx_pointer = ftgmac100_next_tx_pointer(priv->tx_pointer);
+}
+
+static void ftgmac100_tx_clean_pointer_advance(struct ftgmac100 *priv)
+{
+ priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv->tx_clean_pointer);
+}
+
+static struct ftgmac100_txdes *ftgmac100_current_txdes(struct ftgmac100 *priv)
+{
+ return &priv->descs->txdes[priv->tx_pointer];
+}
+
+static struct ftgmac100_txdes *
+ftgmac100_current_clean_txdes(struct ftgmac100 *priv)
+{
+ return &priv->descs->txdes[priv->tx_clean_pointer];
+}
+
+static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftgmac100_txdes *txdes;
+ struct sk_buff *skb;
+ dma_addr_t map;
+
+ if (priv->tx_pending == 0)
+ return false;
+
+ txdes = ftgmac100_current_clean_txdes(priv);
+
+ if (ftgmac100_txdes_owned_by_dma(txdes))
+ return false;
+
+ skb = ftgmac100_txdes_get_skb(txdes);
+ map = ftgmac100_txdes_get_dma_addr(txdes);
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+
+ dev_kfree_skb(skb);
+
+ ftgmac100_txdes_reset(txdes);
+
+ ftgmac100_tx_clean_pointer_advance(priv);
+
+ spin_lock(&priv->tx_lock);
+ priv->tx_pending--;
+ spin_unlock(&priv->tx_lock);
+ netif_wake_queue(netdev);
+
+ return true;
+}
+
+static void ftgmac100_tx_complete(struct ftgmac100 *priv)
+{
+ while (ftgmac100_tx_complete_packet(priv))
+ ;
+}
+
+static int ftgmac100_xmit(struct ftgmac100 *priv, struct sk_buff *skb,
+ dma_addr_t map)
+{
+ struct net_device *netdev = priv->netdev;
+ struct ftgmac100_txdes *txdes;
+ unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
+
+ txdes = ftgmac100_current_txdes(priv);
+ ftgmac100_tx_pointer_advance(priv);
+
+ /* setup TX descriptor */
+ ftgmac100_txdes_set_skb(txdes, skb);
+ ftgmac100_txdes_set_dma_addr(txdes, map);
+ ftgmac100_txdes_set_buffer_size(txdes, len);
+
+ ftgmac100_txdes_set_first_segment(txdes);
+ ftgmac100_txdes_set_last_segment(txdes);
+ ftgmac100_txdes_set_txint(txdes);
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ __be16 protocol = skb->protocol;
+
+ if (protocol == cpu_to_be16(ETH_P_IP)) {
+ u8 ip_proto = ip_hdr(skb)->protocol;
+
+ ftgmac100_txdes_set_ipcs(txdes);
+ if (ip_proto == IPPROTO_TCP)
+ ftgmac100_txdes_set_tcpcs(txdes);
+ else if (ip_proto == IPPROTO_UDP)
+ ftgmac100_txdes_set_udpcs(txdes);
+ }
+ }
+
+ spin_lock(&priv->tx_lock);
+ priv->tx_pending++;
+ if (priv->tx_pending == TX_QUEUE_ENTRIES)
+ netif_stop_queue(netdev);
+
+ /* start transmit */
+ ftgmac100_txdes_set_dma_own(txdes);
+ spin_unlock(&priv->tx_lock);
+
+ ftgmac100_txdma_normal_prio_start_polling(priv);
+
+ return NETDEV_TX_OK;
+}
+
+/******************************************************************************
+ * internal functions (buffer)
+ *****************************************************************************/
+static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes, gfp_t gfp)
+{
+ struct net_device *netdev = priv->netdev;
+ struct page *page;
+ dma_addr_t map;
+
+ page = alloc_page(gfp);
+ if (!page) {
+ if (net_ratelimit())
+ netdev_err(netdev, "failed to allocate rx page\n");
+ return -ENOMEM;
+ }
+
+ map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, map))) {
+ if (net_ratelimit())
+ netdev_err(netdev, "failed to map rx page\n");
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ ftgmac100_rxdes_set_page(rxdes, page);
+ ftgmac100_rxdes_set_dma_addr(rxdes, map);
+ ftgmac100_rxdes_set_dma_own(rxdes);
+ return 0;
+}
+
+static void ftgmac100_free_buffers(struct ftgmac100 *priv)
+{
+ int i;
+
+ for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+ struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+ struct page *page = ftgmac100_rxdes_get_page(rxdes);
+ dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
+
+ if (!page)
+ continue;
+
+ dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+ __free_page(page);
+ }
+
+ for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+ struct ftgmac100_txdes *txdes = &priv->descs->txdes[i];
+ struct sk_buff *skb = ftgmac100_txdes_get_skb(txdes);
+ dma_addr_t map = ftgmac100_txdes_get_dma_addr(txdes);
+
+ if (!skb)
+ continue;
+
+ dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ }
+
+ dma_free_coherent(priv->dev, sizeof(struct ftgmac100_descs),
+ priv->descs, priv->descs_dma_addr);
+}
+
+static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
+{
+ int i;
+
+ priv->descs = dma_alloc_coherent(priv->dev,
+ sizeof(struct ftgmac100_descs),
+ &priv->descs_dma_addr, GFP_KERNEL);
+ if (!priv->descs)
+ return -ENOMEM;
+
+ memset(priv->descs, 0, sizeof(struct ftgmac100_descs));
+
+ /* initialize RX ring */
+ ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
+
+ for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+ struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
+
+ if (ftgmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL))
+ goto err;
+ }
+
+ /* initialize TX ring */
+ ftgmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
+ return 0;
+
+err:
+ ftgmac100_free_buffers(priv);
+ return -ENOMEM;
+}
+
+/******************************************************************************
+ * internal functions (mdio)
+ *****************************************************************************/
+static void ftgmac100_adjust_link(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = priv->phydev;
+ int ier;
+
+ if (phydev->speed == priv->old_speed)
+ return;
+
+ priv->old_speed = phydev->speed;
+
+ ier = ioread32(priv->base + FTGMAC100_OFFSET_IER);
+
+ /* disable all interrupts */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+
+ netif_stop_queue(netdev);
+ ftgmac100_stop_hw(priv);
+
+ netif_start_queue(netdev);
+ ftgmac100_init_hw(priv);
+ ftgmac100_start_hw(priv, phydev->speed);
+
+ /* re-enable interrupts */
+ iowrite32(ier, priv->base + FTGMAC100_OFFSET_IER);
+}
+
+static int ftgmac100_mii_probe(struct ftgmac100 *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct phy_device *phydev = NULL;
+ int i;
+
+	/* search for a connected PHY device */
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *tmp = priv->mii_bus->phy_map[i];
+
+ if (tmp) {
+ phydev = tmp;
+ break;
+ }
+ }
+
+	/* now we should have a proper phydev to attach to */
+ if (!phydev) {
+ netdev_info(netdev, "%s: no PHY found\n", netdev->name);
+ return -ENODEV;
+ }
+
+ phydev = phy_connect(netdev, dev_name(&phydev->dev),
+ &ftgmac100_adjust_link, 0,
+ PHY_INTERFACE_MODE_GMII);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
+ return PTR_ERR(phydev);
+ }
+
+ priv->phydev = phydev;
+ return 0;
+}
+
+/******************************************************************************
+ * struct mii_bus functions
+ *****************************************************************************/
+static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct net_device *netdev = bus->priv;
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ unsigned int phycr;
+ int i;
+
+ phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
+
+ /* preserve MDC cycle threshold */
+ phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
+
+ phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
+ FTGMAC100_PHYCR_REGAD(regnum) |
+ FTGMAC100_PHYCR_MIIRD;
+
+ iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
+
+ for (i = 0; i < 10; i++) {
+ phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
+
+ if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
+ int data;
+
+ data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
+ return FTGMAC100_PHYDATA_MIIRDATA(data);
+ }
+
+ udelay(100);
+ }
+
+ netdev_err(netdev, "mdio read timed out\n");
+ return -EIO;
+}
+
+static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
+ int regnum, u16 value)
+{
+ struct net_device *netdev = bus->priv;
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ unsigned int phycr;
+ int data;
+ int i;
+
+ phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
+
+ /* preserve MDC cycle threshold */
+ phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
+
+ phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
+ FTGMAC100_PHYCR_REGAD(regnum) |
+ FTGMAC100_PHYCR_MIIWR;
+
+ data = FTGMAC100_PHYDATA_MIIWDATA(value);
+
+ iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
+ iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
+
+ for (i = 0; i < 10; i++) {
+ phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
+
+ if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
+ return 0;
+
+ udelay(100);
+ }
+
+ netdev_err(netdev, "mdio write timed out\n");
+ return -EIO;
+}
+
+static int ftgmac100_mdiobus_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+/******************************************************************************
+ * struct ethtool_ops functions
+ *****************************************************************************/
+static void ftgmac100_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, dev_name(&netdev->dev));
+}
+
+static int ftgmac100_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int ftgmac100_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static const struct ethtool_ops ftgmac100_ethtool_ops = {
+ .set_settings = ftgmac100_set_settings,
+ .get_settings = ftgmac100_get_settings,
+ .get_drvinfo = ftgmac100_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+/******************************************************************************
+ * interrupt handler
+ *****************************************************************************/
+static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ if (likely(netif_running(netdev))) {
+ /* Disable interrupts for polling */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ * struct napi_struct functions
+ *****************************************************************************/
+static int ftgmac100_poll(struct napi_struct *napi, int budget)
+{
+ struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
+ struct net_device *netdev = priv->netdev;
+ unsigned int status;
+ bool completed = true;
+ int rx = 0;
+
+ status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
+ iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+
+ if (status & (FTGMAC100_INT_RPKT_BUF | FTGMAC100_INT_NO_RXBUF)) {
+ /*
+ * FTGMAC100_INT_RPKT_BUF:
+ * RX DMA has received packets into RX buffer successfully
+ *
+ * FTGMAC100_INT_NO_RXBUF:
+ * RX buffer unavailable
+ */
+ bool retry;
+
+ do {
+ retry = ftgmac100_rx_packet(priv, &rx);
+ } while (retry && rx < budget);
+
+ if (retry && rx == budget)
+ completed = false;
+ }
+
+ if (status & (FTGMAC100_INT_XPKT_ETH | FTGMAC100_INT_XPKT_LOST)) {
+ /*
+ * FTGMAC100_INT_XPKT_ETH:
+ * packet transmitted to ethernet successfully
+ *
+ * FTGMAC100_INT_XPKT_LOST:
+ * packet transmitted to ethernet lost due to late
+ * collision or excessive collision
+ */
+ ftgmac100_tx_complete(priv);
+ }
+
+ if (status & (FTGMAC100_INT_NO_RXBUF | FTGMAC100_INT_RPKT_LOST |
+ FTGMAC100_INT_AHB_ERR | FTGMAC100_INT_PHYSTS_CHG)) {
+ if (net_ratelimit())
+ netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
+ status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "",
+ status & FTGMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
+ status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
+ status & FTGMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");
+
+ if (status & FTGMAC100_INT_NO_RXBUF) {
+ /* RX buffer unavailable */
+ netdev->stats.rx_over_errors++;
+ }
+
+ if (status & FTGMAC100_INT_RPKT_LOST) {
+ /* received packet lost due to RX FIFO full */
+ netdev->stats.rx_fifo_errors++;
+ }
+ }
+
+ if (completed) {
+ napi_complete(napi);
+
+ /* enable all interrupts */
+ iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER);
+ }
+
+ return rx;
+}
+
+/******************************************************************************
+ * struct net_device_ops functions
+ *****************************************************************************/
+static int ftgmac100_open(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ int err;
+
+ err = ftgmac100_alloc_buffers(priv);
+ if (err) {
+ netdev_err(netdev, "failed to allocate buffers\n");
+ goto err_alloc;
+ }
+
+ err = request_irq(priv->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
+ if (err) {
+ netdev_err(netdev, "failed to request irq %d\n", priv->irq);
+ goto err_irq;
+ }
+
+ priv->rx_pointer = 0;
+ priv->tx_clean_pointer = 0;
+ priv->tx_pointer = 0;
+ priv->tx_pending = 0;
+
+ err = ftgmac100_reset_hw(priv);
+ if (err)
+ goto err_hw;
+
+ ftgmac100_init_hw(priv);
+ ftgmac100_start_hw(priv, 10);
+
+ phy_start(priv->phydev);
+
+ napi_enable(&priv->napi);
+ netif_start_queue(netdev);
+
+ /* enable all interrupts */
+ iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER);
+ return 0;
+
+err_hw:
+ free_irq(priv->irq, netdev);
+err_irq:
+ ftgmac100_free_buffers(priv);
+err_alloc:
+ return err;
+}
+
+static int ftgmac100_stop(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ /* disable all interrupts */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+
+ netif_stop_queue(netdev);
+ napi_disable(&priv->napi);
+ phy_stop(priv->phydev);
+
+ ftgmac100_stop_hw(priv);
+ free_irq(priv->irq, netdev);
+ ftgmac100_free_buffers(priv);
+
+ return 0;
+}
+
+static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ dma_addr_t map;
+
+ if (unlikely(skb->len > MAX_PKT_SIZE)) {
+ if (net_ratelimit())
+ netdev_dbg(netdev, "tx packet too big\n");
+
+ netdev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, map))) {
+ /* drop packet */
+ if (net_ratelimit())
+ netdev_err(netdev, "map socket buffer failed\n");
+
+ netdev->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ return ftgmac100_xmit(priv, skb, map);
+}
+
+/* optional */
+static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ return phy_mii_ioctl(priv->phydev, ifr, cmd);
+}
+
+static const struct net_device_ops ftgmac100_netdev_ops = {
+ .ndo_open = ftgmac100_open,
+ .ndo_stop = ftgmac100_stop,
+ .ndo_start_xmit = ftgmac100_hard_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = ftgmac100_do_ioctl,
+};
+
+/******************************************************************************
+ * struct platform_driver functions
+ *****************************************************************************/
+static int ftgmac100_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int irq;
+ struct net_device *netdev;
+ struct ftgmac100 *priv;
+ int err;
+ int i;
+
+ if (!pdev)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ /* setup net_device */
+ netdev = alloc_etherdev(sizeof(*priv));
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ SET_ETHTOOL_OPS(netdev, &ftgmac100_ethtool_ops);
+ netdev->netdev_ops = &ftgmac100_netdev_ops;
+ netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
+
+ platform_set_drvdata(pdev, netdev);
+
+ /* setup private data */
+ priv = netdev_priv(netdev);
+ priv->netdev = netdev;
+ priv->dev = &pdev->dev;
+
+ spin_lock_init(&priv->tx_lock);
+
+ /* initialize NAPI */
+ netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
+
+ /* map io memory */
+ priv->res = request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev));
+ if (!priv->res) {
+ dev_err(&pdev->dev, "Could not reserve memory region\n");
+ err = -ENOMEM;
+ goto err_req_mem;
+ }
+
+ priv->base = ioremap(res->start, resource_size(res));
+ if (!priv->base) {
+ dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ priv->irq = irq;
+
+ /* initialize mdio bus */
+ priv->mii_bus = mdiobus_alloc();
+ if (!priv->mii_bus) {
+ err = -EIO;
+ goto err_alloc_mdiobus;
+ }
+
+ priv->mii_bus->name = "ftgmac100_mdio";
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "ftgmac100_mii");
+
+ priv->mii_bus->priv = netdev;
+ priv->mii_bus->read = ftgmac100_mdiobus_read;
+ priv->mii_bus->write = ftgmac100_mdiobus_write;
+ priv->mii_bus->reset = ftgmac100_mdiobus_reset;
+ priv->mii_bus->irq = priv->phy_irq;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ priv->mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(priv->mii_bus);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
+ goto err_register_mdiobus;
+ }
+
+ err = ftgmac100_mii_probe(priv);
+ if (err) {
+ dev_err(&pdev->dev, "MII Probe failed!\n");
+ goto err_mii_probe;
+ }
+
+ /* register network device */
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto err_register_netdev;
+ }
+
+ netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ random_ether_addr(netdev->dev_addr);
+ netdev_info(netdev, "generated random MAC address %pM\n",
+ netdev->dev_addr);
+ }
+
+ return 0;
+
+err_register_netdev:
+ phy_disconnect(priv->phydev);
+err_mii_probe:
+ mdiobus_unregister(priv->mii_bus);
+err_register_mdiobus:
+ mdiobus_free(priv->mii_bus);
+err_alloc_mdiobus:
+ iounmap(priv->base);
+err_ioremap:
+ release_resource(priv->res);
+err_req_mem:
+ netif_napi_del(&priv->napi);
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_alloc_etherdev:
+ return err;
+}
+
+static int __exit ftgmac100_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev;
+ struct ftgmac100 *priv;
+
+ netdev = platform_get_drvdata(pdev);
+ priv = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+
+ phy_disconnect(priv->phydev);
+ mdiobus_unregister(priv->mii_bus);
+ mdiobus_free(priv->mii_bus);
+
+ iounmap(priv->base);
+ release_resource(priv->res);
+
+ netif_napi_del(&priv->napi);
+ platform_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+ return 0;
+}
+
+static struct platform_driver ftgmac100_driver = {
+ .probe = ftgmac100_probe,
+ .remove = __exit_p(ftgmac100_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+/******************************************************************************
+ * initialization / finalization
+ *****************************************************************************/
+static int __init ftgmac100_init(void)
+{
+ pr_info("Loading version " DRV_VERSION " ...\n");
+ return platform_driver_register(&ftgmac100_driver);
+}
+
+static void __exit ftgmac100_exit(void)
+{
+ platform_driver_unregister(&ftgmac100_driver);
+}
+
+module_init(ftgmac100_init);
+module_exit(ftgmac100_exit);
+
+MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
+MODULE_DESCRIPTION("FTGMAC100 driver");
+MODULE_LICENSE("GPL");
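
One detail of the new ftgmac100 driver worth calling out is the descriptor hand-off: ftgmac100_txdes_set_dma_own() issues a wmb() so every other descriptor field is visible to the device before the OWN bit flips. Below is a minimal, driver-independent sketch of that ordering; my_desc and my_hand_to_hw() are hypothetical, and wmb() is assumed to come from the usual arch barrier headers, as in the driver above.

#include <linux/types.h>

#define MY_DESC_OWN	(1u << 31)	/* set = owned by the DMA engine */

struct my_desc {
	u32 ctrl;
	u32 buf_addr;
};

static void my_hand_to_hw(struct my_desc *d, u32 addr, u32 flags)
{
	d->buf_addr = addr;
	d->ctrl = flags & ~MY_DESC_OWN;	/* fill everything except OWN */
	wmb();				/* fields must land before the OWN bit */
	d->ctrl |= MY_DESC_OWN;		/* hardware may now consume the descriptor */
}
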
diff --git a/drivers/net/ftgmac100.h b/drivers/net/ftgmac100.h
new file mode 100644
index 00000000000..13408d448b0
--- /dev/null
+++ b/drivers/net/ftgmac100.h
@@ -0,0 +1,246 @@
+/*
+ * Faraday FTGMAC100 Gigabit Ethernet
+ *
+ * (C) Copyright 2009-2011 Faraday Technology
+ * Po-Yu Chuang <ratbert@faraday-tech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __FTGMAC100_H
+#define __FTGMAC100_H
+
+#define FTGMAC100_OFFSET_ISR 0x00
+#define FTGMAC100_OFFSET_IER 0x04
+#define FTGMAC100_OFFSET_MAC_MADR 0x08
+#define FTGMAC100_OFFSET_MAC_LADR 0x0c
+#define FTGMAC100_OFFSET_MAHT0 0x10
+#define FTGMAC100_OFFSET_MAHT1 0x14
+#define FTGMAC100_OFFSET_NPTXPD 0x18
+#define FTGMAC100_OFFSET_RXPD 0x1c
+#define FTGMAC100_OFFSET_NPTXR_BADR 0x20
+#define FTGMAC100_OFFSET_RXR_BADR 0x24
+#define FTGMAC100_OFFSET_HPTXPD 0x28
+#define FTGMAC100_OFFSET_HPTXR_BADR 0x2c
+#define FTGMAC100_OFFSET_ITC 0x30
+#define FTGMAC100_OFFSET_APTC 0x34
+#define FTGMAC100_OFFSET_DBLAC 0x38
+#define FTGMAC100_OFFSET_DMAFIFOS 0x3c
+#define FTGMAC100_OFFSET_REVR 0x40
+#define FTGMAC100_OFFSET_FEAR 0x44
+#define FTGMAC100_OFFSET_TPAFCR 0x48
+#define FTGMAC100_OFFSET_RBSR 0x4c
+#define FTGMAC100_OFFSET_MACCR 0x50
+#define FTGMAC100_OFFSET_MACSR 0x54
+#define FTGMAC100_OFFSET_TM 0x58
+#define FTGMAC100_OFFSET_PHYCR 0x60
+#define FTGMAC100_OFFSET_PHYDATA 0x64
+#define FTGMAC100_OFFSET_FCR 0x68
+#define FTGMAC100_OFFSET_BPR 0x6c
+#define FTGMAC100_OFFSET_WOLCR 0x70
+#define FTGMAC100_OFFSET_WOLSR 0x74
+#define FTGMAC100_OFFSET_WFCRC 0x78
+#define FTGMAC100_OFFSET_WFBM1 0x80
+#define FTGMAC100_OFFSET_WFBM2 0x84
+#define FTGMAC100_OFFSET_WFBM3 0x88
+#define FTGMAC100_OFFSET_WFBM4 0x8c
+#define FTGMAC100_OFFSET_NPTXR_PTR 0x90
+#define FTGMAC100_OFFSET_HPTXR_PTR 0x94
+#define FTGMAC100_OFFSET_RXR_PTR 0x98
+#define FTGMAC100_OFFSET_TX 0xa0
+#define FTGMAC100_OFFSET_TX_MCOL_SCOL 0xa4
+#define FTGMAC100_OFFSET_TX_ECOL_FAIL 0xa8
+#define FTGMAC100_OFFSET_TX_LCOL_UND 0xac
+#define FTGMAC100_OFFSET_RX 0xb0
+#define FTGMAC100_OFFSET_RX_BC 0xb4
+#define FTGMAC100_OFFSET_RX_MC 0xb8
+#define FTGMAC100_OFFSET_RX_PF_AEP 0xbc
+#define FTGMAC100_OFFSET_RX_RUNT 0xc0
+#define FTGMAC100_OFFSET_RX_CRCER_FTL 0xc4
+#define FTGMAC100_OFFSET_RX_COL_LOST 0xc8
+
+/*
+ * Interrupt status register & interrupt enable register
+ */
+#define FTGMAC100_INT_RPKT_BUF (1 << 0)
+#define FTGMAC100_INT_RPKT_FIFO (1 << 1)
+#define FTGMAC100_INT_NO_RXBUF (1 << 2)
+#define FTGMAC100_INT_RPKT_LOST (1 << 3)
+#define FTGMAC100_INT_XPKT_ETH (1 << 4)
+#define FTGMAC100_INT_XPKT_FIFO (1 << 5)
+#define FTGMAC100_INT_NO_NPTXBUF (1 << 6)
+#define FTGMAC100_INT_XPKT_LOST (1 << 7)
+#define FTGMAC100_INT_AHB_ERR (1 << 8)
+#define FTGMAC100_INT_PHYSTS_CHG (1 << 9)
+#define FTGMAC100_INT_NO_HPTXBUF (1 << 10)
+
+/*
+ * Interrupt timer control register
+ */
+#define FTGMAC100_ITC_RXINT_CNT(x) (((x) & 0xf) << 0)
+#define FTGMAC100_ITC_RXINT_THR(x) (((x) & 0x7) << 4)
+#define FTGMAC100_ITC_RXINT_TIME_SEL (1 << 7)
+#define FTGMAC100_ITC_TXINT_CNT(x) (((x) & 0xf) << 8)
+#define FTGMAC100_ITC_TXINT_THR(x) (((x) & 0x7) << 12)
+#define FTGMAC100_ITC_TXINT_TIME_SEL (1 << 15)
+
+/*
+ * Automatic polling timer control register
+ */
+#define FTGMAC100_APTC_RXPOLL_CNT(x) (((x) & 0xf) << 0)
+#define FTGMAC100_APTC_RXPOLL_TIME_SEL (1 << 4)
+#define FTGMAC100_APTC_TXPOLL_CNT(x) (((x) & 0xf) << 8)
+#define FTGMAC100_APTC_TXPOLL_TIME_SEL (1 << 12)
+
+/*
+ * DMA burst length and arbitration control register
+ */
+#define FTGMAC100_DBLAC_RXFIFO_LTHR(x) (((x) & 0x7) << 0)
+#define FTGMAC100_DBLAC_RXFIFO_HTHR(x) (((x) & 0x7) << 3)
+#define FTGMAC100_DBLAC_RX_THR_EN (1 << 6)
+#define FTGMAC100_DBLAC_RXBURST_SIZE(x) (((x) & 0x3) << 8)
+#define FTGMAC100_DBLAC_TXBURST_SIZE(x) (((x) & 0x3) << 10)
+#define FTGMAC100_DBLAC_RXDES_SIZE(x) (((x) & 0xf) << 12)
+#define FTGMAC100_DBLAC_TXDES_SIZE(x) (((x) & 0xf) << 16)
+#define FTGMAC100_DBLAC_IFG_CNT(x) (((x) & 0x7) << 20)
+#define FTGMAC100_DBLAC_IFG_INC (1 << 23)
+
+/*
+ * DMA FIFO status register
+ */
+#define FTGMAC100_DMAFIFOS_RXDMA1_SM(dmafifos) ((dmafifos) & 0xf)
+#define FTGMAC100_DMAFIFOS_RXDMA2_SM(dmafifos) (((dmafifos) >> 4) & 0xf)
+#define FTGMAC100_DMAFIFOS_RXDMA3_SM(dmafifos) (((dmafifos) >> 8) & 0x7)
+#define FTGMAC100_DMAFIFOS_TXDMA1_SM(dmafifos) (((dmafifos) >> 12) & 0xf)
+#define FTGMAC100_DMAFIFOS_TXDMA2_SM(dmafifos) (((dmafifos) >> 16) & 0x3)
+#define FTGMAC100_DMAFIFOS_TXDMA3_SM(dmafifos) (((dmafifos) >> 18) & 0xf)
+#define FTGMAC100_DMAFIFOS_RXFIFO_EMPTY (1 << 26)
+#define FTGMAC100_DMAFIFOS_TXFIFO_EMPTY (1 << 27)
+#define FTGMAC100_DMAFIFOS_RXDMA_GRANT (1 << 28)
+#define FTGMAC100_DMAFIFOS_TXDMA_GRANT (1 << 29)
+#define FTGMAC100_DMAFIFOS_RXDMA_REQ (1 << 30)
+#define FTGMAC100_DMAFIFOS_TXDMA_REQ (1 << 31)
+
+/*
+ * Receive buffer size register
+ */
+#define FTGMAC100_RBSR_SIZE(x) ((x) & 0x3fff)
+
+/*
+ * MAC control register
+ */
+#define FTGMAC100_MACCR_TXDMA_EN (1 << 0)
+#define FTGMAC100_MACCR_RXDMA_EN (1 << 1)
+#define FTGMAC100_MACCR_TXMAC_EN (1 << 2)
+#define FTGMAC100_MACCR_RXMAC_EN (1 << 3)
+#define FTGMAC100_MACCR_RM_VLAN (1 << 4)
+#define FTGMAC100_MACCR_HPTXR_EN (1 << 5)
+#define FTGMAC100_MACCR_LOOP_EN (1 << 6)
+#define FTGMAC100_MACCR_ENRX_IN_HALFTX (1 << 7)
+#define FTGMAC100_MACCR_FULLDUP (1 << 8)
+#define FTGMAC100_MACCR_GIGA_MODE (1 << 9)
+#define FTGMAC100_MACCR_CRC_APD (1 << 10)
+#define FTGMAC100_MACCR_RX_RUNT (1 << 12)
+#define FTGMAC100_MACCR_JUMBO_LF (1 << 13)
+#define FTGMAC100_MACCR_RX_ALL (1 << 14)
+#define FTGMAC100_MACCR_HT_MULTI_EN (1 << 15)
+#define FTGMAC100_MACCR_RX_MULTIPKT (1 << 16)
+#define FTGMAC100_MACCR_RX_BROADPKT (1 << 17)
+#define FTGMAC100_MACCR_DISCARD_CRCERR (1 << 18)
+#define FTGMAC100_MACCR_FAST_MODE (1 << 19)
+#define FTGMAC100_MACCR_SW_RST (1 << 31)
+
+/*
+ * PHY control register
+ */
+#define FTGMAC100_PHYCR_MDC_CYCTHR_MASK 0x3f
+#define FTGMAC100_PHYCR_MDC_CYCTHR(x) ((x) & 0x3f)
+#define FTGMAC100_PHYCR_PHYAD(x) (((x) & 0x1f) << 16)
+#define FTGMAC100_PHYCR_REGAD(x) (((x) & 0x1f) << 21)
+#define FTGMAC100_PHYCR_MIIRD (1 << 26)
+#define FTGMAC100_PHYCR_MIIWR (1 << 27)
+
+/*
+ * PHY data register
+ */
+#define FTGMAC100_PHYDATA_MIIWDATA(x) ((x) & 0xffff)
+#define FTGMAC100_PHYDATA_MIIRDATA(phydata) (((phydata) >> 16) & 0xffff)
+
+/*
+ * Transmit descriptor, aligned to 16 bytes
+ */
+struct ftgmac100_txdes {
+ unsigned int txdes0;
+ unsigned int txdes1;
+ unsigned int txdes2; /* not used by HW */
+ unsigned int txdes3; /* TXBUF_BADR */
+} __attribute__ ((aligned(16)));
+
+#define FTGMAC100_TXDES0_TXBUF_SIZE(x) ((x) & 0x3fff)
+#define FTGMAC100_TXDES0_EDOTR (1 << 15)
+#define FTGMAC100_TXDES0_CRC_ERR (1 << 19)
+#define FTGMAC100_TXDES0_LTS (1 << 28)
+#define FTGMAC100_TXDES0_FTS (1 << 29)
+#define FTGMAC100_TXDES0_TXDMA_OWN (1 << 31)
+
+#define FTGMAC100_TXDES1_VLANTAG_CI(x) ((x) & 0xffff)
+#define FTGMAC100_TXDES1_INS_VLANTAG (1 << 16)
+#define FTGMAC100_TXDES1_TCP_CHKSUM (1 << 17)
+#define FTGMAC100_TXDES1_UDP_CHKSUM (1 << 18)
+#define FTGMAC100_TXDES1_IP_CHKSUM (1 << 19)
+#define FTGMAC100_TXDES1_LLC (1 << 22)
+#define FTGMAC100_TXDES1_TX2FIC (1 << 30)
+#define FTGMAC100_TXDES1_TXIC (1 << 31)
+
+/*
+ * Receive descriptor, aligned to 16 bytes
+ */
+struct ftgmac100_rxdes {
+ unsigned int rxdes0;
+ unsigned int rxdes1;
+ unsigned int rxdes2; /* not used by HW */
+ unsigned int rxdes3; /* RXBUF_BADR */
+} __attribute__ ((aligned(16)));
+
+#define FTGMAC100_RXDES0_VDBC 0x3fff
+#define FTGMAC100_RXDES0_EDORR (1 << 15)
+#define FTGMAC100_RXDES0_MULTICAST (1 << 16)
+#define FTGMAC100_RXDES0_BROADCAST (1 << 17)
+#define FTGMAC100_RXDES0_RX_ERR (1 << 18)
+#define FTGMAC100_RXDES0_CRC_ERR (1 << 19)
+#define FTGMAC100_RXDES0_FTL (1 << 20)
+#define FTGMAC100_RXDES0_RUNT (1 << 21)
+#define FTGMAC100_RXDES0_RX_ODD_NB (1 << 22)
+#define FTGMAC100_RXDES0_FIFO_FULL (1 << 23)
+#define FTGMAC100_RXDES0_PAUSE_OPCODE (1 << 24)
+#define FTGMAC100_RXDES0_PAUSE_FRAME (1 << 25)
+#define FTGMAC100_RXDES0_LRS (1 << 28)
+#define FTGMAC100_RXDES0_FRS (1 << 29)
+#define FTGMAC100_RXDES0_RXPKT_RDY (1 << 31)
+
+#define FTGMAC100_RXDES1_VLANTAG_CI 0xffff
+#define FTGMAC100_RXDES1_PROT_MASK (0x3 << 20)
+#define FTGMAC100_RXDES1_PROT_NONIP (0x0 << 20)
+#define FTGMAC100_RXDES1_PROT_IP (0x1 << 20)
+#define FTGMAC100_RXDES1_PROT_TCPIP (0x2 << 20)
+#define FTGMAC100_RXDES1_PROT_UDPIP (0x3 << 20)
+#define FTGMAC100_RXDES1_LLC (1 << 22)
+#define FTGMAC100_RXDES1_DF (1 << 23)
+#define FTGMAC100_RXDES1_VLANTAG_AVAIL (1 << 24)
+#define FTGMAC100_RXDES1_TCP_CHKSUM_ERR (1 << 25)
+#define FTGMAC100_RXDES1_UDP_CHKSUM_ERR (1 << 26)
+#define FTGMAC100_RXDES1_IP_CHKSUM_ERR (1 << 27)
+
+#endif /* __FTGMAC100_H */
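For illustration, a minimal sketch (not part of the patch) of how the PHYCR/PHYDATA bit definitions above are typically used for an MDIO read; the register offsets (0x60/0x64), the polling loop, and the helper name are assumptions here, and the MDC cycle-threshold bits are omitted for brevity.

#if 0	/* illustrative sketch only, not part of the patch */
static int ftgmac100_mdio_read_sketch(void __iomem *base, int phy_addr, int regnum)
{
	unsigned int phycr;
	int i;

	phycr = FTGMAC100_PHYCR_PHYAD(phy_addr) |
		FTGMAC100_PHYCR_REGAD(regnum) |
		FTGMAC100_PHYCR_MIIRD;
	iowrite32(phycr, base + 0x60);		/* assumed PHYCR offset */

	/* MIIRD clears once the read cycle has completed */
	for (i = 0; i < 10; i++) {
		phycr = ioread32(base + 0x60);
		if (!(phycr & FTGMAC100_PHYCR_MIIRD)) {
			unsigned int data = ioread32(base + 0x64); /* assumed PHYDATA offset */
			return FTGMAC100_PHYDATA_MIIRDATA(data);
		}
		udelay(100);
	}
	return -EIO;
}
#endif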
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 2dfcc804784..31d5c574e5a 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -62,6 +62,9 @@
* The driver then cleans up the buffer.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define DEBUG
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -137,8 +140,6 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull);
-static void gfar_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
@@ -213,8 +214,7 @@ static int gfar_init_bds(struct net_device *ndev)
} else {
skb = gfar_new_skb(ndev);
if (!skb) {
- pr_err("%s: Can't allocate RX buffers\n",
- ndev->name);
+ netdev_err(ndev, "Can't allocate RX buffers\n");
goto err_rxalloc_fail;
}
rx_queue->rx_skbuff[j] = skb;
@@ -258,15 +258,14 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
sizeof(struct rxbd8) * priv->total_rx_ring_size,
&addr, GFP_KERNEL);
if (!vaddr) {
- if (netif_msg_ifup(priv))
- pr_err("%s: Could not allocate buffer descriptors!\n",
- ndev->name);
+ netif_err(priv, ifup, ndev,
+ "Could not allocate buffer descriptors!\n");
return -ENOMEM;
}
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
- tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
+ tx_queue->tx_bd_base = vaddr;
tx_queue->tx_bd_dma_base = addr;
tx_queue->dev = ndev;
/* enet DMA only understands physical addresses */
@@ -277,7 +276,7 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
/* Start the rx descriptor ring where the tx ring leaves off */
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
+ rx_queue->rx_bd_base = vaddr;
rx_queue->rx_bd_dma_base = addr;
rx_queue->dev = ndev;
addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
@@ -290,9 +289,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
tx_queue->tx_ring_size, GFP_KERNEL);
if (!tx_queue->tx_skbuff) {
- if (netif_msg_ifup(priv))
- pr_err("%s: Could not allocate tx_skbuff\n",
- ndev->name);
+ netif_err(priv, ifup, ndev,
+ "Could not allocate tx_skbuff\n");
goto cleanup;
}
@@ -306,9 +304,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
rx_queue->rx_ring_size, GFP_KERNEL);
if (!rx_queue->rx_skbuff) {
- if (netif_msg_ifup(priv))
- pr_err("%s: Could not allocate rx_skbuff\n",
- ndev->name);
+ netif_err(priv, ifup, ndev,
+ "Could not allocate rx_skbuff\n");
goto cleanup;
}
@@ -391,11 +388,8 @@ static void gfar_init_mac(struct net_device *ndev)
if (priv->hwts_rx_en)
rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
- /* keep vlan related bits if it's enabled */
- if (priv->vlgrp) {
+ if (ndev->features & NETIF_F_HW_VLAN_RX)
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
- tctrl |= TCTRL_VLINS;
- }
/* Init rctrl based on our settings */
gfar_write(&regs->rctrl, rctrl);
@@ -468,7 +462,6 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
.ndo_get_stats = gfar_get_stats,
- .ndo_vlan_rx_register = gfar_vlan_rx_register,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -508,10 +501,17 @@ void unlock_tx_qs(struct gfar_private *priv)
spin_unlock(&priv->tx_queue[i]->txlock);
}
+static bool gfar_is_vlan_on(struct gfar_private *priv)
+{
+ return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
+ (priv->ndev->features & NETIF_F_HW_VLAN_TX);
+}
+
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
- return priv->vlgrp || (priv->ndev->features & NETIF_F_RXCSUM) ||
+ return gfar_is_vlan_on(priv) ||
+ (priv->ndev->features & NETIF_F_RXCSUM) ||
(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
@@ -625,9 +625,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
num_tx_qs = tx_queues ? *tx_queues : 1;
if (num_tx_qs > MAX_TX_QS) {
- printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
- num_tx_qs, MAX_TX_QS);
- printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
+ pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
+ num_tx_qs, MAX_TX_QS);
+ pr_err("Cannot do alloc_etherdev, aborting\n");
return -EINVAL;
}
@@ -635,9 +635,9 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
num_rx_qs = rx_queues ? *rx_queues : 1;
if (num_rx_qs > MAX_RX_QS) {
- printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
- num_tx_qs, MAX_TX_QS);
- printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
+ pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
+ num_rx_qs, MAX_RX_QS);
+ pr_err("Cannot do alloc_etherdev, aborting\n");
return -EINVAL;
}
@@ -655,6 +655,11 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->num_rx_queues = num_rx_qs;
priv->num_grps = 0x0;
+ /* Init Rx queue filer rule set linked list */
+ INIT_LIST_HEAD(&priv->rx_list.list);
+ priv->rx_list.count = 0;
+ mutex_init(&priv->rx_queue_access);
+
model = of_get_property(np, "model", NULL);
for (i = 0; i < MAXGROUPS; i++)
@@ -1034,10 +1039,10 @@ static int gfar_probe(struct platform_device *ofdev)
NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
}
- priv->vlgrp = NULL;
-
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN)
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
+ dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ }
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
priv->extended_hash = 1;
@@ -1148,9 +1153,8 @@ static int gfar_probe(struct platform_device *ofdev)
priv->rx_queue[i]->rxic = DEFAULT_RXIC;
}
- /* enable filer if using multiple RX queues*/
- if(priv->num_rx_queues > 1)
- priv->rx_filer_enable = 1;
+ /* always enable rx filer */
+ priv->rx_filer_enable = 1;
/* Enable most messages by default */
priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -1160,8 +1164,7 @@ static int gfar_probe(struct platform_device *ofdev)
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
- dev->name);
+ pr_err("%s: Cannot register net device, aborting\n", dev->name);
goto register_fail;
}
@@ -1212,17 +1215,17 @@ static int gfar_probe(struct platform_device *ofdev)
gfar_init_sysfs(dev);
/* Print out the device info */
- printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);
+ netdev_info(dev, "mac: %pM\n", dev->dev_addr);
/* Even more device info helps when determining which kernel */
/* provided which set of benchmarks. */
- printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
+ netdev_info(dev, "Running with NAPI enabled\n");
for (i = 0; i < priv->num_rx_queues; i++)
- printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
- dev->name, i, priv->rx_queue[i]->rx_ring_size);
+ netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
+ i, priv->rx_queue[i]->rx_ring_size);
for(i = 0; i < priv->num_tx_queues; i++)
- printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
- dev->name, i, priv->tx_queue[i]->tx_ring_size);
+ netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
+ i, priv->tx_queue[i]->tx_ring_size);
return 0;
@@ -1855,34 +1858,30 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
* Transmit, and Receive */
if ((err = request_irq(grp->interruptError, gfar_error, 0,
grp->int_name_er,grp)) < 0) {
- if (netif_msg_intr(priv))
- printk(KERN_ERR "%s: Can't get IRQ %d\n",
- dev->name, grp->interruptError);
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ grp->interruptError);
goto err_irq_fail;
}
if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
0, grp->int_name_tx, grp)) < 0) {
- if (netif_msg_intr(priv))
- printk(KERN_ERR "%s: Can't get IRQ %d\n",
- dev->name, grp->interruptTransmit);
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ grp->interruptTransmit);
goto tx_irq_fail;
}
if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
grp->int_name_rx, grp)) < 0) {
- if (netif_msg_intr(priv))
- printk(KERN_ERR "%s: Can't get IRQ %d\n",
- dev->name, grp->interruptReceive);
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ grp->interruptReceive);
goto rx_irq_fail;
}
} else {
if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
grp->int_name_tx, grp)) < 0) {
- if (netif_msg_intr(priv))
- printk(KERN_ERR "%s: Can't get IRQ %d\n",
- dev->name, grp->interruptTransmit);
+ netif_err(priv, intr, dev, "Can't get IRQ %d\n",
+ grp->interruptTransmit);
goto err_irq_fail;
}
}
@@ -2289,10 +2288,25 @@ static int gfar_set_mac_address(struct net_device *dev)
return 0;
}
+/* Check if rx parser should be activated */
+void gfar_check_rx_parser_mode(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs;
+ u32 tempval;
+
+ regs = priv->gfargrp[0].regs;
+
+ tempval = gfar_read(&regs->rctrl);
+ /* If parse is no longer required, then disable parser */
+ if (tempval & RCTRL_REQ_PARSER)
+ tempval |= RCTRL_PRSDEP_INIT;
+ else
+ tempval &= ~RCTRL_PRSDEP_INIT;
+ gfar_write(&regs->rctrl, tempval);
+}
/* Enables and disables VLAN insertion/extraction */
-static void gfar_vlan_rx_register(struct net_device *dev,
- struct vlan_group *grp)
+void gfar_vlan_mode(struct net_device *dev, u32 features)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = NULL;
@@ -2303,34 +2317,30 @@ static void gfar_vlan_rx_register(struct net_device *dev,
local_irq_save(flags);
lock_rx_qs(priv);
- priv->vlgrp = grp;
-
- if (grp) {
+ if (features & NETIF_F_HW_VLAN_TX) {
/* Enable VLAN tag insertion */
tempval = gfar_read(&regs->tctrl);
tempval |= TCTRL_VLINS;
-
gfar_write(&regs->tctrl, tempval);
-
- /* Enable VLAN tag extraction */
- tempval = gfar_read(&regs->rctrl);
- tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
- gfar_write(&regs->rctrl, tempval);
} else {
/* Disable VLAN tag insertion */
tempval = gfar_read(&regs->tctrl);
tempval &= ~TCTRL_VLINS;
gfar_write(&regs->tctrl, tempval);
+ }
+ if (features & NETIF_F_HW_VLAN_RX) {
+ /* Enable VLAN tag extraction */
+ tempval = gfar_read(&regs->rctrl);
+ tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
+ gfar_write(&regs->rctrl, tempval);
+ } else {
/* Disable VLAN tag extraction */
tempval = gfar_read(&regs->rctrl);
tempval &= ~RCTRL_VLEX;
- /* If parse is no longer required, then disable parser */
- if (tempval & RCTRL_REQ_PARSER)
- tempval |= RCTRL_PRSDEP_INIT;
- else
- tempval &= ~RCTRL_PRSDEP_INIT;
gfar_write(&regs->rctrl, tempval);
+
+ gfar_check_rx_parser_mode(priv);
}
gfar_change_mtu(dev, dev->mtu);
@@ -2347,13 +2357,11 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
int oldsize = priv->rx_buffer_size;
int frame_size = new_mtu + ETH_HLEN;
- if (priv->vlgrp)
+ if (gfar_is_vlan_on(priv))
frame_size += VLAN_HLEN;
if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
- if (netif_msg_drv(priv))
- printk(KERN_ERR "%s: Invalid MTU setting\n",
- dev->name);
+ netif_err(priv, drv, dev, "Invalid MTU setting\n");
return -EINVAL;
}
@@ -2702,11 +2710,17 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Tell the skb what kind of packet this is */
skb->protocol = eth_type_trans(skb, dev);
+ /*
+ * We need to check for NETIF_F_HW_VLAN_RX here.
+ * Even if vlan rx accel is disabled, on some chips
+ * RXFCB_VLN is pseudo randomly set.
+ */
+ if (dev->features & NETIF_F_HW_VLAN_RX &&
+ fcb->flags & RXFCB_VLN)
+ __vlan_hwaccel_put_tag(skb, fcb->vlctl);
+
/* Send the packet up the stack */
- if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
- ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
- else
- ret = netif_receive_skb(skb);
+ ret = netif_receive_skb(skb);
if (NET_RX_DROP == ret)
priv->extra_stats.kernel_dropped++;
@@ -2773,9 +2787,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
gfar_process_frame(dev, skb, amount_pull);
} else {
- if (netif_msg_rx_err(priv))
- printk(KERN_WARNING
- "%s: Missing skb!\n", dev->name);
+ netif_warn(priv, rx_err, dev, "Missing skb!\n");
rx_queue->stats.rx_dropped++;
priv->extra_stats.rx_skbmissing++;
}
@@ -2978,10 +2990,9 @@ static void adjust_link(struct net_device *dev)
ecntrl &= ~(ECNTRL_R100);
break;
default:
- if (netif_msg_link(priv))
- printk(KERN_WARNING
- "%s: Ack! Speed (%d) is not 10/100/1000!\n",
- dev->name, phydev->speed);
+ netif_warn(priv, link, dev,
+ "Ack! Speed (%d) is not 10/100/1000!\n",
+ phydev->speed);
break;
}
@@ -3186,8 +3197,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
/* Hmm... */
if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
- printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
- dev->name, events, gfar_read(&regs->imask));
+ netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
+ events, gfar_read(&regs->imask));
/* Update the error counters */
if (events & IEVENT_TXE) {
@@ -3200,9 +3211,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
if (events & IEVENT_XFUN) {
unsigned long flags;
- if (netif_msg_tx_err(priv))
- printk(KERN_DEBUG "%s: TX FIFO underrun, "
- "packet dropped.\n", dev->name);
+ netif_dbg(priv, tx_err, dev,
+ "TX FIFO underrun, packet dropped\n");
dev->stats.tx_dropped++;
priv->extra_stats.tx_underrun++;
@@ -3215,8 +3225,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
unlock_tx_qs(priv);
local_irq_restore(flags);
}
- if (netif_msg_tx_err(priv))
- printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
+ netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
if (events & IEVENT_BSY) {
dev->stats.rx_errors++;
@@ -3224,29 +3233,25 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
gfar_receive(irq, grp_id);
- if (netif_msg_rx_err(priv))
- printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
- dev->name, gfar_read(&regs->rstat));
+ netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
+ gfar_read(&regs->rstat));
}
if (events & IEVENT_BABR) {
dev->stats.rx_errors++;
priv->extra_stats.rx_babr++;
- if (netif_msg_rx_err(priv))
- printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
+ netif_dbg(priv, rx_err, dev, "babbling RX error\n");
}
if (events & IEVENT_EBERR) {
priv->extra_stats.eberr++;
- if (netif_msg_rx_err(priv))
- printk(KERN_DEBUG "%s: bus error\n", dev->name);
+ netif_dbg(priv, rx_err, dev, "bus error\n");
}
- if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
- printk(KERN_DEBUG "%s: control frame\n", dev->name);
+ if (events & IEVENT_RXC)
+ netif_dbg(priv, rx_status, dev, "control frame\n");
if (events & IEVENT_BABT) {
priv->extra_stats.tx_babt++;
- if (netif_msg_tx_err(priv))
- printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
+ netif_dbg(priv, tx_err, dev, "babbling TX error\n");
}
return IRQ_HANDLED;
}
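The bulk of the gianfar.c changes above replace open-coded message-level checks with the netdev logging helpers; a short before/after sketch of the pattern (not part of the patch), using the buffer-descriptor allocation message as the example:

#if 0	/* illustrative sketch only, not part of the patch */
	/* before: manual message-level check plus printk with the device name */
	if (netif_msg_ifup(priv))
		printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
		       ndev->name);

	/* after: netif_err() performs the same msg_enable check and adds the
	 * driver/device prefix itself */
	netif_err(priv, ifup, ndev, "Could not allocate buffer descriptors!\n");
#endif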
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index ba36dc7a343..9aa43773e8e 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -47,6 +47,16 @@
#include <linux/workqueue.h>
#include <linux/ethtool.h>
+struct ethtool_flow_spec_container {
+ struct ethtool_rx_flow_spec fs;
+ struct list_head list;
+};
+
+struct ethtool_rx_list {
+ struct list_head list;
+ unsigned int count;
+};
+
/* The maximum number of packets to be handled in one call of gfar_poll */
#define GFAR_DEV_WEIGHT 64
@@ -168,6 +178,7 @@ extern const char gfar_driver_version[];
#define MACCFG2_LENGTHCHECK 0x00000010
#define MACCFG2_MPEN 0x00000008
+#define ECNTRL_FIFM 0x00008000
#define ECNTRL_INIT_SETTINGS 0x00001000
#define ECNTRL_TBI_MODE 0x00000020
#define ECNTRL_REDUCED_MODE 0x00000010
@@ -271,10 +282,11 @@ extern const char gfar_driver_version[];
#define RCTRL_TUCSEN 0x00000100
#define RCTRL_PRSDEP_MASK 0x000000c0
#define RCTRL_PRSDEP_INIT 0x000000c0
+#define RCTRL_PRSFM 0x00000020
#define RCTRL_PROM 0x00000008
#define RCTRL_EMEN 0x00000002
#define RCTRL_REQ_PARSER (RCTRL_VLEX | RCTRL_IPCSEN | \
- RCTRL_TUCSEN)
+ RCTRL_TUCSEN | RCTRL_FILREN)
#define RCTRL_CHECKSUMMING (RCTRL_IPCSEN | RCTRL_TUCSEN | \
RCTRL_PRSDEP_INIT)
#define RCTRL_EXTHASH (RCTRL_GHTX)
@@ -397,6 +409,7 @@ extern const char gfar_driver_version[];
#define RQFCR_HASHTBL_2 0x00060000
#define RQFCR_HASHTBL_3 0x00080000
#define RQFCR_HASH 0x00010000
+#define RQFCR_QUEUE 0x0000FC00
#define RQFCR_CLE 0x00000200
#define RQFCR_RJE 0x00000100
#define RQFCR_AND 0x00000080
@@ -1064,8 +1077,9 @@ struct gfar_private {
struct sk_buff_head rx_recycle;
- struct vlan_group *vlgrp;
-
+ /* RX queue filer rule set */
+ struct ethtool_rx_list rx_list;
+ struct mutex rx_queue_access;
/* Hash registers and their width */
u32 __iomem *hash_regs[16];
@@ -1142,6 +1156,16 @@ static inline void gfar_write_filer(struct gfar_private *priv,
gfar_write(&regs->rqfpr, fpr);
}
+static inline void gfar_read_filer(struct gfar_private *priv,
+ unsigned int far, unsigned int *fcr, unsigned int *fpr)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ gfar_write(&regs->rqfar, far);
+ *fcr = gfar_read(&regs->rqfcr);
+ *fpr = gfar_read(&regs->rqfpr);
+}
+
extern void lock_rx_qs(struct gfar_private *priv);
extern void lock_tx_qs(struct gfar_private *priv);
extern void unlock_rx_qs(struct gfar_private *priv);
@@ -1156,7 +1180,37 @@ extern void gfar_configure_coalescing(struct gfar_private *priv,
unsigned long tx_mask, unsigned long rx_mask);
void gfar_init_sysfs(struct net_device *dev);
int gfar_set_features(struct net_device *dev, u32 features);
+extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
+extern void gfar_vlan_mode(struct net_device *dev, u32 features);
extern const struct ethtool_ops gfar_ethtool_ops;
+#define MAX_FILER_CACHE_IDX (2*(MAX_FILER_IDX))
+
+#define RQFCR_PID_PRI_MASK 0xFFFFFFF8
+#define RQFCR_PID_L4P_MASK 0xFFFFFF00
+#define RQFCR_PID_VID_MASK 0xFFFFF000
+#define RQFCR_PID_PORT_MASK 0xFFFF0000
+#define RQFCR_PID_MAC_MASK 0xFF000000
+
+struct gfar_mask_entry {
+ unsigned int mask; /* The mask value which is valid from start to end */
+ unsigned int start;
+ unsigned int end;
+ unsigned int block; /* Same block values indicate dependent entries */
+};
+
+/* Represents a receive filer table entry */
+struct gfar_filer_entry {
+ u32 ctrl;
+ u32 prop;
+};
+
+
+/* The 20 additional entries are a shadow for one extra element */
+struct filer_table {
+ u32 index;
+ struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
+};
+
#endif /* __GIANFAR_H */
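As one possible use of the new gfar_read_filer() helper above, a hypothetical debug dump of the programmed filer table; the function name and the pr_debug usage are illustrative assumptions, not part of the driver:

#if 0	/* illustrative sketch only, not part of the patch */
static void gfar_dump_filer(struct gfar_private *priv)
{
	unsigned int fcr, fpr;
	int i;

	for (i = 0; i <= MAX_FILER_IDX; i++) {
		gfar_read_filer(priv, i, &fcr, &fpr);
		pr_debug("filer[%3d]: rqfcr=0x%08x rqfpr=0x%08x\n", i, fcr, fpr);
	}
}
#endif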
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 239e3330495..25a8c2adb00 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -16,6 +16,8 @@
* by reference.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -37,6 +39,8 @@
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
+#include <linux/sort.h>
+#include <linux/if_vlan.h>
#include "gianfar.h"
@@ -375,13 +379,13 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
/* Check the bounds of the values */
if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
pr_info("Coalescing is limited to %d microseconds\n",
- GFAR_MAX_COAL_USECS);
+ GFAR_MAX_COAL_USECS);
return -EINVAL;
}
if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
pr_info("Coalescing is limited to %d frames\n",
- GFAR_MAX_COAL_FRAMES);
+ GFAR_MAX_COAL_FRAMES);
return -EINVAL;
}
@@ -404,13 +408,13 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
/* Check the bounds of the values */
if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
pr_info("Coalescing is limited to %d microseconds\n",
- GFAR_MAX_COAL_USECS);
+ GFAR_MAX_COAL_USECS);
return -EINVAL;
}
if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
pr_info("Coalescing is limited to %d frames\n",
- GFAR_MAX_COAL_FRAMES);
+ GFAR_MAX_COAL_FRAMES);
return -EINVAL;
}
@@ -464,8 +468,7 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
return -EINVAL;
if (!is_power_of_2(rvals->rx_pending)) {
- printk("%s: Ring sizes must be a power of 2\n",
- dev->name);
+ netdev_err(dev, "Ring sizes must be a power of 2\n");
return -EINVAL;
}
@@ -473,8 +476,7 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
return -EINVAL;
if (!is_power_of_2(rvals->tx_pending)) {
- printk("%s: Ring sizes must be a power of 2\n",
- dev->name);
+ netdev_err(dev, "Ring sizes must be a power of 2\n");
return -EINVAL;
}
@@ -524,6 +526,9 @@ int gfar_set_features(struct net_device *dev, u32 features)
int err = 0, i = 0;
u32 changed = dev->features ^ features;
+ if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
+ gfar_vlan_mode(dev, features);
+
if (!(changed & NETIF_F_RXCSUM))
return 0;
@@ -681,10 +686,21 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
{
unsigned int last_rule_idx = priv->cur_filer_idx;
unsigned int cmp_rqfpr;
- unsigned int local_rqfpr[MAX_FILER_IDX + 1];
- unsigned int local_rqfcr[MAX_FILER_IDX + 1];
+ unsigned int *local_rqfpr;
+ unsigned int *local_rqfcr;
int i = 0x0, k = 0x0;
int j = MAX_FILER_IDX, l = 0x0;
+ int ret = 1;
+
+ local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
+ GFP_KERNEL);
+ local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
+ GFP_KERNEL);
+ if (!local_rqfpr || !local_rqfcr) {
+ pr_err("Out of memory\n");
+ ret = 0;
+ goto err;
+ }
switch (class) {
case TCP_V4_FLOW:
@@ -700,8 +716,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
break;
default:
- printk(KERN_ERR "Right now this class is not supported\n");
- return 0;
+ pr_err("Right now this class is not supported\n");
+ ret = 0;
+ goto err;
}
for (i = 0; i < MAX_FILER_IDX + 1; i++) {
@@ -715,9 +732,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
}
if (i == MAX_FILER_IDX + 1) {
- printk(KERN_ERR "No parse rule found, ");
- printk(KERN_ERR "can't create hash rules\n");
- return 0;
+ pr_err("No parse rule found, can't create hash rules\n");
+ ret = 0;
+ goto err;
}
/* If a match was found, then it begins the starting of a cluster rule
@@ -761,7 +778,10 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
priv->cur_filer_idx = priv->cur_filer_idx - 1;
}
- return 1;
+err:
+ kfree(local_rqfcr);
+ kfree(local_rqfpr);
+ return ret;
}
static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
@@ -773,19 +793,948 @@ static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *c
return 0;
}
+static int gfar_check_filer_hardware(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = NULL;
+ u32 i;
+
+ regs = priv->gfargrp[0].regs;
+
+ /* Check if we are in FIFO mode */
+ i = gfar_read(&regs->ecntrl);
+ i &= ECNTRL_FIFM;
+ if (i == ECNTRL_FIFM) {
+ netdev_notice(priv->ndev, "Interface in FIFO mode\n");
+ i = gfar_read(&regs->rctrl);
+ i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
+ if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
+ netdev_info(priv->ndev,
+ "Receive Queue Filtering enabled\n");
+ } else {
+ netdev_warn(priv->ndev,
+ "Receive Queue Filtering disabled\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ /* Or in standard mode */
+ else {
+ i = gfar_read(&regs->rctrl);
+ i &= RCTRL_PRSDEP_MASK;
+ if (i == RCTRL_PRSDEP_MASK) {
+ netdev_info(priv->ndev,
+ "Receive Queue Filtering enabled\n");
+ } else {
+ netdev_warn(priv->ndev,
+ "Receive Queue Filtering disabled\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* Set the properties of the arbitrary filer rule
+ * to the first 4 layer-4 bytes */
+ regs->rbifx = 0xC0C1C2C3;
+ return 0;
+}
+
+static int gfar_comp_asc(const void *a, const void *b)
+{
+ return memcmp(a, b, 4);
+}
+
+static int gfar_comp_desc(const void *a, const void *b)
+{
+ return -memcmp(a, b, 4);
+}
+
+static void gfar_swap(void *a, void *b, int size)
+{
+ u32 *_a = a;
+ u32 *_b = b;
+
+ swap(_a[0], _b[0]);
+ swap(_a[1], _b[1]);
+ swap(_a[2], _b[2]);
+ swap(_a[3], _b[3]);
+}
+
+/* Write a mask to filer cache */
+static void gfar_set_mask(u32 mask, struct filer_table *tab)
+{
+ tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
+ tab->fe[tab->index].prop = mask;
+ tab->index++;
+}
+
+/* Sets parse bits (e.g. IP or TCP) */
+static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
+{
+ gfar_set_mask(mask, tab);
+ tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE
+ | RQFCR_AND;
+ tab->fe[tab->index].prop = value;
+ tab->index++;
+}
+
+static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
+ struct filer_table *tab)
+{
+ gfar_set_mask(mask, tab);
+ tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
+ tab->fe[tab->index].prop = value;
+ tab->index++;
+}
+
+/*
+ * Sets a tuple of value and mask of type flag
+ * Example:
+ * IP-Src = 10.0.0.0/255.0.0.0
+ * value: 0x0A000000 mask: 0xFF000000 flag: RQFCR_PID_SIA
+ *
+ * Ethtool gives us value=0 and mask=~0 for a don't-care tuple,
+ * and a mask of 0 for a don't-care mask.
+ *
+ * The don't-care check and the mask adjustment for mask=0 are done for VLAN
+ * and MAC entries on an upper level (due to missing information on this
+ * level). Those entries can be discarded if they are value=0 and mask=0.
+ *
+ * Furthermore, all masks are one-padded for better hardware efficiency.
+ */
+static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
+ struct filer_table *tab)
+{
+ switch (flag) {
+ /* 3bit */
+ case RQFCR_PID_PRI:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_PRI_MASK;
+ break;
+ /* 8bit */
+ case RQFCR_PID_L4P:
+ case RQFCR_PID_TOS:
+ if (!~(mask | RQFCR_PID_L4P_MASK))
+ return;
+ if (!mask)
+ mask = ~0;
+ else
+ mask |= RQFCR_PID_L4P_MASK;
+ break;
+ /* 12bit */
+ case RQFCR_PID_VID:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_VID_MASK;
+ break;
+ /* 16bit */
+ case RQFCR_PID_DPT:
+ case RQFCR_PID_SPT:
+ case RQFCR_PID_ETY:
+ if (!~(mask | RQFCR_PID_PORT_MASK))
+ return;
+ if (!mask)
+ mask = ~0;
+ else
+ mask |= RQFCR_PID_PORT_MASK;
+ break;
+ /* 24bit */
+ case RQFCR_PID_DAH:
+ case RQFCR_PID_DAL:
+ case RQFCR_PID_SAH:
+ case RQFCR_PID_SAL:
+ if (!(value | mask))
+ return;
+ mask |= RQFCR_PID_MAC_MASK;
+ break;
+ /* for all real 32bit masks */
+ default:
+ if (!~mask)
+ return;
+ if (!mask)
+ mask = ~0;
+ break;
+ }
+ gfar_set_general_attribute(value, mask, flag, tab);
+}
+
+/* Translates value and mask for UDP, TCP or SCTP */
+static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
+ struct ethtool_tcpip4_spec *mask, struct filer_table *tab)
+{
+ gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
+ gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
+ gfar_set_attribute(value->pdst, mask->pdst, RQFCR_PID_DPT, tab);
+ gfar_set_attribute(value->psrc, mask->psrc, RQFCR_PID_SPT, tab);
+ gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
+}
+
+/* Translates value and mask for RAW-IP4 */
+static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
+ struct ethtool_usrip4_spec *mask, struct filer_table *tab)
+{
+ gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
+ gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
+ gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
+ gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
+ gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
+ tab);
+
+}
+
+/* Translates value and mask for ETHER spec */
+static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
+ struct filer_table *tab)
+{
+ u32 upper_temp_mask = 0;
+ u32 lower_temp_mask = 0;
+ /* Source address */
+ if (!is_broadcast_ether_addr(mask->h_source)) {
+
+ if (is_zero_ether_addr(mask->h_source)) {
+ upper_temp_mask = 0xFFFFFFFF;
+ lower_temp_mask = 0xFFFFFFFF;
+ } else {
+ upper_temp_mask = mask->h_source[0] << 16
+ | mask->h_source[1] << 8
+ | mask->h_source[2];
+ lower_temp_mask = mask->h_source[3] << 16
+ | mask->h_source[4] << 8
+ | mask->h_source[5];
+ }
+ /* Upper 24bit */
+ gfar_set_attribute(
+ value->h_source[0] << 16 | value->h_source[1]
+ << 8 | value->h_source[2],
+ upper_temp_mask, RQFCR_PID_SAH, tab);
+ /* And the same for the lower part */
+ gfar_set_attribute(
+ value->h_source[3] << 16 | value->h_source[4]
+ << 8 | value->h_source[5],
+ lower_temp_mask, RQFCR_PID_SAL, tab);
+ }
+ /* Destination address */
+ if (!is_broadcast_ether_addr(mask->h_dest)) {
+
+ /* Special case: the destination is the limited broadcast address */
+ if ((is_broadcast_ether_addr(value->h_dest)
+ && is_zero_ether_addr(mask->h_dest))) {
+ gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
+ } else {
+
+ if (is_zero_ether_addr(mask->h_dest)) {
+ upper_temp_mask = 0xFFFFFFFF;
+ lower_temp_mask = 0xFFFFFFFF;
+ } else {
+ upper_temp_mask = mask->h_dest[0] << 16
+ | mask->h_dest[1] << 8
+ | mask->h_dest[2];
+ lower_temp_mask = mask->h_dest[3] << 16
+ | mask->h_dest[4] << 8
+ | mask->h_dest[5];
+ }
+
+ /* Upper 24bit */
+ gfar_set_attribute(
+ value->h_dest[0] << 16
+ | value->h_dest[1] << 8
+ | value->h_dest[2],
+ upper_temp_mask, RQFCR_PID_DAH, tab);
+ /* And the same for the lower part */
+ gfar_set_attribute(
+ value->h_dest[3] << 16
+ | value->h_dest[4] << 8
+ | value->h_dest[5],
+ lower_temp_mask, RQFCR_PID_DAL, tab);
+ }
+ }
+
+ gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
+
+}
+
+/* Convert a rule to binary filter format of gianfar */
+static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
+ struct filer_table *tab)
+{
+ u32 vlan = 0, vlan_mask = 0;
+ u32 id = 0, id_mask = 0;
+ u32 cfi = 0, cfi_mask = 0;
+ u32 prio = 0, prio_mask = 0;
+
+ u32 old_index = tab->index;
+
+ /* Check if vlan is wanted */
+ if ((rule->flow_type & FLOW_EXT) && (rule->m_ext.vlan_tci != 0xFFFF)) {
+ if (!rule->m_ext.vlan_tci)
+ rule->m_ext.vlan_tci = 0xFFFF;
+
+ vlan = RQFPR_VLN;
+ vlan_mask = RQFPR_VLN;
+
+ /* Separate the fields */
+ id = rule->h_ext.vlan_tci & VLAN_VID_MASK;
+ id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
+ cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
+ cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
+ prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+
+ if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
+ vlan |= RQFPR_CFI;
+ vlan_mask |= RQFPR_CFI;
+ } else if (cfi != VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
+ vlan_mask |= RQFPR_CFI;
+ }
+ }
+
+ switch (rule->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
+ RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
+ gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
+ &rule->m_u.tcp_ip4_spec, tab);
+ break;
+ case UDP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
+ RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
+ gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
+ &rule->m_u.udp_ip4_spec, tab);
+ break;
+ case SCTP_V4_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
+ tab);
+ gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
+ gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u,
+ (struct ethtool_tcpip4_spec *) &rule->m_u, tab);
+ break;
+ case IP_USER_FLOW:
+ gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
+ tab);
+ gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
+ (struct ethtool_usrip4_spec *) &rule->m_u, tab);
+ break;
+ case ETHER_FLOW:
+ if (vlan)
+ gfar_set_parse_bits(vlan, vlan_mask, tab);
+ gfar_set_ether((struct ethhdr *) &rule->h_u,
+ (struct ethhdr *) &rule->m_u, tab);
+ break;
+ default:
+ return -1;
+ }
+
+ /* Set the vlan attributes in the end */
+ if (vlan) {
+ gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
+ gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
+ }
+
+ /* If there has been nothing written till now, it must be a default */
+ if (tab->index == old_index) {
+ gfar_set_mask(0xFFFFFFFF, tab);
+ tab->fe[tab->index].ctrl = 0x20;
+ tab->fe[tab->index].prop = 0x0;
+ tab->index++;
+ }
+
+ /* Remove last AND */
+ tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);
+
+ /* Specify which queue to use or to drop */
+ if (rule->ring_cookie == RX_CLS_FLOW_DISC)
+ tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
+ else
+ tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);
+
+ /* Only big enough entries can be clustered */
+ if (tab->index > (old_index + 2)) {
+ tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
+ tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
+ }
+
+ /* In rare cases the cache can be full while there is free space in hw */
+ if (tab->index > MAX_FILER_CACHE_IDX - 1)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Copy size filer entries */
+static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
+ struct gfar_filer_entry src[0], s32 size)
+{
+ while (size > 0) {
+ size--;
+ dst[size].ctrl = src[size].ctrl;
+ dst[size].prop = src[size].prop;
+ }
+}
+
+/* Delete the contents of the filer table between start and end
+ * and collapse the remaining entries */
+static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
+{
+ int length;
+ if (end > MAX_FILER_CACHE_IDX || end < begin)
+ return -EINVAL;
+
+ end++;
+ length = end - begin;
+
+ /* Copy */
+ while (end < tab->index) {
+ tab->fe[begin].ctrl = tab->fe[end].ctrl;
+ tab->fe[begin++].prop = tab->fe[end++].prop;
+
+ }
+ /* Fill up with don't cares */
+ while (begin < tab->index) {
+ tab->fe[begin].ctrl = 0x60;
+ tab->fe[begin].prop = 0xFFFFFFFF;
+ begin++;
+ }
+
+ tab->index -= length;
+ return 0;
+}
+
+/* Make space at the wanted location */
+static int gfar_expand_filer_entries(u32 begin, u32 length,
+ struct filer_table *tab)
+{
+ if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || begin
+ > MAX_FILER_CACHE_IDX)
+ return -EINVAL;
+
+ gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
+ tab->index - length + 1);
+
+ tab->index += length;
+ return 0;
+}
+
+static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
+{
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
+ == (RQFCR_AND | RQFCR_CLE))
+ return start;
+ }
+ return -1;
+}
+
+static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
+{
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
+ == (RQFCR_CLE))
+ return start;
+ }
+ return -1;
+}
+
+/*
+ * Uses hardwares clustering option to reduce
+ * the number of filer table entries
+ */
+static void gfar_cluster_filer(struct filer_table *tab)
+{
+ s32 i = -1, j, iend, jend;
+
+ while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
+ j = i;
+ while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
+ /*
+ * The cluster entry itself and the previous one
+ * (a mask) must be identical!
+ */
+ if (tab->fe[i].ctrl != tab->fe[j].ctrl)
+ break;
+ if (tab->fe[i].prop != tab->fe[j].prop)
+ break;
+ if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
+ break;
+ if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
+ break;
+ iend = gfar_get_next_cluster_end(i, tab);
+ jend = gfar_get_next_cluster_end(j, tab);
+ if (jend == -1 || iend == -1)
+ break;
+ /*
+ * First we make some free space where our cluster
+ * element should be. Then we copy it there and finally
+ * delete it from its old location.
+ */
+
+ if (gfar_expand_filer_entries(iend, (jend - j), tab)
+ == -EINVAL)
+ break;
+
+ gfar_copy_filer_entries(&(tab->fe[iend + 1]),
+ &(tab->fe[jend + 1]), jend - j);
+
+ if (gfar_trim_filer_entries(jend - 1,
+ jend + (jend - j), tab) == -EINVAL)
+ return;
+
+ /* Mask out cluster bit */
+ tab->fe[iend].ctrl &= ~(RQFCR_CLE);
+ }
+ }
+}
+
+/* Swaps the masked bits of a1<>a2 and b1<>b2 */
+static void gfar_swap_bits(struct gfar_filer_entry *a1,
+ struct gfar_filer_entry *a2, struct gfar_filer_entry *b1,
+ struct gfar_filer_entry *b2, u32 mask)
+{
+ u32 temp[4];
+ temp[0] = a1->ctrl & mask;
+ temp[1] = a2->ctrl & mask;
+ temp[2] = b1->ctrl & mask;
+ temp[3] = b2->ctrl & mask;
+
+ a1->ctrl &= ~mask;
+ a2->ctrl &= ~mask;
+ b1->ctrl &= ~mask;
+ b2->ctrl &= ~mask;
+
+ a1->ctrl |= temp[1];
+ a2->ctrl |= temp[0];
+ b1->ctrl |= temp[3];
+ b2->ctrl |= temp[2];
+}
+
+/*
+ * Generate a list in mask_table consisting of mask values with their start
+ * and end of validity, and a block number indicating parts that belong
+ * together (glued by ANDs)
+ */
+static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
+ struct filer_table *tab)
+{
+ u32 i, and_index = 0, block_index = 1;
+
+ for (i = 0; i < tab->index; i++) {
+
+ /* LSByte of control = 0 sets a mask */
+ if (!(tab->fe[i].ctrl & 0xF)) {
+ mask_table[and_index].mask = tab->fe[i].prop;
+ mask_table[and_index].start = i;
+ mask_table[and_index].block = block_index;
+ if (and_index >= 1)
+ mask_table[and_index - 1].end = i - 1;
+ and_index++;
+ }
+ /* cluster starts and ends will be separated because they should
+ * hold their position */
+ if (tab->fe[i].ctrl & RQFCR_CLE)
+ block_index++;
+ /* An unset AND indicates the end of a dependent block */
+ if (!(tab->fe[i].ctrl & RQFCR_AND))
+ block_index++;
+
+ }
+
+ mask_table[and_index - 1].end = i - 1;
+
+ return and_index;
+}
+
+/*
+ * Sorts the entries of mask_table by the values of the masks.
+ * Important: The 0xFF80 flags of the first and last entry of a
+ * block must hold their position (which queue, CLusterEnable, ReJEct,
+ * AND)
+ */
+static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
+ struct filer_table *temp_table, u32 and_index)
+{
+ /* Pointer to compare function (_asc or _desc) */
+ int (*gfar_comp)(const void *, const void *);
+
+ u32 i, size = 0, start = 0, prev = 1;
+ u32 old_first, old_last, new_first, new_last;
+
+ gfar_comp = &gfar_comp_desc;
+
+ for (i = 0; i < and_index; i++) {
+
+ if (prev != mask_table[i].block) {
+ old_first = mask_table[start].start + 1;
+ old_last = mask_table[i - 1].end;
+ sort(mask_table + start, size,
+ sizeof(struct gfar_mask_entry),
+ gfar_comp, &gfar_swap);
+
+ /* Toggle the sort order for every block. This makes the
+ * optimization more efficient! */
+ if (gfar_comp == gfar_comp_desc)
+ gfar_comp = &gfar_comp_asc;
+ else
+ gfar_comp = &gfar_comp_desc;
+
+ new_first = mask_table[start].start + 1;
+ new_last = mask_table[i - 1].end;
+
+ gfar_swap_bits(&temp_table->fe[new_first],
+ &temp_table->fe[old_first],
+ &temp_table->fe[new_last],
+ &temp_table->fe[old_last],
+ RQFCR_QUEUE | RQFCR_CLE |
+ RQFCR_RJE | RQFCR_AND
+ );
+
+ start = i;
+ size = 0;
+ }
+ size++;
+ prev = mask_table[i].block;
+ }
+
+}
+
+/*
+ * Reduces the number of masks needed in the filer table to save entries
+ * This is done by sorting the masks of a dependent block. A dependent block
+ * is identified by gluing ANDs or CLE. The sorting order toggles after every
+ * block. Of course entries in the scope of a mask must change their location
+ * with it.
+ */
+static int gfar_optimize_filer_masks(struct filer_table *tab)
+{
+ struct filer_table *temp_table;
+ struct gfar_mask_entry *mask_table;
+
+ u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
+ s32 ret = 0;
+
+ /* We need a copy of the filer table because
+ * we want to change its order */
+ temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
+ if (temp_table == NULL)
+ return -ENOMEM;
+ memcpy(temp_table, tab, sizeof(*temp_table));
+
+ mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
+ sizeof(struct gfar_mask_entry), GFP_KERNEL);
+
+ if (mask_table == NULL) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ and_index = gfar_generate_mask_table(mask_table, tab);
+
+ gfar_sort_mask_table(mask_table, temp_table, and_index);
+
+ /* Now we can copy the data from our duplicated filer table to
+ * the real one in the order the mask table says */
+ for (i = 0; i < and_index; i++) {
+ size = mask_table[i].end - mask_table[i].start + 1;
+ gfar_copy_filer_entries(&(tab->fe[j]),
+ &(temp_table->fe[mask_table[i].start]), size);
+ j += size;
+ }
+
+ /* And finally we just have to check for duplicated masks and drop the
+ * second ones */
+ for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
+ if (tab->fe[i].ctrl == 0x80) {
+ previous_mask = i++;
+ break;
+ }
+ }
+ for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
+ if (tab->fe[i].ctrl == 0x80) {
+ if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
+ /* Two identical ones found!
+ * So drop the second one! */
+ gfar_trim_filer_entries(i, i, tab);
+ } else
+ /* Not identical! */
+ previous_mask = i;
+ }
+ }
+
+ kfree(mask_table);
+end: kfree(temp_table);
+ return ret;
+}
+
+/* Write the bit-pattern from software's buffer to hardware registers */
+static int gfar_write_filer_table(struct gfar_private *priv,
+ struct filer_table *tab)
+{
+ u32 i = 0;
+ if (tab->index > MAX_FILER_IDX - 1)
+ return -EBUSY;
+
+ /* Avoid processing an inconsistent filer table */
+ lock_rx_qs(priv);
+
+ /* Fill regular entries */
+ for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
+ gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
+ /* Fill the rest with fall-throughs */
+ for (; i < MAX_FILER_IDX - 1; i++)
+ gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
+ /* Last entry must be default accept
+ * because that's what people expect */
+ gfar_write_filer(priv, i, 0x20, 0x0);
+
+ unlock_rx_qs(priv);
+
+ return 0;
+}
+
+static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
+ struct gfar_private *priv)
+{
+
+ if (flow->flow_type & FLOW_EXT) {
+ if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
+ netdev_warn(priv->ndev,
+ "User-specific data not supported!\n");
+ if (~flow->m_ext.vlan_etype)
+ netdev_warn(priv->ndev,
+ "VLAN-etype not supported!\n");
+ }
+ if (flow->flow_type == IP_USER_FLOW)
+ if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+ netdev_warn(priv->ndev,
+ "IP-Version differing from IPv4 not supported!\n");
+
+ return 0;
+}
+
+static int gfar_process_filer_changes(struct gfar_private *priv)
+{
+ struct ethtool_flow_spec_container *j;
+ struct filer_table *tab;
+ s32 i = 0;
+ s32 ret = 0;
+
+ /* So index is set to zero, too! */
+ tab = kzalloc(sizeof(*tab), GFP_KERNEL);
+ if (tab == NULL)
+ return -ENOMEM;
+
+ /* Now convert the existing filer data from flow_spec into
+ * the filer table's binary format */
+ list_for_each_entry(j, &priv->rx_list.list, list) {
+ ret = gfar_convert_to_filer(&j->fs, tab);
+ if (ret == -EBUSY) {
+ netdev_err(priv->ndev, "Rule not added: No free space!\n");
+ goto end;
+ }
+ if (ret == -1) {
+ netdev_err(priv->ndev, "Rule not added: Unsupported Flow-type!\n");
+ goto end;
+ }
+ }
+
+ i = tab->index;
+
+ /* Optimizations to save entries */
+ gfar_cluster_filer(tab);
+ gfar_optimize_filer_masks(tab);
+
+ pr_debug("\n\tSummary:\n"
+ "\tData on hardware: %d\n"
+ "\tCompression rate: %d%%\n",
+ tab->index, 100 - (100 * tab->index) / i);
+
+ /* Write everything to hardware */
+ ret = gfar_write_filer_table(priv, tab);
+ if (ret == -EBUSY) {
+ netdev_err(priv->ndev, "Rule not added: No free space!\n");
+ goto end;
+ }
+
+end: kfree(tab);
+ return ret;
+}
+
+static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
+{
+ u32 i = 0;
+
+ for (i = 0; i < sizeof(flow->m_u); i++)
+ flow->m_u.hdata[i] ^= 0xFF;
+
+ flow->m_ext.vlan_etype ^= 0xFFFF;
+ flow->m_ext.vlan_tci ^= 0xFFFF;
+ flow->m_ext.data[0] ^= ~0;
+ flow->m_ext.data[1] ^= ~0;
+}
+
+static int gfar_add_cls(struct gfar_private *priv,
+ struct ethtool_rx_flow_spec *flow)
+{
+ struct ethtool_flow_spec_container *temp, *comp;
+ int ret = 0;
+
+ temp = kmalloc(sizeof(*temp), GFP_KERNEL);
+ if (temp == NULL)
+ return -ENOMEM;
+ memcpy(&temp->fs, flow, sizeof(temp->fs));
+
+ gfar_invert_masks(&temp->fs);
+ ret = gfar_check_capability(&temp->fs, priv);
+ if (ret)
+ goto clean_mem;
+ /* Link in the new element at the right @location */
+ if (list_empty(&priv->rx_list.list)) {
+ ret = gfar_check_filer_hardware(priv);
+ if (ret != 0)
+ goto clean_mem;
+ list_add(&temp->list, &priv->rx_list.list);
+ goto process;
+ } else {
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location > flow->location) {
+ list_add_tail(&temp->list, &comp->list);
+ goto process;
+ }
+ if (comp->fs.location == flow->location) {
+ netdev_err(priv->ndev,
+ "Rule not added: ID %d not free!\n",
+ flow->location);
+ ret = -EBUSY;
+ goto clean_mem;
+ }
+ }
+ list_add_tail(&temp->list, &priv->rx_list.list);
+ }
+
+process:
+ ret = gfar_process_filer_changes(priv);
+ if (ret)
+ goto clean_list;
+ priv->rx_list.count++;
+ return ret;
+
+clean_list:
+ list_del(&temp->list);
+clean_mem:
+ kfree(temp);
+ return ret;
+}
+
+static int gfar_del_cls(struct gfar_private *priv, u32 loc)
+{
+ struct ethtool_flow_spec_container *comp;
+ int ret = -EINVAL;
+
+ if (list_empty(&priv->rx_list.list))
+ return ret;
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location == loc) {
+ list_del(&comp->list);
+ kfree(comp);
+ priv->rx_list.count--;
+ gfar_process_filer_changes(priv);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+
+}
+
+static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_flow_spec_container *comp;
+ int ret = -EINVAL;
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (comp->fs.location == cmd->fs.location) {
+ memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
+ gfar_invert_masks(&cmd->fs);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int gfar_get_cls_all(struct gfar_private *priv,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct ethtool_flow_spec_container *comp;
+ u32 i = 0;
+
+ list_for_each_entry(comp, &priv->rx_list.list, list) {
+ if (i <= cmd->rule_cnt) {
+ rule_locs[i] = comp->fs.location;
+ i++;
+ }
+ }
+
+ cmd->data = MAX_FILER_IDX;
+
+ return 0;
+}
+
static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct gfar_private *priv = netdev_priv(dev);
int ret = 0;
- switch(cmd->cmd) {
+ mutex_lock(&priv->rx_queue_access);
+
+ switch (cmd->cmd) {
case ETHTOOL_SRXFH:
ret = gfar_set_hash_opts(priv, cmd);
break;
+ case ETHTOOL_SRXCLSRLINS:
+ if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
+ cmd->fs.ring_cookie >= priv->num_rx_queues) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = gfar_add_cls(priv, &cmd->fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = gfar_del_cls(priv, cmd->fs.location);
+ break;
default:
ret = -EINVAL;
}
+ mutex_unlock(&priv->rx_queue_access);
+
+ return ret;
+}
+
+static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ void *rule_locs)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ int ret = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->num_rx_queues;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = priv->rx_list.count;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = gfar_get_cls(priv, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = gfar_get_cls_all(priv, cmd, (u32 *) rule_locs);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
return ret;
}
@@ -810,4 +1759,5 @@ const struct ethtool_ops gfar_ethtool_ops = {
.set_wol = gfar_set_wol,
#endif
.set_rxnfc = gfar_set_nfc,
+ .get_rxnfc = gfar_get_nfc,
};
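To make the mask handling in gfar_set_attribute() above concrete, a small worked example (not part of the patch) for a 16-bit field such as the destination port; the values are illustrative only:

#if 0	/* illustrative sketch only, not part of the patch */
	/* A partial 16-bit mask keeps its in-field bits while the out-of-field
	 * bits are forced to one ("one-padded"), so masks from different rules
	 * are more likely to be identical and gfar_optimize_filer_masks() can
	 * drop the duplicates. */
	u32 mask = 0x00FF;		/* in-field bits of the port mask */
	mask |= RQFCR_PID_PORT_MASK;	/* 0xFFFF0000 -> mask becomes 0xFFFF00FF */

	/* A mask whose low 16 bits are all ones is treated as don't-care and
	 * no filer entry is emitted for that field at all. */
#endif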
diff --git a/drivers/net/gianfar_ptp.c b/drivers/net/gianfar_ptp.c
index d8e175382d1..f67b8aebc89 100644
--- a/drivers/net/gianfar_ptp.c
+++ b/drivers/net/gianfar_ptp.c
@@ -193,14 +193,9 @@ static void set_alarm(struct etsects *etsects)
/* Caller must hold etsects->lock. */
static void set_fipers(struct etsects *etsects)
{
- u32 tmr_ctrl = gfar_read(&etsects->regs->tmr_ctrl);
-
- gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl & (~TE));
- gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc);
+ set_alarm(etsects);
gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
- set_alarm(etsects);
- gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|TE);
}
/*
@@ -491,7 +486,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
spin_lock_init(&etsects->lock);
etsects->regs = ioremap(etsects->rsrc->start,
- 1 + etsects->rsrc->end - etsects->rsrc->start);
+ resource_size(etsects->rsrc));
if (!etsects->regs) {
pr_err("ioremap ptp registers failed\n");
goto no_ioremap;
@@ -511,7 +506,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1);
gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2);
set_alarm(etsects);
- gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE);
+ gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD);
spin_unlock_irqrestore(&etsects->lock, flags);
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index f181304a7ab..16ce45c1193 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -22,9 +22,11 @@
* Marko Isomaki
*/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -1015,11 +1017,10 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
return -EINVAL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
+ GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
+ dev->dev_addr[4] << 8 | dev->dev_addr[5]);
- GRETH_REGSAVE(regs->esa_msb, addr->sa_data[0] << 8 | addr->sa_data[1]);
- GRETH_REGSAVE(regs->esa_lsb,
- addr->sa_data[2] << 24 | addr->
- sa_data[3] << 16 | addr->sa_data[4] << 8 | addr->sa_data[5]);
return 0;
}
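For reference, the byte packing used by the corrected greth_set_mac_add() above, traced with an arbitrary example address (illustration only, not part of the patch):

#if 0	/* illustrative sketch only, not part of the patch */
	/* dev->dev_addr = 00:11:22:33:44:55 */
	u32 esa_msb = 0x00 << 8 | 0x11;				/* 0x00000011 */
	u32 esa_lsb = 0x22 << 24 | 0x33 << 16 | 0x44 << 8 | 0x55;	/* 0x22334455 */
#endif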
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index a09041aa850..c274b3d77eb 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -648,13 +648,13 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev,
ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_cleardev;
- hmp->tx_ring = (struct hamachi_desc *)ring_space;
+ hmp->tx_ring = ring_space;
hmp->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_tx;
- hmp->rx_ring = (struct hamachi_desc *)ring_space;
+ hmp->rx_ring = ring_space;
hmp->rx_ring_dma = ring_dma;
/* Check for options being passed in */
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 3e5d0b6b651..2a5a34d2d67 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -36,7 +36,7 @@
#include <linux/tcp.h>
#include <linux/semaphore.h>
#include <linux/compat.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define SIXPACK_VERSION "Revision: 0.3.0"
@@ -692,10 +692,10 @@ static void sixpack_close(struct tty_struct *tty)
{
struct sixpack *sp;
- write_lock(&disc_data_lock);
+ write_lock_bh(&disc_data_lock);
sp = tty->disc_data;
tty->disc_data = NULL;
- write_unlock(&disc_data_lock);
+ write_unlock_bh(&disc_data_lock);
if (!sp)
return;
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 99cdce33df8..a974727dd9a 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -76,6 +76,7 @@
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/hdlcdrv.h>
#include <linux/baycom.h>
#include <linux/jiffies.h>
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index d92fe6ca788..e349d867449 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -66,6 +66,7 @@
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/hdlcdrv.h>
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 52b14256e2c..ce555d9ac02 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -36,7 +36,7 @@
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 4c628393c8b..bc02968cee1 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -813,10 +813,10 @@ static void mkiss_close(struct tty_struct *tty)
{
struct mkiss *ax;
- write_lock(&disc_data_lock);
+ write_lock_bh(&disc_data_lock);
ax = tty->disc_data;
tty->disc_data = NULL;
- write_unlock(&disc_data_lock);
+ write_unlock_bh(&disc_data_lock);
if (!ax)
return;
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index 82bffc3cabd..29917363ebf 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -30,6 +30,7 @@ static const char version[] =
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/system.h>
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index ef2014375e6..18564d4a7c0 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -30,6 +30,7 @@ static const char version[] =
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/system.h>
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index c3ecb118c1d..b6519c1ba7e 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -2103,20 +2103,18 @@ static void hp100_set_multicast_list(struct net_device *dev)
#endif
netdev_for_each_mc_addr(ha, dev) {
addrs = ha->addr;
- if ((*addrs & 0x01) == 0x01) { /* multicast address? */
#ifdef HP100_DEBUG
- printk("hp100: %s: multicast = %pM, ",
- dev->name, addrs);
+ printk("hp100: %s: multicast = %pM, ",
+ dev->name, addrs);
#endif
- for (i = idx = 0; i < 6; i++) {
- idx ^= *addrs++ & 0x3f;
- printk(":%02x:", idx);
- }
+ for (i = idx = 0; i < 6; i++) {
+ idx ^= *addrs++ & 0x3f;
+ printk(":%02x:", idx);
+ }
#ifdef HP100_DEBUG
- printk("idx = %i\n", idx);
+ printk("idx = %i\n", idx);
#endif
- lp->hash_bytes[idx >> 3] |= (1 << (idx & 7));
- }
+ lp->hash_bytes[idx >> 3] |= (1 << (idx & 7));
}
}
#else
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 079450fe5e9..70cb7d8a3b5 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -39,6 +39,7 @@
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>
+#include <linux/of_net.h>
#include <linux/slab.h>
#include <asm/processor.h>
@@ -2506,18 +2507,6 @@ static int __devinit emac_init_config(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
const void *p;
- unsigned int plen;
- const char *pm, *phy_modes[] = {
- [PHY_MODE_NA] = "",
- [PHY_MODE_MII] = "mii",
- [PHY_MODE_RMII] = "rmii",
- [PHY_MODE_SMII] = "smii",
- [PHY_MODE_RGMII] = "rgmii",
- [PHY_MODE_TBI] = "tbi",
- [PHY_MODE_GMII] = "gmii",
- [PHY_MODE_RTBI] = "rtbi",
- [PHY_MODE_SGMII] = "sgmii",
- };
/* Read config from device-tree */
if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
@@ -2566,23 +2555,9 @@ static int __devinit emac_init_config(struct emac_instance *dev)
dev->mal_burst_size = 256;
/* PHY mode needs some decoding */
- dev->phy_mode = PHY_MODE_NA;
- pm = of_get_property(np, "phy-mode", &plen);
- if (pm != NULL) {
- int i;
- for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
- if (!strcasecmp(pm, phy_modes[i])) {
- dev->phy_mode = i;
- break;
- }
- }
-
- /* Backward compat with non-final DT */
- if (dev->phy_mode == PHY_MODE_NA && pm != NULL && plen == 4) {
- u32 nmode = *(const u32 *)pm;
- if (nmode > PHY_MODE_NA && nmode <= PHY_MODE_SGMII)
- dev->phy_mode = nmode;
- }
+ dev->phy_mode = of_get_phy_mode(np);
+ if (dev->phy_mode < 0)
+ dev->phy_mode = PHY_MODE_NA;
/* Check EMAC version */
if (of_device_is_compatible(np, "ibm,emac4sync")) {
@@ -2770,7 +2745,7 @@ static int __devinit emac_probe(struct platform_device *ofdev)
}
// TODO : request_mem_region
dev->emacp = ioremap(dev->rsrc_regs.start,
- dev->rsrc_regs.end - dev->rsrc_regs.start + 1);
+ resource_size(&dev->rsrc_regs));
if (dev->emacp == NULL) {
printk(KERN_ERR "%s: Can't map device registers!\n",
np->full_name);
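The core.c hunk above replaces the driver's private "phy-mode" string table with the generic of_get_phy_mode() helper declared in <linux/of_net.h>. As an illustration only, not part of the patch, here is a hedged sketch of the usual calling pattern; demo_get_phy_mode and the MII fallback are hypothetical, only the helper itself comes from the change above.

#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/phy.h>

static int demo_get_phy_mode(struct device_node *np)
{
	int mode = of_get_phy_mode(np);	/* PHY_INTERFACE_MODE_* or negative errno */

	if (mode < 0)
		mode = PHY_INTERFACE_MODE_MII;	/* hypothetical driver default */

	return mode;
}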
diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h
index 8a61b597a16..1568278d759 100644
--- a/drivers/net/ibm_newemac/emac.h
+++ b/drivers/net/ibm_newemac/emac.h
@@ -26,6 +26,7 @@
#define __IBM_NEWEMAC_H
#include <linux/types.h>
+#include <linux/phy.h>
/* EMAC registers Write Access rules */
struct emac_regs {
@@ -106,15 +107,15 @@ struct emac_regs {
/*
* PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY)
*/
-#define PHY_MODE_NA 0
-#define PHY_MODE_MII 1
-#define PHY_MODE_RMII 2
-#define PHY_MODE_SMII 3
-#define PHY_MODE_RGMII 4
-#define PHY_MODE_TBI 5
-#define PHY_MODE_GMII 6
-#define PHY_MODE_RTBI 7
-#define PHY_MODE_SGMII 8
+#define PHY_MODE_NA PHY_INTERFACE_MODE_NA
+#define PHY_MODE_MII PHY_INTERFACE_MODE_MII
+#define PHY_MODE_RMII PHY_INTERFACE_MODE_RMII
+#define PHY_MODE_SMII PHY_INTERFACE_MODE_SMII
+#define PHY_MODE_RGMII PHY_INTERFACE_MODE_RGMII
+#define PHY_MODE_TBI PHY_INTERFACE_MODE_TBI
+#define PHY_MODE_GMII PHY_INTERFACE_MODE_GMII
+#define PHY_MODE_RTBI PHY_INTERFACE_MODE_RTBI
+#define PHY_MODE_SGMII PHY_INTERFACE_MODE_SGMII
/* EMACx_MR0 */
#define EMAC_MR0_RXI 0x80000000
diff --git a/drivers/net/ibm_newemac/phy.c b/drivers/net/ibm_newemac/phy.c
index ac9d964e59e..ab4e5969fe6 100644
--- a/drivers/net/ibm_newemac/phy.c
+++ b/drivers/net/ibm_newemac/phy.c
@@ -28,12 +28,15 @@
#include "emac.h"
#include "phy.h"
-static inline int phy_read(struct mii_phy *phy, int reg)
+#define phy_read _phy_read
+#define phy_write _phy_write
+
+static inline int _phy_read(struct mii_phy *phy, int reg)
{
return phy->mdio_read(phy->dev, phy->address, reg);
}
-static inline void phy_write(struct mii_phy *phy, int reg, int val)
+static inline void _phy_write(struct mii_phy *phy, int reg, int val)
{
phy->mdio_write(phy->dev, phy->address, reg, val);
}
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index b388d782c7c..ba99af05bf6 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -34,6 +34,7 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
@@ -42,7 +43,7 @@
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 4fecaed67fc..46b5f5fd686 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -32,6 +32,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
@@ -40,8 +41,16 @@
struct ifb_private {
struct tasklet_struct ifb_tasklet;
int tasklet_pending;
+
+ struct u64_stats_sync rsync;
struct sk_buff_head rq;
+ u64 rx_packets;
+ u64 rx_bytes;
+
+ struct u64_stats_sync tsync;
struct sk_buff_head tq;
+ u64 tx_packets;
+ u64 tx_bytes;
};
static int numifbs = 2;
@@ -53,10 +62,8 @@ static int ifb_close(struct net_device *dev);
static void ri_tasklet(unsigned long dev)
{
-
struct net_device *_dev = (struct net_device *)dev;
struct ifb_private *dp = netdev_priv(_dev);
- struct net_device_stats *stats = &_dev->stats;
struct netdev_queue *txq;
struct sk_buff *skb;
@@ -76,15 +83,18 @@ static void ri_tasklet(unsigned long dev)
skb->tc_verd = 0;
skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
- stats->tx_packets++;
- stats->tx_bytes +=skb->len;
+
+ u64_stats_update_begin(&dp->tsync);
+ dp->tx_packets++;
+ dp->tx_bytes += skb->len;
+ u64_stats_update_end(&dp->tsync);
rcu_read_lock();
skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
if (!skb->dev) {
rcu_read_unlock();
dev_kfree_skb(skb);
- stats->tx_dropped++;
+ _dev->stats.tx_dropped++;
if (skb_queue_len(&dp->tq) != 0)
goto resched;
break;
@@ -119,9 +129,37 @@ resched:
}
+static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ifb_private *dp = netdev_priv(dev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&dp->rsync);
+ stats->rx_packets = dp->rx_packets;
+ stats->rx_bytes = dp->rx_bytes;
+ } while (u64_stats_fetch_retry_bh(&dp->rsync, start));
+
+ do {
+ start = u64_stats_fetch_begin_bh(&dp->tsync);
+
+ stats->tx_packets = dp->tx_packets;
+ stats->tx_bytes = dp->tx_bytes;
+
+ } while (u64_stats_fetch_retry_bh(&dp->tsync, start));
+
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->tx_dropped = dev->stats.tx_dropped;
+
+ return stats;
+}
+
+
static const struct net_device_ops ifb_netdev_ops = {
.ndo_open = ifb_open,
.ndo_stop = ifb_close,
+ .ndo_get_stats64 = ifb_stats64,
.ndo_start_xmit = ifb_xmit,
.ndo_validate_addr = eth_validate_addr,
};
@@ -145,22 +183,23 @@ static void ifb_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
random_ether_addr(dev->dev_addr);
}
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ifb_private *dp = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
u32 from = G_TC_FROM(skb->tc_verd);
- stats->rx_packets++;
- stats->rx_bytes+=skb->len;
+ u64_stats_update_begin(&dp->rsync);
+ dp->rx_packets++;
+ dp->rx_bytes += skb->len;
+ u64_stats_update_end(&dp->rsync);
if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
dev_kfree_skb(skb);
- stats->rx_dropped++;
+ dev->stats.rx_dropped++;
return NETDEV_TX_OK;
}
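The ifb hunks above move the device counters onto the u64_stats_sync scheme. As an illustration only, not part of the patch, a minimal sketch of that producer/consumer pattern follows; the demo_stats structure and function names are made up for the example, and the containing object is assumed to be zero-allocated (as netdev_priv data is). The writer brackets each update with u64_stats_update_begin()/u64_stats_update_end(); the reader loops with the _bh fetch helpers used above until it sees a consistent snapshot, which is what keeps 64-bit counters coherent on 32-bit SMP.

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

/* writer side: datapath, BH context in ifb's case */
static void demo_stats_add(struct demo_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* reader side: retry until the writer was not mid-update */
static void demo_stats_read(struct demo_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}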
diff --git a/drivers/net/igb/Makefile b/drivers/net/igb/Makefile
index 8372cb9a8c1..c6e4621b626 100644
--- a/drivers/net/igb/Makefile
+++ b/drivers/net/igb/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2009 Intel Corporation.
+# Copyright(c) 1999 - 2011 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 0f563c8c5ff..c0857bdfb03 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -1156,10 +1156,13 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
u32 ctrl_ext, ctrl_reg, reg;
bool pcs_autoneg;
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
!igb_sgmii_active_82575(hw))
- return 0;
+ return ret_val;
+
/*
* On the 82575, SerDes loopback mode persists until it is
@@ -1203,6 +1206,18 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
/* disable PCS autoneg and support parallel detect only */
pcs_autoneg = false;
default:
+ if (hw->mac.type == e1000_82575 ||
+ hw->mac.type == e1000_82576) {
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
+ if (ret_val) {
+ printk(KERN_DEBUG "NVM Read Error\n\n");
+ return ret_val;
+ }
+
+ if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
+ pcs_autoneg = false;
+ }
+
/*
* non-SGMII modes only supports a speed of 1000/Full for the
* link so it is best to just force the MAC and let the pcs
@@ -1250,7 +1265,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
if (!igb_sgmii_active_82575(hw))
igb_force_mac_fc(hw);
- return 0;
+ return ret_val;
}
/**
@@ -1735,6 +1750,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
ctrl |= E1000_CTRL_RST;
wr32(E1000_CTRL, ctrl);
+ wrfl();
/* Add delay to insure DEV_RST has time to complete */
if (global_device_reset)
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index dd6df349899..786e110011a 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -243,6 +243,8 @@ struct e1000_adv_tx_context_desc {
#define E1000_DTXCTL_MDP_EN 0x0020
#define E1000_DTXCTL_SPOOF_INT 0x0040
+#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
+
#define ALL_QUEUES 0xFFFF
/* RX packet buffer size defines */
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 6b80d40110c..7b8ddd830f1 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -437,6 +437,7 @@
#define E1000_RAH_POOL_1 0x00040000
/* Error Codes */
+#define E1000_SUCCESS 0
#define E1000_ERR_NVM 1
#define E1000_ERR_PHY 2
#define E1000_ERR_CONFIG 3
@@ -511,6 +512,16 @@
#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
#define E1000_GCR_CAP_VER2 0x00040000
+/* mPHY Address Control and Data Registers */
+#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */
+#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
+#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */
+
+/* mPHY PCS CLK Register */
+#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */
+/* mPHY Near End Digital Loopback Override Bit */
+#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
+
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
@@ -587,8 +598,8 @@
#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
/* NVM Word Offsets */
-#define NVM_ID_LED_SETTINGS 0x0004
-/* For SERDES output amplitude adjustment. */
+#define NVM_COMPAT 0x0003
+#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
#define NVM_INIT_CONTROL3_PORT_A 0x0024
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 27153e8d7b1..4519a136717 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index ce8255fc3c5..2b5ef761d2a 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -29,6 +29,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include "e1000_mac.h"
@@ -217,7 +218,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
}
/* if multicast bit is set, the alternate address will not be used */
- if (alt_mac_addr[0] & 0x01) {
+ if (is_multicast_ether_addr(alt_mac_addr)) {
hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
goto out;
}
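Several hunks in this series, e1000_mac.c above and iseries_veth.c further down, replace open-coded tests of the multicast bit (addr[0] & 0x01) with the helpers from <linux/etherdevice.h>. For reference, a tiny sketch of those helpers; demo_classify_addr is made up for the example.

#include <linux/kernel.h>
#include <linux/etherdevice.h>

static void demo_classify_addr(const u8 *addr)
{
	if (is_multicast_ether_addr(addr))	/* low bit of addr[0] set */
		pr_debug("%pM is multicast/broadcast\n", addr);
	else if (is_unicast_ether_addr(addr))
		pr_debug("%pM is unicast\n", addr);
}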
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index 601be99711c..4927f61fbbc 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index 78d48c7fa85..74f2f11ac29 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h
index bb112fb6c3a..eddb0f83dce 100644
--- a/drivers/net/igb/e1000_mbx.h
+++ b/drivers/net/igb/e1000_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index 75bf36a4bae..40407124e72 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -285,6 +285,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
wr32(E1000_EECD, eecd);
+ wrfl();
udelay(1);
timeout = NVM_MAX_RETRY_SPI;
diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h
index 7f43564c4bc..a2a7ca9fa73 100644
--- a/drivers/net/igb/e1000_nvm.h
+++ b/drivers/net/igb/e1000_nvm.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 Intel Corporation.
+ Copyright(c) 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index d639706eb3f..e662554c62d 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index 2cc117705a3..8510797b9d8 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 958ca3bda48..0990f6d860c 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index f4fa4b1751c..265e151b66c 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -37,6 +37,8 @@
#include <linux/clocksource.h>
#include <linux/timecompare.h>
#include <linux/net_tstamp.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
struct igb_adapter;
@@ -252,7 +254,7 @@ static inline int igb_desc_unused(struct igb_ring *ring)
struct igb_adapter {
struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
- struct vlan_group *vlgrp;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 mng_vlan_id;
u32 bd_number;
u32 wol;
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index fdc895e5a3f..414b0225be8 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -318,65 +318,6 @@ static int igb_set_pauseparam(struct net_device *netdev,
return retval;
}
-static u32 igb_get_rx_csum(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- return !!(adapter->rx_ring[0]->flags & IGB_RING_FLAG_RX_CSUM);
-}
-
-static int igb_set_rx_csum(struct net_device *netdev, u32 data)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- int i;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- if (data)
- adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
- else
- adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
- }
-
- return 0;
-}
-
-static u32 igb_get_tx_csum(struct net_device *netdev)
-{
- return (netdev->features & NETIF_F_IP_CSUM) != 0;
-}
-
-static int igb_set_tx_csum(struct net_device *netdev, u32 data)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- if (data) {
- netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- if (adapter->hw.mac.type >= e1000_82576)
- netdev->features |= NETIF_F_SCTP_CSUM;
- } else {
- netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_SCTP_CSUM);
- }
-
- return 0;
-}
-
-static int igb_set_tso(struct net_device *netdev, u32 data)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
-
- if (data) {
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- } else {
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- }
-
- dev_info(&adapter->pdev->dev, "TSO is %s\n",
- data ? "Enabled" : "Disabled");
- return 0;
-}
-
static u32 igb_get_msglevel(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -1284,6 +1225,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
+ wrfl();
msleep(10);
/* Define all writable bits for ICS */
@@ -1327,6 +1269,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, mask);
wr32(E1000_ICS, mask);
+ wrfl();
msleep(10);
if (adapter->test_icr & mask) {
@@ -1348,6 +1291,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMS, mask);
wr32(E1000_ICS, mask);
+ wrfl();
msleep(10);
if (!(adapter->test_icr & mask)) {
@@ -1369,6 +1313,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, ~mask);
wr32(E1000_ICS, ~mask);
+ wrfl();
msleep(10);
if (adapter->test_icr & mask) {
@@ -1380,6 +1325,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
+ wrfl();
msleep(10);
/* Unhook test interrupt handler */
@@ -1520,6 +1466,22 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
/* use CTRL_EXT to identify link type as SGMII can appear as copper */
if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+
+ /* Enable DH89xxCC MPHY for near end loopback */
+ reg = rd32(E1000_MPHY_ADDR_CTL);
+ reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
+ E1000_MPHY_PCS_CLK_REG_OFFSET;
+ wr32(E1000_MPHY_ADDR_CTL, reg);
+
+ reg = rd32(E1000_MPHY_DATA);
+ reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
+ wr32(E1000_MPHY_DATA, reg);
+ }
+
reg = rd32(E1000_RCTL);
reg |= E1000_RCTL_LBM_TCVR;
wr32(E1000_RCTL, reg);
@@ -1561,6 +1523,23 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
u32 rctl;
u16 phy_reg;
+ if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+ u32 reg;
+
+ /* Disable near end loopback on DH89xxCC */
+ reg = rd32(E1000_MPHY_ADDR_CTL);
+ reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
+ E1000_MPHY_PCS_CLK_REG_OFFSET;
+ wr32(E1000_MPHY_ADDR_CTL, reg);
+
+ reg = rd32(E1000_MPHY_DATA);
+ reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
+ wr32(E1000_MPHY_DATA, reg);
+ }
+
rctl = rd32(E1000_RCTL);
rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
wr32(E1000_RCTL, rctl);
@@ -2207,14 +2186,6 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_ringparam = igb_set_ringparam,
.get_pauseparam = igb_get_pauseparam,
.set_pauseparam = igb_set_pauseparam,
- .get_rx_csum = igb_get_rx_csum,
- .set_rx_csum = igb_set_rx_csum,
- .get_tx_csum = igb_get_tx_csum,
- .set_tx_csum = igb_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
- .set_tso = igb_set_tso,
.self_test = igb_diag_test,
.get_strings = igb_get_strings,
.set_phys_id = igb_set_phys_id,
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 2c28621eb30..40d4c405fd7 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2009 Intel Corporation.
+ Copyright(c) 2007-2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -28,6 +28,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
@@ -54,9 +55,8 @@
#define MAJ 3
#define MIN 0
#define BUILD 6
-#define KFIX 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
-__stringify(BUILD) "-k" __stringify(KFIX)
+__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
@@ -141,7 +141,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
-static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
+static void igb_vlan_mode(struct net_device *netdev, u32 features);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
@@ -1052,6 +1052,7 @@ msi_only:
kfree(adapter->vf_data);
adapter->vf_data = NULL;
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+ wrfl();
msleep(100);
dev_info(&adapter->pdev->dev, "IOV Disabled\n");
}
@@ -1363,7 +1364,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
(vid != old_vid) &&
- !vlan_group_get_device(adapter->vlgrp, old_vid)) {
+ !test_bit(old_vid, adapter->active_vlans)) {
/* remove VID from filter table */
igb_vfta_set(hw, old_vid, false);
}
@@ -1749,6 +1750,39 @@ void igb_reset(struct igb_adapter *adapter)
igb_get_phy_info(hw);
}
+static u32 igb_fix_features(struct net_device *netdev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int igb_set_features(struct net_device *netdev, u32 features)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int i;
+ u32 changed = netdev->features ^ features;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ if (features & NETIF_F_RXCSUM)
+ adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
+ else
+ adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
+ }
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ igb_vlan_mode(netdev, features);
+
+ return 0;
+}
+
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
@@ -1761,7 +1795,6 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_do_ioctl = igb_ioctl,
.ndo_tx_timeout = igb_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_vlan_rx_register = igb_vlan_rx_register,
.ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
.ndo_set_vf_mac = igb_ndo_set_vf_mac,
@@ -1771,6 +1804,8 @@ static const struct net_device_ops igb_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = igb_netpoll,
#endif
+ .ndo_fix_features = igb_fix_features,
+ .ndo_set_features = igb_set_features,
};
/**
@@ -1910,17 +1945,18 @@ static int __devinit igb_probe(struct pci_dev *pdev,
dev_info(&pdev->dev,
"PHY reset is blocked due to SOL/IDER session.\n");
- netdev->features = NETIF_F_SG |
+ netdev->hw_features = NETIF_F_SG |
NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_RX;
+
+ netdev->features = netdev->hw_features |
NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
- netdev->features |= NETIF_F_IPV6_CSUM;
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- netdev->features |= NETIF_F_GRO;
-
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_IP_CSUM;
@@ -1932,8 +1968,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
- if (hw->mac.type >= e1000_82576)
+ if (hw->mac.type >= e1000_82576) {
+ netdev->hw_features |= NETIF_F_SCTP_CSUM;
netdev->features |= NETIF_F_SCTP_CSUM;
+ }
adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
@@ -1985,7 +2023,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (hw->bus.func == 0)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
- else if (hw->mac.type == e1000_82580)
+ else if (hw->mac.type >= e1000_82580)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
&eeprom_data);
@@ -2039,6 +2077,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (err)
goto err_register;
+ igb_vlan_mode(netdev, netdev->features);
+
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
@@ -2159,6 +2199,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
kfree(adapter->vf_data);
adapter->vf_data = NULL;
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+ wrfl();
msleep(100);
dev_info(&pdev->dev, "IOV Disabled\n");
}
@@ -2921,12 +2962,11 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
**/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
- u32 max_frame_size = adapter->max_frame_size;
+ u32 max_frame_size;
struct e1000_hw *hw = &adapter->hw;
u16 pf_id = adapter->vfs_allocated_count;
- if (adapter->vlgrp)
- max_frame_size += VLAN_TAG_SIZE;
+ max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
/* if vfs are enabled we set RLPML to the largest possible request
* size and set the VMOLR RLPML to the size we need */
@@ -5675,25 +5715,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
return count < tx_ring->count;
}
-/**
- * igb_receive_skb - helper function to handle rx indications
- * @q_vector: structure containing interrupt and ring information
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-static void igb_receive_skb(struct igb_q_vector *q_vector,
- struct sk_buff *skb,
- u16 vlan_tag)
-{
- struct igb_adapter *adapter = q_vector->adapter;
-
- if (vlan_tag && adapter->vlgrp)
- vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
- vlan_tag, skb);
- else
- napi_gro_receive(&q_vector->napi, skb);
-}
-
static inline void igb_rx_checksum_adv(struct igb_ring *ring,
u32 status_err, struct sk_buff *skb)
{
@@ -5791,7 +5812,6 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
unsigned int i;
u32 staterr;
u16 length;
- u16 vlan_tag;
i = rx_ring->next_to_clean;
buffer_info = &rx_ring->buffer_info[i];
@@ -5876,10 +5896,12 @@ send_up:
skb->protocol = eth_type_trans(skb, netdev);
skb_record_rx_queue(skb, rx_ring->queue_index);
- vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
- le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
+ if (staterr & E1000_RXD_STAT_VP) {
+ u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
- igb_receive_skb(q_vector, skb, vlan_tag);
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
+ napi_gro_receive(&q_vector->napi, skb);
next_desc:
rx_desc->wb.upper.status_error = 0;
@@ -6249,7 +6271,7 @@ s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
struct igb_adapter *adapter = hw->back;
u16 cap_offset;
- cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ cap_offset = adapter->pdev->pcie_cap;
if (!cap_offset)
return -E1000_ERR_CONFIG;
@@ -6263,7 +6285,7 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
struct igb_adapter *adapter = hw->back;
u16 cap_offset;
- cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ cap_offset = adapter->pdev->pcie_cap;
if (!cap_offset)
return -E1000_ERR_CONFIG;
@@ -6272,17 +6294,15 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
return 0;
}
-static void igb_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp)
+static void igb_vlan_mode(struct net_device *netdev, u32 features)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl;
igb_irq_disable(adapter);
- adapter->vlgrp = grp;
- if (grp) {
+ if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
ctrl = rd32(E1000_CTRL);
ctrl |= E1000_CTRL_VME;
@@ -6316,6 +6336,8 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
/* add the filter since PF can receive vlans w/o entry in vlvf */
igb_vfta_set(hw, vid, true);
+
+ set_bit(vid, adapter->active_vlans);
}
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -6326,7 +6348,6 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
s32 err;
igb_irq_disable(adapter);
- vlan_group_set_device(adapter->vlgrp, vid, NULL);
if (!test_bit(__IGB_DOWN, &adapter->state))
igb_irq_enable(adapter);
@@ -6337,20 +6358,16 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
/* if vid was not present in VLVF just remove it from table */
if (err)
igb_vfta_set(hw, vid, false);
+
+ clear_bit(vid, adapter->active_vlans);
}
static void igb_restore_vlan(struct igb_adapter *adapter)
{
- igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+ u16 vid;
- if (adapter->vlgrp) {
- u16 vid;
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- if (!vlan_group_get_device(adapter->vlgrp, vid))
- continue;
- igb_vlan_rx_add_vid(adapter->netdev, vid);
- }
- }
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ igb_vlan_rx_add_vid(adapter->netdev, vid);
}
int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
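The igb_main.c changes above, together with the igb_ethtool.c hunk that drops the per-feature get/set callbacks, move offload toggling onto the hw_features / ndo_set_features model. A compressed sketch of that model for a hypothetical driver is below; the demo_* names are invented, and the u32 feature type matches the kernel generation this diff targets.

#include <linux/netdevice.h>

/* keep VLAN RX/TX acceleration in lockstep, as igb_fix_features() does */
static u32 demo_fix_features(struct net_device *netdev, u32 features)
{
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int demo_set_features(struct net_device *netdev, u32 features)
{
	u32 changed = netdev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		/* reprogram receive checksum offload in hardware here */
	}

	return 0;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_fix_features	= demo_fix_features,
	.ndo_set_features	= demo_set_features,
};

/* at probe time, hw_features lists what the user may toggle via ethtool:
 *	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
 *	netdev->features    = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
 */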
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index d5dad5d607d..fd4a7b780fd 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -34,7 +34,7 @@
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/netdevice.h>
-
+#include <linux/if_vlan.h>
#include "vf.h"
@@ -173,7 +173,7 @@ struct igbvf_adapter {
const struct igbvf_info *ei;
- struct vlan_group *vlgrp;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 bd_number;
u32 rx_buffer_len;
u32 polling_interval;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 1c77fb3bf4a..40ed066e3ef 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -45,7 +45,7 @@
#include "igbvf.h"
-#define DRV_VERSION "1.0.8-k0"
+#define DRV_VERSION "2.0.0-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
@@ -100,12 +100,12 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
struct sk_buff *skb,
u32 status, u16 vlan)
{
- if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
- vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
- le16_to_cpu(vlan) &
- E1000_RXD_SPC_VLAN_MASK);
- else
- netif_receive_skb(skb);
+ if (status & E1000_RXD_STAT_VP) {
+ u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
+ netif_receive_skb(skb);
}
static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
@@ -1167,12 +1167,10 @@ static int igbvf_poll(struct napi_struct *napi, int budget)
*/
static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
{
- int max_frame_size = adapter->max_frame_size;
+ int max_frame_size;
struct e1000_hw *hw = &adapter->hw;
- if (adapter->vlgrp)
- max_frame_size += VLAN_TAG_SIZE;
-
+ max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
e1000_rlpml_set_vf(hw, max_frame_size);
}
@@ -1183,6 +1181,8 @@ static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
if (hw->mac.ops.set_vfta(hw, vid, true))
dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
+ else
+ set_bit(vid, adapter->active_vlans);
}
static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -1191,7 +1191,6 @@ static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
struct e1000_hw *hw = &adapter->hw;
igbvf_irq_disable(adapter);
- vlan_group_set_device(adapter->vlgrp, vid, NULL);
if (!test_bit(__IGBVF_DOWN, &adapter->state))
igbvf_irq_enable(adapter);
@@ -1199,30 +1198,16 @@ static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (hw->mac.ops.set_vfta(hw, vid, false))
dev_err(&adapter->pdev->dev,
"Failed to remove vlan id %d\n", vid);
-}
-
-static void igbvf_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp)
-{
- struct igbvf_adapter *adapter = netdev_priv(netdev);
-
- adapter->vlgrp = grp;
+ else
+ clear_bit(vid, adapter->active_vlans);
}
static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
{
u16 vid;
- if (!adapter->vlgrp)
- return;
-
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- if (!vlan_group_get_device(adapter->vlgrp, vid))
- continue;
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
igbvf_vlan_rx_add_vid(adapter->netdev, vid);
- }
-
- igbvf_set_rlpml(adapter);
}
/**
@@ -1241,6 +1226,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
/* disable transmits */
txdctl = er32(TXDCTL(0));
ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+ e1e_flush();
msleep(10);
/* Setup the HW Tx Head and Tail descriptor pointers */
@@ -1321,6 +1307,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
/* disable receives */
rxdctl = er32(RXDCTL(0));
ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+ e1e_flush();
msleep(10);
rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
@@ -2203,7 +2190,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+ if (vlan_tx_tag_present(skb)) {
tx_flags |= IGBVF_TX_FLAGS_VLAN;
tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
}
@@ -2556,7 +2543,6 @@ static const struct net_device_ops igbvf_netdev_ops = {
.ndo_change_mtu = igbvf_change_mtu,
.ndo_do_ioctl = igbvf_ioctl,
.ndo_tx_timeout = igbvf_tx_timeout,
- .ndo_vlan_rx_register = igbvf_vlan_rx_register,
.ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
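Both igb and igbvf above drop struct vlan_group in favour of an active_vlans bitmap plus __vlan_hwaccel_put_tag() on receive. A minimal sketch of that pattern follows, illustrative only; the two-argument put_tag matches this kernel generation, and the demo_adapter structure and demo_* functions are hypothetical.

#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>

struct demo_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};

/* rx path: attach the tag, then hand the skb up as usual */
static void demo_rx_vlan(struct sk_buff *skb, u16 vid)
{
	__vlan_hwaccel_put_tag(skb, vid);
	netif_receive_skb(skb);
}

static void demo_add_vid(struct demo_adapter *a, u16 vid)
{
	/* program the hardware VLAN filter here, then remember the vid */
	set_bit(vid, a->active_vlans);
}

static void demo_kill_vid(struct demo_adapter *a, u16 vid)
{
	/* clear the hardware VLAN filter here */
	clear_bit(vid, a->active_vlans);
}

/* after a reset, replay every remembered vid into the hardware */
static void demo_restore_vlans(struct demo_adapter *a)
{
	u16 vid;

	for_each_set_bit(vid, a->active_vlans, VLAN_N_VID)
		demo_add_vid(a, vid);
}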
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 32f07f868d8..a234e450452 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -532,7 +532,7 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
return;
ih = (struct iphdr *) ((char *)eh + ETH_HLEN);
- if (ih->frag_off & htons(IP_MF | IP_OFFSET))
+ if (ip_is_fragment(ih))
return;
proto = ih->protocol;
@@ -1664,12 +1664,7 @@ static void ioc3_set_multicast_list(struct net_device *dev)
ip->ehar_l = 0xffffffff;
} else {
netdev_for_each_mc_addr(ha, dev) {
- char *addr = ha->addr;
-
- if (!(*addr & 1))
- continue;
-
- ehar |= (1UL << ioc3_hash(addr));
+ ehar |= (1UL << ioc3_hash(ha->addr));
}
ip->ehar_h = ehar >> 32;
ip->ehar_l = ehar & 0xffffffff;
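The ioc3 checksum hunk above switches to the ip_is_fragment() helper from <net/ip.h> instead of testing frag_off by hand; the helper is true for any packet with MF set or a non-zero fragment offset. A small sketch of typical use, illustrative only; demo_can_touch_l4 is invented for the example.

#include <net/ip.h>
#include <linux/skbuff.h>

/* only the first fragment carries the L4 header, so skip
 * checksum fix-ups for anything ip_is_fragment() flags */
static bool demo_can_touch_l4(const struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	return !ip_is_fragment(iph);
}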
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 58cd3202b48..d4aa40adf1e 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -22,6 +22,7 @@
*/
#include <linux/crc32.h>
#include <linux/ethtool.h>
+#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/mutex.h>
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index d532dde5120..963067d3bda 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -31,6 +31,7 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 174cafad2c1..b45b2cc4280 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -152,6 +152,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rtnetlink.h>
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 7a963d4e6d0..b56636da6cc 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -52,6 +52,7 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
#include <linux/pnp.h>
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 001ed0a255f..d0851dfa037 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -12,6 +12,8 @@
* Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
*
*/
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 4488bd581ec..82660672dcd 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -22,6 +22,8 @@
* - DMA transfer support
* - FIFO mode support
*/
+#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 52a7c86af66..ed7d7d62bf6 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -12,6 +12,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -511,7 +513,7 @@ static void sh_sir_tx(struct sh_sir_self *self, int phase)
static int sh_sir_read_data(struct sh_sir_self *self)
{
- u16 val;
+ u16 val = 0;
int timeout = 1024;
while (timeout--) {
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index efe05bb34dd..5039f08f5a5 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -11,6 +11,7 @@
*
********************************************************************/
+#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 8800e1fe412..8b1c3484d27 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -49,6 +49,7 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>
@@ -515,7 +516,7 @@ static const struct net_device_ops smsc_ircc_netdev_ops = {
* Try to open driver instance
*
*/
-static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
+static int __devinit smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
{
struct smsc_ircc_cb *self;
struct net_device *dev;
@@ -2404,8 +2405,6 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
* addresses making a subsystem device table necessary.
*/
#ifdef CONFIG_PCI
-#define PCIID_VENDOR_INTEL 0x8086
-#define PCIID_VENDOR_ALI 0x10b9
static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
/*
* Subsystems needing entries:
@@ -2415,7 +2414,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
*/
{
/* Guessed entry */
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
.device = 0x24cc,
.subvendor = 0x103c,
.subdevice = 0x08bc,
@@ -2428,7 +2427,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
.name = "HP nx5000 family",
},
{
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
.device = 0x24cc,
.subvendor = 0x103c,
.subdevice = 0x088c,
@@ -2442,7 +2441,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
.name = "HP nc8000 family",
},
{
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
.device = 0x24cc,
.subvendor = 0x103c,
.subdevice = 0x0890,
@@ -2455,7 +2454,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
.name = "HP nc6000 family",
},
{
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */
.device = 0x24cc,
.subvendor = 0x0e11,
.subdevice = 0x0860,
@@ -2470,7 +2469,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
},
{
/* Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge */
- .vendor = PCIID_VENDOR_INTEL,
+ .vendor = PCI_VENDOR_ID_INTEL,
.device = 0x24c0,
.subvendor = 0x1179,
.subdevice = 0xffff, /* 0xffff is "any" */
@@ -2483,7 +2482,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
.name = "Toshiba laptop with Intel 82801DB/DBL LPC bridge",
},
{
- .vendor = PCIID_VENDOR_INTEL, /* Intel 82801CAM ISA bridge */
+ .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801CAM ISA bridge */
.device = 0x248c,
.subvendor = 0x1179,
.subdevice = 0xffff, /* 0xffff is "any" */
@@ -2497,7 +2496,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
},
{
/* 82801DBM (ICH4-M) LPC Interface Bridge */
- .vendor = PCIID_VENDOR_INTEL,
+ .vendor = PCI_VENDOR_ID_INTEL,
.device = 0x24cc,
.subvendor = 0x1179,
.subdevice = 0xffff, /* 0xffff is "any" */
@@ -2511,7 +2510,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
},
{
/* ALi M1533/M1535 PCI to ISA Bridge [Aladdin IV/V/V+] */
- .vendor = PCIID_VENDOR_ALI,
+ .vendor = PCI_VENDOR_ID_AL,
.device = 0x1533,
.subvendor = 0x1179,
.subdevice = 0xffff, /* 0xffff is "any" */
diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c
index 99e1ec02a01..19ad4606b79 100644
--- a/drivers/net/irda/toim3232-sir.c
+++ b/drivers/net/irda/toim3232-sir.c
@@ -78,7 +78,7 @@
* Target hardware: IRWave IR320ST-2
*
* The IRWave IR320ST-2 is a simple dongle based on the Vishay/Temic
- * TOIM3232 SIR Endec and the Vishay/Temic TFDS4500 SIR IRDA transciever.
+ * TOIM3232 SIR Endec and the Vishay/Temic TFDS4500 SIR IRDA transceiver.
* It uses a hex inverter and some discrete components to buffer and
* line convert the RS232 down to 5V.
*
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index f504b262ba3..6d6479049aa 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -46,6 +46,7 @@ F02 Oct/28/02: Add SB device ID for 3147 and 3177.
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index c6f58482b76..f903a6a2dcb 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -210,7 +210,7 @@ static void DisableDmaChannel(unsigned int channel)
break;
default:
break;
- }; //Switch
+ }
}
static unsigned char ReadLPCReg(int iRegNum)
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index c3d07382b7f..9021d013172 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -36,6 +36,7 @@ MODULE_LICENSE("GPL");
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 1f9c3f08d1a..c4366601b06 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -47,6 +47,7 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 9ece1fd9889..53dd39e9130 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -538,7 +538,7 @@ static void veth_handle_ack(struct veth_lpevent *event)
default:
veth_error("Unknown ack type %d from LPAR %d.\n",
event->base_event.xSubtype, rlp);
- };
+ }
}
static void veth_handle_int(struct veth_lpevent *event)
@@ -584,7 +584,7 @@ static void veth_handle_int(struct veth_lpevent *event)
default:
veth_error("Unknown interrupt type %d from LPAR %d.\n",
event->base_event.xSubtype, rlp);
- };
+ }
}
static void veth_handle_event(struct HvLpEvent *event)
@@ -964,11 +964,9 @@ static void veth_set_multicast_list(struct net_device *dev)
u8 *addr = ha->addr;
u64 xaddr = 0;
- if (addr[0] & 0x01) {/* multicast address? */
- memcpy(&xaddr, addr, ETH_ALEN);
- port->mcast_addr[port->num_mcast] = xaddr;
- port->num_mcast++;
- }
+ memcpy(&xaddr, addr, ETH_ALEN);
+ port->mcast_addr[port->num_mcast] = xaddr;
+ port->num_mcast++;
}
}
@@ -1184,7 +1182,7 @@ static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct veth_port *port = netdev_priv(dev);
HvLpIndexMap lpmask;
- if (! (frame[0] & 0x01)) {
+ if (is_unicast_ether_addr(frame)) {
/* unicast packet */
HvLpIndex rlp = frame[5];
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index c982ab9f900..38b362b6785 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -57,6 +57,7 @@ ixgb_raise_clock(struct ixgb_hw *hw,
*/
*eecd_reg = *eecd_reg | IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, *eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
}
@@ -75,6 +76,7 @@ ixgb_lower_clock(struct ixgb_hw *hw,
*/
*eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, *eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
}
@@ -112,6 +114,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
eecd_reg |= IXGB_EECD_DI;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
@@ -206,21 +209,25 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
/* Deselect EEPROM */
eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Clock high */
eecd_reg |= IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Select EEPROM */
eecd_reg |= IXGB_EECD_CS;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Clock low */
eecd_reg &= ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
}
@@ -239,11 +246,13 @@ ixgb_clock_eeprom(struct ixgb_hw *hw)
/* Rising edge of clock */
eecd_reg |= IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
/* Falling edge of clock */
eecd_reg &= ~IXGB_EECD_SK;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
+ IXGB_WRITE_FLUSH(hw);
udelay(50);
}
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 6cb2e42ff4c..3d61a9e4faf 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -149,6 +149,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
*/
IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
+ IXGB_WRITE_FLUSH(hw);
msleep(IXGB_DELAY_BEFORE_RESET);
/* Issue a global reset to the MAC. This will reset the chip's
@@ -1220,6 +1221,7 @@ ixgb_optics_reset_bcm(struct ixgb_hw *hw)
ctrl &= ~IXGB_CTRL0_SDP2;
ctrl |= IXGB_CTRL0_SDP3;
IXGB_WRITE_REG(hw, CTRL0, ctrl);
+ IXGB_WRITE_FLUSH(hw);
/* SerDes needs extra delay */
msleep(IXGB_SUN_PHY_RESET_DELAY);
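The wrfl(), e1e_flush() and IXGB_WRITE_FLUSH() calls added throughout this series all serve the same purpose: force a posted MMIO write out to the device, by reading a register back, before the driver sleeps or starts a reset countdown. A generic sketch with plain writel()/readl() follows; the DEMO_* offsets and bit are hypothetical.

#include <linux/io.h>
#include <linux/delay.h>

#define DEMO_REG_CTRL	0x0000		/* hypothetical control register */
#define DEMO_REG_STATUS	0x0008		/* hypothetical read-only status */
#define DEMO_CTRL_RST	0x00000001

static void demo_reset(void __iomem *regs)
{
	writel(DEMO_CTRL_RST, regs + DEMO_REG_CTRL);

	/* MMIO writes over PCI are posted; reading any register flushes
	 * them to the device before the delay below starts counting. */
	readl(regs + DEMO_REG_STATUS);

	usleep_range(1000, 2000);	/* give the hardware time to reset */
}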
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index e467b20ed1f..e04a8e49e6d 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -131,6 +131,13 @@ struct vf_macvlans {
u8 vf_macvlan[ETH_ALEN];
};
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
@@ -207,12 +214,10 @@ struct ixgbe_ring {
struct ixgbe_rx_buffer *rx_buffer_info;
};
unsigned long state;
- u8 atr_sample_rate;
- u8 atr_count;
+ u8 __iomem *tail;
+
u16 count; /* amount of descriptors */
u16 rx_buf_len;
- u16 next_to_use;
- u16 next_to_clean;
u8 queue_index; /* needed for multiqueue queue management */
u8 reg_idx; /* holds the special value that gets
@@ -220,15 +225,13 @@ struct ixgbe_ring {
* associated with this ring, which is
* different for DCB and RSS modes
*/
- u8 dcb_tc;
-
- u16 work_limit; /* max work per interrupt */
-
- u8 __iomem *tail;
+ u8 atr_sample_rate;
+ u8 atr_count;
- unsigned int total_bytes;
- unsigned int total_packets;
+ u16 next_to_use;
+ u16 next_to_clean;
+ u8 dcb_tc;
struct ixgbe_queue_stats stats;
struct u64_stats_sync syncp;
union {
@@ -244,7 +247,6 @@ struct ixgbe_ring {
enum ixgbe_ring_f_enum {
RING_F_NONE = 0,
- RING_F_DCB,
RING_F_VMDQ, /* SR-IOV uses the same ring feature */
RING_F_RSS,
RING_F_FDIR,
@@ -255,7 +257,6 @@ enum ixgbe_ring_f_enum {
RING_F_ARRAY_SIZE /* must be last in enum set */
};
-#define IXGBE_MAX_DCB_INDICES 64
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 64
@@ -272,6 +273,18 @@ struct ixgbe_ring_feature {
int mask;
} ____cacheline_internodealigned_in_smp;
+struct ixgbe_ring_container {
+#if MAX_RX_QUEUES > MAX_TX_QUEUES
+ DECLARE_BITMAP(idx, MAX_RX_QUEUES);
+#else
+ DECLARE_BITMAP(idx, MAX_TX_QUEUES);
+#endif
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 work_limit; /* total work allowed per interrupt */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
? 8 : 1)
@@ -289,12 +302,7 @@ struct ixgbe_q_vector {
int cpu; /* CPU for DCA */
#endif
struct napi_struct napi;
- DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
- DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
- u8 rxr_count; /* Rx ring count assigned to this vector */
- u8 txr_count; /* Tx ring count assigned to this vector */
- u8 tx_itr;
- u8 rx_itr;
+ struct ixgbe_ring_container rx, tx;
u32 eitr;
cpumask_var_t affinity_mask;
char name[IFNAMSIZ + 9];
@@ -308,9 +316,13 @@ struct ixgbe_q_vector {
((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
-#define IXGBE_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
+static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
#define IXGBE_RX_DESC_ADV(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
@@ -404,6 +416,9 @@ struct ixgbe_adapter {
u16 eitr_low;
u16 eitr_high;
+ /* Work limits */
+ u16 tx_work_limit;
+
/* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
int num_tx_queues;
@@ -484,6 +499,17 @@ struct ixgbe_adapter {
struct vf_macvlans vf_mvs;
struct vf_macvlans *mv_list;
bool antispoofing_enabled;
+
+ struct hlist_head fdir_filter_list;
+ union ixgbe_atr_input fdir_mask;
+ int fdir_filter_count;
+};
+
+struct ixgbe_fdir_filter {
+ struct hlist_node fdir_node;
+ union ixgbe_atr_input filter;
+ u16 sw_idx;
+ u16 action;
};
enum ixbge_state_t {
@@ -545,31 +571,35 @@ extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
extern int ethtool_ioctl(struct ifreq *ifr);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
-extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
+extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue);
-extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- struct ixgbe_atr_input_masks *input_masks,
- u16 soft_id, u8 queue);
-extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring);
-extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring);
+extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask);
+extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue);
+extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id);
+extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
+extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
u32 tx_flags, u8 *hdr_len);
extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb);
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb,
+ u32 staterr);
extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc);
extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 8179e5060a1..0d4e3826449 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1242,6 +1242,47 @@ static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
}
}
+/**
+ * ixgbe_set_rxpba_82598 - Configure packet buffers
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ *
+ * Configure packet buffers.
+ */
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy)
+{
+ u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
+ u8 i = 0;
+
+ if (!num_pb)
+ return;
+
+ /* Setup Rx packet buffer sizes */
+ switch (strategy) {
+ case PBA_STRATEGY_WEIGHTED:
+ /* Setup the first four at 80KB */
+ rxpktsize = IXGBE_RXPBSIZE_80KB;
+ for (; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Setup the last four at 48KB...don't re-init i */
+ rxpktsize = IXGBE_RXPBSIZE_48KB;
+ /* Fall Through */
+ case PBA_STRATEGY_EQUAL:
+ default:
+ /* Divide the remaining Rx packet buffer evenly among the TCs */
+ for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ }
+
+ /* Setup Tx packet buffer sizes */
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
+
+ return;
+}
+
static struct ixgbe_mac_operations mac_ops_82598 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82598,
@@ -1257,6 +1298,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
.write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
.setup_link = &ixgbe_setup_mac_link_82598,
+ .set_rxpba = &ixgbe_set_rxpba_82598,
.check_link = &ixgbe_check_mac_link_82598,
.get_link_capabilities = &ixgbe_get_link_capabilities_82598,
.led_on = &ixgbe_led_on_generic,
@@ -1274,6 +1316,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.clear_vfta = &ixgbe_clear_vfta_82598,
.set_vfta = &ixgbe_set_vfta_82598,
.fc_enable = &ixgbe_fc_enable_82598,
+ .set_fw_drv_ver = NULL,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
.release_swfw_sync = &ixgbe_release_swfw_sync,
};
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 8ee661245af..34f30ec79c2 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -213,6 +213,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
switch (hw->phy.type) {
case ixgbe_phy_tn:
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+ phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.get_firmware_version =
&ixgbe_get_phy_firmware_version_tnx;
break;
@@ -1107,153 +1108,87 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
}
/**
- * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ * ixgbe_set_fdir_rxpba_82599 - Initialize Flow Director Rx packet buffer
* @hw: pointer to hardware structure
* @pballoc: which mode to allocate filters with
**/
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
+static s32 ixgbe_set_fdir_rxpba_82599(struct ixgbe_hw *hw, const u32 pballoc)
{
- u32 fdirctrl = 0;
- u32 pbsize;
+ u32 fdir_pbsize = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
+ u32 current_rxpbsize = 0;
int i;
- /*
- * Before enabling Flow Director, the Rx Packet Buffer size
- * must be reduced. The new value is the current size minus
- * flow director memory usage size.
- */
- pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
- (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
-
- /*
- * The defaults in the HW for RX PB 1-7 are not zero and so should be
- * initialized to zero for non DCB mode otherwise actual total RX PB
- * would be bigger than programmed and filter space would run into
- * the PB 0 region.
- */
- for (i = 1; i < 8; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-
- /* Send interrupt when 64 filters are left */
- fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
-
- /* Set the maximum length per hash bucket to 0xA filters */
- fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
-
+ /* reserve space for Flow Director filters */
switch (pballoc) {
- case IXGBE_FDIR_PBALLOC_64K:
- /* 8k - 1 signature filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
+ case IXGBE_FDIR_PBALLOC_256K:
+ fdir_pbsize -= 256 << IXGBE_RXPBSIZE_SHIFT;
break;
case IXGBE_FDIR_PBALLOC_128K:
- /* 16k - 1 signature filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
+ fdir_pbsize -= 128 << IXGBE_RXPBSIZE_SHIFT;
break;
- case IXGBE_FDIR_PBALLOC_256K:
- /* 32k - 1 signature filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
+ case IXGBE_FDIR_PBALLOC_64K:
+ fdir_pbsize -= 64 << IXGBE_RXPBSIZE_SHIFT;
break;
+ case IXGBE_FDIR_PBALLOC_NONE:
default:
- /* bad value */
- return IXGBE_ERR_CONFIG;
- };
-
- /* Move the flexible bytes to use the ethertype - shift 6 words */
- fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
+ return IXGBE_ERR_PARAM;
+ }
+ /* determine current RX packet buffer size */
+ for (i = 0; i < 8; i++)
+ current_rxpbsize += IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
- /* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+ /* if there is already room for the filters do nothing */
+ if (current_rxpbsize <= fdir_pbsize)
+ return 0;
- /*
- * Poll init-done after we write the register. Estimated times:
- * 10G: PBALLOC = 11b, timing is 60us
- * 1G: PBALLOC = 11b, timing is 600us
- * 100M: PBALLOC = 11b, timing is 6ms
- *
- * Multiple these timings by 4 if under full Rx load
- *
- * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
- * 1 msec per poll time. If we're at line rate and drop to 100M, then
- * this might not finish in our poll time, but we can live with that
- * for now.
- */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
- IXGBE_WRITE_FLUSH(hw);
- for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
- if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
- break;
- usleep_range(1000, 2000);
+ if (current_rxpbsize > hw->mac.rx_pb_size) {
+ /*
+ * If the total rxpbsize exceeds the hardware maximum, the Rx
+ * buffer sizes are unconfigured or misconfigured, since the HW
+ * default gives the full buffer to each traffic class, making
+ * the programmed total 8x the actual buffer size.
+ *
+ * This assumes no DCB since the RXPBSIZE registers appear to
+ * be unconfigured.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), fdir_pbsize);
+ for (i = 1; i < 8; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ } else {
+ /*
+ * Since the Rx packet buffer appears to have already been
+ * configured we need to shrink each packet buffer by enough
+ * to make room for the filters. As such we take each rxpbsize
+ * value and multiply it by a fraction representing the size
+ * needed over the size we currently have.
+ *
+ * We need to reduce fdir_pbsize and current_rxpbsize to
+ * 1/1024 of their original values in order to avoid
+ * overflowing the u32 being used to store rxpbsize.
+ */
+ fdir_pbsize >>= IXGBE_RXPBSIZE_SHIFT;
+ current_rxpbsize >>= IXGBE_RXPBSIZE_SHIFT;
+ for (i = 0; i < 8; i++) {
+ u32 rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+ rxpbsize *= fdir_pbsize;
+ rxpbsize /= current_rxpbsize;
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+ }
}
- if (i >= IXGBE_FDIR_INIT_DONE_POLL)
- hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
return 0;
}
/**
- * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
* @hw: pointer to hardware structure
- * @pballoc: which mode to allocate filters with
+ * @fdirctrl: value to write to flow director control register
**/
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
+static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
- u32 fdirctrl = 0;
- u32 pbsize;
int i;
- /*
- * Before enabling Flow Director, the Rx Packet Buffer size
- * must be reduced. The new value is the current size minus
- * flow director memory usage size.
- */
- pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
- (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
-
- /*
- * The defaults in the HW for RX PB 1-7 are not zero and so should be
- * initialized to zero for non DCB mode otherwise actual total RX PB
- * would be bigger than programmed and filter space would run into
- * the PB 0 region.
- */
- for (i = 1; i < 8; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-
- /* Send interrupt when 64 filters are left */
- fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
-
- /* Initialize the drop queue to Rx queue 127 */
- fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
-
- switch (pballoc) {
- case IXGBE_FDIR_PBALLOC_64K:
- /* 2k - 1 perfect filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
- break;
- case IXGBE_FDIR_PBALLOC_128K:
- /* 4k - 1 perfect filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
- break;
- case IXGBE_FDIR_PBALLOC_256K:
- /* 8k - 1 perfect filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
- break;
- default:
- /* bad value */
- return IXGBE_ERR_CONFIG;
- };
-
- /* Turn perfect match filtering on */
- fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
- fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
-
- /* Move the flexible bytes to use the ethertype - shift 6 words */
- fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
-
/* Prime the keys for hashing */
IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
@@ -1271,10 +1206,6 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
* this might not finish in our poll time, but we can live with that
* for now.
*/
-
- /* Set the maximum length per hash bucket to 0xA filters */
- fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
-
IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
IXGBE_WRITE_FLUSH(hw);
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
@@ -1283,101 +1214,77 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
break;
usleep_range(1000, 2000);
}
- if (i >= IXGBE_FDIR_INIT_DONE_POLL)
- hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n");
- return 0;
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+ hw_dbg(hw, "Flow Director poll time exceeded!\n");
}
-
/**
- * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
- * @stream: input bitstream to compute the hash on
- * @key: 32-bit hash key
+ * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
**/
-static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
- u32 key)
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
- /*
- * The algorithm is as follows:
- * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
- * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
- * and A[n] x B[n] is bitwise AND between same length strings
- *
- * K[n] is 16 bits, defined as:
- * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
- * for n modulo 32 < 15, K[n] =
- * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
- *
- * S[n] is 16 bits, defined as:
- * for n >= 15, S[n] = S[n:n - 15]
- * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
- *
- * To simplify for programming, the algorithm is implemented
- * in software this way:
- *
- * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
- *
- * for (i = 0; i < 352; i+=32)
- * hi_hash_dword[31:0] ^= Stream[(i+31):i];
- *
- * lo_hash_dword[15:0] ^= Stream[15:0];
- * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
- * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
- *
- * hi_hash_dword[31:0] ^= Stream[351:320];
- *
- * if(key[0])
- * hash[15:0] ^= Stream[15:0];
- *
- * for (i = 0; i < 16; i++) {
- * if (key[i])
- * hash[15:0] ^= lo_hash_dword[(i+15):i];
- * if (key[i + 16])
- * hash[15:0] ^= hi_hash_dword[(i+15):i];
- * }
- *
- */
- __be32 common_hash_dword = 0;
- u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
- u32 hash_result = 0;
- u8 i;
+ s32 err;
- /* record the flow_vm_vlan bits as they are a key part to the hash */
- flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
+ /* Before enabling Flow Director, verify the Rx Packet Buffer size */
+ err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl);
+ if (err)
+ return err;
- /* generate common hash dword */
- for (i = 10; i; i -= 2)
- common_hash_dword ^= atr_input->dword_stream[i] ^
- atr_input->dword_stream[i - 1];
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 filters are left
+ */
+ fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+ (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
- hi_hash_dword = ntohl(common_hash_dword);
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
- /* low dword is word swapped version of common */
- lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+ return 0;
+}
- /* apply flow ID/VM pool/VLAN ID bits to hash words */
- hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+/**
+ * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
+ **/
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+ s32 err;
- /* Process bits 0 and 16 */
- if (key & 0x0001) hash_result ^= lo_hash_dword;
- if (key & 0x00010000) hash_result ^= hi_hash_dword;
+ /* Before enabling Flow Director, verify the Rx Packet Buffer size */
+ err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl);
+ if (err)
+ return err;
/*
- * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
- * delay this because bit 0 of the stream should not be processed
- * so we do not add the vlan until after bit 0 was processed
+ * Continue setup of fdirctrl register bits:
+ * Turn perfect match filtering on
+ * Report hash in RSS field of Rx wb descriptor
+ * Initialize the drop queue
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 (0x4 * 16) filters are left
*/
- lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
+ IXGBE_FDIRCTRL_REPORT_STATUS |
+ (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
+ (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+ (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
- /* process the remaining 30 bits in the key 2 bits at a time */
- for (i = 15; i; i-- ) {
- if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
- if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
- }
-
- return hash_result & IXGBE_ATR_HASH_MASK;
+ return 0;
}
/*
@@ -1514,7 +1421,6 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
*/
fdirhashcmd = (u64)fdircmd << 32;
fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
-
IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
@@ -1522,6 +1428,101 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
return 0;
}
+#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+} while (0)
+
+/**
+ * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
+ * @input: input bitstream to compute the hash on
+ * @input_mask: mask for the input bitstream
+ *
+ * This function serves two main purposes.  First it applies the input_mask
+ * to the atr_input, resulting in a cleaned up atr_input data stream.
+ * Second, it computes the hash and stores it in the bkt_hash field at
+ * the end of the input byte stream. This way it will be available for
+ * future use without needing to recompute the hash.
+ **/
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *input_mask)
+{
+
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 bucket_hash = 0;
+
+ /* Apply masks to input data */
+ input->dword_stream[0] &= input_mask->dword_stream[0];
+ input->dword_stream[1] &= input_mask->dword_stream[1];
+ input->dword_stream[2] &= input_mask->dword_stream[2];
+ input->dword_stream[3] &= input_mask->dword_stream[3];
+ input->dword_stream[4] &= input_mask->dword_stream[4];
+ input->dword_stream[5] &= input_mask->dword_stream[5];
+ input->dword_stream[6] &= input_mask->dword_stream[6];
+ input->dword_stream[7] &= input_mask->dword_stream[7];
+ input->dword_stream[8] &= input_mask->dword_stream[8];
+ input->dword_stream[9] &= input_mask->dword_stream[9];
+ input->dword_stream[10] &= input_mask->dword_stream[10];
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = ntohl(input->dword_stream[0]);
+
+ /* generate common hash dword */
+ hi_hash_dword = ntohl(input->dword_stream[1] ^
+ input->dword_stream[2] ^
+ input->dword_stream[3] ^
+ input->dword_stream[4] ^
+ input->dword_stream[5] ^
+ input->dword_stream[6] ^
+ input->dword_stream[7] ^
+ input->dword_stream[8] ^
+ input->dword_stream[9] ^
+ input->dword_stream[10]);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the vlan until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
+
+ /*
+ * Limit hash to 13 bits since max bucket count is 8K.
+ * Store result at the end of the input stream.
+ */
+ input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+}
+
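For reference, a simplified host-side model of the key fold that IXGBE_COMPUTE_BKT_HASH_ITERATION unrolls above: each of the sixteen key bit positions contributes a shifted copy of the low or high hash dword when the corresponding key bit is set. The model ignores the bit-0/VLAN ordering detail handled in the function and uses made-up key and dword values:

#include <stdint.h>
#include <stdio.h>

static uint32_t fold_hash(uint32_t key, uint32_t lo, uint32_t hi)
{
	uint32_t hash = 0;
	int n;

	for (n = 0; n < 16; n++) {
		if (key & (1u << n))
			hash ^= lo >> n;
		if (key & (1u << (n + 16)))
			hash ^= hi >> n;
	}
	/* the driver keeps only 13 bits: the bucket table holds 8K entries */
	return hash & 0x1FFF;
}

int main(void)
{
	printf("0x%04x\n", fold_hash(0x1E1B2D0Du, 0xDEADBEEFu, 0x01234567u));
	return 0;
}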
/**
* ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
* @input_mask: mask to be bit swapped
@@ -1531,11 +1532,11 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* generate a correctly swapped value we need to bit swap the mask and that
* is what is accomplished by this function.
**/
-static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
- u32 mask = ntohs(input_masks->dst_port_mask);
+ u32 mask = ntohs(input_mask->formatted.dst_port);
mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
- mask |= ntohs(input_masks->src_port_mask);
+ mask |= ntohs(input_mask->formatted.src_port);
mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
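The port mask is bit-reversed with the classic swap ladder (bit pairs, 2-bit groups, then nibbles); the remaining rungs lie below this hunk boundary. As a reference only, a complete stand-alone 32-bit reversal built from the same technique:

#include <stdint.h>
#include <stdio.h>

static uint32_t bit_reverse32(uint32_t v)
{
	v = ((v & 0x55555555) << 1) | ((v & 0xAAAAAAAA) >> 1);
	v = ((v & 0x33333333) << 2) | ((v & 0xCCCCCCCC) >> 2);
	v = ((v & 0x0F0F0F0F) << 4) | ((v & 0xF0F0F0F0) >> 4);
	v = ((v & 0x00FF00FF) << 8) | ((v & 0xFF00FF00) >> 8);
	return (v << 16) | (v >> 16);
}

int main(void)
{
	/* 0x00000001 reversed is 0x80000000 */
	printf("0x%08x\n", bit_reverse32(0x00000001u));
	return 0;
}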
@@ -1557,52 +1558,14 @@ static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
#define IXGBE_STORE_AS_BE16(_value) \
- (((u16)(_value) >> 8) | ((u16)(_value) << 8))
+ ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8))
-/**
- * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
- * @hw: pointer to hardware structure
- * @input: input bitstream
- * @input_masks: bitwise masks for relevant fields
- * @soft_id: software index into the silicon hash tables for filter storage
- * @queue: queue index to direct traffic to
- *
- * Note that the caller to this function must lock before calling, since the
- * hardware writes must be protected from one another.
- **/
-s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- struct ixgbe_atr_input_masks *input_masks,
- u16 soft_id, u8 queue)
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask)
{
- u32 fdirhash;
- u32 fdircmd;
- u32 fdirport, fdirtcpm;
- u32 fdirvlan;
- /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
- u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
- IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
-
- /*
- * Check flow_type formatting, and bail out before we touch the hardware
- * if there's a configuration issue
- */
- switch (input->formatted.flow_type) {
- case IXGBE_ATR_FLOW_TYPE_IPV4:
- /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
- fdirm |= IXGBE_FDIRM_L4P;
- case IXGBE_ATR_FLOW_TYPE_SCTPV4:
- if (input_masks->dst_port_mask || input_masks->src_port_mask) {
- hw_dbg(hw, " Error on src/dst port mask\n");
- return IXGBE_ERR_CONFIG;
- }
- case IXGBE_ATR_FLOW_TYPE_TCPV4:
- case IXGBE_ATR_FLOW_TYPE_UDPV4:
- break;
- default:
- hw_dbg(hw, " Error on flow type input\n");
- return IXGBE_ERR_CONFIG;
- }
+ /* mask IPv6 since it is currently not supported */
+ u32 fdirm = IXGBE_FDIRM_DIPv6;
+ u32 fdirtcpm;
/*
* Program the relevant mask registers. If src/dst_port or src/dst_addr
@@ -1614,41 +1577,71 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
* point in time.
*/
- /* Program FDIRM */
- switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
- case 0xEFFF:
- /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
- fdirm &= ~IXGBE_FDIRM_VLANID;
- case 0xE000:
- /* Unmask VLAN prio - bit 1 */
- fdirm &= ~IXGBE_FDIRM_VLANP;
+ /* verify bucket hash is cleared on hash generation */
+ if (input_mask->formatted.bkt_hash)
+ hw_dbg(hw, " bucket hash should always be 0 in mask\n");
+
+ /* Program FDIRM and verify partial masks */
+ switch (input_mask->formatted.vm_pool & 0x7F) {
+ case 0x0:
+ fdirm |= IXGBE_FDIRM_POOL;
+ case 0x7F:
break;
- case 0x0FFF:
- /* Unmask VLAN ID - bit 0 */
- fdirm &= ~IXGBE_FDIRM_VLANID;
+ default:
+ hw_dbg(hw, " Error on vm pool mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
+ case 0x0:
+ fdirm |= IXGBE_FDIRM_L4P;
+ if (input_mask->formatted.dst_port ||
+ input_mask->formatted.src_port) {
+ hw_dbg(hw, " Error on src/dst port mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ case IXGBE_ATR_L4TYPE_MASK:
break;
+ default:
+ hw_dbg(hw, " Error on flow type mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
case 0x0000:
- /* do nothing, vlans already masked */
+ /* mask VLAN ID, fall through to mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ case 0x0FFF:
+ /* mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+ break;
+ case 0xE000:
+ /* mask VLAN ID only, fall through */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ case 0xEFFF:
+ /* no VLAN fields masked */
break;
default:
hw_dbg(hw, " Error on VLAN mask\n");
return IXGBE_ERR_CONFIG;
}
- if (input_masks->flex_mask & 0xFFFF) {
- if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
- hw_dbg(hw, " Error on flexible byte mask\n");
- return IXGBE_ERR_CONFIG;
- }
- /* Unmask Flex Bytes - bit 4 */
- fdirm &= ~IXGBE_FDIRM_FLEX;
+ switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+ case 0x0000:
+ /* Mask Flex Bytes, fall through */
+ fdirm |= IXGBE_FDIRM_FLEX;
+ case 0xFFFF:
+ break;
+ default:
+ hw_dbg(hw, " Error on flexible byte mask\n");
+ return IXGBE_ERR_CONFIG;
}
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
/* store the TCP/UDP port masks, bit reversed from port layout */
- fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
/* write both the same so that UDP and TCP use the same mask */
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
@@ -1656,24 +1649,32 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
 /* store source and destination IP masks (big-endian) */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
- ~input_masks->src_ip_mask[0]);
+ ~input_mask->formatted.src_ip[0]);
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
- ~input_masks->dst_ip_mask[0]);
+ ~input_mask->formatted.dst_ip[0]);
- /* Apply masks to input data */
- input->formatted.vlan_id &= input_masks->vlan_id_mask;
- input->formatted.flex_bytes &= input_masks->flex_mask;
- input->formatted.src_port &= input_masks->src_port_mask;
- input->formatted.dst_port &= input_masks->dst_port_mask;
- input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
- input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
+ return 0;
+}
- /* record vlan (little-endian) and flex_bytes(big-endian) */
- fdirvlan =
- IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
- fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
- fdirvlan |= ntohs(input->formatted.vlan_id);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue)
+{
+ u32 fdirport, fdirvlan, fdirhash, fdircmd;
+
+ /* currently IPv6 is not supported, must be programmed with 0 */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
+ input->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
+ input->formatted.src_ip[1]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
+ input->formatted.src_ip[2]);
+
+ /* record the source address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+ /* record the first 32 bits of the destination address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
/* record source and destination port (little-endian)*/
fdirport = ntohs(input->formatted.dst_port);
@@ -1681,29 +1682,80 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
fdirport |= ntohs(input->formatted.src_port);
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
- /* record the first 32 bits of the destination address (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+ /* record vlan (little-endian) and flex_bytes(big-endian) */
+ fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= ntohs(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
- /* record the source address (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+ /* configure FDIRHASH register */
+ fdirhash = input->formatted.bkt_hash;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /*
+ * flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ IXGBE_WRITE_FLUSH(hw);
/* configure FDIRCMD register */
fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ if (queue == IXGBE_FDIR_DROP_QUEUE)
+ fdircmd |= IXGBE_FDIRCMD_DROP;
fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
- /* we only want the bucket hash so drop the upper 16 bits */
- fdirhash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY);
- fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
return 0;
}
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id)
+{
+ u32 fdirhash;
+ u32 fdircmd = 0;
+ u32 retry_count;
+ s32 err = 0;
+
+ /* configure FDIRHASH register */
+ fdirhash = input->formatted.bkt_hash;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /* flush hash to HW */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Query if filter is present */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+ for (retry_count = 10; retry_count; retry_count--) {
+ /* allow 10us for query to process */
+ udelay(10);
+ /* verify query completed successfully */
+ fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+ if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+ break;
+ }
+
+ if (!retry_count)
+ err = IXGBE_ERR_FDIR_REINIT_FAILED;
+
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+ }
+
+ return err;
+}
+
/**
* ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
* @hw: pointer to hardware structure
@@ -2146,6 +2198,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
.write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
.setup_link = &ixgbe_setup_mac_link_82599,
+ .set_rxpba = &ixgbe_set_rxpba_generic,
.check_link = &ixgbe_check_mac_link_generic,
.get_link_capabilities = &ixgbe_get_link_capabilities_82599,
.led_on = &ixgbe_led_on_generic,
@@ -2163,6 +2216,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.clear_vfta = &ixgbe_clear_vfta_generic,
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
+ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = &ixgbe_setup_sfp_modules_82599,
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index b894b42a741..fc1375f26fe 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1292,7 +1292,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
udelay(5);
ixgbe_standby_eeprom(hw);
- };
+ }
/*
* On some parts, SPI write time could vary from 0-20mSec on 3.3V
@@ -1374,7 +1374,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
* EEPROM
*/
mask = mask >> 1;
- };
+ }
/* We leave the "DI" bit set to "0" when we leave this routine. */
eec &= ~IXGBE_EEC_DI;
@@ -2632,6 +2632,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
usleep_range(10000, 20000);
}
@@ -3267,3 +3268,243 @@ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
return 0;
}
+
+/**
+ * ixgbe_set_rxpba_generic - Initialize RX packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
+ int num_pb,
+ u32 headroom,
+ int strategy)
+{
+ u32 pbsize = hw->mac.rx_pb_size;
+ int i = 0;
+ u32 rxpktsize, txpktsize, txpbthresh;
+
+ /* Reserve headroom */
+ pbsize -= headroom;
+
+ if (!num_pb)
+ num_pb = 1;
+
+ /* Divide remaining packet buffer space amongst the number
+ * of packet buffers requested using supplied strategy.
+ */
+ switch (strategy) {
+ case (PBA_STRATEGY_WEIGHTED):
+ /* pba_80_48 strategy weights the first half of the packet
+ * buffers with 5/8 of the packet buffer space.
+ */
+ rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8));
+ pbsize -= rxpktsize * (num_pb / 2);
+ rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
+ for (; i < (num_pb / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Fall through to configure remaining packet buffers */
+ case (PBA_STRATEGY_EQUAL):
+ /* Divide the remaining Rx packet buffer evenly among the TCs */
+ rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
+ for (; i < num_pb; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Setup Tx packet buffer and threshold equally for all TCs
+ * TXPBTHRESH register is set in K so divide by 1024 and subtract
+ * 10 since the largest packet we support is just over 9K.
+ */
+ txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
+ txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
+ for (i = 0; i < num_pb; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+ }
+
+ /* Clear unused TCs, if any, to zero buffer size */
+ for (; i < IXGBE_MAX_PB; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+ }
+}
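A quick worked example of the weighted split implemented above, assuming an illustrative 512 KB Rx packet buffer, eight packet buffers and no headroom; under those assumptions it reproduces the 80 KB / 48 KB layout used by the old pba_80_48 code:

#include <stdio.h>

int main(void)
{
	unsigned int pbsize = 512;	/* KB, illustrative */
	unsigned int num_pb = 8;
	unsigned int weighted, equal;

	/* first half gets 5/8 of the space: (512 * 5 * 2) / (8 * 8) = 80 KB */
	weighted = (pbsize * 5 * 2) / (num_pb * 8);
	pbsize -= weighted * (num_pb / 2);

	/* remaining 192 KB split across the last four: 48 KB each */
	equal = pbsize / (num_pb - num_pb / 2);

	printf("first half: %u KB each, second half: %u KB each\n",
	       weighted, equal);
	return 0;
}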
+
+/**
+ * ixgbe_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to the buffer to checksum
+ * @length: length of the buffer in bytes
+ *
+ * Calculates the checksum over the given buffer for the specified
+ * length and returns the calculated checksum.
+ **/
+static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8) (0 - sum);
+}
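The checksum is the two's complement of the byte sum, so adding the stored checksum back to the byte sum of the covered data yields zero modulo 256. A stand-alone check with made-up data:

#include <stdint.h>
#include <stdio.h>

static uint8_t calc_checksum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return (uint8_t)(0 - sum);
}

int main(void)
{
	uint8_t msg[8] = { 0x10, 0x20, 0x30, 0x40, 0x55, 0xAA, 0x01, 0x00 };
	uint8_t total = 0;
	size_t i;

	msg[7] = calc_checksum(msg, 7);	/* last byte carries the checksum */
	for (i = 0; i < 8; i++)
		total += msg[i];
	printf("checksum=0x%02x total=0x%02x\n", msg[7], total);	/* total is 0x00 */
	return 0;
}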
+
+/**
+ * ixgbe_host_interface_command - Issue command to manageability block
+ * @hw: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be a multiple of 4 bytes
+ *
+ * Communicates with the manageability block.  Returns 0 on success,
+ * or IXGBE_ERR_HOST_INTERFACE_COMMAND on failure.
+ **/
+static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
+ u32 length)
+{
+ u32 hicr, i;
+ u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+ u8 buf_len, dword_len;
+
+ s32 ret_val = 0;
+
+ if (length == 0 || length & 0x3 ||
+ length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ hw_dbg(hw, "Buffer length failure.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if ((hicr & IXGBE_HICR_EN) == 0) {
+ hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs */
+ dword_len = length >> 2;
+
+ /*
+ * The device driver writes the relevant command block
+ * into the ram area.
+ */
+ for (i = 0; i < dword_len; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ i, *((u32 *)buffer + i));
+
+ /* Setting this bit tells the ARC that a new command is pending. */
+ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
+
+ for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if (!(hicr & IXGBE_HICR_C))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Check command successful completion. */
+ if (i == IXGBE_HI_COMMAND_TIMEOUT ||
+ (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
+ hw_dbg(hw, "Command has failed with no status valid.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs */
+ dword_len = hdr_size >> 2;
+
+ /* first pull in the header so we know the buffer length */
+ for (i = 0; i < dword_len; i++)
+ *((u32 *)buffer + i) =
+ IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+
+ /* If there is anything in the data position, pull it in */
+ buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
+ if (buf_len == 0)
+ goto out;
+
+ if (length < (buf_len + hdr_size)) {
+ hw_dbg(hw, "Buffer not large enough for reply message.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs, add one for odd lengths */
+ dword_len = (buf_len + 1) >> 2;
+
+ /* Pull in the rest of the buffer (i is where we left off) */
+ for (; i < buf_len; i++)
+ *((u32 *)buffer + i) =
+ IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ *
+ * Sends driver version number to firmware through the manageability
+ * block.  Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the driver/firmware
+ * semaphore cannot be acquired, or IXGBE_ERR_HOST_INTERFACE_COMMAND if the
+ * command fails.
+ **/
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub)
+{
+ struct ixgbe_hic_drv_info fw_cmd;
+ int i;
+ s32 ret_val = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+ fw_cmd.pad = 0;
+ fw_cmd.pad2 = 0;
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u8 *)&fw_cmd,
+ sizeof(fw_cmd));
+ if (ret_val != 0)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = 0;
+ else
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ break;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+out:
+ return ret_val;
+}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 46be83cfb50..f24fd64a4c4 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -99,6 +99,11 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub);
+
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy);
#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 686a17aadef..9d88c31487b 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -258,15 +258,13 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->rx_pba_cfg,
- pfc_en, refill, max, bwgid,
- ptype);
+ ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max,
+ bwgid, ptype);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
- ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg,
- pfc_en, refill, max, bwgid,
- ptype, prio_tc);
+ ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
+ bwgid, ptype, prio_tc);
break;
default:
break;
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 944838fc7b5..e85826ae032 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -123,11 +123,6 @@ struct tc_configuration {
u8 tc; /* Traffic class (TC) */
};
-enum dcb_rx_pba_cfg {
- pba_equal, /* PBA[0-7] each use 64KB FIFO */
- pba_80_48 /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
-};
-
struct dcb_num_tcs {
u8 pg_tcs;
u8 pfc_tcs;
@@ -140,8 +135,6 @@ struct ixgbe_dcb_config {
u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
bool pfc_mode_enable;
- enum dcb_rx_pba_cfg rx_pba_cfg;
-
u32 dcb_cfg_version; /* Not used...OS-specific? */
u32 link_speed; /* For bandwidth allocation validation purpose */
};
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 771d01a60d0..2288c3cac01 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -32,45 +32,6 @@
#include "ixgbe_dcb_82598.h"
/**
- * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure packet buffers for DCB mode.
- */
-static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba)
-{
- s32 ret_val = 0;
- u32 value = IXGBE_RXPBSIZE_64KB;
- u8 i = 0;
-
- /* Setup Rx packet buffer sizes */
- switch (rx_pba) {
- case pba_80_48:
- /* Setup the first four at 80KB */
- value = IXGBE_RXPBSIZE_80KB;
- for (; i < 4; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
- /* Setup the last four at 48KB...don't re-init i */
- value = IXGBE_RXPBSIZE_48KB;
- /* Fall Through */
- case pba_equal:
- default:
- for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
-
- /* Setup Tx packet buffer sizes */
- for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
- IXGBE_TXPBSIZE_40KB);
- }
- break;
- }
-
- return ret_val;
-}
-
-/**
* ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
* @hw: pointer to hardware structure
* @dcb_config: pointer to ixgbe_dcb_config structure
@@ -321,11 +282,9 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
- u8 rx_pba, u8 pfc_en, u16 *refill,
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
{
- ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba);
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
bwg_id, prio_type);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index 1e9750c2b46..2f318935561 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -91,8 +91,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
u8 *bwg_id,
u8 *prio_type);
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
- u8 rx_pba, u8 pfc_en, u16 *refill,
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type);
#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index d50cf78c234..ade98200288 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -31,63 +31,6 @@
#include "ixgbe_dcb_82599.h"
/**
- * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
- * @hw: pointer to hardware structure
- * @rx_pba: method to distribute packet buffer
- *
- * Configure packet buffers for DCB mode.
- */
-static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
-{
- int num_tcs = IXGBE_MAX_PACKET_BUFFERS;
- u32 rx_pb_size = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
- u32 rxpktsize;
- u32 txpktsize;
- u32 txpbthresh;
- u8 i = 0;
-
- /*
- * This really means configure the first half of the TCs
- * (Traffic Classes) to use 5/8 of the Rx packet buffer
- * space. To determine the size of the buffer for each TC,
- * we are multiplying the average size by 5/4 and applying
- * it to half of the traffic classes.
- */
- if (rx_pba == pba_80_48) {
- rxpktsize = (rx_pb_size * 5) / (num_tcs * 4);
- rx_pb_size -= rxpktsize * (num_tcs / 2);
- for (; i < (num_tcs / 2); i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
- }
-
- /* Divide the remaining Rx packet buffer evenly among the TCs */
- rxpktsize = rx_pb_size / (num_tcs - i);
- for (; i < num_tcs; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
-
- /*
- * Setup Tx packet buffer and threshold equally for all TCs
- * TXPBTHRESH register is set in K so divide by 1024 and subtract
- * 10 since the largest packet we support is just over 9K.
- */
- txpktsize = IXGBE_TXPBSIZE_MAX / num_tcs;
- txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
- for (i = 0; i < num_tcs; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
- IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
- }
-
- /* Clear unused TCs, if any, to zero buffer size*/
- for (; i < MAX_TRAFFIC_CLASS; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
- }
-
- return 0;
-}
-
-/**
* ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits index by traffic class
@@ -376,65 +319,8 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
}
/**
- * ixgbe_dcb_config_82599 - Configure general DCB parameters
- * @hw: pointer to hardware structure
- *
- * Configure general DCB parameters.
- */
-static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
-{
- u32 reg;
- u32 q;
-
- /* Disable the Tx desc arbiter so that MTQC can be changed */
- reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
- reg |= IXGBE_RTTDCS_ARBDIS;
- IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
- /* Enable DCB for Rx with 8 TCs */
- reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
- switch (reg & IXGBE_MRQC_MRQE_MASK) {
- case 0:
- case IXGBE_MRQC_RT4TCEN:
- /* RSS disabled cases */
- reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
- break;
- case IXGBE_MRQC_RSSEN:
- case IXGBE_MRQC_RTRSS4TCEN:
- /* RSS enabled cases */
- reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RTRSS8TCEN;
- break;
- default:
- /* Unsupported value, assume stale data, overwrite no RSS */
- reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
- }
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
-
- /* Enable DCB for Tx with 8 TCs */
- reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
-
- /* Disable drop for all queues */
- for (q = 0; q < 128; q++)
- IXGBE_WRITE_REG(hw, IXGBE_QDE, q << IXGBE_QDE_IDX_SHIFT);
-
- /* Enable the Tx desc arbiter */
- reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
- reg &= ~IXGBE_RTTDCS_ARBDIS;
- IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
- /* Enable Security TX Buffer IFG for DCB */
- reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
- reg |= IXGBE_SECTX_DCB;
- IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
-
- return 0;
-}
-
-/**
* ixgbe_dcb_hw_config_82599 - Configure and enable DCB
* @hw: pointer to hardware structure
- * @rx_pba: method to distribute packet buffer
* @refill: refill credits index by traffic class
* @max: max credits index by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
@@ -443,12 +329,9 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
- u8 rx_pba, u8 pfc_en, u16 *refill,
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
{
- ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
- ixgbe_dcb_config_82599(hw);
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
prio_type, prio_tc);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 2de71a50315..08d1749862a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -86,17 +86,6 @@
#define IXGBE_RTTPCS_ARBD_SHIFT 22
#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */
-#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */
-#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
-#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
-#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
-#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
-#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
-#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/
-
-#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */
-#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
-
/* SECTXMINIFG DCB */
#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */
@@ -127,8 +116,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
u8 *prio_type,
u8 *prio_tc);
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
- u8 rx_pba, u8 pfc_en, u16 *refill,
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type,
u8 *prio_tc);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 5e7ed225851..0ace6ce1d0b 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -114,20 +114,19 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
u8 err = 0;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ /* verify there is something to do, if not then exit */
+ if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return err;
+
if (state > 0) {
/* Turn on DCB */
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- goto out;
-
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
e_err(drv, "Enable failed, needs MSI-X\n");
err = 1;
goto out;
}
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
+ adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
@@ -137,46 +136,30 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
break;
default:
break;
}
- adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
- if (!netdev_get_num_tc(netdev))
- ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
-
- ixgbe_init_interrupt_scheme(adapter);
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_open(netdev);
+ ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
} else {
/* Turn off DCB */
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
-
- adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
- adapter->temp_dcb_cfg.pfc_mode_enable = false;
- adapter->dcb_cfg.pfc_mode_enable = false;
- adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+ adapter->temp_dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_cfg.pfc_mode_enable = false;
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
- break;
- default:
- break;
- }
-
- ixgbe_setup_tc(netdev, 0);
-
- ixgbe_init_interrupt_scheme(adapter);
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_open(netdev);
+ break;
+ default:
+ break;
}
+ ixgbe_setup_tc(netdev, 0);
}
+
out:
return err;
}
@@ -347,24 +330,20 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int ret;
+#ifdef IXGBE_FCOE
struct dcb_app app = {
.selector = DCB_APP_IDTYPE_ETHTYPE,
.protocol = ETH_P_FCOE,
};
u8 up = dcb_getapp(netdev, &app);
- int ret;
+#endif
ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
MAX_TRAFFIC_CLASS);
if (ret)
return DCB_NO_HW_CHG;
- /* In IEEE mode app data must be parsed into DCBX format for
- * hardware routines.
- */
- if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
- up = (1 << up);
-
#ifdef IXGBE_FCOE
if (up && (up != (1 << adapter->fcoe.up)))
adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
@@ -378,7 +357,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- ixgbe_fcoe_setapp(adapter, up);
+ adapter->fcoe.up = ffs(up) - 1;
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
@@ -691,24 +670,75 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
return err;
}
+#ifdef IXGBE_FCOE
+static void ixgbe_dcbnl_devreset(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ if (netif_running(dev))
+ dev->netdev_ops->ndo_stop(dev);
+
+ ixgbe_clear_interrupt_scheme(adapter);
+ ixgbe_init_interrupt_scheme(adapter);
+
+ if (netif_running(dev))
+ dev->netdev_ops->ndo_open(dev);
+}
+#endif
+
static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int err = -EINVAL;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
- return -EINVAL;
+ return err;
- dcb_setapp(dev, app);
+ err = dcb_ieee_setapp(dev, app);
#ifdef IXGBE_FCOE
- if (app->selector == 1 && app->protocol == ETH_P_FCOE &&
- adapter->fcoe.tc == app->priority)
- ixgbe_dcbnl_set_all(dev);
+ if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == ETH_P_FCOE) {
+ u8 app_mask = dcb_ieee_getapp_mask(dev, app);
+
+ if (app_mask & (1 << adapter->fcoe.up))
+ return err;
+
+ adapter->fcoe.up = app->priority;
+ ixgbe_dcbnl_devreset(dev);
+ }
#endif
return 0;
}
+static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int err;
+
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ err = dcb_ieee_delapp(dev, app);
+
+#ifdef IXGBE_FCOE
+ if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == ETH_P_FCOE) {
+ u8 app_mask = dcb_ieee_getapp_mask(dev, app);
+
+ if (app_mask & (1 << adapter->fcoe.up))
+ return err;
+
+ adapter->fcoe.up = app_mask ?
+ ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC;
+ ixgbe_dcbnl_devreset(dev);
+ }
+#endif
+ return err;
+}
+
static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -760,6 +790,7 @@ const struct dcbnl_rtnl_ops dcbnl_ops = {
.ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
.ieee_setpfc = ixgbe_dcbnl_ieee_setpfc,
.ieee_setapp = ixgbe_dcbnl_ieee_setapp,
+ .ieee_delapp = ixgbe_dcbnl_ieee_delapp,
.getstate = ixgbe_dcbnl_get_state,
.setstate = ixgbe_dcbnl_set_state,
.getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index cb1555bc854..82d4244c6e1 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -27,6 +27,7 @@
/* ethtool support for ixgbe */
+#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -441,62 +442,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
return 0;
}
-static u32 ixgbe_get_rx_csum(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
-}
-
-static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (data)
- adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
- else
- adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
-
- return 0;
-}
-
-static u32 ixgbe_get_tx_csum(struct net_device *netdev)
-{
- return (netdev->features & NETIF_F_IP_CSUM) != 0;
-}
-
-static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 feature_list;
-
- feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- feature_list |= NETIF_F_SCTP_CSUM;
- break;
- default:
- break;
- }
- if (data)
- netdev->features |= feature_list;
- else
- netdev->features &= ~feature_list;
-
- return 0;
-}
-
-static int ixgbe_set_tso(struct net_device *netdev, u32 data)
-{
- if (data) {
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- } else {
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- }
- return 0;
-}
-
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -1433,6 +1378,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
/* Disable all the interrupts */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
/* Test each interrupt */
@@ -1453,6 +1399,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
~mask & 0x00007FFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
if (adapter->test_icr & mask) {
@@ -1470,6 +1417,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
adapter->test_icr = 0;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
if (!(adapter->test_icr &mask)) {
@@ -1490,6 +1438,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
~mask & 0x00007FFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
if (adapter->test_icr) {
@@ -1501,6 +1450,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
/* Disable all the interrupts */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
/* Unhook test interrupt handler */
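The IXGBE_WRITE_FLUSH() calls sprinkled through the interrupt test are the usual posted-write barrier: the macro is a harmless read of a device status register, which forces the preceding MMIO writes out of any posted-write buffers before the driver sleeps and samples test_icr. A minimal kernel-style sketch of the pattern (register offsets here are hypothetical):

#include <linux/io.h>
#include <linux/types.h>

/* write a register, then flush it to the device with a dummy read */
static inline void mmio_write_flushed(void __iomem *bar, u32 reg, u32 val,
				      u32 status_reg)
{
	writel(val, bar + reg);		/* may linger in a posted-write buffer */
	readl(bar + status_reg);	/* read forces earlier writes to reach hw */
}
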
@@ -2055,7 +2005,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
+ ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
/* only valid if in constant ITR mode */
switch (adapter->rx_itr_setting) {
@@ -2074,7 +2024,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
}
/* if in mixed tx/rx queues per vector mode, report only rx settings */
- if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
return 0;
/* only valid if in constant ITR mode */
@@ -2139,12 +2089,12 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
bool need_reset = false;
/* don't accept tx specific changes if we've got mixed RxTx vectors */
- if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
&& ec->tx_coalesce_usecs)
return -EINVAL;
if (ec->tx_max_coalesced_frames_irq)
- adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
+ adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) {
/* check the limits */
@@ -2213,18 +2163,20 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (i = 0; i < num_vectors; i++) {
q_vector = adapter->q_vector[i];
- if (q_vector->txr_count && !q_vector->rxr_count)
+ if (q_vector->tx.count && !q_vector->rx.count)
/* tx only */
q_vector->eitr = adapter->tx_eitr_param;
else
/* rx only or mixed */
q_vector->eitr = adapter->rx_eitr_param;
+ q_vector->tx.work_limit = adapter->tx_work_limit;
ixgbe_write_eitr(q_vector);
}
/* Legacy Interrupt Mode */
} else {
q_vector = adapter->q_vector[0];
q_vector->eitr = adapter->rx_eitr_param;
+ q_vector->tx.work_limit = adapter->tx_work_limit;
ixgbe_write_eitr(q_vector);
}
@@ -2233,241 +2185,376 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
* correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
* also locks in RSC enable/disable which requires reset
*/
- if (need_reset) {
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
- else
- ixgbe_reset(adapter);
- }
+ if (need_reset)
+ ixgbe_do_reset(netdev);
return 0;
}
-static int ixgbe_set_flags(struct net_device *netdev, u32 data)
+static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- bool need_reset = false;
- int rc;
+ union ixgbe_atr_input *mask = &adapter->fdir_mask;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *rule = NULL;
+
+ /* report total rule count */
+ cmd->data = (1024 << adapter->fdir_pballoc) - 2;
+
+ hlist_for_each_entry_safe(rule, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ if (fsp->location <= rule->sw_idx)
+ break;
+ }
-#ifdef CONFIG_IXGBE_DCB
- if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- !(data & ETH_FLAG_RXVLAN))
+ if (!rule || fsp->location != rule->sw_idx)
return -EINVAL;
-#endif
-
- need_reset = (data & ETH_FLAG_RXVLAN) !=
- (netdev->features & NETIF_F_HW_VLAN_RX);
- if ((data & ETH_FLAG_RXHASH) &&
- !(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
- return -EOPNOTSUPP;
+ /* fill out the flow spec entry */
- rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
- ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
- ETH_FLAG_RXHASH);
- if (rc)
- return rc;
-
- /* if state changes we need to update adapter->flags and reset */
- if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
- (!!(data & ETH_FLAG_LRO) !=
- !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
- if ((data & ETH_FLAG_LRO) &&
- (!adapter->rx_itr_setting ||
- (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
- e_info(probe, "rx-usecs set too low, "
- "not enabling RSC.\n");
- } else {
- adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- need_reset = true;
- break;
- case ixgbe_mac_X540: {
- int i;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring =
- adapter->rx_ring[i];
- if (adapter->flags2 &
- IXGBE_FLAG2_RSC_ENABLED) {
- ixgbe_configure_rscctl(adapter,
- ring);
- } else {
- ixgbe_clear_rscctl(adapter,
- ring);
- }
- }
- }
- break;
- default:
- break;
- }
- }
+ /* set flow type field */
+ switch (rule->filter.formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ fsp->flow_type = TCP_V4_FLOW;
+ break;
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ fsp->flow_type = UDP_V4_FLOW;
+ break;
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ fsp->flow_type = SCTP_V4_FLOW;
+ break;
+ case IXGBE_ATR_FLOW_TYPE_IPV4:
+ fsp->flow_type = IP_USER_FLOW;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ break;
+ default:
+ return -EINVAL;
}
- /*
- * Check if Flow Director n-tuple support was enabled or disabled. If
- * the state changed, we need to reset.
- */
- if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
- (!(data & ETH_FLAG_NTUPLE))) {
- /* turn off Flow Director perfect, set hash and reset */
- adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
- need_reset = true;
- } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
- (data & ETH_FLAG_NTUPLE)) {
- /* turn off Flow Director hash, enable perfect and reset */
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- need_reset = true;
- } else {
- /* no state change */
- }
+ fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
+ fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
+ fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
+ fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
+ fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
+ fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
+ fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
+ fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
+ fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
+ fsp->flow_type |= FLOW_EXT;
+
+ /* record action */
+ if (rule->action == IXGBE_FDIR_DROP_QUEUE)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->action;
- if (need_reset) {
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
- else
- ixgbe_reset(adapter);
+ return 0;
+}
+
+static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *rule;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = (1024 << adapter->fdir_pballoc) - 2;
+
+ hlist_for_each_entry_safe(rule, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = rule->sw_idx;
+ cnt++;
}
return 0;
}
-static int ixgbe_set_rx_ntuple(struct net_device *dev,
- struct ethtool_rx_ntuple *cmd)
+static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ void *rule_locs)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
- union ixgbe_atr_input input_struct;
- struct ixgbe_atr_input_masks input_masks;
- int target_queue;
- int err;
+ int ret = -EOPNOTSUPP;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
- return -EOPNOTSUPP;
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->fdir_filter_count;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = ixgbe_get_ethtool_fdir_all(adapter, cmd,
+ (u32 *)rule_locs);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
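ixgbe_get_rxnfc() is reached through the generic SIOCETHTOOL path, so any userspace caller can exercise it. A hypothetical sketch that queries the value this handler returns for ETHTOOL_GRXRINGS (the Rx ring count placed in cmd->data); error handling is trimmed to the essentials:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_GRXRINGS };
	struct ifreq ifr;
	int fd;

	if (argc < 2)
		return 1;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GRXRINGS");
		close(fd);
		return 1;
	}

	printf("%s: %llu Rx rings\n", argv[1], (unsigned long long)nfc.data);
	close(fd);
	return 0;
}
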
+static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ixgbe_fdir_filter *input,
+ u16 sw_idx)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct hlist_node *node, *node2, *parent;
+ struct ixgbe_fdir_filter *rule;
+ int err = -EINVAL;
+
+ parent = NULL;
+ rule = NULL;
+
+ hlist_for_each_entry_safe(rule, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ /* hash found, or no matching entry */
+ if (rule->sw_idx >= sw_idx)
+ break;
+ parent = node;
+ }
+
+ /* if there is an old rule occupying our place remove it */
+ if (rule && (rule->sw_idx == sw_idx)) {
+ if (!input || (rule->filter.formatted.bkt_hash !=
+ input->filter.formatted.bkt_hash)) {
+ err = ixgbe_fdir_erase_perfect_filter_82599(hw,
+ &rule->filter,
+ sw_idx);
+ }
+
+ hlist_del(&rule->fdir_node);
+ kfree(rule);
+ adapter->fdir_filter_count--;
+ }
/*
- * Don't allow programming if the action is a queue greater than
- * the number of online Tx queues.
+ * If no input this was a delete, err should be 0 if a rule was
+ * successfully found and removed from the list else -EINVAL
*/
- if ((fs->action >= adapter->num_tx_queues) ||
- (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
- return -EINVAL;
+ if (!input)
+ return err;
- memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
- memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
+ /* initialize node and set software index */
+ INIT_HLIST_NODE(&input->fdir_node);
- /* record flow type */
- switch (fs->flow_type) {
- case IPV4_FLOW:
- input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
- break;
+ /* add filter to the list */
+ if (parent)
+ hlist_add_after(parent, &input->fdir_node);
+ else
+ hlist_add_head(&input->fdir_node,
+ &adapter->fdir_filter_list);
+
+ /* update counts */
+ adapter->fdir_filter_count++;
+
+ return 0;
+}
+
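ixgbe_update_ethtool_fdir_entry() doubles as insert and delete: the filter hlist is kept sorted by sw_idx, the walk remembers the last node below the target as parent, an existing rule occupying the slot is erased from hardware and freed, and a NULL input turns the whole call into a plain delete. The same sorted-insert-with-replace shape, reduced to an ordinary singly linked list (all names here are illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct rule {
	unsigned short sw_idx;
	struct rule *next;
};

/* Insert idx keeping the list sorted by sw_idx, replacing any old entry,
 * mirroring the parent/rule walk in ixgbe_update_ethtool_fdir_entry(). */
static void insert_rule(struct rule **head, unsigned short idx)
{
	struct rule *rule = *head, *parent = NULL, *input;

	while (rule && rule->sw_idx < idx) {	/* stop at >= idx, remember parent */
		parent = rule;
		rule = rule->next;
	}

	if (rule && rule->sw_idx == idx) {	/* old rule occupies our place */
		if (parent)
			parent->next = rule->next;
		else
			*head = rule->next;
		free(rule);
	}

	input = malloc(sizeof(*input));
	if (!input)
		return;
	input->sw_idx = idx;

	if (parent) {				/* hlist_add_after() equivalent */
		input->next = parent->next;
		parent->next = input;
	} else {				/* hlist_add_head() equivalent */
		input->next = *head;
		*head = input;
	}
}

int main(void)
{
	struct rule *head = NULL, *r;

	insert_rule(&head, 4);
	insert_rule(&head, 1);
	insert_rule(&head, 4);	/* replaces the first idx-4 entry */

	for (r = head; r; r = r->next)
		printf("rule at sw_idx %u\n", r->sw_idx);
	return 0;
}
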
+static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
+ u8 *flow_type)
+{
+ switch (fsp->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
- input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
break;
case UDP_V4_FLOW:
- input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+ *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
break;
case SCTP_V4_FLOW:
- input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
break;
- default:
- return -1;
- }
-
- /* copy vlan tag minus the CFI bit */
- if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
- input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
- if (!fs->vlan_tag_mask) {
- input_masks.vlan_id_mask = htons(0xEFFF);
- } else {
- switch (~fs->vlan_tag_mask & 0xEFFF) {
- /* all of these are valid vlan-mask values */
- case 0xEFFF:
- case 0xE000:
- case 0x0FFF:
- case 0x0000:
- input_masks.vlan_id_mask =
- htons(~fs->vlan_tag_mask);
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ break;
+ case IPPROTO_UDP:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+ break;
+ case IPPROTO_SCTP:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ break;
+ case 0:
+ if (!fsp->m_u.usr_ip4_spec.proto) {
+ *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
break;
- /* exit with error if vlan-mask is invalid */
- default:
- e_err(drv, "Partial VLAN ID or "
- "priority mask in vlan-mask is not "
- "supported by hardware\n");
- return -1;
}
+ default:
+ return 0;
}
+ break;
+ default:
+ return 0;
}
- /* make sure we only use the first 2 bytes of user data */
- if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
- input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
- if (!(fs->data_mask & 0xFFFF)) {
- input_masks.flex_mask = 0xFFFF;
- } else if (~fs->data_mask & 0xFFFF) {
- e_err(drv, "Partial user-def-mask is not "
- "supported by hardware\n");
- return -1;
- }
- }
+ return 1;
+}
+
+static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fdir_filter *input;
+ union ixgbe_atr_input mask;
+ int err;
+
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ return -EOPNOTSUPP;
/*
- * Copy input into formatted structures
- *
- * These assignments are based on the following logic
- * If neither input or mask are set assume value is masked out.
- * If input is set, but mask is not mask should default to accept all.
- * If input is not set, but mask is set then mask likely results in 0.
- * If input is set and mask is set then assign both.
+ * Don't allow programming if the action is a queue greater than
+ * the number of online Rx queues.
*/
- if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
- input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
- if (!fs->m_u.tcp_ip4_spec.ip4src)
- input_masks.src_ip_mask[0] = 0xFFFFFFFF;
- else
- input_masks.src_ip_mask[0] =
- ~fs->m_u.tcp_ip4_spec.ip4src;
- }
- if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
- input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
- if (!fs->m_u.tcp_ip4_spec.ip4dst)
- input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
- else
- input_masks.dst_ip_mask[0] =
- ~fs->m_u.tcp_ip4_spec.ip4dst;
+ if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+ (fsp->ring_cookie >= adapter->num_rx_queues))
+ return -EINVAL;
+
+ /* Don't allow indexes to exist outside of available space */
+ if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
+ e_err(drv, "Location out of range\n");
+ return -EINVAL;
}
- if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
- input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
- if (!fs->m_u.tcp_ip4_spec.psrc)
- input_masks.src_port_mask = 0xFFFF;
- else
- input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
+
+ input = kzalloc(sizeof(*input), GFP_ATOMIC);
+ if (!input)
+ return -ENOMEM;
+
+ memset(&mask, 0, sizeof(union ixgbe_atr_input));
+
+ /* set SW index */
+ input->sw_idx = fsp->location;
+
+ /* record flow type */
+ if (!ixgbe_flowspec_to_flow_type(fsp,
+ &input->filter.formatted.flow_type)) {
+ e_err(drv, "Unrecognized flow type\n");
+ goto err_out;
}
- if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
- input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
- if (!fs->m_u.tcp_ip4_spec.pdst)
- input_masks.dst_port_mask = 0xFFFF;
- else
- input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
+
+ mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+
+ if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+ mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+ /* Copy input into formatted structures */
+ input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+ input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+ input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+
+ if (fsp->flow_type & FLOW_EXT) {
+ input->filter.formatted.vm_pool =
+ (unsigned char)ntohl(fsp->h_ext.data[1]);
+ mask.formatted.vm_pool =
+ (unsigned char)ntohl(fsp->m_ext.data[1]);
+ input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
+ mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
+ input->filter.formatted.flex_bytes =
+ fsp->h_ext.vlan_etype;
+ mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
}
/* determine if we need to drop or route the packet */
- if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
- target_queue = MAX_RX_QUEUES - 1;
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+ input->action = IXGBE_FDIR_DROP_QUEUE;
else
- target_queue = fs->action;
+ input->action = fsp->ring_cookie;
spin_lock(&adapter->fdir_perfect_lock);
- err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
- &input_struct,
- &input_masks, 0,
- target_queue);
+
+ if (hlist_empty(&adapter->fdir_filter_list)) {
+ /* save mask and program input mask into HW */
+ memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
+ err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+ if (err) {
+ e_err(drv, "Error writing mask\n");
+ goto err_out_w_lock;
+ }
+ } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+ e_err(drv, "Only one mask supported per port\n");
+ goto err_out_w_lock;
+ }
+
+ /* apply mask and compute/store hash */
+ ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+
+ /* program filters to filter memory */
+ err = ixgbe_fdir_write_perfect_filter_82599(hw,
+ &input->filter, input->sw_idx,
+ (input->action == IXGBE_FDIR_DROP_QUEUE) ?
+ IXGBE_FDIR_DROP_QUEUE :
+ adapter->rx_ring[input->action]->reg_idx);
+ if (err)
+ goto err_out_w_lock;
+
+ ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+
spin_unlock(&adapter->fdir_perfect_lock);
- return err ? -1 : 0;
+ return err;
+err_out_w_lock:
+ spin_unlock(&adapter->fdir_perfect_lock);
+err_out:
+ kfree(input);
+ return -EINVAL;
+}
+
+static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int err;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+ err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
+ spin_unlock(&adapter->fdir_perfect_lock);
+
+ return err;
+}
+
+static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
}
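Both ixgbe_get_rxnfc() and ixgbe_add_ethtool_fdir_entry() size the rule table with the same expression, (1024 << adapter->fdir_pballoc) - 2: the perfect-filter capacity for the configured Flow Director packet-buffer allocation, minus two reserved entries. From userspace these handlers are driven by commands along the lines of "ethtool -N <dev> flow-type tcp4 ... action <queue> loc <idx>" and "ethtool -N <dev> delete <idx>". A trivial standalone check of the capacities involved (the 0-3 pballoc encoding here is an assumption):

#include <stdio.h>

int main(void)
{
	unsigned int pballoc;

	for (pballoc = 0; pballoc <= 3; pballoc++)
		printf("fdir_pballoc=%u -> %u perfect-filter rules\n",
		       pballoc, (1024u << pballoc) - 2);
	return 0;
}
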
static const struct ethtool_ops ixgbe_ethtool_ops = {
@@ -2486,16 +2573,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_ringparam = ixgbe_set_ringparam,
.get_pauseparam = ixgbe_get_pauseparam,
.set_pauseparam = ixgbe_set_pauseparam,
- .get_rx_csum = ixgbe_get_rx_csum,
- .set_rx_csum = ixgbe_set_rx_csum,
- .get_tx_csum = ixgbe_get_tx_csum,
- .set_tx_csum = ixgbe_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
.get_msglevel = ixgbe_get_msglevel,
.set_msglevel = ixgbe_set_msglevel,
- .get_tso = ethtool_op_get_tso,
- .set_tso = ixgbe_set_tso,
.self_test = ixgbe_diag_test,
.get_strings = ixgbe_get_strings,
.set_phys_id = ixgbe_set_phys_id,
@@ -2503,9 +2582,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_ethtool_stats = ixgbe_get_ethtool_stats,
.get_coalesce = ixgbe_get_coalesce,
.set_coalesce = ixgbe_set_coalesce,
- .get_flags = ethtool_op_get_flags,
- .set_flags = ixgbe_set_flags,
- .set_rx_ntuple = ixgbe_set_rx_ntuple,
+ .get_rxnfc = ixgbe_get_rxnfc,
+ .set_rxnfc = ixgbe_set_rxnfc,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 05920726e82..824edae7786 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -26,9 +26,6 @@
*******************************************************************************/
#include "ixgbe.h"
-#ifdef CONFIG_IXGBE_DCB
-#include "ixgbe_dcb_82599.h"
-#endif /* CONFIG_IXGBE_DCB */
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
@@ -40,25 +37,6 @@
#include <scsi/libfcoe.h>
/**
- * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
- * @rx_desc: advanced rx descriptor
- *
- * Returns : true if it is FCoE pkt
- */
-static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
-{
- u16 p;
-
- p = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info);
- if (p & IXGBE_RXDADV_PKTTYPE_ETQF) {
- p &= IXGBE_RXDADV_PKTTYPE_ETQF_MASK;
- p >>= IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT;
- return p == IXGBE_ETQF_FILTER_FCOE;
- }
- return false;
-}
-
-/**
* ixgbe_fcoe_clear_ddp - clear the given ddp context
* @ddp - ptr to the ixgbe_fcoe_ddp
*
@@ -128,14 +106,17 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
if (ddp->sgl)
pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
DMA_FROM_DEVICE);
- pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
+ if (ddp->pool) {
+ pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
+ ddp->pool = NULL;
+ }
+
ixgbe_fcoe_clear_ddp(ddp);
out_ddp_put:
return len;
}
-
/**
* ixgbe_fcoe_ddp_setup - called to set up ddp context
* @netdev: the corresponding net_device
@@ -163,6 +144,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
unsigned int thislen = 0;
u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
dma_addr_t addr = 0;
+ struct pci_pool *pool;
if (!netdev || !sgl)
return 0;
@@ -199,12 +181,14 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
return 0;
}
- /* alloc the udl from our ddp pool */
- ddp->udl = pci_pool_alloc(fcoe->pool, GFP_ATOMIC, &ddp->udp);
+ /* alloc the udl from per cpu ddp pool */
+ pool = *per_cpu_ptr(fcoe->pool, get_cpu());
+ ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
goto out_noddp_unmap;
}
+ ddp->pool = pool;
ddp->sgl = sgl;
ddp->sgc = sgc;
@@ -268,6 +252,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
j++;
lastsize = 1;
}
+ put_cpu();
fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
@@ -311,11 +296,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
return 1;
out_noddp_free:
- pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
+ pci_pool_free(pool, ddp->udl, ddp->udp);
ixgbe_fcoe_clear_ddp(ddp);
out_noddp_unmap:
pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+ put_cpu();
return 0;
}
@@ -374,23 +360,20 @@ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
*/
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u32 staterr)
{
u16 xid;
u32 fctl;
- u32 sterr, fceofe, fcerr, fcstat;
+ u32 fceofe, fcerr, fcstat;
int rc = -EINVAL;
struct ixgbe_fcoe *fcoe;
struct ixgbe_fcoe_ddp *ddp;
struct fc_frame_header *fh;
struct fcoe_crc_eof *crc;
- if (!ixgbe_rx_is_fcoe(rx_desc))
- goto ddp_out;
-
- sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
- fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
+ fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR);
+ fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE);
if (fcerr == IXGBE_FCERR_BADCRC)
skb_checksum_none_assert(skb);
else
@@ -419,7 +402,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
if (fcerr | fceofe)
goto ddp_out;
- fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
+ fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT);
if (fcstat) {
/* update length of DDPed data */
ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
@@ -465,24 +448,18 @@ ddp_out:
*
* Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
*/
-int ixgbe_fso(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
u32 tx_flags, u8 *hdr_len)
{
- u8 sof, eof;
+ struct fc_frame_header *fh;
u32 vlan_macip_lens;
- u32 fcoe_sof_eof;
- u32 type_tucmd;
+ u32 fcoe_sof_eof = 0;
u32 mss_l4len_idx;
- int mss = 0;
- unsigned int i;
- struct ixgbe_tx_buffer *tx_buffer_info;
- struct ixgbe_adv_tx_context_desc *context_desc;
- struct fc_frame_header *fh;
+ u8 sof, eof;
if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
- e_err(drv, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
- skb_shinfo(skb)->gso_type);
+ dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
+ skb_shinfo(skb)->gso_type);
return -EINVAL;
}
@@ -492,23 +469,22 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
sizeof(struct fcoe_hdr));
/* sets up SOF and ORIS */
- fcoe_sof_eof = 0;
sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
switch (sof) {
case FC_SOF_I2:
- fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+ fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
break;
case FC_SOF_I3:
- fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
- fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+ fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
+ IXGBE_ADVTXD_FCOEF_ORIS;
break;
case FC_SOF_N2:
break;
case FC_SOF_N3:
- fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
+ fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
break;
default:
- e_warn(drv, "unknown sof = 0x%x\n", sof);
+ dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
return -EINVAL;
}
@@ -521,12 +497,11 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
break;
case FC_EOF_T:
/* lso needs ORIE */
- if (skb_is_gso(skb)) {
- fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
- fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIE;
- } else {
+ if (skb_is_gso(skb))
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
+ IXGBE_ADVTXD_FCOEF_ORIE;
+ else
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
- }
break;
case FC_EOF_NI:
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
@@ -535,7 +510,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
break;
default:
- e_warn(drv, "unknown eof = 0x%x\n", eof);
+ dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
return -EINVAL;
}
@@ -544,47 +519,72 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
- /* hdr_len includes fc_hdr if FCoE lso is enabled */
+ /* include trailer in headlen as it is replicated per frame */
*hdr_len = sizeof(struct fcoe_crc_eof);
+
+ /* hdr_len includes fc_hdr if FCoE LSO is enabled */
if (skb_is_gso(skb))
*hdr_len += (skb_transport_offset(skb) +
sizeof(struct fc_frame_header));
- /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
- vlan_macip_lens = (skb_transport_offset(skb) +
- sizeof(struct fc_frame_header));
- vlan_macip_lens |= ((skb_transport_offset(skb) - 4)
- << IXGBE_ADVTXD_MACLEN_SHIFT);
- vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-
- /* type_tycmd and mss: set TUCMD.FCoE to enable offload */
- type_tucmd = IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
- IXGBE_ADVTXT_TUCMD_FCOE;
- if (skb_is_gso(skb))
- mss = skb_shinfo(skb)->gso_size;
+
/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
- mss_l4len_idx = (mss << IXGBE_ADVTXD_MSS_SHIFT) |
- (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+ vlan_macip_lens = skb_transport_offset(skb) +
+ sizeof(struct fc_frame_header);
+ vlan_macip_lens |= (skb_transport_offset(skb) - 4)
+ << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
/* write context desc */
- i = tx_ring->next_to_use;
- context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
-
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
+ IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
return skb_is_gso(skb);
}
+static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
+{
+ unsigned int cpu;
+ struct pci_pool **pool;
+
+ for_each_possible_cpu(cpu) {
+ pool = per_cpu_ptr(fcoe->pool, cpu);
+ if (*pool)
+ pci_pool_destroy(*pool);
+ }
+ free_percpu(fcoe->pool);
+ fcoe->pool = NULL;
+}
+
+static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ unsigned int cpu;
+ struct pci_pool **pool;
+ char pool_name[32];
+
+ fcoe->pool = alloc_percpu(struct pci_pool *);
+ if (!fcoe->pool)
+ return;
+
+ /* allocate pci pool for each cpu */
+ for_each_possible_cpu(cpu) {
+ snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
+ pool = per_cpu_ptr(fcoe->pool, cpu);
+ *pool = pci_pool_create(pool_name,
+ adapter->pdev, IXGBE_FCPTR_MAX,
+ IXGBE_FCPTR_ALIGN, PAGE_SIZE);
+ if (!*pool) {
+ e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
+ ixgbe_fcoe_ddp_pools_free(fcoe);
+ return;
+ }
+ }
+}
+
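Illustrative only: a minimal module built on the same primitives the per-CPU DDP pools use above (alloc_percpu(), per_cpu_ptr(), for_each_possible_cpu(), get_cpu()/put_cpu()), keeping a per-CPU counter where the driver keeps a pci_pool pointer:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static long __percpu *counters;		/* one slot per possible CPU */

static int __init percpu_demo_init(void)
{
	int cpu;

	counters = alloc_percpu(long);
	if (!counters)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(counters, cpu) = 0;

	/* get_cpu() pins us to the local CPU, just as the DDP setup path
	 * does while it borrows that CPU's pool */
	(*per_cpu_ptr(counters, get_cpu()))++;
	put_cpu();

	return 0;
}

static void __exit percpu_demo_exit(void)
{
	free_percpu(counters);
}

module_init(percpu_demo_init);
module_exit(percpu_demo_exit);
MODULE_LICENSE("GPL");
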
/**
* ixgbe_configure_fcoe - configures registers for fcoe at start
* @adapter: ptr to ixgbe adapter
@@ -599,27 +599,21 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-#ifdef CONFIG_IXGBE_DCB
- u8 tc;
- u32 up2tc;
-#endif
- /* create the pool for ddp if not created yet */
if (!fcoe->pool) {
- /* allocate ddp pool */
- fcoe->pool = pci_pool_create("ixgbe_fcoe_ddp",
- adapter->pdev, IXGBE_FCPTR_MAX,
- IXGBE_FCPTR_ALIGN, PAGE_SIZE);
- if (!fcoe->pool)
- e_err(drv, "failed to allocated FCoE DDP pool\n");
-
spin_lock_init(&fcoe->lock);
+ ixgbe_fcoe_ddp_pools_alloc(adapter);
+ if (!fcoe->pool) {
+ e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
+ return;
+ }
+
/* Extra buffer to be shared by all DDPs for HW work around */
fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
if (fcoe->extra_ddp_buffer == NULL) {
e_err(drv, "failed to allocated extra DDP buffer\n");
- goto out_extra_ddp_buffer_alloc;
+ goto out_ddp_pools;
}
fcoe->extra_ddp_buffer_dma =
@@ -630,7 +624,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
if (dma_mapping_error(&adapter->pdev->dev,
fcoe->extra_ddp_buffer_dma)) {
e_err(drv, "failed to map extra DDP buffer\n");
- goto out_extra_ddp_buffer_dma;
+ goto out_extra_ddp_buffer;
}
}
@@ -670,25 +664,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
IXGBE_FCRXCTRL_FCOELLI |
IXGBE_FCRXCTRL_FCCRCBO |
(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
-#ifdef CONFIG_IXGBE_DCB
- up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
- for (i = 0; i < MAX_USER_PRIORITY; i++) {
- tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
- tc &= (MAX_TRAFFIC_CLASS - 1);
- if (fcoe->tc == tc) {
- fcoe->up = i;
- break;
- }
- }
-#endif
-
return;
-out_extra_ddp_buffer_dma:
+out_extra_ddp_buffer:
kfree(fcoe->extra_ddp_buffer);
-out_extra_ddp_buffer_alloc:
- pci_pool_destroy(fcoe->pool);
- fcoe->pool = NULL;
+out_ddp_pools:
+ ixgbe_fcoe_ddp_pools_free(fcoe);
}
/**
@@ -704,18 +685,17 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
int i;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- /* release ddp resource */
- if (fcoe->pool) {
- for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
- ixgbe_fcoe_ddp_put(adapter->netdev, i);
- dma_unmap_single(&adapter->pdev->dev,
- fcoe->extra_ddp_buffer_dma,
- IXGBE_FCBUFF_MIN,
- DMA_FROM_DEVICE);
- kfree(fcoe->extra_ddp_buffer);
- pci_pool_destroy(fcoe->pool);
- fcoe->pool = NULL;
- }
+ if (!fcoe->pool)
+ return;
+
+ for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
+ ixgbe_fcoe_ddp_put(adapter->netdev, i);
+ dma_unmap_single(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer_dma,
+ IXGBE_FCBUFF_MIN,
+ DMA_FROM_DEVICE);
+ kfree(fcoe->extra_ddp_buffer);
+ ixgbe_fcoe_ddp_pools_free(fcoe);
}
/**
@@ -811,41 +791,6 @@ out_disable:
return rc;
}
-#ifdef CONFIG_IXGBE_DCB
-/**
- * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
- * @adapter : ixgbe adapter
- * @up : 802.1p user priority bitmap
- *
- * Finds out the traffic class from the input user priority
- * bitmap for FCoE.
- *
- * Returns : 0 on success otherwise returns 1 on error
- */
-u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
-{
- int i;
- u32 up2tc;
-
- /* valid user priority bitmap must not be 0 */
- if (up) {
- /* from user priority to the corresponding traffic class */
- up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
- for (i = 0; i < MAX_USER_PRIORITY; i++) {
- if (up & (1 << i)) {
- up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
- up2tc &= (MAX_TRAFFIC_CLASS - 1);
- adapter->fcoe.tc = (u8)up2tc;
- adapter->fcoe.up = i;
- return 0;
- }
- }
- }
-
- return 1;
-}
-#endif /* CONFIG_IXGBE_DCB */
-
/**
* ixgbe_fcoe_get_wwn - get world wide name for the node or the port
* @netdev : ixgbe adapter
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 5a650a4ace6..99de145e290 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -62,20 +62,20 @@ struct ixgbe_fcoe_ddp {
struct scatterlist *sgl;
dma_addr_t udp;
u64 *udl;
+ struct pci_pool *pool;
};
struct ixgbe_fcoe {
-#ifdef CONFIG_IXGBE_DCB
- u8 tc;
- u8 up;
-#endif
- unsigned long mode;
+ struct pci_pool **pool;
atomic_t refcnt;
spinlock_t lock;
- struct pci_pool *pool;
struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
unsigned char *extra_ddp_buffer;
dma_addr_t extra_ddp_buffer_dma;
+ unsigned long mode;
+#ifdef CONFIG_IXGBE_DCB
+ u8 up;
+#endif
};
#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 08e8e25c159..22790394318 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -32,8 +32,10 @@
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
+#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
@@ -53,11 +55,10 @@ char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
#define MAJ 3
-#define MIN 3
+#define MIN 4
#define BUILD 8
-#define KFIX 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
- __stringify(BUILD) "-k" __stringify(KFIX)
+ __stringify(BUILD) "-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2011 Intel Corporation.";
@@ -183,6 +184,7 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+ IXGBE_WRITE_FLUSH(hw);
/* take a breather then clean up driver data */
msleep(100);
@@ -664,62 +666,6 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
/* tx_buffer_info must be completely set up in the transmit path */
}
-/**
- * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
- * @adapter: driver private struct
- * @index: reg idx of queue to query (0-127)
- *
- * Helper function to determine the traffic index for a particular
- * register index.
- *
- * Returns : a tc index for use in range 0-7, or 0-3
- */
-static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
-{
- int tc = -1;
- int dcb_i = netdev_get_num_tc(adapter->netdev);
-
- /* if DCB is not enabled the queues have no TC */
- if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- return tc;
-
- /* check valid range */
- if (reg_idx >= adapter->hw.mac.max_tx_queues)
- return tc;
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82598EB:
- tc = reg_idx >> 2;
- break;
- default:
- if (dcb_i != 4 && dcb_i != 8)
- break;
-
- /* if VMDq is enabled the lowest order bits determine TC */
- if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
- IXGBE_FLAG_VMDQ_ENABLED)) {
- tc = reg_idx & (dcb_i - 1);
- break;
- }
-
- /*
- * Convert the reg_idx into the correct TC. This bitmask
- * targets the last full 32 ring traffic class and assigns
- * it a value of 1. From there the rest of the rings are
- * based on shifting the mask further up to include the
- * reg_idx / 16 and then reg_idx / 8. It assumes dcB_i
- * will only ever be 8 or 4 and that reg_idx will never
- * be greater then 128. The code without the power of 2
- * optimizations would be:
- * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
- */
- tc = ((reg_idx & 0X1F) + 0x20) * dcb_i;
- tc >>= 9 - (reg_idx >> 5);
- }
-
- return tc;
-}
-
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -765,7 +711,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
/* disarm tx queues that have received xoff frames */
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
- u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
+ u8 tc = tx_ring->dcb_tc;
if (xoff[tc])
clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
@@ -827,15 +773,6 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
return ret;
}
-#define IXGBE_MAX_TXD_PWR 14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
- (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
- MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-
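The descriptor-budget macros dropped here (they appear to migrate into the driver header as part of this Tx rework rather than disappear) encode a simple hardware limit: a Tx data descriptor carries at most 2^14 bytes, so a buffer of length S needs roughly ceil(S / 16K) descriptors, plus slots for the page fragments and one context descriptor. A quick standalone check of the per-buffer arithmetic:

#include <stdio.h>

#define MAX_TXD_PWR		14
#define MAX_DATA_PER_TXD	(1u << MAX_TXD_PWR)	/* 16 KB per descriptor */
#define TXD_USE_COUNT(S)	(((S) >> MAX_TXD_PWR) + \
				 (((S) & (MAX_DATA_PER_TXD - 1)) ? 1 : 0))

int main(void)
{
	unsigned int sizes[] = { 60, 1514, 9018, 65536 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%5u byte buffer -> %u descriptor(s)\n",
		       sizes[i], TXD_USE_COUNT(sizes[i]));
	return 0;
}
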
/**
* ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
* @adapter: driver private struct
@@ -869,7 +806,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
- (count < tx_ring->work_limit)) {
+ (count < q_vector->tx.work_limit)) {
bool cleaned = false;
rmb(); /* read buffer_info after eop_desc */
for ( ; !cleaned; count++) {
@@ -898,11 +835,11 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
}
tx_ring->next_to_clean = i;
- tx_ring->total_bytes += total_bytes;
- tx_ring->total_packets += total_packets;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.packets += total_packets;
tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_begin(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
u64_stats_update_end(&tx_ring->syncp);
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
@@ -938,7 +875,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
- (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -950,7 +887,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
}
}
- return count < tx_ring->work_limit;
+ return count < q_vector->tx.work_limit;
}
#ifdef CONFIG_IXGBE_DCA
@@ -1023,17 +960,17 @@ static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
if (q_vector->cpu == cpu)
goto out_no_update;
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
+ r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->tx.count; i++) {
ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
r_idx + 1);
}
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rx.count; i++) {
ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
r_idx + 1);
}
@@ -1069,7 +1006,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
unsigned long event = *(unsigned long *)data;
- if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+ if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
return 0;
switch (event) {
@@ -1103,6 +1040,24 @@ static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc,
}
/**
+ * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
+ * @adapter: address of board private structure
+ * @rx_desc: advanced rx descriptor
+ *
+ * Returns : true if it is FCoE pkt
+ */
+static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+
+ return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
+ (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
+ IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
+}
+
+/**
* ixgbe_receive_skb - Send a completed packet up the stack
* @adapter: board private structure
* @skb: packet to send up
@@ -1134,14 +1089,14 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
* @adapter: address of board private structure
* @status_err: hardware indication of status of receive
* @skb: skb currently being received and modified
+ * @status_err: status error value of last descriptor in packet
**/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u32 status_err)
{
- u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);
-
- skb_checksum_none_assert(skb);
+ skb->ip_summed = CHECKSUM_NONE;
/* Rx csum disabled */
if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
@@ -1485,14 +1440,12 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
/* ERR_MASK will only have valid bits if EOP set */
- if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
- /* trim packet back to size 0 and recycle it */
- __pskb_trim(skb, 0);
- rx_buffer_info->skb = skb;
+ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+ dev_kfree_skb_any(skb);
goto next_desc;
}
- ixgbe_rx_checksum(adapter, rx_desc, skb);
+ ixgbe_rx_checksum(adapter, rx_desc, skb, staterr);
if (adapter->netdev->features & NETIF_F_RXHASH)
ixgbe_rx_hash(rx_desc, skb);
@@ -1503,10 +1456,13 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
#ifdef IXGBE_FCOE
/* if ddp, not passing to ULD unless for FCP_RSP or error */
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
- ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
- if (!ddp_bytes)
+ if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
+ ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb,
+ staterr);
+ if (!ddp_bytes) {
+ dev_kfree_skb_any(skb);
goto next_desc;
+ }
}
#endif /* IXGBE_FCOE */
ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
@@ -1530,7 +1486,7 @@ next_desc:
}
rx_ring->next_to_clean = i;
- cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+ cleaned_count = ixgbe_desc_unused(rx_ring);
if (cleaned_count)
ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -1550,12 +1506,12 @@ next_desc:
}
#endif /* IXGBE_FCOE */
- rx_ring->total_packets += total_rx_packets;
- rx_ring->total_bytes += total_rx_bytes;
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_packets += total_rx_packets;
+ q_vector->rx.total_bytes += total_rx_bytes;
}
static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1581,38 +1537,37 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
for (v_idx = 0; v_idx < q_vectors; v_idx++) {
q_vector = adapter->q_vector[v_idx];
/* XXX for_each_set_bit(...) */
- r_idx = find_first_bit(q_vector->rxr_idx,
+ r_idx = find_first_bit(q_vector->rx.idx,
adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ for (i = 0; i < q_vector->rx.count; i++) {
u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
- r_idx = find_next_bit(q_vector->rxr_idx,
+ r_idx = find_next_bit(q_vector->rx.idx,
adapter->num_rx_queues,
r_idx + 1);
}
- r_idx = find_first_bit(q_vector->txr_idx,
+ r_idx = find_first_bit(q_vector->tx.idx,
adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
+ for (i = 0; i < q_vector->tx.count; i++) {
u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
- r_idx = find_next_bit(q_vector->txr_idx,
+ r_idx = find_next_bit(q_vector->tx.idx,
adapter->num_tx_queues,
r_idx + 1);
}
- if (q_vector->txr_count && !q_vector->rxr_count)
+ if (q_vector->tx.count && !q_vector->rx.count)
/* tx only */
q_vector->eitr = adapter->tx_eitr_param;
- else if (q_vector->rxr_count)
+ else if (q_vector->rx.count)
/* rx or mixed */
q_vector->eitr = adapter->rx_eitr_param;
ixgbe_write_eitr(q_vector);
- /* If Flow Director is enabled, set interrupt affinity */
- if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
- (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ /* If ATR is enabled, set interrupt affinity */
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
/*
* Allocate the affinity_hint cpumask, assign the mask
* for this vector, and set our affinity_hint for
@@ -1662,11 +1617,8 @@ enum latency_range {
/**
* ixgbe_update_itr - update the dynamic ITR value based on statistics
- * @adapter: pointer to adapter
- * @eitr: eitr setting (ints per sec) to give last timeslice
- * @itr_setting: current throttle rate in ints/second
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
+ * @q_vector: structure containing interrupt and ring information
+ * @ring_container: structure containing ring performance data
*
* Stores a new ITR value based on packets and byte
* counts during the last interrupt. The advantage of per interrupt
@@ -1678,17 +1630,18 @@ enum latency_range {
* this functionality is controlled by the InterruptThrottleRate module
* parameter (see ixgbe_param.c)
**/
-static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
- u32 eitr, u8 itr_setting,
- int packets, int bytes)
+static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring_container *ring_container)
{
- unsigned int retval = itr_setting;
- u32 timepassed_us;
u64 bytes_perint;
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ int bytes = ring_container->total_bytes;
+ int packets = ring_container->total_packets;
+ u32 timepassed_us;
+ u8 itr_setting = ring_container->itr;
if (packets == 0)
- goto update_itr_done;
-
+ return;
/* simple throttlerate management
* 0-20MB/s lowest (100000 ints/s)
@@ -1696,28 +1649,32 @@ static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
* 100-1249MB/s bulk (8000 ints/s)
*/
/* what was last interrupt timeslice? */
- timepassed_us = 1000000/eitr;
+ timepassed_us = 1000000/q_vector->eitr;
bytes_perint = bytes / timepassed_us; /* bytes/usec */
switch (itr_setting) {
case lowest_latency:
if (bytes_perint > adapter->eitr_low)
- retval = low_latency;
+ itr_setting = low_latency;
break;
case low_latency:
if (bytes_perint > adapter->eitr_high)
- retval = bulk_latency;
+ itr_setting = bulk_latency;
else if (bytes_perint <= adapter->eitr_low)
- retval = lowest_latency;
+ itr_setting = lowest_latency;
break;
case bulk_latency:
if (bytes_perint <= adapter->eitr_high)
- retval = low_latency;
+ itr_setting = low_latency;
break;
}
-update_itr_done:
- return retval;
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itr_setting;
}
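The reworked ixgbe_update_itr() pulls its byte and packet counters from the ring container and writes the resulting latency class back into ring_container->itr, but the classification itself is unchanged: bytes per interrupt timeslice, measured in bytes/usec (numerically about MB/s), moves the vector between lowest, low and bulk latency. A standalone sketch of that state machine; the 10/20 thresholds stand in for adapter->eitr_low/eitr_high and are assumptions:

#include <stdio.h>

enum latency_range { lowest_latency, low_latency, bulk_latency };

/* assumed stand-ins for adapter->eitr_low / adapter->eitr_high */
#define EITR_LOW	10
#define EITR_HIGH	20

static enum latency_range classify(unsigned long bytes, unsigned int eitr,
				   enum latency_range itr)
{
	unsigned long timepassed_us = 1000000UL / eitr;	    /* last timeslice */
	unsigned long bytes_perint = bytes / timepassed_us; /* ~MB/s */

	switch (itr) {
	case lowest_latency:
		if (bytes_perint > EITR_LOW)
			itr = low_latency;
		break;
	case low_latency:
		if (bytes_perint > EITR_HIGH)
			itr = bulk_latency;
		else if (bytes_perint <= EITR_LOW)
			itr = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= EITR_HIGH)
			itr = low_latency;
		break;
	}
	return itr;
}

int main(void)
{
	/* 125 KB in a 125 us timeslice (~1 GB/s): low -> bulk latency */
	printf("%d\n", classify(125000, 8000, low_latency));
	/* 1 KB in the same timeslice (~8 MB/s): low -> lowest latency */
	printf("%d\n", classify(1000, 8000, low_latency));
	return 0;
}
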
/**
@@ -1763,44 +1720,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}
-static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
+static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
{
- struct ixgbe_adapter *adapter = q_vector->adapter;
- int i, r_idx;
- u32 new_itr;
- u8 current_itr, ret_itr;
-
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
- struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
- ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
- q_vector->tx_itr,
- tx_ring->total_packets,
- tx_ring->total_bytes);
- /* if the result for this queue would decrease interrupt
- * rate for this vector then use that result */
- q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
- q_vector->tx_itr - 1 : ret_itr);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
- }
+ u32 new_itr = q_vector->eitr;
+ u8 current_itr;
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
- struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
- ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
- q_vector->rx_itr,
- rx_ring->total_packets,
- rx_ring->total_bytes);
- /* if the result for this queue would decrease interrupt
- * rate for this vector then use that result */
- q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
- q_vector->rx_itr - 1 : ret_itr);
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
- }
+ ixgbe_update_itr(q_vector, &q_vector->tx);
+ ixgbe_update_itr(q_vector, &q_vector->rx);
- current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */
@@ -1811,16 +1739,17 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
new_itr = 20000; /* aka hwitr = ~200 */
break;
case bulk_latency:
- default:
new_itr = 8000;
break;
+ default:
+ break;
}
if (new_itr != q_vector->eitr) {
/* do an exponential smoothing */
new_itr = ((q_vector->eitr * 9) + new_itr)/10;
- /* save the algorithm value here, not the smoothed one */
+ /* save the algorithm value here */
q_vector->eitr = new_itr;
ixgbe_write_eitr(q_vector);
@@ -1937,8 +1866,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
- struct net_device *netdev = data;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = data;
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr;
@@ -2061,15 +1989,13 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
struct ixgbe_ring *tx_ring;
int i, r_idx;
- if (!q_vector->txr_count)
+ if (!q_vector->tx.count)
return IRQ_HANDLED;
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
+ r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->tx.count; i++) {
tx_ring = adapter->tx_ring[r_idx];
- tx_ring->total_bytes = 0;
- tx_ring->total_packets = 0;
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
r_idx + 1);
}
@@ -2097,16 +2023,14 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
ixgbe_update_dca(q_vector);
#endif
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rx.count; i++) {
rx_ring = adapter->rx_ring[r_idx];
- rx_ring->total_bytes = 0;
- rx_ring->total_packets = 0;
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
r_idx + 1);
}
- if (!q_vector->rxr_count)
+ if (!q_vector->rx.count)
return IRQ_HANDLED;
/* EIAM disabled interrupts (on this vector) for us */
@@ -2123,24 +2047,20 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
int r_idx;
int i;
- if (!q_vector->txr_count && !q_vector->rxr_count)
+ if (!q_vector->tx.count && !q_vector->rx.count)
return IRQ_HANDLED;
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
+ r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->tx.count; i++) {
ring = adapter->tx_ring[r_idx];
- ring->total_bytes = 0;
- ring->total_packets = 0;
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
r_idx + 1);
}
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rx.count; i++) {
ring = adapter->rx_ring[r_idx];
- ring->total_bytes = 0;
- ring->total_packets = 0;
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
r_idx + 1);
}
@@ -2172,7 +2092,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
ixgbe_update_dca(q_vector);
#endif
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
rx_ring = adapter->rx_ring[r_idx];
ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
@@ -2181,7 +2101,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(napi);
if (adapter->rx_itr_setting & 1)
- ixgbe_set_itr_msix(q_vector);
+ ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
((u64)1 << q_vector->v_idx));
@@ -2213,33 +2133,33 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
ixgbe_update_dca(q_vector);
#endif
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
+ r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->tx.count; i++) {
ring = adapter->tx_ring[r_idx];
tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
r_idx + 1);
}
/* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling */
- budget /= (q_vector->rxr_count ?: 1);
+ budget /= (q_vector->rx.count ?: 1);
budget = max(budget, 1);
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rx.count; i++) {
ring = adapter->rx_ring[r_idx];
ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
r_idx + 1);
}
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
ring = adapter->rx_ring[r_idx];
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
if (adapter->rx_itr_setting & 1)
- ixgbe_set_itr_msix(q_vector);
+ ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
((u64)1 << q_vector->v_idx));
@@ -2271,7 +2191,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
ixgbe_update_dca(q_vector);
#endif
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
tx_ring = adapter->tx_ring[r_idx];
if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
@@ -2281,7 +2201,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(napi);
if (adapter->tx_itr_setting & 1)
- ixgbe_set_itr_msix(q_vector);
+ ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
((u64)1 << q_vector->v_idx));
@@ -2296,8 +2216,8 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
- set_bit(r_idx, q_vector->rxr_idx);
- q_vector->rxr_count++;
+ set_bit(r_idx, q_vector->rx.idx);
+ q_vector->rx.count++;
rx_ring->q_vector = q_vector;
}
@@ -2307,9 +2227,10 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
- set_bit(t_idx, q_vector->txr_idx);
- q_vector->txr_count++;
+ set_bit(t_idx, q_vector->tx.idx);
+ q_vector->tx.count++;
tx_ring->q_vector = q_vector;
+ q_vector->tx.work_limit = a->tx_work_limit;
}
/**
@@ -2398,10 +2319,10 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
if (err)
return err;
-#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
+#define SET_HANDLER(_v) (((_v)->rx.count && (_v)->tx.count) \
? &ixgbe_msix_clean_many : \
- (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
- (_v)->txr_count ? &ixgbe_msix_clean_tx : \
+ (_v)->rx.count ? &ixgbe_msix_clean_rx : \
+ (_v)->tx.count ? &ixgbe_msix_clean_tx : \
NULL)
for (vector = 0; vector < q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
@@ -2433,7 +2354,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
+ ixgbe_msix_lsc, 0, adapter->lsc_int_name, adapter);
if (err) {
e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
goto free_queue_irqs;
@@ -2452,51 +2373,6 @@ free_queue_irqs:
return err;
}
-static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
- struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
- struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
- u32 new_itr = q_vector->eitr;
- u8 current_itr;
-
- q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
- q_vector->tx_itr,
- tx_ring->total_packets,
- tx_ring->total_bytes);
- q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
- q_vector->rx_itr,
- rx_ring->total_packets,
- rx_ring->total_bytes);
-
- current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
-
- switch (current_itr) {
- /* counts and packets in update_itr are dependent on these numbers */
- case lowest_latency:
- new_itr = 100000;
- break;
- case low_latency:
- new_itr = 20000; /* aka hwitr = ~200 */
- break;
- case bulk_latency:
- new_itr = 8000;
- break;
- default:
- break;
- }
-
- if (new_itr != q_vector->eitr) {
- /* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 9) + new_itr)/10;
-
- /* save the algorithm value here */
- q_vector->eitr = new_itr;
-
- ixgbe_write_eitr(q_vector);
- }
-}
-
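The legacy single-queue ixgbe_set_itr() removed above picks a target interrupt rate from the current latency class (100000, 20000 or 8000) and then blends it with the previous EITR value instead of jumping straight to it. A minimal user-space sketch of that smoothing step, with illustrative names (smooth_itr is not a driver function):

#include <stdio.h>

/* new = (old * 9 + target) / 10: a 90/10 weighted average that moves the
 * programmed interrupt rate toward the target gradually, as in the hunk. */
static unsigned int smooth_itr(unsigned int old_itr, unsigned int target_itr)
{
	return ((old_itr * 9) + target_itr) / 10;
}

int main(void)
{
	unsigned int itr = 8000;	/* bulk_latency starting point */
	unsigned int target = 100000;	/* lowest_latency target */
	int i;

	for (i = 0; i < 5; i++) {
		itr = smooth_itr(itr, target);
		printf("step %d: itr = %u\n", i, itr);
	}
	return 0;
}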
/**
* ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
@@ -2523,8 +2399,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
default:
break;
}
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
- adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
mask |= IXGBE_EIMS_FLOW_DIR;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
@@ -2546,8 +2421,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
**/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
- struct net_device *netdev = data;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = data;
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
u32 eicr;
@@ -2596,10 +2470,6 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
if (napi_schedule_prep(&(q_vector->napi))) {
- adapter->tx_ring[0]->total_packets = 0;
- adapter->tx_ring[0]->total_bytes = 0;
- adapter->rx_ring[0]->total_packets = 0;
- adapter->rx_ring[0]->total_bytes = 0;
/* would disable interrupts here but EIAM disabled it */
__napi_schedule(&(q_vector->napi));
}
@@ -2621,10 +2491,10 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
for (i = 0; i < q_vectors; i++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
- bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
- bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
- q_vector->rxr_count = 0;
- q_vector->txr_count = 0;
+ bitmap_zero(q_vector->rx.idx, MAX_RX_QUEUES);
+ bitmap_zero(q_vector->tx.idx, MAX_TX_QUEUES);
+ q_vector->rx.count = 0;
+ q_vector->tx.count = 0;
}
}
@@ -2644,10 +2514,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
err = ixgbe_request_msix_irqs(adapter);
} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
- netdev->name, netdev);
+ netdev->name, adapter);
} else {
err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
- netdev->name, netdev);
+ netdev->name, adapter);
}
if (err)
@@ -2658,21 +2528,19 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
-
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
int i, q_vectors;
q_vectors = adapter->num_msix_vectors;
i = q_vectors - 1;
- free_irq(adapter->msix_entries[i].vector, netdev);
+ free_irq(adapter->msix_entries[i].vector, adapter);
i--;
for (; i >= 0; i--) {
/* free only the irqs that were actually requested */
- if (!adapter->q_vector[i]->rxr_count &&
- !adapter->q_vector[i]->txr_count)
+ if (!adapter->q_vector[i]->rx.count &&
+ !adapter->q_vector[i]->tx.count)
continue;
free_irq(adapter->msix_entries[i].vector,
@@ -2681,7 +2549,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
ixgbe_reset_q_vectors(adapter);
} else {
- free_irq(adapter->pdev->irq, netdev);
+ free_irq(adapter->pdev->irq, adapter);
}
}
@@ -2814,7 +2682,8 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rttdcs;
- u32 mask;
+ u32 reg;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
if (hw->mac.type == ixgbe_mac_82598EB)
return;
@@ -2825,22 +2694,27 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/* set transmit pool layout */
- mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
- switch (adapter->flags & mask) {
-
+ switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
case (IXGBE_FLAG_SRIOV_ENABLED):
IXGBE_WRITE_REG(hw, IXGBE_MTQC,
(IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
break;
+ default:
+ if (!tcs)
+ reg = IXGBE_MTQC_64Q_1PB;
+ else if (tcs <= 4)
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ else
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
- case (IXGBE_FLAG_DCB_ENABLED):
- /* We enable 8 traffic classes, DCB only */
- IXGBE_WRITE_REG(hw, IXGBE_MTQC,
- (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
- break;
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
- default:
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+ /* Enable Security TX Buffer IFG for multiple pb */
+ if (tcs) {
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+ }
break;
}
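With the DCB flag gone, ixgbe_setup_mtqc() above derives the MTQC value purely from the number of traffic classes reported by the netdev. A small sketch of just that selection, using placeholder constants that stand in for the IXGBE_MTQC_* bits:

#include <stdio.h>

#define MTQC_64Q_1PB	0x0	/* placeholders for the IXGBE_MTQC_* defines */
#define MTQC_RT_ENA	0x1
#define MTQC_4TC_4TQ	0x8
#define MTQC_8TC_8TQ	0xC

/* Mirror the default branch of the switch above: no TCs, up to 4, or 8. */
static unsigned int mtqc_for_tcs(unsigned int tcs)
{
	if (!tcs)
		return MTQC_64Q_1PB;
	if (tcs <= 4)
		return MTQC_RT_ENA | MTQC_4TC_4TQ;
	return MTQC_RT_ENA | MTQC_8TC_8TQ;
}

int main(void)
{
	unsigned int tcs;

	for (tcs = 0; tcs <= 8; tcs++)
		printf("tcs=%u -> MTQC=0x%x\n", tcs, mtqc_for_tcs(tcs));
	return 0;
}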
@@ -2931,7 +2805,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
u32 mrqc = 0, reta = 0;
u32 rxcsum;
int i, j;
- int mask;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+ int maxq = adapter->ring_feature[RING_F_RSS].indices;
+
+ if (tcs)
+ maxq = min(maxq, adapter->num_tx_queues / tcs);
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
@@ -2939,7 +2817,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
/* Fill out redirection table */
for (i = 0, j = 0; i < 128; i++, j++) {
- if (j == adapter->ring_feature[RING_F_RSS].indices)
+ if (j == maxq)
j = 0;
/* reta = 4-byte sliding window of
* 0x00..(indices-1)(indices-1)00..etc. */
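The redirection-table loop above cycles an index over maxq queues and packs four one-byte entries into each 32-bit RETA word; the exact per-entry value comes from context lines not shown in this hunk. A hedged sketch of the packing, printing the words instead of writing IXGBE_RETA registers and using the raw queue index as the entry:

#include <stdio.h>

int main(void)
{
	unsigned int maxq = 6;	/* e.g. RSS indices limited by num_tx_queues/tcs */
	unsigned int reta = 0;
	unsigned int i, j;

	/* 128 one-byte entries, four packed per word, queue index wrapping
	 * at maxq exactly as in the loop above. */
	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == maxq)
			j = 0;
		reta = (reta << 8) | j;
		if ((i & 3) == 3)
			printf("RETA[%u] = 0x%08x\n", i >> 2, reta);
	}
	return 0;
}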
@@ -2953,33 +2831,28 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
rxcsum |= IXGBE_RXCSUM_PCSD;
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
- mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
- else
- mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
-#ifdef CONFIG_IXGBE_DCB
- | IXGBE_FLAG_DCB_ENABLED
-#endif
- | IXGBE_FLAG_SRIOV_ENABLED
- );
-
- switch (mask) {
-#ifdef CONFIG_IXGBE_DCB
- case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_RSS_ENABLED):
- mrqc = IXGBE_MRQC_RTRSS8TCEN;
- break;
- case (IXGBE_FLAG_DCB_ENABLED):
- mrqc = IXGBE_MRQC_RT8TCEN;
- break;
-#endif /* CONFIG_IXGBE_DCB */
- case (IXGBE_FLAG_RSS_ENABLED):
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
+ (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
mrqc = IXGBE_MRQC_RSSEN;
- break;
- case (IXGBE_FLAG_SRIOV_ENABLED):
- mrqc = IXGBE_MRQC_VMDQEN;
- break;
- default:
- break;
+ } else {
+ int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
+ | IXGBE_FLAG_SRIOV_ENABLED);
+
+ switch (mask) {
+ case (IXGBE_FLAG_RSS_ENABLED):
+ if (!tcs)
+ mrqc = IXGBE_MRQC_RSSEN;
+ else if (tcs <= 4)
+ mrqc = IXGBE_MRQC_RTRSS4TCEN;
+ else
+ mrqc = IXGBE_MRQC_RTRSS8TCEN;
+ break;
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ mrqc = IXGBE_MRQC_VMDQEN;
+ break;
+ default:
+ break;
+ }
}
/* Perform hash on these packet types */
@@ -2992,28 +2865,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_clear_rscctl - disable RSC for the indicated ring
- * @adapter: address of board private structure
- * @ring: structure containing ring specific data
- **/
-void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 rscctrl;
- u8 reg_idx = ring->reg_idx;
-
- rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
- rscctrl &= ~IXGBE_RSCCTL_RSCEN;
- IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
-}
-
-/**
* ixgbe_configure_rscctl - enable RSC for the indicated ring
* @adapter: address of board private structure
* @index: index of ring to set
**/
-void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -3183,7 +3039,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
ixgbe_rx_desc_queue_enable(adapter, ring);
- ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
+ ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
}
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -3681,10 +3537,10 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
q_vector = adapter->q_vector[q_idx];
napi = &q_vector->napi;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- if (!q_vector->rxr_count || !q_vector->txr_count) {
- if (q_vector->txr_count == 1)
+ if (!q_vector->rx.count || !q_vector->tx.count) {
+ if (q_vector->tx.count == 1)
napi->poll = &ixgbe_clean_txonly;
- else if (q_vector->rxr_count == 1)
+ else if (q_vector->rx.count == 1)
napi->poll = &ixgbe_clean_rxonly;
}
}
@@ -3739,7 +3595,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
/* reconfigure the hardware */
- if (adapter->dcbx_cap & (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE)) {
+ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
#ifdef CONFIG_FCOE
if (adapter->netdev->features & NETIF_F_FCOE_MTU)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
@@ -3779,12 +3635,51 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
}
#endif
+
+static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
+{
+ int hdrm = 0;
+ int num_tc = netdev_get_num_tc(adapter->netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+ adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ hdrm = 64 << adapter->fdir_pballoc;
+
+ hw->mac.ops.set_rxpba(&adapter->hw, num_tc, hdrm, PBA_STRATEGY_EQUAL);
+}
+
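ixgbe_configure_pb() above reserves Flow Director headroom only when one of the FDIR flags is set, shifting 64 by the fdir_pballoc enum value and leaving the rest of the packet-buffer split to the new set_rxpba MAC operation. A quick sketch of the shift with the renumbered ixgbe_fdir_pballoc_type values:

#include <stdio.h>

int main(void)
{
	/* NONE=0, 64K=1, 128K=2, 256K=3 after the enum change in this patch */
	const char *names[] = { "NONE", "64K", "128K", "256K" };
	int pballoc;

	for (pballoc = 0; pballoc <= 3; pballoc++)
		printf("fdir_pballoc=%s -> hdrm=%d\n",
		       names[pballoc], 64 << pballoc);
	return 0;
}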
+static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *filter;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+
+ if (!hlist_empty(&adapter->fdir_filter_list))
+ ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
+
+ hlist_for_each_entry_safe(filter, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ ixgbe_fdir_write_perfect_filter_82599(hw,
+ &filter->filter,
+ filter->sw_idx,
+ (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
+ IXGBE_FDIR_DROP_QUEUE :
+ adapter->rx_ring[filter->action]->reg_idx);
+ }
+
+ spin_unlock(&adapter->fdir_perfect_lock);
+}
+
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
int i;
+ ixgbe_configure_pb(adapter);
#ifdef CONFIG_IXGBE_DCB
ixgbe_configure_dcb(adapter);
#endif
@@ -3803,7 +3698,9 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
adapter->atr_sample_rate;
ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
- ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
+ ixgbe_init_fdir_perfect_82599(&adapter->hw,
+ adapter->fdir_pballoc);
+ ixgbe_fdir_filter_restore(adapter);
}
ixgbe_configure_virtualization(adapter);
@@ -4180,6 +4077,23 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
ixgbe_clean_tx_ring(adapter->tx_ring[i]);
}
+static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
+{
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *filter;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+
+ hlist_for_each_entry_safe(filter, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ hlist_del(&filter->fdir_node);
+ kfree(filter);
+ }
+ adapter->fdir_filter_count = 0;
+
+ spin_unlock(&adapter->fdir_perfect_lock);
+}
+
void ixgbe_down(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -4306,7 +4220,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(napi);
if (adapter->rx_itr_setting & 1)
- ixgbe_set_itr(adapter);
+ ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
}
@@ -4369,15 +4283,13 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
f_fdir->mask = 0;
/* Flow Director must have RSS enabled */
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
- ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
- (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
adapter->num_tx_queues = f_fdir->indices;
adapter->num_rx_queues = f_fdir->indices;
ret = true;
} else {
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
}
return ret;
}
@@ -4400,69 +4312,72 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
return false;
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-#ifdef CONFIG_IXGBE_DCB
- int tc;
- struct net_device *dev = adapter->netdev;
+ f->indices = min((int)num_online_cpus(), f->indices);
- tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
- f->indices = dev->tc_to_txq[tc].count;
- f->mask = dev->tc_to_txq[tc].offset;
-#endif
- } else {
- f->indices = min((int)num_online_cpus(), f->indices);
-
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- e_info(probe, "FCoE enabled with RSS\n");
- if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
- (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
- ixgbe_set_fdir_queues(adapter);
- else
- ixgbe_set_rss_queues(adapter);
- }
- /* adding FCoE rx rings to the end */
- f->mask = adapter->num_rx_queues;
- adapter->num_rx_queues += f->indices;
- adapter->num_tx_queues += f->indices;
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ e_info(probe, "FCoE enabled with RSS\n");
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+ ixgbe_set_fdir_queues(adapter);
+ else
+ ixgbe_set_rss_queues(adapter);
}
+ /* adding FCoE rx rings to the end */
+ f->mask = adapter->num_rx_queues;
+ adapter->num_rx_queues += f->indices;
+ adapter->num_tx_queues += f->indices;
+
return true;
}
#endif /* IXGBE_FCOE */
+/* Artificial max queue cap per traffic class in DCB mode */
+#define DCB_QUEUE_CAP 8
+
#ifdef CONFIG_IXGBE_DCB
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
- bool ret = false;
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
- int i, q;
+ int per_tc_q, q, i, offset = 0;
+ struct net_device *dev = adapter->netdev;
+ int tcs = netdev_get_num_tc(dev);
- if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- return ret;
+ if (!tcs)
+ return false;
- f->indices = 0;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- q = min((int)num_online_cpus(), MAX_TRAFFIC_CLASS);
- f->indices += q;
+ /* Map queue offset and counts onto allocated tx queues */
+ per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP);
+ q = min((int)num_online_cpus(), per_tc_q);
+
+ for (i = 0; i < tcs; i++) {
+ netdev_set_prio_tc_map(dev, i, i);
+ netdev_set_tc_queue(dev, i, q, offset);
+ offset += q;
}
- f->mask = 0x7 << 3;
- adapter->num_rx_queues = f->indices;
- adapter->num_tx_queues = f->indices;
- ret = true;
+ adapter->num_tx_queues = q * tcs;
+ adapter->num_rx_queues = q * tcs;
#ifdef IXGBE_FCOE
- /* FCoE enabled queues require special configuration done through
- * configure_fcoe() and others. Here we map FCoE indices onto the
- * DCB queue pairs allowing FCoE to own configuration later.
+ /* FCoE enabled queues require special configuration indexed
+ * by feature specific indices and mask. Here we map FCoE
+ * indices onto the DCB queue pairs allowing FCoE to own
+ * configuration later.
*/
- ixgbe_set_fcoe_queues(adapter);
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ int tc;
+ struct ixgbe_ring_feature *f =
+ &adapter->ring_feature[RING_F_FCOE];
+
+ tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+ f->indices = dev->tc_to_txq[tc].count;
+ f->mask = dev->tc_to_txq[tc].offset;
+ }
#endif
- return ret;
+ return true;
}
#endif
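The rewritten ixgbe_set_dcb_queues() above replaces the flag-driven path with an even split of the netdev's TX queues across the enabled traffic classes, capped by DCB_QUEUE_CAP and the CPU count, with each class owning a contiguous (offset, count) range. A standalone sketch of that partitioning under assumed queue and CPU counts:

#include <stdio.h>

#define DCB_QUEUE_CAP 8

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int num_tx_queues = 64, online_cpus = 12, tcs = 8;
	unsigned int per_tc_q = min_u(num_tx_queues / tcs, DCB_QUEUE_CAP);
	unsigned int q = min_u(online_cpus, per_tc_q);
	unsigned int i, offset = 0;

	/* Each TC gets q consecutive queues starting at its offset. */
	for (i = 0; i < tcs; i++) {
		printf("tc %u: offset=%u count=%u\n", i, offset, q);
		offset += q;
	}
	printf("num_tx_queues = num_rx_queues = %u\n", q * tcs);
	return 0;
}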
@@ -4616,8 +4531,8 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- *tx = tc << 3;
- *rx = tc << 2;
+ *tx = tc << 2;
+ *rx = tc << 3;
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
@@ -4657,55 +4572,6 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
}
}
-#define IXGBE_MAX_Q_PER_TC (IXGBE_MAX_DCB_INDICES / MAX_TRAFFIC_CLASS)
-
-/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
- * classes.
- *
- * @netdev: net device to configure
- * @tc: number of traffic classes to enable
- */
-int ixgbe_setup_tc(struct net_device *dev, u8 tc)
-{
- int i;
- unsigned int q, offset = 0;
-
- if (!tc) {
- netdev_reset_tc(dev);
- } else {
- struct ixgbe_adapter *adapter = netdev_priv(dev);
-
- /* Hardware supports up to 8 traffic classes */
- if (tc > MAX_TRAFFIC_CLASS || netdev_set_num_tc(dev, tc))
- return -EINVAL;
-
- /* Partition Tx queues evenly amongst traffic classes */
- for (i = 0; i < tc; i++) {
- q = min((int)num_online_cpus(), IXGBE_MAX_Q_PER_TC);
- netdev_set_prio_tc_map(dev, i, i);
- netdev_set_tc_queue(dev, i, q, offset);
- offset += q;
- }
-
- /* This enables multiple traffic class support in the hardware
- * which defaults to strict priority transmission by default.
- * If traffic classes are already enabled perhaps through DCB
- * code path then existing configuration will be used.
- */
- if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- dev->dcbnl_ops && dev->dcbnl_ops->setdcbx) {
- struct ieee_ets ets = {
- .prio_tc = {0, 1, 2, 3, 4, 5, 6, 7},
- };
- u8 mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
-
- dev->dcbnl_ops->setdcbx(dev, mode);
- dev->dcbnl_ops->ieee_setets(dev, &ets);
- }
- }
- return 0;
-}
-
/**
* ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
* @adapter: board private structure to initialize
@@ -4719,7 +4585,7 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
int i, j, k;
u8 num_tcs = netdev_get_num_tc(dev);
- if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ if (!num_tcs)
return false;
for (i = 0, k = 0; i < num_tcs; i++) {
@@ -4751,9 +4617,8 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
int i;
bool ret = false;
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
- ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
- (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -4782,8 +4647,7 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
return false;
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
- (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
ixgbe_cache_ring_fdir(adapter);
else
ixgbe_cache_ring_rss(adapter);
@@ -4963,14 +4827,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
- IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
e_err(probe,
- "Flow Director is not supported while multiple "
+ "ATR is not supported while multiple "
"queues are disabled. Disabling Flow Director\n");
}
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
ixgbe_disable_sriov(adapter);
@@ -5024,7 +4886,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
- if (q_vector->txr_count && !q_vector->rxr_count)
+ if (q_vector->tx.count && !q_vector->rx.count)
q_vector->eitr = adapter->tx_eitr_param;
else
q_vector->eitr = adapter->rx_eitr_param;
@@ -5201,7 +5063,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
adapter->ring_feature[RING_F_RSS].indices = rss;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
- adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
if (hw->device_id == IXGBE_DEV_ID_82598AT)
@@ -5215,21 +5076,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
- /* n-tuple support exists, always init our spinlock */
- spin_lock_init(&adapter->fdir_perfect_lock);
/* Flow Director hash filters enabled */
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->atr_sample_rate = 20;
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
- adapter->fdir_pballoc = 0;
+ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
adapter->ring_feature[RING_F_FCOE].indices = 0;
#ifdef CONFIG_IXGBE_DCB
/* Default traffic class to use for FCoE */
- adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif
#endif /* IXGBE_FCOE */
@@ -5238,6 +5096,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
break;
}
+ /* n-tuple support exists, always init our spinlock */
+ spin_lock_init(&adapter->fdir_perfect_lock);
+
#ifdef CONFIG_IXGBE_DCB
/* Configure DCB traffic classes */
for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
@@ -5250,7 +5111,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
}
adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
- adapter->dcb_cfg.rx_pba_cfg = pba_equal;
adapter->dcb_cfg.pfc_mode_enable = false;
adapter->dcb_set_bitmap = 0x00;
adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
@@ -5285,6 +5145,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
+ /* set default work limits */
+ adapter->tx_work_limit = adapter->tx_ring_count;
+
/* initialize eeprom parameters */
if (ixgbe_init_eeprom_params_generic(hw)) {
e_dev_err("EEPROM initialization failed\n");
@@ -5331,7 +5194,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->work_limit = tx_ring->count;
return 0;
err:
@@ -5620,6 +5482,8 @@ static int ixgbe_close(struct net_device *netdev)
ixgbe_down(adapter);
ixgbe_free_irq(adapter);
+ ixgbe_fdir_filter_exit(adapter);
+
ixgbe_free_all_tx_resources(adapter);
ixgbe_free_all_rx_resources(adapter);
@@ -6038,7 +5902,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
/* get one bit for every active tx/rx interrupt vector */
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
struct ixgbe_q_vector *qv = adapter->q_vector[i];
- if (qv->rxr_count || qv->txr_count)
+ if (qv->rx.count || qv->tx.count)
eics |= ((u64)1 << i);
}
}
@@ -6143,9 +6007,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
(flow_tx ? "TX" : "None"))));
netif_carrier_on(netdev);
-#ifdef HAVE_IPLINK_VF_CONFIG
ixgbe_check_vf_rate_limit(adapter);
-#endif /* HAVE_IPLINK_VF_CONFIG */
}
/**
@@ -6404,179 +6266,145 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
}
-static int ixgbe_tso(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, u8 *hdr_len, __be16 protocol)
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
+ u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
struct ixgbe_adv_tx_context_desc *context_desc;
- unsigned int i;
- int err;
- struct ixgbe_tx_buffer *tx_buffer_info;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl;
- u32 mss_l4len_idx, l4len;
+ u16 i = tx_ring->next_to_use;
- if (skb_is_gso(skb)) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
-
- if (protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- }
+ context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
- i = tx_ring->next_to_use;
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-
- /* VLAN MACLEN IPLEN */
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- vlan_macip_lens |=
- (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
- vlan_macip_lens |= ((skb_network_offset(skb)) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
- *hdr_len += skb_network_offset(skb);
- vlan_macip_lens |=
- (skb_transport_header(skb) - skb_network_header(skb));
- *hdr_len +=
- (skb_transport_header(skb) - skb_network_header(skb));
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
-
- /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
+ /* set bits to identify this as an advanced context descriptor */
+ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- if (protocol == htons(ETH_P_IP))
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-
- /* MSS L4LEN IDX */
- mss_l4len_idx =
- (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
- /* use index 1 for TSO */
- mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
+static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol, u8 *hdr_len)
+{
+ int err;
+ u32 vlan_macip_lens, type_tucmd;
+ u32 mss_l4len_idx, l4len;
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
+ if (!skb_is_gso(skb))
+ return 0;
- return true;
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
}
- return false;
-}
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
- __be16 protocol)
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ l4len = tcp_hdrlen(skb);
+ *hdr_len = skb_transport_offset(skb) + l4len;
+
+ /* mss_l4len_idx: use 1 as index for TSO */
+ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+ vlan_macip_lens = skb_network_header_len(skb);
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
+ mss_l4len_idx);
+
+ return 1;
+}
+
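Both the rewritten ixgbe_tso() above and ixgbe_tx_csum() below feed ixgbe_tx_ctxtdesc() two packed words: vlan_macip_lens carries the IP header length, the MAC header offset and the VLAN bits, while mss_l4len_idx carries the L4 header length, the MSS and the context index. A hedged sketch of that packing; the shift values are placeholders for the IXGBE_ADVTXD_* constants in ixgbe_type.h:

#include <stdio.h>

#define MACLEN_SHIFT	9	/* placeholder for IXGBE_ADVTXD_MACLEN_SHIFT */
#define L4LEN_SHIFT	8	/* placeholder for IXGBE_ADVTXD_L4LEN_SHIFT */
#define MSS_SHIFT	16	/* placeholder for IXGBE_ADVTXD_MSS_SHIFT */
#define IDX_SHIFT	4	/* placeholder for IXGBE_ADVTXD_IDX_SHIFT */

int main(void)
{
	unsigned int ip_hdr_len = 20, mac_hdr_len = 14, vlan_bits = 0;
	unsigned int tcp_hdr_len = 20, mss = 1448;

	unsigned int vlan_macip_lens = ip_hdr_len |
				       (mac_hdr_len << MACLEN_SHIFT) |
				       vlan_bits;
	unsigned int mss_l4len_idx = (tcp_hdr_len << L4LEN_SHIFT) |
				     (mss << MSS_SHIFT) |
				     (1 << IDX_SHIFT);	/* index 1 for TSO */

	printf("vlan_macip_lens = 0x%08x\n", vlan_macip_lens);
	printf("mss_l4len_idx   = 0x%08x\n", mss_l4len_idx);
	return 0;
}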
+static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags,
+ __be16 protocol)
{
- u32 rtn = 0;
+ u32 vlan_macip_lens = 0;
+ u32 mss_l4len_idx = 0;
+ u32 type_tucmd = 0;
- switch (protocol) {
- case cpu_to_be16(ETH_P_IP):
- rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_TCP:
- rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ if (!(tx_flags & IXGBE_TX_FLAGS_VLAN))
+ return false;
+ } else {
+ u8 l4_hdr = 0;
+ switch (protocol) {
+ case __constant_htons(ETH_P_IP):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ l4_hdr = ip_hdr(skb)->protocol;
break;
- case IPPROTO_SCTP:
- rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ case __constant_htons(ETH_P_IPV6):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but proto=%x!\n",
+ skb->protocol);
+ }
break;
}
- break;
- case cpu_to_be16(ETH_P_IPV6):
- /* XXX what about other V6 headers?? */
- switch (ipv6_hdr(skb)->nexthdr) {
+
+ switch (l4_hdr) {
case IPPROTO_TCP:
- rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ mss_l4len_idx = tcp_hdrlen(skb) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
break;
case IPPROTO_SCTP:
- rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ mss_l4len_idx = sizeof(struct sctphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ mss_l4len_idx = sizeof(struct udphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but l4 proto=%x!\n",
+ skb->protocol);
+ }
break;
}
- break;
- default:
- if (unlikely(net_ratelimit()))
- e_warn(probe, "partial checksum but proto=%x!\n",
- protocol);
- break;
}
- return rtn;
-}
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
-static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags,
- __be16 protocol)
-{
- struct ixgbe_adv_tx_context_desc *context_desc;
- unsigned int i;
- struct ixgbe_tx_buffer *tx_buffer_info;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL ||
- (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
- i = tx_ring->next_to_use;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- vlan_macip_lens |=
- (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
- vlan_macip_lens |= (skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- vlan_macip_lens |= (skb_transport_header(skb) -
- skb_network_header(skb));
-
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
-
- type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
+ type_tucmd, mss_l4len_idx);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
-
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
- /* use index zero for tx checksum offload */
- context_desc->mss_l4len_idx = 0;
-
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
-
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
-
- return true;
- }
-
- return false;
+ return (skb->ip_summed == CHECKSUM_PARTIAL);
}
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
@@ -6588,11 +6416,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
- unsigned int offset = 0, size, count = 0, i;
+ unsigned int offset = 0, size, count = 0;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
unsigned int bytecount = skb->len;
u16 gso_segs = 1;
+ u16 i;
i = tx_ring->next_to_use;
@@ -6858,7 +6687,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
input, common, ring->queue_index);
}
-static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
@@ -6868,7 +6697,7 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
/* We need to check again in a case another CPU has just
* made room available. */
- if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+ if (likely(ixgbe_desc_unused(tx_ring) < size))
return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -6877,9 +6706,9 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
return 0;
}
-static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
+static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
- if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ if (likely(ixgbe_desc_unused(tx_ring) >= size))
return 0;
return __ixgbe_maybe_stop_tx(tx_ring, size);
}
@@ -6887,11 +6716,10 @@ static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- int txq = smp_processor_id();
+ int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
+ smp_processor_id();
#ifdef IXGBE_FCOE
- __be16 protocol;
-
- protocol = vlan_get_protocol(skb);
+ __be16 protocol = vlan_get_protocol(skb);
if (((protocol == htons(ETH_P_FCOE)) ||
(protocol == htons(ETH_P_FIP))) &&
@@ -6915,13 +6743,33 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
{
- unsigned int first;
- unsigned int tx_flags = 0;
- u8 hdr_len = 0;
int tso;
- int count = 0;
- unsigned int f;
+ u32 tx_flags = 0;
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ unsigned short f;
+#endif
+ u16 first;
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol;
+ u8 hdr_len = 0;
+
+ /*
+ * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
+ tx_ring->tx_stats.tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
protocol = vlan_get_protocol(skb);
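The early check added above counts worst-case descriptors before touching the ring: one per IXGBE_MAX_DATA_PER_TXD-sized chunk of the linear area and of each fragment, plus three for the context descriptor and the head/tail gap. A sketch of that count, assuming a 16 KB per-descriptor limit in place of IXGBE_MAX_DATA_PER_TXD:

#include <stdio.h>

#define MAX_DATA_PER_TXD	(1u << 14)	/* assumed 16 KB limit */
#define TXD_USE_COUNT(s)	(((s) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

int main(void)
{
	unsigned int head_len = 1514;
	unsigned int frag_sizes[] = { 32768, 4096 };	/* e.g. a TSO skb */
	unsigned int count = TXD_USE_COUNT(head_len);
	unsigned int f;

	for (f = 0; f < sizeof(frag_sizes) / sizeof(frag_sizes[0]); f++)
		count += TXD_USE_COUNT(frag_sizes[f]);

	/* + 1 context descriptor + 2 descriptor gap, as in the check above */
	printf("descriptors needed: %u (stop queue if fewer than %u free)\n",
	       count, count + 3);
	return 0;
}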
@@ -6946,51 +6794,29 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
(protocol == htons(ETH_P_FCOE)))
tx_flags |= IXGBE_TX_FLAGS_FCOE;
-#endif
-
- /* four things can cause us to need a context descriptor */
- if (skb_is_gso(skb) ||
- (skb->ip_summed == CHECKSUM_PARTIAL) ||
- (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
- (tx_flags & IXGBE_TX_FLAGS_FCOE))
- count++;
-
- count += TXD_USE_COUNT(skb_headlen(skb));
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
- if (ixgbe_maybe_stop_tx(tx_ring, count)) {
- tx_ring->tx_stats.tx_busy++;
- return NETDEV_TX_BUSY;
- }
+#endif
+ /* record the location of the first descriptor for this packet */
first = tx_ring->next_to_use;
+
if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
#ifdef IXGBE_FCOE
/* setup tx offload for FCoE */
- tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
- if (tso)
+ tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
} else {
if (protocol == htons(ETH_P_IP))
tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
- protocol);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (tso)
+ tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
tx_flags |= IXGBE_TX_FLAGS_TSO;
- else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
- protocol) &&
- (skb->ip_summed == CHECKSUM_PARTIAL))
+ else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
@@ -7003,12 +6829,16 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
} else {
- dev_kfree_skb_any(skb);
tx_ring->tx_buffer_info[first].time_stamp = 0;
tx_ring->next_to_use = first;
+ goto out_drop;
}
return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -7198,6 +7028,177 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
return stats;
}
+/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
+ * @adapter: pointer to ixgbe_adapter
+ * @tc: number of traffic classes currently enabled
+ *
+ * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
+ * 802.1Q priority maps to a packet buffer that exists.
+ */
+static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg, rsave;
+ int i;
+
+ /* The 82598 has a static priority to TC mapping that cannot
+ * be changed, so no validation is needed.
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+ rsave = reg;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
+
+ /* If up2tc is out of bounds default to zero */
+ if (up2tc > tc)
+ reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
+ }
+
+ if (reg != rsave)
+ IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
+
+ return;
+}
+
+
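ixgbe_validate_rtr() above walks the eight 3-bit user-priority fields of RTRUP2TC and forces out-of-range entries back to traffic class 0. A generalized user-space sketch of that walk; the 3-bit stride is an assumption standing in for IXGBE_RTRUP2TC_UP_SHIFT, and the sketch clears each offending field individually, which is the intent stated in the comment:

#include <stdio.h>

#define UP_SHIFT	3	/* assumed field stride */
#define MAX_TC		8

static unsigned int validate_up2tc(unsigned int reg, unsigned int tcs)
{
	unsigned int i;

	for (i = 0; i < MAX_TC; i++) {
		unsigned int up2tc = (reg >> (i * UP_SHIFT)) & 0x7;

		/* A priority mapped past the enabled TCs falls back to TC 0. */
		if (up2tc >= tcs)
			reg &= ~(0x7u << (i * UP_SHIFT));
	}
	return reg;
}

int main(void)
{
	unsigned int reg = 0xFAC688;	/* arbitrary example mapping */

	printf("before: 0x%06x after: 0x%06x\n", reg, validate_up2tc(reg, 4));
	return 0;
}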
+/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
+ * classes.
+ *
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int ixgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* If DCB is enabled, do not remove traffic classes; multiple
+ * traffic classes are required to implement DCB
+ */
+ if (!tc && (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return 0;
+
+ /* Hardware supports up to 8 traffic classes */
+ if (tc > MAX_TRAFFIC_CLASS ||
+ (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
+ return -EINVAL;
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ ixgbe_close(dev);
+ ixgbe_clear_interrupt_scheme(adapter);
+
+ if (tc)
+ netdev_set_num_tc(dev, tc);
+ else
+ netdev_reset_tc(dev);
+
+ ixgbe_init_interrupt_scheme(adapter);
+ ixgbe_validate_rtr(adapter, tc);
+ if (netif_running(dev))
+ ixgbe_open(dev);
+
+ return 0;
+}
+
+void ixgbe_do_reset(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+}
+
+static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+#ifdef CONFIG_DCB
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+ data &= ~NETIF_F_HW_VLAN_RX;
+#endif
+
+ /* return error if RXHASH is being enabled when RSS is not supported */
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ data &= ~NETIF_F_RXHASH;
+
+ /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+ if (!(data & NETIF_F_RXCSUM))
+ data &= ~NETIF_F_LRO;
+
+ /* Turn off LRO if not RSC capable or invalid ITR settings */
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
+ data &= ~NETIF_F_LRO;
+ } else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+ (adapter->rx_itr_setting != 1 &&
+ adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
+ data &= ~NETIF_F_LRO;
+ e_info(probe, "rx-usecs set too low, not enabling RSC\n");
+ }
+
+ return data;
+}
+
+static int ixgbe_set_features(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ bool need_reset = false;
+
+ /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+ if (!(data & NETIF_F_RXCSUM))
+ adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+ else
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ /* Make sure RSC matches LRO, reset if change */
+ if (!!(data & NETIF_F_LRO) !=
+ !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+ adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ need_reset = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ /* turn off ATR, enable perfect filters and reset */
+ if (data & NETIF_F_NTUPLE) {
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ need_reset = true;
+ }
+ } else if (!(data & NETIF_F_NTUPLE)) {
+ /* turn off Flow Director, set ATR and reset */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ need_reset = true;
+ }
+
+ if (need_reset)
+ ixgbe_do_reset(netdev);
+
+ return 0;
+
+}
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
@@ -7218,9 +7219,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
-#ifdef CONFIG_IXGBE_DCB
.ndo_setup_tc = ixgbe_setup_tc,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
@@ -7232,6 +7231,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fcoe_disable = ixgbe_fcoe_disable,
.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
#endif /* IXGBE_FCOE */
+ .ndo_set_features = ixgbe_set_features,
+ .ndo_fix_features = ixgbe_fix_features,
};
static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
@@ -7379,14 +7380,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
pci_set_master(pdev);
pci_save_state(pdev);
+#ifdef CONFIG_IXGBE_DCB
+ indices *= MAX_TRAFFIC_CLASS;
+#endif
+
if (ii->mac == ixgbe_mac_82598EB)
indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
else
indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
-#if defined(CONFIG_DCB)
- indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
-#elif defined(IXGBE_FCOE)
+#ifdef IXGBE_FCOE
indices += min_t(unsigned int, num_possible_cpus(),
IXGBE_MAX_FCOE_INDICES);
#endif
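The probe hunk above now sizes the netdev by scaling the base queue count by MAX_TRAFFIC_CLASS when DCB is built in, clamping to the per-MAC RSS or FDIR maximum and then adding FCoE queues. A sketch of that arithmetic; the numeric limits are assumptions standing in for the IXGBE_MAX_*_INDICES constants:

#include <stdio.h>

#define MAX_TRAFFIC_CLASS	8
#define MAX_RSS_INDICES		16	/* assumed 82598 limit */
#define MAX_FDIR_INDICES	64	/* assumed 82599/X540 limit */
#define MAX_FCOE_INDICES	8	/* assumed FCoE limit */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int indices = 16;	/* e.g. num_possible_cpus() */
	unsigned int cpus = 16;
	int is_82598 = 0, dcb = 1, fcoe = 1;

	if (dcb)
		indices *= MAX_TRAFFIC_CLASS;
	indices = min_u(indices, is_82598 ? MAX_RSS_INDICES
					  : MAX_FDIR_INDICES);
	if (fcoe)
		indices += min_u(cpus, MAX_FCOE_INDICES);

	printf("indices = %u\n", indices);
	return 0;
}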
@@ -7497,20 +7500,24 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_HW_VLAN_FILTER |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GRO |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM;
- netdev->features |= NETIF_F_IPV6_CSUM;
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- netdev->features |= NETIF_F_GRO;
- netdev->features |= NETIF_F_RXHASH;
+ netdev->hw_features = netdev->features;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
netdev->features |= NETIF_F_SCTP_CSUM;
+ netdev->hw_features |= NETIF_F_SCTP_CSUM |
+ NETIF_F_NTUPLE;
break;
default:
break;
@@ -7549,6 +7556,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
+ netdev->hw_features |= NETIF_F_LRO;
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
netdev->features |= NETIF_F_LRO;
@@ -7585,25 +7594,24 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
+ netdev->hw_features &= ~NETIF_F_RXHASH;
netdev->features &= ~NETIF_F_RXHASH;
+ }
switch (pdev->device) {
case IXGBE_DEV_ID_82599_SFP:
/* Only this subdevice supports WOL */
if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
- adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
- IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+ adapter->wol = IXGBE_WUFC_MAG;
break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */
if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
- adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
- IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+ adapter->wol = IXGBE_WUFC_MAG;
break;
case IXGBE_DEV_ID_82599_KX4:
- adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
- IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+ adapter->wol = IXGBE_WUFC_MAG;
break;
default:
adapter->wol = 0;
@@ -7678,6 +7686,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
ixgbe_vf_configuration(pdev, (i | 0x10000000));
}
+ /* Inform firmware of driver version */
+ if (hw->mac.ops.set_fw_drv_ver)
+ hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD,
+ FW_CEM_UNUSED_VER);
+
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 735f686c3b3..f7ca3511b9f 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1585,6 +1585,7 @@ static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
*i2cctl |= IXGBE_I2C_CLK_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
/* SCL rise time (1000ns) */
udelay(IXGBE_I2C_T_RISE);
@@ -1605,6 +1606,7 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
*i2cctl &= ~IXGBE_I2C_CLK_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
/* SCL fall time (300ns) */
udelay(IXGBE_I2C_T_FALL);
@@ -1628,6 +1630,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
*i2cctl &= ~IXGBE_I2C_DATA_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index ac99b0458fe..d99d01e2132 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -605,6 +605,22 @@ static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
}
IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
+ * and 0x004 otherwise.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
+ break;
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
+ break;
+ default:
+ break;
+ }
+
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
}
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index fa43f2507f4..e0d970ebab7 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -534,7 +534,7 @@
#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
#define IXGBE_RTTBCNRC_RF_INT_MASK \
(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
-
+#define IXGBE_RTTBCNRM 0x04980
/* FCoE DMA Context Registers */
#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
@@ -707,6 +707,13 @@
#define IXGBE_HFDR 0x15FE8
#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */
+#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define IXGBE_HICR_C 0x02
+#define IXGBE_HICR_SV 0x04 /* Status Validity */
+#define IXGBE_HICR_FW_RESET_ENABLE 0x40
+#define IXGBE_HICR_FW_RESET 0x80
+
/* PCI-E registers */
#define IXGBE_GCR 0x11000
#define IXGBE_GTV 0x11004
@@ -1118,6 +1125,27 @@
#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */
+/* Packet Buffer Initialization */
+#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */
+#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
+#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
+#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer*/
+#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/
+
+#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
+#define IXGBE_MAX_PB 8
+
+/* Packet buffer allocation strategies */
+enum {
+ PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */
+#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL
+ PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */
+#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED
+};
+
/* Transmit Flow Control status */
#define IXGBE_TFCS_TXOFF 0x00000001
#define IXGBE_TFCS_TXOFF0 0x00000100
@@ -1860,6 +1888,7 @@
#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
+#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA or 4 TQ if VT_ENA */
/* Receive Descriptor bit definitions */
#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
@@ -2027,9 +2056,10 @@
#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
enum ixgbe_fdir_pballoc_type {
- IXGBE_FDIR_PBALLOC_64K = 0,
- IXGBE_FDIR_PBALLOC_128K,
- IXGBE_FDIR_PBALLOC_256K,
+ IXGBE_FDIR_PBALLOC_NONE = 0,
+ IXGBE_FDIR_PBALLOC_64K = 1,
+ IXGBE_FDIR_PBALLOC_128K = 2,
+ IXGBE_FDIR_PBALLOC_256K = 3,
};
#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16
@@ -2083,7 +2113,7 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
-#define IXGBE_FDIRCMD_CMD_QUERY_REM_HASH 0x00000007
+#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004
#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
@@ -2102,6 +2132,44 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIR_INIT_DONE_POLL 10
#define IXGBE_FDIRCMD_CMD_POLL 10
+#define IXGBE_FDIR_DROP_QUEUE 127
+
+/* Manageability Host Interface defines */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */
+
+/* CEM Support */
+#define FW_CEM_HDR_LEN 0x4
+#define FW_CEM_CMD_DRIVER_INFO 0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
+#define FW_CEM_CMD_RESERVED 0x0
+#define FW_CEM_UNUSED_VER 0x0
+#define FW_CEM_MAX_RETRIES 3
+#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+
+/* Host Interface Command Structures */
+struct ixgbe_hic_hdr {
+ u8 cmd;
+ u8 buf_len;
+ union {
+ u8 cmd_resv;
+ u8 ret_status;
+ } cmd_or_resp;
+ u8 checksum;
+};
+
+struct ixgbe_hic_drv_info {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ u8 pad; /* end spacing to ensure length is mult. of dword */
+ u16 pad2; /* end spacing to ensure length is mult. of dwords */
+};
+
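The pad and pad2 fields above exist only to round the driver-info command out to whole dwords, as the trailing comments note. A small sketch that mirrors the layout (with the header union collapsed to a single byte) and checks the size:

#include <stdio.h>
#include <stdint.h>

struct hic_hdr {
	uint8_t cmd;
	uint8_t buf_len;
	uint8_t cmd_or_resp;	/* union collapsed for illustration */
	uint8_t checksum;
};

struct hic_drv_info {
	struct hic_hdr hdr;
	uint8_t port_num;
	uint8_t ver_sub;
	uint8_t ver_build;
	uint8_t ver_min;
	uint8_t ver_maj;
	uint8_t pad;
	uint16_t pad2;
};

int main(void)
{
	/* 4-byte header + 6 bytes of fields + 2-byte pad2 = 12 bytes, 3 dwords */
	printf("sizeof(struct hic_drv_info) = %zu (%zu dwords)\n",
	       sizeof(struct hic_drv_info), sizeof(struct hic_drv_info) / 4);
	return 0;
}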
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
@@ -2286,7 +2354,7 @@ union ixgbe_atr_input {
* src_port - 2 bytes
* dst_port - 2 bytes
* flex_bytes - 2 bytes
- * rsvd0 - 2 bytes - space reserved must be 0.
+ * bkt_hash - 2 bytes
*/
struct {
u8 vm_pool;
@@ -2297,7 +2365,7 @@ union ixgbe_atr_input {
__be16 src_port;
__be16 dst_port;
__be16 flex_bytes;
- __be16 rsvd0;
+ __be16 bkt_hash;
} formatted;
__be32 dword_stream[11];
};
@@ -2318,16 +2386,6 @@ union ixgbe_atr_hash_dword {
__be32 dword;
};
-struct ixgbe_atr_input_masks {
- __be16 rsvd0;
- __be16 vlan_id_mask;
- __be32 dst_ip_mask[4];
- __be32 src_ip_mask[4];
- __be16 src_port_mask;
- __be16 dst_port_mask;
- __be16 flex_mask;
-};
-
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
@@ -2615,6 +2673,9 @@ struct ixgbe_mac_operations {
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
+ /* Packet Buffer Manipulation */
+ void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
+
/* LED */
s32 (*led_on)(struct ixgbe_hw *, u32);
s32 (*led_off)(struct ixgbe_hw *, u32);
@@ -2638,6 +2699,9 @@ struct ixgbe_mac_operations {
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *, s32);
+
+ /* Manageability interface */
+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
};
struct ixgbe_phy_operations {
@@ -2807,6 +2871,7 @@ struct ixgbe_info {
#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
#define IXGBE_ERR_PBA_SECTION -31
#define IXGBE_ERR_INVALID_ARGUMENT -32
+#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index 4ed687be2fe..2696c78e9f4 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -162,6 +162,7 @@ mac_reset_top:
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+ IXGBE_WRITE_FLUSH(hw);
msleep(50);
@@ -876,6 +877,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.read_analog_reg8 = NULL,
.write_analog_reg8 = NULL,
.setup_link = &ixgbe_setup_mac_link_X540,
+ .set_rxpba = &ixgbe_set_rxpba_generic,
.check_link = &ixgbe_check_mac_link_generic,
.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
.led_on = &ixgbe_led_on_generic,
@@ -893,6 +895,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.clear_vfta = &ixgbe_clear_vfta_generic,
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
+ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = NULL,
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
index b703f60be3b..8857df4dd3b 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -29,9 +29,11 @@
#define _IXGBEVF_H_
#include <linux/types.h>
+#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
#include "vf.h"
@@ -185,9 +187,7 @@ struct ixgbevf_q_vector {
/* board specific private data structure */
struct ixgbevf_adapter {
struct timer_list watchdog_timer;
-#ifdef NETIF_F_HW_VLAN_TX
- struct vlan_group *vlgrp;
-#endif
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 bd_number;
struct work_struct reset_task;
struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
@@ -279,7 +279,7 @@ enum ixgbevf_boards {
extern struct ixgbevf_info ixgbevf_82599_vf_info;
extern struct ixgbevf_info ixgbevf_X540_vf_info;
-extern struct ixgbe_mac_operations ixgbevf_mbx_ops;
+extern struct ixgbe_mbx_operations ixgbevf_mbx_ops;
/* needed by ethtool.c */
extern char ixgbevf_driver_name[];
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 28d3cb21d37..3b880a27f8d 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -30,6 +30,7 @@
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
#include <linux/types.h>
+#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -52,7 +53,7 @@ char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "2.0.0-k2"
+#define DRV_VERSION "2.1.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2010 Intel Corporation.";
@@ -288,21 +289,17 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
bool is_vlan = (status & IXGBE_RXD_STAT_VP);
- u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
- if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
- if (adapter->vlgrp && is_vlan)
- vlan_gro_receive(&q_vector->napi,
- adapter->vlgrp,
- tag, skb);
- else
+ if (is_vlan) {
+ u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+ __vlan_hwaccel_put_tag(skb, tag);
+ }
+
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
napi_gro_receive(&q_vector->napi, skb);
- } else {
- if (adapter->vlgrp && is_vlan)
- vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
- else
+ else
netif_rx(skb);
- }
}
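/*
 * Illustration only: the pattern this hunk (and the jme and mlx4_en
 * hunks later in this diff) converges on -- instead of routing VLAN
 * frames through a vlan_group, the tag is stashed in the skb with
 * __vlan_hwaccel_put_tag() and the frame takes the normal GRO/netif_rx
 * path.  example_rx_deliver() is a hypothetical stand-in for a
 * driver's receive completion handler.
 */
#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static void example_rx_deliver(struct napi_struct *napi,
			       struct sk_buff *skb, bool is_vlan, u16 tag)
{
	if (is_vlan)
		__vlan_hwaccel_put_tag(skb, tag);	/* tag travels inside the skb */
	napi_gro_receive(napi, skb);			/* VLAN layer untags it later */
}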
/**
@@ -1401,24 +1398,6 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
}
}
-static void ixgbevf_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- int i, j;
- u32 ctrl;
-
- adapter->vlgrp = grp;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i].reg_idx;
- ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
- }
-}
-
static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -1427,6 +1406,7 @@ static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
/* add VID to filter table */
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, true);
+ set_bit(vid, adapter->active_vlans);
}
static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -1434,31 +1414,18 @@ static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
- ixgbevf_irq_disable(adapter);
-
- vlan_group_set_device(adapter->vlgrp, vid, NULL);
-
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
- ixgbevf_irq_enable(adapter, true, true);
-
/* remove VID from filter table */
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, false);
+ clear_bit(vid, adapter->active_vlans);
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
- ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+ u16 vid;
- if (adapter->vlgrp) {
- u16 vid;
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- if (!vlan_group_get_device(adapter->vlgrp, vid))
- continue;
- ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
- }
- }
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}
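/*
 * Illustration only: the vlgrp pointer is replaced throughout by a
 * plain bitmap with one bit per possible VLAN ID, as declared in
 * ixgbevf.h above.  A standalone sketch of the bookkeeping (the
 * example_* names are hypothetical; in the driver the add path also
 * reprograms the hardware VFTA):
 */
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

struct example_vlan_state {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};

static void example_add_vid(struct example_vlan_state *s, u16 vid)
{
	set_bit(vid, s->active_vlans);
}

static void example_kill_vid(struct example_vlan_state *s, u16 vid)
{
	clear_bit(vid, s->active_vlans);
}

static void example_restore(struct example_vlan_state *s)
{
	u16 vid;

	/* replay every remembered VID, e.g. after a reset */
	for_each_set_bit(vid, s->active_vlans, VLAN_N_VID)
		example_add_vid(s, vid);
}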
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
@@ -1648,7 +1615,7 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
for (i = 0; i < num_rx_rings; i++) {
j = adapter->rx_ring[i].reg_idx;
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
- rxdctl |= IXGBE_RXDCTL_ENABLE;
+ rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
if (hw->mac.type == ixgbe_mac_X540_vf) {
rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
@@ -3249,18 +3216,17 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
}
static const struct net_device_ops ixgbe_netdev_ops = {
- .ndo_open = &ixgbevf_open,
- .ndo_stop = &ixgbevf_close,
- .ndo_start_xmit = &ixgbevf_xmit_frame,
- .ndo_set_rx_mode = &ixgbevf_set_rx_mode,
- .ndo_set_multicast_list = &ixgbevf_set_rx_mode,
+ .ndo_open = ixgbevf_open,
+ .ndo_stop = ixgbevf_close,
+ .ndo_start_xmit = ixgbevf_xmit_frame,
+ .ndo_set_rx_mode = ixgbevf_set_rx_mode,
+ .ndo_set_multicast_list = ixgbevf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = &ixgbevf_set_mac,
- .ndo_change_mtu = &ixgbevf_change_mtu,
- .ndo_tx_timeout = &ixgbevf_tx_timeout,
- .ndo_vlan_rx_register = &ixgbevf_vlan_rx_register,
- .ndo_vlan_rx_add_vid = &ixgbevf_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = &ixgbevf_vlan_rx_kill_vid,
+ .ndo_set_mac_address = ixgbevf_set_mac,
+ .ndo_change_mtu = ixgbevf_change_mtu,
+ .ndo_tx_timeout = ixgbevf_tx_timeout,
+ .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3364,7 +3330,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
hw->mac.type = ii->mac;
memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
- sizeof(struct ixgbe_mac_operations));
+ sizeof(struct ixgbe_mbx_operations));
adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 78ddd8b79e7..e122493ab70 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -14,6 +14,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <asm/hardware/uengine.h>
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index b5b174a8c14..3ac262f5563 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -271,9 +271,7 @@ jme_reset_mac_processor(struct jme_adapter *jme)
static inline void
jme_clear_pm(struct jme_adapter *jme)
{
- jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
- pci_set_power_state(jme->pdev, PCI_D0);
- device_set_wakeup_enable(&jme->pdev->dev, false);
+ jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
}
static int
@@ -753,20 +751,28 @@ jme_make_new_rx_buf(struct jme_adapter *jme, int i)
struct jme_ring *rxring = &(jme->rxring[0]);
struct jme_buffer_info *rxbi = rxring->bufinf + i;
struct sk_buff *skb;
+ dma_addr_t mapping;
skb = netdev_alloc_skb(jme->dev,
jme->dev->mtu + RX_EXTRA_LEN);
if (unlikely(!skb))
return -ENOMEM;
+ mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
+ offset_in_page(skb->data), skb_tailroom(skb),
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ if (likely(rxbi->mapping))
+ pci_unmap_page(jme->pdev, rxbi->mapping,
+ rxbi->len, PCI_DMA_FROMDEVICE);
+
rxbi->skb = skb;
rxbi->len = skb_tailroom(skb);
- rxbi->mapping = pci_map_page(jme->pdev,
- virt_to_page(skb->data),
- offset_in_page(skb->data),
- rxbi->len,
- PCI_DMA_FROMDEVICE);
-
+ rxbi->mapping = mapping;
return 0;
}
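/*
 * Illustration only: the general shape of the fix above -- map the new
 * buffer first, bail out on a mapping error, and only then release the
 * old mapping and publish the new one, so a failed refill never leaves
 * the ring slot pointing at an unmapped buffer.  struct example_slot
 * and example_refill() are hypothetical stand-ins for the driver's
 * jme_buffer_info and jme_make_new_rx_buf().
 */
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/errno.h>

struct example_slot {
	struct sk_buff *skb;
	dma_addr_t mapping;
	int len;
};

static int example_refill(struct pci_dev *pdev, struct example_slot *slot,
			  struct sk_buff *skb)
{
	dma_addr_t mapping;

	mapping = pci_map_page(pdev, virt_to_page(skb->data),
			       offset_in_page(skb->data),
			       skb_tailroom(skb), PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, mapping)) {
		dev_kfree_skb(skb);
		return -ENOMEM;			/* old buffer stays valid */
	}

	if (slot->mapping)
		pci_unmap_page(pdev, slot->mapping, slot->len,
			       PCI_DMA_FROMDEVICE);

	slot->skb = skb;
	slot->len = skb_tailroom(skb);
	slot->mapping = mapping;
	return 0;
}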
@@ -1050,16 +1056,12 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
skb_checksum_none_assert(skb);
if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
- if (jme->vlgrp) {
- jme->jme_vlan_rx(skb, jme->vlgrp,
- le16_to_cpu(rxdesc->descwb.vlan));
- NET_STAT(jme).rx_bytes += 4;
- } else {
- dev_kfree_skb(skb);
- }
- } else {
- jme->jme_rx(skb);
+ u16 vid = le16_to_cpu(rxdesc->descwb.vlan);
+
+ __vlan_hwaccel_put_tag(skb, vid);
+ NET_STAT(jme).rx_bytes += 4;
}
+ jme->jme_rx(skb);
if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
cpu_to_le16(RXWBFLAG_DEST_MUL))
@@ -1817,11 +1819,9 @@ jme_powersave_phy(struct jme_adapter *jme)
{
if (jme->reg_pmcs) {
jme_set_100m_half(jme);
-
if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
jme_wait_link(jme);
-
- jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+ jme_clear_pm(jme);
} else {
jme_phy_off(jme);
}
@@ -2286,16 +2286,6 @@ static inline void jme_resume_rx(struct jme_adapter *jme)
}
static void
-jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
-{
- struct jme_adapter *jme = netdev_priv(netdev);
-
- jme_pause_rx(jme);
- jme->vlgrp = grp;
- jme_resume_rx(jme);
-}
-
-static void
jme_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -2405,7 +2395,6 @@ jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
test_bit(JME_FLAG_POLL, &jme->flags)) {
clear_bit(JME_FLAG_POLL, &jme->flags);
jme->jme_rx = netif_rx;
- jme->jme_vlan_rx = vlan_hwaccel_rx;
dpi->cur = PCC_P1;
dpi->attempt = PCC_P1;
dpi->cnt = 0;
@@ -2415,7 +2404,6 @@ jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
!(test_bit(JME_FLAG_POLL, &jme->flags))) {
set_bit(JME_FLAG_POLL, &jme->flags);
jme->jme_rx = netif_receive_skb;
- jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
jme_interrupt_mode(jme);
}
@@ -2529,8 +2517,7 @@ jme_set_wol(struct net_device *netdev,
jme->reg_pmcs |= PMCS_MFEN;
jwrite32(jme, JME_PMCS, jme->reg_pmcs);
-
- device_set_wakeup_enable(&jme->pdev->dev, jme->reg_pmcs);
+ device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
return 0;
}
@@ -2855,7 +2842,6 @@ static const struct net_device_ops jme_netdev_ops = {
.ndo_set_multicast_list = jme_set_multi,
.ndo_change_mtu = jme_change_mtu,
.ndo_tx_timeout = jme_tx_timeout,
- .ndo_vlan_rx_register = jme_vlan_rx_register,
.ndo_fix_features = jme_fix_features,
.ndo_set_features = jme_set_features,
};
@@ -2938,7 +2924,6 @@ jme_init_one(struct pci_dev *pdev,
jme->pdev = pdev;
jme->dev = netdev;
jme->jme_rx = netif_rx;
- jme->jme_vlan_rx = vlan_hwaccel_rx;
jme->old_mtu = netdev->mtu = 1500;
jme->phylink = 0;
jme->tx_ring_size = 1 << 10;
@@ -3058,6 +3043,9 @@ jme_init_one(struct pci_dev *pdev,
jme->mii_if.mdio_write = jme_mdio_write;
jme_clear_pm(jme);
+ pci_set_power_state(jme->pdev, PCI_D0);
+ device_set_wakeup_enable(&pdev->dev, true);
+
jme_set_phyfifo_5level(jme);
jme->pcirev = pdev->revision;
if (!jme->fpgaver)
@@ -3135,8 +3123,9 @@ jme_shutdown(struct pci_dev *pdev)
pci_pme_active(pdev, true);
}
-#ifdef CONFIG_PM
-static int jme_suspend(struct device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int
+jme_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3175,14 +3164,14 @@ static int jme_suspend(struct device *dev)
return 0;
}
-static int jme_resume(struct device *dev)
+static int
+jme_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct jme_adapter *jme = netdev_priv(netdev);
- jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
-
+ jme_clear_pm(jme);
jme_phy_on(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
jme_set_settings(netdev, &jme->old_ecmd);
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index e9aaeca96ab..c1f8b893e2e 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -24,6 +24,7 @@
#ifndef __JME_H_INCLUDED__
#define __JME_H_INCLUDED__
+#include <linux/interrupt.h>
#define DRV_NAME "jme"
#define DRV_VERSION "1.0.8"
@@ -450,7 +451,6 @@ struct jme_adapter {
u32 msg_enable;
struct ethtool_cmd old_ecmd;
unsigned int old_mtu;
- struct vlan_group *vlgrp;
struct dynpcc_info dpi;
atomic_t intr_sem;
atomic_t link_changing;
@@ -458,9 +458,6 @@ struct jme_adapter {
atomic_t rx_cleaning;
atomic_t rx_empty;
int (*jme_rx)(struct sk_buff *skb);
- int (*jme_vlan_rx)(struct sk_buff *skb,
- struct vlan_group *grp,
- unsigned short vlan_tag);
DECLARE_NAPI_STRUCT
DECLARE_NET_DEVICE_STATS
};
@@ -851,6 +848,7 @@ enum jme_ghc_txmac_clk {
* Power management control and status register
*/
enum jme_pmcs_bit_masks {
+ PMCS_STMASK = 0xFFFF0000,
PMCS_WF7DET = 0x80000000,
PMCS_WF6DET = 0x40000000,
PMCS_WF5DET = 0x20000000,
@@ -862,6 +860,7 @@ enum jme_pmcs_bit_masks {
PMCS_LFDET = 0x00040000,
PMCS_LRDET = 0x00020000,
PMCS_MFDET = 0x00010000,
+ PMCS_ENMASK = 0x0000FFFF,
PMCS_WF7EN = 0x00008000,
PMCS_WF6EN = 0x00004000,
PMCS_WF5EN = 0x00002000,
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index c7a9bef4dfb..763844c587f 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -504,12 +504,7 @@ static void korina_multicast_list(struct net_device *dev)
hash_table[i] = 0;
netdev_for_each_mc_addr(ha, dev) {
- char *addrs = ha->addr;
-
- if (!(*addrs & 1))
- continue;
-
- crc = ether_crc_le(6, addrs);
+ crc = ether_crc_le(6, ha->addr);
crc >>= 26;
hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
}
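/*
 * Illustration only: the simplified loop above keeps the original
 * hashing scheme -- the six most significant bits of the little-endian
 * CRC pick one of 64 hash positions, stored as four 16-bit words with
 * bit 15 of word 0 holding position 0.  Worked example: if the shifted
 * CRC is 42, the word index is 42 >> 4 = 2 and the bit is
 * 15 - (42 & 0xf) = 5, i.e. hash_table[2] |= 1 << 5.
 */
#include <linux/types.h>

static void example_set_hash_bit(u16 hash_table[4], u32 crc)
{
	u32 pos = crc >> 26;			/* top 6 bits: 0..63 */

	hash_table[pos >> 4] |= 1 << (15 - (pos & 0xf));
}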
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index fc12ac0d9f2..4a6ae057e3b 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -23,6 +23,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index bcd9ba68c9f..f56743a28fc 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -13,6 +13,7 @@
#define DEBUG
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index 61631cace91..d19c849059d 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -23,6 +23,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
@@ -34,6 +35,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <asm/io.h>
#define DRV_NAME "ks8851_mll"
@@ -1188,8 +1190,6 @@ static void ks_set_rx_mode(struct net_device *netdev)
int i = 0;
netdev_for_each_mc_addr(ha, netdev) {
- if (!(*ha->addr & 1))
- continue;
if (i >= MAX_MCAST_LST)
break;
memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 41ea5920c15..27418d31a09 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -17,6 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
@@ -5784,8 +5785,6 @@ static void netdev_set_rx_mode(struct net_device *dev)
}
netdev_for_each_mc_addr(ha, dev) {
- if (!(*ha->addr & 1))
- continue;
if (i >= MAX_MULTICAST_LIST)
break;
memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 17b75e5f1b0..05ae21435bf 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -36,7 +36,7 @@
Paul Gortmaker : tweak ANK's above multicast changes a bit.
Paul Gortmaker : update packet statistics for v2.1.x
Alan Cox : support arbitrary stupid port mappings on the
- 68K Macintosh. Support >16bit I/O spaces
+ 68K Macintosh. Support >16bit I/O spaces
Paul Gortmaker : add kmod support for auto-loading of the 8390
module by all drivers that require it.
Alan Cox : Spinlocking work, added 'BUG_83C690'
@@ -58,8 +58,8 @@
#include <linux/string.h>
#include <linux/bitops.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -108,7 +108,6 @@ int ei_debug = 1;
/* Index to functions. */
static void ei_tx_intr(struct net_device *dev);
static void ei_tx_err(struct net_device *dev);
-void ei_tx_timeout(struct net_device *dev);
static void ei_receive(struct net_device *dev);
static void ei_rx_overrun(struct net_device *dev);
@@ -206,19 +205,19 @@ static int __ei_open(struct net_device *dev)
struct ei_device *ei_local = netdev_priv(dev);
if (dev->watchdog_timeo <= 0)
- dev->watchdog_timeo = TX_TIMEOUT;
+ dev->watchdog_timeo = TX_TIMEOUT;
/*
* Grab the page lock so we own the register set, then call
* the init function.
*/
- spin_lock_irqsave(&ei_local->page_lock, flags);
+ spin_lock_irqsave(&ei_local->page_lock, flags);
__NS8390_init(dev, 1);
/* Set the flag before we drop the lock, That way the IRQ arrives
after its set and we get no silly warnings */
netif_start_queue(dev);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
ei_local->irqlock = 0;
return 0;
}
@@ -238,9 +237,9 @@ static int __ei_close(struct net_device *dev)
* Hold the page lock during close
*/
- spin_lock_irqsave(&ei_local->page_lock, flags);
+ spin_lock_irqsave(&ei_local->page_lock, flags);
__NS8390_init(dev, 0);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
+ spin_unlock_irqrestore(&ei_local->page_lock, flags);
netif_stop_queue(dev);
return 0;
}
@@ -267,12 +266,12 @@ static void __ei_tx_timeout(struct net_device *dev)
isr = ei_inb(e8390_base+EN0_ISR);
spin_unlock_irqrestore(&ei_local->page_lock, flags);
- printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
- dev->name, (txsr & ENTSR_ABT) ? "excess collisions." :
- (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar);
+ netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
+ (txsr & ENTSR_ABT) ? "excess collisions." :
+ (isr) ? "lost interrupt?" : "cable problem?",
+ txsr, isr, tickssofar);
- if (!isr && !dev->stats.tx_packets)
- {
+ if (!isr && !dev->stats.tx_packets) {
/* The 8390 probably hasn't gotten on the cable yet. */
ei_local->interface_num ^= 1; /* Try a different xcvr. */
}
@@ -344,27 +343,22 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
* card, leaving a substantial gap between each transmitted packet.
*/
- if (ei_local->tx1 == 0)
- {
+ if (ei_local->tx1 == 0) {
output_page = ei_local->tx_start_page;
ei_local->tx1 = send_length;
if (ei_debug && ei_local->tx2 > 0)
- printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n",
- dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing);
- }
- else if (ei_local->tx2 == 0)
- {
+ netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx2, ei_local->lasttx, ei_local->txing);
+ } else if (ei_local->tx2 == 0) {
output_page = ei_local->tx_start_page + TX_PAGES/2;
ei_local->tx2 = send_length;
if (ei_debug && ei_local->tx1 > 0)
- printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n",
- dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing);
- }
- else
- { /* We should never get here. */
+ netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
+ ei_local->tx1, ei_local->lasttx, ei_local->txing);
+ } else { /* We should never get here. */
if (ei_debug)
- printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n",
- dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx);
+ netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
+ ei_local->tx1, ei_local->tx2, ei_local->lasttx);
ei_local->irqlock = 0;
netif_stop_queue(dev);
ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -382,22 +376,18 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
ei_block_output(dev, send_length, data, output_page);
- if (! ei_local->txing)
- {
+ if (!ei_local->txing) {
ei_local->txing = 1;
NS8390_trigger_send(dev, send_length, output_page);
- if (output_page == ei_local->tx_start_page)
- {
+ if (output_page == ei_local->tx_start_page) {
ei_local->tx1 = -1;
ei_local->lasttx = -1;
- }
- else
- {
+ } else {
ei_local->tx2 = -1;
ei_local->lasttx = -2;
}
- }
- else ei_local->txqueue++;
+ } else
+ ei_local->txqueue++;
if (ei_local->tx1 && ei_local->tx2)
netif_stop_queue(dev);
@@ -410,8 +400,8 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
spin_unlock(&ei_local->page_lock);
enable_irq_lockdep_irqrestore(dev->irq, &flags);
-
- dev_kfree_skb (skb);
+ skb_tx_timestamp(skb);
+ dev_kfree_skb(skb);
dev->stats.tx_bytes += send_length;
return NETDEV_TX_OK;
@@ -442,15 +432,13 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
spin_lock(&ei_local->page_lock);
- if (ei_local->irqlock)
- {
+ if (ei_local->irqlock) {
/*
* This might just be an interrupt for a PCI device sharing
* this line
*/
- printk("%s: Interrupted while interrupts are masked!"
- " isr=%#2x imr=%#2x.\n",
- dev->name, ei_inb_p(e8390_base + EN0_ISR),
+ netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
+ ei_inb_p(e8390_base + EN0_ISR),
ei_inb_p(e8390_base + EN0_IMR));
spin_unlock(&ei_local->page_lock);
return IRQ_NONE;
@@ -459,15 +447,14 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
/* Change to page 0 and read the intr status reg. */
ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
if (ei_debug > 3)
- printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name,
+ netdev_dbg(dev, "interrupt(isr=%#2.2x)\n",
ei_inb_p(e8390_base + EN0_ISR));
/* !!Assumption!! -- we stay in page 0. Don't break this. */
while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
- ++nr_serviced < MAX_SERVICE)
- {
+ ++nr_serviced < MAX_SERVICE) {
if (!netif_running(dev)) {
- printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name);
+ netdev_warn(dev, "interrupt from stopped card\n");
/* rmk - acknowledge the interrupts */
ei_outb_p(interrupts, e8390_base + EN0_ISR);
interrupts = 0;
@@ -475,8 +462,7 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
}
if (interrupts & ENISR_OVER)
ei_rx_overrun(dev);
- else if (interrupts & (ENISR_RX+ENISR_RX_ERR))
- {
+ else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
/* Got a good (?) packet. */
ei_receive(dev);
}
@@ -486,35 +472,30 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
else if (interrupts & ENISR_TX_ERR)
ei_tx_err(dev);
- if (interrupts & ENISR_COUNTERS)
- {
+ if (interrupts & ENISR_COUNTERS) {
dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1);
- dev->stats.rx_missed_errors+= ei_inb_p(e8390_base + EN0_COUNTER2);
+ dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
}
/* Ignore any RDC interrupts that make it back to here. */
if (interrupts & ENISR_RDC)
- {
ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);
- }
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
}
- if (interrupts && ei_debug)
- {
+ if (interrupts && ei_debug) {
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
- if (nr_serviced >= MAX_SERVICE)
- {
+ if (nr_serviced >= MAX_SERVICE) {
/* 0xFF is valid for a card removal */
- if(interrupts!=0xFF)
- printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n",
- dev->name, interrupts);
+ if (interrupts != 0xFF)
+ netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
+ interrupts);
ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
} else {
- printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts);
+ netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
}
}
@@ -554,30 +535,32 @@ static void ei_tx_err(struct net_device *dev)
unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
#ifdef VERBOSE_ERROR_DUMP
- printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr);
+ netdev_dbg(dev, "transmitter error (%#2x):", txsr);
if (txsr & ENTSR_ABT)
- printk("excess-collisions ");
+ pr_cont(" excess-collisions ");
if (txsr & ENTSR_ND)
- printk("non-deferral ");
+ pr_cont(" non-deferral ");
if (txsr & ENTSR_CRS)
- printk("lost-carrier ");
+ pr_cont(" lost-carrier ");
if (txsr & ENTSR_FU)
- printk("FIFO-underrun ");
+ pr_cont(" FIFO-underrun ");
if (txsr & ENTSR_CDH)
- printk("lost-heartbeat ");
- printk("\n");
+ pr_cont(" lost-heartbeat ");
+ pr_cont("\n");
#endif
ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
if (tx_was_aborted)
ei_tx_intr(dev);
- else
- {
+ else {
dev->stats.tx_errors++;
- if (txsr & ENTSR_CRS) dev->stats.tx_carrier_errors++;
- if (txsr & ENTSR_CDH) dev->stats.tx_heartbeat_errors++;
- if (txsr & ENTSR_OWC) dev->stats.tx_window_errors++;
+ if (txsr & ENTSR_CRS)
+ dev->stats.tx_carrier_errors++;
+ if (txsr & ENTSR_CDH)
+ dev->stats.tx_heartbeat_errors++;
+ if (txsr & ENTSR_OWC)
+ dev->stats.tx_window_errors++;
}
}
@@ -603,52 +586,45 @@ static void ei_tx_intr(struct net_device *dev)
*/
ei_local->txqueue--;
- if (ei_local->tx1 < 0)
- {
+ if (ei_local->tx1 < 0) {
if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
- printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n",
- ei_local->name, ei_local->lasttx, ei_local->tx1);
+ pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx1);
ei_local->tx1 = 0;
- if (ei_local->tx2 > 0)
- {
+ if (ei_local->tx2 > 0) {
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
dev->trans_start = jiffies;
ei_local->tx2 = -1,
ei_local->lasttx = 2;
- }
- else ei_local->lasttx = 20, ei_local->txing = 0;
- }
- else if (ei_local->tx2 < 0)
- {
+ } else
+ ei_local->lasttx = 20, ei_local->txing = 0;
+ } else if (ei_local->tx2 < 0) {
if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
- printk("%s: bogus last_tx_buffer %d, tx2=%d.\n",
- ei_local->name, ei_local->lasttx, ei_local->tx2);
+ pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
+ ei_local->name, ei_local->lasttx, ei_local->tx2);
ei_local->tx2 = 0;
- if (ei_local->tx1 > 0)
- {
+ if (ei_local->tx1 > 0) {
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
dev->trans_start = jiffies;
ei_local->tx1 = -1;
ei_local->lasttx = 1;
- }
- else
+ } else
ei_local->lasttx = 10, ei_local->txing = 0;
- }
-// else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n",
-// dev->name, ei_local->lasttx);
+ } /* else
+ netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
+ ei_local->lasttx);
+*/
/* Minimize Tx latency: update the statistics after we restart TXing. */
if (status & ENTSR_COL)
dev->stats.collisions++;
if (status & ENTSR_PTX)
dev->stats.tx_packets++;
- else
- {
+ else {
dev->stats.tx_errors++;
- if (status & ENTSR_ABT)
- {
+ if (status & ENTSR_ABT) {
dev->stats.tx_aborted_errors++;
dev->stats.collisions += 16;
}
@@ -682,8 +658,7 @@ static void ei_receive(struct net_device *dev)
struct e8390_pkt_hdr rx_frame;
int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
- while (++rx_pkt_count < 10)
- {
+ while (++rx_pkt_count < 10) {
int pkt_len, pkt_stat;
/* Get the rx page (incoming packet pointer). */
@@ -702,9 +677,11 @@ static void ei_receive(struct net_device *dev)
Keep quiet if it looks like a card removal. One problem here
is that some clones crash in roughly the same way.
*/
- if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
- printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n",
- dev->name, this_frame, ei_local->current_page);
+ if (ei_debug > 0 &&
+ this_frame != ei_local->current_page &&
+ (this_frame != 0x0 || rxing_page != 0xFF))
+ netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
+ this_frame, ei_local->current_page);
if (this_frame == rxing_page) /* Read all the frames? */
break; /* Done for now */
@@ -730,46 +707,39 @@ static void ei_receive(struct net_device *dev)
continue;
}
- if (pkt_len < 60 || pkt_len > 1518)
- {
+ if (pkt_len < 60 || pkt_len > 1518) {
if (ei_debug)
- printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n",
- dev->name, rx_frame.count, rx_frame.status,
+ netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
+ rx_frame.count, rx_frame.status,
rx_frame.next);
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
- }
- else if ((pkt_stat & 0x0F) == ENRSR_RXOK)
- {
+ } else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
struct sk_buff *skb;
skb = dev_alloc_skb(pkt_len+2);
- if (skb == NULL)
- {
+ if (skb == NULL) {
if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n",
- dev->name, pkt_len);
+ netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n",
+ pkt_len);
dev->stats.rx_dropped++;
break;
- }
- else
- {
- skb_reserve(skb,2); /* IP headers on 16 byte boundaries */
+ } else {
+ skb_reserve(skb, 2); /* IP headers on 16 byte boundaries */
skb_put(skb, pkt_len); /* Make room */
ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
+ skb->protocol = eth_type_trans(skb, dev);
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
if (pkt_stat & ENRSR_PHY)
dev->stats.multicast++;
}
- }
- else
- {
+ } else {
if (ei_debug)
- printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n",
- dev->name, rx_frame.status, rx_frame.next,
+ netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
+ rx_frame.status, rx_frame.next,
rx_frame.count);
dev->stats.rx_errors++;
/* NB: The NIC counts CRC, frame and missed errors. */
@@ -780,8 +750,8 @@ static void ei_receive(struct net_device *dev)
/* This _should_ never happen: it's here for avoiding bad clones. */
if (next_frame >= ei_local->stop_page) {
- printk("%s: next frame inconsistency, %#2x\n", dev->name,
- next_frame);
+ netdev_notice(dev, "next frame inconsistency, %#2x\n",
+ next_frame);
next_frame = ei_local->rx_start_page;
}
ei_local->current_page = next_frame;
@@ -821,7 +791,7 @@ static void ei_rx_overrun(struct net_device *dev)
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name);
+ netdev_dbg(dev, "Receiver overrun\n");
dev->stats.rx_over_errors++;
/*
@@ -844,8 +814,7 @@ static void ei_rx_overrun(struct net_device *dev)
* step is vital, and skipping it will cause no end of havoc.
*/
- if (was_txing)
- {
+ if (was_txing) {
unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
if (!tx_completed)
must_resend = 1;
@@ -869,7 +838,7 @@ static void ei_rx_overrun(struct net_device *dev)
*/
ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
if (must_resend)
- ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
+ ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
}
/*
@@ -886,11 +855,11 @@ static struct net_device_stats *__ei_get_stats(struct net_device *dev)
if (!netif_running(dev))
return &dev->stats;
- spin_lock_irqsave(&ei_local->page_lock,flags);
+ spin_lock_irqsave(&ei_local->page_lock, flags);
/* Read the counter registers, assuming we are in page 0. */
- dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
- dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
- dev->stats.rx_missed_errors+= ei_inb_p(ioaddr + EN0_COUNTER2);
+ dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0);
+ dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1);
+ dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
spin_unlock_irqrestore(&ei_local->page_lock, flags);
return &dev->stats;
@@ -929,13 +898,11 @@ static void do_set_multicast_list(struct net_device *dev)
int i;
struct ei_device *ei_local = netdev_priv(dev);
- if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
- {
+ if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
memset(ei_local->mcfilter, 0, 8);
if (!netdev_mc_empty(dev))
make_mc_bits(ei_local->mcfilter, dev);
- }
- else
+ } else
memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */
/*
@@ -954,23 +921,23 @@ static void do_set_multicast_list(struct net_device *dev)
if (netif_running(dev))
ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
- for(i = 0; i < 8; i++)
- {
+ for (i = 0; i < 8; i++) {
ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
#ifndef BUG_83C690
- if(ei_inb_p(e8390_base + EN1_MULT_SHIFT(i))!=ei_local->mcfilter[i])
- printk(KERN_ERR "Multicast filter read/write mismap %d\n",i);
+ if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
+ netdev_err(dev, "Multicast filter read/write mismap %d\n",
+ i);
#endif
}
ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
- if(dev->flags&IFF_PROMISC)
- ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
+ if (dev->flags&IFF_PROMISC)
+ ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
- ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
- else
- ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
- }
+ ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
+ else
+ ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
+}
/*
* Called without lock held. This is invoked from user context and may
@@ -1042,8 +1009,8 @@ static void __NS8390_init(struct net_device *dev, int startp)
? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
: 0x48;
- if(sizeof(struct e8390_pkt_hdr)!=4)
- panic("8390.c: header struct mispacked\n");
+ if (sizeof(struct e8390_pkt_hdr) != 4)
+ panic("8390.c: header struct mispacked\n");
/* Follow National Semi's recommendations for initing the DP83902. */
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
ei_outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
@@ -1067,11 +1034,11 @@ static void __NS8390_init(struct net_device *dev, int startp)
/* Copy the station address into the DS8390 registers. */
ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
- for(i = 0; i < 6; i++)
- {
+ for (i = 0; i < 6; i++) {
ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
- if (ei_debug > 1 && ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
- printk(KERN_ERR "Hw. address read/write mismap %d\n",i);
+ if (ei_debug > 1 &&
+ ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
+ netdev_err(dev, "Hw. address read/write mismap %d\n", i);
}
ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
@@ -1080,8 +1047,7 @@ static void __NS8390_init(struct net_device *dev, int startp)
ei_local->tx1 = ei_local->tx2 = 0;
ei_local->txing = 0;
- if (startp)
- {
+ if (startp) {
ei_outb_p(0xff, e8390_base + EN0_ISR);
ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
@@ -1099,14 +1065,12 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
int start_page)
{
unsigned long e8390_base = dev->base_addr;
- struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
+ struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
- if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS)
- {
- printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
- dev->name);
+ if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
+ netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
return;
}
ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index b7948ccfcf7..728fe414147 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -48,6 +48,8 @@
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
#include "ll_temac.h"
@@ -727,6 +729,8 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (lp->tx_bd_tail >= TX_BD_NUM)
lp->tx_bd_tail = 0;
+ skb_tx_timestamp(skb);
+
/* Kick off the transfer */
lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
@@ -772,7 +776,8 @@ static void ll_temac_recv(struct net_device *ndev)
skb->ip_summed = CHECKSUM_COMPLETE;
}
- netif_rx(skb);
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb);
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += length;
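/*
 * Illustration only: the timestamping hooks added to this driver (and
 * to lib8390 and macb elsewhere in this diff) follow one pattern --
 * request a software TX timestamp right before the frame is handed to
 * the hardware, and on receive offer the skb to the PHY timestamping
 * core first, falling through to netif_rx() only when nothing defers
 * it.  The example_* functions are hypothetical stand-ins for a
 * driver's transmit and receive paths.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>

static void example_xmit_tail(struct sk_buff *skb)
{
	skb_tx_timestamp(skb);	/* grab the sw timestamp before hw consumes the skb */
	/* ...kick the DMA engine / write the tail pointer... */
}

static void example_rx_pass(struct sk_buff *skb)
{
	if (!skb_defer_rx_timestamp(skb))
		netif_rx(skb);
}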
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 8a1097cf8a8..f9888d20177 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -41,6 +41,7 @@ static const char *version =
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 6c6a02869df..dc4e305a108 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
@@ -321,6 +322,9 @@ static void macb_tx(struct macb *bp)
for (i = 0; i < TX_RING_SIZE; i++)
bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ /* Add wrap bit */
+ bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
/* free transmit buffer in upper layer*/
for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
struct ring_info *rp = &bp->tx_skb[tail];
@@ -669,6 +673,8 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
entry = NEXT_TX(entry);
bp->tx_head = entry;
+ skb_tx_timestamp(skb);
+
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
if (TX_BUFFS_AVAIL(bp) < 1)
@@ -1169,7 +1175,7 @@ static int __init macb_probe(struct platform_device *pdev)
clk_enable(bp->hclk);
#endif
- bp->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ bp->regs = ioremap(regs->start, resource_size(regs));
if (!bp->regs) {
dev_err(&pdev->dev, "failed to map registers, aborting.\n");
err = -ENOMEM;
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 1c5221f79d6..2074e9724ba 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -13,6 +13,7 @@
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitrev.h>
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index c685a465687..4286e67f963 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -221,7 +221,7 @@ static int __devinit mace_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
dev->base_addr = (u32)MACE_BASE;
- mp->mace = (volatile struct mace *) MACE_BASE;
+ mp->mace = MACE_BASE;
dev->irq = IRQ_MAC_MACE;
mp->dma_intr = IRQ_MAC_MACE_DMA;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d6aeaa5f25e..05172c39a0c 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -414,7 +414,8 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
- NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM)
+ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
+ NETIF_F_HW_VLAN_FILTER)
#define MACVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
@@ -509,6 +510,28 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
return stats;
}
+static void macvlan_vlan_rx_add_vid(struct net_device *dev,
+ unsigned short vid)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct net_device *lowerdev = vlan->lowerdev;
+ const struct net_device_ops *ops = lowerdev->netdev_ops;
+
+ if (ops->ndo_vlan_rx_add_vid)
+ ops->ndo_vlan_rx_add_vid(lowerdev, vid);
+}
+
+static void macvlan_vlan_rx_kill_vid(struct net_device *dev,
+ unsigned short vid)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct net_device *lowerdev = vlan->lowerdev;
+ const struct net_device_ops *ops = lowerdev->netdev_ops;
+
+ if (ops->ndo_vlan_rx_kill_vid)
+ ops->ndo_vlan_rx_kill_vid(lowerdev, vid);
+}
+
static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
@@ -541,13 +564,15 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_set_multicast_list = macvlan_set_multicast_list,
.ndo_get_stats64 = macvlan_dev_get_stats64,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = macvlan_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = macvlan_vlan_rx_kill_vid,
};
void macvlan_common_setup(struct net_device *dev)
{
ether_setup(dev);
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
dev->netdev_ops = &macvlan_netdev_ops;
dev->destructor = free_netdev;
dev->header_ops = &macvlan_hard_header_ops,
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 6696e56e632..ab96c319a24 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -60,6 +60,7 @@ static struct proto macvtap_proto = {
*/
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS 65536
+#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;
@@ -340,6 +341,7 @@ static int macvtap_open(struct inode *inode, struct file *file)
{
struct net *net = current->nsproxy->net_ns;
struct net_device *dev = dev_get_by_index(net, iminor(inode));
+ struct macvlan_dev *vlan = netdev_priv(dev);
struct macvtap_queue *q;
int err;
@@ -369,6 +371,16 @@ static int macvtap_open(struct inode *inode, struct file *file)
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
+ /*
+ * so far only KVM virtio_net uses macvtap, enable zero copy between
+ * guest kernel and host kernel when lower device supports zerocopy
+ */
+ if (vlan) {
+ if ((vlan->lowerdev->features & NETIF_F_HIGHDMA) &&
+ (vlan->lowerdev->features & NETIF_F_SG))
+ sock_set_flag(&q->sk, SOCK_ZEROCOPY);
+ }
+
err = macvtap_set_queue(dev, file, q);
if (err)
sock_put(&q->sk);
@@ -433,6 +445,80 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
return skb;
}
+/* set skb frags from iovec, this can move to core network code for reuse */
+static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ int offset, size_t count)
+{
+ int len = iov_length(from, count) - offset;
+ int copy = skb_headlen(skb);
+ int size, offset1 = 0;
+ int i = 0;
+ skb_frag_t *f;
+
+ /* Skip over from offset */
+ while (count && (offset >= from->iov_len)) {
+ offset -= from->iov_len;
+ ++from;
+ --count;
+ }
+
+ /* copy up to skb headlen */
+ while (count && (copy > 0)) {
+ size = min_t(unsigned int, copy, from->iov_len - offset);
+ if (copy_from_user(skb->data + offset1, from->iov_base + offset,
+ size))
+ return -EFAULT;
+ if (copy > size) {
+ ++from;
+ --count;
+ }
+ copy -= size;
+ offset1 += size;
+ offset = 0;
+ }
+
+ if (len == offset1)
+ return 0;
+
+ while (count--) {
+ struct page *page[MAX_SKB_FRAGS];
+ int num_pages;
+ unsigned long base;
+
+ len = from->iov_len - offset1;
+ if (!len) {
+ offset1 = 0;
+ ++from;
+ continue;
+ }
+ base = (unsigned long)from->iov_base + offset1;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+ num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+ if ((num_pages != size) ||
+ (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
+ /* put_page is in skb free */
+ return -EFAULT;
+ skb->data_len += len;
+ skb->len += len;
+ skb->truesize += len;
+ atomic_add(len, &skb->sk->sk_wmem_alloc);
+ while (len) {
+ f = &skb_shinfo(skb)->frags[i];
+ f->page = page[i];
+ f->page_offset = base & ~PAGE_MASK;
+ f->size = min_t(int, len, PAGE_SIZE - f->page_offset);
+ skb_shinfo(skb)->nr_frags++;
+ /* increase sk_wmem_alloc */
+ base += f->size;
+ len -= f->size;
+ i++;
+ }
+ offset1 = 0;
+ ++from;
+ }
+ return 0;
+}
+
/*
* macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
* be shared with the tun/tap driver.
@@ -508,6 +594,8 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
vnet_hdr->csum_start = skb_checksum_start_offset(skb);
vnet_hdr->csum_offset = skb->csum_offset;
+ } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
return 0;
@@ -515,16 +603,18 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
/* Get packet from user space buffer */
-static ssize_t macvtap_get_user(struct macvtap_queue *q,
- const struct iovec *iv, size_t count,
- int noblock)
+static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ const struct iovec *iv, unsigned long total_len,
+ size_t count, int noblock)
{
struct sk_buff *skb;
struct macvlan_dev *vlan;
- size_t len = count;
+ unsigned long len = total_len;
int err;
struct virtio_net_hdr vnet_hdr = { 0 };
int vnet_hdr_len = 0;
+ int copylen;
+ bool zerocopy = false;
if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = q->vnet_hdr_sz;
@@ -552,12 +642,31 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
if (unlikely(len < ETH_HLEN))
goto err;
- skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, len, vnet_hdr.hdr_len,
- noblock, &err);
+ if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
+ zerocopy = true;
+
+ if (zerocopy) {
+ /* There are 256 bytes to be copied in skb, so there is enough
+ * room for skb expand head in case it is used.
+ * The rest buffer is mapped from userspace.
+ */
+ copylen = vnet_hdr.hdr_len;
+ if (!copylen)
+ copylen = GOODCOPY_LEN;
+ } else
+ copylen = len;
+
+ skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+ vnet_hdr.hdr_len, noblock, &err);
if (!skb)
goto err;
- err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, len);
+ if (zerocopy) {
+ err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
+ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ } else
+ err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
+ len);
if (err)
goto err_kfree;
@@ -573,13 +682,16 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
rcu_read_lock_bh();
vlan = rcu_dereference_bh(q->vlan);
+ /* copy skb_ubuf_info for callback when skb has no error */
+ if (zerocopy)
+ skb_shinfo(skb)->destructor_arg = m->msg_control;
if (vlan)
macvlan_start_xmit(skb, vlan->dev);
else
kfree_skb(skb);
rcu_read_unlock_bh();
- return count;
+ return total_len;
err_kfree:
kfree_skb(skb);
@@ -601,8 +713,8 @@ static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
ssize_t result = -ENOLINK;
struct macvtap_queue *q = file->private_data;
- result = macvtap_get_user(q, iv, iov_length(iv, count),
- file->f_flags & O_NONBLOCK);
+ result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
+ file->f_flags & O_NONBLOCK);
return result;
}
@@ -815,7 +927,7 @@ static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
- return macvtap_get_user(q, m->msg_iov, total_len,
+ return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
m->msg_flags & MSG_DONTWAIT);
}
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index 869f0ea43a5..004e64ab1f9 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -5,6 +5,7 @@
*/
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index 2e858e4dcf4..eb096253d78 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -104,7 +104,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
int err = 0;
u64 config = 0;
- if (!priv->mdev->dev->caps.wol) {
+ if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
wol->supported = 0;
wol->wolopts = 0;
return;
@@ -134,7 +134,7 @@ static int mlx4_en_set_wol(struct net_device *netdev,
u64 config = 0;
int err = 0;
- if (!priv->mdev->dev->caps.wol)
+ if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
return -EOPNOTSUPP;
if (wol->supported & ~WAKE_MAGIC)
@@ -170,7 +170,8 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
return NUM_ALL_STATS +
(priv->tx_ring_num + priv->rx_ring_num) * 2;
case ETH_SS_TEST:
- return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.loopback_support) * 2;
+ return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
+ & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
default:
return -EOPNOTSUPP;
}
@@ -220,7 +221,7 @@ static void mlx4_en_get_strings(struct net_device *dev,
case ETH_SS_TEST:
for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
- if (priv->mdev->dev->caps.loopback_support)
+ if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
for (; i < MLX4_EN_NUM_SELF_TEST; i++)
strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
break;
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 9276b1b2558..6bfea233a9f 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -106,7 +106,8 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->tcp_rss = tcp_rss;
params->udp_rss = udp_rss;
- if (params->udp_rss && !mdev->dev->caps.udp_rss) {
+ if (params->udp_rss && !(mdev->dev->caps.flags
+ & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
params->udp_rss = 0;
}
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 61850adae6f..4b0f32e568f 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -45,25 +45,6 @@
#include "mlx4_en.h"
#include "en_port.h"
-
-static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
- int err;
-
- en_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
- priv->vlgrp = grp;
-
- mutex_lock(&mdev->state_lock);
- if (mdev->device_up && priv->port_up) {
- err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
- if (err)
- en_err(priv, "Failed configuring VLAN filter\n");
- }
- mutex_unlock(&mdev->state_lock);
-}
-
static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -71,16 +52,14 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
int err;
int idx;
- if (!priv->vlgrp)
- return;
+ en_dbg(HW, priv, "adding VLAN:%d\n", vid);
- en_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
- vid, vlan_group_get_device(priv->vlgrp, vid));
+ set_bit(vid, priv->active_vlans);
/* Add VID to port VLAN filter */
mutex_lock(&mdev->state_lock);
if (mdev->device_up && priv->port_up) {
- err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err)
en_err(priv, "Failed configuring VLAN filter\n");
}
@@ -97,12 +76,9 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
int err;
int idx;
- if (!priv->vlgrp)
- return;
+ en_dbg(HW, priv, "Killing VID:%d\n", vid);
- en_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp entry:%p)\n",
- vid, priv->vlgrp, vlan_group_get_device(priv->vlgrp, vid));
- vlan_group_set_device(priv->vlgrp, vid, NULL);
+ clear_bit(vid, priv->active_vlans);
/* Remove VID from port VLAN filter */
mutex_lock(&mdev->state_lock);
@@ -112,7 +88,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
en_err(priv, "could not find vid %d in cache\n", vid);
if (mdev->device_up && priv->port_up) {
- err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err)
en_err(priv, "Failed configuring VLAN filter\n");
}
@@ -239,7 +215,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags |= MLX4_EN_FLAG_PROMISC;
/* Enable promiscouos mode */
- if (!mdev->dev->caps.vep_uc_steering)
+ if (!(mdev->dev->caps.flags &
+ MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
priv->base_qpn, 1);
else
@@ -265,12 +242,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
}
- if (priv->vlgrp) {
- /* Disable port VLAN filter */
- err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
- if (err)
- en_err(priv, "Failed disabling VLAN filter\n");
- }
+ /* Disable port VLAN filter */
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+ if (err)
+ en_err(priv, "Failed disabling VLAN filter\n");
}
goto out;
}
@@ -285,7 +260,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags &= ~MLX4_EN_FLAG_PROMISC;
/* Disable promiscouos mode */
- if (!mdev->dev->caps.vep_uc_steering)
+ if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
priv->base_qpn, 0);
else
@@ -304,7 +279,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
}
/* Enable port VLAN filter */
- err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err)
en_err(priv, "Failed enabling VLAN filter\n");
}
@@ -1046,7 +1021,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
.ndo_tx_timeout = mlx4_en_tx_timeout,
- .ndo_vlan_rx_register = mlx4_en_vlan_rx_register,
.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index f2a4f5dd313..5ada5b46911 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -48,7 +48,7 @@ int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
}
-int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp)
+int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_vlan_fltr_mbox *filter;
@@ -63,20 +63,15 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp)
return PTR_ERR(mailbox);
filter = mailbox->buf;
- if (grp) {
- memset(filter, 0, sizeof *filter);
- for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
- entry = 0;
- for (j = 0; j < 32; j++)
- if (vlan_group_get_device(grp, index++))
- entry |= 1 << j;
- filter->entry[i] = cpu_to_be32(entry);
- }
- } else {
- /* When no vlans are configured we block all vlans */
- memset(filter, 0, sizeof(*filter));
+ memset(filter, 0, sizeof(*filter));
+ for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
+ entry = 0;
+ for (j = 0; j < 32; j++)
+ if (test_bit(index++, priv->active_vlans))
+ entry |= 1 << j;
+ filter->entry[i] = cpu_to_be32(entry);
}
- err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
+ err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
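With the vlan_group pointer removed, mlx4_SET_VLAN_FLTR() builds the firmware filter straight from the driver's active_vlans bitmap: every possible VID is visited once and packed 32 per big-endian word, highest-numbered word first, before the mailbox is posted with MLX4_CMD_SET_VLAN_FLTR. A standalone model of just the packing loop follows; htonl() stands in for cpu_to_be32(), and the 128-word table corresponds to 4096 VIDs at 32 bits per entry (the driver's VLAN_FLTR_SIZE).

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>			/* htonl() models cpu_to_be32() */

#define N_VID		4096
#define FLTR_WORDS	(N_VID / 32)	/* mirrors VLAN_FLTR_SIZE */

static unsigned char active[N_VID];	/* 1 = VID enabled */

static void pack_vlan_filter(uint32_t out[FLTR_WORDS])
{
	int i, j, index = 0;

	/* Highest-numbered entry first, same order as the driver loop:
	 * VIDs 0..31 land in out[FLTR_WORDS - 1], and so on. */
	for (i = FLTR_WORDS - 1; i >= 0; i--) {
		uint32_t entry = 0;

		for (j = 0; j < 32; j++)
			if (active[index++])
				entry |= 1u << j;
		out[i] = htonl(entry);
	}
}

int main(void)
{
	uint32_t filter[FLTR_WORDS];

	active[1] = 1;			/* VID 1  -> bit 1 of the last word       */
	active[33] = 1;			/* VID 33 -> bit 1 of the word before it  */
	pack_vlan_filter(filter);
	printf("entry[%d] = 0x%08x, entry[%d] = 0x%08x\n",
	       FLTR_WORDS - 1, ntohl(filter[FLTR_WORDS - 1]),
	       FLTR_WORDS - 2, ntohl(filter[FLTR_WORDS - 2]));
	return 0;
}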
@@ -119,9 +114,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
struct mlx4_set_port_rqp_calc_context *context;
int err;
u32 in_mod;
- u32 m_promisc = (dev->caps.vep_mc_steering) ? MCAST_DIRECT : MCAST_DEFAULT;
+ u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+ MCAST_DIRECT : MCAST_DEFAULT;
- if (dev->caps.vep_mc_steering && dev->caps.vep_uc_steering)
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -131,7 +128,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
memset(context, 0, sizeof *context);
context->base_qpn = cpu_to_be32(base_qpn);
- context->n_mac = 0x7;
+ context->n_mac = 0x2;
context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
base_qpn);
context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 277215fb9d7..37cc9e5c56b 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -611,11 +611,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
gro_skb->truesize += length;
gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (priv->vlgrp && (cqe->vlan_my_qpn &
- cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)))
- vlan_gro_frags(&cq->napi, priv->vlgrp, be16_to_cpu(cqe->sl_vid));
- else
- napi_gro_frags(&cq->napi);
+ if (cqe->vlan_my_qpn &
+ cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) {
+ u16 vid = be16_to_cpu(cqe->sl_vid);
+
+ __vlan_hwaccel_put_tag(gro_skb, vid);
+ }
+
+ napi_gro_frags(&cq->napi);
goto next;
}
@@ -647,13 +650,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring);
+ if (be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_VLAN_PRESENT_MASK)
+ __vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid));
+
/* Push it up the stack */
- if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
- MLX4_CQE_VLAN_PRESENT_MASK)) {
- vlan_hwaccel_receive_skb(skb, priv->vlgrp,
- be16_to_cpu(cqe->sl_vid));
- } else
- netif_receive_skb(skb);
+ netif_receive_skb(skb);
next:
++cq->mcq.cons_index;
@@ -859,7 +861,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
priv->rx_ring[0].cqn, &context);
ptr = ((void *) &context) + 0x3c;
- rss_context = (struct mlx4_en_rss_context *) ptr;
+ rss_context = ptr;
rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
diff --git a/drivers/net/mlx4/en_selftest.c b/drivers/net/mlx4/en_selftest.c
index 191a8dcd8a9..9fdbcecd499 100644
--- a/drivers/net/mlx4/en_selftest.c
+++ b/drivers/net/mlx4/en_selftest.c
@@ -159,7 +159,8 @@ retry_tx:
goto retry_tx;
}
- if (priv->mdev->dev->caps.loopback_support){
+ if (priv->mdev->dev->caps.flags &
+ MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
buf[3] = mlx4_en_test_registers(priv);
buf[4] = mlx4_en_test_loopback(priv);
}
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index b229acf1855..6e03de034ac 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -238,8 +238,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
} else {
if (!tx_info->inl) {
if ((void *) data >= end) {
- data = (struct mlx4_wqe_data_seg *)
- (ring->buf + ((void *) data - end));
+ data = ring->buf + ((void *)data - end);
}
if (tx_info->linear) {
@@ -253,7 +252,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
for (i = 0; i < frags; i++) {
/* Check for wraparound before unmapping */
if ((void *) data >= end)
- data = (struct mlx4_wqe_data_seg *) ring->buf;
+ data = ring->buf;
frag = &skb_shinfo(skb)->frags[i];
pci_unmap_page(mdev->pdev,
(dma_addr_t) be64_to_cpu(data->addr),
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 67a209ba939..7eb8ba822e9 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -75,7 +75,7 @@ MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (defa
} \
} while (0)
-static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
+static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
static const char *fname[] = {
[ 0] = "RC transport",
@@ -99,13 +99,19 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
[21] = "UD multicast support",
[24] = "Demand paging support",
[25] = "Router support",
- [30] = "IBoE support"
+ [30] = "IBoE support",
+ [32] = "Unicast loopback support",
+ [38] = "Wake On LAN support",
+ [40] = "UDP RSS support",
+ [41] = "Unicast VEP steering support",
+ [42] = "Multicast VEP steering support",
+ [48] = "Counters support",
};
int i;
mlx4_dbg(dev, "DEV_CAP flags:\n");
for (i = 0; i < ARRAY_SIZE(fname); ++i)
- if (fname[i] && (flags & (1 << i)))
+ if (fname[i] && (flags & (1LL << i)))
mlx4_dbg(dev, " %s\n", fname[i]);
}
@@ -142,7 +148,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
u8 field;
- u32 field32;
+ u32 field32, flags, ext_flags;
u16 size;
u16 stat_rate;
int err;
@@ -180,8 +186,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
-#define QUERY_DEV_CAP_UDP_RSS_OFFSET 0x42
-#define QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET 0x43
+#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
@@ -199,6 +204,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
+#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
@@ -272,14 +278,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_msg_sz = 1 << (field & 0x1f);
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
- MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
- dev_cap->udp_rss = field & 0x1;
- dev_cap->vep_uc_steering = field & 0x2;
- dev_cap->vep_mc_steering = field & 0x4;
- MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
- dev_cap->loopback_support = field & 0x1;
- dev_cap->wol = field & 0x40;
- MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
+ MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+ MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
+ dev_cap->flags = flags | (u64)ext_flags << 32;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
dev_cap->reserved_uars = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
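The assortment of per-feature fields (udp_rss, vep_uc_steering, loopback_support, wol, ...) is folded into one 64-bit capability word: the original flag dword keeps bits 0-31 and the new extended dword at offset 0x40 is shifted into bits 32-63, which is why dump_dev_cap_flags() now takes a u64 and must test with 1LL << i. Below is a small self-contained illustration of composing and testing such a word; the bit positions follow the names added to fname[] above, while the macro names and sample register values are local to the example.

#include <stdio.h>
#include <stdint.h>

/* Illustrative names; positions follow dump_dev_cap_flags() above. */
#define CAP_UC_LOOPBACK		(1ULL << 32)
#define CAP_WOL			(1ULL << 38)
#define CAP_UDP_RSS		(1ULL << 40)
#define CAP_VEP_UC_STEER	(1ULL << 41)
#define CAP_VEP_MC_STEER	(1ULL << 42)
#define CAP_COUNTERS		(1ULL << 48)

int main(void)
{
	uint32_t flags = 0x00200400;		/* sample low dword  (bits 0-31)  */
	uint32_t ext_flags = 0x00010300;	/* sample high dword (bits 32-63) */

	/* Same composition as dev_cap->flags = flags | (u64)ext_flags << 32 */
	uint64_t caps = flags | (uint64_t)ext_flags << 32;

	/* Bits above 31 need a 64-bit constant: a plain (1 << 41) would be
	 * shifted out of (or be undefined for) a 32-bit int. */
	printf("UDP RSS:  %s\n", (caps & CAP_UDP_RSS) ? "yes" : "no");
	printf("VEP UC:   %s\n", (caps & CAP_VEP_UC_STEER) ? "yes" : "no");
	printf("WoL:      %s\n", (caps & CAP_WOL) ? "yes" : "no");
	printf("Counters: %s\n", (caps & CAP_COUNTERS) ? "yes" : "no");
	return 0;
}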
@@ -356,6 +357,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
+ if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
+ MLX4_GET(dev_cap->max_counters, outbox,
+ QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
for (i = 1; i <= dev_cap->num_ports; ++i) {
@@ -449,6 +453,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
+ mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
dump_dev_cap_flags(dev, dev_cap->flags);
@@ -781,6 +786,10 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
if (enable_qos)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
+ /* enable counters */
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
+
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
@@ -801,7 +810,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
- if (dev->caps.vep_mc_steering)
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET);
MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 88003ebc618..1e8ecc3708e 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -78,12 +78,7 @@ struct mlx4_dev_cap {
u16 wavelength[MLX4_MAX_PORTS + 1];
u64 trans_code[MLX4_MAX_PORTS + 1];
u16 stat_rate_support;
- int udp_rss;
- int loopback_support;
- int vep_uc_steering;
- int vep_mc_steering;
- int wol;
- u32 flags;
+ u64 flags;
int reserved_uars;
int uar_size;
int min_page_sz;
@@ -116,6 +111,7 @@ struct mlx4_dev_cap {
u8 supported_port_types[MLX4_MAX_PORTS + 1];
u8 log_max_macs[MLX4_MAX_PORTS + 1];
u8 log_max_vlans[MLX4_MAX_PORTS + 1];
+ u32 max_counters;
};
struct mlx4_adapter {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 3814fc9b114..f0ee35df4dd 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -143,6 +143,7 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
dev->caps.port_mask |= 1 << (i - 1);
}
+
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
int err;
@@ -226,11 +227,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.bmme_flags = dev_cap->bmme_flags;
dev->caps.reserved_lkey = dev_cap->reserved_lkey;
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
- dev->caps.udp_rss = dev_cap->udp_rss;
- dev->caps.loopback_support = dev_cap->loopback_support;
- dev->caps.vep_uc_steering = dev_cap->vep_uc_steering;
- dev->caps.vep_mc_steering = dev_cap->vep_mc_steering;
- dev->caps.wol = dev_cap->wol;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.log_num_macs = log_num_mac;
@@ -262,6 +258,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
mlx4_set_port_mask(dev);
+ dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
+
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
@@ -839,6 +837,45 @@ err_stop_fw:
return err;
}
+static int mlx4_init_counters_table(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int nent;
+
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+ return -ENOENT;
+
+ nent = dev->caps.max_counters;
+ return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
+}
+
+static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
+{
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
+}
+
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+ return -ENOENT;
+
+ *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
+ if (*idx == -1)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
+
+void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
+{
+ mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
+ return;
+}
+EXPORT_SYMBOL_GPL(mlx4_counter_free);
+
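Counter support follows the usual mlx4 resource pattern: a bitmap sized to caps.max_counters is set up once, mlx4_counter_alloc() hands out a free index (-ENOENT when the device lacks the capability, -ENOMEM when the pool is exhausted) and mlx4_counter_free() returns it. The toy allocator below models those alloc/free semantics in userspace; it is not the mlx4_bitmap implementation, just the contract.

#include <stdio.h>
#include <errno.h>

#define MAX_COUNTERS 8			/* stands in for dev->caps.max_counters */

static unsigned char in_use[MAX_COUNTERS];

static int counter_alloc(unsigned int *idx)
{
	unsigned int i;

	for (i = 0; i < MAX_COUNTERS; i++) {
		if (!in_use[i]) {
			in_use[i] = 1;
			*idx = i;
			return 0;
		}
	}
	return -ENOMEM;			/* pool exhausted */
}

static void counter_free(unsigned int idx)
{
	in_use[idx] = 0;
}

int main(void)
{
	unsigned int a, b;

	if (!counter_alloc(&a) && !counter_alloc(&b))
		printf("allocated counter indices %u and %u\n", a, b);
	counter_free(b);
	counter_free(a);
	return 0;
}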
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -943,6 +980,12 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_qp_table_free;
}
+ err = mlx4_init_counters_table(dev);
+ if (err && err != -ENOENT) {
+ mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
+ goto err_counters_table_free;
+ }
+
for (port = 1; port <= dev->caps.num_ports; port++) {
enum mlx4_port_type port_type = 0;
mlx4_SENSE_PORT(dev, port, &port_type);
@@ -969,6 +1012,9 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err_mcg_table_free:
mlx4_cleanup_mcg_table(dev);
+err_counters_table_free:
+ mlx4_cleanup_counters_table(dev);
+
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -1071,6 +1117,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->port = port;
mlx4_init_mac_table(dev, &info->mac_table);
mlx4_init_vlan_table(dev, &info->vlan_table);
+ info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+ (port - 1) * (1 << log_num_mac);
sprintf(info->dev_name, "mlx4_port%d", port);
info->port_attr.attr.name = info->dev_name;
@@ -1230,11 +1278,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&priv->pgdir_list);
mutex_init(&priv->pgdir_mutex);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &dev->rev_id);
-
INIT_LIST_HEAD(&priv->bf_list);
mutex_init(&priv->bf_mutex);
+ dev->rev_id = pdev->revision;
+
/*
* Now reset the HCA before we touch the PCI capabilities or
* attempt a firmware command, since a boot ROM may have left
@@ -1299,6 +1347,7 @@ err_port:
for (--port; port >= 1; --port)
mlx4_cleanup_port_info(&priv->port[port]);
+ mlx4_cleanup_counters_table(dev);
mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
@@ -1359,6 +1408,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
mlx4_CLOSE_PORT(dev, p);
}
+ mlx4_cleanup_counters_table(dev);
mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index e63c37d6a11..cd1784593a3 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -559,7 +559,8 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_mgm *mgm = mgm_mailbox->buf;
u8 *mgid;
int err;
- u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0;
+ u8 op_mod = (prot == MLX4_PROT_ETH) ?
+ !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -834,7 +835,8 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
- if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
if (prot == MLX4_PROT_ETH)
@@ -853,7 +855,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
- if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
if (prot == MLX4_PROT_ETH) {
@@ -867,7 +870,7 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!dev->caps.vep_mc_steering)
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
@@ -877,7 +880,7 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!dev->caps.vep_mc_steering)
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
@@ -887,7 +890,7 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!dev->caps.vep_mc_steering)
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
@@ -897,7 +900,7 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!dev->caps.vep_mc_steering)
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index dd7d745fbab..a2fcd8402d3 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -48,8 +48,8 @@
#include <linux/mlx4/doorbell.h>
#define DRV_NAME "mlx4_core"
-#define DRV_VERSION "0.01"
-#define DRV_RELDATE "May 1, 2007"
+#define DRV_VERSION "1.0"
+#define DRV_RELDATE "July 14, 2011"
enum {
MLX4_HCR_BASE = 0x80680,
@@ -342,6 +342,7 @@ struct mlx4_priv {
struct mlx4_srq_table srq_table;
struct mlx4_qp_table qp_table;
struct mlx4_mcg_table mcg_table;
+ struct mlx4_bitmap counters_bitmap;
struct mlx4_catas_err catas_err;
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 0b5150df058..ed84811766e 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -34,10 +34,12 @@
#ifndef _MLX4_EN_H_
#define _MLX4_EN_H_
+#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
@@ -418,7 +420,7 @@ struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
struct net_device *dev;
- struct vlan_group *vlgrp;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device_stats stats;
struct net_device_stats ret_stats;
struct mlx4_en_port_state port_state;
@@ -553,7 +555,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
-int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp);
+int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 8856659fb43..609e0ec14ce 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -146,7 +146,7 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
int i, err = 0;
int free = -1;
- if (dev->caps.vep_uc_steering) {
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
if (!err) {
entry = kmalloc(sizeof *entry, GFP_KERNEL);
@@ -203,7 +203,7 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
goto out;
}
- if (!dev->caps.vep_uc_steering)
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
*qpn = info->base_qpn + free;
++table->total;
out:
@@ -243,7 +243,7 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
int index = qpn - info->base_qpn;
struct mlx4_mac_entry *entry;
- if (dev->caps.vep_uc_steering) {
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (entry) {
mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
@@ -258,9 +258,12 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
if (validate_index(dev, table, index))
goto out;
- table->entries[index] = 0;
- mlx4_set_port_mac_table(dev, port, table->entries);
- --table->total;
+ /* Only free this address once its reference count drops to zero */
+ if (!(--table->refs[index])) {
+ table->entries[index] = 0;
+ mlx4_set_port_mac_table(dev, port, table->entries);
+ --table->total;
+ }
out:
mutex_unlock(&table->mutex);
}
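mlx4_unregister_mac() used to wipe the table slot unconditionally; with per-entry reference counts a slot is only cleared, and the port MAC table only rewritten, once the last user of that address is gone. A compact model of the release rule (the refs handling mirrors table->refs[index] in the hunk above):

#include <stdio.h>

struct mac_slot {
	unsigned long long mac;		/* 0 means "slot empty" */
	int refs;
};

static void register_mac(struct mac_slot *slot, unsigned long long mac)
{
	if (!slot->refs)
		slot->mac = mac;	/* program the hardware entry once */
	slot->refs++;
}

/* Mirror of the new unregister path: only the final put clears the entry. */
static void unregister_mac(struct mac_slot *slot)
{
	if (!(--slot->refs)) {
		slot->mac = 0;		/* would rewrite the port MAC table here */
		printf("entry cleared\n");
	} else {
		printf("entry kept, %d reference(s) left\n", slot->refs);
	}
}

int main(void)
{
	struct mac_slot slot = { 0, 0 };

	register_mac(&slot, 0x001122334455ULL);
	register_mac(&slot, 0x001122334455ULL);
	unregister_mac(&slot);		/* kept  */
	unregister_mac(&slot);		/* freed */
	return 0;
}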
@@ -274,7 +277,7 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wra
struct mlx4_mac_entry *entry;
int err;
- if (dev->caps.vep_uc_steering) {
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (!entry)
return -EINVAL;
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
index e5741dab382..11e7c1cb99b 100644
--- a/drivers/net/mlx4/reset.c
+++ b/drivers/net/mlx4/reset.c
@@ -77,7 +77,7 @@ int mlx4_reset(struct mlx4_dev *dev)
goto out;
}
- pcie_cap = pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
+ pcie_cap = pci_pcie_cap(dev->pdev);
for (i = 0; i < 64; ++i) {
if (i == 22 || i == 23)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index a5d9b1c310b..259699983ca 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -840,6 +840,8 @@ no_csum:
__skb_queue_tail(&txq->tx_skb, skb);
+ skb_tx_timestamp(skb);
+
/* ensure all other descriptors are written before first cmd_sts */
wmb();
desc->cmd_sts = cmd_sts;
@@ -859,7 +861,7 @@ no_csum:
static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- int queue;
+ int length, queue;
struct tx_queue *txq;
struct netdev_queue *nq;
@@ -881,10 +883,12 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+ length = skb->len;
+
if (!txq_submit_skb(txq, skb)) {
int entries_left;
- txq->tx_bytes += skb->len;
+ txq->tx_bytes += length;
txq->tx_packets++;
entries_left = txq->tx_ring_size - txq->tx_desc_count;
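The tx_bytes accounting change is a use-after-free guard: once txq_submit_skb() has queued the skb it no longer belongs to this function (the added skb_tx_timestamp() call likewise happens before the descriptor is handed to hardware), so skb->len is snapshotted into a local before submission and the byte counter is updated from the copy. The same ownership rule reduced to plain C, with a callee that frees the buffer it is given:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Callee takes ownership of buf and frees it, like a submitted skb. */
static int submit(char *buf)
{
	/* ... hand the buffer to "hardware" ... */
	free(buf);
	return 0;
}

int main(void)
{
	char *pkt = strdup("example packet");
	size_t len = strlen(pkt);	/* snapshot before ownership transfer */

	if (submit(pkt) == 0) {
		/* Touching 'pkt' here would be a use-after-free;
		 * account with the saved length instead. */
		printf("tx_bytes += %zu\n", len);
	}
	return 0;
}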
@@ -2593,7 +2597,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
if (msp == NULL)
goto out;
- msp->base = ioremap(res->start, res->end - res->start + 1);
+ msp->base = ioremap(res->start, resource_size(res));
if (msp->base == NULL)
goto out_free;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index bf84849600c..1d2247554a3 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1,7 +1,7 @@
/*************************************************************************
* myri10ge.c: Myricom Myri-10G Ethernet driver.
*
- * Copyright (C) 2005 - 2009 Myricom, Inc.
+ * Copyright (C) 2005 - 2011 Myricom, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -79,7 +79,7 @@
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
-#define MYRI10GE_VERSION_STR "1.5.2-1.459"
+#define MYRI10GE_VERSION_STR "1.5.3-1.534"
MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -193,6 +193,7 @@ struct myri10ge_slice_state {
int watchdog_tx_done;
int watchdog_tx_req;
int watchdog_rx_done;
+ int stuck;
#ifdef CONFIG_MYRI10GE_DCA
int cached_dca_tag;
int cpu;
@@ -210,7 +211,6 @@ struct myri10ge_priv {
int big_bytes;
int max_intr_slots;
struct net_device *dev;
- spinlock_t stats_lock;
u8 __iomem *sram;
int sram_size;
unsigned long board_span;
@@ -377,7 +377,8 @@ static inline void put_be32(__be32 val, __be32 __iomem * p)
__raw_writel((__force __u32) val, (__force void __iomem *)p);
}
-static struct net_device_stats *myri10ge_get_stats(struct net_device *dev);
+static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
{
@@ -1013,7 +1014,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
cmd.data2 = i;
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
&cmd, 0);
- };
+ }
status |=
myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
@@ -1080,11 +1081,14 @@ static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
int ret, cap, err;
u16 ctl;
- cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ cap = pci_pcie_cap(pdev);
if (!cap)
return 0;
err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
+ if (err)
+ return 0;
+
ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
if (ret != on) {
ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
@@ -1139,20 +1143,19 @@ static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
mgp->ss[i].cpu = -1;
mgp->ss[i].cached_dca_tag = -1;
myri10ge_update_dca(&mgp->ss[i]);
- }
+ }
}
static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
{
struct pci_dev *pdev = mgp->pdev;
- int err;
if (!mgp->dca_enabled)
return;
mgp->dca_enabled = 0;
if (mgp->relaxed_order)
myri10ge_toggle_relaxed(pdev, 1);
- err = dca_remove_requester(&pdev->dev);
+ dca_remove_requester(&pdev->dev);
}
static int myri10ge_notify_dca_device(struct device *dev, void *data)
@@ -1313,7 +1316,7 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
static inline int
myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
- int lro_enabled)
+ bool lro_enabled)
{
struct myri10ge_priv *mgp = ss->mgp;
struct sk_buff *skb;
@@ -1461,7 +1464,8 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
/* start the queue if we've stopped it */
if (netif_tx_queue_stopped(dev_queue) &&
- tx->req - tx->done < (tx->mask >> 1)) {
+ tx->req - tx->done < (tx->mask >> 1) &&
+ ss->mgp->running == MYRI10GE_ETH_RUNNING) {
tx->wake_queue++;
netif_tx_wake_queue(dev_queue);
}
@@ -1472,11 +1476,9 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
{
struct myri10ge_rx_done *rx_done = &ss->rx_done;
struct myri10ge_priv *mgp = ss->mgp;
-
unsigned long rx_bytes = 0;
unsigned long rx_packets = 0;
unsigned long rx_ok;
-
int idx = rx_done->idx;
int cnt = rx_done->cnt;
int work_done = 0;
@@ -1529,16 +1531,14 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
mgp->link_state = link_up;
if (mgp->link_state == MXGEFW_LINK_UP) {
- if (netif_msg_link(mgp))
- netdev_info(mgp->dev, "link up\n");
+ netif_info(mgp, link, mgp->dev, "link up\n");
netif_carrier_on(mgp->dev);
mgp->link_changes++;
} else {
- if (netif_msg_link(mgp))
- netdev_info(mgp->dev, "link %s\n",
- link_up == MXGEFW_LINK_MYRINET ?
+ netif_info(mgp, link, mgp->dev, "link %s\n",
+ (link_up == MXGEFW_LINK_MYRINET ?
"mismatch (Myrinet detected)" :
- "down");
+ "down"));
netif_carrier_off(mgp->dev);
mgp->link_changes++;
}
@@ -1619,7 +1619,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
if (send_done_count != tx->pkt_done)
myri10ge_tx_done(ss, (int)send_done_count);
if (unlikely(i > myri10ge_max_irq_loops)) {
- netdev_err(mgp->dev, "irq stuck?\n");
+ netdev_warn(mgp->dev, "irq stuck?\n");
stats->valid = 0;
schedule_work(&mgp->watchdog_work);
}
@@ -1783,9 +1783,8 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
"----------- slice ---------",
"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
"rx_small_cnt", "rx_big_cnt",
- "wake_queue", "stop_queue", "tx_linearized", "LRO aggregated",
- "LRO flushed",
- "LRO avg aggr", "LRO no_desc"
+ "wake_queue", "stop_queue", "tx_linearized",
+ "LRO aggregated", "LRO flushed", "LRO avg aggr", "LRO no_desc",
};
#define MYRI10GE_NET_STATS_LEN 21
@@ -1831,13 +1830,15 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
struct myri10ge_slice_state *ss;
+ struct rtnl_link_stats64 link_stats;
int slice;
int i;
/* force stats update */
- (void)myri10ge_get_stats(netdev);
+ memset(&link_stats, 0, sizeof(link_stats));
+ (void)myri10ge_get_stats(netdev, &link_stats);
for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
- data[i] = ((unsigned long *)&netdev->stats)[i];
+ data[i] = ((u64 *)&link_stats)[i];
data[i++] = (unsigned int)mgp->tx_boundary;
data[i++] = (unsigned int)mgp->wc_enabled;
@@ -1907,6 +1908,60 @@ static u32 myri10ge_get_msglevel(struct net_device *netdev)
return mgp->msg_enable;
}
+/*
+ * Use a low-level command to change the LED behavior. Rather than
+ * blinking (which is the normal case), when identify is used, the
+ * yellow LED turns solid.
+ */
+static int myri10ge_led(struct myri10ge_priv *mgp, int on)
+{
+ struct mcp_gen_header *hdr;
+ struct device *dev = &mgp->pdev->dev;
+ size_t hdr_off, pattern_off, hdr_len;
+ u32 pattern = 0xfffffffe;
+
+ /* find running firmware header */
+ hdr_off = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
+ if ((hdr_off & 3) || hdr_off + sizeof(*hdr) > mgp->sram_size) {
+ dev_err(dev, "Running firmware has bad header offset (%d)\n",
+ (int)hdr_off);
+ return -EIO;
+ }
+ hdr_len = swab32(readl(mgp->sram + hdr_off +
+ offsetof(struct mcp_gen_header, header_length)));
+ pattern_off = hdr_off + offsetof(struct mcp_gen_header, led_pattern);
+ if (pattern_off >= (hdr_len + hdr_off)) {
+ dev_info(dev, "Firmware does not support LED identification\n");
+ return -EINVAL;
+ }
+ if (!on)
+ pattern = swab32(readl(mgp->sram + pattern_off + 4));
+ writel(htonl(pattern), mgp->sram + pattern_off);
+ return 0;
+}
+
+static int
+myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+{
+ struct myri10ge_priv *mgp = netdev_priv(netdev);
+ int rc;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ rc = myri10ge_led(mgp, 1);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ rc = myri10ge_led(mgp, 0);
+ break;
+
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
static const struct ethtool_ops myri10ge_ethtool_ops = {
.get_settings = myri10ge_get_settings,
.get_drvinfo = myri10ge_get_drvinfo,
@@ -1921,6 +1976,7 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
.get_ethtool_stats = myri10ge_get_ethtool_stats,
.set_msglevel = myri10ge_set_msglevel,
.get_msglevel = myri10ge_get_msglevel,
+ .set_phys_id = myri10ge_phys_id,
};
static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
@@ -2000,8 +2056,12 @@ static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
ss->rx_small.watchdog_needed = 0;
ss->rx_big.watchdog_needed = 0;
- myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
- mgp->small_bytes + MXGEFW_PAD, 0);
+ if (mgp->small_bytes == 0) {
+ ss->rx_small.fill_cnt = ss->rx_small.mask + 1;
+ } else {
+ myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
+ mgp->small_bytes + MXGEFW_PAD, 0);
+ }
if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
@@ -2027,6 +2087,8 @@ abort_with_rx_big_ring:
}
abort_with_rx_small_ring:
+ if (mgp->small_bytes == 0)
+ ss->rx_small.fill_cnt = ss->rx_small.cnt;
for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
int idx = i & ss->rx_small.mask;
myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
@@ -2077,6 +2139,8 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
put_page(ss->rx_big.info[idx].page);
}
+ if (mgp->small_bytes == 0)
+ ss->rx_small.fill_cnt = ss->rx_small.cnt;
for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
idx = i & ss->rx_small.mask;
if (i == ss->rx_small.fill_cnt - 1)
@@ -2255,7 +2319,7 @@ myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
*ip_hdr = iph;
if (iph->protocol != IPPROTO_TCP)
return -1;
- if (iph->frag_off & htons(IP_MF | IP_OFFSET))
+ if (ip_is_fragment(iph))
return -1;
*hdr_flags |= LRO_TCP;
*tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
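The open-coded test is replaced by the ip_is_fragment() helper, which performs the identical check: a packet is (part of) a fragment if the More-Fragments flag or a non-zero fragment offset is present in frag_off, and LRO must not aggregate such packets. A standalone equivalent is shown below; the IP_MF/IP_OFFSET values are written out locally (they match the kernel's definitions) and frag_off is kept in network byte order as it is on the wire.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define IP_MF		0x2000		/* more fragments follow */
#define IP_OFFSET	0x1FFF		/* fragment offset mask  */

struct iphdr_frag {
	uint16_t frag_off;		/* network byte order */
};

/* Same test the kernel helper applies to a struct iphdr */
static int ip_is_fragment(const struct iphdr_frag *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}

int main(void)
{
	struct iphdr_frag first  = { htons(IP_MF) };	/* first fragment            */
	struct iphdr_frag middle = { htons(0x00b9) };	/* non-zero offset           */
	struct iphdr_frag whole  = { htons(0x4000) };	/* only DF set, not a frag   */

	printf("first: %d, middle: %d, whole: %d\n",
	       ip_is_fragment(&first), ip_is_fragment(&middle),
	       ip_is_fragment(&whole));
	return 0;
}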
@@ -2414,7 +2478,7 @@ static int myri10ge_open(struct net_device *dev)
mgp->small_bytes = VLAN_ETH_FRAME_LEN;
/* Override the small buffer size? */
- if (myri10ge_small_bytes > 0)
+ if (myri10ge_small_bytes >= 0)
mgp->small_bytes = myri10ge_small_bytes;
/* Firmware needs the big buff size as a power of 2. Lie and
@@ -2976,15 +3040,13 @@ drop:
return NETDEV_TX_OK;
}
-static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
- struct myri10ge_priv *mgp = netdev_priv(dev);
- struct myri10ge_slice_netstats *slice_stats;
- struct net_device_stats *stats = &dev->stats;
+ const struct myri10ge_priv *mgp = netdev_priv(dev);
+ const struct myri10ge_slice_netstats *slice_stats;
int i;
- spin_lock(&mgp->stats_lock);
- memset(stats, 0, sizeof(*stats));
for (i = 0; i < mgp->num_slices; i++) {
slice_stats = &mgp->ss[i].stats;
stats->rx_packets += slice_stats->rx_packets;
@@ -2994,7 +3056,6 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
stats->rx_dropped += slice_stats->rx_dropped;
stats->tx_dropped += slice_stats->tx_dropped;
}
- spin_unlock(&mgp->stats_lock);
return stats;
}
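Moving from ndo_get_stats to ndo_get_stats64 lets the stats_lock go away entirely: the core hands the driver a pre-zeroed rtnl_link_stats64 buffer (hence the dropped memset) and the handler merely sums the per-slice counters, each of which is written only by its own slice, so an unlocked read is safe apart from momentary skew between fields. The aggregation step, modelled as plain C with simplified structures:

#include <stdio.h>

#define NUM_SLICES 4

/* Per-slice counters; each slice is the sole writer of its own entry. */
struct slice_stats {
	unsigned long long rx_packets, rx_bytes;
	unsigned long long tx_packets, tx_bytes;
};

static struct slice_stats slices[NUM_SLICES];

/* Caller supplies (and has zeroed) the result buffer, as the core does. */
static void get_stats(struct slice_stats *total)
{
	int i;

	for (i = 0; i < NUM_SLICES; i++) {
		total->rx_packets += slices[i].rx_packets;
		total->rx_bytes   += slices[i].rx_bytes;
		total->tx_packets += slices[i].tx_packets;
		total->tx_bytes   += slices[i].tx_bytes;
	}
}

int main(void)
{
	struct slice_stats total = { 0 };

	slices[0].rx_packets = 10;
	slices[1].rx_packets = 32;
	get_stats(&total);
	printf("rx_packets = %llu\n", total.rx_packets);
	return 0;
}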
@@ -3127,7 +3188,7 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
{
struct pci_dev *bridge = mgp->pdev->bus->self;
struct device *dev = &mgp->pdev->dev;
- unsigned cap;
+ int cap;
unsigned err_cap;
u16 val;
u8 ext_type;
@@ -3137,7 +3198,7 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
return;
/* check that the bridge is a root port */
- cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+ cap = pci_pcie_cap(bridge);
pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val);
ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
@@ -3155,8 +3216,7 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
" to force ECRC\n");
return;
}
- cap =
- pci_find_capability(bridge, PCI_CAP_ID_EXP);
+ cap = pci_pcie_cap(bridge);
pci_read_config_word(bridge,
cap + PCI_CAP_FLAGS, &val);
ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
@@ -3266,7 +3326,6 @@ abort:
/* fall back to using the unaligned firmware */
mgp->tx_boundary = 2048;
set_fw_name(mgp, myri10ge_fw_unaligned, false);
-
}
static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
@@ -3277,7 +3336,7 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
int link_width, exp_cap;
u16 lnk;
- exp_cap = pci_find_capability(mgp->pdev, PCI_CAP_ID_EXP);
+ exp_cap = pci_pcie_cap(mgp->pdev);
pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
link_width = (lnk >> 4) & 0x3f;
@@ -3327,6 +3386,26 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
mgp->fw_name);
}
+static void myri10ge_mask_surprise_down(struct pci_dev *pdev)
+{
+ struct pci_dev *bridge = pdev->bus->self;
+ int cap;
+ u32 mask;
+
+ if (bridge == NULL)
+ return;
+
+ cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
+ if (cap) {
+ /* An SRAM parity error can cause a surprise link
+ * down; since we expect and can recover from SRAM
+ * parity errors, mask surprise link down events */
+ pci_read_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, &mask);
+ mask |= 0x20;
+ pci_write_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, mask);
+ }
+}
+
#ifdef CONFIG_PM
static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
{
@@ -3422,6 +3501,42 @@ static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
return reboot;
}
+static void
+myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed,
+ int *busy_slice_cnt, u32 rx_pause_cnt)
+{
+ struct myri10ge_priv *mgp = ss->mgp;
+ int slice = ss - mgp->ss;
+
+ if (ss->tx.req != ss->tx.done &&
+ ss->tx.done == ss->watchdog_tx_done &&
+ ss->watchdog_tx_req != ss->watchdog_tx_done) {
+ /* nic seems like it might be stuck.. */
+ if (rx_pause_cnt != mgp->watchdog_pause) {
+ if (net_ratelimit())
+ netdev_warn(mgp->dev, "slice %d: TX paused, "
+ "check link partner\n", slice);
+ } else {
+ netdev_warn(mgp->dev,
+ "slice %d: TX stuck %d %d %d %d %d %d\n",
+ slice, ss->tx.queue_active, ss->tx.req,
+ ss->tx.done, ss->tx.pkt_start,
+ ss->tx.pkt_done,
+ (int)ntohl(mgp->ss[slice].fw_stats->
+ send_done_count));
+ *reset_needed = 1;
+ ss->stuck = 1;
+ }
+ }
+ if (ss->watchdog_tx_done != ss->tx.done ||
+ ss->watchdog_rx_done != ss->rx_done.cnt) {
+ *busy_slice_cnt += 1;
+ }
+ ss->watchdog_tx_done = ss->tx.done;
+ ss->watchdog_tx_req = ss->tx.req;
+ ss->watchdog_rx_done = ss->rx_done.cnt;
+}
+
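myri10ge_check_slice() centralises the stuck-slice heuristic that both the watchdog timer and the reset work item need: a slice is suspect when it has outstanding TX work, the completion index has not moved since the previous tick, and the previous tick already saw outstanding work; a changing pause-frame counter downgrades that to a "check link partner" warning instead of a reset. The snapshot-and-compare core of that test, reduced to a runnable model:

#include <stdio.h>

struct slice {
	unsigned int req, done;			/* live TX produce/complete indices */
	unsigned int last_req, last_done;	/* snapshot taken at the last tick  */
};

/* One watchdog tick for one slice: returns 1 when the slice looks stuck
 * (outstanding work and no completion progress across two ticks). */
static int check_slice(struct slice *ss)
{
	int stuck = ss->req != ss->done &&
		    ss->done == ss->last_done &&
		    ss->last_req != ss->last_done;

	ss->last_req = ss->req;			/* take the new snapshot */
	ss->last_done = ss->done;
	return stuck;
}

int main(void)
{
	struct slice ss = { 0, 0, 0, 0 };

	ss.req = 5;				/* work queued, nothing completes */
	printf("tick 1: stuck=%d\n", check_slice(&ss));	/* 0: first sighting    */
	printf("tick 2: stuck=%d\n", check_slice(&ss));	/* 1: still no progress */
	ss.done = 5;				/* completions catch up */
	printf("tick 3: stuck=%d\n", check_slice(&ss));	/* 0 */
	return 0;
}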
/*
* This watchdog is used to check whether the board has suffered
* from a parity error and needs to be recovered.
@@ -3430,10 +3545,12 @@ static void myri10ge_watchdog(struct work_struct *work)
{
struct myri10ge_priv *mgp =
container_of(work, struct myri10ge_priv, watchdog_work);
- struct myri10ge_tx_buf *tx;
- u32 reboot;
+ struct myri10ge_slice_state *ss;
+ u32 reboot, rx_pause_cnt;
int status, rebooted;
int i;
+ int reset_needed = 0;
+ int busy_slice_cnt = 0;
u16 cmd, vendor;
mgp->watchdog_resets++;
@@ -3445,8 +3562,7 @@ static void myri10ge_watchdog(struct work_struct *work)
* For now, just report it */
reboot = myri10ge_read_reboot(mgp);
netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
- reboot,
- myri10ge_reset_recover ? "" : " not");
+ reboot, myri10ge_reset_recover ? "" : " not");
if (myri10ge_reset_recover == 0)
return;
rtnl_lock();
@@ -3478,23 +3594,24 @@ static void myri10ge_watchdog(struct work_struct *work)
return;
}
}
- /* Perhaps it is a software error. Try to reset */
-
- netdev_err(mgp->dev, "device timeout, resetting\n");
+ /* Perhaps it is a software error. See if stuck slice
+ * has recovered, reset if not */
+ rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
for (i = 0; i < mgp->num_slices; i++) {
- tx = &mgp->ss[i].tx;
- netdev_err(mgp->dev, "(%d): %d %d %d %d %d %d\n",
- i, tx->queue_active, tx->req,
- tx->done, tx->pkt_start, tx->pkt_done,
- (int)ntohl(mgp->ss[i].fw_stats->
- send_done_count));
- msleep(2000);
- netdev_info(mgp->dev, "(%d): %d %d %d %d %d %d\n",
- i, tx->queue_active, tx->req,
- tx->done, tx->pkt_start, tx->pkt_done,
- (int)ntohl(mgp->ss[i].fw_stats->
- send_done_count));
+ ss = mgp->ss;
+ if (ss->stuck) {
+ myri10ge_check_slice(ss, &reset_needed,
+ &busy_slice_cnt,
+ rx_pause_cnt);
+ ss->stuck = 0;
+ }
}
+ if (!reset_needed) {
+ netdev_dbg(mgp->dev, "not resetting\n");
+ return;
+ }
+
+ netdev_err(mgp->dev, "device timeout, resetting\n");
}
if (!rebooted) {
@@ -3547,27 +3664,8 @@ static void myri10ge_watchdog_timer(unsigned long arg)
myri10ge_fill_thresh)
ss->rx_big.watchdog_needed = 0;
}
-
- if (ss->tx.req != ss->tx.done &&
- ss->tx.done == ss->watchdog_tx_done &&
- ss->watchdog_tx_req != ss->watchdog_tx_done) {
- /* nic seems like it might be stuck.. */
- if (rx_pause_cnt != mgp->watchdog_pause) {
- if (net_ratelimit())
- netdev_err(mgp->dev, "slice %d: TX paused, check link partner\n",
- i);
- } else {
- netdev_warn(mgp->dev, "slice %d stuck:", i);
- reset_needed = 1;
- }
- }
- if (ss->watchdog_tx_done != ss->tx.done ||
- ss->watchdog_rx_done != ss->rx_done.cnt) {
- busy_slice_cnt++;
- }
- ss->watchdog_tx_done = ss->tx.done;
- ss->watchdog_tx_req = ss->tx.req;
- ss->watchdog_rx_done = ss->rx_done.cnt;
+ myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt,
+ rx_pause_cnt);
}
/* if we've sent or received no traffic, poll the NIC to
* ensure it is still there. Otherwise, we risk not noticing
@@ -3613,8 +3711,8 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp)
dma_free_coherent(&pdev->dev, bytes,
ss->fw_stats, ss->fw_stats_bus);
ss->fw_stats = NULL;
- netif_napi_del(&ss->napi);
}
+ netif_napi_del(&ss->napi);
}
kfree(mgp->ss);
mgp->ss = NULL;
@@ -3790,7 +3888,7 @@ static const struct net_device_ops myri10ge_netdev_ops = {
.ndo_open = myri10ge_open,
.ndo_stop = myri10ge_close,
.ndo_start_xmit = myri10ge_xmit,
- .ndo_get_stats = myri10ge_get_stats,
+ .ndo_get_stats64 = myri10ge_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = myri10ge_change_mtu,
.ndo_fix_features = myri10ge_fix_features,
@@ -3845,6 +3943,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto abort_with_enabled;
}
+ myri10ge_mask_surprise_down(pdev);
pci_set_master(pdev);
dac_enabled = 1;
status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -3964,7 +4063,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
(unsigned long)mgp);
- spin_lock_init(&mgp->stats_lock);
SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
status = register_netdev(netdev);
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
index 62a1cbab603..7ec4b864a55 100644
--- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
+++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
@@ -45,6 +45,8 @@ struct mcp_gen_header {
unsigned bss_addr; /* start of bss */
unsigned features;
unsigned ee_hdr_addr;
+ unsigned led_pattern;
+ unsigned led_pattern_dflt;
/* 8 */
};
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
deleted file mode 100644
index 53aeea4b536..00000000000
--- a/drivers/net/myri_sbus.c
+++ /dev/null
@@ -1,1187 +0,0 @@
-/* myri_sbus.c: MyriCOM MyriNET SBUS card driver.
- *
- * Copyright (C) 1996, 1999, 2006, 2008 David S. Miller (davem@davemloft.net)
- */
-
-static char version[] =
- "myri_sbus.c:v2.0 June 23, 2006 David S. Miller (davem@davemloft.net)\n";
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-#include <linux/dma-mapping.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/firmware.h>
-#include <linux/gfp.h>
-
-#include <net/dst.h>
-#include <net/arp.h>
-#include <net/sock.h>
-#include <net/ipv6.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include <asm/idprom.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-#include <asm/auxio.h>
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-
-#include "myri_sbus.h"
-
-/* #define DEBUG_DETECT */
-/* #define DEBUG_IRQ */
-/* #define DEBUG_TRANSMIT */
-/* #define DEBUG_RECEIVE */
-/* #define DEBUG_HEADER */
-
-#ifdef DEBUG_DETECT
-#define DET(x) printk x
-#else
-#define DET(x)
-#endif
-
-#ifdef DEBUG_IRQ
-#define DIRQ(x) printk x
-#else
-#define DIRQ(x)
-#endif
-
-#ifdef DEBUG_TRANSMIT
-#define DTX(x) printk x
-#else
-#define DTX(x)
-#endif
-
-#ifdef DEBUG_RECEIVE
-#define DRX(x) printk x
-#else
-#define DRX(x)
-#endif
-
-#ifdef DEBUG_HEADER
-#define DHDR(x) printk x
-#else
-#define DHDR(x)
-#endif
-
-/* Firmware name */
-#define FWNAME "myricom/lanai.bin"
-
-static void myri_reset_off(void __iomem *lp, void __iomem *cregs)
-{
- /* Clear IRQ mask. */
- sbus_writel(0, lp + LANAI_EIMASK);
-
- /* Turn RESET function off. */
- sbus_writel(CONTROL_ROFF, cregs + MYRICTRL_CTRL);
-}
-
-static void myri_reset_on(void __iomem *cregs)
-{
- /* Enable RESET function. */
- sbus_writel(CONTROL_RON, cregs + MYRICTRL_CTRL);
-
- /* Disable IRQ's. */
- sbus_writel(CONTROL_DIRQ, cregs + MYRICTRL_CTRL);
-}
-
-static void myri_disable_irq(void __iomem *lp, void __iomem *cregs)
-{
- sbus_writel(CONTROL_DIRQ, cregs + MYRICTRL_CTRL);
- sbus_writel(0, lp + LANAI_EIMASK);
- sbus_writel(ISTAT_HOST, lp + LANAI_ISTAT);
-}
-
-static void myri_enable_irq(void __iomem *lp, void __iomem *cregs)
-{
- sbus_writel(CONTROL_EIRQ, cregs + MYRICTRL_CTRL);
- sbus_writel(ISTAT_HOST, lp + LANAI_EIMASK);
-}
-
-static inline void bang_the_chip(struct myri_eth *mp)
-{
- struct myri_shmem __iomem *shmem = mp->shmem;
- void __iomem *cregs = mp->cregs;
-
- sbus_writel(1, &shmem->send);
- sbus_writel(CONTROL_WON, cregs + MYRICTRL_CTRL);
-}
-
-static int myri_do_handshake(struct myri_eth *mp)
-{
- struct myri_shmem __iomem *shmem = mp->shmem;
- void __iomem *cregs = mp->cregs;
- struct myri_channel __iomem *chan = &shmem->channel;
- int tick = 0;
-
- DET(("myri_do_handshake: "));
- if (sbus_readl(&chan->state) == STATE_READY) {
- DET(("Already STATE_READY, failed.\n"));
- return -1; /* We're hosed... */
- }
-
- myri_disable_irq(mp->lregs, cregs);
-
- while (tick++ < 25) {
- u32 softstate;
-
- /* Wake it up. */
- DET(("shakedown, CONTROL_WON, "));
- sbus_writel(1, &shmem->shakedown);
- sbus_writel(CONTROL_WON, cregs + MYRICTRL_CTRL);
-
- softstate = sbus_readl(&chan->state);
- DET(("chanstate[%08x] ", softstate));
- if (softstate == STATE_READY) {
- DET(("wakeup successful, "));
- break;
- }
-
- if (softstate != STATE_WFN) {
- DET(("not WFN setting that, "));
- sbus_writel(STATE_WFN, &chan->state);
- }
-
- udelay(20);
- }
-
- myri_enable_irq(mp->lregs, cregs);
-
- if (tick > 25) {
- DET(("25 ticks we lose, failure.\n"));
- return -1;
- }
- DET(("success\n"));
- return 0;
-}
-
-static int __devinit myri_load_lanai(struct myri_eth *mp)
-{
- const struct firmware *fw;
- struct net_device *dev = mp->dev;
- struct myri_shmem __iomem *shmem = mp->shmem;
- void __iomem *rptr;
- int i, lanai4_data_size;
-
- myri_disable_irq(mp->lregs, mp->cregs);
- myri_reset_on(mp->cregs);
-
- rptr = mp->lanai;
- for (i = 0; i < mp->eeprom.ramsz; i++)
- sbus_writeb(0, rptr + i);
-
- if (mp->eeprom.cpuvers >= CPUVERS_3_0)
- sbus_writel(mp->eeprom.cval, mp->lregs + LANAI_CVAL);
-
- i = request_firmware(&fw, FWNAME, &mp->myri_op->dev);
- if (i) {
- printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
- FWNAME, i);
- return i;
- }
- if (fw->size < 2) {
- printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
- fw->size, FWNAME);
- release_firmware(fw);
- return -EINVAL;
- }
- lanai4_data_size = fw->data[0] << 8 | fw->data[1];
-
- /* Load executable code. */
- for (i = 2; i < fw->size; i++)
- sbus_writeb(fw->data[i], rptr++);
-
- /* Load data segment. */
- for (i = 0; i < lanai4_data_size; i++)
- sbus_writeb(0, rptr++);
-
- /* Set device address. */
- sbus_writeb(0, &shmem->addr[0]);
- sbus_writeb(0, &shmem->addr[1]);
- for (i = 0; i < 6; i++)
- sbus_writeb(dev->dev_addr[i],
- &shmem->addr[i + 2]);
-
- /* Set SBUS bursts and interrupt mask. */
- sbus_writel(((mp->myri_bursts & 0xf8) >> 3), &shmem->burst);
- sbus_writel(SHMEM_IMASK_RX, &shmem->imask);
-
- /* Release the LANAI. */
- myri_disable_irq(mp->lregs, mp->cregs);
- myri_reset_off(mp->lregs, mp->cregs);
- myri_disable_irq(mp->lregs, mp->cregs);
-
- /* Wait for the reset to complete. */
- for (i = 0; i < 5000; i++) {
- if (sbus_readl(&shmem->channel.state) != STATE_READY)
- break;
- else
- udelay(10);
- }
-
- if (i == 5000)
- printk(KERN_ERR "myricom: Chip would not reset after firmware load.\n");
-
- i = myri_do_handshake(mp);
- if (i)
- printk(KERN_ERR "myricom: Handshake with LANAI failed.\n");
-
- if (mp->eeprom.cpuvers == CPUVERS_4_0)
- sbus_writel(0, mp->lregs + LANAI_VERS);
-
- release_firmware(fw);
- return i;
-}
-
-static void myri_clean_rings(struct myri_eth *mp)
-{
- struct sendq __iomem *sq = mp->sq;
- struct recvq __iomem *rq = mp->rq;
- int i;
-
- sbus_writel(0, &rq->tail);
- sbus_writel(0, &rq->head);
- for (i = 0; i < (RX_RING_SIZE+1); i++) {
- if (mp->rx_skbs[i] != NULL) {
- struct myri_rxd __iomem *rxd = &rq->myri_rxd[i];
- u32 dma_addr;
-
- dma_addr = sbus_readl(&rxd->myri_scatters[0].addr);
- dma_unmap_single(&mp->myri_op->dev, dma_addr,
- RX_ALLOC_SIZE, DMA_FROM_DEVICE);
- dev_kfree_skb(mp->rx_skbs[i]);
- mp->rx_skbs[i] = NULL;
- }
- }
-
- mp->tx_old = 0;
- sbus_writel(0, &sq->tail);
- sbus_writel(0, &sq->head);
- for (i = 0; i < TX_RING_SIZE; i++) {
- if (mp->tx_skbs[i] != NULL) {
- struct sk_buff *skb = mp->tx_skbs[i];
- struct myri_txd __iomem *txd = &sq->myri_txd[i];
- u32 dma_addr;
-
- dma_addr = sbus_readl(&txd->myri_gathers[0].addr);
- dma_unmap_single(&mp->myri_op->dev, dma_addr,
- (skb->len + 3) & ~3,
- DMA_TO_DEVICE);
- dev_kfree_skb(mp->tx_skbs[i]);
- mp->tx_skbs[i] = NULL;
- }
- }
-}
-
-static void myri_init_rings(struct myri_eth *mp, int from_irq)
-{
- struct recvq __iomem *rq = mp->rq;
- struct myri_rxd __iomem *rxd = &rq->myri_rxd[0];
- struct net_device *dev = mp->dev;
- gfp_t gfp_flags = GFP_KERNEL;
- int i;
-
- if (from_irq || in_interrupt())
- gfp_flags = GFP_ATOMIC;
-
- myri_clean_rings(mp);
- for (i = 0; i < RX_RING_SIZE; i++) {
- struct sk_buff *skb = myri_alloc_skb(RX_ALLOC_SIZE, gfp_flags);
- u32 dma_addr;
-
- if (!skb)
- continue;
- mp->rx_skbs[i] = skb;
- skb->dev = dev;
- skb_put(skb, RX_ALLOC_SIZE);
-
- dma_addr = dma_map_single(&mp->myri_op->dev,
- skb->data, RX_ALLOC_SIZE,
- DMA_FROM_DEVICE);
- sbus_writel(dma_addr, &rxd[i].myri_scatters[0].addr);
- sbus_writel(RX_ALLOC_SIZE, &rxd[i].myri_scatters[0].len);
- sbus_writel(i, &rxd[i].ctx);
- sbus_writel(1, &rxd[i].num_sg);
- }
- sbus_writel(0, &rq->head);
- sbus_writel(RX_RING_SIZE, &rq->tail);
-}
-
-static int myri_init(struct myri_eth *mp, int from_irq)
-{
- myri_init_rings(mp, from_irq);
- return 0;
-}
-
-static void myri_is_not_so_happy(struct myri_eth *mp)
-{
-}
-
-#ifdef DEBUG_HEADER
-static void dump_ehdr(struct ethhdr *ehdr)
-{
- printk("ehdr[h_dst(%pM)"
- "h_source(%pM)"
- "h_proto(%04x)]\n",
- ehdr->h_dest, ehdr->h_source, ehdr->h_proto);
-}
-
-static void dump_ehdr_and_myripad(unsigned char *stuff)
-{
- struct ethhdr *ehdr = (struct ethhdr *) (stuff + 2);
-
- printk("pad[%02x:%02x]", stuff[0], stuff[1]);
- dump_ehdr(ehdr);
-}
-#endif
-
-static void myri_tx(struct myri_eth *mp, struct net_device *dev)
-{
- struct sendq __iomem *sq= mp->sq;
- int entry = mp->tx_old;
- int limit = sbus_readl(&sq->head);
-
- DTX(("entry[%d] limit[%d] ", entry, limit));
- if (entry == limit)
- return;
- while (entry != limit) {
- struct sk_buff *skb = mp->tx_skbs[entry];
- u32 dma_addr;
-
- DTX(("SKB[%d] ", entry));
- dma_addr = sbus_readl(&sq->myri_txd[entry].myri_gathers[0].addr);
- dma_unmap_single(&mp->myri_op->dev, dma_addr,
- skb->len, DMA_TO_DEVICE);
- dev_kfree_skb(skb);
- mp->tx_skbs[entry] = NULL;
- dev->stats.tx_packets++;
- entry = NEXT_TX(entry);
- }
- mp->tx_old = entry;
-}
-
-/* Determine the packet's protocol ID. The rule here is that we
- * assume 802.3 if the type field is short enough to be a length.
- * This is normal practice and works for any 'now in use' protocol.
- */
-static __be16 myri_type_trans(struct sk_buff *skb, struct net_device *dev)
-{
- struct ethhdr *eth;
- unsigned char *rawp;
-
- skb_set_mac_header(skb, MYRI_PAD_LEN);
- skb_pull(skb, dev->hard_header_len);
- eth = eth_hdr(skb);
-
-#ifdef DEBUG_HEADER
- DHDR(("myri_type_trans: "));
- dump_ehdr(eth);
-#endif
- if (*eth->h_dest & 1) {
- if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN)==0)
- skb->pkt_type = PACKET_BROADCAST;
- else
- skb->pkt_type = PACKET_MULTICAST;
- } else if (dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) {
- if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
- skb->pkt_type = PACKET_OTHERHOST;
- }
-
- if (ntohs(eth->h_proto) >= 1536)
- return eth->h_proto;
-
- rawp = skb->data;
-
- /* This is a magic hack to spot IPX packets. Older Novell breaks
- * the protocol design and runs IPX over 802.3 without an 802.2 LLC
- * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
- * won't work for fault tolerant netware but does for the rest.
- */
- if (*(unsigned short *)rawp == 0xFFFF)
- return htons(ETH_P_802_3);
-
- /* Real 802.2 LLC */
- return htons(ETH_P_802_2);
-}
-
-static void myri_rx(struct myri_eth *mp, struct net_device *dev)
-{
- struct recvq __iomem *rq = mp->rq;
- struct recvq __iomem *rqa = mp->rqack;
- int entry = sbus_readl(&rqa->head);
- int limit = sbus_readl(&rqa->tail);
- int drops;
-
- DRX(("entry[%d] limit[%d] ", entry, limit));
- if (entry == limit)
- return;
- drops = 0;
- DRX(("\n"));
- while (entry != limit) {
- struct myri_rxd __iomem *rxdack = &rqa->myri_rxd[entry];
- u32 csum = sbus_readl(&rxdack->csum);
- int len = sbus_readl(&rxdack->myri_scatters[0].len);
- int index = sbus_readl(&rxdack->ctx);
- struct myri_rxd __iomem *rxd = &rq->myri_rxd[sbus_readl(&rq->tail)];
- struct sk_buff *skb = mp->rx_skbs[index];
-
- /* Ack it. */
- sbus_writel(NEXT_RX(entry), &rqa->head);
-
- /* Check for errors. */
- DRX(("rxd[%d]: %p len[%d] csum[%08x] ", entry, rxd, len, csum));
- dma_sync_single_for_cpu(&mp->myri_op->dev,
- sbus_readl(&rxd->myri_scatters[0].addr),
- RX_ALLOC_SIZE, DMA_FROM_DEVICE);
- if (len < (ETH_HLEN + MYRI_PAD_LEN) || (skb->data[0] != MYRI_PAD_LEN)) {
- DRX(("ERROR["));
- dev->stats.rx_errors++;
- if (len < (ETH_HLEN + MYRI_PAD_LEN)) {
- DRX(("BAD_LENGTH] "));
- dev->stats.rx_length_errors++;
- } else {
- DRX(("NO_PADDING] "));
- dev->stats.rx_frame_errors++;
- }
-
- /* Return it to the LANAI. */
- drop_it:
- drops++;
- DRX(("DROP "));
- dev->stats.rx_dropped++;
- dma_sync_single_for_device(&mp->myri_op->dev,
- sbus_readl(&rxd->myri_scatters[0].addr),
- RX_ALLOC_SIZE,
- DMA_FROM_DEVICE);
- sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
- sbus_writel(index, &rxd->ctx);
- sbus_writel(1, &rxd->num_sg);
- sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
- goto next;
- }
-
- DRX(("len[%d] ", len));
- if (len > RX_COPY_THRESHOLD) {
- struct sk_buff *new_skb;
- u32 dma_addr;
-
- DRX(("BIGBUFF "));
- new_skb = myri_alloc_skb(RX_ALLOC_SIZE, GFP_ATOMIC);
- if (new_skb == NULL) {
- DRX(("skb_alloc(FAILED) "));
- goto drop_it;
- }
- dma_unmap_single(&mp->myri_op->dev,
- sbus_readl(&rxd->myri_scatters[0].addr),
- RX_ALLOC_SIZE,
- DMA_FROM_DEVICE);
- mp->rx_skbs[index] = new_skb;
- new_skb->dev = dev;
- skb_put(new_skb, RX_ALLOC_SIZE);
- dma_addr = dma_map_single(&mp->myri_op->dev,
- new_skb->data,
- RX_ALLOC_SIZE,
- DMA_FROM_DEVICE);
- sbus_writel(dma_addr, &rxd->myri_scatters[0].addr);
- sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
- sbus_writel(index, &rxd->ctx);
- sbus_writel(1, &rxd->num_sg);
- sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
-
- /* Trim the original skb for the netif. */
- DRX(("trim(%d) ", len));
- skb_trim(skb, len);
- } else {
- struct sk_buff *copy_skb = dev_alloc_skb(len);
-
- DRX(("SMALLBUFF "));
- if (copy_skb == NULL) {
- DRX(("dev_alloc_skb(FAILED) "));
- goto drop_it;
- }
- /* DMA sync already done above. */
- copy_skb->dev = dev;
- DRX(("resv_and_put "));
- skb_put(copy_skb, len);
- skb_copy_from_linear_data(skb, copy_skb->data, len);
-
- /* Reuse original ring buffer. */
- DRX(("reuse "));
- dma_sync_single_for_device(&mp->myri_op->dev,
- sbus_readl(&rxd->myri_scatters[0].addr),
- RX_ALLOC_SIZE,
- DMA_FROM_DEVICE);
- sbus_writel(RX_ALLOC_SIZE, &rxd->myri_scatters[0].len);
- sbus_writel(index, &rxd->ctx);
- sbus_writel(1, &rxd->num_sg);
- sbus_writel(NEXT_RX(sbus_readl(&rq->tail)), &rq->tail);
-
- skb = copy_skb;
- }
-
- /* Just like the happy meal we get checksums from this card. */
- skb->csum = csum;
- skb->ip_summed = CHECKSUM_UNNECESSARY; /* XXX */
-
- skb->protocol = myri_type_trans(skb, dev);
- DRX(("prot[%04x] netif_rx ", skb->protocol));
- netif_rx(skb);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
- next:
- DRX(("NEXT\n"));
- entry = NEXT_RX(entry);
- }
-}
-
-static irqreturn_t myri_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *) dev_id;
- struct myri_eth *mp = netdev_priv(dev);
- void __iomem *lregs = mp->lregs;
- struct myri_channel __iomem *chan = &mp->shmem->channel;
- unsigned long flags;
- u32 status;
- int handled = 0;
-
- spin_lock_irqsave(&mp->irq_lock, flags);
-
- status = sbus_readl(lregs + LANAI_ISTAT);
- DIRQ(("myri_interrupt: status[%08x] ", status));
- if (status & ISTAT_HOST) {
- u32 softstate;
-
- handled = 1;
- DIRQ(("IRQ_DISAB "));
- myri_disable_irq(lregs, mp->cregs);
- softstate = sbus_readl(&chan->state);
- DIRQ(("state[%08x] ", softstate));
- if (softstate != STATE_READY) {
- DIRQ(("myri_not_so_happy "));
- myri_is_not_so_happy(mp);
- }
- DIRQ(("\nmyri_rx: "));
- myri_rx(mp, dev);
- DIRQ(("\nistat=ISTAT_HOST "));
- sbus_writel(ISTAT_HOST, lregs + LANAI_ISTAT);
- DIRQ(("IRQ_ENAB "));
- myri_enable_irq(lregs, mp->cregs);
- }
- DIRQ(("\n"));
-
- spin_unlock_irqrestore(&mp->irq_lock, flags);
-
- return IRQ_RETVAL(handled);
-}
-
-static int myri_open(struct net_device *dev)
-{
- struct myri_eth *mp = netdev_priv(dev);
-
- return myri_init(mp, in_interrupt());
-}
-
-static int myri_close(struct net_device *dev)
-{
- struct myri_eth *mp = netdev_priv(dev);
-
- myri_clean_rings(mp);
- return 0;
-}
-
-static void myri_tx_timeout(struct net_device *dev)
-{
- struct myri_eth *mp = netdev_priv(dev);
-
- printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
-
- dev->stats.tx_errors++;
- myri_init(mp, 0);
- netif_wake_queue(dev);
-}
-
-static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct myri_eth *mp = netdev_priv(dev);
- struct sendq __iomem *sq = mp->sq;
- struct myri_txd __iomem *txd;
- unsigned long flags;
- unsigned int head, tail;
- int len, entry;
- u32 dma_addr;
-
- DTX(("myri_start_xmit: "));
-
- myri_tx(mp, dev);
-
- netif_stop_queue(dev);
-
- /* This is just to prevent multiple PIO reads for TX_BUFFS_AVAIL. */
- head = sbus_readl(&sq->head);
- tail = sbus_readl(&sq->tail);
-
- if (!TX_BUFFS_AVAIL(head, tail)) {
- DTX(("no buffs available, returning 1\n"));
- return NETDEV_TX_BUSY;
- }
-
- spin_lock_irqsave(&mp->irq_lock, flags);
-
- DHDR(("xmit[skbdata(%p)]\n", skb->data));
-#ifdef DEBUG_HEADER
- dump_ehdr_and_myripad(((unsigned char *) skb->data));
-#endif
-
- /* XXX Maybe this can go as well. */
- len = skb->len;
- if (len & 3) {
- DTX(("len&3 "));
- len = (len + 4) & (~3);
- }
-
- entry = sbus_readl(&sq->tail);
-
- txd = &sq->myri_txd[entry];
- mp->tx_skbs[entry] = skb;
-
- /* Must do this before we sbus map it. */
- if (skb->data[MYRI_PAD_LEN] & 0x1) {
- sbus_writew(0xffff, &txd->addr[0]);
- sbus_writew(0xffff, &txd->addr[1]);
- sbus_writew(0xffff, &txd->addr[2]);
- sbus_writew(0xffff, &txd->addr[3]);
- } else {
- sbus_writew(0xffff, &txd->addr[0]);
- sbus_writew((skb->data[0] << 8) | skb->data[1], &txd->addr[1]);
- sbus_writew((skb->data[2] << 8) | skb->data[3], &txd->addr[2]);
- sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]);
- }
-
- dma_addr = dma_map_single(&mp->myri_op->dev, skb->data,
- len, DMA_TO_DEVICE);
- sbus_writel(dma_addr, &txd->myri_gathers[0].addr);
- sbus_writel(len, &txd->myri_gathers[0].len);
- sbus_writel(1, &txd->num_sg);
- sbus_writel(KERNEL_CHANNEL, &txd->chan);
- sbus_writel(len, &txd->len);
- sbus_writel((u32)-1, &txd->csum_off);
- sbus_writel(0, &txd->csum_field);
-
- sbus_writel(NEXT_TX(entry), &sq->tail);
- DTX(("BangTheChip "));
- bang_the_chip(mp);
-
- DTX(("tbusy=0, returning 0\n"));
- netif_start_queue(dev);
- spin_unlock_irqrestore(&mp->irq_lock, flags);
- return NETDEV_TX_OK;
-}
-
-/* Create the MyriNet MAC header for an arbitrary protocol layer
- *
- * saddr=NULL means use device source address
- * daddr=NULL means leave destination address (eg unresolved arp)
- */
-static int myri_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, const void *daddr,
- const void *saddr, unsigned len)
-{
- struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
- unsigned char *pad = (unsigned char *) skb_push(skb, MYRI_PAD_LEN);
-
-#ifdef DEBUG_HEADER
- DHDR(("myri_header: pad[%02x,%02x] ", pad[0], pad[1]));
- dump_ehdr(eth);
-#endif
-
- /* Set the MyriNET padding identifier. */
- pad[0] = MYRI_PAD_LEN;
- pad[1] = 0xab;
-
- /* Set the protocol type. For a packet of type ETH_P_802_3/2 we put the
- * length in here instead.
- */
- if (type != ETH_P_802_3 && type != ETH_P_802_2)
- eth->h_proto = htons(type);
- else
- eth->h_proto = htons(len);
-
- /* Set the source hardware address. */
- if (saddr)
- memcpy(eth->h_source, saddr, dev->addr_len);
- else
- memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
-
- /* Anyway, the loopback-device should never use this function... */
- if (dev->flags & IFF_LOOPBACK) {
- int i;
- for (i = 0; i < dev->addr_len; i++)
- eth->h_dest[i] = 0;
- return dev->hard_header_len;
- }
-
- if (daddr) {
- memcpy(eth->h_dest, daddr, dev->addr_len);
- return dev->hard_header_len;
- }
- return -dev->hard_header_len;
-}
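myri_header() shows the on-wire layout this driver used: a 2-byte MyriNet pad (the pad length byte followed by the 0xab marker) pushed in front of a standard 14-byte Ethernet header, with the protocol field carrying a length instead of a type for 802.3/802.2 frames. A minimal standalone sketch of that layout, with the constants defined locally and made-up MAC addresses (the kernel skb handling is left out):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>
    #include <arpa/inet.h>   /* htons() */

    #define MYRI_PAD_LEN 2
    #define ETH_ALEN     6
    #define ETH_HLEN     14

    /* Build pad + Ethernet header into buf (MYRI_PAD_LEN + ETH_HLEN bytes). */
    static void build_myri_header(uint8_t *buf,
                                  const uint8_t *dest, const uint8_t *src,
                                  uint16_t type_or_len)
    {
        buf[0] = MYRI_PAD_LEN;            /* pad identifier, as in myri_header() */
        buf[1] = 0xab;

        memcpy(buf + MYRI_PAD_LEN, dest, ETH_ALEN);
        memcpy(buf + MYRI_PAD_LEN + ETH_ALEN, src, ETH_ALEN);
        uint16_t proto = htons(type_or_len);
        memcpy(buf + MYRI_PAD_LEN + 2 * ETH_ALEN, &proto, sizeof(proto));
    }

    int main(void)
    {
        uint8_t frame[MYRI_PAD_LEN + ETH_HLEN];
        uint8_t dst[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        uint8_t src[ETH_ALEN] = { 0x00, 0x60, 0xdd, 0x12, 0x34, 0x56 };

        build_myri_header(frame, dst, src, 0x0800);   /* ETH_P_IP */
        printf("pad=%02x,%02x first dest byte=%02x\n",
               frame[0], frame[1], frame[2]);
        return 0;
    }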
-
-/* Rebuild the MyriNet MAC header. This is called after an ARP
- * (or in future other address resolution) has completed on this
- * sk_buff. We now let ARP fill in the other fields.
- */
-static int myri_rebuild_header(struct sk_buff *skb)
-{
- unsigned char *pad = (unsigned char *) skb->data;
- struct ethhdr *eth = (struct ethhdr *) (pad + MYRI_PAD_LEN);
- struct net_device *dev = skb->dev;
-
-#ifdef DEBUG_HEADER
- DHDR(("myri_rebuild_header: pad[%02x,%02x] ", pad[0], pad[1]));
- dump_ehdr(eth);
-#endif
-
- /* Refill MyriNet padding identifiers, this is just being anal. */
- pad[0] = MYRI_PAD_LEN;
- pad[1] = 0xab;
-
- switch (eth->h_proto)
- {
-#ifdef CONFIG_INET
- case cpu_to_be16(ETH_P_IP):
- return arp_find(eth->h_dest, skb);
-#endif
-
- default:
- printk(KERN_DEBUG
- "%s: unable to resolve type %X addresses.\n",
- dev->name, (int)eth->h_proto);
-
- memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
- return 0;
- break;
- }
-
- return 0;
-}
-
-static int myri_header_cache(const struct neighbour *neigh, struct hh_cache *hh)
-{
- unsigned short type = hh->hh_type;
- unsigned char *pad;
- struct ethhdr *eth;
- const struct net_device *dev = neigh->dev;
-
- pad = ((unsigned char *) hh->hh_data) +
- HH_DATA_OFF(sizeof(*eth) + MYRI_PAD_LEN);
- eth = (struct ethhdr *) (pad + MYRI_PAD_LEN);
-
- if (type == htons(ETH_P_802_3))
- return -1;
-
- /* Refill MyriNet padding identifiers, this is just being anal. */
- pad[0] = MYRI_PAD_LEN;
- pad[1] = 0xab;
-
- eth->h_proto = type;
- memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
- memcpy(eth->h_dest, neigh->ha, dev->addr_len);
- hh->hh_len = 16;
- return 0;
-}
-
-
-/* Called by Address Resolution module to notify changes in address. */
-void myri_header_cache_update(struct hh_cache *hh,
- const struct net_device *dev,
- const unsigned char * haddr)
-{
- memcpy(((u8*)hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)),
- haddr, dev->addr_len);
-}
-
-static int myri_change_mtu(struct net_device *dev, int new_mtu)
-{
- if ((new_mtu < (ETH_HLEN + MYRI_PAD_LEN)) || (new_mtu > MYRINET_MTU))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-
-static void myri_set_multicast(struct net_device *dev)
-{
- /* Do nothing, all MyriCOM nodes transmit multicast frames
- * as broadcast packets...
- */
-}
-
-static inline void set_boardid_from_idprom(struct myri_eth *mp, int num)
-{
- mp->eeprom.id[0] = 0;
- mp->eeprom.id[1] = idprom->id_machtype;
- mp->eeprom.id[2] = (idprom->id_sernum >> 16) & 0xff;
- mp->eeprom.id[3] = (idprom->id_sernum >> 8) & 0xff;
- mp->eeprom.id[4] = (idprom->id_sernum >> 0) & 0xff;
- mp->eeprom.id[5] = num;
-}
-
-static inline void determine_reg_space_size(struct myri_eth *mp)
-{
- switch(mp->eeprom.cpuvers) {
- case CPUVERS_2_3:
- case CPUVERS_3_0:
- case CPUVERS_3_1:
- case CPUVERS_3_2:
- mp->reg_size = (3 * 128 * 1024) + 4096;
- break;
-
- case CPUVERS_4_0:
- case CPUVERS_4_1:
- mp->reg_size = ((4096<<1) + mp->eeprom.ramsz);
- break;
-
- case CPUVERS_4_2:
- case CPUVERS_5_0:
- default:
- printk("myricom: AIEEE weird cpu version %04x assuming pre4.0\n",
- mp->eeprom.cpuvers);
- mp->reg_size = (3 * 128 * 1024) + 4096;
- }
-}
-
-#ifdef DEBUG_DETECT
-static void dump_eeprom(struct myri_eth *mp)
-{
- printk("EEPROM: clockval[%08x] cpuvers[%04x] "
- "id[%02x,%02x,%02x,%02x,%02x,%02x]\n",
- mp->eeprom.cval, mp->eeprom.cpuvers,
- mp->eeprom.id[0], mp->eeprom.id[1], mp->eeprom.id[2],
- mp->eeprom.id[3], mp->eeprom.id[4], mp->eeprom.id[5]);
- printk("EEPROM: ramsz[%08x]\n", mp->eeprom.ramsz);
- printk("EEPROM: fvers[%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
- mp->eeprom.fvers[0], mp->eeprom.fvers[1], mp->eeprom.fvers[2],
- mp->eeprom.fvers[3], mp->eeprom.fvers[4], mp->eeprom.fvers[5],
- mp->eeprom.fvers[6], mp->eeprom.fvers[7]);
- printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
- mp->eeprom.fvers[8], mp->eeprom.fvers[9], mp->eeprom.fvers[10],
- mp->eeprom.fvers[11], mp->eeprom.fvers[12], mp->eeprom.fvers[13],
- mp->eeprom.fvers[14], mp->eeprom.fvers[15]);
- printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
- mp->eeprom.fvers[16], mp->eeprom.fvers[17], mp->eeprom.fvers[18],
- mp->eeprom.fvers[19], mp->eeprom.fvers[20], mp->eeprom.fvers[21],
- mp->eeprom.fvers[22], mp->eeprom.fvers[23]);
- printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x]\n",
- mp->eeprom.fvers[24], mp->eeprom.fvers[25], mp->eeprom.fvers[26],
- mp->eeprom.fvers[27], mp->eeprom.fvers[28], mp->eeprom.fvers[29],
- mp->eeprom.fvers[30], mp->eeprom.fvers[31]);
- printk("EEPROM: mvers[%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
- mp->eeprom.mvers[0], mp->eeprom.mvers[1], mp->eeprom.mvers[2],
- mp->eeprom.mvers[3], mp->eeprom.mvers[4], mp->eeprom.mvers[5],
- mp->eeprom.mvers[6], mp->eeprom.mvers[7]);
- printk("EEPROM: %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x]\n",
- mp->eeprom.mvers[8], mp->eeprom.mvers[9], mp->eeprom.mvers[10],
- mp->eeprom.mvers[11], mp->eeprom.mvers[12], mp->eeprom.mvers[13],
- mp->eeprom.mvers[14], mp->eeprom.mvers[15]);
- printk("EEPROM: dlval[%04x] brd_type[%04x] bus_type[%04x] prod_code[%04x]\n",
- mp->eeprom.dlval, mp->eeprom.brd_type, mp->eeprom.bus_type,
- mp->eeprom.prod_code);
- printk("EEPROM: serial_num[%08x]\n", mp->eeprom.serial_num);
-}
-#endif
-
-static const struct header_ops myri_header_ops = {
- .create = myri_header,
- .rebuild = myri_rebuild_header,
- .cache = myri_header_cache,
- .cache_update = myri_header_cache_update,
-};
-
-static const struct net_device_ops myri_ops = {
- .ndo_open = myri_open,
- .ndo_stop = myri_close,
- .ndo_start_xmit = myri_start_xmit,
- .ndo_set_multicast_list = myri_set_multicast,
- .ndo_tx_timeout = myri_tx_timeout,
- .ndo_change_mtu = myri_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __devinit myri_sbus_probe(struct platform_device *op)
-{
- struct device_node *dp = op->dev.of_node;
- static unsigned version_printed;
- struct net_device *dev;
- struct myri_eth *mp;
- const void *prop;
- static int num;
- int i, len;
-
- DET(("myri_ether_init(%p,%d):\n", op, num));
- dev = alloc_etherdev(sizeof(struct myri_eth));
- if (!dev)
- return -ENOMEM;
-
- if (version_printed++ == 0)
- printk(version);
-
- SET_NETDEV_DEV(dev, &op->dev);
-
- mp = netdev_priv(dev);
- spin_lock_init(&mp->irq_lock);
- mp->myri_op = op;
-
- /* Clean out skb arrays. */
- for (i = 0; i < (RX_RING_SIZE + 1); i++)
- mp->rx_skbs[i] = NULL;
-
- for (i = 0; i < TX_RING_SIZE; i++)
- mp->tx_skbs[i] = NULL;
-
- /* First check for EEPROM information. */
- prop = of_get_property(dp, "myrinet-eeprom-info", &len);
-
- if (prop)
- memcpy(&mp->eeprom, prop, sizeof(struct myri_eeprom));
- if (!prop) {
- /* No eeprom property, must cook up the values ourselves. */
- DET(("No EEPROM: "));
- mp->eeprom.bus_type = BUS_TYPE_SBUS;
- mp->eeprom.cpuvers =
- of_getintprop_default(dp, "cpu_version", 0);
- mp->eeprom.cval =
- of_getintprop_default(dp, "clock_value", 0);
- mp->eeprom.ramsz = of_getintprop_default(dp, "sram_size", 0);
- if (!mp->eeprom.cpuvers)
- mp->eeprom.cpuvers = CPUVERS_2_3;
- if (mp->eeprom.cpuvers < CPUVERS_3_0)
- mp->eeprom.cval = 0;
- if (!mp->eeprom.ramsz)
- mp->eeprom.ramsz = (128 * 1024);
-
- prop = of_get_property(dp, "myrinet-board-id", &len);
- if (prop)
- memcpy(&mp->eeprom.id[0], prop, 6);
- else
- set_boardid_from_idprom(mp, num);
-
- prop = of_get_property(dp, "fpga_version", &len);
- if (prop)
- memcpy(&mp->eeprom.fvers[0], prop, 32);
- else
- memset(&mp->eeprom.fvers[0], 0, 32);
-
- if (mp->eeprom.cpuvers == CPUVERS_4_1) {
- if (mp->eeprom.ramsz == (128 * 1024))
- mp->eeprom.ramsz = (256 * 1024);
- if ((mp->eeprom.cval == 0x40414041) ||
- (mp->eeprom.cval == 0x90449044))
- mp->eeprom.cval = 0x50e450e4;
- }
- }
-#ifdef DEBUG_DETECT
- dump_eeprom(mp);
-#endif
-
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = mp->eeprom.id[i];
-
- determine_reg_space_size(mp);
-
- /* Map in the MyriCOM register/localram set. */
- if (mp->eeprom.cpuvers < CPUVERS_4_0) {
- /* XXX Makes no sense, if control reg is non-existent this
- * XXX driver cannot function at all... maybe pre-4.0 is
- * XXX only a valid version for PCI cards? Ask feldy...
- */
- DET(("Mapping regs for cpuvers < CPUVERS_4_0\n"));
- mp->regs = of_ioremap(&op->resource[0], 0,
- mp->reg_size, "MyriCOM Regs");
- if (!mp->regs) {
- printk("MyriCOM: Cannot map MyriCOM registers.\n");
- goto err;
- }
- mp->lanai = mp->regs + (256 * 1024);
- mp->lregs = mp->lanai + (0x10000 * 2);
- } else {
- DET(("Mapping regs for cpuvers >= CPUVERS_4_0\n"));
- mp->cregs = of_ioremap(&op->resource[0], 0,
- PAGE_SIZE, "MyriCOM Control Regs");
- mp->lregs = of_ioremap(&op->resource[0], (256 * 1024),
- PAGE_SIZE, "MyriCOM LANAI Regs");
- mp->lanai = of_ioremap(&op->resource[0], (512 * 1024),
- mp->eeprom.ramsz, "MyriCOM SRAM");
- }
- DET(("Registers mapped: cregs[%p] lregs[%p] lanai[%p]\n",
- mp->cregs, mp->lregs, mp->lanai));
-
- if (mp->eeprom.cpuvers >= CPUVERS_4_0)
- mp->shmem_base = 0xf000;
- else
- mp->shmem_base = 0x8000;
-
- DET(("Shared memory base is %04x, ", mp->shmem_base));
-
- mp->shmem = (struct myri_shmem __iomem *)
- (mp->lanai + (mp->shmem_base * 2));
- DET(("shmem mapped at %p\n", mp->shmem));
-
- mp->rqack = &mp->shmem->channel.recvqa;
- mp->rq = &mp->shmem->channel.recvq;
- mp->sq = &mp->shmem->channel.sendq;
-
- /* Reset the board. */
- DET(("Resetting LANAI\n"));
- myri_reset_off(mp->lregs, mp->cregs);
- myri_reset_on(mp->cregs);
-
- /* Turn IRQ's off. */
- myri_disable_irq(mp->lregs, mp->cregs);
-
- /* Reset once more. */
- myri_reset_on(mp->cregs);
-
- /* Get the supported DVMA burst sizes from our SBUS. */
- mp->myri_bursts = of_getintprop_default(dp->parent,
- "burst-sizes", 0x00);
- if (!sbus_can_burst64())
- mp->myri_bursts &= ~(DMA_BURST64);
-
- DET(("MYRI bursts %02x\n", mp->myri_bursts));
-
- /* Encode SBUS interrupt level in second control register. */
- i = of_getintprop_default(dp, "interrupts", 0);
- if (i == 0)
- i = 4;
- DET(("prom_getint(interrupts)==%d, irqlvl set to %04x\n",
- i, (1 << i)));
-
- sbus_writel((1 << i), mp->cregs + MYRICTRL_IRQLVL);
-
- mp->dev = dev;
- dev->watchdog_timeo = 5*HZ;
- dev->irq = op->archdata.irqs[0];
- dev->netdev_ops = &myri_ops;
-
- /* Register interrupt handler now. */
- DET(("Requesting MYRIcom IRQ line.\n"));
- if (request_irq(dev->irq, myri_interrupt,
- IRQF_SHARED, "MyriCOM Ethernet", (void *) dev)) {
- printk("MyriCOM: Cannot register interrupt handler.\n");
- goto err;
- }
-
- dev->mtu = MYRINET_MTU;
- dev->header_ops = &myri_header_ops;
-
- dev->hard_header_len = (ETH_HLEN + MYRI_PAD_LEN);
-
- /* Load code onto the LANai. */
- DET(("Loading LANAI firmware\n"));
- if (myri_load_lanai(mp)) {
- printk(KERN_ERR "MyriCOM: Cannot Load LANAI firmware.\n");
- goto err_free_irq;
- }
-
- if (register_netdev(dev)) {
- printk("MyriCOM: Cannot register device.\n");
- goto err_free_irq;
- }
-
- dev_set_drvdata(&op->dev, mp);
-
- num++;
-
- printk("%s: MyriCOM MyriNET Ethernet %pM\n",
- dev->name, dev->dev_addr);
-
- return 0;
-
-err_free_irq:
- free_irq(dev->irq, dev);
-err:
- /* This will also free the co-allocated private data*/
- free_netdev(dev);
- return -ENODEV;
-}
-
-static int __devexit myri_sbus_remove(struct platform_device *op)
-{
- struct myri_eth *mp = dev_get_drvdata(&op->dev);
- struct net_device *net_dev = mp->dev;
-
- unregister_netdev(net_dev);
-
- free_irq(net_dev->irq, net_dev);
-
- if (mp->eeprom.cpuvers < CPUVERS_4_0) {
- of_iounmap(&op->resource[0], mp->regs, mp->reg_size);
- } else {
- of_iounmap(&op->resource[0], mp->cregs, PAGE_SIZE);
- of_iounmap(&op->resource[0], mp->lregs, (256 * 1024));
- of_iounmap(&op->resource[0], mp->lanai, (512 * 1024));
- }
-
- free_netdev(net_dev);
-
- dev_set_drvdata(&op->dev, NULL);
-
- return 0;
-}
-
-static const struct of_device_id myri_sbus_match[] = {
- {
- .name = "MYRICOM,mlanai",
- },
- {
- .name = "myri",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, myri_sbus_match);
-
-static struct platform_driver myri_sbus_driver = {
- .driver = {
- .name = "myri",
- .owner = THIS_MODULE,
- .of_match_table = myri_sbus_match,
- },
- .probe = myri_sbus_probe,
- .remove = __devexit_p(myri_sbus_remove),
-};
-
-static int __init myri_sbus_init(void)
-{
- return platform_driver_register(&myri_sbus_driver);
-}
-
-static void __exit myri_sbus_exit(void)
-{
- platform_driver_unregister(&myri_sbus_driver);
-}
-
-module_init(myri_sbus_init);
-module_exit(myri_sbus_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(FWNAME);
diff --git a/drivers/net/myri_sbus.h b/drivers/net/myri_sbus.h
deleted file mode 100644
index 80a2fa5cf75..00000000000
--- a/drivers/net/myri_sbus.h
+++ /dev/null
@@ -1,311 +0,0 @@
-/* myri_sbus.h: Defines for MyriCOM MyriNET SBUS card driver.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#ifndef _MYRI_SBUS_H
-#define _MYRI_SBUS_H
-
-/* LANAI Registers */
-#define LANAI_IPF0 0x00UL /* Context zero state registers.*/
-#define LANAI_CUR0 0x04UL
-#define LANAI_PREV0 0x08UL
-#define LANAI_DATA0 0x0cUL
-#define LANAI_DPF0 0x10UL
-#define LANAI_IPF1 0x14UL /* Context one state registers. */
-#define LANAI_CUR1 0x18UL
-#define LANAI_PREV1 0x1cUL
-#define LANAI_DATA1 0x20UL
-#define LANAI_DPF1 0x24UL
-#define LANAI_ISTAT 0x28UL /* Interrupt status. */
-#define LANAI_EIMASK 0x2cUL /* External IRQ mask. */
-#define LANAI_ITIMER 0x30UL /* IRQ timer. */
-#define LANAI_RTC 0x34UL /* Real Time Clock */
-#define LANAI_CSUM 0x38UL /* Checksum. */
-#define LANAI_DMAXADDR 0x3cUL /* SBUS DMA external address. */
-#define LANAI_DMALADDR 0x40UL /* SBUS DMA local address. */
-#define LANAI_DMACTR 0x44UL /* SBUS DMA counter. */
-#define LANAI_RXDMAPTR 0x48UL /* Receive DMA pointer. */
-#define LANAI_RXDMALIM 0x4cUL /* Receive DMA limit. */
-#define LANAI_TXDMAPTR 0x50UL /* Transmit DMA pointer. */
-#define LANAI_TXDMALIM 0x54UL /* Transmit DMA limit. */
-#define LANAI_TXDMALIMT 0x58UL /* Transmit DMA limit w/tail. */
- /* 0x5cUL, reserved */
-#define LANAI_RBYTE 0x60UL /* Receive byte. */
- /* 0x64-->0x6c, reserved */
-#define LANAI_RHALF 0x70UL /* Receive half-word. */
- /* 0x72UL, reserved */
-#define LANAI_RWORD 0x74UL /* Receive word. */
-#define LANAI_SALIGN 0x78UL /* Send align. */
-#define LANAI_SBYTE 0x7cUL /* SingleSend send-byte. */
-#define LANAI_SHALF 0x80UL /* SingleSend send-halfword. */
-#define LANAI_SWORD 0x84UL /* SingleSend send-word. */
-#define LANAI_SSENDT 0x88UL /* SingleSend special. */
-#define LANAI_DMADIR 0x8cUL /* DMA direction. */
-#define LANAI_DMASTAT 0x90UL /* DMA status. */
-#define LANAI_TIMEO 0x94UL /* Timeout register. */
-#define LANAI_MYRINET 0x98UL /* XXX MAGIC myricom thing */
-#define LANAI_HWDEBUG 0x9cUL /* Hardware debugging reg. */
-#define LANAI_LEDS 0xa0UL /* LED control. */
-#define LANAI_VERS 0xa4UL /* Version register. */
-#define LANAI_LINKON 0xa8UL /* Link activation reg. */
- /* 0xac-->0x104, reserved */
-#define LANAI_CVAL 0x108UL /* Clock value register. */
-#define LANAI_REG_SIZE 0x10cUL
-
-/* Interrupt status bits. */
-#define ISTAT_DEBUG 0x80000000
-#define ISTAT_HOST 0x40000000
-#define ISTAT_LAN7 0x00800000
-#define ISTAT_LAN6 0x00400000
-#define ISTAT_LAN5 0x00200000
-#define ISTAT_LAN4 0x00100000
-#define ISTAT_LAN3 0x00080000
-#define ISTAT_LAN2 0x00040000
-#define ISTAT_LAN1 0x00020000
-#define ISTAT_LAN0 0x00010000
-#define ISTAT_WRDY 0x00008000
-#define ISTAT_HRDY 0x00004000
-#define ISTAT_SRDY 0x00002000
-#define ISTAT_LINK 0x00001000
-#define ISTAT_FRES 0x00000800
-#define ISTAT_NRES 0x00000800
-#define ISTAT_WAKE 0x00000400
-#define ISTAT_OB2 0x00000200
-#define ISTAT_OB1 0x00000100
-#define ISTAT_TAIL 0x00000080
-#define ISTAT_WDOG 0x00000040
-#define ISTAT_TIME 0x00000020
-#define ISTAT_DMA 0x00000010
-#define ISTAT_SEND 0x00000008
-#define ISTAT_BUF 0x00000004
-#define ISTAT_RECV 0x00000002
-#define ISTAT_BRDY 0x00000001
-
-/* MYRI Registers */
-#define MYRI_RESETOFF 0x00UL
-#define MYRI_RESETON 0x04UL
-#define MYRI_IRQOFF 0x08UL
-#define MYRI_IRQON 0x0cUL
-#define MYRI_WAKEUPOFF 0x10UL
-#define MYRI_WAKEUPON 0x14UL
-#define MYRI_IRQREAD 0x18UL
- /* 0x1c-->0x3ffc, reserved */
-#define MYRI_LOCALMEM 0x4000UL
-#define MYRI_REG_SIZE 0x25000UL
-
-/* Shared memory interrupt mask. */
-#define SHMEM_IMASK_RX 0x00000002
-#define SHMEM_IMASK_TX 0x00000001
-
-/* Just to make things readable. */
-#define KERNEL_CHANNEL 0
-
-/* The size of this must be >= 129 bytes. */
-struct myri_eeprom {
- unsigned int cval;
- unsigned short cpuvers;
- unsigned char id[6];
- unsigned int ramsz;
- unsigned char fvers[32];
- unsigned char mvers[16];
- unsigned short dlval;
- unsigned short brd_type;
- unsigned short bus_type;
- unsigned short prod_code;
- unsigned int serial_num;
- unsigned short _reserved[24];
- unsigned int _unused[2];
-};
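The comment requires sizeof(struct myri_eeprom) to be at least 129 bytes. With C11 that invariant can be pinned down at compile time; a sketch against a standalone copy of the structure (field layout as above, padding left to the compiler, just as in the driver):

    struct myri_eeprom {
        unsigned int   cval;
        unsigned short cpuvers;
        unsigned char  id[6];
        unsigned int   ramsz;
        unsigned char  fvers[32];
        unsigned char  mvers[16];
        unsigned short dlval;
        unsigned short brd_type;
        unsigned short bus_type;
        unsigned short prod_code;
        unsigned int   serial_num;
        unsigned short _reserved[24];
        unsigned int   _unused[2];
    };

    /* The driver requires the in-memory layout to cover at least 129 bytes. */
    _Static_assert(sizeof(struct myri_eeprom) >= 129,
                   "myri_eeprom must be >= 129 bytes");

    int main(void) { return 0; }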
-
-/* EEPROM bus types, only SBUS is valid in this driver. */
-#define BUS_TYPE_SBUS 1
-
-/* EEPROM CPU revisions. */
-#define CPUVERS_2_3 0x0203
-#define CPUVERS_3_0 0x0300
-#define CPUVERS_3_1 0x0301
-#define CPUVERS_3_2 0x0302
-#define CPUVERS_4_0 0x0400
-#define CPUVERS_4_1 0x0401
-#define CPUVERS_4_2 0x0402
-#define CPUVERS_5_0 0x0500
-
-/* MYRI Control Registers */
-#define MYRICTRL_CTRL 0x00UL
-#define MYRICTRL_IRQLVL 0x02UL
-#define MYRICTRL_REG_SIZE 0x04UL
-
-/* Global control register defines. */
-#define CONTROL_ROFF 0x8000 /* Reset OFF. */
-#define CONTROL_RON 0x4000 /* Reset ON. */
-#define CONTROL_EIRQ 0x2000 /* Enable IRQ's. */
-#define CONTROL_DIRQ 0x1000 /* Disable IRQ's. */
-#define CONTROL_WON 0x0800 /* Wake-up ON. */
-
-#define MYRI_SCATTER_ENTRIES 8
-#define MYRI_GATHER_ENTRIES 16
-
-struct myri_sglist {
- u32 addr;
- u32 len;
-};
-
-struct myri_rxd {
- struct myri_sglist myri_scatters[MYRI_SCATTER_ENTRIES]; /* DMA scatter list.*/
- u32 csum; /* HW computed checksum. */
- u32 ctx;
- u32 num_sg; /* Total scatter entries. */
-};
-
-struct myri_txd {
- struct myri_sglist myri_gathers[MYRI_GATHER_ENTRIES]; /* DMA scatter list. */
- u32 num_sg; /* Total scatter entries. */
- u16 addr[4]; /* XXX address */
- u32 chan;
- u32 len; /* Total length of packet. */
- u32 csum_off; /* Where data to csum is. */
- u32 csum_field; /* Where csum goes in pkt. */
-};
-
-#define MYRINET_MTU 8432
-#define RX_ALLOC_SIZE 8448
-#define MYRI_PAD_LEN 2
-#define RX_COPY_THRESHOLD 256
-
-/* These numbers are cast in stone, new firmware is needed if
- * you want to change them.
- */
-#define TX_RING_MAXSIZE 16
-#define RX_RING_MAXSIZE 16
-
-#define TX_RING_SIZE 16
-#define RX_RING_SIZE 16
-
-/* GRRR... */
-static __inline__ int NEXT_RX(int num)
-{
- /* XXX >=??? */
- if(++num > RX_RING_SIZE)
- num = 0;
- return num;
-}
-
-static __inline__ int PREV_RX(int num)
-{
- if(--num < 0)
- num = RX_RING_SIZE;
- return num;
-}
-
-#define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
-#define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
-
-#define TX_BUFFS_AVAIL(head, tail) \
- ((head) <= (tail) ? \
- (head) + (TX_RING_SIZE - 1) - (tail) : \
- (head) - (tail) - 1)
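These helpers encode two different wrap rules: the TX ring is a power of two, so NEXT_TX/PREV_TX mask with TX_RING_SIZE - 1 and TX_BUFFS_AVAIL always keeps one slot unused to tell a full ring from an empty one, while the RX ring really has RX_RING_SIZE + 1 descriptor slots (see struct recvq and rx_skbs[] below) and NEXT_RX only wraps after index RX_RING_SIZE. A small standalone check of the arithmetic, with the macros copied from above:

    #include <stdio.h>

    #define TX_RING_SIZE 16
    #define RX_RING_SIZE 16

    #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
    #define TX_BUFFS_AVAIL(head, tail)              \
        ((head) <= (tail) ?                         \
         (head) + (TX_RING_SIZE - 1) - (tail) :     \
         (head) - (tail) - 1)

    static int next_rx(int num)
    {
        /* RX ring has RX_RING_SIZE + 1 slots, so wrap after the last one. */
        if (++num > RX_RING_SIZE)
            num = 0;
        return num;
    }

    int main(void)
    {
        /* Empty ring: one slot is always kept free, so 15 of 16 are usable. */
        printf("avail(head=0, tail=0) = %d\n", TX_BUFFS_AVAIL(0, 0));
        /* Tail has wrapped to just behind head: every usable slot is in
         * flight, so nothing is available. */
        printf("avail(head=5, tail=4) = %d\n", TX_BUFFS_AVAIL(5, 4));
        /* TX wraps with a power-of-two mask, RX wraps after index 16. */
        printf("NEXT_TX(15)=%d next_rx(16)=%d\n", NEXT_TX(15), next_rx(16));
        return 0;
    }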
-
-struct sendq {
- u32 tail;
- u32 head;
- u32 hdebug;
- u32 mdebug;
- struct myri_txd myri_txd[TX_RING_MAXSIZE];
-};
-
-struct recvq {
- u32 head;
- u32 tail;
- u32 hdebug;
- u32 mdebug;
- struct myri_rxd myri_rxd[RX_RING_MAXSIZE + 1];
-};
-
-#define MYRI_MLIST_SIZE 8
-
-struct mclist {
- u32 maxlen;
- u32 len;
- u32 cache;
- struct pair {
- u8 addr[8];
- u32 val;
- } mc_pairs[MYRI_MLIST_SIZE];
- u8 bcast_addr[8];
-};
-
-struct myri_channel {
- u32 state; /* State of the channel. */
- u32 busy; /* Channel is busy. */
- struct sendq sendq; /* Device tx queue. */
- struct recvq recvq; /* Device rx queue. */
- struct recvq recvqa; /* Device rx queue acked. */
- u32 rbytes; /* Receive bytes. */
- u32 sbytes; /* Send bytes. */
- u32 rmsgs; /* Receive messages. */
- u32 smsgs; /* Send messages. */
- struct mclist mclist; /* Device multicast list. */
-};
-
-/* Values for per-channel state. */
-#define STATE_WFH 0 /* Waiting for HOST. */
-#define STATE_WFN 1 /* Waiting for NET. */
-#define STATE_READY 2 /* Ready. */
-
-struct myri_shmem {
- u8 addr[8]; /* Board's address. */
- u32 nchan; /* Number of channels. */
- u32 burst; /* SBUS dma burst enable. */
- u32 shakedown; /* DarkkkkStarrr Crashesss... */
- u32 send; /* Send wanted. */
- u32 imask; /* Interrupt enable mask. */
- u32 mlevel; /* Map level. */
- u32 debug[4]; /* Misc. debug areas. */
- struct myri_channel channel; /* Only one channel on a host. */
-};
-
-struct myri_eth {
- /* These are frequently accessed, keep together
- * to obtain good cache hit rates.
- */
- spinlock_t irq_lock;
- struct myri_shmem __iomem *shmem; /* Shared data structures. */
- void __iomem *cregs; /* Control register space. */
- struct recvq __iomem *rqack; /* Where we ack rx's. */
- struct recvq __iomem *rq; /* Where we put buffers. */
- struct sendq __iomem *sq; /* Where we stuff tx's. */
- struct net_device *dev; /* Linux/NET dev struct. */
- int tx_old; /* To speed up tx cleaning. */
- void __iomem *lregs; /* Quick ptr to LANAI regs. */
- struct sk_buff *rx_skbs[RX_RING_SIZE+1];/* RX skb's */
- struct sk_buff *tx_skbs[TX_RING_SIZE]; /* TX skb's */
-
- /* These are less frequently accessed. */
- void __iomem *regs; /* MyriCOM register space. */
- void __iomem *lanai; /* View 2 of register space. */
- unsigned int myri_bursts; /* SBUS bursts. */
- struct myri_eeprom eeprom; /* Local copy of EEPROM. */
- unsigned int reg_size; /* Size of register space. */
- unsigned int shmem_base; /* Offset to shared ram. */
- struct platform_device *myri_op; /* Our OF device struct. */
-};
-
-/* We use this to acquire receive skb's that we can DMA directly into. */
-#define ALIGNED_RX_SKB_ADDR(addr) \
- ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
-static inline struct sk_buff *myri_alloc_skb(unsigned int length, gfp_t gfp_flags)
-{
- struct sk_buff *skb;
-
- skb = alloc_skb(length + 64, gfp_flags);
- if(skb) {
- int offset = ALIGNED_RX_SKB_ADDR(skb->data);
-
- if(offset)
- skb_reserve(skb, offset);
- }
- return skb;
-}
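ALIGNED_RX_SKB_ADDR() computes how many bytes must be skipped so that the skb data pointer lands on the next 64-byte boundary; myri_alloc_skb() over-allocates by 64 bytes and reserves exactly that offset before the buffer is DMA-mapped. The rounding can be checked in isolation (the macro is copied verbatim, the sample addresses are arbitrary):

    #include <stdio.h>

    #define ALIGNED_RX_SKB_ADDR(addr) \
        ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))

    int main(void)
    {
        unsigned long samples[] = { 0x1000, 0x1001, 0x103f, 0x1040 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            unsigned long off = ALIGNED_RX_SKB_ADDR(samples[i]);
            /* offset is always < 64 and (addr + offset) is 64-byte aligned */
            printf("addr=0x%lx offset=%lu aligned=0x%lx\n",
                   samples[i], off, samples[i] + off);
        }
        return 0;
    }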
-
-#endif /* !(_MYRI_SBUS_H) */
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index b78be088c4a..2962cc695ce 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -140,7 +140,7 @@ MODULE_LICENSE("GPL");
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
-module_param(dspcfg_workaround, int, 1);
+module_param(dspcfg_workaround, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
@@ -1382,7 +1382,7 @@ static int find_mii(struct net_device *dev)
/* WCSR bits [0:4] [9:10] */
#define WCSR_RESET_SAVE 0x61f
/* RFCR bits [20] [22] [27:31] */
-#define RFCR_RESET_SAVE 0xf8500000;
+#define RFCR_RESET_SAVE 0xf8500000
static void natsemi_reset(struct net_device *dev)
{
@@ -2028,8 +2028,8 @@ static void drain_rx(struct net_device *dev)
np->rx_ring[i].cmd_status = 0;
np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (np->rx_skbuff[i]) {
- pci_unmap_single(np->pci_dev,
- np->rx_dma[i], buflen,
+ pci_unmap_single(np->pci_dev, np->rx_dma[i],
+ buflen + NATSEMI_PADDING,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(np->rx_skbuff[i]);
}
@@ -2360,7 +2360,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
PCI_DMA_FROMDEVICE);
} else {
pci_unmap_single(np->pci_dev, np->rx_dma[entry],
- buflen, PCI_DMA_FROMDEVICE);
+ buflen + NATSEMI_PADDING,
+ PCI_DMA_FROMDEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
}
@@ -2919,7 +2920,7 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
/*
* If we're ignoring the PHY then autoneg and the internal
- * transciever are really not going to work so don't let the
+ * transceiver are really not going to work so don't let the
* user select them.
*/
if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE ||
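One of the natsemi fixes above drops a stray semicolon from the RFCR_RESET_SAVE definition. With the semicolon in place the macro cannot be used inside a larger expression, because the ';' is pasted into the expansion; a small illustration using the corrected constant:

    #include <stdio.h>

    #define RFCR_RESET_SAVE 0xf8500000   /* fixed: no trailing ';' */

    int main(void)
    {
        unsigned int rfcr = 0xf850001f;

        /* With the old "#define RFCR_RESET_SAVE 0xf8500000;" this call would
         * expand to printf("...", rfcr & 0xf8500000;) and fail to compile,
         * because the semicolon lands inside the argument list. */
        printf("saved RFCR bits: 0x%08x\n", rfcr & RFCR_RESET_SAVE);
        return 0;
    }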
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 2e4b42175f3..2dfee892d20 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -18,6 +18,7 @@
*/
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 77220687b92..f744d291218 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 75
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.75"
+#define _NETXEN_NIC_LINUX_SUBVERSION 76
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.76"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -1302,6 +1302,7 @@ int netxen_nic_wol_supported(struct netxen_adapter *adapter);
int netxen_init_dummy_dma(struct netxen_adapter *adapter);
void netxen_free_dummy_dma(struct netxen_adapter *adapter);
+int netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter);
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
int netxen_load_firmware(struct netxen_adapter *adapter);
int netxen_need_fw_reset(struct netxen_adapter *adapter);
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index f16966afa64..a925392abd6 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -163,7 +163,7 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
rq_size, &hostrq_phys_addr);
if (addr == NULL)
return -ENOMEM;
- prq = (nx_hostrq_rx_ctx_t *)addr;
+ prq = addr;
addr = pci_alloc_consistent(adapter->pdev,
rsp_size, &cardrsp_phys_addr);
@@ -171,7 +171,7 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
err = -ENOMEM;
goto out_free_rq;
}
- prsp = (nx_cardrsp_rx_ctx_t *)addr;
+ prsp = addr;
prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
@@ -318,10 +318,10 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
}
memset(rq_addr, 0, rq_size);
- prq = (nx_hostrq_tx_ctx_t *)rq_addr;
+ prq = rq_addr;
memset(rsp_addr, 0, rsp_size);
- prsp = (nx_cardrsp_tx_ctx_t *)rsp_addr;
+ prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
@@ -629,7 +629,7 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
}
memset(addr, 0, sizeof(struct netxen_ring_ctx));
- recv_ctx->hwctx = (struct netxen_ring_ctx *)addr;
+ recv_ctx->hwctx = addr;
recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
recv_ctx->hwctx->cmd_consumer_offset =
cpu_to_le64(recv_ctx->phys_addr +
@@ -648,7 +648,7 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
goto err_out_free;
}
- tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
+ tx_ring->desc_head = addr;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
@@ -662,7 +662,7 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
err = -ENOMEM;
goto err_out_free;
}
- rds_ring->desc_head = (struct rcv_desc *)addr;
+ rds_ring->desc_head = addr;
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
rds_ring->crb_rcv_producer =
@@ -683,7 +683,7 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
err = -ENOMEM;
goto err_out_free;
}
- sds_ring->desc_head = (struct status_desc *)addr;
+ sds_ring->desc_head = addr;
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
sds_ring->crb_sts_consumer =
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 5cef718fe35..3f89e57cae5 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -809,6 +809,9 @@ int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable)
u64 word;
int rv = 0;
+ if (!test_bit(__NX_FW_ATTACHED, &adapter->state))
+ return 0;
+
memset(&req, 0, sizeof(nx_nic_req_t));
req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
@@ -959,6 +962,9 @@ int netxen_send_lro_cleanup(struct netxen_adapter *adapter)
u64 word;
int rv;
+ if (!test_bit(__NX_FW_ATTACHED, &adapter->state))
+ return 0;
+
memset(&req, 0, sizeof(nx_nic_req_t));
req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 7f999671c7b..e8993a76a08 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -964,6 +964,35 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
return 0;
}
+#define NETXEN_MIN_P3_FW_SUPP NETXEN_VERSION_CODE(4, 0, 505)
+
+int
+netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter)
+{
+ u32 flash_fw_ver, min_fw_ver;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ if (netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
+ dev_err(&adapter->pdev->dev, "Unable to read flash fw"
+ "version\n");
+ return -EIO;
+ }
+
+ flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
+ min_fw_ver = NETXEN_MIN_P3_FW_SUPP;
+ if (flash_fw_ver >= min_fw_ver)
+ return 0;
+
+ dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported"
+ "[4.0.505]. Please update firmware on flash\n",
+ _major(flash_fw_ver), _minor(flash_fw_ver),
+ _build(flash_fw_ver));
+ return -EINVAL;
+}
+
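netxen_check_flash_fw_compatibility() compares versions packed with NETXEN_VERSION_CODE(a, b, c) = (a << 24) + (b << 16) + c, so an ordinary integer comparison orders them by major, then minor, then build. A standalone sketch of that packing and the 4.0.505 floor check; NETXEN_VERSION_CODE and _major are copied from netxen_nic.h above, while _minor and _build are written here by analogy with _major and the sample flash version is made up:

    #include <stdio.h>

    #define NETXEN_VERSION_CODE(a, b, c)  (((a) << 24) + ((b) << 16) + (c))
    #define _major(v)  (((v) >> 24) & 0xff)
    #define _minor(v)  (((v) >> 16) & 0xff)   /* assumed analogue of _major */
    #define _build(v)  ((v) & 0xffff)         /* assumed analogue of _major */

    #define NETXEN_MIN_P3_FW_SUPP  NETXEN_VERSION_CODE(4, 0, 505)

    int main(void)
    {
        unsigned int flash_fw_ver = NETXEN_VERSION_CODE(4, 0, 420);  /* sample */

        if (flash_fw_ver < NETXEN_MIN_P3_FW_SUPP)
            printf("flash fw %d.%d.%d is below the 4.0.505 minimum\n",
                   _major(flash_fw_ver), _minor(flash_fw_ver),
                   _build(flash_fw_ver));
        else
            printf("flash fw %d.%d.%d is supported\n",
                   _major(flash_fw_ver), _minor(flash_fw_ver),
                   _build(flash_fw_ver));
        return 0;
    }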
static char *fw_name[] = {
NX_P2_MN_ROMIMAGE_NAME,
NX_P3_CT_ROMIMAGE_NAME,
@@ -1071,10 +1100,12 @@ static int
netxen_validate_firmware(struct netxen_adapter *adapter)
{
__le32 val;
- u32 ver, min_ver, bios;
+ __le32 flash_fw_ver;
+ u32 file_fw_ver, min_ver, bios;
struct pci_dev *pdev = adapter->pdev;
const struct firmware *fw = adapter->fw;
u8 fw_type = adapter->fw_type;
+ u32 crbinit_fix_fw;
if (fw_type == NX_UNIFIED_ROMIMAGE) {
if (netxen_nic_validate_unified_romimage(adapter))
@@ -1091,16 +1122,18 @@ netxen_validate_firmware(struct netxen_adapter *adapter)
val = nx_get_fw_version(adapter);
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- min_ver = NETXEN_VERSION_CODE(4, 0, 216);
+ min_ver = NETXEN_MIN_P3_FW_SUPP;
else
min_ver = NETXEN_VERSION_CODE(3, 4, 216);
- ver = NETXEN_DECODE_VERSION(val);
+ file_fw_ver = NETXEN_DECODE_VERSION(val);
- if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
+ if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) ||
+ (file_fw_ver < min_ver)) {
dev_err(&pdev->dev,
"%s: firmware version %d.%d.%d unsupported\n",
- fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
+ fw_name[fw_type], _major(file_fw_ver), _minor(file_fw_ver),
+ _build(file_fw_ver));
return -EINVAL;
}
@@ -1112,17 +1145,34 @@ netxen_validate_firmware(struct netxen_adapter *adapter)
return -EINVAL;
}
- /* check if flashed firmware is newer */
if (netxen_rom_fast_read(adapter,
- NX_FW_VERSION_OFFSET, (int *)&val))
+ NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
+ dev_err(&pdev->dev, "Unable to read flash fw version\n");
return -EIO;
- val = NETXEN_DECODE_VERSION(val);
- if (val > ver) {
- dev_info(&pdev->dev, "%s: firmware is older than flash\n",
- fw_name[fw_type]);
+ }
+ flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
+
+ /* New fw from file is not allowed, if fw on flash is < 4.0.554 */
+ crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554);
+ if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw &&
+ NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ dev_err(&pdev->dev, "Incompatibility detected between driver "
+ "and firmware version on flash. This configuration "
+ "is not recommended. Please update the firmware on "
+ "flash immediately\n");
return -EINVAL;
}
+ /* check if flashed firmware is newer only for no-mn and P2 case*/
+ if (!netxen_p3_has_mn(adapter) ||
+ NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ if (flash_fw_ver > file_fw_ver) {
+ dev_info(&pdev->dev, "%s: firmware is older than flash\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+ }
+
NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
return 0;
}
@@ -1279,7 +1329,7 @@ void netxen_free_dummy_dma(struct netxen_adapter *adapter)
if (--i == 0)
break;
- };
+ }
}
if (i) {
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index c0788a31ff0..f574edff7fc 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -92,7 +92,8 @@ static irqreturn_t netxen_msi_intr(int irq, void *data);
static irqreturn_t netxen_msix_intr(int irq, void *data);
static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
-static struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
+static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
static int netxen_nic_set_mac(struct net_device *netdev, void *p);
/* PCI Device ID Table */
@@ -520,7 +521,7 @@ static const struct net_device_ops netxen_netdev_ops = {
.ndo_open = netxen_nic_open,
.ndo_stop = netxen_nic_close,
.ndo_start_xmit = netxen_nic_xmit_frame,
- .ndo_get_stats = netxen_nic_get_stats,
+ .ndo_get_stats64 = netxen_nic_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = netxen_set_multicast_list,
.ndo_set_mac_address = netxen_nic_set_mac,
@@ -1387,6 +1388,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
+ err = netxen_check_flash_fw_compatibility(adapter);
+ if (err)
+ goto err_out_iounmap;
+
if (adapter->portnum == 0) {
val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
if (val != 0xffffffff && val != 0) {
@@ -2110,10 +2115,10 @@ request_reset:
clear_bit(__NX_RESETTING, &adapter->state);
}
-static struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
+static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct net_device_stats *stats = &netdev->stats;
stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
stats->tx_packets = adapter->stats.xmitfinished;
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index cc25bff0bd3..ed47585a686 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
@@ -6248,9 +6249,10 @@ static void niu_sync_mac_stats(struct niu *np)
niu_sync_bmac_stats(np);
}
-static void niu_get_rx_stats(struct niu *np)
+static void niu_get_rx_stats(struct niu *np,
+ struct rtnl_link_stats64 *stats)
{
- unsigned long pkts, dropped, errors, bytes;
+ u64 pkts, dropped, errors, bytes;
struct rx_ring_info *rx_rings;
int i;
@@ -6272,15 +6274,16 @@ static void niu_get_rx_stats(struct niu *np)
}
no_rings:
- np->dev->stats.rx_packets = pkts;
- np->dev->stats.rx_bytes = bytes;
- np->dev->stats.rx_dropped = dropped;
- np->dev->stats.rx_errors = errors;
+ stats->rx_packets = pkts;
+ stats->rx_bytes = bytes;
+ stats->rx_dropped = dropped;
+ stats->rx_errors = errors;
}
-static void niu_get_tx_stats(struct niu *np)
+static void niu_get_tx_stats(struct niu *np,
+ struct rtnl_link_stats64 *stats)
{
- unsigned long pkts, errors, bytes;
+ u64 pkts, errors, bytes;
struct tx_ring_info *tx_rings;
int i;
@@ -6299,20 +6302,22 @@ static void niu_get_tx_stats(struct niu *np)
}
no_rings:
- np->dev->stats.tx_packets = pkts;
- np->dev->stats.tx_bytes = bytes;
- np->dev->stats.tx_errors = errors;
+ stats->tx_packets = pkts;
+ stats->tx_bytes = bytes;
+ stats->tx_errors = errors;
}
-static struct net_device_stats *niu_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct niu *np = netdev_priv(dev);
if (netif_running(dev)) {
- niu_get_rx_stats(np);
- niu_get_tx_stats(np);
+ niu_get_rx_stats(np, stats);
+ niu_get_tx_stats(np, stats);
}
- return &dev->stats;
+
+ return stats;
}
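The niu hunks switch the statistics path from net_device_stats, whose counters are unsigned long, to rtnl_link_stats64 with u64 fields. On a 32-bit kernel an unsigned long byte counter wraps within seconds at multi-gigabit line rate, which is the usual motivation for the 64-bit API; a rough back-of-the-envelope check (the 10 Gb/s figure is only an illustrative assumption):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        double rate_bytes_per_sec = 10e9 / 8.0;      /* assume a 10 Gb/s link */
        double u32_wrap = (double)UINT32_MAX + 1.0;

        /* A 32-bit byte counter wraps in a few seconds at line rate. */
        printf("32-bit rx_bytes wraps after ~%.1f seconds\n",
               u32_wrap / rate_bytes_per_sec);
        /* A 64-bit counter takes centuries at the same rate. */
        printf("64-bit rx_bytes wraps after ~%.0f years\n",
               ((double)UINT64_MAX + 1.0) / rate_bytes_per_sec
               / (365.25 * 24 * 3600));
        return 0;
    }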
static void niu_load_hash_xmac(struct niu *np, u16 *hash)
@@ -9196,7 +9201,7 @@ static int __devinit niu_ldg_init(struct niu *np)
first_chan = 0;
for (i = 0; i < port; i++)
- first_chan += parent->rxchan_per_port[port];
+ first_chan += parent->rxchan_per_port[i];
num_chan = parent->rxchan_per_port[port];
for (i = first_chan; i < (first_chan + num_chan); i++) {
@@ -9212,7 +9217,7 @@ static int __devinit niu_ldg_init(struct niu *np)
first_chan = 0;
for (i = 0; i < port; i++)
- first_chan += parent->txchan_per_port[port];
+ first_chan += parent->txchan_per_port[i];
num_chan = parent->txchan_per_port[port];
for (i = first_chan; i < (first_chan + num_chan); i++) {
err = niu_ldg_assign_ldn(np, parent,
@@ -9710,7 +9715,7 @@ static const struct net_device_ops niu_netdev_ops = {
.ndo_open = niu_open,
.ndo_stop = niu_close,
.ndo_start_xmit = niu_start_xmit,
- .ndo_get_stats = niu_get_stats,
+ .ndo_get_stats64 = niu_get_stats,
.ndo_set_multicast_list = niu_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = niu_set_mac_addr,
@@ -9792,7 +9797,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
goto err_out_disable_pdev;
}
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(pdev);
if (pos <= 0) {
dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
goto err_out_free_res;
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 3e4040f2f3c..e736aec588f 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -106,6 +106,7 @@
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/ip.h> /* for iph */
#include <linux/in.h> /* for IPPROTO_... */
#include <linux/compiler.h>
@@ -429,10 +430,6 @@ struct ns83820 {
struct pci_dev *pci_dev;
struct net_device *ndev;
-#ifdef NS83820_VLAN_ACCEL_SUPPORT
- struct vlan_group *vlgrp;
-#endif
-
struct rx_info rx_info;
struct tasklet_struct rx_tasklet;
@@ -493,22 +490,6 @@ static inline void kick_rx(struct net_device *ndev)
#define start_tx_okay(dev) \
(((NR_TX_DESC-2 + dev->tx_done_idx - dev->tx_free_idx) % NR_TX_DESC) > MIN_TX_DESC_FREE)
-
-#ifdef NS83820_VLAN_ACCEL_SUPPORT
-static void ns83820_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
-{
- struct ns83820 *dev = PRIV(ndev);
-
- spin_lock_irq(&dev->misc_lock);
- spin_lock(&dev->tx_lock);
-
- dev->vlgrp = grp;
-
- spin_unlock(&dev->tx_lock);
- spin_unlock_irq(&dev->misc_lock);
-}
-#endif
-
/* Packet Receiver
*
* The hardware supports linked lists of receive descriptors for
@@ -929,14 +910,12 @@ static void rx_irq(struct net_device *ndev)
#ifdef NS83820_VLAN_ACCEL_SUPPORT
if(extsts & EXTSTS_VPKT) {
unsigned short tag;
+
tag = ntohs(extsts & EXTSTS_VTG_MASK);
- rx_rc = vlan_hwaccel_rx(skb,dev->vlgrp,tag);
- } else {
- rx_rc = netif_rx(skb);
+ __vlan_hwaccel_put_tag(skb, tag);
}
-#else
- rx_rc = netif_rx(skb);
#endif
+ rx_rc = netif_rx(skb);
if (NET_RX_DROP == rx_rc) {
netdev_mangle_me_harder_failed:
ndev->stats.rx_dropped++;
@@ -1960,11 +1939,8 @@ static const struct net_device_ops netdev_ops = {
.ndo_change_mtu = ns83820_change_mtu,
.ndo_set_multicast_list = ns83820_set_multicast,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_tx_timeout = ns83820_tx_timeout,
-#ifdef NS83820_VLAN_ACCEL_SUPPORT
- .ndo_vlan_rx_register = ns83820_vlan_rx_register,
-#endif
};
static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index b264f0f4560..429e08c84e9 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -9,6 +9,7 @@
#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 1cd9394c335..cffbc0373fa 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -809,7 +809,7 @@ static int smc91c92_config(struct pcmcia_device *link)
struct net_device *dev = link->priv;
struct smc_private *smc = netdev_priv(dev);
char *name;
- int i, j, rev;
+ int i, rev, j = 0;
unsigned int ioaddr;
u_long mir;
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index b48aba9e422..80b6f36a807 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -82,7 +82,7 @@ static int cards_found;
/*
* VLB I/O addresses
*/
-static unsigned int pcnet32_portlist[] __initdata =
+static unsigned int pcnet32_portlist[] =
{ 0x300, 0x320, 0x340, 0x360, 0 };
static int pcnet32_debug;
@@ -2570,7 +2570,6 @@ static void pcnet32_load_multicast(struct net_device *dev)
volatile __le16 *mcast_table = (__le16 *)ib->filter;
struct netdev_hw_addr *ha;
unsigned long ioaddr = dev->base_addr;
- char *addrs;
int i;
u32 crc;
@@ -2590,13 +2589,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
/* Add addresses */
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
-
- /* multicast address? */
- if (!(*addrs & 1))
- continue;
-
- crc = ether_crc_le(6, addrs);
+ crc = ether_crc_le(6, ha->addr);
crc = crc >> 26;
mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
}
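The pcnet32 hunk drops the now-redundant multicast-bit test and hashes each address directly: the top 6 bits of the little-endian CRC over the MAC select one bit in the 64-bit logical-address filter, stored as four 16-bit words. A standalone sketch of that hashing; the CRC routine below is a re-implementation using the usual reflected 0xedb88320 polynomial and 0xffffffff seed, which is an assumption about ether_crc_le() rather than a copy of it:

    #include <stdio.h>
    #include <stdint.h>

    /* Bitwise little-endian Ethernet CRC-32 (assumed ether_crc_le equivalent). */
    static uint32_t crc32_le_bytes(const uint8_t *data, int len)
    {
        uint32_t crc = 0xffffffff;

        while (len-- > 0) {
            uint8_t byte = *data++;
            for (int bit = 0; bit < 8; bit++, byte >>= 1) {
                if ((crc ^ byte) & 1)
                    crc = (crc >> 1) ^ 0xedb88320;
                else
                    crc >>= 1;
            }
        }
        return crc;
    }

    int main(void)
    {
        uint16_t mcast_table[4] = { 0 };                   /* 64-bit filter */
        uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };  /* sample */

        uint32_t crc = crc32_le_bytes(mac, 6) >> 26;       /* top 6 bits */
        mcast_table[crc >> 4] |= (uint16_t)(1 << (crc & 0xf));

        printf("hash=%u -> word %u, bit %u\n",
               (unsigned)crc, (unsigned)(crc >> 4), (unsigned)(crc & 0xf));
        return 0;
    }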
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 2cd8dc5847b..cb6e0b486b1 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -34,8 +34,7 @@
#define PAGESEL 0x13
#define LAYER4 0x02
#define LAYER2 0x01
-#define MAX_RXTS 4
-#define MAX_TXTS 4
+#define MAX_RXTS 64
#define N_EXT_TS 1
#define PSF_PTPVER 2
#define PSF_EVNT 0x4000
@@ -218,7 +217,7 @@ static void phy2rxts(struct phy_rxts *p, struct rxts *rxts)
rxts->seqid = p->seqid;
rxts->msgtype = (p->msgtype >> 12) & 0xf;
rxts->hash = p->msgtype & 0x0fff;
- rxts->tmo = jiffies + HZ;
+ rxts->tmo = jiffies + 2;
}
static u64 phy2txts(struct phy_txts *p)
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 9a09e24c30b..d4cbc2922b2 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -109,11 +109,7 @@ static int ip1001_config_init(struct phy_device *phydev)
value = phy_read(phydev, 16);
value |= 0x3;
- err = phy_write(phydev, 16, value);
- if (err < 0)
- return err;
-
- return err;
+ return phy_write(phydev, 16, value);
}
static int ip175c_read_status(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a4759576075..3cbda0851f8 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -33,7 +33,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index ca4df7f4cf2..a9e9ca8a86e 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -152,7 +152,7 @@ static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned len);
static int plip_hard_header_cache(const struct neighbour *neigh,
- struct hh_cache *hh);
+ struct hh_cache *hh, __be16 type);
static int plip_open(struct net_device *dev);
static int plip_close(struct net_device *dev);
static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
@@ -1026,11 +1026,11 @@ plip_hard_header(struct sk_buff *skb, struct net_device *dev,
}
static int plip_hard_header_cache(const struct neighbour *neigh,
- struct hh_cache *hh)
+ struct hh_cache *hh, __be16 type)
{
int ret;
- ret = eth_header_cache(neigh, hh);
+ ret = eth_header_cache(neigh, hh, type);
if (ret == 0) {
struct ethhdr *eth;
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index c554a397e55..c6ba6438082 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -30,6 +30,7 @@
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 31e9407a073..1dbdf82a6df 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -305,7 +305,7 @@ static void z_decomp_free(void *arg)
if (state) {
zlib_inflateEnd(&state->strm);
- kfree(state->strm.workspace);
+ vfree(state->strm.workspace);
kfree(state);
}
}
@@ -345,8 +345,7 @@ static void *z_decomp_alloc(unsigned char *options, int opt_len)
state->w_size = w_size;
state->strm.next_out = NULL;
- state->strm.workspace = kmalloc(zlib_inflate_workspacesize(),
- GFP_KERNEL|__GFP_REPEAT);
+ state->strm.workspace = vmalloc(zlib_inflate_workspacesize());
if (state->strm.workspace == NULL)
goto out_free;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 4609bc0e2f5..10e5d985afa 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -48,7 +48,7 @@
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <net/slhc_vj.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 2573f525f11..736a39ee05b 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -44,6 +44,7 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h>
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 718879b35b7..bc9a4bb3198 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -348,8 +348,9 @@ static int pppoe_device_event(struct notifier_block *this,
/* Only look at sockets that are using this specific device. */
switch (event) {
+ case NETDEV_CHANGEADDR:
case NETDEV_CHANGEMTU:
- /* A change in mtu is a bad thing, requiring
+ /* A change in mtu or address is a bad thing, requiring
* LCP re-negotiation.
*/
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index 1286fe212dc..eae542a7e98 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -30,7 +30,6 @@
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
-#include <linux/version.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index b1f251da153..d82a82d9870 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -28,6 +28,7 @@
#undef DEBUG
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -837,9 +838,11 @@ static int gelic_card_kick_txdma(struct gelic_card *card,
card->tx_dma_progress = 1;
status = lv1_net_start_tx_dma(bus_id(card), dev_id(card),
descr->bus_addr, 0);
- if (status)
+ if (status) {
+ card->tx_dma_progress = 0;
dev_info(ctodev(card), "lv1_net_start_txdma failed," \
"status=%d\n", status);
+ }
}
return status;
}
@@ -875,7 +878,7 @@ int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
result = gelic_descr_prepare_tx(card, descr, skb);
if (result) {
/*
- * DMA map failed. As chanses are that failure
+ * DMA map failed. As chances are that failure
* would continue, just release skb and return
*/
netdev->stats.tx_dropped++;
@@ -896,12 +899,16 @@ int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
if (gelic_card_kick_txdma(card, descr)) {
/*
* kick failed.
- * release descriptors which were just prepared
+ * release descriptor which was just prepared
*/
netdev->stats.tx_dropped++;
+ /* don't trigger BUG_ON() in gelic_descr_release_tx */
+ descr->data_status = cpu_to_be32(GELIC_DESCR_TX_TAIL);
gelic_descr_release_tx(card, descr);
- gelic_descr_release_tx(card, descr->next);
- card->tx_chain.tail = descr->next->next;
+ /* reset head */
+ card->tx_chain.head = descr;
+ /* reset hw termination */
+ descr->prev->next_descr_addr = 0;
dev_info(ctodev(card), "%s: kick failure\n", __func__);
}
@@ -986,10 +993,6 @@ static int gelic_card_decode_one_descr(struct gelic_card *card)
int dmac_chain_ended;
status = gelic_descr_get_status(descr);
- /* is this descriptor terminated with next_descr == NULL? */
- dmac_chain_ended =
- be32_to_cpu(descr->dmac_cmd_status) &
- GELIC_DESCR_RX_DMA_CHAIN_END;
if (status == GELIC_DESCR_DMA_CARDOWNED)
return 0;
@@ -1009,7 +1012,7 @@ static int gelic_card_decode_one_descr(struct gelic_card *card)
netdev = card->netdev[i];
break;
}
- };
+ }
if (GELIC_PORT_MAX <= i) {
pr_info("%s: unknown packet vid=%x\n", __func__, vid);
goto refill;
@@ -1040,7 +1043,7 @@ static int gelic_card_decode_one_descr(struct gelic_card *card)
goto refill;
}
/*
- * descriptoers any other than FRAME_END here should
+ * descriptors any other than FRAME_END here should
* be treated as error.
*/
if (status != GELIC_DESCR_DMA_FRAME_END) {
@@ -1052,6 +1055,11 @@ static int gelic_card_decode_one_descr(struct gelic_card *card)
/* ok, we've got a packet in descr */
gelic_net_pass_skb_up(descr, card, netdev);
refill:
+
+ /* is the current descriptor terminated with next_descr == NULL? */
+ dmac_chain_ended =
+ be32_to_cpu(descr->dmac_cmd_status) &
+ GELIC_DESCR_RX_DMA_CHAIN_END;
/*
* So that always DMAC can see the end
* of the descriptor chain to avoid
@@ -1080,10 +1088,9 @@ refill:
* If dmac chain was met, DMAC stopped.
* thus re-enable it
*/
- if (dmac_chain_ended) {
- card->rx_dma_restart_required = 1;
- dev_dbg(ctodev(card), "reenable rx dma scheduled\n");
- }
+
+ if (dmac_chain_ended)
+ gelic_card_enable_rxdmac(card);
return 1;
}
@@ -1149,11 +1156,6 @@ static irqreturn_t gelic_card_interrupt(int irq, void *ptr)
status &= card->irq_mask;
- if (card->rx_dma_restart_required) {
- card->rx_dma_restart_required = 0;
- gelic_card_enable_rxdmac(card);
- }
-
if (status & GELIC_CARD_RXINT) {
gelic_card_rx_irq_off(card);
napi_schedule(&card->napi);
@@ -1199,7 +1201,7 @@ void gelic_net_poll_controller(struct net_device *netdev)
#endif /* CONFIG_NET_POLL_CONTROLLER */
/**
- * gelic_net_open - called upon ifonfig up
+ * gelic_net_open - called upon ifconfig up
* @netdev: interface device structure
*
* returns 0 on success, <0 on failure
diff --git a/drivers/net/ps3_gelic_net.h b/drivers/net/ps3_gelic_net.h
index d9a55b93898..d3fadfbc3bc 100644
--- a/drivers/net/ps3_gelic_net.h
+++ b/drivers/net/ps3_gelic_net.h
@@ -289,7 +289,6 @@ struct gelic_card {
struct gelic_descr_chain tx_chain;
struct gelic_descr_chain rx_chain;
- int rx_dma_restart_required;
/*
* tx_lock guards tx descriptor list and
* tx_dma_progress.
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 5f597ca592b..1a3033d8e7e 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -502,7 +502,7 @@ static int add_del_hash_entry(struct pxa168_eth_private *pep,
* Pick the appropriate table, start scanning for free/reusable
* entries at the index obtained by hashing the specified MAC address
*/
- start = (struct addr_table_entry *)(pep->htpr);
+ start = pep->htpr;
entry = start + hash_function(mac_addr);
for (i = 0; i < HOP_NUMBER; i++) {
if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
@@ -1267,6 +1267,9 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
pep->tx_skb[tx_index] = skb;
desc->byte_cnt = length;
desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+
+ skb_tx_timestamp(skb);
+
wmb();
desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
@@ -1502,7 +1505,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
err = -ENODEV;
goto err_netdev;
}
- pep->base = ioremap(res->start, res->end - res->start + 1);
+ pep->base = ioremap(res->start, resource_size(res));
if (pep->base == NULL) {
err = -ENOMEM;
goto err_netdev;
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 771bb614ccc..2f6914025ef 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2873,7 +2873,7 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
PAGE_SIZE, &qdev->shadow_reg_phy_addr);
if (qdev->shadow_reg_virt_addr != NULL) {
- qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
+ qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
qdev->req_consumer_index_phy_addr_high =
MS_64BITS(qdev->shadow_reg_phy_addr);
qdev->req_consumer_index_phy_addr_low =
@@ -3114,8 +3114,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
qdev->small_buf_release_cnt = 8;
qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
qdev->lrg_buf_release_cnt = 8;
- qdev->lrg_buf_next_free =
- (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
+ qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
qdev->small_buf_index = 0;
qdev->lrg_buf_index = 0;
qdev->lrg_buf_free_count = 0;
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 480ef5cb6ef..baf646d98fa 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 18
-#define QLCNIC_LINUX_VERSIONID "5.0.18"
+#define _QLCNIC_LINUX_SUBVERSION 21
+#define QLCNIC_LINUX_VERSIONID "5.0.21"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -429,6 +429,7 @@ struct qlcnic_dump_template_hdr {
struct qlcnic_fw_dump {
u8 clr; /* flag to indicate if dump is cleared */
+ u8 enable; /* enable/disable dump */
u32 size; /* total size of the dump */
void *data; /* dump data area */
struct qlcnic_dump_template_hdr *tmpl_hdr;
@@ -450,6 +451,7 @@ struct qlcnic_hardware_context {
u8 revision_id;
u8 pci_func;
u8 linkup;
+ u8 loopback_state;
u16 port_type;
u16 board_type;
@@ -779,6 +781,14 @@ struct qlcnic_mac_list_s {
#define QLCNIC_IP_UP 2
#define QLCNIC_IP_DOWN 3
+#define QLCNIC_ILB_MODE 0x1
+#define QLCNIC_ELB_MODE 0x2
+
+#define QLCNIC_LINKEVENT 0x1
+#define QLCNIC_LB_RESPONSE 0x2
+#define QLCNIC_IS_LB_CONFIGURED(VAL) \
+ (VAL == (QLCNIC_LINKEVENT | QLCNIC_LB_RESPONSE))
+
/*
* Driver --> Firmware
*/
@@ -788,13 +798,17 @@ struct qlcnic_mac_list_s {
#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7
#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc
#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12
+
#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15
#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17
#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18
+#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 0x13
+
/*
* Firmware --> Driver
*/
+#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
@@ -808,6 +822,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAPABILITY_BDG BIT_8
#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
+#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -895,11 +910,11 @@ struct qlcnic_ipaddr {
#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
#define QLCNIC_PROMISC_DISABLED 0x800
#define QLCNIC_NEED_FLR 0x1000
+#define QLCNIC_FW_RESET_OWNER 0x2000
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
-#define QLCNIC_MIN_NUM_RSS_RINGS 2
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
#define QLCNIC_MSIX_TBL_PGSIZE 4096
@@ -922,6 +937,12 @@ struct qlcnic_ipaddr {
#define QLCNIC_READD_AGE 20
#define QLCNIC_LB_MAX_FILTERS 64
+/* QLCNIC Driver Error Code */
+#define QLCNIC_FW_NOT_RESPOND 51
+#define QLCNIC_TEST_IN_PROGRESS 52
+#define QLCNIC_UNDEFINED_ERROR 53
+#define QLCNIC_LB_CABLE_NOT_CONN 54
+
struct qlcnic_filter {
struct hlist_node fnode;
u8 faddr[ETH_ALEN];
@@ -993,7 +1014,7 @@ struct qlcnic_adapter {
u8 max_mac_filters;
u8 dev_state;
u8 diag_test;
- u8 diag_cnt;
+ char diag_cnt;
u8 reset_ack_timeo;
u8 dev_init_timeo;
u16 msg_enable;
@@ -1001,6 +1022,7 @@ struct qlcnic_adapter {
u8 mac_addr[ETH_ALEN];
u64 dev_rst_time;
+ u8 mac_learn;
unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct qlcnic_npar_info *npars;
@@ -1219,8 +1241,7 @@ struct __ctrl {
struct __cache {
__le32 addr;
- u8 stride;
- u8 rsvd;
+ __le16 stride;
__le16 init_tag_val;
__le32 size;
__le32 no_ops;
@@ -1318,9 +1339,11 @@ enum op_codes {
#define QLCNIC_DUMP_SKIP BIT_7
#define QLCNIC_DUMP_MASK_MIN 3
-#define QLCNIC_DUMP_MASK_DEF 0x0f
+#define QLCNIC_DUMP_MASK_DEF 0x1f
#define QLCNIC_DUMP_MASK_MAX 0xff
#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
+#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
+#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
struct qlcnic_dump_operations {
enum op_codes opcode;
@@ -1428,6 +1451,12 @@ int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring);
void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
+void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
+void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter);
+int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode);
+
+/* Functions from qlcnic_ethtool.c */
+int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]);
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
@@ -1439,6 +1468,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val);
int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data);
void qlcnic_dev_request_reset(struct qlcnic_adapter *);
+void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
/* Management functions */
int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
@@ -1489,6 +1519,8 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
"NC523SFP 10Gb 2-port Server Adapter"},
{0x1077, 0x8020, 0x103c, 0x3346,
"CN1000Q Dual Port Converged Network Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x210,
+ "QME8242-k 10GbE Dual Port Mezzanine Card"},
{0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
};
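The new loopback_state byte is used as a two-bit flag word: QLCNIC_IS_LB_CONFIGURED() only evaluates true once both the link event and the firmware's loopback response have been recorded. A small standalone sketch of that bit accounting, reusing the defines added above:

	#include <stdio.h>

	#define QLCNIC_LINKEVENT	0x1
	#define QLCNIC_LB_RESPONSE	0x2
	#define QLCNIC_IS_LB_CONFIGURED(VAL) \
		(VAL == (QLCNIC_LINKEVENT | QLCNIC_LB_RESPONSE))

	int main(void)
	{
		unsigned char loopback_state = 0;

		loopback_state |= QLCNIC_LINKEVENT;	/* link event seen */
		printf("configured? %d\n", QLCNIC_IS_LB_CONFIGURED(loopback_state)); /* 0 */

		loopback_state |= QLCNIC_LB_RESPONSE;	/* firmware acked loopback config */
		printf("configured? %d\n", QLCNIC_IS_LB_CONFIGURED(loopback_state)); /* 1 */
		return 0;
	}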
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index bab041a5c75..b0d32ddd2cc 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -95,8 +95,8 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
QLCNIC_CDRP_CMD_TEMP_SIZE);
if (err != QLCNIC_RCODE_SUCCESS) {
err = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
- dev_err(&adapter->pdev->dev,
- "Failed to get template size %d\n", err);
+ dev_info(&adapter->pdev->dev,
+ "Can't get template size %d\n", err);
err = -EIO;
return err;
}
@@ -126,7 +126,7 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
err = -EIO;
goto error;
}
- tmp_tmpl = (struct qlcnic_dump_template_hdr *) tmp_addr;
+ tmp_tmpl = tmp_addr;
csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size);
if (csum) {
dev_err(&adapter->pdev->dev,
@@ -139,17 +139,14 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
err = -EIO;
goto error;
}
- tmp_buf = (u32 *) tmp_addr;
+ tmp_buf = tmp_addr;
template = (u32 *) ahw->fw_dump.tmpl_hdr;
for (i = 0; i < temp_size/sizeof(u32); i++)
*template++ = __le32_to_cpu(*tmp_buf++);
tmpl_hdr = ahw->fw_dump.tmpl_hdr;
- if (tmpl_hdr->cap_mask > QLCNIC_DUMP_MASK_DEF &&
- tmpl_hdr->cap_mask <= QLCNIC_DUMP_MASK_MAX)
- tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
- else
- tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
+ tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
+ ahw->fw_dump.enable = 1;
error:
dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
return err;
@@ -214,7 +211,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
&hostrq_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
- prq = (struct qlcnic_hostrq_rx_ctx *)addr;
+ prq = addr;
addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
&cardrsp_phys_addr, GFP_KERNEL);
@@ -222,7 +219,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
err = -ENOMEM;
goto out_free_rq;
}
- prsp = (struct qlcnic_cardrsp_rx_ctx *)addr;
+ prsp = addr;
prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
@@ -380,10 +377,10 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
}
memset(rq_addr, 0, rq_size);
- prq = (struct qlcnic_hostrq_tx_ctx *)rq_addr;
+ prq = rq_addr;
memset(rsp_addr, 0, rsp_size);
- prsp = (struct qlcnic_cardrsp_tx_ctx *)rsp_addr;
+ prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
@@ -493,7 +490,7 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
goto err_out_free;
}
- tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
+ tx_ring->desc_head = addr;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
@@ -506,7 +503,7 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
err = -ENOMEM;
goto err_out_free;
}
- rds_ring->desc_head = (struct rcv_desc *)addr;
+ rds_ring->desc_head = addr;
}
@@ -522,7 +519,7 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
err = -ENOMEM;
goto err_out_free;
}
- sds_ring->desc_head = (struct status_desc *)addr;
+ sds_ring->desc_head = addr;
}
return 0;
@@ -662,7 +659,7 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
return -ENOMEM;
memset(nic_info_addr, 0, nic_size);
- nic_info = (struct qlcnic_info *) nic_info_addr;
+ nic_info = nic_info_addr;
err = qlcnic_issue_cmd(adapter,
adapter->ahw->pci_func,
adapter->fw_hal_version,
@@ -720,7 +717,7 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
return -ENOMEM;
memset(nic_info_addr, 0, nic_size);
- nic_info = (struct qlcnic_info *)nic_info_addr;
+ nic_info = nic_info_addr;
nic_info->pci_func = cpu_to_le16(nic->pci_func);
nic_info->op_mode = cpu_to_le16(nic->op_mode);
@@ -769,7 +766,7 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
return -ENOMEM;
memset(pci_info_addr, 0, pci_size);
- npar = (struct qlcnic_pci_info *) pci_info_addr;
+ npar = pci_info_addr;
err = qlcnic_issue_cmd(adapter,
adapter->ahw->pci_func,
adapter->fw_hal_version,
@@ -877,7 +874,7 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
if (!err) {
- stats = (struct __qlcnic_esw_statistics *)stats_addr;
+ stats = stats_addr;
esw_stats->context_id = le16_to_cpu(stats->context_id);
esw_stats->version = le16_to_cpu(stats->version);
esw_stats->size = le16_to_cpu(stats->size);
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 9efc690a289..72a723d5c98 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -84,7 +84,9 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
"Register_Test_on_offline",
"Link_Test_on_offline",
- "Interrupt_Test_offline"
+ "Interrupt_Test_offline",
+ "Internal_Loopback_offline",
+ "External_Loopback_offline"
};
#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
@@ -685,6 +687,129 @@ clear_it:
return ret;
}
+#define QLCNIC_ILB_PKT_SIZE 64
+#define QLCNIC_NUM_ILB_PKT 16
+#define QLCNIC_ILB_MAX_RCV_LOOP 10
+
+static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[])
+{
+ unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
+
+ memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE);
+
+ memcpy(data, mac, ETH_ALEN);
+ memcpy(data + ETH_ALEN, mac, ETH_ALEN);
+
+ memcpy(data + 2 * ETH_ALEN, random_data, sizeof(random_data));
+}
+
+int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
+{
+ unsigned char buff[QLCNIC_ILB_PKT_SIZE];
+ qlcnic_create_loopback_buff(buff, mac);
+ return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
+}
+
+static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
+ struct sk_buff *skb;
+ int i, loop, cnt = 0;
+
+ for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
+ skb = dev_alloc_skb(QLCNIC_ILB_PKT_SIZE);
+ qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
+ skb_put(skb, QLCNIC_ILB_PKT_SIZE);
+
+ adapter->diag_cnt = 0;
+ qlcnic_xmit_frame(skb, adapter->netdev);
+
+ loop = 0;
+ do {
+ msleep(1);
+ qlcnic_process_rcv_ring_diag(sds_ring);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP)
+ break;
+ } while (!adapter->diag_cnt);
+
+ dev_kfree_skb_any(skb);
+
+ if (!adapter->diag_cnt)
+ dev_warn(&adapter->pdev->dev, "LB Test: packet %d"
+ " not received\n", i + 1);
+ else
+ cnt++;
+ }
+ if (cnt != i) {
+ dev_warn(&adapter->pdev->dev, "LB Test failed\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_sds_rings = adapter->max_sds_rings;
+ struct qlcnic_host_sds_ring *sds_ring;
+ int loop = 0;
+ int ret;
+
+ if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
+ netdev_info(netdev, "Firmware is not loopback test capable\n");
+ return -EOPNOTSUPP;
+ }
+
+ netdev_info(netdev, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+ if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ netdev_warn(netdev, "Loopback test not supported for non "
+ "privilege function\n");
+ return 0;
+ }
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+ if (ret)
+ goto clear_it;
+
+ sds_ring = &adapter->recv_ctx->sds_rings[0];
+
+ ret = qlcnic_set_lb_mode(adapter, mode);
+ if (ret)
+ goto free_res;
+
+ adapter->diag_cnt = 0;
+ do {
+ msleep(500);
+ qlcnic_process_rcv_ring_diag(sds_ring);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+ netdev_info(netdev, "firmware didnt respond to loopback"
+ " configure request\n");
+ ret = -QLCNIC_FW_NOT_RESPOND;
+ goto free_res;
+ } else if (adapter->diag_cnt) {
+ ret = adapter->diag_cnt;
+ goto free_res;
+ }
+ } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state));
+
+ ret = qlcnic_do_lb_test(adapter);
+
+ qlcnic_clear_lb_mode(adapter);
+
+ free_res:
+ qlcnic_diag_free_res(netdev, max_sds_rings);
+
+ clear_it:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
static void
qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
u64 *data)
@@ -704,7 +829,16 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
if (data[2])
eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE);
+ if (data[3])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
+ if (data[4])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ }
}
}
@@ -986,8 +1120,6 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
- if (qlcnic_api_lock(adapter))
- return -EIO;
if (!fw_dump->clr) {
netdev_info(netdev, "Dump not available\n");
qlcnic_api_unlock(adapter);
@@ -996,7 +1128,7 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
/* Copy template header first */
copy_sz = fw_dump->tmpl_hdr->size;
hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
- data = (u32 *) buffer;
+ data = buffer;
for (i = 0; i < copy_sz/sizeof(u32); i++)
*data++ = cpu_to_le32(*hdr_ptr++);
@@ -1009,7 +1141,6 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
vfree(fw_dump->data);
fw_dump->data = NULL;
fw_dump->clr = 0;
- qlcnic_api_unlock(adapter);
return 0;
}
@@ -1022,8 +1153,27 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
if (val->flag == QLCNIC_FORCE_FW_DUMP_KEY) {
+ if (!fw_dump->enable) {
+ netdev_info(netdev, "FW dump not enabled\n");
+ return ret;
+ }
+ if (fw_dump->clr) {
+ dev_info(&adapter->pdev->dev,
+ "Previous dump not cleared, not forcing dump\n");
+ return ret;
+ }
netdev_info(netdev, "Forcing a FW dump\n");
qlcnic_dev_request_reset(adapter);
+ } else if (val->flag == QLCNIC_DISABLE_FW_DUMP) {
+ if (fw_dump->enable) {
+ netdev_info(netdev, "Disabling FW dump\n");
+ fw_dump->enable = 0;
+ }
+ } else if (val->flag == QLCNIC_ENABLE_FW_DUMP) {
+ if (!fw_dump->enable && fw_dump->tmpl_hdr) {
+ netdev_info(netdev, "Enabling FW dump\n");
+ fw_dump->enable = 1;
+ }
} else {
if (val->flag > QLCNIC_DUMP_MASK_MAX ||
val->flag < QLCNIC_DUMP_MASK_MIN) {
@@ -1032,10 +1182,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
ret = -EINVAL;
goto out;
}
- if (qlcnic_api_lock(adapter))
- return -EIO;
fw_dump->tmpl_hdr->drv_cap_mask = val->flag & 0xff;
- qlcnic_api_unlock(adapter);
netdev_info(netdev, "Driver mask changed to: 0x%x\n",
fw_dump->tmpl_hdr->drv_cap_mask);
}
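The new enable/disable keys are consumed through the standard ethtool set-dump path, so userspace can drive them with ETHTOOL_SET_DUMP. A hedged userspace sketch, assuming the interface is named eth0 and using the QLCNIC_ENABLE_FW_DUMP key (0xaddfeed) defined above:

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_dump dump = { .cmd = ETHTOOL_SET_DUMP, .flag = 0xaddfeed };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* interface name is an assumption */
		ifr.ifr_data = (void *)&dump;

		if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			perror("ETHTOOL_SET_DUMP");
		return 0;
	}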
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index a5d9fbf9d81..4055c218ef2 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -446,6 +446,13 @@ void qlcnic_set_multi(struct net_device *netdev)
}
send_fw_cmd:
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->mac_learn = 1;
+ } else {
+ adapter->mac_learn = 0;
+ }
+
qlcnic_nic_set_promisc(adapter, mode);
}
@@ -533,6 +540,56 @@ void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
}
}
+int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+ req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
+ ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32));
+
+ req.words[0] = cpu_to_le64(flag);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n",
+ flag ? "Set" : "Reset");
+ return rv;
+}
+
+int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ if (qlcnic_set_fw_loopback(adapter, mode))
+ return -EIO;
+
+ if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
+ qlcnic_set_fw_loopback(adapter, mode);
+ return -EIO;
+ }
+
+ msleep(1000);
+ return 0;
+}
+
+void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
+{
+ int mode = VPORT_MISS_MODE_DROP;
+ struct net_device *netdev = adapter->netdev;
+
+ qlcnic_set_fw_loopback(adapter, 0);
+
+ if (netdev->flags & IFF_PROMISC)
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ else if (netdev->flags & IFF_ALLMULTI)
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+
+ qlcnic_nic_set_promisc(adapter, mode);
+ msleep(1000);
+}
+
/*
* Send the interrupt coalescing parameter set by ethtool to the card.
*/
@@ -1509,18 +1566,26 @@ qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
for (i = 0; i < l2->no_ops; i++) {
QLCNIC_WR_DUMP_REG(l2->addr, base, val);
- do {
+ if (LSW(l2->ctrl_val))
QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
LSW(l2->ctrl_val));
+ if (!poll_mask)
+ goto skip_poll;
+ do {
QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
if (!(data & poll_mask))
break;
msleep(1);
time_out++;
} while (time_out <= poll_to);
- if (time_out > poll_to)
- return -EINVAL;
+ if (time_out > poll_to) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return -EINVAL;
+ }
+skip_poll:
addr = l2->read_addr;
cnt = l2->read_addr_num;
while (cnt) {
@@ -1673,8 +1738,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
tmpl_hdr->sys_info[1] = adapter->fw_version;
for (i = 0; i < no_entries; i++) {
- entry = (struct qlcnic_dump_entry *) ((void *) tmpl_hdr +
- entry_offset);
+ entry = (void *)tmpl_hdr + entry_offset;
if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
entry->hdr.flags |= QLCNIC_DUMP_SKIP;
entry_offset += entry->hdr.offset;
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 5b8bbcf904d..ee8a3982395 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -1281,6 +1281,7 @@ qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
u16 cable_len;
u16 link_speed;
u8 link_status, module, duplex, autoneg;
+ u8 lb_status = 0;
struct net_device *netdev = adapter->netdev;
adapter->has_link_events = 1;
@@ -1292,6 +1293,7 @@ qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
link_status = msg->body[2] & 0xff;
duplex = (msg->body[2] >> 16) & 0xff;
autoneg = (msg->body[2] >> 24) & 0xff;
+ lb_status = (msg->body[2] >> 32) & 0x3;
module = (msg->body[2] >> 8) & 0xff;
if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
@@ -1301,6 +1303,10 @@ qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
dev_info(&netdev->dev, "unsupported cable length %d\n",
cable_len);
+ if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
+ lb_status == QLCNIC_ELB_MODE))
+ adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
+
qlcnic_advert_link_change(adapter, link_status);
if (duplex == LINKEVENT_FULL_DUPLEX)
@@ -1319,7 +1325,9 @@ qlcnic_handle_fw_message(int desc_cnt, int index,
{
struct qlcnic_fw_msg msg;
struct status_desc *desc;
- int i = 0, opcode;
+ struct qlcnic_adapter *adapter;
+ struct device *dev;
+ int i = 0, opcode, ret;
while (desc_cnt > 0 && i < 8) {
desc = &sds_ring->desc_head[index];
@@ -1330,10 +1338,34 @@ qlcnic_handle_fw_message(int desc_cnt, int index,
desc_cnt--;
}
+ adapter = sds_ring->adapter;
+ dev = &adapter->pdev->dev;
opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
+
switch (opcode) {
case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
- qlcnic_handle_linkevent(sds_ring->adapter, &msg);
+ qlcnic_handle_linkevent(adapter, &msg);
+ break;
+ case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
+ ret = (u32)(msg.body[1]);
+ switch (ret) {
+ case 0:
+ adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
+ break;
+ case 1:
+ dev_info(dev, "loopback already in progress\n");
+ adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
+ break;
+ case 2:
+ dev_info(dev, "loopback cable is not connected\n");
+ adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
+ break;
+ default:
+ dev_info(dev, "loopback configure request failed,"
+ " ret %x\n", ret);
+ adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
+ break;
+ }
break;
default:
break;
@@ -1746,6 +1778,103 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
spin_unlock(&rds_ring->lock);
}
+static void dump_skb(struct sk_buff *skb)
+{
+ int i;
+ unsigned char *data = skb->data;
+
+ printk(KERN_INFO "\n");
+ for (i = 0; i < skb->len; i++) {
+ printk(KERN_INFO "%02x ", data[i]);
+ if ((i & 0x0f) == 8)
+ printk(KERN_INFO "\n");
+ }
+}
+
+void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0)
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ length = qlcnic_get_sts_totallength(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return;
+
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+ adapter->diag_cnt++;
+ else
+ dump_skb(skb);
+
+ dev_kfree_skb_any(skb);
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return;
+}
+
+void
+qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ u64 sts_data0;
+ int ring, opcode, desc_cnt;
+
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ return;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+ switch (opcode) {
+ case QLCNIC_RESPONSE_DESC:
+ qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
+ break;
+ default:
+ ring = qlcnic_get_sts_type(sts_data0);
+ qlcnic_process_rcv_diag(adapter, sds_ring, ring, sts_data0);
+ break;
+ }
+
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+}
+
void
qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
u8 alt_mac, u8 *mac)
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 0f6af5c61a7..5ca1b562443 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -90,7 +90,6 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);
-static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
@@ -418,10 +417,8 @@ qlcnic_setup_intr(struct qlcnic_adapter *adapter)
int num_msix;
if (adapter->msix_supported) {
- num_msix = (num_online_cpus() >=
- QLCNIC_DEF_NUM_STS_DESC_RINGS) ?
- QLCNIC_DEF_NUM_STS_DESC_RINGS :
- QLCNIC_MIN_NUM_RSS_RINGS;
+ num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
+ QLCNIC_DEF_NUM_STS_DESC_RINGS));
} else
num_msix = 1;
@@ -1393,6 +1390,12 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
qlcnic_enable_int(sds_ring);
}
}
+
+ if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) {
+ adapter->ahw->loopback_state = 0;
+ qlcnic_linkevent_request(adapter, 1);
+ }
+
set_bit(__QLCNIC_DEV_UP, &adapter->state);
return 0;
@@ -1487,8 +1490,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
netdev->irq = adapter->msix_entries[0].vector;
- netif_carrier_off(netdev);
-
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "failed to register net device\n");
@@ -1576,6 +1577,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->dev_rst_time = jiffies;
revision_id = pdev->revision;
adapter->ahw->revision_id = revision_id;
+ adapter->mac_learn = qlcnic_mac_learn;
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
@@ -1590,10 +1592,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* This will be reset for mezz cards */
adapter->portnum = adapter->ahw->pci_func;
- /* Get FW dump template and store it */
- if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC)
- qlcnic_fw_cmd_get_minidump_temp(adapter);
-
err = qlcnic_get_board_info(adapter);
if (err) {
dev_err(&pdev->dev, "Error getting board config info.\n");
@@ -1612,6 +1610,12 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_decr_ref;
}
+ /* Get FW dump template and store it */
+ if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC)
+ if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
+ dev_info(&pdev->dev,
+ "Supports FW dump capability\n");
+
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
@@ -1650,7 +1654,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- qlcnic_alloc_lb_filters_mem(adapter);
+ if (adapter->mac_learn)
+ qlcnic_alloc_lb_filters_mem(adapter);
+
qlcnic_create_diag_entries(adapter);
return 0;
@@ -1816,6 +1822,8 @@ static int qlcnic_open(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err;
+ netif_carrier_off(netdev);
+
err = qlcnic_attach(adapter);
if (err)
return err;
@@ -1844,13 +1852,12 @@ static int qlcnic_close(struct net_device *netdev)
return 0;
}
-static void
-qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
+void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
{
void *head;
int i;
- if (!qlcnic_mac_learn)
+ if (adapter->fhash.fmax && adapter->fhash.fhead)
return;
spin_lock_init(&adapter->mac_learn_lock);
@@ -1861,7 +1868,7 @@ qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
return;
adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
- adapter->fhash.fhead = (struct hlist_head *)head;
+ adapter->fhash.fhead = head;
for (i = 0; i < adapter->fhash.fmax; i++)
INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
@@ -2280,14 +2287,14 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
goto unwind_buff;
- if (qlcnic_mac_learn)
+ if (adapter->mac_learn)
qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
- qlcnic_update_cmd_producer(adapter, tx_ring);
-
adapter->stats.txbytes += skb->len;
adapter->stats.xmitcalled++;
+ qlcnic_update_cmd_producer(adapter, tx_ring);
+
return NETDEV_TX_OK;
unwind_buff:
@@ -2683,11 +2690,16 @@ err:
static int
qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
{
- int act, state;
+ int act, state, active_mask;
state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
+ active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
+ act = act & active_mask;
+ }
+
if (((state & 0x11111111) == (act & 0x11111111)) ||
((act & 0x11111111) == ((state >> 1) & 0x11111111)))
return 0;
@@ -2800,6 +2812,7 @@ qlcnic_fwinit_work(struct work_struct *work)
struct qlcnic_adapter *adapter = container_of(work,
struct qlcnic_adapter, fw_work.work);
u32 dev_state = 0xf;
+ u32 val;
if (qlcnic_api_lock(adapter))
goto err_ret;
@@ -2834,12 +2847,22 @@ skip_ack_check:
set_bit(__QLCNIC_START_FW, &adapter->state);
QLCDB(adapter, DRV, "Restarting fw\n");
qlcnic_idc_debug_info(adapter, 0);
- QLCDB(adapter, DRV, "Take FW dump\n");
- qlcnic_dump_fw(adapter);
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_SET_RST_RDY(val, adapter->portnum);
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
}
qlcnic_api_unlock(adapter);
+ rtnl_lock();
+ if (adapter->ahw->fw_dump.enable &&
+ (adapter->flags & QLCNIC_FW_RESET_OWNER)) {
+ QLCDB(adapter, DRV, "Take FW dump\n");
+ qlcnic_dump_fw(adapter);
+ }
+ rtnl_unlock();
+
+ adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
if (!adapter->nic_ops->start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
adapter->fw_wait_cnt = 0;
@@ -2900,9 +2923,11 @@ qlcnic_detach_work(struct work_struct *work)
if (adapter->temp == QLCNIC_TEMP_PANIC)
goto err_ret;
-
- if (qlcnic_set_drv_state(adapter, adapter->dev_state))
- goto err_ret;
+ /* Don't ack if this instance is the reset owner */
+ if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
+ if (qlcnic_set_drv_state(adapter, adapter->dev_state))
+ goto err_ret;
+ }
adapter->fw_wait_cnt = 0;
@@ -2947,6 +2972,7 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
if (state == QLCNIC_DEV_READY) {
QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
+ adapter->flags |= QLCNIC_FW_RESET_OWNER;
QLCDB(adapter, DRV, "NEED_RESET state set\n");
qlcnic_idc_debug_info(adapter, 0);
}
@@ -4178,7 +4204,7 @@ qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
qlcnic_config_indev_addr(adapter, netdev, event);
for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
- dev = vlan_find_dev(netdev, vid);
+ dev = __vlan_find_dev_deep(netdev, vid);
if (!dev)
continue;
qlcnic_config_indev_addr(adapter, dev, event);
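The MSI-X sizing change near the top of this file rounds the status-ring count down to a power of two instead of choosing between two fixed values. A small worked sketch of the resulting vector counts (QLCNIC_DEF_NUM_STS_DESC_RINGS is 4; the helper below only mirrors rounddown_pow_of_two() for small positive values):

	#include <stdio.h>

	static unsigned int rounddown_pow2(unsigned int n)
	{
		unsigned int p = 1;

		while (p * 2 <= n)
			p *= 2;
		return p;
	}

	int main(void)
	{
		const unsigned int def_rings = 4;	/* QLCNIC_DEF_NUM_STS_DESC_RINGS */
		unsigned int cpus;

		for (cpus = 1; cpus <= 6; cpus++) {
			unsigned int capped = cpus < def_rings ? cpus : def_rings;
			printf("cpus=%u -> num_msix=%u\n", cpus, rounddown_pow2(capped));
		}
		/* prints 1, 2, 2, 4, 4, 4 for cpus = 1..6 */
		return 0;
	}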
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index d32850715f5..8731f79c9ef 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -7,16 +7,18 @@
#ifndef _QLGE_H_
#define _QLGE_H_
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
/*
* General definitions...
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.27.00.00-01"
+#define DRV_VERSION "v1.00.00.29.00.00-01"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@@ -1996,6 +1998,7 @@ enum {
QL_LB_LINK_UP = 10,
QL_FRC_COREDUMP = 11,
QL_EEH_FATAL = 12,
+ QL_ASIC_RECOVERY = 14, /* We are in asic recovery. */
};
/* link_status bit definitions */
@@ -2050,7 +2053,7 @@ struct ql_adapter {
struct nic_stats nic_stats;
- struct vlan_group *vlgrp;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
/* PCI Configuration information for this device */
struct pci_dev *pdev;
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 19b00fa0eaf..9b67bfea035 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -650,8 +650,6 @@ static int ql_set_pauseparam(struct net_device *netdev,
return -EINVAL;
status = ql_mb_set_port_cfg(qdev);
- if (status)
- return status;
return status;
}
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 930ae45457b..f07e96ec884 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -7,6 +7,7 @@
*/
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -33,8 +34,8 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/skbuff.h>
#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
@@ -415,7 +416,7 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
(qdev->
func << CAM_OUT_FUNC_SHIFT) |
(0 << CAM_OUT_CQ_ID_SHIFT));
- if (qdev->vlgrp)
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
cam_output |= CAM_OUT_RV;
/* route to NIC core */
ql_write32(qdev, MAC_ADDR_DATA, cam_output);
@@ -1507,10 +1508,9 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
rx_ring->rx_bytes += length;
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rx_ring->cq_id);
- if (qdev->vlgrp && (vlan_id != 0xffff))
- vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
- else
- napi_gro_frags(napi);
+ if (vlan_id != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vlan_id);
+ napi_gro_frags(napi);
}
/* Process an inbound completion from an rx ring. */
@@ -1594,17 +1594,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
}
skb_record_rx_queue(skb, rx_ring->cq_id);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- if (qdev->vlgrp && (vlan_id != 0xffff))
- vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
- else
- napi_gro_receive(napi, skb);
- } else {
- if (qdev->vlgrp && (vlan_id != 0xffff))
- vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
- else
- netif_receive_skb(skb);
- }
+ if (vlan_id != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vlan_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ napi_gro_receive(napi, skb);
+ else
+ netif_receive_skb(skb);
return;
err_out:
dev_kfree_skb_any(skb);
@@ -1707,18 +1702,12 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
}
skb_record_rx_queue(skb, rx_ring->cq_id);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- if (qdev->vlgrp && (vlan_id != 0xffff))
- vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
- vlan_id, skb);
- else
- napi_gro_receive(&rx_ring->napi, skb);
- } else {
- if (qdev->vlgrp && (vlan_id != 0xffff))
- vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
- else
- netif_receive_skb(skb);
- }
+ if (vlan_id != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vlan_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ napi_gro_receive(&rx_ring->napi, skb);
+ else
+ netif_receive_skb(skb);
}
static void ql_realign_skb(struct sk_buff *skb, int len)
@@ -2028,22 +2017,12 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
skb_record_rx_queue(skb, rx_ring->cq_id);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
- if (qdev->vlgrp &&
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
- (vlan_id != 0))
- vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
- vlan_id, skb);
- else
- napi_gro_receive(&rx_ring->napi, skb);
- } else {
- if (qdev->vlgrp &&
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
- (vlan_id != 0))
- vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
- else
- netif_receive_skb(skb);
- }
+ if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+ __vlan_hwaccel_put_tag(skb, vlan_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+ napi_gro_receive(&rx_ring->napi, skb);
+ else
+ netif_receive_skb(skb);
}
/* Process an inbound completion from an rx ring. */
@@ -2152,6 +2131,10 @@ void ql_queue_asic_error(struct ql_adapter *qdev)
* thread
*/
clear_bit(QL_ADAPTER_UP, &qdev->flags);
+ /* Set the asic recovery bit to tell the reset process that we are
+ * in fatal error recovery rather than a normal close.
+ */
+ set_bit(QL_ASIC_RECOVERY, &qdev->flags);
queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
@@ -2166,23 +2149,20 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
return;
case CAM_LOOKUP_ERR_EVENT:
- netif_err(qdev, link, qdev->ndev,
- "Multiple CAM hits lookup occurred.\n");
- netif_err(qdev, drv, qdev->ndev,
- "This event shouldn't occur.\n");
+ netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
+ netdev_err(qdev->ndev, "This event shouldn't occur.\n");
ql_queue_asic_error(qdev);
return;
case SOFT_ECC_ERROR_EVENT:
- netif_err(qdev, rx_err, qdev->ndev,
- "Soft ECC error detected.\n");
+ netdev_err(qdev->ndev, "Soft ECC error detected.\n");
ql_queue_asic_error(qdev);
break;
case PCI_ERR_ANON_BUF_RD:
- netif_err(qdev, rx_err, qdev->ndev,
- "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
- ib_ae_rsp->q_id);
+ netdev_err(qdev->ndev, "PCI error occurred when reading "
+ "anonymous buffers from rx_ring %d.\n",
+ ib_ae_rsp->q_id);
ql_queue_asic_error(qdev);
break;
@@ -2333,71 +2313,111 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
return work_done;
}
-static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
+static void qlge_vlan_mode(struct net_device *ndev, u32 features)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- qdev->vlgrp = grp;
- if (grp) {
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ if (features & NETIF_F_HW_VLAN_RX) {
+ netif_printk(qdev, ifup, KERN_DEBUG, ndev,
"Turning on VLAN in NIC_RCV_CFG.\n");
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
- NIC_RCV_CFG_VLAN_MATCH_AND_NON);
+ NIC_RCV_CFG_VLAN_MATCH_AND_NON);
} else {
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ netif_printk(qdev, ifup, KERN_DEBUG, ndev,
"Turning off VLAN in NIC_RCV_CFG.\n");
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
}
}
-static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+static u32 qlge_fix_features(struct net_device *ndev, u32 features)
+{
+ /*
+ * Since there is no support for separate rx/tx vlan accel
+ * enable/disable make sure tx flag is always in same state as rx.
+ */
+ if (features & NETIF_F_HW_VLAN_RX)
+ features |= NETIF_F_HW_VLAN_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_TX;
+
+ return features;
+}
+
+static int qlge_set_features(struct net_device *ndev, u32 features)
+{
+ u32 changed = ndev->features ^ features;
+
+ if (changed & NETIF_F_HW_VLAN_RX)
+ qlge_vlan_mode(ndev, features);
+
+ return 0;
+}
+
+static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
- struct ql_adapter *qdev = netdev_priv(ndev);
u32 enable_bit = MAC_ADDR_E;
- int status;
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return;
if (ql_set_mac_addr_reg
(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to init vlan address.\n");
}
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
-static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- u32 enable_bit = 0;
int status;
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return;
+ __qlge_vlan_rx_add_vid(qdev, vid);
+ set_bit(vid, qdev->active_vlans);
+
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
+
+static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
+{
+ u32 enable_bit = 0;
+
if (ql_set_mac_addr_reg
(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
netif_err(qdev, ifup, qdev->ndev,
"Failed to clear vlan address.\n");
}
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+}
+static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status;
+
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return;
+
+ __qlge_vlan_rx_kill_vid(qdev, vid);
+ clear_bit(vid, qdev->active_vlans);
+
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
static void qlge_restore_vlan(struct ql_adapter *qdev)
{
- qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
+ int status;
+ u16 vid;
- if (qdev->vlgrp) {
- u16 vid;
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- if (!vlan_group_get_device(qdev->vlgrp, vid))
- continue;
- qlge_vlan_rx_add_vid(qdev->ndev, vid);
- }
- }
+ status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+ if (status)
+ return;
+
+ for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
+ __qlge_vlan_rx_add_vid(qdev, vid);
+
+ ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
@@ -2437,11 +2457,10 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/
if (var & STS_FE) {
ql_queue_asic_error(qdev);
- netif_err(qdev, intr, qdev->ndev,
- "Got fatal error, STS = %x.\n", var);
+ netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
var = ql_read32(qdev, ERR_STS);
- netif_err(qdev, intr, qdev->ndev,
- "Resetting chip. Error Status Register = 0x%x\n", var);
+ netdev_err(qdev->ndev, "Resetting chip. "
+ "Error Status Register = 0x%x\n", var);
return IRQ_HANDLED;
}
@@ -3096,7 +3115,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
if (rx_ring->lbq_len) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
tmp = (u64)rx_ring->lbq_base_dma;
- base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
+ base_indirect_ptr = rx_ring->lbq_base_indirect;
page_entries = 0;
do {
*base_indirect_ptr = cpu_to_le64(tmp);
@@ -3120,7 +3139,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
if (rx_ring->sbq_len) {
cqicb->flags |= FLAGS_LS; /* Load sbq values */
tmp = (u64)rx_ring->sbq_base_dma;
- base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
+ base_indirect_ptr = rx_ring->sbq_base_indirect;
page_entries = 0;
do {
*base_indirect_ptr = cpu_to_le64(tmp);
@@ -3818,11 +3837,17 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
end_jiffies = jiffies +
max((unsigned long)1, usecs_to_jiffies(30));
- /* Stop management traffic. */
- ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
+ /* If the recovery bit is set, skip the mailbox command and clear
+ * the bit; otherwise this is a normal reset.
+ */
+ if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
+ /* Stop management traffic. */
+ ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
- /* Wait for the NIC and MGMNT FIFOs to empty. */
- ql_wait_fifo_empty(qdev);
+ /* Wait for the NIC and MGMNT FIFOs to empty. */
+ ql_wait_fifo_empty(qdev);
+ } else
+ clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
@@ -4655,7 +4680,8 @@ static const struct net_device_ops qlge_netdev_ops = {
.ndo_set_mac_address = qlge_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = qlge_tx_timeout,
- .ndo_vlan_rx_register = qlge_vlan_rx_register,
+ .ndo_fix_features = qlge_fix_features,
+ .ndo_set_features = qlge_set_features,
.ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 200a363c3bf..b64fcee483a 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -677,9 +677,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
if (status & RX_FIFO_FULL)
dev->stats.rx_fifo_errors++;
- /* Mask off RX interrupt */
- misr &= ~RX_INTS;
- napi_schedule(&lp->napi);
+ if (likely(napi_schedule_prep(&lp->napi))) {
+ /* Mask off RX interrupt */
+ misr &= ~RX_INTS;
+ __napi_schedule(&lp->napi);
+ }
}
/* TX interrupt request */
@@ -836,6 +838,9 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE));
descptr->status = DSC_OWNER_MAC;
+
+ skb_tx_timestamp(skb);
+
/* Trigger the MAC to check the TX descriptor */
iowrite16(0x01, ioaddr + MTPR);
lp->tx_insert_ptr = descptr->vndescp;
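The r6040 interrupt change above follows the idiomatic napi_schedule_prep()/__napi_schedule() pattern: device RX interrupts are masked only once we know the poll will really be scheduled. A minimal kernel-style sketch of that pattern with hypothetical names; mask_rx_interrupts() stands in for the misr &= ~RX_INTS register update in r6040:

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>

	struct example_priv {
		struct napi_struct napi;
	};

	static void mask_rx_interrupts(struct example_priv *lp)
	{
		/* Hypothetical placeholder for the device-specific register write. */
	}

	static irqreturn_t example_isr(int irq, void *dev_id)
	{
		struct example_priv *lp = dev_id;

		/* Mask RX interrupts only if NAPI was not already scheduled;
		 * an in-flight poll will pick up the new work anyway. */
		if (likely(napi_schedule_prep(&lp->napi))) {
			mask_rx_interrupts(lp);
			__napi_schedule(&lp->napi);
		}
		return IRQ_HANDLED;
	}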
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 05d81780d1f..02339b3352e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -22,6 +22,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
@@ -40,6 +41,7 @@
#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
+#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
#ifdef RTL8169_DEBUG
@@ -69,8 +71,6 @@ static const int multicast_filter_limit = 32;
#define MAC_ADDR_LEN 6
#define MAX_READ_REQUEST_SHIFT 12
-#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
-#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -132,6 +132,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_31,
RTL_GIGA_MAC_VER_32,
RTL_GIGA_MAC_VER_33,
+ RTL_GIGA_MAC_VER_34,
RTL_GIGA_MAC_NONE = 0xff,
};
@@ -215,7 +216,9 @@ static const struct {
[RTL_GIGA_MAC_VER_32] =
_R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1),
[RTL_GIGA_MAC_VER_33] =
- _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2)
+ _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2),
+ [RTL_GIGA_MAC_VER_34] =
+ _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3)
};
#undef _R
@@ -236,6 +239,7 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
{ PCI_VENDOR_ID_LINKSYS, 0x1032,
@@ -269,10 +273,20 @@ enum rtl_registers {
TxPoll = 0x38,
IntrMask = 0x3c,
IntrStatus = 0x3e,
+
TxConfig = 0x40,
- RxConfig = 0x44,
+#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
+#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
-#define RTL_RX_CONFIG_MASK 0xff7e1880u
+ RxConfig = 0x44,
+#define RX128_INT_EN (1 << 15) /* 8111c and later */
+#define RX_MULTI_EN (1 << 14) /* 8111c only */
+#define RXCFG_FIFO_SHIFT 13
+ /* No threshold before first PCI xfer */
+#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
+#define RXCFG_DMA_SHIFT 8
+ /* Unlimited maximum PCI burst. */
+#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
RxMissed = 0x4c,
Cfg9346 = 0x50,
@@ -326,12 +340,13 @@ enum rtl8168_8101_registers {
#define EPHYAR_REG_SHIFT 16
#define EPHYAR_DATA_MASK 0xffff
DLLPR = 0xd0,
-#define PM_SWITCH (1 << 6)
+#define PFM_EN (1 << 6)
DBG_REG = 0xd1,
#define FIX_NAK_1 (1 << 4)
#define FIX_NAK_2 (1 << 3)
TWSI = 0xd2,
MCU = 0xd3,
+#define NOW_IS_OOB (1 << 7)
#define EN_NDP (1 << 3)
#define EN_OOB_RESET (1 << 2)
EFUSEAR = 0xdc,
@@ -344,18 +359,22 @@ enum rtl8168_8101_registers {
};
enum rtl8168_registers {
+ LED_FREQ = 0x1a,
+ EEE_LED = 0x1b,
ERIDR = 0x70,
ERIAR = 0x74,
#define ERIAR_FLAG 0x80000000
#define ERIAR_WRITE_CMD 0x80000000
#define ERIAR_READ_CMD 0x00000000
#define ERIAR_ADDR_BYTE_ALIGN 4
-#define ERIAR_EXGMAC 0
-#define ERIAR_MSIX 1
-#define ERIAR_ASF 2
#define ERIAR_TYPE_SHIFT 16
-#define ERIAR_BYTEEN 0x0f
-#define ERIAR_BYTEEN_SHIFT 12
+#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
+#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
+#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
+#define ERIAR_MASK_SHIFT 12
+#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
+#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
+#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
EPHY_RXER_NUM = 0x7c,
OCPDR = 0xb0, /* OCP GPHY access */
#define OCPDR_WRITE_CMD 0x80000000
@@ -370,6 +389,7 @@ enum rtl8168_registers {
RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
MISC = 0xf0, /* 8168e only. */
#define TXPLA_RST (1 << 29)
+#define PWM_EN (1 << 22)
};
enum rtl_register_content {
@@ -394,6 +414,7 @@ enum rtl_register_content {
RxCRC = (1 << 19),
/* ChipCmdBits */
+ StopReq = 0x80,
CmdReset = 0x10,
CmdRxEnb = 0x08,
CmdTxEnb = 0x04,
@@ -415,10 +436,7 @@ enum rtl_register_content {
AcceptMulticast = 0x04,
AcceptMyPhys = 0x02,
AcceptAllPhys = 0x01,
-
- /* RxConfigBits */
- RxCfgFIFOShift = 13,
- RxCfgDMAShift = 8,
+#define RX_CONFIG_ACCEPT_MASK 0x3f
/* TxConfigBits */
TxInterFrameGapShift = 24,
@@ -658,7 +676,6 @@ struct rtl8169_private {
unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
unsigned int (*link_ok)(void __iomem *);
int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
- int pcie_cap;
struct delayed_work task;
unsigned features;
@@ -666,8 +683,19 @@ struct rtl8169_private {
struct rtl8169_counters counters;
u32 saved_wolopts;
- const struct firmware *fw;
-#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN);
+ struct rtl_fw {
+ const struct firmware *fw;
+
+#define RTL_VER_SIZE 32
+
+ char version[RTL_VER_SIZE];
+
+ struct rtl_fw_phy_action {
+ __le32 *code;
+ size_t size;
+ } phy_action;
+ } *rtl_fw;
+#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -701,9 +729,6 @@ static void rtl8169_down(struct net_device *dev);
static void rtl8169_rx_clear(struct rtl8169_private *tp);
static int rtl8169_poll(struct napi_struct *napi, int budget);
-static const unsigned int rtl8169_rx_config =
- (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
-
static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -742,7 +767,7 @@ static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
msleep(2);
for (i = 0; i < 5; i++) {
udelay(100);
- if (!(RTL_R32(ERIDR) & ERIAR_FLAG))
+ if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
break;
}
@@ -1024,6 +1049,64 @@ static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
return value;
}
+static
+void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
+{
+ unsigned int i;
+
+ BUG_ON((addr & 3) || (mask == 0));
+ RTL_W32(ERIDR, val);
+ RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
+
+ for (i = 0; i < 100; i++) {
+ if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
+ break;
+ udelay(100);
+ }
+}
+
+static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type)
+{
+ u32 value = ~0x00;
+ unsigned int i;
+
+ RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
+
+ for (i = 0; i < 100; i++) {
+ if (RTL_R32(ERIAR) & ERIAR_FLAG) {
+ value = RTL_R32(ERIDR);
+ break;
+ }
+ udelay(100);
+ }
+
+ return value;
+}
+
+static void
+rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
+{
+ u32 val;
+
+ val = rtl_eri_read(ioaddr, addr, type);
+ rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
+}
+
+struct exgmac_reg {
+ u16 addr;
+ u16 mask;
+ u32 val;
+};
+
+static void rtl_write_exgmac_batch(void __iomem *ioaddr,
+ const struct exgmac_reg *r, int len)
+{
+ while (len-- > 0) {
+ rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
+ r++;
+ }
+}
+
static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
{
u8 value = 0xff;
@@ -1049,13 +1132,6 @@ static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
RTL_W16(IntrStatus, 0xffff);
}
-static void rtl8169_asic_down(void __iomem *ioaddr)
-{
- RTL_W8(ChipCmd, 0x00);
- rtl8169_irq_mask_and_ack(ioaddr);
- RTL_R16(CPlusCmd);
-}
-
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -1093,6 +1169,39 @@ static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
rtl_writephy(tp, MII_BMCR, val & 0xffff);
}
+static void rtl_link_chg_patch(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct net_device *dev = tp->dev;
+
+ if (!netif_running(dev))
+ return;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ if (RTL_R8(PHYstatus) & _1000bpsF) {
+ rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
+ 0x00000011, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
+ 0x00000005, ERIAR_EXGMAC);
+ } else if (RTL_R8(PHYstatus) & _100bps) {
+ rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
+ 0x0000001f, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
+ 0x00000005, ERIAR_EXGMAC);
+ } else {
+ rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
+ 0x0000001f, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
+ 0x0000003f, ERIAR_EXGMAC);
+ }
+ /* Reset packet filter */
+ rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
+ ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
+ ERIAR_EXGMAC);
+ }
+}
+
static void __rtl8169_check_link_status(struct net_device *dev,
struct rtl8169_private *tp,
void __iomem *ioaddr, bool pm)
@@ -1101,6 +1210,7 @@ static void __rtl8169_check_link_status(struct net_device *dev,
spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
+ rtl_link_chg_patch(tp);
/* This is to cancel a scheduled suspend if there's one. */
if (pm)
pm_request_resume(&tp->pci_dev->dev);
@@ -1221,12 +1331,14 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct rtl_fw *rtl_fw = tp->rtl_fw;
strcpy(info->driver, MODULENAME);
strcpy(info->version, RTL8169_VERSION);
strcpy(info->bus_info, pci_name(tp->pci_dev));
- strncpy(info->fw_version, IS_ERR_OR_NULL(tp->fw) ? "N/A" :
- rtl_lookup_firmware_name(tp), sizeof(info->fw_version) - 1);
+ BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
+ strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? "N/A" :
+ rtl_fw->version);
}
static int rtl8169_get_regs_len(struct net_device *dev)
@@ -1627,6 +1739,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
int mac_version;
} mac_info[] = {
/* 8168E family. */
+ { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
{ 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
{ 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
{ 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
@@ -1741,21 +1854,75 @@ static void rtl_writephy_batch(struct rtl8169_private *tp,
#define PHY_DELAY_MS 0xe0000000
#define PHY_WRITE_ERI_WORD 0xf0000000
-static void
-rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
+struct fw_info {
+ u32 magic;
+ char version[RTL_VER_SIZE];
+ __le32 fw_start;
+ __le32 fw_len;
+ u8 chksum;
+} __packed;
+
+#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
+
+static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
- __le32 *phytable = (__le32 *)fw->data;
- struct net_device *dev = tp->dev;
- size_t index, fw_size = fw->size / sizeof(*phytable);
- u32 predata, count;
+ const struct firmware *fw = rtl_fw->fw;
+ struct fw_info *fw_info = (struct fw_info *)fw->data;
+ struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+ char *version = rtl_fw->version;
+ bool rc = false;
- if (fw->size % sizeof(*phytable)) {
- netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size);
- return;
+ if (fw->size < FW_OPCODE_SIZE)
+ goto out;
+
+ if (!fw_info->magic) {
+ size_t i, size, start;
+ u8 checksum = 0;
+
+ if (fw->size < sizeof(*fw_info))
+ goto out;
+
+ for (i = 0; i < fw->size; i++)
+ checksum += fw->data[i];
+ if (checksum != 0)
+ goto out;
+
+ start = le32_to_cpu(fw_info->fw_start);
+ if (start > fw->size)
+ goto out;
+
+ size = le32_to_cpu(fw_info->fw_len);
+ if (size > (fw->size - start) / FW_OPCODE_SIZE)
+ goto out;
+
+ memcpy(version, fw_info->version, RTL_VER_SIZE);
+
+ pa->code = (__le32 *)(fw->data + start);
+ pa->size = size;
+ } else {
+ if (fw->size % FW_OPCODE_SIZE)
+ goto out;
+
+ strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
+
+ pa->code = (__le32 *)fw->data;
+ pa->size = fw->size / FW_OPCODE_SIZE;
}
+ version[RTL_VER_SIZE - 1] = 0;
+
+ rc = true;
+out:
+ return rc;
+}
+
+static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
+ struct rtl_fw_phy_action *pa)
+{
+ bool rc = false;
+ size_t index;
- for (index = 0; index < fw_size; index++) {
- u32 action = le32_to_cpu(phytable[index]);
+ for (index = 0; index < pa->size; index++) {
+ u32 action = le32_to_cpu(pa->code[index]);
u32 regno = (action & 0x0fff0000) >> 16;
switch(action & 0xf0000000) {
@@ -1771,25 +1938,25 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
case PHY_BJMPN:
if (regno > index) {
- netif_err(tp, probe, tp->dev,
+ netif_err(tp, ifup, tp->dev,
"Out of range of firmware\n");
- return;
+ goto out;
}
break;
case PHY_READCOUNT_EQ_SKIP:
- if (index + 2 >= fw_size) {
- netif_err(tp, probe, tp->dev,
+ if (index + 2 >= pa->size) {
+ netif_err(tp, ifup, tp->dev,
"Out of range of firmware\n");
- return;
+ goto out;
}
break;
case PHY_COMP_EQ_SKIPN:
case PHY_COMP_NEQ_SKIPN:
case PHY_SKIPN:
- if (index + 1 + regno >= fw_size) {
- netif_err(tp, probe, tp->dev,
+ if (index + 1 + regno >= pa->size) {
+ netif_err(tp, ifup, tp->dev,
"Out of range of firmware\n");
- return;
+ goto out;
}
break;
@@ -1797,17 +1964,42 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
case PHY_WRITE_MAC_BYTE:
case PHY_WRITE_ERI_WORD:
default:
- netif_err(tp, probe, tp->dev,
+ netif_err(tp, ifup, tp->dev,
"Invalid action 0x%08x\n", action);
- return;
+ goto out;
}
}
+ rc = true;
+out:
+ return rc;
+}
+
+static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
+{
+ struct net_device *dev = tp->dev;
+ int rc = -EINVAL;
+
+ if (!rtl_fw_format_ok(tp, rtl_fw)) {
+ netif_err(tp, ifup, dev, "invalid firmware\n");
+ goto out;
+ }
- predata = 0;
- count = 0;
+ if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
+ rc = 0;
+out:
+ return rc;
+}
- for (index = 0; index < fw_size; ) {
- u32 action = le32_to_cpu(phytable[index]);
+static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
+{
+ struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
+ u32 predata, count;
+ size_t index;
+
+ predata = count = 0;
+
+ for (index = 0; index < pa->size; ) {
+ u32 action = le32_to_cpu(pa->code[index]);
u32 data = action & 0x0000ffff;
u32 regno = (action & 0x0fff0000) >> 16;
@@ -1879,18 +2071,20 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
static void rtl_release_firmware(struct rtl8169_private *tp)
{
- if (!IS_ERR_OR_NULL(tp->fw))
- release_firmware(tp->fw);
- tp->fw = RTL_FIRMWARE_UNKNOWN;
+ if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
+ release_firmware(tp->rtl_fw->fw);
+ kfree(tp->rtl_fw);
+ }
+ tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
}
static void rtl_apply_firmware(struct rtl8169_private *tp)
{
- const struct firmware *fw = tp->fw;
+ struct rtl_fw *rtl_fw = tp->rtl_fw;
/* TODO: release firmware once rtl_phy_write_fw signals failures. */
- if (!IS_ERR_OR_NULL(fw))
- rtl_phy_write_fw(tp, fw);
+ if (!IS_ERR_OR_NULL(rtl_fw))
+ rtl_phy_write_fw(tp, rtl_fw);
}
static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -1982,12 +2176,9 @@ static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
- u16 vendor_id, device_id;
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &device_id);
-
- if ((vendor_id != PCI_VENDOR_ID_GIGABYTE) || (device_id != 0xe000))
+ if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
+ (pdev->subsystem_device != 0xe000))
return;
rtl_writephy(tp, 0x1f, 0x0001);
@@ -2523,7 +2714,7 @@ static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
rtl_patchphy(tp, 0x0d, 1 << 5);
}
-static void rtl8168e_hw_phy_config(struct rtl8169_private *tp)
+static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
/* Enable Delay cap */
@@ -2596,6 +2787,91 @@ static void rtl8168e_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x0d, 0x0000);
}
+static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ /* Enable Delay cap */
+ { 0x1f, 0x0004 },
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x00ac },
+ { 0x18, 0x0006 },
+ { 0x1f, 0x0002 },
+ { 0x1f, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ /* Channel estimation fine tune */
+ { 0x1f, 0x0003 },
+ { 0x09, 0xa20f },
+ { 0x1f, 0x0000 },
+ { 0x1f, 0x0000 },
+
+ /* Green Setting */
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8b5b },
+ { 0x06, 0x9222 },
+ { 0x05, 0x8b6d },
+ { 0x06, 0x8000 },
+ { 0x05, 0x8b76 },
+ { 0x06, 0x8000 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_apply_firmware(tp);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ /* For 4-corner performance improve */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b80);
+ rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* PHY auto speed down */
+ rtl_writephy(tp, 0x1f, 0x0004);
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x002d);
+ rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+ /* improve 10M EEE waveform */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b86);
+ rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* Improve 2-pair detection performance */
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b85);
+ rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* EEE setting */
+ rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003,
+ ERIAR_EXGMAC);
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b85);
+ rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
+ rtl_writephy(tp, 0x1f, 0x0004);
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x0020);
+ rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x0d, 0x0007);
+ rtl_writephy(tp, 0x0e, 0x003c);
+ rtl_writephy(tp, 0x0d, 0x4007);
+ rtl_writephy(tp, 0x0e, 0x0000);
+ rtl_writephy(tp, 0x0d, 0x0000);
+
+ /* Green feature */
+ rtl_writephy(tp, 0x1f, 0x0003);
+ rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
+ rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
+ rtl_writephy(tp, 0x1f, 0x0000);
+}
+
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -2715,7 +2991,10 @@ static void rtl_hw_phy_config(struct net_device *dev)
break;
case RTL_GIGA_MAC_VER_32:
case RTL_GIGA_MAC_VER_33:
- rtl8168e_hw_phy_config(tp);
+ rtl8168e_1_hw_phy_config(tp);
+ break;
+ case RTL_GIGA_MAC_VER_34:
+ rtl8168e_2_hw_phy_config(tp);
break;
default:
@@ -2853,6 +3132,18 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
RTL_W32(MAC0, low);
RTL_R32(MAC0);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ const struct exgmac_reg e[] = {
+ { .addr = 0xe0, ERIAR_MASK_1111, .val = low },
+ { .addr = 0xe4, ERIAR_MASK_1111, .val = high },
+ { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
+ { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
+ low >> 16 },
+ };
+
+ rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
+ }
+
RTL_W8(Cfg9346, Cfg9346_Lock);
spin_unlock_irq(&tp->lock);
@@ -3125,8 +3416,10 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
rtl_writephy(tp, MII_BMCR, 0x0000);
- RTL_W32(RxConfig, RTL_R32(RxConfig) |
- AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_33)
+ RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
+ AcceptMulticast | AcceptMyPhys);
return;
}
@@ -3221,6 +3514,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_31:
case RTL_GIGA_MAC_VER_32:
case RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_34:
ops->down = r8168_pll_power_down;
ops->up = r8168_pll_power_up;
break;
@@ -3232,6 +3526,47 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
}
}
+static void rtl_init_rxcfg(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_01:
+ case RTL_GIGA_MAC_VER_02:
+ case RTL_GIGA_MAC_VER_03:
+ case RTL_GIGA_MAC_VER_04:
+ case RTL_GIGA_MAC_VER_05:
+ case RTL_GIGA_MAC_VER_06:
+ case RTL_GIGA_MAC_VER_10:
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_13:
+ case RTL_GIGA_MAC_VER_14:
+ case RTL_GIGA_MAC_VER_15:
+ case RTL_GIGA_MAC_VER_16:
+ case RTL_GIGA_MAC_VER_17:
+ RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
+ break;
+ case RTL_GIGA_MAC_VER_18:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
+ break;
+ default:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
+ break;
+ }
+}
+
+static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
+{
+ tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
+}
+
static void rtl_hw_reset(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -3244,8 +3579,10 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
for (i = 0; i < 100; i++) {
if ((RTL_R8(ChipCmd) & CmdReset) == 0)
break;
- msleep_interruptible(1);
+ udelay(100);
}
+
+ rtl8169_init_ring_indexes(tp);
}
static int __devinit
@@ -3349,9 +3686,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
tp->mmio_addr = ioaddr;
- tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (!tp->pcie_cap)
- netif_info(tp, probe, dev, "no PCI Express capability\n");
+ if (!pci_is_pcie(pdev))
+ netif_info(tp, probe, dev, "not PCI Express\n");
+
+ /* Identify chip attached to board */
+ rtl8169_get_mac_version(tp, dev, cfg->default_ver);
+
+ rtl_init_rxcfg(tp);
RTL_W16(IntrMask, 0x0000);
@@ -3361,9 +3702,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- /* Identify chip attached to board */
- rtl8169_get_mac_version(tp, dev, cfg->default_ver);
-
/*
* Pretend we are using VLANs; This bypasses a nasty bug where
* Interrupts stop flowing on high load on 8110SCd controllers.
@@ -3443,7 +3781,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->timer.data = (unsigned long) dev;
tp->timer.function = rtl8169_phy_timer;
- tp->fw = RTL_FIRMWARE_UNKNOWN;
+ tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
rc = register_netdev(dev);
if (rc < 0)
@@ -3512,25 +3850,48 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-static void rtl_request_firmware(struct rtl8169_private *tp)
+static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
- /* Return early if the firmware is already loaded / cached. */
- if (IS_ERR(tp->fw)) {
- const char *name;
+ struct rtl_fw *rtl_fw;
+ const char *name;
+ int rc = -ENOMEM;
- name = rtl_lookup_firmware_name(tp);
- if (name) {
- int rc;
+ name = rtl_lookup_firmware_name(tp);
+ if (!name)
+ goto out_no_firmware;
- rc = request_firmware(&tp->fw, name, &tp->pci_dev->dev);
- if (rc >= 0)
- return;
+ rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
+ if (!rtl_fw)
+ goto err_warn;
- netif_warn(tp, ifup, tp->dev, "unable to load "
- "firmware patch %s (%d)\n", name, rc);
- }
- tp->fw = NULL;
- }
+ rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
+ if (rc < 0)
+ goto err_free;
+
+ rc = rtl_check_firmware(tp, rtl_fw);
+ if (rc < 0)
+ goto err_release_firmware;
+
+ tp->rtl_fw = rtl_fw;
+out:
+ return;
+
+err_release_firmware:
+ release_firmware(rtl_fw->fw);
+err_free:
+ kfree(rtl_fw);
+err_warn:
+ netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
+ name, rc);
+out_no_firmware:
+ tp->rtl_fw = NULL;
+ goto out;
+}
+
+static void rtl_request_firmware(struct rtl8169_private *tp)
+{
+ if (IS_ERR(tp->rtl_fw))
+ rtl_request_uncached_firmware(tp);
}
static int rtl8169_open(struct net_device *dev)
@@ -3605,6 +3966,13 @@ err_pm_runtime_put:
goto out;
}
+static void rtl_rx_close(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
+}
+
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -3612,28 +3980,27 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
/* Disable interrupts */
rtl8169_irq_mask_and_ack(ioaddr);
+ rtl_rx_close(tp);
+
if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
tp->mac_version == RTL_GIGA_MAC_VER_28 ||
tp->mac_version == RTL_GIGA_MAC_VER_31) {
while (RTL_R8(TxPoll) & NPQ)
udelay(20);
-
+ } else if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
+ udelay(100);
+ } else {
+ RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
+ udelay(100);
}
- /* Reset the chipset */
- RTL_W8(ChipCmd, CmdReset);
-
- /* PCI commit */
- RTL_R8(ChipCmd);
+ rtl_hw_reset(tp);
}
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- u32 cfg = rtl8169_rx_config;
-
- cfg |= (RTL_R32(RxConfig) & RTL_RX_CONFIG_MASK);
- RTL_W32(RxConfig, cfg);
/* Set DMA burst size and Interframe Gap Time */
RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
@@ -3644,8 +4011,6 @@ static void rtl_hw_start(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl_hw_reset(tp);
-
tp->hw_start(dev);
netif_start_queue(dev);
@@ -3723,6 +4088,8 @@ static void rtl_hw_start_8169(struct net_device *dev)
tp->mac_version == RTL_GIGA_MAC_VER_04)
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ rtl_init_rxcfg(tp);
+
RTL_W8(EarlyTxThres, NoEarlyTx);
rtl_set_rx_max_size(ioaddr, rx_buf_sz);
@@ -3780,9 +4147,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
- int cap = tp->pcie_cap;
+ int cap = pci_pcie_cap(pdev);
if (cap) {
u16 ctl;
@@ -3830,9 +4195,7 @@ static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int l
static void rtl_disable_clock_request(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
- int cap = tp->pcie_cap;
+ int cap = pci_pcie_cap(pdev);
if (cap) {
u16 ctl;
@@ -3845,9 +4208,7 @@ static void rtl_disable_clock_request(struct pci_dev *pdev)
static void rtl_enable_clock_request(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct rtl8169_private *tp = netdev_priv(dev);
- int cap = tp->pcie_cap;
+ int cap = pci_pcie_cap(pdev);
if (cap) {
u16 ctl;
@@ -4038,9 +4399,9 @@ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
rtl_enable_clock_request(pdev);
}
-static void rtl_hw_start_8168e(void __iomem *ioaddr, struct pci_dev *pdev)
+static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
- static const struct ephy_info e_info_8168e[] = {
+ static const struct ephy_info e_info_8168e_1[] = {
{ 0x00, 0x0200, 0x0100 },
{ 0x00, 0x0000, 0x0004 },
{ 0x06, 0x0002, 0x0001 },
@@ -4058,7 +4419,7 @@ static void rtl_hw_start_8168e(void __iomem *ioaddr, struct pci_dev *pdev)
rtl_csi_access_enable_2(ioaddr);
- rtl_ephy_init(ioaddr, e_info_8168e, ARRAY_SIZE(e_info_8168e));
+ rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
@@ -4073,6 +4434,44 @@ static void rtl_hw_start_8168e(void __iomem *ioaddr, struct pci_dev *pdev)
RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
+static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168e_2[] = {
+ { 0x09, 0x0000, 0x0080 },
+ { 0x19, 0x0000, 0x0224 }
+ };
+
+ rtl_csi_access_enable_1(ioaddr);
+
+ rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
+ ERIAR_EXGMAC);
+
+ RTL_W8(MaxTxPacketSize, 0x27);
+
+ rtl_disable_clock_request(pdev);
+
+ RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+ RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+
+ /* Adjust EEE LED frequency */
+ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+
+ RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+ RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+}
+
static void rtl_hw_start_8168(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -4161,7 +4560,10 @@ static void rtl_hw_start_8168(struct net_device *dev)
case RTL_GIGA_MAC_VER_32:
case RTL_GIGA_MAC_VER_33:
- rtl_hw_start_8168e(ioaddr, pdev);
+ rtl_hw_start_8168e_1(ioaddr, pdev);
+ break;
+ case RTL_GIGA_MAC_VER_34:
+ rtl_hw_start_8168e_2(ioaddr, pdev);
break;
default:
@@ -4258,7 +4660,7 @@ static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
- RTL_W8(DLLPR, RTL_R8(DLLPR) | PM_SWITCH);
+ RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
@@ -4277,7 +4679,7 @@ static void rtl_hw_start_8101(struct net_device *dev)
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
tp->mac_version == RTL_GIGA_MAC_VER_16) {
- int cap = tp->pcie_cap;
+ int cap = pci_pcie_cap(pdev);
if (cap) {
pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
@@ -4460,11 +4862,6 @@ err_out:
return -ENOMEM;
}
-static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
-{
- tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
-}
-
static int rtl8169_init_ring(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -4592,7 +4989,7 @@ static void rtl8169_reset_task(struct work_struct *work)
rtl8169_tx_clear(tp);
- rtl8169_init_ring_indexes(tp);
+ rtl8169_hw_reset(tp);
rtl_hw_start(dev);
netif_wake_queue(dev);
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5006,7 +5403,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
* the chip, so just exit the loop.
*/
if (unlikely(!netif_running(dev))) {
- rtl8169_asic_down(ioaddr);
+ rtl8169_hw_reset(tp);
break;
}
@@ -5129,7 +5526,7 @@ static void rtl8169_down(struct net_device *dev)
spin_lock_irq(&tp->lock);
- rtl8169_asic_down(ioaddr);
+ rtl8169_hw_reset(tp);
/*
* At this point device interrupts can not be enabled in any function,
* as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task,
@@ -5212,8 +5609,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
spin_lock_irqsave(&tp->lock, flags);
- tmp = rtl8169_rx_config | rx_mode |
- (RTL_R32(RxConfig) & RTL_RX_CONFIG_MASK);
+ tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
u32 data = mc_filter[0];
@@ -5383,13 +5779,16 @@ static void rtl_shutdown(struct pci_dev *pdev)
spin_lock_irq(&tp->lock);
- rtl8169_asic_down(ioaddr);
+ rtl8169_hw_reset(tp);
spin_unlock_irq(&tp->lock);
if (system_state == SYSTEM_POWER_OFF) {
- /* WoL fails with some 8168 when the receiver is disabled. */
- if (tp->features & RTL_FEATURE_WOL) {
+ /* WoL fails with 8168b when the receiver is disabled. */
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_11 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_12 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_17) &&
+ (tp->features & RTL_FEATURE_WOL)) {
pci_clear_master(pdev);
RTL_W8(ChipCmd, CmdRxEnb);
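
The rtl_fw_format_ok() hunk above introduces a headered firmware container for r8169: a fw_info header carrying a magic word, version string, opcode-area offset and length, plus a whole-file additive checksum that must sum to zero; blobs with a non-zero magic fall back to the old bare opcode-array format. The following is a minimal user-space sketch of the same checks, not the driver code itself; struct layout mirrors the patch, while the names, the version-field size and the little-endian-host shortcut are illustrative only.

/*
 * Standalone sketch of the validation rtl_fw_format_ok() performs above.
 * Compile with any C compiler; no kernel headers required.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct fw_hdr {                         /* mirrors struct fw_info in the patch */
	uint32_t magic;                 /* 0 selects the new headered format */
	char     version[32];           /* size is illustrative here */
	uint32_t fw_start;              /* byte offset of the opcode area */
	uint32_t fw_len;                /* number of 32-bit opcodes */
	uint8_t  chksum;
} __attribute__((packed));

static bool fw_blob_ok(const uint8_t *data, size_t size)
{
	const struct fw_hdr *hdr = (const struct fw_hdr *)data;
	uint8_t sum = 0;
	size_t i, start, len;

	if (size < sizeof(uint32_t))
		return false;
	if (hdr->magic != 0)            /* legacy blob: bare opcode array */
		return size % sizeof(uint32_t) == 0;
	if (size < sizeof(*hdr))
		return false;
	for (i = 0; i < size; i++)      /* additive checksum over the whole file */
		sum += data[i];
	if (sum != 0)
		return false;
	start = hdr->fw_start;          /* little-endian on disk; little-endian
					 * host assumed in this sketch */
	len = hdr->fw_len;
	return start <= size && len <= (size - start) / sizeof(uint32_t);
}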
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 77c5092a6a4..86ac38c96bc 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -190,7 +190,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
- if (eth->h_dest[0] & 0x01) {
+ if (is_multicast_ether_addr(eth->h_dest)) {
for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
i++)
if (rionet_active[i])
@@ -378,7 +378,7 @@ static int rionet_close(struct net_device *ndev)
static void rionet_remove(struct rio_dev *rdev)
{
- struct net_device *ndev = NULL;
+ struct net_device *ndev = rio_get_drvdata(rdev);
struct rionet_peer *peer, *tmp;
free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
@@ -433,22 +433,12 @@ static const struct net_device_ops rionet_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int rionet_setup_netdev(struct rio_mport *mport)
+static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
{
int rc = 0;
- struct net_device *ndev = NULL;
struct rionet_private *rnet;
u16 device_id;
- /* Allocate our net_device structure */
- ndev = alloc_etherdev(sizeof(struct rionet_private));
- if (ndev == NULL) {
- printk(KERN_INFO "%s: could not allocate ethernet device.\n",
- DRV_NAME);
- rc = -ENOMEM;
- goto out;
- }
-
rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
mport->sys_size ? __fls(sizeof(void *)) + 4 : 0);
if (!rionet_active) {
@@ -504,11 +494,21 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
int rc = -ENODEV;
u32 lpef, lsrc_ops, ldst_ops;
struct rionet_peer *peer;
+ struct net_device *ndev = NULL;
/* If local device is not rionet capable, give up quickly */
if (!rionet_capable)
goto out;
+ /* Allocate our net_device structure */
+ ndev = alloc_etherdev(sizeof(struct rionet_private));
+ if (ndev == NULL) {
+ printk(KERN_INFO "%s: could not allocate ethernet device.\n",
+ DRV_NAME);
+ rc = -ENOMEM;
+ goto out;
+ }
+
/*
* First time through, make sure local device is rionet
* capable, setup netdev, and set flags so this is skipped
@@ -529,7 +529,7 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
goto out;
}
- rc = rionet_setup_netdev(rdev->net->hport);
+ rc = rionet_setup_netdev(rdev->net->hport, ndev);
rionet_check = 1;
}
@@ -546,6 +546,8 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
list_add_tail(&peer->node, &rionet_peers);
}
+ rio_set_drvdata(rdev, ndev);
+
out:
return rc;
}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index df0d2c8ecc0..277d48b0800 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -356,56 +356,6 @@ static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
}
-/* Add the vlan */
-static void s2io_vlan_rx_register(struct net_device *dev,
- struct vlan_group *grp)
-{
- int i;
- struct s2io_nic *nic = netdev_priv(dev);
- unsigned long flags[MAX_TX_FIFOS];
- struct config_param *config = &nic->config;
- struct mac_info *mac_control = &nic->mac_control;
-
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct fifo_info *fifo = &mac_control->fifos[i];
-
- spin_lock_irqsave(&fifo->tx_lock, flags[i]);
- }
-
- nic->vlgrp = grp;
-
- for (i = config->tx_fifo_num - 1; i >= 0; i--) {
- struct fifo_info *fifo = &mac_control->fifos[i];
-
- spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
- }
-}
-
-/* Unregister the vlan */
-static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
-{
- int i;
- struct s2io_nic *nic = netdev_priv(dev);
- unsigned long flags[MAX_TX_FIFOS];
- struct config_param *config = &nic->config;
- struct mac_info *mac_control = &nic->mac_control;
-
- for (i = 0; i < config->tx_fifo_num; i++) {
- struct fifo_info *fifo = &mac_control->fifos[i];
-
- spin_lock_irqsave(&fifo->tx_lock, flags[i]);
- }
-
- if (nic->vlgrp)
- vlan_group_set_device(nic->vlgrp, vid, NULL);
-
- for (i = config->tx_fifo_num - 1; i >= 0; i--) {
- struct fifo_info *fifo = &mac_control->fifos[i];
-
- spin_unlock_irqrestore(&fifo->tx_lock, flags[i]);
- }
-}
-
/*
* Constants to be programmed into the Xena's registers, to configure
* the XAUI.
@@ -841,7 +791,7 @@ static int init_shared_mem(struct s2io_nic *nic)
tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
- pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
+ pre_rxd_blk = tmp_v_addr;
pre_rxd_blk->reserved_2_pNext_RxD_block =
(unsigned long)tmp_v_addr_next;
pre_rxd_blk->pNext_RxD_Blk_physical =
@@ -918,7 +868,7 @@ static int init_shared_mem(struct s2io_nic *nic)
mac_control->stats_mem_sz = size;
tmp_v_addr = mac_control->stats_mem;
- mac_control->stats_info = (struct stat_block *)tmp_v_addr;
+ mac_control->stats_info = tmp_v_addr;
memset(tmp_v_addr, 0, size);
DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
@@ -2439,7 +2389,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
spin_lock_irqsave(&fifo->tx_lock, flags);
for (j = 0; j < tx_cfg->fifo_len; j++) {
- txdp = (struct TxD *)fifo->list_info[j].list_virt_addr;
+ txdp = fifo->list_info[j].list_virt_addr;
skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
if (skb) {
swstats->mem_freed += skb->truesize;
@@ -3075,8 +3025,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
get_info = fifo_data->tx_curr_get_info;
memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
- txdlp = (struct TxD *)
- fifo_data->list_info[get_info.offset].list_virt_addr;
+ txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
(get_info.offset != put_info.offset) &&
(txdlp->Host_Control)) {
@@ -3129,8 +3078,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
get_info.offset++;
if (get_info.offset == get_info.fifo_len + 1)
get_info.offset = 0;
- txdlp = (struct TxD *)
- fifo_data->list_info[get_info.offset].list_virt_addr;
+ txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
fifo_data->tx_curr_get_info.offset = get_info.offset;
}
@@ -4111,7 +4059,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
struct tcphdr *th;
ip = ip_hdr(skb);
- if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
+ if (!ip_is_fragment(ip)) {
th = (struct tcphdr *)(((unsigned char *)ip) +
ip->ihl*4);
@@ -4163,7 +4111,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
put_off = (u16)fifo->tx_curr_put_info.offset;
get_off = (u16)fifo->tx_curr_get_info.offset;
- txdp = (struct TxD *)fifo->list_info[put_off].list_virt_addr;
+ txdp = fifo->list_info[put_off].list_virt_addr;
queue_len = fifo->tx_curr_put_info.fifo_len + 1;
/* Avoid "put" pointer going beyond "get" pointer */
@@ -7739,8 +7687,6 @@ static const struct net_device_ops s2io_netdev_ops = {
.ndo_set_mac_address = s2io_set_mac_addr,
.ndo_change_mtu = s2io_change_mtu,
.ndo_set_features = s2io_set_features,
- .ndo_vlan_rx_register = s2io_vlan_rx_register,
- .ndo_vlan_rx_kill_vid = s2io_vlan_rx_kill_vid,
.ndo_tx_timeout = s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = s2io_netpoll,
@@ -7972,9 +7918,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Initializing the BAR1 address as the start of the FIFO pointer. */
for (j = 0; j < MAX_TX_FIFOS; j++) {
- mac_control->tx_FIFO_start[j] =
- (struct TxFIFO_element __iomem *)
- (sp->bar1 + (j * 0x00020000));
+ mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
}
/* Driver entry points */
@@ -8621,18 +8565,12 @@ static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
struct s2io_nic *sp = netdev_priv(dev);
skb->protocol = eth_type_trans(skb, dev);
- if (sp->vlgrp && vlan_tag && (sp->vlan_strip_flag)) {
- /* Queueing the vlan frame to the upper layer */
- if (sp->config.napi)
- vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
- else
- vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
- } else {
- if (sp->config.napi)
- netif_receive_skb(skb);
- else
- netif_rx(skb);
- }
+ if (vlan_tag && sp->vlan_strip_flag)
+ __vlan_hwaccel_put_tag(skb, vlan_tag);
+ if (sp->config.napi)
+ netif_receive_skb(skb);
+ else
+ netif_rx(skb);
}
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
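
Two of the s2io.c hunks above replace open-coded logic with core helpers: the receive tail now attaches the stripped tag with __vlan_hwaccel_put_tag() and hands the skb to the normal receive path instead of steering it through a driver-private vlan_group, and the transmit-path fragment test becomes ip_is_fragment(). For reference, the fragment helper amounts to the expression being removed; the sketch below spells the constants out so it stands alone, and the names are illustrative rather than the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>          /* htons() */

#define SKETCH_IP_MF     0x2000 /* more-fragments flag */
#define SKETCH_IP_OFFSET 0x1fff /* fragment offset field */

static bool ip_is_fragment_sketch(uint16_t frag_off_be)
{
	/* a packet is a fragment if its offset is non-zero or MF is set */
	return (frag_off_be & htons(SKETCH_IP_MF | SKETCH_IP_OFFSET)) != 0;
}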
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 800b3a44e65..d5596926a1e 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -939,7 +939,6 @@ struct s2io_nic {
int task_flag;
unsigned long long start_time;
- struct vlan_group *vlgrp;
int vlan_strip_flag;
#define MSIX_FLG 0xA5
int num_entries;
@@ -968,8 +967,8 @@ struct s2io_nic {
u8 serial_num[VPD_STRING_LEN];
};
-#define RESET_ERROR 1;
-#define CMD_ERROR 2;
+#define RESET_ERROR 1
+#define CMD_ERROR 2
/* OS related system calls */
#ifndef readq
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 68d50429ddf..ea65f7ec360 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2597,7 +2597,7 @@ static int __devinit sbmac_probe(struct platform_device *pldev)
res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
BUG_ON(!res);
- sbm_base = ioremap_nocache(res->start, res->end - res->start + 1);
+ sbm_base = ioremap_nocache(res->start, resource_size(res));
if (!sbm_base) {
printk(KERN_ERR "%s: unable to map device registers\n",
dev_name(&pldev->dev));
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index fa74314ef78..9da47337b7c 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -22,6 +22,7 @@
* matching, so you need to enable IFF_PROMISC when using it.
*/
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
index a65c9863839..a3d5bb9e39d 100644
--- a/drivers/net/sfc/Kconfig
+++ b/drivers/net/sfc/Kconfig
@@ -1,5 +1,5 @@
config SFC
- tristate "Solarflare Solarstorm SFC4000/SFC9000-family support"
+ tristate "Solarflare SFC4000/SFC9000-family support"
depends on PCI && INET
select MDIO
select CRC32
@@ -7,13 +7,12 @@ config SFC
select I2C_ALGOBIT
help
This driver supports 10-gigabit Ethernet cards based on
- the Solarflare Communications Solarstorm SFC4000 and
- SFC9000-family controllers.
+ the Solarflare SFC4000 and SFC9000-family controllers.
To compile this driver as a module, choose M here. The module
will be called sfc.
config SFC_MTD
- bool "Solarflare Solarstorm SFC4000/SFC9000-family MTD support"
+ bool "Solarflare SFC4000/SFC9000-family MTD support"
depends on SFC && MTD && !(SFC=y && MTD=m)
default y
help
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index c914729f955..faca764aa21 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -229,8 +229,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
struct efx_nic *efx = channel->efx;
int spent;
- if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
- !channel->enabled))
+ if (unlikely(efx->reset_pending || !channel->enabled))
return 0;
spent = efx_nic_process_eventq(channel, budget);
@@ -1461,7 +1460,7 @@ static void efx_start_all(struct efx_nic *efx)
* reset_pending [modified from an atomic context], we instead guarantee
* that efx_mcdi_mode_poll() isn't reverted erroneously */
efx_mcdi_mode_event(efx);
- if (efx->reset_pending != RESET_TYPE_NONE)
+ if (efx->reset_pending)
efx_mcdi_mode_poll(efx);
/* Start the hardware monitor if there is one. Otherwise (we're link
@@ -2118,8 +2117,10 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
goto out;
}
- /* Allow resets to be rescheduled. */
- efx->reset_pending = RESET_TYPE_NONE;
+ /* Clear flags for the scopes we covered. We assume the NIC and
+ * driver are now quiescent so that there is no race here.
+ */
+ efx->reset_pending &= -(1 << (method + 1));
/* Reinitialise bus-mastering, which may have been turned off before
* the reset was scheduled. This is still appropriate, even in the
@@ -2154,12 +2155,13 @@ out:
static void efx_reset_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+ unsigned long pending = ACCESS_ONCE(efx->reset_pending);
- if (efx->reset_pending == RESET_TYPE_NONE)
+ if (!pending)
return;
/* If we're not RUNNING then don't reset. Leave the reset_pending
- * flag set so that efx_pci_probe_main will be retried */
+ * flags set so that efx_pci_probe_main will be retried */
if (efx->state != STATE_RUNNING) {
netif_info(efx, drv, efx->net_dev,
"scheduled reset quenched. NIC not RUNNING\n");
@@ -2167,7 +2169,7 @@ static void efx_reset_work(struct work_struct *data)
}
rtnl_lock();
- (void)efx_reset(efx, efx->reset_pending);
+ (void)efx_reset(efx, fls(pending) - 1);
rtnl_unlock();
}
@@ -2175,40 +2177,24 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
enum reset_type method;
- if (efx->reset_pending != RESET_TYPE_NONE) {
- netif_info(efx, drv, efx->net_dev,
- "quenching already scheduled reset\n");
- return;
- }
-
switch (type) {
case RESET_TYPE_INVISIBLE:
case RESET_TYPE_ALL:
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
method = type;
+ netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+ RESET_TYPE(method));
break;
- case RESET_TYPE_RX_RECOVERY:
- case RESET_TYPE_RX_DESC_FETCH:
- case RESET_TYPE_TX_DESC_FETCH:
- case RESET_TYPE_TX_SKIP:
- method = RESET_TYPE_INVISIBLE;
- break;
- case RESET_TYPE_MC_FAILURE:
default:
- method = RESET_TYPE_ALL;
- break;
- }
-
- if (method != type)
+ method = efx->type->map_reset_reason(type);
netif_dbg(efx, drv, efx->net_dev,
"scheduling %s reset for %s\n",
RESET_TYPE(method), RESET_TYPE(type));
- else
- netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
- RESET_TYPE(method));
+ break;
+ }
- efx->reset_pending = method;
+ set_bit(method, &efx->reset_pending);
/* efx_process_channel() will no longer read events once a
* reset is scheduled. So switch back to poll'd MCDI completions. */
@@ -2288,7 +2274,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
efx->pci_dev = pci_dev;
efx->msg_enable = debug;
efx->state = STATE_INIT;
- efx->reset_pending = RESET_TYPE_NONE;
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
@@ -2491,7 +2476,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
goto fail1;
netif_info(efx, probe, efx->net_dev,
- "Solarflare Communications NIC detected\n");
+ "Solarflare NIC detected\n");
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
@@ -2510,7 +2495,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
cancel_work_sync(&efx->reset_work);
if (rc == 0) {
- if (efx->reset_pending != RESET_TYPE_NONE) {
+ if (efx->reset_pending) {
/* If there was a scheduled reset during
* probe, the NIC is probably hosed anyway */
efx_pci_remove_main(efx);
@@ -2521,11 +2506,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
}
/* Retry if a recoverably reset event has been scheduled */
- if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
- (efx->reset_pending != RESET_TYPE_ALL))
+ if (efx->reset_pending &
+ ~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) ||
+ !efx->reset_pending)
goto fail3;
- efx->reset_pending = RESET_TYPE_NONE;
+ efx->reset_pending = 0;
}
if (rc) {
@@ -2609,7 +2595,7 @@ static int efx_pm_poweroff(struct device *dev)
efx->type->fini(efx);
- efx->reset_pending = RESET_TYPE_NONE;
+ efx->reset_pending = 0;
pci_save_state(pci_dev);
return pci_set_power_state(pci_dev, PCI_D3hot);
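
The efx.c changes above turn reset_pending from a single enum value into a bitmask, with methods numbered in order of increasing scope (see the enum.h comment further down). fls(pending) - 1 then selects the widest pending method, and pending &= -(1 << (method + 1)) clears that method together with every narrower one once the reset has run. A small user-space stand-in for the bit manipulation, using __builtin_clzl in place of the kernel's fls() and plain |= in place of set_bit():

#include <stdio.h>

enum { RESET_INVISIBLE, RESET_ALL, RESET_WORLD, RESET_DISABLE };

static int highest_pending(unsigned long pending)
{
	/* equivalent of fls(pending) - 1; caller guarantees pending != 0 */
	return 8 * sizeof(pending) - 1 - __builtin_clzl(pending);
}

int main(void)
{
	unsigned long pending = 0;

	pending |= 1UL << RESET_INVISIBLE;      /* two resets get scheduled */
	pending |= 1UL << RESET_ALL;

	int method = highest_pending(pending);  /* widest scope wins: ALL */
	pending &= -(1UL << (method + 1));      /* drop this scope and narrower */

	printf("ran method %d, still pending %#lx\n", method, pending);
	return 0;
}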
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
index 384cfe3b1be..d725a8fbe1a 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/sfc/enum.h
@@ -134,6 +134,8 @@ enum efx_loopback_mode {
* other values specify reasons, which efx_schedule_reset() will choose
* a method for.
*
+ * Reset methods are numbered in order of increasing scope.
+ *
* @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts
* @RESET_TYPE_ALL: reset everything but PCI core blocks
* @RESET_TYPE_WORLD: reset everything, save & restore PCI config
@@ -147,7 +149,6 @@ enum efx_loopback_mode {
* @RESET_TYPE_MC_FAILURE: MC reboot/assertion
*/
enum reset_type {
- RESET_TYPE_NONE = -1,
RESET_TYPE_INVISIBLE = 0,
RESET_TYPE_ALL = 1,
RESET_TYPE_WORLD = 2,
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index d229027dc36..bc4643af6dd 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -796,30 +796,13 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
{
struct efx_nic *efx = netdev_priv(net_dev);
- enum reset_type method;
- enum {
- ETH_RESET_EFX_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
- ETH_RESET_OFFLOAD | ETH_RESET_MAC)
- };
-
- /* Check for minimal reset flags */
- if ((*flags & ETH_RESET_EFX_INVISIBLE) != ETH_RESET_EFX_INVISIBLE)
- return -EINVAL;
- *flags ^= ETH_RESET_EFX_INVISIBLE;
- method = RESET_TYPE_INVISIBLE;
-
- if (*flags & ETH_RESET_PHY) {
- *flags ^= ETH_RESET_PHY;
- method = RESET_TYPE_ALL;
- }
+ int rc;
- if ((*flags & efx->type->reset_world_flags) ==
- efx->type->reset_world_flags) {
- *flags ^= efx->type->reset_world_flags;
- method = RESET_TYPE_WORLD;
- }
+ rc = efx->type->map_reset_flags(flags);
+ if (rc < 0)
+ return rc;
- return efx_reset(efx, method);
+ return efx_reset(efx, rc);
}
static int
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 60176e873d6..94bf4aaf984 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -536,7 +536,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
efx_oword_t reg;
int link_speed, isolate;
- isolate = (efx->reset_pending != RESET_TYPE_NONE);
+ isolate = !!ACCESS_ONCE(efx->reset_pending);
switch (link_state->speed) {
case 10000: link_speed = 3; break;
@@ -1051,6 +1051,49 @@ static int falcon_b0_test_registers(struct efx_nic *efx)
**************************************************************************
*/
+static enum reset_type falcon_map_reset_reason(enum reset_type reason)
+{
+ switch (reason) {
+ case RESET_TYPE_RX_RECOVERY:
+ case RESET_TYPE_RX_DESC_FETCH:
+ case RESET_TYPE_TX_DESC_FETCH:
+ case RESET_TYPE_TX_SKIP:
+ /* These can occasionally occur due to hardware bugs.
+ * We try to reset without disrupting the link.
+ */
+ return RESET_TYPE_INVISIBLE;
+ default:
+ return RESET_TYPE_ALL;
+ }
+}
+
+static int falcon_map_reset_flags(u32 *flags)
+{
+ enum {
+ FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC),
+ FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
+ FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
+ };
+
+ if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
+ *flags &= ~FALCON_RESET_WORLD;
+ return RESET_TYPE_WORLD;
+ }
+
+ if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
+ *flags &= ~FALCON_RESET_ALL;
+ return RESET_TYPE_ALL;
+ }
+
+ if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
+ *flags &= ~FALCON_RESET_INVISIBLE;
+ return RESET_TYPE_INVISIBLE;
+ }
+
+ return -EINVAL;
+}
+
/* Resets NIC to known state. This routine must be called in process
* context and is allowed to sleep. */
static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
@@ -1709,6 +1752,8 @@ const struct efx_nic_type falcon_a1_nic_type = {
.init = falcon_init_nic,
.fini = efx_port_dummy_op_void,
.monitor = falcon_monitor,
+ .map_reset_reason = falcon_map_reset_reason,
+ .map_reset_flags = falcon_map_reset_flags,
.reset = falcon_reset_hw,
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
@@ -1741,7 +1786,6 @@ const struct efx_nic_type falcon_a1_nic_type = {
.tx_dc_base = 0x130000,
.rx_dc_base = 0x100000,
.offload_features = NETIF_F_IP_CSUM,
- .reset_world_flags = ETH_RESET_IRQ,
};
const struct efx_nic_type falcon_b0_nic_type = {
@@ -1750,6 +1794,8 @@ const struct efx_nic_type falcon_b0_nic_type = {
.init = falcon_init_nic,
.fini = efx_port_dummy_op_void,
.monitor = falcon_monitor,
+ .map_reset_reason = falcon_map_reset_reason,
+ .map_reset_flags = falcon_map_reset_flags,
.reset = falcon_reset_hw,
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
@@ -1791,6 +1837,5 @@ const struct efx_nic_type falcon_b0_nic_type = {
.tx_dc_base = 0x130000,
.rx_dc_base = 0x100000,
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
- .reset_world_flags = ETH_RESET_IRQ,
};
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index 95a980fd63d..2b9636f96e0 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -335,28 +335,35 @@ static int efx_filter_search(struct efx_filter_table *table,
bool for_insert, int *depth_required)
{
unsigned hash, incr, filter_idx, depth, depth_max;
- struct efx_filter_spec *cmp;
hash = efx_filter_hash(key);
incr = efx_filter_increment(key);
- depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ?
- FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX);
-
- for (depth = 1, filter_idx = hash & (table->size - 1);
- depth <= depth_max && test_bit(filter_idx, table->used_bitmap);
- ++depth) {
- cmp = &table->spec[filter_idx];
- if (efx_filter_equal(spec, cmp))
- goto found;
+
+ filter_idx = hash & (table->size - 1);
+ depth = 1;
+ depth_max = (for_insert ?
+ (spec->priority <= EFX_FILTER_PRI_HINT ?
+ FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
+ table->search_depth[spec->type]);
+
+ for (;;) {
+ /* Return success if entry is used and matches this spec
+ * or entry is unused and we are trying to insert.
+ */
+ if (test_bit(filter_idx, table->used_bitmap) ?
+ efx_filter_equal(spec, &table->spec[filter_idx]) :
+ for_insert) {
+ *depth_required = depth;
+ return filter_idx;
+ }
+
+ /* Return failure if we reached the maximum search depth */
+ if (depth == depth_max)
+ return for_insert ? -EBUSY : -ENOENT;
+
filter_idx = (filter_idx + incr) & (table->size - 1);
+ ++depth;
}
- if (!for_insert)
- return -ENOENT;
- if (depth > depth_max)
- return -EBUSY;
-found:
- *depth_required = depth;
- return filter_idx;
}
/* Construct/deconstruct external filter IDs */
@@ -650,11 +657,11 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
return -EPROTONOSUPPORT;
/* RFS must validate the IP header length before calling us */
- EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip)));
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
ip = (const struct iphdr *)(skb->data + nhoff);
- if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+ if (ip_is_fragment(ip))
return -EPROTONOSUPPORT;
- EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4));
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
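
The efx_filter_search() rewrite above folds the used/match test and the depth bound into one probe loop: the hash picks the first slot, a key-derived increment gives the stride, insertion may claim the first unused slot it meets, and lookups stop at the table's recorded search depth. A toy version of that loop shape follows; it marks empty slots with zero and returns -1 where the driver returns -EBUSY or -ENOENT, whereas the real code uses a used_bitmap and efx_filter_equal().

#include <stdbool.h>

static int probe(const unsigned *table, unsigned size_mask,
		 unsigned hash, unsigned incr, unsigned key,
		 unsigned depth_max, bool for_insert)
{
	unsigned idx = hash & size_mask;
	unsigned depth = 1;

	for (;;) {
		bool used = table[idx] != 0;

		/* hit on a matching entry, or on a free slot when inserting */
		if (used ? table[idx] == key : for_insert)
			return idx;
		if (depth == depth_max)
			return -1;
		idx = (idx + incr) & size_mask;
		depth++;
	}
}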
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index e8d5f03a89f..b8e251a1ee4 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -17,7 +17,6 @@
#define DEBUG
#endif
-#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -645,7 +644,7 @@ struct efx_filter_state;
* @irq_rx_moderation: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags
* @state: Device state flag. Serialised by the rtnl_lock.
- * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
+ * @reset_pending: Bitmask for pending resets
* @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues
* @channel: Channels
@@ -728,7 +727,7 @@ struct efx_nic {
u32 msg_enable;
enum nic_state state;
- enum reset_type reset_pending;
+ unsigned long reset_pending;
struct efx_channel *channel[EFX_MAX_CHANNELS];
char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
@@ -828,6 +827,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @init: Initialise the controller
* @fini: Shut down the controller
* @monitor: Periodic function for polling link state and hardware monitor
+ * @map_reset_reason: Map ethtool reset reason to a reset method
+ * @map_reset_flags: Map ethtool reset flags to a reset method, if possible
* @reset: Reset the controller hardware and possibly the PHY. This will
* be called while the controller is uninitialised.
* @probe_port: Probe the MAC and PHY
@@ -865,8 +866,6 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @rx_dc_base: Base address in SRAM of RX queue descriptor caches
* @offload_features: net_device feature flags for protocol offload
* features implemented in hardware
- * @reset_world_flags: Flags for additional components covered by
- * reset method RESET_TYPE_WORLD
*/
struct efx_nic_type {
int (*probe)(struct efx_nic *efx);
@@ -874,6 +873,8 @@ struct efx_nic_type {
int (*init)(struct efx_nic *efx);
void (*fini)(struct efx_nic *efx);
void (*monitor)(struct efx_nic *efx);
+ enum reset_type (*map_reset_reason)(enum reset_type reason);
+ int (*map_reset_flags)(u32 *flags);
int (*reset)(struct efx_nic *efx, enum reset_type method);
int (*probe_port)(struct efx_nic *efx);
void (*remove_port)(struct efx_nic *efx);
@@ -908,7 +909,6 @@ struct efx_nic_type {
unsigned int tx_dc_base;
unsigned int rx_dc_base;
u32 offload_features;
- u32 reset_world_flags;
};
/**************************************************************************
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index f2a2b947f86..bafa23a6874 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index fb4721f780f..5735e84c69d 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -177,6 +177,36 @@ static int siena_test_registers(struct efx_nic *efx)
**************************************************************************
*/
+static enum reset_type siena_map_reset_reason(enum reset_type reason)
+{
+ return RESET_TYPE_ALL;
+}
+
+static int siena_map_reset_flags(u32 *flags)
+{
+ enum {
+ SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER |
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC |
+ ETH_RESET_PHY),
+ SIENA_RESET_MC = (SIENA_RESET_PORT |
+ ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT),
+ };
+
+ if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) {
+ *flags &= ~SIENA_RESET_MC;
+ return RESET_TYPE_WORLD;
+ }
+
+ if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) {
+ *flags &= ~SIENA_RESET_PORT;
+ return RESET_TYPE_ALL;
+ }
+
+ /* no invisible reset implemented */
+
+ return -EINVAL;
+}
+
static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
{
int rc;
@@ -390,17 +420,16 @@ static void siena_remove_nic(struct efx_nic *efx)
efx->nic_data = NULL;
}
-#define STATS_GENERATION_INVALID ((u64)(-1))
+#define STATS_GENERATION_INVALID ((__force __le64)(-1))
static int siena_try_update_nic_stats(struct efx_nic *efx)
{
- u64 *dma_stats;
+ __le64 *dma_stats;
struct efx_mac_stats *mac_stats;
- u64 generation_start;
- u64 generation_end;
+ __le64 generation_start, generation_end;
mac_stats = &efx->mac_stats;
- dma_stats = (u64 *)efx->stats_buffer.addr;
+ dma_stats = efx->stats_buffer.addr;
generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
if (generation_end == STATS_GENERATION_INVALID)
@@ -408,7 +437,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
rmb();
#define MAC_STAT(M, D) \
- mac_stats->M = dma_stats[MC_CMD_MAC_ ## D]
+ mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
MAC_STAT(tx_bytes, TX_BYTES);
MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
@@ -478,7 +507,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
mac_stats->rx_good_lt64 = 0;
- efx->n_rx_nodesc_drop_cnt = dma_stats[MC_CMD_MAC_RX_NODESC_DROPS];
+ efx->n_rx_nodesc_drop_cnt =
+ le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
#undef MAC_STAT
@@ -507,7 +537,7 @@ static void siena_update_nic_stats(struct efx_nic *efx)
static void siena_start_nic_stats(struct efx_nic *efx)
{
- u64 *dma_stats = (u64 *)efx->stats_buffer.addr;
+ __le64 *dma_stats = efx->stats_buffer.addr;
dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;
@@ -605,6 +635,8 @@ const struct efx_nic_type siena_a0_nic_type = {
.init = siena_init_nic,
.fini = efx_port_dummy_op_void,
.monitor = NULL,
+ .map_reset_reason = siena_map_reset_reason,
+ .map_reset_flags = siena_map_reset_flags,
.reset = siena_reset_hw,
.probe_port = siena_probe_port,
.remove_port = siena_remove_port,
@@ -641,5 +673,4 @@ const struct efx_nic_type siena_a0_nic_type = {
.rx_dc_base = 0x68000,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXHASH | NETIF_F_NTUPLE),
- .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT,
};
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 54415c7b84a..52fb7ed9f36 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -6,6 +6,7 @@
#undef DEBUG
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 8a72a979ee7..190f619e421 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -21,6 +21,7 @@
*/
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
@@ -33,7 +34,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
-#include <asm/cacheflush.h>
#include "sh_eth.h"
@@ -140,6 +140,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.tpauser = 1,
.hw_swap = 1,
.no_ade = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
};
#define SH_GIGA_ETH_BASE 0xfee00000
@@ -864,6 +866,8 @@ static int sh_eth_txfree(struct net_device *ndev)
break;
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
+ dma_unmap_single(&ndev->dev, txdesc->addr,
+ txdesc->buffer_length, DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
mdp->tx_skbuff[entry] = NULL;
freeNum++;
@@ -1184,8 +1188,8 @@ static void sh_eth_adjust_link(struct net_device *ndev)
mdp->cd->set_rate(ndev);
}
if (mdp->link == PHY_DOWN) {
- sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF)
- | ECMR_DM, ECMR);
+ sh_eth_write(ndev,
+ (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
new_state = 1;
mdp->link = phydev->link;
}
@@ -1487,13 +1491,12 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
entry = mdp->cur_tx % TX_RING_SIZE;
mdp->tx_skbuff[entry] = skb;
txdesc = &mdp->tx_ring[entry];
- txdesc->addr = virt_to_phys(skb->data);
/* soft swap. */
if (!mdp->cd->hw_swap)
sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
skb->len + 2);
- /* write back */
- __flush_purge_region(skb->data, skb->len);
+ txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
if (skb->len < ETHERSMALL)
txdesc->buffer_length = ETHERSMALL;
else
@@ -1770,7 +1773,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
int ret, devno = 0;
struct resource *res;
struct net_device *ndev = NULL;
- struct sh_eth_private *mdp;
+ struct sh_eth_private *mdp = NULL;
struct sh_eth_plat_data *pd;
/* get base addr */
@@ -1888,7 +1891,7 @@ out_unregister:
out_release:
/* net_dev free */
- if (mdp->tsu_addr)
+ if (mdp && mdp->tsu_addr)
iounmap(mdp->tsu_addr);
if (ndev)
free_netdev(ndev);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index b436e007eea..3c0f1312b39 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -21,6 +21,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
@@ -1824,6 +1825,16 @@ static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
}
+static int sis190_mac_addr(struct net_device *dev, void *p)
+{
+ int rc;
+
+ rc = eth_mac_addr(dev, p);
+ if (!rc)
+ sis190_init_rxfilter(dev);
+ return rc;
+}
+
static const struct net_device_ops sis190_netdev_ops = {
.ndo_open = sis190_open,
.ndo_stop = sis190_close,
@@ -1832,7 +1843,7 @@ static const struct net_device_ops sis190_netdev_ops = {
.ndo_tx_timeout = sis190_tx_timeout,
.ndo_set_multicast_list = sis190_set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = sis190_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sis190_netpoll,
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 484f795a779..658a1928fe7 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -482,7 +482,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
ret = -ENOMEM;
goto err_out_cleardev;
}
- sis_priv->tx_ring = (BufferDesc *)ring_space;
+ sis_priv->tx_ring = ring_space;
sis_priv->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pci_dev, RX_TOTAL_SIZE, &ring_dma);
@@ -490,7 +490,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
ret = -ENOMEM;
goto err_unmap_tx;
}
- sis_priv->rx_ring = (BufferDesc *)ring_space;
+ sis_priv->rx_ring = ring_space;
sis_priv->rx_ring_dma = ring_dma;
/* The SiS900-specific entries in the device structure. */
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index f4be5c78ebf..98ec614c569 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -50,7 +50,7 @@
#include "skge.h"
#define DRV_NAME "skge"
-#define DRV_VERSION "1.13"
+#define DRV_VERSION "1.14"
#define DEFAULT_TX_RING_SIZE 128
#define DEFAULT_RX_RING_SIZE 512
@@ -83,17 +83,20 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = {
- { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
- { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
- { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
- { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T) },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */
- { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
- { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
- { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
- { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
- { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 },
+ { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) }, /* 3Com 3C940 */
+ { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) }, /* 3Com 3C940B */
+#ifdef CONFIG_SKGE_GENESIS
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */
+#endif
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, /* Marvell Yukon 88E8001/8003/8010 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
+ { PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, /* CNet PowerG-2000 */
+ { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, 0x1064) }, /* Linksys EG1064 v2 */
+ { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, /* Linksys EG1032 v2 */
{ 0 }
};
MODULE_DEVICE_TABLE(pci, skge_id_table);
@@ -119,6 +122,15 @@ static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };
+static inline bool is_genesis(const struct skge_hw *hw)
+{
+#ifdef CONFIG_SKGE_GENESIS
+ return hw->chip_id == CHIP_ID_GENESIS;
+#else
+ return false;
+#endif
+}
+
static int skge_get_regs_len(struct net_device *dev)
{
return 0x4000;
@@ -146,7 +158,7 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
/* Wake on Lan only supported on Yukon chips with rev 1 or above */
static u32 wol_supported(const struct skge_hw *hw)
{
- if (hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(hw))
return 0;
if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
@@ -270,7 +282,7 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
SUPPORTED_Autoneg |
SUPPORTED_TP);
- if (hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(hw))
supported &= ~(SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
@@ -433,7 +445,7 @@ static void skge_get_ethtool_stats(struct net_device *dev,
{
struct skge_port *skge = netdev_priv(dev);
- if (skge->hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(skge->hw))
genesis_get_stats(skge, data);
else
yukon_get_stats(skge, data);
@@ -448,7 +460,7 @@ static struct net_device_stats *skge_get_stats(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
u64 data[ARRAY_SIZE(skge_stats)];
- if (skge->hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(skge->hw))
genesis_get_stats(skge, data);
else
yukon_get_stats(skge, data);
@@ -589,7 +601,7 @@ static int skge_set_pauseparam(struct net_device *dev,
/* Chip internal frequency for clock calculations */
static inline u32 hwkhz(const struct skge_hw *hw)
{
- return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
+ return is_genesis(hw) ? 53125 : 78125;
}
/* Chip HZ to microseconds */
@@ -674,7 +686,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
int port = skge->port;
spin_lock_bh(&hw->phy_lock);
- if (hw->chip_id == CHIP_ID_GENESIS) {
+ if (is_genesis(hw)) {
switch (mode) {
case LED_MODE_OFF:
if (hw->phy_type == SK_PHY_BCOM)
@@ -1053,7 +1065,6 @@ static void skge_link_down(struct skge_port *skge)
netif_info(skge, link, skge->netdev, "Link is down\n");
}
-
static void xm_link_down(struct skge_hw *hw, int port)
{
struct net_device *dev = hw->dev[port];
@@ -1172,7 +1183,6 @@ static void genesis_reset(struct skge_hw *hw, int port)
xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF);
}
-
/* Convert mode to MII values */
static const u16 phy_pause_map[] = {
[FLOW_MODE_NONE] = 0,
@@ -2405,7 +2415,7 @@ static void skge_phy_reset(struct skge_port *skge)
netif_carrier_off(skge->netdev);
spin_lock_bh(&hw->phy_lock);
- if (hw->chip_id == CHIP_ID_GENESIS) {
+ if (is_genesis(hw)) {
genesis_reset(hw, port);
genesis_mac_init(hw, port);
} else {
@@ -2436,7 +2446,8 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG: {
u16 val = 0;
spin_lock_bh(&hw->phy_lock);
- if (hw->chip_id == CHIP_ID_GENESIS)
+
+ if (is_genesis(hw))
err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
else
err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
@@ -2447,7 +2458,7 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCSMIIREG:
spin_lock_bh(&hw->phy_lock);
- if (hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(hw))
err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
data->val_in);
else
@@ -2559,7 +2570,7 @@ static int skge_up(struct net_device *dev)
/* Initialize MAC */
spin_lock_bh(&hw->phy_lock);
- if (hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(hw))
genesis_mac_init(hw, port);
else
yukon_mac_init(hw, port);
@@ -2621,7 +2632,7 @@ static int skge_down(struct net_device *dev)
netif_tx_disable(dev);
- if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
+ if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)
del_timer_sync(&skge->link_timer);
napi_disable(&skge->napi);
@@ -2633,7 +2644,7 @@ static int skge_down(struct net_device *dev)
spin_unlock_irq(&hw->hw_lock);
skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
- if (hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(hw))
genesis_stop(skge);
else
yukon_stop(skge);
@@ -2661,7 +2672,7 @@ static int skge_down(struct net_device *dev)
skge_rx_stop(hw, port);
- if (hw->chip_id == CHIP_ID_GENESIS) {
+ if (is_genesis(hw)) {
skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
} else {
@@ -2957,7 +2968,7 @@ static void yukon_set_multicast(struct net_device *dev)
static inline u16 phy_length(const struct skge_hw *hw, u32 status)
{
- if (hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(hw))
return status >> XMR_FS_LEN_SHIFT;
else
return status >> GMR_FS_LEN_SHIFT;
@@ -2965,7 +2976,7 @@ static inline u16 phy_length(const struct skge_hw *hw, u32 status)
static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
{
- if (hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(hw))
return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0;
else
return (status & GMR_FS_ANY_ERR) ||
@@ -2975,9 +2986,8 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
static void skge_set_multicast(struct net_device *dev)
{
struct skge_port *skge = netdev_priv(dev);
- struct skge_hw *hw = skge->hw;
- if (hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(skge->hw))
genesis_set_multicast(dev);
else
yukon_set_multicast(dev);
@@ -3057,7 +3067,7 @@ error:
"rx err, slot %td control 0x%x status 0x%x\n",
e - skge->rx_ring.start, control, status);
- if (skge->hw->chip_id == CHIP_ID_GENESIS) {
+ if (is_genesis(skge->hw)) {
if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
dev->stats.rx_length_errors++;
if (status & XMR_FS_FRA_ERR)
@@ -3171,7 +3181,7 @@ static void skge_mac_parity(struct skge_hw *hw, int port)
++dev->stats.tx_heartbeat_errors;
- if (hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(hw))
skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
MFF_CLR_PERR);
else
@@ -3183,7 +3193,7 @@ static void skge_mac_parity(struct skge_hw *hw, int port)
static void skge_mac_intr(struct skge_hw *hw, int port)
{
- if (hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(hw))
genesis_mac_intr(hw, port);
else
yukon_mac_intr(hw, port);
@@ -3195,7 +3205,7 @@ static void skge_error_irq(struct skge_hw *hw)
struct pci_dev *pdev = hw->pdev;
u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
- if (hw->chip_id == CHIP_ID_GENESIS) {
+ if (is_genesis(hw)) {
/* clear xmac errors */
if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
@@ -3278,7 +3288,7 @@ static void skge_extirq(unsigned long arg)
struct skge_port *skge = netdev_priv(dev);
spin_lock(&hw->phy_lock);
- if (hw->chip_id != CHIP_ID_GENESIS)
+ if (!is_genesis(hw))
yukon_phy_intr(skge);
else if (hw->phy_type == SK_PHY_BCOM)
bcom_phy_intr(skge);
@@ -3397,7 +3407,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
- if (hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(hw))
xm_outaddr(hw, port, XM_SA, dev->dev_addr);
else {
gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
@@ -3473,6 +3483,7 @@ static int skge_reset(struct skge_hw *hw)
switch (hw->chip_id) {
case CHIP_ID_GENESIS:
+#ifdef CONFIG_SKGE_GENESIS
switch (hw->phy_type) {
case SK_PHY_XMAC:
hw->phy_addr = PHY_ADDR_XMAC;
@@ -3486,6 +3497,10 @@ static int skge_reset(struct skge_hw *hw)
return -EOPNOTSUPP;
}
break;
+#else
+ dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n");
+ return -EOPNOTSUPP;
+#endif
case CHIP_ID_YUKON:
case CHIP_ID_YUKON_LITE:
@@ -3508,7 +3523,7 @@ static int skge_reset(struct skge_hw *hw)
/* read the adapter's RAM size */
t8 = skge_read8(hw, B2_E_0);
- if (hw->chip_id == CHIP_ID_GENESIS) {
+ if (is_genesis(hw)) {
if (t8 == 3) {
/* special case: 4 x 64k x 36, offset = 0x80000 */
hw->ram_size = 0x100000;
@@ -3523,10 +3538,10 @@ static int skge_reset(struct skge_hw *hw)
hw->intr_mask = IS_HW_ERR;
/* Use PHY IRQ for all but fiber based Genesis board */
- if (!(hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC))
+ if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC))
hw->intr_mask |= IS_EXT_REG;
- if (hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(hw))
genesis_init(hw);
else {
/* switch power to VCC (WA for VAUX problem) */
@@ -3591,7 +3606,7 @@ static int skge_reset(struct skge_hw *hw)
skge_write32(hw, B0_IMSK, hw->intr_mask);
for (i = 0; i < hw->ports; i++) {
- if (hw->chip_id == CHIP_ID_GENESIS)
+ if (is_genesis(hw))
genesis_reset(hw, i);
else
yukon_reset(hw, i);
@@ -3802,9 +3817,9 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
skge->port = port;
/* Only used for Genesis XMAC */
- setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
-
- if (hw->chip_id != CHIP_ID_GENESIS) {
+ if (is_genesis(hw))
+ setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
+ else {
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_RXCSUM;
dev->features |= dev->hw_features;
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 598bf7a1a55..a2eb3411584 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -3,6 +3,7 @@
*/
#ifndef _SKGE_H
#define _SKGE_H
+#include <linux/interrupt.h>
/* PCI config registers */
#define PCI_DEV_REG1 0x40
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 3ee41da130c..57339da7632 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -32,6 +32,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
+#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <net/ip.h>
@@ -49,7 +50,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.28"
+#define DRV_VERSION "1.29"
/*
* The Yukon II chipset takes 64 bit command blocks (called list elements)
@@ -364,6 +365,17 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
}
} else {
+ if (hw->chip_id >= CHIP_ID_YUKON_OPT) {
+ u16 ctrl2 = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL_2);
+
+ /* enable PHY Reverse Auto-Negotiation */
+ ctrl2 |= 1u << 13;
+
+ /* Write PHY changes (SW-reset must follow) */
+ gm_phy_write(hw, port, PHY_MARV_EXT_CTRL_2, ctrl2);
+ }
+
+
/* disable energy detect */
ctrl &= ~PHY_M_PC_EN_DET_MSK;
@@ -625,6 +637,63 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
if (ledover)
gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
+ } else if (hw->chip_id == CHIP_ID_YUKON_PRM &&
+ (sky2_read8(hw, B2_MAC_CFG) & 0xf) == 0x7) {
+ int i;
+ /* This is a phy register setup workaround copied from the vendor driver. */
+ static const struct {
+ u16 reg, val;
+ } eee_afe[] = {
+ { 0x156, 0x58ce },
+ { 0x153, 0x99eb },
+ { 0x141, 0x8064 },
+ /* { 0x155, 0x130b },*/
+ { 0x000, 0x0000 },
+ { 0x151, 0x8433 },
+ { 0x14b, 0x8c44 },
+ { 0x14c, 0x0f90 },
+ { 0x14f, 0x39aa },
+ /* { 0x154, 0x2f39 },*/
+ { 0x14d, 0xba33 },
+ { 0x144, 0x0048 },
+ { 0x152, 0x2010 },
+ /* { 0x158, 0x1223 },*/
+ { 0x140, 0x4444 },
+ { 0x154, 0x2f3b },
+ { 0x158, 0xb203 },
+ { 0x157, 0x2029 },
+ };
+
+ /* Start Workaround for OptimaEEE Rev.Z0 */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fb);
+
+ gm_phy_write(hw, port, 1, 0x4099);
+ gm_phy_write(hw, port, 3, 0x1120);
+ gm_phy_write(hw, port, 11, 0x113c);
+ gm_phy_write(hw, port, 14, 0x8100);
+ gm_phy_write(hw, port, 15, 0x112a);
+ gm_phy_write(hw, port, 17, 0x1008);
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00fc);
+ gm_phy_write(hw, port, 1, 0x20b0);
+
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
+
+ for (i = 0; i < ARRAY_SIZE(eee_afe); i++) {
+ /* apply AFE settings */
+ gm_phy_write(hw, port, 17, eee_afe[i].val);
+ gm_phy_write(hw, port, 16, eee_afe[i].reg | 1u<<13);
+ }
+
+ /* End Workaround for OptimaEEE */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+
+ /* Enable 10Base-Te (EEE) */
+ if (hw->chip_id >= CHIP_ID_YUKON_PRM) {
+ reg = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
+ gm_phy_write(hw, port, PHY_MARV_EXT_CTRL,
+ reg | PHY_M_10B_TE_ENABLE);
+ }
}
/* Enable phy interrupt on auto-negotiation complete (or link up) */
@@ -713,6 +782,20 @@ static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
+/* configure IPG according to used link speed */
+static void sky2_set_ipg(struct sky2_port *sky2)
+{
+ u16 reg;
+
+ reg = gma_read16(sky2->hw, sky2->port, GM_SERIAL_MODE);
+ reg &= ~GM_SMOD_IPG_MSK;
+ if (sky2->speed > SPEED_100)
+ reg |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
+ else
+ reg |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);
+ gma_write16(sky2->hw, sky2->port, GM_SERIAL_MODE, reg);
+}
+
/* Enable Rx/Tx */
static void sky2_enable_rx_tx(struct sky2_port *sky2)
{
@@ -881,7 +964,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
/* serial mode register */
reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
- GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
+ GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF_1000);
if (hw->dev[port]->mtu > ETH_DATA_LEN)
reg |= GM_SMOD_JUMBO_ENA;
@@ -1361,13 +1444,14 @@ static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
* Allocate an skb for receiving. If the MTU is large enough
* make the skb non-linear with a fragment list of pages.
*/
-static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
+static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2, gfp_t gfp)
{
struct sk_buff *skb;
int i;
- skb = netdev_alloc_skb(sky2->netdev,
- sky2->rx_data_size + sky2_rx_pad(sky2->hw));
+ skb = __netdev_alloc_skb(sky2->netdev,
+ sky2->rx_data_size + sky2_rx_pad(sky2->hw),
+ gfp);
if (!skb)
goto nomem;
@@ -1385,7 +1469,7 @@ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
skb_reserve(skb, NET_IP_ALIGN);
for (i = 0; i < sky2->rx_nfrags; i++) {
- struct page *page = alloc_page(GFP_ATOMIC);
+ struct page *page = alloc_page(gfp);
if (!page)
goto free_partial;
@@ -1415,7 +1499,7 @@ static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
for (i = 0; i < sky2->rx_pending; i++) {
struct rx_ring_info *re = sky2->rx_ring + i;
- re->skb = sky2_rx_alloc(sky2);
+ re->skb = sky2_rx_alloc(sky2, GFP_KERNEL);
if (!re->skb)
return -ENOMEM;
@@ -1448,7 +1532,7 @@ static void sky2_rx_start(struct sky2_port *sky2)
sky2_qset(hw, rxq);
/* On PCI express lowering the watermark gives better performance */
- if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
+ if (pci_is_pcie(hw->pdev))
sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
/* These chips have no ram buffer?
@@ -2051,6 +2135,8 @@ static void sky2_link_up(struct sky2_port *sky2)
[FC_BOTH] = "both",
};
+ sky2_set_ipg(sky2);
+
sky2_enable_rx_tx(sky2);
gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
@@ -2288,8 +2374,11 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
netdev_update_features(dev);
- mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
- GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
+ mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA;
+ if (sky2->speed > SPEED_100)
+ mode |= IPG_DATA_VAL(IPG_DATA_DEF_1000);
+ else
+ mode |= IPG_DATA_VAL(IPG_DATA_DEF_10_100);
if (dev->mtu > ETH_DATA_LEN)
mode |= GM_SMOD_JUMBO_ENA;
@@ -2383,7 +2472,7 @@ static struct sk_buff *receive_new(struct sky2_port *sky2,
struct rx_ring_info nre;
unsigned hdr_space = sky2->rx_data_size;
- nre.skb = sky2_rx_alloc(sky2);
+ nre.skb = sky2_rx_alloc(sky2, GFP_ATOMIC);
if (unlikely(!nre.skb))
goto nobuf;
@@ -2938,6 +3027,8 @@ static u32 sky2_mhz(const struct sky2_hw *hw)
case CHIP_ID_YUKON_SUPR:
case CHIP_ID_YUKON_UL_2:
case CHIP_ID_YUKON_OPT:
+ case CHIP_ID_YUKON_PRM:
+ case CHIP_ID_YUKON_OP_2:
return 125;
case CHIP_ID_YUKON_FE:
@@ -2994,7 +3085,8 @@ static int __devinit sky2_init(struct sky2_hw *hw)
hw->flags = SKY2_HW_GIGABIT
| SKY2_HW_NEWER_PHY
| SKY2_HW_NEW_LE
- | SKY2_HW_ADV_POWER_CTL;
+ | SKY2_HW_ADV_POWER_CTL
+ | SKY2_HW_RSS_CHKSUM;
/* New transmit checksum */
if (hw->chip_rev != CHIP_REV_YU_EX_B0)
@@ -3022,7 +3114,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
/* The workaround for status conflicts with VLAN tag detection. */
if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
- hw->flags |= SKY2_HW_VLAN_BROKEN;
+ hw->flags |= SKY2_HW_VLAN_BROKEN | SKY2_HW_RSS_CHKSUM;
break;
case CHIP_ID_YUKON_SUPR:
@@ -3031,6 +3123,9 @@ static int __devinit sky2_init(struct sky2_hw *hw)
| SKY2_HW_NEW_LE
| SKY2_HW_AUTO_TX_SUM
| SKY2_HW_ADV_POWER_CTL;
+
+ if (hw->chip_rev == CHIP_REV_YU_SU_A0)
+ hw->flags |= SKY2_HW_RSS_CHKSUM;
break;
case CHIP_ID_YUKON_UL_2:
@@ -3039,6 +3134,8 @@ static int __devinit sky2_init(struct sky2_hw *hw)
break;
case CHIP_ID_YUKON_OPT:
+ case CHIP_ID_YUKON_PRM:
+ case CHIP_ID_YUKON_OP_2:
hw->flags = SKY2_HW_GIGABIT
| SKY2_HW_NEW_LE
| SKY2_HW_ADV_POWER_CTL;
@@ -3071,7 +3168,7 @@ static void sky2_reset(struct sky2_hw *hw)
{
struct pci_dev *pdev = hw->pdev;
u16 status;
- int i, cap;
+ int i;
u32 hwe_mask = Y2_HWE_ALL_MASK;
/* disable ASF */
@@ -3107,8 +3204,7 @@ static void sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, B0_CTST, CS_MRST_CLR);
- cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (cap) {
+ if (pci_is_pcie(pdev)) {
sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
0xfffffffful);
@@ -3139,30 +3235,33 @@ static void sky2_reset(struct sky2_hw *hw)
sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
}
- if (hw->chip_id == CHIP_ID_YUKON_OPT) {
+ if (hw->chip_id == CHIP_ID_YUKON_OPT ||
+ hw->chip_id == CHIP_ID_YUKON_PRM ||
+ hw->chip_id == CHIP_ID_YUKON_OP_2) {
u16 reg;
u32 msk;
- if (hw->chip_rev == 0) {
+ if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
/* disable PCI-E PHY power down (set PHY reg 0x80, bit 7) */
sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7));
/* set PHY Link Detect Timer to 1.1 second (11x 100ms) */
reg = 10;
+
+ /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
+ sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
} else {
/* set PHY Link Detect Timer to 0.4 second (4x 100ms) */
reg = 3;
}
reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
+ reg |= PSM_CONFIG_REG4_RST_PHY_LINK_DETECT;
/* reset PHY Link Detect */
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
- sky2_pci_write16(hw, PSM_CONFIG_REG4,
- reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
-
/* enable PHY Quick Link */
msk = sky2_read32(hw, B0_IMSK);
msk |= Y2_IS_PHY_QLNK;
@@ -3170,11 +3269,11 @@ static void sky2_reset(struct sky2_hw *hw)
/* check if PSMv2 was running before */
reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
- if (reg & PCI_EXP_LNKCTL_ASPMC) {
- cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (reg & PCI_EXP_LNKCTL_ASPMC)
/* restore the PCIe Link Control register */
- sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
- }
+ sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL,
+ reg);
+
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
@@ -4175,8 +4274,18 @@ static u32 sky2_fix_features(struct net_device *dev, u32 features)
/* In order to do Jumbo packets on these chips, need to turn off the
* transmit store/forward. Therefore checksum offload won't work.
*/
- if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U)
+ if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U) {
+ netdev_info(dev, "checksum offload not possible with jumbo frames\n");
features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM);
+ }
+
+ /* Some hardware requires receive checksum for RSS to work. */
+ if ((features & NETIF_F_RXHASH) &&
+ !(features & NETIF_F_RXCSUM) &&
+ (sky2->hw->flags & SKY2_HW_RSS_CHKSUM)) {
+ netdev_info(dev, "receive hashing forces receive checksum\n");
+ features |= NETIF_F_RXCSUM;
+ }
return features;
}
@@ -4676,9 +4785,11 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
"UL 2", /* 0xba */
"Unknown", /* 0xbb */
"Optima", /* 0xbc */
+ "Optima Prime", /* 0xbd */
+ "Optima 2", /* 0xbe */
};
- if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OPT)
+ if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2)
strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
else
snprintf(buf, sz, "(chip %#x)", chipid);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 318c9ae7bf9..0af31b8b5f1 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -412,7 +412,7 @@ enum {
Y2_IS_HW_ERR = 1<<31, /* Interrupt HW Error */
Y2_IS_STAT_BMU = 1<<30, /* Status BMU Interrupt */
Y2_IS_ASF = 1<<29, /* ASF subsystem Interrupt */
-
+ Y2_IS_CPU_TO = 1<<28, /* CPU Timeout */
Y2_IS_POLL_CHK = 1<<27, /* Check IRQ from polling unit */
Y2_IS_TWSI_RDY = 1<<26, /* IRQ on end of TWSI Tx */
Y2_IS_IRQ_SW = 1<<25, /* SW forced IRQ */
@@ -547,6 +547,8 @@ enum {
CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */
CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
+ CHIP_ID_YUKON_PRM = 0xbd, /* YUKON-2 Optima Prime */
+ CHIP_ID_YUKON_OP_2 = 0xbe, /* YUKON-2 Optima 2 */
};
enum yukon_xl_rev {
@@ -1420,8 +1422,10 @@ enum {
PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */
PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */
- PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */};
+ PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */
+ PHY_M_10B_TE_ENABLE = 1<<7, /* 10Base-Te Enable (88E8079 and above) */
+};
#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10 & PHY_M_EC_M_DSC_MSK)
/* 00=1x; 01=2x; 10=3x; 11=4x */
#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8 & PHY_M_EC_S_DSC_MSK)
@@ -1807,10 +1811,11 @@ enum {
};
#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
-#define DATA_BLIND_DEF 0x04
-
#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK)
-#define IPG_DATA_DEF 0x1e
+
+#define DATA_BLIND_DEF 0x04
+#define IPG_DATA_DEF_1000 0x1e
+#define IPG_DATA_DEF_10_100 0x18
/* GM_SMI_CTRL 16 bit r/w SMI Control Register */
enum {
@@ -2281,6 +2286,7 @@ struct sky2_hw {
#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
#define SKY2_HW_RSS_BROKEN 0x00000100
#define SKY2_HW_VLAN_BROKEN 0x00000200
+#define SKY2_HW_RSS_CHKSUM 0x00000400 /* RSS requires chksum */
u8 chip_id;
u8 chip_rev;
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index ab9e3b785b5..0a0a6643cf3 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -297,7 +297,7 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
lcs = cs;
cs = cs->next;
comp->sls_o_searches++;
- };
+ }
/*
* Didn't find it -- re-use oldest cstate. Send an
* uncompressed packet that tells the other side what
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 8ec1a9a0bb9..4c617534f93 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -182,11 +182,11 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
#ifdef SL_INCLUDE_CSLIP
cbuff = xchg(&sl->cbuff, cbuff);
slcomp = xchg(&sl->slcomp, slcomp);
+#endif
#ifdef CONFIG_SLIP_MODE_SLIP6
sl->xdata = 0;
sl->xbits = 0;
#endif
-#endif
spin_unlock_bh(&sl->lock);
err = 0;
@@ -194,8 +194,7 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
err_exit:
#ifdef SL_INCLUDE_CSLIP
kfree(cbuff);
- if (slcomp)
- slhc_free(slcomp);
+ slhc_free(slcomp);
#endif
kfree(xbuff);
kfree(rbuff);
@@ -248,7 +247,7 @@ static int sl_realloc_bufs(struct slip *sl, int mtu)
#else
if (xbuff == NULL || rbuff == NULL) {
#endif
- if (mtu >= sl->mtu) {
+ if (mtu > sl->mtu) {
printk(KERN_WARNING "%s: unable to grow slip buffers, MTU change cancelled.\n",
dev->name);
err = -ENOBUFS;
@@ -368,7 +367,7 @@ static void sl_bump(struct slip *sl)
memcpy(skb_put(skb, count), sl->rbuff, count);
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IP);
- netif_rx(skb);
+ netif_rx_ni(skb);
dev->stats.rx_packets++;
}
@@ -724,12 +723,10 @@ static void sl_sync(void)
static struct slip *sl_alloc(dev_t line)
{
int i;
+ char name[IFNAMSIZ];
struct net_device *dev = NULL;
struct slip *sl;
- if (slip_devs == NULL)
- return NULL; /* Master array missing ! */
-
for (i = 0; i < slip_maxdev; i++) {
dev = slip_devs[i];
if (dev == NULL)
@@ -739,25 +736,12 @@ static struct slip *sl_alloc(dev_t line)
if (i >= slip_maxdev)
return NULL;
- if (dev) {
- sl = netdev_priv(dev);
- if (test_bit(SLF_INUSE, &sl->flags)) {
- unregister_netdevice(dev);
- dev = NULL;
- slip_devs[i] = NULL;
- }
- }
-
- if (!dev) {
- char name[IFNAMSIZ];
- sprintf(name, "sl%d", i);
-
- dev = alloc_netdev(sizeof(*sl), name, sl_setup);
- if (!dev)
- return NULL;
- dev->base_addr = i;
- }
+ sprintf(name, "sl%d", i);
+ dev = alloc_netdev(sizeof(*sl), name, sl_setup);
+ if (!dev)
+ return NULL;
+ dev->base_addr = i;
sl = netdev_priv(dev);
/* Initialize channel control data */
@@ -823,7 +807,6 @@ static int slip_open(struct tty_struct *tty)
sl->tty = tty;
tty->disc_data = sl;
- sl->line = tty_devnum(tty);
sl->pid = current->pid;
if (!test_bit(SLF_INUSE, &sl->flags)) {
@@ -890,8 +873,6 @@ static void slip_close(struct tty_struct *tty)
tty->disc_data = NULL;
sl->tty = NULL;
- if (!sl->leased)
- sl->line = 0;
/* VSV = very important to remove timers */
#ifdef CONFIG_SLIP_SMART
diff --git a/drivers/net/slip.h b/drivers/net/slip.h
index 914e958abbf..aa0764ce234 100644
--- a/drivers/net/slip.h
+++ b/drivers/net/slip.h
@@ -90,7 +90,6 @@ struct slip {
unsigned char mode; /* SLIP mode */
unsigned char leased;
- dev_t line;
pid_t pid;
#define SL_MODE_SLIP 0
#define SL_MODE_CSLIP 1
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index d07c39cb4da..34934fb23b9 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -42,6 +42,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index 235a3c6c9f9..ba44ede2919 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -62,6 +62,7 @@ static const char version[] =
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/isapnp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 053863aefb1..a91fe172302 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1351,11 +1351,6 @@ static void smc911x_set_multicast_list(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
u32 position;
- /* make sure this is a multicast address -
- shouldn't this be a given if we have it here ? */
- if (!(*ha->addr & 1))
- continue;
-
/* upper 6 bits are used as hash index */
position = ether_crc(ETH_ALEN, ha->addr)>>26;
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 7486d090806..5b65ac4b3ce 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -447,11 +447,6 @@ static void smc_setmulticast(int ioaddr, struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
int position;
- /* make sure this is a multicast address - shouldn't this
- be a given if we have it here ? */
- if (!(*ha->addr & 1))
- continue;
-
/* only use the low order bits */
position = ether_crc_le(6, ha->addr) & 0x3f;
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index f6285748bd3..2b1d254d59a 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1425,11 +1425,6 @@ static void smc_set_multicast_list(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
int position;
- /* make sure this is a multicast address -
- shouldn't this be a given if we have it here ? */
- if (!(*ha->addr & 1))
- continue;
-
/* only use the low order bits */
position = crc32_le(~0, ha->addr, 6) & 0x3f;
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index c6d47d10590..b9016a30cdc 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -37,6 +37,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -1473,6 +1474,7 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
freespace -= (skb->len + 32);
+ skb_tx_timestamp(skb);
dev_kfree_skb(skb);
if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 4c92ad8be76..459726f5475 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -19,6 +19,7 @@
***************************************************************************
*/
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
@@ -1030,6 +1031,8 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
pd->tx_ring[index].status = TDES0_OWN_;
wmb();
+ skb_tx_timestamp(skb);
+
/* kick the DMA */
smsc9420_reg_write(pd, TX_POLL_DEMAND, 1);
smsc9420_pci_flush_write(pd);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 949f124e127..1ff3491c824 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -31,6 +31,7 @@
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/ioport.h>
#include <linux/ip.h>
@@ -1003,9 +1004,7 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
}
if (data_status & SPIDER_NET_VLAN_PACKET) {
- /* further enhancements: HW-accel VLAN
- * vlan_hwaccel_receive_skb
- */
+ /* further enhancements: HW-accel VLAN */
}
/* update netdevice statistics */
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 36045f3b032..7ae1f990a98 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -30,6 +30,7 @@
#define DRV_VERSION "2.1"
#define DRV_RELDATE "July 6, 2008"
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -560,7 +561,7 @@ struct netdev_private {
struct net_device *dev;
struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
- struct vlan_group *vlgrp;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#endif
void *queue_mem;
dma_addr_t queue_mem_dma;
@@ -606,18 +607,6 @@ static const struct ethtool_ops ethtool_ops;
#ifdef VLAN_SUPPORT
-static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct netdev_private *np = netdev_priv(dev);
-
- spin_lock(&np->lock);
- if (debug > 2)
- printk("%s: Setting vlgrp to %p\n", dev->name, grp);
- np->vlgrp = grp;
- set_rx_mode(dev);
- spin_unlock(&np->lock);
-}
-
static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct netdev_private *np = netdev_priv(dev);
@@ -625,6 +614,7 @@ static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
spin_lock(&np->lock);
if (debug > 1)
printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
+ set_bit(vid, np->active_vlans);
set_rx_mode(dev);
spin_unlock(&np->lock);
}
@@ -636,7 +626,7 @@ static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
spin_lock(&np->lock);
if (debug > 1)
printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
- vlan_group_set_device(np->vlgrp, vid, NULL);
+ clear_bit(vid, np->active_vlans);
set_rx_mode(dev);
spin_unlock(&np->lock);
}
@@ -647,15 +637,14 @@ static const struct net_device_ops netdev_ops = {
.ndo_open = netdev_open,
.ndo_stop = netdev_close,
.ndo_start_xmit = start_tx,
- .ndo_tx_timeout = tx_timeout,
- .ndo_get_stats = get_stats,
+ .ndo_tx_timeout = tx_timeout,
+ .ndo_get_stats = get_stats,
.ndo_set_multicast_list = &set_rx_mode,
- .ndo_do_ioctl = netdev_ioctl,
+ .ndo_do_ioctl = netdev_ioctl,
.ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef VLAN_SUPPORT
- .ndo_vlan_rx_register = netdev_vlan_rx_register,
.ndo_vlan_rx_add_vid = netdev_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = netdev_vlan_rx_kill_vid,
#endif
@@ -1527,21 +1516,17 @@ static int __netdev_rx(struct net_device *dev, int *quota)
printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
}
#ifdef VLAN_SUPPORT
- if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
+ if (le16_to_cpu(desc->status2) & 0x0200) {
u16 vlid = le16_to_cpu(desc->vlanid);
if (debug > 4) {
printk(KERN_DEBUG " netdev_rx() vlanid = %d\n",
vlid);
}
- /*
- * vlan_hwaccel_rx expects a packet with the VLAN tag
- * stripped out.
- */
- vlan_hwaccel_rx(skb, np->vlgrp, vlid);
- } else
+ __vlan_hwaccel_put_tag(skb, vlid);
+ }
#endif /* VLAN_SUPPORT */
- netif_receive_skb(skb);
+ netif_receive_skb(skb);
dev->stats.rx_packets++;
next_rx:
@@ -1751,6 +1736,32 @@ static struct net_device_stats *get_stats(struct net_device *dev)
return &dev->stats;
}
+#ifdef VLAN_SUPPORT
+static u32 set_vlan_mode(struct netdev_private *np)
+{
+ u32 ret = VlanMode;
+ u16 vid;
+ void __iomem *filter_addr = np->base + HashTable + 8;
+ int vlan_count = 0;
+
+ for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
+ if (vlan_count == 32)
+ break;
+ writew(vid, filter_addr);
+ filter_addr += 16;
+ vlan_count++;
+ }
+ if (vlan_count == 32) {
+ ret |= PerfectFilterVlan;
+ while (vlan_count < 32) {
+ writew(0, filter_addr);
+ filter_addr += 16;
+ vlan_count++;
+ }
+ }
+ return ret;
+}
+#endif /* VLAN_SUPPORT */
static void set_rx_mode(struct net_device *dev)
{
@@ -1759,30 +1770,9 @@ static void set_rx_mode(struct net_device *dev)
u32 rx_mode = MinVLANPrio;
struct netdev_hw_addr *ha;
int i;
-#ifdef VLAN_SUPPORT
- rx_mode |= VlanMode;
- if (np->vlgrp) {
- int vlan_count = 0;
- void __iomem *filter_addr = ioaddr + HashTable + 8;
- for (i = 0; i < VLAN_VID_MASK; i++) {
- if (vlan_group_get_device(np->vlgrp, i)) {
- if (vlan_count >= 32)
- break;
- writew(i, filter_addr);
- filter_addr += 16;
- vlan_count++;
- }
- }
- if (i == VLAN_VID_MASK) {
- rx_mode |= PerfectFilterVlan;
- while (vlan_count < 32) {
- writew(0, filter_addr);
- filter_addr += 16;
- vlan_count++;
- }
- }
- }
+#ifdef VLAN_SUPPORT
+ rx_mode |= set_vlan_mode(np);
#endif /* VLAN_SUPPORT */
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index f20455cbfbb..0f63b3c83c1 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -28,6 +28,7 @@
#include <linux/crc32.h>
#include <linux/slab.h>
+#include <asm/io.h>
#include "dwmac1000.h"
static void dwmac1000_core_init(void __iomem *ioaddr)
diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/stmmac/dwmac1000_dma.c
index 2c47712d45d..3dbeea61908 100644
--- a/drivers/net/stmmac/dwmac1000_dma.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -26,6 +26,7 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <asm/io.h>
#include "dwmac1000.h"
#include "dwmac_dma.h"
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c
index c724fc36a24..743a5801763 100644
--- a/drivers/net/stmmac/dwmac100_core.c
+++ b/drivers/net/stmmac/dwmac100_core.c
@@ -29,6 +29,7 @@
*******************************************************************************/
#include <linux/crc32.h>
+#include <asm/io.h>
#include "dwmac100.h"
static void dwmac100_core_init(void __iomem *ioaddr)
diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c
index e3e224b7d9e..627f656b0f3 100644
--- a/drivers/net/stmmac/dwmac100_dma.c
+++ b/drivers/net/stmmac/dwmac100_dma.c
@@ -28,6 +28,7 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <asm/io.h>
#include "dwmac100.h"
#include "dwmac_dma.h"
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 2b076b31362..de1929b2641 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,7 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Nov_2010"
+#define DRV_MODULE_VERSION "July_2011"
#include <linux/stmmac.h>
#include "common.h"
@@ -56,14 +56,9 @@ struct stmmac_priv {
struct stmmac_extra_stats xstats;
struct napi_struct napi;
- phy_interface_t phy_interface;
- int phy_addr;
- int phy_mask;
- int (*phy_reset) (void *priv);
int rx_coe;
int no_csum_insertion;
- int phy_irq;
struct phy_device *phydev;
int oldlink;
int speed;
@@ -71,6 +66,7 @@ struct stmmac_priv {
unsigned int flow_ctrl;
unsigned int pause;
struct mii_bus *mii;
+ int mii_irq[PHY_MAX_ADDR];
u32 msg_enable;
spinlock_t lock;
@@ -79,9 +75,6 @@ struct stmmac_priv {
#ifdef CONFIG_STMMAC_TIMER
struct stmmac_timer *tm;
#endif
-#ifdef STMMAC_VLAN_TAG_USED
- struct vlan_group *vlgrp;
-#endif
struct plat_stmmacenet_data *plat;
};
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index ae5213a8c4c..7ed8fb6c211 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -24,8 +24,10 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/interrupt.h>
#include <linux/mii.h>
#include <linux/phy.h>
+#include <asm/io.h>
#include "stmmac.h"
#include "dwmac_dma.h"
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index e25e44a45c2..c6e567e04ef 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -49,7 +49,6 @@
#include "stmmac.h"
#define STMMAC_RESOURCE_NAME "stmmaceth"
-#define PHY_RESOURCE_NAME "stmmacphy"
#undef STMMAC_DEBUG
/*#define STMMAC_DEBUG*/
@@ -305,18 +304,13 @@ static int stmmac_init_phy(struct net_device *dev)
priv->speed = 0;
priv->oldduplex = -1;
- if (priv->phy_addr == -1) {
- /* We don't have a PHY, so do nothing */
- return 0;
- }
-
snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
- priv->phy_addr);
+ priv->plat->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
- priv->phy_interface);
+ priv->plat->interface);
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -335,7 +329,7 @@ static int stmmac_init_phy(struct net_device *dev)
return -ENODEV;
}
pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
- " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
+ " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
priv->phydev = phydev;
@@ -557,9 +551,11 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
- if (likely((priv->plat->tx_coe) && (!priv->no_csum_insertion))) {
- /* In case of GMAC, SF mode has to be enabled
- * to perform the TX COE. This depends on:
+ if (likely(priv->plat->force_sf_dma_mode ||
+ ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
+ /*
+ * In case of GMAC, SF mode can be enabled
+ * to perform the TX COE in HW. This depends on:
* 1) TX COE if actually supported
* 2) There is no bugged Jumbo frame support
* that needs to not insert csum in the TDES.
@@ -1045,6 +1041,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
len, DMA_TO_DEVICE);
priv->tx_skbuff[entry] = NULL;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+ wmb();
priv->hw->desc->set_tx_owner(desc);
}
@@ -1056,6 +1053,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(priv->tm->enable))
priv->hw->desc->clear_tx_ic(desc);
#endif
+
+ wmb();
+
/* To avoid a race condition */
priv->hw->desc->set_tx_owner(first);
@@ -1079,6 +1079,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
+ skb_tx_timestamp(skb);
+
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
return NETDEV_TX_OK;
@@ -1116,6 +1118,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
}
+ wmb();
priv->hw->desc->set_rx_owner(p + entry);
}
}
@@ -1412,20 +1415,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return ret;
}
-#ifdef STMMAC_VLAN_TAG_USED
-static void stmmac_vlan_rx_register(struct net_device *dev,
- struct vlan_group *grp)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
-
- DBG(probe, INFO, "%s: Setting vlgrp to %p\n", dev->name, grp);
-
- spin_lock(&priv->lock);
- priv->vlgrp = grp;
- spin_unlock(&priv->lock);
-}
-#endif
-
static const struct net_device_ops stmmac_netdev_ops = {
.ndo_open = stmmac_open,
.ndo_start_xmit = stmmac_xmit,
@@ -1436,9 +1425,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
.ndo_set_config = stmmac_config,
-#ifdef STMMAC_VLAN_TAG_USED
- .ndo_vlan_rx_register = stmmac_vlan_rx_register,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
@@ -1536,71 +1522,6 @@ static int stmmac_mac_device_setup(struct net_device *dev)
return 0;
}
-static int stmmacphy_dvr_probe(struct platform_device *pdev)
-{
- struct plat_stmmacphy_data *plat_dat = pdev->dev.platform_data;
-
- pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
- plat_dat->bus_id);
-
- return 0;
-}
-
-static int stmmacphy_dvr_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
-static struct platform_driver stmmacphy_driver = {
- .driver = {
- .name = PHY_RESOURCE_NAME,
- },
- .probe = stmmacphy_dvr_probe,
- .remove = stmmacphy_dvr_remove,
-};
-
-/**
- * stmmac_associate_phy
- * @dev: pointer to device structure
- * @data: points to the private structure.
- * Description: Scans through all the PHYs we have registered and checks if
- * any are associated with our MAC. If so, then just fill in
- * the blanks in our local context structure
- */
-static int stmmac_associate_phy(struct device *dev, void *data)
-{
- struct stmmac_priv *priv = (struct stmmac_priv *)data;
- struct plat_stmmacphy_data *plat_dat = dev->platform_data;
-
- DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
- plat_dat->bus_id);
-
- /* Check that this phy is for the MAC being initialised */
- if (priv->plat->bus_id != plat_dat->bus_id)
- return 0;
-
- /* OK, this PHY is connected to the MAC.
- Go ahead and get the parameters */
- DBG(probe, DEBUG, "%s: OK. Found PHY config\n", __func__);
- priv->phy_irq =
- platform_get_irq_byname(to_platform_device(dev), "phyirq");
- DBG(probe, DEBUG, "%s: PHY irq on bus %d is %d\n", __func__,
- plat_dat->bus_id, priv->phy_irq);
-
- /* Override with kernel parameters if supplied XXX CRS XXX
- * this needs to have multiple instances */
- if ((phyaddr >= 0) && (phyaddr <= 31))
- plat_dat->phy_addr = phyaddr;
-
- priv->phy_addr = plat_dat->phy_addr;
- priv->phy_mask = plat_dat->phy_mask;
- priv->phy_interface = plat_dat->interface;
- priv->phy_reset = plat_dat->phy_reset;
-
- DBG(probe, DEBUG, "%s: exiting\n", __func__);
- return 1; /* forces exit of driver_for_each_device() */
-}
-
/**
* stmmac_dvr_probe
* @pdev: platform device pointer
@@ -1691,14 +1612,10 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
if (ret < 0)
goto out_plat_exit;
- /* associate a PHY - it is provided by another platform bus */
- if (!driver_for_each_device
- (&(stmmacphy_driver.driver), NULL, (void *)priv,
- stmmac_associate_phy)) {
- pr_err("No PHY device is associated with this MAC!\n");
- ret = -ENODEV;
- goto out_unregister;
- }
+ /* Override with kernel parameters if supplied XXX CRS XXX
+ * this needs to have multiple instances */
+ if ((phyaddr >= 0) && (phyaddr <= 31))
+ priv->plat->phy_addr = phyaddr;
pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
"\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
@@ -1898,11 +1815,6 @@ static int __init stmmac_init_module(void)
{
int ret;
- if (platform_driver_register(&stmmacphy_driver)) {
- pr_err("No PHY devices registered!\n");
- return -ENODEV;
- }
-
ret = platform_driver_register(&stmmac_driver);
return ret;
}
@@ -1913,7 +1825,6 @@ static int __init stmmac_init_module(void)
*/
static void __exit stmmac_cleanup_module(void)
{
- platform_driver_unregister(&stmmacphy_driver);
platform_driver_unregister(&stmmac_driver);
}
@@ -1925,33 +1836,52 @@ static int __init stmmac_cmdline_opt(char *str)
if (!str || !*str)
return -EINVAL;
while ((opt = strsep(&str, ",")) != NULL) {
- if (!strncmp(opt, "debug:", 6))
- strict_strtoul(opt + 6, 0, (unsigned long *)&debug);
- else if (!strncmp(opt, "phyaddr:", 8))
- strict_strtoul(opt + 8, 0, (unsigned long *)&phyaddr);
- else if (!strncmp(opt, "dma_txsize:", 11))
- strict_strtoul(opt + 11, 0,
- (unsigned long *)&dma_txsize);
- else if (!strncmp(opt, "dma_rxsize:", 11))
- strict_strtoul(opt + 11, 0,
- (unsigned long *)&dma_rxsize);
- else if (!strncmp(opt, "buf_sz:", 7))
- strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz);
- else if (!strncmp(opt, "tc:", 3))
- strict_strtoul(opt + 3, 0, (unsigned long *)&tc);
- else if (!strncmp(opt, "watchdog:", 9))
- strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog);
- else if (!strncmp(opt, "flow_ctrl:", 10))
- strict_strtoul(opt + 10, 0,
- (unsigned long *)&flow_ctrl);
- else if (!strncmp(opt, "pause:", 6))
- strict_strtoul(opt + 6, 0, (unsigned long *)&pause);
+ if (!strncmp(opt, "debug:", 6)) {
+ if (strict_strtoul(opt + 6, 0, (unsigned long *)&debug))
+ goto err;
+ } else if (!strncmp(opt, "phyaddr:", 8)) {
+ if (strict_strtoul(opt + 8, 0,
+ (unsigned long *)&phyaddr))
+ goto err;
+ } else if (!strncmp(opt, "dma_txsize:", 11)) {
+ if (strict_strtoul(opt + 11, 0,
+ (unsigned long *)&dma_txsize))
+ goto err;
+ } else if (!strncmp(opt, "dma_rxsize:", 11)) {
+ if (strict_strtoul(opt + 11, 0,
+ (unsigned long *)&dma_rxsize))
+ goto err;
+ } else if (!strncmp(opt, "buf_sz:", 7)) {
+ if (strict_strtoul(opt + 7, 0,
+ (unsigned long *)&buf_sz))
+ goto err;
+ } else if (!strncmp(opt, "tc:", 3)) {
+ if (strict_strtoul(opt + 3, 0, (unsigned long *)&tc))
+ goto err;
+ } else if (!strncmp(opt, "watchdog:", 9)) {
+ if (strict_strtoul(opt + 9, 0,
+ (unsigned long *)&watchdog))
+ goto err;
+ } else if (!strncmp(opt, "flow_ctrl:", 10)) {
+ if (strict_strtoul(opt + 10, 0,
+ (unsigned long *)&flow_ctrl))
+ goto err;
+ } else if (!strncmp(opt, "pause:", 6)) {
+ if (strict_strtoul(opt + 6, 0, (unsigned long *)&pause))
+ goto err;
#ifdef CONFIG_STMMAC_TIMER
- else if (!strncmp(opt, "tmrate:", 7))
- strict_strtoul(opt + 7, 0, (unsigned long *)&tmrate);
+ } else if (!strncmp(opt, "tmrate:", 7)) {
+ if (strict_strtoul(opt + 7, 0,
+ (unsigned long *)&tmrate))
+ goto err;
#endif
+ }
}
return 0;
+
+err:
+ pr_err("%s: ERROR broken module parameter conversion\n", __func__);
+ return -EINVAL;
}
__setup("stmmaceth=", stmmac_cmdline_opt);
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index 234b4068a1f..9c3b9d5c341 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -27,6 +27,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/slab.h>
+#include <asm/io.h>
#include "stmmac.h"
@@ -112,9 +113,9 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
- if (priv->phy_reset) {
+ if (priv->plat->mdio_bus_data->phy_reset) {
pr_debug("stmmac_mdio_reset: calling phy_reset\n");
- priv->phy_reset(priv->plat->bsp_priv);
+ priv->plat->mdio_bus_data->phy_reset(priv->plat->bsp_priv);
}
/* This is a workaround for problems with the STE101P PHY.
@@ -137,30 +138,29 @@ int stmmac_mdio_register(struct net_device *ndev)
struct mii_bus *new_bus;
int *irqlist;
struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
int addr, found;
+ if (!mdio_bus_data)
+ return 0;
+
new_bus = mdiobus_alloc();
if (new_bus == NULL)
return -ENOMEM;
- irqlist = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
- if (irqlist == NULL) {
- err = -ENOMEM;
- goto irqlist_alloc_fail;
- }
-
- /* Assign IRQ to phy at address phy_addr */
- if (priv->phy_addr != -1)
- irqlist[priv->phy_addr] = priv->phy_irq;
+ if (mdio_bus_data->irqs)
+ irqlist = mdio_bus_data->irqs;
+ else
+ irqlist = priv->mii_irq;
new_bus->name = "STMMAC MII Bus";
new_bus->read = &stmmac_mdio_read;
new_bus->write = &stmmac_mdio_write;
new_bus->reset = &stmmac_mdio_reset;
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", mdio_bus_data->bus_id);
new_bus->priv = ndev;
new_bus->irq = irqlist;
- new_bus->phy_mask = priv->phy_mask;
+ new_bus->phy_mask = mdio_bus_data->phy_mask;
new_bus->parent = priv->device;
err = mdiobus_register(new_bus);
if (err != 0) {
@@ -171,18 +171,50 @@ int stmmac_mdio_register(struct net_device *ndev)
priv->mii = new_bus;
found = 0;
- for (addr = 0; addr < 32; addr++) {
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
struct phy_device *phydev = new_bus->phy_map[addr];
if (phydev) {
- if (priv->phy_addr == -1) {
- priv->phy_addr = addr;
- phydev->irq = priv->phy_irq;
- irqlist[addr] = priv->phy_irq;
+ int act = 0;
+ char irq_num[4];
+ char *irq_str;
+
+ /*
+ * If an IRQ was provided to be assigned after
+ * the bus probe, do it here.
+ */
+ if ((mdio_bus_data->irqs == NULL) &&
+ (mdio_bus_data->probed_phy_irq > 0)) {
+ irqlist[addr] = mdio_bus_data->probed_phy_irq;
+ phydev->irq = mdio_bus_data->probed_phy_irq;
}
- pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n",
- ndev->name, phydev->phy_id, addr,
- phydev->irq, dev_name(&phydev->dev),
- (addr == priv->phy_addr) ? " active" : "");
+
+ /*
+ * If we're going to bind the MAC to this PHY bus,
+ * and no PHY number was provided to the MAC,
+ * use the one probed here.
+ */
+ if ((priv->plat->bus_id == mdio_bus_data->bus_id) &&
+ (priv->plat->phy_addr == -1))
+ priv->plat->phy_addr = addr;
+
+ act = (priv->plat->bus_id == mdio_bus_data->bus_id) &&
+ (priv->plat->phy_addr == addr);
+ switch (phydev->irq) {
+ case PHY_POLL:
+ irq_str = "POLL";
+ break;
+ case PHY_IGNORE_INTERRUPT:
+ irq_str = "IGNORE";
+ break;
+ default:
+ sprintf(irq_num, "%d", phydev->irq);
+ irq_str = irq_num;
+ break;
+ }
+ pr_info("%s: PHY ID %08x at %d IRQ %s (%s)%s\n",
+ ndev->name, phydev->phy_id, addr,
+ irq_str, dev_name(&phydev->dev),
+ act ? " active" : "");
found = 1;
}
}
@@ -191,10 +223,9 @@ int stmmac_mdio_register(struct net_device *ndev)
pr_warning("%s: No PHY found\n", ndev->name);
return 0;
+
bus_register_fail:
- kfree(irqlist);
-irqlist_alloc_fail:
- kfree(new_bus);
+ mdiobus_free(new_bus);
return err;
}
@@ -209,7 +240,8 @@ int stmmac_mdio_unregister(struct net_device *ndev)
mdiobus_unregister(priv->mii);
priv->mii->priv = NULL;
- kfree(priv->mii);
+ mdiobus_free(priv->mii);
+ priv->mii = NULL;
return 0;
}
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index aa4765803a4..297a4242106 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -998,7 +998,6 @@ static void bigmac_set_multicast(struct net_device *dev)
struct bigmac *bp = netdev_priv(dev);
void __iomem *bregs = bp->bregs;
struct netdev_hw_addr *ha;
- char *addrs;
int i;
u32 tmp, crc;
@@ -1027,12 +1026,7 @@ static void bigmac_set_multicast(struct net_device *dev)
hash_table[i] = 0;
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
-
- if (!(*addrs & 1))
- continue;
-
- crc = ether_crc_le(6, addrs);
+ crc = ether_crc_le(6, ha->addr);
crc >>= 26;
hash_table[crc >> 4] |= 1 << (crc & 0xf);
}
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index ab593009926..ade35dde5b5 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -10,25 +10,6 @@
* NAPI and NETPOLL support
* (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
*
- * TODO:
- * - Now that the driver was significantly simplified, I need to rework
- * the locking. I'm sure we don't need _2_ spinlocks, and we probably
- * can avoid taking most of them for so long period of time (and schedule
- * instead). The main issues at this point are caused by the netdev layer
- * though:
- *
- * gem_change_mtu() and gem_set_multicast() are called with a read_lock()
- * help by net/core/dev.c, thus they can't schedule. That means they can't
- * call napi_disable() neither, thus force gem_poll() to keep a spinlock
- * where it could have been dropped. change_mtu especially would love also to
- * be able to msleep instead of horrid locked delays when resetting the HW,
- * but that read_lock() makes it impossible, unless I defer it's action to
- * the reset task, which means it'll be asynchronous (won't take effect until
- * the system schedules a bit).
- *
- * Also, it would probably be possible to also remove most of the long-life
- * locking in open/resume code path (gem_reinit_chip) by beeing more careful
- * about when we can start taking interrupts or get xmit() called...
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -57,7 +38,6 @@
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
-#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/gfp.h>
@@ -95,12 +75,11 @@
SUPPORTED_Pause | SUPPORTED_Autoneg)
#define DRV_NAME "sungem"
-#define DRV_VERSION "0.98"
-#define DRV_RELDATE "8/24/03"
-#define DRV_AUTHOR "David S. Miller (davem@redhat.com)"
+#define DRV_VERSION "1.0"
+#define DRV_AUTHOR "David S. Miller <davem@redhat.com>"
static char version[] __devinitdata =
- DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n";
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
@@ -218,6 +197,7 @@ static inline void gem_disable_ints(struct gem *gp)
{
/* Disable all interrupts, including TXDONE */
writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
+ (void)readl(gp->regs + GREG_IMASK); /* write posting */
}
static void gem_get_cell(struct gem *gp)
@@ -247,6 +227,29 @@ static void gem_put_cell(struct gem *gp)
#endif /* CONFIG_PPC_PMAC */
}
+static inline void gem_netif_stop(struct gem *gp)
+{
+ gp->dev->trans_start = jiffies; /* prevent tx timeout */
+ napi_disable(&gp->napi);
+ netif_tx_disable(gp->dev);
+}
+
+static inline void gem_netif_start(struct gem *gp)
+{
+ /* NOTE: unconditional netif_wake_queue is only
+ * appropriate so long as all callers are assured to
+ * have free tx slots.
+ */
+ netif_wake_queue(gp->dev);
+ napi_enable(&gp->napi);
+}
+
+static void gem_schedule_reset(struct gem *gp)
+{
+ gp->reset_task_pending = 1;
+ schedule_work(&gp->reset_task);
+}
+
static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
if (netif_msg_intr(gp))
@@ -604,56 +607,46 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
gp->dev->name);
dev->stats.rx_errors++;
- goto do_reset;
+ return 1;
}
if (gem_status & GREG_STAT_PCS) {
if (gem_pcs_interrupt(dev, gp, gem_status))
- goto do_reset;
+ return 1;
}
if (gem_status & GREG_STAT_TXMAC) {
if (gem_txmac_interrupt(dev, gp, gem_status))
- goto do_reset;
+ return 1;
}
if (gem_status & GREG_STAT_RXMAC) {
if (gem_rxmac_interrupt(dev, gp, gem_status))
- goto do_reset;
+ return 1;
}
if (gem_status & GREG_STAT_MAC) {
if (gem_mac_interrupt(dev, gp, gem_status))
- goto do_reset;
+ return 1;
}
if (gem_status & GREG_STAT_MIF) {
if (gem_mif_interrupt(dev, gp, gem_status))
- goto do_reset;
+ return 1;
}
if (gem_status & GREG_STAT_PCIERR) {
if (gem_pci_interrupt(dev, gp, gem_status))
- goto do_reset;
+ return 1;
}
return 0;
-
-do_reset:
- gp->reset_task_pending = 1;
- schedule_work(&gp->reset_task);
-
- return 1;
}
static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
int entry, limit;
- if (netif_msg_intr(gp))
- printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
- gp->dev->name, gem_status);
-
entry = gp->tx_old;
limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
while (entry != limit) {
@@ -697,13 +690,27 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
}
dev->stats.tx_packets++;
- dev_kfree_skb_irq(skb);
+ dev_kfree_skb(skb);
}
gp->tx_old = entry;
- if (netif_queue_stopped(dev) &&
- TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
- netif_wake_queue(dev);
+ /* Need to make the tx_old update visible to gem_start_xmit()
+ * before checking for netif_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that gem_start_xmit()
+ * will miss it and cause the queue to be stopped forever.
+ */
+ smp_mb();
+
+ if (unlikely(netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
+ netif_wake_queue(dev);
+ __netif_tx_unlock(txq);
+ }
}
static __inline__ void gem_post_rxds(struct gem *gp, int limit)
@@ -736,6 +743,21 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
}
}
+#define ALIGNED_RX_SKB_ADDR(addr) \
+ ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
+static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size,
+ gfp_t gfp_flags)
+{
+ struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
+
+ if (likely(skb)) {
+ unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
+ skb_reserve(skb, offset);
+ skb->dev = dev;
+ }
+ return skb;
+}
+
static int gem_rx(struct gem *gp, int work_to_do)
{
struct net_device *dev = gp->dev;
@@ -799,7 +821,7 @@ static int gem_rx(struct gem *gp, int work_to_do)
if (len > RX_COPY_THRESHOLD) {
struct sk_buff *new_skb;
- new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
+ new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
if (new_skb == NULL) {
drops++;
goto drop_it;
@@ -808,7 +830,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
RX_BUF_ALLOC_SIZE(gp),
PCI_DMA_FROMDEVICE);
gp->rx_skbs[entry] = new_skb;
- new_skb->dev = gp->dev;
skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
virt_to_page(new_skb->data),
@@ -820,7 +841,7 @@ static int gem_rx(struct gem *gp, int work_to_do)
/* Trim the original skb for the netif. */
skb_trim(skb, len);
} else {
- struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
+ struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
if (copy_skb == NULL) {
drops++;
@@ -842,7 +863,7 @@ static int gem_rx(struct gem *gp, int work_to_do)
skb->ip_summed = CHECKSUM_COMPLETE;
skb->protocol = eth_type_trans(skb, gp->dev);
- netif_receive_skb(skb);
+ napi_gro_receive(&gp->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
@@ -865,28 +886,32 @@ static int gem_poll(struct napi_struct *napi, int budget)
{
struct gem *gp = container_of(napi, struct gem, napi);
struct net_device *dev = gp->dev;
- unsigned long flags;
int work_done;
- /*
- * NAPI locking nightmare: See comment at head of driver
- */
- spin_lock_irqsave(&gp->lock, flags);
-
work_done = 0;
do {
/* Handle anomalies */
- if (gp->status & GREG_STAT_ABNORMAL) {
- if (gem_abnormal_irq(dev, gp, gp->status))
- break;
+ if (unlikely(gp->status & GREG_STAT_ABNORMAL)) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+ int reset;
+
+ /* We run the abnormal interrupt handling code with
+ * the Tx lock. It only resets the Rx portion of the
+ * chip, but we need to guard it against DMA being
+ * restarted by the link poll timer
+ */
+ __netif_tx_lock(txq, smp_processor_id());
+ reset = gem_abnormal_irq(dev, gp, gp->status);
+ __netif_tx_unlock(txq);
+ if (reset) {
+ gem_schedule_reset(gp);
+ napi_complete(napi);
+ return work_done;
+ }
}
/* Run TX completion thread */
- spin_lock(&gp->tx_lock);
gem_tx(dev, gp, gp->status);
- spin_unlock(&gp->tx_lock);
-
- spin_unlock_irqrestore(&gp->lock, flags);
/* Run RX thread. We don't use any locking here,
* code willing to do bad things - like cleaning the
@@ -898,16 +923,12 @@ static int gem_poll(struct napi_struct *napi, int budget)
if (work_done >= budget)
return work_done;
- spin_lock_irqsave(&gp->lock, flags);
-
gp->status = readl(gp->regs + GREG_STAT);
} while (gp->status & GREG_STAT_NAPI);
- __napi_complete(napi);
+ napi_complete(napi);
gem_enable_ints(gp);
- spin_unlock_irqrestore(&gp->lock, flags);
-
return work_done;
}
@@ -915,32 +936,23 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct gem *gp = netdev_priv(dev);
- unsigned long flags;
-
- /* Swallow interrupts when shutting the chip down, though
- * that shouldn't happen, we should have done free_irq() at
- * this point...
- */
- if (!gp->running)
- return IRQ_HANDLED;
-
- spin_lock_irqsave(&gp->lock, flags);
if (napi_schedule_prep(&gp->napi)) {
u32 gem_status = readl(gp->regs + GREG_STAT);
- if (gem_status == 0) {
+ if (unlikely(gem_status == 0)) {
napi_enable(&gp->napi);
- spin_unlock_irqrestore(&gp->lock, flags);
return IRQ_NONE;
}
+ if (netif_msg_intr(gp))
+ printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n",
+ gp->dev->name, gem_status);
+
gp->status = gem_status;
gem_disable_ints(gp);
__napi_schedule(&gp->napi);
}
- spin_unlock_irqrestore(&gp->lock, flags);
-
/* If polling was disabled at the time we received that
* interrupt, we may return IRQ_HANDLED here while we
* should return IRQ_NONE. No big deal...
@@ -951,10 +963,11 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gem_poll_controller(struct net_device *dev)
{
- /* gem_interrupt is safe to reentrance so no need
- * to disable_irq here.
- */
- gem_interrupt(dev->irq, dev);
+ struct gem *gp = netdev_priv(dev);
+
+ disable_irq(gp->pdev->irq);
+ gem_interrupt(gp->pdev->irq, dev);
+ enable_irq(gp->pdev->irq);
}
#endif
@@ -963,10 +976,7 @@ static void gem_tx_timeout(struct net_device *dev)
struct gem *gp = netdev_priv(dev);
netdev_err(dev, "transmit timed out, resetting\n");
- if (!gp->running) {
- netdev_err(dev, "hrm.. hw not running !\n");
- return;
- }
+
netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
readl(gp->regs + TXDMA_CFG),
readl(gp->regs + MAC_TXSTAT),
@@ -976,14 +986,7 @@ static void gem_tx_timeout(struct net_device *dev)
readl(gp->regs + MAC_RXSTAT),
readl(gp->regs + MAC_RXCFG));
- spin_lock_irq(&gp->lock);
- spin_lock(&gp->tx_lock);
-
- gp->reset_task_pending = 1;
- schedule_work(&gp->reset_task);
-
- spin_unlock(&gp->tx_lock);
- spin_unlock_irq(&gp->lock);
+ gem_schedule_reset(gp);
}
static __inline__ int gem_intme(int entry)
@@ -1001,7 +1004,6 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
struct gem *gp = netdev_priv(dev);
int entry;
u64 ctrl;
- unsigned long flags;
ctrl = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -1013,21 +1015,12 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
(csum_stuff_off << 21));
}
- if (!spin_trylock_irqsave(&gp->tx_lock, flags)) {
- /* Tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
- }
- /* We raced with gem_do_stop() */
- if (!gp->running) {
- spin_unlock_irqrestore(&gp->tx_lock, flags);
- return NETDEV_TX_BUSY;
- }
-
- /* This is a hard error, log it. */
- if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
- netif_stop_queue(dev);
- spin_unlock_irqrestore(&gp->tx_lock, flags);
- netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ /* This is a hard error, log it. */
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ }
return NETDEV_TX_BUSY;
}
@@ -1104,17 +1097,23 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
}
gp->tx_new = entry;
- if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
+ if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) {
netif_stop_queue(dev);
+ /* netif_stop_queue() must be done before checking
+ * the tx index in TX_BUFFS_AVAIL() below, because
+ * in gem_tx(), we update tx_old before checking for
+ * netif_queue_stopped().
+ */
+ smp_mb();
+ if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
+ netif_wake_queue(dev);
+ }
if (netif_msg_tx_queued(gp))
printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
dev->name, entry, skb->len);
mb();
writel(gp->tx_new, gp->regs + TXDMA_KICK);
- spin_unlock_irqrestore(&gp->tx_lock, flags);
-
- dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
return NETDEV_TX_OK;
}
@@ -1184,7 +1183,6 @@ static void gem_pcs_reinit_adv(struct gem *gp)
#define STOP_TRIES 32
-/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_reset(struct gem *gp)
{
int limit;
@@ -1213,7 +1211,6 @@ static void gem_reset(struct gem *gp)
gem_pcs_reinit_adv(gp);
}
-/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_start_dma(struct gem *gp)
{
u32 val;
@@ -1236,8 +1233,7 @@ static void gem_start_dma(struct gem *gp)
writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}
-/* Must be invoked under gp->lock and gp->tx_lock. DMA won't be
- * actually stopped before about 4ms tho ...
+/* DMA won't actually be stopped before about 4ms though ...
*/
static void gem_stop_dma(struct gem *gp)
{
@@ -1259,7 +1255,6 @@ static void gem_stop_dma(struct gem *gp)
}
-/* Must be invoked under gp->lock and gp->tx_lock. */
// XXX dbl check what that function should do when called on PCS PHY
static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
{
@@ -1319,7 +1314,7 @@ start_aneg:
/* If we are asleep, we don't try to actually setup the PHY, we
* just store the settings
*/
- if (gp->asleep) {
+ if (!netif_device_present(gp->dev)) {
gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
gp->phy_mii.speed = speed;
gp->phy_mii.duplex = duplex;
@@ -1345,13 +1340,12 @@ non_mii:
/* A link-up condition has occurred, initialize and enable the
* rest of the chip.
- *
- * Must be invoked under gp->lock and gp->tx_lock.
*/
static int gem_set_link_modes(struct gem *gp)
{
- u32 val;
+ struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0);
int full_duplex, speed, pause;
+ u32 val;
full_duplex = 0;
speed = SPEED_10;
@@ -1375,8 +1369,11 @@ static int gem_set_link_modes(struct gem *gp)
netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
speed, (full_duplex ? "full" : "half"));
- if (!gp->running)
- return 0;
+
+ /* We take the tx queue lock to avoid collisions between
+ * this code, the tx path and the NAPI-driven error path
+ */
+ __netif_tx_lock(txq, smp_processor_id());
val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
if (full_duplex) {
@@ -1425,18 +1422,6 @@ static int gem_set_link_modes(struct gem *gp)
pause = 1;
}
- if (netif_msg_link(gp)) {
- if (pause) {
- netdev_info(gp->dev,
- "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
- gp->rx_fifo_sz,
- gp->rx_pause_off,
- gp->rx_pause_on);
- } else {
- netdev_info(gp->dev, "Pause is disabled\n");
- }
- }
-
if (!full_duplex)
writel(512, gp->regs + MAC_STIME);
else
@@ -1450,10 +1435,23 @@ static int gem_set_link_modes(struct gem *gp)
gem_start_dma(gp);
+ __netif_tx_unlock(txq);
+
+ if (netif_msg_link(gp)) {
+ if (pause) {
+ netdev_info(gp->dev,
+ "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+ gp->rx_fifo_sz,
+ gp->rx_pause_off,
+ gp->rx_pause_on);
+ } else {
+ netdev_info(gp->dev, "Pause is disabled\n");
+ }
+ }
+
return 0;
}
-/* Must be invoked under gp->lock and gp->tx_lock. */
static int gem_mdio_link_not_up(struct gem *gp)
{
switch (gp->lstate) {
@@ -1501,20 +1499,12 @@ static int gem_mdio_link_not_up(struct gem *gp)
static void gem_link_timer(unsigned long data)
{
struct gem *gp = (struct gem *) data;
+ struct net_device *dev = gp->dev;
int restart_aneg = 0;
- if (gp->asleep)
- return;
-
- spin_lock_irq(&gp->lock);
- spin_lock(&gp->tx_lock);
- gem_get_cell(gp);
-
- /* If the reset task is still pending, we just
- * reschedule the link timer
- */
+ /* There's no point doing anything if we're going to be reset */
if (gp->reset_task_pending)
- goto restart;
+ return;
if (gp->phy_type == phy_serialink ||
gp->phy_type == phy_serdes) {
@@ -1528,7 +1518,7 @@ static void gem_link_timer(unsigned long data)
goto restart;
gp->lstate = link_up;
- netif_carrier_on(gp->dev);
+ netif_carrier_on(dev);
(void)gem_set_link_modes(gp);
}
goto restart;
@@ -1544,12 +1534,12 @@ static void gem_link_timer(unsigned long data)
gp->last_forced_speed = gp->phy_mii.speed;
gp->timer_ticks = 5;
if (netif_msg_link(gp))
- netdev_info(gp->dev,
+ netdev_info(dev,
"Got link after fallback, retrying autoneg once...\n");
gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
} else if (gp->lstate != link_up) {
gp->lstate = link_up;
- netif_carrier_on(gp->dev);
+ netif_carrier_on(dev);
if (gem_set_link_modes(gp))
restart_aneg = 1;
}
@@ -1559,11 +1549,11 @@ static void gem_link_timer(unsigned long data)
*/
if (gp->lstate == link_up) {
gp->lstate = link_down;
- netif_info(gp, link, gp->dev, "Link down\n");
- netif_carrier_off(gp->dev);
- gp->reset_task_pending = 1;
- schedule_work(&gp->reset_task);
- restart_aneg = 1;
+ netif_info(gp, link, dev, "Link down\n");
+ netif_carrier_off(dev);
+ gem_schedule_reset(gp);
+ /* The reset task will restart the timer */
+ return;
} else if (++gp->timer_ticks > 10) {
if (found_mii_phy(gp))
restart_aneg = gem_mdio_link_not_up(gp);
@@ -1573,17 +1563,12 @@ static void gem_link_timer(unsigned long data)
}
if (restart_aneg) {
gem_begin_auto_negotiation(gp, NULL);
- goto out_unlock;
+ return;
}
restart:
mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
-out_unlock:
- gem_put_cell(gp);
- spin_unlock(&gp->tx_lock);
- spin_unlock_irq(&gp->lock);
}
-/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_clean_rings(struct gem *gp)
{
struct gem_init_block *gb = gp->init_block;
@@ -1634,7 +1619,6 @@ static void gem_clean_rings(struct gem *gp)
}
}
-/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_rings(struct gem *gp)
{
struct gem_init_block *gb = gp->init_block;
@@ -1653,7 +1637,7 @@ static void gem_init_rings(struct gem *gp)
struct sk_buff *skb;
struct gem_rxd *rxd = &gb->rxd[i];
- skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
+ skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL);
if (!skb) {
rxd->buffer = 0;
rxd->status_word = 0;
@@ -1661,7 +1645,6 @@ static void gem_init_rings(struct gem *gp)
}
gp->rx_skbs[i] = skb;
- skb->dev = dev;
skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
dma_addr = pci_map_page(gp->pdev,
virt_to_page(skb->data),
@@ -1737,7 +1720,7 @@ static void gem_init_phy(struct gem *gp)
if (gp->phy_type == phy_mii_mdio0 ||
gp->phy_type == phy_mii_mdio1) {
- // XXX check for errors
+ /* Reset and detect MII PHY */
mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
/* Init PHY */
@@ -1753,13 +1736,15 @@ static void gem_init_phy(struct gem *gp)
gp->lstate = link_down;
netif_carrier_off(gp->dev);
- /* Can I advertise gigabit here ? I'd need BCM PHY docs... */
- spin_lock_irq(&gp->lock);
+ /* Print things out */
+ if (gp->phy_type == phy_mii_mdio0 ||
+ gp->phy_type == phy_mii_mdio1)
+ netdev_info(gp->dev, "Found %s PHY\n",
+ gp->phy_mii.def ? gp->phy_mii.def->name : "no");
+
gem_begin_auto_negotiation(gp, NULL);
- spin_unlock_irq(&gp->lock);
}
-/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_dma(struct gem *gp)
{
u64 desc_dma = (u64) gp->gblock_dvma;
@@ -1797,7 +1782,6 @@ static void gem_init_dma(struct gem *gp)
gp->regs + RXDMA_BLANK);
}
-/* Must be invoked under gp->lock and gp->tx_lock. */
static u32 gem_setup_multicast(struct gem *gp)
{
u32 rxcfg = 0;
@@ -1818,12 +1802,7 @@ static u32 gem_setup_multicast(struct gem *gp)
memset(hash_table, 0, sizeof(hash_table));
netdev_for_each_mc_addr(ha, gp->dev) {
- char *addrs = ha->addr;
-
- if (!(*addrs & 1))
- continue;
-
- crc = ether_crc_le(6, addrs);
+ crc = ether_crc_le(6, ha->addr);
crc >>= 24;
hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
}
@@ -1835,7 +1814,6 @@ static u32 gem_setup_multicast(struct gem *gp)
return rxcfg;
}
-/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_mac(struct gem *gp)
{
unsigned char *e = &gp->dev->dev_addr[0];
@@ -1918,7 +1896,6 @@ static void gem_init_mac(struct gem *gp)
writel(0, gp->regs + WOL_WAKECSR);
}
-/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_pause_thresholds(struct gem *gp)
{
u32 cfg;
@@ -2079,7 +2056,6 @@ static int gem_check_invariants(struct gem *gp)
return 0;
}
-/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_reinit_chip(struct gem *gp)
{
/* Reset the chip */
@@ -2100,11 +2076,9 @@ static void gem_reinit_chip(struct gem *gp)
}
-/* Must be invoked with no lock held. */
static void gem_stop_phy(struct gem *gp, int wol)
{
u32 mifcfg;
- unsigned long flags;
/* Let the chip settle down a bit, it seems that helps
* for sleep mode on some models
@@ -2150,15 +2124,9 @@ static void gem_stop_phy(struct gem *gp, int wol)
writel(0, gp->regs + RXDMA_CFG);
if (!wol) {
- spin_lock_irqsave(&gp->lock, flags);
- spin_lock(&gp->tx_lock);
gem_reset(gp);
writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
- spin_unlock(&gp->tx_lock);
- spin_unlock_irqrestore(&gp->lock, flags);
-
- /* No need to take the lock here */
if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
gp->phy_mii.def->ops->suspend(&gp->phy_mii);
@@ -2175,54 +2143,55 @@ static void gem_stop_phy(struct gem *gp, int wol)
}
}
-
static int gem_do_start(struct net_device *dev)
{
struct gem *gp = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&gp->lock, flags);
- spin_lock(&gp->tx_lock);
+ int rc;
/* Enable the cell */
gem_get_cell(gp);
- /* Init & setup chip hardware */
- gem_reinit_chip(gp);
-
- gp->running = 1;
-
- napi_enable(&gp->napi);
+ /* Make sure PCI access and bus master are enabled */
+ rc = pci_enable_device(gp->pdev);
+ if (rc) {
+ netdev_err(dev, "Failed to enable chip on PCI bus !\n");
- if (gp->lstate == link_up) {
- netif_carrier_on(gp->dev);
- gem_set_link_modes(gp);
+ /* Put the cell and forget it for now; it will be considered
+ * as still asleep, and a new sleep cycle may bring it back.
+ */
+ gem_put_cell(gp);
+ return -ENXIO;
}
+ pci_set_master(gp->pdev);
- netif_wake_queue(gp->dev);
-
- spin_unlock(&gp->tx_lock);
- spin_unlock_irqrestore(&gp->lock, flags);
+ /* Init & setup chip hardware */
+ gem_reinit_chip(gp);
- if (request_irq(gp->pdev->irq, gem_interrupt,
- IRQF_SHARED, dev->name, (void *)dev)) {
+ /* An interrupt might come in handy */
+ rc = request_irq(gp->pdev->irq, gem_interrupt,
+ IRQF_SHARED, dev->name, (void *)dev);
+ if (rc) {
netdev_err(dev, "failed to request irq !\n");
- spin_lock_irqsave(&gp->lock, flags);
- spin_lock(&gp->tx_lock);
-
- napi_disable(&gp->napi);
-
- gp->running = 0;
gem_reset(gp);
gem_clean_rings(gp);
gem_put_cell(gp);
+ return rc;
+ }
- spin_unlock(&gp->tx_lock);
- spin_unlock_irqrestore(&gp->lock, flags);
+ /* Mark us as attached again if we come from resume(); this has
+ * no effect if we weren't detached and needs to be done now.
+ */
+ netif_device_attach(dev);
- return -EAGAIN;
- }
+ /* Restart NAPI & queues */
+ gem_netif_start(gp);
+
+ /* Detect & init PHY, start autoneg etc... this will
+ * eventually result in starting DMA operations when
+ * the link is up
+ */
+ gem_init_phy(gp);
return 0;
}
@@ -2230,22 +2199,30 @@ static int gem_do_start(struct net_device *dev)
static void gem_do_stop(struct net_device *dev, int wol)
{
struct gem *gp = netdev_priv(dev);
- unsigned long flags;
- spin_lock_irqsave(&gp->lock, flags);
- spin_lock(&gp->tx_lock);
+ /* Stop NAPI and stop tx queue */
+ gem_netif_stop(gp);
- gp->running = 0;
-
- /* Stop netif queue */
- netif_stop_queue(dev);
-
- /* Make sure ints are disabled */
+ /* Make sure ints are disabled. We don't care about
+ * synchronizing since NAPI is disabled; a stray
+ * interrupt will do nothing bad (our irq handler
+ * just schedules NAPI).
+ */
gem_disable_ints(gp);
- /* We can drop the lock now */
- spin_unlock(&gp->tx_lock);
- spin_unlock_irqrestore(&gp->lock, flags);
+ /* Stop the link timer */
+ del_timer_sync(&gp->link_timer);
+
+ /* We cannot cancel the reset task while holding the
+ * rtnl lock; we'd get an A->B / B->A deadlock situation
+ * if we did. This is not an issue, however, as the reset
+ * task is synchronized vs. us (rtnl_lock) and will do
+ * nothing if the device is down or suspended. We do
+ * still clear reset_task_pending to avoid a spurious
+ * reset later on in case we do resume before it gets
+ * scheduled.
+ */
+ gp->reset_task_pending = 0;
/* If we are going to sleep with WOL */
gem_stop_dma(gp);
@@ -2260,79 +2237,79 @@ static void gem_do_stop(struct net_device *dev, int wol)
/* No irq needed anymore */
free_irq(gp->pdev->irq, (void *) dev);
+ /* Shut the PHY down eventually and setup WOL */
+ gem_stop_phy(gp, wol);
+
+ /* Make sure bus master is disabled */
+ pci_disable_device(gp->pdev);
+
/* Cell not needed neither if no WOL */
- if (!wol) {
- spin_lock_irqsave(&gp->lock, flags);
+ if (!wol)
gem_put_cell(gp);
- spin_unlock_irqrestore(&gp->lock, flags);
- }
}
static void gem_reset_task(struct work_struct *work)
{
struct gem *gp = container_of(work, struct gem, reset_task);
- mutex_lock(&gp->pm_mutex);
+ /* Lock out the network stack (essentially shield ourselves
+ * against a racing open, close, control call, or suspend)
+ */
+ rtnl_lock();
- if (gp->opened)
- napi_disable(&gp->napi);
+ /* Skip the reset task if suspended or closed, or if it's
+ * been cancelled by gem_do_stop (see comment there)
+ */
+ if (!netif_device_present(gp->dev) ||
+ !netif_running(gp->dev) ||
+ !gp->reset_task_pending) {
+ rtnl_unlock();
+ return;
+ }
- spin_lock_irq(&gp->lock);
- spin_lock(&gp->tx_lock);
+ /* Stop the link timer */
+ del_timer_sync(&gp->link_timer);
- if (gp->running) {
- netif_stop_queue(gp->dev);
+ /* Stop NAPI and tx */
+ gem_netif_stop(gp);
- /* Reset the chip & rings */
- gem_reinit_chip(gp);
- if (gp->lstate == link_up)
- gem_set_link_modes(gp);
- netif_wake_queue(gp->dev);
- }
+ /* Reset the chip & rings */
+ gem_reinit_chip(gp);
+ if (gp->lstate == link_up)
+ gem_set_link_modes(gp);
- gp->reset_task_pending = 0;
+ /* Restart NAPI and Tx */
+ gem_netif_start(gp);
- spin_unlock(&gp->tx_lock);
- spin_unlock_irq(&gp->lock);
+ /* We are back ! */
+ gp->reset_task_pending = 0;
- if (gp->opened)
- napi_enable(&gp->napi);
+ /* If the link is not up, restart autoneg, else restart the
+ * polling timer
+ */
+ if (gp->lstate != link_up)
+ gem_begin_auto_negotiation(gp, NULL);
+ else
+ mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
- mutex_unlock(&gp->pm_mutex);
+ rtnl_unlock();
}
-
static int gem_open(struct net_device *dev)
{
- struct gem *gp = netdev_priv(dev);
- int rc = 0;
-
- mutex_lock(&gp->pm_mutex);
-
- /* We need the cell enabled */
- if (!gp->asleep)
- rc = gem_do_start(dev);
- gp->opened = (rc == 0);
-
- mutex_unlock(&gp->pm_mutex);
-
- return rc;
+ /* We allow open while suspended; we just do nothing, and
+ * the chip will be initialized in resume().
+ */
+ if (netif_device_present(dev))
+ return gem_do_start(dev);
+ return 0;
}
static int gem_close(struct net_device *dev)
{
- struct gem *gp = netdev_priv(dev);
-
- mutex_lock(&gp->pm_mutex);
-
- napi_disable(&gp->napi);
-
- gp->opened = 0;
- if (!gp->asleep)
+ if (netif_device_present(dev))
gem_do_stop(dev, 0);
- mutex_unlock(&gp->pm_mutex);
-
return 0;
}
@@ -2341,59 +2318,35 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct gem *gp = netdev_priv(dev);
- unsigned long flags;
- mutex_lock(&gp->pm_mutex);
-
- netdev_info(dev, "suspending, WakeOnLan %s\n",
- (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
-
- /* Keep the cell enabled during the entire operation */
- spin_lock_irqsave(&gp->lock, flags);
- spin_lock(&gp->tx_lock);
- gem_get_cell(gp);
- spin_unlock(&gp->tx_lock);
- spin_unlock_irqrestore(&gp->lock, flags);
-
- /* If the driver is opened, we stop the MAC */
- if (gp->opened) {
- napi_disable(&gp->napi);
+ /* Lock the network stack first to avoid racing with open/close,
+ * reset task and setting calls
+ */
+ rtnl_lock();
- /* Stop traffic, mark us closed */
+ /* Not running; mark ourselves non-present. No need for
+ * a lock here.
+ */
+ if (!netif_running(dev)) {
netif_device_detach(dev);
+ rtnl_unlock();
+ return 0;
+ }
+ netdev_info(dev, "suspending, WakeOnLan %s\n",
+ (gp->wake_on_lan && netif_running(dev)) ?
+ "enabled" : "disabled");
- /* Switch off MAC, remember WOL setting */
- gp->asleep_wol = gp->wake_on_lan;
- gem_do_stop(dev, gp->asleep_wol);
- } else
- gp->asleep_wol = 0;
-
- /* Mark us asleep */
- gp->asleep = 1;
- wmb();
-
- /* Stop the link timer */
- del_timer_sync(&gp->link_timer);
-
- /* Now we release the mutex to not block the reset task who
- * can take it too. We are marked asleep, so there will be no
- * conflict here
+ /* Tell the network stack we're gone. gem_do_stop() below will
+ * synchronize with TX, stop NAPI etc...
*/
- mutex_unlock(&gp->pm_mutex);
+ netif_device_detach(dev);
- /* Wait for the pending reset task to complete */
- flush_work_sync(&gp->reset_task);
+ /* Switch off chip, remember WOL setting */
+ gp->asleep_wol = gp->wake_on_lan;
+ gem_do_stop(dev, gp->asleep_wol);
- /* Shut the PHY down eventually and setup WOL */
- gem_stop_phy(gp, gp->asleep_wol);
-
- /* Make sure bus master is disabled */
- pci_disable_device(gp->pdev);
-
- /* Release the cell, no need to take a lock at this point since
- * nothing else can happen now
- */
- gem_put_cell(gp);
+ /* Unlock the network stack */
+ rtnl_unlock();
return 0;
}
@@ -2402,53 +2355,23 @@ static int gem_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct gem *gp = netdev_priv(dev);
- unsigned long flags;
-
- netdev_info(dev, "resuming\n");
- mutex_lock(&gp->pm_mutex);
+ /* See locking comment in gem_suspend */
+ rtnl_lock();
- /* Keep the cell enabled during the entire operation, no need to
- * take a lock here tho since nothing else can happen while we are
- * marked asleep
+ /* Not running; mark ourselves present. No need for
+ * a lock here
*/
- gem_get_cell(gp);
-
- /* Make sure PCI access and bus master are enabled */
- if (pci_enable_device(gp->pdev)) {
- netdev_err(dev, "Can't re-enable chip !\n");
- /* Put cell and forget it for now, it will be considered as
- * still asleep, a new sleep cycle may bring it back
- */
- gem_put_cell(gp);
- mutex_unlock(&gp->pm_mutex);
+ if (!netif_running(dev)) {
+ netif_device_attach(dev);
+ rtnl_unlock();
return 0;
}
- pci_set_master(gp->pdev);
-
- /* Reset everything */
- gem_reset(gp);
-
- /* Mark us woken up */
- gp->asleep = 0;
- wmb();
- /* Bring the PHY back. Again, lock is useless at this point as
- * nothing can be happening until we restart the whole thing
+ /* Restart the chip. If that fails there isn't much we can do;
+ * we leave things stopped.
*/
- gem_init_phy(gp);
-
- /* If we were opened, bring everything back */
- if (gp->opened) {
- /* Restart MAC */
- gem_do_start(dev);
-
- /* Re-attach net device */
- netif_device_attach(dev);
- }
-
- spin_lock_irqsave(&gp->lock, flags);
- spin_lock(&gp->tx_lock);
+ gem_do_start(dev);
/* If we had WOL enabled, the cell clock was never turned off during
* sleep, so we end up being unbalanced. Fix that here
@@ -2456,15 +2379,8 @@ static int gem_resume(struct pci_dev *pdev)
if (gp->asleep_wol)
gem_put_cell(gp);
- /* This function doesn't need to hold the cell, it will be held if the
- * driver is open by gem_do_start().
- */
- gem_put_cell(gp);
-
- spin_unlock(&gp->tx_lock);
- spin_unlock_irqrestore(&gp->lock, flags);
-
- mutex_unlock(&gp->pm_mutex);
+ /* Unlock the network stack */
+ rtnl_unlock();
return 0;
}
@@ -2474,33 +2390,35 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
struct gem *gp = netdev_priv(dev);
- spin_lock_irq(&gp->lock);
- spin_lock(&gp->tx_lock);
-
/* I have seen this being called while the PM was in progress,
- * so we shield against this
+ * so we shield against this. Let's also not poke at registers
+ * while the reset task is going on.
+ *
+ * TODO: Move stats collection elsewhere (link timer?) and
+ * make this a nop to avoid all those synchronization issues
*/
- if (gp->running) {
- dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
- writel(0, gp->regs + MAC_FCSERR);
+ if (!netif_device_present(dev) || !netif_running(dev))
+ goto bail;
- dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
- writel(0, gp->regs + MAC_AERR);
+ /* Better safe than sorry... */
+ if (WARN_ON(!gp->cell_enabled))
+ goto bail;
- dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
- writel(0, gp->regs + MAC_LERR);
+ dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
+ writel(0, gp->regs + MAC_FCSERR);
- dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
- dev->stats.collisions +=
- (readl(gp->regs + MAC_ECOLL) +
- readl(gp->regs + MAC_LCOLL));
- writel(0, gp->regs + MAC_ECOLL);
- writel(0, gp->regs + MAC_LCOLL);
- }
+ dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
+ writel(0, gp->regs + MAC_AERR);
- spin_unlock(&gp->tx_lock);
- spin_unlock_irq(&gp->lock);
+ dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
+ writel(0, gp->regs + MAC_LERR);
+ dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
+ dev->stats.collisions +=
+ (readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL));
+ writel(0, gp->regs + MAC_ECOLL);
+ writel(0, gp->regs + MAC_LCOLL);
+ bail:
return &dev->stats;
}
@@ -2513,22 +2431,19 @@ static int gem_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(macaddr->sa_data))
return -EADDRNOTAVAIL;
- if (!netif_running(dev) || !netif_device_present(dev)) {
- /* We'll just catch it later when the
- * device is up'd or resumed.
- */
- memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
+ memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
+
+ /* We'll just catch it later when the device is up'd or resumed */
+ if (!netif_running(dev) || !netif_device_present(dev))
return 0;
- }
- mutex_lock(&gp->pm_mutex);
- memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
- if (gp->running) {
- writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
- writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
- writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
- }
- mutex_unlock(&gp->pm_mutex);
+ /* Better safe than sorry... */
+ if (WARN_ON(!gp->cell_enabled))
+ return 0;
+
+ writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
+ writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
+ writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
return 0;
}
@@ -2539,14 +2454,12 @@ static void gem_set_multicast(struct net_device *dev)
u32 rxcfg, rxcfg_new;
int limit = 10000;
+ if (!netif_running(dev) || !netif_device_present(dev))
+ return;
- spin_lock_irq(&gp->lock);
- spin_lock(&gp->tx_lock);
-
- if (!gp->running)
- goto bail;
-
- netif_stop_queue(dev);
+ /* Better safe than sorry... */
+ if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled))
+ return;
rxcfg = readl(gp->regs + MAC_RXCFG);
rxcfg_new = gem_setup_multicast(gp);
@@ -2566,12 +2479,6 @@ static void gem_set_multicast(struct net_device *dev)
rxcfg |= rxcfg_new;
writel(rxcfg, gp->regs + MAC_RXCFG);
-
- netif_wake_queue(dev);
-
- bail:
- spin_unlock(&gp->tx_lock);
- spin_unlock_irq(&gp->lock);
}
/* Jumbo-grams don't seem to work :-( */
@@ -2589,26 +2496,21 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
return -EINVAL;
- if (!netif_running(dev) || !netif_device_present(dev)) {
- /* We'll just catch it later when the
- * device is up'd or resumed.
- */
- dev->mtu = new_mtu;
+ dev->mtu = new_mtu;
+
+ /* We'll just catch it later when the device is up'd or resumed */
+ if (!netif_running(dev) || !netif_device_present(dev))
return 0;
- }
- mutex_lock(&gp->pm_mutex);
- spin_lock_irq(&gp->lock);
- spin_lock(&gp->tx_lock);
- dev->mtu = new_mtu;
- if (gp->running) {
- gem_reinit_chip(gp);
- if (gp->lstate == link_up)
- gem_set_link_modes(gp);
- }
- spin_unlock(&gp->tx_lock);
- spin_unlock_irq(&gp->lock);
- mutex_unlock(&gp->pm_mutex);
+ /* Better safe than sorry... */
+ if (WARN_ON(!gp->cell_enabled))
+ return 0;
+
+ gem_netif_stop(gp);
+ gem_reinit_chip(gp);
+ if (gp->lstate == link_up)
+ gem_set_link_modes(gp);
+ gem_netif_start(gp);
return 0;
}
@@ -2640,7 +2542,6 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->phy_address = 0; /* XXX fixed PHYAD */
/* Return current PHY settings */
- spin_lock_irq(&gp->lock);
cmd->autoneg = gp->want_autoneg;
ethtool_cmd_speed_set(cmd, gp->phy_mii.speed);
cmd->duplex = gp->phy_mii.duplex;
@@ -2652,7 +2553,6 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
*/
if (cmd->advertising == 0)
cmd->advertising = cmd->supported;
- spin_unlock_irq(&gp->lock);
} else { // XXX PCS ?
cmd->supported =
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
@@ -2706,11 +2606,10 @@ static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
/* Apply settings and restart link process. */
- spin_lock_irq(&gp->lock);
- gem_get_cell(gp);
- gem_begin_auto_negotiation(gp, cmd);
- gem_put_cell(gp);
- spin_unlock_irq(&gp->lock);
+ if (netif_device_present(gp->dev)) {
+ del_timer_sync(&gp->link_timer);
+ gem_begin_auto_negotiation(gp, cmd);
+ }
return 0;
}
@@ -2722,12 +2621,11 @@ static int gem_nway_reset(struct net_device *dev)
if (!gp->want_autoneg)
return -EINVAL;
- /* Restart link process. */
- spin_lock_irq(&gp->lock);
- gem_get_cell(gp);
- gem_begin_auto_negotiation(gp, NULL);
- gem_put_cell(gp);
- spin_unlock_irq(&gp->lock);
+ /* Restart link process */
+ if (netif_device_present(gp->dev)) {
+ del_timer_sync(&gp->link_timer);
+ gem_begin_auto_negotiation(gp, NULL);
+ }
return 0;
}
@@ -2791,16 +2689,11 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct gem *gp = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(ifr);
int rc = -EOPNOTSUPP;
- unsigned long flags;
- /* Hold the PM mutex while doing ioctl's or we may collide
- * with power management.
+ /* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that
+ * netif_device_present() is true and holds rtnl_lock for us
+ * so we have nothing to worry about
*/
- mutex_lock(&gp->pm_mutex);
-
- spin_lock_irqsave(&gp->lock, flags);
- gem_get_cell(gp);
- spin_unlock_irqrestore(&gp->lock, flags);
switch (cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
@@ -2808,32 +2701,17 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Fallthrough... */
case SIOCGMIIREG: /* Read MII PHY register. */
- if (!gp->running)
- rc = -EAGAIN;
- else {
- data->val_out = __phy_read(gp, data->phy_id & 0x1f,
- data->reg_num & 0x1f);
- rc = 0;
- }
+ data->val_out = __phy_read(gp, data->phy_id & 0x1f,
+ data->reg_num & 0x1f);
+ rc = 0;
break;
case SIOCSMIIREG: /* Write MII PHY register. */
- if (!gp->running)
- rc = -EAGAIN;
- else {
- __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
- data->val_in);
- rc = 0;
- }
+ __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
+ data->val_in);
+ rc = 0;
break;
- };
-
- spin_lock_irqsave(&gp->lock, flags);
- gem_put_cell(gp);
- spin_unlock_irqrestore(&gp->lock, flags);
-
- mutex_unlock(&gp->pm_mutex);
-
+ }
return rc;
}
@@ -2921,23 +2799,9 @@ static void gem_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
- /* Stop the link timer */
- del_timer_sync(&gp->link_timer);
-
- /* We shouldn't need any locking here */
- gem_get_cell(gp);
-
- /* Cancel reset task */
+ /* Ensure the reset task is truly gone */
cancel_work_sync(&gp->reset_task);
- /* Shut the PHY down */
- gem_stop_phy(gp, 0);
-
- gem_put_cell(gp);
-
- /* Make sure bus master is disabled */
- pci_disable_device(gp->pdev);
-
/* Free resources */
pci_free_consistent(pdev,
sizeof(struct gem_init_block),
@@ -3043,10 +2907,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
gp->msg_enable = DEFAULT_MSG;
- spin_lock_init(&gp->lock);
- spin_lock_init(&gp->tx_lock);
- mutex_init(&gp->pm_mutex);
-
init_timer(&gp->link_timer);
gp->link_timer.function = gem_link_timer;
gp->link_timer.data = (unsigned long) gp;
@@ -3122,14 +2982,11 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
/* Set that now, in case PM kicks in now */
pci_set_drvdata(pdev, dev);
- /* Detect & init PHY, start autoneg, we release the cell now
- * too, it will be managed by whoever needs it
- */
- gem_init_phy(gp);
-
- spin_lock_irq(&gp->lock);
- gem_put_cell(gp);
- spin_unlock_irq(&gp->lock);
+ /* We can do scatter/gather and HW checksum */
+ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+ if (pci_using_dac)
+ dev->features |= NETIF_F_HIGHDMA;
/* Register with kernel */
if (register_netdev(dev)) {
@@ -3138,20 +2995,15 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
goto err_out_free_consistent;
}
+ /* Undo the get_cell with appropriate locking (we could use
+ * ndo_init/uninit but that would be even more clumsy imho)
+ */
+ rtnl_lock();
+ gem_put_cell(gp);
+ rtnl_unlock();
+
netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
dev->dev_addr);
-
- if (gp->phy_type == phy_mii_mdio0 ||
- gp->phy_type == phy_mii_mdio1)
- netdev_info(dev, "Found %s PHY\n",
- gp->phy_mii.def ? gp->phy_mii.def->name : "no");
-
- /* GEM can do it all... */
- dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
- dev->features |= dev->hw_features | NETIF_F_RXCSUM | NETIF_F_LLTX;
- if (pci_using_dac)
- dev->features |= NETIF_F_HIGHDMA;
-
return 0;
err_out_free_consistent:
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index d225077964e..835ce1b3cb9 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -973,23 +973,14 @@ enum link_state {
};
struct gem {
- spinlock_t lock;
- spinlock_t tx_lock;
void __iomem *regs;
int rx_new, rx_old;
int tx_new, tx_old;
unsigned int has_wol : 1; /* chip supports wake-on-lan */
- unsigned int asleep : 1; /* chip asleep, protected by pm_mutex */
unsigned int asleep_wol : 1; /* was asleep with WOL enabled */
- unsigned int opened : 1; /* driver opened, protected by pm_mutex */
- unsigned int running : 1; /* chip running, protected by lock */
- /* cell enable count, protected by lock */
int cell_enabled;
-
- struct mutex pm_mutex;
-
u32 msg_enable;
u32 status;
@@ -1033,20 +1024,4 @@ struct gem {
#define found_mii_phy(gp) ((gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) && \
gp->phy_mii.def && gp->phy_mii.def->ops)
-#define ALIGNED_RX_SKB_ADDR(addr) \
- ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
-static __inline__ struct sk_buff *gem_alloc_skb(int size,
- gfp_t gfp_flags)
-{
- struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
-
- if (skb) {
- int offset = (int) ALIGNED_RX_SKB_ADDR(skb->data);
- if (offset)
- skb_reserve(skb, offset);
- }
-
- return skb;
-}
-
#endif /* _SUNGEM_H */
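For readability only (this condensation is not part of the patch): the sungem hunks above convert the driver to the standard lockless TX stop/wake handshake, and the two halves of that handshake, written with only the symbols that already appear in the diff, reduce to roughly the following sketch.

/* Condensed sketch of the stop/wake pairing adopted above (not part of
 * the patch; ring setup and error handling omitted).
 */

/* Producer side, gem_start_xmit(): stop first, then re-check space. */
if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) {
	netif_stop_queue(dev);
	smp_mb();			/* pairs with the barrier in gem_tx() */
	if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(dev);	/* completion raced with us */
}

/* Consumer side, gem_tx(): publish tx_old first, then test "stopped". */
gp->tx_old = entry;
smp_mb();				/* make tx_old visible before the test */
if (unlikely(netif_queue_stopped(dev) &&
	     TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) {
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	__netif_tx_lock(txq, smp_processor_id());
	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(dev);
	__netif_tx_unlock(txq);
}

The re-check under __netif_tx_lock() closes the window in which both sides could observe stale state at the same time.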
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 30aad54b1b3..856e05b9fba 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1524,17 +1524,11 @@ static int happy_meal_init(struct happy_meal *hp)
} else if ((hp->dev->flags & IFF_PROMISC) == 0) {
u16 hash_table[4];
struct netdev_hw_addr *ha;
- char *addrs;
u32 crc;
memset(hash_table, 0, sizeof(hash_table));
netdev_for_each_mc_addr(ha, hp->dev) {
- addrs = ha->addr;
-
- if (!(*addrs & 1))
- continue;
-
- crc = ether_crc_le(6, addrs);
+ crc = ether_crc_le(6, ha->addr);
crc >>= 26;
hash_table[crc >> 4] |= 1 << (crc & 0xf);
}
@@ -2361,7 +2355,6 @@ static void happy_meal_set_multicast(struct net_device *dev)
struct happy_meal *hp = netdev_priv(dev);
void __iomem *bregs = hp->bigmacregs;
struct netdev_hw_addr *ha;
- char *addrs;
u32 crc;
spin_lock_irq(&hp->happy_lock);
@@ -2379,12 +2372,7 @@ static void happy_meal_set_multicast(struct net_device *dev)
memset(hash_table, 0, sizeof(hash_table));
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
-
- if (!(*addrs & 1))
- continue;
-
- crc = ether_crc_le(6, addrs);
+ crc = ether_crc_le(6, ha->addr);
crc >>= 26;
hash_table[crc >> 4] |= 1 << (crc & 0xf);
}
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 32a5c7f63c4..06f2d4382dc 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1170,7 +1170,6 @@ static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
struct netdev_hw_addr *ha;
- char *addrs;
u32 crc;
u32 val;
@@ -1195,12 +1194,7 @@ static void lance_load_multicast(struct net_device *dev)
/* Add addresses */
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
-
- /* multicast address? */
- if (!(*addrs & 1))
- continue;
- crc = ether_crc_le(6, addrs);
+ crc = ether_crc_le(6, ha->addr);
crc = crc >> 26;
if (lp->pio_buffer) {
struct lance_init_block __iomem *ib = lp->init_block_iomem;
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 18ecdc30375..209c7f8df00 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -628,7 +628,6 @@ static void qe_set_multicast(struct net_device *dev)
struct sunqe *qep = netdev_priv(dev);
struct netdev_hw_addr *ha;
u8 new_mconfig = qep->mconfig;
- char *addrs;
int i;
u32 crc;
@@ -651,11 +650,7 @@ static void qe_set_multicast(struct net_device *dev)
memset(hash_table, 0, sizeof(hash_table));
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
-
- if (!(*addrs & 1))
- continue;
- crc = ether_crc_le(6, addrs);
+ crc = ether_crc_le(6, ha->addr);
crc >>= 26;
hash_table[crc >> 4] |= 1 << (crc & 0xf);
}
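All four sun* hunks above make the same simplification: the multicast list handed over by the core only ever contains multicast addresses, so the open-coded "is this a multicast address" filter on *addrs is dead code and the CRC can be taken directly from ha->addr. As a reading aid for the hash-bucket arithmetic (not part of the patch, and intended only to mirror the kernel's ether_crc_le()), a minimal user-space equivalent of that CRC looks roughly like this:

#include <stdint.h>

/* Reference-only sketch of the little-endian Ethernet CRC used above. */
static uint32_t ether_crc_le_sketch(int len, const unsigned char *data)
{
	uint32_t crc = 0xffffffff;	/* initial value, no final XOR */

	while (--len >= 0) {
		unsigned char cur = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, cur >>= 1)
			crc = (crc ^ cur) & 1 ?
				(crc >> 1) ^ 0xedb88320 : crc >> 1;
	}
	return crc;
}

/* sunhme/sunlance/sunqe keep the top six bits as the filter index:
 *	crc = ether_crc_le_sketch(6, ha->addr) >> 26;
 *	hash_table[crc >> 4] |= 1 << (crc & 0xf);
 * sungem uses an eight-bit index (crc >> 24) with a mirrored bit slot,
 * exactly as shown in its hunk above.
 */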
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 80fbee0d40a..749bbf18dc6 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -742,22 +742,6 @@ static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
__bdx_vlan_rx_vid(ndev, vid, 0);
}
-/*
- * bdx_vlan_rx_register - kernel hook for adding VLAN group
- * @ndev network device
- * @grp VLAN group
- */
-static void
-bdx_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
-{
- struct bdx_priv *priv = netdev_priv(ndev);
-
- ENTER;
- DBG("device='%s', group='%p'\n", ndev->name, grp);
- priv->vlgrp = grp;
- RET();
-}
-
/**
* bdx_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
@@ -1146,21 +1130,15 @@ NETIF_RX_MUX(struct bdx_priv *priv, u32 rxd_val1, u16 rxd_vlan,
struct sk_buff *skb)
{
ENTER;
- DBG("rxdd->flags.bits.vtag=%d vlgrp=%p\n", GET_RXD_VTAG(rxd_val1),
- priv->vlgrp);
- if (priv->vlgrp && GET_RXD_VTAG(rxd_val1)) {
- DBG("%s: vlan rcv vlan '%x' vtag '%x', device name '%s'\n",
+ DBG("rxdd->flags.bits.vtag=%d\n", GET_RXD_VTAG(rxd_val1));
+ if (GET_RXD_VTAG(rxd_val1)) {
+ DBG("%s: vlan rcv vlan '%x' vtag '%x'\n",
priv->ndev->name,
GET_RXD_VLAN_ID(rxd_vlan),
- GET_RXD_VTAG(rxd_val1),
- vlan_group_get_device(priv->vlgrp,
- GET_RXD_VLAN_ID(rxd_vlan))->name);
- /* NAPI variant of receive functions */
- vlan_hwaccel_receive_skb(skb, priv->vlgrp,
- GET_RXD_VLAN_TCI(rxd_vlan));
- } else {
- netif_receive_skb(skb);
+ GET_RXD_VTAG(rxd_val1));
+ __vlan_hwaccel_put_tag(skb, GET_RXD_VLAN_TCI(rxd_vlan));
}
+ netif_receive_skb(skb);
}
static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
@@ -1877,7 +1855,7 @@ static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
}
static const struct net_device_ops bdx_netdev_ops = {
- .ndo_open = bdx_open,
+ .ndo_open = bdx_open,
.ndo_stop = bdx_close,
.ndo_start_xmit = bdx_tx_transmit,
.ndo_validate_addr = eth_validate_addr,
@@ -1885,7 +1863,6 @@ static const struct net_device_ops bdx_netdev_ops = {
.ndo_set_multicast_list = bdx_setmulti,
.ndo_change_mtu = bdx_change_mtu,
.ndo_set_mac_address = bdx_set_mac,
- .ndo_vlan_rx_register = bdx_vlan_rx_register,
.ndo_vlan_rx_add_vid = bdx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bdx_vlan_rx_kill_vid,
};
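A short informational note on the tehuti conversion above (not part of the patch): with the vlan_group bookkeeping removed, the receive path only records the hardware-stripped tag on the skb and hands every packet to the core, which now owns VLAN demultiplexing. The two-argument __vlan_hwaccel_put_tag() form follows the usage in the diff itself, so the essential shape of the new path is:

/* Sketch of the post-conversion RX tagging (error handling omitted). */
if (GET_RXD_VTAG(rxd_val1))
	__vlan_hwaccel_put_tag(skb, GET_RXD_VLAN_TCI(rxd_vlan));
netif_receive_skb(skb);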
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index c5642fefc9e..709ebd6e28b 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -250,7 +250,6 @@ struct bdx_priv {
struct rxf_fifo rxf_fifo0;
struct rxdb *rxdb; /* rx dbs to store skb pointers */
int napi_stop;
- struct vlan_group *vlgrp;
/* Tx FIFOs: 1 for data desc, 1 for empty (acks) desc */
struct txd_fifo txd_fifo0;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index a1f9f9eef37..dc3fbf61910 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
@@ -106,6 +107,8 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
+#define TG3_GRC_LCLCTL_PWRSW_DELAY 100
+
/* length of time before we decide the hardware is borked,
* and dev->tx_timeout() should be called to fix the problem
*/
@@ -187,6 +190,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
+#define TG3_TX_BD_DMA_MAX 4096
#define TG3_RAW_IP_ALIGN 2
@@ -605,7 +609,7 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
static void tg3_ape_lock_init(struct tg3 *tp)
{
int i;
- u32 regbase;
+ u32 regbase, bit;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
regbase = TG3_APE_LOCK_GRANT;
@@ -613,20 +617,34 @@ static void tg3_ape_lock_init(struct tg3 *tp)
regbase = TG3_APE_PER_LOCK_GRANT;
/* Make sure the driver hasn't any stale locks. */
- for (i = 0; i < 8; i++)
+ for (i = 0; i < 8; i++) {
+ if (i == TG3_APE_LOCK_GPIO)
+ continue;
tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
+ }
+
+ /* Clear the correct bit of the GPIO lock too. */
+ if (!tp->pci_fn)
+ bit = APE_LOCK_GRANT_DRIVER;
+ else
+ bit = 1 << tp->pci_fn;
+
+ tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
int i, off;
int ret = 0;
- u32 status, req, gnt;
+ u32 status, req, gnt, bit;
if (!tg3_flag(tp, ENABLE_APE))
return 0;
switch (locknum) {
+ case TG3_APE_LOCK_GPIO:
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ return 0;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
break;
@@ -644,21 +662,24 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
off = 4 * locknum;
- tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
+ if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
+ bit = APE_LOCK_REQ_DRIVER;
+ else
+ bit = 1 << tp->pci_fn;
+
+ tg3_ape_write32(tp, req + off, bit);
/* Wait for up to 1 millisecond to acquire lock. */
for (i = 0; i < 100; i++) {
status = tg3_ape_read32(tp, gnt + off);
- if (status == APE_LOCK_GRANT_DRIVER)
+ if (status == bit)
break;
udelay(10);
}
- if (status != APE_LOCK_GRANT_DRIVER) {
+ if (status != bit) {
/* Revoke the lock request. */
- tg3_ape_write32(tp, gnt + off,
- APE_LOCK_GRANT_DRIVER);
-
+ tg3_ape_write32(tp, gnt + off, bit);
ret = -EBUSY;
}
@@ -667,12 +688,15 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
- u32 gnt;
+ u32 gnt, bit;
if (!tg3_flag(tp, ENABLE_APE))
return;
switch (locknum) {
+ case TG3_APE_LOCK_GPIO:
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ return;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
break;
@@ -685,7 +709,12 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
else
gnt = TG3_APE_PER_LOCK_GRANT;
- tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
+ if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
+ bit = APE_LOCK_GRANT_DRIVER;
+ else
+ bit = 1 << tp->pci_fn;
+
+ tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static void tg3_disable_ints(struct tg3 *tp)
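For orientation only (not part of the patch): the tg3_ape_lock() and tg3_ape_unlock() hunks above select the APE request/grant bit with the same rule, so it can be summarized in one helper. The helper name below is hypothetical and does not exist in the driver.

/* Hypothetical condensation of the bit selection duplicated in the
 * lock/unlock hunks above: only the GPIO lock is per PCI function;
 * every other lock, and function 0, keeps the legacy driver bit.
 */
static u32 tg3_ape_bit(struct tg3 *tp, int locknum, u32 driver_bit)
{
	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		return driver_bit;	/* legacy, single-function case */
	return 1 << tp->pci_fn;		/* per-function GPIO bit */
}

/* Usage, mirroring the hunks above:
 *	tg3_ape_write32(tp, req + off,
 *			tg3_ape_bit(tp, locknum, APE_LOCK_REQ_DRIVER));
 *	tg3_ape_write32(tp, gnt + 4 * locknum,
 *			tg3_ape_bit(tp, locknum, APE_LOCK_GRANT_DRIVER));
 */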
@@ -860,7 +889,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
int ret;
if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
- (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
+ (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
return 0;
if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
@@ -1167,7 +1196,7 @@ static int tg3_mdio_init(struct tg3 *tp)
if (tg3_flag(tp, 5717_PLUS)) {
u32 is_serdes;
- tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
+ tp->phy_addr = tp->pci_fn + 1;
if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
@@ -1830,6 +1859,12 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
}
if (!tp->setlpicnt) {
+ if (current_link_up == 1 &&
+ !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ }
+
val = tr32(TG3_CPMU_EEE_MODE);
tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
}
@@ -1844,7 +1879,9 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
- tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
+ val = MII_TG3_DSP_TAP26_ALNOKO |
+ MII_TG3_DSP_TAP26_RMRXSTO;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
@@ -1980,15 +2017,14 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
/* Set full-duplex, 1000 mbps. */
tg3_writephy(tp, MII_BMCR,
- BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
+ BMCR_FULLDPLX | BMCR_SPEED1000);
/* Set to master mode. */
- if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
+ if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
continue;
- tg3_writephy(tp, MII_TG3_CTRL,
- (MII_TG3_CTRL_AS_MASTER |
- MII_TG3_CTRL_ENABLE_AS_MASTER));
+ tg3_writephy(tp, MII_CTRL1000,
+ CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
if (err)
@@ -2013,7 +2049,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
- tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
+ tg3_writephy(tp, MII_CTRL1000, phy9_orig);
if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
reg32 &= ~0x3000;
@@ -2165,21 +2201,214 @@ out:
return 0;
}
-static void tg3_frob_aux_power(struct tg3 *tp)
+#define TG3_GPIO_MSG_DRVR_PRES 0x00000001
+#define TG3_GPIO_MSG_NEED_VAUX 0x00000002
+#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
+ TG3_GPIO_MSG_NEED_VAUX)
+#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
+ ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
+ (TG3_GPIO_MSG_DRVR_PRES << 4) | \
+ (TG3_GPIO_MSG_DRVR_PRES << 8) | \
+ (TG3_GPIO_MSG_DRVR_PRES << 12))
+
+#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
+ ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
+ (TG3_GPIO_MSG_NEED_VAUX << 4) | \
+ (TG3_GPIO_MSG_NEED_VAUX << 8) | \
+ (TG3_GPIO_MSG_NEED_VAUX << 12))
+
+static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
+{
+ u32 status, shift;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
+ else
+ status = tr32(TG3_CPMU_DRV_STATUS);
+
+ shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
+ status &= ~(TG3_GPIO_MSG_MASK << shift);
+ status |= (newstat << shift);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
+ else
+ tw32(TG3_CPMU_DRV_STATUS, status);
+
+ return status >> TG3_APE_GPIO_MSG_SHIFT;
+}
+
+static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
+{
+ if (!tg3_flag(tp, IS_NIC))
+ return 0;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
+ return -EIO;
+
+ tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
+
+ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
+ } else {
+ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ }
+
+ return 0;
+}
+
+static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
+{
+ u32 grc_local_ctrl;
+
+ if (!tg3_flag(tp, IS_NIC) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
+ return;
+
+ grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
+
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+}
+
+static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
+{
+ if (!tg3_flag(tp, IS_NIC))
+ return;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ (GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT0 |
+ GRC_LCLCTRL_GPIO_OUTPUT1),
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
+ /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
+ u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT0 |
+ GRC_LCLCTRL_GPIO_OUTPUT1 |
+ tp->grc_local_ctrl;
+ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
+ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
+ tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ } else {
+ u32 no_gpio2;
+ u32 grc_local_ctrl = 0;
+
+ /* Workaround to prevent overdrawing Amps. */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
+ tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+ grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ }
+
+ /* On 5753 and variants, GPIO2 cannot be used. */
+ no_gpio2 = tp->nic_sram_data_cfg &
+ NIC_SRAM_DATA_CFG_NO_GPIO2;
+
+ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
+ GRC_LCLCTRL_GPIO_OE1 |
+ GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT1 |
+ GRC_LCLCTRL_GPIO_OUTPUT2;
+ if (no_gpio2) {
+ grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
+ GRC_LCLCTRL_GPIO_OUTPUT2);
+ }
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ tp->grc_local_ctrl | grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
+
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ tp->grc_local_ctrl | grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+ if (!no_gpio2) {
+ grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
+ tw32_wait_f(GRC_LOCAL_CTRL,
+ tp->grc_local_ctrl | grc_local_ctrl,
+ TG3_GRC_LCLCTL_PWRSW_DELAY);
+ }
+ }
+}
+
+static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
+{
+ u32 msg = 0;
+
+ /* Serialize power state transitions */
+ if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
+ return;
+
+ if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
+ msg = TG3_GPIO_MSG_NEED_VAUX;
+
+ msg = tg3_set_function_status(tp, msg);
+
+ if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
+ goto done;
+
+ if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
+ tg3_pwrsrc_switch_to_vaux(tp);
+ else
+ tg3_pwrsrc_die_with_vmain(tp);
+
+done:
+ tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
+}
+
+static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
bool need_vaux = false;
/* The GPIOs do something completely different on 57765. */
if (!tg3_flag(tp, IS_NIC) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
return;
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
- tp->pdev_peer != tp->pdev) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ tg3_frob_aux_power_5717(tp, include_wol ?
+ tg3_flag(tp, WOL_ENABLE) != 0 : 0);
+ return;
+ }
+
+ if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
struct net_device *dev_peer;
dev_peer = pci_get_drvdata(tp->pdev_peer);
@@ -2191,95 +2420,20 @@ static void tg3_frob_aux_power(struct tg3 *tp)
if (tg3_flag(tp_peer, INIT_COMPLETE))
return;
- if (tg3_flag(tp_peer, WOL_ENABLE) ||
+ if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
tg3_flag(tp_peer, ENABLE_ASF))
need_vaux = true;
}
}
- if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
+ if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
+ tg3_flag(tp, ENABLE_ASF))
need_vaux = true;
- if (need_vaux) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
- tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
- (GRC_LCLCTRL_GPIO_OE0 |
- GRC_LCLCTRL_GPIO_OE1 |
- GRC_LCLCTRL_GPIO_OE2 |
- GRC_LCLCTRL_GPIO_OUTPUT0 |
- GRC_LCLCTRL_GPIO_OUTPUT1),
- 100);
- } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
- /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
- u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
- GRC_LCLCTRL_GPIO_OE1 |
- GRC_LCLCTRL_GPIO_OE2 |
- GRC_LCLCTRL_GPIO_OUTPUT0 |
- GRC_LCLCTRL_GPIO_OUTPUT1 |
- tp->grc_local_ctrl;
- tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
-
- grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
- tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
-
- grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
- tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
- } else {
- u32 no_gpio2;
- u32 grc_local_ctrl = 0;
-
- /* Workaround to prevent overdrawing Amps. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5714) {
- grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
- tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
- grc_local_ctrl, 100);
- }
-
- /* On 5753 and variants, GPIO2 cannot be used. */
- no_gpio2 = tp->nic_sram_data_cfg &
- NIC_SRAM_DATA_CFG_NO_GPIO2;
-
- grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
- GRC_LCLCTRL_GPIO_OE1 |
- GRC_LCLCTRL_GPIO_OE2 |
- GRC_LCLCTRL_GPIO_OUTPUT1 |
- GRC_LCLCTRL_GPIO_OUTPUT2;
- if (no_gpio2) {
- grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
- GRC_LCLCTRL_GPIO_OUTPUT2);
- }
- tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
- grc_local_ctrl, 100);
-
- grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
-
- tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
- grc_local_ctrl, 100);
-
- if (!no_gpio2) {
- grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
- tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
- grc_local_ctrl, 100);
- }
- }
- } else {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
- tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
- (GRC_LCLCTRL_GPIO_OE1 |
- GRC_LCLCTRL_GPIO_OUTPUT1), 100);
-
- tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
- GRC_LCLCTRL_GPIO_OE1, 100);
-
- tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
- (GRC_LCLCTRL_GPIO_OE1 |
- GRC_LCLCTRL_GPIO_OUTPUT1), 100);
- }
- }
+ if (need_vaux)
+ tg3_pwrsrc_switch_to_vaux(tp);
+ else
+ tg3_pwrsrc_die_with_vmain(tp);
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
@@ -2619,15 +2773,19 @@ static void tg3_enable_register_access(struct tg3 *tp)
static int tg3_power_up(struct tg3 *tp)
{
- tg3_enable_register_access(tp);
+ int err;
- pci_set_power_state(tp->pdev, PCI_D0);
+ tg3_enable_register_access(tp);
- /* Switch out of Vaux if it is a NIC */
- if (tg3_flag(tp, IS_NIC))
- tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
+ err = pci_set_power_state(tp->pdev, PCI_D0);
+ if (!err) {
+ /* Switch out of Vaux if it is a NIC */
+ tg3_pwrsrc_switch_to_vmain(tp);
+ } else {
+ netdev_err(tp->dev, "Transition to D0 failed\n");
+ }
- return 0;
+ return err;
}
static int tg3_power_down_prepare(struct tg3 *tp)
@@ -2642,11 +2800,11 @@ static int tg3_power_down_prepare(struct tg3 *tp)
u16 lnkctl;
pci_read_config_word(tp->pdev,
- tp->pcie_cap + PCI_EXP_LNKCTL,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
&lnkctl);
lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
pci_write_config_word(tp->pdev,
- tp->pcie_cap + PCI_EXP_LNKCTL,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
lnkctl);
}
@@ -2852,7 +3010,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
tg3_power_down_phy(tp, do_low_power);
- tg3_frob_aux_power(tp);
+ tg3_frob_aux_power(tp, true);
/* Workaround for unstable PLL clock */
if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
@@ -2957,16 +3115,15 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
new_adv = 0;
if (advertise & ADVERTISED_1000baseT_Half)
- new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
+ new_adv |= ADVERTISE_1000HALF;
if (advertise & ADVERTISED_1000baseT_Full)
- new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
+ new_adv |= ADVERTISE_1000FULL;
if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
- new_adv |= (MII_TG3_CTRL_AS_MASTER |
- MII_TG3_CTRL_ENABLE_AS_MASTER);
+ new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
- err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
+ err = tg3_writephy(tp, MII_CTRL1000, new_adv);
if (err)
goto done;
@@ -2980,20 +3137,6 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (!err) {
u32 err2;
- switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
- case ASIC_REV_5717:
- case ASIC_REV_57765:
- if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
- tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
- MII_TG3_DSP_CH34TP2_HIBW01);
- /* Fall through */
- case ASIC_REV_5719:
- val = MII_TG3_DSP_TAP26_ALNOKO |
- MII_TG3_DSP_TAP26_RMRXSTO |
- MII_TG3_DSP_TAP26_OPCSINPT;
- tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
- }
-
val = 0;
/* Advertise 100-BaseTX EEE ability */
if (advertise & ADVERTISED_100baseT_Full)
@@ -3002,6 +3145,25 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (advertise & ADVERTISED_1000baseT_Full)
val |= MDIO_AN_EEE_ADV_1000T;
err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
+ if (err)
+ val = 0;
+
+ switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ case ASIC_REV_5717:
+ case ASIC_REV_57765:
+ case ASIC_REV_5719:
+			/* If we advertised any EEE capabilities above... */
+ if (val)
+ val = MII_TG3_DSP_TAP26_ALNOKO |
+ MII_TG3_DSP_TAP26_RMRXSTO |
+ MII_TG3_DSP_TAP26_OPCSINPT;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+ /* Fall through */
+ case ASIC_REV_5720:
+ if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
+ tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
+ MII_TG3_DSP_CH34TP2_HIBW01);
+ }
err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
if (!err)
@@ -3075,7 +3237,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
break;
case SPEED_1000:
- bmcr |= TG3_BMCR_SPEED1000;
+ bmcr |= BMCR_SPEED1000;
break;
}
@@ -3152,7 +3314,7 @@ static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
if (mask & ADVERTISED_1000baseT_Full)
all_mask |= ADVERTISE_1000FULL;
- if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
+ if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
return 0;
if ((tg3_ctrl & all_mask) != all_mask)
@@ -3449,7 +3611,7 @@ relink:
u16 oldlnkctl, newlnkctl;
pci_read_config_word(tp->pdev,
- tp->pcie_cap + PCI_EXP_LNKCTL,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
&oldlnkctl);
if (tp->link_config.active_speed == SPEED_100 ||
tp->link_config.active_speed == SPEED_10)
@@ -3458,7 +3620,7 @@ relink:
newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
if (newlnkctl != oldlnkctl)
pci_write_config_word(tp->pdev,
- tp->pcie_cap + PCI_EXP_LNKCTL,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
newlnkctl);
}
@@ -4663,7 +4825,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
txq = netdev_get_tx_queue(tp->dev, index);
while (sw_idx != hw_idx) {
- struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
+ struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
struct sk_buff *skb = ri->skb;
int i, tx_bug = 0;
@@ -4679,6 +4841,12 @@ static void tg3_tx(struct tg3_napi *tnapi)
ri->skb = NULL;
+ while (ri->fragmented) {
+ ri->fragmented = false;
+ sw_idx = NEXT_TX(sw_idx);
+ ri = &tnapi->tx_buffers[sw_idx];
+ }
+
sw_idx = NEXT_TX(sw_idx);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -4690,6 +4858,13 @@ static void tg3_tx(struct tg3_napi *tnapi)
dma_unmap_addr(ri, mapping),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
+
+ while (ri->fragmented) {
+ ri->fragmented = false;
+ sw_idx = NEXT_TX(sw_idx);
+ ri = &tnapi->tx_buffers[sw_idx];
+ }
+
sw_idx = NEXT_TX(sw_idx);
}
@@ -5740,40 +5915,100 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
#endif
}
-static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
- dma_addr_t mapping, int len, u32 flags,
- u32 mss_and_is_end)
+static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
+ dma_addr_t mapping, u32 len, u32 flags,
+ u32 mss, u32 vlan)
+{
+ txbd->addr_hi = ((u64) mapping >> 32);
+ txbd->addr_lo = ((u64) mapping & 0xffffffff);
+ txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
+ txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
+}
+
+static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
+ dma_addr_t map, u32 len, u32 flags,
+ u32 mss, u32 vlan)
{
- struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
- int is_end = (mss_and_is_end & 0x1);
- u32 mss = (mss_and_is_end >> 1);
- u32 vlan_tag = 0;
+ struct tg3 *tp = tnapi->tp;
+ bool hwbug = false;
+
+ if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
+ hwbug = 1;
+
+ if (tg3_4g_overflow_test(map, len))
+ hwbug = 1;
+
+ if (tg3_40bit_overflow_test(tp, map, len))
+ hwbug = 1;
+
+ if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+ u32 tmp_flag = flags & ~TXD_FLAG_END;
+ while (len > TG3_TX_BD_DMA_MAX) {
+ u32 frag_len = TG3_TX_BD_DMA_MAX;
+ len -= TG3_TX_BD_DMA_MAX;
+
+ if (len) {
+ tnapi->tx_buffers[*entry].fragmented = true;
+				/* Avoid the 8-byte DMA problem */
+ if (len <= 8) {
+ len += TG3_TX_BD_DMA_MAX / 2;
+ frag_len = TG3_TX_BD_DMA_MAX / 2;
+ }
+ } else
+ tmp_flag = flags;
- if (is_end)
- flags |= TXD_FLAG_END;
- if (flags & TXD_FLAG_VLAN) {
- vlan_tag = flags >> 16;
- flags &= 0xffff;
+ if (*budget) {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ frag_len, tmp_flag, mss, vlan);
+ (*budget)--;
+ *entry = NEXT_TX(*entry);
+ } else {
+ hwbug = 1;
+ break;
+ }
+
+ map += frag_len;
+ }
+
+ if (len) {
+ if (*budget) {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ len, flags, mss, vlan);
+ (*budget)--;
+ *entry = NEXT_TX(*entry);
+ } else {
+ hwbug = 1;
+ }
+ }
+ } else {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ len, flags, mss, vlan);
+ *entry = NEXT_TX(*entry);
}
- vlan_tag |= (mss << TXD_MSS_SHIFT);
- txd->addr_hi = ((u64) mapping >> 32);
- txd->addr_lo = ((u64) mapping & 0xffffffff);
- txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
- txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
+ return hwbug;
}
-static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
- struct sk_buff *skb, int last)
+static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
int i;
- u32 entry = tnapi->tx_prod;
- struct ring_info *txb = &tnapi->tx_buffers[entry];
+ struct sk_buff *skb;
+ struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
+
+ skb = txb->skb;
+ txb->skb = NULL;
pci_unmap_single(tnapi->tp->pdev,
dma_unmap_addr(txb, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
+
+ while (txb->fragmented) {
+ txb->fragmented = false;
+ entry = NEXT_TX(entry);
+ txb = &tnapi->tx_buffers[entry];
+ }
+
for (i = 0; i < last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -5783,18 +6018,24 @@ static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
pci_unmap_page(tnapi->tp->pdev,
dma_unmap_addr(txb, mapping),
frag->size, PCI_DMA_TODEVICE);
+
+ while (txb->fragmented) {
+ txb->fragmented = false;
+ entry = NEXT_TX(entry);
+ txb = &tnapi->tx_buffers[entry];
+ }
}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
struct sk_buff *skb,
- u32 base_flags, u32 mss)
+ u32 *entry, u32 *budget,
+ u32 base_flags, u32 mss, u32 vlan)
{
struct tg3 *tp = tnapi->tp;
struct sk_buff *new_skb;
dma_addr_t new_addr = 0;
- u32 entry = tnapi->tx_prod;
int ret = 0;
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
@@ -5815,25 +6056,22 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
PCI_DMA_TODEVICE);
/* Make sure the mapping succeeded */
if (pci_dma_mapping_error(tp->pdev, new_addr)) {
- ret = -1;
dev_kfree_skb(new_skb);
-
- /* Make sure new skb does not cross any 4G boundaries.
- * Drop the packet if it does.
- */
- } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
- tg3_4g_overflow_test(new_addr, new_skb->len)) {
- pci_unmap_single(tp->pdev, new_addr, new_skb->len,
- PCI_DMA_TODEVICE);
ret = -1;
- dev_kfree_skb(new_skb);
} else {
- tnapi->tx_buffers[entry].skb = new_skb;
- dma_unmap_addr_set(&tnapi->tx_buffers[entry],
+ base_flags |= TXD_FLAG_END;
+
+ tnapi->tx_buffers[*entry].skb = new_skb;
+ dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
mapping, new_addr);
- tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
- base_flags, 1 | (mss << 1));
+ if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
+ new_skb->len, base_flags,
+ mss, vlan)) {
+ tg3_tx_skb_unmap(tnapi, *entry, 0);
+ dev_kfree_skb(new_skb);
+ ret = -1;
+ }
}
}
@@ -5891,7 +6129,8 @@ tg3_tso_bug_end:
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
- u32 len, entry, base_flags, mss;
+ u32 len, entry, base_flags, mss, vlan = 0;
+ u32 budget;
int i = -1, would_hit_hwbug;
dma_addr_t mapping;
struct tg3_napi *tnapi;
@@ -5903,12 +6142,14 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (tg3_flag(tp, ENABLE_TSS))
tnapi++;
+ budget = tg3_tx_avail(tnapi);
+
/* We are running in BH disabled context with netif_tx_lock
* and TX reclaim runs via tp->napi.poll inside of a software
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
- if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_tx_queue_stopped(txq)) {
netif_tx_stop_queue(txq);
@@ -5993,9 +6234,12 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (vlan_tx_tag_present(skb))
- base_flags |= (TXD_FLAG_VLAN |
- (vlan_tx_tag_get(skb) << 16));
+#ifdef BCM_KERNEL_SUPPORTS_8021Q
+ if (vlan_tx_tag_present(skb)) {
+ base_flags |= TXD_FLAG_VLAN;
+ vlan = vlan_tx_tag_get(skb);
+ }
+#endif
if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
!mss && skb->len > VLAN_ETH_FRAME_LEN)
@@ -6014,27 +6258,23 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
would_hit_hwbug = 0;
- if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
- would_hit_hwbug = 1;
-
- if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
- tg3_4g_overflow_test(mapping, len))
- would_hit_hwbug = 1;
-
- if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
- tg3_40bit_overflow_test(tp, mapping, len))
- would_hit_hwbug = 1;
-
if (tg3_flag(tp, 5701_DMA_BUG))
would_hit_hwbug = 1;
- tg3_set_txd(tnapi, entry, mapping, len, base_flags,
- (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
-
- entry = NEXT_TX(entry);
+ if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
+ ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
+ mss, vlan))
+ would_hit_hwbug = 1;
/* Now loop through additional data fragments, and queue them. */
if (skb_shinfo(skb)->nr_frags > 0) {
+ u32 tmp_mss = mss;
+
+ if (!tg3_flag(tp, HW_TSO_1) &&
+ !tg3_flag(tp, HW_TSO_2) &&
+ !tg3_flag(tp, HW_TSO_3))
+ tmp_mss = 0;
+
last = skb_shinfo(skb)->nr_frags - 1;
for (i = 0; i <= last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -6051,43 +6291,29 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pci_dma_mapping_error(tp->pdev, mapping))
goto dma_error;
- if (tg3_flag(tp, SHORT_DMA_BUG) &&
- len <= 8)
- would_hit_hwbug = 1;
-
- if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
- tg3_4g_overflow_test(mapping, len))
- would_hit_hwbug = 1;
-
- if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
- tg3_40bit_overflow_test(tp, mapping, len))
+ if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
+ len, base_flags |
+ ((i == last) ? TXD_FLAG_END : 0),
+ tmp_mss, vlan))
would_hit_hwbug = 1;
-
- if (tg3_flag(tp, HW_TSO_1) ||
- tg3_flag(tp, HW_TSO_2) ||
- tg3_flag(tp, HW_TSO_3))
- tg3_set_txd(tnapi, entry, mapping, len,
- base_flags, (i == last)|(mss << 1));
- else
- tg3_set_txd(tnapi, entry, mapping, len,
- base_flags, (i == last));
-
- entry = NEXT_TX(entry);
}
}
if (would_hit_hwbug) {
- tg3_skb_error_unmap(tnapi, skb, i);
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
/* If the workaround fails due to memory/mapping
* failure, silently drop this packet.
*/
- if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
+ entry = tnapi->tx_prod;
+ budget = tg3_tx_avail(tnapi);
+ if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
+ base_flags, mss, vlan))
goto out_unlock;
-
- entry = NEXT_TX(tnapi->tx_prod);
}
+ skb_tx_timestamp(skb);
+
/* Packets are ready, update Tx producer idx local and on card. */
tw32_tx_mbox(tnapi->prodmbox, entry);
@@ -6111,7 +6337,7 @@ out_unlock:
return NETDEV_TX_OK;
dma_error:
- tg3_skb_error_unmap(tnapi, skb, i);
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
dev_kfree_skb(skb);
tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
return NETDEV_TX_OK;
@@ -6444,35 +6670,13 @@ static void tg3_free_rings(struct tg3 *tp)
if (!tnapi->tx_buffers)
continue;
- for (i = 0; i < TG3_TX_RING_SIZE; ) {
- struct ring_info *txp;
- struct sk_buff *skb;
- unsigned int k;
+ for (i = 0; i < TG3_TX_RING_SIZE; i++) {
+ struct sk_buff *skb = tnapi->tx_buffers[i].skb;
- txp = &tnapi->tx_buffers[i];
- skb = txp->skb;
-
- if (skb == NULL) {
- i++;
+ if (!skb)
continue;
- }
- pci_unmap_single(tp->pdev,
- dma_unmap_addr(txp, mapping),
- skb_headlen(skb),
- PCI_DMA_TODEVICE);
- txp->skb = NULL;
-
- i++;
-
- for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
- txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
- pci_unmap_page(tp->pdev,
- dma_unmap_addr(txp, mapping),
- skb_shinfo(skb)->frags[k].size,
- PCI_DMA_TODEVICE);
- i++;
- }
+ tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
dev_kfree_skb_any(skb);
}
@@ -6604,9 +6808,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
*/
if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
(i && tg3_flag(tp, ENABLE_TSS))) {
- tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
- TG3_TX_RING_SIZE,
- GFP_KERNEL);
+ tnapi->tx_buffers = kzalloc(
+ sizeof(struct tg3_tx_ring_info) *
+ TG3_TX_RING_SIZE, GFP_KERNEL);
if (!tnapi->tx_buffers)
goto err_out;
@@ -7193,7 +7397,7 @@ static int tg3_chip_reset(struct tg3 *tp)
udelay(120);
- if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
+ if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
u16 val16;
if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
@@ -7211,7 +7415,7 @@ static int tg3_chip_reset(struct tg3 *tp)
/* Clear the "no snoop" and "relaxed ordering" bits. */
pci_read_config_word(tp->pdev,
- tp->pcie_cap + PCI_EXP_DEVCTL,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
&val16);
val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
PCI_EXP_DEVCTL_NOSNOOP_EN);
@@ -7222,14 +7426,14 @@ static int tg3_chip_reset(struct tg3 *tp)
if (!tg3_flag(tp, CPMU_PRESENT))
val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
pci_write_config_word(tp->pdev,
- tp->pcie_cap + PCI_EXP_DEVCTL,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
val16);
pcie_set_readrq(tp->pdev, tp->pcie_readrq);
/* Clear error status */
pci_write_config_word(tp->pdev,
- tp->pcie_cap + PCI_EXP_DEVSTA,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
PCI_EXP_DEVSTA_CED |
PCI_EXP_DEVSTA_NFED |
PCI_EXP_DEVSTA_FED |
@@ -7267,16 +7471,11 @@ static int tg3_chip_reset(struct tg3 *tp)
tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
}
- if (tg3_flag(tp, ENABLE_APE))
- tp->mac_mode = MAC_MODE_APE_TX_EN |
- MAC_MODE_APE_RX_EN |
- MAC_MODE_TDE_ENABLE;
-
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
- tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
+ tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
val = tp->mac_mode;
} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
- tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
val = tp->mac_mode;
} else
val = 0;
@@ -7751,6 +7950,9 @@ static void tg3_rings_reset(struct tg3 *tp)
/* Disable interrupts */
tw32_mailbox_f(tp->napi[0].int_mbox, 1);
+ tp->napi[0].chk_msi_cnt = 0;
+ tp->napi[0].last_rx_cons = 0;
+ tp->napi[0].last_tx_cons = 0;
/* Zero mailbox registers. */
if (tg3_flag(tp, SUPPORT_MSIX)) {
@@ -7761,6 +7963,9 @@ static void tg3_rings_reset(struct tg3 *tp)
tw32_mailbox(tp->napi[i].prodmbox, 0);
tw32_rx_mbox(tp->napi[i].consmbox, 0);
tw32_mailbox_f(tp->napi[i].int_mbox, 1);
+			tp->napi[i].chk_msi_cnt = 0;
+ tp->napi[i].last_rx_cons = 0;
+ tp->napi[i].last_tx_cons = 0;
}
if (!tg3_flag(tp, ENABLE_TSS))
tw32_mailbox(tp->napi[0].prodmbox, 0);
@@ -8201,7 +8406,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Program the jumbo buffer descriptor ring control
* blocks on those devices that have them.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
(tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
@@ -8408,12 +8613,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(10);
}
- if (tg3_flag(tp, ENABLE_APE))
- tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
- else
- tp->mac_mode = 0;
tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
- MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
+ MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
+ MAC_MODE_FHDE_ENABLE;
+ if (tg3_flag(tp, ENABLE_APE))
+ tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
if (!tg3_flag(tp, 5705_PLUS) &&
!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
@@ -8565,15 +8769,24 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(100);
if (tg3_flag(tp, ENABLE_RSS)) {
+ int i = 0;
u32 reg = MAC_RSS_INDIR_TBL_0;
- u8 *ent = (u8 *)&val;
- /* Setup the indirection table */
- for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
- int idx = i % sizeof(val);
+ if (tp->irq_cnt == 2) {
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
+ tw32(reg, 0x0);
+ reg += 4;
+ }
+ } else {
+ u32 val;
- ent[idx] = i % (tp->irq_cnt - 1);
- if (idx == sizeof(val) - 1) {
+ while (i < TG3_RSS_INDIR_TBL_SIZE) {
+ val = i % (tp->irq_cnt - 1);
+ i++;
+ for (; i % 8; i++) {
+ val <<= 4;
+ val |= (i % (tp->irq_cnt - 1));
+ }
tw32(reg, val);
reg += 4;
}
@@ -8816,6 +9029,30 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
+static void tg3_chk_missed_msi(struct tg3 *tp)
+{
+ u32 i;
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ if (tg3_has_work(tnapi)) {
+ if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
+ tnapi->last_tx_cons == tnapi->tx_cons) {
+ if (tnapi->chk_msi_cnt < 1) {
+ tnapi->chk_msi_cnt++;
+ return;
+ }
+ tw32_mailbox(tnapi->int_mbox,
+ tnapi->last_tag << 24);
+ }
+ }
+ tnapi->chk_msi_cnt = 0;
+ tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
+ tnapi->last_tx_cons = tnapi->tx_cons;
+ }
+}
+
static void tg3_timer(unsigned long __opaque)
{
struct tg3 *tp = (struct tg3 *) __opaque;
@@ -8825,6 +9062,10 @@ static void tg3_timer(unsigned long __opaque)
spin_lock(&tp->lock);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_chk_missed_msi(tp);
+
if (!tg3_flag(tp, TAGGED_STATUS)) {
/* All of this garbage is because when using non-tagged
* IRQ status the mailbox/status_block protocol the chip
@@ -8988,7 +9229,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
* Turn off MSI one shot mode. Otherwise this test has no
* observable way to know whether the interrupt was delivered.
*/
- if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
+ if (tg3_flag(tp, 57765_PLUS)) {
val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
tw32(MSGINT_MODE, val);
}
@@ -9016,6 +9257,10 @@ static int tg3_test_interrupt(struct tg3 *tp)
break;
}
+ if (tg3_flag(tp, 57765_PLUS) &&
+ tnapi->hw_status->status_tag != tnapi->last_tag)
+ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+
msleep(10);
}
@@ -9030,7 +9275,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
if (intr_ok) {
/* Reenable MSI one shot mode. */
- if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
+ if (tg3_flag(tp, 57765_PLUS)) {
val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
tw32(MSGINT_MODE, val);
}
@@ -9300,7 +9545,9 @@ static int tg3_open(struct net_device *dev)
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_free_rings(tp);
} else {
- if (tg3_flag(tp, TAGGED_STATUS))
+ if (tg3_flag(tp, TAGGED_STATUS) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
tp->timer_offset = HZ;
else
tp->timer_offset = HZ / 10;
@@ -9376,6 +9623,8 @@ err_out2:
err_out1:
tg3_ints_fini(tp);
+ tg3_frob_aux_power(tp, false);
+ pci_set_power_state(tp->pdev, PCI_D3hot);
return err;
}
@@ -9902,6 +10151,18 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
cmd->advertising = tp->link_config.advertising;
+ if (tg3_flag(tp, PAUSE_AUTONEG)) {
+ if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
+ if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
+ cmd->advertising |= ADVERTISED_Pause;
+ } else {
+ cmd->advertising |= ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
+ }
+ } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
+ cmd->advertising |= ADVERTISED_Asym_Pause;
+ }
+ }
if (netif_running(dev)) {
ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
cmd->duplex = tp->link_config.active_duplex;
@@ -10358,7 +10619,7 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
-static __be32 * tg3_vpd_readblock(struct tg3 *tp)
+static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
int i;
__be32 *buf;
@@ -10425,6 +10686,8 @@ static __be32 * tg3_vpd_readblock(struct tg3 *tp)
goto error;
}
+ *vpdlen = len;
+
return buf;
error:
@@ -10436,12 +10699,15 @@ error:
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
+#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
+#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
+#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
static int tg3_test_nvram(struct tg3 *tp)
{
- u32 csum, magic;
+ u32 csum, magic, len;
__be32 *buf;
int i, j, k, err = 0, size;
@@ -10466,8 +10732,17 @@ static int tg3_test_nvram(struct tg3 *tp)
case TG3_EEPROM_SB_REVISION_3:
size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
break;
+ case TG3_EEPROM_SB_REVISION_4:
+ size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
+ break;
+ case TG3_EEPROM_SB_REVISION_5:
+ size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
+ break;
+ case TG3_EEPROM_SB_REVISION_6:
+ size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
+ break;
default:
- return 0;
+ return -EIO;
}
} else
return 0;
@@ -10573,18 +10848,17 @@ static int tg3_test_nvram(struct tg3 *tp)
kfree(buf);
- buf = tg3_vpd_readblock(tp);
+ buf = tg3_vpd_readblock(tp, &len);
if (!buf)
return -ENOMEM;
- i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
- PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
if (i > 0) {
j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
if (j < 0)
goto out;
- if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
+ if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
goto out;
i += PCI_VPD_LRDT_TAG_SIZE;
@@ -10976,6 +11250,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
{
u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
+ u32 budget;
struct sk_buff *skb, *rx_skb;
u8 *tx_data;
dma_addr_t map;
@@ -11135,6 +11410,10 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
return -EIO;
}
+ val = tnapi->tx_prod;
+ tnapi->tx_buffers[val].skb = skb;
+ dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
+
tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
rnapi->coal_now);
@@ -11142,8 +11421,13 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
- tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
- base_flags, (mss << 1) | 1);
+ budget = tg3_tx_avail(tnapi);
+ if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
+ base_flags | TXD_FLAG_END, mss, 0)) {
+ tnapi->tx_buffers[val].skb = NULL;
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
tnapi->tx_prod++;
@@ -11166,7 +11450,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
break;
}
- pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
dev_kfree_skb(skb);
if (tx_idx != tnapi->tx_prod)
@@ -11340,8 +11624,12 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
{
struct tg3 *tp = netdev_priv(dev);
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- tg3_power_up(tp);
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
+ tg3_power_up(tp)) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
+ return;
+ }
memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
@@ -12585,29 +12873,6 @@ static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
u32 val;
- u16 pmcsr;
-
- /* On some early chips the SRAM cannot be accessed in D3hot state,
- * so need make sure we're in D0.
- */
- pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
- msleep(1);
-
- /* Make sure register accesses (indirect or otherwise)
- * will function correctly.
- */
- pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
- tp->misc_host_ctrl);
-
- /* The memory arbiter has to be enabled in order for SRAM accesses
- * to succeed. Normally on powerup the tg3 chip firmware will make
- * sure it is enabled, but other entities such as system netboot
- * code might disable it.
- */
- val = tr32(MEMARB_MODE);
- tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
tp->phy_id = TG3_PHY_ID_INVALID;
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
@@ -12947,7 +13212,9 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
}
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
- ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
+ (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
@@ -12999,14 +13266,14 @@ static void __devinit tg3_read_vpd(struct tg3 *tp)
{
u8 *vpd_data;
unsigned int block_end, rosize, len;
+ u32 vpdlen;
int j, i = 0;
- vpd_data = (u8 *)tg3_vpd_readblock(tp);
+ vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
if (!vpd_data)
goto out_no_vpd;
- i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
- PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
if (i < 0)
goto out_not_found;
@@ -13014,7 +13281,7 @@ static void __devinit tg3_read_vpd(struct tg3 *tp)
block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
i += PCI_VPD_LRDT_TAG_SIZE;
- if (block_end > TG3_NVM_VPD_LEN)
+ if (block_end > vpdlen)
goto out_not_found;
j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
@@ -13039,7 +13306,7 @@ static void __devinit tg3_read_vpd(struct tg3 *tp)
goto partno;
memcpy(tp->fw_ver, &vpd_data[j], len);
- strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
+ strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
}
partno:
@@ -13052,7 +13319,7 @@ partno:
i += PCI_VPD_INFO_FLD_HDR_SIZE;
if (len > TG3_BPN_SIZE ||
- (len + i) > TG3_NVM_VPD_LEN)
+ (len + i) > vpdlen)
goto out_not_found;
memcpy(tp->board_part_number, &vpd_data[i], len);
@@ -13353,10 +13620,15 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
else
return;
- if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
+ if (vpd_vers)
goto done;
- tg3_read_mgmtfw_ver(tp);
+ if (tg3_flag(tp, ENABLE_APE)) {
+ if (tg3_flag(tp, ENABLE_ASF))
+ tg3_read_dash_ver(tp);
+ } else if (tg3_flag(tp, ENABLE_ASF)) {
+ tg3_read_mgmtfw_ver(tp);
+ }
done:
tp->fw_ver[TG3_VER_SIZE - 1] = 0;
@@ -13400,14 +13672,17 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
pci_cmd &= ~PCI_COMMAND_INVALIDATE;
pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
- /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
- * has the register indirect write enable bit set before
- * we try to access any of the MMIO registers. It is also
- * critical that the PCI-X hw workaround situation is decided
- * before that as well.
+ /* Important! -- Make sure register accesses are byteswapped
+ * correctly. Also, for those chips that require it, make
+ * sure that indirect register accesses are enabled before
+ * the first operation.
*/
pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
&misc_ctrl_reg);
+ tp->misc_host_ctrl |= (misc_ctrl_reg &
+ MISC_HOST_CTRL_CHIPREV);
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
tp->pci_chip_rev_id = (misc_ctrl_reg >>
MISC_HOST_CTRL_CHIPREV_SHIFT);
@@ -13563,16 +13838,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
} while (bridge);
}
- /* Initialize misc host control in PCI block. */
- tp->misc_host_ctrl |= (misc_ctrl_reg &
- MISC_HOST_CTRL_CHIPREV);
- pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
- tp->misc_host_ctrl);
-
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
tp->pdev_peer = tg3_find_peer(tp);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
@@ -13606,7 +13873,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, 5705_PLUS);
/* Determine TSO capabilities */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
; /* Do nothing. HW bug. */
else if (tg3_flag(tp, 57765_PLUS))
tg3_flag_set(tp, HW_TSO_3);
@@ -13666,21 +13933,17 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
}
- /* All chips can get confused if TX buffers
- * straddle the 4GB address boundary.
- */
- tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
-
if (tg3_flag(tp, 5755_PLUS))
tg3_flag_set(tp, SHORT_DMA_BUG);
- else
- tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ tg3_flag_set(tp, 4K_FIFO_LIMIT);
if (tg3_flag(tp, 5717_PLUS))
tg3_flag_set(tp, LRG_PROD_RING_CAP);
if (tg3_flag(tp, 57765_PLUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
+ tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
tg3_flag_set(tp, USE_JUMBO_BDFLAG);
if (!tg3_flag(tp, 5705_PLUS) ||
@@ -13691,8 +13954,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
&pci_state_reg);
- tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
- if (tp->pcie_cap != 0) {
+ if (pci_is_pcie(tp->pdev)) {
u16 lnkctl;
tg3_flag_set(tp, PCI_EXPRESS);
@@ -13705,7 +13967,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
pcie_set_readrq(tp->pdev, tp->pcie_readrq);
pci_read_config_word(tp->pdev,
- tp->pcie_cap + PCI_EXP_LNKCTL,
+ pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
&lnkctl);
if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
@@ -13722,6 +13984,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, L1PLLPD_EN);
}
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ /* BCM5785 devices are effectively PCIe devices, and should
+ * follow PCIe codepaths, but do not have a PCIe capabilities
+ * section.
+ */
tg3_flag_set(tp, PCI_EXPRESS);
} else if (!tg3_flag(tp, 5705_PLUS) ||
tg3_flag(tp, 5780_CLASS)) {
@@ -13757,6 +14023,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pci_lat_timer);
}
+ /* Important! -- It is critical that the PCI-X hw workaround
+ * situation is decided before the first MMIO register access.
+ */
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
/* 5700 BX chips need to have their TX producer index
* mailboxes written twice to workaround a bug.
@@ -13863,6 +14132,22 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
tg3_flag_set(tp, SRAM_USE_CONFIG);
+ /* The memory arbiter has to be enabled in order for SRAM accesses
+ * to succeed. Normally on powerup the tg3 chip firmware will make
+ * sure it is enabled, but other entities such as system netboot
+ * code might disable it.
+ */
+ val = tr32(MEMARB_MODE);
+ tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
+ if (tg3_flag(tp, PCIX_MODE)) {
+ pci_read_config_dword(tp->pdev,
+ tp->pcix_cap + PCI_X_STATUS, &val);
+ tp->pci_fn = val & 0x7;
+ } else {
+ tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
+ }
+
/* Get eeprom hw config before calling tg3_set_power_state().
* In particular, the TG3_FLAG_IS_NIC flag must be
* determined before calling tg3_set_power_state() so that
@@ -13882,6 +14167,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
PCISTATE_ALLOW_APE_PSPACE_WR;
pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
pci_state_reg);
+
+ tg3_ape_lock_init(tp);
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -13891,8 +14178,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag(tp, 57765_PLUS))
tg3_flag_set(tp, CPMU_PRESENT);
- /* Set up tp->grc_local_ctrl before calling tg3_power_up().
- * GPIO1 driven high will bring 5700's external PHY out of reset.
+ /* Set up tp->grc_local_ctrl before calling
+ * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
+ * will bring 5700's external PHY out of reset.
* It is also used as eeprom write protect on LOMs.
*/
tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
@@ -13921,12 +14209,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GRC_LCLCTRL_GPIO_OUTPUT0;
}
- /* Force the chip into D0. */
- err = tg3_power_up(tp);
- if (err) {
- dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
- return err;
- }
+ /* Switch out of Vaux if it is a NIC */
+ tg3_pwrsrc_switch_to_vmain(tp);
/* Derive initial jumbo mode from MTU assigned in
* ether_setup() via the alloc_etherdev() call
@@ -14229,9 +14513,9 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
else
tg3_nvram_unlock(tp);
} else if (tg3_flag(tp, 5717_PLUS)) {
- if (PCI_FUNC(tp->pdev->devfn) & 1)
+ if (tp->pci_fn & 1)
mac_offset = 0xcc;
- if (PCI_FUNC(tp->pdev->devfn) > 1)
+ if (tp->pci_fn > 1)
mac_offset += 0x18c;
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
mac_offset = 0x10;
@@ -14941,11 +15225,17 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
goto err_out_free_res;
}
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err) {
+ dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
+ goto err_out_free_res;
+ }
+
dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
if (!dev) {
dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
err = -ENOMEM;
- goto err_out_free_res;
+ goto err_out_power_down;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -14994,6 +15284,24 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
goto err_out_free_dev;
}
+ if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+ tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
+ tg3_flag_set(tp, ENABLE_APE);
+ tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
+ if (!tp->aperegs) {
+ dev_err(&pdev->dev,
+ "Cannot map APE registers, aborting\n");
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+ }
+
tp->rx_pending = TG3_DEF_RX_RING_PENDING;
tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
@@ -15006,7 +15314,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev,
"Problem fetching invariants of chip, aborting\n");
- goto err_out_iounmap;
+ goto err_out_apeunmap;
}
/* The EPB bridge inside 5714, 5715, and 5780 and any
@@ -15035,7 +15343,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (err < 0) {
dev_err(&pdev->dev, "Unable to obtain 64 bit "
"DMA for consistent allocations\n");
- goto err_out_iounmap;
+ goto err_out_apeunmap;
}
}
}
@@ -15044,7 +15352,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev,
"No usable DMA configuration, aborting\n");
- goto err_out_iounmap;
+ goto err_out_apeunmap;
}
}
@@ -15109,22 +15417,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev,
"Could not obtain valid ethernet address, aborting\n");
- goto err_out_iounmap;
- }
-
- if (tg3_flag(tp, ENABLE_APE)) {
- tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
- if (!tp->aperegs) {
- dev_err(&pdev->dev,
- "Cannot map APE registers, aborting\n");
- err = -ENOMEM;
- goto err_out_iounmap;
- }
-
- tg3_ape_lock_init(tp);
-
- if (tg3_flag(tp, ENABLE_ASF))
- tg3_read_dash_ver(tp);
+ goto err_out_apeunmap;
}
/*
@@ -15192,6 +15485,11 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
+ if (tg3_flag(tp, 5717_PLUS)) {
+ /* Resume a low-power mode */
+ tg3_frob_aux_power(tp, false);
+ }
+
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting\n");
@@ -15257,6 +15555,9 @@ err_out_iounmap:
err_out_free_dev:
free_netdev(dev);
+err_out_power_down:
+ pci_set_power_state(pdev, PCI_D3hot);
+
err_out_free_res:
pci_release_regions(pdev);
@@ -15481,10 +15782,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
}
err = tg3_power_up(tp);
- if (err) {
- netdev_err(netdev, "Failed to restore register access.\n");
+ if (err)
goto done;
- }
rc = PCI_ERS_RESULT_RECOVERED;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 5b3d2f34da7..2ea456dd588 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1065,6 +1065,8 @@
#define RCVLSC_STATUS_ERROR_ATTN 0x00000004
/* 0x3408 --> 0x3600 unused */
+#define TG3_CPMU_DRV_STATUS 0x0000344c
+
/* CPMU registers */
#define TG3_CPMU_CTRL 0x00003600
#define CPMU_CTRL_LINK_IDLE_MODE 0x00000200
@@ -1118,10 +1120,10 @@
#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000
#define TG3_CPMU_EEE_DBTMR1 0x000036b4
#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
-#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff
+#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000007ff
#define TG3_CPMU_EEE_DBTMR2 0x000036b8
#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000
-#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff
+#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000007ff
#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004
@@ -2152,14 +2154,6 @@
/*** Tigon3 specific PHY MII registers. ***/
-#define TG3_BMCR_SPEED1000 0x0040
-
-#define MII_TG3_CTRL 0x09 /* 1000-baseT control register */
-#define MII_TG3_CTRL_ADV_1000_HALF 0x0100
-#define MII_TG3_CTRL_ADV_1000_FULL 0x0200
-#define MII_TG3_CTRL_AS_MASTER 0x0800
-#define MII_TG3_CTRL_ENABLE_AS_MASTER 0x1000
-
#define MII_TG3_MMD_CTRL 0x0d /* MMD Access Control register */
#define MII_TG3_MMD_CTRL_DATA_NOINC 0x4000
#define MII_TG3_MMD_ADDRESS 0x0e /* MMD Address Data register */
@@ -2186,7 +2180,7 @@
#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004
#define MII_TG3_DSP_AADJ1CH0 0x001f
#define MII_TG3_DSP_CH34TP2 0x4022
-#define MII_TG3_DSP_CH34TP2_HIBW01 0x017b
+#define MII_TG3_DSP_CH34TP2_HIBW01 0x01ff
#define MII_TG3_DSP_AADJ1CH3 0x601f
#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
#define MII_TG3_DSP_EXP1_INT_STAT 0x0f01
@@ -2285,6 +2279,8 @@
/* APE registers. Accessible through BAR1 */
+#define TG3_APE_GPIO_MSG 0x0008
+#define TG3_APE_GPIO_MSG_SHIFT 4
#define TG3_APE_EVENT 0x000c
#define APE_EVENT_1 0x00000001
#define TG3_APE_LOCK_REQ 0x002c
@@ -2347,6 +2343,7 @@
/* APE convenience enumerations. */
#define TG3_APE_LOCK_GRC 1
#define TG3_APE_LOCK_MEM 4
+#define TG3_APE_LOCK_GPIO 7
#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
@@ -2655,6 +2652,12 @@ struct ring_info {
DEFINE_DMA_UNMAP_ADDR(mapping);
};
+struct tg3_tx_ring_info {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ bool fragmented;
+};
+
struct tg3_link_config {
/* Describes what we're trying to get. */
u32 advertising;
@@ -2800,6 +2803,7 @@ struct tg3_napi {
struct tg3 *tp;
struct tg3_hw_status *hw_status;
+ u32 chk_msi_cnt;
u32 last_tag;
u32 last_irq_tag;
u32 int_mbox;
@@ -2807,6 +2811,7 @@ struct tg3_napi {
u32 consmbox ____cacheline_aligned;
u32 rx_rcb_ptr;
+ u32 last_rx_cons;
u16 *rx_rcb_prod_idx;
struct tg3_rx_prodring_set prodring;
struct tg3_rx_buffer_desc *rx_rcb;
@@ -2814,9 +2819,10 @@ struct tg3_napi {
u32 tx_prod ____cacheline_aligned;
u32 tx_cons;
u32 tx_pending;
+ u32 last_tx_cons;
u32 prodmbox;
struct tg3_tx_buffer_desc *tx_ring;
- struct ring_info *tx_buffers;
+ struct tg3_tx_ring_info *tx_buffers;
dma_addr_t status_mapping;
dma_addr_t rx_rcb_mapping;
@@ -2862,7 +2868,7 @@ enum TG3_FLAGS {
TG3_FLAG_IS_5788,
TG3_FLAG_MAX_RXPEND_64,
TG3_FLAG_TSO_CAPABLE,
- TG3_FLAG_PCI_EXPRESS,
+ TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
TG3_FLAG_ASF_NEW_HANDSHAKE,
TG3_FLAG_HW_AUTONEG,
TG3_FLAG_IS_NIC,
@@ -2893,14 +2899,13 @@ enum TG3_FLAGS {
TG3_FLAG_NO_NVRAM,
TG3_FLAG_ENABLE_RSS,
TG3_FLAG_ENABLE_TSS,
- TG3_FLAG_4G_DMA_BNDRY_BUG,
- TG3_FLAG_40BIT_DMA_LIMIT_BUG,
TG3_FLAG_SHORT_DMA_BUG,
TG3_FLAG_USE_JUMBO_BDFLAG,
TG3_FLAG_L1PLLPD_EN,
TG3_FLAG_57765_PLUS,
TG3_FLAG_APE_HAS_NCSI,
TG3_FLAG_5717_PLUS,
+ TG3_FLAG_4K_FIFO_LIMIT,
/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
@@ -3027,12 +3032,10 @@ struct tg3 {
u8 pci_cacheline_sz;
u8 pci_lat_timer;
+ int pci_fn;
int pm_cap;
int msi_cap;
- union {
int pcix_cap;
- int pcie_cap;
- };
int pcie_readrq;
struct mii_bus *mdio_bus;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index ace6404e2fa..145871b3130 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -29,8 +29,10 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/eisa.h>
#include <linux/pci.h>
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index ff32befd844..b6162fe2348 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -304,7 +304,7 @@ static int __devinit xl_probe(struct pci_dev *pdev,
if ((i = pci_request_regions(pdev,"3c359"))) {
return i ;
- } ;
+ }
/*
* Allowing init_trdev to allocate the private data will align
@@ -1773,7 +1773,9 @@ static void xl_wait_misr_flags(struct net_device *dev)
if (readb(xl_mmio + MMIO_MACDATA) != 0) { /* Misr not clear */
for (i=0; i<6; i++) {
writel(MEM_BYTE_READ | 0xDFFE0 | i, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
- while (readb(xl_mmio + MMIO_MACDATA) != 0 ) {} ; /* Empty Loop */
+ while (readb(xl_mmio + MMIO_MACDATA) != 0) {
+ ; /* Empty Loop */
+ }
}
}
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 4786497de03..e257a00fe14 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -123,6 +123,7 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */
/* some 95 OS send many non UI frame; this allow removing the warning */
#define TR_FILTERNONUI 1
+#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
@@ -177,7 +178,7 @@ static char __devinit *adapter_def(char type)
case 0xD: return "16/4 Adapter/A (short) | 16/4 ISA-16 Adapter";
case 0xC: return "Auto 16/4 Adapter";
default: return "adapter (unknown type)";
- };
+ }
};
#define TRC_INIT 0x01 /* Trace initialization & PROBEs */
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 2bedc0ace81..6153cfd696b 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -418,7 +418,7 @@ static irqreturn_t madgemc_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
- dev = (struct net_device *)dev_id;
+ dev = dev_id;
/* Make sure its really us. -- the Madge way */
pending = inb(dev->base_addr + MC_CONTROL_REG0);
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 5c633a32eae..64cb9ac19ed 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index e2f69235118..ce90efc6ba3 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -38,6 +38,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index efaa1d69b72..959b41021a6 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1868,14 +1868,13 @@ de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
i = DE4X5_PKT_STAT_SZ;
}
}
- if (buf[0] & 0x01) { /* Multicast/Broadcast */
- if ((*(s32 *)&buf[0] == -1) && (*(s16 *)&buf[4] == -1)) {
+ if (is_multicast_ether_addr(buf)) {
+ if (is_broadcast_ether_addr(buf)) {
lp->pktStats.broadcast++;
} else {
lp->pktStats.multicast++;
}
- } else if ((*(s32 *)&buf[0] == *(s32 *)&dev->dev_addr[0]) &&
- (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
+ } else if (compare_ether_addr(buf, dev->dev_addr) == 0) {
lp->pktStats.unicast++;
}
@@ -1964,9 +1963,7 @@ SetMulticastFilter(struct net_device *dev)
omr |= OMR_PM; /* Pass all multicasts */
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
- if ((*addrs & 0x01) == 1) { /* multicast address? */
- crc = ether_crc_le(ETH_ALEN, addrs);
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
@@ -1977,7 +1974,6 @@ SetMulticastFilter(struct net_device *dev)
byte -= 1;
}
lp->setup_frame[byte] |= bit;
- }
}
} else { /* Perfect filtering */
netdev_for_each_mc_addr(ha, dev) {
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 46851273196..9a21ca3873f 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -879,7 +879,6 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
txptr = db->tx_remove_ptr;
while(db->tx_packet_cnt) {
tdes0 = le32_to_cpu(txptr->tdes0);
- pr_debug("tdes0=%x\n", tdes0);
if (tdes0 & 0x80000000)
break;
@@ -889,7 +888,6 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
/* Transmit statistic counter */
if ( tdes0 != 0x7fffffff ) {
- pr_debug("tdes0=%x\n", tdes0);
dev->stats.collisions += (tdes0 >> 3) & 0xf;
dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
if (tdes0 & TDES0_ERR_MASK) {
@@ -986,7 +984,6 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
/* error summary bit check */
if (rdes0 & 0x8000) {
/* This is a error packet */
- pr_debug("rdes0: %x\n", rdes0);
dev->stats.rx_errors++;
if (rdes0 & 1)
dev->stats.rx_fifo_errors++;
@@ -1638,7 +1635,6 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
else /* DM9102/DM9102A */
phy_mode = phy_read(db->ioaddr,
db->phy_addr, 17, db->chip_id) & 0xf000;
- pr_debug("Phy_mode %x\n", phy_mode);
switch (phy_mode) {
case 0x1000: db->op_mode = DMFE_10MHF; break;
case 0x2000: db->op_mode = DMFE_10MFD; break;
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index aa4d9dad039..52d898bdbeb 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -13,6 +13,7 @@
Please submit bugs to http://bugzilla.kernel.org/ .
*/
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include "tulip.h"
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 82f87647207..1246998a677 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include "tulip.h"
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/mii.h>
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 5235f48be1b..71f3d1a35b7 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -528,6 +528,7 @@ static void tun_net_init(struct net_device *dev)
dev->netdev_ops = &tap_netdev_ops;
/* Ethernet TAP Device */
ether_setup(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
random_ether_addr(dev->dev_addr);
@@ -572,9 +573,9 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
/* prepad is the amount to reserve at front. len is length after that.
* linear is a hint as to how much to copy (usually headers). */
-static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
- size_t prepad, size_t len,
- size_t linear, int noblock)
+static struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
+ size_t prepad, size_t len,
+ size_t linear, int noblock)
{
struct sock *sk = tun->socket.sk;
struct sk_buff *skb;
@@ -600,13 +601,13 @@ static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
}
/* Get packet from user space buffer */
-static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
- const struct iovec *iv, size_t count,
- int noblock)
+static ssize_t tun_get_user(struct tun_struct *tun,
+ const struct iovec *iv, size_t count,
+ int noblock)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
- size_t len = count, align = 0;
+ size_t len = count, align = NET_SKB_PAD;
struct virtio_net_hdr gso = { 0 };
int offset = 0;
@@ -636,7 +637,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
}
if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
- align = NET_IP_ALIGN;
+ align += NET_IP_ALIGN;
if (unlikely(len < ETH_HLEN ||
(gso.hdr_len && gso.hdr_len < ETH_HLEN)))
return -EINVAL;
@@ -688,7 +689,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
case TUN_TAP_DEV:
skb->protocol = eth_type_trans(skb, tun->dev);
break;
- };
+ }
if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
pr_debug("GSO!\n");
@@ -751,9 +752,9 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
}
/* Put packet to the user space buffer */
-static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
- struct sk_buff *skb,
- const struct iovec *iv, int len)
+static ssize_t tun_put_user(struct tun_struct *tun,
+ struct sk_buff *skb,
+ const struct iovec *iv, int len)
{
struct tun_pi pi = { 0, skb->protocol };
ssize_t total = 0;
@@ -810,6 +811,8 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
gso.csum_start = skb_checksum_start_offset(skb);
gso.csum_offset = skb->csum_offset;
+ } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
@@ -839,7 +842,8 @@ static ssize_t tun_do_read(struct tun_struct *tun,
tun_debug(KERN_INFO, tun, "tun_chr_read\n");
- add_wait_queue(&tun->wq.wait, &wait);
+ if (unlikely(!noblock))
+ add_wait_queue(&tun->wq.wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
@@ -870,7 +874,8 @@ static ssize_t tun_do_read(struct tun_struct *tun,
}
current->state = TASK_RUNNING;
- remove_wait_queue(&tun->wq.wait, &wait);
+ if (unlikely(!noblock))
+ remove_wait_queue(&tun->wq.wait, &wait);
return ret;
}
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 3de4283344e..1d5091a1e49 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -2367,7 +2367,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->irq = pdev->irq;
tp = netdev_priv(dev);
- tp->shared = (struct typhoon_shared *) shared;
+ tp->shared = shared;
tp->shared_dma = shared_dma;
tp->pdev = pdev;
tp->tx_pdev = pdev;
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index ef041057d9d..d3465ab50e5 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2030,11 +2030,6 @@ static void ucc_geth_set_multi(struct net_device *dev)
out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
netdev_for_each_mc_addr(ha, dev) {
- /* Only support group multicast for now.
- */
- if (!is_multicast_ether_addr(ha->addr))
- continue;
-
/* Ask CPM to run CRC and set bit in
* filter mask.
*/
@@ -3165,6 +3160,8 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
ugeth->txBd[txQ] = bd;
+ skb_tx_timestamp(skb);
+
if (ugeth->p_scheduler) {
ugeth->cpucount[txQ]++;
/* Indicate to QE that there are more Tx bds ready for
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 6998aa6b7bb..c5c4b4def7f 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -314,12 +314,11 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, 4);
while (skb->len > 0) {
- if ((short)(header & 0x0000ffff) !=
- ~((short)((header & 0xffff0000) >> 16))) {
+ if ((header & 0x07ff) != ((~header >> 16) & 0x07ff))
netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
- }
+
/* get the packet length */
- size = (u16) (header & 0x0000ffff);
+ size = (u16) (header & 0x000007ff);
if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
u8 alignment = (unsigned long)skb->data & 0x3;
@@ -1502,6 +1501,10 @@ static const struct usb_device_id products [] = {
USB_DEVICE (0x04f1, 0x3008),
.driver_info = (unsigned long) &ax8817x_info,
}, {
+ // ASIX AX88772B 10/100
+ USB_DEVICE (0x0b95, 0x772b),
+ .driver_info = (unsigned long) &ax88772_info,
+}, {
// ASIX AX88772 10/100
USB_DEVICE (0x0b95, 0x7720),
.driver_info = (unsigned long) &ax88772_info,
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index f967913e11b..a60d0069cc4 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -21,6 +21,7 @@
*/
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/usb.h>
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index f33ca6aa29e..f06fb78383a 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -47,14 +47,13 @@
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/usb.h>
-#include <linux/version.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc.h>
-#define DRIVER_VERSION "01-June-2011"
+#define DRIVER_VERSION "04-Aug-2011"
/* CDC NCM subclass 3.2.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -164,35 +163,8 @@ cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
-static int
-cdc_ncm_do_request(struct cdc_ncm_ctx *ctx, struct usb_cdc_notification *req,
- void *data, u16 flags, u16 *actlen, u16 timeout)
-{
- int err;
-
- err = usb_control_msg(ctx->udev, (req->bmRequestType & USB_DIR_IN) ?
- usb_rcvctrlpipe(ctx->udev, 0) :
- usb_sndctrlpipe(ctx->udev, 0),
- req->bNotificationType, req->bmRequestType,
- req->wValue,
- req->wIndex, data,
- req->wLength, timeout);
-
- if (err < 0) {
- if (actlen)
- *actlen = 0;
- return err;
- }
-
- if (actlen)
- *actlen = err;
-
- return 0;
-}
-
static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
{
- struct usb_cdc_notification req;
u32 val;
u8 flags;
u8 iface_no;
@@ -201,14 +173,14 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_GET_NTB_PARAMETERS;
- req.wValue = 0;
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = cpu_to_le16(sizeof(ctx->ncm_parm));
-
- err = cdc_ncm_do_request(ctx, &req, &ctx->ncm_parm, 0, NULL, 1000);
- if (err) {
+ err = usb_control_msg(ctx->udev,
+ usb_rcvctrlpipe(ctx->udev, 0),
+ USB_CDC_GET_NTB_PARAMETERS,
+ USB_TYPE_CLASS | USB_DIR_IN
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, &ctx->ncm_parm,
+ sizeof(ctx->ncm_parm), 10000);
+ if (err < 0) {
pr_debug("failed GET_NTB_PARAMETERS\n");
return 1;
}
@@ -254,31 +226,43 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
/* inform device about NTB input size changes */
if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
- req.wValue = 0;
- req.wIndex = cpu_to_le16(iface_no);
if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
- struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
-
- req.wLength = 8;
- ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
- ndp_in_sz.wNtbInMaxDatagrams =
- cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
- ndp_in_sz.wReserved = 0;
- err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
- 1000);
- } else {
- __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+ struct usb_cdc_ncm_ndp_input_size *ndp_in_sz;
- req.wLength = 4;
- err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
- NULL, 1000);
- }
+ ndp_in_sz = kzalloc(sizeof(*ndp_in_sz), GFP_KERNEL);
+ if (!ndp_in_sz) {
+ err = -ENOMEM;
+ goto size_err;
+ }
- if (err)
+ err = usb_control_msg(ctx->udev,
+ usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_NTB_INPUT_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, ndp_in_sz, 8, 1000);
+ kfree(ndp_in_sz);
+ } else {
+ __le32 *dwNtbInMaxSize;
+ dwNtbInMaxSize = kzalloc(sizeof(*dwNtbInMaxSize),
+ GFP_KERNEL);
+ if (!dwNtbInMaxSize) {
+ err = -ENOMEM;
+ goto size_err;
+ }
+ *dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+
+ err = usb_control_msg(ctx->udev,
+ usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_NTB_INPUT_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, dwNtbInMaxSize, 4, 1000);
+ kfree(dwNtbInMaxSize);
+ }
+size_err:
+ if (err < 0)
pr_debug("Setting NTB Input Size failed\n");
}
@@ -333,29 +317,24 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
/* set CRC Mode */
if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_SET_CRC_MODE;
- req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = 0;
-
- err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
- if (err)
+ err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_CRC_MODE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_CRC_NOT_APPENDED,
+ iface_no, NULL, 0, 1000);
+ if (err < 0)
pr_debug("Setting CRC mode off failed\n");
}
/* set NTB format, if both formats are supported */
if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
- req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = 0;
-
- err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
- if (err)
+ err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS
+ | USB_DIR_OUT | USB_RECIP_INTERFACE,
+ USB_CDC_NCM_NTB16_FORMAT,
+ iface_no, NULL, 0, 1000);
+ if (err < 0)
pr_debug("Setting NTB format to 16-bit failed\n");
}
@@ -363,23 +342,29 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
/* set Max Datagram Size (MTU) */
if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
- __le16 max_datagram_size;
+ __le16 *max_datagram_size;
u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
- req.wValue = 0;
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = cpu_to_le16(2);
+ max_datagram_size = kzalloc(sizeof(*max_datagram_size),
+ GFP_KERNEL);
+ if (!max_datagram_size) {
+ err = -ENOMEM;
+ goto max_dgram_err;
+ }
- err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
- 1000);
- if (err) {
+ err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0),
+ USB_CDC_GET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_IN
+ | USB_RECIP_INTERFACE,
+ 0, iface_no, max_datagram_size,
+ 2, 1000);
+ if (err < 0) {
pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
CDC_NCM_MIN_DATAGRAM_SIZE);
+ kfree(max_datagram_size);
} else {
- ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+ ctx->max_datagram_size =
+ le16_to_cpu(*max_datagram_size);
/* Check Eth descriptor value */
if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
if (ctx->max_datagram_size > eth_max_sz)
@@ -396,17 +381,17 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
CDC_NCM_MIN_DATAGRAM_SIZE;
/* if value changed, update device */
- req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
- USB_RECIP_INTERFACE;
- req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
- req.wValue = 0;
- req.wIndex = cpu_to_le16(iface_no);
- req.wLength = 2;
- max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
-
- err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
- 0, NULL, 1000);
- if (err)
+ err = usb_control_msg(ctx->udev,
+ usb_sndctrlpipe(ctx->udev, 0),
+ USB_CDC_SET_MAX_DATAGRAM_SIZE,
+ USB_TYPE_CLASS | USB_DIR_OUT
+ | USB_RECIP_INTERFACE,
+ 0,
+ iface_no, max_datagram_size,
+ 2, 1000);
+ kfree(max_datagram_size);
+max_dgram_err:
+ if (err < 0)
pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
}
@@ -672,7 +657,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
u32 rem;
u32 offset;
u32 last_offset;
- u16 n = 0;
+ u16 n = 0, index;
u8 ready2send = 0;
/* if there is a remaining skb, it gets priority */
@@ -860,8 +845,8 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
- ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
- ctx->tx_ndp_modulus);
+ index = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus);
+ ctx->tx_ncm.nth16.wNdpIndex = cpu_to_le16(index);
memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
ctx->tx_seq++;
@@ -874,12 +859,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
- memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
+ memcpy(((u8 *)skb_out->data) + index,
&(ctx->tx_ncm.ndp16),
sizeof(ctx->tx_ncm.ndp16));
- memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
- sizeof(ctx->tx_ncm.ndp16),
+ memcpy(((u8 *)skb_out->data) + index + sizeof(ctx->tx_ncm.ndp16),
&(ctx->tx_ncm.dpe16),
(ctx->tx_curr_frame_num + 1) *
sizeof(struct usb_cdc_ncm_dpe16));
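
The cdc_ncm hunks above drop the cdc_ncm_do_request() wrapper and call usb_control_msg() directly, moving the request buffers off the stack and onto the heap: the USB core may DMA-map the data buffer, so on-stack storage is not safe. A minimal sketch of that pattern follows; it is not taken from the patch, and the example_get_ntb_parameters() name is illustrative (the request constant and parameter struct are the <linux/usb/cdc.h> ones used above).

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>

static int example_get_ntb_parameters(struct usb_device *udev, u8 iface_no,
                                      struct usb_cdc_ncm_ntb_parameters *out)
{
        struct usb_cdc_ncm_ntb_parameters *buf;
        int err;

        /* heap buffer: safe to hand to the USB core for DMA */
        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                              USB_CDC_GET_NTB_PARAMETERS,
                              USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
                              0, iface_no, buf, sizeof(*buf), 1000);
        if (err >= 0)
                memcpy(out, buf, sizeof(*out)); /* caller's copy may live anywhere */

        kfree(buf);
        return err < 0 ? err : 0;
}
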
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 387ca43f26f..304fe78ff60 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2421,10 +2421,8 @@ static void hso_free_net_device(struct hso_device *hso_dev)
remove_net_device(hso_net->parent);
- if (hso_net->net) {
+ if (hso_net->net)
unregister_netdev(hso_net->net);
- free_netdev(hso_net->net);
- }
/* start freeing */
for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
@@ -2436,6 +2434,9 @@ static void hso_free_net_device(struct hso_device *hso_dev)
kfree(hso_net->mux_bulk_tx_buf);
hso_net->mux_bulk_tx_buf = NULL;
+ if (hso_net->net)
+ free_netdev(hso_net->net);
+
kfree(hso_dev);
}
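
The hso change above only reorders the teardown: the driver-private hso_net structure is carved out of the net_device allocation, so free_netdev() must come after the last access to it. A minimal sketch of that lifetime rule, with hypothetical example_* names:

#include <linux/netdevice.h>
#include <linux/slab.h>

struct example_priv {
        void *rx_buf;           /* lives inside the net_device allocation */
};

static void example_teardown(struct net_device *ndev)
{
        struct example_priv *priv = netdev_priv(ndev);

        unregister_netdev(ndev);        /* detach from the stack first */
        kfree(priv->rx_buf);            /* priv is still valid here ... */
        priv->rx_buf = NULL;
        free_netdev(ndev);              /* ... and invalid after this call */
}
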
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 81126ff85e0..15772b1b6a9 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -409,12 +409,6 @@ static void ipheth_tx_timeout(struct net_device *net)
usb_unlink_urb(dev->tx_urb);
}
-static struct net_device_stats *ipheth_stats(struct net_device *net)
-{
- struct ipheth_device *dev = netdev_priv(net);
- return &dev->net->stats;
-}
-
static u32 ipheth_ethtool_op_get_link(struct net_device *net)
{
struct ipheth_device *dev = netdev_priv(net);
@@ -426,11 +420,10 @@ static struct ethtool_ops ops = {
};
static const struct net_device_ops ipheth_netdev_ops = {
- .ndo_open = &ipheth_open,
- .ndo_stop = &ipheth_close,
- .ndo_start_xmit = &ipheth_tx,
- .ndo_tx_timeout = &ipheth_tx_timeout,
- .ndo_get_stats = &ipheth_stats,
+ .ndo_open = ipheth_open,
+ .ndo_stop = ipheth_close,
+ .ndo_start_xmit = ipheth_tx,
+ .ndo_tx_timeout = ipheth_tx_timeout,
};
static int ipheth_probe(struct usb_interface *intf,
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index d965fb1e013..5a6d0f88f43 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -100,34 +100,42 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
static int
kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
{
- char init_msg_1[] =
+ static const char init_msg_1[] =
{ 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
0x00, 0x00 };
- char init_msg_2[] =
+ static const char init_msg_2[] =
{ 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xf4,
0x00, 0x00 };
- char receive_buf[28];
+ static const int buflen = 28;
+ char *usb_buf;
int status;
- status = kalmia_send_init_packet(dev, init_msg_1, sizeof(init_msg_1)
- / sizeof(init_msg_1[0]), receive_buf, 24);
+ usb_buf = kmalloc(buflen, GFP_DMA | GFP_KERNEL);
+ if (!usb_buf)
+ return -ENOMEM;
+
+ memcpy(usb_buf, init_msg_1, 12);
+ status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_1)
+ / sizeof(init_msg_1[0]), usb_buf, 24);
if (status != 0)
return status;
- status = kalmia_send_init_packet(dev, init_msg_2, sizeof(init_msg_2)
- / sizeof(init_msg_2[0]), receive_buf, 28);
+ memcpy(usb_buf, init_msg_2, 12);
+ status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_2)
+ / sizeof(init_msg_2[0]), usb_buf, 28);
if (status != 0)
return status;
- memcpy(ethernet_addr, receive_buf + 10, ETH_ALEN);
+ memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
+ kfree(usb_buf);
return status;
}
static int
kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
{
- u8 status;
+ int status;
u8 ethernet_addr[ETH_ALEN];
/* Don't bind to AT command interface */
@@ -190,7 +198,8 @@ kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
dev_kfree_skb_any(skb);
skb = skb2;
- done: header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
+done:
+ header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
ether_type_1 = header_start[KALMIA_HEADER_LENGTH + 12];
ether_type_2 = header_start[KALMIA_HEADER_LENGTH + 13];
@@ -201,9 +210,8 @@ kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
header_start[0] = 0x57;
header_start[1] = 0x44;
content_len = skb->len - KALMIA_HEADER_LENGTH;
- header_start[2] = (content_len & 0xff); /* low byte */
- header_start[3] = (content_len >> 8); /* high byte */
+ put_unaligned_le16(content_len, &header_start[2]);
header_start[4] = ether_type_1;
header_start[5] = ether_type_2;
@@ -231,13 +239,13 @@ kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
* Our task here is to strip off framing, leaving skb with one
* data frame for the usbnet framework code to process.
*/
- const u8 HEADER_END_OF_USB_PACKET[] =
+ static const u8 HEADER_END_OF_USB_PACKET[] =
{ 0x57, 0x5a, 0x00, 0x00, 0x08, 0x00 };
- const u8 EXPECTED_UNKNOWN_HEADER_1[] =
+ static const u8 EXPECTED_UNKNOWN_HEADER_1[] =
{ 0x57, 0x43, 0x1e, 0x00, 0x15, 0x02 };
- const u8 EXPECTED_UNKNOWN_HEADER_2[] =
+ static const u8 EXPECTED_UNKNOWN_HEADER_2[] =
{ 0x57, 0x50, 0x0e, 0x00, 0x00, 0x00 };
- u8 i = 0;
+ int i = 0;
/* incomplete header? */
if (skb->len < KALMIA_HEADER_LENGTH)
@@ -285,7 +293,7 @@ kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* subtract start header and end header */
usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
- ether_packet_length = header_start[2] + (header_start[3] << 8);
+ ether_packet_length = get_unaligned_le16(&header_start[2]);
skb_pull(skb, KALMIA_HEADER_LENGTH);
/* Some small packets misses end marker */
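
Besides switching the init messages to static const and the transfer buffer to kmalloc'd (DMA-capable) memory, the kalmia hunks replace the open-coded low-byte/high-byte arithmetic with the unaligned little-endian accessors. A short sketch of those helpers, assuming a hypothetical frame layout with the 16-bit length at offset 2:

#include <linux/types.h>
#include <asm/unaligned.h>

static void example_set_len(u8 *header, u16 content_len)
{
        /* store the length little-endian, regardless of host
         * endianness or pointer alignment */
        put_unaligned_le16(content_len, &header[2]);
}

static u16 example_get_len(const u8 *header)
{
        return get_unaligned_le16(&header[2]);
}
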
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 041fb7d43c4..ef3b236b514 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -977,7 +977,6 @@ static void rtl8150_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (dev) {
set_bit(RTL8150_UNPLUG, &dev->flags);
- tasklet_disable(&dev->tl);
tasklet_kill(&dev->tl);
unregister_netdev(dev->netdev);
unlink_all_urbs(dev);
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 241756e0e86..1a2234c2051 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -331,17 +331,7 @@ static const struct usb_device_id products [] = {
ZAURUS_MASTER_INTERFACE,
.driver_info = ZAURUS_PXA_INFO,
},
-
-
-/* At least some of the newest PXA units have very different lies about
- * their standards support: they claim to be cell phones offering
- * direct access to their radios! (No, they don't conform to CDC MDLM.)
- */
{
- USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
- USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &bogus_mdlm_info,
-}, {
/* Motorola MOTOMAGX phones */
USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6425, USB_CLASS_COMM,
USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8461576fa01..5b23767ea81 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
+#include <linux/u64_stats_sync.h>
#include <net/dst.h>
#include <net/xfrm.h>
@@ -24,12 +25,12 @@
#define MAX_MTU 65535 /* Max L3 MTU (arbitrary) */
struct veth_net_stats {
- unsigned long rx_packets;
- unsigned long tx_packets;
- unsigned long rx_bytes;
- unsigned long tx_bytes;
- unsigned long tx_dropped;
- unsigned long rx_dropped;
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ u64 rx_dropped;
+ struct u64_stats_sync syncp;
};
struct veth_priv {
@@ -124,9 +125,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
stats = this_cpu_ptr(priv->stats);
rcv_stats = this_cpu_ptr(rcv_priv->stats);
- if (!(rcv->flags & IFF_UP))
- goto tx_drop;
-
/* don't change ip_summed == CHECKSUM_PARTIAL, as that
will cause bad checksum on forwarded packets */
if (skb->ip_summed == CHECKSUM_NONE &&
@@ -137,21 +135,22 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
goto rx_drop;
+ u64_stats_update_begin(&stats->syncp);
stats->tx_bytes += length;
stats->tx_packets++;
+ u64_stats_update_end(&stats->syncp);
+ u64_stats_update_begin(&rcv_stats->syncp);
rcv_stats->rx_bytes += length;
rcv_stats->rx_packets++;
+ u64_stats_update_end(&rcv_stats->syncp);
return NETDEV_TX_OK;
-tx_drop:
- kfree_skb(skb);
- stats->tx_dropped++;
- return NETDEV_TX_OK;
-
rx_drop:
+ u64_stats_update_begin(&rcv_stats->syncp);
rcv_stats->rx_dropped++;
+ u64_stats_update_end(&rcv_stats->syncp);
return NETDEV_TX_OK;
}
@@ -159,32 +158,34 @@ rx_drop:
* general routines
*/
-static struct net_device_stats *veth_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
{
- struct veth_priv *priv;
+ struct veth_priv *priv = netdev_priv(dev);
int cpu;
- struct veth_net_stats *stats, total = {0};
-
- priv = netdev_priv(dev);
for_each_possible_cpu(cpu) {
- stats = per_cpu_ptr(priv->stats, cpu);
-
- total.rx_packets += stats->rx_packets;
- total.tx_packets += stats->tx_packets;
- total.rx_bytes += stats->rx_bytes;
- total.tx_bytes += stats->tx_bytes;
- total.tx_dropped += stats->tx_dropped;
- total.rx_dropped += stats->rx_dropped;
+ struct veth_net_stats *stats = per_cpu_ptr(priv->stats, cpu);
+ u64 rx_packets, rx_bytes, rx_dropped;
+ u64 tx_packets, tx_bytes;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->syncp);
+ rx_packets = stats->rx_packets;
+ tx_packets = stats->tx_packets;
+ rx_bytes = stats->rx_bytes;
+ tx_bytes = stats->tx_bytes;
+ rx_dropped = stats->rx_dropped;
+ } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+ tot->rx_packets += rx_packets;
+ tot->tx_packets += tx_packets;
+ tot->rx_bytes += rx_bytes;
+ tot->tx_bytes += tx_bytes;
+ tot->rx_dropped += rx_dropped;
}
- dev->stats.rx_packets = total.rx_packets;
- dev->stats.tx_packets = total.tx_packets;
- dev->stats.rx_bytes = total.rx_bytes;
- dev->stats.tx_bytes = total.tx_bytes;
- dev->stats.tx_dropped = total.tx_dropped;
- dev->stats.rx_dropped = total.rx_dropped;
-
- return &dev->stats;
+
+ return tot;
}
static int veth_open(struct net_device *dev)
@@ -254,7 +255,7 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_stop = veth_close,
.ndo_start_xmit = veth_xmit,
.ndo_change_mtu = veth_change_mtu,
- .ndo_get_stats = veth_get_stats,
+ .ndo_get_stats64 = veth_get_stats64,
.ndo_set_mac_address = eth_mac_addr,
};
@@ -262,6 +263,8 @@ static void veth_setup(struct net_device *dev)
{
ether_setup(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
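
veth here moves from unsigned long counters returned through ndo_get_stats to per-CPU u64 counters published through ndo_get_stats64, with struct u64_stats_sync guarding the 64-bit updates on 32-bit SMP (it compiles away on 64-bit). A condensed sketch of the writer and reader sides, not from the patch; the example_* names are illustrative:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_pcpu_stats {
        u64                     rx_packets;
        u64                     rx_bytes;
        struct u64_stats_sync   syncp;
};

/* writer side: runs per CPU in the rx/tx path */
static void example_count_rx(struct example_pcpu_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->rx_packets++;
        s->rx_bytes += len;
        u64_stats_update_end(&s->syncp);
}

/* reader side: folds all CPUs, retrying if a writer raced with us */
static void example_fold_rx(struct example_pcpu_stats __percpu *stats,
                            u64 *packets, u64 *bytes)
{
        int cpu;

        *packets = 0;
        *bytes = 0;
        for_each_possible_cpu(cpu) {
                struct example_pcpu_stats *s = per_cpu_ptr(stats, cpu);
                unsigned int start;
                u64 p, b;

                do {
                        start = u64_stats_fetch_begin(&s->syncp);
                        p = s->rx_packets;
                        b = s->rx_bytes;
                } while (u64_stats_fetch_retry(&s->syncp, start));

                *packets += p;
                *bytes += b;
        }
}
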
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 06daa9d6fee..7c5336c5c37 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -45,6 +45,7 @@
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
@@ -501,6 +502,7 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
static void velocity_init_cam_filter(struct velocity_info *vptr)
{
struct mac_regs __iomem *regs = vptr->mac_regs;
+ unsigned int vid, i = 0;
/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
@@ -513,30 +515,13 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
mac_set_cam_mask(regs, vptr->mCAMmask);
/* Enable VCAMs */
- if (vptr->vlgrp) {
- unsigned int vid, i = 0;
-
- if (!vlan_group_get_device(vptr->vlgrp, 0))
- WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
-
- for (vid = 1; (vid < VLAN_VID_MASK); vid++) {
- if (vlan_group_get_device(vptr->vlgrp, vid)) {
- mac_set_vlan_cam(regs, i, (u8 *) &vid);
- vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
- if (++i >= VCAM_SIZE)
- break;
- }
- }
- mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
+ for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
+ mac_set_vlan_cam(regs, i, (u8 *) &vid);
+ vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
+ if (++i >= VCAM_SIZE)
+ break;
}
-}
-
-static void velocity_vlan_rx_register(struct net_device *dev,
- struct vlan_group *grp)
-{
- struct velocity_info *vptr = netdev_priv(dev);
-
- vptr->vlgrp = grp;
+ mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
}
static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
@@ -544,6 +529,7 @@ static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct velocity_info *vptr = netdev_priv(dev);
spin_lock_irq(&vptr->lock);
+ set_bit(vid, vptr->active_vlans);
velocity_init_cam_filter(vptr);
spin_unlock_irq(&vptr->lock);
}
@@ -553,7 +539,7 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
struct velocity_info *vptr = netdev_priv(dev);
spin_lock_irq(&vptr->lock);
- vlan_group_set_device(vptr->vlgrp, vid, NULL);
+ clear_bit(vid, vptr->active_vlans);
velocity_init_cam_filter(vptr);
spin_unlock_irq(&vptr->lock);
}
@@ -1887,7 +1873,7 @@ static void velocity_error(struct velocity_info *vptr, int status)
else
netif_wake_queue(vptr->dev);
- };
+ }
if (status & ISR_MIBFI)
velocity_update_hw_mibs(vptr);
if (status & ISR_LSTEI)
@@ -2094,11 +2080,12 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
skb_put(skb, pkt_len - 4);
skb->protocol = eth_type_trans(skb, vptr->dev);
- if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
- vlan_hwaccel_rx(skb, vptr->vlgrp,
- swab16(le16_to_cpu(rd->rdesc1.PQTAG)));
- } else
- netif_rx(skb);
+ if (rd->rdesc0.RSR & RSR_DETAG) {
+ u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
+
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
+ netif_rx(skb);
stats->rx_bytes += pkt_len;
@@ -2641,7 +2628,6 @@ static const struct net_device_ops velocity_netdev_ops = {
.ndo_do_ioctl = velocity_ioctl,
.ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
- .ndo_vlan_rx_register = velocity_vlan_rx_register,
};
/**
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 0f1f05f6c4f..4cb9f13485e 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1437,7 +1437,7 @@ struct velocity_info {
struct pci_dev *pdev;
struct net_device *dev;
- struct vlan_group *vlgrp;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u8 ip_addr[4];
enum chip_type chip_id;
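
With .ndo_vlan_rx_register gone, via-velocity (and vmxnet3 and vxge below) track VLAN membership in a plain bitmap sized for every possible VLAN ID and walk it with for_each_set_bit() when reprogramming the hardware filter. A minimal sketch of that bookkeeping; the example_* names and the program_vid callback are illustrative:

#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/types.h>

struct example_vlan_state {
        /* one bit per possible VLAN ID (VLAN_N_VID == 4096) */
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};

static void example_add_vid(struct example_vlan_state *st, u16 vid)
{
        set_bit(vid, st->active_vlans);
}

static void example_kill_vid(struct example_vlan_state *st, u16 vid)
{
        clear_bit(vid, st->active_vlans);
}

static void example_restore_filter(struct example_vlan_state *st,
                                   void (*program_vid)(u16 vid))
{
        unsigned int vid;

        /* re-add every VLAN the stack registered before, e.g. after a
         * device reset or when leaving promiscuous mode */
        for_each_set_bit(vid, st->active_vlans, VLAN_N_VID)
                program_vid(vid);
}
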
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f6853247a62..0c7321c35ad 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -40,6 +40,15 @@ module_param(gso, bool, 0444);
#define VIRTNET_SEND_COMMAND_SG_MAX 2
+struct virtnet_stats {
+ struct u64_stats_sync syncp;
+ u64 tx_bytes;
+ u64 tx_packets;
+
+ u64 rx_bytes;
+ u64 rx_packets;
+};
+
struct virtnet_info {
struct virtio_device *vdev;
struct virtqueue *rvq, *svq, *cvq;
@@ -56,6 +65,9 @@ struct virtnet_info {
/* Host will merge rx buffers for big packets (shake it! shake it!) */
bool mergeable_rx_bufs;
+ /* Active statistics */
+ struct virtnet_stats __percpu *stats;
+
/* Work struct for refilling if we run low on memory. */
struct delayed_work refill;
@@ -209,7 +221,6 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
skb->dev->stats.rx_length_errors++;
return -EINVAL;
}
-
page = virtqueue_get_buf(vi->rvq, &len);
if (!page) {
pr_debug("%s: rx error: %d buffers missing\n",
@@ -217,6 +228,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
skb->dev->stats.rx_length_errors++;
return -EINVAL;
}
+
if (len > PAGE_SIZE)
len = PAGE_SIZE;
@@ -230,6 +242,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
struct virtnet_info *vi = netdev_priv(dev);
+ struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);
struct sk_buff *skb;
struct page *page;
struct skb_vnet_hdr *hdr;
@@ -265,8 +278,11 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
hdr = skb_vnet_hdr(skb);
skb->truesize += skb->data_len;
- dev->stats.rx_bytes += skb->len;
- dev->stats.rx_packets++;
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_bytes += skb->len;
+ stats->rx_packets++;
+ u64_stats_update_end(&stats->syncp);
if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
pr_debug("Needs csum!\n");
@@ -274,6 +290,8 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
hdr->hdr.csum_start,
hdr->hdr.csum_offset))
goto frame_err;
+ } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
}
skb->protocol = eth_type_trans(skb, dev);
@@ -513,11 +531,16 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
struct sk_buff *skb;
unsigned int len, tot_sgs = 0;
+ struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);
while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
- vi->dev->stats.tx_bytes += skb->len;
- vi->dev->stats.tx_packets++;
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ u64_stats_update_end(&stats->syncp);
+
tot_sgs += skb_vnet_hdr(skb)->num_sg;
dev_kfree_skb_any(skb);
}
@@ -639,6 +662,40 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
return 0;
}
+static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int cpu;
+ unsigned int start;
+
+ for_each_possible_cpu(cpu) {
+ struct virtnet_stats __percpu *stats
+ = per_cpu_ptr(vi->stats, cpu);
+ u64 tpackets, tbytes, rpackets, rbytes;
+
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tpackets = stats->tx_packets;
+ tbytes = stats->tx_bytes;
+ rpackets = stats->rx_packets;
+ rbytes = stats->rx_bytes;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ tot->rx_packets += rpackets;
+ tot->tx_packets += tpackets;
+ tot->rx_bytes += rbytes;
+ tot->tx_bytes += tbytes;
+ }
+
+ tot->tx_dropped = dev->stats.tx_dropped;
+ tot->rx_dropped = dev->stats.rx_dropped;
+ tot->rx_length_errors = dev->stats.rx_length_errors;
+ tot->rx_frame_errors = dev->stats.rx_frame_errors;
+
+ return tot;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
@@ -833,6 +890,7 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_set_mac_address = virtnet_set_mac_address,
.ndo_set_rx_mode = virtnet_set_rx_mode,
.ndo_change_mtu = virtnet_change_mtu,
+ .ndo_get_stats64 = virtnet_stats,
.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -893,6 +951,7 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Set up network device as normal. */
dev->netdev_ops = &virtnet_netdev;
dev->features = NETIF_F_HIGHDMA;
+
SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
SET_NETDEV_DEV(dev, &vdev->dev);
@@ -937,6 +996,11 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->vdev = vdev;
vdev->priv = vi;
vi->pages = NULL;
+ vi->stats = alloc_percpu(struct virtnet_stats);
+ err = -ENOMEM;
+ if (vi->stats == NULL)
+ goto free;
+
INIT_DELAYED_WORK(&vi->refill, refill_work);
sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
@@ -956,7 +1020,7 @@ static int virtnet_probe(struct virtio_device *vdev)
err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
if (err)
- goto free;
+ goto free_stats;
vi->rvq = vqs[0];
vi->svq = vqs[1];
@@ -1001,6 +1065,8 @@ unregister:
cancel_delayed_work_sync(&vi->refill);
free_vqs:
vdev->config->del_vqs(vdev);
+free_stats:
+ free_percpu(vi->stats);
free:
free_netdev(dev);
return err;
@@ -1047,6 +1113,7 @@ static void __devexit virtnet_remove(struct virtio_device *vdev)
while (vi->pages)
__free_pages(get_a_page(vi, GFP_KERNEL), 0);
+ free_percpu(vi->stats);
free_netdev(vi->dev);
}
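
virtio_net adopts the same u64_stats_sync scheme as veth above, so the new moving part is the per-device percpu allocation added to probe/remove and the extra free_stats unwind label. A minimal sketch of that allocation lifetime, with hypothetical example_* names:

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct example_stats {
        u64 tx_packets;
        u64 tx_bytes;
};

struct example_dev {
        struct example_stats __percpu *stats;
};

static int example_probe_stats(struct example_dev *d)
{
        /* one counter block per possible CPU, released on every error
         * path after this point and again in remove() */
        d->stats = alloc_percpu(struct example_stats);
        if (!d->stats)
                return -ENOMEM;
        return 0;
}

static void example_remove_stats(struct example_dev *d)
{
        free_percpu(d->stats);
}
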
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index fa6e2ac7475..0959583feb2 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -405,10 +405,8 @@ vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
struct vmxnet3_tx_buf_info *tbi;
- union Vmxnet3_GenericDesc *gdesc;
tbi = tq->buf_info + tq->tx_ring.next2comp;
- gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;
vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
if (tbi->skb) {
@@ -575,7 +573,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
u32 val;
- while (num_allocated < num_to_alloc) {
+ while (num_allocated <= num_to_alloc) {
struct vmxnet3_rx_buf_info *rbi;
union Vmxnet3_GenericDesc *gd;
@@ -621,9 +619,15 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
BUG_ON(rbi->dma_addr == 0);
gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
- gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
+ gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
| val | rbi->len);
+ /* Fill the last buffer but don't mark it ready, or else the
+ * device will think that the queue is full */
+ if (num_allocated == num_to_alloc)
+ break;
+
+ gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
num_allocated++;
vmxnet3_cmd_ring_adv_next2fill(ring);
}
@@ -920,7 +924,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
skb_shinfo(skb)->nr_frags + 1;
- ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP));
+ ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
ctx.mss = skb_shinfo(skb)->gso_size;
if (ctx.mss) {
@@ -1140,6 +1144,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
};
u32 num_rxd = 0;
+ bool skip_page_frags = false;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
@@ -1150,11 +1155,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
&rxComp);
while (rcd->gen == rq->comp_ring.gen) {
struct vmxnet3_rx_buf_info *rbi;
- struct sk_buff *skb;
+ struct sk_buff *skb, *new_skb = NULL;
+ struct page *new_page = NULL;
int num_to_alloc;
struct Vmxnet3_RxDesc *rxd;
u32 idx, ring_idx;
-
+ struct vmxnet3_cmd_ring *ring = NULL;
if (num_rxd >= quota) {
/* we may stop even before we see the EOP desc of
* the current pkt
@@ -1165,6 +1171,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
idx = rcd->rxdIdx;
ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
+ ring = rq->rx_ring + ring_idx;
vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
&rxCmdDesc);
rbi = rq->buf_info[ring_idx] + idx;
@@ -1193,37 +1200,80 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
goto rcd_done;
}
+ skip_page_frags = false;
ctx->skb = rbi->skb;
- rbi->skb = NULL;
+ new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
+ if (new_skb == NULL) {
+ /* Skb allocation failed, do not handover this
+ * skb to stack. Reuse it. Drop the existing pkt
+ */
+ rq->stats.rx_buf_alloc_failure++;
+ ctx->skb = NULL;
+ rq->stats.drop_total++;
+ skip_page_frags = true;
+ goto rcd_done;
+ }
pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE);
skb_put(ctx->skb, rcd->len);
+
+ /* Immediate refill */
+ new_skb->dev = adapter->netdev;
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ rbi->skb = new_skb;
+ rbi->dma_addr = pci_map_single(adapter->pdev,
+ rbi->skb->data, rbi->len,
+ PCI_DMA_FROMDEVICE);
+ rxd->addr = cpu_to_le64(rbi->dma_addr);
+ rxd->len = rbi->len;
+
} else {
- BUG_ON(ctx->skb == NULL);
+ BUG_ON(ctx->skb == NULL && !skip_page_frags);
+
/* non SOP buffer must be type 1 in most cases */
- if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
- BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
+ BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
+ BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
- if (rcd->len) {
- pci_unmap_page(adapter->pdev,
- rbi->dma_addr, rbi->len,
- PCI_DMA_FROMDEVICE);
+ /* If an sop buffer was dropped, skip all
+ * following non-sop fragments. They will be reused.
+ */
+ if (skip_page_frags)
+ goto rcd_done;
- vmxnet3_append_frag(ctx->skb, rcd, rbi);
- rbi->page = NULL;
- }
- } else {
- /*
- * The only time a non-SOP buffer is type 0 is
- * when it's EOP and error flag is raised, which
- * has already been handled.
+ new_page = alloc_page(GFP_ATOMIC);
+ if (unlikely(new_page == NULL)) {
+ /* Replacement page frag could not be allocated.
+ * Reuse this page. Drop the pkt and free the
+ * skb which contained this page as a frag. Skip
+ * processing all the following non-sop frags.
*/
- BUG_ON(true);
+ rq->stats.rx_buf_alloc_failure++;
+ dev_kfree_skb(ctx->skb);
+ ctx->skb = NULL;
+ skip_page_frags = true;
+ goto rcd_done;
+ }
+
+ if (rcd->len) {
+ pci_unmap_page(adapter->pdev,
+ rbi->dma_addr, rbi->len,
+ PCI_DMA_FROMDEVICE);
+
+ vmxnet3_append_frag(ctx->skb, rcd, rbi);
}
+
+ /* Immediate refill */
+ rbi->page = new_page;
+ rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
+ 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ rxd->addr = cpu_to_le64(rbi->dma_addr);
+ rxd->len = rbi->len;
}
+
skb = ctx->skb;
if (rcd->eop) {
skb->len += skb->data_len;
@@ -1233,37 +1283,39 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
(union Vmxnet3_GenericDesc *)rcd);
skb->protocol = eth_type_trans(skb, adapter->netdev);
- if (unlikely(adapter->vlan_grp && rcd->ts)) {
- vlan_hwaccel_receive_skb(skb,
- adapter->vlan_grp, rcd->tci);
- } else {
+ if (unlikely(rcd->ts))
+ __vlan_hwaccel_put_tag(skb, rcd->tci);
+
+ if (adapter->netdev->features & NETIF_F_LRO)
netif_receive_skb(skb);
- }
+ else
+ napi_gro_receive(&rq->napi, skb);
ctx->skb = NULL;
}
rcd_done:
- /* device may skip some rx descs */
- rq->rx_ring[ring_idx].next2comp = idx;
- VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
- rq->rx_ring[ring_idx].size);
-
- /* refill rx buffers frequently to avoid starving the h/w */
- num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
- ring_idx);
- if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
- ring_idx, adapter))) {
- vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
- adapter);
-
- /* if needed, update the register */
- if (unlikely(rq->shared->updateRxProd)) {
- VMXNET3_WRITE_BAR0_REG(adapter,
- rxprod_reg[ring_idx] + rq->qid * 8,
- rq->rx_ring[ring_idx].next2fill);
- rq->uncommitted[ring_idx] = 0;
- }
+ /* device may have skipped some rx descs */
+ ring->next2comp = idx;
+ num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
+ ring = rq->rx_ring + ring_idx;
+ while (num_to_alloc) {
+ vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
+ &rxCmdDesc);
+ BUG_ON(!rxd->addr);
+
+ /* Recv desc is ready to be used by the device */
+ rxd->gen = ring->gen;
+ vmxnet3_cmd_ring_adv_next2fill(ring);
+ num_to_alloc--;
+ }
+
+ /* if needed, update the register */
+ if (unlikely(rq->shared->updateRxProd)) {
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ rxprod_reg[ring_idx] + rq->qid * 8,
+ ring->next2fill);
+ rq->uncommitted[ring_idx] = 0;
}
vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
@@ -1858,94 +1910,38 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
}
}
+
static void
-vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
- struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- struct Vmxnet3_DriverShared *shared = adapter->shared;
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
- unsigned long flags;
+ u16 vid;
- if (grp) {
- /* add vlan rx stripping. */
- if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
- int i;
- adapter->vlan_grp = grp;
+ /* allow untagged pkts */
+ VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
- /*
- * Clear entire vfTable; then enable untagged pkts.
- * Note: setting one entry in vfTable to non-zero turns
- * on VLAN rx filtering.
- */
- for (i = 0; i < VMXNET3_VFT_SIZE; i++)
- vfTable[i] = 0;
-
- VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
- spin_lock_irqsave(&adapter->cmd_lock, flags);
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_VLAN_FILTERS);
- spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- } else {
- printk(KERN_ERR "%s: vlan_rx_register when device has "
- "no NETIF_F_HW_VLAN_RX\n", netdev->name);
- }
- } else {
- /* remove vlan rx stripping. */
- struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
- adapter->vlan_grp = NULL;
-
- if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
- int i;
-
- for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
- /* clear entire vfTable; this also disables
- * VLAN rx filtering
- */
- vfTable[i] = 0;
- }
- spin_lock_irqsave(&adapter->cmd_lock, flags);
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_VLAN_FILTERS);
- spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- }
- }
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
}
static void
-vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
+vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
- if (adapter->vlan_grp) {
- u16 vid;
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (!(netdev->flags & IFF_PROMISC)) {
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
- bool activeVlan = false;
+ unsigned long flags;
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- if (vlan_group_get_device(adapter->vlan_grp, vid)) {
- VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
- activeVlan = true;
- }
- }
- if (activeVlan) {
- /* continue to allow untagged pkts */
- VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
- }
+ VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
-}
-
-static void
-vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
-{
- struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
- unsigned long flags;
-
- VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
- spin_lock_irqsave(&adapter->cmd_lock, flags);
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_VLAN_FILTERS);
- spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ set_bit(vid, adapter->active_vlans);
}
@@ -1953,14 +1949,19 @@ static void
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
- unsigned long flags;
- VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
- spin_lock_irqsave(&adapter->cmd_lock, flags);
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_VLAN_FILTERS);
- spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ if (!(netdev->flags & IFF_PROMISC)) {
+ u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ unsigned long flags;
+
+ VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
+
+ clear_bit(vid, adapter->active_vlans);
}
@@ -1997,8 +1998,14 @@ vmxnet3_set_mc(struct net_device *netdev)
u8 *new_table = NULL;
u32 new_mode = VMXNET3_RXM_UCAST;
- if (netdev->flags & IFF_PROMISC)
+ if (netdev->flags & IFF_PROMISC) {
+ u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
+
new_mode |= VMXNET3_RXM_PROMISC;
+ } else {
+ vmxnet3_restore_vlan(adapter);
+ }
if (netdev->flags & IFF_BROADCAST)
new_mode |= VMXNET3_RXM_BCAST;
@@ -2032,6 +2039,8 @@ vmxnet3_set_mc(struct net_device *netdev)
rxConf->rxMode = cpu_to_le32(new_mode);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_RX_MODE);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -2641,12 +2650,13 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
- NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_LRO;
+ NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_LRO;
if (dma64)
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features = netdev->hw_features & ~NETIF_F_HW_VLAN_TX;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ netdev->hw_features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features = netdev->hw_features &
+ ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+ netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
netdev_info(adapter->netdev,
"features: sg csum vlan jf tso tsoIPv6 lro%s\n",
@@ -2864,10 +2874,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
.ndo_set_mac_address = vmxnet3_set_mac_addr,
.ndo_change_mtu = vmxnet3_change_mtu,
.ndo_set_features = vmxnet3_set_features,
- .ndo_get_stats = vmxnet3_get_stats,
+ .ndo_get_stats64 = vmxnet3_get_stats64,
.ndo_tx_timeout = vmxnet3_tx_timeout,
.ndo_set_multicast_list = vmxnet3_set_mc,
- .ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2894,6 +2903,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
else
#endif
num_rx_queues = 1;
+ num_rx_queues = rounddown_pow_of_two(num_rx_queues);
if (enable_mq)
num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
@@ -2901,6 +2911,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
else
num_tx_queues = 1;
+ num_tx_queues = rounddown_pow_of_two(num_tx_queues);
netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
max(num_tx_queues, num_rx_queues));
printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
@@ -2988,6 +2999,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_ver;
}
+ SET_NETDEV_DEV(netdev, &pdev->dev);
vmxnet3_declare_features(adapter, dma64);
adapter->dev_number = atomic_read(&devices_found);
@@ -3033,7 +3045,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
- SET_NETDEV_DEV(netdev, &pdev->dev);
err = register_netdev(netdev);
if (err) {
@@ -3085,6 +3096,7 @@ vmxnet3_remove_device(struct pci_dev *pdev)
else
#endif
num_rx_queues = 1;
+ num_rx_queues = rounddown_pow_of_two(num_rx_queues);
cancel_work_sync(&adapter->work);
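
Both via-velocity above and vmxnet3 here stop handing received frames to the vlan_group helpers; the stripped tag is simply recorded in the skb with __vlan_hwaccel_put_tag() (which at this point takes only the skb and the VLAN id) and the frame then goes through the normal GRO path. A minimal sketch of that receive-side pattern; example_rx_deliver() is an illustrative name:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static void example_rx_deliver(struct napi_struct *napi, struct sk_buff *skb,
                               bool tag_stripped, u16 vid)
{
        if (tag_stripped)
                __vlan_hwaccel_put_tag(skb, vid);       /* core demuxes the VLAN */

        napi_gro_receive(napi, skb);
}
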
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index dc959fe27aa..27400edeef5 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -113,15 +113,15 @@ vmxnet3_global_stats[] = {
};
-struct net_device_stats *
-vmxnet3_get_stats(struct net_device *netdev)
+struct rtnl_link_stats64 *
+vmxnet3_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
struct vmxnet3_adapter *adapter;
struct vmxnet3_tq_driver_stats *drvTxStats;
struct vmxnet3_rq_driver_stats *drvRxStats;
struct UPT1_TxStats *devTxStats;
struct UPT1_RxStats *devRxStats;
- struct net_device_stats *net_stats = &netdev->stats;
unsigned long flags;
int i;
@@ -132,36 +132,36 @@ vmxnet3_get_stats(struct net_device *netdev)
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- memset(net_stats, 0, sizeof(*net_stats));
for (i = 0; i < adapter->num_tx_queues; i++) {
devTxStats = &adapter->tqd_start[i].stats;
drvTxStats = &adapter->tx_queue[i].stats;
- net_stats->tx_packets += devTxStats->ucastPktsTxOK +
- devTxStats->mcastPktsTxOK +
- devTxStats->bcastPktsTxOK;
- net_stats->tx_bytes += devTxStats->ucastBytesTxOK +
- devTxStats->mcastBytesTxOK +
- devTxStats->bcastBytesTxOK;
- net_stats->tx_errors += devTxStats->pktsTxError;
- net_stats->tx_dropped += drvTxStats->drop_total;
+ stats->tx_packets += devTxStats->ucastPktsTxOK +
+ devTxStats->mcastPktsTxOK +
+ devTxStats->bcastPktsTxOK;
+ stats->tx_bytes += devTxStats->ucastBytesTxOK +
+ devTxStats->mcastBytesTxOK +
+ devTxStats->bcastBytesTxOK;
+ stats->tx_errors += devTxStats->pktsTxError;
+ stats->tx_dropped += drvTxStats->drop_total;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
devRxStats = &adapter->rqd_start[i].stats;
drvRxStats = &adapter->rx_queue[i].stats;
- net_stats->rx_packets += devRxStats->ucastPktsRxOK +
- devRxStats->mcastPktsRxOK +
- devRxStats->bcastPktsRxOK;
+ stats->rx_packets += devRxStats->ucastPktsRxOK +
+ devRxStats->mcastPktsRxOK +
+ devRxStats->bcastPktsRxOK;
- net_stats->rx_bytes += devRxStats->ucastBytesRxOK +
- devRxStats->mcastBytesRxOK +
- devRxStats->bcastBytesRxOK;
+ stats->rx_bytes += devRxStats->ucastBytesRxOK +
+ devRxStats->mcastBytesRxOK +
+ devRxStats->bcastBytesRxOK;
- net_stats->rx_errors += devRxStats->pktsRxError;
- net_stats->rx_dropped += drvRxStats->drop_total;
- net_stats->multicast += devRxStats->mcastPktsRxOK;
+ stats->rx_errors += devRxStats->pktsRxError;
+ stats->rx_dropped += drvRxStats->drop_total;
+ stats->multicast += devRxStats->mcastPktsRxOK;
}
- return net_stats;
+
+ return stats;
}
static int
@@ -268,7 +268,7 @@ int vmxnet3_set_features(struct net_device *netdev, u32 features)
unsigned long flags;
u32 changed = features ^ netdev->features;
- if (changed & (NETIF_F_RXCSUM|NETIF_F_LRO)) {
+ if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) {
if (features & NETIF_F_RXCSUM)
adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_RXCSUM;
@@ -284,6 +284,13 @@ int vmxnet3_set_features(struct net_device *netdev, u32 features)
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_LRO;
+ if (features & NETIF_F_HW_VLAN_RX)
+ adapter->shared->devRead.misc.uptFeatures |=
+ UPT1_F_RXVLAN;
+ else
+ adapter->shared->devRead.misc.uptFeatures &=
+ ~UPT1_F_RXVLAN;
+
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
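
The hunk above extends vmxnet3_set_features() to cover NETIF_F_HW_VLAN_RX. The general ndo_set_features pattern is to act only on feature bits that actually changed and then push the new state to the device; a minimal sketch follows, where the example_program_*() helpers are hypothetical stand-ins for device-specific register writes:

#include <linux/netdevice.h>

static void example_program_lro(struct net_device *netdev, bool on)
{
        /* hypothetical: write the device's LRO enable bit */
}

static void example_program_vlan_strip(struct net_device *netdev, bool on)
{
        /* hypothetical: write the device's VLAN-strip enable bit */
}

static int example_set_features(struct net_device *netdev, u32 features)
{
        u32 changed = features ^ netdev->features;      /* bits that flipped */

        if (changed & NETIF_F_LRO)
                example_program_lro(netdev, features & NETIF_F_LRO);
        if (changed & NETIF_F_HW_VLAN_RX)
                example_program_vlan_strip(netdev, features & NETIF_F_HW_VLAN_RX);
        return 0;
}
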
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index f50d36fdf40..b18eac1dcca 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -27,6 +27,7 @@
#ifndef _VMXNET3_INT_H
#define _VMXNET3_INT_H
+#include <linux/bitops.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
@@ -55,6 +56,7 @@
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
+#include <linux/log2.h>
#include "vmxnet3_defs.h"
@@ -68,10 +70,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.1.9.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.1.18.0-k"
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01010900
+#define VMXNET3_DRIVER_VERSION_NUM 0x01011200
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -315,7 +317,7 @@ struct vmxnet3_intr {
struct vmxnet3_adapter {
struct vmxnet3_tx_queue tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
- struct vlan_group *vlan_grp;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct vmxnet3_intr intr;
spinlock_t cmd_lock;
struct Vmxnet3_DriverShared *shared;
@@ -323,7 +325,6 @@ struct vmxnet3_adapter {
struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
struct Vmxnet3_RxQueueDesc *rqd_start; /* all rx queue desc */
struct net_device *netdev;
- struct net_device_stats net_stats;
struct pci_dev *pdev;
u8 __iomem *hw_addr0; /* for BAR 0 */
@@ -407,7 +408,9 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
-extern struct net_device_stats *vmxnet3_get_stats(struct net_device *netdev);
+
+extern struct rtnl_link_stats64 *
+vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
extern char vmxnet3_driver_name[];
#endif
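
For reference on the version bump just above: VMXNET3_DRIVER_VERSION_NUM packs one version component per byte, so "1.1.18.0" encodes as 0x01011200 (hex 0x12 is decimal 18). A one-line sketch of the packing, with an illustrative macro name:

#define EXAMPLE_PACK_VERSION(a, b, c, d) \
        (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))

/* EXAMPLE_PACK_VERSION(1, 1, 18, 0) == 0x01011200 */
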
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 32763b2dd73..1520c574cb2 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -582,7 +582,7 @@ __vxge_hw_device_toc_get(void __iomem *bar0)
goto exit;
val64 = readq(&legacy_reg->toc_first_pointer);
- toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
+ toc = bar0 + val64;
exit:
return toc;
}
@@ -600,7 +600,7 @@ __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
u32 i;
enum vxge_hw_status status = VXGE_HW_OK;
- hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;
+ hldev->legacy_reg = hldev->bar0;
hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
if (hldev->toc_reg == NULL) {
@@ -609,39 +609,31 @@ __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
}
val64 = readq(&hldev->toc_reg->toc_common_pointer);
- hldev->common_reg =
- (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);
+ hldev->common_reg = hldev->bar0 + val64;
val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
- hldev->mrpcim_reg =
- (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);
+ hldev->mrpcim_reg = hldev->bar0 + val64;
for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
- hldev->srpcim_reg[i] =
- (struct vxge_hw_srpcim_reg __iomem *)
- (hldev->bar0 + val64);
+ hldev->srpcim_reg[i] = hldev->bar0 + val64;
}
for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
- hldev->vpmgmt_reg[i] =
- (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
+ hldev->vpmgmt_reg[i] = hldev->bar0 + val64;
}
for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
- hldev->vpath_reg[i] =
- (struct vxge_hw_vpath_reg __iomem *)
- (hldev->bar0 + val64);
+ hldev->vpath_reg[i] = hldev->bar0 + val64;
}
val64 = readq(&hldev->toc_reg->toc_kdfc);
switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
case 0:
- hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
- VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
+ hldev->kdfc = hldev->bar0 + VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64);
break;
default:
break;
@@ -761,12 +753,11 @@ static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
- int exp_cap;
+ struct pci_dev *dev = hldev->pdev;
u16 lnk;
/* Get the negotiated link width and speed from PCI config space */
- exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
- pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
+ pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
return VXGE_HW_ERR_INVALID_PCI_INFO;
@@ -1024,7 +1015,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
}
val64 = readq(&toc->toc_common_pointer);
- common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);
+ common_reg = bar0 + val64;
status = __vxge_hw_device_vpath_reset_in_prog_check(
(u64 __iomem *)&common_reg->vpath_rst_in_prog);
@@ -1044,8 +1035,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
val64 = readq(&toc->toc_vpmgmt_pointer[i]);
- vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
- (bar0 + val64);
+ vpmgmt_reg = bar0 + val64;
hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
if (__vxge_hw_device_access_rights_get(hw_info->host_type,
@@ -1054,8 +1044,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
val64 = readq(&toc->toc_mrpcim_pointer);
- mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
- (bar0 + val64);
+ mrpcim_reg = bar0 + val64;
writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
wmb();
@@ -1064,8 +1053,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
val64 = readq(&toc->toc_vpath_pointer[i]);
spin_lock_init(&vpath.lock);
- vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
- (bar0 + val64);
+ vpath.vp_reg = bar0 + val64;
vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
@@ -1088,8 +1076,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
continue;
val64 = readq(&toc->toc_vpath_pointer[i]);
- vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
- (bar0 + val64);
+ vpath.vp_reg = bar0 + val64;
vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
status = __vxge_hw_vpath_addr_get(&vpath,
@@ -1994,13 +1981,11 @@ exit:
u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
- int link_width, exp_cap;
+ struct pci_dev *dev = hldev->pdev;
u16 lnk;
- exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
- pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
- link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
- return link_width;
+ pci_read_config_word(dev, dev->pcie_cap + PCI_EXP_LNKSTA, &lnk);
+ return (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
}
/*
@@ -2140,8 +2125,7 @@ __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
memblock_index, item,
&memblock_item_idx);
- rxdp = (struct vxge_hw_ring_rxd_1 *)
- ring->channel.reserve_arr[reserve_index];
+ rxdp = ring->channel.reserve_arr[reserve_index];
uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
@@ -4880,8 +4864,7 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
goto vpath_open_exit8;
}
- vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
- stats_block->memblock;
+ vpath->hw_stats = vpath->stats_block->memblock;
memset(vpath->hw_stats, 0,
sizeof(struct vxge_hw_vpath_stats_hw_info));
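
Two cleanups recur through the vxge-config.c hunks: arithmetic on a void __iomem * base no longer needs a cast to the target register type, and the PCI Express capability offset comes from the pci_dev's cached pcie_cap field instead of re-walking the capability list with pci_find_capability(). A minimal sketch of the latter; example_pcie_link_status() is an illustrative name:

#include <linux/pci.h>

static u16 example_pcie_link_status(struct pci_dev *pdev)
{
        u16 lnksta = 0;

        if (pdev->pcie_cap)     /* 0 when the device has no PCIe capability */
                pci_read_config_word(pdev, pdev->pcie_cap + PCI_EXP_LNKSTA,
                                     &lnksta);
        return lnksta;
}
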
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 359b9b9f804..dd362584f5c 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -13,8 +13,10 @@
******************************************************************************/
#ifndef VXGE_CONFIG_H
#define VXGE_CONFIG_H
+#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <asm/io.h>
#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 128
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 8ab870a2ad0..178348a258d 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -43,7 +43,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitops.h>
#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/tcp.h>
@@ -295,23 +297,22 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
skb_record_rx_queue(skb, ring->driver_id);
skb->protocol = eth_type_trans(skb, ring->ndev);
+ u64_stats_update_begin(&ring->stats.syncp);
ring->stats.rx_frms++;
ring->stats.rx_bytes += pkt_length;
if (skb->pkt_type == PACKET_MULTICAST)
ring->stats.rx_mcast++;
+ u64_stats_update_end(&ring->stats.syncp);
vxge_debug_rx(VXGE_TRACE,
"%s: %s:%d skb protocol = %d",
ring->ndev->name, __func__, __LINE__, skb->protocol);
- if (ring->vlgrp && ext_info->vlan &&
- (ring->vlan_tag_strip ==
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
- vlan_gro_receive(ring->napi_p, ring->vlgrp,
- ext_info->vlan, skb);
- else
- napi_gro_receive(ring->napi_p, skb);
+ if (ext_info->vlan &&
+ ring->vlan_tag_strip == VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)
+ __vlan_hwaccel_put_tag(skb, ext_info->vlan);
+ napi_gro_receive(ring->napi_p, skb);
vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
@@ -591,8 +592,10 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
vxge_hw_fifo_txdl_free(fifo_hw, dtr);
/* Updating the statistics block */
+ u64_stats_update_begin(&fifo->stats.syncp);
fifo->stats.tx_frms++;
fifo->stats.tx_bytes += skb->len;
+ u64_stats_update_end(&fifo->stats.syncp);
*done_skb++ = skb;
@@ -628,7 +631,7 @@ static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
ip = ip_hdr(skb);
- if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
+ if (!ip_is_fragment(ip)) {
th = (struct tcphdr *)(((unsigned char *)ip) +
ip->ihl*4);
@@ -679,8 +682,7 @@ static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
new_mac_entry->state = mac->state;
vpath->mac_addr_cnt++;
- /* Is this a multicast address */
- if (0x01 & mac->macaddr[0])
+ if (is_multicast_ether_addr(mac->macaddr))
vpath->mcast_addr_cnt++;
return TRUE;
@@ -694,7 +696,7 @@ vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
struct vxge_vpath *vpath;
enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
- if (0x01 & mac->macaddr[0]) /* multicast address */
+ if (is_multicast_ether_addr(mac->macaddr))
duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
else
duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
@@ -1073,8 +1075,7 @@ static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
kfree((struct vxge_mac_addrs *)entry);
vpath->mac_addr_cnt--;
- /* Is this a multicast address */
- if (0x01 & mac->macaddr[0])
+ if (is_multicast_ether_addr(mac->macaddr))
vpath->mcast_addr_cnt--;
return TRUE;
}
@@ -1196,8 +1197,7 @@ static void vxge_set_multicast(struct net_device *dev)
mac_address = (u8 *)&mac_entry->macaddr;
memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
- /* Is this a multicast address */
- if (0x01 & mac_info.macaddr[0]) {
+ if (is_multicast_ether_addr(mac_info.macaddr)) {
for (vpath_idx = 0; vpath_idx <
vdev->no_of_vpath;
vpath_idx++) {
@@ -1239,8 +1239,7 @@ _set_all_mcast:
mac_address = (u8 *)&mac_entry->macaddr;
memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
- /* Is this a multicast address */
- if (0x01 & mac_info.macaddr[0])
+ if (is_multicast_ether_addr(mac_info.macaddr))
break;
}
@@ -1488,15 +1487,11 @@ vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
struct vxgedev *vdev = vpath->vdev;
u16 vid;
- if (vdev->vlgrp && vpath->is_open) {
+ if (!vpath->is_open)
+ return status;
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- if (!vlan_group_get_device(vdev->vlgrp, vid))
- continue;
- /* Add these vlan to the vid table */
- status = vxge_hw_vpath_vid_add(vpath->handle, vid);
- }
- }
+ for_each_set_bit(vid, vdev->active_vlans, VLAN_N_VID)
+ status = vxge_hw_vpath_vid_add(vpath->handle, vid);
return status;
}
@@ -2629,11 +2624,16 @@ static void vxge_poll_vp_lockup(unsigned long data)
struct vxge_vpath *vpath;
struct vxge_ring *ring;
int i;
+ unsigned long rx_frms;
for (i = 0; i < vdev->no_of_vpath; i++) {
ring = &vdev->vpaths[i].ring;
+
+ /* Truncated to machine word size number of frames */
+ rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
+
/* Did this vpath received any packets */
- if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
+ if (ring->stats.prev_rx_frms == rx_frms) {
status = vxge_hw_vpath_check_leak(ring->handle);
/* Did it received any packets last time */
@@ -2653,7 +2653,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
}
}
}
- ring->stats.prev_rx_frms = ring->stats.rx_frms;
+ ring->stats.prev_rx_frms = rx_frms;
ring->last_status = status;
}
@@ -3124,14 +3124,36 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
/* net_stats already zeroed by caller */
for (k = 0; k < vdev->no_of_vpath; k++) {
- net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
- net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
- net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
- net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
- net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped;
- net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
- net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
- net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
+ struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
+ struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
+ unsigned int start;
+ u64 packets, bytes, multicast;
+
+ do {
+ start = u64_stats_fetch_begin(&rxstats->syncp);
+
+ packets = rxstats->rx_frms;
+ multicast = rxstats->rx_mcast;
+ bytes = rxstats->rx_bytes;
+ } while (u64_stats_fetch_retry(&rxstats->syncp, start));
+
+ net_stats->rx_packets += packets;
+ net_stats->rx_bytes += bytes;
+ net_stats->multicast += multicast;
+
+ net_stats->rx_errors += rxstats->rx_errors;
+ net_stats->rx_dropped += rxstats->rx_dropped;
+
+ do {
+ start = u64_stats_fetch_begin(&txstats->syncp);
+
+ packets = txstats->tx_frms;
+ bytes = txstats->tx_bytes;
+ } while (u64_stats_fetch_retry(&txstats->syncp, start));
+
+ net_stats->tx_packets += packets;
+ net_stats->tx_bytes += bytes;
+ net_stats->tx_errors += txstats->tx_errors;
}
return net_stats;
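
Editor's note: the do/while above is the standard u64_stats_sync reader loop: on 32-bit machines a 64-bit counter cannot be loaded in one instruction, so the reader retries whenever the writer's sequence count changed underneath it (on 64-bit the loop compiles away). A minimal reader sketch, assuming a hypothetical stats struct shaped like the ones introduced in vxge-main.h below:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_stats {
        struct u64_stats_sync syncp;
        u64 packets;
        u64 bytes;
};

static void example_read(const struct example_stats *s,
                         u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                *packets = s->packets;
                *bytes = s->bytes;
        } while (u64_stats_fetch_retry(&s->syncp, start));
}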
@@ -3275,60 +3297,6 @@ static void vxge_tx_watchdog(struct net_device *dev)
}
/**
- * vxge_vlan_rx_register
- * @dev: net device pointer.
- * @grp: vlan group
- *
- * Vlan group registration
- */
-static void
-vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct vxgedev *vdev;
- struct vxge_vpath *vpath;
- int vp;
- u64 vid;
- enum vxge_hw_status status;
- int i;
-
- vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
-
- vdev = netdev_priv(dev);
-
- vpath = &vdev->vpaths[0];
- if ((NULL == grp) && (vpath->is_open)) {
- /* Get the first vlan */
- status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
-
- while (status == VXGE_HW_OK) {
-
- /* Delete this vlan from the vid table */
- for (vp = 0; vp < vdev->no_of_vpath; vp++) {
- vpath = &vdev->vpaths[vp];
- if (!vpath->is_open)
- continue;
-
- vxge_hw_vpath_vid_delete(vpath->handle, vid);
- }
-
- /* Get the next vlan to be deleted */
- vpath = &vdev->vpaths[0];
- status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
- }
- }
-
- vdev->vlgrp = grp;
-
- for (i = 0; i < vdev->no_of_vpath; i++) {
- if (vdev->vpaths[i].is_configured)
- vdev->vpaths[i].ring.vlgrp = grp;
- }
-
- vxge_debug_entryexit(VXGE_TRACE,
- "%s:%d Exiting...", __func__, __LINE__);
-}
-
-/**
* vxge_vlan_rx_add_vid
* @dev: net device pointer.
* @vid: vid
@@ -3338,12 +3306,10 @@ vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
static void
vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
- struct vxgedev *vdev;
+ struct vxgedev *vdev = netdev_priv(dev);
struct vxge_vpath *vpath;
int vp_id;
- vdev = netdev_priv(dev);
-
/* Add these vlan to the vid table */
for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
vpath = &vdev->vpaths[vp_id];
@@ -3351,6 +3317,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
continue;
vxge_hw_vpath_vid_add(vpath->handle, vid);
}
+ set_bit(vid, vdev->active_vlans);
}
/**
@@ -3363,16 +3330,12 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
static void
vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
- struct vxgedev *vdev;
+ struct vxgedev *vdev = netdev_priv(dev);
struct vxge_vpath *vpath;
int vp_id;
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
- vdev = netdev_priv(dev);
-
- vlan_group_set_device(vdev->vlgrp, vid, NULL);
-
/* Delete this vlan from the vid table */
for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
vpath = &vdev->vpaths[vp_id];
@@ -3382,6 +3345,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
}
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...", __func__, __LINE__);
+ clear_bit(vid, vdev->active_vlans);
}
static const struct net_device_ops vxge_netdev_ops = {
@@ -3396,7 +3360,6 @@ static const struct net_device_ops vxge_netdev_ops = {
.ndo_change_mtu = vxge_change_mtu,
.ndo_fix_features = vxge_fix_features,
.ndo_set_features = vxge_set_features,
- .ndo_vlan_rx_register = vxge_vlan_rx_register,
.ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
.ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
.ndo_tx_timeout = vxge_tx_watchdog,
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index ed120aba443..f52a42d1dbb 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -18,6 +18,8 @@
#include "vxge-config.h"
#include "vxge-version.h"
#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
#define VXGE_DRIVER_NAME "vxge"
#define VXGE_DRIVER_VENDOR "Neterion, Inc"
@@ -201,30 +203,14 @@ struct vxge_msix_entry {
/* Software Statistics */
struct vxge_sw_stats {
- /* Network Stats (interface stats) */
-
- /* Tx */
- u64 tx_frms;
- u64 tx_errors;
- u64 tx_bytes;
- u64 txd_not_free;
- u64 txd_out_of_desc;
/* Virtual Path */
- u64 vpaths_open;
- u64 vpath_open_fail;
-
- /* Rx */
- u64 rx_frms;
- u64 rx_errors;
- u64 rx_bytes;
- u64 rx_mcast;
+ unsigned long vpaths_open;
+ unsigned long vpath_open_fail;
/* Misc. */
- u64 link_up;
- u64 link_down;
- u64 pci_map_fail;
- u64 skb_alloc_fail;
+ unsigned long link_up;
+ unsigned long link_down;
};
struct vxge_mac_addrs {
@@ -237,12 +223,14 @@ struct vxge_mac_addrs {
struct vxgedev;
struct vxge_fifo_stats {
+ struct u64_stats_sync syncp;
u64 tx_frms;
- u64 tx_errors;
u64 tx_bytes;
- u64 txd_not_free;
- u64 txd_out_of_desc;
- u64 pci_map_fail;
+
+ unsigned long tx_errors;
+ unsigned long txd_not_free;
+ unsigned long txd_out_of_desc;
+ unsigned long pci_map_fail;
};
struct vxge_fifo {
@@ -264,14 +252,16 @@ struct vxge_fifo {
} ____cacheline_aligned;
struct vxge_ring_stats {
- u64 prev_rx_frms;
+ struct u64_stats_sync syncp;
u64 rx_frms;
- u64 rx_errors;
- u64 rx_dropped;
- u64 rx_bytes;
u64 rx_mcast;
- u64 pci_map_fail;
- u64 skb_alloc_fail;
+ u64 rx_bytes;
+
+ unsigned long rx_errors;
+ unsigned long rx_dropped;
+ unsigned long prev_rx_frms;
+ unsigned long pci_map_fail;
+ unsigned long skb_alloc_fail;
};
struct vxge_ring {
@@ -299,7 +289,6 @@ struct vxge_ring {
#define VXGE_MAX_MAC_ADDR_COUNT 30
int vlan_tag_strip;
- struct vlan_group *vlgrp;
u32 rx_vector_no;
enum vxge_hw_status last_status;
@@ -344,7 +333,7 @@ struct vxgedev {
struct net_device *ndev;
struct pci_dev *pdev;
struct __vxge_hw_device *devh;
- struct vlan_group *vlgrp;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
int vlan_tag_strip;
struct vxge_config config;
unsigned long state;
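
Editor's note: the header split above keeps only the counters that genuinely need 64 bits (frame and byte counts) as u64 behind a u64_stats_sync and demotes the rest to unsigned long. The update side then brackets the 64-bit counters with u64_stats_update_begin()/u64_stats_update_end(); a hedged sketch pairing with the reader loop shown earlier, again with hypothetical names:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_stats {
        struct u64_stats_sync syncp;
        u64 packets;
        u64 bytes;
};

/* Hypothetical completion handler: count one received frame. */
static void example_count_frame(struct example_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->packets++;
        s->bytes += len;
        u64_stats_update_end(&s->syncp);
}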
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index f9351705516..ad64ce0afe3 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -1309,7 +1309,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
vxge_hw_channel_dtr_try_complete(channel, rxdh);
- rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
+ rxdp = *rxdh;
if (rxdp == NULL) {
status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
goto exit;
@@ -1565,7 +1565,7 @@ void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
channel = &fifo->channel;
txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
- txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
+ txdp_first = txdlh;
txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
txdp_last->control_0 |=
@@ -1631,7 +1631,7 @@ enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
vxge_hw_channel_dtr_try_complete(channel, txdlh);
- txdp = (struct vxge_hw_fifo_txd *)*txdlh;
+ txdp = *txdlh;
if (txdp == NULL) {
status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
goto exit;
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 4ac85a09c5a..54f995f4a5a 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -14,6 +14,8 @@
* Moxa C101 User's Manual
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
@@ -313,44 +315,44 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
int result;
if (irq<3 || irq>15 || irq == 6) /* FIXME */ {
- printk(KERN_ERR "c101: invalid IRQ value\n");
+ pr_err("invalid IRQ value\n");
return -ENODEV;
}
if (winbase < 0xC0000 || winbase > 0xDFFFF || (winbase & 0x3FFF) !=0) {
- printk(KERN_ERR "c101: invalid RAM value\n");
+ pr_err("invalid RAM value\n");
return -ENODEV;
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
- printk(KERN_ERR "c101: unable to allocate memory\n");
+ pr_err("unable to allocate memory\n");
return -ENOBUFS;
}
card->dev = alloc_hdlcdev(card);
if (!card->dev) {
- printk(KERN_ERR "c101: unable to allocate memory\n");
+ pr_err("unable to allocate memory\n");
kfree(card);
return -ENOBUFS;
}
if (request_irq(irq, sca_intr, 0, devname, card)) {
- printk(KERN_ERR "c101: could not allocate IRQ\n");
+ pr_err("could not allocate IRQ\n");
c101_destroy_card(card);
return -EBUSY;
}
card->irq = irq;
if (!request_mem_region(winbase, C101_MAPPED_RAM_SIZE, devname)) {
- printk(KERN_ERR "c101: could not request RAM window\n");
+ pr_err("could not request RAM window\n");
c101_destroy_card(card);
return -EBUSY;
}
card->phy_winbase = winbase;
card->win0base = ioremap(winbase, C101_MAPPED_RAM_SIZE);
if (!card->win0base) {
- printk(KERN_ERR "c101: could not map I/O address\n");
+ pr_err("could not map I/O address\n");
c101_destroy_card(card);
return -EFAULT;
}
@@ -381,7 +383,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
result = register_hdlc_device(dev);
if (result) {
- printk(KERN_WARNING "c101: unable to register hdlc device\n");
+ pr_warn("unable to register hdlc device\n");
c101_destroy_card(card);
return result;
}
@@ -389,10 +391,8 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
sca_init_port(card); /* Set up C101 memory */
set_carrier(card);
- printk(KERN_INFO "%s: Moxa C101 on IRQ%u,"
- " using %u TX + %u RX packets rings\n",
- dev->name, card->irq,
- card->tx_ring_buffers, card->rx_ring_buffers);
+ netdev_info(dev, "Moxa C101 on IRQ%u, using %u TX + %u RX packets rings\n",
+ card->irq, card->tx_ring_buffers, card->rx_ring_buffers);
*new_card = card;
new_card = &card->next_card;
@@ -405,12 +405,12 @@ static int __init c101_init(void)
{
if (hw == NULL) {
#ifdef MODULE
- printk(KERN_INFO "c101: no card initialized\n");
+ pr_info("no card initialized\n");
#endif
return -EINVAL; /* no parameters specified, abort */
}
- printk(KERN_INFO "%s\n", version);
+ pr_info("%s\n", version);
do {
unsigned long irq, ram;
@@ -428,7 +428,7 @@ static int __init c101_init(void)
return first_card ? 0 : -EINVAL;
}while(*hw++ == ':');
- printk(KERN_ERR "c101: invalid hardware parameters\n");
+ pr_err("invalid hardware parameters\n");
return first_card ? 0 : -EINVAL;
}
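
Editor's note: the c101 conversion follows the usual pr_fmt() recipe: define the prefix once, before the first include, and let pr_err()/pr_info()/pr_warn() add "c101: " automatically instead of hard-coding it in every string; messages tied to a registered netdevice move to netdev_info() and friends. A minimal sketch of the pattern for a hypothetical module:

/* Must appear before the first include so printk.h picks it up. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
        pr_info("probing\n");           /* logs "<modname>: probing" */
        pr_err("invalid IRQ value\n");  /* prefix added automatically */
        return 0;
}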
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 6fb6f8e667d..6aed238e573 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -74,6 +74,8 @@
* The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -361,14 +363,13 @@ static int __init cosa_init(void)
if (cosa_major > 0) {
if (register_chrdev(cosa_major, "cosa", &cosa_fops)) {
- printk(KERN_WARNING "cosa: unable to get major %d\n",
- cosa_major);
+ pr_warn("unable to get major %d\n", cosa_major);
err = -EIO;
goto out;
}
} else {
if (!(cosa_major=register_chrdev(0, "cosa", &cosa_fops))) {
- printk(KERN_WARNING "cosa: unable to register chardev\n");
+ pr_warn("unable to register chardev\n");
err = -EIO;
goto out;
}
@@ -378,7 +379,7 @@ static int __init cosa_init(void)
for (i=0; io[i] != 0 && i < MAX_CARDS; i++)
cosa_probe(io[i], irq[i], dma[i]);
if (!nr_cards) {
- printk(KERN_WARNING "cosa: no devices found.\n");
+ pr_warn("no devices found\n");
unregister_chrdev(cosa_major, "cosa");
err = -ENODEV;
goto out;
@@ -447,26 +448,25 @@ static int cosa_probe(int base, int irq, int dma)
/* Checking validity of parameters: */
/* IRQ should be 2-7 or 10-15; negative IRQ means autoprobe */
if ((irq >= 0 && irq < 2) || irq > 15 || (irq < 10 && irq > 7)) {
- printk (KERN_INFO "cosa_probe: invalid IRQ %d\n", irq);
+ pr_info("invalid IRQ %d\n", irq);
return -1;
}
/* I/O address should be between 0x100 and 0x3ff and should be
* multiple of 8. */
if (base < 0x100 || base > 0x3ff || base & 0x7) {
- printk (KERN_INFO "cosa_probe: invalid I/O address 0x%x\n",
- base);
+ pr_info("invalid I/O address 0x%x\n", base);
return -1;
}
/* DMA should be 0,1 or 3-7 */
if (dma < 0 || dma == 4 || dma > 7) {
- printk (KERN_INFO "cosa_probe: invalid DMA %d\n", dma);
+ pr_info("invalid DMA %d\n", dma);
return -1;
}
/* and finally, on 16-bit COSA DMA should be 4-7 and
* I/O base should not be multiple of 0x10 */
if (((base & 0x8) && dma < 4) || (!(base & 0x8) && dma > 3)) {
- printk (KERN_INFO "cosa_probe: 8/16 bit base and DMA mismatch"
- " (base=0x%x, dma=%d)\n", base, dma);
+ pr_info("8/16 bit base and DMA mismatch (base=0x%x, dma=%d)\n",
+ base, dma);
return -1;
}
@@ -479,7 +479,7 @@ static int cosa_probe(int base, int irq, int dma)
return -1;
if (cosa_reset_and_read_id(cosa, cosa->id_string) < 0) {
- printk(KERN_DEBUG "cosa: probe at 0x%x failed.\n", base);
+ printk(KERN_DEBUG "probe at 0x%x failed.\n", base);
err = -1;
goto err_out;
}
@@ -492,8 +492,7 @@ static int cosa_probe(int base, int irq, int dma)
else {
/* Print a warning only if we are not autoprobing */
#ifndef COSA_ISA_AUTOPROBE
- printk(KERN_INFO "cosa: valid signature not found at 0x%x.\n",
- base);
+ pr_info("valid signature not found at 0x%x\n", base);
#endif
err = -1;
goto err_out;
@@ -501,14 +500,14 @@ static int cosa_probe(int base, int irq, int dma)
/* Update the name of the region now we know the type of card */
release_region(base, is_8bit(cosa)?2:4);
if (!request_region(base, is_8bit(cosa)?2:4, cosa->type)) {
- printk(KERN_DEBUG "cosa: changing name at 0x%x failed.\n", base);
+ printk(KERN_DEBUG "changing name at 0x%x failed.\n", base);
return -1;
}
/* Now do IRQ autoprobe */
if (irq < 0) {
unsigned long irqs;
-/* printk(KERN_INFO "IRQ autoprobe\n"); */
+/* pr_info("IRQ autoprobe\n"); */
irqs = probe_irq_on();
/*
* Enable interrupt on tx buffer empty (it sure is)
@@ -526,13 +525,13 @@ static int cosa_probe(int base, int irq, int dma)
cosa_getdata8(cosa);
if (irq < 0) {
- printk (KERN_INFO "cosa IRQ autoprobe: multiple interrupts obtained (%d, board at 0x%x)\n",
+ pr_info("multiple interrupts obtained (%d, board at 0x%x)\n",
irq, cosa->datareg);
err = -1;
goto err_out;
}
if (irq == 0) {
- printk (KERN_INFO "cosa IRQ autoprobe: no interrupt obtained (board at 0x%x)\n",
+ pr_info("no interrupt obtained (board at 0x%x)\n",
cosa->datareg);
/* return -1; */
}
@@ -579,8 +578,7 @@ static int cosa_probe(int base, int irq, int dma)
/* Register the network interface */
if (!(chan->netdev = alloc_hdlcdev(chan))) {
- printk(KERN_WARNING "%s: alloc_hdlcdev failed.\n",
- chan->name);
+ pr_warn("%s: alloc_hdlcdev failed\n", chan->name);
goto err_hdlcdev;
}
dev_to_hdlc(chan->netdev)->attach = cosa_net_attach;
@@ -591,14 +589,14 @@ static int cosa_probe(int base, int irq, int dma)
chan->netdev->irq = chan->cosa->irq;
chan->netdev->dma = chan->cosa->dma;
if (register_hdlc_device(chan->netdev)) {
- printk(KERN_WARNING "%s: register_hdlc_device()"
- " failed.\n", chan->netdev->name);
+ netdev_warn(chan->netdev,
+ "register_hdlc_device() failed\n");
free_netdev(chan->netdev);
goto err_hdlcdev;
}
}
- printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n",
+ pr_info("cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n",
cosa->num, cosa->id_string, cosa->type,
cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels);
@@ -618,8 +616,7 @@ err_out1:
free_irq(cosa->irq, cosa);
err_out:
release_region(cosa->datareg,is_8bit(cosa)?2:4);
- printk(KERN_NOTICE "cosa%d: allocating resources failed\n",
- cosa->num);
+ pr_notice("cosa%d: allocating resources failed\n", cosa->num);
return err;
}
@@ -641,14 +638,14 @@ static int cosa_net_open(struct net_device *dev)
unsigned long flags;
if (!(chan->cosa->firmware_status & COSA_FW_START)) {
- printk(KERN_NOTICE "%s: start the firmware first (status %d)\n",
- chan->cosa->name, chan->cosa->firmware_status);
+ pr_notice("%s: start the firmware first (status %d)\n",
+ chan->cosa->name, chan->cosa->firmware_status);
return -EPERM;
}
spin_lock_irqsave(&chan->cosa->lock, flags);
if (chan->usage != 0) {
- printk(KERN_WARNING "%s: cosa_net_open called with usage count"
- " %d\n", chan->name, chan->usage);
+ pr_warn("%s: cosa_net_open called with usage count %d\n",
+ chan->name, chan->usage);
spin_unlock_irqrestore(&chan->cosa->lock, flags);
return -EBUSY;
}
@@ -736,8 +733,7 @@ static char *cosa_net_setup_rx(struct channel_data *chan, int size)
kfree_skb(chan->rx_skb);
chan->rx_skb = dev_alloc_skb(size);
if (chan->rx_skb == NULL) {
- printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n",
- chan->name);
+ pr_notice("%s: Memory squeeze, dropping packet\n", chan->name);
chan->netdev->stats.rx_dropped++;
return NULL;
}
@@ -748,8 +744,7 @@ static char *cosa_net_setup_rx(struct channel_data *chan, int size)
static int cosa_net_rx_done(struct channel_data *chan)
{
if (!chan->rx_skb) {
- printk(KERN_WARNING "%s: rx_done with empty skb!\n",
- chan->name);
+ pr_warn("%s: rx_done with empty skb!\n", chan->name);
chan->netdev->stats.rx_errors++;
chan->netdev->stats.rx_frame_errors++;
return 0;
@@ -768,8 +763,7 @@ static int cosa_net_rx_done(struct channel_data *chan)
static int cosa_net_tx_done(struct channel_data *chan, int size)
{
if (!chan->tx_skb) {
- printk(KERN_WARNING "%s: tx_done with empty skb!\n",
- chan->name);
+ pr_warn("%s: tx_done with empty skb!\n", chan->name);
chan->netdev->stats.tx_errors++;
chan->netdev->stats.tx_aborted_errors++;
return 1;
@@ -794,15 +788,15 @@ static ssize_t cosa_read(struct file *file,
char *kbuf;
if (!(cosa->firmware_status & COSA_FW_START)) {
- printk(KERN_NOTICE "%s: start the firmware first (status %d)\n",
- cosa->name, cosa->firmware_status);
+ pr_notice("%s: start the firmware first (status %d)\n",
+ cosa->name, cosa->firmware_status);
return -EPERM;
}
if (mutex_lock_interruptible(&chan->rlock))
return -ERESTARTSYS;
if ((chan->rxdata = kmalloc(COSA_MTU, GFP_DMA|GFP_KERNEL)) == NULL) {
- printk(KERN_INFO "%s: cosa_read() - OOM\n", cosa->name);
+ pr_info("%s: cosa_read() - OOM\n", cosa->name);
mutex_unlock(&chan->rlock);
return -ENOMEM;
}
@@ -869,8 +863,8 @@ static ssize_t cosa_write(struct file *file,
char *kbuf;
if (!(cosa->firmware_status & COSA_FW_START)) {
- printk(KERN_NOTICE "%s: start the firmware first (status %d)\n",
- cosa->name, cosa->firmware_status);
+ pr_notice("%s: start the firmware first (status %d)\n",
+ cosa->name, cosa->firmware_status);
return -EPERM;
}
if (down_interruptible(&chan->wsem))
@@ -881,8 +875,8 @@ static ssize_t cosa_write(struct file *file,
/* Allocate the buffer */
if ((kbuf = kmalloc(count, GFP_KERNEL|GFP_DMA)) == NULL) {
- printk(KERN_NOTICE "%s: cosa_write() OOM - dropping packet\n",
- cosa->name);
+ pr_notice("%s: cosa_write() OOM - dropping packet\n",
+ cosa->name);
up(&chan->wsem);
return -ENOMEM;
}
@@ -932,7 +926,7 @@ static int chrdev_tx_done(struct channel_data *chan, int size)
static unsigned int cosa_poll(struct file *file, poll_table *poll)
{
- printk(KERN_INFO "cosa_poll is here\n");
+ pr_info("cosa_poll is here\n");
return 0;
}
@@ -1017,15 +1011,14 @@ static inline int cosa_reset(struct cosa_data *cosa)
{
char idstring[COSA_MAX_ID_STRING];
if (cosa->usage > 1)
- printk(KERN_INFO "cosa%d: WARNING: reset requested with cosa->usage > 1 (%d). Odd things may happen.\n",
+ pr_info("cosa%d: WARNING: reset requested with cosa->usage > 1 (%d). Odd things may happen.\n",
cosa->num, cosa->usage);
cosa->firmware_status &= ~(COSA_FW_RESET|COSA_FW_START);
if (cosa_reset_and_read_id(cosa, idstring) < 0) {
- printk(KERN_NOTICE "cosa%d: reset failed\n", cosa->num);
+ pr_notice("cosa%d: reset failed\n", cosa->num);
return -EIO;
}
- printk(KERN_INFO "cosa%d: resetting device: %s\n", cosa->num,
- idstring);
+ pr_info("cosa%d: resetting device: %s\n", cosa->num, idstring);
cosa->firmware_status |= COSA_FW_RESET;
return 0;
}
@@ -1037,11 +1030,11 @@ static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
int i;
if (cosa->usage > 1)
- printk(KERN_INFO "%s: WARNING: download of microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
+ pr_info("%s: WARNING: download of microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
cosa->name, cosa->usage);
if (!(cosa->firmware_status & COSA_FW_RESET)) {
- printk(KERN_NOTICE "%s: reset the card first (status %d).\n",
- cosa->name, cosa->firmware_status);
+ pr_notice("%s: reset the card first (status %d)\n",
+ cosa->name, cosa->firmware_status);
return -EPERM;
}
@@ -1059,11 +1052,11 @@ static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
i = download(cosa, d.code, d.len, d.addr);
if (i < 0) {
- printk(KERN_NOTICE "cosa%d: microcode download failed: %d\n",
- cosa->num, i);
+ pr_notice("cosa%d: microcode download failed: %d\n",
+ cosa->num, i);
return -EIO;
}
- printk(KERN_INFO "cosa%d: downloading microcode - 0x%04x bytes at 0x%04x\n",
+ pr_info("cosa%d: downloading microcode - 0x%04x bytes at 0x%04x\n",
cosa->num, d.len, d.addr);
cosa->firmware_status |= COSA_FW_RESET|COSA_FW_DOWNLOAD;
return 0;
@@ -1076,12 +1069,11 @@ static inline int cosa_readmem(struct cosa_data *cosa, void __user *arg)
int i;
if (cosa->usage > 1)
- printk(KERN_INFO "cosa%d: WARNING: readmem requested with "
- "cosa->usage > 1 (%d). Odd things may happen.\n",
+ pr_info("cosa%d: WARNING: readmem requested with cosa->usage > 1 (%d). Odd things may happen.\n",
cosa->num, cosa->usage);
if (!(cosa->firmware_status & COSA_FW_RESET)) {
- printk(KERN_NOTICE "%s: reset the card first (status %d).\n",
- cosa->name, cosa->firmware_status);
+ pr_notice("%s: reset the card first (status %d)\n",
+ cosa->name, cosa->firmware_status);
return -EPERM;
}
@@ -1093,11 +1085,10 @@ static inline int cosa_readmem(struct cosa_data *cosa, void __user *arg)
i = readmem(cosa, d.code, d.len, d.addr);
if (i < 0) {
- printk(KERN_NOTICE "cosa%d: reading memory failed: %d\n",
- cosa->num, i);
+ pr_notice("cosa%d: reading memory failed: %d\n", cosa->num, i);
return -EIO;
}
- printk(KERN_INFO "cosa%d: reading card memory - 0x%04x bytes at 0x%04x\n",
+ pr_info("cosa%d: reading card memory - 0x%04x bytes at 0x%04x\n",
cosa->num, d.len, d.addr);
cosa->firmware_status |= COSA_FW_RESET;
return 0;
@@ -1109,23 +1100,22 @@ static inline int cosa_start(struct cosa_data *cosa, int address)
int i;
if (cosa->usage > 1)
- printk(KERN_INFO "cosa%d: WARNING: start microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
+ pr_info("cosa%d: WARNING: start microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
cosa->num, cosa->usage);
if ((cosa->firmware_status & (COSA_FW_RESET|COSA_FW_DOWNLOAD))
!= (COSA_FW_RESET|COSA_FW_DOWNLOAD)) {
- printk(KERN_NOTICE "%s: download the microcode and/or reset the card first (status %d).\n",
- cosa->name, cosa->firmware_status);
+ pr_notice("%s: download the microcode and/or reset the card first (status %d)\n",
+ cosa->name, cosa->firmware_status);
return -EPERM;
}
cosa->firmware_status &= ~COSA_FW_RESET;
if ((i=startmicrocode(cosa, address)) < 0) {
- printk(KERN_NOTICE "cosa%d: start microcode at 0x%04x failed: %d\n",
- cosa->num, address, i);
+ pr_notice("cosa%d: start microcode at 0x%04x failed: %d\n",
+ cosa->num, address, i);
return -EIO;
}
- printk(KERN_INFO "cosa%d: starting microcode at 0x%04x\n",
- cosa->num, address);
+ pr_info("cosa%d: starting microcode at 0x%04x\n", cosa->num, address);
cosa->startaddr = address;
cosa->firmware_status |= COSA_FW_START;
return 0;
@@ -1255,11 +1245,11 @@ static int cosa_start_tx(struct channel_data *chan, char *buf, int len)
#ifdef DEBUG_DATA
int i;
- printk(KERN_INFO "cosa%dc%d: starting tx(0x%x)", chan->cosa->num,
- chan->num, len);
+ pr_info("cosa%dc%d: starting tx(0x%x)",
+ chan->cosa->num, chan->num, len);
for (i=0; i<len; i++)
- printk(" %02x", buf[i]&0xff);
- printk("\n");
+ pr_cont(" %02x", buf[i]&0xff);
+ pr_cont("\n");
#endif
spin_lock_irqsave(&cosa->lock, flags);
chan->txbuf = buf;
@@ -1353,7 +1343,7 @@ static void cosa_kick(struct cosa_data *cosa)
if (test_bit(TXBIT, &cosa->rxtx))
s = "TX DMA";
- printk(KERN_INFO "%s: %s timeout - restarting.\n", cosa->name, s);
+ pr_info("%s: %s timeout - restarting\n", cosa->name, s);
spin_lock_irqsave(&cosa->lock, flags);
cosa->rxtx = 0;
@@ -1387,7 +1377,7 @@ static int cosa_dma_able(struct channel_data *chan, char *buf, int len)
return 0;
if ((b^ (b+len)) & 0x10000) {
if (count++ < 5)
- printk(KERN_INFO "%s: packet spanning a 64k boundary\n",
+ pr_info("%s: packet spanning a 64k boundary\n",
chan->name);
return 0;
}
@@ -1498,8 +1488,7 @@ static int readmem(struct cosa_data *cosa, char __user *microcode, int length, i
char c;
int i;
if ((i=get_wait_data(cosa)) == -1) {
- printk (KERN_INFO "cosa: 0x%04x bytes remaining\n",
- length);
+ pr_info("0x%04x bytes remaining\n", length);
return -11;
}
c=i;
@@ -1582,14 +1571,15 @@ static int get_wait_data(struct cosa_data *cosa)
short r;
r = cosa_getdata8(cosa);
#if 0
- printk(KERN_INFO "cosa: get_wait_data returning after %d retries\n", 999-retries);
+ pr_info("get_wait_data returning after %d retries\n",
+ 999-retries);
#endif
return r;
}
/* sleep if not ready to read */
schedule_timeout_interruptible(1);
}
- printk(KERN_INFO "cosa: timeout in get_wait_data (status 0x%x)\n",
+ pr_info("timeout in get_wait_data (status 0x%x)\n",
cosa_getstatus(cosa));
return -1;
}
@@ -1607,7 +1597,7 @@ static int put_wait_data(struct cosa_data *cosa, int data)
if (cosa_getstatus(cosa) & SR_TX_RDY) {
cosa_putdata8(cosa, data);
#if 0
- printk(KERN_INFO "Putdata: %d retries\n", 999-retries);
+ pr_info("Putdata: %d retries\n", 999-retries);
#endif
return 0;
}
@@ -1616,7 +1606,7 @@ static int put_wait_data(struct cosa_data *cosa, int data)
schedule_timeout_interruptible(1);
#endif
}
- printk(KERN_INFO "cosa%d: timeout in put_wait_data (status 0x%x)\n",
+ pr_info("cosa%d: timeout in put_wait_data (status 0x%x)\n",
cosa->num, cosa_getstatus(cosa));
return -1;
}
@@ -1636,13 +1626,13 @@ static int puthexnumber(struct cosa_data *cosa, int number)
sprintf(temp, "%04X", number);
for (i=0; i<4; i++) {
if (put_wait_data(cosa, temp[i]) == -1) {
- printk(KERN_NOTICE "cosa%d: puthexnumber failed to write byte %d\n",
- cosa->num, i);
+ pr_notice("cosa%d: puthexnumber failed to write byte %d\n",
+ cosa->num, i);
return -1-2*i;
}
if (get_wait_data(cosa) != temp[i]) {
- printk(KERN_NOTICE "cosa%d: puthexhumber failed to read echo of byte %d\n",
- cosa->num, i);
+ pr_notice("cosa%d: puthexhumber failed to read echo of byte %d\n",
+ cosa->num, i);
return -2-2*i;
}
}
@@ -1687,8 +1677,7 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
{
unsigned long flags, flags1;
#ifdef DEBUG_IRQS
- printk(KERN_INFO "cosa%d: SR_DOWN_REQUEST status=0x%04x\n",
- cosa->num, status);
+ pr_info("cosa%d: SR_DOWN_REQUEST status=0x%04x\n", cosa->num, status);
#endif
spin_lock_irqsave(&cosa->lock, flags);
set_bit(TXBIT, &cosa->rxtx);
@@ -1696,8 +1685,7 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
/* flow control, see the comment above */
int i=0;
if (!cosa->txbitmap) {
- printk(KERN_WARNING "%s: No channel wants data "
- "in TX IRQ. Expect DMA timeout.",
+ pr_warn("%s: No channel wants data in TX IRQ. Expect DMA timeout.\n",
cosa->name);
put_driver_status_nolock(cosa);
clear_bit(TXBIT, &cosa->rxtx);
@@ -1780,14 +1768,14 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
if (cosa->busmaster) {
unsigned long addr = virt_to_bus(cosa->txbuf);
int count=0;
- printk(KERN_INFO "busmaster IRQ\n");
+ pr_info("busmaster IRQ\n");
while (!(cosa_getstatus(cosa)&SR_TX_RDY)) {
count++;
udelay(10);
if (count > 1000) break;
}
- printk(KERN_INFO "status %x\n", cosa_getstatus(cosa));
- printk(KERN_INFO "ready after %d loops\n", count);
+ pr_info("status %x\n", cosa_getstatus(cosa));
+ pr_info("ready after %d loops\n", count);
cosa_putdata16(cosa, (addr >> 16)&0xffff);
count = 0;
@@ -1796,7 +1784,7 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
if (count > 1000) break;
udelay(10);
}
- printk(KERN_INFO "ready after %d loops\n", count);
+ pr_info("ready after %d loops\n", count);
cosa_putdata16(cosa, addr &0xffff);
flags1 = claim_dma_lock();
set_dma_mode(cosa->dma, DMA_MODE_CASCADE);
@@ -1824,7 +1812,7 @@ static inline void rx_interrupt(struct cosa_data *cosa, int status)
{
unsigned long flags;
#ifdef DEBUG_IRQS
- printk(KERN_INFO "cosa%d: SR_UP_REQUEST\n", cosa->num);
+ pr_info("cosa%d: SR_UP_REQUEST\n", cosa->num);
#endif
spin_lock_irqsave(&cosa->lock, flags);
@@ -1847,7 +1835,7 @@ static inline void rx_interrupt(struct cosa_data *cosa, int status)
debug_data_in(cosa, cosa->rxsize & 0xff);
#endif
#if 0
- printk(KERN_INFO "cosa%d: receive rxsize = (0x%04x).\n",
+ pr_info("cosa%d: receive rxsize = (0x%04x)\n",
cosa->num, cosa->rxsize);
#endif
}
@@ -1857,12 +1845,12 @@ static inline void rx_interrupt(struct cosa_data *cosa, int status)
debug_data_in(cosa, cosa->rxsize);
#endif
#if 0
- printk(KERN_INFO "cosa%d: receive rxsize = (0x%04x).\n",
+ pr_info("cosa%d: receive rxsize = (0x%04x)\n",
cosa->num, cosa->rxsize);
#endif
}
if (((cosa->rxsize & 0xe000) >> 13) >= cosa->nchannels) {
- printk(KERN_WARNING "%s: rx for unknown channel (0x%04x)\n",
+ pr_warn("%s: rx for unknown channel (0x%04x)\n",
cosa->name, cosa->rxsize);
spin_unlock_irqrestore(&cosa->lock, flags);
goto reject;
@@ -1877,7 +1865,7 @@ static inline void rx_interrupt(struct cosa_data *cosa, int status)
if (!cosa->rxbuf) {
reject: /* Reject the packet */
- printk(KERN_INFO "cosa%d: rejecting packet on channel %d\n",
+ pr_info("cosa%d: rejecting packet on channel %d\n",
cosa->num, cosa->rxchan->num);
cosa->rxbuf = cosa->bouncebuf;
}
@@ -1924,11 +1912,11 @@ static inline void eot_interrupt(struct cosa_data *cosa, int status)
#ifdef DEBUG_DATA
{
int i;
- printk(KERN_INFO "cosa%dc%d: done rx(0x%x)", cosa->num,
- cosa->rxchan->num, cosa->rxsize);
+ pr_info("cosa%dc%d: done rx(0x%x)",
+ cosa->num, cosa->rxchan->num, cosa->rxsize);
for (i=0; i<cosa->rxsize; i++)
- printk (" %02x", cosa->rxbuf[i]&0xff);
- printk("\n");
+ pr_cont(" %02x", cosa->rxbuf[i]&0xff);
+ pr_cont("\n");
}
#endif
/* Packet for unknown channel? */
@@ -1940,8 +1928,7 @@ static inline void eot_interrupt(struct cosa_data *cosa, int status)
if (cosa->rxchan->rx_done(cosa->rxchan))
clear_bit(cosa->rxchan->num, &cosa->rxbitmap);
} else {
- printk(KERN_NOTICE "cosa%d: unexpected EOT interrupt\n",
- cosa->num);
+ pr_notice("cosa%d: unexpected EOT interrupt\n", cosa->num);
}
/*
* Clear the RXBIT, TXBIT and IRQBIT (the latest should be
@@ -1963,8 +1950,7 @@ static irqreturn_t cosa_interrupt(int irq, void *cosa_)
again:
status = cosa_getstatus(cosa);
#ifdef DEBUG_IRQS
- printk(KERN_INFO "cosa%d: got IRQ, status 0x%02x\n", cosa->num,
- status & 0xff);
+ pr_info("cosa%d: got IRQ, status 0x%02x\n", cosa->num, status & 0xff);
#endif
#ifdef DEBUG_IO
debug_status_in(cosa, status);
@@ -1985,15 +1971,15 @@ again:
udelay(100);
goto again;
}
- printk(KERN_INFO "cosa%d: unknown status 0x%02x in IRQ after %d retries\n",
+ pr_info("cosa%d: unknown status 0x%02x in IRQ after %d retries\n",
cosa->num, status & 0xff, count);
}
#ifdef DEBUG_IRQS
if (count)
- printk(KERN_INFO "%s: %d-times got unknown status in IRQ\n",
+ pr_info("%s: %d-times got unknown status in IRQ\n",
cosa->name, count);
else
- printk(KERN_INFO "%s: returning from IRQ\n", cosa->name);
+ pr_info("%s: returning from IRQ\n", cosa->name);
#endif
return IRQ_HANDLED;
}
@@ -2024,41 +2010,41 @@ static void debug_status_in(struct cosa_data *cosa, int status)
s = "NO_REQ";
break;
}
- printk(KERN_INFO "%s: IO: status -> 0x%02x (%s%s%s%s)\n",
+ pr_info("%s: IO: status -> 0x%02x (%s%s%s%s)\n",
cosa->name,
status,
- status & SR_USR_RQ ? "USR_RQ|":"",
- status & SR_TX_RDY ? "TX_RDY|":"",
- status & SR_RX_RDY ? "RX_RDY|":"",
+ status & SR_USR_RQ ? "USR_RQ|" : "",
+ status & SR_TX_RDY ? "TX_RDY|" : "",
+ status & SR_RX_RDY ? "RX_RDY|" : "",
s);
}
static void debug_status_out(struct cosa_data *cosa, int status)
{
- printk(KERN_INFO "%s: IO: status <- 0x%02x (%s%s%s%s%s%s)\n",
+ pr_info("%s: IO: status <- 0x%02x (%s%s%s%s%s%s)\n",
cosa->name,
status,
- status & SR_RX_DMA_ENA ? "RXDMA|":"!rxdma|",
- status & SR_TX_DMA_ENA ? "TXDMA|":"!txdma|",
- status & SR_RST ? "RESET|":"",
- status & SR_USR_INT_ENA ? "USRINT|":"!usrint|",
- status & SR_TX_INT_ENA ? "TXINT|":"!txint|",
- status & SR_RX_INT_ENA ? "RXINT":"!rxint");
+ status & SR_RX_DMA_ENA ? "RXDMA|" : "!rxdma|",
+ status & SR_TX_DMA_ENA ? "TXDMA|" : "!txdma|",
+ status & SR_RST ? "RESET|" : "",
+ status & SR_USR_INT_ENA ? "USRINT|" : "!usrint|",
+ status & SR_TX_INT_ENA ? "TXINT|" : "!txint|",
+ status & SR_RX_INT_ENA ? "RXINT" : "!rxint");
}
static void debug_data_in(struct cosa_data *cosa, int data)
{
- printk(KERN_INFO "%s: IO: data -> 0x%04x\n", cosa->name, data);
+ pr_info("%s: IO: data -> 0x%04x\n", cosa->name, data);
}
static void debug_data_out(struct cosa_data *cosa, int data)
{
- printk(KERN_INFO "%s: IO: data <- 0x%04x\n", cosa->name, data);
+ pr_info("%s: IO: data <- 0x%04x\n", cosa->name, data);
}
static void debug_data_cmd(struct cosa_data *cosa, int data)
{
- printk(KERN_INFO "%s: IO: data <- 0x%04x (%s|%s)\n",
+ pr_info("%s: IO: data <- 0x%04x (%s|%s)\n",
cosa->name, data,
data & SR_RDY_RCV ? "RX_RDY" : "!rx_rdy",
data & SR_RDY_SND ? "TX_RDY" : "!tx_rdy");
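
Editor's note: cosa builds several log lines piecewise (the hex dumps and the status decoders), so the follow-on printk() calls become pr_cont(), which continues the current line without emitting a new prefix or log level. A sketch of that idiom with a hypothetical buffer:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

/* Hypothetical debug helper: one line, 'len' hex bytes. */
static void example_dump_line(const unsigned char *buf, int len)
{
        int i;

        pr_info("tx(0x%x)", len);               /* opens the line, prefixed */
        for (i = 0; i < len; i++)
                pr_cont(" %02x", buf[i]);       /* appends to the same line */
        pr_cont("\n");
}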
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index 164c3624ba8..2a3ecae67a9 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -48,6 +48,8 @@
* Aug 8, 1998 acme Initial version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h> /* __init */
#include <linux/module.h>
#include <linux/kernel.h> /* printk(), and other useful stuff */
@@ -81,10 +83,9 @@ static u16 checksum(u8 *buf, u32 len);
/* Global Data */
/* private data */
-static const char modname[] = "cycx_drv";
static const char fullname[] = "Cyclom 2X Support Module";
-static const char copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
- "<acme@conectiva.com.br>";
+static const char copyright[] =
+ "(c) 1998-2003 Arnaldo Carvalho de Melo <acme@conectiva.com.br>";
/* Hardware configuration options.
* These are arrays of configuration options used by verification routines.
@@ -110,8 +111,8 @@ static const long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
static int __init cycx_drv_init(void)
{
- printk(KERN_INFO "%s v%u.%u %s\n", fullname, MOD_VERSION, MOD_RELEASE,
- copyright);
+ pr_info("%s v%u.%u %s\n",
+ fullname, MOD_VERSION, MOD_RELEASE, copyright);
return 0;
}
@@ -139,18 +140,16 @@ int cycx_setup(struct cycx_hw *hw, void *cfm, u32 len, unsigned long dpmbase)
/* Verify IRQ configuration options */
if (!get_option_index(cycx_2x_irq_options, hw->irq)) {
- printk(KERN_ERR "%s: IRQ %d is invalid!\n", modname, hw->irq);
+ pr_err("IRQ %d is invalid!\n", hw->irq);
return -EINVAL;
}
/* Setup adapter dual-port memory window and test memory */
if (!dpmbase) {
- printk(KERN_ERR "%s: you must specify the dpm address!\n",
- modname);
+ pr_err("you must specify the dpm address!\n");
return -EINVAL;
} else if (!get_option_index(cyc2x_dpmbase_options, dpmbase)) {
- printk(KERN_ERR "%s: memory address 0x%lX is invalid!\n",
- modname, dpmbase);
+ pr_err("memory address 0x%lX is invalid!\n", dpmbase);
return -EINVAL;
}
@@ -158,13 +157,12 @@ int cycx_setup(struct cycx_hw *hw, void *cfm, u32 len, unsigned long dpmbase)
hw->dpmsize = CYCX_WINDOWSIZE;
if (!detect_cyc2x(hw->dpmbase)) {
- printk(KERN_ERR "%s: adapter Cyclom 2X not found at "
- "address 0x%lX!\n", modname, dpmbase);
+ pr_err("adapter Cyclom 2X not found at address 0x%lX!\n",
+ dpmbase);
return -EINVAL;
}
- printk(KERN_INFO "%s: found Cyclom 2X card at address 0x%lX.\n",
- modname, dpmbase);
+ pr_info("found Cyclom 2X card at address 0x%lX\n", dpmbase);
/* Load firmware. If loader fails then shut down adapter */
err = load_cyc2x(hw, cfm, len);
@@ -339,7 +337,7 @@ static int cycx_data_boot(void __iomem *addr, u8 *code, u32 len)
for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ)
if (buffer_load(addr, code + i,
min_t(u32, CFM_LOAD_BUFSZ, (len - i))) < 0) {
- printk(KERN_ERR "%s: Error !!\n", modname);
+ pr_err("Error !!\n");
return -1;
}
@@ -370,7 +368,7 @@ static int cycx_code_boot(void __iomem *addr, u8 *code, u32 len)
for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ)
if (buffer_load(addr, code + i,
min_t(u32, CFM_LOAD_BUFSZ, (len - i)))) {
- printk(KERN_ERR "%s: Error !!\n", modname);
+ pr_err("Error !!\n");
return -1;
}
@@ -391,23 +389,20 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
u16 cksum;
/* Announce */
- printk(KERN_INFO "%s: firmware signature=\"%s\"\n", modname,
- cfm->signature);
+ pr_info("firmware signature=\"%s\"\n", cfm->signature);
/* Verify firmware signature */
if (strcmp(cfm->signature, CFM_SIGNATURE)) {
- printk(KERN_ERR "%s:load_cyc2x: not Cyclom-2X firmware!\n",
- modname);
+ pr_err("load_cyc2x: not Cyclom-2X firmware!\n");
return -EINVAL;
}
- printk(KERN_INFO "%s: firmware version=%u\n", modname, cfm->version);
+ pr_info("firmware version=%u\n", cfm->version);
/* Verify firmware module format version */
if (cfm->version != CFM_VERSION) {
- printk(KERN_ERR "%s:%s: firmware format %u rejected! "
- "Expecting %u.\n",
- modname, __func__, cfm->version, CFM_VERSION);
+ pr_err("%s: firmware format %u rejected! Expecting %u.\n",
+ __func__, cfm->version, CFM_VERSION);
return -EINVAL;
}
@@ -419,23 +414,22 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
if (((len - sizeof(struct cycx_firmware) - 1) != cfm->info.codesize) ||
*/
if (cksum != cfm->checksum) {
- printk(KERN_ERR "%s:%s: firmware corrupted!\n",
- modname, __func__);
- printk(KERN_ERR " cdsize = 0x%x (expected 0x%lx)\n",
- len - (int)sizeof(struct cycx_firmware) - 1,
- cfm->info.codesize);
- printk(KERN_ERR " chksum = 0x%x (expected 0x%x)\n",
- cksum, cfm->checksum);
+ pr_err("%s: firmware corrupted!\n", __func__);
+ pr_err(" cdsize = 0x%x (expected 0x%lx)\n",
+ len - (int)sizeof(struct cycx_firmware) - 1,
+ cfm->info.codesize);
+ pr_err(" chksum = 0x%x (expected 0x%x)\n",
+ cksum, cfm->checksum);
return -EINVAL;
}
/* If everything is ok, set reset, data and code pointers */
img_hdr = (struct cycx_fw_header *)&cfm->image;
#ifdef FIRMWARE_DEBUG
- printk(KERN_INFO "%s:%s: image sizes\n", __func__, modname);
- printk(KERN_INFO " reset=%lu\n", img_hdr->reset_size);
- printk(KERN_INFO " data=%lu\n", img_hdr->data_size);
- printk(KERN_INFO " code=%lu\n", img_hdr->code_size);
+ pr_info("%s: image sizes\n", __func__);
+ pr_info(" reset=%lu\n", img_hdr->reset_size);
+ pr_info(" data=%lu\n", img_hdr->data_size);
+ pr_info(" code=%lu\n", img_hdr->code_size);
#endif
reset_image = ((u8 *)img_hdr) + sizeof(struct cycx_fw_header);
data_image = reset_image + img_hdr->reset_size;
@@ -443,15 +437,14 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
/*---- Start load ----*/
/* Announce */
- printk(KERN_INFO "%s: loading firmware %s (ID=%u)...\n", modname,
- cfm->descr[0] ? cfm->descr : "unknown firmware",
- cfm->info.codeid);
+ pr_info("loading firmware %s (ID=%u)...\n",
+ cfm->descr[0] ? cfm->descr : "unknown firmware",
+ cfm->info.codeid);
for (i = 0 ; i < 5 ; i++) {
/* Reset Cyclom hardware */
if (!reset_cyc2x(hw->dpmbase)) {
- printk(KERN_ERR "%s: dpm problem or board not found\n",
- modname);
+ pr_err("dpm problem or board not found\n");
return -EINVAL;
}
@@ -468,19 +461,19 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
msleep_interruptible(1 * 1000);
}
- printk(KERN_ERR "%s: reset not started.\n", modname);
+ pr_err("reset not started\n");
return -EINVAL;
reset_loaded:
/* Load data.bin */
if (cycx_data_boot(hw->dpmbase, data_image, img_hdr->data_size)) {
- printk(KERN_ERR "%s: cannot load data file.\n", modname);
+ pr_err("cannot load data file\n");
return -EINVAL;
}
/* Load code.bin */
if (cycx_code_boot(hw->dpmbase, code_image, img_hdr->code_size)) {
- printk(KERN_ERR "%s: cannot load code file.\n", modname);
+ pr_err("cannot load code file\n");
return -EINVAL;
}
@@ -493,7 +486,7 @@ reset_loaded:
/* Arthur Ganzert's tip: wait a while after the firmware loading...
seg abr 26 17:17:12 EST 1999 - acme */
msleep_interruptible(7 * 1000);
- printk(KERN_INFO "%s: firmware loaded!\n", modname);
+ pr_info("firmware loaded!\n");
/* enable interrupts */
cycx_inten(hw);
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
index 859dba9b972..81fbbad406b 100644
--- a/drivers/net/wan/cycx_main.c
+++ b/drivers/net/wan/cycx_main.c
@@ -40,6 +40,8 @@
* 1998/08/08 acme Initial version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/stddef.h> /* offsetof(), etc. */
#include <linux/errno.h> /* return codes */
#include <linux/string.h> /* inline memset(), etc. */
@@ -50,6 +52,7 @@
#include <linux/wanrouter.h> /* WAN router definitions */
#include <linux/cyclomx.h> /* cyclomx common user API definitions */
#include <linux/init.h> /* __init (when not using as a module) */
+#include <linux/interrupt.h>
unsigned int cycx_debug;
@@ -106,7 +109,7 @@ static int __init cycx_init(void)
{
int cnt, err = -ENOMEM;
- printk(KERN_INFO "%s v%u.%u %s\n",
+ pr_info("%s v%u.%u %s\n",
cycx_fullname, CYCX_DRV_VERSION, CYCX_DRV_RELEASE,
cycx_copyright);
@@ -132,9 +135,8 @@ static int __init cycx_init(void)
err = register_wan_device(wandev);
if (err) {
- printk(KERN_ERR "%s: %s registration failed with "
- "error %d!\n",
- cycx_drvname, card->devname, err);
+ pr_err("%s registration failed with error %d!\n",
+ card->devname, err);
break;
}
}
@@ -197,14 +199,13 @@ static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf)
rc = -EINVAL;
if (!conf->data_size || !conf->data) {
- printk(KERN_ERR "%s: firmware not found in configuration "
- "data!\n", wandev->name);
+ pr_err("%s: firmware not found in configuration data!\n",
+ wandev->name);
goto out;
}
if (conf->irq <= 0) {
- printk(KERN_ERR "%s: can't configure without IRQ!\n",
- wandev->name);
+ pr_err("%s: can't configure without IRQ!\n", wandev->name);
goto out;
}
@@ -212,8 +213,7 @@ static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf)
irq = conf->irq == 2 ? 9 : conf->irq; /* IRQ2 -> IRQ9 */
if (request_irq(irq, cycx_isr, 0, wandev->name, card)) {
- printk(KERN_ERR "%s: can't reserve IRQ %d!\n",
- wandev->name, irq);
+ pr_err("%s: can't reserve IRQ %d!\n", wandev->name, irq);
goto out;
}
@@ -245,8 +245,7 @@ static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf)
break;
#endif
default:
- printk(KERN_ERR "%s: this firmware is not supported!\n",
- wandev->name);
+ pr_err("%s: this firmware is not supported!\n", wandev->name);
rc = -EINVAL;
}
@@ -287,8 +286,7 @@ static int cycx_wan_shutdown(struct wan_device *wandev)
card = wandev->private;
wandev->state = WAN_UNCONFIGURED;
cycx_down(&card->hw);
- printk(KERN_INFO "%s: irq %d being freed!\n", wandev->name,
- wandev->irq);
+ pr_info("%s: irq %d being freed!\n", wandev->name, wandev->irq);
free_irq(wandev->irq, card);
out: return ret;
}
@@ -307,8 +305,8 @@ static irqreturn_t cycx_isr(int irq, void *dev_id)
goto out;
if (card->in_isr) {
- printk(KERN_WARNING "%s: interrupt re-entrancy on IRQ %d!\n",
- card->devname, card->wandev.irq);
+ pr_warn("%s: interrupt re-entrancy on IRQ %d!\n",
+ card->devname, card->wandev.irq);
goto out;
}
@@ -336,7 +334,7 @@ void cycx_set_state(struct cycx_device *card, int state)
string_state = "disconnected!";
break;
}
- printk(KERN_INFO "%s: link %s\n", card->devname, string_state);
+ pr_info("%s: link %s\n", card->devname, string_state);
card->wandev.state = state;
}
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index cf9e15fd8d9..06f3f6309e4 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -76,6 +76,8 @@
* 1998/08/08 acme Initial version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define CYCLOMX_X25_DEBUG 1
#include <linux/ctype.h> /* isdigit() */
@@ -230,8 +232,8 @@ int cycx_x25_wan_init(struct cycx_device *card, wandev_conf_t *conf)
/* Verify configuration ID */
if (conf->config_id != WANCONFIG_X25) {
- printk(KERN_INFO "%s: invalid configuration ID %u!\n",
- card->devname, conf->config_id);
+ pr_info("%s: invalid configuration ID %u!\n",
+ card->devname, conf->config_id);
return -EINVAL;
}
@@ -374,8 +376,7 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
int err = 0;
if (!conf->name[0] || strlen(conf->name) > WAN_IFNAME_SZ) {
- printk(KERN_INFO "%s: invalid interface name!\n",
- card->devname);
+ pr_info("%s: invalid interface name!\n", card->devname);
return -EINVAL;
}
@@ -398,8 +399,8 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
if (len) {
if (len > WAN_ADDRESS_SZ) {
- printk(KERN_ERR "%s: %s local addr too long!\n",
- wandev->name, chan->name);
+ pr_err("%s: %s local addr too long!\n",
+ wandev->name, chan->name);
err = -EINVAL;
goto error;
} else {
@@ -429,15 +430,14 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
if (lcn >= card->u.x.lo_pvc && lcn <= card->u.x.hi_pvc)
chan->lcn = lcn;
else {
- printk(KERN_ERR
- "%s: PVC %u is out of range on interface %s!\n",
- wandev->name, lcn, chan->name);
+ pr_err("%s: PVC %u is out of range on interface %s!\n",
+ wandev->name, lcn, chan->name);
err = -EINVAL;
goto error;
}
} else {
- printk(KERN_ERR "%s: invalid media address on interface %s!\n",
- wandev->name, chan->name);
+ pr_err("%s: invalid media address on interface %s!\n",
+ wandev->name, chan->name);
err = -EINVAL;
goto error;
}
@@ -607,9 +607,8 @@ static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
++chan->ifstats.tx_dropped;
else if (chan->svc && chan->protocol &&
chan->protocol != ntohs(skb->protocol)) {
- printk(KERN_INFO
- "%s: unsupported Ethertype 0x%04X on interface %s!\n",
- card->devname, ntohs(skb->protocol), dev->name);
+ pr_info("%s: unsupported Ethertype 0x%04X on interface %s!\n",
+ card->devname, ntohs(skb->protocol), dev->name);
++chan->ifstats.tx_errors;
} else if (chan->protocol == ETH_P_IP) {
switch (chan->state) {
@@ -643,9 +642,8 @@ static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
cycx_x25_chan_disconnect(dev);
goto free_packet;
default:
- printk(KERN_INFO
- "%s: unknown %d x25-iface request on %s!\n",
- card->devname, skb->data[0], dev->name);
+ pr_info("%s: unknown %d x25-iface request on %s!\n",
+ card->devname, skb->data[0], dev->name);
++chan->ifstats.tx_errors;
goto free_packet;
}
@@ -746,8 +744,7 @@ static void cycx_x25_irq_tx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
card->buff_int_mode_unbusy = 1;
netif_wake_queue(dev);
} else
- printk(KERN_ERR "%s:ackvc for inexistent lcn %d\n",
- card->devname, lcn);
+ pr_err("%s:ackvc for inexistent lcn %d\n", card->devname, lcn);
}
/* Receive interrupt handler.
@@ -780,8 +777,8 @@ static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
if (!dev) {
/* Invalid channel, discard packet */
- printk(KERN_INFO "%s: receiving on orphaned LCN %d!\n",
- card->devname, lcn);
+ pr_info("%s: receiving on orphaned LCN %d!\n",
+ card->devname, lcn);
return;
}
@@ -802,8 +799,8 @@ static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
if ((skb = dev_alloc_skb((chan->protocol == ETH_P_X25 ? 1 : 0) +
bufsize +
dev->hard_header_len)) == NULL) {
- printk(KERN_INFO "%s: no socket buffers available!\n",
- card->devname);
+ pr_info("%s: no socket buffers available!\n",
+ card->devname);
chan->drop_sequence = 1;
++chan->ifstats.rx_dropped;
return;
@@ -826,8 +823,8 @@ static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
if (bitm)
chan->drop_sequence = 1;
- printk(KERN_INFO "%s: unexpectedly long packet sequence "
- "on interface %s!\n", card->devname, dev->name);
+ pr_info("%s: unexpectedly long packet sequence on interface %s!\n",
+ card->devname, dev->name);
++chan->ifstats.rx_length_errors;
return;
}
@@ -880,8 +877,8 @@ static void cycx_x25_irq_connect(struct cycx_device *card,
dev = cycx_x25_get_dev_by_dte_addr(wandev, rem);
if (!dev) {
/* Invalid channel, discard packet */
- printk(KERN_INFO "%s: connect not expected: remote %s!\n",
- card->devname, rem);
+ pr_info("%s: connect not expected: remote %s!\n",
+ card->devname, rem);
return;
}
@@ -909,8 +906,8 @@ static void cycx_x25_irq_connect_confirm(struct cycx_device *card,
if (!dev) {
/* Invalid channel, discard packet */
clear_bit(--key, (void*)&card->u.x.connection_keys);
- printk(KERN_INFO "%s: connect confirm not expected: lcn %d, "
- "key=%d!\n", card->devname, lcn, key);
+ pr_info("%s: connect confirm not expected: lcn %d, key=%d!\n",
+ card->devname, lcn, key);
return;
}
@@ -934,8 +931,8 @@ static void cycx_x25_irq_disconnect_confirm(struct cycx_device *card,
dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
if (!dev) {
/* Invalid channel, discard packet */
- printk(KERN_INFO "%s:disconnect confirm not expected!:lcn %d\n",
- card->devname, lcn);
+ pr_info("%s:disconnect confirm not expected!:lcn %d\n",
+ card->devname, lcn);
return;
}
@@ -980,13 +977,13 @@ static void cycx_x25_irq_log(struct cycx_device *card, struct cycx_x25_cmd *cmd)
cycx_peek(&card->hw, cmd->buf + 10 + toread, &code, 1);
cycx_peek(&card->hw, cmd->buf + 10 + toread + 1, &routine, 1);
- printk(KERN_INFO "cycx_x25_irq_handler: X25_LOG (0x4500) indic.:\n");
- printk(KERN_INFO "cmd->buf=0x%X\n", cmd->buf);
- printk(KERN_INFO "Log message code=0x%X\n", msg_code);
- printk(KERN_INFO "Link=%d\n", link);
- printk(KERN_INFO "log code=0x%X\n", code);
- printk(KERN_INFO "log routine=0x%X\n", routine);
- printk(KERN_INFO "Message size=%d\n", size);
+ pr_info("cycx_x25_irq_handler: X25_LOG (0x4500) indic.:\n");
+ pr_info("cmd->buf=0x%X\n", cmd->buf);
+ pr_info("Log message code=0x%X\n", msg_code);
+ pr_info("Link=%d\n", link);
+ pr_info("log code=0x%X\n", code);
+ pr_info("log routine=0x%X\n", routine);
+ pr_info("Message size=%d\n", size);
hex_dump("Message", bf, toread);
#endif
}
@@ -1009,24 +1006,14 @@ static void cycx_x25_irq_stat(struct cycx_device *card,
static void cycx_x25_irq_spurious(struct cycx_device *card,
struct cycx_x25_cmd *cmd)
{
- printk(KERN_INFO "%s: spurious interrupt (0x%X)!\n",
- card->devname, cmd->command);
+ pr_info("%s: spurious interrupt (0x%X)!\n",
+ card->devname, cmd->command);
}
#ifdef CYCLOMX_X25_DEBUG
static void hex_dump(char *msg, unsigned char *p, int len)
{
- unsigned char hex[1024],
- * phex = hex;
-
- if (len >= (sizeof(hex) / 2))
- len = (sizeof(hex) / 2) - 1;
-
- while (len--) {
- sprintf(phex, "%02x", *p++);
- phex += 2;
- }
-
- printk(KERN_INFO "%s: %s\n", msg, hex);
+ print_hex_dump(KERN_INFO, msg, DUMP_PREFIX_OFFSET, 16, 1,
+ p, len, true);
}
#endif
@@ -1203,8 +1190,8 @@ static int x25_place_call(struct cycx_device *card,
u8 key;
if (card->u.x.connection_keys == ~0U) {
- printk(KERN_INFO "%s: too many simultaneous connection "
- "requests!\n", card->devname);
+ pr_info("%s: too many simultaneous connection requests!\n",
+ card->devname);
return -EAGAIN;
}
@@ -1381,8 +1368,8 @@ static void cycx_x25_chan_timer(unsigned long d)
if (chan->state == WAN_CONNECTED)
cycx_x25_chan_disconnect(dev);
else
- printk(KERN_ERR "%s: %s for svc (%s) not connected!\n",
- chan->card->devname, __func__, dev->name);
+ pr_err("%s: %s for svc (%s) not connected!\n",
+ chan->card->devname, __func__, dev->name);
}
/* Set logical channel state. */
@@ -1433,8 +1420,8 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
break;
}
- printk(KERN_INFO "%s: interface %s %s\n", card->devname,
- dev->name, string_state);
+ pr_info("%s: interface %s %s\n",
+ card->devname, dev->name, string_state);
chan->state = state;
}
@@ -1488,7 +1475,7 @@ static void cycx_x25_chan_send_event(struct net_device *dev, u8 event)
unsigned char *ptr;
if ((skb = dev_alloc_skb(1)) == NULL) {
- printk(KERN_ERR "%s: out of memory\n", __func__);
+ pr_err("%s: out of memory\n", __func__);
return;
}
@@ -1557,56 +1544,56 @@ static void reset_timer(struct net_device *dev)
#ifdef CYCLOMX_X25_DEBUG
static void cycx_x25_dump_config(struct cycx_x25_config *conf)
{
- printk(KERN_INFO "X.25 configuration\n");
- printk(KERN_INFO "-----------------\n");
- printk(KERN_INFO "link number=%d\n", conf->link);
- printk(KERN_INFO "line speed=%d\n", conf->speed);
- printk(KERN_INFO "clock=%sternal\n", conf->clock == 8 ? "Ex" : "In");
- printk(KERN_INFO "# level 2 retransm.=%d\n", conf->n2);
- printk(KERN_INFO "level 2 window=%d\n", conf->n2win);
- printk(KERN_INFO "level 3 window=%d\n", conf->n3win);
- printk(KERN_INFO "# logical channels=%d\n", conf->nvc);
- printk(KERN_INFO "level 3 pkt len=%d\n", conf->pktlen);
- printk(KERN_INFO "my address=%d\n", conf->locaddr);
- printk(KERN_INFO "remote address=%d\n", conf->remaddr);
- printk(KERN_INFO "t1=%d seconds\n", conf->t1);
- printk(KERN_INFO "t2=%d seconds\n", conf->t2);
- printk(KERN_INFO "t21=%d seconds\n", conf->t21);
- printk(KERN_INFO "# PVCs=%d\n", conf->npvc);
- printk(KERN_INFO "t23=%d seconds\n", conf->t23);
- printk(KERN_INFO "flags=0x%x\n", conf->flags);
+ pr_info("X.25 configuration\n");
+ pr_info("-----------------\n");
+ pr_info("link number=%d\n", conf->link);
+ pr_info("line speed=%d\n", conf->speed);
+ pr_info("clock=%sternal\n", conf->clock == 8 ? "Ex" : "In");
+ pr_info("# level 2 retransm.=%d\n", conf->n2);
+ pr_info("level 2 window=%d\n", conf->n2win);
+ pr_info("level 3 window=%d\n", conf->n3win);
+ pr_info("# logical channels=%d\n", conf->nvc);
+ pr_info("level 3 pkt len=%d\n", conf->pktlen);
+ pr_info("my address=%d\n", conf->locaddr);
+ pr_info("remote address=%d\n", conf->remaddr);
+ pr_info("t1=%d seconds\n", conf->t1);
+ pr_info("t2=%d seconds\n", conf->t2);
+ pr_info("t21=%d seconds\n", conf->t21);
+ pr_info("# PVCs=%d\n", conf->npvc);
+ pr_info("t23=%d seconds\n", conf->t23);
+ pr_info("flags=0x%x\n", conf->flags);
}
static void cycx_x25_dump_stats(struct cycx_x25_stats *stats)
{
- printk(KERN_INFO "X.25 statistics\n");
- printk(KERN_INFO "--------------\n");
- printk(KERN_INFO "rx_crc_errors=%d\n", stats->rx_crc_errors);
- printk(KERN_INFO "rx_over_errors=%d\n", stats->rx_over_errors);
- printk(KERN_INFO "n2_tx_frames=%d\n", stats->n2_tx_frames);
- printk(KERN_INFO "n2_rx_frames=%d\n", stats->n2_rx_frames);
- printk(KERN_INFO "tx_timeouts=%d\n", stats->tx_timeouts);
- printk(KERN_INFO "rx_timeouts=%d\n", stats->rx_timeouts);
- printk(KERN_INFO "n3_tx_packets=%d\n", stats->n3_tx_packets);
- printk(KERN_INFO "n3_rx_packets=%d\n", stats->n3_rx_packets);
- printk(KERN_INFO "tx_aborts=%d\n", stats->tx_aborts);
- printk(KERN_INFO "rx_aborts=%d\n", stats->rx_aborts);
+ pr_info("X.25 statistics\n");
+ pr_info("--------------\n");
+ pr_info("rx_crc_errors=%d\n", stats->rx_crc_errors);
+ pr_info("rx_over_errors=%d\n", stats->rx_over_errors);
+ pr_info("n2_tx_frames=%d\n", stats->n2_tx_frames);
+ pr_info("n2_rx_frames=%d\n", stats->n2_rx_frames);
+ pr_info("tx_timeouts=%d\n", stats->tx_timeouts);
+ pr_info("rx_timeouts=%d\n", stats->rx_timeouts);
+ pr_info("n3_tx_packets=%d\n", stats->n3_tx_packets);
+ pr_info("n3_rx_packets=%d\n", stats->n3_rx_packets);
+ pr_info("tx_aborts=%d\n", stats->tx_aborts);
+ pr_info("rx_aborts=%d\n", stats->rx_aborts);
}
static void cycx_x25_dump_devs(struct wan_device *wandev)
{
struct net_device *dev = wandev->dev;
- printk(KERN_INFO "X.25 dev states\n");
- printk(KERN_INFO "name: addr: txoff: protocol:\n");
- printk(KERN_INFO "---------------------------------------\n");
+ pr_info("X.25 dev states\n");
+ pr_info("name: addr: txoff: protocol:\n");
+ pr_info("---------------------------------------\n");
while(dev) {
struct cycx_x25_channel *chan = netdev_priv(dev);
- printk(KERN_INFO "%-5.5s %-15.15s %d ETH_P_%s\n",
- chan->name, chan->addr, netif_queue_stopped(dev),
- chan->protocol == ETH_P_IP ? "IP" : "X25");
+ pr_info("%-5.5s %-15.15s %d ETH_P_%s\n",
+ chan->name, chan->addr, netif_queue_stopped(dev),
+ chan->protocol == ETH_P_IP ? "IP" : "X25");
dev = chan->slave;
}
}
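
Editor's note: the driver's private hex_dump() is replaced by the generic print_hex_dump(), which formats 16 bytes per row with an offset column and an optional ASCII part instead of one long string, so the old length clamp is no longer needed. A usage sketch with a hypothetical buffer; the "Message: " prefix is illustrative:

#include <linux/kernel.h>

static void example_hexdump(const void *buf, size_t len)
{
        /* 16 bytes per row, 1-byte groups, ASCII column enabled. */
        print_hex_dump(KERN_INFO, "Message: ", DUMP_PREFIX_OFFSET,
                       16, 1, buf, len, true);
}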
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 21b104db5a9..48ab38a34c5 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -28,6 +28,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -112,8 +114,7 @@ static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
dlp = netdev_priv(dev);
if (!pskb_may_pull(skb, sizeof(*hdr))) {
- printk(KERN_NOTICE "%s: invalid data no header\n",
- dev->name);
+ netdev_notice(dev, "invalid data no header\n");
dev->stats.rx_errors++;
kfree_skb(skb);
return;
@@ -126,7 +127,8 @@ static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
if (hdr->control != FRAD_I_UI)
{
- printk(KERN_NOTICE "%s: Invalid header flag 0x%02X.\n", dev->name, hdr->control);
+ netdev_notice(dev, "Invalid header flag 0x%02X\n",
+ hdr->control);
dev->stats.rx_errors++;
}
else
@@ -135,14 +137,18 @@ static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
case FRAD_P_PADDING:
if (hdr->NLPID != FRAD_P_SNAP)
{
- printk(KERN_NOTICE "%s: Unsupported NLPID 0x%02X.\n", dev->name, hdr->NLPID);
+ netdev_notice(dev, "Unsupported NLPID 0x%02X\n",
+ hdr->NLPID);
dev->stats.rx_errors++;
break;
}
if (hdr->OUI[0] + hdr->OUI[1] + hdr->OUI[2] != 0)
{
- printk(KERN_NOTICE "%s: Unsupported organizationally unique identifier 0x%02X-%02X-%02X.\n", dev->name, hdr->OUI[0], hdr->OUI[1], hdr->OUI[2]);
+ netdev_notice(dev, "Unsupported organizationally unique identifier 0x%02X-%02X-%02X\n",
+ hdr->OUI[0],
+ hdr->OUI[1],
+ hdr->OUI[2]);
dev->stats.rx_errors++;
break;
}
@@ -163,12 +169,14 @@ static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
case FRAD_P_SNAP:
case FRAD_P_Q933:
case FRAD_P_CLNP:
- printk(KERN_NOTICE "%s: Unsupported NLPID 0x%02X.\n", dev->name, hdr->pad);
+ netdev_notice(dev, "Unsupported NLPID 0x%02X\n",
+ hdr->pad);
dev->stats.rx_errors++;
break;
default:
- printk(KERN_NOTICE "%s: Invalid pad byte 0x%02X.\n", dev->name, hdr->pad);
+ netdev_notice(dev, "Invalid pad byte 0x%02X\n",
+ hdr->pad);
dev->stats.rx_errors++;
break;
}
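
The dlci.c hunks above show both halves of the logging conversion applied throughout this series: a pr_fmt() definition placed before the includes, so that every pr_*() call is prefixed with the module name, and netdev_*() helpers that derive the driver/interface prefix from the net_device itself instead of an explicit "%s: ", dev->name pair. A minimal sketch of how the two prefixes reach the log, with an invented module and message (not code from the patch):

/* Hypothetical illustration only -- not part of the patch. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt     /* must come before the includes */

#include <linux/module.h>
#include <linux/netdevice.h>

static void demo_log(struct net_device *dev)
{
        /* Logged as "<modname>: device registered" at KERN_INFO. */
        pr_info("device registered\n");

        /* Prefix (driver and interface name) is taken from dev. */
        netdev_notice(dev, "invalid header flag 0x%02X\n", 0x7f);
}
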
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index acb9ea83062..058e1697c17 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -80,6 +80,8 @@
* - misc crapectomy.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
@@ -99,6 +101,7 @@
#include <asm/irq.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_arp.h>
@@ -552,7 +555,7 @@ static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
schedule_timeout_uninterruptible(10);
rmb();
} while (++i > 0);
- printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
+ netdev_err(dev, "%s timeout\n", msg);
done:
return (i >= 0) ? i : -EAGAIN;
}
@@ -568,18 +571,18 @@ static int dscc4_do_action(struct net_device *dev, char *msg)
u32 state = readl(ioaddr);
if (state & ArAck) {
- printk(KERN_DEBUG "%s: %s ack\n", dev->name, msg);
+ netdev_dbg(dev, "%s ack\n", msg);
writel(ArAck, ioaddr);
goto done;
} else if (state & Arf) {
- printk(KERN_ERR "%s: %s failed\n", dev->name, msg);
+ netdev_err(dev, "%s failed\n", msg);
writel(Arf, ioaddr);
i = -1;
goto done;
}
rmb();
} while (++i > 0);
- printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
+ netdev_err(dev, "%s timeout\n", msg);
done:
return i;
}
@@ -635,7 +638,7 @@ static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
writel(MTFi|Rdt, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
if (dscc4_do_action(dev, "Rdt") < 0)
- printk(KERN_ERR "%s: Tx reset failed\n", dev->name);
+ netdev_err(dev, "Tx reset failed\n");
}
#endif
@@ -721,22 +724,20 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev,
rc = pci_request_region(pdev, 0, "registers");
if (rc < 0) {
- printk(KERN_ERR "%s: can't reserve MMIO region (regs)\n",
- DRV_NAME);
+ pr_err("can't reserve MMIO region (regs)\n");
goto err_disable_0;
}
rc = pci_request_region(pdev, 1, "LBI interface");
if (rc < 0) {
- printk(KERN_ERR "%s: can't reserve MMIO region (lbi)\n",
- DRV_NAME);
+ pr_err("can't reserve MMIO region (lbi)\n");
goto err_free_mmio_region_1;
}
ioaddr = pci_ioremap_bar(pdev, 0);
if (!ioaddr) {
- printk(KERN_ERR "%s: cannot remap MMIO region %llx @ %llx\n",
- DRV_NAME, (unsigned long long)pci_resource_len(pdev, 0),
- (unsigned long long)pci_resource_start(pdev, 0));
+ pr_err("cannot remap MMIO region %llx @ %llx\n",
+ (unsigned long long)pci_resource_len(pdev, 0),
+ (unsigned long long)pci_resource_start(pdev, 0));
rc = -EIO;
goto err_free_mmio_regions_2;
}
@@ -756,7 +757,7 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev,
rc = request_irq(pdev->irq, dscc4_irq, IRQF_SHARED, DRV_NAME, priv->root);
if (rc < 0) {
- printk(KERN_WARNING "%s: IRQ %d busy\n", DRV_NAME, pdev->irq);
+ pr_warn("IRQ %d busy\n", pdev->irq);
goto err_release_4;
}
@@ -903,7 +904,7 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
root = kcalloc(dev_per_card, sizeof(*root), GFP_KERNEL);
if (!root) {
- printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME);
+ pr_err("can't allocate data\n");
goto err_out;
}
@@ -915,7 +916,7 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
ppriv = kzalloc(sizeof(*ppriv), GFP_KERNEL);
if (!ppriv) {
- printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME);
+ pr_err("can't allocate private data\n");
goto err_free_dev;
}
@@ -951,7 +952,7 @@ static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
ret = register_hdlc_device(d);
if (ret < 0) {
- printk(KERN_ERR "%s: unable to register\n", DRV_NAME);
+ pr_err("unable to register\n");
dscc4_release_ring(dpriv);
goto err_unregister;
}
@@ -1004,7 +1005,7 @@ static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
struct net_device *dev = dscc4_to_dev(dpriv);
- printk(KERN_INFO "%s: loopback requires clock\n", dev->name);
+ netdev_info(dev, "loopback requires clock\n");
return -1;
}
return 0;
@@ -1077,7 +1078,7 @@ static int dscc4_open(struct net_device *dev)
scc_patchl(0, PowerUp, dpriv, dev, CCR0);
scc_patchl(0, 0x00050000, dpriv, dev, CCR2);
scc_writel(EventsMask, dpriv, dev, IMR);
- printk(KERN_INFO "%s: up again.\n", dev->name);
+ netdev_info(dev, "up again\n");
goto done;
}
@@ -1094,11 +1095,11 @@ static int dscc4_open(struct net_device *dev)
* situations.
*/
if (scc_readl_star(dpriv, dev) & SccBusy) {
- printk(KERN_ERR "%s busy. Try later\n", dev->name);
+ netdev_err(dev, "busy - try later\n");
ret = -EAGAIN;
goto err_out;
} else
- printk(KERN_INFO "%s: available. Good\n", dev->name);
+ netdev_info(dev, "available - good\n");
scc_writel(EventsMask, dpriv, dev, IMR);
@@ -1116,7 +1117,7 @@ static int dscc4_open(struct net_device *dev)
* reset is needed. Suggestions anyone ?
*/
if ((ret = dscc4_xpr_ack(dpriv)) < 0) {
- printk(KERN_ERR "%s: %s timeout\n", DRV_NAME, "XPR");
+ pr_err("XPR timeout\n");
goto err_disable_scc_events;
}
@@ -1341,8 +1342,7 @@ static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EPERM;
if (dpriv->flags & FakeReset) {
- printk(KERN_INFO "%s: please reset the device"
- " before this command\n", dev->name);
+ netdev_info(dev, "please reset the device before this command\n");
return -EPERM;
}
if (copy_from_user(&dpriv->settings, line, size))
@@ -1505,8 +1505,7 @@ static irqreturn_t dscc4_irq(int irq, void *token)
writel(state, ioaddr + GSTAR);
if (state & Arf) {
- printk(KERN_ERR "%s: failure (Arf). Harass the maintener\n",
- dev->name);
+ netdev_err(dev, "failure (Arf). Harass the maintainer\n");
goto out;
}
state &= ~ArAck;
@@ -1514,7 +1513,7 @@ static irqreturn_t dscc4_irq(int irq, void *token)
if (debug > 0)
printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME);
if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & cpu_to_le32(Arf))
- printk(KERN_ERR "%s: %s failed\n", dev->name, "CFG");
+ netdev_err(dev, "CFG failed\n");
if (!(state &= ~Cfg))
goto out;
}
@@ -1595,8 +1594,8 @@ try:
++dpriv->tx_dirty;
} else {
if (debug > 1)
- printk(KERN_ERR "%s Tx: NULL skb %d\n",
- dev->name, cur);
+ netdev_err(dev, "Tx: NULL skb %d\n",
+ cur);
}
/*
* If the driver ends sending crap on the wire, it
@@ -1615,7 +1614,7 @@ try:
* Transmit Data Underrun
*/
if (state & Xdu) {
- printk(KERN_ERR "%s: XDU. Ask maintainer\n", DRV_NAME);
+ netdev_err(dev, "Tx Data Underrun. Ask maintainer\n");
dpriv->flags = NeedIDT;
/* Tx reset */
writel(MTFi | Rdt,
@@ -1624,13 +1623,13 @@ try:
return;
}
if (state & Cts) {
- printk(KERN_INFO "%s: CTS transition\n", dev->name);
+ netdev_info(dev, "CTS transition\n");
if (!(state &= ~Cts)) /* DEBUG */
goto try;
}
if (state & Xmr) {
/* Frame needs to be sent again - FIXME */
- printk(KERN_ERR "%s: Xmr. Ask maintainer\n", DRV_NAME);
+ netdev_err(dev, "Tx ReTx. Ask maintainer\n");
if (!(state &= ~Xmr)) /* DEBUG */
goto try;
}
@@ -1648,7 +1647,7 @@ try:
break;
}
if (!i)
- printk(KERN_INFO "%s busy in irq\n", dev->name);
+ netdev_info(dev, "busy in irq\n");
scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;
/* Keep this order: IDT before IDR */
@@ -1685,7 +1684,7 @@ try:
}
if (state & Cd) {
if (debug > 0)
- printk(KERN_INFO "%s: CD transition\n", dev->name);
+ netdev_info(dev, "CD transition\n");
if (!(state &= ~Cd)) /* DEBUG */
goto try;
}
@@ -1694,11 +1693,11 @@ try:
#ifdef DSCC4_POLLING
while (!dscc4_tx_poll(dpriv, dev));
#endif
- printk(KERN_INFO "%s: Tx Hi\n", dev->name);
+ netdev_info(dev, "Tx Hi\n");
state &= ~Hi;
}
if (state & Err) {
- printk(KERN_INFO "%s: Tx ERR\n", dev->name);
+ netdev_info(dev, "Tx ERR\n");
dev->stats.tx_errors++;
state &= ~Err;
}
@@ -1768,7 +1767,7 @@ try:
goto try;
}
if (state & Hi ) { /* HI bit */
- printk(KERN_INFO "%s: Rx Hi\n", dev->name);
+ netdev_info(dev, "Rx Hi\n");
state &= ~Hi;
goto try;
}
@@ -1799,7 +1798,7 @@ try:
goto try;
}
if (state & Cts) {
- printk(KERN_INFO "%s: CTS transition\n", dev->name);
+ netdev_info(dev, "CTS transition\n");
if (!(state &= ~Cts)) /* DEBUG */
goto try;
}
@@ -1858,14 +1857,12 @@ try:
sizeof(struct RxFD), scc_addr + CH0BRDA);
writel(MTFi|Rdr|Idr, scc_addr + CH0CFG);
if (dscc4_do_action(dev, "RDR") < 0) {
- printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
- dev->name, "RDR");
+ netdev_err(dev, "RDO recovery failed(RDR)\n");
goto rdo_end;
}
writel(MTFi|Idr, scc_addr + CH0CFG);
if (dscc4_do_action(dev, "IDR") < 0) {
- printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
- dev->name, "IDR");
+ netdev_err(dev, "RDO recovery failed(IDR)\n");
goto rdo_end;
}
rdo_end:
@@ -1874,7 +1871,7 @@ try:
goto try;
}
if (state & Cd) {
- printk(KERN_INFO "%s: CD transition\n", dev->name);
+ netdev_info(dev, "CD transition\n");
if (!(state &= ~Cd)) /* DEBUG */
goto try;
}
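
In the dscc4.c hunks each printk(KERN_<LEVEL>) maps onto the netdev helper of the same level (netdev_err, netdev_info, and so on). The one behavioural difference is netdev_dbg(): unlike the old printk(KERN_DEBUG ...), it only produces output when DEBUG or CONFIG_DYNAMIC_DEBUG is in effect. A short, hypothetical sketch of the mapping (function and messages invented):

/* Hypothetical sketch -- names and messages are invented. */
#include <linux/netdevice.h>

static void demo_levels(struct net_device *dev, int err)
{
        netdev_err(dev, "command failed: %d\n", err);   /* KERN_ERR  */
        netdev_info(dev, "link up again\n");            /* KERN_INFO */
        /* Emitted only with DEBUG or dynamic debug enabled: */
        netdev_dbg(dev, "ack received\n");
}
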
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 777d1a4e81b..ebb9f24eefb 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/hdlc.h>
#include <asm/io.h>
@@ -1664,10 +1665,9 @@ check_started_ok(struct fst_card_info *card)
* existing firmware etc so we just report it for the moment.
*/
if (FST_RDL(card, numberOfPorts) != card->nports) {
- pr_warning("Port count mismatch on card %d. "
- "Firmware thinks %d we say %d\n",
- card->card_no,
- FST_RDL(card, numberOfPorts), card->nports);
+ pr_warn("Port count mismatch on card %d. Firmware thinks %d we say %d\n",
+ card->card_no,
+ FST_RDL(card, numberOfPorts), card->nports);
}
}
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index a3ea27ce04f..33b67d88fce 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -582,8 +582,8 @@ static void sca_dump_rings(struct net_device *dev)
sca_in(DSR_RX(phy_node(port)), card), port->rxin,
sca_in(DSR_RX(phy_node(port)), card) & DSR_DE ? "" : "in");
for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++)
- printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
- printk(KERN_CONT "\n");
+ pr_cont(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
+ pr_cont("\n");
printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
"last=%u %sactive",
@@ -593,8 +593,8 @@ static void sca_dump_rings(struct net_device *dev)
sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in");
for (cnt = 0; cnt < port_to_card(port)->tx_ring_buffers; cnt++)
- printk(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
- printk("\n");
+ pr_cont(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
+ pr_cont("\n");
printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, ST: %02x %02x %02x %02x,"
" FST: %02x CST: %02x %02x\n",
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index e305274f83f..efc0db10118 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -530,8 +530,8 @@ static void sca_dump_rings(struct net_device *dev)
sca_in(DSR_RX(port->chan), card), port->rxin,
sca_in(DSR_RX(port->chan), card) & DSR_DE ? "" : "in");
for (cnt = 0; cnt < port->card->rx_ring_buffers; cnt++)
- printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
- printk(KERN_CONT "\n");
+ pr_cont(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
+ pr_cont("\n");
printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
"last=%u %sactive",
@@ -541,8 +541,8 @@ static void sca_dump_rings(struct net_device *dev)
sca_in(DSR_TX(port->chan), card) & DSR_DE ? "" : "in");
for (cnt = 0; cnt < port->card->tx_ring_buffers; cnt++)
- printk(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
- printk("\n");
+ pr_cont(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
+ pr_cont("\n");
printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x,"
" ST: %02x %02x %02x %02x %02x, FST: %02x CST: %02x %02x\n",
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 5d4bb615ccc..10cc7df9549 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -22,6 +22,8 @@
* - proto->start() and stop() are called with spin_lock_irq held.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
@@ -130,10 +132,10 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event,
goto carrier_exit;
if (hdlc->carrier) {
- printk(KERN_INFO "%s: Carrier detected\n", dev->name);
+ netdev_info(dev, "Carrier detected\n");
hdlc_proto_start(dev);
} else {
- printk(KERN_INFO "%s: Carrier lost\n", dev->name);
+ netdev_info(dev, "Carrier lost\n");
hdlc_proto_stop(dev);
}
@@ -165,10 +167,10 @@ int hdlc_open(struct net_device *dev)
spin_lock_irq(&hdlc->state_lock);
if (hdlc->carrier) {
- printk(KERN_INFO "%s: Carrier detected\n", dev->name);
+ netdev_info(dev, "Carrier detected\n");
hdlc_proto_start(dev);
} else
- printk(KERN_INFO "%s: No carrier\n", dev->name);
+ netdev_info(dev, "No carrier\n");
hdlc->open = 1;
@@ -281,8 +283,8 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
if (size)
if ((dev_to_hdlc(dev)->state = kmalloc(size,
GFP_KERNEL)) == NULL) {
- printk(KERN_WARNING "Memory squeeze on"
- " hdlc_proto_attach()\n");
+ netdev_warn(dev,
+ "Memory squeeze on hdlc_proto_attach()\n");
module_put(proto->module);
return -ENOBUFS;
}
@@ -363,7 +365,7 @@ static int __init hdlc_module_init(void)
{
int result;
- printk(KERN_INFO "%s\n", version);
+ pr_info("%s\n", version);
if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
return result;
dev_add_pack(&hdlc_packet_type);
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index b1e5e5b69c2..3f20808b5ff 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -103,9 +103,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
skb = dev_alloc_skb(sizeof(struct hdlc_header) +
sizeof(struct cisco_packet));
if (!skb) {
- printk(KERN_WARNING
- "%s: Memory squeeze on cisco_keepalive_send()\n",
- dev->name);
+ netdev_warn(dev, "Memory squeeze on cisco_keepalive_send()\n");
return;
}
skb_reserve(skb, 4);
@@ -181,8 +179,8 @@ static int cisco_rx(struct sk_buff *skb)
CISCO_PACKET_LEN) &&
(skb->len != sizeof(struct hdlc_header) +
CISCO_BIG_PACKET_LEN)) {
- printk(KERN_INFO "%s: Invalid length of Cisco control"
- " packet (%d bytes)\n", dev->name, skb->len);
+ netdev_info(dev, "Invalid length of Cisco control packet (%d bytes)\n",
+ skb->len);
goto rx_error;
}
@@ -217,8 +215,7 @@ static int cisco_rx(struct sk_buff *skb)
return NET_RX_SUCCESS;
case CISCO_ADDR_REPLY:
- printk(KERN_INFO "%s: Unexpected Cisco IP address "
- "reply\n", dev->name);
+ netdev_info(dev, "Unexpected Cisco IP address reply\n");
goto rx_error;
case CISCO_KEEPALIVE_REQ:
@@ -235,9 +232,8 @@ static int cisco_rx(struct sk_buff *skb)
min = sec / 60; sec -= min * 60;
hrs = min / 60; min -= hrs * 60;
days = hrs / 24; hrs -= days * 24;
- printk(KERN_INFO "%s: Link up (peer "
- "uptime %ud%uh%um%us)\n",
- dev->name, days, hrs, min, sec);
+ netdev_info(dev, "Link up (peer uptime %ud%uh%um%us)\n",
+ days, hrs, min, sec);
netif_dormant_off(dev);
st->up = 1;
}
@@ -249,8 +245,7 @@ static int cisco_rx(struct sk_buff *skb)
} /* switch (keepalive type) */
} /* switch (protocol) */
- printk(KERN_INFO "%s: Unsupported protocol %x\n", dev->name,
- ntohs(data->protocol));
+ netdev_info(dev, "Unsupported protocol %x\n", ntohs(data->protocol));
dev_kfree_skb_any(skb);
return NET_RX_DROP;
@@ -272,7 +267,7 @@ static void cisco_timer(unsigned long arg)
if (st->up &&
time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) {
st->up = 0;
- printk(KERN_INFO "%s: Link down\n", dev->name);
+ netdev_info(dev, "Link down\n");
netif_dormant_on(dev);
}
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index fc433f28c04..eb2028187fb 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -446,15 +446,14 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
static inline void fr_log_dlci_active(pvc_device *pvc)
{
- printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n",
- pvc->frad->name,
- pvc->dlci,
- pvc->main ? pvc->main->name : "",
- pvc->main && pvc->ether ? " " : "",
- pvc->ether ? pvc->ether->name : "",
- pvc->state.new ? " new" : "",
- !pvc->state.exist ? "deleted" :
- pvc->state.active ? "active" : "inactive");
+ netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
+ pvc->dlci,
+ pvc->main ? pvc->main->name : "",
+ pvc->main && pvc->ether ? " " : "",
+ pvc->ether ? pvc->ether->name : "",
+ pvc->state.new ? " new" : "",
+ !pvc->state.exist ? "deleted" :
+ pvc->state.active ? "active" : "inactive");
}
@@ -481,16 +480,14 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
if (dce && fullrep) {
len += state(hdlc)->dce_pvc_count * (2 + stat_len);
if (len > HDLC_MAX_MRU) {
- printk(KERN_WARNING "%s: Too many PVCs while sending "
- "LMI full report\n", dev->name);
+ netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
return;
}
}
skb = dev_alloc_skb(len);
if (!skb) {
- printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n",
- dev->name);
+ netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
return;
}
memset(skb->data, 0, len);
@@ -615,8 +612,7 @@ static void fr_timer(unsigned long arg)
state(hdlc)->last_errors <<= 1; /* Shift the list */
if (state(hdlc)->request) {
if (state(hdlc)->reliable)
- printk(KERN_INFO "%s: No LMI status reply "
- "received\n", dev->name);
+ netdev_info(dev, "No LMI status reply received\n");
state(hdlc)->last_errors |= 1;
}
@@ -628,8 +624,7 @@ static void fr_timer(unsigned long arg)
}
if (state(hdlc)->reliable != reliable) {
- printk(KERN_INFO "%s: Link %sreliable\n", dev->name,
- reliable ? "" : "un");
+ netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
fr_set_link_state(reliable, dev);
}
@@ -665,33 +660,32 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
LMI_CCITT_CISCO_LENGTH)) {
- printk(KERN_INFO "%s: Short LMI frame\n", dev->name);
+ netdev_info(dev, "Short LMI frame\n");
return 1;
}
if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
NLPID_CCITT_ANSI_LMI)) {
- printk(KERN_INFO "%s: Received non-LMI frame with LMI DLCI\n",
- dev->name);
+ netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
return 1;
}
if (skb->data[4] != LMI_CALLREF) {
- printk(KERN_INFO "%s: Invalid LMI Call reference (0x%02X)\n",
- dev->name, skb->data[4]);
+ netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
+ skb->data[4]);
return 1;
}
if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
- printk(KERN_INFO "%s: Invalid LMI Message type (0x%02X)\n",
- dev->name, skb->data[5]);
+ netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
+ skb->data[5]);
return 1;
}
if (lmi == LMI_ANSI) {
if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
- printk(KERN_INFO "%s: Not ANSI locking shift in LMI"
- " message (0x%02X)\n", dev->name, skb->data[6]);
+ netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
+ skb->data[6]);
return 1;
}
i = 7;
@@ -700,34 +694,34 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
LMI_ANSI_CISCO_REPTYPE)) {
- printk(KERN_INFO "%s: Not an LMI Report type IE (0x%02X)\n",
- dev->name, skb->data[i]);
+ netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
+ skb->data[i]);
return 1;
}
if (skb->data[++i] != LMI_REPT_LEN) {
- printk(KERN_INFO "%s: Invalid LMI Report type IE length"
- " (%u)\n", dev->name, skb->data[i]);
+ netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
+ skb->data[i]);
return 1;
}
reptype = skb->data[++i];
if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
- printk(KERN_INFO "%s: Unsupported LMI Report type (0x%02X)\n",
- dev->name, reptype);
+ netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
+ reptype);
return 1;
}
if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
LMI_ANSI_CISCO_ALIVE)) {
- printk(KERN_INFO "%s: Not an LMI Link integrity verification"
- " IE (0x%02X)\n", dev->name, skb->data[i]);
+ netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
+ skb->data[i]);
return 1;
}
if (skb->data[++i] != LMI_INTEG_LEN) {
- printk(KERN_INFO "%s: Invalid LMI Link integrity verification"
- " IE length (%u)\n", dev->name, skb->data[i]);
+ netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
+ skb->data[i]);
return 1;
}
i++;
@@ -801,14 +795,14 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
LMI_ANSI_CISCO_PVCSTAT)) {
- printk(KERN_INFO "%s: Not an LMI PVC status IE"
- " (0x%02X)\n", dev->name, skb->data[i]);
+ netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
+ skb->data[i]);
return 1;
}
if (skb->data[++i] != stat_len) {
- printk(KERN_INFO "%s: Invalid LMI PVC status IE length"
- " (%u)\n", dev->name, skb->data[i]);
+ netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
+ skb->data[i]);
return 1;
}
i++;
@@ -829,9 +823,7 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
pvc = add_pvc(dev, dlci);
if (!pvc && !no_ram) {
- printk(KERN_WARNING
- "%s: Memory squeeze on fr_lmi_recv()\n",
- dev->name);
+ netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
no_ram = 1;
}
@@ -902,8 +894,8 @@ static int fr_rx(struct sk_buff *skb)
pvc = find_pvc(hdlc, dlci);
if (!pvc) {
#ifdef DEBUG_PKT
- printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n",
- frad->name, dlci);
+ netdev_info(frad, "No PVC for received frame's DLCI %d\n",
+ dlci);
#endif
dev_kfree_skb_any(skb);
return NET_RX_DROP;
@@ -962,14 +954,14 @@ static int fr_rx(struct sk_buff *skb)
break;
default:
- printk(KERN_INFO "%s: Unsupported protocol, OUI=%x "
- "PID=%x\n", frad->name, oui, pid);
+ netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
+ oui, pid);
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
} else {
- printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x "
- "length = %i\n", frad->name, data[3], skb->len);
+ netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
+ data[3], skb->len);
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
@@ -1073,8 +1065,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
int used;
if ((pvc = add_pvc(frad, dlci)) == NULL) {
- printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n",
- frad->name);
+ netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
return -ENOBUFS;
}
@@ -1083,14 +1074,14 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
used = pvc_is_used(pvc);
- if (type == ARPHRD_ETHER)
+ if (type == ARPHRD_ETHER) {
dev = alloc_netdev(0, "pvceth%d", ether_setup);
- else
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ } else
dev = alloc_netdev(0, "pvc%d", pvc_setup);
if (!dev) {
- printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n",
- frad->name);
+ netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
delete_unused_pvcs(hdlc);
return -ENOBUFS;
}
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 941f053e650..055a918067e 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -223,8 +223,7 @@ static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
skb = dev_alloc_skb(sizeof(struct hdlc_header) +
sizeof(struct cp_header) + magic_len + len);
if (!skb) {
- printk(KERN_WARNING "%s: out of memory in ppp_tx_cp()\n",
- dev->name);
+ netdev_warn(dev, "out of memory in ppp_tx_cp()\n");
return;
}
skb_reserve(skb, sizeof(struct hdlc_header));
@@ -345,7 +344,7 @@ static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data);
if (old_state != OPENED && proto->state == OPENED) {
- printk(KERN_INFO "%s: %s up\n", dev->name, proto_name(pid));
+ netdev_info(dev, "%s up\n", proto_name(pid));
if (pid == PID_LCP) {
netif_dormant_off(dev);
ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL);
@@ -356,7 +355,7 @@ static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
}
}
if (old_state == OPENED && proto->state != OPENED) {
- printk(KERN_INFO "%s: %s down\n", dev->name, proto_name(pid));
+ netdev_info(dev, "%s down\n", proto_name(pid));
if (pid == PID_LCP) {
netif_dormant_on(dev);
ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL);
@@ -585,7 +584,7 @@ static void ppp_timer(unsigned long arg)
break;
if (time_after(jiffies, ppp->last_pong +
ppp->keepalive_timeout * HZ)) {
- printk(KERN_INFO "%s: Link down\n", proto->dev->name);
+ netdev_info(proto->dev, "Link down\n");
ppp_cp_event(proto->dev, PID_LCP, STOP, 0, 0, 0, NULL);
ppp_cp_event(proto->dev, PID_LCP, START, 0, 0, 0, NULL);
} else { /* send keep-alive packet */
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 70527e5a54a..56aeb011cb3 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -34,7 +34,7 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
unsigned char *ptr;
if ((skb = dev_alloc_skb(1)) == NULL) {
- printk(KERN_ERR "%s: out of memory\n", dev->name);
+ netdev_err(dev, "out of memory\n");
return;
}
@@ -106,9 +106,8 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
/* Send connect confirm. msg to level 3 */
x25_connected(dev, 0);
else
- printk(KERN_ERR "%s: LAPB connect request "
- "failed, error code = %i\n",
- dev->name, result);
+ netdev_err(dev, "LAPB connect request failed, error code = %i\n",
+ result);
}
break;
@@ -118,9 +117,8 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
/* Send disconnect confirm. msg to level 3 */
x25_disconnected(dev, 0);
else
- printk(KERN_ERR "%s: LAPB disconnect request "
- "failed, error code = %i\n",
- dev->name, result);
+ netdev_err(dev, "LAPB disconnect request failed, error code = %i\n",
+ result);
}
break;
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index e817583e6ec..3d80e4267de 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -20,6 +20,8 @@
* Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -192,8 +194,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
*/
if (!request_region(iobase, 8, "Comtrol SV11")) {
- printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n",
- iobase);
+ pr_warn("I/O 0x%X already in use\n", iobase);
return NULL;
}
@@ -221,7 +222,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
"Hostess SV11", sv) < 0) {
- printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq);
+ pr_warn("IRQ %d already in use\n", irq);
goto err_irq;
}
@@ -255,7 +256,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
*/
if (z8530_init(sv)) {
- printk(KERN_ERR "Z8530 series device not found.\n");
+ pr_err("Z8530 series device not found\n");
enable_irq(irq);
goto free_dma;
}
@@ -282,7 +283,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
netdev->irq = irq;
if (register_hdlc_device(netdev)) {
- printk(KERN_ERR "hostess: unable to register HDLC device.\n");
+ pr_err("unable to register HDLC device\n");
free_netdev(netdev);
goto free_dma;
}
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index f1e1643dc3e..aaaca9aa229 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -8,6 +8,8 @@
* as published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/bitops.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
@@ -358,9 +360,8 @@ static void hss_npe_send(struct port *port, struct msg *msg, const char* what)
{
u32 *val = (u32*)msg;
if (npe_send_message(port->npe, msg, what)) {
- printk(KERN_CRIT "HSS-%i: unable to send command [%08X:%08X]"
- " to %s\n", port->id, val[0], val[1],
- npe_name(port->npe));
+ pr_crit("HSS-%i: unable to send command [%08X:%08X] to %s\n",
+ port->id, val[0], val[1], npe_name(port->npe));
BUG();
}
}
@@ -447,8 +448,7 @@ static void hss_config(struct port *port)
if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
/* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
msg.cmd != PORT_CONFIG_LOAD || msg.data32) {
- printk(KERN_CRIT "HSS-%i: HSS_LOAD_CONFIG failed\n",
- port->id);
+ pr_crit("HSS-%i: HSS_LOAD_CONFIG failed\n", port->id);
BUG();
}
@@ -477,8 +477,7 @@ static u32 hss_get_status(struct port *port)
msg.hss_port = port->id;
hss_npe_send(port, &msg, "PORT_ERROR_READ");
if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
- printk(KERN_CRIT "HSS-%i: unable to read HSS status\n",
- port->id);
+ pr_crit("HSS-%i: unable to read HSS status\n", port->id);
BUG();
}
@@ -736,9 +735,8 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
dev->stats.rx_errors++;
break;
default: /* FIXME - remove printk */
- printk(KERN_ERR "%s: hss_hdlc_poll: status 0x%02X"
- " errors %u\n", dev->name, desc->status,
- desc->error_count);
+ netdev_err(dev, "hss_hdlc_poll: status 0x%02X errors %u\n",
+ desc->status, desc->error_count);
dev->stats.rx_errors++;
}
@@ -1127,8 +1125,8 @@ static int hss_hdlc_close(struct net_device *dev)
buffs--;
if (buffs)
- printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
- " left in NPE\n", dev->name, buffs);
+ netdev_crit(dev, "unable to drain RX queue, %i buffer(s) left in NPE\n",
+ buffs);
buffs = TX_DESCS;
while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
@@ -1143,8 +1141,8 @@ static int hss_hdlc_close(struct net_device *dev)
} while (++i < MAX_CLOSE_WAIT);
if (buffs)
- printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
- "left in NPE\n", dev->name, buffs);
+ netdev_crit(dev, "unable to drain TX queue, %i buffer(s) left in NPE\n",
+ buffs);
#if DEBUG_CLOSE
if (!buffs)
printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
@@ -1364,7 +1362,7 @@ static int __devinit hss_init_one(struct platform_device *pdev)
platform_set_drvdata(pdev, port);
- printk(KERN_INFO "%s: HSS-%i\n", dev->name, port->id);
+ netdev_info(dev, "HSS-%i\n", port->id);
return 0;
err_free_netdev:
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index eec463f99c0..a817081737a 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -20,6 +20,8 @@
* 2000-11-14 Henner Eisen dev_hold/put, NETDEV_GOING_DOWN support
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -165,13 +167,11 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
break;
case X25_IFACE_CONNECT:
if ((err = lapb_connect_request(dev)) != LAPB_OK)
- printk(KERN_ERR "lapbeth: lapb_connect_request "
- "error: %d\n", err);
+ pr_err("lapb_connect_request error: %d\n", err);
goto drop;
case X25_IFACE_DISCONNECT:
if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
- printk(KERN_ERR "lapbeth: lapb_disconnect_request "
- "err: %d\n", err);
+ pr_err("lapb_disconnect_request err: %d\n", err);
/* Fall thru */
default:
goto drop;
@@ -180,7 +180,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
skb_pull(skb, 1);
if ((err = lapb_data_request(dev, skb)) != LAPB_OK) {
- printk(KERN_ERR "lapbeth: lapb_data_request error - %d\n", err);
+ pr_err("lapb_data_request error - %d\n", err);
goto drop;
}
out:
@@ -220,7 +220,7 @@ static void lapbeth_connected(struct net_device *dev, int reason)
struct sk_buff *skb = dev_alloc_skb(1);
if (!skb) {
- printk(KERN_ERR "lapbeth: out of memory\n");
+ pr_err("out of memory\n");
return;
}
@@ -237,7 +237,7 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
struct sk_buff *skb = dev_alloc_skb(1);
if (!skb) {
- printk(KERN_ERR "lapbeth: out of memory\n");
+ pr_err("out of memory\n");
return;
}
@@ -277,7 +277,7 @@ static int lapbeth_open(struct net_device *dev)
int err;
if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
- printk(KERN_ERR "lapbeth: lapb_register error - %d\n", err);
+ pr_err("lapb_register error: %d\n", err);
return -ENODEV;
}
@@ -292,7 +292,7 @@ static int lapbeth_close(struct net_device *dev)
netif_stop_queue(dev);
if ((err = lapb_unregister(dev)) != LAPB_OK)
- printk(KERN_ERR "lapbeth: lapb_unregister error - %d\n", err);
+ pr_err("lapb_unregister error: %d\n", err);
return 0;
}
diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h
index 01ad45218d1..a1d202d8ad6 100644
--- a/drivers/net/wan/lmc/lmc_var.h
+++ b/drivers/net/wan/lmc/lmc_var.h
@@ -380,7 +380,7 @@ struct lmc___softc {
/* CSR6 settings */
#define OPERATION_MODE 0x00000200 /* Full Duplex */
#define PROMISC_MODE 0x00000040 /* Promiscuous Mode */
-#define RECIEVE_ALL 0x40000000 /* Receive All */
+#define RECEIVE_ALL 0x40000000 /* Receive All */
#define PASS_BAD_FRAMES 0x00000008 /* Pass Bad Frames */
/* Dec control registers CSR6 as well */
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 17d408fe693..5129ad514d2 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -16,6 +16,8 @@
* SDL Inc. PPP/HDLC/CISCO driver
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
@@ -341,57 +343,57 @@ static int __init n2_run(unsigned long io, unsigned long irq,
int i;
if (io < 0x200 || io > 0x3FF || (io % N2_IOPORTS) != 0) {
- printk(KERN_ERR "n2: invalid I/O port value\n");
+ pr_err("invalid I/O port value\n");
return -ENODEV;
}
if (irq < 3 || irq > 15 || irq == 6) /* FIXME */ {
- printk(KERN_ERR "n2: invalid IRQ value\n");
+ pr_err("invalid IRQ value\n");
return -ENODEV;
}
if (winbase < 0xA0000 || winbase > 0xFFFFF || (winbase & 0xFFF) != 0) {
- printk(KERN_ERR "n2: invalid RAM value\n");
+ pr_err("invalid RAM value\n");
return -ENODEV;
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
- printk(KERN_ERR "n2: unable to allocate memory\n");
+ pr_err("unable to allocate memory\n");
return -ENOBUFS;
}
card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
if (!card->ports[0].dev || !card->ports[1].dev) {
- printk(KERN_ERR "n2: unable to allocate memory\n");
+ pr_err("unable to allocate memory\n");
n2_destroy_card(card);
return -ENOMEM;
}
if (!request_region(io, N2_IOPORTS, devname)) {
- printk(KERN_ERR "n2: I/O port region in use\n");
+ pr_err("I/O port region in use\n");
n2_destroy_card(card);
return -EBUSY;
}
card->io = io;
if (request_irq(irq, sca_intr, 0, devname, card)) {
- printk(KERN_ERR "n2: could not allocate IRQ\n");
+ pr_err("could not allocate IRQ\n");
n2_destroy_card(card);
return -EBUSY;
}
card->irq = irq;
if (!request_mem_region(winbase, USE_WINDOWSIZE, devname)) {
- printk(KERN_ERR "n2: could not request RAM window\n");
+ pr_err("could not request RAM window\n");
n2_destroy_card(card);
return -EBUSY;
}
card->phy_winbase = winbase;
card->winbase = ioremap(winbase, USE_WINDOWSIZE);
if (!card->winbase) {
- printk(KERN_ERR "n2: ioremap() failed\n");
+ pr_err("ioremap() failed\n");
n2_destroy_card(card);
return -EFAULT;
}
@@ -413,7 +415,7 @@ static int __init n2_run(unsigned long io, unsigned long irq,
break;
default:
- printk(KERN_ERR "n2: invalid window size\n");
+ pr_err("invalid window size\n");
n2_destroy_card(card);
return -ENODEV;
}
@@ -433,12 +435,12 @@ static int __init n2_run(unsigned long io, unsigned long irq,
card->buff_offset = (valid0 + valid1) * sizeof(pkt_desc) *
(card->tx_ring_buffers + card->rx_ring_buffers);
- printk(KERN_INFO "n2: RISCom/N2 %u KB RAM, IRQ%u, "
- "using %u TX + %u RX packets rings\n", card->ram_size / 1024,
- card->irq, card->tx_ring_buffers, card->rx_ring_buffers);
+ pr_info("RISCom/N2 %u KB RAM, IRQ%u, using %u TX + %u RX packets rings\n",
+ card->ram_size / 1024, card->irq,
+ card->tx_ring_buffers, card->rx_ring_buffers);
if (card->tx_ring_buffers < 1) {
- printk(KERN_ERR "n2: RAM test failed\n");
+ pr_err("RAM test failed\n");
n2_destroy_card(card);
return -EIO;
}
@@ -474,16 +476,14 @@ static int __init n2_run(unsigned long io, unsigned long irq,
port->card = card;
if (register_hdlc_device(dev)) {
- printk(KERN_WARNING "n2: unable to register hdlc "
- "device\n");
+ pr_warn("unable to register hdlc device\n");
port->card = NULL;
n2_destroy_card(card);
return -ENOBUFS;
}
sca_init_port(port); /* Set up SCA memory */
- printk(KERN_INFO "%s: RISCom/N2 node %d\n",
- dev->name, port->phy_node);
+ netdev_info(dev, "RISCom/N2 node %d\n", port->phy_node);
}
*new_card = card;
@@ -498,12 +498,12 @@ static int __init n2_init(void)
{
if (hw==NULL) {
#ifdef MODULE
- printk(KERN_INFO "n2: no card initialized\n");
+ pr_info("no card initialized\n");
#endif
return -EINVAL; /* no parameters specified, abort */
}
- printk(KERN_INFO "%s\n", version);
+ pr_info("%s\n", version);
do {
unsigned long io, irq, ram;
@@ -541,7 +541,7 @@ static int __init n2_init(void)
return first_card ? 0 : -EINVAL;
}while(*hw++ == ':');
- printk(KERN_ERR "n2: invalid hardware parameters\n");
+ pr_err("invalid hardware parameters\n");
return first_card ? 0 : -EINVAL;
}
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 9617d3d0ee3..1eeedd6a10b 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -212,6 +212,8 @@ static const char rcsid[] =
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -3242,7 +3244,7 @@ static inline void show_version(void)
rcsdate++;
tmp = strrchr(rcsdate, ' ');
*tmp = '\0';
- printk(KERN_INFO "Cyclades-PC300 driver %s %s\n", rcsvers, rcsdate);
+ pr_info("Cyclades-PC300 driver %s %s\n", rcsvers, rcsdate);
} /* show_version */
static const struct net_device_ops cpc_netdev_ops = {
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 1c65d1c3387..d47d2cd1047 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -755,7 +755,7 @@ void cpc_tty_receive(pc300dev_t *pc300dev)
dsr_rx = cpc_readb(card->hw.scabase + DSR_RX(ch));
- cpc_tty = (st_cpc_tty_area *)pc300dev->cpc_tty;
+ cpc_tty = pc300dev->cpc_tty;
while (1) {
rx_len = 0;
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index c7ab3becd26..c49c1b3c7aa 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -17,6 +17,8 @@
* PC300/X21 cards.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -318,7 +320,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
- printk(KERN_ERR "pc300: unable to allocate memory\n");
+ pr_err("unable to allocate memory\n");
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
@@ -328,7 +330,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
pci_resource_len(pdev, 3) < 16384) {
- printk(KERN_ERR "pc300: invalid card EEPROM parameters\n");
+ pr_err("invalid card EEPROM parameters\n");
pc300_pci_remove_one(pdev);
return -EFAULT;
}
@@ -345,7 +347,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
if (card->plxbase == NULL ||
card->scabase == NULL ||
card->rambase == NULL) {
- printk(KERN_ERR "pc300: ioremap() failed\n");
+ pr_err("ioremap() failed\n");
pc300_pci_remove_one(pdev);
}
@@ -370,7 +372,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
for (i = 0; i < card->n_ports; i++)
if (!(card->ports[i].netdev = alloc_hdlcdev(&card->ports[i]))) {
- printk(KERN_ERR "pc300: unable to allocate memory\n");
+ pr_err("unable to allocate memory\n");
pc300_pci_remove_one(pdev);
return -ENOMEM;
}
@@ -411,15 +413,14 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
card->buff_offset = card->n_ports * sizeof(pkt_desc) *
(card->tx_ring_buffers + card->rx_ring_buffers);
- printk(KERN_INFO "pc300: PC300/%s, %u KB RAM at 0x%x, IRQ%u, "
- "using %u TX + %u RX packets rings\n",
- card->type == PC300_X21 ? "X21" :
- card->type == PC300_TE ? "TE" : "RSV",
- ramsize / 1024, ramphys, pdev->irq,
- card->tx_ring_buffers, card->rx_ring_buffers);
+ pr_info("PC300/%s, %u KB RAM at 0x%x, IRQ%u, using %u TX + %u RX packets rings\n",
+ card->type == PC300_X21 ? "X21" :
+ card->type == PC300_TE ? "TE" : "RSV",
+ ramsize / 1024, ramphys, pdev->irq,
+ card->tx_ring_buffers, card->rx_ring_buffers);
if (card->tx_ring_buffers < 1) {
- printk(KERN_ERR "pc300: RAM test failed\n");
+ pr_err("RAM test failed\n");
pc300_pci_remove_one(pdev);
return -EFAULT;
}
@@ -429,8 +430,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
/* Allocate IRQ */
if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pc300", card)) {
- printk(KERN_WARNING "pc300: could not allocate IRQ%d.\n",
- pdev->irq);
+ pr_warn("could not allocate IRQ%d\n", pdev->irq);
pc300_pci_remove_one(pdev);
return -EBUSY;
}
@@ -466,15 +466,13 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
sca_init_port(port);
if (register_hdlc_device(dev)) {
- printk(KERN_ERR "pc300: unable to register hdlc "
- "device\n");
+ pr_err("unable to register hdlc device\n");
port->card = NULL;
pc300_pci_remove_one(pdev);
return -ENOBUFS;
}
- printk(KERN_INFO "%s: PC300 channel %d\n",
- dev->name, port->chan);
+ netdev_info(dev, "PC300 channel %d\n", port->chan);
}
return 0;
}
@@ -505,11 +503,11 @@ static struct pci_driver pc300_pci_driver = {
static int __init pc300_init_module(void)
{
if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
- printk(KERN_ERR "pc300: Invalid PCI clock frequency\n");
+ pr_err("Invalid PCI clock frequency\n");
return -EINVAL;
}
if (use_crystal_clock != 0 && use_crystal_clock != 1) {
- printk(KERN_ERR "pc300: Invalid 'use_crystal_clock' value\n");
+ pr_err("Invalid 'use_crystal_clock' value\n");
return -EINVAL;
}
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index fd7375955e4..1ce21163c77 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -14,6 +14,8 @@
* PLX Technology Inc. PCI9052 Data Book
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
@@ -297,7 +299,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (card == NULL) {
- printk(KERN_ERR "pci200syn: unable to allocate memory\n");
+ pr_err("unable to allocate memory\n");
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
@@ -306,7 +308,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
card->ports[0].netdev = alloc_hdlcdev(&card->ports[0]);
card->ports[1].netdev = alloc_hdlcdev(&card->ports[1]);
if (!card->ports[0].netdev || !card->ports[1].netdev) {
- printk(KERN_ERR "pci200syn: unable to allocate memory\n");
+ pr_err("unable to allocate memory\n");
pci200_pci_remove_one(pdev);
return -ENOMEM;
}
@@ -314,7 +316,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE ||
pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE ||
pci_resource_len(pdev, 3) < 16384) {
- printk(KERN_ERR "pci200syn: invalid card EEPROM parameters\n");
+ pr_err("invalid card EEPROM parameters\n");
pci200_pci_remove_one(pdev);
return -EFAULT;
}
@@ -331,7 +333,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
if (card->plxbase == NULL ||
card->scabase == NULL ||
card->rambase == NULL) {
- printk(KERN_ERR "pci200syn: ioremap() failed\n");
+ pr_err("ioremap() failed\n");
pci200_pci_remove_one(pdev);
return -EFAULT;
}
@@ -357,12 +359,12 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
card->buff_offset = 2 * sizeof(pkt_desc) * (card->tx_ring_buffers +
card->rx_ring_buffers);
- printk(KERN_INFO "pci200syn: %u KB RAM at 0x%x, IRQ%u, using %u TX +"
- " %u RX packets rings\n", ramsize / 1024, ramphys,
- pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers);
+ pr_info("%u KB RAM at 0x%x, IRQ%u, using %u TX + %u RX packets rings\n",
+ ramsize / 1024, ramphys,
+ pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers);
if (card->tx_ring_buffers < 1) {
- printk(KERN_ERR "pci200syn: RAM test failed\n");
+ pr_err("RAM test failed\n");
pci200_pci_remove_one(pdev);
return -EFAULT;
}
@@ -373,8 +375,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
/* Allocate IRQ */
if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pci200syn", card)) {
- printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n",
- pdev->irq);
+ pr_warn("could not allocate IRQ%d\n", pdev->irq);
pci200_pci_remove_one(pdev);
return -EBUSY;
}
@@ -400,15 +401,13 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
port->card = card;
sca_init_port(port);
if (register_hdlc_device(dev)) {
- printk(KERN_ERR "pci200syn: unable to register hdlc "
- "device\n");
+ pr_err("unable to register hdlc device\n");
port->card = NULL;
pci200_pci_remove_one(pdev);
return -ENOBUFS;
}
- printk(KERN_INFO "%s: PCI200SYN channel %d\n",
- dev->name, port->chan);
+ netdev_info(dev, "PCI200SYN channel %d\n", port->chan);
}
sca_flush(card);
@@ -435,7 +434,7 @@ static struct pci_driver pci200_pci_driver = {
static int __init pci200_init_module(void)
{
if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
- printk(KERN_ERR "pci200syn: Invalid PCI clock frequency\n");
+ pr_err("Invalid PCI clock frequency\n");
return -EINVAL;
}
return pci_register_driver(&pci200_pci_driver);
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index cff13a9597c..86127bcc9f7 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -37,6 +37,8 @@
* Known problem: this driver wasn't tested on multiprocessor machine.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
@@ -200,8 +202,8 @@ sbni_isa_probe( struct net_device *dev )
return 0;
else {
- printk( KERN_ERR "sbni: base address 0x%lx is busy, or adapter "
- "is malfunctional!\n", dev->base_addr );
+ pr_err("base address 0x%lx is busy, or adapter is malfunctional!\n",
+ dev->base_addr);
return -ENODEV;
}
}
@@ -226,7 +228,6 @@ static void __init sbni_devsetup(struct net_device *dev)
int __init sbni_probe(int unit)
{
struct net_device *dev;
- static unsigned version_printed __initdata = 0;
int err;
dev = alloc_netdev(sizeof(struct net_local), "sbni", sbni_devsetup);
@@ -250,8 +251,7 @@ int __init sbni_probe(int unit)
free_netdev(dev);
return err;
}
- if( version_printed++ == 0 )
- printk( KERN_INFO "%s", version );
+ pr_info_once("%s", version);
return 0;
}
@@ -303,7 +303,6 @@ sbni_pci_probe( struct net_device *dev )
!= NULL ) {
int pci_irq_line;
unsigned long pci_ioaddr;
- u16 subsys;
if( pdev->vendor != SBNI_PCI_VENDOR &&
pdev->device != SBNI_PCI_DEVICE )
@@ -314,9 +313,7 @@ sbni_pci_probe( struct net_device *dev )
/* Avoid already found cards from previous calls */
if( !request_region( pci_ioaddr, SBNI_IO_EXTENT, dev->name ) ) {
- pci_read_config_word( pdev, PCI_SUBSYSTEM_ID, &subsys );
-
- if (subsys != 2)
+ if (pdev->subsystem_device != 2)
continue;
/* Dual adapter is present */
@@ -326,9 +323,9 @@ sbni_pci_probe( struct net_device *dev )
}
if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs)
- printk( KERN_WARNING
- " WARNING: The PCI BIOS assigned this PCI card to IRQ %d, which is unlikely to work!.\n"
- " You should use the PCI BIOS setup to assign a valid IRQ line.\n",
+ pr_warn(
+"WARNING: The PCI BIOS assigned this PCI card to IRQ %d, which is unlikely to work!.\n"
+"You should use the PCI BIOS setup to assign a valid IRQ line.\n",
pci_irq_line );
/* avoiding re-enable dual adapters */
@@ -372,8 +369,7 @@ sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
outb( 0, ioaddr + CSR0 );
if( !irq ) {
- printk( KERN_ERR "%s: can't detect device irq!\n",
- dev->name );
+ pr_err("%s: can't detect device irq!\n", dev->name);
release_region( ioaddr, SBNI_IO_EXTENT );
return NULL;
}
@@ -386,7 +382,7 @@ sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
/* Fill in sbni-specific dev fields. */
nl = netdev_priv(dev);
if( !nl ) {
- printk( KERN_ERR "%s: unable to get memory!\n", dev->name );
+ pr_err("%s: unable to get memory!\n", dev->name);
release_region( ioaddr, SBNI_IO_EXTENT );
return NULL;
}
@@ -415,21 +411,21 @@ sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
if( inb( ioaddr + CSR0 ) & 0x01 )
nl->state |= FL_SLOW_MODE;
- printk( KERN_NOTICE "%s: ioaddr %#lx, irq %d, "
- "MAC: 00:ff:01:%02x:%02x:%02x\n",
- dev->name, dev->base_addr, dev->irq,
- ((u8 *) dev->dev_addr) [3],
- ((u8 *) dev->dev_addr) [4],
- ((u8 *) dev->dev_addr) [5] );
+ pr_notice("%s: ioaddr %#lx, irq %d, MAC: 00:ff:01:%02x:%02x:%02x\n",
+ dev->name, dev->base_addr, dev->irq,
+ ((u8 *)dev->dev_addr)[3],
+ ((u8 *)dev->dev_addr)[4],
+ ((u8 *)dev->dev_addr)[5]);
- printk( KERN_NOTICE "%s: speed %d, receive level ", dev->name,
- ( (nl->state & FL_SLOW_MODE) ? 500000 : 2000000)
- / (1 << nl->csr1.rate) );
+ pr_notice("%s: speed %d",
+ dev->name,
+ ((nl->state & FL_SLOW_MODE) ? 500000 : 2000000)
+ / (1 << nl->csr1.rate));
if( nl->delta_rxl == 0 )
- printk( "0x%x (fixed)\n", nl->cur_rxl_index );
+ pr_cont(", receive level 0x%x (fixed)\n", nl->cur_rxl_index);
else
- printk( "(auto)\n");
+ pr_cont(", receive level (auto)\n");
#ifdef CONFIG_SBNI_MULTILINE
nl->master = dev;
@@ -568,7 +564,7 @@ handle_channel( struct net_device *dev )
*/
csr0 = inb( ioaddr + CSR0 );
if( !(csr0 & TR_RDY) || (csr0 & RC_RDY) )
- printk( KERN_ERR "%s: internal error!\n", dev->name );
+ netdev_err(dev, "internal error!\n");
/* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */
if( req_ans || nl->tx_frameno != 0 )
@@ -851,7 +847,7 @@ prepare_to_send( struct sk_buff *skb, struct net_device *dev )
/* nl->tx_buf_p == NULL here! */
if( nl->tx_buf_p )
- printk( KERN_ERR "%s: memory leak!\n", dev->name );
+ netdev_err(dev, "memory leak!\n");
nl->outpos = 0;
nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
@@ -1179,16 +1175,15 @@ sbni_open( struct net_device *dev )
((struct net_local *) (netdev_priv(*p)))
->second = dev;
- printk( KERN_NOTICE "%s: using shared irq "
- "with %s\n", dev->name, (*p)->name );
+ netdev_notice(dev, "using shared irq with %s\n",
+ (*p)->name);
nl->state |= FL_SECONDARY;
goto handler_attached;
}
}
if( request_irq(dev->irq, sbni_interrupt, IRQF_SHARED, dev->name, dev) ) {
- printk( KERN_ERR "%s: unable to get IRQ %d.\n",
- dev->name, dev->irq );
+ netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
return -EAGAIN;
}
@@ -1220,8 +1215,8 @@ sbni_close( struct net_device *dev )
struct net_local *nl = netdev_priv(dev);
if( nl->second && nl->second->flags & IFF_UP ) {
- printk( KERN_NOTICE "Secondary channel (%s) is active!\n",
- nl->second->name );
+ netdev_notice(dev, "Secondary channel (%s) is active!\n",
+ nl->second->name);
return -EBUSY;
}
@@ -1363,8 +1358,8 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
return -EFAULT;
slave_dev = dev_get_by_name(&init_net, slave_name );
if( !slave_dev || !(slave_dev->flags & IFF_UP) ) {
- printk( KERN_ERR "%s: trying to enslave non-active "
- "device %s\n", dev->name, slave_name );
+ netdev_err(dev, "trying to enslave non-active device %s\n",
+ slave_name);
return -EPERM;
}
@@ -1417,8 +1412,7 @@ enslave( struct net_device *dev, struct net_device *slave_dev )
spin_unlock( &snl->lock );
spin_unlock( &nl->lock );
- printk( KERN_NOTICE "%s: slave device (%s) attached.\n",
- dev->name, slave_dev->name );
+ netdev_notice(dev, "slave device (%s) attached\n", slave_dev->name);
return 0;
}
@@ -1547,7 +1541,7 @@ sbni_setup( char *p )
break;
}
bad_param:
- printk( KERN_ERR "Error in sbni kernel parameter!\n" );
+ pr_err("Error in sbni kernel parameter!\n");
return 0;
}
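
The sbni.c probe above drops its hand-rolled version_printed counter in favour of pr_info_once(), which keeps the one-shot state internally. A minimal, hypothetical equivalent (version string and function name invented):

/* Hypothetical sketch -- version string and function name invented. */
#include <linux/printk.h>

static const char demo_version[] = "demo WAN driver v0.1";

static int demo_probe_one(void)
{
        pr_info_once("%s\n", demo_version);     /* printed on first call only */
        return 0;
}
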
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 3f4e2b5684d..c8531612eea 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -32,6 +32,8 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -350,24 +352,24 @@ static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int
case SDLA_RET_MODEM:
state = data;
if (*state & SDLA_MODEM_DCD_LOW)
- printk(KERN_INFO "%s: Modem DCD unexpectedly low!\n", dev->name);
+ netdev_info(dev, "Modem DCD unexpectedly low!\n");
if (*state & SDLA_MODEM_CTS_LOW)
- printk(KERN_INFO "%s: Modem CTS unexpectedly low!\n", dev->name);
+ netdev_info(dev, "Modem CTS unexpectedly low!\n");
/* I should probably do something about this! */
break;
case SDLA_RET_CHANNEL_OFF:
- printk(KERN_INFO "%s: Channel became inoperative!\n", dev->name);
+ netdev_info(dev, "Channel became inoperative!\n");
/* same here */
break;
case SDLA_RET_CHANNEL_ON:
- printk(KERN_INFO "%s: Channel became operative!\n", dev->name);
+ netdev_info(dev, "Channel became operative!\n");
/* same here */
break;
case SDLA_RET_DLCI_STATUS:
- printk(KERN_INFO "%s: Status change reported by Access Node.\n", dev->name);
+ netdev_info(dev, "Status change reported by Access Node\n");
len /= sizeof(struct _dlci_stat);
for(pstatus = data, i=0;i < len;i++,pstatus++)
{
@@ -382,29 +384,32 @@ static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int
sprintf(line, "unknown status: %02X", pstatus->flags);
state = line;
}
- printk(KERN_INFO "%s: DLCI %i: %s.\n", dev->name, pstatus->dlci, state);
+ netdev_info(dev, "DLCI %i: %s\n",
+ pstatus->dlci, state);
/* same here */
}
break;
case SDLA_RET_DLCI_UNKNOWN:
- printk(KERN_INFO "%s: Received unknown DLCIs:", dev->name);
+ netdev_info(dev, "Received unknown DLCIs:");
len /= sizeof(short);
for(pdlci = data,i=0;i < len;i++,pdlci++)
- printk(" %i", *pdlci);
- printk("\n");
+ pr_cont(" %i", *pdlci);
+ pr_cont("\n");
break;
case SDLA_RET_TIMEOUT:
- printk(KERN_ERR "%s: Command timed out!\n", dev->name);
+ netdev_err(dev, "Command timed out!\n");
break;
case SDLA_RET_BUF_OVERSIZE:
- printk(KERN_INFO "%s: Bc/CIR overflow, acceptable size is %i\n", dev->name, len);
+ netdev_info(dev, "Bc/CIR overflow, acceptable size is %i\n",
+ len);
break;
case SDLA_RET_BUF_TOO_BIG:
- printk(KERN_INFO "%s: Buffer size over specified max of %i\n", dev->name, len);
+ netdev_info(dev, "Buffer size over specified max of %i\n",
+ len);
break;
case SDLA_RET_CHANNEL_INACTIVE:
@@ -415,7 +420,8 @@ static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int
break;
default:
- printk(KERN_DEBUG "%s: Cmd 0x%2.2X generated return code 0x%2.2X\n", dev->name, cmd, ret);
+ netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n",
+ cmd, ret);
/* Further processing could be done here */
break;
}
@@ -678,12 +684,14 @@ static netdev_tx_t sdla_transmit(struct sk_buff *skb,
case ARPHRD_FRAD:
if (skb->dev->type != ARPHRD_DLCI)
{
- printk(KERN_WARNING "%s: Non DLCI device, type %i, tried to send on FRAD module.\n", dev->name, skb->dev->type);
+ netdev_warn(dev, "Non DLCI device, type %i, tried to send on FRAD module\n",
+ skb->dev->type);
accept = 0;
}
break;
default:
- printk(KERN_WARNING "%s: unknown firmware type 0x%4.4X\n", dev->name, dev->type);
+ netdev_warn(dev, "unknown firmware type 0x%04X\n",
+ dev->type);
accept = 0;
break;
}
@@ -807,7 +815,8 @@ static void sdla_receive(struct net_device *dev)
if (i == CONFIG_DLCI_MAX)
{
- printk(KERN_NOTICE "%s: Received packet from invalid DLCI %i, ignoring.", dev->name, dlci);
+ netdev_notice(dev, "Received packet from invalid DLCI %i, ignoring\n",
+ dlci);
dev->stats.rx_errors++;
success = 0;
}
@@ -819,7 +828,7 @@ static void sdla_receive(struct net_device *dev)
skb = dev_alloc_skb(len + sizeof(struct frhdr));
if (skb == NULL)
{
- printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
+ netdev_notice(dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
success = 0;
}
@@ -880,8 +889,7 @@ static irqreturn_t sdla_isr(int dummy, void *dev_id)
if (!flp->initialized)
{
- printk(KERN_WARNING "%s: irq %d for uninitialized device.\n",
- dev->name, dev->irq);
+ netdev_warn(dev, "irq %d for uninitialized device\n", dev->irq);
return IRQ_NONE;
}
@@ -901,7 +909,7 @@ static irqreturn_t sdla_isr(int dummy, void *dev_id)
case SDLA_INTR_TX:
case SDLA_INTR_COMPLETE:
case SDLA_INTR_TIMER:
- printk(KERN_WARNING "%s: invalid irq flag 0x%02X.\n", dev->name, byte);
+ netdev_warn(dev, "invalid irq flag 0x%02X\n", byte);
break;
}
@@ -1347,7 +1355,7 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map)
return -EINVAL;
if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
- printk(KERN_WARNING "SDLA: io-port 0x%04lx in use\n", dev->base_addr);
+ pr_warn("io-port 0x%04lx in use\n", dev->base_addr);
return -EINVAL;
}
base = map->base_addr;
@@ -1412,7 +1420,7 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map)
}
}
- printk(KERN_NOTICE "%s: Unknown card type\n", dev->name);
+ netdev_notice(dev, "Unknown card type\n");
err = -ENODEV;
goto fail;
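
Where a struct net_device is available, the sdla.c hunks switch to the netdev_*() helpers, which print the driver and interface name themselves, so the old "%s: ...", dev->name pair disappears. A minimal sketch of that usage (sdla_demo_report() is an illustrative helper, not from the patch):

#include <linux/netdevice.h>

static void sdla_demo_report(struct net_device *dev, bool timed_out)
{
	if (timed_out)
		netdev_err(dev, "Command timed out!\n");
	else
		netdev_info(dev, "Channel became operative!\n");
}
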
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index e91457d6023..0b4fd05e150 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -12,6 +12,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -190,7 +192,7 @@ static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
dev->irq = irq;
if (register_hdlc_device(dev)) {
- printk(KERN_ERR "sealevel: unable to register HDLC device\n");
+ pr_err("unable to register HDLC device\n");
free_netdev(dev);
return -1;
}
@@ -215,8 +217,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
*/
if (!request_region(iobase, 8, "Sealevel 4021")) {
- printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n",
- iobase);
+ pr_warn("I/O 0x%X already in use\n", iobase);
return NULL;
}
@@ -267,7 +268,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
"SeaLevel", dev) < 0) {
- printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);
+ pr_warn("IRQ %d already in use\n", irq);
goto err_request_irq;
}
@@ -292,7 +293,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
*/
if (z8530_init(dev) != 0) {
- printk(KERN_ERR "Z8530 series device not found.\n");
+ pr_err("Z8530 series device not found\n");
enable_irq(irq);
goto free_hw;
}
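
A hedged sketch of the request_region()/pr_warn() pattern seen in slvl_init() above; slvl_demo_claim() and the 8-byte extent are illustrative only. With pr_fmt() defined as in the patch, the warning comes out prefixed with the module name:

#include <linux/ioport.h>
#include <linux/printk.h>

static int slvl_demo_claim(unsigned int iobase)
{
	if (!request_region(iobase, 8, "Sealevel 4021")) {
		pr_warn("I/O 0x%X already in use\n", iobase);
		return -EBUSY;
	}
	return 0;
}
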
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index db73a7be199..44b70719725 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -13,6 +13,8 @@
* - wanXL100 will require minor driver modifications, no access to hw
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -22,6 +24,7 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/hdlc.h>
@@ -101,9 +104,8 @@ static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
{
dma_addr_t addr = pci_map_single(pdev, ptr, size, direction);
if (addr + size > 0x100000000LL)
- printk(KERN_CRIT "wanXL %s: pci_map_single() returned memory"
- " at 0x%LX!\n", pci_name(pdev),
- (unsigned long long)addr);
+ pr_crit("%s: pci_map_single() returned memory at 0x%llx!\n",
+ pci_name(pdev), (unsigned long long)addr);
return addr;
}
@@ -146,8 +148,8 @@ static inline void wanxl_cable_intr(port_t *port)
}
dte = (value & STATUS_CABLE_DCE) ? " DCE" : " DTE";
}
- printk(KERN_INFO "%s: %s%s module, %s cable%s%s\n",
- port->dev->name, pm, dte, cable, dsr, dcd);
+ netdev_info(port->dev, "%s%s module, %s cable%s%s\n",
+ pm, dte, cable, dsr, dcd);
if (value & STATUS_CABLE_DCD)
netif_carrier_on(port->dev);
@@ -197,8 +199,8 @@ static inline void wanxl_rx_intr(card_t *card)
while (desc = &card->status->rx_descs[card->rx_in],
desc->stat != PACKET_EMPTY) {
if ((desc->stat & PACKET_PORT_MASK) > card->n_ports)
- printk(KERN_CRIT "wanXL %s: received packet for"
- " nonexistent port\n", pci_name(card->pdev));
+ pr_crit("%s: received packet for nonexistent port\n",
+ pci_name(card->pdev));
else {
struct sk_buff *skb = card->rx_skbs[card->rx_in];
port_t *port = &card->ports[desc->stat &
@@ -282,7 +284,7 @@ static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
netif_stop_queue(dev);
- spin_unlock_irq(&port->lock);
+ spin_unlock(&port->lock);
return NETDEV_TX_BUSY; /* request packet to be queued */
}
@@ -396,7 +398,7 @@ static int wanxl_open(struct net_device *dev)
int i;
if (get_status(port)->open) {
- printk(KERN_ERR "%s: port already open\n", dev->name);
+ netdev_err(dev, "port already open\n");
return -EIO;
}
if ((i = hdlc_open(dev)) != 0)
@@ -416,7 +418,7 @@ static int wanxl_open(struct net_device *dev)
}
} while (time_after(timeout, jiffies));
- printk(KERN_ERR "%s: unable to open port\n", dev->name);
+ netdev_err(dev, "unable to open port\n");
/* ask the card to close the port, should it be still alive */
writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr);
return -EFAULT;
@@ -442,7 +444,7 @@ static int wanxl_close(struct net_device *dev)
} while (time_after(timeout, jiffies));
if (get_status(port)->open)
- printk(KERN_ERR "%s: unable to close port\n", dev->name);
+ netdev_err(dev, "unable to close port\n");
netif_stop_queue(dev);
@@ -567,11 +569,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
int i, ports, alloc_size;
#ifndef MODULE
- static int printed_version;
- if (!printed_version) {
- printed_version++;
- printk(KERN_INFO "%s\n", version);
- }
+ pr_info_once("%s\n", version);
#endif
i = pci_enable_device(pdev);
@@ -587,7 +585,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
work on most platforms */
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(28)) ||
pci_set_dma_mask(pdev, DMA_BIT_MASK(28))) {
- printk(KERN_ERR "wanXL: No usable DMA configuration\n");
+ pr_err("No usable DMA configuration\n");
return -EIO;
}
@@ -606,8 +604,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
alloc_size = sizeof(card_t) + ports * sizeof(port_t);
card = kzalloc(alloc_size, GFP_KERNEL);
if (card == NULL) {
- printk(KERN_ERR "wanXL %s: unable to allocate memory\n",
- pci_name(pdev));
+ pr_err("%s: unable to allocate memory\n", pci_name(pdev));
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
@@ -634,7 +631,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
to indicate the card can do 32-bit DMA addressing */
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) ||
pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_ERR "wanXL: No usable DMA configuration\n");
+ pr_err("No usable DMA configuration\n");
wanxl_pci_remove_one(pdev);
return -EIO;
}
@@ -644,7 +641,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
card->plx = ioremap_nocache(plx_phy, 0x70);
if (!card->plx) {
- printk(KERN_ERR "wanxl: ioremap() failed\n");
+ pr_err("ioremap() failed\n");
wanxl_pci_remove_one(pdev);
return -EFAULT;
}
@@ -656,8 +653,8 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
timeout = jiffies + 20 * HZ;
while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) {
if (time_before(timeout, jiffies)) {
- printk(KERN_WARNING "wanXL %s: timeout waiting for"
- " PUTS to complete\n", pci_name(pdev));
+ pr_warn("%s: timeout waiting for PUTS to complete\n",
+ pci_name(pdev));
wanxl_pci_remove_one(pdev);
return -ENODEV;
}
@@ -668,8 +665,8 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
break;
default:
- printk(KERN_WARNING "wanXL %s: PUTS test 0x%X"
- " failed\n", pci_name(pdev), stat & 0x30);
+ pr_warn("%s: PUTS test 0x%X failed\n",
+ pci_name(pdev), stat & 0x30);
wanxl_pci_remove_one(pdev);
return -ENODEV;
}
@@ -687,17 +684,16 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
/* sanity check the board's reported memory size */
if (ramsize < BUFFERS_ADDR +
(TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
- printk(KERN_WARNING "wanXL %s: no enough on-board RAM"
- " (%u bytes detected, %u bytes required)\n",
- pci_name(pdev), ramsize, BUFFERS_ADDR +
- (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports);
+ pr_warn("%s: no enough on-board RAM (%u bytes detected, %u bytes required)\n",
+ pci_name(pdev), ramsize,
+ BUFFERS_ADDR +
+ (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports);
wanxl_pci_remove_one(pdev);
return -ENODEV;
}
if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) {
- printk(KERN_WARNING "wanXL %s: unable to Set Byte Swap"
- " Mode\n", pci_name(pdev));
+ pr_warn("%s: unable to Set Byte Swap Mode\n", pci_name(pdev));
wanxl_pci_remove_one(pdev);
return -ENODEV;
}
@@ -714,7 +710,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware));
if (!mem) {
- printk(KERN_ERR "wanxl: ioremap() failed\n");
+ pr_err("ioremap() failed\n");
wanxl_pci_remove_one(pdev);
return -EFAULT;
}
@@ -733,8 +729,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
writel(0, card->plx + PLX_MAILBOX_5);
if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) {
- printk(KERN_WARNING "wanXL %s: unable to Abort and Jump\n",
- pci_name(pdev));
+ pr_warn("%s: unable to Abort and Jump\n", pci_name(pdev));
wanxl_pci_remove_one(pdev);
return -ENODEV;
}
@@ -748,8 +743,8 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
}while (time_after(timeout, jiffies));
if (!stat) {
- printk(KERN_WARNING "wanXL %s: timeout while initializing card "
- "firmware\n", pci_name(pdev));
+ pr_warn("%s: timeout while initializing card firmware\n",
+ pci_name(pdev));
wanxl_pci_remove_one(pdev);
return -ENODEV;
}
@@ -758,13 +753,13 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
ramsize = stat;
#endif
- printk(KERN_INFO "wanXL %s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n",
- pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);
+ pr_info("%s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n",
+ pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);
/* Allocate IRQ */
if (request_irq(pdev->irq, wanxl_intr, IRQF_SHARED, "wanXL", card)) {
- printk(KERN_WARNING "wanXL %s: could not allocate IRQ%i.\n",
- pci_name(pdev), pdev->irq);
+ pr_warn("%s: could not allocate IRQ%i\n",
+ pci_name(pdev), pdev->irq);
wanxl_pci_remove_one(pdev);
return -EBUSY;
}
@@ -775,8 +770,8 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
port_t *port = &card->ports[i];
struct net_device *dev = alloc_hdlcdev(port);
if (!dev) {
- printk(KERN_ERR "wanXL %s: unable to allocate"
- " memory\n", pci_name(pdev));
+ pr_err("%s: unable to allocate memory\n",
+ pci_name(pdev));
wanxl_pci_remove_one(pdev);
return -ENOMEM;
}
@@ -792,8 +787,8 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
port->node = i;
get_status(port)->clocking = CLOCK_EXT;
if (register_hdlc_device(dev)) {
- printk(KERN_ERR "wanXL %s: unable to register hdlc"
- " device\n", pci_name(pdev));
+ pr_err("%s: unable to register hdlc device\n",
+ pci_name(pdev));
free_netdev(dev);
wanxl_pci_remove_one(pdev);
return -ENOBUFS;
@@ -801,11 +796,11 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
card->n_ports++;
}
- printk(KERN_INFO "wanXL %s: port", pci_name(pdev));
+ pr_info("%s: port", pci_name(pdev));
for (i = 0; i < ports; i++)
- printk("%s #%i: %s", i ? "," : "", i,
- card->ports[i].dev->name);
- printk("\n");
+ pr_cont("%s #%i: %s",
+ i ? "," : "", i, card->ports[i].dev->name);
+ pr_cont("\n");
for (i = 0; i < ports; i++)
wanxl_cable_intr(&card->ports[i]); /* get carrier status etc.*/
@@ -835,7 +830,7 @@ static struct pci_driver wanxl_pci_driver = {
static int __init wanxl_init_module(void)
{
#ifdef MODULE
- printk(KERN_INFO "%s\n", version);
+ pr_info("%s\n", version);
#endif
return pci_register_driver(&wanxl_pci_driver);
}
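
Two helpers carry most of the wanxl.c cleanup: pr_info_once() replaces the hand-rolled printed_version flag removed above, and pr_cont() continues the current console line, standing in for the old bare printk(" ...") calls that built the port list. A small sketch under those assumptions (names are illustrative, not from the patch):

#include <linux/printk.h>

static const char demo_version[] = "wanXL demo banner";

static void wanxl_demo_announce(int n_ports)
{
	int i;

	pr_info_once("%s\n", demo_version);	/* printed on the first call only */

	pr_info("port");
	for (i = 0; i < n_ports; i++)
		pr_cont("%s #%i", i ? "," : "", i);
	pr_cont("\n");
}
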
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 24297b274cd..46ceb3ae907 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -14,6 +14,8 @@
* 2000-10-29 Henner Eisen lapb_data_indication() return status.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <asm/system.h>
@@ -96,7 +98,7 @@ static struct x25_asy *x25_asy_alloc(void)
x25_asy_devs[i] = dev;
return sl;
} else {
- printk(KERN_WARNING "x25_asy_alloc() - register_netdev() failure.\n");
+ pr_warn("%s(): register_netdev() failure\n", __func__);
free_netdev(dev);
}
}
@@ -114,8 +116,7 @@ static void x25_asy_free(struct x25_asy *sl)
sl->xbuff = NULL;
if (!test_and_clear_bit(SLF_INUSE, &sl->flags))
- printk(KERN_ERR "%s: x25_asy_free for already free unit.\n",
- sl->dev->name);
+ netdev_err(sl->dev, "x25_asy_free for already free unit\n");
}
static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
@@ -128,8 +129,7 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
rbuff = kmalloc(len + 4, GFP_ATOMIC);
if (xbuff == NULL || rbuff == NULL) {
- printk(KERN_WARNING "%s: unable to grow X.25 buffers, MTU change cancelled.\n",
- dev->name);
+ netdev_warn(dev, "unable to grow X.25 buffers, MTU change cancelled\n");
kfree(xbuff);
kfree(rbuff);
return -ENOMEM;
@@ -198,8 +198,7 @@ static void x25_asy_bump(struct x25_asy *sl)
skb = dev_alloc_skb(count+1);
if (skb == NULL) {
- printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n",
- sl->dev->name);
+ netdev_warn(sl->dev, "memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
return;
}
@@ -287,9 +286,9 @@ static void x25_asy_timeout(struct net_device *dev)
/* May be we must check transmitter timeout here ?
* 14 Oct 1994 Dmitry Gorodchanin.
*/
- printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
- (tty_chars_in_buffer(sl->tty) || sl->xleft) ?
- "bad line quality" : "driver error");
+ netdev_warn(dev, "transmit timed out, %s?\n",
+ (tty_chars_in_buffer(sl->tty) || sl->xleft) ?
+ "bad line quality" : "driver error");
sl->xleft = 0;
clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
x25_asy_unlock(sl);
@@ -306,8 +305,7 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
int err;
if (!netif_running(sl->dev)) {
- printk(KERN_ERR "%s: xmit call when iface is down\n",
- dev->name);
+ netdev_err(dev, "xmit call when iface is down\n");
kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -318,13 +316,15 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
case X25_IFACE_CONNECT: /* Connection request .. do nothing */
err = lapb_connect_request(dev);
if (err != LAPB_OK)
- printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err);
+ netdev_err(dev, "lapb_connect_request error: %d\n",
+ err);
kfree_skb(skb);
return NETDEV_TX_OK;
case X25_IFACE_DISCONNECT: /* do nothing - hang up ?? */
err = lapb_disconnect_request(dev);
if (err != LAPB_OK)
- printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err);
+ netdev_err(dev, "lapb_disconnect_request error: %d\n",
+ err);
default:
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -343,7 +343,7 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
err = lapb_data_request(dev, skb);
if (err != LAPB_OK) {
- printk(KERN_ERR "x25_asy: lapb_data_request error - %d\n", err);
+ netdev_err(dev, "lapb_data_request error: %d\n", err);
kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -378,7 +378,7 @@ static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
spin_lock(&sl->lock);
if (netif_queue_stopped(sl->dev) || sl->tty == NULL) {
spin_unlock(&sl->lock);
- printk(KERN_ERR "x25_asy: tbusy drop\n");
+ netdev_err(dev, "tbusy drop\n");
kfree_skb(skb);
return;
}
@@ -404,7 +404,7 @@ static void x25_asy_connected(struct net_device *dev, int reason)
skb = dev_alloc_skb(1);
if (skb == NULL) {
- printk(KERN_ERR "x25_asy: out of memory\n");
+ netdev_err(dev, "out of memory\n");
return;
}
@@ -423,7 +423,7 @@ static void x25_asy_disconnected(struct net_device *dev, int reason)
skb = dev_alloc_skb(1);
if (skb == NULL) {
- printk(KERN_ERR "x25_asy: out of memory\n");
+ netdev_err(dev, "out of memory\n");
return;
}
@@ -603,8 +603,8 @@ static void x25_asy_close_tty(struct tty_struct *tty)
err = lapb_unregister(sl->dev);
if (err != LAPB_OK)
- printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
- err);
+ pr_err("x25_asy_close: lapb_unregister error: %d\n",
+ err);
tty->disc_data = NULL;
sl->tty = NULL;
@@ -782,14 +782,13 @@ static int __init init_x25_asy(void)
if (x25_asy_maxdev < 4)
x25_asy_maxdev = 4; /* Sanity */
- printk(KERN_INFO "X.25 async: version 0.00 ALPHA "
- "(dynamic channels, max=%d).\n", x25_asy_maxdev);
+ pr_info("X.25 async: version 0.00 ALPHA (dynamic channels, max=%d)\n",
+ x25_asy_maxdev);
x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device *),
GFP_KERNEL);
if (!x25_asy_devs) {
- printk(KERN_WARNING "X25 async: Can't allocate x25_asy_ctrls[] "
- "array! Uaargh! (-> No X.25 available)\n");
+ pr_warn("Can't allocate x25_asy_ctrls[] array! Uaargh! (-> No X.25 available)\n");
return -ENOMEM;
}
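
One detail worth noting in the x25_asy.c hunks: the hard-coded "x25_asy_alloc() - ..." text is replaced by "%s()" with __func__, so the message stays correct if the function is ever renamed. A minimal sketch of that idiom (x25_demo_register() is illustrative, not from the patch):

#include <linux/printk.h>
#include <linux/netdevice.h>

static int x25_demo_register(struct net_device *dev)
{
	int err = register_netdev(dev);

	if (err)
		pr_warn("%s(): register_netdev() failure\n", __func__);
	return err;
}
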
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 0806232e0f8..0e576906170 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -36,6 +36,8 @@
* Synchronous mode without DMA is unlikely to pass about 2400 baud.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -365,7 +367,7 @@ static void z8530_rx(struct z8530_channel *c)
c->count=0;
if(stat&Rx_OVR)
{
- printk(KERN_WARNING "%s: overrun\n", c->dev->name);
+ pr_warn("%s: overrun\n", c->dev->name);
c->rx_overrun++;
}
if(stat&CRC_ERR)
@@ -464,12 +466,12 @@ static void z8530_status(struct z8530_channel *chan)
if (altered & chan->dcdcheck)
{
if (status & chan->dcdcheck) {
- printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
+ pr_info("%s: DCD raised\n", chan->dev->name);
write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
if (chan->netdevice)
netif_carrier_on(chan->netdevice);
} else {
- printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
+ pr_info("%s: DCD lost\n", chan->dev->name);
write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
z8530_flush_fifo(chan);
if (chan->netdevice)
@@ -538,12 +540,12 @@ static void z8530_dma_tx(struct z8530_channel *chan)
{
if(!chan->dma_tx)
{
- printk(KERN_WARNING "Hey who turned the DMA off?\n");
+ pr_warn("Hey who turned the DMA off?\n");
z8530_tx(chan);
return;
}
/* This shouldn't occur in DMA mode */
- printk(KERN_ERR "DMA tx - bogus event!\n");
+ pr_err("DMA tx - bogus event!\n");
z8530_tx(chan);
}
@@ -585,12 +587,12 @@ static void z8530_dma_status(struct z8530_channel *chan)
if (altered & chan->dcdcheck)
{
if (status & chan->dcdcheck) {
- printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
+ pr_info("%s: DCD raised\n", chan->dev->name);
write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
if (chan->netdevice)
netif_carrier_on(chan->netdevice);
} else {
- printk(KERN_INFO "%s:DCD lost\n", chan->dev->name);
+ pr_info("%s: DCD lost\n", chan->dev->name);
write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
z8530_flush_fifo(chan);
if (chan->netdevice)
@@ -712,7 +714,7 @@ irqreturn_t z8530_interrupt(int irq, void *dev_id)
if(locker)
{
- printk(KERN_ERR "IRQ re-enter\n");
+ pr_err("IRQ re-enter\n");
return IRQ_NONE;
}
locker=1;
@@ -758,7 +760,8 @@ irqreturn_t z8530_interrupt(int irq, void *dev_id)
}
spin_unlock(&dev->lock);
if(work==5000)
- printk(KERN_ERR "%s: interrupt jammed - abort(0x%X)!\n", dev->name, intr);
+ pr_err("%s: interrupt jammed - abort(0x%X)!\n",
+ dev->name, intr);
/* Ok all done */
locker=0;
return IRQ_HANDLED;
@@ -1225,7 +1228,7 @@ static const char *z8530_type_name[]={
void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
{
- printk(KERN_INFO "%s: %s found at %s 0x%lX, IRQ %d.\n",
+ pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
dev->name,
z8530_type_name[dev->type],
mapping,
@@ -1621,8 +1624,7 @@ static void z8530_rx_done(struct z8530_channel *c)
else
/* Can't occur as we dont reenable the DMA irq until
after the flip is done */
- printk(KERN_WARNING "%s: DMA flip overrun!\n",
- c->netdevice->name);
+ netdev_warn(c->netdevice, "DMA flip overrun!\n");
release_dma_lock(flags);
@@ -1637,8 +1639,7 @@ static void z8530_rx_done(struct z8530_channel *c)
skb = dev_alloc_skb(ct);
if (skb == NULL) {
c->netdevice->stats.rx_dropped++;
- printk(KERN_WARNING "%s: Memory squeeze.\n",
- c->netdevice->name);
+ netdev_warn(c->netdevice, "Memory squeeze\n");
} else {
skb_put(skb, ct);
skb_copy_to_linear_data(skb, rxb, ct);
@@ -1678,8 +1679,7 @@ static void z8530_rx_done(struct z8530_channel *c)
c->skb2 = dev_alloc_skb(c->mtu);
if (c->skb2 == NULL)
- printk(KERN_WARNING "%s: memory squeeze.\n",
- c->netdevice->name);
+ netdev_warn(c->netdevice, "memory squeeze\n");
else
skb_put(c->skb2, c->mtu);
c->netdevice->stats.rx_packets++;
@@ -1693,7 +1693,7 @@ static void z8530_rx_done(struct z8530_channel *c)
c->rx_function(c, skb);
} else {
c->netdevice->stats.rx_dropped++;
- printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
+ netdev_err(c->netdevice, "Lost a frame\n");
}
}
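
The z85230.c hunks illustrate the split used throughout this series: netdev_*() once a struct net_device is attached, plain pr_*() (prefixed via pr_fmt) in paths that only have a channel name. A hedged sketch of that convention, with struct demo_chan standing in for struct z8530_channel:

#include <linux/printk.h>
#include <linux/netdevice.h>

struct demo_chan {
	const char *name;
	struct net_device *netdevice;	/* may be NULL before attach */
};

static void demo_report_overrun(struct demo_chan *c)
{
	if (c->netdevice)
		netdev_warn(c->netdevice, "DMA flip overrun!\n");
	else
		pr_warn("%s: overrun\n", c->name);
}
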
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 5eacc653a94..c421a614185 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -155,7 +155,7 @@
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/rwsem.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <net/wimax.h>
#include <linux/wimax/i2400m.h>
#include <asm/byteorder.h>
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index afe2cbc6cb2..43ebc44fc82 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -16,6 +16,7 @@
*/
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 55cf71fbffe..e1b3e3c134f 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2823,6 +2823,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
dev->wireless_data = &ai->wireless_data;
dev->irq = irq;
dev->base_addr = port;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
SET_NETDEV_DEV(dev, dmdev);
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 7cf4317a2a8..17c4b56c387 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -161,6 +161,7 @@ struct ath_common {
const struct ath_bus_ops *bus_ops;
bool btcoex_enabled;
+ bool disable_ani;
};
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index ea998278155..a2a167363db 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -24,7 +24,6 @@
#include "debug.h"
#include "base.h"
#include "reg.h"
-#include "debug.h"
/* return bus cachesize in 4B word units */
static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
@@ -35,8 +34,8 @@ static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
static bool
ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
- struct ath5k_softc *sc = common->priv;
- struct platform_device *pdev = to_platform_device(sc->dev);
+ struct ath5k_hw *ah = common->priv;
+ struct platform_device *pdev = to_platform_device(ah->dev);
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
u16 *eeprom, *eeprom_end;
@@ -56,8 +55,7 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
int ath5k_hw_read_srev(struct ath5k_hw *ah)
{
- struct ath5k_softc *sc = ah->ah_sc;
- struct platform_device *pdev = to_platform_device(sc->dev);
+ struct platform_device *pdev = to_platform_device(ah->dev);
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
ah->ah_mac_srev = bcfg->devid;
return 0;
@@ -65,12 +63,11 @@ int ath5k_hw_read_srev(struct ath5k_hw *ah)
static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
{
- struct ath5k_softc *sc = ah->ah_sc;
- struct platform_device *pdev = to_platform_device(sc->dev);
+ struct platform_device *pdev = to_platform_device(ah->dev);
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
u8 *cfg_mac;
- if (to_platform_device(sc->dev)->id == 0)
+ if (to_platform_device(ah->dev)->id == 0)
cfg_mac = bcfg->config->wlan0_mac;
else
cfg_mac = bcfg->config->wlan1_mac;
@@ -90,7 +87,7 @@ static const struct ath_bus_ops ath_ahb_bus_ops = {
static int ath_ahb_probe(struct platform_device *pdev)
{
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
- struct ath5k_softc *sc;
+ struct ath5k_hw *ah;
struct ieee80211_hw *hw;
struct resource *res;
void __iomem *mem;
@@ -127,19 +124,19 @@ static int ath_ahb_probe(struct platform_device *pdev)
irq = res->start;
- hw = ieee80211_alloc_hw(sizeof(struct ath5k_softc), &ath5k_hw_ops);
+ hw = ieee80211_alloc_hw(sizeof(struct ath5k_hw), &ath5k_hw_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
ret = -ENOMEM;
goto err_out;
}
- sc = hw->priv;
- sc->hw = hw;
- sc->dev = &pdev->dev;
- sc->iobase = mem;
- sc->irq = irq;
- sc->devid = bcfg->devid;
+ ah = hw->priv;
+ ah->hw = hw;
+ ah->dev = &pdev->dev;
+ ah->iobase = mem;
+ ah->irq = irq;
+ ah->devid = bcfg->devid;
if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
/* Enable WMAC AHB arbitration */
@@ -155,7 +152,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
/* Enable WMAC DMA access (assuming 5312 or 231x*/
/* TODO: check other platforms */
reg = __raw_readl((void __iomem *) AR5K_AR5312_ENABLE);
- if (to_platform_device(sc->dev)->id == 0)
+ if (to_platform_device(ah->dev)->id == 0)
reg |= AR5K_AR5312_ENABLE_WLAN0;
else
reg |= AR5K_AR5312_ENABLE_WLAN1;
@@ -166,13 +163,13 @@ static int ath_ahb_probe(struct platform_device *pdev)
* used as pass-through. Disable 2 GHz support in the
* driver for it
*/
- if (to_platform_device(sc->dev)->id == 0 &&
- (bcfg->config->flags & (BD_WLAN0|BD_WLAN1)) ==
- (BD_WLAN1|BD_WLAN0))
- __set_bit(ATH_STAT_2G_DISABLED, sc->status);
+ if (to_platform_device(ah->dev)->id == 0 &&
+ (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
+ (BD_WLAN1 | BD_WLAN0))
+ __set_bit(ATH_STAT_2G_DISABLED, ah->status);
}
- ret = ath5k_init_softc(sc, &ath_ahb_bus_ops);
+ ret = ath5k_init_softc(ah, &ath_ahb_bus_ops);
if (ret != 0) {
dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret);
ret = -ENODEV;
@@ -194,13 +191,13 @@ static int ath_ahb_remove(struct platform_device *pdev)
{
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
- struct ath5k_softc *sc;
+ struct ath5k_hw *ah;
u32 reg;
if (!hw)
return 0;
- sc = hw->priv;
+ ah = hw->priv;
if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
/* Disable WMAC AHB arbitration */
@@ -210,15 +207,16 @@ static int ath_ahb_remove(struct platform_device *pdev)
} else {
/*Stop DMA access */
reg = __raw_readl((void __iomem *) AR5K_AR5312_ENABLE);
- if (to_platform_device(sc->dev)->id == 0)
+ if (to_platform_device(ah->dev)->id == 0)
reg &= ~AR5K_AR5312_ENABLE_WLAN0;
else
reg &= ~AR5K_AR5312_ENABLE_WLAN1;
__raw_writel(reg, (void __iomem *) AR5K_AR5312_ENABLE);
}
- ath5k_deinit_softc(sc);
+ ath5k_deinit_softc(ah);
platform_set_drvdata(pdev, NULL);
+ ieee80211_free_hw(hw);
return 0;
}
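
The ath5k/ahb.c hunks above reflect the merge of ath5k_softc into ath5k_hw: the ieee80211_hw private area now holds the per-device state directly, so bus glue dereferences hw->priv once instead of hopping through an ah_sc back-pointer. A hedged sketch with the struct reduced to what the example needs (demo_ath5k_hw and demo_get_irq() are illustrative stand-ins):

#include <net/mac80211.h>

struct demo_ath5k_hw {			/* stand-in for struct ath5k_hw */
	struct ieee80211_hw *hw;
	struct device *dev;
	int irq;
};

static int demo_get_irq(struct ieee80211_hw *hw)
{
	struct demo_ath5k_hw *ah = hw->priv;	/* was: struct ath5k_softc *sc */

	return ah->irq;
}
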
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index f915f404302..603ae15f139 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -74,7 +74,7 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
static const s8 fr[] = { -78, -80 };
#endif
if (level < 0 || level >= ARRAY_SIZE(sz)) {
- ATH5K_ERR(ah->ah_sc, "noise immuniy level %d out of range",
+ ATH5K_ERR(ah, "noise immunity level %d out of range",
level);
return;
}
@@ -88,8 +88,8 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
AR5K_PHY_SIG_FIRPWR, fr[level]);
- ah->ah_sc->ani_state.noise_imm_level = level;
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+ ah->ani_state.noise_imm_level = level;
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
@@ -105,8 +105,8 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
static const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
if (level < 0 || level >= ARRAY_SIZE(val) ||
- level > ah->ah_sc->ani_state.max_spur_level) {
- ATH5K_ERR(ah->ah_sc, "spur immunity level %d out of range",
+ level > ah->ani_state.max_spur_level) {
+ ATH5K_ERR(ah, "spur immunity level %d out of range",
level);
return;
}
@@ -114,8 +114,8 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, val[level]);
- ah->ah_sc->ani_state.spur_level = level;
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+ ah->ani_state.spur_level = level;
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
@@ -130,15 +130,15 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
static const int val[] = { 0, 4, 8 };
if (level < 0 || level >= ARRAY_SIZE(val)) {
- ATH5K_ERR(ah->ah_sc, "firstep level %d out of range", level);
+ ATH5K_ERR(ah, "firstep level %d out of range", level);
return;
}
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
AR5K_PHY_SIG_FIRSTEP, val[level]);
- ah->ah_sc->ani_state.firstep_level = level;
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
+ ah->ani_state.firstep_level = level;
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
@@ -178,8 +178,8 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
- ah->ah_sc->ani_state.ofdm_weak_sig = on;
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
+ ah->ani_state.ofdm_weak_sig = on;
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "turned %s",
on ? "on" : "off");
}
@@ -195,8 +195,8 @@ ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
static const int val[] = { 8, 6 };
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
- ah->ah_sc->ani_state.cck_weak_sig = on;
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
+ ah->ani_state.cck_weak_sig = on;
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "turned %s",
on ? "on" : "off");
}
@@ -218,7 +218,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
{
int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)",
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "raise immunity (%s)",
ofdm_trigger ? "ODFM" : "CCK");
/* first: raise noise immunity */
@@ -229,13 +229,13 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
/* only OFDM: raise spur immunity level */
if (ofdm_trigger &&
- as->spur_level < ah->ah_sc->ani_state.max_spur_level) {
+ as->spur_level < ah->ani_state.max_spur_level) {
ath5k_ani_set_spur_immunity_level(ah, as->spur_level + 1);
return;
}
/* AP mode */
- if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
+ if (ah->opmode == NL80211_IFTYPE_AP) {
if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
return;
@@ -248,7 +248,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
* don't shut out a remote node by raising immunity too high. */
if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"beacon RSSI high");
/* only OFDM: beacon RSSI is high, we can disable ODFM weak
* signal detection */
@@ -265,7 +265,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
} else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
/* beacon RSSI in mid range, we need OFDM weak signal detect,
* but can raise firstep level */
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"beacon RSSI mid");
if (ofdm_trigger && as->ofdm_weak_sig == false)
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
@@ -275,7 +275,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
} else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
/* beacon RSSI is low. in B/G mode turn of OFDM weak signal
* detect and zero firstep level to maximize CCK sensitivity */
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"beacon RSSI low, 2GHz");
if (ofdm_trigger && as->ofdm_weak_sig == true)
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
@@ -303,9 +303,9 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
{
int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity");
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "lower immunity");
- if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
+ if (ah->opmode == NL80211_IFTYPE_AP) {
/* AP mode */
if (as->firstep_level > 0) {
ath5k_ani_set_firstep_level(ah, as->firstep_level - 1);
@@ -464,7 +464,7 @@ ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
void
ath5k_ani_calibration(struct ath5k_hw *ah)
{
- struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+ struct ath5k_ani_state *as = &ah->ani_state;
int listen, ofdm_high, ofdm_low, cck_high, cck_low;
/* get listen time since last call and add it to the counter because we
@@ -483,9 +483,9 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
ofdm_low = as->listen_time * ATH5K_ANI_OFDM_TRIG_LOW / 1000;
cck_low = as->listen_time * ATH5K_ANI_CCK_TRIG_LOW / 1000;
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"listen %d (now %d)", as->listen_time, listen);
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"check high ofdm %d/%d cck %d/%d",
as->ofdm_errors, ofdm_high, as->cck_errors, cck_high);
@@ -498,7 +498,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
} else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
/* If more than 5 (TODO: why 5?) periods have passed and we got
* relatively little errors we can try to lower immunity */
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"check low ofdm %d/%d cck %d/%d",
as->ofdm_errors, ofdm_low, as->cck_errors, cck_low);
@@ -525,7 +525,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
void
ath5k_ani_mib_intr(struct ath5k_hw *ah)
{
- struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+ struct ath5k_ani_state *as = &ah->ani_state;
/* nothing to do here if HW does not have PHY error counters - they
* can't be the reason for the MIB interrupt then */
@@ -536,7 +536,7 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
- if (ah->ah_sc->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
+ if (ah->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
return;
/* If one of the errors triggered, we can get a superfluous second
@@ -547,7 +547,7 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH ||
as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
- tasklet_schedule(&ah->ah_sc->ani_tasklet);
+ tasklet_schedule(&ah->ani_tasklet);
}
@@ -561,16 +561,16 @@ void
ath5k_ani_phy_error_report(struct ath5k_hw *ah,
enum ath5k_phy_error_code phyerr)
{
- struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
+ struct ath5k_ani_state *as = &ah->ani_state;
if (phyerr == AR5K_RX_PHY_ERROR_OFDM_TIMING) {
as->ofdm_errors++;
if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH)
- tasklet_schedule(&ah->ah_sc->ani_tasklet);
+ tasklet_schedule(&ah->ani_tasklet);
} else if (phyerr == AR5K_RX_PHY_ERROR_CCK_TIMING) {
as->cck_errors++;
if (as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
- tasklet_schedule(&ah->ah_sc->ani_tasklet);
+ tasklet_schedule(&ah->ani_tasklet);
}
}
@@ -630,20 +630,25 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
if (ah->ah_version < AR5K_AR5212)
return;
+ if (mode < ATH5K_ANI_MODE_OFF || mode > ATH5K_ANI_MODE_AUTO) {
+ ATH5K_ERR(ah, "ANI mode %d out of range", mode);
+ return;
+ }
+
/* clear old state information */
- memset(&ah->ah_sc->ani_state, 0, sizeof(ah->ah_sc->ani_state));
+ memset(&ah->ani_state, 0, sizeof(ah->ani_state));
/* older hardware has more spur levels than newer */
if (ah->ah_mac_srev < AR5K_SREV_AR2414)
- ah->ah_sc->ani_state.max_spur_level = 7;
+ ah->ani_state.max_spur_level = 7;
else
- ah->ah_sc->ani_state.max_spur_level = 2;
+ ah->ani_state.max_spur_level = 2;
/* initial values for our ani parameters */
if (mode == ATH5K_ANI_MODE_OFF) {
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI off\n");
- } else if (mode == ATH5K_ANI_MODE_MANUAL_LOW) {
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "ANI off\n");
+ } else if (mode == ATH5K_ANI_MODE_MANUAL_LOW) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"ANI manual low -> high sensitivity\n");
ath5k_ani_set_noise_immunity_level(ah, 0);
ath5k_ani_set_spur_immunity_level(ah, 0);
@@ -651,17 +656,17 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
ath5k_ani_set_cck_weak_signal_detection(ah, true);
} else if (mode == ATH5K_ANI_MODE_MANUAL_HIGH) {
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"ANI manual high -> low sensitivity\n");
ath5k_ani_set_noise_immunity_level(ah,
ATH5K_ANI_MAX_NOISE_IMM_LVL);
ath5k_ani_set_spur_immunity_level(ah,
- ah->ah_sc->ani_state.max_spur_level);
+ ah->ani_state.max_spur_level);
ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
ath5k_ani_set_cck_weak_signal_detection(ah, false);
} else if (mode == ATH5K_ANI_MODE_AUTO) {
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI auto\n");
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "ANI auto\n");
ath5k_ani_set_noise_immunity_level(ah, 0);
ath5k_ani_set_spur_immunity_level(ah, 0);
ath5k_ani_set_firstep_level(ah, 0);
@@ -687,7 +692,7 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
~AR5K_RX_FILTER_PHYERR);
}
- ah->ah_sc->ani_state.ani_mode = mode;
+ ah->ani_state.ani_mode = mode;
}
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index bb50700436f..277d5cbe006 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -18,14 +18,16 @@
#ifndef _ATH5K_H
#define _ATH5K_H
-/* TODO: Clean up channel debuging -doesn't work anyway- and start
+/* TODO: Clean up channel debugging (doesn't work anyway) and start
* working on reg. control code using all available eeprom information
- * -rev. engineering needed- */
+ * (rev. engineering needed) */
#define CHAN_DEBUG 0
#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/average.h>
+#include <linux/leds.h>
#include <net/mac80211.h>
/* RX/TX descriptor hw structs
@@ -36,43 +38,46 @@
* TODO: Make a more generic struct (eg. add more stuff to ath5k_capabilities)
* and clean up common bits, then introduce set/get functions in eeprom.c */
#include "eeprom.h"
+#include "debug.h"
#include "../ath.h"
+#include "ani.h"
/* PCI IDs */
-#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */
-#define PCI_DEVICE_ID_ATHEROS_AR5311 0x0011 /* AR5311 */
-#define PCI_DEVICE_ID_ATHEROS_AR5211 0x0012 /* AR5211 */
-#define PCI_DEVICE_ID_ATHEROS_AR5212 0x0013 /* AR5212 */
-#define PCI_DEVICE_ID_3COM_3CRDAG675 0x0013 /* 3CRDAG675 (Atheros AR5212) */
-#define PCI_DEVICE_ID_3COM_2_3CRPAG175 0x0013 /* 3CRPAG175 (Atheros AR5212) */
-#define PCI_DEVICE_ID_ATHEROS_AR5210_AP 0x0207 /* AR5210 (Early) */
+#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */
+#define PCI_DEVICE_ID_ATHEROS_AR5311 0x0011 /* AR5311 */
+#define PCI_DEVICE_ID_ATHEROS_AR5211 0x0012 /* AR5211 */
+#define PCI_DEVICE_ID_ATHEROS_AR5212 0x0013 /* AR5212 */
+#define PCI_DEVICE_ID_3COM_3CRDAG675 0x0013 /* 3CRDAG675 (Atheros AR5212) */
+#define PCI_DEVICE_ID_3COM_2_3CRPAG175 0x0013 /* 3CRPAG175 (Atheros AR5212) */
+#define PCI_DEVICE_ID_ATHEROS_AR5210_AP 0x0207 /* AR5210 (Early) */
#define PCI_DEVICE_ID_ATHEROS_AR5212_IBM 0x1014 /* AR5212 (IBM MiniPCI) */
-#define PCI_DEVICE_ID_ATHEROS_AR5210_DEFAULT 0x1107 /* AR5210 (no eeprom) */
-#define PCI_DEVICE_ID_ATHEROS_AR5212_DEFAULT 0x1113 /* AR5212 (no eeprom) */
-#define PCI_DEVICE_ID_ATHEROS_AR5211_DEFAULT 0x1112 /* AR5211 (no eeprom) */
-#define PCI_DEVICE_ID_ATHEROS_AR5212_FPGA 0xf013 /* AR5212 (emulation board) */
-#define PCI_DEVICE_ID_ATHEROS_AR5211_LEGACY 0xff12 /* AR5211 (emulation board) */
-#define PCI_DEVICE_ID_ATHEROS_AR5211_FPGA11B 0xf11b /* AR5211 (emulation board) */
-#define PCI_DEVICE_ID_ATHEROS_AR5312_REV2 0x0052 /* AR5312 WMAC (AP31) */
-#define PCI_DEVICE_ID_ATHEROS_AR5312_REV7 0x0057 /* AR5312 WMAC (AP30-040) */
-#define PCI_DEVICE_ID_ATHEROS_AR5312_REV8 0x0058 /* AR5312 WMAC (AP43-030) */
-#define PCI_DEVICE_ID_ATHEROS_AR5212_0014 0x0014 /* AR5212 compatible */
-#define PCI_DEVICE_ID_ATHEROS_AR5212_0015 0x0015 /* AR5212 compatible */
-#define PCI_DEVICE_ID_ATHEROS_AR5212_0016 0x0016 /* AR5212 compatible */
-#define PCI_DEVICE_ID_ATHEROS_AR5212_0017 0x0017 /* AR5212 compatible */
-#define PCI_DEVICE_ID_ATHEROS_AR5212_0018 0x0018 /* AR5212 compatible */
-#define PCI_DEVICE_ID_ATHEROS_AR5212_0019 0x0019 /* AR5212 compatible */
-#define PCI_DEVICE_ID_ATHEROS_AR2413 0x001a /* AR2413 (Griffin-lite) */
-#define PCI_DEVICE_ID_ATHEROS_AR5413 0x001b /* AR5413 (Eagle) */
-#define PCI_DEVICE_ID_ATHEROS_AR5424 0x001c /* AR5424 (Condor PCI-E) */
-#define PCI_DEVICE_ID_ATHEROS_AR5416 0x0023 /* AR5416 */
-#define PCI_DEVICE_ID_ATHEROS_AR5418 0x0024 /* AR5418 */
+#define PCI_DEVICE_ID_ATHEROS_AR5210_DEFAULT 0x1107 /* AR5210 (no eeprom) */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_DEFAULT 0x1113 /* AR5212 (no eeprom) */
+#define PCI_DEVICE_ID_ATHEROS_AR5211_DEFAULT 0x1112 /* AR5211 (no eeprom) */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_FPGA 0xf013 /* AR5212 (emulation board) */
+#define PCI_DEVICE_ID_ATHEROS_AR5211_LEGACY 0xff12 /* AR5211 (emulation board) */
+#define PCI_DEVICE_ID_ATHEROS_AR5211_FPGA11B 0xf11b /* AR5211 (emulation board) */
+#define PCI_DEVICE_ID_ATHEROS_AR5312_REV2 0x0052 /* AR5312 WMAC (AP31) */
+#define PCI_DEVICE_ID_ATHEROS_AR5312_REV7 0x0057 /* AR5312 WMAC (AP30-040) */
+#define PCI_DEVICE_ID_ATHEROS_AR5312_REV8 0x0058 /* AR5312 WMAC (AP43-030) */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_0014 0x0014 /* AR5212 compatible */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_0015 0x0015 /* AR5212 compatible */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_0016 0x0016 /* AR5212 compatible */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_0017 0x0017 /* AR5212 compatible */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_0018 0x0018 /* AR5212 compatible */
+#define PCI_DEVICE_ID_ATHEROS_AR5212_0019 0x0019 /* AR5212 compatible */
+#define PCI_DEVICE_ID_ATHEROS_AR2413 0x001a /* AR2413 (Griffin-lite) */
+#define PCI_DEVICE_ID_ATHEROS_AR5413 0x001b /* AR5413 (Eagle) */
+#define PCI_DEVICE_ID_ATHEROS_AR5424 0x001c /* AR5424 (Condor PCI-E) */
+#define PCI_DEVICE_ID_ATHEROS_AR5416 0x0023 /* AR5416 */
+#define PCI_DEVICE_ID_ATHEROS_AR5418 0x0024 /* AR5418 */
/****************************\
GENERIC DRIVER DEFINITIONS
\****************************/
-#define ATH5K_PRINTF(fmt, ...) printk("%s: " fmt, __func__, ##__VA_ARGS__)
+#define ATH5K_PRINTF(fmt, ...) \
+ printk(KERN_WARNING "%s: " fmt, __func__, ##__VA_ARGS__)
#define ATH5K_PRINTK(_sc, _level, _fmt, ...) \
printk(_level "ath5k %s: " _fmt, \
@@ -155,7 +160,7 @@
} while (0)
/*
- * Some tuneable values (these should be changeable by the user)
+ * Some tunable values (these should be changeable by the user)
* TODO: Make use of them and add more options OR use debug/configfs
*/
#define AR5K_TUNE_DMA_BEACON_RESP 2
@@ -170,8 +175,8 @@
#define AR5K_TUNE_RSSI_THRES 129
/* This must be set when setting the RSSI threshold otherwise it can
* prevent a reset. If AR5K_RSSI_THR is read after writing to it
- * the BMISS_THRES will be seen as 0, seems harware doesn't keep
- * track of it. Max value depends on harware. For AR5210 this is just 7.
+ * the BMISS_THRES will be seen as 0, seems hardware doesn't keep
+ * track of it. Max value depends on hardware. For AR5210 this is just 7.
* For AR5211+ this seems to be up to 255. */
#define AR5K_TUNE_BMISS_THRES 7
#define AR5K_TUNE_REGISTER_DWELL_TIME 20000
@@ -361,7 +366,7 @@ struct ath5k_srev_name {
/*
* Some of this information is based on Documentation from:
*
- * http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG
+ * http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG
*
* Modulation for Atheros' eXtended Range - range enhancing extension that is
* supposed to double the distance an Atheros client device can keep a
@@ -374,12 +379,12 @@ struct ath5k_srev_name {
* they are exclusive.
*
*/
-#define MODULATION_XR 0x00000200
+#define MODULATION_XR 0x00000200
/*
* Modulation for Atheros' Turbo G and Turbo A, its supposed to provide a
* throughput transmission speed up to 40Mbit/s-60Mbit/s at a 108Mbit/s
* signaling rate achieved through the bonding of two 54Mbit/s 802.11g
- * channels. To use this feature your Access Point must also suport it.
+ * channels. To use this feature your Access Point must also support it.
* There is also a distinction between "static" and "dynamic" turbo modes:
*
* - Static: is the dumb version: devices set to this mode stick to it until
@@ -495,9 +500,9 @@ enum ath5k_tx_queue {
*/
enum ath5k_tx_queue_subtype {
AR5K_WME_AC_BK = 0, /*Background traffic*/
- AR5K_WME_AC_BE, /*Best-effort (normal) traffic)*/
- AR5K_WME_AC_VI, /*Video traffic*/
- AR5K_WME_AC_VO, /*Voice traffic*/
+ AR5K_WME_AC_BE, /*Best-effort (normal) traffic*/
+ AR5K_WME_AC_VI, /*Video traffic*/
+ AR5K_WME_AC_VO, /*Voice traffic*/
};
/*
@@ -537,6 +542,27 @@ enum ath5k_tx_queue_id {
#define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/
/*
+ * Data transmit queue state. One of these exists for each
+ * hardware transmit queue. Packets sent to us from above
+ * are assigned to queues based on their priority. Not all
+ * devices support a complete set of hardware transmit queues.
+ * For those devices the array sc_ac2q will map multiple
+ * priorities to fewer hardware queues (typically all to one
+ * hardware queue).
+ */
+struct ath5k_txq {
+ unsigned int qnum; /* hardware q number */
+ u32 *link; /* link ptr in last TX desc */
+ struct list_head q; /* transmit queue */
+ spinlock_t lock; /* lock on q and link */
+ bool setup;
+ int txq_len; /* number of queued buffers */
+ int txq_max; /* max allowed num of queued buffers */
+ bool txq_poll_mark;
+ unsigned int txq_stuck; /* informational counter */
+};
+
+/*
* A struct to hold tx queue's parameters
*/
struct ath5k_txq_info {
@@ -616,8 +642,8 @@ struct ath5k_rx_status {
#define AR5K_RXERR_FIFO 0x04
#define AR5K_RXERR_DECRYPT 0x08
#define AR5K_RXERR_MIC 0x10
-#define AR5K_RXKEYIX_INVALID ((u8) - 1)
-#define AR5K_TXKEYIX_INVALID ((u32) - 1)
+#define AR5K_RXKEYIX_INVALID ((u8) -1)
+#define AR5K_TXKEYIX_INVALID ((u32) -1)
/**************************\
@@ -678,17 +704,18 @@ struct ath5k_gain {
#define CHANNEL_DYN 0x0400 /* Dynamic CCK-OFDM channel (for g operation) */
#define CHANNEL_XR 0x0800 /* XR channel */
-#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
-#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
-#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
-#define CHANNEL_X (CHANNEL_5GHZ|CHANNEL_OFDM|CHANNEL_XR)
+#define CHANNEL_A (CHANNEL_5GHZ | CHANNEL_OFDM)
+#define CHANNEL_B (CHANNEL_2GHZ | CHANNEL_CCK)
+#define CHANNEL_G (CHANNEL_2GHZ | CHANNEL_OFDM)
+#define CHANNEL_X (CHANNEL_5GHZ | CHANNEL_OFDM | CHANNEL_XR)
-#define CHANNEL_ALL (CHANNEL_OFDM|CHANNEL_CCK|CHANNEL_2GHZ|CHANNEL_5GHZ)
+#define CHANNEL_ALL (CHANNEL_OFDM | CHANNEL_CCK | \
+ CHANNEL_2GHZ | CHANNEL_5GHZ)
#define CHANNEL_MODES CHANNEL_ALL
/*
- * Used internaly for reset_tx_queue).
+ * Used internally for ath5k_hw_reset_tx_queue().
* Also see struct struct ieee80211_channel.
*/
#define IS_CHAN_XR(_c) ((_c->hw_value & CHANNEL_XR) != 0)
@@ -710,7 +737,7 @@ struct ath5k_athchan_2ghz {
\******************/
/**
- * Seems the ar5xxx harware supports up to 32 rates, indexed by 1-32.
+ * Seems the ar5xxx hardware supports up to 32 rates, indexed by 1-32.
*
* The rate code is used to get the RX rate or set the TX rate on the
* hardware descriptors. It is also used for internal modulation control
@@ -767,6 +794,7 @@ struct ath5k_athchan_2ghz {
*/
#define AR5K_KEYCACHE_SIZE 8
+extern int ath5k_modparam_nohwcrypt;
/***********************\
HW RELATED DEFINITIONS
@@ -775,11 +803,11 @@ struct ath5k_athchan_2ghz {
/*
* Misc definitions
*/
-#define AR5K_RSSI_EP_MULTIPLIER (1<<7)
+#define AR5K_RSSI_EP_MULTIPLIER (1 << 7)
#define AR5K_ASSERT_ENTRY(_e, _s) do { \
if (_e >= _s) \
- return (false); \
+ return false; \
} while (0)
/*
@@ -790,52 +818,52 @@ struct ath5k_athchan_2ghz {
* enum ath5k_int - Hardware interrupt masks helpers
*
* @AR5K_INT_RX: mask to identify received frame interrupts, of type
- * AR5K_ISR_RXOK or AR5K_ISR_RXERR
+ * AR5K_ISR_RXOK or AR5K_ISR_RXERR
* @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor (?)
* @AR5K_INT_RXNOFRM: No frame received (?)
* @AR5K_INT_RXEOL: received End Of List for VEOL (Virtual End Of List). The
- * Queue Control Unit (QCU) signals an EOL interrupt only if a descriptor's
- * LinkPtr is NULL. For more details, refer to:
- * http://www.freepatentsonline.com/20030225739.html
+ * Queue Control Unit (QCU) signals an EOL interrupt only if a descriptor's
+ * LinkPtr is NULL. For more details, refer to:
+ * http://www.freepatentsonline.com/20030225739.html
* @AR5K_INT_RXORN: Indicates we got RX overrun (eg. no more descriptors).
- * Note that Rx overrun is not always fatal, on some chips we can continue
- * operation without reseting the card, that's why int_fatal is not
- * common for all chips.
+ * Note that Rx overrun is not always fatal, on some chips we can continue
+ * operation without resetting the card, that's why int_fatal is not
+ * common for all chips.
* @AR5K_INT_TX: mask to identify received frame interrupts, of type
- * AR5K_ISR_TXOK or AR5K_ISR_TXERR
+ * AR5K_ISR_TXOK or AR5K_ISR_TXERR
* @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor (?)
* @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
- * We currently do increments on interrupt by
- * (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
+ * We currently do increments on interrupt by
+ * (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
* @AR5K_INT_MIB: Indicates the either Management Information Base counters or
* one of the PHY error counters reached the maximum value and should be
* read and cleared.
* @AR5K_INT_RXPHY: RX PHY Error
* @AR5K_INT_RXKCM: RX Key cache miss
* @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
- * beacon that must be handled in software. The alternative is if you
- * have VEOL support, in that case you let the hardware deal with things.
+ * beacon that must be handled in software. The alternative is if you
+ * have VEOL support, in that case you let the hardware deal with things.
* @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing
- * beacons from the AP have associated with, we should probably try to
- * reassociate. When in IBSS mode this might mean we have not received
- * any beacons from any local stations. Note that every station in an
- * IBSS schedules to send beacons at the Target Beacon Transmission Time
- * (TBTT) with a random backoff.
+ * beacons from the AP we have associated with, we should probably try to
+ * reassociate. When in IBSS mode this might mean we have not received
+ * any beacons from any local stations. Note that every station in an
+ * IBSS schedules to send beacons at the Target Beacon Transmission Time
+ * (TBTT) with a random backoff.
* @AR5K_INT_BNR: Beacon Not Ready interrupt - ??
* @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill, disabled for now
- * until properly handled
+ * until properly handled
* @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by DMA
- * errors. These types of errors we can enable seem to be of type
- * AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR.
+ * errors. These types of errors we can enable seem to be of type
+ * AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR.
* @AR5K_INT_GLOBAL: Used to clear and set the IER
* @AR5K_INT_NOCARD: signals the card has been removed
- * @AR5K_INT_COMMON: common interrupts shared amogst MACs with the same
- * bit value
+ * @AR5K_INT_COMMON: common interrupts shared among MACs with the same
+ * bit value
*
* These are mapped to take advantage of some common bits
* between the MACs, to be able to set intr properties
* easier. Some of them are not used yet inside hw.c. Most map
- * to the respective hw interrupt value as they are common amogst different
+ * to the respective hw interrupt value as they are common among different
* MACs.
*/
enum ath5k_int {
@@ -944,35 +972,6 @@ enum ath5k_power_mode {
#define AR5K_SOFTLED_ON 0
#define AR5K_SOFTLED_OFF 1
-/*
- * Chipset capabilities -see ath5k_hw_get_capability-
- * get_capability function is not yet fully implemented
- * in ath5k so most of these don't work yet...
- * TODO: Implement these & merge with _TUNE_ stuff above
- */
-enum ath5k_capability_type {
- AR5K_CAP_REG_DMN = 0, /* Used to get current reg. domain id */
- AR5K_CAP_TKIP_MIC = 2, /* Can handle TKIP MIC in hardware */
- AR5K_CAP_TKIP_SPLIT = 3, /* TKIP uses split keys */
- AR5K_CAP_PHYCOUNTERS = 4, /* PHY error counters */
- AR5K_CAP_DIVERSITY = 5, /* Supports fast diversity */
- AR5K_CAP_NUM_TXQUEUES = 6, /* Used to get max number of hw txqueues */
- AR5K_CAP_VEOL = 7, /* Supports virtual EOL */
- AR5K_CAP_COMPRESSION = 8, /* Supports compression */
- AR5K_CAP_BURST = 9, /* Supports packet bursting */
- AR5K_CAP_FASTFRAME = 10, /* Supports fast frames */
- AR5K_CAP_TXPOW = 11, /* Used to get global tx power limit */
- AR5K_CAP_TPC = 12, /* Can do per-packet tx power control (needed for 802.11a) */
- AR5K_CAP_BSSIDMASK = 13, /* Supports bssid mask */
- AR5K_CAP_MCAST_KEYSRCH = 14, /* Supports multicast key search */
- AR5K_CAP_TSF_ADJUST = 15, /* Supports beacon tsf adjust */
- AR5K_CAP_XR = 16, /* Supports XR mode */
- AR5K_CAP_WME_TKIPMIC = 17, /* Supports TKIP MIC when using WMM */
- AR5K_CAP_CHAN_HALFRATE = 18, /* Supports half rate channels */
- AR5K_CAP_CHAN_QUARTERRATE = 19, /* Supports quarter rate channels */
- AR5K_CAP_RFSILENT = 20, /* Supports RFsilent */
-};
-
/* XXX: we *may* move cap_range stuff to struct wiphy */
struct ath5k_capabilities {
@@ -1009,8 +1008,7 @@ struct ath5k_capabilities {
/* size of noise floor history (keep it a power of two) */
#define ATH5K_NF_CAL_HIST_MAX 8
-struct ath5k_nfcal_hist
-{
+struct ath5k_nfcal_hist {
s16 index; /* current index into nfval */
s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */
};
@@ -1025,9 +1023,66 @@ struct ath5k_avg_val {
int avg_weight;
};
-/***************************************\
- HARDWARE ABSTRACTION LAYER STRUCTURE
-\***************************************/
+#define ATH5K_LED_MAX_NAME_LEN 31
+
+/*
+ * State for LED triggers
+ */
+struct ath5k_led {
+ char name[ATH5K_LED_MAX_NAME_LEN + 1]; /* name of the LED in sysfs */
+ struct ath5k_hw *ah; /* driver state */
+ struct led_classdev led_dev; /* led classdev */
+};
+
+/* Rfkill */
+struct ath5k_rfkill {
+ /* GPIO PIN for rfkill */
+ u16 gpio;
+ /* polarity of rfkill GPIO PIN */
+ bool polarity;
+ /* RFKILL toggle tasklet */
+ struct tasklet_struct toggleq;
+};
+
+/* statistics */
+struct ath5k_statistics {
+ /* antenna use */
+ unsigned int antenna_rx[5]; /* frames count per antenna RX */
+ unsigned int antenna_tx[5]; /* frames count per antenna TX */
+
+ /* frame errors */
+ unsigned int rx_all_count; /* all RX frames, including errors */
+ unsigned int tx_all_count; /* all TX frames, including errors */
+ unsigned int rx_bytes_count; /* all RX bytes, including errored pkts
+ * and the MAC headers for each packet
+ */
+ unsigned int tx_bytes_count; /* all TX bytes, including errored pkts
+ * and the MAC headers and padding for
+ * each packet.
+ */
+ unsigned int rxerr_crc;
+ unsigned int rxerr_phy;
+ unsigned int rxerr_phy_code[32];
+ unsigned int rxerr_fifo;
+ unsigned int rxerr_decrypt;
+ unsigned int rxerr_mic;
+ unsigned int rxerr_proc;
+ unsigned int rxerr_jumbo;
+ unsigned int txerr_retry;
+ unsigned int txerr_fifo;
+ unsigned int txerr_filt;
+
+ /* MIB counters */
+ unsigned int ack_fail;
+ unsigned int rts_fail;
+ unsigned int rts_ok;
+ unsigned int fcs_error;
+ unsigned int beacons;
+
+ unsigned int mib_intr;
+ unsigned int rxorn_intr;
+ unsigned int rxeol_intr;
+};
/*
* Misc defines
@@ -1036,12 +1091,114 @@ struct ath5k_avg_val {
#define AR5K_MAX_GPIO 10
#define AR5K_MAX_RF_BANKS 8
-/* TODO: Clean up and merge with ath5k_softc */
+#if CHAN_DEBUG
+#define ATH_CHAN_MAX (26 + 26 + 26 + 200 + 200)
+#else
+#define ATH_CHAN_MAX (14 + 14 + 14 + 252 + 20)
+#endif
+
+#define ATH_RXBUF 40 /* number of RX buffers */
+#define ATH_TXBUF 200 /* number of TX buffers */
+#define ATH_BCBUF 4 /* number of beacon buffers */
+#define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */
+#define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */
+
+/* Driver state associated with an instance of a device */
struct ath5k_hw {
struct ath_common common;
- struct ath5k_softc *ah_sc;
- void __iomem *ah_iobase;
+ struct pci_dev *pdev;
+ struct device *dev; /* for dma mapping */
+ int irq;
+ u16 devid;
+ void __iomem *iobase; /* address of the device */
+ struct mutex lock; /* dev-level lock */
+ struct ieee80211_hw *hw; /* IEEE 802.11 common */
+ struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ieee80211_channel channels[ATH_CHAN_MAX];
+ struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
+ s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
+ enum nl80211_iftype opmode;
+
+#ifdef CONFIG_ATH5K_DEBUG
+ struct ath5k_dbg_info debug; /* debug info */
+#endif /* CONFIG_ATH5K_DEBUG */
+
+ struct ath5k_buf *bufptr; /* allocated buffer ptr */
+ struct ath5k_desc *desc; /* TX/RX descriptors */
+ dma_addr_t desc_daddr; /* DMA (physical) address */
+ size_t desc_len; /* size of TX/RX descriptors */
+
+ DECLARE_BITMAP(status, 6);
+#define ATH_STAT_INVALID 0 /* disable hardware accesses */
+#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */
+#define ATH_STAT_PROMISC 2
+#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */
+#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
+#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */
+
+ unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
+ struct ieee80211_channel *curchan; /* current h/w channel */
+
+ u16 nvifs;
+
+ enum ath5k_int imask; /* interrupt mask copy */
+
+ spinlock_t irqlock;
+ bool rx_pending; /* rx tasklet pending */
+ bool tx_pending; /* tx tasklet pending */
+
+ u8 lladdr[ETH_ALEN];
+ u8 bssidmask[ETH_ALEN];
+
+ unsigned int led_pin, /* GPIO pin for driving LED */
+ led_on; /* pin setting for LED on */
+
+ struct work_struct reset_work; /* deferred chip reset */
+
+ unsigned int rxbufsize; /* rx size based on mtu */
+ struct list_head rxbuf; /* receive buffer */
+ spinlock_t rxbuflock;
+ u32 *rxlink; /* link ptr in last RX desc */
+ struct tasklet_struct rxtq; /* rx intr tasklet */
+ struct ath5k_led rx_led; /* rx led */
+
+ struct list_head txbuf; /* transmit buffer */
+ spinlock_t txbuflock;
+ unsigned int txbuf_len; /* buf count in txbuf list */
+ struct ath5k_txq txqs[AR5K_NUM_TX_QUEUES]; /* tx queues */
+ struct tasklet_struct txtq; /* tx intr tasklet */
+ struct ath5k_led tx_led; /* tx led */
+
+ struct ath5k_rfkill rf_kill;
+
+ struct tasklet_struct calib; /* calibration tasklet */
+
+ spinlock_t block; /* protects beacon */
+ struct tasklet_struct beacontq; /* beacon intr tasklet */
+ struct list_head bcbuf; /* beacon buffer */
+ struct ieee80211_vif *bslot[ATH_BCBUF];
+ u16 num_ap_vifs;
+ u16 num_adhoc_vifs;
+ unsigned int bhalq, /* SW q for outgoing beacons */
+ bmisscount, /* missed beacon transmits */
+ bintval, /* beacon interval in TU */
+ bsent;
+ unsigned int nexttbtt; /* next beacon time in TU */
+ struct ath5k_txq *cabq; /* content after beacon */
+
+ int power_level; /* Requested tx power in dBm */
+ bool assoc; /* associate state */
+ bool enable_beacon; /* true if beacons are on */
+
+ struct ath5k_statistics stats;
+
+ struct ath5k_ani_state ani_state;
+ struct tasklet_struct ani_tasklet; /* ANI calibration */
+
+ struct delayed_work tx_complete_work;
+
+ struct survey_info survey; /* collected survey info */
enum ath5k_int ah_imr;
@@ -1065,6 +1222,8 @@ struct ath5k_hw {
u8 ah_retry_long;
u8 ah_retry_short;
+ u32 ah_use_32khz_clock;
+
u8 ah_coverage_class;
bool ah_ack_bitrate_high;
u8 ah_bwmode;
@@ -1168,43 +1327,43 @@ struct ath_bus_ops {
extern const struct ieee80211_ops ath5k_hw_ops;
/* Initialization and detach functions */
-int ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops);
-void ath5k_deinit_softc(struct ath5k_softc *sc);
-int ath5k_hw_init(struct ath5k_softc *sc);
+int ath5k_init_softc(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops);
+void ath5k_deinit_softc(struct ath5k_hw *ah);
+int ath5k_hw_init(struct ath5k_hw *ah);
void ath5k_hw_deinit(struct ath5k_hw *ah);
-int ath5k_sysfs_register(struct ath5k_softc *sc);
-void ath5k_sysfs_unregister(struct ath5k_softc *sc);
+int ath5k_sysfs_register(struct ath5k_hw *ah);
+void ath5k_sysfs_unregister(struct ath5k_hw *ah);
/* base.c */
struct ath5k_buf;
struct ath5k_txq;
-void set_beacon_filter(struct ieee80211_hw *hw, bool enable);
-bool ath_any_vif_assoc(struct ath5k_softc *sc);
+void ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable);
+bool ath5k_any_vif_assoc(struct ath5k_hw *ah);
void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath5k_txq *txq);
-int ath5k_init_hw(struct ath5k_softc *sc);
-int ath5k_stop_hw(struct ath5k_softc *sc);
-void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
-void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
+int ath5k_start(struct ieee80211_hw *hw);
+void ath5k_stop(struct ieee80211_hw *hw);
+void ath5k_mode_setup(struct ath5k_hw *ah, struct ieee80211_vif *vif);
+void ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
struct ieee80211_vif *vif);
-int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
-void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
+int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan);
+void ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf);
int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-void ath5k_beacon_config(struct ath5k_softc *sc);
-void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
-void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
+void ath5k_beacon_config(struct ath5k_hw *ah);
+void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
+void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
/*Chip id helper functions */
const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
int ath5k_hw_read_srev(struct ath5k_hw *ah);
/* LED functions */
-int ath5k_init_leds(struct ath5k_softc *sc);
-void ath5k_led_enable(struct ath5k_softc *sc);
-void ath5k_led_off(struct ath5k_softc *sc);
-void ath5k_unregister_leds(struct ath5k_softc *sc);
+int ath5k_init_leds(struct ath5k_hw *ah);
+void ath5k_led_enable(struct ath5k_hw *ah);
+void ath5k_led_off(struct ath5k_hw *ah);
+void ath5k_unregister_leds(struct ath5k_hw *ah);
/* Reset Functions */
@@ -1253,7 +1412,7 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
int len, struct ieee80211_rate *rate, bool shortpre);
unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah);
unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah);
-extern int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode);
+int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode);
void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
/* RX filter control*/
int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
@@ -1318,9 +1477,6 @@ void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
/* Misc functions TODO: Cleanup */
int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
-int ath5k_hw_get_capability(struct ath5k_hw *ah,
- enum ath5k_capability_type cap_type, u32 capability,
- u32 *result);
int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
@@ -1356,17 +1512,17 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
u8 mode, bool fast);
/*
- * Functions used internaly
+ * Functions used internally
*/
static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
{
- return &ah->common;
+ return &ah->common;
}
static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah)
{
- return &(ath5k_hw_common(ah)->regulatory);
+ return &(ath5k_hw_common(ah)->regulatory);
}
#ifdef CONFIG_ATHEROS_AR231X
@@ -1377,10 +1533,10 @@ static inline void __iomem *ath5k_ahb_reg(struct ath5k_hw *ah, u16 reg)
/* On AR2315 and AR2317 the PCI clock domain registers
* are outside of the WMAC register space */
if (unlikely((reg >= 0x4000) && (reg < 0x5000) &&
- (ah->ah_mac_srev >= AR5K_SREV_AR2315_R6)))
+ (ah->ah_mac_srev >= AR5K_SREV_AR2315_R6)))
return AR5K_AR2315_PCI_BASE + reg;
- return ah->ah_iobase + reg;
+ return ah->iobase + reg;
}
static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
@@ -1397,12 +1553,12 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
{
- return ioread32(ah->ah_iobase + reg);
+ return ioread32(ah->iobase + reg);
}
static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
{
- iowrite32(val, ah->ah_iobase + reg);
+ iowrite32(val, ah->iobase + reg);
}
#endif
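
The ath5k.h hunks above fold the old ath5k_softc driver state into struct ath5k_hw: the ah_sc/ah back-pointers disappear, every helper takes a single *ah handle, and the register accessors read ah->iobase directly. A minimal user-space sketch of that merge pattern follows; hw_state, dev_state and set_mask are made-up names for illustration, not the driver's types.

#include <stdio.h>

/* Before: two structs pointing at each other; callers juggle both handles. */
struct hw_state_old;
struct dev_state_old { struct hw_state_old *hw; int rx_pending; };
struct hw_state_old  { struct dev_state_old *back; int imr; };

/* After: one struct owns all per-device state; one handle is passed around. */
struct hw_state {
        int imr;        /* interrupt mask copy */
        int rx_pending; /* rx tasklet pending */
};

static void set_mask(struct hw_state *ah, int mask)
{
        ah->imr = mask;         /* no ah->back->... indirection needed */
}

int main(void)
{
        struct hw_state ah = { 0 };

        set_mask(&ah, 0x7);
        printf("imr=0x%x rx_pending=%d\n", ah.imr, ah.rx_pending);
        return 0;
}

With one owning struct, locking and lifetime follow a single object, which is what lets the attach.c and base.c hunks below drop the sc->ah indirection mechanically.
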
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 1588401de3c..f8a6b380d96 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -59,7 +59,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
cur_val = ath5k_hw_reg_read(ah, cur_reg);
if (cur_val != var_pattern) {
- ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
+ ATH5K_ERR(ah, "POST Failed !!!\n");
return -EAGAIN;
}
@@ -74,7 +74,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
cur_val = ath5k_hw_reg_read(ah, cur_reg);
if (cur_val != var_pattern) {
- ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
+ ATH5K_ERR(ah, "POST Failed !!!\n");
return -EAGAIN;
}
@@ -95,18 +95,18 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
/**
* ath5k_hw_init - Check if hw is supported and init the needed structs
*
- * @sc: The &struct ath5k_softc we got from the driver's init_softc function
+ * @ah: The &struct ath5k_hw we got from the driver's init_softc function
*
* Check if the device is supported, perform a POST and initialize the needed
* structs. Returns -ENOMEM if we don't have memory for the needed structs,
* -ENODEV if the device is not supported or prints an error msg if something
* else went wrong.
*/
-int ath5k_hw_init(struct ath5k_softc *sc)
+int ath5k_hw_init(struct ath5k_hw *ah)
{
- struct ath5k_hw *ah = sc->ah;
+ static const u8 zero_mac[ETH_ALEN] = { };
struct ath_common *common = ath5k_hw_common(ah);
- struct pci_dev *pdev = sc->pdev;
+ struct pci_dev *pdev = ah->pdev;
struct ath5k_eeprom_info *ee;
int ret;
u32 srev;
@@ -122,8 +122,8 @@ int ath5k_hw_init(struct ath5k_softc *sc)
ah->ah_retry_long = AR5K_INIT_RETRY_LONG;
ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
ah->ah_noise_floor = -95; /* until first NF calibration is run */
- sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
- ah->ah_current_channel = &sc->channels[0];
+ ah->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
+ ah->ah_current_channel = &ah->channels[0];
/*
* Find the mac version
@@ -191,7 +191,7 @@ int ath5k_hw_init(struct ath5k_softc *sc)
break;
case AR5K_SREV_RAD_5424:
if (ah->ah_mac_version == AR5K_SREV_AR2425 ||
- ah->ah_mac_version == AR5K_SREV_AR2417){
+ ah->ah_mac_version == AR5K_SREV_AR2417) {
ah->ah_radio = AR5K_RF2425;
ah->ah_single_chip = true;
} else {
@@ -210,43 +210,42 @@ int ath5k_hw_init(struct ath5k_softc *sc)
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
CHANNEL_2GHZ);
} else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) ||
- ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) ||
- ah->ah_phy_revision == AR5K_SREV_PHY_2425) {
+ ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) ||
+ ah->ah_phy_revision == AR5K_SREV_PHY_2425) {
ah->ah_radio = AR5K_RF2425;
ah->ah_single_chip = true;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2425;
} else if (srev == AR5K_SREV_AR5213A &&
- ah->ah_phy_revision == AR5K_SREV_PHY_5212B) {
+ ah->ah_phy_revision == AR5K_SREV_PHY_5212B) {
ah->ah_radio = AR5K_RF5112;
ah->ah_single_chip = false;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B;
} else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4) ||
- ah->ah_mac_version == (AR5K_SREV_AR2315_R6 >> 4)) {
+ ah->ah_mac_version == (AR5K_SREV_AR2315_R6 >> 4)) {
ah->ah_radio = AR5K_RF2316;
ah->ah_single_chip = true;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316;
} else if (ah->ah_mac_version == (AR5K_SREV_AR5414 >> 4) ||
- ah->ah_phy_revision == AR5K_SREV_PHY_5413) {
+ ah->ah_phy_revision == AR5K_SREV_PHY_5413) {
ah->ah_radio = AR5K_RF5413;
ah->ah_single_chip = true;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5413;
} else if (ah->ah_mac_version == (AR5K_SREV_AR2414 >> 4) ||
- ah->ah_phy_revision == AR5K_SREV_PHY_2413) {
+ ah->ah_phy_revision == AR5K_SREV_PHY_2413) {
ah->ah_radio = AR5K_RF2413;
ah->ah_single_chip = true;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2413;
} else {
- ATH5K_ERR(sc, "Couldn't identify radio revision.\n");
+ ATH5K_ERR(ah, "Couldn't identify radio revision.\n");
ret = -ENODEV;
goto err;
}
}
- /* Return on unsuported chips (unsupported eeprom etc) */
- if ((srev >= AR5K_SREV_AR5416) &&
- (srev < AR5K_SREV_AR2425)) {
- ATH5K_ERR(sc, "Device not yet supported.\n");
+ /* Return on unsupported chips (unsupported eeprom etc) */
+ if ((srev >= AR5K_SREV_AR5416) && (srev < AR5K_SREV_AR2425)) {
+ ATH5K_ERR(ah, "Device not yet supported.\n");
ret = -ENODEV;
goto err;
}
@@ -268,7 +267,7 @@ int ath5k_hw_init(struct ath5k_softc *sc)
*/
ret = ath5k_eeprom_init(ah);
if (ret) {
- ATH5K_ERR(sc, "unable to init EEPROM\n");
+ ATH5K_ERR(ah, "unable to init EEPROM\n");
goto err;
}
@@ -285,7 +284,7 @@ int ath5k_hw_init(struct ath5k_softc *sc)
ath5k_hw_reg_write(ah, 0x28000039, AR5K_PCIE_SERDES);
ath5k_hw_reg_write(ah, 0x53160824, AR5K_PCIE_SERDES);
- /* If serdes programing is enabled, increase PCI-E
+ /* If serdes programming is enabled, increase PCI-E
* tx power for systems with long trace from host
* to minicard connector. */
if (ee->ee_serdes)
@@ -309,17 +308,17 @@ int ath5k_hw_init(struct ath5k_softc *sc)
/* Get misc capabilities */
ret = ath5k_hw_set_capabilities(ah);
if (ret) {
- ATH5K_ERR(sc, "unable to get device capabilities\n");
+ ATH5K_ERR(ah, "unable to get device capabilities\n");
goto err;
}
- if (test_bit(ATH_STAT_2G_DISABLED, sc->status)) {
+ if (test_bit(ATH_STAT_2G_DISABLED, ah->status)) {
__clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode);
__clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode);
}
/* Crypto settings */
- common->keymax = (sc->ah->ah_version == AR5K_AR5210 ?
+ common->keymax = (ah->ah_version == AR5K_AR5210 ?
AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
if (srev >= AR5K_SREV_AR5212_V4 &&
@@ -334,12 +333,12 @@ int ath5k_hw_init(struct ath5k_softc *sc)
}
/* MAC address is cleared until add_interface */
- ath5k_hw_set_lladdr(ah, (u8[ETH_ALEN]){});
+ ath5k_hw_set_lladdr(ah, zero_mac);
/* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
ath5k_hw_set_bssid(ah);
- ath5k_hw_set_opmode(ah, sc->opmode);
+ ath5k_hw_set_opmode(ah, ah->opmode);
ath5k_hw_rfgain_opt_init(ah);
@@ -360,7 +359,7 @@ err:
*/
void ath5k_hw_deinit(struct ath5k_hw *ah)
{
- __set_bit(ATH_STAT_INVALID, ah->ah_sc->status);
+ __set_bit(ATH_STAT_INVALID, ah->status);
if (ah->ah_rf_banks != NULL)
kfree(ah->ah_rf_banks);
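
One non-mechanical change in the attach.c hunks above: the anonymous compound literal previously passed to ath5k_hw_set_lladdr() is replaced by a file-scope static const zero_mac[ETH_ALEN]. A small stand-alone sketch of the two forms; set_lladdr() here is a hypothetical stand-in, and the benefit (one shared read-only object instead of a zero-filled temporary built at each call) is the usual reason for this idiom, not something the diff states.

#include <stdio.h>

#define ETH_ALEN 6

/* hypothetical stand-in for the driver call, just prints the address */
static void set_lladdr(const unsigned char *mac)
{
        for (int i = 0; i < ETH_ALEN; i++)
                printf("%02x%c", mac[i], i == ETH_ALEN - 1 ? '\n' : ':');
}

static const unsigned char zero_mac[ETH_ALEN] = { 0 };  /* shared, read-only */

int main(void)
{
        /* old form: a zero-initialized compound literal, a fresh unnamed
         * array object materialized at every use */
        set_lladdr((unsigned char[ETH_ALEN]){ 0 });

        /* new form: pass the single static const object */
        set_lladdr(zero_mac);
        return 0;
}
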
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index b6c5d3715b9..c3119a6caac 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -42,6 +42,7 @@
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/hardirq.h>
#include <linux/if.h>
#include <linux/io.h>
@@ -85,10 +86,8 @@ MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
static int ath5k_init(struct ieee80211_hw *hw);
-static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
+static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
bool skip_pcu);
-int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
@@ -239,8 +238,8 @@ static const struct ath_ops ath5k_common_ops = {
static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct ath5k_softc *sc = hw->priv;
- struct ath_regulatory *regulatory = ath5k_hw_regulatory(sc->ah);
+ struct ath5k_hw *ah = hw->priv;
+ struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
return ath_reg_notifier_apply(wiphy, request, regulatory);
}
@@ -290,7 +289,7 @@ ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
band = IEEE80211_BAND_2GHZ;
break;
default:
- ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
+ ATH5K_WARN(ah, "bad mode, not copying channels\n");
return 0;
}
@@ -328,51 +327,50 @@ ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
}
static void
-ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b)
+ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
{
u8 i;
for (i = 0; i < AR5K_MAX_RATES; i++)
- sc->rate_idx[b->band][i] = -1;
+ ah->rate_idx[b->band][i] = -1;
for (i = 0; i < b->n_bitrates; i++) {
- sc->rate_idx[b->band][b->bitrates[i].hw_value] = i;
+ ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
if (b->bitrates[i].hw_value_short)
- sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
+ ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
}
}
static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = hw->priv;
struct ieee80211_supported_band *sband;
int max_c, count_c = 0;
int i;
- BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
- max_c = ARRAY_SIZE(sc->channels);
+ BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
+ max_c = ARRAY_SIZE(ah->channels);
/* 2GHz band */
- sband = &sc->sbands[IEEE80211_BAND_2GHZ];
+ sband = &ah->sbands[IEEE80211_BAND_2GHZ];
sband->band = IEEE80211_BAND_2GHZ;
- sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0];
+ sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];
- if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
+ if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
/* G mode */
memcpy(sband->bitrates, &ath5k_rates[0],
sizeof(struct ieee80211_rate) * 12);
sband->n_bitrates = 12;
- sband->channels = sc->channels;
+ sband->channels = ah->channels;
sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11G, max_c);
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
count_c = sband->n_channels;
max_c -= count_c;
- } else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) {
+ } else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
/* B mode */
memcpy(sband->bitrates, &ath5k_rates[0],
sizeof(struct ieee80211_rate) * 4);
@@ -391,7 +389,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
}
}
- sband->channels = sc->channels;
+ sband->channels = ah->channels;
sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11B, max_c);
@@ -399,27 +397,27 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
count_c = sband->n_channels;
max_c -= count_c;
}
- ath5k_setup_rate_idx(sc, sband);
+ ath5k_setup_rate_idx(ah, sband);
/* 5GHz band, A mode */
- if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
- sband = &sc->sbands[IEEE80211_BAND_5GHZ];
+ if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
+ sband = &ah->sbands[IEEE80211_BAND_5GHZ];
sband->band = IEEE80211_BAND_5GHZ;
- sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0];
+ sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];
memcpy(sband->bitrates, &ath5k_rates[4],
sizeof(struct ieee80211_rate) * 8);
sband->n_bitrates = 8;
- sband->channels = &sc->channels[count_c];
+ sband->channels = &ah->channels[count_c];
sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11A, max_c);
hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
}
- ath5k_setup_rate_idx(sc, sband);
+ ath5k_setup_rate_idx(ah, sband);
- ath5k_debug_dump_bands(sc);
+ ath5k_debug_dump_bands(ah);
return 0;
}
@@ -429,14 +427,14 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
* To accomplish this we must first cleanup any pending DMA,
* then restart stuff after a la ath5k_init.
*
- * Called with sc->lock.
+ * Called with ah->lock.
*/
int
-ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
+ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
{
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"channel set, resetting (%u -> %u MHz)\n",
- sc->curchan->center_freq, chan->center_freq);
+ ah->curchan->center_freq, chan->center_freq);
/*
* To switch channels clear any pending DMA operations;
@@ -444,7 +442,7 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
* hardware at the new frequency, and then re-enable
* the relevant bits of the h/w.
*/
- return ath5k_reset(sc, chan, true);
+ return ath5k_reset(ah, chan, true);
}
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
@@ -488,10 +486,10 @@ void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
}
void
-ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
+ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
struct ieee80211_vif *vif)
{
- struct ath_common *common = ath5k_hw_common(sc->ah);
+ struct ath_common *common = ath5k_hw_common(ah);
struct ath5k_vif_iter_data iter_data;
u32 rfilt;
@@ -510,41 +508,41 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
ath5k_vif_iter(&iter_data, vif->addr, vif);
/* Get list of all active MAC addresses */
- ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
+ ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
&iter_data);
- memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);
+ memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);
- sc->opmode = iter_data.opmode;
- if (sc->opmode == NL80211_IFTYPE_UNSPECIFIED)
+ ah->opmode = iter_data.opmode;
+ if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
/* Nothing active, default to station mode */
- sc->opmode = NL80211_IFTYPE_STATION;
+ ah->opmode = NL80211_IFTYPE_STATION;
- ath5k_hw_set_opmode(sc->ah, sc->opmode);
- ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
- sc->opmode, ath_opmode_to_string(sc->opmode));
+ ath5k_hw_set_opmode(ah, ah->opmode);
+ ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
+ ah->opmode, ath_opmode_to_string(ah->opmode));
if (iter_data.need_set_hw_addr && iter_data.found_active)
- ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac);
+ ath5k_hw_set_lladdr(ah, iter_data.active_mac);
- if (ath5k_hw_hasbssidmask(sc->ah))
- ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
+ if (ath5k_hw_hasbssidmask(ah))
+ ath5k_hw_set_bssid_mask(ah, ah->bssidmask);
/* Set up RX Filter */
if (iter_data.n_stas > 1) {
/* If you have multiple STA interfaces connected to
* different APs, ARPs are not received (most of the time?)
- * Enabling PROMISC appears to fix that probem.
+ * Enabling PROMISC appears to fix that problem.
*/
- sc->filter_flags |= AR5K_RX_FILTER_PROM;
+ ah->filter_flags |= AR5K_RX_FILTER_PROM;
}
- rfilt = sc->filter_flags;
- ath5k_hw_set_rx_filter(sc->ah, rfilt);
- ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
+ rfilt = ah->filter_flags;
+ ath5k_hw_set_rx_filter(ah, rfilt);
+ ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}
static inline int
-ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
+ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
{
int rix;
@@ -553,7 +551,7 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
"hw_rix out of bounds: %x\n", hw_rix))
return 0;
- rix = sc->rate_idx[sc->curchan->band][hw_rix];
+ rix = ah->rate_idx[ah->curchan->band][hw_rix];
if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
rix = 0;
@@ -565,9 +563,9 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
\***************/
static
-struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
+struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
{
- struct ath_common *common = ath5k_hw_common(sc->ah);
+ struct ath_common *common = ath5k_hw_common(ah);
struct sk_buff *skb;
/*
@@ -579,17 +577,17 @@ struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
GFP_ATOMIC);
if (!skb) {
- ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
+ ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
common->rx_bufsize);
return NULL;
}
- *skb_addr = dma_map_single(sc->dev,
+ *skb_addr = dma_map_single(ah->dev,
skb->data, common->rx_bufsize,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(sc->dev, *skb_addr))) {
- ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
+ if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
+ ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
dev_kfree_skb(skb);
return NULL;
}
@@ -597,15 +595,14 @@ struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
}
static int
-ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
+ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
- struct ath5k_hw *ah = sc->ah;
struct sk_buff *skb = bf->skb;
struct ath5k_desc *ds;
int ret;
if (!skb) {
- skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
+ skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
if (!skb)
return -ENOMEM;
bf->skb = skb;
@@ -631,13 +628,13 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
ds->ds_data = bf->skbaddr;
ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
if (ret) {
- ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
+ ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
return ret;
}
- if (sc->rxlink != NULL)
- *sc->rxlink = bf->daddr;
- sc->rxlink = &ds->ds_link;
+ if (ah->rxlink != NULL)
+ *ah->rxlink = bf->daddr;
+ ah->rxlink = &ds->ds_link;
return 0;
}
@@ -665,10 +662,9 @@ static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
}
static int
-ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
+ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
struct ath5k_txq *txq, int padsize)
{
- struct ath5k_hw *ah = sc->ah;
struct ath5k_desc *ds = bf->desc;
struct sk_buff *skb = bf->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -684,10 +680,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
/* XXX endianness */
- bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
+ bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- rate = ieee80211_get_tx_rate(sc->hw, info);
+ rate = ieee80211_get_tx_rate(ah->hw, info);
if (!rate) {
ret = -EINVAL;
goto err_unmap;
@@ -711,20 +707,20 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
}
if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
flags |= AR5K_TXDESC_RTSENA;
- cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
- duration = le16_to_cpu(ieee80211_rts_duration(sc->hw,
+ cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
+ duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
info->control.vif, pktlen, info));
}
if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
flags |= AR5K_TXDESC_CTSENA;
- cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
- duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw,
+ cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
+ duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
info->control.vif, pktlen, info));
}
ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
ieee80211_get_hdrlen_from_skb(skb), padsize,
get_hw_packet_type(skb),
- (sc->power_level * 2),
+ (ah->power_level * 2),
hw_rate,
info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
cts_rate, duration);
@@ -734,7 +730,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
memset(mrr_rate, 0, sizeof(mrr_rate));
memset(mrr_tries, 0, sizeof(mrr_tries));
for (i = 0; i < 3; i++) {
- rate = ieee80211_get_alt_retry_rate(sc->hw, info, i);
+ rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
if (!rate)
break;
@@ -765,7 +761,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
return 0;
err_unmap:
- dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
+ dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
return ret;
}
@@ -774,7 +770,7 @@ err_unmap:
\*******************/
static int
-ath5k_desc_alloc(struct ath5k_softc *sc)
+ath5k_desc_alloc(struct ath5k_hw *ah)
{
struct ath5k_desc *ds;
struct ath5k_buf *bf;
@@ -783,69 +779,68 @@ ath5k_desc_alloc(struct ath5k_softc *sc)
int ret;
/* allocate descriptors */
- sc->desc_len = sizeof(struct ath5k_desc) *
+ ah->desc_len = sizeof(struct ath5k_desc) *
(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);
- sc->desc = dma_alloc_coherent(sc->dev, sc->desc_len,
- &sc->desc_daddr, GFP_KERNEL);
- if (sc->desc == NULL) {
- ATH5K_ERR(sc, "can't allocate descriptors\n");
+ ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
+ &ah->desc_daddr, GFP_KERNEL);
+ if (ah->desc == NULL) {
+ ATH5K_ERR(ah, "can't allocate descriptors\n");
ret = -ENOMEM;
goto err;
}
- ds = sc->desc;
- da = sc->desc_daddr;
- ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
- ds, sc->desc_len, (unsigned long long)sc->desc_daddr);
+ ds = ah->desc;
+ da = ah->desc_daddr;
+ ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
+ ds, ah->desc_len, (unsigned long long)ah->desc_daddr);
bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
sizeof(struct ath5k_buf), GFP_KERNEL);
if (bf == NULL) {
- ATH5K_ERR(sc, "can't allocate bufptr\n");
+ ATH5K_ERR(ah, "can't allocate bufptr\n");
ret = -ENOMEM;
goto err_free;
}
- sc->bufptr = bf;
+ ah->bufptr = bf;
- INIT_LIST_HEAD(&sc->rxbuf);
+ INIT_LIST_HEAD(&ah->rxbuf);
for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
bf->desc = ds;
bf->daddr = da;
- list_add_tail(&bf->list, &sc->rxbuf);
+ list_add_tail(&bf->list, &ah->rxbuf);
}
- INIT_LIST_HEAD(&sc->txbuf);
- sc->txbuf_len = ATH_TXBUF;
- for (i = 0; i < ATH_TXBUF; i++, bf++, ds++,
- da += sizeof(*ds)) {
+ INIT_LIST_HEAD(&ah->txbuf);
+ ah->txbuf_len = ATH_TXBUF;
+ for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
bf->desc = ds;
bf->daddr = da;
- list_add_tail(&bf->list, &sc->txbuf);
+ list_add_tail(&bf->list, &ah->txbuf);
}
/* beacon buffers */
- INIT_LIST_HEAD(&sc->bcbuf);
+ INIT_LIST_HEAD(&ah->bcbuf);
for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
bf->desc = ds;
bf->daddr = da;
- list_add_tail(&bf->list, &sc->bcbuf);
+ list_add_tail(&bf->list, &ah->bcbuf);
}
return 0;
err_free:
- dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
+ dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
err:
- sc->desc = NULL;
+ ah->desc = NULL;
return ret;
}
void
-ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
+ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
BUG_ON(!bf);
if (!bf->skb)
return;
- dma_unmap_single(sc->dev, bf->skbaddr, bf->skb->len,
+ dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
DMA_TO_DEVICE);
dev_kfree_skb_any(bf->skb);
bf->skb = NULL;
@@ -854,15 +849,14 @@ ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
}
void
-ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
+ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
- struct ath5k_hw *ah = sc->ah;
struct ath_common *common = ath5k_hw_common(ah);
BUG_ON(!bf);
if (!bf->skb)
return;
- dma_unmap_single(sc->dev, bf->skbaddr, common->rx_bufsize,
+ dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
DMA_FROM_DEVICE);
dev_kfree_skb_any(bf->skb);
bf->skb = NULL;
@@ -871,24 +865,24 @@ ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
}
static void
-ath5k_desc_free(struct ath5k_softc *sc)
+ath5k_desc_free(struct ath5k_hw *ah)
{
struct ath5k_buf *bf;
- list_for_each_entry(bf, &sc->txbuf, list)
- ath5k_txbuf_free_skb(sc, bf);
- list_for_each_entry(bf, &sc->rxbuf, list)
- ath5k_rxbuf_free_skb(sc, bf);
- list_for_each_entry(bf, &sc->bcbuf, list)
- ath5k_txbuf_free_skb(sc, bf);
+ list_for_each_entry(bf, &ah->txbuf, list)
+ ath5k_txbuf_free_skb(ah, bf);
+ list_for_each_entry(bf, &ah->rxbuf, list)
+ ath5k_rxbuf_free_skb(ah, bf);
+ list_for_each_entry(bf, &ah->bcbuf, list)
+ ath5k_txbuf_free_skb(ah, bf);
/* Free memory associated with all descriptors */
- dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
- sc->desc = NULL;
- sc->desc_daddr = 0;
+ dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
+ ah->desc = NULL;
+ ah->desc_daddr = 0;
- kfree(sc->bufptr);
- sc->bufptr = NULL;
+ kfree(ah->bufptr);
+ ah->bufptr = NULL;
}
@@ -897,10 +891,9 @@ ath5k_desc_free(struct ath5k_softc *sc)
\**************/
static struct ath5k_txq *
-ath5k_txq_setup(struct ath5k_softc *sc,
+ath5k_txq_setup(struct ath5k_hw *ah,
int qtype, int subtype)
{
- struct ath5k_hw *ah = sc->ah;
struct ath5k_txq *txq;
struct ath5k_txq_info qi = {
.tqi_subtype = subtype,
@@ -934,13 +927,13 @@ ath5k_txq_setup(struct ath5k_softc *sc,
*/
return ERR_PTR(qnum);
}
- if (qnum >= ARRAY_SIZE(sc->txqs)) {
- ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n",
- qnum, ARRAY_SIZE(sc->txqs));
+ if (qnum >= ARRAY_SIZE(ah->txqs)) {
+ ATH5K_ERR(ah, "hw qnum %u out of range, max %tu!\n",
+ qnum, ARRAY_SIZE(ah->txqs));
ath5k_hw_release_tx_queue(ah, qnum);
return ERR_PTR(-EINVAL);
}
- txq = &sc->txqs[qnum];
+ txq = &ah->txqs[qnum];
if (!txq->setup) {
txq->qnum = qnum;
txq->link = NULL;
@@ -952,7 +945,7 @@ ath5k_txq_setup(struct ath5k_softc *sc,
txq->txq_poll_mark = false;
txq->txq_stuck = 0;
}
- return &sc->txqs[qnum];
+ return &ah->txqs[qnum];
}
static int
@@ -972,18 +965,17 @@ ath5k_beaconq_setup(struct ath5k_hw *ah)
}
static int
-ath5k_beaconq_config(struct ath5k_softc *sc)
+ath5k_beaconq_config(struct ath5k_hw *ah)
{
- struct ath5k_hw *ah = sc->ah;
struct ath5k_txq_info qi;
int ret;
- ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
+ ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
if (ret)
goto err;
- if (sc->opmode == NL80211_IFTYPE_AP ||
- sc->opmode == NL80211_IFTYPE_MESH_POINT) {
+ if (ah->opmode == NL80211_IFTYPE_AP ||
+ ah->opmode == NL80211_IFTYPE_MESH_POINT) {
/*
* Always burst out beacon and CAB traffic
* (aifs = cwmin = cwmax = 0)
@@ -991,7 +983,7 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
qi.tqi_aifs = 0;
qi.tqi_cw_min = 0;
qi.tqi_cw_max = 0;
- } else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
+ } else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
/*
* Adhoc mode; backoff between 0 and (2 * cw_min).
*/
@@ -1000,17 +992,17 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
}
- ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
- ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi);
+ ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
if (ret) {
- ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
+ ATH5K_ERR(ah, "%s: unable to update parameters for beacon "
"hardware queue!\n", __func__);
goto err;
}
- ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
+ ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */
if (ret)
goto err;
@@ -1019,7 +1011,7 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
if (ret)
goto err;
- qi.tqi_ready_time = (sc->bintval * 80) / 100;
+ qi.tqi_ready_time = (ah->bintval * 80) / 100;
ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
if (ret)
goto err;
@@ -1032,7 +1024,7 @@ err:
/**
* ath5k_drain_tx_buffs - Empty tx buffers
*
- * @sc The &struct ath5k_softc
+ * @ah The &struct ath5k_hw
*
* Empty tx buffers from all queues in preparation
* of a reset or during shutdown.
@@ -1041,26 +1033,26 @@ err:
* we do not need to block ath5k_tx_tasklet
*/
static void
-ath5k_drain_tx_buffs(struct ath5k_softc *sc)
+ath5k_drain_tx_buffs(struct ath5k_hw *ah)
{
struct ath5k_txq *txq;
struct ath5k_buf *bf, *bf0;
int i;
- for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
- if (sc->txqs[i].setup) {
- txq = &sc->txqs[i];
+ for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
+ if (ah->txqs[i].setup) {
+ txq = &ah->txqs[i];
spin_lock_bh(&txq->lock);
list_for_each_entry_safe(bf, bf0, &txq->q, list) {
- ath5k_debug_printtxbuf(sc, bf);
+ ath5k_debug_printtxbuf(ah, bf);
- ath5k_txbuf_free_skb(sc, bf);
+ ath5k_txbuf_free_skb(ah, bf);
- spin_lock_bh(&sc->txbuflock);
- list_move_tail(&bf->list, &sc->txbuf);
- sc->txbuf_len++;
+ spin_lock_bh(&ah->txbuflock);
+ list_move_tail(&bf->list, &ah->txbuf);
+ ah->txbuf_len++;
txq->txq_len--;
- spin_unlock_bh(&sc->txbuflock);
+ spin_unlock_bh(&ah->txbuflock);
}
txq->link = NULL;
txq->txq_poll_mark = false;
@@ -1070,14 +1062,14 @@ ath5k_drain_tx_buffs(struct ath5k_softc *sc)
}
static void
-ath5k_txq_release(struct ath5k_softc *sc)
+ath5k_txq_release(struct ath5k_hw *ah)
{
- struct ath5k_txq *txq = sc->txqs;
+ struct ath5k_txq *txq = ah->txqs;
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++)
+ for (i = 0; i < ARRAY_SIZE(ah->txqs); i++, txq++)
if (txq->setup) {
- ath5k_hw_release_tx_queue(sc->ah, txq->qnum);
+ ath5k_hw_release_tx_queue(ah, txq->qnum);
txq->setup = false;
}
}
@@ -1091,33 +1083,32 @@ ath5k_txq_release(struct ath5k_softc *sc)
* Enable the receive h/w following a reset.
*/
static int
-ath5k_rx_start(struct ath5k_softc *sc)
+ath5k_rx_start(struct ath5k_hw *ah)
{
- struct ath5k_hw *ah = sc->ah;
struct ath_common *common = ath5k_hw_common(ah);
struct ath5k_buf *bf;
int ret;
common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
common->cachelsz, common->rx_bufsize);
- spin_lock_bh(&sc->rxbuflock);
- sc->rxlink = NULL;
- list_for_each_entry(bf, &sc->rxbuf, list) {
- ret = ath5k_rxbuf_setup(sc, bf);
+ spin_lock_bh(&ah->rxbuflock);
+ ah->rxlink = NULL;
+ list_for_each_entry(bf, &ah->rxbuf, list) {
+ ret = ath5k_rxbuf_setup(ah, bf);
if (ret != 0) {
- spin_unlock_bh(&sc->rxbuflock);
+ spin_unlock_bh(&ah->rxbuflock);
goto err;
}
}
- bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
+ bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
ath5k_hw_set_rxdp(ah, bf->daddr);
- spin_unlock_bh(&sc->rxbuflock);
+ spin_unlock_bh(&ah->rxbuflock);
ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */
- ath5k_update_bssid_mask_and_opmode(sc, NULL); /* set filters, etc. */
+ ath5k_update_bssid_mask_and_opmode(ah, NULL); /* set filters, etc. */
ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
return 0;
@@ -1133,21 +1124,19 @@ err:
* does.
*/
static void
-ath5k_rx_stop(struct ath5k_softc *sc)
+ath5k_rx_stop(struct ath5k_hw *ah)
{
- struct ath5k_hw *ah = sc->ah;
ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */
ath5k_hw_stop_rx_pcu(ah); /* disable PCU */
- ath5k_debug_printrxbuffs(sc, ah);
+ ath5k_debug_printrxbuffs(ah);
}
static unsigned int
-ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
+ath5k_rx_decrypted(struct ath5k_hw *ah, struct sk_buff *skb,
struct ath5k_rx_status *rs)
{
- struct ath5k_hw *ah = sc->ah;
struct ath_common *common = ath5k_hw_common(ah);
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int keyix, hlen;
@@ -1174,10 +1163,10 @@ ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
static void
-ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
+ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
struct ieee80211_rx_status *rxs)
{
- struct ath_common *common = ath5k_hw_common(sc->ah);
+ struct ath_common *common = ath5k_hw_common(ah);
u64 tsf, bc_tstamp;
u32 hw_tu;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
@@ -1190,11 +1179,11 @@ ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
* have updated the local TSF. We have to work around various
* hardware bugs, though...
*/
- tsf = ath5k_hw_get_tsf64(sc->ah);
+ tsf = ath5k_hw_get_tsf64(ah);
bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
hw_tu = TSF_TO_TU(tsf);
- ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
(unsigned long long)bc_tstamp,
(unsigned long long)rxs->mactime,
@@ -1213,7 +1202,7 @@ ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
* received, not like mac80211 which defines it at the start.
*/
if (bc_tstamp > rxs->mactime) {
- ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"fixing mactime from %llx to %llx\n",
(unsigned long long)rxs->mactime,
(unsigned long long)tsf);
@@ -1226,25 +1215,24 @@ ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
* beacons. This also takes care of synchronizing beacon sending
* times with other stations.
*/
- if (hw_tu >= sc->nexttbtt)
- ath5k_beacon_update_timers(sc, bc_tstamp);
+ if (hw_tu >= ah->nexttbtt)
+ ath5k_beacon_update_timers(ah, bc_tstamp);
/* Check if the beacon timers are still correct, because a TSF
* update might have created a window between them - for a
* longer description see the comment of this function: */
- if (!ath5k_hw_check_beacon_timers(sc->ah, sc->bintval)) {
- ath5k_beacon_update_timers(sc, bc_tstamp);
- ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
+ if (!ath5k_hw_check_beacon_timers(ah, ah->bintval)) {
+ ath5k_beacon_update_timers(ah, bc_tstamp);
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"fixed beacon timers after beacon receive\n");
}
}
}
static void
-ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
+ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi)
{
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
- struct ath5k_hw *ah = sc->ah;
struct ath_common *common = ath5k_hw_common(ah);
/* only beacons from our BSSID */
@@ -1263,16 +1251,15 @@ ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
*/
static int ath5k_common_padpos(struct sk_buff *skb)
{
- struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 frame_control = hdr->frame_control;
int padpos = 24;
- if (ieee80211_has_a4(frame_control)) {
+ if (ieee80211_has_a4(frame_control))
padpos += ETH_ALEN;
- }
- if (ieee80211_is_data_qos(frame_control)) {
+
+ if (ieee80211_is_data_qos(frame_control))
padpos += IEEE80211_QOS_CTL_LEN;
- }
return padpos;
}
@@ -1286,13 +1273,13 @@ static int ath5k_add_padding(struct sk_buff *skb)
int padpos = ath5k_common_padpos(skb);
int padsize = padpos & 3;
- if (padsize && skb->len>padpos) {
+ if (padsize && skb->len > padpos) {
if (skb_headroom(skb) < padsize)
return -1;
skb_push(skb, padsize);
- memmove(skb->data, skb->data+padsize, padpos);
+ memmove(skb->data, skb->data + padsize, padpos);
return padsize;
}
@@ -1317,7 +1304,7 @@ static int ath5k_remove_padding(struct sk_buff *skb)
int padpos = ath5k_common_padpos(skb);
int padsize = padpos & 3;
- if (padsize && skb->len>=padpos+padsize) {
+ if (padsize && skb->len >= padpos + padsize) {
memmove(skb->data + padsize, skb->data, padpos);
skb_pull(skb, padsize);
return padsize;
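
The three hunks above are style-only cleanups (pointer spacing, brace removal), but they touch the padding helpers that base.c applies to every frame: the 802.11 header is padded out to a 4-byte boundary before the frame is handed to the hardware, and the pad is stripped again before frames go back to mac80211. A self-contained sketch of the same arithmetic, operating on plain lengths rather than an skb; the 24, 6 and 2 byte counts mirror the constants used in the code above.

#include <stdio.h>

#define ETH_ALEN                6
#define IEEE80211_QOS_CTL_LEN   2

/* Header length in front of the pad: 24 bytes base, plus 6 for a
 * 4-address frame, plus 2 for the QoS control field, the same rule
 * ath5k_common_padpos() applies. */
static int padpos(int has_a4, int is_qos_data)
{
        int pos = 24;

        if (has_a4)
                pos += ETH_ALEN;
        if (is_qos_data)
                pos += IEEE80211_QOS_CTL_LEN;
        return pos;
}

int main(void)
{
        static const int combos[4][2] = { {0, 0}, {0, 1}, {1, 0}, {1, 1} };

        for (int i = 0; i < 4; i++) {
                int pos = padpos(combos[i][0], combos[i][1]);
                /* padsize = padpos & 3: for the possible header lengths
                 * (24, 26, 30, 32) this is exactly the number of pad bytes
                 * that lands the payload on a 4-byte boundary */
                int pad = pos & 3;

                printf("a4=%d qos=%d -> padpos=%d padsize=%d\n",
                       combos[i][0], combos[i][1], pos, pad);
        }
        return 0;
}
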
@@ -1327,7 +1314,7 @@ static int ath5k_remove_padding(struct sk_buff *skb)
}
static void
-ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
+ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
struct ath5k_rx_status *rs)
{
struct ieee80211_rx_status *rxs;
@@ -1353,44 +1340,44 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
* timestamp (beginning of phy frame, data frame, end of rx?).
* The only thing we know is that it is hardware specific...
* On AR5213 it seems the rx timestamp is at the end of the
- * frame, but i'm not sure.
+ * frame, but I'm not sure.
*
* NOTE: mac80211 defines mactime at the beginning of the first
* data symbol. Since we don't have any time references it's
* impossible to comply to that. This affects IBSS merge only
* right now, so it's not too bad...
*/
- rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
+ rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
rxs->flag |= RX_FLAG_MACTIME_MPDU;
- rxs->freq = sc->curchan->center_freq;
- rxs->band = sc->curchan->band;
+ rxs->freq = ah->curchan->center_freq;
+ rxs->band = ah->curchan->band;
- rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
+ rxs->signal = ah->ah_noise_floor + rs->rs_rssi;
rxs->antenna = rs->rs_antenna;
if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
- sc->stats.antenna_rx[rs->rs_antenna]++;
+ ah->stats.antenna_rx[rs->rs_antenna]++;
else
- sc->stats.antenna_rx[0]++; /* invalid */
+ ah->stats.antenna_rx[0]++; /* invalid */
- rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate);
- rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
+ rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
+ rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);
if (rxs->rate_idx >= 0 && rs->rs_rate ==
- sc->sbands[sc->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
+ ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
rxs->flag |= RX_FLAG_SHORTPRE;
- trace_ath5k_rx(sc, skb);
+ trace_ath5k_rx(ah, skb);
- ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
+ ath5k_update_beacon_rssi(ah, skb, rs->rs_rssi);
/* check beacons in IBSS mode */
- if (sc->opmode == NL80211_IFTYPE_ADHOC)
- ath5k_check_ibss_tsf(sc, skb, rxs);
+ if (ah->opmode == NL80211_IFTYPE_ADHOC)
+ ath5k_check_ibss_tsf(ah, skb, rxs);
- ieee80211_rx(sc->hw, skb);
+ ieee80211_rx(ah->hw, skb);
}
/** ath5k_frame_receive_ok() - Do we want to receive this frame or not?
@@ -1399,20 +1386,20 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
* statistics. Return true if we want this frame, false if not.
*/
static bool
-ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
+ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
{
- sc->stats.rx_all_count++;
- sc->stats.rx_bytes_count += rs->rs_datalen;
+ ah->stats.rx_all_count++;
+ ah->stats.rx_bytes_count += rs->rs_datalen;
if (unlikely(rs->rs_status)) {
if (rs->rs_status & AR5K_RXERR_CRC)
- sc->stats.rxerr_crc++;
+ ah->stats.rxerr_crc++;
if (rs->rs_status & AR5K_RXERR_FIFO)
- sc->stats.rxerr_fifo++;
+ ah->stats.rxerr_fifo++;
if (rs->rs_status & AR5K_RXERR_PHY) {
- sc->stats.rxerr_phy++;
+ ah->stats.rxerr_phy++;
if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
- sc->stats.rxerr_phy_code[rs->rs_phyerr]++;
+ ah->stats.rxerr_phy_code[rs->rs_phyerr]++;
return false;
}
if (rs->rs_status & AR5K_RXERR_DECRYPT) {
@@ -1426,13 +1413,13 @@ ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
*
* XXX do key cache faulting
*/
- sc->stats.rxerr_decrypt++;
+ ah->stats.rxerr_decrypt++;
if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
!(rs->rs_status & AR5K_RXERR_CRC))
return true;
}
if (rs->rs_status & AR5K_RXERR_MIC) {
- sc->stats.rxerr_mic++;
+ ah->stats.rxerr_mic++;
return true;
}
@@ -1442,25 +1429,26 @@ ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
}
if (unlikely(rs->rs_more)) {
- sc->stats.rxerr_jumbo++;
+ ah->stats.rxerr_jumbo++;
return false;
}
return true;
}
static void
-ath5k_set_current_imask(struct ath5k_softc *sc)
+ath5k_set_current_imask(struct ath5k_hw *ah)
{
- enum ath5k_int imask = sc->imask;
+ enum ath5k_int imask;
unsigned long flags;
- spin_lock_irqsave(&sc->irqlock, flags);
- if (sc->rx_pending)
+ spin_lock_irqsave(&ah->irqlock, flags);
+ imask = ah->imask;
+ if (ah->rx_pending)
imask &= ~AR5K_INT_RX_ALL;
- if (sc->tx_pending)
+ if (ah->tx_pending)
imask &= ~AR5K_INT_TX_ALL;
- ath5k_hw_set_imr(sc->ah, imask);
- spin_unlock_irqrestore(&sc->irqlock, flags);
+ ath5k_hw_set_imr(ah, imask);
+ spin_unlock_irqrestore(&ah->irqlock, flags);
}
static void
@@ -1469,39 +1457,38 @@ ath5k_tasklet_rx(unsigned long data)
struct ath5k_rx_status rs = {};
struct sk_buff *skb, *next_skb;
dma_addr_t next_skb_addr;
- struct ath5k_softc *sc = (void *)data;
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = (void *)data;
struct ath_common *common = ath5k_hw_common(ah);
struct ath5k_buf *bf;
struct ath5k_desc *ds;
int ret;
- spin_lock(&sc->rxbuflock);
- if (list_empty(&sc->rxbuf)) {
- ATH5K_WARN(sc, "empty rx buf pool\n");
+ spin_lock(&ah->rxbuflock);
+ if (list_empty(&ah->rxbuf)) {
+ ATH5K_WARN(ah, "empty rx buf pool\n");
goto unlock;
}
do {
- bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
+ bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
BUG_ON(bf->skb == NULL);
skb = bf->skb;
ds = bf->desc;
/* bail if HW is still using self-linked descriptor */
- if (ath5k_hw_get_rxdp(sc->ah) == bf->daddr)
+ if (ath5k_hw_get_rxdp(ah) == bf->daddr)
break;
- ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
+ ret = ah->ah_proc_rx_desc(ah, ds, &rs);
if (unlikely(ret == -EINPROGRESS))
break;
else if (unlikely(ret)) {
- ATH5K_ERR(sc, "error in processing rx descriptor\n");
- sc->stats.rxerr_proc++;
+ ATH5K_ERR(ah, "error in processing rx descriptor\n");
+ ah->stats.rxerr_proc++;
break;
}
- if (ath5k_receive_frame_ok(sc, &rs)) {
- next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
+ if (ath5k_receive_frame_ok(ah, &rs)) {
+ next_skb = ath5k_rx_skb_alloc(ah, &next_skb_addr);
/*
* If we can't replace bf->skb with a new skb under
@@ -1510,24 +1497,24 @@ ath5k_tasklet_rx(unsigned long data)
if (!next_skb)
goto next;
- dma_unmap_single(sc->dev, bf->skbaddr,
+ dma_unmap_single(ah->dev, bf->skbaddr,
common->rx_bufsize,
DMA_FROM_DEVICE);
skb_put(skb, rs.rs_datalen);
- ath5k_receive_frame(sc, skb, &rs);
+ ath5k_receive_frame(ah, skb, &rs);
bf->skb = next_skb;
bf->skbaddr = next_skb_addr;
}
next:
- list_move_tail(&bf->list, &sc->rxbuf);
- } while (ath5k_rxbuf_setup(sc, bf) == 0);
+ list_move_tail(&bf->list, &ah->rxbuf);
+ } while (ath5k_rxbuf_setup(ah, bf) == 0);
unlock:
- spin_unlock(&sc->rxbuflock);
- sc->rx_pending = false;
- ath5k_set_current_imask(sc);
+ spin_unlock(&ah->rxbuflock);
+ ah->rx_pending = false;
+ ath5k_set_current_imask(ah);
}
@@ -1539,12 +1526,12 @@ void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath5k_txq *txq)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
struct ath5k_buf *bf;
unsigned long flags;
int padsize;
- trace_ath5k_tx(sc, skb, txq);
+ trace_ath5k_tx(ah, skb, txq);
/*
* The hardware expects the header padded to 4 byte boundaries.
@@ -1552,36 +1539,37 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
*/
padsize = ath5k_add_padding(skb);
if (padsize < 0) {
- ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
+ ATH5K_ERR(ah, "tx hdrlen not %%4: not enough"
" headroom to pad");
goto drop_packet;
}
- if (txq->txq_len >= txq->txq_max)
+ if (txq->txq_len >= txq->txq_max &&
+ txq->qnum <= AR5K_TX_QUEUE_ID_DATA_MAX)
ieee80211_stop_queue(hw, txq->qnum);
- spin_lock_irqsave(&sc->txbuflock, flags);
- if (list_empty(&sc->txbuf)) {
- ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
- spin_unlock_irqrestore(&sc->txbuflock, flags);
+ spin_lock_irqsave(&ah->txbuflock, flags);
+ if (list_empty(&ah->txbuf)) {
+ ATH5K_ERR(ah, "no further txbuf available, dropping packet\n");
+ spin_unlock_irqrestore(&ah->txbuflock, flags);
ieee80211_stop_queues(hw);
goto drop_packet;
}
- bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
+ bf = list_first_entry(&ah->txbuf, struct ath5k_buf, list);
list_del(&bf->list);
- sc->txbuf_len--;
- if (list_empty(&sc->txbuf))
+ ah->txbuf_len--;
+ if (list_empty(&ah->txbuf))
ieee80211_stop_queues(hw);
- spin_unlock_irqrestore(&sc->txbuflock, flags);
+ spin_unlock_irqrestore(&ah->txbuflock, flags);
bf->skb = skb;
- if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
+ if (ath5k_txbuf_setup(ah, bf, txq, padsize)) {
bf->skb = NULL;
- spin_lock_irqsave(&sc->txbuflock, flags);
- list_add_tail(&bf->list, &sc->txbuf);
- sc->txbuf_len++;
- spin_unlock_irqrestore(&sc->txbuflock, flags);
+ spin_lock_irqsave(&ah->txbuflock, flags);
+ list_add_tail(&bf->list, &ah->txbuf);
+ ah->txbuf_len++;
+ spin_unlock_irqrestore(&ah->txbuflock, flags);
goto drop_packet;
}
return;
@@ -1591,15 +1579,15 @@ drop_packet:
}
static void
-ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
+ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
struct ath5k_txq *txq, struct ath5k_tx_status *ts)
{
struct ieee80211_tx_info *info;
u8 tries[3];
int i;
- sc->stats.tx_all_count++;
- sc->stats.tx_bytes_count += skb->len;
+ ah->stats.tx_all_count++;
+ ah->stats.tx_bytes_count += skb->len;
info = IEEE80211_SKB_CB(skb);
tries[0] = info->status.rates[0].count;
@@ -1619,15 +1607,15 @@ ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
info->status.rates[ts->ts_final_idx + 1].idx = -1;
if (unlikely(ts->ts_status)) {
- sc->stats.ack_fail++;
+ ah->stats.ack_fail++;
if (ts->ts_status & AR5K_TXERR_FILT) {
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
- sc->stats.txerr_filt++;
+ ah->stats.txerr_filt++;
}
if (ts->ts_status & AR5K_TXERR_XRETRY)
- sc->stats.txerr_retry++;
+ ah->stats.txerr_retry++;
if (ts->ts_status & AR5K_TXERR_FIFO)
- sc->stats.txerr_fifo++;
+ ah->stats.txerr_fifo++;
} else {
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ts->ts_rssi;
@@ -1643,16 +1631,16 @@ ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
ath5k_remove_padding(skb);
if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
- sc->stats.antenna_tx[ts->ts_antenna]++;
+ ah->stats.antenna_tx[ts->ts_antenna]++;
else
- sc->stats.antenna_tx[0]++; /* invalid */
+ ah->stats.antenna_tx[0]++; /* invalid */
- trace_ath5k_tx_complete(sc, skb, txq, ts);
- ieee80211_tx_status(sc->hw, skb);
+ trace_ath5k_tx_complete(ah, skb, txq, ts);
+ ieee80211_tx_status(ah->hw, skb);
}
static void
-ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
+ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
{
struct ath5k_tx_status ts = {};
struct ath5k_buf *bf, *bf0;
@@ -1669,11 +1657,11 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
if (bf->skb != NULL) {
ds = bf->desc;
- ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
+ ret = ah->ah_proc_tx_desc(ah, ds, &ts);
if (unlikely(ret == -EINPROGRESS))
break;
else if (unlikely(ret)) {
- ATH5K_ERR(sc,
+ ATH5K_ERR(ah,
"error %d while processing "
"queue %u\n", ret, txq->qnum);
break;
@@ -1682,9 +1670,9 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
skb = bf->skb;
bf->skb = NULL;
- dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
+ dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
DMA_TO_DEVICE);
- ath5k_tx_frame_completed(sc, skb, txq, &ts);
+ ath5k_tx_frame_completed(ah, skb, txq, &ts);
}
/*
@@ -1693,31 +1681,31 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
* host memory and moved on.
* Always keep the last descriptor to avoid HW races...
*/
- if (ath5k_hw_get_txdp(sc->ah, txq->qnum) != bf->daddr) {
- spin_lock(&sc->txbuflock);
- list_move_tail(&bf->list, &sc->txbuf);
- sc->txbuf_len++;
+ if (ath5k_hw_get_txdp(ah, txq->qnum) != bf->daddr) {
+ spin_lock(&ah->txbuflock);
+ list_move_tail(&bf->list, &ah->txbuf);
+ ah->txbuf_len++;
txq->txq_len--;
- spin_unlock(&sc->txbuflock);
+ spin_unlock(&ah->txbuflock);
}
}
spin_unlock(&txq->lock);
if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
- ieee80211_wake_queue(sc->hw, txq->qnum);
+ ieee80211_wake_queue(ah->hw, txq->qnum);
}
static void
ath5k_tasklet_tx(unsigned long data)
{
int i;
- struct ath5k_softc *sc = (void *)data;
+ struct ath5k_hw *ah = (void *)data;
- for (i=0; i < AR5K_NUM_TX_QUEUES; i++)
- if (sc->txqs[i].setup && (sc->ah->ah_txq_isr & BIT(i)))
- ath5k_tx_processq(sc, &sc->txqs[i]);
+ for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
+ if (ah->txqs[i].setup && (ah->ah_txq_isr & BIT(i)))
+ ath5k_tx_processq(ah, &ah->txqs[i]);
- sc->tx_pending = false;
- ath5k_set_current_imask(sc);
+ ah->tx_pending = false;
+ ath5k_set_current_imask(ah);
}
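
[Editor's note] The tasklet handlers in the hunks above (and the beacon/calibrate/ANI tasklets later in this patch) receive the driver context through the tasklet's unsigned long data argument and cast it back to the owning structure; after this conversion that pointer is the ath5k_hw itself rather than a separate softc. Below is a minimal, standalone userspace sketch of that cast-through-an-integer callback pattern. All names (demo_hw, demo_tasklet_tx, run_callback) are invented for illustration; nothing here is kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver state the tasklet operates on. */
struct demo_hw {
	bool tx_pending;
	int txq_len[4];
};

/* Callback with the tasklet-style signature: context arrives as unsigned long. */
static void demo_tasklet_tx(unsigned long data)
{
	struct demo_hw *hw = (struct demo_hw *)data;	/* recover the pointer */
	int i;

	for (i = 0; i < 4; i++)
		printf("queue %d: %d buffers\n", i, hw->txq_len[i]);

	hw->tx_pending = false;
}

/* Stand-in for tasklet_schedule(): simply invokes the registered callback. */
static void run_callback(void (*fn)(unsigned long), unsigned long data)
{
	fn(data);
}

int main(void)
{
	struct demo_hw hw = { .tx_pending = true, .txq_len = { 3, 0, 1, 7 } };

	/* The driver registers the context as an integer, as tasklet_init() does. */
	run_callback(demo_tasklet_tx, (unsigned long)&hw);
	printf("tx_pending now %d\n", hw.tx_pending);
	return 0;
}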
@@ -1729,25 +1717,26 @@ ath5k_tasklet_tx(unsigned long data)
* Setup the beacon frame for transmit.
*/
static int
-ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
+ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
struct sk_buff *skb = bf->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ath5k_hw *ah = sc->ah;
struct ath5k_desc *ds;
int ret = 0;
u8 antenna;
u32 flags;
const int padsize = 0;
- bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
+ bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
"skbaddr %llx\n", skb, skb->data, skb->len,
(unsigned long long)bf->skbaddr);
- if (dma_mapping_error(sc->dev, bf->skbaddr)) {
- ATH5K_ERR(sc, "beacon DMA mapping failed\n");
+ if (dma_mapping_error(ah->dev, bf->skbaddr)) {
+ ATH5K_ERR(ah, "beacon DMA mapping failed\n");
+ dev_kfree_skb_any(skb);
+ bf->skb = NULL;
return -EIO;
}
@@ -1755,7 +1744,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
antenna = ah->ah_tx_ant;
flags = AR5K_TXDESC_NOACK;
- if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
+ if (ah->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
ds->ds_link = bf->daddr; /* self-linked */
flags |= AR5K_TXDESC_VEOL;
} else
@@ -1767,7 +1756,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
* 4 beacons to make sure everybody hears our AP.
* When a client tries to associate, hw will keep
* track of the tx antenna to be used for this client
- * automaticaly, based on ACKed packets.
+ * automatically, based on ACKed packets.
*
* Note: AP still listens and transmits RTS on the
* default antenna which is supposed to be an omni.
@@ -1780,7 +1769,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
* on all of them.
*/
if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
- antenna = sc->bsent & 4 ? 2 : 1;
+ antenna = ah->bsent & 4 ? 2 : 1;
/* FIXME: If we are in g mode and rate is a CCK rate
@@ -1789,8 +1778,8 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
ds->ds_data = bf->skbaddr;
ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
ieee80211_get_hdrlen_from_skb(skb), padsize,
- AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
- ieee80211_get_tx_rate(sc->hw, info)->hw_value,
+ AR5K_PKT_TYPE_BEACON, (ah->power_level * 2),
+ ieee80211_get_tx_rate(ah->hw, info)->hw_value,
1, AR5K_TXKEYIX_INVALID,
antenna, flags, 0, 0);
if (ret)
@@ -1798,7 +1787,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
return 0;
err_unmap:
- dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
+ dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
return ret;
}
@@ -1813,7 +1802,7 @@ int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
int ret;
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
struct ath5k_vif *avf = (void *)vif->drv_priv;
struct sk_buff *skb;
@@ -1829,11 +1818,9 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
goto out;
}
- ath5k_txbuf_free_skb(sc, avf->bbuf);
+ ath5k_txbuf_free_skb(ah, avf->bbuf);
avf->bbuf->skb = skb;
- ret = ath5k_beacon_setup(sc, avf->bbuf);
- if (ret)
- avf->bbuf->skb = NULL;
+ ret = ath5k_beacon_setup(ah, avf->bbuf);
out:
return ret;
}
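
[Editor's note] In the two hunks above the skb cleanup for a failed beacon setup moves from the caller (ath5k_beacon_update) into ath5k_beacon_setup itself, which frees the buffer and clears bf->skb right where the DMA mapping fails. The standalone sketch below mirrors that "release at the failure site" ownership rule with invented names (demo_buf, demo_setup, demo_update); it does not model the kernel DMA API, only the cleanup convention.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_buf {
	char *skb;	/* hypothetical payload owned by the buffer */
};

/* Pretend mapping step that can fail. */
static int demo_map(const char *payload)
{
	return strlen(payload) > 8 ? -1 : 0;	/* arbitrary failure rule */
}

/*
 * Callee owns cleanup: if the "mapping" fails it frees the payload and
 * clears the pointer, so callers never see a half-initialised buffer.
 */
static int demo_setup(struct demo_buf *bf)
{
	if (demo_map(bf->skb) != 0) {
		free(bf->skb);
		bf->skb = NULL;
		return -1;
	}
	return 0;
}

/* Caller no longer needs an error branch that resets bf->skb. */
static int demo_update(struct demo_buf *bf, const char *frame)
{
	free(bf->skb);			/* drop any previous frame */
	bf->skb = strdup(frame);
	return demo_setup(bf);
}

int main(void)
{
	struct demo_buf bf = { .skb = NULL };

	printf("short frame: %d (skb %p)\n", demo_update(&bf, "beacon"), (void *)bf.skb);
	printf("long frame:  %d (skb %p)\n", demo_update(&bf, "much-too-long"), (void *)bf.skb);
	free(bf.skb);
	return 0;
}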
@@ -1847,15 +1834,15 @@ out:
* or user context from ath5k_beacon_config.
*/
static void
-ath5k_beacon_send(struct ath5k_softc *sc)
+ath5k_beacon_send(struct ath5k_hw *ah)
{
- struct ath5k_hw *ah = sc->ah;
struct ieee80211_vif *vif;
struct ath5k_vif *avf;
struct ath5k_buf *bf;
struct sk_buff *skb;
+ int err;
- ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n");
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");
/*
* Check if the previous beacon has gone out. If
@@ -1864,85 +1851,93 @@ ath5k_beacon_send(struct ath5k_softc *sc)
* indicate a problem and should not occur. If we
* miss too many consecutive beacons reset the device.
*/
- if (unlikely(ath5k_hw_num_tx_pending(ah, sc->bhalq) != 0)) {
- sc->bmisscount++;
- ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
- "missed %u consecutive beacons\n", sc->bmisscount);
- if (sc->bmisscount > 10) { /* NB: 10 is a guess */
- ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
+ if (unlikely(ath5k_hw_num_tx_pending(ah, ah->bhalq) != 0)) {
+ ah->bmisscount++;
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
+ "missed %u consecutive beacons\n", ah->bmisscount);
+ if (ah->bmisscount > 10) { /* NB: 10 is a guess */
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
"stuck beacon time (%u missed)\n",
- sc->bmisscount);
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ ah->bmisscount);
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"stuck beacon, resetting\n");
- ieee80211_queue_work(sc->hw, &sc->reset_work);
+ ieee80211_queue_work(ah->hw, &ah->reset_work);
}
return;
}
- if (unlikely(sc->bmisscount != 0)) {
- ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
+ if (unlikely(ah->bmisscount != 0)) {
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
"resume beacon xmit after %u misses\n",
- sc->bmisscount);
- sc->bmisscount = 0;
+ ah->bmisscount);
+ ah->bmisscount = 0;
}
- if ((sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) ||
- sc->opmode == NL80211_IFTYPE_MESH_POINT) {
+ if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs > 1) ||
+ ah->opmode == NL80211_IFTYPE_MESH_POINT) {
u64 tsf = ath5k_hw_get_tsf64(ah);
u32 tsftu = TSF_TO_TU(tsf);
- int slot = ((tsftu % sc->bintval) * ATH_BCBUF) / sc->bintval;
- vif = sc->bslot[(slot + 1) % ATH_BCBUF];
- ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
+ int slot = ((tsftu % ah->bintval) * ATH_BCBUF) / ah->bintval;
+ vif = ah->bslot[(slot + 1) % ATH_BCBUF];
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
"tsf %llx tsftu %x intval %u slot %u vif %p\n",
- (unsigned long long)tsf, tsftu, sc->bintval, slot, vif);
+ (unsigned long long)tsf, tsftu, ah->bintval, slot, vif);
} else /* only one interface */
- vif = sc->bslot[0];
+ vif = ah->bslot[0];
if (!vif)
return;
avf = (void *)vif->drv_priv;
bf = avf->bbuf;
- if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION ||
- sc->opmode == NL80211_IFTYPE_MONITOR)) {
- ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
- return;
- }
/*
* Stop any current dma and put the new frame on the queue.
* This should never fail since we check above that no frames
* are still pending on the queue.
*/
- if (unlikely(ath5k_hw_stop_beacon_queue(ah, sc->bhalq))) {
- ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq);
+ if (unlikely(ath5k_hw_stop_beacon_queue(ah, ah->bhalq))) {
+ ATH5K_WARN(ah, "beacon queue %u didn't start/stop ?\n", ah->bhalq);
/* NB: hw still stops DMA, so proceed */
}
/* refresh the beacon for AP or MESH mode */
- if (sc->opmode == NL80211_IFTYPE_AP ||
- sc->opmode == NL80211_IFTYPE_MESH_POINT)
- ath5k_beacon_update(sc->hw, vif);
+ if (ah->opmode == NL80211_IFTYPE_AP ||
+ ah->opmode == NL80211_IFTYPE_MESH_POINT) {
+ err = ath5k_beacon_update(ah->hw, vif);
+ if (err)
+ return;
+ }
- trace_ath5k_tx(sc, bf->skb, &sc->txqs[sc->bhalq]);
+ if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
+ ah->opmode == NL80211_IFTYPE_MONITOR)) {
+ ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
+ return;
+ }
- ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
- ath5k_hw_start_tx_dma(ah, sc->bhalq);
- ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
- sc->bhalq, (unsigned long long)bf->daddr, bf->desc);
+ trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);
- skb = ieee80211_get_buffered_bc(sc->hw, vif);
+ ath5k_hw_set_txdp(ah, ah->bhalq, bf->daddr);
+ ath5k_hw_start_tx_dma(ah, ah->bhalq);
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
+ ah->bhalq, (unsigned long long)bf->daddr, bf->desc);
+
+ skb = ieee80211_get_buffered_bc(ah->hw, vif);
while (skb) {
- ath5k_tx_queue(sc->hw, skb, sc->cabq);
- skb = ieee80211_get_buffered_bc(sc->hw, vif);
+ ath5k_tx_queue(ah->hw, skb, ah->cabq);
+
+ if (ah->cabq->txq_len >= ah->cabq->txq_max)
+ break;
+
+ skb = ieee80211_get_buffered_bc(ah->hw, vif);
}
- sc->bsent++;
+ ah->bsent++;
}
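
[Editor's note] For multi-BSS AP and mesh operation the hunk above picks the beacon slot from the TSF: the current time in TU is folded into the beacon interval and scaled by the number of beacon buffers, and the driver then transmits for the next slot. The standalone sketch below reproduces only the ((tsftu % bintval) * ATH_BCBUF) / bintval arithmetic from the patch; the TSF_TO_TU shift assumes the usual 1 TU = 1024 microseconds convention, and the interval and buffer count are made-up values.

#include <stdint.h>
#include <stdio.h>

#define DEMO_BCBUF 4				/* beacon buffers, as ATH_BCBUF in the driver */
#define TSF_TO_TU(tsf) ((uint32_t)((tsf) >> 10))	/* assumes 1 TU = 1024 us */

/* Map a 64-bit TSF to the beacon slot that owns the upcoming TBTT. */
static int demo_beacon_slot(uint64_t tsf, uint32_t bintval)
{
	uint32_t tsftu = TSF_TO_TU(tsf);
	int slot = ((tsftu % bintval) * DEMO_BCBUF) / bintval;

	/* The driver transmits for the *next* slot, hence the +1 wrap. */
	return (slot + 1) % DEMO_BCBUF;
}

int main(void)
{
	uint32_t bintval = 100;		/* beacon interval in TU, hypothetical */
	uint64_t tsf;

	/* Walk one beacon interval and show which vif slot each quarter hits. */
	for (tsf = 0; tsf < (uint64_t)bintval * 1024; tsf += 25 * 1024)
		printf("tsftu %3u -> slot %d\n", TSF_TO_TU(tsf),
		       demo_beacon_slot(tsf, bintval));
	return 0;
}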
/**
* ath5k_beacon_update_timers - update beacon timers
*
- * @sc: struct ath5k_softc pointer we are operating on
+ * @ah: struct ath5k_hw pointer we are operating on
* @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
* beacon timer update based on the current HW TSF.
*
@@ -1956,17 +1951,16 @@ ath5k_beacon_send(struct ath5k_softc *sc)
* function to have it all together in one place.
*/
void
-ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
+ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
{
- struct ath5k_hw *ah = sc->ah;
u32 nexttbtt, intval, hw_tu, bc_tu;
u64 hw_tsf;
- intval = sc->bintval & AR5K_BEACON_PERIOD;
- if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) {
+ intval = ah->bintval & AR5K_BEACON_PERIOD;
+ if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs > 1) {
intval /= ATH_BCBUF; /* staggered multi-bss beacons */
if (intval < 15)
- ATH5K_WARN(sc, "intval %u is too low, min 15\n",
+ ATH5K_WARN(ah, "intval %u is too low, min 15\n",
intval);
}
if (WARN_ON(!intval))
@@ -1979,7 +1973,7 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
hw_tsf = ath5k_hw_get_tsf64(ah);
hw_tu = TSF_TO_TU(hw_tsf);
-#define FUDGE AR5K_TUNE_SW_BEACON_RESP + 3
+#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
* Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
* configuration we need to make sure it is bigger than that. */
@@ -2005,7 +1999,7 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
* automatically update the TSF and then we need to reconfigure
* the timers.
*/
- ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"need to wait for HW TSF sync\n");
return;
} else {
@@ -2020,7 +2014,7 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
}
#undef FUDGE
- sc->nexttbtt = nexttbtt;
+ ah->nexttbtt = nexttbtt;
intval |= AR5K_BEACON_ENA;
ath5k_hw_init_beacon(ah, nexttbtt, intval);
@@ -2030,20 +2024,20 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
* of this function
*/
if (bc_tsf == -1)
- ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"reconfigured timers based on HW TSF\n");
else if (bc_tsf == 0)
- ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"reset HW TSF and timers\n");
else
- ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"updated timers based on beacon TSF\n");
- ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
(unsigned long long) bc_tsf,
(unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
- ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
intval & AR5K_BEACON_PERIOD,
intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
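
[Editor's note] The FUDGE hunk above wraps the macro body in parentheses. With the old definition, AR5K_TUNE_SW_BEACON_RESP + 3, any use of the macro inside a larger expression can expand with the wrong operator precedence. The tiny standalone demonstration below uses invented names and values (RESP, BAD_FUDGE, GOOD_FUDGE); only the parenthesization point comes from the patch.

#include <stdio.h>

#define RESP 10			/* stand-in for AR5K_TUNE_SW_BEACON_RESP */
#define BAD_FUDGE  RESP + 3	/* unparenthesized: expands textually as "10 + 3" */
#define GOOD_FUDGE (RESP + 3)	/* parenthesized: always evaluates to 13 */

int main(void)
{
	/* Multiplying by the macro shows the precedence trap. */
	printf("2 * BAD_FUDGE  = %d (expands to 2 * 10 + 3)\n", 2 * BAD_FUDGE);
	printf("2 * GOOD_FUDGE = %d\n", 2 * GOOD_FUDGE);
	return 0;
}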
@@ -2052,22 +2046,21 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
/**
* ath5k_beacon_config - Configure the beacon queues and interrupts
*
- * @sc: struct ath5k_softc pointer we are operating on
+ * @ah: struct ath5k_hw pointer we are operating on
*
* In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
* interrupts to detect TSF updates only.
*/
void
-ath5k_beacon_config(struct ath5k_softc *sc)
+ath5k_beacon_config(struct ath5k_hw *ah)
{
- struct ath5k_hw *ah = sc->ah;
unsigned long flags;
- spin_lock_irqsave(&sc->block, flags);
- sc->bmisscount = 0;
- sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
+ spin_lock_irqsave(&ah->block, flags);
+ ah->bmisscount = 0;
+ ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
- if (sc->enable_beacon) {
+ if (ah->enable_beacon) {
/*
* In IBSS mode we use a self-linked tx descriptor and let the
* hardware send the beacons automatically. We have to load it
@@ -2075,50 +2068,50 @@ ath5k_beacon_config(struct ath5k_softc *sc)
* We use the SWBA interrupt only to keep track of the beacon
* timers in order to detect automatic TSF updates.
*/
- ath5k_beaconq_config(sc);
+ ath5k_beaconq_config(ah);
- sc->imask |= AR5K_INT_SWBA;
+ ah->imask |= AR5K_INT_SWBA;
- if (sc->opmode == NL80211_IFTYPE_ADHOC) {
+ if (ah->opmode == NL80211_IFTYPE_ADHOC) {
if (ath5k_hw_hasveol(ah))
- ath5k_beacon_send(sc);
+ ath5k_beacon_send(ah);
} else
- ath5k_beacon_update_timers(sc, -1);
+ ath5k_beacon_update_timers(ah, -1);
} else {
- ath5k_hw_stop_beacon_queue(sc->ah, sc->bhalq);
+ ath5k_hw_stop_beacon_queue(ah, ah->bhalq);
}
- ath5k_hw_set_imr(ah, sc->imask);
+ ath5k_hw_set_imr(ah, ah->imask);
mmiowb();
- spin_unlock_irqrestore(&sc->block, flags);
+ spin_unlock_irqrestore(&ah->block, flags);
}
static void ath5k_tasklet_beacon(unsigned long data)
{
- struct ath5k_softc *sc = (struct ath5k_softc *) data;
+ struct ath5k_hw *ah = (struct ath5k_hw *) data;
/*
* Software beacon alert--time to send a beacon.
*
* In IBSS mode we use this interrupt just to
* keep track of the next TBTT (target beacon
- * transmission time) in order to detect wether
+ * transmission time) in order to detect whether
* automatic TSF updates happened.
*/
- if (sc->opmode == NL80211_IFTYPE_ADHOC) {
- /* XXX: only if VEOL suppported */
- u64 tsf = ath5k_hw_get_tsf64(sc->ah);
- sc->nexttbtt += sc->bintval;
- ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
+ if (ah->opmode == NL80211_IFTYPE_ADHOC) {
+ /* XXX: only if VEOL supported */
+ u64 tsf = ath5k_hw_get_tsf64(ah);
+ ah->nexttbtt += ah->bintval;
+ ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
"SWBA nexttbtt: %x hw_tu: %x "
"TSF: %llx\n",
- sc->nexttbtt,
+ ah->nexttbtt,
TSF_TO_TU(tsf),
(unsigned long long) tsf);
} else {
- spin_lock(&sc->block);
- ath5k_beacon_send(sc);
- spin_unlock(&sc->block);
+ spin_lock(&ah->block);
+ ath5k_beacon_send(ah);
+ spin_unlock(&ah->block);
}
}
@@ -2135,12 +2128,12 @@ ath5k_intr_calibration_poll(struct ath5k_hw *ah)
/* run ANI only when full calibration is not active */
ah->ah_cal_next_ani = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
- tasklet_schedule(&ah->ah_sc->ani_tasklet);
+ tasklet_schedule(&ah->ani_tasklet);
} else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
ah->ah_cal_next_full = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
- tasklet_schedule(&ah->ah_sc->calib);
+ tasklet_schedule(&ah->calib);
}
/* we could use SWI to generate enough interrupts to meet our
* calibration interval requirements, if necessary:
@@ -2148,44 +2141,43 @@ ath5k_intr_calibration_poll(struct ath5k_hw *ah)
}
static void
-ath5k_schedule_rx(struct ath5k_softc *sc)
+ath5k_schedule_rx(struct ath5k_hw *ah)
{
- sc->rx_pending = true;
- tasklet_schedule(&sc->rxtq);
+ ah->rx_pending = true;
+ tasklet_schedule(&ah->rxtq);
}
static void
-ath5k_schedule_tx(struct ath5k_softc *sc)
+ath5k_schedule_tx(struct ath5k_hw *ah)
{
- sc->tx_pending = true;
- tasklet_schedule(&sc->txtq);
+ ah->tx_pending = true;
+ tasklet_schedule(&ah->txtq);
}
-irqreturn_t
+static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
- struct ath5k_softc *sc = dev_id;
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = dev_id;
enum ath5k_int status;
unsigned int counter = 1000;
- if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) ||
+ if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
((ath5k_get_bus_type(ah) != ATH_AHB) &&
!ath5k_hw_is_intr_pending(ah))))
return IRQ_NONE;
do {
ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
- ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
- status, sc->imask);
+ ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
+ status, ah->imask);
if (unlikely(status & AR5K_INT_FATAL)) {
/*
* Fatal errors are unrecoverable.
* Typically these are caused by DMA errors.
*/
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"fatal int, resetting\n");
- ieee80211_queue_work(sc->hw, &sc->reset_work);
+ ieee80211_queue_work(ah->hw, &ah->reset_work);
} else if (unlikely(status & AR5K_INT_RXORN)) {
/*
* Receive buffers are full. Either the bus is busy or
@@ -2196,45 +2188,44 @@ ath5k_intr(int irq, void *dev_id)
* We don't know exactly which versions need a reset -
* this guess is copied from the HAL.
*/
- sc->stats.rxorn_intr++;
+ ah->stats.rxorn_intr++;
if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"rx overrun, resetting\n");
- ieee80211_queue_work(sc->hw, &sc->reset_work);
- }
- else
- ath5k_schedule_rx(sc);
+ ieee80211_queue_work(ah->hw, &ah->reset_work);
+ } else
+ ath5k_schedule_rx(ah);
} else {
- if (status & AR5K_INT_SWBA) {
- tasklet_hi_schedule(&sc->beacontq);
- }
+ if (status & AR5K_INT_SWBA)
+ tasklet_hi_schedule(&ah->beacontq);
+
if (status & AR5K_INT_RXEOL) {
/*
* NB: the hardware should re-read the link when
* RXE bit is written, but it doesn't work at
* least on older hardware revs.
*/
- sc->stats.rxeol_intr++;
+ ah->stats.rxeol_intr++;
}
if (status & AR5K_INT_TXURN) {
/* bump tx trigger level */
ath5k_hw_update_tx_triglevel(ah, true);
}
if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
- ath5k_schedule_rx(sc);
+ ath5k_schedule_rx(ah);
if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
| AR5K_INT_TXERR | AR5K_INT_TXEOL))
- ath5k_schedule_tx(sc);
+ ath5k_schedule_tx(ah);
if (status & AR5K_INT_BMISS) {
/* TODO */
}
if (status & AR5K_INT_MIB) {
- sc->stats.mib_intr++;
+ ah->stats.mib_intr++;
ath5k_hw_update_mib_counters(ah);
ath5k_ani_mib_intr(ah);
}
if (status & AR5K_INT_GPIO)
- tasklet_schedule(&sc->rf_kill.toggleq);
+ tasklet_schedule(&ah->rf_kill.toggleq);
}
@@ -2243,11 +2234,11 @@ ath5k_intr(int irq, void *dev_id)
} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
- if (sc->rx_pending || sc->tx_pending)
- ath5k_set_current_imask(sc);
+ if (ah->rx_pending || ah->tx_pending)
+ ath5k_set_current_imask(ah);
if (unlikely(!counter))
- ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
+ ATH5K_WARN(ah, "too many interrupts, giving up for now\n");
ath5k_intr_calibration_poll(ah);
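
[Editor's note] The interrupt handler above services causes in a do/while loop bounded by a countdown (counter starts at 1000) and warns "too many interrupts, giving up for now" if the budget is exhausted, so a stuck source cannot livelock the CPU. A standalone sketch of that bounded-loop shape follows; the device model (demo_dev, demo_intr_pending, demo_handle_one) is invented.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical device model: a counter of still-pending interrupt causes. */
struct demo_dev {
	int pending;
};

static bool demo_intr_pending(const struct demo_dev *dev)
{
	return dev->pending > 0;
}

static void demo_handle_one(struct demo_dev *dev)
{
	dev->pending--;		/* acknowledge one cause */
}

/*
 * Service causes until the device is quiet, but never spin more than
 * `budget` times; a stuck source then produces a warning instead of a hang.
 */
static void demo_intr(struct demo_dev *dev, unsigned int budget)
{
	unsigned int counter = budget;

	do {
		demo_handle_one(dev);
	} while (demo_intr_pending(dev) && --counter > 0);

	if (!counter)
		fprintf(stderr, "too many interrupts, giving up for now\n");
}

int main(void)
{
	struct demo_dev quiet = { .pending = 3 };
	struct demo_dev noisy = { .pending = 5000 };

	demo_intr(&quiet, 1000);	/* drains normally */
	demo_intr(&noisy, 1000);	/* hits the budget and warns */
	printf("quiet left %d, noisy left %d\n", quiet.pending, noisy.pending);
	return 0;
}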
@@ -2261,28 +2252,27 @@ ath5k_intr(int irq, void *dev_id)
static void
ath5k_tasklet_calibrate(unsigned long data)
{
- struct ath5k_softc *sc = (void *)data;
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = (void *)data;
/* Only full calibration for now */
ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
- ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
- ieee80211_frequency_to_channel(sc->curchan->center_freq),
- sc->curchan->hw_value);
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
+ ieee80211_frequency_to_channel(ah->curchan->center_freq),
+ ah->curchan->hw_value);
if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
/*
* Rfgain is out of bounds, reset the chip
* to load new gain values.
*/
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
- ieee80211_queue_work(sc->hw, &sc->reset_work);
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "calibration, resetting\n");
+ ieee80211_queue_work(ah->hw, &ah->reset_work);
}
- if (ath5k_hw_phy_calibrate(ah, sc->curchan))
- ATH5K_ERR(sc, "calibration of channel %u failed\n",
+ if (ath5k_hw_phy_calibrate(ah, ah->curchan))
+ ATH5K_ERR(ah, "calibration of channel %u failed\n",
ieee80211_frequency_to_channel(
- sc->curchan->center_freq));
+ ah->curchan->center_freq));
/* Noise floor calibration interrupts rx/tx path while I/Q calibration
* doesn't.
@@ -2301,8 +2291,7 @@ ath5k_tasklet_calibrate(unsigned long data)
static void
ath5k_tasklet_ani(unsigned long data)
{
- struct ath5k_softc *sc = (void *)data;
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = (void *)data;
ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
ath5k_ani_calibration(ah);
@@ -2313,21 +2302,21 @@ ath5k_tasklet_ani(unsigned long data)
static void
ath5k_tx_complete_poll_work(struct work_struct *work)
{
- struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
+ struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
tx_complete_work.work);
struct ath5k_txq *txq;
int i;
bool needreset = false;
- mutex_lock(&sc->lock);
+ mutex_lock(&ah->lock);
- for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
- if (sc->txqs[i].setup) {
- txq = &sc->txqs[i];
+ for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
+ if (ah->txqs[i].setup) {
+ txq = &ah->txqs[i];
spin_lock_bh(&txq->lock);
if (txq->txq_len > 1) {
if (txq->txq_poll_mark) {
- ATH5K_DBG(sc, ATH5K_DEBUG_XMIT,
+ ATH5K_DBG(ah, ATH5K_DEBUG_XMIT,
"TX queue stuck %d\n",
txq->qnum);
needreset = true;
@@ -2343,14 +2332,14 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
}
if (needreset) {
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"TX queues stuck, resetting\n");
- ath5k_reset(sc, NULL, true);
+ ath5k_reset(ah, NULL, true);
}
- mutex_unlock(&sc->lock);
+ mutex_unlock(&ah->lock);
- ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
+ ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}
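
[Editor's note] ath5k_tx_complete_poll_work above (and ath5k_reset_work further down) recover the driver structure from the embedded work item with container_of; after this patch the embedding structure is ath5k_hw rather than the softc. Below is a standalone sketch of that pointer-recovery idiom with a local container_of definition and invented names (demo_hw, demo_work).

#include <stddef.h>
#include <stdio.h>

/* Local copy of the kernel idiom: member pointer -> enclosing struct pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_work {
	void (*fn)(struct demo_work *w);
};

/* Driver state embeds the work item, as ath5k_hw embeds tx_complete_work. */
struct demo_hw {
	int stuck_queues;
	struct demo_work tx_complete_work;
};

static void demo_poll_work(struct demo_work *w)
{
	struct demo_hw *hw = container_of(w, struct demo_hw, tx_complete_work);

	printf("poll work sees %d stuck queue(s)\n", hw->stuck_queues);
}

int main(void)
{
	struct demo_hw hw = {
		.stuck_queues = 1,
		.tx_complete_work = { .fn = demo_poll_work },
	};

	/* A workqueue would invoke the callback with only the member pointer. */
	hw.tx_complete_work.fn(&hw.tx_complete_work);
	return 0;
}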
@@ -2359,16 +2348,16 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
* Initialization routines *
\*************************/
-int
-ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
+int __devinit
+ath5k_init_softc(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
{
- struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_hw *hw = ah->hw;
struct ath_common *common;
int ret;
int csz;
/* Initialize driver private data */
- SET_IEEE80211_DEV(hw, sc->dev);
+ SET_IEEE80211_DEV(hw, ah->dev);
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
@@ -2391,39 +2380,30 @@ ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
* Mark the device as detached to avoid processing
* interrupts until setup is complete.
*/
- __set_bit(ATH_STAT_INVALID, sc->status);
+ __set_bit(ATH_STAT_INVALID, ah->status);
- sc->opmode = NL80211_IFTYPE_STATION;
- sc->bintval = 1000;
- mutex_init(&sc->lock);
- spin_lock_init(&sc->rxbuflock);
- spin_lock_init(&sc->txbuflock);
- spin_lock_init(&sc->block);
- spin_lock_init(&sc->irqlock);
+ ah->opmode = NL80211_IFTYPE_STATION;
+ ah->bintval = 1000;
+ mutex_init(&ah->lock);
+ spin_lock_init(&ah->rxbuflock);
+ spin_lock_init(&ah->txbuflock);
+ spin_lock_init(&ah->block);
+ spin_lock_init(&ah->irqlock);
/* Setup interrupt handler */
- ret = request_irq(sc->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
+ ret = request_irq(ah->irq, ath5k_intr, IRQF_SHARED, "ath", ah);
if (ret) {
- ATH5K_ERR(sc, "request_irq failed\n");
+ ATH5K_ERR(ah, "request_irq failed\n");
goto err;
}
- /* If we passed the test, malloc an ath5k_hw struct */
- sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
- if (!sc->ah) {
- ret = -ENOMEM;
- ATH5K_ERR(sc, "out of memory\n");
- goto err_irq;
- }
-
- sc->ah->ah_sc = sc;
- sc->ah->ah_iobase = sc->iobase;
- common = ath5k_hw_common(sc->ah);
+ common = ath5k_hw_common(ah);
common->ops = &ath5k_common_ops;
common->bus_ops = bus_ops;
- common->ah = sc->ah;
+ common->ah = ah;
common->hw = hw;
- common->priv = sc;
+ common->priv = ah;
+ common->clockrate = 40;
/*
* Cache line size is used to size and align various
@@ -2435,12 +2415,12 @@ ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
spin_lock_init(&common->cc_lock);
/* Initialize device */
- ret = ath5k_hw_init(sc);
+ ret = ath5k_hw_init(ah);
if (ret)
- goto err_free_ah;
+ goto err_irq;
/* set up multi-rate retry capabilities */
- if (sc->ah->ah_version == AR5K_AR5212) {
+ if (ah->ah_version == AR5K_AR5212) {
hw->max_rates = 4;
hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
AR5K_INIT_RETRY_LONG);
@@ -2453,77 +2433,74 @@ ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
if (ret)
goto err_ah;
- ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
- ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
- sc->ah->ah_mac_srev,
- sc->ah->ah_phy_revision);
+ ATH5K_INFO(ah, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
+ ath5k_chip_name(AR5K_VERSION_MAC, ah->ah_mac_srev),
+ ah->ah_mac_srev,
+ ah->ah_phy_revision);
- if (!sc->ah->ah_single_chip) {
+ if (!ah->ah_single_chip) {
/* Single chip radio (!RF5111) */
- if (sc->ah->ah_radio_5ghz_revision &&
- !sc->ah->ah_radio_2ghz_revision) {
+ if (ah->ah_radio_5ghz_revision &&
+ !ah->ah_radio_2ghz_revision) {
/* No 5GHz support -> report 2GHz radio */
if (!test_bit(AR5K_MODE_11A,
- sc->ah->ah_capabilities.cap_mode)) {
- ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
+ ah->ah_capabilities.cap_mode)) {
+ ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
ath5k_chip_name(AR5K_VERSION_RAD,
- sc->ah->ah_radio_5ghz_revision),
- sc->ah->ah_radio_5ghz_revision);
+ ah->ah_radio_5ghz_revision),
+ ah->ah_radio_5ghz_revision);
/* No 2GHz support (5110 and some
- * 5Ghz only cards) -> report 5Ghz radio */
+ * 5GHz only cards) -> report 5GHz radio */
} else if (!test_bit(AR5K_MODE_11B,
- sc->ah->ah_capabilities.cap_mode)) {
- ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
+ ah->ah_capabilities.cap_mode)) {
+ ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
ath5k_chip_name(AR5K_VERSION_RAD,
- sc->ah->ah_radio_5ghz_revision),
- sc->ah->ah_radio_5ghz_revision);
+ ah->ah_radio_5ghz_revision),
+ ah->ah_radio_5ghz_revision);
/* Multiband radio */
} else {
- ATH5K_INFO(sc, "RF%s multiband radio found"
+ ATH5K_INFO(ah, "RF%s multiband radio found"
" (0x%x)\n",
ath5k_chip_name(AR5K_VERSION_RAD,
- sc->ah->ah_radio_5ghz_revision),
- sc->ah->ah_radio_5ghz_revision);
+ ah->ah_radio_5ghz_revision),
+ ah->ah_radio_5ghz_revision);
}
}
/* Multi chip radio (RF5111 - RF2111) ->
* report both 2GHz/5GHz radios */
- else if (sc->ah->ah_radio_5ghz_revision &&
- sc->ah->ah_radio_2ghz_revision){
- ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
+ else if (ah->ah_radio_5ghz_revision &&
+ ah->ah_radio_2ghz_revision) {
+ ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
ath5k_chip_name(AR5K_VERSION_RAD,
- sc->ah->ah_radio_5ghz_revision),
- sc->ah->ah_radio_5ghz_revision);
- ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
+ ah->ah_radio_5ghz_revision),
+ ah->ah_radio_5ghz_revision);
+ ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
ath5k_chip_name(AR5K_VERSION_RAD,
- sc->ah->ah_radio_2ghz_revision),
- sc->ah->ah_radio_2ghz_revision);
+ ah->ah_radio_2ghz_revision),
+ ah->ah_radio_2ghz_revision);
}
}
- ath5k_debug_init_device(sc);
+ ath5k_debug_init_device(ah);
/* ready to process interrupts */
- __clear_bit(ATH_STAT_INVALID, sc->status);
+ __clear_bit(ATH_STAT_INVALID, ah->status);
return 0;
err_ah:
- ath5k_hw_deinit(sc->ah);
-err_free_ah:
- kfree(sc->ah);
+ ath5k_hw_deinit(ah);
err_irq:
- free_irq(sc->irq, sc);
+ free_irq(ah->irq, ah);
err:
return ret;
}
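
[Editor's note] The hunk above drops the separate kzalloc of struct ath5k_hw and the ah_sc back-pointer, and the error path accordingly loses its err_free_ah label: after the merge the bus glue is presumably expected to hand in a single object that already is the ath5k_hw (the allocation itself is not shown in this hunk). The standalone sketch below only mirrors the shape of that change, collapsing a two-allocation design into one, with invented names (demo_softc, demo_hw_old, demo_state_new); it is not the ieee80211 allocation call.

#include <stdio.h>
#include <stdlib.h>

/* Old shape: driver state and hardware state are two heap objects. */
struct demo_hw_old { int mac_rev; void *back_ptr; };
struct demo_softc  { struct demo_hw_old *ah; int opmode; };

/* New shape: one structure carries both roles, so one allocation suffices. */
struct demo_state_new { int mac_rev; int opmode; };

static struct demo_softc *old_attach(void)
{
	struct demo_softc *sc = calloc(1, sizeof(*sc));

	if (!sc)
		return NULL;
	sc->ah = calloc(1, sizeof(*sc->ah));	/* second allocation, can fail */
	if (!sc->ah) {
		free(sc);
		return NULL;
	}
	sc->ah->back_ptr = sc;			/* the old ah_sc back-pointer */
	return sc;
}

static struct demo_state_new *new_attach(void)
{
	return calloc(1, sizeof(struct demo_state_new));	/* one object, no back-pointer */
}

int main(void)
{
	struct demo_softc *sc = old_attach();
	struct demo_state_new *ah = new_attach();

	printf("old: %zu + %zu bytes in two objects, new: %zu bytes in one\n",
	       sizeof(struct demo_softc), sizeof(struct demo_hw_old),
	       sizeof(struct demo_state_new));
	if (sc) {
		free(sc->ah);
		free(sc);
	}
	free(ah);
	return 0;
}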
static int
-ath5k_stop_locked(struct ath5k_softc *sc)
+ath5k_stop_locked(struct ath5k_hw *ah)
{
- struct ath5k_hw *ah = sc->ah;
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
- test_bit(ATH_STAT_INVALID, sc->status));
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "invalid %u\n",
+ test_bit(ATH_STAT_INVALID, ah->status));
/*
* Shutdown the hardware and driver:
@@ -2540,37 +2517,36 @@ ath5k_stop_locked(struct ath5k_softc *sc)
* Note that some of this work is not possible if the
* hardware is gone (invalid).
*/
- ieee80211_stop_queues(sc->hw);
+ ieee80211_stop_queues(ah->hw);
- if (!test_bit(ATH_STAT_INVALID, sc->status)) {
- ath5k_led_off(sc);
+ if (!test_bit(ATH_STAT_INVALID, ah->status)) {
+ ath5k_led_off(ah);
ath5k_hw_set_imr(ah, 0);
- synchronize_irq(sc->irq);
- ath5k_rx_stop(sc);
+ synchronize_irq(ah->irq);
+ ath5k_rx_stop(ah);
ath5k_hw_dma_stop(ah);
- ath5k_drain_tx_buffs(sc);
+ ath5k_drain_tx_buffs(ah);
ath5k_hw_phy_disable(ah);
}
return 0;
}
-int
-ath5k_init_hw(struct ath5k_softc *sc)
+int ath5k_start(struct ieee80211_hw *hw)
{
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = hw->priv;
struct ath_common *common = ath5k_hw_common(ah);
int ret, i;
- mutex_lock(&sc->lock);
+ mutex_lock(&ah->lock);
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "mode %d\n", ah->opmode);
/*
* Stop anything previously setup. This is safe
* no matter this is the first time through or not.
*/
- ath5k_stop_locked(sc);
+ ath5k_stop_locked(ah);
/*
* The basic interface to setting the hardware in a good
@@ -2579,12 +2555,12 @@ ath5k_init_hw(struct ath5k_softc *sc)
* be followed by initialization of the appropriate bits
* and then setup of the interrupt mask.
*/
- sc->curchan = sc->hw->conf.channel;
- sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
+ ah->curchan = ah->hw->conf.channel;
+ ah->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
- ret = ath5k_reset(sc, NULL, false);
+ ret = ath5k_reset(ah, NULL, false);
if (ret)
goto done;
@@ -2601,29 +2577,29 @@ ath5k_init_hw(struct ath5k_softc *sc)
* rate */
ah->ah_ack_bitrate_high = true;
- for (i = 0; i < ARRAY_SIZE(sc->bslot); i++)
- sc->bslot[i] = NULL;
+ for (i = 0; i < ARRAY_SIZE(ah->bslot); i++)
+ ah->bslot[i] = NULL;
ret = 0;
done:
mmiowb();
- mutex_unlock(&sc->lock);
+ mutex_unlock(&ah->lock);
- ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
+ ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
return ret;
}
-static void stop_tasklets(struct ath5k_softc *sc)
+static void ath5k_stop_tasklets(struct ath5k_hw *ah)
{
- sc->rx_pending = false;
- sc->tx_pending = false;
- tasklet_kill(&sc->rxtq);
- tasklet_kill(&sc->txtq);
- tasklet_kill(&sc->calib);
- tasklet_kill(&sc->beacontq);
- tasklet_kill(&sc->ani_tasklet);
+ ah->rx_pending = false;
+ ah->tx_pending = false;
+ tasklet_kill(&ah->rxtq);
+ tasklet_kill(&ah->txtq);
+ tasklet_kill(&ah->calib);
+ tasklet_kill(&ah->beacontq);
+ tasklet_kill(&ah->ani_tasklet);
}
/*
@@ -2632,14 +2608,14 @@ static void stop_tasklets(struct ath5k_softc *sc)
* if another thread does a system call and the thread doing the
* stop is preempted).
*/
-int
-ath5k_stop_hw(struct ath5k_softc *sc)
+void ath5k_stop(struct ieee80211_hw *hw)
{
+ struct ath5k_hw *ah = hw->priv;
int ret;
- mutex_lock(&sc->lock);
- ret = ath5k_stop_locked(sc);
- if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
+ mutex_lock(&ah->lock);
+ ret = ath5k_stop_locked(ah);
+ if (ret == 0 && !test_bit(ATH_STAT_INVALID, ah->status)) {
/*
* Don't set the card in full sleep mode!
*
@@ -2660,82 +2636,78 @@ ath5k_stop_hw(struct ath5k_softc *sc)
* and Sam's HAL do anyway). Instead Perform a full reset
* on the device (same as initial state after attach) and
* leave it idle (keep MAC/BB on warm reset) */
- ret = ath5k_hw_on_hold(sc->ah);
+ ret = ath5k_hw_on_hold(ah);
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"putting device to sleep\n");
}
mmiowb();
- mutex_unlock(&sc->lock);
-
- stop_tasklets(sc);
+ mutex_unlock(&ah->lock);
- cancel_delayed_work_sync(&sc->tx_complete_work);
+ ath5k_stop_tasklets(ah);
- ath5k_rfkill_hw_stop(sc->ah);
+ cancel_delayed_work_sync(&ah->tx_complete_work);
- return ret;
+ ath5k_rfkill_hw_stop(ah);
}
/*
* Reset the hardware. If chan is not NULL, then also pause rx/tx
* and change to the given channel.
*
- * This should be called with sc->lock.
+ * This should be called with ah->lock.
*/
static int
-ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
+ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
bool skip_pcu)
{
- struct ath5k_hw *ah = sc->ah;
struct ath_common *common = ath5k_hw_common(ah);
int ret, ani_mode;
bool fast;
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");
ath5k_hw_set_imr(ah, 0);
- synchronize_irq(sc->irq);
- stop_tasklets(sc);
+ synchronize_irq(ah->irq);
+ ath5k_stop_tasklets(ah);
/* Save ani mode and disable ANI during
* reset. If we don't we might get false
* PHY error interrupts. */
- ani_mode = ah->ah_sc->ani_state.ani_mode;
+ ani_mode = ah->ani_state.ani_mode;
ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);
/* We are going to empty hw queues
* so we should also free any remaining
* tx buffers */
- ath5k_drain_tx_buffs(sc);
+ ath5k_drain_tx_buffs(ah);
if (chan)
- sc->curchan = chan;
+ ah->curchan = chan;
fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;
- ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, fast,
- skip_pcu);
+ ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
if (ret) {
- ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
+ ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
goto err;
}
- ret = ath5k_rx_start(sc);
+ ret = ath5k_rx_start(ah);
if (ret) {
- ATH5K_ERR(sc, "can't start recv logic\n");
+ ATH5K_ERR(ah, "can't start recv logic\n");
goto err;
}
ath5k_ani_init(ah, ani_mode);
- ah->ah_cal_next_full = jiffies;
+ ah->ah_cal_next_full = jiffies + msecs_to_jiffies(100);
ah->ah_cal_next_ani = jiffies;
ah->ah_cal_next_nf = jiffies;
ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
/* clear survey data and cycle counters */
- memset(&sc->survey, 0, sizeof(sc->survey));
+ memset(&ah->survey, 0, sizeof(ah->survey));
spin_lock_bh(&common->cc_lock);
ath_hw_cycle_counters_update(common);
memset(&common->cc_survey, 0, sizeof(common->cc_survey));
@@ -2751,12 +2723,12 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
*
* XXX needed?
*/
-/* ath5k_chan_change(sc, c); */
+/* ath5k_chan_change(ah, c); */
- ath5k_beacon_config(sc);
+ ath5k_beacon_config(ah);
/* intrs are enabled by ath5k_beacon_config */
- ieee80211_wake_queues(sc->hw);
+ ieee80211_wake_queues(ah->hw);
return 0;
err:
@@ -2765,20 +2737,19 @@ err:
static void ath5k_reset_work(struct work_struct *work)
{
- struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
+ struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
reset_work);
- mutex_lock(&sc->lock);
- ath5k_reset(sc, NULL, true);
- mutex_unlock(&sc->lock);
+ mutex_lock(&ah->lock);
+ ath5k_reset(ah, NULL, true);
+ mutex_unlock(&ah->lock);
}
-static int
+static int __devinit
ath5k_init(struct ieee80211_hw *hw)
{
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = hw->priv;
struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
struct ath5k_txq *txq;
u8 mac[ETH_ALEN] = {};
@@ -2797,26 +2768,26 @@ ath5k_init(struct ieee80211_hw *hw)
if (ret < 0)
goto err;
if (ret > 0)
- __set_bit(ATH_STAT_MRRETRY, sc->status);
+ __set_bit(ATH_STAT_MRRETRY, ah->status);
/*
* Collect the channel list. The 802.11 layer
- * is resposible for filtering this list based
+ * is responsible for filtering this list based
* on settings like the phy mode and regulatory
* domain restrictions.
*/
ret = ath5k_setup_bands(hw);
if (ret) {
- ATH5K_ERR(sc, "can't get channels\n");
+ ATH5K_ERR(ah, "can't get channels\n");
goto err;
}
/*
* Allocate tx+rx descriptors and populate the lists.
*/
- ret = ath5k_desc_alloc(sc);
+ ret = ath5k_desc_alloc(ah);
if (ret) {
- ATH5K_ERR(sc, "can't allocate descriptors\n");
+ ATH5K_ERR(ah, "can't allocate descriptors\n");
goto err;
}
@@ -2828,14 +2799,14 @@ ath5k_init(struct ieee80211_hw *hw)
*/
ret = ath5k_beaconq_setup(ah);
if (ret < 0) {
- ATH5K_ERR(sc, "can't setup a beacon xmit queue\n");
+ ATH5K_ERR(ah, "can't setup a beacon xmit queue\n");
goto err_desc;
}
- sc->bhalq = ret;
- sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
- if (IS_ERR(sc->cabq)) {
- ATH5K_ERR(sc, "can't setup cab queue\n");
- ret = PTR_ERR(sc->cabq);
+ ah->bhalq = ret;
+ ah->cabq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_CAB, 0);
+ if (IS_ERR(ah->cabq)) {
+ ATH5K_ERR(ah, "can't setup cab queue\n");
+ ret = PTR_ERR(ah->cabq);
goto err_bhal;
}
@@ -2844,97 +2815,97 @@ ath5k_init(struct ieee80211_hw *hw)
if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
/* This order matches mac80211's queue priority, so we can
* directly use the mac80211 queue number without any mapping */
- txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
+ txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
if (IS_ERR(txq)) {
- ATH5K_ERR(sc, "can't setup xmit queue\n");
+ ATH5K_ERR(ah, "can't setup xmit queue\n");
ret = PTR_ERR(txq);
goto err_queues;
}
- txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
+ txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
if (IS_ERR(txq)) {
- ATH5K_ERR(sc, "can't setup xmit queue\n");
+ ATH5K_ERR(ah, "can't setup xmit queue\n");
ret = PTR_ERR(txq);
goto err_queues;
}
- txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
+ txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
if (IS_ERR(txq)) {
- ATH5K_ERR(sc, "can't setup xmit queue\n");
+ ATH5K_ERR(ah, "can't setup xmit queue\n");
ret = PTR_ERR(txq);
goto err_queues;
}
- txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
+ txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
if (IS_ERR(txq)) {
- ATH5K_ERR(sc, "can't setup xmit queue\n");
+ ATH5K_ERR(ah, "can't setup xmit queue\n");
ret = PTR_ERR(txq);
goto err_queues;
}
hw->queues = 4;
} else {
/* older hardware (5210) can only support one data queue */
- txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
+ txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
if (IS_ERR(txq)) {
- ATH5K_ERR(sc, "can't setup xmit queue\n");
+ ATH5K_ERR(ah, "can't setup xmit queue\n");
ret = PTR_ERR(txq);
goto err_queues;
}
hw->queues = 1;
}
- tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
- tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
- tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
- tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
- tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
+ tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
+ tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
+ tasklet_init(&ah->calib, ath5k_tasklet_calibrate, (unsigned long)ah);
+ tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
+ tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);
- INIT_WORK(&sc->reset_work, ath5k_reset_work);
- INIT_DELAYED_WORK(&sc->tx_complete_work, ath5k_tx_complete_poll_work);
+ INIT_WORK(&ah->reset_work, ath5k_reset_work);
+ INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);
ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
if (ret) {
- ATH5K_ERR(sc, "unable to read address from EEPROM\n");
+ ATH5K_ERR(ah, "unable to read address from EEPROM\n");
goto err_queues;
}
SET_IEEE80211_PERM_ADDR(hw, mac);
- memcpy(&sc->lladdr, mac, ETH_ALEN);
+ memcpy(&ah->lladdr, mac, ETH_ALEN);
/* All MAC address bits matter for ACKs */
- ath5k_update_bssid_mask_and_opmode(sc, NULL);
+ ath5k_update_bssid_mask_and_opmode(ah, NULL);
regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
if (ret) {
- ATH5K_ERR(sc, "can't initialize regulatory system\n");
+ ATH5K_ERR(ah, "can't initialize regulatory system\n");
goto err_queues;
}
ret = ieee80211_register_hw(hw);
if (ret) {
- ATH5K_ERR(sc, "can't register ieee80211 hw\n");
+ ATH5K_ERR(ah, "can't register ieee80211 hw\n");
goto err_queues;
}
if (!ath_is_world_regd(regulatory))
regulatory_hint(hw->wiphy, regulatory->alpha2);
- ath5k_init_leds(sc);
+ ath5k_init_leds(ah);
- ath5k_sysfs_register(sc);
+ ath5k_sysfs_register(ah);
return 0;
err_queues:
- ath5k_txq_release(sc);
+ ath5k_txq_release(ah);
err_bhal:
- ath5k_hw_release_tx_queue(ah, sc->bhalq);
+ ath5k_hw_release_tx_queue(ah, ah->bhalq);
err_desc:
- ath5k_desc_free(sc);
+ ath5k_desc_free(ah);
err:
return ret;
}
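
[Editor's note] ath5k_init above acquires resources in sequence (descriptors, beacon queue, cab queue, data queues) and unwinds them through a ladder of labels (err_queues, err_bhal, err_desc, err) in reverse order. The standalone sketch below illustrates that goto-unwind convention with invented resources (demo_alloc_desc, demo_alloc_queue); the success path frees everything only so the demo itself does not leak, where the driver would keep the resources until deinit.

#include <stdio.h>
#include <stdlib.h>

static int *demo_alloc_desc(void)          { return malloc(64); }
static int *demo_alloc_queue(int fail)     { return fail ? NULL : malloc(16); }

/* Acquire A then B then C; on any failure release what was taken, in reverse. */
static int demo_init(int fail_last)
{
	int *desc, *bq, *dq;
	int ret = 0;

	desc = demo_alloc_desc();
	if (!desc) {
		ret = -1;
		goto err;
	}
	bq = demo_alloc_queue(0);
	if (!bq) {
		ret = -1;
		goto err_desc;
	}
	dq = demo_alloc_queue(fail_last);
	if (!dq) {
		ret = -1;
		goto err_bq;		/* unwind only what was already acquired */
	}

	printf("init ok\n");
	free(dq);			/* demo-only teardown on success */
	free(bq);
	free(desc);
	return 0;

err_bq:
	free(bq);
err_desc:
	free(desc);
err:
	return ret;
}

int main(void)
{
	printf("all allocations ok: %d\n", demo_init(0));
	printf("last allocation fails: %d\n", demo_init(1));
	return 0;
}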
void
-ath5k_deinit_softc(struct ath5k_softc *sc)
+ath5k_deinit_softc(struct ath5k_hw *ah)
{
- struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_hw *hw = ah->hw;
/*
* NB: the order of these is important:
@@ -2950,23 +2921,23 @@ ath5k_deinit_softc(struct ath5k_softc *sc)
* Other than that, it's straightforward...
*/
ieee80211_unregister_hw(hw);
- ath5k_desc_free(sc);
- ath5k_txq_release(sc);
- ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
- ath5k_unregister_leds(sc);
+ ath5k_desc_free(ah);
+ ath5k_txq_release(ah);
+ ath5k_hw_release_tx_queue(ah, ah->bhalq);
+ ath5k_unregister_leds(ah);
- ath5k_sysfs_unregister(sc);
+ ath5k_sysfs_unregister(ah);
/*
* NB: can't reclaim these until after ieee80211_ifdetach
* returns because we'll get called back to reclaim node
* state and potentially want to use them.
*/
- ath5k_hw_deinit(sc->ah);
- free_irq(sc->irq, sc);
+ ath5k_hw_deinit(ah);
+ free_irq(ah->irq, ah);
}
bool
-ath_any_vif_assoc(struct ath5k_softc *sc)
+ath5k_any_vif_assoc(struct ath5k_hw *ah)
{
struct ath5k_vif_iter_data iter_data;
iter_data.hw_macaddr = NULL;
@@ -2974,16 +2945,15 @@ ath_any_vif_assoc(struct ath5k_softc *sc)
iter_data.need_set_hw_addr = false;
iter_data.found_active = true;
- ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
+ ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
&iter_data);
return iter_data.any_assoc;
}
void
-set_beacon_filter(struct ieee80211_hw *hw, bool enable)
+ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
{
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = hw->priv;
u32 rfilt;
rfilt = ath5k_hw_get_rx_filter(ah);
if (enable)
@@ -2991,5 +2961,5 @@ set_beacon_filter(struct ieee80211_hw *hw, bool enable)
else
rfilt &= ~AR5K_RX_FILTER_BEACON;
ath5k_hw_set_rx_filter(ah, rfilt);
- sc->filter_flags = rfilt;
+ ah->filter_flags = rfilt;
}
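
[Editor's note] ath5k_set_beacon_filter above is a read-modify-write of the hardware RX filter: fetch the current mask, set or clear the beacon bit, write it back, and cache the result in filter_flags. The standalone sketch below shows that bitmask toggle with hypothetical register helpers (demo_read_filter, demo_write_filter) and an invented bit value.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_FILTER_UCAST   0x00000001u
#define DEMO_FILTER_BEACON  0x00000010u	/* stand-in for AR5K_RX_FILTER_BEACON */

static uint32_t demo_reg = DEMO_FILTER_UCAST;	/* pretend hardware register */

static uint32_t demo_read_filter(void)        { return demo_reg; }
static void     demo_write_filter(uint32_t v) { demo_reg = v; }

/* Read-modify-write of one filter bit, returning the cached value. */
static uint32_t demo_set_beacon_filter(bool enable)
{
	uint32_t rfilt = demo_read_filter();

	if (enable)
		rfilt |= DEMO_FILTER_BEACON;
	else
		rfilt &= ~DEMO_FILTER_BEACON;

	demo_write_filter(rfilt);
	return rfilt;		/* the driver stores this in ah->filter_flags */
}

int main(void)
{
	printf("enable beacons:  0x%08x\n", demo_set_beacon_filter(true));
	printf("disable beacons: 0x%08x\n", demo_set_beacon_filter(false));
	return 0;
}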
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index b294f330501..a81f28d5bdd 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -45,23 +45,13 @@
#include <linux/list.h>
#include <linux/wireless.h>
#include <linux/if_ether.h>
-#include <linux/leds.h>
#include <linux/rfkill.h>
#include <linux/workqueue.h>
#include "ath5k.h"
-#include "debug.h"
-#include "ani.h"
-
#include "../regd.h"
#include "../ath.h"
-#define ATH_RXBUF 40 /* number of RX buffers */
-#define ATH_TXBUF 200 /* number of TX buffers */
-#define ATH_BCBUF 4 /* number of beacon buffers */
-#define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */
-#define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */
-
struct ath5k_buf {
struct list_head list;
struct ath5k_desc *desc; /* virtual addr of desc */
@@ -70,95 +60,6 @@ struct ath5k_buf {
dma_addr_t skbaddr;/* physical addr of skb data */
};
-/*
- * Data transmit queue state. One of these exists for each
- * hardware transmit queue. Packets sent to us from above
- * are assigned to queues based on their priority. Not all
- * devices support a complete set of hardware transmit queues.
- * For those devices the array sc_ac2q will map multiple
- * priorities to fewer hardware queues (typically all to one
- * hardware queue).
- */
-struct ath5k_txq {
- unsigned int qnum; /* hardware q number */
- u32 *link; /* link ptr in last TX desc */
- struct list_head q; /* transmit queue */
- spinlock_t lock; /* lock on q and link */
- bool setup;
- int txq_len; /* number of queued buffers */
- int txq_max; /* max allowed num of queued buffers */
- bool txq_poll_mark;
- unsigned int txq_stuck; /* informational counter */
-};
-
-#define ATH5K_LED_MAX_NAME_LEN 31
-
-/*
- * State for LED triggers
- */
-struct ath5k_led
-{
- char name[ATH5K_LED_MAX_NAME_LEN + 1]; /* name of the LED in sysfs */
- struct ath5k_softc *sc; /* driver state */
- struct led_classdev led_dev; /* led classdev */
-};
-
-/* Rfkill */
-struct ath5k_rfkill {
- /* GPIO PIN for rfkill */
- u16 gpio;
- /* polarity of rfkill GPIO PIN */
- bool polarity;
- /* RFKILL toggle tasklet */
- struct tasklet_struct toggleq;
-};
-
-/* statistics */
-struct ath5k_statistics {
- /* antenna use */
- unsigned int antenna_rx[5]; /* frames count per antenna RX */
- unsigned int antenna_tx[5]; /* frames count per antenna TX */
-
- /* frame errors */
- unsigned int rx_all_count; /* all RX frames, including errors */
- unsigned int tx_all_count; /* all TX frames, including errors */
- unsigned int rx_bytes_count; /* all RX bytes, including errored pks
- * and the MAC headers for each packet
- */
- unsigned int tx_bytes_count; /* all TX bytes, including errored pkts
- * and the MAC headers and padding for
- * each packet.
- */
- unsigned int rxerr_crc;
- unsigned int rxerr_phy;
- unsigned int rxerr_phy_code[32];
- unsigned int rxerr_fifo;
- unsigned int rxerr_decrypt;
- unsigned int rxerr_mic;
- unsigned int rxerr_proc;
- unsigned int rxerr_jumbo;
- unsigned int txerr_retry;
- unsigned int txerr_fifo;
- unsigned int txerr_filt;
-
- /* MIB counters */
- unsigned int ack_fail;
- unsigned int rts_fail;
- unsigned int rts_ok;
- unsigned int fcs_error;
- unsigned int beacons;
-
- unsigned int mib_intr;
- unsigned int rxorn_intr;
- unsigned int rxeol_intr;
-};
-
-#if CHAN_DEBUG
-#define ATH_CHAN_MAX (26+26+26+200+200)
-#else
-#define ATH_CHAN_MAX (14+14+14+252+20)
-#endif
-
struct ath5k_vif {
bool assoc; /* are we associated or not */
enum nl80211_iftype opmode;
@@ -167,104 +68,6 @@ struct ath5k_vif {
u8 lladdr[ETH_ALEN];
};
-/* Software Carrier, keeps track of the driver state
- * associated with an instance of a device */
-struct ath5k_softc {
- struct pci_dev *pdev;
- struct device *dev; /* for dma mapping */
- int irq;
- u16 devid;
- void __iomem *iobase; /* address of the device */
- struct mutex lock; /* dev-level lock */
- struct ieee80211_hw *hw; /* IEEE 802.11 common */
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
- struct ieee80211_channel channels[ATH_CHAN_MAX];
- struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
- s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
- enum nl80211_iftype opmode;
- struct ath5k_hw *ah; /* Atheros HW */
-
-#ifdef CONFIG_ATH5K_DEBUG
- struct ath5k_dbg_info debug; /* debug info */
-#endif /* CONFIG_ATH5K_DEBUG */
-
- struct ath5k_buf *bufptr; /* allocated buffer ptr */
- struct ath5k_desc *desc; /* TX/RX descriptors */
- dma_addr_t desc_daddr; /* DMA (physical) address */
- size_t desc_len; /* size of TX/RX descriptors */
-
- DECLARE_BITMAP(status, 6);
-#define ATH_STAT_INVALID 0 /* disable hardware accesses */
-#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */
-#define ATH_STAT_PROMISC 2
-#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */
-#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
-#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */
-
- unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
- struct ieee80211_channel *curchan; /* current h/w channel */
-
- u16 nvifs;
-
- enum ath5k_int imask; /* interrupt mask copy */
-
- spinlock_t irqlock;
- bool rx_pending; /* rx tasklet pending */
- bool tx_pending; /* tx tasklet pending */
-
- u8 lladdr[ETH_ALEN];
- u8 bssidmask[ETH_ALEN];
-
- unsigned int led_pin, /* GPIO pin for driving LED */
- led_on; /* pin setting for LED on */
-
- struct work_struct reset_work; /* deferred chip reset */
-
- unsigned int rxbufsize; /* rx size based on mtu */
- struct list_head rxbuf; /* receive buffer */
- spinlock_t rxbuflock;
- u32 *rxlink; /* link ptr in last RX desc */
- struct tasklet_struct rxtq; /* rx intr tasklet */
- struct ath5k_led rx_led; /* rx led */
-
- struct list_head txbuf; /* transmit buffer */
- spinlock_t txbuflock;
- unsigned int txbuf_len; /* buf count in txbuf list */
- struct ath5k_txq txqs[AR5K_NUM_TX_QUEUES]; /* tx queues */
- struct tasklet_struct txtq; /* tx intr tasklet */
- struct ath5k_led tx_led; /* tx led */
-
- struct ath5k_rfkill rf_kill;
-
- struct tasklet_struct calib; /* calibration tasklet */
-
- spinlock_t block; /* protects beacon */
- struct tasklet_struct beacontq; /* beacon intr tasklet */
- struct list_head bcbuf; /* beacon buffer */
- struct ieee80211_vif *bslot[ATH_BCBUF];
- u16 num_ap_vifs;
- u16 num_adhoc_vifs;
- unsigned int bhalq, /* SW q for outgoing beacons */
- bmisscount, /* missed beacon transmits */
- bintval, /* beacon interval in TU */
- bsent;
- unsigned int nexttbtt; /* next beacon time in TU */
- struct ath5k_txq *cabq; /* content after beacon */
-
- int power_level; /* Requested tx power in dbm */
- bool assoc; /* associate state */
- bool enable_beacon; /* true if beacons are on */
-
- struct ath5k_statistics stats;
-
- struct ath5k_ani_state ani_state;
- struct tasklet_struct ani_tasklet; /* ANI calibration */
-
- struct delayed_work tx_complete_work;
-
- struct survey_info survey; /* collected survey info */
-};
-
struct ath5k_vif_iter_data {
const u8 *hw_macaddr;
u8 mask[ETH_ALEN];
@@ -278,9 +81,10 @@ struct ath5k_vif_iter_data {
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
-#define ath5k_hw_hasbssidmask(_ah) \
- (ath5k_hw_get_capability(_ah, AR5K_CAP_BSSIDMASK, 0, NULL) == 0)
-#define ath5k_hw_hasveol(_ah) \
- (ath5k_hw_get_capability(_ah, AR5K_CAP_VEOL, 0, NULL) == 0)
+/* Check whether BSSID mask is supported */
+#define ath5k_hw_hasbssidmask(_ah) (ah->ah_version == AR5K_AR5212)
+
+/* Check whether virtual EOL is supported */
+#define ath5k_hw_hasveol(_ah) (ah->ah_version != AR5K_AR5210)
#endif
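
[Editor's note] The replacement capability macros at the end of base.h above take a _ah parameter but test ah->ah_version in their bodies, so they only expand correctly where the surrounding function happens to have a local named ah. This is an observation about the macro text, not a change the patch makes. A parameter-hygienic form would look like the standalone sketch below, with invented version constants (DEMO_AR5210, DEMO_AR5212).

#include <stdio.h>

enum demo_version { DEMO_AR5210, DEMO_AR5211, DEMO_AR5212 };

struct demo_hw { enum demo_version ah_version; };

/* Hygienic variants: the body uses the parameter it was given. */
#define demo_hasbssidmask(_ah) ((_ah)->ah_version == DEMO_AR5212)
#define demo_hasveol(_ah)      ((_ah)->ah_version != DEMO_AR5210)

int main(void)
{
	struct demo_hw old = { .ah_version = DEMO_AR5210 };
	struct demo_hw new = { .ah_version = DEMO_AR5212 };

	/* Works regardless of what the local variable is called. */
	printf("AR5210: bssidmask=%d veol=%d\n",
	       demo_hasbssidmask(&old), demo_hasveol(&old));
	printf("AR5212: bssidmask=%d veol=%d\n",
	       demo_hasbssidmask(&new), demo_hasveol(&new));
	return 0;
}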
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 7dd88e1c3ff..eefe670e28a 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -52,8 +52,8 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
__set_bit(AR5K_MODE_11A, caps->cap_mode);
} else {
/*
- * XXX The tranceiver supports frequencies from 4920 to 6100GHz
- * XXX and from 2312 to 2732GHz. There are problems with the
+ * XXX The transceiver supports frequencies from 4920 to 6100MHz
+ * XXX and from 2312 to 2732MHz. There are problems with the
* XXX current ieee80211 implementation because the IEEE
* XXX channel mapping does not support negative channel
* XXX numbers (2312MHz is channel -19). Of course, this
@@ -112,51 +112,6 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
return 0;
}
-/* Main function used by the driver part to check caps */
-int ath5k_hw_get_capability(struct ath5k_hw *ah,
- enum ath5k_capability_type cap_type,
- u32 capability, u32 *result)
-{
- switch (cap_type) {
- case AR5K_CAP_NUM_TXQUEUES:
- if (result) {
- if (ah->ah_version == AR5K_AR5210)
- *result = AR5K_NUM_TX_QUEUES_NOQCU;
- else
- *result = AR5K_NUM_TX_QUEUES;
- goto yes;
- }
- case AR5K_CAP_VEOL:
- goto yes;
- case AR5K_CAP_COMPRESSION:
- if (ah->ah_version == AR5K_AR5212)
- goto yes;
- else
- goto no;
- case AR5K_CAP_BURST:
- goto yes;
- case AR5K_CAP_TPC:
- goto yes;
- case AR5K_CAP_BSSIDMASK:
- if (ah->ah_version == AR5K_AR5212)
- goto yes;
- else
- goto no;
- case AR5K_CAP_XR:
- if (ah->ah_version == AR5K_AR5212)
- goto yes;
- else
- goto no;
- default:
- goto no;
- }
-
-no:
- return -EINVAL;
-yes:
- return 0;
-}
-
/*
* TODO: Following functions should be part of a new function
* set_capability
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 0bf7313b8a1..ccca724de17 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -157,10 +157,10 @@ static void *reg_next(struct seq_file *seq, void *p, loff_t *pos)
static int reg_show(struct seq_file *seq, void *p)
{
- struct ath5k_softc *sc = seq->private;
+ struct ath5k_hw *ah = seq->private;
struct reg *r = p;
seq_printf(seq, "%-25s0x%08x\n", r->name,
- ath5k_hw_reg_read(sc->ah, r->addr));
+ ath5k_hw_reg_read(ah, r->addr));
return 0;
}
@@ -197,43 +197,42 @@ static const struct file_operations fops_registers = {
static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath5k_softc *sc = file->private_data;
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = file->private_data;
char buf[500];
unsigned int len = 0;
unsigned int v;
u64 tsf;
- v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
- len += snprintf(buf+len, sizeof(buf)-len,
+ v = ath5k_hw_reg_read(ah, AR5K_BEACON);
+ len += snprintf(buf + len, sizeof(buf) - len,
"%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
"AR5K_BEACON", v, v & AR5K_BEACON_PERIOD,
(v & AR5K_BEACON_TIM) >> AR5K_BEACON_TIM_S);
- len += snprintf(buf+len, sizeof(buf)-len, "%-24s0x%08x\n",
- "AR5K_LAST_TSTP", ath5k_hw_reg_read(sc->ah, AR5K_LAST_TSTP));
+ len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n",
+ "AR5K_LAST_TSTP", ath5k_hw_reg_read(ah, AR5K_LAST_TSTP));
- len += snprintf(buf+len, sizeof(buf)-len, "%-24s0x%08x\n\n",
- "AR5K_BEACON_CNT", ath5k_hw_reg_read(sc->ah, AR5K_BEACON_CNT));
+ len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n\n",
+ "AR5K_BEACON_CNT", ath5k_hw_reg_read(ah, AR5K_BEACON_CNT));
- v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER0);
- len += snprintf(buf+len, sizeof(buf)-len, "%-24s0x%08x\tTU: %08x\n",
+ v = ath5k_hw_reg_read(ah, AR5K_TIMER0);
+ len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER0 (TBTT)", v, v);
- v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER1);
- len += snprintf(buf+len, sizeof(buf)-len, "%-24s0x%08x\tTU: %08x\n",
+ v = ath5k_hw_reg_read(ah, AR5K_TIMER1);
+ len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER1 (DMA)", v, v >> 3);
- v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER2);
- len += snprintf(buf+len, sizeof(buf)-len, "%-24s0x%08x\tTU: %08x\n",
+ v = ath5k_hw_reg_read(ah, AR5K_TIMER2);
+ len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER2 (SWBA)", v, v >> 3);
- v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER3);
- len += snprintf(buf+len, sizeof(buf)-len, "%-24s0x%08x\tTU: %08x\n",
+ v = ath5k_hw_reg_read(ah, AR5K_TIMER3);
+ len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER3 (ATIM)", v, v);
- tsf = ath5k_hw_get_tsf64(sc->ah);
- len += snprintf(buf+len, sizeof(buf)-len,
+ tsf = ath5k_hw_get_tsf64(ah);
+ len += snprintf(buf + len, sizeof(buf) - len,
"TSF\t\t0x%016llx\tTU: %08x\n",
(unsigned long long)tsf, TSF_TO_TU(tsf));
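
[Editor's note] The debugfs readers above build their output with repeated len += snprintf(buf + len, sizeof(buf) - len, ...) calls; this hunk only adjusts spacing and the sc-to-ah rename, but the pattern deserves a note because snprintf returns the length it wanted to write, so len can exceed the buffer on truncation and is clamped before the text is copied out (the driver's later "if (len > sizeof(buf))" check). The standalone sketch below uses a buffer large enough that no truncation occurs and invented field names.

#include <stdio.h>

int main(void)
{
	char buf[256];
	unsigned int len = 0;
	unsigned int i;

	/* Each call appends at buf + len and reports how much it added. */
	len += snprintf(buf + len, sizeof(buf) - len, "%-16s0x%08x\n",
			"DEMO_BEACON", 0x1234u);
	for (i = 0; i < 4; i++)
		len += snprintf(buf + len, sizeof(buf) - len,
				"timer%u: %u TU\n", i, 100 * i);

	/*
	 * snprintf returns the length it wanted to write, so on truncation
	 * len could exceed the buffer; clamp before handing it on, as the
	 * driver does before copying the text to user space.
	 */
	if (len > sizeof(buf))
		len = sizeof(buf);

	fwrite(buf, 1, len, stdout);
	return 0;
}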
@@ -247,8 +246,7 @@ static ssize_t write_file_beacon(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct ath5k_softc *sc = file->private_data;
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = file->private_data;
char buf[20];
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
@@ -279,9 +277,9 @@ static ssize_t write_file_reset(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct ath5k_softc *sc = file->private_data;
- ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
- ieee80211_queue_work(sc->hw, &sc->reset_work);
+ struct ath5k_hw *ah = file->private_data;
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
+ ieee80211_queue_work(ah->hw, &ah->reset_work);
return count;
}
@@ -318,23 +316,23 @@ static const struct {
static ssize_t read_file_debug(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath5k_softc *sc = file->private_data;
+ struct ath5k_hw *ah = file->private_data;
char buf[700];
unsigned int len = 0;
unsigned int i;
- len += snprintf(buf+len, sizeof(buf)-len,
- "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
for (i = 0; i < ARRAY_SIZE(dbg_info) - 1; i++) {
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"%10s %c 0x%08x - %s\n", dbg_info[i].name,
- sc->debug.level & dbg_info[i].level ? '+' : ' ',
+ ah->debug.level & dbg_info[i].level ? '+' : ' ',
dbg_info[i].level, dbg_info[i].desc);
}
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"%10s %c 0x%08x - %s\n", dbg_info[i].name,
- sc->debug.level == dbg_info[i].level ? '+' : ' ',
+ ah->debug.level == dbg_info[i].level ? '+' : ' ',
dbg_info[i].level, dbg_info[i].desc);
if (len > sizeof(buf))
@@ -347,7 +345,7 @@ static ssize_t write_file_debug(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct ath5k_softc *sc = file->private_data;
+ struct ath5k_hw *ah = file->private_data;
unsigned int i;
char buf[20];
@@ -357,7 +355,7 @@ static ssize_t write_file_debug(struct file *file,
for (i = 0; i < ARRAY_SIZE(dbg_info); i++) {
if (strncmp(buf, dbg_info[i].name,
strlen(dbg_info[i].name)) == 0) {
- sc->debug.level ^= dbg_info[i].level; /* toggle bit */
+ ah->debug.level ^= dbg_info[i].level; /* toggle bit */
break;
}
}
@@ -378,66 +376,66 @@ static const struct file_operations fops_debug = {
static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath5k_softc *sc = file->private_data;
+ struct ath5k_hw *ah = file->private_data;
char buf[700];
unsigned int len = 0;
unsigned int i;
unsigned int v;
- len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
- sc->ah->ah_ant_mode);
- len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
- sc->ah->ah_def_ant);
- len += snprintf(buf+len, sizeof(buf)-len, "tx antenna\t%d\n",
- sc->ah->ah_tx_ant);
+ len += snprintf(buf + len, sizeof(buf) - len, "antenna mode\t%d\n",
+ ah->ah_ant_mode);
+ len += snprintf(buf + len, sizeof(buf) - len, "default antenna\t%d\n",
+ ah->ah_def_ant);
+ len += snprintf(buf + len, sizeof(buf) - len, "tx antenna\t%d\n",
+ ah->ah_tx_ant);
- len += snprintf(buf+len, sizeof(buf)-len, "\nANTENNA\t\tRX\tTX\n");
- for (i = 1; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len, "\nANTENNA\t\tRX\tTX\n");
+ for (i = 1; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
+ len += snprintf(buf + len, sizeof(buf) - len,
"[antenna %d]\t%d\t%d\n",
- i, sc->stats.antenna_rx[i], sc->stats.antenna_tx[i]);
+ i, ah->stats.antenna_rx[i], ah->stats.antenna_tx[i]);
}
- len += snprintf(buf+len, sizeof(buf)-len, "[invalid]\t%d\t%d\n",
- sc->stats.antenna_rx[0], sc->stats.antenna_tx[0]);
+ len += snprintf(buf + len, sizeof(buf) - len, "[invalid]\t%d\t%d\n",
+ ah->stats.antenna_rx[0], ah->stats.antenna_tx[0]);
- v = ath5k_hw_reg_read(sc->ah, AR5K_DEFAULT_ANTENNA);
- len += snprintf(buf+len, sizeof(buf)-len,
+ v = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
+ len += snprintf(buf + len, sizeof(buf) - len,
"\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);
- v = ath5k_hw_reg_read(sc->ah, AR5K_STA_ID1);
- len += snprintf(buf+len, sizeof(buf)-len,
+ v = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
+ len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
(v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_DESC_ANTENNA\t%d\n",
(v & AR5K_STA_ID1_DESC_ANTENNA) != 0);
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n",
(v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0);
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
(v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);
- v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_AGCCTL);
- len += snprintf(buf+len, sizeof(buf)-len,
+ v = ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL);
+ len += snprintf(buf + len, sizeof(buf) - len,
"\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
(v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);
- v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_RESTART);
- len += snprintf(buf+len, sizeof(buf)-len,
+ v = ath5k_hw_reg_read(ah, AR5K_PHY_RESTART);
+ len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
(v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S);
- v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_FAST_ANT_DIV);
- len += snprintf(buf+len, sizeof(buf)-len,
+ v = ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ANT_DIV);
+ len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
(v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);
- v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
- len += snprintf(buf+len, sizeof(buf)-len,
+ v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
+ len += snprintf(buf + len, sizeof(buf) - len,
"\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v);
- v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
- len += snprintf(buf+len, sizeof(buf)-len,
+ v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
+ len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v);
if (len > sizeof(buf))
@@ -450,7 +448,7 @@ static ssize_t write_file_antenna(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct ath5k_softc *sc = file->private_data;
+ struct ath5k_hw *ah = file->private_data;
unsigned int i;
char buf[20];
@@ -458,18 +456,18 @@ static ssize_t write_file_antenna(struct file *file,
return -EFAULT;
if (strncmp(buf, "diversity", 9) == 0) {
- ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
+ ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
printk(KERN_INFO "ath5k debug: enable diversity\n");
} else if (strncmp(buf, "fixed-a", 7) == 0) {
- ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
+ ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
printk(KERN_INFO "ath5k debugfs: fixed antenna A\n");
} else if (strncmp(buf, "fixed-b", 7) == 0) {
- ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
+ ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
printk(KERN_INFO "ath5k debug: fixed antenna B\n");
} else if (strncmp(buf, "clear", 5) == 0) {
- for (i = 0; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
- sc->stats.antenna_rx[i] = 0;
- sc->stats.antenna_tx[i] = 0;
+ for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
+ ah->stats.antenna_rx[i] = 0;
+ ah->stats.antenna_tx[i] = 0;
}
printk(KERN_INFO "ath5k debug: cleared antenna stats\n");
}
@@ -489,42 +487,42 @@ static const struct file_operations fops_antenna = {
static ssize_t read_file_misc(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath5k_softc *sc = file->private_data;
+ struct ath5k_hw *ah = file->private_data;
char buf[700];
unsigned int len = 0;
- u32 filt = ath5k_hw_get_rx_filter(sc->ah);
+ u32 filt = ath5k_hw_get_rx_filter(ah);
- len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
- sc->bssidmask);
- len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
+ len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
+ ah->bssidmask);
+ len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
filt);
if (filt & AR5K_RX_FILTER_UCAST)
- len += snprintf(buf+len, sizeof(buf)-len, " UCAST");
+ len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
if (filt & AR5K_RX_FILTER_MCAST)
- len += snprintf(buf+len, sizeof(buf)-len, " MCAST");
+ len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
if (filt & AR5K_RX_FILTER_BCAST)
- len += snprintf(buf+len, sizeof(buf)-len, " BCAST");
+ len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
if (filt & AR5K_RX_FILTER_CONTROL)
- len += snprintf(buf+len, sizeof(buf)-len, " CONTROL");
+ len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
if (filt & AR5K_RX_FILTER_BEACON)
- len += snprintf(buf+len, sizeof(buf)-len, " BEACON");
+ len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
if (filt & AR5K_RX_FILTER_PROM)
- len += snprintf(buf+len, sizeof(buf)-len, " PROM");
+ len += snprintf(buf + len, sizeof(buf) - len, " PROM");
if (filt & AR5K_RX_FILTER_XRPOLL)
- len += snprintf(buf+len, sizeof(buf)-len, " XRPOLL");
+ len += snprintf(buf + len, sizeof(buf) - len, " XRPOLL");
if (filt & AR5K_RX_FILTER_PROBEREQ)
- len += snprintf(buf+len, sizeof(buf)-len, " PROBEREQ");
+ len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
if (filt & AR5K_RX_FILTER_PHYERR_5212)
- len += snprintf(buf+len, sizeof(buf)-len, " PHYERR-5212");
+ len += snprintf(buf + len, sizeof(buf) - len, " PHYERR-5212");
if (filt & AR5K_RX_FILTER_RADARERR_5212)
- len += snprintf(buf+len, sizeof(buf)-len, " RADARERR-5212");
+ len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5212");
if (filt & AR5K_RX_FILTER_PHYERR_5211)
- snprintf(buf+len, sizeof(buf)-len, " PHYERR-5211");
+ snprintf(buf + len, sizeof(buf) - len, " PHYERR-5211");
if (filt & AR5K_RX_FILTER_RADARERR_5211)
- len += snprintf(buf+len, sizeof(buf)-len, " RADARERR-5211");
+ len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5211");
- len += snprintf(buf+len, sizeof(buf)-len, "\nopmode: %s (%d)\n",
- ath_opmode_to_string(sc->opmode), sc->opmode);
+ len += snprintf(buf + len, sizeof(buf) - len, "\nopmode: %s (%d)\n",
+ ath_opmode_to_string(ah->opmode), ah->opmode);
if (len > sizeof(buf))
len = sizeof(buf);
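
read_file_misc() decodes the hardware RX filter word into human-readable flag names, one if (filt & FLAG) append per bit. The same decoding can be written table-driven; here is a hedged userspace sketch of that idea, with the bit values invented for the example rather than taken from reg.h.

	#include <stdio.h>

	#define RX_FILTER_UCAST		0x0001
	#define RX_FILTER_MCAST		0x0002
	#define RX_FILTER_BCAST		0x0004
	#define RX_FILTER_BEACON	0x0010

	static const struct {
		unsigned int bit;
		const char *name;
	} filter_names[] = {
		{ RX_FILTER_UCAST,  "UCAST"  },
		{ RX_FILTER_MCAST,  "MCAST"  },
		{ RX_FILTER_BCAST,  "BCAST"  },
		{ RX_FILTER_BEACON, "BEACON" },
	};

	int main(void)
	{
		unsigned int filt = RX_FILTER_UCAST | RX_FILTER_BEACON;
		unsigned int i;

		printf("filter-flags: 0x%x", filt);
		for (i = 0; i < sizeof(filter_names) / sizeof(filter_names[0]); i++)
			if (filt & filter_names[i].bit)
				printf(" %s", filter_names[i].name);
		printf("\n");
		return 0;
	}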
@@ -544,71 +542,71 @@ static const struct file_operations fops_misc = {
static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath5k_softc *sc = file->private_data;
- struct ath5k_statistics *st = &sc->stats;
+ struct ath5k_hw *ah = file->private_data;
+ struct ath5k_statistics *st = &ah->stats;
char buf[700];
unsigned int len = 0;
int i;
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"RX\n---------------------\n");
- len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
+ len += snprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
st->rxerr_crc,
st->rx_all_count > 0 ?
- st->rxerr_crc*100/st->rx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%u\t(%u%%)\n",
+ st->rxerr_crc * 100 / st->rx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "PHY\t%u\t(%u%%)\n",
st->rxerr_phy,
st->rx_all_count > 0 ?
- st->rxerr_phy*100/st->rx_all_count : 0);
+ st->rxerr_phy * 100 / st->rx_all_count : 0);
for (i = 0; i < 32; i++) {
if (st->rxerr_phy_code[i])
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
" phy_err[%u]\t%u\n",
i, st->rxerr_phy_code[i]);
}
- len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%u\t(%u%%)\n",
+ len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
st->rxerr_fifo,
st->rx_all_count > 0 ?
- st->rxerr_fifo*100/st->rx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%u\t(%u%%)\n",
+ st->rxerr_fifo * 100 / st->rx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "decrypt\t%u\t(%u%%)\n",
st->rxerr_decrypt,
st->rx_all_count > 0 ?
- st->rxerr_decrypt*100/st->rx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%u\t(%u%%)\n",
+ st->rxerr_decrypt * 100 / st->rx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "MIC\t%u\t(%u%%)\n",
st->rxerr_mic,
st->rx_all_count > 0 ?
- st->rxerr_mic*100/st->rx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "process\t%u\t(%u%%)\n",
+ st->rxerr_mic * 100 / st->rx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "process\t%u\t(%u%%)\n",
st->rxerr_proc,
st->rx_all_count > 0 ?
- st->rxerr_proc*100/st->rx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%u\t(%u%%)\n",
+ st->rxerr_proc * 100 / st->rx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "jumbo\t%u\t(%u%%)\n",
st->rxerr_jumbo,
st->rx_all_count > 0 ?
- st->rxerr_jumbo*100/st->rx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%u]\n",
+ st->rxerr_jumbo * 100 / st->rx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "[RX all\t%u]\n",
st->rx_all_count);
- len += snprintf(buf+len, sizeof(buf)-len, "RX-all-bytes\t%u\n",
+ len += snprintf(buf + len, sizeof(buf) - len, "RX-all-bytes\t%u\n",
st->rx_bytes_count);
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"\nTX\n---------------------\n");
- len += snprintf(buf+len, sizeof(buf)-len, "retry\t%u\t(%u%%)\n",
+ len += snprintf(buf + len, sizeof(buf) - len, "retry\t%u\t(%u%%)\n",
st->txerr_retry,
st->tx_all_count > 0 ?
- st->txerr_retry*100/st->tx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%u\t(%u%%)\n",
+ st->txerr_retry * 100 / st->tx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
st->txerr_fifo,
st->tx_all_count > 0 ?
- st->txerr_fifo*100/st->tx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "filter\t%u\t(%u%%)\n",
+ st->txerr_fifo * 100 / st->tx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "filter\t%u\t(%u%%)\n",
st->txerr_filt,
st->tx_all_count > 0 ?
- st->txerr_filt*100/st->tx_all_count : 0);
- len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%u]\n",
+ st->txerr_filt * 100 / st->tx_all_count : 0);
+ len += snprintf(buf + len, sizeof(buf) - len, "[TX all\t%u]\n",
st->tx_all_count);
- len += snprintf(buf+len, sizeof(buf)-len, "TX-all-bytes\t%u\n",
+ len += snprintf(buf + len, sizeof(buf) - len, "TX-all-bytes\t%u\n",
st->tx_bytes_count);
if (len > sizeof(buf))
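
Every error counter above is printed together with its share of all received or transmitted frames, guarded by rx_all_count > 0 ? ... : 0 so an idle interface does not divide by zero. A standalone sketch of that guard, with the field names shortened for the example:

	#include <stdio.h>

	struct frame_stats {
		unsigned int rx_all;
		unsigned int rxerr_crc;
		unsigned int rxerr_fifo;
	};

	/* Integer percentage with a divide-by-zero guard, mirroring the
	 * expressions in read_file_frameerrors(). Note that part * 100 can
	 * overflow for very large 32-bit counters. */
	static unsigned int pct(unsigned int part, unsigned int total)
	{
		return total > 0 ? part * 100 / total : 0;
	}

	int main(void)
	{
		struct frame_stats st = { .rx_all = 2000, .rxerr_crc = 37, .rxerr_fifo = 0 };

		printf("CRC\t%u\t(%u%%)\n",  st.rxerr_crc,  pct(st.rxerr_crc,  st.rx_all));
		printf("FIFO\t%u\t(%u%%)\n", st.rxerr_fifo, pct(st.rxerr_fifo, st.rx_all));
		return 0;
	}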
@@ -621,8 +619,8 @@ static ssize_t write_file_frameerrors(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct ath5k_softc *sc = file->private_data;
- struct ath5k_statistics *st = &sc->stats;
+ struct ath5k_hw *ah = file->private_data;
+ struct ath5k_statistics *st = &ah->stats;
char buf[20];
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
@@ -660,100 +658,104 @@ static const struct file_operations fops_frameerrors = {
static ssize_t read_file_ani(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath5k_softc *sc = file->private_data;
- struct ath5k_statistics *st = &sc->stats;
- struct ath5k_ani_state *as = &sc->ani_state;
+ struct ath5k_hw *ah = file->private_data;
+ struct ath5k_statistics *st = &ah->stats;
+ struct ath5k_ani_state *as = &ah->ani_state;
char buf[700];
unsigned int len = 0;
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"HW has PHY error counters:\t%s\n",
- sc->ah->ah_capabilities.cap_has_phyerr_counters ?
+ ah->ah_capabilities.cap_has_phyerr_counters ?
"yes" : "no");
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"HW max spur immunity level:\t%d\n",
as->max_spur_level);
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"\nANI state\n--------------------------------------------\n");
- len += snprintf(buf+len, sizeof(buf)-len, "operating mode:\t\t\t");
+ len += snprintf(buf + len, sizeof(buf) - len, "operating mode:\t\t\t");
switch (as->ani_mode) {
case ATH5K_ANI_MODE_OFF:
- len += snprintf(buf+len, sizeof(buf)-len, "OFF\n");
+ len += snprintf(buf + len, sizeof(buf) - len, "OFF\n");
break;
case ATH5K_ANI_MODE_MANUAL_LOW:
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"MANUAL LOW\n");
break;
case ATH5K_ANI_MODE_MANUAL_HIGH:
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"MANUAL HIGH\n");
break;
case ATH5K_ANI_MODE_AUTO:
- len += snprintf(buf+len, sizeof(buf)-len, "AUTO\n");
+ len += snprintf(buf + len, sizeof(buf) - len, "AUTO\n");
break;
default:
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"??? (not good)\n");
break;
}
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"noise immunity level:\t\t%d\n",
as->noise_imm_level);
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"spur immunity level:\t\t%d\n",
as->spur_level);
- len += snprintf(buf+len, sizeof(buf)-len, "firstep level:\t\t\t%d\n",
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "firstep level:\t\t\t%d\n",
as->firstep_level);
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"OFDM weak signal detection:\t%s\n",
as->ofdm_weak_sig ? "on" : "off");
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"CCK weak signal detection:\t%s\n",
as->cck_weak_sig ? "on" : "off");
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"\nMIB INTERRUPTS:\t\t%u\n",
st->mib_intr);
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"beacon RSSI average:\t%d\n",
- (int)ewma_read(&sc->ah->ah_beacon_rssi_avg));
+ (int)ewma_read(&ah->ah_beacon_rssi_avg));
#define CC_PRINT(_struct, _field) \
_struct._field, \
_struct.cycles > 0 ? \
- _struct._field*100/_struct.cycles : 0
+ _struct._field * 100 / _struct.cycles : 0
- len += snprintf(buf+len, sizeof(buf)-len, "profcnt tx\t\t%u\t(%d%%)\n",
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "profcnt tx\t\t%u\t(%d%%)\n",
CC_PRINT(as->last_cc, tx_frame));
- len += snprintf(buf+len, sizeof(buf)-len, "profcnt rx\t\t%u\t(%d%%)\n",
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "profcnt rx\t\t%u\t(%d%%)\n",
CC_PRINT(as->last_cc, rx_frame));
- len += snprintf(buf+len, sizeof(buf)-len, "profcnt busy\t\t%u\t(%d%%)\n",
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "profcnt busy\t\t%u\t(%d%%)\n",
CC_PRINT(as->last_cc, rx_busy));
#undef CC_PRINT
- len += snprintf(buf+len, sizeof(buf)-len, "profcnt cycles\t\t%u\n",
+ len += snprintf(buf + len, sizeof(buf) - len, "profcnt cycles\t\t%u\n",
as->last_cc.cycles);
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"listen time\t\t%d\tlast: %d\n",
as->listen_time, as->last_listen);
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"OFDM errors\t\t%u\tlast: %u\tsum: %u\n",
as->ofdm_errors, as->last_ofdm_errors,
as->sum_ofdm_errors);
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"CCK errors\t\t%u\tlast: %u\tsum: %u\n",
as->cck_errors, as->last_cck_errors,
as->sum_cck_errors);
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
- ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1),
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1),
ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
- ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1)));
- len += snprintf(buf+len, sizeof(buf)-len,
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1)));
+ len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
- ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2),
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2),
ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
- ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2)));
+ ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2)));
if (len > sizeof(buf))
len = sizeof(buf);
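
The ANI dump reports the beacon RSSI as an exponentially weighted moving average read back via ewma_read(). As a rough userspace-only illustration of what such an average does (this is not the kernel's struct ewma implementation, which uses fixed-point scaling; the weight and samples below are assumptions for the example):

	#include <stdio.h>

	/* Tiny EWMA sketch: avg += (sample - avg) / weight.
	 * Larger weight -> smoother, slower-moving average. */
	struct ewma {
		long avg;
		long weight;
	};

	static void ewma_add_sample(struct ewma *e, long sample)
	{
		e->avg += (sample - e->avg) / e->weight;
	}

	int main(void)
	{
		struct ewma rssi = { .avg = -95, .weight = 8 };
		long samples[] = { -60, -62, -58, -61, -90, -59 };
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
			ewma_add_sample(&rssi, samples[i]);
			printf("sample %ld\tavg %ld\n", samples[i], rssi.avg);
		}
		return 0;
	}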
@@ -765,42 +767,42 @@ static ssize_t write_file_ani(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct ath5k_softc *sc = file->private_data;
+ struct ath5k_hw *ah = file->private_data;
char buf[20];
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
return -EFAULT;
if (strncmp(buf, "sens-low", 8) == 0) {
- ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_HIGH);
+ ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH);
} else if (strncmp(buf, "sens-high", 9) == 0) {
- ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_LOW);
+ ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_LOW);
} else if (strncmp(buf, "ani-off", 7) == 0) {
- ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_OFF);
+ ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);
} else if (strncmp(buf, "ani-on", 6) == 0) {
- ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_AUTO);
+ ath5k_ani_init(ah, ATH5K_ANI_MODE_AUTO);
} else if (strncmp(buf, "noise-low", 9) == 0) {
- ath5k_ani_set_noise_immunity_level(sc->ah, 0);
+ ath5k_ani_set_noise_immunity_level(ah, 0);
} else if (strncmp(buf, "noise-high", 10) == 0) {
- ath5k_ani_set_noise_immunity_level(sc->ah,
+ ath5k_ani_set_noise_immunity_level(ah,
ATH5K_ANI_MAX_NOISE_IMM_LVL);
} else if (strncmp(buf, "spur-low", 8) == 0) {
- ath5k_ani_set_spur_immunity_level(sc->ah, 0);
+ ath5k_ani_set_spur_immunity_level(ah, 0);
} else if (strncmp(buf, "spur-high", 9) == 0) {
- ath5k_ani_set_spur_immunity_level(sc->ah,
- sc->ani_state.max_spur_level);
+ ath5k_ani_set_spur_immunity_level(ah,
+ ah->ani_state.max_spur_level);
} else if (strncmp(buf, "fir-low", 7) == 0) {
- ath5k_ani_set_firstep_level(sc->ah, 0);
+ ath5k_ani_set_firstep_level(ah, 0);
} else if (strncmp(buf, "fir-high", 8) == 0) {
- ath5k_ani_set_firstep_level(sc->ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
+ ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
} else if (strncmp(buf, "ofdm-off", 8) == 0) {
- ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, false);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
} else if (strncmp(buf, "ofdm-on", 7) == 0) {
- ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, true);
+ ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
} else if (strncmp(buf, "cck-off", 7) == 0) {
- ath5k_ani_set_cck_weak_signal_detection(sc->ah, false);
+ ath5k_ani_set_cck_weak_signal_detection(ah, false);
} else if (strncmp(buf, "cck-on", 6) == 0) {
- ath5k_ani_set_cck_weak_signal_detection(sc->ah, true);
+ ath5k_ani_set_cck_weak_signal_detection(ah, true);
}
return count;
}
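
write_file_ani() accepts short keyword commands ("ani-on", "spur-high", ...) and maps each to an ANI call through a chain of strncmp() prefix checks. The same mapping can be expressed as a lookup table; this is a hedged refactoring sketch, not the driver's code, and the handlers are hypothetical stand-ins for the ath5k_ani_* calls.

	#include <stdio.h>
	#include <string.h>

	static void ani_auto(void)   { puts("ANI: auto"); }
	static void ani_off(void)    { puts("ANI: off"); }
	static void noise_high(void) { puts("noise immunity: max"); }

	static const struct {
		const char *cmd;
		void (*handler)(void);
	} ani_cmds[] = {
		{ "ani-off",    ani_off },
		{ "ani-on",     ani_auto },
		{ "noise-high", noise_high },
	};

	/* Matched by prefix, mirroring the strncmp chain in write_file_ani() */
	static void handle_ani_cmd(const char *buf)
	{
		unsigned int i;

		for (i = 0; i < sizeof(ani_cmds) / sizeof(ani_cmds[0]); i++) {
			if (strncmp(buf, ani_cmds[i].cmd,
				    strlen(ani_cmds[i].cmd)) == 0) {
				ani_cmds[i].handler();
				return;
			}
		}
	}

	int main(void)
	{
		handle_ani_cmd("ani-on\n");	/* trailing newline from echo is ignored */
		handle_ani_cmd("noise-high\n");
		return 0;
	}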
@@ -819,7 +821,7 @@ static const struct file_operations fops_ani = {
static ssize_t read_file_queue(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct ath5k_softc *sc = file->private_data;
+ struct ath5k_hw *ah = file->private_data;
char buf[700];
unsigned int len = 0;
@@ -827,13 +829,13 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
struct ath5k_buf *bf, *bf0;
int i, n;
- len += snprintf(buf+len, sizeof(buf)-len,
- "available txbuffers: %d\n", sc->txbuf_len);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "available txbuffers: %d\n", ah->txbuf_len);
- for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
- txq = &sc->txqs[i];
+ for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
+ txq = &ah->txqs[i];
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
"%02d: %ssetup\n", i, txq->setup ? "" : "not ");
if (!txq->setup)
@@ -845,9 +847,9 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
n++;
spin_unlock_bh(&txq->lock);
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
" len: %d bufs: %d\n", txq->txq_len, n);
- len += snprintf(buf+len, sizeof(buf)-len,
+ len += snprintf(buf + len, sizeof(buf) - len,
" stuck: %d\n", txq->txq_stuck);
}
@@ -861,16 +863,16 @@ static ssize_t write_file_queue(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct ath5k_softc *sc = file->private_data;
+ struct ath5k_hw *ah = file->private_data;
char buf[20];
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
return -EFAULT;
if (strncmp(buf, "start", 5) == 0)
- ieee80211_wake_queues(sc->hw);
+ ieee80211_wake_queues(ah->hw);
else if (strncmp(buf, "stop", 4) == 0)
- ieee80211_stop_queues(sc->hw);
+ ieee80211_stop_queues(ah->hw);
return count;
}
@@ -886,54 +888,57 @@ static const struct file_operations fops_queue = {
void
-ath5k_debug_init_device(struct ath5k_softc *sc)
+ath5k_debug_init_device(struct ath5k_hw *ah)
{
struct dentry *phydir;
- sc->debug.level = ath5k_debug;
+ ah->debug.level = ath5k_debug;
- phydir = debugfs_create_dir("ath5k", sc->hw->wiphy->debugfsdir);
+ phydir = debugfs_create_dir("ath5k", ah->hw->wiphy->debugfsdir);
if (!phydir)
- return;
+ return;
- debugfs_create_file("debug", S_IWUSR | S_IRUSR, phydir, sc,
+ debugfs_create_file("debug", S_IWUSR | S_IRUSR, phydir, ah,
&fops_debug);
- debugfs_create_file("registers", S_IRUSR, phydir, sc, &fops_registers);
+ debugfs_create_file("registers", S_IRUSR, phydir, ah, &fops_registers);
- debugfs_create_file("beacon", S_IWUSR | S_IRUSR, phydir, sc,
+ debugfs_create_file("beacon", S_IWUSR | S_IRUSR, phydir, ah,
&fops_beacon);
- debugfs_create_file("reset", S_IWUSR, phydir, sc, &fops_reset);
+ debugfs_create_file("reset", S_IWUSR, phydir, ah, &fops_reset);
- debugfs_create_file("antenna", S_IWUSR | S_IRUSR, phydir, sc,
+ debugfs_create_file("antenna", S_IWUSR | S_IRUSR, phydir, ah,
&fops_antenna);
- debugfs_create_file("misc", S_IRUSR, phydir, sc, &fops_misc);
+ debugfs_create_file("misc", S_IRUSR, phydir, ah, &fops_misc);
- debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, sc,
+ debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, ah,
&fops_frameerrors);
- debugfs_create_file("ani", S_IWUSR | S_IRUSR, phydir, sc, &fops_ani);
+ debugfs_create_file("ani", S_IWUSR | S_IRUSR, phydir, ah, &fops_ani);
- debugfs_create_file("queue", S_IWUSR | S_IRUSR, phydir, sc,
+ debugfs_create_file("queue", S_IWUSR | S_IRUSR, phydir, ah,
&fops_queue);
+
+ debugfs_create_bool("32khz_clock", S_IWUSR | S_IRUSR, phydir,
+ &ah->ah_use_32khz_clock);
}
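
ath5k_debug_init_device() now hands the ath5k_hw pointer straight to debugfs as the file's private data, and each handler above recovers it from file->private_data. Below is a stripped-down, hedged sketch of that registration pattern with driver-agnostic names; only the generic debugfs calls are used, everything else (mydev, mydbg_*) is invented for the example.

	#include <linux/kernel.h>
	#include <linux/fs.h>
	#include <linux/debugfs.h>
	#include <linux/uaccess.h>

	/* Minimal stand-in for the per-device state (ath5k_hw in the driver) */
	struct mydev {
		unsigned int debug_level;
	};

	static int mydbg_open(struct inode *inode, struct file *file)
	{
		/* debugfs stored the device pointer in i_private at create time */
		file->private_data = inode->i_private;
		return 0;
	}

	static ssize_t mydbg_read(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
	{
		struct mydev *dev = file->private_data;
		char buf[32];
		int len;

		len = snprintf(buf, sizeof(buf), "debug level: 0x%08x\n",
			       dev->debug_level);
		return simple_read_from_buffer(user_buf, count, ppos, buf, len);
	}

	static const struct file_operations mydbg_fops = {
		.open = mydbg_open,
		.read = mydbg_read,
	};

	static void mydev_debugfs_init(struct mydev *dev, struct dentry *parent)
	{
		struct dentry *dir = debugfs_create_dir("mydev", parent);

		if (!dir)
			return;
		/* dev becomes file->private_data via ->open() above */
		debugfs_create_file("debug", S_IWUSR | S_IRUSR, dir, dev,
				    &mydbg_fops);
	}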
/* functions used in other places */
void
-ath5k_debug_dump_bands(struct ath5k_softc *sc)
+ath5k_debug_dump_bands(struct ath5k_hw *ah)
{
unsigned int b, i;
- if (likely(!(sc->debug.level & ATH5K_DEBUG_DUMPBANDS)))
+ if (likely(!(ah->debug.level & ATH5K_DEBUG_DUMPBANDS)))
return;
- BUG_ON(!sc->sbands);
+ BUG_ON(!ah->sbands);
for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
- struct ieee80211_supported_band *band = &sc->sbands[b];
+ struct ieee80211_supported_band *band = &ah->sbands[b];
char bname[6];
switch (band->band) {
case IEEE80211_BAND_2GHZ:
@@ -983,41 +988,41 @@ ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done,
}
void
-ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
+ath5k_debug_printrxbuffs(struct ath5k_hw *ah)
{
struct ath5k_desc *ds;
struct ath5k_buf *bf;
struct ath5k_rx_status rs = {};
int status;
- if (likely(!(sc->debug.level & ATH5K_DEBUG_DESC)))
+ if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC)))
return;
printk(KERN_DEBUG "rxdp %x, rxlink %p\n",
- ath5k_hw_get_rxdp(ah), sc->rxlink);
+ ath5k_hw_get_rxdp(ah), ah->rxlink);
- spin_lock_bh(&sc->rxbuflock);
- list_for_each_entry(bf, &sc->rxbuf, list) {
+ spin_lock_bh(&ah->rxbuflock);
+ list_for_each_entry(bf, &ah->rxbuf, list) {
ds = bf->desc;
status = ah->ah_proc_rx_desc(ah, ds, &rs);
if (!status)
ath5k_debug_printrxbuf(bf, status == 0, &rs);
}
- spin_unlock_bh(&sc->rxbuflock);
+ spin_unlock_bh(&ah->rxbuflock);
}
void
-ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf)
+ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
struct ath5k_desc *ds = bf->desc;
struct ath5k_hw_5212_tx_desc *td = &ds->ud.ds_tx5212;
struct ath5k_tx_status ts = {};
int done;
- if (likely(!(sc->debug.level & ATH5K_DEBUG_DESC)))
+ if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC)))
return;
- done = sc->ah->ah_proc_tx_desc(sc->ah, bf->desc, &ts);
+ done = ah->ah_proc_tx_desc(ah, bf->desc, &ts);
printk(KERN_DEBUG "T (%p %llx) %08x %08x %08x %08x %08x %08x %08x "
"%08x %c\n", ds, (unsigned long long)bf->daddr, ds->ds_link,
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 193dd2d4ea3..7f37df3125f 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -61,7 +61,6 @@
#ifndef _ATH5K_DEBUG_H
#define _ATH5K_DEBUG_H
-struct ath5k_softc;
struct ath5k_hw;
struct sk_buff;
struct ath5k_buf;
@@ -127,39 +126,39 @@ enum ath5k_debug_level {
} while (0)
void
-ath5k_debug_init_device(struct ath5k_softc *sc);
+ath5k_debug_init_device(struct ath5k_hw *ah);
void
-ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah);
+ath5k_debug_printrxbuffs(struct ath5k_hw *ah);
void
-ath5k_debug_dump_bands(struct ath5k_softc *sc);
+ath5k_debug_dump_bands(struct ath5k_hw *ah);
void
-ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf);
+ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf);
#else /* no debugging */
#include <linux/compiler.h>
static inline void __attribute__ ((format (printf, 3, 4)))
-ATH5K_DBG(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...) {}
+ATH5K_DBG(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...) {}
static inline void __attribute__ ((format (printf, 3, 4)))
-ATH5K_DBG_UNLIMIT(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...)
+ATH5K_DBG_UNLIMIT(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...)
{}
static inline void
-ath5k_debug_init_device(struct ath5k_softc *sc) {}
+ath5k_debug_init_device(struct ath5k_hw *ah) {}
static inline void
-ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah) {}
+ath5k_debug_printrxbuffs(struct ath5k_hw *ah) {}
static inline void
-ath5k_debug_dump_bands(struct ath5k_softc *sc) {}
+ath5k_debug_dump_bands(struct ath5k_hw *ah) {}
static inline void
-ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {}
+ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf) {}
#endif /* ifdef CONFIG_ATH5K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 62172d58572..846535f59ef 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -55,12 +55,12 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
* noise on the channel, so it is important to avoid this.
*/
if (unlikely(tx_tries0 == 0)) {
- ATH5K_ERR(ah->ah_sc, "zero retries\n");
+ ATH5K_ERR(ah, "zero retries\n");
WARN_ON(1);
return -EINVAL;
}
if (unlikely(tx_rate0 == 0)) {
- ATH5K_ERR(ah->ah_sc, "zero rate\n");
+ ATH5K_ERR(ah, "zero rate\n");
WARN_ON(1);
return -EINVAL;
}
@@ -107,10 +107,13 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
case AR5K_PKT_TYPE_BEACON:
case AR5K_PKT_TYPE_PROBE_RESP:
frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
+ break;
case AR5K_PKT_TYPE_PIFS:
frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
+ break;
default:
frame_type = type;
+ break;
}
tx_ctl->tx_control_0 |=
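
The hunk above adds the break statements missing from the frame-type switch: without them, the BEACON/PROBE_RESP assignment fell through into the PIFS case and then into default, so the earlier assignment was silently overwritten. A tiny standalone reproduction of that fallthrough behaviour, with the type constants invented for the example:

	#include <stdio.h>

	enum pkt_type { PKT_NORMAL = 0, PKT_BEACON = 4, PKT_PIFS = 7 };

	static int frame_type_buggy(enum pkt_type type)
	{
		int frame_type = 0;

		switch (type) {
		case PKT_BEACON:
			frame_type = 1;	/* NO_DELAY */
			/* missing break: falls through ... */
		case PKT_PIFS:
			frame_type = 2;	/* PIFS */
			/* missing break: falls through ... */
		default:
			frame_type = (int)type;
		}
		return frame_type;
	}

	static int frame_type_fixed(enum pkt_type type)
	{
		int frame_type;

		switch (type) {
		case PKT_BEACON:
			frame_type = 1;
			break;
		case PKT_PIFS:
			frame_type = 2;
			break;
		default:
			frame_type = (int)type;
			break;
		}
		return frame_type;
	}

	int main(void)
	{
		/* prints "beacon buggy=4 fixed=1": the buggy version ends up
		 * with the default value instead of the beacon frame type */
		printf("beacon buggy=%d fixed=%d\n",
		       frame_type_buggy(PKT_BEACON), frame_type_fixed(PKT_BEACON));
		return 0;
	}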
@@ -200,12 +203,12 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
* noise on the channel, so it is important to avoid this.
*/
if (unlikely(tx_tries0 == 0)) {
- ATH5K_ERR(ah->ah_sc, "zero retries\n");
+ ATH5K_ERR(ah, "zero retries\n");
WARN_ON(1);
return -EINVAL;
}
if (unlikely(tx_rate0 == 0)) {
- ATH5K_ERR(ah->ah_sc, "zero rate\n");
+ ATH5K_ERR(ah, "zero rate\n");
WARN_ON(1);
return -EINVAL;
}
@@ -313,7 +316,7 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
(tx_rate2 == 0 && tx_tries2 != 0) ||
(tx_rate3 == 0 && tx_tries3 != 0))) {
- ATH5K_ERR(ah->ah_sc, "zero rate\n");
+ ATH5K_ERR(ah, "zero rate\n");
WARN_ON(1);
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index 2509d0bf037..cfd529b548f 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -58,11 +58,11 @@ struct ath5k_hw_rx_status {
#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 /* reception success */
#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004 /* CRC error */
#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210 0x00000008 /* [5210] FIFO overrun */
-#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010 /* decyption CRC failure */
+#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010 /* decryption CRC failure */
#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR 0x000000e0 /* PHY error */
#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR_S 5
#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100 /* key index valid */
-#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX 0x00007e00 /* decyption key index */
+#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX 0x00007e00 /* decryption key index */
#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_S 9
#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x0fff8000 /* 13 bit of TSF */
#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 15
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 21091c26a9a..0d5d4033f12 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -25,7 +25,7 @@
*
* Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and
* handle queue setup for 5210 chipset (rest are handled on qcu.c).
- * Also we setup interrupt mask register (IMR) and read the various iterrupt
+ * Also we setup interrupt mask register (IMR) and read the various interrupt
* status registers (ISR).
*
* TODO: Handle SISR on 5211+ and introduce a function to return the queue
@@ -73,7 +73,7 @@ static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
udelay(100);
if (!i)
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
+ ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"failed to stop RX DMA !\n");
return i ? 0 : -EBUSY;
@@ -100,7 +100,7 @@ u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
+ ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"tried to set RXDP while rx was active !\n");
return -EIO;
}
@@ -243,7 +243,7 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
udelay(100);
if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
+ ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"queue %i didn't stop !\n", queue);
/* Check for pending frames */
@@ -258,7 +258,7 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
/* For 2413+ order PCU to drop packets using
* QUIET mechanism */
if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
- pending){
+ pending) {
/* Set periodicity and duration */
ath5k_hw_reg_write(ah,
AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
@@ -295,7 +295,7 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
if (pending)
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
+ ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"quiet mechanism didn't work q:%i !\n",
queue);
}
@@ -309,7 +309,7 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
/* Clear register */
ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
if (pending) {
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
+ ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"tx dma didn't stop (q:%i, frm:%i) !\n",
queue, pending);
return -EBUSY;
@@ -333,7 +333,7 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
int ret;
ret = ath5k_hw_stop_tx_dma(ah, queue);
if (ret) {
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
+ ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"beacon queue didn't stop !\n");
return -EIO;
}
@@ -726,7 +726,7 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
int_mask |= AR5K_IMR_RXDOPPLER;
/* Note: Per queue interrupt masks
- * are set via reset_tx_queue (qcu.c) */
+ * are set via ath5k_hw_reset_tx_queue() (qcu.c) */
ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
@@ -783,7 +783,7 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
* for all PCI-E cards to be safe).
*
* XXX: need to check 5210 for this
- * TODO: Check out tx triger level, it's always 64 on dumps but I
+ * TODO: Check out tx trigger level, it's always 64 on dumps but I
* guess we can tweak it and see how it goes ;-)
*/
if (ah->ah_version != AR5K_AR5210) {
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 1fef84f87c7..9068b916526 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -105,7 +105,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
* big still, waiting on a better value.
*/
if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) {
- ATH5K_ERR(ah->ah_sc, "Invalid max custom EEPROM size: "
+ ATH5K_ERR(ah, "Invalid max custom EEPROM size: "
"%d (0x%04x) max expected: %d (0x%04x)\n",
eep_max, eep_max,
3 * AR5K_EEPROM_INFO_MAX,
@@ -119,7 +119,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
cksum ^= val;
}
if (cksum != AR5K_EEPROM_INFO_CKSUM) {
- ATH5K_ERR(ah->ah_sc, "Invalid EEPROM "
+ ATH5K_ERR(ah, "Invalid EEPROM "
"checksum: 0x%04x eep_max: 0x%04x (%s)\n",
cksum, eep_max,
eep_max == AR5K_EEPROM_INFO_MAX ?
@@ -223,14 +223,14 @@ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
ah->ah_ant_ctl[mode][AR5K_ANT_CTL] =
(ee->ee_ant_control[mode][0] << 4);
ah->ah_ant_ctl[mode][AR5K_ANT_SWTABLE_A] =
- ee->ee_ant_control[mode][1] |
- (ee->ee_ant_control[mode][2] << 6) |
+ ee->ee_ant_control[mode][1] |
+ (ee->ee_ant_control[mode][2] << 6) |
(ee->ee_ant_control[mode][3] << 12) |
(ee->ee_ant_control[mode][4] << 18) |
(ee->ee_ant_control[mode][5] << 24);
ah->ah_ant_ctl[mode][AR5K_ANT_SWTABLE_B] =
- ee->ee_ant_control[mode][6] |
- (ee->ee_ant_control[mode][7] << 6) |
+ ee->ee_ant_control[mode][6] |
+ (ee->ee_ant_control[mode][7] << 6) |
(ee->ee_ant_control[mode][8] << 12) |
(ee->ee_ant_control[mode][9] << 18) |
(ee->ee_ant_control[mode][10] << 24);
@@ -255,7 +255,7 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
ee->ee_n_piers[mode] = 0;
AR5K_EEPROM_READ(o++, val);
ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
- switch(mode) {
+ switch (mode) {
case AR5K_EEPROM_MODE_11A:
ee->ee_ob[mode][3] = (val >> 5) & 0x7;
ee->ee_db[mode][3] = (val >> 2) & 0x7;
@@ -349,7 +349,7 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
/* Note: >= v5 have bg freq piers on another location
* so these freq piers are ignored for >= v5 (should be 0xff
* anyway) */
- switch(mode) {
+ switch (mode) {
case AR5K_EEPROM_MODE_11A:
if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_1)
break;
@@ -422,7 +422,7 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
if (ee->ee_version < AR5K_EEPROM_VERSION_5_0)
goto done;
- switch (mode){
+ switch (mode) {
case AR5K_EEPROM_MODE_11A:
ee->ee_switch_settling_turbo[mode] = (val >> 6) & 0x7f;
@@ -436,7 +436,7 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
ee->ee_adc_desired_size_turbo[mode] |= (val & 0x1) << 7;
ee->ee_pga_desired_size_turbo[mode] = (val >> 1) & 0xff;
- if (AR5K_EEPROM_EEMAP(ee->ee_misc0) >=2)
+ if (AR5K_EEPROM_EEMAP(ee->ee_misc0) >= 2)
ee->ee_pd_gain_overlap = (val >> 9) & 0xf;
break;
case AR5K_EEPROM_MODE_11G:
@@ -516,7 +516,7 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
u16 val;
ee->ee_n_piers[mode] = 0;
- while(i < max) {
+ while (i < max) {
AR5K_EEPROM_READ(o++, val);
freq1 = val & 0xff;
@@ -602,7 +602,7 @@ ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info *pcal;
- switch(mode) {
+ switch (mode) {
case AR5K_EEPROM_MODE_11B:
pcal = ee->ee_pwr_cal_b;
break;
@@ -634,7 +634,7 @@ ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
/* Used to match PCDAC steps with power values on RF5111 chips
* (eeprom versions < 4). For RF5111 we have 11 pre-defined PCDAC
* steps that match with the power values we read from eeprom. On
- * older eeprom versions (< 3.2) these steps are equaly spaced at
+ * older eeprom versions (< 3.2) these steps are equally spaced at
* 10% of the pcdac curve -until the curve reaches its maximum-
* (11 steps from 0 to 100%) but on newer eeprom versions (>= 3.2)
* these 11 steps are spaced in a different way. This function returns
@@ -644,10 +644,12 @@ ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
static inline void
ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
{
- static const u16 intercepts3[] =
- { 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 };
- static const u16 intercepts3_2[] =
- { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 };
+ static const u16 intercepts3[] = {
+ 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100
+ };
+ static const u16 intercepts3_2[] = {
+ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100
+ };
const u16 *ip;
int i;
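
As the comment above explains, the intercept tables place the 11 PCDAC steps at fixed percentages of the curve between its minimum and maximum value. A hedged sketch of mapping such percentage intercepts onto a [min, max] range follows; the intercepts3_2[] values are copied from the hunk, but the interpolation is only my reading of the idea, not necessarily the driver's exact arithmetic.

	#include <stdio.h>

	/* Percentage positions of the 11 intercepts for EEPROM >= 3.2 */
	static const unsigned short intercepts3_2[] = {
		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100
	};

	int main(void)
	{
		unsigned int min = 10, max = 54;	/* example PCDAC endpoints */
		unsigned int i;

		for (i = 0; i < sizeof(intercepts3_2) / sizeof(intercepts3_2[0]); i++) {
			/* linear interpolation between min and max at each percentage */
			unsigned int step = (intercepts3_2[i] * max +
					     (100 - intercepts3_2[i]) * min) / 100;
			printf("intercept %3u%% -> pcdac %u\n",
			       intercepts3_2[i], step);
		}
		return 0;
	}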
@@ -691,14 +693,12 @@ ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
if (!chinfo[pier].pd_curves)
continue;
- for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
+ for (pdg = 0; pdg < AR5K_EEPROM_N_PD_CURVES; pdg++) {
struct ath5k_pdgain_info *pd =
&chinfo[pier].pd_curves[pdg];
- if (pd != NULL) {
- kfree(pd->pd_step);
- kfree(pd->pd_pwr);
- }
+ kfree(pd->pd_step);
+ kfree(pd->pd_pwr);
}
kfree(chinfo[pier].pd_curves);
@@ -764,7 +764,7 @@ ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
/* Fill raw dataset
* (convert power to 0.25dB units
- * for RF5112 combatibility) */
+ * for RF5112 compatibility) */
for (point = 0; point < pd->pd_points; point++) {
/* Absolute values */
@@ -798,7 +798,7 @@ ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode)
u16 val;
offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
- switch(mode) {
+ switch (mode) {
case AR5K_EEPROM_MODE_11A:
if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
return 0;
@@ -884,7 +884,7 @@ ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode)
* Read power calibration for RF5112 chips
*
* For RF5112 we have 4 XPD -eXternal Power Detector- curves
- * for each calibrated channel on 0, -6, -12 and -18dbm but we only
+ * for each calibrated channel on 0, -6, -12 and -18dBm but we only
* use the higher (3) and the lower (0) curves. Each curve has 0.5dB
* power steps on x axis and PCDAC steps on y axis and looks like a
* linear function. To recreate the curve and pass the power values
@@ -1165,7 +1165,7 @@ ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
{
u32 offset = AR5K_EEPROM_CAL_DATA_START(ee->ee_misc4);
- switch(mode) {
+ switch (mode) {
case AR5K_EEPROM_MODE_11G:
if (AR5K_EEPROM_HDR_11B(ee->ee_header))
offset += ath5k_pdgains_size_2413(ee,
@@ -1241,7 +1241,7 @@ ath5k_eeprom_convert_pcal_info_2413(struct ath5k_hw *ah, int mode,
/* Fill raw dataset
* convert all pwr levels to
- * quarter dB for RF5112 combatibility */
+ * quarter dB for RF5112 compatibility */
pd->pd_step[0] = pcinfo->pddac_i[pdg];
pd->pd_pwr[0] = 4 * pcinfo->pwr_i[pdg];
@@ -1622,8 +1622,8 @@ ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
offset += AR5K_EEPROM_GROUPS_START(ee->ee_version);
rep = ee->ee_ctl_pwr;
- for(i = 0; i < ee->ee_ctls; i++) {
- switch(ee->ee_ctl[i] & AR5K_CTL_MODE_M) {
+ for (i = 0; i < ee->ee_ctls; i++) {
+ switch (ee->ee_ctl[i] & AR5K_CTL_MODE_M) {
case AR5K_CTL_11A:
case AR5K_CTL_TURBO:
ctl_mode = AR5K_EEPROM_MODE_11A;
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 6511c27d938..dc2bcfeadeb 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -50,7 +50,7 @@
#define AR5K_EEPROM_VERSION AR5K_EEPROM_INFO(1) /* EEPROM Version */
#define AR5K_EEPROM_VERSION_3_0 0x3000 /* No idea what's going on before this version */
-#define AR5K_EEPROM_VERSION_3_1 0x3001 /* ob/db values for 2Ghz (ar5211_rfregs) */
+#define AR5K_EEPROM_VERSION_3_1 0x3001 /* ob/db values for 2GHz (ar5211_rfregs) */
#define AR5K_EEPROM_VERSION_3_2 0x3002 /* different frequency representation (eeprom_bin2freq) */
#define AR5K_EEPROM_VERSION_3_3 0x3003 /* offsets changed, has 32 CTLs (see below) and ee_false_detect (eeprom_read_modes) */
#define AR5K_EEPROM_VERSION_3_4 0x3004 /* has ee_i_gain, ee_cck_ofdm_power_delta (eeprom_read_modes) */
@@ -75,11 +75,11 @@
#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
-#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz */
+#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2GHz */
#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for < 2W power consumption */
#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7) /* Device type (1 Cardbus, 2 PCI, 3 MiniPCI, 4 AP) */
#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */
-#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz */
+#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5GHz */
/* Newer EEPROMs are using a different offset */
#define AR5K_EEPROM_OFF(_v, _v3_0, _v3_3) \
@@ -120,7 +120,7 @@
#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1) /* disable fast frames */
#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1) /* disable bursting */
#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf) /* max number of QCUs. defaults to 10 */
-#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1) /* enable heayy clipping */
+#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1) /* enable heavy clipping */
#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf) /* key cache size. defaults to 128 */
#define AR5K_EEPROM_MISC6 AR5K_EEPROM_INFO(10)
@@ -223,7 +223,7 @@
#define AR5K_EEPROM_CCK_OFDM_DELTA 15
#define AR5K_EEPROM_N_IQ_CAL 2
/* 5GHz/2GHz */
-enum ath5k_eeprom_freq_bands{
+enum ath5k_eeprom_freq_bands {
AR5K_EEPROM_BAND_5GHZ = 0,
AR5K_EEPROM_BAND_2GHZ = 1,
AR5K_EEPROM_N_FREQ_BANDS,
@@ -270,7 +270,7 @@ enum ath5k_ctl_mode {
/* Per channel calibration data, used for power table setup */
struct ath5k_chan_pcal_info_rf5111 {
- /* Power levels in half dbm units
+ /* Power levels in half dBm units
* for one power curve. */
u8 pwr[AR5K_EEPROM_N_PWR_POINTS_5111];
/* PCDAC table steps
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index e49340d18df..5ab607f40e0 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -113,8 +113,8 @@ static const struct ath5k_ini ar5210_ini[] = {
{ AR5K_PHY(28), 0x0000000f },
{ AR5K_PHY(29), 0x00000080 },
{ AR5K_PHY(30), 0x00000004 },
- { AR5K_PHY(31), 0x00000018 }, /* 0x987c */
- { AR5K_PHY(64), 0x00000000 }, /* 0x9900 */
+ { AR5K_PHY(31), 0x00000018 }, /* 0x987c */
+ { AR5K_PHY(64), 0x00000000 }, /* 0x9900 */
{ AR5K_PHY(65), 0x00000000 },
{ AR5K_PHY(66), 0x00000000 },
{ AR5K_PHY(67), 0x00800000 },
@@ -549,7 +549,7 @@ static const struct ath5k_ini ar5212_ini_common_start[] = {
{ AR5K_DIAG_SW_5211, 0x00000000 },
{ AR5K_ADDAC_TEST, 0x00000000 },
{ AR5K_DEFAULT_ANTENNA, 0x00000000 },
- { AR5K_FRAME_CTL_QOSM, 0x000fc78f },
+ { AR5K_FRAME_CTL_QOSM, 0x000fc78f },
{ AR5K_XRMODE, 0x2a82301a },
{ AR5K_XRDELAY, 0x05dc01e0 },
{ AR5K_XRTIMEOUT, 0x1f402710 },
@@ -760,9 +760,9 @@ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
static const struct ath5k_ini rf5111_ini_common_end[] = {
{ AR5K_DCU_FP, 0x00000000 },
- { AR5K_PHY_AGC, 0x00000000 },
- { AR5K_PHY_ADC_CTL, 0x00022ffe },
- { 0x983c, 0x00020100 },
+ { AR5K_PHY_AGC, 0x00000000 },
+ { AR5K_PHY_ADC_CTL, 0x00022ffe },
+ { 0x983c, 0x00020100 },
{ AR5K_PHY_GAIN_OFFSET, 0x1284613c },
{ AR5K_PHY_PAPD_PROBE, 0x00004883 },
{ 0x9940, 0x00000004 },
@@ -1409,7 +1409,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
* Write initial register settings
*/
- /* For AR5212 and combatible */
+ /* For AR5212 and compatible */
if (ah->ah_version == AR5K_AR5212) {
/* First set of mode-specific settings */
@@ -1542,7 +1542,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
/* AR5K_MODE_11B */
if (mode > 2) {
- ATH5K_ERR(ah->ah_sc,
+ ATH5K_ERR(ah,
"unsupported channel mode: %d\n", mode);
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 576edf2965d..8c17a00f7da 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -43,16 +43,16 @@
#include "ath5k.h"
#include "base.h"
-#define ATH_SDEVICE(subv,subd) \
+#define ATH_SDEVICE(subv, subd) \
.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
.subvendor = (subv), .subdevice = (subd)
-#define ATH_LED(pin,polarity) .driver_data = (((pin) << 8) | (polarity))
+#define ATH_LED(pin, polarity) .driver_data = (((pin) << 8) | (polarity))
#define ATH_PIN(data) ((data) >> 8)
#define ATH_POLARITY(data) ((data) & 0xff)
/* Devices we match on for LED config info (typically laptops) */
-static const struct pci_device_id ath5k_led_devices[] = {
+static DEFINE_PCI_DEVICE_TABLE(ath5k_led_devices) = {
/* AR5211 */
{ PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5211), ATH_LED(0, 0) },
/* HP Compaq nc6xx, nc4000, nx6000 */
@@ -86,26 +86,26 @@ static const struct pci_device_id ath5k_led_devices[] = {
{ }
};
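
The LED table packs both the GPIO pin and its polarity into the table's driver_data field: ATH_LED() shifts the pin into the upper byte and ATH_PIN()/ATH_POLARITY() unpack it again after pci_match_id(). A small standalone check of that encode/decode round trip, reusing the macros exactly as shown above (the pin and polarity values are just an example):

	#include <stdio.h>

	#define ATH_LED(pin, polarity)	(((pin) << 8) | (polarity))
	#define ATH_PIN(data)		((data) >> 8)
	#define ATH_POLARITY(data)	((data) & 0xff)

	int main(void)
	{
		unsigned long data = ATH_LED(3, 1);	/* e.g. GPIO 3, active high */

		printf("driver_data=0x%lx pin=%lu polarity=%lu\n",
		       data, ATH_PIN(data), ATH_POLARITY(data));
		return 0;
	}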
-void ath5k_led_enable(struct ath5k_softc *sc)
+void ath5k_led_enable(struct ath5k_hw *ah)
{
- if (test_bit(ATH_STAT_LEDSOFT, sc->status)) {
- ath5k_hw_set_gpio_output(sc->ah, sc->led_pin);
- ath5k_led_off(sc);
+ if (test_bit(ATH_STAT_LEDSOFT, ah->status)) {
+ ath5k_hw_set_gpio_output(ah, ah->led_pin);
+ ath5k_led_off(ah);
}
}
-static void ath5k_led_on(struct ath5k_softc *sc)
+static void ath5k_led_on(struct ath5k_hw *ah)
{
- if (!test_bit(ATH_STAT_LEDSOFT, sc->status))
+ if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
return;
- ath5k_hw_set_gpio(sc->ah, sc->led_pin, sc->led_on);
+ ath5k_hw_set_gpio(ah, ah->led_pin, ah->led_on);
}
-void ath5k_led_off(struct ath5k_softc *sc)
+void ath5k_led_off(struct ath5k_hw *ah)
{
- if (!test_bit(ATH_STAT_LEDSOFT, sc->status))
+ if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
return;
- ath5k_hw_set_gpio(sc->ah, sc->led_pin, !sc->led_on);
+ ath5k_hw_set_gpio(ah, ah->led_pin, !ah->led_on);
}
static void
@@ -116,27 +116,27 @@ ath5k_led_brightness_set(struct led_classdev *led_dev,
led_dev);
if (brightness == LED_OFF)
- ath5k_led_off(led->sc);
+ ath5k_led_off(led->ah);
else
- ath5k_led_on(led->sc);
+ ath5k_led_on(led->ah);
}
static int
-ath5k_register_led(struct ath5k_softc *sc, struct ath5k_led *led,
+ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led,
const char *name, char *trigger)
{
int err;
- led->sc = sc;
+ led->ah = ah;
strncpy(led->name, name, sizeof(led->name));
led->led_dev.name = led->name;
led->led_dev.default_trigger = trigger;
led->led_dev.brightness_set = ath5k_led_brightness_set;
- err = led_classdev_register(sc->dev, &led->led_dev);
+ err = led_classdev_register(ah->dev, &led->led_dev);
if (err) {
- ATH5K_WARN(sc, "could not register LED %s\n", name);
- led->sc = NULL;
+ ATH5K_WARN(ah, "could not register LED %s\n", name);
+ led->ah = NULL;
}
return err;
}
@@ -144,30 +144,30 @@ ath5k_register_led(struct ath5k_softc *sc, struct ath5k_led *led,
static void
ath5k_unregister_led(struct ath5k_led *led)
{
- if (!led->sc)
+ if (!led->ah)
return;
led_classdev_unregister(&led->led_dev);
- ath5k_led_off(led->sc);
- led->sc = NULL;
+ ath5k_led_off(led->ah);
+ led->ah = NULL;
}
-void ath5k_unregister_leds(struct ath5k_softc *sc)
+void ath5k_unregister_leds(struct ath5k_hw *ah)
{
- ath5k_unregister_led(&sc->rx_led);
- ath5k_unregister_led(&sc->tx_led);
+ ath5k_unregister_led(&ah->rx_led);
+ ath5k_unregister_led(&ah->tx_led);
}
-int ath5k_init_leds(struct ath5k_softc *sc)
+int __devinit ath5k_init_leds(struct ath5k_hw *ah)
{
int ret = 0;
- struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_hw *hw = ah->hw;
#ifndef CONFIG_ATHEROS_AR231X
- struct pci_dev *pdev = sc->pdev;
+ struct pci_dev *pdev = ah->pdev;
#endif
char name[ATH5K_LED_MAX_NAME_LEN + 1];
const struct pci_device_id *match;
- if (!sc->pdev)
+ if (!ah->pdev)
return 0;
#ifdef CONFIG_ATHEROS_AR231X
@@ -176,24 +176,24 @@ int ath5k_init_leds(struct ath5k_softc *sc)
match = pci_match_id(&ath5k_led_devices[0], pdev);
#endif
if (match) {
- __set_bit(ATH_STAT_LEDSOFT, sc->status);
- sc->led_pin = ATH_PIN(match->driver_data);
- sc->led_on = ATH_POLARITY(match->driver_data);
+ __set_bit(ATH_STAT_LEDSOFT, ah->status);
+ ah->led_pin = ATH_PIN(match->driver_data);
+ ah->led_on = ATH_POLARITY(match->driver_data);
}
- if (!test_bit(ATH_STAT_LEDSOFT, sc->status))
+ if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
goto out;
- ath5k_led_enable(sc);
+ ath5k_led_enable(ah);
snprintf(name, sizeof(name), "ath5k-%s::rx", wiphy_name(hw->wiphy));
- ret = ath5k_register_led(sc, &sc->rx_led, name,
+ ret = ath5k_register_led(ah, &ah->rx_led, name,
ieee80211_get_rx_led_name(hw));
if (ret)
goto out;
snprintf(name, sizeof(name), "ath5k-%s::tx", wiphy_name(hw->wiphy));
- ret = ath5k_register_led(sc, &sc->tx_led, name,
+ ret = ath5k_register_led(ah, &ah->tx_led, name,
ieee80211_get_tx_led_name(hw));
out:
return ret;
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 807bd644016..2a715ca0c5e 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -46,8 +46,6 @@
#include "base.h"
#include "reg.h"
-extern int ath5k_modparam_nohwcrypt;
-
/********************\
* Mac80211 functions *
\********************/
@@ -55,44 +53,30 @@ extern int ath5k_modparam_nohwcrypt;
static void
ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
u16 qnum = skb_get_queue_mapping(skb);
- if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
+ if (WARN_ON(qnum >= ah->ah_capabilities.cap_queues.q_tx_num)) {
dev_kfree_skb_any(skb);
return;
}
- ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
-}
-
-
-static int
-ath5k_start(struct ieee80211_hw *hw)
-{
- return ath5k_init_hw(hw->priv);
-}
-
-
-static void
-ath5k_stop(struct ieee80211_hw *hw)
-{
- ath5k_stop_hw(hw->priv);
+ ath5k_tx_queue(hw, skb, &ah->txqs[qnum]);
}
static int
ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
int ret;
struct ath5k_vif *avf = (void *)vif->drv_priv;
- mutex_lock(&sc->lock);
+ mutex_lock(&ah->lock);
if ((vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC)
- && (sc->num_ap_vifs + sc->num_adhoc_vifs) >= ATH_BCBUF) {
+ && (ah->num_ap_vifs + ah->num_adhoc_vifs) >= ATH_BCBUF) {
ret = -ELNRNG;
goto end;
}
@@ -102,9 +86,9 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
* We would need to operate the HW in ad-hoc mode to allow TSF updates
* for the IBSS, but this breaks with additional AP or STA interfaces
* at the moment. */
- if (sc->num_adhoc_vifs ||
- (sc->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
- ATH5K_ERR(sc, "Only one single ad-hoc interface is allowed.\n");
+ if (ah->num_adhoc_vifs ||
+ (ah->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
+ ATH5K_ERR(ah, "Only one single ad-hoc interface is allowed.\n");
ret = -ELNRNG;
goto end;
}
@@ -121,8 +105,8 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
goto end;
}
- sc->nvifs++;
- ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
+ ah->nvifs++;
+ ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
/* Assign the vap/adhoc to a beacon xmit slot. */
if ((avf->opmode == NL80211_IFTYPE_AP) ||
@@ -130,38 +114,38 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
(avf->opmode == NL80211_IFTYPE_MESH_POINT)) {
int slot;
- WARN_ON(list_empty(&sc->bcbuf));
- avf->bbuf = list_first_entry(&sc->bcbuf, struct ath5k_buf,
+ WARN_ON(list_empty(&ah->bcbuf));
+ avf->bbuf = list_first_entry(&ah->bcbuf, struct ath5k_buf,
list);
list_del(&avf->bbuf->list);
avf->bslot = 0;
for (slot = 0; slot < ATH_BCBUF; slot++) {
- if (!sc->bslot[slot]) {
+ if (!ah->bslot[slot]) {
avf->bslot = slot;
break;
}
}
- BUG_ON(sc->bslot[avf->bslot] != NULL);
- sc->bslot[avf->bslot] = vif;
+ BUG_ON(ah->bslot[avf->bslot] != NULL);
+ ah->bslot[avf->bslot] = vif;
if (avf->opmode == NL80211_IFTYPE_AP)
- sc->num_ap_vifs++;
+ ah->num_ap_vifs++;
else if (avf->opmode == NL80211_IFTYPE_ADHOC)
- sc->num_adhoc_vifs++;
+ ah->num_adhoc_vifs++;
}
/* Any MAC address is fine, all others are included through the
* filter.
*/
- memcpy(&sc->lladdr, vif->addr, ETH_ALEN);
- ath5k_hw_set_lladdr(sc->ah, vif->addr);
+ memcpy(&ah->lladdr, vif->addr, ETH_ALEN);
+ ath5k_hw_set_lladdr(ah, vif->addr);
memcpy(&avf->lladdr, vif->addr, ETH_ALEN);
- ath5k_update_bssid_mask_and_opmode(sc, vif);
+ ath5k_update_bssid_mask_and_opmode(ah, vif);
ret = 0;
end:
- mutex_unlock(&sc->lock);
+ mutex_unlock(&ah->lock);
return ret;
}
@@ -170,31 +154,31 @@ static void
ath5k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
struct ath5k_vif *avf = (void *)vif->drv_priv;
unsigned int i;
- mutex_lock(&sc->lock);
- sc->nvifs--;
+ mutex_lock(&ah->lock);
+ ah->nvifs--;
if (avf->bbuf) {
- ath5k_txbuf_free_skb(sc, avf->bbuf);
- list_add_tail(&avf->bbuf->list, &sc->bcbuf);
+ ath5k_txbuf_free_skb(ah, avf->bbuf);
+ list_add_tail(&avf->bbuf->list, &ah->bcbuf);
for (i = 0; i < ATH_BCBUF; i++) {
- if (sc->bslot[i] == vif) {
- sc->bslot[i] = NULL;
+ if (ah->bslot[i] == vif) {
+ ah->bslot[i] = NULL;
break;
}
}
avf->bbuf = NULL;
}
if (avf->opmode == NL80211_IFTYPE_AP)
- sc->num_ap_vifs--;
+ ah->num_ap_vifs--;
else if (avf->opmode == NL80211_IFTYPE_ADHOC)
- sc->num_adhoc_vifs--;
+ ah->num_adhoc_vifs--;
- ath5k_update_bssid_mask_and_opmode(sc, NULL);
- mutex_unlock(&sc->lock);
+ ath5k_update_bssid_mask_and_opmode(ah, NULL);
+ mutex_unlock(&ah->lock);
}
@@ -204,23 +188,22 @@ ath5k_remove_interface(struct ieee80211_hw *hw,
static int
ath5k_config(struct ieee80211_hw *hw, u32 changed)
{
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
int i;
- mutex_lock(&sc->lock);
+ mutex_lock(&ah->lock);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ret = ath5k_chan_set(sc, conf->channel);
+ ret = ath5k_chan_set(ah, conf->channel);
if (ret < 0)
goto unlock;
}
if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
- (sc->power_level != conf->power_level)) {
- sc->power_level = conf->power_level;
+ (ah->power_level != conf->power_level)) {
+ ah->power_level = conf->power_level;
/* Half dB steps */
ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
@@ -254,7 +237,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
unlock:
- mutex_unlock(&sc->lock);
+ mutex_unlock(&ah->lock);
return ret;
}
@@ -264,12 +247,11 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u32 changes)
{
struct ath5k_vif *avf = (void *)vif->drv_priv;
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = hw->priv;
struct ath_common *common = ath5k_hw_common(ah);
unsigned long flags;
- mutex_lock(&sc->lock);
+ mutex_lock(&ah->lock);
if (changes & BSS_CHANGED_BSSID) {
/* Cache for later use during resets */
@@ -280,7 +262,7 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (changes & BSS_CHANGED_BEACON_INT)
- sc->bintval = bss_conf->beacon_int;
+ ah->bintval = bss_conf->beacon_int;
if (changes & BSS_CHANGED_ERP_SLOT) {
int slot_time;
@@ -294,16 +276,16 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changes & BSS_CHANGED_ASSOC) {
avf->assoc = bss_conf->assoc;
if (bss_conf->assoc)
- sc->assoc = bss_conf->assoc;
+ ah->assoc = bss_conf->assoc;
else
- sc->assoc = ath_any_vif_assoc(sc);
+ ah->assoc = ath5k_any_vif_assoc(ah);
- if (sc->opmode == NL80211_IFTYPE_STATION)
- set_beacon_filter(hw, sc->assoc);
- ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
+ if (ah->opmode == NL80211_IFTYPE_STATION)
+ ath5k_set_beacon_filter(hw, ah->assoc);
+ ath5k_hw_set_ledstate(ah, ah->assoc ?
AR5K_LED_ASSOC : AR5K_LED_INIT);
if (bss_conf->assoc) {
- ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
+ ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
"Bss Info ASSOC %d, bssid: %pM\n",
bss_conf->aid, common->curbssid);
common->curaid = bss_conf->aid;
@@ -313,19 +295,19 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (changes & BSS_CHANGED_BEACON) {
- spin_lock_irqsave(&sc->block, flags);
+ spin_lock_irqsave(&ah->block, flags);
ath5k_beacon_update(hw, vif);
- spin_unlock_irqrestore(&sc->block, flags);
+ spin_unlock_irqrestore(&ah->block, flags);
}
if (changes & BSS_CHANGED_BEACON_ENABLED)
- sc->enable_beacon = bss_conf->enable_beacon;
+ ah->enable_beacon = bss_conf->enable_beacon;
if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_BEACON_INT))
- ath5k_beacon_config(sc);
+ ath5k_beacon_config(ah);
- mutex_unlock(&sc->lock);
+ mutex_unlock(&ah->lock);
}
@@ -350,7 +332,7 @@ ath5k_prepare_multicast(struct ieee80211_hw *hw,
mfilt[pos / 32] |= (1 << (pos % 32));
/* XXX: we might be able to just do this instead,
* but not sure, needs testing, if we do use this we'd
- * neet to inform below to not reset the mcast */
+ * need to inform below not to reset the mcast */
/* ath5k_hw_set_mcast_filterindex(ah,
* ha->addr[5]); */
}
@@ -386,12 +368,11 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
FIF_BCN_PRBRESP_PROMISC)
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = hw->priv;
u32 mfilt[2], rfilt;
struct ath5k_vif_iter_data iter_data; /* to count STA interfaces */
- mutex_lock(&sc->lock);
+ mutex_lock(&ah->lock);
mfilt[0] = multicast;
mfilt[1] = multicast >> 32;
@@ -409,12 +390,12 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
if (*new_flags & FIF_PROMISC_IN_BSS)
- __set_bit(ATH_STAT_PROMISC, sc->status);
+ __set_bit(ATH_STAT_PROMISC, ah->status);
else
- __clear_bit(ATH_STAT_PROMISC, sc->status);
+ __clear_bit(ATH_STAT_PROMISC, ah->status);
}
- if (test_bit(ATH_STAT_PROMISC, sc->status))
+ if (test_bit(ATH_STAT_PROMISC, ah->status))
rfilt |= AR5K_RX_FILTER_PROM;
/* Note, AR5K_RX_FILTER_MCAST is already enabled */
@@ -429,7 +410,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
/* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
* and probes for any BSSID */
- if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (sc->nvifs > 1))
+ if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (ah->nvifs > 1))
rfilt |= AR5K_RX_FILTER_BEACON;
/* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
@@ -444,7 +425,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
/* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
- switch (sc->opmode) {
+ switch (ah->opmode) {
case NL80211_IFTYPE_MESH_POINT:
rfilt |= AR5K_RX_FILTER_CONTROL |
AR5K_RX_FILTER_BEACON |
@@ -457,7 +438,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
AR5K_RX_FILTER_BEACON;
break;
case NL80211_IFTYPE_STATION:
- if (sc->assoc)
+ if (ah->assoc)
rfilt |= AR5K_RX_FILTER_BEACON;
default:
break;
@@ -466,14 +447,14 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
iter_data.hw_macaddr = NULL;
iter_data.n_stas = 0;
iter_data.need_set_hw_addr = false;
- ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
+ ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
&iter_data);
/* Set up RX Filter */
if (iter_data.n_stas > 1) {
/* If you have multiple STA interfaces connected to
* different APs, ARPs are not received (most of the time?)
- * Enabling PROMISC appears to fix that probem.
+ * Enabling PROMISC appears to fix that problem.
*/
rfilt |= AR5K_RX_FILTER_PROM;
}
@@ -485,9 +466,9 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
/* Set the cached hw filter flags, this will later actually
* be set in HW */
- sc->filter_flags = rfilt;
+ ah->filter_flags = rfilt;
- mutex_unlock(&sc->lock);
+ mutex_unlock(&ah->lock);
}
@@ -496,8 +477,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = hw->priv;
struct ath_common *common = ath5k_hw_common(ah);
int ret = 0;
@@ -518,7 +498,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EINVAL;
}
- mutex_lock(&sc->lock);
+ mutex_lock(&ah->lock);
switch (cmd) {
case SET_KEY:
@@ -542,7 +522,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
mmiowb();
- mutex_unlock(&sc->lock);
+ mutex_unlock(&ah->lock);
return ret;
}
@@ -550,17 +530,17 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
static void
ath5k_sw_scan_start(struct ieee80211_hw *hw)
{
- struct ath5k_softc *sc = hw->priv;
- if (!sc->assoc)
- ath5k_hw_set_ledstate(sc->ah, AR5K_LED_SCAN);
+ struct ath5k_hw *ah = hw->priv;
+ if (!ah->assoc)
+ ath5k_hw_set_ledstate(ah, AR5K_LED_SCAN);
}
static void
ath5k_sw_scan_complete(struct ieee80211_hw *hw)
{
- struct ath5k_softc *sc = hw->priv;
- ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
+ struct ath5k_hw *ah = hw->priv;
+ ath5k_hw_set_ledstate(ah, ah->assoc ?
AR5K_LED_ASSOC : AR5K_LED_INIT);
}
@@ -569,15 +549,15 @@ static int
ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
/* Force update */
- ath5k_hw_update_mib_counters(sc->ah);
+ ath5k_hw_update_mib_counters(ah);
- stats->dot11ACKFailureCount = sc->stats.ack_fail;
- stats->dot11RTSFailureCount = sc->stats.rts_fail;
- stats->dot11RTSSuccessCount = sc->stats.rts_ok;
- stats->dot11FCSErrorCount = sc->stats.fcs_error;
+ stats->dot11ACKFailureCount = ah->stats.ack_fail;
+ stats->dot11RTSFailureCount = ah->stats.rts_fail;
+ stats->dot11RTSSuccessCount = ah->stats.rts_ok;
+ stats->dot11FCSErrorCount = ah->stats.fcs_error;
return 0;
}
@@ -587,15 +567,14 @@ static int
ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
+ struct ath5k_hw *ah = hw->priv;
struct ath5k_txq_info qi;
int ret = 0;
if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
return 0;
- mutex_lock(&sc->lock);
+ mutex_lock(&ah->lock);
ath5k_hw_get_tx_queueprops(ah, queue, &qi);
@@ -604,20 +583,20 @@ ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
qi.tqi_cw_max = params->cw_max;
qi.tqi_burst_time = params->txop;
- ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
+ ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
"Configure tx [queue %d], "
"aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
queue, params->aifs, params->cw_min,
params->cw_max, params->txop);
if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
- ATH5K_ERR(sc,
+ ATH5K_ERR(ah,
"Unable to update hardware queue %u!\n", queue);
ret = -EIO;
} else
ath5k_hw_reset_tx_queue(ah, queue);
- mutex_unlock(&sc->lock);
+ mutex_unlock(&ah->lock);
return ret;
}
@@ -626,43 +605,43 @@ ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
static u64
ath5k_get_tsf(struct ieee80211_hw *hw)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
- return ath5k_hw_get_tsf64(sc->ah);
+ return ath5k_hw_get_tsf64(ah);
}
static void
ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
- ath5k_hw_set_tsf64(sc->ah, tsf);
+ ath5k_hw_set_tsf64(ah, tsf);
}
static void
ath5k_reset_tsf(struct ieee80211_hw *hw)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
/*
* in IBSS mode we need to update the beacon timers too.
* this will also reset the TSF if we call it with 0
*/
- if (sc->opmode == NL80211_IFTYPE_ADHOC)
- ath5k_beacon_update_timers(sc, 0);
+ if (ah->opmode == NL80211_IFTYPE_ADHOC)
+ ath5k_beacon_update_timers(ah, 0);
else
- ath5k_hw_reset_tsf(sc->ah);
+ ath5k_hw_reset_tsf(ah);
}
static int
ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
- struct ath_common *common = ath5k_hw_common(sc->ah);
+ struct ath_common *common = ath5k_hw_common(ah);
struct ath_cycle_counters *cc = &common->cc_survey;
unsigned int div = common->clockrate * 1000;
@@ -672,18 +651,18 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
spin_lock_bh(&common->cc_lock);
ath_hw_cycle_counters_update(common);
if (cc->cycles > 0) {
- sc->survey.channel_time += cc->cycles / div;
- sc->survey.channel_time_busy += cc->rx_busy / div;
- sc->survey.channel_time_rx += cc->rx_frame / div;
- sc->survey.channel_time_tx += cc->tx_frame / div;
+ ah->survey.channel_time += cc->cycles / div;
+ ah->survey.channel_time_busy += cc->rx_busy / div;
+ ah->survey.channel_time_rx += cc->rx_frame / div;
+ ah->survey.channel_time_tx += cc->tx_frame / div;
}
memset(cc, 0, sizeof(*cc));
spin_unlock_bh(&common->cc_lock);
- memcpy(survey, &sc->survey, sizeof(*survey));
+ memcpy(survey, &ah->survey, sizeof(*survey));
survey->channel = conf->channel;
- survey->noise = sc->ah->ah_noise_floor;
+ survey->noise = ah->ah_noise_floor;
survey->filled = SURVEY_INFO_NOISE_DBM |
SURVEY_INFO_CHANNEL_TIME |
SURVEY_INFO_CHANNEL_TIME_BUSY |
@@ -707,25 +686,25 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
static void
ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
- mutex_lock(&sc->lock);
- ath5k_hw_set_coverage_class(sc->ah, coverage_class);
- mutex_unlock(&sc->lock);
+ mutex_lock(&ah->lock);
+ ath5k_hw_set_coverage_class(ah, coverage_class);
+ mutex_unlock(&ah->lock);
}
static int
ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
if (tx_ant == 1 && rx_ant == 1)
- ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
+ ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
else if (tx_ant == 2 && rx_ant == 2)
- ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
+ ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
else if ((tx_ant & 3) == 3 && (rx_ant & 3) == 3)
- ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
+ ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
else
return -EINVAL;
return 0;
@@ -735,9 +714,9 @@ ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
static int
ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
- switch (sc->ah->ah_ant_mode) {
+ switch (ah->ah_ant_mode) {
case AR5K_ANTMODE_FIXED_A:
*tx_ant = 1; *rx_ant = 1; break;
case AR5K_ANTMODE_FIXED_B:
@@ -752,9 +731,9 @@ ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
static void ath5k_get_ringparam(struct ieee80211_hw *hw,
u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
- *tx = sc->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max;
+ *tx = ah->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max;
*tx_max = ATH5K_TXQ_LEN_MAX;
*rx = *rx_max = ATH_RXBUF;
@@ -763,7 +742,7 @@ static void ath5k_get_ringparam(struct ieee80211_hw *hw,
static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
{
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
u16 qnum;
/* only support setting tx ring size for now */
@@ -774,16 +753,16 @@ static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
if (!tx || tx > ATH5K_TXQ_LEN_MAX)
return -EINVAL;
- for (qnum = 0; qnum < ARRAY_SIZE(sc->txqs); qnum++) {
- if (!sc->txqs[qnum].setup)
+ for (qnum = 0; qnum < ARRAY_SIZE(ah->txqs); qnum++) {
+ if (!ah->txqs[qnum].setup)
continue;
- if (sc->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN ||
- sc->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX)
+ if (ah->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN ||
+ ah->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX)
continue;
- sc->txqs[qnum].txq_max = tx;
- if (sc->txqs[qnum].txq_len >= sc->txqs[qnum].txq_max)
- ieee80211_stop_queue(hw, sc->txqs[qnum].qnum);
+ ah->txqs[qnum].txq_max = tx;
+ if (ah->txqs[qnum].txq_len >= ah->txqs[qnum].txq_max)
+ ieee80211_stop_queue(hw, ah->txqs[qnum].qnum);
}
return 0;
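The base.c hunks above all apply the same conversion: mac80211's hw->priv now holds struct ath5k_hw directly, so each callback drops the old ath5k_softc indirection (sc = hw->priv; ah = sc->ah) and the shared state and lock live on ath5k_hw itself. A minimal sketch of the resulting callback shape, using only field names and helpers that appear in the hunks above (the body is illustrative, not the driver's actual handler):

    static void ath5k_example_callback(struct ieee80211_hw *hw)
    {
            struct ath5k_hw *ah = hw->priv;   /* formerly: sc = hw->priv; ah = sc->ah */

            mutex_lock(&ah->lock);            /* lock and state moved from the softc onto ath5k_hw */
            if (!ah->assoc)
                    ath5k_hw_set_ledstate(ah, AR5K_LED_SCAN);
            mutex_unlock(&ah->lock);
    }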
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index 296c316a834..eaf79b49341 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -34,12 +34,12 @@ static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
{ PCI_VDEVICE(3COM_2, 0x0013) }, /* 3com 5212 */
{ PCI_VDEVICE(3COM, 0x0013) }, /* 3com 3CRDAG675 5212 */
{ PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */
- { PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 combatible */
- { PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 combatible */
- { PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 combatible */
- { PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 combatible */
- { PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 combatible */
- { PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 combatible */
+ { PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 compatible */
+ { PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 compatible */
+ { PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 compatible */
+ { PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 compatible */
+ { PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 compatible */
+ { PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */
{ PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
{ PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
@@ -51,10 +51,10 @@ MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
/* return bus cachesize in 4B word units */
static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
{
- struct ath5k_softc *sc = (struct ath5k_softc *) common->priv;
+ struct ath5k_hw *ah = (struct ath5k_hw *) common->priv;
u8 u8tmp;
- pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
+ pci_read_config_byte(ah->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
*csz = (int)u8tmp;
/*
@@ -156,7 +156,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
void __iomem *mem;
- struct ath5k_softc *sc;
+ struct ath5k_hw *ah;
struct ieee80211_hw *hw;
int ret;
u8 csz;
@@ -234,7 +234,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
mem = pci_iomap(pdev, 0, 0);
if (!mem) {
- dev_err(&pdev->dev, "cannot remap PCI memory region\n") ;
+ dev_err(&pdev->dev, "cannot remap PCI memory region\n");
ret = -EIO;
goto err_reg;
}
@@ -243,7 +243,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
* Allocate hw (mac80211 main struct)
* and hw->priv (driver private data)
*/
- hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
+ hw = ieee80211_alloc_hw(sizeof(*ah), &ath5k_hw_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
ret = -ENOMEM;
@@ -252,16 +252,16 @@ ath5k_pci_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
- sc = hw->priv;
- sc->hw = hw;
- sc->pdev = pdev;
- sc->dev = &pdev->dev;
- sc->irq = pdev->irq;
- sc->devid = id->device;
- sc->iobase = mem; /* So we can unmap it on detach */
+ ah = hw->priv;
+ ah->hw = hw;
+ ah->pdev = pdev;
+ ah->dev = &pdev->dev;
+ ah->irq = pdev->irq;
+ ah->devid = id->device;
+ ah->iobase = mem; /* So we can unmap it on detach */
/* Initialize */
- ret = ath5k_init_softc(sc, &ath_pci_bus_ops);
+ ret = ath5k_init_softc(ah, &ath_pci_bus_ops);
if (ret)
goto err_free;
@@ -285,10 +285,10 @@ static void __devexit
ath5k_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = hw->priv;
- ath5k_deinit_softc(sc);
- pci_iounmap(pdev, sc->iobase);
+ ath5k_deinit_softc(ah);
+ pci_iounmap(pdev, ah->iobase);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
ieee80211_free_hw(hw);
@@ -297,16 +297,19 @@ ath5k_pci_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int ath5k_pci_suspend(struct device *dev)
{
- struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath5k_hw *ah = hw->priv;
- ath5k_led_off(sc);
+ ath5k_led_off(ah);
return 0;
}
static int ath5k_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct ath5k_softc *sc = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath5k_hw *ah = hw->priv;
/*
* Suspend/Resume resets the PCI configuration space, so we have to
@@ -315,7 +318,7 @@ static int ath5k_pci_resume(struct device *dev)
*/
pci_write_config_byte(pdev, 0x41, 0);
- ath5k_led_enable(sc);
+ ath5k_led_enable(ah);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 712a9ac4000..06731384506 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -32,7 +32,7 @@
#include "base.h"
/*
- * AR5212+ can use higher rates for ack transmition
+ * AR5212+ can use higher rates for ack transmission
* based on current tx rate instead of the base rate.
* It does this to better utilize channel usage.
* This is a mapping between G rates (that cover both
@@ -77,14 +77,13 @@ static const unsigned int ack_rates_high[] =
int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
int len, struct ieee80211_rate *rate, bool shortpre)
{
- struct ath5k_softc *sc = ah->ah_sc;
int sifs, preamble, plcp_bits, sym_time;
int bitrate, bits, symbols, symbol_bits;
int dur;
/* Fallback */
if (!ah->ah_bwmode) {
- __le16 raw_dur = ieee80211_generic_frame_duration(sc->hw,
+ __le16 raw_dur = ieee80211_generic_frame_duration(ah->hw,
NULL, len, rate);
/* subtract difference between long and short preamble */
@@ -205,7 +204,7 @@ unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
*/
void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
{
- struct ath5k_statistics *stats = &ah->ah_sc->stats;
+ struct ath5k_statistics *stats = &ah->stats;
/* Read-And-Clear */
stats->ack_fail += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
@@ -240,25 +239,24 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
*/
static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
{
- struct ath5k_softc *sc = ah->ah_sc;
struct ieee80211_rate *rate;
unsigned int i;
/* 802.11g covers both OFDM and CCK */
u8 band = IEEE80211_BAND_2GHZ;
/* Write rate duration table */
- for (i = 0; i < sc->sbands[band].n_bitrates; i++) {
+ for (i = 0; i < ah->sbands[band].n_bitrates; i++) {
u32 reg;
u16 tx_time;
if (ah->ah_ack_bitrate_high)
- rate = &sc->sbands[band].bitrates[ack_rates_high[i]];
+ rate = &ah->sbands[band].bitrates[ack_rates_high[i]];
/* CCK -> 1Mb */
else if (i < 4)
- rate = &sc->sbands[band].bitrates[0];
+ rate = &ah->sbands[band].bitrates[0];
/* OFDM -> 6Mb */
else
- rate = &sc->sbands[band].bitrates[4];
+ rate = &ah->sbands[band].bitrates[4];
/* Set ACK timeout */
reg = AR5K_RATE_DUR(rate->hw_value);
@@ -534,9 +532,9 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
local_irq_restore(flags);
- WARN_ON( i == ATH5K_MAX_TSF_READ );
+ WARN_ON(i == ATH5K_MAX_TSF_READ);
- return (((u64)tsf_upper1 << 32) | tsf_lower);
+ return ((u64)tsf_upper1 << 32) | tsf_lower;
}
/**
@@ -586,7 +584,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
/*
* Set the additional timers by mode
*/
- switch (ah->ah_sc->opmode) {
+ switch (ah->opmode) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_STATION:
/* In STA mode timer1 is used as next wakeup
@@ -623,8 +621,8 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
* Set the beacon register and enable all timers.
*/
/* When in AP or Mesh Point mode zero timer0 to start TSF */
- if (ah->ah_sc->opmode == NL80211_IFTYPE_AP ||
- ah->ah_sc->opmode == NL80211_IFTYPE_MESH_POINT)
+ if (ah->opmode == NL80211_IFTYPE_AP ||
+ ah->opmode == NL80211_IFTYPE_MESH_POINT)
ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
@@ -643,14 +641,14 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
/* Flush any pending BMISS interrupts on ISR by
* performing a clear-on-write operation on PISR
* register for the BMISS bit (writing a bit on
- * ISR togles a reset for that bit and leaves
- * the rest bits intact) */
+ * ISR toggles a reset for that bit and leaves
+ * the remaining bits intact) */
if (ah->ah_version == AR5K_AR5210)
ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_ISR);
else
ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_PISR);
- /* TODO: Set enchanced sleep registers on AR5212
+ /* TODO: Set enhanced sleep registers on AR5212
* based on vif->bss_conf params, until then
* disable power save reporting.*/
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PWR_SV);
@@ -738,7 +736,7 @@ ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
dma = ath5k_hw_reg_read(ah, AR5K_TIMER1) >> 3;
/* NOTE: SWBA is different. Having a wrong window there does not
- * stop us from sending data and this condition is catched thru
+ * stop us from sending data and this condition is caught by
* other means (SWBA interrupt) */
if (ath5k_check_timer_win(nbtt, atim, 1, intval) &&
@@ -814,7 +812,7 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
struct ath_common *common = ath5k_hw_common(ah);
u32 pcu_reg, beacon_reg, low_id, high_id;
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
+ ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
/* Preserve rest settings */
pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
@@ -890,13 +888,13 @@ void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
* XXX: rethink this after new mode changes to
* mac80211 are integrated */
if (ah->ah_version == AR5K_AR5212 &&
- ah->ah_sc->nvifs)
+ ah->nvifs)
ath5k_hw_write_rate_duration(ah);
/* Set RSSI/BRSSI thresholds
*
* Note: If we decide to set this value
- * dynamicaly, have in mind that when AR5K_RSSI_THR
+ * dynamically, keep in mind that when AR5K_RSSI_THR
* register is read it might return 0x40 if we haven't
* wrote anything to it plus BMISS RSSI threshold is zeroed.
* So doing a save/restore procedure here isn't the right
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 55441913344..81e465e7017 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include "ath5k.h"
#include "reg.h"
@@ -105,6 +106,7 @@ bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
if ((ah->ah_radio == AR5K_RF5112) ||
(ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_radio == AR5K_RF2413) ||
(ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
refclk_freq = 40;
else
@@ -173,7 +175,7 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
data = ath5k_hw_bitswap(val, num_bits);
for (bits_shifted = 0, bits_left = num_bits; bits_left > 0;
- position = 0, entry++) {
+ position = 0, entry++) {
last_bit = (position + bits_left > 8) ? 8 :
position + bits_left;
@@ -363,7 +365,7 @@ int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
return 0;
}
-/* Schedule a gain probe check on the next transmited packet.
+/* Schedule a gain probe check on the next transmitted packet.
* That means our next packet is going to be sent with lower
* tx power and a Peak to Average Power Detector (PAPD) will try
* to measure the gain.
@@ -472,7 +474,7 @@ static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
level[0] = 0;
level[1] = (step == 63) ? 50 : step + 4;
level[2] = (step != 63) ? 64 : level[0];
- level[3] = level[2] + 50 ;
+ level[3] = level[2] + 50;
ah->ah_gain.g_high = level[3] -
(step == 63 ? AR5K_GAIN_DYN_ADJUST_HI_MARGIN : -5);
@@ -549,7 +551,7 @@ static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
for (ah->ah_gain.g_target = ah->ah_gain.g_current;
ah->ah_gain.g_target <= ah->ah_gain.g_low &&
- ah->ah_gain.g_step_idx < go->go_steps_count-1;
+ ah->ah_gain.g_step_idx < go->go_steps_count - 1;
g_step = &go->go_step[ah->ah_gain.g_step_idx])
ah->ah_gain.g_target -= 2 *
(go->go_step[++ah->ah_gain.g_step_idx].gos_gain -
@@ -560,7 +562,7 @@ static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
}
done:
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"ret %d, gain step %u, current gain %u, target gain %u\n",
ret, ah->ah_gain.g_step_idx, ah->ah_gain.g_current,
ah->ah_gain.g_target);
@@ -614,13 +616,13 @@ enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
ath5k_hw_rf_gainf_corr(ah);
ah->ah_gain.g_current =
ah->ah_gain.g_current >= ah->ah_gain.g_f_corr ?
- (ah->ah_gain.g_current-ah->ah_gain.g_f_corr) :
+ (ah->ah_gain.g_current - ah->ah_gain.g_f_corr) :
0;
}
/* Check if measurement is ok and if we need
* to adjust gain, schedule a gain adjustment,
- * else switch back to the acive state */
+ * else switch back to the active state */
if (ath5k_hw_rf_check_gainf_readback(ah) &&
AR5K_GAIN_CHECK_ADJUST(&ah->ah_gain) &&
ath5k_hw_rf_gainf_adjust(ah)) {
@@ -772,7 +774,7 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
ah->ah_rf_banks = kmalloc(sizeof(u32) * ah->ah_rf_banks_size,
GFP_KERNEL);
if (ah->ah_rf_banks == NULL) {
- ATH5K_ERR(ah->ah_sc, "out of memory\n");
+ ATH5K_ERR(ah, "out of memory\n");
return -ENOMEM;
}
}
@@ -782,7 +784,7 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
for (i = 0; i < ah->ah_rf_banks_size; i++) {
if (ini_rfb[i].rfb_bank >= AR5K_MAX_RF_BANKS) {
- ATH5K_ERR(ah->ah_sc, "invalid bank\n");
+ ATH5K_ERR(ah, "invalid bank\n");
return -EINVAL;
}
@@ -807,7 +809,7 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
* use b_OB and b_DB parameters stored
* in eeprom on ee->ee_ob[ee_mode][0]
*
- * For all other chips we use OB/DB for 2Ghz
+ * For all other chips we use OB/DB for 2GHz
* stored in the b/g modal section just like
* 802.11a on ee->ee_ob[ee_mode][1] */
if ((ah->ah_radio == AR5K_RF5111) ||
@@ -970,17 +972,20 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
}
/* Lower synth voltage on Rev 2 */
- ath5k_hw_rfb_op(ah, rf_regs, 2,
- AR5K_RF_HIGH_VC_CP, true);
+ if (ah->ah_radio == AR5K_RF5112 &&
+ (ah->ah_radio_5ghz_revision & AR5K_SREV_REV) > 0) {
+ ath5k_hw_rfb_op(ah, rf_regs, 2,
+ AR5K_RF_HIGH_VC_CP, true);
- ath5k_hw_rfb_op(ah, rf_regs, 2,
- AR5K_RF_MID_VC_CP, true);
+ ath5k_hw_rfb_op(ah, rf_regs, 2,
+ AR5K_RF_MID_VC_CP, true);
- ath5k_hw_rfb_op(ah, rf_regs, 2,
- AR5K_RF_LOW_VC_CP, true);
+ ath5k_hw_rfb_op(ah, rf_regs, 2,
+ AR5K_RF_LOW_VC_CP, true);
- ath5k_hw_rfb_op(ah, rf_regs, 2,
- AR5K_RF_PUSH_UP, true);
+ ath5k_hw_rfb_op(ah, rf_regs, 2,
+ AR5K_RF_PUSH_UP, true);
+ }
/* Decrease power consumption on 5213+ BaseBand */
if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
@@ -1259,12 +1264,12 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
{
int ret;
/*
- * Check bounds supported by the PHY (we don't care about regultory
+ * Check bounds supported by the PHY (we don't care about regulatory
* restrictions at this point). Note: hw_value already has the band
* (CHANNEL_2GHZ, or CHANNEL_5GHZ) so we inform ath5k_channel_ok()
* of the band by that */
if (!ath5k_channel_ok(ah, channel->center_freq, channel->hw_value)) {
- ATH5K_ERR(ah->ah_sc,
+ ATH5K_ERR(ah,
"channel frequency (%u MHz) out of supported "
"band range\n",
channel->center_freq);
@@ -1331,7 +1336,7 @@ void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
{
struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
- hist->index = (hist->index + 1) & (ATH5K_NF_CAL_HIST_MAX-1);
+ hist->index = (hist->index + 1) & (ATH5K_NF_CAL_HIST_MAX - 1);
hist->nfval[hist->index] = noise_floor;
}
@@ -1344,18 +1349,18 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
memcpy(sort, ah->ah_nfcal_hist.nfval, sizeof(sort));
for (i = 0; i < ATH5K_NF_CAL_HIST_MAX - 1; i++) {
for (j = 1; j < ATH5K_NF_CAL_HIST_MAX - i; j++) {
- if (sort[j] > sort[j-1]) {
+ if (sort[j] > sort[j - 1]) {
tmp = sort[j];
- sort[j] = sort[j-1];
- sort[j-1] = tmp;
+ sort[j] = sort[j - 1];
+ sort[j - 1] = tmp;
}
}
}
for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++) {
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"cal %d:%d\n", i, sort[i]);
}
- return sort[(ATH5K_NF_CAL_HIST_MAX-1) / 2];
+ return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2];
}
/*
@@ -1378,7 +1383,7 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
/* keep last value if calibration hasn't completed */
if (ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL) & AR5K_PHY_AGCCTL_NF) {
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"NF did not complete in calibration window\n");
return;
@@ -1391,7 +1396,7 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
threshold = ee->ee_noise_floor_thr[ee_mode];
if (nf > threshold) {
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"noise floor failure detected; "
"read %d, threshold %d\n",
nf, threshold);
@@ -1428,7 +1433,7 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
ah->ah_noise_floor = nf;
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"noise floor calibrated: %d\n", nf);
}
@@ -1516,7 +1521,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
ath5k_hw_reg_write(ah, phy_sat, AR5K_PHY_ADCSAT);
if (ret) {
- ATH5K_ERR(ah->ah_sc, "calibration timeout (%uMHz)\n",
+ ATH5K_ERR(ah, "calibration timeout (%uMHz)\n",
channel->center_freq);
return ret;
}
@@ -1551,7 +1556,7 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q);
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
"iq_corr:%x i_pwr:%x q_pwr:%x", iq_corr, i_pwr, q_pwr);
if (i_pwr && q_pwr)
break;
@@ -1577,7 +1582,7 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
q_coff = (i_pwr / q_coffd) - 128;
q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
- ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
"new I:%d Q:%d (i_coffd:%x q_coffd:%x)",
i_coff, q_coff, i_coffd, q_coffd);
@@ -1604,11 +1609,13 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
int ret;
if (ah->ah_radio == AR5K_RF5110)
- ret = ath5k_hw_rf5110_calibrate(ah, channel);
- else {
- ret = ath5k_hw_rf511x_iq_calibrate(ah);
+ return ath5k_hw_rf5110_calibrate(ah, channel);
+
+ ret = ath5k_hw_rf511x_iq_calibrate(ah);
+
+ if ((ah->ah_radio == AR5K_RF5111 || ah->ah_radio == AR5K_RF5112) &&
+ (channel->hw_value & CHANNEL_OFDM))
ath5k_hw_request_rfgain_probe(ah);
- }
return ret;
}
@@ -1815,7 +1822,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
} else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) &
AR5K_PHY_IQ_SPUR_FILT_EN) {
- /* Clean up spur mitigation settings and disable fliter */
+ /* Clean up spur mitigation settings and disable filter */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL,
AR5K_PHY_BIN_MASK_CTL_RATE, 0);
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_IQ,
@@ -1960,7 +1967,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
ee_mode = ath5k_eeprom_mode_from_channel(channel);
if (ee_mode < 0) {
- ATH5K_ERR(ah->ah_sc,
+ ATH5K_ERR(ah,
"invalid channel: %d\n", channel->center_freq);
return;
}
@@ -2080,7 +2087,7 @@ ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
* always 1 instead of 1.25, 1.75 etc). We scale up by 100
* to have some accuracy both for 0.5 and 0.25 steps.
*/
- ratio = ((100 * y_right - 100 * y_left)/(x_right - x_left));
+ ratio = ((100 * y_right - 100 * y_left) / (x_right - x_left));
/* Now scale down to be in range */
result = y_left + (ratio * (target - x_left) / 100);
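To see why the comment above scales by 100, take illustrative values y_left = 10, y_right = 11, x_left = 0, x_right = 4: the true slope is 0.25, which plain integer division would truncate to 0 and flatten the whole curve. Scaled, ratio = (1100 - 1000) / 4 = 25, so target = 4 gives result = 10 + 25 * 4 / 100 = 11 and target = 2 gives 10 (10.5 truncated) — the 0.25/0.5 steps survive in the slope and only the final division truncates.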
@@ -2159,7 +2166,7 @@ ath5k_create_power_curve(s16 pmin, s16 pmax,
u8 *vpd_table, u8 type)
{
u8 idx[2] = { 0, 1 };
- s16 pwr_i = 2*pmin;
+ s16 pwr_i = 2 * pmin;
int i;
if (num_points < 2)
@@ -2437,7 +2444,7 @@ ath5k_get_max_ctl_power(struct ath5k_hw *ah,
}
if (edge_pwr)
- ah->ah_txpower.txp_max_pwr = 4*min(edge_pwr, max_chan_pwr);
+ ah->ah_txpower.txp_max_pwr = 4 * min(edge_pwr, max_chan_pwr);
}
@@ -2456,7 +2463,7 @@ static void
ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
s16 *table_max)
{
- u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
+ u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
u8 *pcdac_tmp = ah->ah_txpower.tmpL[0];
u8 pcdac_0, pcdac_n, pcdac_i, pwr_idx, i;
s16 min_pwr, max_pwr;
@@ -2475,8 +2482,8 @@ ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
/* Copy values from pcdac_tmp */
pwr_idx = min_pwr;
- for (i = 0 ; pwr_idx <= max_pwr &&
- pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE; i++) {
+ for (i = 0; pwr_idx <= max_pwr &&
+ pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE; i++) {
pcdac_out[pcdac_i++] = pcdac_tmp[i];
pwr_idx++;
}
@@ -2502,7 +2509,7 @@ static void
ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
s16 *table_max, u8 pdcurves)
{
- u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
+ u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
u8 *pcdac_low_pwr;
u8 *pcdac_high_pwr;
u8 *pcdac_tmp;
@@ -2510,8 +2517,8 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
s16 max_pwr_idx;
s16 min_pwr_idx;
s16 mid_pwr_idx = 0;
- /* Edge flag turs on the 7nth bit on the PCDAC
- * to delcare the higher power curve (force values
+ /* Edge flag turns on the 7th bit on the PCDAC
+ * to declare the higher power curve (force values
* to be greater than 64). If we only have one curve
* we don't need to set this, if we have 2 curves and
* fill the table backwards this can also be used to
@@ -2552,7 +2559,7 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
}
/* This is used when setting tx power*/
- ah->ah_txpower.txp_min_idx = min_pwr_idx/2;
+ ah->ah_txpower.txp_min_idx = min_pwr_idx / 2;
/* Fill Power to PCDAC table backwards */
pwr = max_pwr_idx;
@@ -2561,14 +2568,14 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
* edge flag and set pcdac_tmp to lower
* power curve.*/
if (edge_flag == 0x40 &&
- (2*pwr <= (table_max[1] - table_min[0]) || pwr == 0)) {
+ (2 * pwr <= (table_max[1] - table_min[0]) || pwr == 0)) {
edge_flag = 0x00;
pcdac_tmp = pcdac_low_pwr;
- pwr = mid_pwr_idx/2;
+ pwr = mid_pwr_idx / 2;
}
/* Don't go below 1, extrapolate below if we have
- * already swithced to the lower power curve -or
+ * already switched to the lower power curve -or
* we only have one curve and edge_flag is zero
* anyway */
if (pcdac_tmp[pwr] < 1 && (edge_flag == 0x00)) {
@@ -2596,7 +2603,7 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
static void
ath5k_write_pcdac_table(struct ath5k_hw *ah)
{
- u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
+ u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
int i;
/*
@@ -2604,8 +2611,8 @@ ath5k_write_pcdac_table(struct ath5k_hw *ah)
*/
for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
ath5k_hw_reg_write(ah,
- (((pcdac_out[2*i + 0] << 8 | 0xff) & 0xffff) << 0) |
- (((pcdac_out[2*i + 1] << 8 | 0xff) & 0xffff) << 16),
+ (((pcdac_out[2 * i + 0] << 8 | 0xff) & 0xffff) << 0) |
+ (((pcdac_out[2 * i + 1] << 8 | 0xff) & 0xffff) << 16),
AR5K_PHY_PCDAC_TXPOWER(i));
}
}
@@ -2788,12 +2795,8 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
* Write TX power values
*/
for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
- ath5k_hw_reg_write(ah,
- ((pdadc_out[4*i + 0] & 0xff) << 0) |
- ((pdadc_out[4*i + 1] & 0xff) << 8) |
- ((pdadc_out[4*i + 2] & 0xff) << 16) |
- ((pdadc_out[4*i + 3] & 0xff) << 24),
- AR5K_PHY_PDADC_TXPOWER(i));
+ u32 val = get_unaligned_le32(&pdadc_out[4 * i]);
+ ath5k_hw_reg_write(ah, val, AR5K_PHY_PDADC_TXPOWER(i));
}
}
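The hunk above replaces the open-coded byte packing with get_unaligned_le32() (the <asm/unaligned.h> include is added earlier in this diff); both forms assemble the same little-endian 32-bit word from four consecutive bytes. A quick equivalence sketch with made-up byte values:

    u8 b[4] = { 0x11, 0x22, 0x33, 0x44 };
    u32 manual = (b[0] << 0) | (b[1] << 8) | (b[2] << 16) | (b[3] << 24);
    u32 helper = get_unaligned_le32(b);
    /* manual == helper == 0x44332211, independent of host endianness */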
@@ -2805,7 +2808,7 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
/*
* This is the main function that uses all of the above
* to set PCDAC/PDADC table on hw for the current channel.
- * This table is used for tx power calibration on the basband,
+ * This table is used for tx power calibration on the baseband,
* without it we get weird tx power levels and in some cases
* distorted spectral mask
*/
@@ -3116,13 +3119,13 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
int ret;
if (txpower > AR5K_TUNE_MAX_TXPOWER) {
- ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower);
+ ATH5K_ERR(ah, "invalid tx power: %u\n", txpower);
return -EINVAL;
}
ee_mode = ath5k_eeprom_mode_from_channel(channel);
if (ee_mode < 0) {
- ATH5K_ERR(ah->ah_sc,
+ ATH5K_ERR(ah,
"invalid channel: %d\n", channel->center_freq);
return -EINVAL;
}
@@ -3223,7 +3226,7 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
{
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER,
+ ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER,
"changing txpower to %d\n", txpower);
return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
@@ -3434,7 +3437,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
* during ath5k_phy_calibrate) */
if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
AR5K_PHY_AGCCTL_CAL, 0, false)) {
- ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
+ ATH5K_ERR(ah, "gain calibration timeout (%uMHz)\n",
channel->center_freq);
}
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index b18c5021aac..65f10398999 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -187,7 +187,7 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
break;
case AR5K_TX_QUEUE_XR_DATA:
if (ah->ah_version != AR5K_AR5212)
- ATH5K_ERR(ah->ah_sc,
+ ATH5K_ERR(ah,
"XR data queues only supported in"
" 5212!\n");
queue = AR5K_TX_QUEUE_ID_XR_DATA;
@@ -510,7 +510,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
- struct ath5k_softc *sc = ah->ah_sc;
struct ieee80211_rate *rate;
u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
@@ -546,9 +545,9 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
* Also we have different lowest rate for 802.11a
*/
if (channel->hw_value & CHANNEL_5GHZ)
- rate = &sc->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
+ rate = &ah->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
else
- rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[0];
+ rate = &ah->sbands[IEEE80211_BAND_2GHZ].bitrates[0];
ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false);
@@ -622,7 +621,7 @@ int ath5k_hw_init_queues(struct ath5k_hw *ah)
for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
ret = ath5k_hw_reset_tx_queue(ah, i);
if (ret) {
- ATH5K_ERR(ah->ah_sc,
+ ATH5K_ERR(ah,
"failed to reset TX queue #%d\n", i);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index d12b827033c..f5c1000045d 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -72,7 +72,7 @@
#define AR5K_CFG_SWRD 0x00000004 /* Byte-swap RX descriptor */
#define AR5K_CFG_SWRB 0x00000008 /* Byte-swap RX buffer */
#define AR5K_CFG_SWRG 0x00000010 /* Byte-swap Register access */
-#define AR5K_CFG_IBSS 0x00000020 /* 0-BSS, 1-IBSS [5211+] */
+#define AR5K_CFG_IBSS 0x00000020 /* 0-BSS, 1-IBSS [5211+] */
#define AR5K_CFG_PHY_OK 0x00000100 /* [5211+] */
#define AR5K_CFG_EEBS 0x00000200 /* EEPROM is busy */
#define AR5K_CFG_CLKGD 0x00000400 /* Clock gated (Disable dynamic clock) */
@@ -170,7 +170,7 @@
#define AR5K_TXCFG_SDMAMR_S 0
#define AR5K_TXCFG_B_MODE 0x00000008 /* Set b mode for 5111 (enable 2111) */
#define AR5K_TXCFG_TXFSTP 0x00000008 /* TX DMA full Stop [5210] */
-#define AR5K_TXCFG_TXFULL 0x000003f0 /* TX Triger level mask */
+#define AR5K_TXCFG_TXFULL 0x000003f0 /* TX Trigger level mask */
#define AR5K_TXCFG_TXFULL_S 4
#define AR5K_TXCFG_TXFULL_0B 0x00000000
#define AR5K_TXCFG_TXFULL_64B 0x00000010
@@ -283,16 +283,16 @@
*/
#define AR5K_ISR 0x001c /* Register Address [5210] */
#define AR5K_PISR 0x0080 /* Register Address [5211+] */
-#define AR5K_ISR_RXOK 0x00000001 /* Frame successfuly received */
+#define AR5K_ISR_RXOK 0x00000001 /* Frame successfully received */
#define AR5K_ISR_RXDESC 0x00000002 /* RX descriptor request */
#define AR5K_ISR_RXERR 0x00000004 /* Receive error */
#define AR5K_ISR_RXNOFRM 0x00000008 /* No frame received (receive timeout) */
#define AR5K_ISR_RXEOL 0x00000010 /* Empty RX descriptor */
#define AR5K_ISR_RXORN 0x00000020 /* Receive FIFO overrun */
-#define AR5K_ISR_TXOK 0x00000040 /* Frame successfuly transmited */
+#define AR5K_ISR_TXOK 0x00000040 /* Frame successfully transmitted */
#define AR5K_ISR_TXDESC 0x00000080 /* TX descriptor request */
#define AR5K_ISR_TXERR 0x00000100 /* Transmit error */
-#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmited (transmit timeout) */
+#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout) */
#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */
#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */
#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */
@@ -303,7 +303,7 @@
#define AR5K_ISR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */
#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */
#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
-#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */
+#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */
#define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */
#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */
@@ -377,16 +377,16 @@
*/
#define AR5K_IMR 0x0020 /* Register Address [5210] */
#define AR5K_PIMR 0x00a0 /* Register Address [5211+] */
-#define AR5K_IMR_RXOK 0x00000001 /* Frame successfuly received*/
+#define AR5K_IMR_RXOK 0x00000001 /* Frame successfully received*/
#define AR5K_IMR_RXDESC 0x00000002 /* RX descriptor request*/
#define AR5K_IMR_RXERR 0x00000004 /* Receive error*/
#define AR5K_IMR_RXNOFRM 0x00000008 /* No frame received (receive timeout)*/
#define AR5K_IMR_RXEOL 0x00000010 /* Empty RX descriptor*/
#define AR5K_IMR_RXORN 0x00000020 /* Receive FIFO overrun*/
-#define AR5K_IMR_TXOK 0x00000040 /* Frame successfuly transmited*/
+#define AR5K_IMR_TXOK 0x00000040 /* Frame successfully transmitted*/
#define AR5K_IMR_TXDESC 0x00000080 /* TX descriptor request*/
#define AR5K_IMR_TXERR 0x00000100 /* Transmit error*/
-#define AR5K_IMR_TXNOFRM 0x00000200 /* No frame transmited (transmit timeout)*/
+#define AR5K_IMR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout)*/
#define AR5K_IMR_TXEOL 0x00000400 /* Empty TX descriptor*/
#define AR5K_IMR_TXURN 0x00000800 /* Transmit FIFO underrun*/
#define AR5K_IMR_MIB 0x00001000 /* Update MIB counters*/
@@ -397,7 +397,7 @@
#define AR5K_IMR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */
#define AR5K_IMR_BMISS 0x00040000 /* Beacon missed*/
#define AR5K_IMR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
-#define AR5K_IMR_BNR 0x00100000 /* Beacon not ready [5211+] */
+#define AR5K_IMR_BNR 0x00100000 /* Beacon not ready [5211+] */
#define AR5K_IMR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
#define AR5K_IMR_RXCHIRP 0x00200000 /* CHIRP Received [5212+]*/
#define AR5K_IMR_SSERR 0x00200000 /* Signaled System Error [5210] */
@@ -601,7 +601,7 @@
* QCU misc registers
*/
#define AR5K_QCU_MISC_BASE 0x09c0 /* Register Address -Queue0 MISC */
-#define AR5K_QCU_MISC_FRSHED_M 0x0000000f /* Frame sheduling mask */
+#define AR5K_QCU_MISC_FRSHED_M 0x0000000f /* Frame scheduling mask */
#define AR5K_QCU_MISC_FRSHED_ASAP 0 /* ASAP */
#define AR5K_QCU_MISC_FRSHED_CBR 1 /* Constant Bit Rate */
#define AR5K_QCU_MISC_FRSHED_DBA_GT 2 /* DMA Beacon alert gated */
@@ -653,13 +653,13 @@
* registers [5211+]
*
* These registers control the various characteristics of each queue
- * for 802.11e (WME) combatibility so they go together with
+ * for 802.11e (WME) compatibility so they go together with
* QCU registers in pairs. For each queue we have a QCU mask register,
* (0x1000 - 0x102c), a local-IFS settings register (0x1040 - 0x106c),
* a retry limit register (0x1080 - 0x10ac), a channel time register
* (0x10c0 - 0x10ec), a misc-settings register (0x1100 - 0x112c) and
* a sequence number register (0x1140 - 0x116c). It seems that "global"
- * registers here afect all queues (see use of DCU_GBL_IFS_SLOT in ar5k).
+ * registers here affect all queues (see use of DCU_GBL_IFS_SLOT in ar5k).
* We use the same macros here for easier register access.
*
*/
@@ -779,7 +779,7 @@
* and it's used for generating pseudo-random
* number sequences.
*
- * (If i understand corectly, random numbers are
+ * (If I understand correctly, random numbers are
* used for idle sensing -multiplied with cwmin/max etc-)
*/
#define AR5K_DCU_GBL_IFS_MISC 0x10f0 /* Register Address */
@@ -1007,7 +1007,7 @@
#define AR5K_PCIE_WAEN 0x407c
/*
- * PCI-E Serializer/Desirializer
+ * PCI-E Serializer/Deserializer
* registers
*/
#define AR5K_PCIE_SERDES 0x4080
@@ -1227,7 +1227,7 @@
AR5K_USEC_5210 : AR5K_USEC_5211)
#define AR5K_USEC_1 0x0000007f /* clock cycles for 1us */
#define AR5K_USEC_1_S 0
-#define AR5K_USEC_32 0x00003f80 /* clock cycles for 1us while on 32Mhz clock */
+#define AR5K_USEC_32 0x00003f80 /* clock cycles for 1us while on 32MHz clock */
#define AR5K_USEC_32_S 7
#define AR5K_USEC_TX_LATENCY_5211 0x007fc000
#define AR5K_USEC_TX_LATENCY_5211_S 14
@@ -1328,16 +1328,16 @@
#define AR5K_RX_FILTER_5211 0x803c /* Register Address [5211+] */
#define AR5K_RX_FILTER (ah->ah_version == AR5K_AR5210 ? \
AR5K_RX_FILTER_5210 : AR5K_RX_FILTER_5211)
-#define AR5K_RX_FILTER_UCAST 0x00000001 /* Don't filter unicast frames */
-#define AR5K_RX_FILTER_MCAST 0x00000002 /* Don't filter multicast frames */
-#define AR5K_RX_FILTER_BCAST 0x00000004 /* Don't filter broadcast frames */
-#define AR5K_RX_FILTER_CONTROL 0x00000008 /* Don't filter control frames */
-#define AR5K_RX_FILTER_BEACON 0x00000010 /* Don't filter beacon frames */
-#define AR5K_RX_FILTER_PROM 0x00000020 /* Set promiscuous mode */
-#define AR5K_RX_FILTER_XRPOLL 0x00000040 /* Don't filter XR poll frame [5212+] */
+#define AR5K_RX_FILTER_UCAST 0x00000001 /* Don't filter unicast frames */
+#define AR5K_RX_FILTER_MCAST 0x00000002 /* Don't filter multicast frames */
+#define AR5K_RX_FILTER_BCAST 0x00000004 /* Don't filter broadcast frames */
+#define AR5K_RX_FILTER_CONTROL 0x00000008 /* Don't filter control frames */
+#define AR5K_RX_FILTER_BEACON 0x00000010 /* Don't filter beacon frames */
+#define AR5K_RX_FILTER_PROM 0x00000020 /* Set promiscuous mode */
+#define AR5K_RX_FILTER_XRPOLL 0x00000040 /* Don't filter XR poll frame [5212+] */
#define AR5K_RX_FILTER_PROBEREQ 0x00000080 /* Don't filter probe requests [5212+] */
#define AR5K_RX_FILTER_PHYERR_5212 0x00000100 /* Don't filter phy errors [5212+] */
-#define AR5K_RX_FILTER_RADARERR_5212 0x00000200 /* Don't filter phy radar errors [5212+] */
+#define AR5K_RX_FILTER_RADARERR_5212 0x00000200 /* Don't filter phy radar errors [5212+] */
#define AR5K_RX_FILTER_PHYERR_5211 0x00000040 /* [5211] */
#define AR5K_RX_FILTER_RADARERR_5211 0x00000080 /* [5211] */
#define AR5K_RX_FILTER_PHYERR \
@@ -1461,7 +1461,7 @@
* ADDAC test register [5211+]
*/
#define AR5K_ADDAC_TEST 0x8054 /* Register Address */
-#define AR5K_ADDAC_TEST_TXCONT 0x00000001 /* Test continuous tx */
+#define AR5K_ADDAC_TEST_TXCONT 0x00000001 /* Test continuous tx */
#define AR5K_ADDAC_TEST_TST_MODE 0x00000002 /* Test mode */
#define AR5K_ADDAC_TEST_LOOP_EN 0x00000004 /* Enable loop */
#define AR5K_ADDAC_TEST_LOOP_LEN 0x00000008 /* Loop length (field) */
@@ -1632,7 +1632,7 @@
#define AR5K_SLEEP0_NEXT_DTIM 0x0007ffff /* Mask for next DTIM (?) */
#define AR5K_SLEEP0_NEXT_DTIM_S 0
#define AR5K_SLEEP0_ASSUME_DTIM 0x00080000 /* Assume DTIM */
-#define AR5K_SLEEP0_ENH_SLEEP_EN 0x00100000 /* Enable enchanced sleep control */
+#define AR5K_SLEEP0_ENH_SLEEP_EN 0x00100000 /* Enable enhanced sleep control */
#define AR5K_SLEEP0_CABTO 0xff000000 /* Mask for CAB Time Out */
#define AR5K_SLEEP0_CABTO_S 24
@@ -1657,7 +1657,7 @@
/*
* TX power control (TPC) register
*
- * XXX: PCDAC steps (0.5dbm) or DBM ?
+ * XXX: PCDAC steps (0.5dBm) or dBm ?
*
*/
#define AR5K_TXPC 0x80e8 /* Register Address */
@@ -1673,7 +1673,7 @@
/*
* Profile count registers
*
- * These registers can be cleared and freezed with ATH5K_MIBC, but they do not
+ * These registers can be cleared and frozen with ATH5K_MIBC, but they do not
* generate a MIB interrupt.
* Instead of overflowing, they shift by one bit to the right. All registers
* shift together, i.e. when one reaches the max, all shift at the same time by
@@ -1838,7 +1838,7 @@
#define AR5K_PHY_TST2_TRIG_SEL 0x00000007 /* Trigger select (?)*/
#define AR5K_PHY_TST2_TRIG 0x00000010 /* Trigger (?) */
#define AR5K_PHY_TST2_CBUS_MODE 0x00000060 /* Cardbus mode (?) */
-#define AR5K_PHY_TST2_CLK32 0x00000400 /* CLK_OUT is CLK32 (32Khz external) */
+#define AR5K_PHY_TST2_CLK32 0x00000400 /* CLK_OUT is CLK32 (32kHz external) */
#define AR5K_PHY_TST2_CHANCOR_DUMP_EN 0x00000800 /* Enable Chancor dump (?) */
#define AR5K_PHY_TST2_EVEN_CHANCOR_DUMP 0x00001000 /* Even Chancor dump (?) */
#define AR5K_PHY_TST2_RFSILENT_EN 0x00002000 /* Enable RFSILENT */
@@ -2002,7 +2002,7 @@
#define AR5K_PHY_AGCCTL_OFDM_DIV_DIS 0x00000008 /* Disable antenna diversity on OFDM modes */
#define AR5K_PHY_AGCCTL_NF_EN 0x00008000 /* Enable nf calibration to happen (?) */
#define AR5K_PHY_AGCTL_FLTR_CAL 0x00010000 /* Allow filter calibration (?) */
-#define AR5K_PHY_AGCCTL_NF_NOUPDATE 0x00020000 /* Don't update nf automaticaly */
+#define AR5K_PHY_AGCCTL_NF_NOUPDATE 0x00020000 /* Don't update nf automatically */
/*
* PHY noise floor status register (CCA = Clear Channel Assessment)
@@ -2038,7 +2038,7 @@
#define AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_S 24
/* Low thresholds */
-#define AR5K_PHY_WEAK_OFDM_LOW_THR 0x986c
+#define AR5K_PHY_WEAK_OFDM_LOW_THR 0x986c
#define AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN 0x00000001
#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT 0x00003f00
#define AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT_S 8
@@ -2089,7 +2089,7 @@
*
* It's obvious from the code that 0x989c is the buffer register but
* for the other special registers that we write to after sending each
- * packet, i have no idea. So i'll name them BUFFER_CONTROL_X registers
+ * packet, I have no idea. So I'll name them BUFFER_CONTROL_X registers
* for now. It's interesting that they are also used for some other operations.
*/
@@ -2259,12 +2259,13 @@
#define AR5K_PHY_FRAME_CTL_ILLLEN_ERR 0x08000000 /* Illegal length */
#define AR5K_PHY_FRAME_CTL_SERVICE_ERR 0x20000000
#define AR5K_PHY_FRAME_CTL_TXURN_ERR 0x40000000 /* TX underrun */
-#define AR5K_PHY_FRAME_CTL_INI AR5K_PHY_FRAME_CTL_SERVICE_ERR | \
- AR5K_PHY_FRAME_CTL_TXURN_ERR | \
- AR5K_PHY_FRAME_CTL_ILLLEN_ERR | \
- AR5K_PHY_FRAME_CTL_ILLRATE_ERR | \
- AR5K_PHY_FRAME_CTL_PARITY_ERR | \
- AR5K_PHY_FRAME_CTL_TIMING_ERR
+#define AR5K_PHY_FRAME_CTL_INI \
+ (AR5K_PHY_FRAME_CTL_SERVICE_ERR | \
+ AR5K_PHY_FRAME_CTL_TXURN_ERR | \
+ AR5K_PHY_FRAME_CTL_ILLLEN_ERR | \
+ AR5K_PHY_FRAME_CTL_ILLRATE_ERR | \
+ AR5K_PHY_FRAME_CTL_PARITY_ERR | \
+ AR5K_PHY_FRAME_CTL_TIMING_ERR)
/*
* PHY Tx Power adjustment register [5212A+]
@@ -2281,22 +2282,22 @@
#define AR5K_PHY_RADAR 0x9954
#define AR5K_PHY_RADAR_ENABLE 0x00000001
#define AR5K_PHY_RADAR_DISABLE 0x00000000
-#define AR5K_PHY_RADAR_INBANDTHR 0x0000003e /* Inband threshold
+#define AR5K_PHY_RADAR_INBANDTHR 0x0000003e /* Inband threshold
5-bits, units unknown {0..31}
(? MHz ?) */
#define AR5K_PHY_RADAR_INBANDTHR_S 1
-#define AR5K_PHY_RADAR_PRSSI_THR 0x00000fc0 /* Pulse RSSI/SNR threshold
+#define AR5K_PHY_RADAR_PRSSI_THR 0x00000fc0 /* Pulse RSSI/SNR threshold
6-bits, dBm range {0..63}
in dBm units. */
#define AR5K_PHY_RADAR_PRSSI_THR_S 6
-#define AR5K_PHY_RADAR_PHEIGHT_THR 0x0003f000 /* Pulse height threshold
+#define AR5K_PHY_RADAR_PHEIGHT_THR 0x0003f000 /* Pulse height threshold
6-bits, dBm range {0..63}
in dBm units. */
#define AR5K_PHY_RADAR_PHEIGHT_THR_S 12
-#define AR5K_PHY_RADAR_RSSI_THR 0x00fc0000 /* Radar RSSI/SNR threshold.
+#define AR5K_PHY_RADAR_RSSI_THR 0x00fc0000 /* Radar RSSI/SNR threshold.
6-bits, dBm range {0..63}
in dBm units. */
#define AR5K_PHY_RADAR_RSSI_THR_S 18
@@ -2339,7 +2340,7 @@
#define AR5K_PHY_RESTART_DIV_GC_S 18
/*
- * RF Bus access request register (for synth-oly channel switching)
+ * RF Bus access request register (for synth-only channel switching)
*/
#define AR5K_PHY_RFBUS_REQ 0x997C
#define AR5K_PHY_RFBUS_REQ_REQUEST 0x00000001
@@ -2381,7 +2382,7 @@
*/
#define AR5K_BB_GAIN_BASE 0x9b00 /* BaseBand Amplifier Gain table base address */
#define AR5K_BB_GAIN(_n) (AR5K_BB_GAIN_BASE + ((_n) << 2))
-#define AR5K_RF_GAIN_BASE 0x9a00 /* RF Amplrifier Gain table base address */
+#define AR5K_RF_GAIN_BASE 0x9a00 /* RF Amplifier Gain table base address */
#define AR5K_RF_GAIN(_n) (AR5K_RF_GAIN_BASE + ((_n) << 2))
/*
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 126a4eab35f..0686c5d8d56 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -25,7 +25,7 @@
#include <asm/unaligned.h>
-#include <linux/pci.h> /* To determine if a card is pci-e */
+#include <linux/pci.h> /* To determine if a card is pci-e */
#include <linux/log2.h>
#include <linux/platform_device.h>
#include "ath5k.h"
@@ -142,10 +142,11 @@ static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
/* Set 32MHz USEC counter */
if ((ah->ah_radio == AR5K_RF5112) ||
- (ah->ah_radio == AR5K_RF5413) ||
- (ah->ah_radio == AR5K_RF2316) ||
- (ah->ah_radio == AR5K_RF2317))
- /* Remain on 40MHz clock ? */
+ (ah->ah_radio == AR5K_RF2413) ||
+ (ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_radio == AR5K_RF2316) ||
+ (ah->ah_radio == AR5K_RF2317))
+ /* Remain on 40MHz clock ? */
sclock = 40 - 1;
else
sclock = 32 - 1;
@@ -213,7 +214,7 @@ static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
usec_reg = (usec | sclock | txlat | rxlat);
ath5k_hw_reg_write(ah, usec_reg, AR5K_USEC);
- /* On 5112 set tx frane to tx data start delay */
+ /* On 5112 set tx frame to tx data start delay */
if (ah->ah_radio == AR5K_RF5112) {
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RF_CTL2,
AR5K_PHY_RF_CTL2_TXF2TXD_START,
@@ -233,7 +234,7 @@ static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
- u32 scal, spending;
+ u32 scal, spending, sclock;
/* Only set 32KHz settings if we have an external
* 32KHz crystal present */
@@ -317,6 +318,15 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
/* Set up tsf increment on each cycle */
AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 1);
+
+ if ((ah->ah_radio == AR5K_RF5112) ||
+ (ah->ah_radio == AR5K_RF5413) ||
+ (ah->ah_radio == AR5K_RF2316) ||
+ (ah->ah_radio == AR5K_RF2317))
+ sclock = 40 - 1;
+ else
+ sclock = 32 - 1;
+ AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, sclock);
}
}
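
The sleep-clock hunk above repeats the 40MHz-vs-32MHz core clock selection so the USEC_32 field is restored correctly when leaving 32KHz sleep mode. Below is a minimal standalone sketch of that selection logic; the radio identifiers and helper name are illustrative stand-ins for the driver's own symbols, not its API.

#include <stdio.h>

enum radio { RF5112, RF2413, RF5413, RF2316, RF2317, RF5111 };

/* Sketch: pick the USEC counter reload value from the core clock.
 * The register field stores "clock in MHz minus one". */
static int usec_counter_for_radio(enum radio r)
{
    switch (r) {
    case RF5112:
    case RF2413:
    case RF5413:
    case RF2316:
    case RF2317:
        return 40 - 1;  /* these radios keep the MAC on a 40 MHz clock */
    default:
        return 32 - 1;  /* everything else runs at 32 MHz */
    }
}

int main(void)
{
    printf("RF5413 -> %d, RF5111 -> %d\n",
           usec_counter_for_radio(RF5413),
           usec_counter_for_radio(RF5111));
    return 0;
}
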
@@ -375,20 +385,20 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
{
u32 mask = flags ? flags : ~0U;
- volatile u32 *reg;
+ u32 __iomem *reg;
u32 regval;
u32 val = 0;
/* ah->ah_mac_srev is not available at this point yet */
- if (ah->ah_sc->devid >= AR5K_SREV_AR2315_R6) {
- reg = (u32 *) AR5K_AR2315_RESET;
+ if (ah->devid >= AR5K_SREV_AR2315_R6) {
+ reg = (u32 __iomem *) AR5K_AR2315_RESET;
if (mask & AR5K_RESET_CTL_PCU)
val |= AR5K_AR2315_RESET_WMAC;
if (mask & AR5K_RESET_CTL_BASEBAND)
val |= AR5K_AR2315_RESET_BB_WARM;
} else {
- reg = (u32 *) AR5K_AR5312_RESET;
- if (to_platform_device(ah->ah_sc->dev)->id == 0) {
+ reg = (u32 __iomem *) AR5K_AR5312_RESET;
+ if (to_platform_device(ah->dev)->id == 0) {
if (mask & AR5K_RESET_CTL_PCU)
val |= AR5K_AR5312_RESET_WMAC0;
if (mask & AR5K_RESET_CTL_BASEBAND)
@@ -520,7 +530,7 @@ commit:
*/
int ath5k_hw_on_hold(struct ath5k_hw *ah)
{
- struct pci_dev *pdev = ah->ah_sc->pdev;
+ struct pci_dev *pdev = ah->pdev;
u32 bus_flags;
int ret;
@@ -530,7 +540,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
/* Make sure device is awake */
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
- ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
+ ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
return ret;
}
@@ -539,7 +549,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
*
* Note: putting PCI core on warm reset on PCI-E cards
* results card to hang and always return 0xffff... so
- * we ingore that flag for PCI-E cards. On PCI cards
+ * we ignore that flag for PCI-E cards. On PCI cards
* this flag gets cleared after 64 PCI clocks.
*/
bus_flags = (pdev && pci_is_pcie(pdev)) ? 0 : AR5K_RESET_CTL_PCI;
@@ -555,14 +565,14 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
}
if (ret) {
- ATH5K_ERR(ah->ah_sc, "failed to put device on warm reset\n");
+ ATH5K_ERR(ah, "failed to put device on warm reset\n");
return -EIO;
}
/* ...wakeup again!*/
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
- ATH5K_ERR(ah->ah_sc, "failed to put device on hold\n");
+ ATH5K_ERR(ah, "failed to put device on hold\n");
return ret;
}
@@ -574,7 +584,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
*/
int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
{
- struct pci_dev *pdev = ah->ah_sc->pdev;
+ struct pci_dev *pdev = ah->pdev;
u32 turbo, mode, clock, bus_flags;
int ret;
@@ -586,7 +596,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
/* Wakeup the device */
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
- ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
+ ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
return ret;
}
}
@@ -596,7 +606,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
*
* Note: putting PCI core on warm reset on PCI-E cards
* results card to hang and always return 0xffff... so
- * we ingore that flag for PCI-E cards. On PCI cards
+ * we ignore that flag for PCI-E cards. On PCI cards
* this flag gets cleared after 64 PCI clocks.
*/
bus_flags = (pdev && pci_is_pcie(pdev)) ? 0 : AR5K_RESET_CTL_PCI;
@@ -616,18 +626,18 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
}
if (ret) {
- ATH5K_ERR(ah->ah_sc, "failed to reset the MAC Chip\n");
+ ATH5K_ERR(ah, "failed to reset the MAC Chip\n");
return -EIO;
}
/* ...wakeup again!...*/
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
- ATH5K_ERR(ah->ah_sc, "failed to resume the MAC Chip\n");
+ ATH5K_ERR(ah, "failed to resume the MAC Chip\n");
return ret;
}
- /* ...reset configuration regiter on Wisoc ...
+ /* ...reset configuration register on Wisoc ...
* ...clear reset control register and pull device out of
* warm reset on others */
if (ath5k_get_bus_type(ah) == ATH_AHB)
@@ -636,7 +646,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
ret = ath5k_hw_nic_reset(ah, 0);
if (ret) {
- ATH5K_ERR(ah->ah_sc, "failed to warm reset the MAC Chip\n");
+ ATH5K_ERR(ah, "failed to warm reset the MAC Chip\n");
return -EIO;
}
@@ -677,7 +687,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
else
mode |= AR5K_PHY_MODE_MOD_DYN;
} else {
- ATH5K_ERR(ah->ah_sc,
+ ATH5K_ERR(ah,
"invalid radio modulation mode\n");
return -EINVAL;
}
@@ -693,18 +703,18 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
if (flags & CHANNEL_OFDM)
mode |= AR5K_PHY_MODE_MOD_OFDM;
else {
- ATH5K_ERR(ah->ah_sc,
+ ATH5K_ERR(ah,
"invalid radio modulation mode\n");
return -EINVAL;
}
} else {
- ATH5K_ERR(ah->ah_sc, "invalid radio frequency mode\n");
+ ATH5K_ERR(ah, "invalid radio frequency mode\n");
return -EINVAL;
}
/*XXX: Can bwmode be used with dynamic mode ?
* (I don't think it supports 44MHz) */
- /* On 2425 initvals TURBO_SHORT is not pressent */
+ /* On 2425 initvals TURBO_SHORT is not present */
if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) {
turbo = AR5K_PHY_TURBO_MODE |
(ah->ah_radio == AR5K_RF2425) ? 0 :
@@ -1066,7 +1076,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/* RF Bus grant won't work if we have pending
* frames */
if (ret && fast) {
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"DMA didn't stop, falling back to normal reset\n");
fast = 0;
/* Non fatal, just continue with
@@ -1081,7 +1091,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
case CHANNEL_G:
if (ah->ah_version <= AR5K_AR5211) {
- ATH5K_ERR(ah->ah_sc,
+ ATH5K_ERR(ah,
"G mode not available on 5210/5211");
return -EINVAL;
}
@@ -1091,7 +1101,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
case CHANNEL_B:
if (ah->ah_version < AR5K_AR5211) {
- ATH5K_ERR(ah->ah_sc,
+ ATH5K_ERR(ah,
"B mode not available on 5210");
return -EINVAL;
}
@@ -1100,14 +1110,14 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
break;
case CHANNEL_XR:
if (ah->ah_version == AR5K_AR5211) {
- ATH5K_ERR(ah->ah_sc,
+ ATH5K_ERR(ah,
"XR mode not available on 5211");
return -EINVAL;
}
mode = AR5K_MODE_XR;
break;
default:
- ATH5K_ERR(ah->ah_sc,
+ ATH5K_ERR(ah,
"invalid channel: %d\n", channel->center_freq);
return -EINVAL;
}
@@ -1119,13 +1129,13 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
if (fast) {
ret = ath5k_hw_phy_init(ah, channel, mode, true);
if (ret) {
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"fast chan change failed, falling back to normal reset\n");
/* Non fatal, can happen eg.
* on mode change */
ret = 0;
} else {
- ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"fast chan change successful\n");
return 0;
}
@@ -1258,7 +1268,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
*/
ret = ath5k_hw_phy_init(ah, channel, mode, false);
if (ret) {
- ATH5K_ERR(ah->ah_sc,
+ ATH5K_ERR(ah,
"failed to initialize PHY (%i) !\n", ret);
return ret;
}
@@ -1277,11 +1287,16 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
ath5k_hw_dma_init(ah);
- /* Enable 32KHz clock function for AR5212+ chips
+ /*
+ * Enable 32KHz clock function for AR5212+ chips
* Set clocks to 32KHz operation and use an
* external 32KHz crystal when sleeping if one
- * exists */
- if (ah->ah_version == AR5K_AR5212 &&
+ * exists.
+ * Disabled by default because it is also disabled in
+ * other drivers and it is known to cause stability
+ * issues on some devices
+ */
+ if (ah->ah_use_32khz_clock && ah->ah_version == AR5K_AR5212 &&
op_mode != NL80211_IFTYPE_AP)
ath5k_hw_set_sleep_clock(ah, true);
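
The new ah_use_32khz_clock gate keeps the power-saving 32KHz sleep clock off unless explicitly requested, since other drivers also leave it disabled and it is known to destabilise some devices. A tiny standalone sketch of the resulting decision, with hypothetical names for the flag and helper:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical condensed form of the gate added above. */
static bool should_enable_sleep_clock(bool use_32khz_clock,
                                      bool is_ar5212_or_later,
                                      bool is_ap_mode)
{
    /* opt-in flag, capable MAC, and not an AP interface */
    return use_32khz_clock && is_ar5212_or_later && !is_ap_mode;
}

int main(void)
{
    printf("%d\n", should_enable_sleep_clock(false, true, false)); /* 0: off by default */
    printf("%d\n", should_enable_sleep_clock(true, true, false));  /* 1: explicitly enabled */
    return 0;
}
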
diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
index 16b67e84906..5d11c23b429 100644
--- a/drivers/net/wireless/ath/ath5k/rfbuffer.h
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
@@ -254,7 +254,7 @@ static const struct ath5k_ini_rfbuffer rfb_5111[] = {
/* RFX112 (Derby 1) */
-/* BANK 6 len pos col */
+/* BANK 6 len pos col */
#define AR5K_RF5112_OB_2GHZ { 3, 269, 0 }
#define AR5K_RF5112_DB_2GHZ { 3, 272, 0 }
@@ -495,7 +495,7 @@ static const struct ath5k_ini_rfbuffer rfb_5112a[] = {
/* BANK 2 len pos col */
#define AR5K_RF2413_RF_TURBO { 1, 1, 2 }
-/* BANK 6 len pos col */
+/* BANK 6 len pos col */
#define AR5K_RF2413_OB_2GHZ { 3, 168, 0 }
#define AR5K_RF2413_DB_2GHZ { 3, 165, 0 }
diff --git a/drivers/net/wireless/ath/ath5k/rfgain.h b/drivers/net/wireless/ath/ath5k/rfgain.h
index 1354d8c392c..ebfae052d89 100644
--- a/drivers/net/wireless/ath/ath5k/rfgain.h
+++ b/drivers/net/wireless/ath/ath5k/rfgain.h
@@ -30,7 +30,7 @@ struct ath5k_ini_rfgain {
/* Initial RF Gain settings for RF5111 */
static const struct ath5k_ini_rfgain rfgain_5111[] = {
- /* 5Ghz 2Ghz */
+ /* 5GHz 2GHz */
{ AR5K_RF_GAIN(0), { 0x000001a9, 0x00000000 } },
{ AR5K_RF_GAIN(1), { 0x000001e9, 0x00000040 } },
{ AR5K_RF_GAIN(2), { 0x00000029, 0x00000080 } },
@@ -99,7 +99,7 @@ static const struct ath5k_ini_rfgain rfgain_5111[] = {
/* Initial RF Gain settings for RF5112 */
static const struct ath5k_ini_rfgain rfgain_5112[] = {
- /* 5Ghz 2Ghz */
+ /* 5GHz 2GHz */
{ AR5K_RF_GAIN(0), { 0x00000007, 0x00000007 } },
{ AR5K_RF_GAIN(1), { 0x00000047, 0x00000047 } },
{ AR5K_RF_GAIN(2), { 0x00000087, 0x00000087 } },
@@ -305,7 +305,7 @@ static const struct ath5k_ini_rfgain rfgain_2316[] = {
/* Initial RF Gain settings for RF5413 */
static const struct ath5k_ini_rfgain rfgain_5413[] = {
- /* 5Ghz 2Ghz */
+ /* 5GHz 2GHz */
{ AR5K_RF_GAIN(0), { 0x00000000, 0x00000000 } },
{ AR5K_RF_GAIN(1), { 0x00000040, 0x00000040 } },
{ AR5K_RF_GAIN(2), { 0x00000080, 0x00000080 } },
@@ -452,7 +452,7 @@ static const struct ath5k_ini_rfgain rfgain_2425[] = {
/* Check if our current measurement is inside our
* current variable attenuation window */
-#define AR5K_GAIN_CHECK_ADJUST(_g) \
+#define AR5K_GAIN_CHECK_ADJUST(_g) \
((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
struct ath5k_gain_opt_step {
diff --git a/drivers/net/wireless/ath/ath5k/rfkill.c b/drivers/net/wireless/ath/ath5k/rfkill.c
index 41a877b73fc..945fc9f21e7 100644
--- a/drivers/net/wireless/ath/ath5k/rfkill.c
+++ b/drivers/net/wireless/ath/ath5k/rfkill.c
@@ -36,86 +36,81 @@
#include "base.h"
-static inline void ath5k_rfkill_disable(struct ath5k_softc *sc)
+static inline void ath5k_rfkill_disable(struct ath5k_hw *ah)
{
- ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill disable (gpio:%d polarity:%d)\n",
- sc->rf_kill.gpio, sc->rf_kill.polarity);
- ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio);
- ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, !sc->rf_kill.polarity);
+ ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "rfkill disable (gpio:%d polarity:%d)\n",
+ ah->rf_kill.gpio, ah->rf_kill.polarity);
+ ath5k_hw_set_gpio_output(ah, ah->rf_kill.gpio);
+ ath5k_hw_set_gpio(ah, ah->rf_kill.gpio, !ah->rf_kill.polarity);
}
-static inline void ath5k_rfkill_enable(struct ath5k_softc *sc)
+static inline void ath5k_rfkill_enable(struct ath5k_hw *ah)
{
- ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill enable (gpio:%d polarity:%d)\n",
- sc->rf_kill.gpio, sc->rf_kill.polarity);
- ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio);
- ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, sc->rf_kill.polarity);
+ ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "rfkill enable (gpio:%d polarity:%d)\n",
+ ah->rf_kill.gpio, ah->rf_kill.polarity);
+ ath5k_hw_set_gpio_output(ah, ah->rf_kill.gpio);
+ ath5k_hw_set_gpio(ah, ah->rf_kill.gpio, ah->rf_kill.polarity);
}
-static inline void ath5k_rfkill_set_intr(struct ath5k_softc *sc, bool enable)
+static inline void ath5k_rfkill_set_intr(struct ath5k_hw *ah, bool enable)
{
- struct ath5k_hw *ah = sc->ah;
u32 curval;
- ath5k_hw_set_gpio_input(ah, sc->rf_kill.gpio);
- curval = ath5k_hw_get_gpio(ah, sc->rf_kill.gpio);
- ath5k_hw_set_gpio_intr(ah, sc->rf_kill.gpio, enable ?
+ ath5k_hw_set_gpio_input(ah, ah->rf_kill.gpio);
+ curval = ath5k_hw_get_gpio(ah, ah->rf_kill.gpio);
+ ath5k_hw_set_gpio_intr(ah, ah->rf_kill.gpio, enable ?
!!curval : !curval);
}
static bool
-ath5k_is_rfkill_set(struct ath5k_softc *sc)
+ath5k_is_rfkill_set(struct ath5k_hw *ah)
{
/* configuring GPIO for input for some reason disables rfkill */
- /*ath5k_hw_set_gpio_input(sc->ah, sc->rf_kill.gpio);*/
- return ath5k_hw_get_gpio(sc->ah, sc->rf_kill.gpio) ==
- sc->rf_kill.polarity;
+ /*ath5k_hw_set_gpio_input(ah, ah->rf_kill.gpio);*/
+ return ath5k_hw_get_gpio(ah, ah->rf_kill.gpio) ==
+ ah->rf_kill.polarity;
}
static void
ath5k_tasklet_rfkill_toggle(unsigned long data)
{
- struct ath5k_softc *sc = (void *)data;
+ struct ath5k_hw *ah = (void *)data;
bool blocked;
- blocked = ath5k_is_rfkill_set(sc);
- wiphy_rfkill_set_hw_state(sc->hw->wiphy, blocked);
+ blocked = ath5k_is_rfkill_set(ah);
+ wiphy_rfkill_set_hw_state(ah->hw->wiphy, blocked);
}
void
ath5k_rfkill_hw_start(struct ath5k_hw *ah)
{
- struct ath5k_softc *sc = ah->ah_sc;
-
/* read rfkill GPIO configuration from EEPROM header */
- sc->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin;
- sc->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol;
+ ah->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin;
+ ah->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol;
- tasklet_init(&sc->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle,
- (unsigned long)sc);
+ tasklet_init(&ah->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle,
+ (unsigned long)ah);
- ath5k_rfkill_disable(sc);
+ ath5k_rfkill_disable(ah);
/* enable interrupt for rfkill switch */
if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
- ath5k_rfkill_set_intr(sc, true);
+ ath5k_rfkill_set_intr(ah, true);
}
void
ath5k_rfkill_hw_stop(struct ath5k_hw *ah)
{
- struct ath5k_softc *sc = ah->ah_sc;
-
/* disable interrupt for rfkill switch */
if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
- ath5k_rfkill_set_intr(sc, false);
+ ath5k_rfkill_set_intr(ah, false);
- tasklet_kill(&sc->rf_kill.toggleq);
+ tasklet_kill(&ah->rf_kill.toggleq);
/* enable RFKILL when stopping HW so Wifi LED is turned off */
- ath5k_rfkill_enable(sc);
+ ath5k_rfkill_enable(ah);
}
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
index 929c68cdf8a..0244a36ba95 100644
--- a/drivers/net/wireless/ath/ath5k/sysfs.c
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -10,19 +10,23 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- struct ath5k_softc *sc = dev_get_drvdata(dev); \
- return snprintf(buf, PAGE_SIZE, "%d\n", get); \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_hw *ah = hw->priv; \
+ return snprintf(buf, PAGE_SIZE, "%d\n", get); \
} \
\
static ssize_t ath5k_attr_store_##name(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
- struct ath5k_softc *sc = dev_get_drvdata(dev); \
- int val; \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_hw *ah = hw->priv; \
+ int val, ret; \
\
- val = (int)simple_strtoul(buf, NULL, 10); \
- set(sc->ah, val); \
+ ret = kstrtoint(buf, 10, &val); \
+ if (ret < 0) \
+ return ret; \
+ set(ah, val); \
return count; \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, \
@@ -33,25 +37,26 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- struct ath5k_softc *sc = dev_get_drvdata(dev); \
- return snprintf(buf, PAGE_SIZE, "%d\n", get); \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_hw *ah = hw->priv; \
+ return snprintf(buf, PAGE_SIZE, "%d\n", get); \
} \
static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
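
Replacing simple_strtoul() with kstrtoint() makes the store handlers reject malformed input instead of silently parsing a numeric prefix. A standalone userspace analogue of that stricter parse (strtol plus a full-consumption check standing in for kstrtoint) looks like this:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for kstrtoint(): parse a base-10 int,
 * fail on empty input, trailing junk, or overflow. */
static int parse_int_strict(const char *buf, int *val)
{
    char *end;
    long v;

    errno = 0;
    v = strtol(buf, &end, 10);
    if (end == buf)
        return -EINVAL;
    if (*end == '\n')
        end++;
    if (*end != '\0')
        return -EINVAL;
    if (errno == ERANGE || v < INT_MIN || v > INT_MAX)
        return -ERANGE;
    *val = (int)v;
    return 0;
}

int main(void)
{
    int v;

    printf("\"42\\n\" -> %d\n", parse_int_strict("42\n", &v)); /* 0, v == 42 */
    printf("\"4x2\"  -> %d\n", parse_int_strict("4x2", &v));   /* -EINVAL */
    return 0;
}
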
/*** ANI ***/
-SIMPLE_SHOW_STORE(ani_mode, sc->ani_state.ani_mode, ath5k_ani_init);
-SIMPLE_SHOW_STORE(noise_immunity_level, sc->ani_state.noise_imm_level,
+SIMPLE_SHOW_STORE(ani_mode, ah->ani_state.ani_mode, ath5k_ani_init);
+SIMPLE_SHOW_STORE(noise_immunity_level, ah->ani_state.noise_imm_level,
ath5k_ani_set_noise_immunity_level);
-SIMPLE_SHOW_STORE(spur_level, sc->ani_state.spur_level,
+SIMPLE_SHOW_STORE(spur_level, ah->ani_state.spur_level,
ath5k_ani_set_spur_immunity_level);
-SIMPLE_SHOW_STORE(firstep_level, sc->ani_state.firstep_level,
+SIMPLE_SHOW_STORE(firstep_level, ah->ani_state.firstep_level,
ath5k_ani_set_firstep_level);
-SIMPLE_SHOW_STORE(ofdm_weak_signal_detection, sc->ani_state.ofdm_weak_sig,
+SIMPLE_SHOW_STORE(ofdm_weak_signal_detection, ah->ani_state.ofdm_weak_sig,
ath5k_ani_set_ofdm_weak_signal_detection);
-SIMPLE_SHOW_STORE(cck_weak_signal_detection, sc->ani_state.cck_weak_sig,
+SIMPLE_SHOW_STORE(cck_weak_signal_detection, ah->ani_state.cck_weak_sig,
ath5k_ani_set_cck_weak_signal_detection);
-SIMPLE_SHOW(spur_level_max, sc->ani_state.max_spur_level);
+SIMPLE_SHOW(spur_level_max, ah->ani_state.max_spur_level);
static ssize_t ath5k_attr_show_noise_immunity_level_max(struct device *dev,
struct device_attribute *attr,
@@ -93,14 +98,14 @@ static struct attribute_group ath5k_attribute_group_ani = {
/*** register / unregister ***/
int
-ath5k_sysfs_register(struct ath5k_softc *sc)
+ath5k_sysfs_register(struct ath5k_hw *ah)
{
- struct device *dev = sc->dev;
+ struct device *dev = ah->dev;
int err;
err = sysfs_create_group(&dev->kobj, &ath5k_attribute_group_ani);
if (err) {
- ATH5K_ERR(sc, "failed to create sysfs group\n");
+ ATH5K_ERR(ah, "failed to create sysfs group\n");
return err;
}
@@ -108,9 +113,9 @@ ath5k_sysfs_register(struct ath5k_softc *sc)
}
void
-ath5k_sysfs_unregister(struct ath5k_softc *sc)
+ath5k_sysfs_unregister(struct ath5k_hw *ah)
{
- struct device *dev = sc->dev;
+ struct device *dev = ah->dev;
sysfs_remove_group(&dev->kobj, &ath5k_attribute_group_ani);
}
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
index 2de68adb624..c741c871f4e 100644
--- a/drivers/net/wireless/ath/ath5k/trace.h
+++ b/drivers/net/wireless/ath/ath5k/trace.h
@@ -12,22 +12,19 @@ static inline void trace_ ## name(proto) {}
struct sk_buff;
-#define PRIV_ENTRY __field(struct ath5k_softc *, priv)
-#define PRIV_ASSIGN __entry->priv = priv
-
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ath5k
TRACE_EVENT(ath5k_rx,
- TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb),
+ TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb),
TP_ARGS(priv, skb),
TP_STRUCT__entry(
- PRIV_ENTRY
+ __field(struct ath5k_hw *, priv)
__field(unsigned long, skbaddr)
__dynamic_array(u8, frame, skb->len)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ __entry->priv = priv;
__entry->skbaddr = (unsigned long) skb;
memcpy(__get_dynamic_array(frame), skb->data, skb->len);
),
@@ -37,20 +34,20 @@ TRACE_EVENT(ath5k_rx,
);
TRACE_EVENT(ath5k_tx,
- TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
+ TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb,
struct ath5k_txq *q),
TP_ARGS(priv, skb, q),
TP_STRUCT__entry(
- PRIV_ENTRY
+ __field(struct ath5k_hw *, priv)
__field(unsigned long, skbaddr)
__field(u8, qnum)
__dynamic_array(u8, frame, skb->len)
),
TP_fast_assign(
- PRIV_ASSIGN;
+ __entry->priv = priv;
__entry->skbaddr = (unsigned long) skb;
__entry->qnum = (u8) q->qnum;
memcpy(__get_dynamic_array(frame), skb->data, skb->len);
@@ -63,13 +60,13 @@ TRACE_EVENT(ath5k_tx,
);
TRACE_EVENT(ath5k_tx_complete,
- TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
+ TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb,
struct ath5k_txq *q, struct ath5k_tx_status *ts),
TP_ARGS(priv, skb, q, ts),
TP_STRUCT__entry(
- PRIV_ENTRY
+ __field(struct ath5k_hw *, priv)
__field(unsigned long, skbaddr)
__field(u8, qnum)
__field(u8, ts_status)
@@ -78,7 +75,7 @@ TRACE_EVENT(ath5k_tx_complete,
),
TP_fast_assign(
- PRIV_ASSIGN;
+ __entry->priv = priv;
__entry->skbaddr = (unsigned long) skb;
__entry->qnum = (u8) q->qnum;
__entry->ts_status = ts->ts_status;
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 5b49cd03bfd..0b36fcf8a28 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -27,6 +27,10 @@ static const struct platform_device_id ath9k_platform_id_table[] = {
.driver_data = AR5416_AR9100_DEVID,
},
{
+ .name = "ar933x_wmac",
+ .driver_data = AR9300_DEVID_AR9330,
+ },
+ {
.name = "ar934x_wmac",
.driver_data = AR9300_DEVID_AR9340,
},
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 441bb33f17a..fac2c6da6ca 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -627,6 +627,11 @@ static void ar5008_hw_init_bb(struct ath_hw *ah,
else
synthDelay /= 10;
+ if (IS_CHAN_HALF_RATE(chan))
+ synthDelay *= 2;
+ else if (IS_CHAN_QUARTER_RATE(chan))
+ synthDelay *= 4;
+
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
udelay(synthDelay + BASE_ACTIVATE_DELAY);
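
Half- and quarter-rate channels clock the baseband slower, so the synthesizer settling delay has to be stretched by 2x or 4x before the baseband is activated. A small standalone sketch of that scaling; the delay constant and flag names here are illustrative, not the driver's definitions.

#include <stdio.h>

#define BASE_ACTIVATE_DELAY_US 100 /* illustrative value */

enum chan_rate { FULL_RATE, HALF_RATE, QUARTER_RATE };

/* Sketch: scale the synth settling delay with the channel rate. */
static unsigned int activate_delay_us(unsigned int synth_delay_us,
                                      enum chan_rate rate)
{
    if (rate == HALF_RATE)
        synth_delay_us *= 2;
    else if (rate == QUARTER_RATE)
        synth_delay_us *= 4;

    return synth_delay_us + BASE_ACTIVATE_DELAY_US;
}

int main(void)
{
    printf("full: %u us, quarter: %u us\n",
           activate_delay_us(66, FULL_RATE),
           activate_delay_us(66, QUARTER_RATE));
    return 0;
}
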
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index f344cc2b3d5..44d9d8d5649 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -309,11 +309,7 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
u8 i;
u32 val;
- if (ah->is_pciexpress != true)
- return;
-
- /* Do not touch SerDes registers */
- if (ah->config.pcie_powersave_enable == 2)
+ if (ah->is_pciexpress != true || ah->aspm_enabled != true)
return;
/* Nothing to do on restore for 11N */
@@ -499,45 +495,6 @@ void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
}
}
-/*
- * If Async FIFO is enabled, the following counters change as MAC now runs
- * at 117 Mhz instead of 88/44MHz when async FIFO is disabled.
- *
- * The values below tested for ht40 2 chain.
- * Overwrite the delay/timeouts initialized in process ini.
- */
-void ar9002_hw_update_async_fifo(struct ath_hw *ah)
-{
- if (AR_SREV_9287_13_OR_LATER(ah)) {
- REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
- AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
- AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
- AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
-
- REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
- REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
-
- REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
- AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
- REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
- AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
- }
-}
-
-/*
- * We don't enable WEP aggregation on mac80211 but we keep this
- * around for HAL unification purposes.
- */
-void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah)
-{
- if (AR_SREV_9287_13_OR_LATER(ah)) {
- REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
- AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
- }
-}
-
/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
void ar9002_hw_attach_ops(struct ath_hw *ah)
{
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 077e8a6983f..45b262fe2c2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -28,11 +28,6 @@ static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
((struct ath_desc*) ds)->ds_link = ds_link;
}
-static void ar9002_hw_get_desc_link(void *ds, u32 **ds_link)
-{
- *ds_link = &((struct ath_desc *)ds)->ds_link;
-}
-
static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
{
u32 isr = 0;
@@ -437,7 +432,6 @@ void ar9002_hw_attach_mac_ops(struct ath_hw *ah)
ops->rx_enable = ar9002_hw_rx_enable;
ops->set_desc_link = ar9002_hw_set_desc_link;
- ops->get_desc_link = ar9002_hw_get_desc_link;
ops->get_isr = ar9002_hw_get_isr;
ops->fill_txdesc = ar9002_hw_fill_txdesc;
ops->proc_txdesc = ar9002_hw_proc_txdesc;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 2fe0a34cbab..3cbbb033fce 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -111,7 +111,9 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
switch (ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) {
case 0:
- if ((freq % 20) == 0)
+ if (IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))
+ aModeRefSel = 0;
+ else if ((freq % 20) == 0)
aModeRefSel = 3;
else if ((freq % 10) == 0)
aModeRefSel = 2;
@@ -129,8 +131,9 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
channelSel = CHANSEL_5G(freq);
/* RefDivA setting */
- REG_RMW_FIELD(ah, AR_AN_SYNTH9,
- AR_AN_SYNTH9_REFDIVA, refDivA);
+ ath9k_hw_analog_shift_rmw(ah, AR_AN_SYNTH9,
+ AR_AN_SYNTH9_REFDIVA,
+ AR_AN_SYNTH9_REFDIVA_S, refDivA);
}
@@ -447,26 +450,27 @@ static void ar9002_olc_init(struct ath_hw *ah)
static u32 ar9002_hw_compute_pll_control(struct ath_hw *ah,
struct ath9k_channel *chan)
{
+ int ref_div = 5;
+ int pll_div = 0x2c;
u32 pll;
- pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
+ if (chan && IS_CHAN_5GHZ(chan) && !IS_CHAN_A_FAST_CLOCK(ah, chan)) {
+ if (AR_SREV_9280_20(ah)) {
+ ref_div = 10;
+ pll_div = 0x50;
+ } else {
+ pll_div = 0x28;
+ }
+ }
+
+ pll = SM(ref_div, AR_RTC_9160_PLL_REFDIV);
+ pll |= SM(pll_div, AR_RTC_9160_PLL_DIV);
if (chan && IS_CHAN_HALF_RATE(chan))
pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
else if (chan && IS_CHAN_QUARTER_RATE(chan))
pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
- if (chan && IS_CHAN_5GHZ(chan)) {
- if (IS_CHAN_A_FAST_CLOCK(ah, chan))
- pll = 0x142c;
- else if (AR_SREV_9280_20(ah))
- pll = 0x2850;
- else
- pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
- } else {
- pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
- }
-
return pll;
}
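
The rewritten ar9002_hw_compute_pll_control() builds the PLL word field-by-field with SM() instead of overwriting it with magic constants such as 0x2850 or 0x142c. A standalone sketch of that shift-and-mask composition; the mask and shift values below are made up for illustration and are not the AR_RTC_9160 definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout, not the real AR_RTC_9160_PLL_* values. */
#define PLL_REFDIV    0x0000001f
#define PLL_REFDIV_S  0
#define PLL_DIV       0x00003fe0
#define PLL_DIV_S     5

/* SM(): place a value into a register field (the ath9k idiom). */
#define SM(val, field) (((val) << field##_S) & (field))

static uint32_t compute_pll(int is_5ghz, int fast_clock, int is_9280_20)
{
    int ref_div = 5;
    int pll_div = 0x2c;
    uint32_t pll;

    if (is_5ghz && !fast_clock) {
        if (is_9280_20) {
            ref_div = 10;
            pll_div = 0x50;
        } else {
            pll_div = 0x28;
        }
    }

    pll = SM(ref_div, PLL_REFDIV);
    pll |= SM(pll_div, PLL_DIV);
    return pll;
}

int main(void)
{
    printf("2GHz: 0x%x, 5GHz/9280-2.0: 0x%x\n",
           compute_pll(0, 0, 0), compute_pll(1, 0, 1));
    return 0;
}
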
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index e8ac70da5ac..2339728a730 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -653,8 +653,8 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
- {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
@@ -761,7 +761,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a3ec, 0x20202020},
{0x0000a3f0, 0x00000000},
{0x0000a3f4, 0x00000246},
- {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3f8, 0x0c9bd380},
{0x0000a3fc, 0x000f0f01},
{0x0000a400, 0x8fa91f01},
{0x0000a404, 0x00000000},
@@ -780,7 +780,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a43c, 0x00100000},
{0x0000a440, 0x00000000},
{0x0000a444, 0x00000000},
- {0x0000a448, 0x06000080},
+ {0x0000a448, 0x05000080},
{0x0000a44c, 0x00000001},
{0x0000a450, 0x00010000},
{0x0000a458, 0x00000000},
@@ -1500,8 +1500,6 @@ static const u32 ar9300_2p2_mac_core[][2] = {
{0x0000816c, 0x00000000},
{0x000081c0, 0x00000000},
{0x000081c4, 0x33332210},
- {0x000081c8, 0x00000000},
- {0x000081cc, 0x00000000},
{0x000081ec, 0x00000000},
{0x000081f0, 0x00000000},
{0x000081f4, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index ff8150e46f0..c34bef1bf2b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <asm/unaligned.h>
#include "hw.h"
#include "ar9003_phy.h"
#include "ar9003_eeprom.h"
@@ -306,7 +307,7 @@ static const struct ar9300_eeprom ar9300_default = {
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
- { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
@@ -883,7 +884,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
- { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
@@ -1461,7 +1462,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
- { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
@@ -2039,7 +2040,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
- { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
@@ -2616,7 +2617,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
- { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
+ { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
{ { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
@@ -3006,11 +3007,11 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
switch (param) {
case EEP_MAC_LSW:
- return eep->macAddr[0] << 8 | eep->macAddr[1];
+ return get_unaligned_be16(eep->macAddr);
case EEP_MAC_MID:
- return eep->macAddr[2] << 8 | eep->macAddr[3];
+ return get_unaligned_be16(eep->macAddr + 2);
case EEP_MAC_MSW:
- return eep->macAddr[4] << 8 | eep->macAddr[5];
+ return get_unaligned_be16(eep->macAddr + 4);
case EEP_REG_0:
return le16_to_cpu(pBase->regDmn[0]);
case EEP_REG_1:
@@ -3038,7 +3039,7 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
case EEP_CHAIN_MASK_REDUCE:
return (pBase->miscConfiguration >> 0x3) & 0x1;
case EEP_ANT_DIV_CTL1:
- return le32_to_cpu(eep->base_ext1.ant_div_control);
+ return eep->base_ext1.ant_div_control;
default:
return 0;
}
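
get_unaligned_be16() replaces the open-coded byte shifting used to extract the 16-bit MAC words from the EEPROM byte array, avoiding any alignment assumptions. A portable userspace equivalent of what it computes:

#include <stdint.h>
#include <stdio.h>

/* Portable equivalent of get_unaligned_be16(): read two bytes
 * big-endian from an arbitrarily aligned buffer. */
static uint16_t be16_from_bytes(const uint8_t *p)
{
    return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };

    printf("lsw=0x%04x mid=0x%04x msw=0x%04x\n",
           be16_from_bytes(mac),
           be16_from_bytes(mac + 2),
           be16_from_bytes(mac + 4));
    return 0;
}
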
@@ -3324,6 +3325,8 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
read = ar9300_read_eeprom;
if (AR_SREV_9485(ah))
cptr = AR9300_BASE_ADDR_4K;
+ else if (AR_SREV_9330(ah))
+ cptr = AR9300_BASE_ADDR_512;
else
cptr = AR9300_BASE_ADDR;
ath_dbg(common, ATH_DBG_EEPROM,
@@ -3378,8 +3381,7 @@ found:
osize = length;
read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
- mchecksum = word[COMP_HDR_LEN + osize] |
- (word[COMP_HDR_LEN + osize + 1] << 8);
+ mchecksum = get_unaligned_le16(&word[COMP_HDR_LEN + osize]);
ath_dbg(common, ATH_DBG_EEPROM,
"checksum %x %x\n", checksum, mchecksum);
if (checksum == mchecksum) {
@@ -3442,7 +3444,7 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
{
int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
- if (AR_SREV_9485(ah) || AR_SREV_9340(ah))
+ if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
else {
REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
@@ -3523,7 +3525,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
}
}
- if (AR_SREV_9485(ah)) {
+ if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1);
/*
* main_lnaconf, alt_lnaconf, main_tb, alt_tb
@@ -3710,7 +3712,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
if (internal_regulator) {
- if (AR_SREV_9485(ah)) {
+ if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
int reg_pmu_set;
reg_pmu_set = REG_READ(ah, AR_PHY_PMU2) & ~AR_PHY_PMU2_PGM;
@@ -3718,9 +3720,24 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
return;
- reg_pmu_set = (5 << 1) | (7 << 4) | (1 << 8) |
- (2 << 14) | (6 << 17) | (1 << 20) |
- (3 << 24) | (1 << 28);
+ if (AR_SREV_9330(ah)) {
+ if (ah->is_clk_25mhz) {
+ reg_pmu_set = (3 << 1) | (8 << 4) |
+ (3 << 8) | (1 << 14) |
+ (6 << 17) | (1 << 20) |
+ (3 << 24);
+ } else {
+ reg_pmu_set = (4 << 1) | (7 << 4) |
+ (3 << 8) | (1 << 14) |
+ (6 << 17) | (1 << 20) |
+ (3 << 24);
+ }
+ } else {
+ reg_pmu_set = (5 << 1) | (7 << 4) |
+ (2 << 8) | (2 << 14) |
+ (6 << 17) | (1 << 20) |
+ (3 << 24) | (1 << 28);
+ }
REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set);
if (!is_pmu_set(ah, AR_PHY_PMU1, reg_pmu_set))
@@ -3751,7 +3768,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
AR_RTC_REG_CONTROL1_SWREG_PROGRAM);
}
} else {
- if (AR_SREV_9485(ah)) {
+ if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0);
while (REG_READ_FIELD(ah, AR_PHY_PMU2,
AR_PHY_PMU2_PGM))
@@ -3795,9 +3812,9 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
ar9003_hw_drive_strength_apply(ah);
ar9003_hw_atten_apply(ah, chan);
- if (!AR_SREV_9340(ah))
+ if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah))
ar9003_hw_internal_regulator_apply(ah);
- if (AR_SREV_9485(ah) || AR_SREV_9340(ah))
+ if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
ar9003_hw_apply_tuning_caps(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 392bf0f8ff1..ad2bb2bf4e8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -19,6 +19,8 @@
#include "ar9003_2p2_initvals.h"
#include "ar9485_initvals.h"
#include "ar9340_initvals.h"
+#include "ar9330_1p1_initvals.h"
+#include "ar9330_1p2_initvals.h"
/* General hardware code for the AR9003 hardware family */
@@ -29,7 +31,113 @@
*/
static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
{
- if (AR_SREV_9340(ah)) {
+ if (AR_SREV_9330_11(ah)) {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9331_1p1_mac_core,
+ ARRAY_SIZE(ar9331_1p1_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9331_1p1_mac_postamble,
+ ARRAY_SIZE(ar9331_1p1_mac_postamble), 5);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9331_1p1_baseband_core,
+ ARRAY_SIZE(ar9331_1p1_baseband_core), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9331_1p1_baseband_postamble,
+ ARRAY_SIZE(ar9331_1p1_baseband_postamble), 5);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9331_1p1_radio_core,
+ ARRAY_SIZE(ar9331_1p1_radio_core), 2);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST], NULL, 0, 0);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9331_1p1_soc_preamble,
+ ARRAY_SIZE(ar9331_1p1_soc_preamble), 2);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9331_1p1_soc_postamble,
+ ARRAY_SIZE(ar9331_1p1_soc_postamble), 2);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_rx_gain_1p1,
+ ARRAY_SIZE(ar9331_common_rx_gain_1p1), 2);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_lowest_ob_db_tx_gain_1p1,
+ ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p1),
+ 5);
+
+ /* additional clock settings */
+ if (ah->is_clk_25mhz)
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9331_1p1_xtal_25M,
+ ARRAY_SIZE(ar9331_1p1_xtal_25M), 2);
+ else
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9331_1p1_xtal_40M,
+ ARRAY_SIZE(ar9331_1p1_xtal_40M), 2);
+ } else if (AR_SREV_9330_12(ah)) {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar9331_1p2_mac_core,
+ ARRAY_SIZE(ar9331_1p2_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar9331_1p2_mac_postamble,
+ ARRAY_SIZE(ar9331_1p2_mac_postamble), 5);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar9331_1p2_baseband_core,
+ ARRAY_SIZE(ar9331_1p2_baseband_core), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar9331_1p2_baseband_postamble,
+ ARRAY_SIZE(ar9331_1p2_baseband_postamble), 5);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar9331_1p2_radio_core,
+ ARRAY_SIZE(ar9331_1p2_radio_core), 2);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST], NULL, 0, 0);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar9331_1p2_soc_preamble,
+ ARRAY_SIZE(ar9331_1p2_soc_preamble), 2);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar9331_1p2_soc_postamble,
+ ARRAY_SIZE(ar9331_1p2_soc_postamble), 2);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_rx_gain_1p2,
+ ARRAY_SIZE(ar9331_common_rx_gain_1p2), 2);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_lowest_ob_db_tx_gain_1p2,
+ ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p2),
+ 5);
+
+ /* additional clock settings */
+ if (ah->is_clk_25mhz)
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9331_1p2_xtal_25M,
+ ARRAY_SIZE(ar9331_1p2_xtal_25M), 2);
+ else
+ INIT_INI_ARRAY(&ah->iniModesAdditional,
+ ar9331_1p2_xtal_40M,
+ ARRAY_SIZE(ar9331_1p2_xtal_40M), 2);
+ } else if (AR_SREV_9340(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -220,7 +328,17 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
switch (ar9003_hw_get_tx_gain_idx(ah)) {
case 0:
default:
- if (AR_SREV_9340(ah))
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_lowest_ob_db_tx_gain_1p2,
+ ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p2),
+ 5);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_lowest_ob_db_tx_gain_1p1,
+ ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p1),
+ 5);
+ else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
@@ -237,7 +355,17 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
5);
break;
case 1:
- if (AR_SREV_9340(ah))
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_high_ob_db_tx_gain_1p2,
+ ARRAY_SIZE(ar9331_modes_high_ob_db_tx_gain_1p2),
+ 5);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_high_ob_db_tx_gain_1p1,
+ ARRAY_SIZE(ar9331_modes_high_ob_db_tx_gain_1p1),
+ 5);
+ else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
@@ -254,7 +382,17 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
5);
break;
case 2:
- if (AR_SREV_9340(ah))
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_low_ob_db_tx_gain_1p2,
+ ARRAY_SIZE(ar9331_modes_low_ob_db_tx_gain_1p2),
+ 5);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_low_ob_db_tx_gain_1p1,
+ ARRAY_SIZE(ar9331_modes_low_ob_db_tx_gain_1p1),
+ 5);
+ else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
@@ -271,7 +409,17 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
5);
break;
case 3:
- if (AR_SREV_9340(ah))
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_high_power_tx_gain_1p2,
+ ARRAY_SIZE(ar9331_modes_high_power_tx_gain_1p2),
+ 5);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9331_modes_high_power_tx_gain_1p1,
+ ARRAY_SIZE(ar9331_modes_high_power_tx_gain_1p1),
+ 5);
+ else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
@@ -295,7 +443,17 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
switch (ar9003_hw_get_rx_gain_idx(ah)) {
case 0:
default:
- if (AR_SREV_9340(ah))
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_rx_gain_1p2,
+ ARRAY_SIZE(ar9331_common_rx_gain_1p2),
+ 2);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_rx_gain_1p1,
+ ARRAY_SIZE(ar9331_common_rx_gain_1p1),
+ 2);
+ else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9340Common_rx_gain_table_1p0,
ARRAY_SIZE(ar9340Common_rx_gain_table_1p0),
@@ -312,7 +470,17 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
2);
break;
case 1:
- if (AR_SREV_9340(ah))
+ if (AR_SREV_9330_12(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_wo_xlna_rx_gain_1p2,
+ ARRAY_SIZE(ar9331_common_wo_xlna_rx_gain_1p2),
+ 2);
+ else if (AR_SREV_9330_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar9331_common_wo_xlna_rx_gain_1p1,
+ ARRAY_SIZE(ar9331_common_wo_xlna_rx_gain_1p1),
+ 2);
+ else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9340Common_wo_xlna_rx_gain_table_1p0,
ARRAY_SIZE(ar9340Common_wo_xlna_rx_gain_table_1p0),
@@ -351,11 +519,7 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
int restore,
int power_off)
{
- if (ah->is_pciexpress != true)
- return;
-
- /* Do not touch SerDes registers */
- if (ah->config.pcie_powersave_enable == 2)
+ if (ah->is_pciexpress != true || ah->aspm_enabled != true)
return;
/* Nothing to do on restore for 11N */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 10d71f7d3fc..8ff0b88a29b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -43,13 +43,6 @@ static void ar9003_hw_set_desc_link(void *ds, u32 ds_link)
ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
}
-static void ar9003_hw_get_desc_link(void *ds, u32 **ds_link)
-{
- struct ar9003_txc *ads = ds;
-
- *ds_link = &ads->link;
-}
-
static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
{
u32 isr = 0;
@@ -236,6 +229,7 @@ static void ar9003_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
struct ath_tx_status *ts)
{
+ struct ar9003_txc *txc = (struct ar9003_txc *) ds;
struct ar9003_txs *ads;
u32 status;
@@ -245,7 +239,11 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
if ((status & AR_TxDone) == 0)
return -EINPROGRESS;
- ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
+ ts->qid = MS(ads->ds_info, AR_TxQcuNum);
+ if (!txc || (MS(txc->info, AR_TxQcuNum) == ts->qid))
+ ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
+ else
+ return -ENOENT;
if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
(MS(ads->ds_info, AR_TxRxDesc) != 1)) {
@@ -261,7 +259,6 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
ts->ts_seqnum = MS(status, AR_SeqNum);
ts->tid = MS(status, AR_TxTid);
- ts->qid = MS(ads->ds_info, AR_TxQcuNum);
ts->desc_id = MS(ads->status1, AR_TxDescId);
ts->ts_tstamp = ads->status4;
ts->ts_status = 0;
@@ -498,7 +495,6 @@ void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
ops->rx_enable = ar9003_hw_rx_enable;
ops->set_desc_link = ar9003_hw_set_desc_link;
- ops->get_desc_link = ar9003_hw_get_desc_link;
ops->get_isr = ar9003_hw_get_isr;
ops->fill_txdesc = ar9003_hw_fill_txdesc;
ops->proc_txdesc = ar9003_hw_proc_txdesc;
@@ -629,8 +625,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (rxsp->status11 & AR_MichaelErr)
rxs->rs_status |= ATH9K_RXERR_MIC;
-
- if (rxsp->status11 & AR_KeyMiss)
+ else if (rxsp->status11 & AR_KeyMiss)
rxs->rs_status |= ATH9K_RXERR_DECRYPT;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index e4d6a87ec53..f80d1d63398 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -21,6 +21,36 @@ void ar9003_paprd_enable(struct ath_hw *ah, bool val)
{
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath9k_channel *chan = ah->curchan;
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+
+ /*
+ * 3 bits of modalHeader5G.papdRateMaskHt20
+ * are used for sub-band disabling of PAPRD.
+ * 5G band is divided into 3 sub-bands -- upper,
+ * middle, lower.
+ * if bit 30 of modalHeader5G.papdRateMaskHt20 is set
+ * -- disable PAPRD for upper band 5GHz
+ * if bit 29 of modalHeader5G.papdRateMaskHt20 is set
+ * -- disable PAPRD for middle band 5GHz
+ * if bit 28 of modalHeader5G.papdRateMaskHt20 is set
+ * -- disable PAPRD for lower band 5GHz
+ */
+
+ if (IS_CHAN_5GHZ(chan)) {
+ if (chan->channel >= UPPER_5G_SUB_BAND_START) {
+ if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20)
+ & BIT(30))
+ val = false;
+ } else if (chan->channel >= MID_5G_SUB_BAND_START) {
+ if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20)
+ & BIT(29))
+ val = false;
+ } else {
+ if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20)
+ & BIT(28))
+ val = false;
+ }
+ }
if (val) {
ah->paprd_table_write_done = true;
@@ -46,11 +76,10 @@ EXPORT_SYMBOL(ar9003_paprd_enable);
static int ar9003_get_training_power_2g(struct ath_hw *ah)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- struct ar9300_modal_eep_header *hdr = &eep->modalHeader2G;
+ struct ath9k_channel *chan = ah->curchan;
unsigned int power, scale, delta;
- scale = MS(le32_to_cpu(hdr->papdRateMaskHt20), AR9300_PAPRD_SCALE_1);
+ scale = ar9003_get_paprd_scale_factor(ah, chan);
power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
@@ -67,20 +96,10 @@ static int ar9003_get_training_power_2g(struct ath_hw *ah)
static int ar9003_get_training_power_5g(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- struct ar9300_modal_eep_header *hdr = &eep->modalHeader5G;
struct ath9k_channel *chan = ah->curchan;
unsigned int power, scale, delta;
- if (chan->channel >= 5700)
- scale = MS(le32_to_cpu(hdr->papdRateMaskHt20),
- AR9300_PAPRD_SCALE_1);
- else if (chan->channel >= 5400)
- scale = MS(le32_to_cpu(hdr->papdRateMaskHt40),
- AR9300_PAPRD_SCALE_2);
- else
- scale = MS(le32_to_cpu(hdr->papdRateMaskHt40),
- AR9300_PAPRD_SCALE_1);
+ scale = ar9003_get_paprd_scale_factor(ah, chan);
if (IS_CHAN_HT40(chan))
power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE8,
@@ -94,7 +113,23 @@ static int ar9003_get_training_power_5g(struct ath_hw *ah)
if (delta > scale)
return -1;
- power += 2 * get_streams(common->tx_chainmask);
+ switch (get_streams(common->tx_chainmask)) {
+ case 1:
+ delta = 6;
+ break;
+ case 2:
+ delta = 4;
+ break;
+ case 3:
+ delta = 2;
+ break;
+ default:
+ delta = 0;
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Invalid tx-chainmask: %u\n", common->tx_chainmask);
+ }
+
+ power += delta;
return power;
}
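
Instead of adding 2 units per transmit chain, the 5GHz training power now adds a fixed headroom that shrinks as the stream count grows (6/4/2 for 1/2/3 streams, nothing for an invalid chainmask). A condensed standalone sketch of that lookup; the power units are the driver's own (assumed half-dB steps).

#include <stdio.h>

/* Headroom added to the 5GHz PAPRD training power per stream count. */
static int training_power_delta(int streams)
{
    switch (streams) {
    case 1: return 6;
    case 2: return 4;
    case 3: return 2;
    default: return 0;  /* invalid chainmask: add nothing */
    }
}

int main(void)
{
    for (int s = 1; s <= 3; s++)
        printf("%d stream(s): +%d\n", s, training_power_delta(s));
    return 0;
}
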
@@ -119,15 +154,16 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
else
training_power = ar9003_get_training_power_5g(ah);
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "Training power: %d, Target power: %d\n",
+ training_power, ah->paprd_target_power);
+
if (training_power < 0) {
ath_dbg(common, ATH_DBG_CALIBRATE,
"PAPRD target power delta out of range");
return -ERANGE;
}
ah->paprd_training_power = training_power;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Training power: %d, Target power: %d\n",
- ah->paprd_training_power, ah->paprd_target_power);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2AM, AR_PHY_PAPRD_AM2AM_MASK,
ah->paprd_ratemask);
@@ -230,7 +266,7 @@ static void ar9003_paprd_get_gain_table(struct ath_hw *ah)
memset(entry, 0, sizeof(ah->paprd_gain_table_entries));
memset(index, 0, sizeof(ah->paprd_gain_table_index));
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < PAPRD_GAIN_TABLE_ENTRIES; i++) {
entry[i] = REG_READ(ah, reg);
index[i] = (entry[i] >> 24) & 0xff;
reg += 4;
@@ -240,13 +276,13 @@ static void ar9003_paprd_get_gain_table(struct ath_hw *ah)
static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain,
int target_power)
{
- int olpc_gain_delta = 0;
+ int olpc_gain_delta = 0, cl_gain_mod;
int alpha_therm, alpha_volt;
int therm_cal_value, volt_cal_value;
int therm_value, volt_value;
int thermal_gain_corr, voltage_gain_corr;
int desired_scale, desired_gain = 0;
- u32 reg;
+ u32 reg_olpc = 0, reg_cl_gain = 0;
REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
@@ -265,15 +301,29 @@ static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain,
volt_value = REG_READ_FIELD(ah, AR_PHY_BB_THERM_ADC_4,
AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE);
- if (chain == 0)
- reg = AR_PHY_TPC_11_B0;
- else if (chain == 1)
- reg = AR_PHY_TPC_11_B1;
- else
- reg = AR_PHY_TPC_11_B2;
+ switch (chain) {
+ case 0:
+ reg_olpc = AR_PHY_TPC_11_B0;
+ reg_cl_gain = AR_PHY_CL_TAB_0;
+ break;
+ case 1:
+ reg_olpc = AR_PHY_TPC_11_B1;
+ reg_cl_gain = AR_PHY_CL_TAB_1;
+ break;
+ case 2:
+ reg_olpc = AR_PHY_TPC_11_B2;
+ reg_cl_gain = AR_PHY_CL_TAB_2;
+ break;
+ default:
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "Invalid chainmask: %d\n", chain);
+ break;
+ }
- olpc_gain_delta = REG_READ_FIELD(ah, reg,
+ olpc_gain_delta = REG_READ_FIELD(ah, reg_olpc,
AR_PHY_TPC_11_OLPC_GAIN_DELTA);
+ cl_gain_mod = REG_READ_FIELD(ah, reg_cl_gain,
+ AR_PHY_CL_TAB_CL_GAIN_MOD);
if (olpc_gain_delta >= 128)
olpc_gain_delta = olpc_gain_delta - 256;
@@ -283,7 +333,7 @@ static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain,
voltage_gain_corr = (alpha_volt * (volt_value - volt_cal_value) +
(128 / 2)) / 128;
desired_gain = target_power - olpc_gain_delta - thermal_gain_corr -
- voltage_gain_corr + desired_scale;
+ voltage_gain_corr + desired_scale + cl_gain_mod;
return desired_gain;
}
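
The desired-gain computation now also folds in the per-chain closed-loop gain modifier read from the AR_PHY_CL_TAB_x registers. A plain arithmetic sketch of the combination, using example numbers for all inputs:

#include <stdio.h>

/* Sketch of the arithmetic in ar9003_get_desired_gain(): corrections
 * are subtracted, the scale and CL gain modifier are added. */
static int desired_gain(int target_power, int olpc_gain_delta,
                        int thermal_corr, int voltage_corr,
                        int desired_scale, int cl_gain_mod)
{
    /* olpc_gain_delta is an 8-bit two's-complement register field */
    if (olpc_gain_delta >= 128)
        olpc_gain_delta -= 256;

    return target_power - olpc_gain_delta - thermal_corr -
           voltage_corr + desired_scale + cl_gain_mod;
}

int main(void)
{
    /* example numbers only */
    printf("desired gain: %d\n", desired_gain(40, 250, 1, 0, 5, 3));
    return 0;
}
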
@@ -721,7 +771,7 @@ int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
desired_gain = ar9003_get_desired_gain(ah, chain, train_power);
gain_index = 0;
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < PAPRD_GAIN_TABLE_ENTRIES; i++) {
if (ah->paprd_gain_table_index[i] >= desired_gain)
break;
gain_index++;
@@ -795,7 +845,26 @@ EXPORT_SYMBOL(ar9003_paprd_init_table);
bool ar9003_paprd_is_done(struct ath_hw *ah)
{
- return !!REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ int paprd_done, agc2_pwr;
+ paprd_done = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
+
+ if (paprd_done == 0x1) {
+ agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR);
+
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ "AGC2_PWR = 0x%x training done = 0x%x\n",
+ agc2_pwr, paprd_done);
+ /*
+ * agc2_pwr should not be less than PAPRD_IDEAL_AGC2_PWR_RANGE
+ * once training is reported done; otherwise retraining is
+ * triggered to bring the value back into the ideal range.
+ */
+ if (agc2_pwr <= PAPRD_IDEAL_AGC2_PWR_RANGE)
+ paprd_done = 0;
+ }
+
+ return !!paprd_done;
}
EXPORT_SYMBOL(ar9003_paprd_is_done);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 892c48b1543..1baca8e4715 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -75,7 +75,19 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
freq = centers.synth_center;
if (freq < 4800) { /* 2 GHz, fractional mode */
- if (AR_SREV_9485(ah)) {
+ if (AR_SREV_9330(ah)) {
+ u32 chan_frac;
+ u32 div;
+
+ if (ah->is_clk_25mhz)
+ div = 75;
+ else
+ div = 120;
+
+ channelSel = (freq * 4) / div;
+ chan_frac = (((freq * 4) % div) * 0x20000) / div;
+ channelSel = (channelSel << 17) | chan_frac;
+ } else if (AR_SREV_9485(ah)) {
u32 chan_frac;
/*
@@ -104,7 +116,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
u32 chan_frac;
channelSel = (freq * 2) / 75;
- chan_frac = ((freq % 75) * 0x20000) / 75;
+ chan_frac = (((freq * 2) % 75) * 0x20000) / 75;
channelSel = (channelSel << 17) | chan_frac;
} else {
channelSel = CHANSEL_5G(freq);
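
For AR9330 the 2GHz synthesizer runs in fractional mode from either a 25MHz or 40MHz reference, so the channel word is freq*4 divided by 75 or 120 with the remainder scaled into a 17-bit fraction. A worked standalone example of that arithmetic, using the same formula as the hunk above:

#include <stdint.h>
#include <stdio.h>

/* Fractional-N channel word: integer part in the upper bits,
 * remainder scaled to a 17-bit fraction below it. */
static uint32_t chan_sel_2g(uint32_t freq_mhz, int clk_25mhz)
{
    uint32_t div = clk_25mhz ? 75 : 120;
    uint32_t sel = (freq_mhz * 4) / div;
    uint32_t frac = (((freq_mhz * 4) % div) * 0x20000) / div;

    return (sel << 17) | frac;
}

int main(void)
{
    /* channel 1 (2412 MHz), 25 MHz reference:
     * 2412*4 = 9648, 9648/75 = 128 rem 48,
     * frac = 48*0x20000/75 = 83886 -> word = (128<<17)|83886 */
    printf("0x%08x\n", chan_sel_2g(2412, 1));
    return 0;
}
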
@@ -168,7 +180,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
* is out-of-band and can be ignored.
*/
- if (AR_SREV_9485(ah) || AR_SREV_9340(ah)) {
+ if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) {
spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah,
IS_CHAN_2GHZ(chan));
if (spur_fbin_ptr[0] == 0) /* No spur */
@@ -193,7 +205,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
for (i = 0; i < max_spur_cnts; i++) {
negative = 0;
- if (AR_SREV_9485(ah) || AR_SREV_9340(ah))
+ if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i],
IS_CHAN_2GHZ(chan)) - synth_freq;
else
@@ -659,6 +671,9 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
REG_WRITE_ARRAY(&ah->iniModesAdditional,
modesIndex, regWrites);
+ if (AR_SREV_9300(ah))
+ REG_WRITE_ARRAY(&ah->iniModesAdditional, 1, regWrites);
+
if (AR_SREV_9340(ah) && !ah->is_clk_25mhz)
REG_WRITE_ARRAY(&ah->iniModesAdditional_40M, 1, regWrites);
@@ -1074,7 +1089,10 @@ static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
{
ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ;
ah->nf_2g.min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ;
- ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9300_2GHZ;
+ if (AR_SREV_9330(ah))
+ ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9330_2GHZ;
+ else
+ ah->nf_2g.nominal = AR_PHY_CCA_NOM_VAL_9300_2GHZ;
ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ;
ah->nf_5g.min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ;
ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_9300_5GHZ;
@@ -1196,8 +1214,17 @@ static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
AR_PHY_9485_ANT_DIV_ALT_LNACONF_S;
antconf->fast_div_bias = (regval & AR_PHY_9485_ANT_FAST_DIV_BIAS) >>
AR_PHY_9485_ANT_FAST_DIV_BIAS_S;
- antconf->lna1_lna2_delta = -9;
- antconf->div_group = 2;
+
+ if (AR_SREV_9330_11(ah)) {
+ antconf->lna1_lna2_delta = -9;
+ antconf->div_group = 1;
+ } else if (AR_SREV_9485(ah)) {
+ antconf->lna1_lna2_delta = -9;
+ antconf->div_group = 2;
+ } else {
+ antconf->lna1_lna2_delta = -3;
+ antconf->div_group = 0;
+ }
}
static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 443090d278e..5c590429f12 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -332,6 +332,8 @@
#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ -95
#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ -100
+#define AR_PHY_CCA_NOM_VAL_9330_2GHZ -118
+
/*
* AGC Field Definitions
*/
@@ -623,11 +625,11 @@
#define AR_PHY_65NM_CH2_RXTX1 0x16900
#define AR_PHY_65NM_CH2_RXTX2 0x16904
-#define AR_CH0_TOP2 (AR_SREV_9485(ah) ? 0x00016284 : 0x0001628c)
+#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : 0x16284)
#define AR_CH0_TOP2_XPABIASLVL 0xf000
#define AR_CH0_TOP2_XPABIASLVL_S 12
-#define AR_CH0_XTAL (AR_SREV_9485(ah) ? 0x16290 : 0x16294)
+#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : 0x16290)
#define AR_CH0_XTAL_CAPINDAC 0x7f000000
#define AR_CH0_XTAL_CAPINDAC_S 24
#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000
@@ -848,7 +850,7 @@
#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240)
#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
-#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM_BASE + 0x450 + ((_i) << 2))
+#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM1_BASE + 0x450 + ((_i) << 2))
/*
* Channel 2 Register Map
@@ -1119,6 +1121,9 @@
#define AR_PHY_POWERTX_RATE8_POWERTXHT40_5 0x3F00
#define AR_PHY_POWERTX_RATE8_POWERTXHT40_5_S 8
+#define AR_PHY_CL_TAB_CL_GAIN_MOD 0x1f
+#define AR_PHY_CL_TAB_CL_GAIN_MOD_S 0
+
void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
#endif /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
new file mode 100644
index 00000000000..f11d9b2677f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -0,0 +1,1147 @@
+/*
+ * Copyright (c) 2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9330_1P1_H
+#define INITVALS_9330_1P1_H
+
+static const u32 ar9331_1p1_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
+ {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
+ {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x00000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
+ {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
+ {0x0000a2e4, 0xfffff000, 0xfffff000, 0xfffff000, 0xfffff000},
+ {0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffe0000, 0xfffe0000},
+ {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d0, 0x000050d0},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2d000a20, 0x2d000a20},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000a22, 0x31000a22},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000a24, 0x35000a24},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000a43, 0x38000a43},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3b000e42, 0x3b000e42},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x3f000e44, 0x3f000e44},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x42000e64, 0x42000e64},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46000e66, 0x46000e66},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
+ {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
+ {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
+ {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
+ {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
+ {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
+ {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
+ {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
+ {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
+ {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
+ {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
+ {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
+ {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
+ {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
+ {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
+ {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
+ {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
+ {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
+ {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
+ {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
+ {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
+ {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
+ {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
+ {0x0000a620, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802},
+ {0x0000a624, 0x03010a03, 0x03010a03, 0x03010a03, 0x03010a03},
+ {0x0000a628, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a62c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a630, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a634, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a638, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a63c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x00016044, 0x034922db, 0x034922db, 0x034922db, 0x034922db},
+ {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
+};
+
+static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52},
+ {0x0000a2e0, 0xffb31c84, 0xffb31c84, 0xffb31c84, 0xffb31c84},
+ {0x0000a2e4, 0xff43e000, 0xff43e000, 0xff43e000, 0xff43e000},
+ {0x0000a2e8, 0xfffc0000, 0xfffc0000, 0xfffc0000, 0xfffc0000},
+ {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x3d001620, 0x3d001620},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x3f001621, 0x3f001621},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x42001640, 0x42001640},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x44001641, 0x44001641},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x46001642, 0x46001642},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49001644, 0x49001644},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4c001a81, 0x4c001a81},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4f001a83, 0x4f001a83},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x52001c84, 0x52001c84},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001ce3, 0x55001ce3},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x59001ce5, 0x59001ce5},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5d001ce9, 0x5d001ce9},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x64001eec, 0x64001eec},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x64001eec, 0x64001eec},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x64001eec, 0x64001eec},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x64001eec, 0x64001eec},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x64001eec, 0x64001eec},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x64001eec, 0x64001eec},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x64001eec, 0x64001eec},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x64001eec, 0x64001eec},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x64001eec, 0x64001eec},
+ {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
+ {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
+ {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
+ {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
+ {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
+ {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
+ {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
+ {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
+ {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
+ {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
+ {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
+ {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
+ {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
+ {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
+ {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
+ {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
+ {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
+ {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
+ {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
+ {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
+ {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
+ {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
+ {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
+ {0x0000a620, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802},
+ {0x0000a624, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x0280ca03},
+ {0x0000a628, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a62c, 0x04015005, 0x04015005, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04015005, 0x04015005, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04015005, 0x04015005, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04015005, 0x04015005, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04015005, 0x04015005, 0x04015005, 0x04015005},
+};
+
+static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
+ {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
+ {0x0000a2e4, 0xfffff000, 0xfffff000, 0xfffff000, 0xfffff000},
+ {0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffe0000, 0xfffe0000},
+ {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d0, 0x000050d0},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2d000a20, 0x2d000a20},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000a22, 0x31000a22},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000a24, 0x35000a24},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000a43, 0x38000a43},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3b000e42, 0x3b000e42},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x3f000e44, 0x3f000e44},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x42000e64, 0x42000e64},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46000e66, 0x46000e66},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
+ {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
+ {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
+ {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
+ {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
+ {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
+ {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
+ {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
+ {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
+ {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
+ {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
+ {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
+ {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
+ {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
+ {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
+ {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
+ {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
+ {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
+ {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
+ {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
+ {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
+ {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
+ {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
+ {0x0000a620, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802},
+ {0x0000a624, 0x03010a03, 0x03010a03, 0x03010a03, 0x03010a03},
+ {0x0000a628, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a62c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a630, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a634, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a638, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a63c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x00016044, 0x034922db, 0x034922db, 0x034922db, 0x034922db},
+ {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
+};
+
+static const u32 ar9331_1p1_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar9331_1p1_xtal_25M[][2] = {
+ /* Addr allmodes */
+ {0x00007038, 0x000002f8},
+ {0x00008244, 0x0010f3d7},
+ {0x0000824c, 0x0001e7ae},
+ {0x0001609c, 0x0f508f29},
+};
+
+static const u32 ar9331_1p1_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73800000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x00016044, 0x03db62db},
+ {0x00016048, 0x6c924268},
+ {0x0001604c, 0x000f0278},
+ {0x00016050, 0x4db6db8c},
+ {0x00016054, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f081c},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd411eb84},
+ {0x000160a0, 0xc2108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160ac, 0x24651800},
+ {0x000160b0, 0x03284f3e},
+ {0x000160b4, 0x92480040},
+ {0x000160c0, 0x006db6db},
+ {0x000160c4, 0x0186db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6de6c300},
+ {0x000160d0, 0x14500820},
+ {0x00016100, 0x04cb0001},
+ {0x00016104, 0xfff80015},
+ {0x00016108, 0x00080010},
+ {0x0001610c, 0x00170000},
+ {0x00016140, 0x10804000},
+ {0x00016144, 0x01884080},
+ {0x00016148, 0x000080c0},
+ {0x00016280, 0x01000015},
+ {0x00016284, 0x14d20000},
+ {0x00016288, 0x00318000},
+ {0x0001628c, 0x50000000},
+ {0x00016290, 0x4b96210f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+};
+
+static const u32 ar9331_1p1_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
+};
+
+static const u32 ar9331_common_wo_xlna_rx_gain_1p1[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x24242428},
+ {0x0000a098, 0x171e1e1e},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
+
+static const u32 ar9331_1p1_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a8f6b},
+ {0x0000980c, 0x04800000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0x00000000},
+ {0x00009c08, 0x03200000},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x1883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c00400},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038233c},
+ {0x00009e24, 0x9927b515},
+ {0x00009e28, 0x12ef0200},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000000},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+};
+
+static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
+ {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
+ {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
+ {0x0000a2e4, 0xfffff000, 0xfffff000, 0xfffff000, 0xfffff000},
+ {0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffe0000, 0xfffe0000},
+ {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d0, 0x000050d0},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x2d000a20, 0x2d000a20},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000a22, 0x31000a22},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000a24, 0x35000a24},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000a43, 0x38000a43},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3b000e42, 0x3b000e42},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x3f000e44, 0x3f000e44},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x42000e64, 0x42000e64},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46000e66, 0x46000e66},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x4a000ea6, 0x4a000ea6},
+ {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
+ {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
+ {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
+ {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
+ {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
+ {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
+ {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
+ {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
+ {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
+ {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
+ {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
+ {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
+ {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
+ {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
+ {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
+ {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
+ {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
+ {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
+ {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
+ {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
+ {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
+ {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
+ {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
+ {0x0000a620, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802},
+ {0x0000a624, 0x03010a03, 0x03010a03, 0x03010a03, 0x03010a03},
+ {0x0000a628, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a62c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a630, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a634, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a638, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x0000a63c, 0x03010c04, 0x03010c04, 0x03010c04, 0x03010c04},
+ {0x00016044, 0x034922db, 0x034922db, 0x034922db, 0x034922db},
+ {0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
+};
+
+static const u32 ar9331_1p1_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9331_1p1_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000002f8},
+};
+
+static const u32 ar9331_1p1_xtal_40M[][2] = {
+ /* Addr allmodes */
+ {0x00007038, 0x000004c2},
+ {0x00008244, 0x0010f400},
+ {0x0000824c, 0x0001e800},
+ {0x0001609c, 0x0b283f31},
+};
+
+static const u32 ar9331_1p1_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008248, 0x00000800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9331_common_rx_gain_1p1[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+};
+
+static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
+ {0},
+ {3},
+ {0},
+ {0},
+};
+
+static const u32 ar9331_1p1_chansel_xtal_25M[] = {
+ 0x0101479e,
+ 0x0101d027,
+ 0x010258af,
+ 0x0102e138,
+ 0x010369c0,
+ 0x0103f249,
+ 0x01047ad1,
+ 0x0105035a,
+ 0x01058be2,
+ 0x0106146b,
+ 0x01069cf3,
+ 0x0107257c,
+ 0x0107ae04,
+ 0x0108f5b2,
+};
+
+static const u32 ar9331_1p1_chansel_xtal_40M[] = {
+ 0x00a0ccbe,
+ 0x00a12213,
+ 0x00a17769,
+ 0x00a1ccbe,
+ 0x00a22213,
+ 0x00a27769,
+ 0x00a2ccbe,
+ 0x00a32213,
+ 0x00a37769,
+ 0x00a3ccbe,
+ 0x00a42213,
+ 0x00a47769,
+ 0x00a4ccbe,
+ 0x00a5998b,
+};
+
+#endif /* INITVALS_9330_1P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
new file mode 100644
index 00000000000..0e6ca0834b3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -0,0 +1,1080 @@
+/*
+ * Copyright (c) 2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9330_1P2_H
+#define INITVALS_9330_1P2_H
+
+static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
+ {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
+ {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
+ {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
+ {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
+ {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
+ {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
+ {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
+ {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
+ {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
+ {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
+ {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
+ {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
+ {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
+ {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
+ {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
+ {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
+ {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
+ {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
+ {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
+ {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
+ {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
+ {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
+ {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
+ {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+};
+
+static const u32 ar9331_1p2_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
+ {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
+ {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x00000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
+ {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
+ {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
+ {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
+ {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
+ {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
+ {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
+ {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
+ {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
+ {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
+ {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
+ {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
+ {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
+ {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
+ {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
+ {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
+ {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
+ {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
+ {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
+ {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
+ {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
+ {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
+ {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
+ {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
+ {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+};
+
+static const u32 ar9331_modes_low_ob_db_tx_gain_1p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
+ {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
+ {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
+ {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
+ {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
+ {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
+ {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
+ {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
+ {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
+ {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
+ {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
+ {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
+ {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
+ {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
+ {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
+ {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
+ {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
+ {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
+ {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
+ {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
+ {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
+ {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
+ {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
+ {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
+ {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+};
+
+static const u32 ar9331_1p2_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar9331_1p2_xtal_25M[][2] = {
+ /* Addr allmodes */
+ {0x00007038, 0x000002f8},
+ {0x00008244, 0x0010f3d7},
+ {0x0000824c, 0x0001e7ae},
+ {0x0001609c, 0x0f508f29},
+};
+
+static const u32 ar9331_1p2_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73800000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x00016044, 0x03d6d2db},
+ {0x00016048, 0x6c924268},
+ {0x0001604c, 0x000f0278},
+ {0x00016050, 0x4db6db8c},
+ {0x00016054, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f081c},
+ {0x00016090, 0x24926490},
+ {0x00016098, 0xd411eb84},
+ {0x000160a0, 0xc2108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160ac, 0x24651800},
+ {0x000160b0, 0x03284f3e},
+ {0x000160b4, 0x92480040},
+ {0x000160c0, 0x006db6db},
+ {0x000160c4, 0x0186db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x6de6c300},
+ {0x000160d0, 0x14500820},
+ {0x00016100, 0x04cb0001},
+ {0x00016104, 0xfff80015},
+ {0x00016108, 0x00080010},
+ {0x0001610c, 0x00170000},
+ {0x00016140, 0x10804000},
+ {0x00016144, 0x01884080},
+ {0x00016148, 0x000080c0},
+ {0x00016280, 0x01000015},
+ {0x00016284, 0x14d20000},
+ {0x00016288, 0x00318000},
+ {0x0001628c, 0x50000000},
+ {0x00016290, 0x4b96210f},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00800700},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+};
+
+static const u32 ar9331_1p2_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
+};
+
+static const u32 ar9331_common_wo_xlna_rx_gain_1p2[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x24242428},
+ {0x0000a098, 0x171e1e1e},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
+
+static const u32 ar9331_1p2_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a8f6b},
+ {0x0000980c, 0x04800000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0x00000000},
+ {0x00009c08, 0x03200000},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x1883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c00400},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038233c},
+ {0x00009e24, 0x9927b515},
+ {0x00009e28, 0x12ef0200},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x803e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000000},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a640, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000001},
+};
+
+static const u32 ar9331_modes_high_power_tx_gain_1p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
+ {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+ {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+ {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+ {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+ {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+ {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
+ {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
+ {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
+ {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
+ {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
+ {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
+ {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
+ {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
+ {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
+ {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
+ {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
+ {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
+ {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
+ {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
+ {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
+ {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
+ {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
+ {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
+ {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+ {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+ {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+ {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
+ {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
+ {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
+ {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
+ {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
+ {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
+ {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
+ {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
+ {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
+ {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
+ {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
+ {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
+ {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
+ {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
+ {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
+ {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
+ {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
+ {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
+ {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
+ {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
+ {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
+ {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
+ {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
+ {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
+ {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+ {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
+};
+
+static const u32 ar9331_1p2_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9331_1p2_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000002f8},
+};
+
+static const u32 ar9331_1p2_xtal_40M[][2] = {
+ /* Addr allmodes */
+ {0x00007038, 0x000004c2},
+ {0x00008244, 0x0010f400},
+ {0x0000824c, 0x0001e800},
+ {0x0001609c, 0x0b283f31},
+};
+
+static const u32 ar9331_1p2_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c20},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081c8, 0x00000000},
+ {0x000081cc, 0x00000000},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008248, 0x00000800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9331_common_rx_gain_1p2[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x01800082},
+ {0x0000a014, 0x01820181},
+ {0x0000a018, 0x01840183},
+ {0x0000a01c, 0x01880185},
+ {0x0000a020, 0x018a0189},
+ {0x0000a024, 0x02850284},
+ {0x0000a028, 0x02890288},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x21212128},
+ {0x0000a098, 0x171c1c1c},
+ {0x0000a09c, 0x02020212},
+ {0x0000a0a0, 0x00000202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x111f1100},
+ {0x0000a0c8, 0x111d111e},
+ {0x0000a0cc, 0x111b111c},
+ {0x0000a0d0, 0x22032204},
+ {0x0000a0d4, 0x22012202},
+ {0x0000a0d8, 0x221f2200},
+ {0x0000a0dc, 0x221d221e},
+ {0x0000a0e0, 0x33013302},
+ {0x0000a0e4, 0x331f3300},
+ {0x0000a0e8, 0x4402331e},
+ {0x0000a0ec, 0x44004401},
+ {0x0000a0f0, 0x441e441f},
+ {0x0000a0f4, 0x55015502},
+ {0x0000a0f8, 0x551f5500},
+ {0x0000a0fc, 0x6602551e},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
+};
+
+#endif /* INITVALS_9330_1P2_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index f75068b4b31..46393f90f16 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -19,6 +19,7 @@
#include <linux/etherdevice.h>
#include <linux/device.h>
+#include <linux/interrupt.h>
#include <linux/leds.h>
#include <linux/completion.h>
@@ -54,8 +55,6 @@ struct ath_node;
(_l) &= ((_sz) - 1); \
} while (0)
-#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
-
#define TSF_TO_TU(_h,_l) \
((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
@@ -102,6 +101,11 @@ enum buffer_type {
#define ATH_TXSTATUS_RING_SIZE 64
+#define DS2PHYS(_dd, _ds) \
+ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
+#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
+#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
+
struct ath_descdma {
void *dd_desc;
dma_addr_t dd_desc_paddr;
@@ -179,7 +183,7 @@ enum ATH_AGGR_STATUS {
struct ath_txq {
int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
u32 axq_qnum; /* ath9k hardware queue number */
- u32 *axq_link;
+ void *axq_link;
struct list_head axq_q;
spinlock_t axq_lock;
u32 axq_depth;
@@ -188,7 +192,6 @@ struct ath_txq {
bool axq_tx_inprogress;
struct list_head axq_acq;
struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
- struct list_head txq_fifo_pending;
u8 txq_headidx;
u8 txq_tailidx;
int pending_frames;
@@ -428,6 +431,7 @@ void ath_hw_check(struct work_struct *work);
void ath_hw_pll_work(struct work_struct *work);
void ath_paprd_calibrate(struct work_struct *work);
void ath_ani_calibrate(unsigned long data);
+void ath_start_ani(struct ath_common *common);
/**********/
/* BTCOEX */
@@ -579,7 +583,7 @@ struct ath9k_vif_iter_data {
int naps; /* number of AP vifs */
int nmeshes; /* number of mesh vifs */
int nstations; /* number of station vifs */
- int nwds; /* number of nwd vifs */
+ int nwds; /* number of WDS vifs */
int nadhocs; /* number of adhoc vifs */
int nothers; /* number of vifs not specified above. */
};
@@ -669,12 +673,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
const struct ath_bus_ops *bus_ops);
void ath9k_deinit_device(struct ath_softc *sc);
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
-int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
- struct ath9k_channel *hchan);
-void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
-bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
bool ath9k_uses_beacons(int type);
#ifdef CONFIG_ATH9K_PCI
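A minimal sketch (not part of the patch) of how the two descriptor-DMA boundary macros that the ath9k.h hunk adds evaluate; the address and length below are invented for illustration only:

#include <stdio.h>

#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)

int main(void)
{
	unsigned long daddr = 0x10000f80;	/* hypothetical descriptor DMA address */
	unsigned long len = 20480;		/* hypothetical descriptor ring length */

	/* An address in the last 0x80 bytes of a 4 KB page trips the check,
	 * i.e. a descriptor placed there could straddle the page boundary. */
	printf("crosses 4KB boundary: %d\n", ATH_DESC_4KB_BOUND_CHECK(daddr));
	/* 20480 / 4096 = 5 boundaries spanned by a buffer of that length. */
	printf("4KB boundaries spanned: %lu\n", ATH_DESC_4KB_BOUND_NUM_SKIPPED(len));
	return 0;
}
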
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index d4d8ceced89..0d13ff74a68 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/dma-mapping.h>
#include "ath9k.h"
#define FUDGE 2
@@ -360,6 +361,7 @@ void ath_beacon_tasklet(unsigned long data)
struct ath_common *common = ath9k_hw_common(ah);
struct ath_buf *bf = NULL;
struct ieee80211_vif *vif;
+ struct ath_tx_status ts;
int slot;
u32 bfaddr, bc = 0;
@@ -384,7 +386,9 @@ void ath_beacon_tasklet(unsigned long data)
ath_dbg(common, ATH_DBG_BSTUCK,
"beacon is officially stuck\n");
sc->sc_flags |= SC_OP_TSF_RESET;
+ spin_lock(&sc->sc_pcu_lock);
ath_reset(sc, true);
+ spin_unlock(&sc->sc_pcu_lock);
}
return;
@@ -464,6 +468,11 @@ void ath_beacon_tasklet(unsigned long data)
ath9k_hw_txstart(ah, sc->beacon.beaconq);
sc->beacon.ast_be_xmit += bc; /* XXX per-vif? */
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ spin_lock_bh(&sc->sc_pcu_lock);
+ ath9k_hw_txprocdesc(ah, bf->bf_desc, (void *)&ts);
+ spin_unlock_bh(&sc->sc_pcu_lock);
+ }
}
}
@@ -496,7 +505,7 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
u32 nexttbtt, intval;
/* NB: the beacon interval is kept internally in TU's */
- intval = TU_TO_USEC(conf->beacon_interval & ATH9K_BEACON_PERIOD);
+ intval = TU_TO_USEC(conf->beacon_interval);
intval /= ATH_BCBUF; /* for staggered beacons */
nexttbtt = intval;
@@ -543,7 +552,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
}
memset(&bs, 0, sizeof(bs));
- intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
+ intval = conf->beacon_interval;
/*
* Setup dtim and cfp parameters according to
@@ -652,22 +661,13 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- u32 tsf, delta, intval, nexttbtt;
+ u32 tsf, intval, nexttbtt;
ath9k_reset_beacon_status(sc);
- tsf = ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE);
- intval = TU_TO_USEC(conf->beacon_interval & ATH9K_BEACON_PERIOD);
-
- if (!sc->beacon.bc_tstamp)
- nexttbtt = tsf + intval;
- else {
- if (tsf > sc->beacon.bc_tstamp)
- delta = (tsf - sc->beacon.bc_tstamp);
- else
- delta = (tsf + 1 + (~0U - sc->beacon.bc_tstamp));
- nexttbtt = tsf + intval - (delta % intval);
- }
+ intval = TU_TO_USEC(conf->beacon_interval);
+ tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval);
+ nexttbtt = tsf + intval;
ath_dbg(common, ATH_DBG_BEACON,
"IBSS nexttbtt %u intval %u (%u)\n",
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 41ce0b13988..6635c377dc0 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -50,7 +50,7 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
.bt_first_slot_time = 5,
.bt_hold_rx_clear = true,
};
- u32 i;
+ u32 i, idx;
bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
if (AR_SREV_9300_20_OR_LATER(ah))
@@ -73,8 +73,10 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
AR_BT_DISABLE_BT_ANT;
- for (i = 0; i < 32; i++)
- ah->hw_gen_timers.gen_timer_index[(debruijn32 << i) >> 27] = i;
+ for (i = 0; i < 32; i++) {
+ idx = (debruijn32 << i) >> 27;
+ ah->hw_gen_timers.gen_timer_index[idx] = i;
+ }
}
EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
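A sketch (not from the patch) of what the gen_timer_index[] table built in the loop above is for: it is the classic de Bruijn multiply/shift trick for recovering a bit position from a single-bit mask without looping. The constant below is the standard 32-bit de Bruijn value and is assumed to match the driver's debruijn32 definition:

#include <stdio.h>

#define DEBRUIJN32 0x077CB531U	/* standard B(2,5) de Bruijn constant */

static unsigned int index_of[32];

int main(void)
{
	unsigned int i, mask;

	/* Build the lookup table the same way as the loop in the hunk above. */
	for (i = 0; i < 32; i++)
		index_of[(DEBRUIJN32 << i) >> 27] = i;

	/* Multiplying by (1 << i) is a left shift, so the same table recovers
	 * the bit position of any single-bit mask in constant time. */
	mask = 1U << 19;	/* arbitrary example bit */
	printf("bit position: %u\n", index_of[(DEBRUIJN32 * mask) >> 27]);
	return 0;
}
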
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index d55ffd7d4bd..d1eb89611ff 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -176,6 +176,56 @@ static const struct file_operations fops_rx_chainmask = {
.llseek = default_llseek,
};
+static ssize_t read_file_disable_ani(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "%d\n", common->disable_ani);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_disable_ani(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long disable_ani;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (strict_strtoul(buf, 0, &disable_ani))
+ return -EINVAL;
+
+ common->disable_ani = !!disable_ani;
+
+ if (disable_ani) {
+ sc->sc_flags &= ~SC_OP_ANI_RUN;
+ del_timer_sync(&common->ani.timer);
+ } else {
+ sc->sc_flags |= SC_OP_ANI_RUN;
+ ath_start_ani(common);
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_disable_ani = {
+ .read = read_file_disable_ani,
+ .write = write_file_disable_ani,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
static ssize_t read_file_dma(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -550,6 +600,7 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
PR("MPDUs Queued: ", queued);
PR("MPDUs Completed: ", completed);
+ PR("MPDUs XRetried: ", xretries);
PR("Aggregates: ", a_aggr);
PR("AMPDUs Queued HW:", a_queued_hw);
PR("AMPDUs Queued SW:", a_queued_sw);
@@ -587,7 +638,6 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
PRQLE("axq_q empty: ", axq_q);
PRQLE("axq_acq empty: ", axq_acq);
- PRQLE("txq_fifo_pending: ", txq_fifo_pending);
for (i = 0; i < ATH_TXFIFO_DEPTH; i++) {
snprintf(tmp, sizeof(tmp) - 1, "txq_fifo[%i] empty: ", i);
PRQLE(tmp, txq_fifo[i]);
@@ -699,7 +749,6 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
char *buf;
unsigned int len = 0, size = 8000;
ssize_t retval = 0;
- const char *tmp;
unsigned int reg;
struct ath9k_vif_iter_data iter_data;
@@ -709,31 +758,14 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
if (buf == NULL)
return -ENOMEM;
- switch (sc->sc_ah->opmode) {
- case NL80211_IFTYPE_ADHOC:
- tmp = "ADHOC";
- break;
- case NL80211_IFTYPE_MESH_POINT:
- tmp = "MESH";
- break;
- case NL80211_IFTYPE_AP:
- tmp = "AP";
- break;
- case NL80211_IFTYPE_STATION:
- tmp = "STATION";
- break;
- default:
- tmp = "???";
- break;
- }
-
ath9k_ps_wakeup(sc);
len += snprintf(buf + len, size - len,
"curbssid: %pM\n"
"OP-Mode: %s(%i)\n"
"Beacon-Timer-Register: 0x%x\n",
common->curbssid,
- tmp, (int)(sc->sc_ah->opmode),
+ ath_opmode_to_string(sc->sc_ah->opmode),
+ (int)(sc->sc_ah->opmode),
REG_READ(ah, AR_BEACON_PERIOD));
reg = REG_READ(ah, AR_TIMER_MODE);
@@ -807,7 +839,10 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
else
TX_STAT_INC(qnum, a_completed);
} else {
- TX_STAT_INC(qnum, completed);
+ if (bf_isxretried(bf))
+ TX_STAT_INC(qnum, xretries);
+ else
+ TX_STAT_INC(qnum, completed);
}
if (ts->ts_status & ATH9K_TXERR_FIFO)
@@ -1160,6 +1195,8 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy, sc, &fops_rx_chainmask);
debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, sc, &fops_tx_chainmask);
+ debugfs_create_file("disable_ani", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_disable_ani);
debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
sc, &fops_regidx);
debugfs_create_file("regval", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 8ce6ad80f4e..4a04510e111 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -116,6 +116,7 @@ struct ath_tx_stats {
u32 tx_bytes_all;
u32 queued;
u32 completed;
+ u32 xretries;
u32 a_aggr;
u32 a_queued_hw;
u32 a_queued_sw;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 5b1e894f3d6..47cc95086e6 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <asm/unaligned.h>
#include "hw.h"
#include "ar9002_phy.h"
@@ -203,11 +204,11 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
case EEP_NFTHRESH_2:
return pModal->noiseFloorThreshCh[0];
case EEP_MAC_LSW:
- return pBase->macAddr[0] << 8 | pBase->macAddr[1];
+ return get_unaligned_be16(pBase->macAddr);
case EEP_MAC_MID:
- return pBase->macAddr[2] << 8 | pBase->macAddr[3];
+ return get_unaligned_be16(pBase->macAddr + 2);
case EEP_MAC_MSW:
- return pBase->macAddr[4] << 8 | pBase->macAddr[5];
+ return get_unaligned_be16(pBase->macAddr + 4);
case EEP_REG_0:
return pBase->regDmn[0];
case EEP_REG_1:
@@ -331,10 +332,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
for (j = 0; j < 32; j++) {
- reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) |
- ((pdadcValues[4 * j + 1] & 0xFF) << 8) |
- ((pdadcValues[4 * j + 2] & 0xFF) << 16)|
- ((pdadcValues[4 * j + 3] & 0xFF) << 24);
+ reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
REG_WRITE(ah, regOffset, reg32);
ath_dbg(common, ATH_DBG_EEPROM,
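The same conversion repeats in the eeprom_9287.c and eeprom_def.c hunks below (and hif_usb.c uses the 16-bit little-endian variant), so a sketch of what the get_unaligned_be16()/get_unaligned_le32() calls are equivalent to may help. Plain C stand-ins are shown; the kernel helpers additionally cope with architectures that fault on unaligned loads. The byte values are examples only:

#include <stdio.h>

static unsigned int be16_sketch(const unsigned char *p)
{
	return (p[0] << 8) | p[1];		/* big-endian pair, e.g. a MAC address word */
}

static unsigned int le32_sketch(const unsigned char *p)
{
	return (unsigned int)p[0] | ((unsigned int)p[1] << 8) |
	       ((unsigned int)p[2] << 16) | ((unsigned int)p[3] << 24);	/* little-endian PDADC word */
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };
	unsigned char pdadc[4] = { 0x10, 0x20, 0x30, 0x40 };

	printf("EEP_MAC_LSW=0x%04x reg32=0x%08x\n", be16_sketch(mac), le32_sketch(pdadc));
	return 0;
}
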
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 343fc9f946d..d6f6b192f45 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <asm/unaligned.h>
#include "hw.h"
#include "ar9002_phy.h"
@@ -195,11 +196,11 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
case EEP_NFTHRESH_2:
return pModal->noiseFloorThreshCh[0];
case EEP_MAC_LSW:
- return pBase->macAddr[0] << 8 | pBase->macAddr[1];
+ return get_unaligned_be16(pBase->macAddr);
case EEP_MAC_MID:
- return pBase->macAddr[2] << 8 | pBase->macAddr[3];
+ return get_unaligned_be16(pBase->macAddr + 2);
case EEP_MAC_MSW:
- return pBase->macAddr[4] << 8 | pBase->macAddr[5];
+ return get_unaligned_be16(pBase->macAddr + 4);
case EEP_REG_0:
return pBase->regDmn[0];
case EEP_REG_1:
@@ -434,10 +435,7 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
(672 << 2) + regChainOffset;
for (j = 0; j < 32; j++) {
- reg32 = ((pdadcValues[4*j + 0] & 0xFF) << 0)
- | ((pdadcValues[4*j + 1] & 0xFF) << 8)
- | ((pdadcValues[4*j + 2] & 0xFF) << 16)
- | ((pdadcValues[4*j + 3] & 0xFF) << 24);
+ reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
REG_WRITE(ah, regOffset, reg32);
regOffset += 4;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 17f0a680620..b9540a99261 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <asm/unaligned.h>
#include "hw.h"
#include "ar9002_phy.h"
@@ -276,11 +277,11 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
case EEP_NFTHRESH_2:
return pModal[1].noiseFloorThreshCh[0];
case EEP_MAC_LSW:
- return pBase->macAddr[0] << 8 | pBase->macAddr[1];
+ return get_unaligned_be16(pBase->macAddr);
case EEP_MAC_MID:
- return pBase->macAddr[2] << 8 | pBase->macAddr[3];
+ return get_unaligned_be16(pBase->macAddr + 2);
case EEP_MAC_MSW:
- return pBase->macAddr[4] << 8 | pBase->macAddr[5];
+ return get_unaligned_be16(pBase->macAddr + 4);
case EEP_REG_0:
return pBase->regDmn[0];
case EEP_REG_1:
@@ -831,10 +832,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
for (j = 0; j < 32; j++) {
- reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) |
- ((pdadcValues[4 * j + 1] & 0xFF) << 8) |
- ((pdadcValues[4 * j + 2] & 0xFF) << 16)|
- ((pdadcValues[4 * j + 3] & 0xFF) << 24);
+ reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
REG_WRITE(ah, regOffset, reg32);
ath_dbg(common, ATH_DBG_EEPROM,
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 260f1f37a60..d3f4a59cd45 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <asm/unaligned.h>
#include "htc.h"
/* identify firmware images */
@@ -49,6 +50,8 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
.driver_info = AR9280_USB }, /* Netgear WNDA3200 */
{ USB_DEVICE(0x083A, 0xA704),
.driver_info = AR9280_USB }, /* SMC Networks */
+ { USB_DEVICE(0x0411, 0x017f),
+ .driver_info = AR9280_USB }, /* Sony UWA-BR100 */
{ USB_DEVICE(0x0cf3, 0x20ff),
.driver_info = STORAGE_DEVICE },
@@ -127,12 +130,14 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
static void hif_usb_mgmt_cb(struct urb *urb)
{
struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
- struct hif_device_usb *hif_dev = cmd->hif_dev;
+ struct hif_device_usb *hif_dev;
bool txok = true;
if (!cmd || !cmd->skb || !cmd->hif_dev)
return;
+ hif_dev = cmd->hif_dev;
+
switch (urb->status) {
case 0:
break;
@@ -555,8 +560,8 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
ptr = (u8 *) skb->data;
- pkt_len = ptr[index] + (ptr[index+1] << 8);
- pkt_tag = ptr[index+2] + (ptr[index+3] << 8);
+ pkt_len = get_unaligned_le16(ptr + index);
+ pkt_tag = get_unaligned_le16(ptr + index + 2);
if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
RX_STAT_INC(skb_dropped);
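A sketch (simplified stubs, not the driver code) of the ordering fix in hif_usb_mgmt_cb() above: the original initialiser dereferenced cmd before the NULL checks ran, so the assignment is moved after the validation:

struct hif_device_usb;
struct cmd_buf {
	struct hif_device_usb *hif_dev;
	void *skb;
};

static void mgmt_cb_sketch(struct cmd_buf *cmd)
{
	struct hif_device_usb *hif_dev;

	if (!cmd || !cmd->skb || !cmd->hif_dev)
		return;			/* validate first ... */

	hif_dev = cmd->hif_dev;		/* ... then dereference */
	(void)hif_dev;			/* placeholder for the rest of the callback */
}
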
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index aa6a7311870..57fe22b2424 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -79,7 +79,7 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
memset(&bs, 0, sizeof(bs));
- intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+ intval = bss_conf->beacon_interval;
bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
/*
@@ -194,7 +194,7 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
u8 cmd_rsp;
u64 tsf;
- intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+ intval = bss_conf->beacon_interval;
intval /= ATH9K_HTC_MAX_BCN_VIF;
nexttbtt = intval;
@@ -250,7 +250,7 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
u8 cmd_rsp;
u64 tsf;
- intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
+ intval = bss_conf->beacon_interval;
nexttbtt = intval;
/*
@@ -427,7 +427,7 @@ static int ath9k_htc_choose_bslot(struct ath9k_htc_priv *priv,
u16 intval;
int slot;
- intval = priv->cur_beacon_conf.beacon_interval & ATH9K_BEACON_PERIOD;
+ intval = priv->cur_beacon_conf.beacon_interval;
tsf = be64_to_cpu(swba->tsf);
tsftu = TSF_TO_TU(tsf >> 32, tsf);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index aa48b3abbc4..d3ff33c71aa 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -623,11 +623,8 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
pBase9287->openLoopPwrCntl);
}
- len += snprintf(buf + len, size - len,
- "%20s : %02X:%02X:%02X:%02X:%02X:%02X\n",
- "MacAddress",
- pBase->macAddr[0], pBase->macAddr[1], pBase->macAddr[2],
- pBase->macAddr[3], pBase->macAddr[4], pBase->macAddr[5]);
+ len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+ pBase->macAddr);
if (len > size)
len = size;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 61e6d395071..3bea7ea86f0 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -754,6 +754,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
hw->wiphy->interface_modes =
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 7b779689543..7212acb2bd6 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1294,11 +1294,16 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
u32 rfilt;
mutex_lock(&priv->mutex);
- ath9k_htc_ps_wakeup(priv);
-
changed_flags &= SUPPORTED_FILTERS;
*total_flags &= SUPPORTED_FILTERS;
+ if (priv->op_flags & OP_INVALID) {
+ ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_ANY,
+ "Unable to configure filter on invalid state\n");
+ return;
+ }
+ ath9k_htc_ps_wakeup(priv);
+
priv->rxfilter = *total_flags;
rfilt = ath9k_htc_calcrxfilter(priv);
ath9k_hw_setrxfilter(priv->ah, rfilt);
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 2f3e07263fc..cb29e887538 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -39,11 +39,6 @@ static inline void ath9k_hw_set_desc_link(struct ath_hw *ah, void *ds,
ath9k_hw_ops(ah)->set_desc_link(ds, link);
}
-static inline void ath9k_hw_get_desc_link(struct ath_hw *ah, void *ds,
- u32 **link)
-{
- ath9k_hw_ops(ah)->get_desc_link(ds, link);
-}
static inline bool ath9k_hw_calibrate(struct ath_hw *ah,
struct ath9k_channel *chan,
u8 rxchainmask,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1be7c8bbef8..8dcefe74f4c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -87,7 +87,10 @@ static void ath9k_hw_set_clockrate(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
unsigned int clockrate;
- if (!ah->curchan) /* should really check for CCK instead */
+ /* AR9287 v1.3+ uses async FIFO and runs the MAC at 117 MHz */
+ if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah))
+ clockrate = 117;
+ else if (!ah->curchan) /* should really check for CCK instead */
clockrate = ATH9K_CLOCK_RATE_CCK;
else if (conf->channel->band == IEEE80211_BAND_2GHZ)
clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
@@ -99,6 +102,13 @@ static void ath9k_hw_set_clockrate(struct ath_hw *ah)
if (conf_is_ht40(conf))
clockrate *= 2;
+ if (ah->curchan) {
+ if (IS_CHAN_HALF_RATE(ah->curchan))
+ clockrate /= 2;
+ if (IS_CHAN_QUARTER_RATE(ah->curchan))
+ clockrate /= 4;
+ }
+
common->clockrate = clockrate;
}
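A sketch (not from the patch) of the clock-rate scaling that the hunk above adds: HT40 doubles the MAC clock while half- and quarter-rate (10/5 MHz) channels divide it down. The base value is passed in here rather than taken from the driver's band constants:

static unsigned int clockrate_sketch(unsigned int base_mhz, int ht40,
				     int half_rate, int quarter_rate)
{
	unsigned int clockrate = base_mhz;	/* band's OFDM or CCK clock rate */

	if (ht40)
		clockrate *= 2;			/* 40 MHz channels run twice as fast */
	if (half_rate)
		clockrate /= 2;			/* 10 MHz channels */
	if (quarter_rate)
		clockrate /= 4;			/* 5 MHz channels */
	return clockrate;
}
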
@@ -251,6 +261,15 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
case AR5416_AR9100_DEVID:
ah->hw_version.macVersion = AR_SREV_VERSION_9100;
break;
+ case AR9300_DEVID_AR9330:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9330;
+ if (ah->get_mac_revision) {
+ ah->hw_version.macRev = ah->get_mac_revision();
+ } else {
+ val = REG_READ(ah, AR_SREV);
+ ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
+ }
+ return;
case AR9300_DEVID_AR9340:
ah->hw_version.macVersion = AR_SREV_VERSION_9340;
val = REG_READ(ah, AR_SREV);
@@ -299,6 +318,14 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
}
+static void ath9k_hw_aspm_init(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (common->bus_ops->aspm_init)
+ common->bus_ops->aspm_init(common);
+}
+
/* This should work for all families including legacy */
static bool ath9k_hw_chip_test(struct ath_hw *ah)
{
@@ -359,7 +386,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.additional_swba_backoff = 0;
ah->config.ack_6mb = 0x0;
ah->config.cwm_ignore_extcca = 0;
- ah->config.pcie_powersave_enable = 0;
ah->config.pcie_clock_req = 0;
ah->config.pcie_waen = 0;
ah->config.analog_shiftreg = 1;
@@ -551,6 +577,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
case AR_SREV_VERSION_9287:
case AR_SREV_VERSION_9271:
case AR_SREV_VERSION_9300:
+ case AR_SREV_VERSION_9330:
case AR_SREV_VERSION_9485:
case AR_SREV_VERSION_9340:
break;
@@ -561,7 +588,8 @@ static int __ath9k_hw_init(struct ath_hw *ah)
return -EOPNOTSUPP;
}
- if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah))
+ if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah) ||
+ AR_SREV_9330(ah))
ah->is_pciexpress = false;
ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
@@ -577,7 +605,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (ah->is_pciexpress)
- ath9k_hw_configpcipowersave(ah, 0, 0);
+ ath9k_hw_aspm_init(ah);
else
ath9k_hw_disablepcie(ah);
@@ -604,7 +632,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
else
ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
- ah->bb_watchdog_timeout_ms = 25;
+ if (AR_SREV_9330(ah))
+ ah->bb_watchdog_timeout_ms = 85;
+ else
+ ah->bb_watchdog_timeout_ms = 25;
common->state = ATH_HW_INITIALIZED;
@@ -630,6 +661,7 @@ int ath9k_hw_init(struct ath_hw *ah)
case AR2427_DEVID_PCIE:
case AR9300_DEVID_PCIE:
case AR9300_DEVID_AR9485_PCIE:
+ case AR9300_DEVID_AR9330:
case AR9300_DEVID_AR9340:
break;
default:
@@ -722,6 +754,39 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
AR_CH0_BB_DPLL2_PLL_PWD, 0x0);
udelay(1000);
+ } else if (AR_SREV_9330(ah)) {
+ u32 ddr_dpll2, pll_control2, kd;
+
+ if (ah->is_clk_25mhz) {
+ ddr_dpll2 = 0x18e82f01;
+ pll_control2 = 0xe04a3d;
+ kd = 0x1d;
+ } else {
+ ddr_dpll2 = 0x19e82f01;
+ pll_control2 = 0x886666;
+ kd = 0x3d;
+ }
+
+ /* program DDR PLL ki and kd value */
+ REG_WRITE(ah, AR_CH0_DDR_DPLL2, ddr_dpll2);
+
+ /* program DDR PLL phase_shift */
+ REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3,
+ AR_CH0_DPLL3_PHASE_SHIFT, 0x1);
+
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
+ udelay(1000);
+
+ /* program refdiv, nint, frac to RTC register */
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL2, pll_control2);
+
+ /* program BB PLL kd and ki value */
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_DPLL2_KD, kd);
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_DPLL2_KI, 0x06);
+
+ /* program BB PLL phase_shift */
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
+ AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1);
} else if (AR_SREV_9340(ah)) {
u32 regval, pll2_divint, pll2_divfrac, refdiv;
@@ -763,7 +828,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
- if (AR_SREV_9485(ah) || AR_SREV_9340(ah))
+ if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
udelay(1000);
/* Switch the core clock for ar9271 to 117 MHz */
@@ -847,6 +912,13 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
}
}
+static void ath9k_hw_set_sifs_time(struct ath_hw *ah, u32 us)
+{
+ u32 val = ath9k_hw_mac_to_clks(ah, us - 2);
+ val = min(val, (u32) 0xFFFF);
+ REG_WRITE(ah, AR_D_GBL_IFS_SIFS, val);
+}
+
static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
{
u32 val = ath9k_hw_mac_to_clks(ah, us);
@@ -884,25 +956,60 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
void ath9k_hw_init_global_settings(struct ath_hw *ah)
{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_conf *conf = &common->hw->conf;
+ const struct ath9k_channel *chan = ah->curchan;
int acktimeout;
int slottime;
int sifstime;
+ int rx_lat = 0, tx_lat = 0, eifs = 0;
+ u32 reg;
ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
ah->misc_mode);
+ if (!chan)
+ return;
+
if (ah->misc_mode != 0)
REG_SET_BIT(ah, AR_PCU_MISC, ah->misc_mode);
- if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ)
- sifstime = 16;
- else
- sifstime = 10;
+ rx_lat = 37;
+ tx_lat = 54;
+
+ if (IS_CHAN_HALF_RATE(chan)) {
+ eifs = 175;
+ rx_lat *= 2;
+ tx_lat *= 2;
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ tx_lat += 11;
+
+ slottime = 13;
+ sifstime = 32;
+ } else if (IS_CHAN_QUARTER_RATE(chan)) {
+ eifs = 340;
+ rx_lat *= 4;
+ tx_lat *= 4;
+ if (IS_CHAN_A_FAST_CLOCK(ah, chan))
+ tx_lat += 22;
+
+ slottime = 21;
+ sifstime = 64;
+ } else {
+ eifs = REG_READ(ah, AR_D_GBL_IFS_EIFS);
+ reg = REG_READ(ah, AR_USEC);
+ rx_lat = MS(reg, AR_USEC_RX_LAT);
+ tx_lat = MS(reg, AR_USEC_TX_LAT);
+
+ slottime = ah->slottime;
+ if (IS_CHAN_5GHZ(chan))
+ sifstime = 16;
+ else
+ sifstime = 10;
+ }
/* As defined by IEEE 802.11-2007 17.3.8.6 */
- slottime = ah->slottime + 3 * ah->coverage_class;
- acktimeout = slottime + sifstime;
+ acktimeout = slottime + sifstime + 3 * ah->coverage_class;
/*
* Workaround for early ACK timeouts, add an offset to match the
@@ -914,11 +1021,20 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ)
acktimeout += 64 - sifstime - ah->slottime;
- ath9k_hw_setslottime(ah, ah->slottime);
+ ath9k_hw_set_sifs_time(ah, sifstime);
+ ath9k_hw_setslottime(ah, slottime);
ath9k_hw_set_ack_timeout(ah, acktimeout);
ath9k_hw_set_cts_timeout(ah, acktimeout);
if (ah->globaltxtimeout != (u32) -1)
ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
+
+ REG_WRITE(ah, AR_D_GBL_IFS_EIFS, ath9k_hw_mac_to_clks(ah, eifs));
+ REG_RMW(ah, AR_USEC,
+ (common->clockrate - 1) |
+ SM(rx_lat, AR_USEC_RX_LAT) |
+ SM(tx_lat, AR_USEC_TX_LAT),
+ AR_USEC_TX_LAT | AR_USEC_RX_LAT | AR_USEC_USEC);
+
}
EXPORT_SYMBOL(ath9k_hw_init_global_settings);
@@ -1114,6 +1230,41 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
rst_flags |= AR_RTC_RC_MAC_COLD;
}
+ if (AR_SREV_9330(ah)) {
+ int npend = 0;
+ int i;
+
+ /* AR9330 WAR:
+ * call external reset function to reset WMAC if:
+ * - doing a cold reset
+ * - we have pending frames in the TX queues
+ */
+
+ for (i = 0; i < AR_NUM_QCU; i++) {
+ npend = ath9k_hw_numtxpending(ah, i);
+ if (npend)
+ break;
+ }
+
+ if (ah->external_reset &&
+ (npend || type == ATH9K_RESET_COLD)) {
+ int reset_err = 0;
+
+ ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
+ "reset MAC via external reset\n");
+
+ reset_err = ah->external_reset();
+ if (reset_err) {
+ ath_err(ath9k_hw_common(ah),
+ "External reset failed, err=%d\n",
+ reset_err);
+ return false;
+ }
+
+ REG_WRITE(ah, AR_RTC_RESET, 1);
+ }
+ }
+
REG_WRITE(ah, AR_RTC_RC, rst_flags);
REGWRITE_BUFFER_FLUSH(ah);
@@ -1487,9 +1638,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_global_settings(ah);
- if (!AR_SREV_9300_20_OR_LATER(ah)) {
- ar9002_hw_update_async_fifo(ah);
- ar9002_hw_enable_wep_aggregation(ah);
+ if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
+ REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
+ AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
+ REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
+ AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
+ REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
+ AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
}
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
@@ -1545,7 +1700,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
}
#ifdef __BIG_ENDIAN
- else if (AR_SREV_9340(ah))
+ else if (AR_SREV_9330(ah) || AR_SREV_9340(ah))
REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
else
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
@@ -1785,16 +1940,16 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
REG_WRITE(ah, AR_BEACON_PERIOD,
- TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
+ TU_TO_USEC(bs->bs_intval));
REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
- TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
+ TU_TO_USEC(bs->bs_intval));
REGWRITE_BUFFER_FLUSH(ah);
REG_RMW_FIELD(ah, AR_RSSI_THR,
AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
- beaconintval = bs->bs_intval & ATH9K_BEACON_PERIOD;
+ beaconintval = bs->bs_intval;
if (bs->bs_sleepduration > beaconintval)
beaconintval = bs->bs_sleepduration;
@@ -1849,12 +2004,22 @@ EXPORT_SYMBOL(ath9k_hw_set_sta_beacon_timers);
/* HW Capabilities */
/*******************/
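+/*
+ * Constrain the EEPROM chainmask to the chains the chip actually has;
+ * fall back to the chip chainmask if the EEPROM value is empty.
+ */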
+static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
+{
+ eeprom_chainmask &= chip_chainmask;
+ if (eeprom_chainmask)
+ return eeprom_chainmask;
+ else
+ return chip_chainmask;
+}
+
int ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ unsigned int chip_chainmask;
u16 eeval;
u8 ant_div_ctl1, tx_chainmask, rx_chainmask;
@@ -1891,6 +2056,15 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (eeval & AR5416_OPFLAGS_11G)
pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
+ if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
+ chip_chainmask = 1;
+ else if (!AR_SREV_9280_20_OR_LATER(ah))
+ chip_chainmask = 7;
+ else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
+ chip_chainmask = 3;
+ else
+ chip_chainmask = 7;
+
pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
/*
* For AR9271 we will temporarily use the rx chainmask as read from
@@ -1907,6 +2081,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
/* Use rx_chainmask from EEPROM. */
pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);
+ pCap->tx_chainmask = fixup_chainmask(chip_chainmask, pCap->tx_chainmask);
+ pCap->rx_chainmask = fixup_chainmask(chip_chainmask, pCap->rx_chainmask);
+
ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;
/* enable key search for every frame in an aggregate */
@@ -1983,7 +2160,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah)) {
pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
- if (!AR_SREV_9485(ah))
+ if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah))
pCap->hw_caps |= ATH9K_HW_CAP_LDPC;
pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
@@ -1996,10 +2173,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
} else {
pCap->tx_desc_len = sizeof(struct ath_desc);
- if (AR_SREV_9280_20(ah) &&
- ((ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) <=
- AR5416_EEP_MINOR_VER_16) ||
- ah->eep_ops->get_eeprom(ah, EEP_FSTCLK_5G)))
+ if (AR_SREV_9280_20(ah))
pCap->hw_caps |= ATH9K_HW_CAP_FASTCLOCK;
}
@@ -2025,7 +2199,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
}
- if (AR_SREV_9485(ah)) {
+ if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
/*
* enable the diversity-combining algorithm only when
@@ -2574,6 +2748,7 @@ static struct {
{ AR_SREV_VERSION_9287, "9287" },
{ AR_SREV_VERSION_9271, "9271" },
{ AR_SREV_VERSION_9300, "9300" },
+ { AR_SREV_VERSION_9330, "9330" },
{ AR_SREV_VERSION_9485, "9485" },
};
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 4b157c53d1a..c79889036ec 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -45,6 +45,7 @@
#define AR9300_DEVID_PCIE 0x0030
#define AR9300_DEVID_AR9340 0x0031
#define AR9300_DEVID_AR9485_PCIE 0x0032
+#define AR9300_DEVID_AR9330 0x0035
#define AR5416_AR9100_DEVID 0x000b
@@ -142,6 +143,8 @@
#define AR_KEYTABLE_SIZE 128
#define POWER_UP_TIME 10000
#define SPUR_RSSI_THRESH 40
+#define UPPER_5G_SUB_BAND_START 5700
+#define MID_5G_SUB_BAND_START 5400
#define CAB_TIMEOUT_VAL 10
#define BEACON_TIMEOUT_VAL 10
@@ -157,8 +160,9 @@
#define ATH9K_HW_RX_HP_QDEPTH 16
#define ATH9K_HW_RX_LP_QDEPTH 128
-#define PAPRD_GAIN_TABLE_ENTRIES 32
-#define PAPRD_TABLE_SZ 24
+#define PAPRD_GAIN_TABLE_ENTRIES 32
+#define PAPRD_TABLE_SZ 24
+#define PAPRD_IDEAL_AGC2_PWR_RANGE 0xe0
enum ath_hw_txq_subtype {
ATH_TXQ_AC_BE = 0,
@@ -215,7 +219,6 @@ struct ath9k_ops_config {
int additional_swba_backoff;
int ack_6mb;
u32 cwm_ignore_extcca;
- u8 pcie_powersave_enable;
bool pcieSerDesWrite;
u8 pcie_clock_req;
u32 pcie_waen;
@@ -403,7 +406,6 @@ struct ath9k_beacon_state {
u32 bs_nexttbtt;
u32 bs_nextdtim;
u32 bs_intval;
-#define ATH9K_BEACON_PERIOD 0x0000ffff
#define ATH9K_TSFOOR_THRESHOLD 0x00004240 /* 16k us */
u32 bs_dtimperiod;
u16 bs_cfpperiod;
@@ -603,7 +605,6 @@ struct ath_hw_ops {
int power_off);
void (*rx_enable)(struct ath_hw *ah);
void (*set_desc_link)(void *ds, u32 link);
- void (*get_desc_link)(void *ds, u32 **link);
bool (*calibrate)(struct ath_hw *ah,
struct ath9k_channel *chan,
u8 rxchainmask,
@@ -671,6 +672,7 @@ struct ath_hw {
bool sw_mgmt_crypto;
bool is_pciexpress;
+ bool aspm_enabled;
bool is_monitoring;
bool need_an_top2_fixup;
u16 tx_trig_level;
@@ -862,6 +864,8 @@ struct ath_hw {
u32 ent_mode;
bool is_clk_25mhz;
+ int (*get_mac_revision)(void);
+ int (*external_reset)(void);
};
struct ath_bus_ops {
@@ -870,6 +874,7 @@ struct ath_bus_ops {
bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
void (*bt_coex_prep)(struct ath_common *common);
void (*extn_synch_en)(struct ath_common *common);
+ void (*aspm_init)(struct ath_common *common);
};
static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -981,8 +986,6 @@ void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
void ar9002_hw_cck_chan14_spread(struct ath_hw *ah);
int ar9002_hw_rf_claim(struct ath_hw *ah);
void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
-void ar9002_hw_update_async_fifo(struct ath_hw *ah);
-void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
/*
* Code specific to AR9003, we stuff these here to avoid callbacks
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 45c585a337e..aa0ff7e2c92 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>
@@ -196,6 +197,19 @@ static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
return val;
}
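+/* Raw read-modify-write helper; any required serialization is done by the caller. */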
+static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
+ u32 set, u32 clr)
+{
+ u32 val;
+
+ val = ioread32(sc->mem + reg_offset);
+ val &= ~clr;
+ val |= set;
+ iowrite32(val, sc->mem + reg_offset);
+
+ return val;
+}
+
static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
struct ath_hw *ah = (struct ath_hw *) hw_priv;
@@ -204,16 +218,12 @@ static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 cl
unsigned long uninitialized_var(flags);
u32 val;
- if (ah->config.serialize_regmode == SER_REG_MODE_ON)
+ if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
spin_lock_irqsave(&sc->sc_serial_rw, flags);
-
- val = ioread32(sc->mem + reg_offset);
- val &= ~clr;
- val |= set;
- iowrite32(val, sc->mem + reg_offset);
-
- if (ah->config.serialize_regmode == SER_REG_MODE_ON)
+ val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+ } else
+ val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
return val;
}
@@ -245,7 +255,7 @@ static void setup_ht_cap(struct ath_softc *sc,
ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
- if (AR_SREV_9485(ah))
+ if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
max_streams = 1;
else if (AR_SREV_9300_20_OR_LATER(ah))
max_streams = 3;
@@ -298,10 +308,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct list_head *head, const char *name,
int nbuf, int ndesc, bool is_tx)
{
-#define DS2PHYS(_dd, _ds) \
- ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
-#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
-#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
u8 *ds;
struct ath_buf *bf;
@@ -396,9 +402,6 @@ fail2:
fail:
memset(dd, 0, sizeof(*dd));
return error;
-#undef ATH_DESC_4KB_BOUND_CHECK
-#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
-#undef DS2PHYS
}
void ath9k_init_crypto(struct ath_softc *sc)
@@ -519,7 +522,6 @@ static void ath9k_init_misc(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int i = 0;
-
setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
sc->config.txpowlimit = ATH_TXPOWER_MAX;
@@ -575,6 +577,8 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
sc->sc_ah->gpio_val = pdata->gpio_val;
sc->sc_ah->led_pin = pdata->led_pin;
ah->is_clk_25mhz = pdata->is_clk_25mhz;
+ ah->get_mac_revision = pdata->get_mac_revision;
+ ah->external_reset = pdata->external_reset;
}
common = ath9k_hw_common(ah);
@@ -585,6 +589,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
common->priv = sc;
common->debug_mask = ath9k_debug;
common->btcoex_enabled = ath9k_btcoex_enable == 1;
+ common->disable_ani = false;
spin_lock_init(&common->cc_lock);
spin_lock_init(&sc->sc_serial_rw);
@@ -665,8 +670,10 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath9k_channel *curchan = ah->curchan;
+ ah->txchainmask = common->tx_chainmask;
if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index c2091f1f409..b6b523a897e 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -645,8 +645,7 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
rs->rs_status |= ATH9K_RXERR_MIC;
-
- if (ads.ds_rxstatus8 & AR_KeyMiss)
+ else if (ads.ds_rxstatus8 & AR_KeyMiss)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 2ca351fe6d3..9098aaad97a 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -62,14 +62,12 @@ static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
if (txq->axq_depth || !list_empty(&txq->axq_acq))
pending = true;
- else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
- pending = !list_empty(&txq->txq_fifo_pending);
spin_unlock_bh(&txq->axq_lock);
return pending;
}
-bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
+static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
{
unsigned long flags;
bool ret;
@@ -136,7 +134,7 @@ void ath9k_ps_restore(struct ath_softc *sc)
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
}
-static void ath_start_ani(struct ath_common *common)
+void ath_start_ani(struct ath_common *common)
{
struct ath_hw *ah = common->ah;
unsigned long timestamp = jiffies_to_msecs(jiffies);
@@ -219,7 +217,7 @@ static int ath_update_survey_stats(struct ath_softc *sc)
* by resetting the chip. To accomplish this we must first clean up any pending
* DMA, then restart stuff.
*/
-int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
+static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
struct ath9k_channel *hchan)
{
struct ath_hw *ah = sc->sc_ah;
@@ -302,7 +300,8 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
ath_set_beacon(sc);
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
- ath_start_ani(common);
+ if (!common->disable_ani)
+ ath_start_ani(common);
}
ps_restore:
@@ -361,7 +360,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
txctl.paprd = BIT(chain);
if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_dbg(common, ATH_DBG_XMIT, "PAPRD TX failed\n");
+ ath_dbg(common, ATH_DBG_CALIBRATE, "PAPRD TX failed\n");
dev_kfree_skb_any(skb);
return false;
}
@@ -370,7 +369,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
if (!time_left)
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE,
+ ath_dbg(common, ATH_DBG_CALIBRATE,
"Timeout waiting for paprd training on TX chain %d\n",
chain);
@@ -394,12 +393,14 @@ void ath_paprd_calibrate(struct work_struct *work)
if (!caldata)
return;
+ ath9k_ps_wakeup(sc);
+
if (ar9003_paprd_init_table(ah) < 0)
- return;
+ goto fail_paprd;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
- return;
+ goto fail_paprd;
skb_put(skb, len);
memset(skb->data, 0, len);
@@ -411,7 +412,6 @@ void ath_paprd_calibrate(struct work_struct *work)
memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
- ath9k_ps_wakeup(sc);
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
if (!(common->tx_chainmask & BIT(chain)))
continue;
@@ -431,11 +431,18 @@ void ath_paprd_calibrate(struct work_struct *work)
if (!ath_paprd_send_frame(sc, skb, chain))
goto fail_paprd;
- if (!ar9003_paprd_is_done(ah))
+ if (!ar9003_paprd_is_done(ah)) {
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "PAPRD not yet done on chain %d\n", chain);
break;
+ }
- if (ar9003_paprd_create_curve(ah, caldata, chain) != 0)
+ if (ar9003_paprd_create_curve(ah, caldata, chain)) {
+ ath_dbg(common, ATH_DBG_CALIBRATE,
+ "PAPRD create curve failed on chain %d\n",
+ chain);
break;
+ }
chain_ok = 1;
}
@@ -515,24 +522,19 @@ void ath_ani_calibrate(unsigned long data)
common->ani.checkani_timer = timestamp;
}
- /* Skip all processing if there's nothing to do. */
- if (longcal || shortcal || aniflag) {
- /* Call ANI routine if necessary */
- if (aniflag) {
- spin_lock_irqsave(&common->cc_lock, flags);
- ath9k_hw_ani_monitor(ah, ah->curchan);
- ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
- }
+ /* Call ANI routine if necessary */
+ if (aniflag) {
+ spin_lock_irqsave(&common->cc_lock, flags);
+ ath9k_hw_ani_monitor(ah, ah->curchan);
+ ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+ }
- /* Perform calibration if necessary */
- if (longcal || shortcal) {
- common->ani.caldone =
- ath9k_hw_calibrate(ah,
- ah->curchan,
- common->rx_chainmask,
- longcal);
- }
+ /* Perform calibration if necessary */
+ if (longcal || shortcal) {
+ common->ani.caldone =
+ ath9k_hw_calibrate(ah, ah->curchan,
+ common->rx_chainmask, longcal);
}
ath9k_ps_restore(sc);
@@ -615,8 +617,11 @@ void ath_hw_check(struct work_struct *work)
ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
"busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
if (busy >= 99) {
- if (++sc->hw_busy_count >= 3)
+ if (++sc->hw_busy_count >= 3) {
+ spin_lock_bh(&sc->sc_pcu_lock);
ath_reset(sc, true);
+ spin_unlock_bh(&sc->sc_pcu_lock);
+ }
} else if (busy >= 0)
sc->hw_busy_count = 0;
@@ -635,7 +640,9 @@ static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
/* Rx is hung for more than 500ms. Reset it */
ath_dbg(common, ATH_DBG_RESET,
"Possible RX hang, resetting");
+ spin_lock_bh(&sc->sc_pcu_lock);
ath_reset(sc, true);
+ spin_unlock_bh(&sc->sc_pcu_lock);
count = 0;
}
} else
@@ -672,7 +679,9 @@ void ath9k_tasklet(unsigned long data)
if ((status & ATH9K_INT_FATAL) ||
(status & ATH9K_INT_BB_WATCHDOG)) {
+ spin_lock(&sc->sc_pcu_lock);
ath_reset(sc, true);
+ spin_unlock(&sc->sc_pcu_lock);
return;
}
@@ -868,7 +877,7 @@ chip_reset:
#undef SCHED_INTR
}
-void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
+static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -974,10 +983,10 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
sc->hw_busy_count = 0;
/* Stop ANI */
+
del_timer_sync(&common->ani.timer);
ath9k_ps_wakeup(sc);
- spin_lock_bh(&sc->sc_pcu_lock);
ieee80211_stop_queues(hw);
@@ -1020,10 +1029,11 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
}
ieee80211_wake_queues(hw);
- spin_unlock_bh(&sc->sc_pcu_lock);
/* Start ANI */
- ath_start_ani(common);
+ if (!common->disable_ani)
+ ath_start_ani(common);
+
ath9k_ps_restore(sc);
return r;
@@ -1261,7 +1271,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
/* disable HAL and put h/w to sleep */
ath9k_hw_disable(ah);
- ath9k_hw_configpcipowersave(ah, 1, 1);
spin_unlock_bh(&sc->sc_pcu_lock);
@@ -1412,10 +1421,14 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
ath9k_hw_set_interrupts(ah, ah->imask);
/* Set up ANI */
- if ((iter_data.naps + iter_data.nadhocs) > 0) {
+ if (iter_data.naps > 0) {
sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
- sc->sc_flags |= SC_OP_ANI_RUN;
- ath_start_ani(common);
+
+ if (!common->disable_ani) {
+ sc->sc_flags |= SC_OP_ANI_RUN;
+ ath_start_ani(common);
+ }
+
} else {
sc->sc_flags &= ~SC_OP_ANI_RUN;
del_timer_sync(&common->ani.timer);
@@ -1952,50 +1965,38 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_vif *avp = (void *)vif->drv_priv;
- switch (sc->sc_ah->opmode) {
- case NL80211_IFTYPE_ADHOC:
- /* There can be only one vif available */
+ /*
+ * Skip iteration if primary station vif's bss info
+ * was not changed
+ */
+ if (sc->sc_flags & SC_OP_PRIM_STA_VIF)
+ return;
+
+ if (bss_conf->assoc) {
+ sc->sc_flags |= SC_OP_PRIM_STA_VIF;
+ avp->primary_sta_vif = true;
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
common->curaid = bss_conf->aid;
ath9k_hw_write_associd(sc->sc_ah);
- /* configure beacon */
- if (bss_conf->enable_beacon)
- ath_beacon_config(sc, vif);
- break;
- case NL80211_IFTYPE_STATION:
- /*
- * Skip iteration if primary station vif's bss info
- * was not changed
- */
- if (sc->sc_flags & SC_OP_PRIM_STA_VIF)
- break;
-
- if (bss_conf->assoc) {
- sc->sc_flags |= SC_OP_PRIM_STA_VIF;
- avp->primary_sta_vif = true;
- memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
- common->curaid = bss_conf->aid;
- ath9k_hw_write_associd(sc->sc_ah);
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, ATH_DBG_CONFIG,
"Bss Info ASSOC %d, bssid: %pM\n",
bss_conf->aid, common->curbssid);
- ath_beacon_config(sc, vif);
- /*
- * Request a re-configuration of Beacon related timers
- * on the receipt of the first Beacon frame (i.e.,
- * after time sync with the AP).
- */
- sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
- /* Reset rssi stats */
- sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
- sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
+ ath_beacon_config(sc, vif);
+ /*
+ * Request a re-configuration of Beacon related timers
+ * on the receipt of the first Beacon frame (i.e.,
+ * after time sync with the AP).
+ */
+ sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
+ /* Reset rssi stats */
+ sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
+ if (!common->disable_ani) {
sc->sc_flags |= SC_OP_ANI_RUN;
ath_start_ani(common);
}
- break;
- default:
- break;
+
}
}
@@ -2005,6 +2006,9 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_vif *avp = (void *)vif->drv_priv;
+ if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
+ return;
+
/* Reconfigure bss info */
if (avp->primary_sta_vif && !bss_conf->assoc) {
ath_dbg(common, ATH_DBG_CONFIG,
@@ -2023,8 +2027,7 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
* None of the station vifs are associated.
* Clear bssid & aid
*/
- if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
- !(sc->sc_flags & SC_OP_PRIM_STA_VIF)) {
+ if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF)) {
ath9k_hw_write_associd(sc->sc_ah);
/* Stop ANI */
sc->sc_flags &= ~SC_OP_ANI_RUN;
@@ -2054,6 +2057,26 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
common->curbssid, common->curaid);
}
+ if (changed & BSS_CHANGED_IBSS) {
+ /* There can be only one vif available */
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ common->curaid = bss_conf->aid;
+ ath9k_hw_write_associd(sc->sc_ah);
+
+ if (bss_conf->ibss_joined) {
+ sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
+
+ if (!common->disable_ani) {
+ sc->sc_flags |= SC_OP_ANI_RUN;
+ ath_start_ani(common);
+ }
+
+ } else {
+ sc->sc_flags &= ~SC_OP_ANI_RUN;
+ del_timer_sync(&common->ani.timer);
+ }
+ }
+
/* Enable transmission of beacons (AP, IBSS, MESH) */
if ((changed & BSS_CHANGED_BEACON) ||
((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) {
@@ -2308,9 +2331,9 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
ath9k_ps_wakeup(sc);
spin_lock_bh(&sc->sc_pcu_lock);
drain_txq = ath_drain_all_txq(sc, false);
- spin_unlock_bh(&sc->sc_pcu_lock);
if (!drain_txq)
ath_reset(sc, false);
+ spin_unlock_bh(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
ieee80211_wake_queues(hw);
@@ -2334,7 +2357,7 @@ static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
return false;
}
-int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
+static int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index b8cbfc70721..be4ea132981 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -16,6 +16,7 @@
#include <linux/nl80211.h>
#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include <linux/ath9k_platform.h>
#include "ath9k.h"
@@ -115,12 +116,38 @@ static void ath_pci_extn_synch_enable(struct ath_common *common)
pci_write_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, lnkctl);
}
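+/*
+ * Enable the chip's PCIe power-save handling only if the parent bridge
+ * already has ASPM L0s/L1 enabled on the upstream link.
+ */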
+static void ath_pci_aspm_init(struct ath_common *common)
+{
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct pci_dev *pdev = to_pci_dev(sc->dev);
+ struct pci_dev *parent;
+ int pos;
+ u8 aspm;
+
+ if (!pci_is_pcie(pdev))
+ return;
+
+ parent = pdev->bus->self;
+ if (WARN_ON(!parent))
+ return;
+
+ pos = pci_pcie_cap(parent);
+ pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm);
+ if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
+ ah->aspm_enabled = true;
+ /* Initialize PCIe PM and SERDES registers. */
+ ath9k_hw_configpcipowersave(ah, 0, 0);
+ }
+}
+
static const struct ath_bus_ops ath_pci_bus_ops = {
.ath_bus_type = ATH_PCI,
.read_cachesize = ath_pci_read_cachesize,
.eeprom_read = ath_pci_eeprom_read,
.bt_coex_prep = ath_pci_bt_coex_prep,
.extn_synch_en = ath_pci_extn_synch_enable,
+ .aspm_init = ath_pci_aspm_init,
};
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -278,6 +305,12 @@ static int ath_pci_suspend(struct device *device)
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+ /* The device has to be moved to FULLSLEEP forcibly.
+ * Otherwise the chip never moves to full sleep
+ * when no interface is up.
+ */
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index ba7f36ab0a7..c04a6c3cac7 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -379,7 +379,30 @@ static const struct ath_rate_table ar5416_11g_ratetable = {
};
static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
- struct ieee80211_tx_rate *rate);
+ struct ieee80211_tx_rate *rate)
+{
+ int rix = 0, i = 0;
+ static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 };
+
+ if (!(rate->flags & IEEE80211_TX_RC_MCS))
+ return rate->idx;
+
+ while (i < ARRAY_SIZE(mcs_rix_off) && rate->idx > mcs_rix_off[i]) {
+ rix++; i++;
+ }
+
+ rix += rate->idx + rate_table->mcs_start;
+
+ if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
+ (rate->flags & IEEE80211_TX_RC_SHORT_GI))
+ rix = rate_table->info[rix].ht_index;
+ else if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ rix = rate_table->info[rix].sgi_index;
+ else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ rix = rate_table->info[rix].cw40index;
+
+ return rix;
+}
static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
struct ath_rate_priv *ath_rc_priv)
@@ -533,7 +556,7 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
[valid_rate_count] = j;
ath_rc_priv->valid_phy_ratecnt[phy] += 1;
ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
- hi = A_MAX(hi, j);
+ hi = max(hi, j);
}
}
}
@@ -569,7 +592,7 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
[ath_rc_priv->valid_phy_ratecnt[phy]] = j;
ath_rc_priv->valid_phy_ratecnt[phy] += 1;
ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
- hi = A_MAX(hi, j);
+ hi = max(hi, j);
}
}
@@ -1080,31 +1103,6 @@ static void ath_rc_update_ht(struct ath_softc *sc,
}
-static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
- struct ieee80211_tx_rate *rate)
-{
- int rix = 0, i = 0;
- static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 };
-
- if (!(rate->flags & IEEE80211_TX_RC_MCS))
- return rate->idx;
-
- while (i < ARRAY_SIZE(mcs_rix_off) && rate->idx > mcs_rix_off[i]) {
- rix++; i++;
- }
-
- rix += rate->idx + rate_table->mcs_start;
-
- if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
- (rate->flags & IEEE80211_TX_RC_SHORT_GI))
- rix = rate_table->info[rix].ht_index;
- else if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- rix = rate_table->info[rix].sgi_index;
- else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- rix = rate_table->info[rix].cw40index;
-
- return rix;
-}
static void ath_rc_tx_status(struct ath_softc *sc,
struct ath_rate_priv *ath_rc_priv,
@@ -1228,7 +1226,7 @@ static void ath_rc_init(struct ath_softc *sc,
ht_mcs,
ath_rc_priv->ht_cap);
}
- hi = A_MAX(hi, hthi);
+ hi = max(hi, hthi);
}
ath_rc_priv->rate_table_size = hi + 1;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 07e35e59c9e..9a4850154fb 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"
@@ -39,6 +40,7 @@ static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
result = true;
break;
case 1:
+ case 2:
if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
(curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
(alt_rssi_avg >= (main_rssi_avg - 5))) ||
@@ -813,16 +815,19 @@ static bool ath9k_rx_accept(struct ath_common *common,
struct ath_rx_status *rx_stats,
bool *decrypt_error)
{
-#define is_mc_or_valid_tkip_keyix ((is_mc || \
- (rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && \
- test_bit(rx_stats->rs_keyix, common->tkip_keymap))))
-
+ bool is_mc, is_valid_tkip, strip_mic, mic_error;
struct ath_hw *ah = common->ah;
__le16 fc;
u8 rx_status_len = ah->caps.rx_status_len;
fc = hdr->frame_control;
+ is_mc = !!is_multicast_ether_addr(hdr->addr1);
+ is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
+ test_bit(rx_stats->rs_keyix, common->tkip_keymap);
+ strip_mic = is_valid_tkip && !(rx_stats->rs_status &
+ (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC));
+
if (!rx_stats->rs_datalen)
return false;
/*
@@ -837,6 +842,11 @@ static bool ath9k_rx_accept(struct ath_common *common,
if (rx_stats->rs_more)
return true;
+ mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
+ !ieee80211_has_morefrags(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
+ (rx_stats->rs_status & ATH9K_RXERR_MIC);
+
/*
* The rx_stats->rs_status will not be set until the end of the
* chained descriptors so it can be ignored if rs_more is set. The
@@ -844,30 +854,18 @@ static bool ath9k_rx_accept(struct ath_common *common,
* descriptors.
*/
if (rx_stats->rs_status != 0) {
- if (rx_stats->rs_status & ATH9K_RXERR_CRC)
+ if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
+ mic_error = false;
+ }
if (rx_stats->rs_status & ATH9K_RXERR_PHY)
return false;
if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
*decrypt_error = true;
- } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
- bool is_mc;
- /*
- * The MIC error bit is only valid if the frame
- * is not a control frame or fragment, and it was
- * decrypted using a valid TKIP key.
- */
- is_mc = !!is_multicast_ether_addr(hdr->addr1);
-
- if (!ieee80211_is_ctl(fc) &&
- !ieee80211_has_morefrags(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
- is_mc_or_valid_tkip_keyix)
- rxs->flag |= RX_FLAG_MMIC_ERROR;
- else
- rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
+ mic_error = false;
}
+
/*
* Reject error frames with the exception of
* decryption and MIC failures. For monitor mode,
@@ -885,6 +883,18 @@ static bool ath9k_rx_accept(struct ath_common *common,
}
}
}
+
+ /*
+ * For unicast frames the MIC error bit can have false positives,
+ * so all MIC error reports need to be validated in software.
+ * False negatives are not common, so skip software verification
+ * if the hardware considers the MIC valid.
+ */
+ if (strip_mic)
+ rxs->flag |= RX_FLAG_MMIC_STRIPPED;
+ else if (is_mc && mic_error)
+ rxs->flag |= RX_FLAG_MMIC_ERROR;
+
return true;
}
@@ -1075,39 +1085,39 @@ static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
antcomb->rssi_lna1 = main_rssi_avg;
switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
- case (0x10): /* LNA2 A-B */
+ case 0x10: /* LNA2 A-B */
antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
antcomb->first_quick_scan_conf =
ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
break;
- case (0x20): /* LNA1 A-B */
+ case 0x20: /* LNA1 A-B */
antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
antcomb->first_quick_scan_conf =
ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
break;
- case (0x21): /* LNA1 LNA2 */
+ case 0x21: /* LNA1 LNA2 */
antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
antcomb->first_quick_scan_conf =
ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
antcomb->second_quick_scan_conf =
ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
break;
- case (0x12): /* LNA2 LNA1 */
+ case 0x12: /* LNA2 LNA1 */
antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
antcomb->first_quick_scan_conf =
ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
antcomb->second_quick_scan_conf =
ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
break;
- case (0x13): /* LNA2 A+B */
+ case 0x13: /* LNA2 A+B */
antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
antcomb->first_quick_scan_conf =
ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
break;
- case (0x23): /* LNA1 A+B */
+ case 0x23: /* LNA1 A+B */
antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
antcomb->first_quick_scan_conf =
ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
@@ -1324,65 +1334,148 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
/* Adjust the fast_div_bias based on main and alt lna conf */
switch ((ant_conf->main_lna_conf << 4) |
ant_conf->alt_lna_conf) {
- case (0x01): /* A-B LNA2 */
+ case 0x01: /* A-B LNA2 */
ant_conf->fast_div_bias = 0x3b;
break;
- case (0x02): /* A-B LNA1 */
+ case 0x02: /* A-B LNA1 */
ant_conf->fast_div_bias = 0x3d;
break;
- case (0x03): /* A-B A+B */
+ case 0x03: /* A-B A+B */
ant_conf->fast_div_bias = 0x1;
break;
- case (0x10): /* LNA2 A-B */
+ case 0x10: /* LNA2 A-B */
ant_conf->fast_div_bias = 0x7;
break;
- case (0x12): /* LNA2 LNA1 */
+ case 0x12: /* LNA2 LNA1 */
ant_conf->fast_div_bias = 0x2;
break;
- case (0x13): /* LNA2 A+B */
+ case 0x13: /* LNA2 A+B */
ant_conf->fast_div_bias = 0x7;
break;
- case (0x20): /* LNA1 A-B */
+ case 0x20: /* LNA1 A-B */
ant_conf->fast_div_bias = 0x6;
break;
- case (0x21): /* LNA1 LNA2 */
+ case 0x21: /* LNA1 LNA2 */
ant_conf->fast_div_bias = 0x0;
break;
- case (0x23): /* LNA1 A+B */
+ case 0x23: /* LNA1 A+B */
ant_conf->fast_div_bias = 0x6;
break;
- case (0x30): /* A+B A-B */
+ case 0x30: /* A+B A-B */
ant_conf->fast_div_bias = 0x1;
break;
- case (0x31): /* A+B LNA2 */
+ case 0x31: /* A+B LNA2 */
ant_conf->fast_div_bias = 0x3b;
break;
- case (0x32): /* A+B LNA1 */
+ case 0x32: /* A+B LNA1 */
ant_conf->fast_div_bias = 0x3d;
break;
default:
break;
}
+ } else if (ant_conf->div_group == 1) {
+ /* Adjust the fast_div_bias based on main and alt_lna_conf */
+ switch ((ant_conf->main_lna_conf << 4) |
+ ant_conf->alt_lna_conf) {
+ case 0x01: /* A-B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x02: /* A-B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x03: /* A-B A+B */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x10: /* LNA2 A-B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x13: /* LNA2 A+B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x20: /* LNA1 A-B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x23: /* LNA1 A+B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x30: /* A+B A-B */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x31: /* A+B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x32: /* A+B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ default:
+ break;
+ }
} else if (ant_conf->div_group == 2) {
/* Adjust the fast_div_bias based on main and alt_lna_conf */
switch ((ant_conf->main_lna_conf << 4) |
ant_conf->alt_lna_conf) {
- case (0x01): /* A-B LNA2 */
+ case 0x01: /* A-B LNA2 */
ant_conf->fast_div_bias = 0x1;
ant_conf->main_gaintb = 0;
ant_conf->alt_gaintb = 0;
break;
- case (0x02): /* A-B LNA1 */
+ case 0x02: /* A-B LNA1 */
ant_conf->fast_div_bias = 0x1;
ant_conf->main_gaintb = 0;
ant_conf->alt_gaintb = 0;
break;
- case (0x03): /* A-B A+B */
+ case 0x03: /* A-B A+B */
ant_conf->fast_div_bias = 0x1;
ant_conf->main_gaintb = 0;
ant_conf->alt_gaintb = 0;
break;
- case (0x10): /* LNA2 A-B */
+ case 0x10: /* LNA2 A-B */
if (!(antcomb->scan) &&
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
ant_conf->fast_div_bias = 0x1;
@@ -1391,12 +1484,12 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->main_gaintb = 0;
ant_conf->alt_gaintb = 0;
break;
- case (0x12): /* LNA2 LNA1 */
+ case 0x12: /* LNA2 LNA1 */
ant_conf->fast_div_bias = 0x1;
ant_conf->main_gaintb = 0;
ant_conf->alt_gaintb = 0;
break;
- case (0x13): /* LNA2 A+B */
+ case 0x13: /* LNA2 A+B */
if (!(antcomb->scan) &&
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
ant_conf->fast_div_bias = 0x1;
@@ -1405,7 +1498,7 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->main_gaintb = 0;
ant_conf->alt_gaintb = 0;
break;
- case (0x20): /* LNA1 A-B */
+ case 0x20: /* LNA1 A-B */
if (!(antcomb->scan) &&
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
ant_conf->fast_div_bias = 0x1;
@@ -1414,12 +1507,12 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->main_gaintb = 0;
ant_conf->alt_gaintb = 0;
break;
- case (0x21): /* LNA1 LNA2 */
+ case 0x21: /* LNA1 LNA2 */
ant_conf->fast_div_bias = 0x1;
ant_conf->main_gaintb = 0;
ant_conf->alt_gaintb = 0;
break;
- case (0x23): /* LNA1 A+B */
+ case 0x23: /* LNA1 A+B */
if (!(antcomb->scan) &&
(alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
ant_conf->fast_div_bias = 0x1;
@@ -1428,17 +1521,17 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->main_gaintb = 0;
ant_conf->alt_gaintb = 0;
break;
- case (0x30): /* A+B A-B */
+ case 0x30: /* A+B A-B */
ant_conf->fast_div_bias = 0x1;
ant_conf->main_gaintb = 0;
ant_conf->alt_gaintb = 0;
break;
- case (0x31): /* A+B LNA2 */
+ case 0x31: /* A+B LNA2 */
ant_conf->fast_div_bias = 0x1;
ant_conf->main_gaintb = 0;
ant_conf->alt_gaintb = 0;
break;
- case (0x32): /* A+B LNA1 */
+ case 0x32: /* A+B LNA1 */
ant_conf->fast_div_bias = 0x1;
ant_conf->main_gaintb = 0;
ant_conf->alt_gaintb = 0;
@@ -1446,9 +1539,7 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
default:
break;
}
-
}
-
}
/* Antenna diversity and combining */
@@ -1856,6 +1947,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
sc->rx.rxotherant = 0;
}
+ if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
+ skb_trim(skb, skb->len - 8);
+
spin_lock_irqsave(&sc->sc_pm_lock, flags);
if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index c18ee9921fb..fa4c0bbce6b 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -600,7 +600,6 @@
#define AR_D_GBL_IFS_SIFS 0x1030
#define AR_D_GBL_IFS_SIFS_M 0x0000FFFF
-#define AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR 0x000003AB
#define AR_D_GBL_IFS_SIFS_RESV0 0xFFFFFFFF
#define AR_D_TXBLK_BASE 0x1038
@@ -616,12 +615,10 @@
#define AR_D_GBL_IFS_SLOT 0x1070
#define AR_D_GBL_IFS_SLOT_M 0x0000FFFF
#define AR_D_GBL_IFS_SLOT_RESV0 0xFFFF0000
-#define AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR 0x00000420
#define AR_D_GBL_IFS_EIFS 0x10b0
#define AR_D_GBL_IFS_EIFS_M 0x0000FFFF
#define AR_D_GBL_IFS_EIFS_RESV0 0xFFFF0000
-#define AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR 0x0000A5EB
#define AR_D_GBL_IFS_MISC 0x10f0
#define AR_D_GBL_IFS_MISC_LFSR_SLICE_SEL 0x00000007
@@ -788,6 +785,10 @@
#define AR_SREV_REVISION_9271_11 1
#define AR_SREV_VERSION_9300 0x1c0
#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
+#define AR_SREV_VERSION_9330 0x200
+#define AR_SREV_REVISION_9330_10 0
+#define AR_SREV_REVISION_9330_11 1
+#define AR_SREV_REVISION_9330_12 2
#define AR_SREV_VERSION_9485 0x240
#define AR_SREV_REVISION_9485_10 0
#define AR_SREV_REVISION_9485_11 1
@@ -862,6 +863,18 @@
#define AR_SREV_9300_20_OR_LATER(_ah) \
((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9300)
+#define AR_SREV_9330(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9330))
+#define AR_SREV_9330_10(_ah) \
+ (AR_SREV_9330((_ah)) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9330_10))
+#define AR_SREV_9330_11(_ah) \
+ (AR_SREV_9330((_ah)) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9330_11))
+#define AR_SREV_9330_12(_ah) \
+ (AR_SREV_9330((_ah)) && \
+ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9330_12))
+
#define AR_SREV_9485(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
#define AR_SREV_9485_10(_ah) \
@@ -1461,7 +1474,6 @@ enum {
#define AR_TIME_OUT_ACK_S 0
#define AR_TIME_OUT_CTS 0x3FFF0000
#define AR_TIME_OUT_CTS_S 16
-#define AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR 0x16001D56
#define AR_RSSI_THR 0x8018
#define AR_RSSI_THR_MASK 0x000000FF
@@ -1477,7 +1489,6 @@ enum {
#define AR_USEC_TX_LAT_S 14
#define AR_USEC_RX_LAT 0x1F800000
#define AR_USEC_RX_LAT_S 23
-#define AR_USEC_ASYNC_FIFO_DUR 0x12e00074
#define AR_RESET_TSF 0x8020
#define AR_RESET_TSF_ONCE 0x01000000
@@ -1862,29 +1873,6 @@ enum {
#define AR_RATE_DURATION(_n) (AR_RATE_DURATION_0 + ((_n)<<2))
-#define AR_KEYTABLE_0 0x8800
-#define AR_KEYTABLE(_n) (AR_KEYTABLE_0 + ((_n)*32))
-#define AR_KEY_CACHE_SIZE 128
-#define AR_RSVD_KEYTABLE_ENTRIES 4
-#define AR_KEY_TYPE 0x00000007
-#define AR_KEYTABLE_TYPE_40 0x00000000
-#define AR_KEYTABLE_TYPE_104 0x00000001
-#define AR_KEYTABLE_TYPE_128 0x00000003
-#define AR_KEYTABLE_TYPE_TKIP 0x00000004
-#define AR_KEYTABLE_TYPE_AES 0x00000005
-#define AR_KEYTABLE_TYPE_CCM 0x00000006
-#define AR_KEYTABLE_TYPE_CLR 0x00000007
-#define AR_KEYTABLE_ANT 0x00000008
-#define AR_KEYTABLE_VALID 0x00008000
-#define AR_KEYTABLE_KEY0(_n) (AR_KEYTABLE(_n) + 0)
-#define AR_KEYTABLE_KEY1(_n) (AR_KEYTABLE(_n) + 4)
-#define AR_KEYTABLE_KEY2(_n) (AR_KEYTABLE(_n) + 8)
-#define AR_KEYTABLE_KEY3(_n) (AR_KEYTABLE(_n) + 12)
-#define AR_KEYTABLE_KEY4(_n) (AR_KEYTABLE(_n) + 16)
-#define AR_KEYTABLE_TYPE(_n) (AR_KEYTABLE(_n) + 20)
-#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24)
-#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28)
-
#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */
#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 3779b8977d4..cc595712f51 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"
@@ -53,7 +54,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
- struct list_head *head);
+ struct list_head *head, bool internal);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
@@ -377,8 +378,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf_next = bf->bf_next;
bf->bf_state.bf_type |= BUF_XRETRY;
- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
- !bf->bf_stale || bf_next != NULL)
+ if (!bf->bf_stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
@@ -463,20 +463,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
}
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
- bf_next == NULL) {
- /*
- * Make sure the last desc is reclaimed if it
- * not a holding desc.
- */
- if (!bf_last->bf_stale)
- list_move_tail(&bf->list, &bf_head);
- else
- INIT_LIST_HEAD(&bf_head);
- } else {
- BUG_ON(list_empty(bf_q));
+ /*
+ * Make sure the last desc is reclaimed if it is
+ * not a holding desc.
+ */
+ if (!bf_last->bf_stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
- }
+ else
+ INIT_LIST_HEAD(&bf_head);
if (!txpending || (tid->state & AGGR_CLEANUP)) {
/*
@@ -572,11 +566,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
rcu_read_unlock();
- if (needreset) {
- spin_unlock_bh(&sc->sc_pcu_lock);
+ if (needreset)
ath_reset(sc, false);
- spin_lock_bh(&sc->sc_pcu_lock);
- }
}
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
@@ -671,7 +662,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
* TODO - this could be improved to be dependent on the rate.
* The hardware can keep up at lower rates, but not higher rates
*/
- if (fi->keyix != ATH9K_TXKEYIX_INVALID)
+ if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
+ !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
ndelim += ATH_AGGR_ENCRYPTDELIM;
/*
@@ -837,7 +829,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_state.bf_type &= ~BUF_AGGR;
ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
ath_buf_set_rate(sc, bf, fi->framelen);
- ath_tx_txqaddbuf(sc, txq, &bf_q);
+ ath_tx_txqaddbuf(sc, txq, &bf_q, false);
continue;
}
@@ -849,7 +841,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
/* anchor last desc of aggregate */
ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
- ath_tx_txqaddbuf(sc, txq, &bf_q);
+ ath_tx_txqaddbuf(sc, txq, &bf_q, false);
TX_STAT_INC(txq->axq_qnum, a_aggr);
} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
@@ -1085,7 +1077,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
txq->txq_headidx = txq->txq_tailidx = 0;
for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
INIT_LIST_HEAD(&txq->txq_fifo[i]);
- INIT_LIST_HEAD(&txq->txq_fifo_pending);
}
return &sc->tx.txq[axq_qnum];
}
@@ -1155,13 +1146,10 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
}
-/*
- * Drain a given TX queue (could be Beacon or Data)
- *
- * This assumes output has been stopped and
- * we do not need to block ath_tx_tasklet.
- */
-void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
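+/*
+ * Reclaim every buffer on the given list, completing aggregates and
+ * single frames; the txq lock is dropped around each completion call.
+ */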
+static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
+ struct list_head *list, bool retry_tx)
+ __releases(txq->axq_lock)
+ __acquires(txq->axq_lock)
{
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
@@ -1170,93 +1158,63 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
- for (;;) {
- spin_lock_bh(&txq->axq_lock);
+ while (!list_empty(list)) {
+ bf = list_first_entry(list, struct ath_buf, list);
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
- if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
- txq->txq_headidx = txq->txq_tailidx = 0;
- spin_unlock_bh(&txq->axq_lock);
- break;
- } else {
- bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
- struct ath_buf, list);
- }
- } else {
- if (list_empty(&txq->axq_q)) {
- txq->axq_link = NULL;
- spin_unlock_bh(&txq->axq_lock);
- break;
- }
- bf = list_first_entry(&txq->axq_q, struct ath_buf,
- list);
-
- if (bf->bf_stale) {
- list_del(&bf->list);
- spin_unlock_bh(&txq->axq_lock);
+ if (bf->bf_stale) {
+ list_del(&bf->list);
- ath_tx_return_buffer(sc, bf);
- continue;
- }
+ ath_tx_return_buffer(sc, bf);
+ continue;
}
lastbf = bf->bf_lastbf;
-
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
- list_cut_position(&bf_head,
- &txq->txq_fifo[txq->txq_tailidx],
- &lastbf->list);
- INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
- } else {
- /* remove ath_buf's of the same mpdu from txq */
- list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
- }
+ list_cut_position(&bf_head, list, &lastbf->list);
txq->axq_depth--;
if (bf_is_ampdu_not_probing(bf))
txq->axq_ampdu_depth--;
- spin_unlock_bh(&txq->axq_lock);
+ spin_unlock_bh(&txq->axq_lock);
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
retry_tx);
else
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
+ spin_lock_bh(&txq->axq_lock);
}
+}
+/*
+ * Drain a given TX queue (could be Beacon or Data)
+ *
+ * This assumes output has been stopped and
+ * we do not need to block ath_tx_tasklet.
+ */
+void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
+{
spin_lock_bh(&txq->axq_lock);
- txq->axq_tx_inprogress = false;
- spin_unlock_bh(&txq->axq_lock);
-
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
- spin_lock_bh(&txq->axq_lock);
- while (!list_empty(&txq->txq_fifo_pending)) {
- bf = list_first_entry(&txq->txq_fifo_pending,
- struct ath_buf, list);
- list_cut_position(&bf_head,
- &txq->txq_fifo_pending,
- &bf->bf_lastbf->list);
- spin_unlock_bh(&txq->axq_lock);
+ int idx = txq->txq_tailidx;
- if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head,
- &ts, 0, retry_tx);
- else
- ath_tx_complete_buf(sc, bf, txq, &bf_head,
- &ts, 0, 0);
- spin_lock_bh(&txq->axq_lock);
+ while (!list_empty(&txq->txq_fifo[idx])) {
+ ath_drain_txq_list(sc, txq, &txq->txq_fifo[idx],
+ retry_tx);
+
+ INCR(idx, ATH_TXFIFO_DEPTH);
}
- spin_unlock_bh(&txq->axq_lock);
+ txq->txq_tailidx = idx;
}
+ txq->axq_link = NULL;
+ txq->axq_tx_inprogress = false;
+ ath_drain_txq_list(sc, txq, &txq->axq_q, retry_tx);
+
/* flush any pending frames if aggregation is enabled */
- if (sc->sc_flags & SC_OP_TXAGGR) {
- if (!retry_tx) {
- spin_lock_bh(&txq->axq_lock);
- ath_txq_drain_pending_buffers(sc, txq);
- spin_unlock_bh(&txq->axq_lock);
- }
- }
+ if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
+ ath_txq_drain_pending_buffers(sc, txq);
+
+ spin_unlock_bh(&txq->axq_lock);
}
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -1370,11 +1328,13 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
* assume the descriptors are already chained together by caller.
*/
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
- struct list_head *head)
+ struct list_head *head, bool internal)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- struct ath_buf *bf;
+ struct ath_buf *bf, *bf_last;
+ bool puttxbuf = false;
+ bool edma;
/*
* Insert the frame on the outbound list and
@@ -1384,51 +1344,49 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
if (list_empty(head))
return;
+ edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
bf = list_first_entry(head, struct ath_buf, list);
+ bf_last = list_entry(head->prev, struct ath_buf, list);
ath_dbg(common, ATH_DBG_QUEUE,
"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
- if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
- list_splice_tail_init(head, &txq->txq_fifo_pending);
- return;
- }
- if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
- ath_dbg(common, ATH_DBG_XMIT,
- "Initializing tx fifo %d which is non-empty\n",
- txq->txq_headidx);
- INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
- list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
+ if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
+ list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
- TX_STAT_INC(txq->axq_qnum, puttxbuf);
- ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
- ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
- txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
+ puttxbuf = true;
} else {
list_splice_tail_init(head, &txq->axq_q);
- if (txq->axq_link == NULL) {
- TX_STAT_INC(txq->axq_qnum, puttxbuf);
- ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
- ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
- txq->axq_qnum, ito64(bf->bf_daddr),
- bf->bf_desc);
- } else {
- *txq->axq_link = bf->bf_daddr;
+ if (txq->axq_link) {
+ ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
ath_dbg(common, ATH_DBG_XMIT,
"link[%u] (%p)=%llx (%p)\n",
txq->axq_qnum, txq->axq_link,
ito64(bf->bf_daddr), bf->bf_desc);
- }
- ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
- &txq->axq_link);
+ } else if (!edma)
+ puttxbuf = true;
+
+ txq->axq_link = bf_last->bf_desc;
+ }
+
+ if (puttxbuf) {
+ TX_STAT_INC(txq->axq_qnum, puttxbuf);
+ ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
+ ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
+ txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
+ }
+
+ if (!edma) {
TX_STAT_INC(txq->axq_qnum, txstart);
ath9k_hw_txstart(ah, txq->axq_qnum);
}
- txq->axq_depth++;
- if (bf_is_ampdu_not_probing(bf))
- txq->axq_ampdu_depth++;
+
+ if (!internal) {
+ txq->axq_depth++;
+ if (bf_is_ampdu_not_probing(bf))
+ txq->axq_ampdu_depth++;
+ }
}
static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -1470,7 +1428,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
bf->bf_lastbf = bf;
ath_buf_set_rate(sc, bf, fi->framelen);
- ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
+ ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
}
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
@@ -1490,7 +1448,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_lastbf = bf;
fi = get_frame_info(bf->bf_mpdu);
ath_buf_set_rate(sc, bf, fi->framelen);
- ath_tx_txqaddbuf(sc, txq, bf_head);
+ ath_tx_txqaddbuf(sc, txq, bf_head, false);
TX_STAT_INC(txq->axq_qnum, queued);
}
@@ -2077,6 +2035,40 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
+static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_tx_status *ts, struct ath_buf *bf,
+ struct list_head *bf_head)
+ __releases(txq->axq_lock)
+ __acquires(txq->axq_lock)
+{
+ int txok;
+
+ txq->axq_depth--;
+ txok = !(ts->ts_status & ATH9K_TXERR_MASK);
+ txq->axq_tx_inprogress = false;
+ if (bf_is_ampdu_not_probing(bf))
+ txq->axq_ampdu_depth--;
+
+ spin_unlock_bh(&txq->axq_lock);
+
+ if (!bf_isampdu(bf)) {
+ /*
+ * This frame is sent out as a single frame.
+ * Use hardware retry status for this frame.
+ */
+ if (ts->ts_status & ATH9K_TXERR_XRETRY)
+ bf->bf_state.bf_type |= BUF_XRETRY;
+ ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true);
+ ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
+ } else
+ ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
+
+ spin_lock_bh(&txq->axq_lock);
+
+ if (sc->sc_flags & SC_OP_TXAGGR)
+ ath_txq_schedule(sc, txq);
+}
+
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_hw *ah = sc->sc_ah;
@@ -2085,20 +2077,18 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
struct list_head bf_head;
struct ath_desc *ds;
struct ath_tx_status ts;
- int txok;
int status;
ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
txq->axq_link);
+ spin_lock_bh(&txq->axq_lock);
for (;;) {
- spin_lock_bh(&txq->axq_lock);
if (list_empty(&txq->axq_q)) {
txq->axq_link = NULL;
if (sc->sc_flags & SC_OP_TXAGGR)
ath_txq_schedule(sc, txq);
- spin_unlock_bh(&txq->axq_lock);
break;
}
bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
@@ -2114,13 +2104,11 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
bf_held = NULL;
if (bf->bf_stale) {
bf_held = bf;
- if (list_is_last(&bf_held->list, &txq->axq_q)) {
- spin_unlock_bh(&txq->axq_lock);
+ if (list_is_last(&bf_held->list, &txq->axq_q))
break;
- } else {
- bf = list_entry(bf_held->list.next,
- struct ath_buf, list);
- }
+
+ bf = list_entry(bf_held->list.next, struct ath_buf,
+ list);
}
lastbf = bf->bf_lastbf;
@@ -2128,10 +2116,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
memset(&ts, 0, sizeof(ts));
status = ath9k_hw_txprocdesc(ah, ds, &ts);
- if (status == -EINPROGRESS) {
- spin_unlock_bh(&txq->axq_lock);
+ if (status == -EINPROGRESS)
break;
- }
+
TX_STAT_INC(txq->axq_qnum, txprocdesc);
/*
@@ -2145,42 +2132,14 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
list_cut_position(&bf_head,
&txq->axq_q, lastbf->list.prev);
- txq->axq_depth--;
- txok = !(ts.ts_status & ATH9K_TXERR_MASK);
- txq->axq_tx_inprogress = false;
- if (bf_held)
+ if (bf_held) {
list_del(&bf_held->list);
-
- if (bf_is_ampdu_not_probing(bf))
- txq->axq_ampdu_depth--;
-
- spin_unlock_bh(&txq->axq_lock);
-
- if (bf_held)
ath_tx_return_buffer(sc, bf_held);
-
- if (!bf_isampdu(bf)) {
- /*
- * This frame is sent out as a single frame.
- * Use hardware retry status for this frame.
- */
- if (ts.ts_status & ATH9K_TXERR_XRETRY)
- bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
}
- if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
- true);
- else
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
-
- spin_lock_bh(&txq->axq_lock);
-
- if (sc->sc_flags & SC_OP_TXAGGR)
- ath_txq_schedule(sc, txq);
- spin_unlock_bh(&txq->axq_lock);
+ ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
}
+ spin_unlock_bh(&txq->axq_lock);
}
static void ath_tx_complete_poll_work(struct work_struct *work)
@@ -2213,7 +2172,9 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
if (needreset) {
ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
"tx hung, resetting the chip\n");
+ spin_lock_bh(&sc->sc_pcu_lock);
ath_reset(sc, true);
+ spin_unlock_bh(&sc->sc_pcu_lock);
}
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
@@ -2237,17 +2198,16 @@ void ath_tx_tasklet(struct ath_softc *sc)
void ath_tx_edma_tasklet(struct ath_softc *sc)
{
- struct ath_tx_status txs;
+ struct ath_tx_status ts;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
struct ath_txq *txq;
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
int status;
- int txok;
for (;;) {
- status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
+ status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
if (status == -EINPROGRESS)
break;
if (status == -EIO) {
@@ -2257,12 +2217,13 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
}
/* Skip beacon completions */
- if (txs.qid == sc->beacon.beaconq)
+ if (ts.qid == sc->beacon.beaconq)
continue;
- txq = &sc->tx.txq[txs.qid];
+ txq = &sc->tx.txq[ts.qid];
spin_lock_bh(&txq->axq_lock);
+
if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
spin_unlock_bh(&txq->axq_lock);
return;
@@ -2275,41 +2236,21 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
INIT_LIST_HEAD(&bf_head);
list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
&lastbf->list);
- INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
- txq->axq_depth--;
- txq->axq_tx_inprogress = false;
- if (bf_is_ampdu_not_probing(bf))
- txq->axq_ampdu_depth--;
- spin_unlock_bh(&txq->axq_lock);
- txok = !(txs.ts_status & ATH9K_TXERR_MASK);
-
- if (!bf_isampdu(bf)) {
- if (txs.ts_status & ATH9K_TXERR_XRETRY)
- bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
- }
-
- if (bf_isampdu(bf))
- ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
- txok, true);
- else
- ath_tx_complete_buf(sc, bf, txq, &bf_head,
- &txs, txok, 0);
+ if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
+ INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
- spin_lock_bh(&txq->axq_lock);
+ if (!list_empty(&txq->axq_q)) {
+ struct list_head bf_q;
- if (!list_empty(&txq->txq_fifo_pending)) {
- INIT_LIST_HEAD(&bf_head);
- bf = list_first_entry(&txq->txq_fifo_pending,
- struct ath_buf, list);
- list_cut_position(&bf_head,
- &txq->txq_fifo_pending,
- &bf->bf_lastbf->list);
- ath_tx_txqaddbuf(sc, txq, &bf_head);
- } else if (sc->sc_flags & SC_OP_TXAGGR)
- ath_txq_schedule(sc, txq);
+ INIT_LIST_HEAD(&bf_q);
+ txq->axq_link = NULL;
+ list_splice_tail_init(&txq->axq_q, &bf_q);
+ ath_tx_txqaddbuf(sc, txq, &bf_q, true);
+ }
+ }
+ ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
spin_unlock_bh(&txq->axq_lock);
}
}
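
The drain path above walks txq->txq_fifo[] from the tail index and wraps with INCR() until it hits an empty slot, then records the new tail. A minimal standalone C sketch of that ring-index walk; INCR() and TXFIFO_DEPTH here are stand-ins for the driver's helper and ATH_TXFIFO_DEPTH (a power of two):

#include <stdio.h>

#define TXFIFO_DEPTH 8                 /* stand-in for ATH_TXFIFO_DEPTH (power of two) */
#define INCR(i, sz) do { (i)++; (i) &= ((sz) - 1); } while (0)

struct fifo_slot { int pending; };     /* stand-in for the per-slot buffer list */

/* Walk the FIFO from the tail index, wrapping at the depth, until an empty
 * slot is found; return the new tail, as ath_draintxq() does for txq_tailidx. */
static int drain_fifo(struct fifo_slot *fifo, int tailidx)
{
	int idx = tailidx;

	while (fifo[idx].pending) {
		fifo[idx].pending = 0; /* "complete" the frames queued in this slot */
		INCR(idx, TXFIFO_DEPTH);
	}
	return idx;
}

int main(void)
{
	struct fifo_slot fifo[TXFIFO_DEPTH] = {
		[5] = { 1 }, [6] = { 1 }, [7] = { 1 }, [0] = { 1 },
	};

	printf("new tailidx = %d\n", drain_fifo(fifo, 5)); /* wraps 5,6,7,0 -> 1 */
	return 0;
}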
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 4da01a9f568..c5427a72a1e 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -67,6 +67,8 @@
#define PAYLOAD_MAX (CARL9170_MAX_CMD_LEN / 4 - 1)
+static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 3, 2, 1, 0 };
+
enum carl9170_rf_init_mode {
CARL9170_RFI_NONE,
CARL9170_RFI_WARM,
@@ -175,7 +177,7 @@ struct carl9170_tx_queue_stats {
struct carl9170_vif {
unsigned int id;
- struct ieee80211_vif *vif;
+ struct ieee80211_vif __rcu *vif;
};
struct carl9170_vif_info {
@@ -309,7 +311,7 @@ struct ar9170 {
spinlock_t beacon_lock;
unsigned int global_pretbtt;
unsigned int global_beacon_int;
- struct carl9170_vif_info *beacon_iter;
+ struct carl9170_vif_info __rcu *beacon_iter;
unsigned int beacon_enabled;
/* cryptographic engine */
@@ -387,7 +389,7 @@ struct ar9170 {
/* tx ampdu */
struct work_struct ampdu_work;
spinlock_t tx_ampdu_list_lock;
- struct carl9170_sta_tid *tx_ampdu_iter;
+ struct carl9170_sta_tid __rcu *tx_ampdu_iter;
struct list_head tx_ampdu_list;
atomic_t tx_ampdu_upload;
atomic_t tx_ampdu_scheduler;
@@ -440,7 +442,6 @@ struct ar9170 {
enum carl9170_ps_off_override_reasons {
PS_OFF_VIF = BIT(0),
PS_OFF_BCN = BIT(1),
- PS_OFF_5GHZ = BIT(2),
};
struct carl9170_ba_stats {
@@ -455,7 +456,7 @@ struct carl9170_sta_info {
bool sleeping;
atomic_t pending_frames;
unsigned int ampdu_max_len;
- struct carl9170_sta_tid *agg[CARL9170_NUM_TID];
+ struct carl9170_sta_tid __rcu *agg[CARL9170_NUM_TID];
struct carl9170_ba_stats stats[CARL9170_NUM_TID];
};
@@ -531,7 +532,6 @@ int carl9170_set_ampdu_settings(struct ar9170 *ar);
int carl9170_set_slot_time(struct ar9170 *ar);
int carl9170_set_mac_rates(struct ar9170 *ar);
int carl9170_set_hwretry_limit(struct ar9170 *ar, const u32 max_retry);
-int carl9170_update_beacon(struct ar9170 *ar, const bool submit);
int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
const u8 ktype, const u8 keyidx, const u8 *keydata, const int keylen);
int carl9170_disable_key(struct ar9170 *ar, const u8 id);
@@ -552,6 +552,7 @@ void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb);
void carl9170_tx_scheduler(struct ar9170 *ar);
void carl9170_tx_get_skb(struct sk_buff *skb);
int carl9170_tx_put_skb(struct sk_buff *skb);
+int carl9170_update_beacon(struct ar9170 *ar, const bool submit);
/* LEDs */
#ifdef CONFIG_CARL9170_LEDS
diff --git a/drivers/net/wireless/ath/carl9170/cmd.h b/drivers/net/wireless/ath/carl9170/cmd.h
index 568174c71b9..d5f95bdc75c 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.h
+++ b/drivers/net/wireless/ath/carl9170/cmd.h
@@ -87,7 +87,7 @@ do { \
__ar->cmd_buf[2 * __nreg + 1] = cpu_to_le32(r); \
__ar->cmd_buf[2 * __nreg + 2] = cpu_to_le32(v); \
__nreg++; \
- if ((__nreg >= PAYLOAD_MAX/2)) { \
+ if ((__nreg >= PAYLOAD_MAX / 2)) { \
if (IS_ACCEPTING_CMD(__ar)) \
__err = carl9170_exec_cmd(__ar, \
CARL9170_CMD_WREG, 8 * __nreg, \
@@ -160,7 +160,7 @@ do { \
} while (0)
#define carl9170_async_regwrite_finish() do { \
-__async_regwrite_out : \
+__async_regwrite_out: \
if (__cmd != NULL && __err == 0) \
carl9170_async_regwrite_flush(); \
kfree(__cmd); \
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index 0ac1124c2a0..de57f90e1d5 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -695,7 +695,7 @@ static char *carl9170_debugfs_bug_read(struct ar9170 *ar, char *buf,
}
__DEBUGFS_DECLARE_RW_FILE(bug, 400, CARL9170_STOPPED);
-static const char *erp_modes[] = {
+static const char *const erp_modes[] = {
[CARL9170_ERP_INVALID] = "INVALID",
[CARL9170_ERP_AUTO] = "Automatic",
[CARL9170_ERP_MAC80211] = "Set by MAC80211",
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 221957c5d37..39ddea5794f 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -237,7 +237,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
ar->disable_offload = true;
}
- if (SUPP(CARL9170FW_PSM))
+ if (SUPP(CARL9170FW_PSM) && SUPP(CARL9170FW_FIXED_5GHZ_PSM))
ar->hw->flags |= IEEE80211_HW_SUPPORTS_PS;
if (!SUPP(CARL9170FW_USB_INIT_FIRMWARE)) {
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index 30449d21b76..0a6dec529b5 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -4,7 +4,7 @@
* Firmware command interface definitions
*
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
- * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
+ * Copyright 2009-2011 Christian Lamparter <chunkeey@googlemail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -54,6 +54,7 @@ enum carl9170_cmd_oids {
CARL9170_CMD_BCN_CTRL = 0x05,
CARL9170_CMD_READ_TSF = 0x06,
CARL9170_CMD_RX_FILTER = 0x07,
+ CARL9170_CMD_WOL = 0x08,
/* CAM */
CARL9170_CMD_EKEY = 0x10,
@@ -180,6 +181,21 @@ struct carl9170_bcn_ctrl_cmd {
#define CARL9170_BCN_CTRL_DRAIN 0
#define CARL9170_BCN_CTRL_CAB_TRIGGER 1
+struct carl9170_wol_cmd {
+ __le32 flags;
+ u8 mac[6];
+ u8 bssid[6];
+ __le32 null_interval;
+ __le32 free_for_use2;
+ __le32 mask;
+ u8 pattern[32];
+} __packed;
+
+#define CARL9170_WOL_CMD_SIZE 60
+
+#define CARL9170_WOL_DISCONNECT 1
+#define CARL9170_WOL_MAGIC_PKT 2
+
struct carl9170_cmd_head {
union {
struct {
@@ -203,6 +219,7 @@ struct carl9170_cmd {
struct carl9170_write_reg wreg;
struct carl9170_rf_init rf_init;
struct carl9170_psm psm;
+ struct carl9170_wol_cmd wol;
struct carl9170_bcn_ctrl_cmd bcn_ctrl;
struct carl9170_rx_filter_cmd rx_filter;
u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
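
The new carl9170_wol_cmd is expected to add up to CARL9170_WOL_CMD_SIZE (60) bytes. A small userspace check of that arithmetic, assuming the fields pack with no padding as __packed guarantees in the kernel header:

#include <stdio.h>
#include <stdint.h>

/* Userspace restatement of the layout added above. */
struct wol_cmd {
	uint32_t flags;
	uint8_t  mac[6];
	uint8_t  bssid[6];
	uint32_t null_interval;
	uint32_t free_for_use2;
	uint32_t mask;
	uint8_t  pattern[32];
} __attribute__((packed));

_Static_assert(sizeof(struct wol_cmd) == 60, "must equal CARL9170_WOL_CMD_SIZE");

int main(void)
{
	printf("sizeof(struct wol_cmd) = %zu bytes\n", sizeof(struct wol_cmd));
	return 0;
}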
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
index 921066822dd..6d9c0891ce7 100644
--- a/drivers/net/wireless/ath/carl9170/fwdesc.h
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -3,7 +3,7 @@
*
* Firmware descriptor format
*
- * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
+ * Copyright 2009-2011 Christian Lamparter <chunkeey@googlemail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -72,6 +72,12 @@ enum carl9170fw_feature_list {
/* Wake up on WLAN */
CARL9170FW_WOL,
+ /* Firmware supports PSM in the 5GHZ Band */
+ CARL9170FW_FIXED_5GHZ_PSM,
+
+ /* HW (ANI, CCA, MIB) tally counters */
+ CARL9170FW_HW_COUNTERS,
+
/* KEEP LAST */
__CARL9170FW_FEATURE_NUM
};
@@ -82,6 +88,7 @@ enum carl9170fw_feature_list {
#define DBG_MAGIC "DBG\0"
#define CHK_MAGIC "CHK\0"
#define TXSQ_MAGIC "TXSQ"
+#define WOL_MAGIC "WOL\0"
#define LAST_MAGIC "LAST"
#define CARL9170FW_SET_DAY(d) (((d) - 1) % 31)
@@ -104,7 +111,7 @@ struct carl9170fw_desc_head {
(sizeof(struct carl9170fw_desc_head))
#define CARL9170FW_OTUS_DESC_MIN_VER 6
-#define CARL9170FW_OTUS_DESC_CUR_VER 6
+#define CARL9170FW_OTUS_DESC_CUR_VER 7
struct carl9170fw_otus_desc {
struct carl9170fw_desc_head head;
__le32 feature_set;
@@ -186,6 +193,16 @@ struct carl9170fw_txsq_desc {
#define CARL9170FW_TXSQ_DESC_SIZE \
(sizeof(struct carl9170fw_txsq_desc))
+#define CARL9170FW_WOL_DESC_MIN_VER 1
+#define CARL9170FW_WOL_DESC_CUR_VER 1
+struct carl9170fw_wol_desc {
+ struct carl9170fw_desc_head head;
+
+ __le32 supported_triggers; /* CARL9170_WOL_ */
+} __packed;
+#define CARL9170FW_WOL_DESC_SIZE \
+ (sizeof(struct carl9170fw_wol_desc))
+
#define CARL9170FW_LAST_DESC_MIN_VER 1
#define CARL9170FW_LAST_DESC_CUR_VER 2
struct carl9170fw_last_desc {
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
index 4e30762dd90..fa834c1460f 100644
--- a/drivers/net/wireless/ath/carl9170/hw.h
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -4,7 +4,7 @@
* Register map, hardware-specific definitions
*
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
- * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
+ * Copyright 2009-2011 Christian Lamparter <chunkeey@googlemail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -174,6 +174,7 @@
#define AR9170_MAC_SNIFFER_ENABLE_PROMISC BIT(0)
#define AR9170_MAC_SNIFFER_DEFAULTS 0x02000000
#define AR9170_MAC_REG_ENCRYPTION (AR9170_MAC_REG_BASE + 0x678)
+#define AR9170_MAC_ENCRYPTION_MGMT_RX_SOFTWARE BIT(2)
#define AR9170_MAC_ENCRYPTION_RX_SOFTWARE BIT(3)
#define AR9170_MAC_ENCRYPTION_DEFAULTS 0x70
@@ -222,6 +223,12 @@
#define AR9170_MAC_REG_TX_BLOCKACKS (AR9170_MAC_REG_BASE + 0x6c0)
#define AR9170_MAC_REG_NAV_COUNT (AR9170_MAC_REG_BASE + 0x6c4)
#define AR9170_MAC_REG_BACKOFF_STATUS (AR9170_MAC_REG_BASE + 0x6c8)
+#define AR9170_MAC_BACKOFF_CCA BIT(24)
+#define AR9170_MAC_BACKOFF_TX_PEX BIT(25)
+#define AR9170_MAC_BACKOFF_RX_PE BIT(26)
+#define AR9170_MAC_BACKOFF_MD_READY BIT(27)
+#define AR9170_MAC_BACKOFF_TX_PE BIT(28)
+
#define AR9170_MAC_REG_TX_RETRY (AR9170_MAC_REG_BASE + 0x6cc)
#define AR9170_MAC_REG_TX_COMPLETE (AR9170_MAC_REG_BASE + 0x6d4)
@@ -357,7 +364,18 @@
#define AR9170_MAC_REG_DMA_WLAN_STATUS (AR9170_MAC_REG_BASE + 0xd38)
#define AR9170_MAC_REG_DMA_STATUS (AR9170_MAC_REG_BASE + 0xd3c)
-
+#define AR9170_MAC_REG_DMA_TXQ_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd40)
+#define AR9170_MAC_REG_DMA_TXQ0_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd40)
+#define AR9170_MAC_REG_DMA_TXQ1_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd44)
+#define AR9170_MAC_REG_DMA_TXQ2_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd48)
+#define AR9170_MAC_REG_DMA_TXQ3_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd4c)
+#define AR9170_MAC_REG_DMA_TXQ4_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd50)
+#define AR9170_MAC_REG_DMA_TXQ0Q1_LEN (AR9170_MAC_REG_BASE + 0xd54)
+#define AR9170_MAC_REG_DMA_TXQ2Q3_LEN (AR9170_MAC_REG_BASE + 0xd58)
+#define AR9170_MAC_REG_DMA_TXQ4_LEN (AR9170_MAC_REG_BASE + 0xd5c)
+
+#define AR9170_MAC_REG_DMA_TXQX_LAST_ADDR (AR9170_MAC_REG_BASE + 0xd74)
+#define AR9170_MAC_REG_DMA_TXQX_FAIL_ADDR (AR9170_MAC_REG_BASE + 0xd78)
#define AR9170_MAC_REG_TXRX_MPI (AR9170_MAC_REG_BASE + 0xd7c)
#define AR9170_MAC_TXRX_MPI_TX_MPI_MASK 0x0000000f
#define AR9170_MAC_TXRX_MPI_TX_TO_MASK 0x0000fff0
@@ -377,10 +395,40 @@
#define AR9170_MAC_REG_BCN_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd98)
#define AR9170_MAC_REG_BCN_COUNT (AR9170_MAC_REG_BASE + 0xd9c)
-
-
#define AR9170_MAC_REG_BCN_HT1 (AR9170_MAC_REG_BASE + 0xda0)
+#define AR9170_MAC_BCN_HT1_HT_EN BIT(0)
+#define AR9170_MAC_BCN_HT1_GF_PMB BIT(1)
+#define AR9170_MAC_BCN_HT1_SP_EXP BIT(2)
+#define AR9170_MAC_BCN_HT1_TX_BF BIT(3)
+#define AR9170_MAC_BCN_HT1_PWR_CTRL_S 4
+#define AR9170_MAC_BCN_HT1_PWR_CTRL 0x70
+#define AR9170_MAC_BCN_HT1_TX_ANT1 BIT(7)
+#define AR9170_MAC_BCN_HT1_TX_ANT0 BIT(8)
+#define AR9170_MAC_BCN_HT1_NUM_LFT_S 9
+#define AR9170_MAC_BCN_HT1_NUM_LFT 0x600
+#define AR9170_MAC_BCN_HT1_BWC_20M_EXT BIT(16)
+#define AR9170_MAC_BCN_HT1_BWC_40M_SHARED BIT(17)
+#define AR9170_MAC_BCN_HT1_BWC_40M_DUP (BIT(16) | BIT(17))
+#define AR9170_MAC_BCN_HT1_BF_MCS_S 18
+#define AR9170_MAC_BCN_HT1_BF_MCS 0x1c0000
+#define AR9170_MAC_BCN_HT1_TPC_S 21
+#define AR9170_MAC_BCN_HT1_TPC 0x7e00000
+#define AR9170_MAC_BCN_HT1_CHAIN_MASK_S 27
+#define AR9170_MAC_BCN_HT1_CHAIN_MASK 0x38000000
+
#define AR9170_MAC_REG_BCN_HT2 (AR9170_MAC_REG_BASE + 0xda4)
+#define AR9170_MAC_BCN_HT2_MCS_S 0
+#define AR9170_MAC_BCN_HT2_MCS 0x7f
+#define AR9170_MAC_BCN_HT2_BW40 BIT(8)
+#define AR9170_MAC_BCN_HT2_SMOOTHING BIT(9)
+#define AR9170_MAC_BCN_HT2_SS BIT(10)
+#define AR9170_MAC_BCN_HT2_NSS BIT(11)
+#define AR9170_MAC_BCN_HT2_STBC_S 12
+#define AR9170_MAC_BCN_HT2_STBC 0x3000
+#define AR9170_MAC_BCN_HT2_ADV_COD BIT(14)
+#define AR9170_MAC_BCN_HT2_SGI BIT(15)
+#define AR9170_MAC_BCN_HT2_LEN_S 16
+#define AR9170_MAC_BCN_HT2_LEN 0xffff0000
#define AR9170_MAC_REG_DMA_TXQX_ADDR_CURR (AR9170_MAC_REG_BASE + 0xdc0)
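
The new BCN_HT1/BCN_HT2 definitions follow the driver's mask-plus-_S-shift convention for register fields. A hedged sketch of packing values into such fields; SET_FIELD() is a hypothetical helper in the spirit of carl9170's SET_VAL() macro, and the field values are made up for illustration:

#include <stdio.h>
#include <stdint.h>

/* Field layout copied from the AR9170_MAC_REG_BCN_HT1 definitions above. */
#define BCN_HT1_TPC_S        21
#define BCN_HT1_TPC          0x7e00000
#define BCN_HT1_CHAIN_MASK_S 27
#define BCN_HT1_CHAIN_MASK   0x38000000

/* Hypothetical helper: clear the field, then OR in the shifted value,
 * clamped by the mask. */
#define SET_FIELD(reg, mask, shift, val) \
	((reg) = ((reg) & ~(uint32_t)(mask)) | (((uint32_t)(val) << (shift)) & (mask)))

int main(void)
{
	uint32_t ht1 = 0;

	SET_FIELD(ht1, BCN_HT1_TPC, BCN_HT1_TPC_S, 34);              /* made-up power code */
	SET_FIELD(ht1, BCN_HT1_CHAIN_MASK, BCN_HT1_CHAIN_MASK_S, 5); /* made-up chain mask */
	printf("BCN_HT1 = 0x%08x\n", (unsigned int)ht1);
	return 0;
}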
diff --git a/drivers/net/wireless/ath/carl9170/led.c b/drivers/net/wireless/ath/carl9170/led.c
index 4bb2cbd8bd9..78dadc79755 100644
--- a/drivers/net/wireless/ath/carl9170/led.c
+++ b/drivers/net/wireless/ath/carl9170/led.c
@@ -118,7 +118,7 @@ static void carl9170_led_set_brightness(struct led_classdev *led,
}
if (likely(IS_ACCEPTING_CMD(ar) && arl->toggled))
- ieee80211_queue_delayed_work(ar->hw, &ar->led_work, HZ/10);
+ ieee80211_queue_delayed_work(ar->hw, &ar->led_work, HZ / 10);
}
static int carl9170_led_register_led(struct ar9170 *ar, int i, char *name,
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index 385cf508479..dfda9197099 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -455,135 +455,6 @@ int carl9170_set_beacon_timers(struct ar9170 *ar)
return carl9170_regwrite_result();
}
-int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
-{
- struct sk_buff *skb = NULL;
- struct carl9170_vif_info *cvif;
- struct ieee80211_tx_info *txinfo;
- __le32 *data, *old = NULL;
- u32 word, off, addr, len;
- int i = 0, err = 0;
-
- rcu_read_lock();
- cvif = rcu_dereference(ar->beacon_iter);
-retry:
- if (ar->vifs == 0 || !cvif)
- goto out_unlock;
-
- list_for_each_entry_continue_rcu(cvif, &ar->vif_list, list) {
- if (cvif->active && cvif->enable_beacon)
- goto found;
- }
-
- if (!ar->beacon_enabled || i++)
- goto out_unlock;
-
- goto retry;
-
-found:
- rcu_assign_pointer(ar->beacon_iter, cvif);
-
- skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
- NULL, NULL);
-
- if (!skb) {
- err = -ENOMEM;
- goto err_free;
- }
-
- txinfo = IEEE80211_SKB_CB(skb);
- if (txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS) {
- err = -EINVAL;
- goto err_free;
- }
-
- spin_lock_bh(&ar->beacon_lock);
- data = (__le32 *)skb->data;
- if (cvif->beacon)
- old = (__le32 *)cvif->beacon->data;
-
- off = cvif->id * AR9170_MAC_BCN_LENGTH_MAX;
- addr = ar->fw.beacon_addr + off;
- len = roundup(skb->len + FCS_LEN, 4);
-
- if ((off + len) > ar->fw.beacon_max_len) {
- if (net_ratelimit()) {
- wiphy_err(ar->hw->wiphy, "beacon does not "
- "fit into device memory!\n");
- }
- err = -EINVAL;
- goto err_unlock;
- }
-
- if (len > AR9170_MAC_BCN_LENGTH_MAX) {
- if (net_ratelimit()) {
- wiphy_err(ar->hw->wiphy, "no support for beacons "
- "bigger than %d (yours:%d).\n",
- AR9170_MAC_BCN_LENGTH_MAX, len);
- }
-
- err = -EMSGSIZE;
- goto err_unlock;
- }
-
- i = txinfo->control.rates[0].idx;
- if (txinfo->band != IEEE80211_BAND_2GHZ)
- i += 4;
-
- word = __carl9170_ratetable[i].hw_value & 0xf;
- if (i < 4)
- word |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
- else
- word |= ((skb->len + FCS_LEN) << 16) + 0x0010;
-
- carl9170_async_regwrite_begin(ar);
- carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, word);
-
- for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
- /*
- * XXX: This accesses beyond skb data for up
- * to the last 3 bytes!!
- */
-
- if (old && (data[i] == old[i]))
- continue;
-
- word = le32_to_cpu(data[i]);
- carl9170_async_regwrite(addr + 4 * i, word);
- }
- carl9170_async_regwrite_finish();
-
- dev_kfree_skb_any(cvif->beacon);
- cvif->beacon = NULL;
-
- err = carl9170_async_regwrite_result();
- if (!err)
- cvif->beacon = skb;
- spin_unlock_bh(&ar->beacon_lock);
- if (err)
- goto err_free;
-
- if (submit) {
- err = carl9170_bcn_ctrl(ar, cvif->id,
- CARL9170_BCN_CTRL_CAB_TRIGGER,
- addr, skb->len + FCS_LEN);
-
- if (err)
- goto err_free;
- }
-out_unlock:
- rcu_read_unlock();
- return 0;
-
-err_unlock:
- spin_unlock_bh(&ar->beacon_lock);
-
-err_free:
- rcu_read_unlock();
- dev_kfree_skb_any(skb);
- return err;
-}
-
int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
const u8 ktype, const u8 keyidx, const u8 *keydata,
const int keylen)
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 54d093c2ab4..0122930b14c 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -345,11 +345,11 @@ static int carl9170_op_start(struct ieee80211_hw *hw)
carl9170_zap_queues(ar);
/* reset QoS defaults */
- CARL9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT */
- CARL9170_FILL_QUEUE(ar->edcf[1], 2, 7, 15, 94); /* VIDEO */
- CARL9170_FILL_QUEUE(ar->edcf[2], 2, 3, 7, 47); /* VOICE */
- CARL9170_FILL_QUEUE(ar->edcf[3], 7, 15, 1023, 0); /* BACKGROUND */
- CARL9170_FILL_QUEUE(ar->edcf[4], 2, 3, 7, 0); /* SPECIAL */
+ CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
+ CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
+ CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
+ CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
+ CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
ar->current_factor = ar->current_density = -1;
/* "The first key is unique." */
@@ -1484,6 +1484,13 @@ static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
}
}
+static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
+{
+ struct ar9170 *ar = hw->priv;
+
+ return !!atomic_read(&ar->tx_total_queued);
+}
+
static const struct ieee80211_ops carl9170_ops = {
.start = carl9170_op_start,
.stop = carl9170_op_stop,
@@ -1504,6 +1511,7 @@ static const struct ieee80211_ops carl9170_ops = {
.get_survey = carl9170_op_get_survey,
.get_stats = carl9170_op_get_stats,
.ampdu_action = carl9170_op_ampdu_action,
+ .tx_frames_pending = carl9170_tx_frames_pending,
};
void *carl9170_alloc(size_t priv_size)
@@ -1577,6 +1585,7 @@ void *carl9170_alloc(size_t priv_size)
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_NEED_DTIM_PERIOD |
IEEE80211_HW_SIGNAL_DBM;
if (!modparam_noht) {
@@ -1621,7 +1630,7 @@ static int carl9170_read_eeprom(struct ar9170 *ar)
BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
#endif
- for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
+ for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
for (j = 0; j < RW; j++)
offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
RB * i + 4 * j);
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
index b6ae0e179c8..aa147a9120b 100644
--- a/drivers/net/wireless/ath/carl9170/phy.c
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -1098,7 +1098,7 @@ static u8 carl9170_interpolate_u8(u8 x, u8 x1, u8 y1, u8 x2, u8 y2)
* Isn't it just DIV_ROUND_UP(y, 1<<SHIFT)?
* Can we rely on the compiler to optimise away the div?
*/
- return (y >> SHIFT) + ((y & (1<<(SHIFT-1))) >> (SHIFT - 1));
+ return (y >> SHIFT) + ((y & (1 << (SHIFT - 1))) >> (SHIFT - 1));
#undef SHIFT
}
@@ -1379,7 +1379,7 @@ static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw)
modes[i].max_power =
carl9170_get_max_edge_power(ar,
- freq+f_off, EDGES(ctl_idx, 1));
+ freq + f_off, EDGES(ctl_idx, 1));
/*
* TODO: check if the regulatory max. power is
@@ -1441,7 +1441,7 @@ static int carl9170_set_power_cal(struct ar9170 *ar, u32 freq,
if (freq < 3000)
f = freq - 2300;
else
- f = (freq - 4800)/5;
+ f = (freq - 4800) / 5;
/*
* cycle through the various modes
@@ -1783,12 +1783,6 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
}
}
- /* FIXME: PSM does not work in 5GHz Band */
- if (channel->band == IEEE80211_BAND_5GHZ)
- ar->ps.off_override |= PS_OFF_5GHZ;
- else
- ar->ps.off_override &= ~PS_OFF_5GHZ;
-
ar->channel = channel;
ar->ht_settings = new_ht;
return 0;
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index ec21ea9fd8d..dc99030ea8b 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -472,7 +472,7 @@ static struct sk_buff *carl9170_rx_copy_data(u8 *buf, int len)
u8 *qc = ieee80211_get_qos_ctl(hdr);
reserved += NET_IP_ALIGN;
- if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
+ if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
reserved += NET_IP_ALIGN;
}
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index e94084fcf6f..d20946939cd 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -661,11 +661,67 @@ void carl9170_tx_process_status(struct ar9170 *ar,
}
}
+static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
+ struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate,
+ unsigned int *phyrate, unsigned int *tpc, unsigned int *chains)
+{
+ struct ieee80211_rate *rate = NULL;
+ u8 *txpower;
+ unsigned int idx;
+
+ idx = txrate->idx;
+ *tpc = 0;
+ *phyrate = 0;
+
+ if (txrate->flags & IEEE80211_TX_RC_MCS) {
+ if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
+ /* +1 dBm for HT40 */
+ *tpc += 2;
+
+ if (info->band == IEEE80211_BAND_2GHZ)
+ txpower = ar->power_2G_ht40;
+ else
+ txpower = ar->power_5G_ht40;
+ } else {
+ if (info->band == IEEE80211_BAND_2GHZ)
+ txpower = ar->power_2G_ht20;
+ else
+ txpower = ar->power_5G_ht20;
+ }
+
+ *phyrate = txrate->idx;
+ *tpc += txpower[idx & 7];
+ } else {
+ if (info->band == IEEE80211_BAND_2GHZ) {
+ if (idx < 4)
+ txpower = ar->power_2G_cck;
+ else
+ txpower = ar->power_2G_ofdm;
+ } else {
+ txpower = ar->power_5G_leg;
+ idx += 4;
+ }
+
+ rate = &__carl9170_ratetable[idx];
+ *tpc += txpower[(rate->hw_value & 0x30) >> 4];
+ *phyrate = rate->hw_value & 0xf;
+ }
+
+ if (ar->eeprom.tx_mask == 1) {
+ *chains = AR9170_TX_PHY_TXCHAIN_1;
+ } else {
+ if (!(txrate->flags & IEEE80211_TX_RC_MCS) &&
+ rate && rate->bitrate >= 360)
+ *chains = AR9170_TX_PHY_TXCHAIN_1;
+ else
+ *chains = AR9170_TX_PHY_TXCHAIN_2;
+ }
+}
+
static __le32 carl9170_tx_physet(struct ar9170 *ar,
struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate)
{
- struct ieee80211_rate *rate = NULL;
- u32 power, chains;
+ unsigned int power = 0, chains = 0, phyrate = 0;
__le32 tmp;
tmp = cpu_to_le32(0);
@@ -682,35 +738,12 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar,
tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
if (txrate->flags & IEEE80211_TX_RC_MCS) {
- u32 r = txrate->idx;
- u8 *txpower;
+ SET_VAL(AR9170_TX_PHY_MCS, phyrate, txrate->idx);
/* heavy clip control */
- tmp |= cpu_to_le32((r & 0x7) <<
+ tmp |= cpu_to_le32((txrate->idx & 0x7) <<
AR9170_TX_PHY_TX_HEAVY_CLIP_S);
- if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
- if (info->band == IEEE80211_BAND_5GHZ)
- txpower = ar->power_5G_ht40;
- else
- txpower = ar->power_2G_ht40;
- } else {
- if (info->band == IEEE80211_BAND_5GHZ)
- txpower = ar->power_5G_ht20;
- else
- txpower = ar->power_2G_ht20;
- }
-
- power = txpower[r & 7];
-
- /* +1 dBm for HT40 */
- if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- power += 2;
-
- r <<= AR9170_TX_PHY_MCS_S;
- BUG_ON(r & ~AR9170_TX_PHY_MCS);
-
- tmp |= cpu_to_le32(r & AR9170_TX_PHY_MCS);
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
/*
@@ -720,34 +753,15 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar,
* tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
*/
} else {
- u8 *txpower;
- u32 mod;
- u32 phyrate;
- u8 idx = txrate->idx;
-
- if (info->band != IEEE80211_BAND_2GHZ) {
- idx += 4;
- txpower = ar->power_5G_leg;
- mod = AR9170_TX_PHY_MOD_OFDM;
+ if (info->band == IEEE80211_BAND_2GHZ) {
+ if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M)
+ tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK);
+ else
+ tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
} else {
- if (idx < 4) {
- txpower = ar->power_2G_cck;
- mod = AR9170_TX_PHY_MOD_CCK;
- } else {
- mod = AR9170_TX_PHY_MOD_OFDM;
- txpower = ar->power_2G_ofdm;
- }
+ tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
}
- rate = &__carl9170_ratetable[idx];
-
- phyrate = rate->hw_value & 0xF;
- power = txpower[(rate->hw_value & 0x30) >> 4];
- phyrate <<= AR9170_TX_PHY_MCS_S;
-
- tmp |= cpu_to_le32(mod);
- tmp |= cpu_to_le32(phyrate);
-
/*
* short preamble seems to be broken too.
*
@@ -755,23 +769,12 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar,
* tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
*/
}
- power <<= AR9170_TX_PHY_TX_PWR_S;
- power &= AR9170_TX_PHY_TX_PWR;
- tmp |= cpu_to_le32(power);
-
- /* set TX chains */
- if (ar->eeprom.tx_mask == 1) {
- chains = AR9170_TX_PHY_TXCHAIN_1;
- } else {
- chains = AR9170_TX_PHY_TXCHAIN_2;
-
- /* >= 36M legacy OFDM - use only one chain */
- if (rate && rate->bitrate >= 360 &&
- !(txrate->flags & IEEE80211_TX_RC_MCS))
- chains = AR9170_TX_PHY_TXCHAIN_1;
- }
- tmp |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_S);
+ carl9170_tx_rate_tpc_chains(ar, info, txrate,
+ &phyrate, &power, &chains);
+ tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_MCS, phyrate));
+ tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TX_PWR, power));
+ tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TXCHAIN, chains));
return tmp;
}
@@ -1438,3 +1441,154 @@ void carl9170_tx_scheduler(struct ar9170 *ar)
if (ar->tx_schedule)
carl9170_tx(ar);
}
+
+int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
+{
+ struct sk_buff *skb = NULL;
+ struct carl9170_vif_info *cvif;
+ struct ieee80211_tx_info *txinfo;
+ struct ieee80211_tx_rate *rate;
+ __le32 *data, *old = NULL;
+ unsigned int plcp, power, chains;
+ u32 word, ht1, off, addr, len;
+ int i = 0, err = 0;
+
+ rcu_read_lock();
+ cvif = rcu_dereference(ar->beacon_iter);
+retry:
+ if (ar->vifs == 0 || !cvif)
+ goto out_unlock;
+
+ list_for_each_entry_continue_rcu(cvif, &ar->vif_list, list) {
+ if (cvif->active && cvif->enable_beacon)
+ goto found;
+ }
+
+ if (!ar->beacon_enabled || i++)
+ goto out_unlock;
+
+ goto retry;
+
+found:
+ rcu_assign_pointer(ar->beacon_iter, cvif);
+
+ skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
+ NULL, NULL);
+
+ if (!skb) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ txinfo = IEEE80211_SKB_CB(skb);
+ spin_lock_bh(&ar->beacon_lock);
+ data = (__le32 *)skb->data;
+ if (cvif->beacon)
+ old = (__le32 *)cvif->beacon->data;
+
+ off = cvif->id * AR9170_MAC_BCN_LENGTH_MAX;
+ addr = ar->fw.beacon_addr + off;
+ len = roundup(skb->len + FCS_LEN, 4);
+
+ if ((off + len) > ar->fw.beacon_max_len) {
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy, "beacon does not "
+ "fit into device memory!\n");
+ }
+ err = -EINVAL;
+ goto err_unlock;
+ }
+
+ if (len > AR9170_MAC_BCN_LENGTH_MAX) {
+ if (net_ratelimit()) {
+ wiphy_err(ar->hw->wiphy, "no support for beacons "
+ "bigger than %d (yours:%d).\n",
+ AR9170_MAC_BCN_LENGTH_MAX, len);
+ }
+
+ err = -EMSGSIZE;
+ goto err_unlock;
+ }
+
+ ht1 = AR9170_MAC_BCN_HT1_TX_ANT0;
+ rate = &txinfo->control.rates[0];
+ carl9170_tx_rate_tpc_chains(ar, txinfo, rate, &plcp, &power, &chains);
+ if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS)) {
+ if (plcp <= AR9170_TX_PHY_RATE_CCK_11M)
+ plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
+ else
+ plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010;
+ } else {
+ ht1 |= AR9170_MAC_BCN_HT1_HT_EN;
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ plcp |= AR9170_MAC_BCN_HT2_SGI;
+
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
+ ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED;
+ plcp |= AR9170_MAC_BCN_HT2_BW40;
+ }
+ if (rate->flags & IEEE80211_TX_RC_DUP_DATA) {
+ ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP;
+ plcp |= AR9170_MAC_BCN_HT2_BW40;
+ }
+
+ SET_VAL(AR9170_MAC_BCN_HT2_LEN, plcp, skb->len + FCS_LEN);
+ }
+
+ SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, ht1, 7);
+ SET_VAL(AR9170_MAC_BCN_HT1_TPC, ht1, power);
+ SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, ht1, chains);
+ if (chains == AR9170_TX_PHY_TXCHAIN_2)
+ ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1;
+
+ carl9170_async_regwrite_begin(ar);
+ carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT1, ht1);
+ if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS))
+ carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp);
+ else
+ carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT2, plcp);
+
+ for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
+ /*
+ * XXX: This accesses beyond skb data for up
+ * to the last 3 bytes!!
+ */
+
+ if (old && (data[i] == old[i]))
+ continue;
+
+ word = le32_to_cpu(data[i]);
+ carl9170_async_regwrite(addr + 4 * i, word);
+ }
+ carl9170_async_regwrite_finish();
+
+ dev_kfree_skb_any(cvif->beacon);
+ cvif->beacon = NULL;
+
+ err = carl9170_async_regwrite_result();
+ if (!err)
+ cvif->beacon = skb;
+ spin_unlock_bh(&ar->beacon_lock);
+ if (err)
+ goto err_free;
+
+ if (submit) {
+ err = carl9170_bcn_ctrl(ar, cvif->id,
+ CARL9170_BCN_CTRL_CAB_TRIGGER,
+ addr, skb->len + FCS_LEN);
+
+ if (err)
+ goto err_free;
+ }
+out_unlock:
+ rcu_read_unlock();
+ return 0;
+
+err_unlock:
+ spin_unlock_bh(&ar->beacon_lock);
+
+err_free:
+ rcu_read_unlock();
+ dev_kfree_skb_any(skb);
+ return err;
+}
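
carl9170_tx_rate_tpc_chains() above consolidates the chain selection: single-chain EEPROMs always transmit on one chain, and legacy OFDM rates of 36 Mb/s and above also fall back to one chain (bitrate is in 100 kbit/s units, as in struct ieee80211_rate). A standalone sketch of that decision, using illustrative constants rather than the real AR9170_TX_PHY_TXCHAIN_* codes:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins; not the real AR9170_TX_PHY_TXCHAIN_* register codes. */
#define ONE_CHAIN  1
#define TWO_CHAINS 2

/* bitrate is in 100 kbit/s units, as in struct ieee80211_rate. */
static int pick_chains(unsigned int eeprom_tx_mask, bool is_mcs, unsigned int bitrate)
{
	if (eeprom_tx_mask == 1)
		return ONE_CHAIN;              /* only one chain is wired up */
	if (!is_mcs && bitrate >= 360)
		return ONE_CHAIN;              /* >= 36M legacy OFDM: single chain */
	return TWO_CHAINS;
}

int main(void)
{
	printf("54M legacy: %d chain(s)\n", pick_chains(3, false, 540));
	printf("HT MCS:     %d chain(s)\n", pick_chains(3, true, 0));
	printf("1-chain hw: %d chain(s)\n", pick_chains(1, true, 0));
	return 0;
}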
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 2fb53d06751..333b69ef2ae 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -112,6 +112,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
{ USB_DEVICE(0x04bb, 0x093f) },
/* NEC WL300NU-G */
{ USB_DEVICE(0x0409, 0x0249) },
+ /* NEC WL300NU-AG */
+ { USB_DEVICE(0x0409, 0x02b4) },
/* AVM FRITZ!WLAN USB Stick N */
{ USB_DEVICE(0x057c, 0x8401) },
/* AVM FRITZ!WLAN USB Stick N 2.4 */
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index 15095c03516..64703778cfe 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
#ifndef __CARL9170_SHARED_VERSION_H
#define __CARL9170_SHARED_VERSION_H
#define CARL9170FW_VERSION_YEAR 11
-#define CARL9170FW_VERSION_MONTH 1
-#define CARL9170FW_VERSION_DAY 22
-#define CARL9170FW_VERSION_GIT "1.9.2"
+#define CARL9170FW_VERSION_MONTH 6
+#define CARL9170FW_VERSION_DAY 30
+#define CARL9170FW_VERSION_GIT "1.9.4"
#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h
index 9e1324b67e0..ea17995b32f 100644
--- a/drivers/net/wireless/ath/carl9170/wlan.h
+++ b/drivers/net/wireless/ath/carl9170/wlan.h
@@ -4,7 +4,7 @@
* RX/TX meta descriptor format
*
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
- * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
+ * Copyright 2009-2011 Christian Lamparter <chunkeey@googlemail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -278,7 +278,7 @@ struct ar9170_tx_frame {
struct carl9170_tx_superframe {
struct carl9170_tx_superdesc s;
struct ar9170_tx_frame f;
-} __packed;
+} __packed __aligned(4);
#endif /* __CARL9170FW__ */
@@ -328,7 +328,7 @@ struct _carl9170_tx_superframe {
struct _carl9170_tx_superdesc s;
struct _ar9170_tx_hwdesc f;
u8 frame_data[0];
-} __packed;
+} __packed __aligned(4);
#define CARL9170_TX_SUPERDESC_LEN 24
#define AR9170_TX_HWDESC_LEN 8
@@ -404,16 +404,6 @@ static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_macstatus *t)
(t->DAidx & 0xc0) >> 6;
}
-enum ar9170_txq {
- AR9170_TXQ_BE,
-
- AR9170_TXQ_VI,
- AR9170_TXQ_VO,
- AR9170_TXQ_BK,
-
- __AR9170_NUM_TXQ,
-};
-
/*
* This is a workaround for several undocumented bugs.
* Don't mess with the QoS/AC <-> HW Queue map, if you don't
@@ -431,7 +421,14 @@ enum ar9170_txq {
* result, this makes the device pretty much useless
* for any serious 802.11n setup.
*/
-static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 };
+enum ar9170_txq {
+ AR9170_TXQ_BK = 0, /* TXQ0 */
+ AR9170_TXQ_BE, /* TXQ1 */
+ AR9170_TXQ_VI, /* TXQ2 */
+ AR9170_TXQ_VO, /* TXQ3 */
+
+ __AR9170_NUM_TXQ,
+};
#define AR9170_TXQ_DEPTH 32
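
After this change the enum follows the hardware's TXQ0..TXQ3 order (BK, BE, VI, VO) and ar9170_qmap translates mac80211's AC-ordered queue numbers (VO=0, VI=1, BE=2, BK=3) into it. A small standalone sketch of that translation, mirroring the ar9170_qmap[] moved into carl9170.h:

#include <stdio.h>

/* Hardware TX queue order after this patch (TXQ0..TXQ3). */
enum ar9170_txq_sketch {
	TXQ_BK = 0, TXQ_BE, TXQ_VI, TXQ_VO, NUM_TXQ
};

/* Indexed by mac80211 queue number (AC order), yields the hardware TXQ. */
static const unsigned char qmap[NUM_TXQ] = { TXQ_VO, TXQ_VI, TXQ_BE, TXQ_BK };

int main(void)
{
	static const char *const ac_name[NUM_TXQ] = { "VO", "VI", "BE", "BK" };
	unsigned int q;

	for (q = 0; q < NUM_TXQ; q++)
		printf("mac80211 queue %u (%s) -> hardware TXQ%u\n",
		       q, ac_name[q], (unsigned int)qmap[q]);
	return 0;
}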
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index a61ef3d6d89..17b0efd86f9 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -105,11 +105,8 @@ static bool ath_hw_keysetmac(struct ath_common *common,
if (mac[0] & 0x01)
unicast_flag = 0;
- macHi = (mac[5] << 8) | mac[4];
- macLo = (mac[3] << 24) |
- (mac[2] << 16) |
- (mac[1] << 8) |
- mac[0];
+ macLo = get_unaligned_le32(mac);
+ macHi = get_unaligned_le16(mac + 4);
macLo >>= 1;
macLo |= (macHi & 1) << 31;
macHi >>= 1;
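
The key.c change replaces the open-coded byte shifts with get_unaligned_le32()/get_unaligned_le16(), which build the same little-endian words from the MAC address. A portable sketch with stand-in loaders and an arbitrary example address:

#include <stdio.h>
#include <stdint.h>

/* Portable stand-ins for the kernel's get_unaligned_le32()/get_unaligned_le16(). */
static uint32_t load_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint16_t load_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x3a, 0x4b, 0x5c }; /* example only */
	uint32_t macLo = load_le32(mac);      /* same word the old shift chain built */
	uint16_t macHi = load_le16(mac + 4);

	printf("macLo=0x%08x macHi=0x%04x\n", (unsigned int)macLo, (unsigned int)macHi);
	return 0;
}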
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 39a11e8af4f..7e45ca2e78e 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -40,6 +40,7 @@
******************************************************************************/
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 480595f0441..3cab843afb0 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -26,6 +26,16 @@ config B43
This driver can be built as a module (recommended) that will be called "b43".
If unsure, say M.
+config B43_BCMA
+ bool "Support for BCMA bus"
+ depends on B43 && BCMA
+ default y
+
+config B43_SSB
+ bool
+ depends on B43 && SSB
+ default y
+
# Auto-select SSB PCI-HOST support, if possible
config B43_PCI_AUTOSELECT
bool
@@ -80,6 +90,12 @@ config B43_SDIO
#Data transfers to the device via PIO. We want it as a fallback even
# if we can do DMA.
+config B43_BCMA_PIO
+ bool
+ depends on B43_BCMA
+ select BCMA_BLOCKIO
+ default y
+
config B43_PIO
bool
depends on B43
@@ -107,6 +123,22 @@ config B43_PHY_LP
and embedded devices. It supports 802.11a/g
(802.11a support is optional, and currently disabled).
+config B43_PHY_HT
+ bool "Support for HT-PHY devices (BROKEN)"
+ depends on B43 && BROKEN
+ ---help---
+ Support for the HT-PHY.
+
+ Say N, this is BROKEN and crashes the driver.
+
+config B43_PHY_LCN
+ bool "Support for LCN-PHY devices (BROKEN)"
+ depends on B43 && BROKEN
+ ---help---
+ Support for the LCN-PHY.
+
+ Say N, this is BROKEN and crashes the driver.
+
# This config option automatically enables b43 LEDS support,
# if it's possible.
config B43_LEDS
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index cef334a8c66..4648bbf76ab 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -1,4 +1,5 @@
b43-y += main.o
+b43-y += bus.o
b43-y += tables.o
b43-$(CONFIG_B43_PHY_N) += tables_nphy.o
b43-$(CONFIG_B43_PHY_N) += radio_2055.o
@@ -9,6 +10,10 @@ b43-y += phy_a.o
b43-$(CONFIG_B43_PHY_N) += phy_n.o
b43-$(CONFIG_B43_PHY_LP) += phy_lp.o
b43-$(CONFIG_B43_PHY_LP) += tables_lpphy.o
+b43-$(CONFIG_B43_PHY_HT) += phy_ht.o
+b43-$(CONFIG_B43_PHY_HT) += tables_phy_ht.o
+b43-$(CONFIG_B43_PHY_HT) += radio_2059.o
+b43-$(CONFIG_B43_PHY_LCN) += phy_lcn.o tables_phy_lcn.o
b43-y += sysfs.o
b43-y += xmit.o
b43-y += lo.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 25a78cfb7d1..c818b0bc88e 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -5,12 +5,14 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/hw_random.h>
+#include <linux/bcma/bcma.h>
#include <linux/ssb/ssb.h>
#include <net/mac80211.h>
#include "debugfs.h"
#include "leds.h"
#include "rfkill.h"
+#include "bus.h"
#include "lo.h"
#include "phy_common.h"
@@ -90,6 +92,8 @@
#define B43_MMIO_PIO11_BASE4 0x300
#define B43_MMIO_PIO11_BASE5 0x340
+#define B43_MMIO_RADIO24_CONTROL 0x3D8 /* core rev >= 24 only */
+#define B43_MMIO_RADIO24_DATA 0x3DA /* core rev >= 24 only */
#define B43_MMIO_PHY_VER 0x3E0
#define B43_MMIO_PHY_RADIO 0x3E2
#define B43_MMIO_PHY0 0x3E6
@@ -361,6 +365,10 @@ enum {
#define B43_PHYTYPE_G 0x02
#define B43_PHYTYPE_N 0x04
#define B43_PHYTYPE_LP 0x05
+#define B43_PHYTYPE_SSLPN 0x06
+#define B43_PHYTYPE_HT 0x07
+#define B43_PHYTYPE_LCN 0x08
+#define B43_PHYTYPE_LCNXN 0x09
/* PHYRegisters */
#define B43_PHY_ILT_A_CTRL 0x0072
@@ -414,6 +422,23 @@ enum {
#define B43_MACCMD_CCA 0x00000008 /* Clear channel assessment */
#define B43_MACCMD_BGNOISE 0x00000010 /* Background noise */
+/* BCMA 802.11 core specific IO Control (BCMA_IOCTL) flags */
+#define B43_BCMA_IOCTL_PHY_CLKEN 0x00000004 /* PHY Clock Enable */
+#define B43_BCMA_IOCTL_PHY_RESET 0x00000008 /* PHY Reset */
+#define B43_BCMA_IOCTL_MACPHYCLKEN 0x00000010 /* MAC PHY Clock Control Enable */
+#define B43_BCMA_IOCTL_PLLREFSEL 0x00000020 /* PLL Frequency Reference Select */
+#define B43_BCMA_IOCTL_PHY_BW 0x000000C0 /* PHY bandwidth and clock speed mask (N-PHY+ only?) */
+#define B43_BCMA_IOCTL_PHY_BW_10MHZ 0x00000000 /* 10 MHz bandwidth, 40 MHz PHY */
+#define B43_BCMA_IOCTL_PHY_BW_20MHZ 0x00000040 /* 20 MHz bandwidth, 80 MHz PHY */
+#define B43_BCMA_IOCTL_PHY_BW_40MHZ 0x00000080 /* 40 MHz bandwidth, 160 MHz PHY */
+#define B43_BCMA_IOCTL_GMODE 0x00002000 /* G Mode Enable */
+
+/* BCMA 802.11 core specific IO status (BCMA_IOST) flags */
+#define B43_BCMA_IOST_2G_PHY 0x00000001 /* 2.4G capable phy */
+#define B43_BCMA_IOST_5G_PHY 0x00000002 /* 5G capable phy */
+#define B43_BCMA_IOST_FASTCLKA 0x00000004 /* Fast Clock Available */
+#define B43_BCMA_IOST_DUALB_PHY 0x00000008 /* Dualband phy */
+
/* 802.11 core specific TM State Low (SSB_TMSLOW) flags */
#define B43_TMSLOW_GMODE 0x20000000 /* G Mode Enable */
#define B43_TMSLOW_PHY_BANDWIDTH 0x00C00000 /* PHY band width and clock speed mask (N-PHY only) */
@@ -569,6 +594,7 @@ struct b43_dma {
struct b43_dmaring *rx_ring;
u32 translation; /* Routing bits */
+ bool parity; /* Check for parity */
};
struct b43_pio_txqueue;
@@ -707,7 +733,7 @@ enum {
/* Data structure for one wireless device (802.11 core) */
struct b43_wldev {
- struct ssb_device *sdev;
+ struct b43_bus_dev *dev;
struct b43_wl *wl;
/* The device initialization status.
@@ -879,36 +905,59 @@ static inline enum ieee80211_band b43_current_band(struct b43_wl *wl)
return wl->hw->conf.channel->band;
}
+static inline int b43_bus_may_powerdown(struct b43_wldev *wldev)
+{
+ return wldev->dev->bus_may_powerdown(wldev->dev);
+}
+static inline int b43_bus_powerup(struct b43_wldev *wldev, bool dynamic_pctl)
+{
+ return wldev->dev->bus_powerup(wldev->dev, dynamic_pctl);
+}
+static inline int b43_device_is_enabled(struct b43_wldev *wldev)
+{
+ return wldev->dev->device_is_enabled(wldev->dev);
+}
+static inline void b43_device_enable(struct b43_wldev *wldev,
+ u32 core_specific_flags)
+{
+ wldev->dev->device_enable(wldev->dev, core_specific_flags);
+}
+static inline void b43_device_disable(struct b43_wldev *wldev,
+ u32 core_specific_flags)
+{
+ wldev->dev->device_disable(wldev->dev, core_specific_flags);
+}
+
static inline u16 b43_read16(struct b43_wldev *dev, u16 offset)
{
- return ssb_read16(dev->sdev, offset);
+ return dev->dev->read16(dev->dev, offset);
}
static inline void b43_write16(struct b43_wldev *dev, u16 offset, u16 value)
{
- ssb_write16(dev->sdev, offset, value);
+ dev->dev->write16(dev->dev, offset, value);
}
static inline u32 b43_read32(struct b43_wldev *dev, u16 offset)
{
- return ssb_read32(dev->sdev, offset);
+ return dev->dev->read32(dev->dev, offset);
}
static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
{
- ssb_write32(dev->sdev, offset, value);
+ dev->dev->write32(dev->dev, offset, value);
}
static inline void b43_block_read(struct b43_wldev *dev, void *buffer,
size_t count, u16 offset, u8 reg_width)
{
- ssb_block_read(dev->sdev, buffer, count, offset, reg_width);
+ dev->dev->block_read(dev->dev, buffer, count, offset, reg_width);
}
static inline void b43_block_write(struct b43_wldev *dev, const void *buffer,
size_t count, u16 offset, u8 reg_width)
{
- ssb_block_write(dev->sdev, buffer, count, offset, reg_width);
+ dev->dev->block_write(dev->dev, buffer, count, offset, reg_width);
}
static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/bus.c b/drivers/net/wireless/b43/bus.c
new file mode 100644
index 00000000000..05f6c7bff6a
--- /dev/null
+++ b/drivers/net/wireless/b43/bus.c
@@ -0,0 +1,255 @@
+/*
+
+ Broadcom B43 wireless driver
+ Bus abstraction layer
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA.
+
+*/
+
+#include "b43.h"
+#include "bus.h"
+
+/* BCMA */
+#ifdef CONFIG_B43_BCMA
+static int b43_bus_bcma_bus_may_powerdown(struct b43_bus_dev *dev)
+{
+ return 0; /* bcma_bus_may_powerdown(dev->bdev->bus); */
+}
+static int b43_bus_bcma_bus_powerup(struct b43_bus_dev *dev,
+ bool dynamic_pctl)
+{
+ return 0; /* bcma_bus_powerup(dev->sdev->bus, dynamic_pctl); */
+}
+static int b43_bus_bcma_device_is_enabled(struct b43_bus_dev *dev)
+{
+ return bcma_core_is_enabled(dev->bdev);
+}
+static void b43_bus_bcma_device_enable(struct b43_bus_dev *dev,
+ u32 core_specific_flags)
+{
+ bcma_core_enable(dev->bdev, core_specific_flags);
+}
+static void b43_bus_bcma_device_disable(struct b43_bus_dev *dev,
+ u32 core_specific_flags)
+{
+ bcma_core_disable(dev->bdev, core_specific_flags);
+}
+static u16 b43_bus_bcma_read16(struct b43_bus_dev *dev, u16 offset)
+{
+ return bcma_read16(dev->bdev, offset);
+}
+static u32 b43_bus_bcma_read32(struct b43_bus_dev *dev, u16 offset)
+{
+ return bcma_read32(dev->bdev, offset);
+}
+static
+void b43_bus_bcma_write16(struct b43_bus_dev *dev, u16 offset, u16 value)
+{
+ bcma_write16(dev->bdev, offset, value);
+}
+static
+void b43_bus_bcma_write32(struct b43_bus_dev *dev, u16 offset, u32 value)
+{
+ bcma_write32(dev->bdev, offset, value);
+}
+static
+void b43_bus_bcma_block_read(struct b43_bus_dev *dev, void *buffer,
+ size_t count, u16 offset, u8 reg_width)
+{
+ bcma_block_read(dev->bdev, buffer, count, offset, reg_width);
+}
+static
+void b43_bus_bcma_block_write(struct b43_bus_dev *dev, const void *buffer,
+ size_t count, u16 offset, u8 reg_width)
+{
+ bcma_block_write(dev->bdev, buffer, count, offset, reg_width);
+}
+
+struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core)
+{
+ struct b43_bus_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ dev->bus_type = B43_BUS_BCMA;
+ dev->bdev = core;
+
+ dev->bus_may_powerdown = b43_bus_bcma_bus_may_powerdown;
+ dev->bus_powerup = b43_bus_bcma_bus_powerup;
+ dev->device_is_enabled = b43_bus_bcma_device_is_enabled;
+ dev->device_enable = b43_bus_bcma_device_enable;
+ dev->device_disable = b43_bus_bcma_device_disable;
+
+ dev->read16 = b43_bus_bcma_read16;
+ dev->read32 = b43_bus_bcma_read32;
+ dev->write16 = b43_bus_bcma_write16;
+ dev->write32 = b43_bus_bcma_write32;
+ dev->block_read = b43_bus_bcma_block_read;
+ dev->block_write = b43_bus_bcma_block_write;
+
+ dev->dev = &core->dev;
+ dev->dma_dev = core->dma_dev;
+ dev->irq = core->irq;
+
+ /*
+ dev->board_vendor = core->bus->boardinfo.vendor;
+ dev->board_type = core->bus->boardinfo.type;
+ dev->board_rev = core->bus->boardinfo.rev;
+ */
+
+ dev->chip_id = core->bus->chipinfo.id;
+ dev->chip_rev = core->bus->chipinfo.rev;
+ dev->chip_pkg = core->bus->chipinfo.pkg;
+
+ dev->bus_sprom = &core->bus->sprom;
+
+ dev->core_id = core->id.id;
+ dev->core_rev = core->id.rev;
+
+ return dev;
+}
+#endif /* CONFIG_B43_BCMA */
+
+/* SSB */
+#ifdef CONFIG_B43_SSB
+static int b43_bus_ssb_bus_may_powerdown(struct b43_bus_dev *dev)
+{
+ return ssb_bus_may_powerdown(dev->sdev->bus);
+}
+static int b43_bus_ssb_bus_powerup(struct b43_bus_dev *dev,
+ bool dynamic_pctl)
+{
+ return ssb_bus_powerup(dev->sdev->bus, dynamic_pctl);
+}
+static int b43_bus_ssb_device_is_enabled(struct b43_bus_dev *dev)
+{
+ return ssb_device_is_enabled(dev->sdev);
+}
+static void b43_bus_ssb_device_enable(struct b43_bus_dev *dev,
+ u32 core_specific_flags)
+{
+ ssb_device_enable(dev->sdev, core_specific_flags);
+}
+static void b43_bus_ssb_device_disable(struct b43_bus_dev *dev,
+ u32 core_specific_flags)
+{
+ ssb_device_disable(dev->sdev, core_specific_flags);
+}
+
+static u16 b43_bus_ssb_read16(struct b43_bus_dev *dev, u16 offset)
+{
+ return ssb_read16(dev->sdev, offset);
+}
+static u32 b43_bus_ssb_read32(struct b43_bus_dev *dev, u16 offset)
+{
+ return ssb_read32(dev->sdev, offset);
+}
+static void b43_bus_ssb_write16(struct b43_bus_dev *dev, u16 offset, u16 value)
+{
+ ssb_write16(dev->sdev, offset, value);
+}
+static void b43_bus_ssb_write32(struct b43_bus_dev *dev, u16 offset, u32 value)
+{
+ ssb_write32(dev->sdev, offset, value);
+}
+static void b43_bus_ssb_block_read(struct b43_bus_dev *dev, void *buffer,
+ size_t count, u16 offset, u8 reg_width)
+{
+ ssb_block_read(dev->sdev, buffer, count, offset, reg_width);
+}
+static
+void b43_bus_ssb_block_write(struct b43_bus_dev *dev, const void *buffer,
+ size_t count, u16 offset, u8 reg_width)
+{
+ ssb_block_write(dev->sdev, buffer, count, offset, reg_width);
+}
+
+struct b43_bus_dev *b43_bus_dev_ssb_init(struct ssb_device *sdev)
+{
+ struct b43_bus_dev *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ dev->bus_type = B43_BUS_SSB;
+ dev->sdev = sdev;
+
+ dev->bus_may_powerdown = b43_bus_ssb_bus_may_powerdown;
+ dev->bus_powerup = b43_bus_ssb_bus_powerup;
+ dev->device_is_enabled = b43_bus_ssb_device_is_enabled;
+ dev->device_enable = b43_bus_ssb_device_enable;
+ dev->device_disable = b43_bus_ssb_device_disable;
+
+ dev->read16 = b43_bus_ssb_read16;
+ dev->read32 = b43_bus_ssb_read32;
+ dev->write16 = b43_bus_ssb_write16;
+ dev->write32 = b43_bus_ssb_write32;
+ dev->block_read = b43_bus_ssb_block_read;
+ dev->block_write = b43_bus_ssb_block_write;
+
+ dev->dev = sdev->dev;
+ dev->dma_dev = sdev->dma_dev;
+ dev->irq = sdev->irq;
+
+ dev->board_vendor = sdev->bus->boardinfo.vendor;
+ dev->board_type = sdev->bus->boardinfo.type;
+ dev->board_rev = sdev->bus->boardinfo.rev;
+
+ dev->chip_id = sdev->bus->chip_id;
+ dev->chip_rev = sdev->bus->chip_rev;
+ dev->chip_pkg = sdev->bus->chip_package;
+
+ dev->bus_sprom = &sdev->bus->sprom;
+
+ dev->core_id = sdev->id.coreid;
+ dev->core_rev = sdev->id.revision;
+
+ return dev;
+}
+#endif /* CONFIG_B43_SSB */
+
+void *b43_bus_get_wldev(struct b43_bus_dev *dev)
+{
+ switch (dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ return bcma_get_drvdata(dev->bdev);
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ return ssb_get_drvdata(dev->sdev);
+#endif
+ }
+ return NULL;
+}
+
+void b43_bus_set_wldev(struct b43_bus_dev *dev, void *wldev)
+{
+ switch (dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ bcma_set_drvdata(dev->bdev, wldev);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ ssb_set_drvdata(dev->sdev, wldev);
+ break;
+#endif
+ }
+}
diff --git a/drivers/net/wireless/b43/bus.h b/drivers/net/wireless/b43/bus.h
new file mode 100644
index 00000000000..184c9565927
--- /dev/null
+++ b/drivers/net/wireless/b43/bus.h
@@ -0,0 +1,70 @@
+#ifndef B43_BUS_H_
+#define B43_BUS_H_
+
+enum b43_bus_type {
+#ifdef CONFIG_B43_BCMA
+ B43_BUS_BCMA,
+#endif
+ B43_BUS_SSB,
+};
+
+struct b43_bus_dev {
+ enum b43_bus_type bus_type;
+ union {
+ struct bcma_device *bdev;
+ struct ssb_device *sdev;
+ };
+
+ int (*bus_may_powerdown)(struct b43_bus_dev *dev);
+ int (*bus_powerup)(struct b43_bus_dev *dev, bool dynamic_pctl);
+ int (*device_is_enabled)(struct b43_bus_dev *dev);
+ void (*device_enable)(struct b43_bus_dev *dev,
+ u32 core_specific_flags);
+ void (*device_disable)(struct b43_bus_dev *dev,
+ u32 core_specific_flags);
+
+ u16 (*read16)(struct b43_bus_dev *dev, u16 offset);
+ u32 (*read32)(struct b43_bus_dev *dev, u16 offset);
+ void (*write16)(struct b43_bus_dev *dev, u16 offset, u16 value);
+ void (*write32)(struct b43_bus_dev *dev, u16 offset, u32 value);
+ void (*block_read)(struct b43_bus_dev *dev, void *buffer,
+ size_t count, u16 offset, u8 reg_width);
+ void (*block_write)(struct b43_bus_dev *dev, const void *buffer,
+ size_t count, u16 offset, u8 reg_width);
+
+ struct device *dev;
+ struct device *dma_dev;
+ unsigned int irq;
+
+ u16 board_vendor;
+ u16 board_type;
+ u16 board_rev;
+
+ u16 chip_id;
+ u8 chip_rev;
+ u8 chip_pkg;
+
+ struct ssb_sprom *bus_sprom;
+
+ u16 core_id;
+ u8 core_rev;
+};
+
+static inline bool b43_bus_host_is_pcmcia(struct b43_bus_dev *dev)
+{
+ return (dev->bus_type == B43_BUS_SSB &&
+ dev->sdev->bus->bustype == SSB_BUSTYPE_PCMCIA);
+}
+static inline bool b43_bus_host_is_sdio(struct b43_bus_dev *dev)
+{
+ return (dev->bus_type == B43_BUS_SSB &&
+ dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO);
+}
+
+struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core);
+struct b43_bus_dev *b43_bus_dev_ssb_init(struct ssb_device *sdev);
+
+void *b43_bus_get_wldev(struct b43_bus_dev *dev);
+void b43_bus_set_wldev(struct b43_bus_dev *dev, void *data);
+
+#endif /* B43_BUS_H_ */
diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
index 59f59fa4033..e751fdee89b 100644
--- a/drivers/net/wireless/b43/debugfs.c
+++ b/drivers/net/wireless/b43/debugfs.c
@@ -4,7 +4,7 @@
debugfs driver debugging code
- Copyright (c) 2005-2007 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2005-2007 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 47d44bcff37..481e534534e 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -4,7 +4,7 @@
DMA ringbuffer and descriptor allocation/management
- Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>
Some code in this file is derived from the b44.c driver
Copyright (C) 2002 David S. Miller
@@ -174,7 +174,7 @@ static void op64_fill_descriptor(struct b43_dmaring *ring,
addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
>> SSB_DMA_TRANSLATION_SHIFT;
- addrhi |= (ring->dev->dma.translation << 1);
+ addrhi |= ring->dev->dma.translation;
if (slot == ring->nr_slots - 1)
ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
if (start)
@@ -333,10 +333,10 @@ static inline
dma_addr_t dmaaddr;
if (tx) {
- dmaaddr = dma_map_single(ring->dev->sdev->dma_dev,
+ dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
buf, len, DMA_TO_DEVICE);
} else {
- dmaaddr = dma_map_single(ring->dev->sdev->dma_dev,
+ dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
buf, len, DMA_FROM_DEVICE);
}
@@ -348,10 +348,10 @@ static inline
dma_addr_t addr, size_t len, int tx)
{
if (tx) {
- dma_unmap_single(ring->dev->sdev->dma_dev,
+ dma_unmap_single(ring->dev->dev->dma_dev,
addr, len, DMA_TO_DEVICE);
} else {
- dma_unmap_single(ring->dev->sdev->dma_dev,
+ dma_unmap_single(ring->dev->dev->dma_dev,
addr, len, DMA_FROM_DEVICE);
}
}
@@ -361,7 +361,7 @@ static inline
dma_addr_t addr, size_t len)
{
B43_WARN_ON(ring->tx);
- dma_sync_single_for_cpu(ring->dev->sdev->dma_dev,
+ dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
addr, len, DMA_FROM_DEVICE);
}
@@ -370,7 +370,7 @@ static inline
dma_addr_t addr, size_t len)
{
B43_WARN_ON(ring->tx);
- dma_sync_single_for_device(ring->dev->sdev->dma_dev,
+ dma_sync_single_for_device(ring->dev->dev->dma_dev,
addr, len, DMA_FROM_DEVICE);
}
@@ -401,7 +401,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
*/
if (ring->type == B43_DMA_64BIT)
flags |= GFP_DMA;
- ring->descbase = dma_alloc_coherent(ring->dev->sdev->dma_dev,
+ ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
B43_DMA_RINGMEMSIZE,
&(ring->dmabase), flags);
if (!ring->descbase) {
@@ -415,7 +415,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
static void free_ringmemory(struct b43_dmaring *ring)
{
- dma_free_coherent(ring->dev->sdev->dma_dev, B43_DMA_RINGMEMSIZE,
+ dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
ring->descbase, ring->dmabase);
}
@@ -523,7 +523,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
dma_addr_t addr,
size_t buffersize, bool dma_to_device)
{
- if (unlikely(dma_mapping_error(ring->dev->sdev->dma_dev, addr)))
+ if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
return 1;
switch (ring->type) {
@@ -659,6 +659,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
u32 value;
u32 addrext;
u32 trans = ring->dev->dma.translation;
+ bool parity = ring->dev->dma.parity;
if (ring->tx) {
if (ring->type == B43_DMA_64BIT) {
@@ -669,13 +670,15 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
value = B43_DMA64_TXENABLE;
value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
& B43_DMA64_TXADDREXT_MASK;
+ if (!parity)
+ value |= B43_DMA64_TXPARITYDISABLE;
b43_dma_write(ring, B43_DMA64_TXCTL, value);
b43_dma_write(ring, B43_DMA64_TXRINGLO,
(ringbase & 0xFFFFFFFF));
b43_dma_write(ring, B43_DMA64_TXRINGHI,
((ringbase >> 32) &
~SSB_DMA_TRANSLATION_MASK)
- | (trans << 1));
+ | trans);
} else {
u32 ringbase = (u32) (ring->dmabase);
@@ -684,6 +687,8 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
value = B43_DMA32_TXENABLE;
value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
& B43_DMA32_TXADDREXT_MASK;
+ if (!parity)
+ value |= B43_DMA32_TXPARITYDISABLE;
b43_dma_write(ring, B43_DMA32_TXCTL, value);
b43_dma_write(ring, B43_DMA32_TXRING,
(ringbase & ~SSB_DMA_TRANSLATION_MASK)
@@ -702,13 +707,15 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
value |= B43_DMA64_RXENABLE;
value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
& B43_DMA64_RXADDREXT_MASK;
+ if (!parity)
+ value |= B43_DMA64_RXPARITYDISABLE;
b43_dma_write(ring, B43_DMA64_RXCTL, value);
b43_dma_write(ring, B43_DMA64_RXRINGLO,
(ringbase & 0xFFFFFFFF));
b43_dma_write(ring, B43_DMA64_RXRINGHI,
((ringbase >> 32) &
~SSB_DMA_TRANSLATION_MASK)
- | (trans << 1));
+ | trans);
b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
sizeof(struct b43_dmadesc64));
} else {
@@ -720,6 +727,8 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
value |= B43_DMA32_RXENABLE;
value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
& B43_DMA32_RXADDREXT_MASK;
+ if (!parity)
+ value |= B43_DMA32_RXPARITYDISABLE;
b43_dma_write(ring, B43_DMA32_RXCTL, value);
b43_dma_write(ring, B43_DMA32_RXRING,
(ringbase & ~SSB_DMA_TRANSLATION_MASK)
@@ -757,14 +766,14 @@ static void dmacontroller_cleanup(struct b43_dmaring *ring)
static void free_all_descbuffers(struct b43_dmaring *ring)
{
- struct b43_dmadesc_generic *desc;
struct b43_dmadesc_meta *meta;
int i;
if (!ring->used_slots)
return;
for (i = 0; i < ring->nr_slots; i++) {
- desc = ring->ops->idx2desc(ring, i, &meta);
+ /* get meta - ignore returned value */
+ ring->ops->idx2desc(ring, i, &meta);
if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
B43_WARN_ON(!ring->tx);
@@ -786,9 +795,23 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
u32 tmp;
u16 mmio_base;
- tmp = b43_read32(dev, SSB_TMSHIGH);
- if (tmp & SSB_TMSHIGH_DMA64)
- return DMA_BIT_MASK(64);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
+ if (tmp & BCMA_IOST_DMA64)
+ return DMA_BIT_MASK(64);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
+ if (tmp & SSB_TMSHIGH_DMA64)
+ return DMA_BIT_MASK(64);
+ break;
+#endif
+ }
+
mmio_base = b43_dmacontroller_base(0, 0);
b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
@@ -869,7 +892,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
goto err_kfree_meta;
/* test for ability to dma to txhdr_cache */
- dma_test = dma_map_single(dev->sdev->dma_dev,
+ dma_test = dma_map_single(dev->dev->dma_dev,
ring->txhdr_cache,
b43_txhdr_size(dev),
DMA_TO_DEVICE);
@@ -884,7 +907,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
if (!ring->txhdr_cache)
goto err_kfree_meta;
- dma_test = dma_map_single(dev->sdev->dma_dev,
+ dma_test = dma_map_single(dev->dev->dma_dev,
ring->txhdr_cache,
b43_txhdr_size(dev),
DMA_TO_DEVICE);
@@ -898,7 +921,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
}
}
- dma_unmap_single(dev->sdev->dma_dev,
+ dma_unmap_single(dev->dev->dma_dev,
dma_test, b43_txhdr_size(dev),
DMA_TO_DEVICE);
}
@@ -1013,9 +1036,9 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
/* Try to set the DMA mask. If it fails, try falling back to a
* lower mask, as we can always also support a lower one. */
while (1) {
- err = dma_set_mask(dev->sdev->dma_dev, mask);
+ err = dma_set_mask(dev->dev->dma_dev, mask);
if (!err) {
- err = dma_set_coherent_mask(dev->sdev->dma_dev, mask);
+ err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
if (!err)
break;
}
@@ -1055,7 +1078,26 @@ int b43_dma_init(struct b43_wldev *dev)
err = b43_dma_set_mask(dev, dmamask);
if (err)
return err;
- dma->translation = ssb_dma_translation(dev->sdev);
+
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ dma->translation = bcma_core_dma_translation(dev->dev->bdev);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ dma->translation = ssb_dma_translation(dev->dev->sdev);
+ break;
+#endif
+ }
+
+ dma->parity = true;
+#ifdef CONFIG_B43_BCMA
+ /* TODO: find out which SSB devices need disabling parity */
+ if (dev->dev->bus_type == B43_BUS_BCMA)
+ dma->parity = false;
+#endif
err = -ENOMEM;
/* setup TX DMA channels. */
@@ -1085,7 +1127,7 @@ int b43_dma_init(struct b43_wldev *dev)
goto err_destroy_mcast;
/* No support for the TX status DMA ring. */
- B43_WARN_ON(dev->sdev->id.revision < 5);
+ B43_WARN_ON(dev->dev->core_rev < 5);
b43dbg(dev->wl, "%u-bit DMA initialized\n",
(unsigned int)type);
@@ -1388,7 +1430,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
{
const struct b43_dma_ops *ops;
struct b43_dmaring *ring;
- struct b43_dmadesc_generic *desc;
struct b43_dmadesc_meta *meta;
int slot, firstused;
bool frame_succeed;
@@ -1416,7 +1457,8 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
ops = ring->ops;
while (1) {
B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
- desc = ops->idx2desc(ring, slot, &meta);
+ /* get meta - ignore returned value */
+ ops->idx2desc(ring, slot, &meta);
if (b43_dma_ptr_is_poisoned(meta->skb)) {
b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
@@ -1600,6 +1642,7 @@ void b43_dma_rx(struct b43_dmaring *ring)
dma_rx(ring, &slot);
update_max_used_slots(ring, ++used_slots);
}
+ wmb();
ops->set_current_rxslot(ring, slot);
ring->current_slot = slot;
}
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index e8a80a1251b..cdf87094efe 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -20,6 +20,7 @@
#define B43_DMA32_TXSUSPEND 0x00000002
#define B43_DMA32_TXLOOPBACK 0x00000004
#define B43_DMA32_TXFLUSH 0x00000010
+#define B43_DMA32_TXPARITYDISABLE 0x00000800
#define B43_DMA32_TXADDREXT_MASK 0x00030000
#define B43_DMA32_TXADDREXT_SHIFT 16
#define B43_DMA32_TXRING 0x04
@@ -44,6 +45,7 @@
#define B43_DMA32_RXFROFF_MASK 0x000000FE
#define B43_DMA32_RXFROFF_SHIFT 1
#define B43_DMA32_RXDIRECTFIFO 0x00000100
+#define B43_DMA32_RXPARITYDISABLE 0x00000800
#define B43_DMA32_RXADDREXT_MASK 0x00030000
#define B43_DMA32_RXADDREXT_SHIFT 16
#define B43_DMA32_RXRING 0x14
@@ -84,6 +86,7 @@ struct b43_dmadesc32 {
#define B43_DMA64_TXSUSPEND 0x00000002
#define B43_DMA64_TXLOOPBACK 0x00000004
#define B43_DMA64_TXFLUSH 0x00000010
+#define B43_DMA64_TXPARITYDISABLE 0x00000800
#define B43_DMA64_TXADDREXT_MASK 0x00030000
#define B43_DMA64_TXADDREXT_SHIFT 16
#define B43_DMA64_TXINDEX 0x04
@@ -111,6 +114,7 @@ struct b43_dmadesc32 {
#define B43_DMA64_RXFROFF_MASK 0x000000FE
#define B43_DMA64_RXFROFF_SHIFT 1
#define B43_DMA64_RXDIRECTFIFO 0x00000100
+#define B43_DMA64_RXPARITYDISABLE 0x00000800
#define B43_DMA64_RXADDREXT_MASK 0x00030000
#define B43_DMA64_RXADDREXT_SHIFT 16
#define B43_DMA64_RXINDEX 0x24
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index 0cafafe368a..a38c1c6446a 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -5,7 +5,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Copyright (c) 2005 Stefano Brivio <stefano.brivio@polimi.it>
- Copyright (c) 2005-2007 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2005-2007 Michael Buesch <m@bues.ch>
Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
@@ -138,7 +138,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
led->led_dev.default_trigger = default_trigger;
led->led_dev.brightness_set = b43_led_brightness_set;
- err = led_classdev_register(dev->sdev->dev, &led->led_dev);
+ err = led_classdev_register(dev->dev->dev, &led->led_dev);
if (err) {
b43warn(dev->wl, "LEDs: Failed to register %s\n", name);
led->wl = NULL;
@@ -215,13 +215,12 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
enum b43_led_behaviour *behaviour,
bool *activelow)
{
- struct ssb_bus *bus = dev->sdev->bus;
u8 sprom[4];
- sprom[0] = bus->sprom.gpio0;
- sprom[1] = bus->sprom.gpio1;
- sprom[2] = bus->sprom.gpio2;
- sprom[3] = bus->sprom.gpio3;
+ sprom[0] = dev->dev->bus_sprom->gpio0;
+ sprom[1] = dev->dev->bus_sprom->gpio1;
+ sprom[2] = dev->dev->bus_sprom->gpio2;
+ sprom[3] = dev->dev->bus_sprom->gpio3;
if (sprom[led_index] == 0xFF) {
/* There is no LED information in the SPROM
@@ -231,12 +230,12 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
case 0:
*behaviour = B43_LED_ACTIVITY;
*activelow = 1;
- if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
+ if (dev->dev->board_vendor == PCI_VENDOR_ID_COMPAQ)
*behaviour = B43_LED_RADIO_ALL;
break;
case 1:
*behaviour = B43_LED_RADIO_B;
- if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK)
+ if (dev->dev->board_vendor == PCI_VENDOR_ID_ASUSTEK)
*behaviour = B43_LED_ASSOC;
break;
case 2:
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 2ef7d4b3854..4c82d582a52 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -6,7 +6,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Copyright (c) 2005, 2006 Stefano Brivio <stefano.brivio@polimi.it>
- Copyright (c) 2005-2007 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2005-2007 Michael Buesch <m@bues.ch>
Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
@@ -98,7 +98,7 @@ static u16 lo_measure_feedthrough(struct b43_wldev *dev,
rfover |= pga;
rfover |= lna;
rfover |= trsw_rx;
- if ((dev->sdev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA)
+ if ((dev->dev->bus_sprom->boardflags_lo & B43_BFL_EXTLNA)
&& phy->rev > 6)
rfover |= B43_PHY_RFOVERVAL_EXTLNA;
@@ -301,14 +301,12 @@ static void lo_measure_gain_values(struct b43_wldev *dev,
max_rx_gain = 0;
if (has_loopback_gain(phy)) {
- int trsw_rx = 0;
int trsw_rx_gain;
if (use_trsw_rx) {
trsw_rx_gain = gphy->trsw_rx_gain / 2;
if (max_rx_gain >= trsw_rx_gain) {
trsw_rx_gain = max_rx_gain - trsw_rx_gain;
- trsw_rx = 0x20;
}
} else
trsw_rx_gain = max_rx_gain;
@@ -387,7 +385,7 @@ struct lo_g_saved_values {
static void lo_measure_setup(struct b43_wldev *dev,
struct lo_g_saved_values *sav)
{
- struct ssb_sprom *sprom = &dev->sdev->bus->sprom;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
struct b43_phy *phy = &dev->phy;
struct b43_phy_g *gphy = phy->g;
struct b43_txpower_lo_control *lo = gphy->lo_control;
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index eb415968698..26f1ab840cc 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4,7 +4,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>
Copyright (c) 2005 Stefano Brivio <stefano.brivio@polimi.it>
- Copyright (c) 2005-2009 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2005-2009 Michael Buesch <m@bues.ch>
Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
@@ -113,6 +113,17 @@ static int b43_modparam_pio = B43_PIO_DEFAULT;
module_param_named(pio, b43_modparam_pio, int, 0644);
MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
+#ifdef CONFIG_B43_BCMA
+static const struct bcma_device_id b43_bcma_tbl[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS),
+ BCMA_CORETABLE_END
+};
+MODULE_DEVICE_TABLE(bcma, b43_bcma_tbl);
+#endif
+
+#ifdef CONFIG_B43_SSB
static const struct ssb_device_id b43_ssb_tbl[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 6),
@@ -126,8 +137,8 @@ static const struct ssb_device_id b43_ssb_tbl[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16),
SSB_DEVTABLE_END
};
-
MODULE_DEVICE_TABLE(ssb, b43_ssb_tbl);
+#endif
/* Channel and ratetables are shared for all devices.
* They can't be const, because ieee80211 puts some precalculated
@@ -548,7 +559,7 @@ void b43_tsf_read(struct b43_wldev *dev, u64 *tsf)
{
u32 low, high;
- B43_WARN_ON(dev->sdev->id.revision < 3);
+ B43_WARN_ON(dev->dev->core_rev < 3);
/* The hardware guarantees us an atomic read, if we
* read the low register first. */
@@ -586,7 +597,7 @@ static void b43_tsf_write_locked(struct b43_wldev *dev, u64 tsf)
{
u32 low, high;
- B43_WARN_ON(dev->sdev->id.revision < 3);
+ B43_WARN_ON(dev->dev->core_rev < 3);
low = tsf;
high = (tsf >> 32);
@@ -714,7 +725,7 @@ void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on)
b43_ram_write(dev, i * 4, buffer[i]);
b43_write16(dev, 0x0568, 0x0000);
- if (dev->sdev->id.revision < 11)
+ if (dev->dev->core_rev < 11)
b43_write16(dev, 0x07C0, 0x0000);
else
b43_write16(dev, 0x07C0, 0x0100);
@@ -1132,7 +1143,7 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
b43_write32(dev, B43_MMIO_MACCTL, macctl);
/* Commit write */
b43_read32(dev, B43_MMIO_MACCTL);
- if (awake && dev->sdev->id.revision >= 5) {
+ if (awake && dev->dev->core_rev >= 5) {
/* Wait for the microcode to wake up. */
for (i = 0; i < 100; i++) {
ucstat = b43_shm_read16(dev, B43_SHM_SHARED,
@@ -1144,35 +1155,85 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
}
}
-static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, u32 flags)
+#ifdef CONFIG_B43_BCMA
+static void b43_bcma_phy_reset(struct b43_wldev *dev)
{
+ u32 flags;
+
+ /* Put PHY into reset */
+ flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ flags |= B43_BCMA_IOCTL_PHY_RESET;
+ flags |= B43_BCMA_IOCTL_PHY_BW_20MHZ; /* Make 20 MHz def */
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
+ udelay(2);
+
+ /* Take PHY out of reset */
+ flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ flags &= ~B43_BCMA_IOCTL_PHY_RESET;
+ flags |= BCMA_IOCTL_FGC;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
+ udelay(1);
+
+ /* Do not force clock anymore */
+ flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ flags &= ~BCMA_IOCTL_FGC;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
+ udelay(1);
+}
+
+static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
+{
+ b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN);
+ bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
+ b43_bcma_phy_reset(dev);
+ bcma_core_pll_ctl(dev->dev->bdev, 0x300, 0x3000000, true);
+}
+#endif
+
+static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, bool gmode)
+{
+ struct ssb_device *sdev = dev->dev->sdev;
u32 tmslow;
+ u32 flags = 0;
+ if (gmode)
+ flags |= B43_TMSLOW_GMODE;
flags |= B43_TMSLOW_PHYCLKEN;
flags |= B43_TMSLOW_PHYRESET;
if (dev->phy.type == B43_PHYTYPE_N)
flags |= B43_TMSLOW_PHY_BANDWIDTH_20MHZ; /* Make 20 MHz def */
- ssb_device_enable(dev->sdev, flags);
+ b43_device_enable(dev, flags);
msleep(2); /* Wait for the PLL to turn on. */
/* Now take the PHY out of Reset again */
- tmslow = ssb_read32(dev->sdev, SSB_TMSLOW);
+ tmslow = ssb_read32(sdev, SSB_TMSLOW);
tmslow |= SSB_TMSLOW_FGC;
tmslow &= ~B43_TMSLOW_PHYRESET;
- ssb_write32(dev->sdev, SSB_TMSLOW, tmslow);
- ssb_read32(dev->sdev, SSB_TMSLOW); /* flush */
+ ssb_write32(sdev, SSB_TMSLOW, tmslow);
+ ssb_read32(sdev, SSB_TMSLOW); /* flush */
msleep(1);
tmslow &= ~SSB_TMSLOW_FGC;
- ssb_write32(dev->sdev, SSB_TMSLOW, tmslow);
- ssb_read32(dev->sdev, SSB_TMSLOW); /* flush */
+ ssb_write32(sdev, SSB_TMSLOW, tmslow);
+ ssb_read32(sdev, SSB_TMSLOW); /* flush */
msleep(1);
}
-void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags)
+void b43_wireless_core_reset(struct b43_wldev *dev, bool gmode)
{
u32 macctl;
- b43_ssb_wireless_core_reset(dev, flags);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ b43_bcma_wireless_core_reset(dev, gmode);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ b43_ssb_wireless_core_reset(dev, gmode);
+ break;
+#endif
+ }
/* Turn Analog ON, but only if we already know the PHY-type.
* This protects against very early setup where we don't know the
@@ -1183,7 +1244,7 @@ void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags)
macctl = b43_read32(dev, B43_MMIO_MACCTL);
macctl &= ~B43_MACCTL_GMODE;
- if (flags & B43_TMSLOW_GMODE)
+ if (gmode)
macctl |= B43_MACCTL_GMODE;
macctl |= B43_MACCTL_IHR_ENABLED;
b43_write32(dev, B43_MMIO_MACCTL, macctl);
@@ -1221,7 +1282,7 @@ static void drain_txstatus_queue(struct b43_wldev *dev)
{
u32 dummy;
- if (dev->sdev->id.revision < 5)
+ if (dev->dev->core_rev < 5)
return;
/* Read all entries from the microcode TXstatus FIFO
* and throw them away.
@@ -1427,9 +1488,9 @@ u8 b43_ieee80211_antenna_sanitize(struct b43_wldev *dev,
/* Get the mask of available antennas. */
if (dev->phy.gmode)
- antenna_mask = dev->sdev->bus->sprom.ant_available_bg;
+ antenna_mask = dev->dev->bus_sprom->ant_available_bg;
else
- antenna_mask = dev->sdev->bus->sprom.ant_available_a;
+ antenna_mask = dev->dev->bus_sprom->ant_available_a;
if (!(antenna_mask & (1 << (antenna_nr - 1)))) {
/* This antenna is not available. Fall back to default. */
@@ -1644,7 +1705,7 @@ static void b43_beacon_update_trigger_work(struct work_struct *work)
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) {
- if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) {
+ if (b43_bus_host_is_sdio(dev->dev)) {
/* wl->mutex is enough. */
b43_do_beacon_update_trigger_work(dev);
mmiowb();
@@ -1689,7 +1750,7 @@ static void b43_update_templates(struct b43_wl *wl)
static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int)
{
b43_time_lock(dev);
- if (dev->sdev->id.revision >= 3) {
+ if (dev->dev->core_rev >= 3) {
b43_write32(dev, B43_MMIO_TSF_CFP_REP, (beacon_int << 16));
b43_write32(dev, B43_MMIO_TSF_CFP_START, (beacon_int << 10));
} else {
@@ -1923,7 +1984,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev)
return IRQ_NONE;
reason &= dev->irq_mask;
if (!reason)
- return IRQ_HANDLED;
+ return IRQ_NONE;
dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
& 0x0001DC00;
@@ -2063,7 +2124,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
B43_WARN_ON(1);
return -ENOSYS;
}
- err = request_firmware(&blob, ctx->fwname, ctx->dev->sdev->dev);
+ err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev);
if (err == -ENOENT) {
snprintf(ctx->errors[ctx->req_type],
sizeof(ctx->errors[ctx->req_type]),
@@ -2113,26 +2174,48 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
{
struct b43_wldev *dev = ctx->dev;
struct b43_firmware *fw = &ctx->dev->fw;
- const u8 rev = ctx->dev->sdev->id.revision;
+ const u8 rev = ctx->dev->dev->core_rev;
const char *filename;
u32 tmshigh;
int err;
+ /* Files for HT and LCN were found by trying one by one */
+
/* Get microcode */
- if ((rev >= 5) && (rev <= 10))
+ if ((rev >= 5) && (rev <= 10)) {
filename = "ucode5";
- else if ((rev >= 11) && (rev <= 12))
+ } else if ((rev >= 11) && (rev <= 12)) {
filename = "ucode11";
- else if (rev == 13)
+ } else if (rev == 13) {
filename = "ucode13";
- else if (rev == 14)
+ } else if (rev == 14) {
filename = "ucode14";
- else if (rev == 15)
+ } else if (rev == 15) {
filename = "ucode15";
- else if ((rev >= 16) && (rev <= 20))
- filename = "ucode16_mimo";
- else
- goto err_no_ucode;
+ } else {
+ switch (dev->phy.type) {
+ case B43_PHYTYPE_N:
+ if (rev >= 16)
+ filename = "ucode16_mimo";
+ else
+ goto err_no_ucode;
+ break;
+ case B43_PHYTYPE_HT:
+ if (rev == 29)
+ filename = "ucode29_mimo";
+ else
+ goto err_no_ucode;
+ break;
+ case B43_PHYTYPE_LCN:
+ if (rev == 24)
+ filename = "ucode24_mimo";
+ else
+ goto err_no_ucode;
+ break;
+ default:
+ goto err_no_ucode;
+ }
+ }
err = b43_do_request_fw(ctx, filename, &fw->ucode);
if (err)
goto err_load;
@@ -2157,7 +2240,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
switch (dev->phy.type) {
case B43_PHYTYPE_A:
if ((rev >= 5) && (rev <= 10)) {
- tmshigh = ssb_read32(dev->sdev, SSB_TMSHIGH);
+ tmshigh = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY)
filename = "a0g1initvals5";
else
@@ -2191,6 +2274,18 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
else
goto err_no_initvals;
break;
+ case B43_PHYTYPE_HT:
+ if (rev == 29)
+ filename = "ht0initvals29";
+ else
+ goto err_no_initvals;
+ break;
+ case B43_PHYTYPE_LCN:
+ if (rev == 24)
+ filename = "lcn0initvals24";
+ else
+ goto err_no_initvals;
+ break;
default:
goto err_no_initvals;
}
@@ -2202,7 +2297,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
switch (dev->phy.type) {
case B43_PHYTYPE_A:
if ((rev >= 5) && (rev <= 10)) {
- tmshigh = ssb_read32(dev->sdev, SSB_TMSHIGH);
+ tmshigh = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY)
filename = "a0g1bsinitvals5";
else
@@ -2238,6 +2333,18 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
else
goto err_no_initvals;
break;
+ case B43_PHYTYPE_HT:
+ if (rev == 29)
+ filename = "ht0bsinitvals29";
+ else
+ goto err_no_initvals;
+ break;
+ case B43_PHYTYPE_LCN:
+ if (rev == 24)
+ filename = "lcn0bsinitvals24";
+ else
+ goto err_no_initvals;
+ break;
default:
goto err_no_initvals;
}
@@ -2448,7 +2555,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "%u.%u",
dev->fw.rev, dev->fw.patch);
- wiphy->hw_version = dev->sdev->id.coreid;
+ wiphy->hw_version = dev->dev->core_id;
if (b43_is_old_txhdr_format(dev)) {
/* We're over the deadline, but we keep support for old fw
@@ -2566,7 +2673,7 @@ out:
*/
static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
+ struct ssb_bus *bus = dev->dev->sdev->bus;
#ifdef CONFIG_SSB_DRIVER_PCICORE
return (bus->chipco.dev ? bus->chipco.dev : bus->pcicore.dev);
@@ -2588,7 +2695,7 @@ static int b43_gpio_init(struct b43_wldev *dev)
mask = 0x0000001F;
set = 0x0000000F;
- if (dev->sdev->bus->chip_id == 0x4301) {
+ if (dev->dev->chip_id == 0x4301) {
mask |= 0x0060;
set |= 0x0060;
}
@@ -2599,21 +2706,34 @@ static int b43_gpio_init(struct b43_wldev *dev)
mask |= 0x0180;
set |= 0x0180;
}
- if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) {
+ if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) {
b43_write16(dev, B43_MMIO_GPIO_MASK,
b43_read16(dev, B43_MMIO_GPIO_MASK)
| 0x0200);
mask |= 0x0200;
set |= 0x0200;
}
- if (dev->sdev->id.revision >= 2)
+ if (dev->dev->core_rev >= 2)
mask |= 0x0010; /* FIXME: This is redundant. */
- gpiodev = b43_ssb_gpio_dev(dev);
- if (gpiodev)
- ssb_write32(gpiodev, B43_GPIO_CONTROL,
- (ssb_read32(gpiodev, B43_GPIO_CONTROL)
- & mask) | set);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL,
+ (bcma_cc_read32(&dev->dev->bdev->bus->drv_cc,
+ BCMA_CC_GPIOCTL) & mask) | set);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ gpiodev = b43_ssb_gpio_dev(dev);
+ if (gpiodev)
+ ssb_write32(gpiodev, B43_GPIO_CONTROL,
+ (ssb_read32(gpiodev, B43_GPIO_CONTROL)
+ & mask) | set);
+ break;
+#endif
+ }
return 0;
}
@@ -2623,9 +2743,21 @@ static void b43_gpio_cleanup(struct b43_wldev *dev)
{
struct ssb_device *gpiodev;
- gpiodev = b43_ssb_gpio_dev(dev);
- if (gpiodev)
- ssb_write32(gpiodev, B43_GPIO_CONTROL, 0);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL,
+ 0);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ gpiodev = b43_ssb_gpio_dev(dev);
+ if (gpiodev)
+ ssb_write32(gpiodev, B43_GPIO_CONTROL, 0);
+ break;
+#endif
+ }
}
/* http://bcm-specs.sipsolutions.net/EnableMac */
@@ -2697,12 +2829,30 @@ out:
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */
void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on)
{
- u32 tmslow = ssb_read32(dev->sdev, SSB_TMSLOW);
- if (on)
- tmslow |= B43_TMSLOW_MACPHYCLKEN;
- else
- tmslow &= ~B43_TMSLOW_MACPHYCLKEN;
- ssb_write32(dev->sdev, SSB_TMSLOW, tmslow);
+ u32 tmp;
+
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ if (on)
+ tmp |= B43_BCMA_IOCTL_MACPHYCLKEN;
+ else
+ tmp &= ~B43_BCMA_IOCTL_MACPHYCLKEN;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ if (on)
+ tmp |= B43_TMSLOW_MACPHYCLKEN;
+ else
+ tmp &= ~B43_TMSLOW_MACPHYCLKEN;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ break;
+#endif
+ }
}
static void b43_adjust_opmode(struct b43_wldev *dev)
@@ -2741,15 +2891,15 @@ static void b43_adjust_opmode(struct b43_wldev *dev)
/* Workaround: On old hardware the HW-MAC-address-filter
* doesn't work properly, so always run promisc in filter
* it in software. */
- if (dev->sdev->id.revision <= 4)
+ if (dev->dev->core_rev <= 4)
ctl |= B43_MACCTL_PROMISC;
b43_write32(dev, B43_MMIO_MACCTL, ctl);
cfp_pretbtt = 2;
if ((ctl & B43_MACCTL_INFRA) && !(ctl & B43_MACCTL_AP)) {
- if (dev->sdev->bus->chip_id == 0x4306 &&
- dev->sdev->bus->chip_rev == 3)
+ if (dev->dev->chip_id == 0x4306 &&
+ dev->dev->chip_rev == 3)
cfp_pretbtt = 100;
else
cfp_pretbtt = 50;
@@ -2907,7 +3057,7 @@ static int b43_chip_init(struct b43_wldev *dev)
b43_write16(dev, 0x005E, value16);
}
b43_write32(dev, 0x0100, 0x01000000);
- if (dev->sdev->id.revision < 5)
+ if (dev->dev->core_rev < 5)
b43_write32(dev, 0x010C, 0x01000000);
b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
@@ -2922,7 +3072,7 @@ static int b43_chip_init(struct b43_wldev *dev)
/* Initially set the wireless operation mode. */
b43_adjust_opmode(dev);
- if (dev->sdev->id.revision < 3) {
+ if (dev->dev->core_rev < 3) {
b43_write16(dev, 0x060E, 0x0000);
b43_write16(dev, 0x0610, 0x8000);
b43_write16(dev, 0x0604, 0x0000);
@@ -2941,8 +3091,20 @@ static int b43_chip_init(struct b43_wldev *dev)
b43_mac_phy_clock_set(dev, true);
- b43_write16(dev, B43_MMIO_POWERUP_DELAY,
- dev->sdev->bus->chipco.fast_pwrup_delay);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ /* FIXME: 0xE74 is quite common, but should be read from CC */
+ b43_write16(dev, B43_MMIO_POWERUP_DELAY, 0xE74);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ b43_write16(dev, B43_MMIO_POWERUP_DELAY,
+ dev->dev->sdev->bus->chipco.fast_pwrup_delay);
+ break;
+#endif
+ }
err = 0;
b43dbg(dev->wl, "Chip initialized\n");
@@ -3105,7 +3267,7 @@ static int b43_validate_chipaccess(struct b43_wldev *dev)
b43_shm_write32(dev, B43_SHM_SHARED, 0, backup0);
b43_shm_write32(dev, B43_SHM_SHARED, 4, backup4);
- if ((dev->sdev->id.revision >= 3) && (dev->sdev->id.revision <= 10)) {
+ if ((dev->dev->core_rev >= 3) && (dev->dev->core_rev <= 10)) {
/* The 32bit register shadows the two 16bit registers
* with update sideeffects. Validate this. */
b43_write16(dev, B43_MMIO_TSF_CFP_START, 0xAAAA);
@@ -3458,21 +3620,33 @@ static void b43_op_set_tsf(struct ieee80211_hw *hw, u64 tsf)
static void b43_put_phy_into_reset(struct b43_wldev *dev)
{
- struct ssb_device *sdev = dev->sdev;
- u32 tmslow;
+ u32 tmp;
- tmslow = ssb_read32(sdev, SSB_TMSLOW);
- tmslow &= ~B43_TMSLOW_GMODE;
- tmslow |= B43_TMSLOW_PHYRESET;
- tmslow |= SSB_TMSLOW_FGC;
- ssb_write32(sdev, SSB_TMSLOW, tmslow);
- msleep(1);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ b43err(dev->wl,
+ "Putting PHY into reset not supported on BCMA\n");
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ tmp &= ~B43_TMSLOW_GMODE;
+ tmp |= B43_TMSLOW_PHYRESET;
+ tmp |= SSB_TMSLOW_FGC;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ msleep(1);
+
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ tmp &= ~SSB_TMSLOW_FGC;
+ tmp |= B43_TMSLOW_PHYRESET;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ msleep(1);
- tmslow = ssb_read32(sdev, SSB_TMSLOW);
- tmslow &= ~SSB_TMSLOW_FGC;
- tmslow |= B43_TMSLOW_PHYRESET;
- ssb_write32(sdev, SSB_TMSLOW, tmslow);
- msleep(1);
+ break;
+#endif
+ }
}
static const char *band_to_string(enum ieee80211_band band)
@@ -3954,7 +4128,7 @@ redo:
/* Disable interrupts on the device. */
b43_set_status(dev, B43_STAT_INITIALIZED);
- if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) {
+ if (b43_bus_host_is_sdio(dev->dev)) {
/* wl->mutex is locked. That is enough. */
b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0);
b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */
@@ -3967,11 +4141,11 @@ redo:
/* Synchronize and free the interrupt handlers. Unlock to avoid deadlocks. */
orig_dev = dev;
mutex_unlock(&wl->mutex);
- if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) {
+ if (b43_bus_host_is_sdio(dev->dev)) {
b43_sdio_free_irq(dev);
} else {
- synchronize_irq(dev->sdev->irq);
- free_irq(dev->sdev->irq, dev);
+ synchronize_irq(dev->dev->irq);
+ free_irq(dev->dev->irq, dev);
}
mutex_lock(&wl->mutex);
dev = wl->current_dev;
@@ -4004,19 +4178,19 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED);
drain_txstatus_queue(dev);
- if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) {
+ if (b43_bus_host_is_sdio(dev->dev)) {
err = b43_sdio_request_irq(dev, b43_sdio_interrupt_handler);
if (err) {
b43err(dev->wl, "Cannot request SDIO IRQ\n");
goto out;
}
} else {
- err = request_threaded_irq(dev->sdev->irq, b43_interrupt_handler,
+ err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler,
b43_interrupt_thread_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (err) {
b43err(dev->wl, "Cannot request IRQ-%d\n",
- dev->sdev->irq);
+ dev->dev->irq);
goto out;
}
}
@@ -4083,9 +4257,21 @@ static int b43_phy_versioning(struct b43_wldev *dev)
unsupported = 1;
break;
#endif
+#ifdef CONFIG_B43_PHY_HT
+ case B43_PHYTYPE_HT:
+ if (phy_rev > 1)
+ unsupported = 1;
+ break;
+#endif
+#ifdef CONFIG_B43_PHY_LCN
+ case B43_PHYTYPE_LCN:
+ if (phy_rev > 1)
+ unsupported = 1;
+ break;
+#endif
default:
unsupported = 1;
- };
+ }
if (unsupported) {
b43err(dev->wl, "FOUND UNSUPPORTED PHY "
"(Analog %u, Type %u, Revision %u)\n",
@@ -4096,22 +4282,42 @@ static int b43_phy_versioning(struct b43_wldev *dev)
analog_type, phy_type, phy_rev);
/* Get RADIO versioning */
- if (dev->sdev->bus->chip_id == 0x4317) {
- if (dev->sdev->bus->chip_rev == 0)
- tmp = 0x3205017F;
- else if (dev->sdev->bus->chip_rev == 1)
- tmp = 0x4205017F;
- else
- tmp = 0x5205017F;
+ if (dev->dev->core_rev >= 24) {
+ u16 radio24[3];
+
+ for (tmp = 0; tmp < 3; tmp++) {
+ b43_write16(dev, B43_MMIO_RADIO24_CONTROL, tmp);
+ radio24[tmp] = b43_read16(dev, B43_MMIO_RADIO24_DATA);
+ }
+
+ /* Broadcom uses "id" for our "ver" and has separated "ver" */
+ /* radio_ver = (radio24[0] & 0xF0) >> 4; */
+
+ radio_manuf = 0x17F;
+ radio_ver = (radio24[2] << 8) | radio24[1];
+ radio_rev = (radio24[0] & 0xF);
} else {
- b43_write16(dev, B43_MMIO_RADIO_CONTROL, B43_RADIOCTL_ID);
- tmp = b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
- b43_write16(dev, B43_MMIO_RADIO_CONTROL, B43_RADIOCTL_ID);
- tmp |= (u32)b43_read16(dev, B43_MMIO_RADIO_DATA_HIGH) << 16;
- }
- radio_manuf = (tmp & 0x00000FFF);
- radio_ver = (tmp & 0x0FFFF000) >> 12;
- radio_rev = (tmp & 0xF0000000) >> 28;
+ if (dev->dev->chip_id == 0x4317) {
+ if (dev->dev->chip_rev == 0)
+ tmp = 0x3205017F;
+ else if (dev->dev->chip_rev == 1)
+ tmp = 0x4205017F;
+ else
+ tmp = 0x5205017F;
+ } else {
+ b43_write16(dev, B43_MMIO_RADIO_CONTROL,
+ B43_RADIOCTL_ID);
+ tmp = b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
+ b43_write16(dev, B43_MMIO_RADIO_CONTROL,
+ B43_RADIOCTL_ID);
+ tmp |= (u32)b43_read16(dev, B43_MMIO_RADIO_DATA_HIGH)
+ << 16;
+ }
+ radio_manuf = (tmp & 0x00000FFF);
+ radio_ver = (tmp & 0x0FFFF000) >> 12;
+ radio_rev = (tmp & 0xF0000000) >> 28;
+ }
+
if (radio_manuf != 0x17F /* Broadcom */)
unsupported = 1;
switch (phy_type) {
@@ -4139,6 +4345,14 @@ static int b43_phy_versioning(struct b43_wldev *dev)
if (radio_ver != 0x2062 && radio_ver != 0x2063)
unsupported = 1;
break;
+ case B43_PHYTYPE_HT:
+ if (radio_ver != 0x2059)
+ unsupported = 1;
+ break;
+ case B43_PHYTYPE_LCN:
+ if (radio_ver != 0x2064)
+ unsupported = 1;
+ break;
default:
B43_WARN_ON(1);
}
@@ -4204,7 +4418,7 @@ static void setup_struct_wldev_for_init(struct b43_wldev *dev)
static void b43_bluetooth_coext_enable(struct b43_wldev *dev)
{
- struct ssb_sprom *sprom = &dev->sdev->bus->sprom;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
u64 hf;
if (!modparam_btcoex)
@@ -4231,16 +4445,21 @@ static void b43_bluetooth_coext_disable(struct b43_wldev *dev)
static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
+ struct ssb_bus *bus;
u32 tmp;
+ if (dev->dev->bus_type != B43_BUS_SSB)
+ return;
+
+ bus = dev->dev->sdev->bus;
+
if ((bus->chip_id == 0x4311 && bus->chip_rev == 2) ||
(bus->chip_id == 0x4312)) {
- tmp = ssb_read32(dev->sdev, SSB_IMCFGLO);
+ tmp = ssb_read32(dev->dev->sdev, SSB_IMCFGLO);
tmp &= ~SSB_IMCFGLO_REQTO;
tmp &= ~SSB_IMCFGLO_SERTO;
tmp |= 0x3;
- ssb_write32(dev->sdev, SSB_IMCFGLO, tmp);
+ ssb_write32(dev->dev->sdev, SSB_IMCFGLO, tmp);
ssb_commit_settings(bus);
}
}
@@ -4310,36 +4529,45 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
dev->wl->current_beacon = NULL;
}
- ssb_device_disable(dev->sdev, 0);
- ssb_bus_may_powerdown(dev->sdev->bus);
+ b43_device_disable(dev, 0);
+ b43_bus_may_powerdown(dev);
}
/* Initialize a wireless core */
static int b43_wireless_core_init(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
- struct ssb_sprom *sprom = &bus->sprom;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
struct b43_phy *phy = &dev->phy;
int err;
u64 hf;
- u32 tmp;
B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT);
- err = ssb_bus_powerup(bus, 0);
+ err = b43_bus_powerup(dev, 0);
if (err)
goto out;
- if (!ssb_device_is_enabled(dev->sdev)) {
- tmp = phy->gmode ? B43_TMSLOW_GMODE : 0;
- b43_wireless_core_reset(dev, tmp);
- }
+ if (!b43_device_is_enabled(dev))
+ b43_wireless_core_reset(dev, phy->gmode);
/* Reset all data structures. */
setup_struct_wldev_for_init(dev);
phy->ops->prepare_structs(dev);
/* Enable IRQ routing to this device. */
- ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->sdev);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci,
+ dev->dev->bdev, true);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ ssb_pcicore_dev_irqvecs_enable(&dev->dev->sdev->bus->pcicore,
+ dev->dev->sdev);
+ break;
+#endif
+ }
b43_imcfglo_timeouts_workaround(dev);
b43_bluetooth_coext_disable(dev);
@@ -4352,7 +4580,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
if (err)
goto err_busdown;
b43_shm_write16(dev, B43_SHM_SHARED,
- B43_SHM_SH_WLCOREREV, dev->sdev->id.revision);
+ B43_SHM_SH_WLCOREREV, dev->dev->core_rev);
hf = b43_hf_read(dev);
if (phy->type == B43_PHYTYPE_G) {
hf |= B43_HF_SYMW;
@@ -4370,8 +4598,9 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
if (sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW)
hf |= B43_HF_DSCRQ; /* Disable slowclock requests from ucode. */
#ifdef CONFIG_SSB_DRIVER_PCICORE
- if ((bus->bustype == SSB_BUSTYPE_PCI) &&
- (bus->pcicore.dev->id.revision <= 10))
+ if (dev->dev->bus_type == B43_BUS_SSB &&
+ dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
+ dev->dev->sdev->bus->pcicore.dev->id.revision <= 10)
hf |= B43_HF_PCISCW; /* PCI slow clock workaround. */
#endif
hf &= ~B43_HF_SKCFPUP;
@@ -4399,8 +4628,8 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
/* Maximum Contention Window */
b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF);
- if ((dev->sdev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
- (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) ||
+ if (b43_bus_host_is_pcmcia(dev->dev) ||
+ b43_bus_host_is_sdio(dev->dev) ||
dev->use_pio) {
dev->__using_pio_transfers = 1;
err = b43_pio_init(dev);
@@ -4414,7 +4643,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
b43_set_synth_pu_delay(dev, 1);
b43_bluetooth_coext_enable(dev);
- ssb_bus_powerup(bus, !(sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW));
+ b43_bus_powerup(dev, !(sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW));
b43_upload_card_macaddress(dev);
b43_security_init(dev);
@@ -4431,7 +4660,7 @@ out:
err_chip_exit:
b43_chip_exit(dev);
err_busdown:
- ssb_bus_may_powerdown(bus);
+ b43_bus_may_powerdown(dev);
B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT);
return err;
}
@@ -4737,11 +4966,10 @@ static void b43_wireless_core_detach(struct b43_wldev *dev)
static int b43_wireless_core_attach(struct b43_wldev *dev)
{
struct b43_wl *wl = dev->wl;
- struct ssb_bus *bus = dev->sdev->bus;
- struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL;
+ struct pci_dev *pdev = NULL;
int err;
- bool have_2ghz_phy = 0, have_5ghz_phy = 0;
u32 tmp;
+ bool have_2ghz_phy = 0, have_5ghz_phy = 0;
/* Do NOT do any device initialization here.
* Do it in wireless_core_init() instead.
@@ -4750,25 +4978,42 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
* that in core_init(), too.
*/
- err = ssb_bus_powerup(bus, 0);
+#ifdef CONFIG_B43_SSB
+ if (dev->dev->bus_type == B43_BUS_SSB &&
+ dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI)
+ pdev = dev->dev->sdev->bus->host_pci;
+#endif
+
+ err = b43_bus_powerup(dev, 0);
if (err) {
b43err(wl, "Bus powerup failed\n");
goto out;
}
- /* Get the PHY type. */
- if (dev->sdev->id.revision >= 5) {
- u32 tmshigh;
- tmshigh = ssb_read32(dev->sdev, SSB_TMSHIGH);
- have_2ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY);
- have_5ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_5GHZ_PHY);
- } else
- B43_WARN_ON(1);
+ /* Get the PHY type. */
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
+ have_2ghz_phy = !!(tmp & B43_BCMA_IOST_2G_PHY);
+ have_5ghz_phy = !!(tmp & B43_BCMA_IOST_5G_PHY);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ if (dev->dev->core_rev >= 5) {
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
+ have_2ghz_phy = !!(tmp & B43_TMSHIGH_HAVE_2GHZ_PHY);
+ have_5ghz_phy = !!(tmp & B43_TMSHIGH_HAVE_5GHZ_PHY);
+ } else
+ B43_WARN_ON(1);
+ break;
+#endif
+ }
dev->phy.gmode = have_2ghz_phy;
dev->phy.radio_on = 1;
- tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0;
- b43_wireless_core_reset(dev, tmp);
+ b43_wireless_core_reset(dev, dev->phy.gmode);
err = b43_phy_versioning(dev);
if (err)
@@ -4790,6 +5035,8 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
#endif
case B43_PHYTYPE_G:
case B43_PHYTYPE_N:
+ case B43_PHYTYPE_HT:
+ case B43_PHYTYPE_LCN:
have_2ghz_phy = 1;
break;
default:
@@ -4816,8 +5063,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
goto err_powerdown;
dev->phy.gmode = have_2ghz_phy;
- tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0;
- b43_wireless_core_reset(dev, tmp);
+ b43_wireless_core_reset(dev, dev->phy.gmode);
err = b43_validate_chipaccess(dev);
if (err)
@@ -4832,8 +5078,8 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
INIT_WORK(&dev->restart_work, b43_chip_reset);
dev->phy.ops->switch_analog(dev, 0);
- ssb_device_disable(dev->sdev, 0);
- ssb_bus_may_powerdown(bus);
+ b43_device_disable(dev, 0);
+ b43_bus_may_powerdown(dev);
out:
return err;
@@ -4841,11 +5087,11 @@ out:
err_phy_free:
b43_phy_free(dev);
err_powerdown:
- ssb_bus_may_powerdown(bus);
+ b43_bus_may_powerdown(dev);
return err;
}
-static void b43_one_core_detach(struct ssb_device *dev)
+static void b43_one_core_detach(struct b43_bus_dev *dev)
{
struct b43_wldev *wldev;
struct b43_wl *wl;
@@ -4853,17 +5099,17 @@ static void b43_one_core_detach(struct ssb_device *dev)
/* Do not cancel ieee80211-workqueue based work here.
* See comment in b43_remove(). */
- wldev = ssb_get_drvdata(dev);
+ wldev = b43_bus_get_wldev(dev);
wl = wldev->wl;
b43_debugfs_remove_device(wldev);
b43_wireless_core_detach(wldev);
list_del(&wldev->list);
wl->nr_devs--;
- ssb_set_drvdata(dev, NULL);
+ b43_bus_set_wldev(dev, NULL);
kfree(wldev);
}
-static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
+static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl)
{
struct b43_wldev *wldev;
int err = -ENOMEM;
@@ -4873,7 +5119,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
goto out;
wldev->use_pio = b43_modparam_pio;
- wldev->sdev = dev;
+ wldev->dev = dev;
wldev->wl = wl;
b43_set_status(wldev, B43_STAT_UNINIT);
wldev->bad_frames_preempt = modparam_bad_frames_preempt;
@@ -4885,7 +5131,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
list_add(&wldev->list, &wl->devlist);
wl->nr_devs++;
- ssb_set_drvdata(dev, wldev);
+ b43_bus_set_wldev(dev, wldev);
b43_debugfs_add_device(wldev);
out:
@@ -4926,19 +5172,20 @@ static void b43_sprom_fixup(struct ssb_bus *bus)
}
}
-static void b43_wireless_exit(struct ssb_device *dev, struct b43_wl *wl)
+static void b43_wireless_exit(struct b43_bus_dev *dev, struct b43_wl *wl)
{
struct ieee80211_hw *hw = wl->hw;
- ssb_set_devtypedata(dev, NULL);
+ ssb_set_devtypedata(dev->sdev, NULL);
ieee80211_free_hw(hw);
}
-static struct b43_wl *b43_wireless_init(struct ssb_device *dev)
+static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
{
- struct ssb_sprom *sprom = &dev->bus->sprom;
+ struct ssb_sprom *sprom = dev->bus_sprom;
struct ieee80211_hw *hw;
struct b43_wl *wl;
+ char chip_name[6];
hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops);
if (!hw) {
@@ -4977,29 +5224,105 @@ static struct b43_wl *b43_wireless_init(struct ssb_device *dev)
INIT_WORK(&wl->tx_work, b43_tx_work);
skb_queue_head_init(&wl->tx_queue);
- b43info(wl, "Broadcom %04X WLAN found (core revision %u)\n",
- dev->bus->chip_id, dev->id.revision);
+ snprintf(chip_name, ARRAY_SIZE(chip_name),
+ (dev->chip_id > 0x9999) ? "%d" : "%04X", dev->chip_id);
+ b43info(wl, "Broadcom %s WLAN found (core revision %u)\n", chip_name,
+ dev->core_rev);
return wl;
}
-static int b43_ssb_probe(struct ssb_device *dev, const struct ssb_device_id *id)
+#ifdef CONFIG_B43_BCMA
+static int b43_bcma_probe(struct bcma_device *core)
+{
+ struct b43_bus_dev *dev;
+ struct b43_wl *wl;
+ int err;
+
+ dev = b43_bus_dev_bcma_init(core);
+ if (!dev)
+ return -ENODEV;
+
+ wl = b43_wireless_init(dev);
+ if (IS_ERR(wl)) {
+ err = PTR_ERR(wl);
+ goto bcma_out;
+ }
+
+ err = b43_one_core_attach(dev, wl);
+ if (err)
+ goto bcma_err_wireless_exit;
+
+ err = ieee80211_register_hw(wl->hw);
+ if (err)
+ goto bcma_err_one_core_detach;
+ b43_leds_register(wl->current_dev);
+
+bcma_out:
+ return err;
+
+bcma_err_one_core_detach:
+ b43_one_core_detach(dev);
+bcma_err_wireless_exit:
+ ieee80211_free_hw(wl->hw);
+ return err;
+}
+
+static void b43_bcma_remove(struct bcma_device *core)
{
+ struct b43_wldev *wldev = bcma_get_drvdata(core);
+ struct b43_wl *wl = wldev->wl;
+
+ /* We must cancel any work here before unregistering from ieee80211,
+ * as the ieee80211 unreg will destroy the workqueue. */
+ cancel_work_sync(&wldev->restart_work);
+
+ /* Restore the queues count before unregistering, because firmware detect
+ * might have modified it. Restoring is important, so the networking
+ * stack can properly free resources. */
+ wl->hw->queues = wl->mac80211_initially_registered_queues;
+ b43_leds_stop(wldev);
+ ieee80211_unregister_hw(wl->hw);
+
+ b43_one_core_detach(wldev->dev);
+
+ b43_leds_unregister(wl);
+
+ ieee80211_free_hw(wl->hw);
+}
+
+static struct bcma_driver b43_bcma_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = b43_bcma_tbl,
+ .probe = b43_bcma_probe,
+ .remove = b43_bcma_remove,
+};
+#endif
+
+#ifdef CONFIG_B43_SSB
+static
+int b43_ssb_probe(struct ssb_device *sdev, const struct ssb_device_id *id)
+{
+ struct b43_bus_dev *dev;
struct b43_wl *wl;
int err;
int first = 0;
- wl = ssb_get_devtypedata(dev);
+ dev = b43_bus_dev_ssb_init(sdev);
+ if (!dev)
+ return -ENOMEM;
+
+ wl = ssb_get_devtypedata(sdev);
if (!wl) {
/* Probing the first core. Must setup common struct b43_wl */
first = 1;
- b43_sprom_fixup(dev->bus);
+ b43_sprom_fixup(sdev->bus);
wl = b43_wireless_init(dev);
if (IS_ERR(wl)) {
err = PTR_ERR(wl);
goto out;
}
- ssb_set_devtypedata(dev, wl);
- B43_WARN_ON(ssb_get_devtypedata(dev) != wl);
+ ssb_set_devtypedata(sdev, wl);
+ B43_WARN_ON(ssb_get_devtypedata(sdev) != wl);
}
err = b43_one_core_attach(dev, wl);
if (err)
@@ -5023,10 +5346,11 @@ static int b43_ssb_probe(struct ssb_device *dev, const struct ssb_device_id *id)
return err;
}
-static void b43_ssb_remove(struct ssb_device *dev)
+static void b43_ssb_remove(struct ssb_device *sdev)
{
- struct b43_wl *wl = ssb_get_devtypedata(dev);
- struct b43_wldev *wldev = ssb_get_drvdata(dev);
+ struct b43_wl *wl = ssb_get_devtypedata(sdev);
+ struct b43_wldev *wldev = ssb_get_drvdata(sdev);
+ struct b43_bus_dev *dev = wldev->dev;
/* We must cancel any work here before unregistering from ieee80211,
* as the ieee80211 unreg will destroy the workqueue. */
@@ -5053,6 +5377,14 @@ static void b43_ssb_remove(struct ssb_device *dev)
}
}
+static struct ssb_driver b43_ssb_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = b43_ssb_tbl,
+ .probe = b43_ssb_probe,
+ .remove = b43_ssb_remove,
+};
+#endif /* CONFIG_B43_SSB */
+
/* Perform a hardware reset. This can be called from any context. */
void b43_controller_restart(struct b43_wldev *dev, const char *reason)
{
@@ -5063,13 +5395,6 @@ void b43_controller_restart(struct b43_wldev *dev, const char *reason)
ieee80211_queue_work(dev->wl->hw, &dev->restart_work);
}
-static struct ssb_driver b43_ssb_driver = {
- .name = KBUILD_MODNAME,
- .id_table = b43_ssb_tbl,
- .probe = b43_ssb_probe,
- .remove = b43_ssb_remove,
-};
-
static void b43_print_driverinfo(void)
{
const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "",
@@ -5108,14 +5433,27 @@ static int __init b43_init(void)
err = b43_sdio_init();
if (err)
goto err_pcmcia_exit;
- err = ssb_driver_register(&b43_ssb_driver);
+#ifdef CONFIG_B43_BCMA
+ err = bcma_driver_register(&b43_bcma_driver);
if (err)
goto err_sdio_exit;
+#endif
+#ifdef CONFIG_B43_SSB
+ err = ssb_driver_register(&b43_ssb_driver);
+ if (err)
+ goto err_bcma_driver_exit;
+#endif
b43_print_driverinfo();
return err;
+#ifdef CONFIG_B43_SSB
+err_bcma_driver_exit:
+#endif
+#ifdef CONFIG_B43_BCMA
+ bcma_driver_unregister(&b43_bcma_driver);
err_sdio_exit:
+#endif
b43_sdio_exit();
err_pcmcia_exit:
b43_pcmcia_exit();
@@ -5126,7 +5464,12 @@ err_dfs_exit:
static void __exit b43_exit(void)
{
+#ifdef CONFIG_B43_SSB
ssb_driver_unregister(&b43_ssb_driver);
+#endif
+#ifdef CONFIG_B43_BCMA
+ bcma_driver_unregister(&b43_bcma_driver);
+#endif
b43_sdio_exit();
b43_pcmcia_exit();
b43_debugfs_exit();
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index a0d327f1318..8c684cd3352 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -4,7 +4,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Stefano Brivio <stefano.brivio@polimi.it>
- Michael Buesch <mb@bu3sch.de>
+ Michael Buesch <m@bues.ch>
Danny van Dyk <kugelfang@gentoo.org>
Andreas Jaggi <andreas.jaggi@waterwave.ch>
@@ -121,7 +121,7 @@ void b43_hf_write(struct b43_wldev *dev, u64 value);
void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on);
-void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags);
+void b43_wireless_core_reset(struct b43_wldev *dev, bool gmode);
void b43_controller_restart(struct b43_wldev *dev, const char *reason);
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 2c8461dcf1b..12b6b4067a3 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -2,7 +2,7 @@
Broadcom B43 wireless driver
- Copyright (c) 2007 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2007 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c
index b01c8ced57c..a6c38104693 100644
--- a/drivers/net/wireless/b43/phy_a.c
+++ b/drivers/net/wireless/b43/phy_a.c
@@ -5,7 +5,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
- Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2005-2008 Michael Buesch <m@bues.ch>
Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
@@ -265,7 +265,6 @@ static void hardware_pctl_init_aphy(struct b43_wldev *dev)
void b43_phy_inita(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
struct b43_phy *phy = &dev->phy;
/* This lowlevel A-PHY init is also called from G-PHY init.
@@ -296,9 +295,9 @@ void b43_phy_inita(struct b43_wldev *dev)
b43_radio_init2060(dev);
- if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
- ((bus->boardinfo.type == SSB_BOARD_BU4306) ||
- (bus->boardinfo.type == SSB_BOARD_BU4309))) {
+ if ((dev->dev->board_vendor == SSB_BOARDVENDOR_BCM) &&
+ ((dev->dev->board_type == SSB_BOARD_BU4306) ||
+ (dev->dev->board_type == SSB_BOARD_BU4309))) {
; //TODO: A PHY LO
}
@@ -311,7 +310,7 @@ void b43_phy_inita(struct b43_wldev *dev)
}
if ((phy->type == B43_PHYTYPE_G) &&
- (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) {
+ (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL)) {
b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF);
}
}
@@ -323,17 +322,17 @@ static int b43_aphy_init_tssi2dbm_table(struct b43_wldev *dev)
struct b43_phy_a *aphy = phy->a;
s16 pab0, pab1, pab2;
- pab0 = (s16) (dev->sdev->bus->sprom.pa1b0);
- pab1 = (s16) (dev->sdev->bus->sprom.pa1b1);
- pab2 = (s16) (dev->sdev->bus->sprom.pa1b2);
+ pab0 = (s16) (dev->dev->bus_sprom->pa1b0);
+ pab1 = (s16) (dev->dev->bus_sprom->pa1b1);
+ pab2 = (s16) (dev->dev->bus_sprom->pa1b2);
if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
pab0 != -1 && pab1 != -1 && pab2 != -1) {
/* The pabX values are set in SPROM. Use them. */
- if ((s8) dev->sdev->bus->sprom.itssi_a != 0 &&
- (s8) dev->sdev->bus->sprom.itssi_a != -1)
+ if ((s8) dev->dev->bus_sprom->itssi_a != 0 &&
+ (s8) dev->dev->bus_sprom->itssi_a != -1)
aphy->tgt_idle_tssi =
- (s8) (dev->sdev->bus->sprom.itssi_a);
+ (s8) (dev->dev->bus_sprom->itssi_a);
else
aphy->tgt_idle_tssi = 62;
aphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index e46b2f4f092..07f009ff5ee 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -5,7 +5,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
- Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2005-2008 Michael Buesch <m@bues.ch>
Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
@@ -31,6 +31,8 @@
#include "phy_a.h"
#include "phy_n.h"
#include "phy_lp.h"
+#include "phy_ht.h"
+#include "phy_lcn.h"
#include "b43.h"
#include "main.h"
@@ -59,6 +61,16 @@ int b43_phy_allocate(struct b43_wldev *dev)
phy->ops = &b43_phyops_lp;
#endif
break;
+ case B43_PHYTYPE_HT:
+#ifdef CONFIG_B43_PHY_HT
+ phy->ops = &b43_phyops_ht;
+#endif
+ break;
+ case B43_PHYTYPE_LCN:
+#ifdef CONFIG_B43_PHY_LCN
+ phy->ops = &b43_phyops_lcn;
+#endif
+ break;
}
if (B43_WARN_ON(!phy->ops))
return -ENODEV;
@@ -168,7 +180,7 @@ void b43_phy_lock(struct b43_wldev *dev)
B43_WARN_ON(dev->phy.phy_locked);
dev->phy.phy_locked = 1;
#endif
- B43_WARN_ON(dev->sdev->id.revision < 3);
+ B43_WARN_ON(dev->dev->core_rev < 3);
if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP))
b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
@@ -180,7 +192,7 @@ void b43_phy_unlock(struct b43_wldev *dev)
B43_WARN_ON(!dev->phy.phy_locked);
dev->phy.phy_locked = 0;
#endif
- B43_WARN_ON(dev->sdev->id.revision < 3);
+ B43_WARN_ON(dev->dev->core_rev < 3);
if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP))
b43_power_saving_ctl_bits(dev, 0);
@@ -368,8 +380,8 @@ void b43_phy_txpower_check(struct b43_wldev *dev, unsigned int flags)
/* The next check will be needed in two seconds, or later. */
phy->next_txpwr_check_time = round_jiffies(now + (HZ * 2));
- if ((dev->sdev->bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
- (dev->sdev->bus->boardinfo.type == SSB_BOARD_BU4306))
+ if ((dev->dev->board_vendor == SSB_BOARDVENDOR_BCM) &&
+ (dev->dev->board_type == SSB_BOARD_BU4306))
return; /* No software txpower adjustment needed */
result = phy->ops->recalc_txpower(dev, !!(flags & B43_TXPWR_IGNORE_TSSI));
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index 2401bee8b08..aa77ba612a9 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -194,6 +194,8 @@ struct b43_phy_a;
struct b43_phy_g;
struct b43_phy_n;
struct b43_phy_lp;
+struct b43_phy_ht;
+struct b43_phy_lcn;
struct b43_phy {
/* Hardware operation callbacks. */
@@ -216,6 +218,10 @@ struct b43_phy {
struct b43_phy_n *n;
/* LP-PHY specific information */
struct b43_phy_lp *lp;
+ /* HT-PHY specific information */
+ struct b43_phy_ht *ht;
+ /* LCN-PHY specific information */
+ struct b43_phy_lcn *lcn;
};
/* Band support flags. */
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 1758a282f91..8e157bc213f 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -5,7 +5,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
- Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2005-2008 Michael Buesch <m@bues.ch>
Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
@@ -718,7 +718,7 @@ static void b43_calc_nrssi_threshold(struct b43_wldev *dev)
B43_WARN_ON(phy->type != B43_PHYTYPE_G);
if (!phy->gmode ||
- !(dev->sdev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
+ !(dev->dev->bus_sprom->boardflags_lo & B43_BFL_RSSI)) {
tmp16 = b43_nrssi_hw_read(dev, 0x20);
if (tmp16 >= 0x20)
tmp16 -= 0x40;
@@ -1114,7 +1114,7 @@ static u16 radio2050_rfover_val(struct b43_wldev *dev,
{
struct b43_phy *phy = &dev->phy;
struct b43_phy_g *gphy = phy->g;
- struct ssb_sprom *sprom = &(dev->sdev->bus->sprom);
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
if (!phy->gmode)
return 0;
@@ -1491,7 +1491,6 @@ static u16 b43_radio_init2050(struct b43_wldev *dev)
static void b43_phy_initb5(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
struct b43_phy *phy = &dev->phy;
struct b43_phy_g *gphy = phy->g;
u16 offset, value;
@@ -1500,8 +1499,8 @@ static void b43_phy_initb5(struct b43_wldev *dev)
if (phy->analog == 1) {
b43_radio_set(dev, 0x007A, 0x0050);
}
- if ((bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM) &&
- (bus->boardinfo.type != SSB_BOARD_BU4306)) {
+ if ((dev->dev->board_vendor != SSB_BOARDVENDOR_BCM) &&
+ (dev->dev->board_type != SSB_BOARD_BU4306)) {
value = 0x2120;
for (offset = 0x00A8; offset < 0x00C7; offset++) {
b43_phy_write(dev, offset, value);
@@ -1620,7 +1619,7 @@ static void b43_phy_initb6(struct b43_wldev *dev)
b43_radio_write16(dev, 0x5A, 0x88);
b43_radio_write16(dev, 0x5B, 0x6B);
b43_radio_write16(dev, 0x5C, 0x0F);
- if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_ALTIQ) {
+ if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_ALTIQ) {
b43_radio_write16(dev, 0x5D, 0xFA);
b43_radio_write16(dev, 0x5E, 0xD8);
} else {
@@ -1787,7 +1786,7 @@ static void b43_calc_loopback_gain(struct b43_wldev *dev)
b43_phy_set(dev, B43_PHY_RFOVER, 0x0100);
b43_phy_mask(dev, B43_PHY_RFOVERVAL, 0xCFFF);
- if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) {
+ if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_EXTLNA) {
if (phy->rev >= 7) {
b43_phy_set(dev, B43_PHY_RFOVER, 0x0800);
b43_phy_set(dev, B43_PHY_RFOVERVAL, 0x8000);
@@ -1922,7 +1921,6 @@ static void b43_hardware_pctl_init_gphy(struct b43_wldev *dev)
/* Initialize B/G PHY power control */
static void b43_phy_init_pctl(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
struct b43_phy *phy = &dev->phy;
struct b43_phy_g *gphy = phy->g;
struct b43_rfatt old_rfatt;
@@ -1931,8 +1929,8 @@ static void b43_phy_init_pctl(struct b43_wldev *dev)
B43_WARN_ON(phy->type != B43_PHYTYPE_G);
- if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
- (bus->boardinfo.type == SSB_BOARD_BU4306))
+ if ((dev->dev->board_vendor == SSB_BOARDVENDOR_BCM) &&
+ (dev->dev->board_type == SSB_BOARD_BU4306))
return;
b43_phy_write(dev, 0x0028, 0x8018);
@@ -2053,7 +2051,7 @@ static void b43_phy_initg(struct b43_wldev *dev)
if (phy->rev >= 6) {
b43_phy_maskset(dev, B43_PHY_CCK(0x36), 0x0FFF, (gphy->lo_control->tx_bias << 12));
}
- if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
+ if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL)
b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075);
else
b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F);
@@ -2066,7 +2064,7 @@ static void b43_phy_initg(struct b43_wldev *dev)
b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078);
}
- if (!(dev->sdev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
+ if (!(dev->dev->bus_sprom->boardflags_lo & B43_BFL_RSSI)) {
/* The specs state to update the NRSSI LT with
* the value 0x7FFFFFFF here. I think that is some weird
* compiler optimization in the original driver.
@@ -2088,8 +2086,8 @@ static void b43_phy_initg(struct b43_wldev *dev)
/* FIXME: The spec says in the following if, the 0 should be replaced
'if OFDM may not be used in the current locale'
but OFDM is legal everywhere */
- if ((dev->sdev->bus->chip_id == 0x4306
- && dev->sdev->bus->chip_package == 2) || 0) {
+ if ((dev->dev->chip_id == 0x4306
+ && dev->dev->chip_pkg == 2) || 0) {
b43_phy_mask(dev, B43_PHY_CRS0, 0xBFFF);
b43_phy_mask(dev, B43_PHY_OFDM(0xC3), 0x7FFF);
}
@@ -2105,7 +2103,7 @@ void b43_gphy_channel_switch(struct b43_wldev *dev,
b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel));
if (channel == 14) {
- if (dev->sdev->bus->sprom.country_code ==
+ if (dev->dev->bus_sprom->country_code ==
SSB_SPROM1CCODE_JAPAN)
b43_hf_write(dev,
b43_hf_read(dev) & ~B43_HF_ACPR);
@@ -2136,17 +2134,17 @@ static void default_baseband_attenuation(struct b43_wldev *dev,
static void default_radio_attenuation(struct b43_wldev *dev,
struct b43_rfatt *rf)
{
- struct ssb_bus *bus = dev->sdev->bus;
+ struct b43_bus_dev *bdev = dev->dev;
struct b43_phy *phy = &dev->phy;
rf->with_padmix = 0;
- if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM &&
- bus->boardinfo.type == SSB_BOARD_BCM4309G) {
- if (bus->boardinfo.rev < 0x43) {
+ if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
+ dev->dev->board_type == SSB_BOARD_BCM4309G) {
+ if (dev->dev->board_rev < 0x43) {
rf->att = 2;
return;
- } else if (bus->boardinfo.rev < 0x51) {
+ } else if (dev->dev->board_rev < 0x51) {
rf->att = 3;
return;
}
@@ -2172,21 +2170,21 @@ static void default_radio_attenuation(struct b43_wldev *dev,
return;
case 1:
if (phy->type == B43_PHYTYPE_G) {
- if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
- && bus->boardinfo.type == SSB_BOARD_BCM4309G
- && bus->boardinfo.rev >= 30)
+ if (bdev->board_vendor == SSB_BOARDVENDOR_BCM
+ && bdev->board_type == SSB_BOARD_BCM4309G
+ && bdev->board_rev >= 30)
rf->att = 3;
- else if (bus->boardinfo.vendor ==
+ else if (bdev->board_vendor ==
SSB_BOARDVENDOR_BCM
- && bus->boardinfo.type ==
+ && bdev->board_type ==
SSB_BOARD_BU4306)
rf->att = 3;
else
rf->att = 1;
} else {
- if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
- && bus->boardinfo.type == SSB_BOARD_BCM4309G
- && bus->boardinfo.rev >= 30)
+ if (bdev->board_vendor == SSB_BOARDVENDOR_BCM
+ && bdev->board_type == SSB_BOARD_BCM4309G
+ && bdev->board_rev >= 30)
rf->att = 7;
else
rf->att = 6;
@@ -2194,16 +2192,16 @@ static void default_radio_attenuation(struct b43_wldev *dev,
return;
case 2:
if (phy->type == B43_PHYTYPE_G) {
- if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
- && bus->boardinfo.type == SSB_BOARD_BCM4309G
- && bus->boardinfo.rev >= 30)
+ if (bdev->board_vendor == SSB_BOARDVENDOR_BCM
+ && bdev->board_type == SSB_BOARD_BCM4309G
+ && bdev->board_rev >= 30)
rf->att = 3;
- else if (bus->boardinfo.vendor ==
+ else if (bdev->board_vendor ==
SSB_BOARDVENDOR_BCM
- && bus->boardinfo.type ==
+ && bdev->board_type ==
SSB_BOARD_BU4306)
rf->att = 5;
- else if (bus->chip_id == 0x4320)
+ else if (bdev->chip_id == 0x4320)
rf->att = 4;
else
rf->att = 3;
@@ -2384,11 +2382,11 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
struct b43_phy_g *gphy = phy->g;
s16 pab0, pab1, pab2;
- pab0 = (s16) (dev->sdev->bus->sprom.pa0b0);
- pab1 = (s16) (dev->sdev->bus->sprom.pa0b1);
- pab2 = (s16) (dev->sdev->bus->sprom.pa0b2);
+ pab0 = (s16) (dev->dev->bus_sprom->pa0b0);
+ pab1 = (s16) (dev->dev->bus_sprom->pa0b1);
+ pab2 = (s16) (dev->dev->bus_sprom->pa0b2);
- B43_WARN_ON((dev->sdev->bus->chip_id == 0x4301) &&
+ B43_WARN_ON((dev->dev->chip_id == 0x4301) &&
(phy->radio_ver != 0x2050)); /* Not supported anymore */
gphy->dyn_tssi_tbl = 0;
@@ -2396,10 +2394,10 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
pab0 != -1 && pab1 != -1 && pab2 != -1) {
/* The pabX values are set in SPROM. Use them. */
- if ((s8) dev->sdev->bus->sprom.itssi_bg != 0 &&
- (s8) dev->sdev->bus->sprom.itssi_bg != -1) {
+ if ((s8) dev->dev->bus_sprom->itssi_bg != 0 &&
+ (s8) dev->dev->bus_sprom->itssi_bg != -1) {
gphy->tgt_idle_tssi =
- (s8) (dev->sdev->bus->sprom.itssi_bg);
+ (s8) (dev->dev->bus_sprom->itssi_bg);
} else
gphy->tgt_idle_tssi = 62;
gphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
@@ -2537,7 +2535,7 @@ static int b43_gphy_op_prepare_hardware(struct b43_wldev *dev)
b43_wireless_core_reset(dev, 0);
b43_phy_initg(dev);
phy->gmode = 1;
- b43_wireless_core_reset(dev, B43_TMSLOW_GMODE);
+ b43_wireless_core_reset(dev, 1);
}
return 0;
@@ -2840,7 +2838,7 @@ static void b43_gphy_op_adjust_txpower(struct b43_wldev *dev)
B43_TXCTL_TXMIX;
rfatt += 2;
bbatt += 2;
- } else if (dev->sdev->bus->sprom.
+ } else if (dev->dev->bus_sprom->
boardflags_lo &
B43_BFL_PACTRL) {
bbatt += 4 * (rfatt - 2);
@@ -2914,14 +2912,14 @@ static enum b43_txpwr_result b43_gphy_op_recalc_txpower(struct b43_wldev *dev,
estimated_pwr = b43_gphy_estimate_power_out(dev, average_tssi);
B43_WARN_ON(phy->type != B43_PHYTYPE_G);
- max_pwr = dev->sdev->bus->sprom.maxpwr_bg;
- if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
+ max_pwr = dev->dev->bus_sprom->maxpwr_bg;
+ if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL)
max_pwr -= 3; /* minus 0.75 */
if (unlikely(max_pwr >= INT_TO_Q52(30/*dBm*/))) {
b43warn(dev->wl,
"Invalid max-TX-power value in SPROM.\n");
max_pwr = INT_TO_Q52(20); /* fake it */
- dev->sdev->bus->sprom.maxpwr_bg = max_pwr;
+ dev->dev->bus_sprom->maxpwr_bg = max_pwr;
}
/* Get desired power (in Q5.2) */
@@ -3014,7 +3012,7 @@ static void b43_gphy_op_pwork_60sec(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
- if (!(dev->sdev->bus->sprom.boardflags_lo & B43_BFL_RSSI))
+ if (!(dev->dev->bus_sprom->boardflags_lo & B43_BFL_RSSI))
return;
b43_mac_suspend(dev);
diff --git a/drivers/net/wireless/b43/phy_ht.c b/drivers/net/wireless/b43/phy_ht.c
new file mode 100644
index 00000000000..7c40919651a
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_ht.c
@@ -0,0 +1,413 @@
+/*
+
+ Broadcom B43 wireless driver
+ IEEE 802.11n HT-PHY support
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA.
+
+*/
+
+#include <linux/slab.h>
+
+#include "b43.h"
+#include "phy_ht.h"
+#include "tables_phy_ht.h"
+#include "radio_2059.h"
+#include "main.h"
+
+/**************************************************
+ * Radio 2059.
+ **************************************************/
+
+static void b43_radio_2059_channel_setup(struct b43_wldev *dev,
+ const struct b43_phy_ht_channeltab_e_radio2059 *e)
+{
+ u8 i;
+ u16 routing;
+
+ b43_radio_write(dev, 0x16, e->radio_syn16);
+ b43_radio_write(dev, 0x17, e->radio_syn17);
+ b43_radio_write(dev, 0x22, e->radio_syn22);
+ b43_radio_write(dev, 0x25, e->radio_syn25);
+ b43_radio_write(dev, 0x27, e->radio_syn27);
+ b43_radio_write(dev, 0x28, e->radio_syn28);
+ b43_radio_write(dev, 0x29, e->radio_syn29);
+ b43_radio_write(dev, 0x2c, e->radio_syn2c);
+ b43_radio_write(dev, 0x2d, e->radio_syn2d);
+ b43_radio_write(dev, 0x37, e->radio_syn37);
+ b43_radio_write(dev, 0x41, e->radio_syn41);
+ b43_radio_write(dev, 0x43, e->radio_syn43);
+ b43_radio_write(dev, 0x47, e->radio_syn47);
+ b43_radio_write(dev, 0x4a, e->radio_syn4a);
+ b43_radio_write(dev, 0x58, e->radio_syn58);
+ b43_radio_write(dev, 0x5a, e->radio_syn5a);
+ b43_radio_write(dev, 0x6a, e->radio_syn6a);
+ b43_radio_write(dev, 0x6d, e->radio_syn6d);
+ b43_radio_write(dev, 0x6e, e->radio_syn6e);
+ b43_radio_write(dev, 0x92, e->radio_syn92);
+ b43_radio_write(dev, 0x98, e->radio_syn98);
+
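+ /* Write the per-path RX/TX registers for both routing prefixes
+  * (TXRX0 and RXRX1). */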
+ for (i = 0; i < 2; i++) {
+ routing = i ? R2059_RXRX1 : R2059_TXRX0;
+ b43_radio_write(dev, routing | 0x4a, e->radio_rxtx4a);
+ b43_radio_write(dev, routing | 0x58, e->radio_rxtx58);
+ b43_radio_write(dev, routing | 0x5a, e->radio_rxtx5a);
+ b43_radio_write(dev, routing | 0x6a, e->radio_rxtx6a);
+ b43_radio_write(dev, routing | 0x6d, e->radio_rxtx6d);
+ b43_radio_write(dev, routing | 0x6e, e->radio_rxtx6e);
+ b43_radio_write(dev, routing | 0x92, e->radio_rxtx92);
+ b43_radio_write(dev, routing | 0x98, e->radio_rxtx98);
+ }
+
+ udelay(50);
+
+ /* Calibration */
+ b43_radio_mask(dev, 0x2b, ~0x1);
+ b43_radio_mask(dev, 0x2e, ~0x4);
+ b43_radio_set(dev, 0x2e, 0x4);
+ b43_radio_set(dev, 0x2b, 0x1);
+
+ udelay(300);
+}
+
+static void b43_radio_2059_init(struct b43_wldev *dev)
+{
+ const u16 routing[] = { R2059_SYN, R2059_TXRX0, R2059_RXRX1 };
+ const u16 radio_values[3][2] = {
+ { 0x61, 0xE9 }, { 0x69, 0xD5 }, { 0x73, 0x99 },
+ };
+ u16 i, j;
+
+ b43_radio_write(dev, R2059_ALL | 0x51, 0x0070);
+ b43_radio_write(dev, R2059_ALL | 0x5a, 0x0003);
+
+ for (i = 0; i < ARRAY_SIZE(routing); i++)
+ b43_radio_set(dev, routing[i] | 0x146, 0x3);
+
+ b43_radio_set(dev, 0x2e, 0x0078);
+ b43_radio_set(dev, 0xc0, 0x0080);
+ msleep(2);
+ b43_radio_mask(dev, 0x2e, ~0x0078);
+ b43_radio_mask(dev, 0xc0, ~0x0080);
+
+ if (1) { /* FIXME */
+ b43_radio_set(dev, R2059_RXRX1 | 0x4, 0x1);
+ udelay(10);
+ b43_radio_set(dev, R2059_RXRX1 | 0x0BF, 0x1);
+ b43_radio_maskset(dev, R2059_RXRX1 | 0x19B, 0x3, 0x2);
+
+ b43_radio_set(dev, R2059_RXRX1 | 0x4, 0x2);
+ udelay(100);
+ b43_radio_mask(dev, R2059_RXRX1 | 0x4, ~0x2);
+
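+ /* Poll until bit 0 of RXRX1 register 0x145 is set; i is zeroed on
+  * success, so a non-zero i after the loop indicates a timeout. */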
+ for (i = 0; i < 10000; i++) {
+ if (b43_radio_read(dev, R2059_RXRX1 | 0x145) & 1) {
+ i = 0;
+ break;
+ }
+ udelay(100);
+ }
+ if (i)
+ b43err(dev->wl, "radio 0x945 timeout\n");
+
+ b43_radio_mask(dev, R2059_RXRX1 | 0x4, ~0x1);
+ b43_radio_set(dev, 0xa, 0x60);
+
+ for (i = 0; i < 3; i++) {
+ b43_radio_write(dev, 0x17F, radio_values[i][0]);
+ b43_radio_write(dev, 0x13D, 0x6E);
+ b43_radio_write(dev, 0x13E, radio_values[i][1]);
+ b43_radio_write(dev, 0x13C, 0x55);
+
+ for (j = 0; j < 10000; j++) {
+ if (b43_radio_read(dev, 0x140) & 2) {
+ j = 0;
+ break;
+ }
+ udelay(500);
+ }
+ if (j)
+ b43err(dev->wl, "radio 0x140 timeout\n");
+
+ b43_radio_write(dev, 0x13C, 0x15);
+ }
+
+ b43_radio_mask(dev, 0x17F, ~0x1);
+ }
+
+ b43_radio_mask(dev, 0x11, ~0x0008);
+}
+
+/**************************************************
+ * Channel switching ops.
+ **************************************************/
+
+static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
+ const struct b43_phy_ht_channeltab_e_phy *e,
+ struct ieee80211_channel *new_channel)
+{
+ bool old_band_5ghz;
+ u8 i;
+
+ old_band_5ghz = b43_phy_read(dev, B43_PHY_HT_BANDCTL) & 0; /* FIXME */
+ if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) {
+ /* TODO */
+ } else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) {
+ /* TODO */
+ }
+
+ b43_phy_write(dev, B43_PHY_HT_BW1, e->bw1);
+ b43_phy_write(dev, B43_PHY_HT_BW2, e->bw2);
+ b43_phy_write(dev, B43_PHY_HT_BW3, e->bw3);
+ b43_phy_write(dev, B43_PHY_HT_BW4, e->bw4);
+ b43_phy_write(dev, B43_PHY_HT_BW5, e->bw5);
+ b43_phy_write(dev, B43_PHY_HT_BW6, e->bw6);
+
+ /* TODO: some ops on PHY regs 0x0B0 and 0xC0A */
+
+ /* TODO: separated function? */
+ for (i = 0; i < 3; i++) {
+ u16 mask;
+ u32 tmp = b43_httab_read(dev, B43_HTTAB32(26, 0xE8));
+
+ if (0) /* FIXME */
+ mask = 0x2 << (i * 4);
+ else
+ mask = 0;
+ b43_phy_mask(dev, B43_PHY_EXTG(0x108), mask);
+
+ b43_httab_write(dev, B43_HTTAB16(7, 0x110 + i), tmp >> 16);
+ b43_httab_write(dev, B43_HTTAB8(13, 0x63 + (i * 4)),
+ tmp & 0xFF);
+ b43_httab_write(dev, B43_HTTAB8(13, 0x73 + (i * 4)),
+ tmp & 0xFF);
+ }
+
+ b43_phy_write(dev, 0x017e, 0x3830);
+}
+
+static int b43_phy_ht_set_channel(struct b43_wldev *dev,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type)
+{
+ struct b43_phy *phy = &dev->phy;
+
+ const struct b43_phy_ht_channeltab_e_radio2059 *chent_r2059 = NULL;
+
+ if (phy->radio_ver == 0x2059) {
+ chent_r2059 = b43_phy_ht_get_channeltab_e_r2059(dev,
+ channel->center_freq);
+ if (!chent_r2059)
+ return -ESRCH;
+ } else {
+ return -ESRCH;
+ }
+
+ /* TODO: In case of N-PHY some bandwidth switching goes here */
+
+ if (phy->radio_ver == 0x2059) {
+ b43_radio_2059_channel_setup(dev, chent_r2059);
+ b43_phy_ht_channel_setup(dev, &(chent_r2059->phy_regs),
+ channel);
+ } else {
+ return -ESRCH;
+ }
+
+ return 0;
+}
+
+/**************************************************
+ * Basic PHY ops.
+ **************************************************/
+
+static int b43_phy_ht_op_allocate(struct b43_wldev *dev)
+{
+ struct b43_phy_ht *phy_ht;
+
+ phy_ht = kzalloc(sizeof(*phy_ht), GFP_KERNEL);
+ if (!phy_ht)
+ return -ENOMEM;
+ dev->phy.ht = phy_ht;
+
+ return 0;
+}
+
+static void b43_phy_ht_op_prepare_structs(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_ht *phy_ht = phy->ht;
+
+ memset(phy_ht, 0, sizeof(*phy_ht));
+}
+
+static int b43_phy_ht_op_init(struct b43_wldev *dev)
+{
+ b43_phy_ht_tables_init(dev);
+
+ return 0;
+}
+
+static void b43_phy_ht_op_free(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_ht *phy_ht = phy->ht;
+
+ kfree(phy_ht);
+ phy->ht = NULL;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
+static void b43_phy_ht_op_software_rfkill(struct b43_wldev *dev,
+ bool blocked)
+{
+ if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
+ b43err(dev->wl, "MAC not suspended\n");
+
+ /* In the following PHY ops we copy wl's dummy behaviour.
+ * TODO: Find out if reads (currently hidden in masks/masksets) are
+ * needed and replace following ops with just writes or w&r.
+ * Note: B43_PHY_HT_RF_CTL1 register is tricky, wrong operation can
+ * cause delayed (!) machine lock up. */
+ if (blocked) {
+ b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
+ } else {
+ b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
+ b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, 0, 0x1);
+ b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
+ b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, 0, 0x2);
+
+ if (dev->phy.radio_ver == 0x2059)
+ b43_radio_2059_init(dev);
+ else
+ B43_WARN_ON(1);
+
+ b43_switch_channel(dev, dev->phy.channel);
+ }
+}
+
+static void b43_phy_ht_op_switch_analog(struct b43_wldev *dev, bool on)
+{
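+ /* Switch the analog front end on or off by writing the three pairs
+  * of AFE control registers. */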
+ if (on) {
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL2, 0x00cd);
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL1, 0x0000);
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL4, 0x00cd);
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL3, 0x0000);
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL6, 0x00cd);
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL5, 0x0000);
+ } else {
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL1, 0x07ff);
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL2, 0x00fd);
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL3, 0x07ff);
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL4, 0x00fd);
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL5, 0x07ff);
+ b43_phy_write(dev, B43_PHY_HT_AFE_CTL6, 0x00fd);
+ }
+}
+
+static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev,
+ unsigned int new_channel)
+{
+ struct ieee80211_channel *channel = dev->wl->hw->conf.channel;
+ enum nl80211_channel_type channel_type = dev->wl->hw->conf.channel_type;
+
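+ /* Only 2.4 GHz channels 1-14 are accepted here; any other band is
+  * rejected with -EINVAL. */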
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if ((new_channel < 1) || (new_channel > 14))
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ return b43_phy_ht_set_channel(dev, channel, channel_type);
+}
+
+static unsigned int b43_phy_ht_op_get_default_chan(struct b43_wldev *dev)
+{
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ return 11;
+ return 36;
+}
+
+/**************************************************
+ * R/W ops.
+ **************************************************/
+
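+/* PHY registers are accessed indirectly: the register offset goes to
+ * B43_MMIO_PHY_CONTROL and the value is then transferred through
+ * B43_MMIO_PHY_DATA. */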
+static u16 b43_phy_ht_op_read(struct b43_wldev *dev, u16 reg)
+{
+ b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+ return b43_read16(dev, B43_MMIO_PHY_DATA);
+}
+
+static void b43_phy_ht_op_write(struct b43_wldev *dev, u16 reg, u16 value)
+{
+ b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+ b43_write16(dev, B43_MMIO_PHY_DATA, value);
+}
+
+static void b43_phy_ht_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask,
+ u16 set)
+{
+ b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+ b43_write16(dev, B43_MMIO_PHY_DATA,
+ (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set);
+}
+
+static u16 b43_phy_ht_op_radio_read(struct b43_wldev *dev, u16 reg)
+{
+ /* HT-PHY needs 0x200 for read access */
+ reg |= 0x200;
+
+ b43_write16(dev, B43_MMIO_RADIO24_CONTROL, reg);
+ return b43_read16(dev, B43_MMIO_RADIO24_DATA);
+}
+
+static void b43_phy_ht_op_radio_write(struct b43_wldev *dev, u16 reg,
+ u16 value)
+{
+ b43_write16(dev, B43_MMIO_RADIO24_CONTROL, reg);
+ b43_write16(dev, B43_MMIO_RADIO24_DATA, value);
+}
+
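+/* TX power control is not implemented for HT-PHY yet; these callbacks are
+ * no-op stubs that report the recalculation as done. */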
+static enum b43_txpwr_result
+b43_phy_ht_op_recalc_txpower(struct b43_wldev *dev, bool ignore_tssi)
+{
+ return B43_TXPWR_RES_DONE;
+}
+
+static void b43_phy_ht_op_adjust_txpower(struct b43_wldev *dev)
+{
+}
+
+/**************************************************
+ * PHY ops struct.
+ **************************************************/
+
+const struct b43_phy_operations b43_phyops_ht = {
+ .allocate = b43_phy_ht_op_allocate,
+ .free = b43_phy_ht_op_free,
+ .prepare_structs = b43_phy_ht_op_prepare_structs,
+ .init = b43_phy_ht_op_init,
+ .phy_read = b43_phy_ht_op_read,
+ .phy_write = b43_phy_ht_op_write,
+ .phy_maskset = b43_phy_ht_op_maskset,
+ .radio_read = b43_phy_ht_op_radio_read,
+ .radio_write = b43_phy_ht_op_radio_write,
+ .software_rfkill = b43_phy_ht_op_software_rfkill,
+ .switch_analog = b43_phy_ht_op_switch_analog,
+ .switch_channel = b43_phy_ht_op_switch_channel,
+ .get_default_chan = b43_phy_ht_op_get_default_chan,
+ .recalc_txpower = b43_phy_ht_op_recalc_txpower,
+ .adjust_txpower = b43_phy_ht_op_adjust_txpower,
+};
diff --git a/drivers/net/wireless/b43/phy_ht.h b/drivers/net/wireless/b43/phy_ht.h
new file mode 100644
index 00000000000..7ad7affc8df
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_ht.h
@@ -0,0 +1,46 @@
+#ifndef B43_PHY_HT_H_
+#define B43_PHY_HT_H_
+
+#include "phy_common.h"
+
+
+#define B43_PHY_HT_BANDCTL 0x009 /* Band control */
+#define B43_PHY_HT_TABLE_ADDR 0x072 /* Table address */
+#define B43_PHY_HT_TABLE_DATALO 0x073 /* Table data low */
+#define B43_PHY_HT_TABLE_DATAHI 0x074 /* Table data high */
+#define B43_PHY_HT_BW1 0x1CE
+#define B43_PHY_HT_BW2 0x1CF
+#define B43_PHY_HT_BW3 0x1D0
+#define B43_PHY_HT_BW4 0x1D1
+#define B43_PHY_HT_BW5 0x1D2
+#define B43_PHY_HT_BW6 0x1D3
+
+#define B43_PHY_HT_RF_CTL1 B43_PHY_EXTG(0x010)
+
+#define B43_PHY_HT_AFE_CTL1 B43_PHY_EXTG(0x110)
+#define B43_PHY_HT_AFE_CTL2 B43_PHY_EXTG(0x111)
+#define B43_PHY_HT_AFE_CTL3 B43_PHY_EXTG(0x114)
+#define B43_PHY_HT_AFE_CTL4 B43_PHY_EXTG(0x115)
+#define B43_PHY_HT_AFE_CTL5 B43_PHY_EXTG(0x118)
+#define B43_PHY_HT_AFE_CTL6 B43_PHY_EXTG(0x119)
+
+
+/* Values for PHY registers used on channel switching */
+struct b43_phy_ht_channeltab_e_phy {
+ u16 bw1;
+ u16 bw2;
+ u16 bw3;
+ u16 bw4;
+ u16 bw5;
+ u16 bw6;
+};
+
+
+struct b43_phy_ht {
+};
+
+
+struct b43_phy_operations;
+extern const struct b43_phy_operations b43_phyops_ht;
+
+#endif /* B43_PHY_HT_H_ */
diff --git a/drivers/net/wireless/b43/phy_lcn.c b/drivers/net/wireless/b43/phy_lcn.c
new file mode 100644
index 00000000000..9f7dbbd5ced
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_lcn.c
@@ -0,0 +1,52 @@
+/*
+
+ Broadcom B43 wireless driver
+ IEEE 802.11n LCN-PHY support
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA.
+
+*/
+
+#include <linux/slab.h>
+
+#include "b43.h"
+#include "phy_lcn.h"
+#include "tables_phy_lcn.h"
+#include "main.h"
+
+/**************************************************
+ * PHY ops struct.
+ **************************************************/
+
+const struct b43_phy_operations b43_phyops_lcn = {
+ /*
+ .allocate = b43_phy_lcn_op_allocate,
+ .free = b43_phy_lcn_op_free,
+ .prepare_structs = b43_phy_lcn_op_prepare_structs,
+ .init = b43_phy_lcn_op_init,
+ .phy_read = b43_phy_lcn_op_read,
+ .phy_write = b43_phy_lcn_op_write,
+ .phy_maskset = b43_phy_lcn_op_maskset,
+ .radio_read = b43_phy_lcn_op_radio_read,
+ .radio_write = b43_phy_lcn_op_radio_write,
+ .software_rfkill = b43_phy_lcn_op_software_rfkill,
+ .switch_analog = b43_phy_lcn_op_switch_analog,
+ .switch_channel = b43_phy_lcn_op_switch_channel,
+ .get_default_chan = b43_phy_lcn_op_get_default_chan,
+ .recalc_txpower = b43_phy_lcn_op_recalc_txpower,
+ .adjust_txpower = b43_phy_lcn_op_adjust_txpower,
+ */
+};
diff --git a/drivers/net/wireless/b43/phy_lcn.h b/drivers/net/wireless/b43/phy_lcn.h
new file mode 100644
index 00000000000..c046c2a6cab
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_lcn.h
@@ -0,0 +1,14 @@
+#ifndef B43_PHY_LCN_H_
+#define B43_PHY_LCN_H_
+
+#include "phy_common.h"
+
+
+struct b43_phy_lcn {
+};
+
+
+struct b43_phy_operations;
+extern const struct b43_phy_operations b43_phyops_lcn;
+
+#endif /* B43_PHY_LCN_H_ */
\ No newline at end of file
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 012c8da2f94..f93d66b1817 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -3,7 +3,7 @@
Broadcom B43 wireless driver
IEEE 802.11a/g LP-PHY driver
- Copyright (c) 2008-2009 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2008-2009 Michael Buesch <m@bues.ch>
Copyright (c) 2009 Gábor Stefanik <netrolller.3d@gmail.com>
This program is free software; you can redistribute it and/or modify
@@ -85,39 +85,39 @@ static void b43_lpphy_op_free(struct b43_wldev *dev)
/* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
static void lpphy_read_band_sprom(struct b43_wldev *dev)
{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
struct b43_phy_lp *lpphy = dev->phy.lp;
- struct ssb_bus *bus = dev->sdev->bus;
u16 cckpo, maxpwr;
u32 ofdmpo;
int i;
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- lpphy->tx_isolation_med_band = bus->sprom.tri2g;
- lpphy->bx_arch = bus->sprom.bxa2g;
- lpphy->rx_pwr_offset = bus->sprom.rxpo2g;
- lpphy->rssi_vf = bus->sprom.rssismf2g;
- lpphy->rssi_vc = bus->sprom.rssismc2g;
- lpphy->rssi_gs = bus->sprom.rssisav2g;
- lpphy->txpa[0] = bus->sprom.pa0b0;
- lpphy->txpa[1] = bus->sprom.pa0b1;
- lpphy->txpa[2] = bus->sprom.pa0b2;
- maxpwr = bus->sprom.maxpwr_bg;
+ lpphy->tx_isolation_med_band = sprom->tri2g;
+ lpphy->bx_arch = sprom->bxa2g;
+ lpphy->rx_pwr_offset = sprom->rxpo2g;
+ lpphy->rssi_vf = sprom->rssismf2g;
+ lpphy->rssi_vc = sprom->rssismc2g;
+ lpphy->rssi_gs = sprom->rssisav2g;
+ lpphy->txpa[0] = sprom->pa0b0;
+ lpphy->txpa[1] = sprom->pa0b1;
+ lpphy->txpa[2] = sprom->pa0b2;
+ maxpwr = sprom->maxpwr_bg;
lpphy->max_tx_pwr_med_band = maxpwr;
- cckpo = bus->sprom.cck2gpo;
+ cckpo = sprom->cck2gpo;
/*
* We don't read SPROM's opo as specs say. On rev8 SPROMs
* opo == ofdm2gpo and we don't know any SSB with LP-PHY
* and SPROM rev below 8.
*/
- B43_WARN_ON(bus->sprom.revision < 8);
- ofdmpo = bus->sprom.ofdm2gpo;
+ B43_WARN_ON(sprom->revision < 8);
+ ofdmpo = sprom->ofdm2gpo;
if (cckpo) {
for (i = 0; i < 4; i++) {
lpphy->tx_max_rate[i] =
maxpwr - (ofdmpo & 0xF) * 2;
ofdmpo >>= 4;
}
- ofdmpo = bus->sprom.ofdm2gpo;
+ ofdmpo = sprom->ofdm2gpo;
for (i = 4; i < 15; i++) {
lpphy->tx_max_rate[i] =
maxpwr - (ofdmpo & 0xF) * 2;
@@ -131,39 +131,39 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
lpphy->tx_max_rate[i] = maxpwr - ofdmpo;
}
} else { /* 5GHz */
- lpphy->tx_isolation_low_band = bus->sprom.tri5gl;
- lpphy->tx_isolation_med_band = bus->sprom.tri5g;
- lpphy->tx_isolation_hi_band = bus->sprom.tri5gh;
- lpphy->bx_arch = bus->sprom.bxa5g;
- lpphy->rx_pwr_offset = bus->sprom.rxpo5g;
- lpphy->rssi_vf = bus->sprom.rssismf5g;
- lpphy->rssi_vc = bus->sprom.rssismc5g;
- lpphy->rssi_gs = bus->sprom.rssisav5g;
- lpphy->txpa[0] = bus->sprom.pa1b0;
- lpphy->txpa[1] = bus->sprom.pa1b1;
- lpphy->txpa[2] = bus->sprom.pa1b2;
- lpphy->txpal[0] = bus->sprom.pa1lob0;
- lpphy->txpal[1] = bus->sprom.pa1lob1;
- lpphy->txpal[2] = bus->sprom.pa1lob2;
- lpphy->txpah[0] = bus->sprom.pa1hib0;
- lpphy->txpah[1] = bus->sprom.pa1hib1;
- lpphy->txpah[2] = bus->sprom.pa1hib2;
- maxpwr = bus->sprom.maxpwr_al;
- ofdmpo = bus->sprom.ofdm5glpo;
+ lpphy->tx_isolation_low_band = sprom->tri5gl;
+ lpphy->tx_isolation_med_band = sprom->tri5g;
+ lpphy->tx_isolation_hi_band = sprom->tri5gh;
+ lpphy->bx_arch = sprom->bxa5g;
+ lpphy->rx_pwr_offset = sprom->rxpo5g;
+ lpphy->rssi_vf = sprom->rssismf5g;
+ lpphy->rssi_vc = sprom->rssismc5g;
+ lpphy->rssi_gs = sprom->rssisav5g;
+ lpphy->txpa[0] = sprom->pa1b0;
+ lpphy->txpa[1] = sprom->pa1b1;
+ lpphy->txpa[2] = sprom->pa1b2;
+ lpphy->txpal[0] = sprom->pa1lob0;
+ lpphy->txpal[1] = sprom->pa1lob1;
+ lpphy->txpal[2] = sprom->pa1lob2;
+ lpphy->txpah[0] = sprom->pa1hib0;
+ lpphy->txpah[1] = sprom->pa1hib1;
+ lpphy->txpah[2] = sprom->pa1hib2;
+ maxpwr = sprom->maxpwr_al;
+ ofdmpo = sprom->ofdm5glpo;
lpphy->max_tx_pwr_low_band = maxpwr;
for (i = 4; i < 12; i++) {
lpphy->tx_max_ratel[i] = maxpwr - (ofdmpo & 0xF) * 2;
ofdmpo >>= 4;
}
- maxpwr = bus->sprom.maxpwr_a;
- ofdmpo = bus->sprom.ofdm5gpo;
+ maxpwr = sprom->maxpwr_a;
+ ofdmpo = sprom->ofdm5gpo;
lpphy->max_tx_pwr_med_band = maxpwr;
for (i = 4; i < 12; i++) {
lpphy->tx_max_rate[i] = maxpwr - (ofdmpo & 0xF) * 2;
ofdmpo >>= 4;
}
- maxpwr = bus->sprom.maxpwr_ah;
- ofdmpo = bus->sprom.ofdm5ghpo;
+ maxpwr = sprom->maxpwr_ah;
+ ofdmpo = sprom->ofdm5ghpo;
lpphy->max_tx_pwr_hi_band = maxpwr;
for (i = 4; i < 12; i++) {
lpphy->tx_max_rateh[i] = maxpwr - (ofdmpo & 0xF) * 2;
@@ -214,7 +214,8 @@ static void lpphy_table_init(struct b43_wldev *dev)
static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
+ struct ssb_bus *bus = dev->dev->sdev->bus;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
struct b43_phy_lp *lpphy = dev->phy.lp;
u16 tmp, tmp2;
@@ -242,9 +243,9 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0x00FF, 0xAD00);
b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB,
0xFF00, lpphy->rx_pwr_offset);
- if ((bus->sprom.boardflags_lo & B43_BFL_FEM) &&
+ if ((sprom->boardflags_lo & B43_BFL_FEM) &&
((b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
- (bus->sprom.boardflags_hi & B43_BFH_PAREF))) {
+ (sprom->boardflags_hi & B43_BFH_PAREF))) {
ssb_pmu_set_ldo_voltage(&bus->chipco, LDO_PAREF, 0x28);
ssb_pmu_set_ldo_paref(&bus->chipco, true);
if (dev->phy.rev == 0) {
@@ -260,7 +261,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
}
tmp = lpphy->rssi_vf | lpphy->rssi_vc << 4 | 0xA000;
b43_phy_write(dev, B43_LPPHY_AFE_RSSI_CTL_0, tmp);
- if (bus->sprom.boardflags_hi & B43_BFH_RSSIINV)
+ if (sprom->boardflags_hi & B43_BFH_RSSIINV)
b43_phy_maskset(dev, B43_LPPHY_AFE_RSSI_CTL_1, 0xF000, 0x0AAA);
else
b43_phy_maskset(dev, B43_LPPHY_AFE_RSSI_CTL_1, 0xF000, 0x02AA);
@@ -268,7 +269,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
b43_phy_maskset(dev, B43_LPPHY_RX_RADIO_CTL,
0xFFF9, (lpphy->bx_arch << 1));
if (dev->phy.rev == 1 &&
- (bus->sprom.boardflags_hi & B43_BFH_FEM_BT)) {
+ (sprom->boardflags_hi & B43_BFH_FEM_BT)) {
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x000A);
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0x3F00, 0x0900);
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x000A);
@@ -286,8 +287,8 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xFFC0, 0x000A);
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xC0FF, 0x0B00);
} else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ ||
- (bus->boardinfo.type == 0x048A) || ((dev->phy.rev == 0) &&
- (bus->sprom.boardflags_lo & B43_BFL_FEM))) {
+ (dev->dev->board_type == 0x048A) || ((dev->phy.rev == 0) &&
+ (sprom->boardflags_lo & B43_BFL_FEM))) {
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0001);
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0400);
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x0001);
@@ -297,7 +298,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x0002);
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0A00);
} else if (dev->phy.rev == 1 ||
- (bus->sprom.boardflags_lo & B43_BFL_FEM)) {
+ (sprom->boardflags_lo & B43_BFL_FEM)) {
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0004);
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0800);
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x0004);
@@ -316,15 +317,15 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x0006);
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0700);
}
- if (dev->phy.rev == 1 && (bus->sprom.boardflags_hi & B43_BFH_PAREF)) {
+ if (dev->phy.rev == 1 && (sprom->boardflags_hi & B43_BFH_PAREF)) {
b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_5, B43_LPPHY_TR_LOOKUP_1);
b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_6, B43_LPPHY_TR_LOOKUP_2);
b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_7, B43_LPPHY_TR_LOOKUP_3);
b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_8, B43_LPPHY_TR_LOOKUP_4);
}
- if ((bus->sprom.boardflags_hi & B43_BFH_FEM_BT) &&
- (bus->chip_id == 0x5354) &&
- (bus->chip_package == SSB_CHIPPACK_BCM4712S)) {
+ if ((sprom->boardflags_hi & B43_BFH_FEM_BT) &&
+ (dev->dev->chip_id == 0x5354) &&
+ (dev->dev->chip_pkg == SSB_CHIPPACK_BCM4712S)) {
b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0006);
b43_phy_write(dev, B43_LPPHY_GPIO_SELECT, 0x0005);
b43_phy_write(dev, B43_LPPHY_GPIO_OUTEN, 0xFFFF);
@@ -412,7 +413,6 @@ static void lpphy_restore_dig_flt_state(struct b43_wldev *dev)
static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
struct b43_phy_lp *lpphy = dev->phy.lp;
b43_phy_write(dev, B43_LPPHY_AFE_DAC_CTL, 0x50);
@@ -432,7 +432,7 @@ static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev)
b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x4000);
b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x2000);
b43_phy_set(dev, B43_PHY_OFDM(0x10A), 0x1);
- if (bus->boardinfo.rev >= 0x18) {
+ if (dev->dev->board_rev >= 0x18) {
b43_lptab_write(dev, B43_LPTAB32(17, 65), 0xEC);
b43_phy_maskset(dev, B43_PHY_OFDM(0x10A), 0xFF01, 0x14);
} else {
@@ -449,7 +449,7 @@ static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev)
b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFC1F, 0xA0);
b43_phy_maskset(dev, B43_LPPHY_GAINDIRECTMISMATCH, 0xE0FF, 0x300);
b43_phy_maskset(dev, B43_LPPHY_HIGAINDB, 0x00FF, 0x2A00);
- if ((bus->chip_id == 0x4325) && (bus->chip_rev == 0)) {
+ if ((dev->dev->chip_id == 0x4325) && (dev->dev->chip_rev == 0)) {
b43_phy_maskset(dev, B43_LPPHY_LOWGAINDB, 0x00FF, 0x2100);
b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0xFF00, 0xA);
} else {
@@ -467,7 +467,7 @@ static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev)
b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFFE0, 0x12);
b43_phy_maskset(dev, B43_LPPHY_GAINMISMATCH, 0x0FFF, 0x9000);
- if ((bus->chip_id == 0x4325) && (bus->chip_rev == 0)) {
+ if ((dev->dev->chip_id == 0x4325) && (dev->dev->chip_rev == 0)) {
b43_lptab_write(dev, B43_LPTAB16(0x08, 0x14), 0);
b43_lptab_write(dev, B43_LPTAB16(0x08, 0x12), 0x40);
}
@@ -492,7 +492,7 @@ static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev)
0x2000 | ((u16)lpphy->rssi_gs << 10) |
((u16)lpphy->rssi_vc << 4) | lpphy->rssi_vf);
- if ((bus->chip_id == 0x4325) && (bus->chip_rev == 0)) {
+ if ((dev->dev->chip_id == 0x4325) && (dev->dev->chip_rev == 0)) {
b43_phy_set(dev, B43_LPPHY_AFE_ADC_CTL_0, 0x1C);
b43_phy_maskset(dev, B43_LPPHY_AFE_CTL, 0x00FF, 0x8800);
b43_phy_maskset(dev, B43_LPPHY_AFE_ADC_CTL_1, 0xFC3C, 0x0400);
@@ -519,7 +519,7 @@ struct b2062_freqdata {
static void lpphy_2062_init(struct b43_wldev *dev)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
- struct ssb_bus *bus = dev->sdev->bus;
+ struct ssb_bus *bus = dev->dev->sdev->bus;
u32 crystalfreq, tmp, ref;
unsigned int i;
const struct b2062_freqdata *fd = NULL;
@@ -697,7 +697,7 @@ static void lpphy_radio_init(struct b43_wldev *dev)
lpphy_sync_stx(dev);
b43_phy_write(dev, B43_PHY_OFDM(0xF0), 0x5F80);
b43_phy_write(dev, B43_PHY_OFDM(0xF1), 0);
- if (dev->sdev->bus->chip_id == 0x4325) {
+ if (dev->dev->chip_id == 0x4325) {
// TODO SSB PMU recalibration
}
}
@@ -1289,7 +1289,7 @@ finish:
static void lpphy_rev2plus_rc_calib(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
+ struct ssb_bus *bus = dev->dev->sdev->bus;
u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
u8 tmp = b43_radio_read(dev, B2063_RX_BB_SP8) & 0xFF;
int i;
@@ -1840,7 +1840,6 @@ static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
- struct ssb_bus *bus = dev->sdev->bus;
struct lpphy_tx_gains gains, oldgains;
int old_txpctl, old_afe_ovr, old_rf, old_bbmult;
@@ -1854,7 +1853,7 @@ static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
- if (bus->chip_id == 0x4325 && bus->chip_rev == 0)
+ if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0)
lpphy_papd_cal(dev, gains, 0, 1, 30);
else
lpphy_papd_cal(dev, gains, 0, 1, 65);
@@ -1870,7 +1869,6 @@ static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx,
bool rx, bool pa, struct lpphy_tx_gains *gains)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
- struct ssb_bus *bus = dev->sdev->bus;
const struct lpphy_rx_iq_comp *iqcomp = NULL;
struct lpphy_tx_gains nogains, oldgains;
u16 tmp;
@@ -1879,7 +1877,7 @@ static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx,
memset(&nogains, 0, sizeof(nogains));
memset(&oldgains, 0, sizeof(oldgains));
- if (bus->chip_id == 0x5354) {
+ if (dev->dev->chip_id == 0x5354) {
for (i = 0; i < ARRAY_SIZE(lpphy_5354_iq_table); i++) {
if (lpphy_5354_iq_table[i].chan == lpphy->channel) {
iqcomp = &lpphy_5354_iq_table[i];
@@ -2408,11 +2406,9 @@ static const struct b206x_channel b2063_chantbl[] = {
static void lpphy_b2062_reset_pll_bias(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
-
b43_radio_write(dev, B2062_S_RFPLL_CTL2, 0xFF);
udelay(20);
- if (bus->chip_id == 0x5354) {
+ if (dev->dev->chip_id == 0x5354) {
b43_radio_write(dev, B2062_N_COMM1, 4);
b43_radio_write(dev, B2062_S_RFPLL_CTL2, 4);
} else {
@@ -2432,7 +2428,7 @@ static int lpphy_b2062_tune(struct b43_wldev *dev,
unsigned int channel)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
- struct ssb_bus *bus = dev->sdev->bus;
+ struct ssb_bus *bus = dev->dev->sdev->bus;
const struct b206x_channel *chandata = NULL;
u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
u32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9;
@@ -2522,7 +2518,7 @@ static void lpphy_b2063_vco_calib(struct b43_wldev *dev)
static int lpphy_b2063_tune(struct b43_wldev *dev,
unsigned int channel)
{
- struct ssb_bus *bus = dev->sdev->bus;
+ struct ssb_bus *bus = dev->dev->sdev->bus;
static const struct b206x_channel *chandata = NULL;
u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
@@ -2670,6 +2666,11 @@ static int b43_lpphy_op_init(struct b43_wldev *dev)
{
int err;
+ if (dev->dev->bus_type != B43_BUS_SSB) {
+ b43err(dev->wl, "LP-PHY is supported only on SSB!\n");
+ return -EOPNOTSUPP;
+ }
+
lpphy_read_band_sprom(dev); //FIXME should this be in prepare_structs?
lpphy_baseband_init(dev);
lpphy_radio_init(dev);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 05960ddde24..3b46360da99 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -3,7 +3,7 @@
Broadcom B43 wireless driver
IEEE 802.11n PHY support
- Copyright (c) 2008 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2008 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -299,7 +299,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
- struct ssb_sprom *sprom = &(dev->sdev->bus->sprom);
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
u8 txpi[2], bbmult, i;
u16 tmp, radio_gain, dac_gain;
@@ -423,16 +423,15 @@ static void b43_radio_init2055_pre(struct b43_wldev *dev)
static void b43_radio_init2055_post(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
- struct ssb_sprom *sprom = &(dev->sdev->bus->sprom);
- struct ssb_boardinfo *binfo = &(dev->sdev->bus->boardinfo);
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
int i;
u16 val;
bool workaround = false;
if (sprom->revision < 4)
- workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM &&
- binfo->type == 0x46D &&
- binfo->rev >= 0x41);
+ workaround = (dev->dev->board_vendor != PCI_VENDOR_ID_BROADCOM
+ && dev->dev->board_type == 0x46D
+ && dev->dev->board_rev >= 0x41);
else
workaround =
!(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS);
@@ -604,17 +603,33 @@ static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
static void b43_nphy_bmac_clock_fgc(struct b43_wldev *dev, bool force)
{
- u32 tmslow;
+ u32 tmp;
if (dev->phy.type != B43_PHYTYPE_N)
return;
- tmslow = ssb_read32(dev->sdev, SSB_TMSLOW);
- if (force)
- tmslow |= SSB_TMSLOW_FGC;
- else
- tmslow &= ~SSB_TMSLOW_FGC;
- ssb_write32(dev->sdev, SSB_TMSLOW, tmslow);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
+ if (force)
+ tmp |= BCMA_IOCTL_FGC;
+ else
+ tmp &= ~BCMA_IOCTL_FGC;
+ bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ tmp = ssb_read32(dev->dev->sdev, SSB_TMSLOW);
+ if (force)
+ tmp |= SSB_TMSLOW_FGC;
+ else
+ tmp &= ~SSB_TMSLOW_FGC;
+ ssb_write32(dev->dev->sdev, SSB_TMSLOW, tmp);
+ break;
+#endif
+ }
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
@@ -959,8 +974,21 @@ static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
- ssb_chipco_gpio_control(&dev->sdev->bus->chipco, 0xFC00,
- 0xFC00);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc,
+ 0xFC00, 0xFC00);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco,
+ 0xFC00, 0xFC00);
+ break;
+#endif
+ }
+
b43_write32(dev, B43_MMIO_MACCTL,
b43_read32(dev, B43_MMIO_MACCTL) &
~B43_MACCTL_GPOUTSMSK);
@@ -983,7 +1011,7 @@ static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
{
u16 tmp;
- if (dev->sdev->id.revision == 16)
+ if (dev->dev->core_rev == 16)
b43_mac_suspend(dev);
tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
@@ -993,7 +1021,7 @@ static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
tmp |= (val & mask);
b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
- if (dev->sdev->id.revision == 16)
+ if (dev->dev->core_rev == 16)
b43_mac_enable(dev);
return tmp;
@@ -1168,7 +1196,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
- struct ssb_sprom *sprom = &(dev->sdev->bus->sprom);
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
/* PHY rev 0, 1, 2 */
u8 i, j;
@@ -1373,7 +1401,7 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
static void b43_nphy_workarounds(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = phy->n;
@@ -1443,9 +1471,9 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
/* N PHY WAR TX Chain Update with hw_phytxchain as argument */
- if ((bus->sprom.boardflags2_lo & B43_BFL2_APLL_WAR &&
+ if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
- (bus->sprom.boardflags2_lo & B43_BFL2_GPLL_WAR &&
+ (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
tmp32 = 0x00088888;
else
@@ -1503,8 +1531,8 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
- if (bus->sprom.boardflags2_lo & 0x100 &&
- bus->boardinfo.type == 0x8B) {
+ if (sprom->boardflags2_lo & 0x100 &&
+ dev->dev->board_type == 0x8B) {
delays1[0] = 0x1;
delays1[5] = 0x14;
}
@@ -3586,7 +3614,7 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
*/
int b43_phy_initn(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = phy->n;
u8 tx_pwr_state;
@@ -3599,9 +3627,22 @@ int b43_phy_initn(struct b43_wldev *dev)
bool do_cal = false;
if ((dev->phy.rev >= 3) &&
- (bus->sprom.boardflags_lo & B43_BFL_EXTLNA) &&
+ (sprom->boardflags_lo & B43_BFL_EXTLNA) &&
(b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
- chipco_set32(&dev->sdev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ bcma_cc_set32(&dev->dev->bdev->bus->drv_cc,
+ BCMA_CC_CHIPCTL, 0x40);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ chipco_set32(&dev->dev->sdev->bus->chipco,
+ SSB_CHIPCO_CHIPCTL, 0x40);
+ break;
+#endif
+ }
}
nphy->deaf_count = 0;
b43_nphy_tables_init(dev);
@@ -3639,9 +3680,9 @@ int b43_phy_initn(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20);
b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20);
- if (bus->sprom.boardflags2_lo & 0x100 ||
- (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE &&
- bus->boardinfo.type == 0x8B))
+ if (sprom->boardflags2_lo & 0x100 ||
+ (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE &&
+ dev->dev->board_type == 0x8B))
b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0);
else
b43_phy_write(dev, B43_NPHY_TXREALFD, 0xB8);
@@ -4026,11 +4067,24 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */
static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
{
- u16 val = on ? 0 : 0x7FFF;
+ u16 override = on ? 0x0 : 0x7FFF;
+ u16 core = on ? 0xD : 0x00FD;
- if (dev->phy.rev >= 3)
- b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, val);
- b43_phy_write(dev, B43_NPHY_AFECTL_OVER, val);
+ if (dev->phy.rev >= 3) {
+ if (on) {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, core);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, override);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, core);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, override);
+ } else {
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, override);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, core);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, override);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, core);
+ }
+ } else {
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, override);
+ }
}
static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index 72ab94df756..6e4228c3ed1 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -4,7 +4,7 @@
PIO data transfer
- Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2005-2008 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -111,7 +111,7 @@ static u16 index_to_pioqueue_base(struct b43_wldev *dev,
B43_MMIO_PIO11_BASE5,
};
- if (dev->sdev->id.revision >= 11) {
+ if (dev->dev->core_rev >= 11) {
B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11));
return bases_rev11[index];
}
@@ -121,14 +121,14 @@ static u16 index_to_pioqueue_base(struct b43_wldev *dev,
static u16 pio_txqueue_offset(struct b43_wldev *dev)
{
- if (dev->sdev->id.revision >= 11)
+ if (dev->dev->core_rev >= 11)
return 0x18;
return 0;
}
static u16 pio_rxqueue_offset(struct b43_wldev *dev)
{
- if (dev->sdev->id.revision >= 11)
+ if (dev->dev->core_rev >= 11)
return 0x38;
return 8;
}
@@ -144,7 +144,7 @@ static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev,
if (!q)
return NULL;
q->dev = dev;
- q->rev = dev->sdev->id.revision;
+ q->rev = dev->dev->core_rev;
q->mmio_base = index_to_pioqueue_base(dev, index) +
pio_txqueue_offset(dev);
q->index = index;
@@ -178,7 +178,7 @@ static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev,
if (!q)
return NULL;
q->dev = dev;
- q->rev = dev->sdev->id.revision;
+ q->rev = dev->dev->core_rev;
q->mmio_base = index_to_pioqueue_base(dev, index) +
pio_rxqueue_offset(dev);
diff --git a/drivers/net/wireless/b43/radio_2055.c b/drivers/net/wireless/b43/radio_2055.c
index 44c6dea6688..93643f18c2b 100644
--- a/drivers/net/wireless/b43/radio_2055.c
+++ b/drivers/net/wireless/b43/radio_2055.c
@@ -3,7 +3,7 @@
Broadcom B43 wireless driver
IEEE 802.11n PHY and radio device data tables
- Copyright (c) 2008 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2008 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/wireless/b43/radio_2055.h b/drivers/net/wireless/b43/radio_2055.h
index d9bfa0f21b7..67f96122f8d 100644
--- a/drivers/net/wireless/b43/radio_2055.h
+++ b/drivers/net/wireless/b43/radio_2055.h
@@ -251,4 +251,9 @@ struct b43_nphy_channeltab_entry_rev2 {
void b2055_upload_inittab(struct b43_wldev *dev,
bool ghz5, bool ignore_uploadflag);
+/* Get the NPHY Channel Switch Table entry for a channel.
+ * Returns NULL on failure to find an entry. */
+const struct b43_nphy_channeltab_entry_rev2 *
+b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel);
+
#endif /* B43_RADIO_2055_H_ */
diff --git a/drivers/net/wireless/b43/radio_2056.h b/drivers/net/wireless/b43/radio_2056.h
index d601f6e7e31..d52df6be705 100644
--- a/drivers/net/wireless/b43/radio_2056.h
+++ b/drivers/net/wireless/b43/radio_2056.h
@@ -1117,4 +1117,9 @@ struct b43_nphy_channeltab_entry_rev3 {
void b2056_upload_inittabs(struct b43_wldev *dev,
bool ghz5, bool ignore_uploadflag);
+/* Get the NPHY Channel Switch Table entry for a channel.
+ * Returns NULL on failure to find an entry. */
+const struct b43_nphy_channeltab_entry_rev3 *
+b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq);
+
#endif /* B43_RADIO_2056_H_ */
diff --git a/drivers/net/wireless/b43/radio_2059.c b/drivers/net/wireless/b43/radio_2059.c
new file mode 100644
index 00000000000..f029f6e1f5d
--- /dev/null
+++ b/drivers/net/wireless/b43/radio_2059.c
@@ -0,0 +1,174 @@
+/*
+
+ Broadcom B43 wireless driver
+ IEEE 802.11n 2059 radio device data tables
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA.
+
+*/
+
+#include "b43.h"
+#include "radio_2059.h"
+
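+/* Helper macros that expand a positional list of register values into the
+ * named initializers of struct b43_phy_ht_channeltab_e_radio2059, which
+ * keeps the channel table below compact. */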
+#define RADIOREGS(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
+ r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
+ r20, r21, r22, r23, r24, r25, r26, r27, r28) \
+ .radio_syn16 = r00, \
+ .radio_syn17 = r01, \
+ .radio_syn22 = r02, \
+ .radio_syn25 = r03, \
+ .radio_syn27 = r04, \
+ .radio_syn28 = r05, \
+ .radio_syn29 = r06, \
+ .radio_syn2c = r07, \
+ .radio_syn2d = r08, \
+ .radio_syn37 = r09, \
+ .radio_syn41 = r10, \
+ .radio_syn43 = r11, \
+ .radio_syn47 = r12, \
+ .radio_syn4a = r13, \
+ .radio_syn58 = r14, \
+ .radio_syn5a = r15, \
+ .radio_syn6a = r16, \
+ .radio_syn6d = r17, \
+ .radio_syn6e = r18, \
+ .radio_syn92 = r19, \
+ .radio_syn98 = r20, \
+ .radio_rxtx4a = r21, \
+ .radio_rxtx58 = r22, \
+ .radio_rxtx5a = r23, \
+ .radio_rxtx6a = r24, \
+ .radio_rxtx6d = r25, \
+ .radio_rxtx6e = r26, \
+ .radio_rxtx92 = r27, \
+ .radio_rxtx98 = r28
+
+#define PHYREGS(r0, r1, r2, r3, r4, r5) \
+ .phy_regs.bw1 = r0, \
+ .phy_regs.bw2 = r1, \
+ .phy_regs.bw3 = r2, \
+ .phy_regs.bw4 = r3, \
+ .phy_regs.bw5 = r4, \
+ .phy_regs.bw6 = r5
+
+static const struct b43_phy_ht_channeltab_e_radio2059 b43_phy_ht_channeltab_radio2059[] = {
+ { .freq = 2412,
+ RADIOREGS(0x48, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x6c,
+ 0x09, 0x0f, 0x0a, 0x00, 0x0a, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00),
+ PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
+ },
+ { .freq = 2417,
+ RADIOREGS(0x4b, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x71,
+ 0x09, 0x0f, 0x0a, 0x00, 0x0a, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00),
+ PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
+ },
+ { .freq = 2422,
+ RADIOREGS(0x4e, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x76,
+ 0x09, 0x0f, 0x09, 0x00, 0x09, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00),
+ PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
+ },
+ { .freq = 2427,
+ RADIOREGS(0x52, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x7b,
+ 0x09, 0x0f, 0x09, 0x00, 0x09, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00),
+ PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
+ },
+ { .freq = 2432,
+ RADIOREGS(0x55, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x80,
+ 0x09, 0x0f, 0x08, 0x00, 0x08, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00),
+ PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
+ },
+ { .freq = 2437,
+ RADIOREGS(0x58, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x85,
+ 0x09, 0x0f, 0x08, 0x00, 0x08, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00),
+ PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
+ },
+ { .freq = 2442,
+ RADIOREGS(0x5c, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x8a,
+ 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00),
+ PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
+ },
+ { .freq = 2447,
+ RADIOREGS(0x5f, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x8f,
+ 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00),
+ PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
+ },
+ { .freq = 2452,
+ RADIOREGS(0x62, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x94,
+ 0x09, 0x0f, 0x07, 0x00, 0x07, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00),
+ PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
+ },
+ { .freq = 2457,
+ RADIOREGS(0x66, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x99,
+ 0x09, 0x0f, 0x06, 0x00, 0x06, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00),
+ PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
+ },
+ { .freq = 2462,
+ RADIOREGS(0x69, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x9e,
+ 0x09, 0x0f, 0x06, 0x00, 0x06, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00),
+ PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
+ },
+ { .freq = 2467,
+ RADIOREGS(0x6c, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0xa3,
+ 0x09, 0x0f, 0x05, 0x00, 0x05, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00),
+ PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
+ },
+ { .freq = 2472,
+ RADIOREGS(0x70, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0xa8,
+ 0x09, 0x0f, 0x05, 0x00, 0x05, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x61, 0x03,
+ 0x00, 0x00, 0x00, 0xf0, 0x00),
+ PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
+ },
+};
+
+const struct b43_phy_ht_channeltab_e_radio2059
+*b43_phy_ht_get_channeltab_e_r2059(struct b43_wldev *dev, u16 freq)
+{
+ const struct b43_phy_ht_channeltab_e_radio2059 *e;
+ unsigned int i;
+
+ e = b43_phy_ht_channeltab_radio2059;
+ for (i = 0; i < ARRAY_SIZE(b43_phy_ht_channeltab_radio2059); i++, e++) {
+ if (e->freq == freq)
+ return e;
+ }
+
+ return NULL;
+}
diff --git a/drivers/net/wireless/b43/radio_2059.h b/drivers/net/wireless/b43/radio_2059.h
new file mode 100644
index 00000000000..e4d69e55e9f
--- /dev/null
+++ b/drivers/net/wireless/b43/radio_2059.h
@@ -0,0 +1,54 @@
+#ifndef B43_RADIO_2059_H_
+#define B43_RADIO_2059_H_
+
+#include <linux/types.h>
+
+#include "phy_ht.h"
+
+#define R2059_SYN 0x000
+#define R2059_TXRX0 0x400
+#define R2059_RXRX1 0x800
+#define R2059_ALL 0xC00
+
+/* Values for various registers uploaded on channel switching */
+struct b43_phy_ht_channeltab_e_radio2059 {
+ /* The channel frequency in MHz */
+ u16 freq;
+ /* Values for radio registers */
+ u8 radio_syn16;
+ u8 radio_syn17;
+ u8 radio_syn22;
+ u8 radio_syn25;
+ u8 radio_syn27;
+ u8 radio_syn28;
+ u8 radio_syn29;
+ u8 radio_syn2c;
+ u8 radio_syn2d;
+ u8 radio_syn37;
+ u8 radio_syn41;
+ u8 radio_syn43;
+ u8 radio_syn47;
+ u8 radio_syn4a;
+ u8 radio_syn58;
+ u8 radio_syn5a;
+ u8 radio_syn6a;
+ u8 radio_syn6d;
+ u8 radio_syn6e;
+ u8 radio_syn92;
+ u8 radio_syn98;
+ u8 radio_rxtx4a;
+ u8 radio_rxtx58;
+ u8 radio_rxtx5a;
+ u8 radio_rxtx6a;
+ u8 radio_rxtx6d;
+ u8 radio_rxtx6e;
+ u8 radio_rxtx92;
+ u8 radio_rxtx98;
+ /* Values for PHY registers */
+ struct b43_phy_ht_channeltab_e_phy phy_regs;
+};
+
+const struct b43_phy_ht_channeltab_e_radio2059
+*b43_phy_ht_get_channeltab_e_r2059(struct b43_wldev *dev, u16 freq);
+
+#endif /* B43_RADIO_2059_H_ */
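[Editor's note, not part of the patch above: a minimal usage sketch of the radio 2059 channel table this header exports. Only the struct fields and b43_phy_ht_get_channeltab_e_r2059() come from the patch; the helper name b43_radio_2059_tune() and its call site are hypothetical, and the actual register programming is left as a comment.]

	#include "radio_2059.h"

	static int b43_radio_2059_tune(struct b43_wldev *dev, u16 freq)
	{
		const struct b43_phy_ht_channeltab_e_radio2059 *e;

		/* Look up the per-channel register values for this frequency. */
		e = b43_phy_ht_get_channeltab_e_r2059(dev, freq);
		if (!e)
			return -ESRCH;	/* no table entry for this frequency */

		/*
		 * e->radio_syn16 ... e->radio_rxtx98 would be written to the
		 * corresponding 2059 radio registers here, and e->phy_regs
		 * (bw1..bw6) to the PHY bandwidth registers, before the
		 * channel switch completes.
		 */
		return 0;
	}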
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index a617efe3828..70c2fcedd1b 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -3,7 +3,7 @@
Broadcom B43 wireless driver
RFKILL support
- Copyright (c) 2007 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2007 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -37,17 +37,16 @@ void b43_rfkill_poll(struct ieee80211_hw *hw)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev = wl->current_dev;
- struct ssb_bus *bus = dev->sdev->bus;
bool enabled;
bool brought_up = false;
mutex_lock(&wl->mutex);
if (unlikely(b43_status(dev) < B43_STAT_INITIALIZED)) {
- if (ssb_bus_powerup(bus, 0)) {
+ if (b43_bus_powerup(dev, 0)) {
mutex_unlock(&wl->mutex);
return;
}
- ssb_device_enable(dev->sdev, 0);
+ b43_device_enable(dev, 0);
brought_up = true;
}
@@ -63,8 +62,8 @@ void b43_rfkill_poll(struct ieee80211_hw *hw)
}
if (brought_up) {
- ssb_device_disable(dev->sdev, 0);
- ssb_bus_may_powerdown(bus);
+ b43_device_disable(dev, 0);
+ b43_bus_may_powerdown(dev);
}
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 808e25b7970..80b0755ed3a 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -4,7 +4,7 @@
* SDIO over Sonics Silicon Backplane bus glue for b43.
*
* Copyright (C) 2009 Albert Herranz
- * Copyright (C) 2009 Michael Buesch <mb@bu3sch.de>
+ * Copyright (C) 2009 Michael Buesch <m@bues.ch>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -66,7 +66,7 @@ static void b43_sdio_interrupt_dispatcher(struct sdio_func *func)
int b43_sdio_request_irq(struct b43_wldev *dev,
void (*handler)(struct b43_wldev *dev))
{
- struct ssb_bus *bus = dev->sdev->bus;
+ struct ssb_bus *bus = dev->dev->sdev->bus;
struct sdio_func *func = bus->host_sdio;
struct b43_sdio *sdio = sdio_get_drvdata(func);
int err;
@@ -82,7 +82,7 @@ int b43_sdio_request_irq(struct b43_wldev *dev,
void b43_sdio_free_irq(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
+ struct ssb_bus *bus = dev->dev->sdev->bus;
struct sdio_func *func = bus->host_sdio;
struct b43_sdio *sdio = sdio_get_drvdata(func);
@@ -93,8 +93,8 @@ void b43_sdio_free_irq(struct b43_wldev *dev)
sdio->irq_handler = NULL;
}
-static int b43_sdio_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
+static int __devinit b43_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
{
struct b43_sdio *sdio;
struct sdio_func_tuple *tuple;
@@ -171,7 +171,7 @@ out:
return error;
}
-static void b43_sdio_remove(struct sdio_func *func)
+static void __devexit b43_sdio_remove(struct sdio_func *func)
{
struct b43_sdio *sdio = sdio_get_drvdata(func);
diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c
index 57af619725c..8e8431d4eb0 100644
--- a/drivers/net/wireless/b43/sysfs.c
+++ b/drivers/net/wireless/b43/sysfs.c
@@ -4,7 +4,7 @@
SYSFS support routines
- Copyright (c) 2006 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2006 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -140,7 +140,7 @@ static DEVICE_ATTR(interference, 0644,
int b43_sysfs_register(struct b43_wldev *wldev)
{
- struct device *dev = wldev->sdev->dev;
+ struct device *dev = wldev->dev->dev;
B43_WARN_ON(b43_status(wldev) != B43_STAT_INITIALIZED);
@@ -149,7 +149,7 @@ int b43_sysfs_register(struct b43_wldev *wldev)
void b43_sysfs_unregister(struct b43_wldev *wldev)
{
- struct device *dev = wldev->sdev->dev;
+ struct device *dev = wldev->dev->dev;
device_remove_file(dev, &dev_attr_interference);
}
diff --git a/drivers/net/wireless/b43/tables.c b/drivers/net/wireless/b43/tables.c
index 1ef9a6463ec..ea288df8aee 100644
--- a/drivers/net/wireless/b43/tables.c
+++ b/drivers/net/wireless/b43/tables.c
@@ -4,7 +4,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
- Copyright (c) 2006, 2006 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2006, 2006 Michael Buesch <m@bues.ch>
Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
diff --git a/drivers/net/wireless/b43/tables_lpphy.c b/drivers/net/wireless/b43/tables_lpphy.c
index 59df3c64af6..cff187c5616 100644
--- a/drivers/net/wireless/b43/tables_lpphy.c
+++ b/drivers/net/wireless/b43/tables_lpphy.c
@@ -3,7 +3,7 @@
Broadcom B43 wireless driver
IEEE 802.11a/g LP-PHY and radio device data tables
- Copyright (c) 2009 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2009 Michael Buesch <m@bues.ch>
Copyright (c) 2009 Gábor Stefanik <netrolller.3d@gmail.com>
This program is free software; you can redistribute it and/or modify
@@ -2304,7 +2304,6 @@ void lpphy_rev0_1_table_init(struct b43_wldev *dev)
void lpphy_rev2plus_table_init(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
int i;
B43_WARN_ON(dev->phy.rev < 2);
@@ -2341,7 +2340,7 @@ void lpphy_rev2plus_table_init(struct b43_wldev *dev)
b43_lptab_write_bulk(dev, B43_LPTAB32(10, 0),
ARRAY_SIZE(lpphy_papd_mult_table), lpphy_papd_mult_table);
- if ((bus->chip_id == 0x4325) && (bus->chip_rev == 0)) {
+ if ((dev->dev->chip_id == 0x4325) && (dev->dev->chip_rev == 0)) {
b43_lptab_write_bulk(dev, B43_LPTAB32(13, 0),
ARRAY_SIZE(lpphy_a0_gain_idx_table), lpphy_a0_gain_idx_table);
b43_lptab_write_bulk(dev, B43_LPTAB16(14, 0),
@@ -2416,12 +2415,12 @@ void lpphy_write_gain_table_bulk(struct b43_wldev *dev, int offset, int count,
void lpphy_init_tx_gain_table(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
switch (dev->phy.rev) {
case 0:
- if ((bus->sprom.boardflags_hi & B43_BFH_NOPA) ||
- (bus->sprom.boardflags_lo & B43_BFL_HGPA))
+ if ((sprom->boardflags_hi & B43_BFH_NOPA) ||
+ (sprom->boardflags_lo & B43_BFL_HGPA))
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev0_nopa_tx_gain_table);
else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
@@ -2432,8 +2431,8 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev)
lpphy_rev0_5ghz_tx_gain_table);
break;
case 1:
- if ((bus->sprom.boardflags_hi & B43_BFH_NOPA) ||
- (bus->sprom.boardflags_lo & B43_BFL_HGPA))
+ if ((sprom->boardflags_hi & B43_BFH_NOPA) ||
+ (sprom->boardflags_lo & B43_BFL_HGPA))
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev1_nopa_tx_gain_table);
else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
@@ -2444,7 +2443,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev)
lpphy_rev1_5ghz_tx_gain_table);
break;
default:
- if (bus->sprom.boardflags_hi & B43_BFH_NOPA)
+ if (sprom->boardflags_hi & B43_BFH_NOPA)
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev2_nopa_tx_gain_table);
else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 2de483b3d3b..916f238a71d 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -3,7 +3,7 @@
Broadcom B43 wireless driver
IEEE 802.11n PHY data tables
- Copyright (c) 2008 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2008 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 18569367ce4..a81696bff0e 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -60,16 +60,8 @@ struct nphy_gain_ctl_workaround_entry {
struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
struct b43_wldev *dev, bool ghz5, bool ext_lna);
-/* Get the NPHY Channel Switch Table entry for a channel.
- * Returns NULL on failure to find an entry. */
-const struct b43_nphy_channeltab_entry_rev2 *
-b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel);
-const struct b43_nphy_channeltab_entry_rev3 *
-b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq);
-
/* The N-PHY tables. */
-
#define B43_NTAB_TYPEMASK 0xF0000000
#define B43_NTAB_8BIT 0x10000000
#define B43_NTAB_16BIT 0x20000000
diff --git a/drivers/net/wireless/b43/tables_phy_ht.c b/drivers/net/wireless/b43/tables_phy_ht.c
new file mode 100644
index 00000000000..603938657b1
--- /dev/null
+++ b/drivers/net/wireless/b43/tables_phy_ht.c
@@ -0,0 +1,750 @@
+/*
+
+ Broadcom B43 wireless driver
+ IEEE 802.11n HT-PHY data tables
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA.
+
+*/
+
+#include "b43.h"
+#include "tables_phy_ht.h"
+#include "phy_common.h"
+#include "phy_ht.h"
+
+static const u16 b43_httab_0x12[] = {
+ 0x0000, 0x0008, 0x000a, 0x0010, 0x0012, 0x0019,
+ 0x001a, 0x001c, 0x0080, 0x0088, 0x008a, 0x0090,
+ 0x0092, 0x0099, 0x009a, 0x009c, 0x0100, 0x0108,
+ 0x010a, 0x0110, 0x0112, 0x0119, 0x011a, 0x011c,
+ 0x0180, 0x0188, 0x018a, 0x0190, 0x0192, 0x0199,
+ 0x019a, 0x019c, 0x0000, 0x0098, 0x00a0, 0x00a8,
+ 0x009a, 0x00a2, 0x00aa, 0x0120, 0x0128, 0x0128,
+ 0x0130, 0x0138, 0x0138, 0x0140, 0x0122, 0x012a,
+ 0x012a, 0x0132, 0x013a, 0x013a, 0x0142, 0x01a8,
+ 0x01b0, 0x01b8, 0x01b0, 0x01b8, 0x01c0, 0x01c8,
+ 0x01c0, 0x01c8, 0x01d0, 0x01d0, 0x01d8, 0x01aa,
+ 0x01b2, 0x01ba, 0x01b2, 0x01ba, 0x01c2, 0x01ca,
+ 0x01c2, 0x01ca, 0x01d2, 0x01d2, 0x01da, 0x0001,
+ 0x0002, 0x0004, 0x0009, 0x000c, 0x0011, 0x0014,
+ 0x0018, 0x0020, 0x0021, 0x0022, 0x0024, 0x0081,
+ 0x0082, 0x0084, 0x0089, 0x008c, 0x0091, 0x0094,
+ 0x0098, 0x00a0, 0x00a1, 0x00a2, 0x00a4, 0x0007,
+ 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+ 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+ 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+ 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007,
+ 0x0007, 0x0007,
+};
+
+static const u16 b43_httab_0x27[] = {
+ 0x0009, 0x000e, 0x0011, 0x0014, 0x0017, 0x001a,
+ 0x001d, 0x0020, 0x0009, 0x000e, 0x0011, 0x0014,
+ 0x0017, 0x001a, 0x001d, 0x0020, 0x0009, 0x000e,
+ 0x0011, 0x0014, 0x0017, 0x001a, 0x001d, 0x0020,
+ 0x0009, 0x000e, 0x0011, 0x0014, 0x0017, 0x001a,
+ 0x001d, 0x0020,
+};
+
+static const u16 b43_httab_0x26[] = {
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000,
+};
+
+static const u32 b43_httab_0x25[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_httab_0x2f[] = {
+ 0x00035700, 0x0002cc9a, 0x00026666, 0x0001581f,
+ 0x0001581f, 0x0001581f, 0x0001581f, 0x0001581f,
+ 0x0001581f, 0x0001581f, 0x0001581f, 0x00035700,
+ 0x0002cc9a, 0x00026666, 0x0001581f, 0x0001581f,
+ 0x0001581f, 0x0001581f, 0x0001581f, 0x0001581f,
+ 0x0001581f, 0x0001581f,
+};
+
+static const u16 b43_httab_0x1a[] = {
+ 0x0055, 0x0054, 0x0054, 0x0053, 0x0052, 0x0052,
+ 0x0051, 0x0051, 0x0050, 0x004f, 0x004f, 0x004e,
+ 0x004e, 0x004d, 0x004c, 0x004c, 0x004b, 0x004a,
+ 0x0049, 0x0049, 0x0048, 0x0047, 0x0046, 0x0046,
+ 0x0045, 0x0044, 0x0043, 0x0042, 0x0041, 0x0040,
+ 0x0040, 0x003f, 0x003e, 0x003d, 0x003c, 0x003a,
+ 0x0039, 0x0038, 0x0037, 0x0036, 0x0035, 0x0033,
+ 0x0032, 0x0031, 0x002f, 0x002e, 0x002c, 0x002b,
+ 0x0029, 0x0027, 0x0025, 0x0023, 0x0021, 0x001f,
+ 0x001d, 0x001a, 0x0018, 0x0015, 0x0012, 0x000e,
+ 0x000b, 0x0007, 0x0002, 0x00fd,
+};
+
+static const u16 b43_httab_0x1b[] = {
+ 0x0055, 0x0054, 0x0054, 0x0053, 0x0052, 0x0052,
+ 0x0051, 0x0051, 0x0050, 0x004f, 0x004f, 0x004e,
+ 0x004e, 0x004d, 0x004c, 0x004c, 0x004b, 0x004a,
+ 0x0049, 0x0049, 0x0048, 0x0047, 0x0046, 0x0046,
+ 0x0045, 0x0044, 0x0043, 0x0042, 0x0041, 0x0040,
+ 0x0040, 0x003f, 0x003e, 0x003d, 0x003c, 0x003a,
+ 0x0039, 0x0038, 0x0037, 0x0036, 0x0035, 0x0033,
+ 0x0032, 0x0031, 0x002f, 0x002e, 0x002c, 0x002b,
+ 0x0029, 0x0027, 0x0025, 0x0023, 0x0021, 0x001f,
+ 0x001d, 0x001a, 0x0018, 0x0015, 0x0012, 0x000e,
+ 0x000b, 0x0007, 0x0002, 0x00fd,
+};
+
+static const u16 b43_httab_0x1c[] = {
+ 0x0055, 0x0054, 0x0054, 0x0053, 0x0052, 0x0052,
+ 0x0051, 0x0051, 0x0050, 0x004f, 0x004f, 0x004e,
+ 0x004e, 0x004d, 0x004c, 0x004c, 0x004b, 0x004a,
+ 0x0049, 0x0049, 0x0048, 0x0047, 0x0046, 0x0046,
+ 0x0045, 0x0044, 0x0043, 0x0042, 0x0041, 0x0040,
+ 0x0040, 0x003f, 0x003e, 0x003d, 0x003c, 0x003a,
+ 0x0039, 0x0038, 0x0037, 0x0036, 0x0035, 0x0033,
+ 0x0032, 0x0031, 0x002f, 0x002e, 0x002c, 0x002b,
+ 0x0029, 0x0027, 0x0025, 0x0023, 0x0021, 0x001f,
+ 0x001d, 0x001a, 0x0018, 0x0015, 0x0012, 0x000e,
+ 0x000b, 0x0007, 0x0002, 0x00fd,
+};
+
+static const u32 b43_httab_0x1a_0xc0[] = {
+ 0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e,
+ 0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037,
+ 0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031,
+ 0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040,
+ 0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039,
+ 0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033,
+ 0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e,
+ 0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037,
+ 0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031,
+ 0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c,
+ 0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e,
+ 0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037,
+ 0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031,
+ 0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c,
+ 0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042,
+ 0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b,
+ 0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034,
+ 0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f,
+ 0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e,
+ 0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037,
+ 0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031,
+ 0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c,
+ 0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027,
+ 0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023,
+ 0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e,
+ 0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037,
+ 0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031,
+ 0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c,
+ 0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027,
+ 0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023,
+ 0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f,
+ 0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c,
+};
+
+static const u32 b43_httab_0x1a_0x140[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_httab_0x1b_0x140[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u32 b43_httab_0x1c_0x140[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static const u16 b43_httab_0x1a_0x1c0[] = {
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000,
+};
+
+static const u16 b43_httab_0x1b_0x1c0[] = {
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000,
+};
+
+static const u16 b43_httab_0x1c_0x1c0[] = {
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000,
+};
+
+static const u16 b43_httab_0x1a_0x240[] = {
+ 0x0036, 0x0036, 0x0036, 0x0036, 0x0036, 0x0036,
+ 0x0036, 0x0036, 0x0036, 0x0036, 0x0036, 0x0036,
+ 0x0036, 0x002a, 0x002a, 0x002a, 0x002a, 0x002a,
+ 0x002a, 0x002a, 0x002a, 0x002a, 0x002a, 0x002a,
+ 0x001e, 0x001e, 0x001e, 0x001e, 0x001e, 0x001e,
+ 0x001e, 0x001e, 0x001e, 0x001e, 0x001e, 0x001e,
+ 0x001e, 0x001e, 0x001e, 0x001e, 0x000e, 0x000e,
+ 0x000e, 0x000e, 0x000e, 0x000e, 0x000e, 0x000e,
+ 0x000e, 0x000e, 0x000e, 0x000e, 0x000e, 0x000e,
+ 0x000e, 0x000e, 0x000e, 0x000e, 0x01fc, 0x01fc,
+ 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc,
+ 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc,
+ 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
+ 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
+ 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
+ 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6,
+};
+
+static const u16 b43_httab_0x1b_0x240[] = {
+ 0x0036, 0x0036, 0x0036, 0x0036, 0x0036, 0x0036,
+ 0x0036, 0x0036, 0x0036, 0x0036, 0x0036, 0x0036,
+ 0x0036, 0x002a, 0x002a, 0x002a, 0x002a, 0x002a,
+ 0x002a, 0x002a, 0x002a, 0x002a, 0x002a, 0x002a,
+ 0x001e, 0x001e, 0x001e, 0x001e, 0x001e, 0x001e,
+ 0x001e, 0x001e, 0x001e, 0x001e, 0x001e, 0x001e,
+ 0x001e, 0x001e, 0x001e, 0x001e, 0x000e, 0x000e,
+ 0x000e, 0x000e, 0x000e, 0x000e, 0x000e, 0x000e,
+ 0x000e, 0x000e, 0x000e, 0x000e, 0x000e, 0x000e,
+ 0x000e, 0x000e, 0x000e, 0x000e, 0x01fc, 0x01fc,
+ 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc,
+ 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc,
+ 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
+ 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
+ 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
+ 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6,
+};
+
+static const u16 b43_httab_0x1c_0x240[] = {
+ 0x0036, 0x0036, 0x0036, 0x0036, 0x0036, 0x0036,
+ 0x0036, 0x0036, 0x0036, 0x0036, 0x0036, 0x0036,
+ 0x0036, 0x002a, 0x002a, 0x002a, 0x002a, 0x002a,
+ 0x002a, 0x002a, 0x002a, 0x002a, 0x002a, 0x002a,
+ 0x001e, 0x001e, 0x001e, 0x001e, 0x001e, 0x001e,
+ 0x001e, 0x001e, 0x001e, 0x001e, 0x001e, 0x001e,
+ 0x001e, 0x001e, 0x001e, 0x001e, 0x000e, 0x000e,
+ 0x000e, 0x000e, 0x000e, 0x000e, 0x000e, 0x000e,
+ 0x000e, 0x000e, 0x000e, 0x000e, 0x000e, 0x000e,
+ 0x000e, 0x000e, 0x000e, 0x000e, 0x01fc, 0x01fc,
+ 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc,
+ 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc, 0x01fc,
+ 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
+ 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
+ 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
+ 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee, 0x01ee,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6, 0x01d6,
+ 0x01d6, 0x01d6,
+};
+
+static const u32 b43_httab_0x1f[] = {
+ 0x00000000, 0x00000000, 0x00016023, 0x00006028,
+ 0x00034036, 0x0003402e, 0x0007203c, 0x0006e037,
+ 0x00070030, 0x0009401f, 0x0009a00f, 0x000b600d,
+ 0x000c8007, 0x000ce007, 0x00101fff, 0x00121ff9,
+ 0x0012e004, 0x0014dffc, 0x0016dff6, 0x0018dfe9,
+ 0x001b3fe5, 0x001c5fd0, 0x001ddfc2, 0x001f1fb6,
+ 0x00207fa4, 0x00219f8f, 0x0022ff7d, 0x00247f6c,
+ 0x0024df5b, 0x00267f4b, 0x0027df3b, 0x0029bf3b,
+ 0x002b5f2f, 0x002d3f2e, 0x002f5f2a, 0x002fff15,
+ 0x00315f0b, 0x0032defa, 0x0033beeb, 0x0034fed9,
+ 0x00353ec5, 0x00361eb0, 0x00363e9b, 0x0036be87,
+ 0x0036be70, 0x0038fe67, 0x0044beb2, 0x00513ef3,
+ 0x00595f11, 0x00669f3d, 0x0078dfdf, 0x00a143aa,
+ 0x01642fff, 0x0162afff, 0x01620fff, 0x0160cfff,
+ 0x015f0fff, 0x015dafff, 0x015bcfff, 0x015bcfff,
+ 0x015b4fff, 0x015acfff, 0x01590fff, 0x0156cfff,
+};
+
+static const u32 b43_httab_0x21[] = {
+ 0x00000000, 0x00000000, 0x00016023, 0x00006028,
+ 0x00034036, 0x0003402e, 0x0007203c, 0x0006e037,
+ 0x00070030, 0x0009401f, 0x0009a00f, 0x000b600d,
+ 0x000c8007, 0x000ce007, 0x00101fff, 0x00121ff9,
+ 0x0012e004, 0x0014dffc, 0x0016dff6, 0x0018dfe9,
+ 0x001b3fe5, 0x001c5fd0, 0x001ddfc2, 0x001f1fb6,
+ 0x00207fa4, 0x00219f8f, 0x0022ff7d, 0x00247f6c,
+ 0x0024df5b, 0x00267f4b, 0x0027df3b, 0x0029bf3b,
+ 0x002b5f2f, 0x002d3f2e, 0x002f5f2a, 0x002fff15,
+ 0x00315f0b, 0x0032defa, 0x0033beeb, 0x0034fed9,
+ 0x00353ec5, 0x00361eb0, 0x00363e9b, 0x0036be87,
+ 0x0036be70, 0x0038fe67, 0x0044beb2, 0x00513ef3,
+ 0x00595f11, 0x00669f3d, 0x0078dfdf, 0x00a143aa,
+ 0x01642fff, 0x0162afff, 0x01620fff, 0x0160cfff,
+ 0x015f0fff, 0x015dafff, 0x015bcfff, 0x015bcfff,
+ 0x015b4fff, 0x015acfff, 0x01590fff, 0x0156cfff,
+};
+
+static const u32 b43_httab_0x23[] = {
+ 0x00000000, 0x00000000, 0x00016023, 0x00006028,
+ 0x00034036, 0x0003402e, 0x0007203c, 0x0006e037,
+ 0x00070030, 0x0009401f, 0x0009a00f, 0x000b600d,
+ 0x000c8007, 0x000ce007, 0x00101fff, 0x00121ff9,
+ 0x0012e004, 0x0014dffc, 0x0016dff6, 0x0018dfe9,
+ 0x001b3fe5, 0x001c5fd0, 0x001ddfc2, 0x001f1fb6,
+ 0x00207fa4, 0x00219f8f, 0x0022ff7d, 0x00247f6c,
+ 0x0024df5b, 0x00267f4b, 0x0027df3b, 0x0029bf3b,
+ 0x002b5f2f, 0x002d3f2e, 0x002f5f2a, 0x002fff15,
+ 0x00315f0b, 0x0032defa, 0x0033beeb, 0x0034fed9,
+ 0x00353ec5, 0x00361eb0, 0x00363e9b, 0x0036be87,
+ 0x0036be70, 0x0038fe67, 0x0044beb2, 0x00513ef3,
+ 0x00595f11, 0x00669f3d, 0x0078dfdf, 0x00a143aa,
+ 0x01642fff, 0x0162afff, 0x01620fff, 0x0160cfff,
+ 0x015f0fff, 0x015dafff, 0x015bcfff, 0x015bcfff,
+ 0x015b4fff, 0x015acfff, 0x01590fff, 0x0156cfff,
+};
+
+static const u32 b43_httab_0x20[] = {
+ 0x0b5e002d, 0x0ae2002f, 0x0a3b0032, 0x09a70035,
+ 0x09220038, 0x08ab003b, 0x081f003f, 0x07a20043,
+ 0x07340047, 0x06d2004b, 0x067a004f, 0x06170054,
+ 0x05bf0059, 0x0571005e, 0x051e0064, 0x04d3006a,
+ 0x04910070, 0x044c0077, 0x040f007e, 0x03d90085,
+ 0x03a1008d, 0x036f0095, 0x033d009e, 0x030b00a8,
+ 0x02e000b2, 0x02b900bc, 0x029200c7, 0x026d00d3,
+ 0x024900e0, 0x022900ed, 0x020a00fb, 0x01ec010a,
+ 0x01d20119, 0x01b7012a, 0x019e013c, 0x0188014e,
+ 0x01720162, 0x015d0177, 0x0149018e, 0x013701a5,
+ 0x012601be, 0x011501d8, 0x010601f4, 0x00f70212,
+ 0x00e90231, 0x00dc0253, 0x00d00276, 0x00c4029b,
+ 0x00b902c3, 0x00af02ed, 0x00a50319, 0x009c0348,
+ 0x0093037a, 0x008b03af, 0x008303e6, 0x007c0422,
+ 0x00750460, 0x006e04a3, 0x006804e9, 0x00620533,
+ 0x005d0582, 0x005805d6, 0x0053062e, 0x004e068c,
+};
+
+static const u32 b43_httab_0x22[] = {
+ 0x0b5e002d, 0x0ae2002f, 0x0a3b0032, 0x09a70035,
+ 0x09220038, 0x08ab003b, 0x081f003f, 0x07a20043,
+ 0x07340047, 0x06d2004b, 0x067a004f, 0x06170054,
+ 0x05bf0059, 0x0571005e, 0x051e0064, 0x04d3006a,
+ 0x04910070, 0x044c0077, 0x040f007e, 0x03d90085,
+ 0x03a1008d, 0x036f0095, 0x033d009e, 0x030b00a8,
+ 0x02e000b2, 0x02b900bc, 0x029200c7, 0x026d00d3,
+ 0x024900e0, 0x022900ed, 0x020a00fb, 0x01ec010a,
+ 0x01d20119, 0x01b7012a, 0x019e013c, 0x0188014e,
+ 0x01720162, 0x015d0177, 0x0149018e, 0x013701a5,
+ 0x012601be, 0x011501d8, 0x010601f4, 0x00f70212,
+ 0x00e90231, 0x00dc0253, 0x00d00276, 0x00c4029b,
+ 0x00b902c3, 0x00af02ed, 0x00a50319, 0x009c0348,
+ 0x0093037a, 0x008b03af, 0x008303e6, 0x007c0422,
+ 0x00750460, 0x006e04a3, 0x006804e9, 0x00620533,
+ 0x005d0582, 0x005805d6, 0x0053062e, 0x004e068c,
+};
+
+static const u32 b43_httab_0x24[] = {
+ 0x0b5e002d, 0x0ae2002f, 0x0a3b0032, 0x09a70035,
+ 0x09220038, 0x08ab003b, 0x081f003f, 0x07a20043,
+ 0x07340047, 0x06d2004b, 0x067a004f, 0x06170054,
+ 0x05bf0059, 0x0571005e, 0x051e0064, 0x04d3006a,
+ 0x04910070, 0x044c0077, 0x040f007e, 0x03d90085,
+ 0x03a1008d, 0x036f0095, 0x033d009e, 0x030b00a8,
+ 0x02e000b2, 0x02b900bc, 0x029200c7, 0x026d00d3,
+ 0x024900e0, 0x022900ed, 0x020a00fb, 0x01ec010a,
+ 0x01d20119, 0x01b7012a, 0x019e013c, 0x0188014e,
+ 0x01720162, 0x015d0177, 0x0149018e, 0x013701a5,
+ 0x012601be, 0x011501d8, 0x010601f4, 0x00f70212,
+ 0x00e90231, 0x00dc0253, 0x00d00276, 0x00c4029b,
+ 0x00b902c3, 0x00af02ed, 0x00a50319, 0x009c0348,
+ 0x0093037a, 0x008b03af, 0x008303e6, 0x007c0422,
+ 0x00750460, 0x006e04a3, 0x006804e9, 0x00620533,
+ 0x005d0582, 0x005805d6, 0x0053062e, 0x004e068c,
+};
+
+/**************************************************
+ * R/W ops.
+ **************************************************/
+
+u32 b43_httab_read(struct b43_wldev *dev, u32 offset)
+{
+ u32 type, value;
+
+ type = offset & B43_HTTAB_TYPEMASK;
+ offset &= ~B43_HTTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ switch (type) {
+ case B43_HTTAB_8BIT:
+ b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_PHY_HT_TABLE_DATALO) & 0xFF;
+ break;
+ case B43_HTTAB_16BIT:
+ b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_PHY_HT_TABLE_DATALO);
+ break;
+ case B43_HTTAB_32BIT:
+ b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_PHY_HT_TABLE_DATAHI);
+ value <<= 16;
+ value |= b43_phy_read(dev, B43_PHY_HT_TABLE_DATALO);
+ break;
+ default:
+ B43_WARN_ON(1);
+ value = 0;
+ }
+
+ return value;
+}
+
+void b43_httab_read_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, void *_data)
+{
+ u32 type;
+ u8 *data = _data;
+ unsigned int i;
+
+ type = offset & B43_HTTAB_TYPEMASK;
+ offset &= ~B43_HTTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
+
+ for (i = 0; i < nr_elements; i++) {
+ switch (type) {
+ case B43_HTTAB_8BIT:
+ *data = b43_phy_read(dev, B43_PHY_HT_TABLE_DATALO) & 0xFF;
+ data++;
+ break;
+ case B43_HTTAB_16BIT:
+ *((u16 *)data) = b43_phy_read(dev, B43_PHY_HT_TABLE_DATALO);
+ data += 2;
+ break;
+ case B43_HTTAB_32BIT:
+ *((u32 *)data) = b43_phy_read(dev, B43_PHY_HT_TABLE_DATAHI);
+ *((u32 *)data) <<= 16;
+ *((u32 *)data) |= b43_phy_read(dev, B43_PHY_HT_TABLE_DATALO);
+ data += 4;
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+ }
+}
+
+void b43_httab_write(struct b43_wldev *dev, u32 offset, u32 value)
+{
+ u32 type;
+
+ type = offset & B43_HTTAB_TYPEMASK;
+ offset &= 0xFFFF;
+
+ switch (type) {
+ case B43_HTTAB_8BIT:
+ B43_WARN_ON(value & ~0xFF);
+ b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
+ b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, value);
+ break;
+ case B43_HTTAB_16BIT:
+ B43_WARN_ON(value & ~0xFFFF);
+ b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
+ b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, value);
+ break;
+ case B43_HTTAB_32BIT:
+ b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
+ b43_phy_write(dev, B43_PHY_HT_TABLE_DATAHI, value >> 16);
+ b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, value & 0xFFFF);
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+
+ return;
+}
+
+void b43_httab_write_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, const void *_data)
+{
+ u32 type, value;
+ const u8 *data = _data;
+ unsigned int i;
+
+ type = offset & B43_HTTAB_TYPEMASK;
+ offset &= ~B43_HTTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, offset);
+
+ for (i = 0; i < nr_elements; i++) {
+ switch (type) {
+ case B43_HTTAB_8BIT:
+ value = *data;
+ data++;
+ B43_WARN_ON(value & ~0xFF);
+ b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, value);
+ break;
+ case B43_HTTAB_16BIT:
+ value = *((u16 *)data);
+ data += 2;
+ B43_WARN_ON(value & ~0xFFFF);
+ b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, value);
+ break;
+ case B43_HTTAB_32BIT:
+ value = *((u32 *)data);
+ data += 4;
+ b43_phy_write(dev, B43_PHY_HT_TABLE_DATAHI, value >> 16);
+ b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO,
+ value & 0xFFFF);
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+ }
+}
+
+/**************************************************
+ * Tables ops.
+ **************************************************/
+
+#define httab_upload(dev, offset, data) do { \
+ b43_httab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \
+ } while (0)
+void b43_phy_ht_tables_init(struct b43_wldev *dev)
+{
+ httab_upload(dev, B43_HTTAB16(0x12, 0), b43_httab_0x12);
+ httab_upload(dev, B43_HTTAB16(0x27, 0), b43_httab_0x27);
+ httab_upload(dev, B43_HTTAB16(0x26, 0), b43_httab_0x26);
+ httab_upload(dev, B43_HTTAB32(0x25, 0), b43_httab_0x25);
+ httab_upload(dev, B43_HTTAB32(0x2f, 0), b43_httab_0x2f);
+ httab_upload(dev, B43_HTTAB16(0x1a, 0), b43_httab_0x1a);
+ httab_upload(dev, B43_HTTAB16(0x1b, 0), b43_httab_0x1b);
+ httab_upload(dev, B43_HTTAB16(0x1c, 0), b43_httab_0x1c);
+ httab_upload(dev, B43_HTTAB32(0x1a, 0x0c0), b43_httab_0x1a_0xc0);
+ httab_upload(dev, B43_HTTAB32(0x1a, 0x140), b43_httab_0x1a_0x140);
+ httab_upload(dev, B43_HTTAB32(0x1b, 0x140), b43_httab_0x1b_0x140);
+ httab_upload(dev, B43_HTTAB32(0x1c, 0x140), b43_httab_0x1c_0x140);
+ httab_upload(dev, B43_HTTAB16(0x1a, 0x1c0), b43_httab_0x1a_0x1c0);
+ httab_upload(dev, B43_HTTAB16(0x1b, 0x1c0), b43_httab_0x1b_0x1c0);
+ httab_upload(dev, B43_HTTAB16(0x1c, 0x1c0), b43_httab_0x1c_0x1c0);
+ httab_upload(dev, B43_HTTAB16(0x1a, 0x240), b43_httab_0x1a_0x240);
+ httab_upload(dev, B43_HTTAB16(0x1b, 0x240), b43_httab_0x1b_0x240);
+ httab_upload(dev, B43_HTTAB16(0x1c, 0x240), b43_httab_0x1c_0x240);
+ httab_upload(dev, B43_HTTAB32(0x1f, 0), b43_httab_0x1f);
+ httab_upload(dev, B43_HTTAB32(0x21, 0), b43_httab_0x21);
+ httab_upload(dev, B43_HTTAB32(0x23, 0), b43_httab_0x23);
+ httab_upload(dev, B43_HTTAB32(0x20, 0), b43_httab_0x20);
+ httab_upload(dev, B43_HTTAB32(0x22, 0), b43_httab_0x22);
+ httab_upload(dev, B43_HTTAB32(0x24, 0), b43_httab_0x24);
+}
diff --git a/drivers/net/wireless/b43/tables_phy_ht.h b/drivers/net/wireless/b43/tables_phy_ht.h
new file mode 100644
index 00000000000..ea3be382c89
--- /dev/null
+++ b/drivers/net/wireless/b43/tables_phy_ht.h
@@ -0,0 +1,22 @@
+#ifndef B43_TABLES_PHY_HT_H_
+#define B43_TABLES_PHY_HT_H_
+
+/* The HT-PHY tables. */
+#define B43_HTTAB_TYPEMASK 0xF0000000
+#define B43_HTTAB_8BIT 0x10000000
+#define B43_HTTAB_16BIT 0x20000000
+#define B43_HTTAB_32BIT 0x30000000
+#define B43_HTTAB8(table, offset) (((table) << 10) | (offset) | B43_HTTAB_8BIT)
+#define B43_HTTAB16(table, offset) (((table) << 10) | (offset) | B43_HTTAB_16BIT)
+#define B43_HTTAB32(table, offset) (((table) << 10) | (offset) | B43_HTTAB_32BIT)
+
+u32 b43_httab_read(struct b43_wldev *dev, u32 offset);
+void b43_httab_read_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, void *_data);
+void b43_httab_write(struct b43_wldev *dev, u32 offset, u32 value);
+void b43_httab_write_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, const void *_data);
+
+void b43_phy_ht_tables_init(struct b43_wldev *dev);
+
+#endif /* B43_TABLES_PHY_HT_H_ */
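[Editor's note, not part of the patch above: a minimal sketch of how the B43_HTTAB*() encoding is meant to be used with the accessors declared here. Each macro packs the table number into the upper bits, the element offset into the lower bits, and tags the access width. The table/element numbers below match tables uploaded in tables_phy_ht.c; the wrapper function itself is hypothetical.]

	static void example_httab_access(struct b43_wldev *dev)
	{
		u16 buf[4];

		/* Write one 16-bit element: table 0x12, element 3. */
		b43_httab_write(dev, B43_HTTAB16(0x12, 3), 0x00aa);

		/*
		 * Bulk-read four consecutive 16-bit elements of table 0x27.
		 * The bulk helpers program B43_PHY_HT_TABLE_ADDR once and
		 * then stream the DATALO/DATAHI accesses for each element.
		 */
		b43_httab_read_bulk(dev, B43_HTTAB16(0x27, 0),
				    ARRAY_SIZE(buf), buf);
	}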
diff --git a/drivers/net/wireless/b43/tables_phy_lcn.c b/drivers/net/wireless/b43/tables_phy_lcn.c
new file mode 100644
index 00000000000..40c1d0915dd
--- /dev/null
+++ b/drivers/net/wireless/b43/tables_phy_lcn.c
@@ -0,0 +1,34 @@
+/*
+
+ Broadcom B43 wireless driver
+ IEEE 802.11n LCN-PHY data tables
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; see the file COPYING. If not, write to
+ the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA.
+
+*/
+
+#include "b43.h"
+#include "tables_phy_lcn.h"
+#include "phy_common.h"
+#include "phy_lcn.h"
+
+/**************************************************
+ * Tables ops.
+ **************************************************/
+
+void b43_phy_lcn_tables_init(struct b43_wldev *dev)
+{
+}
diff --git a/drivers/net/wireless/b43/tables_phy_lcn.h b/drivers/net/wireless/b43/tables_phy_lcn.h
new file mode 100644
index 00000000000..5e31b15b81e
--- /dev/null
+++ b/drivers/net/wireless/b43/tables_phy_lcn.h
@@ -0,0 +1,6 @@
+#ifndef B43_TABLES_PHY_LCN_H_
+#define B43_TABLES_PHY_LCN_H_
+
+void b43_phy_lcn_tables_init(struct b43_wldev *dev);
+
+#endif /* B43_TABLES_PHY_LCN_H_ */
diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c
index 8f4db448ec3..9b1a038be08 100644
--- a/drivers/net/wireless/b43/wa.c
+++ b/drivers/net/wireless/b43/wa.c
@@ -5,7 +5,7 @@
PHY workarounds.
Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
- Copyright (c) 2005-2007 Michael Buesch <mbuesch@freenet.de>
+ Copyright (c) 2005-2007 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -458,17 +458,15 @@ static void b43_wa_rssi_adc(struct b43_wldev *dev)
static void b43_wa_boards_a(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
-
- if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM &&
- bus->boardinfo.type == SSB_BOARD_BU4306 &&
- bus->boardinfo.rev < 0x30) {
+ if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
+ dev->dev->board_type == SSB_BOARD_BU4306 &&
+ dev->dev->board_rev < 0x30) {
b43_phy_write(dev, 0x0010, 0xE000);
b43_phy_write(dev, 0x0013, 0x0140);
b43_phy_write(dev, 0x0014, 0x0280);
} else {
- if (bus->boardinfo.type == SSB_BOARD_MP4318 &&
- bus->boardinfo.rev < 0x20) {
+ if (dev->dev->board_type == SSB_BOARD_MP4318 &&
+ dev->dev->board_rev < 0x20) {
b43_phy_write(dev, 0x0013, 0x0210);
b43_phy_write(dev, 0x0014, 0x0840);
} else {
@@ -486,19 +484,19 @@ static void b43_wa_boards_a(struct b43_wldev *dev)
static void b43_wa_boards_g(struct b43_wldev *dev)
{
- struct ssb_bus *bus = dev->sdev->bus;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
struct b43_phy *phy = &dev->phy;
- if (bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM ||
- bus->boardinfo.type != SSB_BOARD_BU4306 ||
- bus->boardinfo.rev != 0x17) {
+ if (dev->dev->board_vendor != SSB_BOARDVENDOR_BCM ||
+ dev->dev->board_type != SSB_BOARD_BU4306 ||
+ dev->dev->board_rev != 0x17) {
if (phy->rev < 2) {
b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX_R1, 1, 0x0002);
b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX_R1, 2, 0x0001);
} else {
b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 1, 0x0002);
b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 2, 0x0001);
- if ((bus->sprom.boardflags_lo & B43_BFL_EXTLNA) &&
+ if ((sprom->boardflags_lo & B43_BFL_EXTLNA) &&
(phy->rev >= 7)) {
b43_phy_mask(dev, B43_PHY_EXTG(0x11), 0xF7FF);
b43_ofdmtab_write16(dev, B43_OFDMTAB_GAINX, 0x0020, 0x0001);
@@ -510,7 +508,7 @@ static void b43_wa_boards_g(struct b43_wldev *dev)
}
}
}
- if (bus->sprom.boardflags_lo & B43_BFL_FEM) {
+ if (sprom->boardflags_lo & B43_BFL_FEM) {
b43_phy_write(dev, B43_PHY_GTABCTL, 0x3120);
b43_phy_write(dev, B43_PHY_GTABDATA, 0xC480);
}
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index c8f99aebe01..b74f25ec1ab 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -6,7 +6,7 @@
Copyright (C) 2005 Martin Langer <martin-langer@gmx.de>
Copyright (C) 2005 Stefano Brivio <stefano.brivio@polimi.it>
- Copyright (C) 2005, 2006 Michael Buesch <mb@bu3sch.de>
+ Copyright (C) 2005, 2006 Michael Buesch <m@bues.ch>
Copyright (C) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (C) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
@@ -323,8 +323,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
/* we give the phase1key and iv16 here, the key is stored in
* shm. With that the hardware can do phase 2 and encryption.
*/
- ieee80211_get_tkip_key(info->control.hw_key, skb_frag,
- IEEE80211_TKIP_P1_KEY, (u8*)phase1key);
+ ieee80211_get_tkip_p1k(info->control.hw_key, skb_frag, phase1key);
/* phase1key is in host endian. Copy to little-endian txhdr->iv. */
for (i = 0; i < 5; i++) {
txhdr->iv[i * 2 + 0] = phase1key[i];
@@ -547,7 +546,7 @@ static s8 b43_rssi_postprocess(struct b43_wldev *dev,
else
tmp -= 3;
} else {
- if (dev->sdev->bus->sprom.
+ if (dev->dev->bus_sprom->
boardflags_lo & B43_BFL_RSSI) {
if (in_rssi > 63)
in_rssi = 63;
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 23583be1ee0..a610a352102 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -8,7 +8,7 @@
#include <linux/stringify.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/io.h>
#include <linux/ssb/ssb.h>
@@ -532,6 +532,8 @@ struct b43legacy_dma {
struct b43legacy_dmaring *rx_ring0;
struct b43legacy_dmaring *rx_ring3; /* only on core.rev < 5 */
+
+ u32 translation; /* Routing bits */
};
/* Data structures for PIO transmission, per 80211 core. */
diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
index f232618f2cd..5e28ad0d6d1 100644
--- a/drivers/net/wireless/b43legacy/debugfs.c
+++ b/drivers/net/wireless/b43legacy/debugfs.c
@@ -4,7 +4,7 @@
debugfs driver debugging code
- Copyright (c) 2005-2007 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2005-2007 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index e03e01d0bc3..5010c477abd 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -4,7 +4,7 @@
DMA ringbuffer and descriptor allocation/management
- Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>
Some code in this file is derived from the b44.c driver
Copyright (C) 2002 David S. Miller
@@ -73,7 +73,7 @@ static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
>> SSB_DMA_TRANSLATION_SHIFT;
- addr |= ssb_dma_translation(ring->dev->dev);
+ addr |= ring->dev->dma.translation;
ctl = (bufsize - ring->frameoffset)
& B43legacy_DMA32_DCTL_BYTECNT;
if (slot == ring->nr_slots - 1)
@@ -175,7 +175,7 @@ static void op64_fill_descriptor(struct b43legacy_dmaring *ring,
addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
>> SSB_DMA_TRANSLATION_SHIFT;
- addrhi |= ssb_dma_translation(ring->dev->dev);
+ addrhi |= ring->dev->dma.translation;
if (slot == ring->nr_slots - 1)
ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND;
if (start)
@@ -709,7 +709,7 @@ static int dmacontroller_setup(struct b43legacy_dmaring *ring)
int err = 0;
u32 value;
u32 addrext;
- u32 trans = ssb_dma_translation(ring->dev->dev);
+ u32 trans = ring->dev->dma.translation;
if (ring->tx) {
if (ring->type == B43legacy_DMA_64BIT) {
@@ -817,14 +817,13 @@ static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
- struct b43legacy_dmadesc_generic *desc;
struct b43legacy_dmadesc_meta *meta;
int i;
if (!ring->used_slots)
return;
for (i = 0; i < ring->nr_slots; i++) {
- desc = ring->ops->idx2desc(ring, i, &meta);
+ ring->ops->idx2desc(ring, i, &meta);
if (!meta->skb) {
B43legacy_WARN_ON(!ring->tx);
@@ -1094,6 +1093,7 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
return -EOPNOTSUPP;
#endif
}
+ dma->translation = ssb_dma_translation(dev->dev);
err = -ENOMEM;
/* setup TX DMA channels. */
@@ -1371,10 +1371,8 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb)
{
struct b43legacy_dmaring *ring;
- struct ieee80211_hdr *hdr;
int err = 0;
unsigned long flags;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
spin_lock_irqsave(&ring->lock, flags);
@@ -1401,8 +1399,6 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
* into the skb data or cb now. */
- hdr = NULL;
- info = NULL;
err = dma_tx_fragment(ring, &skb);
if (unlikely(err == -ENOKEY)) {
/* Drop this packet, as we don't have the encryption key
@@ -1435,7 +1431,6 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
{
const struct b43legacy_dma_ops *ops;
struct b43legacy_dmaring *ring;
- struct b43legacy_dmadesc_generic *desc;
struct b43legacy_dmadesc_meta *meta;
int retry_limit;
int slot;
@@ -1450,7 +1445,7 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
ops = ring->ops;
while (1) {
B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
- desc = ops->idx2desc(ring, slot, &meta);
+ ops->idx2desc(ring, slot, &meta);
if (meta->skb)
unmap_descbuffer(ring, meta->dmaaddr,
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index f89c3422628..686941c242f 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -5,7 +5,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/linkage.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "b43legacy.h"
diff --git a/drivers/net/wireless/b43legacy/ilt.c b/drivers/net/wireless/b43legacy/ilt.c
index a849078aea6..ee5682e5420 100644
--- a/drivers/net/wireless/b43legacy/ilt.c
+++ b/drivers/net/wireless/b43legacy/ilt.c
@@ -4,7 +4,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Stefano Brivio <stefano.brivio@polimi.it>
- Michael Buesch <mbuesch@freenet.de>
+ Michael Buesch <m@bues.ch>
Danny van Dyk <kugelfang@gentoo.org>
Andreas Jaggi <andreas.jaggi@waterwave.ch>
diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/b43legacy/leds.c
index 37e9be89356..2f1bfdc44f9 100644
--- a/drivers/net/wireless/b43legacy/leds.c
+++ b/drivers/net/wireless/b43legacy/leds.c
@@ -5,7 +5,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Copyright (c) 2005 Stefano Brivio <stefano.brivio@polimi.it>
- Copyright (c) 2005-2007 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2005-2007 Michael Buesch <m@bues.ch>
Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1ab8861dd43..04c03b212a5 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -4,7 +4,7 @@
*
* Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>
* Copyright (c) 2005-2008 Stefano Brivio <stefano.brivio@polimi.it>
- * Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>
+ * Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>
* Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
* Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
* Copyright (c) 2007 Larry Finger <Larry.Finger@lwfinger.net>
@@ -1564,10 +1564,10 @@ static int b43legacy_request_firmware(struct b43legacy_wldev *dev)
struct b43legacy_firmware *fw = &dev->fw;
const u8 rev = dev->dev->id.revision;
const char *filename;
- u32 tmshigh;
int err;
- tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH);
+ /* do dummy read */
+ ssb_read32(dev->dev, SSB_TMSHIGH);
if (!fw->ucode) {
if (rev == 2)
filename = "ucode2";
@@ -2634,11 +2634,9 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
unsigned long flags;
unsigned int new_phymode = 0xFFFF;
int antenna_tx;
- int antenna_rx;
int err = 0;
antenna_tx = B43legacy_ANTENNA_DEFAULT;
- antenna_rx = B43legacy_ANTENNA_DEFAULT;
mutex_lock(&wl->mutex);
dev = wl->current_dev;
@@ -2775,14 +2773,12 @@ static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw,
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev;
- struct b43legacy_phy *phy;
unsigned long flags;
mutex_lock(&wl->mutex);
B43legacy_WARN_ON(wl->vif != vif);
dev = wl->current_dev;
- phy = &dev->phy;
/* Disable IRQs while reconfiguring the device.
* This makes it possible to drop the spinlock throughout
@@ -2974,7 +2970,7 @@ static int b43legacy_phy_versioning(struct b43legacy_wldev *dev)
break;
default:
unsupported = 1;
- };
+ }
if (unsupported) {
b43legacyerr(dev->wl, "FOUND UNSUPPORTED PHY "
"(Analog %u, Type %u, Revision %u)\n",
diff --git a/drivers/net/wireless/b43legacy/main.h b/drivers/net/wireless/b43legacy/main.h
index 1f0e2e379b0..b74a058d7ba 100644
--- a/drivers/net/wireless/b43legacy/main.h
+++ b/drivers/net/wireless/b43legacy/main.h
@@ -4,7 +4,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Copyright (c) 2005 Stefano Brivio <stefano.brivio@polimi.it>
- Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>
Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
Copyright (c) 2007 Larry Finger <Larry.Finger@lwfinger.net>
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 28e477d0158..96faaef3661 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -4,7 +4,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Stefano Brivio <stefano.brivio@polimi.it>
- Michael Buesch <mbuesch@freenet.de>
+ Michael Buesch <m@bues.ch>
Danny van Dyk <kugelfang@gentoo.org>
Andreas Jaggi <andreas.jaggi@waterwave.ch>
Copyright (c) 2007 Larry Finger <Larry.Finger@lwfinger.net>
diff --git a/drivers/net/wireless/b43legacy/phy.h b/drivers/net/wireless/b43legacy/phy.h
index ecbe409f9a9..831a7a4760e 100644
--- a/drivers/net/wireless/b43legacy/phy.h
+++ b/drivers/net/wireless/b43legacy/phy.h
@@ -4,7 +4,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Stefano Brivio <stefano.brivio@polimi.it>
- Michael Buesch <mbuesch@freenet.de>
+ Michael Buesch <m@bues.ch>
Danny van Dyk <kugelfang@gentoo.org>
Andreas Jaggi <andreas.jaggi@waterwave.ch>
Copyright (c) 2007 Larry Finger <Larry.Finger@lwfinger.net>
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c
index b033b0ed4ca..192251adf98 100644
--- a/drivers/net/wireless/b43legacy/pio.c
+++ b/drivers/net/wireless/b43legacy/pio.c
@@ -4,7 +4,7 @@
PIO Transmission
- Copyright (c) 2005 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2005 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c
index 2df545cfad1..475eb14e665 100644
--- a/drivers/net/wireless/b43legacy/radio.c
+++ b/drivers/net/wireless/b43legacy/radio.c
@@ -4,7 +4,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Stefano Brivio <stefano.brivio@polimi.it>
- Michael Buesch <mbuesch@freenet.de>
+ Michael Buesch <m@bues.ch>
Danny van Dyk <kugelfang@gentoo.org>
Andreas Jaggi <andreas.jaggi@waterwave.ch>
Copyright (c) 2007 Larry Finger <Larry.Finger@lwfinger.net>
diff --git a/drivers/net/wireless/b43legacy/radio.h b/drivers/net/wireless/b43legacy/radio.h
index ec4de2811c5..bccb3d7da68 100644
--- a/drivers/net/wireless/b43legacy/radio.h
+++ b/drivers/net/wireless/b43legacy/radio.h
@@ -4,7 +4,7 @@
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Stefano Brivio <stefano.brivio@polimi.it>
- Michael Buesch <mbuesch@freenet.de>
+ Michael Buesch <m@bues.ch>
Danny van Dyk <kugelfang@gentoo.org>
Andreas Jaggi <andreas.jaggi@waterwave.ch>
diff --git a/drivers/net/wireless/b43legacy/rfkill.c b/drivers/net/wireless/b43legacy/rfkill.c
index b90f223fb31..c4559bcbc70 100644
--- a/drivers/net/wireless/b43legacy/rfkill.c
+++ b/drivers/net/wireless/b43legacy/rfkill.c
@@ -3,7 +3,7 @@
Broadcom B43 wireless driver
RFKILL support
- Copyright (c) 2007 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2007 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/wireless/b43legacy/sysfs.c b/drivers/net/wireless/b43legacy/sysfs.c
index 56c384fa9b1..57f8b089767 100644
--- a/drivers/net/wireless/b43legacy/sysfs.c
+++ b/drivers/net/wireless/b43legacy/sysfs.c
@@ -4,7 +4,7 @@
SYSFS support routines
- Copyright (c) 2006 Michael Buesch <mb@bu3sch.de>
+ Copyright (c) 2006 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 3a95541708a..5188fab0b37 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -6,7 +6,7 @@
Copyright (C) 2005 Martin Langer <martin-langer@gmx.de>
Copyright (C) 2005 Stefano Brivio <stefano.brivio@polimi.it>
- Copyright (C) 2005, 2006 Michael Buesch <mb@bu3sch.de>
+ Copyright (C) 2005, 2006 Michael Buesch <m@bues.ch>
Copyright (C) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (C) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
Copyright (C) 2007 Larry Finger <Larry.Finger@lwfinger.net>
@@ -321,11 +321,9 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
struct ieee80211_hdr *hdr;
int rts_rate;
int rts_rate_fb;
- int rts_rate_ofdm;
int rts_rate_fb_ofdm;
rts_rate = ieee80211_get_rts_cts_rate(dev->wl->hw, info)->hw_value;
- rts_rate_ofdm = b43legacy_is_ofdm_rate(rts_rate);
rts_rate_fb = b43legacy_calc_fallback_rate(rts_rate);
rts_rate_fb_ofdm = b43legacy_is_ofdm_rate(rts_rate_fb);
if (rts_rate_fb_ofdm)
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index c052a0d5cbd..5441ad19511 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -648,6 +648,8 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
0x74c5e40d),
PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil",
0x4b801a17),
+ PCMCIA_DEVICE_MANF_CARD_PROD_ID3(0x0156, 0x0002, "Version 01.02",
+ 0x4b74baa0),
PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus",
0x7a954bd9, 0x74be00c6),
PCMCIA_DEVICE_PROD_ID123(
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index d5084829c9e..89a116fba1d 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -855,6 +855,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
iface = netdev_priv(dev);
ether_setup(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
/* kernel callbacks */
if (iface) {
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index 88dc6a52bdf..7bb0b4b3f2c 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -1,6 +1,7 @@
#ifndef HOSTAP_WLAN_H
#define HOSTAP_WLAN_H
+#include <linux/interrupt.h>
#include <linux/wireless.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 44307753587..3774dd03474 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -287,7 +287,7 @@ static const char *command_types[] = {
"unused", /* HOST_INTERRUPT_COALESCING */
"undefined",
"CARD_DISABLE_PHY_OFF",
- "MSDU_TX_RATES" "undefined",
+ "MSDU_TX_RATES",
"undefined",
"SET_STATION_STAT_BITS",
"CLEAR_STATIONS_STAT_BITS",
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index 91795b5a93c..ecb561d7a7a 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/pci.h>
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index e5ad76cd77d..32a9966c3bf 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -442,7 +442,7 @@ int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
* 802.11, but makes it easier to use different keys with
* stations that do not support WEP key mapping). */
- if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
+ if (is_unicast_ether_addr(hdr->addr1) || local->bcrx_sta_key)
(void)hostap_handle_sta_crypto(local, hdr, &crypt,
&sta);
#endif
@@ -772,7 +772,7 @@ int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
#ifdef NOT_YET
if (ieee->iw_mode == IW_MODE_MASTER && !wds && ieee->ap->bridge_packets) {
- if (dst[0] & 0x01) {
+ if (is_multicast_ether_addr(dst)) {
/* copy multicast frame both to the higher layers and
* to the wireless media */
ieee->ap->bridged_multicast++;
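The two libipw_rx.c hunks above replace open-coded tests of the low bit of the first address octet with the etherdevice.h helpers. The substance is unchanged: the individual/group (I/G) bit of an Ethernet address is the least-significant bit of the first octet, which is exactly what the helpers test. A minimal userspace sketch of the same check (the function names here are illustrative, not the kernel helpers):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The I/G bit is the least-significant bit of the first octet:
     * 1 = group (multicast/broadcast), 0 = individual (unicast). */
    static bool addr_is_multicast(const uint8_t addr[6])
    {
            return addr[0] & 0x01;
    }

    static bool addr_is_unicast(const uint8_t addr[6])
    {
            return !addr_is_multicast(addr);
    }

    int main(void)
    {
            const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
            const uint8_t ucast[6] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };

            printf("bcast is multicast: %d\n", addr_is_multicast(bcast)); /* 1 */
            printf("ucast is unicast:   %d\n", addr_is_unicast(ucast));   /* 1 */
            return 0;
    }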
diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c
index d7bd6cf00a8..6623e505225 100644
--- a/drivers/net/wireless/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_wx.c
@@ -30,6 +30,7 @@
******************************************************************************/
+#include <linux/hardirq.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
index d096dc28204..73fe3cdf796 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
+++ b/drivers/net/wireless/iwlegacy/iwl-3945.c
@@ -408,7 +408,6 @@ void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
#endif
- iwl_legacy_recover_from_statistics(priv, pkt);
memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
}
@@ -1747,7 +1746,11 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
}
memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-
+ /*
+ * We do not commit tx power settings while a channel change is in
+ * progress; do it now if the tx power changed.
+ */
+ iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
return 0;
}
@@ -2640,7 +2643,6 @@ static struct iwl_lib_ops iwl3945_lib = {
.txq_free_tfd = iwl3945_hw_txq_free_tfd,
.txq_init = iwl3945_hw_tx_queue_init,
.load_ucode = iwl3945_load_bsm,
- .dump_nic_event_log = iwl3945_dump_nic_event_log,
.dump_nic_error_log = iwl3945_dump_nic_error_log,
.apm_ops = {
.init = iwl3945_apm_init,
@@ -2698,9 +2700,7 @@ static struct iwl_base_params iwl3945_base_params = {
.set_l0s = false,
.use_bsm = true,
.led_compensation = 64,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
- .max_event_log_size = 512,
};
static struct iwl_cfg iwl3945_bg_cfg = {
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
index a7a4739880d..2be6d9e3b01 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
@@ -694,47 +694,6 @@ void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
sizeof(struct iwl_rx_phy_res));
}
-static int iwl4965_get_single_channel_for_scan(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- enum ieee80211_band band,
- struct iwl_scan_channel *scan_ch)
-{
- const struct ieee80211_supported_band *sband;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added = 0;
- u16 channel = 0;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband) {
- IWL_ERR(priv, "invalid band\n");
- return added;
- }
-
- active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
- passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- channel = iwl_legacy_get_single_channel_number(priv, band);
- if (channel) {
- scan_ch->channel = cpu_to_le16(channel);
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
- added++;
- } else
- IWL_ERR(priv, "no valid channel found\n");
- return added;
-}
-
static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
struct ieee80211_vif *vif,
enum ieee80211_band band,
@@ -858,16 +817,13 @@ int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
if (iwl_legacy_is_any_associated(priv)) {
- u16 interval = 0;
+ u16 interval;
u32 extra;
u32 suspend_time = 100;
u32 scan_suspend_time = 100;
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- if (priv->is_internal_short_scan)
- interval = 0;
- else
- interval = vif->bss_conf.beacon_int;
+ interval = vif->bss_conf.beacon_int;
scan->suspend_time = 0;
scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -882,9 +838,7 @@ int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan_suspend_time, interval);
}
- if (priv->is_internal_short_scan) {
- IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
- } else if (priv->scan_request->n_ssids) {
+ if (priv->scan_request->n_ssids) {
int i, p = 0;
IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
for (i = 0; i < priv->scan_request->n_ssids; i++) {
@@ -981,38 +935,21 @@ int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
scan->rx_chain = cpu_to_le16(rx_chain);
- if (!priv->is_internal_short_scan) {
- cmd_len = iwl_legacy_fill_probe_req(priv,
+
+ cmd_len = iwl_legacy_fill_probe_req(priv,
(struct ieee80211_mgmt *)scan->data,
vif->addr,
priv->scan_request->ie,
priv->scan_request->ie_len,
IWL_MAX_SCAN_SIZE - sizeof(*scan));
- } else {
- /* use bcast addr, will not be transmitted but must be valid */
- cmd_len = iwl_legacy_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- iwlegacy_bcast_addr, NULL, 0,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
-
- }
scan->tx_cmd.len = cpu_to_le16(cmd_len);
scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
RXON_FILTER_BCON_AWARE_MSK);
- if (priv->is_internal_short_scan) {
- scan->channel_count =
- iwl4965_get_single_channel_for_scan(priv, vif, band,
- (void *)&scan->data[le16_to_cpu(
- scan->tx_cmd.len)]);
- } else {
- scan->channel_count =
- iwl4965_get_channels_for_scan(priv, vif, band,
- is_active, n_probes,
- (void *)&scan->data[le16_to_cpu(
- scan->tx_cmd.len)]);
- }
+ scan->channel_count = iwl4965_get_channels_for_scan(priv, vif, band,
+ is_active, n_probes,
+ (void *)&scan->data[cmd_len]);
if (scan->channel_count == 0) {
IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
return -EIO;
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
index 24d149909ba..9b65153bdd0 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
@@ -2275,6 +2275,9 @@ iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
if (rate_control_send_low(sta, priv_sta, txrc))
return;
+ if (!lq_sta)
+ return;
+
rate_idx = lq_sta->last_txrate_idx;
if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
index b9fa2f6411a..2b144bbfc3c 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
@@ -151,81 +151,6 @@ static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
#define REG_RECALIB_PERIOD (60)
-/**
- * iwl4965_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-bool iwl4965_good_plcp_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt)
-{
- bool rc = true;
- int combined_plcp_delta;
- unsigned int plcp_msec;
- unsigned long plcp_received_jiffies;
-
- if (priv->cfg->base_params->plcp_delta_threshold ==
- IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
- IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
- return rc;
- }
-
- /*
- * check for plcp_err and trigger radio reset if it exceeds
- * the plcp error threshold plcp_delta.
- */
- plcp_received_jiffies = jiffies;
- plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
- (long) priv->plcp_jiffies);
- priv->plcp_jiffies = plcp_received_jiffies;
- /*
- * check to make sure plcp_msec is not 0 to prevent division
- * by zero.
- */
- if (plcp_msec) {
- struct statistics_rx_phy *ofdm;
- struct statistics_rx_ht_phy *ofdm_ht;
-
- ofdm = &pkt->u.stats.rx.ofdm;
- ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
- combined_plcp_delta =
- (le32_to_cpu(ofdm->plcp_err) -
- le32_to_cpu(priv->_4965.statistics.
- rx.ofdm.plcp_err)) +
- (le32_to_cpu(ofdm_ht->plcp_err) -
- le32_to_cpu(priv->_4965.statistics.
- rx.ofdm_ht.plcp_err));
-
- if ((combined_plcp_delta > 0) &&
- ((combined_plcp_delta * 100) / plcp_msec) >
- priv->cfg->base_params->plcp_delta_threshold) {
- /*
- * if plcp_err exceed the threshold,
- * the following data is printed in csv format:
- * Text: plcp_err exceeded %d,
- * Received ofdm.plcp_err,
- * Current ofdm.plcp_err,
- * Received ofdm_ht.plcp_err,
- * Current ofdm_ht.plcp_err,
- * combined_plcp_delta,
- * plcp_msec
- */
- IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
- "%u, %u, %u, %u, %d, %u mSecs\n",
- priv->cfg->base_params->plcp_delta_threshold,
- le32_to_cpu(ofdm->plcp_err),
- le32_to_cpu(ofdm->plcp_err),
- le32_to_cpu(ofdm_ht->plcp_err),
- le32_to_cpu(ofdm_ht->plcp_err),
- combined_plcp_delta, plcp_msec);
-
- rc = false;
- }
- }
- return rc;
-}
-
void iwl4965_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
@@ -248,8 +173,7 @@ void iwl4965_rx_statistics(struct iwl_priv *priv,
iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
- iwl_legacy_recover_from_statistics(priv, pkt);
-
+ /* TODO: reading some of the statistics is unneeded */
memcpy(&priv->_4965.statistics, &pkt->u.stats,
sizeof(priv->_4965.statistics));
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
index 79ac081832f..ac4f64de136 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
@@ -240,8 +240,7 @@ static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
case WLAN_CIPHER_SUITE_TKIP:
tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
- ieee80211_get_tkip_key(keyconf, skb_frag,
- IEEE80211_TKIP_P2_KEY, tx_cmd->key);
+ ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
break;
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index facc94e74b0..ecdc6e55742 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -496,7 +496,7 @@ static s32 iwl4965_get_tx_atten_grp(u16 channel)
channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
return CALIB_CH_GROUP_4;
- return -1;
+ return -EINVAL;
}
static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
@@ -915,7 +915,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
if (txatten_grp < 0) {
IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
channel);
- return -EINVAL;
+ return txatten_grp;
}
IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
@@ -1185,8 +1185,6 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
sizeof(rxon_assoc), &rxon_assoc, NULL);
- if (ret)
- return ret;
return ret;
}
@@ -1237,7 +1235,12 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
iwl_legacy_print_rx_config_cmd(priv, ctx);
- goto set_tx_power;
+ /*
+ * We do not commit tx power settings while a channel change is in
+ * progress; do it now if the tx power changed.
+ */
+ iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
+ return 0;
}
/* If we are currently associated and the new config requires
@@ -1317,7 +1320,6 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
iwl4965_init_sensitivity(priv);
-set_tx_power:
/* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames */
ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
@@ -2071,7 +2073,6 @@ static struct iwl_lib_ops iwl4965_lib = {
.is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
.init_alive_start = iwl4965_init_alive_start,
.load_ucode = iwl4965_load_bsm,
- .dump_nic_event_log = iwl4965_dump_nic_event_log,
.dump_nic_error_log = iwl4965_dump_nic_error_log,
.dump_fh = iwl4965_dump_fh,
.set_channel_switch = iwl4965_hw_channel_switch,
@@ -2102,7 +2103,6 @@ static struct iwl_lib_ops iwl4965_lib = {
.tx_stats_read = iwl4965_ucode_tx_stats_read,
.general_stats_read = iwl4965_ucode_general_stats_read,
},
- .check_plcp_health = iwl4965_good_plcp_health,
};
static const struct iwl_legacy_ops iwl4965_legacy_ops = {
@@ -2152,10 +2152,8 @@ static struct iwl_base_params iwl4965_base_params = {
.use_bsm = true,
.led_compensation = 61,
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
- .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.temperature_kelvin = true,
- .max_event_log_size = 512,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/iwl-commands.h
index 17a1d504348..89904054473 100644
--- a/drivers/net/wireless/iwlegacy/iwl-commands.h
+++ b/drivers/net/wireless/iwlegacy/iwl-commands.h
@@ -2297,14 +2297,7 @@ struct iwl_spectrum_notification {
#define IWL_POWER_VEC_SIZE 5
#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
-#define IWL_POWER_POWER_SAVE_ENA_MSK cpu_to_le16(BIT(0))
-#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK cpu_to_le16(BIT(1))
-#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
-#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4))
-#define IWL_POWER_BEACON_FILTERING cpu_to_le16(BIT(5))
-#define IWL_POWER_SHADOW_REG_ENA cpu_to_le16(BIT(6))
-#define IWL_POWER_CT_KILL_SET cpu_to_le16(BIT(7))
struct iwl3945_powertable_cmd {
__le16 flags;
@@ -2624,8 +2617,8 @@ struct iwl_scanstart_notification {
__le32 status;
} __packed;
-#define SCAN_OWNER_STATUS 0x1;
-#define MEASURE_OWNER_STATUS 0x2;
+#define SCAN_OWNER_STATUS 0x1
+#define MEASURE_OWNER_STATUS 0x2
#define IWL_PROBE_STATUS_OK 0
#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
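The SCAN_OWNER_STATUS/MEASURE_OWNER_STATUS change above drops stray semicolons from object-like macros. The trailing semicolon is carried into every expansion, which happens to be harmless in a plain assignment but breaks the macro in expression context. A small standalone illustration (the STATUS_* names are made up, not from the driver):

    #include <stdio.h>

    #define STATUS_GOOD  0x1   /* correct: no trailing semicolon       */
    #define STATUS_BAD   0x2;  /* buggy: expands to "0x2;" everywhere  */

    int main(void)
    {
            int status = STATUS_GOOD;   /* assignment still works even
                                         * with the buggy form */

            if (status == STATUS_GOOD)  /* fine */
                    printf("good\n");

            /* The next comparison would not compile if uncommented,
             * because it expands to "if (status == 0x2;)":
             *
             * if (status == STATUS_BAD)
             *         printf("bad\n");
             */
            return 0;
    }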
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index 3be76bd5499..35cd2537e7f 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -931,7 +931,6 @@ void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
priv->cfg->ops->lib->dump_nic_error_log(priv);
if (priv->cfg->ops->lib->dump_fh)
priv->cfg->ops->lib->dump_fh(priv, NULL, false);
- priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
iwl_legacy_print_rx_config_cmd(priv,
@@ -1707,41 +1706,14 @@ iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
EXPORT_SYMBOL(iwl_legacy_update_stats);
#endif
-static void _iwl_legacy_force_rf_reset(struct iwl_priv *priv)
-{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (!iwl_legacy_is_any_associated(priv)) {
- IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
- return;
- }
- /*
- * There is no easy and better way to force reset the radio,
- * the only known method is switching channel which will force to
- * reset and tune the radio.
- * Use internal short scan (single channel) operation to should
- * achieve this objective.
- * Driver should reset the radio when number of consecutive missed
- * beacon, or any other uCode error condition detected.
- */
- IWL_DEBUG_INFO(priv, "perform radio reset.\n");
- iwl_legacy_internal_short_hw_scan(priv);
-}
-
-
-int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external)
+int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
{
struct iwl_force_reset *force_reset;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return -EINVAL;
- if (mode >= IWL_MAX_FORCE_RESET) {
- IWL_DEBUG_INFO(priv, "invalid reset request.\n");
- return -EINVAL;
- }
- force_reset = &priv->force_reset[mode];
+ force_reset = &priv->force_reset;
force_reset->reset_request_count++;
if (!external) {
if (force_reset->last_force_reset_jiffies &&
@@ -1754,37 +1726,34 @@ int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external)
}
force_reset->reset_success_count++;
force_reset->last_force_reset_jiffies = jiffies;
- IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
- switch (mode) {
- case IWL_RF_RESET:
- _iwl_legacy_force_rf_reset(priv);
- break;
- case IWL_FW_RESET:
- /*
- * if the request is from external(ex: debugfs),
- * then always perform the request in regardless the module
- * parameter setting
- * if the request is from internal (uCode error or driver
- * detect failure), then fw_restart module parameter
- * need to be check before performing firmware reload
- */
- if (!external && !priv->cfg->mod_params->restart_fw) {
- IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
- "module parameter setting\n");
- break;
- }
- IWL_ERR(priv, "On demand firmware reload\n");
- /* Set the FW error flag -- cleared on iwl_down */
- set_bit(STATUS_FW_ERROR, &priv->status);
- wake_up_interruptible(&priv->wait_command_queue);
- /*
- * Keep the restart process from trying to send host
- * commands by clearing the INIT status bit
- */
- clear_bit(STATUS_READY, &priv->status);
- queue_work(priv->workqueue, &priv->restart);
- break;
+
+ /*
+ * If the request comes from outside the driver (e.g. debugfs),
+ * always perform it regardless of the module parameter setting.
+ * If it comes from inside (uCode error or a driver-detected
+ * failure), the fw_restart module parameter needs to be checked
+ * before reloading the firmware.
+ */
+
+ if (!external && !priv->cfg->mod_params->restart_fw) {
+ IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
+ "module parameter setting\n");
+ return 0;
}
+
+ IWL_ERR(priv, "On demand firmware reload\n");
+
+ /* Set the FW error flag -- cleared on iwl_down */
+ set_bit(STATUS_FW_ERROR, &priv->status);
+ wake_up_interruptible(&priv->wait_command_queue);
+ /*
+ * Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit
+ */
+ clear_bit(STATUS_READY, &priv->status);
+ queue_work(priv->workqueue, &priv->restart);
+
return 0;
}
@@ -1879,7 +1848,7 @@ static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
if (time_after(jiffies, timeout)) {
IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
q->id, priv->cfg->base_params->wd_timeout);
- ret = iwl_legacy_force_reset(priv, IWL_FW_RESET, false);
+ ret = iwl_legacy_force_reset(priv, false);
return (ret == -EAGAIN) ? 0 : 1;
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
index c5fbda0760d..84da79376ef 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ b/drivers/net/wireless/iwlegacy/iwl-core.h
@@ -143,8 +143,7 @@ struct iwl_lib_ops {
int (*is_valid_rtc_data_addr)(u32 addr);
/* 1st ucode load */
int (*load_ucode)(struct iwl_priv *priv);
- int (*dump_nic_event_log)(struct iwl_priv *priv,
- bool full_log, char **buf, bool display);
+
void (*dump_nic_error_log)(struct iwl_priv *priv);
int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
int (*set_channel_switch)(struct iwl_priv *priv,
@@ -161,9 +160,6 @@ struct iwl_lib_ops {
/* temperature */
struct iwl_temp_ops temp_ops;
- /* check for plcp health */
- bool (*check_plcp_health)(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
struct iwl_debugfs_ops debugfs_ops;
@@ -207,11 +203,8 @@ struct iwl_mod_params {
* to the deviation to achieve the desired led frequency.
* The detail algorithm is described in iwl-led.c
* @chain_noise_num_beacons: number of beacons used to compute chain noise
- * @plcp_delta_threshold: plcp error rate threshold used to trigger
- * radio tuning when there is a high receiving plcp error rate
* @wd_timeout: TX queues watchdog timeout
* @temperature_kelvin: temperature report by uCode in kelvin
- * @max_event_log_size: size of event log buffer size for ucode event logging
* @ucode_tracing: support ucode continuous tracing
* @sensitivity_calib_by_driver: driver has the capability to perform
* sensitivity calibration operation
@@ -229,10 +222,8 @@ struct iwl_base_params {
u16 led_compensation;
int chain_noise_num_beacons;
- u8 plcp_delta_threshold;
unsigned int wd_timeout;
bool temperature_kelvin;
- u32 max_event_log_size;
const bool ucode_tracing;
const bool sensitivity_calib_by_driver;
const bool chain_noise_calib_by_driver;
@@ -441,7 +432,7 @@ int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
-int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external);
+int iwl_legacy_force_reset(struct iwl_priv *priv, bool external);
u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
struct ieee80211_mgmt *frame,
const u8 *ta, const u8 *ie, int ie_len, int left);
@@ -493,7 +484,7 @@ static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
{
int pos;
u16 pci_lnk_ctl;
- pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(priv->pci_dev);
pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
return pci_lnk_ctl;
}
@@ -521,8 +512,6 @@ extern const struct dev_pm_ops iwl_legacy_pm_ops;
* Error Handling Debugging
******************************************************/
void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
-int iwl4965_dump_nic_event_log(struct iwl_priv *priv,
- bool full_log, char **buf, bool display);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
index 2d32438b4cb..996996a7165 100644
--- a/drivers/net/wireless/iwlegacy/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
@@ -391,48 +391,6 @@ static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
return ret;
}
-static ssize_t iwl_legacy_dbgfs_log_event_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
- ssize_t ret = -ENOMEM;
-
- ret = pos = priv->cfg->ops->lib->dump_nic_event_log(
- priv, true, &buf, true);
- if (buf) {
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- }
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_log_event_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- u32 event_log_flag;
- char buf[8];
- int buf_size;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &event_log_flag) != 1)
- return -EFAULT;
- if (event_log_flag == 1)
- priv->cfg->ops->lib->dump_nic_event_log(priv, true,
- NULL, false);
-
- return count;
-}
-
-
-
static ssize_t
iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -706,7 +664,6 @@ static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
}
DEBUGFS_READ_WRITE_FILE_OPS(sram);
-DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
@@ -1098,56 +1055,6 @@ static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
return count;
}
-static ssize_t iwl_legacy_dbgfs_ucode_tracing_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char buf[128];
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
- priv->event_log.ucode_trace ? "On" : "Off");
- pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
- priv->event_log.non_wraps_count);
- pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
- priv->event_log.wraps_once_count);
- pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
- priv->event_log.wraps_more_count);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_tracing_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int trace;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &trace) != 1)
- return -EFAULT;
-
- if (trace) {
- priv->event_log.ucode_trace = true;
- /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
- mod_timer(&priv->ucode_trace,
- jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
- } else {
- priv->event_log.ucode_trace = false;
- del_timer_sync(&priv->ucode_trace);
- }
-
- return count;
-}
-
static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
@@ -1236,72 +1143,31 @@ static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
return count;
}
-static ssize_t iwl_legacy_dbgfs_plcp_delta_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char buf[12];
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
- priv->cfg->base_params->plcp_delta_threshold);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_plcp_delta_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int plcp;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &plcp) != 1)
- return -EINVAL;
- if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
- (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
- priv->cfg->base_params->plcp_delta_threshold =
- IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
- else
- priv->cfg->base_params->plcp_delta_threshold = plcp;
- return count;
-}
-
static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
struct iwl_priv *priv = file->private_data;
- int i, pos = 0;
+ int pos = 0;
char buf[300];
const size_t bufsz = sizeof(buf);
struct iwl_force_reset *force_reset;
- for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
- force_reset = &priv->force_reset[i];
- pos += scnprintf(buf + pos, bufsz - pos,
- "Force reset method %d\n", i);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of reset request: %d\n",
- force_reset->reset_request_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of reset request success: %d\n",
- force_reset->reset_success_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of reset request reject: %d\n",
- force_reset->reset_reject_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\treset duration: %lu\n",
- force_reset->reset_duration);
- }
+ force_reset = &priv->force_reset;
+
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request: %d\n",
+ force_reset->reset_request_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request success: %d\n",
+ force_reset->reset_success_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request reject: %d\n",
+ force_reset->reset_reject_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\treset duration: %lu\n",
+ force_reset->reset_duration);
+
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -1309,25 +1175,11 @@ static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos) {
+ int ret;
struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int reset, ret;
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &reset) != 1)
- return -EINVAL;
- switch (reset) {
- case IWL_RF_RESET:
- case IWL_FW_RESET:
- ret = iwl_legacy_force_reset(priv, reset, true);
- break;
- default:
- return -EINVAL;
- }
+ ret = iwl_legacy_force_reset(priv, true);
+
return ret ? ret : count;
}
@@ -1367,10 +1219,8 @@ DEBUGFS_READ_FILE_OPS(chain_noise);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
-DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
-DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
DEBUGFS_READ_FILE_OPS(rxon_flags);
DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
@@ -1403,7 +1253,6 @@ int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
@@ -1420,7 +1269,6 @@ int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
@@ -1430,8 +1278,6 @@ int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
if (priv->cfg->base_params->chain_noise_calib_by_driver)
DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
- if (priv->cfg->base_params->ucode_tracing)
- DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
index ea30122669e..9c786edf56f 100644
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -32,6 +32,7 @@
#ifndef __iwl_legacy_dev_h__
#define __iwl_legacy_dev_h__
+#include <linux/interrupt.h>
#include <linux/pci.h> /* for struct pci_device_id */
#include <linux/kernel.h>
#include <linux/leds.h>
@@ -855,32 +856,6 @@ struct traffic_stats {
};
/*
- * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
- * to perform continuous uCode event logging operation if enabled
- */
-#define UCODE_TRACE_PERIOD (100)
-
-/*
- * iwl_event_log: current uCode event log position
- *
- * @ucode_trace: enable/disable ucode continuous trace timer
- * @num_wraps: how many times the event buffer wraps
- * @next_entry: the entry just before the next one that uCode would fill
- * @non_wraps_count: counter for no wrap detected when dump ucode events
- * @wraps_once_count: counter for wrap once detected when dump ucode events
- * @wraps_more_count: counter for wrap more than once detected
- * when dump ucode events
- */
-struct iwl_event_log {
- bool ucode_trace;
- u32 num_wraps;
- u32 next_entry;
- int non_wraps_count;
- int wraps_once_count;
- int wraps_more_count;
-};
-
-/*
* host interrupt timeout value
* used with setting interrupt coalescing timer
* the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
@@ -895,18 +870,6 @@ struct iwl_event_log {
#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
-/*
- * This is the threshold value of plcp error rate per 100mSecs. It is
- * used to set and check for the validity of plcp_delta.
- */
-#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1)
-#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
-#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
-#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
-#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
-#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0)
-
-#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
/* TX queue watchdog timeouts in mSecs */
@@ -914,12 +877,6 @@ struct iwl_event_log {
#define IWL_LONG_WD_TIMEOUT (10000)
#define IWL_MAX_WD_TIMEOUT (120000)
-enum iwl_reset {
- IWL_RF_RESET = 0,
- IWL_FW_RESET,
- IWL_MAX_FORCE_RESET,
-};
-
struct iwl_force_reset {
int reset_request_count;
int reset_success_count;
@@ -1032,11 +989,8 @@ struct iwl_priv {
/* track IBSS manager (last beacon) status */
u32 ibss_manager;
- /* storing the jiffies when the plcp error rate is received */
- unsigned long plcp_jiffies;
-
/* force reset */
- struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
+ struct iwl_force_reset force_reset;
/* we allocate array of iwl_channel_info for NIC's valid channels.
* Access via channel # using indirect index array */
@@ -1057,7 +1011,6 @@ struct iwl_priv {
enum ieee80211_band scan_band;
struct cfg80211_scan_request *scan_request;
struct ieee80211_vif *scan_vif;
- bool is_internal_short_scan;
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
u8 mgmt_tx_ant;
@@ -1212,12 +1165,6 @@ struct iwl_priv {
#endif
#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
struct {
- /*
- * reporting the number of tids has AGG on. 0 means
- * no AGGREGATION
- */
- u8 agg_tids_count;
-
struct iwl_rx_phy_res last_phy_res;
bool last_phy_res_valid;
@@ -1256,7 +1203,6 @@ struct iwl_priv {
struct iwl_rxon_context *beacon_ctx;
struct sk_buff *beacon_skb;
- struct work_struct start_internal_scan;
struct work_struct tx_flush;
struct tasklet_struct irq_tasklet;
@@ -1293,12 +1239,9 @@ struct iwl_priv {
u32 disable_tx_power_cal;
struct work_struct run_time_calib_work;
struct timer_list statistics_periodic;
- struct timer_list ucode_trace;
struct timer_list watchdog;
bool hw_ready;
- struct iwl_event_log event_log;
-
struct led_classdev led;
unsigned long blink_on, blink_off;
bool led_registered;
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
index 080b852b33b..acec99197ce 100644
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
@@ -38,8 +38,5 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_cont_event);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_wrap_event);
#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
index 9612aa0f6ec..a443725ba6b 100644
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
@@ -96,47 +96,6 @@ TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_legacy_ucode
-TRACE_EVENT(iwlwifi_legacy_dev_ucode_cont_event,
- TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
- TP_ARGS(priv, time, data, ev),
- TP_STRUCT__entry(
- PRIV_ENTRY
-
- __field(u32, time)
- __field(u32, data)
- __field(u32, ev)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->time = time;
- __entry->data = data;
- __entry->ev = ev;
- ),
- TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
- __entry->priv, __entry->time, __entry->data, __entry->ev)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_ucode_wrap_event,
- TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
- TP_ARGS(priv, wraps, n_entry, p_entry),
- TP_STRUCT__entry(
- PRIV_ENTRY
-
- __field(u32, wraps)
- __field(u32, n_entry)
- __field(u32, p_entry)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->wraps = wraps;
- __entry->n_entry = n_entry;
- __entry->p_entry = p_entry;
- ),
- TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
- __entry->priv, __entry->wraps, __entry->n_entry,
- __entry->p_entry)
-);
-
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi
@@ -242,25 +201,6 @@ TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
__entry->blink2, __entry->ilink1, __entry->ilink2)
);
-TRACE_EVENT(iwlwifi_legacy_dev_ucode_event,
- TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
- TP_ARGS(priv, time, data, ev),
- TP_STRUCT__entry(
- PRIV_ENTRY
-
- __field(u32, time)
- __field(u32, data)
- __field(u32, ev)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->time = time;
- __entry->data = data;
- __entry->ev = ev;
- ),
- TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
- __entry->priv, __entry->time, __entry->data, __entry->ev)
-);
#endif /* __IWLWIFI_DEVICE_TRACE */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
index cb346d1a9ff..5bf3f49b74a 100644
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
@@ -316,7 +316,6 @@ static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
break;
default:
BUG();
- return;
}
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
index a6effdae63f..5cf23eaecbb 100644
--- a/drivers/net/wireless/iwlegacy/iwl-helpers.h
+++ b/drivers/net/wireless/iwlegacy/iwl-helpers.h
@@ -132,7 +132,16 @@ static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
ieee80211_stop_queue(priv->hw, ac);
}
+#ifdef ieee80211_stop_queue
+#undef ieee80211_stop_queue
+#endif
+
#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+
+#ifdef ieee80211_wake_queue
+#undef ieee80211_wake_queue
+#endif
+
#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
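The iwl-helpers.h hunk above wraps the "poisoning" defines in #ifdef/#undef guards so that redefining ieee80211_stop_queue and ieee80211_wake_queue to the DO_NOT_USE_* names does not trigger a macro-redefinition warning if the names already happen to be macros. The pattern itself is generic preprocessor hygiene; a minimal standalone sketch under that assumption (FOO and DO_NOT_USE_foo are invented names):

    #include <stdio.h>

    #define FOO real_foo   /* pretend an earlier header defined this */

    /* Guarded redefinition: undefine first so the new definition does
     * not produce a "FOO redefined" diagnostic. */
    #ifdef FOO
    #undef FOO
    #endif

    #define FOO DO_NOT_USE_foo

    int main(void)
    {
            /* FOO now expands to DO_NOT_USE_foo; any attempt to call it
             * as a function would fail to compile or link, which is the
             * point of the poisoning trick in the driver header. */
            printf("FOO is now poisoned at the preprocessor level\n");
            return 0;
    }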
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
index 654cf233a38..9b5d0abe8be 100644
--- a/drivers/net/wireless/iwlegacy/iwl-rx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-rx.c
@@ -227,27 +227,6 @@ void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
-void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt)
-{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
- if (iwl_legacy_is_any_associated(priv)) {
- if (priv->cfg->ops->lib->check_plcp_health) {
- if (!priv->cfg->ops->lib->check_plcp_health(
- priv, pkt)) {
- /*
- * high plcp error detected
- * reset Radio
- */
- iwl_legacy_force_reset(priv,
- IWL_RF_RESET, false);
- }
- }
- }
-}
-EXPORT_SYMBOL(iwl_legacy_recover_from_statistics);
-
/*
* returns non-zero if packet should be dropped
*/
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
index 353234a02c6..a6b5222fc59 100644
--- a/drivers/net/wireless/iwlegacy/iwl-scan.c
+++ b/drivers/net/wireless/iwlegacy/iwl-scan.c
@@ -101,7 +101,6 @@ static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
ieee80211_scan_completed(priv->hw, aborted);
}
- priv->is_internal_short_scan = false;
priv->scan_vif = NULL;
priv->scan_request = NULL;
}
@@ -329,10 +328,8 @@ void iwl_legacy_init_scan_params(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_legacy_init_scan_params);
-static int __must_check iwl_legacy_scan_initiate(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- bool internal,
- enum ieee80211_band band)
+static int iwl_legacy_scan_initiate(struct iwl_priv *priv,
+ struct ieee80211_vif *vif)
{
int ret;
@@ -359,18 +356,14 @@ static int __must_check iwl_legacy_scan_initiate(struct iwl_priv *priv,
return -EBUSY;
}
- IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
- internal ? "internal short " : "");
+ IWL_DEBUG_SCAN(priv, "Starting scan...\n");
set_bit(STATUS_SCANNING, &priv->status);
- priv->is_internal_short_scan = internal;
priv->scan_start = jiffies;
- priv->scan_band = band;
ret = priv->cfg->ops->utils->request_scan(priv, vif);
if (ret) {
clear_bit(STATUS_SCANNING, &priv->status);
- priv->is_internal_short_scan = false;
return ret;
}
@@ -394,8 +387,7 @@ int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
- if (test_bit(STATUS_SCANNING, &priv->status) &&
- !priv->is_internal_short_scan) {
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
ret = -EAGAIN;
goto out_unlock;
@@ -404,17 +396,9 @@ int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
/* mac80211 will only ask for one band at a time */
priv->scan_request = req;
priv->scan_vif = vif;
+ priv->scan_band = req->channels[0]->band;
- /*
- * If an internal scan is in progress, just set
- * up the scan_request as per above.
- */
- if (priv->is_internal_short_scan) {
- IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
- ret = 0;
- } else
- ret = iwl_legacy_scan_initiate(priv, vif, false,
- req->channels[0]->band);
+ ret = iwl_legacy_scan_initiate(priv, vif);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -425,40 +409,6 @@ out_unlock:
}
EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
-/*
- * internal short scan, this function should only been called while associated.
- * It will reset and tune the radio to prevent possible RF related problem
- */
-void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv)
-{
- queue_work(priv->workqueue, &priv->start_internal_scan);
-}
-
-static void iwl_legacy_bg_start_internal_scan(struct work_struct *work)
-{
- struct iwl_priv *priv =
- container_of(work, struct iwl_priv, start_internal_scan);
-
- IWL_DEBUG_SCAN(priv, "Start internal scan\n");
-
- mutex_lock(&priv->mutex);
-
- if (priv->is_internal_short_scan == true) {
- IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
- goto unlock;
- }
-
- if (test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
- goto unlock;
- }
-
- if (iwl_legacy_scan_initiate(priv, NULL, true, priv->band))
- IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
- unlock:
- mutex_unlock(&priv->mutex);
-}
-
static void iwl_legacy_bg_scan_check(struct work_struct *data)
{
struct iwl_priv *priv =
@@ -542,8 +492,7 @@ static void iwl_legacy_bg_scan_completed(struct work_struct *work)
container_of(work, struct iwl_priv, scan_completed);
bool aborted;
- IWL_DEBUG_SCAN(priv, "Completed %sscan.\n",
- priv->is_internal_short_scan ? "internal short " : "");
+ IWL_DEBUG_SCAN(priv, "Completed scan.\n");
cancel_delayed_work(&priv->scan_check);
@@ -558,27 +507,6 @@ static void iwl_legacy_bg_scan_completed(struct work_struct *work)
goto out_settings;
}
- if (priv->is_internal_short_scan && !aborted) {
- int err;
-
- /* Check if mac80211 requested scan during our internal scan */
- if (priv->scan_request == NULL)
- goto out_complete;
-
- /* If so request a new scan */
- err = iwl_legacy_scan_initiate(priv, priv->scan_vif, false,
- priv->scan_request->channels[0]->band);
- if (err) {
- IWL_DEBUG_SCAN(priv,
- "failed to initiate pending scan: %d\n", err);
- aborted = true;
- goto out_complete;
- }
-
- goto out;
- }
-
-out_complete:
iwl_legacy_complete_scan(priv, aborted);
out_settings:
@@ -590,8 +518,7 @@ out_settings:
* We do not commit power settings while scan is pending,
* do it now if the settings changed.
*/
- iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next,
- false);
+ iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
priv->cfg->ops->utils->post_scan(priv);
@@ -604,15 +531,12 @@ void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
{
INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
- INIT_WORK(&priv->start_internal_scan,
- iwl_legacy_bg_start_internal_scan);
INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
}
EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
{
- cancel_work_sync(&priv->start_internal_scan);
cancel_work_sync(&priv->abort_scan);
cancel_work_sync(&priv->scan_completed);
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index 0ee6be6a9c5..795826a014e 100644
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -1409,212 +1409,6 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
}
}
-#define EVENT_START_OFFSET (6 * sizeof(u32))
-
-/**
- * iwl3945_print_event_log - Dump error event log to syslog
- *
- */
-static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
- u32 num_events, u32 mode,
- int pos, char **buf, size_t bufsz)
-{
- u32 i;
- u32 base; /* SRAM byte address of event log header */
- u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
- u32 ptr; /* SRAM byte address of log data */
- u32 ev, time, data; /* event log data */
- unsigned long reg_flags;
-
- if (num_events == 0)
- return pos;
-
- base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
-
- if (mode == 0)
- event_size = 2 * sizeof(u32);
- else
- event_size = 3 * sizeof(u32);
-
- ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
-
- /* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
-
- /* Set starting address; reads will auto-increment */
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
- rmb();
-
- /* "time" is actually "data" for mode 0 (no timestamp).
- * place event id # at far right for easier visual parsing. */
- for (i = 0; i < num_events; i++) {
- ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (mode == 0) {
- /* data, ev */
- if (bufsz) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- "0x%08x:%04u\n",
- time, ev);
- } else {
- IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
- trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
- time, ev);
- }
- } else {
- data = _iwl_legacy_read_direct32(priv,
- HBUS_TARG_MEM_RDAT);
- if (bufsz) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- "%010u:0x%08x:%04u\n",
- time, data, ev);
- } else {
- IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
- time, data, ev);
- trace_iwlwifi_legacy_dev_ucode_event(priv, time,
- data, ev);
- }
- }
- }
-
- /* Allow device to power down */
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return pos;
-}
-
-/**
- * iwl3945_print_last_event_logs - Dump the newest # of event log to syslog
- */
-static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
- u32 num_wraps, u32 next_entry,
- u32 size, u32 mode,
- int pos, char **buf, size_t bufsz)
-{
- /*
- * display the newest DEFAULT_LOG_ENTRIES entries
- * i.e the entries just before the next ont that uCode would fill.
- */
- if (num_wraps) {
- if (next_entry < size) {
- pos = iwl3945_print_event_log(priv,
- capacity - (size - next_entry),
- size - next_entry, mode,
- pos, buf, bufsz);
- pos = iwl3945_print_event_log(priv, 0,
- next_entry, mode,
- pos, buf, bufsz);
- } else
- pos = iwl3945_print_event_log(priv, next_entry - size,
- size, mode,
- pos, buf, bufsz);
- } else {
- if (next_entry < size)
- pos = iwl3945_print_event_log(priv, 0,
- next_entry, mode,
- pos, buf, bufsz);
- else
- pos = iwl3945_print_event_log(priv, next_entry - size,
- size, mode,
- pos, buf, bufsz);
- }
- return pos;
-}
-
-#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
-
-int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
- char **buf, bool display)
-{
- u32 base; /* SRAM byte address of event log header */
- u32 capacity; /* event log capacity in # entries */
- u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
- u32 num_wraps; /* # times uCode wrapped to top of log */
- u32 next_entry; /* index of next entry to be written by uCode */
- u32 size; /* # entries that we'll print */
- int pos = 0;
- size_t bufsz = 0;
-
- base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
- if (!iwl3945_hw_valid_rtc_data_addr(base)) {
- IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
- return -EINVAL;
- }
-
- /* event log header */
- capacity = iwl_legacy_read_targ_mem(priv, base);
- mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
- num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
- next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
-
- if (capacity > priv->cfg->base_params->max_event_log_size) {
- IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
- capacity, priv->cfg->base_params->max_event_log_size);
- capacity = priv->cfg->base_params->max_event_log_size;
- }
-
- if (next_entry > priv->cfg->base_params->max_event_log_size) {
- IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
- next_entry, priv->cfg->base_params->max_event_log_size);
- next_entry = priv->cfg->base_params->max_event_log_size;
- }
-
- size = num_wraps ? capacity : next_entry;
-
- /* bail out if nothing in log */
- if (size == 0) {
- IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- return pos;
- }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
- size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
- ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
-#else
- size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
- ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
-#endif
-
- IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n",
- size);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (display) {
- if (full_log)
- bufsz = capacity * 48;
- else
- bufsz = size * 48;
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
- }
- if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
- /* if uCode has wrapped back to top of log,
- * start at the oldest entry,
- * i.e the next one that uCode would fill.
- */
- if (num_wraps)
- pos = iwl3945_print_event_log(priv, next_entry,
- capacity - next_entry, mode,
- pos, buf, bufsz);
-
- /* (then/else) start at top of log */
- pos = iwl3945_print_event_log(priv, 0, next_entry, mode,
- pos, buf, bufsz);
- } else
- pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode,
- pos, buf, bufsz);
-#else
- pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode,
- pos, buf, bufsz);
-#endif
- return pos;
-}
-
static void iwl3945_irq_tasklet(struct iwl_priv *priv)
{
u32 inta, handled = 0;
@@ -1762,49 +1556,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
#endif
}
-static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- enum ieee80211_band band,
- struct iwl3945_scan_channel *scan_ch)
-{
- const struct ieee80211_supported_band *sband;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added = 0;
- u8 channel = 0;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband) {
- IWL_ERR(priv, "invalid band\n");
- return added;
- }
-
- active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0);
- passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
-
- channel = iwl_legacy_get_single_channel_number(priv, band);
-
- if (channel) {
- scan_ch->channel = channel;
- scan_ch->type = 0; /* passive */
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
- /* Set txpower levels to defaults */
- scan_ch->tpc.dsp_atten = 110;
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
- added++;
- } else
- IWL_ERR(priv, "no valid channel found\n");
- return added;
-}
-
static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
enum ieee80211_band band,
u8 is_active, u8 n_probes,
@@ -2816,6 +2567,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
enum ieee80211_band band;
bool is_active = false;
int ret;
+ u16 len;
lockdep_assert_held(&priv->mutex);
@@ -2834,17 +2586,14 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
- u16 interval = 0;
+ u16 interval;
u32 extra;
u32 suspend_time = 100;
u32 scan_suspend_time = 100;
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- if (priv->is_internal_short_scan)
- interval = 0;
- else
- interval = vif->bss_conf.beacon_int;
+ interval = vif->bss_conf.beacon_int;
scan->suspend_time = 0;
scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -2866,9 +2615,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan_suspend_time, interval);
}
- if (priv->is_internal_short_scan) {
- IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
- } else if (priv->scan_request->n_ssids) {
+ if (priv->scan_request->n_ssids) {
int i, p = 0;
IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
for (i = 0; i < priv->scan_request->n_ssids; i++) {
@@ -2919,36 +2666,17 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
IWL_GOOD_CRC_TH_DISABLED;
- if (!priv->is_internal_short_scan) {
- scan->tx_cmd.len = cpu_to_le16(
- iwl_legacy_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- vif->addr,
- priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan)));
- } else {
- /* use bcast addr, will not be transmitted but must be valid */
- scan->tx_cmd.len = cpu_to_le16(
- iwl_legacy_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- iwlegacy_bcast_addr, NULL, 0,
- IWL_MAX_SCAN_SIZE - sizeof(*scan)));
- }
+ len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
+ vif->addr, priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ scan->tx_cmd.len = cpu_to_le16(len);
+
/* select Rx antennas */
scan->flags |= iwl3945_get_antenna_flags(priv);
- if (priv->is_internal_short_scan) {
- scan->channel_count =
- iwl3945_get_single_channel_for_scan(priv, vif, band,
- (void *)&scan->data[le16_to_cpu(
- scan->tx_cmd.len)]);
- } else {
- scan->channel_count =
- iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
- (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)], vif);
- }
-
+ scan->channel_count = iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
+ (void *)&scan->data[len], vif);
if (scan->channel_count == 0) {
IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
return -EIO;
@@ -3824,10 +3552,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
/* initialize force reset */
- priv->force_reset[IWL_RF_RESET].reset_duration =
- IWL_DELAY_NEXT_FORCE_RF_RESET;
- priv->force_reset[IWL_FW_RESET].reset_duration =
- IWL_DELAY_NEXT_FORCE_FW_RELOAD;
+ priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index 7157ba52968..14334668034 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -488,134 +488,6 @@ static void iwl4965_bg_statistics_periodic(unsigned long data)
iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
}
-
-static void iwl4965_print_cont_event_trace(struct iwl_priv *priv, u32 base,
- u32 start_idx, u32 num_events,
- u32 mode)
-{
- u32 i;
- u32 ptr; /* SRAM byte address of log data */
- u32 ev, time, data; /* event log data */
- unsigned long reg_flags;
-
- if (mode == 0)
- ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
- else
- ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
-
- /* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (iwl_grab_nic_access(priv)) {
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return;
- }
-
- /* Set starting address; reads will auto-increment */
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
- rmb();
-
- /*
- * "time" is actually "data" for mode 0 (no timestamp).
- * place event id # at far right for easier visual parsing.
- */
- for (i = 0; i < num_events; i++) {
- ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (mode == 0) {
- trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
- 0, time, ev);
- } else {
- data = _iwl_legacy_read_direct32(priv,
- HBUS_TARG_MEM_RDAT);
- trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
- time, data, ev);
- }
- }
- /* Allow device to power down */
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static void iwl4965_continuous_event_trace(struct iwl_priv *priv)
-{
- u32 capacity; /* event log capacity in # entries */
- u32 base; /* SRAM byte address of event log header */
- u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
- u32 num_wraps; /* # times uCode wrapped to top of log */
- u32 next_entry; /* index of next entry to be written by uCode */
-
- if (priv->ucode_type == UCODE_INIT)
- base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
- else
- base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
- if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
- capacity = iwl_legacy_read_targ_mem(priv, base);
- num_wraps = iwl_legacy_read_targ_mem(priv,
- base + (2 * sizeof(u32)));
- mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
- next_entry = iwl_legacy_read_targ_mem(priv,
- base + (3 * sizeof(u32)));
- } else
- return;
-
- if (num_wraps == priv->event_log.num_wraps) {
- iwl4965_print_cont_event_trace(priv,
- base, priv->event_log.next_entry,
- next_entry - priv->event_log.next_entry,
- mode);
- priv->event_log.non_wraps_count++;
- } else {
- if ((num_wraps - priv->event_log.num_wraps) > 1)
- priv->event_log.wraps_more_count++;
- else
- priv->event_log.wraps_once_count++;
- trace_iwlwifi_legacy_dev_ucode_wrap_event(priv,
- num_wraps - priv->event_log.num_wraps,
- next_entry, priv->event_log.next_entry);
- if (next_entry < priv->event_log.next_entry) {
- iwl4965_print_cont_event_trace(priv, base,
- priv->event_log.next_entry,
- capacity - priv->event_log.next_entry,
- mode);
-
- iwl4965_print_cont_event_trace(priv, base, 0,
- next_entry, mode);
- } else {
- iwl4965_print_cont_event_trace(priv, base,
- next_entry, capacity - next_entry,
- mode);
-
- iwl4965_print_cont_event_trace(priv, base, 0,
- next_entry, mode);
- }
- }
- priv->event_log.num_wraps = num_wraps;
- priv->event_log.next_entry = next_entry;
-}
-
-/**
- * iwl4965_bg_ucode_trace - Timer callback to log ucode event
- *
- * The timer is continually set to execute every
- * UCODE_TRACE_PERIOD milliseconds after the last timer expired
- * this function is to perform continuous uCode event logging operation
- * if enabled
- */
-static void iwl4965_bg_ucode_trace(unsigned long data)
-{
- struct iwl_priv *priv = (struct iwl_priv *)data;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (priv->event_log.ucode_trace) {
- iwl4965_continuous_event_trace(priv);
- /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
- mod_timer(&priv->ucode_trace,
- jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
- }
-}
-
static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
@@ -1612,7 +1484,7 @@ static const char * const desc_lookup_text[] = {
"NMI_INTERRUPT_DATA_ACTION_PT",
"NMI_TRM_HW_ER",
"NMI_INTERRUPT_TRM",
- "NMI_INTERRUPT_BREAK_POINT"
+ "NMI_INTERRUPT_BREAK_POINT",
"DEBUG_0",
"DEBUG_1",
"DEBUG_2",
@@ -1711,209 +1583,6 @@ void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
pc, blink1, blink2, ilink1, ilink2, hcmd);
}
-#define EVENT_START_OFFSET (4 * sizeof(u32))
-
-/**
- * iwl4965_print_event_log - Dump error event log to syslog
- *
- */
-static int iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
- u32 num_events, u32 mode,
- int pos, char **buf, size_t bufsz)
-{
- u32 i;
- u32 base; /* SRAM byte address of event log header */
- u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
- u32 ptr; /* SRAM byte address of log data */
- u32 ev, time, data; /* event log data */
- unsigned long reg_flags;
-
- if (num_events == 0)
- return pos;
-
- if (priv->ucode_type == UCODE_INIT) {
- base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
- } else {
- base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
- }
-
- if (mode == 0)
- event_size = 2 * sizeof(u32);
- else
- event_size = 3 * sizeof(u32);
-
- ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
-
- /* Make sure device is powered up for SRAM reads */
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
-
- /* Set starting address; reads will auto-increment */
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
- rmb();
-
- /* "time" is actually "data" for mode 0 (no timestamp).
- * place event id # at far right for easier visual parsing. */
- for (i = 0; i < num_events; i++) {
- ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (mode == 0) {
- /* data, ev */
- if (bufsz) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- "EVT_LOG:0x%08x:%04u\n",
- time, ev);
- } else {
- trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
- time, ev);
- IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
- time, ev);
- }
- } else {
- data = _iwl_legacy_read_direct32(priv,
- HBUS_TARG_MEM_RDAT);
- if (bufsz) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- "EVT_LOGT:%010u:0x%08x:%04u\n",
- time, data, ev);
- } else {
- IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
- time, data, ev);
- trace_iwlwifi_legacy_dev_ucode_event(priv, time,
- data, ev);
- }
- }
- }
-
- /* Allow device to power down */
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return pos;
-}
-
-/**
- * iwl4965_print_last_event_logs - Dump the newest # of event log to syslog
- */
-static int iwl4965_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
- u32 num_wraps, u32 next_entry,
- u32 size, u32 mode,
- int pos, char **buf, size_t bufsz)
-{
- /*
- * display the newest DEFAULT_LOG_ENTRIES entries
- * i.e. the entries just before the next one that uCode would fill.
- */
- if (num_wraps) {
- if (next_entry < size) {
- pos = iwl4965_print_event_log(priv,
- capacity - (size - next_entry),
- size - next_entry, mode,
- pos, buf, bufsz);
- pos = iwl4965_print_event_log(priv, 0,
- next_entry, mode,
- pos, buf, bufsz);
- } else
- pos = iwl4965_print_event_log(priv, next_entry - size,
- size, mode, pos, buf, bufsz);
- } else {
- if (next_entry < size) {
- pos = iwl4965_print_event_log(priv, 0, next_entry,
- mode, pos, buf, bufsz);
- } else {
- pos = iwl4965_print_event_log(priv, next_entry - size,
- size, mode, pos, buf, bufsz);
- }
- }
- return pos;
-}
-
-#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
-
-int iwl4965_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
- char **buf, bool display)
-{
- u32 base; /* SRAM byte address of event log header */
- u32 capacity; /* event log capacity in # entries */
- u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
- u32 num_wraps; /* # times uCode wrapped to top of log */
- u32 next_entry; /* index of next entry to be written by uCode */
- u32 size; /* # entries that we'll print */
- int pos = 0;
- size_t bufsz = 0;
-
- if (priv->ucode_type == UCODE_INIT) {
- base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
- } else {
- base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
- }
-
- if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
- IWL_ERR(priv,
- "Invalid event log pointer 0x%08X for %s uCode\n",
- base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
- return -EINVAL;
- }
-
- /* event log header */
- capacity = iwl_legacy_read_targ_mem(priv, base);
- mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
- num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
- next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));
-
- size = num_wraps ? capacity : next_entry;
-
- /* bail out if nothing in log */
- if (size == 0) {
- IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- return pos;
- }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
- size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
- ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
-#else
- size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
- ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
-#endif
- IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
- size);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (display) {
- if (full_log)
- bufsz = capacity * 48;
- else
- bufsz = size * 48;
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
- }
- if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
- /*
- * if uCode has wrapped back to top of log,
- * start at the oldest entry,
- * i.e the next one that uCode would fill.
- */
- if (num_wraps)
- pos = iwl4965_print_event_log(priv, next_entry,
- capacity - next_entry, mode,
- pos, buf, bufsz);
- /* (then/else) start at top of log */
- pos = iwl4965_print_event_log(priv, 0,
- next_entry, mode, pos, buf, bufsz);
- } else
- pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode,
- pos, buf, bufsz);
-#else
- pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode,
- pos, buf, bufsz);
-#endif
- return pos;
-}
-
static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
{
struct iwl_ct_kill_config cmd;
@@ -2773,20 +2442,10 @@ int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_START:
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
- if (ret == 0) {
- priv->_4965.agg_tids_count++;
- IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
- priv->_4965.agg_tids_count);
- }
break;
case IEEE80211_AMPDU_TX_STOP:
IWL_DEBUG_HT(priv, "stop Tx\n");
ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
- if ((ret == 0) && (priv->_4965.agg_tids_count > 0)) {
- priv->_4965.agg_tids_count--;
- IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
- priv->_4965.agg_tids_count);
- }
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
ret = 0;
break;
@@ -2851,7 +2510,6 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
u16 ch;
- unsigned long flags = 0;
IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -2868,64 +2526,64 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
if (!iwl_legacy_is_associated_ctx(ctx))
goto out;
- if (priv->cfg->ops->lib->set_channel_switch) {
+ if (!priv->cfg->ops->lib->set_channel_switch)
+ goto out;
- ch = channel->hw_value;
- if (le16_to_cpu(ctx->active.channel) != ch) {
- ch_info = iwl_legacy_get_channel_info(priv,
- channel->band,
- ch);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "invalid channel\n");
- goto out;
- }
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->current_ht_config.smps = conf->smps_mode;
-
- /* Configure HT40 channels */
- ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
- ctx->ht.is_40mhz = false;
-
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_legacy_set_rxon_channel(priv, channel, ctx);
- iwl_legacy_set_rxon_ht(priv, ht_conf);
- iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
- ctx->vif);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl_legacy_set_rate(priv);
- /*
- * at this point, staging_rxon has the
- * configuration for channel switch
- */
- set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
- priv->switch_channel = cpu_to_le16(ch);
- if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
- clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
- &priv->status);
- priv->switch_channel = 0;
- ieee80211_chswitch_done(ctx->vif, false);
- }
+ ch = channel->hw_value;
+ if (le16_to_cpu(ctx->active.channel) == ch)
+ goto out;
+
+ ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
+ if (!iwl_legacy_is_channel_valid(ch_info)) {
+ IWL_DEBUG_MAC80211(priv, "invalid channel\n");
+ goto out;
+ }
+
+ spin_lock_irq(&priv->lock);
+
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /* Configure HT40 channels */
+ ctx->ht.enabled = conf_is_ht(conf);
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
}
+ } else
+ ctx->ht.is_40mhz = false;
+
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ iwl_legacy_set_rxon_channel(priv, channel, ctx);
+ iwl_legacy_set_rxon_ht(priv, ht_conf);
+ iwl_legacy_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
+
+ spin_unlock_irq(&priv->lock);
+
+ iwl_legacy_set_rate(priv);
+ /*
+ * at this point, staging_rxon has the
+ * configuration for channel switch
+ */
+ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+ priv->switch_channel = cpu_to_le16(ch);
+ if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
+ clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+ priv->switch_channel = 0;
+ ieee80211_chswitch_done(ctx->vif, false);
}
+
out:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -3034,10 +2692,6 @@ static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
priv->statistics_periodic.data = (unsigned long)priv;
priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
- init_timer(&priv->ucode_trace);
- priv->ucode_trace.data = (unsigned long)priv;
- priv->ucode_trace.function = iwl4965_bg_ucode_trace;
-
init_timer(&priv->watchdog);
priv->watchdog.data = (unsigned long)priv;
priv->watchdog.function = iwl_legacy_bg_watchdog;
@@ -3056,7 +2710,6 @@ static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
iwl_legacy_cancel_scan_deferred_work(priv);
del_timer_sync(&priv->statistics_periodic);
- del_timer_sync(&priv->ucode_trace);
}
static void iwl4965_init_hw_rates(struct iwl_priv *priv,
@@ -3132,13 +2785,9 @@ static int iwl4965_init_drv(struct iwl_priv *priv)
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
- priv->_4965.agg_tids_count = 0;
/* initialize force reset */
- priv->force_reset[IWL_RF_RESET].reset_duration =
- IWL_DELAY_NEXT_FORCE_RF_RESET;
- priv->force_reset[IWL_FW_RESET].reset_duration =
- IWL_DELAY_NEXT_FORCE_FW_RELOAD;
+ priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
/* Choose which receivers/antennas to use */
if (priv->cfg->ops->hcmd->set_rxon_chain)
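
One hunk above adds the comma that was missing after "NMI_INTERRUPT_BREAK_POINT" in desc_lookup_text[]. That single character matters because adjacent string literals in C are concatenated, so the missing comma silently fused two table entries and shifted every later error description by one slot. A standalone illustration with a made-up table (not the driver's):

#include <stdio.h>

static const char * const broken[] = {
    "ALPHA",
    "BRAVO"     /* missing comma: fuses with the next literal */
    "CHARLIE",
    "DELTA",
};

static const char * const fixed[] = {
    "ALPHA",
    "BRAVO",
    "CHARLIE",
    "DELTA",
};

int main(void)
{
    /* broken has 3 entries: "ALPHA", "BRAVOCHARLIE", "DELTA" */
    printf("broken: %zu entries, broken[1] = \"%s\"\n",
           sizeof(broken) / sizeof(broken[0]), broken[1]);
    printf("fixed:  %zu entries, fixed[1]  = \"%s\"\n",
           sizeof(fixed) / sizeof(fixed[0]), fixed[1]);
    return 0;
}
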
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 822660483f9..48ab9142af3 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -5,14 +5,16 @@ iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o
iwlagn-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
-iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
-iwlagn-objs += iwl-rx.o iwl-tx.o iwl-sta.o
+iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-power.o
+iwlagn-objs += iwl-rx.o iwl-sta.o
iwlagn-objs += iwl-scan.o iwl-led.o
-iwlagn-objs += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
+iwlagn-objs += iwl-agn-rxon.o
iwlagn-objs += iwl-5000.o
iwlagn-objs += iwl-6000.o
iwlagn-objs += iwl-1000.o
iwlagn-objs += iwl-2000.o
+iwlagn-objs += iwl-pci.o
+iwlagn-objs += iwl-trans.o iwl-trans-rx-pcie.o iwl-trans-tx-pcie.o
iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 61d4a11f566..01b49eb8c8e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -27,8 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
@@ -36,6 +34,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
+#include <linux/stringify.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
@@ -55,10 +54,10 @@
#define IWL100_UCODE_API_MIN 5
#define IWL1000_FW_PRE "iwlwifi-1000-"
-#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE #api ".ucode"
+#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
#define IWL100_FW_PRE "iwlwifi-100-"
-#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE #api ".ucode"
+#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
/*
@@ -126,7 +125,6 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
iwlagn_mod_params.num_of_queues;
priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->base_params->num_of_queues *
sizeof(struct iwlagn_scd_bc_tbl);
@@ -139,7 +137,6 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
@@ -171,15 +168,7 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
static struct iwl_lib_ops iwl1000_lib = {
.set_hw_params = iwl1000_hw_set_hw_params,
- .rx_handler_setup = iwlagn_rx_handler_setup,
- .setup_deferred_work = iwlagn_setup_deferred_work,
- .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
- .send_tx_power = iwlagn_send_tx_power,
- .update_chain_flags = iwl_update_chain_flags,
- .apm_ops = {
- .init = iwl_apm_init,
- .config = iwl1000_nic_config,
- },
+ .nic_config = iwl1000_nic_config,
.eeprom_ops = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -190,19 +179,8 @@ static struct iwl_lib_ops iwl1000_lib = {
EEPROM_REG_BAND_24_HT40_CHANNELS,
EEPROM_REGULATORY_BAND_NO_HT40,
},
- .query_addr = iwlagn_eeprom_query_addr,
},
- .temp_ops = {
- .temperature = iwlagn_temperature,
- },
- .txfifo_flush = iwlagn_txfifo_flush,
- .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
-};
-
-static const struct iwl_ops iwl1000_ops = {
- .lib = &iwl1000_lib,
- .hcmd = &iwlagn_hcmd,
- .utils = &iwlagn_hcmd_utils,
+ .temperature = iwlagn_temperature,
};
static struct iwl_base_params iwl1000_base_params = {
@@ -223,6 +201,7 @@ static struct iwl_base_params iwl1000_base_params = {
static struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
+ .smps_mode = IEEE80211_SMPS_STATIC,
};
#define IWL_DEVICE_1000 \
@@ -231,7 +210,7 @@ static struct iwl_ht_params iwl1000_ht_params = {
.ucode_api_min = IWL1000_UCODE_API_MIN, \
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
- .ops = &iwl1000_ops, \
+ .lib = &iwl1000_lib, \
.base_params = &iwl1000_base_params, \
.led_mode = IWL_LED_BLINK
@@ -252,7 +231,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
.ucode_api_min = IWL100_UCODE_API_MIN, \
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
- .ops = &iwl1000_ops, \
+ .lib = &iwl1000_lib, \
.base_params = &iwl1000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true
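
The firmware-name macros in this file switch from the raw #api stringizing operator to __stringify(api). The difference is expansion order: #x stringizes the argument exactly as written, while __stringify() is the usual two-level wrapper that macro-expands the argument first, so passing IWL1000_UCODE_API_MAX produces "5" in the firmware filename instead of the literal macro name. A small userspace reproduction using local STR/STR_1 macros as a stand-in for the kernel's __stringify():

#include <stdio.h>

#define STR_1(x)  #x          /* stringize as written */
#define STR(x)    STR_1(x)    /* expand first, then stringize */

#define UCODE_API_MAX  5
#define FW_PRE         "iwlwifi-1000-"

/* old style: '#api' sees the unexpanded token */
#define FW_NAME_RAW(api)  FW_PRE #api ".ucode"
/* new style: expand 'api' before stringizing, like __stringify() */
#define FW_NAME(api)      FW_PRE STR(api) ".ucode"

int main(void)
{
    printf("%s\n", FW_NAME_RAW(UCODE_API_MAX)); /* iwlwifi-1000-UCODE_API_MAX.ucode */
    printf("%s\n", FW_NAME(UCODE_API_MAX));     /* iwlwifi-1000-5.ucode */
    return 0;
}
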
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 2282279cffc..0e13f0bb2e1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -27,8 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
@@ -36,6 +34,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
+#include <linux/stringify.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
@@ -51,20 +50,25 @@
#define IWL2030_UCODE_API_MAX 5
#define IWL2000_UCODE_API_MAX 5
#define IWL105_UCODE_API_MAX 5
+#define IWL135_UCODE_API_MAX 5
/* Lowest firmware API version supported */
#define IWL2030_UCODE_API_MIN 5
#define IWL2000_UCODE_API_MIN 5
#define IWL105_UCODE_API_MIN 5
+#define IWL135_UCODE_API_MIN 5
#define IWL2030_FW_PRE "iwlwifi-2030-"
-#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode"
+#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
#define IWL2000_FW_PRE "iwlwifi-2000-"
-#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode"
+#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode"
#define IWL105_FW_PRE "iwlwifi-105-"
-#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE #api ".ucode"
+#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"
+
+#define IWL135_FW_PRE "iwlwifi-135-"
+#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"
static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
{
@@ -76,28 +80,11 @@ static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
/* NIC configuration for 2000 series */
static void iwl2000_nic_config(struct iwl_priv *priv)
{
- u16 radio_cfg;
-
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-
- /* write radio config values to register */
- if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX)
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
- EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
- EEPROM_RF_CFG_DASH_MSK(radio_cfg));
-
- /* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
- CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+ iwl_rf_config(priv);
if (priv->cfg->iq_invert)
iwl_set_bit(priv, CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
-
- if (priv->cfg->disable_otp_refresh)
- iwl_write_prph(priv, APMG_ANALOG_SVR_REG, 0x80000010);
}
static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
@@ -133,7 +120,6 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
iwlagn_mod_params.num_of_queues;
priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->base_params->num_of_queues *
sizeof(struct iwlagn_scd_bc_tbl);
@@ -146,7 +132,6 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
@@ -168,7 +153,7 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
- priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX);
+ priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
if (priv->cfg->need_temp_offset_calib)
priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
@@ -179,16 +164,7 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
static struct iwl_lib_ops iwl2000_lib = {
.set_hw_params = iwl2000_hw_set_hw_params,
- .rx_handler_setup = iwlagn_rx_handler_setup,
- .setup_deferred_work = iwlagn_bt_setup_deferred_work,
- .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
- .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
- .send_tx_power = iwlagn_send_tx_power,
- .update_chain_flags = iwl_update_chain_flags,
- .apm_ops = {
- .init = iwl_apm_init,
- .config = iwl2000_nic_config,
- },
+ .nic_config = iwl2000_nic_config,
.eeprom_ops = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -199,38 +175,30 @@ static struct iwl_lib_ops iwl2000_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REGULATORY_BAND_NO_HT40,
},
- .query_addr = iwlagn_eeprom_query_addr,
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
},
- .temp_ops = {
- .temperature = iwlagn_temperature,
- },
- .txfifo_flush = iwlagn_txfifo_flush,
- .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
+ .temperature = iwlagn_temperature,
};
-static const struct iwl_ops iwl2000_ops = {
- .lib = &iwl2000_lib,
- .hcmd = &iwlagn_hcmd,
- .utils = &iwlagn_hcmd_utils,
-};
-
-static const struct iwl_ops iwl2030_ops = {
- .lib = &iwl2000_lib,
- .hcmd = &iwlagn_bt_hcmd,
- .utils = &iwlagn_hcmd_utils,
-};
-
-static const struct iwl_ops iwl105_ops = {
- .lib = &iwl2000_lib,
- .hcmd = &iwlagn_hcmd,
- .utils = &iwlagn_hcmd_utils,
-};
-
-static const struct iwl_ops iwl135_ops = {
- .lib = &iwl2000_lib,
- .hcmd = &iwlagn_bt_hcmd,
- .utils = &iwlagn_hcmd_utils,
+static struct iwl_lib_ops iwl2030_lib = {
+ .set_hw_params = iwl2000_hw_set_hw_params,
+ .bt_rx_handler_setup = iwlagn_bt_rx_handler_setup,
+ .bt_setup_deferred_work = iwlagn_bt_setup_deferred_work,
+ .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
+ .nic_config = iwl2000_nic_config,
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ },
+ .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ },
+ .temperature = iwlagn_temperature,
};
static struct iwl_base_params iwl2000_base_params = {
@@ -291,13 +259,12 @@ static struct iwl_bt_params iwl2030_bt_params = {
.ucode_api_min = IWL2000_UCODE_API_MIN, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .ops = &iwl2000_ops, \
+ .lib = &iwl2000_lib, \
.base_params = &iwl2000_base_params, \
.need_dc_calib = true, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE, \
- .iq_invert = true, \
- .disable_otp_refresh = true \
+ .iq_invert = true \
struct iwl_cfg iwl2000_2bgn_cfg = {
.name = "2000 Series 2x2 BGN",
@@ -316,7 +283,7 @@ struct iwl_cfg iwl2000_2bg_cfg = {
.ucode_api_min = IWL2030_UCODE_API_MIN, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .ops = &iwl2030_ops, \
+ .lib = &iwl2030_lib, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
.need_dc_calib = true, \
@@ -342,13 +309,14 @@ struct iwl_cfg iwl2030_2bg_cfg = {
.ucode_api_min = IWL105_UCODE_API_MIN, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .ops = &iwl105_ops, \
+ .lib = &iwl2000_lib, \
.base_params = &iwl2000_base_params, \
.need_dc_calib = true, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true, \
- .rx_with_siso_diversity = true \
+ .rx_with_siso_diversity = true, \
+ .iq_invert = true \
struct iwl_cfg iwl105_bg_cfg = {
.name = "105 Series 1x1 BG",
@@ -362,27 +330,28 @@ struct iwl_cfg iwl105_bgn_cfg = {
};
#define IWL_DEVICE_135 \
- .fw_name_pre = IWL105_FW_PRE, \
- .ucode_api_max = IWL105_UCODE_API_MAX, \
- .ucode_api_min = IWL105_UCODE_API_MIN, \
+ .fw_name_pre = IWL135_FW_PRE, \
+ .ucode_api_max = IWL135_UCODE_API_MAX, \
+ .ucode_api_min = IWL135_UCODE_API_MIN, \
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
- .ops = &iwl135_ops, \
+ .lib = &iwl2030_lib, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
.need_dc_calib = true, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true, \
- .rx_with_siso_diversity = true \
+ .rx_with_siso_diversity = true, \
+ .iq_invert = true \
struct iwl_cfg iwl135_bg_cfg = {
- .name = "105 Series 1x1 BG/BT",
+ .name = "135 Series 1x1 BG/BT",
IWL_DEVICE_135,
};
struct iwl_cfg iwl135_bgn_cfg = {
- .name = "105 Series 1x1 BGN/BT",
+ .name = "135 Series 1x1 BGN/BT",
IWL_DEVICE_135,
.ht_params = &iwl2000_ht_params,
};
@@ -390,3 +359,4 @@ struct iwl_cfg iwl135_bgn_cfg = {
MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX));
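
The calibration hunk above also drops the BIT() wrapper around IWL_CALIB_CFG_DC_IDX, which only makes sense if that constant is already a bit mask rather than a bit position; wrapping a mask in BIT() shifts by the mask's value and sets the wrong bit. A sketch of that failure mode with made-up values (the real constant's definition is not shown in this diff):

#include <stdio.h>

#define BIT(n)        (1UL << (n))

/* hypothetical: the "index" constant is in fact already a mask */
#define CALIB_CFG_DC  BIT(1)   /* 0x2 */

int main(void)
{
    unsigned long cfg_wrong = 0, cfg_right = 0;

    cfg_wrong |= BIT(CALIB_CFG_DC);  /* BIT(0x2) == 0x4: wrong bit */
    cfg_right |= CALIB_CFG_DC;       /* 0x2: the intended flag */

    printf("wrong=0x%lx right=0x%lx\n", cfg_wrong, cfg_right);
    return 0;
}
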
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 05ad47628b6..f9630a3c79f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -75,7 +75,7 @@ static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
{
u16 temperature, voltage;
__le16 *temp_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_TEMPERATURE);
temperature = le16_to_cpu(temp_calib[0]);
voltage = le16_to_cpu(temp_calib[1]);
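
Only the EEPROM offset symbol changes here, but the surrounding helper shows why the __le16/le16_to_cpu pairing exists: the calibration words are stored little-endian in the EEPROM image (hence the __le16 type), so each 16-bit field must be converted to host byte order before use on big-endian machines. A hedged userspace equivalent with a hypothetical two-field blob:

#include <stdint.h>
#include <stdio.h>

/* Read a 16-bit little-endian field from a raw EEPROM-style blob. */
static uint16_t get_le16(const uint8_t *p)
{
    return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
    /* hypothetical calibration blob: temperature then voltage */
    const uint8_t blob[4] = { 0x34, 0x12, 0x78, 0x56 };

    uint16_t temperature = get_le16(&blob[0]); /* 0x1234 */
    uint16_t voltage     = get_le16(&blob[2]); /* 0x5678 */

    printf("temperature=0x%04x voltage=0x%04x\n", temperature, voltage);
    return 0;
}
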
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index f99f9c19335..c95cefd529d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -27,8 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
@@ -37,6 +35,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
+#include <linux/stringify.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
@@ -47,6 +46,7 @@
#include "iwl-agn.h"
#include "iwl-agn-hw.h"
#include "iwl-5000-hw.h"
+#include "iwl-trans.h"
/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 5
@@ -57,32 +57,19 @@
#define IWL5150_UCODE_API_MIN 1
#define IWL5000_FW_PRE "iwlwifi-5000-"
-#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE #api ".ucode"
+#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
#define IWL5150_FW_PRE "iwlwifi-5150-"
-#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode"
+#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
/* NIC configuration for 5000 series */
static void iwl5000_nic_config(struct iwl_priv *priv)
{
unsigned long flags;
- u16 radio_cfg;
- spin_lock_irqsave(&priv->lock, flags);
-
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-
- /* write radio config values to register */
- if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) < EEPROM_RF_CONFIG_TYPE_MAX)
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
- EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
- EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+ iwl_rf_config(priv);
- /* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
- CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+ spin_lock_irqsave(&priv->lock, flags);
/* W/A : NIC is stuck in a reset state after Early PCIe power off
* (PCIe power is lost before PERST# is asserted),
@@ -170,7 +157,6 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
iwlagn_mod_params.num_of_queues;
priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->base_params->num_of_queues *
sizeof(struct iwlagn_scd_bc_tbl);
@@ -183,7 +169,6 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
@@ -215,7 +200,6 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
iwlagn_mod_params.num_of_queues;
priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->base_params->num_of_queues *
sizeof(struct iwlagn_scd_bc_tbl);
@@ -228,7 +212,6 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
@@ -332,21 +315,13 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
return -EFAULT;
}
- return iwl_send_cmd_sync(priv, &hcmd);
+ return trans_send_cmd(&priv->trans, &hcmd);
}
static struct iwl_lib_ops iwl5000_lib = {
.set_hw_params = iwl5000_hw_set_hw_params,
- .rx_handler_setup = iwlagn_rx_handler_setup,
- .setup_deferred_work = iwlagn_setup_deferred_work,
- .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
- .send_tx_power = iwlagn_send_tx_power,
- .update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl5000_hw_channel_switch,
- .apm_ops = {
- .init = iwl_apm_init,
- .config = iwl5000_nic_config,
- },
+ .nic_config = iwl5000_nic_config,
.eeprom_ops = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -357,27 +332,14 @@ static struct iwl_lib_ops iwl5000_lib = {
EEPROM_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
- .query_addr = iwlagn_eeprom_query_addr,
},
- .temp_ops = {
- .temperature = iwlagn_temperature,
- },
- .txfifo_flush = iwlagn_txfifo_flush,
- .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
+ .temperature = iwlagn_temperature,
};
static struct iwl_lib_ops iwl5150_lib = {
.set_hw_params = iwl5150_hw_set_hw_params,
- .rx_handler_setup = iwlagn_rx_handler_setup,
- .setup_deferred_work = iwlagn_setup_deferred_work,
- .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
- .send_tx_power = iwlagn_send_tx_power,
- .update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl5000_hw_channel_switch,
- .apm_ops = {
- .init = iwl_apm_init,
- .config = iwl5000_nic_config,
- },
+ .nic_config = iwl5000_nic_config,
.eeprom_ops = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -388,25 +350,8 @@ static struct iwl_lib_ops iwl5150_lib = {
EEPROM_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
- .query_addr = iwlagn_eeprom_query_addr,
},
- .temp_ops = {
- .temperature = iwl5150_temperature,
- },
- .txfifo_flush = iwlagn_txfifo_flush,
- .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
-};
-
-static const struct iwl_ops iwl5000_ops = {
- .lib = &iwl5000_lib,
- .hcmd = &iwlagn_hcmd,
- .utils = &iwlagn_hcmd_utils,
-};
-
-static const struct iwl_ops iwl5150_ops = {
- .lib = &iwl5150_lib,
- .hcmd = &iwlagn_hcmd,
- .utils = &iwlagn_hcmd_utils,
+ .temperature = iwl5150_temperature,
};
static struct iwl_base_params iwl5000_base_params = {
@@ -420,6 +365,7 @@ static struct iwl_base_params iwl5000_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
+ .no_idle_support = true,
};
static struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
@@ -431,7 +377,7 @@ static struct iwl_ht_params iwl5000_ht_params = {
.ucode_api_min = IWL5000_UCODE_API_MIN, \
.eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
- .ops = &iwl5000_ops, \
+ .lib = &iwl5000_lib, \
.base_params = &iwl5000_base_params, \
.led_mode = IWL_LED_BLINK
@@ -474,7 +420,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
.ucode_api_min = IWL5000_UCODE_API_MIN,
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
- .ops = &iwl5000_ops,
+ .lib = &iwl5000_lib,
.base_params = &iwl5000_base_params,
.ht_params = &iwl5000_ht_params,
.led_mode = IWL_LED_BLINK,
@@ -487,7 +433,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
.ucode_api_min = IWL5150_UCODE_API_MIN, \
.eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
- .ops = &iwl5150_ops, \
+ .lib = &iwl5150_lib, \
.base_params = &iwl5000_base_params, \
.need_dc_calib = true, \
.led_mode = IWL_LED_BLINK, \
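
Across iwl-1000.c through iwl-6000.c this patch collapses the old per-device ops bundle (separate lib/hcmd/utils tables with nested apm_ops and temp_ops) into a single flat iwl_lib_ops whose members are plain function pointers such as .nic_config and .temperature. The shape of that change, reduced to a toy example with hypothetical names:

#include <stdio.h>

struct device_ctx { int id; };

/* Before: nested ops tables; call sites walk two structs. */
struct apm_ops_old {
    void (*config)(struct device_ctx *ctx);
};
struct lib_ops_old {
    struct apm_ops_old apm_ops;
};

/* After: one flat table with a direct pointer. */
struct lib_ops_new {
    void (*nic_config)(struct device_ctx *ctx);
};

static void nic_config_5000(struct device_ctx *ctx)
{
    printf("configuring NIC %d\n", ctx->id);
}

static const struct lib_ops_old old_ops = {
    .apm_ops = { .config = nic_config_5000 },
};
static const struct lib_ops_new new_ops = {
    .nic_config = nic_config_5000,
};

int main(void)
{
    struct device_ctx ctx = { .id = 5000 };

    old_ops.apm_ops.config(&ctx);  /* old call site */
    new_ops.nic_config(&ctx);      /* new call site, one level flatter */
    return 0;
}
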
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index fbe565c816e..973d1972e8c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -27,8 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
@@ -36,6 +34,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
+#include <linux/stringify.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
@@ -46,6 +45,7 @@
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-6000-hw.h"
+#include "iwl-trans.h"
/* Highest firmware API version supported */
#define IWL6000_UCODE_API_MAX 4
@@ -58,16 +58,16 @@
#define IWL6000G2_UCODE_API_MIN 4
#define IWL6000_FW_PRE "iwlwifi-6000-"
-#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
+#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
#define IWL6050_FW_PRE "iwlwifi-6050-"
-#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
+#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"
#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
-#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
+#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"
#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
-#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
+#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
{
@@ -97,21 +97,7 @@ static void iwl6150_additional_nic_config(struct iwl_priv *priv)
/* NIC configuration for 6000 series */
static void iwl6000_nic_config(struct iwl_priv *priv)
{
- u16 radio_cfg;
-
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-
- /* write radio config values to register */
- if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX)
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
- EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
- EEPROM_RF_CFG_DASH_MSK(radio_cfg));
-
- /* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
- CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+ iwl_rf_config(priv);
/* no locking required for register write */
if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
@@ -120,10 +106,8 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
}
/* do additional nic configuration if needed */
- if (priv->cfg->ops->nic &&
- priv->cfg->ops->nic->additional_nic_config) {
- priv->cfg->ops->nic->additional_nic_config(priv);
- }
+ if (priv->cfg->additional_nic_config)
+ priv->cfg->additional_nic_config(priv);
}
static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
@@ -159,7 +143,6 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
iwlagn_mod_params.num_of_queues;
priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->base_params->num_of_queues *
sizeof(struct iwlagn_scd_bc_tbl);
@@ -172,7 +155,6 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
if (priv->cfg->rx_with_siso_diversity)
@@ -194,7 +176,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
if (priv->cfg->need_dc_calib)
- priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX);
+ priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
if (priv->cfg->need_temp_offset_calib)
priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
@@ -271,21 +253,13 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
return -EFAULT;
}
- return iwl_send_cmd_sync(priv, &hcmd);
+ return trans_send_cmd(&priv->trans, &hcmd);
}
static struct iwl_lib_ops iwl6000_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
- .rx_handler_setup = iwlagn_rx_handler_setup,
- .setup_deferred_work = iwlagn_setup_deferred_work,
- .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
- .send_tx_power = iwlagn_send_tx_power,
- .update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl6000_hw_channel_switch,
- .apm_ops = {
- .init = iwl_apm_init,
- .config = iwl6000_nic_config,
- },
+ .nic_config = iwl6000_nic_config,
.eeprom_ops = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -296,29 +270,18 @@ static struct iwl_lib_ops iwl6000_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
- .query_addr = iwlagn_eeprom_query_addr,
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
},
- .temp_ops = {
- .temperature = iwlagn_temperature,
- },
- .txfifo_flush = iwlagn_txfifo_flush,
- .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
+ .temperature = iwlagn_temperature,
};
static struct iwl_lib_ops iwl6030_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
- .rx_handler_setup = iwlagn_bt_rx_handler_setup,
- .setup_deferred_work = iwlagn_bt_setup_deferred_work,
+ .bt_rx_handler_setup = iwlagn_bt_rx_handler_setup,
+ .bt_setup_deferred_work = iwlagn_bt_setup_deferred_work,
.cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
- .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
- .send_tx_power = iwlagn_send_tx_power,
- .update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl6000_hw_channel_switch,
- .apm_ops = {
- .init = iwl_apm_init,
- .config = iwl6000_nic_config,
- },
+ .nic_config = iwl6000_nic_config,
.eeprom_ops = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
@@ -329,48 +292,9 @@ static struct iwl_lib_ops iwl6030_lib = {
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
- .query_addr = iwlagn_eeprom_query_addr,
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
},
- .temp_ops = {
- .temperature = iwlagn_temperature,
- },
- .txfifo_flush = iwlagn_txfifo_flush,
- .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
-};
-
-static struct iwl_nic_ops iwl6050_nic_ops = {
- .additional_nic_config = &iwl6050_additional_nic_config,
-};
-
-static struct iwl_nic_ops iwl6150_nic_ops = {
- .additional_nic_config = &iwl6150_additional_nic_config,
-};
-
-static const struct iwl_ops iwl6000_ops = {
- .lib = &iwl6000_lib,
- .hcmd = &iwlagn_hcmd,
- .utils = &iwlagn_hcmd_utils,
-};
-
-static const struct iwl_ops iwl6050_ops = {
- .lib = &iwl6000_lib,
- .hcmd = &iwlagn_hcmd,
- .utils = &iwlagn_hcmd_utils,
- .nic = &iwl6050_nic_ops,
-};
-
-static const struct iwl_ops iwl6150_ops = {
- .lib = &iwl6000_lib,
- .hcmd = &iwlagn_hcmd,
- .utils = &iwlagn_hcmd_utils,
- .nic = &iwl6150_nic_ops,
-};
-
-static const struct iwl_ops iwl6030_ops = {
- .lib = &iwl6030_lib,
- .hcmd = &iwlagn_bt_hcmd,
- .utils = &iwlagn_hcmd_utils,
+ .temperature = iwlagn_temperature,
};
static struct iwl_base_params iwl6000_base_params = {
@@ -446,7 +370,7 @@ static struct iwl_bt_params iwl6000_bt_params = {
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
.eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
- .ops = &iwl6000_ops, \
+ .lib = &iwl6000_lib, \
.base_params = &iwl6000_g2_base_params, \
.need_dc_calib = true, \
.need_temp_offset_calib = true, \
@@ -474,7 +398,7 @@ struct iwl_cfg iwl6005_2bg_cfg = {
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
.eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
- .ops = &iwl6030_ops, \
+ .lib = &iwl6030_lib, \
.base_params = &iwl6000_g2_base_params, \
.bt_params = &iwl6000_bt_params, \
.need_dc_calib = true, \
@@ -555,7 +479,7 @@ struct iwl_cfg iwl130_bg_cfg = {
.valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
.eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
- .ops = &iwl6000_ops, \
+ .lib = &iwl6000_lib, \
.base_params = &iwl6000_base_params, \
.pa_type = IWL_PA_INTERNAL, \
.led_mode = IWL_LED_BLINK
@@ -582,7 +506,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.ucode_api_min = IWL6050_UCODE_API_MIN, \
.valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
.valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
- .ops = &iwl6050_ops, \
+ .lib = &iwl6000_lib, \
+ .additional_nic_config = iwl6050_additional_nic_config, \
.eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
@@ -605,7 +530,8 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.fw_name_pre = IWL6050_FW_PRE, \
.ucode_api_max = IWL6050_UCODE_API_MAX, \
.ucode_api_min = IWL6050_UCODE_API_MIN, \
- .ops = &iwl6150_ops, \
+ .lib = &iwl6000_lib, \
+ .additional_nic_config = iwl6150_additional_nic_config, \
.eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
@@ -631,7 +557,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.ucode_api_min = IWL6000_UCODE_API_MIN,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
- .ops = &iwl6000_ops,
+ .lib = &iwl6000_lib,
.base_params = &iwl6000_base_params,
.ht_params = &iwl6000_ht_params,
.need_dc_calib = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index c9255def108..72d6297602b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -66,6 +66,8 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-agn-calib.h"
+#include "iwl-trans.h"
+#include "iwl-agn.h"
/*****************************************************************************
* INIT calibrations framework
@@ -87,6 +89,7 @@ int iwl_send_calib_results(struct iwl_priv *priv)
struct iwl_host_cmd hcmd = {
.id = REPLY_PHY_CALIBRATION_CMD,
+ .flags = CMD_SYNC,
};
for (i = 0; i < IWL_CALIB_MAX; i++) {
@@ -95,7 +98,7 @@ int iwl_send_calib_results(struct iwl_priv *priv)
hcmd.len[0] = priv->calib_results[i].buf_len;
hcmd.data[0] = priv->calib_results[i].buf;
hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- ret = iwl_send_cmd_sync(priv, &hcmd);
+ ret = trans_send_cmd(&priv->trans, &hcmd);
if (ret) {
IWL_ERR(priv, "Error %d iteration %d\n",
ret, i);
@@ -481,7 +484,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
sizeof(u16)*HD_TABLE_SIZE);
- return iwl_send_cmd(priv, &cmd_out);
+ return trans_send_cmd(&priv->trans, &cmd_out);
}
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
@@ -545,7 +548,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
&(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);
- return iwl_send_cmd(priv, &cmd_out);
+ return trans_send_cmd(&priv->trans, &cmd_out);
}
void iwl_init_sensitivity(struct iwl_priv *priv)
@@ -837,6 +840,65 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
active_chains);
}
+static void iwlagn_gain_computation(struct iwl_priv *priv,
+ u32 average_noise[NUM_RX_CHAINS],
+ u16 min_average_noise_antenna_i,
+ u32 min_average_noise,
+ u8 default_chain)
+{
+ int i;
+ s32 delta_g;
+ struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+
+ /*
+ * Find Gain Code for the chains based on "default chain"
+ */
+ for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
+ if ((data->disconn_array[i])) {
+ data->delta_gain_code[i] = 0;
+ continue;
+ }
+
+ delta_g = (priv->cfg->base_params->chain_noise_scale *
+ ((s32)average_noise[default_chain] -
+ (s32)average_noise[i])) / 1500;
+
+ /* bound gain by 2 bits value max, 3rd bit is sign */
+ data->delta_gain_code[i] =
+ min(abs(delta_g),
+ (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+
+ if (delta_g < 0)
+ /*
+ * set negative sign ...
+ * note to Intel developers: This is uCode API format,
+ * not the format of any internal device registers.
+ * Do not change this format for e.g. 6050 or similar
+ * devices. Change format only if more resolution
+ * (i.e. more than 2 bits magnitude) is needed.
+ */
+ data->delta_gain_code[i] |= (1 << 2);
+ }
+
+ IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
+ data->delta_gain_code[1], data->delta_gain_code[2]);
+
+ if (!data->radio_write) {
+ struct iwl_calib_chain_noise_gain_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ iwl_set_calib_hdr(&cmd.hdr,
+ priv->phy_calib_chain_noise_gain_cmd);
+ cmd.delta_gain_1 = data->delta_gain_code[1];
+ cmd.delta_gain_2 = data->delta_gain_code[2];
+ trans_send_cmd_pdu(&priv->trans, REPLY_PHY_CALIBRATION_CMD,
+ CMD_ASYNC, sizeof(cmd), &cmd);
+
+ data->radio_write = 1;
+ data->state = IWL_CHAIN_NOISE_CALIBRATED;
+ }
+}
/*
* Accumulate 16 beacons of signal and noise statistics for each of
@@ -991,16 +1053,14 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
min_average_noise, min_average_noise_antenna_i);
- if (priv->cfg->ops->utils->gain_computation)
- priv->cfg->ops->utils->gain_computation(priv, average_noise,
+ iwlagn_gain_computation(priv, average_noise,
min_average_noise_antenna_i, min_average_noise,
find_first_chain(priv->cfg->valid_rx_ant));
/* Some power changes may have been made during the calibration.
* Update and commit the RXON
*/
- if (priv->cfg->ops->lib->update_chain_flags)
- priv->cfg->ops->lib->update_chain_flags(priv);
+ iwl_update_chain_flags(priv);
data->state = IWL_CHAIN_NOISE_DONE;
iwl_power_update_mode(priv, false);
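
As a quick check of the encoding used in iwlagn_gain_computation() above, here is a standalone sketch: the magnitude of delta_g is clamped into bits 0-1 and bit 2 carries the sign. The 1500 divisor comes from the hunk above; the clamp value of 3 is assumed from the "bound gain by 2 bits" comment.

/*
 * Sign/magnitude delta-gain encoding: bits 0-1 hold the clamped magnitude,
 * bit 2 holds the sign.
 */
#include <stdio.h>
#include <stdlib.h>

#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3       /* assumed 2-bit magnitude */

static unsigned char delta_gain_code(int chain_noise_scale,
                                     int noise_default, int noise_chain)
{
        int delta_g = chain_noise_scale * (noise_default - noise_chain) / 1500;
        int mag = abs(delta_g);
        unsigned char code;

        if (mag > CHAIN_NOISE_MAX_DELTA_GAIN_CODE)
                mag = CHAIN_NOISE_MAX_DELTA_GAIN_CODE;
        code = (unsigned char)mag;
        if (delta_g < 0)
                code |= 1 << 2;                 /* sign bit, uCode API format */
        return code;
}

int main(void)
{
        /* this chain noisier than the default -> negative delta, clamped */
        printf("code = 0x%x\n", delta_gain_code(1000, 3000, 4800));
        return 0;
}
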
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
index 4ef4dd93425..a869fc9205d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
@@ -71,13 +71,6 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv);
void iwl_init_sensitivity(struct iwl_priv *priv);
void iwl_reset_run_time_calib(struct iwl_priv *priv);
-static inline void iwl_chain_noise_reset(struct iwl_priv *priv)
-{
-
- if (!priv->disable_chain_noise_cal &&
- priv->cfg->ops->utils->chain_noise_reset)
- priv->cfg->ops->utils->chain_noise_reset(priv);
-}
int iwl_send_calib_results(struct iwl_priv *priv);
int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
index 2ef9448b1c2..b8347db850e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
@@ -108,18 +108,16 @@ err:
int iwl_eeprom_check_sku(struct iwl_priv *priv)
{
- u16 eeprom_sku;
u16 radio_cfg;
- eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
-
if (!priv->cfg->sku) {
/* not using sku overwrite */
- priv->cfg->sku =
- ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >>
- EEPROM_SKU_CAP_BAND_POS);
- if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE)
- priv->cfg->sku |= IWL_SKU_N;
+ priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
+ if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE &&
+ !priv->cfg->ht_params) {
+ IWL_ERR(priv, "Invalid 11n configuration\n");
+ return -EINVAL;
+ }
}
if (!priv->cfg->sku) {
IWL_ERR(priv, "Invalid device sku\n");
@@ -152,7 +150,7 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
{
- const u8 *addr = priv->cfg->ops->lib->eeprom_ops.query_addr(priv,
+ const u8 *addr = iwl_eeprom_query_addr(priv,
EEPROM_MAC_ADDRESS);
memcpy(mac, addr, ETH_ALEN);
}
@@ -247,10 +245,10 @@ void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
/* the length is in 16-bit words, but we want entries */
- txp_len = (__le16 *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
+ txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
- txp_array = (void *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
+ txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
for (idx = 0; idx < entries; idx++) {
txp = &txp_array[idx];
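
The arithmetic in iwlcore_eeprom_enhanced_txpower() above is easy to miss: the EEPROM stores the table length in 16-bit words, while each enhanced-txpower entry is 8 bytes, hence the multiply-by-2 then divide by EEPROM_TXP_ENTRY_LEN. A small sketch of just that computation:

/*
 * entries = length-in-16-bit-words * 2 bytes / 8 bytes per entry.
 */
#include <stdio.h>
#include <stdint.h>

#define EEPROM_TXP_ENTRY_LEN 8  /* sizeof(struct iwl_eeprom_enhanced_txpwr) */

static unsigned int txp_entries(uint16_t len_in_words)
{
        return (unsigned int)len_in_words * 2 / EEPROM_TXP_ENTRY_LEN;
}

int main(void)
{
        printf("%u entries\n", txp_entries(96));        /* 96 words -> 24 */
        return 0;
}
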
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
deleted file mode 100644
index 23fa93deae9..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-agn.h"
-
-int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
-{
- struct iwl_tx_ant_config_cmd tx_ant_cmd = {
- .valid = cpu_to_le32(valid_tx_ant),
- };
-
- if (IWL_UCODE_API(priv->ucode_ver) > 1) {
- IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
- return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
- sizeof(struct iwl_tx_ant_config_cmd),
- &tx_ant_cmd);
- } else {
- IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
- return -EOPNOTSUPP;
- }
-}
-
-static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
-{
- u16 size = (u16)sizeof(struct iwl_addsta_cmd);
- struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
- memcpy(addsta, cmd, size);
- /* reserved in 5000 */
- addsta->rate_n_flags = cpu_to_le16(0);
- return size;
-}
-
-static void iwlagn_gain_computation(struct iwl_priv *priv,
- u32 average_noise[NUM_RX_CHAINS],
- u16 min_average_noise_antenna_i,
- u32 min_average_noise,
- u8 default_chain)
-{
- int i;
- s32 delta_g;
- struct iwl_chain_noise_data *data = &priv->chain_noise_data;
-
- /*
- * Find Gain Code for the chains based on "default chain"
- */
- for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
- if ((data->disconn_array[i])) {
- data->delta_gain_code[i] = 0;
- continue;
- }
-
- delta_g = (priv->cfg->base_params->chain_noise_scale *
- ((s32)average_noise[default_chain] -
- (s32)average_noise[i])) / 1500;
-
- /* bound gain by 2 bits value max, 3rd bit is sign */
- data->delta_gain_code[i] =
- min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
-
- if (delta_g < 0)
- /*
- * set negative sign ...
- * note to Intel developers: This is uCode API format,
- * not the format of any internal device registers.
- * Do not change this format for e.g. 6050 or similar
- * devices. Change format only if more resolution
- * (i.e. more than 2 bits magnitude) is needed.
- */
- data->delta_gain_code[i] |= (1 << 2);
- }
-
- IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
- data->delta_gain_code[1], data->delta_gain_code[2]);
-
- if (!data->radio_write) {
- struct iwl_calib_chain_noise_gain_cmd cmd;
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.hdr.op_code = priv->_agn.phy_calib_chain_noise_gain_cmd;
- cmd.hdr.first_group = 0;
- cmd.hdr.groups_num = 1;
- cmd.hdr.data_valid = 1;
- cmd.delta_gain_1 = data->delta_gain_code[1];
- cmd.delta_gain_2 = data->delta_gain_code[2];
- iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd, NULL);
-
- data->radio_write = 1;
- data->state = IWL_CHAIN_NOISE_CALIBRATED;
- }
-}
-
-static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
-{
- struct iwl_chain_noise_data *data = &priv->chain_noise_data;
- int ret;
-
- if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
- iwl_is_any_associated(priv)) {
- struct iwl_calib_chain_noise_reset_cmd cmd;
-
- /* clear data for chain noise calibration algorithm */
- data->chain_noise_a = 0;
- data->chain_noise_b = 0;
- data->chain_noise_c = 0;
- data->chain_signal_a = 0;
- data->chain_signal_b = 0;
- data->chain_signal_c = 0;
- data->beacon_count = 0;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.op_code = priv->_agn.phy_calib_chain_noise_reset_cmd;
- cmd.hdr.first_group = 0;
- cmd.hdr.groups_num = 1;
- cmd.hdr.data_valid = 1;
- ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd);
- if (ret)
- IWL_ERR(priv,
- "Could not send REPLY_PHY_CALIBRATION_CMD\n");
- data->state = IWL_CHAIN_NOISE_ACCUMULATE;
- IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
- }
-}
-
-static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- __le16 fc, __le32 *tx_flags)
-{
- if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
- info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
- info->flags & IEEE80211_TX_CTL_AMPDU)
- *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
-}
-
-/* Calc max signal level (dBm) among 3 possible receivers */
-static int iwlagn_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
-{
- /* data from PHY/DSP regarding signal strength, etc.,
- * contents are always there, not configurable by host
- */
- struct iwlagn_non_cfg_phy *ncphy =
- (struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
- u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
- u8 agc;
-
- val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]);
- agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS;
-
- /* Find max rssi among 3 possible receivers.
- * These values are measured by the digital signal processor (DSP).
- * They should stay fairly constant even as the signal strength varies,
- * if the radio's automatic gain control (AGC) is working right.
- * AGC value (see below) will provide the "interesting" info.
- */
- val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]);
- rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >>
- IWLAGN_OFDM_RSSI_A_BIT_POS;
- rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >>
- IWLAGN_OFDM_RSSI_B_BIT_POS;
- val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]);
- rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >>
- IWLAGN_OFDM_RSSI_C_BIT_POS;
-
- max_rssi = max_t(u32, rssi_a, rssi_b);
- max_rssi = max_t(u32, max_rssi, rssi_c);
-
- IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
- rssi_a, rssi_b, rssi_c, max_rssi, agc);
-
- /* dBm = max_rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWLAGN_RSSI_OFFSET;
-}
-
-static int iwlagn_set_pan_params(struct iwl_priv *priv)
-{
- struct iwl_wipan_params_cmd cmd;
- struct iwl_rxon_context *ctx_bss, *ctx_pan;
- int slot0 = 300, slot1 = 0;
- int ret;
-
- if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
- return 0;
-
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
-
- lockdep_assert_held(&priv->mutex);
-
- ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
- ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
-
- /*
- * If the PAN context is inactive, then we don't need
- * to update the PAN parameters, the last thing we'll
- * have done before it goes inactive is making the PAN
- * parameters be WLAN-only.
- */
- if (!ctx_pan->is_active)
- return 0;
-
- memset(&cmd, 0, sizeof(cmd));
-
- /* only 2 slots are currently allowed */
- cmd.num_slots = 2;
-
- cmd.slots[0].type = 0; /* BSS */
- cmd.slots[1].type = 1; /* PAN */
-
- if (priv->_agn.hw_roc_channel) {
- /* both contexts must be used for this to happen */
- slot1 = priv->_agn.hw_roc_duration;
- slot0 = IWL_MIN_SLOT_TIME;
- } else if (ctx_bss->vif && ctx_pan->vif) {
- int bcnint = ctx_pan->vif->bss_conf.beacon_int;
- int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
-
- /* should be set, but seems unused?? */
- cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
-
- if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
- bcnint &&
- bcnint != ctx_bss->vif->bss_conf.beacon_int) {
- IWL_ERR(priv,
- "beacon intervals don't match (%d, %d)\n",
- ctx_bss->vif->bss_conf.beacon_int,
- ctx_pan->vif->bss_conf.beacon_int);
- } else
- bcnint = max_t(int, bcnint,
- ctx_bss->vif->bss_conf.beacon_int);
- if (!bcnint)
- bcnint = DEFAULT_BEACON_INTERVAL;
- slot0 = bcnint / 2;
- slot1 = bcnint - slot0;
-
- if (test_bit(STATUS_SCAN_HW, &priv->status) ||
- (!ctx_bss->vif->bss_conf.idle &&
- !ctx_bss->vif->bss_conf.assoc)) {
- slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
- slot1 = IWL_MIN_SLOT_TIME;
- } else if (!ctx_pan->vif->bss_conf.idle &&
- !ctx_pan->vif->bss_conf.assoc) {
- slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
- slot0 = IWL_MIN_SLOT_TIME;
- }
- } else if (ctx_pan->vif) {
- slot0 = 0;
- slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
- ctx_pan->vif->bss_conf.beacon_int;
- slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
-
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
- slot1 = IWL_MIN_SLOT_TIME;
- }
- }
-
- cmd.slots[0].width = cpu_to_le16(slot0);
- cmd.slots[1].width = cpu_to_le16(slot1);
-
- ret = iwl_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, sizeof(cmd), &cmd);
- if (ret)
- IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
-
- return ret;
-}
-
-struct iwl_hcmd_ops iwlagn_hcmd = {
- .commit_rxon = iwlagn_commit_rxon,
- .set_rxon_chain = iwlagn_set_rxon_chain,
- .set_tx_ant = iwlagn_send_tx_ant_config,
- .send_bt_config = iwl_send_bt_config,
- .set_pan_params = iwlagn_set_pan_params,
-};
-
-struct iwl_hcmd_ops iwlagn_bt_hcmd = {
- .commit_rxon = iwlagn_commit_rxon,
- .set_rxon_chain = iwlagn_set_rxon_chain,
- .set_tx_ant = iwlagn_send_tx_ant_config,
- .send_bt_config = iwlagn_send_advance_bt_config,
- .set_pan_params = iwlagn_set_pan_params,
-};
-
-struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
- .build_addsta_hcmd = iwlagn_build_addsta_hcmd,
- .gain_computation = iwlagn_gain_computation,
- .chain_noise_reset = iwlagn_chain_noise_reset,
- .tx_cmd_protection = iwlagn_tx_cmd_protection,
- .calc_rssi = iwlagn_calc_rssi,
- .request_scan = iwlagn_request_scan,
- .post_scan = iwlagn_post_scan,
-};
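
Of the helpers removed with iwl-agn-hcmd.c, iwlagn_calc_rssi() is the one whose arithmetic is worth keeping in mind: it takes the strongest of the three per-chain DSP readings and subtracts the AGC value plus the fixed IWLAGN_RSSI_OFFSET of 44 (still defined in iwl-agn-hw.h below). A minimal sketch, leaving out the PHY-word bitfield extraction:

/*
 * dBm = max(rssi_a, rssi_b, rssi_c) - agc - IWLAGN_RSSI_OFFSET.
 */
#include <stdio.h>

#define IWLAGN_RSSI_OFFSET 44

static int calc_rssi_dbm(unsigned int rssi_a, unsigned int rssi_b,
                         unsigned int rssi_c, unsigned int agc)
{
        unsigned int max_rssi = rssi_a;

        if (rssi_b > max_rssi)
                max_rssi = rssi_b;
        if (rssi_c > max_rssi)
                max_rssi = rssi_c;

        /* higher AGC (more radio gain) means a weaker real signal */
        return (int)max_rssi - (int)agc - IWLAGN_RSSI_OFFSET;
}

int main(void)
{
        printf("%d dBm\n", calc_rssi_dbm(97, 92, 95, 100));     /* -47 dBm */
        return 0;
}
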
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 7bd19f4e66d..0e5b842529c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -81,13 +81,6 @@
/* RSSI to dBm */
#define IWLAGN_RSSI_OFFSET 44
-/* PCI registers */
-#define PCI_CFG_RETRY_TIMEOUT 0x041
-
-/* PCI register values */
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-
#define IWLAGN_DEFAULT_TX_RETRY 15
/* Limit range of txpower output target to be between these values */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
deleted file mode 100644
index 0d5fda44c3a..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/gfp.h>
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-agn.h"
-#include "iwl-helpers.h"
-
-#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
-
-/* Free dram table */
-void iwl_free_isr_ict(struct iwl_priv *priv)
-{
- if (priv->_agn.ict_tbl_vir) {
- dma_free_coherent(&priv->pci_dev->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- priv->_agn.ict_tbl_vir,
- priv->_agn.ict_tbl_dma);
- priv->_agn.ict_tbl_vir = NULL;
- }
-}
-
-
-/* Allocate the DRAM shared table; it is PAGE_SIZE aligned.
- * Also reset all data related to the ICT table interrupt.
- */
-int iwl_alloc_isr_ict(struct iwl_priv *priv)
-{
-
- /* allocate shared data table */
- priv->_agn.ict_tbl_vir =
- dma_alloc_coherent(&priv->pci_dev->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- &priv->_agn.ict_tbl_dma, GFP_KERNEL);
- if (!priv->_agn.ict_tbl_vir)
- return -ENOMEM;
-
- /* align table to PAGE_SIZE boundary */
- priv->_agn.aligned_ict_tbl_dma = ALIGN(priv->_agn.ict_tbl_dma, PAGE_SIZE);
-
- IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
- (unsigned long long)priv->_agn.ict_tbl_dma,
- (unsigned long long)priv->_agn.aligned_ict_tbl_dma,
- (int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
-
- priv->_agn.ict_tbl = priv->_agn.ict_tbl_vir +
- (priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma);
-
- IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
- priv->_agn.ict_tbl, priv->_agn.ict_tbl_vir,
- (int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
-
- /* reset table and index to all 0 */
- memset(priv->_agn.ict_tbl_vir,0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
- priv->_agn.ict_index = 0;
-
- /* add periodic RX interrupt */
- priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
- return 0;
-}
-
-/* The device is going up: inform it that it will use the ICT interrupt
- * table, and tell the driver to start using ICT interrupts.
- */
-int iwl_reset_ict(struct iwl_priv *priv)
-{
- u32 val;
- unsigned long flags;
-
- if (!priv->_agn.ict_tbl_vir)
- return 0;
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
-
- memset(&priv->_agn.ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
-
- val = priv->_agn.aligned_ict_tbl_dma >> PAGE_SHIFT;
-
- val |= CSR_DRAM_INT_TBL_ENABLE;
- val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
-
- IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
- "aligned dma address %Lx\n",
- val, (unsigned long long)priv->_agn.aligned_ict_tbl_dma);
-
- iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
- priv->_agn.use_ict = true;
- priv->_agn.ict_index = 0;
- iwl_write32(priv, CSR_INT, priv->inta_mask);
- iwl_enable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-/* Device is going down disable ict interrupt usage */
-void iwl_disable_ict(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->_agn.use_ict = false;
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static irqreturn_t iwl_isr(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- unsigned long flags;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 inta_fh;
-#endif
- if (!priv)
- return IRQ_NONE;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(priv, CSR_INT);
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta) {
- IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- goto unplugged;
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
- "fh 0x%08x\n", inta, inta_mask, inta_fh);
- }
-#endif
-
- priv->_agn.inta |= inta;
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&priv->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
- iwl_enable_interrupts(priv);
-
- unplugged:
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only re-enable if disabled by irq and no tasklet was scheduled. */
- if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
- iwl_enable_interrupts(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_NONE;
-}
-
-/* Interrupt handler using the ICT table.  With this handler the driver stops
- * reading the INTA register to fetch the device's interrupts (reading that
- * register is expensive).  Instead, the device writes interrupt causes into
- * the ICT DRAM table, increments the index and fires an interrupt; the driver
- * then ORs all ICT table entries from the current index up to the first entry
- * with a 0 value.  The result is the interrupt set we need to service; the
- * driver sets the entries back to 0 and updates the index.
- */
-irqreturn_t iwl_isr_ict(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- u32 val = 0;
- unsigned long flags;
-
- if (!priv)
- return IRQ_NONE;
-
- /* dram interrupt table not set yet,
- * use legacy interrupt.
- */
- if (!priv->_agn.use_ict)
- return iwl_isr(irq, data);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here.
- */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!priv->_agn.ict_tbl[priv->_agn.ict_index]) {
- IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- /* read all entries that are not 0, starting at ict_index */
- while (priv->_agn.ict_tbl[priv->_agn.ict_index]) {
-
- val |= le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]);
- IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
- priv->_agn.ict_index,
- le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]));
- priv->_agn.ict_tbl[priv->_agn.ict_index] = 0;
- priv->_agn.ict_index = iwl_queue_inc_wrap(priv->_agn.ict_index,
- ICT_COUNT);
-
- }
-
- /* We should not get this value, just ignore it. */
- if (val == 0xffffffff)
- val = 0;
-
- /*
- * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
- * (bit 15 before shifting it to 31) to clear when using interrupt
- * coalescing. fortunately, bits 18 and 19 stay set when this happens
- * so we use them to decide on the real state of the Rx bit.
- * In other words, bit 15 is set if bit 18 or bit 19 is set.
- */
- if (val & 0xC0000)
- val |= 0x8000;
-
- inta = (0xff & val) | ((0xff00 & val) << 16);
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
- inta, inta_mask, val);
-
- inta &= priv->inta_mask;
- priv->_agn.inta |= inta;
-
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta))
- tasklet_schedule(&priv->irq_tasklet);
- else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) {
- /* Allow interrupts if they were disabled by this handler and
- * no tasklet was scheduled; we should not enable interrupts here,
- * the tasklet will enable them.
- */
- iwl_enable_interrupts(priv);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service.
- * only Re-enable if disabled by irq.
- */
- if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
- iwl_enable_interrupts(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_NONE;
-}
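
The trickiest part of the deleted ICT handler is rebuilding the INTA value from the OR of all pending ICT table entries, including the hardware workaround that re-derives the Rx bit (bit 15) from bits 18/19 when interrupt coalescing drops it. A standalone sketch of that reconstruction:

/*
 * ICT value -> INTA-style value, as in the deleted iwl_isr_ict().
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t ict_val_to_inta(uint32_t val)
{
        if (val == 0xffffffff)          /* bogus read, treat as no interrupt */
                val = 0;

        /* h/w workaround: bit 15 is implied by bit 18 or bit 19 */
        if (val & 0xC0000)
                val |= 0x8000;

        /* low byte stays in place, the second byte moves up to bits 24-31 */
        return (0xff & val) | ((0xff00 & val) << 16);
}

int main(void)
{
        printf("inta = 0x%08x\n", (unsigned int)ict_val_to_inta(0x000C0002));
        return 0;
}
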
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index f803fb62f8b..3bee0f119bc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -39,6 +39,7 @@
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-sta.h"
+#include "iwl-trans.h"
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
@@ -52,73 +53,73 @@ static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
switch (status) {
case TX_STATUS_POSTPONE_DELAY:
- priv->_agn.reply_tx_stats.pp_delay++;
+ priv->reply_tx_stats.pp_delay++;
break;
case TX_STATUS_POSTPONE_FEW_BYTES:
- priv->_agn.reply_tx_stats.pp_few_bytes++;
+ priv->reply_tx_stats.pp_few_bytes++;
break;
case TX_STATUS_POSTPONE_BT_PRIO:
- priv->_agn.reply_tx_stats.pp_bt_prio++;
+ priv->reply_tx_stats.pp_bt_prio++;
break;
case TX_STATUS_POSTPONE_QUIET_PERIOD:
- priv->_agn.reply_tx_stats.pp_quiet_period++;
+ priv->reply_tx_stats.pp_quiet_period++;
break;
case TX_STATUS_POSTPONE_CALC_TTAK:
- priv->_agn.reply_tx_stats.pp_calc_ttak++;
+ priv->reply_tx_stats.pp_calc_ttak++;
break;
case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
- priv->_agn.reply_tx_stats.int_crossed_retry++;
+ priv->reply_tx_stats.int_crossed_retry++;
break;
case TX_STATUS_FAIL_SHORT_LIMIT:
- priv->_agn.reply_tx_stats.short_limit++;
+ priv->reply_tx_stats.short_limit++;
break;
case TX_STATUS_FAIL_LONG_LIMIT:
- priv->_agn.reply_tx_stats.long_limit++;
+ priv->reply_tx_stats.long_limit++;
break;
case TX_STATUS_FAIL_FIFO_UNDERRUN:
- priv->_agn.reply_tx_stats.fifo_underrun++;
+ priv->reply_tx_stats.fifo_underrun++;
break;
case TX_STATUS_FAIL_DRAIN_FLOW:
- priv->_agn.reply_tx_stats.drain_flow++;
+ priv->reply_tx_stats.drain_flow++;
break;
case TX_STATUS_FAIL_RFKILL_FLUSH:
- priv->_agn.reply_tx_stats.rfkill_flush++;
+ priv->reply_tx_stats.rfkill_flush++;
break;
case TX_STATUS_FAIL_LIFE_EXPIRE:
- priv->_agn.reply_tx_stats.life_expire++;
+ priv->reply_tx_stats.life_expire++;
break;
case TX_STATUS_FAIL_DEST_PS:
- priv->_agn.reply_tx_stats.dest_ps++;
+ priv->reply_tx_stats.dest_ps++;
break;
case TX_STATUS_FAIL_HOST_ABORTED:
- priv->_agn.reply_tx_stats.host_abort++;
+ priv->reply_tx_stats.host_abort++;
break;
case TX_STATUS_FAIL_BT_RETRY:
- priv->_agn.reply_tx_stats.bt_retry++;
+ priv->reply_tx_stats.bt_retry++;
break;
case TX_STATUS_FAIL_STA_INVALID:
- priv->_agn.reply_tx_stats.sta_invalid++;
+ priv->reply_tx_stats.sta_invalid++;
break;
case TX_STATUS_FAIL_FRAG_DROPPED:
- priv->_agn.reply_tx_stats.frag_drop++;
+ priv->reply_tx_stats.frag_drop++;
break;
case TX_STATUS_FAIL_TID_DISABLE:
- priv->_agn.reply_tx_stats.tid_disable++;
+ priv->reply_tx_stats.tid_disable++;
break;
case TX_STATUS_FAIL_FIFO_FLUSHED:
- priv->_agn.reply_tx_stats.fifo_flush++;
+ priv->reply_tx_stats.fifo_flush++;
break;
case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
- priv->_agn.reply_tx_stats.insuff_cf_poll++;
+ priv->reply_tx_stats.insuff_cf_poll++;
break;
case TX_STATUS_FAIL_PASSIVE_NO_RX:
- priv->_agn.reply_tx_stats.fail_hw_drop++;
+ priv->reply_tx_stats.fail_hw_drop++;
break;
case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
- priv->_agn.reply_tx_stats.sta_color_mismatch++;
+ priv->reply_tx_stats.sta_color_mismatch++;
break;
default:
- priv->_agn.reply_tx_stats.unknown++;
+ priv->reply_tx_stats.unknown++;
break;
}
}
@@ -129,43 +130,43 @@ static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
switch (status) {
case AGG_TX_STATE_UNDERRUN_MSK:
- priv->_agn.reply_agg_tx_stats.underrun++;
+ priv->reply_agg_tx_stats.underrun++;
break;
case AGG_TX_STATE_BT_PRIO_MSK:
- priv->_agn.reply_agg_tx_stats.bt_prio++;
+ priv->reply_agg_tx_stats.bt_prio++;
break;
case AGG_TX_STATE_FEW_BYTES_MSK:
- priv->_agn.reply_agg_tx_stats.few_bytes++;
+ priv->reply_agg_tx_stats.few_bytes++;
break;
case AGG_TX_STATE_ABORT_MSK:
- priv->_agn.reply_agg_tx_stats.abort++;
+ priv->reply_agg_tx_stats.abort++;
break;
case AGG_TX_STATE_LAST_SENT_TTL_MSK:
- priv->_agn.reply_agg_tx_stats.last_sent_ttl++;
+ priv->reply_agg_tx_stats.last_sent_ttl++;
break;
case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
- priv->_agn.reply_agg_tx_stats.last_sent_try++;
+ priv->reply_agg_tx_stats.last_sent_try++;
break;
case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
- priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++;
+ priv->reply_agg_tx_stats.last_sent_bt_kill++;
break;
case AGG_TX_STATE_SCD_QUERY_MSK:
- priv->_agn.reply_agg_tx_stats.scd_query++;
+ priv->reply_agg_tx_stats.scd_query++;
break;
case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
- priv->_agn.reply_agg_tx_stats.bad_crc32++;
+ priv->reply_agg_tx_stats.bad_crc32++;
break;
case AGG_TX_STATE_RESPONSE_MSK:
- priv->_agn.reply_agg_tx_stats.response++;
+ priv->reply_agg_tx_stats.response++;
break;
case AGG_TX_STATE_DUMP_TX_MSK:
- priv->_agn.reply_agg_tx_stats.dump_tx++;
+ priv->reply_agg_tx_stats.dump_tx++;
break;
case AGG_TX_STATE_DELAY_TX_MSK:
- priv->_agn.reply_agg_tx_stats.delay_tx++;
+ priv->reply_agg_tx_stats.delay_tx++;
break;
default:
- priv->_agn.reply_agg_tx_stats.unknown++;
+ priv->reply_agg_tx_stats.unknown++;
break;
}
}
@@ -390,8 +391,7 @@ void iwl_check_abort_status(struct iwl_priv *priv,
}
}
-static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -400,6 +400,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct ieee80211_tx_info *info;
struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ struct ieee80211_hdr *hdr;
struct iwl_tx_info *txb;
u32 status = le16_to_cpu(tx_resp->status.status);
int tid;
@@ -408,9 +409,9 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
unsigned long flags;
if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
- "is out of range [0-%d] %d %d\n", txq_id,
- index, txq->q.n_bd, txq->q.write_ptr,
+ IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
+ "index %d is out of range [0-%d] %d %d\n", __func__,
+ txq_id, index, txq->q.n_bd, txq->q.write_ptr,
txq->q.read_ptr);
return;
}
@@ -426,6 +427,11 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
IWLAGN_TX_RES_RA_POS;
spin_lock_irqsave(&priv->sta_lock, flags);
+
+ hdr = (void *)txb->skb->data;
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ priv->last_seq_ctl = tx_resp->seq_ctl;
+
if (txq->sched_retry) {
const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
struct iwl_ht_agg *agg;
@@ -438,7 +444,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
priv->cfg->bt_params &&
priv->cfg->bt_params->advanced_bt_coexist) {
- IWL_WARN(priv, "receive reply tx with bt_kill\n");
+ IWL_DEBUG_COEX(priv, "receive reply tx with bt_kill\n");
}
iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
@@ -478,27 +484,6 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
-void iwlagn_rx_handler_setup(struct iwl_priv *priv)
-{
- /* init calibration handlers */
- priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
- iwlagn_rx_calib_result;
- priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
-
- /* set up notification wait support */
- spin_lock_init(&priv->_agn.notif_wait_lock);
- INIT_LIST_HEAD(&priv->_agn.notif_waits);
- init_waitqueue_head(&priv->_agn.notif_waitq);
-}
-
-void iwlagn_setup_deferred_work(struct iwl_priv *priv)
-{
- /*
- * nothing need to be done here anymore
- * still keep for future use if needed
- */
-}
-
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
@@ -540,8 +525,8 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
else
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
- return iwl_send_cmd_pdu(priv, tx_ant_cfg_cmd, sizeof(tx_power_cmd),
- &tx_power_cmd);
+ return trans_send_cmd_pdu(&priv->trans, tx_ant_cfg_cmd, CMD_SYNC,
+ sizeof(tx_power_cmd), &tx_power_cmd);
}
void iwlagn_temperature(struct iwl_priv *priv)
@@ -610,8 +595,7 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
return (address & ADDRESS_MSK) + (offset << 1);
}
-const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
- size_t offset)
+const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
{
u32 address = eeprom_indirect_address(priv, offset);
BUG_ON(address >= priv->cfg->base_params->eeprom_size);
@@ -622,367 +606,12 @@ struct iwl_mod_params iwlagn_mod_params = {
.amsdu_size_8K = 1,
.restart_fw = 1,
.plcp_check = true,
+ .bt_coex_active = true,
+ .no_sleep_autoadjust = true,
+ .power_level = IWL_POWER_INDEX_1,
/* the rest are 0 by default */
};
-void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-
- for (i = 0; i < RX_QUEUE_SIZE; i++)
- rxq->queue[i] = NULL;
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- u32 rb_size;
- const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
-
- rb_timeout = RX_RB_TIMEOUT;
-
- if (iwlagn_mod_params.amsdu_size_8K)
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- else
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
- /* Stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
- /* Reset driver's Rx queue write index */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->bd_dma >> 8));
-
- /* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
-
- /* Enable Rx DMA
- * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
- * the credit mechanism in 5000 HW RX FIFO
- * Direct rx interrupts to hosts
- * Rx buffer size 4 or 8k
- * RB timeout 0x10
- * 256 RBDs
- */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
- rb_size|
- (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
- return 0;
-}
-
-static void iwlagn_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do:
-
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
- */
-
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-}
-
-int iwlagn_hw_nic_init(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rx_queue *rxq = &priv->rxq;
- int ret;
-
- /* nic_init */
- spin_lock_irqsave(&priv->lock, flags);
- priv->cfg->ops->lib->apm_ops.init(priv);
-
- /* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwlagn_set_pwr_vmain(priv);
-
- priv->cfg->ops->lib->apm_ops.config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- if (!rxq->bd) {
- ret = iwl_rx_queue_alloc(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to initialize Rx queue\n");
- return -ENOMEM;
- }
- } else
- iwlagn_rx_queue_reset(priv, rxq);
-
- iwlagn_rx_replenish(priv);
-
- iwlagn_rx_init(priv, rxq);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- rxq->need_update = 1;
- iwl_rx_queue_update_write_ptr(priv, rxq);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Allocate or reset and init all Tx and Command queues */
- if (!priv->txq) {
- ret = iwlagn_txq_ctx_alloc(priv);
- if (ret)
- return ret;
- } else
- iwlagn_txq_ctx_reset(priv);
-
- if (priv->cfg->base_params->shadow_reg_enable) {
- /* enable shadow regs in HW */
- iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
- 0x800FFFFF);
- }
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-
-/**
- * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-void iwlagn_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
-
- spin_lock_irqsave(&rxq->lock, flags);
- while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* The overwritten rxb must be a used one */
- rxb = rxq->queue[rxq->write];
- BUG_ON(rxb && rxb->page);
-
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
- rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-
-/**
- * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except for during initialization)
- */
-void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "alloc_pages failed, "
- "order: %d\n",
- priv->hw_params.rx_page_order);
-
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- BUG_ON(rxb->page);
- rxb->page = page;
- /* Get physical address of the RB */
- rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
-
-void iwlagn_rx_replenish(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- iwlagn_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwlagn_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-void iwlagn_rx_replenish_now(struct iwl_priv *priv)
-{
- iwlagn_rx_allocate(priv, GFP_ATOMIC);
-
- iwlagn_rx_queue_restock(priv);
-}
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL
- * This free routine walks the list of POOL entries and if SKB is set to
- * non NULL it is unmapped and freed
- */
-void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- }
-
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
- dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- rxq->bd = NULL;
- rxq->rb_stts = NULL;
-}
-
-int iwlagn_rxq_stop(struct iwl_priv *priv)
-{
-
- /* stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-
- return 0;
-}
-
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
{
int idx = 0;
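
The removed Rx-queue code above also documents the device's addressing constraints: the receive-buffer DMA address is handed to the hardware shifted right by 8, so it must fit in 36 bits and be 256-byte aligned (the two BUG_ON checks in iwlagn_rx_allocate()). A small sketch of that conversion, with the checks made explicit:

/*
 * Sketch of iwlagn_dma_addr2rbd_ptr() plus the alignment checks from the
 * removed Rx code.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t dma_addr2rbd_ptr(uint64_t dma_addr)
{
        assert((dma_addr & ~((1ULL << 36) - 1)) == 0);  /* at most 36 bits  */
        assert((dma_addr & 0xff) == 0);                 /* 256-byte aligned */
        return (uint32_t)(dma_addr >> 8);
}

int main(void)
{
        printf("rbd = 0x%08x\n", (unsigned int)dma_addr2rbd_ptr(0x12345600ULL));
        return 0;
}
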
@@ -1126,7 +755,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
{
- struct sk_buff *skb = priv->_agn.offchan_tx_skb;
+ struct sk_buff *skb = priv->offchan_tx_skb;
if (skb->len < maxlen)
maxlen = skb->len;
@@ -1141,6 +770,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_CMD,
.len = { sizeof(struct iwl_scan_cmd), },
+ .flags = CMD_SYNC,
};
struct iwl_scan_cmd *scan;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
@@ -1211,7 +841,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
} else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
scan->suspend_time = 0;
scan->max_out_time =
- cpu_to_le32(1024 * priv->_agn.offchan_tx_timeout);
+ cpu_to_le32(1024 * priv->offchan_tx_timeout);
}
switch (priv->scan_type) {
@@ -1399,9 +1029,9 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan_ch = (void *)&scan->data[cmd_len];
scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
scan_ch->channel =
- cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value);
+ cpu_to_le16(priv->offchan_tx_chan->hw_value);
scan_ch->active_dwell =
- cpu_to_le16(priv->_agn.offchan_tx_timeout);
+ cpu_to_le16(priv->offchan_tx_timeout);
scan_ch->passive_dwell = 0;
/* Set txpower levels to defaults */
@@ -1411,7 +1041,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
* power level:
* scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
*/
- if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
+ if (priv->offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
else
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -1433,17 +1063,14 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
/* set scan bit here for PAN params */
set_bit(STATUS_SCAN_HW, &priv->status);
- if (priv->cfg->ops->hcmd->set_pan_params) {
- ret = priv->cfg->ops->hcmd->set_pan_params(priv);
- if (ret)
- return ret;
- }
+ ret = iwlagn_set_pan_params(priv);
+ if (ret)
+ return ret;
- ret = iwl_send_cmd_sync(priv, &cmd);
+ ret = trans_send_cmd(&priv->trans, &cmd);
if (ret) {
clear_bit(STATUS_SCAN_HW, &priv->status);
- if (priv->cfg->ops->hcmd->set_pan_params)
- priv->cfg->ops->hcmd->set_pan_params(priv);
+ iwlagn_set_pan_params(priv);
}
return ret;
@@ -1528,23 +1155,32 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
might_sleep();
memset(&flush_cmd, 0, sizeof(flush_cmd));
- flush_cmd.fifo_control = IWL_TX_FIFO_VO_MSK | IWL_TX_FIFO_VI_MSK |
- IWL_TX_FIFO_BE_MSK | IWL_TX_FIFO_BK_MSK;
- if (priv->cfg->sku & IWL_SKU_N)
+ if (flush_control & BIT(IWL_RXON_CTX_BSS))
+ flush_cmd.fifo_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
+ IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
+ IWL_SCD_MGMT_MSK;
+ if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
+ (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
+ flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
+ IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
+ IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
+ IWL_PAN_SCD_MULTICAST_MSK;
+
+ if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
flush_cmd.fifo_control);
flush_cmd.flush_control = cpu_to_le16(flush_control);
- return iwl_send_cmd(priv, &cmd);
+ return trans_send_cmd(&priv->trans, &cmd);
}
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
mutex_lock(&priv->mutex);
ieee80211_stop_queues(priv->hw);
- if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) {
+ if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
IWL_ERR(priv, "flush request fail\n");
goto done;
}
@@ -1699,18 +1335,21 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
* (might be in monitor mode), or the interface is in
* IBSS mode (no proper uCode support for coex then).
*/
- if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
+ if (!iwlagn_mod_params.bt_coex_active ||
+ priv->iw_mode == NL80211_IFTYPE_ADHOC) {
basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
} else {
basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_sco_disable)
+
+ if (!priv->bt_enable_pspoll)
basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
+ else
+ basic.flags &= ~IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
if (priv->bt_ch_announce)
basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
- IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", basic.flags);
+ IWL_DEBUG_COEX(priv, "BT coex flag: 0X%x\n", basic.flags);
}
priv->bt_enable_flag = basic.flags;
if (priv->bt_full_concurrent)
@@ -1720,7 +1359,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
sizeof(iwlagn_def_3w_lookup));
- IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
+ IWL_DEBUG_COEX(priv, "BT coex %s in %s mode\n",
basic.flags ? "active" : "disabled",
priv->bt_full_concurrent ?
"full concurrency" : "3-wire");
@@ -1728,19 +1367,97 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
if (priv->cfg->bt_params->bt_session_2) {
memcpy(&bt_cmd_2000.basic, &basic,
sizeof(basic));
- ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- sizeof(bt_cmd_2000), &bt_cmd_2000);
+ ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
+ CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
} else {
memcpy(&bt_cmd_6000.basic, &basic,
sizeof(basic));
- ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- sizeof(bt_cmd_6000), &bt_cmd_6000);
+ ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
+ CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
}
if (ret)
IWL_ERR(priv, "failed to send BT Coex Config\n");
}
+void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
+{
+ struct iwl_rxon_context *ctx, *found_ctx = NULL;
+ bool found_ap = false;
+
+ lockdep_assert_held(&priv->mutex);
+
+ /* Check whether AP or GO mode is active. */
+ if (rssi_ena) {
+ for_each_context(priv, ctx) {
+ if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_AP &&
+ iwl_is_associated_ctx(ctx)) {
+ found_ap = true;
+ break;
+ }
+ }
+ }
+
+ /*
+ * If disable was received or If GO/AP mode, disable RSSI
+ * measurements.
+ */
+ if (!rssi_ena || found_ap) {
+ if (priv->cur_rssi_ctx) {
+ ctx = priv->cur_rssi_ctx;
+ ieee80211_disable_rssi_reports(ctx->vif);
+ priv->cur_rssi_ctx = NULL;
+ }
+ return;
+ }
+
+ /*
+ * If rssi measurements need to be enabled, consider all cases now.
+ * Figure out how many contexts are active.
+ */
+ for_each_context(priv, ctx) {
+ if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
+ iwl_is_associated_ctx(ctx)) {
+ found_ctx = ctx;
+ break;
+ }
+ }
+
+ /*
+ * rssi monitor already enabled for the correct interface...nothing
+ * to do.
+ */
+ if (found_ctx == priv->cur_rssi_ctx)
+ return;
+
+ /*
+ * Figure out if rssi monitor is currently enabled, and needs
+ * to be changed. If rssi monitor is already enabled, disable
+ * it first else just enable rssi measurements on the
+ * interface found above.
+ */
+ if (priv->cur_rssi_ctx) {
+ ctx = priv->cur_rssi_ctx;
+ if (ctx->vif)
+ ieee80211_disable_rssi_reports(ctx->vif);
+ }
+
+ priv->cur_rssi_ctx = found_ctx;
+
+ if (!found_ctx)
+ return;
+
+ ieee80211_enable_rssi_reports(found_ctx->vif,
+ IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD,
+ IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD);
+}
+
+static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg)
+{
+ return BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3 >>
+ BT_UART_MSG_FRAME3SCOESCO_POS;
+}
+
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
{
struct iwl_priv *priv =
@@ -1758,7 +1475,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
* coex profile notifications. Ignore that since only bad consequence
* can be not matching debug print with actual state.
*/
- IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n",
+ IWL_DEBUG_COEX(priv, "BT traffic load changes: %d\n",
priv->bt_traffic_load);
switch (priv->bt_traffic_load) {
@@ -1793,23 +1510,43 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
if (test_bit(STATUS_SCAN_HW, &priv->status))
goto out;
- if (priv->cfg->ops->lib->update_chain_flags)
- priv->cfg->ops->lib->update_chain_flags(priv);
+ iwl_update_chain_flags(priv);
if (smps_request != -1) {
+ priv->current_ht_config.smps = smps_request;
for_each_context(priv, ctx) {
if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
ieee80211_request_smps(ctx->vif, smps_request);
}
}
+
+ /*
+ * Dynamic PS poll related functionality. Adjust RSSI measurements if
+ * necessary.
+ */
+ iwlagn_bt_coex_rssi_monitor(priv);
out:
mutex_unlock(&priv->mutex);
}
+/*
+ * If BT sco traffic, and RSSI monitor is enabled, move measurements to the
+ * correct interface or disable it if this is the last interface to be
+ * removed.
+ */
+void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv)
+{
+ if (priv->bt_is_sco &&
+ priv->bt_traffic_load == IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS)
+ iwlagn_bt_adjust_rssi_monitor(priv, true);
+ else
+ iwlagn_bt_adjust_rssi_monitor(priv, false);
+}
+
static void iwlagn_print_uartmsg(struct iwl_priv *priv,
struct iwl_bt_uart_msg *uart_msg)
{
- IWL_DEBUG_NOTIF(priv, "Message Type = 0x%X, SSN = 0x%X, "
+ IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
"Update Req = 0x%X",
(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
BT_UART_MSG_FRAME1MSGTYPE_POS,
@@ -1818,7 +1555,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
(BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
BT_UART_MSG_FRAME1UPDATEREQ_POS);
- IWL_DEBUG_NOTIF(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
+ IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
"Chl_SeqN = 0x%X, In band = 0x%X",
(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
@@ -1829,7 +1566,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
(BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
BT_UART_MSG_FRAME2INBAND_POS);
- IWL_DEBUG_NOTIF(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
+ IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
BT_UART_MSG_FRAME3SCOESCO_POS,
@@ -1844,11 +1581,11 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
BT_UART_MSG_FRAME3OBEX_POS);
- IWL_DEBUG_NOTIF(priv, "Idle duration = 0x%X",
+ IWL_DEBUG_COEX(priv, "Idle duration = 0x%X",
(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
BT_UART_MSG_FRAME4IDLEDURATION_POS);
- IWL_DEBUG_NOTIF(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
+ IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
"eSCO Retransmissions = 0x%X",
(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
BT_UART_MSG_FRAME5TXACTIVITY_POS,
@@ -1857,13 +1594,13 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);
- IWL_DEBUG_NOTIF(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
+ IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
BT_UART_MSG_FRAME6DISCOVERABLE_POS);
- IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Page = "
+ IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
"0x%X, Inquiry = 0x%X, Connectable = 0x%X",
(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
@@ -1913,14 +1650,16 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
return;
}
- IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
- IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status);
- IWL_DEBUG_NOTIF(priv, " traffic load: %d\n", coex->bt_traffic_load);
- IWL_DEBUG_NOTIF(priv, " CI compliance: %d\n",
+ IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
+ IWL_DEBUG_COEX(priv, " status: %d\n", coex->bt_status);
+ IWL_DEBUG_COEX(priv, " traffic load: %d\n", coex->bt_traffic_load);
+ IWL_DEBUG_COEX(priv, " CI compliance: %d\n",
coex->bt_ci_compliance);
iwlagn_print_uartmsg(priv, uart_msg);
priv->last_bt_traffic_load = priv->bt_traffic_load;
+ priv->bt_is_sco = iwlagn_bt_traffic_is_sco(uart_msg);
+
if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
if (priv->bt_status != coex->bt_status ||
priv->last_bt_traffic_load != coex->bt_traffic_load) {
@@ -1954,15 +1693,12 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
{
- iwlagn_rx_handler_setup(priv);
priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
iwlagn_bt_coex_profile_notif;
}
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
{
- iwlagn_setup_deferred_work(priv);
-
INIT_WORK(&priv->bt_traffic_change_work,
iwlagn_bt_traffic_change_work);
}
@@ -2274,9 +2010,9 @@ void iwlagn_init_notification_wait(struct iwl_priv *priv,
wait_entry->triggered = false;
wait_entry->aborted = false;
- spin_lock_bh(&priv->_agn.notif_wait_lock);
- list_add(&wait_entry->list, &priv->_agn.notif_waits);
- spin_unlock_bh(&priv->_agn.notif_wait_lock);
+ spin_lock_bh(&priv->notif_wait_lock);
+ list_add(&wait_entry->list, &priv->notif_waits);
+ spin_unlock_bh(&priv->notif_wait_lock);
}
int iwlagn_wait_notification(struct iwl_priv *priv,
@@ -2285,13 +2021,13 @@ int iwlagn_wait_notification(struct iwl_priv *priv,
{
int ret;
- ret = wait_event_timeout(priv->_agn.notif_waitq,
+ ret = wait_event_timeout(priv->notif_waitq,
wait_entry->triggered || wait_entry->aborted,
timeout);
- spin_lock_bh(&priv->_agn.notif_wait_lock);
+ spin_lock_bh(&priv->notif_wait_lock);
list_del(&wait_entry->list);
- spin_unlock_bh(&priv->_agn.notif_wait_lock);
+ spin_unlock_bh(&priv->notif_wait_lock);
if (wait_entry->aborted)
return -EIO;
@@ -2305,91 +2041,7 @@ int iwlagn_wait_notification(struct iwl_priv *priv,
void iwlagn_remove_notification(struct iwl_priv *priv,
struct iwl_notification_wait *wait_entry)
{
- spin_lock_bh(&priv->_agn.notif_wait_lock);
+ spin_lock_bh(&priv->notif_wait_lock);
list_del(&wait_entry->list);
- spin_unlock_bh(&priv->_agn.notif_wait_lock);
-}
-
-int iwlagn_start_device(struct iwl_priv *priv)
-{
- int ret;
-
- if (iwl_prepare_card_hw(priv)) {
- IWL_WARN(priv, "Exit HW not ready\n");
- return -EIO;
- }
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- if (iwl_is_rfkill(priv)) {
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
- iwl_enable_interrupts(priv);
- return -ERFKILL;
- }
-
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
- ret = iwlagn_hw_nic_init(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to init nic\n");
- return ret;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_enable_interrupts(priv);
-
- /* really make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
- return 0;
-}
-
-void iwlagn_stop_device(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- /* stop and reset the on-board processor */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /* tell the device to stop sending interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl_synchronize_irq(priv);
-
- /* device going down, Stop using ICT table */
- iwl_disable_ict(priv);
-
- /*
- * If a HW restart happens during firmware loading,
- * then the firmware loading might call this function
- * and later it might be called again due to the
- * restart. So don't process again if the device is
- * already dead.
- */
- if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
- iwlagn_txq_ctx_stop(priv);
- iwlagn_rxq_stop(priv);
-
- /* Power-down device's busmaster DMA clocks */
- iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(5);
- }
-
- /* Make sure (redundant) we've released our request to stay awake */
- iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /* Stop the device, and put it in low power state */
- iwl_apm_stop(priv);
+ spin_unlock_bh(&priv->notif_wait_lock);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 592b0cfcf71..3789ff4bf53 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -336,6 +336,12 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
}
#ifdef CONFIG_MAC80211_DEBUGFS
+/**
+ * rs_program_fix_rate - program the device to use a fixed TX rate
+ *
+ * This is for debugging/testing only; once the device starts using a
+ * fixed rate, the module must be reloaded to bring back normal operation.
+ */
static void rs_program_fix_rate(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta)
{
@@ -348,13 +354,17 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
- lq_sta->dbg_fixed_rate = priv->dbg_fixed_rate;
+#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+ /* testmode has higher priority to overwrite the fixed rate */
+ if (priv->tm_fixed_rate)
+ lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
+#endif
IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
- lq_sta->lq.sta_id, priv->dbg_fixed_rate);
+ lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
- if (priv->dbg_fixed_rate) {
- rs_fill_link_cmd(NULL, lq_sta, priv->dbg_fixed_rate);
+ if (lq_sta->dbg_fixed_rate) {
+ rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
false);
}
@@ -426,7 +436,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
ieee80211_stop_tx_ba_session(sta, tid);
}
} else {
- IWL_ERR(priv, "Aggregation not enabled for tid %d "
+ IWL_DEBUG_HT(priv, "Aggregation not enabled for tid %d "
"because load = %u\n", tid, load);
}
return ret;
@@ -1072,8 +1082,10 @@ done:
/* See if there's a better rate or modulation mode to try. */
if (sta && sta->supp_rates[sband->band])
rs_rate_scale_perform(priv, skb, sta, lq_sta);
-#ifdef CONFIG_MAC80211_DEBUGFS
- if (priv->dbg_fixed_rate != lq_sta->dbg_fixed_rate)
+
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_SVTOOL)
+ if ((priv->tm_fixed_rate) &&
+ (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
rs_program_fix_rate(priv, lq_sta);
#endif
if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
@@ -2895,8 +2907,9 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
if (sband->band == IEEE80211_BAND_5GHZ)
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
lq_sta->is_agg = 0;
-
- priv->dbg_fixed_rate = 0;
+#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+ priv->tm_fixed_rate = 0;
+#endif
#ifdef CONFIG_MAC80211_DEBUGFS
lq_sta->dbg_fixed_rate = 0;
#endif
@@ -3095,7 +3108,6 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
} else {
lq_sta->dbg_fixed_rate = 0;
- priv->dbg_fixed_rate = 0;
IWL_ERR(priv,
"Invalid antenna selection 0x%X, Valid is 0x%X\n",
ant_sel_tx, valid_tx_ant);
@@ -3123,9 +3135,9 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
return -EFAULT;
if (sscanf(buf, "%x", &parsed_rate) == 1)
- priv->dbg_fixed_rate = lq_sta->dbg_fixed_rate = parsed_rate;
+ lq_sta->dbg_fixed_rate = parsed_rate;
else
- priv->dbg_fixed_rate = lq_sta->dbg_fixed_rate = 0;
+ lq_sta->dbg_fixed_rate = 0;
rs_program_fix_rate(priv, lq_sta);
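For context, a hedged usage sketch of how the fixed rate is normally driven from user space through the debugfs write handler above: the program below just writes a hex rate_n_flags value into the per-station rate-scale debugfs file. The path, MAC address and rate value are illustrative assumptions; the real location depends on the phy name, interface name, station address and kernel debugfs layout.

#include <stdio.h>

int main(void)
{
	/* assumed path -- adjust phy/interface/station for a real system */
	const char *path = "/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/"
			   "stations/00:11:22:33:44:55/rate_scale_table";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "0x4101\n");	/* example rate_n_flags value, parsed as hex */
	fclose(f);
	return 0;
}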
@@ -3155,7 +3167,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
lq_sta->total_failed, lq_sta->total_success,
lq_sta->active_legacy_rate);
desc += sprintf(buff+desc, "fixed rate 0x%X\n",
- priv->dbg_fixed_rate);
+ lq_sta->dbg_fixed_rate);
desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
(priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
(priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 09f679d6046..d42ef1763a7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -30,6 +30,7 @@
#include "iwl-core.h"
#include "iwl-agn-calib.h"
#include "iwl-helpers.h"
+#include "iwl-trans.h"
static int iwlagn_disable_bss(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
@@ -39,7 +40,8 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
int ret;
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
+ ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
+ CMD_SYNC, sizeof(*send), send);
send->filter_flags = old_filter;
@@ -64,7 +66,8 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
send->dev_type = RXON_DEV_TYPE_P2P;
- ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
+ ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
+ CMD_SYNC, sizeof(*send), send);
send->filter_flags = old_filter;
send->dev_type = old_dev_type;
@@ -81,6 +84,22 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
return ret;
}
+static int iwlagn_disconn_pan(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct iwl_rxon_cmd *send)
+{
+ __le32 old_filter = send->filter_flags;
+ int ret;
+
+ send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
+ sizeof(*send), send);
+
+ send->filter_flags = old_filter;
+
+ return ret;
+}
+
static void iwlagn_update_qos(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
@@ -102,7 +121,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
ctx->qos_data.qos_active,
ctx->qos_data.def_qos_parm.qos_flags);
- ret = iwl_send_cmd_pdu(priv, ctx->qos_cmd,
+ ret = trans_send_cmd_pdu(&priv->trans, ctx->qos_cmd, CMD_SYNC,
sizeof(struct iwl_qosparam_cmd),
&ctx->qos_data.def_qos_parm);
if (ret)
@@ -161,11 +180,8 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
ctx->staging.ofdm_ht_triple_stream_basic_rates;
rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
- ret = iwl_send_cmd_pdu_async(priv, ctx->rxon_assoc_cmd,
- sizeof(rxon_assoc), &rxon_assoc, NULL);
- if (ret)
- return ret;
-
+ ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_assoc_cmd,
+ CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
return ret;
}
@@ -175,10 +191,21 @@ static int iwlagn_rxon_disconn(struct iwl_priv *priv,
int ret;
struct iwl_rxon_cmd *active = (void *)&ctx->active;
- if (ctx->ctxid == IWL_RXON_CTX_BSS)
+ if (ctx->ctxid == IWL_RXON_CTX_BSS) {
ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
- else
+ } else {
ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
+ if (ret)
+ return ret;
+ if (ctx->vif) {
+ ret = iwl_send_rxon_timing(priv, ctx);
+ if (ret) {
+ IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
+ return ret;
+ }
+ ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging);
+ }
+ }
if (ret)
return ret;
@@ -187,6 +214,8 @@ static int iwlagn_rxon_disconn(struct iwl_priv *priv,
* keys, so we have to restore those afterwards.
*/
iwl_clear_ucode_stations(priv, ctx);
+ /* update -- might need P2P now */
+ iwl_update_bcast_station(priv, ctx);
iwl_restore_stations(priv, ctx);
ret = iwl_restore_default_wep_keys(priv, ctx);
if (ret) {
@@ -205,10 +234,12 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
struct iwl_rxon_cmd *active = (void *)&ctx->active;
/* RXON timing must be before associated RXON */
- ret = iwl_send_rxon_timing(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
- return ret;
+ if (ctx->ctxid == IWL_RXON_CTX_BSS) {
+ ret = iwl_send_rxon_timing(priv, ctx);
+ if (ret) {
+ IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
+ return ret;
+ }
}
/* QoS info may be cleared by previous un-assoc RXON */
iwlagn_update_qos(priv, ctx);
@@ -235,7 +266,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
* Associated RXON doesn't clear the station table in uCode,
* so we don't need to restore stations etc. after this.
*/
- ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
+ ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
sizeof(struct iwl_rxon_cmd), &ctx->staging);
if (ret) {
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
@@ -263,9 +294,107 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
return ret;
}
+
+ if ((ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) &&
+ priv->cfg->ht_params->smps_mode)
+ ieee80211_request_smps(ctx->vif,
+ priv->cfg->ht_params->smps_mode);
+
return 0;
}
+int iwlagn_set_pan_params(struct iwl_priv *priv)
+{
+ struct iwl_wipan_params_cmd cmd;
+ struct iwl_rxon_context *ctx_bss, *ctx_pan;
+ int slot0 = 300, slot1 = 0;
+ int ret;
+
+ if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
+ return 0;
+
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+ lockdep_assert_held(&priv->mutex);
+
+ ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
+ ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
+
+ /*
+ * If the PAN context is inactive, then we don't need
+ * to update the PAN parameters; the last thing we'll
+ * have done before it went inactive is to make the
+ * PAN parameters WLAN-only.
+ */
+ if (!ctx_pan->is_active)
+ return 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ /* only 2 slots are currently allowed */
+ cmd.num_slots = 2;
+
+ cmd.slots[0].type = 0; /* BSS */
+ cmd.slots[1].type = 1; /* PAN */
+
+ if (priv->hw_roc_channel) {
+ /* both contexts must be used for this to happen */
+ slot1 = priv->hw_roc_duration;
+ slot0 = IWL_MIN_SLOT_TIME;
+ } else if (ctx_bss->vif && ctx_pan->vif) {
+ int bcnint = ctx_pan->beacon_int;
+ int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
+
+ /* should be set, but seems unused?? */
+ cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
+
+ if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
+ bcnint &&
+ bcnint != ctx_bss->beacon_int) {
+ IWL_ERR(priv,
+ "beacon intervals don't match (%d, %d)\n",
+ ctx_bss->beacon_int, ctx_pan->beacon_int);
+ } else
+ bcnint = max_t(int, bcnint,
+ ctx_bss->beacon_int);
+ if (!bcnint)
+ bcnint = DEFAULT_BEACON_INTERVAL;
+ slot0 = bcnint / 2;
+ slot1 = bcnint - slot0;
+
+ if (test_bit(STATUS_SCAN_HW, &priv->status) ||
+ (!ctx_bss->vif->bss_conf.idle &&
+ !ctx_bss->vif->bss_conf.assoc)) {
+ slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
+ slot1 = IWL_MIN_SLOT_TIME;
+ } else if (!ctx_pan->vif->bss_conf.idle &&
+ !ctx_pan->vif->bss_conf.assoc) {
+ slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
+ slot0 = IWL_MIN_SLOT_TIME;
+ }
+ } else if (ctx_pan->vif) {
+ slot0 = 0;
+ slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
+ ctx_pan->beacon_int;
+ slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
+
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+ slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
+ slot1 = IWL_MIN_SLOT_TIME;
+ }
+ }
+
+ cmd.slots[0].width = cpu_to_le16(slot0);
+ cmd.slots[1].width = cpu_to_le16(slot1);
+
+ ret = trans_send_cmd_pdu(&priv->trans, REPLY_WIPAN_PARAMS, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
+
+ return ret;
+}
+
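As a quick worked illustration of the default split computed above when both contexts have interfaces and none of the scan/unassociated special cases apply, the beacon interval is simply halved between the BSS and PAN slots; the numbers below are made up.

#include <stdio.h>

int main(void)
{
	int bcnint = 100;		/* hypothetical beacon interval, in TU */
	int slot0 = bcnint / 2;		/* BSS slot, as computed above */
	int slot1 = bcnint - slot0;	/* PAN slot takes the remainder */

	printf("slot0 (BSS) = %d TU, slot1 (PAN) = %d TU\n", slot0, slot1);
	return 0;
}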
/**
* iwlagn_commit_rxon - commit staging_rxon to hardware
*
@@ -308,8 +437,8 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
/* always get timestamp with Rx frame */
ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
- if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->_agn.hw_roc_channel) {
- struct ieee80211_channel *chan = priv->_agn.hw_roc_channel;
+ if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->hw_roc_channel) {
+ struct ieee80211_channel *chan = priv->hw_roc_channel;
iwl_set_rxon_channel(priv, chan, ctx);
iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
@@ -375,13 +504,11 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* do it now if after settings changed.
*/
iwl_set_tx_power(priv, priv->tx_power_next, false);
- return 0;
- }
- if (priv->cfg->ops->hcmd->set_pan_params) {
- ret = priv->cfg->ops->hcmd->set_pan_params(priv);
- if (ret)
- return ret;
+ /* make sure we are in the right PS state */
+ iwl_power_update_mode(priv, true);
+
+ return 0;
}
iwl_set_rxon_hwcrypto(priv, ctx, !iwlagn_mod_params.sw_crypto);
@@ -405,6 +532,10 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (ret)
return ret;
+ ret = iwlagn_set_pan_params(priv);
+ if (ret)
+ return ret;
+
if (new_assoc)
return iwlagn_rxon_connect(priv, ctx);
@@ -446,9 +577,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
* set up the SM PS mode to OFF if an HT channel is
* configured.
*/
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- for_each_context(priv, ctx)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ for_each_context(priv, ctx)
+ iwlagn_set_rxon_chain(priv, ctx);
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
@@ -636,6 +766,38 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
ht_conf->single_chain_sufficient = !need_multiple;
}
+static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
+{
+ struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+ int ret;
+
+ if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
+ iwl_is_any_associated(priv)) {
+ struct iwl_calib_chain_noise_reset_cmd cmd;
+
+ /* clear data for chain noise calibration algorithm */
+ data->chain_noise_a = 0;
+ data->chain_noise_b = 0;
+ data->chain_noise_c = 0;
+ data->chain_signal_a = 0;
+ data->chain_signal_b = 0;
+ data->chain_signal_c = 0;
+ data->beacon_count = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ iwl_set_calib_hdr(&cmd.hdr,
+ priv->phy_calib_chain_noise_reset_cmd);
+ ret = trans_send_cmd_pdu(&priv->trans,
+ REPLY_PHY_CALIBRATION_CMD,
+ CMD_SYNC, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(priv,
+ "Could not send REPLY_PHY_CALIBRATION_CMD\n");
+ data->state = IWL_CHAIN_NOISE_ACCUMULATE;
+ IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
+ }
+}
+
void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -692,7 +854,12 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
iwl_wake_any_queue(priv, ctx);
}
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+ if (ctx->ctxid == IWL_RXON_CTX_BSS)
+ priv->have_rekey_data = false;
}
+
+ iwlagn_bt_coex_rssi_monitor(priv);
}
if (ctx->ht.enabled) {
@@ -704,8 +871,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
iwl_set_rxon_ht(priv, &priv->current_ht_config);
}
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ iwlagn_set_rxon_chain(priv, ctx);
if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
@@ -743,7 +909,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
iwl_power_update_mode(priv, false);
/* Enable RX differential gain and sensitivity calibrations */
- iwl_chain_noise_reset(priv);
+ if (!priv->disable_chain_noise_cal)
+ iwlagn_chain_noise_reset(priv);
priv->start_calib = 1;
}
@@ -770,6 +937,13 @@ void iwlagn_post_scan(struct iwl_priv *priv)
struct iwl_rxon_context *ctx;
/*
+ * We do not commit power settings while scan is pending,
+ * do it now if the settings changed.
+ */
+ iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
+ iwl_set_tx_power(priv, priv->tx_power_next, false);
+
+ /*
* Since setting the RXON may have been deferred while
* performing the scan, fire one off if needed
*/
@@ -777,6 +951,5 @@ void iwlagn_post_scan(struct iwl_priv *priv)
if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
iwlagn_commit_rxon(priv, ctx);
- if (priv->cfg->ops->hcmd->set_pan_params)
- priv->cfg->ops->hcmd->set_pan_params(priv);
+ iwlagn_set_pan_params(priv);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index 0bd722cee5a..37e624095e4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -33,9 +33,10 @@
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-agn.h"
+#include "iwl-trans.h"
static struct iwl_link_quality_cmd *
-iwl_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
+iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id)
{
int i, r;
struct iwl_link_quality_cmd *link_cmd;
@@ -47,10 +48,15 @@ iwl_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
return NULL;
}
+
+ lockdep_assert_held(&priv->mutex);
+
/* Set up the rate scaling to start at selected rate, fall back
* all the way down to 1M in IEEE order, and then spin on 1M */
if (priv->band == IEEE80211_BAND_5GHZ)
r = IWL_RATE_6M_INDEX;
+ else if (ctx && ctx->vif && ctx->vif->p2p)
+ r = IWL_RATE_6M_INDEX;
else
r = IWL_RATE_1M_INDEX;
@@ -115,7 +121,7 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx
spin_unlock_irqrestore(&priv->sta_lock, flags);
/* Set up default rate scaling table in device's station table */
- link_cmd = iwl_sta_alloc_lq(priv, sta_id);
+ link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
if (!link_cmd) {
IWL_ERR(priv, "Unable to initialize rate scaling for station %pM.\n",
addr);
@@ -133,6 +139,14 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx
return 0;
}
+/*
+ * static WEP keys
+ *
+ * For each context, the device has a table of 4 static WEP keys
+ * (one for each key index) that is updated with the following
+ * commands.
+ */
+
static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
bool send_if_empty)
@@ -175,7 +189,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
cmd.len[0] = cmd_size;
if (not_empty || send_if_empty)
- return iwl_send_cmd(priv, &cmd);
+ return trans_send_cmd(&priv->trans, &cmd);
else
return 0;
}
@@ -226,9 +240,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
return -EINVAL;
}
- keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
- keyconf->hw_key_idx = HW_KEY_DEFAULT;
- priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
+ keyconf->hw_key_idx = IWLAGN_HW_KEY_DEFAULT;
ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
@@ -241,166 +253,117 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
return ret;
}
-static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- __le16 key_flags = 0;
- struct iwl_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
-
- key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (keyconf->keylen == WEP_KEY_LEN_128)
- key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
-
- if (sta_id == ctx->bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
- priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
-
- memcpy(priv->stations[sta_id].keyinfo.key,
- keyconf->key, keyconf->keylen);
-
- memcpy(&priv->stations[sta_id].sta.key.key[3],
- keyconf->key, keyconf->keylen);
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_get_free_ucode_key_index(priv);
- /* else, we are overriding an existing key => no need to allocated room
- * in uCode. */
+/*
+ * dynamic (per-station) keys
+ *
+ * The dynamic keys are a little more complicated. The device has
+ * a key cache of up to STA_KEY_MAX_NUM/STA_KEY_MAX_NUM_PAN keys.
+ * These are linked to stations by a table that contains an index
+ * into the key table for each station/key index/{mcast,unicast},
+ * i.e. it's basically an array of pointers like this:
+ * key_offset_t key_mapping[NUM_STATIONS][4][2];
+ * (it really works differently, but you can think of it as such)
+ *
+ * The key uploading and linking happens in the same command, the
+ * add station command with STA_MODIFY_KEY_MASK.
+ */
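The mapping the comment above describes can be pictured with a small self-contained sketch. Every name and size here is illustrative only; as the comment itself says, the hardware really works differently.

#include <stdio.h>

#define NUM_STATIONS	16	/* hypothetical station count */
#define NUM_KEY_IDX	4	/* key indices 0..3 */
#define NUM_GROUPS	2	/* 0 = multicast, 1 = unicast */

typedef unsigned char key_offset_t;	/* offset into the shared key cache */

static key_offset_t key_mapping[NUM_STATIONS][NUM_KEY_IDX][NUM_GROUPS];

int main(void)
{
	/* "link" key-cache offset 5 as unicast key index 0 of station 3 */
	key_mapping[3][0][1] = 5;

	printf("station 3, keyidx 0, unicast -> cache offset %u\n",
	       (unsigned int)key_mapping[3][0][1]);
	return 0;
}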
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
+static u8 iwlagn_key_sta_id(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ u8 sta_id = IWL_INVALID_STATION;
- priv->stations[sta_id].sta.key.key_flags = key_flags;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ if (sta)
+ sta_id = iwl_sta_id(sta);
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ /*
+ * The device expects GTKs for station interfaces to be
+ * installed as GTKs for the AP station. If we have no
+ * station ID, then use the ap_sta_id in that case.
+ */
+ if (!sta && vif && vif_priv->ctx) {
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ sta_id = vif_priv->ctx->ap_sta_id;
+ break;
+ default:
+ /*
+ * In all other cases, the key is either
+ * used for TX only or is already bound
+ * to a station.
+ */
+ break;
+ }
+ }
- return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
+ return sta_id;
}
-static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
+static int iwlagn_send_sta_key(struct iwl_priv *priv,
+ struct ieee80211_key_conf *keyconf,
+ u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
+ u32 cmd_flags)
{
unsigned long flags;
- __le16 key_flags = 0;
+ __le16 key_flags;
struct iwl_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (sta_id == ctx->bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ int i;
spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
-
- memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
- keyconf->keylen);
-
- memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
- keyconf->keylen);
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_get_free_ucode_key_index(priv);
- /* else, we are overriding an existing key => no need to allocated room
- * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
spin_unlock_irqrestore(&priv->sta_lock, flags);
- return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
+ key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags |= STA_KEY_FLG_MAP_KEY_MSK;
-static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- int ret = 0;
- __le16 key_flags = 0;
-
- key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ key_flags |= STA_KEY_FLG_CCMP;
+ memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key_flags |= STA_KEY_FLG_TKIP;
+ sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
+ for (i = 0; i < 5; i++)
+ sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
+ memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ key_flags |= STA_KEY_FLG_WEP;
+ memcpy(&sta_cmd.key.key[3], keyconf->key, keyconf->keylen);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
- if (sta_id == ctx->bcast_sta_id)
+ if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
key_flags |= STA_KEY_MULTICAST_MSK;
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = 16;
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_get_free_ucode_key_index(priv);
- /* else, we are overriding an existing key => no need to allocated room
- * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
-
+ /* key pointer (offset) */
+ sta_cmd.key.key_offset = keyconf->hw_key_idx;
- /* This copy is acutally not needed: we get the key with each TX */
- memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
+ sta_cmd.key.key_flags = key_flags;
+ sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
+ sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
- memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return ret;
+ return iwl_send_add_sta(priv, &sta_cmd, cmd_flags);
}
void iwl_update_tkip_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
+ struct ieee80211_vif *vif,
struct ieee80211_key_conf *keyconf,
struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
- u8 sta_id;
- unsigned long flags;
- int i;
+ u8 sta_id = iwlagn_key_sta_id(priv, vif, sta);
+
+ if (sta_id == IWL_INVALID_STATION)
+ return;
if (iwl_scan_cancel(priv)) {
/* cancel scan failed, just live w/ bad key and rely
@@ -408,121 +371,110 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
return;
}
- sta_id = iwl_sta_id_or_broadcast(priv, ctx, sta);
- if (sta_id == IWL_INVALID_STATION)
- return;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
-
- for (i = 0; i < 5; i++)
- priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
- cpu_to_le16(phase1key[i]);
-
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
+ iwlagn_send_sta_key(priv, keyconf, sta_id,
+ iv32, phase1key, CMD_ASYNC);
}
int iwl_remove_dynamic_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf,
- u8 sta_id)
+ struct ieee80211_sta *sta)
{
unsigned long flags;
- u16 key_flags;
- u8 keyidx;
struct iwl_addsta_cmd sta_cmd;
+ u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
+
+ /* if station isn't there, neither is the key */
+ if (sta_id == IWL_INVALID_STATION)
+ return -ENOENT;
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
+ if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
+ sta_id = IWL_INVALID_STATION;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+ if (sta_id == IWL_INVALID_STATION)
+ return 0;
lockdep_assert_held(&priv->mutex);
ctx->key_mapping_keys--;
- spin_lock_irqsave(&priv->sta_lock, flags);
- key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
- keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
-
IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
keyconf->keyidx, sta_id);
- if (keyconf->keyidx != keyidx) {
- /* We need to remove a key with index different that the one
- * in the uCode. This means that the key we need to remove has
- * been replaced by another one with different index.
- * Don't do anything and return ok
- */
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
- IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
- keyconf->keyidx, key_flags);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
- &priv->ucode_key_table))
- IWL_ERR(priv, "index %d not used in uCode key table.\n",
- priv->stations[sta_id].sta.key.key_offset);
- memset(&priv->stations[sta_id].keyinfo, 0,
- sizeof(struct iwl_hw_key));
- memset(&priv->stations[sta_id].sta.key, 0,
- sizeof(struct iwl_keyinfo));
- priv->stations[sta_id].sta.key.key_flags =
- STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
- priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ if (!test_and_clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table))
+ IWL_ERR(priv, "offset %d not used in uCode key table.\n",
+ keyconf->hw_key_idx);
- if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ sta_cmd.key.key_flags = STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+ sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
+ sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
-int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf, u8 sta_id)
+int iwl_set_dynamic_key(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta)
{
+ struct ieee80211_key_seq seq;
+ u16 p1k[5];
int ret;
+ u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
+ const u8 *addr;
+
+ if (sta_id == IWL_INVALID_STATION)
+ return -EINVAL;
lockdep_assert_held(&priv->mutex);
+ keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
+ if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
+ return -ENOSPC;
+
ctx->key_mapping_keys++;
- keyconf->hw_key_idx = HW_KEY_DYNAMIC;
switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- ret = iwl_set_ccmp_dynamic_key_info(priv, ctx, keyconf, sta_id);
- break;
case WLAN_CIPHER_SUITE_TKIP:
- ret = iwl_set_tkip_dynamic_key_info(priv, ctx, keyconf, sta_id);
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ if (sta)
+ addr = sta->addr;
+ else /* station mode case only */
+ addr = ctx->active.bssid_addr;
+
+ /* pre-fill phase 1 key into device cache */
+ ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+ ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
+ ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
+ seq.tkip.iv32, p1k, CMD_SYNC);
break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ /* fall through */
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- ret = iwl_set_wep_dynamic_key_info(priv, ctx, keyconf, sta_id);
+ ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
+ 0, NULL, CMD_SYNC);
break;
default:
- IWL_ERR(priv,
- "Unknown alg: %s cipher = %x\n", __func__,
- keyconf->cipher);
+ IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher);
ret = -EINVAL;
}
- IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
+ if (ret) {
+ ctx->key_mapping_keys--;
+ clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table);
+ }
+
+ IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
keyconf->cipher, keyconf->keylen, keyconf->keyidx,
- sta_id, ret);
+ sta ? sta->addr : NULL, ret);
return ret;
}
@@ -554,7 +506,7 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
priv->stations[sta_id].used |= IWL_STA_BCAST;
spin_unlock_irqrestore(&priv->sta_lock, flags);
- link_cmd = iwl_sta_alloc_lq(priv, sta_id);
+ link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
if (!link_cmd) {
IWL_ERR(priv,
"Unable to initialize rate scaling for bcast station.\n");
@@ -574,14 +526,14 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
* Only used by iwlagn. Placed here to have all bcast station management
* code together.
*/
-static int iwl_update_bcast_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
+int iwl_update_bcast_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
{
unsigned long flags;
struct iwl_link_quality_cmd *link_cmd;
u8 sta_id = ctx->bcast_sta_id;
- link_cmd = iwl_sta_alloc_lq(priv, sta_id);
+ link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
if (!link_cmd) {
IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index 348f74f1c8e..f501d742984 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -198,7 +198,7 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
/* Reschedule the ct_kill timer to occur in
* CT_KILL_EXIT_DURATION seconds to ensure we get a
* thermal update */
- IWL_DEBUG_POWER(priv, "schedule ct_kill exit timer\n");
+ IWL_DEBUG_TEMP(priv, "schedule ct_kill exit timer\n");
mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
jiffies + CT_KILL_EXIT_DURATION * HZ);
}
@@ -208,15 +208,15 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
bool stop)
{
if (stop) {
- IWL_DEBUG_POWER(priv, "Stop all queues\n");
+ IWL_DEBUG_TEMP(priv, "Stop all queues\n");
if (priv->mac80211_registered)
ieee80211_stop_queues(priv->hw);
- IWL_DEBUG_POWER(priv,
+ IWL_DEBUG_TEMP(priv,
"Schedule 5 seconds CT_KILL Timer\n");
mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
jiffies + CT_KILL_EXIT_DURATION * HZ);
} else {
- IWL_DEBUG_POWER(priv, "Wake all queues\n");
+ IWL_DEBUG_TEMP(priv, "Wake all queues\n");
if (priv->mac80211_registered)
ieee80211_wake_queues(priv->hw);
}
@@ -232,7 +232,7 @@ static void iwl_tt_ready_for_ct_kill(unsigned long data)
/* temperature timer expired, ready to go into CT_KILL state */
if (tt->state != IWL_TI_CT_KILL) {
- IWL_DEBUG_POWER(priv, "entering CT_KILL state when "
+ IWL_DEBUG_TEMP(priv, "entering CT_KILL state when "
"temperature timer expired\n");
tt->state = IWL_TI_CT_KILL;
set_bit(STATUS_CT_KILL, &priv->status);
@@ -242,7 +242,7 @@ static void iwl_tt_ready_for_ct_kill(unsigned long data)
static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
{
- IWL_DEBUG_POWER(priv, "Prepare to enter IWL_TI_CT_KILL\n");
+ IWL_DEBUG_TEMP(priv, "Prepare to enter IWL_TI_CT_KILL\n");
/* make request to retrieve statistics information */
iwl_send_statistics_request(priv, CMD_SYNC, false);
/* Reschedule the ct_kill wait timer */
@@ -273,7 +273,7 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
(temp > tt->tt_previous_temp) &&
((temp - tt->tt_previous_temp) >
IWL_TT_INCREASE_MARGIN)) {
- IWL_DEBUG_POWER(priv,
+ IWL_DEBUG_TEMP(priv,
"Temperature increase %d degree Celsius\n",
(temp - tt->tt_previous_temp));
}
@@ -338,9 +338,9 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
} else if (old_state == IWL_TI_CT_KILL &&
tt->state != IWL_TI_CT_KILL)
iwl_perform_ct_kill_task(priv, false);
- IWL_DEBUG_POWER(priv, "Temperature state changed %u\n",
+ IWL_DEBUG_TEMP(priv, "Temperature state changed %u\n",
tt->state);
- IWL_DEBUG_POWER(priv, "Power Index change to %u\n",
+ IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",
tt->tt_power_mode);
}
mutex_unlock(&priv->mutex);
@@ -397,7 +397,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
(temp > tt->tt_previous_temp) &&
((temp - tt->tt_previous_temp) >
IWL_TT_INCREASE_MARGIN)) {
- IWL_DEBUG_POWER(priv,
+ IWL_DEBUG_TEMP(priv,
"Temperature increase %d "
"degree Celsius\n",
(temp - tt->tt_previous_temp));
@@ -467,13 +467,13 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
set_bit(STATUS_CT_KILL, &priv->status);
tt->state = old_state;
} else {
- IWL_DEBUG_POWER(priv,
+ IWL_DEBUG_TEMP(priv,
"Thermal Throttling to new state: %u\n",
tt->state);
if (old_state != IWL_TI_CT_KILL &&
tt->state == IWL_TI_CT_KILL) {
if (force) {
- IWL_DEBUG_POWER(priv,
+ IWL_DEBUG_TEMP(priv,
"Enter IWL_TI_CT_KILL\n");
set_bit(STATUS_CT_KILL, &priv->status);
iwl_perform_ct_kill_task(priv, true);
@@ -483,7 +483,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
}
} else if (old_state == IWL_TI_CT_KILL &&
tt->state != IWL_TI_CT_KILL) {
- IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n");
+ IWL_DEBUG_TEMP(priv, "Exit IWL_TI_CT_KILL\n");
iwl_perform_ct_kill_task(priv, false);
}
}
@@ -568,7 +568,7 @@ void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- IWL_DEBUG_POWER(priv, "Queueing critical temperature enter.\n");
+ IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n");
queue_work(priv->workqueue, &priv->ct_enter);
}
@@ -577,7 +577,7 @@ void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- IWL_DEBUG_POWER(priv, "Queueing critical temperature exit.\n");
+ IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n");
queue_work(priv->workqueue, &priv->ct_exit);
}
@@ -603,7 +603,7 @@ void iwl_tt_handler(struct iwl_priv *priv)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- IWL_DEBUG_POWER(priv, "Queueing thermal throttling work.\n");
+ IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n");
queue_work(priv->workqueue, &priv->tt_work);
}
@@ -618,7 +618,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
struct iwl_tt_trans *transaction;
- IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n");
+ IWL_DEBUG_TEMP(priv, "Initialize Thermal Throttling\n");
memset(tt, 0, sizeof(struct iwl_tt_mgmt));
@@ -638,7 +638,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
if (priv->cfg->base_params->adv_thermal_throttle) {
- IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n");
+ IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
IWL_TI_STATE_MAX, GFP_KERNEL);
tt->transaction = kzalloc(sizeof(struct iwl_tt_trans) *
@@ -671,7 +671,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
priv->thermal_throttle.advanced_tt = true;
}
} else {
- IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n");
+ IWL_DEBUG_TEMP(priv, "Legacy Thermal Throttling\n");
priv->thermal_throttle.advanced_tt = false;
}
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 4974cd7837c..53bb59ee719 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -39,6 +39,7 @@
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
+#include "iwl-trans.h"
/*
* mac80211 queues, ACs, hardware queues, FIFOs.
@@ -95,132 +96,8 @@ static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
return -EINVAL;
}
-/**
- * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-static void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
-{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
- int write_ptr = txq->q.write_ptr;
- int txq_id = txq->q.id;
- u8 sec_ctl = 0;
- u8 sta_id = 0;
- u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
-
- WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
- sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
- sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
-
- switch (sec_ctl & TX_CMD_SEC_MSK) {
- case TX_CMD_SEC_CCM:
- len += CCMP_MIC_LEN;
- break;
- case TX_CMD_SEC_TKIP:
- len += TKIP_ICV_LEN;
- break;
- case TX_CMD_SEC_WEP:
- len += WEP_IV_LEN + WEP_ICV_LEN;
- break;
- }
-
- bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
-}
-
-static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int read_ptr = txq->q.read_ptr;
- u8 sta_id = 0;
- __le16 bc_ent;
-
- WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
-
- if (txq_id != priv->cmd_queue)
- sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
-
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
-
- if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
-}
-
-static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
- u16 txq_id)
-{
- u32 tbl_dw_addr;
- u32 tbl_dw;
- u16 scd_q2ratid;
-
- scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
- tbl_dw_addr = priv->scd_base_addr +
- IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
-
- tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
-
- if (txq_id & 0x1)
- tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
- else
- tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
- iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
- return 0;
-}
-
-static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
-{
- /* Simply stop the queue, but don't change any configuration;
- * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
- iwl_write_prph(priv,
- IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
- (0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
- (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
- int txq_id, u32 index)
-{
- iwl_write_direct32(priv, HBUS_TARG_WRPTR,
- (index & 0xff) | (txq_id << 8));
- iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry)
-{
- int txq_id = txq->q.id;
- int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
- iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
- (active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) |
- (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) |
- IWLAGN_SCD_QUEUE_STTS_REG_MSK);
-
- txq->sched_retry = scd_retry;
-
- IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
- active ? "Activate" : "Deactivate",
- scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
-}
-
-static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id, int tid)
+static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
+ int tid)
{
if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
(IWLAGN_FIRST_AMPDU_QUEUE +
@@ -237,106 +114,14 @@ static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
}
-void iwlagn_txq_agg_queue_setup(struct iwl_priv *priv,
- struct ieee80211_sta *sta,
- int tid, int frame_limit)
+static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
+ struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags)
{
- int sta_id, tx_fifo, txq_id, ssn_idx;
- u16 ra_tid;
- unsigned long flags;
- struct iwl_tid_data *tid_data;
-
- sta_id = iwl_sta_id(sta);
- if (WARN_ON(sta_id == IWL_INVALID_STATION))
- return;
- if (WARN_ON(tid >= MAX_TID_COUNT))
- return;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- ssn_idx = SEQ_TO_SN(tid_data->seq_number);
- txq_id = tid_data->agg.txq_id;
- tx_fifo = tid_data->agg.tx_fifo;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- ra_tid = BUILD_RAxTID(sta_id, tid);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Stop this Tx queue before configuring it */
- iwlagn_tx_queue_stop_scheduler(priv, txq_id);
-
- /* Map receiver-address / traffic-ID to this queue */
- iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
- /* Set this queue as a chain-building queue */
- iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id));
-
- /* enable aggregations for the queue */
- iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id));
-
- /* Place first TFD at index corresponding to start sequence number.
- * Assumes that ssn_idx is valid (!= 0xFFF) */
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- /* Set up Tx window size and frame limit for this queue */
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
- sizeof(u32),
- ((frame_limit <<
- IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((frame_limit <<
- IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
-
- iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
-
- /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo)
-{
- if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWLAGN_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_ERR(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
- IWLAGN_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- iwlagn_tx_queue_stop_scheduler(priv, txq_id);
-
- iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id));
-
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- /* supposes that ssn_idx is valid (!= 0xFFF) */
- iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(priv, txq_id);
- iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
- return 0;
-}
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- * must be called under priv->lock and mac access
- */
-void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
- iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
+ info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
+ info->flags & IEEE80211_TX_CTL_AMPDU)
+ *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
}
/*
@@ -353,19 +138,15 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
__le32 tx_flags = tx_cmd->tx_flags;
tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
tx_flags |= TX_CMD_FLG_ACK_MSK;
- if (ieee80211_is_mgmt(fc))
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- if (ieee80211_is_probe_resp(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
- tx_flags |= TX_CMD_FLG_TSF_MSK;
- } else {
- tx_flags &= (~TX_CMD_FLG_ACK_MSK);
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
+ else
+ tx_flags &= ~TX_CMD_FLG_ACK_MSK;
- if (ieee80211_is_back_req(fc))
+ if (ieee80211_is_probe_resp(fc))
+ tx_flags |= TX_CMD_FLG_TSF_MSK;
+ else if (ieee80211_is_back_req(fc))
tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
else if (info->band == IEEE80211_BAND_2GHZ &&
priv->cfg->bt_params &&
@@ -388,7 +169,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
}
- priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
+ iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
if (ieee80211_is_mgmt(fc)) {
@@ -436,6 +217,18 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
if (ieee80211_is_data(fc)) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+ if (priv->tm_fixed_rate) {
+ /*
+ * Rate override by testmode: besides sending the LQ command
+ * to change the rate, re-enforce it on a per-data-packet
+ * basis as well.
+ */
+ tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
+ memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
+ sizeof(tx_cmd->rate_n_flags));
+ }
+#endif
return;
}
@@ -497,8 +290,7 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
case WLAN_CIPHER_SUITE_TKIP:
tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
- ieee80211_get_tkip_key(keyconf, skb_frag,
- IEEE80211_TKIP_P2_KEY, tx_cmd->key);
+ ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
break;
@@ -528,26 +320,17 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = info->control.sta;
struct iwl_station_priv *sta_priv = NULL;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- struct iwl_tx_cmd *tx_cmd;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl_tx_cmd *tx_cmd;
int txq_id;
- dma_addr_t phys_addr = 0;
- dma_addr_t txcmd_phys;
- dma_addr_t scratch_phys;
- u16 len, firstlen, secondlen;
+
u16 seq_number = 0;
__le16 fc;
u8 hdr_len;
+ u16 len;
u8 sta_id;
- u8 wait_write_ptr = 0;
u8 tid = 0;
- u8 *qc = NULL;
unsigned long flags;
bool is_agg = false;
@@ -595,8 +378,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
- if (sta)
- sta_priv = (void *)sta->drv_priv;
+ if (info->control.sta)
+ sta_priv = (void *)info->control.sta->drv_priv;
if (sta_priv && sta_priv->asleep &&
(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
@@ -631,6 +414,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
spin_lock(&priv->sta_lock);
if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = NULL;
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
@@ -651,38 +435,13 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
}
}
- txq = &priv->txq[txq_id];
- q = &txq->q;
-
- if (unlikely(iwl_queue_space(q) < q->high_mark))
+ tx_cmd = trans_get_tx_cmd(&priv->trans, txq_id);
+ if (unlikely(!tx_cmd))
goto drop_unlock_sta;
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb = skb;
- txq->txb[q->write_ptr].ctx = ctx;
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_cmd = txq->cmd[q->write_ptr];
- out_meta = &txq->meta[q->write_ptr];
- tx_cmd = &out_cmd->cmd.tx;
- memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
- memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
-
- /*
- * Set up the Tx-command (not MAC!) header.
- * Store the chosen Tx queue and TFD index within the sequence field;
- * after Tx, uCode's Tx response will return this value so driver can
- * locate the frame within the tx queue and do post-tx processing.
- */
- out_cmd->hdr.cmd = REPLY_TX;
- out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
-
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdr_len);
-
/* Total # bytes to be transmitted */
len = (u16)skb->len;
tx_cmd->len = cpu_to_le16(len);
@@ -697,54 +456,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
iwl_update_stats(priv, true, fc, len);
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- firstlen = (len + 3) & ~3;
-
- /* Tell NIC about any 2-byte padding after MAC header */
- if (firstlen != len)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = pci_map_single(priv->pci_dev,
- &out_cmd->hdr, firstlen,
- PCI_DMA_BIDIRECTIONAL);
- if (unlikely(pci_dma_mapping_error(priv->pci_dev, txcmd_phys)))
- goto drop_unlock_sta;
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, firstlen);
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- secondlen = skb->len - hdr_len;
- if (secondlen > 0) {
- phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
- secondlen, PCI_DMA_TODEVICE);
- if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(out_meta, mapping),
- dma_unmap_len(out_meta, len),
- PCI_DMA_BIDIRECTIONAL);
- goto drop_unlock_sta;
- }
- }
+ if (trans_tx(&priv->trans, skb, tx_cmd, txq_id, fc, is_agg, ctx))
+ goto drop_unlock_sta;
if (ieee80211_is_data_qos(fc)) {
priv->stations[sta_id].tid[tid].tfds_in_queue++;
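The mapping code dropped in the hunk above rounded the Tx command plus MAC header up to a dword boundary and, when that introduced two pad bytes, set TX_CMD_FLG_MH_PAD_MSK so the device skips them. The arithmetic in isolation, as a standalone sketch with example sizes that are assumptions rather than the real structure sizes:

#include <stdio.h>

int main(void)
{
        unsigned int hdr_len = 26;                     /* example 802.11 header length */
        unsigned int cmd_len = 4 + 136;                /* assumed cmd header + tx cmd sizes */
        unsigned int len = cmd_len + hdr_len;
        unsigned int firstlen = (len + 3) & ~3;        /* round up to a 4-byte boundary */

        printf("len=%u firstlen=%u pad=%s\n",
               len, firstlen, firstlen != len ? "yes (MH_PAD flag set)" : "no");
        return 0;
}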
@@ -753,55 +467,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
}
spin_unlock(&priv->sta_lock);
-
- /* Attach buffers to TFD */
- iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
- if (secondlen > 0)
- iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
- secondlen, 0);
-
- scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
-
- /* take back ownership of DMA buffer to enable update */
- pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
- firstlen, PCI_DMA_BIDIRECTIONAL);
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
- le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
-
- /* Set up entry for this TFD in Tx byte-count array */
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- iwlagn_txq_update_byte_cnt_tbl(priv, txq,
- le16_to_cpu(tx_cmd->len));
-
- pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
- firstlen, PCI_DMA_BIDIRECTIONAL);
-
- trace_iwlwifi_dev_tx(priv,
- &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
- &out_cmd->hdr, firstlen,
- skb->data + hdr_len, secondlen);
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
/*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually,
- * regardless of the value of ret. "ret" only indicates
- * whether or not we should update the write pointer.
- */
-
- /*
* Avoid atomic ops if it isn't an associated client.
* Also, if this is a packet for aggregation, don't
* increase the counter because the ucode will stop
@@ -811,17 +479,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (sta_priv && sta_priv->client && !is_agg)
atomic_inc(&sta_priv->pending_frames);
- if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
- if (wait_write_ptr) {
- spin_lock_irqsave(&priv->lock, flags);
- txq->need_update = 1;
- iwl_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
- } else {
- iwl_stop_queue(priv, txq);
- }
- }
-
return 0;
drop_unlock_sta:
@@ -831,178 +488,6 @@ drop_unlock_priv:
return -1;
}
-static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr, size_t size)
-{
- ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
- GFP_KERNEL);
- if (!ptr->addr)
- return -ENOMEM;
- ptr->size = size;
- return 0;
-}
-
-static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr)
-{
- if (unlikely(!ptr->addr))
- return;
-
- dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
- memset(ptr, 0, sizeof(*ptr));
-}
-
-/**
- * iwlagn_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- if (priv->txq) {
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (txq_id == priv->cmd_queue)
- iwl_cmd_queue_free(priv);
- else
- iwl_tx_queue_free(priv, txq_id);
- }
- iwlagn_free_dma_ptr(priv, &priv->kw);
-
- iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
-
- /* free tx queue structure */
- iwl_free_txq_mem(priv);
-}
-
-/**
- * iwlagn_txq_ctx_alloc - allocate TX queue context
- * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
- */
-int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
-{
- int ret;
- int txq_id, slots_num;
- unsigned long flags;
-
- /* Free all tx/cmd queues and keep-warm buffer */
- iwlagn_hw_txq_ctx_free(priv);
-
- ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
- priv->hw_params.scd_bc_tbls_size);
- if (ret) {
- IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
- goto error_bc_tbls;
- }
- /* Alloc keep-warm buffer */
- ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
- if (ret) {
- IWL_ERR(priv, "Keep Warm allocation failed\n");
- goto error_kw;
- }
-
- /* allocate tx queue structure */
- ret = iwl_alloc_txq_mem(priv);
- if (ret)
- goto error;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- iwlagn_txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == priv->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
- txq_id);
- if (ret) {
- IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return ret;
-
- error:
- iwlagn_hw_txq_ctx_free(priv);
- iwlagn_free_dma_ptr(priv, &priv->kw);
- error_kw:
- iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
- error_bc_tbls:
- return ret;
-}
-
-void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
-{
- int txq_id, slots_num;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- iwlagn_txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = txq_id == priv->cmd_queue ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
- }
-}
-
-/**
- * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
- */
-void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
-{
- int ch, txq_id;
- unsigned long flags;
-
- /* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&priv->lock, flags);
-
- iwlagn_txq_set_sched(priv, 0);
-
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000))
- IWL_ERR(priv, "Failing on timeout while stopping"
- " DMA channel %d [0x%08x]", ch,
- iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!priv->txq)
- return;
-
- /* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (txq_id == priv->cmd_queue)
- iwl_cmd_queue_unmap(priv);
- else
- iwl_tx_queue_unmap(priv, txq_id);
-}
-
/*
* Find first available (lowest unused) Tx Queue, mark it "active".
* Called only when finding queue for aggregation.
@@ -1033,8 +518,8 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
if (unlikely(tx_fifo < 0))
return tx_fifo;
- IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
- __func__, sta->addr, tid);
+ IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
+ sta->addr, tid);
sta_id = iwl_sta_id(sta);
if (sta_id == IWL_INVALID_STATION) {
@@ -1150,7 +635,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
* to deactivate the uCode queue, just return "success" to allow
* mac80211 to clean up it own data.
*/
- iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
+ trans_txq_agg_disable(&priv->trans, txq_id, ssn, tx_fifo_id);
spin_unlock_irqrestore(&priv->lock, flags);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -1179,7 +664,8 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
u16 ssn = SEQ_TO_SN(tid_data->seq_number);
int tx_fifo = get_fifo_from_tid(ctx, tid);
IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
- iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
+ trans_txq_agg_disable(&priv->trans, txq_id,
+ ssn, tx_fifo);
tid_data->agg.state = IWL_AGG_OFF;
ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
}
@@ -1236,9 +722,9 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
struct ieee80211_hdr *hdr;
if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
- "is out of range [0-%d] %d %d.\n", txq_id,
- index, q->n_bd, q->write_ptr, q->read_ptr);
+ IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
+ "index %d is out of range [0-%d] %d %d.\n", __func__,
+ txq_id, index, q->n_bd, q->write_ptr, q->read_ptr);
return 0;
}
@@ -1261,7 +747,7 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
iwlagn_txq_inval_byte_cnt_tbl(priv, txq);
- iwlagn_txq_free_tfd(priv, txq);
+ iwlagn_txq_free_tfd(priv, txq, txq->q.read_ptr);
}
return nfreed;
}
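The reclaim path kept above walks a circular TFD queue from the read pointer up to (but not including) the reclaimed index, wrapping modulo n_bd. A self-contained sketch of that read/write-pointer bookkeeping; it is illustrative only, not the driver's queue structure:

#include <stdio.h>

#define N_BD 256        /* assumed ring size */

struct ring {
        unsigned int read_ptr;
        unsigned int write_ptr;
};

static unsigned int inc_wrap(unsigned int idx)
{
        return (idx + 1) % N_BD;
}

/* Number of occupied slots between read and write pointers. */
static unsigned int ring_used(const struct ring *r)
{
        return (r->write_ptr - r->read_ptr) % N_BD;
}

int main(void)
{
        struct ring r = { .read_ptr = 250, .write_ptr = 4 };
        unsigned int index = 2;        /* reclaim everything before this slot */
        int nfreed = 0;

        while (r.read_ptr != index) {
                r.read_ptr = inc_wrap(r.read_ptr);
                nfreed++;
        }
        printf("freed %d, used now %u\n", nfreed, ring_used(&r));
        return 0;
}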
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 97de5d9de67..a895a099d08 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -39,38 +39,7 @@
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-agn-calib.h"
-
-#define IWL_AC_UNSET -1
-
-struct queue_to_fifo_ac {
- s8 fifo, ac;
-};
-
-static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
- { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
- { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
- { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
- { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
- { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
- { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
-};
-
-static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
- { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
- { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
- { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
- { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
- { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
- { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
- { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
- { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
- { IWL_TX_FIFO_BE_IPAN, 2, },
- { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
-};
+#include "iwl-trans.h"
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
@@ -143,7 +112,7 @@ static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
- IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
+ IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name);
ret = wait_event_interruptible_timeout(priv->wait_command_queue,
priv->ucode_write_complete, 5 * HZ);
if (ret == -ERESTARTSYS) {
@@ -183,10 +152,7 @@ static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
__le16 *xtal_calib =
(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
- cmd.hdr.first_group = 0;
- cmd.hdr.groups_num = 1;
- cmd.hdr.data_valid = 1;
+ iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
@@ -197,17 +163,16 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
{
struct iwl_calib_temperature_offset_cmd cmd;
__le16 *offset_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD;
- cmd.hdr.first_group = 0;
- cmd.hdr.groups_num = 1;
- cmd.hdr.data_valid = 1;
- cmd.radio_sensor_offset = le16_to_cpu(offset_calib[1]);
+ (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_TEMPERATURE);
+
+ memset(&cmd, 0, sizeof(cmd));
+ iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
+ memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib));
if (!(cmd.radio_sensor_offset))
cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
- cmd.reserved = 0;
+
IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
- cmd.radio_sensor_offset);
+ le16_to_cpu(cmd.radio_sensor_offset));
return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
(u8 *)&cmd, sizeof(cmd));
}
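The temperature-offset hunk above now zeroes the whole command, fills the header through the helper, copies the EEPROM value and falls back to DEFAULT_RADIO_SENSOR_OFFSET when the EEPROM field is blank. The fallback logic in isolation, with the default below being a placeholder rather than the driver's constant:

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_SENSOR_OFFSET 100      /* placeholder, not the driver's value */

static uint16_t radio_sensor_offset(uint16_t eeprom_value)
{
        /* an all-zero EEPROM field means "not calibrated": use the default */
        return eeprom_value ? eeprom_value : DEFAULT_SENSOR_OFFSET;
}

int main(void)
{
        printf("%u %u\n",
               (unsigned)radio_sensor_offset(0),
               (unsigned)radio_sensor_offset(713));
        return 0;
}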
@@ -225,9 +190,10 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
- calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
+ calib_cfg_cmd.ucd_calib_cfg.flags =
+ IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
- return iwl_send_cmd(priv, &cmd);
+ return trans_send_cmd(&priv->trans, &cmd);
}
void iwlagn_rx_calib_result(struct iwl_priv *priv,
@@ -325,7 +291,8 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
/* coexistence is disabled */
memset(&coex_cmd, 0, sizeof(coex_cmd));
}
- return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
+ return trans_send_cmd_pdu(&priv->trans,
+ COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
sizeof(coex_cmd), &coex_cmd);
}
@@ -357,7 +324,8 @@ void iwlagn_send_prio_tbl(struct iwl_priv *priv)
memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
sizeof(iwlagn_bt_prio_tbl));
- if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PRIO_TABLE,
+ if (trans_send_cmd_pdu(&priv->trans,
+ REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
sizeof(prio_tbl_cmd), &prio_tbl_cmd))
IWL_ERR(priv, "failed to send BT prio tbl command\n");
}
@@ -369,7 +337,8 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
env_cmd.action = action;
env_cmd.type = type;
- ret = iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PROT_ENV,
+ ret = trans_send_cmd_pdu(&priv->trans,
+ REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
sizeof(env_cmd), &env_cmd);
if (ret)
IWL_ERR(priv, "failed to send BT env command\n");
@@ -379,109 +348,9 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
static int iwlagn_alive_notify(struct iwl_priv *priv)
{
- const struct queue_to_fifo_ac *queue_to_fifo;
- struct iwl_rxon_context *ctx;
- u32 a;
- unsigned long flags;
- int i, chan;
- u32 reg_val;
int ret;
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
- a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
- for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
- a += 4)
- iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
- a += 4)
- iwl_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr +
- IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
- iwl_write_targ_mem(priv, a, 0);
-
- iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
- priv->scd_bc_tbls.dma >> 10);
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
- iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
- iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
- IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv));
- iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);
-
- /* initiate the queues */
- for (i = 0; i < priv->hw_params.max_txq_num; i++) {
- iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
- iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
- iwl_write_targ_mem(priv, priv->scd_base_addr +
- IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- ((SCD_WIN_SIZE <<
- IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((SCD_FRAME_LIMIT <<
- IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
- }
-
- iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
- IWL_MASK(0, priv->hw_params.max_txq_num));
-
- /* Activate all Tx DMA/FIFO channels */
- iwlagn_txq_set_sched(priv, IWL_MASK(0, 7));
-
- /* map queues to FIFOs */
- if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
- queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
- else
- queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
-
- iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);
-
- /* make sure all queue are not stopped */
- memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
- for (i = 0; i < 4; i++)
- atomic_set(&priv->queue_stop_count[i], 0);
- for_each_context(priv, ctx)
- ctx->last_tx_rejected = false;
-
- /* reset to 0 to enable all the queue first */
- priv->txq_ctx_active_msk = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
- BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);
-
- for (i = 0; i < 10; i++) {
- int fifo = queue_to_fifo[i].fifo;
- int ac = queue_to_fifo[i].ac;
-
- iwl_txq_ctx_activate(priv, i);
-
- if (fifo == IWL_TX_FIFO_UNUSED)
- continue;
-
- if (ac != IWL_AC_UNSET)
- iwl_set_swq_id(&priv->txq[i], ac, i);
- iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Enable L1-Active */
- iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ trans_tx_start(&priv->trans);
ret = iwlagn_send_wimax_coex(priv);
if (ret)
@@ -508,7 +377,7 @@ static int iwlcore_verify_inst_sparse(struct iwl_priv *priv,
u32 val;
u32 i;
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+ IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
/* read data comes through single port, auto-incr addr */
@@ -533,7 +402,7 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
u32 offs;
int errors = 0;
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
+ IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
IWLAGN_RTC_INST_LOWER_BOUND);
@@ -559,7 +428,7 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img)
{
if (!iwlcore_verify_inst_sparse(priv, &img->code)) {
- IWL_DEBUG_INFO(priv, "uCode is good in inst SRAM\n");
+ IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
return 0;
}
@@ -583,7 +452,7 @@ static void iwlagn_alive_fn(struct iwl_priv *priv,
palive = &pkt->u.alive_frame;
- IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
+ IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision "
"0x%01X 0x%01X\n",
palive->is_valid, palive->ver_type,
palive->ver_subtype);
@@ -602,14 +471,14 @@ static void iwlagn_alive_fn(struct iwl_priv *priv,
int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
struct fw_img *image,
- int subtype, int alternate_subtype)
+ enum iwlagn_ucode_type ucode_type)
{
struct iwl_notification_wait alive_wait;
struct iwlagn_alive_data alive_data;
int ret;
- enum iwlagn_ucode_subtype old_type;
+ enum iwlagn_ucode_type old_type;
- ret = iwlagn_start_device(priv);
+ ret = trans_start_device(&priv->trans);
if (ret)
return ret;
@@ -617,7 +486,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
iwlagn_alive_fn, &alive_data);
old_type = priv->ucode_type;
- priv->ucode_type = subtype;
+ priv->ucode_type = ucode_type;
ret = iwlagn_load_given_ucode(priv, image);
if (ret) {
@@ -626,8 +495,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
return ret;
}
- /* Remove all resets to allow NIC to operate */
- iwl_write32(priv, CSR_RESET, 0);
+ trans_kick_nic(&priv->trans);
/*
* Some things may run in the background now, but we
@@ -645,24 +513,22 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
return -EIO;
}
- if (alive_data.subtype != subtype &&
- alive_data.subtype != alternate_subtype) {
- IWL_ERR(priv,
- "Loaded ucode is not expected type (got %d, expected %d)!\n",
- alive_data.subtype, subtype);
- priv->ucode_type = old_type;
- return -EIO;
- }
+ /*
+ * This step takes a long time (60-80ms!!) and
+ * WoWLAN image should be loaded quickly, so
+ * skip it for WoWLAN.
+ */
+ if (ucode_type != IWL_UCODE_WOWLAN) {
+ ret = iwl_verify_ucode(priv, image);
+ if (ret) {
+ priv->ucode_type = old_type;
+ return ret;
+ }
- ret = iwl_verify_ucode(priv, image);
- if (ret) {
- priv->ucode_type = old_type;
- return ret;
+ /* delay a bit to give rfkill time to run */
+ msleep(5);
}
- /* delay a bit to give rfkill time to run */
- msleep(5);
-
ret = iwlagn_alive_notify(priv);
if (ret) {
IWL_WARN(priv,
@@ -685,7 +551,7 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
if (!priv->ucode_init.code.len)
return 0;
- if (priv->ucode_type != UCODE_SUBTYPE_NONE_LOADED)
+ if (priv->ucode_type != IWL_UCODE_NONE)
return 0;
iwlagn_init_notification_wait(priv, &calib_wait,
@@ -694,7 +560,7 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
/* Will also start the device */
ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
- UCODE_SUBTYPE_INIT, -1);
+ IWL_UCODE_INIT);
if (ret)
goto error;
@@ -714,6 +580,6 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
iwlagn_remove_notification(priv, &calib_wait);
out:
/* Whatever happened, stop the device */
- iwlagn_stop_device(priv);
+ trans_stop_device(&priv->trans);
return ret;
}
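Both calibration commands in this file now go through iwl_set_calib_hdr() instead of assigning op_code, first_group, groups_num and data_valid by hand. A sketch of what such a helper amounts to, using a stand-in struct whose field names are taken from the removed lines; this is not the driver's definition:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Stand-in calibration command header; field names follow the removed code. */
struct calib_hdr {
        uint8_t op_code;
        uint8_t first_group;
        uint8_t groups_num;
        uint8_t data_valid;
};

static void set_calib_hdr(struct calib_hdr *hdr, uint8_t op_code)
{
        memset(hdr, 0, sizeof(*hdr));
        hdr->op_code = op_code;
        hdr->first_group = 0;
        hdr->groups_num = 1;
        hdr->data_valid = 1;
}

int main(void)
{
        struct calib_hdr hdr;

        set_calib_hdr(&hdr, 17);        /* 17: arbitrary example op code */
        printf("op=%u groups=%u valid=%u\n",
               (unsigned)hdr.op_code, (unsigned)hdr.groups_num,
               (unsigned)hdr.data_valid);
        return 0;
}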
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 8e1942ebd9a..b0ae4de7f08 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -26,14 +26,9 @@
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
@@ -49,8 +44,6 @@
#include <asm/div64.h>
-#define DRV_NAME "iwlagn"
-
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
@@ -59,7 +52,8 @@
#include "iwl-sta.h"
#include "iwl-agn-calib.h"
#include "iwl-agn.h"
-
+#include "iwl-bus.h"
+#include "iwl-trans.h"
/******************************************************************************
*
@@ -93,12 +87,10 @@ void iwl_update_chain_flags(struct iwl_priv *priv)
{
struct iwl_rxon_context *ctx;
- if (priv->cfg->ops->hcmd->set_rxon_chain) {
- for_each_context(priv, ctx) {
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- if (ctx->active.rx_chain != ctx->staging.rx_chain)
- iwlcore_commit_rxon(priv, ctx);
- }
+ for_each_context(priv, ctx) {
+ iwlagn_set_rxon_chain(priv, ctx);
+ if (ctx->active.rx_chain != ctx->staging.rx_chain)
+ iwlagn_commit_rxon(priv, ctx);
}
}
@@ -134,7 +126,9 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
struct iwl_tx_beacon_cmd *tx_beacon_cmd;
struct iwl_host_cmd cmd = {
.id = REPLY_TX_BEACON,
+ .flags = CMD_SYNC,
};
+ struct ieee80211_tx_info *info;
u32 frame_size;
u32 rate_flags;
u32 rate;
@@ -175,14 +169,31 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
frame_size);
/* Set up packet rate and flags */
- rate = iwl_rate_get_lowest_plcp(priv, priv->beacon_ctx);
+ info = IEEE80211_SKB_CB(priv->beacon_skb);
+
+ /*
+ * Let's set up the rate at least somewhat correctly;
+ * it will currently not actually be used by the uCode,
+ * it uses the broadcast station's rate instead.
+ */
+ if (info->control.rates[0].idx < 0 ||
+ info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
+ rate = 0;
+ else
+ rate = info->control.rates[0].idx;
+
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
priv->hw_params.valid_tx_ant);
rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
- if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
+
+ /* In mac80211, rates for 5 GHz start at 0 */
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate += IWL_FIRST_OFDM_RATE;
+ else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE)
rate_flags |= RATE_MCS_CCK_MSK;
- tx_beacon_cmd->tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate,
- rate_flags);
+
+ tx_beacon_cmd->tx.rate_n_flags =
+ iwl_hw_set_rate_n_flags(rate, rate_flags);
/* Submit command */
cmd.len[0] = sizeof(*tx_beacon_cmd);
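The beacon-rate handling added above maps a mac80211 rate index onto the driver's own rate table: 5 GHz indices are shifted up past the CCK entries, and only legacy 2.4 GHz CCK indices get RATE_MCS_CCK_MSK. A standalone sketch of that mapping, with the table layout (four CCK rates followed by OFDM) taken as an assumption:

#include <stdio.h>
#include <stdbool.h>

/* Assumed driver rate table layout: indices 0-3 are CCK, OFDM starts at 4. */
#define LAST_CCK_RATE    3
#define FIRST_OFDM_RATE  4

static unsigned int map_rate(int mac80211_idx, bool band_5ghz, bool *is_cck)
{
        unsigned int rate = (mac80211_idx < 0) ? 0 : (unsigned int)mac80211_idx;

        if (band_5ghz)
                rate += FIRST_OFDM_RATE;        /* 5 GHz rates start at 0 in mac80211 */

        /* CCK occupies indices 0..LAST_CCK_RATE and only exists on 2.4 GHz */
        *is_cck = !band_5ghz && rate <= LAST_CCK_RATE;
        return rate;
}

int main(void)
{
        bool cck;
        unsigned int r = map_rate(0, true, &cck);

        printf("rate=%u cck=%d\n", r, cck);        /* rate=4 cck=0 */
        return 0;
}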
@@ -192,7 +203,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
cmd.data[1] = priv->beacon_skb->data;
cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
- return iwl_send_cmd_sync(priv, &cmd);
+ return trans_send_cmd(&priv->trans, &cmd);
}
static void iwl_bg_beacon_update(struct work_struct *work)
@@ -245,7 +256,7 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work)
/* dont send host command if rf-kill is on */
if (!iwl_is_ready_rf(priv))
return;
- priv->cfg->ops->hcmd->send_bt_config(priv);
+ iwlagn_send_advance_bt_config(priv);
}
static void iwl_bg_bt_full_concurrency(struct work_struct *work)
@@ -272,12 +283,11 @@ static void iwl_bg_bt_full_concurrency(struct work_struct *work)
* to avoid 3-wire collisions
*/
for_each_context(priv, ctx) {
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- iwlcore_commit_rxon(priv, ctx);
+ iwlagn_set_rxon_chain(priv, ctx);
+ iwlagn_commit_rxon(priv, ctx);
}
- priv->cfg->ops->hcmd->send_bt_config(priv);
+ iwlagn_send_advance_bt_config(priv);
out:
mutex_unlock(&priv->mutex);
}
@@ -362,7 +372,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
u32 next_entry; /* index of next entry to be written by uCode */
base = priv->device_pointers.error_event_table;
- if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ if (iwlagn_hw_valid_rtc_data_addr(base)) {
capacity = iwl_read_targ_mem(priv, base);
num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
@@ -440,383 +450,8 @@ static void iwl_bg_tx_flush(struct work_struct *work)
if (!iwl_is_ready_rf(priv))
return;
- if (priv->cfg->ops->lib->txfifo_flush) {
- IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
- iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
- }
-}
-
-/**
- * iwl_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-static void iwl_rx_handle(struct iwl_priv *priv)
-{
- struct iwl_rx_mem_buffer *rxb;
- struct iwl_rx_packet *pkt;
- struct iwl_rx_queue *rxq = &priv->rxq;
- u32 r, i;
- int reclaim;
- unsigned long flags;
- u8 fill_rx = 0;
- u32 count = 8;
- int total_empty;
-
- /* uCode's read index (stored in shared DRAM) indicates the last Rx
- * buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
- i = rxq->read;
-
- /* Rx interrupt, but nothing sent from uCode */
- if (i == r)
- IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
- /* calculate total frames need to be restock after handling RX */
- total_empty = r - rxq->write_actual;
- if (total_empty < 0)
- total_empty += RX_QUEUE_SIZE;
-
- if (total_empty > (RX_QUEUE_SIZE / 2))
- fill_rx = 1;
-
- while (i != r) {
- int len;
-
- rxb = rxq->queue[i];
-
- /* If an RXB doesn't have a Rx queue slot associated with it,
- * then a bug has been introduced in the queue refilling
- * routines -- catch it here */
- if (WARN_ON(rxb == NULL)) {
- i = (i + 1) & RX_QUEUE_MASK;
- continue;
- }
-
- rxq->queue[i] = NULL;
-
- pci_unmap_page(priv->pci_dev, rxb->page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- pkt = rxb_addr(rxb);
-
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
- trace_iwlwifi_dev_rx(priv, pkt, len);
-
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
- (pkt->hdr.cmd != REPLY_RX) &&
- (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
- (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
- (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
- (pkt->hdr.cmd != REPLY_TX);
-
- /*
- * Do the notification wait before RX handlers so
- * even if the RX handler consumes the RXB we have
- * access to it in the notification wait entry.
- */
- if (!list_empty(&priv->_agn.notif_waits)) {
- struct iwl_notification_wait *w;
-
- spin_lock(&priv->_agn.notif_wait_lock);
- list_for_each_entry(w, &priv->_agn.notif_waits, list) {
- if (w->cmd == pkt->hdr.cmd) {
- w->triggered = true;
- if (w->fn)
- w->fn(priv, pkt, w->fn_data);
- }
- }
- spin_unlock(&priv->_agn.notif_wait_lock);
-
- wake_up_all(&priv->_agn.notif_waitq);
- }
- if (priv->pre_rx_handler)
- priv->pre_rx_handler(priv, rxb);
-
- /* Based on type of command response or notification,
- * handle those that need handling via function in
- * rx_handlers table. See iwl_setup_rx_handlers() */
- if (priv->rx_handlers[pkt->hdr.cmd]) {
- IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
- i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
- priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
- priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
- } else {
- /* No handling needed */
- IWL_DEBUG_RX(priv,
- "r %d i %d No handler needed for %s, 0x%02x\n",
- r, i, get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- }
-
- /*
- * XXX: After here, we should always check rxb->page
- * against NULL before touching it or its virtual
- * memory (pkt). Because some rx_handler might have
- * already taken or freed the pages.
- */
-
- if (reclaim) {
- /* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking iwl_send_cmd()
- * as we reclaim the driver command queue */
- if (rxb->page)
- iwl_tx_cmd_complete(priv, rxb);
- else
- IWL_WARN(priv, "Claim null rxb?\n");
- }
-
- /* Reuse the page if possible. For notification packets and
- * SKBs that fail to Rx correctly, add them back into the
- * rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
- if (rxb->page != NULL) {
- rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
- 0, PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- } else
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- i = (i + 1) & RX_QUEUE_MASK;
- /* If there are a lot of unused frames,
- * restock the Rx queue so ucode wont assert. */
- if (fill_rx) {
- count++;
- if (count >= 8) {
- rxq->read = i;
- iwlagn_rx_replenish_now(priv);
- count = 0;
- }
- }
- }
-
- /* Backtrack one entry */
- rxq->read = i;
- if (fill_rx)
- iwlagn_rx_replenish_now(priv);
- else
- iwlagn_rx_queue_restock(priv);
-}
-
-/* tasklet for iwlagn interrupt */
-static void iwl_irq_tasklet(struct iwl_priv *priv)
-{
- u32 inta = 0;
- u32 handled = 0;
- unsigned long flags;
- u32 i;
-#ifdef CONFIG_IWLWIFI_DEBUG
- u32 inta_mask;
-#endif
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Ack/clear/reset pending uCode interrupts.
- * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
- */
- /* There is a hardware bug in the interrupt mask function that some
- * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
- * they are disabled in the CSR_INT_MASK register. Furthermore the
- * ICT interrupt handling mechanism has another bug that might cause
- * these unmasked interrupts fail to be detected. We workaround the
- * hardware bugs here by ACKing all the possible interrupts so that
- * interrupt coalescing can still be achieved.
- */
- iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask);
-
- inta = priv->_agn.inta;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
- /* just for debug */
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ",
- inta, inta_mask);
- }
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* saved interrupt in inta variable now we can reset priv->_agn.inta */
- priv->_agn.inta = 0;
-
- /* Now service all interrupt bits discovered above. */
- if (inta & CSR_INT_BIT_HW_ERR) {
- IWL_ERR(priv, "Hardware error detected. Restarting.\n");
-
- /* Tell the device to stop sending interrupts */
- iwl_disable_interrupts(priv);
-
- priv->isr_stats.hw++;
- iwl_irq_handle_error(priv);
-
- handled |= CSR_INT_BIT_HW_ERR;
-
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
- /* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD) {
- IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
- "the frame/frames.\n");
- priv->isr_stats.sch++;
- }
-
- /* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE) {
- IWL_DEBUG_ISR(priv, "Alive interrupt\n");
- priv->isr_stats.alive++;
- }
- }
-#endif
- /* Safely ignore these bits for debug checks below */
- inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
- /* HW RF KILL switch toggled */
- if (inta & CSR_INT_BIT_RF_KILL) {
- int hw_rf_kill = 0;
- if (!(iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rf_kill = 1;
-
- IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
- hw_rf_kill ? "disable radio" : "enable radio");
-
- priv->isr_stats.rfkill++;
-
- /* driver only loads ucode once setting the interface up.
- * the driver allows loading the ucode even if the radio
- * is killed. Hence update the killswitch state here. The
- * rfkill handler will care about restarting if needed.
- */
- if (!test_bit(STATUS_ALIVE, &priv->status)) {
- if (hw_rf_kill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
- }
-
- handled |= CSR_INT_BIT_RF_KILL;
- }
-
- /* Chip got too hot and stopped itself */
- if (inta & CSR_INT_BIT_CT_KILL) {
- IWL_ERR(priv, "Microcode CT kill error detected.\n");
- priv->isr_stats.ctkill++;
- handled |= CSR_INT_BIT_CT_KILL;
- }
-
- /* Error detected by uCode */
- if (inta & CSR_INT_BIT_SW_ERR) {
- IWL_ERR(priv, "Microcode SW error detected. "
- " Restarting 0x%X.\n", inta);
- priv->isr_stats.sw++;
- iwl_irq_handle_error(priv);
- handled |= CSR_INT_BIT_SW_ERR;
- }
-
- /* uCode wakes up after power-down sleep */
- if (inta & CSR_INT_BIT_WAKEUP) {
- IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
- iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
- for (i = 0; i < priv->hw_params.max_txq_num; i++)
- iwl_txq_update_write_ptr(priv, &priv->txq[i]);
-
- priv->isr_stats.wakeup++;
-
- handled |= CSR_INT_BIT_WAKEUP;
- }
-
- /* All uCode command responses, including Tx command responses,
- * Rx "responses" (frame-received notification), and other
- * notifications from uCode come through here*/
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
- CSR_INT_BIT_RX_PERIODIC)) {
- IWL_DEBUG_ISR(priv, "Rx interrupt\n");
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
- handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
- iwl_write32(priv, CSR_FH_INT_STATUS,
- CSR_FH_INT_RX_MASK);
- }
- if (inta & CSR_INT_BIT_RX_PERIODIC) {
- handled |= CSR_INT_BIT_RX_PERIODIC;
- iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
- }
- /* Sending RX interrupt require many steps to be done in the
- * the device:
- * 1- write interrupt to current index in ICT table.
- * 2- dma RX frame.
- * 3- update RX shared data to indicate last write index.
- * 4- send interrupt.
- * This could lead to RX race, driver could receive RX interrupt
- * but the shared data changes does not reflect this;
- * periodic interrupt will detect any dangling Rx activity.
- */
-
- /* Disable periodic interrupt; we use it as just a one-shot. */
- iwl_write8(priv, CSR_INT_PERIODIC_REG,
- CSR_INT_PERIODIC_DIS);
- iwl_rx_handle(priv);
-
- /*
- * Enable periodic interrupt in 8 msec only if we received
- * real RX interrupt (instead of just periodic int), to catch
- * any dangling Rx interrupt. If it was just the periodic
- * interrupt, there was no dangling Rx activity, and no need
- * to extend the periodic interrupt; one-shot is enough.
- */
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
- iwl_write8(priv, CSR_INT_PERIODIC_REG,
- CSR_INT_PERIODIC_ENA);
-
- priv->isr_stats.rx++;
- }
-
- /* This "Tx" DMA channel is used only for loading uCode */
- if (inta & CSR_INT_BIT_FH_TX) {
- iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
- IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
- priv->isr_stats.tx++;
- handled |= CSR_INT_BIT_FH_TX;
- /* Wake up uCode load routine, now that load is complete */
- priv->ucode_write_complete = 1;
- wake_up_interruptible(&priv->wait_command_queue);
- }
-
- if (inta & ~handled) {
- IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
- priv->isr_stats.unhandled++;
- }
-
- if (inta & ~(priv->inta_mask)) {
- IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
- inta & ~priv->inta_mask);
- }
-
- /* Re-enable all interrupts */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_enable_interrupts(priv);
- /* Re-enable RF_KILL if it occurred */
- else if (handled & CSR_INT_BIT_RF_KILL)
- iwl_enable_rfkill_int(priv);
+ IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
+ iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
}
/*****************************************************************************
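The removed iwl_rx_handle() walked the RX ring from the driver's read index up to the closed_rb_num written back by the firmware, wrapping with RX_QUEUE_MASK and scheduling a replenish once more than half the ring was empty; that loop now belongs to the transport layer. A self-contained sketch of just the index walk, with the ring size assumed to be a power of two as in the driver:

#include <stdio.h>

#define RX_QUEUE_SIZE 256
#define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)

int main(void)
{
        unsigned int r = 10;            /* last slot filled by firmware (closed_rb_num) */
        unsigned int i = 250;           /* driver's read index */
        unsigned int write_actual = 248;
        int total_empty, fill_rx = 0, handled = 0;

        /* how many buffers need restocking after this pass */
        total_empty = (int)r - (int)write_actual;
        if (total_empty < 0)
                total_empty += RX_QUEUE_SIZE;
        if (total_empty > RX_QUEUE_SIZE / 2)
                fill_rx = 1;

        while (i != r) {
                /* ...handle rxq->queue[i] here... */
                handled++;
                i = (i + 1) & RX_QUEUE_MASK;
        }

        printf("handled %d buffers, empty %d, replenish now: %d\n",
               handled, total_empty, fill_rx);
        return 0;
}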
@@ -939,22 +574,29 @@ static struct attribute_group iwl_attribute_group = {
*
******************************************************************************/
-static void iwl_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+static void iwl_free_fw_desc(struct iwl_priv *priv, struct fw_desc *desc)
{
if (desc->v_addr)
- dma_free_coherent(&pci_dev->dev, desc->len,
+ dma_free_coherent(priv->bus->dev, desc->len,
desc->v_addr, desc->p_addr);
desc->v_addr = NULL;
desc->len = 0;
}
-static void iwl_free_fw_img(struct pci_dev *pci_dev, struct fw_img *img)
+static void iwl_free_fw_img(struct iwl_priv *priv, struct fw_img *img)
+{
+ iwl_free_fw_desc(priv, &img->code);
+ iwl_free_fw_desc(priv, &img->data);
+}
+
+static void iwl_dealloc_ucode(struct iwl_priv *priv)
{
- iwl_free_fw_desc(pci_dev, &img->code);
- iwl_free_fw_desc(pci_dev, &img->data);
+ iwl_free_fw_img(priv, &priv->ucode_rt);
+ iwl_free_fw_img(priv, &priv->ucode_init);
+ iwl_free_fw_img(priv, &priv->ucode_wowlan);
}
-static int iwl_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc,
+static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc,
const void *data, size_t len)
{
if (!len) {
@@ -962,21 +604,16 @@ static int iwl_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc,
return -EINVAL;
}
- desc->v_addr = dma_alloc_coherent(&pci_dev->dev, len,
+ desc->v_addr = dma_alloc_coherent(priv->bus->dev, len,
&desc->p_addr, GFP_KERNEL);
if (!desc->v_addr)
return -ENOMEM;
+
desc->len = len;
memcpy(desc->v_addr, data, len);
return 0;
}
-static void iwl_dealloc_ucode_pci(struct iwl_priv *priv)
-{
- iwl_free_fw_img(priv->pci_dev, &priv->ucode_rt);
- iwl_free_fw_img(priv->pci_dev, &priv->ucode_init);
-}
-
struct iwlagn_ucode_capabilities {
u32 max_probe_length;
u32 standard_phy_calibration_size;
@@ -1021,13 +658,14 @@ static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
priv->firmware_name);
return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
- &priv->pci_dev->dev, GFP_KERNEL, priv,
- iwl_ucode_callback);
+ priv->bus->dev,
+ GFP_KERNEL, priv, iwl_ucode_callback);
}
struct iwlagn_firmware_pieces {
- const void *inst, *data, *init, *init_data;
- size_t inst_size, data_size, init_size, init_data_size;
+ const void *inst, *data, *init, *init_data, *wowlan_inst, *wowlan_data;
+ size_t inst_size, data_size, init_size, init_data_size,
+ wowlan_inst_size, wowlan_data_size;
u32 build;
@@ -1266,6 +904,14 @@ static int iwlagn_load_firmware(struct iwl_priv *priv,
goto invalid_tlv_len;
priv->enhance_sensitivity_table = true;
break;
+ case IWL_UCODE_TLV_WOWLAN_INST:
+ pieces->wowlan_inst = tlv_data;
+ pieces->wowlan_inst_size = tlv_len;
+ break;
+ case IWL_UCODE_TLV_WOWLAN_DATA:
+ pieces->wowlan_data = tlv_data;
+ pieces->wowlan_data_size = tlv_len;
+ break;
case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
@@ -1443,23 +1089,35 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* Runtime instructions and 2 copies of data:
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
- if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_rt.code,
+ if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.code,
pieces.inst, pieces.inst_size))
goto err_pci_alloc;
- if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_rt.data,
+ if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.data,
pieces.data, pieces.data_size))
goto err_pci_alloc;
/* Initialization instructions and data */
if (pieces.init_size && pieces.init_data_size) {
- if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init.code,
+ if (iwl_alloc_fw_desc(priv, &priv->ucode_init.code,
pieces.init, pieces.init_size))
goto err_pci_alloc;
- if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init.data,
+ if (iwl_alloc_fw_desc(priv, &priv->ucode_init.data,
pieces.init_data, pieces.init_data_size))
goto err_pci_alloc;
}
+ /* WoWLAN instructions and data */
+ if (pieces.wowlan_inst_size && pieces.wowlan_data_size) {
+ if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.code,
+ pieces.wowlan_inst,
+ pieces.wowlan_inst_size))
+ goto err_pci_alloc;
+ if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.data,
+ pieces.wowlan_data,
+ pieces.wowlan_data_size))
+ goto err_pci_alloc;
+ }
+
/* Now that we can no longer fail, copy information */
/*
@@ -1467,25 +1125,26 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
* for each event, which is of mode 1 (including timestamp) for all
* new microcodes that include this information.
*/
- priv->_agn.init_evtlog_ptr = pieces.init_evtlog_ptr;
+ priv->init_evtlog_ptr = pieces.init_evtlog_ptr;
if (pieces.init_evtlog_size)
- priv->_agn.init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
+ priv->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
else
- priv->_agn.init_evtlog_size =
+ priv->init_evtlog_size =
priv->cfg->base_params->max_event_log_size;
- priv->_agn.init_errlog_ptr = pieces.init_errlog_ptr;
- priv->_agn.inst_evtlog_ptr = pieces.inst_evtlog_ptr;
+ priv->init_errlog_ptr = pieces.init_errlog_ptr;
+ priv->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
if (pieces.inst_evtlog_size)
- priv->_agn.inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
+ priv->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
else
- priv->_agn.inst_evtlog_size =
+ priv->inst_evtlog_size =
priv->cfg->base_params->max_event_log_size;
- priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr;
+ priv->inst_errlog_ptr = pieces.inst_errlog_ptr;
priv->new_scan_threshold_behaviour =
!!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
- if (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN) {
+ if ((priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE) &&
+ (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN)) {
priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
} else
@@ -1505,9 +1164,9 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
ucode_capa.standard_phy_calibration_size =
IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
- priv->_agn.phy_calib_chain_noise_reset_cmd =
+ priv->phy_calib_chain_noise_reset_cmd =
ucode_capa.standard_phy_calibration_size;
- priv->_agn.phy_calib_chain_noise_gain_cmd =
+ priv->phy_calib_chain_noise_gain_cmd =
ucode_capa.standard_phy_calibration_size + 1;
/**************************************************
@@ -1523,7 +1182,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
if (err)
IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
- err = sysfs_create_group(&priv->pci_dev->dev.kobj,
+ err = sysfs_create_group(&(priv->bus->dev->kobj),
&iwl_attribute_group);
if (err) {
IWL_ERR(priv, "failed to create sysfs device attributes\n");
@@ -1532,7 +1191,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
- complete(&priv->_agn.firmware_loading_complete);
+ complete(&priv->firmware_loading_complete);
return;
try_again:
@@ -1544,14 +1203,14 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
err_pci_alloc:
IWL_ERR(priv, "failed to allocate pci memory\n");
- iwl_dealloc_ucode_pci(priv);
+ iwl_dealloc_ucode(priv);
out_unbind:
- complete(&priv->_agn.firmware_loading_complete);
- device_release_driver(&priv->pci_dev->dev);
+ complete(&priv->firmware_loading_complete);
+ device_release_driver(priv->bus->dev);
release_firmware(ucode_raw);
}
-static const char *desc_lookup_text[] = {
+static const char * const desc_lookup_text[] = {
"OK",
"FAIL",
"BAD_PARAM",
@@ -1575,7 +1234,7 @@ static const char *desc_lookup_text[] = {
"NMI_INTERRUPT_DATA_ACTION_PT",
"NMI_TRM_HW_ER",
"NMI_INTERRUPT_TRM",
- "NMI_INTERRUPT_BREAK_POINT"
+ "NMI_INTERRUPT_BREAK_POINT",
"DEBUG_0",
"DEBUG_1",
"DEBUG_2",
@@ -1626,19 +1285,19 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
struct iwl_error_event_table table;
base = priv->device_pointers.error_event_table;
- if (priv->ucode_type == UCODE_SUBTYPE_INIT) {
+ if (priv->ucode_type == IWL_UCODE_INIT) {
if (!base)
- base = priv->_agn.init_errlog_ptr;
+ base = priv->init_errlog_ptr;
} else {
if (!base)
- base = priv->_agn.inst_errlog_ptr;
+ base = priv->inst_errlog_ptr;
}
- if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ if (!iwlagn_hw_valid_rtc_data_addr(base)) {
IWL_ERR(priv,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
- (priv->ucode_type == UCODE_SUBTYPE_INIT)
+ (priv->ucode_type == IWL_UCODE_INIT)
? "Init" : "RT");
return;
}
@@ -1702,12 +1361,12 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
return pos;
base = priv->device_pointers.log_event_table;
- if (priv->ucode_type == UCODE_SUBTYPE_INIT) {
+ if (priv->ucode_type == IWL_UCODE_INIT) {
if (!base)
- base = priv->_agn.init_evtlog_ptr;
+ base = priv->init_evtlog_ptr;
} else {
if (!base)
- base = priv->_agn.inst_evtlog_ptr;
+ base = priv->inst_evtlog_ptr;
}
if (mode == 0)
@@ -1815,21 +1474,21 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
size_t bufsz = 0;
base = priv->device_pointers.log_event_table;
- if (priv->ucode_type == UCODE_SUBTYPE_INIT) {
- logsize = priv->_agn.init_evtlog_size;
+ if (priv->ucode_type == IWL_UCODE_INIT) {
+ logsize = priv->init_evtlog_size;
if (!base)
- base = priv->_agn.init_evtlog_ptr;
+ base = priv->init_evtlog_ptr;
} else {
- logsize = priv->_agn.inst_evtlog_size;
+ logsize = priv->inst_evtlog_size;
if (!base)
- base = priv->_agn.inst_evtlog_ptr;
+ base = priv->inst_evtlog_ptr;
}
- if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ if (!iwlagn_hw_valid_rtc_data_addr(base)) {
IWL_ERR(priv,
"Invalid event log pointer 0x%08X for %s uCode\n",
base,
- (priv->ucode_type == UCODE_SUBTYPE_INIT)
+ (priv->ucode_type == IWL_UCODE_INIT)
? "Init" : "RT");
return -EINVAL;
}
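The logsize chosen above is a count of entries; when the firmware TLV reports the log area in bytes, the ucode callback earlier in this patch converts it with (bytes - 16) / 12, which implies a 16-byte header followed by fixed 12-byte (mode-1, timestamped) event records. The conversion in isolation, under that assumption:

#include <stdio.h>

/* Assumed layout implied by the driver's expression: a 16-byte log header,
 * then fixed 12-byte event records (mode 1, i.e. with timestamp). */
static unsigned int evtlog_entries(unsigned int bytes)
{
        if (bytes < 16)
                return 0;
        return (bytes - 16) / 12;
}

int main(void)
{
        printf("%u entries\n", evtlog_entries(16 + 12 * 512));   /* 512 entries */
        return 0;
}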
@@ -1928,8 +1587,9 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
adv_cmd.critical_temperature_exit =
cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
- ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
- sizeof(adv_cmd), &adv_cmd);
+ ret = trans_send_cmd_pdu(&priv->trans,
+ REPLY_CT_KILL_CONFIG_CMD,
+ CMD_SYNC, sizeof(adv_cmd), &adv_cmd);
if (ret)
IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
else
@@ -1943,8 +1603,9 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
cmd.critical_temperature_R =
cpu_to_le32(priv->hw_params.ct_kill_threshold);
- ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
- sizeof(cmd), &cmd);
+ ret = trans_send_cmd_pdu(&priv->trans,
+ REPLY_CT_KILL_CONFIG_CMD,
+ CMD_SYNC, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
else
@@ -1968,10 +1629,29 @@ static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);
- return iwl_send_cmd(priv, &cmd);
+ return trans_send_cmd(&priv->trans, &cmd);
}
+static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
+{
+ struct iwl_tx_ant_config_cmd tx_ant_cmd = {
+ .valid = cpu_to_le32(valid_tx_ant),
+ };
+
+ if (IWL_UCODE_API(priv->ucode_ver) > 1) {
+ IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
+ return trans_send_cmd_pdu(&priv->trans,
+ TX_ANT_CONFIGURATION_CMD,
+ CMD_SYNC,
+ sizeof(struct iwl_tx_ant_config_cmd),
+ &tx_ant_cmd);
+ } else {
+ IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
/**
* iwl_alive_start - called after REPLY_ALIVE notification received
* from protocol/runtime uCode (initialization uCode's
@@ -1982,6 +1662,7 @@ int iwl_alive_start(struct iwl_priv *priv)
int ret = 0;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ /*TODO: this should go to the transport layer */
iwl_reset_ict(priv);
IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
@@ -1999,11 +1680,18 @@ int iwl_alive_start(struct iwl_priv *priv)
if (priv->cfg->bt_params &&
priv->cfg->bt_params->advanced_bt_coexist) {
/* Configure Bluetooth device coexistence support */
+ if (priv->cfg->bt_params->bt_sco_disable)
+ priv->bt_enable_pspoll = false;
+ else
+ priv->bt_enable_pspoll = true;
+
priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
- priv->cfg->ops->hcmd->send_bt_config(priv);
+ iwlagn_send_advance_bt_config(priv);
priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
+ priv->cur_rssi_ctx = NULL;
+
iwlagn_send_prio_tbl(priv);
/* FIXME: w/a to force change uCode BT state machine */
@@ -2015,7 +1703,13 @@ int iwl_alive_start(struct iwl_priv *priv)
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
+ } else {
+ /*
+ * default is 2-wire BT coexistence support
+ */
+ iwl_send_bt_config(priv);
}
+
if (priv->hw_params.calib_rt_cfg)
iwlagn_send_calib_cfg_rt(priv, priv->hw_params.calib_rt_cfg);
@@ -2024,10 +1718,9 @@ int iwl_alive_start(struct iwl_priv *priv)
priv->active_rate = IWL_RATES_MASK;
/* Configure Tx antenna selection based on H/W config */
- if (priv->cfg->ops->hcmd->set_tx_ant)
- priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant);
+ iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant);
- if (iwl_is_associated_ctx(ctx)) {
+ if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
struct iwl_rxon_cmd *active_rxon =
(struct iwl_rxon_cmd *)&ctx->active;
/* apply any changes in staging */
@@ -2039,24 +1732,18 @@ int iwl_alive_start(struct iwl_priv *priv)
for_each_context(priv, tmp)
iwl_connection_init_rx_config(priv, tmp);
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ iwlagn_set_rxon_chain(priv, ctx);
}
- if (!priv->cfg->bt_params || (priv->cfg->bt_params &&
- !priv->cfg->bt_params->advanced_bt_coexist)) {
- /*
- * default is 2-wire BT coexexistence support
- */
- priv->cfg->ops->hcmd->send_bt_config(priv);
+ if (!priv->wowlan) {
+ /* WoWLAN ucode will not reply in the same way, skip it */
+ iwl_reset_run_time_calib(priv);
}
- iwl_reset_run_time_calib(priv);
-
set_bit(STATUS_READY, &priv->status);
/* Configure the adapter for unassociated operation */
- ret = iwlcore_commit_rxon(priv, ctx);
+ ret = iwlagn_commit_rxon(priv, ctx);
if (ret)
return ret;
@@ -2090,6 +1777,8 @@ static void __iwl_down(struct iwl_priv *priv)
/* reset BT coex data */
priv->bt_status = 0;
+ priv->cur_rssi_ctx = NULL;
+ priv->bt_is_sco = 0;
if (priv->cfg->bt_params)
priv->bt_traffic_load =
priv->cfg->bt_params->bt_init_traffic_load;
@@ -2116,7 +1805,7 @@ static void __iwl_down(struct iwl_priv *priv)
test_bit(STATUS_EXIT_PENDING, &priv->status) <<
STATUS_EXIT_PENDING;
- iwlagn_stop_device(priv);
+ trans_stop_device(&priv->trans);
dev_kfree_skb(priv->beacon_skb);
priv->beacon_skb = NULL;
@@ -2131,55 +1820,6 @@ static void iwl_down(struct iwl_priv *priv)
iwl_cancel_deferred_work(priv);
}
-#define HW_READY_TIMEOUT (50)
-
-/* Note: returns poll_bit return value, which is >= 0 if success */
-static int iwl_set_hw_ready(struct iwl_priv *priv)
-{
- int ret;
-
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
-
- /* See if we got it */
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- HW_READY_TIMEOUT);
-
- IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
- return ret;
-}
-
-/* Note: returns standard 0/-ERROR code */
-int iwl_prepare_card_hw(struct iwl_priv *priv)
-{
- int ret;
-
- IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter\n");
-
- ret = iwl_set_hw_ready(priv);
- if (ret >= 0)
- return 0;
-
- /* If HW is not ready, prepare the conditions to check again */
- iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
-
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
-
- if (ret < 0)
- return ret;
-
- /* HW should be ready by now, check again. */
- ret = iwl_set_hw_ready(priv);
- if (ret >= 0)
- return 0;
- return ret;
-}
-
#define MAX_HW_RESTARTS 5
static int __iwl_up(struct iwl_priv *priv)
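iwl_prepare_card_hw() and iwl_set_hw_ready(), removed above, are a set-a-bit-then-poll handshake: request NIC_READY and poll for it; if that times out, assert PREPARE, wait for PREPARE_DONE, then retry NIC_READY once more. A generic standalone sketch of the pattern with the register access stubbed out (the stub is an assumption, not the driver's I/O helpers):

#include <stdio.h>
#include <stdbool.h>

/* Stubbed "hardware": pretend the ready bit appears after a few polls. */
static int polls_until_ready = 3;

static bool read_ready_bit(void)
{
        return polls_until_ready-- <= 0;
}

/* Poll a predicate up to max_polls times; returns >= 0 on success, -1 on timeout. */
static int poll_bit(bool (*read_bit)(void), int max_polls)
{
        int i;

        for (i = 0; i < max_polls; i++)
                if (read_bit())
                        return i;
        return -1;
}

int main(void)
{
        /* 1) set the "ready" request bit (omitted here), 2) poll for it */
        int ret = poll_bit(read_ready_bit, 10);

        if (ret < 0)
                printf("not ready: would assert PREPARE and retry\n");
        else
                printf("hardware ready after %d polls\n", ret);
        return 0;
}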
@@ -2210,8 +1850,7 @@ static int __iwl_up(struct iwl_priv *priv)
ret = iwlagn_load_ucode_wait_alive(priv,
&priv->ucode_rt,
- UCODE_SUBTYPE_REGULAR,
- UCODE_SUBTYPE_REGULAR_NEW);
+ IWL_UCODE_REGULAR);
if (ret) {
IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
goto error;
@@ -2266,6 +1905,7 @@ static void iwlagn_prepare_restart(struct iwl_priv *priv)
u8 bt_ci_compliance;
u8 bt_load;
u8 bt_status;
+ bool bt_is_sco;
lockdep_assert_held(&priv->mutex);
@@ -2286,6 +1926,7 @@ static void iwlagn_prepare_restart(struct iwl_priv *priv)
bt_ci_compliance = priv->bt_ci_compliance;
bt_load = priv->bt_traffic_load;
bt_status = priv->bt_status;
+ bt_is_sco = priv->bt_is_sco;
__iwl_down(priv);
@@ -2293,6 +1934,7 @@ static void iwlagn_prepare_restart(struct iwl_priv *priv)
priv->bt_ci_compliance = bt_ci_compliance;
priv->bt_traffic_load = bt_load;
priv->bt_status = bt_status;
+ priv->bt_is_sco = bt_is_sco;
}
static void iwl_bg_restart(struct work_struct *data)
@@ -2313,19 +1955,6 @@ static void iwl_bg_restart(struct work_struct *data)
}
}
-static void iwl_bg_rx_replenish(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rx_replenish);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- iwlagn_rx_replenish(priv);
- mutex_unlock(&priv->mutex);
-}
-
static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
@@ -2360,7 +1989,7 @@ static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
/* TODO: queue up if scanning? */
if (test_bit(STATUS_SCANNING, &priv->status) ||
- priv->_agn.offchan_tx_skb) {
+ priv->offchan_tx_skb) {
ret = -EBUSY;
goto out;
}
@@ -2374,14 +2003,14 @@ static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
goto out;
}
- priv->_agn.offchan_tx_skb = skb;
- priv->_agn.offchan_tx_timeout = wait;
- priv->_agn.offchan_tx_chan = chan;
+ priv->offchan_tx_skb = skb;
+ priv->offchan_tx_timeout = wait;
+ priv->offchan_tx_chan = chan;
ret = iwl_scan_initiate(priv, priv->contexts[IWL_RXON_CTX_PAN].vif,
IWL_SCAN_OFFCH_TX, chan->band);
if (ret)
- priv->_agn.offchan_tx_skb = NULL;
+ priv->offchan_tx_skb = NULL;
out:
mutex_unlock(&priv->mutex);
free:
@@ -2398,12 +2027,12 @@ static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
- if (!priv->_agn.offchan_tx_skb) {
+ if (!priv->offchan_tx_skb) {
ret = -EINVAL;
goto unlock;
}
- priv->_agn.offchan_tx_skb = NULL;
+ priv->offchan_tx_skb = NULL;
ret = iwl_scan_cancel_timeout(priv, 200);
if (ret)
@@ -2420,6 +2049,77 @@ unlock:
*
*****************************************************************************/
+static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP),
+ },
+};
+
+static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+};
+
+static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_AP),
+ },
+};
+
+static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_dualmode[] = {
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .beacon_int_infra_match = true,
+ .limits = iwlagn_sta_ap_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
+ },
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .limits = iwlagn_2sta_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
+ },
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_p2p[] = {
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .beacon_int_infra_match = true,
+ .limits = iwlagn_p2p_sta_go_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
+ },
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .limits = iwlagn_p2p_2sta_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
+ },
+};
+
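The tables above advertise to cfg80211 which concurrent interface mixes the driver accepts: for the dualmode case, at most two interfaces on one channel, with at most one STA and one AP. A minimal standalone sketch of that counting rule (illustrative only, not cfg80211's actual validator):

        /* Illustrative check modelled on iwlagn_iface_combinations_dualmode[0]:
         * at most two interfaces total, at most one STA and one AP. */
        #include <stdbool.h>
        #include <stdio.h>

        static bool fits_sta_ap_combo(int n_sta, int n_ap)
        {
                if (n_sta + n_ap > 2)   /* .max_interfaces = 2 */
                        return false;
                if (n_sta > 1)          /* STA limit: .max = 1 */
                        return false;
                if (n_ap > 1)           /* AP limit: .max = 1 */
                        return false;
                return true;
        }

        int main(void)
        {
                printf("1 STA + 1 AP: %s\n", fits_sta_ap_combo(1, 1) ? "ok" : "rejected");
                printf("2 APs:        %s\n", fits_sta_ap_combo(0, 2) ? "ok" : "rejected");
                return 0;
        }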
/*
* Not a mac80211 entry point function, but it fits in with all the
* other mac80211 functions grouped here.
@@ -2445,7 +2145,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
- if (priv->cfg->sku & IWL_SKU_N)
+ if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
IEEE80211_HW_SUPPORTS_STATIC_SMPS;
@@ -2460,17 +2160,45 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
}
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+ if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
+ hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(iwlagn_iface_combinations_p2p);
+ } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
+ hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
+ }
+
hw->wiphy->max_remain_on_channel_duration = 1000;
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
WIPHY_FLAG_DISABLE_BEACON_HINTS |
WIPHY_FLAG_IBSS_RSN;
- /*
- * For now, disable PS by default because it affects
- * RX performance significantly.
- */
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ if (priv->ucode_wowlan.code.len && device_can_wakeup(priv->bus->dev)) {
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_RFKILL_RELEASE;
+ if (!iwlagn_mod_params.sw_crypto)
+ hw->wiphy->wowlan.flags |=
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE;
+
+ hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
+ hw->wiphy->wowlan.pattern_min_len =
+ IWLAGN_WOWLAN_MIN_PATTERN_LEN;
+ hw->wiphy->wowlan.pattern_max_len =
+ IWLAGN_WOWLAN_MAX_PATTERN_LEN;
+ }
+
+ if (iwlagn_mod_params.power_save)
+ hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ else
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
/* we create the 802.11 header and a zero-length SSID element */
@@ -2551,6 +2279,471 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw)
IWL_DEBUG_MAC80211(priv, "leave\n");
}
+#ifdef CONFIG_PM
+static int iwlagn_send_patterns(struct iwl_priv *priv,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_WOWLAN_PATTERNS,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .flags = CMD_SYNC,
+ };
+ int i, err;
+
+ if (!wowlan->n_patterns)
+ return 0;
+
+ cmd.len[0] = sizeof(*pattern_cmd) +
+ wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
+
+ pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+ if (!pattern_cmd)
+ return -ENOMEM;
+
+ pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+ memcpy(&pattern_cmd->patterns[i].mask,
+ wowlan->patterns[i].mask, mask_len);
+ memcpy(&pattern_cmd->patterns[i].pattern,
+ wowlan->patterns[i].pattern,
+ wowlan->patterns[i].pattern_len);
+ pattern_cmd->patterns[i].mask_size = mask_len;
+ pattern_cmd->patterns[i].pattern_size =
+ wowlan->patterns[i].pattern_len;
+ }
+
+ cmd.data[0] = pattern_cmd;
+ err = trans_send_cmd(&priv->trans, &cmd);
+ kfree(pattern_cmd);
+ return err;
+}
+#endif
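As the sizing above shows, each pattern carries one mask bit per pattern byte, so the mask occupies DIV_ROUND_UP(pattern_len, 8) bytes and the whole command is sizeof(*pattern_cmd) plus one pattern entry per configured pattern. A quick standalone illustration of the mask sizing (not driver code):

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

        int main(void)
        {
                /* A 42-byte wake pattern needs a 6-byte mask: bit i of the mask
                 * tells the uCode whether byte i of the pattern must match. */
                unsigned int pattern_len = 42;
                unsigned int mask_len = DIV_ROUND_UP(pattern_len, 8);

                printf("pattern of %u bytes -> mask of %u bytes\n",
                       pattern_len, mask_len);
                return 0;
        }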
+
+static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ if (iwlagn_mod_params.sw_crypto)
+ return;
+
+ mutex_lock(&priv->mutex);
+
+ if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
+ goto out;
+
+ memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
+ memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
+ priv->replay_ctr = cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
+ priv->have_rekey_data = true;
+
+ out:
+ mutex_unlock(&priv->mutex);
+}
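The replay counter handed over by cfg80211 is an 8-byte big-endian value, and the cpu_to_le64(be64_to_cpup(...)) line above re-encodes it in the little-endian layout the uCode command expects. A small host-side sketch of the same conversion, using hypothetical helpers rather than the kernel's byte-order macros:

        #include <stdint.h>
        #include <stdio.h>

        /* Hypothetical helpers: interpret 8 bytes as big-endian, then store
         * the value back out little-endian. */
        static uint64_t read_be64(const uint8_t *p)
        {
                uint64_t v = 0;
                int i;

                for (i = 0; i < 8; i++)
                        v = (v << 8) | p[i];
                return v;
        }

        static void write_le64(uint8_t *p, uint64_t v)
        {
                int i;

                for (i = 0; i < 8; i++)
                        p[i] = (uint8_t)(v >> (8 * i));
        }

        int main(void)
        {
                const uint8_t replay_ctr_be[8] = { 0, 0, 0, 0, 0, 0, 0x12, 0x34 };
                uint8_t replay_ctr_le[8];

                write_le64(replay_ctr_le, read_be64(replay_ctr_be));
                printf("replay counter = %llu, LE byte 0 = 0x%02x\n",
                       (unsigned long long)read_be64(replay_ctr_be),
                       replay_ctr_le[0]);
                return 0;
        }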
+
+struct wowlan_key_data {
+ struct iwl_rxon_context *ctx;
+ struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
+ struct iwlagn_wowlan_tkip_params_cmd *tkip;
+ const u8 *bssid;
+ bool error, use_rsc_tsc, use_tkip;
+};
+
+#ifdef CONFIG_PM
+static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
+{
+ int i;
+
+ for (i = 0; i < IWLAGN_P1K_SIZE; i++)
+ out[i] = cpu_to_le16(p1k[i]);
+}
+
+static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct wowlan_key_data *data = _data;
+ struct iwl_rxon_context *ctx = data->ctx;
+ struct aes_sc *aes_sc, *aes_tx_sc = NULL;
+ struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
+ struct iwlagn_p1k_cache *rx_p1ks;
+ u8 *rx_mic_key;
+ struct ieee80211_key_seq seq;
+ u32 cur_rx_iv32 = 0;
+ u16 p1k[IWLAGN_P1K_SIZE];
+ int ret, i;
+
+ mutex_lock(&priv->mutex);
+
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+ !sta && !ctx->key_mapping_keys)
+ ret = iwl_set_default_wep_key(priv, ctx, key);
+ else
+ ret = iwl_set_dynamic_key(priv, ctx, key, sta);
+
+ if (ret) {
+ IWL_ERR(priv, "Error setting key during suspend!\n");
+ data->error = true;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (sta) {
+ tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
+ tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
+
+ rx_p1ks = data->tkip->rx_uni;
+
+ ieee80211_get_key_tx_seq(key, &seq);
+ tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
+ tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
+
+ ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
+ iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
+
+ memcpy(data->tkip->mic_keys.tx,
+ &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ IWLAGN_MIC_KEY_SIZE);
+
+ rx_mic_key = data->tkip->mic_keys.rx_unicast;
+ } else {
+ tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+ rx_p1ks = data->tkip->rx_multi;
+ rx_mic_key = data->tkip->mic_keys.rx_mcast;
+ }
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211 use TID 0 (as they need to, to avoid replay attacks)
+ * for checking the IV in the frames.
+ */
+ for (i = 0; i < IWLAGN_NUM_RSC; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
+ tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
+ /* wrapping isn't allowed, AP must rekey */
+ if (seq.tkip.iv32 > cur_rx_iv32)
+ cur_rx_iv32 = seq.tkip.iv32;
+ }
+
+ ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
+ iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
+ ieee80211_get_tkip_rx_p1k(key, data->bssid,
+ cur_rx_iv32 + 1, p1k);
+ iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
+
+ memcpy(rx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ IWLAGN_MIC_KEY_SIZE);
+
+ data->use_tkip = true;
+ data->use_rsc_tsc = true;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (sta) {
+ u8 *pn = seq.ccmp.pn;
+
+ aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+ aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+
+ ieee80211_get_key_tx_seq(key, &seq);
+ aes_tx_sc->pn = cpu_to_le64(
+ (u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ } else
+ aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211 use TID 0 for checking the IV in the frames.
+ */
+ for (i = 0; i < IWLAGN_NUM_RSC; i++) {
+ u8 *pn = seq.ccmp.pn;
+
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ aes_sc->pn = cpu_to_le64(
+ (u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ }
+ data->use_rsc_tsc = true;
+ break;
+ }
+
+ mutex_unlock(&priv->mutex);
+}
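Both CCMP branches above pack the 48-bit packet number, which mac80211 delivers with pn[0] as the most significant byte, into a single little-endian 64-bit field for the uCode. The same packing in a standalone sketch (illustrative):

        #include <stdint.h>
        #include <stdio.h>

        /* pn[] is the 48-bit CCMP packet number with pn[0] as the most
         * significant byte (struct ieee80211_key_seq ordering); the device
         * wants it as one little-endian 64-bit quantity. */
        static uint64_t pack_ccmp_pn(const uint8_t pn[6])
        {
                return (uint64_t)pn[5] |
                       ((uint64_t)pn[4] << 8) |
                       ((uint64_t)pn[3] << 16) |
                       ((uint64_t)pn[2] << 24) |
                       ((uint64_t)pn[1] << 32) |
                       ((uint64_t)pn[0] << 40);
        }

        int main(void)
        {
                const uint8_t pn[6] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x02 };

                /* Packs to 0x0102: pn[4] lands in bits 8..15, pn[5] in bits 0..7. */
                printf("packed PN = 0x%llx\n",
                       (unsigned long long)pack_ccmp_pn(pn));
                return 0;
        }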
+
+static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
+ struct iwl_rxon_cmd rxon;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
+ struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
+ struct wowlan_key_data key_data = {
+ .ctx = ctx,
+ .bssid = ctx->active.bssid_addr,
+ .use_rsc_tsc = false,
+ .tkip = &tkip_cmd,
+ .use_tkip = false,
+ };
+ int ret, i;
+ u16 seq;
+
+ if (WARN_ON(!wowlan))
+ return -EINVAL;
+
+ mutex_lock(&priv->mutex);
+
+ /* Don't attempt WoWLAN when not associated, tear down instead. */
+ if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
+ !iwl_is_associated_ctx(ctx)) {
+ ret = 1;
+ goto out;
+ }
+
+ key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+ if (!key_data.rsc_tsc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
+
+ /*
+ * We know the last used seqno, and the uCode expects to know that
+ * one; it will increment it before TX.
+ */
+ seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
+ wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
+
+ /*
+ * For QoS counters, we store the one to use next, so subtract 0x10
+ * since the uCode will add 0x10 before using the value.
+ */
+ for (i = 0; i < 8; i++) {
+ seq = priv->stations[IWL_AP_ID].tid[i].seq_number;
+ seq -= 0x10;
+ wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
+ }
+
+ if (wowlan->disconnect)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
+ IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
+ if (wowlan->magic_pkt)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
+ if (wowlan->gtk_rekey_failure)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+ if (wowlan->eap_identity_req)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+ if (wowlan->four_way_handshake)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+ if (wowlan->rfkill_release)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_RFKILL);
+ if (wowlan->n_patterns)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+ iwl_scan_cancel_timeout(priv, 200);
+
+ memcpy(&rxon, &ctx->active, sizeof(rxon));
+
+ trans_stop_device(&priv->trans);
+
+ priv->wowlan = true;
+
+ ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan,
+ IWL_UCODE_WOWLAN);
+ if (ret)
+ goto error;
+
+ /* now configure WoWLAN ucode */
+ ret = iwl_alive_start(priv);
+ if (ret)
+ goto error;
+
+ memcpy(&ctx->staging, &rxon, sizeof(rxon));
+ ret = iwlagn_commit_rxon(priv, ctx);
+ if (ret)
+ goto error;
+
+ ret = iwl_power_update_mode(priv, true);
+ if (ret)
+ goto error;
+
+ if (!iwlagn_mod_params.sw_crypto) {
+ /* mark all keys clear */
+ priv->ucode_key_table = 0;
+ ctx->key_mapping_keys = 0;
+
+ /*
+ * This needs to be unlocked due to lock ordering
+ * constraints. Since we're in the suspend path
+ * that isn't really a problem though.
+ */
+ mutex_unlock(&priv->mutex);
+ ieee80211_iter_keys(priv->hw, ctx->vif,
+ iwlagn_wowlan_program_keys,
+ &key_data);
+ mutex_lock(&priv->mutex);
+ if (key_data.error) {
+ ret = -EIO;
+ goto error;
+ }
+
+ if (key_data.use_rsc_tsc) {
+ struct iwl_host_cmd rsc_tsc_cmd = {
+ .id = REPLY_WOWLAN_TSC_RSC_PARAMS,
+ .flags = CMD_SYNC,
+ .data[0] = key_data.rsc_tsc,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .len[0] = sizeof(*key_data.rsc_tsc),
+ };
+
+ ret = trans_send_cmd(&priv->trans, &rsc_tsc_cmd);
+ if (ret)
+ goto error;
+ }
+
+ if (key_data.use_tkip) {
+ ret = trans_send_cmd_pdu(&priv->trans,
+ REPLY_WOWLAN_TKIP_PARAMS,
+ CMD_SYNC, sizeof(tkip_cmd),
+ &tkip_cmd);
+ if (ret)
+ goto error;
+ }
+
+ if (priv->have_rekey_data) {
+ memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+ memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
+ kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+ memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
+ kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+ kek_kck_cmd.replay_ctr = priv->replay_ctr;
+
+ ret = trans_send_cmd_pdu(&priv->trans,
+ REPLY_WOWLAN_KEK_KCK_MATERIAL,
+ CMD_SYNC, sizeof(kek_kck_cmd),
+ &kek_kck_cmd);
+ if (ret)
+ goto error;
+ }
+ }
+
+ ret = trans_send_cmd_pdu(&priv->trans, REPLY_WOWLAN_WAKEUP_FILTER,
+ CMD_SYNC, sizeof(wakeup_filter_cmd),
+ &wakeup_filter_cmd);
+ if (ret)
+ goto error;
+
+ ret = iwlagn_send_patterns(priv, wowlan);
+ if (ret)
+ goto error;
+
+ device_set_wakeup_enable(priv->bus->dev, true);
+
+ /* Now let the ucode operate on its own */
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+ goto out;
+
+ error:
+ priv->wowlan = false;
+ iwlagn_prepare_restart(priv);
+ ieee80211_restart_hw(priv->hw);
+ out:
+ mutex_unlock(&priv->mutex);
+ kfree(key_data.rsc_tsc);
+ return ret;
+}
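The sequence-control arithmetic in the suspend path works because the 802.11 sequence number occupies bits 4..15 of the field, so one frame equals a step of 0x10; subtracting 0x10 from the driver's next-to-use QoS counter yields the last value actually used, which the uCode then increments again. A tiny worked example (illustrative):

        #include <stdint.h>
        #include <stdio.h>

        #define SCTL_SEQ_MASK 0xFFF0    /* sequence number sits in bits 4..15 */

        int main(void)
        {
                /* The driver stores the value it would put on the *next* frame;
                 * subtracting 0x10 (one frame) gives the last value actually used. */
                uint16_t next_seq_ctl = 0x0150;                 /* seq number 0x15 */
                uint16_t last_used =
                        (uint16_t)((next_seq_ctl - 0x10) & SCTL_SEQ_MASK);

                printf("next 0x%04x -> last used 0x%04x (seq %u)\n",
                       (unsigned)next_seq_ctl, (unsigned)last_used,
                       (unsigned)(last_used >> 4));
                return 0;
        }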
+
+static int iwlagn_mac_resume(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct ieee80211_vif *vif;
+ unsigned long flags;
+ u32 base, status = 0xffffffff;
+ int ret = -EIO;
+
+ mutex_lock(&priv->mutex);
+
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+ base = priv->device_pointers.error_event_table;
+ if (iwlagn_hw_valid_rtc_data_addr(base)) {
+ spin_lock_irqsave(&priv->reg_lock, flags);
+ ret = iwl_grab_nic_access_silent(priv);
+ if (ret == 0) {
+ iwl_write32(priv, HBUS_TARG_MEM_RADDR, base);
+ status = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
+ iwl_release_nic_access(priv);
+ }
+ spin_unlock_irqrestore(&priv->reg_lock, flags);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (ret == 0) {
+ if (!priv->wowlan_sram)
+ priv->wowlan_sram =
+ kzalloc(priv->ucode_wowlan.data.len,
+ GFP_KERNEL);
+
+ if (priv->wowlan_sram)
+ _iwl_read_targ_mem_words(
+ priv, 0x800000, priv->wowlan_sram,
+ priv->ucode_wowlan.data.len / 4);
+ }
+#endif
+ }
+
+ /* we'll clear ctx->vif during iwlagn_prepare_restart() */
+ vif = ctx->vif;
+
+ priv->wowlan = false;
+
+ device_set_wakeup_enable(priv->bus->dev, false);
+
+ iwlagn_prepare_restart(priv);
+
+ memset((void *)&ctx->active, 0, sizeof(ctx->active));
+ iwl_connection_init_rx_config(priv, ctx);
+ iwlagn_set_rxon_chain(priv, ctx);
+
+ mutex_unlock(&priv->mutex);
+
+ ieee80211_resume_disconnect(vif);
+
+ return 1;
+}
+#endif
+
static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct iwl_priv *priv = hw->priv;
@@ -2573,14 +2766,8 @@ static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
u32 iv32, u16 *phase1key)
{
struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- iwl_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
- iv32, phase1key);
- IWL_DEBUG_MAC80211(priv, "leave\n");
+ iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
}
static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -2592,7 +2779,6 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
struct iwl_rxon_context *ctx = vif_priv->ctx;
int ret;
- u8 sta_id;
bool is_default_wep_key = false;
IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -2603,20 +2789,27 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
/*
- * To support IBSS RSN, don't program group keys in IBSS, the
- * hardware will then not attempt to decrypt the frames.
+ * We could program these keys into the hardware as well, but we
+ * don't expect much multicast traffic in IBSS and having keys
+ * for more stations is probably more useful.
+ *
+ * Mark key TX-only and return 0.
*/
if (vif->type == NL80211_IFTYPE_ADHOC &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -EOPNOTSUPP;
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ key->hw_key_idx = WEP_INVALID_OFFSET;
+ return 0;
+ }
- sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
- if (sta_id == IWL_INVALID_STATION)
- return -EINVAL;
+ /* If the key was TX-only, accept deletion */
+ if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
+ return 0;
mutex_lock(&priv->mutex);
iwl_scan_cancel_timeout(priv, 100);
+ BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
+
/*
* If we are getting WEP group key and we didn't receive any key mapping
* so far, we are in legacy wep mode (group key only), otherwise we are
@@ -2624,22 +2817,30 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
* In legacy wep mode, we use another host command to the uCode.
*/
if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
- !sta) {
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
if (cmd == SET_KEY)
is_default_wep_key = !ctx->key_mapping_keys;
else
is_default_wep_key =
- (key->hw_key_idx == HW_KEY_DEFAULT);
+ key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
}
+
switch (cmd) {
case SET_KEY:
- if (is_default_wep_key)
+ if (is_default_wep_key) {
ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
- else
- ret = iwl_set_dynamic_key(priv, vif_priv->ctx,
- key, sta_id);
+ break;
+ }
+ ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
+ if (ret) {
+ /*
+ * can't add key for RX, but we don't need it
+ * in the device for TX so still return 0
+ */
+ ret = 0;
+ key->hw_key_idx = WEP_INVALID_OFFSET;
+ }
IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
break;
@@ -2647,7 +2848,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (is_default_wep_key)
ret = iwl_remove_default_wep_key(priv, ctx, key);
else
- ret = iwl_remove_dynamic_key(priv, ctx, key, sta_id);
+ ret = iwl_remove_dynamic_key(priv, ctx, key, sta);
IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
break;
@@ -2674,7 +2875,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
sta->addr, tid);
- if (!(priv->cfg->sku & IWL_SKU_N))
+ if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE))
return -EACCES;
mutex_lock(&priv->mutex);
@@ -2694,29 +2895,26 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
if (ret == 0) {
- priv->_agn.agg_tids_count++;
- IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
- priv->_agn.agg_tids_count);
+ priv->agg_tids_count++;
+ IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+ priv->agg_tids_count);
}
break;
case IEEE80211_AMPDU_TX_STOP:
IWL_DEBUG_HT(priv, "stop Tx\n");
ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
- if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) {
- priv->_agn.agg_tids_count--;
- IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
- priv->_agn.agg_tids_count);
+ if ((ret == 0) && (priv->agg_tids_count > 0)) {
+ priv->agg_tids_count--;
+ IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+ priv->agg_tids_count);
}
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
ret = 0;
if (priv->cfg->ht_params &&
priv->cfg->ht_params->use_rts_for_aggregation) {
- struct iwl_station_priv *sta_priv =
- (void *) sta->drv_priv;
/*
* switch off RTS/CTS if it was previously enabled
*/
-
sta_priv->lq_sta.lq.general_params.flags &=
~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
@@ -2726,7 +2924,8 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_OPERATIONAL:
buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
- iwlagn_txq_agg_queue_setup(priv, sta, tid, buf_size);
+ trans_txq_agg_setup(&priv->trans, iwl_sta_id(sta), tid,
+ buf_size);
/*
* If the limit is 0, then it wasn't initialised yet,
@@ -2764,6 +2963,9 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
&sta_priv->lq_sta.lq, CMD_ASYNC, false);
+
+ IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
+ sta->addr, tid);
ret = 0;
break;
}
@@ -2833,7 +3035,6 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
u16 ch;
- unsigned long flags = 0;
IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -2850,65 +3051,64 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
if (!iwl_is_associated_ctx(ctx))
goto out;
- if (priv->cfg->ops->lib->set_channel_switch) {
+ if (!priv->cfg->lib->set_channel_switch)
+ goto out;
- ch = channel->hw_value;
- if (le16_to_cpu(ctx->active.channel) != ch) {
- ch_info = iwl_get_channel_info(priv,
- channel->band,
- ch);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "invalid channel\n");
- goto out;
- }
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->current_ht_config.smps = conf->smps_mode;
-
- /* Configure HT40 channels */
- ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
- ctx->ht.is_40mhz = false;
-
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_set_rxon_channel(priv, channel, ctx);
- iwl_set_rxon_ht(priv, ht_conf);
- iwl_set_flags_for_band(priv, ctx, channel->band,
- ctx->vif);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl_set_rate(priv);
- /*
- * at this point, staging_rxon has the
- * configuration for channel switch
- */
- set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
- priv->switch_channel = cpu_to_le16(ch);
- if (priv->cfg->ops->lib->set_channel_switch(priv,
- ch_switch)) {
- clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
- &priv->status);
- priv->switch_channel = 0;
- ieee80211_chswitch_done(ctx->vif, false);
- }
+ ch = channel->hw_value;
+ if (le16_to_cpu(ctx->active.channel) == ch)
+ goto out;
+
+ ch_info = iwl_get_channel_info(priv, channel->band, ch);
+ if (!is_channel_valid(ch_info)) {
+ IWL_DEBUG_MAC80211(priv, "invalid channel\n");
+ goto out;
+ }
+
+ spin_lock_irq(&priv->lock);
+
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /* Configure HT40 channels */
+ ctx->ht.enabled = conf_is_ht(conf);
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
}
+ } else
+ ctx->ht.is_40mhz = false;
+
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ iwl_set_rxon_channel(priv, channel, ctx);
+ iwl_set_rxon_ht(priv, ht_conf);
+ iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
+
+ spin_unlock_irq(&priv->lock);
+
+ iwl_set_rate(priv);
+ /*
+ * at this point, staging_rxon has the
+ * configuration for channel switch
+ */
+ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+ priv->switch_channel = cpu_to_le16(ch);
+ if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) {
+ clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+ priv->switch_channel = 0;
+ ieee80211_chswitch_done(ctx->vif, false);
}
+
out:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2971,10 +3171,6 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
mutex_lock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "enter\n");
- /* do not support "flush" */
- if (!priv->cfg->ops->lib->txfifo_flush)
- goto done;
-
if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
goto done;
@@ -2990,7 +3186,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
*/
if (drop) {
IWL_DEBUG_MAC80211(priv, "send flush command\n");
- if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) {
+ if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
IWL_ERR(priv, "flush request fail\n");
goto done;
}
@@ -3017,9 +3213,9 @@ static void iwlagn_disable_roc(struct iwl_priv *priv)
iwl_set_rxon_channel(priv, chan, ctx);
iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
- priv->_agn.hw_roc_channel = NULL;
+ priv->hw_roc_channel = NULL;
- iwlcore_commit_rxon(priv, ctx);
+ iwlagn_commit_rxon(priv, ctx);
ctx->is_active = false;
}
@@ -3027,7 +3223,7 @@ static void iwlagn_disable_roc(struct iwl_priv *priv)
static void iwlagn_bg_roc_done(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv,
- _agn.hw_roc_work.work);
+ hw_roc_work.work);
mutex_lock(&priv->mutex);
ieee80211_remain_on_channel_expired(priv->hw);
@@ -3059,11 +3255,11 @@ static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
}
priv->contexts[IWL_RXON_CTX_PAN].is_active = true;
- priv->_agn.hw_roc_channel = channel;
- priv->_agn.hw_roc_chantype = channel_type;
- priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
- iwlcore_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
- queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work,
+ priv->hw_roc_channel = channel;
+ priv->hw_roc_chantype = channel_type;
+ priv->hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
+ iwlagn_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
+ queue_delayed_work(priv->workqueue, &priv->hw_roc_work,
msecs_to_jiffies(duration + 20));
msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */
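The DIV_ROUND_UP(duration * 1000, 1024) above converts the requested milliseconds into TUs (1 TU = 1024 microseconds) for the PAN slot programming. For example (standalone sketch):

        #include <stdio.h>

        #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

        int main(void)
        {
                /* 50 ms = 50000 us; at 1024 us per TU that rounds up to 49 TU. */
                unsigned int duration_ms = 50;
                unsigned int tu = DIV_ROUND_UP(duration_ms * 1000, 1024);

                printf("%u ms -> %u TU\n", duration_ms, tu);
                return 0;
        }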
@@ -3082,7 +3278,7 @@ static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
return -EOPNOTSUPP;
- cancel_delayed_work_sync(&priv->_agn.hw_roc_work);
+ cancel_delayed_work_sync(&priv->hw_roc_work);
mutex_lock(&priv->mutex);
iwlagn_disable_roc(priv);
@@ -3104,18 +3300,17 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
init_waitqueue_head(&priv->wait_command_queue);
INIT_WORK(&priv->restart, iwl_bg_restart);
- INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
- INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done);
+ INIT_DELAYED_WORK(&priv->hw_roc_work, iwlagn_bg_roc_done);
iwl_setup_scan_deferred_work(priv);
- if (priv->cfg->ops->lib->setup_deferred_work)
- priv->cfg->ops->lib->setup_deferred_work(priv);
+ if (priv->cfg->lib->bt_setup_deferred_work)
+ priv->cfg->lib->bt_setup_deferred_work(priv);
init_timer(&priv->statistics_periodic);
priv->statistics_periodic.data = (unsigned long)priv;
@@ -3128,15 +3323,12 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
init_timer(&priv->watchdog);
priv->watchdog.data = (unsigned long)priv;
priv->watchdog.function = iwl_bg_watchdog;
-
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
- iwl_irq_tasklet, (unsigned long)priv);
}
static void iwl_cancel_deferred_work(struct iwl_priv *priv)
{
- if (priv->cfg->ops->lib->cancel_deferred_work)
- priv->cfg->ops->lib->cancel_deferred_work(priv);
+ if (priv->cfg->lib->cancel_deferred_work)
+ priv->cfg->lib->cancel_deferred_work(priv);
cancel_work_sync(&priv->run_time_calib_work);
cancel_work_sync(&priv->beacon_update);
@@ -3187,7 +3379,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->iw_mode = NL80211_IFTYPE_STATION;
priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
- priv->_agn.agg_tids_count = 0;
+ priv->agg_tids_count = 0;
/* initialize force reset */
priv->force_reset[IWL_RF_RESET].reset_duration =
@@ -3198,9 +3390,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->rx_statistics_jiffies = jiffies;
/* Choose which receivers/antennas to use */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
+ iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
iwl_init_scan_params(priv);
@@ -3243,12 +3433,42 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
iwl_free_channel_map(priv);
kfree(priv->scan_cmd);
kfree(priv->beacon_cmd);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ kfree(priv->wowlan_sram);
+#endif
+}
+
+static void iwl_mac_rssi_callback(struct ieee80211_hw *hw,
+ enum ieee80211_rssi_event rssi_event)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ mutex_lock(&priv->mutex);
+
+ if (priv->cfg->bt_params &&
+ priv->cfg->bt_params->advanced_bt_coexist) {
+ if (rssi_event == RSSI_EVENT_LOW)
+ priv->bt_enable_pspoll = true;
+ else if (rssi_event == RSSI_EVENT_HIGH)
+ priv->bt_enable_pspoll = false;
+
+ iwlagn_send_advance_bt_config(priv);
+ } else {
+ IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled, "
+ "ignoring RSSI callback\n");
+ }
+
+ mutex_unlock(&priv->mutex);
}
struct ieee80211_ops iwlagn_hw_ops = {
.tx = iwlagn_mac_tx,
.start = iwlagn_mac_start,
.stop = iwlagn_mac_stop,
+#ifdef CONFIG_PM
+ .suspend = iwlagn_mac_suspend,
+ .resume = iwlagn_mac_resume,
+#endif
.add_interface = iwl_mac_add_interface,
.remove_interface = iwl_mac_remove_interface,
.change_interface = iwl_mac_change_interface,
@@ -3256,6 +3476,7 @@ struct ieee80211_ops iwlagn_hw_ops = {
.configure_filter = iwlagn_configure_filter,
.set_key = iwlagn_mac_set_key,
.update_tkip_key = iwlagn_mac_update_tkip_key,
+ .set_rekey_data = iwlagn_mac_set_rekey_data,
.conf_tx = iwl_mac_conf_tx,
.bss_info_changed = iwlagn_bss_info_changed,
.ampdu_action = iwlagn_mac_ampdu_action,
@@ -3270,15 +3491,13 @@ struct ieee80211_ops iwlagn_hw_ops = {
.cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
.offchannel_tx = iwl_mac_offchannel_tx,
.offchannel_tx_cancel_wait = iwl_mac_offchannel_tx_cancel_wait,
+ .rssi_callback = iwl_mac_rssi_callback,
CFG80211_TESTMODE_CMD(iwl_testmode_cmd)
+ CFG80211_TESTMODE_DUMP(iwl_testmode_dump)
};
static u32 iwl_hw_detect(struct iwl_priv *priv)
{
- u8 rev_id;
-
- pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
- IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
return iwl_read32(priv, CSR_HW_REV);
}
@@ -3294,10 +3513,10 @@ static int iwl_set_hw_params(struct iwl_priv *priv)
priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
if (iwlagn_mod_params.disable_11n)
- priv->cfg->sku &= ~IWL_SKU_N;
+ priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
/* Device-specific setup */
- return priv->cfg->ops->lib->set_hw_params(priv);
+ return priv->cfg->lib->set_hw_params(priv);
}
static const u8 iwlagn_bss_ac_to_fifo[] = {
@@ -3344,29 +3563,9 @@ out:
return hw;
}
-static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static void iwl_init_context(struct iwl_priv *priv)
{
- int err = 0, i;
- struct iwl_priv *priv;
- struct ieee80211_hw *hw;
- struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- unsigned long flags;
- u16 pci_cmd, num_mac;
- u32 hw_rev;
-
- /************************
- * 1. Allocating HW data
- ************************/
-
- hw = iwl_alloc_all(cfg);
- if (!hw) {
- err = -ENOMEM;
- goto out;
- }
- priv = hw->priv;
- /* At this point both hw and priv are allocated. */
-
- priv->ucode_type = UCODE_SUBTYPE_NONE_LOADED;
+ int i;
/*
* The default context is always valid,
@@ -3398,8 +3597,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
- priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd = REPLY_WIPAN_RXON_TIMING;
- priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd = REPLY_WIPAN_RXON_ASSOC;
+ priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
+ REPLY_WIPAN_RXON_TIMING;
+ priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd =
+ REPLY_WIPAN_RXON_ASSOC;
priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
@@ -3419,12 +3620,35 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+}
- SET_IEEE80211_DEV(hw, &pdev->dev);
+int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
+{
+ int err = 0;
+ struct iwl_priv *priv;
+ struct ieee80211_hw *hw;
+ u16 num_mac;
+ u32 hw_rev;
+
+ /************************
+ * 1. Allocating HW data
+ ************************/
+ hw = iwl_alloc_all(cfg);
+ if (!hw) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ priv = hw->priv;
+ priv->bus = bus;
+ bus_set_drv_data(priv->bus, priv);
+
+ /* At this point both hw and priv are allocated. */
+
+ SET_IEEE80211_DEV(hw, priv->bus->dev);
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
priv->cfg = cfg;
- priv->pci_dev = pdev;
priv->inta_mask = CSR_INI_SET_MASK;
/* is antenna coupling more than 35dB ? */
@@ -3440,53 +3664,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (iwl_alloc_traffic_mem(priv))
IWL_ERR(priv, "Not enough memory to generate traffic log\n");
- /**************************
- * 2. Initializing PCI bus
- **************************/
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
- if (pci_enable_device(pdev)) {
- err = -ENODEV;
- goto out_ieee80211_free_hw;
- }
-
- pci_set_master(pdev);
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
- if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- /* both attempts failed: */
- if (err) {
- IWL_WARN(priv, "No suitable DMA available.\n");
- goto out_pci_disable_device;
- }
- }
-
- err = pci_request_regions(pdev, DRV_NAME);
- if (err)
- goto out_pci_disable_device;
-
- pci_set_drvdata(pdev, priv);
-
-
- /***********************
- * 3. Read REV register
- ***********************/
- priv->hw_base = pci_iomap(pdev, 0, 0);
- if (!priv->hw_base) {
- err = -ENODEV;
- goto out_pci_release_regions;
- }
-
- IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
- (unsigned long long) pci_resource_len(pdev, 0));
- IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
-
/* these spin locks will be used in apm_ops.init and EEPROM access
* we should init now
*/
@@ -3500,17 +3677,21 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+ /***********************
+ * 3. Read REV register
+ ***********************/
hw_rev = iwl_hw_detect(priv);
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
priv->cfg->name, hw_rev);
- /* We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state */
- pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+ err = iwl_trans_register(&priv->trans, priv);
+ if (err)
+ goto out_free_traffic_mem;
- if (iwl_prepare_card_hw(priv)) {
+ if (trans_prepare_card_hw(&priv->trans)) {
+ err = -EIO;
IWL_WARN(priv, "Failed, HW not ready\n");
- goto out_iounmap;
+ goto out_free_trans;
}
/*****************
@@ -3520,7 +3701,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = iwl_eeprom_init(priv, hw_rev);
if (err) {
IWL_ERR(priv, "Unable to init EEPROM\n");
- goto out_iounmap;
+ goto out_free_trans;
}
err = iwl_eeprom_check_version(priv);
if (err)
@@ -3543,10 +3724,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->hw->wiphy->n_addresses++;
}
+ /* initialize all valid contexts */
+ iwl_init_context(priv);
+
/************************
* 5. Setup HW constants
************************/
if (iwl_set_hw_params(priv)) {
+ err = -ENOENT;
IWL_ERR(priv, "failed to set hw parameters\n");
goto out_free_eeprom;
}
@@ -3563,36 +3748,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/********************
* 7. Setup services
********************/
- spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- pci_enable_msi(priv->pci_dev);
-
- iwl_alloc_isr_ict(priv);
-
- err = request_irq(priv->pci_dev->irq, iwl_isr_ict,
- IRQF_SHARED, DRV_NAME, priv);
- if (err) {
- IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
- goto out_disable_msi;
- }
-
iwl_setup_deferred_work(priv);
iwl_setup_rx_handlers(priv);
iwl_testmode_init(priv);
/*********************************************
- * 8. Enable interrupts and read RFKILL state
+ * 8. Enable interrupts
*********************************************/
- /* enable rfkill interrupt: hw bug w/a */
- pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
- if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
- pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
- }
-
iwl_enable_rfkill_int(priv);
/* If platform's RF_KILL switch is NOT set to KILL */
@@ -3607,7 +3770,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_power_initialize(priv);
iwl_tt_initialize(priv);
- init_completion(&priv->_agn.firmware_loading_complete);
+ init_completion(&priv->firmware_loading_complete);
err = iwl_request_firmware(priv, true);
if (err)
@@ -3615,44 +3778,32 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
- out_destroy_workqueue:
+out_destroy_workqueue:
destroy_workqueue(priv->workqueue);
priv->workqueue = NULL;
- free_irq(priv->pci_dev->irq, priv);
- iwl_free_isr_ict(priv);
- out_disable_msi:
- pci_disable_msi(priv->pci_dev);
iwl_uninit_drv(priv);
- out_free_eeprom:
+out_free_eeprom:
iwl_eeprom_free(priv);
- out_iounmap:
- pci_iounmap(pdev, priv->hw_base);
- out_pci_release_regions:
- pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
- out_pci_disable_device:
- pci_disable_device(pdev);
- out_ieee80211_free_hw:
+out_free_trans:
+ trans_free(&priv->trans);
+out_free_traffic_mem:
iwl_free_traffic_mem(priv);
ieee80211_free_hw(priv->hw);
- out:
+out:
return err;
}
-static void __devexit iwl_pci_remove(struct pci_dev *pdev)
+void __devexit iwl_remove(struct iwl_priv * priv)
{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
unsigned long flags;
- if (!priv)
- return;
-
- wait_for_completion(&priv->_agn.firmware_loading_complete);
+ wait_for_completion(&priv->firmware_loading_complete);
IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
iwl_dbgfs_unregister(priv);
- sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
+ sysfs_remove_group(&priv->bus->dev->kobj,
+ &iwl_attribute_group);
/* ieee80211_unregister_hw call will cause iwl_mac_stop to
* be called and iwl_down since we are removing the device
@@ -3680,17 +3831,15 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
iwl_disable_interrupts(priv);
spin_unlock_irqrestore(&priv->lock, flags);
- iwl_synchronize_irq(priv);
+ trans_sync_irq(&priv->trans);
- iwl_dealloc_ucode_pci(priv);
+ iwl_dealloc_ucode(priv);
- if (priv->rxq.bd)
- iwlagn_rx_queue_free(priv, &priv->rxq);
- iwlagn_hw_txq_ctx_free(priv);
+ trans_rx_free(&priv->trans);
+ trans_tx_free(&priv->trans);
iwl_eeprom_free(priv);
-
/*netif_stop_queue(dev); */
flush_workqueue(priv->workqueue);
@@ -3701,16 +3850,11 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
priv->workqueue = NULL;
iwl_free_traffic_mem(priv);
- free_irq(priv->pci_dev->irq, priv);
- pci_disable_msi(priv->pci_dev);
- pci_iounmap(pdev, priv->hw_base);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
+ trans_free(&priv->trans);
- iwl_uninit_drv(priv);
+ bus_set_drv_data(priv->bus, NULL);
- iwl_free_isr_ict(priv);
+ iwl_uninit_drv(priv);
dev_kfree_skb(priv->beacon_skb);
@@ -3723,206 +3867,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
* driver and module entry point
*
*****************************************************************************/
-
-/* Hardware specific file defines the PCI IDs table for that hardware module */
-static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
- {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */
-
-/* 5300 Series WiFi */
- {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */
-
-/* 5350 Series WiFi/WiMax */
- {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */
-
-/* 5150 Series Wifi/WiMax */
- {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
-
- {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
- {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
- {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */
-
-/* 6x00 Series */
- {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
- {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
- {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
- {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
- {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
-
-/* 6x05 Series */
- {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
-
-/* 6x30 Series */
- {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)},
- {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)},
- {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)},
-
-/* 6x50 WiFi/WiMax Series */
- {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
- {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
- {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
-
-/* 6150 WiFi/WiMax Series */
- {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
- {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
-
-/* 1000 Series WiFi */
- {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
- {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
-
-/* 100 Series WiFi */
- {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
- {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
- {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
- {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
- {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
- {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
-
-/* 130 Series WiFi */
- {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)},
- {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
-
-/* 2x00 Series */
- {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
-
-/* 2x30 Series */
- {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
-
-/* 6x35 Series */
- {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
-
-/* 105 Series */
- {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
- {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
- {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
-
-/* 135 Series */
- {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)},
- {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)},
- {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)},
-
- {0}
-};
-MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
-
-static struct pci_driver iwl_driver = {
- .name = DRV_NAME,
- .id_table = iwl_hw_card_ids,
- .probe = iwl_pci_probe,
- .remove = __devexit_p(iwl_pci_remove),
- .driver.pm = IWL_PM_OPS,
-};
-
static int __init iwl_init(void)
{
@@ -3936,12 +3880,10 @@ static int __init iwl_init(void)
return ret;
}
- ret = pci_register_driver(&iwl_driver);
- if (ret) {
- pr_err("Unable to initialize PCI module\n");
- goto error_register;
- }
+ ret = iwl_pci_register_driver();
+ if (ret)
+ goto error_register;
return ret;
error_register:
@@ -3951,7 +3893,7 @@ error_register:
static void __exit iwl_exit(void)
{
- pci_unregister_driver(&iwl_driver);
+ iwl_pci_unregister_driver();
iwlagn_rate_control_unregister();
}
@@ -3993,3 +3935,51 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
+
+module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO);
+MODULE_PARM_DESC(wd_disable,
+ "Disable stuck queue watchdog timer (default: 0 [enabled])");
+
+/*
+ * When bt_coex_active is set to true, the uCode will kill/defer
+ * every time the priority line is asserted (BT is sending signals on the
+ * priority line in the PCIx).
+ * When bt_coex_active is set to false, the uCode will ignore BT activity and
+ * perform normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to WiFi/BT
+ * co-existence problems. The possible behaviors are:
+ * Able to scan and find all available APs
+ * Not able to associate with any AP
+ * On those platforms, WiFi communication can be restored by setting
+ * the "bt_coex_active" module parameter to "false"
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+module_param_named(bt_coex_active, iwlagn_mod_params.bt_coex_active,
+ bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");
+
+module_param_named(led_mode, iwlagn_mod_params.led_mode, int, S_IRUGO);
+MODULE_PARM_DESC(led_mode, "0=system default, "
+ "1=On(RF On)/Off(RF Off), 2=blinking (default: 0)");
+
+module_param_named(power_save, iwlagn_mod_params.power_save,
+ bool, S_IRUGO);
+MODULE_PARM_DESC(power_save,
+ "enable WiFi power management (default: disable)");
+
+module_param_named(power_level, iwlagn_mod_params.power_level,
+ int, S_IRUGO);
+MODULE_PARM_DESC(power_level,
+ "default power save level (range from 1 - 5, default: 1)");
+
+/*
+ * For now, keep using power level 1 instead of automatically
+ * adjusting ...
+ */
+module_param_named(no_sleep_autoadjust, iwlagn_mod_params.no_sleep_autoadjust,
+ bool, S_IRUGO);
+MODULE_PARM_DESC(no_sleep_autoadjust,
+ "don't automatically adjust sleep level "
+ "according to maximum network latency (default: true)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index d1716844002..d941c4c98e4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -109,42 +109,25 @@ extern struct iwl_cfg iwl135_bg_cfg;
extern struct iwl_cfg iwl135_bgn_cfg;
extern struct iwl_mod_params iwlagn_mod_params;
-extern struct iwl_hcmd_ops iwlagn_hcmd;
-extern struct iwl_hcmd_ops iwlagn_bt_hcmd;
-extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
extern struct ieee80211_ops iwlagn_hw_ops;
int iwl_reset_ict(struct iwl_priv *priv);
-void iwl_disable_ict(struct iwl_priv *priv);
-int iwl_alloc_isr_ict(struct iwl_priv *priv);
-void iwl_free_isr_ict(struct iwl_priv *priv);
-irqreturn_t iwl_isr_ict(int irq, void *data);
-/* call this function to flush any scheduled tasklet */
-static inline void iwl_synchronize_irq(struct iwl_priv *priv)
+static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
{
- /* wait to make sure we flush pending tasklet*/
- synchronize_irq(priv->pci_dev->irq);
- tasklet_kill(&priv->irq_tasklet);
+ hdr->op_code = cmd;
+ hdr->first_group = 0;
+ hdr->groups_num = 1;
+ hdr->data_valid = 1;
}
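Callers fill this header at the front of a calibration result buffer before it is handed to the uCode. A compilable sketch of the same fill, with the struct layout assumed from the four fields used above:

        #include <stdint.h>
        #include <stdio.h>

        /* Layout assumed from the four fields the inline touches; the real
         * definition lives in the driver's command headers. */
        struct iwl_calib_hdr {
                uint8_t op_code;
                uint8_t first_group;
                uint8_t groups_num;
                uint8_t data_valid;
        };

        static void set_calib_hdr(struct iwl_calib_hdr *hdr, uint8_t cmd)
        {
                hdr->op_code = cmd;
                hdr->first_group = 0;
                hdr->groups_num = 1;
                hdr->data_valid = 1;
        }

        int main(void)
        {
                struct iwl_calib_hdr hdr;

                set_calib_hdr(&hdr, 0x12);      /* 0x12: hypothetical op-code */
                printf("op=0x%02x groups=%u valid=%u\n",
                       hdr.op_code, hdr.groups_num, hdr.data_valid);
                return 0;
        }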
-int iwl_prepare_card_hw(struct iwl_priv *priv);
-
-int iwlagn_start_device(struct iwl_priv *priv);
-void iwlagn_stop_device(struct iwl_priv *priv);
-
/* tx queue */
-void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
- int txq_id, u32 index);
-void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry);
-void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask);
void iwl_free_tfds_in_queue(struct iwl_priv *priv,
int sta_id, int tid, int freed);
/* RXON */
+int iwlagn_set_pan_params(struct iwl_priv *priv);
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed);
@@ -161,41 +144,29 @@ void iwlagn_send_prio_tbl(struct iwl_priv *priv);
int iwlagn_run_init_ucode(struct iwl_priv *priv);
int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
struct fw_img *image,
- int subtype, int alternate_subtype);
+ enum iwlagn_ucode_type ucode_type);
/* lib */
void iwl_check_abort_status(struct iwl_priv *priv,
u8 frame_count, u32 status);
-void iwlagn_rx_handler_setup(struct iwl_priv *priv);
-void iwlagn_setup_deferred_work(struct iwl_priv *priv);
int iwlagn_hw_valid_rtc_data_addr(u32 addr);
int iwlagn_send_tx_power(struct iwl_priv *priv);
void iwlagn_temperature(struct iwl_priv *priv);
u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
-const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
- size_t offset);
-void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwlagn_hw_nic_init(struct iwl_priv *priv);
int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv);
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
+int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
/* rx */
-void iwlagn_rx_queue_restock(struct iwl_priv *priv);
-void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority);
-void iwlagn_rx_replenish(struct iwl_priv *priv);
-void iwlagn_rx_replenish_now(struct iwl_priv *priv);
-void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwlagn_rxq_stop(struct iwl_priv *priv);
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
void iwl_setup_rx_handlers(struct iwl_priv *priv);
+void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
+
/* tx */
-void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset);
+void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int index);
void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
struct ieee80211_tx_info *info);
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
@@ -203,18 +174,12 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
-void iwlagn_txq_agg_queue_setup(struct iwl_priv *priv,
- struct ieee80211_sta *sta,
- int tid, int frame_limit);
int iwlagn_txq_check_empty(struct iwl_priv *priv,
int sta_id, u8 tid, int txq_id);
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
-void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv);
-int iwlagn_txq_ctx_alloc(struct iwl_priv *priv);
-void iwlagn_txq_ctx_reset(struct iwl_priv *priv);
-void iwlagn_txq_ctx_stop(struct iwl_priv *priv);
static inline u32 iwl_tx_status_to_mac80211(u32 status)
{
@@ -249,10 +214,6 @@ void iwlagn_post_scan(struct iwl_priv *priv);
int iwlagn_manage_ibss_station(struct iwl_priv *priv,
struct ieee80211_vif *vif, bool add);
-/* hcmd */
-int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant);
-int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
-
/* bt coex */
void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
@@ -260,6 +221,8 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
+void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv);
+void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena);
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_get_tx_fail_reason(u32 status);
@@ -283,11 +246,13 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
int iwl_restore_default_wep_keys(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key, u8 sta_id);
+ struct ieee80211_key_conf *key,
+ struct ieee80211_sta *sta);
int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key, u8 sta_id);
+ struct ieee80211_key_conf *key,
+ struct ieee80211_sta *sta);
void iwl_update_tkip_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
+ struct ieee80211_vif *vif,
struct ieee80211_key_conf *keyconf,
struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
@@ -296,6 +261,8 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
int tid);
void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
+int iwl_update_bcast_station(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx);
int iwl_update_bcast_stations(struct iwl_priv *priv);
void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -343,6 +310,9 @@ extern int iwl_alive_start(struct iwl_priv *priv);
/* svtool */
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
extern int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len);
+extern int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ void *data, int len);
extern void iwl_testmode_init(struct iwl_priv *priv);
extern void iwl_testmode_cleanup(struct iwl_priv *priv);
#else
@@ -352,6 +322,13 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
return -ENOSYS;
}
static inline
+int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ void *data, int len)
+{
+ return -ENOSYS;
+}
+static inline
void iwl_testmode_init(struct iwl_priv *priv)
{
}
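Reviewer note: the new iwl_set_calib_hdr() inline above fills the common calibration header fields in one place. Below is a rough sketch, under stated assumptions, of how a caller might use it; the surrounding function is hypothetical and the command id (REPLY_PHY_CALIBRATION_CMD) is assumed from the existing calibration path rather than shown in this patch.

    /* Hypothetical caller -- illustrates the intended use of iwl_set_calib_hdr(). */
    static int example_send_dc_calib(struct iwl_priv *priv)
    {
            struct iwl_calib_cmd cmd;       /* hdr + data[], see iwl-commands.h */

            memset(&cmd, 0, sizeof(cmd));
            iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_DC_CMD);

            /* op_code, first_group, groups_num and data_valid are now set;
             * the command can be sent like any other calibration result. */
            return trans_send_cmd_pdu(&priv->trans, REPLY_PHY_CALIBRATION_CMD,
                                      CMD_SYNC, sizeof(cmd), &cmd);
    }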
diff --git a/drivers/net/wireless/iwlwifi/iwl-bus.h b/drivers/net/wireless/iwlwifi/iwl-bus.h
new file mode 100644
index 00000000000..f3ee1c0c004
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-bus.h
@@ -0,0 +1,139 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_pci_h__
+#define __iwl_pci_h__
+
+struct iwl_bus;
+
+/**
+ * struct iwl_bus_ops - bus specific operations
+ * @get_pm_support: must return true if the bus can go to sleep
+ * @apm_config: will be called during the configuration of the APM
+ * @set_drv_data: set the drv_data pointer to the bus layer
+ * @get_hw_id: prints the hw_id in the provided buffer
+ * @write8: write a byte to the register at offset ofs
+ * @write32: write a dword to the register at offset ofs
+ * @read32: read a dword from the register at offset ofs
+ */
+struct iwl_bus_ops {
+ bool (*get_pm_support)(struct iwl_bus *bus);
+ void (*apm_config)(struct iwl_bus *bus);
+ void (*set_drv_data)(struct iwl_bus *bus, void *drv_data);
+ void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len);
+ void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
+ void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
+ u32 (*read32)(struct iwl_bus *bus, u32 ofs);
+};
+
+struct iwl_bus {
+ /* Common data to all buses */
+ void *drv_data; /* driver's context */
+ struct device *dev;
+ struct iwl_bus_ops *ops;
+
+ unsigned int irq;
+
+ /* pointer to bus specific struct */
+ /* Ensure that this pointer will always be aligned to sizeof(void *) */
+ char bus_specific[0] __attribute__((__aligned__(sizeof(void *))));
+};
+
+static inline bool bus_get_pm_support(struct iwl_bus *bus)
+{
+ return bus->ops->get_pm_support(bus);
+}
+
+static inline void bus_apm_config(struct iwl_bus *bus)
+{
+ bus->ops->apm_config(bus);
+}
+
+static inline void bus_set_drv_data(struct iwl_bus *bus, void *drv_data)
+{
+ bus->ops->set_drv_data(bus, drv_data);
+}
+
+static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len)
+{
+ bus->ops->get_hw_id(bus, buf, buf_len);
+}
+
+static inline void bus_write8(struct iwl_bus *bus, u32 ofs, u8 val)
+{
+ bus->ops->write8(bus, ofs, val);
+}
+
+static inline void bus_write32(struct iwl_bus *bus, u32 ofs, u32 val)
+{
+ bus->ops->write32(bus, ofs, val);
+}
+
+static inline u32 bus_read32(struct iwl_bus *bus, u32 ofs)
+{
+ return bus->ops->read32(bus, ofs);
+}
+
+int __must_check iwl_pci_register_driver(void);
+void iwl_pci_unregister_driver(void);
+
+#endif
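Reviewer note: since this file only declares the bus abstraction, a rough illustrative sketch of how a backend (e.g. the PCI layer) might fill in struct iwl_bus_ops, and how upper layers consume it through the bus_*() wrappers, may help. All example_* names are hypothetical and this is not the actual iwl-pci.c code; hooks not shown are simply omitted for brevity.

    /* Illustrative PCI-style backend for the new bus abstraction. */
    struct example_pci_bus {
            struct pci_dev *pci_dev;
            void __iomem *hw_base;
    };

    #define EXAMPLE_BUS(bus) ((struct example_pci_bus *)((bus)->bus_specific))

    static void example_write32(struct iwl_bus *bus, u32 ofs, u32 val)
    {
            iowrite32(val, EXAMPLE_BUS(bus)->hw_base + ofs);
    }

    static u32 example_read32(struct iwl_bus *bus, u32 ofs)
    {
            return ioread32(EXAMPLE_BUS(bus)->hw_base + ofs);
    }

    static void example_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len)
    {
            struct pci_dev *pdev = EXAMPLE_BUS(bus)->pci_dev;

            snprintf(buf, buf_len, "PCI ID: 0x%04X:0x%04X",
                     pdev->device, pdev->subsystem_device);
    }

    static struct iwl_bus_ops example_bus_ops = {
            .write32   = example_write32,
            .read32    = example_read32,
            .get_hw_id = example_get_hw_id,
            /* .get_pm_support, .apm_config, .set_drv_data, .write8 omitted */
    };

    /* Upper layers then only ever call the bus_*() wrappers, e.g.: */
    static inline u32 example_read_csr(struct iwl_bus *bus, u32 reg)
    {
            return bus_read32(bus, reg);
    }

This is also why the iwl-core.c hunk below can replace the raw PCI ID printout with bus_get_hw_id(): the core no longer knows what kind of bus it sits on.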
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 6ee5f1aa555..e9e9d1d1778 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -188,6 +188,13 @@ enum {
REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd,
+ REPLY_WOWLAN_PATTERNS = 0xe0,
+ REPLY_WOWLAN_WAKEUP_FILTER = 0xe1,
+ REPLY_WOWLAN_TSC_RSC_PARAMS = 0xe2,
+ REPLY_WOWLAN_TKIP_PARAMS = 0xe3,
+ REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4,
+ REPLY_WOWLAN_GET_STATUS = 0xe5,
+
REPLY_MAX = 0xff
};
@@ -384,18 +391,6 @@ struct iwl_tx_ant_config_cmd {
#define UCODE_VALID_OK cpu_to_le32(0x1)
-enum iwlagn_ucode_subtype {
- UCODE_SUBTYPE_REGULAR = 0,
- UCODE_SUBTYPE_REGULAR_NEW = 1,
- UCODE_SUBTYPE_INIT = 9,
-
- /*
- * Not a valid subtype, the ucode has just a u8, so
- * we can use something > 0xff for this value.
- */
- UCODE_SUBTYPE_NONE_LOADED = 0x100,
-};
-
/**
* REPLY_ALIVE = 0x1 (response only, not a command)
*
@@ -844,6 +839,8 @@ struct iwl_qosparam_cmd {
#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
#define STA_KEY_MAX_NUM 8
#define STA_KEY_MAX_NUM_PAN 16
+/* must not match WEP_INVALID_OFFSET */
+#define IWLAGN_HW_KEY_DEFAULT 0xfe
/* Flags indicate whether to modify vs. don't change various station params */
#define STA_MODIFY_KEY_MASK 0x01
@@ -984,15 +981,26 @@ struct iwl_rem_sta_cmd {
u8 reserved2[2];
} __packed;
-#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
-#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
-#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
-#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
+
+/* WiFi queues mask */
+#define IWL_SCD_BK_MSK cpu_to_le32(BIT(0))
+#define IWL_SCD_BE_MSK cpu_to_le32(BIT(1))
+#define IWL_SCD_VI_MSK cpu_to_le32(BIT(2))
+#define IWL_SCD_VO_MSK cpu_to_le32(BIT(3))
+#define IWL_SCD_MGMT_MSK cpu_to_le32(BIT(3))
+
+/* PAN queues mask */
+#define IWL_PAN_SCD_BK_MSK cpu_to_le32(BIT(4))
+#define IWL_PAN_SCD_BE_MSK cpu_to_le32(BIT(5))
+#define IWL_PAN_SCD_VI_MSK cpu_to_le32(BIT(6))
+#define IWL_PAN_SCD_VO_MSK cpu_to_le32(BIT(7))
+#define IWL_PAN_SCD_MGMT_MSK cpu_to_le32(BIT(7))
+#define IWL_PAN_SCD_MULTICAST_MSK cpu_to_le32(BIT(8))
+
#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
#define IWL_DROP_SINGLE 0
-#define IWL_DROP_SELECTED 1
-#define IWL_DROP_ALL 2
+#define IWL_DROP_ALL (BIT(IWL_RXON_CTX_BSS) | BIT(IWL_RXON_CTX_PAN))
/*
* REPLY_TXFIFO_FLUSH = 0x1e(command and response)
@@ -1932,6 +1940,9 @@ struct iwl_bt_cmd {
/* Disable Sync PSPoll on SCO/eSCO */
#define IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE BIT(7)
+#define IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD -75 /* dBm */
+#define IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD -65 /* dBm */
+
#define IWLAGN_BT_PRIO_BOOST_MAX 0xFF
#define IWLAGN_BT_PRIO_BOOST_MIN 0x00
#define IWLAGN_BT_PRIO_BOOST_DEFAULT 0xF0
@@ -2457,8 +2468,8 @@ struct iwl_scanstart_notification {
__le32 status;
} __packed;
-#define SCAN_OWNER_STATUS 0x1;
-#define MEASURE_OWNER_STATUS 0x2;
+#define SCAN_OWNER_STATUS 0x1
+#define MEASURE_OWNER_STATUS 0x2
#define IWL_PROBE_STATUS_OK 0
#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
@@ -3153,7 +3164,6 @@ struct iwl_enhance_sensitivity_cmd {
/* The default calibrate table size if not specified by firmware */
#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
enum {
- IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
IWL_PHY_CALIBRATE_DC_CMD = 8,
IWL_PHY_CALIBRATE_LO_CMD = 9,
IWL_PHY_CALIBRATE_TX_IQ_CMD = 11,
@@ -3166,22 +3176,36 @@ enum {
#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
-#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(0xffffffff)
-
/* This enum defines the bitmap of various calibrations to enable in both
* init ucode and runtime ucode through CALIBRATION_CFG_CMD.
*/
enum iwl_ucode_calib_cfg {
- IWL_CALIB_CFG_RX_BB_IDX,
- IWL_CALIB_CFG_DC_IDX,
- IWL_CALIB_CFG_TX_IQ_IDX,
- IWL_CALIB_CFG_RX_IQ_IDX,
- IWL_CALIB_CFG_NOISE_IDX,
- IWL_CALIB_CFG_CRYSTAL_IDX,
- IWL_CALIB_CFG_TEMPERATURE_IDX,
- IWL_CALIB_CFG_PAPD_IDX,
+ IWL_CALIB_CFG_RX_BB_IDX = BIT(0),
+ IWL_CALIB_CFG_DC_IDX = BIT(1),
+ IWL_CALIB_CFG_LO_IDX = BIT(2),
+ IWL_CALIB_CFG_TX_IQ_IDX = BIT(3),
+ IWL_CALIB_CFG_RX_IQ_IDX = BIT(4),
+ IWL_CALIB_CFG_NOISE_IDX = BIT(5),
+ IWL_CALIB_CFG_CRYSTAL_IDX = BIT(6),
+ IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(7),
+ IWL_CALIB_CFG_PAPD_IDX = BIT(8),
+ IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(9),
+ IWL_CALIB_CFG_TX_PWR_IDX = BIT(10),
};
+#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX | \
+ IWL_CALIB_CFG_DC_IDX | \
+ IWL_CALIB_CFG_LO_IDX | \
+ IWL_CALIB_CFG_TX_IQ_IDX | \
+ IWL_CALIB_CFG_RX_IQ_IDX | \
+ IWL_CALIB_CFG_NOISE_IDX | \
+ IWL_CALIB_CFG_CRYSTAL_IDX | \
+ IWL_CALIB_CFG_TEMPERATURE_IDX | \
+ IWL_CALIB_CFG_PAPD_IDX | \
+ IWL_CALIB_CFG_SENSITIVITY_IDX | \
+ IWL_CALIB_CFG_TX_PWR_IDX)
+
+#define IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK cpu_to_le32(BIT(0))
struct iwl_calib_cfg_elmnt_s {
__le32 is_enable;
@@ -3215,15 +3239,6 @@ struct iwl_calib_cmd {
u8 data[0];
} __packed;
-/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
-struct iwl_calib_diff_gain_cmd {
- struct iwl_calib_hdr hdr;
- s8 diff_gain_a; /* see above */
- s8 diff_gain_b;
- s8 diff_gain_c;
- u8 reserved1;
-} __packed;
-
struct iwl_calib_xtal_freq_cmd {
struct iwl_calib_hdr hdr;
u8 cap_pin1;
@@ -3231,11 +3246,11 @@ struct iwl_calib_xtal_freq_cmd {
u8 pad[2];
} __packed;
-#define DEFAULT_RADIO_SENSOR_OFFSET 2700
+#define DEFAULT_RADIO_SENSOR_OFFSET cpu_to_le16(2700)
struct iwl_calib_temperature_offset_cmd {
struct iwl_calib_hdr hdr;
- s16 radio_sensor_offset;
- s16 reserved;
+ __le16 radio_sensor_offset;
+ __le16 reserved;
} __packed;
/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
@@ -3756,6 +3771,127 @@ struct iwl_bt_coex_prot_env_cmd {
u8 reserved[2];
} __attribute__((packed));
+/*
+ * REPLY_WOWLAN_PATTERNS
+ */
+#define IWLAGN_WOWLAN_MIN_PATTERN_LEN 16
+#define IWLAGN_WOWLAN_MAX_PATTERN_LEN 128
+
+struct iwlagn_wowlan_pattern {
+ u8 mask[IWLAGN_WOWLAN_MAX_PATTERN_LEN / 8];
+ u8 pattern[IWLAGN_WOWLAN_MAX_PATTERN_LEN];
+ u8 mask_size;
+ u8 pattern_size;
+ __le16 reserved;
+} __packed;
+
+#define IWLAGN_WOWLAN_MAX_PATTERNS 20
+
+struct iwlagn_wowlan_patterns_cmd {
+ __le32 n_patterns;
+ struct iwlagn_wowlan_pattern patterns[];
+} __packed;
+
+/*
+ * REPLY_WOWLAN_WAKEUP_FILTER
+ */
+enum iwlagn_wowlan_wakeup_filters {
+ IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
+ IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1),
+ IWLAGN_WOWLAN_WAKEUP_BEACON_MISS = BIT(2),
+ IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3),
+ IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4),
+ IWLAGN_WOWLAN_WAKEUP_RFKILL = BIT(5),
+ IWLAGN_WOWLAN_WAKEUP_UCODE_ERROR = BIT(6),
+ IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(7),
+ IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(8),
+ IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(9),
+ IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(10),
+};
+
+struct iwlagn_wowlan_wakeup_filter_cmd {
+ __le32 enabled;
+ __le16 non_qos_seq;
+ u8 min_sleep_seconds;
+ u8 reserved;
+ __le16 qos_seq[8];
+};
+
+/*
+ * REPLY_WOWLAN_TSC_RSC_PARAMS
+ */
+#define IWLAGN_NUM_RSC 16
+
+struct tkip_sc {
+ __le16 iv16;
+ __le16 pad;
+ __le32 iv32;
+} __packed;
+
+struct iwlagn_tkip_rsc_tsc {
+ struct tkip_sc unicast_rsc[IWLAGN_NUM_RSC];
+ struct tkip_sc multicast_rsc[IWLAGN_NUM_RSC];
+ struct tkip_sc tsc;
+} __packed;
+
+struct aes_sc {
+ __le64 pn;
+} __packed;
+
+struct iwlagn_aes_rsc_tsc {
+ struct aes_sc unicast_rsc[IWLAGN_NUM_RSC];
+ struct aes_sc multicast_rsc[IWLAGN_NUM_RSC];
+ struct aes_sc tsc;
+} __packed;
+
+union iwlagn_all_tsc_rsc {
+ struct iwlagn_tkip_rsc_tsc tkip;
+ struct iwlagn_aes_rsc_tsc aes;
+};
+
+struct iwlagn_wowlan_rsc_tsc_params_cmd {
+ union iwlagn_all_tsc_rsc all_tsc_rsc;
+} __packed;
+
+/*
+ * REPLY_WOWLAN_TKIP_PARAMS
+ */
+#define IWLAGN_MIC_KEY_SIZE 8
+#define IWLAGN_P1K_SIZE 5
+struct iwlagn_mic_keys {
+ u8 tx[IWLAGN_MIC_KEY_SIZE];
+ u8 rx_unicast[IWLAGN_MIC_KEY_SIZE];
+ u8 rx_mcast[IWLAGN_MIC_KEY_SIZE];
+} __packed;
+
+struct iwlagn_p1k_cache {
+ __le16 p1k[IWLAGN_P1K_SIZE];
+} __packed;
+
+#define IWLAGN_NUM_RX_P1K_CACHE 2
+
+struct iwlagn_wowlan_tkip_params_cmd {
+ struct iwlagn_mic_keys mic_keys;
+ struct iwlagn_p1k_cache tx;
+ struct iwlagn_p1k_cache rx_uni[IWLAGN_NUM_RX_P1K_CACHE];
+ struct iwlagn_p1k_cache rx_multi[IWLAGN_NUM_RX_P1K_CACHE];
+} __packed;
+
+/*
+ * REPLY_WOWLAN_KEK_KCK_MATERIAL
+ */
+
+#define IWLAGN_KCK_MAX_SIZE 32
+#define IWLAGN_KEK_MAX_SIZE 32
+
+struct iwlagn_wowlan_kek_kck_material_cmd {
+ u8 kck[IWLAGN_KCK_MAX_SIZE];
+ u8 kek[IWLAGN_KEK_MAX_SIZE];
+ __le16 kck_len;
+ __le16 kek_len;
+ __le64 replay_ctr;
+} __packed;
+
/******************************************************************************
* (13)
* Union of all expected notifications/responses:
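Reviewer note: the new REPLY_WOWLAN_* commands and structures above are only declared here; the code that builds them is elsewhere. As a minimal sketch of how the wakeup filter command could be assembled from a cfg80211 WoWLAN trigger set (the mapping and the surrounding function are illustrative assumptions, not taken from this patch):

    /* Hypothetical example of filling the new WoWLAN wakeup filter command. */
    static int example_send_wowlan_filter(struct iwl_priv *priv,
                                          struct cfg80211_wowlan *wowlan)
    {
            struct iwlagn_wowlan_wakeup_filter_cmd cmd;

            memset(&cmd, 0, sizeof(cmd));

            if (wowlan->magic_pkt)
                    cmd.enabled |= cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
            if (wowlan->disconnect)
                    cmd.enabled |= cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
                                               IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
            if (wowlan->n_patterns)
                    cmd.enabled |= cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);

            return trans_send_cmd_pdu(&priv->trans, REPLY_WOWLAN_WAKEUP_FILTER,
                                      CMD_SYNC, sizeof(cmd), &cmd);
    }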
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 213c80c6a66..cf376f62b2f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -42,27 +42,7 @@
#include "iwl-sta.h"
#include "iwl-helpers.h"
#include "iwl-agn.h"
-
-
-/*
- * set bt_coex_active to true, uCode will do kill/defer
- * every time the priority line is asserted (BT is sending signals on the
- * priority line in the PCIx).
- * set bt_coex_active to false, uCode will ignore the BT activity and
- * perform the normal operation
- *
- * User might experience transmit issue on some platform due to WiFi/BT
- * co-exist problem. The possible behaviors are:
- * Able to scan and finding all the available AP
- * Not able to associate with any AP
- * On those platforms, WiFi communication can be restored by set
- * "bt_coex_active" module parameter to "false"
- *
- * default: bt_coex_active = true (BT_COEX_ENABLE)
- */
-bool bt_coex_active = true;
-module_param(bt_coex_active, bool, S_IRUGO);
-MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
+#include "iwl-trans.h"
u32 iwl_debug_level;
@@ -164,7 +144,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
- if (priv->cfg->sku & IWL_SKU_N)
+ if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_5GHZ);
@@ -174,7 +154,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
sband->bitrates = rates;
sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
- if (priv->cfg->sku & IWL_SKU_N)
+ if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_2GHZ);
@@ -229,12 +209,12 @@ int iwlcore_init_geos(struct iwl_priv *priv)
priv->tx_power_next = max_tx_power;
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
- priv->cfg->sku & IWL_SKU_A) {
+ priv->cfg->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
+ char buf[32];
+ bus_get_hw_id(priv->bus, buf, sizeof(buf));
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
- "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
- priv->pci_dev->device,
- priv->pci_dev->subsystem_device);
- priv->cfg->sku &= ~IWL_SKU_A;
+ "Please send your %s to maintainer.\n", buf);
+ priv->cfg->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
}
IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
@@ -383,6 +363,8 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
}
+ ctx->beacon_int = beacon_int;
+
tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
interval_tm = beacon_int * TIME_UNIT;
rem = do_div(tsf, interval_tm);
@@ -396,8 +378,8 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
le32_to_cpu(ctx->timing.beacon_init_val),
le16_to_cpu(ctx->timing.atim_window));
- return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
- sizeof(ctx->timing), &ctx->timing);
+ return trans_send_cmd_pdu(&priv->trans, ctx->rxon_timing_cmd,
+ CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
}
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
@@ -547,19 +529,6 @@ int iwl_full_rxon_required(struct iwl_priv *priv,
return 0;
}
-u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- /*
- * Assign the lowest rate -- should really get this from
- * the beacon skb from mac80211.
- */
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
- return IWL_RATE_1M_PLCP;
- else
- return IWL_RATE_6M_PLCP;
-}
-
static void _iwl_set_rxon_ht(struct iwl_priv *priv,
struct iwl_ht_config *ht_conf,
struct iwl_rxon_context *ctx)
@@ -619,8 +588,7 @@ static void _iwl_set_rxon_ht(struct iwl_priv *priv,
rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
}
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ iwlagn_set_rxon_chain(priv, ctx);
IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
"extension channel offset 0x%x\n",
@@ -874,12 +842,12 @@ static void iwlagn_abort_notification_waits(struct iwl_priv *priv)
unsigned long flags;
struct iwl_notification_wait *wait_entry;
- spin_lock_irqsave(&priv->_agn.notif_wait_lock, flags);
- list_for_each_entry(wait_entry, &priv->_agn.notif_waits, list)
+ spin_lock_irqsave(&priv->notif_wait_lock, flags);
+ list_for_each_entry(wait_entry, &priv->notif_waits, list)
wait_entry->aborted = true;
- spin_unlock_irqrestore(&priv->_agn.notif_wait_lock, flags);
+ spin_unlock_irqrestore(&priv->notif_wait_lock, flags);
- wake_up_all(&priv->_agn.notif_waitq);
+ wake_up_all(&priv->notif_waitq);
}
void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
@@ -1018,8 +986,6 @@ void iwl_apm_stop(struct iwl_priv *priv)
int iwl_apm_init(struct iwl_priv *priv)
{
int ret = 0;
- u16 lctl;
-
IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
/*
@@ -1048,27 +1014,7 @@ int iwl_apm_init(struct iwl_priv *priv)
iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
- /*
- * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
- * Check if BIOS (or OS) enabled L1-ASPM on this device.
- * If so (likely), disable L0S, so device moves directly L0->L1;
- * costs negligible amount of power savings.
- * If not (unlikely), enable L0S, so there is at least some
- * power savings, even without L1.
- */
- lctl = iwl_pcie_link_ctl(priv);
- if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
- PCI_CFG_LINK_CTRL_VAL_L1_EN) {
- /* L1-ASPM enabled; disable(!) L0S */
- iwl_set_bit(priv, CSR_GIO_REG,
- CSR_GIO_REG_VAL_L0S_ENABLED);
- IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
- } else {
- /* L1-ASPM disabled; enable(!) L0S */
- iwl_clear_bit(priv, CSR_GIO_REG,
- CSR_GIO_REG_VAL_L0S_ENABLED);
- IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
- }
+ bus_apm_config(priv->bus);
/* Configure analog phase-lock-loop before activating to D0A */
if (priv->cfg->base_params->pll_cfg_val)
@@ -1127,9 +1073,6 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
if (priv->tx_power_user_lmt == tx_power && !force)
return 0;
- if (!priv->cfg->ops->lib->send_tx_power)
- return -EOPNOTSUPP;
-
if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
IWL_WARN(priv,
"Requested user TXPOWER %d below lower limit %d.\n",
@@ -1163,7 +1106,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
prev_tx_power = priv->tx_power_user_lmt;
priv->tx_power_user_lmt = tx_power;
- ret = priv->cfg->ops->lib->send_tx_power(priv);
+ ret = iwlagn_send_tx_power(priv);
/* if fail to set tx_power, restore the orig. tx power */
if (ret) {
@@ -1182,7 +1125,7 @@ void iwl_send_bt_config(struct iwl_priv *priv)
.kill_cts_mask = 0,
};
- if (!bt_coex_active)
+ if (!iwlagn_mod_params.bt_coex_active)
bt_cmd.flags = BT_COEX_DISABLE;
else
bt_cmd.flags = BT_COEX_ENABLE;
@@ -1191,8 +1134,8 @@ void iwl_send_bt_config(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "BT coex %s\n",
(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
- if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- sizeof(struct iwl_bt_cmd), &bt_cmd))
+ if (trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
+ CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
IWL_ERR(priv, "failed to send BT Coex Config\n");
}
@@ -1204,11 +1147,13 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
};
if (flags & CMD_ASYNC)
- return iwl_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
+ return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD,
+ CMD_ASYNC,
sizeof(struct iwl_statistics_cmd),
- &statistics_cmd, NULL);
+ &statistics_cmd);
else
- return iwl_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
+ return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD,
+ CMD_SYNC,
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
}
@@ -1275,10 +1220,9 @@ static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
iwl_connection_init_rx_config(priv, ctx);
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
+ iwlagn_set_rxon_chain(priv, ctx);
- return iwlcore_commit_rxon(priv, ctx);
+ return iwlagn_commit_rxon(priv, ctx);
}
static int iwl_setup_interface(struct iwl_priv *priv,
@@ -1431,26 +1375,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
}
-int iwl_alloc_txq_mem(struct iwl_priv *priv)
-{
- if (!priv->txq)
- priv->txq = kzalloc(
- sizeof(struct iwl_tx_queue) *
- priv->cfg->base_params->num_of_queues,
- GFP_KERNEL);
- if (!priv->txq) {
- IWL_ERR(priv, "Not enough memory for txq\n");
- return -ENOMEM;
- }
- return 0;
-}
-
-void iwl_free_txq_mem(struct iwl_priv *priv)
-{
- kfree(priv->txq);
- priv->txq = NULL;
-}
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
@@ -1763,6 +1687,7 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_rxon_context *tmp;
+ enum nl80211_iftype newviftype = newtype;
u32 interface_modes;
int err;
@@ -1818,7 +1743,7 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/* success */
iwl_teardown_interface(priv, vif, true);
- vif->type = newtype;
+ vif->type = newviftype;
vif->p2p = newp2p;
err = iwl_setup_interface(priv, ctx);
WARN_ON(err);
@@ -1911,7 +1836,7 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
{
unsigned int timeout = priv->cfg->base_params->wd_timeout;
- if (timeout)
+ if (timeout && !iwlagn_mod_params.wd_disable)
mod_timer(&priv->watchdog,
jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
else
@@ -1972,35 +1897,28 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
#ifdef CONFIG_PM
-int iwl_pci_suspend(struct device *device)
+int iwl_suspend(struct iwl_priv *priv)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_priv *priv = pci_get_drvdata(pdev);
-
/*
* This function is called when system goes into suspend state
* mac80211 will call iwl_mac_stop() from the mac80211 suspend function
* first but since iwl_mac_stop() has no knowledge of who the caller is,
* it will not call apm_ops.stop() to stop the DMA operation.
* Calling apm_ops.stop here to make sure we stop the DMA.
+ *
+ * But of course ... if we have configured WoWLAN then we did other
+ * things already :-)
*/
- iwl_apm_stop(priv);
+ if (!priv->wowlan)
+ iwl_apm_stop(priv);
return 0;
}
-int iwl_pci_resume(struct device *device)
+int iwl_resume(struct iwl_priv *priv)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_priv *priv = pci_get_drvdata(pdev);
bool hw_rfkill = false;
- /*
- * We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state.
- */
- pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
iwl_enable_interrupts(priv);
if (!(iwl_read32(priv, CSR_GP_CNTRL) &
@@ -2017,13 +1935,4 @@ int iwl_pci_resume(struct device *device)
return 0;
}
-const struct dev_pm_ops iwl_pm_ops = {
- .suspend = iwl_pci_suspend,
- .resume = iwl_pci_resume,
- .freeze = iwl_pci_suspend,
- .thaw = iwl_pci_resume,
- .poweroff = iwl_pci_suspend,
- .restore = iwl_pci_resume,
-};
-
#endif /* CONFIG_PM */
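Reviewer note: with dev_pm_ops and the PCI-specific suspend/resume removed from the core, the PM entry points are expected to live in the bus layer and call the new iwl_suspend()/iwl_resume() helpers. A rough sketch of that glue, assuming the bus layer stores its struct iwl_bus as PCI drvdata (the example_* names are hypothetical; the real code is in the PCI backend, not shown here):

    /* Illustrative bus-layer PM glue; the real version lives in the PCI layer. */
    #ifdef CONFIG_PM
    static int example_pci_suspend(struct device *device)
    {
            struct pci_dev *pdev = to_pci_dev(device);
            struct iwl_bus *bus = pci_get_drvdata(pdev);

            /* defer the device-agnostic work to the core */
            return iwl_suspend(bus->drv_data);
    }

    static int example_pci_resume(struct device *device)
    {
            struct pci_dev *pdev = to_pci_dev(device);
            struct iwl_bus *bus = pci_get_drvdata(pdev);

            /*
             * Disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx
             * retries from interfering with C3 CPU state, as the removed
             * core code used to do (PCI_CFG_RETRY_TIMEOUT is the driver's
             * own define).
             */
            pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

            return iwl_resume(bus->drv_data);
    }

    static SIMPLE_DEV_PM_OPS(example_pm_ops,
                             example_pci_suspend, example_pci_resume);
    #endif /* CONFIG_PM */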
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index a54d416ec34..02817a43855 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -76,95 +76,29 @@ struct iwl_cmd;
#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
-#define IWL_PCI_DEVICE(dev, subdev, cfg) \
- .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
- .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
- .driver_data = (kernel_ulong_t)&(cfg)
-
#define TIME_UNIT 1024
-#define IWL_SKU_G 0x1
-#define IWL_SKU_A 0x2
-#define IWL_SKU_N 0x8
-
#define IWL_CMD(x) case x: return #x
-struct iwl_hcmd_ops {
- int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
- void (*set_rxon_chain)(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
- int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant);
- void (*send_bt_config)(struct iwl_priv *priv);
- int (*set_pan_params)(struct iwl_priv *priv);
-};
-
-struct iwl_hcmd_utils_ops {
- u16 (*build_addsta_hcmd)(const struct iwl_addsta_cmd *cmd, u8 *data);
- void (*gain_computation)(struct iwl_priv *priv,
- u32 *average_noise,
- u16 min_average_noise_antennat_i,
- u32 min_average_noise,
- u8 default_chain);
- void (*chain_noise_reset)(struct iwl_priv *priv);
- void (*tx_cmd_protection)(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- __le16 fc, __le32 *tx_flags);
- int (*calc_rssi)(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp);
- int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
- void (*post_scan)(struct iwl_priv *priv);
-};
-
-struct iwl_apm_ops {
- int (*init)(struct iwl_priv *priv);
- void (*config)(struct iwl_priv *priv);
-};
-
-struct iwl_temp_ops {
- void (*temperature)(struct iwl_priv *priv);
-};
-
struct iwl_lib_ops {
/* set hw dependent parameters */
int (*set_hw_params)(struct iwl_priv *priv);
- /* setup Rx handler */
- void (*rx_handler_setup)(struct iwl_priv *priv);
- /* setup deferred work */
- void (*setup_deferred_work)(struct iwl_priv *priv);
+ /* setup BT Rx handler */
+ void (*bt_rx_handler_setup)(struct iwl_priv *priv);
+ /* setup BT related deferred work */
+ void (*bt_setup_deferred_work)(struct iwl_priv *priv);
/* cancel deferred work */
void (*cancel_deferred_work)(struct iwl_priv *priv);
- /* check validity of rtc data address */
- int (*is_valid_rtc_data_addr)(u32 addr);
int (*set_channel_switch)(struct iwl_priv *priv,
struct ieee80211_channel_switch *ch_switch);
- /* power management */
- struct iwl_apm_ops apm_ops;
-
- /* power */
- int (*send_tx_power) (struct iwl_priv *priv);
- void (*update_chain_flags)(struct iwl_priv *priv);
+ /* device specific configuration */
+ void (*nic_config)(struct iwl_priv *priv);
/* eeprom operations (as defined in iwl-eeprom.h) */
struct iwl_eeprom_ops eeprom_ops;
/* temperature */
- struct iwl_temp_ops temp_ops;
-
- int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
- void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
-
-};
-
-/* NIC specific ops */
-struct iwl_nic_ops {
- void (*additional_nic_config)(struct iwl_priv *priv);
-};
-
-struct iwl_ops {
- const struct iwl_lib_ops *lib;
- const struct iwl_hcmd_ops *hcmd;
- const struct iwl_hcmd_utils_ops *utils;
- const struct iwl_nic_ops *nic;
+ void (*temperature)(struct iwl_priv *priv);
};
struct iwl_mod_params {
@@ -176,6 +110,12 @@ struct iwl_mod_params {
int restart_fw; /* def: 1 = restart firmware */
bool plcp_check; /* def: true = enable plcp health check */
bool ack_check; /* def: false = disable ack health check */
+ bool wd_disable; /* def: false = enable stuck queue check */
+ bool bt_coex_active; /* def: true = enable bt coex */
+ int led_mode; /* def: 0 = system default */
+ bool no_sleep_autoadjust; /* def: true = disable autoadjust */
+ bool power_save; /* def: false = disable power save */
+ int power_level; /* def: 1 = power level */
};
/*
@@ -195,6 +135,7 @@ struct iwl_mod_params {
* @temperature_kelvin: temperature report by uCode in kelvin
* @max_event_log_size: size of event log buffer size for ucode event logging
* @shadow_reg_enable: HW shadhow register bit
+ * @no_idle_support: do not support idle mode
*/
struct iwl_base_params {
int eeprom_size;
@@ -216,6 +157,7 @@ struct iwl_base_params {
bool temperature_kelvin;
u32 max_event_log_size;
const bool shadow_reg_enable;
+ const bool no_idle_support;
};
/*
* @advanced_bt_coexist: support advanced bt coexist
@@ -225,7 +167,7 @@ struct iwl_base_params {
* @ampdu_factor: Maximum A-MPDU length factor
* @ampdu_density: Minimum A-MPDU spacing
* @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode
-*/
+ */
struct iwl_bt_params {
bool advanced_bt_coexist;
u8 bt_init_traffic_load;
@@ -238,19 +180,31 @@ struct iwl_bt_params {
};
/*
* @use_rts_for_aggregation: use rts/cts protection for HT traffic
-*/
+ */
struct iwl_ht_params {
const bool ht_greenfield_support; /* if used set to true */
bool use_rts_for_aggregation;
+ enum ieee80211_smps_mode smps_mode;
};
/**
* struct iwl_cfg
+ * @name: Official name of the device
* @fw_name_pre: Firmware filename prefix. The api version and extension
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @valid_tx_ant: valid transmit antenna
+ * @valid_rx_ant: valid receive antenna
+ * @sku: sku information from EEPROM
+ * @eeprom_ver: EEPROM version
+ * @eeprom_calib_ver: EEPROM calibration version
+ * @lib: pointer to the lib ops
+ * @additional_nic_config: additional nic configuration
+ * @base_params: pointer to basic parameters
+ * @ht_params: pointer to ht parameters
+ * @bt_params: pointer to bt parameters
* @pa_type: used by 6000 series only to identify the type of Power Amplifier
* @need_dc_calib: need to perform init dc calibration
* @need_temp_offset_calib: need to perform temperature offset calibration
@@ -260,7 +214,6 @@ struct iwl_ht_params {
* @rx_with_siso_diversity: 1x1 device with rx antenna diversity
* @internal_wimax_coex: internal wifi/wimax combo device
* @iq_invert: I/Q inversion
- * @disable_otp_refresh: disable OTP refresh current limit
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
@@ -277,11 +230,7 @@ struct iwl_ht_params {
* }
*
* The ideal usage of this infrastructure is to treat a new ucode API
- * release as a new hardware revision. That is, through utilizing the
- * iwl_hcmd_utils_ops etc. we accommodate different command structures
- * and flows between hardware versions (4965/5000) as well as their API
- * versions.
- *
+ * release as a new hardware revision.
*/
struct iwl_cfg {
/* params specific to an individual device within a device family */
@@ -291,10 +240,11 @@ struct iwl_cfg {
const unsigned int ucode_api_min;
u8 valid_tx_ant;
u8 valid_rx_ant;
- unsigned int sku;
+ u16 sku;
u16 eeprom_ver;
u16 eeprom_calib_ver;
- const struct iwl_ops *ops;
+ const struct iwl_lib_ops *lib;
+ void (*additional_nic_config)(struct iwl_priv *priv);
/* params not likely to change within a device family */
struct iwl_base_params *base_params;
/* params likely to change within a device family */
@@ -309,7 +259,6 @@ struct iwl_cfg {
const bool rx_with_siso_diversity;
const bool internal_wimax_coex;
const bool iq_invert;
- const bool disable_otp_refresh;
};
/***************************
@@ -346,9 +295,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
int iwl_mac_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p);
-int iwl_alloc_txq_mem(struct iwl_priv *priv);
-void iwl_free_txq_mem(struct iwl_priv *priv);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_alloc_traffic_mem(struct iwl_priv *priv);
void iwl_free_traffic_mem(struct iwl_priv *priv);
@@ -390,28 +336,8 @@ static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
/*****************************************************
* RX
******************************************************/
-void iwl_cmd_queue_free(struct iwl_priv *priv);
-void iwl_cmd_queue_unmap(struct iwl_priv *priv);
-int iwl_rx_queue_alloc(struct iwl_priv *priv);
-void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
- struct iwl_rx_queue *q);
-int iwl_rx_queue_space(const struct iwl_rx_queue *q);
-void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-
void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
-/* TX helpers */
-
-/*****************************************************
-* TX
-******************************************************/
-void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
-int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id);
-void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id);
-void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
-void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
void iwl_setup_watchdog(struct iwl_priv *priv);
/*****************************************************
* TX power
@@ -419,13 +345,6 @@ void iwl_setup_watchdog(struct iwl_priv *priv);
int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
/*******************************************************************************
- * Rate
- ******************************************************************************/
-
-u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-
-/*******************************************************************************
* Scanning
******************************************************************************/
void iwl_init_scan_params(struct iwl_priv *priv);
@@ -469,51 +388,19 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
*****************************************************/
const char *get_cmd_string(u8 cmd);
-int __must_check iwl_send_cmd_sync(struct iwl_priv *priv,
- struct iwl_host_cmd *cmd);
-int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-int __must_check iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id,
- u16 len, const void *data);
-int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
- const void *data,
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt));
-
-int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-
-
-/*****************************************************
- * PCI *
- *****************************************************/
-
-static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
-{
- int pos;
- u16 pci_lnk_ctl;
- pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP);
- pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
- return pci_lnk_ctl;
-}
-
void iwl_bg_watchdog(unsigned long data);
u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
u32 addon, u32 beacon_interval);
#ifdef CONFIG_PM
-int iwl_pci_suspend(struct device *device);
-int iwl_pci_resume(struct device *device);
-extern const struct dev_pm_ops iwl_pm_ops;
-
-#define IWL_PM_OPS (&iwl_pm_ops)
-
-#else /* !CONFIG_PM */
-
-#define IWL_PM_OPS NULL
-
+int iwl_suspend(struct iwl_priv *priv);
+int iwl_resume(struct iwl_priv *priv);
#endif /* !CONFIG_PM */
+int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg);
+void __devexit iwl_remove(struct iwl_priv * priv);
+
/*****************************************************
* Error Handling Debugging
******************************************************/
@@ -613,11 +500,7 @@ void iwl_apm_stop(struct iwl_priv *priv);
int iwl_apm_init(struct iwl_priv *priv);
int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-static inline int iwlcore_commit_rxon(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
-}
+
static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
struct iwl_priv *priv, enum ieee80211_band band)
{
@@ -630,7 +513,6 @@ static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
priv->cfg->bt_params->advanced_bt_coexist;
}
-extern bool bt_coex_active;
extern bool bt_siso_mode;
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 5ab90ba7a02..d6dbb042304 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -351,6 +351,7 @@
#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
+#define CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE (0x00000020)
/* GP Driver */
#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_MSK (0x00000003)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 2824ccbcc1f..f9a407e40af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -32,10 +32,10 @@
struct iwl_priv;
extern u32 iwl_debug_level;
-#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
-#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
-#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
-#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
+#define IWL_ERR(p, f, a...) dev_err(p->bus->dev, f, ## a)
+#define IWL_WARN(p, f, a...) dev_warn(p->bus->dev, f, ## a)
+#define IWL_INFO(p, f, a...) dev_info(p->bus->dev, f, ## a)
+#define IWL_CRIT(p, f, a...) dev_crit(p->bus->dev, f, ## a)
#define iwl_print_hex_error(priv, p, len) \
do { \
@@ -78,8 +78,6 @@ static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
void iwl_dbgfs_unregister(struct iwl_priv *priv);
-extern int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
- int bufsz);
#else
static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
{
@@ -125,13 +123,13 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
/* 0x00000F00 - 0x00000100 */
#define IWL_DL_POWER (1 << 8)
#define IWL_DL_TEMP (1 << 9)
-#define IWL_DL_NOTIF (1 << 10)
+/* reserved (1 << 10) */
#define IWL_DL_SCAN (1 << 11)
/* 0x0000F000 - 0x00001000 */
#define IWL_DL_ASSOC (1 << 12)
#define IWL_DL_DROP (1 << 13)
-#define IWL_DL_TXPOWER (1 << 14)
-#define IWL_DL_AP (1 << 15)
+/* reserved (1 << 14) */
+#define IWL_DL_COEX (1 << 15)
/* 0x000F0000 - 0x00010000 */
#define IWL_DL_FW (1 << 16)
#define IWL_DL_RF_KILL (1 << 17)
@@ -171,12 +169,10 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
-#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
-#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
+#define IWL_DEBUG_COEX(p, f, a...) IWL_DEBUG(p, IWL_DL_COEX, f, ## a)
#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
-#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
#define IWL_DEBUG_ASSOC(p, f, a...) \
IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 0e6a04b739a..ec1485b2d3f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -227,7 +227,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
/* default is to dump the entire data segment */
if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
priv->dbgfs_sram_offset = 0x800000;
- if (priv->ucode_type == UCODE_SUBTYPE_INIT)
+ if (priv->ucode_type == IWL_UCODE_INIT)
priv->dbgfs_sram_len = priv->ucode_init.data.len;
else
priv->dbgfs_sram_len = priv->ucode_rt.data.len;
@@ -322,6 +322,19 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
return count;
}
+static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+
+ if (!priv->wowlan_sram)
+ return -ENODATA;
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ priv->wowlan_sram,
+ priv->ucode_wowlan.data.len);
+}
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -856,6 +869,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
}
DEBUGFS_READ_WRITE_FILE_OPS(sram);
+DEBUGFS_READ_FILE_OPS(wowlan_sram);
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
@@ -1915,121 +1929,121 @@ static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n");
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_DELAY),
- priv->_agn.reply_tx_stats.pp_delay);
+ priv->reply_tx_stats.pp_delay);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_FEW_BYTES),
- priv->_agn.reply_tx_stats.pp_few_bytes);
+ priv->reply_tx_stats.pp_few_bytes);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_BT_PRIO),
- priv->_agn.reply_tx_stats.pp_bt_prio);
+ priv->reply_tx_stats.pp_bt_prio);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_QUIET_PERIOD),
- priv->_agn.reply_tx_stats.pp_quiet_period);
+ priv->reply_tx_stats.pp_quiet_period);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_CALC_TTAK),
- priv->_agn.reply_tx_stats.pp_calc_ttak);
+ priv->reply_tx_stats.pp_calc_ttak);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
iwl_get_tx_fail_reason(
TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY),
- priv->_agn.reply_tx_stats.int_crossed_retry);
+ priv->reply_tx_stats.int_crossed_retry);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_SHORT_LIMIT),
- priv->_agn.reply_tx_stats.short_limit);
+ priv->reply_tx_stats.short_limit);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_LONG_LIMIT),
- priv->_agn.reply_tx_stats.long_limit);
+ priv->reply_tx_stats.long_limit);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_UNDERRUN),
- priv->_agn.reply_tx_stats.fifo_underrun);
+ priv->reply_tx_stats.fifo_underrun);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_DRAIN_FLOW),
- priv->_agn.reply_tx_stats.drain_flow);
+ priv->reply_tx_stats.drain_flow);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_RFKILL_FLUSH),
- priv->_agn.reply_tx_stats.rfkill_flush);
+ priv->reply_tx_stats.rfkill_flush);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_LIFE_EXPIRE),
- priv->_agn.reply_tx_stats.life_expire);
+ priv->reply_tx_stats.life_expire);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_DEST_PS),
- priv->_agn.reply_tx_stats.dest_ps);
+ priv->reply_tx_stats.dest_ps);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_HOST_ABORTED),
- priv->_agn.reply_tx_stats.host_abort);
+ priv->reply_tx_stats.host_abort);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_BT_RETRY),
- priv->_agn.reply_tx_stats.pp_delay);
+ priv->reply_tx_stats.pp_delay);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_STA_INVALID),
- priv->_agn.reply_tx_stats.sta_invalid);
+ priv->reply_tx_stats.sta_invalid);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_FRAG_DROPPED),
- priv->_agn.reply_tx_stats.frag_drop);
+ priv->reply_tx_stats.frag_drop);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_TID_DISABLE),
- priv->_agn.reply_tx_stats.tid_disable);
+ priv->reply_tx_stats.tid_disable);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_FLUSHED),
- priv->_agn.reply_tx_stats.fifo_flush);
+ priv->reply_tx_stats.fifo_flush);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
iwl_get_tx_fail_reason(
TX_STATUS_FAIL_INSUFFICIENT_CF_POLL),
- priv->_agn.reply_tx_stats.insuff_cf_poll);
+ priv->reply_tx_stats.insuff_cf_poll);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_PASSIVE_NO_RX),
- priv->_agn.reply_tx_stats.fail_hw_drop);
+ priv->reply_tx_stats.fail_hw_drop);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
iwl_get_tx_fail_reason(
TX_STATUS_FAIL_NO_BEACON_ON_RADAR),
- priv->_agn.reply_tx_stats.sta_color_mismatch);
+ priv->reply_tx_stats.sta_color_mismatch);
pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
- priv->_agn.reply_tx_stats.unknown);
+ priv->reply_tx_stats.unknown);
pos += scnprintf(buf + pos, bufsz - pos,
"\nStatistics_Agg_TX_Error:\n");
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_UNDERRUN_MSK),
- priv->_agn.reply_agg_tx_stats.underrun);
+ priv->reply_agg_tx_stats.underrun);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_BT_PRIO_MSK),
- priv->_agn.reply_agg_tx_stats.bt_prio);
+ priv->reply_agg_tx_stats.bt_prio);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_FEW_BYTES_MSK),
- priv->_agn.reply_agg_tx_stats.few_bytes);
+ priv->reply_agg_tx_stats.few_bytes);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_ABORT_MSK),
- priv->_agn.reply_agg_tx_stats.abort);
+ priv->reply_agg_tx_stats.abort);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
iwl_get_agg_tx_fail_reason(
AGG_TX_STATE_LAST_SENT_TTL_MSK),
- priv->_agn.reply_agg_tx_stats.last_sent_ttl);
+ priv->reply_agg_tx_stats.last_sent_ttl);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
iwl_get_agg_tx_fail_reason(
AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK),
- priv->_agn.reply_agg_tx_stats.last_sent_try);
+ priv->reply_agg_tx_stats.last_sent_try);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
iwl_get_agg_tx_fail_reason(
AGG_TX_STATE_LAST_SENT_BT_KILL_MSK),
- priv->_agn.reply_agg_tx_stats.last_sent_bt_kill);
+ priv->reply_agg_tx_stats.last_sent_bt_kill);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_SCD_QUERY_MSK),
- priv->_agn.reply_agg_tx_stats.scd_query);
+ priv->reply_agg_tx_stats.scd_query);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
iwl_get_agg_tx_fail_reason(
AGG_TX_STATE_TEST_BAD_CRC32_MSK),
- priv->_agn.reply_agg_tx_stats.bad_crc32);
+ priv->reply_agg_tx_stats.bad_crc32);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_RESPONSE_MSK),
- priv->_agn.reply_agg_tx_stats.response);
+ priv->reply_agg_tx_stats.response);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DUMP_TX_MSK),
- priv->_agn.reply_agg_tx_stats.dump_tx);
+ priv->reply_agg_tx_stats.dump_tx);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DELAY_TX_MSK),
- priv->_agn.reply_agg_tx_stats.delay_tx);
+ priv->reply_agg_tx_stats.delay_tx);
pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
- priv->_agn.reply_agg_tx_stats.unknown);
+ priv->reply_agg_tx_stats.unknown);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@@ -2493,7 +2507,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
if (iwl_is_rfkill(priv))
return -EFAULT;
- priv->cfg->ops->lib->dev_txfifo_flush(priv, IWL_DROP_ALL);
+ iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
return count;
}
@@ -2667,6 +2681,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
@@ -2693,8 +2708,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
- if (priv->cfg->ops->lib->dev_txfifo_flush)
- DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index c8de236c141..6c9790cac8d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -31,6 +31,7 @@
#ifndef __iwl_dev_h__
#define __iwl_dev_h__
+#include <linux/interrupt.h>
#include <linux/pci.h> /* for struct pci_device_id */
#include <linux/kernel.h>
#include <linux/wait.h>
@@ -47,6 +48,10 @@
#include "iwl-power.h"
#include "iwl-agn-rs.h"
#include "iwl-agn-tt.h"
+#include "iwl-bus.h"
+#include "iwl-trans.h"
+
+#define DRV_NAME "iwlagn"
struct iwl_tx_queue;
@@ -257,11 +262,9 @@ struct iwl_channel_info {
enum {
CMD_SYNC = 0,
- CMD_SIZE_NORMAL = 0,
- CMD_NO_SKB = 0,
- CMD_ASYNC = (1 << 1),
- CMD_WANT_SKB = (1 << 2),
- CMD_MAPPED = (1 << 3),
+ CMD_ASYNC = BIT(0),
+ CMD_WANT_SKB = BIT(1),
+ CMD_ON_DEMAND = BIT(2),
};
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -294,6 +297,16 @@ enum iwl_hcmd_dataflag {
IWL_HCMD_DFL_NOCOPY = BIT(0),
};
+/**
+ * struct iwl_host_cmd - Host command to the uCode
+ * @data: array of chunks that compose the data of the host command
+ * @reply_page: pointer to the page that holds the response to the host command
+ * @callback: callback for asynchronous commands, invoked with the response
+ *	packet; must be NULL for synchronous commands
+ * @flags: CMD_* flags; note that CMD_WANT_SKB is incompatible with CMD_ASYNC
+ * @len: array of the lengths of the chunks in @data
+ * @dataflags: per-chunk IWL_HCMD_DFL_* flags for the chunks in @data
+ * @id: id of the host command
+ */
struct iwl_host_cmd {
const void *data[IWL_MAX_CMD_TFDS];
unsigned long reply_page;
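
For context, callers elsewhere in this patch (iwl-led.c, iwl-power.c) drive this
structure through trans_send_cmd()/trans_send_cmd_pdu(). A minimal sketch of a
synchronous, single-chunk command follows; the command id and payload are
placeholders, and the surrounding iwlagn context (struct iwl_priv with its
embedded iwl_trans) is assumed:

	/* Illustrative sketch only; mirrors the trans_send_cmd() usage
	 * further down in this patch. REPLY_LEDS_CMD is just an example id. */
	static int example_send_sync(struct iwl_priv *priv,
				     const void *payload, u16 len)
	{
		struct iwl_host_cmd cmd = {
			.id = REPLY_LEDS_CMD,	/* any host command id */
			.flags = CMD_SYNC,	/* blocking; callback must be NULL */
			.data = { payload, },	/* single chunk */
			.len = { len, },
		};

		return trans_send_cmd(&priv->trans, &cmd);
	}
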
@@ -385,13 +398,6 @@ struct iwl_tid_data {
struct iwl_ht_agg agg;
};
-struct iwl_hw_key {
- u32 cipher;
- int keylen;
- u8 keyidx;
- u8 key[32];
-};
-
union iwl_ht_rate_supp {
u16 rates;
struct {
@@ -444,7 +450,6 @@ struct iwl_station_entry {
struct iwl_addsta_cmd sta;
struct iwl_tid_data tid[MAX_TID_COUNT];
u8 used, ctxid;
- struct iwl_hw_key keyinfo;
struct iwl_link_quality_cmd *lq;
};
@@ -547,7 +552,8 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
- /* 16 and 17 reserved for future use */
+ IWL_UCODE_TLV_WOWLAN_INST = 16,
+ IWL_UCODE_TLV_WOWLAN_DATA = 17,
IWL_UCODE_TLV_FLAGS = 18,
};
@@ -631,7 +637,6 @@ struct iwl_sensitivity_ranges {
/**
* struct iwl_hw_params
* @max_txq_num: Max # Tx queues supported
- * @dma_chnl_num: Number of Tx DMA/FIFO channels
* @scd_bc_tbls_size: size of scheduler byte count tables
* @tfd_size: TFD size
* @tx/rx_chains_num: Number of TX/RX chains
@@ -653,7 +658,6 @@ struct iwl_sensitivity_ranges {
*/
struct iwl_hw_params {
u8 max_txq_num;
- u8 dma_chnl_num;
u16 scd_bc_tbls_size;
u32 tfd_size;
u8 tx_chains_num;
@@ -663,7 +667,6 @@ struct iwl_hw_params {
u16 max_rxq_size;
u16 max_rxq_log;
u32 rx_page_order;
- u32 rx_wrt_ptr_reg;
u8 max_stations;
u8 ht40_channel;
u8 max_beacon_itrvl; /* in 1024 ms */
@@ -694,8 +697,6 @@ struct iwl_hw_params {
****************************************************************************/
extern void iwl_update_chain_flags(struct iwl_priv *priv);
extern const u8 iwl_bcast_addr[ETH_ALEN];
-extern int iwl_rxq_stop(struct iwl_priv *priv);
-extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
extern int iwl_queue_space(const struct iwl_queue *q);
static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
@@ -1152,6 +1153,8 @@ struct iwl_rxon_context {
__le32 station_flags;
+ int beacon_int;
+
struct {
bool non_gf_sta_present;
u8 protection;
@@ -1168,14 +1171,29 @@ enum iwl_scan_type {
IWL_SCAN_OFFCH_TX,
};
+enum iwlagn_ucode_type {
+ IWL_UCODE_NONE,
+ IWL_UCODE_REGULAR,
+ IWL_UCODE_INIT,
+ IWL_UCODE_WOWLAN,
+};
+
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
struct iwl_testmode_trace {
+ u32 buff_size;
+ u32 total_size;
+ u32 num_chunks;
u8 *cpu_addr;
u8 *trace_addr;
dma_addr_t dma_addr;
bool trace_enabled;
};
#endif
+
+/* uCode ownership */
+#define IWL_OWNERSHIP_DRIVER 0
+#define IWL_OWNERSHIP_TM 1
+
struct iwl_priv {
/* ieee device used by generic ieee processing code */
@@ -1243,11 +1261,8 @@ struct iwl_priv {
spinlock_t reg_lock; /* protect hw register access */
struct mutex mutex;
- /* basic pci-network driver stuff */
- struct pci_dev *pci_dev;
-
- /* pci hardware address support */
- void __iomem *hw_base;
+ struct iwl_bus *bus; /* bus specific data */
+ struct iwl_trans trans;
/* microcode/device supports multiple contexts */
u8 valid_contexts;
@@ -1267,10 +1282,15 @@ struct iwl_priv {
int fw_index; /* firmware we're trying to load */
u32 ucode_ver; /* version of ucode, copy of
iwl_ucode.ver */
+
+ /* uCode owner: default: IWL_OWNERSHIP_DRIVER */
+ u8 ucode_owner;
+
struct fw_img ucode_rt;
struct fw_img ucode_init;
+ struct fw_img ucode_wowlan;
- enum iwlagn_ucode_subtype ucode_type;
+ enum iwlagn_ucode_type ucode_type;
u8 ucode_write_complete; /* the image write is complete */
char firmware_name[25];
@@ -1341,6 +1361,8 @@ struct iwl_priv {
u8 mac80211_registered;
+ bool wowlan;
+
/* eeprom -- this is in the card's little endian byte order */
u8 *eeprom;
int nvm_device_type;
@@ -1376,56 +1398,54 @@ struct iwl_priv {
} accum_stats, delta_stats, max_delta_stats;
#endif
- struct {
- /* INT ICT Table */
- __le32 *ict_tbl;
- void *ict_tbl_vir;
- dma_addr_t ict_tbl_dma;
- dma_addr_t aligned_ict_tbl_dma;
- int ict_index;
- u32 inta;
- bool use_ict;
- /*
- * reporting the number of tids has AGG on. 0 means
- * no AGGREGATION
- */
- u8 agg_tids_count;
-
- struct iwl_rx_phy_res last_phy_res;
- bool last_phy_res_valid;
-
- struct completion firmware_loading_complete;
-
- u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
- u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
-
- /*
- * chain noise reset and gain commands are the
- * two extra calibration commands follows the standard
- * phy calibration commands
- */
- u8 phy_calib_chain_noise_reset_cmd;
- u8 phy_calib_chain_noise_gain_cmd;
-
- /* counts reply_tx error */
- struct reply_tx_error_statistics reply_tx_stats;
- struct reply_agg_tx_error_statistics reply_agg_tx_stats;
- /* notification wait support */
- struct list_head notif_waits;
- spinlock_t notif_wait_lock;
- wait_queue_head_t notif_waitq;
-
- /* remain-on-channel offload support */
- struct ieee80211_channel *hw_roc_channel;
- struct delayed_work hw_roc_work;
- enum nl80211_channel_type hw_roc_chantype;
- int hw_roc_duration;
- bool hw_roc_setup;
-
- struct sk_buff *offchan_tx_skb;
- int offchan_tx_timeout;
- struct ieee80211_channel *offchan_tx_chan;
- } _agn;
+ /* INT ICT Table */
+ __le32 *ict_tbl;
+ void *ict_tbl_vir;
+ dma_addr_t ict_tbl_dma;
+ dma_addr_t aligned_ict_tbl_dma;
+ int ict_index;
+ u32 inta;
+ bool use_ict;
+ /*
+ * reporting the number of tids has AGG on. 0 means
+ * no AGGREGATION
+ */
+ u8 agg_tids_count;
+
+ struct iwl_rx_phy_res last_phy_res;
+ bool last_phy_res_valid;
+
+ struct completion firmware_loading_complete;
+
+ u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
+ u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
+
+ /*
+ * chain noise reset and gain commands are the
+ * two extra calibration commands follows the standard
+ * phy calibration commands
+ */
+ u8 phy_calib_chain_noise_reset_cmd;
+ u8 phy_calib_chain_noise_gain_cmd;
+
+ /* counts reply_tx error */
+ struct reply_tx_error_statistics reply_tx_stats;
+ struct reply_agg_tx_error_statistics reply_agg_tx_stats;
+ /* notification wait support */
+ struct list_head notif_waits;
+ spinlock_t notif_wait_lock;
+ wait_queue_head_t notif_waitq;
+
+ /* remain-on-channel offload support */
+ struct ieee80211_channel *hw_roc_channel;
+ struct delayed_work hw_roc_work;
+ enum nl80211_channel_type hw_roc_chantype;
+ int hw_roc_duration;
+ bool hw_roc_setup;
+
+ struct sk_buff *offchan_tx_skb;
+ int offchan_tx_timeout;
+ struct ieee80211_channel *offchan_tx_chan;
/* bt coex */
u8 bt_enable_flag;
@@ -1442,6 +1462,9 @@ struct iwl_priv {
u16 dynamic_frag_thresh;
u8 bt_ci_compliance;
struct work_struct bt_traffic_change_work;
+ bool bt_enable_pspoll;
+ struct iwl_rxon_context *cur_rssi_ctx;
+ bool bt_is_sco;
struct iwl_hw_params hw_params;
@@ -1492,6 +1515,7 @@ struct iwl_priv {
struct dentry *debugfs_dir;
u32 dbgfs_sram_offset, dbgfs_sram_len;
bool disable_ht40;
+ void *wowlan_sram;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
struct work_struct txpower_work;
@@ -1509,9 +1533,14 @@ struct iwl_priv {
bool led_registered;
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
struct iwl_testmode_trace testmode_trace;
+ u32 tm_fixed_rate;
#endif
- u32 dbg_fixed_rate;
+ /* WoWLAN GTK rekey data */
+ u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
+ __le64 replay_ctr;
+ __le16 last_seq_ctl;
+ bool have_rekey_data;
}; /*iwl_priv */
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 47a56bc1cd1..19d31a5e32e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -407,11 +407,6 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
return -EINVAL;
}
-const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
-{
- return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset);
-}
-
u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
{
if (!priv->eeprom)
@@ -449,7 +444,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
}
e = (__le16 *)priv->eeprom;
- priv->cfg->ops->lib->apm_ops.init(priv);
+ iwl_apm_init(priv);
ret = iwl_eeprom_verify_signature(priv);
if (ret < 0) {
@@ -548,7 +543,7 @@ static void iwl_init_band_reference(const struct iwl_priv *priv,
const struct iwl_eeprom_channel **eeprom_ch_info,
const u8 **eeprom_ch_index)
{
- u32 offset = priv->cfg->ops->lib->
+ u32 offset = priv->cfg->lib->
eeprom_ops.regulatory_bands[eep_band - 1];
switch (eep_band) {
case 1: /* 2.4GHz band */
@@ -754,9 +749,9 @@ int iwl_init_channel_map(struct iwl_priv *priv)
}
/* Check if we do have HT40 channels */
- if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
+ if (priv->cfg->lib->eeprom_ops.regulatory_bands[5] ==
EEPROM_REGULATORY_BAND_NO_HT40 &&
- priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
+ priv->cfg->lib->eeprom_ops.regulatory_bands[6] ==
EEPROM_REGULATORY_BAND_NO_HT40)
return 0;
@@ -792,8 +787,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
* driver need to process addition information
* to determine the max channel tx power limits
*/
- if (priv->cfg->ops->lib->eeprom_ops.update_enhanced_txpower)
- priv->cfg->ops->lib->eeprom_ops.update_enhanced_txpower(priv);
+ if (priv->cfg->lib->eeprom_ops.update_enhanced_txpower)
+ priv->cfg->lib->eeprom_ops.update_enhanced_txpower(priv);
return 0;
}
@@ -834,3 +829,28 @@ const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
return NULL;
}
+
+void iwl_rf_config(struct iwl_priv *priv)
+{
+ u16 radio_cfg;
+
+ radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+
+ /* write radio config values to register */
+ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
+ iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+ EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+ EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+ IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
+ EEPROM_RF_CFG_TYPE_MSK(radio_cfg),
+ EEPROM_RF_CFG_STEP_MSK(radio_cfg),
+ EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+ } else
+ WARN_ON(1);
+
+ /* set CSR_HW_CONFIG_REG for uCode use */
+ iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index c960c6fa009..e4bf8ac5e64 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -110,12 +110,10 @@ enum {
};
/* SKU Capabilities */
-/* 5000 and up */
-#define EEPROM_SKU_CAP_BAND_POS (4)
-#define EEPROM_SKU_CAP_BAND_SELECTION \
- (3 << EEPROM_SKU_CAP_BAND_POS)
+#define EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
+#define EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
-#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
+#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
/* *regulatory* channel data format in eeprom, one for each channel.
@@ -164,16 +162,12 @@ struct iwl_eeprom_enhanced_txpwr {
s8 mimo3_max;
} __packed;
-/* 5000 Specific */
-#define EEPROM_5000_TX_POWER_VERSION (4)
-#define EEPROM_5000_EEPROM_VERSION (0x11A)
-
-/* 5000 and up calibration */
+/* calibration */
#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
-/* 5000 temperature */
-#define EEPROM_5000_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
+/* temperature */
+#define EEPROM_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
/* agn links */
#define EEPROM_LINK_HOST (2*0x64)
@@ -205,6 +199,10 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
+/* 5000 Specific */
+#define EEPROM_5000_TX_POWER_VERSION (4)
+#define EEPROM_5000_EEPROM_VERSION (0x11A)
+
/* 5050 Specific */
#define EEPROM_5050_TX_POWER_VERSION (4)
#define EEPROM_5050_EEPROM_VERSION (0x21E)
@@ -270,13 +268,13 @@ extern const u8 iwl_eeprom_band_1[14];
/* General */
#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
+#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */
#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
#define EEPROM_VERSION (2*0x44) /* 2 bytes */
#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
-#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
@@ -294,7 +292,6 @@ extern const u8 iwl_eeprom_band_1[14];
struct iwl_eeprom_ops {
const u32 regulatory_bands[7];
- const u8* (*query_addr) (const struct iwl_priv *priv, size_t offset);
void (*update_enhanced_txpower) (struct iwl_priv *priv);
};
@@ -311,5 +308,6 @@ void iwl_free_channel_map(struct iwl_priv *priv);
const struct iwl_channel_info *iwl_get_channel_info(
const struct iwl_priv *priv,
enum ieee80211_band band, u16 channel);
+void iwl_rf_config(struct iwl_priv *priv);
#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 6dfa806aefe..0ad60b3c04d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -326,7 +326,7 @@
#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
/* Find Control/Status reg for given Tx DMA/FIFO channel */
-#define FH50_TCSR_CHNL_NUM (8)
+#define FH_TCSR_CHNL_NUM (8)
/* TCSR: tx_config register values */
#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
deleted file mode 100644
index 76f99662314..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ /dev/null
@@ -1,291 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <net/mac80211.h>
-
-#include "iwl-dev.h" /* FIXME: remove */
-#include "iwl-debug.h"
-#include "iwl-eeprom.h"
-#include "iwl-core.h"
-
-
-const char *get_cmd_string(u8 cmd)
-{
- switch (cmd) {
- IWL_CMD(REPLY_ALIVE);
- IWL_CMD(REPLY_ERROR);
- IWL_CMD(REPLY_RXON);
- IWL_CMD(REPLY_RXON_ASSOC);
- IWL_CMD(REPLY_QOS_PARAM);
- IWL_CMD(REPLY_RXON_TIMING);
- IWL_CMD(REPLY_ADD_STA);
- IWL_CMD(REPLY_REMOVE_STA);
- IWL_CMD(REPLY_REMOVE_ALL_STA);
- IWL_CMD(REPLY_TXFIFO_FLUSH);
- IWL_CMD(REPLY_WEPKEY);
- IWL_CMD(REPLY_TX);
- IWL_CMD(REPLY_LEDS_CMD);
- IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
- IWL_CMD(COEX_PRIORITY_TABLE_CMD);
- IWL_CMD(COEX_MEDIUM_NOTIFICATION);
- IWL_CMD(COEX_EVENT_CMD);
- IWL_CMD(REPLY_QUIET_CMD);
- IWL_CMD(REPLY_CHANNEL_SWITCH);
- IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
- IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
- IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
- IWL_CMD(POWER_TABLE_CMD);
- IWL_CMD(PM_SLEEP_NOTIFICATION);
- IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
- IWL_CMD(REPLY_SCAN_CMD);
- IWL_CMD(REPLY_SCAN_ABORT_CMD);
- IWL_CMD(SCAN_START_NOTIFICATION);
- IWL_CMD(SCAN_RESULTS_NOTIFICATION);
- IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
- IWL_CMD(BEACON_NOTIFICATION);
- IWL_CMD(REPLY_TX_BEACON);
- IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
- IWL_CMD(QUIET_NOTIFICATION);
- IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
- IWL_CMD(MEASURE_ABORT_NOTIFICATION);
- IWL_CMD(REPLY_BT_CONFIG);
- IWL_CMD(REPLY_STATISTICS_CMD);
- IWL_CMD(STATISTICS_NOTIFICATION);
- IWL_CMD(REPLY_CARD_STATE_CMD);
- IWL_CMD(CARD_STATE_NOTIFICATION);
- IWL_CMD(MISSED_BEACONS_NOTIFICATION);
- IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
- IWL_CMD(SENSITIVITY_CMD);
- IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
- IWL_CMD(REPLY_RX_PHY_CMD);
- IWL_CMD(REPLY_RX_MPDU_CMD);
- IWL_CMD(REPLY_RX);
- IWL_CMD(REPLY_COMPRESSED_BA);
- IWL_CMD(CALIBRATION_CFG_CMD);
- IWL_CMD(CALIBRATION_RES_NOTIFICATION);
- IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
- IWL_CMD(REPLY_TX_POWER_DBM_CMD);
- IWL_CMD(TEMPERATURE_NOTIFICATION);
- IWL_CMD(TX_ANT_CONFIGURATION_CMD);
- IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
- IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
- IWL_CMD(REPLY_BT_COEX_PROT_ENV);
- IWL_CMD(REPLY_WIPAN_PARAMS);
- IWL_CMD(REPLY_WIPAN_RXON);
- IWL_CMD(REPLY_WIPAN_RXON_TIMING);
- IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
- IWL_CMD(REPLY_WIPAN_QOS_PARAM);
- IWL_CMD(REPLY_WIPAN_WEPKEY);
- IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
- IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
- IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
- default:
- return "UNKNOWN";
-
- }
-}
-
-#define HOST_COMPLETE_TIMEOUT (HZ / 2)
-
-static void iwl_generic_cmd_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
-{
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
- get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- switch (cmd->hdr.cmd) {
- case REPLY_TX_LINK_QUALITY_CMD:
- case SENSITIVITY_CMD:
- IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
- get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- break;
- default:
- IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
- get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- }
-#endif
-}
-
-static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- int ret;
-
- if (WARN_ON(!(cmd->flags & CMD_ASYNC)))
- return -EINVAL;
-
- /* An asynchronous command can not expect an SKB to be set. */
- if (WARN_ON(cmd->flags & CMD_WANT_SKB))
- return -EINVAL;
-
- /* Assign a generic callback if one is not provided */
- if (!cmd->callback)
- cmd->callback = iwl_generic_cmd_callback;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EBUSY;
-
- ret = iwl_enqueue_hcmd(priv, cmd);
- if (ret < 0) {
- IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
- get_cmd_string(cmd->id), ret);
- return ret;
- }
- return 0;
-}
-
-int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- int cmd_idx;
- int ret;
-
- if (WARN_ON(cmd->flags & CMD_ASYNC))
- return -EINVAL;
-
- /* A synchronous command can not have a callback set. */
- if (WARN_ON(cmd->callback))
- return -EINVAL;
-
- IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
- get_cmd_string(cmd->id));
-
- set_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
- get_cmd_string(cmd->id));
-
- cmd_idx = iwl_enqueue_hcmd(priv, cmd);
- if (cmd_idx < 0) {
- ret = cmd_idx;
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
- get_cmd_string(cmd->id), ret);
- return ret;
- }
-
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
- HOST_COMPLETE_TIMEOUT);
- if (!ret) {
- if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
- IWL_ERR(priv,
- "Error sending %s: time out after %dms.\n",
- get_cmd_string(cmd->id),
- jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
- get_cmd_string(cmd->id));
- ret = -ETIMEDOUT;
- goto cancel;
- }
- }
-
- if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
- IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
- get_cmd_string(cmd->id));
- ret = -ECANCELED;
- goto fail;
- }
- if (test_bit(STATUS_FW_ERROR, &priv->status)) {
- IWL_ERR(priv, "Command %s failed: FW Error\n",
- get_cmd_string(cmd->id));
- ret = -EIO;
- goto fail;
- }
- if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
- IWL_ERR(priv, "Error: Response NULL in '%s'\n",
- get_cmd_string(cmd->id));
- ret = -EIO;
- goto cancel;
- }
-
- return 0;
-
-cancel:
- if (cmd->flags & CMD_WANT_SKB) {
- /*
- * Cancel the CMD_WANT_SKB flag for the cmd in the
- * TX cmd queue. Otherwise in case the cmd comes
- * in later, it will possibly set an invalid
- * address (cmd->meta.source).
- */
- priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
- ~CMD_WANT_SKB;
- }
-fail:
- if (cmd->reply_page) {
- iwl_free_pages(priv, cmd->reply_page);
- cmd->reply_page = 0;
- }
-
- return ret;
-}
-
-int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- if (cmd->flags & CMD_ASYNC)
- return iwl_send_cmd_async(priv, cmd);
-
- return iwl_send_cmd_sync(priv, cmd);
-}
-
-int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
-{
- struct iwl_host_cmd cmd = {
- .id = id,
- .len = { len, },
- .data = { data, },
- };
-
- return iwl_send_cmd_sync(priv, &cmd);
-}
-
-int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
- u8 id, u16 len, const void *data,
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt))
-{
- struct iwl_host_cmd cmd = {
- .id = id,
- .len = { len, },
- .data = { data, },
- };
-
- cmd.flags |= CMD_ASYNC;
- cmd.callback = callback;
-
- return iwl_send_cmd_async(priv, &cmd);
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 41207a3645b..9d91552d13c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -120,7 +120,16 @@ static inline void iwl_wake_any_queue(struct iwl_priv *priv,
}
}
+#ifdef ieee80211_stop_queue
+#undef ieee80211_stop_queue
+#endif
+
#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+
+#ifdef ieee80211_wake_queue
+#undef ieee80211_wake_queue
+#endif
+
#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
static inline void iwl_disable_interrupts(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 869edc580ec..19a09310112 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -34,22 +34,23 @@
#include "iwl-dev.h"
#include "iwl-debug.h"
#include "iwl-devtrace.h"
+#include "iwl-bus.h"
static inline void iwl_write8(struct iwl_priv *priv, u32 ofs, u8 val)
{
trace_iwlwifi_dev_iowrite8(priv, ofs, val);
- iowrite8(val, priv->hw_base + ofs);
+ bus_write8(priv->bus, ofs, val);
}
static inline void iwl_write32(struct iwl_priv *priv, u32 ofs, u32 val)
{
trace_iwlwifi_dev_iowrite32(priv, ofs, val);
- iowrite32(val, priv->hw_base + ofs);
+ bus_write32(priv->bus, ofs, val);
}
static inline u32 iwl_read32(struct iwl_priv *priv, u32 ofs)
{
- u32 val = ioread32(priv->hw_base + ofs);
+ u32 val = bus_read32(priv->bus, ofs);
trace_iwlwifi_dev_ioread32(priv, ofs, val);
return val;
}
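
These accessors now go through the new bus abstraction instead of touching
priv->hw_base directly. iwl-bus.h itself is not part of this diff, so the
following is only a sketch of the indirection it presumably provides: thin
inlines dispatching through the iwl_bus_ops table that iwl-pci.c registers
below.

	/* Assumed shape of the iwl-bus.h helpers (not shown in this diff). */
	static inline void bus_write8(struct iwl_bus *bus, u32 ofs, u8 val)
	{
		bus->ops->write8(bus, ofs, val);
	}

	static inline void bus_write32(struct iwl_bus *bus, u32 ofs, u32 val)
	{
		bus->ops->write32(bus, ofs, val);
	}

	static inline u32 bus_read32(struct iwl_bus *bus, u32 ofs)
	{
		return bus->ops->read32(bus, ofs);
	}
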
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 7c23beb49d7..a67ae56d546 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -28,8 +28,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
@@ -40,13 +38,9 @@
#include "iwl-dev.h"
#include "iwl-core.h"
+#include "iwl-agn.h"
#include "iwl-io.h"
-
-/* default: IWL_LED_BLINK(0) using blinking index table */
-static int led_mode;
-module_param(led_mode, int, S_IRUGO);
-MODULE_PARM_DESC(led_mode, "0=system default, "
- "1=On(RF On)/Off(RF Off), 2=blinking");
+#include "iwl-trans.h"
/* Throughput OFF time(ms) ON time (ms)
* >300 25 25
@@ -118,7 +112,7 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
- return iwl_send_cmd(priv, &cmd);
+ return trans_send_cmd(&priv->trans, &cmd);
}
/* Set led pattern command */
@@ -181,7 +175,7 @@ static int iwl_led_blink_set(struct led_classdev *led_cdev,
void iwl_leds_init(struct iwl_priv *priv)
{
- int mode = led_mode;
+ int mode = iwlagn_mod_params.led_mode;
int ret;
if (mode == IWL_LED_DEFAULT)
@@ -209,7 +203,8 @@ void iwl_leds_init(struct iwl_priv *priv)
break;
}
- ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
+ ret = led_classdev_register(priv->bus->dev,
+ &priv->led);
if (ret) {
kfree(priv->led.name);
return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
new file mode 100644
index 00000000000..2fdbffa079c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -0,0 +1,564 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+
+#include "iwl-bus.h"
+#include "iwl-agn.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+struct iwl_pci_bus {
+ /* basic pci-network driver stuff */
+ struct pci_dev *pci_dev;
+
+ /* pci hardware address support */
+ void __iomem *hw_base;
+};
+
+#define IWL_BUS_GET_PCI_BUS(_iwl_bus) \
+ ((struct iwl_pci_bus *) ((_iwl_bus)->bus_specific))
+
+#define IWL_BUS_GET_PCI_DEV(_iwl_bus) \
+ ((IWL_BUS_GET_PCI_BUS(_iwl_bus))->pci_dev)
+
+static u16 iwl_pciexp_link_ctrl(struct iwl_bus *bus)
+{
+ int pos;
+ u16 pci_lnk_ctl;
+ struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
+
+ pos = pci_pcie_cap(pci_dev);
+ pci_read_config_word(pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+ return pci_lnk_ctl;
+}
+
+static bool iwl_pci_is_pm_supported(struct iwl_bus *bus)
+{
+ u16 lctl = iwl_pciexp_link_ctrl(bus);
+
+ return !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+}
+
+static void iwl_pci_apm_config(struct iwl_bus *bus)
+{
+ /*
+ * HW bug W/A for instability in PCIe bus L0S->L1 transition.
+ * Check if BIOS (or OS) enabled L1-ASPM on this device.
+ * If so (likely), disable L0S, so device moves directly L0->L1;
+ * costs negligible amount of power savings.
+ * If not (unlikely), enable L0S, so there is at least some
+ * power savings, even without L1.
+ */
+ u16 lctl = iwl_pciexp_link_ctrl(bus);
+
+ if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+ PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+ /* L1-ASPM enabled; disable(!) L0S */
+ iwl_set_bit(bus->drv_data, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+ dev_printk(KERN_INFO, bus->dev, "L1 Enabled; Disabling L0S\n");
+ } else {
+ /* L1-ASPM disabled; enable(!) L0S */
+ iwl_clear_bit(bus->drv_data, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+ dev_printk(KERN_INFO, bus->dev, "L1 Disabled; Enabling L0S\n");
+ }
+}
+
+static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data)
+{
+ bus->drv_data = drv_data;
+ pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), drv_data);
+}
+
+static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
+ int buf_len)
+{
+ struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
+
+ snprintf(buf, buf_len, "PCI ID: 0x%04X:0x%04X", pci_dev->device,
+ pci_dev->subsystem_device);
+}
+
+static void iwl_pci_write8(struct iwl_bus *bus, u32 ofs, u8 val)
+{
+ iowrite8(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
+}
+
+static void iwl_pci_write32(struct iwl_bus *bus, u32 ofs, u32 val)
+{
+ iowrite32(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
+}
+
+static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs)
+{
+ u32 val = ioread32(IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
+ return val;
+}
+
+static struct iwl_bus_ops pci_ops = {
+ .get_pm_support = iwl_pci_is_pm_supported,
+ .apm_config = iwl_pci_apm_config,
+ .set_drv_data = iwl_pci_set_drv_data,
+ .get_hw_id = iwl_pci_get_hw_id,
+ .write8 = iwl_pci_write8,
+ .write32 = iwl_pci_write32,
+ .read32 = iwl_pci_read32,
+};
+
+#define IWL_PCI_DEVICE(dev, subdev, cfg) \
+ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+ .driver_data = (kernel_ulong_t)&(cfg)
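
For example, the first entry of the table below,
{IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, expands to the following
pci_device_id initializer; iwl_pci_probe() later recovers the per-device
configuration from ent->driver_data:

	{
		.vendor = PCI_VENDOR_ID_INTEL, .device = 0x4232,
		.subvendor = PCI_ANY_ID, .subdevice = 0x1201,
		.driver_data = (kernel_ulong_t)&iwl5100_agn_cfg,
	},
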
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+ {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */
+
+/* 5300 Series WiFi */
+ {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */
+
+/* 5350 Series WiFi/WiMax */
+ {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */
+
+/* 5150 Series Wifi/WiMax */
+ {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
+
+ {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
+ {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */
+
+/* 6x00 Series */
+ {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
+ {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
+
+/* 6x05 Series */
+ {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
+
+/* 6x30 Series */
+ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)},
+
+/* 6x50 WiFi/WiMax Series */
+ {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
+
+/* 6150 WiFi/WiMax Series */
+ {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
+
+/* 1000 Series WiFi */
+ {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
+
+/* 100 Series WiFi */
+ {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
+ {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
+
+/* 130 Series WiFi */
+ {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
+
+/* 2x00 Series */
+ {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
+
+/* 2x30 Series */
+ {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
+
+/* 6x35 Series */
+ {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
+
+/* 105 Series */
+ {IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
+
+/* 135 Series */
+ {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)},
+
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+
+static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+ struct iwl_bus *bus;
+ struct iwl_pci_bus *pci_bus;
+ u16 pci_cmd;
+ int err;
+
+ bus = kzalloc(sizeof(*bus) + sizeof(*pci_bus), GFP_KERNEL);
+ if (!bus) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "Couldn't allocate iwl_pci_bus");
+ err = -ENOMEM;
+ goto out_no_pci;
+ }
+
+ pci_bus = IWL_BUS_GET_PCI_BUS(bus);
+ pci_bus->pci_dev = pdev;
+
+ /* W/A - seems to solve weird behavior. We need to remove this if we
+ * don't want to stay in L1 all the time. This wastes a lot of power */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_no_pci;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ /* both attempts failed: */
+ if (err) {
+ dev_printk(KERN_ERR, bus->dev,
+ "No suitable DMA available.\n");
+ goto out_pci_disable_device;
+ }
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_printk(KERN_ERR, bus->dev, "pci_request_regions failed");
+ goto out_pci_disable_device;
+ }
+
+ pci_bus->hw_base = pci_iomap(pdev, 0, 0);
+ if (!pci_bus->hw_base) {
+ dev_printk(KERN_ERR, bus->dev, "pci_iomap failed");
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "pci_resource_len = 0x%08llx\n",
+ (unsigned long long) pci_resource_len(pdev, 0));
+ dev_printk(KERN_INFO, &pdev->dev,
+ "pci_resource_base = %p\n", pci_bus->hw_base);
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "HW Revision ID = 0x%X\n", pdev->revision);
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ err = pci_enable_msi(pdev);
+ if (err) {
+ dev_printk(KERN_ERR, &pdev->dev, "pci_enable_msi failed");
+ goto out_iounmap;
+ }
+
+ /* TODO: Move this away, not needed if not MSI */
+ /* enable rfkill interrupt: hw bug w/a */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+ }
+
+ bus->dev = &pdev->dev;
+ bus->irq = pdev->irq;
+ bus->ops = &pci_ops;
+
+ err = iwl_probe(bus, cfg);
+ if (err)
+ goto out_disable_msi;
+ return 0;
+
+out_disable_msi:
+ pci_disable_msi(pdev);
+out_iounmap:
+ pci_iounmap(pdev, pci_bus->hw_base);
+out_pci_release_regions:
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+out_pci_disable_device:
+ pci_disable_device(pdev);
+out_no_pci:
+ kfree(bus);
+ return err;
+}
+
+static void __devexit iwl_pci_remove(struct pci_dev *pdev)
+{
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
+ struct iwl_bus *bus = priv->bus;
+ struct iwl_pci_bus *pci_bus = IWL_BUS_GET_PCI_BUS(bus);
+ struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
+
+ iwl_remove(priv);
+
+ pci_disable_msi(pci_dev);
+ pci_iounmap(pci_dev, pci_bus->hw_base);
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+ pci_set_drvdata(pci_dev, NULL);
+
+ kfree(bus);
+}
+
+#ifdef CONFIG_PM
+
+static int iwl_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
+
+ /* Before you put code here, think about WoWLAN. You cannot check here
+ * whether WoWLAN is enabled or not, and your code will run even if
+ * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
+ */
+
+ return iwl_suspend(priv);
+}
+
+static int iwl_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct iwl_priv *priv = pci_get_drvdata(pdev);
+
+ /* Before you put code here, think about WoWLAN. You cannot check here
+ * whether WoWLAN is enabled or not, and your code will run even if
+ * WoWLAN is enabled - the NIC may be alive.
+ */
+
+ /*
+ * We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ return iwl_resume(priv);
+}
+
+static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
+
+#define IWL_PM_OPS (&iwl_dev_pm_ops)
+
+#else
+
+#define IWL_PM_OPS NULL
+
+#endif
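
SIMPLE_DEV_PM_OPS() builds a struct dev_pm_ops that reuses the same two
callbacks for all system sleep transitions, so iwl_pci_suspend()/iwl_pci_resume()
also cover hibernation (freeze/thaw/poweroff/restore). The expansion is
roughly the following, assuming the usual definition of that macro:

	/* Approximate expansion of SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, ...). */
	const struct dev_pm_ops iwl_dev_pm_ops = {
		.suspend = iwl_pci_suspend, .resume = iwl_pci_resume,
		.freeze = iwl_pci_suspend, .thaw = iwl_pci_resume,
		.poweroff = iwl_pci_suspend, .restore = iwl_pci_resume,
	};
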
+
+static struct pci_driver iwl_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = iwl_hw_card_ids,
+ .probe = iwl_pci_probe,
+ .remove = __devexit_p(iwl_pci_remove),
+ .driver.pm = IWL_PM_OPS,
+};
+
+int __must_check iwl_pci_register_driver(void)
+{
+ int ret;
+ ret = pci_register_driver(&iwl_pci_driver);
+ if (ret)
+ pr_err("Unable to initialize PCI module\n");
+
+ return ret;
+}
+
+void iwl_pci_unregister_driver(void)
+{
+ pci_unregister_driver(&iwl_pci_driver);
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 595c930b28a..cd64df05f9e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -36,11 +36,13 @@
#include "iwl-eeprom.h"
#include "iwl-dev.h"
+#include "iwl-agn.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-commands.h"
#include "iwl-debug.h"
#include "iwl-power.h"
+#include "iwl-trans.h"
/*
* Setting power level allows the card to go to sleep when not busy.
@@ -51,16 +53,6 @@
*/
/*
- * For now, keep using power level 1 instead of automatically
- * adjusting ...
- */
-bool no_sleep_autoadjust = true;
-module_param(no_sleep_autoadjust, bool, S_IRUGO);
-MODULE_PARM_DESC(no_sleep_autoadjust,
- "don't automatically adjust sleep level "
- "according to maximum network latency");
-
-/*
* This defines the old power levels. They are still used by default
* (level 1) and for thermal throttle (levels 3 through 5)
*/
@@ -254,7 +246,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
}
}
- if (priv->power_data.pci_pm)
+ if (priv->power_data.bus_pm)
cmd->flags |= IWL_POWER_PCI_PM_MSK;
else
cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
@@ -269,7 +261,7 @@ static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv,
{
memset(cmd, 0, sizeof(*cmd));
- if (priv->power_data.pci_pm)
+ if (priv->power_data.bus_pm)
cmd->flags |= IWL_POWER_PCI_PM_MSK;
IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
@@ -305,7 +297,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
IWL_POWER_FAST_PD; /* no use seeing frames for others */
- if (priv->power_data.pci_pm)
+ if (priv->power_data.bus_pm)
cmd->flags |= IWL_POWER_PCI_PM_MSK;
if (priv->cfg->base_params->shadow_reg_enable)
@@ -343,7 +335,7 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
le32_to_cpu(cmd->sleep_interval[3]),
le32_to_cpu(cmd->sleep_interval[4]));
- return iwl_send_cmd_pdu(priv, POWER_TABLE_CMD,
+ return trans_send_cmd_pdu(&priv->trans, POWER_TABLE_CMD, CMD_SYNC,
sizeof(struct iwl_powertable_cmd), cmd);
}
@@ -355,7 +347,10 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
dtimper = priv->hw->conf.ps_dtim_period ?: 1;
- if (priv->hw->conf.flags & IEEE80211_CONF_IDLE)
+ if (priv->wowlan)
+ iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
+ else if (!priv->cfg->base_params->no_idle_support &&
+ priv->hw->conf.flags & IEEE80211_CONF_IDLE)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
else if (iwl_tt_is_low_power_state(priv)) {
/* in thermal throttling low power state */
@@ -367,9 +362,15 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
iwl_static_sleep_cmd(priv, cmd,
priv->power_data.debug_sleep_level_override,
dtimper);
- else if (no_sleep_autoadjust)
- iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_1, dtimper);
- else
+ else if (iwlagn_mod_params.no_sleep_autoadjust) {
+ if (iwlagn_mod_params.power_level > IWL_POWER_INDEX_1 &&
+ iwlagn_mod_params.power_level <= IWL_POWER_INDEX_5)
+ iwl_static_sleep_cmd(priv, cmd,
+ iwlagn_mod_params.power_level, dtimper);
+ else
+ iwl_static_sleep_cmd(priv, cmd,
+ IWL_POWER_INDEX_1, dtimper);
+ } else
iwl_power_fill_sleep_cmd(priv, cmd,
priv->hw->conf.dynamic_ps_timeout,
priv->hw->conf.max_sleep_period);
@@ -408,9 +409,9 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
clear_bit(STATUS_POWER_PMI, &priv->status);
- if (priv->cfg->ops->lib->update_chain_flags && update_chains)
- priv->cfg->ops->lib->update_chain_flags(priv);
- else if (priv->cfg->ops->lib->update_chain_flags)
+ if (update_chains)
+ iwl_update_chain_flags(priv);
+ else
IWL_DEBUG_POWER(priv,
"Cannot update the power, chain noise "
"calibration running: %d\n",
@@ -434,9 +435,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
{
- u16 lctl = iwl_pcie_link_ctl(priv);
-
- priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+ priv->power_data.bus_pm = bus_get_pm_support(priv->bus);
priv->power_data.debug_sleep_level_override = -1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index 59635d784e2..5f7b720cf1a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -43,7 +43,7 @@ struct iwl_power_mgr {
struct iwl_powertable_cmd sleep_cmd;
struct iwl_powertable_cmd sleep_cmd_next;
int debug_sleep_level_override;
- bool pci_pm;
+ bool bus_pm;
};
int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index f00d188b2cf..2f267b8aabb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -168,6 +168,7 @@
* the scheduler (especially for queue #4/#9, the command queue, otherwise
* the driver can't issue commands!):
*/
+#define SCD_MEM_LOWER_BOUND (0x0000)
/**
* Max Tx window size is the max number of contiguous TFDs that the scheduler
@@ -177,53 +178,61 @@
#define SCD_WIN_SIZE 64
#define SCD_FRAME_LIMIT 64
-#define IWL_SCD_TXFIFO_POS_TID (0)
-#define IWL_SCD_TXFIFO_POS_RA (4)
-#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
+#define SCD_TXFIFO_POS_TID (0)
+#define SCD_TXFIFO_POS_RA (4)
+#define SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
/* agn SCD */
-#define IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF (0)
-#define IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
-#define IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL (4)
-#define IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
-#define IWLAGN_SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
-
-#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
-#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
-#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
-#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
-#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
-#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
-#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
-#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
-
-#define IWLAGN_SCD_CONTEXT_DATA_OFFSET (0x600)
-#define IWLAGN_SCD_TX_STTS_BITMAP_OFFSET (0x7B1)
-#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET (0x7E0)
-
-#define IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(x)\
- (IWLAGN_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
-
-#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
- ((IWLAGN_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
-
-#define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv) \
+#define SCD_QUEUE_STTS_REG_POS_TXF (0)
+#define SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
+#define SCD_QUEUE_STTS_REG_POS_WSL (4)
+#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
+#define SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
+
+#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
+#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
+#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
+#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
+#define SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
+#define SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
+#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
+#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
+
+/* Context Data */
+#define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600)
+#define SCD_CONTEXT_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0)
+
+/* Tx status */
+#define SCD_TX_STTS_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0)
+#define SCD_TX_STTS_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0)
+
+/* Translation Data */
+#define SCD_TRANS_TBL_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0)
+#define SCD_TRANS_TBL_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x808)
+
+#define SCD_CONTEXT_QUEUE_OFFSET(x)\
+ (SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8))
+
+#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \
+ ((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
+
+#define SCD_QUEUECHAIN_SEL_ALL(priv) \
(((1<<(priv)->hw_params.max_txq_num) - 1) &\
(~(1<<(priv)->cmd_queue)))
-#define IWLAGN_SCD_BASE (PRPH_BASE + 0xa02c00)
-
-#define IWLAGN_SCD_SRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x0)
-#define IWLAGN_SCD_DRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x8)
-#define IWLAGN_SCD_AIT (IWLAGN_SCD_BASE + 0x0c)
-#define IWLAGN_SCD_TXFACT (IWLAGN_SCD_BASE + 0x10)
-#define IWLAGN_SCD_ACTIVE (IWLAGN_SCD_BASE + 0x14)
-#define IWLAGN_SCD_QUEUE_WRPTR(x) (IWLAGN_SCD_BASE + 0x18 + (x) * 4)
-#define IWLAGN_SCD_QUEUE_RDPTR(x) (IWLAGN_SCD_BASE + 0x68 + (x) * 4)
-#define IWLAGN_SCD_QUEUECHAIN_SEL (IWLAGN_SCD_BASE + 0xe8)
-#define IWLAGN_SCD_AGGR_SEL (IWLAGN_SCD_BASE + 0x248)
-#define IWLAGN_SCD_INTERRUPT_MASK (IWLAGN_SCD_BASE + 0x108)
-#define IWLAGN_SCD_QUEUE_STATUS_BITS(x) (IWLAGN_SCD_BASE + 0x10c + (x) * 4)
+#define SCD_BASE (PRPH_BASE + 0xa02c00)
+
+#define SCD_SRAM_BASE_ADDR (SCD_BASE + 0x0)
+#define SCD_DRAM_BASE_ADDR (SCD_BASE + 0x8)
+#define SCD_AIT (SCD_BASE + 0x0c)
+#define SCD_TXFACT (SCD_BASE + 0x10)
+#define SCD_ACTIVE (SCD_BASE + 0x14)
+#define SCD_QUEUE_WRPTR(x) (SCD_BASE + 0x18 + (x) * 4)
+#define SCD_QUEUE_RDPTR(x) (SCD_BASE + 0x68 + (x) * 4)
+#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
+#define SCD_AGGR_SEL (SCD_BASE + 0x248)
+#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
+#define SCD_QUEUE_STATUS_BITS(x) (SCD_BASE + 0x10c + (x) * 4)
/*********************** END TX SCHEDULER *************************************/
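
The renamed SCD macros above all follow one pattern: a fixed scheduler base plus a small per-queue stride (4 bytes per queue for the write/read pointers and status bits, 8 bytes per context entry, 2 bytes per translation-table entry rounded down to a 4-byte boundary). The standalone sketch below only illustrates that address arithmetic; the base values are placeholders, since PRPH_BASE and SCD_MEM_LOWER_BOUND are defined elsewhere in iwl-prph.h and are not shown here.

#include <stdio.h>
#include <stdint.h>

#define EX_SCD_BASE            0x00a02c00u                 /* placeholder for PRPH_BASE + 0xa02c00 */
#define EX_SCD_CONTEXT_LOWER   (EX_SCD_BASE + 0x600u)      /* placeholder for SCD_CONTEXT_MEM_LOWER_BOUND */
#define EX_SCD_TRANS_TBL_LOWER (EX_SCD_BASE + 0x7E0u)      /* placeholder for SCD_TRANS_TBL_MEM_LOWER_BOUND */

/* Same shape as SCD_QUEUE_WRPTR(x), SCD_CONTEXT_QUEUE_OFFSET(x) and
 * SCD_TRANS_TBL_OFFSET_QUEUE(x): a fixed base plus a per-queue stride. */
static uint32_t ex_queue_wrptr(unsigned q)      { return EX_SCD_BASE + 0x18 + q * 4; }
static uint32_t ex_context_offset(unsigned q)   { return EX_SCD_CONTEXT_LOWER + q * 8; }
static uint32_t ex_trans_tbl_offset(unsigned q) { return (EX_SCD_TRANS_TBL_LOWER + q * 2) & 0xfffc; }

int main(void)
{
	unsigned q;

	for (q = 0; q < 4; q++)
		printf("q=%u wrptr=0x%08x ctx=0x%08x trans=0x%08x\n", q,
		       (unsigned)ex_queue_wrptr(q),
		       (unsigned)ex_context_offset(q),
		       (unsigned)ex_trans_tbl_offset(q));
	return 0;
}
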
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index b774517aa9f..8e314003b63 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -41,183 +41,6 @@
#include "iwl-agn-calib.h"
#include "iwl-agn.h"
-/******************************************************************************
- *
- * RX path functions
- *
- ******************************************************************************/
-
-/*
- * Rx theory of operation
- *
- * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
- * each of which point to Receive Buffers to be filled by the NIC. These get
- * used not only for Rx frames, but for any command response or notification
- * from the NIC. The driver and NIC manage the Rx buffers by means
- * of indexes into the circular buffer.
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- * detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl_rx_queue_alloc() Allocates rx_free
- * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
- * queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
- * READ INDEX, detaching the SKB from the pool.
- * Moves the packet buffer from queue to rx_used.
- * Calls iwl_rx_queue_restock to refill any empty
- * slots.
- * ...
- *
- */
-
-/**
- * iwl_rx_queue_space - Return number of free slots available in queue.
- */
-int iwl_rx_queue_space(const struct iwl_rx_queue *q)
-{
- int s = q->read - q->write;
- if (s <= 0)
- s += RX_QUEUE_SIZE;
- /* keep some buffer to not confuse full and empty queue */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
-}
-
-/**
- * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
- */
-void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
-{
- unsigned long flags;
- u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
- u32 reg;
-
- spin_lock_irqsave(&q->lock, flags);
-
- if (q->need_update == 0)
- goto exit_unlock;
-
- if (priv->cfg->base_params->shadow_reg_enable) {
- /* shadow register enabled */
- /* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
- iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
- } else {
- /* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv,
- "Rx queue requesting wakeup,"
- " GP1 = 0x%x\n", reg);
- iwl_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
- }
-
- q->write_actual = (q->write & ~0x7);
- iwl_write_direct32(priv, rx_wrt_ptr_reg,
- q->write_actual);
-
- /* Else device is assumed to be awake */
- } else {
- /* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
- iwl_write_direct32(priv, rx_wrt_ptr_reg,
- q->write_actual);
- }
- }
- q->need_update = 0;
-
- exit_unlock:
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-int iwl_rx_queue_alloc(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- spin_lock_init(&rxq->lock);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
-
- /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
- GFP_KERNEL);
- if (!rxq->bd)
- goto err_bd;
-
- rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
- &rxq->rb_stts_dma, GFP_KERNEL);
- if (!rxq->rb_stts)
- goto err_rb;
-
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- rxq->need_update = 0;
- return 0;
-
-err_rb:
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
-err_bd:
- return -ENOMEM;
-}
/******************************************************************************
*
@@ -347,7 +170,7 @@ static bool iwl_good_ack_health(struct iwl_priv *priv,
int actual_delta, expected_delta, ba_timeout_delta;
struct statistics_tx *old;
- if (priv->_agn.agg_tids_count)
+ if (priv->agg_tids_count)
return true;
old = &priv->statistics.tx;
@@ -665,8 +488,8 @@ static void iwl_rx_statistics(struct iwl_priv *priv,
iwl_rx_calc_noise(priv);
queue_work(priv->workqueue, &priv->run_time_calib_work);
}
- if (priv->cfg->ops->lib->temp_ops.temperature && change)
- priv->cfg->ops->lib->temp_ops.temperature(priv);
+ if (priv->cfg->lib->temperature && change)
+ priv->cfg->lib->temperature(priv);
}
static void iwl_rx_reply_statistics(struct iwl_priv *priv,
@@ -769,8 +592,8 @@ static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- priv->_agn.last_phy_res_valid = true;
- memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
+ priv->last_phy_res_valid = true;
+ memcpy(&priv->last_phy_res, pkt->u.raw,
sizeof(struct iwl_rx_phy_res));
}
@@ -943,6 +766,47 @@ static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
return decrypt_out;
}
+/* Calc max signal level (dBm) among 3 possible receivers */
+static int iwlagn_calc_rssi(struct iwl_priv *priv,
+ struct iwl_rx_phy_res *rx_resp)
+{
+ /* data from PHY/DSP regarding signal strength, etc.,
+ * contents are always there, not configurable by host
+ */
+ struct iwlagn_non_cfg_phy *ncphy =
+ (struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+ u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
+ u8 agc;
+
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]);
+ agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the digital signal processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's automatic gain control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info.
+ */
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]);
+ rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >>
+ IWLAGN_OFDM_RSSI_A_BIT_POS;
+ rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >>
+ IWLAGN_OFDM_RSSI_B_BIT_POS;
+ val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]);
+ rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >>
+ IWLAGN_OFDM_RSSI_C_BIT_POS;
+
+ max_rssi = max_t(u32, rssi_a, rssi_b);
+ max_rssi = max_t(u32, max_rssi, rssi_c);
+
+ IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ rssi_a, rssi_b, rssi_c, max_rssi, agc);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
+ return max_rssi - agc - IWLAGN_RSSI_OFFSET;
+}
+
/* Called for REPLY_RX (legacy ABG frames), or
* REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
static void iwl_rx_reply_rx(struct iwl_priv *priv,
@@ -977,11 +841,11 @@ static void iwl_rx_reply_rx(struct iwl_priv *priv,
phy_res->cfg_phy_cnt + len);
ampdu_status = le32_to_cpu(rx_pkt_status);
} else {
- if (!priv->_agn.last_phy_res_valid) {
+ if (!priv->last_phy_res_valid) {
IWL_ERR(priv, "MPDU frame without cached PHY data\n");
return;
}
- phy_res = &priv->_agn.last_phy_res;
+ phy_res = &priv->last_phy_res;
amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
len = le16_to_cpu(amsdu->byte_count);
@@ -1024,7 +888,7 @@ static void iwl_rx_reply_rx(struct iwl_priv *priv,
priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
/* Find max signal strength (dBm) among 3 antenna/receiver chains */
- rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res);
+ rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
iwl_dbg_log_rx_data_frame(priv, len, header);
IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
@@ -1102,6 +966,64 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
/* block ack */
handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
- /* Set up hardware specific Rx handlers */
- priv->cfg->ops->lib->rx_handler_setup(priv);
+ /* init calibration handlers */
+ priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
+ iwlagn_rx_calib_result;
+ priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
+
+ /* set up notification wait support */
+ spin_lock_init(&priv->notif_wait_lock);
+ INIT_LIST_HEAD(&priv->notif_waits);
+ init_waitqueue_head(&priv->notif_waitq);
+
+ /* Set up BT Rx handlers */
+ if (priv->cfg->lib->bt_rx_handler_setup)
+ priv->cfg->lib->bt_rx_handler_setup(priv);
+
+}
+
+void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ /*
+ * Do the notification wait before RX handlers so
+ * even if the RX handler consumes the RXB we have
+ * access to it in the notification wait entry.
+ */
+ if (!list_empty(&priv->notif_waits)) {
+ struct iwl_notification_wait *w;
+
+ spin_lock(&priv->notif_wait_lock);
+ list_for_each_entry(w, &priv->notif_waits, list) {
+ if (w->cmd != pkt->hdr.cmd)
+ continue;
+ IWL_DEBUG_RX(priv,
+ "Notif: %s, 0x%02x - wake the callers up\n",
+ get_cmd_string(pkt->hdr.cmd),
+ pkt->hdr.cmd);
+ w->triggered = true;
+ if (w->fn)
+ w->fn(priv, pkt, w->fn_data);
+ }
+ spin_unlock(&priv->notif_wait_lock);
+
+ wake_up_all(&priv->notif_waitq);
+ }
+
+ if (priv->pre_rx_handler)
+ priv->pre_rx_handler(priv, rxb);
+
+ /* Based on type of command response or notification,
+ * handle those that need handling via function in
+ * rx_handlers table. See iwl_setup_rx_handlers() */
+ if (priv->rx_handlers[pkt->hdr.cmd]) {
+ priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
+ priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
+ } else {
+ /* No handling needed */
+ IWL_DEBUG_RX(priv,
+ "No handler needed for %s, 0x%02x\n",
+ get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ }
}
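
iwl_rx_dispatch() above routes every received packet by indexing priv->rx_handlers[] with the 8-bit command ID from the packet header, counting hits in isr_stats and logging a debug message when no handler is registered. A minimal, self-contained sketch of that dispatch-table pattern follows; the struct, handler and command ID used here are made up for illustration and are not the driver's own types.

#include <stdio.h>
#include <stdint.h>

struct ex_pkt { uint8_t cmd; /* payload omitted */ };

typedef void (*ex_handler_t)(const struct ex_pkt *pkt);

static ex_handler_t ex_handlers[256];      /* one slot per 8-bit command ID */
static unsigned ex_handler_hits[256];      /* mirrors the per-command hit counters */

static void ex_handle_statistics(const struct ex_pkt *pkt)
{
	printf("statistics notification, cmd=0x%02x\n", pkt->cmd);
}

static void ex_dispatch(const struct ex_pkt *pkt)
{
	if (ex_handlers[pkt->cmd]) {
		ex_handler_hits[pkt->cmd]++;
		ex_handlers[pkt->cmd](pkt);
	} else {
		printf("no handler needed for cmd 0x%02x\n", pkt->cmd);
	}
}

int main(void)
{
	struct ex_pkt stats = { .cmd = 0x20 };     /* arbitrary command IDs for illustration */
	struct ex_pkt unknown = { .cmd = 0x42 };

	ex_handlers[0x20] = ex_handle_statistics;  /* register one handler */
	ex_dispatch(&stats);
	ex_dispatch(&unknown);
	return 0;
}
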
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index d60d630cb93..dd6937e9705 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -36,6 +36,8 @@
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
+#include "iwl-agn.h"
+#include "iwl-trans.h"
/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
* sending probe req. This should be set long enough to hear probe responses
@@ -60,7 +62,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
struct iwl_rx_packet *pkt;
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_ABORT_CMD,
- .flags = CMD_WANT_SKB,
+ .flags = CMD_SYNC | CMD_WANT_SKB,
};
/* Exit instantly with error when device is not ready
@@ -73,7 +75,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
test_bit(STATUS_EXIT_PENDING, &priv->status))
return -EIO;
- ret = iwl_send_cmd_sync(priv, &cmd);
+ ret = trans_send_cmd(&priv->trans, &cmd);
if (ret)
return ret;
@@ -348,9 +350,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
lockdep_assert_held(&priv->mutex);
- if (WARN_ON(!priv->cfg->ops->utils->request_scan))
- return -EOPNOTSUPP;
-
cancel_delayed_work(&priv->scan_check);
if (!iwl_is_ready_rf(priv)) {
@@ -379,7 +378,7 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
priv->scan_start = jiffies;
priv->scan_band = band;
- ret = priv->cfg->ops->utils->request_scan(priv, vif);
+ ret = iwlagn_request_scan(priv, vif);
if (ret) {
clear_bit(STATUS_SCANNING, &priv->status);
priv->scan_type = IWL_SCAN_NORMAL;
@@ -566,10 +565,10 @@ static void iwl_bg_scan_completed(struct work_struct *work)
goto out_settings;
}
- if (priv->scan_type == IWL_SCAN_OFFCH_TX && priv->_agn.offchan_tx_skb) {
+ if (priv->scan_type == IWL_SCAN_OFFCH_TX && priv->offchan_tx_skb) {
ieee80211_tx_status_irqsafe(priv->hw,
- priv->_agn.offchan_tx_skb);
- priv->_agn.offchan_tx_skb = NULL;
+ priv->offchan_tx_skb);
+ priv->offchan_tx_skb = NULL;
}
if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
@@ -600,14 +599,7 @@ out_settings:
if (!iwl_is_ready_rf(priv))
goto out;
- /*
- * We do not commit power settings while scan is pending,
- * do it now if the settings changed.
- */
- iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
- iwl_set_tx_power(priv, priv->tx_power_next, false);
-
- priv->cfg->ops->utils->post_scan(priv);
+ iwlagn_post_scan(priv);
out:
mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 7df2814fd4f..1ef3b7106ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -35,6 +35,8 @@
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
+#include "iwl-trans.h"
+#include "iwl-agn.h"
/* priv->sta_lock must be held */
static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
@@ -132,6 +134,16 @@ static void iwl_add_sta_callback(struct iwl_priv *priv,
}
+static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
+{
+ u16 size = (u16)sizeof(struct iwl_addsta_cmd);
+ struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
+ memcpy(addsta, cmd, size);
+ /* reserved in 5000 */
+ addsta->rate_n_flags = cpu_to_le16(0);
+ return size;
+}
+
int iwl_send_add_sta(struct iwl_priv *priv,
struct iwl_addsta_cmd *sta, u8 flags)
{
@@ -155,8 +167,8 @@ int iwl_send_add_sta(struct iwl_priv *priv,
might_sleep();
}
- cmd.len[0] = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
- ret = iwl_send_cmd(priv, &cmd);
+ cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data);
+ ret = trans_send_cmd(&priv->trans, &cmd);
if (ret || (flags & CMD_ASYNC))
return ret;
@@ -412,7 +424,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
cmd.flags |= CMD_WANT_SKB;
- ret = iwl_send_cmd(priv, &cmd);
+ ret = trans_send_cmd(&priv->trans, &cmd);
if (ret)
return ret;
@@ -657,7 +669,7 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
}
-int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
+int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
{
int i;
@@ -781,7 +793,7 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
return -EINVAL;
if (is_lq_table_valid(priv, ctx, lq))
- ret = iwl_send_cmd(priv, &cmd);
+ ret = trans_send_cmd(&priv->trans, &cmd);
else
ret = -EINVAL;
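
The new iwlagn_build_addsta_hcmd() helper above simply copies the ADD_STA command into the host-command payload, zeroes rate_n_flags (reserved on 5000-series and later devices) and returns the payload length. Below is a hedged sketch of that copy-and-clear-reserved-field pattern, using a placeholder struct rather than the real iwl_addsta_cmd layout.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Placeholder layout; the real struct iwl_addsta_cmd lives in the driver
 * headers and is not reproduced here. */
struct ex_addsta_cmd {
	uint8_t  sta_id;
	uint16_t rate_n_flags;	/* reserved on newer devices */
	uint8_t  mode;
};

/* Same shape as iwlagn_build_addsta_hcmd(): copy the command into the
 * host-command payload, clear the reserved field, return the length. */
static uint16_t ex_build_addsta(const struct ex_addsta_cmd *cmd, void *data)
{
	uint16_t size = (uint16_t)sizeof(struct ex_addsta_cmd);
	struct ex_addsta_cmd *out = data;

	memcpy(out, cmd, size);
	out->rate_n_flags = 0;	/* reserved -- must go out as zero */
	return size;
}

int main(void)
{
	struct ex_addsta_cmd cmd = { .sta_id = 3, .rate_n_flags = 0xffff, .mode = 1 };
	struct ex_addsta_cmd payload;
	uint16_t len = ex_build_addsta(&cmd, &payload);

	printf("payload length %u, rate_n_flags cleared to %u\n",
	       (unsigned)len, (unsigned)payload.rate_n_flags);
	return 0;
}
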
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index ff64027ff4c..9a6768d6685 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -31,9 +31,6 @@
#include "iwl-dev.h"
-#define HW_KEY_DYNAMIC 0
-#define HW_KEY_DEFAULT 1
-
#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
@@ -47,7 +44,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
void iwl_clear_ucode_stations(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
void iwl_dealloc_bcast_stations(struct iwl_priv *priv);
-int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
+int iwl_get_free_ucode_key_offset(struct iwl_priv *priv);
int iwl_send_add_sta(struct iwl_priv *priv,
struct iwl_addsta_cmd *sta, u8 flags);
int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
index 69b7e6bf2d6..b11f60de4f1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sv-open.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
@@ -69,7 +69,6 @@
#include <net/mac80211.h>
#include <net/netlink.h>
-
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-debug.h"
@@ -77,7 +76,7 @@
#include "iwl-io.h"
#include "iwl-agn.h"
#include "iwl-testmode.h"
-
+#include "iwl-trans.h"
/* The TLVs used in the gnl message policy between the kernel module and
* user space application. iwl_testmode_gnl_msg_policy is to be carried
@@ -101,9 +100,12 @@ struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
[IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
[IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
- [IWL_TM_ATTR_TRACE_DATA] = { .type = NLA_UNSPEC, },
+ [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
+ [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
[IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
+
+ [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
};
/*
@@ -179,19 +181,19 @@ void iwl_testmode_init(struct iwl_priv *priv)
static void iwl_trace_cleanup(struct iwl_priv *priv)
{
- struct device *dev = &priv->pci_dev->dev;
-
if (priv->testmode_trace.trace_enabled) {
if (priv->testmode_trace.cpu_addr &&
priv->testmode_trace.dma_addr)
- dma_free_coherent(dev,
- TRACE_TOTAL_SIZE,
+ dma_free_coherent(priv->bus->dev,
+ priv->testmode_trace.total_size,
priv->testmode_trace.cpu_addr,
priv->testmode_trace.dma_addr);
priv->testmode_trace.trace_enabled = false;
priv->testmode_trace.cpu_addr = NULL;
priv->testmode_trace.trace_addr = NULL;
priv->testmode_trace.dma_addr = 0;
+ priv->testmode_trace.buff_size = 0;
+ priv->testmode_trace.total_size = 0;
}
}
@@ -229,6 +231,7 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
return -ENOMSG;
}
+ cmd.flags = CMD_ON_DEMAND;
cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
@@ -236,7 +239,7 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
" len %d\n", cmd.id, cmd.flags, cmd.len[0]);
/* ok, let's submit the command to ucode */
- return iwl_send_cmd(priv, &cmd);
+ return trans_send_cmd(&priv->trans, &cmd);
}
@@ -394,7 +397,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
- UCODE_SUBTYPE_INIT, -1);
+ IWL_UCODE_INIT);
if (status)
IWL_DEBUG_INFO(priv,
"Error loading init ucode: %d\n", status);
@@ -402,14 +405,13 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
iwl_testmode_cfg_init_calib(priv);
- iwlagn_stop_device(priv);
+ trans_stop_device(&priv->trans);
break;
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
status = iwlagn_load_ucode_wait_alive(priv,
&priv->ucode_rt,
- UCODE_SUBTYPE_REGULAR,
- UCODE_SUBTYPE_REGULAR_NEW);
+ IWL_UCODE_REGULAR);
if (status) {
IWL_DEBUG_INFO(priv,
"Error loading runtime ucode: %d\n", status);
@@ -450,7 +452,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
"Error finding fixrate setting\n");
return -ENOMSG;
}
- priv->dbg_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
+ priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
break;
default:
@@ -482,16 +484,29 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
struct iwl_priv *priv = hw->priv;
struct sk_buff *skb;
int status = 0;
- struct device *dev = &priv->pci_dev->dev;
+ struct device *dev = priv->bus->dev;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
if (priv->testmode_trace.trace_enabled)
return -EBUSY;
+ if (!tb[IWL_TM_ATTR_TRACE_SIZE])
+ priv->testmode_trace.buff_size = TRACE_BUFF_SIZE_DEF;
+ else
+ priv->testmode_trace.buff_size =
+ nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
+ if (!priv->testmode_trace.buff_size)
+ return -EINVAL;
+ if (priv->testmode_trace.buff_size < TRACE_BUFF_SIZE_MIN ||
+ priv->testmode_trace.buff_size > TRACE_BUFF_SIZE_MAX)
+ return -EINVAL;
+
+ priv->testmode_trace.total_size =
+ priv->testmode_trace.buff_size + TRACE_BUFF_PADD;
priv->testmode_trace.cpu_addr =
dma_alloc_coherent(dev,
- TRACE_TOTAL_SIZE,
+ priv->testmode_trace.total_size,
&priv->testmode_trace.dma_addr,
GFP_KERNEL);
if (!priv->testmode_trace.cpu_addr)
@@ -500,7 +515,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN(
priv->testmode_trace.cpu_addr, 0x100);
memset(priv->testmode_trace.trace_addr, 0x03B,
- TRACE_BUFF_SIZE);
+ priv->testmode_trace.buff_size);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
sizeof(priv->testmode_trace.dma_addr) + 20);
if (!skb) {
@@ -518,34 +533,14 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
"Error sending msg : %d\n",
status);
}
+ priv->testmode_trace.num_chunks =
+ DIV_ROUND_UP(priv->testmode_trace.buff_size,
+ TRACE_CHUNK_SIZE);
break;
case IWL_TM_CMD_APP2DEV_END_TRACE:
iwl_trace_cleanup(priv);
break;
-
- case IWL_TM_CMD_APP2DEV_READ_TRACE:
- if (priv->testmode_trace.trace_enabled &&
- priv->testmode_trace.trace_addr) {
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
- 20 + TRACE_BUFF_SIZE);
- if (skb == NULL) {
- IWL_DEBUG_INFO(priv,
- "Error allocating memory\n");
- return -ENOMEM;
- }
- NLA_PUT(skb, IWL_TM_ATTR_TRACE_DATA,
- TRACE_BUFF_SIZE,
- priv->testmode_trace.trace_addr);
- status = cfg80211_testmode_reply(skb);
- if (status < 0) {
- IWL_DEBUG_INFO(priv,
- "Error sending msg : %d\n", status);
- }
- } else
- return -EFAULT;
- break;
-
default:
IWL_DEBUG_INFO(priv, "Unknown testmode mem command ID\n");
return -ENOSYS;
@@ -560,6 +555,73 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct iwl_priv *priv = hw->priv;
+ int idx, length;
+
+ if (priv->testmode_trace.trace_enabled &&
+ priv->testmode_trace.trace_addr) {
+ idx = cb->args[4];
+ if (idx >= priv->testmode_trace.num_chunks)
+ return -ENOENT;
+ length = TRACE_CHUNK_SIZE;
+ if (((idx + 1) == priv->testmode_trace.num_chunks) &&
+ (priv->testmode_trace.buff_size % TRACE_CHUNK_SIZE))
+ length = priv->testmode_trace.buff_size %
+ TRACE_CHUNK_SIZE;
+
+ NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
+ priv->testmode_trace.trace_addr +
+ (TRACE_CHUNK_SIZE * idx));
+ idx++;
+ cb->args[4] = idx;
+ return 0;
+ } else
+ return -EFAULT;
+
+ nla_put_failure:
+ return -ENOBUFS;
+}
+
+/*
+ * This function handles the user application's request to switch uCode
+ * ownership.
+ *
+ * It retrieves the mandatory field IWL_TM_ATTR_UCODE_OWNER and decides
+ * who the current owner of the uCode is.
+ *
+ * If the current owner is IWL_OWNERSHIP_TM, then only host commands
+ * coming from testmode are delivered to the uCode; all other host
+ * commands are dropped.
+ *
+ * By default the driver is the owner of the uCode in normal operational
+ * mode.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+ struct iwl_priv *priv = hw->priv;
+ u8 owner;
+
+ if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
+ IWL_DEBUG_INFO(priv, "Error finding ucode owner\n");
+ return -ENOMSG;
+ }
+
+ owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
+ if ((owner == IWL_OWNERSHIP_DRIVER) || (owner == IWL_OWNERSHIP_TM))
+ priv->ucode_owner = owner;
+ else {
+ IWL_DEBUG_INFO(priv, "Invalid owner\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
/* The testmode gnl message handler that takes the gnl message from the
* user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
* invoke the corresponding handlers.
@@ -581,7 +643,7 @@ nla_put_failure:
*/
int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
{
- struct nlattr *tb[IWL_TM_ATTR_MAX - 1];
+ struct nlattr *tb[IWL_TM_ATTR_MAX];
struct iwl_priv *priv = hw->priv;
int result;
@@ -629,6 +691,11 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
result = iwl_testmode_trace(hw, tb);
break;
+ case IWL_TM_CMD_APP2DEV_OWNERSHIP:
+ IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
+ result = iwl_testmode_ownership(hw, tb);
+ break;
+
default:
IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
result = -ENOSYS;
@@ -638,3 +705,50 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
mutex_unlock(&priv->mutex);
return result;
}
+
+int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ void *data, int len)
+{
+ struct nlattr *tb[IWL_TM_ATTR_MAX];
+ struct iwl_priv *priv = hw->priv;
+ int result;
+ u32 cmd;
+
+ if (cb->args[3]) {
+ /* offset by 1 since commands start at 0 */
+ cmd = cb->args[3] - 1;
+ } else {
+ result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
+ iwl_testmode_gnl_msg_policy);
+ if (result) {
+ IWL_DEBUG_INFO(priv,
+ "Error parsing the gnl message : %d\n", result);
+ return result;
+ }
+
+ /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
+ if (!tb[IWL_TM_ATTR_COMMAND]) {
+ IWL_DEBUG_INFO(priv,
+ "Error finding testmode command type\n");
+ return -ENOMSG;
+ }
+ cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
+ cb->args[3] = cmd + 1;
+ }
+
+ /* in case multiple accesses to the device happens */
+ mutex_lock(&priv->mutex);
+ switch (cmd) {
+ case IWL_TM_CMD_APP2DEV_READ_TRACE:
+ IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
+ result = iwl_testmode_trace_dump(hw, tb, skb, cb);
+ break;
+ default:
+ result = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&priv->mutex);
+ return result;
+}
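
iwl_testmode_trace_dump() above streams the trace buffer to user space one chunk per dump callback, using cb->args[4] as the chunk cursor; every chunk is TRACE_CHUNK_SIZE bytes except possibly the last, which is truncated to buff_size % TRACE_CHUNK_SIZE. The standalone sketch below walks the same arithmetic with made-up sizes (the real chunk size is PAGE_SIZE - 1024).

#include <stdio.h>

/* Illustrative sizes; the driver derives these from TRACE_CHUNK_SIZE and
 * the user-requested trace buffer size. */
#define EX_CHUNK_SIZE  3072u
#define EX_BUFF_SIZE   10000u

#define EX_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned num_chunks = EX_DIV_ROUND_UP(EX_BUFF_SIZE, EX_CHUNK_SIZE);
	unsigned idx;

	/* Same walk as repeated netlink dump callbacks advancing cb->args[4]:
	 * every chunk is EX_CHUNK_SIZE long except possibly the last one. */
	for (idx = 0; idx < num_chunks; idx++) {
		unsigned length = EX_CHUNK_SIZE;

		if (idx + 1 == num_chunks && (EX_BUFF_SIZE % EX_CHUNK_SIZE))
			length = EX_BUFF_SIZE % EX_CHUNK_SIZE;

		printf("chunk %u: offset %u, length %u\n",
		       idx, idx * EX_CHUNK_SIZE, length);
	}
	return 0;
}
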
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index a88085e9b36..b980bda4b0f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -66,120 +66,161 @@
#include <linux/types.h>
-/* Commands from user space to kernel space(IWL_TM_CMD_ID_APP2DEV_XX) and
+/*
+ * Commands from user space to kernel space (IWL_TM_CMD_ID_APP2DEV_XX) and
 * from kernel space to user space (IWL_TM_CMD_ID_DEV2APP_XX).
- * The command ID is carried with IWL_TM_ATTR_COMMAND. There are three types of
- * of command from user space and two types of command from kernel space.
- * See below.
+ * The command ID is carried with IWL_TM_ATTR_COMMAND.
+ *
+ * @IWL_TM_CMD_APP2DEV_UCODE:
+ * commands from user application to the uCode,
+ * the actual uCode host command ID is carried with
+ * IWL_TM_ATTR_UCODE_CMD_ID
+ *
+ * @IWL_TM_CMD_APP2DEV_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_REG_WRITE32:
+ * @IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ * commands from user application to access registers
+ *
+ * @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name
+ * @IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: load initial uCode image
+ * @IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: perform calibration
+ * @IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: load runtime uCode image
+ * @IWL_TM_CMD_APP2DEV_GET_EEPROM: request EEPROM data
+ * @IWL_TM_CMD_APP2DEV_FIXRATE_REQ: set fix MCS
+ * commands from user space for pure driver level operations
+ *
+ * @IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
+ * @IWL_TM_CMD_APP2DEV_END_TRACE:
+ * @IWL_TM_CMD_APP2DEV_READ_TRACE:
+ * commands from user space for uCode trace operations
+ *
+ * @IWL_TM_CMD_DEV2APP_SYNC_RSP:
+ * commands from kernel space to carry the synchronous response
+ * to user application
+ * @IWL_TM_CMD_DEV2APP_UCODE_RX_PKT:
+ * commands from kernel space to multicast the spontaneous messages
+ * to user application
+ * @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
+ * commands from kernel space to carry the eeprom response
+ * to user application
+ * @IWL_TM_CMD_APP2DEV_OWNERSHIP:
+ * commands from the user application to change the ownership of the uCode;
+ * if the application has ownership, only host commands coming from
+ * testmode are delivered to the uCode. The default owner is the driver.
*/
enum iwl_tm_cmd_t {
- /* commands from user application to the uCode,
- * the actual uCode host command ID is carried with
- * IWL_TM_ATTR_UCODE_CMD_ID */
- IWL_TM_CMD_APP2DEV_UCODE = 1,
-
- /* commands from user applicaiton to access register */
- IWL_TM_CMD_APP2DEV_REG_READ32,
- IWL_TM_CMD_APP2DEV_REG_WRITE32,
- IWL_TM_CMD_APP2DEV_REG_WRITE8,
-
- /* commands fom user space for pure driver level operations */
- IWL_TM_CMD_APP2DEV_GET_DEVICENAME,
- IWL_TM_CMD_APP2DEV_LOAD_INIT_FW,
- IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB,
- IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW,
- IWL_TM_CMD_APP2DEV_GET_EEPROM,
- IWL_TM_CMD_APP2DEV_FIXRATE_REQ,
- /* if there is other new command for the driver layer operation,
- * append them here */
-
- /* commands fom user space for uCode trace operations */
- IWL_TM_CMD_APP2DEV_BEGIN_TRACE,
- IWL_TM_CMD_APP2DEV_END_TRACE,
- IWL_TM_CMD_APP2DEV_READ_TRACE,
-
- /* commands from kernel space to carry the synchronous response
- * to user application */
- IWL_TM_CMD_DEV2APP_SYNC_RSP,
-
- /* commands from kernel space to multicast the spontaneous messages
- * to user application */
- IWL_TM_CMD_DEV2APP_UCODE_RX_PKT,
-
- /* commands from kernel space to carry the eeprom response
- * to user application */
- IWL_TM_CMD_DEV2APP_EEPROM_RSP,
-
- IWL_TM_CMD_MAX,
+ IWL_TM_CMD_APP2DEV_UCODE = 1,
+ IWL_TM_CMD_APP2DEV_REG_READ32 = 2,
+ IWL_TM_CMD_APP2DEV_REG_WRITE32 = 3,
+ IWL_TM_CMD_APP2DEV_REG_WRITE8 = 4,
+ IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5,
+ IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6,
+ IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7,
+ IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW = 8,
+ IWL_TM_CMD_APP2DEV_GET_EEPROM = 9,
+ IWL_TM_CMD_APP2DEV_FIXRATE_REQ = 10,
+ IWL_TM_CMD_APP2DEV_BEGIN_TRACE = 11,
+ IWL_TM_CMD_APP2DEV_END_TRACE = 12,
+ IWL_TM_CMD_APP2DEV_READ_TRACE = 13,
+ IWL_TM_CMD_DEV2APP_SYNC_RSP = 14,
+ IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15,
+ IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16,
+ IWL_TM_CMD_APP2DEV_OWNERSHIP = 17,
+ IWL_TM_CMD_MAX = 18,
};
+/*
+ * Attribute field in testmode command
+ * See enum iwl_tm_cmd_t.
+ *
+ * @IWL_TM_ATTR_NOT_APPLICABLE:
+ * The attribute is not applicable or invalid
+ * @IWL_TM_ATTR_COMMAND:
+ * From user space to kernel space:
+ * the command is destined for the ucode, the driver, or a register;
+ * From kernel space to user space:
+ * the command either carries a synchronous response or a spontaneous
+ * message multicast from the device;
+ *
+ * @IWL_TM_ATTR_UCODE_CMD_ID:
+ * @IWL_TM_ATTR_UCODE_CMD_DATA:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE,
+ * The mandatory fields are :
+ * IWL_TM_ATTR_UCODE_CMD_ID for recognizable command ID;
+ * IWL_TM_ATTR_COMMAND_FLAG for the flags of the commands;
+ * The optional fields are:
+ * IWL_TM_ATTR_UCODE_CMD_DATA for the actual command payload
+ * to the ucode
+ *
+ * @IWL_TM_ATTR_REG_OFFSET:
+ * @IWL_TM_ATTR_REG_VALUE8:
+ * @IWL_TM_ATTR_REG_VALUE32:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_XXX,
+ * The mandatory fields are:
+ * IWL_TM_ATTR_REG_OFFSET for the offset of the target register;
+ * IWL_TM_ATTR_REG_VALUE8 or IWL_TM_ATTR_REG_VALUE32 for value
+ *
+ * @IWL_TM_ATTR_SYNC_RSP:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_SYNC_RSP,
+ * The mandatory fields are:
+ * IWL_TM_ATTR_SYNC_RSP for the data content responding to the user
+ * application command
+ *
+ * @IWL_TM_ATTR_UCODE_RX_PKT:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_UCODE_RX_PKT,
+ * The mandatory fields are:
+ * IWL_TM_ATTR_UCODE_RX_PKT for the data content multicast to the user
+ * application
+ *
+ * @IWL_TM_ATTR_EEPROM:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_EEPROM,
+ * The mandatory fields are:
+ * IWL_TM_ATTR_EEPROM for the data content responding to the user
+ * application
+ *
+ * @IWL_TM_ATTR_TRACE_ADDR:
+ * @IWL_TM_ATTR_TRACE_SIZE:
+ * @IWL_TM_ATTR_TRACE_DUMP:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_XXX_TRACE,
+ * The mandatory fields are:
+ * IWL_TM_ATTR_MEM_TRACE_ADDR for the trace address
+ * IWL_TM_ATTR_MEM_TRACE_SIZE for the trace buffer size
+ * IWL_TM_ATTR_MEM_TRACE_DUMP for the trace dump
+ *
+ * @IWL_TM_ATTR_FIXRATE:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_FIXRATE_REQ,
+ * The mandatory fields are:
+ * IWL_TM_ATTR_FIXRATE for the fixed rate
+ *
+ * @IWL_TM_ATTR_UCODE_OWNER:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP,
+ * The mandatory fields are:
+ * IWL_TM_ATTR_UCODE_OWNER for the new owner
+ */
enum iwl_tm_attr_t {
- IWL_TM_ATTR_NOT_APPLICABLE = 0,
-
- /* From user space to kernel space:
- * the command either destines to ucode, driver, or register;
- * See enum iwl_tm_cmd_t.
- *
- * From kernel space to user space:
- * the command either carries synchronous response,
- * or the spontaneous message multicast from the device;
- * See enum iwl_tm_cmd_t. */
- IWL_TM_ATTR_COMMAND,
-
- /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_UCODE,
- * The mandatory fields are :
- * IWL_TM_ATTR_UCODE_CMD_ID for recognizable command ID;
- * IWL_TM_ATTR_COMMAND_FLAG for the flags of the commands;
- * The optional fields are:
- * IWL_TM_ATTR_UCODE_CMD_DATA for the actual command payload
- * to the ucode */
- IWL_TM_ATTR_UCODE_CMD_ID,
- IWL_TM_ATTR_UCODE_CMD_DATA,
-
- /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_XXX,
- * The mandatory fields are:
- * IWL_TM_ATTR_REG_OFFSET for the offset of the target register;
- * IWL_TM_ATTR_REG_VALUE8 or IWL_TM_ATTR_REG_VALUE32 for value */
- IWL_TM_ATTR_REG_OFFSET,
- IWL_TM_ATTR_REG_VALUE8,
- IWL_TM_ATTR_REG_VALUE32,
-
- /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_SYNC_RSP,
- * The mandatory fields are:
- * IWL_TM_ATTR_SYNC_RSP for the data content responding to the user
- * application command */
- IWL_TM_ATTR_SYNC_RSP,
- /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_UCODE_RX_PKT,
- * The mandatory fields are:
- * IWL_TM_ATTR_UCODE_RX_PKT for the data content multicast to the user
- * application */
- IWL_TM_ATTR_UCODE_RX_PKT,
-
- /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_EEPROM,
- * The mandatory fields are:
- * IWL_TM_ATTR_EEPROM for the data content responging to the user
- * application */
- IWL_TM_ATTR_EEPROM,
-
- /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_XXX_TRACE,
- * The mandatory fields are:
- * IWL_TM_ATTR_MEM_TRACE_ADDR for the trace address
- */
- IWL_TM_ATTR_TRACE_ADDR,
- IWL_TM_ATTR_TRACE_DATA,
-
- /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_FIXRATE_REQ,
- * The mandatory fields are:
- * IWL_TM_ATTR_FIXRATE for the fixed rate
- */
- IWL_TM_ATTR_FIXRATE,
-
- IWL_TM_ATTR_MAX,
+ IWL_TM_ATTR_NOT_APPLICABLE = 0,
+ IWL_TM_ATTR_COMMAND = 1,
+ IWL_TM_ATTR_UCODE_CMD_ID = 2,
+ IWL_TM_ATTR_UCODE_CMD_DATA = 3,
+ IWL_TM_ATTR_REG_OFFSET = 4,
+ IWL_TM_ATTR_REG_VALUE8 = 5,
+ IWL_TM_ATTR_REG_VALUE32 = 6,
+ IWL_TM_ATTR_SYNC_RSP = 7,
+ IWL_TM_ATTR_UCODE_RX_PKT = 8,
+ IWL_TM_ATTR_EEPROM = 9,
+ IWL_TM_ATTR_TRACE_ADDR = 10,
+ IWL_TM_ATTR_TRACE_SIZE = 11,
+ IWL_TM_ATTR_TRACE_DUMP = 12,
+ IWL_TM_ATTR_FIXRATE = 13,
+ IWL_TM_ATTR_UCODE_OWNER = 14,
+ IWL_TM_ATTR_MAX = 15,
};
/* uCode trace buffer */
-#define TRACE_BUFF_SIZE 0x20000
+#define TRACE_BUFF_SIZE_MAX 0x200000
+#define TRACE_BUFF_SIZE_MIN 0x20000
+#define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN
#define TRACE_BUFF_PADD 0x2000
-#define TRACE_TOTAL_SIZE (TRACE_BUFF_SIZE + TRACE_BUFF_PADD)
+#define TRACE_CHUNK_SIZE (PAGE_SIZE - 1024)
#endif
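
With the trace buffer size now user-configurable, the driver clamps the request between TRACE_BUFF_SIZE_MIN and TRACE_BUFF_SIZE_MAX, over-allocates by TRACE_BUFF_PADD, and aligns the usable region to a 0x100 boundary with PTR_ALIGN (see iwl_testmode_trace() above). Below is a small sketch of that validate/pad/align sequence; the allocation address and the alignment helper are stand-ins, not the kernel APIs.

#include <stdio.h>
#include <stdint.h>

#define EX_TRACE_BUFF_SIZE_MAX  0x200000u
#define EX_TRACE_BUFF_SIZE_MIN  0x20000u
#define EX_TRACE_BUFF_PADD      0x2000u

/* Stand-in for PTR_ALIGN(): round an address up to the next 'align' boundary. */
static uintptr_t ex_align_up(uintptr_t addr, uintptr_t align)
{
	return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
	uint32_t buff_size = 0x30000;		/* would come from IWL_TM_ATTR_TRACE_SIZE */
	uint32_t total_size;
	uintptr_t cpu_addr, trace_addr;

	if (buff_size < EX_TRACE_BUFF_SIZE_MIN || buff_size > EX_TRACE_BUFF_SIZE_MAX) {
		fprintf(stderr, "requested trace size out of range\n");
		return 1;
	}

	/* Over-allocate by the padding so the usable region can be aligned. */
	total_size = buff_size + EX_TRACE_BUFF_PADD;
	cpu_addr = 0x12345678;			/* pretend DMA-coherent allocation */
	trace_addr = ex_align_up(cpu_addr, 0x100);

	printf("total %#x bytes, trace region starts at %#lx\n",
	       (unsigned)total_size, (unsigned long)trace_addr);
	return 0;
}
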
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h b/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
new file mode 100644
index 00000000000..b79330d8418
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
@@ -0,0 +1,82 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __iwl_trans_int_pcie_h__
+#define __iwl_trans_int_pcie_h__
+
+/* This file includes the declarations that are internal to the
+ * trans_pcie layer */
+
+/*****************************************************
+* RX
+******************************************************/
+void iwl_bg_rx_replenish(struct work_struct *data);
+void iwl_irq_tasklet(struct iwl_priv *priv);
+void iwlagn_rx_replenish(struct iwl_priv *priv);
+void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
+ struct iwl_rx_queue *q);
+
+/*****************************************************
+* ICT
+******************************************************/
+int iwl_reset_ict(struct iwl_priv *priv);
+void iwl_disable_ict(struct iwl_priv *priv);
+int iwl_alloc_isr_ict(struct iwl_priv *priv);
+void iwl_free_isr_ict(struct iwl_priv *priv);
+irqreturn_t iwl_isr_ict(int irq, void *data);
+
+
+/*****************************************************
+* TX / HCMD
+******************************************************/
+void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int index);
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset);
+int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
+ int count, int slots_num, u32 id);
+int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+int __must_check iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags,
+ u16 len, const void *data);
+void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
+void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt);
+int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+ u16 ssn_idx, u8 tx_fifo);
+void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
+ int txq_id, u32 index);
+void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry);
+void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
+ int frame_limit);
+
+#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
new file mode 100644
index 00000000000..47486029040
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
@@ -0,0 +1,979 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/gfp.h>
+
+#include "iwl-dev.h"
+#include "iwl-agn.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-helpers.h"
+#include "iwl-trans-int-pcie.h"
+
+/******************************************************************************
+ *
+ * RX path functions
+ *
+ ******************************************************************************/
+
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which point to Receive Buffers to be filled by the NIC. These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC. The driver and NIC manage the Rx buffers by means
+ * of indexes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two index registers for managing the Rx buffers.
+ *
+ * The READ index maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ index is managed by the firmware once the card is enabled.
+ *
+ * The WRITE index maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * INDEX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ index
+ * and fire the RX interrupt. The driver can then query the READ index and
+ * process as many packets as possible, moving the WRITE index forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ INDEX is updated (updating the
+ * 'processed' and 'read' driver indexes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ * detached from the iwl->rxq. The driver 'processed' index is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
+ * were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * iwl_rx_queue_alloc() Allocates rx_free
+ * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * iwl_rx_queue_restock
+ * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE index. If insufficient rx_free buffers
+ * are available, schedules iwl_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
+ * READ INDEX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Calls iwl_rx_queue_restock to refill any empty
+ * slots.
+ * ...
+ *
+ */
+
+/**
+ * iwl_rx_queue_space - Return number of free slots available in queue.
+ */
+static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
+{
+ int s = q->read - q->write;
+ if (s <= 0)
+ s += RX_QUEUE_SIZE;
+ /* keep some buffer to not confuse full and empty queue */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+
+/**
+ * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
+ struct iwl_rx_queue *q)
+{
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ if (q->need_update == 0)
+ goto exit_unlock;
+
+ if (priv->cfg->base_params->shadow_reg_enable) {
+ /* shadow register enabled */
+ /* Device expects a multiple of 8 */
+ q->write_actual = (q->write & ~0x7);
+ iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write_actual);
+ } else {
+ /* If power-saving is in use, make sure device is awake */
+ if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+ reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ IWL_DEBUG_INFO(priv,
+ "Rx queue requesting wakeup,"
+ " GP1 = 0x%x\n", reg);
+ iwl_set_bit(priv, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ goto exit_unlock;
+ }
+
+ q->write_actual = (q->write & ~0x7);
+ iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
+ q->write_actual);
+
+ /* Else device is assumed to be awake */
+ } else {
+ /* Device expects a multiple of 8 */
+ q->write_actual = (q->write & ~0x7);
+ iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
+ q->write_actual);
+ }
+ }
+ q->need_update = 0;
+
+ exit_unlock:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+
+/**
+ * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
+ dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32)(dma_addr >> 8));
+}
+
+/**
+ * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+ /* The overwritten rxb must be a used one */
+ rxb = rxq->queue[rxq->write];
+ BUG_ON(rxb && rxb->page);
+
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
+ rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(priv->workqueue, &priv->rx_replenish);
+
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ iwl_rx_queue_update_write_ptr(priv, rxq);
+ }
+}
+
+/**
+ * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via iwl_rx_queue_restock.
+ * This is called as a scheduled work item (except for during initialization)
+ */
+static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (priv->hw_params.rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+ "order: %d\n",
+ priv->hw_params.rx_page_order);
+
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+ IWL_CRIT(priv, "Failed to alloc_pages with %s."
+ "Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ?
+ "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, priv->hw_params.rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+ list_del(element);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ BUG_ON(rxb->page);
+ rxb->page = page;
+ /* Get physical address of the RB */
+ rxb->page_dma = dma_map_page(priv->bus->dev, page, 0,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ DMA_FROM_DEVICE);
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
+void iwlagn_rx_replenish(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ iwlagn_rx_allocate(priv, GFP_KERNEL);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ iwlagn_rx_queue_restock(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void iwlagn_rx_replenish_now(struct iwl_priv *priv)
+{
+ iwlagn_rx_allocate(priv, GFP_ATOMIC);
+
+ iwlagn_rx_queue_restock(priv);
+}
+
+void iwl_bg_rx_replenish(struct work_struct *data)
+{
+ struct iwl_priv *priv =
+ container_of(data, struct iwl_priv, rx_replenish);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ iwlagn_rx_replenish(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+/**
+ * iwl_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the priv->rx_handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+static void iwl_rx_handle(struct iwl_priv *priv)
+{
+ struct iwl_rx_mem_buffer *rxb;
+ struct iwl_rx_packet *pkt;
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ u32 r, i;
+ int reclaim;
+ unsigned long flags;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty;
+
+ /* uCode's read index (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+ i = rxq->read;
+
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
+
+ /* calculate total frames that need to be restocked after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+
+ while (i != r) {
+ int len;
+
+ rxb = rxq->queue[i];
+
+ /* If an RXB doesn't have a Rx queue slot associated with it,
+ * then a bug has been introduced in the queue refilling
+ * routines -- catch it here */
+ if (WARN_ON(rxb == NULL)) {
+ i = (i + 1) & RX_QUEUE_MASK;
+ continue;
+ }
+
+ rxq->queue[i] = NULL;
+
+ dma_unmap_page(priv->bus->dev, rxb->page_dma,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ DMA_FROM_DEVICE);
+ pkt = rxb_addr(rxb);
+
+ IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
+ i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+
+ len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+ trace_iwlwifi_dev_rx(priv, pkt, len);
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
+ (pkt->hdr.cmd != REPLY_RX) &&
+ (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
+ (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
+ (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
+ (pkt->hdr.cmd != REPLY_TX);
+
+ iwl_rx_dispatch(priv, rxb);
+
+ /*
+ * XXX: After here, we should always check rxb->page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some rx_handler might have
+ * already taken or freed the pages.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking
+ * trans_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxb->page)
+ iwl_tx_cmd_complete(priv, rxb);
+ else
+ IWL_WARN(priv, "Claim null rxb?\n");
+ }
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma = dma_map_page(priv->bus->dev, rxb->page,
+ 0, PAGE_SIZE << priv->hw_params.rx_page_order,
+ DMA_FROM_DEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ i = (i + 1) & RX_QUEUE_MASK;
+ /* If there are a lot of unused frames,
+ * restock the Rx queue so ucode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ iwlagn_rx_replenish_now(priv);
+ count = 0;
+ }
+ }
+ }
+
+ /* Backtrack one entry */
+ rxq->read = i;
+ if (fill_rx)
+ iwlagn_rx_replenish_now(priv);
+ else
+ iwlagn_rx_queue_restock(priv);
+}
+
+/* tasklet for iwlagn interrupt */
+void iwl_irq_tasklet(struct iwl_priv *priv)
+{
+ u32 inta = 0;
+ u32 handled = 0;
+ unsigned long flags;
+ u32 i;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u32 inta_mask;
+#endif
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+	/* Ack/clear/reset pending uCode interrupts.
+	 * Note: some bits in CSR_INT are "OR"ed from bits in CSR_FH_INT_STATUS.
+	 *
+	 * There is a hardware bug in the interrupt mask function: some
+	 * interrupts (e.g. CSR_INT_BIT_SCD) can still be generated even if
+	 * they are disabled in the CSR_INT_MASK register.  Furthermore, the
+	 * ICT interrupt handling mechanism has another bug that might cause
+	 * these unmasked interrupts to go undetected.  We work around both
+	 * hardware bugs here by ACKing all possible interrupts so that
+	 * interrupt coalescing can still be achieved.
+	 */
+ iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask);
+
+ inta = priv->inta;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
+ /* just for debug */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK);
+ IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ",
+ inta, inta_mask);
+ }
+#endif
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* the interrupts were saved in the local inta above;
+	 * now we can reset priv->inta */
+ priv->inta = 0;
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IWL_ERR(priv, "Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ iwl_disable_interrupts(priv);
+
+ priv->isr_stats.hw++;
+ iwl_irq_handle_error(priv);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ return;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
+ "the frame/frames.\n");
+ priv->isr_stats.sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ IWL_DEBUG_ISR(priv, "Alive interrupt\n");
+ priv->isr_stats.alive++;
+ }
+ }
+#endif
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* HW RF KILL switch toggled */
+ if (inta & CSR_INT_BIT_RF_KILL) {
+ int hw_rf_kill = 0;
+ if (!(iwl_read32(priv, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rf_kill = 1;
+
+ IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
+ hw_rf_kill ? "disable radio" : "enable radio");
+
+ priv->isr_stats.rfkill++;
+
+		/* The driver only loads the ucode once, when the interface is
+		 * set up, and it allows loading the ucode even if the radio
+		 * is killed.  Hence update the killswitch state here; the
+		 * rfkill handler will take care of restarting if needed.
+		 */
+ if (!test_bit(STATUS_ALIVE, &priv->status)) {
+ if (hw_rf_kill)
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
+ }
+
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+
+ /* Chip got too hot and stopped itself */
+ if (inta & CSR_INT_BIT_CT_KILL) {
+ IWL_ERR(priv, "Microcode CT kill error detected.\n");
+ priv->isr_stats.ctkill++;
+ handled |= CSR_INT_BIT_CT_KILL;
+ }
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IWL_ERR(priv, "Microcode SW error detected. "
+ " Restarting 0x%X.\n", inta);
+ priv->isr_stats.sw++;
+ iwl_irq_handle_error(priv);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /* uCode wakes up after power-down sleep */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
+ iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
+ for (i = 0; i < priv->hw_params.max_txq_num; i++)
+ iwl_txq_update_write_ptr(priv, &priv->txq[i]);
+
+ priv->isr_stats.wakeup++;
+
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notification), and other
+	 * notifications from uCode come through here. */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
+ CSR_INT_BIT_RX_PERIODIC)) {
+ IWL_DEBUG_ISR(priv, "Rx interrupt\n");
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ iwl_write32(priv, CSR_FH_INT_STATUS,
+ CSR_FH_INT_RX_MASK);
+ }
+ if (inta & CSR_INT_BIT_RX_PERIODIC) {
+ handled |= CSR_INT_BIT_RX_PERIODIC;
+ iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
+ }
+		/* Sending an RX interrupt requires many steps to be done in
+		 * the device:
+		 * 1- write the interrupt to the current index in the ICT table.
+		 * 2- DMA the RX frame.
+		 * 3- update the RX shared data to indicate the last write index.
+		 * 4- send the interrupt.
+		 * This can lead to an RX race: the driver may receive the RX
+		 * interrupt before the shared data reflects the changes;
+		 * the periodic interrupt will detect any dangling Rx activity.
+		 */
+
+ /* Disable periodic interrupt; we use it as just a one-shot. */
+ iwl_write8(priv, CSR_INT_PERIODIC_REG,
+ CSR_INT_PERIODIC_DIS);
+ iwl_rx_handle(priv);
+
+ /*
+ * Enable periodic interrupt in 8 msec only if we received
+ * real RX interrupt (instead of just periodic int), to catch
+ * any dangling Rx interrupt. If it was just the periodic
+ * interrupt, there was no dangling Rx activity, and no need
+ * to extend the periodic interrupt; one-shot is enough.
+ */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
+ iwl_write8(priv, CSR_INT_PERIODIC_REG,
+ CSR_INT_PERIODIC_ENA);
+
+ priv->isr_stats.rx++;
+ }
+
+ /* This "Tx" DMA channel is used only for loading uCode */
+ if (inta & CSR_INT_BIT_FH_TX) {
+ iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
+ IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
+ priv->isr_stats.tx++;
+ handled |= CSR_INT_BIT_FH_TX;
+ /* Wake up uCode load routine, now that load is complete */
+ priv->ucode_write_complete = 1;
+ wake_up_interruptible(&priv->wait_command_queue);
+ }
+
+ if (inta & ~handled) {
+ IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ priv->isr_stats.unhandled++;
+ }
+
+ if (inta & ~(priv->inta_mask)) {
+ IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
+ inta & ~priv->inta_mask);
+ }
+
+ /* Re-enable all interrupts */
+ /* only Re-enable if disabled by irq */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status))
+ iwl_enable_interrupts(priv);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(priv);
+}
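+
+/*
+ * Aside, for illustration only: the tasklet above does not read CSR_INT
+ * itself; it consumes the bits that iwl_isr()/iwl_isr_ict() accumulated into
+ * priv->inta under priv->lock, which is why priv->inta is copied into the
+ * local inta and then cleared before the bits are serviced.
+ */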
+
+/******************************************************************************
+ *
+ * ICT functions
+ *
+ ******************************************************************************/
+#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
+
+/* Free dram table */
+void iwl_free_isr_ict(struct iwl_priv *priv)
+{
+ if (priv->ict_tbl_vir) {
+ dma_free_coherent(priv->bus->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ priv->ict_tbl_vir,
+ priv->ict_tbl_dma);
+ priv->ict_tbl_vir = NULL;
+ memset(&priv->ict_tbl_dma, 0,
+ sizeof(priv->ict_tbl_dma));
+ memset(&priv->aligned_ict_tbl_dma, 0,
+ sizeof(priv->aligned_ict_tbl_dma));
+ }
+}
+
+
+/* Allocate the DRAM-shared table; it must be PAGE_SIZE aligned.
+ * Also reset all data related to the ICT table interrupt.
+ */
+int iwl_alloc_isr_ict(struct iwl_priv *priv)
+{
+
+	/* allocate the shared data table */
+ priv->ict_tbl_vir =
+ dma_alloc_coherent(priv->bus->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ &priv->ict_tbl_dma, GFP_KERNEL);
+ if (!priv->ict_tbl_vir)
+ return -ENOMEM;
+
+ /* align table to PAGE_SIZE boundary */
+ priv->aligned_ict_tbl_dma =
+ ALIGN(priv->ict_tbl_dma, PAGE_SIZE);
+
+ IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
+ (unsigned long long)priv->ict_tbl_dma,
+ (unsigned long long)priv->aligned_ict_tbl_dma,
+ (int)(priv->aligned_ict_tbl_dma -
+ priv->ict_tbl_dma));
+
+ priv->ict_tbl = priv->ict_tbl_vir +
+ (priv->aligned_ict_tbl_dma -
+ priv->ict_tbl_dma);
+
+ IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
+ priv->ict_tbl, priv->ict_tbl_vir,
+ (int)(priv->aligned_ict_tbl_dma -
+ priv->ict_tbl_dma));
+
+ /* reset table and index to all 0 */
+ memset(priv->ict_tbl_vir, 0,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
+ priv->ict_index = 0;
+
+ /* add periodic RX interrupt */
+ priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
+ return 0;
+}
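+
+/*
+ * Aside, for illustration only: the allocation above asks for
+ * (sizeof(u32) * ICT_COUNT) + PAGE_SIZE bytes so that a fully PAGE_SIZE
+ * aligned window of ICT_COUNT entries is guaranteed to fit inside the buffer
+ * wherever the coherent allocation lands; aligned_ict_tbl_dma and ict_tbl
+ * are simply that window's bus and CPU addresses.
+ */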
+
+/* The device is going up: inform it that it will use the ICT interrupt
+ * table, and tell the driver to start using ICT interrupts.
+ */
+int iwl_reset_ict(struct iwl_priv *priv)
+{
+ u32 val;
+ unsigned long flags;
+
+ if (!priv->ict_tbl_vir)
+ return 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_disable_interrupts(priv);
+
+ memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
+
+ val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;
+
+ val |= CSR_DRAM_INT_TBL_ENABLE;
+ val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+
+ IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
+ "aligned dma address %Lx\n",
+ val,
+ (unsigned long long)priv->aligned_ict_tbl_dma);
+
+ iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
+ priv->use_ict = true;
+ priv->ict_index = 0;
+ iwl_write32(priv, CSR_INT, priv->inta_mask);
+ iwl_enable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+/* The device is going down: disable ICT interrupt usage */
+void iwl_disable_ict(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->use_ict = false;
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static irqreturn_t iwl_isr(int irq, void *data)
+{
+ struct iwl_priv *priv = data;
+ u32 inta, inta_mask;
+ unsigned long flags;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u32 inta_fh;
+#endif
+ if (!priv)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
+ iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(priv, CSR_INT);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!inta) {
+ IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
+ goto none;
+ }
+
+ if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /* Hardware disappeared. It might have already raised
+ * an interrupt */
+ IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ goto unplugged;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
+ inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+ IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
+ "fh 0x%08x\n", inta, inta_mask, inta_fh);
+ }
+#endif
+
+ priv->inta |= inta;
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta))
+ tasklet_schedule(&priv->irq_tasklet);
+ else if (test_bit(STATUS_INT_ENABLED, &priv->status) &&
+ !priv->inta)
+ iwl_enable_interrupts(priv);
+
+ unplugged:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+
+ none:
+	/* Re-enable interrupts here since we don't have anything to service;
+	 * only re-enable if they were disabled by the irq and no tasklet was
+	 * scheduled. */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
+ iwl_enable_interrupts(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_NONE;
+}
+
+/* Interrupt handler using the ICT table.  With this handler the driver stops
+ * reading the INTA register to discover the device's interrupts, since that
+ * read is expensive.  Instead, the device writes its interrupts into the ICT
+ * DRAM table, increments its index and then fires an interrupt.  The driver
+ * ORs all ICT table entries from the current index up to the first zero
+ * entry; the result is the set of interrupts to service.  The driver then
+ * sets those entries back to 0 and updates the index.
+ */
+irqreturn_t iwl_isr_ict(int irq, void *data)
+{
+ struct iwl_priv *priv = data;
+ u32 inta, inta_mask;
+ u32 val = 0;
+ unsigned long flags;
+
+ if (!priv)
+ return IRQ_NONE;
+
+ /* dram interrupt table not set yet,
+ * use legacy interrupt.
+ */
+ if (!priv->use_ict)
+ return iwl_isr(irq, data);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here.
+ */
+ inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
+ iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!priv->ict_tbl[priv->ict_index]) {
+ IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
+ goto none;
+ }
+
+	/* read all non-zero entries, starting at ict_index */
+ while (priv->ict_tbl[priv->ict_index]) {
+
+ val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
+ IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
+ priv->ict_index,
+ le32_to_cpu(
+ priv->ict_tbl[priv->ict_index]));
+ priv->ict_tbl[priv->ict_index] = 0;
+ priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
+ ICT_COUNT);
+
+ }
+
+ /* We should not get this value, just ignore it. */
+ if (val == 0xffffffff)
+ val = 0;
+
+	/*
+	 * This is a workaround for a hardware bug: the bug may cause the Rx
+	 * bit (bit 15 before shifting it to 31) to clear when using interrupt
+	 * coalescing.  Fortunately, bits 18 and 19 stay set when this happens,
+	 * so we use them to decide on the real state of the Rx bit.
+	 * In other words, set bit 15 if bit 18 or bit 19 is set.
+	 */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
+ inta = (0xff & val) | ((0xff00 & val) << 16);
+ IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
+ inta, inta_mask, val);
+
+ inta &= priv->inta_mask;
+ priv->inta |= inta;
+
+ /* iwl_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta))
+ tasklet_schedule(&priv->irq_tasklet);
+ else if (test_bit(STATUS_INT_ENABLED, &priv->status) &&
+ !priv->inta) {
+		/* Re-enable interrupts here since they were disabled by this
+		 * handler and no tasklet was scheduled; if a tasklet had been
+		 * scheduled, it would re-enable them itself.
+		 */
+ iwl_enable_interrupts(priv);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_HANDLED;
+
+ none:
+	/* Re-enable interrupts here since we don't have anything to service;
+	 * only re-enable if they were disabled by the irq.
+	 */
+ if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
+ iwl_enable_interrupts(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return IRQ_NONE;
+}
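+
+/*
+ * Aside, for illustration only: each ICT entry packs the CSR_INT status into
+ * 16 bits, and the expansion used in iwl_isr_ict(),
+ *
+ *	inta = (0xff & val) | ((0xff00 & val) << 16);
+ *
+ * maps the low byte of the ICT value to CSR_INT bits 0-7 and the high byte to
+ * bits 24-31.  For example, an ICT value of 0x8042 becomes 0x80000042, i.e.
+ * CSR_INT_BIT_FH_RX plus two low status bits (assuming the usual layout in
+ * which CSR_INT_BIT_FH_RX is bit 31).
+ */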
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
index 686e176b5eb..a6b2b1db0b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -26,18 +26,58 @@
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
-
#include <linux/etherdevice.h>
-#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#include <net/mac80211.h>
-#include "iwl-eeprom.h"
+
#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
-#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
+#include "iwl-trans-int-pcie.h"
+
+/**
+ * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+ int write_ptr = txq->q.write_ptr;
+ int txq_id = txq->q.id;
+ u8 sec_ctl = 0;
+ u8 sta_id = 0;
+ u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+
+ WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
+ sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
+
+ switch (sec_ctl & TX_CMD_SEC_MSK) {
+ case TX_CMD_SEC_CCM:
+ len += CCMP_MIC_LEN;
+ break;
+ case TX_CMD_SEC_TKIP:
+ len += TKIP_ICV_LEN;
+ break;
+ case TX_CMD_SEC_WEP:
+ len += WEP_IV_LEN + WEP_ICV_LEN;
+ break;
+ }
+
+ bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].
+ tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
+}
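+
+/*
+ * Aside, for illustration only: the byte-count entry is written a second time
+ * at index TFD_QUEUE_SIZE_MAX + write_ptr when
+ * write_ptr < TFD_QUEUE_SIZE_BC_DUP, because the scheduler reads the
+ * byte-count table as a window that can run past the end of the ring;
+ * duplicating the first few entries after the wrap point keeps that window
+ * consistent.  The invalidation path below applies the same duplication.
+ */
+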
/**
* iwl_txq_update_write_ptr - Send new write index to hardware
@@ -126,9 +166,8 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
}
static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
- struct iwl_tfd *tfd)
+ struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
- struct pci_dev *dev = priv->pci_dev;
int i;
int num_tbs;
@@ -143,42 +182,44 @@ static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
/* Unmap tx_cmd */
if (num_tbs)
- pci_unmap_single(dev,
+ dma_unmap_single(priv->bus->dev,
dma_unmap_addr(meta, mapping),
dma_unmap_len(meta, len),
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
/* Unmap chunks, if any. */
for (i = 1; i < num_tbs; i++)
- pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
- iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
+ dma_unmap_single(priv->bus->dev, iwl_tfd_tb_get_addr(tfd, i),
+ iwl_tfd_tb_get_len(tfd, i), dma_dir);
}
/**
* iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
* @priv - driver private data
* @txq - tx queue
+ * @index - the index of the TFD to be freed
*
* Does NOT advance any TFD circular buffer read/write indexes
* Does NOT free the TFD itself (which is within circular buffer)
*/
-void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int index)
{
struct iwl_tfd *tfd_tmp = txq->tfds;
- int index = txq->q.read_ptr;
- iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]);
+ iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
+ DMA_TO_DEVICE);
/* free SKB */
if (txq->txb) {
struct sk_buff *skb;
- skb = txq->txb[txq->q.read_ptr].skb;
+ skb = txq->txb[index].skb;
/* can be called from irqs-disabled context */
if (skb) {
dev_kfree_skb_any(skb);
- txq->txb[txq->q.read_ptr].skb = NULL;
+ txq->txb[index].skb = NULL;
}
}
}
@@ -220,142 +261,6 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
return 0;
}
-/*
- * Tell nic where to find circular buffer of Tx Frame Descriptors for
- * given Tx queue, and enable the DMA channel used for that queue.
- *
- * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
- * channels supported in hardware.
- */
-static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- int txq_id = txq->q.id;
-
- /* Circular buffer (TFD queue in DRAM) physical base address */
- iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
- txq->q.dma_addr >> 8);
-
- return 0;
-}
-
-/**
- * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
- */
-void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
-
- if (q->n_bd == 0)
- return;
-
- while (q->write_ptr != q->read_ptr) {
- iwlagn_txq_free_tfd(priv, txq);
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
- }
-}
-
-/**
- * iwl_tx_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- iwl_tx_queue_unmap(priv, txq_id);
-
- /* De-alloc array of command/tx buffers */
- for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
- kfree(txq->cmd[i]);
-
- /* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd)
- dma_free_coherent(dev, priv->hw_params.tfd_size *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
-
- /* De-alloc array of per-TFD driver data */
- kfree(txq->txb);
- txq->txb = NULL;
-
- /* deallocate arrays */
- kfree(txq->cmd);
- kfree(txq->meta);
- txq->cmd = NULL;
- txq->meta = NULL;
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-
-/**
- * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
- */
-void iwl_cmd_queue_unmap(struct iwl_priv *priv)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct iwl_queue *q = &txq->q;
- int i;
-
- if (q->n_bd == 0)
- return;
-
- while (q->read_ptr != q->write_ptr) {
- i = get_cmd_index(q, q->read_ptr);
-
- if (txq->meta[i].flags & CMD_MAPPED) {
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(&txq->meta[i], mapping),
- dma_unmap_len(&txq->meta[i], len),
- PCI_DMA_BIDIRECTIONAL);
- txq->meta[i].flags = 0;
- }
-
- q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
- }
-}
-
-/**
- * iwl_cmd_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-void iwl_cmd_queue_free(struct iwl_priv *priv)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- iwl_cmd_queue_unmap(priv);
-
- /* De-alloc array of command/tx buffers */
- for (i = 0; i < TFD_CMD_SLOTS; i++)
- kfree(txq->cmd[i]);
-
- /* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd)
- dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
- txq->tfds, txq->q.dma_addr);
-
- /* deallocate arrays */
- kfree(txq->cmd);
- kfree(txq->meta);
- txq->cmd = NULL;
- txq->meta = NULL;
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-
/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
* DMA services
*
@@ -394,11 +299,10 @@ int iwl_queue_space(const struct iwl_queue *q)
return s;
}
-
/**
* iwl_queue_init - Initialize queue's high/low-water and read/write indexes
*/
-static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
+int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
int count, int slots_num, u32 id)
{
q->n_bd = count;
@@ -428,127 +332,185 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
return 0;
}
-/**
- * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
- */
-static int iwl_tx_queue_alloc(struct iwl_priv *priv,
- struct iwl_tx_queue *txq, u32 id)
+/* TODO: this function should NOT be exported from the trans module - export
+ * it only until the reclaim flow is brought into the transport module too.
+ * Add a declaration to make sparse happy. */
+void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
+
+void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq)
{
- struct device *dev = &priv->pci_dev->dev;
- size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
-
- /* Driver private data, only for Tx (not command) queues,
- * not shared with device. */
- if (id != priv->cmd_queue) {
- txq->txb = kzalloc(sizeof(txq->txb[0]) *
- TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
- if (!txq->txb) {
- IWL_ERR(priv, "kmalloc for auxiliary BD "
- "structures failed\n");
- goto error;
- }
- } else {
- txq->txb = NULL;
- }
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+ int txq_id = txq->q.id;
+ int read_ptr = txq->q.read_ptr;
+ u8 sta_id = 0;
+ __le16 bc_ent;
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device */
- txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
- GFP_KERNEL);
- if (!txq->tfds) {
- IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
- goto error;
- }
- txq->q.id = id;
+ WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != priv->cmd_queue)
+ sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
+
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
+ scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].
+ tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+}
+
+static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
+ u16 txq_id)
+{
+ u32 tbl_dw_addr;
+ u32 tbl_dw;
+ u16 scd_q2ratid;
+
+ scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+ tbl_dw_addr = priv->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
+
+ tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
+
+ if (txq_id & 0x1)
+ tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+ else
+ tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+ iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
return 0;
+}
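+
+/*
+ * Aside, for illustration only: each 32-bit word of the scheduler's
+ * queue-to-RA/TID translation table holds two queues' entries: even-numbered
+ * queues occupy the low 16 bits and odd-numbered queues the high 16 bits,
+ * which is why the read-modify-write above preserves the other half of
+ * tbl_dw.
+ */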
+
+static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
+{
+ /* Simply stop the queue, but don't change any configuration;
+ * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+ iwl_write_prph(priv,
+ SCD_QUEUE_STATUS_BITS(txq_id),
+ (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+ (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
- error:
- kfree(txq->txb);
- txq->txb = NULL;
+void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
+ int txq_id, u32 index)
+{
+ iwl_write_direct32(priv, HBUS_TARG_WRPTR,
+ (index & 0xff) | (txq_id << 8));
+ iwl_write_prph(priv, SCD_QUEUE_RDPTR(txq_id), index);
+}
+
+void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ int tx_fifo_id, int scd_retry)
+{
+ int txq_id = txq->q.id;
+ int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
- return -ENOMEM;
+ iwl_write_prph(priv, SCD_QUEUE_STATUS_BITS(txq_id),
+ (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
+ (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
+ SCD_QUEUE_STTS_REG_MSK);
+
+ txq->sched_retry = scd_retry;
+
+ IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
+ active ? "Activate" : "Deactivate",
+ scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}
-/**
- * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
- */
-int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
+void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
+ int frame_limit)
{
- int i, len;
- int ret;
+ int tx_fifo, txq_id, ssn_idx;
+ u16 ra_tid;
+ unsigned long flags;
+ struct iwl_tid_data *tid_data;
- txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num,
- GFP_KERNEL);
- txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num,
- GFP_KERNEL);
+ if (WARN_ON(sta_id == IWL_INVALID_STATION))
+ return;
+ if (WARN_ON(tid >= MAX_TID_COUNT))
+ return;
- if (!txq->meta || !txq->cmd)
- goto out_free_arrays;
+ spin_lock_irqsave(&priv->sta_lock, flags);
+ tid_data = &priv->stations[sta_id].tid[tid];
+ ssn_idx = SEQ_TO_SN(tid_data->seq_number);
+ txq_id = tid_data->agg.txq_id;
+ tx_fifo = tid_data->agg.tx_fifo;
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
- len = sizeof(struct iwl_device_cmd);
- for (i = 0; i < slots_num; i++) {
- txq->cmd[i] = kmalloc(len, GFP_KERNEL);
- if (!txq->cmd[i])
- goto err;
- }
+ ra_tid = BUILD_RAxTID(sta_id, tid);
- /* Alloc driver data array and TFD circular buffer */
- ret = iwl_tx_queue_alloc(priv, txq, txq_id);
- if (ret)
- goto err;
+ spin_lock_irqsave(&priv->lock, flags);
- txq->need_update = 0;
+ /* Stop this Tx queue before configuring it */
+ iwlagn_tx_queue_stop_scheduler(priv, txq_id);
- /*
- * For the default queues 0-3, set up the swq_id
- * already -- all others need to get one later
- * (if they need one at all).
- */
- if (txq_id < 4)
- iwl_set_swq_id(txq, txq_id, txq_id);
+ /* Map receiver-address / traffic-ID to this queue */
+ iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
- /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
- * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
- BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+ /* Set this queue as a chain-building queue */
+ iwl_set_bits_prph(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id));
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
- if (ret)
- return ret;
+ /* enable aggregations for the queue */
+ iwl_set_bits_prph(priv, SCD_AGGR_SEL, (1<<txq_id));
- /* Tell device where to find queue */
- iwlagn_tx_queue_init(priv, txq);
+ /* Place first TFD at index corresponding to start sequence number.
+ * Assumes that ssn_idx is valid (!= 0xFFF) */
+ priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
- return 0;
-err:
- for (i = 0; i < slots_num; i++)
- kfree(txq->cmd[i]);
-out_free_arrays:
- kfree(txq->meta);
- kfree(txq->cmd);
-
- return -ENOMEM;
+ /* Set up Tx window size and frame limit for this queue */
+ iwl_write_targ_mem(priv, priv->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
+ sizeof(u32),
+ ((frame_limit <<
+ SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+ SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ ((frame_limit <<
+ SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+
+ iwl_set_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
+
+ /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+ iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
}
-void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
+int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
+ u16 ssn_idx, u8 tx_fifo)
{
- int actual_slots = slots_num;
+ if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IWLAGN_FIRST_AMPDU_QUEUE +
+ priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ IWL_ERR(priv,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+ IWLAGN_FIRST_AMPDU_QUEUE +
+ priv->cfg->base_params->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
- if (txq_id == priv->cmd_queue)
- actual_slots++;
+ iwlagn_tx_queue_stop_scheduler(priv, txq_id);
- memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+ iwl_clear_bits_prph(priv, SCD_AGGR_SEL, (1 << txq_id));
- txq->need_update = 0;
+ priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+	/* assumes that ssn_idx is valid (!= 0xFFF) */
+ iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+ iwl_clear_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_txq_ctx_deactivate(priv, txq_id);
+ iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
- /* Tell device where to find queue */
- iwlagn_tx_queue_init(priv, txq);
+ return 0;
}
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -562,7 +524,7 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 * failed. On success, it returns the index (> 0) of the command in the
* command queue.
*/
-int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
struct iwl_queue *q = &txq->q;
@@ -587,6 +549,12 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
return -EIO;
}
+ if ((priv->ucode_owner == IWL_OWNERSHIP_TM) &&
+ !(cmd->flags & CMD_ON_DEMAND)) {
+ IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
+ return -EIO;
+ }
+
copy_size = sizeof(out_cmd->hdr);
cmd_size = sizeof(out_cmd->hdr);
@@ -640,11 +608,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
out_cmd = txq->cmd[idx];
out_meta = &txq->meta[idx];
- if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
- return -ENOSPC;
- }
-
memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
if (cmd->flags & CMD_WANT_SKB)
out_meta->source = cmd;
@@ -677,9 +640,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
q->write_ptr, idx, priv->cmd_queue);
- phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
- copy_size, PCI_DMA_BIDIRECTIONAL);
- if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
+ phys_addr = dma_map_single(priv->bus->dev, &out_cmd->hdr, copy_size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
idx = -ENOMEM;
goto out;
}
@@ -699,11 +662,12 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
continue;
if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
continue;
- phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
- cmd->len[i], PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
+ phys_addr = dma_map_single(priv->bus->dev, (void *)cmd->data[i],
+ cmd->len[i], DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(priv->bus->dev, phys_addr)) {
iwlagn_unmap_tfd(priv, out_meta,
- &txq->tfds[q->write_ptr]);
+ &txq->tfds[q->write_ptr],
+ DMA_BIDIRECTIONAL);
idx = -ENOMEM;
goto out;
}
@@ -717,7 +681,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
#endif
}
- out_meta->flags = cmd->flags | CMD_MAPPED;
+ out_meta->flags = cmd->flags;
txq->need_update = 1;
@@ -753,9 +717,9 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
int nfreed = 0;
if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
- "is out of range [0-%d] %d %d.\n", txq_id,
- idx, q->n_bd, q->write_ptr, q->read_ptr);
+ IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
+ "index %d is out of range [0-%d] %d %d.\n", __func__,
+ txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
return;
}
@@ -807,7 +771,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index];
- iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]);
+ iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
@@ -827,8 +791,246 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
wake_up_interruptible(&priv->wait_command_queue);
}
- /* Mark as unmapped */
meta->flags = 0;
spin_unlock_irqrestore(&priv->hcmd_lock, flags);
}
+
+const char *get_cmd_string(u8 cmd)
+{
+ switch (cmd) {
+ IWL_CMD(REPLY_ALIVE);
+ IWL_CMD(REPLY_ERROR);
+ IWL_CMD(REPLY_RXON);
+ IWL_CMD(REPLY_RXON_ASSOC);
+ IWL_CMD(REPLY_QOS_PARAM);
+ IWL_CMD(REPLY_RXON_TIMING);
+ IWL_CMD(REPLY_ADD_STA);
+ IWL_CMD(REPLY_REMOVE_STA);
+ IWL_CMD(REPLY_REMOVE_ALL_STA);
+ IWL_CMD(REPLY_TXFIFO_FLUSH);
+ IWL_CMD(REPLY_WEPKEY);
+ IWL_CMD(REPLY_TX);
+ IWL_CMD(REPLY_LEDS_CMD);
+ IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
+ IWL_CMD(COEX_PRIORITY_TABLE_CMD);
+ IWL_CMD(COEX_MEDIUM_NOTIFICATION);
+ IWL_CMD(COEX_EVENT_CMD);
+ IWL_CMD(REPLY_QUIET_CMD);
+ IWL_CMD(REPLY_CHANNEL_SWITCH);
+ IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
+ IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
+ IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
+ IWL_CMD(POWER_TABLE_CMD);
+ IWL_CMD(PM_SLEEP_NOTIFICATION);
+ IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
+ IWL_CMD(REPLY_SCAN_CMD);
+ IWL_CMD(REPLY_SCAN_ABORT_CMD);
+ IWL_CMD(SCAN_START_NOTIFICATION);
+ IWL_CMD(SCAN_RESULTS_NOTIFICATION);
+ IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
+ IWL_CMD(BEACON_NOTIFICATION);
+ IWL_CMD(REPLY_TX_BEACON);
+ IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
+ IWL_CMD(QUIET_NOTIFICATION);
+ IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
+ IWL_CMD(MEASURE_ABORT_NOTIFICATION);
+ IWL_CMD(REPLY_BT_CONFIG);
+ IWL_CMD(REPLY_STATISTICS_CMD);
+ IWL_CMD(STATISTICS_NOTIFICATION);
+ IWL_CMD(REPLY_CARD_STATE_CMD);
+ IWL_CMD(CARD_STATE_NOTIFICATION);
+ IWL_CMD(MISSED_BEACONS_NOTIFICATION);
+ IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
+ IWL_CMD(SENSITIVITY_CMD);
+ IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
+ IWL_CMD(REPLY_RX_PHY_CMD);
+ IWL_CMD(REPLY_RX_MPDU_CMD);
+ IWL_CMD(REPLY_RX);
+ IWL_CMD(REPLY_COMPRESSED_BA);
+ IWL_CMD(CALIBRATION_CFG_CMD);
+ IWL_CMD(CALIBRATION_RES_NOTIFICATION);
+ IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
+ IWL_CMD(REPLY_TX_POWER_DBM_CMD);
+ IWL_CMD(TEMPERATURE_NOTIFICATION);
+ IWL_CMD(TX_ANT_CONFIGURATION_CMD);
+ IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
+ IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
+ IWL_CMD(REPLY_BT_COEX_PROT_ENV);
+ IWL_CMD(REPLY_WIPAN_PARAMS);
+ IWL_CMD(REPLY_WIPAN_RXON);
+ IWL_CMD(REPLY_WIPAN_RXON_TIMING);
+ IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
+ IWL_CMD(REPLY_WIPAN_QOS_PARAM);
+ IWL_CMD(REPLY_WIPAN_WEPKEY);
+ IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
+ IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
+ IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
+ IWL_CMD(REPLY_WOWLAN_PATTERNS);
+ IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
+ IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
+ IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
+ IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
+ IWL_CMD(REPLY_WOWLAN_GET_STATUS);
+ default:
+ return "UNKNOWN";
+
+ }
+}
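+
+/*
+ * Aside, for illustration only: IWL_CMD is the driver's stringification
+ * helper (defined elsewhere); a macro of the shape
+ *
+ *	#define IWL_CMD(x) case x: return #x
+ *
+ * would expand each line above into the corresponding switch arm.
+ */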
+
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+
+static void iwl_generic_cmd_callback(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt)
+{
+ if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
+ IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
+ get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ return;
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ switch (cmd->hdr.cmd) {
+ case REPLY_TX_LINK_QUALITY_CMD:
+ case SENSITIVITY_CMD:
+ IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
+ get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ break;
+ default:
+ IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
+ get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ }
+#endif
+}
+
+static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+ int ret;
+
+ /* An asynchronous command can not expect an SKB to be set. */
+ if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+ return -EINVAL;
+
+ /* Assign a generic callback if one is not provided */
+ if (!cmd->callback)
+ cmd->callback = iwl_generic_cmd_callback;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return -EBUSY;
+
+ ret = iwl_enqueue_hcmd(priv, cmd);
+ if (ret < 0) {
+ IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+ get_cmd_string(cmd->id), ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+ int cmd_idx;
+ int ret;
+
+ lockdep_assert_held(&priv->mutex);
+
+ /* A synchronous command can not have a callback set. */
+ if (WARN_ON(cmd->callback))
+ return -EINVAL;
+
+ IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
+ get_cmd_string(cmd->id));
+
+ set_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
+ get_cmd_string(cmd->id));
+
+ cmd_idx = iwl_enqueue_hcmd(priv, cmd);
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+ get_cmd_string(cmd->id), ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible_timeout(priv->wait_command_queue,
+ !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
+ IWL_ERR(priv,
+ "Error sending %s: time out after %dms.\n",
+ get_cmd_string(cmd->id),
+ jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command"
+ "%s\n", get_cmd_string(cmd->id));
+ ret = -ETIMEDOUT;
+ goto cancel;
+ }
+ }
+
+ if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
+ IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
+ get_cmd_string(cmd->id));
+ ret = -ECANCELED;
+ goto fail;
+ }
+ if (test_bit(STATUS_FW_ERROR, &priv->status)) {
+ IWL_ERR(priv, "Command %s failed: FW Error\n",
+ get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto fail;
+ }
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+ IWL_ERR(priv, "Error: Response NULL in '%s'\n",
+ get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto cancel;
+ }
+
+ return 0;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+ * TX cmd queue. Otherwise in case the cmd comes
+ * in later, it will possibly set an invalid
+ * address (cmd->meta.source).
+ */
+ priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
+ ~CMD_WANT_SKB;
+ }
+fail:
+ if (cmd->reply_page) {
+ iwl_free_pages(priv, cmd->reply_page);
+ cmd->reply_page = 0;
+ }
+
+ return ret;
+}
+
+int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+ if (cmd->flags & CMD_ASYNC)
+ return iwl_send_cmd_async(priv, cmd);
+
+ return iwl_send_cmd_sync(priv, cmd);
+}
+
+int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
+ const void *data)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ .flags = flags,
+ };
+
+ return iwl_send_cmd(priv, &cmd);
+}
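+
+/*
+ * Aside, for illustration only: a typical caller builds the payload and sends
+ * it through the helper above.  A hypothetical wrapper (CMD_SYNC and
+ * struct iwl_bt_cmd are assumed from the existing driver) might look like:
+ *
+ *	static int example_send_bt_config(struct iwl_priv *priv,
+ *					  struct iwl_bt_cmd *bt)
+ *	{
+ *		return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, CMD_SYNC,
+ *					sizeof(*bt), bt);
+ *	}
+ *
+ * Any flags value without CMD_ASYNC set takes the synchronous path, so the
+ * caller must hold priv->mutex (see the lockdep assertion above).
+ */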
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
new file mode 100644
index 00000000000..41f0de91400
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -0,0 +1,1172 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "iwl-dev.h"
+#include "iwl-trans.h"
+#include "iwl-core.h"
+#include "iwl-helpers.h"
+#include "iwl-trans-int-pcie.h"
+/* TODO: remove unneeded includes once the transport layer tx_free is here */
+#include "iwl-agn.h"
+#include "iwl-core.h"
+
+static int iwl_trans_rx_alloc(struct iwl_priv *priv)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct device *dev = priv->bus->dev;
+
+ memset(&priv->rxq, 0, sizeof(priv->rxq));
+
+ spin_lock_init(&rxq->lock);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+
+ if (WARN_ON(rxq->bd || rxq->rb_stts))
+ return -EINVAL;
+
+ /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
+ rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ &rxq->bd_dma, GFP_KERNEL);
+ if (!rxq->bd)
+ goto err_bd;
+ memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
+
+ /*Allocate the driver's pointer to receive buffer status */
+ rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
+ &rxq->rb_stts_dma, GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err_rb_stts;
+ memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
+
+ return 0;
+
+err_rb_stts:
+ dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ rxq->bd, rxq->bd_dma);
+ memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd = NULL;
+err_bd:
+ return -ENOMEM;
+}
+
+static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ int i;
+
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << priv->hw_params.rx_page_order,
+ DMA_FROM_DEVICE);
+ __iwl_free_pages(priv, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+}
+
+static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
+ struct iwl_rx_queue *rxq)
+{
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+ u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
+
+ rb_timeout = RX_RB_TIMEOUT;
+
+ if (iwlagn_mod_params.amsdu_size_8K)
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write index */
+ iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->bd_dma >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+ * the credit mechanism in 5000 HW RX FIFO
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+ rb_size|
+ (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+}
+
+static int iwl_rx_init(struct iwl_priv *priv)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ int i, err;
+ unsigned long flags;
+
+ if (!rxq->bd) {
+ err = iwl_trans_rx_alloc(priv);
+ if (err)
+ return err;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+
+ iwl_trans_rxq_free_rx_bufs(priv);
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ iwlagn_rx_replenish(priv);
+
+ iwl_trans_rx_hw_init(priv, rxq);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ rxq->need_update = 1;
+ iwl_rx_queue_update_write_ptr(priv, rxq);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static void iwl_trans_rx_free(struct iwl_priv *priv)
+{
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ unsigned long flags;
+
+ /*if rxq->bd is NULL, it means that nothing has been allocated,
+ * exit now */
+ if (!rxq->bd) {
+ IWL_DEBUG_INFO(priv, "Free NULL rx context\n");
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ iwl_trans_rxq_free_rx_bufs(priv);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ dma_free_coherent(priv->bus->dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ rxq->bd, rxq->bd_dma);
+ memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
+ rxq->bd = NULL;
+
+ if (rxq->rb_stts)
+ dma_free_coherent(priv->bus->dev,
+ sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ else
+ IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n");
+ memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
+ rxq->rb_stts = NULL;
+}
+
+static int iwl_trans_rx_stop(struct iwl_priv *priv)
+{
+
+ /* stop Rx DMA */
+ iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+}
+
+static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
+ struct iwl_dma_ptr *ptr, size_t size)
+{
+ if (WARN_ON(ptr->addr))
+ return -EINVAL;
+
+ ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
+ &ptr->dma, GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
+ struct iwl_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
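+
+/*
+ * Aside, for illustration only: struct iwl_dma_ptr is a small
+ * (addr, dma, size) triple; these two helpers wrap dma_alloc_coherent() and
+ * dma_free_coherent() so that the scheduler byte-count tables and the
+ * keep-warm buffer below can be allocated and torn down symmetrically.
+ */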
+
+static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id)
+{
+ size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+ int i;
+
+ if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
+ return -EINVAL;
+
+ txq->q.n_window = slots_num;
+
+ txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
+ GFP_KERNEL);
+ txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
+ GFP_KERNEL);
+
+ if (!txq->meta || !txq->cmd)
+ goto error;
+
+ for (i = 0; i < slots_num; i++) {
+ txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
+ GFP_KERNEL);
+ if (!txq->cmd[i])
+ goto error;
+ }
+
+ /* Alloc driver data array and TFD circular buffer */
+ /* Driver private data, only for Tx (not command) queues,
+ * not shared with device. */
+ if (txq_id != priv->cmd_queue) {
+ txq->txb = kzalloc(sizeof(txq->txb[0]) *
+ TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
+ if (!txq->txb) {
+ IWL_ERR(priv, "kmalloc for auxiliary BD "
+ "structures failed\n");
+ goto error;
+ }
+ } else {
+ txq->txb = NULL;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
+ GFP_KERNEL);
+ if (!txq->tfds) {
+ IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
+ goto error;
+ }
+ txq->q.id = txq_id;
+
+ return 0;
+error:
+ kfree(txq->txb);
+ txq->txb = NULL;
+ /* since txq->cmd has been zeroed,
+ * all non allocated cmd[i] will be NULL */
+ if (txq->cmd)
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->cmd[i]);
+ kfree(txq->meta);
+ kfree(txq->cmd);
+ txq->meta = NULL;
+ txq->cmd = NULL;
+
+ return -ENOMEM;
+
+}
+
+static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+ int slots_num, u32 txq_id)
+{
+ int ret;
+
+ txq->need_update = 0;
+ memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
+
+ /*
+ * For the default queues 0-3, set up the swq_id
+ * already -- all others need to get one later
+ * (if they need one at all).
+ */
+ if (txq_id < 4)
+ iwl_set_swq_id(txq, txq_id, txq_id);
+
+ /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
+ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
+ txq_id);
+ if (ret)
+ return ret;
+
+ /*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ * Circular buffer (TFD queue in DRAM) physical base address */
+ iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
+ txq->q.dma_addr >> 8);
+
+ return 0;
+}
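+
+/*
+ * Aside, for illustration only: FH_MEM_CBBC_QUEUE takes the TFD ring base
+ * address shifted right by 8 bits, i.e. in 256-byte units, so the ring must
+ * start on a 256-byte boundary; the coherent allocation done in
+ * iwl_trans_txq_alloc() is at least page aligned and therefore satisfies
+ * this.
+ */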
+
+/**
+ * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+
+ if (!q->n_bd)
+ return;
+
+ while (q->write_ptr != q->read_ptr) {
+		/* The read_ptr needs to be bounded by q->n_window */
+ iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
+ q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+}
+
+/**
+ * iwl_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct device *dev = priv->bus->dev;
+ int i;
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_tx_queue_unmap(priv, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ for (i = 0; i < txq->q.n_window; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd) {
+ dma_free_coherent(dev, priv->hw_params.tfd_size *
+ txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
+ }
+
+ /* De-alloc array of per-TFD driver data */
+ kfree(txq->txb);
+ txq->txb = NULL;
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+
+/**
+ * iwl_trans_tx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+static void iwl_trans_tx_free(struct iwl_priv *priv)
+{
+ int txq_id;
+
+ /* Tx queues */
+ if (priv->txq) {
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ iwl_tx_queue_free(priv, txq_id);
+ }
+
+ kfree(priv->txq);
+ priv->txq = NULL;
+
+ iwlagn_free_dma_ptr(priv, &priv->kw);
+
+ iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
+}
+
+/**
+ * iwl_trans_tx_alloc - allocate TX context
+ * Allocate all Tx DMA structures and initialize them
+ *
+ * @param priv
+ * @return error code
+ */
+static int iwl_trans_tx_alloc(struct iwl_priv *priv)
+{
+ int ret;
+ int txq_id, slots_num;
+
+ /*It is not allowed to alloc twice, so warn when this happens.
+ * We cannot rely on the previous allocation, so free and fail */
+ if (WARN_ON(priv->txq)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
+ priv->hw_params.scd_bc_tbls_size);
+ if (ret) {
+ IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
+ goto error;
+ }
+
+ /* Alloc keep-warm buffer */
+ ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
+ if (ret) {
+ IWL_ERR(priv, "Keep Warm allocation failed\n");
+ goto error;
+ }
+
+ priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
+ priv->cfg->base_params->num_of_queues, GFP_KERNEL);
+ if (!priv->txq) {
+ IWL_ERR(priv, "Not enough memory for txq\n");
+		ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ slots_num = (txq_id == priv->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
+ txq_id);
+ if (ret) {
+ IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ trans_tx_free(&priv->trans);
+
+ return ret;
+}
+static int iwl_tx_init(struct iwl_priv *priv)
+{
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+ bool alloc = false;
+
+ if (!priv->txq) {
+ ret = iwl_trans_tx_alloc(priv);
+ if (ret)
+ goto error;
+ alloc = true;
+ }
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ iwl_write_prph(priv, SCD_TXFACT, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+ slots_num = (txq_id == priv->cmd_queue) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
+ txq_id);
+ if (ret) {
+ IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return 0;
+error:
+ /* Upon error, free only if we allocated something */
+ if (alloc)
+ trans_tx_free(&priv->trans);
+ return ret;
+}
+
+static void iwl_set_pwr_vmain(struct iwl_priv *priv)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+ if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
+ iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+ iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
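For completeness, a minimal sketch of a power-source chooser built from the V_AUX alternative documented in the comment above; pci_pme_capable() and the APMG constants come from that comment, while the helper name and the priv->pci_dev field are assumptions for illustration only:

        static void iwl_set_pwr_src(struct iwl_priv *priv)
        {
                /* Prefer V_AUX only if the device can signal PME from D3cold */
                u32 src = pci_pme_capable(priv->pci_dev, PCI_D3cold) ?
                                APMG_PS_CTRL_VAL_PWR_SRC_VAUX :
                                APMG_PS_CTRL_VAL_PWR_SRC_VMAIN;

                iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, src,
                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
        }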
+
+static int iwl_nic_init(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ /* nic_init */
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_apm_init(priv);
+
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ iwl_set_pwr_vmain(priv);
+
+ priv->cfg->lib->nic_config(priv);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ iwl_rx_init(priv);
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (iwl_tx_init(priv))
+ return -ENOMEM;
+
+ if (priv->cfg->base_params->shadow_reg_enable) {
+ /* enable shadow regs in HW */
+ iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
+ 0x800FFFFF);
+ }
+
+ set_bit(STATUS_INIT, &priv->status);
+
+ return 0;
+}
+
+#define HW_READY_TIMEOUT (50)
+
+/* Note: returns poll_bit return value, which is >= 0 if success */
+static int iwl_set_hw_ready(struct iwl_priv *priv)
+{
+ int ret;
+
+ iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ HW_READY_TIMEOUT);
+
+ IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
+ return ret;
+}
+
+/* Note: returns standard 0/-ERROR code */
+static int iwl_trans_prepare_card_hw(struct iwl_priv *priv)
+{
+ int ret;
+
+ IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");
+
+ ret = iwl_set_hw_ready(priv);
+ if (ret >= 0)
+ return 0;
+
+ /* If HW is not ready, prepare the conditions to check again */
+ iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
+ ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+ if (ret < 0)
+ return ret;
+
+ /* HW should be ready by now, check again. */
+ ret = iwl_set_hw_ready(priv);
+ if (ret >= 0)
+ return 0;
+ return ret;
+}
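The two helpers deliberately use different return conventions: iwl_set_hw_ready() follows the poll-bit style (>= 0 means ready), while iwl_trans_prepare_card_hw() returns 0 or a negative errno. A hedged caller-side sketch:

        if (iwl_set_hw_ready(priv) < 0)         /* poll-style: negative = not ready */
                IWL_DEBUG_INFO(priv, "NIC not ready yet\n");

        if (iwl_trans_prepare_card_hw(priv))    /* errno-style: non-zero = failure */
                IWL_WARN(priv, "could not claim the hardware\n");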
+
+static int iwl_trans_start_device(struct iwl_priv *priv)
+{
+ int ret;
+
+ priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
+
+ if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
+ iwl_trans_prepare_card_hw(priv)) {
+ IWL_WARN(priv, "Exit HW not ready\n");
+ return -EIO;
+ }
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (iwl_read32(priv, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(STATUS_RF_KILL_HW, &priv->status);
+ else
+ set_bit(STATUS_RF_KILL_HW, &priv->status);
+
+ if (iwl_is_rfkill(priv)) {
+ wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
+ iwl_enable_interrupts(priv);
+ return -ERFKILL;
+ }
+
+ iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+
+ ret = iwl_nic_init(priv);
+ if (ret) {
+ IWL_ERR(priv, "Unable to init nic\n");
+ return ret;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
+ iwl_enable_interrupts(priv);
+
+ /* really make sure rfkill handshake bits are cleared */
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ return 0;
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask.
+ * Must be called under priv->lock and with mac access held.
+ */
+static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
+{
+ iwl_write_prph(priv, SCD_TXFACT, mask);
+}
+
+#define IWL_AC_UNSET -1
+
+struct queue_to_fifo_ac {
+ s8 fifo, ac;
+};
+
+static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
+ { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
+ { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
+ { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
+ { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
+ { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+ { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
+};
+
+static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
+ { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
+ { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
+ { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
+ { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
+ { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
+ { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
+ { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
+ { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
+ { IWL_TX_FIFO_BE_IPAN, 2, },
+ { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
+};
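Reading the tables above: the array index is the hardware Tx queue, .fifo is the DMA/FIFO channel it drains into, and .ac is the mac80211 access category bound to it (IWL_AC_UNSET for the command queue and unused slots). For example:

        /* Queue 0 carries voice traffic in the default (BSS-only) mapping */
        const struct queue_to_fifo_ac *e = &iwlagn_default_queue_to_tx_fifo[0];
        /* e->fifo == IWL_TX_FIFO_VO, e->ac == IEEE80211_AC_VO */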
+static void iwl_trans_tx_start(struct iwl_priv *priv)
+{
+ const struct queue_to_fifo_ac *queue_to_fifo;
+ struct iwl_rxon_context *ctx;
+ u32 a;
+ unsigned long flags;
+ int i, chan;
+ u32 reg_val;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
+ a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
+ /* reset context data memory */
+ for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
+ a += 4)
+ iwl_write_targ_mem(priv, a, 0);
+ /* reset tx status memory */
+ for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
+ a += 4)
+ iwl_write_targ_mem(priv, a, 0);
+ for (; a < priv->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
+ iwl_write_targ_mem(priv, a, 0);
+
+ iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
+ priv->scd_bc_tbls.dma >> 10);
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
+ iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
+ iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
+ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
+ SCD_QUEUECHAIN_SEL_ALL(priv));
+ iwl_write_prph(priv, SCD_AGGR_SEL, 0);
+
+ /* initiate the queues */
+ for (i = 0; i < priv->hw_params.max_txq_num; i++) {
+ iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
+ iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
+ iwl_write_targ_mem(priv, priv->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(i), 0);
+ iwl_write_targ_mem(priv, priv->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(i) +
+ sizeof(u32),
+ ((SCD_WIN_SIZE <<
+ SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+ SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ ((SCD_FRAME_LIMIT <<
+ SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+ }
+
+ iwl_write_prph(priv, SCD_INTERRUPT_MASK,
+ IWL_MASK(0, priv->hw_params.max_txq_num));
+
+ /* Activate all Tx DMA/FIFO channels */
+ iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));
+
+ /* map queues to FIFOs */
+ if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
+ queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
+ else
+ queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
+
+ iwl_trans_set_wr_ptrs(priv, priv->cmd_queue, 0);
+
+ /* make sure all queues are not stopped */
+ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&priv->queue_stop_count[i], 0);
+ for_each_context(priv, ctx)
+ ctx->last_tx_rejected = false;
+
+ /* reset to 0 to enable all the queues first */
+ priv->txq_ctx_active_msk = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
+ BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);
+
+ for (i = 0; i < 10; i++) {
+ int fifo = queue_to_fifo[i].fifo;
+ int ac = queue_to_fifo[i].ac;
+
+ iwl_txq_ctx_activate(priv, i);
+
+ if (fifo == IWL_TX_FIFO_UNUSED)
+ continue;
+
+ if (ac != IWL_AC_UNSET)
+ iwl_set_swq_id(&priv->txq[i], ac, i);
+ iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Enable L1-Active */
+ iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+}
+
+/**
+ * iwl_trans_tx_stop - Stop all Tx DMA channels
+ */
+static int iwl_trans_tx_stop(struct iwl_priv *priv)
+{
+ int ch, txq_id;
+ unsigned long flags;
+
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ iwl_trans_txq_set_sched(priv, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
+ iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+ 1000))
+ IWL_ERR(priv, "Failing on timeout while stopping"
+ " DMA channel %d [0x%08x]", ch,
+ iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (!priv->txq) {
+ IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
+ return 0;
+ }
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+ iwl_tx_queue_unmap(priv, txq_id);
+
+ return 0;
+}
+
+static void iwl_trans_stop_device(struct iwl_priv *priv)
+{
+ unsigned long flags;
+
+ /* stop and reset the on-board processor */
+ iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /* tell the device to stop sending interrupts */
+ spin_lock_irqsave(&priv->lock, flags);
+ iwl_disable_interrupts(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ trans_sync_irq(&priv->trans);
+
+ /* device going down, Stop using ICT table */
+ iwl_disable_ict(priv);
+
+ /*
+ * If a HW restart happens during firmware loading,
+ * then the firmware loading might call this function
+ * and later it might be called again due to the
+ * restart. So don't process again if the device is
+ * already dead.
+ */
+ if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
+ iwl_trans_tx_stop(priv);
+ iwl_trans_rx_stop(priv);
+
+ /* Power-down device's busmaster DMA clocks */
+ iwl_write_prph(priv, APMG_CLK_DIS_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+ }
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ iwl_apm_stop(priv);
+}
+
+static struct iwl_tx_cmd *iwl_trans_get_tx_cmd(struct iwl_priv *priv,
+ int txq_id)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ struct iwl_device_cmd *dev_cmd;
+
+ if (unlikely(iwl_queue_space(q) < q->high_mark))
+ return NULL;
+
+ /*
+ * Set up the Tx-command (not MAC!) header.
+ * Store the chosen Tx queue and TFD index within the sequence field;
+ * after Tx, uCode's Tx response will return this value so driver can
+ * locate the frame within the tx queue and do post-tx processing.
+ */
+ dev_cmd = txq->cmd[q->write_ptr];
+ memset(dev_cmd, 0, sizeof(*dev_cmd));
+ dev_cmd->hdr.cmd = REPLY_TX;
+ dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(q->write_ptr)));
+ return &dev_cmd->cmd.tx;
+}
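Because the queue id and TFD index are packed into hdr.sequence here, the Tx response path can recover both later; a sketch, assuming the driver's usual SEQ_TO_QUEUE()/SEQ_TO_INDEX() helpers and an iwl_rx_packet *pkt from the response:

        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        int txq_id   = SEQ_TO_QUEUE(sequence);
        int index    = SEQ_TO_INDEX(sequence);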
+
+static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
+ struct iwl_rxon_context *ctx)
+{
+ struct iwl_tx_queue *txq = &priv->txq[txq_id];
+ struct iwl_queue *q = &txq->q;
+ struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
+ struct iwl_cmd_meta *out_meta;
+
+ dma_addr_t phys_addr = 0;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, firstlen, secondlen;
+ u8 wait_write_ptr = 0;
+ u8 hdr_len = ieee80211_hdrlen(fc);
+
+ /* Set up driver data for this TFD */
+ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
+ txq->txb[q->write_ptr].skb = skb;
+ txq->txb[q->write_ptr].ctx = ctx;
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->meta[q->write_ptr];
+
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct iwl_tx_cmd) +
+ sizeof(struct iwl_cmd_header) + hdr_len;
+ firstlen = (len + 3) & ~3;
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (firstlen != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys = dma_map_single(priv->bus->dev,
+ &dev_cmd->hdr, firstlen,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
+ return -1;
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+
+ if (!ieee80211_has_morefrags(fc)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = skb->len - hdr_len;
+ if (secondlen > 0) {
+ phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
+ secondlen, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
+ dma_unmap_single(priv->bus->dev,
+ dma_unmap_addr(out_meta, mapping),
+ dma_unmap_len(out_meta, len),
+ DMA_BIDIRECTIONAL);
+ return -1;
+ }
+ }
+
+ /* Attach buffers to TFD */
+ iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
+ if (secondlen > 0)
+ iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
+ secondlen, 0);
+
+ scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+ offsetof(struct iwl_tx_cmd, scratch);
+
+ /* take back ownership of DMA buffer to enable update */
+ dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
+ DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
+ IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
+ le16_to_cpu(dev_cmd->hdr.sequence));
+ IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ if (ampdu)
+ iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
+ le16_to_cpu(tx_cmd->len));
+
+ dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
+ DMA_BIDIRECTIONAL);
+
+ trace_iwlwifi_dev_tx(priv,
+ &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+ sizeof(struct iwl_tfd),
+ &dev_cmd->hdr, firstlen,
+ skb->data + hdr_len, secondlen);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+ iwl_txq_update_write_ptr(priv, txq);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ * The checks below only decide whether the write pointer
+ * should be updated now or deferred.
+ */
+ if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
+ if (wait_write_ptr) {
+ txq->need_update = 1;
+ iwl_txq_update_write_ptr(priv, txq);
+ } else {
+ iwl_stop_queue(priv, txq);
+ }
+ }
+ return 0;
+}
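A worked example of the MAC-header padding above (pure arithmetic, values illustrative):

        /* len = 122 -> firstlen = (122 + 3) & ~3 = 124 -> 2 pad bytes, MH_PAD flag set
         * len = 124 -> firstlen = 124                  -> no padding, flag not set   */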
+
+static void iwl_trans_kick_nic(struct iwl_priv *priv)
+{
+ /* Remove all resets to allow NIC to operate */
+ iwl_write32(priv, CSR_RESET, 0);
+}
+
+static void iwl_trans_sync_irq(struct iwl_priv *priv)
+{
+ /* wait to make sure we flush the pending tasklet */
+ synchronize_irq(priv->bus->irq);
+ tasklet_kill(&priv->irq_tasklet);
+}
+
+static void iwl_trans_free(struct iwl_priv *priv)
+{
+ free_irq(priv->bus->irq, priv);
+ iwl_free_isr_ict(priv);
+}
+
+static const struct iwl_trans_ops trans_ops = {
+ .start_device = iwl_trans_start_device,
+ .prepare_card_hw = iwl_trans_prepare_card_hw,
+ .stop_device = iwl_trans_stop_device,
+
+ .tx_start = iwl_trans_tx_start,
+
+ .rx_free = iwl_trans_rx_free,
+ .tx_free = iwl_trans_tx_free,
+
+ .send_cmd = iwl_send_cmd,
+ .send_cmd_pdu = iwl_send_cmd_pdu,
+
+ .get_tx_cmd = iwl_trans_get_tx_cmd,
+ .tx = iwl_trans_tx,
+
+ .txq_agg_disable = iwl_trans_txq_agg_disable,
+ .txq_agg_setup = iwl_trans_txq_agg_setup,
+
+ .kick_nic = iwl_trans_kick_nic,
+
+ .sync_irq = iwl_trans_sync_irq,
+ .free = iwl_trans_free,
+};
+
+int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv)
+{
+ int err;
+
+ priv->trans.ops = &trans_ops;
+ priv->trans.priv = priv;
+
+ tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+ iwl_irq_tasklet, (unsigned long)priv);
+
+ iwl_alloc_isr_ict(priv);
+
+ err = request_irq(priv->bus->irq, iwl_isr_ict, IRQF_SHARED,
+ DRV_NAME, priv);
+ if (err) {
+ IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq);
+ iwl_free_isr_ict(priv);
+ return err;
+ }
+
+ INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
+
+ return 0;
+}
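A hedged bring-up sketch of how the upper layer is expected to use this (error handling trimmed; both calls are declared in iwl-trans.h below):

        ret = iwl_trans_register(&priv->trans, priv);
        if (!ret)
                ret = trans_start_device(&priv->trans);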
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
new file mode 100644
index 00000000000..7993aa7ae66
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -0,0 +1,225 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_trans_h__
+#define __iwl_trans_h__
+
+ /* This file includes the declarations that are exported from the transport
+ * layer */
+
+struct iwl_priv;
+struct iwl_rxon_context;
+struct iwl_host_cmd;
+
+/**
+ * struct iwl_trans_ops - transport specific operations
+ * @start_device: allocates and inits all the resources for the transport
+ * layer.
+ * @prepare_card_hw: claim ownership of the HW. Will be called during
+ * probe.
+ * @tx_start: starts and configures all the Tx FIFOs - usually done once the fw
+ * is alive.
+ * @stop_device: stops the whole device (embedded CPU put to reset)
+ * @rx_free: frees the rx memory
+ * @tx_free: frees the tx memory
+ * @send_cmd: send a host command
+ * @send_cmd_pdu: send a host command; flags can be CMD_*
+ * @get_tx_cmd: returns a pointer to a new Tx cmd for the upper layer's use
+ * @tx: send an skb
+ * @txq_agg_setup: set up a Tx queue for AMPDU - will be called once the HW is
+ * ready and a successful ADDBA response has been received.
+ * @txq_agg_disable: de-configure a Tx queue used to send AMPDUs
+ * @kick_nic: remove the RESET from the embedded CPU and let it run
+ * @sync_irq: the upper layer will typically disable interrupts and call this
+ * handler. After this handler returns, it is guaranteed that all
+ * the ISRs / tasklets etc. have finished running and the transport
+ * layer shall not pass any more Rx.
+ * @free: release all the resources of the transport layer itself such as
+ * the irq, tasklet etc.
+ */
+struct iwl_trans_ops {
+
+ int (*start_device)(struct iwl_priv *priv);
+ int (*prepare_card_hw)(struct iwl_priv *priv);
+ void (*stop_device)(struct iwl_priv *priv);
+ void (*tx_start)(struct iwl_priv *priv);
+ void (*tx_free)(struct iwl_priv *priv);
+ void (*rx_free)(struct iwl_priv *priv);
+
+ int (*send_cmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+
+ int (*send_cmd_pdu)(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
+ const void *data);
+ struct iwl_tx_cmd * (*get_tx_cmd)(struct iwl_priv *priv, int txq_id);
+ int (*tx)(struct iwl_priv *priv, struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
+ struct iwl_rxon_context *ctx);
+
+ int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id,
+ u16 ssn_idx, u8 tx_fifo);
+ void (*txq_agg_setup)(struct iwl_priv *priv, int sta_id, int tid,
+ int frame_limit);
+
+ void (*kick_nic)(struct iwl_priv *priv);
+
+ void (*sync_irq)(struct iwl_priv *priv);
+ void (*free)(struct iwl_priv *priv);
+};
+
+struct iwl_trans {
+ const struct iwl_trans_ops *ops;
+ struct iwl_priv *priv;
+};
+
+static inline int trans_start_device(struct iwl_trans *trans)
+{
+ return trans->ops->start_device(trans->priv);
+}
+
+static inline int trans_prepare_card_hw(struct iwl_trans *trans)
+{
+ return trans->ops->prepare_card_hw(trans->priv);
+}
+
+static inline void trans_stop_device(struct iwl_trans *trans)
+{
+ trans->ops->stop_device(trans->priv);
+}
+
+static inline void trans_tx_start(struct iwl_trans *trans)
+{
+ trans->ops->tx_start(trans->priv);
+}
+
+static inline void trans_rx_free(struct iwl_trans *trans)
+{
+ trans->ops->rx_free(trans->priv);
+}
+
+static inline void trans_tx_free(struct iwl_trans *trans)
+{
+ trans->ops->tx_free(trans->priv);
+}
+
+static inline int trans_send_cmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ return trans->ops->send_cmd(trans->priv, cmd);
+}
+
+static inline int trans_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
+ u16 len, const void *data)
+{
+ return trans->ops->send_cmd_pdu(trans->priv, id, flags, len, data);
+}
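A hedged usage sketch of the wrapper above; REPLY_ECHO and CMD_SYNC are assumed to be the driver's existing command id and flag, and the zero-length payload is illustrative only:

        int ret = trans_send_cmd_pdu(&priv->trans, REPLY_ECHO, CMD_SYNC, 0, NULL);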
+
+static inline struct iwl_tx_cmd *trans_get_tx_cmd(struct iwl_trans *trans,
+ int txq_id)
+{
+ return trans->ops->get_tx_cmd(trans->priv, txq_id);
+}
+
+static inline int trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
+ struct iwl_rxon_context *ctx)
+{
+ return trans->ops->tx(trans->priv, skb, tx_cmd, txq_id, fc, ampdu, ctx);
+}
+
+static inline int trans_txq_agg_disable(struct iwl_trans *trans, u16 txq_id,
+ u16 ssn_idx, u8 tx_fifo)
+{
+ return trans->ops->txq_agg_disable(trans->priv, txq_id,
+ ssn_idx, tx_fifo);
+}
+
+static inline void trans_txq_agg_setup(struct iwl_trans *trans, int sta_id,
+ int tid, int frame_limit)
+{
+ trans->ops->txq_agg_setup(trans->priv, sta_id, tid, frame_limit);
+}
+
+static inline void trans_kick_nic(struct iwl_trans *trans)
+{
+ trans->ops->kick_nic(trans->priv);
+}
+
+static inline void trans_sync_irq(struct iwl_trans *trans)
+{
+ trans->ops->sync_irq(trans->priv);
+}
+
+static inline void trans_free(struct iwl_trans *trans)
+{
+ trans->ops->free(trans->priv);
+}
+
+int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv);
+
+/* TODO: this function should NOT be exported from the trans module - export it
+ * only until the reclaim flow is brought into the transport module too */
+
+struct iwl_tx_queue;
+void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq);
+
+#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.c b/drivers/net/wireless/iwmc3200wifi/fw.c
index 49067092d33..6f1afe6bbc8 100644
--- a/drivers/net/wireless/iwmc3200wifi/fw.c
+++ b/drivers/net/wireless/iwmc3200wifi/fw.c
@@ -187,7 +187,7 @@ static int iwm_load_img(struct iwm_priv *iwm, const char *img_name)
if (ret < 0)
goto err_release_fw;
opcode_idx++;
- };
+ }
/* Read firmware version */
fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_SW_VER, 0);
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 5d637af2d7c..b456a53b64b 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 71c8f3fccfa..dbd24a4607e 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -3,6 +3,7 @@
* It prepares command and sends it to firmware when it is ready.
*/
+#include <linux/hardirq.h>
#include <linux/kfifo.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -873,6 +874,7 @@ int lbs_get_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 *value)
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_GET);
+ cmd.offset = cpu_to_le16(offset);
if (reg != CMD_MAC_REG_ACCESS &&
reg != CMD_BBP_REG_ACCESS &&
@@ -882,7 +884,7 @@ int lbs_get_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 *value)
}
ret = lbs_cmd_with_response(priv, reg, &cmd);
- if (ret) {
+ if (!ret) {
if (reg == CMD_BBP_REG_ACCESS || reg == CMD_RF_REG_ACCESS)
*value = cmd.value.bbp_rf;
else if (reg == CMD_MAC_REG_ACCESS)
@@ -915,6 +917,7 @@ int lbs_set_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 value)
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.size = cpu_to_le16(sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
+ cmd.offset = cpu_to_le16(offset);
if (reg == CMD_BBP_REG_ACCESS || reg == CMD_RF_REG_ACCESS)
cmd.value.bbp_rf = (u8) (value & 0xFF);
@@ -1067,16 +1070,34 @@ static void lbs_cleanup_and_insert_cmd(struct lbs_private *priv,
spin_unlock_irqrestore(&priv->driver_lock, flags);
}
-void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
- int result)
+void __lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
+ int result)
{
+ /*
+ * Normally, commands are removed from cmdpendingq before being
+ * submitted. However, we can arrive here on alternative codepaths
+ * where the command is still pending. Make sure the command really
+ * isn't part of a list at this point.
+ */
+ list_del_init(&cmd->list);
+
cmd->result = result;
cmd->cmdwaitqwoken = 1;
- wake_up_interruptible(&cmd->cmdwait_q);
+ wake_up(&cmd->cmdwait_q);
if (!cmd->callback || cmd->callback == lbs_cmd_async_callback)
__lbs_cleanup_and_insert_cmd(priv, cmd);
priv->cur_cmd = NULL;
+ wake_up_interruptible(&priv->waitq);
+}
+
+void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
+ int result)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&priv->driver_lock, flags);
+ __lbs_complete_command(priv, cmd, result);
+ spin_unlock_irqrestore(&priv->driver_lock, flags);
}
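Paths that already hold priv->driver_lock (e.g. the command-response handlers changed below) call the new __ variant directly; a minimal sketch of that locked pattern, with 0 standing in for whatever result is being reported:

        unsigned long flags;

        spin_lock_irqsave(&priv->driver_lock, flags);
        __lbs_complete_command(priv, priv->cur_cmd, 0);
        spin_unlock_irqrestore(&priv->driver_lock, flags);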
int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on)
@@ -1248,7 +1269,7 @@ static struct cmd_ctrl_node *lbs_get_free_cmd_node(struct lbs_private *priv)
if (!list_empty(&priv->cmdfreeq)) {
tempnode = list_first_entry(&priv->cmdfreeq,
struct cmd_ctrl_node, list);
- list_del(&tempnode->list);
+ list_del_init(&tempnode->list);
} else {
lbs_deb_host("GET_CMD_NODE: cmd_ctrl_node is not available\n");
tempnode = NULL;
@@ -1356,10 +1377,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) {
lbs_deb_host(
"EXEC_NEXT_CMD: ignore ENTER_PS cmd\n");
- spin_lock_irqsave(&priv->driver_lock, flags);
- list_del(&cmdnode->list);
lbs_complete_command(priv, cmdnode, 0);
- spin_unlock_irqrestore(&priv->driver_lock, flags);
ret = 0;
goto done;
@@ -1369,10 +1387,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
(priv->psstate == PS_STATE_PRE_SLEEP)) {
lbs_deb_host(
"EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n");
- spin_lock_irqsave(&priv->driver_lock, flags);
- list_del(&cmdnode->list);
lbs_complete_command(priv, cmdnode, 0);
- spin_unlock_irqrestore(&priv->driver_lock, flags);
priv->needtowakeup = 1;
ret = 0;
@@ -1384,7 +1399,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
}
}
spin_lock_irqsave(&priv->driver_lock, flags);
- list_del(&cmdnode->list);
+ list_del_init(&cmdnode->list);
spin_unlock_irqrestore(&priv->driver_lock, flags);
lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n",
le16_to_cpu(cmd->command));
@@ -1667,7 +1682,13 @@ int __lbs_cmd(struct lbs_private *priv, uint16_t command,
}
might_sleep();
- wait_event_interruptible(cmdnode->cmdwait_q, cmdnode->cmdwaitqwoken);
+
+ /*
+ * Be careful with signals here. A signal may be received as the system
+ * goes into suspend or resume. We do not want this to interrupt the
+ * command, so we perform an uninterruptible sleep.
+ */
+ wait_event(cmdnode->cmdwait_q, cmdnode->cmdwaitqwoken);
spin_lock_irqsave(&priv->driver_lock, flags);
ret = cmdnode->result;
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 7109d6b717e..b280ef7a0ae 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -59,6 +59,8 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv);
int lbs_free_cmd_buffer(struct lbs_private *priv);
int lbs_execute_next_command(struct lbs_private *priv);
+void __lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
+ int result);
void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
int result);
int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 207fc361db8..178b222b3ce 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -3,6 +3,7 @@
* responses as well as events generated by firmware.
*/
+#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/sched.h>
@@ -165,7 +166,7 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
lbs_deb_host("CMD_RESP: PS action 0x%X\n", action);
}
- lbs_complete_command(priv, priv->cur_cmd, result);
+ __lbs_complete_command(priv, priv->cur_cmd, result);
spin_unlock_irqrestore(&priv->driver_lock, flags);
ret = 0;
@@ -186,7 +187,7 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
break;
}
- lbs_complete_command(priv, priv->cur_cmd, result);
+ __lbs_complete_command(priv, priv->cur_cmd, result);
spin_unlock_irqrestore(&priv->driver_lock, flags);
ret = -1;
@@ -204,7 +205,7 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
if (priv->cur_cmd) {
/* Clean up and Put current command back to cmdfreeq */
- lbs_complete_command(priv, priv->cur_cmd, result);
+ __lbs_complete_command(priv, priv->cur_cmd, result);
}
spin_unlock_irqrestore(&priv->driver_lock, flags);
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 23250f62176..1af18277884 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -1,6 +1,7 @@
#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/slab.h>
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 76d018beebf..adb3490e3cf 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -44,9 +44,7 @@ struct lbs_private {
/* Mesh */
struct net_device *mesh_dev; /* Virtual device */
#ifdef CONFIG_LIBERTAS_MESH
- u32 mesh_connect_status;
struct lbs_mesh_stats mstats;
- int mesh_open;
uint16_t mesh_tlv;
u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
u8 mesh_ssid_len;
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 29dbce4a9f8..4dfb3bfd2cf 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -1,3 +1,4 @@
+#include <linux/hardirq.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 224e9853c48..387786e1b39 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -892,6 +892,37 @@ static int if_sdio_reset_deep_sleep_wakeup(struct lbs_private *priv)
}
+static struct mmc_host *reset_host;
+
+static void if_sdio_reset_card_worker(struct work_struct *work)
+{
+ /*
+ * The actual reset operation must be run outside of lbs_thread. This
+ * is because mmc_remove_host() will cause the device to be instantly
+ * destroyed, and the libertas driver then needs to end lbs_thread,
+ * leading to a deadlock.
+ *
+ * We run it in a workqueue totally independent from the if_sdio_card
+ * instance for that reason.
+ */
+
+ pr_info("Resetting card...");
+ mmc_remove_host(reset_host);
+ mmc_add_host(reset_host);
+}
+static DECLARE_WORK(card_reset_work, if_sdio_reset_card_worker);
+
+static void if_sdio_reset_card(struct lbs_private *priv)
+{
+ struct if_sdio_card *card = priv->card;
+
+ if (work_pending(&card_reset_work))
+ return;
+
+ reset_host = card->func->card->host;
+ schedule_work(&card_reset_work);
+}
+
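The handler installed below (priv->reset_card = if_sdio_reset_card) is then expected to be invoked by the libertas core when it concludes the card is wedged; a hedged sketch of such a call site:

        if (priv->reset_card)
                priv->reset_card(priv);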
/*******************************************************************/
/* SDIO callbacks */
/*******************************************************************/
@@ -1065,6 +1096,7 @@ static int if_sdio_probe(struct sdio_func *func,
priv->enter_deep_sleep = if_sdio_enter_deep_sleep;
priv->exit_deep_sleep = if_sdio_exit_deep_sleep;
priv->reset_deep_sleep_wakeup = if_sdio_reset_deep_sleep_wakeup;
+ priv->reset_card = if_sdio_reset_card;
sdio_claim_host(func);
@@ -1301,6 +1333,8 @@ static void __exit if_sdio_exit_module(void)
/* Set the flag as user is removing this module. */
user_rmmod = 1;
+ cancel_work_sync(&card_reset_work);
+
sdio_unregister_driver(&if_sdio_driver);
lbs_deb_leave(LBS_DEB_SDIO);
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 463352c890d..e0286cfbc91 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -19,6 +19,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/jiffies.h>
@@ -1032,7 +1034,6 @@ static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
static int if_spi_init_card(struct if_spi_card *card)
{
struct lbs_private *priv = card->priv;
- struct spi_device *spi = card->spi;
int err, i;
u32 scratch;
const struct firmware *helper = NULL;
@@ -1080,8 +1081,9 @@ static int if_spi_init_card(struct if_spi_card *card)
"attached to SPI bus_num %d, chip_select %d. "
"spi->max_speed_hz=%d\n",
card->card_id, card->card_rev,
- spi->master->bus_num, spi->chip_select,
- spi->max_speed_hz);
+ card->spi->master->bus_num,
+ card->spi->chip_select,
+ card->spi->max_speed_hz);
err = if_spi_prog_helper_firmware(card, helper);
if (err)
goto out;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 8c40949cb07..94652c5a25d 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -9,6 +9,7 @@
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
+#include <linux/hardirq.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/kthread.h>
@@ -511,7 +512,7 @@ static int lbs_thread(void *data)
if (priv->connect_status == LBS_CONNECTED)
netif_wake_queue(priv->dev);
if (priv->mesh_dev &&
- lbs_mesh_connected(priv))
+ netif_running(priv->mesh_dev))
netif_wake_queue(priv->mesh_dev);
}
}
@@ -638,6 +639,14 @@ static void lbs_cmd_timeout_handler(unsigned long data)
le16_to_cpu(priv->cur_cmd->cmdbuf->command));
priv->cmd_timed_out = 1;
+
+ /*
+ * If the device didn't even acknowledge the command, reset the state
+ * so that we don't block all future commands due to this one timeout.
+ */
+ if (priv->dnld_sent == DNLD_CMD_SENT)
+ priv->dnld_sent = DNLD_RES_RECEIVED;
+
wake_up_interruptible(&priv->waitq);
out:
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -994,7 +1003,7 @@ void lbs_stop_card(struct lbs_private *priv)
list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
cmdnode->result = -ENOENT;
cmdnode->cmdwaitqwoken = 1;
- wake_up_interruptible(&cmdnode->cmdwait_q);
+ wake_up(&cmdnode->cmdwait_q);
}
/* Flush the command the card is currently processing */
@@ -1002,7 +1011,7 @@ void lbs_stop_card(struct lbs_private *priv)
lbs_deb_main("clearing current command\n");
priv->cur_cmd->result = -ENOENT;
priv->cur_cmd->cmdwaitqwoken = 1;
- wake_up_interruptible(&priv->cur_cmd->cmdwait_q);
+ wake_up(&priv->cur_cmd->cmdwait_q);
}
lbs_deb_main("done clearing commands\n");
spin_unlock_irqrestore(&priv->driver_lock, flags);
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 24cf06680c6..be72c08ea2a 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -2,6 +2,7 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
+#include <linux/hardirq.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
@@ -14,6 +15,121 @@
#include "cmd.h"
+static int lbs_add_mesh(struct lbs_private *priv);
+
+/***************************************************************************
+ * Mesh command handling
+ */
+
+static int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
+ struct cmd_ds_mesh_access *cmd)
+{
+ int ret;
+
+ lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
+
+ cmd->hdr.command = cpu_to_le16(CMD_MESH_ACCESS);
+ cmd->hdr.size = cpu_to_le16(sizeof(*cmd));
+ cmd->hdr.result = 0;
+
+ cmd->action = cpu_to_le16(cmd_action);
+
+ ret = lbs_cmd_with_response(priv, CMD_MESH_ACCESS, cmd);
+
+ lbs_deb_leave(LBS_DEB_CMD);
+ return ret;
+}
+
+static int __lbs_mesh_config_send(struct lbs_private *priv,
+ struct cmd_ds_mesh_config *cmd,
+ uint16_t action, uint16_t type)
+{
+ int ret;
+ u16 command = CMD_MESH_CONFIG_OLD;
+
+ lbs_deb_enter(LBS_DEB_CMD);
+
+ /*
+ * Command id is 0xac for v10 FW along with mesh interface
+ * id in bits 14-13-12.
+ */
+ if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
+ command = CMD_MESH_CONFIG |
+ (MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET);
+
+ cmd->hdr.command = cpu_to_le16(command);
+ cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_mesh_config));
+ cmd->hdr.result = 0;
+
+ cmd->type = cpu_to_le16(type);
+ cmd->action = cpu_to_le16(action);
+
+ ret = lbs_cmd_with_response(priv, command, cmd);
+
+ lbs_deb_leave(LBS_DEB_CMD);
+ return ret;
+}
+
+static int lbs_mesh_config_send(struct lbs_private *priv,
+ struct cmd_ds_mesh_config *cmd,
+ uint16_t action, uint16_t type)
+{
+ int ret;
+
+ if (!(priv->fwcapinfo & FW_CAPINFO_PERSISTENT_CONFIG))
+ return -EOPNOTSUPP;
+
+ ret = __lbs_mesh_config_send(priv, cmd, action, type);
+ return ret;
+}
+
+/* This function is the CMD_MESH_CONFIG legacy function. It only handles the
+ * START and STOP actions. The extended actions supported by CMD_MESH_CONFIG
+ * are all handled by preparing a struct cmd_ds_mesh_config and passing it to
+ * lbs_mesh_config_send.
+ */
+static int lbs_mesh_config(struct lbs_private *priv, uint16_t action,
+ uint16_t chan)
+{
+ struct cmd_ds_mesh_config cmd;
+ struct mrvl_meshie *ie;
+ DECLARE_SSID_BUF(ssid);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.channel = cpu_to_le16(chan);
+ ie = (struct mrvl_meshie *)cmd.data;
+
+ switch (action) {
+ case CMD_ACT_MESH_CONFIG_START:
+ ie->id = WLAN_EID_GENERIC;
+ ie->val.oui[0] = 0x00;
+ ie->val.oui[1] = 0x50;
+ ie->val.oui[2] = 0x43;
+ ie->val.type = MARVELL_MESH_IE_TYPE;
+ ie->val.subtype = MARVELL_MESH_IE_SUBTYPE;
+ ie->val.version = MARVELL_MESH_IE_VERSION;
+ ie->val.active_protocol_id = MARVELL_MESH_PROTO_ID_HWMP;
+ ie->val.active_metric_id = MARVELL_MESH_METRIC_ID;
+ ie->val.mesh_capability = MARVELL_MESH_CAPABILITY;
+ ie->val.mesh_id_len = priv->mesh_ssid_len;
+ memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len);
+ ie->len = sizeof(struct mrvl_meshie_val) -
+ IEEE80211_MAX_SSID_LEN + priv->mesh_ssid_len;
+ cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val));
+ break;
+ case CMD_ACT_MESH_CONFIG_STOP:
+ break;
+ default:
+ return -1;
+ }
+ lbs_deb_cmd("mesh config action %d type %x channel %d SSID %s\n",
+ action, priv->mesh_tlv, chan,
+ print_ssid(ssid, priv->mesh_ssid, priv->mesh_ssid_len));
+
+ return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
+}
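Usage matches the call retained elsewhere in this file: starting the mesh on the current channel is simply (sketch):

        ret = lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, priv->channel);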
+
+
/***************************************************************************
* Mesh sysfs support
*/
@@ -154,17 +270,11 @@ static ssize_t lbs_mesh_set(struct device *dev,
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
int enable;
- int ret, action = CMD_ACT_MESH_CONFIG_STOP;
sscanf(buf, "%x", &enable);
enable = !!enable;
if (enable == !!priv->mesh_dev)
return count;
- if (enable)
- action = CMD_ACT_MESH_CONFIG_START;
- ret = lbs_mesh_config(priv, action, priv->channel);
- if (ret)
- return ret;
if (enable)
lbs_add_mesh(priv);
@@ -199,582 +309,11 @@ static struct attribute *lbs_mesh_sysfs_entries[] = {
NULL,
};
-static struct attribute_group lbs_mesh_attr_group = {
+static const struct attribute_group lbs_mesh_attr_group = {
.attrs = lbs_mesh_sysfs_entries,
};
-
-/***************************************************************************
- * Initializing and starting, stopping mesh
- */
-
-/*
- * Check mesh FW version and appropriately send the mesh start
- * command
- */
-int lbs_init_mesh(struct lbs_private *priv)
-{
- struct net_device *dev = priv->dev;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_MESH);
-
- priv->mesh_connect_status = LBS_DISCONNECTED;
-
- /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
- /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
- /* 5.110.22 have mesh command with 0xa3 command id */
- /* 10.0.0.p0 FW brings in mesh config command with different id */
- /* Check FW version MSB and initialize mesh_fw_ver */
- if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) {
- /* Enable mesh, if supported, and work out which TLV it uses.
- 0x100 + 291 is an unofficial value used in 5.110.20.pXX
- 0x100 + 37 is the official value used in 5.110.21.pXX
- but we check them in that order because 20.pXX doesn't
- give an error -- it just silently fails. */
-
- /* 5.110.20.pXX firmware will fail the command if the channel
- doesn't match the existing channel. But only if the TLV
- is correct. If the channel is wrong, _BOTH_ versions will
- give an error to 0x100+291, and allow 0x100+37 to succeed.
- It's just that 5.110.20.pXX will not have done anything
- useful */
-
- priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID;
- if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
- priv->channel)) {
- priv->mesh_tlv = TLV_TYPE_MESH_ID;
- if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
- priv->channel))
- priv->mesh_tlv = 0;
- }
- } else
- if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
- (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK)) {
- /* 10.0.0.pXX new firmwares should succeed with TLV
- * 0x100+37; Do not invoke command with old TLV.
- */
- priv->mesh_tlv = TLV_TYPE_MESH_ID;
- if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
- priv->channel))
- priv->mesh_tlv = 0;
- }
-
-
- if (priv->mesh_tlv) {
- sprintf(priv->mesh_ssid, "mesh");
- priv->mesh_ssid_len = 4;
-
- lbs_add_mesh(priv);
-
- if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
- netdev_err(dev, "cannot register lbs_mesh attribute\n");
-
- ret = 1;
- }
-
- lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
- return ret;
-}
-
-
-int lbs_deinit_mesh(struct lbs_private *priv)
-{
- struct net_device *dev = priv->dev;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_MESH);
-
- if (priv->mesh_tlv) {
- device_remove_file(&dev->dev, &dev_attr_lbs_mesh);
- ret = 1;
- }
-
- lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
- return ret;
-}
-
-
-/**
- * lbs_mesh_stop - close the mshX interface
- *
- * @dev: A pointer to &net_device structure
- * returns: 0
- */
-static int lbs_mesh_stop(struct net_device *dev)
-{
- struct lbs_private *priv = dev->ml_priv;
-
- lbs_deb_enter(LBS_DEB_MESH);
- spin_lock_irq(&priv->driver_lock);
-
- priv->mesh_open = 0;
- priv->mesh_connect_status = LBS_DISCONNECTED;
-
- netif_stop_queue(dev);
- netif_carrier_off(dev);
-
- spin_unlock_irq(&priv->driver_lock);
-
- schedule_work(&priv->mcast_work);
-
- lbs_deb_leave(LBS_DEB_MESH);
- return 0;
-}
-
-/**
- * lbs_mesh_dev_open - open the mshX interface
- *
- * @dev: A pointer to &net_device structure
- * returns: 0 or -EBUSY if monitor mode active
- */
-static int lbs_mesh_dev_open(struct net_device *dev)
-{
- struct lbs_private *priv = dev->ml_priv;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_NET);
-
- spin_lock_irq(&priv->driver_lock);
-
- if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
- ret = -EBUSY;
- goto out;
- }
-
- priv->mesh_open = 1;
- priv->mesh_connect_status = LBS_CONNECTED;
- netif_carrier_on(dev);
-
- if (!priv->tx_pending_len)
- netif_wake_queue(dev);
- out:
-
- spin_unlock_irq(&priv->driver_lock);
- lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret);
- return ret;
-}
-
-static const struct net_device_ops mesh_netdev_ops = {
- .ndo_open = lbs_mesh_dev_open,
- .ndo_stop = lbs_mesh_stop,
- .ndo_start_xmit = lbs_hard_start_xmit,
- .ndo_set_mac_address = lbs_set_mac_address,
- .ndo_set_multicast_list = lbs_set_multicast_list,
-};
-
-/**
- * lbs_add_mesh - add mshX interface
- *
- * @priv: A pointer to the &struct lbs_private structure
- * returns: 0 if successful, -X otherwise
- */
-int lbs_add_mesh(struct lbs_private *priv)
-{
- struct net_device *mesh_dev = NULL;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_MESH);
-
- /* Allocate a virtual mesh device */
- mesh_dev = alloc_netdev(0, "msh%d", ether_setup);
- if (!mesh_dev) {
- lbs_deb_mesh("init mshX device failed\n");
- ret = -ENOMEM;
- goto done;
- }
- mesh_dev->ml_priv = priv;
- priv->mesh_dev = mesh_dev;
-
- mesh_dev->netdev_ops = &mesh_netdev_ops;
- mesh_dev->ethtool_ops = &lbs_ethtool_ops;
- memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN);
-
- SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
-
- mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
- /* Register virtual mesh interface */
- ret = register_netdev(mesh_dev);
- if (ret) {
- pr_err("cannot register mshX virtual interface\n");
- goto err_free;
- }
-
- ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
- if (ret)
- goto err_unregister;
-
- lbs_persist_config_init(mesh_dev);
-
- /* Everything successful */
- ret = 0;
- goto done;
-
-err_unregister:
- unregister_netdev(mesh_dev);
-
-err_free:
- free_netdev(mesh_dev);
-
-done:
- lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
- return ret;
-}
-
-void lbs_remove_mesh(struct lbs_private *priv)
-{
- struct net_device *mesh_dev;
-
- mesh_dev = priv->mesh_dev;
- if (!mesh_dev)
- return;
-
- lbs_deb_enter(LBS_DEB_MESH);
- netif_stop_queue(mesh_dev);
- netif_carrier_off(mesh_dev);
- sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
- lbs_persist_config_remove(mesh_dev);
- unregister_netdev(mesh_dev);
- priv->mesh_dev = NULL;
- free_netdev(mesh_dev);
- lbs_deb_leave(LBS_DEB_MESH);
-}
-
-
-
-/***************************************************************************
- * Sending and receiving
- */
-struct net_device *lbs_mesh_set_dev(struct lbs_private *priv,
- struct net_device *dev, struct rxpd *rxpd)
-{
- if (priv->mesh_dev) {
- if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID) {
- if (rxpd->rx_control & RxPD_MESH_FRAME)
- dev = priv->mesh_dev;
- } else if (priv->mesh_tlv == TLV_TYPE_MESH_ID) {
- if (rxpd->u.bss.bss_num == MESH_IFACE_ID)
- dev = priv->mesh_dev;
- }
- }
- return dev;
-}
-
-
-void lbs_mesh_set_txpd(struct lbs_private *priv,
- struct net_device *dev, struct txpd *txpd)
-{
- if (dev == priv->mesh_dev) {
- if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID)
- txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME);
- else if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
- txpd->u.bss.bss_num = MESH_IFACE_ID;
- }
-}
-
-
-/***************************************************************************
- * Mesh command handling
- */
-
-/**
- * lbs_mesh_bt_add_del - Add or delete Mesh Blinding Table entries
- *
- * @priv: A pointer to &struct lbs_private structure
- * @add: TRUE to add the entry, FALSE to delete it
- * @addr1: Destination address to blind or unblind
- *
- * returns: 0 on success, error on failure
- */
-int lbs_mesh_bt_add_del(struct lbs_private *priv, bool add, u8 *addr1)
-{
- struct cmd_ds_bt_access cmd;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_CMD);
-
- BUG_ON(addr1 == NULL);
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- memcpy(cmd.addr1, addr1, ETH_ALEN);
- if (add) {
- cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_ADD);
- lbs_deb_hex(LBS_DEB_MESH, "BT_ADD: blinded MAC addr",
- addr1, ETH_ALEN);
- } else {
- cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_DEL);
- lbs_deb_hex(LBS_DEB_MESH, "BT_DEL: blinded MAC addr",
- addr1, ETH_ALEN);
- }
-
- ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd);
-
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
- return ret;
-}
-
-/**
- * lbs_mesh_bt_reset - Reset/clear the mesh blinding table
- *
- * @priv: A pointer to &struct lbs_private structure
- *
- * returns: 0 on success, error on failure
- */
-int lbs_mesh_bt_reset(struct lbs_private *priv)
-{
- struct cmd_ds_bt_access cmd;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_CMD);
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_RESET);
-
- ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd);
-
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
- return ret;
-}
-
-/**
- * lbs_mesh_bt_get_inverted - Gets the inverted status of the mesh
- * blinding table
- *
- * Normally the firmware "blinds" or ignores traffic from mesh nodes in the
- * table, but an inverted table allows *only* traffic from nodes listed in
- * the table.
- *
- * @priv: A pointer to &struct lbs_private structure
- * @inverted: On success, TRUE if the blinding table is inverted,
- * FALSE if it is not inverted
- *
- * returns: 0 on success, error on failure
- */
-int lbs_mesh_bt_get_inverted(struct lbs_private *priv, bool *inverted)
-{
- struct cmd_ds_bt_access cmd;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_CMD);
-
- BUG_ON(inverted == NULL);
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_GET_INVERT);
-
- ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd);
- if (ret == 0)
- *inverted = !!cmd.id;
-
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
- return ret;
-}
-
-/**
- * lbs_mesh_bt_set_inverted - Sets the inverted status of the mesh
- * blinding table
- *
- * Normally the firmware "blinds" or ignores traffic from mesh nodes in the
- * table, but an inverted table allows *only* traffic from nodes listed in
- * the table.
- *
- * @priv: A pointer to &struct lbs_private structure
- * @inverted: TRUE to invert the blinding table (only traffic from
- * listed nodes allowed), FALSE to return it
- * to normal state (listed nodes ignored)
- *
- * returns: 0 on success, error on failure
- */
-int lbs_mesh_bt_set_inverted(struct lbs_private *priv, bool inverted)
-{
- struct cmd_ds_bt_access cmd;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_CMD);
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_SET_INVERT);
- cmd.id = cpu_to_le32(!!inverted);
-
- ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd);
-
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
- return ret;
-}
-
-/**
- * lbs_mesh_bt_get_entry - List an entry in the mesh blinding table
- *
- * @priv: A pointer to &struct lbs_private structure
- * @id: The ID of the entry to list
- * @addr1: MAC address associated with the table entry
- *
- * returns: 0 on success, error on failure
- */
-int lbs_mesh_bt_get_entry(struct lbs_private *priv, u32 id, u8 *addr1)
-{
- struct cmd_ds_bt_access cmd;
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_CMD);
-
- BUG_ON(addr1 == NULL);
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_SET_INVERT);
- cmd.id = cpu_to_le32(id);
-
- ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd);
- if (ret == 0)
- memcpy(addr1, cmd.addr1, sizeof(cmd.addr1));
-
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
- return ret;
-}
-
-/**
- * lbs_cmd_fwt_access - Access the mesh forwarding table
- *
- * @priv: A pointer to &struct lbs_private structure
- * @cmd_action: The forwarding table action to perform
- * @cmd: The pre-filled FWT_ACCESS command
- *
- * returns: 0 on success and 'cmd' will be filled with the
- * firmware's response
- */
-int lbs_cmd_fwt_access(struct lbs_private *priv, u16 cmd_action,
- struct cmd_ds_fwt_access *cmd)
-{
- int ret;
-
- lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
-
- cmd->hdr.command = cpu_to_le16(CMD_FWT_ACCESS);
- cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access));
- cmd->hdr.result = 0;
- cmd->action = cpu_to_le16(cmd_action);
-
- ret = lbs_cmd_with_response(priv, CMD_FWT_ACCESS, cmd);
-
- lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
- return 0;
-}
-
-int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
- struct cmd_ds_mesh_access *cmd)
-{
- int ret;
-
- lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
-
- cmd->hdr.command = cpu_to_le16(CMD_MESH_ACCESS);
- cmd->hdr.size = cpu_to_le16(sizeof(*cmd));
- cmd->hdr.result = 0;
-
- cmd->action = cpu_to_le16(cmd_action);
-
- ret = lbs_cmd_with_response(priv, CMD_MESH_ACCESS, cmd);
-
- lbs_deb_leave(LBS_DEB_CMD);
- return ret;
-}
-
-static int __lbs_mesh_config_send(struct lbs_private *priv,
- struct cmd_ds_mesh_config *cmd,
- uint16_t action, uint16_t type)
-{
- int ret;
- u16 command = CMD_MESH_CONFIG_OLD;
-
- lbs_deb_enter(LBS_DEB_CMD);
-
- /*
- * Command id is 0xac for v10 FW along with mesh interface
- * id in bits 14-13-12.
- */
- if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
- command = CMD_MESH_CONFIG |
- (MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET);
-
- cmd->hdr.command = cpu_to_le16(command);
- cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_mesh_config));
- cmd->hdr.result = 0;
-
- cmd->type = cpu_to_le16(type);
- cmd->action = cpu_to_le16(action);
-
- ret = lbs_cmd_with_response(priv, command, cmd);
-
- lbs_deb_leave(LBS_DEB_CMD);
- return ret;
-}
-
-int lbs_mesh_config_send(struct lbs_private *priv,
- struct cmd_ds_mesh_config *cmd,
- uint16_t action, uint16_t type)
-{
- int ret;
-
- if (!(priv->fwcapinfo & FW_CAPINFO_PERSISTENT_CONFIG))
- return -EOPNOTSUPP;
-
- ret = __lbs_mesh_config_send(priv, cmd, action, type);
- return ret;
-}
-
-/* This function is the CMD_MESH_CONFIG legacy function. It only handles the
- * START and STOP actions. The extended actions supported by CMD_MESH_CONFIG
- * are all handled by preparing a struct cmd_ds_mesh_config and passing it to
- * lbs_mesh_config_send.
- */
-int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
-{
- struct cmd_ds_mesh_config cmd;
- struct mrvl_meshie *ie;
- DECLARE_SSID_BUF(ssid);
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.channel = cpu_to_le16(chan);
- ie = (struct mrvl_meshie *)cmd.data;
-
- switch (action) {
- case CMD_ACT_MESH_CONFIG_START:
- ie->id = WLAN_EID_GENERIC;
- ie->val.oui[0] = 0x00;
- ie->val.oui[1] = 0x50;
- ie->val.oui[2] = 0x43;
- ie->val.type = MARVELL_MESH_IE_TYPE;
- ie->val.subtype = MARVELL_MESH_IE_SUBTYPE;
- ie->val.version = MARVELL_MESH_IE_VERSION;
- ie->val.active_protocol_id = MARVELL_MESH_PROTO_ID_HWMP;
- ie->val.active_metric_id = MARVELL_MESH_METRIC_ID;
- ie->val.mesh_capability = MARVELL_MESH_CAPABILITY;
- ie->val.mesh_id_len = priv->mesh_ssid_len;
- memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len);
- ie->len = sizeof(struct mrvl_meshie_val) -
- IEEE80211_MAX_SSID_LEN + priv->mesh_ssid_len;
- cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val));
- break;
- case CMD_ACT_MESH_CONFIG_STOP:
- break;
- default:
- return -1;
- }
- lbs_deb_cmd("mesh config action %d type %x channel %d SSID %s\n",
- action, priv->mesh_tlv, chan,
- print_ssid(ssid, priv->mesh_ssid, priv->mesh_ssid_len));
-
- return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
-}
-
-
-
/***************************************************************************
* Persistent configuration support
*/
@@ -1231,7 +770,7 @@ static struct attribute *boot_opts_attrs[] = {
NULL
};
-static struct attribute_group boot_opts_group = {
+static const struct attribute_group boot_opts_group = {
.name = "boot_options",
.attrs = boot_opts_attrs,
};
@@ -1244,31 +783,299 @@ static struct attribute *mesh_ie_attrs[] = {
NULL
};
-static struct attribute_group mesh_ie_group = {
+static const struct attribute_group mesh_ie_group = {
.name = "mesh_ie",
.attrs = mesh_ie_attrs,
};
-void lbs_persist_config_init(struct net_device *dev)
+static void lbs_persist_config_init(struct net_device *dev)
{
int ret;
ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
}
-void lbs_persist_config_remove(struct net_device *dev)
+static void lbs_persist_config_remove(struct net_device *dev)
{
sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group);
sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group);
}
+/***************************************************************************
+ * Initializing and starting, stopping mesh
+ */
+
+/*
+ * Check mesh FW version and appropriately send the mesh start
+ * command
+ */
+int lbs_init_mesh(struct lbs_private *priv)
+{
+ struct net_device *dev = priv->dev;
+ int ret = 0;
+
+ lbs_deb_enter(LBS_DEB_MESH);
+
+ /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
+ /* 5.0.16p0 and 9.0.0.p0 are known to NOT support any mesh */
+ /* 5.110.22 have mesh command with 0xa3 command id */
+ /* 10.0.0.p0 FW brings in mesh config command with different id */
+ /* Check FW version MSB and initialize mesh_fw_ver */
+ if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) {
+ /* Enable mesh, if supported, and work out which TLV it uses.
+ 0x100 + 291 is an unofficial value used in 5.110.20.pXX
+ 0x100 + 37 is the official value used in 5.110.21.pXX
+ but we check them in that order because 20.pXX doesn't
+ give an error -- it just silently fails. */
+
+ /* 5.110.20.pXX firmware will fail the command if the channel
+ doesn't match the existing channel. But only if the TLV
+ is correct. If the channel is wrong, _BOTH_ versions will
+ give an error for 0x100+291, and allow 0x100+37 to succeed.
+ It's just that 5.110.20.pXX will not have done anything
+ useful */
+
+ priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID;
+ if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
+ priv->channel)) {
+ priv->mesh_tlv = TLV_TYPE_MESH_ID;
+ if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
+ priv->channel))
+ priv->mesh_tlv = 0;
+ }
+ } else
+ if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
+ (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK)) {
+ /* 10.0.0.pXX new firmwares should succeed with TLV
+ * 0x100+37; Do not invoke command with old TLV.
+ */
+ priv->mesh_tlv = TLV_TYPE_MESH_ID;
+ if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
+ priv->channel))
+ priv->mesh_tlv = 0;
+ }
+
+ /* Stop meshing until interface is brought up */
+ lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, priv->channel);
+
+ if (priv->mesh_tlv) {
+ sprintf(priv->mesh_ssid, "mesh");
+ priv->mesh_ssid_len = 4;
+
+ lbs_add_mesh(priv);
+
+ if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
+ netdev_err(dev, "cannot register lbs_mesh attribute\n");
+
+ ret = 1;
+ }
+
+ lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
+ return ret;
+}
+
+
+int lbs_deinit_mesh(struct lbs_private *priv)
+{
+ struct net_device *dev = priv->dev;
+ int ret = 0;
+
+ lbs_deb_enter(LBS_DEB_MESH);
+
+ if (priv->mesh_tlv) {
+ device_remove_file(&dev->dev, &dev_attr_lbs_mesh);
+ ret = 1;
+ }
+
+ lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
+ return ret;
+}
+
+
+/**
+ * lbs_mesh_stop - close the mshX interface
+ *
+ * @dev: A pointer to &net_device structure
+ * returns: 0
+ */
+static int lbs_mesh_stop(struct net_device *dev)
+{
+ struct lbs_private *priv = dev->ml_priv;
+
+ lbs_deb_enter(LBS_DEB_MESH);
+ lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_STOP, priv->channel);
+
+ spin_lock_irq(&priv->driver_lock);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ spin_unlock_irq(&priv->driver_lock);
+
+ schedule_work(&priv->mcast_work);
+
+ lbs_deb_leave(LBS_DEB_MESH);
+ return 0;
+}
+
+/**
+ * lbs_mesh_dev_open - open the mshX interface
+ *
+ * @dev: A pointer to &net_device structure
+ * returns: 0 or -EBUSY if monitor mode active
+ */
+static int lbs_mesh_dev_open(struct net_device *dev)
+{
+ struct lbs_private *priv = dev->ml_priv;
+ int ret = 0;
+
+ lbs_deb_enter(LBS_DEB_NET);
+
+ spin_lock_irq(&priv->driver_lock);
+
+ if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+ ret = -EBUSY;
+ spin_unlock_irq(&priv->driver_lock);
+ goto out;
+ }
+
+ netif_carrier_on(dev);
+
+ if (!priv->tx_pending_len)
+ netif_wake_queue(dev);
+
+ spin_unlock_irq(&priv->driver_lock);
+
+ ret = lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, priv->channel);
+
+out:
+ lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret);
+ return ret;
+}
+
+static const struct net_device_ops mesh_netdev_ops = {
+ .ndo_open = lbs_mesh_dev_open,
+ .ndo_stop = lbs_mesh_stop,
+ .ndo_start_xmit = lbs_hard_start_xmit,
+ .ndo_set_mac_address = lbs_set_mac_address,
+ .ndo_set_multicast_list = lbs_set_multicast_list,
+};
+
+/**
+ * lbs_add_mesh - add mshX interface
+ *
+ * @priv: A pointer to the &struct lbs_private structure
+ * returns: 0 if successful, -X otherwise
+ */
+static int lbs_add_mesh(struct lbs_private *priv)
+{
+ struct net_device *mesh_dev = NULL;
+ int ret = 0;
+
+ lbs_deb_enter(LBS_DEB_MESH);
+
+ /* Allocate a virtual mesh device */
+ mesh_dev = alloc_netdev(0, "msh%d", ether_setup);
+ if (!mesh_dev) {
+ lbs_deb_mesh("init mshX device failed\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+ mesh_dev->ml_priv = priv;
+ priv->mesh_dev = mesh_dev;
+
+ mesh_dev->netdev_ops = &mesh_netdev_ops;
+ mesh_dev->ethtool_ops = &lbs_ethtool_ops;
+ memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN);
+
+ SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
+
+ mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+ /* Register virtual mesh interface */
+ ret = register_netdev(mesh_dev);
+ if (ret) {
+ pr_err("cannot register mshX virtual interface\n");
+ goto err_free;
+ }
+
+ ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
+ if (ret)
+ goto err_unregister;
+
+ lbs_persist_config_init(mesh_dev);
+
+ /* Everything successful */
+ ret = 0;
+ goto done;
+
+err_unregister:
+ unregister_netdev(mesh_dev);
+
+err_free:
+ free_netdev(mesh_dev);
+
+done:
+ lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
+ return ret;
+}
+
+void lbs_remove_mesh(struct lbs_private *priv)
+{
+ struct net_device *mesh_dev;
+
+ mesh_dev = priv->mesh_dev;
+ if (!mesh_dev)
+ return;
+
+ lbs_deb_enter(LBS_DEB_MESH);
+ netif_stop_queue(mesh_dev);
+ netif_carrier_off(mesh_dev);
+ sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
+ lbs_persist_config_remove(mesh_dev);
+ unregister_netdev(mesh_dev);
+ priv->mesh_dev = NULL;
+ free_netdev(mesh_dev);
+ lbs_deb_leave(LBS_DEB_MESH);
+}
+
+
+/***************************************************************************
+ * Sending and receiving
+ */
+struct net_device *lbs_mesh_set_dev(struct lbs_private *priv,
+ struct net_device *dev, struct rxpd *rxpd)
+{
+ if (priv->mesh_dev) {
+ if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID) {
+ if (rxpd->rx_control & RxPD_MESH_FRAME)
+ dev = priv->mesh_dev;
+ } else if (priv->mesh_tlv == TLV_TYPE_MESH_ID) {
+ if (rxpd->u.bss.bss_num == MESH_IFACE_ID)
+ dev = priv->mesh_dev;
+ }
+ }
+ return dev;
+}
+
+
+void lbs_mesh_set_txpd(struct lbs_private *priv,
+ struct net_device *dev, struct txpd *txpd)
+{
+ if (dev == priv->mesh_dev) {
+ if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID)
+ txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME);
+ else if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
+ txpd->u.bss.bss_num = MESH_IFACE_ID;
+ }
+}
+
/***************************************************************************
* Ethtool related
*/
-static const char *mesh_stat_strings[] = {
+static const char * const mesh_stat_strings[] = {
"drop_duplicate_bcast",
"drop_ttl_zero",
"drop_no_fwd_route",
diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h
index ee95c73ed5f..50144913f2a 100644
--- a/drivers/net/wireless/libertas/mesh.h
+++ b/drivers/net/wireless/libertas/mesh.h
@@ -31,7 +31,6 @@ struct lbs_private;
int lbs_init_mesh(struct lbs_private *priv);
int lbs_deinit_mesh(struct lbs_private *priv);
-int lbs_add_mesh(struct lbs_private *priv);
void lbs_remove_mesh(struct lbs_private *priv);
@@ -52,29 +51,6 @@ struct cmd_ds_command;
struct cmd_ds_mesh_access;
struct cmd_ds_mesh_config;
-int lbs_mesh_bt_add_del(struct lbs_private *priv, bool add, u8 *addr1);
-int lbs_mesh_bt_reset(struct lbs_private *priv);
-int lbs_mesh_bt_get_inverted(struct lbs_private *priv, bool *inverted);
-int lbs_mesh_bt_set_inverted(struct lbs_private *priv, bool inverted);
-int lbs_mesh_bt_get_entry(struct lbs_private *priv, u32 id, u8 *addr1);
-
-int lbs_cmd_fwt_access(struct lbs_private *priv, u16 cmd_action,
- struct cmd_ds_fwt_access *cmd);
-
-int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
- struct cmd_ds_mesh_access *cmd);
-int lbs_mesh_config_send(struct lbs_private *priv,
- struct cmd_ds_mesh_config *cmd,
- uint16_t action, uint16_t type);
-int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
-
-
-
-/* Persistent configuration */
-
-void lbs_persist_config_init(struct net_device *net);
-void lbs_persist_config_remove(struct net_device *net);
-
/* Ethtool statistics */
@@ -87,11 +63,6 @@ void lbs_mesh_ethtool_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *s);
-/* Accessors */
-
-#define lbs_mesh_open(priv) (priv->mesh_open)
-#define lbs_mesh_connected(priv) (priv->mesh_connect_status == LBS_CONNECTED)
-
#else
#define lbs_init_mesh(priv)
@@ -101,8 +72,6 @@ void lbs_mesh_ethtool_get_strings(struct net_device *dev,
#define lbs_mesh_set_dev(priv, dev, rxpd) (dev)
#define lbs_mesh_set_txpd(priv, dev, txpd)
#define lbs_mesh_config(priv, enable, chan)
-#define lbs_mesh_open(priv) (0)
-#define lbs_mesh_connected(priv) (0)
#endif
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index fdb0448301a..bfb8898ae51 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/etherdevice.h>
+#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <net/cfg80211.h>
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index bbb95f88dc0..a6e85134cfe 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -1,6 +1,7 @@
/*
* This file contains the handling of TX in wlan driver.
*/
+#include <linux/hardirq.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
@@ -198,7 +199,7 @@ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
if (priv->connect_status == LBS_CONNECTED)
netif_wake_queue(priv->dev);
- if (priv->mesh_dev && lbs_mesh_connected(priv))
+ if (priv->mesh_dev && netif_running(priv->mesh_dev))
netif_wake_queue(priv->mesh_dev);
}
EXPORT_SYMBOL_GPL(lbs_send_tx_feedback);
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
index 8945afd6ce3..13557fe0bf9 100644
--- a/drivers/net/wireless/libertas_tf/cmd.c
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -9,6 +9,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/hardirq.h>
#include <linux/slab.h>
#include "libertas_tf.h"
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index d4005081f1d..acc461aa385 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -9,6 +9,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
@@ -585,7 +586,7 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
need_padding ^= ieee80211_has_a4(hdr->frame_control);
need_padding ^= ieee80211_is_data_qos(hdr->frame_control) &&
(*ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CONTROL_A_MSDU_PRESENT);
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
if (need_padding) {
memmove(skb->data + 2, skb->data, skb->len);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9d4a40ee16c..031cd89b176 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1,6 +1,7 @@
/*
* mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
+ * Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -25,11 +26,17 @@
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
+#include <net/genetlink.h>
+#include "mac80211_hwsim.h"
+
+#define WARN_QUEUE 100
+#define MAX_QUEUE 200
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
MODULE_LICENSE("GPL");
+int wmediumd_pid;
static int radios = 2;
module_param(radios, int, 0444);
MODULE_PARM_DESC(radios, "Number of simulated radios");
@@ -302,6 +309,7 @@ struct mac80211_hwsim_data {
struct dentry *debugfs;
struct dentry *debugfs_ps;
+ struct sk_buff_head pending; /* packets pending */
/*
* Only radios in the same group can communicate together (the
* channel has to match too). Each bit represents a group. A
@@ -322,6 +330,32 @@ struct hwsim_radiotap_hdr {
__le16 rt_chbitmask;
} __packed;
+/* MAC80211_HWSIM netlink family */
+static struct genl_family hwsim_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .hdrsize = 0,
+ .name = "MAC80211_HWSIM",
+ .version = 1,
+ .maxattr = HWSIM_ATTR_MAX,
+};
+
+/* MAC80211_HWSIM netlink policy */
+
+static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
+ [HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC,
+ .len = 6*sizeof(u8) },
+ [HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC,
+ .len = 6*sizeof(u8) },
+ [HWSIM_ATTR_FRAME] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_DATA_LEN },
+ [HWSIM_ATTR_FLAGS] = { .type = NLA_U32 },
+ [HWSIM_ATTR_RX_RATE] = { .type = NLA_U32 },
+ [HWSIM_ATTR_SIGNAL] = { .type = NLA_U32 },
+ [HWSIM_ATTR_TX_INFO] = { .type = NLA_UNSPEC,
+ .len = IEEE80211_TX_MAX_RATES*sizeof(
+ struct hwsim_tx_rate)},
+ [HWSIM_ATTR_COOKIE] = { .type = NLA_U64 },
+};
static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb,
struct net_device *dev)
@@ -478,9 +512,89 @@ static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
return md.ret;
}
+static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
+ struct sk_buff *my_skb,
+ int dst_pid)
+{
+ struct sk_buff *skb;
+ struct mac80211_hwsim_data *data = hw->priv;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) my_skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(my_skb);
+ void *msg_head;
+ unsigned int hwsim_flags = 0;
+ int i;
+ struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES];
+
+ if (data->idle) {
+ wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
+ dev_kfree_skb(my_skb);
+ return;
+ }
+
+ if (data->ps != PS_DISABLED)
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+ /* If the queue contains MAX_QUEUE skb's drop some */
+ if (skb_queue_len(&data->pending) >= MAX_QUEUE) {
+ /* Dropping until WARN_QUEUE level */
+ while (skb_queue_len(&data->pending) >= WARN_QUEUE)
+ skb_dequeue(&data->pending);
+ }
+
+ skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (skb == NULL)
+ goto nla_put_failure;
+
+ msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0,
+ HWSIM_CMD_FRAME);
+ if (msg_head == NULL) {
+ printk(KERN_DEBUG "mac80211_hwsim: problem with msg_head\n");
+ goto nla_put_failure;
+ }
+
+ NLA_PUT(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
+ sizeof(struct mac_address), data->addresses[1].addr);
-static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
- struct sk_buff *skb)
+ /* We get the skb->data */
+ NLA_PUT(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data);
+
+ /* We get the flags for this transmission, and we translate them to
+ wmediumd flags */
+
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
+ hwsim_flags |= HWSIM_TX_CTL_REQ_TX_STATUS;
+
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ hwsim_flags |= HWSIM_TX_CTL_NO_ACK;
+
+ NLA_PUT_U32(skb, HWSIM_ATTR_FLAGS, hwsim_flags);
+
+ /* We get the tx control (rate and retries) info */
+
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ tx_attempts[i].idx = info->status.rates[i].idx;
+ tx_attempts[i].count = info->status.rates[i].count;
+ }
+
+ NLA_PUT(skb, HWSIM_ATTR_TX_INFO,
+ sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES,
+ tx_attempts);
+
+ /* We create a cookie to identify this skb */
+ NLA_PUT_U64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb);
+
+ genlmsg_end(skb, msg_head);
+ genlmsg_unicast(&init_net, skb, dst_pid);
+
+ /* Enqueue the packet */
+ skb_queue_tail(&data->pending, my_skb);
+ return;
+
+nla_put_failure:
+ printk(KERN_DEBUG "mac80211_hwsim: error occured in %s\n", __func__);
+}
+
+static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
{
struct mac80211_hwsim_data *data = hw->priv, *data2;
bool ack = false;
@@ -540,11 +654,11 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
return ack;
}
-
static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
bool ack;
struct ieee80211_tx_info *txi;
+ int _pid;
mac80211_hwsim_monitor_rx(hw, skb);
@@ -554,7 +668,15 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
}
- ack = mac80211_hwsim_tx_frame(hw, skb);
+ /* wmediumd mode check */
+ _pid = wmediumd_pid;
+
+ if (_pid)
+ return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+
+ /* NO wmediumd detected, perfect medium simulation */
+ ack = mac80211_hwsim_tx_frame_no_nl(hw, skb);
+
if (ack && skb->len >= 16) {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
mac80211_hwsim_monitor_ack(hw, hdr->addr2);
@@ -635,6 +757,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
struct ieee80211_hw *hw = arg;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
+ int _pid;
hwsim_check_magic(vif);
@@ -649,7 +772,14 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
info = IEEE80211_SKB_CB(skb);
mac80211_hwsim_monitor_rx(hw, skb);
- mac80211_hwsim_tx_frame(hw, skb);
+
+ /* wmediumd mode check */
+ _pid = wmediumd_pid;
+
+ if (_pid)
+ return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+
+ mac80211_hwsim_tx_frame_no_nl(hw, skb);
dev_kfree_skb(skb);
}
@@ -966,12 +1096,7 @@ static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
static void mac80211_hwsim_flush(struct ieee80211_hw *hw, bool drop)
{
- /*
- * In this special case, there's nothing we need to
- * do because hwsim does transmission synchronously.
- * In the future, when it does transmissions via
- * userspace, we may need to do something.
- */
+ /* Not implemented, queues only on kernel side */
}
struct hw_scan_done {
@@ -1005,6 +1130,8 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
for (i = 0; i < req->n_channels; i++)
printk(KERN_DEBUG "hwsim hw_scan freq %d\n",
req->channels[i]->center_freq);
+ print_hex_dump(KERN_DEBUG, "scan IEs: ", DUMP_PREFIX_OFFSET,
+ 16, 1, req->ie, req->ie_len, 1);
ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ);
@@ -1119,6 +1246,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_pspoll *pspoll;
+ int _pid;
if (!vp->assoc)
return;
@@ -1137,8 +1265,15 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
pspoll->aid = cpu_to_le16(0xc000 | vp->aid);
memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
memcpy(pspoll->ta, mac, ETH_ALEN);
- if (!mac80211_hwsim_tx_frame(data->hw, skb))
- printk(KERN_DEBUG "%s: PS-Poll frame not ack'ed\n", __func__);
+
+ /* wmediumd mode check */
+ _pid = wmediumd_pid;
+
+ if (_pid)
+ return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
+
+ if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
+ printk(KERN_DEBUG "%s: PS-poll frame not ack'ed\n", __func__);
dev_kfree_skb(skb);
}
@@ -1149,6 +1284,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
+ int _pid;
if (!vp->assoc)
return;
@@ -1168,7 +1304,14 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
memcpy(hdr->addr2, mac, ETH_ALEN);
memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
- if (!mac80211_hwsim_tx_frame(data->hw, skb))
+
+ /* wmediumd mode check */
+ _pid = wmediumd_pid;
+
+ if (_pid)
+ return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
+
+ if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__);
dev_kfree_skb(skb);
}
@@ -1248,6 +1391,273 @@ DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
hwsim_fops_group_read, hwsim_fops_group_write,
"%llx\n");
+struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(
+ struct mac_address *addr)
+{
+ struct mac80211_hwsim_data *data;
+ bool _found = false;
+
+ spin_lock_bh(&hwsim_radio_lock);
+ list_for_each_entry(data, &hwsim_radios, list) {
+ if (memcmp(data->addresses[1].addr, addr,
+ sizeof(struct mac_address)) == 0) {
+ _found = true;
+ break;
+ }
+ }
+ spin_unlock_bh(&hwsim_radio_lock);
+
+ if (!_found)
+ return NULL;
+
+ return data;
+}
+
+static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
+ struct genl_info *info)
+{
+
+ struct ieee80211_hdr *hdr;
+ struct mac80211_hwsim_data *data2;
+ struct ieee80211_tx_info *txi;
+ struct hwsim_tx_rate *tx_attempts;
+ struct sk_buff __user *ret_skb;
+ struct sk_buff *skb, *tmp;
+ struct mac_address *src;
+ unsigned int hwsim_flags;
+
+ int i;
+ bool found = false;
+
+ if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
+ !info->attrs[HWSIM_ATTR_FLAGS] ||
+ !info->attrs[HWSIM_ATTR_COOKIE] ||
+ !info->attrs[HWSIM_ATTR_TX_INFO])
+ goto out;
+
+ src = (struct mac_address *)nla_data(
+ info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]);
+ hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]);
+
+ ret_skb = (struct sk_buff __user *)
+ (unsigned long) nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]);
+
+ data2 = get_hwsim_data_ref_from_addr(src);
+
+ if (data2 == NULL)
+ goto out;
+
+ /* look for the skb matching the cookie passed back from user */
+ skb_queue_walk_safe(&data2->pending, skb, tmp) {
+ if (skb == ret_skb) {
+ skb_unlink(skb, &data2->pending);
+ found = true;
+ break;
+ }
+ }
+
+ /* not found */
+ if (!found)
+ goto out;
+
+ /* Tx info received because the frame was broadcast to user space,
+ so we get all the necessary info: tx attempts and the skb control buffer */
+
+ tx_attempts = (struct hwsim_tx_rate *)nla_data(
+ info->attrs[HWSIM_ATTR_TX_INFO]);
+
+ /* now send back TX status */
+ txi = IEEE80211_SKB_CB(skb);
+
+ if (txi->control.vif)
+ hwsim_check_magic(txi->control.vif);
+ if (txi->control.sta)
+ hwsim_check_sta_magic(txi->control.sta);
+
+ ieee80211_tx_info_clear_status(txi);
+
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ txi->status.rates[i].idx = tx_attempts[i].idx;
+ txi->status.rates[i].count = tx_attempts[i].count;
+ /*txi->status.rates[i].flags = 0;*/
+ }
+
+ txi->status.ack_signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
+
+ if (!(hwsim_flags & HWSIM_TX_CTL_NO_ACK) &&
+ (hwsim_flags & HWSIM_TX_STAT_ACK)) {
+ if (skb->len >= 16) {
+ hdr = (struct ieee80211_hdr *) skb->data;
+ mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2);
+ }
+ }
+ ieee80211_tx_status_irqsafe(data2->hw, skb);
+ return 0;
+out:
+ return -EINVAL;
+
+}
+
+static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
+ struct genl_info *info)
+{
+
+ struct mac80211_hwsim_data *data2;
+ struct ieee80211_rx_status rx_status;
+ struct mac_address *dst;
+ int frame_data_len;
+ char *frame_data;
+ struct sk_buff *skb = NULL;
+
+ if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
+ !info->attrs[HWSIM_ATTR_FRAME] ||
+ !info->attrs[HWSIM_ATTR_RX_RATE] ||
+ !info->attrs[HWSIM_ATTR_SIGNAL])
+ goto out;
+
+ dst = (struct mac_address *)nla_data(
+ info->attrs[HWSIM_ATTR_ADDR_RECEIVER]);
+
+ frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]);
+ frame_data = (char *)nla_data(info->attrs[HWSIM_ATTR_FRAME]);
+
+ /* Allocate new skb here */
+ skb = alloc_skb(frame_data_len, GFP_KERNEL);
+ if (skb == NULL)
+ goto err;
+
+ if (frame_data_len <= IEEE80211_MAX_DATA_LEN) {
+ /* Copy the data */
+ memcpy(skb_put(skb, frame_data_len), frame_data,
+ frame_data_len);
+ } else
+ goto err;
+
+ data2 = get_hwsim_data_ref_from_addr(dst);
+
+ if (data2 == NULL)
+ goto out;
+
+ /* check if radio is configured properly */
+
+ if (data2->idle || !data2->started || !data2->channel)
+ goto out;
+
+ /*A frame is received from user space*/
+ memset(&rx_status, 0, sizeof(rx_status));
+ rx_status.freq = data2->channel->center_freq;
+ rx_status.band = data2->channel->band;
+ rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
+ rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
+
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+ ieee80211_rx_irqsafe(data2->hw, skb);
+
+ return 0;
+err:
+ printk(KERN_DEBUG "mac80211_hwsim: error occured in %s\n", __func__);
+ goto out;
+out:
+ dev_kfree_skb(skb);
+ return -EINVAL;
+}
+
+static int hwsim_register_received_nl(struct sk_buff *skb_2,
+ struct genl_info *info)
+{
+ if (info == NULL)
+ goto out;
+
+ wmediumd_pid = info->snd_pid;
+
+ printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, "
+ "switching to wmediumd mode with pid %d\n", info->snd_pid);
+
+ return 0;
+out:
+ printk(KERN_DEBUG "mac80211_hwsim: error occured in %s\n", __func__);
+ return -EINVAL;
+}
+
+/* Generic Netlink operations array */
+static struct genl_ops hwsim_ops[] = {
+ {
+ .cmd = HWSIM_CMD_REGISTER,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_register_received_nl,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = HWSIM_CMD_FRAME,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_cloned_frame_received_nl,
+ },
+ {
+ .cmd = HWSIM_CMD_TX_INFO_FRAME,
+ .policy = hwsim_genl_policy,
+ .doit = hwsim_tx_info_frame_received_nl,
+ },
+};
+
+static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
+ unsigned long state,
+ void *_notify)
+{
+ struct netlink_notify *notify = _notify;
+
+ if (state != NETLINK_URELEASE)
+ return NOTIFY_DONE;
+
+ if (notify->pid == wmediumd_pid) {
+ printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
+ " socket, switching to perfect channel medium\n");
+ wmediumd_pid = 0;
+ }
+ return NOTIFY_DONE;
+
+}
+
+static struct notifier_block hwsim_netlink_notifier = {
+ .notifier_call = mac80211_hwsim_netlink_notify,
+};
+
+static int hwsim_init_netlink(void)
+{
+ int rc;
+ printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
+
+ wmediumd_pid = 0;
+
+ rc = genl_register_family_with_ops(&hwsim_genl_family,
+ hwsim_ops, ARRAY_SIZE(hwsim_ops));
+ if (rc)
+ goto failure;
+
+ rc = netlink_register_notifier(&hwsim_netlink_notifier);
+ if (rc)
+ goto failure;
+
+ return 0;
+
+failure:
+ printk(KERN_DEBUG "mac80211_hwsim: error occured in %s\n", __func__);
+ return -EINVAL;
+}
+
+static void hwsim_exit_netlink(void)
+{
+ int ret;
+
+ printk(KERN_INFO "mac80211_hwsim: closing netlink\n");
+ /* unregister the notifier */
+ netlink_unregister_notifier(&hwsim_netlink_notifier);
+ /* unregister the family */
+ ret = genl_unregister_family(&hwsim_genl_family);
+ if (ret)
+ printk(KERN_DEBUG "mac80211_hwsim: "
+ "unregister family %i\n", ret);
+}
+
static int __init init_mac80211_hwsim(void)
{
int i, err = 0;
@@ -1298,6 +1708,7 @@ static int __init init_mac80211_hwsim(void)
goto failed_drvdata;
}
data->dev->driver = &mac80211_hwsim_driver;
+ skb_queue_head_init(&data->pending);
SET_IEEE80211_DEV(hw, data->dev);
addr[3] = i >> 8;
@@ -1379,6 +1790,10 @@ static int __init init_mac80211_hwsim(void)
data->group = 1;
mutex_init(&data->mutex);
+ /* Enable frame retransmissions for lossy channels */
+ hw->max_rates = 4;
+ hw->max_rate_tries = 11;
+
/* Work to be done prior to ieee80211_register_hw() */
switch (regtest) {
case HWSIM_REGTEST_DISABLED:
@@ -1515,12 +1930,29 @@ static int __init init_mac80211_hwsim(void)
if (hwsim_mon == NULL)
goto failed;
- err = register_netdev(hwsim_mon);
+ rtnl_lock();
+
+ err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
if (err < 0)
goto failed_mon;
+
+ err = register_netdevice(hwsim_mon);
+ if (err < 0)
+ goto failed_mon;
+
+ rtnl_unlock();
+
+ err = hwsim_init_netlink();
+ if (err < 0)
+ goto failed_nl;
+
return 0;
+failed_nl:
+ printk(KERN_DEBUG "mac_80211_hwsim: failed initializing netlink\n");
+ return err;
+
failed_mon:
rtnl_unlock();
free_netdev(hwsim_mon);
@@ -1541,6 +1973,8 @@ static void __exit exit_mac80211_hwsim(void)
{
printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n");
+ hwsim_exit_netlink();
+
mac80211_hwsim_free();
unregister_netdev(hwsim_mon);
}
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
new file mode 100644
index 00000000000..afaad5a443b
--- /dev/null
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -0,0 +1,133 @@
+/*
+ * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
+ * Copyright (c) 2008, Jouni Malinen <j@w1.fi>
+ * Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MAC80211_HWSIM_H
+#define __MAC80211_HWSIM_H
+
+/**
+ * enum hwsim_tx_control_flags - flags to describe transmission info/status
+ *
+ * These flags are used to give the wmediumd extra information in order to
+ * modify its behavior for each frame
+ *
+ * @HWSIM_TX_CTL_REQ_TX_STATUS: require TX status callback for this frame.
+ * @HWSIM_TX_CTL_NO_ACK: tell the wmediumd not to wait for an ack
+ * @HWSIM_TX_STAT_ACK: Frame was acknowledged
+ *
+ */
+enum hwsim_tx_control_flags {
+ HWSIM_TX_CTL_REQ_TX_STATUS = BIT(0),
+ HWSIM_TX_CTL_NO_ACK = BIT(1),
+ HWSIM_TX_STAT_ACK = BIT(2),
+};
+
+/**
+ * DOC: Frame transmission/registration support
+ *
+ * Frame transmission and registration support exists to allow userspace
+ * entities such as wmediumd to receive and process all broadcasted
+ * frames from a mac80211_hwsim radio device.
+ *
+ * This allows user space applications to decide whether the frame should
+ * be dropped and to implement a wireless medium simulator in user space.
+ *
+ * Registration is done by sending a register message to the driver and
+ * will be automatically unregistered if the user application does not
+ * respond to sent frames.
+ * Once registered, the user application has to take responsibility for
+ * broadcasting the frames to all listening mac80211_hwsim radio
+ * interfaces.
+ *
+ * For more technical details, see the corresponding command descriptions
+ * below.
+ */
+
+/**
+ * enum hwsim_commands - supported hwsim commands
+ *
+ * @HWSIM_CMD_UNSPEC: unspecified command to catch errors
+ *
+ * @HWSIM_CMD_REGISTER: request to register and receive all broadcasted
+ * frames by any mac80211_hwsim radio device.
+ * @HWSIM_CMD_FRAME: send/receive a broadcasted frame from/to kernel/user
+ * space, uses:
+ * %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_ADDR_RECEIVER,
+ * %HWSIM_ATTR_FRAME, %HWSIM_ATTR_FLAGS, %HWSIM_ATTR_RX_RATE,
+ * %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
+ * @HWSIM_CMD_TX_INFO_FRAME: Transmission info report from user space to
+ * kernel, uses:
+ * %HWSIM_ATTR_ADDR_TRANSMITTER, %HWSIM_ATTR_FLAGS,
+ * %HWSIM_ATTR_TX_INFO, %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
+ * @__HWSIM_CMD_MAX: enum limit
+ */
+enum {
+ HWSIM_CMD_UNSPEC,
+ HWSIM_CMD_REGISTER,
+ HWSIM_CMD_FRAME,
+ HWSIM_CMD_TX_INFO_FRAME,
+ __HWSIM_CMD_MAX,
+};
+#define HWSIM_CMD_MAX (__HWSIM_CMD_MAX - 1)
+
+/**
+ * enum hwsim_attrs - hwsim netlink attributes
+ *
+ * @HWSIM_ATTR_UNSPEC: unspecified attribute to catch errors
+ *
+ * @HWSIM_ATTR_ADDR_RECEIVER: MAC address of the radio device that
+ * the frame is broadcasted to
+ * @HWSIM_ATTR_ADDR_TRANSMITTER: MAC address of the radio device that
+ * the frame was broadcasted from
+ * @HWSIM_ATTR_FRAME: Data array
+ * @HWSIM_ATTR_FLAGS: mac80211 transmission flags, used to process
+ * the frame properly in user space
+ * @HWSIM_ATTR_RX_RATE: estimated rx rate index for this frame in user
+ * space
+ * @HWSIM_ATTR_SIGNAL: estimated RX signal for this frame in user
+ * space
+ * @HWSIM_ATTR_TX_INFO: ieee80211_tx_rate array
+ * @HWSIM_ATTR_COOKIE: sk_buff cookie to identify the frame
+ * @__HWSIM_ATTR_MAX: enum limit
+ */
+
+
+enum {
+ HWSIM_ATTR_UNSPEC,
+ HWSIM_ATTR_ADDR_RECEIVER,
+ HWSIM_ATTR_ADDR_TRANSMITTER,
+ HWSIM_ATTR_FRAME,
+ HWSIM_ATTR_FLAGS,
+ HWSIM_ATTR_RX_RATE,
+ HWSIM_ATTR_SIGNAL,
+ HWSIM_ATTR_TX_INFO,
+ HWSIM_ATTR_COOKIE,
+ __HWSIM_ATTR_MAX,
+};
+#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
+
+/**
+ * struct hwsim_tx_rate - rate selection/status
+ *
+ * @idx: rate index to attempt to send with
+ * @count: number of tries in this rate before going to the next rate
+ *
+ * A value of -1 for @idx indicates an invalid rate and, if used
+ * in an array of retry rates, that no more rates should be tried.
+ *
+ * When used for transmit status reporting, the driver should
+ * always report the rate and number of retries used.
+ *
+ */
+struct hwsim_tx_rate {
+ s8 idx;
+ u8 count;
+} __packed;
+
+#endif /* __MAC80211_HWSIM_H */
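The DOC block above only describes the registration handshake in prose. Below is a minimal userspace sketch of that first step; it is not wmediumd itself, it assumes the libnl-3 genl API (nl_socket_alloc(), genl_connect(), genl_ctrl_resolve(), nl_send_auto_complete()) and a locally copied mac80211_hwsim.h, and the helper name hwsim_register() is made up for the example. Only the family name "MAC80211_HWSIM" and HWSIM_CMD_REGISTER come from the code above: sending HWSIM_CMD_REGISTER once is enough for the driver to record the sender's netlink pid in wmediumd_pid and start forwarding frames.

/*
 * Sketch only: register with mac80211_hwsim over generic netlink so
 * that frames are forwarded to this process.  Assumes libnl-3
 * (-lnl-3 -lnl-genl-3).  HWSIM_CMD_REGISTER is GENL_ADMIN_PERM, so
 * CAP_NET_ADMIN is required.  Error paths are trimmed for brevity.
 */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include "mac80211_hwsim.h"		/* HWSIM_CMD_REGISTER (local copy) */

static int hwsim_register(struct nl_sock *sock)
{
	struct nl_msg *msg;
	int family;

	if (genl_connect(sock))
		return -1;

	/* Must match hwsim_genl_family.name in the driver */
	family = genl_ctrl_resolve(sock, "MAC80211_HWSIM");
	if (family < 0)
		return family;

	msg = nlmsg_alloc();
	if (!msg)
		return -1;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
		    0, 0, HWSIM_CMD_REGISTER, 1);
	nl_send_auto_complete(sock, msg);	/* driver records our pid */
	nlmsg_free(msg);

	return 0;
}

Closing the socket later is enough to undo this: the NETLINK_URELEASE notifier added in mac80211_hwsim.c above clears wmediumd_pid and the driver falls back to the perfect-medium path.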
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 916183d3900..34bba523429 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -185,13 +185,12 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
*
* Handling includes changing the header fields into CPU format.
*/
-int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp, void *data_buf)
+int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp,
+ struct mwifiex_ds_11n_tx_cfg *tx_cfg)
{
- struct mwifiex_ds_11n_tx_cfg *tx_cfg;
struct host_cmd_ds_11n_cfg *htcfg = &resp->params.htcfg;
- if (data_buf) {
- tx_cfg = (struct mwifiex_ds_11n_tx_cfg *) data_buf;
+ if (tx_cfg) {
tx_cfg->tx_htcap = le16_to_cpu(htcfg->ht_tx_cap);
tx_cfg->tx_htinfo = le16_to_cpu(htcfg->ht_tx_info);
}
@@ -208,11 +207,10 @@ int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp, void *data_buf)
*/
int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd, int cmd_action,
- void *data_buf)
+ u16 *buf_size)
{
struct host_cmd_ds_txbuf_cfg *tx_buf = &cmd->params.tx_buf;
u16 action = (u16) cmd_action;
- u16 buf_size = *((u16 *) data_buf);
cmd->command = cpu_to_le16(HostCmd_CMD_RECONFIGURE_TX_BUFF);
cmd->size =
@@ -220,8 +218,8 @@ int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
tx_buf->action = cpu_to_le16(action);
switch (action) {
case HostCmd_ACT_GEN_SET:
- dev_dbg(priv->adapter->dev, "cmd: set tx_buf=%d\n", buf_size);
- tx_buf->buff_size = cpu_to_le16(buf_size);
+ dev_dbg(priv->adapter->dev, "cmd: set tx_buf=%d\n", *buf_size);
+ tx_buf->buff_size = cpu_to_le16(*buf_size);
break;
case HostCmd_ACT_GEN_GET:
default:
@@ -240,13 +238,12 @@ int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
* - Ensuring correct endian-ness
*/
int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
- int cmd_action, void *data_buf)
+ int cmd_action,
+ struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl)
{
struct host_cmd_ds_amsdu_aggr_ctrl *amsdu_ctrl =
&cmd->params.amsdu_aggr_ctrl;
u16 action = (u16) cmd_action;
- struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl =
- (struct mwifiex_ds_11n_amsdu_aggr_ctrl *) data_buf;
cmd->command = cpu_to_le16(HostCmd_CMD_AMSDU_AGGR_CTRL);
cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_amsdu_aggr_ctrl)
@@ -272,15 +269,13 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
* Handling includes changing the header fields into CPU format.
*/
int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
- void *data_buf)
+ struct mwifiex_ds_11n_amsdu_aggr_ctrl
+ *amsdu_aggr_ctrl)
{
- struct mwifiex_ds_11n_amsdu_aggr_ctrl *amsdu_aggr_ctrl;
struct host_cmd_ds_amsdu_aggr_ctrl *amsdu_ctrl =
&resp->params.amsdu_aggr_ctrl;
- if (data_buf) {
- amsdu_aggr_ctrl =
- (struct mwifiex_ds_11n_amsdu_aggr_ctrl *) data_buf;
+ if (amsdu_aggr_ctrl) {
amsdu_aggr_ctrl->enable = le16_to_cpu(amsdu_ctrl->enable);
amsdu_aggr_ctrl->curr_buf_size =
le16_to_cpu(amsdu_ctrl->curr_buf_size);
@@ -296,12 +291,10 @@ int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
* - Setting HT Tx capability and HT Tx information fields
* - Ensuring correct endian-ness
*/
-int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd,
- u16 cmd_action, void *data_buf)
+int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action,
+ struct mwifiex_ds_11n_tx_cfg *txcfg)
{
struct host_cmd_ds_11n_cfg *htcfg = &cmd->params.htcfg;
- struct mwifiex_ds_11n_tx_cfg *txcfg =
- (struct mwifiex_ds_11n_tx_cfg *) data_buf;
cmd->command = cpu_to_le16(HostCmd_CMD_11N_CFG);
cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_11n_cfg) + S_DS_GEN);
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index a4390a1a2a9..90b421e343d 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -29,9 +29,9 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
int mwifiex_ret_11n_cfg(struct host_cmd_ds_command *resp,
- void *data_buf);
-int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd,
- u16 cmd_action, void *data_buf);
+ struct mwifiex_ds_11n_tx_cfg *tx_cfg);
+int mwifiex_cmd_11n_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action,
+ struct mwifiex_ds_11n_tx_cfg *txcfg);
int mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc,
@@ -62,12 +62,14 @@ int mwifiex_get_rx_reorder_tbl(struct mwifiex_private *priv,
int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
struct mwifiex_ds_tx_ba_stream_tbl *buf);
int mwifiex_ret_amsdu_aggr_ctrl(struct host_cmd_ds_command *resp,
- void *data_buf);
+ struct mwifiex_ds_11n_amsdu_aggr_ctrl
+ *amsdu_aggr_ctrl);
int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- int cmd_action, void *data_buf);
+ int cmd_action, u16 *buf_size);
int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
- int cmd_action, void *data_buf);
+ int cmd_action,
+ struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
/*
* This function checks whether AMPDU is allowed or not for a particular TID.
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index f807447e4d9..1a453a605b3 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -164,12 +164,13 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
struct mwifiex_tx_param tx_param;
struct txpd *ptx_pd = NULL;
- if (skb_queue_empty(&pra_list->skb_head)) {
+ skb_src = skb_peek(&pra_list->skb_head);
+ if (!skb_src) {
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
return 0;
}
- skb_src = skb_peek(&pra_list->skb_head);
+
tx_info_src = MWIFIEX_SKB_TXCB(skb_src);
skb_aggr = dev_alloc_skb(adapter->tx_buf_size);
if (!skb_aggr) {
@@ -184,17 +185,15 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
tx_info_aggr->bss_index = tx_info_src->bss_index;
skb_aggr->priority = skb_src->priority;
- while (skb_src && ((skb_headroom(skb_aggr) + skb_src->len
- + LLC_SNAP_LEN)
- <= adapter->tx_buf_size)) {
+ do {
+ /* Check if AMSDU can accommodate this MSDU */
+ if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
+ break;
- if (!skb_queue_empty(&pra_list->skb_head))
- skb_src = skb_dequeue(&pra_list->skb_head);
- else
- skb_src = NULL;
+ skb_src = skb_dequeue(&pra_list->skb_head);
- if (skb_src)
- pra_list->total_pkts_size -= skb_src->len;
+ pra_list->total_pkts_size -= skb_src->len;
+ pra_list->total_pkts--;
atomic_dec(&priv->wmm.tx_pkts_queued);
@@ -212,11 +211,15 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
return -1;
}
- if (!skb_queue_empty(&pra_list->skb_head))
- skb_src = skb_peek(&pra_list->skb_head);
- else
- skb_src = NULL;
- }
+ if (skb_tailroom(skb_aggr) < pad) {
+ pad = 0;
+ break;
+ }
+ skb_put(skb_aggr, pad);
+
+ skb_src = skb_peek(&pra_list->skb_head);
+
+ } while (skb_src);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
@@ -230,11 +233,19 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_push(skb_aggr, headroom);
- tx_param.next_pkt_len = ((pra_list->total_pkts_size) ?
- (((pra_list->total_pkts_size) >
- adapter->tx_buf_size) ? adapter->
- tx_buf_size : pra_list->total_pkts_size +
- LLC_SNAP_LEN + sizeof(struct txpd)) : 0);
+ /*
+ * Padding per MSDU will affect the length of next
+ * packet and hence the exact length of next packet
+ * is uncertain here.
+ *
+ * Also, aggregating the transmission buffer while
+ * downloading the data to the card won't gain much
+ * for AMSDU packets, as AMSDU packets already utilize
+ * the transmission buffer space to the maximum
+ * (adapter->tx_buf_size).
+ */
+ tx_param.next_pkt_len = 0;
+
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
skb_aggr->data,
skb_aggr->len, &tx_param);
@@ -258,6 +269,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_queue_tail(&pra_list->skb_head, skb_aggr);
pra_list->total_pkts_size += skb_aggr->len;
+ pra_list->total_pkts++;
atomic_inc(&priv->wmm.tx_pkts_queued);
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index e5dfdc39a92..7aa9aa0ac95 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -328,13 +328,12 @@ int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf)
*/
int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- void *data_buf)
+ struct host_cmd_ds_11n_addba_req
+ *cmd_addba_req)
{
struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
(struct host_cmd_ds_11n_addba_rsp *)
&cmd->params.add_ba_rsp;
- struct host_cmd_ds_11n_addba_req *cmd_addba_req =
- (struct host_cmd_ds_11n_addba_req *) data_buf;
u8 tid;
int win_size;
uint16_t block_ack_param_set;
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index f3ca8c8c18f..033c8adbdcd 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -52,8 +52,9 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd,
void *data_buf);
int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
- struct host_cmd_ds_command
- *cmd, void *data_buf);
+ struct host_cmd_ds_command *cmd,
+ struct host_cmd_ds_11n_addba_req
+ *cmd_addba_req);
int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd,
void *data_buf);
void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv);
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 687c1f22349..352d2c5da1f 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -672,6 +672,59 @@ static const u32 mwifiex_cipher_suites[] = {
};
/*
+ * CFG802.11 operation handler for setting bit rates.
+ *
+ * Function selects legacy band B/G/BG from the corresponding bitrate selection.
+ * Currently only 2.4GHz band is supported.
+ */
+static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *peer,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct mwifiex_ds_band_cfg band_cfg;
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ int index = 0, mode = 0, i;
+
+ /* Currently only 2.4GHz is supported */
+ for (i = 0; i < mwifiex_band_2ghz.n_bitrates; i++) {
+ /*
+ * Rates below 6 Mbps in the table are CCK rates (802.11b),
+ * and from 6 Mbps up they are OFDM rates (802.11g).
+ */
+ if (mwifiex_rates[i].bitrate == 60) {
+ index = 1 << i;
+ break;
+ }
+ }
+
+ if (mask->control[IEEE80211_BAND_2GHZ].legacy < index) {
+ mode = BAND_B;
+ } else {
+ mode = BAND_G;
+ if (mask->control[IEEE80211_BAND_2GHZ].legacy % index)
+ mode |= BAND_B;
+ }
+
+ memset(&band_cfg, 0, sizeof(band_cfg));
+ band_cfg.config_bands = mode;
+
+ if (priv->bss_mode == NL80211_IFTYPE_ADHOC)
+ band_cfg.adhoc_start_band = mode;
+
+ band_cfg.sec_chan_offset = NO_SEC_CHANNEL;
+
+ if (mwifiex_set_radio_band_cfg(priv, &band_cfg))
+ return -EFAULT;
+
+ wiphy_debug(wiphy, "info: device configured in 802.11%s%s mode\n",
+ (mode & BAND_B) ? "b" : "",
+ (mode & BAND_G) ? "g" : "");
+
+ return 0;
+}
+
+/*
* CFG802.11 operation handler for disconnection request.
*
* This function does not work when there is already a disconnection
@@ -960,7 +1013,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
ret = mwifiex_set_gen_ie(priv, sme->ie, sme->ie_len);
if (sme->key) {
- if (mwifiex_is_alg_wep(0) | mwifiex_is_alg_wep(0)) {
+ if (mwifiex_is_alg_wep(priv->sec_info.encryption_mode)) {
dev_dbg(priv->adapter->dev,
"info: setting wep encryption"
" with key len %d\n", sme->key_len);
@@ -1225,6 +1278,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.set_default_key = mwifiex_cfg80211_set_default_key,
.set_power_mgmt = mwifiex_cfg80211_set_power_mgmt,
.set_tx_power = mwifiex_cfg80211_set_tx_power,
+ .set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask,
};
/*
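The band selection in mwifiex_cfg80211_set_bitrate_mask() above leans on the rate table layout: index is the bit of the first 6 Mbps (OFDM) entry, and everything below it is CCK. The fragment below is an illustration only; the BAND_B/BAND_G values and the bit position of the 6 Mbps rate are assumptions for the example, not taken from the driver, but the less-than / modulo classification mirrors the handler.

#include <stdio.h>

#define BAND_B	0x01	/* assumed values, for the printout only */
#define BAND_G	0x02

/* Mirror of the classification above for a hypothetical table where
 * the first OFDM (6 Mbps) rate sits at bit 4, i.e. index == 0x10. */
static int classify(unsigned int legacy, unsigned int index)
{
	int mode;

	if (legacy < index)		/* only CCK bits set */
		return BAND_B;

	mode = BAND_G;			/* at least one OFDM bit set */
	if (legacy % index)		/* plus one or more CCK bits */
		mode |= BAND_B;
	return mode;
}

int main(void)
{
	unsigned int index = 0x10;

	printf("0x00f -> %#x (B only)\n", classify(0x00f, index));
	printf("0xff0 -> %#x (G only)\n", classify(0xff0, index));
	printf("0xff5 -> %#x (B|G)\n",    classify(0xff5, index));
	return 0;
}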
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index cd89fed206a..b5352afb871 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -104,13 +104,11 @@ mwifiex_clean_cmd_node(struct mwifiex_adapter *adapter,
* main thread.
*/
static int mwifiex_cmd_host_cmd(struct mwifiex_private *priv,
- struct host_cmd_ds_command *cmd, void *data_buf)
+ struct host_cmd_ds_command *cmd,
+ struct mwifiex_ds_misc_cmd *pcmd_ptr)
{
- struct mwifiex_ds_misc_cmd *pcmd_ptr =
- (struct mwifiex_ds_misc_cmd *) data_buf;
-
/* Copy the HOST command to command buffer */
- memcpy((void *) cmd, pcmd_ptr->cmd, pcmd_ptr->len);
+ memcpy(cmd, pcmd_ptr->cmd, pcmd_ptr->len);
dev_dbg(priv->adapter->dev, "cmd: host cmd size = %d\n", pcmd_ptr->len);
return 0;
}
@@ -707,15 +705,14 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
/* Copy original response back to response buffer */
- struct mwifiex_ds_misc_cmd *hostcmd = NULL;
+ struct mwifiex_ds_misc_cmd *hostcmd;
uint16_t size = le16_to_cpu(resp->size);
dev_dbg(adapter->dev, "info: host cmd resp size = %d\n", size);
size = min_t(u16, size, MWIFIEX_SIZE_OF_CMD_BUFFER);
if (adapter->curr_cmd->data_buf) {
- hostcmd = (struct mwifiex_ds_misc_cmd *)
- adapter->curr_cmd->data_buf;
+ hostcmd = adapter->curr_cmd->data_buf;
hostcmd->len = size;
- memcpy(hostcmd->cmd, (void *) resp, size);
+ memcpy(hostcmd->cmd, resp, size);
}
}
orig_cmdresp_no = le16_to_cpu(resp->command);
@@ -1155,7 +1152,7 @@ EXPORT_SYMBOL_GPL(mwifiex_process_sleep_confirm_resp);
int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
u16 cmd_action, uint16_t ps_bitmap,
- void *data_buf)
+ struct mwifiex_ds_auto_ds *auto_ds)
{
struct host_cmd_ds_802_11_ps_mode_enh *psmode_enh =
&cmd->params.psmode_enh;
@@ -1218,9 +1215,8 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
sizeof(struct mwifiex_ie_types_header));
cmd_size += sizeof(*auto_ds_tlv);
tlv += sizeof(*auto_ds_tlv);
- if (data_buf)
- idletime = ((struct mwifiex_ds_auto_ds *)
- data_buf)->idle_time;
+ if (auto_ds)
+ idletime = auto_ds->idle_time;
dev_dbg(priv->adapter->dev,
"cmd: PS Command: Enter Auto Deep Sleep\n");
auto_ds_tlv->deep_sleep_timeout = cpu_to_le16(idletime);
@@ -1239,7 +1235,7 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
*/
int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp,
- void *data_buf)
+ struct mwifiex_ds_pm_cfg *pm_cfg)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_ps_mode_enh *ps_mode =
@@ -1282,10 +1278,8 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
dev_dbg(adapter->dev, "cmd: ps_bitmap=%#x\n", ps_bitmap);
- if (data_buf) {
+ if (pm_cfg) {
/* This section is for get power save mode */
- struct mwifiex_ds_pm_cfg *pm_cfg =
- (struct mwifiex_ds_pm_cfg *)data_buf;
if (ps_bitmap & BITMAP_STA_PS)
pm_cfg->param.ps_mode = 1;
else
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 46d65e02c7b..d26a78b6b3c 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -27,8 +27,8 @@ static struct dentry *mwifiex_dfs_dir;
static char *bss_modes[] = {
"Unknown",
- "Managed",
"Ad-hoc",
+ "Managed",
"Auto"
};
@@ -216,28 +216,19 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
p += sprintf(p, "media_state=\"%s\"\n",
(!priv->media_connected ? "Disconnected" : "Connected"));
- p += sprintf(p, "mac_address=\"%02x:%02x:%02x:%02x:%02x:%02x\"\n",
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
+ p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr);
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
p += sprintf(p, "multicast_count=\"%d\"\n",
netdev_mc_count(netdev));
p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid);
- p += sprintf(p, "bssid=\"%02x:%02x:%02x:%02x:%02x:%02x\"\n",
- info.bssid[0], info.bssid[1],
- info.bssid[2], info.bssid[3],
- info.bssid[4], info.bssid[5]);
+ p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
p += sprintf(p, "region_code = \"%02x\"\n", info.region_code);
netdev_for_each_mc_addr(ha, netdev)
- p += sprintf(p, "multicast_address[%d]="
- "\"%02x:%02x:%02x:%02x:%02x:%02x\"\n", i++,
- ha->addr[0], ha->addr[1],
- ha->addr[2], ha->addr[3],
- ha->addr[4], ha->addr[5]);
+ p += sprintf(p, "multicast_address[%d]=\"%pM\"\n",
+ i++, ha->addr);
}
p += sprintf(p, "num_tx_bytes = %lu\n", priv->stats.tx_bytes);
@@ -451,26 +442,18 @@ mwifiex_debug_read(struct file *file, char __user *ubuf,
if (info.tx_tbl_num) {
p += sprintf(p, "Tx BA stream table:\n");
for (i = 0; i < info.tx_tbl_num; i++)
- p += sprintf(p, "tid = %d, "
- "ra = %02x:%02x:%02x:%02x:%02x:%02x\n",
- info.tx_tbl[i].tid, info.tx_tbl[i].ra[0],
- info.tx_tbl[i].ra[1], info.tx_tbl[i].ra[2],
- info.tx_tbl[i].ra[3], info.tx_tbl[i].ra[4],
- info.tx_tbl[i].ra[5]);
+ p += sprintf(p, "tid = %d, ra = %pM\n",
+ info.tx_tbl[i].tid, info.tx_tbl[i].ra);
}
if (info.rx_tbl_num) {
p += sprintf(p, "Rx reorder table:\n");
for (i = 0; i < info.rx_tbl_num; i++) {
-
- p += sprintf(p, "tid = %d, "
- "ta = %02x:%02x:%02x:%02x:%02x:%02x, "
+ p += sprintf(p, "tid = %d, ta = %pM, "
"start_win = %d, "
"win_size = %d, buffer: ",
info.rx_tbl[i].tid,
- info.rx_tbl[i].ta[0], info.rx_tbl[i].ta[1],
- info.rx_tbl[i].ta[2], info.rx_tbl[i].ta[3],
- info.rx_tbl[i].ta[4], info.rx_tbl[i].ta[5],
+ info.rx_tbl[i].ta,
info.rx_tbl[i].start_win,
info.rx_tbl[i].win_size);
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 0e90b0986ed..94ddc9038cb 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -30,7 +30,9 @@
#define MWIFIEX_MAX_BSS_NUM (1)
-#define MWIFIEX_MIN_DATA_HEADER_LEN 32 /* (sizeof(mwifiex_txpd)) */
+#define MWIFIEX_MIN_DATA_HEADER_LEN 36 /* sizeof(mwifiex_txpd)
+ * + 4 byte alignment
+ */
#define MWIFIEX_MAX_TX_BASTREAM_SUPPORTED 2
#define MWIFIEX_MAX_RX_BASTREAM_SUPPORTED 16
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index afdd145dff0..4fee0993b18 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -157,6 +157,17 @@ enum MWIFIEX_802_11_WEP_STATUS {
#define ISSUPP_RXSTBC(Dot11nDevCap) (Dot11nDevCap & BIT(26))
#define ISSUPP_GREENFIELD(Dot11nDevCap) (Dot11nDevCap & BIT(29))
+/* httxcfg bitmap
+ * 0 reserved
+ * 1 20/40 Mhz enable(1)/disable(0)
+ * 2-3 reserved
+ * 4 green field enable(1)/disable(0)
+ * 5 short GI in 20 Mhz enable(1)/disable(0)
+ * 6 short GI in 40 Mhz enable(1)/disable(0)
+ * 7-15 reserved
+ */
+#define MWIFIEX_FW_DEF_HTTXCFG (BIT(1) | BIT(4) | BIT(5) | BIT(6))
+
#define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f)
#define SETHT_MCS32(x) (x[4] |= 1)
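The httxcfg bitmap comment above names the individual bits; as a quick sanity check, the default expands to 0x72. The snippet below is a standalone sketch that assumes only the kernel's usual BIT(n) == 1UL << n definition.

#include <assert.h>

#define BIT(n)	(1UL << (n))
#define MWIFIEX_FW_DEF_HTTXCFG	(BIT(1) | BIT(4) | BIT(5) | BIT(6))

int main(void)
{
	/*
	 * BIT(1) 20/40 MHz enable      = 0x02
	 * BIT(4) green field enable    = 0x10
	 * BIT(5) short GI in 20 MHz    = 0x20
	 * BIT(6) short GI in 40 MHz    = 0x40
	 */
	assert(MWIFIEX_FW_DEF_HTTXCFG == 0x72);
	return 0;
}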
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 7c1c5ee40eb..f6bcc868562 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -249,6 +249,7 @@ struct mwifiex_ds_hs_cfg {
};
#define DEEP_SLEEP_ON 1
+#define DEEP_SLEEP_OFF 0
#define DEEP_SLEEP_IDLE_TIME 100
#define PS_MODE_AUTO 1
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 5eab3dc29b1..644e2e405cb 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -364,10 +364,9 @@ static int mwifiex_append_rsn_ie_wpa_wpa2(struct mwifiex_private *priv,
*/
int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- void *data_buf)
+ struct mwifiex_bssdescriptor *bss_desc)
{
struct host_cmd_ds_802_11_associate *assoc = &cmd->params.associate;
- struct mwifiex_bssdescriptor *bss_desc;
struct mwifiex_ie_types_ssid_param_set *ssid_tlv;
struct mwifiex_ie_types_phy_param_set *phy_tlv;
struct mwifiex_ie_types_ss_param_set *ss_tlv;
@@ -380,7 +379,6 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
u8 *pos;
int rsn_ie_len = 0;
- bss_desc = (struct mwifiex_bssdescriptor *) data_buf;
pos = (u8 *) assoc;
mwifiex_cfg_tx_buf(priv, bss_desc);
@@ -748,7 +746,8 @@ done:
*/
int
mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
- struct host_cmd_ds_command *cmd, void *data_buf)
+ struct host_cmd_ds_command *cmd,
+ struct mwifiex_802_11_ssid *req_ssid)
{
int rsn_ie_len = 0;
struct mwifiex_adapter *adapter = priv->adapter;
@@ -786,20 +785,15 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN);
- memcpy(adhoc_start->ssid,
- ((struct mwifiex_802_11_ssid *) data_buf)->ssid,
- ((struct mwifiex_802_11_ssid *) data_buf)->ssid_len);
+ memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len);
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: SSID = %s\n",
adhoc_start->ssid);
memset(bss_desc->ssid.ssid, 0, IEEE80211_MAX_SSID_LEN);
- memcpy(bss_desc->ssid.ssid,
- ((struct mwifiex_802_11_ssid *) data_buf)->ssid,
- ((struct mwifiex_802_11_ssid *) data_buf)->ssid_len);
+ memcpy(bss_desc->ssid.ssid, req_ssid->ssid, req_ssid->ssid_len);
- bss_desc->ssid.ssid_len =
- ((struct mwifiex_802_11_ssid *) data_buf)->ssid_len;
+ bss_desc->ssid.ssid_len = req_ssid->ssid_len;
/* Set the BSS mode */
adhoc_start->bss_mode = HostCmd_BSS_MODE_IBSS;
@@ -1036,13 +1030,12 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
*/
int
mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
- struct host_cmd_ds_command *cmd, void *data_buf)
+ struct host_cmd_ds_command *cmd,
+ struct mwifiex_bssdescriptor *bss_desc)
{
int rsn_ie_len = 0;
struct host_cmd_ds_802_11_ad_hoc_join *adhoc_join =
&cmd->params.adhoc_join;
- struct mwifiex_bssdescriptor *bss_desc =
- (struct mwifiex_bssdescriptor *) data_buf;
struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
u32 cmd_append_size = 0;
u16 tmp_cap;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index f0582259c93..e5fc53dc688 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -26,17 +26,12 @@
const char driver_version[] = "mwifiex " VERSION " (%s) ";
-struct mwifiex_adapter *g_adapter;
-EXPORT_SYMBOL_GPL(g_adapter);
-
static struct mwifiex_bss_attr mwifiex_bss_sta[] = {
{MWIFIEX_BSS_TYPE_STA, MWIFIEX_DATA_FRAME_TYPE_ETH_II, true, 0, 0},
};
static int drv_mode = DRV_MODE_STA;
-static char fw_name[32] = DEFAULT_FW_NAME;
-
/* Supported drv_mode table */
static struct mwifiex_drv_mode mwifiex_drv_mode_tbl[] = {
{
@@ -62,7 +57,8 @@ static struct mwifiex_drv_mode mwifiex_drv_mode_tbl[] = {
* proper cleanup before exiting.
*/
static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
- struct mwifiex_drv_mode *drv_mode_ptr)
+ struct mwifiex_drv_mode *drv_mode_ptr,
+ void **padapter)
{
struct mwifiex_adapter *adapter;
int i;
@@ -71,7 +67,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
if (!adapter)
return -ENOMEM;
- g_adapter = adapter;
+ *padapter = adapter;
adapter->card = card;
/* Save interface specific operations in adapter */
@@ -326,7 +322,7 @@ exit_main_proc:
* and initializing the private structures.
*/
static int
-mwifiex_init_sw(void *card, struct mwifiex_if_ops *if_ops)
+mwifiex_init_sw(void *card, struct mwifiex_if_ops *if_ops, void **padapter)
{
int i;
struct mwifiex_drv_mode *drv_mode_ptr;
@@ -345,7 +341,7 @@ mwifiex_init_sw(void *card, struct mwifiex_if_ops *if_ops)
return -1;
}
- if (mwifiex_register(card, if_ops, drv_mode_ptr))
+ if (mwifiex_register(card, if_ops, drv_mode_ptr, padapter))
return -1;
return 0;
@@ -384,20 +380,8 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
memset(&fw, 0, sizeof(struct mwifiex_fw_image));
- switch (adapter->revision_id) {
- case SD8787_W0:
- case SD8787_W1:
- strcpy(fw_name, SD8787_W1_FW_NAME);
- break;
- case SD8787_A0:
- case SD8787_A1:
- strcpy(fw_name, SD8787_AX_FW_NAME);
- break;
- default:
- break;
- }
-
- err = request_firmware(&adapter->firmware, fw_name, adapter->dev);
+ err = request_firmware(&adapter->firmware, adapter->fw_name,
+ adapter->dev);
if (err < 0) {
dev_err(adapter->dev, "request_firmware() returned"
" error code %#x\n", err);
@@ -569,7 +553,7 @@ static int
mwifiex_set_mac_address(struct net_device *dev, void *addr)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- struct sockaddr *hw_addr = (struct sockaddr *) addr;
+ struct sockaddr *hw_addr = addr;
int ret;
memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN);
@@ -869,13 +853,11 @@ mwifiex_add_card(void *card, struct semaphore *sem,
if (down_interruptible(sem))
goto exit_sem_err;
- if (mwifiex_init_sw(card, if_ops)) {
+ if (mwifiex_init_sw(card, if_ops, (void **)&adapter)) {
pr_err("%s: software init failed\n", __func__);
goto err_init_sw;
}
- adapter = g_adapter;
-
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
adapter->surprise_removed = false;
init_waitqueue_head(&adapter->init_wait_q);
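The main.c hunks drop the g_adapter global and instead hand the freshly allocated adapter back to the caller through an out-parameter, so mwifiex_add_card() no longer depends on module-wide state. A minimal user-space model of that pattern (names are illustrative, not the driver's API):

#include <stdlib.h>
#include <stdio.h>

struct adapter {
	int id;
};

/* Allocate an object and return it through an out-parameter instead of
 * stashing it in a global, so several instances can coexist. */
static int register_card(struct adapter **padapter)
{
	struct adapter *a = calloc(1, sizeof(*a));

	if (!a)
		return -1;
	a->id = 42;
	*padapter = a;
	return 0;
}

int main(void)
{
	struct adapter *adapter = NULL;

	if (register_card(&adapter))
		return 1;
	printf("adapter id %d\n", adapter->id);
	free(adapter);
	return 0;
}
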
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 8316b3cd92c..2215c3c9735 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -39,7 +39,6 @@
#include "fw.h"
extern const char driver_version[];
-extern struct mwifiex_adapter *g_adapter;
enum {
MWIFIEX_ASYNC_CMD,
@@ -48,15 +47,6 @@ enum {
#define DRV_MODE_STA 0x1
-#define SD8787_W0 0x30
-#define SD8787_W1 0x31
-#define SD8787_A0 0x40
-#define SD8787_A1 0x41
-
-#define DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
-#define SD8787_W1_FW_NAME "mrvl/sd8787_uapsta_w1.bin"
-#define SD8787_AX_FW_NAME "mrvl/sd8787_uapsta.bin"
-
struct mwifiex_drv_mode {
u16 drv_mode;
u16 intf_num;
@@ -190,6 +180,7 @@ struct mwifiex_ra_list_tbl {
struct sk_buff_head skb_head;
u8 ra[ETH_ALEN];
u32 total_pkts_size;
+ u32 total_pkts;
u32 is_11n_enabled;
};
@@ -576,10 +567,10 @@ struct mwifiex_adapter {
u8 priv_num;
struct mwifiex_drv_mode *drv_mode;
const struct firmware *firmware;
+ char fw_name[32];
struct device *dev;
bool surprise_removed;
u32 fw_release_number;
- u32 revision_id;
u16 init_wait_q_woken;
wait_queue_head_t init_wait_q;
void *card;
@@ -745,10 +736,10 @@ void mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *, u8 *,
int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
u16 cmd_action, uint16_t ps_bitmap,
- void *data_buf);
+ struct mwifiex_ds_auto_ds *auto_ds);
int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp,
- void *data_buf);
+ struct mwifiex_ds_pm_cfg *pm_cfg);
void mwifiex_process_hs_config(struct mwifiex_adapter *adapter);
void mwifiex_hs_activated_event(struct mwifiex_private *priv,
u8 activated);
@@ -760,7 +751,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *, uint16_t cmd_no,
u16 cmd_action, u32 cmd_oid,
void *data_buf, void *cmd_buf);
int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no,
- void *cmd_buf);
+ struct host_cmd_ds_command *resp);
int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *,
struct sk_buff *skb);
int mwifiex_process_sta_event(struct mwifiex_private *);
@@ -769,7 +760,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *, u8 first_sta);
int mwifiex_scan_networks(struct mwifiex_private *priv,
const struct mwifiex_user_scan_cfg *user_scan_in);
int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
- void *data_buf);
+ struct mwifiex_scan_cmd_config *scan_cfg);
void mwifiex_queue_scan_cmd(struct mwifiex_private *priv,
struct cmd_ctrl_node *cmd_node);
int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
@@ -786,8 +777,8 @@ s32 mwifiex_ssid_cmp(struct mwifiex_802_11_ssid *ssid1,
int mwifiex_associate(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
- struct host_cmd_ds_command
- *cmd, void *data_buf);
+ struct host_cmd_ds_command *cmd,
+ struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
void mwifiex_reset_connect_state(struct mwifiex_private *priv);
@@ -800,10 +791,10 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- void *data_buf);
+ struct mwifiex_802_11_ssid *req_ssid);
int mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- void *data_buf);
+ struct mwifiex_bssdescriptor *bss_desc);
int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
int mwifiex_cmd_802_11_bg_scan_query(struct host_cmd_ds_command *cmd);
@@ -938,6 +929,7 @@ int mwifiex_set_hs_params(struct mwifiex_private *priv,
struct mwifiex_ds_hs_cfg *hscfg);
int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
int mwifiex_enable_hs(struct mwifiex_adapter *adapter);
+int mwifiex_disable_auto_ds(struct mwifiex_private *priv);
int mwifiex_get_signal_info(struct mwifiex_private *priv,
struct mwifiex_ds_get_signal *signal);
int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 5c22860fb40..6f88c8ab5de 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -2357,12 +2357,10 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
* - Setting command ID, and proper size
* - Ensuring correct endian-ness
*/
-int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd, void *data_buf)
+int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd,
+ struct mwifiex_scan_cmd_config *scan_cfg)
{
struct host_cmd_ds_802_11_scan *scan_cmd = &cmd->params.scan;
- struct mwifiex_scan_cmd_config *scan_cfg;
-
- scan_cfg = (struct mwifiex_scan_cmd_config *) data_buf;
/* Set fixed field variables in scan command */
scan_cmd->bss_mode = scan_cfg->bss_mode;
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index d425dbd91d1..82098ac483b 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -31,10 +31,27 @@
#define SDIO_VERSION "1.0"
+/* The mwifiex_sdio_remove() callback function is called when
+ * the user removes this module from kernel space or ejects
+ * the card from the slot. The driver handles these two cases
+ * differently.
+ * If the user is removing the module, a few commands (FUNC_SHUTDOWN,
+ * HS_CANCEL etc.) are sent to the firmware.
+ * If the card is removed, there is no need to send these commands.
+ *
+ * The variable 'user_rmmod' is used to distinguish these two
+ * scenarios. This flag is initialized as FALSE in case the card
+ * is removed, and will be set to TRUE for module removal when
+ * the module_exit function is called.
+ */
+static u8 user_rmmod;
+
static struct mwifiex_if_ops sdio_ops;
static struct semaphore add_remove_card_sem;
+static int mwifiex_sdio_resume(struct device *dev);
+
/*
* SDIO probe.
*
@@ -93,17 +110,39 @@ static void
mwifiex_sdio_remove(struct sdio_func *func)
{
struct sdio_mmc_card *card;
+ struct mwifiex_adapter *adapter;
+ int i;
pr_debug("info: SDIO func num=%d\n", func->num);
- if (func) {
- card = sdio_get_drvdata(func);
- if (card) {
- mwifiex_remove_card(card->adapter,
- &add_remove_card_sem);
- kfree(card);
- }
+ card = sdio_get_drvdata(func);
+ if (!card)
+ return;
+
+ adapter = card->adapter;
+ if (!adapter || !adapter->priv_num)
+ return;
+
+ if (user_rmmod) {
+ if (adapter->is_suspended)
+ mwifiex_sdio_resume(adapter->dev);
+
+ for (i = 0; i < adapter->priv_num; i++)
+ if ((GET_BSS_ROLE(adapter->priv[i]) ==
+ MWIFIEX_BSS_ROLE_STA) &&
+ adapter->priv[i]->media_connected)
+ mwifiex_deauthenticate(adapter->priv[i], NULL);
+
+ mwifiex_disable_auto_ds(mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY));
+
+ mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY),
+ MWIFIEX_FUNC_SHUTDOWN);
}
+
+ mwifiex_remove_card(card->adapter, &add_remove_card_sem);
+ kfree(card);
}
/*
@@ -1283,7 +1322,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
if (!(card->mp_wr_bitmap &
(1 << card->curr_wr_port))
|| !MP_TX_AGGR_BUF_HAS_ROOM(
- card, next_pkt_len))
+ card, pkt_len + next_pkt_len))
f_send_aggr_buf = 1;
} else {
/* No room in Aggr buf, send it */
@@ -1531,6 +1570,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
sdio_set_drvdata(func, card);
adapter->dev = &func->dev;
+ strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
return 0;
@@ -1552,7 +1592,6 @@ disable_func:
* the first interrupt got from bootloader
* - Disable host interrupt mask register
* - Get SDIO port
- * - Get revision ID
* - Initialize SDIO variables in card
* - Allocate MP registers
* - Allocate MPA Tx and Rx buffers
@@ -1576,10 +1615,6 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
/* Get SDIO ioport */
mwifiex_init_sdio_ioport(adapter);
- /* Get revision ID */
-#define REV_ID_REG 0x5c
- mwifiex_read_reg(adapter, REV_ID_REG, &adapter->revision_id);
-
/* Initialize SDIO variables in card */
card->mp_rd_bitmap = 0;
card->mp_wr_bitmap = 0;
@@ -1700,6 +1735,9 @@ mwifiex_sdio_init_module(void)
{
sema_init(&add_remove_card_sem, 1);
+ /* Clear the flag in case the user removes the card. */
+ user_rmmod = 0;
+
return sdio_register_driver(&mwifiex_sdio);
}
@@ -1715,32 +1753,12 @@ mwifiex_sdio_init_module(void)
static void
mwifiex_sdio_cleanup_module(void)
{
- struct mwifiex_adapter *adapter = g_adapter;
- int i;
-
- if (down_interruptible(&add_remove_card_sem))
- goto exit_sem_err;
-
- if (!adapter || !adapter->priv_num)
- goto exit;
-
- if (adapter->is_suspended)
- mwifiex_sdio_resume(adapter->dev);
-
- for (i = 0; i < adapter->priv_num; i++)
- if ((GET_BSS_ROLE(adapter->priv[i]) == MWIFIEX_BSS_ROLE_STA) &&
- adapter->priv[i]->media_connected)
- mwifiex_deauthenticate(adapter->priv[i], NULL);
-
- if (!adapter->surprise_removed)
- mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_ANY),
- MWIFIEX_FUNC_SHUTDOWN);
+ if (!down_interruptible(&add_remove_card_sem))
+ up(&add_remove_card_sem);
-exit:
- up(&add_remove_card_sem);
+ /* Set the flag as user is removing this module. */
+ user_rmmod = 1;
-exit_sem_err:
sdio_unregister_driver(&mwifiex_sdio);
}
@@ -1751,4 +1769,4 @@ MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
MODULE_VERSION(SDIO_VERSION);
MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE("sd8787.bin");
+MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
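Pulling the sdio.c changes together: module exit now only sets user_rmmod before unregistering the driver, and the remove callback decides from that flag whether firmware shutdown commands are worth sending. A compact user-space model of the control flow (hypothetical helpers, not the driver's functions):

#include <stdio.h>

static unsigned char user_rmmod;	/* 0 = card ejected, 1 = module removal */

/* Stand-in for mwifiex_sdio_remove(): only talk to the firmware when the
 * card is still present, i.e. the user is unloading the module. */
static void sdio_remove(void)
{
	if (user_rmmod)
		printf("send FUNC_SHUTDOWN, cancel host sleep, deauth\n");
	else
		printf("card gone: skip firmware commands\n");
	printf("free per-card state\n");
}

int main(void)
{
	sdio_remove();		/* surprise-removal path */
	user_rmmod = 1;		/* what module_exit sets before unregister */
	sdio_remove();		/* clean rmmod path */
	return 0;
}
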
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 4e97e90aa39..524f78f4ee6 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -28,6 +28,8 @@
#include "main.h"
+#define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
+
#define BLOCK_MODE 1
#define BYTE_MODE 0
@@ -52,10 +54,10 @@
#define SDIO_MP_AGGR_DEF_PKT_LIMIT 8
-#define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (4096) /* 4K */
+#define SDIO_MP_TX_AGGR_DEF_BUF_SIZE (8192) /* 8K */
/* Multi port RX aggregation buffer size */
-#define SDIO_MP_RX_AGGR_DEF_BUF_SIZE (4096) /* 4K */
+#define SDIO_MP_RX_AGGR_DEF_BUF_SIZE (16384) /* 16K */
/* Misc. Config Register : Auto Re-enable interrupts */
#define AUTO_RE_ENABLE_INT BIT(4)
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 8af3a78d272..c54ee287b87 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -67,10 +67,9 @@ mwifiex_cmd_802_11_rssi_info(struct mwifiex_private *priv,
*/
static int mwifiex_cmd_mac_control(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- u16 cmd_action, void *data_buf)
+ u16 cmd_action, u16 *action)
{
struct host_cmd_ds_mac_control *mac_ctrl = &cmd->params.mac_ctrl;
- u16 action = *((u16 *) data_buf);
if (cmd_action != HostCmd_ACT_GEN_SET) {
dev_err(priv->adapter->dev,
@@ -81,7 +80,7 @@ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv,
cmd->command = cpu_to_le16(HostCmd_CMD_MAC_CONTROL);
cmd->size =
cpu_to_le16(sizeof(struct host_cmd_ds_mac_control) + S_DS_GEN);
- mac_ctrl->action = cpu_to_le16(action);
+ mac_ctrl->action = cpu_to_le16(*action);
return 0;
}
@@ -104,10 +103,9 @@ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv,
static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
u16 cmd_action, u32 cmd_oid,
- void *data_buf)
+ u32 *ul_temp)
{
struct host_cmd_ds_802_11_snmp_mib *snmp_mib = &cmd->params.smib;
- u32 ul_temp;
dev_dbg(priv->adapter->dev, "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid);
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SNMP_MIB);
@@ -127,9 +125,8 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
if (cmd_action == HostCmd_ACT_GEN_SET) {
snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
- ul_temp = *((u32 *) data_buf);
*((__le16 *) (snmp_mib->value)) =
- cpu_to_le16((u16) ul_temp);
+ cpu_to_le16((u16) *ul_temp);
cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
+ sizeof(u16));
}
@@ -139,9 +136,8 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
if (cmd_action == HostCmd_ACT_GEN_SET) {
snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
- ul_temp = *((u32 *) data_buf);
*(__le16 *) (snmp_mib->value) =
- cpu_to_le16((u16) ul_temp);
+ cpu_to_le16((u16) *ul_temp);
cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
+ sizeof(u16));
}
@@ -152,9 +148,8 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
if (cmd_action == HostCmd_ACT_GEN_SET) {
snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
- ul_temp = (*(u32 *) data_buf);
*((__le16 *) (snmp_mib->value)) =
- cpu_to_le16((u16) ul_temp);
+ cpu_to_le16((u16) *ul_temp);
cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
+ sizeof(u16));
}
@@ -164,9 +159,8 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
if (cmd_action == HostCmd_ACT_GEN_SET) {
snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
- ul_temp = *(u32 *) data_buf;
*((__le16 *) (snmp_mib->value)) =
- cpu_to_le16((u16) ul_temp);
+ cpu_to_le16((u16) *ul_temp);
cmd->size = cpu_to_le16(le16_to_cpu(cmd->size)
+ sizeof(u16));
}
@@ -209,13 +203,11 @@ mwifiex_cmd_802_11_get_log(struct host_cmd_ds_command *cmd)
*/
static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- u16 cmd_action, void *data_buf)
+ u16 cmd_action, u16 *pbitmap_rates)
{
struct host_cmd_ds_tx_rate_cfg *rate_cfg = &cmd->params.tx_rate_cfg;
struct mwifiex_rate_scope *rate_scope;
struct mwifiex_rate_drop_pattern *rate_drop;
- u16 *pbitmap_rates = (u16 *) data_buf;
-
u32 i;
cmd->command = cpu_to_le16(HostCmd_CMD_TX_RATE_CFG);
@@ -272,10 +264,10 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
* - Ensuring correct endian-ness
*/
static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd,
- u16 cmd_action, void *data_buf)
+ u16 cmd_action,
+ struct host_cmd_ds_txpwr_cfg *txp)
{
struct mwifiex_types_power_group *pg_tlv;
- struct host_cmd_ds_txpwr_cfg *txp;
struct host_cmd_ds_txpwr_cfg *cmd_txp_cfg = &cmd->params.txp_cfg;
cmd->command = cpu_to_le16(HostCmd_CMD_TXPWR_CFG);
@@ -283,12 +275,11 @@ static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd,
cpu_to_le16(S_DS_GEN + sizeof(struct host_cmd_ds_txpwr_cfg));
switch (cmd_action) {
case HostCmd_ACT_GEN_SET:
- txp = (struct host_cmd_ds_txpwr_cfg *) data_buf;
if (txp->mode) {
pg_tlv = (struct mwifiex_types_power_group
- *) ((unsigned long) data_buf +
+ *) ((unsigned long) txp +
sizeof(struct host_cmd_ds_txpwr_cfg));
- memmove(cmd_txp_cfg, data_buf,
+ memmove(cmd_txp_cfg, txp,
sizeof(struct host_cmd_ds_txpwr_cfg) +
sizeof(struct mwifiex_types_power_group) +
pg_tlv->length);
@@ -300,8 +291,7 @@ static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd,
sizeof(struct mwifiex_types_power_group) +
pg_tlv->length);
} else {
- memmove(cmd_txp_cfg, data_buf,
- sizeof(struct host_cmd_ds_txpwr_cfg));
+ memmove(cmd_txp_cfg, txp, sizeof(*txp));
}
cmd_txp_cfg->action = cpu_to_le16(cmd_action);
break;
@@ -322,22 +312,23 @@ static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd,
* (as required)
* - Ensuring correct endian-ness
*/
-static int mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
- struct host_cmd_ds_command *cmd,
- u16 cmd_action,
- struct mwifiex_hs_config_param *data_buf)
+static int
+mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action,
+ struct mwifiex_hs_config_param *hscfg_param)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg = &cmd->params.opt_hs_cfg;
u16 hs_activate = false;
- if (data_buf == NULL)
+ if (!hscfg_param)
/* New Activate command */
hs_activate = true;
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH);
if (!hs_activate &&
- (data_buf->conditions
+ (hscfg_param->conditions
!= cpu_to_le32(HOST_SLEEP_CFG_CANCEL))
&& ((adapter->arp_filter_size > 0)
&& (adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) {
@@ -359,9 +350,9 @@ static int mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
hs_cfg->params.hs_activate.resp_ctrl = RESP_NEEDED;
} else {
hs_cfg->action = cpu_to_le16(HS_CONFIGURE);
- hs_cfg->params.hs_config.conditions = data_buf->conditions;
- hs_cfg->params.hs_config.gpio = data_buf->gpio;
- hs_cfg->params.hs_config.gap = data_buf->gap;
+ hs_cfg->params.hs_config.conditions = hscfg_param->conditions;
+ hs_cfg->params.hs_config.gpio = hscfg_param->gpio;
+ hs_cfg->params.hs_config.gap = hscfg_param->gap;
dev_dbg(adapter->dev,
"cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n",
hs_cfg->params.hs_config.conditions,
@@ -405,11 +396,11 @@ static int mwifiex_cmd_802_11_mac_address(struct mwifiex_private *priv,
* - Setting MAC multicast address
* - Ensuring correct endian-ness
*/
-static int mwifiex_cmd_mac_multicast_adr(struct host_cmd_ds_command *cmd,
- u16 cmd_action, void *data_buf)
+static int
+mwifiex_cmd_mac_multicast_adr(struct host_cmd_ds_command *cmd,
+ u16 cmd_action,
+ struct mwifiex_multicast_list *mcast_list)
{
- struct mwifiex_multicast_list *mcast_list =
- (struct mwifiex_multicast_list *) data_buf;
struct host_cmd_ds_mac_multicast_adr *mcast_addr = &cmd->params.mc_addr;
cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_mac_multicast_adr) +
@@ -435,7 +426,7 @@ static int mwifiex_cmd_mac_multicast_adr(struct host_cmd_ds_command *cmd,
*/
static int mwifiex_cmd_802_11_deauthenticate(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- void *data_buf)
+ u8 *mac)
{
struct host_cmd_ds_802_11_deauthenticate *deauth = &cmd->params.deauth;
@@ -444,7 +435,7 @@ static int mwifiex_cmd_802_11_deauthenticate(struct mwifiex_private *priv,
+ S_DS_GEN);
/* Set AP MAC address */
- memcpy(deauth->mac_addr, (u8 *) data_buf, ETH_ALEN);
+ memcpy(deauth->mac_addr, mac, ETH_ALEN);
dev_dbg(priv->adapter->dev, "cmd: Deauth: %pM\n", deauth->mac_addr);
@@ -543,15 +534,14 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
* encryption (TKIP, AES) (as required)
* - Ensuring correct endian-ness
*/
-static int mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
- struct host_cmd_ds_command *cmd,
- u16 cmd_action,
- u32 cmd_oid, void *data_buf)
+static int
+mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, u32 cmd_oid,
+ struct mwifiex_ds_encrypt_key *enc_key)
{
struct host_cmd_ds_802_11_key_material *key_material =
&cmd->params.key_material;
- struct mwifiex_ds_encrypt_key *enc_key =
- (struct mwifiex_ds_encrypt_key *) data_buf;
u16 key_param_len = 0;
int ret = 0;
const u8 bc_mac[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
@@ -741,7 +731,7 @@ static int mwifiex_cmd_802_11d_domain_info(struct mwifiex_private *priv,
*/
static int mwifiex_cmd_802_11_rf_channel(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
- u16 cmd_action, void *data_buf)
+ u16 cmd_action, u16 *channel)
{
struct host_cmd_ds_802_11_rf_channel *rf_chan =
&cmd->params.rf_channel;
@@ -759,7 +749,7 @@ static int mwifiex_cmd_802_11_rf_channel(struct mwifiex_private *priv,
rf_type = le16_to_cpu(rf_chan->rf_type);
SET_SECONDARYCHAN(rf_type, priv->adapter->chan_offset);
- rf_chan->current_channel = cpu_to_le16(*((u16 *) data_buf));
+ rf_chan->current_channel = cpu_to_le16(*channel);
}
rf_chan->action = cpu_to_le16(cmd_action);
return 0;
@@ -774,11 +764,10 @@ static int mwifiex_cmd_802_11_rf_channel(struct mwifiex_private *priv,
* - Ensuring correct endian-ness
*/
static int mwifiex_cmd_ibss_coalescing_status(struct host_cmd_ds_command *cmd,
- u16 cmd_action, void *data_buf)
+ u16 cmd_action, u16 *enable)
{
struct host_cmd_ds_802_11_ibss_status *ibss_coal =
&(cmd->params.ibss_coalescing);
- u16 enable = 0;
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_IBSS_COALESCING_STATUS);
cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_ibss_status) +
@@ -788,9 +777,10 @@ static int mwifiex_cmd_ibss_coalescing_status(struct host_cmd_ds_command *cmd,
switch (cmd_action) {
case HostCmd_ACT_GEN_SET:
- if (data_buf != NULL)
- enable = *(u16 *) data_buf;
- ibss_coal->enable = cpu_to_le16(enable);
+ if (enable)
+ ibss_coal->enable = cpu_to_le16(*enable);
+ else
+ ibss_coal->enable = 0;
break;
/* In other case.. Nothing to do */
@@ -822,9 +812,8 @@ static int mwifiex_cmd_ibss_coalescing_status(struct host_cmd_ds_command *cmd,
static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
u16 cmd_action, void *data_buf)
{
- struct mwifiex_ds_reg_rw *reg_rw;
+ struct mwifiex_ds_reg_rw *reg_rw = data_buf;
- reg_rw = (struct mwifiex_ds_reg_rw *) data_buf;
switch (le16_to_cpu(cmd->command)) {
case HostCmd_CMD_MAC_REG_ACCESS:
{
@@ -893,8 +882,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
}
case HostCmd_CMD_802_11_EEPROM_ACCESS:
{
- struct mwifiex_ds_read_eeprom *rd_eeprom =
- (struct mwifiex_ds_read_eeprom *) data_buf;
+ struct mwifiex_ds_read_eeprom *rd_eeprom = data_buf;
struct host_cmd_ds_802_11_eeprom_access *cmd_eeprom =
(struct host_cmd_ds_802_11_eeprom_access *)
&cmd->params.eeprom;
@@ -923,8 +911,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
u16 cmd_action, u32 cmd_oid,
void *data_buf, void *cmd_buf)
{
- struct host_cmd_ds_command *cmd_ptr =
- (struct host_cmd_ds_command *) cmd_buf;
+ struct host_cmd_ds_command *cmd_ptr = cmd_buf;
int ret = 0;
/* Prepare command */
@@ -1126,6 +1113,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
struct mwifiex_ds_11n_amsdu_aggr_ctrl amsdu_aggr_ctrl;
struct mwifiex_ds_auto_ds auto_ds;
enum state_11d_t state_11d;
+ struct mwifiex_ds_11n_tx_cfg tx_cfg;
if (first_sta) {
@@ -1181,7 +1169,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
/* Send request to firmware */
ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
HostCmd_ACT_GEN_SET, 0,
- (void *) &amsdu_aggr_ctrl);
+ &amsdu_aggr_ctrl);
if (ret)
return -1;
/* MAC Control must be the last command in init_fw */
@@ -1211,8 +1199,15 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
if (ret)
dev_err(priv->adapter->dev, "11D: failed to enable 11D\n");
+ /* Send cmd to FW to configure 11n-specific settings
+ * (short GI, channel BW, green field support etc.) for transmit
+ */
+ tx_cfg.tx_htcap = MWIFIEX_FW_DEF_HTTXCFG;
+ ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_CFG,
+ HostCmd_ACT_GEN_SET, 0, &tx_cfg);
+
/* set last_init_cmd */
- priv->adapter->last_init_cmd = HostCmd_CMD_802_11_SNMP_MIB;
+ priv->adapter->last_init_cmd = HostCmd_CMD_11N_CFG;
ret = -EINPROGRESS;
return ret;
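A recurring pattern in the sta_cmd.c hunks is replacing void *data_buf parameters with properly typed pointers, which removes the casts at every use and lets the compiler check callers. A small before/after illustration with a simplified structure (not the real command layout):

#include <stdio.h>

struct scan_cfg {
	int bss_mode;
};

/* Old style: opaque pointer, cast inside the handler. */
static void prepare_scan_old(void *data_buf)
{
	struct scan_cfg *cfg = (struct scan_cfg *)data_buf;

	printf("bss_mode %d\n", cfg->bss_mode);
}

/* New style: the prototype states what the handler expects. */
static void prepare_scan_new(struct scan_cfg *cfg)
{
	printf("bss_mode %d\n", cfg->bss_mode);
}

int main(void)
{
	struct scan_cfg cfg = { .bss_mode = 1 };

	prepare_scan_old(&cfg);
	prepare_scan_new(&cfg);
	return 0;
}
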
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index d08f76429a0..6804239d87b 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -120,11 +120,10 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
*/
static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp,
- void *data_buf)
+ struct mwifiex_ds_get_signal *signal)
{
struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp =
&resp->params.rssi_info_rsp;
- struct mwifiex_ds_get_signal *signal;
priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last);
priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last);
@@ -139,9 +138,8 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
priv->bcn_nf_avg = le16_to_cpu(rssi_info_rsp->bcn_nf_avg);
/* Need to indicate IOCTL complete */
- if (data_buf) {
- signal = (struct mwifiex_ds_get_signal *) data_buf;
- memset(signal, 0, sizeof(struct mwifiex_ds_get_signal));
+ if (signal) {
+ memset(signal, 0, sizeof(*signal));
signal->selector = ALL_RSSI_INFO_MASK;
@@ -185,7 +183,7 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
*/
static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp,
- void *data_buf)
+ u32 *data_buf)
{
struct host_cmd_ds_802_11_snmp_mib *smib = &resp->params.smib;
u16 oid = le16_to_cpu(smib->oid);
@@ -198,7 +196,7 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv,
if (query_type == HostCmd_ACT_GEN_GET) {
ul_temp = le16_to_cpu(*((__le16 *) (smib->value)));
if (data_buf)
- *(u32 *)data_buf = ul_temp;
+ *data_buf = ul_temp;
switch (oid) {
case FRAG_THRESH_I:
dev_dbg(priv->adapter->dev,
@@ -228,14 +226,12 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv,
*/
static int mwifiex_ret_get_log(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp,
- void *data_buf)
+ struct mwifiex_ds_get_stats *stats)
{
struct host_cmd_ds_802_11_get_log *get_log =
(struct host_cmd_ds_802_11_get_log *) &resp->params.get_log;
- struct mwifiex_ds_get_stats *stats;
- if (data_buf) {
- stats = (struct mwifiex_ds_get_stats *) data_buf;
+ if (stats) {
stats->mcast_tx_frame = le32_to_cpu(get_log->mcast_tx_frame);
stats->failed = le32_to_cpu(get_log->failed);
stats->retry = le32_to_cpu(get_log->retry);
@@ -278,9 +274,8 @@ static int mwifiex_ret_get_log(struct mwifiex_private *priv,
*/
static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp,
- void *data_buf)
+ struct mwifiex_rate_cfg *ds_rate)
{
- struct mwifiex_rate_cfg *ds_rate;
struct host_cmd_ds_tx_rate_cfg *rate_cfg = &resp->params.tx_rate_cfg;
struct mwifiex_rate_scope *rate_scope;
struct mwifiex_ie_types_header *head;
@@ -329,8 +324,7 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
HostCmd_CMD_802_11_TX_RATE_QUERY,
HostCmd_ACT_GEN_GET, 0, NULL);
- if (data_buf) {
- ds_rate = (struct mwifiex_rate_cfg *) data_buf;
+ if (ds_rate) {
if (le16_to_cpu(rate_cfg->action) == HostCmd_ACT_GEN_GET) {
if (priv->is_data_rate_auto) {
ds_rate->is_rate_auto = 1;
@@ -413,8 +407,7 @@ static int mwifiex_get_power_level(struct mwifiex_private *priv, void *data_buf)
* and saving the current Tx power level in driver.
*/
static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
- struct host_cmd_ds_command *resp,
- void *data_buf)
+ struct host_cmd_ds_command *resp)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_txpwr_cfg *txp_cfg = &resp->params.txp_cfg;
@@ -631,7 +624,7 @@ static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
*/
static int mwifiex_ret_802_11_rf_channel(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp,
- void *data_buf)
+ u16 *data_buf)
{
struct host_cmd_ds_802_11_rf_channel *rf_channel =
&resp->params.rf_channel;
@@ -644,8 +637,9 @@ static int mwifiex_ret_802_11_rf_channel(struct mwifiex_private *priv,
/* Update the channel again */
priv->curr_bss_params.bss_descriptor.channel = new_channel;
}
+
if (data_buf)
- *((u16 *)data_buf) = new_channel;
+ *data_buf = new_channel;
return 0;
}
@@ -658,13 +652,11 @@ static int mwifiex_ret_802_11_rf_channel(struct mwifiex_private *priv,
*/
static int mwifiex_ret_ver_ext(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp,
- void *data_buf)
+ struct host_cmd_ds_version_ext *version_ext)
{
struct host_cmd_ds_version_ext *ver_ext = &resp->params.verext;
- struct host_cmd_ds_version_ext *version_ext;
- if (data_buf) {
- version_ext = (struct host_cmd_ds_version_ext *)data_buf;
+ if (version_ext) {
version_ext->version_str_sel = ver_ext->version_str_sel;
memcpy(version_ext->version_str, ver_ext->version_str,
sizeof(char) * 128);
@@ -686,8 +678,8 @@ static int mwifiex_ret_reg_access(u16 type, struct host_cmd_ds_command *resp,
struct mwifiex_ds_read_eeprom *eeprom;
if (data_buf) {
- reg_rw = (struct mwifiex_ds_reg_rw *) data_buf;
- eeprom = (struct mwifiex_ds_read_eeprom *) data_buf;
+ reg_rw = data_buf;
+ eeprom = data_buf;
switch (type) {
case HostCmd_CMD_MAC_REG_ACCESS:
{
@@ -825,13 +817,11 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
* This is a generic function, which calls command specific
* response handlers based on the command ID.
*/
-int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv,
- u16 cmdresp_no, void *cmd_buf)
+int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
+ struct host_cmd_ds_command *resp)
{
int ret = 0;
struct mwifiex_adapter *adapter = priv->adapter;
- struct host_cmd_ds_command *resp =
- (struct host_cmd_ds_command *) cmd_buf;
void *data_buf = adapter->curr_cmd->data_buf;
/* If the command is not successful, cleanup and return failure */
@@ -865,7 +855,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv,
"info: CMD_RESP: BG_SCAN result is ready!\n");
break;
case HostCmd_CMD_TXPWR_CFG:
- ret = mwifiex_ret_tx_power_cfg(priv, resp, data_buf);
+ ret = mwifiex_ret_tx_power_cfg(priv, resp);
break;
case HostCmd_CMD_802_11_PS_MODE_ENH:
ret = mwifiex_ret_enh_power_mode(priv, resp, data_buf);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index d05907d0503..c34ff8c4f4f 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -487,6 +487,20 @@ int mwifiex_set_radio_band_cfg(struct mwifiex_private *priv,
}
/*
+ * The function disables auto deep sleep mode.
+ */
+int mwifiex_disable_auto_ds(struct mwifiex_private *priv)
+{
+ struct mwifiex_ds_auto_ds auto_ds;
+
+ auto_ds.auto_ds = DEEP_SLEEP_OFF;
+
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+ DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds);
+}
+EXPORT_SYMBOL_GPL(mwifiex_disable_auto_ds);
+
+/*
* IOCTL request handler to set/get active channel.
*
* This function performs validity checking on channel/frequency
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 1fdddece747..27430512f7c 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -187,7 +187,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
ret = mwifiex_11n_rx_reorder_pkt(priv, local_rx_pd->seq_num,
local_rx_pd->priority, ta,
(u8) local_rx_pd->rx_pkt_type,
- (void *) skb);
+ skb);
if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
if (priv && (ret == -1))
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index fa6221bc910..1822bfad889 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -47,6 +47,7 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct txpd *local_tx_pd;
struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
+ u8 pad;
if (!skb->len) {
dev_err(adapter->dev, "Tx: bad packet length: %d\n",
@@ -55,15 +56,19 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
return skb->data;
}
- BUG_ON(skb_headroom(skb) < (sizeof(*local_tx_pd) + INTF_HEADER_LEN));
- skb_push(skb, sizeof(*local_tx_pd));
+ /* If skb->data is not aligned, add padding */
+ pad = (4 - (((void *)skb->data - NULL) & 0x3)) % 4;
+
+ BUG_ON(skb_headroom(skb) < (sizeof(*local_tx_pd) + INTF_HEADER_LEN
+ + pad));
+ skb_push(skb, sizeof(*local_tx_pd) + pad);
local_tx_pd = (struct txpd *) skb->data;
memset(local_tx_pd, 0, sizeof(struct txpd));
local_tx_pd->bss_num = priv->bss_num;
local_tx_pd->bss_type = priv->bss_type;
local_tx_pd->tx_pkt_length = cpu_to_le16((u16) (skb->len -
- sizeof(struct txpd)));
+ (sizeof(struct txpd) + pad)));
local_tx_pd->priority = (u8) skb->priority;
local_tx_pd->pkt_delay_2ms =
@@ -88,7 +93,7 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
}
/* Offset of actual data */
- local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
+ local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) + pad);
/* make space for INTF_HEADER_LEN */
skb_push(skb, INTF_HEADER_LEN);
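The padding added above keeps the txpd header 4-byte aligned wherever skb->data happens to start, and the pad is folded into tx_pkt_length, tx_pkt_offset and the enlarged MWIFIEX_MIN_DATA_HEADER_LEN. A stand-alone sketch of the same arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Bytes needed to round an address up to the next 4-byte boundary. */
static unsigned int pad_to_4(uintptr_t addr)
{
	return (4 - (addr & 0x3)) % 4;
}

int main(void)
{
	uintptr_t addr;

	for (addr = 0x1000; addr < 0x1005; addr++)
		printf("data at 0x%lx -> pad %u\n",
		       (unsigned long)addr, pad_to_4(addr));
	return 0;
}
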
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index aaa50c07419..6190b2fa57a 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -71,7 +71,7 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
u8 *head_ptr;
struct txpd *local_tx_pd = NULL;
- head_ptr = (u8 *) mwifiex_process_sta_txpd(priv, skb);
+ head_ptr = mwifiex_process_sta_txpd(priv, skb);
if (head_ptr) {
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
local_tx_pd =
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 91634daec30..69e260b4171 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -121,6 +121,7 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
memcpy(ra_list->ra, ra, ETH_ALEN);
ra_list->total_pkts_size = 0;
+ ra_list->total_pkts = 0;
dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
@@ -633,6 +634,8 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_adapter *adapter,
ra_list = NULL;
} else {
memcpy(ra, skb->data, ETH_ALEN);
+ if (ra[0] & 0x01)
+ memset(ra, 0xff, ETH_ALEN);
ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
}
@@ -645,6 +648,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_adapter *adapter,
skb_queue_tail(&ra_list->skb_head, skb);
ra_list->total_pkts_size += skb->len;
+ ra_list->total_pkts++;
atomic_inc(&priv->wmm.tx_pkts_queued);
@@ -971,28 +975,6 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
}
/*
- * This function gets the number of packets in the Tx queue of a
- * particular RA list.
- */
-static int
-mwifiex_num_pkts_in_txq(struct mwifiex_private *priv,
- struct mwifiex_ra_list_tbl *ptr, int max_buf_size)
-{
- int count = 0, total_size = 0;
- struct sk_buff *skb, *tmp;
-
- skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
- total_size += skb->len;
- if (total_size < max_buf_size)
- ++count;
- else
- break;
- }
-
- return count;
-}
-
-/*
* This function sends a single packet to firmware for transmission.
*/
static void
@@ -1019,6 +1001,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
ptr->total_pkts_size -= skb->len;
+ ptr->total_pkts--;
if (!skb_queue_empty(&ptr->skb_head))
skb_next = skb_peek(&ptr->skb_head);
@@ -1044,6 +1027,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
skb_queue_tail(&ptr->skb_head, skb);
ptr->total_pkts_size += skb->len;
+ ptr->total_pkts++;
tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
@@ -1231,9 +1215,9 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
}
/* Minimum number of AMSDU */
#define MIN_NUM_AMSDU 2
+
if (mwifiex_is_amsdu_allowed(priv, tid) &&
- (mwifiex_num_pkts_in_txq(priv, ptr, adapter->tx_buf_size) >=
- MIN_NUM_AMSDU))
+ (ptr->total_pkts >= MIN_NUM_AMSDU))
mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN,
ptr_index, flags);
/* ra_list_spinlock has been freed in
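The wmm.c hunks replace the skb_queue_walk in the removed mwifiex_num_pkts_in_txq() with a total_pkts counter that is updated on every enqueue, dequeue and requeue, turning the AMSDU eligibility check into a single comparison. A minimal model of the bookkeeping (simplified types, illustrative only):

#include <stdio.h>

struct ra_list {
	unsigned int total_pkts;
	unsigned int total_pkts_size;
};

/* Keep the packet count in lock-step with the queue operations so the
 * aggregation check can read one field instead of walking the queue. */
static void enqueue(struct ra_list *ra, unsigned int len)
{
	ra->total_pkts_size += len;
	ra->total_pkts++;
}

static void dequeue(struct ra_list *ra, unsigned int len)
{
	ra->total_pkts_size -= len;
	ra->total_pkts--;
}

#define MIN_NUM_AMSDU 2

int main(void)
{
	struct ra_list ra = { 0, 0 };

	enqueue(&ra, 1500);
	enqueue(&ra, 1500);
	printf("amsdu allowed: %d\n", ra.total_pkts >= MIN_NUM_AMSDU);	/* 1 */
	dequeue(&ra, 1500);
	printf("amsdu allowed: %d\n", ra.total_pkts >= MIN_NUM_AMSDU);	/* 0 */
	return 0;
}
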
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index aeac3cc4dbe..da36dbf8d87 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -10,6 +10,7 @@
*/
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -1891,9 +1892,9 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
txpriority = index;
- if (ieee80211_is_data_qos(wh->frame_control) &&
- skb->protocol != cpu_to_be16(ETH_P_PAE) &&
- sta->ht_cap.ht_supported && priv->ap_fw) {
+ if (priv->ap_fw && sta && sta->ht_cap.ht_supported
+ && skb->protocol != cpu_to_be16(ETH_P_PAE)
+ && ieee80211_is_data_qos(wh->frame_control)) {
tid = qos & 0xf;
mwl8k_tx_count_packet(sta, tid);
spin_lock(&priv->stream_lock);
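The reordered condition in mwl8k_txq_xmit() now tests priv->ap_fw and sta before dereferencing sta->ht_cap, so && short-circuiting protects the no-station case. A tiny stand-alone illustration of why the ordering matters:

#include <stdbool.h>
#include <stdio.h>

struct sta {
	bool ht_supported;
};

/* Safe: 'sta' is tested before it is dereferenced, so a NULL station
 * simply skips the HT path instead of crashing. */
static bool ht_tx_path(bool ap_fw, const struct sta *sta, bool qos_data)
{
	return ap_fw && sta && sta->ht_supported && qos_data;
}

int main(void)
{
	struct sta s = { .ht_supported = true };

	printf("%d\n", ht_tx_path(true, &s, true));	/* 1 */
	printf("%d\n", ht_tx_path(true, NULL, true));	/* 0, no crash */
	return 0;
}
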
diff --git a/drivers/net/wireless/orinoco/airport.c b/drivers/net/wireless/orinoco/airport.c
index 4a0a0e5265c..0ca8b1455cd 100644
--- a/drivers/net/wireless/orinoco/airport.c
+++ b/drivers/net/wireless/orinoco/airport.c
@@ -150,7 +150,7 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
struct orinoco_private *priv;
struct airport *card;
unsigned long phys_addr;
- hermes_t *hw;
+ struct hermes *hw;
if (macio_resource_count(mdev) < 1 || macio_irq_count(mdev) < 1) {
printk(KERN_ERR PFX "Wrong interrupt/addresses in OF tree\n");
@@ -228,10 +228,9 @@ MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("Driver for the Apple Airport wireless card.");
MODULE_LICENSE("Dual MPL/GPL");
-static struct of_device_id airport_match[] =
-{
+static struct of_device_id airport_match[] = {
{
- .name = "radio",
+ .name = "radio",
},
{},
};
@@ -240,7 +239,7 @@ MODULE_DEVICE_TABLE(of, airport_match);
static struct macio_driver airport_driver = {
.driver = {
- .name = DRIVER_NAME,
+ .name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = airport_match,
},
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index 736bbb9bd1d..f7b15b8934f 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -59,7 +59,7 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
for (i = 0; i < NUM_CHANNELS; i++) {
if (priv->channel_mask & (1 << i)) {
priv->channels[i].center_freq =
- ieee80211_dsss_chan_to_freq(i+1);
+ ieee80211_dsss_chan_to_freq(i + 1);
channels++;
}
}
@@ -182,7 +182,7 @@ static int orinoco_set_channel(struct wiphy *wiphy,
channel = ieee80211_freq_to_dsss_chan(chan->center_freq);
if ((channel < 1) || (channel > NUM_CHANNELS) ||
- !(priv->channel_mask & (1 << (channel-1))))
+ !(priv->channel_mask & (1 << (channel - 1))))
return -EINVAL;
if (orinoco_lock(priv, &flags) != 0)
@@ -191,7 +191,7 @@ static int orinoco_set_channel(struct wiphy *wiphy,
priv->channel = channel;
if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
/* Fast channel change - no commit if successful */
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_SET_CHANNEL,
channel, NULL);
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 259d7585398..527cf5333db 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -100,7 +100,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
/* Plug Data Area (PDA) */
__le16 *pda;
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
const struct firmware *fw_entry;
const struct orinoco_fw_header *hdr;
const unsigned char *first_block;
@@ -205,7 +205,7 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
const unsigned char *image, const void *end,
int secondary)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int ret = 0;
const unsigned char *ptr;
const unsigned char *first_block;
@@ -322,9 +322,8 @@ symbol_dl_firmware(struct orinoco_private *priv,
fw_entry->data + fw_entry->size, 1);
if (!orinoco_cached_fw_get(priv, false))
release_firmware(fw_entry);
- if (ret) {
+ if (ret)
dev_err(dev, "Secondary firmware download failed\n");
- }
return ret;
}
diff --git a/drivers/net/wireless/orinoco/fw.h b/drivers/net/wireless/orinoco/fw.h
index 89fc26d25b0..aca63e3c4b5 100644
--- a/drivers/net/wireless/orinoco/fw.h
+++ b/drivers/net/wireless/orinoco/fw.h
@@ -14,7 +14,7 @@ int orinoco_download(struct orinoco_private *priv);
void orinoco_cache_fw(struct orinoco_private *priv, int ap);
void orinoco_uncache_fw(struct orinoco_private *priv);
#else
-#define orinoco_cache_fw(priv, ap) do { } while(0)
+#define orinoco_cache_fw(priv, ap) do { } while (0)
#define orinoco_uncache_fw(priv) do { } while (0)
#endif
diff --git a/drivers/net/wireless/orinoco/hermes.c b/drivers/net/wireless/orinoco/hermes.c
index 6c6a23e08df..75c15bc7b34 100644
--- a/drivers/net/wireless/orinoco/hermes.c
+++ b/drivers/net/wireless/orinoco/hermes.c
@@ -103,7 +103,7 @@ static const struct hermes_ops hermes_ops_local;
Callable from any context.
*/
-static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0,
+static int hermes_issue_cmd(struct hermes *hw, u16 cmd, u16 param0,
u16 param1, u16 param2)
{
int k = CMD_BUSY_TIMEOUT;
@@ -132,7 +132,7 @@ static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0,
*/
/* For doing cmds that wipe the magic constant in SWSUPPORT0 */
-static int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
+static int hermes_doicmd_wait(struct hermes *hw, u16 cmd,
u16 parm0, u16 parm1, u16 parm2,
struct hermes_response *resp)
{
@@ -185,7 +185,8 @@ out:
return err;
}
-void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing)
+void hermes_struct_init(struct hermes *hw, void __iomem *address,
+ int reg_spacing)
{
hw->iobase = address;
hw->reg_spacing = reg_spacing;
@@ -195,7 +196,7 @@ void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing)
}
EXPORT_SYMBOL(hermes_struct_init);
-static int hermes_init(hermes_t *hw)
+static int hermes_init(struct hermes *hw)
{
u16 reg;
int err = 0;
@@ -249,7 +250,7 @@ static int hermes_init(hermes_t *hw)
* > 0 on error returned by the firmware
*
* Callable from any context, but locking is your problem. */
-static int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
+static int hermes_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0,
struct hermes_response *resp)
{
int err;
@@ -313,7 +314,7 @@ static int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
return err;
}
-static int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
+static int hermes_allocate(struct hermes *hw, u16 size, u16 *fid)
{
int err = 0;
int k;
@@ -363,7 +364,7 @@ static int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
* from firmware
*
* Callable from any context */
-static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
+static int hermes_bap_seek(struct hermes *hw, int bap, u16 id, u16 offset)
{
int sreg = bap ? HERMES_SELECT1 : HERMES_SELECT0;
int oreg = bap ? HERMES_OFFSET1 : HERMES_OFFSET0;
@@ -422,7 +423,7 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
* 0 on success
* > 0 on error from firmware
*/
-static int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
+static int hermes_bap_pread(struct hermes *hw, int bap, void *buf, int len,
u16 id, u16 offset)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
@@ -436,7 +437,7 @@ static int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
goto out;
/* Actually do the transfer */
- hermes_read_words(hw, dreg, buf, len/2);
+ hermes_read_words(hw, dreg, buf, len / 2);
out:
return err;
@@ -450,8 +451,8 @@ static int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
* 0 on success
* > 0 on error from firmware
*/
-static int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
- u16 id, u16 offset)
+static int hermes_bap_pwrite(struct hermes *hw, int bap, const void *buf,
+ int len, u16 id, u16 offset)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
int err = 0;
@@ -478,8 +479,8 @@ static int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
* practice.
*
* Callable from user or bh context. */
-static int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
- u16 *length, void *buf)
+static int hermes_read_ltv(struct hermes *hw, int bap, u16 rid,
+ unsigned bufsize, u16 *length, void *buf)
{
int err = 0;
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
@@ -523,7 +524,7 @@ static int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
return 0;
}
-static int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
+static int hermes_write_ltv(struct hermes *hw, int bap, u16 rid,
u16 length, const void *value)
{
int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
@@ -553,14 +554,14 @@ static int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
/*** Hermes AUX control ***/
static inline void
-hermes_aux_setaddr(hermes_t *hw, u32 addr)
+hermes_aux_setaddr(struct hermes *hw, u32 addr)
{
hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
}
static inline int
-hermes_aux_control(hermes_t *hw, int enabled)
+hermes_aux_control(struct hermes *hw, int enabled)
{
int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
@@ -594,7 +595,7 @@ hermes_aux_control(hermes_t *hw, int enabled)
* wl_lkm Agere fw does
* Don't know about intersil
*/
-static int hermesi_program_init(hermes_t *hw, u32 offset)
+static int hermesi_program_init(struct hermes *hw, u32 offset)
{
int err;
@@ -643,7 +644,7 @@ static int hermesi_program_init(hermes_t *hw, u32 offset)
* wl_lkm Agere fw does
* Don't know about intersil
*/
-static int hermesi_program_end(hermes_t *hw)
+static int hermesi_program_end(struct hermes *hw)
{
struct hermes_response resp;
int rc = 0;
@@ -684,7 +685,8 @@ static int hermes_program_bytes(struct hermes *hw, const char *data,
}
/* Read PDA from the adapter */
-static int hermes_read_pda(hermes_t *hw, __le16 *pda, u32 pda_addr, u16 pda_len)
+static int hermes_read_pda(struct hermes *hw, __le16 *pda, u32 pda_addr,
+ u16 pda_len)
{
int ret;
u16 pda_size;
diff --git a/drivers/net/wireless/orinoco/hermes.h b/drivers/net/wireless/orinoco/hermes.h
index d9f18c11682..28a42448d32 100644
--- a/drivers/net/wireless/orinoco/hermes.h
+++ b/drivers/net/wireless/orinoco/hermes.h
@@ -28,7 +28,7 @@
*
* As a module of low level hardware access routines, there is no
* locking. Users of this module should ensure that they serialize
- * access to the hermes_t structure, and to the hardware
+ * access to the hermes structure, and to the hardware
*/
#include <linux/if_ether.h>
@@ -43,7 +43,7 @@
#define HERMES_BAP_DATALEN_MAX (4096)
#define HERMES_BAP_OFFSET_MAX (4096)
#define HERMES_PORTID_MAX (7)
-#define HERMES_NUMPORTS_MAX (HERMES_PORTID_MAX+1)
+#define HERMES_NUMPORTS_MAX (HERMES_PORTID_MAX + 1)
#define HERMES_PDR_LEN_MAX (260) /* in bytes, from EK */
#define HERMES_PDA_RECS_MAX (200) /* a guess */
#define HERMES_PDA_LEN_MAX (1024) /* in bytes, from EK */
@@ -148,7 +148,7 @@
#define HERMES_CMD_WRITEMIF (0x0031)
/*--- Debugging Commands -----------------------------*/
-#define HERMES_CMD_TEST (0x0038)
+#define HERMES_CMD_TEST (0x0038)
/* Test command arguments */
@@ -178,8 +178,8 @@
#define HERMES_DESCRIPTOR_OFFSET 0
#define HERMES_802_11_OFFSET (14)
-#define HERMES_802_3_OFFSET (14+32)
-#define HERMES_802_2_OFFSET (14+32+14)
+#define HERMES_802_3_OFFSET (14 + 32)
+#define HERMES_802_2_OFFSET (14 + 32 + 14)
#define HERMES_TXCNTL2_OFFSET (HERMES_802_3_OFFSET - 2)
#define HERMES_RXSTAT_ERR (0x0003)
@@ -406,7 +406,7 @@ struct hermes_ops {
};
/* Basic control structure */
-typedef struct hermes {
+struct hermes {
void __iomem *iobase;
int reg_spacing;
#define HERMES_16BIT_REGSPACING 0
@@ -415,7 +415,7 @@ typedef struct hermes {
bool eeprom_pda;
const struct hermes_ops *ops;
void *priv;
-} hermes_t;
+};
/* Register access convenience macros */
#define hermes_read_reg(hw, off) \
@@ -427,28 +427,29 @@ typedef struct hermes {
hermes_write_reg((hw), HERMES_##name, (val))
/* Function prototypes */
-void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing);
+void hermes_struct_init(struct hermes *hw, void __iomem *address,
+ int reg_spacing);
/* Inline functions */
-static inline int hermes_present(hermes_t *hw)
+static inline int hermes_present(struct hermes *hw)
{
return hermes_read_regn(hw, SWSUPPORT0) == HERMES_MAGIC;
}
-static inline void hermes_set_irqmask(hermes_t *hw, u16 events)
+static inline void hermes_set_irqmask(struct hermes *hw, u16 events)
{
hw->inten = events;
hermes_write_regn(hw, INTEN, events);
}
-static inline int hermes_enable_port(hermes_t *hw, int port)
+static inline int hermes_enable_port(struct hermes *hw, int port)
{
return hw->ops->cmd_wait(hw, HERMES_CMD_ENABLE | (port << 8),
0, NULL);
}
-static inline int hermes_disable_port(hermes_t *hw, int port)
+static inline int hermes_disable_port(struct hermes *hw, int port)
{
return hw->ops->cmd_wait(hw, HERMES_CMD_DISABLE | (port << 8),
0, NULL);
@@ -456,13 +457,13 @@ static inline int hermes_disable_port(hermes_t *hw, int port)
/* Initiate an INQUIRE command (tallies or scan). The result will come as an
* information frame in __orinoco_ev_info() */
-static inline int hermes_inquire(hermes_t *hw, u16 rid)
+static inline int hermes_inquire(struct hermes *hw, u16 rid)
{
return hw->ops->cmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL);
}
-#define HERMES_BYTES_TO_RECLEN(n) ((((n)+1)/2) + 1)
-#define HERMES_RECLEN_TO_BYTES(n) (((n)-1) * 2)
+#define HERMES_BYTES_TO_RECLEN(n) ((((n) + 1) / 2) + 1)
+#define HERMES_RECLEN_TO_BYTES(n) (((n) - 1) * 2)
/* Note that for the next two, the count is in 16-bit words, not bytes */
static inline void hermes_read_words(struct hermes *hw, int off,
@@ -498,7 +499,8 @@ static inline void hermes_clear_words(struct hermes *hw, int off,
(hw->ops->write_ltv((hw), (bap), (rid), \
HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf)))
-static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
+static inline int hermes_read_wordrec(struct hermes *hw, int bap, u16 rid,
+ u16 *word)
{
__le16 rec;
int err;
@@ -508,7 +510,8 @@ static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
return err;
}
-static inline int hermes_write_wordrec(hermes_t *hw, int bap, u16 rid, u16 word)
+static inline int hermes_write_wordrec(struct hermes *hw, int bap, u16 rid,
+ u16 word)
{
__le16 rec = cpu_to_le16(word);
return HERMES_WRITE_RECORD(hw, bap, rid, &rec);
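The hermes.h hunks are mostly the hermes_t-to-struct-hermes rename plus spacing cleanups, but they also touch the record-length macros, which convert between byte counts and Hermes 16-bit record lengths (payload words plus one word for the length field). A quick stand-alone check of the round trip:

#include <stdio.h>

/* Hermes record lengths are counted in 16-bit words and include one
 * extra word for the length field itself. */
#define BYTES_TO_RECLEN(n)	((((n) + 1) / 2) + 1)
#define RECLEN_TO_BYTES(n)	(((n) - 1) * 2)

int main(void)
{
	unsigned int bytes = 6;			/* e.g. a MAC address */
	unsigned int reclen = BYTES_TO_RECLEN(bytes);

	printf("%u bytes -> reclen %u -> %u bytes\n",
	       bytes, reclen, RECLEN_TO_BYTES(reclen));	/* 6 -> 4 -> 6 */
	return 0;
}
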
diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c
index 2b2b9a1a979..4a10b7aca04 100644
--- a/drivers/net/wireless/orinoco/hermes_dld.c
+++ b/drivers/net/wireless/orinoco/hermes_dld.c
@@ -193,7 +193,7 @@ hermes_find_pdi(const struct pdi *first_pdi, u32 record_id, const void *end)
/* Process one Plug Data Item - find corresponding PDR and plug it */
static int
-hermes_plug_pdi(hermes_t *hw, const struct pdr *first_pdr,
+hermes_plug_pdi(struct hermes *hw, const struct pdr *first_pdr,
const struct pdi *pdi, const void *pdr_end)
{
const struct pdr *pdr;
@@ -220,7 +220,7 @@ hermes_plug_pdi(hermes_t *hw, const struct pdr *first_pdr,
* Attempt to write every record that is in the specified pda
* which also has a valid production data record for the firmware.
*/
-int hermes_apply_pda(hermes_t *hw,
+int hermes_apply_pda(struct hermes *hw,
const char *first_pdr,
const void *pdr_end,
const __le16 *pda,
@@ -274,7 +274,7 @@ hermes_blocks_length(const char *first_block, const void *end)
/*** Hermes programming ***/
/* Program the data blocks */
-int hermes_program(hermes_t *hw, const char *first_block, const void *end)
+int hermes_program(struct hermes *hw, const char *first_block, const void *end)
{
const struct dblock *blk;
u32 blkaddr;
@@ -387,7 +387,7 @@ DEFINE_DEFAULT_PDR(0x0161, 256,
*
* For certain records, use defaults if they are not found in pda.
*/
-int hermes_apply_pda_with_defaults(hermes_t *hw,
+int hermes_apply_pda_with_defaults(struct hermes *hw,
const char *first_pdr,
const void *pdr_end,
const __le16 *pda,
diff --git a/drivers/net/wireless/orinoco/hermes_dld.h b/drivers/net/wireless/orinoco/hermes_dld.h
index 583a5bcf917..b5377e232c6 100644
--- a/drivers/net/wireless/orinoco/hermes_dld.h
+++ b/drivers/net/wireless/orinoco/hermes_dld.h
@@ -27,21 +27,21 @@
#include "hermes.h"
-int hermesi_program_init(hermes_t *hw, u32 offset);
-int hermesi_program_end(hermes_t *hw);
-int hermes_program(hermes_t *hw, const char *first_block, const void *end);
+int hermesi_program_init(struct hermes *hw, u32 offset);
+int hermesi_program_end(struct hermes *hw);
+int hermes_program(struct hermes *hw, const char *first_block, const void *end);
-int hermes_read_pda(hermes_t *hw,
+int hermes_read_pda(struct hermes *hw,
__le16 *pda,
u32 pda_addr,
u16 pda_len,
int use_eeprom);
-int hermes_apply_pda(hermes_t *hw,
+int hermes_apply_pda(struct hermes *hw,
const char *first_pdr,
const void *pdr_end,
const __le16 *pda,
const void *pda_end);
-int hermes_apply_pda_with_defaults(hermes_t *hw,
+int hermes_apply_pda_with_defaults(struct hermes *hw,
const char *first_pdr,
const void *pdr_end,
const __le16 *pda,
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 3c7877a7c31..c09c8437c0b 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -47,7 +47,7 @@ struct comp_id {
u16 id, variant, major, minor;
} __packed;
-static inline fwtype_t determine_firmware_type(struct comp_id *nic_id)
+static inline enum fwtype determine_firmware_type(struct comp_id *nic_id)
{
if (nic_id->id < 0x8000)
return FIRMWARE_TYPE_AGERE;
@@ -71,11 +71,11 @@ int determine_fw_capabilities(struct orinoco_private *priv,
u32 *hw_ver)
{
struct device *dev = priv->dev;
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err;
struct comp_id nic_id, sta_id;
unsigned int firmver;
- char tmp[SYMBOL_MAX_VER_LEN+1] __attribute__((aligned(2)));
+ char tmp[SYMBOL_MAX_VER_LEN + 1] __attribute__((aligned(2)));
/* Get the hardware version */
err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_NICID, &nic_id);
@@ -280,7 +280,7 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
{
struct device *dev = priv->dev;
struct hermes_idstring nickbuf;
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int len;
int err;
u16 reclen;
@@ -458,7 +458,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
{
struct net_device *dev = priv->ndev;
struct wireless_dev *wdev = netdev_priv(dev);
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err;
struct hermes_idstring idbuf;
@@ -529,7 +529,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val));
/* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */
err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
- HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
+ HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid) + 2),
&idbuf);
if (err) {
printk(KERN_ERR "%s: Error %d setting OWNSSID\n",
@@ -537,7 +537,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
return err;
}
err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
- HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
+ HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid) + 2),
&idbuf);
if (err) {
printk(KERN_ERR "%s: Error %d setting DESIREDSSID\n",
@@ -549,7 +549,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
idbuf.len = cpu_to_le16(strlen(priv->nick));
memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val));
err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
- HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
+ HERMES_BYTES_TO_RECLEN(strlen(priv->nick) + 2),
&idbuf);
if (err) {
printk(KERN_ERR "%s: Error %d setting nickname\n",
@@ -689,7 +689,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
/* Get tsc from the firmware */
int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err = 0;
u8 tsc_arr[4][ORINOCO_SEQ_LEN];
@@ -706,7 +706,7 @@ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int ratemode = priv->bitratemode;
int err = 0;
@@ -737,7 +737,7 @@ int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
int orinoco_hw_get_act_bitrate(struct orinoco_private *priv, int *bitrate)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int i;
int err = 0;
u16 val;
@@ -786,7 +786,7 @@ int __orinoco_hw_set_wap(struct orinoco_private *priv)
{
int roaming_flag;
int err = 0;
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
switch (priv->firmware_type) {
case FIRMWARE_TYPE_AGERE:
@@ -818,7 +818,7 @@ int __orinoco_hw_set_wap(struct orinoco_private *priv)
* which is needed for 802.1x implementations. */
int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err = 0;
int i;
@@ -902,7 +902,7 @@ int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv)
int __orinoco_hw_setup_enc(struct orinoco_private *priv)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err = 0;
int master_wep_flag;
int auth_flag;
@@ -999,7 +999,7 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
u8 rx_mic[MIC_KEYLEN];
u8 tsc[ORINOCO_SEQ_LEN];
} __packed buf;
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int ret;
int err;
int k;
@@ -1052,7 +1052,7 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err;
err = hermes_write_wordrec(hw, USER_BAP,
@@ -1068,7 +1068,7 @@ int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
struct net_device *dev,
int mc_count, int promisc)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err = 0;
if (promisc != priv->promiscuous) {
@@ -1111,9 +1111,9 @@ int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
/* Return : < 0 -> error code ; >= 0 -> length */
int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
- char buf[IW_ESSID_MAX_SIZE+1])
+ char buf[IW_ESSID_MAX_SIZE + 1])
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err = 0;
struct hermes_idstring essidbuf;
char *p = (char *)(&essidbuf.val);
@@ -1166,7 +1166,7 @@ int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
int orinoco_hw_get_freq(struct orinoco_private *priv)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err = 0;
u16 channel;
int freq = 0;
@@ -1206,7 +1206,7 @@ int orinoco_hw_get_freq(struct orinoco_private *priv)
int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
int *numrates, s32 *rates, int max)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
struct hermes_idstring list;
unsigned char *p = (unsigned char *)&list.val;
int err = 0;
@@ -1238,7 +1238,7 @@ int orinoco_hw_trigger_scan(struct orinoco_private *priv,
const struct cfg80211_ssid *ssid)
{
struct net_device *dev = priv->ndev;
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
unsigned long flags;
int err = 0;
@@ -1323,7 +1323,7 @@ int orinoco_hw_trigger_scan(struct orinoco_private *priv,
int orinoco_hw_disassociate(struct orinoco_private *priv,
u8 *addr, u16 reason_code)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err;
struct {
@@ -1346,7 +1346,7 @@ int orinoco_hw_disassociate(struct orinoco_private *priv,
int orinoco_hw_get_current_bssid(struct orinoco_private *priv,
u8 *addr)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err;
err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 97af71e7995..8f6831f4e32 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -45,7 +45,7 @@ int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
struct net_device *dev,
int mc_count, int promisc);
int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
- char buf[IW_ESSID_MAX_SIZE+1]);
+ char buf[IW_ESSID_MAX_SIZE + 1]);
int orinoco_hw_get_freq(struct orinoco_private *priv);
int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
int *numrates, s32 *rates, int max);
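Several hw.c hunks above reflow the same expression: variable-length id-string records (SSID, nickname) are pushed to the firmware with hw->ops->write_ltv() and a record length of HERMES_BYTES_TO_RECLEN(strlen(...) + 2), the +2 covering the record's embedded 16-bit length field. A small sketch of packing such a record; the layout follows the driver's hermes_idstring usage (a 16-bit length plus the string bytes, val[] size assumed here), while the reclen formula (bytes rounded up to 16-bit words, plus one word for the type) is an assumption about what HERMES_BYTES_TO_RECLEN() expands to:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_idstring {
	uint16_t len;			/* little-endian on the wire */
	char val[32];			/* size assumed for this sketch */
};

/* Assumed equivalent of HERMES_BYTES_TO_RECLEN(): record length counted
 * in 16-bit words, with one extra word for the type field. */
static unsigned int bytes_to_reclen(unsigned int nbytes)
{
	return (nbytes + 1) / 2 + 1;
}

static unsigned int pack_idstring(struct toy_idstring *out, const char *s)
{
	size_t n = strlen(s);

	if (n > sizeof(out->val))
		n = sizeof(out->val);
	memset(out, 0, sizeof(*out));
	out->len = (uint16_t)n;		/* kernel code: cpu_to_le16() */
	memcpy(out->val, s, n);

	/* +2 covers the embedded length field, as in the patch:
	 * HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid) + 2) */
	return bytes_to_reclen(n + 2);
}

int main(void)
{
	struct toy_idstring id;
	unsigned int reclen = pack_idstring(&id, "home-net");

	printf("\"%s\" -> record length %u words\n", id.val, reclen);
	return 0;
}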
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 62c6b2b37db..ef7efe839bb 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -4,7 +4,7 @@
* adaptors, with Lucent/Agere, Intersil or Symbol firmware.
*
* Current maintainers (as of 29 September 2003) are:
- * Pavel Roskin <proski AT gnu.org>
+ * Pavel Roskin <proski AT gnu.org>
* and David Gibson <hermes AT gibson.dropbear.id.au>
*
* (C) Copyright David Gibson, IBM Corporation 2001-2003.
@@ -146,10 +146,10 @@ static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
#define ORINOCO_MAX_MTU (IEEE80211_MAX_DATA_LEN - ENCAPS_OVERHEAD)
#define MAX_IRQLOOPS_PER_IRQ 10
-#define MAX_IRQLOOPS_PER_JIFFY (20000/HZ) /* Based on a guestimate of
- * how many events the
- * device could
- * legitimately generate */
+#define MAX_IRQLOOPS_PER_JIFFY (20000 / HZ) /* Based on a guestimate of
+ * how many events the
+ * device could
+ * legitimately generate */
#define DUMMY_FID 0xFFFF
@@ -157,7 +157,7 @@ static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
HERMES_MAX_MULTICAST : 0)*/
#define MAX_MULTICAST(priv) (HERMES_MAX_MULTICAST)
-#define ORINOCO_INTEN (HERMES_EV_RX | HERMES_EV_ALLOC \
+#define ORINOCO_INTEN (HERMES_EV_RX | HERMES_EV_ALLOC \
| HERMES_EV_TX | HERMES_EV_TXEXC \
| HERMES_EV_WTERR | HERMES_EV_INFO \
| HERMES_EV_INFDROP)
@@ -437,12 +437,12 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err = 0;
u16 txfid = priv->txfid;
int tx_control;
unsigned long flags;
- u8 mic_buf[MICHAEL_MIC_LEN+1];
+ u8 mic_buf[MICHAEL_MIC_LEN + 1];
if (!netif_running(dev)) {
printk(KERN_ERR "%s: Tx on stopped device!\n",
@@ -579,7 +579,7 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
-static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw)
+static void __orinoco_ev_alloc(struct net_device *dev, struct hermes *hw)
{
struct orinoco_private *priv = ndev_priv(dev);
u16 fid = hermes_read_regn(hw, ALLOCFID);
@@ -594,7 +594,7 @@ static void __orinoco_ev_alloc(struct net_device *dev, hermes_t *hw)
hermes_write_regn(hw, ALLOCFID, DUMMY_FID);
}
-static void __orinoco_ev_tx(struct net_device *dev, hermes_t *hw)
+static void __orinoco_ev_tx(struct net_device *dev, struct hermes *hw)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
@@ -606,7 +606,7 @@ static void __orinoco_ev_tx(struct net_device *dev, hermes_t *hw)
hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
}
-static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
+static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
@@ -753,7 +753,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
struct sk_buff *skb;
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
len = le16_to_cpu(desc->data_len);
@@ -840,7 +840,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
stats->rx_dropped++;
}
-void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
+void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw)
{
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
@@ -918,7 +918,7 @@ void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
32bit boundary, plus 1 byte so we can read in odd length
packets from the card, which has an IO granularity of 16
bits */
- skb = dev_alloc_skb(length+ETH_HLEN+2+1);
+ skb = dev_alloc_skb(length + ETH_HLEN + 2 + 1);
if (!skb) {
printk(KERN_WARNING "%s: Can't allocate skb for Rx\n",
dev->name);
@@ -1402,7 +1402,7 @@ static void orinoco_process_scan_results(struct work_struct *work)
spin_unlock_irqrestore(&priv->scan_lock, flags);
}
-void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
+void __orinoco_ev_info(struct net_device *dev, struct hermes *hw)
{
struct orinoco_private *priv = ndev_priv(dev);
u16 infofid;
@@ -1620,7 +1620,7 @@ void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
}
EXPORT_SYMBOL(__orinoco_ev_info);
-static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw)
+static void __orinoco_ev_infdrop(struct net_device *dev, struct hermes *hw)
{
if (net_ratelimit())
printk(KERN_DEBUG "%s: Information frame lost.\n", dev->name);
@@ -1831,7 +1831,7 @@ static int __orinoco_commit(struct orinoco_private *priv)
int orinoco_commit(struct orinoco_private *priv)
{
struct net_device *dev = priv->ndev;
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err;
if (priv->broken_disableport) {
@@ -1874,12 +1874,12 @@ int orinoco_commit(struct orinoco_private *priv)
/* Interrupt handler */
/********************************************************************/
-static void __orinoco_ev_tick(struct net_device *dev, hermes_t *hw)
+static void __orinoco_ev_tick(struct net_device *dev, struct hermes *hw)
{
printk(KERN_DEBUG "%s: TICK\n", dev->name);
}
-static void __orinoco_ev_wterr(struct net_device *dev, hermes_t *hw)
+static void __orinoco_ev_wterr(struct net_device *dev, struct hermes *hw)
{
/* This seems to happen a fair bit under load, but ignoring it
seems to work fine...*/
@@ -1891,7 +1891,7 @@ irqreturn_t orinoco_interrupt(int irq, void *dev_id)
{
struct orinoco_private *priv = dev_id;
struct net_device *dev = priv->ndev;
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int count = MAX_IRQLOOPS_PER_IRQ;
u16 evstat, events;
/* These are used to detect a runaway interrupt situation.
@@ -1958,7 +1958,7 @@ irqreturn_t orinoco_interrupt(int irq, void *dev_id)
evstat = hermes_read_regn(hw, EVSTAT);
events = evstat & hw->inten;
- };
+ }
orinoco_unlock(priv, &flags);
return IRQ_HANDLED;
@@ -2017,8 +2017,8 @@ static void orinoco_unregister_pm_notifier(struct orinoco_private *priv)
unregister_pm_notifier(&priv->pm_notifier);
}
#else /* !PM_SLEEP || HERMES_CACHE_FW_ON_INIT */
-#define orinoco_register_pm_notifier(priv) do { } while(0)
-#define orinoco_unregister_pm_notifier(priv) do { } while(0)
+#define orinoco_register_pm_notifier(priv) do { } while (0)
+#define orinoco_unregister_pm_notifier(priv) do { } while (0)
#endif
/********************************************************************/
@@ -2029,7 +2029,7 @@ int orinoco_init(struct orinoco_private *priv)
{
struct device *dev = priv->dev;
struct wiphy *wiphy = priv_to_wiphy(priv);
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err = 0;
/* No need to lock, the hw_unavailable flag is already set in
diff --git a/drivers/net/wireless/orinoco/mic.c b/drivers/net/wireless/orinoco/mic.c
index c03e7f54d1b..fce4a843e65 100644
--- a/drivers/net/wireless/orinoco/mic.c
+++ b/drivers/net/wireless/orinoco/mic.c
@@ -59,10 +59,10 @@ int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key,
/* Copy header into buffer. We need the padding on the end zeroed */
memcpy(&hdr[0], da, ETH_ALEN);
memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN);
- hdr[ETH_ALEN*2] = priority;
- hdr[ETH_ALEN*2+1] = 0;
- hdr[ETH_ALEN*2+2] = 0;
- hdr[ETH_ALEN*2+3] = 0;
+ hdr[ETH_ALEN * 2] = priority;
+ hdr[ETH_ALEN * 2 + 1] = 0;
+ hdr[ETH_ALEN * 2 + 2] = 0;
+ hdr[ETH_ALEN * 2 + 3] = 0;
/* Use scatter gather to MIC header and data in one go */
sg_init_table(sg, 2);
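The mic.c hunk above only adds spacing, but the bytes it lays out are the interesting part: the Michael MIC is computed over a 16-byte pseudo-header of destination address, source address, the priority byte and three zero pad bytes, followed by the frame body, and the driver hands both pieces to the hash in one pass through a two-entry scatterlist. A user-space sketch of just the header construction, with ETH_ALEN written out as 6:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_ETH_ALEN	6
#define TOY_MIC_HDR_LEN	(2 * TOY_ETH_ALEN + 4)	/* da + sa + priority + 3 pad */

/* Build the Michael MIC pseudo-header exactly as mic.c lays it out:
 * bytes 0..5 = da, 6..11 = sa, 12 = priority, 13..15 = zero padding. */
static void build_mic_header(uint8_t hdr[TOY_MIC_HDR_LEN],
			     const uint8_t *da, const uint8_t *sa,
			     uint8_t priority)
{
	memcpy(&hdr[0], da, TOY_ETH_ALEN);
	memcpy(&hdr[TOY_ETH_ALEN], sa, TOY_ETH_ALEN);
	hdr[TOY_ETH_ALEN * 2] = priority;
	hdr[TOY_ETH_ALEN * 2 + 1] = 0;
	hdr[TOY_ETH_ALEN * 2 + 2] = 0;
	hdr[TOY_ETH_ALEN * 2 + 3] = 0;
	/* The driver then MICs hdr and the payload together via the
	 * two-entry scatterlist set up with sg_init_table(sg, 2);
	 * the hash step itself is omitted here. */
}

int main(void)
{
	const uint8_t da[TOY_ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	const uint8_t sa[TOY_ETH_ALEN] = { 0x00, 0x02, 0x2d, 0xaa, 0xbb, 0xcc };
	uint8_t hdr[TOY_MIC_HDR_LEN];

	build_mic_header(hdr, da, sa, 0);
	for (int i = 0; i < TOY_MIC_HDR_LEN; i++)
		printf("%02x%s", hdr[i], i == TOY_MIC_HDR_LEN - 1 ? "\n" : " ");
	return 0;
}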
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 255710ef082..3bb936b9558 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -49,11 +49,11 @@ enum orinoco_alg {
ORINOCO_ALG_TKIP
};
-typedef enum {
+enum fwtype {
FIRMWARE_TYPE_AGERE,
FIRMWARE_TYPE_INTERSIL,
FIRMWARE_TYPE_SYMBOL
-} fwtype_t;
+};
struct firmware;
@@ -88,11 +88,11 @@ struct orinoco_private {
struct iw_statistics wstats;
/* Hardware control variables */
- hermes_t hw;
+ struct hermes hw;
u16 txfid;
/* Capabilities of the hardware/firmware */
- fwtype_t firmware_type;
+ enum fwtype firmware_type;
int ibss_port;
int nicbuf_size;
u16 channel_mask;
@@ -122,8 +122,8 @@ struct orinoco_private {
struct key_params keys[ORINOCO_MAX_KEYS];
int bitratemode;
- char nick[IW_ESSID_MAX_SIZE+1];
- char desired_essid[IW_ESSID_MAX_SIZE+1];
+ char nick[IW_ESSID_MAX_SIZE + 1];
+ char desired_essid[IW_ESSID_MAX_SIZE + 1];
char desired_bssid[ETH_ALEN];
int bssid_fixed;
u16 frag_thresh, mwo_robust;
@@ -197,8 +197,8 @@ extern int orinoco_up(struct orinoco_private *priv);
extern void orinoco_down(struct orinoco_private *priv);
extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
-extern void __orinoco_ev_info(struct net_device *dev, hermes_t *hw);
-extern void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw);
+extern void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
+extern void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
int orinoco_process_xmit_skb(struct sk_buff *skb,
struct net_device *dev,
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 88e3c0ebcaa..d7dbc00bcfb 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -65,7 +65,7 @@ static void orinoco_cs_release(struct pcmcia_device *link);
static void orinoco_cs_detach(struct pcmcia_device *p_dev);
/********************************************************************/
-/* Device methods */
+/* Device methods */
/********************************************************************/
static int
@@ -89,7 +89,7 @@ orinoco_cs_hard_reset(struct orinoco_private *priv)
}
/********************************************************************/
-/* PCMCIA stuff */
+/* PCMCIA stuff */
/********************************************************************/
static int
@@ -134,7 +134,7 @@ static int
orinoco_cs_config(struct pcmcia_device *link)
{
struct orinoco_private *priv = link->priv;
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int ret;
void __iomem *mem;
@@ -239,7 +239,6 @@ static int orinoco_cs_resume(struct pcmcia_device *link)
static const struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
- PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */
PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */
PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
@@ -272,6 +271,7 @@ static const struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
+ PCMCIA_DEVICE_MANF_CARD_PROD_ID3(0x0156, 0x0002, "Version 01.01", 0xd27deb1a), /* Lucent Orinoco */
#ifdef CONFIG_HERMES_PRISM
/* Only entries that certainly identify Prism chipset */
PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
@@ -321,6 +321,9 @@ static const struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
+
+ /* This may be Agere or Intersil Firmware */
+ PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002),
#endif
PCMCIA_DEVICE_NULL,
};
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index bc3ea0b67a4..326396b313a 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -296,8 +296,7 @@ static struct pci_driver orinoco_nortel_driver = {
static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
" (Tobias Hoffmann & Christoph Jungegger <disdos@traum404.de>)";
MODULE_AUTHOR("Christoph Jungegger <disdos@traum404.de>");
-MODULE_DESCRIPTION
- ("Driver for wireless LAN cards using the Nortel PCI bridge");
+MODULE_DESCRIPTION("Driver for wireless LAN cards using the Nortel PCI bridge");
MODULE_LICENSE("Dual MPL/GPL");
static int __init orinoco_nortel_init(void)
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index 468197f8667..6058c66b844 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -6,7 +6,7 @@
* hermes registers, as well as the COR register.
*
* Current maintainers are:
- * Pavel Roskin <proski AT gnu.org>
+ * Pavel Roskin <proski AT gnu.org>
* and David Gibson <hermes AT gibson.dropbear.id.au>
*
* Some of this code is borrowed from orinoco_plx.c
@@ -81,7 +81,7 @@
*/
static int orinoco_pci_cor_reset(struct orinoco_private *priv)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
unsigned long timeout;
u16 reg;
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 9358f4d2307..2bac8248a99 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -4,7 +4,7 @@
* but are connected to the PCI bus by a PLX9052.
*
* Current maintainers are:
- * Pavel Roskin <proski AT gnu.org>
+ * Pavel Roskin <proski AT gnu.org>
* and David Gibson <hermes AT gibson.dropbear.id.au>
*
* (C) Copyright David Gibson, IBM Corp. 2001-2003.
@@ -102,14 +102,14 @@
#define PLX_RESET_TIME (500) /* milliseconds */
#define PLX_INTCSR 0x4c /* Interrupt Control & Status Register */
-#define PLX_INTCSR_INTEN (1<<6) /* Interrupt Enable bit */
+#define PLX_INTCSR_INTEN (1 << 6) /* Interrupt Enable bit */
/*
* Do a soft reset of the card using the Configuration Option Register
*/
static int orinoco_plx_cor_reset(struct orinoco_private *priv)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
struct orinoco_pci_card *card = priv->card;
unsigned long timeout;
u16 reg;
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 784605f0af1..93159d68ec9 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -59,7 +59,7 @@
*/
static int orinoco_tmd_cor_reset(struct orinoco_private *priv)
{
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
struct orinoco_pci_card *card = priv->card;
unsigned long timeout;
u16 reg;
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index b9aedf18a04..811e87f8a34 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -199,7 +199,7 @@ MODULE_FIRMWARE("orinoco_ezusb_fw");
#define EZUSB_FRAME_DATA 1
#define EZUSB_FRAME_CONTROL 2
-#define DEF_TIMEOUT (3*HZ)
+#define DEF_TIMEOUT (3 * HZ)
#define BULK_BUF_SIZE 2048
@@ -959,7 +959,7 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
return retval;
}
-static int ezusb_write_ltv(hermes_t *hw, int bap, u16 rid,
+static int ezusb_write_ltv(struct hermes *hw, int bap, u16 rid,
u16 length, const void *data)
{
struct ezusb_priv *upriv = hw->priv;
@@ -989,7 +989,7 @@ static int ezusb_write_ltv(hermes_t *hw, int bap, u16 rid,
NULL, 0, NULL);
}
-static int ezusb_read_ltv(hermes_t *hw, int bap, u16 rid,
+static int ezusb_read_ltv(struct hermes *hw, int bap, u16 rid,
unsigned bufsize, u16 *length, void *buf)
{
struct ezusb_priv *upriv = hw->priv;
@@ -1006,7 +1006,7 @@ static int ezusb_read_ltv(hermes_t *hw, int bap, u16 rid,
buf, bufsize, length);
}
-static int ezusb_doicmd_wait(hermes_t *hw, u16 cmd, u16 parm0, u16 parm1,
+static int ezusb_doicmd_wait(struct hermes *hw, u16 cmd, u16 parm0, u16 parm1,
u16 parm2, struct hermes_response *resp)
{
struct ezusb_priv *upriv = hw->priv;
@@ -1028,7 +1028,7 @@ static int ezusb_doicmd_wait(hermes_t *hw, u16 cmd, u16 parm0, u16 parm1,
EZUSB_FRAME_CONTROL, NULL, 0, NULL);
}
-static int ezusb_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
+static int ezusb_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0,
struct hermes_response *resp)
{
struct ezusb_priv *upriv = hw->priv;
@@ -1196,7 +1196,7 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
struct orinoco_private *priv = ndev_priv(dev);
struct net_device_stats *stats = &priv->stats;
struct ezusb_priv *upriv = priv->card;
- u8 mic[MICHAEL_MIC_LEN+1];
+ u8 mic[MICHAEL_MIC_LEN + 1];
int err = 0;
int tx_control;
unsigned long flags;
@@ -1356,7 +1356,7 @@ static int ezusb_hard_reset(struct orinoco_private *priv)
}
-static int ezusb_init(hermes_t *hw)
+static int ezusb_init(struct hermes *hw)
{
struct ezusb_priv *upriv = hw->priv;
int retval;
@@ -1438,7 +1438,7 @@ static void ezusb_bulk_in_callback(struct urb *urb)
} else if (upriv->dev) {
struct net_device *dev = upriv->dev;
struct orinoco_private *priv = ndev_priv(dev);
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
if (hermes_rid == EZUSB_RID_RX) {
__orinoco_ev_rx(dev, hw);
@@ -1575,7 +1575,7 @@ static int ezusb_probe(struct usb_interface *interface,
{
struct usb_device *udev = interface_to_usbdev(interface);
struct orinoco_private *priv;
- hermes_t *hw;
+ struct hermes *hw;
struct ezusb_priv *upriv = NULL;
struct usb_interface_descriptor *iface_desc;
struct usb_endpoint_descriptor *ep;
@@ -1757,7 +1757,7 @@ static struct usb_driver orinoco_driver = {
/* Can't be declared "const" or the whole __initdata section will
* become const */
static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
- " (Manuel Estrada Sainz)";
+ " (Manuel Estrada Sainz)";
static int __init ezusb_module_init(void)
{
@@ -1787,6 +1787,5 @@ module_init(ezusb_module_init);
module_exit(ezusb_module_exit);
MODULE_AUTHOR("Manuel Estrada Sainz");
-MODULE_DESCRIPTION
- ("Driver for Orinoco wireless LAN cards using EZUSB bridge");
+MODULE_DESCRIPTION("Driver for Orinoco wireless LAN cards using EZUSB bridge");
MODULE_LICENSE("Dual MPL/GPL");
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index 81f3673d31d..6e28ee4e9c5 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -11,9 +11,9 @@
*
* Copyright (C) 2002-2005 Pavel Roskin <proski@gnu.org>
* Portions based on orinoco_cs.c:
- * Copyright (C) David Gibson, Linuxcare Australia
+ * Copyright (C) David Gibson, Linuxcare Australia
* Portions based on Spectrum24tDnld.c from original spectrum24 driver:
- * Copyright (C) Symbol Technologies.
+ * Copyright (C) Symbol Technologies.
*
* See copyright notice in file main.c.
*/
@@ -125,7 +125,7 @@ failed:
}
/********************************************************************/
-/* Device methods */
+/* Device methods */
/********************************************************************/
static int
@@ -150,7 +150,7 @@ spectrum_cs_stop_firmware(struct orinoco_private *priv, int idle)
}
/********************************************************************/
-/* PCMCIA stuff */
+/* PCMCIA stuff */
/********************************************************************/
static int
@@ -197,7 +197,7 @@ static int
spectrum_cs_config(struct pcmcia_device *link)
{
struct orinoco_private *priv = link->priv;
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int ret;
void __iomem *mem;
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index e793679e2e1..bbb9beb206b 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -87,7 +87,7 @@ nomem:
static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
struct iw_statistics *wstats = &priv->wstats;
int err;
unsigned long flags;
@@ -448,7 +448,7 @@ static int orinoco_ioctl_setfreq(struct net_device *dev,
}
if ((chan < 1) || (chan > NUM_CHANNELS) ||
- !(priv->channel_mask & (1 << (chan-1))))
+ !(priv->channel_mask & (1 << (chan - 1))))
return -EINVAL;
if (orinoco_lock(priv, &flags) != 0)
@@ -457,7 +457,7 @@ static int orinoco_ioctl_setfreq(struct net_device *dev,
priv->channel = chan;
if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
/* Fast channel change - no commit if successful */
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_SET_CHANNEL,
chan, NULL);
@@ -492,7 +492,7 @@ static int orinoco_ioctl_getsens(struct net_device *dev,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
u16 val;
int err;
unsigned long flags;
@@ -668,7 +668,7 @@ static int orinoco_ioctl_getpower(struct net_device *dev,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int err = 0;
u16 enable, period, timeout, mcast;
unsigned long flags;
@@ -873,7 +873,7 @@ static int orinoco_ioctl_set_auth(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
struct iw_param *param = &wrqu->param;
unsigned long flags;
int ret = -EINPROGRESS;
@@ -1269,7 +1269,7 @@ static int orinoco_ioctl_getrid(struct net_device *dev,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
- hermes_t *hw = &priv->hw;
+ struct hermes *hw = &priv->hw;
int rid = data->flags;
u16 length;
int err;
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index ee9bc62a4fa..7aa509f7e38 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -1,5 +1,6 @@
#ifndef P54PCI_H
#define P54PCI_H
+#include <linux/interrupt.h>
/*
* Defines for PCI based mac80211 Prism54 driver
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index ec2c75d77ce..5d0f61508a2 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -18,6 +18,7 @@
*
*/
+#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index c4d0f19b7cb..c40403877f9 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -22,6 +22,7 @@
#ifndef _ISLPCI_DEV_H
#define _ISLPCI_DEV_H
+#include <linux/irqreturn.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index b5e64d71b7a..9e68e0cb718 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -17,6 +17,7 @@
*
*/
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index b2f8b8fd4d2..a0a7854facc 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -83,14 +83,12 @@ config RT2800PCI_RT33XX
config RT2800PCI_RT35XX
bool "rt2800pci - Include support for rt35xx devices (EXPERIMENTAL)"
depends on EXPERIMENTAL
- default n
+ default y
---help---
This adds support for rt35xx wireless chipset family to the
rt2800pci driver.
Supported chips: RT3060, RT3062, RT3562, RT3592
- Support for these devices is non-functional at the moment and is
- intended for testers and developers.
config RT2800PCI_RT53XX
bool "rt2800pci - Include support for rt53xx devices (EXPERIMENTAL)"
@@ -154,15 +152,12 @@ config RT2800USB_RT33XX
config RT2800USB_RT35XX
bool "rt2800usb - Include support for rt35xx devices (EXPERIMENTAL)"
depends on EXPERIMENTAL
- default n
+ default y
---help---
This adds support for rt35xx wireless chipset family to the
rt2800usb driver.
Supported chips: RT3572
- Support for these devices is non-functional at the moment and is
- intended for testers and developers.
-
config RT2800USB_RT53XX
bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
depends on EXPERIMENTAL
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 937f9e8bf05..76bcc354797 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1723,6 +1723,7 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
.set_antenna = rt2x00mac_set_antenna,
.get_antenna = rt2x00mac_get_antenna,
.get_ringparam = rt2x00mac_get_ringparam,
+ .tx_frames_pending = rt2x00mac_tx_frames_pending,
};
static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index d27d7b8ba3b..c288d951c03 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -2016,6 +2016,7 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
.set_antenna = rt2x00mac_set_antenna,
.get_antenna = rt2x00mac_get_antenna,
.get_ringparam = rt2x00mac_get_ringparam,
+ .tx_frames_pending = rt2x00mac_tx_frames_pending,
};
static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 15237c27548..53c5f878f61 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1827,6 +1827,7 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
.set_antenna = rt2x00mac_set_antenna,
.get_antenna = rt2x00mac_get_antenna,
.get_ringparam = rt2x00mac_get_ringparam,
+ .tx_frames_pending = rt2x00mac_tx_frames_pending,
};
static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index f67bc9b31b2..c69a7d71f4c 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -1740,6 +1740,7 @@ struct mac_iveiv_entry {
/*
* BBP 3: RX Antenna
*/
+#define BBP3_RX_ADC FIELD8(0x03)
#define BBP3_RX_ANTENNA FIELD8(0x18)
#define BBP3_HT40_MINUS FIELD8(0x20)
@@ -1783,6 +1784,8 @@ struct mac_iveiv_entry {
#define RFCSR1_TX0_PD FIELD8(0x08)
#define RFCSR1_RX1_PD FIELD8(0x10)
#define RFCSR1_TX1_PD FIELD8(0x20)
+#define RFCSR1_RX2_PD FIELD8(0x40)
+#define RFCSR1_TX2_PD FIELD8(0x80)
/*
* RFCSR 2:
@@ -1790,15 +1793,25 @@ struct mac_iveiv_entry {
#define RFCSR2_RESCAL_EN FIELD8(0x80)
/*
+ * RFCSR 5:
+ */
+#define RFCSR5_R1 FIELD8(0x0c)
+
+/*
* RFCSR 6:
*/
#define RFCSR6_R1 FIELD8(0x03)
#define RFCSR6_R2 FIELD8(0x40)
+#define RFCSR6_TXDIV FIELD8(0x0c)
/*
* RFCSR 7:
*/
#define RFCSR7_RF_TUNING FIELD8(0x01)
+#define RFCSR7_R02 FIELD8(0x07)
+#define RFCSR7_R3 FIELD8(0x08)
+#define RFCSR7_R45 FIELD8(0x30)
+#define RFCSR7_R67 FIELD8(0xc0)
/*
* RFCSR 11:
@@ -1809,11 +1822,13 @@ struct mac_iveiv_entry {
* RFCSR 12:
*/
#define RFCSR12_TX_POWER FIELD8(0x1f)
+#define RFCSR12_DR0 FIELD8(0xe0)
/*
* RFCSR 13:
*/
#define RFCSR13_TX_POWER FIELD8(0x1f)
+#define RFCSR13_DR0 FIELD8(0xe0)
/*
* RFCSR 15:
@@ -2256,6 +2271,7 @@ struct mac_iveiv_entry {
#define MCU_ANT_SELECT 0X73
#define MCU_BBP_SIGNAL 0x80
#define MCU_POWER_SAVE 0x83
+#define MCU_BAND_SELECT 0x91
/*
* MCU mailbox tokens
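The rt2800.h additions above are all FIELD8() bitfield descriptors (RFCSR1_RX2_PD/TX2_PD, RFCSR5_R1, RFCSR6_TXDIV, RFCSR12/13_DR0, ...) that the new RT3572/RF3052 code pokes with rt2x00_set_field8(). The idea is that a field is described by nothing but its mask, and the set/get helpers derive the shift from it. A standalone sketch of that mechanism; the helper bodies here are an assumption, not rt2x00's exact implementation:

#include <stdint.h>
#include <stdio.h>

/* A register field described only by its mask, like FIELD8(0x0c). */
struct field8 {
	uint8_t mask;
};

#define TOY_FIELD8(m)	((struct field8){ .mask = (m) })

/* Shift distance = position of the mask's lowest set bit (mask != 0). */
static unsigned int field8_shift(struct field8 f)
{
	unsigned int s = 0;

	while (!(f.mask & (1u << s)))
		s++;
	return s;
}

static void set_field8(uint8_t *reg, struct field8 f, uint8_t value)
{
	*reg = (*reg & ~f.mask) | ((value << field8_shift(f)) & f.mask);
}

static uint8_t get_field8(uint8_t reg, struct field8 f)
{
	return (reg & f.mask) >> field8_shift(f);
}

int main(void)
{
	/* Mirrors the new RFCSR6_TXDIV FIELD8(0x0c) definition. */
	const struct field8 RFCSR6_TXDIV = TOY_FIELD8(0x0c);
	uint8_t rfcsr = 0x4a;	/* RT3572 init value for RFCSR 6 in the patch */

	set_field8(&rfcsr, RFCSR6_TXDIV, 1);	/* the patch's 5 GHz setting */
	printf("rfcsr6 = 0x%02x, txdiv = %u\n",
	       rfcsr, get_field8(rfcsr, RFCSR6_TXDIV));
	return 0;
}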
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 2a6aa85cc6c..ef67f6786a8 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -401,7 +401,8 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
return -EBUSY;
if (rt2x00_is_pci(rt2x00dev)) {
- if (rt2x00_rt(rt2x00dev, RT5390)) {
+ if (rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
@@ -600,49 +601,6 @@ void rt2800_process_rxwi(struct queue_entry *entry,
}
EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
-static bool rt2800_txdone_entry_check(struct queue_entry *entry, u32 reg)
-{
- __le32 *txwi;
- u32 word;
- int wcid, ack, pid;
- int tx_wcid, tx_ack, tx_pid;
-
- wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
- ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
- pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
-
- /*
- * This frames has returned with an IO error,
- * so the status report is not intended for this
- * frame.
- */
- if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) {
- rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
- return false;
- }
-
- /*
- * Validate if this TX status report is intended for
- * this entry by comparing the WCID/ACK/PID fields.
- */
- txwi = rt2800_drv_get_txwi(entry);
-
- rt2x00_desc_read(txwi, 1, &word);
- tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
- tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
- tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
-
- if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid)) {
- WARNING(entry->queue->rt2x00dev,
- "TX status report missed for queue %d entry %d\n",
- entry->queue->qid, entry->entry_idx);
- rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
- return false;
- }
-
- return true;
-}
-
void rt2800_txdone_entry(struct queue_entry *entry, u32 status)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
@@ -725,45 +683,6 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status)
}
EXPORT_SYMBOL_GPL(rt2800_txdone_entry);
-void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
- struct queue_entry *entry;
- u32 reg;
- u8 qid;
-
- while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
-
- /* TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus
- * qid is guaranteed to be one of the TX QIDs
- */
- qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
- queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
- if (unlikely(!queue)) {
- WARNING(rt2x00dev, "Got TX status for an unavailable "
- "queue %u, dropping\n", qid);
- continue;
- }
-
- /*
- * Inside each queue, we process each entry in a chronological
- * order. We first check that the queue is not empty.
- */
- entry = NULL;
- while (!rt2x00queue_empty(queue)) {
- entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- if (rt2800_txdone_entry_check(entry, reg))
- break;
- }
-
- if (!entry || rt2x00queue_empty(queue))
- break;
-
- rt2800_txdone_entry(entry, reg);
- }
-}
-EXPORT_SYMBOL_GPL(rt2800_txdone);
-
void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
@@ -784,8 +703,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
/*
* Add space for the TXWI in front of the skb.
*/
- skb_push(entry->skb, TXWI_DESC_SIZE);
- memset(entry->skb, 0, TXWI_DESC_SIZE);
+ memset(skb_push(entry->skb, TXWI_DESC_SIZE), 0, TXWI_DESC_SIZE);
/*
* Register descriptor details in skb frame descriptor.
@@ -1355,7 +1273,7 @@ static void rt2800_config_ht_opmode(struct rt2x00_dev *rt2x00dev,
gf20_rate = gf40_rate = 0x0003;
}
break;
- };
+ }
/* check for STAs not supporting greenfield mode */
if (any_sta_nongf)
@@ -1433,6 +1351,40 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
}
EXPORT_SYMBOL_GPL(rt2800_config_erp);
+static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+ u16 eeprom;
+ u8 led_ctrl, led_g_mode, led_r_mode;
+
+ rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+ if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ rt2x00_set_field32(&reg, GPIO_SWITCH_0, 1);
+ rt2x00_set_field32(&reg, GPIO_SWITCH_1, 1);
+ } else {
+ rt2x00_set_field32(&reg, GPIO_SWITCH_0, 0);
+ rt2x00_set_field32(&reg, GPIO_SWITCH_1, 0);
+ }
+ rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
+
+ rt2800_register_read(rt2x00dev, LED_CFG, &reg);
+ led_g_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 3 : 0;
+ led_r_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 0 : 3;
+ if (led_g_mode != rt2x00_get_field32(reg, LED_CFG_G_LED_MODE) ||
+ led_r_mode != rt2x00_get_field32(reg, LED_CFG_R_LED_MODE)) {
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+ led_ctrl = rt2x00_get_field16(eeprom, EEPROM_FREQ_LED_MODE);
+ if (led_ctrl == 0 || led_ctrl > 0x40) {
+ rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, led_g_mode);
+ rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, led_r_mode);
+ rt2800_register_write(rt2x00dev, LED_CFG, reg);
+ } else {
+ rt2800_mcu_request(rt2x00dev, MCU_BAND_SELECT, 0xff,
+ (led_g_mode << 2) | led_r_mode, 1);
+ }
+ }
+}
+
static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev,
enum antenna ant)
{
@@ -1463,6 +1415,10 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2800_bbp_read(rt2x00dev, 1, &r1);
rt2800_bbp_read(rt2x00dev, 3, &r3);
+ if (rt2x00_rt(rt2x00dev, RT3572) &&
+ test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ rt2800_config_3572bt_ant(rt2x00dev);
+
/*
* Configure the TX antenna.
*/
@@ -1471,7 +1427,11 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
break;
case 2:
- rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
+ if (rt2x00_rt(rt2x00dev, RT3572) &&
+ test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 1);
+ else
+ rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
break;
case 3:
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
@@ -1496,7 +1456,15 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
break;
case 2:
- rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
+ if (rt2x00_rt(rt2x00dev, RT3572) &&
+ test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ rt2x00_set_field8(&r3, BBP3_RX_ADC, 1);
+ rt2x00_set_field8(&r3, BBP3_RX_ANTENNA,
+ rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+ rt2800_set_ant_diversity(rt2x00dev, ANTENNA_B);
+ } else {
+ rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
+ }
break;
case 3:
rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 2);
@@ -1630,6 +1598,161 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
}
+static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
+{
+ u8 rfcsr;
+ u32 reg;
+
+ if (rf->channel <= 14) {
+ rt2800_bbp_write(rt2x00dev, 25, 0x15);
+ rt2800_bbp_write(rt2x00dev, 26, 0x85);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 25, 0x09);
+ rt2800_bbp_write(rt2x00dev, 26, 0xff);
+ }
+
+ rt2800_rfcsr_write(rt2x00dev, 2, rf->rf1);
+ rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3);
+
+ rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2);
+ if (rf->channel <= 14)
+ rt2x00_set_field8(&rfcsr, RFCSR6_TXDIV, 2);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR6_TXDIV, 1);
+ rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 5, &rfcsr);
+ if (rf->channel <= 14)
+ rt2x00_set_field8(&rfcsr, RFCSR5_R1, 1);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR5_R1, 2);
+ rt2800_rfcsr_write(rt2x00dev, 5, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
+ if (rf->channel <= 14) {
+ rt2x00_set_field8(&rfcsr, RFCSR12_DR0, 3);
+ rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
+ (info->default_power1 & 0x3) |
+ ((info->default_power1 & 0xC) << 1));
+ } else {
+ rt2x00_set_field8(&rfcsr, RFCSR12_DR0, 7);
+ rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
+ (info->default_power1 & 0x3) |
+ ((info->default_power1 & 0xC) << 1));
+ }
+ rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
+ if (rf->channel <= 14) {
+ rt2x00_set_field8(&rfcsr, RFCSR13_DR0, 3);
+ rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
+ (info->default_power2 & 0x3) |
+ ((info->default_power2 & 0xC) << 1));
+ } else {
+ rt2x00_set_field8(&rfcsr, RFCSR13_DR0, 7);
+ rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
+ (info->default_power2 & 0x3) |
+ ((info->default_power2 & 0xC) << 1));
+ }
+ rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
+ if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+ if (rf->channel <= 14) {
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
+ }
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
+ } else {
+ switch (rt2x00dev->default_ant.tx_chain_num) {
+ case 1:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+ case 2:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
+ break;
+ }
+
+ switch (rt2x00dev->default_ant.rx_chain_num) {
+ case 1:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+ case 2:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
+ break;
+ }
+ }
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
+ rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 24,
+ rt2x00dev->calibration[conf_is_ht40(conf)]);
+ rt2800_rfcsr_write(rt2x00dev, 31,
+ rt2x00dev->calibration[conf_is_ht40(conf)]);
+
+ if (rf->channel <= 14) {
+ rt2800_rfcsr_write(rt2x00dev, 7, 0xd8);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0xc3);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0xb9);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x4c);
+ rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x93);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0xb3);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x15);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x9b);
+ } else {
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x14);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0xc0);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x43);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x7a);
+ rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
+ if (rf->channel <= 64) {
+ rt2800_rfcsr_write(rt2x00dev, 19, 0xb7);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0xf6);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x3d);
+ } else if (rf->channel <= 128) {
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x74);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0xf4);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
+ } else {
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x72);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0xf3);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
+ }
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x87);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x01);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x9f);
+ }
+
+ rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT7, 0);
+ if (rf->channel <= 14)
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT7, 1);
+ else
+ rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT7, 0);
+ rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
+
+ rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
+ rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
+}
#define RT5390_POWER_BOUND 0x27
#define RT5390_FREQ_OFFSET_BOUND 0x5f
@@ -1748,9 +1871,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_rf(rt2x00dev, RF3020) ||
rt2x00_rf(rt2x00dev, RF3021) ||
rt2x00_rf(rt2x00dev, RF3022) ||
- rt2x00_rf(rt2x00dev, RF3052) ||
rt2x00_rf(rt2x00dev, RF3320))
rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
+ else if (rt2x00_rf(rt2x00dev, RF3052))
+ rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
else if (rt2x00_rf(rt2x00dev, RF5370) ||
rt2x00_rf(rt2x00dev, RF5390))
rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
@@ -1777,7 +1901,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
}
}
} else {
- rt2800_bbp_write(rt2x00dev, 82, 0xf2);
+ if (rt2x00_rt(rt2x00dev, RT3572))
+ rt2800_bbp_write(rt2x00dev, 82, 0x94);
+ else
+ rt2800_bbp_write(rt2x00dev, 82, 0xf2);
if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
rt2800_bbp_write(rt2x00dev, 75, 0x46);
@@ -1791,12 +1918,17 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg);
+ if (rt2x00_rt(rt2x00dev, RT3572))
+ rt2800_rfcsr_write(rt2x00dev, 8, 0);
+
tx_pin = 0;
/* Turn on unused PA or LNA when not using 1T or 1R */
if (rt2x00dev->default_ant.tx_chain_num == 2) {
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN,
+ rf->channel > 14);
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN,
+ rf->channel <= 14);
}
/* Turn on unused PA or LNA when not using 1T or 1R */
@@ -1809,11 +1941,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, rf->channel <= 14);
+ if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
+ else
+ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
+ rf->channel <= 14);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14);
rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
+ if (rt2x00_rt(rt2x00dev, RT3572))
+ rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
+
rt2800_bbp_read(rt2x00dev, 4, &bbp);
rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
rt2800_bbp_write(rt2x00dev, 4, bbp);
@@ -2413,6 +2552,9 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000030);
+ } else if (rt2x00_rt(rt2x00dev, RT3572)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
} else if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -2799,6 +2941,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
}
if (rt2800_is_305x_soc(rt2x00dev) ||
+ rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 31, 0x08);
@@ -2828,6 +2971,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_bbp_write(rt2x00dev, 79, 0x13);
rt2800_bbp_write(rt2x00dev, 80, 0x05);
@@ -2868,6 +3012,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
+ rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2800_is_305x_soc(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 103, 0xc0);
@@ -2895,6 +3040,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_bbp_read(rt2x00dev, 138, &value);
@@ -3031,6 +3177,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
!rt2x00_rt(rt2x00dev, RT3071) &&
!rt2x00_rt(rt2x00dev, RT3090) &&
!rt2x00_rt(rt2x00dev, RT3390) &&
+ !rt2x00_rt(rt2x00dev, RT3572) &&
!rt2x00_rt(rt2x00dev, RT5390) &&
!rt2800_is_305x_soc(rt2x00dev))
return 0;
@@ -3109,6 +3256,38 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 29, 0x8f);
rt2800_rfcsr_write(rt2x00dev, 30, 0x20);
rt2800_rfcsr_write(rt2x00dev, 31, 0x0f);
+ } else if (rt2x00_rt(rt2x00dev, RT3572)) {
+ rt2800_rfcsr_write(rt2x00dev, 0, 0x70);
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x81);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x4c);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x05);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0xd8);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0xc3);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0xb9);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x70);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x65);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0xa0);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x4c);
+ rt2800_rfcsr_write(rt2x00dev, 17, 0x23);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0xac);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x93);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0xb3);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0xd0);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x3c);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x15);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x9b);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x09);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x10);
} else if (rt2800_is_305x_soc(rt2x00dev)) {
rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
@@ -3258,6 +3437,19 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
+ } else if (rt2x00_rt(rt2x00dev, RT3572)) {
+ rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
+ rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+ rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
+ rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
+ rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+ msleep(1);
+ rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
+ rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
}
/*
@@ -3270,7 +3462,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
} else if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
- rt2x00_rt(rt2x00dev, RT3390)) {
+ rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT3572)) {
rt2x00dev->calibration[0] =
rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x13);
rt2x00dev->calibration[1] =
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index f2d15941c71..69deb3148ae 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -152,7 +152,6 @@ void rt2800_write_tx_data(struct queue_entry *entry,
struct txentry_desc *txdesc);
void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *txdesc);
-void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
void rt2800_txdone_entry(struct queue_entry *entry, u32 status);
void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
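One line of the new rt2800_config_channel_rf3052() above is easy to misread: RFCSR12_TX_POWER is a 5-bit field (mask 0x1f), yet the 4-bit per-chain power value is written as (power & 0x3) | ((power & 0xC) << 1), i.e. its low two bits stay at field bits 0-1 and its upper two bits are shifted past bit 2 into bits 3-4, so field bit 2 is never touched by the power value. A tiny standalone check of that packing (the patch does not say why bit 2 is skipped, so no reason is guessed here):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* The packing used for RFCSR12/13_TX_POWER in the RF3052 channel setup:
 * keep power bits 0-1 in place, move power bits 2-3 up to field bits 3-4. */
static uint8_t pack_rf3052_txpower(uint8_t power)
{
	return (power & 0x3) | ((power & 0xC) << 1);
}

int main(void)
{
	for (uint8_t p = 0; p < 16; p++) {
		uint8_t v = pack_rf3052_txpower(p);

		/* Field bit 2 is never set by this packing. */
		assert(!(v & 0x04));
		printf("power %2u -> field value 0x%02x\n", p, v);
	}
	return 0;
}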
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index cc4a54f571b..ebc17ad61de 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -219,7 +219,7 @@ static void rt2800pci_start_queue(struct data_queue *queue)
break;
default:
break;
- };
+ }
}
static void rt2800pci_kick_queue(struct data_queue *queue)
@@ -501,7 +501,9 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
rt2x00pci_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
- if (rt2x00_rt(rt2x00dev, RT5390)) {
+ if (rt2x00_is_pcie(rt2x00dev) &&
+ (rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT5390))) {
rt2x00pci_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
@@ -1029,6 +1031,7 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
.flush = rt2x00mac_flush,
.get_survey = rt2800_get_survey,
.get_ringparam = rt2x00mac_get_ringparam,
+ .tx_frames_pending = rt2x00mac_tx_frames_pending,
};
static const struct rt2800_ops rt2800pci_rt2800_ops = {
@@ -1158,6 +1161,7 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
#endif
#ifdef CONFIG_RT2800PCI_RT53XX
{ PCI_DEVICE(0x1814, 0x5390) },
+ { PCI_DEVICE(0x1814, 0x539f) },
#endif
{ 0, }
};
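The TX-done rework visible here (rt2800_txdone() removed from rt2800lib.c above and reinstated as rt2800usb_txdone() in the rt2800usb.c hunks that follow) revolves around one check: a status word popped from the TX status FIFO is only trusted for the oldest pending entry if its WCID/ACK/PID fields match what was written into that entry's TXWI; on a mismatch the entry is completed without status and the search moves on. A stripped-down model of that matching step, with made-up bit positions since the real layout is given by the TX_STA_FIFO_* and TXWI_W1_* fields in rt2800.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout for this sketch only. */
struct txdone_fields {
	unsigned int wcid;	/* wireless client id */
	unsigned int ack;	/* ack was requested */
	unsigned int pid;	/* packet id tag */
};

static struct txdone_fields parse_status(uint32_t reg)
{
	return (struct txdone_fields){
		.wcid = (reg >> 8) & 0xff,
		.ack  = (reg >> 7) & 0x1,
		.pid  = (reg >> 1) & 0xf,
	};
}

/* The driver reads these back out of the queued entry's TXWI descriptor;
 * here they are simply stored alongside the fake entry. */
struct toy_entry {
	struct txdone_fields txwi;
	bool io_failed;
};

/* true  -> status belongs to this entry, complete it with the report
 * false -> complete it without info (failure/unknown) and keep looking */
static bool txdone_entry_check(const struct toy_entry *e, uint32_t reg)
{
	struct txdone_fields st = parse_status(reg);

	if (e->io_failed)
		return false;
	return st.wcid == e->txwi.wcid &&
	       st.ack  == e->txwi.ack &&
	       st.pid  == e->txwi.pid;
}

int main(void)
{
	struct toy_entry e = { .txwi = { .wcid = 3, .ack = 1, .pid = 5 } };
	uint32_t matching = (3u << 8) | (1u << 7) | (5u << 1);

	printf("match: %d, mismatch: %d\n",
	       txdone_entry_check(&e, matching),
	       txdone_entry_check(&e, matching ^ (1u << 8)));
	return 0;
}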
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index ba82c972703..dbf501ca317 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -457,6 +457,95 @@ static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
/*
* TX control handlers
*/
+static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg)
+{
+ __le32 *txwi;
+ u32 word;
+ int wcid, ack, pid;
+ int tx_wcid, tx_ack, tx_pid;
+
+ if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
+ !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) {
+ WARNING(entry->queue->rt2x00dev,
+ "Data pending for entry %u in queue %u\n",
+ entry->entry_idx, entry->queue->qid);
+ cond_resched();
+ return false;
+ }
+
+ wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
+ ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
+ pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
+
+ /*
+ * This frame has returned with an IO error,
+ * so the status report is not intended for this
+ * frame.
+ */
+ if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) {
+ rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
+ return false;
+ }
+
+ /*
+ * Validate if this TX status report is intended for
+ * this entry by comparing the WCID/ACK/PID fields.
+ */
+ txwi = rt2800usb_get_txwi(entry);
+
+ rt2x00_desc_read(txwi, 1, &word);
+ tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
+ tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
+ tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
+
+ if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid)) {
+ WARNING(entry->queue->rt2x00dev,
+ "TX status report missed for queue %d entry %d\n",
+ entry->queue->qid, entry->entry_idx);
+ rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
+ return false;
+ }
+
+ return true;
+}
+
+static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ struct queue_entry *entry;
+ u32 reg;
+ u8 qid;
+
+ while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
+
+ /* TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus
+ * qid is guaranteed to be one of the TX QIDs
+ */
+ qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
+ queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
+ if (unlikely(!queue)) {
+ WARNING(rt2x00dev, "Got TX status for an unavailable "
+ "queue %u, dropping\n", qid);
+ continue;
+ }
+
+ /*
+ * Inside each queue, we process each entry in chronological
+ * order. We first check that the queue is not empty.
+ */
+ entry = NULL;
+ while (!rt2x00queue_empty(queue)) {
+ entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
+ if (rt2800usb_txdone_entry_check(entry, reg))
+ break;
+ entry = NULL;
+ }
+
+ if (entry)
+ rt2800_txdone_entry(entry, reg);
+ }
+}
+
static void rt2800usb_work_txdone(struct work_struct *work)
{
struct rt2x00_dev *rt2x00dev =
@@ -464,7 +553,7 @@ static void rt2800usb_work_txdone(struct work_struct *work)
struct data_queue *queue;
struct queue_entry *entry;
- rt2800_txdone(rt2x00dev);
+ rt2800usb_txdone(rt2x00dev);
/*
* Process any trailing TX status reports for IO failures,
@@ -477,8 +566,10 @@ static void rt2800usb_work_txdone(struct work_struct *work)
while (!rt2x00queue_empty(queue)) {
entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+ if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
+ !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
break;
+
if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
else if (rt2x00queue_status_timeout(entry))
@@ -676,6 +767,7 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = {
.flush = rt2x00mac_flush,
.get_survey = rt2800_get_survey,
.get_ringparam = rt2x00mac_get_ringparam,
+ .tx_frames_pending = rt2x00mac_tx_frames_pending,
};
static const struct rt2800_ops rt2800usb_rt2800_ops = {
@@ -839,6 +931,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c16) },
/* Draytek */
{ USB_DEVICE(0x07fa, 0x7712) },
+ /* DVICO */
+ { USB_DEVICE(0x0fe9, 0xb307) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x7711) },
{ USB_DEVICE(0x7392, 0x7717) },
@@ -939,6 +1033,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0df6, 0x0048) },
{ USB_DEVICE(0x0df6, 0x0051) },
{ USB_DEVICE(0x0df6, 0x005f) },
+ { USB_DEVICE(0x0df6, 0x0060) },
/* SMC */
{ USB_DEVICE(0x083a, 0x6618) },
{ USB_DEVICE(0x083a, 0x7511) },
@@ -971,6 +1066,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0586, 0x341e) },
{ USB_DEVICE(0x0586, 0x343e) },
#ifdef CONFIG_RT2800USB_RT33XX
+ /* Belkin */
+ { USB_DEVICE(0x050d, 0x945b) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x3370) },
{ USB_DEVICE(0x148f, 0x8070) },
@@ -995,6 +1092,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x148f, 0x3572) },
/* Sitecom */
{ USB_DEVICE(0x0df6, 0x0041) },
+ { USB_DEVICE(0x0df6, 0x0062) },
/* Toshiba */
{ USB_DEVICE(0x0930, 0x0a07) },
/* Zinwell */
@@ -1093,8 +1191,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0df6, 0x004a) },
{ USB_DEVICE(0x0df6, 0x004d) },
{ USB_DEVICE(0x0df6, 0x0053) },
- { USB_DEVICE(0x0df6, 0x0060) },
- { USB_DEVICE(0x0df6, 0x0062) },
/* SMC */
{ USB_DEVICE(0x083a, 0xa512) },
{ USB_DEVICE(0x083a, 0xc522) },
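The rt2800usb_txdone_entry_check() helper added above pairs each TX_STA_FIFO status word with a queued entry by comparing the WCID/ACK/PID fields of the status word against the values stored in the entry's TXWI. The following is a minimal userspace sketch of that matching idea only; the bit offsets and widths are invented for illustration and do not reflect the real TX_STA_FIFO/TXWI register layout.

/*
 * Illustrative sketch (not part of the patch): match a TX status word
 * against a queued descriptor by comparing WCID/ACK/PID fields, as
 * rt2800usb_txdone_entry_check() does.  Field positions are assumed.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(reg, shift, width) (((reg) >> (shift)) & ((1u << (width)) - 1u))

struct tx_entry {		/* stand-in for the fields cached in the TXWI */
	unsigned int wcid;
	unsigned int ack;
	unsigned int pid;
};

static bool status_matches_entry(uint32_t status, const struct tx_entry *e)
{
	unsigned int wcid = FIELD_GET(status, 8, 8);	/* assumed offsets */
	unsigned int ack  = FIELD_GET(status, 7, 1);
	unsigned int pid  = FIELD_GET(status, 1, 4);

	return wcid == e->wcid && ack == e->ack && pid == e->pid;
}

int main(void)
{
	struct tx_entry e = { .wcid = 0x2a, .ack = 1, .pid = 5 };
	uint32_t status = (0x2au << 8) | (1u << 7) | (5u << 1);

	printf("match: %s\n", status_matches_entry(status, &e) ? "yes" : "no");
	return 0;
}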
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index c446db69bd3..f82bfeb79eb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -29,6 +29,7 @@
#define RT2X00_H
#include <linux/bitops.h>
+#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/firmware.h>
@@ -1276,6 +1277,7 @@ int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max);
+bool rt2x00mac_tx_frames_pending(struct ieee80211_hw *hw);
/*
* Driver allocation handlers.
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 1bb9d46077f..1ca4c7ffc18 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -45,11 +45,11 @@ enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
}
}
-void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
+void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
struct txentry_desc *txdesc)
{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !hw_key)
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 939821b4af2..0955c941317 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -583,6 +583,18 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
/*
+ * Check for valid size in case we get corrupted descriptor from
+ * hardware.
+ */
+ if (unlikely(rxdesc.size == 0 ||
+ rxdesc.size > entry->queue->data_size)) {
+ WARNING(rt2x00dev, "Wrong frame size %d max %d.\n",
+ rxdesc.size, entry->queue->data_size);
+ dev_kfree_skb(entry->skb);
+ goto renew_skb;
+ }
+
+ /*
* The data behind the ieee80211 header must be
* aligned on a 4 byte boundary.
*/
@@ -642,6 +654,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
ieee80211_rx_ni(rt2x00dev->hw, entry->skb);
+renew_skb:
/*
* Replace the skb with the freshly allocated one.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 322cc4f3de5..4cdf247a870 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -336,7 +336,8 @@ static inline void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
*/
#ifdef CONFIG_RT2X00_LIB_CRYPTO
enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key);
-void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
+void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
struct txentry_desc *txdesc);
unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb);
@@ -354,7 +355,8 @@ static inline enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *
return CIPHER_NONE;
}
-static inline void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
+static inline void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
struct txentry_desc *txdesc)
{
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 93bec140e59..4ccf2380597 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -113,7 +113,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* due to possible race conditions in mac80211.
*/
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
- goto exit_fail;
+ goto exit_free_skb;
/*
* Use the ATIM queue if appropriate and present.
@@ -127,7 +127,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
ERROR(rt2x00dev,
"Attempt to send packet over invalid queue %d.\n"
"Please file bug report to %s.\n", qid, DRV_PROJECT);
- goto exit_fail;
+ goto exit_free_skb;
}
/*
@@ -159,6 +159,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
exit_fail:
rt2x00queue_pause_queue(queue);
+ exit_free_skb:
dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL_GPL(rt2x00mac_tx);
@@ -818,3 +819,17 @@ void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
*rx_max = rt2x00dev->rx->limit;
}
EXPORT_SYMBOL_GPL(rt2x00mac_get_ringparam);
+
+bool rt2x00mac_tx_frames_pending(struct ieee80211_hw *hw)
+{
+ struct rt2x00_dev *rt2x00dev = hw->priv;
+ struct data_queue *queue;
+
+ tx_queue_for_each(rt2x00dev, queue) {
+ if (!rt2x00queue_empty(queue))
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(rt2x00mac_tx_frames_pending);
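The new rt2x00mac_tx_frames_pending() callback above only has to answer "is any TX queue non-empty?", so mac80211 can query the driver instead of assuming the queues are drained. A plain-C sketch of the same check over an array of per-queue counters (the real code walks struct data_queue objects with tx_queue_for_each/rt2x00queue_empty):

/*
 * Illustrative sketch (not part of the patch): report whether any of
 * the simulated TX queues still holds frames.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sim_queue { unsigned int frames_queued; };

static bool tx_frames_pending(const struct sim_queue *q, size_t nqueues)
{
	for (size_t i = 0; i < nqueues; i++)
		if (q[i].frames_queued)
			return true;
	return false;
}

int main(void)
{
	struct sim_queue queues[4] = { {0}, {0}, {2}, {0} };

	printf("pending: %s\n",
	       tx_frames_pending(queues, 4) ? "true" : "false");
	return 0;
}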
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index ab8c16f8bca..29edb9fbe6f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -200,20 +200,20 @@ void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
skb_pull(skb, l2pad);
}
-static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
+static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
struct txentry_desc *txdesc)
{
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
- unsigned long irqflags;
if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
return;
__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
- if (!test_bit(REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->cap_flags))
+ if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags))
return;
/*
@@ -227,23 +227,23 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
* sequence counting per-frame, since those will override the
* sequence counter given by mac80211.
*/
- spin_lock_irqsave(&intf->seqlock, irqflags);
+ spin_lock(&intf->seqlock);
if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
intf->seqno += 0x10;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
- spin_unlock_irqrestore(&intf->seqlock, irqflags);
+ spin_unlock(&intf->seqlock);
}
-static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
+static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
struct txentry_desc *txdesc,
const struct rt2x00_rate *hwrate)
{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
unsigned int data_length;
unsigned int duration;
@@ -260,8 +260,8 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
txdesc->u.plcp.ifs = IFS_SIFS;
/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
- data_length = entry->skb->len + 4;
- data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);
+ data_length = skb->len + 4;
+ data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);
/*
* PLCP setup
@@ -302,13 +302,14 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
}
}
-static void rt2x00queue_create_tx_descriptor_ht(struct queue_entry *entry,
+static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
struct txentry_desc *txdesc,
const struct rt2x00_rate *hwrate)
{
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (tx_info->control.sta)
txdesc->u.ht.mpdu_density =
@@ -381,12 +382,12 @@ static void rt2x00queue_create_tx_descriptor_ht(struct queue_entry *entry,
txdesc->u.ht.txop = TXOP_HTTXOP;
}
-static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
+static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
struct txentry_desc *txdesc)
{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
struct ieee80211_rate *rate;
const struct rt2x00_rate *hwrate = NULL;
@@ -396,8 +397,8 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Header and frame information.
*/
- txdesc->length = entry->skb->len;
- txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
+ txdesc->length = skb->len;
+ txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);
/*
* Check whether this frame is to be acked.
@@ -472,13 +473,15 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Apply TX descriptor handling by components
*/
- rt2x00crypto_create_tx_descriptor(entry, txdesc);
- rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
+ rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
+ rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);
if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
- rt2x00queue_create_tx_descriptor_ht(entry, txdesc, hwrate);
+ rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
+ hwrate);
else
- rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
+ rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
+ hwrate);
}
static int rt2x00queue_write_tx_data(struct queue_entry *entry,
@@ -556,33 +559,18 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
bool local)
{
struct ieee80211_tx_info *tx_info;
- struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ struct queue_entry *entry;
struct txentry_desc txdesc;
struct skb_frame_desc *skbdesc;
u8 rate_idx, rate_flags;
-
- if (unlikely(rt2x00queue_full(queue))) {
- ERROR(queue->rt2x00dev,
- "Dropping frame due to full tx queue %d.\n", queue->qid);
- return -ENOBUFS;
- }
-
- if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
- &entry->flags))) {
- ERROR(queue->rt2x00dev,
- "Arrived at non-free entry in the non-full queue %d.\n"
- "Please file bug report to %s.\n",
- queue->qid, DRV_PROJECT);
- return -EINVAL;
- }
+ int ret = 0;
/*
* Copy all TX descriptor information into txdesc,
* after that we are free to use the skb->cb array
* for our information.
*/
- entry->skb = skb;
- rt2x00queue_create_tx_descriptor(entry, &txdesc);
+ rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc);
/*
* All information is retrieved from the skb->cb array,
@@ -594,7 +582,6 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
rate_flags = tx_info->control.rates[0].flags;
skbdesc = get_skb_frame_desc(skb);
memset(skbdesc, 0, sizeof(*skbdesc));
- skbdesc->entry = entry;
skbdesc->tx_rate_idx = rate_idx;
skbdesc->tx_rate_flags = rate_flags;
@@ -623,9 +610,33 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
* for PCI devices.
*/
if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
- rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
+ rt2x00queue_insert_l2pad(skb, txdesc.header_length);
else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
- rt2x00queue_align_frame(entry->skb);
+ rt2x00queue_align_frame(skb);
+
+ spin_lock(&queue->tx_lock);
+
+ if (unlikely(rt2x00queue_full(queue))) {
+ ERROR(queue->rt2x00dev,
+ "Dropping frame due to full tx queue %d.\n", queue->qid);
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+
+ if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
+ &entry->flags))) {
+ ERROR(queue->rt2x00dev,
+ "Arrived at non-free entry in the non-full queue %d.\n"
+ "Please file bug report to %s.\n",
+ queue->qid, DRV_PROJECT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ skbdesc->entry = entry;
+ entry->skb = skb;
/*
* It could be possible that the queue was corrupted and this
@@ -635,7 +646,8 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
entry->skb = NULL;
- return -EIO;
+ ret = -EIO;
+ goto out;
}
set_bit(ENTRY_DATA_PENDING, &entry->flags);
@@ -644,7 +656,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
rt2x00queue_write_tx_descriptor(entry, &txdesc);
rt2x00queue_kick_tx_queue(queue, &txdesc);
- return 0;
+out:
+ spin_unlock(&queue->tx_lock);
+ return ret;
}
int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
@@ -698,7 +712,7 @@ int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
* after that we are free to use the skb->cb array
* for our information.
*/
- rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
+ rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc);
/*
* Fill in skb descriptor
@@ -1185,6 +1199,7 @@ static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue, enum data_queue_qid qid)
{
mutex_init(&queue->status_lock);
+ spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->index_lock);
queue->rt2x00dev = rt2x00dev;
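The rt2x00queue.c hunks above keep the software sequence-number path and its `intf->seqno += 0x10` step while moving it under a plain spin_lock(). The increment is 0x10 because the 802.11 Sequence Control field stores the fragment number in bits 0-3 and the sequence number in bits 4-15, so adding 0x10 advances the sequence number without disturbing the fragment bits. A small standalone sketch of that arithmetic, using a simplified local counter:

/*
 * Illustrative sketch (not part of the patch): why the driver adds
 * 0x10 to intf->seqno.  The low 4 bits of seq_ctrl are the fragment
 * number, bits 4-15 the sequence number.
 */
#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG 0x000f	/* fragment-number mask (IEEE80211_SCTL_FRAG) */

int main(void)
{
	uint16_t seqno = 0;		/* driver counter, stepped by 0x10 per MSDU */
	uint16_t seq_ctrl = 0x0003;	/* pretend frame carrying fragment number 3 */

	seqno += 0x10;					/* next sequence number */
	seq_ctrl = (seq_ctrl & SCTL_FRAG) | seqno;	/* keep fragment bits */

	printf("seq=%u frag=%u\n", seq_ctrl >> 4, seq_ctrl & SCTL_FRAG);
	return 0;
}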
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 167d45873dc..f2100f4ddcf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -54,7 +54,7 @@
* @QID_RX: RX queue
* @QID_OTHER: None of the above (don't use, only present for completeness)
* @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
- * @QID_ATIM: Atim queue (value unspeficied, don't send it to device)
+ * @QID_ATIM: Atim queue (value unspecified, don't send it to device)
*/
enum data_queue_qid {
QID_AC_VO = 0,
@@ -432,6 +432,7 @@ enum data_queue_flags {
* @flags: Entry flags, see &enum queue_entry_flags.
* @status_lock: The mutex for protecting the start/stop/flush
* handling on this queue.
+ * @tx_lock: Spinlock to serialize tx operations on this queue.
* @index_lock: Spinlock to protect index handling. Whenever @index, @index_done or
* @index_crypt needs to be changed this lock should be grabbed to prevent
* index corruption due to concurrency.
@@ -458,6 +459,7 @@ struct data_queue {
unsigned long flags;
struct mutex status_lock;
+ spinlock_t tx_lock;
spinlock_t index_lock;
unsigned int count;
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 8f90f626807..7fbb55c9da8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -262,23 +262,20 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
struct queue_entry *entry = (struct queue_entry *)urb->context;
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+ if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
return;
-
- if (rt2x00dev->ops->lib->tx_dma_done)
- rt2x00dev->ops->lib->tx_dma_done(entry);
-
- /*
- * Report the frame as DMA done
- */
- rt2x00lib_dmadone(entry);
-
/*
* Check if the frame was correctly uploaded
*/
if (urb->status)
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
+ /*
+ * Report the frame as DMA done
+ */
+ rt2x00lib_dmadone(entry);
+ if (rt2x00dev->ops->lib->tx_dma_done)
+ rt2x00dev->ops->lib->tx_dma_done(entry);
/*
* Schedule the delayed work for reading the TX status
* from the device.
@@ -802,6 +799,7 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
int retval;
usb_dev = usb_get_dev(usb_dev);
+ usb_reset_device(usb_dev);
hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
if (!hw) {
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 9d35ec16a3a..53110b83bf6 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2982,6 +2982,7 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
.set_antenna = rt2x00mac_set_antenna,
.get_antenna = rt2x00mac_get_antenna,
.get_ringparam = rt2x00mac_get_ringparam,
+ .tx_frames_pending = rt2x00mac_tx_frames_pending,
};
static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index ad20953cbf0..0baeb894f09 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2314,6 +2314,7 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
.set_antenna = rt2x00mac_set_antenna,
.get_antenna = rt2x00mac_get_antenna,
.get_ringparam = rt2x00mac_get_ringparam,
+ .tx_frames_pending = rt2x00mac_tx_frames_pending,
};
static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
@@ -2419,6 +2420,7 @@ static struct usb_device_id rt73usb_device_table[] = {
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00d8) },
{ USB_DEVICE(0x0411, 0x00d9) },
+ { USB_DEVICE(0x0411, 0x00e6) },
{ USB_DEVICE(0x0411, 0x00f4) },
{ USB_DEVICE(0x0411, 0x0116) },
{ USB_DEVICE(0x0411, 0x0119) },
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 80db5cabc9b..66b29dc07cc 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -16,6 +16,7 @@
*/
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 5aee8b22d74..45e14760c16 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -21,6 +21,17 @@ config RTL8192SE
If you choose to build it as a module, it will be called rtl8192se
+config RTL8192DE
+ tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
+ depends on MAC80211 && EXPERIMENTAL
+ select FW_LOADER
+ select RTLWIFI
+ ---help---
+ This is the driver for Realtek RTL8192DE/RTL8188DE 802.11n PCIe
+ wireless network adapters.
+
+ If you choose to build it as a module, it will be called rtl8192de
+
config RTL8192CU
tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
depends on MAC80211 && USB && EXPERIMENTAL
@@ -35,10 +46,10 @@ config RTL8192CU
config RTLWIFI
tristate
- depends on RTL8192CE || RTL8192CU || RTL8192SE
+ depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE
default m
config RTL8192C_COMMON
tristate
- depends on RTL8192CE || RTL8192CU || RTL8192SE
+ depends on RTL8192CE || RTL8192CU
default m
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index 7acce83c378..97935c565ba 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -23,5 +23,6 @@ obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
obj-$(CONFIG_RTL8192CE) += rtl8192ce/
obj-$(CONFIG_RTL8192CU) += rtl8192cu/
obj-$(CONFIG_RTL8192SE) += rtl8192se/
+obj-$(CONFIG_RTL8192DE) += rtl8192de/
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index ccb6da38fe2..0b598db38da 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -27,6 +27,8 @@
*
*****************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/ip.h>
#include "wifi.h"
#include "rc.h"
@@ -397,8 +399,8 @@ void rtl_init_rfkill(struct ieee80211_hw *hw)
radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
if (valid) {
- printk(KERN_INFO "rtlwifi: wireless switch is %s\n",
- rtlpriv->rfkill.rfkill_state ? "on" : "off");
+ pr_info("wireless switch is %s\n",
+ rtlpriv->rfkill.rfkill_state ? "on" : "off");
rtlpriv->rfkill.rfkill_state = radio_state;
@@ -523,7 +525,7 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw,
mac->opmode == NL80211_IFTYPE_ADHOC)
bw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- if ((bw_40 == true) && sgi_40)
+ if (bw_40 && sgi_40)
tcb_desc->use_shortgi = true;
else if ((bw_40 == false) && sgi_20)
tcb_desc->use_shortgi = true;
@@ -756,18 +758,17 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
return false;
RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- ("%s ACT_ADDBAREQ From :" MAC_FMT "\n",
- is_tx ? "Tx" : "Rx", MAC_ARG(hdr->addr2)));
+ ("%s ACT_ADDBAREQ From :%pM\n",
+ is_tx ? "Tx" : "Rx", hdr->addr2));
break;
case ACT_ADDBARSP:
RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- ("%s ACT_ADDBARSP From :" MAC_FMT "\n",
- is_tx ? "Tx" : "Rx", MAC_ARG(hdr->addr2)));
+ ("%s ACT_ADDBARSP From :%pM\n",
+ is_tx ? "Tx" : "Rx", hdr->addr2));
break;
case ACT_DELBA:
RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- ("ACT_ADDBADEL From :" MAC_FMT "\n",
- MAC_ARG(hdr->addr2)));
+ ("ACT_ADDBADEL From :%pM\n", hdr->addr2));
break;
}
break;
@@ -888,7 +889,6 @@ int rtl_tx_agg_stop(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_tid_data *tid_data;
struct rtl_sta_info *sta_entry = NULL;
if (sta == NULL)
@@ -906,7 +906,6 @@ int rtl_tx_agg_stop(struct ieee80211_hw *hw,
return -EINVAL;
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- tid_data = &sta_entry->tids[tid];
sta_entry->tids[tid].agg.agg_state = RTL_AGG_STOP;
ieee80211_stop_tx_ba_cb_irqsafe(mac->vif, sta->addr, tid);
@@ -918,7 +917,6 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u16 tid)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_tid_data *tid_data;
struct rtl_sta_info *sta_entry = NULL;
if (sta == NULL)
@@ -936,7 +934,6 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
return -EINVAL;
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
- tid_data = &sta_entry->tids[tid];
sta_entry->tids[tid].agg.agg_state = RTL_AGG_OPERATIONAL;
return 0;
@@ -1406,8 +1403,7 @@ MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
static int __init rtl_core_module_init(void)
{
if (rtl_rate_control_register())
- printk(KERN_ERR "rtlwifi: Unable to register rtl_rc,"
- "use default RC !!\n");
+ pr_err("Unable to register rtl_rc, use default RC !!\n");
return 0;
}
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 7295af0536b..7babb6acd95 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -27,6 +27,8 @@
*
*****************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "wifi.h"
#include "cam.h"
@@ -131,9 +133,9 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
("EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, "
- "ulUseDK=%x MacAddr" MAC_FMT "\n",
+ "ulUseDK=%x MacAddr %pM\n",
ul_entry_idx, ul_key_id, ul_enc_alg,
- ul_default_key, MAC_ARG(mac_addr)));
+ ul_default_key, mac_addr));
if (ul_key_id == TOTAL_CAM_ENTRY) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
@@ -347,7 +349,7 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
/* Remove from HW Security CAM */
memset(rtlpriv->sec.hwsec_cam_sta_addr[i], 0, ETH_ALEN);
rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
- printk(KERN_INFO "&&&&&&&&&del entry %d\n", i);
+ pr_info("&&&&&&&&&del entry %d\n", i);
}
}
return;
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index d2ec2535aa3..1bdc1aa305c 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -335,8 +335,8 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
* before going offchannel, or dis-association or delete BA will
* happen by AP
*/
- if (rtlpriv->mac80211.offchan_deley) {
- rtlpriv->mac80211.offchan_deley = false;
+ if (rtlpriv->mac80211.offchan_delay) {
+ rtlpriv->mac80211.offchan_delay = false;
mdelay(50);
}
rtlphy->current_channel = wide_chan;
@@ -443,11 +443,11 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw,
sta_entry->wireless_mode = WIRELESS_MODE_G;
if (sta->supp_rates[0] <= 0xf)
sta_entry->wireless_mode = WIRELESS_MODE_B;
- if (sta->ht_cap.ht_supported == true)
+ if (sta->ht_cap.ht_supported)
sta_entry->wireless_mode = WIRELESS_MODE_N_24G;
} else if (rtlhal->current_bandtype == BAND_ON_5G) {
sta_entry->wireless_mode = WIRELESS_MODE_A;
- if (sta->ht_cap.ht_supported == true)
+ if (sta->ht_cap.ht_supported)
sta_entry->wireless_mode = WIRELESS_MODE_N_24G;
}
@@ -456,7 +456,7 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw,
sta_entry->wireless_mode = WIRELESS_MODE_G;
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- ("Add sta addr is "MAC_FMT"\n", MAC_ARG(sta->addr)));
+ ("Add sta addr is %pM\n", sta->addr));
rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
}
return 0;
@@ -469,7 +469,7 @@ static int rtl_op_sta_remove(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry;
if (sta) {
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- ("Remove sta addr is "MAC_FMT"\n", MAC_ARG(sta->addr)));
+ ("Remove sta addr is %pM\n", sta->addr));
sta_entry = (struct rtl_sta_info *) sta->drv_priv;
sta_entry->wireless_mode = 0;
sta_entry->ratr_index = 0;
@@ -650,7 +650,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
("BSS_CHANGED_HT\n"));
rcu_read_lock();
- sta = get_sta(hw, vif, (u8 *)bss_conf->bssid);
+ sta = get_sta(hw, vif, bss_conf->bssid);
if (sta) {
if (sta->ht_cap.ampdu_density >
mac->current_ampdu_density)
@@ -678,14 +678,14 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
(u8 *) bss_conf->bssid);
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- (MAC_FMT "\n", MAC_ARG(bss_conf->bssid)));
+ ("%pM\n", bss_conf->bssid));
mac->vendor = PEER_UNKNOWN;
memcpy(mac->bssid, bss_conf->bssid, 6);
rtlpriv->cfg->ops->set_network_type(hw, vif->type);
rcu_read_lock();
- sta = get_sta(hw, vif, (u8 *)bss_conf->bssid);
+ sta = get_sta(hw, vif, bss_conf->bssid);
if (!sta) {
rcu_read_unlock();
goto out;
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index 4b247db2861..f02824a3b74 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -30,6 +30,8 @@
#ifndef __RTL_CORE_H__
#define __RTL_CORE_H__
+#include <net/mac80211.h>
+
#define RTL_SUPPORTED_FILTERS \
(FIF_PROMISC_IN_BSS | \
FIF_ALLMULTI | FIF_CONTROL | \
diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/rtlwifi/debug.h
index e4aa8687408..160dd068521 100644
--- a/drivers/net/wireless/rtlwifi/debug.h
+++ b/drivers/net/wireless/rtlwifi/debug.h
@@ -204,10 +204,5 @@ enum dbgp_flag_e {
} \
} while (0);
-#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
-#define MAC_ARG(x) \
- ((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2],\
- ((u8 *)(x))[3], ((u8 *)(x))[4], ((u8 *)(x))[5]
-
void rtl_dbgp_flag_init(struct ieee80211_hw *hw);
#endif
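The debug.h hunk above drops the driver-private MAC_FMT/MAC_ARG macros now that the callers use the kernel's %pM printk extension, which formats a 6-byte MAC address from a pointer. A userspace equivalent of what those macros open-coded, for reference:

/*
 * Illustrative sketch (not part of the patch): format a MAC address
 * the way MAC_FMT/MAC_ARG did before %pM took over.
 */
#include <stdio.h>

static void print_mac(const unsigned char mac[6])
{
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
}

int main(void)
{
	const unsigned char addr[6] = { 0x00, 0x1c, 0x10, 0xaa, 0xbb, 0xcc };

	print_mac(addr);
	return 0;
}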
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 50de6f5d8a5..3fc21f60bb0 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -382,7 +382,7 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
}
}
- if (wordchanged == true)
+ if (wordchanged)
hdr_num++;
}
@@ -453,7 +453,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
base = offset * 8;
for (i = 0; i < 8; i++) {
- if (first_pg == true) {
+ if (first_pg) {
word_en &= ~(BIT(i / 2));
@@ -505,7 +505,7 @@ void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- if (rtlefuse->autoload_failflag == true)
+ if (rtlefuse->autoload_failflag)
memset(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 0xFF,
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
else
@@ -690,7 +690,7 @@ static void efuse_read_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
}
}
- if (dataempty == true) {
+ if (dataempty) {
*readstate = PG_STATE_DATA;
} else {
*efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
@@ -925,7 +925,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct pgpkt_struct target_pkt;
u8 write_state = PG_STATE_HEADER;
- int continual = true, dataempty = true, result = true;
+ int continual = true, result = true;
u16 efuse_addr = 0;
u8 efuse_data;
u8 target_word_cnts = 0;
@@ -953,7 +953,6 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
(EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES))) {
if (write_state == PG_STATE_HEADER) {
- dataempty = true;
badworden = 0x0F;
RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
("efuse PG_STATE_HEADER\n"));
@@ -1176,13 +1175,12 @@ static u16 efuse_get_current_size(struct ieee80211_hw *hw)
{
int continual = true;
u16 efuse_addr = 0;
- u8 hoffset, hworden;
+ u8 hworden;
u8 efuse_data, word_cnts;
while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data)
&& (efuse_addr < EFUSE_MAX_SIZE)) {
if (efuse_data != 0xFF) {
- hoffset = (efuse_data >> 4) & 0x0F;
hworden = efuse_data & 0x0F;
word_cnts = efuse_calculate_word_cnts(hworden);
efuse_addr = efuse_addr + (word_cnts * 2) + 1;
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 9f8ccae9331..56f12358389 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -35,10 +35,10 @@
#include "efuse.h"
static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
- INTEL_VENDOR_ID,
- ATI_VENDOR_ID,
- AMD_VENDOR_ID,
- SIS_VENDOR_ID
+ PCI_VENDOR_ID_INTEL,
+ PCI_VENDOR_ID_ATI,
+ PCI_VENDOR_ID_AMD,
+ PCI_VENDOR_ID_SI
};
static const u8 ac_to_hwq[] = {
@@ -390,7 +390,7 @@ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
u8 linkctrl_reg;
/*Link Control Register */
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(pdev);
pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
@@ -581,7 +581,7 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
fc = rtl_get_fc(skb);
if (ieee80211_is_nullfunc(fc)) {
if (ieee80211_has_pm(fc)) {
- rtlpriv->mac80211.offchan_deley = true;
+ rtlpriv->mac80211.offchan_delay = true;
rtlpriv->psc.state_inap = 1;
} else {
rtlpriv->psc.state_inap = 0;
@@ -622,10 +622,60 @@ tx_status_ok:
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2)) {
- rtl_lps_leave(hw);
+ tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
}
}
+static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct ieee80211_rx_status rx_status)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
+ __le16 fc = rtl_get_fc(skb);
+ bool unicast = false;
+ struct sk_buff *uskb = NULL;
+ u8 *pdata;
+
+
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+ if (is_broadcast_ether_addr(hdr->addr1)) {
+ ;/*TODO*/
+ } else if (is_multicast_ether_addr(hdr->addr1)) {
+ ;/*TODO*/
+ } else {
+ unicast = true;
+ rtlpriv->stats.rxbytesunicast += skb->len;
+ }
+
+ rtl_is_special_data(hw, skb, false);
+
+ if (ieee80211_is_data(fc)) {
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
+
+ if (unicast)
+ rtlpriv->link_info.num_rx_inperiod++;
+ }
+
+ /* for sw lps */
+ rtl_swlps_beacon(hw, (void *)skb->data, skb->len);
+ rtl_recognize_peer(hw, (void *)skb->data, skb->len);
+ if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
+ (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) &&
+ (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)))
+ return;
+
+ if (unlikely(!rtl_action_proc(hw, skb, false)))
+ return;
+
+ uskb = dev_alloc_skb(skb->len + 128);
+ memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, sizeof(rx_status));
+ pdata = (u8 *)skb_put(uskb, skb->len);
+ memcpy(pdata, skb->data, skb->len);
+
+ ieee80211_rx_irqsafe(hw, uskb);
+}
+
static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -637,185 +687,112 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
u8 own;
u8 tmp_one;
u32 bufferaddress;
- bool unicast = false;
struct rtl_stats stats = {
.signal = 0,
.noise = -98,
.rate = 0,
};
+ int index = rtlpci->rx_ring[rx_queue_idx].idx;
/*RX NORMAL PKT */
while (count--) {
/*rx descriptor */
struct rtl_rx_desc *pdesc = &rtlpci->rx_ring[rx_queue_idx].desc[
- rtlpci->rx_ring[rx_queue_idx].idx];
+ index];
/*rx pkt */
struct sk_buff *skb = rtlpci->rx_ring[rx_queue_idx].rx_buf[
- rtlpci->rx_ring[rx_queue_idx].idx];
+ index];
+ struct sk_buff *new_skb = NULL;
own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
false, HW_DESC_OWN);
- if (own) {
- /*wait data to be filled by hardware */
- return;
- } else {
- struct ieee80211_hdr *hdr;
- __le16 fc;
- struct sk_buff *new_skb = NULL;
-
- rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
- &rx_status,
- (u8 *) pdesc, skb);
-
- new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
- if (unlikely(!new_skb)) {
- RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
- DBG_DMESG,
- ("can't alloc skb for rx\n"));
- goto done;
- }
+ /*wait data to be filled by hardware */
+ if (own)
+ break;
- pci_unmap_single(rtlpci->pdev,
- *((dma_addr_t *) skb->cb),
- rtlpci->rxbuffersize,
- PCI_DMA_FROMDEVICE);
+ rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
+ &rx_status,
+ (u8 *) pdesc, skb);
- skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
- false,
- HW_DESC_RXPKT_LEN));
- skb_reserve(skb,
- stats.rx_drvinfo_size + stats.rx_bufshift);
+ if (stats.crc || stats.hwerror)
+ goto done;
- /*
- *NOTICE This can not be use for mac80211,
- *this is done in mac80211 code,
- *if you done here sec DHCP will fail
- *skb_trim(skb, skb->len - 4);
- */
+ new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ if (unlikely(!new_skb)) {
+ RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
+ DBG_DMESG,
+ ("can't alloc skb for rx\n"));
+ goto done;
+ }
- hdr = rtl_get_hdr(skb);
- fc = rtl_get_fc(skb);
-
- if (!stats.crc && !stats.hwerror) {
- memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
- sizeof(rx_status));
-
- if (is_broadcast_ether_addr(hdr->addr1)) {
- ;/*TODO*/
- } else if (is_multicast_ether_addr(hdr->addr1)) {
- ;/*TODO*/
- } else {
- unicast = true;
- rtlpriv->stats.rxbytesunicast +=
- skb->len;
- }
-
- rtl_is_special_data(hw, skb, false);
-
- if (ieee80211_is_data(fc)) {
- rtlpriv->cfg->ops->led_control(hw,
- LED_CTL_RX);
-
- if (unicast)
- rtlpriv->link_info.
- num_rx_inperiod++;
- }
-
- /* for sw lps */
- rtl_swlps_beacon(hw, (void *)skb->data,
- skb->len);
- rtl_recognize_peer(hw, (void *)skb->data,
- skb->len);
- if ((rtlpriv->mac80211.opmode ==
- NL80211_IFTYPE_AP) &&
- (rtlpriv->rtlhal.current_bandtype ==
- BAND_ON_2_4G) &&
- (ieee80211_is_beacon(fc) ||
- ieee80211_is_probe_resp(fc))) {
- dev_kfree_skb_any(skb);
- } else {
- if (unlikely(!rtl_action_proc(hw, skb,
- false))) {
- dev_kfree_skb_any(skb);
- } else {
- struct sk_buff *uskb = NULL;
- u8 *pdata;
- uskb = dev_alloc_skb(skb->len
- + 128);
- memcpy(IEEE80211_SKB_RXCB(uskb),
- &rx_status,
- sizeof(rx_status));
- pdata = (u8 *)skb_put(uskb,
- skb->len);
- memcpy(pdata, skb->data,
- skb->len);
- dev_kfree_skb_any(skb);
-
- ieee80211_rx_irqsafe(hw, uskb);
- }
- }
- } else {
- dev_kfree_skb_any(skb);
- }
+ pci_unmap_single(rtlpci->pdev,
+ *((dma_addr_t *) skb->cb),
+ rtlpci->rxbuffersize,
+ PCI_DMA_FROMDEVICE);
- if (((rtlpriv->link_info.num_rx_inperiod +
- rtlpriv->link_info.num_tx_inperiod) > 8) ||
- (rtlpriv->link_info.num_rx_inperiod > 2)) {
- rtl_lps_leave(hw);
- }
+ skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc, false,
+ HW_DESC_RXPKT_LEN));
+ skb_reserve(skb, stats.rx_drvinfo_size + stats.rx_bufshift);
- skb = new_skb;
+ /*
+ * NOTICE: This can not be used for mac80211;
+ * it is done in mac80211 code. If it is done
+ * here, secured DHCP will fail:
+ * skb_trim(skb, skb->len - 4);
+ */
- rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci->
- rx_ring
- [rx_queue_idx].
- idx] = skb;
- *((dma_addr_t *) skb->cb) =
+ _rtl_receive_one(hw, skb, rx_status);
+
+ if (((rtlpriv->link_info.num_rx_inperiod +
+ rtlpriv->link_info.num_tx_inperiod) > 8) ||
+ (rtlpriv->link_info.num_rx_inperiod > 2)) {
+ tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
+ }
+
+ dev_kfree_skb_any(skb);
+ skb = new_skb;
+
+ rtlpci->rx_ring[rx_queue_idx].rx_buf[index] = skb;
+ *((dma_addr_t *) skb->cb) =
pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
rtlpci->rxbuffersize,
PCI_DMA_FROMDEVICE);
- }
done:
bufferaddress = (*((dma_addr_t *)skb->cb));
tmp_one = 1;
rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
HW_DESC_RXBUFF_ADDR,
(u8 *)&bufferaddress);
- rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
- (u8 *)&tmp_one);
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
HW_DESC_RXPKT_LEN,
(u8 *)&rtlpci->rxbuffersize);
- if (rtlpci->rx_ring[rx_queue_idx].idx ==
- rtlpci->rxringcount - 1)
+ if (index == rtlpci->rxringcount - 1)
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
HW_DESC_RXERO,
(u8 *)&tmp_one);
- rtlpci->rx_ring[rx_queue_idx].idx =
- (rtlpci->rx_ring[rx_queue_idx].idx + 1) %
- rtlpci->rxringcount;
+ rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
+ (u8 *)&tmp_one);
+
+ index = (index + 1) % rtlpci->rxringcount;
}
+ rtlpci->rx_ring[rx_queue_idx].idx = index;
}
static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
{
struct ieee80211_hw *hw = dev_id;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
unsigned long flags;
u32 inta = 0;
u32 intb = 0;
- if (rtlpci->irq_enabled == 0)
- return IRQ_HANDLED;
-
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
/*read ISR: 4/8bytes */
@@ -938,6 +915,11 @@ static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
_rtl_pci_tx_chk_waitq(hw);
}
+static void _rtl_pci_ips_leave_tasklet(struct ieee80211_hw *hw)
+{
+ rtl_lps_leave(hw);
+}
+
static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1036,6 +1018,9 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
(void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
(unsigned long)hw);
+ tasklet_init(&rtlpriv->works.ips_leave_tasklet,
+ (void (*)(unsigned long))_rtl_pci_ips_leave_tasklet,
+ (unsigned long)hw);
}
static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
@@ -1505,6 +1490,7 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
synchronize_irq(rtlpci->pdev->irq);
tasklet_kill(&rtlpriv->works.irq_tasklet);
+ tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
flush_workqueue(rtlpriv->works.rtl_wq);
destroy_workqueue(rtlpriv->works.rtl_wq);
@@ -1579,6 +1565,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
set_hal_stop(rtlhal);
rtlpriv->cfg->ops->disable_interrupt(hw);
+ tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
while (ppsc->rfchange_inprogress) {
@@ -1624,6 +1611,16 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
pci_read_config_byte(pdev, 0x8, &revisionid);
pci_read_config_word(pdev, 0x3C, &irqline);
+ /* PCI ID 0x10ec:0x8192 occurs for both RTL8192E, which uses
+ * r8192e_pci, and RTL8192SE, which uses this driver. If the
+ * revision ID is RTL_PCI_REVISION_ID_8192PCIE (0x01), then
+ * the correct driver is r8192e_pci, thus this routine should
+ * return false.
+ */
+ if (deviceid == RTL_PCI_8192SE_DID &&
+ revisionid == RTL_PCI_REVISION_ID_8192PCIE)
+ return false;
+
if (deviceid == RTL_PCI_8192_DID ||
deviceid == RTL_PCI_0044_DID ||
deviceid == RTL_PCI_0047_DID ||
@@ -1699,15 +1696,17 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
- /*find bridge info */
- pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
- for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
- if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
- pcipriv->ndis_adapter.pcibridge_vendor = tmp;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- ("Pci Bridge Vendor is found index: %d\n",
- tmp));
- break;
+ if (bridge_pdev) {
+ /*find bridge info if available */
+ pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
+ for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
+ if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
+ pcipriv->ndis_adapter.pcibridge_vendor = tmp;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("Pci Bridge Vendor is found index:"
+ " %d\n", tmp));
+ break;
+ }
}
}
@@ -1856,7 +1855,8 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
pci_write_config_byte(pdev, 0x04, 0x07);
/* find adapter */
- _rtl_pci_find_adapter(pdev, hw);
+ if (!_rtl_pci_find_adapter(pdev, hw))
+ goto fail3;
/* Init IO handler */
_rtl_pci_io_handler_init(&pdev->dev, hw);
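The reworked _rtl_pci_rx_interrupt() earlier in this pci.c diff keeps the ring position in a local `index`, breaks out as soon as the hardware still owns a descriptor, hands each consumed descriptor back, and writes the index to rx_ring[...].idx once after the loop. A minimal consume-and-wrap sketch of that pattern; the structures below are invented for the example:

/*
 * Illustrative sketch (not part of the patch): walk an RX ring from a
 * saved index, stop at the first hardware-owned descriptor, return the
 * descriptor to the "hardware", and store the index once at the end.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

struct rx_desc {
	bool hw_owned;		/* set by "hardware", cleared when it has data */
	int len;
};

static void rx_poll(struct rx_desc *ring, int *saved_idx)
{
	int index = *saved_idx;
	int budget = RING_SIZE;

	while (budget--) {
		struct rx_desc *d = &ring[index];

		if (d->hw_owned)		/* wait for hardware to fill it */
			break;

		printf("consumed desc %d, len %d\n", index, d->len);

		d->hw_owned = true;		/* give the descriptor back */
		index = (index + 1) % RING_SIZE;
	}

	*saved_idx = index;			/* single write-back, as in the patch */
}

int main(void)
{
	struct rx_desc ring[RING_SIZE] = {
		{ false, 60 }, { false, 1500 }, { true, 0 },
	};
	int idx = 0;

	rx_poll(ring, &idx);
	printf("next index %d\n", idx);
	return 0;
}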
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index 671b1f5aa0c..c53c6204674 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -62,12 +62,6 @@
.subdevice = PCI_ANY_ID,\
.driver_data = (kernel_ulong_t)&(cfg)
-#define INTEL_VENDOR_ID 0x8086
-#define SIS_VENDOR_ID 0x1039
-#define ATI_VENDOR_ID 0x1002
-#define ATI_DEVICE_ID 0x7914
-#define AMD_VENDOR_ID 0x1022
-
#define PCI_MAX_BRIDGE_NUMBER 255
#define PCI_MAX_DEVICES 32
#define PCI_MAX_FUNCTION 8
@@ -75,11 +69,6 @@
#define PCI_CONF_ADDRESS 0x0CF8 /*PCI Configuration Space Address */
#define PCI_CONF_DATA 0x0CFC /*PCI Configuration Space Data */
-#define PCI_CLASS_BRIDGE_DEV 0x06
-#define PCI_SUBCLASS_BR_PCI_TO_PCI 0x04
-#define PCI_CAPABILITY_ID_PCI_EXPRESS 0x10
-#define PCI_CAP_ID_EXP 0x10
-
#define U1DONTCARE 0xFF
#define U2DONTCARE 0xFFFF
#define U4DONTCARE 0xFFFFFFFF
@@ -169,7 +158,6 @@ struct rtl_pci {
bool first_init;
bool being_init_adapter;
bool init_ready;
- bool irq_enabled;
/*Tx */
struct rtl8192_tx_ring tx_ring[RTL_PCI_MAX_TX_QUEUE_COUNT];
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 39b0297ce92..a693feffbe7 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -68,6 +68,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
/*<2> Disable Interrupt */
rtlpriv->cfg->ops->disable_interrupt(hw);
+ tasklet_kill(&rtlpriv->works.irq_tasklet);
/*<3> Disable Adapter */
rtlpriv->cfg->ops->hw_disable(hw);
@@ -78,65 +79,18 @@ EXPORT_SYMBOL(rtl_ps_disable_nic);
bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
enum rf_pwrstate state_toset,
- u32 changesource, bool protect_or_not)
+ u32 changesource)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- enum rf_pwrstate rtstate;
bool actionallowed = false;
- u16 rfwait_cnt = 0;
- unsigned long flag;
-
- /*protect_or_not = true; */
-
- if (protect_or_not)
- goto no_protect;
-
- /*
- *Only one thread can change
- *the RF state at one time, and others
- *should wait to be executed.
- */
- while (true) {
- spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
- if (ppsc->rfchange_inprogress) {
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock,
- flag);
-
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- ("RF Change in progress!"
- "Wait to set..state_toset(%d).\n",
- state_toset));
-
- /* Set RF after the previous action is done. */
- while (ppsc->rfchange_inprogress) {
- rfwait_cnt++;
- mdelay(1);
-
- /*
- *Wait too long, return false to avoid
- *to be stuck here.
- */
- if (rfwait_cnt > 100)
- return false;
- }
- } else {
- ppsc->rfchange_inprogress = true;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock,
- flag);
- break;
- }
- }
-
-no_protect:
- rtstate = ppsc->rfpwr_state;
switch (state_toset) {
case ERFON:
ppsc->rfoff_reason &= (~changesource);
if ((changesource == RF_CHANGE_BY_HW) &&
- (ppsc->hwradiooff == true)) {
+ (ppsc->hwradiooff)) {
ppsc->hwradiooff = false;
}
@@ -172,12 +126,6 @@ no_protect:
if (actionallowed)
rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset);
- if (!protect_or_not) {
- spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
- ppsc->rfchange_inprogress = false;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
- }
-
return actionallowed;
}
EXPORT_SYMBOL(rtl_ps_set_rf_state);
@@ -200,8 +148,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
}
}
- rtl_ps_set_rf_state(hw, ppsc->inactive_pwrstate,
- RF_CHANGE_BY_IPS, false);
+ rtl_ps_set_rf_state(hw, ppsc->inactive_pwrstate, RF_CHANGE_BY_IPS);
if (ppsc->inactive_pwrstate == ERFOFF &&
rtlhal->interface == INTF_PCI) {
@@ -289,12 +236,11 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
enum rf_pwrstate rtstate;
- unsigned long flags;
if (mac->opmode != NL80211_IFTYPE_STATION)
return;
- spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags);
+ spin_lock(&rtlpriv->locks.ips_lock);
if (ppsc->inactiveps) {
rtstate = ppsc->rfpwr_state;
@@ -310,7 +256,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
}
}
- spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags);
+ spin_unlock(&rtlpriv->locks.ips_lock);
}
/*for FW LPS*/
@@ -428,7 +374,6 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_priv *rtlpriv = rtl_priv(hw);
- unsigned long flag;
if (!ppsc->fwctrl_lps)
return;
@@ -449,7 +394,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
if (mac->link_state != MAC80211_LINKED)
return;
- spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+ spin_lock(&rtlpriv->locks.lps_lock);
/* Idle for a while if we connect to AP a while ago. */
if (mac->cnt_after_linked >= 2) {
@@ -461,7 +406,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
}
}
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+ spin_unlock(&rtlpriv->locks.lps_lock);
}
/*Leave the leisure power save mode.*/
@@ -470,9 +415,8 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- unsigned long flag;
- spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
+ spin_lock(&rtlpriv->locks.lps_lock);
if (ppsc->fwctrl_lps) {
if (ppsc->dot11_psmode != EACTIVE) {
@@ -493,7 +437,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
rtl_lps_set_psmode(hw, EACTIVE);
}
}
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+ spin_unlock(&rtlpriv->locks.lps_lock);
}
/* For sw LPS*/
@@ -582,7 +526,6 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- unsigned long flag;
if (!rtlpriv->psc.swctrl_lps)
return;
@@ -595,9 +538,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
- rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS, false);
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+ spin_lock(&rtlpriv->locks.lps_lock);
+ rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
+ spin_unlock(&rtlpriv->locks.lps_lock);
}
void rtl_swlps_rfon_wq_callback(void *data)
@@ -614,7 +557,6 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- unsigned long flag;
u8 sleep_intv;
if (!rtlpriv->psc.sw_ps_enabled)
@@ -631,16 +573,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
if (rtlpriv->link_info.busytraffic)
return;
- spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
- if (rtlpriv->psc.rfchange_inprogress) {
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
- return;
- }
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
-
- spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
- rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS, false);
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
+ spin_lock(&rtlpriv->locks.lps_lock);
+ rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
+ spin_unlock(&rtlpriv->locks.lps_lock);
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
!RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
index e3bf8984037..84628e6041c 100644
--- a/drivers/net/wireless/rtlwifi/ps.h
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -33,8 +33,7 @@
#define MAX_SW_LPS_SLEEP_INTV 5
bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
- enum rf_pwrstate state_toset, u32 changesource,
- bool protect_or_not);
+ enum rf_pwrstate state_toset, u32 changesource);
bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
bool rtl_ps_disable_nic(struct ieee80211_hw *hw);
void rtl_ips_nic_off(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index 30da68a7778..539df66dce0 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -200,7 +200,7 @@ static void rtl_tx_status(void *ppriv,
if (sta) {
/* Check if aggregation has to be enabled for this tid */
sta_entry = (struct rtl_sta_info *) sta->drv_priv;
- if ((sta->ht_cap.ht_supported == true) &&
+ if ((sta->ht_cap.ht_supported) &&
!(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
if (ieee80211_is_data_qos(fc)) {
u8 tid = rtl_get_tid(skb);
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
index 8f6718f163e..9fedb1f7091 100644
--- a/drivers/net/wireless/rtlwifi/regd.c
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -303,22 +303,6 @@ static void _rtl_reg_apply_world_flags(struct wiphy *wiphy,
return;
}
-static void _rtl_dump_channel_map(struct wiphy *wiphy)
-{
- enum ieee80211_band band;
- struct ieee80211_supported_band *sband;
- struct ieee80211_channel *ch;
- unsigned int i;
-
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
- if (!wiphy->bands[band])
- continue;
- sband = wiphy->bands[band];
- for (i = 0; i < sband->n_channels; i++)
- ch = &sband->channels[i];
- }
-}
-
static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct rtl_regulatory *reg)
@@ -336,8 +320,6 @@ static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
break;
}
- _rtl_dump_channel_map(wiphy);
-
return 0;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index 97183829b9b..a00774e7090 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -474,7 +474,7 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- if (mac->act_scanning == true)
+ if (mac->act_scanning)
return;
if (mac->link_state >= MAC80211_LINKED)
@@ -670,7 +670,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
u8 ofdm_index[2], cck_index = 0, ofdm_index_old[2], cck_index_old = 0;
int i;
bool is2t = IS_92C_SERIAL(rtlhal->version);
- u8 txpwr_level[2] = {0, 0};
+ s8 txpwr_level[2] = {0, 0};
u8 ofdm_min_index = 6, rf;
rtlpriv->dm.txpower_trackinginit = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 50303e1adff..49a064bdbce 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -27,6 +27,8 @@
*
*****************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/firmware.h>
#include "../wifi.h"
#include "../pci.h"
@@ -224,8 +226,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
u32 fwsize;
enum version_8192c version = rtlhal->version;
- printk(KERN_INFO "rtl8192c: Loading firmware file %s\n",
- rtlpriv->cfg->fw_name);
+ pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
if (!rtlhal->pfirmware)
return 1;
@@ -546,7 +547,6 @@ static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring;
struct rtl_tx_desc *pdesc;
- u8 own;
unsigned long flags;
struct sk_buff *pskb = NULL;
@@ -559,7 +559,6 @@ static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
pdesc = &ring->desc[0];
- own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
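
Editorial note: the fw_common.c hunk defines pr_fmt before any include so that pr_info()/pr_err() automatically carry the module name, replacing the hand-written "rtl8192c:" prefixes. A minimal sketch of the same convention, assuming a stand-in module called demo:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must come before the printk headers */

#include <linux/module.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
	pr_info("loading firmware\n");	/* logs "demo: loading firmware" */
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");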
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index d2cc81586a6..3b11642d3f7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -1253,10 +1253,9 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
const u32 retrycount = 2;
- u32 bbvalue;
-
if (t == 0) {
- bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
+ /* dummy read */
+ rtl_get_bbreg(hw, 0x800, MASKDWORD);
_rtl92c_phy_save_adda_registers(hw, adda_reg,
rtlphy->adda_backup, 16);
@@ -1762,8 +1761,7 @@ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
long result[4][8];
u8 i, final_candidate;
bool patha_ok, pathb_ok;
- long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
- reg_ecc, reg_tmp = 0;
+ long reg_e94, reg_e9c, reg_ea4, reg_eb4, reg_ebc, reg_ec4, reg_tmp = 0;
bool is12simular, is13simular, is23simular;
bool start_conttx = false, singletone = false;
u32 iqk_bb_reg[10] = {
@@ -1841,21 +1839,17 @@ void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
reg_e94 = result[i][0];
reg_e9c = result[i][1];
reg_ea4 = result[i][2];
- reg_eac = result[i][3];
reg_eb4 = result[i][4];
reg_ebc = result[i][5];
reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
}
if (final_candidate != 0xff) {
rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
reg_ea4 = result[final_candidate][2];
- reg_eac = result[final_candidate][3];
rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
reg_ec4 = result[final_candidate][6];
- reg_ecc = result[final_candidate][7];
patha_ok = pathb_ok = true;
} else {
rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index defb4370cf7..a3deaefa788 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -488,7 +488,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_CORRECT_TSF:{
u8 btype_ibss = ((u8 *) (val))[0];
- if (btype_ibss == true)
+ if (btype_ibss)
_rtl92ce_stop_tx_beacon(hw);
_rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(3));
@@ -500,7 +500,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
_rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
- if (btype_ibss == true)
+ if (btype_ibss)
_rtl92ce_resume_tx_beacon(hw);
break;
@@ -763,11 +763,9 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
u8 reg_bw_opmode;
- u32 reg_ratr, reg_prsr;
+ u32 reg_prsr;
reg_bw_opmode = BW_OPMODE_20MHZ;
- reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
- RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8);
@@ -1123,7 +1121,7 @@ void rtl92ce_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
- if (check_bssid == true) {
+ if (check_bssid) {
reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
(u8 *) (&reg_rcr));
@@ -1185,7 +1183,6 @@ void rtl92ce_enable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
- rtlpci->irq_enabled = true;
}
void rtl92ce_disable_interrupt(struct ieee80211_hw *hw)
@@ -1195,7 +1192,7 @@ void rtl92ce_disable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
- rtlpci->irq_enabled = false;
+ synchronize_irq(rtlpci->pdev->irq);
}
static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
@@ -1586,7 +1583,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag = false;
}
- if (rtlefuse->autoload_failflag == true)
+ if (rtlefuse->autoload_failflag)
return;
for (i = 0; i < 6; i += 2) {
@@ -1595,7 +1592,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- (MAC_FMT "\n", MAC_ARG(rtlefuse->dev_addr)));
+ ("%pM\n", rtlefuse->dev_addr));
_rtl92ce_read_txpower_info_from_hwpg(hw,
rtlefuse->autoload_failflag,
@@ -1969,7 +1966,7 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+ enum rf_pwrstate e_rfpowerstate_toset;
u8 u1tmp;
bool actuallyset = false;
unsigned long flag;
@@ -1989,15 +1986,13 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
}
- cur_rfstate = ppsc->rfpwr_state;
-
rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, rtl_read_byte(rtlpriv,
REG_MAC_PINMUX_CFG)&~(BIT(3)));
u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
- if ((ppsc->hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) {
+ if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
("GPIOChangeRF - HW Radio ON, RF ON\n"));
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
index 9dd1ed7b642..28a1a707d09 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
@@ -84,7 +84,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
- if (pcipriv->ledctl.led_opendrain == true)
+ if (pcipriv->ledctl.led_opendrain)
rtl_write_byte(rtlpriv, REG_LEDCFG2,
(ledcfg | BIT(1) | BIT(5) | BIT(6)));
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index abe0fcc7536..592a10ac592 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -46,13 +46,12 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
"rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask));
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
original_value = _rtl92c_phy_rf_serial_read(hw,
@@ -65,7 +64,7 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
("regaddr(%#x), rfpath(%#x), "
@@ -120,13 +119,12 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u32 original_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, rfpath));
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
if (bitmask != RFREG_OFFSET_MASK) {
@@ -153,7 +151,7 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
_rtl92c_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
}
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
"bitmask(%#x), data(%#x), "
@@ -281,7 +279,6 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
{
int i;
- bool rtstatus = true;
u32 *radioa_array_table;
u32 *radiob_array_table;
u16 radioa_arraylen, radiob_arraylen;
@@ -308,7 +305,6 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
("Radio_B:RTL8192CE_RADIOB_1TARRAY\n"));
}
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath));
- rtstatus = true;
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
@@ -521,7 +517,6 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
u8 i, queue_id;
struct rtl8192_tx_ring *ring = NULL;
- ppsc->set_rfpowerstate_inprogress = true;
switch (rfpwr_state) {
case ERFON:{
if ((ppsc->rfpwr_state == ERFOFF) &&
@@ -617,7 +612,6 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
if (bresult)
ppsc->rfpwr_state = rfpwr_state;
- ppsc->set_rfpowerstate_inprogress = false;
return bresult;
}
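
Editorial note: the phy.c hunks replace spin_lock_irqsave() on rf_lock with plain spin_lock(); that is only safe when the lock is never taken from interrupt context, which appears to be the assumption here. A short sketch of both forms, with illustrative names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(rf_lock);

static void rf_access_process_context_only(void)
{
	spin_lock(&rf_lock);		/* fine when no IRQ path takes rf_lock */
	/* ... read or write the RF register ... */
	spin_unlock(&rf_lock);
}

static void rf_access_shared_with_irq(void)
{
	unsigned long flags;

	spin_lock_irqsave(&rf_lock, flags);	/* required if a handler can
						 * also take rf_lock */
	/* ... read or write the RF register ... */
	spin_unlock_irqrestore(&rf_lock, flags);
}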
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
index 598cecc63f4..ba5ff0411f0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -1074,10 +1074,10 @@
#define _SRL(x) (((x) & 0x3F) << 8)
#define _SIFS_CCK_CTX(x) ((x) & 0xFF)
-#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8);
+#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8)
#define _SIFS_OFDM_CTX(x) ((x) & 0xFF)
-#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8);
+#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8)
#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8)
@@ -1203,7 +1203,9 @@
#define EPROM_CMD_CONFIG 0x3
#define EPROM_CMD_LOAD 1
+#define HWSET_MAX_SIZE 128
#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE
+#define EFUSE_MAX_SECTION 16
#define WL_HWPDN_EN BIT(0)
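
Editorial note: the reg.h hunk drops a stray semicolon from the _SIFS_*_TRX() macro bodies. A semicolon inside a function-like macro terminates the statement at the point of expansion, so the macro cannot be used in the middle of an expression. A small standalone example of the pitfall, not taken from the driver:

#include <stdio.h>

#define SHIFT_BAD(x)	(((x) & 0xFF) << 8);	/* trailing ';' baked into the expansion */
#define SHIFT_GOOD(x)	(((x) & 0xFF) << 8)

int main(void)
{
	/* int v = SHIFT_BAD(3) | 1;   does not compile: the ';' ends the
	 *                             statement before the '|' is reached  */
	int v = SHIFT_GOOD(3) | 1;

	printf("%#x\n", v);
	return 0;
}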
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
index 90d0f2cf3b2..d3b01e6023b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
@@ -76,7 +76,7 @@ void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
if (rtlefuse->eeprom_regulatory != 0)
turbo_scanoff = true;
- if (mac->act_scanning == true) {
+ if (mac->act_scanning) {
tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 54b2bd53d36..230bbe900d8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -225,7 +225,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct phy_sts_cck_8192s_t *cck_buf;
- s8 rx_pwr_all, rx_pwr[4];
+ s8 rx_pwr_all = 0, rx_pwr[4];
u8 evm, pwdb_all, rf_rx_num = 0;
u8 i, max_spatial_stream;
u32 rssi, total_rssi = 0;
@@ -592,7 +592,6 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr;
u8 *tmp_buf;
u8 *praddr;
- u8 *psaddr;
__le16 fc;
u16 type, c_fc;
bool packet_matchbssid, packet_toself, packet_beacon;
@@ -604,7 +603,6 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
c_fc = le16_to_cpu(fc);
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
- psaddr = hdr->addr2;
packet_matchbssid =
((IEEE80211_FTYPE_CTL != type) &&
@@ -680,7 +678,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
GET_RX_DESC_PAGGR(pdesc));
rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
- if (phystatus == true) {
+ if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
stats->rx_bufshift);
@@ -929,9 +927,10 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
{
- if (istx == true) {
+ if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
+ wmb();
SET_TX_DESC_OWN(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR:
@@ -945,6 +944,7 @@ void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
} else {
switch (desc_name) {
case HW_DESC_RXOWN:
+ wmb();
SET_RX_DESC_OWN(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR:
@@ -968,7 +968,7 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
{
u32 ret = 0;
- if (istx == true) {
+ if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
ret = GET_TX_DESC_OWN(p_desc);
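
Editorial note: the trx.c hunk inserts wmb() before the OWN bit is set, so every other descriptor field is globally visible before ownership passes to the DMA engine. A sketch of the idiom; the descriptor layout and names below are made up for illustration:

#include <linux/types.h>
#include <asm/barrier.h>

struct demo_desc {
	u32 buf_addr;
	u32 len;
	u32 own;	/* 1 = descriptor belongs to the hardware */
};

static void demo_hand_to_hw(struct demo_desc *d, u32 addr, u32 len)
{
	d->buf_addr = addr;
	d->len = len;
	wmb();		/* order the writes above before the ownership flip */
	d->own = 1;
}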
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 52e2af58c1e..814c05df51e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -27,6 +27,8 @@
*
*****************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "../wifi.h"
#include "../efuse.h"
#include "../base.h"
@@ -337,7 +339,7 @@ static void _rtl92cu_read_board_type(struct ieee80211_hw *hw, u8 *contents)
rtlefuse->board_type = boardType;
if (IS_HIGHT_PA(rtlefuse->board_type))
rtlefuse->external_pa = 1;
- printk(KERN_INFO "rtl8192cu: Board Type %x\n", rtlefuse->board_type);
+ pr_info("Board Type %x\n", rtlefuse->board_type);
#ifdef CONFIG_ANTENNA_DIVERSITY
/* Antenna Diversity setting. */
@@ -346,8 +348,7 @@ static void _rtl92cu_read_board_type(struct ieee80211_hw *hw, u8 *contents)
else
rtl_efuse->antenna_cfg = registry_par->antdiv_cfg; /* 0:OFF, */
- printk(KERN_INFO "rtl8192cu: Antenna Config %x\n",
- rtl_efuse->antenna_cfg);
+ pr_info("Antenna Config %x\n", rtl_efuse->antenna_cfg);
#endif
}
@@ -384,71 +385,57 @@ static void _update_bt_param(_adapter *padapter)
pbtpriv->bBTNonTrafficModeSet = _FALSE;
pbtpriv->CurrentState = 0;
pbtpriv->PreviousState = 0;
- printk(KERN_INFO "rtl8192cu: BT Coexistance = %s\n",
- (pbtpriv->BT_Coexist == _TRUE) ? "enable" : "disable");
+ pr_info("BT Coexistance = %s\n",
+ (pbtpriv->BT_Coexist == _TRUE) ? "enable" : "disable");
if (pbtpriv->BT_Coexist) {
if (pbtpriv->BT_Ant_Num == Ant_x2)
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
- "Ant_Num = Antx2\n");
+ pr_info("BlueTooth BT_Ant_Num = Antx2\n");
else if (pbtpriv->BT_Ant_Num == Ant_x1)
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
- "Ant_Num = Antx1\n");
+ pr_info("BlueTooth BT_Ant_Num = Antx1\n");
switch (pbtpriv->BT_CoexistType) {
case BT_2Wire:
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
- "CoexistType = BT_2Wire\n");
+ pr_info("BlueTooth BT_CoexistType = BT_2Wire\n");
break;
case BT_ISSC_3Wire:
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
- "CoexistType = BT_ISSC_3Wire\n");
+ pr_info("BlueTooth BT_CoexistType = BT_ISSC_3Wire\n");
break;
case BT_Accel:
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
- "CoexistType = BT_Accel\n");
+ pr_info("BlueTooth BT_CoexistType = BT_Accel\n");
break;
case BT_CSR_BC4:
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
- "CoexistType = BT_CSR_BC4\n");
+ pr_info("BlueTooth BT_CoexistType = BT_CSR_BC4\n");
break;
case BT_CSR_BC8:
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
- "CoexistType = BT_CSR_BC8\n");
+ pr_info("BlueTooth BT_CoexistType = BT_CSR_BC8\n");
break;
case BT_RTL8756:
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
- "CoexistType = BT_RTL8756\n");
+ pr_info("BlueTooth BT_CoexistType = BT_RTL8756\n");
break;
default:
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_"
- "CoexistType = Unknown\n");
+ pr_info("BlueTooth BT_CoexistType = Unknown\n");
break;
}
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_Ant_isolation = %d\n",
- pbtpriv->BT_Ant_isolation);
+ pr_info("BlueTooth BT_Ant_isolation = %d\n",
+ pbtpriv->BT_Ant_isolation);
switch (pbtpriv->BT_Service) {
case BT_OtherAction:
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
- "BT_OtherAction\n");
+ pr_info("BlueTooth BT_Service = BT_OtherAction\n");
break;
case BT_SCO:
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
- "BT_SCO\n");
+ pr_info("BlueTooth BT_Service = BT_SCO\n");
break;
case BT_Busy:
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
- "BT_Busy\n");
+ pr_info("BlueTooth BT_Service = BT_Busy\n");
break;
case BT_OtherBusy:
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
- "BT_OtherBusy\n");
+ pr_info("BlueTooth BT_Service = BT_OtherBusy\n");
break;
default:
- printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = "
- "BT_Idle\n");
+ pr_info("BlueTooth BT_Service = BT_Idle\n");
break;
}
- printk(KERN_INFO "rtl8192cu: BT_RadioSharedType = 0x%x\n",
- pbtpriv->BT_RadioSharedType);
+ pr_info("BT_RadioSharedType = 0x%x\n",
+ pbtpriv->BT_RadioSharedType);
}
}
@@ -520,13 +507,13 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
rtlefuse->autoload_failflag = false;
}
- if (rtlefuse->autoload_failflag == true)
+ if (rtlefuse->autoload_failflag)
return;
for (i = 0; i < 6; i += 2) {
usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
*((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
}
- printk(KERN_INFO "rtl8192cu: MAC address: %pM\n", rtlefuse->dev_addr);
+ pr_info("MAC address: %pM\n", rtlefuse->dev_addr);
_rtl92cu_read_txpower_info_from_hwpg(hw,
rtlefuse->autoload_failflag, hwinfo);
rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
@@ -665,7 +652,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_APS_FSMCO, value16);
do {
if (!(rtl_read_word(rtlpriv, REG_APS_FSMCO) & APFM_ONMAC)) {
- printk(KERN_INFO "rtl8192cu: MAC auto ON okay!\n");
+ pr_info("MAC auto ON okay!\n");
break;
}
if (pollingCount++ > 100) {
@@ -819,7 +806,7 @@ static void _rtl92cu_init_chipN_one_out_ep_priority(struct ieee80211_hw *hw,
}
_rtl92c_init_chipN_reg_priority(hw, value, value, value, value,
value, value);
- printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel);
+ pr_info("Tx queue select: 0x%02x\n", queue_sel);
}
static void _rtl92cu_init_chipN_two_out_ep_priority(struct ieee80211_hw *hw,
@@ -863,7 +850,7 @@ static void _rtl92cu_init_chipN_two_out_ep_priority(struct ieee80211_hw *hw,
hiQ = valueHi;
}
_rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
- printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel);
+ pr_info("Tx queue select: 0x%02x\n", queue_sel);
}
static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw,
@@ -1594,7 +1581,7 @@ static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
default:
break;
}
- if (filterout_non_associated_bssid == true) {
+ if (filterout_non_associated_bssid) {
if (IS_NORMAL_CHIP(rtlhal->version)) {
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
@@ -2155,7 +2142,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_CORRECT_TSF:{
u8 btype_ibss = ((u8 *) (val))[0];
- if (btype_ibss == true)
+ if (btype_ibss)
_rtl92cu_stop_tx_beacon(hw);
_rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3));
rtl_write_dword(rtlpriv, REG_TSFTR, (u32)(mac->tsf &
@@ -2163,7 +2150,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_dword(rtlpriv, REG_TSFTR + 4,
(u32)((mac->tsf >> 32) & 0xffffffff));
_rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
- if (btype_ibss == true)
+ if (btype_ibss)
_rtl92cu_resume_tx_beacon(hw);
break;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
index 332c74348a6..2ff9d8314e7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c
@@ -82,7 +82,7 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
- if (usbpriv->ledctl.led_opendrain == true)
+ if (usbpriv->ledctl.led_opendrain)
rtl_write_byte(rtlpriv, REG_LEDCFG2,
(ledcfg | BIT(1) | BIT(5) | BIT(6)));
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index f8514cba17b..194fc693c1f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -26,6 +26,9 @@
* Larry Finger <Larry.Finger@lwfinger.net>
*
****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include "../wifi.h"
@@ -213,14 +216,14 @@ bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
for (i = 0; i < (boundary - 1); i++) {
rst = rtl92c_llt_write(hw, i , i + 1);
if (true != rst) {
- printk(KERN_ERR "===> %s #1 fail\n", __func__);
+ pr_err("===> %s #1 fail\n", __func__);
return rst;
}
}
/* end of list */
rst = rtl92c_llt_write(hw, (boundary - 1), 0xFF);
if (true != rst) {
- printk(KERN_ERR "===> %s #2 fail\n", __func__);
+ pr_err("===> %s #2 fail\n", __func__);
return rst;
}
/* Make the other pages as ring buffer
@@ -231,14 +234,14 @@ bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
for (i = boundary; i < LLT_LAST_ENTRY_OF_TX_PKT_BUFFER; i++) {
rst = rtl92c_llt_write(hw, i, (i + 1));
if (true != rst) {
- printk(KERN_ERR "===> %s #3 fail\n", __func__);
+ pr_err("===> %s #3 fail\n", __func__);
return rst;
}
}
/* Let last entry point to the start entry of ring buffer */
rst = rtl92c_llt_write(hw, LLT_LAST_ENTRY_OF_TX_PKT_BUFFER, boundary);
if (true != rst) {
- printk(KERN_ERR "===> %s #4 fail\n", __func__);
+ pr_err("===> %s #4 fail\n", __func__);
return rst;
}
return rst;
@@ -380,13 +383,11 @@ void rtl92c_enable_interrupt(struct ieee80211_hw *hw)
0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] &
0xFFFFFFFF);
- rtlpci->irq_enabled = true;
} else {
rtl_write_dword(rtlpriv, REG_HIMR, rtlusb->irq_mask[0] &
0xFFFFFFFF);
rtl_write_dword(rtlpriv, REG_HIMRE, rtlusb->irq_mask[1] &
0xFFFFFFFF);
- rtlusb->irq_enabled = true;
}
}
@@ -398,16 +399,9 @@ void rtl92c_init_interrupt(struct ieee80211_hw *hw)
void rtl92c_disable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
- if (IS_HARDWARE_TYPE_8192CE(rtlhal))
- rtlpci->irq_enabled = false;
- else if (IS_HARDWARE_TYPE_8192CU(rtlhal))
- rtlusb->irq_enabled = false;
}
void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
@@ -1113,7 +1107,6 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr;
u8 *tmp_buf;
u8 *praddr;
- u8 *psaddr;
__le16 fc;
u16 type, cpu_fc;
bool packet_matchbssid, packet_toself, packet_beacon;
@@ -1124,7 +1117,6 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
cpu_fc = le16_to_cpu(fc);
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
- psaddr = hdr->addr2;
packet_matchbssid =
((IEEE80211_FTYPE_CTL != type) &&
(!compare_ether_addr(mac->bssid,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 9a3d0239e27..72852900df8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -470,7 +470,6 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
u8 i, queue_id;
struct rtl8192_tx_ring *ring = NULL;
- ppsc->set_rfpowerstate_inprogress = true;
switch (rfpwr_state) {
case ERFON:
if ((ppsc->rfpwr_state == ERFOFF) &&
@@ -590,7 +589,6 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
if (bresult)
ppsc->rfpwr_state = rfpwr_state;
- ppsc->set_rfpowerstate_inprogress = false;
return bresult;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index c7576ec4744..17a8e962851 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -82,7 +82,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
(rtlefuse->external_pa))
turbo_scanoff = true;
}
- if (mac->act_scanning == true) {
+ if (mac->act_scanning) {
tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
if (turbo_scanoff) {
@@ -104,7 +104,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
tx_agc[RF90_PATH_A] = 0x10101010;
tx_agc[RF90_PATH_B] = 0x10101010;
} else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
- TXHIGHPWRLEVEL_LEVEL1) {
+ TXHIGHPWRLEVEL_LEVEL2) {
tx_agc[RF90_PATH_A] = 0x00000000;
tx_agc[RF90_PATH_B] = 0x00000000;
} else{
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index bee7c1480f6..ef63c0df006 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -53,6 +53,8 @@ MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ const struct firmware *firmware;
+ int err;
rtlpriv->dm.dm_initialgain_enable = 1;
rtlpriv->dm.dm_flag = 0;
@@ -64,6 +66,24 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
("Can't alloc buffer for fw.\n"));
return 1;
}
+ /* request fw */
+ err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
+ rtlpriv->io.dev);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Failed to request firmware!\n"));
+ return 1;
+ }
+ if (firmware->size > 0x4000) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Firmware is too big!\n"));
+ release_firmware(firmware);
+ return 1;
+ }
+ memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
+ rtlpriv->rtlhal.fwsize = firmware->size;
+ release_firmware(firmware);
+
return 0;
}
@@ -261,6 +281,8 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)},
/* 8188CE-VAU USB minCard (b/g mode only) */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
+ /* 8188RU in Alfa AWUS036NHR */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
/* 8188 Combo for BC4 */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
@@ -278,24 +300,28 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
{RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
/* HP - Lite-On ,8188CUS Slim Combo */
{RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
{RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
{RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
{RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
{RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
{RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
- {RTL_USB_DEVICE(0x3358, 0x13d3, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
+ {RTL_USB_DEVICE(0x13d3, 0x3358, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/
/* Russian customer -Azwave (8188CE-VAU b/g mode only) */
- {RTL_USB_DEVICE(0x3359, 0x13d3, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(0x13d3, 0x3359, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(0x4855, 0x0090, rtl92cu_hal_cfg)}, /* Feixun */
+ {RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */
+ {RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */
/****** 8192CU ********/
{RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
{RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
- {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Abocom -Abocom*/
{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
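
Editorial note: the sw.c hunk makes rtl92cu_init_sw_vars() request the firmware up front, bounds-check its size, copy it into the driver buffer, and release it; it also adds new USB IDs and fixes two entries that had vendor and product swapped (the vendor ID belongs first). A sketch of the request/copy/release pattern; demo_load_fw() and its size cap are illustrative, not part of the patch:

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/errno.h>

static int demo_load_fw(struct device *dev, void *buf, size_t bufsize,
			const char *name)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, name, dev);
	if (err)
		return err;		/* file missing or userspace helper failed */

	if (fw->size > bufsize) {
		release_firmware(fw);
		return -EINVAL;		/* refuse an oversized image */
	}

	memcpy(buf, fw->data, fw->size);	/* keep a private copy */
	release_firmware(fw);			/* fw->data is invalid after this */
	return 0;
}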
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 3a92ba3c4a1..906e7aa55bc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -342,7 +342,7 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
(u8)GET_RX_DESC_RX_MCS(pdesc),
(bool)GET_RX_DESC_PAGGR(pdesc));
rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
- if (phystatus == true) {
+ if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
p_drvinfo);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/Makefile b/drivers/net/wireless/rtlwifi/rtl8192de/Makefile
new file mode 100644
index 00000000000..e3213c8264b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/Makefile
@@ -0,0 +1,14 @@
+rtl8192de-objs := \
+ dm.o \
+ fw.o \
+ hw.o \
+ led.o \
+ phy.o \
+ rf.o \
+ sw.o \
+ table.o \
+ trx.o
+
+obj-$(CONFIG_RTL8192DE) += rtl8192de.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/rtlwifi/rtl8192de/def.h
new file mode 100644
index 00000000000..f0f5f9bfbb7
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/def.h
@@ -0,0 +1,269 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92D_DEF_H__
+#define __RTL92D_DEF_H__
+
+/* Min Spacing related settings. */
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+#define RF6052_MAX_TX_PWR 0x3F
+#define RF6052_MAX_REG 0x3F
+#define RF6052_MAX_PATH 2
+
+#define HAL_RETRY_LIMIT_INFRA 48
+#define HAL_RETRY_LIMIT_AP_ADHOC 7
+
+#define PHY_RSSI_SLID_WIN_MAX 100
+#define PHY_LINKQUALITY_SLID_WIN_MAX 20
+#define PHY_BEACON_RSSI_SLID_WIN_MAX 10
+
+#define RESET_DELAY_8185 20
+
+#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
+#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
+
+#define NUM_OF_FIRMWARE_QUEUE 10
+#define NUM_OF_PAGES_IN_FW 0x100
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1
+
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
+
+#define MAX_LINES_HWCONFIG_TXT 1000
+#define MAX_BYTES_LINE_HWCONFIG_TXT 256
+
+#define SW_THREE_WIRE 0
+#define HW_THREE_WIRE 2
+
+#define BT_DEMO_BOARD 0
+#define BT_QA_BOARD 1
+#define BT_FPGA 2
+
+#define RX_SMOOTH_FACTOR 20
+
+#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
+#define HAL_PRIME_CHNL_OFFSET_LOWER 1
+#define HAL_PRIME_CHNL_OFFSET_UPPER 2
+
+#define MAX_H2C_QUEUE_NUM 10
+
+#define RX_MPDU_QUEUE 0
+#define RX_CMD_QUEUE 1
+#define RX_MAX_QUEUE 2
+
+#define C2H_RX_CMD_HDR_LEN 8
+#define GET_C2H_CMD_CMD_LEN(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 0, 16)
+#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 16, 8)
+#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 24, 7)
+#define GET_C2H_CMD_CONTINUE(__prxhdr) \
+ LE_BITS_TO_4BYTE((__prxhdr), 31, 1)
+#define GET_C2H_CMD_CONTENT(__prxhdr) \
+ ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN)
+
+#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8)
+#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8)
+#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16)
+#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5)
+#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1)
+#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5)
+#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1)
+#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
+#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \
+ LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
+
+/*
+ * 92D chip ver:
+ * BIT8: IS 92D
+ * BIT9: single phy
+ * BIT10: C-cut
+ * BIT11: D-cut
+ */
+
+/* Chip specific */
+#define CHIP_92C BIT(0)
+#define CHIP_92C_1T2R BIT(1)
+#define CHIP_8723 BIT(2) /* RTL8723 With BT feature */
+#define CHIP_8723_DRV_REV BIT(3) /* RTL8723 Driver Revised */
+#define NORMAL_CHIP BIT(4)
+#define CHIP_VENDOR_UMC BIT(5)
+#define CHIP_VENDOR_UMC_B_CUT BIT(6) /* Chip version for ECO */
+
+/* for 92D */
+#define CHIP_92D BIT(8)
+#define CHIP_92D_SINGLEPHY BIT(9)
+#define CHIP_92D_C_CUT BIT(10)
+#define CHIP_92D_D_CUT BIT(11)
+
+enum version_8192d {
+ VERSION_TEST_CHIP_88C = 0x00,
+ VERSION_TEST_CHIP_92C = 0x01,
+ VERSION_NORMAL_TSMC_CHIP_88C = 0x10,
+ VERSION_NORMAL_TSMC_CHIP_92C = 0x11,
+ VERSION_NORMAL_TSMC_CHIP_92C_1T2R = 0x13,
+ VERSION_NORMAL_UMC_CHIP_88C_A_CUT = 0x30,
+ VERSION_NORMAL_UMC_CHIP_92C_A_CUT = 0x31,
+ VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT = 0x33,
+ VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT = 0x34,
+ VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT = 0x3c,
+ VERSION_NORMAL_UMC_CHIP_88C_B_CUT = 0x70,
+ VERSION_NORMAL_UMC_CHIP_92C_B_CUT = 0x71,
+ VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT = 0x73,
+ VERSION_TEST_CHIP_92D_SINGLEPHY = 0x300,
+ VERSION_TEST_CHIP_92D_DUALPHY = 0x100,
+ VERSION_NORMAL_CHIP_92D_SINGLEPHY = 0x310,
+ VERSION_NORMAL_CHIP_92D_DUALPHY = 0x110,
+ VERSION_NORMAL_CHIP_92D_C_CUT_SINGLEPHY = 0x710,
+ VERSION_NORMAL_CHIP_92D_C_CUT_DUALPHY = 0x510,
+ VERSION_NORMAL_CHIP_92D_D_CUT_SINGLEPHY = 0xB10,
+ VERSION_NORMAL_CHIP_92D_D_CUT_DUALPHY = 0x910,
+};
+
+#define IS_92D_SINGLEPHY(version) \
+ ((version & CHIP_92D_SINGLEPHY) ? true : false)
+#define IS_92D_C_CUT(version) \
+ ((version & CHIP_92D_C_CUT) ? true : false)
+#define IS_92D_D_CUT(version) \
+ ((version & CHIP_92D_D_CUT) ? true : false)
+
+enum rf_optype {
+ RF_OP_BY_SW_3WIRE = 0,
+ RF_OP_BY_FW,
+ RF_OP_MAX
+};
+
+enum rtl_desc_qsel {
+ QSLT_BK = 0x2,
+ QSLT_BE = 0x0,
+ QSLT_VI = 0x5,
+ QSLT_VO = 0x7,
+ QSLT_BEACON = 0x10,
+ QSLT_HIGH = 0x11,
+ QSLT_MGNT = 0x12,
+ QSLT_CMD = 0x13,
+};
+
+enum rtl_desc92d_rate {
+ DESC92D_RATE1M = 0x00,
+ DESC92D_RATE2M = 0x01,
+ DESC92D_RATE5_5M = 0x02,
+ DESC92D_RATE11M = 0x03,
+
+ DESC92D_RATE6M = 0x04,
+ DESC92D_RATE9M = 0x05,
+ DESC92D_RATE12M = 0x06,
+ DESC92D_RATE18M = 0x07,
+ DESC92D_RATE24M = 0x08,
+ DESC92D_RATE36M = 0x09,
+ DESC92D_RATE48M = 0x0a,
+ DESC92D_RATE54M = 0x0b,
+
+ DESC92D_RATEMCS0 = 0x0c,
+ DESC92D_RATEMCS1 = 0x0d,
+ DESC92D_RATEMCS2 = 0x0e,
+ DESC92D_RATEMCS3 = 0x0f,
+ DESC92D_RATEMCS4 = 0x10,
+ DESC92D_RATEMCS5 = 0x11,
+ DESC92D_RATEMCS6 = 0x12,
+ DESC92D_RATEMCS7 = 0x13,
+ DESC92D_RATEMCS8 = 0x14,
+ DESC92D_RATEMCS9 = 0x15,
+ DESC92D_RATEMCS10 = 0x16,
+ DESC92D_RATEMCS11 = 0x17,
+ DESC92D_RATEMCS12 = 0x18,
+ DESC92D_RATEMCS13 = 0x19,
+ DESC92D_RATEMCS14 = 0x1a,
+ DESC92D_RATEMCS15 = 0x1b,
+ DESC92D_RATEMCS15_SG = 0x1c,
+ DESC92D_RATEMCS32 = 0x20,
+};
+
+enum channel_plan {
+ CHPL_FCC = 0,
+ CHPL_IC = 1,
+ CHPL_ETSI = 2,
+ CHPL_SPAIN = 3,
+ CHPL_FRANCE = 4,
+ CHPL_MKK = 5,
+ CHPL_MKK1 = 6,
+ CHPL_ISRAEL = 7,
+ CHPL_TELEC = 8,
+ CHPL_GLOBAL = 9,
+ CHPL_WORLD = 10,
+};
+
+struct phy_sts_cck_8192d {
+ u8 adc_pwdb_X[4];
+ u8 sq_rpt;
+ u8 cck_agc_rpt;
+};
+
+struct h2c_cmd_8192c {
+ u8 element_id;
+ u32 cmd_len;
+ u8 *p_cmdbuffer;
+};
+
+struct txpower_info {
+ u8 cck_index[RF6052_MAX_PATH][CHANNEL_GROUP_MAX];
+ u8 ht40_1sindex[RF6052_MAX_PATH][CHANNEL_GROUP_MAX];
+ u8 ht40_2sindexdiff[RF6052_MAX_PATH][CHANNEL_GROUP_MAX];
+ u8 ht20indexdiff[RF6052_MAX_PATH][CHANNEL_GROUP_MAX];
+ u8 ofdmindexdiff[RF6052_MAX_PATH][CHANNEL_GROUP_MAX];
+ u8 ht40maxoffset[RF6052_MAX_PATH][CHANNEL_GROUP_MAX];
+ u8 ht20maxoffset[RF6052_MAX_PATH][CHANNEL_GROUP_MAX];
+ u8 tssi_a[3]; /* 5GL/5GM/5GH */
+ u8 tssi_b[3];
+};
+
+#endif
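
Editorial note: the new def.h builds its C2H command accessors on LE_BITS_TO_4BYTE(), which reads a little-endian dword at the given pointer and extracts a bit field from it. A portable userspace sketch of that kind of accessor; get_le_bits() is a hypothetical helper and assumes bit_off + nbits stays within the 32-bit word, as the macros above do:

#include <stdint.h>
#include <stdio.h>

static uint32_t get_le_bits(const uint8_t *p, unsigned int bit_off, unsigned int nbits)
{
	/* assemble one little-endian dword, then shift and mask the field */
	uint32_t v = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		     ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);

	v >>= bit_off;
	return nbits >= 32 ? v : (v & ((1u << nbits) - 1));
}

int main(void)
{
	const uint8_t hdr[8] = { 0x10, 0x00, 0x42, 0x85, 0, 0, 0, 0 };

	printf("len=%u id=%u seq=%u\n",
	       (unsigned)get_le_bits(hdr, 0, 16),	/* bits 0..15  */
	       (unsigned)get_le_bits(hdr, 16, 8),	/* bits 16..23 */
	       (unsigned)get_le_bits(hdr, 24, 7));	/* bits 24..30 */
	return 0;
}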
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
new file mode 100644
index 00000000000..3cd0736fe8e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -0,0 +1,1355 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "fw.h"
+
+#define UNDEC_SM_PWDB entry_min_undecoratedsmoothed_pwdb
+
+struct dig_t de_digtable;
+
+static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = {
+ 0x7f8001fe, /* 0, +6.0dB */
+ 0x788001e2, /* 1, +5.5dB */
+ 0x71c001c7, /* 2, +5.0dB */
+ 0x6b8001ae, /* 3, +4.5dB */
+ 0x65400195, /* 4, +4.0dB */
+ 0x5fc0017f, /* 5, +3.5dB */
+ 0x5a400169, /* 6, +3.0dB */
+ 0x55400155, /* 7, +2.5dB */
+ 0x50800142, /* 8, +2.0dB */
+ 0x4c000130, /* 9, +1.5dB */
+ 0x47c0011f, /* 10, +1.0dB */
+ 0x43c0010f, /* 11, +0.5dB */
+ 0x40000100, /* 12, +0dB */
+ 0x3c8000f2, /* 13, -0.5dB */
+ 0x390000e4, /* 14, -1.0dB */
+ 0x35c000d7, /* 15, -1.5dB */
+ 0x32c000cb, /* 16, -2.0dB */
+ 0x300000c0, /* 17, -2.5dB */
+ 0x2d4000b5, /* 18, -3.0dB */
+ 0x2ac000ab, /* 19, -3.5dB */
+ 0x288000a2, /* 20, -4.0dB */
+ 0x26000098, /* 21, -4.5dB */
+ 0x24000090, /* 22, -5.0dB */
+ 0x22000088, /* 23, -5.5dB */
+ 0x20000080, /* 24, -6.0dB */
+ 0x1e400079, /* 25, -6.5dB */
+ 0x1c800072, /* 26, -7.0dB */
+ 0x1b00006c, /* 27. -7.5dB */
+ 0x19800066, /* 28, -8.0dB */
+ 0x18000060, /* 29, -8.5dB */
+ 0x16c0005b, /* 30, -9.0dB */
+ 0x15800056, /* 31, -9.5dB */
+ 0x14400051, /* 32, -10.0dB */
+ 0x1300004c, /* 33, -10.5dB */
+ 0x12000048, /* 34, -11.0dB */
+ 0x11000044, /* 35, -11.5dB */
+ 0x10000040, /* 36, -12.0dB */
+ 0x0f00003c, /* 37, -12.5dB */
+ 0x0e400039, /* 38, -13.0dB */
+ 0x0d800036, /* 39, -13.5dB */
+ 0x0cc00033, /* 40, -14.0dB */
+ 0x0c000030, /* 41, -14.5dB */
+ 0x0b40002d, /* 42, -15.0dB */
+};
+
+static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
+ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
+ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
+ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
+ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
+ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
+ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
+ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
+ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
+ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
+ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
+ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
+ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
+ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
+ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
+ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
+ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
+ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
+ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
+ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
+ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
+};
+
+static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
+ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
+ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
+ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
+ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
+ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
+ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
+ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
+ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
+ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
+ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
+ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
+ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
+ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
+ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
+ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
+ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
+ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
+ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
+ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
+ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
+};
+
+static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
+{
+ de_digtable.dig_enable_flag = true;
+ de_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
+ de_digtable.cur_igvalue = 0x20;
+ de_digtable.pre_igvalue = 0x0;
+ de_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+ de_digtable.presta_connectstate = DIG_STA_DISCONNECT;
+ de_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
+ de_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
+ de_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
+ de_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+ de_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+ de_digtable.rx_gain_range_max = DM_DIG_FA_UPPER;
+ de_digtable.rx_gain_range_min = DM_DIG_FA_LOWER;
+ de_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
+ de_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
+ de_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
+ de_digtable.pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
+ de_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
+ de_digtable.large_fa_hit = 0;
+ de_digtable.recover_cnt = 0;
+ de_digtable.forbidden_igi = DM_DIG_FA_LOWER;
+}
+
+static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
+{
+ u32 ret_value;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
+ unsigned long flag = 0;
+
+ /* hold ofdm counter */
+ rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */
+ rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /*hold page D counter */
+
+ ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, BMASKDWORD);
+ falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
+ falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, BMASKDWORD);
+ falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, BMASKDWORD);
+ falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
+ falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, BMASKDWORD);
+ falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
+ falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
+ falsealm_cnt->cnt_rate_illegal +
+ falsealm_cnt->cnt_crc8_fail +
+ falsealm_cnt->cnt_mcs_fail +
+ falsealm_cnt->cnt_fast_fsync_fail +
+ falsealm_cnt->cnt_sb_search_fail;
+
+ if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
+ /* hold cck counter */
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, BMASKBYTE0);
+ falsealm_cnt->cnt_cck_fail = ret_value;
+ ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, BMASKBYTE3);
+ falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ } else {
+ falsealm_cnt->cnt_cck_fail = 0;
+ }
+
+ /* reset false alarm counter registers */
+ falsealm_cnt->cnt_all = falsealm_cnt->cnt_fast_fsync_fail +
+ falsealm_cnt->cnt_sb_search_fail +
+ falsealm_cnt->cnt_parity_fail +
+ falsealm_cnt->cnt_rate_illegal +
+ falsealm_cnt->cnt_crc8_fail +
+ falsealm_cnt->cnt_mcs_fail +
+ falsealm_cnt->cnt_cck_fail;
+
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
+ /* update ofdm counter */
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
+ /* update page C counter */
+ rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 0);
+ /* update page D counter */
+ rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 0);
+ if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
+ /* reset cck counter */
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
+ /* enable cck counter */
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ }
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("Cnt_Fast_Fsync_fail = %x, "
+ "Cnt_SB_Search_fail = %x\n",
+ falsealm_cnt->cnt_fast_fsync_fail,
+ falsealm_cnt->cnt_sb_search_fail));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("Cnt_Parity_Fail = %x, "
+ "Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, "
+ "Cnt_Mcs_fail = %x\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail,
+ falsealm_cnt->cnt_mcs_fail));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+ ("Cnt_Ofdm_fail = %x, " "Cnt_Cck_fail = %x, "
+ "Cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail,
+ falsealm_cnt->cnt_all));
+}
+
+static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ /* Determine the minimum RSSI */
+ if ((mac->link_state < MAC80211_LINKED) &&
+ (rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
+ de_digtable.min_undecorated_pwdb_for_dm = 0;
+ RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ ("Not connected to any\n"));
+ }
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ de_digtable.min_undecorated_pwdb_for_dm =
+ rtlpriv->dm.UNDEC_SM_PWDB;
+ RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ ("AP Client PWDB = 0x%lx\n",
+ rtlpriv->dm.UNDEC_SM_PWDB));
+ } else {
+ de_digtable.min_undecorated_pwdb_for_dm =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ ("STA Default Port PWDB = 0x%x\n",
+ de_digtable.min_undecorated_pwdb_for_dm));
+ }
+ } else {
+ de_digtable.min_undecorated_pwdb_for_dm =
+ rtlpriv->dm.UNDEC_SM_PWDB;
+ RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ ("AP Ext Port or disconnet PWDB = 0x%x\n",
+ de_digtable.min_undecorated_pwdb_for_dm));
+ }
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("MinUndecoratedPWDBForDM =%d\n",
+ de_digtable.min_undecorated_pwdb_for_dm));
+}
+
+static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned long flag = 0;
+
+ if (de_digtable.cursta_connectctate == DIG_STA_CONNECT) {
+ if (de_digtable.pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
+ if (de_digtable.min_undecorated_pwdb_for_dm <= 25)
+ de_digtable.cur_cck_pd_state =
+ CCK_PD_STAGE_LOWRSSI;
+ else
+ de_digtable.cur_cck_pd_state =
+ CCK_PD_STAGE_HIGHRSSI;
+ } else {
+ if (de_digtable.min_undecorated_pwdb_for_dm <= 20)
+ de_digtable.cur_cck_pd_state =
+ CCK_PD_STAGE_LOWRSSI;
+ else
+ de_digtable.cur_cck_pd_state =
+ CCK_PD_STAGE_HIGHRSSI;
+ }
+ } else {
+ de_digtable.cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
+ }
+ if (de_digtable.pre_cck_pd_state != de_digtable.cur_cck_pd_state) {
+ if (de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0x83);
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ } else {
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0xcd);
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ }
+ de_digtable.pre_cck_pd_state = de_digtable.cur_cck_pd_state;
+ }
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("CurSTAConnectState=%s\n",
+ (de_digtable.cursta_connectctate == DIG_STA_CONNECT ?
+ "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT")));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("CCKPDStage=%s\n",
+ (de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
+ "Low RSSI " : "High RSSI ")));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("is92d single phy =%x\n",
+ IS_92D_SINGLEPHY(rtlpriv->rtlhal.version)));
+
+}
+
+void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("cur_igvalue = 0x%x, "
+ "pre_igvalue = 0x%x, backoff_val = %d\n",
+ de_digtable.cur_igvalue, de_digtable.pre_igvalue,
+ de_digtable.backoff_val));
+ if (de_digtable.dig_enable_flag == false) {
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("DIG is disabled\n"));
+ de_digtable.pre_igvalue = 0x17;
+ return;
+ }
+ if (de_digtable.pre_igvalue != de_digtable.cur_igvalue) {
+ rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
+ de_digtable.cur_igvalue);
+ rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
+ de_digtable.cur_igvalue);
+ de_digtable.pre_igvalue = de_digtable.cur_igvalue;
+ }
+}
+
+static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
+{
+ if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
+ (rtlpriv->mac80211.vendor == PEER_CISCO)) {
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+ ("IOT_PEER = CISCO\n"));
+ if (de_digtable.last_min_undecorated_pwdb_for_dm >= 50
+ && de_digtable.min_undecorated_pwdb_for_dm < 50) {
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+ ("Early Mode Off\n"));
+ } else if (de_digtable.last_min_undecorated_pwdb_for_dm <= 55 &&
+ de_digtable.min_undecorated_pwdb_for_dm > 55) {
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+ ("Early Mode On\n"));
+ }
+ } else if (!(rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL) & 0xf)) {
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("Early Mode On\n"));
+ }
+}
+
+static void rtl92d_dm_dig(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value_igi = de_digtable.cur_igvalue;
+ struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
+
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("==>\n"));
+ if (rtlpriv->rtlhal.earlymode_enable) {
+ rtl92d_early_mode_enabled(rtlpriv);
+ de_digtable.last_min_undecorated_pwdb_for_dm =
+ de_digtable.min_undecorated_pwdb_for_dm;
+ }
+ if (rtlpriv->dm.dm_initialgain_enable == false)
+ return;
+
+ /* because we will send data pkt when scanning
+ * this will cause some ap like gear-3700 wep TP
+ * lower if we retrun here, this is the diff of
+ * mac80211 driver vs ieee80211 driver */
+ /* if (rtlpriv->mac80211.act_scanning)
+ * return; */
+
+ /* Not STA mode return tmp */
+ if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+ return;
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("progress\n"));
+ /* Decide the current status and if modify initial gain or not */
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ de_digtable.cursta_connectctate = DIG_STA_CONNECT;
+ else
+ de_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
+
+ /* adjust initial gain according to false alarm counter */
+ if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
+ value_igi--;
+ else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH1)
+ value_igi += 0;
+ else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH2)
+ value_igi++;
+ else if (falsealm_cnt->cnt_all >= DM_DIG_FA_TH2)
+ value_igi += 2;
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+ ("dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
+ de_digtable.large_fa_hit, de_digtable.forbidden_igi));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+ ("dm_DIG() Before: Recover_cnt=%d, rx_gain_range_min=%x\n",
+ de_digtable.recover_cnt, de_digtable.rx_gain_range_min));
+
+ /* deal with abnorally large false alarm */
+ if (falsealm_cnt->cnt_all > 10000) {
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+ ("dm_DIG(): Abnornally false alarm case.\n"));
+
+ de_digtable.large_fa_hit++;
+ if (de_digtable.forbidden_igi < de_digtable.cur_igvalue) {
+ de_digtable.forbidden_igi = de_digtable.cur_igvalue;
+ de_digtable.large_fa_hit = 1;
+ }
+ if (de_digtable.large_fa_hit >= 3) {
+ if ((de_digtable.forbidden_igi + 1) > DM_DIG_MAX)
+ de_digtable.rx_gain_range_min = DM_DIG_MAX;
+ else
+ de_digtable.rx_gain_range_min =
+ (de_digtable.forbidden_igi + 1);
+ de_digtable.recover_cnt = 3600; /* 3600=2hr */
+ }
+ } else {
+ /* Recovery mechanism for IGI lower bound */
+ if (de_digtable.recover_cnt != 0) {
+ de_digtable.recover_cnt--;
+ } else {
+ if (de_digtable.large_fa_hit == 0) {
+ if ((de_digtable.forbidden_igi - 1) <
+ DM_DIG_FA_LOWER) {
+ de_digtable.forbidden_igi =
+ DM_DIG_FA_LOWER;
+ de_digtable.rx_gain_range_min =
+ DM_DIG_FA_LOWER;
+
+ } else {
+ de_digtable.forbidden_igi--;
+ de_digtable.rx_gain_range_min =
+ (de_digtable.forbidden_igi + 1);
+ }
+ } else if (de_digtable.large_fa_hit == 3) {
+ de_digtable.large_fa_hit = 0;
+ }
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+ ("dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
+ de_digtable.large_fa_hit, de_digtable.forbidden_igi));
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
+ ("dm_DIG() After: recover_cnt=%d, rx_gain_range_min=%x\n",
+ de_digtable.recover_cnt, de_digtable.rx_gain_range_min));
+
+ if (value_igi > DM_DIG_MAX)
+ value_igi = DM_DIG_MAX;
+ else if (value_igi < de_digtable.rx_gain_range_min)
+ value_igi = de_digtable.rx_gain_range_min;
+ de_digtable.cur_igvalue = value_igi;
+ rtl92d_dm_write_dig(hw);
+ if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
+ rtl92d_dm_cck_packet_detection_thresh(hw);
+ RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, ("<<==\n"));
+}
+
+static void rtl92d_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.dynamic_txpower_enable = true;
+ rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+}
+
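+/* Pick a dynamic tx power level (normal/level1/level2) from the undecorated
+ * smoothed PWDB, using separate thresholds for 2.4G and 5G, and reprogram the
+ * tx power whenever the level changes. */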
+static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ long undecorated_smoothed_pwdb;
+
+ if ((!rtlpriv->dm.dynamic_txpower_enable)
+ || rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+ return;
+ }
+ if ((mac->link_state < MAC80211_LINKED) &&
+ (rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+ ("Not connected to any\n"));
+ rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+ rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+ return;
+ }
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.UNDEC_SM_PWDB;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("IBSS Client PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb));
+ } else {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("STA Default Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb));
+ }
+ } else {
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.UNDEC_SM_PWDB;
+
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("AP Ext Port PWDB = 0x%lx\n",
+ undecorated_smoothed_pwdb));
+ }
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ if (undecorated_smoothed_pwdb >= 0x33) {
+ rtlpriv->dm.dynamic_txhighpower_lvl =
+ TXHIGHPWRLEVEL_LEVEL2;
+ RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
+ ("5G:TxHighPwrLevel_Level2 (TxPwr=0x0)\n"));
+ } else if ((undecorated_smoothed_pwdb < 0x33)
+ && (undecorated_smoothed_pwdb >= 0x2b)) {
+ rtlpriv->dm.dynamic_txhighpower_lvl =
+ TXHIGHPWRLEVEL_LEVEL1;
+ RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
+ ("5G:TxHighPwrLevel_Level1 (TxPwr=0x10)\n"));
+ } else if (undecorated_smoothed_pwdb < 0x2b) {
+ rtlpriv->dm.dynamic_txhighpower_lvl =
+ TXHIGHPWRLEVEL_NORMAL;
+ RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
+ ("5G:TxHighPwrLevel_Normal\n"));
+ }
+ } else {
+ if (undecorated_smoothed_pwdb >=
+ TX_POWER_NEAR_FIELD_THRESH_LVL2) {
+ rtlpriv->dm.dynamic_txhighpower_lvl =
+ TXHIGHPWRLEVEL_LEVEL2;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
+ } else
+ if ((undecorated_smoothed_pwdb <
+ (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3))
+ && (undecorated_smoothed_pwdb >=
+ TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
+
+ rtlpriv->dm.dynamic_txhighpower_lvl =
+ TXHIGHPWRLEVEL_LEVEL1;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
+ } else if (undecorated_smoothed_pwdb <
+ (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
+ rtlpriv->dm.dynamic_txhighpower_lvl =
+ TXHIGHPWRLEVEL_NORMAL;
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("TXHIGHPWRLEVEL_NORMAL\n"));
+ }
+ }
+ if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel));
+ rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
+ }
+ rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
+}
+
+static void rtl92d_dm_pwdb_monitor(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* AP, ADHOC and MESH modes return early */
+ if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+ return;
+ /* Indicate Rx signal strength to FW. */
+ if (rtlpriv->dm.useramask) {
+ u32 temp = rtlpriv->dm.undecorated_smoothed_pwdb;
+
+ temp <<= 16;
+ temp |= 0x100;
+ /* fw v12 cmdid 5: use max macid; for a NIC the
+ * default macid is 0 and the max macid is 1 */
+ rtl92d_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, (u8 *) (&temp));
+ } else {
+ rtl_write_byte(rtlpriv, 0x4fe,
+ (u8) rtlpriv->dm.undecorated_smoothed_pwdb);
+ }
+}
+
+void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.current_turbo_edca = false;
+ rtlpriv->dm.is_any_nonbepkts = false;
+ rtlpriv->dm.is_cur_rdlstate = false;
+}
+
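+/* EDCA turbo: while only BE traffic is seen, favour the downlink or uplink BE
+ * parameter set depending on whether unicast rx bytes exceed four times the
+ * tx bytes; otherwise restore the default BE AC parameters. */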
+static void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ static u64 last_txok_cnt;
+ static u64 last_rxok_cnt;
+ u64 cur_txok_cnt;
+ u64 cur_rxok_cnt;
+ u32 edca_be_ul = 0x5ea42b;
+ u32 edca_be_dl = 0x5ea42b;
+
+ if (mac->link_state != MAC80211_LINKED) {
+ rtlpriv->dm.current_turbo_edca = false;
+ goto exit;
+ }
+
+ /* Enable BEQ TxOP limit configuration in wireless G-mode. */
+ /* Check whether we should force the TXOP configuration on. */
+ if ((!rtlpriv->dm.disable_framebursting) &&
+ (rtlpriv->sec.pairwise_enc_algorithm == WEP40_ENCRYPTION ||
+ rtlpriv->sec.pairwise_enc_algorithm == WEP104_ENCRYPTION ||
+ rtlpriv->sec.pairwise_enc_algorithm == TKIP_ENCRYPTION)) {
+ /* Force TxOP limit to 0x005e for UL. */
+ if (!(edca_be_ul & 0xffff0000))
+ edca_be_ul |= 0x005e0000;
+ /* Force TxOP limit to 0x005e for DL. */
+ if (!(edca_be_dl & 0xffff0000))
+ edca_be_dl |= 0x005e0000;
+ }
+
+ if ((!rtlpriv->dm.is_any_nonbepkts) &&
+ (!rtlpriv->dm.disable_framebursting)) {
+ cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
+ cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
+ if (cur_rxok_cnt > 4 * cur_txok_cnt) {
+ if (!rtlpriv->dm.is_cur_rdlstate ||
+ !rtlpriv->dm.current_turbo_edca) {
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
+ edca_be_dl);
+ rtlpriv->dm.is_cur_rdlstate = true;
+ }
+ } else {
+ if (rtlpriv->dm.is_cur_rdlstate ||
+ !rtlpriv->dm.current_turbo_edca) {
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
+ edca_be_ul);
+ rtlpriv->dm.is_cur_rdlstate = false;
+ }
+ }
+ rtlpriv->dm.current_turbo_edca = true;
+ } else {
+ if (rtlpriv->dm.current_turbo_edca) {
+ u8 tmp = AC0_BE;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ (u8 *) (&tmp));
+ rtlpriv->dm.current_turbo_edca = false;
+ }
+ }
+
+exit:
+ rtlpriv->dm.is_any_nonbepkts = false;
+ last_txok_cnt = rtlpriv->stats.txbytesunicast;
+ last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
+}
+
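+/* Track rx gain against temperature: map the difference between the efuse
+ * thermal value and the last tracked value through a lookup table and write
+ * the result into bits [15:12] of RF register 0x3C on every RF path. */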
+static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 index_mapping[RX_INDEX_MAPPING_NUM] = {
+ 0x0f, 0x0f, 0x0d, 0x0c, 0x0b,
+ 0x0a, 0x09, 0x08, 0x07, 0x06,
+ 0x05, 0x04, 0x04, 0x03, 0x02
+ };
+ int i;
+ u32 u4tmp;
+
+ u4tmp = (index_mapping[(rtlpriv->efuse.eeprom_thermalmeter -
+ rtlpriv->dm.thermalvalue_rxgain)]) << 12;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("===> Rx Gain %x\n", u4tmp));
+ for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
+ rtl_set_rfreg(hw, i, 0x3C, BRFREGOFFSETMASK,
+ (rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp);
+}
+
+static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
+ u8 *cck_index_old)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i;
+ unsigned long flag = 0;
+ long temp_cck;
+
+ /* Query CCK default setting From 0xa24 */
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2,
+ BMASKDWORD) & BMASKCCK;
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ for (i = 0; i < CCK_TABLE_LENGTH; i++) {
+ if (rtlpriv->dm.cck_inch14) {
+ if (!memcmp((void *)&temp_cck,
+ (void *)&cckswing_table_ch14[i][2], 4)) {
+ *cck_index_old = (u8) i;
+ RT_TRACE(rtlpriv,
+ COMP_POWER_TRACKING,
+ DBG_LOUD,
+ ("Initial reg0x%x = 0x%lx, "
+ "cck_index=0x%x, ch 14 %d\n",
+ RCCK0_TXFILTER2,
+ temp_cck, *cck_index_old,
+ rtlpriv->dm.cck_inch14));
+ break;
+ }
+ } else {
+ if (!memcmp((void *) &temp_cck,
+ &cckswing_table_ch1ch13[i][2], 4)) {
+ *cck_index_old = (u8) i;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ ("Initial reg0x%x = 0x%lx, "
+ "cck_index = 0x%x, ch14 %d\n",
+ RCCK0_TXFILTER2,
+ temp_cck, *cck_index_old,
+ rtlpriv->dm.cck_inch14));
+ break;
+ }
+ }
+ }
+ *temp_cckg = temp_cck;
+}
+
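+/* 5G branch of tx power tracking: choose an index-mapping row per RF path
+ * from the MAC/path in use, the channel range and whether an internal PA is
+ * fitted, then move the OFDM index up or down by the mapped amount. */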
+static void rtl92d_bandtype_5G(struct rtl_hal *rtlhal, u8 *ofdm_index,
+ bool *internal_pa, u8 thermalvalue, u8 delta,
+ u8 rf, struct rtl_efuse *rtlefuse,
+ struct rtl_priv *rtlpriv, struct rtl_phy *rtlphy,
+ u8 index_mapping[5][INDEX_MAPPING_NUM],
+ u8 index_mapping_pa[8][INDEX_MAPPING_NUM])
+{
+ int i;
+ u8 index;
+ u8 offset = 0;
+
+ for (i = 0; i < rf; i++) {
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 1) /* MAC 1 5G */
+ *internal_pa = rtlefuse->internal_pa_5g[1];
+ else
+ *internal_pa = rtlefuse->internal_pa_5g[i];
+ if (*internal_pa) {
+ if (rtlhal->interfaceindex == 1 || i == rf)
+ offset = 4;
+ else
+ offset = 0;
+ if (rtlphy->current_channel >= 100 &&
+ rtlphy->current_channel <= 165)
+ offset += 2;
+ } else {
+ if (rtlhal->interfaceindex == 1 || i == rf)
+ offset = 2;
+ else
+ offset = 0;
+ }
+ if (thermalvalue > rtlefuse->eeprom_thermalmeter)
+ offset++;
+ if (*internal_pa) {
+ if (delta > INDEX_MAPPING_NUM - 1)
+ index = index_mapping_pa[offset]
+ [INDEX_MAPPING_NUM - 1];
+ else
+ index =
+ index_mapping_pa[offset][delta];
+ } else {
+ if (delta > INDEX_MAPPING_NUM - 1)
+ index =
+ index_mapping[offset][INDEX_MAPPING_NUM - 1];
+ else
+ index = index_mapping[offset][delta];
+ }
+ if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
+ if (*internal_pa && thermalvalue > 0x12) {
+ ofdm_index[i] = rtlpriv->dm.ofdm_index[i] -
+ ((delta / 2) * 3 + (delta % 2));
+ } else {
+ ofdm_index[i] -= index;
+ }
+ } else {
+ ofdm_index[i] += index;
+ }
+ }
+}
+
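+/* Thermal-meter based tx power tracking: read and average the thermal value,
+ * derive new OFDM/CCK swing indexes from the delta against the calibration
+ * value, rewrite the IQ imbalance and CCK swing registers, and retrigger
+ * LCK/IQK or rx gain tracking when their own deltas exceed the efuse limits. */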
+static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
+ struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 thermalvalue, delta, delta_lck, delta_iqk, delta_rxgain;
+ u8 offset, thermalvalue_avg_count = 0;
+ u32 thermalvalue_avg = 0;
+ bool internal_pa = false;
+ long ele_a = 0, ele_d, temp_cck, val_x, value32;
+ long val_y, ele_c = 0;
+ u8 ofdm_index[2];
+ u8 cck_index = 0;
+ u8 ofdm_index_old[2];
+ u8 cck_index_old = 0;
+ u8 index;
+ int i;
+ bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
+ u8 ofdm_min_index = 6, ofdm_min_index_internal_pa = 3, rf;
+ u8 indexforchannel =
+ rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel);
+ u8 index_mapping[5][INDEX_MAPPING_NUM] = {
+ /* 5G, path A/MAC 0, decrease power */
+ {0, 1, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
+ /* 5G, path A/MAC 0, increase power */
+ {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
+ /* 5G, path B/MAC 1, decrease power */
+ {0, 2, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
+ /* 5G, path B/MAC 1, increase power */
+ {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
+ /* 2.4G, decrease power */
+ {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10},
+ };
+ u8 index_mapping_internal_pa[8][INDEX_MAPPING_NUM] = {
+ /* 5G, path A/MAC 0, ch36-64, decrease power */
+ {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
+ /* 5G, path A/MAC 0, ch36-64, increase power */
+ {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
+ /* 5G, path A/MAC 0, ch100-165, decrease power */
+ {0, 1, 2, 3, 5, 6, 8, 10, 11, 13, 14, 15, 15},
+ /* 5G, path A/MAC 0, ch100-165, increase power */
+ {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
+ /* 5G, path B/MAC 1, ch36-64, decrease power */
+ {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
+ /* 5G, path B/MAC 1, ch36-64, increase power */
+ {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
+ /* 5G, path B/MAC 1, ch100-165, decrease power */
+ {0, 1, 2, 3, 5, 6, 8, 9, 10, 12, 13, 14, 14},
+ /* 5G, path B/MAC 1, ch100-165, increase power */
+ {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
+ };
+
+ rtlpriv->dm.txpower_trackinginit = true;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("\n"));
+ thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xf800);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+ "eeprom_thermalmeter 0x%x\n", thermalvalue,
+ rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter));
+ rtl92d_phy_ap_calibrate(hw, (thermalvalue -
+ rtlefuse->eeprom_thermalmeter));
+ if (is2t)
+ rf = 2;
+ else
+ rf = 1;
+ if (thermalvalue) {
+ ele_d = rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
+ BMASKDWORD) & BMASKOFDM_D;
+ for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
+ if (ele_d == (ofdmswing_table[i] & BMASKOFDM_D)) {
+ ofdm_index_old[0] = (u8) i;
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Initial pathA ele_d reg0x%x = 0x%lx,"
+ " ofdm_index=0x%x\n",
+ ROFDM0_XATxIQIMBALANCE,
+ ele_d, ofdm_index_old[0]));
+ break;
+ }
+ }
+ if (is2t) {
+ ele_d = rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
+ BMASKDWORD) & BMASKOFDM_D;
+ for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
+ if (ele_d ==
+ (ofdmswing_table[i] & BMASKOFDM_D)) {
+ ofdm_index_old[1] = (u8) i;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ ("Initial pathB ele_d reg "
+ "0x%x = 0x%lx, ofdm_index "
+ "= 0x%x\n",
+ ROFDM0_XBTxIQIMBALANCE, ele_d,
+ ofdm_index_old[1]));
+ break;
+ }
+ }
+ }
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ rtl92d_bandtype_2_4G(hw, &temp_cck, &cck_index_old);
+ } else {
+ temp_cck = 0x090e1317;
+ cck_index_old = 12;
+ }
+
+ if (!rtlpriv->dm.thermalvalue) {
+ rtlpriv->dm.thermalvalue =
+ rtlefuse->eeprom_thermalmeter;
+ rtlpriv->dm.thermalvalue_lck = thermalvalue;
+ rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+ rtlpriv->dm.thermalvalue_rxgain =
+ rtlefuse->eeprom_thermalmeter;
+ for (i = 0; i < rf; i++)
+ rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
+ rtlpriv->dm.cck_index = cck_index_old;
+ }
+ if (rtlhal->reloadtxpowerindex) {
+ for (i = 0; i < rf; i++)
+ rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
+ rtlpriv->dm.cck_index = cck_index_old;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("reload ofdm index for band switch\n"));
+ }
+ rtlpriv->dm.thermalvalue_avg
+ [rtlpriv->dm.thermalvalue_avg_index] = thermalvalue;
+ rtlpriv->dm.thermalvalue_avg_index++;
+ if (rtlpriv->dm.thermalvalue_avg_index == AVG_THERMAL_NUM)
+ rtlpriv->dm.thermalvalue_avg_index = 0;
+ for (i = 0; i < AVG_THERMAL_NUM; i++) {
+ if (rtlpriv->dm.thermalvalue_avg[i]) {
+ thermalvalue_avg +=
+ rtlpriv->dm.thermalvalue_avg[i];
+ thermalvalue_avg_count++;
+ }
+ }
+ if (thermalvalue_avg_count)
+ thermalvalue = (u8) (thermalvalue_avg /
+ thermalvalue_avg_count);
+ if (rtlhal->reloadtxpowerindex) {
+ delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
+ (thermalvalue - rtlefuse->eeprom_thermalmeter) :
+ (rtlefuse->eeprom_thermalmeter - thermalvalue);
+ rtlhal->reloadtxpowerindex = false;
+ rtlpriv->dm.done_txpower = false;
+ } else if (rtlpriv->dm.done_txpower) {
+ delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue) :
+ (rtlpriv->dm.thermalvalue - thermalvalue);
+ } else {
+ delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
+ (thermalvalue - rtlefuse->eeprom_thermalmeter) :
+ (rtlefuse->eeprom_thermalmeter - thermalvalue);
+ }
+ delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
+ (rtlpriv->dm.thermalvalue_lck - thermalvalue);
+ delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
+ (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
+ delta_rxgain =
+ (thermalvalue > rtlpriv->dm.thermalvalue_rxgain) ?
+ (thermalvalue - rtlpriv->dm.thermalvalue_rxgain) :
+ (rtlpriv->dm.thermalvalue_rxgain - thermalvalue);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x"
+ " eeprom_thermalmeter 0x%x delta 0x%x "
+ "delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck,
+ delta_iqk));
+ if ((delta_lck > rtlefuse->delta_lck) &&
+ (rtlefuse->delta_lck != 0)) {
+ rtlpriv->dm.thermalvalue_lck = thermalvalue;
+ rtl92d_phy_lc_calibrate(hw);
+ }
+ if (delta > 0 && rtlpriv->dm.txpower_track_control) {
+ rtlpriv->dm.done_txpower = true;
+ delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
+ (thermalvalue - rtlefuse->eeprom_thermalmeter) :
+ (rtlefuse->eeprom_thermalmeter - thermalvalue);
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ offset = 4;
+ if (delta > INDEX_MAPPING_NUM - 1)
+ index = index_mapping[offset]
+ [INDEX_MAPPING_NUM - 1];
+ else
+ index = index_mapping[offset][delta];
+ if (thermalvalue > rtlpriv->dm.thermalvalue) {
+ for (i = 0; i < rf; i++)
+ ofdm_index[i] -= delta;
+ cck_index -= delta;
+ } else {
+ for (i = 0; i < rf; i++)
+ ofdm_index[i] += index;
+ cck_index += index;
+ }
+ } else if (rtlhal->current_bandtype == BAND_ON_5G) {
+ rtl92d_bandtype_5G(rtlhal, ofdm_index,
+ &internal_pa, thermalvalue,
+ delta, rf, rtlefuse, rtlpriv,
+ rtlphy, index_mapping,
+ index_mapping_internal_pa);
+ }
+ if (is2t) {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("temp OFDM_A_index=0x%x, OFDM_B_index"
+ " = 0x%x,cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.ofdm_index[1],
+ rtlpriv->dm.cck_index));
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("temp OFDM_A_index=0x%x,cck_index = "
+ "0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.cck_index));
+ }
+ for (i = 0; i < rf; i++) {
+ if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1)
+ ofdm_index[i] = OFDM_TABLE_SIZE_92D - 1;
+ else if (ofdm_index[i] < ofdm_min_index)
+ ofdm_index[i] = ofdm_min_index;
+ }
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ if (cck_index > CCK_TABLE_SIZE - 1) {
+ cck_index = CCK_TABLE_SIZE - 1;
+ } else if (internal_pa ||
+ rtlhal->current_bandtype ==
+ BAND_ON_2_4G) {
+ if (ofdm_index[i] <
+ ofdm_min_index_internal_pa)
+ ofdm_index[i] =
+ ofdm_min_index_internal_pa;
+ } else if (cck_index < 0) {
+ cck_index = 0;
+ }
+ }
+ if (is2t) {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("new OFDM_A_index=0x%x, OFDM_B_index "
+ "= 0x%x, cck_index=0x%x\n",
+ ofdm_index[0], ofdm_index[1],
+ cck_index));
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("new OFDM_A_index=0x%x,cck_index = "
+ "0x%x\n",
+ ofdm_index[0], cck_index));
+ }
+ ele_d = (ofdmswing_table[(u8) ofdm_index[0]] &
+ 0xFFC00000) >> 22;
+ val_x = rtlphy->iqk_matrix_regsetting
+ [indexforchannel].value[0][0];
+ val_y = rtlphy->iqk_matrix_regsetting
+ [indexforchannel].value[0][1];
+ if (val_x != 0) {
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+ ele_a =
+ ((val_x * ele_d) >> 8) & 0x000003FF;
+
+ /* new element C = element D x Y */
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+ ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
+
+ /* write new elements A, C, D to regC80 and
+ * regC94, element B is always 0 */
+ value32 = (ele_d << 22) | ((ele_c & 0x3F) <<
+ 16) | ele_a;
+ rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
+ BMASKDWORD, value32);
+
+ value32 = (ele_c & 0x000003C0) >> 6;
+ rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS,
+ value32);
+
+ value32 = ((val_x * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
+ value32);
+
+ } else {
+ rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
+ BMASKDWORD,
+ ofdmswing_table
+ [(u8)ofdm_index[0]]);
+ rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS,
+ 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(24), 0x00);
+ }
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxPwrTracking for interface %d path A: X ="
+ " 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = "
+ "0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = "
+ "0x%lx\n", rtlhal->interfaceindex,
+ val_x, val_y, ele_a, ele_c, ele_d,
+ val_x, val_y));
+
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* Adjust CCK according to IQK result */
+ if (!rtlpriv->dm.cck_inch14) {
+ rtl_write_byte(rtlpriv, 0xa22,
+ cckswing_table_ch1ch13
+ [(u8)cck_index][0]);
+ rtl_write_byte(rtlpriv, 0xa23,
+ cckswing_table_ch1ch13
+ [(u8)cck_index][1]);
+ rtl_write_byte(rtlpriv, 0xa24,
+ cckswing_table_ch1ch13
+ [(u8)cck_index][2]);
+ rtl_write_byte(rtlpriv, 0xa25,
+ cckswing_table_ch1ch13
+ [(u8)cck_index][3]);
+ rtl_write_byte(rtlpriv, 0xa26,
+ cckswing_table_ch1ch13
+ [(u8)cck_index][4]);
+ rtl_write_byte(rtlpriv, 0xa27,
+ cckswing_table_ch1ch13
+ [(u8)cck_index][5]);
+ rtl_write_byte(rtlpriv, 0xa28,
+ cckswing_table_ch1ch13
+ [(u8)cck_index][6]);
+ rtl_write_byte(rtlpriv, 0xa29,
+ cckswing_table_ch1ch13
+ [(u8)cck_index][7]);
+ } else {
+ rtl_write_byte(rtlpriv, 0xa22,
+ cckswing_table_ch14
+ [(u8)cck_index][0]);
+ rtl_write_byte(rtlpriv, 0xa23,
+ cckswing_table_ch14
+ [(u8)cck_index][1]);
+ rtl_write_byte(rtlpriv, 0xa24,
+ cckswing_table_ch14
+ [(u8)cck_index][2]);
+ rtl_write_byte(rtlpriv, 0xa25,
+ cckswing_table_ch14
+ [(u8)cck_index][3]);
+ rtl_write_byte(rtlpriv, 0xa26,
+ cckswing_table_ch14
+ [(u8)cck_index][4]);
+ rtl_write_byte(rtlpriv, 0xa27,
+ cckswing_table_ch14
+ [(u8)cck_index][5]);
+ rtl_write_byte(rtlpriv, 0xa28,
+ cckswing_table_ch14
+ [(u8)cck_index][6]);
+ rtl_write_byte(rtlpriv, 0xa29,
+ cckswing_table_ch14
+ [(u8)cck_index][7]);
+ }
+ }
+ if (is2t) {
+ ele_d = (ofdmswing_table[(u8) ofdm_index[1]] &
+ 0xFFC00000) >> 22;
+ val_x = rtlphy->iqk_matrix_regsetting
+ [indexforchannel].value[0][4];
+ val_y = rtlphy->iqk_matrix_regsetting
+ [indexforchannel].value[0][5];
+ if (val_x != 0) {
+ if ((val_x & 0x00000200) != 0)
+ /* consider minus */
+ val_x = val_x | 0xFFFFFC00;
+ ele_a = ((val_x * ele_d) >> 8) &
+ 0x000003FF;
+ /* new element C = element D x Y */
+ if ((val_y & 0x00000200) != 0)
+ val_y =
+ val_y | 0xFFFFFC00;
+ ele_c =
+ ((val_y *
+ ele_d) >> 8) & 0x00003FF;
+ /* write new elements A, C, D to regC88
+ * and regC9C, element B is always 0
+ */
+ value32 = (ele_d << 22) |
+ ((ele_c & 0x3F) << 16) |
+ ele_a;
+ rtl_set_bbreg(hw,
+ ROFDM0_XBTxIQIMBALANCE,
+ BMASKDWORD, value32);
+ value32 = (ele_c & 0x000003C0) >> 6;
+ rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
+ BMASKH4BITS, value32);
+ value32 = ((val_x * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(28), value32);
+ } else {
+ rtl_set_bbreg(hw,
+ ROFDM0_XBTxIQIMBALANCE,
+ BMASKDWORD,
+ ofdmswing_table
+ [(u8) ofdm_index[1]]);
+ rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
+ BMASKH4BITS, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
+ BIT(28), 0x00);
+ }
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxPwrTracking path B: X = 0x%lx, "
+ "Y = 0x%lx ele_A = 0x%lx ele_C = 0x"
+ "%lx ele_D = 0x%lx 0xeb4 = 0x%lx "
+ "0xebc = 0x%lx\n",
+ val_x, val_y, ele_a, ele_c,
+ ele_d, val_x, val_y));
+ }
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("TxPwrTracking 0xc80 = 0x%x, 0xc94 = "
+ "0x%x RF 0x24 = 0x%x\n",
+ rtl_get_bbreg(hw, 0xc80, BMASKDWORD),
+ rtl_get_bbreg(hw, 0xc94, BMASKDWORD),
+ rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
+ BRFREGOFFSETMASK)));
+ }
+ if ((delta_iqk > rtlefuse->delta_iqk) &&
+ (rtlefuse->delta_iqk != 0)) {
+ rtl92d_phy_reset_iqk_result(hw);
+ rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+ rtl92d_phy_iq_calibrate(hw);
+ }
+ if (delta_rxgain > 0 && rtlhal->current_bandtype == BAND_ON_5G
+ && thermalvalue <= rtlefuse->eeprom_thermalmeter) {
+ rtlpriv->dm.thermalvalue_rxgain = thermalvalue;
+ rtl92d_dm_rxgain_tracking_thermalmeter(hw);
+ }
+ if (rtlpriv->dm.txpower_track_control)
+ rtlpriv->dm.thermalvalue = thermalvalue;
+ }
+
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
+}
+
+static void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.txpower_tracking = true;
+ rtlpriv->dm.txpower_trackinginit = false;
+ rtlpriv->dm.txpower_track_control = true;
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("pMgntInfo->txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking));
+}
+
+void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ static u8 tm_trigger;
+
+ if (!rtlpriv->dm.txpower_tracking)
+ return;
+
+ if (!tm_trigger) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) |
+ BIT(16), 0x03);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Trigger 92D Thermal Meter!!\n"));
+ tm_trigger = 1;
+ return;
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ ("Schedule TxPowerTracking direct call!!\n"));
+ rtl92d_dm_txpower_tracking_callback_thermalmeter(hw);
+ tm_trigger = 0;
+ }
+}
+
+void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rate_adaptive *ra = &(rtlpriv->ra);
+
+ ra->ratr_state = DM_RATR_STA_INIT;
+ ra->pre_ratr_state = DM_RATR_STA_INIT;
+ if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
+ rtlpriv->dm.useramask = true;
+ else
+ rtlpriv->dm.useramask = false;
+}
+
+void rtl92d_dm_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+ rtl92d_dm_diginit(hw);
+ rtl92d_dm_init_dynamic_txpower(hw);
+ rtl92d_dm_init_edca_turbo(hw);
+ rtl92d_dm_init_rate_adaptive_mask(hw);
+ rtl92d_dm_initialize_txpower_tracking(hw);
+}
+
+void rtl92d_dm_watchdog(struct ieee80211_hw *hw)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool fw_current_inpsmode = false;
+ bool fwps_awake = true;
+
+ /* 1. RF is OFF (no need to do DM).
+ * 2. FW is in power-saving mode for FwLPS
+ * (prevents SW/FW I/O racing).
+ * 3. The IPS workitem is scheduled (prevents the IPS
+ * sequence from being swapped with DM).
+ * 4. RFChangeInProgress is TRUE
+ * (prevents breakage by IPS/HW/SW RF off). */
+
+ if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) &&
+ fwps_awake) && (!ppsc->rfchange_inprogress)) {
+ rtl92d_dm_pwdb_monitor(hw);
+ rtl92d_dm_false_alarm_counter_statistics(hw);
+ rtl92d_dm_find_minimum_rssi(hw);
+ rtl92d_dm_dig(hw);
+ /* rtl92d_dm_dynamic_bb_powersaving(hw); */
+ rtl92d_dm_dynamic_txpower(hw);
+ /* rtl92d_dm_check_txpower_tracking_thermal_meter(hw); */
+ /* rtl92d_dm_refresh_rate_adaptive_mask(hw); */
+ /* rtl92d_dm_interrupt_migration(hw); */
+ rtl92d_dm_check_edca_turbo(hw);
+ }
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
new file mode 100644
index 00000000000..69354657f0f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
@@ -0,0 +1,212 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92D_DM_H__
+#define __RTL92D_DM_H__
+
+#define HAL_DM_DIG_DISABLE BIT(0)
+#define HAL_DM_HIPWR_DISABLE BIT(1)
+
+#define OFDM_TABLE_LENGTH 37
+#define OFDM_TABLE_SIZE_92D 43
+#define CCK_TABLE_LENGTH 33
+
+#define CCK_TABLE_SIZE 33
+
+#define BW_AUTO_SWITCH_HIGH_LOW 25
+#define BW_AUTO_SWITCH_LOW_HIGH 30
+
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
+
+#define DM_FALSEALARM_THRESH_LOW 400
+#define DM_FALSEALARM_THRESH_HIGH 1000
+
+#define DM_DIG_MAX 0x3e
+#define DM_DIG_MIN 0x1c
+
+#define DM_DIG_FA_UPPER 0x32
+#define DM_DIG_FA_LOWER 0x20
+#define DM_DIG_FA_TH0 0x100
+#define DM_DIG_FA_TH1 0x400
+#define DM_DIG_FA_TH2 0x600
+
+#define DM_DIG_BACKOFF_MAX 12
+#define DM_DIG_BACKOFF_MIN -4
+#define DM_DIG_BACKOFF_DEFAULT 10
+
+#define RXPATHSELECTION_SS_TH_lOW 30
+#define RXPATHSELECTION_DIFF_TH 18
+
+#define DM_RATR_STA_INIT 0
+#define DM_RATR_STA_HIGH 1
+#define DM_RATR_STA_MIDDLE 2
+#define DM_RATR_STA_LOW 3
+
+#define CTS2SELF_THVAL 30
+#define REGC38_TH 20
+
+#define WAIOTTHVAL 25
+
+#define TXHIGHPWRLEVEL_NORMAL 0
+#define TXHIGHPWRLEVEL_LEVEL1 1
+#define TXHIGHPWRLEVEL_LEVEL2 2
+#define TXHIGHPWRLEVEL_BT1 3
+#define TXHIGHPWRLEVEL_BT2 4
+
+#define DM_TYPE_BYFW 0
+#define DM_TYPE_BYDRIVER 1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+#define INDEX_MAPPING_NUM 13
+
+struct ps_t {
+ u8 pre_ccastate;
+ u8 cur_ccasate;
+
+ u8 pre_rfstate;
+ u8 cur_rfstate;
+
+ long rssi_val_min;
+};
+
+struct dig_t {
+ u8 dig_enable_flag;
+ u8 dig_ext_port_stage;
+
+ u32 rssi_lowthresh;
+ u32 rssi_highthresh;
+
+ u32 fa_lowthresh;
+ u32 fa_highthresh;
+
+ u8 cursta_connectctate;
+ u8 presta_connectstate;
+ u8 curmultista_connectstate;
+
+ u8 pre_igvalue;
+ u8 cur_igvalue;
+
+ char backoff_val;
+ char backoff_val_range_max;
+ char backoff_val_range_min;
+ u8 rx_gain_range_max;
+ u8 rx_gain_range_min;
+ u8 min_undecorated_pwdb_for_dm;
+ long last_min_undecorated_pwdb_for_dm;
+
+ u8 pre_cck_pd_state;
+ u8 cur_cck_pd_state;
+
+ u8 pre_cck_fa_state;
+ u8 cur_cck_fa_state;
+
+ u8 pre_ccastate;
+ u8 cur_ccasate;
+
+ u8 large_fa_hit;
+ u8 forbidden_igi;
+ u32 recover_cnt;
+};
+
+struct swat {
+ u8 failure_cnt;
+ u8 try_flag;
+ u8 stop_trying;
+ long pre_rssi;
+ long trying_threshold;
+ u8 cur_antenna;
+ u8 pre_antenna;
+};
+
+enum tag_dynamic_init_gain_operation_type_definition {
+ DIG_TYPE_THRESH_HIGH = 0,
+ DIG_TYPE_THRESH_LOW = 1,
+ DIG_TYPE_BACKOFF = 2,
+ DIG_TYPE_RX_GAIN_MIN = 3,
+ DIG_TYPE_RX_GAIN_MAX = 4,
+ DIG_TYPE_ENABLE = 5,
+ DIG_TYPE_DISABLE = 6,
+ DIG_OP_TYPE_MAX
+};
+
+enum tag_cck_packet_detection_threshold_type_definition {
+ CCK_PD_STAGE_LOWRSSI = 0,
+ CCK_PD_STAGE_HIGHRSSI = 1,
+ CCK_FA_STAGE_LOW = 2,
+ CCK_FA_STAGE_HIGH = 3,
+ CCK_PD_STAGE_MAX = 4,
+};
+
+enum dm_1r_cca {
+ CCA_1R = 0,
+ CCA_2R = 1,
+ CCA_MAX = 2,
+};
+
+enum dm_rf {
+ RF_SAVE = 0,
+ RF_NORMAL = 1,
+ RF_MAX = 2,
+};
+
+enum dm_sw_ant_switch {
+ ANS_ANTENNA_B = 1,
+ ANS_ANTENNA_A = 2,
+ ANS_ANTENNA_MAX = 3,
+};
+
+enum dm_dig_ext_port_alg {
+ DIG_EXT_PORT_STAGE_0 = 0,
+ DIG_EXT_PORT_STAGE_1 = 1,
+ DIG_EXT_PORT_STAGE_2 = 2,
+ DIG_EXT_PORT_STAGE_3 = 3,
+ DIG_EXT_PORT_STAGE_MAX = 4,
+};
+
+enum dm_dig_connect {
+ DIG_STA_DISCONNECT = 0,
+ DIG_STA_CONNECT = 1,
+ DIG_STA_BEFORE_CONNECT = 2,
+ DIG_MULTISTA_DISCONNECT = 3,
+ DIG_MULTISTA_CONNECT = 4,
+ DIG_CONNECT_MAX
+};
+
+extern struct dig_t de_digtable;
+
+void rtl92d_dm_init(struct ieee80211_hw *hw);
+void rtl92d_dm_watchdog(struct ieee80211_hw *hw);
+void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl92d_dm_write_dig(struct ieee80211_hw *hw);
+void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw);
+void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
new file mode 100644
index 00000000000..82f060bdbc0
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
@@ -0,0 +1,790 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "fw.h"
+#include "sw.h"
+
+static bool _rtl92d_is_fw_downloaded(struct rtl_priv *rtlpriv)
+{
+ return (rtl_read_dword(rtlpriv, REG_MCUFWDL) & MCUFWDL_RDY) ?
+ true : false;
+}
+
+static void _rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp;
+
+ if (enable) {
+ tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
+ } else {
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
+ /* Reserved for fw extension.
+ * 0x81[7] is used for mac0 status,
+ * so don't write this register here:
+ * rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00); */
+ }
+}
+
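+/* Copy the firmware image into the chip starting at FW_8192D_START_ADDRESS,
+ * one dword at a time, finishing any remainder with byte writes. */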
+static void _rtl92d_fw_block_write(struct ieee80211_hw *hw,
+ const u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 blocksize = sizeof(u32);
+ u8 *bufferptr = (u8 *) buffer;
+ u32 *pu4BytePtr = (u32 *) buffer;
+ u32 i, offset, blockCount, remainSize;
+
+ blockCount = size / blocksize;
+ remainSize = size % blocksize;
+ for (i = 0; i < blockCount; i++) {
+ offset = i * blocksize;
+ rtl_write_dword(rtlpriv, (FW_8192D_START_ADDRESS + offset),
+ *(pu4BytePtr + i));
+ }
+ if (remainSize) {
+ offset = blockCount * blocksize;
+ bufferptr += offset;
+ for (i = 0; i < remainSize; i++) {
+ rtl_write_byte(rtlpriv, (FW_8192D_START_ADDRESS +
+ offset + i), *(bufferptr + i));
+ }
+ }
+}
+
+static void _rtl92d_fw_page_write(struct ieee80211_hw *hw,
+ u32 page, const u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 value8;
+ u8 u8page = (u8) (page & 0x07);
+
+ value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
+ rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
+ _rtl92d_fw_block_write(hw, buffer, size);
+}
+
+static void _rtl92d_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
+{
+ u32 fwlen = *pfwlen;
+ u8 remain = (u8) (fwlen % 4);
+
+ remain = (remain == 0) ? 0 : (4 - remain);
+ while (remain > 0) {
+ pfwbuf[fwlen] = 0;
+ fwlen++;
+ remain--;
+ }
+ *pfwlen = fwlen;
+}
+
+static void _rtl92d_write_fw(struct ieee80211_hw *hw,
+ enum version_8192d version, u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 *bufferPtr = (u8 *) buffer;
+ u32 pagenums, remainSize;
+ u32 page, offset;
+
+ RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes\n", size));
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
+ _rtl92d_fill_dummy(bufferPtr, &size);
+ pagenums = size / FW_8192D_PAGE_SIZE;
+ remainSize = size % FW_8192D_PAGE_SIZE;
+ if (pagenums > 8) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Number of pages should not be greater than 8\n"));
+ }
+ for (page = 0; page < pagenums; page++) {
+ offset = page * FW_8192D_PAGE_SIZE;
+ _rtl92d_fw_page_write(hw, page, (bufferPtr + offset),
+ FW_8192D_PAGE_SIZE);
+ }
+ if (remainSize) {
+ offset = pagenums * FW_8192D_PAGE_SIZE;
+ page = pagenums;
+ _rtl92d_fw_page_write(hw, page, (bufferPtr + offset),
+ remainSize);
+ }
+}
+
+static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 counter = 0;
+ u32 value32;
+
+ do {
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ } while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) &&
+ (!(value32 & FWDL_ChkSum_rpt)));
+ if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Checksum report failed! REG_MCUFWDL:0x%08x\n",
+ value32));
+ return -EIO;
+ }
+ RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ ("Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32));
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ value32 |= MCUFWDL_RDY;
+ rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
+ return 0;
+}
+
+void rtl92d_firmware_selfreset(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 u1b_tmp;
+ u8 delay = 100;
+
+ /* Setting (REG_HMETFR + 3) to 0x20 resets the 8051 */
+ rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ while (u1b_tmp & BIT(2)) {
+ delay--;
+ if (delay == 0)
+ break;
+ udelay(50);
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ }
+ RT_ASSERT((delay > 0), ("8051 reset failed!\n"));
+ RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+ ("=====> 8051 reset success (%d) .\n", delay));
+}
+
+static int _rtl92d_fw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 counter;
+
+ RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, ("FW has already been downloaded\n"));
+ /* polling for FW ready */
+ counter = 0;
+ do {
+ if (rtlhal->interfaceindex == 0) {
+ if (rtl_read_byte(rtlpriv, FW_MAC0_READY) &
+ MAC0_READY) {
+ RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+ ("Polling FW ready success!! "
+ "REG_MCUFWDL: 0x%x .\n",
+ rtl_read_byte(rtlpriv,
+ FW_MAC0_READY)));
+ return 0;
+ }
+ udelay(5);
+ } else {
+ if (rtl_read_byte(rtlpriv, FW_MAC1_READY) &
+ MAC1_READY) {
+ RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+ ("Polling FW ready success!! "
+ "REG_MCUFWDL: 0x%x .\n",
+ rtl_read_byte(rtlpriv,
+ FW_MAC1_READY)));
+ return 0;
+ }
+ udelay(5);
+ }
+ } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
+
+ if (rtlhal->interfaceindex == 0) {
+ RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+ ("Polling FW ready fail!! MAC0 FW init not ready: "
+ "0x%x .\n",
+ rtl_read_byte(rtlpriv, FW_MAC0_READY)));
+ } else {
+ RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+ ("Polling FW ready fail!! MAC1 FW init not ready: "
+ "0x%x .\n",
+ rtl_read_byte(rtlpriv, FW_MAC1_READY)));
+ }
+ RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+ ("Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
+ rtl_read_dword(rtlpriv, REG_MCUFWDL)));
+ return -1;
+}
+
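+/* Download the firmware image. The MCUFWDL ready bit and bit 5 of register
+ * 0x1f (taken under globalmutex_for_fwdownload) are used to skip the download
+ * when the other MAC has already completed it, or to wait while the other
+ * MAC's download is still in progress, before this MAC writes the image. */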
+int rtl92d_download_fw(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 *pfwheader;
+ u8 *pfwdata;
+ u32 fwsize;
+ int err;
+ enum version_8192d version = rtlhal->version;
+ u8 value;
+ u32 count;
+ bool fw_downloaded = false, fwdl_in_process = false;
+ unsigned long flags;
+
+ if (!rtlhal->pfirmware)
+ return 1;
+ fwsize = rtlhal->fwsize;
+ pfwheader = (u8 *) rtlhal->pfirmware;
+ pfwdata = (u8 *) rtlhal->pfirmware;
+ rtlhal->fw_version = (u16) GET_FIRMWARE_HDR_VERSION(pfwheader);
+ rtlhal->fw_subversion = (u16) GET_FIRMWARE_HDR_SUB_VER(pfwheader);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, (" FirmwareVersion(%d),"
+ "FirmwareSubVersion(%d), Signature(%#x)\n",
+ rtlhal->fw_version, rtlhal->fw_subversion,
+ GET_FIRMWARE_HDR_SIGNATURE(pfwheader)));
+ if (IS_FW_HEADER_EXIST(pfwheader)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("Shift 32 bytes for FW header!!\n"));
+ pfwdata = pfwdata + 32;
+ fwsize = fwsize - 32;
+ }
+
+ spin_lock_irqsave(&globalmutex_for_fwdownload, flags);
+ fw_downloaded = _rtl92d_is_fw_downloaded(rtlpriv);
+ if ((rtl_read_byte(rtlpriv, 0x1f) & BIT(5)) == BIT(5))
+ fwdl_in_process = true;
+ else
+ fwdl_in_process = false;
+ if (fw_downloaded) {
+ spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags);
+ goto exit;
+ } else if (fwdl_in_process) {
+ spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags);
+ for (count = 0; count < 5000; count++) {
+ udelay(500);
+ spin_lock_irqsave(&globalmutex_for_fwdownload, flags);
+ fw_downloaded = _rtl92d_is_fw_downloaded(rtlpriv);
+ if ((rtl_read_byte(rtlpriv, 0x1f) & BIT(5)) == BIT(5))
+ fwdl_in_process = true;
+ else
+ fwdl_in_process = false;
+ spin_unlock_irqrestore(&globalmutex_for_fwdownload,
+ flags);
+ if (fw_downloaded)
+ goto exit;
+ else if (!fwdl_in_process)
+ break;
+ else
+ RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+ ("Waiting for the other mac to "
+ "download the fw\n"));
+ }
+ spin_lock_irqsave(&globalmutex_for_fwdownload, flags);
+ value = rtl_read_byte(rtlpriv, 0x1f);
+ value |= BIT(5);
+ rtl_write_byte(rtlpriv, 0x1f, value);
+ spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags);
+ } else {
+ value = rtl_read_byte(rtlpriv, 0x1f);
+ value |= BIT(5);
+ rtl_write_byte(rtlpriv, 0x1f, value);
+ spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags);
+ }
+
+ /* If the 8051 is running RAM code, the driver must tell
+ * the FW to reset itself first, or the FW download
+ * will fail. */
+ /* 8051 RAM code */
+ if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
+ rtl92d_firmware_selfreset(hw);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+ }
+ _rtl92d_enable_fw_download(hw, true);
+ _rtl92d_write_fw(hw, version, pfwdata, fwsize);
+ _rtl92d_enable_fw_download(hw, false);
+ spin_lock_irqsave(&globalmutex_for_fwdownload, flags);
+ err = _rtl92d_fw_free_to_go(hw);
+ /* fw download finished, clear 0x1f[5] */
+ value = rtl_read_byte(rtlpriv, 0x1f);
+ value &= (~BIT(5));
+ rtl_write_byte(rtlpriv, 0x1f, value);
+ spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("fw is not ready to run!\n"));
+ goto exit;
+ } else {
+ RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ ("fw is ready to run!\n"));
+ }
+exit:
+ err = _rtl92d_fw_init(hw);
+ return err;
+}
+
+static bool _rtl92d_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 val_hmetfr;
+ bool result = false;
+
+ val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
+ if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
+ result = true;
+ return result;
+}
+
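+/* Write one H2C command into the next of the four HMEBOX mailbox registers,
+ * waiting for the firmware to consume the previous content first; commands
+ * longer than three bytes spill into the matching HMEBOX_EXT register and
+ * flag that by setting BIT(7) of the first byte. */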
+static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
+ u8 element_id, u32 cmd_len, u8 *cmdbuffer)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u8 boxnum;
+ u16 box_reg = 0, box_extreg = 0;
+ u8 u1b_tmp;
+ bool isfw_read = false;
+ u8 buf_index = 0;
+ bool bwrite_success = false;
+ u8 wait_h2c_limit = 100;
+ u8 wait_writeh2c_limit = 100;
+ u8 boxcontent[4], boxextcontent[2];
+ u32 h2c_waitcounter = 0;
+ unsigned long flag;
+ u8 idx;
+
+ if (ppsc->rfpwr_state == ERFOFF || ppsc->inactive_pwrstate == ERFOFF) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("Return as RF is off!!!\n"));
+ return;
+ }
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("come in\n"));
+ while (true) {
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ if (rtlhal->h2c_setinprogress) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("H2C set in progress! Waiting to set "
+ "element_id(%d).\n", element_id));
+
+ while (rtlhal->h2c_setinprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
+ flag);
+ h2c_waitcounter++;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("Wait 100 us (%d times)...\n",
+ h2c_waitcounter));
+ udelay(100);
+
+ if (h2c_waitcounter > 1000)
+ return;
+
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
+ flag);
+ }
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ } else {
+ rtlhal->h2c_setinprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ break;
+ }
+ }
+ while (!bwrite_success) {
+ wait_writeh2c_limit--;
+ if (wait_writeh2c_limit == 0) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Write H2C fail because no trigger "
+ "for FW INT!\n"));
+ break;
+ }
+ boxnum = rtlhal->last_hmeboxnum;
+ switch (boxnum) {
+ case 0:
+ box_reg = REG_HMEBOX_0;
+ box_extreg = REG_HMEBOX_EXT_0;
+ break;
+ case 1:
+ box_reg = REG_HMEBOX_1;
+ box_extreg = REG_HMEBOX_EXT_1;
+ break;
+ case 2:
+ box_reg = REG_HMEBOX_2;
+ box_extreg = REG_HMEBOX_EXT_2;
+ break;
+ case 3:
+ box_reg = REG_HMEBOX_3;
+ box_extreg = REG_HMEBOX_EXT_3;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
+ while (!isfw_read) {
+ wait_h2c_limit--;
+ if (wait_h2c_limit == 0) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("Waited too long for FW to read/"
+ "clear HMEBox(%d)!\n", boxnum));
+ break;
+ }
+ udelay(10);
+ isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
+ u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("Waiting for FW to read/clear HMEBox(%d)!!! "
+ "0x1BF = %2x\n", boxnum, u1b_tmp));
+ }
+ if (!isfw_read) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("Write to H2C register BOX[%d] failed! "
+ "FW did not read it.\n", boxnum));
+ break;
+ }
+ memset(boxcontent, 0, sizeof(boxcontent));
+ memset(boxextcontent, 0, sizeof(boxextcontent));
+ boxcontent[0] = element_id;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id));
+ switch (cmd_len) {
+ case 1:
+ boxcontent[0] &= ~(BIT(7));
+ memcpy(boxcontent + 1, cmdbuffer + buf_index, 1);
+ for (idx = 0; idx < 4; idx++)
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ break;
+ case 2:
+ boxcontent[0] &= ~(BIT(7));
+ memcpy(boxcontent + 1, cmdbuffer + buf_index, 2);
+ for (idx = 0; idx < 4; idx++)
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ break;
+ case 3:
+ boxcontent[0] &= ~(BIT(7));
+ memcpy(boxcontent + 1, cmdbuffer + buf_index, 3);
+ for (idx = 0; idx < 4; idx++)
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ break;
+ case 4:
+ boxcontent[0] |= (BIT(7));
+ memcpy(boxextcontent, cmdbuffer + buf_index, 2);
+ memcpy(boxcontent + 1, cmdbuffer + buf_index + 2, 2);
+ for (idx = 0; idx < 2; idx++)
+ rtl_write_byte(rtlpriv, box_extreg + idx,
+ boxextcontent[idx]);
+ for (idx = 0; idx < 4; idx++)
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ break;
+ case 5:
+ boxcontent[0] |= (BIT(7));
+ memcpy(boxextcontent, cmdbuffer + buf_index, 2);
+ memcpy(boxcontent + 1, cmdbuffer + buf_index + 2, 3);
+ for (idx = 0; idx < 2; idx++)
+ rtl_write_byte(rtlpriv, box_extreg + idx,
+ boxextcontent[idx]);
+ for (idx = 0; idx < 4; idx++)
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ bwrite_success = true;
+ rtlhal->last_hmeboxnum = boxnum + 1;
+ if (rtlhal->last_hmeboxnum == 4)
+ rtlhal->last_hmeboxnum = 0;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum));
+ }
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ rtlhal->h2c_setinprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n"));
+}
+
+void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw,
+ u8 element_id, u32 cmd_len, u8 *cmdbuffer)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 tmp_cmdbuf[2];
+
+ if (rtlhal->fw_ready == false) {
+ RT_ASSERT(false, ("Dropping H2C cmd because the FW "
+ "download failed!\n"));
+ return;
+ }
+ memset(tmp_cmdbuf, 0, 8);
+ memcpy(tmp_cmdbuf, cmdbuffer, cmd_len);
+ _rtl92d_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf);
+ return;
+}
+
+void rtl92d_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 u1_h2c_set_pwrmode[3] = { 0 };
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("FW LPS mode = %d\n", mode));
+ SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
+ SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1);
+ SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
+ ppsc->reg_max_lps_awakeintvl);
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl92d_set_fw_pwrmode_cmd(): u1_h2c_set_pwrmode\n",
+ u1_h2c_set_pwrmode, 3);
+ rtl92d_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
+}
+
+static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw,
+ struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl8192_tx_ring *ring;
+ struct rtl_tx_desc *pdesc;
+ u8 idx = 0;
+ unsigned long flags;
+ struct sk_buff *pskb;
+
+ ring = &rtlpci->tx_ring[BEACON_QUEUE];
+ pskb = __skb_dequeue(&ring->queue);
+ if (pskb)
+ kfree_skb(pskb);
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+ pdesc = &ring->desc[idx];
+ /* discard output from call below */
+ rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
+ rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
+ __skb_queue_tail(&ring->queue, skb);
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+ rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+ return true;
+}
+
+#define BEACON_PG 0 /*->1 */
+#define PSPOLL_PG 2
+#define NULL_PG 3
+#define PROBERSP_PG 4 /*->5 */
+#define TOTAL_RESERVED_PKT_LEN 768
+
+static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
+ /* page 0 beacon */
+ 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+ 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x50, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
+ 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
+ 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
+ 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
+ 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
+ 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
+ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
+ 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 1 beacon */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x10, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 2 ps-poll */
+ 0xA4, 0x10, 0x01, 0xC0, 0x00, 0x40, 0x10, 0x10,
+ 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x18, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 3 null */
+ 0x48, 0x01, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
+ 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+ 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x72, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 4 probe_resp */
+ 0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
+ 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+ 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
+ 0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00,
+ 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
+ 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
+ 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
+ 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
+ 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
+ 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
+ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
+ 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* page 5 probe_resp */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
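+/* Download the reserved pages (beacon, ps-poll, null data and probe response,
+ * each starting on a 128-byte page boundary) through the beacon queue and
+ * report each packet's page location to the firmware with H2C_RSVDPAGE. */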
+void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct sk_buff *skb = NULL;
+ u32 totalpacketlen;
+ bool rtstatus;
+ u8 u1RsvdPageLoc[3] = { 0 };
+ bool dlok = false;
+ u8 *beacon;
+ u8 *p_pspoll;
+ u8 *nullfunc;
+ u8 *p_probersp;
+ /*---------------------------------------------------------
+ (1) beacon
+ ---------------------------------------------------------*/
+ beacon = &reserved_page_packet[BEACON_PG * 128];
+ SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
+ /*-------------------------------------------------------
+ (2) ps-poll
+ --------------------------------------------------------*/
+ p_pspoll = &reserved_page_packet[PSPOLL_PG * 128];
+ SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
+ SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
+ SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
+ SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG);
+ /*--------------------------------------------------------
+ (3) null data
+ ---------------------------------------------------------*/
+ nullfunc = &reserved_page_packet[NULL_PG * 128];
+ SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
+ SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
+ SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG);
+ /*---------------------------------------------------------
+ (4) probe response
+ ----------------------------------------------------------*/
+ p_probersp = &reserved_page_packet[PROBERSP_PG * 128];
+ SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
+ SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
+ SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
+ SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG);
+ totalpacketlen = TOTAL_RESERVED_PKT_LEN;
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+ "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+ &reserved_page_packet[0], totalpacketlen);
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
+ u1RsvdPageLoc, 3);
+ skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+  return;
+ memcpy((u8 *) skb_put(skb, totalpacketlen), &reserved_page_packet,
+ totalpacketlen);
+ rtstatus = _rtl92d_cmd_send_packet(hw, skb);
+
+ if (rtstatus)
+ dlok = true;
+ if (dlok) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("Set RSVD page location to Fw.\n"));
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+ "H2C_RSVDPAGE:\n", u1RsvdPageLoc, 3);
+ rtl92d_fill_h2c_cmd(hw, H2C_RSVDPAGE,
+ sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
+ } else
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("Set RSVD page location to Fw FAIL!!!!!!.\n"));
+}
+
+void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
+{
+ u8 u1_joinbssrpt_parm[1] = {0};
+
+ SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
+ rtl92d_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
new file mode 100644
index 00000000000..0c4d489eaa4
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
@@ -0,0 +1,155 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92D__FW__H__
+#define __RTL92D__FW__H__
+
+#define FW_8192D_START_ADDRESS 0x1000
+#define FW_8192D_PAGE_SIZE 4096
+#define FW_8192D_POLLING_TIMEOUT_COUNT 1000
+
+#define IS_FW_HEADER_EXIST(_pfwhdr) \
+ ((GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFF0) == 0x92C0 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFF0) == 0x88C0 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D0 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D1 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D2 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D3)
+
+/* Define a macro that takes an le32 word, converts it to host ordering,
+ * right shifts by a specified count, creates a mask of the specified
+ * bit count, and extracts that number of bits.
+ */
+
+#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
+ ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
+ BIT_LEN_MASK_32(__mask))
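+
+/* Example (the value is hypothetical): if the second little-endian dword
+ * of a firmware header is 0x001C0049, GET_FIRMWARE_HDR_VERSION() below
+ * extracts 0x0049 (bits 0..15) and GET_FIRMWARE_HDR_SUB_VER() extracts
+ * 0x1C (bits 16..23).
+ */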
+
+/* Firmware header (8-byte alignment required) */
+/* --- LONG WORD 0 ---- */
+#define GET_FIRMWARE_HDR_SIGNATURE(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr, 0, 16)
+#define GET_FIRMWARE_HDR_CATEGORY(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr, 16, 8)
+#define GET_FIRMWARE_HDR_FUNCTION(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr, 24, 8)
+#define GET_FIRMWARE_HDR_VERSION(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr + 4, 0, 16)
+#define GET_FIRMWARE_HDR_SUB_VER(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr + 4, 16, 8)
+#define GET_FIRMWARE_HDR_RSVD1(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr + 4, 24, 8)
+
+/* --- LONG WORD 1 ---- */
+#define GET_FIRMWARE_HDR_MONTH(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr + 8, 0, 8)
+#define GET_FIRMWARE_HDR_DATE(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr + 8, 8, 8)
+#define GET_FIRMWARE_HDR_HOUR(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr + 8, 16, 8)
+#define GET_FIRMWARE_HDR_MINUTE(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr + 8, 24, 8)
+#define GET_FIRMWARE_HDR_ROMCODE_SIZE(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr + 12, 0, 16)
+#define GET_FIRMWARE_HDR_RSVD2(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr + 12, 16, 16)
+
+/* --- LONG WORD 2 ---- */
+#define GET_FIRMWARE_HDR_SVN_IDX(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr + 16, 0, 32)
+#define GET_FIRMWARE_HDR_RSVD3(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr + 20, 0, 32)
+
+/* --- LONG WORD 3 ---- */
+#define GET_FIRMWARE_HDR_RSVD4(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr + 24, 0, 32)
+#define GET_FIRMWARE_HDR_RSVD5(__fwhdr) \
+ SHIFT_AND_MASK_LE(__fwhdr + 28, 0, 32)
+
+#define pagenum_128(_len) \
+ (u32)(((_len) >> 7) + ((_len) & 0x7F ? 1 : 0))
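+
+/* Worked example (illustrative): pagenum_128(300) = (300 >> 7) + 1 = 3,
+ * i.e. a 300-byte packet occupies three 128-byte pages
+ * (two full pages plus one for the remaining 44 bytes).
+ */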
+
+#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 1, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
+#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 1, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
+ SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
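+
+/* The three RSVDPAGE_LOC setters above fill a 3-byte H2C payload:
+ * byte 0 holds the probe-response page, byte 1 the PS-Poll page and
+ * byte 2 the null-data page, matching how rtl92d_set_fw_rsvdpagepkt()
+ * builds u1RsvdPageLoc before sending the H2C_RSVDPAGE command.
+ */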
+
+struct rtl92d_firmware_header {
+ u16 signature;
+ u8 category;
+ u8 function;
+ u16 version;
+ u8 subversion;
+ u8 rsvd1;
+
+ u8 month;
+ u8 date;
+ u8 hour;
+ u8 minute;
+ u16 ramcodeSize;
+ u16 rsvd2;
+
+ u32 svnindex;
+ u32 rsvd3;
+
+ u32 rsvd4;
+ u32 rsvd5;
+};
+
+enum rtl8192d_h2c_cmd {
+ H2C_AP_OFFLOAD = 0,
+ H2C_SETPWRMODE = 1,
+ H2C_JOINBSSRPT = 2,
+ H2C_RSVDPAGE = 3,
+ H2C_RSSI_REPORT = 5,
+ H2C_RA_MASK = 6,
+ H2C_MAC_MODE_SEL = 9,
+ H2C_PWRM = 15,
+ MAX_H2CCMD
+};
+
+int rtl92d_download_fw(struct ieee80211_hw *hw);
+void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer);
+void rtl92d_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl92d_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
+void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
new file mode 100644
index 00000000000..0073cf106af
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -0,0 +1,2329 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../regd.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "fw.h"
+#include "led.h"
+#include "sw.h"
+#include "hw.h"
+
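+/*
+ * DBI access helpers: the read path programs the dword-aligned offset
+ * into REG_DBI_CTRL, kicks the transaction via REG_DBI_FLAG, waits
+ * briefly and then fetches the result from REG_DBI_RDATA; the write
+ * path additionally places the data in REG_DBI_WDATA first.
+ */
+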
+u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset, u8 direct)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 value;
+
+ rtl_write_word(rtlpriv, REG_DBI_CTRL, (offset & 0xFFC));
+ rtl_write_byte(rtlpriv, REG_DBI_FLAG, BIT(1) | direct);
+ udelay(10);
+ value = rtl_read_dword(rtlpriv, REG_DBI_RDATA);
+ return value;
+}
+
+void rtl92de_write_dword_dbi(struct ieee80211_hw *hw,
+ u16 offset, u32 value, u8 direct)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_word(rtlpriv, REG_DBI_CTRL, ((offset & 0xFFC) | 0xF000));
+ rtl_write_dword(rtlpriv, REG_DBI_WDATA, value);
+ rtl_write_byte(rtlpriv, REG_DBI_FLAG, BIT(0) | direct);
+}
+
+static void _rtl92de_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+ u8 set_bits, u8 clear_bits)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpci->reg_bcn_ctrl_val |= set_bits;
+ rtlpci->reg_bcn_ctrl_val &= ~clear_bits;
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
+}
+
+static void _rtl92de_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
+ rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte &= ~(BIT(0));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl92de_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
+ rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0x0a);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte |= BIT(0);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl92de_enable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(1));
+}
+
+static void _rtl92de_disable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl92de_set_bcn_ctrl_reg(hw, BIT(1), 0);
+}
+
+void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ switch (variable) {
+ case HW_VAR_RCR:
+ *((u32 *) (val)) = rtlpci->receive_config;
+ break;
+ case HW_VAR_RF_STATE:
+ *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+ break;
+ case HW_VAR_FWLPS_RF_ON:{
+ enum rf_pwrstate rfState;
+ u32 val_rcr;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
+ (u8 *) (&rfState));
+ if (rfState == ERFOFF) {
+ *((bool *) (val)) = true;
+ } else {
+ val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ val_rcr &= 0x00070000;
+ if (val_rcr)
+ *((bool *) (val)) = false;
+ else
+ *((bool *) (val)) = true;
+ }
+ break;
+ }
+ case HW_VAR_FW_PSMODE_STATUS:
+ *((bool *) (val)) = ppsc->fw_current_inpsmode;
+ break;
+ case HW_VAR_CORRECT_TSF:{
+ u64 tsf;
+ u32 *ptsf_low = (u32 *)&tsf;
+ u32 *ptsf_high = ((u32 *)&tsf) + 1;
+
+ *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+ *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+ *((u64 *) (val)) = tsf;
+ break;
+ }
+ case HW_VAR_INT_MIGRATION:
+ *((bool *)(val)) = rtlpriv->dm.interrupt_migration;
+ break;
+ case HW_VAR_INT_AC:
+ *((bool *)(val)) = rtlpriv->dm.disable_tx_int;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+}
+
+void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u8 idx;
+
+ switch (variable) {
+ case HW_VAR_ETHER_ADDR:
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_MACID + idx),
+ val[idx]);
+ }
+ break;
+ case HW_VAR_BASIC_RATE: {
+ u16 rate_cfg = ((u16 *) val)[0];
+ u8 rate_index = 0;
+
+ rate_cfg = rate_cfg & 0x15f;
+ if (mac->vendor == PEER_CISCO &&
+ ((rate_cfg & 0x150) == 0))
+ rate_cfg |= 0x01;
+ rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
+ rtl_write_byte(rtlpriv, REG_RRSR + 1,
+ (rate_cfg >> 8) & 0xff);
+ while (rate_cfg > 0x1) {
+ rate_cfg = (rate_cfg >> 1);
+ rate_index++;
+ }
+ if (rtlhal->fw_version > 0xe)
+ rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
+ rate_index);
+ break;
+ }
+ case HW_VAR_BSSID:
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_BSSID + idx),
+ val[idx]);
+ }
+ break;
+ case HW_VAR_SIFS:
+ rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
+ rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+ if (!mac->ht_enable)
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+ 0x0e0e);
+ else
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+ *((u16 *) val));
+ break;
+ case HW_VAR_SLOT_TIME: {
+ u8 e_aci;
+
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("HW_VAR_SLOT_TIME %x\n", val[0]));
+ rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+ for (e_aci = 0; e_aci < AC_MAX; e_aci++)
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AC_PARAM,
+ (u8 *) (&e_aci));
+ break;
+ }
+ case HW_VAR_ACK_PREAMBLE: {
+ u8 reg_tmp;
+ u8 short_preamble = (bool) (*(u8 *) val);
+
+ reg_tmp = (mac->cur_40_prime_sc) << 5;
+ if (short_preamble)
+ reg_tmp |= 0x80;
+ rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
+ break;
+ }
+ case HW_VAR_AMPDU_MIN_SPACE: {
+ u8 min_spacing_to_set;
+ u8 sec_min_space;
+
+ min_spacing_to_set = *((u8 *) val);
+ if (min_spacing_to_set <= 7) {
+ sec_min_space = 0;
+ if (min_spacing_to_set < sec_min_space)
+ min_spacing_to_set = sec_min_space;
+ mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) |
+ min_spacing_to_set);
+ *val = min_spacing_to_set;
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg));
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ }
+ break;
+ }
+ case HW_VAR_SHORTGI_DENSITY: {
+ u8 density_to_set;
+
+ density_to_set = *((u8 *) val);
+ mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
+ mac->min_space_cfg |= (density_to_set << 3);
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg));
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ break;
+ }
+ case HW_VAR_AMPDU_FACTOR: {
+ u8 factor_toset;
+ u32 regtoSet;
+ u8 *ptmp_byte = NULL;
+ u8 index;
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY)
+ regtoSet = 0xb9726641;
+ else if (rtlhal->macphymode == DUALMAC_SINGLEPHY)
+ regtoSet = 0x66626641;
+ else
+ regtoSet = 0xb972a841;
+ factor_toset = *((u8 *) val);
+ if (factor_toset <= 3) {
+ factor_toset = (1 << (factor_toset + 2));
+ if (factor_toset > 0xf)
+ factor_toset = 0xf;
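+ /* Clamp each 4-bit field of the aggregation
+  * length limit value to the requested factor. */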
+ for (index = 0; index < 4; index++) {
+ ptmp_byte = (u8 *) (&regtoSet) + index;
+ if ((*ptmp_byte & 0xf0) >
+ (factor_toset << 4))
+ *ptmp_byte = (*ptmp_byte & 0x0f)
+ | (factor_toset << 4);
+ if ((*ptmp_byte & 0x0f) > factor_toset)
+ *ptmp_byte = (*ptmp_byte & 0xf0)
+ | (factor_toset);
+ }
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, regtoSet);
+ RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+ ("Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset));
+ }
+ break;
+ }
+ case HW_VAR_AC_PARAM: {
+ u8 e_aci = *((u8 *) val);
+ rtl92d_dm_init_edca_turbo(hw);
+ if (rtlpci->acm_method != eAcmWay2_SW)
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
+ (u8 *) (&e_aci));
+ break;
+ }
+ case HW_VAR_ACM_CTRL: {
+ u8 e_aci = *((u8 *) val);
+ union aci_aifsn *p_aci_aifsn =
+ (union aci_aifsn *)(&(mac->ac[0].aifs));
+ u8 acm = p_aci_aifsn->f.acm;
+ u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+ acm_ctrl = acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1);
+ if (acm) {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl |= ACMHW_BEQEN;
+ break;
+ case AC2_VI:
+ acm_ctrl |= ACMHW_VIQEN;
+ break;
+ case AC3_VO:
+ acm_ctrl |= ACMHW_VOQEN;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("HW_VAR_ACM_CTRL acm set "
+ "failed: eACI is %d\n", acm));
+ break;
+ }
+ } else {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl &= (~ACMHW_BEQEN);
+ break;
+ case AC2_VI:
+ acm_ctrl &= (~ACMHW_VIQEN);
+ break;
+ case AC3_VO:
+ acm_ctrl &= (~ACMHW_VOQEN);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
+ ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
+ "Write 0x%X\n", acm_ctrl));
+ rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+ break;
+ }
+ case HW_VAR_RCR:
+ rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
+ rtlpci->receive_config = ((u32 *) (val))[0];
+ break;
+ case HW_VAR_RETRY_LIMIT: {
+ u8 retry_limit = ((u8 *) (val))[0];
+
+ rtl_write_word(rtlpriv, REG_RL,
+ retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+ retry_limit << RETRY_LIMIT_LONG_SHIFT);
+ break;
+ }
+ case HW_VAR_DUAL_TSF_RST:
+ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+ break;
+ case HW_VAR_EFUSE_BYTES:
+ rtlefuse->efuse_usedbytes = *((u16 *) val);
+ break;
+ case HW_VAR_EFUSE_USAGE:
+ rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ break;
+ case HW_VAR_IO_CMD:
+ rtl92d_phy_set_io_cmd(hw, (*(enum io_type *)val));
+ break;
+ case HW_VAR_WPA_CONFIG:
+ rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ break;
+ case HW_VAR_SET_RPWM:
+ rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (u8 *) (val));
+ break;
+ case HW_VAR_H2C_FW_PWRMODE:
+ break;
+ case HW_VAR_FW_PSMODE_STATUS:
+ ppsc->fw_current_inpsmode = *((bool *) val);
+ break;
+ case HW_VAR_H2C_FW_JOINBSSRPT: {
+ u8 mstatus = (*(u8 *) val);
+ u8 tmp_regcr, tmp_reg422;
+ bool recover = false;
+
+ if (mstatus == RT_MEDIA_CONNECT) {
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AID, NULL);
+ tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ (tmp_regcr | BIT(0)));
+ _rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(3));
+ _rtl92de_set_bcn_ctrl_reg(hw, BIT(4), 0);
+ tmp_reg422 = rtl_read_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2);
+ if (tmp_reg422 & BIT(6))
+ recover = true;
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422 & (~BIT(6)));
+ rtl92d_set_fw_rsvdpagepkt(hw, 0);
+ _rtl92de_set_bcn_ctrl_reg(hw, BIT(3), 0);
+ _rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(4));
+ if (recover)
+ rtl_write_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422);
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ (tmp_regcr & ~(BIT(0))));
+ }
+ rtl92d_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+ break;
+ }
+ case HW_VAR_AID: {
+ u16 u2btmp;
+ u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+ u2btmp &= 0xC000;
+ rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
+ mac->assoc_id));
+ break;
+ }
+ case HW_VAR_CORRECT_TSF: {
+ u8 btype_ibss = ((u8 *) (val))[0];
+
+ if (btype_ibss)
+ _rtl92de_stop_tx_beacon(hw);
+ _rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(3));
+ rtl_write_dword(rtlpriv, REG_TSFTR,
+ (u32) (mac->tsf & 0xffffffff));
+ rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+ (u32) ((mac->tsf >> 32) & 0xffffffff));
+ _rtl92de_set_bcn_ctrl_reg(hw, BIT(3), 0);
+ if (btype_ibss)
+ _rtl92de_resume_tx_beacon(hw);
+
+ break;
+ }
+ case HW_VAR_INT_MIGRATION: {
+ bool int_migration = *(bool *) (val);
+
+ if (int_migration) {
+ /* Set the interrupt migration timer and the
+ * corresponding Tx/Rx counters.
+ * Timer: 25ns * 0xfa0 = 100us for 0xf packets.
+ * 0x306: Rx, 0x307: Tx */
+ rtl_write_dword(rtlpriv, REG_INT_MIG, 0xfe000fa0);
+ rtlpriv->dm.interrupt_migration = int_migration;
+ } else {
+ /* Reset all interrupt migration settings. */
+ rtl_write_dword(rtlpriv, REG_INT_MIG, 0);
+ rtlpriv->dm.interrupt_migration = int_migration;
+ }
+ break;
+ }
+ case HW_VAR_INT_AC: {
+ bool disable_ac_int = *((bool *) val);
+
+ /* Disable four ACs interrupts. */
+ if (disable_ac_int) {
+ /* Disable the VO, VI, BE and BK AC interrupts
+ * to gain more efficient CPU utilization.
+ * When the Rx OK rate is extremely high,
+ * we disable the Tx interrupts.
+ */
+ rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
+ RT_AC_INT_MASKS);
+ rtlpriv->dm.disable_tx_int = disable_ac_int;
+ /* Enable four ACs interrupts. */
+ } else {
+ rtlpriv->cfg->ops->update_interrupt_mask(hw,
+ RT_AC_INT_MASKS, 0);
+ rtlpriv->dm.disable_tx_int = disable_ac_int;
+ }
+ break;
+ }
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+}
+
+static bool _rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool status = true;
+ long count = 0;
+ u32 value = _LLT_INIT_ADDR(address) |
+ _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
+
+ rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
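+ /* Poll until the op field reports that no transaction is
+  * active, or give up once the polling threshold is exceeded. */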
+ do {
+ value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+ if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+ break;
+ if (count > POLLING_LLT_THRESHOLD) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Failed to polling write LLT done at "
+ "address %d!\n", address));
+ status = false;
+ break;
+ }
+ } while (++count);
+ return status;
+}
+
+static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned short i;
+ u8 txpktbuf_bndy;
+ u8 maxPage;
+ bool status;
+ u32 value32; /* High+low page number */
+ u8 value8; /* normal page number */
+
+ if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY) {
+ maxPage = 255;
+ txpktbuf_bndy = 246;
+ value8 = 0;
+ value32 = 0x80bf0d29;
+ } else {
+ maxPage = 127;
+ txpktbuf_bndy = 123;
+ value8 = 0;
+ value32 = 0x80750005;
+ }
+
+ /* Set reserved page for each queue */
+ /* 11. RQPN 0x200[31:0] = 0x80BD1C1C */
+ /* load RQPN */
+ rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
+ rtl_write_dword(rtlpriv, REG_RQPN, value32);
+
+ /* 12. TXRKTBUG_PG_BNDY 0x114[31:0] = 0x27FF00F6 */
+ /* TXRKTBUG_PG_BNDY */
+ rtl_write_dword(rtlpriv, REG_TRXFF_BNDY,
+ (rtl_read_word(rtlpriv, REG_TRXFF_BNDY + 2) << 16 |
+ txpktbuf_bndy));
+
+ /* 13. TDECTRL[15:8] 0x209[7:0] = 0xF6 */
+ /* Beacon Head for TXDMA */
+ rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
+
+ /* 14. BCNQ_PGBNDY 0x424[7:0] = 0xF6 */
+ /* BCNQ_PGBNDY */
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+
+ /* 15. WMAC_LBK_BF_HD 0x45D[7:0] = 0xF6 */
+ /* WMAC_LBK_BF_HD */
+ rtl_write_byte(rtlpriv, 0x45D, txpktbuf_bndy);
+
+ /* Set Tx/Rx page size (Tx must be 128 Bytes, */
+ /* Rx can be 64,128,256,512,1024 bytes) */
+ /* 16. PBP [7:0] = 0x11 */
+ /* TRX page size */
+ rtl_write_byte(rtlpriv, REG_PBP, 0x11);
+
+ /* 17. DRV_INFO_SZ = 0x04 */
+ rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
+
+ /* 18. LLT_table_init(Adapter); */
+ for (i = 0; i < (txpktbuf_bndy - 1); i++) {
+ status = _rtl92de_llt_write(hw, i, i + 1);
+ if (true != status)
+ return status;
+ }
+
+ /* end of list */
+ status = _rtl92de_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
+ if (true != status)
+ return status;
+
+ /* Make the other pages into a ring buffer. */
+ /* This ring buffer is used as the beacon buffer if this */
+ /* MAC is configured for two-MAC transfer; otherwise it */
+ /* is used as the local loopback buffer. */
+ for (i = txpktbuf_bndy; i < maxPage; i++) {
+ status = _rtl92de_llt_write(hw, i, (i + 1));
+ if (true != status)
+ return status;
+ }
+
+ /* Let last entry point to the start entry of ring buffer */
+ status = _rtl92de_llt_write(hw, maxPage, txpktbuf_bndy);
+ if (true != status)
+ return status;
+
+ return true;
+}
+
+static void _rtl92de_gen_refresh_led_state(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+
+ if (rtlpci->up_first_time)
+ return;
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
+ rtl92de_sw_led_on(hw, pLed0);
+ else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
+ rtl92de_sw_led_on(hw, pLed0);
+ else
+ rtl92de_sw_led_off(hw, pLed0);
+}
+
+static bool _rtl92de_init_mac(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ unsigned char bytetmp;
+ unsigned short wordtmp;
+ u16 retry;
+
+ rtl92d_phy_set_poweron(hw);
+ /* Added for the power-domain resume sequence according
+ * to power document V11, chapter V.11. */
+ /* 0. RSV_CTRL 0x1C[7:0] = 0x00 */
+ /* unlock ISO/CLK/Power control register */
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
+ rtl_write_byte(rtlpriv, REG_LDOA15_CTRL, 0x05);
+
+ /* 1. AFE_XTAL_CTRL [7:0] = 0x0F enable XTAL */
+ /* 2. SPS0_CTRL 0x11[7:0] = 0x2b enable SPS into PWM mode */
+ /* 3. delay (1ms) this is not necessary when initially power on */
+
+ /* C. Resume Sequence */
+ /* a. SPS0_CTRL 0x11[7:0] = 0x2b */
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+
+ /* b. AFE_XTAL_CTRL [7:0] = 0x0F */
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, 0x0F);
+
+ /* c. DRV runs power on init flow */
+
+ /* auto enable WLAN */
+ /* 4. APS_FSMCO 0x04[8] = 1; wait till 0x04[8] = 0 */
+ /* Power On Reset for MAC Block */
+ bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) | BIT(0);
+ udelay(2);
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp);
+ udelay(2);
+
+ /* 5. Wait while 0x04[8] == 0 goto 2, otherwise goto 1 */
+ bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
+ udelay(50);
+ retry = 0;
+ while ((bytetmp & BIT(0)) && retry < 1000) {
+ retry++;
+ bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
+ udelay(50);
+ }
+
+ /* Enable Radio off, GPIO, and LED function */
+ /* 6. APS_FSMCO 0x04[15:0] = 0x0012 when enable HWPDN */
+ rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x1012);
+
+ /* release RF digital isolation */
+ /* 7. SYS_ISO_CTRL 0x01[1] = 0x0; */
+ /*Set REG_SYS_ISO_CTRL 0x1=0x82 to prevent wake# problem. */
+ rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL + 1, 0x82);
+ udelay(2);
+
+ /* make sure that BB reset OK. */
+ /* rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); */
+
+ /* Disable REG_CR before enable it to assure reset */
+ rtl_write_word(rtlpriv, REG_CR, 0x0);
+
+ /* Release MAC IO register reset */
+ rtl_write_word(rtlpriv, REG_CR, 0x2ff);
+
+ /* clear stopping tx/rx dma */
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0x0);
+
+ /* rtl_write_word(rtlpriv,REG_CR+2, 0x2); */
+
+ /* System init */
+ /* 18. LLT_table_init(Adapter); */
+ if (_rtl92de_llt_table_init(hw) == false)
+ return false;
+
+ /* Clear interrupt and enable interrupt */
+ /* 19. HISR 0x124[31:0] = 0xffffffff; */
+ /* HISRE 0x12C[7:0] = 0xFF */
+ rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
+ rtl_write_byte(rtlpriv, REG_HISRE, 0xff);
+
+ /* 20. HIMR 0x120[31:0] |= [enable INT mask bit map]; */
+ /* 21. HIMRE 0x128[7:0] = [enable INT mask bit map] */
+ /* The IMR should be enabled later after all init sequence
+ * is finished. */
+
+ /* 22. PCIE configuration space configuration */
+ /* 23. Ensure PCIe Device 0x80[15:0] = 0x0143 (ASPM+CLKREQ), */
+ /* and PCIe gated clock function is enabled. */
+ /* The PCIe configuration space will be written after
+ * the whole init sequence (or by the BIOS). */
+
+ rtl92d_phy_config_maccoexist_rfpage(hw);
+
+ /* The section below is not related to power document Vxx. */
+ /* It is only used for driver and OS settings. */
+ /* -------------------Software Relative Setting---------------------- */
+ wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL);
+ wordtmp &= 0xf;
+ wordtmp |= 0xF771;
+ rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp);
+
+ /* Report Tx status from HW for rate adaptation. */
+ /* This should correspond to power-on step 14, but document V11 */
+ /* still does not contain the description. */
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F);
+
+ /* Set Tx/Rx page size (Tx must be 128 Bytes,
+ * Rx can be 64,128,256,512,1024 bytes) */
+ /* rtl_write_byte(rtlpriv,REG_PBP, 0x11); */
+
+ /* Set RCR register */
+ rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+ /* rtl_write_byte(rtlpriv,REG_RX_DRVINFO_SZ, 4); */
+
+ /* Set TCR register */
+ rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config);
+
+ /* disable earlymode */
+ rtl_write_byte(rtlpriv, 0x4d0, 0x0);
+
+ /* Set TX/RX descriptor physical address(from OS API). */
+ rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
+ rtlpci->tx_ring[BEACON_QUEUE].dma);
+ rtl_write_dword(rtlpriv, REG_MGQ_DESA, rtlpci->tx_ring[MGNT_QUEUE].dma);
+ rtl_write_dword(rtlpriv, REG_VOQ_DESA, rtlpci->tx_ring[VO_QUEUE].dma);
+ rtl_write_dword(rtlpriv, REG_VIQ_DESA, rtlpci->tx_ring[VI_QUEUE].dma);
+ rtl_write_dword(rtlpriv, REG_BEQ_DESA, rtlpci->tx_ring[BE_QUEUE].dma);
+ rtl_write_dword(rtlpriv, REG_BKQ_DESA, rtlpci->tx_ring[BK_QUEUE].dma);
+ rtl_write_dword(rtlpriv, REG_HQ_DESA, rtlpci->tx_ring[HIGH_QUEUE].dma);
+ /* Set RX Desc Address */
+ rtl_write_dword(rtlpriv, REG_RX_DESA,
+ rtlpci->rx_ring[RX_MPDU_QUEUE].dma);
+
+ /* If we want to support 64-bit DMA, we should set it here,
+ * but 64-bit DMA is not supported for now. */
+
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, 0x33);
+
+ /* Reset interrupt migration setting when initialization */
+ rtl_write_dword(rtlpriv, REG_INT_MIG, 0);
+
+ /* Reconsider when to do this operation after asking HWSD. */
+ bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, bytetmp & ~BIT(6));
+ retry = 0;
+ do {
+ retry++;
+ bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
+ } while ((retry < 200) && !(bytetmp & BIT(7)));
+
+ /* After the MAC I/O reset, we must refresh the LED state. */
+ _rtl92de_gen_refresh_led_state(hw);
+
+ /* Reset H2C protection register */
+ rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0);
+
+ return true;
+}
+
+static void _rtl92de_hw_configure(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 reg_bw_opmode = BW_OPMODE_20MHZ;
+ u32 reg_rrsr;
+
+ reg_rrsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+ rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8);
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+ rtl_write_dword(rtlpriv, REG_RRSR, reg_rrsr);
+ rtl_write_byte(rtlpriv, REG_SLOT, 0x09);
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, 0x0);
+ rtl_write_word(rtlpriv, REG_FWHW_TXQ_CTRL, 0x1F80);
+ rtl_write_word(rtlpriv, REG_RL, 0x0707);
+ rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x02012802);
+ rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
+ rtl_write_dword(rtlpriv, REG_DARFRC, 0x01000000);
+ rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x07060504);
+ rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000);
+ rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504);
+ /* Aggregation threshold */
+ if (rtlhal->macphymode == DUALMAC_DUALPHY)
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0xb9726641);
+ else if (rtlhal->macphymode == DUALMAC_SINGLEPHY)
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x66626641);
+ else
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0xb972a841);
+ rtl_write_byte(rtlpriv, REG_ATIMWND, 0x2);
+ rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0x0a);
+ rtlpci->reg_bcn_ctrl_val = 0x1f;
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, rtlpci->reg_bcn_ctrl_val);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+ rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
+ rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+ rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
+ /* For throughput */
+ rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0x6666);
+ /* ACKTO for IOT issue. */
+ rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
+ /* Set Spec SIFS (used in NAV) */
+ rtl_write_word(rtlpriv, REG_SPEC_SIFS, 0x1010);
+ rtl_write_word(rtlpriv, REG_MAC_SPEC_SIFS, 0x1010);
+ /* Set SIFS for CCK */
+ rtl_write_word(rtlpriv, REG_SIFS_CTX, 0x1010);
+ /* Set SIFS for OFDM */
+ rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x1010);
+ /* Set Multicast Address. */
+ rtl_write_dword(rtlpriv, REG_MAR, 0xffffffff);
+ rtl_write_dword(rtlpriv, REG_MAR + 4, 0xffffffff);
+ switch (rtlpriv->phy.rf_type) {
+ case RF_1T2R:
+ case RF_1T1R:
+ rtlhal->minspace_cfg = (MAX_MSS_DENSITY_1T << 3);
+ break;
+ case RF_2T2R:
+ case RF_2T2R_GREEN:
+ rtlhal->minspace_cfg = (MAX_MSS_DENSITY_2T << 3);
+ break;
+ }
+}
+
+static void _rtl92de_enable_aspm_back_door(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ rtl_write_byte(rtlpriv, 0x34b, 0x93);
+ rtl_write_word(rtlpriv, 0x350, 0x870c);
+ rtl_write_byte(rtlpriv, 0x352, 0x1);
+ if (ppsc->support_backdoor)
+ rtl_write_byte(rtlpriv, 0x349, 0x1b);
+ else
+ rtl_write_byte(rtlpriv, 0x349, 0x03);
+ rtl_write_word(rtlpriv, 0x350, 0x2718);
+ rtl_write_byte(rtlpriv, 0x352, 0x1);
+}
+
+void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 sec_reg_value;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm));
+ if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("not open hw encryption\n"));
+ return;
+ }
+ sec_reg_value = SCR_TXENCENABLE | SCR_RXENCENABLE;
+ if (rtlpriv->sec.use_defaultkey) {
+ sec_reg_value |= SCR_TXUSEDK;
+ sec_reg_value |= SCR_RXUSEDK;
+ }
+ sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+ rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ ("The SECR-value %x\n", sec_reg_value));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+}
+
+int rtl92de_hw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool rtstatus = true;
+ u8 tmp_u1b;
+ int i;
+ int err;
+ unsigned long flags;
+
+ rtlpci->being_init_adapter = true;
+ rtlpci->init_ready = false;
+ spin_lock_irqsave(&globalmutex_for_power_and_efuse, flags);
+ /* we should do iqk after disable/enable */
+ rtl92d_phy_reset_iqk_result(hw);
+ /* rtlpriv->intf_ops->disable_aspm(hw); */
+ rtstatus = _rtl92de_init_mac(hw);
+ if (rtstatus != true) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Init MAC failed\n"));
+ err = 1;
+ spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags);
+ return err;
+ }
+ err = rtl92d_download_fw(hw);
+ spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("Failed to download FW. Init HW "
+ "without FW..\n"));
+ rtlhal->fw_ready = false;
+ return 1;
+ } else {
+ rtlhal->fw_ready = true;
+ }
+ rtlhal->last_hmeboxnum = 0;
+ rtlpriv->psc.fw_current_inpsmode = false;
+
+ tmp_u1b = rtl_read_byte(rtlpriv, 0x605);
+ tmp_u1b = tmp_u1b | 0x30;
+ rtl_write_byte(rtlpriv, 0x605, tmp_u1b);
+
+ if (rtlhal->earlymode_enable) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("EarlyMode Enabled!!!\n"));
+
+ tmp_u1b = rtl_read_byte(rtlpriv, 0x4d0);
+ tmp_u1b = tmp_u1b | 0x1f;
+ rtl_write_byte(rtlpriv, 0x4d0, tmp_u1b);
+
+ rtl_write_byte(rtlpriv, 0x4d3, 0x80);
+
+ tmp_u1b = rtl_read_byte(rtlpriv, 0x605);
+ tmp_u1b = tmp_u1b | 0x40;
+ rtl_write_byte(rtlpriv, 0x605, tmp_u1b);
+ }
+
+ if (mac->rdg_en) {
+ rtl_write_byte(rtlpriv, REG_RD_CTRL, 0xff);
+ rtl_write_word(rtlpriv, REG_RD_NAV_NXT, 0x200);
+ rtl_write_byte(rtlpriv, REG_RD_RESP_PKT_TH, 0x05);
+ }
+
+ rtl92d_phy_mac_config(hw);
+ /* The function above modifies RCR, so update the receive_config
+ * variable here; otherwise throughput becomes unstable because
+ * receive_config is stale. RCR_ACRC32 causes unstable throughput,
+ * and RCR_APP_ICV makes mac80211 disassociate from a Cisco 1252. */
+ rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
+ rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
+
+ rtl92d_phy_bb_config(hw);
+
+ rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
+ /* set before initialize RF */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
+
+ /* config RF */
+ rtl92d_phy_rf_config(hw);
+
+ /* After reading the predefined TXT, set the BB/MAC/RF
+ * registers as required. */
+ /* After loading the BB/RF parameters, more work is needed for 92D. */
+ rtl92d_update_bbrf_configuration(hw);
+ /* set default value after initialize RF, */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0);
+ rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
+ RF_CHNLBW, BRFREGOFFSETMASK);
+ rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
+ RF_CHNLBW, BRFREGOFFSETMASK);
+
+ /*---- Set CCK and OFDM Block "ON"----*/
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
+ if (rtlhal->interfaceindex == 0) {
+ /* RFPGA0_ANALOGPARAMETER2: cck clock select,
+ * set to 20MHz by default */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10) |
+ BIT(11), 3);
+ } else {
+ /* Mac1 */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(11) |
+ BIT(10), 3);
+ }
+
+ _rtl92de_hw_configure(hw);
+
+ /* reset hw sec */
+ rtl_cam_reset_all_entry(hw);
+ rtl92de_enable_hw_security_config(hw);
+
+ /* Read EEPROM TX power index and PHY_REG_PG.txt to capture correct */
+ /* TX power index for different rate set. */
+ rtl92d_phy_get_hw_reg_originalvalue(hw);
+ rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
+
+ ppsc->rfpwr_state = ERFON;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+
+ _rtl92de_enable_aspm_back_door(hw);
+ /* rtlpriv->intf_ops->enable_aspm(hw); */
+
+ rtl92d_dm_init(hw);
+ rtlpci->being_init_adapter = false;
+
+ if (ppsc->rfpwr_state == ERFON) {
+ rtl92d_phy_lc_calibrate(hw);
+ /* 5G and 2.4G must wait some time for the RF LO to become ready */
+ if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+ u32 tmp_rega;
+ for (i = 0; i < 10000; i++) {
+ udelay(MAX_STALL_TIME);
+
+ tmp_rega = rtl_get_rfreg(hw,
+ (enum radio_path)RF90_PATH_A,
+ 0x2a, BMASKDWORD);
+
+ if (((tmp_rega & BIT(11)) == BIT(11)))
+ break;
+ }
+ /* check that loop was successful. If not, exit now */
+ if (i == 10000) {
+ rtlpci->init_ready = false;
+ return 1;
+ }
+ }
+ }
+ rtlpci->init_ready = true;
+ return err;
+}
+
+static enum version_8192d _rtl92de_read_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ enum version_8192d version = VERSION_NORMAL_CHIP_92D_SINGLEPHY;
+ u32 value32;
+
+ value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
+ if (!(value32 & 0x000f0000)) {
+ version = VERSION_TEST_CHIP_92D_SINGLEPHY;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("TEST CHIP!!!\n"));
+ } else {
+ version = VERSION_NORMAL_CHIP_92D_SINGLEPHY;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Normal CHIP!!!\n"));
+ }
+ return version;
+}
+
+static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
+ enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+ u8 bcnfunc_enable;
+
+ bt_msr &= 0xfc;
+
+ if (type == NL80211_IFTYPE_UNSPECIFIED ||
+ type == NL80211_IFTYPE_STATION) {
+ _rtl92de_stop_tx_beacon(hw);
+ _rtl92de_enable_bcn_sub_func(hw);
+ } else if (type == NL80211_IFTYPE_ADHOC ||
+ type == NL80211_IFTYPE_AP) {
+ _rtl92de_resume_tx_beacon(hw);
+ _rtl92de_disable_bcn_sub_func(hw);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("Set HW_VAR_MEDIA_STATUS: No such media "
+ "status(%x).\n", type));
+ }
+ bcnfunc_enable = rtl_read_byte(rtlpriv, REG_BCN_CTRL);
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ bt_msr |= MSR_NOLINK;
+ ledaction = LED_CTL_LINK;
+ bcnfunc_enable &= 0xF7;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to NO LINK!\n"));
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ bt_msr |= MSR_ADHOC;
+ bcnfunc_enable |= 0x08;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to Ad Hoc!\n"));
+ break;
+ case NL80211_IFTYPE_STATION:
+ bt_msr |= MSR_INFRA;
+ ledaction = LED_CTL_LINK;
+ bcnfunc_enable &= 0xF7;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to STA!\n"));
+ break;
+ case NL80211_IFTYPE_AP:
+ bt_msr |= MSR_AP;
+ bcnfunc_enable |= 0x08;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Set Network type to AP!\n"));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Network type %d not support!\n", type));
+ return 1;
+ break;
+
+ }
+ rtl_write_byte(rtlpriv, REG_CR + 2, bt_msr);
+ rtlpriv->cfg->ops->led_control(hw, ledaction);
+ if ((bt_msr & 0xfc) == MSR_AP)
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+ else
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+ return 0;
+}
+
+void rtl92de_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u32 reg_rcr = rtlpci->receive_config;
+
+ if (rtlpriv->psc.rfpwr_state != ERFON)
+ return;
+ if (check_bssid) {
+ reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+ _rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(4));
+ } else if (check_bssid == false) {
+ reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+ _rtl92de_set_bcn_ctrl_reg(hw, BIT(4), 0);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+ }
+}
+
+int rtl92de_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (_rtl92de_set_media_status(hw, type))
+ return -EOPNOTSUPP;
+
+ /* check bssid */
+ if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
+ if (type != NL80211_IFTYPE_AP)
+ rtl92de_set_check_bssid(hw, true);
+ } else {
+ rtl92de_set_check_bssid(hw, false);
+ }
+ return 0;
+}
+
+/* do iqk or reload iqk */
+/* The Windows driver only does rtl92d_phy_reload_iqk_setting in
+ * set channel, but the time sequence there is very strict, so we
+ * add rtl92d_phy_reload_iqk_setting here as well. */
+void rtl92d_linked_set_reg(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 indexforchannel;
+ u8 channel = rtlphy->current_channel;
+
+ indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
+ if (!rtlphy->iqk_matrix_regsetting[indexforchannel].iqk_done) {
+ RT_TRACE(rtlpriv, COMP_SCAN | COMP_INIT, DBG_DMESG,
+ ("Do IQK for channel:%d.\n", channel));
+ rtl92d_phy_iq_calibrate(hw);
+ }
+}
+
+/* Don't set REG_EDCA_BE_PARAM here because
+ * mac80211 will send packets while scanning. */
+void rtl92de_set_qos(struct ieee80211_hw *hw, int aci)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ rtl92d_dm_init_edca_turbo(hw);
+ return;
+ switch (aci) {
+ case AC1_BK:
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
+ break;
+ case AC0_BE:
+ break;
+ case AC2_VI:
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
+ break;
+ case AC3_VO:
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
+ break;
+ default:
+ RT_ASSERT(false, ("invalid aci: %d !\n", aci));
+ break;
+ }
+}
+
+void rtl92de_enable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
+ rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+}
+
+void rtl92de_disable_interrupt(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
+ rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
+ synchronize_irq(rtlpci->pdev->irq);
+}
+
+static void _rtl92de_poweroff_adapter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 u1b_tmp;
+ unsigned long flags;
+
+ rtlpriv->intf_ops->enable_aspm(hw);
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFPARAMETER, BIT(3), 0);
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFPARAMETER, BIT(15), 0);
+
+ /* 0x20:value 05-->04 */
+ rtl_write_byte(rtlpriv, REG_LDOA15_CTRL, 0x04);
+
+ /* ==== Reset digital sequence ====== */
+ rtl92d_firmware_selfreset(hw);
+
+ /* f. SYS_FUNC_EN 0x03[7:0]=0x51 reset MCU, MAC register, DCORE */
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x51);
+
+ /* g. MCUFWDL 0x80[1:0]=0 reset MCU ready status */
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+
+ /* ==== Pull GPIO PIN to balance level and LED control ====== */
+
+ /* h. GPIO_PIN_CTRL 0x44[31:0]=0x000 */
+ rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00000000);
+
+ /* i. Value = GPIO_PIN_CTRL[7:0] */
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL);
+
+ /* j. GPIO_PIN_CTRL 0x44[31:0] = 0x00FF0000 | (value <<8); */
+ /* write external PIN level */
+ rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL,
+ 0x00FF0000 | (u1b_tmp << 8));
+
+ /* k. GPIO_MUXCFG 0x42 [15:0] = 0x0780 */
+ rtl_write_word(rtlpriv, REG_GPIO_IO_SEL, 0x0790);
+
+ /* l. LEDCFG 0x4C[15:0] = 0x8080 */
+ rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
+
+ /* ==== Disable analog sequence === */
+
+ /* m. AFE_PLL_CTRL[7:0] = 0x80 disable PLL */
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
+
+ /* n. SPS0_CTRL 0x11[7:0] = 0x22 enter PFM mode */
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
+
+ /* o. AFE_XTAL_CTRL 0x24[7:0] = 0x0E disable XTAL, if No BT COEX */
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, 0x0e);
+
+ /* p. RSV_CTRL 0x1C[7:0] = 0x0E lock ISO/CLK/Power control register */
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
+
+ /* ==== interface into suspend === */
+
+ /* q. APS_FSMCO[15:8] = 0x58 PCIe suspend mode */
+ /* According to power document V11, this value should be set */
+ /* to 0x18; otherwise, we may sometimes fail to enter L0s. */
+ /* This influences power consumption. Based on SD1's tests, */
+ /* setting it to 0x00 does not affect the power current, and */
+ /* with 0x18 they have seen auto-load failures. */
+ rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, 0x10);
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("In PowerOff,reg0x%x=%X\n", REG_SPS0_CTRL,
+ rtl_read_byte(rtlpriv, REG_SPS0_CTRL)));
+ /* r. Note: for PCIe interface, PON will not turn */
+ /* off m-bias and BandGap in PCIe suspend mode. */
+
+ /* 0x17[7] 1b': power off in process 0b' : power off over */
+ if (rtlpriv->rtlhal.macphymode != SINGLEMAC_SINGLEPHY) {
+ spin_lock_irqsave(&globalmutex_power, flags);
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS);
+ u1b_tmp &= (~BIT(7));
+ rtl_write_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS, u1b_tmp);
+ spin_unlock_irqrestore(&globalmutex_power, flags);
+ }
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("<=======\n"));
+}
+
+void rtl92de_card_disable(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum nl80211_iftype opmode;
+
+ mac->link_state = MAC80211_NOLINK;
+ opmode = NL80211_IFTYPE_UNSPECIFIED;
+ _rtl92de_set_media_status(hw, opmode);
+
+ if (rtlpci->driver_is_goingto_unload ||
+ ppsc->rfoff_reason > RF_CHANGE_BY_PS)
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ /* Power sequence for each MAC. */
+ /* a. stop tx DMA */
+ /* b. close RF */
+ /* c. clear rx buf */
+ /* d. stop rx DMA */
+ /* e. reset MAC */
+
+ /* a. stop tx DMA */
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xFE);
+ udelay(50);
+
+ /* b. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue */
+
+ /* c. ========RF OFF sequence========== */
+ /* 0x88c[23:20] = 0xf. */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00);
+
+ /* APSD_CTRL 0x600[7:0] = 0x40 */
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+
+ /* Close antenna 0,0xc04,0xd04 */
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0);
+ rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0);
+
+ /* SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB state machine */
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+
+ /* MAC0 cannot do a global reset; MAC1 can. */
+ /* SYS_FUNC_EN 0x02[7:0] = 0xE0 reset BB state machine */
+ if (rtlpriv->rtlhal.interfaceindex == 1)
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE0);
+ udelay(50);
+
+ /* d. Stop tx/rx DMA before disabling REG_CR (0x100) to fix a */
+ /* DMA hang when the device is disabled/enabled. */
+ rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xff);
+ udelay(50);
+ rtl_write_byte(rtlpriv, REG_CR, 0x0);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("==> Do power off.......\n"));
+ if (rtl92d_phy_check_poweroff(hw))
+ _rtl92de_poweroff_adapter(hw);
+ return;
+}
+
+void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+ rtl_write_dword(rtlpriv, ISR, *p_inta);
+
+ /*
+ * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+ * rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
+ */
+}
+
+void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval, atim_window;
+
+ bcn_interval = mac->beacon_interval;
+ atim_window = 2;
+ /*rtl92de_disable_interrupt(hw); */
+ rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x20);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x30);
+ else
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x20);
+ rtl_write_byte(rtlpriv, 0x606, 0x30);
+}
+
+void rtl92de_set_beacon_interval(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval = mac->beacon_interval;
+
+ RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
+ ("beacon_interval:%d\n", bcn_interval));
+ /* rtl92de_disable_interrupt(hw); */
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ /* rtl92de_enable_interrupt(hw); */
+}
+
+void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
+ ("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr));
+ if (add_msr)
+ rtlpci->irq_mask[0] |= add_msr;
+ if (rm_msr)
+ rtlpci->irq_mask[0] &= (~rm_msr);
+ rtl92de_disable_interrupt(hw);
+ rtl92de_enable_interrupt(hw);
+}
+
+static void _rtl92de_readpowervalue_fromprom(struct txpower_info *pwrinfo,
+ u8 *rom_content, bool autoLoadfail)
+{
+ u32 rfpath, eeaddr, group, offset1, offset2;
+ u8 i;
+
+ memset(pwrinfo, 0, sizeof(struct txpower_info));
+ if (autoLoadfail) {
+ for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
+ if (group < CHANNEL_GROUP_MAX_2G) {
+ pwrinfo->cck_index[rfpath][group] =
+ EEPROM_DEFAULT_TXPOWERLEVEL_2G;
+ pwrinfo->ht40_1sindex[rfpath][group] =
+ EEPROM_DEFAULT_TXPOWERLEVEL_2G;
+ } else {
+ pwrinfo->ht40_1sindex[rfpath][group] =
+ EEPROM_DEFAULT_TXPOWERLEVEL_5G;
+ }
+ pwrinfo->ht40_2sindexdiff[rfpath][group] =
+ EEPROM_DEFAULT_HT40_2SDIFF;
+ pwrinfo->ht20indexdiff[rfpath][group] =
+ EEPROM_DEFAULT_HT20_DIFF;
+ pwrinfo->ofdmindexdiff[rfpath][group] =
+ EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
+ pwrinfo->ht40maxoffset[rfpath][group] =
+ EEPROM_DEFAULT_HT40_PWRMAXOFFSET;
+ pwrinfo->ht20maxoffset[rfpath][group] =
+ EEPROM_DEFAULT_HT20_PWRMAXOFFSET;
+ }
+ }
+ for (i = 0; i < 3; i++) {
+ pwrinfo->tssi_a[i] = EEPROM_DEFAULT_TSSI;
+ pwrinfo->tssi_b[i] = EEPROM_DEFAULT_TSSI;
+ }
+ return;
+ }
+
+ /* Autoload may be OK, but the tx power index values may not be
+ * filled. If we find an unfilled value, set it to the default. */
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
+ for (group = 0; group < CHANNEL_GROUP_MAX_2G; group++) {
+ eeaddr = EEPROM_CCK_TX_PWR_INX_2G + (rfpath * 3)
+ + group;
+ pwrinfo->cck_index[rfpath][group] =
+ (rom_content[eeaddr] == 0xFF) ?
+ (eeaddr > 0x7B ?
+ EEPROM_DEFAULT_TXPOWERLEVEL_5G :
+ EEPROM_DEFAULT_TXPOWERLEVEL_2G) :
+ rom_content[eeaddr];
+ }
+ }
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
+ for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
+ offset1 = group / 3;
+ offset2 = group % 3;
+ eeaddr = EEPROM_HT40_1S_TX_PWR_INX_2G + (rfpath * 3) +
+ offset2 + offset1 * 21;
+ pwrinfo->ht40_1sindex[rfpath][group] =
+ (rom_content[eeaddr] == 0xFF) ? (eeaddr > 0x7B ?
+ EEPROM_DEFAULT_TXPOWERLEVEL_5G :
+ EEPROM_DEFAULT_TXPOWERLEVEL_2G) :
+ rom_content[eeaddr];
+ }
+ }
+ /* These are just for the 92D efuse offsets. */
+ for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
+ int base1 = EEPROM_HT40_2S_TX_PWR_INX_DIFF_2G;
+
+ offset1 = group / 3;
+ offset2 = group % 3;
+
+ if (rom_content[base1 + offset2 + offset1 * 21] != 0xFF)
+ pwrinfo->ht40_2sindexdiff[rfpath][group] =
+ (rom_content[base1 +
+ offset2 + offset1 * 21] >> (rfpath * 4))
+ & 0xF;
+ else
+ pwrinfo->ht40_2sindexdiff[rfpath][group] =
+ EEPROM_DEFAULT_HT40_2SDIFF;
+ if (rom_content[EEPROM_HT20_TX_PWR_INX_DIFF_2G + offset2
+ + offset1 * 21] != 0xFF)
+ pwrinfo->ht20indexdiff[rfpath][group] =
+ (rom_content[EEPROM_HT20_TX_PWR_INX_DIFF_2G
+ + offset2 + offset1 * 21] >> (rfpath * 4))
+ & 0xF;
+ else
+ pwrinfo->ht20indexdiff[rfpath][group] =
+ EEPROM_DEFAULT_HT20_DIFF;
+ if (rom_content[EEPROM_OFDM_TX_PWR_INX_DIFF_2G + offset2
+ + offset1 * 21] != 0xFF)
+ pwrinfo->ofdmindexdiff[rfpath][group] =
+ (rom_content[EEPROM_OFDM_TX_PWR_INX_DIFF_2G
+ + offset2 + offset1 * 21] >> (rfpath * 4))
+ & 0xF;
+ else
+ pwrinfo->ofdmindexdiff[rfpath][group] =
+ EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
+ if (rom_content[EEPROM_HT40_MAX_PWR_OFFSET_2G + offset2
+ + offset1 * 21] != 0xFF)
+ pwrinfo->ht40maxoffset[rfpath][group] =
+ (rom_content[EEPROM_HT40_MAX_PWR_OFFSET_2G
+ + offset2 + offset1 * 21] >> (rfpath * 4))
+ & 0xF;
+ else
+ pwrinfo->ht40maxoffset[rfpath][group] =
+ EEPROM_DEFAULT_HT40_PWRMAXOFFSET;
+ if (rom_content[EEPROM_HT20_MAX_PWR_OFFSET_2G + offset2
+ + offset1 * 21] != 0xFF)
+ pwrinfo->ht20maxoffset[rfpath][group] =
+ (rom_content[EEPROM_HT20_MAX_PWR_OFFSET_2G +
+ offset2 + offset1 * 21] >> (rfpath * 4)) &
+ 0xF;
+ else
+ pwrinfo->ht20maxoffset[rfpath][group] =
+ EEPROM_DEFAULT_HT20_PWRMAXOFFSET;
+ }
+ }
+ if (rom_content[EEPROM_TSSI_A_5G] != 0xFF) {
+ /* 5GL */
+ pwrinfo->tssi_a[0] = rom_content[EEPROM_TSSI_A_5G] & 0x3F;
+ pwrinfo->tssi_b[0] = rom_content[EEPROM_TSSI_B_5G] & 0x3F;
+ /* 5GM */
+ pwrinfo->tssi_a[1] = rom_content[EEPROM_TSSI_AB_5G] & 0x3F;
+ pwrinfo->tssi_b[1] =
+ (rom_content[EEPROM_TSSI_AB_5G] & 0xC0) >> 6 |
+ (rom_content[EEPROM_TSSI_AB_5G + 1] & 0x0F) << 2;
+ /* 5GH */
+ pwrinfo->tssi_a[2] = (rom_content[EEPROM_TSSI_AB_5G + 1] &
+ 0xF0) >> 4 |
+ (rom_content[EEPROM_TSSI_AB_5G + 2] & 0x03) << 4;
+ pwrinfo->tssi_b[2] = (rom_content[EEPROM_TSSI_AB_5G + 2] &
+ 0xFC) >> 2;
+ } else {
+ for (i = 0; i < 3; i++) {
+ pwrinfo->tssi_a[i] = EEPROM_DEFAULT_TSSI;
+ pwrinfo->tssi_b[i] = EEPROM_DEFAULT_TSSI;
+ }
+ }
+}
+
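+/*
+ * Read the TX power information: _rtl92de_readpowervalue_fromprom()
+ * fills a struct txpower_info from the efuse image (or from defaults
+ * when autoload failed), the regulatory, thermal-meter and crystal-cap
+ * fields are sanity-checked, and the per-group indexes are expanded
+ * into the per-channel tables kept in rtl_efuse.
+ */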
+static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
+ bool autoload_fail, u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct txpower_info pwrinfo;
+ u8 tempval[2], i, pwr, diff;
+ u32 ch, rfPath, group;
+
+ _rtl92de_readpowervalue_fromprom(&pwrinfo, hwinfo, autoload_fail);
+ if (!autoload_fail) {
+ /* bit0~2 */
+ rtlefuse->eeprom_regulatory = (hwinfo[EEPROM_RF_OPT1] & 0x7);
+ rtlefuse->eeprom_thermalmeter =
+ hwinfo[EEPROM_THERMAL_METER] & 0x1f;
+ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_K];
+ tempval[0] = hwinfo[EEPROM_IQK_DELTA] & 0x03;
+ tempval[1] = (hwinfo[EEPROM_LCK_DELTA] & 0x0C) >> 2;
+ rtlefuse->txpwr_fromeprom = true;
+ if (IS_92D_D_CUT(rtlpriv->rtlhal.version)) {
+ rtlefuse->internal_pa_5g[0] =
+ !((hwinfo[EEPROM_TSSI_A_5G] &
+ BIT(6)) >> 6);
+ rtlefuse->internal_pa_5g[1] =
+ !((hwinfo[EEPROM_TSSI_B_5G] &
+ BIT(6)) >> 6);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("Is D cut, Internal PA0 %d Internal PA1 %d\n",
+ rtlefuse->internal_pa_5g[0],
+ rtlefuse->internal_pa_5g[1]));
+ }
+ rtlefuse->eeprom_c9 = hwinfo[EEPROM_RF_OPT6];
+ rtlefuse->eeprom_cc = hwinfo[EEPROM_RF_OPT7];
+ } else {
+ rtlefuse->eeprom_regulatory = 0;
+ rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
+ rtlefuse->crystalcap = EEPROM_DEFAULT_CRYSTALCAP;
+ tempval[0] = tempval[1] = 3;
+ }
+
+ /* Use default values to fill in the parameters if
+ * the efuse is not filled in some places. */
+
+ /* ThermalMeter from EEPROM */
+ if (rtlefuse->eeprom_thermalmeter < 0x06 ||
+ rtlefuse->eeprom_thermalmeter > 0x1c)
+ rtlefuse->eeprom_thermalmeter = 0x12;
+ rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+
+ /* check XTAL_K */
+ if (rtlefuse->crystalcap == 0xFF)
+ rtlefuse->crystalcap = 0;
+ if (rtlefuse->eeprom_regulatory > 3)
+ rtlefuse->eeprom_regulatory = 0;
+
+ for (i = 0; i < 2; i++) {
+ switch (tempval[i]) {
+ case 0:
+ tempval[i] = 5;
+ break;
+ case 1:
+ tempval[i] = 4;
+ break;
+ case 2:
+ tempval[i] = 3;
+ break;
+ case 3:
+ default:
+ tempval[i] = 0;
+ break;
+ }
+ }
+
+ rtlefuse->delta_iqk = tempval[0];
+ if (tempval[1] > 0)
+ rtlefuse->delta_lck = tempval[1] - 1;
+ if (rtlefuse->eeprom_c9 == 0xFF)
+ rtlefuse->eeprom_c9 = 0x00;
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
+ ("EEPROMRegulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
+ ("ThermalMeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
+ ("CrystalCap = 0x%x\n", rtlefuse->crystalcap));
+ RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
+ ("Delta_IQK = 0x%x Delta_LCK = 0x%x\n", rtlefuse->delta_iqk,
+ rtlefuse->delta_lck));
+
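+ /* Expand the per-group power data into per-channel tables; the
+ * 2-stream HT40 level is the 1-stream index minus the 2S diff,
+ * clamped at zero. */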
+ for (rfPath = 0; rfPath < RF6052_MAX_PATH; rfPath++) {
+ for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
+ group = rtl92d_get_chnlgroup_fromarray((u8) ch);
+ if (ch < CHANNEL_MAX_NUMBER_2G)
+ rtlefuse->txpwrlevel_cck[rfPath][ch] =
+ pwrinfo.cck_index[rfPath][group];
+ rtlefuse->txpwrlevel_ht40_1s[rfPath][ch] =
+ pwrinfo.ht40_1sindex[rfPath][group];
+ rtlefuse->txpwr_ht20diff[rfPath][ch] =
+ pwrinfo.ht20indexdiff[rfPath][group];
+ rtlefuse->txpwr_legacyhtdiff[rfPath][ch] =
+ pwrinfo.ofdmindexdiff[rfPath][group];
+ rtlefuse->pwrgroup_ht20[rfPath][ch] =
+ pwrinfo.ht20maxoffset[rfPath][group];
+ rtlefuse->pwrgroup_ht40[rfPath][ch] =
+ pwrinfo.ht40maxoffset[rfPath][group];
+ pwr = pwrinfo.ht40_1sindex[rfPath][group];
+ diff = pwrinfo.ht40_2sindexdiff[rfPath][group];
+ rtlefuse->txpwrlevel_ht40_2s[rfPath][ch] =
+ (pwr > diff) ? (pwr - diff) : 0;
+ }
+ }
+}
+
+static void _rtl92de_read_macphymode_from_prom(struct ieee80211_hw *hw,
+ u8 *content)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 macphy_crvalue = content[EEPROM_MAC_FUNCTION];
+
+ if (macphy_crvalue & BIT(3)) {
+ rtlhal->macphymode = SINGLEMAC_SINGLEPHY;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("MacPhyMode SINGLEMAC_SINGLEPHY\n"));
+ } else {
+ rtlhal->macphymode = DUALMAC_DUALPHY;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("MacPhyMode DUALMAC_DUALPHY\n"));
+ }
+}
+
+static void _rtl92de_read_macphymode_and_bandtype(struct ieee80211_hw *hw,
+ u8 *content)
+{
+ _rtl92de_read_macphymode_from_prom(hw, content);
+ rtl92d_phy_config_macphymode(hw);
+ rtl92d_phy_config_macphymode_info(hw);
+}
+
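+/*
+ * Read the two chip-version bytes from the efuse and fold the cut
+ * information into the HAL version: 0xAA55 marks a C-cut, 0x9966 a
+ * D-cut, and any other value is treated as a D-cut.
+ */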
+static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ enum version_8192d chipver = rtlpriv->rtlhal.version;
+ u8 cutvalue[2];
+ u16 chipvalue;
+
+ rtlpriv->intf_ops->read_efuse_byte(hw, EEPROME_CHIP_VERSION_H,
+ &cutvalue[1]);
+ rtlpriv->intf_ops->read_efuse_byte(hw, EEPROME_CHIP_VERSION_L,
+ &cutvalue[0]);
+ chipvalue = (cutvalue[1] << 8) | cutvalue[0];
+ switch (chipvalue) {
+ case 0xAA55:
+ chipver |= CHIP_92D_C_CUT;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("C-CUT!!!\n"));
+ break;
+ case 0x9966:
+ chipver |= CHIP_92D_D_CUT;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("D-CUT!!!\n"));
+ break;
+ default:
+ chipver |= CHIP_92D_D_CUT;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, ("Unknown CUT!\n"));
+ break;
+ }
+ rtlpriv->rtlhal.version = chipver;
+}
+
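+/*
+ * Shadow-map the efuse (under the power/efuse lock), validate the
+ * EEPROM ID, then pull the OEM ID, MAC/PHY mode, PCI IDs, the MAC
+ * address of the active interface, the TX power tables and the
+ * channel plan out of the image.
+ */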
+static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u16 i, usvalue;
+ u8 hwinfo[HWSET_MAX_SIZE];
+ u16 eeprom_id;
+ unsigned long flags;
+
+ if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+ spin_lock_irqsave(&globalmutex_for_power_and_efuse, flags);
+ rtl_efuse_shadow_map_update(hw);
+ _rtl92de_efuse_update_chip_version(hw);
+ spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags);
+ memcpy((void *)hwinfo, (void *)&rtlefuse->efuse_map
+ [EFUSE_INIT_MAP][0],
+ HWSET_MAX_SIZE);
+ } else if (rtlefuse->epromtype == EEPROM_93C46) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("RTL819X Not boot from eeprom, check it !!"));
+ }
+ RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
+ hwinfo, HWSET_MAX_SIZE);
+
+ eeprom_id = *((u16 *)&hwinfo[0]);
+ if (eeprom_id != RTL8190_EEPROM_ID) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
+ rtlefuse->autoload_failflag = true;
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+ rtlefuse->autoload_failflag = false;
+ }
+ if (rtlefuse->autoload_failflag) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("RTL819X Not boot from eeprom, check it !!"));
+ return;
+ }
+ rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ _rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
+
+ /* VID, DID SE 0xA-D */
+ rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
+ rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
+ rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
+ rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("EEPROMId = 0x%4x\n", eeprom_id));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid));
+
+ /* Read Permanent MAC address */
+ if (rtlhal->interfaceindex == 0) {
+ for (i = 0; i < 6; i += 2) {
+ usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR_MAC0_92D + i];
+ *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
+ }
+ } else {
+ for (i = 0; i < 6; i += 2) {
+ usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR_MAC1_92D + i];
+ *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
+ }
+ }
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR,
+ rtlefuse->dev_addr);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ ("%pM\n", rtlefuse->dev_addr));
+ _rtl92de_read_txpower_info(hw, rtlefuse->autoload_failflag, hwinfo);
+
+ /* Read Channel Plan */
+ switch (rtlhal->bandset) {
+ case BAND_ON_2_4G:
+ rtlefuse->channel_plan = COUNTRY_CODE_TELEC;
+ break;
+ case BAND_ON_5G:
+ rtlefuse->channel_plan = COUNTRY_CODE_FCC;
+ break;
+ case BAND_ON_BOTH:
+ rtlefuse->channel_plan = COUNTRY_CODE_FCC;
+ break;
+ default:
+ rtlefuse->channel_plan = COUNTRY_CODE_FCC;
+ break;
+ }
+ rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+ rtlefuse->txpwr_fromeprom = true;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
+}
+
+void rtl92de_read_eeprom_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_u1b;
+
+ rtlhal->version = _rtl92de_read_chip_version(hw);
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+ rtlefuse->autoload_status = tmp_u1b;
+ if (tmp_u1b & BIT(4)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from EEPROM\n"));
+ rtlefuse->epromtype = EEPROM_93C46;
+ } else {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from EFUSE\n"));
+ rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
+ }
+ if (tmp_u1b & BIT(5)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
+
+ rtlefuse->autoload_failflag = false;
+ _rtl92de_read_adapter_info(hw);
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Autoload ERR!!\n"));
+ }
+ return;
+}
+
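+/*
+ * Build the global rate table: start from the station's legacy
+ * supported rates plus its HT MCS masks, filter by wireless mode,
+ * set bit 28 when short GI applies, and write the result to the
+ * REG_ARFR0 rate register.
+ */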
+static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 ratr_value;
+ u8 ratr_index = 0;
+ u8 nmode = mac->ht_enable;
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+ u16 shortgi_rate;
+ u32 tmp_ratr_value;
+ u8 curtxbw_40mhz = mac->bw_40;
+ u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ 1 : 0;
+ u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ 1 : 0;
+ enum wireless_mode wirelessmode = mac->mode;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ ratr_value = sta->supp_rates[1] << 4;
+ else
+ ratr_value = sta->supp_rates[0];
+ ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+ sta->ht_cap.mcs.rx_mask[0] << 12);
+ switch (wirelessmode) {
+ case WIRELESS_MODE_A:
+ ratr_value &= 0x00000FF0;
+ break;
+ case WIRELESS_MODE_B:
+ if (ratr_value & 0x0000000c)
+ ratr_value &= 0x0000000d;
+ else
+ ratr_value &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_value &= 0x00000FF5;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ nmode = 1;
+ if (mimo_ps == IEEE80211_SMPS_STATIC) {
+ ratr_value &= 0x0007F005;
+ } else {
+ u32 ratr_mask;
+
+ if (get_rf_type(rtlphy) == RF_1T2R ||
+ get_rf_type(rtlphy) == RF_1T1R) {
+ ratr_mask = 0x000ff005;
+ } else {
+ ratr_mask = 0x0f0ff005;
+ }
+
+ ratr_value &= ratr_mask;
+ }
+ break;
+ default:
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_value &= 0x000ff0ff;
+ else
+ ratr_value &= 0x0f0ff0ff;
+
+ break;
+ }
+ ratr_value &= 0x0FFFFFFF;
+ if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz))) {
+ ratr_value |= 0x10000000;
+ tmp_ratr_value = (ratr_value >> 12);
+ for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
+ if ((1 << shortgi_rate) & tmp_ratr_value)
+ break;
+ }
+ shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
+ (shortgi_rate << 4) | (shortgi_rate);
+ }
+ rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+ ("%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)));
+}
+
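+/*
+ * Per-station rate mask: pick a RATR index and trim the rate bitmap
+ * by wireless mode, RSSI level, bandwidth and MIMO power-save state,
+ * then hand it to the firmware with an H2C_RA_MASK command; value[0]
+ * carries the bitmap with the RATR index in the top nibble, value[1]
+ * the macid and the short-GI flag.
+ */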
+static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_sta_info *sta_entry = NULL;
+ u32 ratr_bitmap;
+ u8 ratr_index;
+ u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
+ ? 1 : 0;
+ u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ 1 : 0;
+ u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ 1 : 0;
+ enum wireless_mode wirelessmode = 0;
+ bool shortgi = false;
+ u32 value[2];
+ u8 macid = 0;
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ mimo_ps = sta_entry->mimo_ps;
+ wirelessmode = sta_entry->wireless_mode;
+ if (mac->opmode == NL80211_IFTYPE_STATION)
+ curtxbw_40mhz = mac->bw_40;
+ else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC)
+ macid = sta->aid + 1;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ ratr_bitmap = sta->supp_rates[1] << 4;
+ else
+ ratr_bitmap = sta->supp_rates[0];
+ ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+ sta->ht_cap.mcs.rx_mask[0] << 12);
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ ratr_index = RATR_INX_WIRELESS_B;
+ if (ratr_bitmap & 0x0000000c)
+ ratr_bitmap &= 0x0000000d;
+ else
+ ratr_bitmap &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_index = RATR_INX_WIRELESS_GB;
+
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00000f00;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x00000ff0;
+ else
+ ratr_bitmap &= 0x00000ff5;
+ break;
+ case WIRELESS_MODE_A:
+ ratr_index = RATR_INX_WIRELESS_G;
+ ratr_bitmap &= 0x00000ff0;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ if (wirelessmode == WIRELESS_MODE_N_24G)
+ ratr_index = RATR_INX_WIRELESS_NGB;
+ else
+ ratr_index = RATR_INX_WIRELESS_NG;
+ if (mimo_ps == IEEE80211_SMPS_STATIC) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00070000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0007f000;
+ else
+ ratr_bitmap &= 0x0007f005;
+ } else {
+ if (rtlphy->rf_type == RF_1T2R ||
+ rtlphy->rf_type == RF_1T1R) {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff005;
+ }
+ } else {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff005;
+ }
+ }
+ }
+ if ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz)) {
+
+ if (macid == 0)
+ shortgi = true;
+ else if (macid == 1)
+ shortgi = false;
+ }
+ break;
+ default:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_bitmap &= 0x000ff0ff;
+ else
+ ratr_bitmap &= 0x0f0ff0ff;
+ break;
+ }
+
+ value[0] = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28);
+ value[1] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+ ("ratr_bitmap :%x value0:%x value1:%x\n",
+ ratr_bitmap, value[0], value[1]));
+ rtl92d_fill_h2c_cmd(hw, H2C_RA_MASK, 5, (u8 *) value);
+ if (macid != 0)
+ sta_entry->ratr_index = ratr_index;
+}
+
+void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->dm.useramask)
+ rtl92de_update_hal_rate_mask(hw, sta, rssi_level);
+ else
+ rtl92de_update_hal_rate_table(hw, sta);
+}
+
+void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 sifs_timer;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+ (u8 *)&mac->slot_time);
+ if (!mac->ht_enable)
+ sifs_timer = 0x0a0a;
+ else
+ sifs_timer = 0x1010;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
+}
+
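+/*
+ * Poll the hardware radio on/off GPIO: with the pin muxed as GPIO,
+ * bit 3 of REG_GPIO_IO_SEL gives the switch state; hwradiooff is
+ * flipped when the state has changed, serialized against other RF
+ * state changes by rf_ps_lock.
+ */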
+bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ enum rf_pwrstate e_rfpowerstate_toset;
+ u8 u1tmp;
+ bool actuallyset = false;
+ unsigned long flag;
+
+ if (rtlpci->being_init_adapter)
+ return false;
+ if (ppsc->swrf_processing)
+ return false;
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ if (ppsc->rfchange_inprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ return false;
+ } else {
+ ppsc->rfchange_inprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ }
+ rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, rtl_read_byte(rtlpriv,
+ REG_MAC_PINMUX_CFG) & ~(BIT(3)));
+ u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
+ e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
+ if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("GPIOChangeRF - HW Radio ON, RF ON\n"));
+ e_rfpowerstate_toset = ERFON;
+ ppsc->hwradiooff = false;
+ actuallyset = true;
+ } else if ((ppsc->hwradiooff == false)
+ && (e_rfpowerstate_toset == ERFOFF)) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("GPIOChangeRF - HW Radio OFF, RF OFF\n"));
+ e_rfpowerstate_toset = ERFOFF;
+ ppsc->hwradiooff = true;
+ actuallyset = true;
+ }
+ if (actuallyset) {
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ } else {
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ }
+ *valid = 1;
+ return !ppsc->hwradiooff;
+}
+
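+/*
+ * Program the hardware CAM: clear_all wipes the first entries and the
+ * cached keys; otherwise the cipher is mapped to a CAM algorithm, an
+ * entry is chosen (default-key index, the broadcast address for group
+ * keys, a free entry in AP mode, or the fixed pairwise position) and
+ * the entry is added or deleted depending on the cached key length.
+ */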
+void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 *macaddr = p_macaddr;
+ u32 entry_id;
+ bool is_pairwise = false;
+ static u8 cam_const_addr[4][6] = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+ };
+ static u8 cam_const_broad[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
+ if (clear_all) {
+ u8 idx;
+ u8 cam_offset = 0;
+ u8 clear_number = 5;
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
+ for (idx = 0; idx < clear_number; idx++) {
+ rtl_cam_mark_invalid(hw, cam_offset + idx);
+ rtl_cam_empty_entry(hw, cam_offset + idx);
+
+ if (idx < 5) {
+ memset(rtlpriv->sec.key_buf[idx], 0,
+ MAX_KEY_LEN);
+ rtlpriv->sec.key_len[idx] = 0;
+ }
+ }
+ } else {
+ switch (enc_algo) {
+ case WEP40_ENCRYPTION:
+ enc_algo = CAM_WEP40;
+ break;
+ case WEP104_ENCRYPTION:
+ enc_algo = CAM_WEP104;
+ break;
+ case TKIP_ENCRYPTION:
+ enc_algo = CAM_TKIP;
+ break;
+ case AESCCMP_ENCRYPTION:
+ enc_algo = CAM_AES;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
+ "not process\n"));
+ enc_algo = CAM_TKIP;
+ break;
+ }
+ if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+ macaddr = cam_const_addr[key_index];
+ entry_id = key_index;
+ } else {
+ if (is_group) {
+ macaddr = cam_const_broad;
+ entry_id = key_index;
+ } else {
+ if (mac->opmode == NL80211_IFTYPE_AP) {
+ entry_id = rtl_cam_get_free_entry(hw,
+ p_macaddr);
+ if (entry_id >= TOTAL_CAM_ENTRY) {
+ RT_TRACE(rtlpriv, COMP_SEC,
+ DBG_EMERG, ("Can not "
+ "find free hw security"
+ " cam entry\n"));
+ return;
+ }
+ } else {
+ entry_id = CAM_PAIRWISE_KEY_POSITION;
+ }
+ key_index = PAIRWISE_KEYIDX;
+ is_pairwise = true;
+ }
+ }
+ if (rtlpriv->sec.key_len[key_index] == 0) {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("delete one entry, entry_id is %d\n",
+ entry_id));
+ if (mac->opmode == NL80211_IFTYPE_AP)
+ rtl_cam_del_entry(hw, p_macaddr);
+ rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ ("The insert KEY length is %d\n",
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ ("The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]));
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("add one entry\n"));
+ if (is_pairwise) {
+ RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
+ "Pairwiase Key content :",
+ rtlpriv->sec.pairwise_key,
+ rtlpriv->
+ sec.key_len[PAIRWISE_KEYIDX]);
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("set Pairwiase key\n"));
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->
+ sec.key_buf[key_index]);
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ ("set group key\n"));
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ rtl_cam_add_one_entry(hw,
+ rtlefuse->dev_addr,
+ PAIRWISE_KEYIDX,
+ CAM_PAIRWISE_KEY_POSITION,
+ enc_algo, CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[entry_id]);
+ }
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf
+ [entry_id]);
+ }
+ }
+ }
+}
+
+void rtl92de_suspend(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->rtlhal.macphyctl_reg = rtl_read_byte(rtlpriv,
+ REG_MAC_PHY_CTRL_NORMAL);
+}
+
+void rtl92de_resume(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_MAC_PHY_CTRL_NORMAL,
+ rtlpriv->rtlhal.macphyctl_reg);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
new file mode 100644
index 00000000000..ad44ffa520e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.h
@@ -0,0 +1,66 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92DE_HW_H__
+#define __RTL92DE_HW_H__
+
+void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92de_read_eeprom_info(struct ieee80211_hw *hw);
+void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
+ u32 *p_inta, u32 *p_intb);
+int rtl92de_hw_init(struct ieee80211_hw *hw);
+void rtl92de_card_disable(struct ieee80211_hw *hw);
+void rtl92de_enable_interrupt(struct ieee80211_hw *hw);
+void rtl92de_disable_interrupt(struct ieee80211_hw *hw);
+int rtl92de_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl92de_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
+void rtl92de_set_qos(struct ieee80211_hw *hw, int aci);
+void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl92de_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr);
+void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u8 rssi_level);
+void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw);
+void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+
+extern void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset,
+ u32 value, u8 direct);
+extern u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset,
+ u8 direct);
+void rtl92de_suspend(struct ieee80211_hw *hw);
+void rtl92de_resume(struct ieee80211_hw *hw);
+void rtl92d_linked_set_reg(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/rtlwifi/rtl8192de/led.c
new file mode 100644
index 00000000000..f1552f4df65
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/led.c
@@ -0,0 +1,159 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "reg.h"
+#include "led.h"
+
+static void _rtl92ce_init_led(struct ieee80211_hw *hw,
+ struct rtl_led *pled, enum rtl_led_pin ledpin)
+{
+ pled->hw = hw;
+ pled->ledpin = ledpin;
+ pled->ledon = false;
+}
+
+void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ u8 ledcfg;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+ ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+
+ if ((rtlpriv->efuse.eeprom_did == 0x8176) ||
+ (rtlpriv->efuse.eeprom_did == 0x8193))
+ /* BIT7 of REG_LEDCFG2 should be set to
+ * make sure we can drive LED2. */
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0xf0) |
+ BIT(7) | BIT(5) | BIT(6));
+ else
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0xf0) |
+ BIT(7) | BIT(5));
+ break;
+ case LED_PIN_LED1:
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
+
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ pled->ledon = true;
+}
+
+void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ u8 ledcfg;
+
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+ ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
+
+ ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+
+ switch (pled->ledpin) {
+ case LED_PIN_GPIO0:
+ break;
+ case LED_PIN_LED0:
+ ledcfg &= 0xf0;
+ if (pcipriv->ledctl.led_opendrain)
+ rtl_write_byte(rtlpriv, REG_LEDCFG2,
+ (ledcfg | BIT(1) | BIT(5) | BIT(6)));
+ else
+ rtl_write_byte(rtlpriv, REG_LEDCFG2,
+ (ledcfg | BIT(3) | BIT(5) | BIT(6)));
+ break;
+ case LED_PIN_LED1:
+ ledcfg &= 0x0f;
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ pled->ledon = false;
+}
+
+void rtl92de_init_sw_leds(struct ieee80211_hw *hw)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
+ _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+}
+
+static void _rtl92ce_sw_led_control(struct ieee80211_hw *hw,
+ enum led_ctl_mode ledaction)
+{
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+ switch (ledaction) {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_LINK:
+ case LED_CTL_NO_LINK:
+ rtl92de_sw_led_on(hw, pLed0);
+ break;
+ case LED_CTL_POWER_OFF:
+ rtl92de_sw_led_off(hw, pLed0);
+ break;
+ default:
+ break;
+ }
+}
+
+void rtl92de_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+ (ledaction == LED_CTL_TX ||
+ ledaction == LED_CTL_RX ||
+ ledaction == LED_CTL_SITE_SURVEY ||
+ ledaction == LED_CTL_LINK ||
+ ledaction == LED_CTL_NO_LINK ||
+ ledaction == LED_CTL_START_TO_LINK ||
+ ledaction == LED_CTL_POWER_ON)) {
+ return;
+ }
+ RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d,\n", ledaction));
+
+ _rtl92ce_sw_led_control(hw, ledaction);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/led.h b/drivers/net/wireless/rtlwifi/rtl8192de/led.h
new file mode 100644
index 00000000000..57f4a3c583d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/led.h
@@ -0,0 +1,38 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92CE_LED_H__
+#define __RTL92CE_LED_H__
+
+void rtl92de_init_sw_leds(struct ieee80211_hw *hw);
+void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl92de_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
new file mode 100644
index 00000000000..3ac7af1c550
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -0,0 +1,3831 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../ps.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "table.h"
+#include "sw.h"
+#include "hw.h"
+
+#define MAX_RF_IMR_INDEX 12
+#define MAX_RF_IMR_INDEX_NORMAL 13
+#define RF_REG_NUM_FOR_C_CUT_5G 6
+#define RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA 7
+#define RF_REG_NUM_FOR_C_CUT_2G 5
+#define RF_CHNL_NUM_5G 19
+#define RF_CHNL_NUM_5G_40M 17
+#define TARGET_CHNL_NUM_5G 221
+#define TARGET_CHNL_NUM_2G 14
+#define CV_CURVE_CNT 64
+
+static u32 rf_reg_for_5g_swchnl_normal[MAX_RF_IMR_INDEX_NORMAL] = {
+ 0, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x0
+};
+
+static u8 rf_reg_for_c_cut_5g[RF_REG_NUM_FOR_C_CUT_5G] = {
+ RF_SYN_G1, RF_SYN_G2, RF_SYN_G3, RF_SYN_G4, RF_SYN_G5, RF_SYN_G6
+};
+
+static u8 rf_reg_for_c_cut_2g[RF_REG_NUM_FOR_C_CUT_2G] = {
+ RF_SYN_G1, RF_SYN_G2, RF_SYN_G3, RF_SYN_G7, RF_SYN_G8
+};
+
+static u8 rf_for_c_cut_5g_internal_pa[RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA] = {
+ 0x0B, 0x48, 0x49, 0x4B, 0x03, 0x04, 0x0E
+};
+
+static u32 rf_reg_mask_for_c_cut_2g[RF_REG_NUM_FOR_C_CUT_2G] = {
+ BIT(19) | BIT(18) | BIT(17) | BIT(14) | BIT(1),
+ BIT(10) | BIT(9),
+ BIT(18) | BIT(17) | BIT(16) | BIT(1),
+ BIT(2) | BIT(1),
+ BIT(15) | BIT(14) | BIT(13) | BIT(12) | BIT(11)
+};
+
+static u8 rf_chnl_5g[RF_CHNL_NUM_5G] = {
+ 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108,
+ 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static u8 rf_chnl_5g_40m[RF_CHNL_NUM_5G_40M] = {
+ 38, 42, 46, 50, 54, 58, 62, 102, 106, 110, 114,
+ 118, 122, 126, 130, 134, 138
+};
+static u32 rf_reg_pram_c_5g[5][RF_REG_NUM_FOR_C_CUT_5G] = {
+ {0xE43BE, 0xFC638, 0x77C0A, 0xDE471, 0xd7110, 0x8EB04},
+ {0xE43BE, 0xFC078, 0xF7C1A, 0xE0C71, 0xD7550, 0xAEB04},
+ {0xE43BF, 0xFF038, 0xF7C0A, 0xDE471, 0xE5550, 0xAEB04},
+ {0xE43BF, 0xFF079, 0xF7C1A, 0xDE471, 0xE5550, 0xAEB04},
+ {0xE43BF, 0xFF038, 0xF7C1A, 0xDE471, 0xd7550, 0xAEB04}
+};
+
+static u32 rf_reg_param_for_c_cut_2g[3][RF_REG_NUM_FOR_C_CUT_2G] = {
+ {0x643BC, 0xFC038, 0x77C1A, 0x41289, 0x01840},
+ {0x643BC, 0xFC038, 0x07C1A, 0x41289, 0x01840},
+ {0x243BC, 0xFC438, 0x07C1A, 0x4128B, 0x0FC41}
+};
+
+static u32 rf_syn_g4_for_c_cut_2g = 0xD1C31 & 0x7FF;
+
+static u32 rf_pram_c_5g_int_pa[3][RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA] = {
+ {0x01a00, 0x40443, 0x00eb5, 0x89bec, 0x94a12, 0x94a12, 0x94a12},
+ {0x01800, 0xc0443, 0x00730, 0x896ee, 0x94a52, 0x94a52, 0x94a52},
+ {0x01800, 0xc0443, 0x00730, 0x896ee, 0x94a12, 0x94a12, 0x94a12}
+};
+
+/* [mode][patha+b][reg] */
+static u32 rf_imr_param_normal[1][3][MAX_RF_IMR_INDEX_NORMAL] = {
+ {
+ /* channel 1-14. */
+ {
+ 0x70000, 0x00ff0, 0x4400f, 0x00ff0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x64888, 0xe266c, 0x00090, 0x22fff
+ },
+ /* path 36-64 */
+ {
+ 0x70000, 0x22880, 0x4470f, 0x55880, 0x00070, 0x88000,
+ 0x0, 0x88080, 0x70000, 0x64a82, 0xe466c, 0x00090,
+ 0x32c9a
+ },
+ /* 100 -165 */
+ {
+ 0x70000, 0x44880, 0x4477f, 0x77880, 0x00070, 0x88000,
+ 0x0, 0x880b0, 0x0, 0x64b82, 0xe466c, 0x00090, 0x32c9a
+ }
+ }
+};
+
+static u32 curveindex_5g[TARGET_CHNL_NUM_5G] = {0};
+
+static u32 curveindex_2g[TARGET_CHNL_NUM_2G] = {0};
+
+static u32 targetchnl_5g[TARGET_CHNL_NUM_5G] = {
+ 25141, 25116, 25091, 25066, 25041,
+ 25016, 24991, 24966, 24941, 24917,
+ 24892, 24867, 24843, 24818, 24794,
+ 24770, 24765, 24721, 24697, 24672,
+ 24648, 24624, 24600, 24576, 24552,
+ 24528, 24504, 24480, 24457, 24433,
+ 24409, 24385, 24362, 24338, 24315,
+ 24291, 24268, 24245, 24221, 24198,
+ 24175, 24151, 24128, 24105, 24082,
+ 24059, 24036, 24013, 23990, 23967,
+ 23945, 23922, 23899, 23876, 23854,
+ 23831, 23809, 23786, 23764, 23741,
+ 23719, 23697, 23674, 23652, 23630,
+ 23608, 23586, 23564, 23541, 23519,
+ 23498, 23476, 23454, 23432, 23410,
+ 23388, 23367, 23345, 23323, 23302,
+ 23280, 23259, 23237, 23216, 23194,
+ 23173, 23152, 23130, 23109, 23088,
+ 23067, 23046, 23025, 23003, 22982,
+ 22962, 22941, 22920, 22899, 22878,
+ 22857, 22837, 22816, 22795, 22775,
+ 22754, 22733, 22713, 22692, 22672,
+ 22652, 22631, 22611, 22591, 22570,
+ 22550, 22530, 22510, 22490, 22469,
+ 22449, 22429, 22409, 22390, 22370,
+ 22350, 22336, 22310, 22290, 22271,
+ 22251, 22231, 22212, 22192, 22173,
+ 22153, 22134, 22114, 22095, 22075,
+ 22056, 22037, 22017, 21998, 21979,
+ 21960, 21941, 21921, 21902, 21883,
+ 21864, 21845, 21826, 21807, 21789,
+ 21770, 21751, 21732, 21713, 21695,
+ 21676, 21657, 21639, 21620, 21602,
+ 21583, 21565, 21546, 21528, 21509,
+ 21491, 21473, 21454, 21436, 21418,
+ 21400, 21381, 21363, 21345, 21327,
+ 21309, 21291, 21273, 21255, 21237,
+ 21219, 21201, 21183, 21166, 21148,
+ 21130, 21112, 21095, 21077, 21059,
+ 21042, 21024, 21007, 20989, 20972,
+ 25679, 25653, 25627, 25601, 25575,
+ 25549, 25523, 25497, 25471, 25446,
+ 25420, 25394, 25369, 25343, 25318,
+ 25292, 25267, 25242, 25216, 25191,
+ 25166
+};
+
+/* channel 1~14 */
+static u32 targetchnl_2g[TARGET_CHNL_NUM_2G] = {
+ 26084, 26030, 25976, 25923, 25869, 25816, 25764,
+ 25711, 25658, 25606, 25554, 25502, 25451, 25328
+};
+
+static u32 _rtl92d_phy_calculate_bit_shift(u32 bitmask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++) {
+ if (((bitmask >> i) & 0x1) == 1)
+ break;
+ }
+
+ return i;
+}
+
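+/*
+ * BB register access: while the other MAC's radio is being brought up
+ * (during_mac1init_radioa / during_mac0init_radiob) the register is
+ * reached indirectly through the DBI interface, otherwise plain MMIO
+ * is used; the bitmask selects the field and
+ * _rtl92d_phy_calculate_bit_shift() finds its shift.
+ */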
+u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u32 returnvalue, originalvalue, bitshift;
+ u8 dbi_direct;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "bitmask(%#x)\n", regaddr, bitmask));
+ if (rtlhal->during_mac1init_radioa || rtlhal->during_mac0init_radiob) {
+ /* MAC1 uses PHY0 to read radio B. */
+ /* MAC0 uses PHY1 to read radio B. */
+ if (rtlhal->during_mac1init_radioa)
+ dbi_direct = BIT(3);
+ else if (rtlhal->during_mac0init_radiob)
+ dbi_direct = BIT(3) | BIT(2);
+ originalvalue = rtl92de_read_dword_dbi(hw, (u16)regaddr,
+ dbi_direct);
+ } else {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ }
+ bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
+ "Addr[0x%x]=0x%x\n", bitmask, regaddr, originalvalue));
+ return returnvalue;
+}
+
+void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 dbi_direct = 0;
+ u32 originalvalue, bitshift;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+ " data(%#x)\n", regaddr, bitmask, data));
+ if (rtlhal->during_mac1init_radioa)
+ dbi_direct = BIT(3);
+ else if (rtlhal->during_mac0init_radiob)
+ /* MAC0 uses PHY1 to write radio B. */
+ dbi_direct = BIT(3) | BIT(2);
+ if (bitmask != BMASKDWORD) {
+ if (rtlhal->during_mac1init_radioa ||
+ rtlhal->during_mac0init_radiob)
+ originalvalue = rtl92de_read_dword_dbi(hw,
+ (u16) regaddr,
+ dbi_direct);
+ else
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
+ data = ((originalvalue & (~bitmask)) | (data << bitshift));
+ }
+ if (rtlhal->during_mac1init_radioa || rtlhal->during_mac0init_radiob)
+ rtl92de_write_dword_dbi(hw, (u16) regaddr, data, dbi_direct);
+ else
+ rtl_write_dword(rtlpriv, regaddr, data);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
+ " data(%#x)\n", regaddr, bitmask, data));
+}
+
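+/*
+ * 3-wire LSSI read: the register address is placed in HSSI
+ * parameter 2, the read edge is toggled on path A, and the result is
+ * taken from the PI or SI readback register depending on whether RFPI
+ * is enabled.
+ */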
+static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset)
+{
+
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+ u32 newoffset;
+ u32 tmplong, tmplong2;
+ u8 rfpi_enable = 0;
+ u32 retvalue;
+
+ newoffset = offset;
+ tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD);
+ if (rfpath == RF90_PATH_A)
+ tmplong2 = tmplong;
+ else
+ tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, BMASKDWORD);
+ tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+ (newoffset << 23) | BLSSIREADEDGE;
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD,
+ tmplong & (~BLSSIREADEDGE));
+ udelay(10);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, BMASKDWORD, tmplong2);
+ udelay(50);
+ udelay(50);
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD,
+ tmplong | BLSSIREADEDGE);
+ udelay(10);
+ if (rfpath == RF90_PATH_A)
+ rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+ else if (rfpath == RF90_PATH_B)
+ rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+ BIT(8));
+ if (rfpi_enable)
+ retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
+ BLSSIREADBACKDATA);
+ else
+ retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
+ BLSSIREADBACKDATA);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x] = 0x%x\n",
+ rfpath, pphyreg->rflssi_readback, retvalue));
+ return retvalue;
+}
+
+static void _rtl92d_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 offset, u32 data)
+{
+ u32 data_and_addr;
+ u32 newoffset;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ newoffset = offset;
+ /* T65 RF */
+ data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
+ rtl_set_bbreg(hw, pphyreg->rf3wire_offset, BMASKDWORD, data_and_addr);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset, data_and_addr));
+}
+
+u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 original_value, readback_value, bitshift;
+ unsigned long flags;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask));
+ spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
+ bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), rfpath(%#x), "
+ "bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value));
+ return readback_value;
+}
+
+void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 original_value, bitshift;
+ unsigned long flags;
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath));
+ if (bitmask == 0)
+ return;
+ spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ if (rtlphy->rf_mode != RF_OP_BY_FW) {
+ if (bitmask != BRFREGOFFSETMASK) {
+ original_value = _rtl92d_phy_rf_serial_read(hw,
+ rfpath, regaddr);
+ bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
+ data = ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+ _rtl92d_phy_rf_serial_write(hw, rfpath, regaddr, data);
+ }
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
+ "bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath));
+}
+
+bool rtl92d_phy_mac_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+ u32 arraylength;
+ u32 *ptrarray;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Read Rtl819XMACPHY_Array\n"));
+ arraylength = MAC_2T_ARRAYLENGTH;
+ ptrarray = rtl8192de_mac_2tarray;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Img:Rtl819XMAC_Array\n"));
+ for (i = 0; i < arraylength; i = i + 2)
+ rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
+ if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY) {
+ /* improve 2-stream TX EVM */
+ /* rtl_write_byte(rtlpriv, 0x14,0x71); */
+ /* AMPDU aggregation number 9 */
+ /* rtl_write_word(rtlpriv, REG_MAX_AGGR_NUM, MAX_AGGR_NUM); */
+ rtl_write_byte(rtlpriv, REG_MAX_AGGR_NUM, 0x0B);
+ } else {
+ /* 92D needs testing to decide the number. */
+ rtl_write_byte(rtlpriv, REG_MAX_AGGR_NUM, 0x07);
+ }
+ return true;
+}
+
+static void _rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ /* RF Interface Software Control */
+ /* 16 LSBs if read 32-bit from 0x870 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ /* 16 LSBs if read 32-bit from 0x874 */
+ rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+ /* 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876) */
+
+ rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+ /* RF Interface Readback Value */
+ /* 16 LSBs if read 32-bit from 0x8E0 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+ /* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+ /* 16 LSBs if read 32-bit from 0x8E4 */
+ rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+ /* 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6) */
+ rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+
+ /* RF Interface Output (and Enable) */
+ /* 16 LSBs if read 32-bit from 0x860 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
+ /* 16 LSBs if read 32-bit from 0x864 */
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
+
+ /* RF Interface (Output and) Enable */
+ /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
+ /* 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) */
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
+
+ /* Addr of LSSI. Write RF register by driver */
+ /* LSSI Parameter */
+ rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
+ RFPGA0_XA_LSSIPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
+ RFPGA0_XB_LSSIPARAMETER;
+
+ /* RF parameter */
+ /* BB Band Select */
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = RFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = RFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = RFPGA0_XCD_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = RFPGA0_XCD_RFPARAMETER;
+
+ /* Tx AGC Gain Stage (same for all paths. Should we remove this?) */
+ /* Tx gain stage */
+ rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ /* Tx gain stage */
+ rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ /* Tx gain stage */
+ rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ /* Tx gain stage */
+ rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+
+ /* Transceiver A~D HSSI Parameter-1 */
+ /* wire control parameter1 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
+ /* wire control parameter1 */
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
+
+ /* Transceiver A~D HSSI Parameter-2 */
+ /* wire control parameter2 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
+ /* wire control parameter2 */
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
+
+ /* RF switch Control */
+ /* TR/Ant switch control */
+ rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
+ RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
+ RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
+ RFPGA0_XCD_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
+ RFPGA0_XCD_SWITCHCONTROL;
+
+ /* AGC control 1 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
+
+ /* AGC control 2 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
+
+ /* RX AFE control 1 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
+ ROFDM0_XARXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
+ ROFDM0_XBRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
+ ROFDM0_XCRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
+ ROFDM0_XDRXIQIMBALANCE;
+
+ /*RX AFE control 1 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
+
+ /* Tx AFE control 1 */
+ rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
+ ROFDM0_XATxIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
+ ROFDM0_XBTxIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
+ ROFDM0_XCTxIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
+ ROFDM0_XDTxIQIMBALANCE;
+
+ /* Tx AFE control 2 */
+ rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATxAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTxAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTxAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTxAFE;
+
+ /* Transceiver LSSI Readback SI mode */
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
+ RFPGA0_XA_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
+ RFPGA0_XB_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
+ RFPGA0_XC_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
+ RFPGA0_XD_LSSIREADBACK;
+
+ /* Transceiver LSSI Readback PI mode */
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
+ TRANSCEIVERA_HSPI_READBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
+ TRANSCEIVERB_HSPI_READBACK;
+}
+
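+/*
+ * Load the baseband tables: MAC0 always uses the common AGC table
+ * while MAC1 picks the 2G or 5G AGC table by current band; the
+ * pseudo-addresses 0xfe..0xf9 in the PHY_REG table trigger extra
+ * delays before the write.
+ */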
+static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ int i;
+ u32 *phy_regarray_table;
+ u32 *agctab_array_table = NULL;
+ u32 *agctab_5garray_table;
+ u16 phy_reg_arraylen, agctab_arraylen = 0, agctab_5garraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+ /* Normal chip: MAC0 uses AGC_TAB.txt for both the 2G and 5G bands. */
+ if (rtlhal->interfaceindex == 0) {
+ agctab_arraylen = AGCTAB_ARRAYLENGTH;
+ agctab_array_table = rtl8192de_agctab_array;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ (" ===> phy:MAC0, Rtl819XAGCTAB_Array\n"));
+ } else {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ agctab_arraylen = AGCTAB_2G_ARRAYLENGTH;
+ agctab_array_table = rtl8192de_agctab_2garray;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ (" ===> phy:MAC1, Rtl819XAGCTAB_2GArray\n"));
+ } else {
+ agctab_5garraylen = AGCTAB_5G_ARRAYLENGTH;
+ agctab_5garray_table = rtl8192de_agctab_5garray;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ (" ===> phy:MAC1, Rtl819XAGCTAB_5GArray\n"));
+
+ }
+ }
+ phy_reg_arraylen = PHY_REG_2T_ARRAYLENGTH;
+ phy_regarray_table = rtl8192de_phy_reg_2tarray;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ (" ===> phy:Rtl819XPHY_REG_Array_PG\n"));
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_reg_arraylen; i = i + 2) {
+ if (phy_regarray_table[i] == 0xfe)
+ mdelay(50);
+ else if (phy_regarray_table[i] == 0xfd)
+ mdelay(5);
+ else if (phy_regarray_table[i] == 0xfc)
+ mdelay(1);
+ else if (phy_regarray_table[i] == 0xfb)
+ udelay(50);
+ else if (phy_regarray_table[i] == 0xfa)
+ udelay(5);
+ else if (phy_regarray_table[i] == 0xf9)
+ udelay(1);
+ rtl_set_bbreg(hw, phy_regarray_table[i], BMASKDWORD,
+ phy_regarray_table[i + 1]);
+ udelay(1);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("The phy_regarray_table[0] is %x"
+ " Rtl819XPHY_REGArray[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]));
+ }
+ } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+ if (rtlhal->interfaceindex == 0) {
+ for (i = 0; i < agctab_arraylen; i = i + 2) {
+ rtl_set_bbreg(hw, agctab_array_table[i],
+ BMASKDWORD,
+ agctab_array_table[i + 1]);
+ /* Add 1us delay between BB/RF register
+ * setting. */
+ udelay(1);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("The Rtl819XAGCTAB_Array_"
+ "Table[0] is %ul "
+ "Rtl819XPHY_REGArray[1] is %ul\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]));
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("Normal Chip, MAC0, load "
+ "Rtl819XAGCTAB_Array\n"));
+ } else {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ for (i = 0; i < agctab_arraylen; i = i + 2) {
+ rtl_set_bbreg(hw, agctab_array_table[i],
+ BMASKDWORD,
+ agctab_array_table[i + 1]);
+ /* Add 1us delay between BB/RF register
+ * setting. */
+ udelay(1);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("The Rtl819XAGCTAB_Array_"
+ "Table[0] is %ul Rtl819XPHY_"
+ "REGArray[1] is %ul\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]));
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("Load Rtl819XAGCTAB_2GArray\n"));
+ } else {
+ for (i = 0; i < agctab_5garraylen; i = i + 2) {
+ rtl_set_bbreg(hw,
+ agctab_5garray_table[i],
+ BMASKDWORD,
+ agctab_5garray_table[i + 1]);
+ /* Add 1us delay between BB/RF register
+ * setting. */
+ udelay(1);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("The Rtl819XAGCTAB_5GArray_"
+ "Table[0] is %ul Rtl819XPHY_"
+ "REGArray[1] is %ul\n",
+ agctab_5garray_table[i],
+ agctab_5garray_table[i + 1]));
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("Load Rtl819XAGCTAB_5GArray\n"));
+ }
+ }
+ }
+ return true;
+}
+
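+/*
+ * Cache the per-rate TX power offsets parsed from the PHY_REG_PG
+ * table into mcs_txpwrlevel_origoffset[pwrgroup_cnt][]; the group
+ * counter advances once the last register of a group
+ * (RTXAGC_B_MCS15_MCS12) has been seen.
+ */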
+static void _rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask,
+ u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ if (regaddr == RTXAGC_A_RATE18_06) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%ulx\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][0]));
+ }
+ if (regaddr == RTXAGC_A_RATE54_24) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%ulx\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][1]));
+ }
+ if (regaddr == RTXAGC_A_CCK1_MCS32) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%ulx\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][6]));
+ }
+ if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%ulx\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][7]));
+ }
+ if (regaddr == RTXAGC_A_MCS03_MCS00) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%ulx\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][2]));
+ }
+ if (regaddr == RTXAGC_A_MCS07_MCS04) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%ulx\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][3]));
+ }
+ if (regaddr == RTXAGC_A_MCS11_MCS08) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][4]));
+ }
+ if (regaddr == RTXAGC_A_MCS15_MCS12) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][5]));
+ }
+ if (regaddr == RTXAGC_B_RATE18_06) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][8]));
+ }
+ if (regaddr == RTXAGC_B_RATE54_24) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][9]));
+ }
+ if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][14]));
+ }
+ if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][15]));
+ }
+ if (regaddr == RTXAGC_B_MCS03_MCS00) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][10]));
+ }
+ if (regaddr == RTXAGC_B_MCS07_MCS04) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][11]));
+ }
+ if (regaddr == RTXAGC_B_MCS11_MCS08) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][12]));
+ }
+ if (regaddr == RTXAGC_B_MCS15_MCS12) {
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] =
+ data;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+			 ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][13]));
+ rtlphy->pwrgroup_cnt++;
+ }
+}
+
+static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+ u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i;
+ u32 *phy_regarray_table_pg;
+ u16 phy_regarray_pg_len;
+
+ phy_regarray_pg_len = PHY_REG_ARRAY_PG_LENGTH;
+ phy_regarray_table_pg = rtl8192de_phy_reg_array_pg;
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
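+			/* First-column values 0xfe-0xf9 are delay opcodes, not
+			 * register addresses; the store helper below only acts
+			 * on the RTXAGC_* addresses it recognizes. */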
+ if (phy_regarray_table_pg[i] == 0xfe)
+ mdelay(50);
+ else if (phy_regarray_table_pg[i] == 0xfd)
+ mdelay(5);
+ else if (phy_regarray_table_pg[i] == 0xfc)
+ mdelay(1);
+ else if (phy_regarray_table_pg[i] == 0xfb)
+ udelay(50);
+ else if (phy_regarray_table_pg[i] == 0xfa)
+ udelay(5);
+ else if (phy_regarray_table_pg[i] == 0xf9)
+ udelay(1);
+ _rtl92d_store_pwrindex_diffrate_offset(hw,
+ phy_regarray_table_pg[i],
+ phy_regarray_table_pg[i + 1],
+ phy_regarray_table_pg[i + 2]);
+ }
+ } else {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ ("configtype != BaseBand_Config_PHY_REG\n"));
+ }
+ return true;
+}
+
+static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ bool rtstatus = true;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
+ rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ if (rtstatus != true) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
+ return false;
+ }
+
+ /* if (rtlphy->rf_type == RF_1T2R) {
+ * _rtl92c_phy_bb_config_1t(hw);
+ * RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
+ *} */
+
+ if (rtlefuse->autoload_failflag == false) {
+ rtlphy->pwrgroup_cnt = 0;
+ rtstatus = _rtl92d_phy_config_bb_with_pgheaderfile(hw,
+ BASEBAND_CONFIG_PHY_REG);
+ }
+ if (rtstatus != true) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
+ return false;
+ }
+ rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_AGC_TAB);
+ if (rtstatus != true) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
+ return false;
+ }
+ rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER2, 0x200));
+
+ return true;
+}
+
+bool rtl92d_phy_bb_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 regval;
+ u32 regvaldw;
+ u8 value;
+
+ _rtl92d_phy_init_bb_rf_register_definition(hw);
+ regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
+ regval | BIT(13) | BIT(0) | BIT(1));
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
+ /* 0x1f bit7 bit6 represent for mac0/mac1 driver ready */
+ value = rtl_read_byte(rtlpriv, REG_RF_CTRL);
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, value | RF_EN | RF_RSTB |
+ RF_SDMRSTB);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_PPLL | FEN_PCIEA |
+ FEN_DIO_PCIE | FEN_BB_GLB_RSTn | FEN_BBRSTB);
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
+ if (!(IS_92D_SINGLEPHY(rtlpriv->rtlhal.version))) {
+ regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
+ rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23));
+ }
+
+ return _rtl92d_phy_bb_config(hw);
+}
+
+bool rtl92d_phy_rf_config(struct ieee80211_hw *hw)
+{
+ return rtl92d_phy_rf6052_config(hw);
+}
+
+bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum rf_content content,
+ enum radio_path rfpath)
+{
+ int i;
+ u32 *radioa_array_table;
+ u32 *radiob_array_table;
+ u16 radioa_arraylen, radiob_arraylen;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ radioa_arraylen = RADIOA_2T_ARRAYLENGTH;
+ radioa_array_table = rtl8192de_radioa_2tarray;
+ radiob_arraylen = RADIOB_2T_ARRAYLENGTH;
+ radiob_array_table = rtl8192de_radiob_2tarray;
+ if (rtlpriv->efuse.internal_pa_5g[0]) {
+ radioa_arraylen = RADIOA_2T_INT_PA_ARRAYLENGTH;
+ radioa_array_table = rtl8192de_radioa_2t_int_paarray;
+ }
+ if (rtlpriv->efuse.internal_pa_5g[1]) {
+ radiob_arraylen = RADIOB_2T_INT_PA_ARRAYLENGTH;
+ radiob_array_table = rtl8192de_radiob_2t_int_paarray;
+ }
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("PHY_ConfigRFWithHeaderFile() "
+ "Radio_A:Rtl819XRadioA_1TArray\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("PHY_ConfigRFWithHeaderFile() "
+ "Radio_B:Rtl819XRadioB_1TArray\n"));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath));
+
+	/* This only happens in DMDP mode: MAC0 starts on 2.4G and MAC1 on
+	 * 5G, so MAC0 has to set phy0&phy1 path A, or MAC1 has to set
+	 * phy0&phy1 path A. */
+ if ((content == radiob_txt) && (rfpath == RF90_PATH_A)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+			 (" ===> although Path A, we load radiob.txt\n"));
+ radioa_arraylen = radiob_arraylen;
+ radioa_array_table = radiob_array_table;
+ }
+ switch (rfpath) {
+ case RF90_PATH_A:
+ for (i = 0; i < radioa_arraylen; i = i + 2) {
+ if (radioa_array_table[i] == 0xfe) {
+ mdelay(50);
+ } else if (radioa_array_table[i] == 0xfd) {
+ /* delay_ms(5); */
+ mdelay(5);
+ } else if (radioa_array_table[i] == 0xfc) {
+ /* delay_ms(1); */
+ mdelay(1);
+ } else if (radioa_array_table[i] == 0xfb) {
+ udelay(50);
+ } else if (radioa_array_table[i] == 0xfa) {
+ udelay(5);
+ } else if (radioa_array_table[i] == 0xf9) {
+ udelay(1);
+ } else {
+ rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
+ BRFREGOFFSETMASK,
+ radioa_array_table[i + 1]);
+ /* Add 1us delay between BB/RF register set. */
+ udelay(1);
+ }
+ }
+ break;
+ case RF90_PATH_B:
+ for (i = 0; i < radiob_arraylen; i = i + 2) {
+ if (radiob_array_table[i] == 0xfe) {
+				/* Delay for a specific number of ms.  Only the
+				 * RF configuration requires a delay. */
+ mdelay(50);
+ } else if (radiob_array_table[i] == 0xfd) {
+ /* delay_ms(5); */
+ mdelay(5);
+ } else if (radiob_array_table[i] == 0xfc) {
+ /* delay_ms(1); */
+ mdelay(1);
+ } else if (radiob_array_table[i] == 0xfb) {
+ udelay(50);
+ } else if (radiob_array_table[i] == 0xfa) {
+ udelay(5);
+ } else if (radiob_array_table[i] == 0xf9) {
+ udelay(1);
+ } else {
+ rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
+ BRFREGOFFSETMASK,
+ radiob_array_table[i + 1]);
+ /* Add 1us delay between BB/RF register set. */
+ udelay(1);
+ }
+ }
+ break;
+ case RF90_PATH_C:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ case RF90_PATH_D:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ return true;
+}
+
+void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->default_initialgain[0] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, BMASKBYTE0);
+ rtlphy->default_initialgain[1] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, BMASKBYTE0);
+ rtlphy->default_initialgain[2] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, BMASKBYTE0);
+ rtlphy->default_initialgain[3] =
+ (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, BMASKBYTE0);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Default initial gain (c50=0x%x, "
+		  "c58=0x%x, c60=0x%x, c68=0x%x)\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]));
+ rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
+ BMASKBYTE0);
+ rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
+ BMASKDWORD);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync));
+}
+
+static void _rtl92d_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
+ u8 *cckpowerlevel, u8 *ofdmpowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 index = (channel - 1);
+
+ /* 1. CCK */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* RF-A */
+ cckpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
+ /* RF-B */
+ cckpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
+ } else {
+ cckpowerlevel[RF90_PATH_A] = 0;
+ cckpowerlevel[RF90_PATH_B] = 0;
+ }
+ /* 2. OFDM for 1S or 2S */
+ if (rtlphy->rf_type == RF_1T2R || rtlphy->rf_type == RF_1T1R) {
+ /* Read HT 40 OFDM TX power */
+ ofdmpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
+ ofdmpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
+ } else if (rtlphy->rf_type == RF_2T2R) {
+ /* Read HT 40 OFDM TX power */
+ ofdmpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
+ ofdmpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
+ }
+}
+
+static void _rtl92d_ccxpower_index_check(struct ieee80211_hw *hw,
+ u8 channel, u8 *cckpowerlevel, u8 *ofdmpowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
+ rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
+}
+
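+/* Map a channel number to its 1-based position in channel_5g[];
+ * channels 1-14 map to themselves. */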
+static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl)
+{
+ u8 channel_5g[59] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
+ 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
+ 114, 116, 118, 120, 122, 124, 126, 128,
+ 130, 132, 134, 136, 138, 140, 149, 151,
+ 153, 155, 157, 159, 161, 163, 165
+ };
+ u8 place = chnl;
+
+ if (chnl > 14) {
+ for (place = 14; place < sizeof(channel_5g); place++) {
+ if (channel_5g[place] == chnl) {
+ place++;
+ break;
+ }
+ }
+ }
+ return place;
+}
+
+void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 cckpowerlevel[2], ofdmpowerlevel[2];
+
+ if (rtlefuse->txpwr_fromeprom == false)
+ return;
+ channel = _rtl92c_phy_get_rightchnlplace(channel);
+ _rtl92d_get_txpower_index(hw, channel, &cckpowerlevel[0],
+ &ofdmpowerlevel[0]);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
+ _rtl92d_ccxpower_index_check(hw, channel, &cckpowerlevel[0],
+ &ofdmpowerlevel[0]);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
+ rtl92d_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
+ rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
+}
+
+void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ enum io_type iotype;
+
+ if (!is_hal_stop(rtlhal)) {
+ switch (operation) {
+ case SCAN_OPT_BACKUP:
+ rtlhal->current_bandtypebackup =
+ rtlhal->current_bandtype;
+ iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ case SCAN_OPT_RESTORE:
+ iotype = IO_CMD_RESUME_DM_BY_SCAN;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
+ (u8 *)&iotype);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Unknown Scan Backup operation.\n"));
+ break;
+ }
+ }
+}
+
+void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ unsigned long flag = 0;
+ u8 reg_prsr_rsc;
+ u8 reg_bw_opmode;
+
+ if (rtlphy->set_bwmode_inprogress)
+ return;
+ if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("FALSE driver sleep or unload\n"));
+ return;
+ }
+ rtlphy->set_bwmode_inprogress = true;
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+ ("Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz"));
+ reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
+ reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ reg_bw_opmode |= BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ reg_bw_opmode &= ~BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+
+ reg_prsr_rsc = (reg_prsr_rsc & 0x90) |
+ (mac->cur_40_prime_sc << 5);
+ rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ break;
+ }
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
+ /* SET BIT10 BIT11 for receive cck */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10) |
+ BIT(11), 3);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
+ /* Set Control channel to upper or lower.
+ * These settings are required only for 40MHz */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCKSIDEBAND,
+ (mac->cur_40_prime_sc >> 1));
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ }
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
+ /* SET BIT10 BIT11 for receive cck */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10) |
+ BIT(11), 0);
+ rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
+ (mac->cur_40_prime_sc ==
+ HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
+ break;
+
+ }
+ rtl92d_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+ rtlphy->set_bwmode_inprogress = false;
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+}
+
+static void _rtl92d_phy_stop_trx_before_changeband(struct ieee80211_hw *hw)
+{
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x00);
+ rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x0);
+}
+
+static void rtl92d_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 value8;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("==>\n"));
+ rtlhal->bandset = band;
+ rtlhal->current_bandtype = band;
+ if (IS_92D_SINGLEPHY(rtlhal->version))
+ rtlhal->bandset = BAND_ON_BOTH;
+ /* stop RX/Tx */
+ _rtl92d_phy_stop_trx_before_changeband(hw);
+ /* reconfig BB/RF according to wireless mode */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* BB & RF Config */
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, ("====>2.4G\n"));
+ if (rtlhal->interfaceindex == 1)
+ _rtl92d_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_AGC_TAB);
+ } else {
+ /* 5G band */
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, ("====>5G\n"));
+ if (rtlhal->interfaceindex == 1)
+ _rtl92d_phy_config_bb_with_headerfile(hw,
+ BASEBAND_CONFIG_AGC_TAB);
+ }
+ rtl92d_update_bbrf_configuration(hw);
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
+
+ /* 20M BW. */
+ /* rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1); */
+ rtlhal->reloadtxpowerindex = true;
+	/* Let the firmware know the band status: 0x81[1]/0x53[1] = 0: 5G, 1: 2G */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ value8 = rtl_read_byte(rtlpriv, (rtlhal->interfaceindex ==
+ 0 ? REG_MAC0 : REG_MAC1));
+ value8 |= BIT(1);
+ rtl_write_byte(rtlpriv, (rtlhal->interfaceindex ==
+ 0 ? REG_MAC0 : REG_MAC1), value8);
+ } else {
+ value8 = rtl_read_byte(rtlpriv, (rtlhal->interfaceindex ==
+ 0 ? REG_MAC0 : REG_MAC1));
+ value8 &= (~BIT(1));
+ rtl_write_byte(rtlpriv, (rtlhal->interfaceindex ==
+ 0 ? REG_MAC0 : REG_MAC1), value8);
+ }
+ mdelay(1);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("<==Switch Band OK.\n"));
+}
+
+static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
+ u8 channel, u8 rfpath)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 imr_num = MAX_RF_IMR_INDEX;
+ u32 rfmask = BRFREGOFFSETMASK;
+ u8 group, i;
+ unsigned long flag = 0;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("====>path %d\n", rfpath));
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("====>5G\n"));
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(25) | BIT(24), 0);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
+ /* fc area 0xd2c */
+ if (channel > 99)
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(13) |
+ BIT(14), 2);
+ else
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(13) |
+ BIT(14), 1);
+		/* leave 0 for channels 1-14. */
+ group = channel <= 64 ? 1 : 2;
+ imr_num = MAX_RF_IMR_INDEX_NORMAL;
+ for (i = 0; i < imr_num; i++)
+ rtl_set_rfreg(hw, (enum radio_path)rfpath,
+ rf_reg_for_5g_swchnl_normal[i], rfmask,
+ rf_imr_param_normal[0][group][i]);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 1);
+ } else {
+ /* G band. */
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
+ ("Load RF IMR parameters for G band. IMR already "
+ "setting %d\n",
+ rtlpriv->rtlhal.load_imrandiqk_setting_for2g));
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("====>2.4G\n"));
+ if (!rtlpriv->rtlhal.load_imrandiqk_setting_for2g) {
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
+ ("Load RF IMR parameters "
+ "for G band. %d\n", rfpath));
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(25) | BIT(24), 0);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
+ 0x00f00000, 0xf);
+ imr_num = MAX_RF_IMR_INDEX_NORMAL;
+ for (i = 0; i < imr_num; i++) {
+ rtl_set_rfreg(hw, (enum radio_path)rfpath,
+ rf_reg_for_5g_swchnl_normal[i],
+ BRFREGOFFSETMASK,
+ rf_imr_param_normal[0][0][i]);
+ }
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
+ 0x00f00000, 0);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN | BCCKEN, 3);
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("<====\n"));
+}
+
+static void _rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw,
+ u8 rfpath, u32 *pu4_regval)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("====>\n"));
+ /*----Store original RFENV control type----*/
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ *pu4_regval = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ *pu4_regval =
+ rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16);
+ break;
+ }
+ /*----Set RF_ENV enable----*/
+ rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+ udelay(1);
+ /*----Set RF_ENV output high----*/
+ rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+ udelay(1);
+ /* Set bit number of Address and Data for RF register */
+ /* Set 1 to 4 bits for 8255 */
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREADDRESSLENGTH, 0x0);
+ udelay(1);
+ /*Set 0 to 12 bits for 8255 */
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+ udelay(1);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("<====\n"));
+}
+
+static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
+ u32 *pu4_regval)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("=====>\n"));
+	/*----Restore RFENV control type----*/
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV, *pu4_regval);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16,
+ *pu4_regval);
+ break;
+ }
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("<=====\n"));
+}
+
+static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ u8 path = rtlhal->current_bandtype ==
+ BAND_ON_5G ? RF90_PATH_A : RF90_PATH_B;
+ u8 index = 0, i = 0, rfpath = RF90_PATH_A;
+ bool need_pwr_down = false, internal_pa = false;
+ u32 u4regvalue, mask = 0x1C000, value = 0, u4tmp, u4tmp2;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("====>\n"));
+ /* config path A for 5G */
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("====>5G\n"));
+ u4tmp = curveindex_5g[channel - 1];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("ver 1 set RF-A, 5G, "
+ "0x28 = 0x%x !!\n", u4tmp));
+ for (i = 0; i < RF_CHNL_NUM_5G; i++) {
+ if (channel == rf_chnl_5g[i] && channel <= 140)
+ index = 0;
+ }
+ for (i = 0; i < RF_CHNL_NUM_5G_40M; i++) {
+ if (channel == rf_chnl_5g_40m[i] && channel <= 140)
+ index = 1;
+ }
+ if (channel == 149 || channel == 155 || channel == 161)
+ index = 2;
+ else if (channel == 151 || channel == 153 || channel == 163
+ || channel == 165)
+ index = 3;
+ else if (channel == 157 || channel == 159)
+ index = 4;
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY
+ && rtlhal->interfaceindex == 1) {
+ need_pwr_down = rtl92d_phy_enable_anotherphy(hw, false);
+ rtlhal->during_mac1init_radioa = true;
+			/* assume this case never happens */
+ if (need_pwr_down)
+ _rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
+ }
+ for (i = 0; i < RF_REG_NUM_FOR_C_CUT_5G; i++) {
+ if (i == 0 && (rtlhal->macphymode == DUALMAC_DUALPHY)) {
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ BRFREGOFFSETMASK, 0xE439D);
+ } else if (rf_reg_for_c_cut_5g[i] == RF_SYN_G4) {
+ u4tmp2 = (rf_reg_pram_c_5g[index][i] &
+ 0x7FF) | (u4tmp << 11);
+ if (channel == 36)
+ u4tmp2 &= ~(BIT(7) | BIT(6));
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ BRFREGOFFSETMASK, u4tmp2);
+ } else {
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ BRFREGOFFSETMASK,
+ rf_reg_pram_c_5g[index][i]);
+ }
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+ ("offset 0x%x value 0x%x "
+ "path %d index %d readback 0x%x\n",
+ rf_reg_for_c_cut_5g[i],
+ rf_reg_pram_c_5g[index][i], path,
+ index, rtl_get_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i], BRFREGOFFSETMASK)));
+ }
+ if (need_pwr_down)
+ _rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+ if (rtlhal->during_mac1init_radioa)
+ rtl92d_phy_powerdown_anotherphy(hw, false);
+ if (channel < 149)
+ value = 0x07;
+ else if (channel >= 149)
+ value = 0x02;
+ if (channel >= 36 && channel <= 64)
+ index = 0;
+ else if (channel >= 100 && channel <= 140)
+ index = 1;
+ else
+ index = 2;
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 1) /* MAC 1 5G */
+ internal_pa = rtlpriv->efuse.internal_pa_5g[1];
+ else
+ internal_pa =
+ rtlpriv->efuse.internal_pa_5g[rfpath];
+ if (internal_pa) {
+ for (i = 0;
+ i < RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA;
+ i++) {
+ rtl_set_rfreg(hw, rfpath,
+ rf_for_c_cut_5g_internal_pa[i],
+ BRFREGOFFSETMASK,
+ rf_pram_c_5g_int_pa[index][i]);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
+ ("offset 0x%x value 0x%x "
+ "path %d index %d\n",
+ rf_for_c_cut_5g_internal_pa[i],
+ rf_pram_c_5g_int_pa[index][i],
+ rfpath, index));
+ }
+ } else {
+ rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B,
+ mask, value);
+ }
+ }
+ } else if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("====>2.4G\n"));
+ u4tmp = curveindex_2g[channel - 1];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("ver 3 set RF-B, 2G, "
+ "0x28 = 0x%x !!\n", u4tmp));
+ if (channel == 1 || channel == 2 || channel == 4 || channel == 9
+ || channel == 10 || channel == 11 || channel == 12)
+ index = 0;
+ else if (channel == 3 || channel == 13 || channel == 14)
+ index = 1;
+ else if (channel >= 5 && channel <= 8)
+ index = 2;
+ if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+ path = RF90_PATH_A;
+ if (rtlhal->interfaceindex == 0) {
+ need_pwr_down =
+ rtl92d_phy_enable_anotherphy(hw, true);
+ rtlhal->during_mac0init_radiob = true;
+
+ if (need_pwr_down)
+ _rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
+ }
+ }
+ for (i = 0; i < RF_REG_NUM_FOR_C_CUT_2G; i++) {
+ if (rf_reg_for_c_cut_2g[i] == RF_SYN_G7)
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ BRFREGOFFSETMASK,
+ (rf_reg_param_for_c_cut_2g[index][i] |
+ BIT(17)));
+ else
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ BRFREGOFFSETMASK,
+ rf_reg_param_for_c_cut_2g
+ [index][i]);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+				 ("offset 0x%x value 0x%x mask 0x%x path %d "
+ "index %d readback 0x%x\n",
+ rf_reg_for_c_cut_2g[i],
+ rf_reg_param_for_c_cut_2g[index][i],
+ rf_reg_mask_for_c_cut_2g[i], path, index,
+ rtl_get_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ BRFREGOFFSETMASK)));
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("cosa ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
+ rf_syn_g4_for_c_cut_2g | (u4tmp << 11)));
+
+ rtl_set_rfreg(hw, (enum radio_path)path, RF_SYN_G4,
+ BRFREGOFFSETMASK,
+ rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
+ if (need_pwr_down)
+ _rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+ if (rtlhal->during_mac0init_radiob)
+ rtl92d_phy_powerdown_anotherphy(hw, true);
+ }
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("<====\n"));
+}
+
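+/* Return the 1-based index of a 5G channel among the 5G entries of
+ * channel_all[]; 2.4G channels return 0. */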
+u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl)
+{
+ u8 channel_all[59] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
+ 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
+ 114, 116, 118, 120, 122, 124, 126, 128, 130,
+ 132, 134, 136, 138, 140, 149, 151, 153, 155,
+ 157, 159, 161, 163, 165
+ };
+ u8 place = chnl;
+
+ if (chnl > 14) {
+ for (place = 14; place < sizeof(channel_all); place++) {
+ if (channel_all[place] == chnl)
+ return place - 13;
+ }
+ }
+
+ return 0;
+}
+
+#define MAX_TOLERANCE 5
+#define IQK_DELAY_TIME 1 /* ms */
+#define MAX_TOLERANCE_92D 3
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 _rtl92d_phy_patha_iqk(struct ieee80211_hw *hw, bool configpathb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 regeac, rege94, rege9c, regea4;
+ u8 result = 0;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path A IQK!\n"));
+ /* path-A IQK setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path-A IQK setting!\n"));
+ if (rtlhal->interfaceindex == 0) {
+ rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x10008c1f);
+ rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x10008c1f);
+ } else {
+ rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x10008c22);
+ }
+ rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82140102);
+ rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x28160206);
+ /* path-B IQK setting */
+ if (configpathb) {
+ rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82140102);
+ rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x28160206);
+ }
+ /* LO calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("LO calibration setting!\n"));
+ rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
+ /* One shot, path A LOK & IQK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("One shot, path A LOK & IQK!\n"));
+ rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
+ /* delay x ms */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Delay %d ms for One shot, path A LOK & IQK.\n",
+ IQK_DELAY_TIME));
+ mdelay(IQK_DELAY_TIME);
+ /* Check failed */
+ regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeac = 0x%x\n", regeac));
+ rege94 = rtl_get_bbreg(hw, 0xe94, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xe94 = 0x%x\n", rege94));
+ rege9c = rtl_get_bbreg(hw, 0xe9c, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xe9c = 0x%x\n", rege9c));
+ regea4 = rtl_get_bbreg(hw, 0xea4, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xea4 = 0x%x\n", regea4));
+ if (!(regeac & BIT(28)) && (((rege94 & 0x03FF0000) >> 16) != 0x142) &&
+ (((rege9c & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else /* if Tx not OK, ignore Rx */
+ return result;
+ /* if Tx is OK, check whether Rx is OK */
+ if (!(regeac & BIT(27)) && (((regea4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((regeac & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ else
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path A Rx IQK fail!!\n"));
+ return result;
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
+ bool configpathb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 regeac, rege94, rege9c, regea4;
+ u8 result = 0;
+ u8 i;
+ u8 retrycount = 2;
+ u32 TxOKBit = BIT(28), RxOKBit = BIT(27);
+
+ if (rtlhal->interfaceindex == 1) { /* PHY1 */
+ TxOKBit = BIT(31);
+ RxOKBit = BIT(30);
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path A IQK!\n"));
+ /* path-A IQK setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path-A IQK setting!\n"));
+ rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82140307);
+ rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x68160960);
+ /* path-B IQK setting */
+ if (configpathb) {
+ rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82110000);
+ rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x68110000);
+ }
+ /* LO calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("LO calibration setting!\n"));
+ rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
+ /* path-A PA on */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD, 0x07000f60);
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BMASKDWORD, 0x66e60e30);
+ for (i = 0; i < retrycount; i++) {
+ /* One shot, path A LOK & IQK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("One shot, path A LOK & IQK!\n"));
+ rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
+ /* delay x ms */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Delay %d ms for One shot, path A LOK & IQK.\n",
+			 IQK_DELAY_TIME * 10));
+ mdelay(IQK_DELAY_TIME * 10);
+ /* Check failed */
+ regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeac = 0x%x\n", regeac));
+ rege94 = rtl_get_bbreg(hw, 0xe94, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xe94 = 0x%x\n", rege94));
+ rege9c = rtl_get_bbreg(hw, 0xe9c, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xe9c = 0x%x\n", rege9c));
+ regea4 = rtl_get_bbreg(hw, 0xea4, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xea4 = 0x%x\n", regea4));
+ if (!(regeac & TxOKBit) &&
+ (((rege94 & 0x03FF0000) >> 16) != 0x142)) {
+ result |= 0x01;
+ } else { /* if Tx not OK, ignore Rx */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Path A Tx IQK fail!!\n"));
+ continue;
+ }
+
+ /* if Tx is OK, check whether Rx is OK */
+ if (!(regeac & RxOKBit) &&
+ (((regea4 & 0x03FF0000) >> 16) != 0x132)) {
+ result |= 0x02;
+ break;
+ } else {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Path A Rx IQK fail!!\n"));
+ }
+ }
+ /* path A PA off */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD,
+ rtlphy->iqk_bb_backup[0]);
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BMASKDWORD,
+ rtlphy->iqk_bb_backup[1]);
+ return result;
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 _rtl92d_phy_pathb_iqk(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 regeac, regeb4, regebc, regec4, regecc;
+ u8 result = 0;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path B IQK!\n"));
+ /* One shot, path B LOK & IQK */
+	RTPRINT(rtlpriv, FINIT, INIT_IQK, ("One shot, path B LOK & IQK!\n"));
+ rtl_set_bbreg(hw, 0xe60, BMASKDWORD, 0x00000002);
+ rtl_set_bbreg(hw, 0xe60, BMASKDWORD, 0x00000000);
+ /* delay x ms */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Delay %d ms for One shot, path B LOK & IQK.\n",
+ IQK_DELAY_TIME));
+ mdelay(IQK_DELAY_TIME);
+ /* Check failed */
+ regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeac = 0x%x\n", regeac));
+ regeb4 = rtl_get_bbreg(hw, 0xeb4, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeb4 = 0x%x\n", regeb4));
+ regebc = rtl_get_bbreg(hw, 0xebc, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xebc = 0x%x\n", regebc));
+ regec4 = rtl_get_bbreg(hw, 0xec4, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xec4 = 0x%x\n", regec4));
+ regecc = rtl_get_bbreg(hw, 0xecc, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xecc = 0x%x\n", regecc));
+ if (!(regeac & BIT(31)) && (((regeb4 & 0x03FF0000) >> 16) != 0x142) &&
+ (((regebc & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+ if (!(regeac & BIT(30)) && (((regec4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((regecc & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ else
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path B Rx IQK fail!!\n"));
+ return result;
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 regeac, regeb4, regebc, regec4, regecc;
+ u8 result = 0;
+ u8 i;
+ u8 retrycount = 2;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path B IQK!\n"));
+ /* path-A IQK setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path-A IQK setting!\n"));
+ rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82110000);
+ rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x68110000);
+
+ /* path-B IQK setting */
+ rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82140307);
+ rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x68160960);
+
+ /* LO calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("LO calibration setting!\n"));
+ rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
+
+ /* path-B PA on */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD, 0x0f600700);
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BMASKDWORD, 0x061f0d30);
+
+ for (i = 0; i < retrycount; i++) {
+ /* One shot, path B LOK & IQK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+			("One shot, path B LOK & IQK!\n"));
+ rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xfa000000);
+ rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
+
+ /* delay x ms */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Delay %d ms for One shot, path B LOK & IQK.\n", 10));
+ mdelay(IQK_DELAY_TIME * 10);
+
+ /* Check failed */
+ regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeac = 0x%x\n", regeac));
+ regeb4 = rtl_get_bbreg(hw, 0xeb4, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xeb4 = 0x%x\n", regeb4));
+ regebc = rtl_get_bbreg(hw, 0xebc, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xebc = 0x%x\n", regebc));
+ regec4 = rtl_get_bbreg(hw, 0xec4, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xec4 = 0x%x\n", regec4));
+ regecc = rtl_get_bbreg(hw, 0xecc, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xecc = 0x%x\n", regecc));
+ if (!(regeac & BIT(31)) &&
+ (((regeb4 & 0x03FF0000) >> 16) != 0x142))
+ result |= 0x01;
+ else
+ continue;
+ if (!(regeac & BIT(30)) &&
+ (((regec4 & 0x03FF0000) >> 16) != 0x132)) {
+ result |= 0x02;
+ break;
+ } else {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Path B Rx IQK fail!!\n"));
+ }
+ }
+
+ /* path B PA off */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD,
+ rtlphy->iqk_bb_backup[0]);
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BMASKDWORD,
+ rtlphy->iqk_bb_backup[2]);
+ return result;
+}
+
+static void _rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw,
+ u32 *adda_reg, u32 *adda_backup,
+ u32 regnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Save ADDA parameters.\n"));
+ for (i = 0; i < regnum; i++)
+ adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], BMASKDWORD);
+}
+
+static void _rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Save MAC parameters.\n"));
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
+ macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
+}
+
+static void _rtl92d_phy_reload_adda_registers(struct ieee80211_hw *hw,
+ u32 *adda_reg, u32 *adda_backup,
+ u32 regnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Reload ADDA power saving parameters !\n"));
+ for (i = 0; i < regnum; i++)
+ rtl_set_bbreg(hw, adda_reg[i], BMASKDWORD, adda_backup[i]);
+}
+
+static void _rtl92d_phy_reload_mac_registers(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Reload MAC parameters !\n"));
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
+	rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
+}
+
+static void _rtl92d_phy_path_adda_on(struct ieee80211_hw *hw,
+ u32 *adda_reg, bool patha_on, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 pathon;
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("ADDA ON.\n"));
+ pathon = patha_on ? 0x04db25a4 : 0x0b1b25a4;
+ if (patha_on)
+ pathon = rtlpriv->rtlhal.interfaceindex == 0 ?
+ 0x04db25a4 : 0x0b1b25a4;
+ for (i = 0; i < IQK_ADDA_REG_NUM; i++)
+ rtl_set_bbreg(hw, adda_reg[i], BMASKDWORD, pathon);
+}
+
+static void _rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+ u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("MAC settings for Calibration.\n"));
+ rtl_write_byte(rtlpriv, macreg[0], 0x3F);
+
+ for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtl_write_byte(rtlpriv, macreg[i], (u8)(macbackup[i] &
+ (~BIT(3))));
+ rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
+}
+
+static void _rtl92d_phy_patha_standby(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path-A standby mode!\n"));
+
+ rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x0);
+ rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, BMASKDWORD, 0x00010000);
+ rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000);
+}
+
+static void _rtl92d_phy_pimode_switch(struct ieee80211_hw *hw, bool pi_mode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 mode;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("BB Switch to %s mode!\n", (pi_mode ? "PI" : "SI")));
+ mode = pi_mode ? 0x01000100 : 0x01000000;
+ rtl_set_bbreg(hw, 0x820, BMASKDWORD, mode);
+ rtl_set_bbreg(hw, 0x828, BMASKDWORD, mode);
+}
+
+static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
+ u8 t, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u32 i;
+ u8 patha_ok, pathb_ok;
+ static u32 adda_reg[IQK_ADDA_REG_NUM] = {
+ RFPGA0_XCD_SWITCHCONTROL, 0xe6c, 0xe70, 0xe74,
+ 0xe78, 0xe7c, 0xe80, 0xe84,
+ 0xe88, 0xe8c, 0xed0, 0xed4,
+ 0xed8, 0xedc, 0xee0, 0xeec
+ };
+ static u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+ 0x522, 0x550, 0x551, 0x040
+ };
+ static u32 iqk_bb_reg[IQK_BB_REG_NUM] = {
+ RFPGA0_XAB_RFINTERFACESW, RFPGA0_XA_RFINTERFACEOE,
+ RFPGA0_XB_RFINTERFACEOE, ROFDM0_TRMUXPAR,
+ RFPGA0_XCD_RFINTERFACESW, ROFDM0_TRXPATHENABLE,
+ RFPGA0_RFMOD, RFPGA0_ANALOGPARAMETER4,
+ ROFDM0_XAAGCCORE1, ROFDM0_XBAGCCORE1
+ };
+ const u32 retrycount = 2;
+ u32 bbvalue;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("IQK for 2.4G :Start!!!\n"));
+ if (t == 0) {
+ bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("==>0x%08x\n", bbvalue));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("IQ Calibration for %s\n",
+ (is2t ? "2T2R" : "1T1R")));
+
+ /* Save ADDA parameters, turn Path A ADDA on */
+ _rtl92d_phy_save_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup, IQK_ADDA_REG_NUM);
+ _rtl92d_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ _rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup, IQK_BB_REG_NUM);
+ }
+ _rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t);
+ if (t == 0)
+ rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER1, BIT(8));
+
+ /* Switch BB to PI mode to do IQ Calibration. */
+ if (!rtlphy->rfpi_enable)
+ _rtl92d_phy_pimode_switch(hw, true);
+
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(24), 0x00);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKDWORD, 0x03a05600);
+ rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, BMASKDWORD, 0x000800e4);
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, BMASKDWORD, 0x22204000);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f);
+ if (is2t) {
+ rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, BMASKDWORD,
+ 0x00010000);
+ rtl_set_bbreg(hw, RFPGA0_XB_LSSIPARAMETER, BMASKDWORD,
+ 0x00010000);
+ }
+ /* MAC settings */
+ _rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ /* Page B init */
+ rtl_set_bbreg(hw, 0xb68, BMASKDWORD, 0x0f600000);
+ if (is2t)
+ rtl_set_bbreg(hw, 0xb6c, BMASKDWORD, 0x0f600000);
+ /* IQ calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("IQK setting!\n"));
+ rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000);
+ rtl_set_bbreg(hw, 0xe40, BMASKDWORD, 0x01007c00);
+ rtl_set_bbreg(hw, 0xe44, BMASKDWORD, 0x01004800);
+ for (i = 0; i < retrycount; i++) {
+ patha_ok = _rtl92d_phy_patha_iqk(hw, is2t);
+ if (patha_ok == 0x03) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Path A IQK Success!!\n"));
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][2] = (rtl_get_bbreg(hw, 0xea4, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][3] = (rtl_get_bbreg(hw, 0xeac, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ break;
+ } else if (i == (retrycount - 1) && patha_ok == 0x01) {
+ /* Tx IQK OK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Path A IQK Only Tx Success!!\n"));
+
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ }
+ }
+ if (0x00 == patha_ok)
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path A IQK failed!!\n"));
+ if (is2t) {
+ _rtl92d_phy_patha_standby(hw);
+ /* Turn Path B ADDA on */
+ _rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t);
+ for (i = 0; i < retrycount; i++) {
+ pathb_ok = _rtl92d_phy_pathb_iqk(hw);
+ if (pathb_ok == 0x03) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Path B IQK Success!!\n"));
+ result[t][4] = (rtl_get_bbreg(hw, 0xeb4,
+ BMASKDWORD) & 0x3FF0000) >> 16;
+ result[t][5] = (rtl_get_bbreg(hw, 0xebc,
+ BMASKDWORD) & 0x3FF0000) >> 16;
+ result[t][6] = (rtl_get_bbreg(hw, 0xec4,
+ BMASKDWORD) & 0x3FF0000) >> 16;
+ result[t][7] = (rtl_get_bbreg(hw, 0xecc,
+ BMASKDWORD) & 0x3FF0000) >> 16;
+ break;
+ } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
+ /* Tx IQK OK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Path B Only Tx IQK Success!!\n"));
+ result[t][4] = (rtl_get_bbreg(hw, 0xeb4,
+ BMASKDWORD) & 0x3FF0000) >> 16;
+ result[t][5] = (rtl_get_bbreg(hw, 0xebc,
+ BMASKDWORD) & 0x3FF0000) >> 16;
+ }
+ }
+ if (0x00 == pathb_ok)
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Path B IQK failed!!\n"));
+ }
+
+ /* Back to BB mode, load original value */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("IQK:Back to BB mode, load original value!\n"));
+
+ rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0);
+ if (t != 0) {
+ /* Switch back BB to SI mode after finish IQ Calibration. */
+ if (!rtlphy->rfpi_enable)
+ _rtl92d_phy_pimode_switch(hw, false);
+ /* Reload ADDA power saving parameters */
+ _rtl92d_phy_reload_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup, IQK_ADDA_REG_NUM);
+ /* Reload MAC parameters */
+ _rtl92d_phy_reload_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ if (is2t)
+ _rtl92d_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ else
+ _rtl92d_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM - 1);
+ /* load 0xe30 IQC default value */
+ rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x01008c00);
+ rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x01008c00);
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("<==\n"));
+}
+
+static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
+ long result[][8], u8 t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ u8 patha_ok, pathb_ok;
+ static u32 adda_reg[IQK_ADDA_REG_NUM] = {
+ RFPGA0_XCD_SWITCHCONTROL, 0xe6c, 0xe70, 0xe74,
+ 0xe78, 0xe7c, 0xe80, 0xe84,
+ 0xe88, 0xe8c, 0xed0, 0xed4,
+ 0xed8, 0xedc, 0xee0, 0xeec
+ };
+ static u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+ 0x522, 0x550, 0x551, 0x040
+ };
+ static u32 iqk_bb_reg[IQK_BB_REG_NUM] = {
+ RFPGA0_XAB_RFINTERFACESW, RFPGA0_XA_RFINTERFACEOE,
+ RFPGA0_XB_RFINTERFACEOE, ROFDM0_TRMUXPAR,
+ RFPGA0_XCD_RFINTERFACESW, ROFDM0_TRXPATHENABLE,
+ RFPGA0_RFMOD, RFPGA0_ANALOGPARAMETER4,
+ ROFDM0_XAAGCCORE1, ROFDM0_XBAGCCORE1
+ };
+ u32 bbvalue;
+ bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
+
+ /* Note: IQ calibration must be performed after loading
+	 * PHY_REG.txt, and radio_a, radio_b.txt */
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("IQK for 5G NORMAL:Start!!!\n"));
+ mdelay(IQK_DELAY_TIME * 20);
+ if (t == 0) {
+ bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, BMASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("==>0x%08x\n", bbvalue));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("IQ Calibration for %s\n",
+ (is2t ? "2T2R" : "1T1R")));
+ /* Save ADDA parameters, turn Path A ADDA on */
+ _rtl92d_phy_save_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup,
+ IQK_ADDA_REG_NUM);
+ _rtl92d_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ if (is2t)
+ _rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ else
+ _rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM - 1);
+ }
+ _rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t);
+ /* MAC settings */
+ _rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ if (t == 0)
+ rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER1, BIT(8));
+ /* Switch BB to PI mode to do IQ Calibration. */
+ if (!rtlphy->rfpi_enable)
+ _rtl92d_phy_pimode_switch(hw, true);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(24), 0x00);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKDWORD, 0x03a05600);
+ rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, BMASKDWORD, 0x000800e4);
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, BMASKDWORD, 0x22208000);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f);
+
+ /* Page B init */
+ rtl_set_bbreg(hw, 0xb68, BMASKDWORD, 0x0f600000);
+ if (is2t)
+ rtl_set_bbreg(hw, 0xb6c, BMASKDWORD, 0x0f600000);
+ /* IQ calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("IQK setting!\n"));
+ rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000);
+ rtl_set_bbreg(hw, 0xe40, BMASKDWORD, 0x10007c00);
+ rtl_set_bbreg(hw, 0xe44, BMASKDWORD, 0x01004800);
+ patha_ok = _rtl92d_phy_patha_iqk_5g_normal(hw, is2t);
+ if (patha_ok == 0x03) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path A IQK Success!!\n"));
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][2] = (rtl_get_bbreg(hw, 0xea4, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][3] = (rtl_get_bbreg(hw, 0xeac, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ } else if (patha_ok == 0x01) { /* Tx IQK OK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Path A IQK Only Tx Success!!\n"));
+
+ result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ } else {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path A IQK Fail!!\n"));
+ }
+ if (is2t) {
+ /* _rtl92d_phy_patha_standby(hw); */
+ /* Turn Path B ADDA on */
+ _rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t);
+ pathb_ok = _rtl92d_phy_pathb_iqk_5g_normal(hw);
+ if (pathb_ok == 0x03) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Path B IQK Success!!\n"));
+ result[t][4] = (rtl_get_bbreg(hw, 0xeb4, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][5] = (rtl_get_bbreg(hw, 0xebc, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][6] = (rtl_get_bbreg(hw, 0xec4, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][7] = (rtl_get_bbreg(hw, 0xecc, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ } else if (pathb_ok == 0x01) { /* Tx IQK OK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Path B Only Tx IQK Success!!\n"));
+ result[t][4] = (rtl_get_bbreg(hw, 0xeb4, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ result[t][5] = (rtl_get_bbreg(hw, 0xebc, BMASKDWORD) &
+ 0x3FF0000) >> 16;
+ } else {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Path B IQK failed!!\n"));
+ }
+ }
+
+ /* Back to BB mode, load original value */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("IQK:Back to BB mode, load original value!\n"));
+ rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0);
+ if (t != 0) {
+ if (is2t)
+ _rtl92d_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ else
+ _rtl92d_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM - 1);
+ /* Reload MAC parameters */
+ _rtl92d_phy_reload_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ /* Switch back BB to SI mode after finish IQ Calibration. */
+ if (!rtlphy->rfpi_enable)
+ _rtl92d_phy_pimode_switch(hw, false);
+ /* Reload ADDA power saving parameters */
+ _rtl92d_phy_reload_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup,
+ IQK_ADDA_REG_NUM);
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("<==\n"));
+}
+
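+/* Compare two IQK measurement sets entry by entry; a difference larger
+ * than MAX_TOLERANCE_92D flags that entry in sim_bitmap. */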
+static bool _rtl92d_phy_simularity_compare(struct ieee80211_hw *hw,
+ long result[][8], u8 c1, u8 c2)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ u32 i, j, diff, sim_bitmap, bound;
+ u8 final_candidate[2] = {0xFF, 0xFF}; /* for path A and path B */
+ bool bresult = true;
+ bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
+
+ if (is2t)
+ bound = 8;
+ else
+ bound = 4;
+ sim_bitmap = 0;
+ for (i = 0; i < bound; i++) {
+ diff = (result[c1][i] > result[c2][i]) ? (result[c1][i] -
+ result[c2][i]) : (result[c2][i] - result[c1][i]);
+ if (diff > MAX_TOLERANCE_92D) {
+ if ((i == 2 || i == 6) && !sim_bitmap) {
+ if (result[c1][i] + result[c1][i + 1] == 0)
+ final_candidate[(i / 4)] = c2;
+ else if (result[c2][i] + result[c2][i + 1] == 0)
+ final_candidate[(i / 4)] = c1;
+ else
+ sim_bitmap = sim_bitmap | (1 << i);
+ } else {
+ sim_bitmap = sim_bitmap | (1 << i);
+ }
+ }
+ }
+ if (sim_bitmap == 0) {
+ for (i = 0; i < (bound / 4); i++) {
+ if (final_candidate[i] != 0xFF) {
+ for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+ result[3][j] =
+ result[final_candidate[i]][j];
+ bresult = false;
+ }
+ }
+ return bresult;
+ }
+ if (!(sim_bitmap & 0x0F)) { /* path A OK */
+ for (i = 0; i < 4; i++)
+ result[3][i] = result[c1][i];
+ } else if (!(sim_bitmap & 0x03)) { /* path A, Tx OK */
+ for (i = 0; i < 2; i++)
+ result[3][i] = result[c1][i];
+ }
+ if (!(sim_bitmap & 0xF0) && is2t) { /* path B OK */
+ for (i = 4; i < 8; i++)
+ result[3][i] = result[c1][i];
+ } else if (!(sim_bitmap & 0x30)) { /* path B, Tx OK */
+ for (i = 4; i < 6; i++)
+ result[3][i] = result[c1][i];
+ }
+ return false;
+}
+
+static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
+ bool iqk_ok, long result[][8],
+ u8 final_candidate, bool txonly)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ u32 oldval_0, val_x, tx0_a, reg;
+ long val_y, tx0_c;
+ bool is2t = IS_92D_SINGLEPHY(rtlhal->version) ||
+ rtlhal->macphymode == DUALMAC_DUALPHY;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("Path A IQ Calibration %s !\n",
+ (iqk_ok) ? "Success" : "Failed"));
+ if (final_candidate == 0xFF) {
+ return;
+ } else if (iqk_ok) {
+ oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
+ BMASKDWORD) >> 22) & 0x3FF; /* OFDM0_D */
+ val_x = result[final_candidate][0];
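+		/* Sign-extend the 10-bit IQK result. */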
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+ tx0_a = (val_x * oldval_0) >> 8;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("X = 0x%x, tx0_a = 0x%x,"
+ " oldval_0 0x%x\n", val_x, tx0_a, oldval_0));
+ rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, 0x3FF, tx0_a);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
+ ((val_x * oldval_0 >> 7) & 0x1));
+ val_y = result[final_candidate][1];
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+ /* path B IQK result + 3 */
+ if (rtlhal->interfaceindex == 1 &&
+ rtlhal->current_bandtype == BAND_ON_5G)
+ val_y += 3;
+ tx0_c = (val_y * oldval_0) >> 8;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Y = 0x%lx, tx0_c = 0x%lx\n",
+ val_y, tx0_c));
+ rtl_set_bbreg(hw, ROFDM0_XCTxAFE, 0xF0000000,
+ ((tx0_c & 0x3C0) >> 6));
+ rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, 0x003F0000,
+ (tx0_c & 0x3F));
+ if (is2t)
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(26),
+ ((val_y * oldval_0 >> 7) & 0x1));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("0xC80 = 0x%x\n",
+ rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
+ BMASKDWORD)));
+ if (txonly) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("only Tx OK\n"));
+ return;
+ }
+ reg = result[final_candidate][2];
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][3] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][3] >> 6) & 0xF;
+ rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
+ }
+}
+
+static void _rtl92d_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw,
+ bool iqk_ok, long result[][8], u8 final_candidate, bool txonly)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ u32 oldval_1, val_x, tx1_a, reg;
+ long val_y, tx1_c;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Path B IQ Calibration %s !\n",
+ (iqk_ok) ? "Success" : "Failed"));
+ if (final_candidate == 0xFF) {
+ return;
+ } else if (iqk_ok) {
+ oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
+ BMASKDWORD) >> 22) & 0x3FF;
+ val_x = result[final_candidate][4];
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+ tx1_a = (val_x * oldval_1) >> 8;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("X = 0x%x, tx1_a = 0x%x\n",
+ val_x, tx1_a));
+ rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, 0x3FF, tx1_a);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28),
+ ((val_x * oldval_1 >> 7) & 0x1));
+ val_y = result[final_candidate][5];
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ val_y += 3;
+ tx1_c = (val_y * oldval_1) >> 8;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("Y = 0x%lx, tx1_c = 0x%lx\n",
+ val_y, tx1_c));
+ rtl_set_bbreg(hw, ROFDM0_XDTxAFE, 0xF0000000,
+ ((tx1_c & 0x3C0) >> 6));
+ rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, 0x003F0000,
+ (tx1_c & 0x3F));
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30),
+ ((val_y * oldval_1 >> 7) & 0x1));
+ if (txonly)
+ return;
+ reg = result[final_candidate][6];
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][7] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][7] >> 6) & 0xF;
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
+ }
+}
+
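+/*
+ * Top-level IQK entry: run the calibration up to three times, use the
+ * similarity compare to pick a final candidate (or fall back to the
+ * combined result stored in result[3]), program the paths and cache the
+ * values per channel for later reloads.
+ */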
+void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ long result[4][8];
+ u8 i, final_candidate, indexforchannel;
+ bool patha_ok, pathb_ok;
+ long rege94, rege9c, regea4, regeac, regeb4;
+ long regebc, regec4, regecc, regtmp = 0;
+ bool is12simular, is13simular, is23simular;
+ unsigned long flag = 0;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("IQK:Start!!!channel %d\n", rtlphy->current_channel));
+ for (i = 0; i < 8; i++) {
+ result[0][i] = 0;
+ result[1][i] = 0;
+ result[2][i] = 0;
+ result[3][i] = 0;
+ }
+ final_candidate = 0xff;
+ patha_ok = false;
+ pathb_ok = false;
+ is12simular = false;
+ is23simular = false;
+ is13simular = false;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("IQK !!!currentband %d\n", rtlhal->current_bandtype));
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ for (i = 0; i < 3; i++) {
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ _rtl92d_phy_iq_calibrate_5g_normal(hw, result, i);
+ } else if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ if (IS_92D_SINGLEPHY(rtlhal->version))
+ _rtl92d_phy_iq_calibrate(hw, result, i, true);
+ else
+ _rtl92d_phy_iq_calibrate(hw, result, i, false);
+ }
+ if (i == 1) {
+ is12simular = _rtl92d_phy_simularity_compare(hw, result,
+ 0, 1);
+ if (is12simular) {
+ final_candidate = 0;
+ break;
+ }
+ }
+ if (i == 2) {
+ is13simular = _rtl92d_phy_simularity_compare(hw, result,
+ 0, 2);
+ if (is13simular) {
+ final_candidate = 0;
+ break;
+ }
+ is23simular = _rtl92d_phy_simularity_compare(hw, result,
+ 1, 2);
+ if (is23simular) {
+ final_candidate = 1;
+ } else {
+ for (i = 0; i < 8; i++)
+ regtmp += result[3][i];
+
+ if (regtmp != 0)
+ final_candidate = 3;
+ else
+ final_candidate = 0xFF;
+ }
+ }
+ }
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ for (i = 0; i < 4; i++) {
+ rege94 = result[i][0];
+ rege9c = result[i][1];
+ regea4 = result[i][2];
+ regeac = result[i][3];
+ regeb4 = result[i][4];
+ regebc = result[i][5];
+ regec4 = result[i][6];
+ regecc = result[i][7];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("IQK: rege94=%lx rege9c=%lx regea4=%lx regeac=%lx "
+ "regeb4=%lx regebc=%lx regec4=%lx regecc=%lx\n ",
+ rege94, rege9c, regea4, regeac, regeb4, regebc, regec4,
+ regecc));
+ }
+ if (final_candidate != 0xff) {
+ rtlphy->reg_e94 = rege94 = result[final_candidate][0];
+ rtlphy->reg_e9c = rege9c = result[final_candidate][1];
+ regea4 = result[final_candidate][2];
+ regeac = result[final_candidate][3];
+ rtlphy->reg_eb4 = regeb4 = result[final_candidate][4];
+ rtlphy->reg_ebc = regebc = result[final_candidate][5];
+ regec4 = result[final_candidate][6];
+ regecc = result[final_candidate][7];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("IQK: final_candidate is %x\n", final_candidate));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("IQK: rege94=%lx rege9c=%lx regea4=%lx regeac=%lx "
+ "regeb4=%lx regebc=%lx regec4=%lx regecc=%lx\n ",
+ rege94, rege9c, regea4, regeac, regeb4, regebc, regec4,
+ regecc));
+ patha_ok = pathb_ok = true;
+ } else {
+ rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100; /* X default value */
+ rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0; /* Y default value */
+ }
+ if ((rege94 != 0) /*&&(regea4 != 0) */)
+ _rtl92d_phy_patha_fill_iqk_matrix(hw, patha_ok, result,
+ final_candidate, (regea4 == 0));
+ if (IS_92D_SINGLEPHY(rtlhal->version)) {
+ if ((regeb4 != 0) /*&&(regec4 != 0) */)
+ _rtl92d_phy_pathb_fill_iqk_matrix(hw, pathb_ok, result,
+ final_candidate, (regec4 == 0));
+ }
+ if (final_candidate != 0xFF) {
+ indexforchannel = rtl92d_get_rightchnlplace_for_iqk(
+ rtlphy->current_channel);
+
+ for (i = 0; i < IQK_MATRIX_REG_NUM; i++)
+ rtlphy->iqk_matrix_regsetting[indexforchannel].
+ value[0][i] = result[final_candidate][i];
+ rtlphy->iqk_matrix_regsetting[indexforchannel].iqk_done =
+ true;
+
+ RT_TRACE(rtlpriv, COMP_SCAN | COMP_MLME, DBG_LOUD,
+ ("\nIQK OK indexforchannel %d.\n", indexforchannel));
+ }
+}
+
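+/*
+ * Reload the cached IQK matrix for the given channel on a channel switch.
+ * The "re-do IQK" branch is compiled out (if (0 && ...)), so in practice
+ * the stored per-channel values are just written back through the
+ * fill_iqk_matrix helpers above.
+ */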
+void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ u8 indexforchannel;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("channel %d\n", channel));
+ /*------Do IQK for normal chip and test chip 5G band------- */
+ indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+ ("indexforchannel %d done %d\n", indexforchannel,
+ rtlphy->iqk_matrix_regsetting[indexforchannel].iqk_done));
+ if (0 && !rtlphy->iqk_matrix_regsetting[indexforchannel].iqk_done &&
+ rtlphy->need_iqk) {
+ /* Re Do IQK. */
+ RT_TRACE(rtlpriv, COMP_SCAN | COMP_INIT, DBG_LOUD,
+ ("Do IQK Matrix reg for channel:%d....\n", channel));
+ rtl92d_phy_iq_calibrate(hw);
+ } else {
+ /* Just load the value. */
+ /* 2G band just load once. */
+ if (((!rtlhal->load_imrandiqk_setting_for2g) &&
+ indexforchannel == 0) || indexforchannel > 0) {
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
+ ("Just Read IQK Matrix reg for channel:%d"
+ "....\n", channel));
+ if ((rtlphy->iqk_matrix_regsetting[indexforchannel].
+ value[0] != NULL)
+ /*&&(regea4 != 0) */)
+ _rtl92d_phy_patha_fill_iqk_matrix(hw, true,
+ rtlphy->iqk_matrix_regsetting[
+ indexforchannel].value, 0,
+ (rtlphy->iqk_matrix_regsetting[
+ indexforchannel].value[0][2] == 0));
+ if (IS_92D_SINGLEPHY(rtlhal->version)) {
+ if ((rtlphy->iqk_matrix_regsetting[
+ indexforchannel].value[0][4] != 0)
+ /*&&(regec4 != 0) */)
+ _rtl92d_phy_pathb_fill_iqk_matrix(hw,
+ true,
+ rtlphy->iqk_matrix_regsetting[
+ indexforchannel].value, 0,
+ (rtlphy->iqk_matrix_regsetting[
+ indexforchannel].value[0][6]
+ == 0));
+ }
+ }
+ }
+ rtlphy->need_iqk = false;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("<====\n"));
+}
+
+static u32 _rtl92d_phy_get_abs(u32 val1, u32 val2)
+{
+ u32 ret;
+
+ if (val1 >= val2)
+ ret = val1 - val2;
+ else
+ ret = val2 - val1;
+ return ret;
+}
+
+static bool _rtl92d_is_legal_5g_channel(struct ieee80211_hw *hw, u8 channel)
+{
+ int i;
+ u8 channel_5g[45] = {
+ 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
+ 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
+ 114, 116, 118, 120, 122, 124, 126, 128, 130, 132,
+ 134, 136, 138, 140, 149, 151, 153, 155, 157, 159,
+ 161, 163, 165
+ };
+
+ for (i = 0; i < sizeof(channel_5g); i++)
+ if (channel == channel_5g[i])
+ return true;
+ return false;
+}
+
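+/*
+ * For every target channel, pick the CV-curve entry whose counter value
+ * is closest (smallest absolute difference) to the channel's target
+ * value; illegal 5G channels are skipped.
+ */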
+static void _rtl92d_phy_calc_curvindex(struct ieee80211_hw *hw,
+ u32 *targetchnl, u32 * curvecount_val,
+ bool is5g, u32 *curveindex)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 smallest_abs_val = 0xffffffff, u4tmp;
+ u8 i, j;
+ u8 chnl_num = is5g ? TARGET_CHNL_NUM_5G : TARGET_CHNL_NUM_2G;
+
+ for (i = 0; i < chnl_num; i++) {
+ if (is5g && !_rtl92d_is_legal_5g_channel(hw, i + 1))
+ continue;
+ curveindex[i] = 0;
+ for (j = 0; j < (CV_CURVE_CNT * 2); j++) {
+ u4tmp = _rtl92d_phy_get_abs(targetchnl[i],
+ curvecount_val[j]);
+
+ if (u4tmp < smallest_abs_val) {
+ curveindex[i] = j;
+ smallest_abs_val = u4tmp;
+ }
+ }
+ smallest_abs_val = 0xffffffff;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("curveindex[%d] = %x\n", i,
+ curveindex[i]));
+ }
+}
+
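+/*
+ * Write the cached LC curve index for the current channel into RF_SYN_G4.
+ * In dual-MAC/dual-PHY mode the other PHY may have to be powered up
+ * temporarily to reach the RF path being programmed.
+ */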
+static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
+ u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 erfpath = rtlpriv->rtlhal.current_bandtype ==
+ BAND_ON_5G ? RF90_PATH_A :
+ IS_92D_SINGLEPHY(rtlpriv->rtlhal.version) ?
+ RF90_PATH_B : RF90_PATH_A;
+ u32 u4tmp = 0, u4regvalue = 0;
+ bool bneed_powerdown_radio = false;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("path %d\n", erfpath));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("band type = %d\n",
+ rtlpriv->rtlhal.current_bandtype));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("channel = %d\n", channel));
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {/* Path-A for 5G */
+ u4tmp = curveindex_5g[channel-1];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("ver 1 set RF-A, 5G, 0x28 = 0x%ulx !!\n", u4tmp));
+ if (rtlpriv->rtlhal.macphymode == DUALMAC_DUALPHY &&
+ rtlpriv->rtlhal.interfaceindex == 1) {
+ bneed_powerdown_radio =
+ rtl92d_phy_enable_anotherphy(hw, false);
+ rtlpriv->rtlhal.during_mac1init_radioa = true;
+ /* assume this case does not occur */
+ if (bneed_powerdown_radio)
+ _rtl92d_phy_enable_rf_env(hw, erfpath,
+ &u4regvalue);
+ }
+ rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp);
+ if (bneed_powerdown_radio)
+ _rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
+ if (rtlpriv->rtlhal.during_mac1init_radioa)
+ rtl92d_phy_powerdown_anotherphy(hw, false);
+ } else if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) {
+ u4tmp = curveindex_2g[channel-1];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("ver 3 set RF-B, 2G, 0x28 = 0x%ulx !!\n", u4tmp));
+ if (rtlpriv->rtlhal.macphymode == DUALMAC_DUALPHY &&
+ rtlpriv->rtlhal.interfaceindex == 0) {
+ bneed_powerdown_radio =
+ rtl92d_phy_enable_anotherphy(hw, true);
+ rtlpriv->rtlhal.during_mac0init_radiob = true;
+ if (bneed_powerdown_radio)
+ _rtl92d_phy_enable_rf_env(hw, erfpath,
+ &u4regvalue);
+ }
+ rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("ver 3 set RF-B, 2G, 0x28 = 0x%ulx !!\n",
+ rtl_get_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800)));
+ if (bneed_powerdown_radio)
+ _rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
+ if (rtlpriv->rtlhal.during_mac0init_radiob)
+ rtl92d_phy_powerdown_anotherphy(hw, true);
+ }
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("<====\n"));
+}
+
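+/*
+ * Software LC calibration: pause TX, put each RF path into standby, kick
+ * the calibration through RF_CHNLBW bit 15, poll RF_SYN_G6 for completion,
+ * read back the CV-curve counters and derive the 2.4G/5G curve indexes,
+ * then restore the original TX/RF state.
+ */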
+static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 tmpreg, index, rf_mode[2];
+ u8 path = is2t ? 2 : 1;
+ u8 i;
+ u32 u4tmp, offset;
+ u32 curvecount_val[CV_CURVE_CNT * 2] = {0};
+ u16 timeout = 800, timecount = 0;
+
+ /* Check continuous TX and Packet TX */
+ tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+ /* If dealing with continuous TX, disable all continuous TX; */
+ /* if dealing with packet TX, block all queues. */
+ if ((tmpreg & 0x70) != 0)
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+ else
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xF00000, 0x0F);
+ for (index = 0; index < path; index++) {
+ /* 1. Read original RF mode */
+ offset = index == 0 ? ROFDM0_XAAGCCORE1 : ROFDM0_XBAGCCORE1;
+ rf_mode[index] = rtl_read_byte(rtlpriv, offset);
+ /* 2. Set RF mode = standby mode */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_AC,
+ BRFREGOFFSETMASK, 0x010000);
+ if (rtlpci->init_ready) {
+ /* switch CV-curve control by LC-calibration */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7,
+ BIT(17), 0x0);
+ /* 4. Set LC calibration begin */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_CHNLBW,
+ 0x08000, 0x01);
+ }
+ u4tmp = rtl_get_rfreg(hw, (enum radio_path)index, RF_SYN_G6,
+ BRFREGOFFSETMASK);
+ while ((!(u4tmp & BIT(11))) && timecount <= timeout) {
+ mdelay(50);
+ timecount += 50;
+ u4tmp = rtl_get_rfreg(hw, (enum radio_path)index,
+ RF_SYN_G6, BRFREGOFFSETMASK);
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("PHY_LCK finish delay for %d ms=2\n", timecount));
+ u4tmp = rtl_get_rfreg(hw, index, RF_SYN_G4, BRFREGOFFSETMASK);
+ if (index == 0 && rtlhal->interfaceindex == 0) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("path-A / 5G LCK\n"));
+ } else {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("path-B / 2.4G LCK\n"));
+ }
+ memset(&curvecount_val[0], 0, CV_CURVE_CNT * 2);
+ /* Set LC calibration off */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_CHNLBW,
+ 0x08000, 0x0);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("set RF 0x18[15] = 0\n"));
+ /* save Curve-counting number */
+ for (i = 0; i < CV_CURVE_CNT; i++) {
+ u32 readval = 0, readval2 = 0;
+ rtl_set_rfreg(hw, (enum radio_path)index, 0x3F,
+ 0x7f, i);
+
+ rtl_set_rfreg(hw, (enum radio_path)index, 0x4D,
+ BRFREGOFFSETMASK, 0x0);
+ readval = rtl_get_rfreg(hw, (enum radio_path)index,
+ 0x4F, BRFREGOFFSETMASK);
+ curvecount_val[2 * i + 1] = (readval & 0xfffe0) >> 5;
+ /* reg 0x4f [4:0] */
+ /* reg 0x50 [19:10] */
+ readval2 = rtl_get_rfreg(hw, (enum radio_path)index,
+ 0x50, 0xffc00);
+ curvecount_val[2 * i] = (((readval & 0x1F) << 10) |
+ readval2);
+ }
+ if (index == 0 && rtlhal->interfaceindex == 0)
+ _rtl92d_phy_calc_curvindex(hw, targetchnl_5g,
+ curvecount_val,
+ true, curveindex_5g);
+ else
+ _rtl92d_phy_calc_curvindex(hw, targetchnl_2g,
+ curvecount_val,
+ false, curveindex_2g);
+ /* switch CV-curve control mode */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7,
+ BIT(17), 0x1);
+ }
+
+ /* Restore original situation */
+ for (index = 0; index < path; index++) {
+ offset = index == 0 ? ROFDM0_XAAGCCORE1 : ROFDM0_XBAGCCORE1;
+ rtl_write_byte(rtlpriv, offset, 0x50);
+ rtl_write_byte(rtlpriv, offset, rf_mode[index]);
+ }
+ if ((tmpreg & 0x70) != 0)
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+ else /*Deal with Packet TX case */
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xF00000, 0x00);
+ _rtl92d_phy_reload_lck_setting(hw, rtlpriv->phy.current_channel);
+}
+
+static void _rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("cosa PHY_LCK ver=2\n"));
+ _rtl92d_phy_lc_calibrate_sw(hw, is2t);
+}
+
+void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ u32 timeout = 2000, timecount = 0;
+
+ while (rtlpriv->mac80211.act_scanning && timecount < timeout) {
+ udelay(50);
+ timecount += 50;
+ }
+
+ rtlphy->lck_inprogress = true;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ ("LCK:Start!!! currentband %x delay %d ms\n",
+ rtlhal->current_bandtype, timecount));
+ if (IS_92D_SINGLEPHY(rtlhal->version)) {
+ _rtl92d_phy_lc_calibrate(hw, true);
+ } else {
+ /* For 1T1R */
+ _rtl92d_phy_lc_calibrate(hw, false);
+ }
+ rtlphy->lck_inprogress = false;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, ("LCK:Finish!!!\n"));
+}
+
+void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+{
+ return;
+}
+
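+/*
+ * Append one entry to a software channel-switch command table; returns
+ * false if the table pointer is NULL or the index is out of bounds.
+ */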
+static bool _rtl92d_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+ u32 cmdtableidx, u32 cmdtablesz, enum swchnlcmd_id cmdid,
+ u32 para1, u32 para2, u32 msdelay)
+{
+ struct swchnlcmd *pcmd;
+
+ if (cmdtable == NULL) {
+ RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
+ return false;
+ }
+ if (cmdtableidx >= cmdtablesz)
+ return false;
+
+ pcmd = cmdtable + cmdtableidx;
+ pcmd->cmdid = cmdid;
+ pcmd->para1 = para1;
+ pcmd->para2 = para2;
+ pcmd->msdelay = msdelay;
+ return true;
+}
+
+void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 i;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("settings regs %d default regs %d\n",
+ (int)(sizeof(rtlphy->iqk_matrix_regsetting) /
+ sizeof(struct iqk_matrix_regs)),
+ IQK_MATRIX_REG_NUM));
+ /* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */
+ for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
+ rtlphy->iqk_matrix_regsetting[i].value[0][0] = 0x100;
+ rtlphy->iqk_matrix_regsetting[i].value[0][2] = 0x100;
+ rtlphy->iqk_matrix_regsetting[i].value[0][4] = 0x100;
+ rtlphy->iqk_matrix_regsetting[i].value[0][6] = 0x100;
+ rtlphy->iqk_matrix_regsetting[i].value[0][1] = 0x0;
+ rtlphy->iqk_matrix_regsetting[i].value[0][3] = 0x0;
+ rtlphy->iqk_matrix_regsetting[i].value[0][5] = 0x0;
+ rtlphy->iqk_matrix_regsetting[i].value[0][7] = 0x0;
+ rtlphy->iqk_matrix_regsetting[i].iqk_done = false;
+ }
+}
+
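+/*
+ * Channel-switch state machine: stage 0 runs the pre-common commands,
+ * stage 1 the RF-dependent ones (RF_CHNLBW write plus IMR/IQK reload),
+ * stage 2 the post-common ones. One command is executed per call; the
+ * function returns true once the final CMDID_END is reached.
+ */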
+static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
+ u8 channel, u8 *stage, u8 *step,
+ u32 *delay)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
+ u32 precommoncmdcnt;
+ struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
+ u32 postcommoncmdcnt;
+ struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
+ u32 rfdependcmdcnt;
+ struct swchnlcmd *currentcmd = NULL;
+ u8 rfpath;
+ u8 num_total_rfpath = rtlphy->num_total_rfpath;
+
+ precommoncmdcnt = 0;
+ _rtl92d_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT,
+ CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
+ _rtl92d_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+ MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
+ postcommoncmdcnt = 0;
+ _rtl92d_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
+ MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
+ rfdependcmdcnt = 0;
+ _rtl92d_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+ MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
+ RF_CHNLBW, channel, 0);
+ _rtl92d_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+ MAX_RFDEPENDCMD_CNT, CMDID_END,
+ 0, 0, 0);
+
+ do {
+ switch (*stage) {
+ case 0:
+ currentcmd = &precommoncmd[*step];
+ break;
+ case 1:
+ currentcmd = &rfdependcmd[*step];
+ break;
+ case 2:
+ currentcmd = &postcommoncmd[*step];
+ break;
+ }
+ if (currentcmd->cmdid == CMDID_END) {
+ if ((*stage) == 2) {
+ return true;
+ } else {
+ (*stage)++;
+ (*step) = 0;
+ continue;
+ }
+ }
+ switch (currentcmd->cmdid) {
+ case CMDID_SET_TXPOWEROWER_LEVEL:
+ rtl92d_phy_set_txpower_level(hw, channel);
+ break;
+ case CMDID_WRITEPORT_ULONG:
+ rtl_write_dword(rtlpriv, currentcmd->para1,
+ currentcmd->para2);
+ break;
+ case CMDID_WRITEPORT_USHORT:
+ rtl_write_word(rtlpriv, currentcmd->para1,
+ (u16)currentcmd->para2);
+ break;
+ case CMDID_WRITEPORT_UCHAR:
+ rtl_write_byte(rtlpriv, currentcmd->para1,
+ (u8)currentcmd->para2);
+ break;
+ case CMDID_RF_WRITEREG:
+ for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
+ rtlphy->rfreg_chnlval[rfpath] =
+ ((rtlphy->rfreg_chnlval[rfpath] &
+ 0xffffff00) | currentcmd->para2);
+ if (rtlpriv->rtlhal.current_bandtype ==
+ BAND_ON_5G) {
+ if (currentcmd->para2 > 99)
+ rtlphy->rfreg_chnlval[rfpath] =
+ rtlphy->rfreg_chnlval
+ [rfpath] | (BIT(18));
+ else
+ rtlphy->rfreg_chnlval[rfpath] =
+ rtlphy->rfreg_chnlval
+ [rfpath] & (~BIT(18));
+ rtlphy->rfreg_chnlval[rfpath] |=
+ (BIT(16) | BIT(8));
+ } else {
+ rtlphy->rfreg_chnlval[rfpath] &=
+ ~(BIT(8) | BIT(16) | BIT(18));
+ }
+ rtl_set_rfreg(hw, (enum radio_path)rfpath,
+ currentcmd->para1,
+ BRFREGOFFSETMASK,
+ rtlphy->rfreg_chnlval[rfpath]);
+ _rtl92d_phy_reload_imr_setting(hw, channel,
+ rfpath);
+ }
+ _rtl92d_phy_switch_rf_setting(hw, channel);
+ /* do IQK when all parameters are ready */
+ rtl92d_phy_reload_iqk_setting(hw, channel);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ break;
+ } while (true);
+ (*delay) = currentcmd->msdelay;
+ (*step)++;
+ return false;
+}
+
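+/*
+ * Public channel-switch entry: bail out if a switch, bandwidth change or
+ * LCK is already in progress, switch the wireless band first when needed
+ * (single MAC/single PHY with bandset BAND_ON_BOTH), then drive the
+ * step-by-step state machine above.
+ */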
+u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 delay;
+ u32 timeout = 1000, timecount = 0;
+ u8 channel = rtlphy->current_channel;
+ u32 ret_value;
+
+ if (rtlphy->sw_chnl_inprogress)
+ return 0;
+ if (rtlphy->set_bwmode_inprogress)
+ return 0;
+
+ if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
+ RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+ ("sw_chnl_inprogress false driver sleep or unload\n"));
+ return 0;
+ }
+ while (rtlphy->lck_inprogress && timecount < timeout) {
+ mdelay(50);
+ timecount += 50;
+ }
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY &&
+ rtlhal->bandset == BAND_ON_BOTH) {
+ ret_value = rtl_get_bbreg(hw, RFPGA0_XAB_RFPARAMETER,
+ BMASKDWORD);
+ if (rtlphy->current_channel > 14 && !(ret_value & BIT(0)))
+ rtl92d_phy_switch_wirelessband(hw, BAND_ON_5G);
+ else if (rtlphy->current_channel <= 14 && (ret_value & BIT(0)))
+ rtl92d_phy_switch_wirelessband(hw, BAND_ON_2_4G);
+ }
+ switch (rtlhal->current_bandtype) {
+ case BAND_ON_5G:
+ /* The first channel may be reported wrongly when
+ * switching between the 5G and 2.4G bands. */
+ if (channel <= 14)
+ return 0;
+ RT_ASSERT((channel > 14), ("5G but channel<=14"));
+ break;
+ case BAND_ON_2_4G:
+ /* The first channel may be reported wrongly when
+ * switching between the 5G and 2.4G bands. */
+ if (channel > 14)
+ return 0;
+ RT_ASSERT((channel <= 14), ("2G but channel>14"));
+ break;
+ default:
+ RT_ASSERT(false,
+ ("Invalid WirelessMode(%#x)!!\n",
+ rtlpriv->mac80211.mode));
+ break;
+ }
+ rtlphy->sw_chnl_inprogress = true;
+ if (channel == 0)
+ channel = 1;
+ rtlphy->sw_chnl_stage = 0;
+ rtlphy->sw_chnl_step = 0;
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+ ("switch to channel%d\n", rtlphy->current_channel));
+
+ do {
+ if (!rtlphy->sw_chnl_inprogress)
+ break;
+ if (!_rtl92d_phy_sw_chnl_step_by_step(hw,
+ rtlphy->current_channel,
+ &rtlphy->sw_chnl_stage, &rtlphy->sw_chnl_step, &delay)) {
+ if (delay > 0)
+ mdelay(delay);
+ else
+ continue;
+ } else {
+ rtlphy->sw_chnl_inprogress = false;
+ }
+ break;
+ } while (true);
+ RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
+ rtlphy->sw_chnl_inprogress = false;
+ return 1;
+}
+
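+/*
+ * Apply the pending IO command: pausing DM for a scan saves the current
+ * initial gain and forces it to 0x17, resuming restores the saved gain
+ * and re-programs the TX power for the current channel.
+ */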
+static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress));
+ switch (rtlphy->current_io_type) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ de_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
+ rtl92d_dm_write_dig(hw);
+ rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ rtlphy->initgain_backup.xaagccore1 = de_digtable.cur_igvalue;
+ de_digtable.cur_igvalue = 0x17;
+ rtl92d_dm_write_dig(hw);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ rtlphy->set_io_inprogress = false;
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("<---(%#x)\n", rtlphy->current_io_type));
+}
+
+bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ bool postprocessing = false;
+
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress));
+ do {
+ switch (iotype) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("[IO CMD] Resume DM after scan.\n"));
+ postprocessing = true;
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+ ("[IO CMD] Pause DM before scan.\n"));
+ postprocessing = true;
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ break;
+ }
+ } while (false);
+ if (postprocessing && !rtlphy->set_io_inprogress) {
+ rtlphy->set_io_inprogress = true;
+ rtlphy->current_io_type = iotype;
+ } else {
+ return false;
+ }
+ rtl92d_phy_set_io(hw);
+ RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
+ return true;
+}
+
+static void _rtl92d_phy_set_rfon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* a. SYS_CLKR 0x08[11] = 1 restore MAC clock */
+ /* b. SPS_CTRL 0x11[7:0] = 0x2b */
+ if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY)
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+ /* c. For PCIE: SYS_FUNC_EN 0x02[7:0] = 0xE3 enable BB TRX function */
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ /* RF_ON_EXCEP(d~g): */
+ /* d. APSD_CTRL 0x600[7:0] = 0x00 */
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+ /* e. SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB TRX function again */
+ /* f. SYS_FUNC_EN 0x02[7:0] = 0xE3 enable BB TRX function*/
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ /* g. txpause 0x522[7:0] = 0x00 enable mac tx queue */
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+}
+
+static void _rtl92d_phy_set_rfsleep(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 u4btmp;
+ u8 delay = 5;
+
+ /* a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue */
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+ /* b. RF path 0 offset 0x00 = 0x00 disable RF */
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00);
+ /* c. APSD_CTRL 0x600[7:0] = 0x40 */
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+ /* d. APSD_CTRL 0x600[7:0] = 0x00
+ * APSD_CTRL 0x600[7:0] = 0x00
+ * RF path 0 offset 0x00 = 0x00
+ * APSD_CTRL 0x600[7:0] = 0x40
+ */
+ u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, BRFREGOFFSETMASK);
+ while (u4btmp != 0 && delay > 0) {
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+ u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, BRFREGOFFSETMASK);
+ delay--;
+ }
+ if (delay == 0) {
+ /* Jump out the LPS turn off sequence to RF_ON_EXCEP */
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("Fail !!! Switch RF timeout.\n"));
+ return;
+ }
+ /* e. For PCIE: SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB TRX function */
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ /* f. SPS_CTRL 0x11[7:0] = 0x22 */
+ if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY)
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
+ /* g. SYS_CLKR 0x08[11] = 0 gated MAC clock */
+}
+
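+/*
+ * RF power state transitions: ERFON either re-enables the NIC (if it was
+ * halted for IPS) or runs the RF-on sequence; ERFOFF powers the NIC down
+ * or just updates the LED; ERFSLEEP waits for the non-beacon TX rings to
+ * drain before executing the RF-sleep sequence.
+ */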
+bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ bool bresult = true;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ u8 i, queue_id;
+ struct rtl8192_tx_ring *ring = NULL;
+
+ if (rfpwr_state == ppsc->rfpwr_state)
+ return false;
+ switch (rfpwr_state) {
+ case ERFON:
+ if ((ppsc->rfpwr_state == ERFOFF) &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+ bool rtstatus;
+ u32 InitializeCount = 0;
+ do {
+ InitializeCount++;
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("IPS Set eRf nic enable\n"));
+ rtstatus = rtl_ps_enable_nic(hw);
+ } while ((rtstatus != true) &&
+ (InitializeCount < 10));
+
+ RT_CLEAR_PS_LEVEL(ppsc,
+ RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
+ ("awake, sleeped:%d ms state_"
+ "inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies),
+ rtlpriv->psc.state_inap));
+ ppsc->last_awake_jiffies = jiffies;
+ _rtl92d_phy_set_rfon(hw);
+ }
+
+ if (mac->link_state == MAC80211_LINKED)
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_LINK);
+ else
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_NO_LINK);
+ break;
+ case ERFOFF:
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+ RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+ ("IPS Set eRf nic disable\n"));
+ rtl_ps_disable_nic(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_NO_LINK);
+ else
+ rtlpriv->cfg->ops->led_control(hw,
+ LED_CTL_POWER_OFF);
+ }
+ break;
+ case ERFSLEEP:
+ if (ppsc->rfpwr_state == ERFOFF)
+ break;
+
+ for (queue_id = 0, i = 0;
+ queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+ ring = &pcipriv->dev.tx_ring[queue_id];
+ if (skb_queue_len(&ring->queue) == 0 ||
+ queue_id == BEACON_QUEUE) {
+ queue_id++;
+ continue;
+ } else if (rtlpci->pdev->current_state != PCI_D0) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ ("eRf Off/Sleep: %d times TcbBusyQueu"
+ "e[%d] !=0 but lower power state!\n",
+ (i + 1), queue_id));
+ break;
+ } else {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("eRf Off/Sleep: %d times TcbBusyQueu"
+ "e[%d] =%d "
+ "before doze!\n", (i + 1), queue_id,
+ skb_queue_len(&ring->queue)));
+ udelay(10);
+ i++;
+ }
+
+ if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+ ("\nERFOFF: %d times TcbBusyQueue[%d] "
+ "= %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x, queue_id,
+ skb_queue_len(&ring->queue)));
+ break;
+ }
+ }
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
+ ("Set rfsleep awaked:%d ms\n",
+ jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies)));
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, ("sleep awaked:%d ms "
+ "state_inap:%x\n", jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies), rtlpriv->psc.state_inap));
+ ppsc->last_sleep_jiffies = jiffies;
+ _rtl92d_phy_set_rfsleep(hw);
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("switch case not process\n"));
+ bresult = false;
+ break;
+ }
+ if (bresult)
+ ppsc->rfpwr_state = rfpwr_state;
+ return bresult;
+}
+
+void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 offset = REG_MAC_PHY_CTRL_NORMAL;
+
+ switch (rtlhal->macphymode) {
+ case DUALMAC_DUALPHY:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("MacPhyMode: DUALMAC_DUALPHY\n"));
+ rtl_write_byte(rtlpriv, offset, 0xF3);
+ break;
+ case SINGLEMAC_SINGLEPHY:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("MacPhyMode: SINGLEMAC_SINGLEPHY\n"));
+ rtl_write_byte(rtlpriv, offset, 0xF4);
+ break;
+ case DUALMAC_SINGLEPHY:
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("MacPhyMode: DUALMAC_SINGLEPHY\n"));
+ rtl_write_byte(rtlpriv, offset, 0xF1);
+ break;
+ }
+}
+
+void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+ switch (rtlhal->macphymode) {
+ case DUALMAC_SINGLEPHY:
+ rtlphy->rf_type = RF_2T2R;
+ rtlhal->version |= CHIP_92D_SINGLEPHY;
+ rtlhal->bandset = BAND_ON_BOTH;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ break;
+
+ case SINGLEMAC_SINGLEPHY:
+ rtlphy->rf_type = RF_2T2R;
+ rtlhal->version |= CHIP_92D_SINGLEPHY;
+ rtlhal->bandset = BAND_ON_BOTH;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ break;
+
+ case DUALMAC_DUALPHY:
+ rtlphy->rf_type = RF_1T1R;
+ rtlhal->version &= (~CHIP_92D_SINGLEPHY);
+ /* Now we let MAC0 run on 5G band. */
+ if (rtlhal->interfaceindex == 0) {
+ rtlhal->bandset = BAND_ON_5G;
+ rtlhal->current_bandtype = BAND_ON_5G;
+ } else {
+ rtlhal->bandset = BAND_ON_2_4G;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+u8 rtl92d_get_chnlgroup_fromarray(u8 chnl)
+{
+ u8 group;
+ u8 channel_info[59] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56,
+ 58, 60, 62, 64, 100, 102, 104, 106, 108,
+ 110, 112, 114, 116, 118, 120, 122, 124,
+ 126, 128, 130, 132, 134, 136, 138, 140,
+ 149, 151, 153, 155, 157, 159, 161, 163,
+ 165
+ };
+
+ if (channel_info[chnl] <= 3)
+ group = 0;
+ else if (channel_info[chnl] <= 9)
+ group = 1;
+ else if (channel_info[chnl] <= 14)
+ group = 2;
+ else if (channel_info[chnl] <= 44)
+ group = 3;
+ else if (channel_info[chnl] <= 54)
+ group = 4;
+ else if (channel_info[chnl] <= 64)
+ group = 5;
+ else if (channel_info[chnl] <= 112)
+ group = 6;
+ else if (channel_info[chnl] <= 126)
+ group = 7;
+ else if (channel_info[chnl] <= 140)
+ group = 8;
+ else if (channel_info[chnl] <= 153)
+ group = 9;
+ else if (channel_info[chnl] <= 159)
+ group = 10;
+ else
+ group = 11;
+ return group;
+}
+
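+/*
+ * Mark this MAC as powered on: report the band to the firmware through
+ * 0x81[1]/0x53[1], set MAC0_ON/MAC1_ON under globalmutex_power and, in
+ * dual-MAC modes, wait for a pending power-off of the other MAC to finish.
+ */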
+void rtl92d_phy_set_poweron(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ unsigned long flags;
+ u8 value8;
+ u16 i;
+ u32 mac_reg = (rtlhal->interfaceindex == 0 ? REG_MAC0 : REG_MAC1);
+
+ /* let the firmware know the band status: 0x81[1]/0x53[1] = 0: 5G, 1: 2G */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ value8 = rtl_read_byte(rtlpriv, mac_reg);
+ value8 |= BIT(1);
+ rtl_write_byte(rtlpriv, mac_reg, value8);
+ } else {
+ value8 = rtl_read_byte(rtlpriv, mac_reg);
+ value8 &= (~BIT(1));
+ rtl_write_byte(rtlpriv, mac_reg, value8);
+ }
+
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY) {
+ value8 = rtl_read_byte(rtlpriv, REG_MAC0);
+ rtl_write_byte(rtlpriv, REG_MAC0, value8 | MAC0_ON);
+ } else {
+ spin_lock_irqsave(&globalmutex_power, flags);
+ if (rtlhal->interfaceindex == 0) {
+ value8 = rtl_read_byte(rtlpriv, REG_MAC0);
+ rtl_write_byte(rtlpriv, REG_MAC0, value8 | MAC0_ON);
+ } else {
+ value8 = rtl_read_byte(rtlpriv, REG_MAC1);
+ rtl_write_byte(rtlpriv, REG_MAC1, value8 | MAC1_ON);
+ }
+ value8 = rtl_read_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS);
+ spin_unlock_irqrestore(&globalmutex_power, flags);
+ for (i = 0; i < 200; i++) {
+ if ((value8 & BIT(7)) == 0) {
+ break;
+ } else {
+ udelay(500);
+ spin_lock_irqsave(&globalmutex_power, flags);
+ value8 = rtl_read_byte(rtlpriv,
+ REG_POWER_OFF_IN_PROCESS);
+ spin_unlock_irqrestore(&globalmutex_power,
+ flags);
+ }
+ }
+ if (i == 200)
+ RT_ASSERT(false, ("Another mac power off over time\n"));
+ }
+}
+
+void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (rtlpriv->rtlhal.macphymode) {
+ case DUALMAC_DUALPHY:
+ rtl_write_byte(rtlpriv, REG_DMC, 0x0);
+ rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x08);
+ rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x13ff);
+ break;
+ case DUALMAC_SINGLEPHY:
+ rtl_write_byte(rtlpriv, REG_DMC, 0xf8);
+ rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x08);
+ rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x13ff);
+ break;
+ case SINGLEMAC_SINGLEPHY:
+ rtl_write_byte(rtlpriv, REG_DMC, 0x0);
+ rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x10);
+ rtl_write_word(rtlpriv, (REG_TRXFF_BNDY + 2), 0x27FF);
+ break;
+ default:
+ break;
+ }
+}
+
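+/*
+ * Program the band-dependent BB/RF settings for the current band and
+ * MAC/PHY mode (r_select_5G, RSSI table select, fc_area, TX BB gain,
+ * antenna and AD/DA clock setup), then cache RF_CHNLBW and RF 0x3C for
+ * every RF path.
+ */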
+void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 rfpath, i;
+
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("==>\n"));
+ /* r_select_5G for path_A/B 0 for 2.4G, 1 for 5G */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* r_select_5G for path_A/B,0x878 */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(0), 0x0);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15), 0x0);
+ if (rtlhal->macphymode != DUALMAC_DUALPHY) {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(16), 0x0);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(31), 0x0);
+ }
+ /* rssi_table_select: index 0 for 2.4G, 1~3 for 5G, 0xc78 */
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, BIT(6) | BIT(7), 0x0);
+ /* fc_area 0xd2c */
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(14) | BIT(13), 0x0);
+ /* 5G LAN ON */
+ rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0xa);
+ /* TX BB gain shift * 1, just for test chip, 0xc80, 0xc88 */
+ rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD,
+ 0x40000100);
+ rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD,
+ 0x40000100);
+ if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
+ BIT(10) | BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(3)) >> 3) |
+ (rtlefuse->eeprom_c9 & BIT(1)) |
+ ((rtlefuse->eeprom_cc & BIT(1)) << 4));
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE,
+ BIT(10) | BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(2)) >> 2) |
+ ((rtlefuse->eeprom_c9 & BIT(0)) << 1) |
+ ((rtlefuse->eeprom_cc & BIT(0)) << 5));
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15), 0);
+ } else {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
+ BIT(26) | BIT(22) | BIT(21) | BIT(10) |
+ BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(3)) >> 3) |
+ (rtlefuse->eeprom_c9 & BIT(1)) |
+ ((rtlefuse->eeprom_cc & BIT(1)) << 4) |
+ ((rtlefuse->eeprom_c9 & BIT(7)) << 9) |
+ ((rtlefuse->eeprom_c9 & BIT(5)) << 12) |
+ ((rtlefuse->eeprom_cc & BIT(3)) << 18));
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE,
+ BIT(10) | BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(2)) >> 2) |
+ ((rtlefuse->eeprom_c9 & BIT(0)) << 1) |
+ ((rtlefuse->eeprom_cc & BIT(0)) << 5));
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(10) | BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(6)) >> 6) |
+ ((rtlefuse->eeprom_c9 & BIT(4)) >> 3) |
+ ((rtlefuse->eeprom_cc & BIT(2)) << 3));
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER,
+ BIT(31) | BIT(15), 0);
+ }
+ /* 1.5V_LDO */
+ } else {
+ /* r_select_5G for path_A/B */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(0), 0x1);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15), 0x1);
+ if (rtlhal->macphymode != DUALMAC_DUALPHY) {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(16), 0x1);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(31), 0x1);
+ }
+ /* rssi_table_select: index 0 for 2.4G, 1~3 for 5G */
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, BIT(6) | BIT(7), 0x1);
+ /* fc_area */
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(14) | BIT(13), 0x1);
+ /* 5G LAN ON */
+ rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0x0);
+ /* TX BB gain shift, just for test chip, 0xc80, 0xc88 */
+ if (rtlefuse->internal_pa_5g[0])
+ rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD,
+ 0x2d4000b5);
+ else
+ rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD,
+ 0x20000080);
+ if (rtlefuse->internal_pa_5g[1])
+ rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD,
+ 0x2d4000b5);
+ else
+ rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD,
+ 0x20000080);
+ if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
+ BIT(10) | BIT(6) | BIT(5),
+ (rtlefuse->eeprom_cc & BIT(5)));
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BIT(10),
+ ((rtlefuse->eeprom_cc & BIT(4)) >> 4));
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15),
+ (rtlefuse->eeprom_cc & BIT(4)) >> 4);
+ } else {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
+ BIT(26) | BIT(22) | BIT(21) | BIT(10) |
+ BIT(6) | BIT(5),
+ (rtlefuse->eeprom_cc & BIT(5)) |
+ ((rtlefuse->eeprom_cc & BIT(7)) << 14));
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BIT(10),
+ ((rtlefuse->eeprom_cc & BIT(4)) >> 4));
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BIT(10),
+ ((rtlefuse->eeprom_cc & BIT(6)) >> 6));
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER,
+ BIT(31) | BIT(15),
+ ((rtlefuse->eeprom_cc & BIT(4)) >> 4) |
+ ((rtlefuse->eeprom_cc & BIT(6)) << 10));
+ }
+ }
+ /* update IQK related settings */
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, BMASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, BMASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, ROFDM0_XCTxAFE, 0xF0000000, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30) | BIT(28) |
+ BIT(26) | BIT(24), 0x00);
+ rtl_set_bbreg(hw, ROFDM0_XDTxAFE, 0xF0000000, 0x00);
+ rtl_set_bbreg(hw, 0xca0, 0xF0000000, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, 0x00);
+
+ /* Update RF */
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* MOD_AG for RF path_A 0x18 BIT8, BIT16 */
+ rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) | BIT(16) |
+ BIT(18), 0);
+ /* RF0x0b[16:14] =3b'111 */
+ rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B,
+ 0x1c000, 0x07);
+ } else {
+ /* MOD_AG for RF path_A 0x18 BIT8, BIT16 */
+ rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) |
+ BIT(16) | BIT(18),
+ (BIT(16) | BIT(8)) >> 8);
+ }
+ }
+ /* Update for all band. */
+ /* DMDP */
+ if (rtlphy->rf_type == RF_1T1R) {
+ /* Use antenna 0,0xc04,0xd04 */
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x11);
+ rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x1);
+
+ /* enable ad/da clock1 for dual-phy reg0x888 */
+ if (rtlhal->interfaceindex == 0) {
+ rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN, BIT(12) |
+ BIT(13), 0x3);
+ } else {
+ rtl92d_phy_enable_anotherphy(hw, false);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ ("MAC1 use DBI to update 0x888"));
+ /* 0x888 */
+ rtl92de_write_dword_dbi(hw, RFPGA0_ADDALLOCKEN,
+ rtl92de_read_dword_dbi(hw,
+ RFPGA0_ADDALLOCKEN,
+ BIT(3)) | BIT(12) | BIT(13),
+ BIT(3));
+ rtl92d_phy_powerdown_anotherphy(hw, false);
+ }
+ } else {
+ /* Single PHY */
+ /* Use antenna 0 & 1,0xc04,0xd04 */
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x33);
+ rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x3);
+ /* disable ad/da clock1,0x888 */
+ rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN, BIT(12) | BIT(13), 0);
+ }
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ rtlphy->rfreg_chnlval[rfpath] = rtl_get_rfreg(hw, rfpath,
+ RF_CHNLBW, BRFREGOFFSETMASK);
+ rtlphy->reg_rf3c[rfpath] = rtl_get_rfreg(hw, rfpath, 0x3C,
+ BRFREGOFFSETMASK);
+ }
+ for (i = 0; i < 2; i++)
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[i]));
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("<==\n"));
+}
+
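+/*
+ * Clear this MAC's ON bit; in dual-MAC modes return false while the other
+ * MAC is still on, otherwise flag the power-off in
+ * REG_POWER_OFF_IN_PROCESS (polled by rtl92d_phy_set_poweron) and
+ * return true.
+ */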
+bool rtl92d_phy_check_poweroff(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 u1btmp;
+ unsigned long flags;
+
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY) {
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC0);
+ rtl_write_byte(rtlpriv, REG_MAC0, u1btmp & (~MAC0_ON));
+ return true;
+ }
+ spin_lock_irqsave(&globalmutex_power, flags);
+ if (rtlhal->interfaceindex == 0) {
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC0);
+ rtl_write_byte(rtlpriv, REG_MAC0, u1btmp & (~MAC0_ON));
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC1);
+ u1btmp &= MAC1_ON;
+ } else {
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC1);
+ rtl_write_byte(rtlpriv, REG_MAC1, u1btmp & (~MAC1_ON));
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC0);
+ u1btmp &= MAC0_ON;
+ }
+ if (u1btmp) {
+ spin_unlock_irqrestore(&globalmutex_power, flags);
+ return false;
+ }
+ u1btmp = rtl_read_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS);
+ u1btmp |= BIT(7);
+ rtl_write_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS, u1btmp);
+ spin_unlock_irqrestore(&globalmutex_power, flags);
+ return true;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
new file mode 100644
index 00000000000..a52c824b41e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.h
@@ -0,0 +1,178 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92D_PHY_H__
+#define __RTL92D_PHY_H__
+
+#define MAX_PRECMD_CNT 16
+#define MAX_RFDEPENDCMD_CNT 16
+#define MAX_POSTCMD_CNT 16
+
+#define MAX_DOZE_WAITING_TIMES_9x 64
+
+#define RT_CANNOT_IO(hw) false
+#define HIGHPOWER_RADIOA_ARRAYLEN 22
+
+#define IQK_ADDA_REG_NUM 16
+#define MAX_TOLERANCE 5
+#define IQK_DELAY_TIME 1
+
+#define APK_BB_REG_NUM 5
+#define APK_AFE_REG_NUM 16
+#define APK_CURVE_REG_NUM 4
+#define PATH_NUM 2
+
+#define LOOP_LIMIT 5
+#define MAX_STALL_TIME 50
+#define ANTENNA_DIVERSITY_VALUE 0x80
+#define MAX_TXPWR_IDX_NMODE_92S 63
+#define RESET_CNT_LIMIT 3
+
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM 10
+#define IQK_BB_REG_NUM_test 6
+#define IQK_MAC_REG_NUM 4
+#define RX_INDEX_MAPPING_NUM 15
+
+#define IQK_DELAY_TIME 1
+
+#define CT_OFFSET_MAC_ADDR 0X16
+
+#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
+#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
+#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF 0x66
+#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
+#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
+
+#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
+#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72
+
+#define CT_OFFSET_CHANNEL_PLAH 0x75
+#define CT_OFFSET_THERMAL_METER 0x78
+#define CT_OFFSET_RF_OPTION 0x79
+#define CT_OFFSET_VERSION 0x7E
+#define CT_OFFSET_CUSTOMER_ID 0x7F
+
+enum swchnlcmd_id {
+ CMDID_END,
+ CMDID_SET_TXPOWEROWER_LEVEL,
+ CMDID_BBREGWRITE10,
+ CMDID_WRITEPORT_ULONG,
+ CMDID_WRITEPORT_USHORT,
+ CMDID_WRITEPORT_UCHAR,
+ CMDID_RF_WRITEREG,
+};
+
+struct swchnlcmd {
+ enum swchnlcmd_id cmdid;
+ u32 para1;
+ u32 para2;
+ u32 msdelay;
+};
+
+enum baseband_config_type {
+ BASEBAND_CONFIG_PHY_REG = 0,
+ BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum rf_content {
+ radioa_txt = 0,
+ radiob_txt = 1,
+ radioc_txt = 2,
+ radiod_txt = 3
+};
+
+static inline void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
+ unsigned long *flag)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->rtlhal.interfaceindex == 1)
+ spin_lock_irqsave(&rtlpriv->locks.cck_and_rw_pagea_lock, *flag);
+}
+
+static inline void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
+ unsigned long *flag)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->rtlhal.interfaceindex == 1)
+ spin_unlock_irqrestore(&rtlpriv->locks.cck_and_rw_pagea_lock,
+ *flag);
+}
+
+extern u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+extern void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+extern u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask);
+extern void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 regaddr,
+ u32 bitmask, u32 data);
+extern bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
+extern bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
+extern bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
+extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+ enum radio_path rfpath);
+extern void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+extern void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+extern void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw,
+ u8 operation);
+extern void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+extern u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
+bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum rf_content content,
+ enum radio_path rfpath);
+bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+extern bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+
+void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw);
+void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw);
+u8 rtl92d_get_chnlgroup_fromarray(u8 chnl);
+void rtl92d_phy_set_poweron(struct ieee80211_hw *hw);
+void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw);
+bool rtl92d_phy_check_poweroff(struct ieee80211_hw *hw);
+void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw);
+void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw);
+void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw);
+void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
+ unsigned long *flag);
+void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
+ unsigned long *flag);
+u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl);
+void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
+void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
new file mode 100644
index 00000000000..131acc306fc
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/reg.h
@@ -0,0 +1,1313 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92D_REG_H__
+#define __RTL92D_REG_H__
+
+/* ----------------------------------------------------- */
+/* 0x0000h ~ 0x00FFh System Configuration */
+/* ----------------------------------------------------- */
+#define REG_SYS_ISO_CTRL 0x0000
+#define REG_SYS_FUNC_EN 0x0002
+#define REG_APS_FSMCO 0x0004
+#define REG_SYS_CLKR 0x0008
+#define REG_9346CR 0x000A
+#define REG_EE_VPD 0x000C
+#define REG_AFE_MISC 0x0010
+#define REG_SPS0_CTRL 0x0011
+#define REG_POWER_OFF_IN_PROCESS 0x0017
+#define REG_SPS_OCP_CFG 0x0018
+#define REG_RSV_CTRL 0x001C
+#define REG_RF_CTRL 0x001F
+#define REG_LDOA15_CTRL 0x0020
+#define REG_LDOV12D_CTRL 0x0021
+#define REG_LDOHCI12_CTRL 0x0022
+#define REG_LPLDO_CTRL 0x0023
+#define REG_AFE_XTAL_CTRL 0x0024
+#define REG_AFE_PLL_CTRL 0x0028
+/* for 92d, DMDP, SMSP, DMSP control */
+#define REG_MAC_PHY_CTRL 0x002c
+#define REG_EFUSE_CTRL 0x0030
+#define REG_EFUSE_TEST 0x0034
+#define REG_PWR_DATA 0x0038
+#define REG_CAL_TIMER 0x003C
+#define REG_ACLK_MON 0x003E
+#define REG_GPIO_MUXCFG 0x0040
+#define REG_GPIO_IO_SEL 0x0042
+#define REG_MAC_PINMUX_CFG 0x0043
+#define REG_GPIO_PIN_CTRL 0x0044
+#define REG_GPIO_INTM 0x0048
+#define REG_LEDCFG0 0x004C
+#define REG_LEDCFG1 0x004D
+#define REG_LEDCFG2 0x004E
+#define REG_LEDCFG3 0x004F
+#define REG_FSIMR 0x0050
+#define REG_FSISR 0x0054
+
+#define REG_MCUFWDL 0x0080
+
+#define REG_HMEBOX_EXT_0 0x0088
+#define REG_HMEBOX_EXT_1 0x008A
+#define REG_HMEBOX_EXT_2 0x008C
+#define REG_HMEBOX_EXT_3 0x008E
+
+#define REG_BIST_SCAN 0x00D0
+#define REG_BIST_RPT 0x00D4
+#define REG_BIST_ROM_RPT 0x00D8
+#define REG_USB_SIE_INTF 0x00E0
+#define REG_PCIE_MIO_INTF 0x00E4
+#define REG_PCIE_MIO_INTD 0x00E8
+#define REG_HPON_FSM 0x00EC
+#define REG_SYS_CFG 0x00F0
+#define REG_MAC_PHY_CTRL_NORMAL 0x00f8
+
+#define REG_MAC0 0x0081
+#define REG_MAC1 0x0053
+#define FW_MAC0_READY 0x18
+#define FW_MAC1_READY 0x1A
+#define MAC0_ON BIT(7)
+#define MAC1_ON BIT(0)
+#define MAC0_READY BIT(0)
+#define MAC1_READY BIT(0)
+
+/* ----------------------------------------------------- */
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+/* ----------------------------------------------------- */
+#define REG_CR 0x0100
+#define REG_PBP 0x0104
+#define REG_TRXDMA_CTRL 0x010C
+#define REG_TRXFF_BNDY 0x0114
+#define REG_TRXFF_STATUS 0x0118
+#define REG_RXFF_PTR 0x011C
+#define REG_HIMR 0x0120
+#define REG_HISR 0x0124
+#define REG_HIMRE 0x0128
+#define REG_HISRE 0x012C
+#define REG_CPWM 0x012F
+#define REG_FWIMR 0x0130
+#define REG_FWISR 0x0134
+#define REG_PKTBUF_DBG_CTRL 0x0140
+#define REG_PKTBUF_DBG_DATA_L 0x0144
+#define REG_PKTBUF_DBG_DATA_H 0x0148
+
+#define REG_TC0_CTRL 0x0150
+#define REG_TC1_CTRL 0x0154
+#define REG_TC2_CTRL 0x0158
+#define REG_TC3_CTRL 0x015C
+#define REG_TC4_CTRL 0x0160
+#define REG_TCUNIT_BASE 0x0164
+#define REG_MBIST_START 0x0174
+#define REG_MBIST_DONE 0x0178
+#define REG_MBIST_FAIL 0x017C
+#define REG_C2HEVT_MSG_NORMAL 0x01A0
+#define REG_C2HEVT_MSG_TEST 0x01B8
+#define REG_C2HEVT_CLEAR 0x01BF
+#define REG_MCUTST_1 0x01c0
+#define REG_FMETHR 0x01C8
+#define REG_HMETFR 0x01CC
+#define REG_HMEBOX_0 0x01D0
+#define REG_HMEBOX_1 0x01D4
+#define REG_HMEBOX_2 0x01D8
+#define REG_HMEBOX_3 0x01DC
+
+#define REG_LLT_INIT 0x01E0
+#define REG_BB_ACCEESS_CTRL 0x01E8
+#define REG_BB_ACCESS_DATA 0x01EC
+
+
+/* ----------------------------------------------------- */
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* ----------------------------------------------------- */
+#define REG_RQPN 0x0200
+#define REG_FIFOPAGE 0x0204
+#define REG_TDECTRL 0x0208
+#define REG_TXDMA_OFFSET_CHK 0x020C
+#define REG_TXDMA_STATUS 0x0210
+#define REG_RQPN_NPQ 0x0214
+
+/* ----------------------------------------------------- */
+/* 0x0280h ~ 0x02FFh RXDMA Configuration */
+/* ----------------------------------------------------- */
+#define REG_RXDMA_AGG_PG_TH 0x0280
+#define REG_RXPKT_NUM 0x0284
+#define REG_RXDMA_STATUS 0x0288
+
+/* ----------------------------------------------------- */
+/* 0x0300h ~ 0x03FFh PCIe */
+/* ----------------------------------------------------- */
+#define REG_PCIE_CTRL_REG 0x0300
+#define REG_INT_MIG 0x0304
+#define REG_BCNQ_DESA 0x0308
+#define REG_HQ_DESA 0x0310
+#define REG_MGQ_DESA 0x0318
+#define REG_VOQ_DESA 0x0320
+#define REG_VIQ_DESA 0x0328
+#define REG_BEQ_DESA 0x0330
+#define REG_BKQ_DESA 0x0338
+#define REG_RX_DESA 0x0340
+#define REG_DBI 0x0348
+#define REG_DBI_WDATA 0x0348
+#define REG_DBI_RDATA 0x034C
+#define REG_DBI_CTRL 0x0350
+#define REG_DBI_FLAG 0x0352
+#define REG_MDIO 0x0354
+#define REG_DBG_SEL 0x0360
+#define REG_PCIE_HRPWM 0x0361
+#define REG_PCIE_HCPWM 0x0363
+#define REG_UART_CTRL 0x0364
+#define REG_UART_TX_DESA 0x0370
+#define REG_UART_RX_DESA 0x0378
+
+/* ----------------------------------------------------- */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* ----------------------------------------------------- */
+#define REG_VOQ_INFORMATION 0x0400
+#define REG_VIQ_INFORMATION 0x0404
+#define REG_BEQ_INFORMATION 0x0408
+#define REG_BKQ_INFORMATION 0x040C
+#define REG_MGQ_INFORMATION 0x0410
+#define REG_HGQ_INFORMATION 0x0414
+#define REG_BCNQ_INFORMATION 0x0418
+
+
+#define REG_CPU_MGQ_INFORMATION 0x041C
+#define REG_FWHW_TXQ_CTRL 0x0420
+#define REG_HWSEQ_CTRL 0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
+#define REG_TXPKTBUF_MGQ_BDNY 0x0425
+#define REG_MULTI_BCNQ_EN 0x0426
+#define REG_MULTI_BCNQ_OFFSET 0x0427
+#define REG_SPEC_SIFS 0x0428
+#define REG_RL 0x042A
+#define REG_DARFRC 0x0430
+#define REG_RARFRC 0x0438
+#define REG_RRSR 0x0440
+#define REG_ARFR0 0x0444
+#define REG_ARFR1 0x0448
+#define REG_ARFR2 0x044C
+#define REG_ARFR3 0x0450
+#define REG_AGGLEN_LMT 0x0458
+#define REG_AMPDU_MIN_SPACE 0x045C
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
+#define REG_FAST_EDCA_CTRL 0x0460
+#define REG_RD_RESP_PKT_TH 0x0463
+#define REG_INIRTS_RATE_SEL 0x0480
+#define REG_INIDATA_RATE_SEL 0x0484
+#define REG_POWER_STATUS 0x04A4
+#define REG_POWER_STAGE1 0x04B4
+#define REG_POWER_STAGE2 0x04B8
+#define REG_PKT_LIFE_TIME 0x04C0
+#define REG_STBC_SETTING 0x04C4
+#define REG_PROT_MODE_CTRL 0x04C8
+#define REG_MAX_AGGR_NUM 0x04CA
+#define REG_RTS_MAX_AGGR_NUM 0x04CB
+#define REG_BAR_MODE_CTRL 0x04CC
+#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
+#define REG_EARLY_MODE_CONTROL 0x4D0
+#define REG_NQOS_SEQ 0x04DC
+#define REG_QOS_SEQ 0x04DE
+#define REG_NEED_CPU_HANDLE 0x04E0
+#define REG_PKT_LOSE_RPT 0x04E1
+#define REG_PTCL_ERR_STATUS 0x04E2
+#define REG_DUMMY 0x04FC
+
+/* ----------------------------------------------------- */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* ----------------------------------------------------- */
+#define REG_EDCA_VO_PARAM 0x0500
+#define REG_EDCA_VI_PARAM 0x0504
+#define REG_EDCA_BE_PARAM 0x0508
+#define REG_EDCA_BK_PARAM 0x050C
+#define REG_BCNTCFG 0x0510
+#define REG_PIFS 0x0512
+#define REG_RDG_PIFS 0x0513
+#define REG_SIFS_CTX 0x0514
+#define REG_SIFS_TRX 0x0516
+#define REG_AGGR_BREAK_TIME 0x051A
+#define REG_SLOT 0x051B
+#define REG_TX_PTCL_CTRL 0x0520
+#define REG_TXPAUSE 0x0522
+#define REG_DIS_TXREQ_CLR 0x0523
+#define REG_RD_CTRL 0x0524
+#define REG_TBTT_PROHIBIT 0x0540
+#define REG_RD_NAV_NXT 0x0544
+#define REG_NAV_PROT_LEN 0x0546
+#define REG_BCN_CTRL 0x0550
+#define REG_USTIME_TSF 0x0551
+#define REG_MBID_NUM 0x0552
+#define REG_DUAL_TSF_RST 0x0553
+#define REG_BCN_INTERVAL 0x0554
+#define REG_MBSSID_BCN_SPACE 0x0554
+#define REG_DRVERLYINT 0x0558
+#define REG_BCNDMATIM 0x0559
+#define REG_ATIMWND 0x055A
+#define REG_BCN_MAX_ERR 0x055D
+#define REG_RXTSF_OFFSET_CCK 0x055E
+#define REG_RXTSF_OFFSET_OFDM 0x055F
+#define REG_TSFTR 0x0560
+#define REG_INIT_TSFTR 0x0564
+#define REG_PSTIMER 0x0580
+#define REG_TIMER0 0x0584
+#define REG_TIMER1 0x0588
+#define REG_ACMHWCTRL 0x05C0
+#define REG_ACMRSTCTRL 0x05C1
+#define REG_ACMAVG 0x05C2
+#define REG_VO_ADMTIME 0x05C4
+#define REG_VI_ADMTIME 0x05C6
+#define REG_BE_ADMTIME 0x05C8
+#define REG_EDCA_RANDOM_GEN 0x05CC
+#define REG_SCH_TXCMD 0x05D0
+
+/* Dual MAC Co-Existence Register */
+#define REG_DMC 0x05F0
+
+/* ----------------------------------------------------- */
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* ----------------------------------------------------- */
+#define REG_APSD_CTRL 0x0600
+#define REG_BWOPMODE 0x0603
+#define REG_TCR 0x0604
+#define REG_RCR 0x0608
+#define REG_RX_PKT_LIMIT 0x060C
+#define REG_RX_DLK_TIME 0x060D
+#define REG_RX_DRVINFO_SZ 0x060F
+
+#define REG_MACID 0x0610
+#define REG_BSSID 0x0618
+#define REG_MAR 0x0620
+#define REG_MBIDCAMCFG 0x0628
+
+#define REG_USTIME_EDCA 0x0638
+#define REG_MAC_SPEC_SIFS 0x063A
+#define REG_RESP_SIFS_CCK 0x063C
+#define REG_RESP_SIFS_OFDM 0x063E
+#define REG_ACKTO 0x0640
+#define REG_CTS2TO 0x0641
+#define REG_EIFS 0x0642
+
+
+/* WMA, BA, CCX */
+#define REG_NAV_CTRL 0x0650
+#define REG_BACAMCMD 0x0654
+#define REG_BACAMCONTENT 0x0658
+#define REG_LBDLY 0x0660
+#define REG_FWDLY 0x0661
+#define REG_RXERR_RPT 0x0664
+#define REG_WMAC_TRXPTCL_CTL 0x0668
+
+
+/* Security */
+#define REG_CAMCMD 0x0670
+#define REG_CAMWRITE 0x0674
+#define REG_CAMREAD 0x0678
+#define REG_CAMDBG 0x067C
+#define REG_SECCFG 0x0680
+
+/* Power */
+#define REG_WOW_CTRL 0x0690
+#define REG_PSSTATUS 0x0691
+#define REG_PS_RX_INFO 0x0692
+#define REG_LPNAV_CTRL 0x0694
+#define REG_WKFMCAM_CMD 0x0698
+#define REG_WKFMCAM_RWD 0x069C
+#define REG_RXFLTMAP0 0x06A0
+#define REG_RXFLTMAP1 0x06A2
+#define REG_RXFLTMAP2 0x06A4
+#define REG_BCN_PSR_RPT 0x06A8
+#define REG_CALB32K_CTRL 0x06AC
+#define REG_PKT_MON_CTRL 0x06B4
+#define REG_BT_COEX_TABLE 0x06C0
+#define REG_WMAC_RESP_TXINFO 0x06D8
+
+
+/* ----------------------------------------------------- */
+/* Redefine 8192C register definitions for compatibility */
+/* ----------------------------------------------------- */
+#define CR9346 REG_9346CR
+#define MSR (REG_CR + 2)
+#define ISR REG_HISR
+#define TSFR REG_TSFTR
+
+#define MACIDR0 REG_MACID
+#define MACIDR4 (REG_MACID + 4)
+
+#define PBP REG_PBP
+
+#define IDR0 MACIDR0
+#define IDR4 MACIDR4
+
+/* ----------------------------------------------------- */
+/* 8192C (MSR) Media Status Register(Offset 0x4C, 8 bits)*/
+/* ----------------------------------------------------- */
+#define MSR_NOLINK 0x00
+#define MSR_ADHOC 0x01
+#define MSR_INFRA 0x02
+#define MSR_AP 0x03
+
+/* 6. Adaptive Control Registers (Offset: 0x0160 - 0x01CF) */
+/* ----------------------------------------------------- */
+/* 8192C Response Rate Set Register(offset 0x181, 24bits)*/
+/* ----------------------------------------------------- */
+#define RRSR_RSC_OFFSET 21
+#define RRSR_SHORT_OFFSET 23
+#define RRSR_RSC_BW_40M 0x600000
+#define RRSR_RSC_UPSUBCHNL 0x400000
+#define RRSR_RSC_LOWSUBCHNL 0x200000
+#define RRSR_SHORT 0x800000
+#define RRSR_1M BIT0
+#define RRSR_2M BIT1
+#define RRSR_5_5M BIT2
+#define RRSR_11M BIT3
+#define RRSR_6M BIT4
+#define RRSR_9M BIT5
+#define RRSR_12M BIT6
+#define RRSR_18M BIT7
+#define RRSR_24M BIT8
+#define RRSR_36M BIT9
+#define RRSR_48M BIT10
+#define RRSR_54M BIT11
+#define RRSR_MCS0 BIT12
+#define RRSR_MCS1 BIT13
+#define RRSR_MCS2 BIT14
+#define RRSR_MCS3 BIT15
+#define RRSR_MCS4 BIT16
+#define RRSR_MCS5 BIT17
+#define RRSR_MCS6 BIT18
+#define RRSR_MCS7 BIT19
+#define BRSR_ACKSHORTPMB BIT23
+
+/* ----------------------------------------------------- */
+/* 8192C Rate Definition */
+/* ----------------------------------------------------- */
+/* CCK */
+#define RATR_1M 0x00000001
+#define RATR_2M 0x00000002
+#define RATR_55M 0x00000004
+#define RATR_11M 0x00000008
+/* OFDM */
+#define RATR_6M 0x00000010
+#define RATR_9M 0x00000020
+#define RATR_12M 0x00000040
+#define RATR_18M 0x00000080
+#define RATR_24M 0x00000100
+#define RATR_36M 0x00000200
+#define RATR_48M 0x00000400
+#define RATR_54M 0x00000800
+/* MCS 1 Spatial Stream */
+#define RATR_MCS0 0x00001000
+#define RATR_MCS1 0x00002000
+#define RATR_MCS2 0x00004000
+#define RATR_MCS3 0x00008000
+#define RATR_MCS4 0x00010000
+#define RATR_MCS5 0x00020000
+#define RATR_MCS6 0x00040000
+#define RATR_MCS7 0x00080000
+/* MCS 2 Spatial Stream */
+#define RATR_MCS8 0x00100000
+#define RATR_MCS9 0x00200000
+#define RATR_MCS10 0x00400000
+#define RATR_MCS11 0x00800000
+#define RATR_MCS12 0x01000000
+#define RATR_MCS13 0x02000000
+#define RATR_MCS14 0x04000000
+#define RATR_MCS15 0x08000000
+
+/* CCK */
+#define RATE_1M BIT(0)
+#define RATE_2M BIT(1)
+#define RATE_5_5M BIT(2)
+#define RATE_11M BIT(3)
+/* OFDM */
+#define RATE_6M BIT(4)
+#define RATE_9M BIT(5)
+#define RATE_12M BIT(6)
+#define RATE_18M BIT(7)
+#define RATE_24M BIT(8)
+#define RATE_36M BIT(9)
+#define RATE_48M BIT(10)
+#define RATE_54M BIT(11)
+/* MCS 1 Spatial Stream */
+#define RATE_MCS0 BIT(12)
+#define RATE_MCS1 BIT(13)
+#define RATE_MCS2 BIT(14)
+#define RATE_MCS3 BIT(15)
+#define RATE_MCS4 BIT(16)
+#define RATE_MCS5 BIT(17)
+#define RATE_MCS6 BIT(18)
+#define RATE_MCS7 BIT(19)
+/* MCS 2 Spatial Stream */
+#define RATE_MCS8 BIT(20)
+#define RATE_MCS9 BIT(21)
+#define RATE_MCS10 BIT(22)
+#define RATE_MCS11 BIT(23)
+#define RATE_MCS12 BIT(24)
+#define RATE_MCS13 BIT(25)
+#define RATE_MCS14 BIT(26)
+#define RATE_MCS15 BIT(27)
+
+/* ALL CCK Rate */
+#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | \
+ RATR_11M)
+#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | \
+ RATR_18M | RATR_24M | \
+ RATR_36M | RATR_48M | RATR_54M)
+#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 | \
+ RATR_MCS3 | RATR_MCS4 | RATR_MCS5 | \
+ RATR_MCS6 | RATR_MCS7)
+#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | \
+ RATR_MCS11 | RATR_MCS12 | RATR_MCS13 | \
+ RATR_MCS14 | RATR_MCS15)
+
+/* ----------------------------------------------------- */
+/* 8192C BW_OPMODE bits (Offset 0x203, 8bit) */
+/* ----------------------------------------------------- */
+#define BW_OPMODE_20MHZ BIT(2)
+#define BW_OPMODE_5G BIT(1)
+#define BW_OPMODE_11J BIT(0)
+
+
+/* ----------------------------------------------------- */
+/* 8192C CAM Config Setting (offset 0x250, 1 byte) */
+/* ----------------------------------------------------- */
+#define CAM_VALID BIT(15)
+#define CAM_NOTVALID 0x0000
+#define CAM_USEDK BIT(5)
+
+#define CAM_NONE 0x0
+#define CAM_WEP40 0x01
+#define CAM_TKIP 0x02
+#define CAM_AES 0x04
+#define CAM_WEP104 0x05
+#define CAM_SMS4 0x6
+
+
+#define TOTAL_CAM_ENTRY 32
+#define HALF_CAM_ENTRY 16
+
+#define CAM_WRITE BIT(16)
+#define CAM_READ 0x00000000
+#define CAM_POLLINIG BIT(31)
+
+/* 10. Power Save Control Registers (Offset: 0x0260 - 0x02DF) */
+#define WOW_PMEN BIT0 /* Power management Enable. */
+#define WOW_WOMEN BIT1 /* WoW function on or off. */
+#define WOW_MAGIC BIT2 /* Magic packet */
+#define WOW_UWF BIT3 /* Unicast Wakeup frame. */
+
+/* 12. Host Interrupt Status Registers (Offset: 0x0300 - 0x030F) */
+/* ----------------------------------------------------- */
+/* 8190 IMR/ISR bits (offset 0xfd, 8bits) */
+/* ----------------------------------------------------- */
+#define IMR8190_DISABLED 0x0
+#define IMR_BCNDMAINT6 BIT(31)
+#define IMR_BCNDMAINT5 BIT(30)
+#define IMR_BCNDMAINT4 BIT(29)
+#define IMR_BCNDMAINT3 BIT(28)
+#define IMR_BCNDMAINT2 BIT(27)
+#define IMR_BCNDMAINT1 BIT(26)
+#define IMR_BCNDOK8 BIT(25)
+#define IMR_BCNDOK7 BIT(24)
+#define IMR_BCNDOK6 BIT(23)
+#define IMR_BCNDOK5 BIT(22)
+#define IMR_BCNDOK4 BIT(21)
+#define IMR_BCNDOK3 BIT(20)
+#define IMR_BCNDOK2 BIT(19)
+#define IMR_BCNDOK1 BIT(18)
+#define IMR_TIMEOUT2 BIT(17)
+#define IMR_TIMEOUT1 BIT(16)
+#define IMR_TXFOVW BIT(15)
+#define IMR_PSTIMEOUT BIT(14)
+#define IMR_BcnInt BIT(13)
+#define IMR_RXFOVW BIT(12)
+#define IMR_RDU BIT(11)
+#define IMR_ATIMEND BIT(10)
+#define IMR_BDOK BIT(9)
+#define IMR_HIGHDOK BIT(8)
+#define IMR_TBDOK BIT(7)
+#define IMR_MGNTDOK BIT(6)
+#define IMR_TBDER BIT(5)
+#define IMR_BKDOK BIT(4)
+#define IMR_BEDOK BIT(3)
+#define IMR_VIDOK BIT(2)
+#define IMR_VODOK BIT(1)
+#define IMR_ROK BIT(0)
+
+#define IMR_TXERR BIT(11)
+#define IMR_RXERR BIT(10)
+#define IMR_C2HCMD BIT(9)
+#define IMR_CPWM BIT(8)
+#define IMR_OCPINT BIT(1)
+#define IMR_WLANOFF BIT(0)
+
+/* ----------------------------------------------------- */
+/* 8192C EFUSE */
+/* ----------------------------------------------------- */
+#define HWSET_MAX_SIZE 256
+#define EFUSE_MAX_SECTION 32
+#define EFUSE_REAL_CONTENT_LEN 512
+
+/* ----------------------------------------------------- */
+/* 8192C EEPROM/EFUSE share register definition. */
+/* ----------------------------------------------------- */
+#define EEPROM_DEFAULT_TSSI 0x0
+#define EEPROM_DEFAULT_CRYSTALCAP 0x0
+#define EEPROM_DEFAULT_THERMALMETER 0x12
+
+#define EEPROM_DEFAULT_TXPOWERLEVEL_2G 0x2C
+#define EEPROM_DEFAULT_TXPOWERLEVEL_5G 0x22
+
+#define EEPROM_DEFAULT_HT40_2SDIFF 0x0
+/* HT20<->40 default Tx Power Index Difference */
+#define EEPROM_DEFAULT_HT20_DIFF 2
+/* OFDM Tx Power index diff */
+#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x4
+#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0
+#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0
+
+#define EEPROM_CHANNEL_PLAN_FCC 0x0
+#define EEPROM_CHANNEL_PLAN_IC 0x1
+#define EEPROM_CHANNEL_PLAN_ETSI 0x2
+#define EEPROM_CHANNEL_PLAN_SPAIN 0x3
+#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
+#define EEPROM_CHANNEL_PLAN_MKK 0x5
+#define EEPROM_CHANNEL_PLAN_MKK1 0x6
+#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
+#define EEPROM_CHANNEL_PLAN_TELEC 0x8
+#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
+#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
+#define EEPROM_CHANNEL_PLAN_NCC 0xB
+#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
+
+#define EEPROM_CID_DEFAULT 0x0
+#define EEPROM_CID_TOSHIBA 0x4
+#define EEPROM_CID_CCX 0x10
+#define EEPROM_CID_QMI 0x0D
+#define EEPROM_CID_WHQL 0xFE
+
+
+#define RTL8192_EEPROM_ID 0x8129
+#define EEPROM_WAPI_SUPPORT 0x78
+
+
+#define RTL8190_EEPROM_ID 0x8129 /* 0-1 */
+#define EEPROM_HPON 0x02 /* LDO settings.2-5 */
+#define EEPROM_CLK 0x06 /* Clock settings.6-7 */
+#define EEPROM_MAC_FUNCTION 0x08 /* SE Test mode.8 */
+
+#define EEPROM_VID 0x28 /* SE Vendor ID.A-B */
+#define EEPROM_DID 0x2A /* SE Device ID. C-D */
+#define EEPROM_SVID 0x2C /* SE Vendor ID.E-F */
+#define EEPROM_SMID 0x2E /* SE PCI Subsystem ID. 10-11 */
+
+#define EEPROM_MAC_ADDR 0x16 /* SEMAC Address. 12-17 */
+#define EEPROM_MAC_ADDR_MAC0_92D 0x55
+#define EEPROM_MAC_ADDR_MAC1_92D 0x5B
+
+/* 2.4G band Tx power index setting */
+#define EEPROM_CCK_TX_PWR_INX_2G 0x61
+#define EEPROM_HT40_1S_TX_PWR_INX_2G 0x67
+#define EEPROM_HT40_2S_TX_PWR_INX_DIFF_2G 0x6D
+#define EEPROM_HT20_TX_PWR_INX_DIFF_2G 0x70
+#define EEPROM_OFDM_TX_PWR_INX_DIFF_2G 0x73
+#define EEPROM_HT40_MAX_PWR_OFFSET_2G 0x76
+#define EEPROM_HT20_MAX_PWR_OFFSET_2G 0x79
+
+/*5GL channel 32-64 */
+#define EEPROM_HT40_1S_TX_PWR_INX_5GL 0x7C
+#define EEPROM_HT40_2S_TX_PWR_INX_DIFF_5GL 0x82
+#define EEPROM_HT20_TX_PWR_INX_DIFF_5GL 0x85
+#define EEPROM_OFDM_TX_PWR_INX_DIFF_5GL 0x88
+#define EEPROM_HT40_MAX_PWR_OFFSET_5GL 0x8B
+#define EEPROM_HT20_MAX_PWR_OFFSET_5GL 0x8E
+
+/* 5GM channel 100-140 */
+#define EEPROM_HT40_1S_TX_PWR_INX_5GM 0x91
+#define EEPROM_HT40_2S_TX_PWR_INX_DIFF_5GM 0x97
+#define EEPROM_HT20_TX_PWR_INX_DIFF_5GM 0x9A
+#define EEPROM_OFDM_TX_PWR_INX_DIFF_5GM 0x9D
+#define EEPROM_HT40_MAX_PWR_OFFSET_5GM 0xA0
+#define EEPROM_HT20_MAX_PWR_OFFSET_5GM 0xA3
+
+/* 5GH channel 149-165 */
+#define EEPROM_HT40_1S_TX_PWR_INX_5GH 0xA6
+#define EEPROM_HT40_2S_TX_PWR_INX_DIFF_5GH 0xAC
+#define EEPROM_HT20_TX_PWR_INX_DIFF_5GH 0xAF
+#define EEPROM_OFDM_TX_PWR_INX_DIFF_5GH 0xB2
+#define EEPROM_HT40_MAX_PWR_OFFSET_5GH 0xB5
+#define EEPROM_HT20_MAX_PWR_OFFSET_5GH 0xB8
+
+/* Map of supported channels. */
+#define EEPROM_CHANNEL_PLAN 0xBB
+#define EEPROM_IQK_DELTA 0xBC
+#define EEPROM_LCK_DELTA 0xBC
+#define EEPROM_XTAL_K 0xBD /* [7:5] */
+#define EEPROM_TSSI_A_5G 0xBE
+#define EEPROM_TSSI_B_5G 0xBF
+#define EEPROM_TSSI_AB_5G 0xC0
+#define EEPROM_THERMAL_METER 0xC3 /* [4:0] */
+#define EEPROM_RF_OPT1 0xC4
+#define EEPROM_RF_OPT2 0xC5
+#define EEPROM_RF_OPT3 0xC6
+#define EEPROM_RF_OPT4 0xC7
+#define EEPROM_RF_OPT5 0xC8
+#define EEPROM_RF_OPT6 0xC9
+#define EEPROM_VERSION 0xCA
+#define EEPROM_CUSTOMER_ID 0xCB
+#define EEPROM_RF_OPT7 0xCC
+
+#define EEPROM_DEF_PART_NO 0x3FD /* Byte */
+#define EEPROME_CHIP_VERSION_L 0x3FF
+#define EEPROME_CHIP_VERSION_H 0x3FE
+
+/*
+ * Current IOREG MAP
+ * 0x0000h ~ 0x00FFh System Configuration (256 Bytes)
+ * 0x0100h ~ 0x01FFh MACTOP General Configuration (256 Bytes)
+ * 0x0200h ~ 0x027Fh TXDMA Configuration (128 Bytes)
+ * 0x0280h ~ 0x02FFh RXDMA Configuration (128 Bytes)
+ * 0x0300h ~ 0x03FFh PCIE EMAC Reserved Region (256 Bytes)
+ * 0x0400h ~ 0x04FFh Protocol Configuration (256 Bytes)
+ * 0x0500h ~ 0x05FFh EDCA Configuration (256 Bytes)
+ * 0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
+ * 0x2000h ~ 0x3FFFh 8051 FW Download Region (8192 Bytes)
+ */
+
+/* ----------------------------------------------------- */
+/* 8192C (RCR) (Offset 0x608, 32 bits) */
+/* ----------------------------------------------------- */
+#define RCR_APPFCS BIT(31)
+#define RCR_APP_MIC BIT(30)
+#define RCR_APP_ICV BIT(29)
+#define RCR_APP_PHYST_RXFF BIT(28)
+#define RCR_APP_BA_SSN BIT(27)
+#define RCR_ENMBID BIT(24)
+#define RCR_LSIGEN BIT(23)
+#define RCR_MFBEN BIT(22)
+#define RCR_HTC_LOC_CTRL BIT(14)
+#define RCR_AMF BIT(13)
+#define RCR_ACF BIT(12)
+#define RCR_ADF BIT(11)
+#define RCR_AICV BIT(9)
+#define RCR_ACRC32 BIT(8)
+#define RCR_CBSSID_BCN BIT(7)
+#define RCR_CBSSID_DATA BIT(6)
+#define RCR_APWRMGT BIT(5)
+#define RCR_ADD3 BIT(4)
+#define RCR_AB BIT(3)
+#define RCR_AM BIT(2)
+#define RCR_APM BIT(1)
+#define RCR_AAP BIT(0)
+#define RCR_MXDMA_OFFSET 8
+#define RCR_FIFO_OFFSET 13
+
+/* ----------------------------------------------------- */
+/* 8192C Register Bit and Content definition */
+/* ----------------------------------------------------- */
+/* ----------------------------------------------------- */
+/* 0x0000h ~ 0x00FFh System Configuration */
+/* ----------------------------------------------------- */
+
+/* SPS0_CTRL */
+#define SW18_FPWM BIT(3)
+
+
+/* SYS_ISO_CTRL */
+#define ISO_MD2PP BIT(0)
+#define ISO_UA2USB BIT(1)
+#define ISO_UD2CORE BIT(2)
+#define ISO_PA2PCIE BIT(3)
+#define ISO_PD2CORE BIT(4)
+#define ISO_IP2MAC BIT(5)
+#define ISO_DIOP BIT(6)
+#define ISO_DIOE BIT(7)
+#define ISO_EB2CORE BIT(8)
+#define ISO_DIOR BIT(9)
+
+#define PWC_EV25V BIT(14)
+#define PWC_EV12V BIT(15)
+
+
+/* SYS_FUNC_EN */
+#define FEN_BBRSTB BIT(0)
+#define FEN_BB_GLB_RSTn BIT(1)
+#define FEN_USBA BIT(2)
+#define FEN_UPLL BIT(3)
+#define FEN_USBD BIT(4)
+#define FEN_DIO_PCIE BIT(5)
+#define FEN_PCIEA BIT(6)
+#define FEN_PPLL BIT(7)
+#define FEN_PCIED BIT(8)
+#define FEN_DIOE BIT(9)
+#define FEN_CPUEN BIT(10)
+#define FEN_DCORE BIT(11)
+#define FEN_ELDR BIT(12)
+#define FEN_DIO_RF BIT(13)
+#define FEN_HWPDN BIT(14)
+#define FEN_MREGEN BIT(15)
+
+/* APS_FSMCO */
+#define PFM_LDALL BIT(0)
+#define PFM_ALDN BIT(1)
+#define PFM_LDKP BIT(2)
+#define PFM_WOWL BIT(3)
+#define EnPDN BIT(4)
+#define PDN_PL BIT(5)
+#define APFM_ONMAC BIT(8)
+#define APFM_OFF BIT(9)
+#define APFM_RSM BIT(10)
+#define AFSM_HSUS BIT(11)
+#define AFSM_PCIE BIT(12)
+#define APDM_MAC BIT(13)
+#define APDM_HOST BIT(14)
+#define APDM_HPDN BIT(15)
+#define RDY_MACON BIT(16)
+#define SUS_HOST BIT(17)
+#define ROP_ALD BIT(20)
+#define ROP_PWR BIT(21)
+#define ROP_SPS BIT(22)
+#define SOP_MRST BIT(25)
+#define SOP_FUSE BIT(26)
+#define SOP_ABG BIT(27)
+#define SOP_AMB BIT(28)
+#define SOP_RCK BIT(29)
+#define SOP_A8M BIT(30)
+#define XOP_BTCK BIT(31)
+
+/* SYS_CLKR */
+#define ANAD16V_EN BIT(0)
+#define ANA8M BIT(1)
+#define MACSLP BIT(4)
+#define LOADER_CLK_EN BIT(5)
+#define _80M_SSC_DIS BIT(7)
+#define _80M_SSC_EN_HO BIT(8)
+#define PHY_SSC_RSTB BIT(9)
+#define SEC_CLK_EN BIT(10)
+#define MAC_CLK_EN BIT(11)
+#define SYS_CLK_EN BIT(12)
+#define RING_CLK_EN BIT(13)
+
+
+/* 9346CR */
+#define BOOT_FROM_EEPROM BIT(4)
+#define EEPROM_EN BIT(5)
+
+/* AFE_MISC */
+#define AFE_BGEN BIT(0)
+#define AFE_MBEN BIT(1)
+#define MAC_ID_EN BIT(7)
+
+/* RSV_CTRL */
+#define WLOCK_ALL BIT(0)
+#define WLOCK_00 BIT(1)
+#define WLOCK_04 BIT(2)
+#define WLOCK_08 BIT(3)
+#define WLOCK_40 BIT(4)
+#define R_DIS_PRST_0 BIT(5)
+#define R_DIS_PRST_1 BIT(6)
+#define LOCK_ALL_EN BIT(7)
+
+/* RF_CTRL */
+#define RF_EN BIT(0)
+#define RF_RSTB BIT(1)
+#define RF_SDMRSTB BIT(2)
+
+
+
+/* LDOA15_CTRL */
+#define LDA15_EN BIT(0)
+#define LDA15_STBY BIT(1)
+#define LDA15_OBUF BIT(2)
+#define LDA15_REG_VOS BIT(3)
+#define _LDA15_VOADJ(x) (((x) & 0x7) << 4)
+
+
+
+/* LDOV12D_CTRL */
+#define LDV12_EN BIT(0)
+#define LDV12_SDBY BIT(1)
+#define LPLDO_HSM BIT(2)
+#define LPLDO_LSM_DIS BIT(3)
+#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
+
+
+/* AFE_XTAL_CTRL */
+#define XTAL_EN BIT(0)
+#define XTAL_BSEL BIT(1)
+#define _XTAL_BOSC(x) (((x) & 0x3) << 2)
+#define _XTAL_CADJ(x) (((x) & 0xF) << 4)
+#define XTAL_GATE_USB BIT(8)
+#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9)
+#define XTAL_GATE_AFE BIT(11)
+#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12)
+#define XTAL_RF_GATE BIT(14)
+#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15)
+#define XTAL_GATE_DIG BIT(17)
+#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18)
+#define XTAL_BT_GATE BIT(20)
+#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21)
+#define _XTAL_GPIO(x) (((x) & 0x7) << 23)
+
+
+#define CKDLY_AFE BIT(26)
+#define CKDLY_USB BIT(27)
+#define CKDLY_DIG BIT(28)
+#define CKDLY_BT BIT(29)
+
+
+/* AFE_PLL_CTRL */
+#define APLL_EN BIT(0)
+#define APLL_320_EN BIT(1)
+#define APLL_FREF_SEL BIT(2)
+#define APLL_EDGE_SEL BIT(3)
+#define APLL_WDOGB BIT(4)
+#define APLL_LPFEN BIT(5)
+
+#define APLL_REF_CLK_13MHZ 0x1
+#define APLL_REF_CLK_19_2MHZ 0x2
+#define APLL_REF_CLK_20MHZ 0x3
+#define APLL_REF_CLK_25MHZ 0x4
+#define APLL_REF_CLK_26MHZ 0x5
+#define APLL_REF_CLK_38_4MHZ 0x6
+#define APLL_REF_CLK_40MHZ 0x7
+
+#define APLL_320EN BIT(14)
+#define APLL_80EN BIT(15)
+#define APLL_1MEN BIT(24)
+
+
+/* EFUSE_CTRL */
+#define ALD_EN BIT(18)
+#define EF_PD BIT(19)
+#define EF_FLAG BIT(31)
+
+/* EFUSE_TEST */
+#define EF_TRPT BIT(7)
+#define LDOE25_EN BIT(31)
+
+/* MCUFWDL */
+#define MCUFWDL_EN BIT(0)
+#define MCUFWDL_RDY BIT(1)
+#define FWDL_ChkSum_rpt BIT(2)
+#define MACINI_RDY BIT(3)
+#define BBINI_RDY BIT(4)
+#define RFINI_RDY BIT(5)
+#define WINTINI_RDY BIT(6)
+#define MAC1_WINTINI_RDY BIT(11)
+#define CPRST BIT(23)
+
+/* REG_SYS_CFG */
+#define XCLK_VLD BIT(0)
+#define ACLK_VLD BIT(1)
+#define UCLK_VLD BIT(2)
+#define PCLK_VLD BIT(3)
+#define PCIRSTB BIT(4)
+#define V15_VLD BIT(5)
+#define TRP_B15V_EN BIT(7)
+#define SIC_IDLE BIT(8)
+#define BD_MAC2 BIT(9)
+#define BD_MAC1 BIT(10)
+#define IC_MACPHY_MODE BIT(11)
+#define PAD_HWPD_IDN BIT(22)
+#define TRP_VAUX_EN BIT(23)
+#define TRP_BT_EN BIT(24)
+#define BD_PKG_SEL BIT(25)
+#define BD_HCI_SEL BIT(26)
+#define TYPE_ID BIT(27)
+
+/* LLT_INIT */
+#define _LLT_NO_ACTIVE 0x0
+#define _LLT_WRITE_ACCESS 0x1
+#define _LLT_READ_ACCESS 0x2
+
+#define _LLT_INIT_DATA(x) ((x) & 0xFF)
+#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
+#define _LLT_OP(x) (((x) & 0x3) << 30)
+#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
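+
+/*
+ * Editor's sketch, not part of the original patch: the macros above are
+ * combined into a single dword written to REG_LLT_INIT, following the
+ * usual rtlwifi pattern (rtl_write_dword()/rtl_read_dword() are the
+ * generic rtlwifi register accessors; "address", "data" and "value"
+ * are illustrative names only):
+ *
+ *	value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) |
+ *		_LLT_OP(_LLT_WRITE_ACCESS);
+ *	rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
+ *
+ * The caller then polls _LLT_OP_VALUE(rtl_read_dword(rtlpriv,
+ * REG_LLT_INIT)) until it reads _LLT_NO_ACTIVE before issuing the
+ * next LLT command.
+ */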
+
+
+/* ----------------------------------------------------- */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* ----------------------------------------------------- */
+#define RETRY_LIMIT_SHORT_SHIFT 8
+#define RETRY_LIMIT_LONG_SHIFT 0
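+
+/*
+ * Editor's sketch, not part of the original patch: REG_RL carries both
+ * retry limits, composed with the shifts above, e.g.
+ *
+ *	(short_limit << RETRY_LIMIT_SHORT_SHIFT) |
+ *	(long_limit << RETRY_LIMIT_LONG_SHIFT)
+ *
+ * short_limit and long_limit are illustrative names, not driver fields.
+ */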
+
+
+/* ----------------------------------------------------- */
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+/* ----------------------------------------------------- */
+/* EDCA setting */
+#define AC_PARAM_TXOP_LIMIT_OFFSET 16
+#define AC_PARAM_ECW_MAX_OFFSET 12
+#define AC_PARAM_ECW_MIN_OFFSET 8
+#define AC_PARAM_AIFS_OFFSET 0
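+
+/*
+ * Editor's sketch, not part of the original patch: each per-AC EDCA
+ * register (REG_EDCA_VO/VI/BE/BK_PARAM) packs the four EDCA fields into
+ * one dword using the offsets above, roughly:
+ *
+ *	param = (txop_limit << AC_PARAM_TXOP_LIMIT_OFFSET) |
+ *		(ecw_max << AC_PARAM_ECW_MAX_OFFSET) |
+ *		(ecw_min << AC_PARAM_ECW_MIN_OFFSET) |
+ *		(aifs << AC_PARAM_AIFS_OFFSET);
+ *
+ * txop_limit, ecw_max, ecw_min and aifs are illustrative names only.
+ */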
+
+/* ACMHWCTRL */
+#define ACMHW_HWEN BIT(0)
+#define ACMHW_BEQEN BIT(1)
+#define ACMHW_VIQEN BIT(2)
+#define ACMHW_VOQEN BIT(3)
+
+/* ----------------------------------------------------- */
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* ----------------------------------------------------- */
+
+/* TCR */
+#define TSFRST BIT(0)
+#define DIS_GCLK BIT(1)
+#define PAD_SEL BIT(2)
+#define PWR_ST BIT(6)
+#define PWRBIT_OW_EN BIT(7)
+#define ACRC BIT(8)
+#define CFENDFORM BIT(9)
+#define ICV BIT(10)
+
+/* SECCFG */
+#define SCR_TXUSEDK BIT(0)
+#define SCR_RXUSEDK BIT(1)
+#define SCR_TXENCENABLE BIT(2)
+#define SCR_RXENCENABLE BIT(3)
+#define SCR_SKBYA2 BIT(4)
+#define SCR_NOSKMC BIT(5)
+#define SCR_TXBCUSEDK BIT(6)
+#define SCR_RXBCUSEDK BIT(7)
+
+/* General definitions */
+#define MAC_ADDR_LEN 6
+#define LAST_ENTRY_OF_TX_PKT_BUFFER 255
+#define LAST_ENTRY_OF_TX_PKT_BUFFER_DUAL_MAC 127
+
+#define POLLING_LLT_THRESHOLD 20
+#define POLLING_READY_TIMEOUT_COUNT 1000
+
+/* Min Spacing related settings. */
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+
+/* BB-PHY register PMAC 0x100 PHY 0x800 - 0xEFF */
+/* 1. PMAC duplicate register due to connection: */
+/* RF_Mode, TRxRN, NumOf L-STF */
+/* 2. 0x800/0x900/0xA00/0xC00/0xD00/0xE00 */
+/* 3. RF register 0x00-2E */
+/* 4. Bit Mask for BB/RF register */
+/* 5. Other definitions for BB/RF R/W */
+
+/* 3. Page8(0x800) */
+#define RFPGA0_RFMOD 0x800
+
+#define RFPGA0_TXINFO 0x804
+#define RFPGA0_PSDFUNCTION 0x808
+
+#define RFPGA0_TXGAINSTAGE 0x80c
+
+#define RFPGA0_RFTIMING1 0x810
+#define RFPGA0_RFTIMING2 0x814
+
+#define RFPGA0_XA_HSSIPARAMETER1 0x820
+#define RFPGA0_XA_HSSIPARAMETER2 0x824
+#define RFPGA0_XB_HSSIPARAMETER1 0x828
+#define RFPGA0_XB_HSSIPARAMETER2 0x82c
+
+#define RFPGA0_XA_LSSIPARAMETER 0x840
+#define RFPGA0_XB_LSSIPARAMETER 0x844
+
+#define RFPGA0_RFWAkEUPPARAMETER 0x850
+#define RFPGA0_RFSLEEPUPPARAMETER 0x854
+
+#define RFPGA0_XAB_SWITCHCONTROL 0x858
+#define RFPGA0_XCD_SWITCHCONTROL 0x85c
+
+#define RFPGA0_XA_RFINTERFACEOE 0x860
+#define RFPGA0_XB_RFINTERFACEOE 0x864
+
+#define RFPGA0_XAB_RFINTERFACESW 0x870
+#define RFPGA0_XCD_RFINTERFACESW 0x874
+
+#define RFPGA0_XAB_RFPARAMETER 0x878
+#define RFPGA0_XCD_RFPARAMETER 0x87c
+
+#define RFPGA0_ANALOGPARAMETER1 0x880
+#define RFPGA0_ANALOGPARAMETER2 0x884
+#define RFPGA0_ANALOGPARAMETER3 0x888
+#define RFPGA0_ADDALLOCKEN 0x888
+#define RFPGA0_ANALOGPARAMETER4 0x88c
+
+#define RFPGA0_XA_LSSIREADBACK 0x8a0
+#define RFPGA0_XB_LSSIREADBACK 0x8a4
+#define RFPGA0_XC_LSSIREADBACK 0x8a8
+#define RFPGA0_XD_LSSIREADBACK 0x8ac
+
+#define RFPGA0_PSDREPORT 0x8b4
+#define TRANSCEIVERA_HSPI_READBACK 0x8b8
+#define TRANSCEIVERB_HSPI_READBACK 0x8bc
+#define RFPGA0_XAB_RFINTERFACERB 0x8e0
+#define RFPGA0_XCD_RFINTERFACERB 0x8e4
+
+/* 4. Page9(0x900) */
+#define RFPGA1_RFMOD 0x900
+
+#define RFPGA1_TXBLOCK 0x904
+#define RFPGA1_DEBUGSELECT 0x908
+#define RFPGA1_TXINFO 0x90c
+
+/* 5. PageA(0xA00) */
+#define RCCK0_SYSTEM 0xa00
+
+#define RCCK0_AFESSTTING 0xa04
+#define RCCK0_CCA 0xa08
+
+#define RCCK0_RXAGC1 0xa0c
+#define RCCK0_RXAGC2 0xa10
+
+#define RCCK0_RXHP 0xa14
+
+#define RCCK0_DSPPARAMETER1 0xa18
+#define RCCK0_DSPPARAMETER2 0xa1c
+
+#define RCCK0_TXFILTER1 0xa20
+#define RCCK0_TXFILTER2 0xa24
+#define RCCK0_DEBUGPORT 0xa28
+#define RCCK0_FALSEALARMREPORT 0xa2c
+#define RCCK0_TRSSIREPORT 0xa50
+#define RCCK0_RXREPORT 0xa54
+#define RCCK0_FACOUNTERLOWER 0xa5c
+#define RCCK0_FACOUNTERUPPER 0xa58
+
+/* 6. PageC(0xC00) */
+#define ROFDM0_LSTF 0xc00
+
+#define ROFDM0_TRXPATHENABLE 0xc04
+#define ROFDM0_TRMUXPAR 0xc08
+#define ROFDM0_TRSWISOLATION 0xc0c
+
+#define ROFDM0_XARXAFE 0xc10
+#define ROFDM0_XARXIQIMBALANCE 0xc14
+#define ROFDM0_XBRXAFE 0xc18
+#define ROFDM0_XBRXIQIMBALANCE 0xc1c
+#define ROFDM0_XCRXAFE 0xc20
+#define ROFDM0_XCRXIQIMBALANCE 0xc24
+#define ROFDM0_XDRXAFE 0xc28
+#define ROFDM0_XDRXIQIMBALANCE 0xc2c
+
+#define ROFDM0_RXDETECTOR1 0xc30
+#define ROFDM0_RXDETECTOR2 0xc34
+#define ROFDM0_RXDETECTOR3 0xc38
+#define ROFDM0_RXDETECTOR4 0xc3c
+
+#define ROFDM0_RXDSP 0xc40
+#define ROFDM0_CFOANDDAGC 0xc44
+#define ROFDM0_CCADROPTHRESHOLD 0xc48
+#define ROFDM0_ECCATHRESHOLD 0xc4c
+
+#define ROFDM0_XAAGCCORE1 0xc50
+#define ROFDM0_XAAGCCORE2 0xc54
+#define ROFDM0_XBAGCCORE1 0xc58
+#define ROFDM0_XBAGCCORE2 0xc5c
+#define ROFDM0_XCAGCCORE1 0xc60
+#define ROFDM0_XCAGCCORE2 0xc64
+#define ROFDM0_XDAGCCORE1 0xc68
+#define ROFDM0_XDAGCCORE2 0xc6c
+
+#define ROFDM0_AGCPARAMETER1 0xc70
+#define ROFDM0_AGCPARAMETER2 0xc74
+#define ROFDM0_AGCRSSITABLE 0xc78
+#define ROFDM0_HTSTFAGC 0xc7c
+
+#define ROFDM0_XATxIQIMBALANCE 0xc80
+#define ROFDM0_XATxAFE 0xc84
+#define ROFDM0_XBTxIQIMBALANCE 0xc88
+#define ROFDM0_XBTxAFE 0xc8c
+#define ROFDM0_XCTxIQIMBALANCE 0xc90
+#define ROFDM0_XCTxAFE 0xc94
+#define ROFDM0_XDTxIQIMBALANCE 0xc98
+#define ROFDM0_XDTxAFE 0xc9c
+
+#define ROFDM0_RXHPPARAMETER 0xce0
+#define ROFDM0_TXPSEUDONOISEWGT 0xce4
+#define ROFDM0_FRAMESYNC 0xcf0
+#define ROFDM0_DFSREPORT 0xcf4
+#define ROFDM0_TXCOEFF1 0xca4
+#define ROFDM0_TXCOEFF2 0xca8
+#define ROFDM0_TXCOEFF3 0xcac
+#define ROFDM0_TXCOEFF4 0xcb0
+#define ROFDM0_TXCOEFF5 0xcb4
+#define ROFDM0_TXCOEFF6 0xcb8
+
+/* 7. PageD(0xD00) */
+#define ROFDM1_LSTF 0xd00
+#define ROFDM1_TRXPATHENABLE 0xd04
+
+#define ROFDM1_CFO 0xd08
+#define ROFDM1_CSI1 0xd10
+#define ROFDM1_SBD 0xd14
+#define ROFDM1_CSI2 0xd18
+#define ROFDM1_CFOTRACKING 0xd2c
+#define ROFDM1_TRXMESAURE1 0xd34
+#define ROFDM1_INTFDET 0xd3c
+#define ROFDM1_PSEUDONOISESTATEAB 0xd50
+#define ROFDM1_PSEUDONOISESTATECD 0xd54
+#define ROFDM1_RXPSEUDONOISEWGT 0xd58
+
+#define ROFDM_PHYCOUNTER1 0xda0
+#define ROFDM_PHYCOUNTER2 0xda4
+#define ROFDM_PHYCOUNTER3 0xda8
+
+#define ROFDM_SHORTCFOAB 0xdac
+#define ROFDM_SHORTCFOCD 0xdb0
+#define ROFDM_LONGCFOAB 0xdb4
+#define ROFDM_LONGCFOCD 0xdb8
+#define ROFDM_TAILCFOAB 0xdbc
+#define ROFDM_TAILCFOCD 0xdc0
+#define ROFDM_PWMEASURE1 0xdc4
+#define ROFDM_PWMEASURE2 0xdc8
+#define ROFDM_BWREPORT 0xdcc
+#define ROFDM_AGCREPORT 0xdd0
+#define ROFDM_RXSNR 0xdd4
+#define ROFDM_RXEVMCSI 0xdd8
+#define ROFDM_SIGReport 0xddc
+
+/* 8. PageE(0xE00) */
+#define RTXAGC_A_RATE18_06 0xe00
+#define RTXAGC_A_RATE54_24 0xe04
+#define RTXAGC_A_CCK1_MCS32 0xe08
+#define RTXAGC_A_MCS03_MCS00 0xe10
+#define RTXAGC_A_MCS07_MCS04 0xe14
+#define RTXAGC_A_MCS11_MCS08 0xe18
+#define RTXAGC_A_MCS15_MCS12 0xe1c
+
+#define RTXAGC_B_RATE18_06 0x830
+#define RTXAGC_B_RATE54_24 0x834
+#define RTXAGC_B_CCK1_55_MCS32 0x838
+#define RTXAGC_B_MCS03_MCS00 0x83c
+#define RTXAGC_B_MCS07_MCS04 0x848
+#define RTXAGC_B_MCS11_MCS08 0x84c
+#define RTXAGC_B_MCS15_MCS12 0x868
+#define RTXAGC_B_CCK11_A_CCK2_11 0x86c
+
+/* RL6052 Register definition */
+#define RF_AC 0x00
+
+#define RF_IQADJ_G1 0x01
+#define RF_IQADJ_G2 0x02
+#define RF_POW_TRSW 0x05
+
+#define RF_GAIN_RX 0x06
+#define RF_GAIN_TX 0x07
+
+#define RF_TXM_IDAC 0x08
+#define RF_BS_IQGEN 0x0F
+
+#define RF_MODE1 0x10
+#define RF_MODE2 0x11
+
+#define RF_RX_AGC_HP 0x12
+#define RF_TX_AGC 0x13
+#define RF_BIAS 0x14
+#define RF_IPA 0x15
+#define RF_POW_ABILITY 0x17
+#define RF_MODE_AG 0x18
+#define rRfChannel 0x18
+#define RF_CHNLBW 0x18
+#define RF_TOP 0x19
+
+#define RF_RX_G1 0x1A
+#define RF_RX_G2 0x1B
+
+#define RF_RX_BB2 0x1C
+#define RF_RX_BB1 0x1D
+
+#define RF_RCK1 0x1E
+#define RF_RCK2 0x1F
+
+#define RF_TX_G1 0x20
+#define RF_TX_G2 0x21
+#define RF_TX_G3 0x22
+
+#define RF_TX_BB1 0x23
+
+#define RF_T_METER 0x42
+
+#define RF_SYN_G1 0x25
+#define RF_SYN_G2 0x26
+#define RF_SYN_G3 0x27
+#define RF_SYN_G4 0x28
+#define RF_SYN_G5 0x29
+#define RF_SYN_G6 0x2A
+#define RF_SYN_G7 0x2B
+#define RF_SYN_G8 0x2C
+
+#define RF_RCK_OS 0x30
+
+#define RF_TXPA_G1 0x31
+#define RF_TXPA_G2 0x32
+#define RF_TXPA_G3 0x33
+
+/* Bit Mask */
+
+/* 2. Page8(0x800) */
+#define BRFMOD 0x1
+#define BCCKTXSC 0x30
+#define BCCKEN 0x1000000
+#define BOFDMEN 0x2000000
+
+#define B3WIREDATALENGTH 0x800
+#define B3WIREADDRESSLENGTH 0x400
+
+#define BRFSI_RFENV 0x10
+
+#define BLSSIREADADDRESS 0x7f800000
+#define BLSSIREADEDGE 0x80000000
+#define BLSSIREADBACKDATA 0xfffff
+/* 4. PageA(0xA00) */
+#define BCCKSIDEBAND 0x10
+
+/* Other Definition */
+#define BBYTE0 0x1
+#define BBYTE1 0x2
+#define BBYTE2 0x4
+#define BBYTE3 0x8
+#define BWORD0 0x3
+#define BWORD1 0xc
+#define BDWORD 0xf
+
+#define BMASKBYTE0 0xff
+#define BMASKBYTE1 0xff00
+#define BMASKBYTE2 0xff0000
+#define BMASKBYTE3 0xff000000
+#define BMASKHWORD 0xffff0000
+#define BMASKLWORD 0x0000ffff
+#define BMASKDWORD 0xffffffff
+#define BMASK12BITS 0xfff
+#define BMASKH4BITS 0xf0000000
+#define BMASKOFDM_D 0xffc00000
+#define BMASKCCK 0x3f3f3f3f
+
+#define BRFREGOFFSETMASK 0xfffff
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
new file mode 100644
index 00000000000..db27cebaac2
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
@@ -0,0 +1,628 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+#include "hw.h"
+
+void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 rfpath;
+
+ switch (bandwidth) {
+ case HT_CHANNEL_WIDTH_20:
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+ rtlphy->rfreg_chnlval[rfpath] = ((rtlphy->rfreg_chnlval
+ [rfpath] & 0xfffff3ff) | 0x0400);
+ rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) |
+ BIT(11), 0x01);
+
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
+ ("20M RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[rfpath]));
+ }
+
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+ rtlphy->rfreg_chnlval[rfpath] =
+ ((rtlphy->rfreg_chnlval[rfpath] & 0xfffff3ff));
+ rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) | BIT(11),
+ 0x00);
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
+ ("40M RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[rfpath]));
+ }
+ break;
+ default:
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("unknown bandwidth: %#X\n", bandwidth));
+ break;
+ }
+}
+
+void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 tx_agc[2] = {0, 0}, tmpval;
+ bool turbo_scanoff = false;
+ u8 idx1, idx2;
+ u8 *ptr;
+
+ if (rtlefuse->eeprom_regulatory != 0)
+ turbo_scanoff = true;
+ if (mac->act_scanning) {
+ tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+ tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+ if (turbo_scanoff) {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ }
+ } else {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ if (rtlefuse->eeprom_regulatory == 0) {
+ tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][6]) +
+ (rtlphy->mcs_txpwrlevel_origoffset[0][7] << 8);
+ tx_agc[RF90_PATH_A] += tmpval;
+ tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) +
+ (rtlphy->mcs_txpwrlevel_origoffset[0][15] << 24);
+ tx_agc[RF90_PATH_B] += tmpval;
+ }
+ }
+
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ ptr = (u8 *) (&(tx_agc[idx1]));
+ for (idx2 = 0; idx2 < 4; idx2++) {
+ if (*ptr > RF6052_MAX_TX_PWR)
+ *ptr = RF6052_MAX_TX_PWR;
+ ptr++;
+ }
+ }
+
+ tmpval = tx_agc[RF90_PATH_A] & 0xff;
+ rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, BMASKBYTE1, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_A_CCK1_MCS32));
+ tmpval = tx_agc[RF90_PATH_A] >> 8;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK11_A_CCK2_11));
+ tmpval = tx_agc[RF90_PATH_B] >> 24;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, BMASKBYTE0, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK11_A_CCK2_11));
+ tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+ RTXAGC_B_CCK1_55_MCS32));
+}
+
+static void _rtl92d_phy_get_power_base(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel,
+ u32 *ofdmbase, u32 *mcsbase)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u32 powerbase0, powerbase1;
+ u8 legacy_pwrdiff, ht20_pwrdiff;
+ u8 i, powerlevel[2];
+
+ for (i = 0; i < 2; i++) {
+ powerlevel[i] = ppowerlevel[i];
+ legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
+ powerbase0 = powerlevel[i] + legacy_pwrdiff;
+ powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) |
+ (powerbase0 << 8) | powerbase0;
+ *(ofdmbase + i) = powerbase0;
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ (" [OFDM power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
+ ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
+ powerlevel[i] += ht20_pwrdiff;
+ }
+ powerbase1 = powerlevel[i];
+ powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) |
+ (powerbase1 << 8) | powerbase1;
+ *(mcsbase + i) = powerbase1;
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ (" [MCS power base index rf(%c) = 0x%x]\n",
+ ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
+ }
+}
+
+static u8 _rtl92d_phy_get_chnlgroup_bypg(u8 chnlindex)
+{
+ u8 group;
+ u8 channel_info[59] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
+ 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
+ 114, 116, 118, 120, 122, 124, 126, 128, 130, 132,
+ 134, 136, 138, 140, 149, 151, 153, 155, 157, 159,
+ 161, 163, 165
+ };
+
+ if (channel_info[chnlindex] <= 3) /* Channel 1-3 */
+ group = 0;
+ else if (channel_info[chnlindex] <= 9) /* Channel 4-9 */
+ group = 1;
+ else if (channel_info[chnlindex] <= 14) /* Channel 10-14 */
+ group = 2;
+ else if (channel_info[chnlindex] <= 64)
+ group = 6;
+ else if (channel_info[chnlindex] <= 140)
+ group = 7;
+ else
+ group = 8;
+ return group;
+}
+
+static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+ u8 channel, u8 index,
+ u32 *powerbase0,
+ u32 *powerbase1,
+ u32 *p_outwriteval)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 i, chnlgroup = 0, pwr_diff_limit[4];
+ u32 writeval = 0, customer_limit, rf;
+
+ for (rf = 0; rf < 2; rf++) {
+ switch (rtlefuse->eeprom_regulatory) {
+ case 0:
+ chnlgroup = 0;
+ writeval = rtlphy->mcs_txpwrlevel_origoffset
+ [chnlgroup][index +
+ (rf ? 8 : 0)] + ((index < 2) ?
+ powerbase0[rf] :
+ powerbase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR, ("RTK better "
+ "performance, writeval(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeval));
+ break;
+ case 1:
+ if (rtlphy->pwrgroup_cnt == 1)
+ chnlgroup = 0;
+ if (rtlphy->pwrgroup_cnt >= MAX_PG_GROUP) {
+ chnlgroup = _rtl92d_phy_get_chnlgroup_bypg(
+ channel - 1);
+ if (rtlphy->current_chan_bw ==
+ HT_CHANNEL_WIDTH_20)
+ chnlgroup++;
+ else
+ chnlgroup += 4;
+ writeval = rtlphy->mcs_txpwrlevel_origoffset
+ [chnlgroup][index +
+ (rf ? 8 : 0)] + ((index < 2) ?
+ powerbase0[rf] :
+ powerbase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Realtek regulatory, "
+ "20MHz, writeval(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'),
+ writeval));
+ }
+ break;
+ case 2:
+ writeval = ((index < 2) ? powerbase0[rf] :
+ powerbase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR, ("Better regulatory, "
+ "writeval(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeval));
+ break;
+ case 3:
+ chnlgroup = 0;
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("customer's limit, 40MHz rf(%c) = "
+ "0x%x\n", ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht40[rf]
+ [channel - 1]));
+ } else {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("customer's limit, 20MHz rf(%c) = "
+ "0x%x\n", ((rf == 0) ? 'A' : 'B'),
+ rtlefuse->pwrgroup_ht20[rf]
+ [channel - 1]));
+ }
+ for (i = 0; i < 4; i++) {
+ pwr_diff_limit[i] =
+ (u8)((rtlphy->mcs_txpwrlevel_origoffset
+ [chnlgroup][index + (rf ? 8 : 0)] &
+ (0x7f << (i * 8))) >> (i * 8));
+ if (rtlphy->current_chan_bw ==
+ HT_CHANNEL_WIDTH_20_40) {
+ if (pwr_diff_limit[i] >
+ rtlefuse->pwrgroup_ht40[rf]
+ [channel - 1])
+ pwr_diff_limit[i] =
+ rtlefuse->pwrgroup_ht40
+ [rf][channel - 1];
+ } else {
+ if (pwr_diff_limit[i] >
+ rtlefuse->pwrgroup_ht20[rf][
+ channel - 1])
+ pwr_diff_limit[i] =
+ rtlefuse->pwrgroup_ht20[rf]
+ [channel - 1];
+ }
+ }
+ customer_limit = (pwr_diff_limit[3] << 24) |
+ (pwr_diff_limit[2] << 16) |
+ (pwr_diff_limit[1] << 8) |
+ (pwr_diff_limit[0]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Customer's limit rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), customer_limit));
+ writeval = customer_limit + ((index < 2) ?
+ powerbase0[rf] : powerbase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Customer, writeval rf(%c)= 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeval));
+ break;
+ default:
+ chnlgroup = 0;
+ writeval = rtlphy->mcs_txpwrlevel_origoffset
+ [chnlgroup][index +
+ (rf ? 8 : 0)] + ((index < 2) ?
+ powerbase0[rf] : powerbase1[rf]);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("RTK better performance, writeval "
+ "rf(%c) = 0x%x\n",
+ ((rf == 0) ? 'A' : 'B'), writeval));
+ break;
+ }
+ *(p_outwriteval + rf) = writeval;
+ }
+}
+
+static void _rtl92d_write_ofdm_power_reg(struct ieee80211_hw *hw,
+ u8 index, u32 *pvalue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ static u16 regoffset_a[6] = {
+ RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
+ RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+ RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+ };
+ static u16 regoffset_b[6] = {
+ RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
+ RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+ RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+ };
+ u8 i, rf, pwr_val[4];
+ u32 writeval;
+ u16 regoffset;
+
+ for (rf = 0; rf < 2; rf++) {
+ writeval = pvalue[rf];
+ for (i = 0; i < 4; i++) {
+ pwr_val[i] = (u8) ((writeval & (0x7f <<
+ (i * 8))) >> (i * 8));
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = RF6052_MAX_TX_PWR;
+ }
+ writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+ (pwr_val[1] << 8) | pwr_val[0];
+ if (rf == 0)
+ regoffset = regoffset_a[index];
+ else
+ regoffset = regoffset_b[index];
+ rtl_set_bbreg(hw, regoffset, BMASKDWORD, writeval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ ("Set 0x%x = %08x\n", regoffset, writeval));
+ if (((get_rf_type(rtlphy) == RF_2T2R) &&
+ (regoffset == RTXAGC_A_MCS15_MCS12 ||
+ regoffset == RTXAGC_B_MCS15_MCS12)) ||
+ ((get_rf_type(rtlphy) != RF_2T2R) &&
+ (regoffset == RTXAGC_A_MCS07_MCS04 ||
+ regoffset == RTXAGC_B_MCS07_MCS04))) {
+ writeval = pwr_val[3];
+ if (regoffset == RTXAGC_A_MCS15_MCS12 ||
+ regoffset == RTXAGC_A_MCS07_MCS04)
+ regoffset = 0xc90;
+ if (regoffset == RTXAGC_B_MCS15_MCS12 ||
+ regoffset == RTXAGC_B_MCS07_MCS04)
+ regoffset = 0xc98;
+ for (i = 0; i < 3; i++) {
+ if (i != 2)
+ writeval = (writeval > 8) ?
+ (writeval - 8) : 0;
+ else
+ writeval = (writeval > 6) ?
+ (writeval - 6) : 0;
+ rtl_write_byte(rtlpriv, (u32) (regoffset + i),
+ (u8) writeval);
+ }
+ }
+ }
+}
+
+void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel)
+{
+ u32 writeval[2], powerbase0[2], powerbase1[2];
+ u8 index;
+
+ _rtl92d_phy_get_power_base(hw, ppowerlevel, channel,
+ &powerbase0[0], &powerbase1[0]);
+ for (index = 0; index < 6; index++) {
+ _rtl92d_get_txpower_writeval_by_regulatory(hw,
+ channel, index, &powerbase0[0],
+ &powerbase1[0], &writeval[0]);
+ _rtl92d_write_ofdm_power_reg(hw, index, &writeval[0]);
+ }
+}
+
+bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ u8 u1btmp;
+ u8 direct = bmac0 ? BIT(3) | BIT(2) : BIT(3);
+ u8 mac_reg = bmac0 ? REG_MAC1 : REG_MAC0;
+ u8 mac_on_bit = bmac0 ? MAC1_ON : MAC0_ON;
+ bool bresult = true; /* true: need to enable BB/RF power */
+
+ rtlhal->during_mac0init_radiob = false;
+ rtlhal->during_mac1init_radioa = false;
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("===>\n"));
+ /* MAC0 needs PHY1 to load radio_b.txt; the driver uses DBI to write it. */
+ u1btmp = rtl_read_byte(rtlpriv, mac_reg);
+ if (!(u1btmp & mac_on_bit)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("enable BB & RF\n"));
+ /* Enable BB and RF power */
+ rtl92de_write_dword_dbi(hw, REG_SYS_ISO_CTRL,
+ rtl92de_read_dword_dbi(hw, REG_SYS_ISO_CTRL, direct) |
+ BIT(29) | BIT(16) | BIT(17), direct);
+ } else {
+ /* If MAC1 is already on, radio_a.txt and
+ * radio_b.txt are assumed to have been loaded. */
+ bresult = false;
+ }
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("<===\n"));
+ return bresult;
+
+}
+
+void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ u8 u1btmp;
+ u8 direct = bmac0 ? BIT(3) | BIT(2) : BIT(3);
+ u8 mac_reg = bmac0 ? REG_MAC1 : REG_MAC0;
+ u8 mac_on_bit = bmac0 ? MAC1_ON : MAC0_ON;
+
+ rtlhal->during_mac0init_radiob = false;
+ rtlhal->during_mac1init_radioa = false;
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("====>\n"));
+ /* Check again whether MAC0 is enabled; if it is,
+ * do not power down radio A. */
+ u1btmp = rtl_read_byte(rtlpriv, mac_reg);
+ if (!(u1btmp & mac_on_bit)) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("power down\n"));
+ /* power down RF radio A according to YuNan's advice. */
+ rtl92de_write_dword_dbi(hw, RFPGA0_XA_LSSIPARAMETER,
+ 0x00000000, direct);
+ }
+ RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, ("<====\n"));
+}
+
+bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ bool rtstatus = true;
+ struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+ u32 u4_regvalue = 0;
+ u8 rfpath;
+ struct bb_reg_def *pphyreg;
+ bool mac1_initradioa_first = false, mac0_initradiob_first = false;
+ bool need_pwrdown_radioa = false, need_pwrdown_radiob = false;
+ bool true_bpath = false;
+
+ if (rtlphy->rf_type == RF_1T1R)
+ rtlphy->num_total_rfpath = 1;
+ else
+ rtlphy->num_total_rfpath = 2;
+
+ /* Single PHY mode: MAC0 uses radio_a and radio_b to configure */
+ /* path_A and path_B separately, and MAC1 need not configure RF; */
+ /* Dual PHY mode: MAC0 uses radio_a to configure the 1st PHY path_A, */
+ /* MAC1 uses radio_b to configure the 2nd PHY path_A. */
+ /* DMDP: MAC0 on the G band, MAC1 on the A band. */
+ if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G &&
+ rtlhal->interfaceindex == 0) {
+ /* MAC0 needs PHY1 to load radio_b.txt;
+ * the driver uses DBI to write it. */
+ if (rtl92d_phy_enable_anotherphy(hw, true)) {
+ rtlphy->num_total_rfpath = 2;
+ mac0_initradiob_first = true;
+ } else {
+ /* If MAC1 is already on, radio_a.txt and
+ * radio_b.txt are assumed to have been loaded. */
+ return rtstatus;
+ }
+ } else if (rtlhal->current_bandtype == BAND_ON_5G &&
+ rtlhal->interfaceindex == 1) {
+ /* MAC1 needs PHY0 to load radio_a.txt;
+ * the driver uses DBI to write it. */
+ if (rtl92d_phy_enable_anotherphy(hw, false)) {
+ rtlphy->num_total_rfpath = 2;
+ mac1_initradioa_first = true;
+ } else {
+ /* If MAC0 is already on, radio_a.txt and
+ * radio_b.txt are assumed to have been loaded. */
+ return rtstatus;
+ }
+ } else if (rtlhal->interfaceindex == 1) {
+ /* MAC0 is enabled, so only initialize radio B. */
+ true_bpath = true;
+ }
+ }
+
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+ /* Mac1 use PHY0 write */
+ if (mac1_initradioa_first) {
+ if (rfpath == RF90_PATH_A) {
+ rtlhal->during_mac1init_radioa = true;
+ need_pwrdown_radioa = true;
+ } else if (rfpath == RF90_PATH_B) {
+ rtlhal->during_mac1init_radioa = false;
+ mac1_initradioa_first = false;
+ rfpath = RF90_PATH_A;
+ true_bpath = true;
+ rtlphy->num_total_rfpath = 1;
+ }
+ } else if (mac0_initradiob_first) {
+ /* Mac0 use PHY1 write */
+ if (rfpath == RF90_PATH_A)
+ rtlhal->during_mac0init_radiob = false;
+ if (rfpath == RF90_PATH_B) {
+ rtlhal->during_mac0init_radiob = true;
+ mac0_initradiob_first = false;
+ need_pwrdown_radiob = true;
+ rfpath = RF90_PATH_A;
+ true_bpath = true;
+ rtlphy->num_total_rfpath = 1;
+ }
+ }
+ pphyreg = &rtlphy->phyreg_def[rfpath];
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV << 16);
+ break;
+ }
+ rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+ udelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+ udelay(1);
+ /* Set bit number of Address and Data for RF register */
+ /* Set 1 to 4 bits for 8255 */
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
+ B3WIREADDRESSLENGTH, 0x0);
+ udelay(1);
+ /* Set 0 to 12 bits for 8255 */
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+ udelay(1);
+ switch (rfpath) {
+ case RF90_PATH_A:
+ if (true_bpath)
+ rtstatus = rtl92d_phy_config_rf_with_headerfile(
+ hw, radiob_txt,
+ (enum radio_path)rfpath);
+ else
+ rtstatus = rtl92d_phy_config_rf_with_headerfile(
+ hw, radioa_txt,
+ (enum radio_path)rfpath);
+ break;
+ case RF90_PATH_B:
+ rtstatus =
+ rtl92d_phy_config_rf_with_headerfile(hw, radiob_txt,
+ (enum radio_path) rfpath);
+ break;
+ case RF90_PATH_C:
+ break;
+ case RF90_PATH_D:
+ break;
+ }
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV,
+ u4_regvalue);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16,
+ u4_regvalue);
+ break;
+ }
+ if (rtstatus != true) {
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ ("Radio[%d] Fail!!", rfpath));
+ goto phy_rf_cfg_fail;
+ }
+
+ }
+
+ /* Check again whether MAC0 is enabled; if it is,
+ * do not power down radio A. */
+ /* Check again whether MAC1 is enabled; if it is,
+ * do not power down radio B. */
+ if (need_pwrdown_radioa)
+ rtl92d_phy_powerdown_anotherphy(hw, false);
+ else if (need_pwrdown_radiob)
+ rtl92d_phy_powerdown_anotherphy(hw, true);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("<---\n"));
+ return rtstatus;
+
+phy_rf_cfg_fail:
+ return rtstatus;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
new file mode 100644
index 00000000000..74b9cfc39a8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.h
@@ -0,0 +1,44 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92D_RF_H__
+#define __RTL92D_RF_H__
+
+extern void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
+ u8 bandwidth);
+extern void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+extern void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+extern bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
+extern bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
+extern void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw,
+ bool bmac0);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
new file mode 100644
index 00000000000..351765df517
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -0,0 +1,423 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/vmalloc.h>
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "hw.h"
+#include "sw.h"
+#include "trx.h"
+#include "led.h"
+
+static void rtl92d_init_aspm_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+ /* Disable ASPM for AMD by default. */
+ rtlpci->const_amdpci_aspm = 0;
+
+ /*
+ * ASPM PS mode.
+ * 0 - Disable ASPM,
+ * 1 - Enable ASPM without Clock Req,
+ * 2 - Enable ASPM with Clock Req,
+ * 3 - Always Enable ASPM with Clock Req,
+ * 4 - Always Enable ASPM without Clock Req.
+ * Set default to RTL8192CE:3 RTL8192E:2
+ */
+ rtlpci->const_pci_aspm = 3;
+
+ /*Setting for PCI-E device */
+ rtlpci->const_devicepci_aspm_setting = 0x03;
+
+ /*Setting for PCI-E bridge */
+ rtlpci->const_hostpci_aspm_setting = 0x02;
+
+ /*
+ * In Hw/Sw Radio Off situation.
+ * 0 - Default,
+ * 1 - From ASPM setting without low Mac Pwr,
+ * 2 - From ASPM setting with low Mac Pwr,
+ * 3 - Bus D3
+ * set default to RTL8192CE:0 RTL8192SE:2
+ */
+ rtlpci->const_hwsw_rfoff_d3 = 0;
+
+ /*
+ * This setting works for those device with
+ * backdoor ASPM setting such as EPHY setting.
+ * 0 - Not support ASPM,
+ * 1 - Support ASPM,
+ * 2 - According to chipset.
+ */
+ rtlpci->const_support_pciaspm = 1;
+}
+
+static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
+{
+ int err;
+ u8 tid;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ const struct firmware *firmware;
+ static int header_print;
+
+ rtlpriv->dm.dm_initialgain_enable = true;
+ rtlpriv->dm.dm_flag = 0;
+ rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.thermalvalue = 0;
+ rtlpriv->dm.useramask = 1;
+
+ /* dual mac */
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
+ rtlpriv->phy.current_channel = 36;
+ else
+ rtlpriv->phy.current_channel = 1;
+
+ if (rtlpriv->rtlhal.macphymode != SINGLEMAC_SINGLEPHY) {
+ rtlpriv->rtlhal.disable_amsdu_8k = true;
+ /* No long RX - reduce fragmentation */
+ rtlpci->rxbuffersize = 4096;
+ }
+
+ rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
+
+ rtlpci->receive_config = (
+ RCR_APPFCS
+ | RCR_AMF
+ | RCR_ADF
+ | RCR_APP_MIC
+ | RCR_APP_ICV
+ | RCR_AICV
+ | RCR_ACRC32
+ | RCR_AB
+ | RCR_AM
+ | RCR_APM
+ | RCR_APP_PHYST_RXFF
+ | RCR_HTC_LOC_CTRL
+ );
+
+ rtlpci->irq_mask[0] = (u32) (
+ IMR_ROK
+ | IMR_VODOK
+ | IMR_VIDOK
+ | IMR_BEDOK
+ | IMR_BKDOK
+ | IMR_MGNTDOK
+ | IMR_HIGHDOK
+ | IMR_BDOK
+ | IMR_RDU
+ | IMR_RXFOVW
+ );
+
+ rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD);
+
+ /* for LPS & IPS */
+ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+ rtlpriv->psc.reg_fwctrl_lps = 3;
+ rtlpriv->psc.reg_max_lps_awakeintvl = 5;
+ /* For ASPM: ASPM can be disabled by setting
+ * const_support_pciaspm = 0. */
+ rtl92d_init_aspm_vars(hw);
+
+ if (rtlpriv->psc.reg_fwctrl_lps == 1)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
+ else if (rtlpriv->psc.reg_fwctrl_lps == 2)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
+ else if (rtlpriv->psc.reg_fwctrl_lps == 3)
+ rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
+
+ /* for firmware buf */
+ rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
+ if (!rtlpriv->rtlhal.pfirmware) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Can't alloc buffer for fw.\n"));
+ return 1;
+ }
+
+ if (!header_print) {
+ pr_info("Driver for Realtek RTL8192DE WLAN interface\n");
+ pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
+ header_print++;
+ }
+ /* request fw */
+ err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
+ rtlpriv->io.dev);
+ if (err) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Failed to request firmware!\n"));
+ return 1;
+ }
+ if (firmware->size > 0x8000) {
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ ("Firmware is too big!\n"));
+ release_firmware(firmware);
+ return 1;
+ }
+ memcpy(rtlpriv->rtlhal.pfirmware, firmware->data, firmware->size);
+ rtlpriv->rtlhal.fwsize = firmware->size;
+ release_firmware(firmware);
+
+ /* for early mode */
+ rtlpriv->rtlhal.earlymode_enable = true;
+ for (tid = 0; tid < 8; tid++)
+ skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]);
+ return 0;
+}
+
+static void rtl92d_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tid;
+
+ if (rtlpriv->rtlhal.pfirmware) {
+ vfree(rtlpriv->rtlhal.pfirmware);
+ rtlpriv->rtlhal.pfirmware = NULL;
+ }
+ for (tid = 0; tid < 8; tid++)
+ skb_queue_purge(&rtlpriv->mac80211.skb_waitq[tid]);
+}
+
+static struct rtl_hal_ops rtl8192de_hal_ops = {
+ .init_sw_vars = rtl92d_init_sw_vars,
+ .deinit_sw_vars = rtl92d_deinit_sw_vars,
+ .read_eeprom_info = rtl92de_read_eeprom_info,
+ .interrupt_recognized = rtl92de_interrupt_recognized,
+ .hw_init = rtl92de_hw_init,
+ .hw_disable = rtl92de_card_disable,
+ .hw_suspend = rtl92de_suspend,
+ .hw_resume = rtl92de_resume,
+ .enable_interrupt = rtl92de_enable_interrupt,
+ .disable_interrupt = rtl92de_disable_interrupt,
+ .set_network_type = rtl92de_set_network_type,
+ .set_chk_bssid = rtl92de_set_check_bssid,
+ .set_qos = rtl92de_set_qos,
+ .set_bcn_reg = rtl92de_set_beacon_related_registers,
+ .set_bcn_intv = rtl92de_set_beacon_interval,
+ .update_interrupt_mask = rtl92de_update_interrupt_mask,
+ .get_hw_reg = rtl92de_get_hw_reg,
+ .set_hw_reg = rtl92de_set_hw_reg,
+ .update_rate_tbl = rtl92de_update_hal_rate_tbl,
+ .fill_tx_desc = rtl92de_tx_fill_desc,
+ .fill_tx_cmddesc = rtl92de_tx_fill_cmddesc,
+ .query_rx_desc = rtl92de_rx_query_desc,
+ .set_channel_access = rtl92de_update_channel_access_setting,
+ .radio_onoff_checking = rtl92de_gpio_radio_on_off_checking,
+ .set_bw_mode = rtl92d_phy_set_bw_mode,
+ .switch_channel = rtl92d_phy_sw_chnl,
+ .dm_watchdog = rtl92d_dm_watchdog,
+ .scan_operation_backup = rtl92d_phy_scan_operation_backup,
+ .set_rf_power_state = rtl92d_phy_set_rf_power_state,
+ .led_control = rtl92de_led_control,
+ .set_desc = rtl92de_set_desc,
+ .get_desc = rtl92de_get_desc,
+ .tx_polling = rtl92de_tx_polling,
+ .enable_hw_sec = rtl92de_enable_hw_security_config,
+ .set_key = rtl92de_set_key,
+ .init_sw_leds = rtl92de_init_sw_leds,
+ .get_bbreg = rtl92d_phy_query_bb_reg,
+ .set_bbreg = rtl92d_phy_set_bb_reg,
+ .get_rfreg = rtl92d_phy_query_rf_reg,
+ .set_rfreg = rtl92d_phy_set_rf_reg,
+ .linked_set_reg = rtl92d_linked_set_reg,
+};
+
+static struct rtl_mod_params rtl92de_mod_params = {
+ .sw_crypto = false,
+ .inactiveps = true,
+ .swctrl_lps = true,
+ .fwctrl_lps = false,
+};
+
+static struct rtl_hal_cfg rtl92de_hal_cfg = {
+ .bar_id = 2,
+ .write_readback = true,
+ .name = "rtl8192de",
+ .fw_name = "rtlwifi/rtl8192defw.bin",
+ .ops = &rtl8192de_hal_ops,
+ .mod_params = &rtl92de_mod_params,
+
+ .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+ .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+ .maps[SYS_CLK] = REG_SYS_CLKR,
+ .maps[MAC_RCR_AM] = RCR_AM,
+ .maps[MAC_RCR_AB] = RCR_AB,
+ .maps[MAC_RCR_ACRC32] = RCR_ACRC32,
+ .maps[MAC_RCR_ACF] = RCR_ACF,
+ .maps[MAC_RCR_AAP] = RCR_AAP,
+
+ .maps[EFUSE_TEST] = REG_EFUSE_TEST,
+ .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_CLK] = 0, /* just for 92se */
+ .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+ .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+ .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
+ .maps[EFUSE_ANA8M] = 0, /* just for 92se */
+ .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+ .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
+ .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+
+ .maps[RWCAM] = REG_CAMCMD,
+ .maps[WCAMI] = REG_CAMWRITE,
+ .maps[RCAMO] = REG_CAMREAD,
+ .maps[CAMDBG] = REG_CAMDBG,
+ .maps[SECR] = REG_SECCFG,
+ .maps[SEC_CAM_NONE] = CAM_NONE,
+ .maps[SEC_CAM_WEP40] = CAM_WEP40,
+ .maps[SEC_CAM_TKIP] = CAM_TKIP,
+ .maps[SEC_CAM_AES] = CAM_AES,
+ .maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+ .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+ .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+ .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+ .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+ .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+ .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+ .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
+ .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+ .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+ .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+ .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+ .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+ .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+ .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+ .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
+ .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,
+
+ .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
+ .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
+ .maps[RTL_IMR_BcnInt] = IMR_BcnInt,
+ .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
+ .maps[RTL_IMR_RDU] = IMR_RDU,
+ .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
+ .maps[RTL_IMR_BDOK] = IMR_BDOK,
+ .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
+ .maps[RTL_IMR_TBDER] = IMR_TBDER,
+ .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
+ .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
+ .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
+ .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
+ .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
+ .maps[RTL_IMR_VODOK] = IMR_VODOK,
+ .maps[RTL_IMR_ROK] = IMR_ROK,
+ .maps[RTL_IBSS_INT_MASKS] = (IMR_BcnInt | IMR_TBDOK | IMR_TBDER),
+
+ .maps[RTL_RC_CCK_RATE1M] = DESC92D_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC92D_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC92D_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC92D_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC92D_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC92D_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC92D_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC92D_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC92D_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC92D_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC92D_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC92D_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC92D_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC92D_RATEMCS15,
+};
+
+static struct pci_device_id rtl92de_pci_ids[] __devinitdata = {
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8193, rtl92de_hal_cfg)},
+ {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x002B, rtl92de_hal_cfg)},
+ {},
+};
+
+MODULE_DEVICE_TABLE(pci, rtl92de_pci_ids);
+
+MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8192DE 802.11n Dual Mac PCI wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8192defw.bin");
+
+module_param_named(swenc, rtl92de_mod_params.sw_crypto, bool, 0444);
+module_param_named(ips, rtl92de_mod_params.inactiveps, bool, 0444);
+module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
+module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
+MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+MODULE_PARM_DESC(ips, "Set to 0 to disable inactive power save (default 1)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
+MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
+
+static struct pci_driver rtl92de_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = rtl92de_pci_ids,
+ .probe = rtl_pci_probe,
+ .remove = rtl_pci_disconnect,
+
+#ifdef CONFIG_PM
+ .suspend = rtl_pci_suspend,
+ .resume = rtl_pci_resume,
+#endif
+
+};
+
+/* Global spinlocks to serialize dual-MAC register operations
+ * that may occur at the same time */
+spinlock_t globalmutex_power;
+spinlock_t globalmutex_for_fwdownload;
+spinlock_t globalmutex_for_power_and_efuse;
+
+static int __init rtl92de_module_init(void)
+{
+ int ret = 0;
+
+ spin_lock_init(&globalmutex_power);
+ spin_lock_init(&globalmutex_for_fwdownload);
+ spin_lock_init(&globalmutex_for_power_and_efuse);
+
+ ret = pci_register_driver(&rtl92de_driver);
+ if (ret)
+ RT_ASSERT(false, (": No device found\n"));
+ return ret;
+}
+
+static void __exit rtl92de_module_exit(void)
+{
+ pci_unregister_driver(&rtl92de_driver);
+}
+
+module_init(rtl92de_module_init);
+module_exit(rtl92de_module_exit);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.h b/drivers/net/wireless/rtlwifi/rtl8192de/sw.h
new file mode 100644
index 00000000000..c95e47de134
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.h
@@ -0,0 +1,37 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92DE_SW_H__
+#define __RTL92DE_SW_H__
+
+extern spinlock_t globalmutex_power;
+extern spinlock_t globalmutex_for_fwdownload;
+extern spinlock_t globalmutex_for_power_and_efuse;
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/table.c b/drivers/net/wireless/rtlwifi/rtl8192de/table.c
new file mode 100644
index 00000000000..bad7f9449ec
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/table.c
@@ -0,0 +1,1690 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ * Created on 2010/12/23, 6:38
+ *****************************************************************************/
+
+#include <linux/types.h>
+
+#include "table.h"
+
+u32 rtl8192de_phy_reg_2tarray[PHY_REG_2T_ARRAYLENGTH] = {
+ 0x024, 0x0011800d,
+ 0x028, 0x00ffdb83,
+ 0x014, 0x088ba955,
+ 0x010, 0x49022b03,
+ 0x800, 0x80040002,
+ 0x804, 0x00000003,
+ 0x808, 0x0000fc00,
+ 0x80c, 0x0000000a,
+ 0x810, 0x80706388,
+ 0x814, 0x020c3d10,
+ 0x818, 0x02200385,
+ 0x81c, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390004,
+ 0x828, 0x01000100,
+ 0x82c, 0x00390004,
+ 0x830, 0x27272727,
+ 0x834, 0x27272727,
+ 0x838, 0x27272727,
+ 0x83c, 0x27272727,
+ 0x840, 0x00010000,
+ 0x844, 0x00010000,
+ 0x848, 0x27272727,
+ 0x84c, 0x27272727,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569a569a,
+ 0x85c, 0x0c1b25a4,
+ 0x860, 0x66e60230,
+ 0x864, 0x061f0130,
+ 0x868, 0x27272727,
+ 0x86c, 0x272b2b2b,
+ 0x870, 0x07000700,
+ 0x874, 0x22188000,
+ 0x878, 0x08080808,
+ 0x87c, 0x00007ff8,
+ 0x880, 0xc0083070,
+ 0x884, 0x00000cd5,
+ 0x888, 0x00000000,
+ 0x88c, 0xcc0000c0,
+ 0x890, 0x00000800,
+ 0x894, 0xfffffffe,
+ 0x898, 0x40302010,
+ 0x89c, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90c, 0x81121313,
+ 0xa00, 0x00d047c8,
+ 0xa04, 0x80ff000c,
+ 0xa08, 0x8c838300,
+ 0xa0c, 0x2e68120f,
+ 0xa10, 0x9500bb78,
+ 0xa14, 0x11144028,
+ 0xa18, 0x00881117,
+ 0xa1c, 0x89140f00,
+ 0xa20, 0x1a1b0000,
+ 0xa24, 0x090e1317,
+ 0xa28, 0x00000204,
+ 0xa2c, 0x00d30000,
+ 0xa70, 0x101fbf00,
+ 0xa74, 0x00000007,
+ 0xc00, 0x40071d40,
+ 0xc04, 0x03a05633,
+ 0xc08, 0x001000e4,
+ 0xc0c, 0x6c6c6c6c,
+ 0xc10, 0x08800000,
+ 0xc14, 0x40000100,
+ 0xc18, 0x08800000,
+ 0xc1c, 0x40000100,
+ 0xc20, 0x00000000,
+ 0xc24, 0x00000000,
+ 0xc28, 0x00000000,
+ 0xc2c, 0x00000000,
+ 0xc30, 0x69e9ac44,
+ 0xc34, 0x469652cf,
+ 0xc38, 0x49795994,
+ 0xc3c, 0x0a979718,
+ 0xc40, 0x1f7c403f,
+ 0xc44, 0x000100b7,
+ 0xc48, 0xec020107,
+ 0xc4c, 0x007f037f,
+ 0xc50, 0x69543420,
+ 0xc54, 0x43bc009e,
+ 0xc58, 0x69543420,
+ 0xc5c, 0x433c00a8,
+ 0xc60, 0x00000000,
+ 0xc64, 0x5116848b,
+ 0xc68, 0x47c00bff,
+ 0xc6c, 0x00000036,
+ 0xc70, 0x2c7f000d,
+ 0xc74, 0x058610db,
+ 0xc78, 0x0000001f,
+ 0xc7c, 0x40b95612,
+ 0xc80, 0x40000100,
+ 0xc84, 0x20f60000,
+ 0xc88, 0x40000100,
+ 0xc8c, 0x20e00000,
+ 0xc90, 0x00121820,
+ 0xc94, 0x00000007,
+ 0xc98, 0x00121820,
+ 0xc9c, 0x00007f7f,
+ 0xca0, 0x00000000,
+ 0xca4, 0x00000080,
+ 0xca8, 0x00000000,
+ 0xcac, 0x00000000,
+ 0xcb0, 0x00000000,
+ 0xcb4, 0x00000000,
+ 0xcb8, 0x00000000,
+ 0xcbc, 0x28000000,
+ 0xcc0, 0x00000000,
+ 0xcc4, 0x00000000,
+ 0xcc8, 0x00000000,
+ 0xccc, 0x00000000,
+ 0xcd0, 0x00000000,
+ 0xcd4, 0x00000000,
+ 0xcd8, 0x64b11e20,
+ 0xcdc, 0xe8767533,
+ 0xce0, 0x00222222,
+ 0xce4, 0x00000000,
+ 0xce8, 0x37644302,
+ 0xcec, 0x2f97d40c,
+ 0xd00, 0x00080740,
+ 0xd04, 0x00020403,
+ 0xd08, 0x0000907f,
+ 0xd0c, 0x20010201,
+ 0xd10, 0xa0633333,
+ 0xd14, 0x3333bc43,
+ 0xd18, 0x7a8f5b6b,
+ 0xd2c, 0xcc979975,
+ 0xd30, 0x00000000,
+ 0xd34, 0x80608404,
+ 0xd38, 0x00000000,
+ 0xd3c, 0x00027293,
+ 0xd40, 0x00000000,
+ 0xd44, 0x00000000,
+ 0xd48, 0x00000000,
+ 0xd4c, 0x00000000,
+ 0xd50, 0x6437140a,
+ 0xd54, 0x00000000,
+ 0xd58, 0x00000000,
+ 0xd5c, 0x30032064,
+ 0xd60, 0x4653de68,
+ 0xd64, 0x04518a3c,
+ 0xd68, 0x00002101,
+ 0xd6c, 0x2a201c16,
+ 0xd70, 0x1812362e,
+ 0xd74, 0x322c2220,
+ 0xd78, 0x000e3c24,
+ 0xe00, 0x2a2a2a2a,
+ 0xe04, 0x2a2a2a2a,
+ 0xe08, 0x03902a2a,
+ 0xe10, 0x2a2a2a2a,
+ 0xe14, 0x2a2a2a2a,
+ 0xe18, 0x2a2a2a2a,
+ 0xe1c, 0x2a2a2a2a,
+ 0xe28, 0x00000000,
+ 0xe30, 0x1000dc1f,
+ 0xe34, 0x10008c1f,
+ 0xe38, 0x02140102,
+ 0xe3c, 0x681604c2,
+ 0xe40, 0x01007c00,
+ 0xe44, 0x01004800,
+ 0xe48, 0xfb000000,
+ 0xe4c, 0x000028d1,
+ 0xe50, 0x1000dc1f,
+ 0xe54, 0x10008c1f,
+ 0xe58, 0x02140102,
+ 0xe5c, 0x28160d05,
+ 0xe60, 0x00000010,
+ 0xe68, 0x001b25a4,
+ 0xe6c, 0x63db25a4,
+ 0xe70, 0x63db25a4,
+ 0xe74, 0x0c126da4,
+ 0xe78, 0x0c126da4,
+ 0xe7c, 0x0c126da4,
+ 0xe80, 0x0c126da4,
+ 0xe84, 0x63db25a4,
+ 0xe88, 0x0c126da4,
+ 0xe8c, 0x63db25a4,
+ 0xed0, 0x63db25a4,
+ 0xed4, 0x63db25a4,
+ 0xed8, 0x63db25a4,
+ 0xedc, 0x001b25a4,
+ 0xee0, 0x001b25a4,
+ 0xeec, 0x6fdb25a4,
+ 0xf14, 0x00000003,
+ 0xf1c, 0x00000064,
+ 0xf4c, 0x00000004,
+ 0xf00, 0x00000300,
+};
+
+u32 rtl8192de_phy_reg_array_pg[PHY_REG_ARRAY_PG_LENGTH] = {
+ 0xe00, 0xffffffff, 0x07090c0c,
+ 0xe04, 0xffffffff, 0x01020405,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x0b0c0c0e,
+ 0xe14, 0xffffffff, 0x01030506,
+ 0xe18, 0xffffffff, 0x0b0c0d0e,
+ 0xe1c, 0xffffffff, 0x01030509,
+ 0x830, 0xffffffff, 0x07090c0c,
+ 0x834, 0xffffffff, 0x01020405,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x0b0c0c0e,
+ 0x848, 0xffffffff, 0x01030506,
+ 0x84c, 0xffffffff, 0x0b0c0d0e,
+ 0x868, 0xffffffff, 0x01030509,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x06060606,
+ 0xe14, 0xffffffff, 0x00020406,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x06060606,
+ 0x848, 0xffffffff, 0x00020406,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+};
+
+u32 rtl8192de_radioa_2tarray[RADIOA_2T_ARRAYLENGTH] = {
+ 0x000, 0x00030000,
+ 0x001, 0x00030000,
+ 0x002, 0x00000000,
+ 0x003, 0x00018c63,
+ 0x004, 0x00018c63,
+ 0x008, 0x00084000,
+ 0x00b, 0x0001c000,
+ 0x00e, 0x00018c67,
+ 0x00f, 0x00000851,
+ 0x014, 0x00021440,
+ 0x018, 0x00017524,
+ 0x019, 0x00000000,
+ 0x01d, 0x000a1290,
+ 0x023, 0x00001558,
+ 0x01a, 0x00030a99,
+ 0x01b, 0x00040b00,
+ 0x01c, 0x000fc339,
+ 0x03a, 0x000a57eb,
+ 0x03b, 0x00020000,
+ 0x03c, 0x000ff454,
+ 0x020, 0x0000aa52,
+ 0x021, 0x00054000,
+ 0x040, 0x0000aa52,
+ 0x041, 0x00014000,
+ 0x025, 0x000803be,
+ 0x026, 0x000fc638,
+ 0x027, 0x00077c18,
+ 0x028, 0x000de471,
+ 0x029, 0x000d7110,
+ 0x02a, 0x0008cb04,
+ 0x02b, 0x0004128b,
+ 0x02c, 0x00001840,
+ 0x043, 0x0002444f,
+ 0x044, 0x0001adb0,
+ 0x045, 0x00056467,
+ 0x046, 0x0008992c,
+ 0x047, 0x0000452c,
+ 0x048, 0x000f9c43,
+ 0x049, 0x00002e0c,
+ 0x04a, 0x000546eb,
+ 0x04b, 0x0008966c,
+ 0x04c, 0x0000dde9,
+ 0x018, 0x00007401,
+ 0x000, 0x00070000,
+ 0x012, 0x000dc000,
+ 0x012, 0x00090000,
+ 0x012, 0x00051000,
+ 0x012, 0x00012000,
+ 0x013, 0x000287b7,
+ 0x013, 0x000247ab,
+ 0x013, 0x0002079f,
+ 0x013, 0x0001c793,
+ 0x013, 0x0001839b,
+ 0x013, 0x00014392,
+ 0x013, 0x0001019a,
+ 0x013, 0x0000c191,
+ 0x013, 0x00008194,
+ 0x013, 0x000040a0,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x016, 0x000e1330,
+ 0x016, 0x000a1330,
+ 0x016, 0x00061330,
+ 0x016, 0x00021330,
+ 0x018, 0x00017524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b0,
+ 0x013, 0x0000c0a4,
+ 0x013, 0x0000b02c,
+ 0x013, 0x00004020,
+ 0x013, 0x00000014,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x018, 0x00037524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b0,
+ 0x013, 0x0000c0a4,
+ 0x013, 0x0000b02c,
+ 0x013, 0x00004020,
+ 0x013, 0x00000014,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x018, 0x00057568,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b0,
+ 0x013, 0x0000c0a4,
+ 0x013, 0x0000b02c,
+ 0x013, 0x00004020,
+ 0x013, 0x00000014,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x030, 0x0004470f,
+ 0x031, 0x00044ff0,
+ 0x032, 0x00000070,
+ 0x033, 0x000dd480,
+ 0x034, 0x000ffac0,
+ 0x035, 0x000b80c0,
+ 0x036, 0x00077000,
+ 0x037, 0x00064ff2,
+ 0x038, 0x000e7661,
+ 0x039, 0x00000e90,
+ 0x000, 0x00030000,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088009,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088001,
+ 0x01f, 0x00080000,
+ 0x0fe, 0x00000000,
+ 0x018, 0x00097524,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x02b, 0x00041289,
+ 0x0fe, 0x00000000,
+ 0x02d, 0x0006aaaa,
+ 0x02e, 0x000b4d01,
+ 0x02d, 0x00080000,
+ 0x02e, 0x00004d02,
+ 0x02d, 0x00095555,
+ 0x02e, 0x00054d03,
+ 0x02d, 0x000aaaaa,
+ 0x02e, 0x000b4d04,
+ 0x02d, 0x000c0000,
+ 0x02e, 0x00004d05,
+ 0x02d, 0x000d5555,
+ 0x02e, 0x00054d06,
+ 0x02d, 0x000eaaaa,
+ 0x02e, 0x000b4d07,
+ 0x02d, 0x00000000,
+ 0x02e, 0x00005108,
+ 0x02d, 0x00015555,
+ 0x02e, 0x00055109,
+ 0x02d, 0x0002aaaa,
+ 0x02e, 0x000b510a,
+ 0x02d, 0x00040000,
+ 0x02e, 0x0000510b,
+ 0x02d, 0x00055555,
+ 0x02e, 0x0005510c,
+};
+
+u32 rtl8192de_radiob_2tarray[RADIOB_2T_ARRAYLENGTH] = {
+ 0x000, 0x00030000,
+ 0x001, 0x00030000,
+ 0x002, 0x00000000,
+ 0x003, 0x00018c63,
+ 0x004, 0x00018c63,
+ 0x008, 0x00084000,
+ 0x00b, 0x0001c000,
+ 0x00e, 0x00018c67,
+ 0x00f, 0x00000851,
+ 0x014, 0x00021440,
+ 0x018, 0x00007401,
+ 0x019, 0x00000060,
+ 0x01d, 0x000a1290,
+ 0x023, 0x00001558,
+ 0x01a, 0x00030a99,
+ 0x01b, 0x00040b00,
+ 0x01c, 0x000fc339,
+ 0x03a, 0x000a57eb,
+ 0x03b, 0x00020000,
+ 0x03c, 0x000ff454,
+ 0x020, 0x0000aa52,
+ 0x021, 0x00054000,
+ 0x040, 0x0000aa52,
+ 0x041, 0x00014000,
+ 0x025, 0x000803be,
+ 0x026, 0x000fc638,
+ 0x027, 0x00077c18,
+ 0x028, 0x000d1c31,
+ 0x029, 0x000d7110,
+ 0x02a, 0x000aeb04,
+ 0x02b, 0x0004128b,
+ 0x02c, 0x00001840,
+ 0x043, 0x0002444f,
+ 0x044, 0x0001adb0,
+ 0x045, 0x00056467,
+ 0x046, 0x0008992c,
+ 0x047, 0x0000452c,
+ 0x048, 0x000f9c43,
+ 0x049, 0x00002e0c,
+ 0x04a, 0x000546eb,
+ 0x04b, 0x0008966c,
+ 0x04c, 0x0000dde9,
+ 0x018, 0x00007401,
+ 0x000, 0x00070000,
+ 0x012, 0x000dc000,
+ 0x012, 0x00090000,
+ 0x012, 0x00051000,
+ 0x012, 0x00012000,
+ 0x013, 0x000287b7,
+ 0x013, 0x000247ab,
+ 0x013, 0x0002079f,
+ 0x013, 0x0001c793,
+ 0x013, 0x0001839b,
+ 0x013, 0x00014392,
+ 0x013, 0x0001019a,
+ 0x013, 0x0000c191,
+ 0x013, 0x00008194,
+ 0x013, 0x000040a0,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x016, 0x000e1330,
+ 0x016, 0x000a1330,
+ 0x016, 0x00061330,
+ 0x016, 0x00021330,
+ 0x018, 0x00017524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b0,
+ 0x013, 0x0000c0a4,
+ 0x013, 0x0000b02c,
+ 0x013, 0x00004020,
+ 0x013, 0x00000014,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x018, 0x00037524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b0,
+ 0x013, 0x0000c0a4,
+ 0x013, 0x0000b02c,
+ 0x013, 0x00004020,
+ 0x013, 0x00000014,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x018, 0x00057524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b0,
+ 0x013, 0x0000c0a4,
+ 0x013, 0x0000b02c,
+ 0x013, 0x00004020,
+ 0x013, 0x00000014,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x030, 0x0004470f,
+ 0x031, 0x00044ff0,
+ 0x032, 0x00000070,
+ 0x033, 0x000dd480,
+ 0x034, 0x000ffac0,
+ 0x035, 0x000b80c0,
+ 0x036, 0x00077000,
+ 0x037, 0x00064ff2,
+ 0x038, 0x000e7661,
+ 0x039, 0x00000e90,
+ 0x000, 0x00030000,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088009,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088001,
+ 0x01f, 0x00080000,
+ 0x0fe, 0x00000000,
+ 0x018, 0x00087401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x02b, 0x00041289,
+ 0x0fe, 0x00000000,
+ 0x02d, 0x00066666,
+ 0x02e, 0x00064001,
+ 0x02d, 0x00091111,
+ 0x02e, 0x00014002,
+ 0x02d, 0x000bbbbb,
+ 0x02e, 0x000b4003,
+ 0x02d, 0x000e6666,
+ 0x02e, 0x00064004,
+ 0x02d, 0x00088888,
+ 0x02e, 0x00084005,
+ 0x02d, 0x0009dddd,
+ 0x02e, 0x000d4006,
+ 0x02d, 0x000b3333,
+ 0x02e, 0x00034007,
+ 0x02d, 0x00048888,
+ 0x02e, 0x00084408,
+ 0x02d, 0x000bbbbb,
+ 0x02e, 0x000b4409,
+ 0x02d, 0x000e6666,
+ 0x02e, 0x0006440a,
+ 0x02d, 0x00011111,
+ 0x02e, 0x0001480b,
+ 0x02d, 0x0003bbbb,
+ 0x02e, 0x000b480c,
+ 0x02d, 0x00066666,
+ 0x02e, 0x0006480d,
+ 0x02d, 0x000ccccc,
+ 0x02e, 0x000c480e,
+};
+
+u32 rtl8192de_radioa_2t_int_paarray[RADIOA_2T_INT_PA_ARRAYLENGTH] = {
+ 0x000, 0x00030000,
+ 0x001, 0x00030000,
+ 0x002, 0x00000000,
+ 0x003, 0x00018c63,
+ 0x004, 0x00018c63,
+ 0x008, 0x00084000,
+ 0x00b, 0x0001c000,
+ 0x00e, 0x00018c67,
+ 0x00f, 0x00000851,
+ 0x014, 0x00021440,
+ 0x018, 0x00017524,
+ 0x019, 0x00000000,
+ 0x01d, 0x000a1290,
+ 0x023, 0x00001558,
+ 0x01a, 0x00030a99,
+ 0x01b, 0x00040b00,
+ 0x01c, 0x000fc339,
+ 0x03a, 0x000a57eb,
+ 0x03b, 0x00020000,
+ 0x03c, 0x000ff454,
+ 0x020, 0x0000aa52,
+ 0x021, 0x00054000,
+ 0x040, 0x0000aa52,
+ 0x041, 0x00014000,
+ 0x025, 0x000803be,
+ 0x026, 0x000fc638,
+ 0x027, 0x00077c18,
+ 0x028, 0x000de471,
+ 0x029, 0x000d7110,
+ 0x02a, 0x0008eb04,
+ 0x02b, 0x0004128b,
+ 0x02c, 0x00001840,
+ 0x043, 0x0002444f,
+ 0x044, 0x0001adb0,
+ 0x045, 0x00056467,
+ 0x046, 0x0008992c,
+ 0x047, 0x0000452c,
+ 0x048, 0x000c0443,
+ 0x049, 0x00000730,
+ 0x04a, 0x00050f0f,
+ 0x04b, 0x000896ee,
+ 0x04c, 0x0000ddee,
+ 0x018, 0x00007401,
+ 0x000, 0x00070000,
+ 0x012, 0x000dc000,
+ 0x012, 0x00090000,
+ 0x012, 0x00051000,
+ 0x012, 0x00012000,
+ 0x013, 0x000287b7,
+ 0x013, 0x000247ab,
+ 0x013, 0x0002079f,
+ 0x013, 0x0001c793,
+ 0x013, 0x0001839b,
+ 0x013, 0x00014392,
+ 0x013, 0x0001019a,
+ 0x013, 0x0000c191,
+ 0x013, 0x00008194,
+ 0x013, 0x000040a0,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x016, 0x000e1330,
+ 0x016, 0x000a1330,
+ 0x016, 0x00061330,
+ 0x016, 0x00021330,
+ 0x018, 0x00017524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x018, 0x00037564,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x018, 0x00057595,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x030, 0x0004470f,
+ 0x031, 0x00044ff0,
+ 0x032, 0x00000070,
+ 0x033, 0x000dd480,
+ 0x034, 0x000ffac0,
+ 0x035, 0x000b80c0,
+ 0x036, 0x00077000,
+ 0x037, 0x00064ff2,
+ 0x038, 0x000e7661,
+ 0x039, 0x00000e90,
+ 0x000, 0x00030000,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088009,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088001,
+ 0x01f, 0x00080000,
+ 0x0fe, 0x00000000,
+ 0x018, 0x00097524,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x02b, 0x00041289,
+ 0x0fe, 0x00000000,
+ 0x02d, 0x0006aaaa,
+ 0x02e, 0x000b4d01,
+ 0x02d, 0x00080000,
+ 0x02e, 0x00004d02,
+ 0x02d, 0x00095555,
+ 0x02e, 0x00054d03,
+ 0x02d, 0x000aaaaa,
+ 0x02e, 0x000b4d04,
+ 0x02d, 0x000c0000,
+ 0x02e, 0x00004d05,
+ 0x02d, 0x000d5555,
+ 0x02e, 0x00054d06,
+ 0x02d, 0x000eaaaa,
+ 0x02e, 0x000b4d07,
+ 0x02d, 0x00000000,
+ 0x02e, 0x00005108,
+ 0x02d, 0x00015555,
+ 0x02e, 0x00055109,
+ 0x02d, 0x0002aaaa,
+ 0x02e, 0x000b510a,
+ 0x02d, 0x00040000,
+ 0x02e, 0x0000510b,
+ 0x02d, 0x00055555,
+ 0x02e, 0x0005510c,
+};
+
+u32 rtl8192de_radiob_2t_int_paarray[RADIOB_2T_INT_PA_ARRAYLENGTH] = {
+ 0x000, 0x00030000,
+ 0x001, 0x00030000,
+ 0x002, 0x00000000,
+ 0x003, 0x00018c63,
+ 0x004, 0x00018c63,
+ 0x008, 0x00084000,
+ 0x00b, 0x0001c000,
+ 0x00e, 0x00018c67,
+ 0x00f, 0x00000851,
+ 0x014, 0x00021440,
+ 0x018, 0x00007401,
+ 0x019, 0x00000060,
+ 0x01d, 0x000a1290,
+ 0x023, 0x00001558,
+ 0x01a, 0x00030a99,
+ 0x01b, 0x00040b00,
+ 0x01c, 0x000fc339,
+ 0x03a, 0x000a57eb,
+ 0x03b, 0x00020000,
+ 0x03c, 0x000ff454,
+ 0x020, 0x0000aa52,
+ 0x021, 0x00054000,
+ 0x040, 0x0000aa52,
+ 0x041, 0x00014000,
+ 0x025, 0x000803be,
+ 0x026, 0x000fc638,
+ 0x027, 0x00077c18,
+ 0x028, 0x000d1c31,
+ 0x029, 0x000d7110,
+ 0x02a, 0x000aeb04,
+ 0x02b, 0x0004128b,
+ 0x02c, 0x00001840,
+ 0x043, 0x0002444f,
+ 0x044, 0x0001adb0,
+ 0x045, 0x00056467,
+ 0x046, 0x0008992c,
+ 0x047, 0x0000452c,
+ 0x048, 0x000c0443,
+ 0x049, 0x00000730,
+ 0x04a, 0x00050f0f,
+ 0x04b, 0x000896ee,
+ 0x04c, 0x0000ddee,
+ 0x018, 0x00007401,
+ 0x000, 0x00070000,
+ 0x012, 0x000dc000,
+ 0x012, 0x00090000,
+ 0x012, 0x00051000,
+ 0x012, 0x00012000,
+ 0x013, 0x000287b7,
+ 0x013, 0x000247ab,
+ 0x013, 0x0002079f,
+ 0x013, 0x0001c793,
+ 0x013, 0x0001839b,
+ 0x013, 0x00014392,
+ 0x013, 0x0001019a,
+ 0x013, 0x0000c191,
+ 0x013, 0x00008194,
+ 0x013, 0x000040a0,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x016, 0x000e1330,
+ 0x016, 0x000a1330,
+ 0x016, 0x00061330,
+ 0x016, 0x00021330,
+ 0x018, 0x00017524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x018, 0x00037564,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x018, 0x00057595,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x030, 0x0004470f,
+ 0x031, 0x00044ff0,
+ 0x032, 0x00000070,
+ 0x033, 0x000dd480,
+ 0x034, 0x000ffac0,
+ 0x035, 0x000b80c0,
+ 0x036, 0x00077000,
+ 0x037, 0x00064ff2,
+ 0x038, 0x000e7661,
+ 0x039, 0x00000e90,
+ 0x000, 0x00030000,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088009,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088001,
+ 0x01f, 0x00080000,
+ 0x0fe, 0x00000000,
+ 0x018, 0x00087401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x02b, 0x00041289,
+ 0x0fe, 0x00000000,
+ 0x02d, 0x00066666,
+ 0x02e, 0x00064001,
+ 0x02d, 0x00091111,
+ 0x02e, 0x00014002,
+ 0x02d, 0x000bbbbb,
+ 0x02e, 0x000b4003,
+ 0x02d, 0x000e6666,
+ 0x02e, 0x00064004,
+ 0x02d, 0x00088888,
+ 0x02e, 0x00084005,
+ 0x02d, 0x0009dddd,
+ 0x02e, 0x000d4006,
+ 0x02d, 0x000b3333,
+ 0x02e, 0x00034007,
+ 0x02d, 0x00048888,
+ 0x02e, 0x00084408,
+ 0x02d, 0x000bbbbb,
+ 0x02e, 0x000b4409,
+ 0x02d, 0x000e6666,
+ 0x02e, 0x0006440a,
+ 0x02d, 0x00011111,
+ 0x02e, 0x0001480b,
+ 0x02d, 0x0003bbbb,
+ 0x02e, 0x000b480c,
+ 0x02d, 0x00066666,
+ 0x02e, 0x0006480d,
+ 0x02d, 0x000ccccc,
+ 0x02e, 0x000c480e,
+};
+
+u32 rtl8192de_mac_2tarray[MAC_2T_ARRAYLENGTH] = {
+ 0x420, 0x00000080,
+ 0x423, 0x00000000,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000006,
+ 0x437, 0x00000007,
+ 0x438, 0x00000000,
+ 0x439, 0x00000000,
+ 0x43a, 0x00000000,
+ 0x43b, 0x00000001,
+ 0x43c, 0x00000004,
+ 0x43d, 0x00000005,
+ 0x43e, 0x00000006,
+ 0x43f, 0x00000007,
+ 0x440, 0x00000050,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000015,
+ 0x445, 0x000000f0,
+ 0x446, 0x0000000f,
+ 0x447, 0x00000000,
+ 0x462, 0x00000008,
+ 0x463, 0x00000003,
+ 0x4c8, 0x000000ff,
+ 0x4c9, 0x00000008,
+ 0x4cc, 0x000000ff,
+ 0x4cd, 0x000000ff,
+ 0x4ce, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000a2,
+ 0x502, 0x0000002f,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000a3,
+ 0x506, 0x0000005e,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002b,
+ 0x509, 0x000000a4,
+ 0x50a, 0x0000005e,
+ 0x50b, 0x00000000,
+ 0x50c, 0x0000004f,
+ 0x50d, 0x000000a4,
+ 0x50e, 0x00000000,
+ 0x50f, 0x00000000,
+ 0x512, 0x0000001c,
+ 0x514, 0x0000000a,
+ 0x515, 0x00000010,
+ 0x516, 0x0000000a,
+ 0x517, 0x00000010,
+ 0x51a, 0x00000016,
+ 0x524, 0x0000000f,
+ 0x525, 0x0000004f,
+ 0x546, 0x00000040,
+ 0x547, 0x00000000,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55a, 0x00000002,
+ 0x55d, 0x000000ff,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000e,
+ 0x609, 0x0000002a,
+ 0x652, 0x00000020,
+ 0x63c, 0x0000000a,
+ 0x63d, 0x0000000a,
+ 0x63e, 0x0000000e,
+ 0x63f, 0x0000000e,
+ 0x66e, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70a, 0x00000065,
+ 0x70b, 0x00000087,
+};
+
+u32 rtl8192de_agctab_array[AGCTAB_ARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7b020001,
+ 0xc78, 0x7b030001,
+ 0xc78, 0x7b040001,
+ 0xc78, 0x7b050001,
+ 0xc78, 0x7b060001,
+ 0xc78, 0x7a070001,
+ 0xc78, 0x79080001,
+ 0xc78, 0x78090001,
+ 0xc78, 0x770a0001,
+ 0xc78, 0x760b0001,
+ 0xc78, 0x750c0001,
+ 0xc78, 0x740d0001,
+ 0xc78, 0x730e0001,
+ 0xc78, 0x720f0001,
+ 0xc78, 0x71100001,
+ 0xc78, 0x70110001,
+ 0xc78, 0x6f120001,
+ 0xc78, 0x6e130001,
+ 0xc78, 0x6d140001,
+ 0xc78, 0x6c150001,
+ 0xc78, 0x6b160001,
+ 0xc78, 0x6a170001,
+ 0xc78, 0x69180001,
+ 0xc78, 0x68190001,
+ 0xc78, 0x671a0001,
+ 0xc78, 0x661b0001,
+ 0xc78, 0x651c0001,
+ 0xc78, 0x641d0001,
+ 0xc78, 0x631e0001,
+ 0xc78, 0x621f0001,
+ 0xc78, 0x61200001,
+ 0xc78, 0x60210001,
+ 0xc78, 0x49220001,
+ 0xc78, 0x48230001,
+ 0xc78, 0x47240001,
+ 0xc78, 0x46250001,
+ 0xc78, 0x45260001,
+ 0xc78, 0x44270001,
+ 0xc78, 0x43280001,
+ 0xc78, 0x42290001,
+ 0xc78, 0x412a0001,
+ 0xc78, 0x402b0001,
+ 0xc78, 0x262c0001,
+ 0xc78, 0x252d0001,
+ 0xc78, 0x242e0001,
+ 0xc78, 0x232f0001,
+ 0xc78, 0x22300001,
+ 0xc78, 0x21310001,
+ 0xc78, 0x20320001,
+ 0xc78, 0x06330001,
+ 0xc78, 0x05340001,
+ 0xc78, 0x04350001,
+ 0xc78, 0x03360001,
+ 0xc78, 0x02370001,
+ 0xc78, 0x01380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x7b400001,
+ 0xc78, 0x7b410001,
+ 0xc78, 0x7a420001,
+ 0xc78, 0x79430001,
+ 0xc78, 0x78440001,
+ 0xc78, 0x77450001,
+ 0xc78, 0x76460001,
+ 0xc78, 0x75470001,
+ 0xc78, 0x74480001,
+ 0xc78, 0x73490001,
+ 0xc78, 0x724a0001,
+ 0xc78, 0x714b0001,
+ 0xc78, 0x704c0001,
+ 0xc78, 0x6f4d0001,
+ 0xc78, 0x6e4e0001,
+ 0xc78, 0x6d4f0001,
+ 0xc78, 0x6c500001,
+ 0xc78, 0x6b510001,
+ 0xc78, 0x6a520001,
+ 0xc78, 0x69530001,
+ 0xc78, 0x68540001,
+ 0xc78, 0x67550001,
+ 0xc78, 0x66560001,
+ 0xc78, 0x65570001,
+ 0xc78, 0x64580001,
+ 0xc78, 0x63590001,
+ 0xc78, 0x625a0001,
+ 0xc78, 0x615b0001,
+ 0xc78, 0x605c0001,
+ 0xc78, 0x485d0001,
+ 0xc78, 0x475e0001,
+ 0xc78, 0x465f0001,
+ 0xc78, 0x45600001,
+ 0xc78, 0x44610001,
+ 0xc78, 0x43620001,
+ 0xc78, 0x42630001,
+ 0xc78, 0x41640001,
+ 0xc78, 0x40650001,
+ 0xc78, 0x27660001,
+ 0xc78, 0x26670001,
+ 0xc78, 0x25680001,
+ 0xc78, 0x24690001,
+ 0xc78, 0x236a0001,
+ 0xc78, 0x226b0001,
+ 0xc78, 0x216c0001,
+ 0xc78, 0x206d0001,
+ 0xc78, 0x206e0001,
+ 0xc78, 0x206f0001,
+ 0xc78, 0x20700001,
+ 0xc78, 0x20710001,
+ 0xc78, 0x20720001,
+ 0xc78, 0x20730001,
+ 0xc78, 0x20740001,
+ 0xc78, 0x20750001,
+ 0xc78, 0x20760001,
+ 0xc78, 0x20770001,
+ 0xc78, 0x20780001,
+ 0xc78, 0x20790001,
+ 0xc78, 0x207a0001,
+ 0xc78, 0x207b0001,
+ 0xc78, 0x207c0001,
+ 0xc78, 0x207d0001,
+ 0xc78, 0x207e0001,
+ 0xc78, 0x207f0001,
+ 0xc78, 0x38000002,
+ 0xc78, 0x38010002,
+ 0xc78, 0x38020002,
+ 0xc78, 0x38030002,
+ 0xc78, 0x38040002,
+ 0xc78, 0x38050002,
+ 0xc78, 0x38060002,
+ 0xc78, 0x38070002,
+ 0xc78, 0x38080002,
+ 0xc78, 0x3c090002,
+ 0xc78, 0x3e0a0002,
+ 0xc78, 0x400b0002,
+ 0xc78, 0x440c0002,
+ 0xc78, 0x480d0002,
+ 0xc78, 0x4c0e0002,
+ 0xc78, 0x500f0002,
+ 0xc78, 0x52100002,
+ 0xc78, 0x56110002,
+ 0xc78, 0x5a120002,
+ 0xc78, 0x5e130002,
+ 0xc78, 0x60140002,
+ 0xc78, 0x60150002,
+ 0xc78, 0x60160002,
+ 0xc78, 0x62170002,
+ 0xc78, 0x62180002,
+ 0xc78, 0x62190002,
+ 0xc78, 0x621a0002,
+ 0xc78, 0x621b0002,
+ 0xc78, 0x621c0002,
+ 0xc78, 0x621d0002,
+ 0xc78, 0x621e0002,
+ 0xc78, 0x621f0002,
+ 0xc78, 0x32000044,
+ 0xc78, 0x32010044,
+ 0xc78, 0x32020044,
+ 0xc78, 0x32030044,
+ 0xc78, 0x32040044,
+ 0xc78, 0x32050044,
+ 0xc78, 0x32060044,
+ 0xc78, 0x32070044,
+ 0xc78, 0x32080044,
+ 0xc78, 0x34090044,
+ 0xc78, 0x350a0044,
+ 0xc78, 0x360b0044,
+ 0xc78, 0x370c0044,
+ 0xc78, 0x380d0044,
+ 0xc78, 0x390e0044,
+ 0xc78, 0x3a0f0044,
+ 0xc78, 0x3e100044,
+ 0xc78, 0x42110044,
+ 0xc78, 0x44120044,
+ 0xc78, 0x46130044,
+ 0xc78, 0x4a140044,
+ 0xc78, 0x4e150044,
+ 0xc78, 0x50160044,
+ 0xc78, 0x55170044,
+ 0xc78, 0x5a180044,
+ 0xc78, 0x5e190044,
+ 0xc78, 0x641a0044,
+ 0xc78, 0x6e1b0044,
+ 0xc78, 0x6e1c0044,
+ 0xc78, 0x6e1d0044,
+ 0xc78, 0x6e1e0044,
+ 0xc78, 0x6e1f0044,
+ 0xc78, 0x6e1f0000,
+};
+
+u32 rtl8192de_agctab_5garray[AGCTAB_5G_ARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7a020001,
+ 0xc78, 0x79030001,
+ 0xc78, 0x78040001,
+ 0xc78, 0x77050001,
+ 0xc78, 0x76060001,
+ 0xc78, 0x75070001,
+ 0xc78, 0x74080001,
+ 0xc78, 0x73090001,
+ 0xc78, 0x720a0001,
+ 0xc78, 0x710b0001,
+ 0xc78, 0x700c0001,
+ 0xc78, 0x6f0d0001,
+ 0xc78, 0x6e0e0001,
+ 0xc78, 0x6d0f0001,
+ 0xc78, 0x6c100001,
+ 0xc78, 0x6b110001,
+ 0xc78, 0x6a120001,
+ 0xc78, 0x69130001,
+ 0xc78, 0x68140001,
+ 0xc78, 0x67150001,
+ 0xc78, 0x66160001,
+ 0xc78, 0x65170001,
+ 0xc78, 0x64180001,
+ 0xc78, 0x63190001,
+ 0xc78, 0x621a0001,
+ 0xc78, 0x611b0001,
+ 0xc78, 0x601c0001,
+ 0xc78, 0x481d0001,
+ 0xc78, 0x471e0001,
+ 0xc78, 0x461f0001,
+ 0xc78, 0x45200001,
+ 0xc78, 0x44210001,
+ 0xc78, 0x43220001,
+ 0xc78, 0x42230001,
+ 0xc78, 0x41240001,
+ 0xc78, 0x40250001,
+ 0xc78, 0x27260001,
+ 0xc78, 0x26270001,
+ 0xc78, 0x25280001,
+ 0xc78, 0x24290001,
+ 0xc78, 0x232a0001,
+ 0xc78, 0x222b0001,
+ 0xc78, 0x212c0001,
+ 0xc78, 0x202d0001,
+ 0xc78, 0x202e0001,
+ 0xc78, 0x202f0001,
+ 0xc78, 0x20300001,
+ 0xc78, 0x20310001,
+ 0xc78, 0x20320001,
+ 0xc78, 0x20330001,
+ 0xc78, 0x20340001,
+ 0xc78, 0x20350001,
+ 0xc78, 0x20360001,
+ 0xc78, 0x20370001,
+ 0xc78, 0x20380001,
+ 0xc78, 0x20390001,
+ 0xc78, 0x203a0001,
+ 0xc78, 0x203b0001,
+ 0xc78, 0x203c0001,
+ 0xc78, 0x203d0001,
+ 0xc78, 0x203e0001,
+ 0xc78, 0x203f0001,
+ 0xc78, 0x32000044,
+ 0xc78, 0x32010044,
+ 0xc78, 0x32020044,
+ 0xc78, 0x32030044,
+ 0xc78, 0x32040044,
+ 0xc78, 0x32050044,
+ 0xc78, 0x32060044,
+ 0xc78, 0x32070044,
+ 0xc78, 0x32080044,
+ 0xc78, 0x34090044,
+ 0xc78, 0x350a0044,
+ 0xc78, 0x360b0044,
+ 0xc78, 0x370c0044,
+ 0xc78, 0x380d0044,
+ 0xc78, 0x390e0044,
+ 0xc78, 0x3a0f0044,
+ 0xc78, 0x3e100044,
+ 0xc78, 0x42110044,
+ 0xc78, 0x44120044,
+ 0xc78, 0x46130044,
+ 0xc78, 0x4a140044,
+ 0xc78, 0x4e150044,
+ 0xc78, 0x50160044,
+ 0xc78, 0x55170044,
+ 0xc78, 0x5a180044,
+ 0xc78, 0x5e190044,
+ 0xc78, 0x641a0044,
+ 0xc78, 0x6e1b0044,
+ 0xc78, 0x6e1c0044,
+ 0xc78, 0x6e1d0044,
+ 0xc78, 0x6e1e0044,
+ 0xc78, 0x6e1f0044,
+ 0xc78, 0x6e1f0000,
+};
+
+u32 rtl8192de_agctab_2garray[AGCTAB_2G_ARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7b020001,
+ 0xc78, 0x7b030001,
+ 0xc78, 0x7b040001,
+ 0xc78, 0x7b050001,
+ 0xc78, 0x7b060001,
+ 0xc78, 0x7a070001,
+ 0xc78, 0x79080001,
+ 0xc78, 0x78090001,
+ 0xc78, 0x770a0001,
+ 0xc78, 0x760b0001,
+ 0xc78, 0x750c0001,
+ 0xc78, 0x740d0001,
+ 0xc78, 0x730e0001,
+ 0xc78, 0x720f0001,
+ 0xc78, 0x71100001,
+ 0xc78, 0x70110001,
+ 0xc78, 0x6f120001,
+ 0xc78, 0x6e130001,
+ 0xc78, 0x6d140001,
+ 0xc78, 0x6c150001,
+ 0xc78, 0x6b160001,
+ 0xc78, 0x6a170001,
+ 0xc78, 0x69180001,
+ 0xc78, 0x68190001,
+ 0xc78, 0x671a0001,
+ 0xc78, 0x661b0001,
+ 0xc78, 0x651c0001,
+ 0xc78, 0x641d0001,
+ 0xc78, 0x631e0001,
+ 0xc78, 0x621f0001,
+ 0xc78, 0x61200001,
+ 0xc78, 0x60210001,
+ 0xc78, 0x49220001,
+ 0xc78, 0x48230001,
+ 0xc78, 0x47240001,
+ 0xc78, 0x46250001,
+ 0xc78, 0x45260001,
+ 0xc78, 0x44270001,
+ 0xc78, 0x43280001,
+ 0xc78, 0x42290001,
+ 0xc78, 0x412a0001,
+ 0xc78, 0x402b0001,
+ 0xc78, 0x262c0001,
+ 0xc78, 0x252d0001,
+ 0xc78, 0x242e0001,
+ 0xc78, 0x232f0001,
+ 0xc78, 0x22300001,
+ 0xc78, 0x21310001,
+ 0xc78, 0x20320001,
+ 0xc78, 0x06330001,
+ 0xc78, 0x05340001,
+ 0xc78, 0x04350001,
+ 0xc78, 0x03360001,
+ 0xc78, 0x02370001,
+ 0xc78, 0x01380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x38000002,
+ 0xc78, 0x38010002,
+ 0xc78, 0x38020002,
+ 0xc78, 0x38030002,
+ 0xc78, 0x38040002,
+ 0xc78, 0x38050002,
+ 0xc78, 0x38060002,
+ 0xc78, 0x38070002,
+ 0xc78, 0x38080002,
+ 0xc78, 0x3c090002,
+ 0xc78, 0x3e0a0002,
+ 0xc78, 0x400b0002,
+ 0xc78, 0x440c0002,
+ 0xc78, 0x480d0002,
+ 0xc78, 0x4c0e0002,
+ 0xc78, 0x500f0002,
+ 0xc78, 0x52100002,
+ 0xc78, 0x56110002,
+ 0xc78, 0x5a120002,
+ 0xc78, 0x5e130002,
+ 0xc78, 0x60140002,
+ 0xc78, 0x60150002,
+ 0xc78, 0x60160002,
+ 0xc78, 0x62170002,
+ 0xc78, 0x62180002,
+ 0xc78, 0x62190002,
+ 0xc78, 0x621a0002,
+ 0xc78, 0x621b0002,
+ 0xc78, 0x621c0002,
+ 0xc78, 0x621d0002,
+ 0xc78, 0x621e0002,
+ 0xc78, 0x621f0002,
+ 0xc78, 0x6e1f0000,
+};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/table.h b/drivers/net/wireless/rtlwifi/rtl8192de/table.h
new file mode 100644
index 00000000000..93f30ca62d8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/table.h
@@ -0,0 +1,57 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ * Created on 2010/ 5/18, 1:41
+ *****************************************************************************/
+
+#ifndef __RTL92DE_TABLE__H_
+#define __RTL92DE_TABLE__H_
+
+/* Created on 2011/ 1/14, 1:35 */
+
+#define PHY_REG_2T_ARRAYLENGTH 380
+extern u32 rtl8192de_phy_reg_2tarray[PHY_REG_2T_ARRAYLENGTH];
+#define PHY_REG_ARRAY_PG_LENGTH 624
+extern u32 rtl8192de_phy_reg_array_pg[PHY_REG_ARRAY_PG_LENGTH];
+#define RADIOA_2T_ARRAYLENGTH 378
+extern u32 rtl8192de_radioa_2tarray[RADIOA_2T_ARRAYLENGTH];
+#define RADIOB_2T_ARRAYLENGTH 384
+extern u32 rtl8192de_radiob_2tarray[RADIOB_2T_ARRAYLENGTH];
+#define RADIOA_2T_INT_PA_ARRAYLENGTH 378
+extern u32 rtl8192de_radioa_2t_int_paarray[RADIOA_2T_INT_PA_ARRAYLENGTH];
+#define RADIOB_2T_INT_PA_ARRAYLENGTH 384
+extern u32 rtl8192de_radiob_2t_int_paarray[RADIOB_2T_INT_PA_ARRAYLENGTH];
+#define MAC_2T_ARRAYLENGTH 160
+extern u32 rtl8192de_mac_2tarray[MAC_2T_ARRAYLENGTH];
+#define AGCTAB_ARRAYLENGTH 386
+extern u32 rtl8192de_agctab_array[AGCTAB_ARRAYLENGTH];
+#define AGCTAB_5G_ARRAYLENGTH 194
+extern u32 rtl8192de_agctab_5garray[AGCTAB_5G_ARRAYLENGTH];
+#define AGCTAB_2G_ARRAYLENGTH 194
+extern u32 rtl8192de_agctab_2garray[AGCTAB_2G_ARRAYLENGTH];
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
new file mode 100644
index 00000000000..dc86fcb0b3a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -0,0 +1,959 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "trx.h"
+#include "led.h"
+
+static u8 _rtl92de_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
+{
+ __le16 fc = rtl_get_fc(skb);
+
+ if (unlikely(ieee80211_is_beacon(fc)))
+ return QSLT_BEACON;
+ if (ieee80211_is_mgmt(fc))
+ return QSLT_MGNT;
+
+ return skb->priority;
+}
+
+static int _rtl92de_rate_mapping(bool isht, u8 desc_rate)
+{
+ int rate_idx;
+
+	if (!isht) {
+ switch (desc_rate) {
+ case DESC92D_RATE1M:
+ rate_idx = 0;
+ break;
+ case DESC92D_RATE2M:
+ rate_idx = 1;
+ break;
+ case DESC92D_RATE5_5M:
+ rate_idx = 2;
+ break;
+ case DESC92D_RATE11M:
+ rate_idx = 3;
+ break;
+ case DESC92D_RATE6M:
+ rate_idx = 4;
+ break;
+ case DESC92D_RATE9M:
+ rate_idx = 5;
+ break;
+ case DESC92D_RATE12M:
+ rate_idx = 6;
+ break;
+ case DESC92D_RATE18M:
+ rate_idx = 7;
+ break;
+ case DESC92D_RATE24M:
+ rate_idx = 8;
+ break;
+ case DESC92D_RATE36M:
+ rate_idx = 9;
+ break;
+ case DESC92D_RATE48M:
+ rate_idx = 10;
+ break;
+ case DESC92D_RATE54M:
+ rate_idx = 11;
+ break;
+ default:
+ rate_idx = 0;
+ break;
+ }
+ return rate_idx;
+ } else {
+ switch (desc_rate) {
+ case DESC92D_RATE1M:
+ rate_idx = 0;
+ break;
+ case DESC92D_RATE2M:
+ rate_idx = 1;
+ break;
+ case DESC92D_RATE5_5M:
+ rate_idx = 2;
+ break;
+ case DESC92D_RATE11M:
+ rate_idx = 3;
+ break;
+ case DESC92D_RATE6M:
+ rate_idx = 4;
+ break;
+ case DESC92D_RATE9M:
+ rate_idx = 5;
+ break;
+ case DESC92D_RATE12M:
+ rate_idx = 6;
+ break;
+ case DESC92D_RATE18M:
+ rate_idx = 7;
+ break;
+ case DESC92D_RATE24M:
+ rate_idx = 8;
+ break;
+ case DESC92D_RATE36M:
+ rate_idx = 9;
+ break;
+ case DESC92D_RATE48M:
+ rate_idx = 10;
+ break;
+ case DESC92D_RATE54M:
+ rate_idx = 11;
+ break;
+ default:
+ rate_idx = 11;
+ break;
+ }
+ return rate_idx;
+ }
+}
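+/*
+ * Note: both branches of _rtl92de_rate_mapping above map the legacy
+ * DESC92D_RATE* descriptor values to mac80211 rate indices 0-11; they
+ * differ only in the index returned for an unknown descriptor rate
+ * (0 for non-HT, 11 for HT).
+ */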
+
+static u8 _rtl92d_query_rxpwrpercentage(char antpower)
+{
+ if ((antpower <= -100) || (antpower >= 20))
+ return 0;
+ else if (antpower >= 0)
+ return 100;
+ else
+ return 100 + antpower;
+}
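+/*
+ * _rtl92d_query_rxpwrpercentage example: antpower = -70 dBm yields
+ * 100 + (-70) = 30 percent; antpower in [0, 20) reports 100 percent,
+ * while antpower >= 20 or <= -100 reports 0 percent.
+ */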
+
+static u8 _rtl92d_evm_db_to_percentage(char value)
+{
+ char ret_val = value;
+
+ if (ret_val >= 0)
+ ret_val = 0;
+ if (ret_val <= -33)
+ ret_val = -33;
+ ret_val = 0 - ret_val;
+ ret_val *= 3;
+ if (ret_val == 99)
+ ret_val = 100;
+ return ret_val;
+}
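+/*
+ * _rtl92d_evm_db_to_percentage example: an EVM report of -25 dB is
+ * clamped to [-33, 0], negated and scaled by 3, giving 75 percent;
+ * a result of exactly 99 is rounded up to 100.
+ */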
+
+static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
+ u8 signal_strength_index)
+{
+ long signal_power;
+
+ signal_power = (long)((signal_strength_index + 1) >> 1);
+ signal_power -= 95;
+ return signal_power;
+}
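+/*
+ * _rtl92de_translate_todbm example: a signal strength index of 40 gives
+ * ((40 + 1) >> 1) - 95 = 20 - 95 = -75 dBm.
+ */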
+
+static long _rtl92de_signal_scale_mapping(struct ieee80211_hw *hw, long currsig)
+{
+ long retsig;
+
+ if (currsig >= 61 && currsig <= 100)
+ retsig = 90 + ((currsig - 60) / 4);
+ else if (currsig >= 41 && currsig <= 60)
+ retsig = 78 + ((currsig - 40) / 2);
+ else if (currsig >= 31 && currsig <= 40)
+ retsig = 66 + (currsig - 30);
+ else if (currsig >= 21 && currsig <= 30)
+ retsig = 54 + (currsig - 20);
+ else if (currsig >= 5 && currsig <= 20)
+ retsig = 42 + (((currsig - 5) * 2) / 3);
+ else if (currsig == 4)
+ retsig = 36;
+ else if (currsig == 3)
+ retsig = 27;
+ else if (currsig == 2)
+ retsig = 18;
+ else if (currsig == 1)
+ retsig = 9;
+ else
+ retsig = currsig;
+ return retsig;
+}
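+/*
+ * _rtl92de_signal_scale_mapping example: currsig = 70 falls in the
+ * 61-100 band and maps to 90 + ((70 - 60) / 4) = 92.
+ */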
+
+static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats,
+ struct rx_desc_92d *pdesc,
+ struct rx_fwinfo_92d *p_drvinfo,
+ bool packet_match_bssid,
+ bool packet_toself,
+ bool packet_beacon)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ struct phy_sts_cck_8192d *cck_buf;
+ s8 rx_pwr_all, rx_pwr[4];
+ u8 rf_rx_num = 0, evm, pwdb_all;
+ u8 i, max_spatial_stream;
+ u32 rssi, total_rssi = 0;
+ bool is_cck_rate;
+
+ is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
+ pstats->packet_matchbssid = packet_match_bssid;
+ pstats->packet_toself = packet_toself;
+ pstats->packet_beacon = packet_beacon;
+ pstats->is_cck = is_cck_rate;
+ pstats->rx_mimo_signalquality[0] = -1;
+ pstats->rx_mimo_signalquality[1] = -1;
+
+ if (is_cck_rate) {
+ u8 report, cck_highpwr;
+ cck_buf = (struct phy_sts_cck_8192d *)p_drvinfo;
+ if (ppsc->rfpwr_state == ERFON)
+ cck_highpwr = (u8) rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER2,
+ BIT(9));
+ else
+ cck_highpwr = false;
+ if (!cck_highpwr) {
+ u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+ report = cck_buf->cck_agc_rpt & 0xc0;
+ report = report >> 6;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
+ break;
+ }
+ } else {
+ u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+ report = p_drvinfo->cfosho[0] & 0x60;
+ report = report >> 5;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ }
+ }
+ pwdb_all = _rtl92d_query_rxpwrpercentage(rx_pwr_all);
+ /* CCK gain is smaller than OFDM/MCS gain, so add an
+ * empirically determined offset of 6 */
+ pwdb_all += 6;
+ if (pwdb_all > 100)
+ pwdb_all = 100;
+ /* adjust the offset so the gain index matches the OFDM one */
+ if (pwdb_all > 34 && pwdb_all <= 42)
+ pwdb_all -= 2;
+ else if (pwdb_all > 26 && pwdb_all <= 34)
+ pwdb_all -= 6;
+ else if (pwdb_all > 14 && pwdb_all <= 26)
+ pwdb_all -= 8;
+ else if (pwdb_all > 4 && pwdb_all <= 14)
+ pwdb_all -= 4;
+ pstats->rx_pwdb_all = pwdb_all;
+ pstats->recvsignalpower = rx_pwr_all;
+ if (packet_match_bssid) {
+ u8 sq;
+ if (pstats->rx_pwdb_all > 40) {
+ sq = 100;
+ } else {
+ sq = cck_buf->sq_rpt;
+ if (sq > 64)
+ sq = 0;
+ else if (sq < 20)
+ sq = 100;
+ else
+ sq = ((64 - sq) * 100) / 44;
+ }
+ pstats->signalquality = sq;
+ pstats->rx_mimo_signalquality[0] = sq;
+ pstats->rx_mimo_signalquality[1] = -1;
+ }
+ } else {
+ rtlpriv->dm.rfpath_rxenable[0] = true;
+ rtlpriv->dm.rfpath_rxenable[1] = true;
+ for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
+ if (rtlpriv->dm.rfpath_rxenable[i])
+ rf_rx_num++;
+ rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2)
+ - 110;
+ rssi = _rtl92d_query_rxpwrpercentage(rx_pwr[i]);
+ total_rssi += rssi;
+ rtlpriv->stats.rx_snr_db[i] =
+ (long)(p_drvinfo->rxsnr[i] / 2);
+ if (packet_match_bssid)
+ pstats->rx_mimo_signalstrength[i] = (u8) rssi;
+ }
+ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 106;
+ pwdb_all = _rtl92d_query_rxpwrpercentage(rx_pwr_all);
+ pstats->rx_pwdb_all = pwdb_all;
+ pstats->rxpower = rx_pwr_all;
+ pstats->recvsignalpower = rx_pwr_all;
+ if (pdesc->rxht && pdesc->rxmcs >= DESC92D_RATEMCS8 &&
+ pdesc->rxmcs <= DESC92D_RATEMCS15)
+ max_spatial_stream = 2;
+ else
+ max_spatial_stream = 1;
+ for (i = 0; i < max_spatial_stream; i++) {
+ evm = _rtl92d_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+ if (packet_match_bssid) {
+ if (i == 0)
+ pstats->signalquality =
+ (u8)(evm & 0xff);
+ pstats->rx_mimo_signalquality[i] =
+ (u8)(evm & 0xff);
+ }
+ }
+ }
+ if (is_cck_rate)
+ pstats->signalstrength = (u8)(_rtl92de_signal_scale_mapping(hw,
+ pwdb_all));
+ else if (rf_rx_num != 0)
+ pstats->signalstrength = (u8)(_rtl92de_signal_scale_mapping(hw,
+ total_rssi /= rf_rx_num));
+}
+
+static void rtl92d_loop_over_paths(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
+ u8 rfpath;
+
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ pstats->rx_mimo_signalstrength[rfpath];
+ }
+ if (pstats->rx_mimo_signalstrength[rfpath] >
+ rtlpriv->stats.rx_rssi_percentage[rfpath]) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ rtlpriv->stats.rx_rssi_percentage[rfpath] + 1;
+ } else {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+ }
+ }
+}
+
+static void _rtl92de_process_ui_rssi(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 last_rssi, tmpval;
+
+ if (pstats->packet_toself || pstats->packet_beacon) {
+ rtlpriv->stats.rssi_calculate_cnt++;
+ if (rtlpriv->stats.ui_rssi.total_num++ >=
+ PHY_RSSI_SLID_WIN_MAX) {
+ rtlpriv->stats.ui_rssi.total_num =
+ PHY_RSSI_SLID_WIN_MAX;
+ last_rssi = rtlpriv->stats.ui_rssi.elements[
+ rtlpriv->stats.ui_rssi.index];
+ rtlpriv->stats.ui_rssi.total_val -= last_rssi;
+ }
+ rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
+ rtlpriv->stats.ui_rssi.elements
+ [rtlpriv->stats.ui_rssi.index++] =
+ pstats->signalstrength;
+ if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
+ rtlpriv->stats.ui_rssi.index = 0;
+ tmpval = rtlpriv->stats.ui_rssi.total_val /
+ rtlpriv->stats.ui_rssi.total_num;
+ rtlpriv->stats.signal_strength = _rtl92de_translate_todbm(hw,
+ (u8) tmpval);
+ pstats->rssi = rtlpriv->stats.signal_strength;
+ }
+ if (!pstats->is_cck && pstats->packet_toself)
+ rtl92d_loop_over_paths(hw, pstats);
+}
+
+static void _rtl92de_update_rxsignalstatistics(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int weighting = 0;
+
+ if (rtlpriv->stats.recv_signal_power == 0)
+ rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
+ if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
+ weighting = 5;
+ else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
+ weighting = (-5);
+ rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power *
+ 5 + pstats->recvsignalpower + weighting) / 6;
+}
+
+static void _rtl92de_process_pwdb(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ long undecorated_smoothed_pwdb;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC ||
+ mac->opmode == NL80211_IFTYPE_AP)
+ return;
+ else
+ undecorated_smoothed_pwdb =
+ rtlpriv->dm.undecorated_smoothed_pwdb;
+
+ if (pstats->packet_toself || pstats->packet_beacon) {
+ if (undecorated_smoothed_pwdb < 0)
+ undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
+ if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
+ undecorated_smoothed_pwdb =
+ (((undecorated_smoothed_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ undecorated_smoothed_pwdb =
+ undecorated_smoothed_pwdb + 1;
+ } else {
+ undecorated_smoothed_pwdb =
+ (((undecorated_smoothed_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ }
+ rtlpriv->dm.undecorated_smoothed_pwdb =
+ undecorated_smoothed_pwdb;
+ _rtl92de_update_rxsignalstatistics(hw, pstats);
+ }
+}
+
+static void rtl92d_loop_over_streams(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int stream;
+
+ for (stream = 0; stream < 2; stream++) {
+ if (pstats->rx_mimo_signalquality[stream] != -1) {
+ if (rtlpriv->stats.rx_evm_percentage[stream] == 0) {
+ rtlpriv->stats.rx_evm_percentage[stream] =
+ pstats->rx_mimo_signalquality[stream];
+ }
+ rtlpriv->stats.rx_evm_percentage[stream] =
+ ((rtlpriv->stats.rx_evm_percentage[stream]
+ * (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_signalquality[stream] * 1)) /
+ (RX_SMOOTH_FACTOR);
+ }
+ }
+}
+
+static void _rtl92de_process_ui_link_quality(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 last_evm, tmpval;
+
+ if (pstats->signalquality == 0)
+ return;
+ if (pstats->packet_toself || pstats->packet_beacon) {
+ if (rtlpriv->stats.ui_link_quality.total_num++ >=
+ PHY_LINKQUALITY_SLID_WIN_MAX) {
+ rtlpriv->stats.ui_link_quality.total_num =
+ PHY_LINKQUALITY_SLID_WIN_MAX;
+ last_evm = rtlpriv->stats.ui_link_quality.elements[
+ rtlpriv->stats.ui_link_quality.index];
+ rtlpriv->stats.ui_link_quality.total_val -= last_evm;
+ }
+ rtlpriv->stats.ui_link_quality.total_val +=
+ pstats->signalquality;
+ rtlpriv->stats.ui_link_quality.elements[
+ rtlpriv->stats.ui_link_quality.index++] =
+ pstats->signalquality;
+ if (rtlpriv->stats.ui_link_quality.index >=
+ PHY_LINKQUALITY_SLID_WIN_MAX)
+ rtlpriv->stats.ui_link_quality.index = 0;
+ tmpval = rtlpriv->stats.ui_link_quality.total_val /
+ rtlpriv->stats.ui_link_quality.total_num;
+ rtlpriv->stats.signal_quality = tmpval;
+ rtlpriv->stats.last_sigstrength_inpercent = tmpval;
+ rtl92d_loop_over_streams(hw, pstats);
+ }
+}
+
+static void _rtl92de_process_phyinfo(struct ieee80211_hw *hw,
+ u8 *buffer,
+ struct rtl_stats *pcurrent_stats)
+{
+ if (!pcurrent_stats->packet_matchbssid &&
+ !pcurrent_stats->packet_beacon)
+ return;
+
+ _rtl92de_process_ui_rssi(hw, pcurrent_stats);
+ _rtl92de_process_pwdb(hw, pcurrent_stats);
+ _rtl92de_process_ui_link_quality(hw, pcurrent_stats);
+}
+
+static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rtl_stats *pstats,
+ struct rx_desc_92d *pdesc,
+ struct rx_fwinfo_92d *p_drvinfo)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct ieee80211_hdr *hdr;
+ u8 *tmp_buf;
+ u8 *praddr;
+ u16 type, cfc;
+ __le16 fc;
+ bool packet_matchbssid, packet_toself, packet_beacon = false;
+
+ tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
+ hdr = (struct ieee80211_hdr *)tmp_buf;
+ fc = hdr->frame_control;
+ cfc = le16_to_cpu(fc);
+ type = WLAN_FC_GET_TYPE(fc);
+ praddr = hdr->addr1;
+ packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
+ (!compare_ether_addr(mac->bssid, (cfc & IEEE80211_FCTL_TODS) ?
+ hdr->addr1 : (cfc & IEEE80211_FCTL_FROMDS) ?
+ hdr->addr2 : hdr->addr3)) && (!pstats->hwerror) &&
+ (!pstats->crc) && (!pstats->icv));
+ packet_toself = packet_matchbssid &&
+ (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+ if (ieee80211_is_beacon(fc))
+ packet_beacon = true;
+ _rtl92de_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
+ packet_matchbssid, packet_toself,
+ packet_beacon);
+ _rtl92de_process_phyinfo(hw, tmp_buf, pstats);
+}
+
+bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *p_desc, struct sk_buff *skb)
+{
+ struct rx_fwinfo_92d *p_drvinfo;
+ struct rx_desc_92d *pdesc = (struct rx_desc_92d *)p_desc;
+ u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+
+ stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+ stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ RX_DRV_INFO_SIZE_UNIT;
+ stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
+ stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
+ stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+ stats->hwerror = (stats->crc | stats->icv);
+ stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
+ stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
+ stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+ stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+ stats->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
+ && (GET_RX_DESC_FAGGR(pdesc) == 1));
+ stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+ stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+ rx_status->freq = hw->conf.channel->center_freq;
+ rx_status->band = hw->conf.channel->band;
+ if (GET_RX_DESC_CRC32(pdesc))
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (!GET_RX_DESC_SWDEC(pdesc))
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (GET_RX_DESC_BW(pdesc))
+ rx_status->flag |= RX_FLAG_40MHZ;
+ if (GET_RX_DESC_RXHT(pdesc))
+ rx_status->flag |= RX_FLAG_HT;
+ rx_status->flag |= RX_FLAG_MACTIME_MPDU;
+ if (stats->decrypted)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ rx_status->rate_idx = _rtl92de_rate_mapping((bool)
+ GET_RX_DESC_RXHT(pdesc),
+ (u8)
+ GET_RX_DESC_RXMCS(pdesc));
+ rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
+ if (phystatus) {
+ p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
+ stats->rx_bufshift);
+ _rtl92de_translate_rx_signal_stuff(hw,
+ skb, stats, pdesc,
+ p_drvinfo);
+ }
+ /*rx_status->qual = stats->signal; */
+ rx_status->signal = stats->rssi + 10;
+ /*rx_status->noise = -stats->noise; */
+ return true;
+}
+
+static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
+ u8 *virtualaddress)
+{
+ memset(virtualaddress, 0, 8);
+
+ SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
+ SET_EARLYMODE_LEN0(virtualaddress, ptcb_desc->empkt_len[0]);
+ SET_EARLYMODE_LEN1(virtualaddress, ptcb_desc->empkt_len[1]);
+ SET_EARLYMODE_LEN2_1(virtualaddress, ptcb_desc->empkt_len[2] & 0xF);
+ SET_EARLYMODE_LEN2_2(virtualaddress, ptcb_desc->empkt_len[2] >> 4);
+ SET_EARLYMODE_LEN3(virtualaddress, ptcb_desc->empkt_len[3]);
+ SET_EARLYMODE_LEN4(virtualaddress, ptcb_desc->empkt_len[4]);
+}
+
+void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_tx_info *info, struct sk_buff *skb,
+ u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct ieee80211_sta *sta = info->control.sta;
+ u8 *pdesc = (u8 *) pdesc_tx;
+ u16 seq_number;
+ __le16 fc = hdr->frame_control;
+ unsigned int buf_len = 0;
+ unsigned int skb_len = skb->len;
+ u8 fw_qsel = _rtl92de_map_hwqueue_to_fwqueue(skb, hw_queue);
+ bool firstseg = ((hdr->seq_ctrl &
+ cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
+ bool lastseg = ((hdr->frame_control &
+ cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
+ dma_addr_t mapping;
+ u8 bw_40 = 0;
+
+ if (mac->opmode == NL80211_IFTYPE_STATION) {
+ bw_40 = mac->bw_40;
+ } else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ if (sta)
+ bw_40 = sta->ht_cap.cap &
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ }
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+ rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
+ /* reserve 8 bytes for the AMPDU early-mode header */
+ if (rtlhal->earlymode_enable) {
+ skb_push(skb, EM_HDR_LEN);
+ memset(skb->data, 0, EM_HDR_LEN);
+ }
+ buf_len = skb->len;
+ mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92d));
+ if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
+ firstseg = true;
+ lastseg = true;
+ }
+ if (firstseg) {
+ if (rtlhal->earlymode_enable) {
+ SET_TX_DESC_PKT_OFFSET(pdesc, 1);
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN +
+ EM_HDR_LEN);
+ if (ptcb_desc->empkt_num) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
+ ("Insert 8 byte.pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num));
+ _rtl92de_insert_emcontent(ptcb_desc,
+ (u8 *)(skb->data));
+ }
+ } else {
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ }
+ /* 5 GHz has no CCK rates */
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ if (ptcb_desc->hw_rate < DESC92D_RATE6M)
+ ptcb_desc->hw_rate = DESC92D_RATE6M;
+ SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+ if (ptcb_desc->use_shortgi || ptcb_desc->use_shortpreamble)
+ SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ ptcb_desc->hw_rate == DESC92D_RATEMCS7)
+ SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ SET_TX_DESC_AGG_ENABLE(pdesc, 1);
+ SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+ }
+ SET_TX_DESC_SEQ(pdesc, seq_number);
+ SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
+ !ptcb_desc->cts_enable) ? 1 : 0));
+ SET_TX_DESC_HW_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable
+ || ptcb_desc->cts_enable) ? 1 : 0));
+ SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
+ SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
+ /* 5 GHz has no CCK rates */
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ if (ptcb_desc->rts_rate < DESC92D_RATE6M)
+ ptcb_desc->rts_rate = DESC92D_RATE6M;
+ SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
+ SET_TX_DESC_RTS_BW(pdesc, 0);
+ SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
+ SET_TX_DESC_RTS_SHORT(pdesc, ((ptcb_desc->rts_rate <=
+ DESC92D_RATE54M) ?
+ (ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
+ (ptcb_desc->rts_use_shortgi ? 1 : 0)));
+ if (bw_40) {
+ if (ptcb_desc->packet_bw) {
+ SET_TX_DESC_DATA_BW(pdesc, 1);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+ } else {
+ SET_TX_DESC_DATA_BW(pdesc, 0);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc,
+ mac->cur_40_prime_sc);
+ }
+ } else {
+ SET_TX_DESC_DATA_BW(pdesc, 0);
+ SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ }
+ SET_TX_DESC_LINIP(pdesc, 0);
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len);
+ if (sta) {
+ u8 ampdu_density = sta->ht_cap.ampdu_density;
+ SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+ }
+ if (info->control.hw_key) {
+ struct ieee80211_key_conf *keyconf;
+
+ keyconf = info->control.hw_key;
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ break;
+ default:
+ SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ break;
+
+ }
+ }
+ SET_TX_DESC_PKT_ID(pdesc, 0);
+ SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
+ SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
+ SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
+ SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ?
+ 1 : 0);
+ SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
+
+ /* Set TxRate and RTSRate in the TxDesc. This prevents the
+ * initial Tx rate of newly queued packets from being
+ * overwritten by the rate of retried packets. */
+ if (!ptcb_desc->use_driver_rate) {
+ SET_TX_DESC_RTS_RATE(pdesc, 0x08);
+ /* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */
+ }
+ if (ieee80211_is_data_qos(fc)) {
+ if (mac->rdg_en) {
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ ("Enable RDG function.\n"));
+ SET_TX_DESC_RDG_ENABLE(pdesc, 1);
+ SET_TX_DESC_HTC(pdesc, 1);
+ }
+ }
+ }
+
+ SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
+ SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len);
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ if (rtlpriv->dm.useramask) {
+ SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
+ SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ } else {
+ SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index);
+ SET_TX_DESC_MACID(pdesc, ptcb_desc->ratr_index);
+ }
+ if (ieee80211_is_data_qos(fc))
+ SET_TX_DESC_QOS(pdesc, 1);
+
+ if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) {
+ SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+ SET_TX_DESC_PKT_ID(pdesc, 8);
+ }
+ SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+ RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, ("\n"));
+}
+
+void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
+ u8 *pdesc, bool firstseg,
+ bool lastseg, struct sk_buff *skb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 fw_queue = QSLT_BEACON;
+ dma_addr_t mapping = pci_map_single(rtlpci->pdev,
+ skb->data, skb->len, PCI_DMA_TODEVICE);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+ __le16 fc = hdr->frame_control;
+
+ CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+ if (firstseg)
+ SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ /* 5 GHz has no CCK rates.
+ * Caution: the macros below are multi-line expansions, so the
+ * braces are needed no matter what checkpatch says; without
+ * them the trailing semicolon inside the expansion would end
+ * the if-statement and orphan the else branch.
+ */
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ SET_TX_DESC_TX_RATE(pdesc, DESC92D_RATE6M);
+ } else {
+ SET_TX_DESC_TX_RATE(pdesc, DESC92D_RATE1M);
+ }
+ SET_TX_DESC_SEQ(pdesc, 0);
+ SET_TX_DESC_LINIP(pdesc, 0);
+ SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+ SET_TX_DESC_FIRST_SEG(pdesc, 1);
+ SET_TX_DESC_LAST_SEG(pdesc, 1);
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len));
+ SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ SET_TX_DESC_RATE_ID(pdesc, 7);
+ SET_TX_DESC_MACID(pdesc, 0);
+ SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+ SET_TX_DESC_FIRST_SEG(pdesc, 1);
+ SET_TX_DESC_LAST_SEG(pdesc, 1);
+ SET_TX_DESC_OFFSET(pdesc, 0x20);
+ SET_TX_DESC_USE_RATE(pdesc, 1);
+
+ if (!ieee80211_is_data_qos(fc) && ppsc->fwctrl_lps) {
+ SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+ SET_TX_DESC_PKT_ID(pdesc, 8);
+ }
+
+ RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C Tx Cmd Content\n", pdesc, TX_DESC_SIZE);
+ wmb();
+ SET_TX_DESC_OWN(pdesc, 1);
+}
+
+void rtl92de_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+{
+ if (istx) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ wmb();
+ SET_TX_DESC_OWN(pdesc, 1);
+ break;
+ case HW_DESC_TX_NEXTDESC_ADDR:
+ SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
+ break;
+ default:
+ RT_ASSERT(false, ("ERR txdesc :%d"
+ " not process\n", desc_name));
+ break;
+ }
+ } else {
+ switch (desc_name) {
+ case HW_DESC_RXOWN:
+ wmb();
+ SET_RX_DESC_OWN(pdesc, 1);
+ break;
+ case HW_DESC_RXBUFF_ADDR:
+ SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val);
+ break;
+ case HW_DESC_RXERO:
+ SET_RX_DESC_EOR(pdesc, 1);
+ break;
+ default:
+ RT_ASSERT(false, ("ERR rxdesc :%d "
+ "not process\n", desc_name));
+ break;
+ }
+ }
+}
+
+u32 rtl92de_get_desc(u8 *p_desc, bool istx, u8 desc_name)
+{
+ u32 ret = 0;
+
+ if (istx) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = GET_TX_DESC_OWN(p_desc);
+ break;
+ case HW_DESC_TXBUFF_ADDR:
+ ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc);
+ break;
+ default:
+ RT_ASSERT(false, ("ERR txdesc :%d "
+ "not process\n", desc_name));
+ break;
+ }
+ } else {
+ struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = GET_RX_DESC_OWN(pdesc);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ ret = GET_RX_DESC_PKT_LEN(pdesc);
+ break;
+ default:
+ RT_ASSERT(false, ("ERR rxdesc :%d "
+ "not process\n", desc_name));
+ break;
+ }
+ }
+ return ret;
+}
+
+void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ if (hw_queue == BEACON_QUEUE)
+ rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
+ else
+ rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
+ BIT(0) << (hw_queue));
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
new file mode 100644
index 00000000000..992d6766e66
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
@@ -0,0 +1,756 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010 Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL92DE_TRX_H__
+#define __RTL92DE_TRX_H__
+
+#define TX_DESC_SIZE 64
+#define TX_DESC_AGGR_SUBFRAME_SIZE 32
+
+#define RX_DESC_SIZE 32
+#define RX_DRV_INFO_SIZE_UNIT 8
+
+#define TX_DESC_NEXT_DESC_OFFSET 40
+#define USB_HWDESC_HEADER_LEN 32
+#define CRCLENGTH 4
+
+/* Define a macro that takes a le32 word, converts it to host ordering,
+ * right shifts by a specified count, creates a mask of the specified
+ * bit count, and extracts that number of bits.
+ */
+
+#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
+ ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
+ BIT_LEN_MASK_32(__mask))
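+
+/* Usage sketch (illustrative only): GET_RX_DESC_PKT_LEN() below is
+ * defined as SHIFT_AND_MASK_LE(__pdesc, 0, 14), i.e. it byte-swaps
+ * descriptor dword 0, shifts right by 0 and masks with 0x3fff to
+ * extract the 14-bit packet length.
+ */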
+
+/* Define a macro that clears a bit field in an le32 word and
+ * sets the specified value into that bit field. The resulting
+ * value remains in le32 ordering; however, it is properly converted
+ * to host ordering for the clear and set operations before conversion
+ * back to le32.
+ */
+
+#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
+ (*(__le32 *)(__pdesc) = \
+ (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
+ (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
+ (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
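+
+/* Usage sketch (illustrative only): SET_TX_DESC_PKT_SIZE(pdesc, 0x40)
+ * below expands to a read-modify-write of bits 0..15 of descriptor
+ * dword 0, roughly:
+ *   d = le32_to_cpu(*(__le32 *)pdesc);
+ *   *(__le32 *)pdesc = cpu_to_le32((d & ~0xffff) | 0x40);
+ */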
+
+/* macros to read/write various fields in RX or TX descriptors */
+
+#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
+#define SET_TX_DESC_OFFSET(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
+#define SET_TX_DESC_BMC(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 24, 1, __val)
+#define SET_TX_DESC_HTC(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 25, 1, __val)
+#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
+#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
+#define SET_TX_DESC_LINIP(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
+#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
+#define SET_TX_DESC_GF(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
+#define SET_TX_DESC_OWN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
+
+#define GET_TX_DESC_PKT_SIZE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 0, 16)
+#define GET_TX_DESC_OFFSET(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 16, 8)
+#define GET_TX_DESC_BMC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 24, 1)
+#define GET_TX_DESC_HTC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 25, 1)
+#define GET_TX_DESC_LAST_SEG(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 26, 1)
+#define GET_TX_DESC_FIRST_SEG(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 27, 1)
+#define GET_TX_DESC_LINIP(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 28, 1)
+#define GET_TX_DESC_NO_ACM(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 29, 1)
+#define GET_TX_DESC_GF(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 30, 1)
+#define GET_TX_DESC_OWN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 31, 1)
+
+#define SET_TX_DESC_MACID(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 0, 5, __val)
+#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 5, 1, __val)
+#define SET_TX_DESC_BK(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 6, 1, __val)
+#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 7, 1, __val)
+#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 8, 5, __val)
+#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 13, 1, __val)
+#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 14, 1, __val)
+#define SET_TX_DESC_PIFS(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 15, 1, __val)
+#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 16, 4, __val)
+#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 20, 1, __val)
+#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 21, 1, __val)
+#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 22, 2, __val)
+#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+4, 26, 8, __val)
+
+#define GET_TX_DESC_MACID(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
+#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 5, 1)
+#define GET_TX_DESC_AGG_BREAK(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 6, 1)
+#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 7, 1)
+#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 8, 5)
+#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 13, 1)
+#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
+#define GET_TX_DESC_PIFS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
+#define GET_TX_DESC_RATE_ID(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
+#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 20, 1)
+#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 21, 1)
+#define GET_TX_DESC_SEC_TYPE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 22, 2)
+#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 24, 8)
+
+#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 0, 6, __val)
+#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 6, 6, __val)
+#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 14, 2, __val)
+#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 17, 1, __val)
+#define SET_TX_DESC_RAW(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 18, 1, __val)
+#define SET_TX_DESC_CCX(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 19, 1, __val)
+#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 20, 3, __val)
+#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 24, 1, __val)
+#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 25, 1, __val)
+#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 26, 2, __val)
+#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 28, 2, __val)
+#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+8, 30, 2, __val)
+
+#define GET_TX_DESC_RTS_RC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 0, 6)
+#define GET_TX_DESC_DATA_RC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 6, 6)
+#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 14, 2)
+#define GET_TX_DESC_MORE_FRAG(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 17, 1)
+#define GET_TX_DESC_RAW(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 18, 1)
+#define GET_TX_DESC_CCX(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 19, 1)
+#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 20, 3)
+#define GET_TX_DESC_ANTSEL_A(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 24, 1)
+#define GET_TX_DESC_ANTSEL_B(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 25, 1)
+#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 26, 2)
+#define GET_TX_DESC_TX_ANTL(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 28, 2)
+#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 30, 2)
+
+#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+12, 0, 8, __val)
+#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+12, 8, 8, __val)
+#define SET_TX_DESC_SEQ(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+12, 16, 12, __val)
+#define SET_TX_DESC_PKT_ID(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+12, 28, 4, __val)
+
+#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 0, 8)
+#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 8, 8)
+#define GET_TX_DESC_SEQ(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 16, 12)
+#define GET_TX_DESC_PKT_ID(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 28, 4)
+
+#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 0, 5, __val)
+#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 5, 1, __val)
+#define SET_TX_DESC_QOS(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 6, 1, __val)
+#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 7, 1, __val)
+#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 8, 1, __val)
+#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 9, 1, __val)
+#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 10, 1, __val)
+#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 11, 1, __val)
+#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 12, 1, __val)
+#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 13, 1, __val)
+#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 14, 1, __val)
+#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 18, 1, __val)
+#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 19, 1, __val)
+#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 20, 2, __val)
+#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 22, 2, __val)
+#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 24, 1, __val)
+#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 25, 1, __val)
+#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 26, 1, __val)
+#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 27, 1, __val)
+#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 28, 2, __val)
+#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+16, 30, 2, __val)
+
+#define GET_TX_DESC_RTS_RATE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 0, 5)
+#define GET_TX_DESC_AP_DCFE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 5, 1)
+#define GET_TX_DESC_QOS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 6, 1)
+#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 7, 1)
+#define GET_TX_DESC_USE_RATE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 8, 1)
+#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 9, 1)
+#define GET_TX_DESC_DISABLE_FB(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 10, 1)
+#define GET_TX_DESC_CTS2SELF(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 11, 1)
+#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 12, 1)
+#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 13, 1)
+#define GET_TX_DESC_PORT_ID(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 14, 1)
+#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 18, 1)
+#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 19, 1)
+#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 20, 2)
+#define GET_TX_DESC_TX_STBC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 22, 2)
+#define GET_TX_DESC_DATA_SHORT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 24, 1)
+#define GET_TX_DESC_DATA_BW(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 25, 1)
+#define GET_TX_DESC_RTS_SHORT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 26, 1)
+#define GET_TX_DESC_RTS_BW(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 27, 1)
+#define GET_TX_DESC_RTS_SC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 28, 2)
+#define GET_TX_DESC_RTS_STBC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 30, 2)
+
+#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
+#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
+#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
+#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 8, 5, __val)
+#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 13, 4, __val)
+#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 17, 1, __val)
+#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 18, 6, __val)
+#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+20, 24, 8, __val)
+
+#define GET_TX_DESC_TX_RATE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 0, 6)
+#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 6, 1)
+#define GET_TX_DESC_CCX_TAG(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 7, 1)
+#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 8, 5)
+#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 13, 4)
+#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 17, 1)
+#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 18, 6)
+#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 24, 8)
+
+#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 0, 5, __val)
+#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 5, 5, __val)
+#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 10, 1, __val)
+#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 11, 5, __val)
+#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 16, 4, __val)
+#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 20, 4, __val)
+#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 24, 4, __val)
+#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 28, 4, __val)
+
+#define GET_TX_DESC_TXAGC_A(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 0, 5)
+#define GET_TX_DESC_TXAGC_B(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 5, 5)
+#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 10, 1)
+#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 11, 5)
+#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 16, 4)
+#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 20, 4)
+#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 24, 4)
+#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 28, 4)
+
+#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+28, 0, 16, __val)
+#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+28, 16, 4, __val)
+#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+28, 20, 4, __val)
+#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+28, 24, 4, __val)
+#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+28, 28, 4, __val)
+
+#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+28, 0, 16)
+#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+28, 16, 4)
+#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+28, 20, 4)
+#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+28, 24, 4)
+#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+28, 28, 4)
+
+#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+32, 0, 32, __val)
+#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+36, 0, 32, __val)
+
+#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+32, 0, 32)
+#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+36, 0, 32)
+
+#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+40, 0, 32, __val)
+#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+44, 0, 32, __val)
+
+#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+40, 0, 32)
+#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+44, 0, 32)
+
+#define GET_RX_DESC_PKT_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 0, 14)
+#define GET_RX_DESC_CRC32(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 14, 1)
+#define GET_RX_DESC_ICV(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 15, 1)
+#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 16, 4)
+#define GET_RX_DESC_SECURITY(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 20, 3)
+#define GET_RX_DESC_QOS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 23, 1)
+#define GET_RX_DESC_SHIFT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 24, 2)
+#define GET_RX_DESC_PHYST(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 26, 1)
+#define GET_RX_DESC_SWDEC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 27, 1)
+#define GET_RX_DESC_LS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 28, 1)
+#define GET_RX_DESC_FS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 29, 1)
+#define GET_RX_DESC_EOR(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 30, 1)
+#define GET_RX_DESC_OWN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc, 31, 1)
+
+#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
+#define SET_RX_DESC_EOR(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
+#define SET_RX_DESC_OWN(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
+
+#define GET_RX_DESC_MACID(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
+#define GET_RX_DESC_TID(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 5, 4)
+#define GET_RX_DESC_HWRSVD(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 9, 5)
+#define GET_RX_DESC_PAGGR(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
+#define GET_RX_DESC_FAGGR(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
+#define GET_RX_DESC_A1_FIT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
+#define GET_RX_DESC_A2_FIT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 20, 4)
+#define GET_RX_DESC_PAM(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 24, 1)
+#define GET_RX_DESC_PWR(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 25, 1)
+#define GET_RX_DESC_MD(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 26, 1)
+#define GET_RX_DESC_MF(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 27, 1)
+#define GET_RX_DESC_TYPE(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 28, 2)
+#define GET_RX_DESC_MC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 30, 1)
+#define GET_RX_DESC_BC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+4, 31, 1)
+#define GET_RX_DESC_SEQ(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 0, 12)
+#define GET_RX_DESC_FRAG(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 12, 4)
+#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 16, 14)
+#define GET_RX_DESC_NEXT_IND(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 30, 1)
+#define GET_RX_DESC_RSVD(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+8, 31, 1)
+
+#define GET_RX_DESC_RXMCS(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 0, 6)
+#define GET_RX_DESC_RXHT(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 6, 1)
+#define GET_RX_DESC_SPLCP(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 8, 1)
+#define GET_RX_DESC_BW(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 9, 1)
+#define GET_RX_DESC_HTC(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 10, 1)
+#define GET_RX_DESC_HWPC_ERR(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 14, 1)
+#define GET_RX_DESC_HWPC_IND(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 15, 1)
+#define GET_RX_DESC_IV0(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+12, 16, 16)
+
+#define GET_RX_DESC_IV1(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+16, 0, 32)
+#define GET_RX_DESC_TSFL(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+20, 0, 32)
+
+#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+24, 0, 32)
+#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
+ SHIFT_AND_MASK_LE(__pdesc+28, 0, 32)
+
+#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+24, 0, 32, __val)
+#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
+ SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
+
+#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
+do { \
+ if (_size > TX_DESC_NEXT_DESC_OFFSET) \
+ memset((void *)__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
+ else \
+ memset((void *)__pdesc, 0, _size); \
+} while (0)
+
+#define RX_HAL_IS_CCK_RATE(_pdesc)\
+ (_pdesc->rxmcs == DESC92D_RATE1M || \
+ _pdesc->rxmcs == DESC92D_RATE2M || \
+ _pdesc->rxmcs == DESC92D_RATE5_5M || \
+ _pdesc->rxmcs == DESC92D_RATE11M)
+
+/* For 92D early mode */
+#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
+ SET_BITS_OFFSET_LE(__paddr, 0, 3, __value)
+#define SET_EARLYMODE_LEN0(__paddr, __value) \
+ SET_BITS_OFFSET_LE(__paddr, 4, 12, __value)
+#define SET_EARLYMODE_LEN1(__paddr, __value) \
+ SET_BITS_OFFSET_LE(__paddr, 16, 12, __value)
+#define SET_EARLYMODE_LEN2_1(__paddr, __value) \
+ SET_BITS_OFFSET_LE(__paddr, 28, 4, __value)
+#define SET_EARLYMODE_LEN2_2(__paddr, __value) \
+ SET_BITS_OFFSET_LE(__paddr+4, 0, 8, __value)
+#define SET_EARLYMODE_LEN3(__paddr, __value) \
+ SET_BITS_OFFSET_LE(__paddr+4, 8, 12, __value)
+#define SET_EARLYMODE_LEN4(__paddr, __value) \
+ SET_BITS_OFFSET_LE(__paddr+4, 20, 12, __value)
+
+struct rx_fwinfo_92d {
+ u8 gain_trsw[4];
+ u8 pwdb_all;
+ u8 cfosho[4];
+ u8 cfotail[4];
+ char rxevm[2];
+ char rxsnr[4];
+ u8 pdsnr[2];
+ u8 csi_current[2];
+ u8 csi_target[2];
+ u8 sigevm;
+ u8 max_ex_pwr;
+ u8 ex_intf_flag:1;
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 reserve:4;
+} __packed;
+
+struct tx_desc_92d {
+ u32 pktsize:16;
+ u32 offset:8;
+ u32 bmc:1;
+ u32 htc:1;
+ u32 lastseg:1;
+ u32 firstseg:1;
+ u32 linip:1;
+ u32 noacm:1;
+ u32 gf:1;
+ u32 own:1;
+
+ u32 macid:5;
+ u32 agg_en:1;
+ u32 bk:1;
+ u32 rdg_en:1;
+ u32 queuesel:5;
+ u32 rd_nav_ext:1;
+ u32 lsig_txop_en:1;
+ u32 pifs:1;
+ u32 rateid:4;
+ u32 nav_usehdr:1;
+ u32 en_descid:1;
+ u32 sectype:2;
+ u32 pktoffset:8;
+
+ u32 rts_rc:6;
+ u32 data_rc:6;
+ u32 rsvd0:2;
+ u32 bar_retryht:2;
+ u32 rsvd1:1;
+ u32 morefrag:1;
+ u32 raw:1;
+ u32 ccx:1;
+ u32 ampdudensity:3;
+ u32 rsvd2:1;
+ u32 ant_sela:1;
+ u32 ant_selb:1;
+ u32 txant_cck:2;
+ u32 txant_l:2;
+ u32 txant_ht:2;
+
+ u32 nextheadpage:8;
+ u32 tailpage:8;
+ u32 seq:12;
+ u32 pktid:4;
+
+ u32 rtsrate:5;
+ u32 apdcfe:1;
+ u32 qos:1;
+ u32 hwseq_enable:1;
+ u32 userrate:1;
+ u32 dis_rtsfb:1;
+ u32 dis_datafb:1;
+ u32 cts2self:1;
+ u32 rts_en:1;
+ u32 hwrts_en:1;
+ u32 portid:1;
+ u32 rsvd3:3;
+ u32 waitdcts:1;
+ u32 cts2ap_en:1;
+ u32 txsc:2;
+ u32 stbc:2;
+ u32 txshort:1;
+ u32 txbw:1;
+ u32 rtsshort:1;
+ u32 rtsbw:1;
+ u32 rtssc:2;
+ u32 rtsstbc:2;
+
+ u32 txrate:6;
+ u32 shortgi:1;
+ u32 ccxt:1;
+ u32 txrate_fb_lmt:5;
+ u32 rtsrate_fb_lmt:4;
+ u32 retrylmt_en:1;
+ u32 txretrylmt:6;
+ u32 usb_txaggnum:8;
+
+ u32 txagca:5;
+ u32 txagcb:5;
+ u32 usemaxlen:1;
+ u32 maxaggnum:5;
+ u32 mcsg1maxlen:4;
+ u32 mcsg2maxlen:4;
+ u32 mcsg3maxlen:4;
+ u32 mcs7sgimaxlen:4;
+
+ u32 txbuffersize:16;
+ u32 mcsg4maxlen:4;
+ u32 mcsg5maxlen:4;
+ u32 mcsg6maxlen:4;
+ u32 mcsg15sgimaxlen:4;
+
+ u32 txbuffaddr;
+ u32 txbufferaddr64;
+ u32 nextdescaddress;
+ u32 nextdescaddress64;
+
+ u32 reserve_pass_pcie_mm_limit[4];
+} __packed;
+
+struct rx_desc_92d {
+ u32 length:14;
+ u32 crc32:1;
+ u32 icverror:1;
+ u32 drv_infosize:4;
+ u32 security:3;
+ u32 qos:1;
+ u32 shift:2;
+ u32 phystatus:1;
+ u32 swdec:1;
+ u32 lastseg:1;
+ u32 firstseg:1;
+ u32 eor:1;
+ u32 own:1;
+
+ u32 macid:5;
+ u32 tid:4;
+ u32 hwrsvd:5;
+ u32 paggr:1;
+ u32 faggr:1;
+ u32 a1_fit:4;
+ u32 a2_fit:4;
+ u32 pam:1;
+ u32 pwr:1;
+ u32 moredata:1;
+ u32 morefrag:1;
+ u32 type:2;
+ u32 mc:1;
+ u32 bc:1;
+
+ u32 seq:12;
+ u32 frag:4;
+ u32 nextpktlen:14;
+ u32 nextind:1;
+ u32 rsvd:1;
+
+ u32 rxmcs:6;
+ u32 rxht:1;
+ u32 amsdu:1;
+ u32 splcp:1;
+ u32 bandwidth:1;
+ u32 htc:1;
+ u32 tcpchk_rpt:1;
+ u32 ipcchk_rpt:1;
+ u32 tcpchk_valid:1;
+ u32 hwpcerr:1;
+ u32 hwpcind:1;
+ u32 iv0:16;
+
+ u32 iv1;
+
+ u32 tsfl;
+
+ u32 bufferaddress;
+ u32 bufferaddress64;
+
+} __packed;
+
+void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr,
+ u8 *pdesc, struct ieee80211_tx_info *info,
+ struct sk_buff *skb, u8 hw_queue,
+ struct rtl_tcb_desc *ptcb_desc);
+bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb);
+void rtl92de_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+u32 rtl92de_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
+void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
+ bool b_firstseg, bool b_lastseg,
+ struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index da86db86fa4..4203a8531ca 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -222,7 +222,6 @@ static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw)
u32 low_rssi_thresh = 0;
u32 middle_rssi_thresh = 0;
u32 high_rssi_thresh = 0;
- u8 rssi_level;
struct ieee80211_sta *sta = NULL;
if (is_hal_stop(rtlhal))
@@ -272,18 +271,14 @@ static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw)
if (rtlpriv->dm.undecorated_smoothed_pwdb >
(long)high_rssi_thresh) {
ra->ratr_state = DM_RATR_STA_HIGH;
- rssi_level = 1;
} else if (rtlpriv->dm.undecorated_smoothed_pwdb >
(long)middle_rssi_thresh) {
ra->ratr_state = DM_RATR_STA_LOW;
- rssi_level = 3;
} else if (rtlpriv->dm.undecorated_smoothed_pwdb >
(long)low_rssi_thresh) {
ra->ratr_state = DM_RATR_STA_LOW;
- rssi_level = 5;
} else {
ra->ratr_state = DM_RATR_STA_ULTRALOW;
- rssi_level = 6;
}
if (ra->pre_ratr_state != ra->ratr_state) {
@@ -457,7 +452,7 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
- if (digtable.backoff_enable_flag == true)
+ if (digtable.backoff_enable_flag)
rtl92s_backoff_enable_flag(hw);
else
digtable.backoff_val = DM_DIG_BACKOFF;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
index 3b5af0113d7..6f91a148c22 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
@@ -358,7 +358,6 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
struct fw_priv *pfw_priv = NULL;
u8 *puc_mappedfile = NULL;
u32 ul_filelength = 0;
- u32 file_length = 0;
u8 fwhdr_size = RT_8192S_FIRMWARE_HDR_SIZE;
u8 fwstatus = FW_STATUS_INIT;
bool rtstatus = true;
@@ -370,7 +369,6 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
firmware->fwstatus = FW_STATUS_INIT;
puc_mappedfile = firmware->sz_fw_tmpbuffer;
- file_length = firmware->sz_fw_tmpbufferlen;
/* 1. Retrieve FW header. */
firmware->pfwheader = (struct fw_hdr *) puc_mappedfile;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 2e9005d0454..d59f66cb776 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -27,6 +27,8 @@
*
*****************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "../wifi.h"
#include "../efuse.h"
#include "../base.h"
@@ -465,8 +467,7 @@ static u8 _rtl92ce_halset_sysclk(struct ieee80211_hw *hw, u8 data)
if ((tmpvalue & BIT(6)))
break;
- printk(KERN_ERR "wait for BIT(6) return value %x\n",
- tmpvalue);
+ pr_err("wait for BIT(6) return value %x\n", tmpvalue);
if (waitcount == 0)
break;
@@ -516,7 +517,7 @@ static u8 _rtl92se_rf_onoff_detect(struct ieee80211_hw *hw)
mdelay(10);
/* check GPIO3 */
- u1tmp = rtl_read_byte(rtlpriv, GPIO_IN);
+ u1tmp = rtl_read_byte(rtlpriv, GPIO_IN_SE);
retval = (u1tmp & HAL_8192S_HW_GPIO_OFF_BIT) ? ERFON : ERFOFF;
return retval;
@@ -884,12 +885,10 @@ static void _rtl92se_hw_configure(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 reg_bw_opmode = 0;
- u32 reg_ratr = 0, reg_rrsr = 0;
+ u32 reg_rrsr = 0;
u8 regtmp = 0;
reg_bw_opmode = BW_OPMODE_20MHZ;
- reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG | RATE_ALL_OFDM_1SS |
- RATE_ALL_OFDM_2SS;
reg_rrsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
regtmp = rtl_read_byte(rtlpriv, INIRTSMCS_SEL);
@@ -996,7 +995,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
rtlpriv->psc.rfoff_reason = RF_CHANGE_BY_INIT;
rtlpriv->psc.rfpwr_state = ERFON;
- rtl_ps_set_rf_state(hw, ERFOFF, rfoffreason, true);
+ /* FIXME: check spinlocks if this block is uncommented */
+ rtl_ps_set_rf_state(hw, ERFOFF, rfoffreason);
} else {
/* gpio radio on/off is out of adapter start */
if (rtlpriv->psc.hwradiooff == false) {
@@ -1107,7 +1107,7 @@ void rtl92se_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
if (rtlpriv->psc.rfpwr_state != ERFON)
return;
- if (check_bssid == true) {
+ if (check_bssid) {
reg_rcr |= (RCR_CBSSID);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
} else if (check_bssid == false) {
@@ -1122,14 +1122,12 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
- enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
u32 temp;
bt_msr &= ~MSR_LINK_MASK;
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
bt_msr |= (MSR_LINK_NONE << MSR_LINK_SHIFT);
- ledaction = LED_CTL_LINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
("Set Network type to NO LINK!\n"));
break;
@@ -1140,7 +1138,6 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_STATION:
bt_msr |= (MSR_LINK_MANAGED << MSR_LINK_SHIFT);
- ledaction = LED_CTL_LINK;
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
("Set Network type to STA!\n"));
break;
@@ -1218,8 +1215,6 @@ void rtl92se_enable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, INTA_MASK, rtlpci->irq_mask[0]);
/* Support Bit 32-37(Assign as Bit 0-5) interrupt setting now */
rtl_write_dword(rtlpriv, INTA_MASK + 4, rtlpci->irq_mask[1] & 0x3F);
-
- rtlpci->irq_enabled = true;
}
void rtl92se_disable_interrupt(struct ieee80211_hw *hw)
@@ -1230,7 +1225,7 @@ void rtl92se_disable_interrupt(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, INTA_MASK, 0);
rtl_write_dword(rtlpriv, INTA_MASK + 4, 0);
- rtlpci->irq_enabled = false;
+ synchronize_irq(rtlpci->pdev->irq);
}
@@ -1261,8 +1256,7 @@ static u8 _rtl92s_set_sysclk(struct ieee80211_hw *hw, u8 data)
if ((tmp & BIT(6)))
break;
- printk(KERN_ERR "wait for BIT(6) return value %x\n",
- tmp);
+ pr_err("wait for BIT(6) return value %x\n", tmp);
if (waitcnt == 0)
break;
@@ -1321,7 +1315,7 @@ static void _rtl92s_phy_set_rfhalt(struct ieee80211_hw *hw)
if (u1btmp & BIT(7)) {
u1btmp &= ~(BIT(6) | BIT(7));
if (!_rtl92s_set_sysclk(hw, u1btmp)) {
- printk(KERN_ERR "Switch ctrl path fail\n");
+ pr_err("Switch ctrl path fail\n");
return;
}
}
@@ -1655,7 +1649,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag = false;
}
- if (rtlefuse->autoload_failflag == true)
+ if (rtlefuse->autoload_failflag)
return;
_rtl8192se_get_IC_Inferiority(hw);
@@ -1688,7 +1682,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, MACIDR0 + i, rtlefuse->dev_addr[i]);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- (MAC_FMT "\n", MAC_ARG(rtlefuse->dev_addr)));
+ ("%pM\n", rtlefuse->dev_addr));
/* Get Tx Power Level by Channel */
/* Read Tx power of Channel 1 ~ 14 from EEPROM. */
@@ -2271,7 +2265,7 @@ bool rtl92se_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- enum rf_pwrstate rfpwr_toset, cur_rfstate;
+ enum rf_pwrstate rfpwr_toset /*, cur_rfstate */;
unsigned long flag = 0;
bool actuallyset = false;
bool turnonbypowerdomain = false;
@@ -2292,7 +2286,7 @@ bool rtl92se_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
}
- cur_rfstate = ppsc->rfpwr_state;
+ /* cur_rfstate = ppsc->rfpwr_state;*/
/* because after _rtl92s_phy_set_rfhalt, all power
* closed, so we must open some power for GPIO check,
@@ -2305,7 +2299,7 @@ bool rtl92se_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
rfpwr_toset = _rtl92se_rf_onoff_detect(hw);
- if ((ppsc->hwradiooff == true) && (rfpwr_toset == ERFON)) {
+ if ((ppsc->hwradiooff) && (rfpwr_toset == ERFON)) {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
("RFKILL-HW Radio ON, RF ON\n"));
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/rtlwifi/rtl8192se/led.c
index 6d4f6661668..e3fe7c90ebf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/led.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/led.c
@@ -90,7 +90,7 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
case LED_PIN_LED0:
ledcfg &= 0xf0;
- if (pcipriv->ledctl.led_opendrain == true)
+ if (pcipriv->ledctl.led_opendrain)
rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(1)));
else
rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(3)));
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 63b45e60a95..f27171af979 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -27,6 +27,8 @@
*
*****************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "../wifi.h"
#include "../pci.h"
#include "../ps.h"
@@ -180,19 +182,18 @@ u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- unsigned long flags;
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), rfpath(%#x), "
"bitmask(%#x)\n", regaddr, rfpath, bitmask));
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
original_value = _rtl92s_phy_rf_serial_read(hw, rfpath, regaddr);
bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), rfpath(%#x), "
"bitmask(%#x), original_value(%#x)\n", regaddr, rfpath,
@@ -207,7 +208,6 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u32 original_value, bitshift;
- unsigned long flags;
if (!((rtlphy->rf_pathmap >> rfpath) & 0x1))
return;
@@ -215,7 +215,7 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
" data(%#x), rfpath(%#x)\n", regaddr, bitmask, data, rfpath));
- spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+ spin_lock(&rtlpriv->locks.rf_lock);
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92s_phy_rf_serial_read(hw, rfpath,
@@ -226,7 +226,7 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
_rtl92s_phy_rf_serial_write(hw, rfpath, regaddr, data);
- spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+ spin_unlock(&rtlpriv->locks.rf_lock);
RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x), "
"data(%#x), rfpath(%#x)\n", regaddr, bitmask, data, rfpath));
@@ -263,7 +263,6 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u8 reg_bw_opmode;
- u8 reg_prsr_rsc;
RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("Switch to %s bandwidth\n",
rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
@@ -277,7 +276,8 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
rtlphy->set_bwmode_inprogress = true;
reg_bw_opmode = rtl_read_byte(rtlpriv, BW_OPMODE);
- reg_prsr_rsc = rtl_read_byte(rtlpriv, RRSR + 2);
+ /* dummy read */
+ rtl_read_byte(rtlpriv, RRSR + 2);
switch (rtlphy->current_chan_bw) {
case HT_CHANNEL_WIDTH_20:
@@ -546,8 +546,6 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
if (rfpwr_state == ppsc->rfpwr_state)
return false;
- ppsc->set_rfpowerstate_inprogress = true;
-
switch (rfpwr_state) {
case ERFON:{
if ((ppsc->rfpwr_state == ERFOFF) &&
@@ -659,8 +657,6 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
if (bresult)
ppsc->rfpwr_state = rfpwr_state;
- ppsc->set_rfpowerstate_inprogress = false;
-
return bresult;
}
@@ -1022,8 +1018,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw)
rtstatus = _rtl92s_phy_config_bb(hw, BASEBAND_CONFIG_AGC_TAB);
if (rtstatus != true) {
- printk(KERN_ERR "_rtl92s_phy_bb_config_parafile(): "
- "AGC Table Fail\n");
+ pr_err("%s(): AGC Table Fail\n", __func__);
goto phy_BB8190_Config_ParaFile_Fail;
}
@@ -1422,7 +1417,7 @@ static void _rtl92s_phy_set_fwcmd_io(struct ieee80211_hw *hw)
break;
case FW_CMD_HIGH_PWR_ENABLE:
if ((rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) ||
- (rtlpriv->dm.dynamic_txpower_enable == true))
+ rtlpriv->dm.dynamic_txpower_enable)
break;
/* CCA threshold */
@@ -1614,7 +1609,7 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
fw_cmdmap &= ~FW_DIG_ENABLE_CTL;
if ((rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) ||
- (rtlpriv->dm.dynamic_txpower_enable == true))
+ rtlpriv->dm.dynamic_txpower_enable)
fw_cmdmap &= ~FW_HIGH_PWR_ENABLE_CTL;
if ((digtable.dig_ext_port_stage ==
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
index 0116eaddbfa..ea32ef2d409 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/reg.h
@@ -248,12 +248,8 @@
#define PSTIME 0x02E0
#define TIMER0 0x02E4
#define TIMER1 0x02E8
-#define GPIO_CTRL 0x02EC
-#define GPIO_IN 0x02EC
-#define GPIO_OUT 0x02ED
+#define GPIO_IN_SE 0x02EC
#define GPIO_IO_SEL 0x02EE
-#define GPIO_MOD 0x02EF
-#define GPIO_INTCTRL 0x02F0
#define MAC_PINMUX_CFG 0x02F1
#define LEDCFG 0x02F2
#define PHY_REG 0x02F3
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
index 1d3a4833039..0ad50fe44aa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
@@ -27,6 +27,8 @@
*
*****************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "../wifi.h"
#include "reg.h"
#include "def.h"
@@ -410,7 +412,7 @@ void rtl92s_phy_rf6052_set_ccktxpower(struct ieee80211_hw *hw, u8 pwrlevel)
(rtlefuse->eeprom_regulatory != 0)))
dont_inc_cck_or_turboscanoff = true;
- if (mac->act_scanning == true) {
+ if (mac->act_scanning) {
txagc = 0x3f;
if (dont_inc_cck_or_turboscanoff)
txagc = pwrlevel;
@@ -507,7 +509,7 @@ bool rtl92s_phy_rf6052_config(struct ieee80211_hw *hw)
}
if (rtstatus != true) {
- printk(KERN_ERR "Radio[%d] Fail!!", rfpath);
+ pr_err("Radio[%d] Fail!!\n", rfpath);
goto fail;
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 1c6cb1d7d66..3876078a63d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -27,6 +27,8 @@
*
*****************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/vmalloc.h>
#include "../wifi.h"
@@ -183,8 +185,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
- printk(KERN_INFO "rtl8192se: Driver for Realtek RTL8192SE/RTL8191SE\n"
- " Loading firmware %s\n", rtlpriv->cfg->fw_name);
+ pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n"
+ "Loading firmware %s\n", rtlpriv->cfg->fw_name);
/* request fw */
err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
rtlpriv->io.dev);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 5cf442373d4..cffe30851f7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -581,7 +581,6 @@ static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr;
u8 *tmp_buf;
u8 *praddr;
- u8 *psaddr;
__le16 fc;
u16 type, cfc;
bool packet_matchbssid, packet_toself, packet_beacon;
@@ -593,7 +592,6 @@ static void _rtl92se_translate_rx_signal_stuff(struct ieee80211_hw *hw,
cfc = le16_to_cpu(fc);
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
- psaddr = hdr->addr2;
packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
(!compare_ether_addr(mac->bssid, (cfc & IEEE80211_FCTL_TODS) ?
@@ -663,7 +661,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
rx_status->mactime = GET_RX_STATUS_DESC_TSFL(pdesc);
- if (phystatus == true) {
+ if (phystatus) {
p_drvinfo = (struct rx_fwinfo *)(skb->data +
stats->rx_bufshift);
_rtl92se_translate_rx_signal_stuff(hw, skb, stats, pdesc,
@@ -875,6 +873,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+ wmb();
SET_TX_DESC_OWN(pdesc, 1);
} else { /* H2C Command Desc format (Host TXCMD) */
/* 92SE must set as 1 for firmware download HW DMA error */
@@ -893,6 +892,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
+ wmb();
SET_TX_DESC_OWN(pdesc, 1);
}
@@ -900,9 +900,10 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
{
- if (istx == true) {
+ if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
+ wmb();
SET_TX_DESC_OWN(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR:
@@ -916,6 +917,7 @@ void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
} else {
switch (desc_name) {
case HW_DESC_RXOWN:
+ wmb();
SET_RX_STATUS_DESC_OWN(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR:
@@ -939,7 +941,7 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name)
{
u32 ret = 0;
- if (istx == true) {
+ if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
ret = GET_TX_DESC_OWN(desc);
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index a9367eba1ea..8b1cef0ffde 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -24,6 +24,9 @@
* Hsinchu 300, Taiwan.
*
*****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/usb.h>
#include "core.h"
#include "wifi.h"
@@ -104,9 +107,8 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
pdata, len, 0); /* max. timeout */
if (status < 0)
- printk(KERN_ERR "reg 0x%x, usbctrl_vendorreq TimeOut! "
- "status:0x%x value=0x%x\n", value, status,
- *(u32 *)pdata);
+ pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
+ value, status, *(u32 *)pdata);
return status;
}
@@ -316,7 +318,7 @@ static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
rtlusb->usb_rx_segregate_hdl =
rtlpriv->cfg->usb_interface_cfg->usb_rx_segregate_hdl;
- printk(KERN_INFO "rtl8192cu: rx_max_size %d, rx_urb_num %d, in_ep %d\n",
+ pr_info("rx_max_size %d, rx_urb_num %d, in_ep %d\n",
rtlusb->rx_max_size, rtlusb->rx_urb_num, rtlusb->in_ep);
init_usb_anchor(&rtlusb->rx_submitted);
return 0;
@@ -580,7 +582,7 @@ static void _rtl_rx_completed(struct urb *_urb)
} else{
/* TO DO */
_rtl_rx_pre_process(hw, skb);
- printk(KERN_ERR "rtlwifi: rx agg not supported\n");
+ pr_err("rx agg not supported\n");
}
goto resubmit;
}
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 693395ee98f..d3c3ffd3898 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -32,7 +32,6 @@
#include <linux/sched.h>
#include <linux/firmware.h>
-#include <linux/version.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/usb.h>
@@ -303,9 +302,6 @@ enum hw_variables {
HW_VAR_DATA_FILTER,
};
-#define HWSET_MAX_SIZE 128
-#define EFUSE_MAX_SECTION 16
-
enum _RT_MEDIA_STATUS {
RT_MEDIA_DISCONNECT = 0,
RT_MEDIA_CONNECT = 1
@@ -938,7 +934,7 @@ struct rtl_mac {
int n_channels;
int n_bitrates;
- bool offchan_deley;
+ bool offchan_delay;
/*filters */
u32 rx_conf;
@@ -1188,7 +1184,6 @@ struct rtl_efuse {
struct rtl_ps_ctl {
bool pwrdomain_protect;
- bool set_rfpowerstate_inprogress;
bool in_powersavemode;
bool rfchange_inprogress;
bool swrf_processing;
@@ -1536,6 +1531,7 @@ struct rtl_works {
/* For SW LPS */
struct delayed_work ps_work;
struct delayed_work ps_rfon_wq;
+ struct tasklet_struct ips_leave_tasklet;
};
struct rtl_debug {
@@ -1983,7 +1979,7 @@ static inline u16 rtl_get_tid(struct sk_buff *skb)
static inline struct ieee80211_sta *get_sta(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- u8 *bssid)
+ const u8 *bssid)
{
return ieee80211_find_sta(vif, bssid);
}
diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/wl1251/acx.c
index ef8370edace..ad87a1ac646 100644
--- a/drivers/net/wireless/wl1251/acx.c
+++ b/drivers/net/wireless/wl1251/acx.c
@@ -140,8 +140,6 @@ int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth)
auth->sleep_auth = sleep_auth;
ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
- if (ret < 0)
- return ret;
out:
kfree(auth);
@@ -681,10 +679,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl)
ret = wl1251_cmd_configure(wl, ACX_CCA_THRESHOLD,
detection, sizeof(*detection));
- if (ret < 0) {
+ if (ret < 0)
wl1251_warning("failed to set cca threshold: %d", ret);
- return ret;
- }
out:
kfree(detection);
diff --git a/drivers/net/wireless/wl1251/cmd.c b/drivers/net/wireless/wl1251/cmd.c
index 81f164bc488..d14d69d733a 100644
--- a/drivers/net/wireless/wl1251/cmd.c
+++ b/drivers/net/wireless/wl1251/cmd.c
@@ -241,7 +241,7 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
if (ret < 0) {
wl1251_error("tx %s cmd for channel %d failed",
enable ? "start" : "stop", channel);
- return ret;
+ goto out;
}
wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
index f51a0241a44..f78694295c3 100644
--- a/drivers/net/wireless/wl1251/sdio.c
+++ b/drivers/net/wireless/wl1251/sdio.c
@@ -19,6 +19,7 @@
* Copyright (C) 2008 Google Inc
* Copyright (C) 2009 Bob Copeland (me@bobcopeland.com)
*/
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/mmc/sdio_func.h>
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
index af6448c4d3e..eaa5f955620 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/wl1251/spi.c
@@ -19,6 +19,7 @@
*
*/
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 35ce7b0f4a6..07bcb1548d8 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -11,7 +11,6 @@ config WL12XX
depends on WL12XX_MENU && GENERIC_HARDIRQS
depends on INET
select FW_LOADER
- select CRC7
---help---
This module adds support for wireless adapters based on TI wl1271 and
TI wl1273 chipsets. This module does *not* include support for wl1251.
@@ -33,6 +32,7 @@ config WL12XX_HT
config WL12XX_SPI
tristate "TI wl12xx SPI support"
depends on WL12XX && SPI_MASTER
+ select CRC7
---help---
This module adds support for the SPI interface of adapters using
TI wl12xx chipsets. Select this if your platform is using
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index c6ee530e5bf..34f6ab53e51 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
@@ -78,8 +77,6 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
auth->sleep_auth = sleep_auth;
ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
- if (ret < 0)
- return ret;
out:
kfree(auth);
@@ -91,7 +88,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
struct acx_current_tx_power *acx;
int ret;
- wl1271_debug(DEBUG_ACX, "acx dot11_cur_tx_pwr");
+ wl1271_debug(DEBUG_ACX, "acx dot11_cur_tx_pwr %d", power);
if (power < 0 || power > 25)
return -EINVAL;
@@ -625,10 +622,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl)
ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD,
detection, sizeof(*detection));
- if (ret < 0) {
+ if (ret < 0)
wl1271_warning("failed to set cca threshold: %d", ret);
- return ret;
- }
out:
kfree(detection);
@@ -1068,6 +1063,7 @@ int wl1271_acx_sta_mem_cfg(struct wl1271 *wl)
mem_conf->tx_free_req = mem->min_req_tx_blocks;
mem_conf->rx_free_req = mem->min_req_rx_blocks;
mem_conf->tx_min = mem->tx_min;
+ mem_conf->fwlog_blocks = wl->conf.fwlog.mem_blocks;
ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
sizeof(*mem_conf));
@@ -1577,22 +1573,69 @@ out:
return ret;
}
-int wl1271_acx_max_tx_retry(struct wl1271 *wl)
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
+{
+ struct wl1271_acx_ps_rx_streaming *rx_streaming;
+ u32 conf_queues, enable_queues;
+ int i, ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx ps rx streaming");
+
+ rx_streaming = kzalloc(sizeof(*rx_streaming), GFP_KERNEL);
+ if (!rx_streaming) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ conf_queues = wl->conf.rx_streaming.queues;
+ if (enable)
+ enable_queues = conf_queues;
+ else
+ enable_queues = 0;
+
+ for (i = 0; i < 8; i++) {
+ /*
+ * Skip unchanged queues to avoid redundant ACXs.
+ * This check assumes conf.rx_streaming.queues can't
+ * be changed while rx_streaming is enabled.
+ */
+ if (!(conf_queues & BIT(i)))
+ continue;
+
+ rx_streaming->tid = i;
+ rx_streaming->enable = enable_queues & BIT(i);
+ rx_streaming->period = wl->conf.rx_streaming.interval;
+ rx_streaming->timeout = wl->conf.rx_streaming.interval;
+
+ ret = wl1271_cmd_configure(wl, ACX_PS_RX_STREAMING,
+ rx_streaming,
+ sizeof(*rx_streaming));
+ if (ret < 0) {
+ wl1271_warning("acx ps rx streaming failed: %d", ret);
+ goto out;
+ }
+ }
+out:
+ kfree(rx_streaming);
+ return ret;
+}
+
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
{
- struct wl1271_acx_max_tx_retry *acx = NULL;
+ struct wl1271_acx_ap_max_tx_retry *acx = NULL;
int ret;
- wl1271_debug(DEBUG_ACX, "acx max tx retry");
+ wl1271_debug(DEBUG_ACX, "acx ap max tx retry");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
- acx->max_tx_retry = cpu_to_le16(wl->conf.tx.ap_max_tx_retries);
+ acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
if (ret < 0) {
- wl1271_warning("acx max tx retry failed: %d", ret);
+ wl1271_warning("acx ap max tx retry failed: %d", ret);
goto out;
}
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index 9a895e3cc61..d2eb86eccc0 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -828,6 +828,8 @@ struct wl1271_acx_sta_config_memory {
u8 tx_free_req;
u8 rx_free_req;
u8 tx_min;
+ u8 fwlog_blocks;
+ u8 padding[3];
} __packed;
struct wl1271_acx_mem_map {
@@ -1153,7 +1155,20 @@ struct wl1271_acx_fw_tsf_information {
u8 padding[3];
} __packed;
-struct wl1271_acx_max_tx_retry {
+struct wl1271_acx_ps_rx_streaming {
+ struct acx_header header;
+
+ u8 tid;
+ u8 enable;
+
+ /* interval between triggers (10-100 msec) */
+ u8 period;
+
+ /* timeout before first trigger (0-200 msec) */
+ u8 timeout;
+} __packed;
+
+struct wl1271_acx_ap_max_tx_retry {
struct acx_header header;
/*
@@ -1384,7 +1399,8 @@ int wl1271_acx_set_ba_session(struct wl1271 *wl,
int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
bool enable);
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
-int wl1271_acx_max_tx_retry(struct wl1271 *wl);
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable);
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl);
int wl1271_acx_config_ps(struct wl1271 *wl);
int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
int wl1271_acx_set_ap_beacon_filter(struct wl1271 *wl, bool enable);
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index b07f8b7e5f1..5ebc64d8940 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -102,6 +102,33 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
}
+static unsigned int wl12xx_get_fw_ver_quirks(struct wl1271 *wl)
+{
+ unsigned int quirks = 0;
+ unsigned int *fw_ver = wl->chip.fw_ver;
+
+ /* Only for wl127x */
+ if ((fw_ver[FW_VER_CHIP] == FW_VER_CHIP_WL127X) &&
+ /* Check STA version */
+ (((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
+ (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_STA_MIN)) ||
+ /* Check AP version */
+ ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP) &&
+ (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_AP_MIN))))
+ quirks |= WL12XX_QUIRK_USE_2_SPARE_BLOCKS;
+
+ /* Only new station firmwares support routing fw logs to the host */
+ if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
+ (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN))
+ quirks |= WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED;
+
+ /* This feature is not yet supported for AP mode */
+ if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP)
+ quirks |= WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED;
+
+ return quirks;
+}
+
static void wl1271_parse_fw_ver(struct wl1271 *wl)
{
int ret;
@@ -116,6 +143,9 @@ static void wl1271_parse_fw_ver(struct wl1271 *wl)
memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
return;
}
+
+ /* Check if any quirks are needed with older fw versions */
+ wl->quirks |= wl12xx_get_fw_ver_quirks(wl);
}
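
wl12xx_get_fw_ver_quirks() keys off the numeric fields of wl->chip.fw_ver[], which wl1271_parse_fw_ver() (body largely elided in this hunk) derives from the firmware version string. A hedged sketch of that kind of parse, assuming a dotted "Rev a.b.c.d.e" style string and the FW_VER_* field order referenced above; the exact format and prefix length are assumptions:

#include <stdio.h>
#include <string.h>

/* Sketch only: split a dotted version string into the five numeric fields
 * consulted by wl12xx_get_fw_ver_quirks(). Prefix and field order are
 * assumptions, not taken from this hunk. */
static int parse_fw_ver_str(const char *fw_ver_str, unsigned int fw_ver[5])
{
        int ret;

        if (strlen(fw_ver_str) < 5)
                return -1;

        ret = sscanf(fw_ver_str + 4, "%u.%u.%u.%u.%u",
                     &fw_ver[0],        /* FW_VER_CHIP    */
                     &fw_ver[1],        /* FW_VER_IF_TYPE */
                     &fw_ver[2],        /* major (assumed) */
                     &fw_ver[3],        /* subtype (assumed) */
                     &fw_ver[4]);       /* FW_VER_MINOR   */

        return (ret == 5) ? 0 : -1;
}
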
static void wl1271_boot_fw_version(struct wl1271 *wl)
@@ -483,9 +513,12 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
PERIODIC_SCAN_COMPLETE_EVENT_ID;
if (wl->bss_type == BSS_TYPE_AP_BSS)
- wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID;
+ wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID |
+ INACTIVE_STA_EVENT_ID |
+ MAX_TX_RETRY_EVENT_ID;
else
- wl->event_mask |= DUMMY_PACKET_EVENT_ID;
+ wl->event_mask |= DUMMY_PACKET_EVENT_ID |
+ BA_SESSION_RX_CONSTRAINT_EVENT_ID;
ret = wl1271_event_unmask(wl);
if (ret < 0) {
@@ -748,6 +781,9 @@ int wl1271_load_firmware(struct wl1271 *wl)
clk |= (wl->ref_clock << 1) << 4;
}
+ if (wl->quirks & WL12XX_QUIRK_LPD_MODE)
+ clk |= SCRATCH_ENABLE_LPD;
+
wl1271_write32(wl, DRPW_SCRATCH_START, clk);
wl1271_set_partition(wl, &part_table[PART_WORK]);
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index 42935ac7266..97dd237a958 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
@@ -106,7 +105,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
fail:
WARN_ON(1);
- ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ wl12xx_queue_recovery_work(wl);
return ret;
}
@@ -135,6 +134,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
/* Override the REF CLK from the NVS with the one from platform data */
gen_parms->general_params.ref_clock = wl->ref_clock;
+ /* LPD mode enable (bits 6-7) in WL1271 AP mode only */
+ if (wl->quirks & WL12XX_QUIRK_LPD_MODE)
+ gen_parms->general_params.general_settings |=
+ GENERAL_SETTINGS_DRPW_LPD;
+
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
if (ret < 0) {
wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
@@ -352,7 +356,7 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask);
if (ret != 0) {
- ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ wl12xx_queue_recovery_work(wl);
return ret;
}
@@ -396,10 +400,6 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET;
- /* reset TX security counters */
- wl->tx_security_last_seq = 0;
- wl->tx_security_seq = 0;
-
wl1271_debug(DEBUG_CMD, "cmd join: basic_rate_set=0x%x, rate_set=0x%x",
join->basic_rate_set, join->supported_rate_set);
@@ -1080,7 +1080,7 @@ int wl1271_cmd_start_bss(struct wl1271 *wl)
memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN);
- cmd->aging_period = cpu_to_le16(WL1271_AP_DEF_INACTIV_SEC);
+ cmd->aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
cmd->bss_index = WL1271_AP_BSS_INDEX;
cmd->global_hlid = WL1271_AP_GLOBAL_HLID;
cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID;
@@ -1167,14 +1167,7 @@ int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
cmd->bss_index = WL1271_AP_BSS_INDEX;
cmd->aid = sta->aid;
cmd->hlid = hlid;
-
- /*
- * FIXME: Does STA support QOS? We need to propagate this info from
- * hostapd. Currently not that important since this is only used for
- * sending the correct flavor of null-data packet in response to a
- * trigger.
- */
- cmd->wmm = 0;
+ cmd->wmm = sta->wme ? 1 : 0;
cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl,
sta->supp_rates[wl->band]));
@@ -1230,3 +1223,87 @@ out_free:
out:
return ret;
}
+
+int wl12xx_cmd_config_fwlog(struct wl1271 *wl)
+{
+ struct wl12xx_cmd_config_fwlog *cmd;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd config firmware logger");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->logger_mode = wl->conf.fwlog.mode;
+ cmd->log_severity = wl->conf.fwlog.severity;
+ cmd->timestamp = wl->conf.fwlog.timestamp;
+ cmd->output = wl->conf.fwlog.output;
+ cmd->threshold = wl->conf.fwlog.threshold;
+
+ ret = wl1271_cmd_send(wl, CMD_CONFIG_FWLOGGER, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send config firmware logger command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl12xx_cmd_start_fwlog(struct wl1271 *wl)
+{
+ struct wl12xx_cmd_start_fwlog *cmd;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd start firmware logger");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = wl1271_cmd_send(wl, CMD_START_FWLOGGER, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send start firmware logger command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl12xx_cmd_stop_fwlog(struct wl1271 *wl)
+{
+ struct wl12xx_cmd_stop_fwlog *cmd;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd stop firmware logger");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = wl1271_cmd_send(wl, CMD_STOP_FWLOGGER, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to send stop firmware logger command");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index 5cac95d9480..1f7037292c1 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -70,6 +70,9 @@ int wl1271_cmd_start_bss(struct wl1271 *wl);
int wl1271_cmd_stop_bss(struct wl1271 *wl);
int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid);
+int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
+int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
+int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
enum wl1271_commands {
CMD_INTERROGATE = 1, /*use this to read information elements*/
@@ -107,6 +110,9 @@ enum wl1271_commands {
CMD_START_PERIODIC_SCAN = 50,
CMD_STOP_PERIODIC_SCAN = 51,
CMD_SET_STA_STATE = 52,
+ CMD_CONFIG_FWLOGGER = 53,
+ CMD_START_FWLOGGER = 54,
+ CMD_STOP_FWLOGGER = 55,
/* AP mode commands */
CMD_BSS_START = 60,
@@ -575,4 +581,60 @@ struct wl1271_cmd_remove_sta {
u8 padding1;
} __packed;
+/*
+ * Continuous mode - packets are transferred to the host periodically
+ * via the data path.
+ * On demand - Log messages are stored in a cyclic buffer in the
+ * firmware, and only transferred to the host when explicitly requested
+ */
+enum wl12xx_fwlogger_log_mode {
+ WL12XX_FWLOG_CONTINUOUS,
+ WL12XX_FWLOG_ON_DEMAND
+};
+
+/* Include/exclude timestamps from the log messages */
+enum wl12xx_fwlogger_timestamp {
+ WL12XX_FWLOG_TIMESTAMP_DISABLED,
+ WL12XX_FWLOG_TIMESTAMP_ENABLED
+};
+
+/*
+ * Logs can be routed to the debug pinouts (where available), to the host bus
+ * (SDIO/SPI), or dropped
+ */
+enum wl12xx_fwlogger_output {
+ WL12XX_FWLOG_OUTPUT_NONE,
+ WL12XX_FWLOG_OUTPUT_DBG_PINS,
+ WL12XX_FWLOG_OUTPUT_HOST,
+};
+
+struct wl12xx_cmd_config_fwlog {
+ struct wl1271_cmd_header header;
+
+ /* See enum wl12xx_fwlogger_log_mode */
+ u8 logger_mode;
+
+ /* Minimum log level threshold */
+ u8 log_severity;
+
+ /* Include/exclude timestamps from the log messages */
+ u8 timestamp;
+
+ /* See enum wl12xx_fwlogger_output */
+ u8 output;
+
+ /* Regulates the frequency of log messages */
+ u8 threshold;
+
+ u8 padding[3];
+} __packed;
+
+struct wl12xx_cmd_start_fwlog {
+ struct wl1271_cmd_header header;
+} __packed;
+
+struct wl12xx_cmd_stop_fwlog {
+ struct wl1271_cmd_header header;
+} __packed;
+
#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index c83fefb6662..6080e01d92c 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -713,8 +713,16 @@ struct conf_tx_settings {
/*
* AP-mode - allow this number of TX retries to a station before an
* event is triggered from FW.
+ * In AP-mode the hlids of unreachable stations are given in the
+ * "sta_tx_retry_exceeded" member in the event mailbox.
*/
- u16 ap_max_tx_retries;
+ u8 max_tx_retries;
+
+ /*
+ * AP-mode - after this number of seconds a connected station is
+ * considered inactive.
+ */
+ u16 ap_aging_period;
/*
* Configuration for TID parameters.
@@ -1248,6 +1256,59 @@ struct conf_fm_coex {
u8 swallow_clk_diff;
};
+struct conf_rx_streaming_settings {
+ /*
+ * RX Streaming duration (in msec) from last tx/rx
+ *
+ * Range: u32
+ */
+ u32 duration;
+
+ /*
+ * Bitmap of tids to be polled during RX streaming.
+ * (Note: it doesn't look like it really matters)
+ *
+ * Range: 0x1-0xff
+ */
+ u8 queues;
+
+ /*
+ * RX Streaming interval.
+ * (Note: this value is also used as the rx streaming timeout)
+ * Range: 0 (disabled), 10 - 100
+ */
+ u8 interval;
+
+ /*
+ * enable rx streaming also when there is no coex activity
+ */
+ u8 always;
+};
+
+struct conf_fwlog {
+ /* Continuous or on-demand */
+ u8 mode;
+
+ /*
+ * Number of memory blocks dedicated for the FW logger
+ *
+ * Range: 1-3, or 0 to disable the FW logger
+ */
+ u8 mem_blocks;
+
+ /* Minimum log level threshold */
+ u8 severity;
+
+ /* Include/exclude timestamps from the log messages */
+ u8 timestamp;
+
+ /* See enum wl12xx_fwlogger_output */
+ u8 output;
+
+ /* Regulates the frequency of log messages */
+ u8 threshold;
+};
+
struct conf_drv_settings {
struct conf_sg_settings sg;
struct conf_rx_settings rx;
@@ -1263,6 +1324,8 @@ struct conf_drv_settings {
struct conf_memory_settings mem_wl127x;
struct conf_memory_settings mem_wl128x;
struct conf_fm_coex fm_coex;
+ struct conf_rx_streaming_settings rx_streaming;
+ struct conf_fwlog fwlog;
u8 hci_io_ds;
};
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index f1f8df9b6cd..37934b5601c 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -30,6 +30,7 @@
#include "acx.h"
#include "ps.h"
#include "io.h"
+#include "tx.h"
/* ms */
#define WL1271_DEBUGFS_STATS_LIFETIME 1000
@@ -71,6 +72,14 @@ static const struct file_operations name## _ops = { \
if (!entry || IS_ERR(entry)) \
goto err; \
+#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \
+ do { \
+ entry = debugfs_create_file(#name, 0400, parent, \
+ wl, &prefix## _## name## _ops); \
+ if (!entry || IS_ERR(entry)) \
+ goto err; \
+ } while (0);
+
#define DEBUGFS_FWSTATS_FILE(sub, name, fmt) \
static ssize_t sub## _ ##name## _read(struct file *file, \
char __user *userbuf, \
@@ -225,7 +234,7 @@ static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
char buf[20];
int res;
- queue_len = wl->tx_queue_count;
+ queue_len = wl1271_tx_total_queue_count(wl);
res = scnprintf(buf, sizeof(buf), "%u\n", queue_len);
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
@@ -298,7 +307,7 @@ static ssize_t start_recovery_write(struct file *file,
struct wl1271 *wl = file->private_data;
mutex_lock(&wl->mutex);
- ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ wl12xx_queue_recovery_work(wl);
mutex_unlock(&wl->mutex);
return count;
@@ -330,10 +339,16 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
#define DRIVER_STATE_PRINT_HEX(x) DRIVER_STATE_PRINT(x, "0x%x")
DRIVER_STATE_PRINT_INT(tx_blocks_available);
- DRIVER_STATE_PRINT_INT(tx_allocated_blocks);
+ DRIVER_STATE_PRINT_INT(tx_allocated_blocks[0]);
+ DRIVER_STATE_PRINT_INT(tx_allocated_blocks[1]);
+ DRIVER_STATE_PRINT_INT(tx_allocated_blocks[2]);
+ DRIVER_STATE_PRINT_INT(tx_allocated_blocks[3]);
DRIVER_STATE_PRINT_INT(tx_frames_cnt);
DRIVER_STATE_PRINT_LHEX(tx_frames_map[0]);
- DRIVER_STATE_PRINT_INT(tx_queue_count);
+ DRIVER_STATE_PRINT_INT(tx_queue_count[0]);
+ DRIVER_STATE_PRINT_INT(tx_queue_count[1]);
+ DRIVER_STATE_PRINT_INT(tx_queue_count[2]);
+ DRIVER_STATE_PRINT_INT(tx_queue_count[3]);
DRIVER_STATE_PRINT_INT(tx_packets_count);
DRIVER_STATE_PRINT_INT(tx_results_count);
DRIVER_STATE_PRINT_LHEX(flags);
@@ -341,7 +356,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_INT(tx_blocks_freed[1]);
DRIVER_STATE_PRINT_INT(tx_blocks_freed[2]);
DRIVER_STATE_PRINT_INT(tx_blocks_freed[3]);
- DRIVER_STATE_PRINT_INT(tx_security_last_seq);
+ DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb);
DRIVER_STATE_PRINT_INT(rx_counter);
DRIVER_STATE_PRINT_INT(session_counter);
DRIVER_STATE_PRINT_INT(state);
@@ -527,11 +542,129 @@ static const struct file_operations beacon_interval_ops = {
.llseek = default_llseek,
};
+static ssize_t rx_streaming_interval_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ char buf[10];
+ size_t len;
+ unsigned long value;
+ int ret;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value in rx_streaming_interval!");
+ return -EINVAL;
+ }
+
+ /* valid values: 0, 10-100 */
+ if (value && (value < 10 || value > 100)) {
+ wl1271_warning("value is not in range!");
+ return -ERANGE;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ wl->conf.rx_streaming.interval = value;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ wl1271_recalc_rx_streaming(wl);
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static ssize_t rx_streaming_interval_read(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ return wl1271_format_buffer(userbuf, count, ppos,
+ "%d\n", wl->conf.rx_streaming.interval);
+}
+
+static const struct file_operations rx_streaming_interval_ops = {
+ .read = rx_streaming_interval_read,
+ .write = rx_streaming_interval_write,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t rx_streaming_always_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ char buf[10];
+ size_t len;
+ unsigned long value;
+ int ret;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value in rx_streaming_write!");
+ return -EINVAL;
+ }
+
+ /* valid values: 0, 1 */
+ if (!(value == 0 || value == 1)) {
+ wl1271_warning("value is not valid!");
+ return -EINVAL;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ wl->conf.rx_streaming.always = value;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ wl1271_recalc_rx_streaming(wl);
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static ssize_t rx_streaming_always_read(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ return wl1271_format_buffer(userbuf, count, ppos,
+ "%d\n", wl->conf.rx_streaming.always);
+}
+
+static const struct file_operations rx_streaming_always_ops = {
+ .read = rx_streaming_always_read,
+ .write = rx_streaming_always_write,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
static int wl1271_debugfs_add_files(struct wl1271 *wl,
struct dentry *rootdir)
{
int ret = 0;
- struct dentry *entry, *stats;
+ struct dentry *entry, *stats, *streaming;
stats = debugfs_create_dir("fw-statistics", rootdir);
if (!stats || IS_ERR(stats)) {
@@ -640,6 +773,14 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(dtim_interval, rootdir);
DEBUGFS_ADD(beacon_interval, rootdir);
+ streaming = debugfs_create_dir("rx_streaming", rootdir);
+ if (!streaming || IS_ERR(streaming))
+ goto err;
+
+ DEBUGFS_ADD_PREFIX(rx_streaming, interval, streaming);
+ DEBUGFS_ADD_PREFIX(rx_streaming, always, streaming);
+
+
return 0;
err:
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index c3c554cd658..304aaa2ee01 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -133,10 +133,13 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
if (ret < 0)
break;
- /* enable beacon early termination */
- ret = wl1271_acx_bet_enable(wl, true);
- if (ret < 0)
- break;
+ /*
+ * BET has only a minor effect in 5GHz and masks
+ * channel switch IEs, so we only enable BET on 2.4GHz
+ */
+ if (wl->band == IEEE80211_BAND_2GHZ)
+ /* enable beacon early termination */
+ ret = wl1271_acx_bet_enable(wl, true);
if (wl->ps_compl) {
complete(wl->ps_compl);
@@ -168,6 +171,36 @@ static void wl1271_event_rssi_trigger(struct wl1271 *wl,
wl->last_rssi_event = event;
}
+static void wl1271_stop_ba_event(struct wl1271 *wl, u8 ba_allowed)
+{
+ /* Convert the value to bool */
+ wl->ba_allowed = !!ba_allowed;
+
+ /*
+ * Return if there are no open BA sessions or the event
+ * indicates that BA is allowed.
+ */
+ if ((!wl->ba_rx_bitmap) || (wl->ba_allowed))
+ return;
+
+ ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap, wl->bssid);
+}
+
+static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
+ u8 enable)
+{
+ if (enable) {
+ /* disable dynamic PS when requested by the firmware */
+ ieee80211_disable_dyn_ps(wl->vif);
+ set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
+ } else {
+ ieee80211_enable_dyn_ps(wl->vif);
+ clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
+ wl1271_recalc_rx_streaming(wl);
+ }
+
+}
+
static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
{
wl1271_debug(DEBUG_EVENT, "MBOX DUMP:");
@@ -181,6 +214,8 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
u32 vector;
bool beacon_loss = false;
bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ bool disconnect_sta = false;
+ unsigned long sta_bitmap = 0;
wl1271_event_mbox_dump(mbox);
@@ -211,14 +246,10 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
}
}
- /* disable dynamic PS when requested by the firmware */
if (vector & SOFT_GEMINI_SENSE_EVENT_ID &&
- wl->bss_type == BSS_TYPE_STA_BSS) {
- if (mbox->soft_gemini_sense_info)
- ieee80211_disable_dyn_ps(wl->vif);
- else
- ieee80211_enable_dyn_ps(wl->vif);
- }
+ wl->bss_type == BSS_TYPE_STA_BSS)
+ wl12xx_event_soft_gemini_sense(wl,
+ mbox->soft_gemini_sense_info);
/*
* The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon
@@ -252,12 +283,60 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
wl1271_event_rssi_trigger(wl, mbox);
}
+ if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) && !is_ap) {
+ wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
+ "ba_allowed = 0x%x", mbox->ba_allowed);
+
+ if (wl->vif)
+ wl1271_stop_ba_event(wl, mbox->ba_allowed);
+ }
+
if ((vector & DUMMY_PACKET_EVENT_ID) && !is_ap) {
wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
if (wl->vif)
wl1271_tx_dummy_packet(wl);
}
+ /*
+ * "TX retries exceeded" has a different meaning according to mode.
+ * In AP mode the offending station is disconnected.
+ */
+ if ((vector & MAX_TX_RETRY_EVENT_ID) && is_ap) {
+ wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID");
+ sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded);
+ disconnect_sta = true;
+ }
+
+ if ((vector & INACTIVE_STA_EVENT_ID) && is_ap) {
+ wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
+ sta_bitmap |= le16_to_cpu(mbox->sta_aging_status);
+ disconnect_sta = true;
+ }
+
+ if (is_ap && disconnect_sta) {
+ u32 num_packets = wl->conf.tx.max_tx_retries;
+ struct ieee80211_sta *sta;
+ const u8 *addr;
+ int h;
+
+ for (h = find_first_bit(&sta_bitmap, AP_MAX_LINKS);
+ h < AP_MAX_LINKS;
+ h = find_next_bit(&sta_bitmap, AP_MAX_LINKS, h+1)) {
+ if (!wl1271_is_active_sta(wl, h))
+ continue;
+
+ addr = wl->links[h].addr;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wl->vif, addr);
+ if (sta) {
+ wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
+ ieee80211_report_low_ack(sta, num_packets);
+ }
+ rcu_read_unlock();
+ }
+ }
+
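
The disconnect path above ORs the two event bitmaps (stations that aged out and stations that exceeded their TX retries) and walks the set bits with find_first_bit()/find_next_bit(), treating each bit index as an HLID. A minimal sketch of the same iteration in plain C without the kernel bitops helpers (the AP_MAX_LINKS value here is an assumption):

#include <stdio.h>

#define AP_MAX_LINKS 16                 /* assumption for this sketch */

/* Visit every set bit of 'bitmap', treating the bit index as a link id. */
static void for_each_flagged_link(unsigned long bitmap)
{
        int hlid;

        for (hlid = 0; hlid < AP_MAX_LINKS; hlid++) {
                if (!(bitmap & (1UL << hlid)))
                        continue;

                printf("station on hlid %d flagged for disconnect\n", hlid);
        }
}

int main(void)
{
        /* e.g. stations on hlid 2 and hlid 5 exceeded their TX retries */
        for_each_flagged_link((1UL << 2) | (1UL << 5));
        return 0;
}
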
if (wl->vif && beacon_loss)
ieee80211_connection_loss(wl->vif);
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index b6cf06e565a..e524ad6fe4e 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -58,20 +58,23 @@ enum {
CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17),
BSS_LOSE_EVENT_ID = BIT(18),
REGAINED_BSS_EVENT_ID = BIT(19),
- ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20),
+ MAX_TX_RETRY_EVENT_ID = BIT(20),
/* STA: dummy paket for dynamic mem blocks */
DUMMY_PACKET_EVENT_ID = BIT(21),
/* AP: STA remove complete */
STA_REMOVE_COMPLETE_EVENT_ID = BIT(21),
SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
+ /* STA: SG prediction */
SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23),
+ /* AP: Inactive STA */
+ INACTIVE_STA_EVENT_ID = BIT(23),
SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25),
DBG_EVENT_ID = BIT(26),
HEALTH_CHECK_REPLY_EVENT_ID = BIT(27),
PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(28),
PERIODIC_SCAN_REPORT_EVENT_ID = BIT(29),
- BA_SESSION_TEAR_DOWN_EVENT_ID = BIT(30),
+ BA_SESSION_RX_CONSTRAINT_EVENT_ID = BIT(30),
EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff,
};
@@ -119,10 +122,27 @@ struct event_mailbox {
/* AP FW only */
u8 hlid_removed;
+
+ /* a bitmap of hlids for stations that have been inactive too long */
__le16 sta_aging_status;
+
+ /* a bitmap of hlids for stations which didn't respond to TX */
__le16 sta_tx_retry_exceeded;
- u8 reserved_5[24];
+ /*
+ * Bitmap; each bit set represents the Role ID for which this constraint
+ * is set. Range: 0 - FF, FF means ANY role
+ */
+ u8 ba_role_id;
+ /*
+ * Bitmap; each bit set represents the Link ID for which this constraint
+ * is set. Not applicable if ba_role_id is set to ANY role (FF).
+ * Range: 0 - FFFF, FFFF means ANY link in that role
+ */
+ u8 ba_link_id;
+ u8 ba_allowed;
+
+ u8 reserved_5[21];
} __packed;
int wl1271_event_unmask(struct wl1271 *wl);
@@ -130,4 +150,7 @@ void wl1271_event_mbox_config(struct wl1271 *wl);
int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
void wl1271_pspoll_work(struct work_struct *work);
+/* Functions from main.c */
+bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid);
+
#endif
diff --git a/drivers/net/wireless/wl12xx/ini.h b/drivers/net/wireless/wl12xx/ini.h
index 1420c842b8f..4cf9ecc5621 100644
--- a/drivers/net/wireless/wl12xx/ini.h
+++ b/drivers/net/wireless/wl12xx/ini.h
@@ -24,6 +24,9 @@
#ifndef __INI_H__
#define __INI_H__
+#define GENERAL_SETTINGS_DRPW_LPD 0xc0
+#define SCRATCH_ENABLE_LPD BIT(25)
+
#define WL1271_INI_MAX_SMART_REFLEX_PARAM 16
struct wl1271_ini_general_params {
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index a8f4f156c05..c3e9a2e4410 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -321,6 +321,20 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
return 0;
}
+static int wl12xx_init_fwlog(struct wl1271 *wl)
+{
+ int ret;
+
+ if (wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED)
+ return 0;
+
+ ret = wl12xx_cmd_config_fwlog(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int wl1271_sta_hw_init(struct wl1271 *wl)
{
int ret;
@@ -382,6 +396,11 @@ static int wl1271_sta_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
+ /* Configure the FW logger */
+ ret = wl12xx_init_fwlog(wl);
+ if (ret < 0)
+ return ret;
+
return 0;
}
@@ -428,7 +447,7 @@ static int wl1271_ap_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_acx_max_tx_retry(wl);
+ ret = wl1271_acx_ap_max_tx_retry(wl);
if (ret < 0)
return ret;
@@ -436,6 +455,11 @@ static int wl1271_ap_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
+ /* initialize Tx power */
+ ret = wl1271_acx_tx_power(wl, wl->power_level);
+ if (ret < 0)
+ return ret;
+
return 0;
}
@@ -541,6 +565,7 @@ static int wl1271_set_ba_policies(struct wl1271 *wl)
/* Reset the BA RX indicators */
wl->ba_rx_bitmap = 0;
+ wl->ba_allowed = true;
/* validate that FW support BA */
wl1271_check_ba_support(wl);
diff --git a/drivers/net/wireless/wl12xx/io.c b/drivers/net/wireless/wl12xx/io.c
index da5c1ad942a..c2da66f4504 100644
--- a/drivers/net/wireless/wl12xx/io.c
+++ b/drivers/net/wireless/wl12xx/io.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include "wl12xx.h"
@@ -128,12 +127,14 @@ EXPORT_SYMBOL_GPL(wl1271_set_partition);
void wl1271_io_reset(struct wl1271 *wl)
{
- wl->if_ops->reset(wl);
+ if (wl->if_ops->reset)
+ wl->if_ops->reset(wl);
}
void wl1271_io_init(struct wl1271 *wl)
{
- wl->if_ops->init(wl);
+ if (wl->if_ops->init)
+ wl->if_ops->init(wl);
}
void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index beed621a8ae..a2fe4f506ad 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -25,6 +25,7 @@
#ifndef __IO_H__
#define __IO_H__
+#include <linux/irqreturn.h>
#include "reg.h"
#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
@@ -128,6 +129,20 @@ static inline void wl1271_write(struct wl1271 *wl, int addr, void *buf,
wl1271_raw_write(wl, physical, buf, len, fixed);
}
+static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr,
+ void *buf, size_t len, bool fixed)
+{
+ int physical;
+ int addr;
+
+ /* Addresses are stored internally as indices of 32-byte blocks */
+ addr = hwaddr << 5;
+
+ physical = wl1271_translate_addr(wl, addr);
+
+ wl1271_raw_read(wl, physical, buf, len, fixed);
+}
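
wl1271_read_hwaddr() above treats the incoming address as an index of 32-byte blocks, so the byte address is just hwaddr << 5 (hwaddr * 32) before the usual partition translation. A tiny worked example of that arithmetic (values are made up):

#include <assert.h>

/* Block-index to byte-address conversion used above: one block = 32 bytes. */
static int hwaddr_to_byte_addr(int hwaddr)
{
        return hwaddr << 5;             /* same as hwaddr * 32 */
}

int main(void)
{
        assert(hwaddr_to_byte_addr(0x10) == 0x200);     /* block 16 -> byte 512 */
        return 0;
}
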
+
static inline u32 wl1271_read32(struct wl1271 *wl, int addr)
{
return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index e6497dc669d..e58c22d21e3 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -31,6 +31,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/wl12xx.h>
+#include <linux/sched.h>
#include "wl12xx.h"
#include "wl12xx_80211.h"
@@ -209,7 +210,8 @@ static struct conf_drv_settings default_conf = {
.tx_op_limit = 1504,
},
},
- .ap_max_tx_retries = 100,
+ .max_tx_retries = 100,
+ .ap_aging_period = 300,
.tid_conf_count = 4,
.tid_conf = {
[CONF_TX_AC_BE] = {
@@ -362,9 +364,25 @@ static struct conf_drv_settings default_conf = {
.fm_disturbed_band_margin = 0xff, /* default */
.swallow_clk_diff = 0xff, /* default */
},
+ .rx_streaming = {
+ .duration = 150,
+ .queues = 0x1,
+ .interval = 20,
+ .always = 0,
+ },
+ .fwlog = {
+ .mode = WL12XX_FWLOG_ON_DEMAND,
+ .mem_blocks = 2,
+ .severity = 0,
+ .timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
+ .output = WL12XX_FWLOG_OUTPUT_HOST,
+ .threshold = 0,
+ },
.hci_io_ds = HCI_IO_DS_6MA,
};
+static char *fwlog_param;
+
static void __wl1271_op_remove_interface(struct wl1271 *wl,
bool reset_tx_queues);
static void wl1271_free_ap_keys(struct wl1271 *wl);
@@ -388,6 +406,22 @@ static struct platform_device wl1271_device = {
static DEFINE_MUTEX(wl_list_mutex);
static LIST_HEAD(wl_list);
+static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate)
+{
+ int ret;
+ if (operstate != IF_OPER_UP)
+ return 0;
+
+ if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags))
+ return 0;
+
+ ret = wl1271_cmd_set_sta_state(wl);
+ if (ret < 0)
+ return ret;
+
+ wl1271_info("Association completed.");
+ return 0;
+}
static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
void *arg)
{
@@ -437,11 +471,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
if (ret < 0)
goto out;
- if ((dev->operstate == IF_OPER_UP) &&
- !test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags)) {
- wl1271_cmd_set_sta_state(wl);
- wl1271_info("Association completed.");
- }
+ wl1271_check_operstate(wl, dev->operstate);
wl1271_ps_elp_sleep(wl);
@@ -473,6 +503,117 @@ static int wl1271_reg_notify(struct wiphy *wiphy,
return 0;
}
+static int wl1271_set_rx_streaming(struct wl1271 *wl, bool enable)
+{
+ int ret = 0;
+
+ /* we should hold wl->mutex */
+ ret = wl1271_acx_ps_rx_streaming(wl, enable);
+ if (ret < 0)
+ goto out;
+
+ if (enable)
+ set_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+ else
+ clear_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+out:
+ return ret;
+}
+
+/*
+ * This function is called when the rx_streaming interval
+ * has been changed or rx_streaming should be disabled.
+ */
+int wl1271_recalc_rx_streaming(struct wl1271 *wl)
+{
+ int ret = 0;
+ int period = wl->conf.rx_streaming.interval;
+
+ /* don't reconfigure if rx_streaming is disabled */
+ if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+ goto out;
+
+ /* reconfigure/disable according to new streaming_period */
+ if (period &&
+ test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) &&
+ (wl->conf.rx_streaming.always ||
+ test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
+ ret = wl1271_set_rx_streaming(wl, true);
+ else {
+ ret = wl1271_set_rx_streaming(wl, false);
+ /* don't cancel_work_sync since we might deadlock */
+ del_timer_sync(&wl->rx_streaming_timer);
+ }
+out:
+ return ret;
+}
+
+static void wl1271_rx_streaming_enable_work(struct work_struct *work)
+{
+ int ret;
+ struct wl1271 *wl =
+ container_of(work, struct wl1271, rx_streaming_enable_work);
+
+ mutex_lock(&wl->mutex);
+
+ if (test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags) ||
+ !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
+ (!wl->conf.rx_streaming.always &&
+ !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
+ goto out;
+
+ if (!wl->conf.rx_streaming.interval)
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_set_rx_streaming(wl, true);
+ if (ret < 0)
+ goto out_sleep;
+
+ /* stop it after some time of inactivity */
+ mod_timer(&wl->rx_streaming_timer,
+ jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
+static void wl1271_rx_streaming_disable_work(struct work_struct *work)
+{
+ int ret;
+ struct wl1271 *wl =
+ container_of(work, struct wl1271, rx_streaming_disable_work);
+
+ mutex_lock(&wl->mutex);
+
+ if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_set_rx_streaming(wl, false);
+ if (ret)
+ goto out_sleep;
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+}
+
+static void wl1271_rx_streaming_timer(unsigned long data)
+{
+ struct wl1271 *wl = (struct wl1271 *)data;
+ ieee80211_queue_work(wl->hw, &wl->rx_streaming_disable_work);
+}
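
The two works and the timer above cooperate: the enable work arms rx_streaming_timer for conf.rx_streaming.duration milliseconds, and the timer callback only queues the disable work, since timers run in atomic context and the ELP wakeup must happen in process context. A hedged sketch of how they would typically be wired up during driver initialization (the actual init site is not part of this hunk):

/* Sketch only; placement in the init path is an assumption. */
INIT_WORK(&wl->rx_streaming_enable_work, wl1271_rx_streaming_enable_work);
INIT_WORK(&wl->rx_streaming_disable_work, wl1271_rx_streaming_disable_work);
setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer,
            (unsigned long)wl);
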
+
static void wl1271_conf_init(struct wl1271 *wl)
{
@@ -488,8 +629,24 @@ static void wl1271_conf_init(struct wl1271 *wl)
/* apply driver default configuration */
memcpy(&wl->conf, &default_conf, sizeof(default_conf));
-}
+ /* Adjust settings according to optional module parameters */
+ if (fwlog_param) {
+ if (!strcmp(fwlog_param, "continuous")) {
+ wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
+ } else if (!strcmp(fwlog_param, "ondemand")) {
+ wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
+ } else if (!strcmp(fwlog_param, "dbgpins")) {
+ wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
+ wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
+ } else if (!strcmp(fwlog_param, "disable")) {
+ wl->conf.fwlog.mem_blocks = 0;
+ wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
+ } else {
+ wl1271_error("Unknown fwlog parameter %s", fwlog_param);
+ }
+ }
+}
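
fwlog_param is a module parameter string ("continuous", "ondemand", "dbgpins" or "disable") that overrides the default firmware-logger configuration at load time. The parameter declaration itself is not part of this hunk; a hedged sketch of how such a charp parameter is typically registered (the exposed name and permissions are assumptions):

/* Sketch only; file-scope declaration in main.c is assumed. */
module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
                 "FW logger options: continuous, ondemand, dbgpins or disable");
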
static int wl1271_plt_init(struct wl1271 *wl)
{
@@ -667,13 +824,24 @@ static void wl1271_irq_update_links_status(struct wl1271 *wl,
}
}
+static u32 wl1271_tx_allocated_blocks(struct wl1271 *wl)
+{
+ int i;
+ u32 total_alloc_blocks = 0;
+
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ total_alloc_blocks += wl->tx_allocated_blocks[i];
+
+ return total_alloc_blocks;
+}
+
static void wl1271_fw_status(struct wl1271 *wl,
struct wl1271_fw_full_status *full_status)
{
struct wl1271_fw_common_status *status = &full_status->common;
struct timespec ts;
u32 old_tx_blk_count = wl->tx_blocks_available;
- u32 freed_blocks = 0;
+ u32 freed_blocks = 0, ac_freed_blocks;
int i;
if (wl->bss_type == BSS_TYPE_AP_BSS) {
@@ -693,21 +861,23 @@ static void wl1271_fw_status(struct wl1271 *wl,
/* update number of available TX blocks */
for (i = 0; i < NUM_TX_QUEUES; i++) {
- freed_blocks += le32_to_cpu(status->tx_released_blks[i]) -
- wl->tx_blocks_freed[i];
+ ac_freed_blocks = le32_to_cpu(status->tx_released_blks[i]) -
+ wl->tx_blocks_freed[i];
+ freed_blocks += ac_freed_blocks;
+
+ wl->tx_allocated_blocks[i] -= ac_freed_blocks;
wl->tx_blocks_freed[i] =
le32_to_cpu(status->tx_released_blks[i]);
}
- wl->tx_allocated_blocks -= freed_blocks;
-
if (wl->bss_type == BSS_TYPE_AP_BSS) {
/* Update num of allocated TX blocks per link and ps status */
wl1271_irq_update_links_status(wl, &full_status->ap);
wl->tx_blocks_available += freed_blocks;
} else {
- int avail = full_status->sta.tx_total - wl->tx_allocated_blocks;
+ int avail = full_status->sta.tx_total -
+ wl1271_tx_allocated_blocks(wl);
/*
* The FW might change the total number of TX memblocks before
@@ -741,7 +911,7 @@ static void wl1271_flush_deferred_work(struct wl1271 *wl)
/* Return sent skbs to the network stack */
while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
- ieee80211_tx_status(wl->hw, skb);
+ ieee80211_tx_status_ni(wl->hw, skb);
}
static void wl1271_netstack_work(struct work_struct *work)
@@ -808,7 +978,7 @@ irqreturn_t wl1271_irq(int irq, void *cookie)
if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
wl1271_error("watchdog interrupt received! "
"starting recovery.");
- ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ wl12xx_queue_recovery_work(wl);
/* restarting the chip. ignore any other interrupt. */
goto out;
@@ -822,7 +992,7 @@ irqreturn_t wl1271_irq(int irq, void *cookie)
/* Check if any tx blocks were freed */
spin_lock_irqsave(&wl->wl_lock, flags);
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
- wl->tx_queue_count) {
+ wl1271_tx_total_queue_count(wl) > 0) {
spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
* In order to avoid starvation of the TX path,
@@ -870,7 +1040,7 @@ out:
/* In case TX was not handled here, queue TX work */
clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
- wl->tx_queue_count)
+ wl1271_tx_total_queue_count(wl) > 0)
ieee80211_queue_work(wl->hw, &wl->tx_work);
spin_unlock_irqrestore(&wl->wl_lock, flags);
@@ -970,6 +1140,89 @@ out:
return ret;
}
+void wl12xx_queue_recovery_work(struct wl1271 *wl)
+{
+ if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
+ ieee80211_queue_work(wl->hw, &wl->recovery_work);
+}
+
+size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
+{
+ size_t len = 0;
+
+ /* The FW log is a length-value list; find where the log ends */
+ while (len < maxlen) {
+ if (memblock[len] == 0)
+ break;
+ if (len + memblock[len] + 1 > maxlen)
+ break;
+ len += memblock[len] + 1;
+ }
+
+ /* Make sure we have enough room */
+ len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
+
+ /* Fill the FW log file, consumed by the sysfs fwlog entry */
+ memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
+ wl->fwlog_size += len;
+
+ return len;
+}
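
The copy routine above treats the firmware log as a simple length-value byte stream: each record starts with a length byte followed by that many payload bytes, and a zero length terminates the list. A minimal, self-contained sketch of that walk in plain C (independent of the driver; names are illustrative):

#include <stddef.h>
#include <stdint.h>

/* Return how many bytes of 'buf' belong to complete length-value records.
 * A zero length byte, or a record that would run past 'maxlen', ends the
 * walk, the same two conditions checked in wl12xx_copy_fwlog(). */
static size_t lv_list_length(const uint8_t *buf, size_t maxlen)
{
        size_t len = 0;

        while (len < maxlen) {
                uint8_t rec_len = buf[len];

                if (rec_len == 0)                       /* terminator */
                        break;
                if (len + rec_len + 1 > maxlen)         /* truncated record */
                        break;
                len += rec_len + 1;                     /* length byte + payload */
        }

        return len;
}
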
+
+static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
+{
+ u32 addr;
+ u32 first_addr;
+ u8 *block;
+
+ if ((wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
+ (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
+ (wl->conf.fwlog.mem_blocks == 0))
+ return;
+
+ wl1271_info("Reading FW panic log");
+
+ block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
+ if (!block)
+ return;
+
+ /*
+ * Make sure the chip is awake and the logger isn't active.
+ * This might fail if the firmware has hung.
+ */
+ if (!wl1271_ps_elp_wakeup(wl))
+ wl12xx_cmd_stop_fwlog(wl);
+
+ /* Read the first memory block address */
+ wl1271_fw_status(wl, wl->fw_status);
+ first_addr = __le32_to_cpu(wl->fw_status->sta.log_start_addr);
+ if (!first_addr)
+ goto out;
+
+ /* Traverse the memory blocks linked list */
+ addr = first_addr;
+ do {
+ memset(block, 0, WL12XX_HW_BLOCK_SIZE);
+ wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
+ false);
+
+ /*
+ * Memory blocks are linked to one another. The first 4 bytes
+ * of each memory block hold the hardware address of the next
+ * one. The last memory block points to the first one.
+ */
+ addr = __le32_to_cpup((__le32 *)block);
+ if (!wl12xx_copy_fwlog(wl, block + sizeof(addr),
+ WL12XX_HW_BLOCK_SIZE - sizeof(addr)))
+ break;
+ } while (addr && (addr != first_addr));
+
+ wake_up_interruptible(&wl->fwlog_waitq);
+
+out:
+ kfree(block);
+}
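
The panic-log reader above follows a chain of fixed-size memory blocks whose first 32-bit word holds the hardware address of the next block, with the last block pointing back to the first (the driver byte-swaps that word with __le32_to_cpup()). A hedged, generic sketch of the same traversal pattern; the read_block() helper and BLOCK_SIZE are hypothetical stand-ins for wl1271_read_hwaddr() and WL12XX_HW_BLOCK_SIZE:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 256                  /* assumption: fixed block size */

/* Hypothetical helper: read one hardware block at 'addr' into 'buf'. */
extern void read_block(uint32_t addr, uint8_t *buf, size_t len);

/* Visit every block of a circular, singly linked chain exactly once.
 * The loop stops on a zero link or when the chain wraps back to the
 * first block, mirroring the do/while condition above. */
static void walk_block_chain(uint32_t first_addr)
{
        uint8_t block[BLOCK_SIZE];
        uint32_t addr = first_addr;
        uint32_t next;

        if (!first_addr)
                return;

        do {
                read_block(addr, block, sizeof(block));

                /* First 4 bytes of each block point to the next one. */
                memcpy(&next, block, sizeof(next));

                /* ... consume block + 4 .. block + BLOCK_SIZE here ... */

                addr = next;
        } while (addr && addr != first_addr);
}
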
+
static void wl1271_recovery_work(struct work_struct *work)
{
struct wl1271 *wl =
@@ -980,9 +1233,23 @@ static void wl1271_recovery_work(struct work_struct *work)
if (wl->state != WL1271_STATE_ON)
goto out;
+ /* Avoid a recursive recovery */
+ set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
+
+ wl12xx_read_fwlog_panic(wl);
+
wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4));
+ /*
+ * Advance security sequence number to overcome potential progress
+ * in the firmware during recovery. This doesn't hurt if the network is
+ * not encrypted.
+ */
+ if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
+ test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+ wl->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING;
+
if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
ieee80211_connection_loss(wl->vif);
@@ -996,6 +1263,9 @@ static void wl1271_recovery_work(struct work_struct *work)
/* reboot the chipset */
__wl1271_op_remove_interface(wl, false);
+
+ clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
+
ieee80211_restart_hw(wl->hw);
/*
@@ -1074,9 +1344,13 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
wl->chip.id);
- /* end-of-transaction flag should be set in wl127x AP mode */
+ /*
+ * 'end-of-transaction flag' and 'LPD mode flag'
+ * should be set in wl127x AP mode only
+ */
if (wl->bss_type == BSS_TYPE_AP_BSS)
- wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
+ wl->quirks |= (WL12XX_QUIRK_END_OF_TRANSACTION |
+ WL12XX_QUIRK_LPD_MODE);
ret = wl1271_setup(wl);
if (ret < 0)
@@ -1089,6 +1363,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
+
if (wl1271_set_block_size(wl))
wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT;
break;
@@ -1117,24 +1392,6 @@ out:
return ret;
}
-static unsigned int wl1271_get_fw_ver_quirks(struct wl1271 *wl)
-{
- unsigned int quirks = 0;
- unsigned int *fw_ver = wl->chip.fw_ver;
-
- /* Only for wl127x */
- if ((fw_ver[FW_VER_CHIP] == FW_VER_CHIP_WL127X) &&
- /* Check STA version */
- (((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
- (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_STA_MIN)) ||
- /* Check AP version */
- ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP) &&
- (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_AP_MIN))))
- quirks |= WL12XX_QUIRK_USE_2_SPARE_BLOCKS;
-
- return quirks;
-}
-
int wl1271_plt_start(struct wl1271 *wl)
{
int retries = WL1271_BOOT_RETRIES;
@@ -1171,8 +1428,6 @@ int wl1271_plt_start(struct wl1271 *wl)
wl1271_notice("firmware booted in PLT mode (%s)",
wl->chip.fw_ver_str);
- /* Check if any quirks are needed with older fw versions */
- wl->quirks |= wl1271_get_fw_ver_quirks(wl);
goto out;
irq_disable:
@@ -1242,26 +1497,27 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
unsigned long flags;
- int q;
+ int q, mapping;
u8 hlid = 0;
- q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
+ mapping = skb_get_queue_mapping(skb);
+ q = wl1271_tx_get_queue(mapping);
if (wl->bss_type == BSS_TYPE_AP_BSS)
hlid = wl1271_tx_get_hlid(skb);
spin_lock_irqsave(&wl->wl_lock, flags);
- wl->tx_queue_count++;
+ wl->tx_queue_count[q]++;
/*
* The workqueue is slow to process the tx_queue and we need to stop
* the queue here, otherwise the queue will get too long.
*/
- if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
- wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
- ieee80211_stop_queues(wl->hw);
- set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
+ if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
+ wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
+ ieee80211_stop_queue(wl->hw, mapping);
+ set_bit(q, &wl->stopped_queues_map);
}
/* queue the packet */
@@ -1287,10 +1543,11 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
unsigned long flags;
+ int q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
spin_lock_irqsave(&wl->wl_lock, flags);
set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
- wl->tx_queue_count++;
+ wl->tx_queue_count[q]++;
spin_unlock_irqrestore(&wl->wl_lock, flags);
/* The FW is low on RX memory blocks, so send the dummy packet asap */
@@ -1352,15 +1609,15 @@ static struct notifier_block wl1271_dev_notifier = {
};
#ifdef CONFIG_PM
-static int wl1271_configure_suspend(struct wl1271 *wl)
+static int wl1271_configure_suspend_sta(struct wl1271 *wl)
{
- int ret;
-
- if (wl->bss_type != BSS_TYPE_STA_BSS)
- return 0;
+ int ret = 0;
mutex_lock(&wl->mutex);
+ if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ goto out_unlock;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
@@ -1403,11 +1660,44 @@ out:
}
+static int wl1271_configure_suspend_ap(struct wl1271 *wl)
+{
+ int ret = 0;
+
+ mutex_lock(&wl->mutex);
+
+ if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+ goto out_unlock;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out_unlock;
+
+ ret = wl1271_acx_set_ap_beacon_filter(wl, true);
+
+ wl1271_ps_elp_sleep(wl);
+out_unlock:
+ mutex_unlock(&wl->mutex);
+ return ret;
+
+}
+
+static int wl1271_configure_suspend(struct wl1271 *wl)
+{
+ if (wl->bss_type == BSS_TYPE_STA_BSS)
+ return wl1271_configure_suspend_sta(wl);
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ return wl1271_configure_suspend_ap(wl);
+ return 0;
+}
+
static void wl1271_configure_resume(struct wl1271 *wl)
{
int ret;
+ bool is_sta = wl->bss_type == BSS_TYPE_STA_BSS;
+ bool is_ap = wl->bss_type == BSS_TYPE_AP_BSS;
- if (wl->bss_type != BSS_TYPE_STA_BSS)
+ if (!is_sta && !is_ap)
return;
mutex_lock(&wl->mutex);
@@ -1415,10 +1705,14 @@ static void wl1271_configure_resume(struct wl1271 *wl)
if (ret < 0)
goto out;
- /* exit psm if it wasn't configured */
- if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags))
- wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, true);
+ if (is_sta) {
+ /* exit psm if it wasn't configured */
+ if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags))
+ wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
+ wl->basic_rate, true);
+ } else if (is_ap) {
+ wl1271_acx_set_ap_beacon_filter(wl, false);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -1429,69 +1723,68 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wow)
{
struct wl1271 *wl = hw->priv;
+ int ret;
+
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
- wl->wow_enabled = !!wow;
- if (wl->wow_enabled) {
- int ret;
- ret = wl1271_configure_suspend(wl);
- if (ret < 0) {
- wl1271_warning("couldn't prepare device to suspend");
- return ret;
- }
- /* flush any remaining work */
- wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
- flush_delayed_work(&wl->scan_complete_work);
+ WARN_ON(!wow || !wow->any);
- /*
- * disable and re-enable interrupts in order to flush
- * the threaded_irq
- */
- wl1271_disable_interrupts(wl);
+ wl->wow_enabled = true;
+ ret = wl1271_configure_suspend(wl);
+ if (ret < 0) {
+ wl1271_warning("couldn't prepare device to suspend");
+ return ret;
+ }
+ /* flush any remaining work */
+ wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
- /*
- * set suspended flag to avoid triggering a new threaded_irq
- * work. no need for spinlock as interrupts are disabled.
- */
- set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
+ /*
+ * disable and re-enable interrupts in order to flush
+ * the threaded_irq
+ */
+ wl1271_disable_interrupts(wl);
+
+ /*
+ * set suspended flag to avoid triggering a new threaded_irq
+ * work. no need for spinlock as interrupts are disabled.
+ */
+ set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
+
+ wl1271_enable_interrupts(wl);
+ flush_work(&wl->tx_work);
+ flush_delayed_work(&wl->pspoll_work);
+ flush_delayed_work(&wl->elp_work);
- wl1271_enable_interrupts(wl);
- flush_work(&wl->tx_work);
- flush_delayed_work(&wl->pspoll_work);
- flush_delayed_work(&wl->elp_work);
- }
return 0;
}
static int wl1271_op_resume(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
+ unsigned long flags;
+ bool run_irq_work = false;
+
wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
wl->wow_enabled);
+ WARN_ON(!wl->wow_enabled);
/*
* re-enable irq_work enqueuing, and call irq_work directly if
* there is a pending work.
*/
- if (wl->wow_enabled) {
- struct wl1271 *wl = hw->priv;
- unsigned long flags;
- bool run_irq_work = false;
-
- spin_lock_irqsave(&wl->wl_lock, flags);
- clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
- if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
- run_irq_work = true;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- if (run_irq_work) {
- wl1271_debug(DEBUG_MAC80211,
- "run postponed irq_work directly");
- wl1271_irq(0, wl);
- wl1271_enable_interrupts(wl);
- }
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
+ if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
+ run_irq_work = true;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
- wl1271_configure_resume(wl);
+ if (run_irq_work) {
+ wl1271_debug(DEBUG_MAC80211,
+ "run postponed irq_work directly");
+ wl1271_irq(0, wl);
+ wl1271_enable_interrupts(wl);
}
+ wl1271_configure_resume(wl);
+ wl->wow_enabled = false;
return 0;
}
@@ -1629,9 +1922,6 @@ power_off:
strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
sizeof(wiphy->fw_version));
- /* Check if any quirks are needed with older fw versions */
- wl->quirks |= wl1271_get_fw_ver_quirks(wl);
-
/*
* Now we know if 11a is supported (info from the NVS), so disable
* 11a channels if not supported
@@ -1694,6 +1984,9 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
cancel_delayed_work_sync(&wl->scan_complete_work);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->tx_work);
+ del_timer_sync(&wl->rx_streaming_timer);
+ cancel_work_sync(&wl->rx_streaming_enable_work);
+ cancel_work_sync(&wl->rx_streaming_disable_work);
cancel_delayed_work_sync(&wl->pspoll_work);
cancel_delayed_work_sync(&wl->elp_work);
@@ -1714,11 +2007,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->tx_blocks_available = 0;
- wl->tx_allocated_blocks = 0;
wl->tx_results_count = 0;
wl->tx_packets_count = 0;
- wl->tx_security_last_seq = 0;
- wl->tx_security_seq = 0;
wl->time_offset = 0;
wl->session_counter = 0;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
@@ -1737,8 +2027,10 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
*/
wl->flags = 0;
- for (i = 0; i < NUM_TX_QUEUES; i++)
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
wl->tx_blocks_freed[i] = 0;
+ wl->tx_allocated_blocks[i] = 0;
+ }
wl1271_debugfs_reset(wl);
@@ -1891,6 +2183,10 @@ static int wl1271_unjoin(struct wl1271 *wl)
clear_bit(WL1271_FLAG_JOINED, &wl->flags);
memset(wl->bssid, 0, ETH_ALEN);
+ /* reset TX security counters on a clean disconnect */
+ wl->tx_security_last_seq_lsb = 0;
+ wl->tx_security_seq = 0;
+
/* stop filtering packets based on bssid */
wl1271_configure_filters(wl, FIF_OTHER_BSS);
@@ -1983,6 +2279,9 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
wl->channel = channel;
}
+ if ((changed & IEEE80211_CONF_CHANGE_POWER))
+ wl->power_level = conf->power_level;
+
goto out;
}
@@ -2490,6 +2789,44 @@ out:
return ret;
}
+static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wl1271 *wl = hw->priv;
+ int ret;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
+
+ mutex_lock(&wl->mutex);
+
+ if (wl->state == WL1271_STATE_OFF)
+ goto out;
+
+ if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
+ ret = wl1271_scan_stop(wl);
+ if (ret < 0)
+ goto out_sleep;
+ }
+ wl->scan.state = WL1271_SCAN_STATE_IDLE;
+ memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+ wl->scan.req = NULL;
+ ieee80211_scan_completed(wl->hw, true);
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+
+ cancel_delayed_work_sync(&wl->scan_complete_work);
+}
+
static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
@@ -2780,24 +3117,6 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
}
}
- if (changed & BSS_CHANGED_IBSS) {
- wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
- bss_conf->ibss_joined);
-
- if (bss_conf->ibss_joined) {
- u32 rates = bss_conf->basic_rates;
- wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
- rates);
- wl->basic_rate = wl1271_tx_min_rate_get(wl);
-
- /* by default, use 11b rates */
- wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
- ret = wl1271_acx_sta_rate_policies(wl);
- if (ret < 0)
- goto out;
- }
- }
-
ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
if (ret < 0)
goto out;
@@ -3023,6 +3342,24 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
}
}
+ if (changed & BSS_CHANGED_IBSS) {
+ wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
+ bss_conf->ibss_joined);
+
+ if (bss_conf->ibss_joined) {
+ u32 rates = bss_conf->basic_rates;
+ wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
+ rates);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+
+ /* by default, use 11b rates */
+ wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
+ ret = wl1271_acx_sta_rate_policies(wl);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
if (ret < 0)
goto out;
@@ -3061,6 +3398,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
wl1271_warning("cmd join failed %d", ret);
goto out;
}
+ wl1271_check_operstate(wl, ieee80211_get_operstate(vif));
}
out:
@@ -3251,6 +3589,12 @@ static void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
}
+bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
+{
+ int id = hlid - WL1271_AP_STA_HLID_START;
+ return test_bit(id, wl->ap_hlid_map);
+}
+
static int wl1271_op_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -3354,9 +3698,12 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
+ tid, action);
+
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (wl->ba_support) {
+ if ((wl->ba_support) && (wl->ba_allowed)) {
ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn,
true);
if (!ret)
@@ -3406,7 +3753,7 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
goto out;
/* packets are considered pending if in the TX queue or the FW */
- ret = (wl->tx_queue_count > 0) || (wl->tx_frames_cnt > 0);
+ ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
/* the above is appropriate for STA mode for PS purposes */
WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
@@ -3569,40 +3916,40 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
- { .hw_value = 7, .center_freq = 5035},
- { .hw_value = 8, .center_freq = 5040},
- { .hw_value = 9, .center_freq = 5045},
- { .hw_value = 11, .center_freq = 5055},
- { .hw_value = 12, .center_freq = 5060},
- { .hw_value = 16, .center_freq = 5080},
- { .hw_value = 34, .center_freq = 5170},
- { .hw_value = 36, .center_freq = 5180},
- { .hw_value = 38, .center_freq = 5190},
- { .hw_value = 40, .center_freq = 5200},
- { .hw_value = 42, .center_freq = 5210},
- { .hw_value = 44, .center_freq = 5220},
- { .hw_value = 46, .center_freq = 5230},
- { .hw_value = 48, .center_freq = 5240},
- { .hw_value = 52, .center_freq = 5260},
- { .hw_value = 56, .center_freq = 5280},
- { .hw_value = 60, .center_freq = 5300},
- { .hw_value = 64, .center_freq = 5320},
- { .hw_value = 100, .center_freq = 5500},
- { .hw_value = 104, .center_freq = 5520},
- { .hw_value = 108, .center_freq = 5540},
- { .hw_value = 112, .center_freq = 5560},
- { .hw_value = 116, .center_freq = 5580},
- { .hw_value = 120, .center_freq = 5600},
- { .hw_value = 124, .center_freq = 5620},
- { .hw_value = 128, .center_freq = 5640},
- { .hw_value = 132, .center_freq = 5660},
- { .hw_value = 136, .center_freq = 5680},
- { .hw_value = 140, .center_freq = 5700},
- { .hw_value = 149, .center_freq = 5745},
- { .hw_value = 153, .center_freq = 5765},
- { .hw_value = 157, .center_freq = 5785},
- { .hw_value = 161, .center_freq = 5805},
- { .hw_value = 165, .center_freq = 5825},
+ { .hw_value = 7, .center_freq = 5035, .max_power = 25 },
+ { .hw_value = 8, .center_freq = 5040, .max_power = 25 },
+ { .hw_value = 9, .center_freq = 5045, .max_power = 25 },
+ { .hw_value = 11, .center_freq = 5055, .max_power = 25 },
+ { .hw_value = 12, .center_freq = 5060, .max_power = 25 },
+ { .hw_value = 16, .center_freq = 5080, .max_power = 25 },
+ { .hw_value = 34, .center_freq = 5170, .max_power = 25 },
+ { .hw_value = 36, .center_freq = 5180, .max_power = 25 },
+ { .hw_value = 38, .center_freq = 5190, .max_power = 25 },
+ { .hw_value = 40, .center_freq = 5200, .max_power = 25 },
+ { .hw_value = 42, .center_freq = 5210, .max_power = 25 },
+ { .hw_value = 44, .center_freq = 5220, .max_power = 25 },
+ { .hw_value = 46, .center_freq = 5230, .max_power = 25 },
+ { .hw_value = 48, .center_freq = 5240, .max_power = 25 },
+ { .hw_value = 52, .center_freq = 5260, .max_power = 25 },
+ { .hw_value = 56, .center_freq = 5280, .max_power = 25 },
+ { .hw_value = 60, .center_freq = 5300, .max_power = 25 },
+ { .hw_value = 64, .center_freq = 5320, .max_power = 25 },
+ { .hw_value = 100, .center_freq = 5500, .max_power = 25 },
+ { .hw_value = 104, .center_freq = 5520, .max_power = 25 },
+ { .hw_value = 108, .center_freq = 5540, .max_power = 25 },
+ { .hw_value = 112, .center_freq = 5560, .max_power = 25 },
+ { .hw_value = 116, .center_freq = 5580, .max_power = 25 },
+ { .hw_value = 120, .center_freq = 5600, .max_power = 25 },
+ { .hw_value = 124, .center_freq = 5620, .max_power = 25 },
+ { .hw_value = 128, .center_freq = 5640, .max_power = 25 },
+ { .hw_value = 132, .center_freq = 5660, .max_power = 25 },
+ { .hw_value = 136, .center_freq = 5680, .max_power = 25 },
+ { .hw_value = 140, .center_freq = 5700, .max_power = 25 },
+ { .hw_value = 149, .center_freq = 5745, .max_power = 25 },
+ { .hw_value = 153, .center_freq = 5765, .max_power = 25 },
+ { .hw_value = 157, .center_freq = 5785, .max_power = 25 },
+ { .hw_value = 161, .center_freq = 5805, .max_power = 25 },
+ { .hw_value = 165, .center_freq = 5825, .max_power = 25 },
};
/* mapping to indexes for wl1271_rates_5ghz */
@@ -3663,6 +4010,7 @@ static const struct ieee80211_ops wl1271_ops = {
.tx = wl1271_op_tx,
.set_key = wl1271_op_set_key,
.hw_scan = wl1271_op_hw_scan,
+ .cancel_hw_scan = wl1271_op_cancel_hw_scan,
.sched_scan_start = wl1271_op_sched_scan_start,
.sched_scan_stop = wl1271_op_sched_scan_stop,
.bss_info_changed = wl1271_op_bss_info_changed,
@@ -3781,6 +4129,69 @@ static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
static DEVICE_ATTR(hw_pg_ver, S_IRUGO | S_IWUSR,
wl1271_sysfs_show_hw_pg_ver, NULL);
+static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct wl1271 *wl = dev_get_drvdata(dev);
+ ssize_t len;
+ int ret;
+
+ ret = mutex_lock_interruptible(&wl->mutex);
+ if (ret < 0)
+ return -ERESTARTSYS;
+
+ /* Let only one thread read the log at a time, blocking others */
+ while (wl->fwlog_size == 0) {
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait_exclusive(&wl->fwlog_waitq,
+ &wait,
+ TASK_INTERRUPTIBLE);
+
+ if (wl->fwlog_size != 0) {
+ finish_wait(&wl->fwlog_waitq, &wait);
+ break;
+ }
+
+ mutex_unlock(&wl->mutex);
+
+ schedule();
+ finish_wait(&wl->fwlog_waitq, &wait);
+
+ if (signal_pending(current))
+ return -ERESTARTSYS;
+
+ ret = mutex_lock_interruptible(&wl->mutex);
+ if (ret < 0)
+ return -ERESTARTSYS;
+ }
+
+ /* Check if the fwlog is still valid */
+ if (wl->fwlog_size < 0) {
+ mutex_unlock(&wl->mutex);
+ return 0;
+ }
+
+ /* Seeking is not supported - old logs are not kept. Disregard pos. */
+ len = min(count, (size_t)wl->fwlog_size);
+ wl->fwlog_size -= len;
+ memcpy(buffer, wl->fwlog, len);
+
+ /* Make room for new messages */
+ memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
+
+ mutex_unlock(&wl->mutex);
+
+ return len;
+}
+
+static struct bin_attribute fwlog_attr = {
+ .attr = {.name = "fwlog", .mode = S_IRUSR},
+ .read = wl1271_sysfs_read_fwlog,
+};
+
int wl1271_register_hw(struct wl1271 *wl)
{
int ret;
@@ -3961,6 +4372,17 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
+ INIT_WORK(&wl->rx_streaming_enable_work,
+ wl1271_rx_streaming_enable_work);
+ INIT_WORK(&wl->rx_streaming_disable_work,
+ wl1271_rx_streaming_disable_work);
+
+ wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
+ if (!wl->freezable_wq) {
+ ret = -ENOMEM;
+ goto err_hw;
+ }
+
wl->channel = WL1271_DEFAULT_CHANNEL;
wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
wl->default_key = 0;
@@ -3986,6 +4408,13 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->quirks = 0;
wl->platform_quirks = 0;
wl->sched_scanning = false;
+ wl->tx_security_seq = 0;
+ wl->tx_security_last_seq_lsb = 0;
+
+ setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer,
+ (unsigned long) wl);
+ wl->fwlog_size = 0;
+ init_waitqueue_head(&wl->fwlog_waitq);
memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
@@ -4003,7 +4432,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
if (!wl->aggr_buf) {
ret = -ENOMEM;
- goto err_hw;
+ goto err_wq;
}
wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
@@ -4012,11 +4441,18 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
goto err_aggr;
}
+ /* Allocate one page for the FW log */
+ wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
+ if (!wl->fwlog) {
+ ret = -ENOMEM;
+ goto err_dummy_packet;
+ }
+
/* Register platform device */
ret = platform_device_register(wl->plat_dev);
if (ret) {
wl1271_error("couldn't register platform device");
- goto err_dummy_packet;
+ goto err_fwlog;
}
dev_set_drvdata(&wl->plat_dev->dev, wl);
@@ -4034,20 +4470,36 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
goto err_bt_coex_state;
}
+ /* Create sysfs file for the FW log */
+ ret = device_create_bin_file(&wl->plat_dev->dev, &fwlog_attr);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file fwlog");
+ goto err_hw_pg_ver;
+ }
+
return hw;
+err_hw_pg_ver:
+ device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
+
err_bt_coex_state:
device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
err_platform:
platform_device_unregister(wl->plat_dev);
+err_fwlog:
+ free_page((unsigned long)wl->fwlog);
+
err_dummy_packet:
dev_kfree_skb(wl->dummy_packet);
err_aggr:
free_pages((unsigned long)wl->aggr_buf, order);
+err_wq:
+ destroy_workqueue(wl->freezable_wq);
+
err_hw:
wl1271_debugfs_exit(wl);
kfree(plat_dev);
@@ -4063,7 +4515,15 @@ EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
int wl1271_free_hw(struct wl1271 *wl)
{
+ /* Unblock any fwlog readers */
+ mutex_lock(&wl->mutex);
+ wl->fwlog_size = -1;
+ wake_up_interruptible_all(&wl->fwlog_waitq);
+ mutex_unlock(&wl->mutex);
+
+ device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr);
platform_device_unregister(wl->plat_dev);
+ free_page((unsigned long)wl->fwlog);
dev_kfree_skb(wl->dummy_packet);
free_pages((unsigned long)wl->aggr_buf,
get_order(WL1271_AGGR_BUFFER_SIZE));
@@ -4078,6 +4538,7 @@ int wl1271_free_hw(struct wl1271 *wl)
kfree(wl->fw_status);
kfree(wl->tx_res_if);
+ destroy_workqueue(wl->freezable_wq);
ieee80211_free_hw(wl->hw);
@@ -4090,6 +4551,10 @@ EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
+module_param_named(fwlog, fwlog_param, charp, 0);
+MODULE_PARM_DESC(fwlog,
+ "FW logger options: continuous, ondemand, dbgpins or disable");
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index b59b67711a1..3548377ab9c 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -118,7 +118,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl)
&compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
if (ret == 0) {
wl1271_error("ELP wakeup timeout!");
- ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ wl12xx_queue_recovery_work(wl);
ret = -ETIMEDOUT;
goto err;
} else if (ret < 0) {
@@ -169,9 +169,11 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
wl1271_debug(DEBUG_PSM, "leaving psm");
/* disable beacon early termination */
- ret = wl1271_acx_bet_enable(wl, false);
- if (ret < 0)
- return ret;
+ if (wl->band == IEEE80211_BAND_2GHZ) {
+ ret = wl1271_acx_bet_enable(wl, false);
+ if (ret < 0)
+ return ret;
+ }
/* disable beacon filtering */
ret = wl1271_acx_beacon_filter_opt(wl, false);
@@ -191,24 +193,27 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
{
- int i, filtered = 0;
+ int i;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
unsigned long flags;
+ int filtered[NUM_TX_QUEUES];
/* filter all frames currently in the low level queues for this hlid */
for (i = 0; i < NUM_TX_QUEUES; i++) {
+ filtered[i] = 0;
while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
info = IEEE80211_SKB_CB(skb);
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
info->status.rates[0].idx = -1;
- ieee80211_tx_status(wl->hw, skb);
- filtered++;
+ ieee80211_tx_status_ni(wl->hw, skb);
+ filtered[i]++;
}
}
spin_lock_irqsave(&wl->wl_lock, flags);
- wl->tx_queue_count -= filtered;
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ wl->tx_queue_count[i] -= filtered[i];
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl1271_handle_tx_low_watermark(wl);
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index 70091035e01..0450fb49dbb 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -22,6 +22,7 @@
*/
#include <linux/gfp.h>
+#include <linux/sched.h>
#include "wl12xx.h"
#include "acx.h"
@@ -95,6 +96,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
struct ieee80211_hdr *hdr;
u8 *buf;
u8 beacon = 0;
+ u8 is_data = 0;
/*
* In PLT mode we seem to get frames and mac80211 warns about them,
@@ -106,6 +108,13 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
/* the data read starts with the descriptor */
desc = (struct wl1271_rx_descriptor *) data;
+ if (desc->packet_class == WL12XX_RX_CLASS_LOGGER) {
+ size_t len = length - sizeof(*desc);
+ wl12xx_copy_fwlog(wl, data + sizeof(*desc), len);
+ wake_up_interruptible(&wl->fwlog_waitq);
+ return 0;
+ }
+
switch (desc->status & WL1271_RX_DESC_STATUS_MASK) {
/* discard corrupted packets */
case WL1271_RX_DESC_DRIVER_RX_Q_FAIL:
@@ -137,6 +146,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_beacon(hdr->frame_control))
beacon = 1;
+ if (ieee80211_is_data_present(hdr->frame_control))
+ is_data = 1;
wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
@@ -147,9 +158,9 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
skb_trim(skb, skb->len - desc->pad_len);
skb_queue_tail(&wl->deferred_rx_queue, skb);
- ieee80211_queue_work(wl->hw, &wl->netstack_work);
+ queue_work(wl->freezable_wq, &wl->netstack_work);
- return 0;
+ return is_data;
}
void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
@@ -162,6 +173,8 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
u32 mem_block;
u32 pkt_length;
u32 pkt_offset;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ bool had_data = false;
while (drv_rx_counter != fw_rx_counter) {
buf_size = 0;
@@ -214,9 +227,11 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
* conditions, in that case the received frame will just
* be dropped.
*/
- wl1271_rx_handle_data(wl,
- wl->aggr_buf + pkt_offset,
- pkt_length);
+ if (wl1271_rx_handle_data(wl,
+ wl->aggr_buf + pkt_offset,
+ pkt_length) == 1)
+ had_data = true;
+
wl->rx_counter++;
drv_rx_counter++;
drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
@@ -230,6 +245,20 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
*/
if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
+
+ if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
+ (wl->conf.rx_streaming.always ||
+ test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
+ u32 timeout = wl->conf.rx_streaming.duration;
+
+ /* restart rx streaming */
+ if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+ ieee80211_queue_work(wl->hw,
+ &wl->rx_streaming_enable_work);
+
+ mod_timer(&wl->rx_streaming_timer,
+ jiffies + msecs_to_jiffies(timeout));
+ }
}
void wl1271_set_default_filters(struct wl1271 *wl)
diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/wl12xx/rx.h
index 75fabf83649..c88e3fa1d60 100644
--- a/drivers/net/wireless/wl12xx/rx.h
+++ b/drivers/net/wireless/wl12xx/rx.h
@@ -97,6 +97,18 @@
#define RX_BUF_SIZE_MASK 0xFFF00
#define RX_BUF_SIZE_SHIFT_DIV 6
+enum {
+ WL12XX_RX_CLASS_UNKNOWN,
+ WL12XX_RX_CLASS_MANAGEMENT,
+ WL12XX_RX_CLASS_DATA,
+ WL12XX_RX_CLASS_QOS_DATA,
+ WL12XX_RX_CLASS_BCN_PRBRSP,
+ WL12XX_RX_CLASS_EAPOL,
+ WL12XX_RX_CLASS_BA_EVENT,
+ WL12XX_RX_CLASS_AMSDU,
+ WL12XX_RX_CLASS_LOGGER,
+};
+
struct wl1271_rx_descriptor {
__le16 length;
u8 status;
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index 56f76abc754..edfe01c321c 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -62,7 +62,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
if (wl->scan.failed) {
wl1271_info("Scan completed due to error.");
- ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ wl12xx_queue_recovery_work(wl);
}
out:
@@ -321,12 +321,39 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
return 0;
}
+int wl1271_scan_stop(struct wl1271 *wl)
+{
+ struct wl1271_cmd_header *cmd = NULL;
+ int ret = 0;
+
+ if (WARN_ON(wl->scan.state == WL1271_SCAN_STATE_IDLE))
+ return -EINVAL;
+
+ wl1271_debug(DEBUG_CMD, "cmd scan stop");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, cmd,
+ sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("cmd stop_scan failed");
+ goto out;
+ }
+out:
+ kfree(cmd);
+ return ret;
+}
+
static int
wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
struct cfg80211_sched_scan_request *req,
struct conn_scan_ch_params *channels,
u32 band, bool radar, bool passive,
- int start)
+ int start, int max_channels)
{
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
int i, j;
@@ -334,7 +361,7 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
bool force_passive = !req->n_ssids;
for (i = 0, j = start;
- i < req->n_channels && j < MAX_CHANNELS_ALL_BANDS;
+ i < req->n_channels && j < max_channels;
i++) {
flags = req->channels[i]->flags;
@@ -380,46 +407,42 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
return j - start;
}
-static int
+static bool
wl1271_scan_sched_scan_channels(struct wl1271 *wl,
struct cfg80211_sched_scan_request *req,
struct wl1271_cmd_sched_scan_config *cfg)
{
- int idx = 0;
-
cfg->passive[0] =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
+ wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
IEEE80211_BAND_2GHZ,
- false, true, idx);
- idx += cfg->passive[0];
-
+ false, true, 0,
+ MAX_CHANNELS_2GHZ);
cfg->active[0] =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
+ wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
IEEE80211_BAND_2GHZ,
- false, false, idx);
- /*
- * 5GHz channels always start at position 14, not immediately
- * after the last 2.4GHz channel
- */
- idx = 14;
-
+ false, false,
+ cfg->passive[0],
+ MAX_CHANNELS_2GHZ);
cfg->passive[1] =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
+ wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
IEEE80211_BAND_5GHZ,
- false, true, idx);
- idx += cfg->passive[1];
-
+ false, true, 0,
+ MAX_CHANNELS_5GHZ);
cfg->dfs =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
+ wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
IEEE80211_BAND_5GHZ,
- true, true, idx);
- idx += cfg->dfs;
-
+ true, true,
+ cfg->passive[1],
+ MAX_CHANNELS_5GHZ);
cfg->active[1] =
- wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
+ wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
IEEE80211_BAND_5GHZ,
- false, false, idx);
- idx += cfg->active[1];
+ false, false,
+ cfg->passive[1] + cfg->dfs,
+ MAX_CHANNELS_5GHZ);
+ /* 802.11j channels are not supported yet */
+ cfg->passive[2] = 0;
+ cfg->active[2] = 0;
wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d",
cfg->active[0], cfg->passive[0]);
@@ -427,7 +450,9 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
cfg->active[1], cfg->passive[1]);
wl1271_debug(DEBUG_SCAN, " DFS: %d", cfg->dfs);
- return idx;
+ return cfg->passive[0] || cfg->active[0] ||
+ cfg->passive[1] || cfg->active[1] || cfg->dfs ||
+ cfg->passive[2] || cfg->active[2];
}
int wl1271_scan_sched_scan_config(struct wl1271 *wl,
@@ -436,7 +461,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
{
struct wl1271_cmd_sched_scan_config *cfg = NULL;
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
- int i, total_channels, ret;
+ int i, ret;
bool force_passive = !req->n_ssids;
wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
@@ -471,8 +496,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
cfg->ssid_len = 0;
}
- total_channels = wl1271_scan_sched_scan_channels(wl, req, cfg);
- if (total_channels == 0) {
+ if (!wl1271_scan_sched_scan_channels(wl, req, cfg)) {
wl1271_error("scan channel list is empty");
ret = -EINVAL;
goto out;
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index a0b6c5d67b0..d882e4da71b 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -28,6 +28,7 @@
int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req);
+int wl1271_scan_stop(struct wl1271 *wl);
int wl1271_scan_build_probe_req(struct wl1271 *wl,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band);
@@ -112,19 +113,14 @@ struct wl1271_cmd_trigger_scan_to {
__le32 timeout;
} __packed;
-#define MAX_CHANNELS_ALL_BANDS 41
+#define MAX_CHANNELS_2GHZ 14
+#define MAX_CHANNELS_5GHZ 23
+#define MAX_CHANNELS_4GHZ 4
+
#define SCAN_MAX_CYCLE_INTERVALS 16
#define SCAN_MAX_BANDS 3
enum {
- SCAN_CHANNEL_TYPE_2GHZ_PASSIVE,
- SCAN_CHANNEL_TYPE_2GHZ_ACTIVE,
- SCAN_CHANNEL_TYPE_5GHZ_PASSIVE,
- SCAN_CHANNEL_TYPE_5GHZ_ACTIVE,
- SCAN_CHANNEL_TYPE_5GHZ_DFS,
-};
-
-enum {
SCAN_SSID_FILTER_ANY = 0,
SCAN_SSID_FILTER_SPECIFIC = 1,
SCAN_SSID_FILTER_LIST = 2,
@@ -182,7 +178,9 @@ struct wl1271_cmd_sched_scan_config {
u8 padding[3];
- struct conn_scan_ch_params channels[MAX_CHANNELS_ALL_BANDS];
+ struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
+ struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ];
+ struct conn_scan_ch_params channels_4[MAX_CHANNELS_4GHZ];
} __packed;
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 536e5065454..5cf18c2c23f 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -23,7 +23,6 @@
#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/crc7.h>
#include <linux/vmalloc.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
@@ -45,7 +44,7 @@
#define SDIO_DEVICE_ID_TI_WL1271 0x4076
#endif
-static const struct sdio_device_id wl1271_devices[] = {
+static const struct sdio_device_id wl1271_devices[] __devinitconst = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
{}
};
@@ -107,14 +106,6 @@ static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
enable_irq(wl->irq);
}
-static void wl1271_sdio_reset(struct wl1271 *wl)
-{
-}
-
-static void wl1271_sdio_init(struct wl1271 *wl)
-{
-}
-
static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
size_t len, bool fixed)
{
@@ -170,15 +161,17 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
struct sdio_func *func = wl_to_func(wl);
int ret;
- /* Make sure the card will not be powered off by runtime PM */
- ret = pm_runtime_get_sync(&func->dev);
- if (ret < 0)
- goto out;
-
- /* Runtime PM might be disabled, so power up the card manually */
- ret = mmc_power_restore_host(func->card->host);
- if (ret < 0)
- goto out;
+ /* If enabled, tell runtime PM not to power off the card */
+ if (pm_runtime_enabled(&func->dev)) {
+ ret = pm_runtime_get_sync(&func->dev);
+ if (ret)
+ goto out;
+ } else {
+ /* Runtime PM is disabled: power up the card manually */
+ ret = mmc_power_restore_host(func->card->host);
+ if (ret < 0)
+ goto out;
+ }
sdio_claim_host(func);
sdio_enable_func(func);
@@ -195,13 +188,16 @@ static int wl1271_sdio_power_off(struct wl1271 *wl)
sdio_disable_func(func);
sdio_release_host(func);
- /* Runtime PM might be disabled, so power off the card manually */
+ /* Power off the card manually, even if runtime PM is enabled. */
ret = mmc_power_save_host(func->card->host);
if (ret < 0)
return ret;
- /* Let runtime PM know the card is powered off */
- return pm_runtime_put_sync(&func->dev);
+ /* If enabled, let runtime PM know the card is powered off */
+ if (pm_runtime_enabled(&func->dev))
+ ret = pm_runtime_put_sync(&func->dev);
+
+ return ret;
}
static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
@@ -215,8 +211,6 @@ static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
static struct wl1271_if_operations sdio_ops = {
.read = wl1271_sdio_raw_read,
.write = wl1271_sdio_raw_write,
- .reset = wl1271_sdio_reset,
- .init = wl1271_sdio_init,
.power = wl1271_sdio_set_power,
.dev = wl1271_sdio_wl_to_dev,
.enable_irq = wl1271_sdio_enable_interrupts,
@@ -278,17 +272,19 @@ static int __devinit wl1271_probe(struct sdio_func *func,
goto out_free;
}
- enable_irq_wake(wl->irq);
- device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1);
+ ret = enable_irq_wake(wl->irq);
+ if (!ret) {
+ wl->irq_wake_enabled = true;
+ device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1);
- disable_irq(wl->irq);
-
- /* if sdio can keep power while host is suspended, enable wow */
- mmcflags = sdio_get_host_pm_caps(func);
- wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags);
+ /* if sdio can keep power while host is suspended, enable wow */
+ mmcflags = sdio_get_host_pm_caps(func);
+ wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags);
- if (mmcflags & MMC_PM_KEEP_POWER)
- hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+ if (mmcflags & MMC_PM_KEEP_POWER)
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+ }
+ disable_irq(wl->irq);
ret = wl1271_init_ieee80211(wl);
if (ret)
@@ -303,8 +299,6 @@ static int __devinit wl1271_probe(struct sdio_func *func,
/* Tell PM core that we don't need the card to be powered now */
pm_runtime_put_noidle(&func->dev);
- wl1271_notice("initialized");
-
return 0;
out_irq:
@@ -324,8 +318,10 @@ static void __devexit wl1271_remove(struct sdio_func *func)
pm_runtime_get_noresume(&func->dev);
wl1271_unregister_hw(wl);
- device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0);
- disable_irq_wake(wl->irq);
+ if (wl->irq_wake_enabled) {
+ device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0);
+ disable_irq_wake(wl->irq);
+ }
free_irq(wl->irq, wl);
wl1271_free_hw(wl);
}
@@ -402,23 +398,12 @@ static struct sdio_driver wl1271_sdio_driver = {
static int __init wl1271_init(void)
{
- int ret;
-
- ret = sdio_register_driver(&wl1271_sdio_driver);
- if (ret < 0) {
- wl1271_error("failed to register sdio driver: %d", ret);
- goto out;
- }
-
-out:
- return ret;
+ return sdio_register_driver(&wl1271_sdio_driver);
}
static void __exit wl1271_exit(void)
{
sdio_unregister_driver(&wl1271_sdio_driver);
-
- wl1271_notice("unloaded");
}
module_init(wl1271_init);
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 51662bb6801..e0b3736d7e1 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -21,6 +21,7 @@
*
*/
+#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/crc7.h>
@@ -435,8 +436,6 @@ static int __devinit wl1271_probe(struct spi_device *spi)
if (ret)
goto out_irq;
- wl1271_notice("initialized");
-
return 0;
out_irq:
@@ -473,23 +472,12 @@ static struct spi_driver wl1271_spi_driver = {
static int __init wl1271_init(void)
{
- int ret;
-
- ret = spi_register_driver(&wl1271_spi_driver);
- if (ret < 0) {
- wl1271_error("failed to register spi driver: %d", ret);
- goto out;
- }
-
-out:
- return ret;
+ return spi_register_driver(&wl1271_spi_driver);
}
static void __exit wl1271_exit(void)
{
spi_unregister_driver(&wl1271_spi_driver);
-
- wl1271_notice("unloaded");
}
module_init(wl1271_init);
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index da351d7cd1f..88add68bd9a 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -139,12 +139,15 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
if (ret < 0) {
wl1271_warning("testmode cmd interrogate failed: %d", ret);
+ kfree(cmd);
return ret;
}
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
- if (!skb)
+ if (!skb) {
+ kfree(cmd);
return -ENOMEM;
+ }
NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
@@ -260,7 +263,7 @@ static int wl1271_tm_cmd_recover(struct wl1271 *wl, struct nlattr *tb[])
{
wl1271_debug(DEBUG_TESTMODE, "testmode cmd recover");
- ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ wl12xx_queue_recovery_work(wl);
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index ca3ab1c1ace..48fde96ce0d 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -168,7 +168,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
u32 len;
u32 total_blocks;
- int id, ret = -EBUSY;
+ int id, ret = -EBUSY, ac;
u32 spare_blocks;
if (unlikely(wl->quirks & WL12XX_QUIRK_USE_2_SPARE_BLOCKS))
@@ -206,7 +206,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
desc->id = id;
wl->tx_blocks_available -= total_blocks;
- wl->tx_allocated_blocks += total_blocks;
+
+ ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
+ wl->tx_allocated_blocks[ac] += total_blocks;
if (wl->bss_type == BSS_TYPE_AP_BSS)
wl->links[hlid].allocated_blks += total_blocks;
@@ -383,6 +385,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
if (ret < 0)
return ret;
+ wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
+
if (wl->bss_type == BSS_TYPE_AP_BSS) {
wl1271_tx_ap_update_inconnection_sta(wl, skb);
wl1271_tx_regulate_link(wl, hlid);
@@ -390,8 +394,6 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
wl1271_tx_update_filters(wl, skb);
}
- wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
-
/*
* The length of each packet is stored in terms of
* words. Thus, we must pad the skb data to make sure its
@@ -442,37 +444,62 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
unsigned long flags;
+ int i;
- if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
- wl->tx_queue_count <= WL1271_TX_QUEUE_LOW_WATERMARK) {
- /* firmware buffer has space, restart queues */
- spin_lock_irqsave(&wl->wl_lock, flags);
- ieee80211_wake_queues(wl->hw);
- clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ if (test_bit(i, &wl->stopped_queues_map) &&
+ wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
+ /* firmware buffer has space, restart queues */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ ieee80211_wake_queue(wl->hw,
+ wl1271_tx_get_mac80211_queue(i));
+ clear_bit(i, &wl->stopped_queues_map);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ }
}
}
+static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
+ struct sk_buff_head *queues)
+{
+ int i, q = -1;
+ u32 min_blks = 0xffffffff;
+
+ /*
+ * Find a non-empty ac where:
+ * 1. There are packets to transmit
+ * 2. The FW has the least allocated blocks
+ */
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ if (!skb_queue_empty(&queues[i]) &&
+ (wl->tx_allocated_blocks[i] < min_blks)) {
+ q = i;
+ min_blks = wl->tx_allocated_blocks[q];
+ }
+
+ if (q == -1)
+ return NULL;
+
+ return &queues[q];
+}
+
static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
{
struct sk_buff *skb = NULL;
unsigned long flags;
+ struct sk_buff_head *queue;
- skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VO]);
- if (skb)
- goto out;
- skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VI]);
- if (skb)
- goto out;
- skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BE]);
- if (skb)
+ queue = wl1271_select_queue(wl, wl->tx_queue);
+ if (!queue)
goto out;
- skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BK]);
+
+ skb = skb_dequeue(queue);
out:
if (skb) {
+ int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
- wl->tx_queue_count--;
+ wl->tx_queue_count[q]--;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
@@ -484,6 +511,7 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
struct sk_buff *skb = NULL;
unsigned long flags;
int i, h, start_hlid;
+ struct sk_buff_head *queue;
/* start from the link after the last one */
start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
@@ -492,25 +520,25 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
for (i = 0; i < AP_MAX_LINKS; i++) {
h = (start_hlid + i) % AP_MAX_LINKS;
- skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VO]);
- if (skb)
- goto out;
- skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VI]);
- if (skb)
- goto out;
- skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BE]);
- if (skb)
- goto out;
- skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BK]);
+ /* only consider connected stations */
+ if (h >= WL1271_AP_STA_HLID_START &&
+ !test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map))
+ continue;
+
+ queue = wl1271_select_queue(wl, wl->links[h].tx_queue);
+ if (!queue)
+ continue;
+
+ skb = skb_dequeue(queue);
if (skb)
- goto out;
+ break;
}
-out:
if (skb) {
+ int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
wl->last_tx_hlid = h;
spin_lock_irqsave(&wl->wl_lock, flags);
- wl->tx_queue_count--;
+ wl->tx_queue_count[q]--;
spin_unlock_irqrestore(&wl->wl_lock, flags);
} else {
wl->last_tx_hlid = 0;
@@ -531,9 +559,12 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
if (!skb &&
test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
+ int q;
+
skb = wl->dummy_packet;
+ q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
- wl->tx_queue_count--;
+ wl->tx_queue_count[q]--;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
@@ -558,21 +589,33 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
}
spin_lock_irqsave(&wl->wl_lock, flags);
- wl->tx_queue_count++;
+ wl->tx_queue_count[q]++;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
+static bool wl1271_tx_is_data_present(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
+
+ return ieee80211_is_data_present(hdr->frame_control);
+}
+
void wl1271_tx_work_locked(struct wl1271 *wl)
{
struct sk_buff *skb;
u32 buf_offset = 0;
bool sent_packets = false;
+ bool had_data = false;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
int ret;
if (unlikely(wl->state == WL1271_STATE_OFF))
return;
while ((skb = wl1271_skb_dequeue(wl))) {
+ if (wl1271_tx_is_data_present(skb))
+ had_data = true;
+
ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
if (ret == -EAGAIN) {
/*
@@ -619,6 +662,19 @@ out_ack:
wl1271_handle_tx_low_watermark(wl);
}
+ if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
+ (wl->conf.rx_streaming.always ||
+ test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
+ u32 timeout = wl->conf.rx_streaming.duration;
+
+ /* enable rx streaming */
+ if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+ ieee80211_queue_work(wl->hw,
+ &wl->rx_streaming_enable_work);
+
+ mod_timer(&wl->rx_streaming_timer,
+ jiffies + msecs_to_jiffies(timeout));
+ }
}
void wl1271_tx_work(struct work_struct *work)
@@ -679,10 +735,24 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
wl->stats.retry_count += result->ack_failures;
- /* update security sequence number */
- wl->tx_security_seq += (result->lsb_security_sequence_number -
- wl->tx_security_last_seq);
- wl->tx_security_last_seq = result->lsb_security_sequence_number;
+ /*
+ * update sequence number only when relevant, i.e. only in
+ * sessions of TKIP, AES and GEM (not in open or WEP sessions)
+ */
+ if (info->control.hw_key &&
+ (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
+ info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
+ u8 fw_lsb = result->tx_security_sequence_number_lsb;
+ u8 cur_lsb = wl->tx_security_last_seq_lsb;
+
+ /*
+ * update security sequence number, taking care of potential
+ * wrap-around
+ */
+ wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256;
+ wl->tx_security_last_seq_lsb = fw_lsb;
+ }
/* remove private header from packet */
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
@@ -702,7 +772,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
/* return the packet to the stack */
skb_queue_tail(&wl->deferred_tx_queue, skb);
- ieee80211_queue_work(wl->hw, &wl->netstack_work);
+ queue_work(wl->freezable_wq, &wl->netstack_work);
wl1271_free_tx_id(wl, result->id);
}
@@ -747,23 +817,26 @@ void wl1271_tx_complete(struct wl1271 *wl)
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
struct sk_buff *skb;
- int i, total = 0;
+ int i;
unsigned long flags;
struct ieee80211_tx_info *info;
+ int total[NUM_TX_QUEUES];
for (i = 0; i < NUM_TX_QUEUES; i++) {
+ total[i] = 0;
while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
info = IEEE80211_SKB_CB(skb);
info->status.rates[0].idx = -1;
info->status.rates[0].count = 0;
- ieee80211_tx_status(wl->hw, skb);
- total++;
+ ieee80211_tx_status_ni(wl->hw, skb);
+ total[i]++;
}
}
spin_lock_irqsave(&wl->wl_lock, flags);
- wl->tx_queue_count -= total;
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ wl->tx_queue_count[i] -= total[i];
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl1271_handle_tx_low_watermark(wl);
@@ -795,13 +868,14 @@ void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
info = IEEE80211_SKB_CB(skb);
info->status.rates[0].idx = -1;
info->status.rates[0].count = 0;
- ieee80211_tx_status(wl->hw, skb);
+ ieee80211_tx_status_ni(wl->hw, skb);
}
}
+ wl->tx_queue_count[i] = 0;
}
}
- wl->tx_queue_count = 0;
+ wl->stopped_queues_map = 0;
/*
* Make sure the driver is in a consistent state, in case this
@@ -838,7 +912,7 @@ void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
info->status.rates[0].idx = -1;
info->status.rates[0].count = 0;
- ieee80211_tx_status(wl->hw, skb);
+ ieee80211_tx_status_ni(wl->hw, skb);
}
}
}
@@ -854,8 +928,10 @@ void wl1271_tx_flush(struct wl1271 *wl)
while (!time_after(jiffies, timeout)) {
mutex_lock(&wl->mutex);
wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
- wl->tx_frames_cnt, wl->tx_queue_count);
- if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
+ wl->tx_frames_cnt,
+ wl1271_tx_total_queue_count(wl));
+ if ((wl->tx_frames_cnt == 0) &&
+ (wl1271_tx_total_queue_count(wl) == 0)) {
mutex_unlock(&wl->mutex);
return;
}
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index 832f9258d67..5d719b5a3d1 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -150,7 +150,7 @@ struct wl1271_tx_hw_res_descr {
(from 1st EDCA AIFS counter until TX Complete). */
__le32 medium_delay;
/* LS-byte of last TKIP seq-num (saved per AC for recovery). */
- u8 lsb_security_sequence_number;
+ u8 tx_security_sequence_number_lsb;
/* Retry count - number of transmissions without successful ACK.*/
u8 ack_failures;
/* The rate that succeeded getting ACK
@@ -182,6 +182,32 @@ static inline int wl1271_tx_get_queue(int queue)
}
}
+static inline int wl1271_tx_get_mac80211_queue(int queue)
+{
+ switch (queue) {
+ case CONF_TX_AC_VO:
+ return 0;
+ case CONF_TX_AC_VI:
+ return 1;
+ case CONF_TX_AC_BE:
+ return 2;
+ case CONF_TX_AC_BK:
+ return 3;
+ default:
+ return 2;
+ }
+}
+
+static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
+{
+ int i, count = 0;
+
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ count += wl->tx_queue_count[i];
+
+ return count;
+}
+
void wl1271_tx_work(struct work_struct *work);
void wl1271_tx_work_locked(struct wl1271 *wl);
void wl1271_tx_complete(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index fbe8f46d123..1a8751eb814 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -144,6 +144,7 @@ extern u32 wl12xx_debug_level;
#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
+#define WL1271_TX_SQN_POST_RECOVERY_PADDING 0xff
#define WL1271_CIPHER_SUITE_GEM 0x00147201
@@ -172,7 +173,6 @@ extern u32 wl12xx_debug_level;
#define WL1271_PS_STA_MAX_BLOCKS (2 * 9)
#define WL1271_AP_BSS_INDEX 0
-#define WL1271_AP_DEF_INACTIV_SEC 300
#define WL1271_AP_DEF_BEACON_EXP 20
#define ACX_TX_DESCRIPTORS 32
@@ -226,6 +226,8 @@ enum {
#define FW_VER_MINOR_1_SPARE_STA_MIN 58
#define FW_VER_MINOR_1_SPARE_AP_MIN 47
+#define FW_VER_MINOR_FWLOG_STA_MIN 70
+
struct wl1271_chip {
u32 id;
char fw_ver_str[ETHTOOL_BUSINFO_LEN];
@@ -284,8 +286,7 @@ struct wl1271_fw_sta_status {
u8 tx_total;
u8 reserved1;
__le16 reserved2;
- /* Total structure size is 68 bytes */
- u32 padding;
+ __le32 log_start_addr;
} __packed;
struct wl1271_fw_full_status {
@@ -359,6 +360,9 @@ enum wl12xx_flags {
WL1271_FLAG_DUMMY_PACKET_PENDING,
WL1271_FLAG_SUSPENDED,
WL1271_FLAG_PENDING_WORK,
+ WL1271_FLAG_SOFT_GEMINI,
+ WL1271_FLAG_RX_STREAMING_STARTED,
+ WL1271_FLAG_RECOVERY_IN_PROGRESS,
};
struct wl1271_link {
@@ -420,7 +424,7 @@ struct wl1271 {
/* Accounting for allocated / available TX blocks on HW */
u32 tx_blocks_freed[NUM_TX_QUEUES];
u32 tx_blocks_available;
- u32 tx_allocated_blocks;
+ u32 tx_allocated_blocks[NUM_TX_QUEUES];
u32 tx_results_count;
/* Transmitted TX packets counter for chipset interface */
@@ -434,7 +438,8 @@ struct wl1271 {
/* Frames scheduled for transmission, not handled yet */
struct sk_buff_head tx_queue[NUM_TX_QUEUES];
- int tx_queue_count;
+ int tx_queue_count[NUM_TX_QUEUES];
+ long stopped_queues_map;
/* Frames received, not handled yet by mac80211 */
struct sk_buff_head deferred_rx_queue;
@@ -443,15 +448,23 @@ struct wl1271 {
struct sk_buff_head deferred_tx_queue;
struct work_struct tx_work;
+ struct workqueue_struct *freezable_wq;
/* Pending TX frames */
unsigned long tx_frames_map[BITS_TO_LONGS(ACX_TX_DESCRIPTORS)];
struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
int tx_frames_cnt;
- /* Security sequence number counters */
- u8 tx_security_last_seq;
- s64 tx_security_seq;
+ /*
+ * Security sequence number
+ * bits 0-15: lower 16 bits of the sequence number
+ * bits 16-47: higher 32 bits of the sequence number
+ * bits 48-63: not in use
+ */
+ u64 tx_security_seq;
+
+ /* 8 least significant bits of the last sequence number in use */
+ u8 tx_security_last_seq_lsb;
/* FW Rx counter */
u32 rx_counter;
@@ -468,6 +481,15 @@ struct wl1271 {
/* Network stack work */
struct work_struct netstack_work;
+ /* FW log buffer */
+ u8 *fwlog;
+
+ /* Number of valid bytes in the FW log buffer */
+ ssize_t fwlog_size;
+
+ /* Sysfs FW log entry readers wait queue */
+ wait_queue_head_t fwlog_waitq;
+
/* Hardware recovery work */
struct work_struct recovery_work;
@@ -508,6 +530,11 @@ struct wl1271 {
/* Default key (for WEP) */
u32 default_key;
+ /* Rx Streaming */
+ struct work_struct rx_streaming_enable_work;
+ struct work_struct rx_streaming_disable_work;
+ struct timer_list rx_streaming_timer;
+
unsigned int filters;
unsigned int rx_config;
unsigned int rx_filter;
@@ -564,6 +591,7 @@ struct wl1271 {
/* RX BA constraint value */
bool ba_support;
u8 ba_rx_bitmap;
+ bool ba_allowed;
int tcxo_clock;
@@ -572,6 +600,7 @@ struct wl1271 {
* (currently, only "ANY" trigger is supported)
*/
bool wow_enabled;
+ bool irq_wake_enabled;
/*
* AP-mode - links indexed by HLID. The global and broadcast links
@@ -601,6 +630,9 @@ struct wl1271_station {
int wl1271_plt_start(struct wl1271 *wl);
int wl1271_plt_stop(struct wl1271 *wl);
+int wl1271_recalc_rx_streaming(struct wl1271 *wl);
+void wl12xx_queue_recovery_work(struct wl1271 *wl);
+size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */
@@ -608,8 +640,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
#define WL1271_DEFAULT_POWER_LEVEL 0
-#define WL1271_TX_QUEUE_LOW_WATERMARK 10
-#define WL1271_TX_QUEUE_HIGH_WATERMARK 25
+#define WL1271_TX_QUEUE_LOW_WATERMARK 32
+#define WL1271_TX_QUEUE_HIGH_WATERMARK 256
#define WL1271_DEFERRED_QUEUE_LIMIT 64
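
The raised watermarks above only make sense together with the usual hysteresis they drive: a software queue is stopped in mac80211 once its backlog reaches the high mark and woken again only after it drains back to the low mark. A minimal sketch of that flow control, using a hypothetical helper rather than the driver's own code:

#include <linux/bitops.h>
#include <net/mac80211.h>

static void tx_watermark_check(struct ieee80211_hw *hw, int queue,
			       int queued, unsigned long *stopped_map)
{
	if (queued >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
	    !test_and_set_bit(queue, stopped_map))
		ieee80211_stop_queue(hw, queue);	/* backlog full: stop */
	else if (queued <= WL1271_TX_QUEUE_LOW_WATERMARK &&
		 test_and_clear_bit(queue, stopped_map))
		ieee80211_wake_queue(hw, queue);	/* drained: wake */
}
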
@@ -636,4 +668,15 @@ int wl1271_plt_stop(struct wl1271 *wl);
/* WL128X requires aggregated packets to be aligned to the SDIO block size */
#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2)
+/*
+ * WL127X AP mode requires Low Power DRPw (LPD) enable to reduce power
+ * consumption
+ */
+#define WL12XX_QUIRK_LPD_MODE BIT(3)
+
+/* Older firmwares did not implement the FW logger over bus feature */
+#define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4)
+
+#define WL12XX_HW_BLOCK_SIZE 256
+
#endif
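
The tx_security_seq layout and the new tx_security_last_seq_lsb field above follow a common pattern: the driver keeps a wide monotonic counter while the firmware reports back only a narrow LSB, and the counter is advanced by the wrapped difference. A sketch of that reconstruction under those assumptions (names are illustrative, not the driver's exact logic):

#include <linux/types.h>

static void tx_seq_update_from_lsb(u64 *tx_security_seq, u8 *last_lsb,
				   u8 fw_lsb)
{
	/* unsigned 8-bit subtraction handles wraparound, e.g. 0x05 - 0xfe = 7 */
	u8 delta = fw_lsb - *last_lsb;

	*tx_security_seq += delta;
	*last_lsb = fw_lsb;
}
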
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 4be7c3b5b26..117c4123943 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -21,6 +21,8 @@
#ifndef _ZD_CHIP_H
#define _ZD_CHIP_H
+#include <net/mac80211.h>
+
#include "zd_rf.h"
#include "zd_usb.h"
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index 5463ca9ebc0..9a1b013f81b 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -37,9 +37,15 @@ typedef u16 __nocast zd_addr_t;
if (net_ratelimit()) \
dev_printk_f(KERN_DEBUG, dev, fmt, ## args); \
} while (0)
+# define dev_dbg_f_cond(dev, cond, fmt, args...) ({ \
+ bool __cond = !!(cond); \
+ if (unlikely(__cond)) \
+ dev_printk_f(KERN_DEBUG, dev, fmt, ## args); \
+})
#else
# define dev_dbg_f(dev, fmt, args...) do { (void)(dev); } while (0)
# define dev_dbg_f_limit(dev, fmt, args...) do { (void)(dev); } while (0)
+# define dev_dbg_f_cond(dev, cond, fmt, args...) do { (void)(dev); } while (0)
#endif /* DEBUG */
#ifdef DEBUG
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 5037c8b2b41..cabfae1e70b 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -143,7 +143,7 @@ static void beacon_enable(struct zd_mac *mac);
static void beacon_disable(struct zd_mac *mac);
static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble);
static int zd_mac_config_beacon(struct ieee80211_hw *hw,
- struct sk_buff *beacon);
+ struct sk_buff *beacon, bool in_intr);
static int zd_reg2alpha2(u8 regdomain, char *alpha2)
{
@@ -160,6 +160,22 @@ static int zd_reg2alpha2(u8 regdomain, char *alpha2)
return 1;
}
+static int zd_check_signal(struct ieee80211_hw *hw, int signal)
+{
+ struct zd_mac *mac = zd_hw_mac(hw);
+
+ dev_dbg_f_cond(zd_mac_dev(mac), signal < 0 || signal > 100,
+ "%s: signal value from device not in range 0..100, "
+ "but %d.\n", __func__, signal);
+
+ if (signal < 0)
+ signal = 0;
+ else if (signal > 100)
+ signal = 100;
+
+ return signal;
+}
+
int zd_mac_preinit_hw(struct ieee80211_hw *hw)
{
int r;
@@ -387,10 +403,8 @@ int zd_restore_settings(struct zd_mac *mac)
mac->type == NL80211_IFTYPE_AP) {
if (mac->vif != NULL) {
beacon = ieee80211_beacon_get(mac->hw, mac->vif);
- if (beacon) {
- zd_mac_config_beacon(mac->hw, beacon);
- kfree_skb(beacon);
- }
+ if (beacon)
+ zd_mac_config_beacon(mac->hw, beacon, false);
}
zd_set_beacon_interval(&mac->chip, beacon_interval,
@@ -461,7 +475,7 @@ static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
if (i<IEEE80211_TX_MAX_RATES)
info->status.rates[i].idx = -1; /* terminate */
- info->status.ack_signal = ackssi;
+ info->status.ack_signal = zd_check_signal(hw, ackssi);
ieee80211_tx_status_irqsafe(hw, skb);
}
@@ -664,7 +678,34 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
/* FIXME: Management frame? */
}
-static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon)
+static bool zd_mac_match_cur_beacon(struct zd_mac *mac, struct sk_buff *beacon)
+{
+ if (!mac->beacon.cur_beacon)
+ return false;
+
+ if (mac->beacon.cur_beacon->len != beacon->len)
+ return false;
+
+ return !memcmp(beacon->data, mac->beacon.cur_beacon->data, beacon->len);
+}
+
+static void zd_mac_free_cur_beacon_locked(struct zd_mac *mac)
+{
+ ZD_ASSERT(mutex_is_locked(&mac->chip.mutex));
+
+ kfree_skb(mac->beacon.cur_beacon);
+ mac->beacon.cur_beacon = NULL;
+}
+
+static void zd_mac_free_cur_beacon(struct zd_mac *mac)
+{
+ mutex_lock(&mac->chip.mutex);
+ zd_mac_free_cur_beacon_locked(mac);
+ mutex_unlock(&mac->chip.mutex);
+}
+
+static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon,
+ bool in_intr)
{
struct zd_mac *mac = zd_hw_mac(hw);
int r, ret, num_cmds, req_pos = 0;
@@ -674,13 +715,21 @@ static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon)
unsigned long end_jiffies, message_jiffies;
struct zd_ioreq32 *ioreqs;
+ mutex_lock(&mac->chip.mutex);
+
+ /* Check if hw already has this beacon. */
+ if (zd_mac_match_cur_beacon(mac, beacon)) {
+ r = 0;
+ goto out_nofree;
+ }
+
/* Alloc memory for full beacon write at once. */
num_cmds = 1 + zd_chip_is_zd1211b(&mac->chip) + full_len;
ioreqs = kmalloc(num_cmds * sizeof(struct zd_ioreq32), GFP_KERNEL);
- if (!ioreqs)
- return -ENOMEM;
-
- mutex_lock(&mac->chip.mutex);
+ if (!ioreqs) {
+ r = -ENOMEM;
+ goto out_nofree;
+ }
r = zd_iowrite32_locked(&mac->chip, 0, CR_BCN_FIFO_SEMAPHORE);
if (r < 0)
@@ -688,6 +737,10 @@ static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon)
r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
if (r < 0)
goto release_sema;
+ if (in_intr && tmp & 0x2) {
+ r = -EBUSY;
+ goto release_sema;
+ }
end_jiffies = jiffies + HZ / 2; /*~500ms*/
message_jiffies = jiffies + HZ / 10; /*~100ms*/
@@ -742,7 +795,7 @@ release_sema:
end_jiffies = jiffies + HZ / 2; /*~500ms*/
ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
while (ret < 0) {
- if (time_is_before_eq_jiffies(end_jiffies)) {
+ if (in_intr || time_is_before_eq_jiffies(end_jiffies)) {
ret = -ETIMEDOUT;
break;
}
@@ -757,9 +810,19 @@ release_sema:
if (r < 0 || ret < 0) {
if (r >= 0)
r = ret;
+
+ /* We don't know if beacon was written successfully or not,
+ * so clear current. */
+ zd_mac_free_cur_beacon_locked(mac);
+
goto out;
}
+ /* Beacon has now been written successfully, update current. */
+ zd_mac_free_cur_beacon_locked(mac);
+ mac->beacon.cur_beacon = beacon;
+ beacon = NULL;
+
/* 802.11b/g 2.4G CCK 1Mb
* 802.11a, not yet implemented, uses different values (see GPL vendor
* driver)
@@ -767,11 +830,17 @@ release_sema:
r = zd_iowrite32_locked(&mac->chip, 0x00000400 | (full_len << 19),
CR_BCN_PLCP_CFG);
out:
- mutex_unlock(&mac->chip.mutex);
kfree(ioreqs);
+out_nofree:
+ kfree_skb(beacon);
+ mutex_unlock(&mac->chip.mutex);
+
return r;
reset_device:
+ zd_mac_free_cur_beacon_locked(mac);
+ kfree_skb(beacon);
+
mutex_unlock(&mac->chip.mutex);
kfree(ioreqs);
@@ -982,7 +1051,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
stats.band = IEEE80211_BAND_2GHZ;
- stats.signal = status->signal_strength;
+ stats.signal = zd_check_signal(hw, status->signal_strength);
rate = zd_rx_rate(buffer, status);
@@ -1057,6 +1126,8 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
mac->vif = NULL;
zd_set_beacon_interval(&mac->chip, 0, 0, NL80211_IFTYPE_UNSPECIFIED);
zd_write_mac_addr(&mac->chip, NULL);
+
+ zd_mac_free_cur_beacon(mac);
}
static int zd_op_config(struct ieee80211_hw *hw, u32 changed)
@@ -1094,10 +1165,8 @@ static void zd_beacon_done(struct zd_mac *mac)
* Fetch next beacon so that tim_count is updated.
*/
beacon = ieee80211_beacon_get(mac->hw, mac->vif);
- if (beacon) {
- zd_mac_config_beacon(mac->hw, beacon);
- kfree_skb(beacon);
- }
+ if (beacon)
+ zd_mac_config_beacon(mac->hw, beacon, true);
spin_lock_irq(&mac->lock);
mac->beacon.last_update = jiffies;
@@ -1222,9 +1291,8 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
if (beacon) {
zd_chip_disable_hwint(&mac->chip);
- zd_mac_config_beacon(hw, beacon);
+ zd_mac_config_beacon(hw, beacon, false);
zd_chip_enable_hwint(&mac->chip);
- kfree_skb(beacon);
}
}
@@ -1361,7 +1429,8 @@ static void beacon_watchdog_handler(struct work_struct *work)
spin_lock_irq(&mac->lock);
interval = mac->beacon.interval;
period = mac->beacon.period;
- timeout = mac->beacon.last_update + msecs_to_jiffies(interval) + HZ;
+ timeout = mac->beacon.last_update +
+ msecs_to_jiffies(interval * 1024 / 1000) * 3;
spin_unlock_irq(&mac->lock);
if (interval > 0 && time_is_before_jiffies(timeout)) {
@@ -1374,8 +1443,9 @@ static void beacon_watchdog_handler(struct work_struct *work)
beacon = ieee80211_beacon_get(mac->hw, mac->vif);
if (beacon) {
- zd_mac_config_beacon(mac->hw, beacon);
- kfree_skb(beacon);
+ zd_mac_free_cur_beacon(mac);
+
+ zd_mac_config_beacon(mac->hw, beacon, false);
}
zd_set_beacon_interval(&mac->chip, interval, period, mac->type);
@@ -1410,6 +1480,8 @@ static void beacon_disable(struct zd_mac *mac)
{
dev_dbg_f(zd_mac_dev(mac), "\n");
cancel_delayed_work_sync(&mac->beacon.watchdog_work);
+
+ zd_mac_free_cur_beacon(mac);
}
#define LINK_LED_WORK_DELAY HZ
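
The reworked watchdog timeout above converts the beacon interval from TUs to milliseconds (1 TU = 1024 microseconds, hence interval * 1024 / 1000) and then lets roughly three beacon periods elapse before declaring the beacon stuck. A small sketch of that calculation, with the three-period margin taken as an assumption read off the patch rather than a documented requirement:

#include <linux/jiffies.h>
#include <linux/types.h>

static unsigned long beacon_watchdog_deadline(unsigned long last_update,
					      u16 interval_tu)
{
	unsigned int interval_ms = interval_tu * 1024 / 1000;	/* TU -> ms */

	return last_update + msecs_to_jiffies(interval_ms) * 3;
}
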
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index f8c93c3fe75..c01eca859f9 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -165,6 +165,7 @@ struct housekeeping {
struct beacon {
struct delayed_work watchdog_work;
+ struct sk_buff *cur_beacon;
unsigned long last_update;
u16 interval;
u8 period;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 631194d4982..cf0d69dd7be 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -111,6 +111,9 @@ MODULE_DEVICE_TABLE(usb, usb_ids);
#define FW_ZD1211_PREFIX "zd1211/zd1211_"
#define FW_ZD1211B_PREFIX "zd1211/zd1211b_"
+static bool check_read_regs(struct zd_usb *usb, struct usb_req_read_regs *req,
+ unsigned int count);
+
/* USB device initialization */
static void int_urb_complete(struct urb *urb);
@@ -365,6 +368,20 @@ exit:
#define urb_dev(urb) (&(urb)->dev->dev)
+static inline void handle_regs_int_override(struct urb *urb)
+{
+ struct zd_usb *usb = urb->context;
+ struct zd_usb_interrupt *intr = &usb->intr;
+
+ spin_lock(&intr->lock);
+ if (atomic_read(&intr->read_regs_enabled)) {
+ atomic_set(&intr->read_regs_enabled, 0);
+ intr->read_regs_int_overridden = 1;
+ complete(&intr->read_regs.completion);
+ }
+ spin_unlock(&intr->lock);
+}
+
static inline void handle_regs_int(struct urb *urb)
{
struct zd_usb *usb = urb->context;
@@ -383,25 +400,45 @@ static inline void handle_regs_int(struct urb *urb)
USB_MAX_EP_INT_BUFFER);
spin_unlock(&mac->lock);
schedule_work(&mac->process_intr);
- } else if (intr->read_regs_enabled) {
- intr->read_regs.length = len = urb->actual_length;
-
+ } else if (atomic_read(&intr->read_regs_enabled)) {
+ len = urb->actual_length;
+ intr->read_regs.length = urb->actual_length;
if (len > sizeof(intr->read_regs.buffer))
len = sizeof(intr->read_regs.buffer);
+
memcpy(intr->read_regs.buffer, urb->transfer_buffer, len);
- intr->read_regs_enabled = 0;
+
+ /* Sometimes USB_INT_ID_REGS is not overridden, but comes after
+ * USB_INT_ID_RETRY_FAILED. Read-reg retry then gets this
+ * delayed USB_INT_ID_REGS, but leaves USB_INT_ID_REGS of
+ * retry unhandled. Next read-reg command then might catch
+ * this wrong USB_INT_ID_REGS. Fix by ignoring wrong reads.
+ */
+ if (!check_read_regs(usb, intr->read_regs.req,
+ intr->read_regs.req_count))
+ goto out;
+
+ atomic_set(&intr->read_regs_enabled, 0);
+ intr->read_regs_int_overridden = 0;
complete(&intr->read_regs.completion);
+
goto out;
}
out:
spin_unlock(&intr->lock);
+
+ /* CR_INTERRUPT might override read_reg too. */
+ if (int_num == CR_INTERRUPT && atomic_read(&intr->read_regs_enabled))
+ handle_regs_int_override(urb);
}
static void int_urb_complete(struct urb *urb)
{
int r;
struct usb_int_header *hdr;
+ struct zd_usb *usb;
+ struct zd_usb_interrupt *intr;
switch (urb->status) {
case 0:
@@ -430,6 +467,14 @@ static void int_urb_complete(struct urb *urb)
goto resubmit;
}
+ /* USB_INT_ID_RETRY_FAILED triggered by tx-urb submit can override
+ * pending USB_INT_ID_REGS causing read command timeout.
+ */
+ usb = urb->context;
+ intr = &usb->intr;
+ if (hdr->id != USB_INT_ID_REGS && atomic_read(&intr->read_regs_enabled))
+ handle_regs_int_override(urb);
+
switch (hdr->id) {
case USB_INT_ID_REGS:
handle_regs_int(urb);
@@ -579,8 +624,8 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
if (length < sizeof(struct rx_length_info)) {
/* It's not a complete packet anyhow. */
- printk("%s: invalid, small RX packet : %d\n",
- __func__, length);
+ dev_dbg_f(zd_usb_dev(usb), "invalid, small RX packet : %d\n",
+ length);
return;
}
length_info = (struct rx_length_info *)
@@ -1129,6 +1174,7 @@ static inline void init_usb_interrupt(struct zd_usb *usb)
spin_lock_init(&intr->lock);
intr->interval = int_urb_interval(zd_usb_to_usbdev(usb));
init_completion(&intr->read_regs.completion);
+ atomic_set(&intr->read_regs_enabled, 0);
intr->read_regs.cr_int_addr = cpu_to_le16((u16)CR_INTERRUPT);
}
@@ -1563,12 +1609,16 @@ static int usb_int_regs_length(unsigned int count)
return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data);
}
-static void prepare_read_regs_int(struct zd_usb *usb)
+static void prepare_read_regs_int(struct zd_usb *usb,
+ struct usb_req_read_regs *req,
+ unsigned int count)
{
struct zd_usb_interrupt *intr = &usb->intr;
spin_lock_irq(&intr->lock);
- intr->read_regs_enabled = 1;
+ atomic_set(&intr->read_regs_enabled, 1);
+ intr->read_regs.req = req;
+ intr->read_regs.req_count = count;
INIT_COMPLETION(intr->read_regs.completion);
spin_unlock_irq(&intr->lock);
}
@@ -1578,22 +1628,18 @@ static void disable_read_regs_int(struct zd_usb *usb)
struct zd_usb_interrupt *intr = &usb->intr;
spin_lock_irq(&intr->lock);
- intr->read_regs_enabled = 0;
+ atomic_set(&intr->read_regs_enabled, 0);
spin_unlock_irq(&intr->lock);
}
-static int get_results(struct zd_usb *usb, u16 *values,
- struct usb_req_read_regs *req, unsigned int count)
+static bool check_read_regs(struct zd_usb *usb, struct usb_req_read_regs *req,
+ unsigned int count)
{
- int r;
int i;
struct zd_usb_interrupt *intr = &usb->intr;
struct read_regs_int *rr = &intr->read_regs;
struct usb_int_regs *regs = (struct usb_int_regs *)rr->buffer;
- spin_lock_irq(&intr->lock);
-
- r = -EIO;
/* The created block size seems to be larger than expected.
* However results appear to be correct.
*/
@@ -1601,13 +1647,14 @@ static int get_results(struct zd_usb *usb, u16 *values,
dev_dbg_f(zd_usb_dev(usb),
"error: actual length %d less than expected %d\n",
rr->length, usb_int_regs_length(count));
- goto error_unlock;
+ return false;
}
+
if (rr->length > sizeof(rr->buffer)) {
dev_dbg_f(zd_usb_dev(usb),
"error: actual length %d exceeds buffer size %zu\n",
rr->length, sizeof(rr->buffer));
- goto error_unlock;
+ return false;
}
for (i = 0; i < count; i++) {
@@ -1617,8 +1664,39 @@ static int get_results(struct zd_usb *usb, u16 *values,
"rd[%d] addr %#06hx expected %#06hx\n", i,
le16_to_cpu(rd->addr),
le16_to_cpu(req->addr[i]));
- goto error_unlock;
+ return false;
}
+ }
+
+ return true;
+}
+
+static int get_results(struct zd_usb *usb, u16 *values,
+ struct usb_req_read_regs *req, unsigned int count,
+ bool *retry)
+{
+ int r;
+ int i;
+ struct zd_usb_interrupt *intr = &usb->intr;
+ struct read_regs_int *rr = &intr->read_regs;
+ struct usb_int_regs *regs = (struct usb_int_regs *)rr->buffer;
+
+ spin_lock_irq(&intr->lock);
+
+ r = -EIO;
+
+	/* Did the read fail because of a firmware bug? */
+ *retry = !!intr->read_regs_int_overridden;
+ if (*retry)
+ goto error_unlock;
+
+ if (!check_read_regs(usb, req, count)) {
+ dev_dbg_f(zd_usb_dev(usb), "error: invalid read regs\n");
+ goto error_unlock;
+ }
+
+ for (i = 0; i < count; i++) {
+ struct reg_data *rd = &regs->regs[i];
values[i] = le16_to_cpu(rd->value);
}
@@ -1631,11 +1709,11 @@ error_unlock:
int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
const zd_addr_t *addresses, unsigned int count)
{
- int r;
- int i, req_len, actual_req_len;
+ int r, i, req_len, actual_req_len, try_count = 0;
struct usb_device *udev;
struct usb_req_read_regs *req = NULL;
unsigned long timeout;
+ bool retry = false;
if (count < 1) {
dev_dbg_f(zd_usb_dev(usb), "error: count is zero\n");
@@ -1671,8 +1749,10 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
for (i = 0; i < count; i++)
req->addr[i] = cpu_to_le16((u16)addresses[i]);
+retry_read:
+ try_count++;
udev = zd_usb_to_usbdev(usb);
- prepare_read_regs_int(usb);
+ prepare_read_regs_int(usb, req, count);
r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
@@ -1696,7 +1776,12 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
goto error;
}
- r = get_results(usb, values, req, count);
+ r = get_results(usb, values, req, count, &retry);
+ if (retry && try_count < 20) {
+ dev_dbg_f(zd_usb_dev(usb), "read retry, tries so far: %d\n",
+ try_count);
+ goto retry_read;
+ }
error:
return r;
}
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index bf942843b73..99193b456a7 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -144,6 +144,8 @@ struct usb_int_retry_fail {
struct read_regs_int {
struct completion completion;
+ struct usb_req_read_regs *req;
+ unsigned int req_count;
/* Stores the USB int structure and contains the USB address of the
* first requested register before request.
*/
@@ -169,7 +171,8 @@ struct zd_usb_interrupt {
void *buffer;
dma_addr_t buffer_dma;
int interval;
- u8 read_regs_enabled:1;
+ atomic_t read_regs_enabled;
+ u8 read_regs_int_overridden:1;
};
static inline struct usb_int_regs *get_read_regs(struct zd_usb_interrupt *intr)
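
The zd_usb changes above (the atomic read_regs_enabled flag, read_regs_int_overridden, and handle_regs_int_override) implement a wake-and-retry scheme: when an interrupt arrives that cannot carry the expected register data, the pending reader is completed with an "overridden" marker instead of being left to time out, and zd_usb_ioread16v then retries the request. A generic sketch of that scheme with illustrative names:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/spinlock.h>

struct pending_read {
	spinlock_t lock;
	atomic_t enabled;	/* a reader is waiting for register data */
	bool overridden;	/* completed without data; caller should retry */
	struct completion done;
};

static void pending_read_override(struct pending_read *pr)
{
	spin_lock(&pr->lock);
	if (atomic_read(&pr->enabled)) {
		atomic_set(&pr->enabled, 0);
		pr->overridden = true;
		complete(&pr->done);	/* wake the waiter early */
	}
	spin_unlock(&pr->lock);
}
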
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0e4851b8a77..fd00f25d985 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1743,3 +1743,4 @@ failed_init:
module_init(netback_init);
MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("xen-backend:vif");
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d29365a232a..d7c8a98daff 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -70,6 +70,14 @@ struct netfront_cb {
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
+struct netfront_stats {
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+};
+
struct netfront_info {
struct list_head list;
struct net_device *netdev;
@@ -122,6 +130,8 @@ struct netfront_info {
struct mmu_update rx_mmu[NET_RX_RING_SIZE];
/* Statistics */
+ struct netfront_stats __percpu *stats;
+
unsigned long rx_gso_checksum_fixup;
};
@@ -468,6 +478,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned short id;
struct netfront_info *np = netdev_priv(dev);
+ struct netfront_stats *stats = this_cpu_ptr(np->stats);
struct xen_netif_tx_request *tx;
struct xen_netif_extra_info *extra;
char *data = skb->data;
@@ -552,8 +563,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (notify)
notify_remote_via_irq(np->netdev->irq);
- dev->stats.tx_bytes += skb->len;
- dev->stats.tx_packets++;
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ u64_stats_update_end(&stats->syncp);
/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
xennet_tx_buf_gc(dev);
@@ -847,6 +860,8 @@ out:
static int handle_incoming_queue(struct net_device *dev,
struct sk_buff_head *rxq)
{
+ struct netfront_info *np = netdev_priv(dev);
+ struct netfront_stats *stats = this_cpu_ptr(np->stats);
int packets_dropped = 0;
struct sk_buff *skb;
@@ -871,8 +886,10 @@ static int handle_incoming_queue(struct net_device *dev,
continue;
}
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
/* Pass it up. */
netif_receive_skb(skb);
@@ -1034,6 +1051,38 @@ static int xennet_change_mtu(struct net_device *dev, int mtu)
return 0;
}
+static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->syncp);
+
+ rx_packets = stats->rx_packets;
+ tx_packets = stats->tx_packets;
+ rx_bytes = stats->rx_bytes;
+ tx_bytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+
+ tot->rx_packets += rx_packets;
+ tot->tx_packets += tx_packets;
+ tot->rx_bytes += rx_bytes;
+ tot->tx_bytes += tx_bytes;
+ }
+
+ tot->rx_errors = dev->stats.rx_errors;
+ tot->tx_dropped = dev->stats.tx_dropped;
+
+ return tot;
+}
+
static void xennet_release_tx_bufs(struct netfront_info *np)
{
struct sk_buff *skb;
@@ -1182,6 +1231,7 @@ static const struct net_device_ops xennet_netdev_ops = {
.ndo_stop = xennet_close,
.ndo_start_xmit = xennet_start_xmit,
.ndo_change_mtu = xennet_change_mtu,
+ .ndo_get_stats64 = xennet_get_stats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_fix_features = xennet_fix_features,
@@ -1216,6 +1266,11 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
np->rx_refill_timer.data = (unsigned long)netdev;
np->rx_refill_timer.function = rx_refill_timeout;
+ err = -ENOMEM;
+ np->stats = alloc_percpu(struct netfront_stats);
+ if (np->stats == NULL)
+ goto exit;
+
/* Initialise tx_skbs as a free chain containing every entry. */
np->tx_skb_freelist = 0;
for (i = 0; i < NET_TX_RING_SIZE; i++) {
@@ -1234,7 +1289,7 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
&np->gref_tx_head) < 0) {
printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
err = -ENOMEM;
- goto exit;
+ goto exit_free_stats;
}
/* A grant for every rx ring slot */
if (gnttab_alloc_grant_references(RX_MAX_TARGET,
@@ -1270,6 +1325,8 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
exit_free_tx:
gnttab_free_grant_references(np->gref_tx_head);
+ exit_free_stats:
+ free_percpu(np->stats);
exit:
free_netdev(netdev);
return ERR_PTR(err);
@@ -1869,6 +1926,8 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
xennet_sysfs_delif(info->netdev);
+ free_percpu(info->stats);
+
free_netdev(info->netdev);
return 0;
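
The netfront conversion above moves the packet and byte counters into per-CPU storage guarded by u64_stats_sync, so the 64-bit values can be read consistently on 32-bit kernels without a lock in the hot path. Extending that pattern with one more counter would look like this (the rx_dropped field is hypothetical, purely to illustrate the writer-side discipline):

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct example_stats {
	u64 rx_dropped;
	struct u64_stats_sync syncp;
};

static void example_count_drop(struct example_stats __percpu *stats)
{
	struct example_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	s->rx_dropped++;
	u64_stats_update_end(&s->syncp);
}
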
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 372572c0adc..8018d7d045b 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -26,6 +26,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
+#include <linux/interrupt.h>
#define DRIVER_NAME "xilinx_emaclite"
@@ -251,11 +252,11 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
u16 *from_u16_ptr, *to_u16_ptr;
to_u32_ptr = dest_ptr;
- from_u16_ptr = (u16 *) src_ptr;
+ from_u16_ptr = src_ptr;
align_buffer = 0;
for (; length > 3; length -= 4) {
- to_u16_ptr = (u16 *) ((void *) &align_buffer);
+ to_u16_ptr = (u16 *)&align_buffer;
*to_u16_ptr++ = *from_u16_ptr++;
*to_u16_ptr++ = *from_u16_ptr++;
@@ -647,7 +648,8 @@ static void xemaclite_rx_handler(struct net_device *dev)
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
- netif_rx(skb); /* Send the packet upstream */
+ if (!skb_defer_rx_timestamp(skb))
+ netif_rx(skb); /* Send the packet upstream */
}
/**
@@ -1029,15 +1031,19 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
spin_lock_irqsave(&lp->reset_lock, flags);
if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) {
/* If the Emaclite Tx buffer is busy, stop the Tx queue and
- * defer the skb for transmission at a later point when the
+ * defer the skb for transmission during the ISR, after the
* current transmission is complete */
netif_stop_queue(dev);
lp->deferred_skb = new_skb;
+ /* Take the time stamp now, since we can't do this in an ISR. */
+ skb_tx_timestamp(new_skb);
spin_unlock_irqrestore(&lp->reset_lock, flags);
return 0;
}
spin_unlock_irqrestore(&lp->reset_lock, flags);
+ skb_tx_timestamp(new_skb);
+
dev->stats.tx_bytes += len;
dev_kfree_skb(new_skb);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index ec47e22fa18..3e5ac60b89a 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -442,19 +442,19 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_cleardev;
- np->tx_ring = (struct yellowfin_desc *)ring_space;
+ np->tx_ring = ring_space;
np->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_tx;
- np->rx_ring = (struct yellowfin_desc *)ring_space;
+ np->rx_ring = ring_space;
np->rx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_rx;
- np->tx_status = (struct tx_status_words *)ring_space;
+ np->tx_status = ring_space;
np->tx_status_dma = ring_dma;
if (dev->mem_start)
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index ec2800ff8d4..8b8881718f5 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -731,7 +731,7 @@ static void znet_rx(struct net_device *dev)
cur_frame_end_offset -= ((count + 1)>>1) + 3;
if (cur_frame_end_offset < 0)
cur_frame_end_offset += RX_BUF_SIZE/2;
- };
+ }
/* Now step forward through the list. */
do {
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index 8c7c522a056..15e7751a273 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -19,6 +19,8 @@
* Ethernet Controllers.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -34,115 +36,242 @@
#include <asm/amigaints.h>
#include <asm/amigahw.h>
-#define EI_SHIFT(x) (ei_local->reg_offset[x])
-#define ei_inb(port) in_8(port)
-#define ei_outb(val,port) out_8(port,val)
-#define ei_inb_p(port) in_8(port)
-#define ei_outb_p(val,port) out_8(port,val)
+#define EI_SHIFT(x) (ei_local->reg_offset[x])
+#define ei_inb(port) in_8(port)
+#define ei_outb(val, port) out_8(port, val)
+#define ei_inb_p(port) in_8(port)
+#define ei_outb_p(val, port) out_8(port, val)
static const char version[] =
- "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+ "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
#include "lib8390.c"
#define DRV_NAME "zorro8390"
#define NE_BASE (dev->base_addr)
-#define NE_CMD (0x00*2)
-#define NE_DATAPORT (0x10*2) /* NatSemi-defined port window offset. */
-#define NE_RESET (0x1f*2) /* Issue a read to reset, a write to clear. */
-#define NE_IO_EXTENT (0x20*2)
-
-#define NE_EN0_ISR (0x07*2)
-#define NE_EN0_DCFG (0x0e*2)
-
-#define NE_EN0_RSARLO (0x08*2)
-#define NE_EN0_RSARHI (0x09*2)
-#define NE_EN0_RCNTLO (0x0a*2)
-#define NE_EN0_RXCR (0x0c*2)
-#define NE_EN0_TXCR (0x0d*2)
-#define NE_EN0_RCNTHI (0x0b*2)
-#define NE_EN0_IMR (0x0f*2)
+#define NE_CMD (0x00 * 2)
+#define NE_DATAPORT (0x10 * 2) /* NatSemi-defined port window offset */
+#define NE_RESET (0x1f * 2) /* Issue a read to reset,
+ * a write to clear. */
+#define NE_IO_EXTENT (0x20 * 2)
+
+#define NE_EN0_ISR (0x07 * 2)
+#define NE_EN0_DCFG (0x0e * 2)
+
+#define NE_EN0_RSARLO (0x08 * 2)
+#define NE_EN0_RSARHI (0x09 * 2)
+#define NE_EN0_RCNTLO (0x0a * 2)
+#define NE_EN0_RXCR (0x0c * 2)
+#define NE_EN0_TXCR (0x0d * 2)
+#define NE_EN0_RCNTHI (0x0b * 2)
+#define NE_EN0_IMR (0x0f * 2)
#define NESM_START_PG 0x40 /* First page of TX buffer */
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
-
-#define WORDSWAP(a) ((((a)>>8)&0xff) | ((a)<<8))
-
+#define WORDSWAP(a) ((((a) >> 8) & 0xff) | ((a) << 8))
static struct card_info {
- zorro_id id;
- const char *name;
- unsigned int offset;
+ zorro_id id;
+ const char *name;
+ unsigned int offset;
} cards[] __devinitdata = {
- { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, "Ariadne II", 0x0600 },
- { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, "X-Surf", 0x8600 },
+ { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, "Ariadne II", 0x0600 },
+ { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, "X-Surf", 0x8600 },
};
-static int __devinit zorro8390_init_one(struct zorro_dev *z,
- const struct zorro_device_id *ent);
-static int __devinit zorro8390_init(struct net_device *dev,
- unsigned long board, const char *name,
- unsigned long ioaddr);
-static int zorro8390_open(struct net_device *dev);
-static int zorro8390_close(struct net_device *dev);
-static void zorro8390_reset_8390(struct net_device *dev);
+/* Hard reset the card. This used to pause for the same period that a
+ * 8390 reset command required, but that shouldn't be necessary.
+ */
+static void zorro8390_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+
+ if (ei_debug > 1)
+ netdev_dbg(dev, "resetting - t=%ld...\n", jiffies);
+
+ z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RESET) == 0)
+ if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
+ netdev_warn(dev, "%s: did not complete\n", __func__);
+ break;
+ }
+ z_writeb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly.
+ */
static void zorro8390_get_8390_hdr(struct net_device *dev,
- struct e8390_pkt_hdr *hdr, int ring_page);
+ struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ int nic_base = dev->base_addr;
+ int cnt;
+ short *ptrs;
+
+ /* This *shouldn't* happen.
+ * If it does, it's the last thing you'll see
+ */
+ if (ei_status.dmaing) {
+ netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+
+ ei_status.dmaing |= 0x01;
+ z_writeb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
+ z_writeb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO);
+ z_writeb(0, nic_base + NE_EN0_RCNTHI);
+ z_writeb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */
+ z_writeb(ring_page, nic_base + NE_EN0_RSARHI);
+ z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+ ptrs = (short *)hdr;
+ for (cnt = 0; cnt < sizeof(struct e8390_pkt_hdr) >> 1; cnt++)
+ *ptrs++ = z_readw(NE_BASE + NE_DATAPORT);
+
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */
+
+ hdr->count = WORDSWAP(hdr->count);
+
+ ei_status.dmaing &= ~0x01;
+}
+
+/* Block input and output, similar to the Crynwr packet driver.
+ * If you are porting to a new ethercard, look at the packet driver source
+ * for hints. The NEx000 doesn't share the on-board packet memory --
+ * you have to put the packet out through the "remote DMA" dataport
+ * using z_writeb.
+ */
static void zorro8390_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void zorro8390_block_output(struct net_device *dev, const int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ int nic_base = dev->base_addr;
+ char *buf = skb->data;
+ short *ptrs;
+ int cnt;
+
+ /* This *shouldn't* happen.
+ * If it does, it's the last thing you'll see
+ */
+ if (ei_status.dmaing) {
+ netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ z_writeb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
+ z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO);
+ z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI);
+ z_writeb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO);
+ z_writeb(ring_offset >> 8, nic_base + NE_EN0_RSARHI);
+ z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+ ptrs = (short *)buf;
+ for (cnt = 0; cnt < count >> 1; cnt++)
+ *ptrs++ = z_readw(NE_BASE + NE_DATAPORT);
+ if (count & 0x01)
+ buf[count - 1] = z_readb(NE_BASE + NE_DATAPORT);
+
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */
+ ei_status.dmaing &= ~0x01;
+}
+
+static void zorro8390_block_output(struct net_device *dev, int count,
const unsigned char *buf,
- const int start_page);
-static void __devexit zorro8390_remove_one(struct zorro_dev *z);
+ const int start_page)
+{
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+ short *ptrs;
+ int cnt;
+
+ /* Round the count up for word writes. Do we need to do this?
+ * What effect will an odd byte count have on the 8390?
+ * I should check someday.
+ */
+ if (count & 0x01)
+ count++;
+
+ /* This *shouldn't* happen.
+ * If it does, it's the last thing you'll see
+ */
+ if (ei_status.dmaing) {
+ netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ __func__, ei_status.dmaing, ei_status.irqlock);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ z_writeb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
+
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
+
+ /* Now the normal output. */
+ z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO);
+ z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI);
+ z_writeb(0x00, nic_base + NE_EN0_RSARLO);
+ z_writeb(start_page, nic_base + NE_EN0_RSARHI);
+
+ z_writeb(E8390_RWRITE + E8390_START, nic_base + NE_CMD);
+ ptrs = (short *)buf;
+ for (cnt = 0; cnt < count >> 1; cnt++)
+ z_writew(*ptrs++, NE_BASE + NE_DATAPORT);
+
+ dma_start = jiffies;
+
+ while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
+ if (time_after(jiffies, dma_start + 2 * HZ / 100)) {
+ /* 20ms */
+ netdev_err(dev, "timeout waiting for Tx RDC\n");
+ zorro8390_reset_8390(dev);
+ __NS8390_init(dev, 1);
+ break;
+ }
+
+ z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr */
+ ei_status.dmaing &= ~0x01;
+}
-static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = {
- { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, },
- { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, },
- { 0 }
-};
-MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl);
+static int zorro8390_open(struct net_device *dev)
+{
+ __ei_open(dev);
+ return 0;
+}
-static struct zorro_driver zorro8390_driver = {
- .name = "zorro8390",
- .id_table = zorro8390_zorro_tbl,
- .probe = zorro8390_init_one,
- .remove = __devexit_p(zorro8390_remove_one),
-};
+static int zorro8390_close(struct net_device *dev)
+{
+ if (ei_debug > 1)
+ netdev_dbg(dev, "Shutting down ethercard\n");
+ __ei_close(dev);
+ return 0;
+}
-static int __devinit zorro8390_init_one(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+static void __devexit zorro8390_remove_one(struct zorro_dev *z)
{
- struct net_device *dev;
- unsigned long board, ioaddr;
- int err, i;
-
- for (i = ARRAY_SIZE(cards)-1; i >= 0; i--)
- if (z->id == cards[i].id)
- break;
- if (i < 0)
- return -ENODEV;
-
- board = z->resource.start;
- ioaddr = board+cards[i].offset;
- dev = ____alloc_ei_netdev(0);
- if (!dev)
- return -ENOMEM;
- if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) {
- free_netdev(dev);
- return -EBUSY;
- }
- if ((err = zorro8390_init(dev, board, cards[i].name,
- ZTWO_VADDR(ioaddr)))) {
- release_mem_region(ioaddr, NE_IO_EXTENT*2);
+ struct net_device *dev = zorro_get_drvdata(z);
+
+ unregister_netdev(dev);
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ release_mem_region(ZTWO_PADDR(dev->base_addr), NE_IO_EXTENT * 2);
free_netdev(dev);
- return err;
- }
- zorro_set_drvdata(z, dev);
- return 0;
}
+static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = {
+ { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, },
+ { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl);
+
static const struct net_device_ops zorro8390_netdev_ops = {
.ndo_open = zorro8390_open,
.ndo_stop = zorro8390_close,
@@ -151,7 +280,7 @@ static const struct net_device_ops zorro8390_netdev_ops = {
.ndo_get_stats = __ei_get_stats,
.ndo_set_multicast_list = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = __ei_poll,
@@ -162,295 +291,159 @@ static int __devinit zorro8390_init(struct net_device *dev,
unsigned long board, const char *name,
unsigned long ioaddr)
{
- int i;
- int err;
- unsigned char SA_prom[32];
- int start_page, stop_page;
- static u32 zorro8390_offsets[16] = {
- 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
- 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
- };
-
- /* Reset card. Who knows what dain-bramaged state it was left in. */
- {
- unsigned long reset_start_time = jiffies;
-
- z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET);
-
- while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
- if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk(KERN_WARNING " not found (no reset ack).\n");
- return -ENODEV;
- }
-
- z_writeb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */
- }
-
- /* Read the 16 bytes of station address PROM.
- We must first initialize registers, similar to NS8390_init(eifdev, 0).
- We can't reliably read the SAPROM address without this.
- (I learned the hard way!). */
- {
- struct {
- u32 value;
- u32 offset;
- } program_seq[] = {
- {E8390_NODMA+E8390_PAGE0+E8390_STOP, NE_CMD}, /* Select page 0*/
- {0x48, NE_EN0_DCFG}, /* Set byte-wide (0x48) access. */
- {0x00, NE_EN0_RCNTLO}, /* Clear the count regs. */
- {0x00, NE_EN0_RCNTHI},
- {0x00, NE_EN0_IMR}, /* Mask completion irq. */
- {0xFF, NE_EN0_ISR},
- {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */
- {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode. */
- {32, NE_EN0_RCNTLO},
- {0x00, NE_EN0_RCNTHI},
- {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000. */
- {0x00, NE_EN0_RSARHI},
- {E8390_RREAD+E8390_START, NE_CMD},
+ int i;
+ int err;
+ unsigned char SA_prom[32];
+ int start_page, stop_page;
+ static u32 zorro8390_offsets[16] = {
+ 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
+ 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
};
- for (i = 0; i < ARRAY_SIZE(program_seq); i++) {
- z_writeb(program_seq[i].value, ioaddr + program_seq[i].offset);
- }
- }
- for (i = 0; i < 16; i++) {
- SA_prom[i] = z_readb(ioaddr + NE_DATAPORT);
- (void)z_readb(ioaddr + NE_DATAPORT);
- }
- /* We must set the 8390 for word mode. */
- z_writeb(0x49, ioaddr + NE_EN0_DCFG);
- start_page = NESM_START_PG;
- stop_page = NESM_STOP_PG;
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ {
+ unsigned long reset_start_time = jiffies;
- dev->base_addr = ioaddr;
- dev->irq = IRQ_AMIGA_PORTS;
+ z_writeb(z_readb(ioaddr + NE_RESET), ioaddr + NE_RESET);
- /* Install the Interrupt handler */
- i = request_irq(IRQ_AMIGA_PORTS, __ei_interrupt, IRQF_SHARED, DRV_NAME, dev);
- if (i) return i;
+ while ((z_readb(ioaddr + NE_EN0_ISR) & ENISR_RESET) == 0)
+ if (time_after(jiffies,
+ reset_start_time + 2 * HZ / 100)) {
+ netdev_warn(dev, "not found (no reset ack)\n");
+ return -ENODEV;
+ }
- for(i = 0; i < ETHER_ADDR_LEN; i++)
- dev->dev_addr[i] = SA_prom[i];
-
-#ifdef DEBUG
- printk("%pM", dev->dev_addr);
-#endif
+ z_writeb(0xff, ioaddr + NE_EN0_ISR); /* Ack all intr. */
+ }
- ei_status.name = name;
- ei_status.tx_start_page = start_page;
- ei_status.stop_page = stop_page;
- ei_status.word16 = 1;
+ /* Read the 16 bytes of station address PROM.
+ * We must first initialize registers,
+ * similar to NS8390_init(eifdev, 0).
+ * We can't reliably read the SAPROM address without this.
+ * (I learned the hard way!).
+ */
+ {
+ static const struct {
+ u32 value;
+ u32 offset;
+ } program_seq[] = {
+ {E8390_NODMA + E8390_PAGE0 + E8390_STOP, NE_CMD},
+ /* Select page 0 */
+ {0x48, NE_EN0_DCFG}, /* 0x48: Set byte-wide access */
+ {0x00, NE_EN0_RCNTLO}, /* Clear the count regs */
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_IMR}, /* Mask completion irq */
+ {0xFF, NE_EN0_ISR},
+ {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode */
+ {32, NE_EN0_RCNTLO},
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000 */
+ {0x00, NE_EN0_RSARHI},
+ {E8390_RREAD + E8390_START, NE_CMD},
+ };
+ for (i = 0; i < ARRAY_SIZE(program_seq); i++)
+ z_writeb(program_seq[i].value,
+ ioaddr + program_seq[i].offset);
+ }
+ for (i = 0; i < 16; i++) {
+ SA_prom[i] = z_readb(ioaddr + NE_DATAPORT);
+ (void)z_readb(ioaddr + NE_DATAPORT);
+ }
- ei_status.rx_start_page = start_page + TX_PAGES;
+ /* We must set the 8390 for word mode. */
+ z_writeb(0x49, ioaddr + NE_EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
- ei_status.reset_8390 = &zorro8390_reset_8390;
- ei_status.block_input = &zorro8390_block_input;
- ei_status.block_output = &zorro8390_block_output;
- ei_status.get_8390_hdr = &zorro8390_get_8390_hdr;
- ei_status.reg_offset = zorro8390_offsets;
+ dev->base_addr = ioaddr;
+ dev->irq = IRQ_AMIGA_PORTS;
- dev->netdev_ops = &zorro8390_netdev_ops;
- __NS8390_init(dev, 0);
- err = register_netdev(dev);
- if (err) {
- free_irq(IRQ_AMIGA_PORTS, dev);
- return err;
- }
+ /* Install the Interrupt handler */
+ i = request_irq(IRQ_AMIGA_PORTS, __ei_interrupt,
+ IRQF_SHARED, DRV_NAME, dev);
+ if (i)
+ return i;
- printk(KERN_INFO "%s: %s at 0x%08lx, Ethernet Address %pM\n",
- dev->name, name, board, dev->dev_addr);
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ dev->dev_addr[i] = SA_prom[i];
- return 0;
-}
+ pr_debug("Found ethernet address: %pM\n", dev->dev_addr);
-static int zorro8390_open(struct net_device *dev)
-{
- __ei_open(dev);
- return 0;
-}
+ ei_status.name = name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = 1;
-static int zorro8390_close(struct net_device *dev)
-{
- if (ei_debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
- __ei_close(dev);
- return 0;
-}
+ ei_status.rx_start_page = start_page + TX_PAGES;
-/* Hard reset the card. This used to pause for the same period that a
- 8390 reset command required, but that shouldn't be necessary. */
-static void zorro8390_reset_8390(struct net_device *dev)
-{
- unsigned long reset_start_time = jiffies;
+ ei_status.reset_8390 = zorro8390_reset_8390;
+ ei_status.block_input = zorro8390_block_input;
+ ei_status.block_output = zorro8390_block_output;
+ ei_status.get_8390_hdr = zorro8390_get_8390_hdr;
+ ei_status.reg_offset = zorro8390_offsets;
- if (ei_debug > 1)
- printk(KERN_DEBUG "resetting the 8390 t=%ld...\n", jiffies);
-
- z_writeb(z_readb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
-
- ei_status.txing = 0;
- ei_status.dmaing = 0;
-
- /* This check _should_not_ be necessary, omit eventually. */
- while ((z_readb(NE_BASE+NE_EN0_ISR) & ENISR_RESET) == 0)
- if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
- printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n",
- dev->name);
- break;
+ dev->netdev_ops = &zorro8390_netdev_ops;
+ __NS8390_init(dev, 0);
+ err = register_netdev(dev);
+ if (err) {
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ return err;
}
- z_writeb(ENISR_RESET, NE_BASE + NE_EN0_ISR); /* Ack intr. */
-}
-/* Grab the 8390 specific header. Similar to the block_input routine, but
- we don't need to be concerned with ring wrap as the header will be at
- the start of a page, so we optimize accordingly. */
+ netdev_info(dev, "%s at 0x%08lx, Ethernet Address %pM\n",
+ name, board, dev->dev_addr);
-static void zorro8390_get_8390_hdr(struct net_device *dev,
- struct e8390_pkt_hdr *hdr, int ring_page)
-{
- int nic_base = dev->base_addr;
- int cnt;
- short *ptrs;
-
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
- if (ei_status.dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in ne_get_8390_hdr "
- "[DMAstat:%d][irqlock:%d].\n", dev->name, ei_status.dmaing,
- ei_status.irqlock);
- return;
- }
-
- ei_status.dmaing |= 0x01;
- z_writeb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
- z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
- z_writeb(sizeof(struct e8390_pkt_hdr), nic_base + NE_EN0_RCNTLO);
- z_writeb(0, nic_base + NE_EN0_RCNTHI);
- z_writeb(0, nic_base + NE_EN0_RSARLO); /* On page boundary */
- z_writeb(ring_page, nic_base + NE_EN0_RSARHI);
- z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
-
- ptrs = (short*)hdr;
- for (cnt = 0; cnt < (sizeof(struct e8390_pkt_hdr)>>1); cnt++)
- *ptrs++ = z_readw(NE_BASE + NE_DATAPORT);
-
- z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
-
- hdr->count = WORDSWAP(hdr->count);
-
- ei_status.dmaing &= ~0x01;
+ return 0;
}
-/* Block input and output, similar to the Crynwr packet driver. If you
- are porting to a new ethercard, look at the packet driver source for hints.
- The NEx000 doesn't share the on-board packet memory -- you have to put
- the packet out through the "remote DMA" dataport using z_writeb. */
-
-static void zorro8390_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset)
+static int __devinit zorro8390_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
{
- int nic_base = dev->base_addr;
- char *buf = skb->data;
- short *ptrs;
- int cnt;
-
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
- if (ei_status.dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in ne_block_input "
- "[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
- return;
- }
- ei_status.dmaing |= 0x01;
- z_writeb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
- z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
- z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO);
- z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI);
- z_writeb(ring_offset & 0xff, nic_base + NE_EN0_RSARLO);
- z_writeb(ring_offset >> 8, nic_base + NE_EN0_RSARHI);
- z_writeb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
- ptrs = (short*)buf;
- for (cnt = 0; cnt < (count>>1); cnt++)
- *ptrs++ = z_readw(NE_BASE + NE_DATAPORT);
- if (count & 0x01)
- buf[count-1] = z_readb(NE_BASE + NE_DATAPORT);
-
- z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
-}
+ struct net_device *dev;
+ unsigned long board, ioaddr;
+ int err, i;
+
+ for (i = ARRAY_SIZE(cards) - 1; i >= 0; i--)
+ if (z->id == cards[i].id)
+ break;
+ if (i < 0)
+ return -ENODEV;
-static void zorro8390_block_output(struct net_device *dev, int count,
- const unsigned char *buf,
- const int start_page)
-{
- int nic_base = NE_BASE;
- unsigned long dma_start;
- short *ptrs;
- int cnt;
-
- /* Round the count up for word writes. Do we need to do this?
- What effect will an odd byte count have on the 8390?
- I should check someday. */
- if (count & 0x01)
- count++;
-
- /* This *shouldn't* happen. If it does, it's the last thing you'll see */
- if (ei_status.dmaing) {
- printk(KERN_ERR "%s: DMAing conflict in ne_block_output."
- "[DMAstat:%d][irqlock:%d]\n", dev->name, ei_status.dmaing,
- ei_status.irqlock);
- return;
- }
- ei_status.dmaing |= 0x01;
- /* We should already be in page 0, but to be safe... */
- z_writeb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
-
- z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR);
-
- /* Now the normal output. */
- z_writeb(count & 0xff, nic_base + NE_EN0_RCNTLO);
- z_writeb(count >> 8, nic_base + NE_EN0_RCNTHI);
- z_writeb(0x00, nic_base + NE_EN0_RSARLO);
- z_writeb(start_page, nic_base + NE_EN0_RSARHI);
-
- z_writeb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
- ptrs = (short*)buf;
- for (cnt = 0; cnt < count>>1; cnt++)
- z_writew(*ptrs++, NE_BASE+NE_DATAPORT);
-
- dma_start = jiffies;
-
- while ((z_readb(NE_BASE + NE_EN0_ISR) & ENISR_RDC) == 0)
- if (time_after(jiffies, dma_start + 2*HZ/100)) { /* 20ms */
- printk(KERN_ERR "%s: timeout waiting for Tx RDC.\n",
- dev->name);
- zorro8390_reset_8390(dev);
- __NS8390_init(dev,1);
- break;
+ board = z->resource.start;
+ ioaddr = board + cards[i].offset;
+ dev = ____alloc_ei_netdev(0);
+ if (!dev)
+ return -ENOMEM;
+ if (!request_mem_region(ioaddr, NE_IO_EXTENT * 2, DRV_NAME)) {
+ free_netdev(dev);
+ return -EBUSY;
}
-
- z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
- ei_status.dmaing &= ~0x01;
+ err = zorro8390_init(dev, board, cards[i].name, ZTWO_VADDR(ioaddr));
+ if (err) {
+ release_mem_region(ioaddr, NE_IO_EXTENT * 2);
+ free_netdev(dev);
+ return err;
+ }
+ zorro_set_drvdata(z, dev);
+ return 0;
}
-static void __devexit zorro8390_remove_one(struct zorro_dev *z)
-{
- struct net_device *dev = zorro_get_drvdata(z);
-
- unregister_netdev(dev);
- free_irq(IRQ_AMIGA_PORTS, dev);
- release_mem_region(ZTWO_PADDR(dev->base_addr), NE_IO_EXTENT*2);
- free_netdev(dev);
-}
+static struct zorro_driver zorro8390_driver = {
+ .name = "zorro8390",
+ .id_table = zorro8390_zorro_tbl,
+ .probe = zorro8390_init_one,
+ .remove = __devexit_p(zorro8390_remove_one),
+};
static int __init zorro8390_init_module(void)
{
- return zorro_register_driver(&zorro8390_driver);
+ return zorro_register_driver(&zorro8390_driver);
}
static void __exit zorro8390_cleanup_module(void)
{
- zorro_unregister_driver(&zorro8390_driver);
+ zorro_unregister_driver(&zorro8390_driver);
}
module_init(zorro8390_init_module);
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index ea158008534..2acff4307ca 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -2,17 +2,8 @@
# Near Field Communication (NFC) devices
#
-menuconfig NFC_DEVICES
- bool "Near Field Communication (NFC) devices"
- default n
- ---help---
- You'll have to say Y if your computer contains an NFC device that
- you want to use under Linux.
-
- You can say N here if you don't have any Near Field Communication
- devices connected to your computer.
-
-if NFC_DEVICES
+menu "Near Field Communication (NFC) devices"
+ depends on NFC
config PN544_NFC
tristate "PN544 NFC driver"
@@ -26,5 +17,14 @@ config PN544_NFC
To compile this driver as a module, choose m here. The module will
be called pn544.
+config NFC_PN533
+ tristate "NXP PN533 USB driver"
+ depends on USB
+ help
+ NXP PN533 USB driver.
+ This driver provides support for NFC NXP PN533 devices.
+
+ Say Y here to compile support for PN533 devices into the
+ kernel or say M to compile it as module (pn533).
-endif # NFC_DEVICES
+endmenu
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index a4efb164ec4..8ef446d2c1b 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -3,3 +3,6 @@
#
obj-$(CONFIG_PN544_NFC) += pn544.o
+obj-$(CONFIG_NFC_PN533) += pn533.o
+
+ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
new file mode 100644
index 00000000000..c77e0543e50
--- /dev/null
+++ b/drivers/nfc/pn533.c
@@ -0,0 +1,1632 @@
+/*
+ * Copyright (C) 2011 Instituto Nokia de Tecnologia
+ *
+ * Authors:
+ * Lauro Ramos Venancio <lauro.venancio@openbossa.org>
+ * Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/nfc.h>
+#include <linux/netdevice.h>
+#include <net/nfc.h>
+
+#define VERSION "0.1"
+
+#define PN533_VENDOR_ID 0x4CC
+#define PN533_PRODUCT_ID 0x2533
+
+#define SCM_VENDOR_ID 0x4E6
+#define SCL3711_PRODUCT_ID 0x5591
+
+static const struct usb_device_id pn533_table[] = {
+ { USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID) },
+ { USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, pn533_table);
+
+/* frame definitions */
+#define PN533_FRAME_TAIL_SIZE 2
+#define PN533_FRAME_SIZE(f) (sizeof(struct pn533_frame) + f->datalen + \
+ PN533_FRAME_TAIL_SIZE)
+#define PN533_FRAME_ACK_SIZE (sizeof(struct pn533_frame) + 1)
+#define PN533_FRAME_CHECKSUM(f) (f->data[f->datalen])
+#define PN533_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
+
+/* start of frame */
+#define PN533_SOF 0x00FF
+
+/* frame identifier: in/out/error */
+#define PN533_FRAME_IDENTIFIER(f) (f->data[0])
+#define PN533_DIR_OUT 0xD4
+#define PN533_DIR_IN 0xD5
+
+/* PN533 Commands */
+#define PN533_FRAME_CMD(f) (f->data[1])
+#define PN533_FRAME_CMD_PARAMS_PTR(f) (&f->data[2])
+#define PN533_FRAME_CMD_PARAMS_LEN(f) (f->datalen - 2)
+
+#define PN533_CMD_GET_FIRMWARE_VERSION 0x02
+#define PN533_CMD_RF_CONFIGURATION 0x32
+#define PN533_CMD_IN_DATA_EXCHANGE 0x40
+#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A
+#define PN533_CMD_IN_ATR 0x50
+#define PN533_CMD_IN_RELEASE 0x52
+
+#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
+
+/* PN533 Return codes */
+#define PN533_CMD_RET_MASK 0x3F
+#define PN533_CMD_MI_MASK 0x40
+#define PN533_CMD_RET_SUCCESS 0x00
+
+struct pn533;
+
+typedef int (*pn533_cmd_complete_t) (struct pn533 *dev, void *arg,
+ u8 *params, int params_len);
+
+/* structs for pn533 commands */
+
+/* PN533_CMD_GET_FIRMWARE_VERSION */
+struct pn533_fw_version {
+ u8 ic;
+ u8 ver;
+ u8 rev;
+ u8 support;
+};
+
+/* PN533_CMD_RF_CONFIGURATION */
+#define PN533_CFGITEM_MAX_RETRIES 0x05
+
+#define PN533_CONFIG_MAX_RETRIES_NO_RETRY 0x00
+#define PN533_CONFIG_MAX_RETRIES_ENDLESS 0xFF
+
+struct pn533_config_max_retries {
+ u8 mx_rty_atr;
+ u8 mx_rty_psl;
+ u8 mx_rty_passive_act;
+} __packed;
+
+/* PN533_CMD_IN_LIST_PASSIVE_TARGET */
+
+/* felica commands opcode */
+#define PN533_FELICA_OPC_SENSF_REQ 0
+#define PN533_FELICA_OPC_SENSF_RES 1
+/* felica SENSF_REQ parameters */
+#define PN533_FELICA_SENSF_SC_ALL 0xFFFF
+#define PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE 0
+#define PN533_FELICA_SENSF_RC_SYSTEM_CODE 1
+#define PN533_FELICA_SENSF_RC_ADVANCED_PROTOCOL 2
+
+/* type B initiator_data values */
+#define PN533_TYPE_B_AFI_ALL_FAMILIES 0
+#define PN533_TYPE_B_POLL_METHOD_TIMESLOT 0
+#define PN533_TYPE_B_POLL_METHOD_PROBABILISTIC 1
+
+union pn533_cmd_poll_initdata {
+ struct {
+ u8 afi;
+ u8 polling_method;
+ } __packed type_b;
+ struct {
+ u8 opcode;
+ __be16 sc;
+ u8 rc;
+ u8 tsn;
+ } __packed felica;
+};
+
+/* Poll modulations */
+enum {
+ PN533_POLL_MOD_106KBPS_A,
+ PN533_POLL_MOD_212KBPS_FELICA,
+ PN533_POLL_MOD_424KBPS_FELICA,
+ PN533_POLL_MOD_106KBPS_JEWEL,
+ PN533_POLL_MOD_847KBPS_B,
+
+ __PN533_POLL_MOD_AFTER_LAST,
+};
+#define PN533_POLL_MOD_MAX (__PN533_POLL_MOD_AFTER_LAST - 1)
+
+struct pn533_poll_modulations {
+ struct {
+ u8 maxtg;
+ u8 brty;
+ union pn533_cmd_poll_initdata initiator_data;
+ } __packed data;
+ u8 len;
+};
+
+const struct pn533_poll_modulations poll_mod[] = {
+ [PN533_POLL_MOD_106KBPS_A] = {
+ .data = {
+ .maxtg = 1,
+ .brty = 0,
+ },
+ .len = 2,
+ },
+ [PN533_POLL_MOD_212KBPS_FELICA] = {
+ .data = {
+ .maxtg = 1,
+ .brty = 1,
+ .initiator_data.felica = {
+ .opcode = PN533_FELICA_OPC_SENSF_REQ,
+ .sc = PN533_FELICA_SENSF_SC_ALL,
+ .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE,
+ .tsn = 0,
+ },
+ },
+ .len = 7,
+ },
+ [PN533_POLL_MOD_424KBPS_FELICA] = {
+ .data = {
+ .maxtg = 1,
+ .brty = 2,
+ .initiator_data.felica = {
+ .opcode = PN533_FELICA_OPC_SENSF_REQ,
+ .sc = PN533_FELICA_SENSF_SC_ALL,
+ .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE,
+ .tsn = 0,
+ },
+ },
+ .len = 7,
+ },
+ [PN533_POLL_MOD_106KBPS_JEWEL] = {
+ .data = {
+ .maxtg = 1,
+ .brty = 4,
+ },
+ .len = 2,
+ },
+ [PN533_POLL_MOD_847KBPS_B] = {
+ .data = {
+ .maxtg = 1,
+ .brty = 8,
+ .initiator_data.type_b = {
+ .afi = PN533_TYPE_B_AFI_ALL_FAMILIES,
+ .polling_method =
+ PN533_TYPE_B_POLL_METHOD_TIMESLOT,
+ },
+ },
+ .len = 3,
+ },
+};
+
+/* PN533_CMD_IN_ATR */
+
+struct pn533_cmd_activate_param {
+ u8 tg;
+ u8 next;
+} __packed;
+
+struct pn533_cmd_activate_response {
+ u8 status;
+ u8 nfcid3t[10];
+ u8 didt;
+ u8 bst;
+ u8 brt;
+ u8 to;
+ u8 ppt;
+ /* optional */
+ u8 gt[];
+} __packed;
+
+
+struct pn533 {
+ struct usb_device *udev;
+ struct usb_interface *interface;
+ struct nfc_dev *nfc_dev;
+
+ struct urb *out_urb;
+ int out_maxlen;
+ struct pn533_frame *out_frame;
+
+ struct urb *in_urb;
+ int in_maxlen;
+ struct pn533_frame *in_frame;
+
+ struct tasklet_struct tasklet;
+ struct pn533_frame *tklt_in_frame;
+ int tklt_in_error;
+
+ pn533_cmd_complete_t cmd_complete;
+ void *cmd_complete_arg;
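+	/* allows a single command in flight: taken in
+	 * pn533_send_cmd_frame_async() and released once the command completes */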
+ struct semaphore cmd_lock;
+ u8 cmd;
+
+ struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
+ u8 poll_mod_count;
+ u8 poll_mod_curr;
+ u32 poll_protocols;
+
+ u8 tgt_available_prots;
+ u8 tgt_active_prot;
+};
+
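+/*
+ * Standard frame layout: preamble, start code, LEN, LCS, then LEN bytes of
+ * data (direction byte, command code, parameters); the data checksum and
+ * postamble follow and are reached via the PN533_FRAME_* macros above.
+ */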
+struct pn533_frame {
+ u8 preamble;
+ __be16 start_frame;
+ u8 datalen;
+ u8 datalen_checksum;
+ u8 data[];
+} __packed;
+
+/* The rule: value + checksum = 0 */
+static inline u8 pn533_checksum(u8 value)
+{
+ return ~value + 1;
+}
+
+/* The rule: sum(data elements) + checksum = 0 */
+static u8 pn533_data_checksum(u8 *data, int datalen)
+{
+ u8 sum = 0;
+ int i;
+
+ for (i = 0; i < datalen; i++)
+ sum += data[i];
+
+ return pn533_checksum(sum);
+}
+
+/**
+ * pn533_tx_frame_ack - create an ack frame
+ * @frame: The frame to be set as ack
+ *
+ * An ack is a special kind of standard frame. Like a standard frame it has
+ * a preamble and start_frame, but its length checksum must fail, i.e.
+ * datalen + datalen_checksum must NOT sum to zero. When that check fails
+ * with datalen = 0 and datalen_checksum = 0xFF, the frame is an ack.
+ * The postamble directly follows the datalen_checksum field.
+ */
+static void pn533_tx_frame_ack(struct pn533_frame *frame)
+{
+ frame->preamble = 0;
+ frame->start_frame = cpu_to_be16(PN533_SOF);
+ frame->datalen = 0;
+ frame->datalen_checksum = 0xFF;
+ /* data[0] is used as postamble */
+ frame->data[0] = 0;
+}
+
+static void pn533_tx_frame_init(struct pn533_frame *frame, u8 cmd)
+{
+ frame->preamble = 0;
+ frame->start_frame = cpu_to_be16(PN533_SOF);
+ PN533_FRAME_IDENTIFIER(frame) = PN533_DIR_OUT;
+ PN533_FRAME_CMD(frame) = cmd;
+ frame->datalen = 2;
+}
+
+static void pn533_tx_frame_finish(struct pn533_frame *frame)
+{
+ frame->datalen_checksum = pn533_checksum(frame->datalen);
+
+ PN533_FRAME_CHECKSUM(frame) =
+ pn533_data_checksum(frame->data, frame->datalen);
+
+ PN533_FRAME_POSTAMBLE(frame) = 0;
+}
+
+static bool pn533_rx_frame_is_valid(struct pn533_frame *frame)
+{
+ u8 checksum;
+
+ if (frame->start_frame != cpu_to_be16(PN533_SOF))
+ return false;
+
+ checksum = pn533_checksum(frame->datalen);
+ if (checksum != frame->datalen_checksum)
+ return false;
+
+ checksum = pn533_data_checksum(frame->data, frame->datalen);
+ if (checksum != PN533_FRAME_CHECKSUM(frame))
+ return false;
+
+ return true;
+}
+
+static bool pn533_rx_frame_is_ack(struct pn533_frame *frame)
+{
+ if (frame->start_frame != cpu_to_be16(PN533_SOF))
+ return false;
+
+ if (frame->datalen != 0 || frame->datalen_checksum != 0xFF)
+ return false;
+
+ return true;
+}
+
+static bool pn533_rx_frame_is_cmd_response(struct pn533_frame *frame, u8 cmd)
+{
+ return (PN533_FRAME_CMD(frame) == PN533_CMD_RESPONSE(cmd));
+}
+
+static void pn533_tasklet_cmd_complete(unsigned long arg)
+{
+ struct pn533 *dev = (struct pn533 *) arg;
+ struct pn533_frame *in_frame = dev->tklt_in_frame;
+ int rc;
+
+ if (dev->tklt_in_error)
+ rc = dev->cmd_complete(dev, dev->cmd_complete_arg, NULL,
+ dev->tklt_in_error);
+ else
+ rc = dev->cmd_complete(dev, dev->cmd_complete_arg,
+ PN533_FRAME_CMD_PARAMS_PTR(in_frame),
+ PN533_FRAME_CMD_PARAMS_LEN(in_frame));
+
+ if (rc != -EINPROGRESS)
+ up(&dev->cmd_lock);
+}
+
+static void pn533_recv_response(struct urb *urb)
+{
+ struct pn533 *dev = urb->context;
+ struct pn533_frame *in_frame;
+
+ dev->tklt_in_frame = NULL;
+
+ switch (urb->status) {
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with"
+ " status: %d", urb->status);
+ dev->tklt_in_error = urb->status;
+ goto sched_tasklet;
+ default:
+ nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:"
+ " %d", urb->status);
+ dev->tklt_in_error = urb->status;
+ goto sched_tasklet;
+ }
+
+ in_frame = dev->in_urb->transfer_buffer;
+
+ if (!pn533_rx_frame_is_valid(in_frame)) {
+ nfc_dev_err(&dev->interface->dev, "Received an invalid frame");
+ dev->tklt_in_error = -EIO;
+ goto sched_tasklet;
+ }
+
+ if (!pn533_rx_frame_is_cmd_response(in_frame, dev->cmd)) {
+ nfc_dev_err(&dev->interface->dev, "The received frame is not "
+ "response to the last command");
+ dev->tklt_in_error = -EIO;
+ goto sched_tasklet;
+ }
+
+ nfc_dev_dbg(&dev->interface->dev, "Received a valid frame");
+ dev->tklt_in_error = 0;
+ dev->tklt_in_frame = in_frame;
+
+sched_tasklet:
+ tasklet_schedule(&dev->tasklet);
+}
+
+static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags)
+{
+ dev->in_urb->complete = pn533_recv_response;
+
+ return usb_submit_urb(dev->in_urb, flags);
+}
+
+static void pn533_recv_ack(struct urb *urb)
+{
+ struct pn533 *dev = urb->context;
+ struct pn533_frame *in_frame;
+ int rc;
+
+ switch (urb->status) {
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with"
+ " status: %d", urb->status);
+ dev->tklt_in_error = urb->status;
+ goto sched_tasklet;
+ default:
+ nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:"
+ " %d", urb->status);
+ dev->tklt_in_error = urb->status;
+ goto sched_tasklet;
+ }
+
+ in_frame = dev->in_urb->transfer_buffer;
+
+ if (!pn533_rx_frame_is_ack(in_frame)) {
+ nfc_dev_err(&dev->interface->dev, "Received an invalid ack");
+ dev->tklt_in_error = -EIO;
+ goto sched_tasklet;
+ }
+
+ nfc_dev_dbg(&dev->interface->dev, "Received a valid ack");
+
+ rc = pn533_submit_urb_for_response(dev, GFP_ATOMIC);
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev, "usb_submit_urb failed with"
+ " result %d", rc);
+ dev->tklt_in_error = rc;
+ goto sched_tasklet;
+ }
+
+ return;
+
+sched_tasklet:
+ dev->tklt_in_frame = NULL;
+ tasklet_schedule(&dev->tasklet);
+}
+
+static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags)
+{
+ dev->in_urb->complete = pn533_recv_ack;
+
+ return usb_submit_urb(dev->in_urb, flags);
+}
+
+static int pn533_send_ack(struct pn533 *dev, gfp_t flags)
+{
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ pn533_tx_frame_ack(dev->out_frame);
+
+ dev->out_urb->transfer_buffer = dev->out_frame;
+ dev->out_urb->transfer_buffer_length = PN533_FRAME_ACK_SIZE;
+ rc = usb_submit_urb(dev->out_urb, flags);
+
+ return rc;
+}
+
+static int __pn533_send_cmd_frame_async(struct pn533 *dev,
+ struct pn533_frame *out_frame,
+ struct pn533_frame *in_frame,
+ int in_frame_len,
+ pn533_cmd_complete_t cmd_complete,
+ void *arg, gfp_t flags)
+{
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "Sending command 0x%x",
+ PN533_FRAME_CMD(out_frame));
+
+ dev->cmd = PN533_FRAME_CMD(out_frame);
+ dev->cmd_complete = cmd_complete;
+ dev->cmd_complete_arg = arg;
+
+ dev->out_urb->transfer_buffer = out_frame;
+ dev->out_urb->transfer_buffer_length =
+ PN533_FRAME_SIZE(out_frame);
+
+ dev->in_urb->transfer_buffer = in_frame;
+ dev->in_urb->transfer_buffer_length = in_frame_len;
+
+ rc = usb_submit_urb(dev->out_urb, flags);
+ if (rc)
+ return rc;
+
+ rc = pn533_submit_urb_for_ack(dev, flags);
+ if (rc)
+ goto error;
+
+ return 0;
+
+error:
+ usb_unlink_urb(dev->out_urb);
+ return rc;
+}
+
+static int pn533_send_cmd_frame_async(struct pn533 *dev,
+ struct pn533_frame *out_frame,
+ struct pn533_frame *in_frame,
+ int in_frame_len,
+ pn533_cmd_complete_t cmd_complete,
+ void *arg, gfp_t flags)
+{
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (down_trylock(&dev->cmd_lock))
+ return -EBUSY;
+
+ rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
+ in_frame_len, cmd_complete, arg, flags);
+ if (rc)
+ goto error;
+
+ return 0;
+error:
+ up(&dev->cmd_lock);
+ return rc;
+}
+
+struct pn533_sync_cmd_response {
+ int rc;
+ struct completion done;
+};
+
+static int pn533_sync_cmd_complete(struct pn533 *dev, void *_arg,
+ u8 *params, int params_len)
+{
+ struct pn533_sync_cmd_response *arg = _arg;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ arg->rc = 0;
+
+ if (params_len < 0) /* error */
+ arg->rc = params_len;
+
+ complete(&arg->done);
+
+ return 0;
+}
+
+static int pn533_send_cmd_frame_sync(struct pn533 *dev,
+ struct pn533_frame *out_frame,
+ struct pn533_frame *in_frame,
+ int in_frame_len)
+{
+ int rc;
+ struct pn533_sync_cmd_response arg;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ init_completion(&arg.done);
+
+ rc = pn533_send_cmd_frame_async(dev, out_frame, in_frame, in_frame_len,
+ pn533_sync_cmd_complete, &arg, GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ wait_for_completion(&arg.done);
+
+ return arg.rc;
+}
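(Editor's aside, not part of the patch: a minimal sketch of how the frame
helpers and the synchronous send path compose; it mirrors what pn533_probe()
does below for GetFirmwareVersion.)

	struct pn533_fw_version *fw_ver;
	int rc;

	/* build a parameterless command and wait for the response */
	pn533_tx_frame_init(dev->out_frame, PN533_CMD_GET_FIRMWARE_VERSION);
	pn533_tx_frame_finish(dev->out_frame);
	rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
				       dev->in_maxlen);
	if (rc)
		return rc;
	/* response parameters start at PN533_FRAME_CMD_PARAMS_PTR() */
	fw_ver = (struct pn533_fw_version *)
			PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame);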
+
+static void pn533_send_complete(struct urb *urb)
+{
+ struct pn533 *dev = urb->context;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ switch (urb->status) {
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with"
+ " status: %d", urb->status);
+ break;
+ default:
+ nfc_dev_dbg(&dev->interface->dev, "Nonzero urb status received:"
+ " %d", urb->status);
+ }
+}
+
+struct pn533_target_type_a {
+ __be16 sens_res;
+ u8 sel_res;
+ u8 nfcid_len;
+ u8 nfcid_data[];
+} __packed;
+
+
+#define PN533_TYPE_A_SENS_RES_NFCID1(x) ((u8)((be16_to_cpu(x) & 0x00C0) >> 6))
+#define PN533_TYPE_A_SENS_RES_SSD(x) ((u8)((be16_to_cpu(x) & 0x001F) >> 0))
+#define PN533_TYPE_A_SENS_RES_PLATCONF(x) ((u8)((be16_to_cpu(x) & 0x0F00) >> 8))
+
+#define PN533_TYPE_A_SENS_RES_SSD_JEWEL 0x00
+#define PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL 0x0C
+
+#define PN533_TYPE_A_SEL_PROT(x) (((x) & 0x60) >> 5)
+#define PN533_TYPE_A_SEL_CASCADE(x) (((x) & 0x04) >> 2)
+
+#define PN533_TYPE_A_SEL_PROT_MIFARE 0
+#define PN533_TYPE_A_SEL_PROT_ISO14443 1
+#define PN533_TYPE_A_SEL_PROT_DEP 2
+#define PN533_TYPE_A_SEL_PROT_ISO14443_DEP 3
+
+static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a,
+ int target_data_len)
+{
+ u8 ssd;
+ u8 platconf;
+
+ if (target_data_len < sizeof(struct pn533_target_type_a))
+ return false;
+
+	/* The length checks of nfcid[] and ats[] are not performed because
+	 * the values are not used */
+
+ /* Requirement 4.6.3.3 from NFC Forum Digital Spec */
+ ssd = PN533_TYPE_A_SENS_RES_SSD(type_a->sens_res);
+ platconf = PN533_TYPE_A_SENS_RES_PLATCONF(type_a->sens_res);
+
+ if ((ssd == PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
+ platconf != PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL) ||
+ (ssd != PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
+ platconf == PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL))
+ return false;
+
+ /* Requirements 4.8.2.1, 4.8.2.3, 4.8.2.5 and 4.8.2.7 from NFC Forum */
+ if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0)
+ return false;
+
+ return true;
+}
+
+static int pn533_target_found_type_a(struct nfc_target *nfc_tgt, u8 *tgt_data,
+ int tgt_data_len)
+{
+ struct pn533_target_type_a *tgt_type_a;
+
+ tgt_type_a = (struct pn533_target_type_a *) tgt_data;
+
+ if (!pn533_target_type_a_is_valid(tgt_type_a, tgt_data_len))
+ return -EPROTO;
+
+ switch (PN533_TYPE_A_SEL_PROT(tgt_type_a->sel_res)) {
+ case PN533_TYPE_A_SEL_PROT_MIFARE:
+ nfc_tgt->supported_protocols = NFC_PROTO_MIFARE_MASK;
+ break;
+ case PN533_TYPE_A_SEL_PROT_ISO14443:
+ nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK;
+ break;
+ case PN533_TYPE_A_SEL_PROT_DEP:
+ nfc_tgt->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ break;
+ case PN533_TYPE_A_SEL_PROT_ISO14443_DEP:
+ nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK |
+ NFC_PROTO_NFC_DEP_MASK;
+ break;
+ }
+
+ nfc_tgt->sens_res = be16_to_cpu(tgt_type_a->sens_res);
+ nfc_tgt->sel_res = tgt_type_a->sel_res;
+
+ return 0;
+}
+
+struct pn533_target_felica {
+ u8 pol_res;
+ u8 opcode;
+ u8 nfcid2[8];
+ u8 pad[8];
+ /* optional */
+ u8 syst_code[];
+} __packed;
+
+#define PN533_FELICA_SENSF_NFCID2_DEP_B1 0x01
+#define PN533_FELICA_SENSF_NFCID2_DEP_B2 0xFE
+
+static bool pn533_target_felica_is_valid(struct pn533_target_felica *felica,
+ int target_data_len)
+{
+ if (target_data_len < sizeof(struct pn533_target_felica))
+ return false;
+
+ if (felica->opcode != PN533_FELICA_OPC_SENSF_RES)
+ return false;
+
+ return true;
+}
+
+static int pn533_target_found_felica(struct nfc_target *nfc_tgt, u8 *tgt_data,
+ int tgt_data_len)
+{
+ struct pn533_target_felica *tgt_felica;
+
+ tgt_felica = (struct pn533_target_felica *) tgt_data;
+
+ if (!pn533_target_felica_is_valid(tgt_felica, tgt_data_len))
+ return -EPROTO;
+
+ if (tgt_felica->nfcid2[0] == PN533_FELICA_SENSF_NFCID2_DEP_B1 &&
+ tgt_felica->nfcid2[1] ==
+ PN533_FELICA_SENSF_NFCID2_DEP_B2)
+ nfc_tgt->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ else
+ nfc_tgt->supported_protocols = NFC_PROTO_FELICA_MASK;
+
+ return 0;
+}
+
+struct pn533_target_jewel {
+ __be16 sens_res;
+ u8 jewelid[4];
+} __packed;
+
+static bool pn533_target_jewel_is_valid(struct pn533_target_jewel *jewel,
+ int target_data_len)
+{
+ u8 ssd;
+ u8 platconf;
+
+ if (target_data_len < sizeof(struct pn533_target_jewel))
+ return false;
+
+ /* Requirement 4.6.3.3 from NFC Forum Digital Spec */
+ ssd = PN533_TYPE_A_SENS_RES_SSD(jewel->sens_res);
+ platconf = PN533_TYPE_A_SENS_RES_PLATCONF(jewel->sens_res);
+
+ if ((ssd == PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
+ platconf != PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL) ||
+ (ssd != PN533_TYPE_A_SENS_RES_SSD_JEWEL &&
+ platconf == PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL))
+ return false;
+
+ return true;
+}
+
+static int pn533_target_found_jewel(struct nfc_target *nfc_tgt, u8 *tgt_data,
+ int tgt_data_len)
+{
+ struct pn533_target_jewel *tgt_jewel;
+
+ tgt_jewel = (struct pn533_target_jewel *) tgt_data;
+
+ if (!pn533_target_jewel_is_valid(tgt_jewel, tgt_data_len))
+ return -EPROTO;
+
+ nfc_tgt->supported_protocols = NFC_PROTO_JEWEL_MASK;
+ nfc_tgt->sens_res = be16_to_cpu(tgt_jewel->sens_res);
+
+ return 0;
+}
+
+struct pn533_type_b_prot_info {
+ u8 bitrate;
+ u8 fsci_type;
+ u8 fwi_adc_fo;
+} __packed;
+
+#define PN533_TYPE_B_PROT_FCSI(x) (((x) & 0xF0) >> 4)
+#define PN533_TYPE_B_PROT_TYPE(x) (((x) & 0x0F) >> 0)
+#define PN533_TYPE_B_PROT_TYPE_RFU_MASK 0x8
+
+struct pn533_type_b_sens_res {
+ u8 opcode;
+ u8 nfcid[4];
+ u8 appdata[4];
+ struct pn533_type_b_prot_info prot_info;
+} __packed;
+
+#define PN533_TYPE_B_OPC_SENSB_RES 0x50
+
+struct pn533_target_type_b {
+ struct pn533_type_b_sens_res sensb_res;
+ u8 attrib_res_len;
+ u8 attrib_res[];
+} __packed;
+
+static bool pn533_target_type_b_is_valid(struct pn533_target_type_b *type_b,
+ int target_data_len)
+{
+ if (target_data_len < sizeof(struct pn533_target_type_b))
+ return false;
+
+ if (type_b->sensb_res.opcode != PN533_TYPE_B_OPC_SENSB_RES)
+ return false;
+
+ if (PN533_TYPE_B_PROT_TYPE(type_b->sensb_res.prot_info.fsci_type) &
+ PN533_TYPE_B_PROT_TYPE_RFU_MASK)
+ return false;
+
+ return true;
+}
+
+static int pn533_target_found_type_b(struct nfc_target *nfc_tgt, u8 *tgt_data,
+ int tgt_data_len)
+{
+ struct pn533_target_type_b *tgt_type_b;
+
+ tgt_type_b = (struct pn533_target_type_b *) tgt_data;
+
+ if (!pn533_target_type_b_is_valid(tgt_type_b, tgt_data_len))
+ return -EPROTO;
+
+ nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK;
+
+ return 0;
+}
+
+struct pn533_poll_response {
+ u8 nbtg;
+ u8 tg;
+ u8 target_data[];
+} __packed;
+
+static int pn533_target_found(struct pn533 *dev,
+ struct pn533_poll_response *resp, int resp_len)
+{
+ int target_data_len;
+ struct nfc_target nfc_tgt;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s - modulation=%d", __func__,
+ dev->poll_mod_curr);
+
+ if (resp->tg != 1)
+ return -EPROTO;
+
+ target_data_len = resp_len - sizeof(struct pn533_poll_response);
+
+ switch (dev->poll_mod_curr) {
+ case PN533_POLL_MOD_106KBPS_A:
+ rc = pn533_target_found_type_a(&nfc_tgt, resp->target_data,
+ target_data_len);
+ break;
+ case PN533_POLL_MOD_212KBPS_FELICA:
+ case PN533_POLL_MOD_424KBPS_FELICA:
+ rc = pn533_target_found_felica(&nfc_tgt, resp->target_data,
+ target_data_len);
+ break;
+ case PN533_POLL_MOD_106KBPS_JEWEL:
+ rc = pn533_target_found_jewel(&nfc_tgt, resp->target_data,
+ target_data_len);
+ break;
+ case PN533_POLL_MOD_847KBPS_B:
+ rc = pn533_target_found_type_b(&nfc_tgt, resp->target_data,
+ target_data_len);
+ break;
+ default:
+ nfc_dev_err(&dev->interface->dev, "Unknown current poll"
+ " modulation");
+ return -EPROTO;
+ }
+
+ if (rc)
+ return rc;
+
+ if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) {
+ nfc_dev_dbg(&dev->interface->dev, "The target found does not"
+ " have the desired protocol");
+ return -EAGAIN;
+ }
+
+ nfc_dev_dbg(&dev->interface->dev, "Target found - supported protocols: "
+ "0x%x", nfc_tgt.supported_protocols);
+
+ dev->tgt_available_prots = nfc_tgt.supported_protocols;
+
+ nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1);
+
+ return 0;
+}
+
+static void pn533_poll_reset_mod_list(struct pn533 *dev)
+{
+ dev->poll_mod_count = 0;
+}
+
+static void pn533_poll_add_mod(struct pn533 *dev, u8 mod_index)
+{
+ dev->poll_mod_active[dev->poll_mod_count] =
+ (struct pn533_poll_modulations *) &poll_mod[mod_index];
+ dev->poll_mod_count++;
+}
+
+static void pn533_poll_create_mod_list(struct pn533 *dev, u32 protocols)
+{
+ pn533_poll_reset_mod_list(dev);
+
+ if (protocols & NFC_PROTO_MIFARE_MASK
+ || protocols & NFC_PROTO_ISO14443_MASK
+ || protocols & NFC_PROTO_NFC_DEP_MASK)
+ pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_A);
+
+ if (protocols & NFC_PROTO_FELICA_MASK
+ || protocols & NFC_PROTO_NFC_DEP_MASK) {
+ pn533_poll_add_mod(dev, PN533_POLL_MOD_212KBPS_FELICA);
+ pn533_poll_add_mod(dev, PN533_POLL_MOD_424KBPS_FELICA);
+ }
+
+ if (protocols & NFC_PROTO_JEWEL_MASK)
+ pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_JEWEL);
+
+ if (protocols & NFC_PROTO_ISO14443_MASK)
+ pn533_poll_add_mod(dev, PN533_POLL_MOD_847KBPS_B);
+}
+
+static void pn533_start_poll_frame(struct pn533_frame *frame,
+ struct pn533_poll_modulations *mod)
+{
+
+ pn533_tx_frame_init(frame, PN533_CMD_IN_LIST_PASSIVE_TARGET);
+
+ memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), &mod->data, mod->len);
+ frame->datalen += mod->len;
+
+ pn533_tx_frame_finish(frame);
+}
+
+static int pn533_start_poll_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
+{
+ struct pn533_poll_response *resp;
+ struct pn533_poll_modulations *next_mod;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (params_len == -ENOENT) {
+ nfc_dev_dbg(&dev->interface->dev, "Polling operation has been"
+ " stopped");
+ goto stop_poll;
+ }
+
+ if (params_len < 0) {
+ nfc_dev_err(&dev->interface->dev, "Error %d when running poll",
+ params_len);
+ goto stop_poll;
+ }
+
+ resp = (struct pn533_poll_response *) params;
+ if (resp->nbtg) {
+ rc = pn533_target_found(dev, resp, params_len);
+
+ /* We must stop the poll after a valid target found */
+ if (rc == 0)
+ goto stop_poll;
+
+ if (rc != -EAGAIN)
+ nfc_dev_err(&dev->interface->dev, "The target found is"
+ " not valid - continuing to poll");
+ }
+
+ dev->poll_mod_curr = (dev->poll_mod_curr + 1) % dev->poll_mod_count;
+
+ next_mod = dev->poll_mod_active[dev->poll_mod_curr];
+
+ nfc_dev_dbg(&dev->interface->dev, "Polling next modulation (0x%x)",
+ dev->poll_mod_curr);
+
+ pn533_start_poll_frame(dev->out_frame, next_mod);
+
+ /* Don't need to down the semaphore again */
+ rc = __pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
+ dev->in_maxlen, pn533_start_poll_complete,
+ NULL, GFP_ATOMIC);
+
+ if (rc == -EPERM) {
+ nfc_dev_dbg(&dev->interface->dev, "Cannot poll next modulation"
+ " because poll has been stopped");
+ goto stop_poll;
+ }
+
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev, "Error %d when trying to poll"
+ " next modulation", rc);
+ goto stop_poll;
+ }
+
+	/* Tell the caller not to up the semaphore */
+ return -EINPROGRESS;
+
+stop_poll:
+ pn533_poll_reset_mod_list(dev);
+ dev->poll_protocols = 0;
+ return 0;
+}
+
+static int pn533_start_poll(struct nfc_dev *nfc_dev, u32 protocols)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ struct pn533_poll_modulations *start_mod;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s - protocols=0x%x", __func__,
+ protocols);
+
+ if (dev->poll_mod_count) {
+ nfc_dev_err(&dev->interface->dev, "Polling operation already"
+ " active");
+ return -EBUSY;
+ }
+
+ if (dev->tgt_active_prot) {
+ nfc_dev_err(&dev->interface->dev, "Cannot poll with a target"
+ " already activated");
+ return -EBUSY;
+ }
+
+ pn533_poll_create_mod_list(dev, protocols);
+
+ if (!dev->poll_mod_count) {
+ nfc_dev_err(&dev->interface->dev, "No valid protocols"
+ " specified");
+ rc = -EINVAL;
+ goto error;
+ }
+
+	nfc_dev_dbg(&dev->interface->dev, "Will poll %d modulation types",
+ dev->poll_mod_count);
+
+ dev->poll_mod_curr = 0;
+ start_mod = dev->poll_mod_active[dev->poll_mod_curr];
+
+ pn533_start_poll_frame(dev->out_frame, start_mod);
+
+ rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
+ dev->in_maxlen, pn533_start_poll_complete,
+ NULL, GFP_KERNEL);
+
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev, "Error %d when trying to"
+ " start poll", rc);
+ goto error;
+ }
+
+ dev->poll_protocols = protocols;
+
+ return 0;
+
+error:
+ pn533_poll_reset_mod_list(dev);
+ return rc;
+}
+
+static void pn533_stop_poll(struct nfc_dev *nfc_dev)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (!dev->poll_mod_count) {
+ nfc_dev_dbg(&dev->interface->dev, "Polling operation was not"
+ " running");
+ return;
+ }
+
+ /* An ack will cancel the last issued command (poll) */
+ pn533_send_ack(dev, GFP_KERNEL);
+
+	/* prevent pn533_start_poll_complete from issuing a new poll meanwhile */
+ usb_kill_urb(dev->in_urb);
+}
+
+static int pn533_activate_target_nfcdep(struct pn533 *dev)
+{
+ struct pn533_cmd_activate_param param;
+ struct pn533_cmd_activate_response *resp;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_ATR);
+
+ param.tg = 1;
+ param.next = 0;
+ memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), &param,
+ sizeof(struct pn533_cmd_activate_param));
+ dev->out_frame->datalen += sizeof(struct pn533_cmd_activate_param);
+
+ pn533_tx_frame_finish(dev->out_frame);
+
+ rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
+ dev->in_maxlen);
+ if (rc)
+ return rc;
+
+ resp = (struct pn533_cmd_activate_response *)
+ PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame);
+ rc = resp->status & PN533_CMD_RET_MASK;
+ if (rc != PN533_CMD_RET_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+
+static int pn533_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
+ u32 protocol)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s - protocol=%u", __func__,
+ protocol);
+
+ if (dev->poll_mod_count) {
+ nfc_dev_err(&dev->interface->dev, "Cannot activate while"
+ " polling");
+ return -EBUSY;
+ }
+
+ if (dev->tgt_active_prot) {
+ nfc_dev_err(&dev->interface->dev, "There is already an active"
+ " target");
+ return -EBUSY;
+ }
+
+ if (!dev->tgt_available_prots) {
+ nfc_dev_err(&dev->interface->dev, "There is no available target"
+ " to activate");
+ return -EINVAL;
+ }
+
+ if (!(dev->tgt_available_prots & (1 << protocol))) {
+ nfc_dev_err(&dev->interface->dev, "The target does not support"
+ " the requested protocol %u", protocol);
+ return -EINVAL;
+ }
+
+ if (protocol == NFC_PROTO_NFC_DEP) {
+ rc = pn533_activate_target_nfcdep(dev);
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev, "Error %d when"
+ " activating target with"
+ " NFC_DEP protocol", rc);
+ return rc;
+ }
+ }
+
+ dev->tgt_active_prot = protocol;
+ dev->tgt_available_prots = 0;
+
+ return 0;
+}
+
+static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ u8 tg;
+ u8 status;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (!dev->tgt_active_prot) {
+ nfc_dev_err(&dev->interface->dev, "There is no active target");
+ return;
+ }
+
+ dev->tgt_active_prot = 0;
+
+ pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_RELEASE);
+
+ tg = 1;
+ memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), &tg, sizeof(u8));
+ dev->out_frame->datalen += sizeof(u8);
+
+ pn533_tx_frame_finish(dev->out_frame);
+
+ rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
+ dev->in_maxlen);
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev, "Error when sending release"
+ " command to the controller");
+ return;
+ }
+
+ status = PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame)[0];
+ rc = status & PN533_CMD_RET_MASK;
+ if (rc != PN533_CMD_RET_SUCCESS)
+ nfc_dev_err(&dev->interface->dev, "Error 0x%x when releasing"
+ " the target", rc);
+
+ return;
+}
+
+#define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3)
+#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
+
+static int pn533_data_exchange_tx_frame(struct pn533 *dev, struct sk_buff *skb)
+{
+ int payload_len = skb->len;
+ struct pn533_frame *out_frame;
+ struct sk_buff *discarded;
+ u8 tg;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s - Sending %d bytes", __func__,
+ payload_len);
+
+ if (payload_len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
+		/* TODO: Implement support for multi-part data exchange */
+ nfc_dev_err(&dev->interface->dev, "Data length greater than the"
+ " max allowed: %d",
+ PN533_CMD_DATAEXCH_DATA_MAXLEN);
+ return -ENOSYS;
+ }
+
+ /* Reserving header space */
+ if (skb_cow_head(skb, PN533_CMD_DATAEXCH_HEAD_LEN)) {
+		nfc_dev_err(&dev->interface->dev, "Error adding header data");
+ return -ENOMEM;
+ }
+
+ /* Reserving tail space, see pn533_tx_frame_finish */
+ if (skb_cow_data(skb, PN533_FRAME_TAIL_SIZE, &discarded) < 0) {
+		nfc_dev_err(&dev->interface->dev, "Error adding tail data");
+ return -ENOMEM;
+ }
+
+ skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN);
+ out_frame = (struct pn533_frame *) skb->data;
+
+ pn533_tx_frame_init(out_frame, PN533_CMD_IN_DATA_EXCHANGE);
+
+ tg = 1;
+ memcpy(PN533_FRAME_CMD_PARAMS_PTR(out_frame), &tg, sizeof(u8));
+ out_frame->datalen += sizeof(u8);
+
+ /* The data is already in the out_frame, just update the datalen */
+ out_frame->datalen += payload_len;
+
+ pn533_tx_frame_finish(out_frame);
+ skb_put(skb, PN533_FRAME_TAIL_SIZE);
+
+ return 0;
+}
+
+struct pn533_data_exchange_arg {
+ struct sk_buff *skb_resp;
+ struct sk_buff *skb_out;
+ data_exchange_cb_t cb;
+ void *cb_context;
+};
+
+static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
+ u8 *params, int params_len)
+{
+ struct pn533_data_exchange_arg *arg = _arg;
+ struct sk_buff *skb_resp = arg->skb_resp;
+ struct pn533_frame *in_frame = (struct pn533_frame *) skb_resp->data;
+ int err = 0;
+ u8 status;
+ u8 cmd_ret;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ dev_kfree_skb_irq(arg->skb_out);
+
+ if (params_len < 0) { /* error */
+ err = params_len;
+ goto error;
+ }
+
+ skb_put(skb_resp, PN533_FRAME_SIZE(in_frame));
+
+ status = params[0];
+
+ cmd_ret = status & PN533_CMD_RET_MASK;
+ if (cmd_ret != PN533_CMD_RET_SUCCESS) {
+ nfc_dev_err(&dev->interface->dev, "PN533 reported error %d when"
+ " exchanging data", cmd_ret);
+ err = -EIO;
+ goto error;
+ }
+
+ if (status & PN533_CMD_MI_MASK) {
+		/* TODO: Implement support for multi-part data exchange */
+ nfc_dev_err(&dev->interface->dev, "Multi-part message not yet"
+ " supported");
+		/* Prevent further messages from the controller */
+ pn533_send_ack(dev, GFP_ATOMIC);
+ err = -ENOSYS;
+ goto error;
+ }
+
+ skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN);
+ skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE);
+
+ arg->cb(arg->cb_context, skb_resp, 0);
+ kfree(arg);
+ return 0;
+
+error:
+ dev_kfree_skb_irq(skb_resp);
+ arg->cb(arg->cb_context, NULL, err);
+ kfree(arg);
+ return 0;
+}
+
+int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
+ struct sk_buff *skb,
+ data_exchange_cb_t cb,
+ void *cb_context)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ struct pn533_frame *out_frame, *in_frame;
+ struct pn533_data_exchange_arg *arg;
+ struct sk_buff *skb_resp;
+ int skb_resp_len;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (!dev->tgt_active_prot) {
+ nfc_dev_err(&dev->interface->dev, "Cannot exchange data if"
+ " there is no active target");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ rc = pn533_data_exchange_tx_frame(dev, skb);
+ if (rc)
+ goto error;
+
+ skb_resp_len = PN533_CMD_DATAEXCH_HEAD_LEN +
+ PN533_CMD_DATAEXCH_DATA_MAXLEN +
+ PN533_FRAME_TAIL_SIZE;
+
+ skb_resp = nfc_alloc_skb(skb_resp_len, GFP_KERNEL);
+ if (!skb_resp) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ in_frame = (struct pn533_frame *) skb_resp->data;
+ out_frame = (struct pn533_frame *) skb->data;
+
+ arg = kmalloc(sizeof(struct pn533_data_exchange_arg), GFP_KERNEL);
+ if (!arg) {
+ rc = -ENOMEM;
+ goto free_skb_resp;
+ }
+
+ arg->skb_resp = skb_resp;
+ arg->skb_out = skb;
+ arg->cb = cb;
+ arg->cb_context = cb_context;
+
+ rc = pn533_send_cmd_frame_async(dev, out_frame, in_frame, skb_resp_len,
+ pn533_data_exchange_complete, arg,
+ GFP_KERNEL);
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev, "Error %d when trying to"
+ " perform data_exchange", rc);
+ goto free_arg;
+ }
+
+ return 0;
+
+free_arg:
+ kfree(arg);
+free_skb_resp:
+ kfree_skb(skb_resp);
+error:
+ kfree_skb(skb);
+ return rc;
+}
+
+static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
+ u8 cfgdata_len)
+{
+ int rc;
+ u8 *params;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ pn533_tx_frame_init(dev->out_frame, PN533_CMD_RF_CONFIGURATION);
+
+ params = PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame);
+ params[0] = cfgitem;
+ memcpy(&params[1], cfgdata, cfgdata_len);
+ dev->out_frame->datalen += (1 + cfgdata_len);
+
+ pn533_tx_frame_finish(dev->out_frame);
+
+ rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
+ dev->in_maxlen);
+
+ return rc;
+}
+
+struct nfc_ops pn533_nfc_ops = {
+ .start_poll = pn533_start_poll,
+ .stop_poll = pn533_stop_poll,
+ .activate_target = pn533_activate_target,
+ .deactivate_target = pn533_deactivate_target,
+ .data_exchange = pn533_data_exchange,
+};
+
+static int pn533_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct pn533_fw_version *fw_ver;
+ struct pn533 *dev;
+ struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ struct pn533_config_max_retries max_retries;
+ int in_endpoint = 0;
+ int out_endpoint = 0;
+ int rc = -ENOMEM;
+ int i;
+ u32 protocols;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->udev = usb_get_dev(interface_to_usbdev(interface));
+ dev->interface = interface;
+ sema_init(&dev->cmd_lock, 1);
+
+ iface_desc = interface->cur_altsetting;
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+ if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint)) {
+ dev->in_maxlen = le16_to_cpu(endpoint->wMaxPacketSize);
+ in_endpoint = endpoint->bEndpointAddress;
+ }
+
+ if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint)) {
+ dev->out_maxlen =
+ le16_to_cpu(endpoint->wMaxPacketSize);
+ out_endpoint = endpoint->bEndpointAddress;
+ }
+ }
+
+ if (!in_endpoint || !out_endpoint) {
+ nfc_dev_err(&interface->dev, "Could not find bulk-in or"
+ " bulk-out endpoint");
+ rc = -ENODEV;
+ goto error;
+ }
+
+ dev->in_frame = kmalloc(dev->in_maxlen, GFP_KERNEL);
+ dev->in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ dev->out_frame = kmalloc(dev->out_maxlen, GFP_KERNEL);
+ dev->out_urb = usb_alloc_urb(0, GFP_KERNEL);
+
+ if (!dev->in_frame || !dev->out_frame ||
+ !dev->in_urb || !dev->out_urb)
+ goto error;
+
+ usb_fill_bulk_urb(dev->in_urb, dev->udev,
+ usb_rcvbulkpipe(dev->udev, in_endpoint),
+ NULL, 0, NULL, dev);
+ usb_fill_bulk_urb(dev->out_urb, dev->udev,
+ usb_sndbulkpipe(dev->udev, out_endpoint),
+ NULL, 0,
+ pn533_send_complete, dev);
+
+ tasklet_init(&dev->tasklet, pn533_tasklet_cmd_complete, (ulong)dev);
+
+ usb_set_intfdata(interface, dev);
+
+ pn533_tx_frame_init(dev->out_frame, PN533_CMD_GET_FIRMWARE_VERSION);
+ pn533_tx_frame_finish(dev->out_frame);
+
+ rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
+ dev->in_maxlen);
+ if (rc)
+ goto kill_tasklet;
+
+ fw_ver = (struct pn533_fw_version *)
+ PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame);
+ nfc_dev_info(&dev->interface->dev, "NXP PN533 firmware ver %d.%d now"
+ " attached", fw_ver->ver, fw_ver->rev);
+
+ protocols = NFC_PROTO_JEWEL_MASK
+ | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
+ | NFC_PROTO_ISO14443_MASK
+ | NFC_PROTO_NFC_DEP_MASK;
+
+ dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols);
+ if (!dev->nfc_dev)
+ goto kill_tasklet;
+
+ nfc_set_parent_dev(dev->nfc_dev, &interface->dev);
+ nfc_set_drvdata(dev->nfc_dev, dev);
+
+ rc = nfc_register_device(dev->nfc_dev);
+ if (rc)
+ goto free_nfc_dev;
+
+ max_retries.mx_rty_atr = PN533_CONFIG_MAX_RETRIES_ENDLESS;
+ max_retries.mx_rty_psl = 2;
+ max_retries.mx_rty_passive_act = PN533_CONFIG_MAX_RETRIES_NO_RETRY;
+
+ rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES,
+ (u8 *) &max_retries, sizeof(max_retries));
+
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev, "Error on setting MAX_RETRIES"
+ " config");
+ goto free_nfc_dev;
+ }
+
+ return 0;
+
+free_nfc_dev:
+ nfc_free_device(dev->nfc_dev);
+kill_tasklet:
+ tasklet_kill(&dev->tasklet);
+error:
+ kfree(dev->in_frame);
+ usb_free_urb(dev->in_urb);
+ kfree(dev->out_frame);
+ usb_free_urb(dev->out_urb);
+ kfree(dev);
+ return rc;
+}
+
+static void pn533_disconnect(struct usb_interface *interface)
+{
+ struct pn533 *dev;
+
+ dev = usb_get_intfdata(interface);
+ usb_set_intfdata(interface, NULL);
+
+ nfc_unregister_device(dev->nfc_dev);
+ nfc_free_device(dev->nfc_dev);
+
+ usb_kill_urb(dev->in_urb);
+ usb_kill_urb(dev->out_urb);
+
+ tasklet_kill(&dev->tasklet);
+
+ kfree(dev->in_frame);
+ usb_free_urb(dev->in_urb);
+ kfree(dev->out_frame);
+ usb_free_urb(dev->out_urb);
+ kfree(dev);
+
+ nfc_dev_info(&interface->dev, "NXP PN533 NFC device disconnected");
+}
+
+static struct usb_driver pn533_driver = {
+ .name = "pn533",
+ .probe = pn533_probe,
+ .disconnect = pn533_disconnect,
+ .id_table = pn533_table,
+};
+
+static int __init pn533_init(void)
+{
+ int rc;
+
+ rc = usb_register(&pn533_driver);
+ if (rc)
+ err("usb_register failed. Error number %d", rc);
+
+ return rc;
+}
+
+static void __exit pn533_exit(void)
+{
+ usb_deregister(&pn533_driver);
+}
+
+module_init(pn533_init);
+module_exit(pn533_exit);
+
+MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>,"
+ " Aloisio Almeida Jr <aloisio.almeida@openbossa.org>");
+MODULE_DESCRIPTION("PN533 usb driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index d06a6374ed6..cac63c9f49a 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -71,8 +71,14 @@ config OF_MDIO
config OF_PCI
def_tristate PCI
- depends on PCI && (PPC || MICROBLAZE || X86)
+ depends on PCI
help
OpenFirmware PCI bus accessors
+config OF_PCI_IRQ
+ def_tristate PCI
+ depends on OF_PCI && OF_IRQ
+ help
+ OpenFirmware PCI IRQ routing helpers
+
endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index f7861ed2f28..dccb1176be5 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_SPI) += of_spi.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
+obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
index b4559c58c09..72c33fbe451 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -577,6 +577,24 @@ int of_address_to_resource(struct device_node *dev, int index,
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
+struct device_node *of_find_matching_node_by_address(struct device_node *from,
+ const struct of_device_id *matches,
+ u64 base_address)
+{
+ struct device_node *dn = of_find_matching_node(from, matches);
+ struct resource res;
+
+	while (dn) {
+		if (!of_address_to_resource(dn, 0, &res) &&
+		    res.start == base_address)
+			return dn;
+
+		dn = of_find_matching_node(dn, matches);
+	}
+
+ return NULL;
+}
+
/**
* of_iomap - Maps the memory mapped IO for a given device_node
@@ -592,6 +610,6 @@ void __iomem *of_iomap(struct device_node *np, int index)
if (of_address_to_resource(np, index, &res))
return NULL;
- return ioremap(res.start, 1 + res.end - res.start);
+ return ioremap(res.start, resource_size(&res));
}
EXPORT_SYMBOL(of_iomap);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 632ebae7f17..3ff22e32b60 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -596,6 +596,72 @@ struct device_node *of_find_node_by_phandle(phandle handle)
EXPORT_SYMBOL(of_find_node_by_phandle);
/**
+ * of_property_read_u32_array - Find and read an array of 32 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values:	pointer to return values, modified only if return value is 0.
+ * @sz:		number of array elements to read.
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values are modified only if valid u32 values can be decoded.
+ */
+int of_property_read_u32_array(const struct device_node *np,
+ const char *propname, u32 *out_values,
+ size_t sz)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+ const __be32 *val;
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+ if ((sz * sizeof(*out_values)) > prop->length)
+ return -EOVERFLOW;
+
+ val = prop->value;
+ while (sz--)
+ *out_values++ = be32_to_cpup(val++);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_u32_array);
+
+/**
+ * of_property_read_string - Find and read a string from a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_string: pointer to null terminated return string, modified only if
+ * return value is 0.
+ *
+ * Search for a property in a device tree node and retrieve a null
+ * terminated string value (pointer to data, not a copy). Returns 0 on
+ * success, -EINVAL if the property does not exist, -ENODATA if property
+ * does not have a value, and -EILSEQ if the string is not null-terminated
+ * within the length of the property data.
+ *
+ * The out_string pointer is modified only if a valid string can be decoded.
+ */
+int of_property_read_string(struct device_node *np, const char *propname,
+ const char **out_string)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+ if (strnlen(prop->value, prop->length) >= prop->length)
+ return -EILSEQ;
+ *out_string = prop->value;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_string);
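(Editor's aside, not part of the patch: a usage sketch for the two new
accessors; the "example-range" property name and the array length are
illustrative only.)

	const char *model;
	u32 cells[2];

	if (of_property_read_string(np, "model", &model))
		model = "unknown";

	/* fails with -EOVERFLOW if the property holds fewer than two cells */
	if (of_property_read_u32_array(np, "example-range", cells, 2))
		return -EINVAL;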
+
+/**
* of_parse_phandle - Resolve a phandle property to a device_node pointer
* @np: Pointer to device node holding phandle property
* @phandle_name: Name of property holding a phandle value
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index 905960338fb..ef0105fa52b 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -21,8 +21,9 @@
#include <linux/slab.h>
/**
- * of_get_gpio_flags - Get a GPIO number and flags to use with GPIO API
+ * of_get_named_gpio_flags() - Get a GPIO number and flags to use with GPIO API
* @np: device node to get GPIO from
+ * @propname: property name containing gpio specifier(s)
* @index: index of the GPIO
* @flags: a flags pointer to fill in
*
@@ -30,8 +31,8 @@
* value on the error condition. If @flags is not NULL the function also fills
* in flags for the GPIO.
*/
-int of_get_gpio_flags(struct device_node *np, int index,
- enum of_gpio_flags *flags)
+int of_get_named_gpio_flags(struct device_node *np, const char *propname,
+ int index, enum of_gpio_flags *flags)
{
int ret;
struct device_node *gpio_np;
@@ -40,7 +41,7 @@ int of_get_gpio_flags(struct device_node *np, int index,
const void *gpio_spec;
const __be32 *gpio_cells;
- ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index,
+ ret = of_parse_phandles_with_args(np, propname, "#gpio-cells", index,
&gpio_np, &gpio_spec);
if (ret) {
pr_debug("%s: can't parse gpios property\n", __func__);
@@ -79,7 +80,7 @@ err0:
pr_debug("%s exited with status %d\n", __func__, ret);
return ret;
}
-EXPORT_SYMBOL(of_get_gpio_flags);
+EXPORT_SYMBOL(of_get_named_gpio_flags);
/**
* of_gpio_count - Count GPIOs for a device
@@ -126,8 +127,8 @@ EXPORT_SYMBOL(of_gpio_count);
* gpio chips. This function performs only one sanity check: whether gpio
* is less than ngpios (that is specified in the gpio_chip).
*/
-static int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
- const void *gpio_spec, u32 *flags)
+int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
+ const void *gpio_spec, u32 *flags)
{
const __be32 *gpio = gpio_spec;
const u32 n = be32_to_cpup(gpio);
@@ -151,6 +152,7 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
return n;
}
+EXPORT_SYMBOL(of_gpio_simple_xlate);
/**
* of_mm_gpiochip_add - Add memory mapped GPIO chip (bank)
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index 86f334a2769..bb184717588 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -8,6 +8,51 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/of_net.h>
+#include <linux/phy.h>
+
+/**
+ * This table maps 'enum phy_interface_t' found in include/linux/phy.h
+ * into the device tree binding of 'phy-mode', so that Ethernet
+ * device drivers can get the phy interface from the device tree.
+ */
+static const char *phy_modes[] = {
+ [PHY_INTERFACE_MODE_NA] = "",
+ [PHY_INTERFACE_MODE_MII] = "mii",
+ [PHY_INTERFACE_MODE_GMII] = "gmii",
+ [PHY_INTERFACE_MODE_SGMII] = "sgmii",
+ [PHY_INTERFACE_MODE_TBI] = "tbi",
+ [PHY_INTERFACE_MODE_RMII] = "rmii",
+ [PHY_INTERFACE_MODE_RGMII] = "rgmii",
+ [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
+ [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
+ [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
+ [PHY_INTERFACE_MODE_RTBI] = "rtbi",
+ [PHY_INTERFACE_MODE_SMII] = "smii",
+};
+
+/**
+ * of_get_phy_mode - Get phy mode for given device_node
+ * @np: Pointer to the given device_node
+ *
+ * The function gets the phy interface string from the 'phy-mode' property
+ * and returns its index in the phy_modes table, or a negative errno on error.
+ */
+const int of_get_phy_mode(struct device_node *np)
+{
+ const char *pm;
+ int err, i;
+
+ err = of_property_read_string(np, "phy-mode", &pm);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
+ if (!strcasecmp(pm, phy_modes[i]))
+ return i;
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(of_get_phy_mode);
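(Editor's aside, not part of the patch: how an Ethernet driver is expected to
consume this helper; the MII fallback is illustrative only.)

	int phy_mode;

	phy_mode = of_get_phy_mode(dev->of_node);
	if (phy_mode < 0)
		phy_mode = PHY_INTERFACE_MODE_MII;	/* illustrative fallback */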
/**
* Search the device tree for the best MAC address to use. 'mac-address' is
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index ac1ec54e4fd..3701b62c1d5 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -1,92 +1,41 @@
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
-#include <linux/of_irq.h>
#include <asm/prom.h>
-/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
- * @pdev: the device whose interrupt is to be resolved
- * @out_irq: structure of_irq filled by this function
- *
- * This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
- * walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
- * resolving using the OF tree walking.
- */
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+static inline int __of_pci_pci_compare(struct device_node *node,
+ unsigned int devfn)
{
- struct device_node *dn, *ppnode;
- struct pci_dev *ppdev;
- u32 lspec;
- __be32 lspec_be;
- __be32 laddr[3];
- u8 pin;
- int rc;
+ unsigned int size;
+ const __be32 *reg = of_get_property(node, "reg", &size);
- /* Check if we have a device node, if yes, fallback to standard
- * device tree parsing
- */
- dn = pci_device_to_OF_node(pdev);
- if (dn) {
- rc = of_irq_map_one(dn, 0, out_irq);
- if (!rc)
- return rc;
- }
-
- /* Ok, we don't, time to have fun. Let's start by building up an
- * interrupt spec. we assume #interrupt-cells is 1, which is standard
- * for PCI. If you do different, then don't use that routine.
- */
- rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
- if (rc != 0)
- return rc;
- /* No pin, exit */
- if (pin == 0)
- return -ENODEV;
-
- /* Now we walk up the PCI tree */
- lspec = pin;
- for (;;) {
- /* Get the pci_dev of our parent */
- ppdev = pdev->bus->self;
-
- /* Ouch, it's a host bridge... */
- if (ppdev == NULL) {
- ppnode = pci_bus_to_OF_node(pdev->bus);
-
- /* No node for host bridge ? give up */
- if (ppnode == NULL)
- return -EINVAL;
- } else {
- /* We found a P2P bridge, check if it has a node */
- ppnode = pci_device_to_OF_node(ppdev);
- }
-
- /* Ok, we have found a parent with a device-node, hand over to
- * the OF parsing code.
- * We build a unit address from the linux device to be used for
- * resolution. Note that we use the linux bus number which may
- * not match your firmware bus numbering.
- * Fortunately, in most cases, interrupt-map-mask doesn't
- * include the bus number as part of the matching.
- * You should still be careful about that though if you intend
- * to rely on this function (you ship a firmware that doesn't
- * create device nodes for all PCI devices).
- */
- if (ppnode)
- break;
+ if (!reg || size < 5 * sizeof(__be32))
+ return 0;
+ return ((be32_to_cpup(&reg[0]) >> 8) & 0xff) == devfn;
+}
- /* We can only get here if we hit a P2P bridge with no node,
- * let's do standard swizzling and try again
+struct device_node *of_pci_find_child_device(struct device_node *parent,
+ unsigned int devfn)
+{
+ struct device_node *node, *node2;
+
+ for_each_child_of_node(parent, node) {
+ if (__of_pci_pci_compare(node, devfn))
+ return node;
+ /*
+ * Some OFs create a parent node "multifunc-device" as
+ * a fake root for all functions of a multi-function
+			 * device; we descend into those as well.
*/
- lspec = pci_swizzle_interrupt_pin(pdev, lspec);
- pdev = ppdev;
+ if (!strcmp(node->name, "multifunc-device")) {
+ for_each_child_of_node(node, node2) {
+ if (__of_pci_pci_compare(node2, devfn)) {
+ of_node_put(node);
+ return node2;
+ }
+ }
+ }
}
-
- lspec_be = cpu_to_be32(lspec);
- laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
- laddr[1] = laddr[2] = cpu_to_be32(0);
- return of_irq_map_raw(ppnode, &lspec_be, 1, laddr, out_irq);
+ return NULL;
}
-EXPORT_SYMBOL_GPL(of_irq_map_pci);
+EXPORT_SYMBOL_GPL(of_pci_find_child_device);
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
new file mode 100644
index 00000000000..ac1ec54e4fd
--- /dev/null
+++ b/drivers/of/of_pci_irq.c
@@ -0,0 +1,92 @@
+#include <linux/kernel.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <asm/prom.h>
+
+/**
+ * of_irq_map_pci - Resolve the interrupt for a PCI device
+ * @pdev: the device whose interrupt is to be resolved
+ * @out_irq: structure of_irq filled by this function
+ *
+ * This function resolves the PCI interrupt for a given PCI device. If a
+ * device-node exists for a given pci_dev, it will use normal OF tree
+ * walking. If not, it will implement standard swizzling and walk up the
+ * PCI tree until a device-node is found, at which point it will finish
+ * resolving using the OF tree walking.
+ */
+int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+{
+ struct device_node *dn, *ppnode;
+ struct pci_dev *ppdev;
+ u32 lspec;
+ __be32 lspec_be;
+ __be32 laddr[3];
+ u8 pin;
+ int rc;
+
+ /* Check if we have a device node, if yes, fallback to standard
+ * device tree parsing
+ */
+ dn = pci_device_to_OF_node(pdev);
+ if (dn) {
+ rc = of_irq_map_one(dn, 0, out_irq);
+ if (!rc)
+ return rc;
+ }
+
+ /* Ok, we don't, time to have fun. Let's start by building up an
+ * interrupt spec. we assume #interrupt-cells is 1, which is standard
+ * for PCI. If you do it differently, don't use this routine.
+ */
+ rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
+ if (rc != 0)
+ return rc;
+ /* No pin, exit */
+ if (pin == 0)
+ return -ENODEV;
+
+ /* Now we walk up the PCI tree */
+ lspec = pin;
+ for (;;) {
+ /* Get the pci_dev of our parent */
+ ppdev = pdev->bus->self;
+
+ /* Ouch, it's a host bridge... */
+ if (ppdev == NULL) {
+ ppnode = pci_bus_to_OF_node(pdev->bus);
+
+ /* No node for host bridge ? give up */
+ if (ppnode == NULL)
+ return -EINVAL;
+ } else {
+ /* We found a P2P bridge, check if it has a node */
+ ppnode = pci_device_to_OF_node(ppdev);
+ }
+
+ /* Ok, we have found a parent with a device-node, hand over to
+ * the OF parsing code.
+ * We build a unit address from the linux device to be used for
+ * resolution. Note that we use the linux bus number which may
+ * not match your firmware bus numbering.
+ * Fortunately, in most cases, interrupt-map-mask doesn't
+ * include the bus number as part of the matching.
+ * You should still be careful about that though if you intend
+ * to rely on this function (you ship a firmware that doesn't
+ * create device nodes for all PCI devices).
+ */
+ if (ppnode)
+ break;
+
+ /* We can only get here if we hit a P2P bridge with no node,
+ * let's do standard swizzling and try again
+ */
+ lspec = pci_swizzle_interrupt_pin(pdev, lspec);
+ pdev = ppdev;
+ }
+
+ lspec_be = cpu_to_be32(lspec);
+ laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
+ laddr[1] = laddr[2] = cpu_to_be32(0);
+ return of_irq_map_raw(ppnode, &lspec_be, 1, laddr, out_irq);
+}
+EXPORT_SYMBOL_GPL(of_irq_map_pci);
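(Editor's aside, not part of the patch: the usual consumer pattern, assuming
the struct of_irq layout and the irq_create_of_mapping() helper of this kernel
generation, neither of which appears in this diff.)

	struct of_irq oirq;

	if (of_irq_map_pci(pdev, &oirq))
		return -ENODEV;
	pdev->irq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					  oirq.size);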
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 63d3cb73bdb..ed5a6d3c26a 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -13,6 +13,7 @@
*/
#include <linux/errno.h>
#include <linux/module.h>
+#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -22,6 +23,14 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+const struct of_device_id of_default_bus_match_table[] = {
+ { .compatible = "simple-bus", },
+#ifdef CONFIG_ARM_AMBA
+ { .compatible = "arm,amba-bus", },
+#endif /* CONFIG_ARM_AMBA */
+ {} /* Empty terminated list */
+};
+
static int of_dev_node_match(struct device *dev, void *data)
{
return dev->of_node == data;
@@ -153,7 +162,7 @@ struct platform_device *of_device_alloc(struct device_node *np,
}
dev->dev.of_node = of_node_get(np);
-#if defined(CONFIG_PPC) || defined(CONFIG_MICROBLAZE)
+#if defined(CONFIG_MICROBLAZE)
dev->dev.dma_mask = &dev->archdata.dma_mask;
#endif
dev->dev.parent = parent;
@@ -168,17 +177,20 @@ struct platform_device *of_device_alloc(struct device_node *np,
EXPORT_SYMBOL(of_device_alloc);
/**
- * of_platform_device_create - Alloc, initialize and register an of_device
+ * of_platform_device_create_pdata - Alloc, initialize and register an of_device
* @np: pointer to node to create device for
* @bus_id: name to assign device
+ * @platform_data: pointer to populate platform_data pointer with
* @parent: Linux device model parent device.
*
* Returns pointer to created platform device, or NULL if a device was not
* registered. Unavailable devices will not get registered.
*/
-struct platform_device *of_platform_device_create(struct device_node *np,
- const char *bus_id,
- struct device *parent)
+struct platform_device *of_platform_device_create_pdata(
+ struct device_node *np,
+ const char *bus_id,
+ void *platform_data,
+ struct device *parent)
{
struct platform_device *dev;
@@ -189,11 +201,12 @@ struct platform_device *of_platform_device_create(struct device_node *np,
if (!dev)
return NULL;
-#if defined(CONFIG_PPC) || defined(CONFIG_MICROBLAZE)
+#if defined(CONFIG_MICROBLAZE)
dev->archdata.dma_mask = 0xffffffffUL;
#endif
dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
dev->dev.bus = &platform_bus_type;
+ dev->dev.platform_data = platform_data;
/* We do not fill the DMA ops for platform devices by default.
* This is currently the responsibility of the platform code
@@ -207,8 +220,111 @@ struct platform_device *of_platform_device_create(struct device_node *np,
return dev;
}
+
+/**
+ * of_platform_device_create - Alloc, initialize and register an of_device
+ * @np: pointer to node to create device for
+ * @bus_id: name to assign device
+ * @parent: Linux device model parent device.
+ *
+ * Returns pointer to created platform device, or NULL if a device was not
+ * registered. Unavailable devices will not get registered.
+ */
+struct platform_device *of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent)
+{
+ return of_platform_device_create_pdata(np, bus_id, NULL, parent);
+}
EXPORT_SYMBOL(of_platform_device_create);
+#ifdef CONFIG_ARM_AMBA
+static struct amba_device *of_amba_device_create(struct device_node *node,
+ const char *bus_id,
+ void *platform_data,
+ struct device *parent)
+{
+ struct amba_device *dev;
+ const void *prop;
+ int i, ret;
+
+ pr_debug("Creating amba device %s\n", node->full_name);
+
+ if (!of_device_is_available(node))
+ return NULL;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ /* setup generic device info */
+ dev->dev.coherent_dma_mask = ~0;
+ dev->dev.of_node = of_node_get(node);
+ dev->dev.parent = parent;
+ dev->dev.platform_data = platform_data;
+ if (bus_id)
+ dev_set_name(&dev->dev, "%s", bus_id);
+ else
+ of_device_make_bus_id(&dev->dev);
+
+ /* setup amba-specific device info */
+ dev->dma_mask = ~0;
+
+ /* Allow the HW Peripheral ID to be overridden */
+ prop = of_get_property(node, "arm,primecell-periphid", NULL);
+ if (prop)
+ dev->periphid = of_read_ulong(prop, 1);
+
+ /* Decode the IRQs and address ranges */
+ for (i = 0; i < AMBA_NR_IRQS; i++)
+ dev->irq[i] = irq_of_parse_and_map(node, i);
+
+ ret = of_address_to_resource(node, 0, &dev->res);
+ if (ret)
+ goto err_free;
+
+ ret = amba_device_register(dev, &iomem_resource);
+ if (ret)
+ goto err_free;
+
+ return dev;
+
+err_free:
+ kfree(dev);
+ return NULL;
+}
+#else /* CONFIG_ARM_AMBA */
+static struct amba_device *of_amba_device_create(struct device_node *node,
+ const char *bus_id,
+ void *platform_data,
+ struct device *parent)
+{
+ return NULL;
+}
+#endif /* CONFIG_ARM_AMBA */
+
+/**
+ * of_devname_lookup() - Given a device node, lookup the preferred Linux name
+ */
+static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *lookup,
+ struct device_node *np)
+{
+ struct resource res;
+ if (lookup) {
+ for(; lookup->name != NULL; lookup++) {
+ if (!of_device_is_compatible(np, lookup->compatible))
+ continue;
+ if (of_address_to_resource(np, 0, &res))
+ continue;
+ if (res.start != lookup->phys_addr)
+ continue;
+ pr_debug("%s: devname=%s\n", np->full_name, lookup->name);
+ return lookup;
+ }
+ }
+ return NULL;
+}
+
/**
* of_platform_bus_create() - Create a device for a node and its children.
* @bus: device node of the bus to instantiate
@@ -221,19 +337,41 @@ EXPORT_SYMBOL(of_platform_device_create);
*/
static int of_platform_bus_create(struct device_node *bus,
const struct of_device_id *matches,
- struct device *parent)
+ const struct of_dev_auxdata *lookup,
+ struct device *parent, bool strict)
{
+ const struct of_dev_auxdata *auxdata;
struct device_node *child;
struct platform_device *dev;
+ const char *bus_id = NULL;
+ void *platform_data = NULL;
int rc = 0;
- dev = of_platform_device_create(bus, NULL, parent);
+ /* Make sure it has a compatible property */
+ if (strict && (!of_get_property(bus, "compatible", NULL))) {
+ pr_debug("%s() - skipping %s, no compatible prop\n",
+ __func__, bus->full_name);
+ return 0;
+ }
+
+ auxdata = of_dev_lookup(lookup, bus);
+ if (auxdata) {
+ bus_id = auxdata->name;
+ platform_data = auxdata->platform_data;
+ }
+
+ if (of_device_is_compatible(bus, "arm,primecell")) {
+ of_amba_device_create(bus, bus_id, platform_data, parent);
+ return 0;
+ }
+
+ dev = of_platform_device_create_pdata(bus, bus_id, platform_data, parent);
if (!dev || !of_match_node(matches, bus))
return 0;
for_each_child_of_node(bus, child) {
pr_debug(" create child: %s\n", child->full_name);
- rc = of_platform_bus_create(child, matches, &dev->dev);
+ rc = of_platform_bus_create(child, matches, lookup, &dev->dev, strict);
if (rc) {
of_node_put(child);
break;
@@ -267,11 +405,11 @@ int of_platform_bus_probe(struct device_node *root,
/* Do a self check of bus type, if there's a match, create children */
if (of_match_node(matches, root)) {
- rc = of_platform_bus_create(root, matches, parent);
+ rc = of_platform_bus_create(root, matches, NULL, parent, false);
} else for_each_child_of_node(root, child) {
if (!of_match_node(matches, child))
continue;
- rc = of_platform_bus_create(child, matches, parent);
+ rc = of_platform_bus_create(child, matches, NULL, parent, false);
if (rc)
break;
}
@@ -280,4 +418,44 @@ int of_platform_bus_probe(struct device_node *root,
return rc;
}
EXPORT_SYMBOL(of_platform_bus_probe);
+
+/**
+ * of_platform_populate() - Populate platform_devices from device tree data
+ * @root: parent of the first level to probe or NULL for the root of the tree
+ * @matches: match table, NULL to use the default
+ * @parent: parent to hook devices from, NULL for toplevel
+ *
+ * Similar to of_platform_bus_probe(), this function walks the device tree
+ * and creates devices from nodes. It differs in that it follows the modern
+ * convention of requiring all device nodes to have a 'compatible' property,
+ * and it is suitable for creating devices which are children of the root
+ * node (of_platform_bus_probe will only create children of the root which
+ * are selected by the @matches argument).
+ *
+ * New board support should be using this function instead of
+ * of_platform_bus_probe().
+ *
+ * Returns 0 on success, < 0 on failure.
+ */
+int of_platform_populate(struct device_node *root,
+ const struct of_device_id *matches,
+ const struct of_dev_auxdata *lookup,
+ struct device *parent)
+{
+ struct device_node *child;
+ int rc = 0;
+
+ root = root ? of_node_get(root) : of_find_node_by_path("/");
+ if (!root)
+ return -EINVAL;
+
+ for_each_child_of_node(root, child) {
+ rc = of_platform_bus_create(child, matches, lookup, parent, true);
+ if (rc)
+ break;
+ }
+
+ of_node_put(root);
+ return rc;
+}
#endif /* !CONFIG_SPARC */
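
As a usage illustration of the of_platform_populate()/auxdata interface added above, the fragment below sketches how a board file might supply legacy platform_data for one device-tree node and then populate the whole tree. This is a minimal sketch, not part of the patch: the compatible string, unit address, device name and foo_platform_data are invented, and it assumes the OF_DEV_AUXDATA() helper and of_default_bus_match_table introduced alongside this interface.

/* Hypothetical board-init fragment, for illustration only. */
#include <linux/of_platform.h>

struct foo_platform_data {
        unsigned int clock_rate;
};

static struct foo_platform_data foo_pdata = {
        .clock_rate = 48000000,
};

/*
 * of_dev_lookup() matches each entry on (compatible, unit address) and
 * returns the legacy device name and platform_data.  The table ends with
 * an entry whose name is NULL.
 */
static struct of_dev_auxdata board_auxdata[] __initdata = {
        OF_DEV_AUXDATA("acme,foo-uart", 0x10009000, "foo-uart.0", &foo_pdata),
        { /* sentinel */ }
};

static void __init board_init_machine(void)
{
        /*
         * Walk the whole tree: every available node with a compatible
         * property becomes a platform device, or an AMBA device for
         * "arm,primecell" nodes.
         */
        of_platform_populate(NULL, of_default_bus_match_table,
                             board_auxdata, NULL);
}
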
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
index 9046f7b2ed7..94796f39bc4 100644
--- a/drivers/oprofile/oprofile_perf.c
+++ b/drivers/oprofile/oprofile_perf.c
@@ -31,7 +31,7 @@ static int num_counters;
/*
* Overflow callback for oprofile.
*/
-static void op_overflow_handler(struct perf_event *event, int unused,
+static void op_overflow_handler(struct perf_event *event,
struct perf_sample_data *data, struct pt_regs *regs)
{
int id;
@@ -79,7 +79,7 @@ static int op_create_counter(int cpu, int event)
pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
cpu, NULL,
- op_overflow_handler);
+ op_overflow_handler, NULL);
if (IS_ERR(pevent))
return PTR_ERR(pevent);
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
index 0b54e46c3c1..38b6fc02898 100644
--- a/drivers/oprofile/oprofile_stats.h
+++ b/drivers/oprofile/oprofile_stats.h
@@ -10,7 +10,7 @@
#ifndef OPROFILE_STATS_H
#define OPROFILE_STATS_H
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct oprofile_stat_struct {
atomic_t sample_lost_no_mm;
diff --git a/drivers/parport/parport_ax88796.c b/drivers/parport/parport_ax88796.c
index 2c5ac2bf5c5..844f6137970 100644
--- a/drivers/parport/parport_ax88796.c
+++ b/drivers/parport/parport_ax88796.c
@@ -293,7 +293,7 @@ static int parport_ax88796_probe(struct platform_device *pdev)
goto exit_mem;
}
- size = (res->end - res->start) + 1;
+ size = resource_size(res);
spacing = size / 3;
dd->io = request_mem_region(res->start, size, pdev->name);
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index f330338c2f2..d1cdb9449f8 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2864,24 +2864,6 @@ enum parport_pc_pci_cards {
lava_parallel_dual_b,
boca_ioppar,
plx_9050,
- timedia_4078a,
- timedia_4079h,
- timedia_4085h,
- timedia_4088a,
- timedia_4089a,
- timedia_4095a,
- timedia_4096a,
- timedia_4078u,
- timedia_4079a,
- timedia_4085u,
- timedia_4079r,
- timedia_4079s,
- timedia_4079d,
- timedia_4079e,
- timedia_4079f,
- timedia_9079a,
- timedia_9079b,
- timedia_9079c,
timedia_4006a,
timedia_4014,
timedia_4008a,
@@ -2940,24 +2922,6 @@ static struct parport_pc_pci {
/* lava_parallel_dual_b */ { 1, { { 0, -1 }, } },
/* boca_ioppar */ { 1, { { 0, -1 }, } },
/* plx_9050 */ { 2, { { 4, -1 }, { 5, -1 }, } },
- /* timedia_4078a */ { 1, { { 2, -1 }, } },
- /* timedia_4079h */ { 1, { { 2, 3 }, } },
- /* timedia_4085h */ { 2, { { 2, -1 }, { 4, -1 }, } },
- /* timedia_4088a */ { 2, { { 2, 3 }, { 4, 5 }, } },
- /* timedia_4089a */ { 2, { { 2, 3 }, { 4, 5 }, } },
- /* timedia_4095a */ { 2, { { 2, 3 }, { 4, 5 }, } },
- /* timedia_4096a */ { 2, { { 2, 3 }, { 4, 5 }, } },
- /* timedia_4078u */ { 1, { { 2, -1 }, } },
- /* timedia_4079a */ { 1, { { 2, 3 }, } },
- /* timedia_4085u */ { 2, { { 2, -1 }, { 4, -1 }, } },
- /* timedia_4079r */ { 1, { { 2, 3 }, } },
- /* timedia_4079s */ { 1, { { 2, 3 }, } },
- /* timedia_4079d */ { 1, { { 2, 3 }, } },
- /* timedia_4079e */ { 1, { { 2, 3 }, } },
- /* timedia_4079f */ { 1, { { 2, 3 }, } },
- /* timedia_9079a */ { 1, { { 2, 3 }, } },
- /* timedia_9079b */ { 1, { { 2, 3 }, } },
- /* timedia_9079c */ { 1, { { 2, 3 }, } },
/* timedia_4006a */ { 1, { { 0, -1 }, } },
/* timedia_4014 */ { 2, { { 0, -1 }, { 2, -1 }, } },
/* timedia_4008a */ { 1, { { 0, 1 }, } },
@@ -3019,24 +2983,6 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
PCI_SUBVENDOR_ID_EXSYS, PCI_SUBDEVICE_ID_EXSYS_4014, 0, 0, plx_9050 },
/* PCI_VENDOR_ID_TIMEDIA/SUNIX has many differing cards ...*/
- { 0x1409, 0x7168, 0x1409, 0x4078, 0, 0, timedia_4078a },
- { 0x1409, 0x7168, 0x1409, 0x4079, 0, 0, timedia_4079h },
- { 0x1409, 0x7168, 0x1409, 0x4085, 0, 0, timedia_4085h },
- { 0x1409, 0x7168, 0x1409, 0x4088, 0, 0, timedia_4088a },
- { 0x1409, 0x7168, 0x1409, 0x4089, 0, 0, timedia_4089a },
- { 0x1409, 0x7168, 0x1409, 0x4095, 0, 0, timedia_4095a },
- { 0x1409, 0x7168, 0x1409, 0x4096, 0, 0, timedia_4096a },
- { 0x1409, 0x7168, 0x1409, 0x5078, 0, 0, timedia_4078u },
- { 0x1409, 0x7168, 0x1409, 0x5079, 0, 0, timedia_4079a },
- { 0x1409, 0x7168, 0x1409, 0x5085, 0, 0, timedia_4085u },
- { 0x1409, 0x7168, 0x1409, 0x6079, 0, 0, timedia_4079r },
- { 0x1409, 0x7168, 0x1409, 0x7079, 0, 0, timedia_4079s },
- { 0x1409, 0x7168, 0x1409, 0x8079, 0, 0, timedia_4079d },
- { 0x1409, 0x7168, 0x1409, 0x9079, 0, 0, timedia_4079e },
- { 0x1409, 0x7168, 0x1409, 0xa079, 0, 0, timedia_4079f },
- { 0x1409, 0x7168, 0x1409, 0xb079, 0, 0, timedia_9079a },
- { 0x1409, 0x7168, 0x1409, 0xc079, 0, 0, timedia_9079b },
- { 0x1409, 0x7168, 0x1409, 0xd079, 0, 0, timedia_9079c },
{ 0x1409, 0x7268, 0x1409, 0x0101, 0, 0, timedia_4006a },
{ 0x1409, 0x7268, 0x1409, 0x0102, 0, 0, timedia_4014 },
{ 0x1409, 0x7268, 0x1409, 0x0103, 0, 0, timedia_4008a },
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index f01e26440f1..e9c32274df3 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -33,6 +33,9 @@ enum parport_pc_pci_cards {
netmos_9xx5_combo,
netmos_9855,
netmos_9855_2p,
+ netmos_9900,
+ netmos_9900_2p,
+ netmos_99xx_1p,
avlab_1s1p,
avlab_1s2p,
avlab_2s1p,
@@ -41,6 +44,24 @@ enum parport_pc_pci_cards {
siig_2p1s_20x,
siig_1s1p_20x,
siig_2s1p_20x,
+ timedia_4078a,
+ timedia_4079h,
+ timedia_4085h,
+ timedia_4088a,
+ timedia_4089a,
+ timedia_4095a,
+ timedia_4096a,
+ timedia_4078u,
+ timedia_4079a,
+ timedia_4085u,
+ timedia_4079r,
+ timedia_4079s,
+ timedia_4079d,
+ timedia_4079e,
+ timedia_4079f,
+ timedia_9079a,
+ timedia_9079b,
+ timedia_9079c,
};
/* each element directly indexed from enum list, above */
@@ -72,22 +93,20 @@ static int __devinit netmos_parallel_init(struct pci_dev *dev, struct parport_pc
dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return -ENODEV;
- /*
- * Netmos uses the subdevice ID to indicate the number of parallel
- * and serial ports. The form is 0x00PS, where <P> is the number of
- * parallel ports and <S> is the number of serial ports.
- */
- par->numports = (dev->subsystem_device & 0xf0) >> 4;
- if (par->numports > ARRAY_SIZE(par->addr))
- par->numports = ARRAY_SIZE(par->addr);
- /*
- * This function is currently only called for cards with up to
- * one parallel port.
- * Parallel port BAR is either before or after serial ports BARS;
- * hence, lo should be either 0 or equal to the number of serial ports.
- */
- if (par->addr[0].lo != 0)
- par->addr[0].lo = dev->subsystem_device & 0xf;
+
+ if (dev->device == PCI_DEVICE_ID_NETMOS_9912) {
+ par->numports = 1;
+ } else {
+ /*
+ * Netmos uses the subdevice ID to indicate the number of parallel
+ * and serial ports. The form is 0x00PS, where <P> is the number of
+ * parallel ports and <S> is the number of serial ports.
+ */
+ par->numports = (dev->subsystem_device & 0xf0) >> 4;
+ if (par->numports > ARRAY_SIZE(par->addr))
+ par->numports = ARRAY_SIZE(par->addr);
+ }
+
return 0;
}
@@ -97,6 +116,9 @@ static struct parport_pc_pci cards[] __devinitdata = {
/* netmos_9xx5_combo */ { 1, { { 2, -1 }, }, netmos_parallel_init },
/* netmos_9855 */ { 1, { { 0, -1 }, }, netmos_parallel_init },
/* netmos_9855_2p */ { 2, { { 0, -1 }, { 2, -1 }, } },
+ /* netmos_9900 */ {1, { { 3, 4 }, }, netmos_parallel_init },
+ /* netmos_9900_2p */ {2, { { 0, 1 }, { 3, 4 }, } },
+ /* netmos_99xx_1p */ {1, { { 0, 1 }, } },
/* avlab_1s1p */ { 1, { { 1, 2}, } },
/* avlab_1s2p */ { 2, { { 1, 2}, { 3, 4 },} },
/* avlab_2s1p */ { 1, { { 2, 3}, } },
@@ -105,6 +127,24 @@ static struct parport_pc_pci cards[] __devinitdata = {
/* siig_2p1s_20x */ { 2, { { 1, 2 }, { 3, 4 }, } },
/* siig_1s1p_20x */ { 1, { { 1, 2 }, } },
/* siig_2s1p_20x */ { 1, { { 2, 3 }, } },
+ /* timedia_4078a */ { 1, { { 2, -1 }, } },
+ /* timedia_4079h */ { 1, { { 2, 3 }, } },
+ /* timedia_4085h */ { 2, { { 2, -1 }, { 4, -1 }, } },
+ /* timedia_4088a */ { 2, { { 2, 3 }, { 4, 5 }, } },
+ /* timedia_4089a */ { 2, { { 2, 3 }, { 4, 5 }, } },
+ /* timedia_4095a */ { 2, { { 2, 3 }, { 4, 5 }, } },
+ /* timedia_4096a */ { 2, { { 2, 3 }, { 4, 5 }, } },
+ /* timedia_4078u */ { 1, { { 2, -1 }, } },
+ /* timedia_4079a */ { 1, { { 2, 3 }, } },
+ /* timedia_4085u */ { 2, { { 2, -1 }, { 4, -1 }, } },
+ /* timedia_4079r */ { 1, { { 2, 3 }, } },
+ /* timedia_4079s */ { 1, { { 2, 3 }, } },
+ /* timedia_4079d */ { 1, { { 2, 3 }, } },
+ /* timedia_4079e */ { 1, { { 2, 3 }, } },
+ /* timedia_4079f */ { 1, { { 2, 3 }, } },
+ /* timedia_9079a */ { 1, { { 2, 3 }, } },
+ /* timedia_9079b */ { 1, { { 2, 3 }, } },
+ /* timedia_9079c */ { 1, { { 2, 3 }, } },
};
static struct pci_device_id parport_serial_pci_tbl[] = {
@@ -127,6 +167,14 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
0x1000, 0x0022, 0, 0, netmos_9855_2p },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9855,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9855 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
+ 0xA000, 0x3011, 0, 0, netmos_9900 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
+ 0xA000, 0x3012, 0, 0, netmos_9900 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
+ 0xA000, 0x3020, 0, 0, netmos_9900_2p },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9912,
+ 0xA000, 0x2000, 0, 0, netmos_99xx_1p },
/* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
{ PCI_VENDOR_ID_AFAVLAB, 0x2110,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1s1p },
@@ -176,6 +224,25 @@ static struct pci_device_id parport_serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_20x },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_2S1P_20x_850,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, siig_2s1p_20x },
+ /* PCI_VENDOR_ID_TIMEDIA/SUNIX has many differing cards ...*/
+ { 0x1409, 0x7168, 0x1409, 0x4078, 0, 0, timedia_4078a },
+ { 0x1409, 0x7168, 0x1409, 0x4079, 0, 0, timedia_4079h },
+ { 0x1409, 0x7168, 0x1409, 0x4085, 0, 0, timedia_4085h },
+ { 0x1409, 0x7168, 0x1409, 0x4088, 0, 0, timedia_4088a },
+ { 0x1409, 0x7168, 0x1409, 0x4089, 0, 0, timedia_4089a },
+ { 0x1409, 0x7168, 0x1409, 0x4095, 0, 0, timedia_4095a },
+ { 0x1409, 0x7168, 0x1409, 0x4096, 0, 0, timedia_4096a },
+ { 0x1409, 0x7168, 0x1409, 0x5078, 0, 0, timedia_4078u },
+ { 0x1409, 0x7168, 0x1409, 0x5079, 0, 0, timedia_4079a },
+ { 0x1409, 0x7168, 0x1409, 0x5085, 0, 0, timedia_4085u },
+ { 0x1409, 0x7168, 0x1409, 0x6079, 0, 0, timedia_4079r },
+ { 0x1409, 0x7168, 0x1409, 0x7079, 0, 0, timedia_4079s },
+ { 0x1409, 0x7168, 0x1409, 0x8079, 0, 0, timedia_4079d },
+ { 0x1409, 0x7168, 0x1409, 0x9079, 0, 0, timedia_4079e },
+ { 0x1409, 0x7168, 0x1409, 0xa079, 0, 0, timedia_4079f },
+ { 0x1409, 0x7168, 0x1409, 0xb079, 0, 0, timedia_9079a },
+ { 0x1409, 0x7168, 0x1409, 0xc079, 0, 0, timedia_9079b },
+ { 0x1409, 0x7168, 0x1409, 0xd079, 0, 0, timedia_9079c },
{ 0, } /* terminate list */
};
@@ -219,6 +286,24 @@ static struct pciserial_board pci_parport_serial_boards[] __devinitdata = {
.base_baud = 115200,
.uart_offset = 8,
},
+ [netmos_9900] = { /* n/t */
+ .flags = FL_BASE0 | FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [netmos_9900_2p] = { /* parallel only */ /* n/t */
+ .flags = FL_BASE0,
+ .num_ports = 0,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
+ [netmos_99xx_1p] = { /* parallel only */ /* n/t */
+ .flags = FL_BASE0,
+ .num_ports = 0,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
[avlab_1s1p] = { /* n/t */
.flags = FL_BASE0 | FL_BASE_BARS,
.num_ports = 1,
@@ -267,6 +352,114 @@ static struct pciserial_board pci_parport_serial_boards[] __devinitdata = {
.base_baud = 921600,
.uart_offset = 8,
},
+ [timedia_4078a] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_4079h] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_4085h] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_4088a] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_4089a] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_4095a] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_4096a] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_4078u] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_4079a] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_4085u] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_4079r] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_4079s] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_4079d] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_4079e] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_4079f] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_9079a] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_9079b] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
+ [timedia_9079c] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 1,
+ .base_baud = 921600,
+ .uart_offset = 8,
+ },
};
struct parport_serial_private {
@@ -285,6 +478,10 @@ static int __devinit serial_register (struct pci_dev *dev,
struct serial_private *serial;
board = &pci_parport_serial_boards[id->driver_data];
+
+ if (board->num_ports == 0)
+ return 0;
+
serial = pciserial_init_ports(dev, board);
if (IS_ERR(serial))
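
To make the 0x00PS subdevice-ID convention described in netmos_parallel_init() above concrete, here is a tiny standalone C program (illustrative only, not driver code) that decodes an example subsystem device ID into its parallel and serial port counts; the PCI_DEVICE_ID_NETMOS_9912 branch above bypasses this decoding and hard-codes a single parallel port.

#include <stdio.h>

int main(void)
{
        unsigned int subsystem_device = 0x0012; /* example: 1 parallel, 2 serial */
        unsigned int parallel = (subsystem_device & 0xf0) >> 4;
        unsigned int serial   =  subsystem_device & 0x0f;

        printf("parallel ports: %u, serial ports: %u\n", parallel, serial);
        return 0;
}
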
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 094308e41be..6fadae3ad13 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -29,11 +29,6 @@ obj-$(CONFIG_PCI_MSI) += msi.o
# Build the Hypertransport interrupt support
obj-$(CONFIG_HT_IRQ) += htirq.o
-# Build Intel IOMMU support
-obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
-
-obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
-
obj-$(CONFIG_PCI_IOV) += iov.o
#
@@ -71,4 +66,6 @@ obj-$(CONFIG_PCI_STUB) += pci-stub.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
+obj-$(CONFIG_OF) += of.o
+
ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 8f3faf343f7..095f29e1373 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -408,7 +408,7 @@ got_one:
}
EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
-static int is_ejectable(acpi_handle handle)
+static int pcihp_is_ejectable(acpi_handle handle)
{
acpi_status status;
acpi_handle tmp;
@@ -442,7 +442,7 @@ int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle)
return 0;
if (bridge_handle != parent_handle)
return 0;
- return is_ejectable(handle);
+ return pcihp_is_ejectable(handle);
}
EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable);
@@ -450,7 +450,7 @@ static acpi_status
check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv)
{
int *found = (int *)context;
- if (is_ejectable(handle)) {
+ if (pcihp_is_ejectable(handle)) {
*found = 1;
return AE_CTRL_TERMINATE;
}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a70fa89f76f..220285760b6 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -110,7 +110,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
}
-static struct acpi_dock_ops acpiphp_dock_ops = {
+static const struct acpi_dock_ops acpiphp_dock_ops = {
.handler = handle_hotplug_event_func,
};
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index d703e73fffa..3fadf2f135e 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -32,7 +32,7 @@
#include <linux/pci_hotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include "cpci_hotplug.h"
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 4952c3b9379..f1ce99cceac 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -840,8 +840,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Need to read VID early b/c it's used to differentiate CPQ and INTC
* discovery
*/
- rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
- if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) {
+ vendor_id = pdev->vendor;
+ if ((vendor_id != PCI_VENDOR_ID_COMPAQ) &&
+ (vendor_id != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_non_compaq_or_intel);
rc = -ENODEV;
goto err_disable_device;
@@ -868,11 +869,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* TODO: This code can be made to support non-Compaq or Intel
* subsystem IDs
*/
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_disable_device;
- }
+ subsystem_vid = pdev->subsystem_vendor;
dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_non_compaq_or_intel);
@@ -887,11 +884,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_disable_device;
}
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_free_ctrl;
- }
+ subsystem_deviceid = pdev->subsystem_device;
info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 085dbb5fc16..1e9c9aacc3a 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -213,6 +213,9 @@ static int board_added(struct slot *p_slot)
goto err_exit;
}
+ /* Wait for 1 second after checking link training status */
+ msleep(1000);
+
/* Check for a power fault */
if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 50a23da5d24..96dc4734e4a 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -275,16 +275,9 @@ int pciehp_check_link_status(struct controller *ctrl)
* hot-plug capable downstream port. But old controller might
* not implement it. In this case, we wait for 1000 ms.
*/
- if (ctrl->link_active_reporting){
- /* Wait for Data Link Layer Link Active bit to be set */
+ if (ctrl->link_active_reporting)
pcie_wait_link_active(ctrl);
- /*
- * We must wait for 100 ms after the Data Link Layer
- * Link Active bit reads 1b before initiating a
- * configuration access to the hot added device.
- */
- msleep(100);
- } else
+ else
msleep(1000);
retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index 749fdf07031..753b21aaea6 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -158,47 +158,6 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
*/
}
-/* Program PCIE MaxPayload setting on device: ensure parent maxpayload <= device */
-static int pci_set_payload(struct pci_dev *dev)
-{
- int pos, ppos;
- u16 pctl, psz;
- u16 dctl, dsz, dcap, dmax;
- struct pci_dev *parent;
-
- parent = dev->bus->self;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (!pos)
- return 0;
-
- /* Read Device MaxPayload capability and setting */
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &dctl);
- pci_read_config_word(dev, pos + PCI_EXP_DEVCAP, &dcap);
- dsz = (dctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
- dmax = (dcap & PCI_EXP_DEVCAP_PAYLOAD);
-
- /* Read Parent MaxPayload setting */
- ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
- if (!ppos)
- return 0;
- pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
- psz = (pctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
-
- /* If parent payload > device max payload -> error
- * If parent payload > device payload -> set speed
- * If parent payload <= device payload -> do nothing
- */
- if (psz > dmax)
- return -1;
- else if (psz > dsz) {
- dev_info(&dev->dev, "Setting MaxPayload to %d\n", 128 << psz);
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
- (dctl & ~PCI_EXP_DEVCTL_PAYLOAD) +
- (psz << 5));
- }
- return 0;
-}
-
void pci_configure_slot(struct pci_dev *dev)
{
struct pci_dev *cdev;
@@ -210,9 +169,7 @@ void pci_configure_slot(struct pci_dev *dev)
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
return;
- ret = pci_set_payload(dev);
- if (ret)
- dev_warn(&dev->dev, "could not set device max payload\n");
+ pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss);
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 083034710fa..1d002b1c2bf 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -158,7 +158,7 @@ static void dlpar_pci_add_bus(struct device_node *dn)
/* Scan below the new bridge */
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
- of_scan_pci_bridge(dn, dev);
+ of_scan_pci_bridge(dev);
/* Map IO space for child bus, which may or may not succeed */
pcibios_map_io_space(dev->subordinate);
diff --git a/drivers/pci/hotplug/shpchp_sysfs.c b/drivers/pci/hotplug/shpchp_sysfs.c
index 071b7dc0094..efa30da1ae8 100644
--- a/drivers/pci/hotplug/shpchp_sysfs.c
+++ b/drivers/pci/hotplug/shpchp_sysfs.c
@@ -50,29 +50,26 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_MEM) &&
!(res->flags & IORESOURCE_PREFETCH)) {
- out += sprintf(out, "start = %8.8llx, "
- "length = %8.8llx\n",
- (unsigned long long)res->start,
- (unsigned long long)(res->end - res->start));
+ out += sprintf(out, "start = %8.8llx, length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)resource_size(res));
}
}
out += sprintf(out, "Free resources: prefetchable memory\n");
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_MEM) &&
(res->flags & IORESOURCE_PREFETCH)) {
- out += sprintf(out, "start = %8.8llx, "
- "length = %8.8llx\n",
- (unsigned long long)res->start,
- (unsigned long long)(res->end - res->start));
+ out += sprintf(out, "start = %8.8llx, length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)resource_size(res));
}
}
out += sprintf(out, "Free resources: IO\n");
pci_bus_for_each_resource(bus, res, index) {
if (res && (res->flags & IORESOURCE_IO)) {
- out += sprintf(out, "start = %8.8llx, "
- "length = %8.8llx\n",
- (unsigned long long)res->start,
- (unsigned long long)(res->end - res->start));
+ out += sprintf(out, "start = %8.8llx, length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)resource_size(res));
}
}
out += sprintf(out, "Free resources: bus numbers\n");
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
new file mode 100644
index 00000000000..f0929934bb7
--- /dev/null
+++ b/drivers/pci/of.c
@@ -0,0 +1,61 @@
+/*
+ * PCI <-> OF mapping helpers
+ *
+ * Copyright 2011 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include "pci.h"
+
+void pci_set_of_node(struct pci_dev *dev)
+{
+ if (!dev->bus->dev.of_node)
+ return;
+ dev->dev.of_node = of_pci_find_child_device(dev->bus->dev.of_node,
+ dev->devfn);
+}
+
+void pci_release_of_node(struct pci_dev *dev)
+{
+ of_node_put(dev->dev.of_node);
+ dev->dev.of_node = NULL;
+}
+
+void pci_set_bus_of_node(struct pci_bus *bus)
+{
+ if (bus->self == NULL)
+ bus->dev.of_node = pcibios_get_phb_of_node(bus);
+ else
+ bus->dev.of_node = of_node_get(bus->self->dev.of_node);
+}
+
+void pci_release_bus_of_node(struct pci_bus *bus)
+{
+ of_node_put(bus->dev.of_node);
+ bus->dev.of_node = NULL;
+}
+
+struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ /* This should only be called for PHBs */
+ if (WARN_ON(bus->self || bus->parent))
+ return NULL;
+
+ /* Look for a node pointer in either the intermediary device we
+ * create above the root bus or its own parent. Normally only
+ * the latter is populated.
+ */
+ if (bus->bridge->of_node)
+ return of_node_get(bus->bridge->of_node);
+ if (bus->bridge->parent && bus->bridge->parent->of_node)
+ return of_node_get(bus->bridge->parent->of_node);
+ return NULL;
+}
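
pcibios_get_phb_of_node() above is deliberately __weak so that an architecture can map a host bridge to its device-tree node in its own way. The sketch below shows what such an override could look like; it is an assumption for illustration only, with pci_controller and its dn field modelled loosely on the powerpc convention rather than taken from this patch.

/* Illustrative arch override; pci_controller/hose->dn are hypothetical here. */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
        struct pci_controller *hose = bus->sysdata;     /* arch-private data */

        if (!hose || !hose->dn)
                return NULL;

        /* Callers expect a held reference; pci_release_bus_of_node() drops it. */
        return of_node_get(hose->dn);
}
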
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 135df164a4c..12d1e81a8ab 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include "pci.h"
struct pci_dynid {
@@ -616,6 +617,21 @@ static int pci_pm_prepare(struct device *dev)
int error = 0;
/*
+ * If a PCI device configured to wake up the system from sleep states
+ * has been suspended at run time and there's a resume request pending
+ * for it, this is equivalent to the device signaling wakeup, so the
+ * system suspend operation should be aborted.
+ */
+ pm_runtime_get_noresume(dev);
+ if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+ pm_wakeup_event(dev, 0);
+
+ if (pm_wakeup_pending()) {
+ pm_runtime_put_sync(dev);
+ return -EBUSY;
+ }
+
+ /*
* PCI devices suspended at run time need to be resumed at this
* point, because in general it is necessary to reconfigure them for
* system suspend. Namely, if the device is supposed to wake up the
@@ -638,6 +654,8 @@ static void pci_pm_complete(struct device *dev)
if (drv && drv->pm && drv->pm->complete)
drv->pm->complete(dev);
+
+ pm_runtime_put_sync(dev);
}
#else /* !CONFIG_PM_SLEEP */
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 77cb2a14c89..81525ae5d86 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -55,7 +55,7 @@ enum smbios_attr_enum {
SMBIOS_ATTR_INSTANCE_SHOW,
};
-static mode_t
+static size_t
find_smbios_instance_string(struct pci_dev *pdev, char *buf,
enum smbios_attr_enum attribute)
{
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 5f10c23dff9..0ce67423a0a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -77,6 +77,8 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
+
/*
* The default CLS is used if arch didn't set CLS explicitly and not
* all pci devices agree on the same value. Arch can override either
@@ -1905,7 +1907,7 @@ void pci_enable_ari(struct pci_dev *dev)
{
int pos;
u32 cap;
- u16 ctrl;
+ u16 flags, ctrl;
struct pci_dev *bridge;
if (!pci_is_pcie(dev) || dev->devfn)
@@ -1923,6 +1925,11 @@ void pci_enable_ari(struct pci_dev *dev)
if (!pos)
return;
+ /* ARI is a PCIe v2 feature */
+ pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
+ if ((flags & PCI_EXP_FLAGS_VERS) < 2)
+ return;
+
pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
if (!(cap & PCI_EXP_DEVCAP2_ARI))
return;
@@ -3186,7 +3193,7 @@ EXPORT_SYMBOL(pcie_get_readrq);
* @rq: maximum memory read count in bytes
* valid values are 128, 256, 512, 1024, 2048, 4096
*
- * If possible sets maximum read byte count
+ * If possible sets maximum memory read request in bytes
*/
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
@@ -3209,7 +3216,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
ctl &= ~PCI_EXP_DEVCTL_READRQ;
ctl |= v;
- err = pci_write_config_dword(dev, cap + PCI_EXP_DEVCTL, ctl);
+ err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
}
out:
@@ -3218,6 +3225,67 @@ out:
EXPORT_SYMBOL(pcie_set_readrq);
/**
+ * pcie_get_mps - get PCI Express maximum payload size
+ * @dev: PCI device to query
+ *
+ * Returns maximum payload size in bytes
+ * or appropriate error value.
+ */
+int pcie_get_mps(struct pci_dev *dev)
+{
+ int ret, cap;
+ u16 ctl;
+
+ cap = pci_pcie_cap(dev);
+ if (!cap)
+ return -EINVAL;
+
+ ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
+ if (!ret)
+ ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+
+ return ret;
+}
+
+/**
+ * pcie_set_mps - set PCI Express maximum payload size
+ * @dev: PCI device to query
+ * @mps: maximum payload size in bytes
+ * valid values are 128, 256, 512, 1024, 2048, 4096
+ *
+ * If possible sets maximum payload size
+ */
+int pcie_set_mps(struct pci_dev *dev, int mps)
+{
+ int cap, err = -EINVAL;
+ u16 ctl, v;
+
+ if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
+ goto out;
+
+ v = ffs(mps) - 8;
+ if (v > dev->pcie_mpss)
+ goto out;
+ v <<= 5;
+
+ cap = pci_pcie_cap(dev);
+ if (!cap)
+ goto out;
+
+ err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
+ if (err)
+ goto out;
+
+ if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
+ ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ ctl |= v;
+ err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
+ }
+out:
+ return err;
+}
+
+/**
* pci_select_bars - Make BAR mask from the type of resource
* @dev: the PCI device for which BAR mask is made
* @flags: resource type mask to be selected
@@ -3284,7 +3352,7 @@ static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
* @dev: the PCI device
* @decode: true = enable decoding, false = disable decoding
* @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
- * @change_bridge_flags: traverse ancestors and change bridges
+ * @flags: traverse ancestors and change bridges
* CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
*/
int pci_set_vga_state(struct pci_dev *dev, bool decode,
@@ -3483,6 +3551,8 @@ static int __init pci_setup(char *str)
pci_no_msi();
} else if (!strcmp(str, "noaer")) {
pci_no_aer();
+ } else if (!strncmp(str, "realloc", 7)) {
+ pci_realloc();
} else if (!strcmp(str, "nodomains")) {
pci_no_domains();
} else if (!strncmp(str, "cbiosize=", 9)) {
@@ -3498,6 +3568,10 @@ static int __init pci_setup(char *str)
pci_hotplug_io_size = memparse(str + 9, &str);
} else if (!strncmp(str, "hpmemsize=", 10)) {
pci_hotplug_mem_size = memparse(str + 10, &str);
+ } else if (!strncmp(str, "pcie_bus_safe", 13)) {
+ pcie_bus_config = PCIE_BUS_SAFE;
+ } else if (!strncmp(str, "pcie_bus_perf", 13)) {
+ pcie_bus_config = PCIE_BUS_PERFORMANCE;
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
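
Two notes on the pci.c changes above. The new options are kernel command-line parameters, e.g. pci=pcie_bus_safe, pci=pcie_bus_perf or pci=realloc. The MPS helpers encode the payload size as the 3-bit PCI_EXP_DEVCTL_PAYLOAD field in bits 7:5 of Device Control: 128 << field recovers the byte count and ffs(mps) - 8 recovers the field. The standalone program below (plain user-space C, not kernel code) simply checks that round trip for the legal sizes:

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        int mps;

        for (mps = 128; mps <= 4096; mps <<= 1) {
                int field = ffs(mps) - 8;       /* encoding used by pcie_set_mps() */
                int decoded = 128 << field;     /* decoding used by pcie_get_mps() */

                printf("mps=%4d -> field=%d -> decoded=%4d\n",
                       mps, field, decoded);
        }
        return 0;
}
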
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 731e20265ac..b74084e9ca1 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -146,6 +146,8 @@ static inline void pci_no_msi(void) { }
static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
#endif
+extern void pci_realloc(void);
+
static inline int pci_no_d1d2(struct pci_dev *dev)
{
unsigned int parent_dstates = 0;
@@ -184,8 +186,6 @@ pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
return NULL;
}
-struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
-
/* PCI slot sysfs helper code */
#define to_pci_slot(s) container_of(s, struct pci_slot, kobj)
@@ -283,6 +283,8 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
#endif /* CONFIG_PCI_IOV */
+extern unsigned long pci_cardbus_resource_alignment(struct resource *);
+
static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
struct resource *res)
{
@@ -292,6 +294,8 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
return pci_sriov_resource_alignment(dev, resno);
#endif
+ if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS)
+ return pci_cardbus_resource_alignment(res);
return resource_alignment(res);
}
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 43421fbe080..9674e9f30d4 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -24,6 +24,7 @@
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/kfifo.h>
#include "aerdrv.h"
static int forceload;
@@ -445,8 +446,7 @@ static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
return drv;
}
-static pci_ers_result_t reset_link(struct pcie_device *aerdev,
- struct pci_dev *dev)
+static pci_ers_result_t reset_link(struct pci_dev *dev)
{
struct pci_dev *udev;
pci_ers_result_t status;
@@ -486,7 +486,6 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
/**
* do_recovery - handle nonfatal/fatal error recovery process
- * @aerdev: pointer to a pcie_device data structure of root port
* @dev: pointer to a pci_dev data structure of agent detecting an error
* @severity: error severity type
*
@@ -494,8 +493,7 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
* error detected message to all downstream drivers within a hierarchy in
* question and return the returned code.
*/
-static void do_recovery(struct pcie_device *aerdev, struct pci_dev *dev,
- int severity)
+static void do_recovery(struct pci_dev *dev, int severity)
{
pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
enum pci_channel_state state;
@@ -511,7 +509,7 @@ static void do_recovery(struct pcie_device *aerdev, struct pci_dev *dev,
report_error_detected);
if (severity == AER_FATAL) {
- result = reset_link(aerdev, dev);
+ result = reset_link(dev);
if (result != PCI_ERS_RESULT_RECOVERED)
goto failed;
}
@@ -576,9 +574,73 @@ static void handle_error_source(struct pcie_device *aerdev,
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
info->status);
} else
- do_recovery(aerdev, dev, info->severity);
+ do_recovery(dev, info->severity);
}
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+static void aer_recover_work_func(struct work_struct *work);
+
+#define AER_RECOVER_RING_ORDER 4
+#define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER)
+
+struct aer_recover_entry
+{
+ u8 bus;
+ u8 devfn;
+ u16 domain;
+ int severity;
+};
+
+static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
+ AER_RECOVER_RING_SIZE);
+/*
+ * Mutual exclusion for writers of aer_recover_ring; the reader side does
+ * not need a lock, because there is only one reader and no locking is
+ * required between the reader and the writer.
+ */
+static DEFINE_SPINLOCK(aer_recover_ring_lock);
+static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
+
+void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
+ int severity)
+{
+ unsigned long flags;
+ struct aer_recover_entry entry = {
+ .bus = bus,
+ .devfn = devfn,
+ .domain = domain,
+ .severity = severity,
+ };
+
+ spin_lock_irqsave(&aer_recover_ring_lock, flags);
+ if (kfifo_put(&aer_recover_ring, &entry))
+ schedule_work(&aer_recover_work);
+ else
+ pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
+ domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
+}
+EXPORT_SYMBOL_GPL(aer_recover_queue);
+
+static void aer_recover_work_func(struct work_struct *work)
+{
+ struct aer_recover_entry entry;
+ struct pci_dev *pdev;
+
+ while (kfifo_get(&aer_recover_ring, &entry)) {
+ pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
+ entry.devfn);
+ if (!pdev) {
+ pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
+ entry.domain, entry.bus,
+ PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
+ continue;
+ }
+ do_recovery(pdev, entry.severity);
+ }
+}
+#endif
+
/**
* get_device_error_info - read error status from dev and store it to info
* @dev: pointer to the device expected to have an error record
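
The aer_recover_queue()/kfifo machinery added above lets firmware-first error sources (APEI/GHES) hand a PCIe error to the normal AER recovery path from a context where no struct pci_dev is at hand. A hedged sketch of a caller, assuming a CPER PCIe section with the usual device_id fields and the cper_severity_to_aer() helper exported just below (treat the structure and field names as illustrative):

/* Illustrative only: how an APEI/GHES-style consumer might queue recovery. */
static void report_firmware_first_pcie_error(struct cper_sec_pcie *pcie_err,
                                             int cper_severity)
{
        unsigned int devfn = PCI_DEVFN(pcie_err->device_id.device,
                                       pcie_err->device_id.function);
        int aer_severity = cper_severity_to_aer(cper_severity);

        /*
         * This only queues an entry into aer_recover_ring and schedules the
         * work item; do_recovery() itself runs later in process context.
         */
        aer_recover_queue(pcie_err->device_id.segment,
                          pcie_err->device_id.bus,
                          devfn, aer_severity);
}
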
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index b07a42e0b35..3ea51736f18 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -204,7 +204,7 @@ void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
-static int cper_severity_to_aer(int cper_severity)
+int cper_severity_to_aer(int cper_severity)
{
switch (cper_severity) {
case CPER_SEV_RECOVERABLE:
@@ -215,6 +215,7 @@ static int cper_severity_to_aer(int cper_severity)
return AER_CORRECTABLE;
}
}
+EXPORT_SYMBOL_GPL(cper_severity_to_aer);
void cper_print_aer(const char *prefix, int cper_severity,
struct aer_capability_regs *aer)
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 6892601fc76..cbfbab18be9 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -940,7 +940,7 @@ static int __init pcie_aspm_disable(char *str)
printk(KERN_INFO "PCIe ASPM is disabled\n");
} else if (!strcmp(str, "force")) {
aspm_force = 1;
- printk(KERN_INFO "PCIe ASPM is forcedly enabled\n");
+ printk(KERN_INFO "PCIe ASPM is forcibly enabled\n");
}
return 1;
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 48849ffdd67..8473727b29f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -52,6 +52,7 @@ static void release_pcibus_dev(struct device *dev)
if (pci_bus->bridge)
put_device(pci_bus->bridge);
pci_bus_remove_resources(pci_bus);
+ pci_release_bus_of_node(pci_bus);
kfree(pci_bus);
}
@@ -67,21 +68,6 @@ static int __init pcibus_class_init(void)
}
postcore_initcall(pcibus_class_init);
-/*
- * Translate the low bits of the PCI base
- * to the resource type
- */
-static inline unsigned int pci_calc_resource_flags(unsigned int flags)
-{
- if (flags & PCI_BASE_ADDRESS_SPACE_IO)
- return IORESOURCE_IO;
-
- if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
- return IORESOURCE_MEM | IORESOURCE_PREFETCH;
-
- return IORESOURCE_MEM;
-}
-
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
u64 size = mask & maxbase; /* Find the significant bits */
@@ -100,18 +86,39 @@ static u64 pci_size(u64 base, u64 maxbase, u64 mask)
return size;
}
-static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar)
+static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
+ u32 mem_type;
+ unsigned long flags;
+
if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
- res->flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
- return pci_bar_io;
+ flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
+ flags |= IORESOURCE_IO;
+ return flags;
}
- res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
+ flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
+ flags |= IORESOURCE_MEM;
+ if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
+ flags |= IORESOURCE_PREFETCH;
- if (res->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
- return pci_bar_mem64;
- return pci_bar_mem32;
+ mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+ switch (mem_type) {
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n");
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ flags |= IORESOURCE_MEM_64;
+ break;
+ default:
+ dev_warn(&dev->dev,
+ "mem unknown type %x treated as 32-bit BAR\n",
+ mem_type);
+ break;
+ }
+ return flags;
}
/**
@@ -164,11 +171,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
l = 0;
if (type == pci_bar_unknown) {
- type = decode_bar(res, l);
- res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
- if (type == pci_bar_io) {
+ res->flags = decode_bar(dev, l);
+ res->flags |= IORESOURCE_SIZEALIGN;
+ if (res->flags & IORESOURCE_IO) {
l &= PCI_BASE_ADDRESS_IO_MASK;
- mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
+ mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
} else {
l &= PCI_BASE_ADDRESS_MEM_MASK;
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
@@ -179,7 +186,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
mask = (u32)PCI_ROM_ADDRESS_MASK;
}
- if (type == pci_bar_mem64) {
+ if (res->flags & IORESOURCE_MEM_64) {
u64 l64 = l;
u64 sz64 = sz;
u64 mask64 = mask | (u64)~0 << 32;
@@ -203,7 +210,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
goto fail;
}
- res->flags |= IORESOURCE_MEM_64;
if ((sizeof(resource_size_t) < 8) && l) {
/* Address above 32-bit boundary; disable the BAR */
pci_write_config_dword(dev, pos, 0);
@@ -229,7 +235,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
}
out:
- return (type == pci_bar_mem64) ? 1 : 0;
+ return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
fail:
res->flags = 0;
goto out;
@@ -283,10 +289,6 @@ static void __devinit pci_read_bridge_io(struct pci_bus *child)
if (!res->end)
res->end = limit + 0xfff;
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
- } else {
- dev_printk(KERN_DEBUG, &dev->dev,
- " bridge window [io %#06lx-%#06lx] (disabled)\n",
- base, limit);
}
}
@@ -307,10 +309,6 @@ static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
res->start = base;
res->end = limit + 0xfffff;
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
- } else {
- dev_printk(KERN_DEBUG, &dev->dev,
- " bridge window [mem %#010lx-%#010lx] (disabled)\n",
- base, limit + 0xfffff);
}
}
@@ -358,10 +356,6 @@ static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
res->start = base;
res->end = limit + 0xfffff;
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
- } else {
- dev_printk(KERN_DEBUG, &dev->dev,
- " bridge window [mem %#010lx-%#010lx pref] (disabled)\n",
- base, limit + 0xfffff);
}
}
@@ -588,7 +582,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
child->self = bridge;
child->bridge = get_device(&bridge->dev);
-
+ pci_set_bus_of_node(child);
pci_set_bus_speed(child);
/* Set up default resource pointers and names.. */
@@ -724,12 +718,14 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
pci_write_config_word(dev, PCI_STATUS, 0xffff);
/* Prevent assigning a bus number that already exists.
- * This can happen when a bridge is hot-plugged */
- if (pci_find_bus(pci_domain_nr(bus), max+1))
- goto out;
- child = pci_add_new_bus(bus, dev, ++max);
- if (!child)
- goto out;
+ * This can happen when a bridge is hot-plugged, so in
+ * this case we only re-scan this bus. */
+ child = pci_find_bus(pci_domain_nr(bus), max+1);
+ if (!child) {
+ child = pci_add_new_bus(bus, dev, ++max);
+ if (!child)
+ goto out;
+ }
buses = (buses & 0xff000000)
| ((unsigned int)(child->primary) << 0)
| ((unsigned int)(child->secondary) << 8)
@@ -860,6 +856,8 @@ void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+ pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
+ pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
}
void set_pcie_hotplug_bridge(struct pci_dev *pdev)
@@ -1038,6 +1036,7 @@ static void pci_release_dev(struct device *dev)
pci_dev = to_pci_dev(dev);
pci_release_capabilities(pci_dev);
+ pci_release_of_node(pci_dev);
kfree(pci_dev);
}
@@ -1157,6 +1156,8 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
dev->vendor = l & 0xffff;
dev->device = (l >> 16) & 0xffff;
+ pci_set_of_node(dev);
+
if (pci_setup_device(dev)) {
kfree(dev);
return NULL;
@@ -1327,6 +1328,150 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
return nr;
}
+static int pcie_find_smpss(struct pci_dev *dev, void *data)
+{
+ u8 *smpss = data;
+
+ if (!pci_is_pcie(dev))
+ return 0;
+
+ /* For PCIE hotplug enabled slots not connected directly to a
+ * PCI-E root port, there can be problems when hotplugging
+ * devices. This is due to the possibility of hotplugging a
+ * device into the fabric with a smaller MPS than the devices
+ * currently running have configured. Modifying the MPS on the
+ * running devices could cause a fatal bus error due to an
+ * incoming frame being larger than the newly configured MPS.
+ * To work around this, the MPS for the entire fabric must be
+ * set to the minimum size. Any devices hotplugged into this
+ * fabric will have the minimum MPS set. If the PCI hotplug
+ * slot is directly connected to the root port and there are no
+ * other devices on the fabric (which seems to be the most
+ * common case), then this is not an issue and MPS discovery
+ * will occur as normal.
+ */
+ if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
+ dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT))
+ *smpss = 0;
+
+ if (*smpss > dev->pcie_mpss)
+ *smpss = dev->pcie_mpss;
+
+ return 0;
+}
+
+static void pcie_write_mps(struct pci_dev *dev, int mps)
+{
+ int rc, dev_mpss;
+
+ dev_mpss = 128 << dev->pcie_mpss;
+
+ if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
+ if (dev->bus->self) {
+ dev_dbg(&dev->bus->dev, "Bus MPSS %d\n",
+ 128 << dev->bus->self->pcie_mpss);
+
+ /* For "MPS Force Max", the assumption is made that
+ * downstream communication will never be larger than
+ * the MRRS. So, the MPS only needs to be configured
+ * for the upstream communication. This being the case,
+ * walk from the top down and set the MPS of the child
+ * to that of the parent bus.
+ */
+ mps = 128 << dev->bus->self->pcie_mpss;
+ if (mps > dev_mpss)
+ dev_warn(&dev->dev, "MPS configured higher than"
+ " maximum supported by the device. If"
+ " a bus issue occurs, try running with"
+ " pci=pcie_bus_safe.\n");
+ }
+
+ dev->pcie_mpss = ffs(mps) - 8;
+ }
+
+ rc = pcie_set_mps(dev, mps);
+ if (rc)
+ dev_err(&dev->dev, "Failed attempting to set the MPS\n");
+}
+
+static void pcie_write_mrrs(struct pci_dev *dev, int mps)
+{
+ int rc, mrrs;
+
+ if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
+ int dev_mpss = 128 << dev->pcie_mpss;
+
+ /* For Max performance, the MRRS must be set to the largest
+ * supported value. However, it cannot be configured larger
+ * than the MPS the device or the bus can support. This assumes
+ * that the largest MRRS available on the device cannot be
+ * smaller than the device MPSS.
+ */
+ mrrs = mps < dev_mpss ? mps : dev_mpss;
+ } else
+ /* In the "safe" case, configure the MRRS for fairness on the
+ * bus by making all devices have the same size
+ */
+ mrrs = mps;
+
+
+ /* MRRS is an R/W register. Invalid values can be written, but a
+ * subsequent read will verify whether the value is acceptable or not.
+ * If the MRRS value provided is not acceptable (e.g., too large),
+ * shrink the value until it is acceptable to the HW.
+ */
+ while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
+ rc = pcie_set_readrq(dev, mrrs);
+ if (rc)
+ dev_err(&dev->dev, "Failed attempting to set the MRRS\n");
+
+ mrrs /= 2;
+ }
+}
+
+static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
+{
+ int mps = 128 << *(u8 *)data;
+
+ if (!pci_is_pcie(dev))
+ return 0;
+
+ dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
+ pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
+
+ pcie_write_mps(dev, mps);
+ pcie_write_mrrs(dev, mps);
+
+ dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
+ pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
+
+ return 0;
+}
+
+/* pcie_bus_configure_settings() requires that pci_walk_bus() work in a top-down,
+ * parents then children fashion. If this changes, then this code will not
+ * work as designed.
+ */
+void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
+{
+ u8 smpss = mpss;
+
+ if (!bus->self)
+ return;
+
+ if (!pci_is_pcie(bus->self))
+ return;
+
+ if (pcie_bus_config == PCIE_BUS_SAFE) {
+ pcie_find_smpss(bus->self, &smpss);
+ pci_walk_bus(bus, pcie_find_smpss, &smpss);
+ }
+
+ pcie_bus_configure_set(bus->self, &smpss);
+ pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
+}
+EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
+
unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
{
unsigned int devfn, pass, max = bus->secondary;
@@ -1409,6 +1554,7 @@ struct pci_bus * pci_create_bus(struct device *parent,
goto dev_reg_err;
b->bridge = get_device(dev);
device_enable_async_suspend(b->bridge);
+ pci_set_bus_of_node(b);
if (!parent)
set_dev_node(b->bridge, pcibus_to_node(b));
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e8a140669f9..1196f61a4ab 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2758,9 +2758,34 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
+
+ /*
+ * RICOH 0xe823 SD/MMC card reader fails to recognize
+ * certain types of SD/MMC cards. Lowering the SD base
+ * clock frequency from 200MHz to 50MHz fixes this issue.
+ *
+ * 0x150 - SD2.0 mode enable for changing base clock
+ * frequency to 50MHz
+ * 0xe1 - Base clock frequency
+ * 0x32 - 50MHz new clock frequency
+ * 0xf9 - Key register for 0x150
+ * 0xfc - Key register for 0xe1
+ */
+ if (dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
+ pci_write_config_byte(dev, 0xf9, 0xfc);
+ pci_write_config_byte(dev, 0x150, 0x10);
+ pci_write_config_byte(dev, 0xf9, 0x00);
+ pci_write_config_byte(dev, 0xfc, 0x01);
+ pci_write_config_byte(dev, 0xe1, 0x32);
+ pci_write_config_byte(dev, 0xfc, 0x00);
+
+ dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n");
+ }
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
#endif /*CONFIG_MMC_RICOH_MMC*/
#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 1e9e5a5b8c8..784da9d3602 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -34,6 +34,7 @@ struct resource_list_x {
resource_size_t start;
resource_size_t end;
resource_size_t add_size;
+ resource_size_t min_align;
unsigned long flags;
};
@@ -47,6 +48,13 @@ struct resource_list_x {
(head)->next = NULL; \
} while (0)
+int pci_realloc_enable = 0;
+#define pci_realloc_enabled() pci_realloc_enable
+void pci_realloc(void)
+{
+ pci_realloc_enable = 1;
+}
+
/**
* add_to_list() - add a new resource tracker to the list
* @head: Head of the list
@@ -58,7 +66,7 @@ struct resource_list_x {
*/
static void add_to_list(struct resource_list_x *head,
struct pci_dev *dev, struct resource *res,
- resource_size_t add_size)
+ resource_size_t add_size, resource_size_t min_align)
{
struct resource_list_x *list = head;
struct resource_list_x *ln = list->next;
@@ -77,13 +85,16 @@ static void add_to_list(struct resource_list_x *head,
tmp->end = res->end;
tmp->flags = res->flags;
tmp->add_size = add_size;
+ tmp->min_align = min_align;
list->next = tmp;
}
static void add_to_failed_list(struct resource_list_x *head,
struct pci_dev *dev, struct resource *res)
{
- add_to_list(head, dev, res, 0);
+ add_to_list(head, dev, res,
+ 0 /* don't care */,
+ 0 /* don't care */);
}
static void __dev_sort_resources(struct pci_dev *dev,
@@ -114,18 +125,18 @@ static inline void reset_resource(struct resource *res)
}
/**
- * adjust_resources_sorted() - satisfy any additional resource requests
+ * reassign_resources_sorted() - satisfy any additional resource requests
*
- * @add_head : head of the list tracking requests requiring additional
+ * @realloc_head : head of the list tracking requests requiring additional
* resources
* @head : head of the list tracking requests with allocated
* resources
*
- * Walk through each element of the add_head and try to procure
+ * Walk through each element of the realloc_head and try to procure
* additional resources for the element, provided the element
* is in the head list.
*/
-static void adjust_resources_sorted(struct resource_list_x *add_head,
+static void reassign_resources_sorted(struct resource_list_x *realloc_head,
struct resource_list *head)
{
struct resource *res;
@@ -134,8 +145,8 @@ static void adjust_resources_sorted(struct resource_list_x *add_head,
resource_size_t add_size;
int idx;
- prev = add_head;
- for (list = add_head->next; list;) {
+ prev = realloc_head;
+ for (list = realloc_head->next; list;) {
res = list->res;
/* skip resource that has been reset */
if (!res->flags)
@@ -152,13 +163,17 @@ static void adjust_resources_sorted(struct resource_list_x *add_head,
idx = res - &list->dev->resource[0];
add_size=list->add_size;
- if (!resource_size(res) && add_size) {
- res->end = res->start + add_size - 1;
- if(pci_assign_resource(list->dev, idx))
+ if (!resource_size(res)) {
+ res->start = list->start;
+ res->end = res->start + add_size - 1;
+ if(pci_assign_resource(list->dev, idx))
reset_resource(res);
- } else if (add_size) {
- adjust_resource(res, res->start,
- resource_size(res) + add_size);
+ } else {
+ resource_size_t align = list->min_align;
+ res->flags |= list->flags & (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
+ if (pci_reassign_resource(list->dev, idx, add_size, align))
+ dev_printk(KERN_DEBUG, &list->dev->dev, "failed to add optional resources res=%pR\n",
+ res);
}
out:
tmp = list;
@@ -203,16 +218,16 @@ static void assign_requested_resources_sorted(struct resource_list *head,
}
static void __assign_resources_sorted(struct resource_list *head,
- struct resource_list_x *add_head,
+ struct resource_list_x *realloc_head,
struct resource_list_x *fail_head)
{
/* Satisfy the must-have resource requests */
assign_requested_resources_sorted(head, fail_head);
- /* Try to satisfy any additional nice-to-have resource
+ /* Try to satisfy any additional optional resource
requests */
- if (add_head)
- adjust_resources_sorted(add_head, head);
+ if (realloc_head)
+ reassign_resources_sorted(realloc_head, head);
free_list(resource_list, head);
}
@@ -228,7 +243,7 @@ static void pdev_assign_resources_sorted(struct pci_dev *dev,
}
static void pbus_assign_resources_sorted(const struct pci_bus *bus,
- struct resource_list_x *add_head,
+ struct resource_list_x *realloc_head,
struct resource_list_x *fail_head)
{
struct pci_dev *dev;
@@ -238,7 +253,7 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus,
list_for_each_entry(dev, &bus->devices, bus_list)
__dev_sort_resources(dev, &head);
- __assign_resources_sorted(&head, add_head, fail_head);
+ __assign_resources_sorted(&head, realloc_head, fail_head);
}
void pci_setup_cardbus(struct pci_bus *bus)
@@ -329,7 +344,6 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
/* Clear upper 16 bits of I/O base/limit. */
io_upper16 = 0;
l = 0x00f0;
- dev_info(&bridge->dev, " bridge window [io disabled]\n");
}
/* Temporarily disable the I/O range before updating PCI_IO_BASE. */
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
@@ -355,7 +369,6 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus)
dev_info(&bridge->dev, " bridge window %pR\n", res);
} else {
l = 0x0000fff0;
- dev_info(&bridge->dev, " bridge window [mem disabled]\n");
}
pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
}
@@ -386,7 +399,6 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
dev_info(&bridge->dev, " bridge window %pR\n", res);
} else {
l = 0x0000fff0;
- dev_info(&bridge->dev, " bridge window [mem pref disabled]\n");
}
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
@@ -536,13 +548,27 @@ static resource_size_t calculate_memsize(resource_size_t size,
return size;
}
+static resource_size_t get_res_add_size(struct resource_list_x *realloc_head,
+ struct resource *res)
+{
+ struct resource_list_x *list;
+
+ /* check if it is in realloc_head list */
+ for (list = realloc_head->next; list && list->res != res;
+ list = list->next);
+ if (list)
+ return list->add_size;
+
+ return 0;
+}
+
/**
* pbus_size_io() - size the io window of a given bus
*
* @bus : the bus
* @min_size : the minimum io window that must be allocated
* @add_size : additional optional io window
- * @add_head : track the additional io window on this list
+ * @realloc_head : track the additional io window on this list
*
* Sizing the IO windows of the PCI-PCI bridge is trivial,
* since these windows have 4K granularity and the IO ranges
@@ -550,11 +576,12 @@ static resource_size_t calculate_memsize(resource_size_t size,
* We must be careful with the ISA aliasing though.
*/
static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
- resource_size_t add_size, struct resource_list_x *add_head)
+ resource_size_t add_size, struct resource_list_x *realloc_head)
{
struct pci_dev *dev;
struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
unsigned long size = 0, size0 = 0, size1 = 0;
+ resource_size_t children_add_size = 0;
if (!b_res)
return;
@@ -575,11 +602,16 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
size += r_size;
else
size1 += r_size;
+
+ if (realloc_head)
+ children_add_size += get_res_add_size(realloc_head, r);
}
}
size0 = calculate_iosize(size, min_size, size1,
resource_size(b_res), 4096);
- size1 = (!add_head || (add_head && !add_size)) ? size0 :
+ if (children_add_size > add_size)
+ add_size = children_add_size;
+ size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
calculate_iosize(size, min_size+add_size, size1,
resource_size(b_res), 4096);
if (!size0 && !size1) {
@@ -594,8 +626,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
b_res->start = 4096;
b_res->end = b_res->start + size0 - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
- if (size1 > size0 && add_head)
- add_to_list(add_head, bus->self, b_res, size1-size0);
+ if (size1 > size0 && realloc_head)
+ add_to_list(realloc_head, bus->self, b_res, size1-size0, 4096);
}
/**
@@ -604,7 +636,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
* @bus : the bus
* @min_size : the minimum memory window that must be allocated
* @add_size : additional optional memory window
- * @add_head : track the additional memory window on this list
+ * @realloc_head : track the additional memory window on this list
*
* Calculate the size of the bus and minimal alignment which
* guarantees that all child resources fit in this size.
@@ -612,7 +644,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
unsigned long type, resource_size_t min_size,
resource_size_t add_size,
- struct resource_list_x *add_head)
+ struct resource_list_x *realloc_head)
{
struct pci_dev *dev;
resource_size_t min_align, align, size, size0, size1;
@@ -620,6 +652,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
int order, max_order;
struct resource *b_res = find_free_bus_resource(bus, type);
unsigned int mem64_mask = 0;
+ resource_size_t children_add_size = 0;
if (!b_res)
return 0;
@@ -641,6 +674,16 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (r->parent || (r->flags & mask) != type)
continue;
r_size = resource_size(r);
+#ifdef CONFIG_PCI_IOV
+ /* put SRIOV requested res to the optional list */
+ if (realloc_head && i >= PCI_IOV_RESOURCES &&
+ i <= PCI_IOV_RESOURCE_END) {
+ r->end = r->start - 1;
+ add_to_list(realloc_head, dev, r, r_size, 0 /* don't care */);
+ children_add_size += r_size;
+ continue;
+ }
+#endif
/* For bridges size != alignment */
align = pci_resource_alignment(dev, r);
order = __ffs(align) - 20;
@@ -661,6 +704,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (order > max_order)
max_order = order;
mem64_mask &= r->flags & IORESOURCE_MEM_64;
+
+ if (realloc_head)
+ children_add_size += get_res_add_size(realloc_head, r);
}
}
align = 0;
@@ -677,7 +723,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
align += aligns[order];
}
size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
- size1 = (!add_head || (add_head && !add_size)) ? size0 :
+ if (children_add_size > add_size)
+ add_size = children_add_size;
+ size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
calculate_memsize(size, min_size+add_size, 0,
resource_size(b_res), min_align);
if (!size0 && !size1) {
@@ -691,12 +739,22 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
b_res->start = min_align;
b_res->end = size0 + min_align - 1;
b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask;
- if (size1 > size0 && add_head)
- add_to_list(add_head, bus->self, b_res, size1-size0);
+ if (size1 > size0 && realloc_head)
+ add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align);
return 1;
}
-static void pci_bus_size_cardbus(struct pci_bus *bus)
+unsigned long pci_cardbus_resource_alignment(struct resource *res)
+{
+ if (res->flags & IORESOURCE_IO)
+ return pci_cardbus_io_size;
+ if (res->flags & IORESOURCE_MEM)
+ return pci_cardbus_mem_size;
+ return 0;
+}
+
+static void pci_bus_size_cardbus(struct pci_bus *bus,
+ struct resource_list_x *realloc_head)
{
struct pci_dev *bridge = bus->self;
struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
@@ -707,12 +765,14 @@ static void pci_bus_size_cardbus(struct pci_bus *bus)
* a fixed amount of bus space for CardBus bridges.
*/
b_res[0].start = 0;
- b_res[0].end = pci_cardbus_io_size - 1;
b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
+ if (realloc_head)
+ add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, 0 /* don't care */);
b_res[1].start = 0;
- b_res[1].end = pci_cardbus_io_size - 1;
b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
+ if (realloc_head)
+ add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size, 0 /* don't care */);
/*
* Check whether prefetchable memory is supported
@@ -732,21 +792,31 @@ static void pci_bus_size_cardbus(struct pci_bus *bus)
*/
if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
b_res[2].start = 0;
- b_res[2].end = pci_cardbus_mem_size - 1;
b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;
+ if (realloc_head)
+ add_to_list(realloc_head, bridge, b_res+2, pci_cardbus_mem_size, 0 /* don't care */);
b_res[3].start = 0;
- b_res[3].end = pci_cardbus_mem_size - 1;
b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
+ if (realloc_head)
+ add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size, 0 /* don't care */);
} else {
b_res[3].start = 0;
- b_res[3].end = pci_cardbus_mem_size * 2 - 1;
b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
+ if (realloc_head)
+ add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size * 2, 0 /* don't care */);
}
+
+ /* set the size of the resource to zero, so that the resource does not
+ * get assigned during the required-resource allocation cycle but gets assigned
+ * during the optional-resource allocation cycle.
+ */
+ b_res[0].start = b_res[1].start = b_res[2].start = b_res[3].start = 1;
+ b_res[0].end = b_res[1].end = b_res[2].end = b_res[3].end = 0;
}
void __ref __pci_bus_size_bridges(struct pci_bus *bus,
- struct resource_list_x *add_head)
+ struct resource_list_x *realloc_head)
{
struct pci_dev *dev;
unsigned long mask, prefmask;
@@ -759,12 +829,12 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
switch (dev->class >> 8) {
case PCI_CLASS_BRIDGE_CARDBUS:
- pci_bus_size_cardbus(b);
+ pci_bus_size_cardbus(b, realloc_head);
break;
case PCI_CLASS_BRIDGE_PCI:
default:
- __pci_bus_size_bridges(b, add_head);
+ __pci_bus_size_bridges(b, realloc_head);
break;
}
}
@@ -788,7 +858,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
* Follow thru
*/
default:
- pbus_size_io(bus, 0, additional_io_size, add_head);
+ pbus_size_io(bus, 0, additional_io_size, realloc_head);
/* If the bridge supports prefetchable range, size it
separately. If it doesn't, or its prefetchable window
has already been allocated by arch code, try
@@ -796,11 +866,11 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
resources. */
mask = IORESOURCE_MEM;
prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, add_head))
+ if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, realloc_head))
mask = prefmask; /* Success, size non-prefetch only. */
else
additional_mem_size += additional_mem_size;
- pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, add_head);
+ pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, realloc_head);
break;
}
}
@@ -812,20 +882,20 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
EXPORT_SYMBOL(pci_bus_size_bridges);
static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
- struct resource_list_x *add_head,
+ struct resource_list_x *realloc_head,
struct resource_list_x *fail_head)
{
struct pci_bus *b;
struct pci_dev *dev;
- pbus_assign_resources_sorted(bus, add_head, fail_head);
+ pbus_assign_resources_sorted(bus, realloc_head, fail_head);
list_for_each_entry(dev, &bus->devices, bus_list) {
b = dev->subordinate;
if (!b)
continue;
- __pci_bus_assign_resources(b, add_head, fail_head);
+ __pci_bus_assign_resources(b, realloc_head, fail_head);
switch (dev->class >> 8) {
case PCI_CLASS_BRIDGE_PCI:
@@ -1025,6 +1095,7 @@ static int __init pci_get_max_depth(void)
return depth;
}
+
/*
* first try will not touch pci bridge res
* second and later try will clear small leaf bridge res
@@ -1034,7 +1105,7 @@ void __init
pci_assign_unassigned_resources(void)
{
struct pci_bus *bus;
- struct resource_list_x add_list; /* list of resources that
+ struct resource_list_x realloc_list; /* list of resources that
want additional resources */
int tried_times = 0;
enum release_type rel_type = leaf_only;
@@ -1047,7 +1118,7 @@ pci_assign_unassigned_resources(void)
head.next = NULL;
- add_list.next = NULL;
+ realloc_list.next = NULL;
pci_try_num = max_depth + 1;
printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
@@ -1057,17 +1128,24 @@ again:
/* Depth first, calculate sizes and alignments of all
subordinate buses. */
list_for_each_entry(bus, &pci_root_buses, node)
- __pci_bus_size_bridges(bus, &add_list);
+ __pci_bus_size_bridges(bus, &realloc_list);
/* Depth last, allocate resources and update the hardware. */
list_for_each_entry(bus, &pci_root_buses, node)
- __pci_bus_assign_resources(bus, &add_list, &head);
- BUG_ON(add_list.next);
+ __pci_bus_assign_resources(bus, &realloc_list, &head);
+ BUG_ON(realloc_list.next);
tried_times++;
/* any device complain? */
if (!head.next)
goto enable_and_dump;
+
+ /* don't realloc unless asked to do so */
+ if (!pci_realloc_enabled()) {
+ free_list(resource_list_x, &head);
+ goto enable_and_dump;
+ }
+
failed_type = 0;
for (list = head.next; list;) {
failed_type |= list->flags;
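
For reference, a stand-alone sketch (not part of the patch; simplified types and names) of the bookkeeping the setup-bus.c changes rely on: required sizes are assigned first, while optional growth is recorded per resource as an add_size/min_align pair on the realloc list and consumed in a second pass, much like get_res_add_size() and reassign_resources_sorted() above.

#include <stdio.h>

struct res {
        unsigned long start, end;
};

struct realloc_entry {                  /* models struct resource_list_x */
        struct res *r;
        unsigned long add_size;         /* optional extra space wanted */
        unsigned long min_align;        /* alignment for the re-assignment */
        struct realloc_entry *next;
};

/* walk the list and return the optional add_size recorded for a resource */
static unsigned long get_add_size(struct realloc_entry *head, struct res *r)
{
        for (; head; head = head->next)
                if (head->r == r)
                        return head->add_size;
        return 0;
}

int main(void)
{
        struct res bar = { 0x1000, 0x1fff };                      /* 4K required */
        struct realloc_entry e = { &bar, 0x1000, 0x1000, NULL };  /* +4K optional */
        unsigned long required = bar.end - bar.start + 1;
        unsigned long optional = get_add_size(&e, &bar);

        printf("required %#lx, optional add %#lx, total %#lx\n",
               required, optional, required + optional);
        return 0;
}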
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index eec9738f349..eb219a1d16f 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -21,7 +21,7 @@
static void __init
pdev_fixup_irq(struct pci_dev *dev,
u8 (*swizzle)(struct pci_dev *, u8 *),
- int (*map_irq)(struct pci_dev *, u8, u8))
+ int (*map_irq)(const struct pci_dev *, u8, u8))
{
u8 pin, slot;
int irq = 0;
@@ -56,7 +56,7 @@ pdev_fixup_irq(struct pci_dev *dev,
void __init
pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *),
- int (*map_irq)(struct pci_dev *, u8, u8))
+ int (*map_irq)(const struct pci_dev *, u8, u8))
{
struct pci_dev *dev = NULL;
for_each_pci_dev(dev)
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index bc0e6eea0ff..51a9095c7da 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -74,8 +74,7 @@ void pci_update_resource(struct pci_dev *dev, int resno)
resno, new, check);
}
- if ((new & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
- (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) {
+ if (res->flags & IORESOURCE_MEM_64) {
new = region.start >> 16 >> 16;
pci_write_config_dword(dev, reg + 4, new);
pci_read_config_dword(dev, reg + 4, &check);
@@ -129,16 +128,16 @@ void pci_disable_bridge_window(struct pci_dev *dev)
}
#endif /* CONFIG_PCI_QUIRKS */
+
+
static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
- int resno)
+ int resno, resource_size_t size, resource_size_t align)
{
struct resource *res = dev->resource + resno;
- resource_size_t size, min, align;
+ resource_size_t min;
int ret;
- size = resource_size(res);
min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
- align = pci_resource_alignment(dev, res);
/* First, try exact prefetching match.. */
ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -155,56 +154,101 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
pcibios_align_resource, dev);
}
+ return ret;
+}
- if (ret < 0 && dev->fw_addr[resno]) {
- struct resource *root, *conflict;
- resource_size_t start, end;
+static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
+ int resno, resource_size_t size)
+{
+ struct resource *root, *conflict;
+ resource_size_t start, end;
+ int ret = 0;
- /*
- * If we failed to assign anything, let's try the address
- * where firmware left it. That at least has a chance of
- * working, which is better than just leaving it disabled.
- */
+ if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
+ else
+ root = &iomem_resource;
+
+ start = res->start;
+ end = res->end;
+ res->start = dev->fw_addr[resno];
+ res->end = res->start + size - 1;
+ dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
+ resno, res);
+ conflict = request_resource_conflict(root, res);
+ if (conflict) {
+ dev_info(&dev->dev,
+ "BAR %d: %pR conflicts with %s %pR\n", resno,
+ res, conflict->name, conflict);
+ res->start = start;
+ res->end = end;
+ ret = 1;
+ }
+ return ret;
+}
+
+static int _pci_assign_resource(struct pci_dev *dev, int resno, resource_size_t size, resource_size_t min_align)
+{
+ struct resource *res = dev->resource + resno;
+ struct pci_bus *bus;
+ int ret;
+ char *type;
- if (res->flags & IORESOURCE_IO)
- root = &ioport_resource;
+ bus = dev->bus;
+ while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) {
+ if (!bus->parent || !bus->self->transparent)
+ break;
+ bus = bus->parent;
+ }
+
+ if (ret) {
+ if (res->flags & IORESOURCE_MEM)
+ if (res->flags & IORESOURCE_PREFETCH)
+ type = "mem pref";
+ else
+ type = "mem";
+ else if (res->flags & IORESOURCE_IO)
+ type = "io";
else
- root = &iomem_resource;
-
- start = res->start;
- end = res->end;
- res->start = dev->fw_addr[resno];
- res->end = res->start + size - 1;
- dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
- resno, res);
- conflict = request_resource_conflict(root, res);
- if (conflict) {
- dev_info(&dev->dev,
- "BAR %d: %pR conflicts with %s %pR\n", resno,
- res, conflict->name, conflict);
- res->start = start;
- res->end = end;
- } else
- ret = 0;
+ type = "unknown";
+ dev_info(&dev->dev,
+ "BAR %d: can't assign %s (size %#llx)\n",
+ resno, type, (unsigned long long) resource_size(res));
}
+ return ret;
+}
+
+int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
+ resource_size_t min_align)
+{
+ struct resource *res = dev->resource + resno;
+ resource_size_t new_size;
+ int ret;
+
+ if (!res->parent) {
+ dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resouce %pR "
+ "\n", resno, res);
+ return -EINVAL;
+ }
+
+ new_size = resource_size(res) + addsize + min_align;
+ ret = _pci_assign_resource(dev, resno, new_size, min_align);
if (!ret) {
res->flags &= ~IORESOURCE_STARTALIGN;
dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, resno);
}
-
return ret;
}
int pci_assign_resource(struct pci_dev *dev, int resno)
{
struct resource *res = dev->resource + resno;
- resource_size_t align;
+ resource_size_t align, size;
struct pci_bus *bus;
int ret;
- char *type;
align = pci_resource_alignment(dev, res);
if (!align) {
@@ -214,34 +258,27 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
}
bus = dev->bus;
- while ((ret = __pci_assign_resource(bus, dev, resno))) {
- if (bus->parent && bus->self->transparent)
- bus = bus->parent;
- else
- bus = NULL;
- if (bus)
- continue;
- break;
- }
+ size = resource_size(res);
+ ret = _pci_assign_resource(dev, resno, size, align);
- if (ret) {
- if (res->flags & IORESOURCE_MEM)
- if (res->flags & IORESOURCE_PREFETCH)
- type = "mem pref";
- else
- type = "mem";
- else if (res->flags & IORESOURCE_IO)
- type = "io";
- else
- type = "unknown";
- dev_info(&dev->dev,
- "BAR %d: can't assign %s (size %#llx)\n",
- resno, type, (unsigned long long) resource_size(res));
- }
+ /*
+ * If we failed to assign anything, let's try the address
+ * where firmware left it. That at least has a chance of
+ * working, which is better than just leaving it disabled.
+ */
+ if (ret < 0 && dev->fw_addr[resno])
+ ret = pci_revert_fw_address(res, dev, resno, size);
+ if (!ret) {
+ res->flags &= ~IORESOURCE_STARTALIGN;
+ dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
+ if (resno < PCI_BRIDGE_RESOURCES)
+ pci_update_resource(dev, resno);
+ }
return ret;
}
+
/* Sort resources by alignment */
void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
{
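
A minimal illustration (not kernel code; the values are made up) of the arithmetic pci_reassign_resource() uses above: the BAR keeps its currently assigned size and is re-allocated with enough room for the optional add on top, padded by the requested minimum alignment.

#include <stdio.h>

static unsigned long long resource_size(unsigned long long start,
                                        unsigned long long end)
{
        return end - start + 1;
}

int main(void)
{
        unsigned long long start = 0xe0000000ULL, end = 0xe00fffffULL; /* 1 MiB BAR */
        unsigned long long addsize = 0x100000;                         /* +1 MiB optional */
        unsigned long long min_align = 0x100000;
        unsigned long long new_size;

        /* mirrors: new_size = resource_size(res) + addsize + min_align */
        new_size = resource_size(start, end) + addsize + min_align;
        printf("size requested from the parent bus: %#llx\n", new_size);
        return 0;
}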
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 492b7d807fe..6fa215a3861 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -16,7 +16,7 @@
#include <xen/interface/io/pciif.h>
#include <asm/xen/pci.h>
#include <linux/interrupt.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/time.h>
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index fb33fa42d24..4902206f53d 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -283,8 +283,7 @@ static int __init at91_cf_probe(struct platform_device *pdev)
}
/* reserve chip-select regions */
- if (!request_mem_region(io->start, io->end + 1 - io->start,
- driver_name)) {
+ if (!request_mem_region(io->start, resource_size(io), driver_name)) {
status = -ENXIO;
goto fail1;
}
@@ -308,7 +307,7 @@ static int __init at91_cf_probe(struct platform_device *pdev)
return 0;
fail2:
- release_mem_region(io->start, io->end + 1 - io->start);
+ release_mem_region(io->start, resource_size(io));
fail1:
if (cf->socket.io_offset)
iounmap((void __iomem *) cf->socket.io_offset);
@@ -339,7 +338,7 @@ static int __exit at91_cf_remove(struct platform_device *pdev)
struct resource *io = cf->socket.io[0].res;
pcmcia_unregister_socket(&cf->socket);
- release_mem_region(io->start, io->end + 1 - io->start);
+ release_mem_region(io->start, resource_size(io));
iounmap((void __iomem *) cf->socket.io_offset);
if (board->irq_pin) {
free_irq(board->irq_pin, cf);
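
The at91_cf conversion above, and the similar pcmcia hunks that follow, replace open-coded length arithmetic with resource_size(). A stand-alone check of the equivalence, using a local struct rather than the kernel's struct resource:

#include <assert.h>
#include <stdio.h>

struct resource {
        unsigned long start, end;
};

static unsigned long resource_size(const struct resource *res)
{
        return res->end - res->start + 1;        /* end is inclusive */
}

int main(void)
{
        struct resource io = { .start = 0x30000000, .end = 0x300fffff };

        assert(resource_size(&io) == io.end + 1 - io.start);
        printf("chip-select region length: %#lx\n", resource_size(&io));
        return 0;
}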
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 6defd4a8168..06ad3e5e7d3 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -209,9 +209,9 @@ static int __devinit electra_cf_probe(struct platform_device *ofdev)
cf->ofdev = ofdev;
cf->mem_phys = mem.start;
- cf->mem_size = PAGE_ALIGN(mem.end - mem.start);
+ cf->mem_size = PAGE_ALIGN(resource_size(&mem));
cf->mem_base = ioremap(cf->mem_phys, cf->mem_size);
- cf->io_size = PAGE_ALIGN(io.end - io.start);
+ cf->io_size = PAGE_ALIGN(resource_size(&io));
area = __get_vm_area(cf->io_size, 0, PHB_IO_BASE, PHB_IO_END);
if (area == NULL)
diff --git a/drivers/pcmcia/pxa2xx_balloon3.c b/drivers/pcmcia/pxa2xx_balloon3.c
index 4c3e94c0ae8..f56d7de7c75 100644
--- a/drivers/pcmcia/pxa2xx_balloon3.c
+++ b/drivers/pcmcia/pxa2xx_balloon3.c
@@ -103,22 +103,12 @@ static int balloon3_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void balloon3_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void balloon3_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level balloon3_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = balloon3_pcmcia_hw_init,
.hw_shutdown = balloon3_pcmcia_hw_shutdown,
.socket_state = balloon3_pcmcia_socket_state,
.configure_socket = balloon3_pcmcia_configure_socket,
- .socket_init = balloon3_pcmcia_socket_init,
- .socket_suspend = balloon3_pcmcia_socket_suspend,
.first = 0,
.nr = 1,
};
diff --git a/drivers/pcmcia/pxa2xx_cm_x255.c b/drivers/pcmcia/pxa2xx_cm_x255.c
index 05913d0bbdb..63f4d5211ed 100644
--- a/drivers/pcmcia/pxa2xx_cm_x255.c
+++ b/drivers/pcmcia/pxa2xx_cm_x255.c
@@ -102,23 +102,12 @@ static int cmx255_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void cmx255_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void cmx255_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
-
static struct pcmcia_low_level cmx255_pcmcia_ops __initdata = {
.owner = THIS_MODULE,
.hw_init = cmx255_pcmcia_hw_init,
.hw_shutdown = cmx255_pcmcia_shutdown,
.socket_state = cmx255_pcmcia_socket_state,
.configure_socket = cmx255_pcmcia_configure_socket,
- .socket_init = cmx255_pcmcia_socket_init,
- .socket_suspend = cmx255_pcmcia_socket_suspend,
.nr = 1,
};
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index 5662646b84d..6ee42b4c3e6 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -82,23 +82,12 @@ static int cmx270_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void cmx270_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void cmx270_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
-
static struct pcmcia_low_level cmx270_pcmcia_ops __initdata = {
.owner = THIS_MODULE,
.hw_init = cmx270_pcmcia_hw_init,
.hw_shutdown = cmx270_pcmcia_shutdown,
.socket_state = cmx270_pcmcia_socket_state,
.configure_socket = cmx270_pcmcia_configure_socket,
- .socket_init = cmx270_pcmcia_socket_init,
- .socket_suspend = cmx270_pcmcia_socket_suspend,
.nr = 1,
};
diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/drivers/pcmcia/pxa2xx_colibri.c
index 443cb7fc872..c6dec572a05 100644
--- a/drivers/pcmcia/pxa2xx_colibri.c
+++ b/drivers/pcmcia/pxa2xx_colibri.c
@@ -116,14 +116,6 @@ colibri_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void colibri_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void colibri_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level colibri_pcmcia_ops = {
.owner = THIS_MODULE,
@@ -135,9 +127,6 @@ static struct pcmcia_low_level colibri_pcmcia_ops = {
.socket_state = colibri_pcmcia_socket_state,
.configure_socket = colibri_pcmcia_configure_socket,
-
- .socket_init = colibri_pcmcia_socket_init,
- .socket_suspend = colibri_pcmcia_socket_suspend,
};
static struct platform_device *colibri_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c
index 92016fe932b..aded706c0b9 100644
--- a/drivers/pcmcia/pxa2xx_mainstone.c
+++ b/drivers/pcmcia/pxa2xx_mainstone.c
@@ -128,22 +128,12 @@ static int mst_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return ret;
}
-static void mst_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void mst_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level mst_pcmcia_ops __initdata = {
.owner = THIS_MODULE,
.hw_init = mst_pcmcia_hw_init,
.hw_shutdown = mst_pcmcia_hw_shutdown,
.socket_state = mst_pcmcia_socket_state,
.configure_socket = mst_pcmcia_configure_socket,
- .socket_init = mst_pcmcia_socket_init,
- .socket_suspend = mst_pcmcia_socket_suspend,
.nr = 2,
};
diff --git a/drivers/pcmcia/pxa2xx_palmld.c b/drivers/pcmcia/pxa2xx_palmld.c
index 69f73670949..d589ad1dcd4 100644
--- a/drivers/pcmcia/pxa2xx_palmld.c
+++ b/drivers/pcmcia/pxa2xx_palmld.c
@@ -65,14 +65,6 @@ static int palmld_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void palmld_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void palmld_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level palmld_pcmcia_ops = {
.owner = THIS_MODULE,
@@ -84,9 +76,6 @@ static struct pcmcia_low_level palmld_pcmcia_ops = {
.socket_state = palmld_pcmcia_socket_state,
.configure_socket = palmld_pcmcia_configure_socket,
-
- .socket_init = palmld_pcmcia_socket_init,
- .socket_suspend = palmld_pcmcia_socket_suspend,
};
static struct platform_device *palmld_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c
index d0ad6a76bbd..9c6a04b2f71 100644
--- a/drivers/pcmcia/pxa2xx_palmtc.c
+++ b/drivers/pcmcia/pxa2xx_palmtc.c
@@ -117,14 +117,6 @@ static int palmtc_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return ret;
}
-static void palmtc_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void palmtc_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level palmtc_pcmcia_ops = {
.owner = THIS_MODULE,
@@ -136,9 +128,6 @@ static struct pcmcia_low_level palmtc_pcmcia_ops = {
.socket_state = palmtc_pcmcia_socket_state,
.configure_socket = palmtc_pcmcia_configure_socket,
-
- .socket_init = palmtc_pcmcia_socket_init,
- .socket_suspend = palmtc_pcmcia_socket_suspend,
};
static struct platform_device *palmtc_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
index 1a258045040..80645a688ee 100644
--- a/drivers/pcmcia/pxa2xx_palmtx.c
+++ b/drivers/pcmcia/pxa2xx_palmtx.c
@@ -67,14 +67,6 @@ palmtx_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void palmtx_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void palmtx_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level palmtx_pcmcia_ops = {
.owner = THIS_MODULE,
@@ -86,9 +78,6 @@ static struct pcmcia_low_level palmtx_pcmcia_ops = {
.socket_state = palmtx_pcmcia_socket_state,
.configure_socket = palmtx_pcmcia_configure_socket,
-
- .socket_init = palmtx_pcmcia_socket_init,
- .socket_suspend = palmtx_pcmcia_socket_suspend,
};
static struct platform_device *palmtx_pcmcia_device;
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index 81af2b3bcc0..69ae2fd2240 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -48,9 +48,6 @@ static int sharpsl_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
int ret;
- if (platform_scoop_config->pcmcia_init)
- platform_scoop_config->pcmcia_init();
-
/* Register interrupts */
if (SCOOP_DEV[skt->nr].cd_irq >= 0) {
struct pcmcia_irqs cd_irq;
diff --git a/drivers/pcmcia/pxa2xx_stargate2.c b/drivers/pcmcia/pxa2xx_stargate2.c
index d08802fe35f..939622251df 100644
--- a/drivers/pcmcia/pxa2xx_stargate2.c
+++ b/drivers/pcmcia/pxa2xx_stargate2.c
@@ -28,7 +28,6 @@
#include "soc_common.h"
-#define SG2_S0_BUFF_CTL 120
#define SG2_S0_POWER_CTL 108
#define SG2_S0_GPIO_RESET 82
#define SG2_S0_GPIO_DETECT 53
@@ -38,6 +37,11 @@ static struct pcmcia_irqs irqs[] = {
{ 0, IRQ_GPIO(SG2_S0_GPIO_DETECT), "PCMCIA0 CD" },
};
+static struct gpio sg2_pcmcia_gpios[] = {
+ { SG2_S0_GPIO_RESET, GPIOF_OUT_INIT_HIGH, "PCMCIA Reset" },
+ { SG2_S0_POWER_CTL, GPIOF_OUT_INIT_HIGH, "PCMCIA Power Ctrl" },
+};
+
static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
skt->socket.pci_irq = IRQ_GPIO(SG2_S0_GPIO_READY);
@@ -122,37 +126,23 @@ static int __init sg2_pcmcia_init(void)
if (!sg2_pcmcia_device)
return -ENOMEM;
- ret = gpio_request(SG2_S0_BUFF_CTL, "SG2 CF buff ctl");
+ ret = gpio_request_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
if (ret)
goto error_put_platform_device;
- ret = gpio_request(SG2_S0_POWER_CTL, "SG2 CF power ctl");
- if (ret)
- goto error_free_gpio_buff_ctl;
- ret = gpio_request(SG2_S0_GPIO_RESET, "SG2 CF reset");
- if (ret)
- goto error_free_gpio_power_ctl;
- /* Set gpio directions */
- gpio_direction_output(SG2_S0_BUFF_CTL, 0);
- gpio_direction_output(SG2_S0_POWER_CTL, 1);
- gpio_direction_output(SG2_S0_GPIO_RESET, 1);
ret = platform_device_add_data(sg2_pcmcia_device,
&sg2_pcmcia_ops,
sizeof(sg2_pcmcia_ops));
if (ret)
- goto error_free_gpio_reset;
+ goto error_free_gpios;
ret = platform_device_add(sg2_pcmcia_device);
if (ret)
- goto error_free_gpio_reset;
+ goto error_free_gpios;
return 0;
-error_free_gpio_reset:
- gpio_free(SG2_S0_GPIO_RESET);
-error_free_gpio_power_ctl:
- gpio_free(SG2_S0_POWER_CTL);
-error_free_gpio_buff_ctl:
- gpio_free(SG2_S0_BUFF_CTL);
+error_free_gpios:
+ gpio_free_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
error_put_platform_device:
platform_device_put(sg2_pcmcia_device);
@@ -162,9 +152,7 @@ error_put_platform_device:
static void __exit sg2_pcmcia_exit(void)
{
platform_device_unregister(sg2_pcmcia_device);
- gpio_free(SG2_S0_BUFF_CTL);
- gpio_free(SG2_S0_POWER_CTL);
- gpio_free(SG2_S0_GPIO_RESET);
+ gpio_free_array(sg2_pcmcia_gpios, ARRAY_SIZE(sg2_pcmcia_gpios));
}
fs_initcall(sg2_pcmcia_init);
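
The stargate2 change above trades three gpio_request()/gpio_free() pairs for one table handled by gpio_request_array()/gpio_free_array(). The essential behaviour is "acquire everything or roll back what was acquired"; a stand-alone model of that pattern (types and helpers are illustrative, not the kernel API):

#include <stdio.h>

struct gpio_desc {
        int gpio;
        const char *label;
};

static int request_one(const struct gpio_desc *d)
{
        printf("request %d (%s)\n", d->gpio, d->label);
        return 0;                               /* pretend success */
}

static void free_one(const struct gpio_desc *d)
{
        printf("free %d (%s)\n", d->gpio, d->label);
}

static int request_array(const struct gpio_desc *a, int n)
{
        int i, err;

        for (i = 0; i < n; i++) {
                err = request_one(&a[i]);
                if (err)
                        goto undo;
        }
        return 0;
undo:
        while (--i >= 0)                        /* release only what succeeded */
                free_one(&a[i]);
        return err;
}

int main(void)
{
        static const struct gpio_desc gpios[] = {
                { 82, "PCMCIA Reset" },
                { 108, "PCMCIA Power Ctrl" },
        };

        return request_array(gpios, 2);
}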
diff --git a/drivers/pcmcia/pxa2xx_trizeps4.c b/drivers/pcmcia/pxa2xx_trizeps4.c
index b829e655457..57ddb969d88 100644
--- a/drivers/pcmcia/pxa2xx_trizeps4.c
+++ b/drivers/pcmcia/pxa2xx_trizeps4.c
@@ -55,10 +55,6 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
}
skt->socket.pci_irq = IRQ_GPIO(GPIO_PRDY);
break;
-
-#ifndef CONFIG_MACH_TRIZEPS_CONXS
- case 1:
-#endif
default:
break;
}
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/drivers/pcmcia/pxa2xx_viper.c
index a51f2077644..1064b1c2869 100644
--- a/drivers/pcmcia/pxa2xx_viper.c
+++ b/drivers/pcmcia/pxa2xx_viper.c
@@ -136,22 +136,12 @@ static int viper_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
return 0;
}
-static void viper_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
-{
-}
-
-static void viper_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
-}
-
static struct pcmcia_low_level viper_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = viper_pcmcia_hw_init,
.hw_shutdown = viper_pcmcia_hw_shutdown,
.socket_state = viper_pcmcia_socket_state,
.configure_socket = viper_pcmcia_configure_socket,
- .socket_init = viper_pcmcia_socket_init,
- .socket_suspend = viper_pcmcia_socket_suspend,
.nr = 1,
};
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
index 712baab3c83..e956f659089 100644
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -76,10 +76,10 @@ static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
if (skt->nr == 0)
- gpio_request_array(vpac270_pcmcia_gpios,
+ gpio_free_array(vpac270_pcmcia_gpios,
ARRAY_SIZE(vpac270_pcmcia_gpios));
else
- gpio_request_array(vpac270_cf_gpios,
+ gpio_free_array(vpac270_cf_gpios,
ARRAY_SIZE(vpac270_cf_gpios));
}
diff --git a/drivers/pcmcia/rsrc_iodyn.c b/drivers/pcmcia/rsrc_iodyn.c
index 523eb691c30..f53c237bda2 100644
--- a/drivers/pcmcia/rsrc_iodyn.c
+++ b/drivers/pcmcia/rsrc_iodyn.c
@@ -135,7 +135,7 @@ static int iodyn_find_io(struct pcmcia_socket *s, unsigned int attr,
try = res->end + 1;
if ((*base == 0) || (*base == try)) {
if (adjust_resource(s->io[i].res, res->start,
- res->end - res->start + num + 1))
+ resource_size(res) + num))
continue;
*base = try;
s->io[i].InUse += num;
@@ -147,8 +147,8 @@ static int iodyn_find_io(struct pcmcia_socket *s, unsigned int attr,
try = res->start - num;
if ((*base == 0) || (*base == try)) {
if (adjust_resource(s->io[i].res,
- res->start - num,
- res->end - res->start + num + 1))
+ res->start - num,
+ resource_size(res) + num))
continue;
*base = try;
s->io[i].InUse += num;
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index b187555d438..9da9656242a 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -770,7 +770,7 @@ static int nonstatic_find_io(struct pcmcia_socket *s, unsigned int attr,
res->end + num);
if (!ret) {
ret = adjust_resource(s->io[i].res, res->start,
- res->end - res->start + num + 1);
+ resource_size(res) + num);
if (ret)
continue;
*base = try;
@@ -788,8 +788,8 @@ static int nonstatic_find_io(struct pcmcia_socket *s, unsigned int attr,
res->end);
if (!ret) {
ret = adjust_resource(s->io[i].res,
- res->start - num,
- res->end - res->start + num + 1);
+ res->start - num,
+ resource_size(res) + num);
if (ret)
continue;
*base = try;
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 768f9572a8c..a0a9c2aa8d7 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -186,8 +186,8 @@ static int soc_common_pcmcia_sock_init(struct pcmcia_socket *sock)
struct soc_pcmcia_socket *skt = to_soc_pcmcia_socket(sock);
debug(skt, 2, "initializing socket\n");
-
- skt->ops->socket_init(skt);
+ if (skt->ops->socket_init)
+ skt->ops->socket_init(skt);
return 0;
}
@@ -207,7 +207,8 @@ static int soc_common_pcmcia_suspend(struct pcmcia_socket *sock)
debug(skt, 2, "suspending socket\n");
- skt->ops->socket_suspend(skt);
+ if (skt->ops->socket_suspend)
+ skt->ops->socket_suspend(skt);
return 0;
}
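
With soc_common.c now checking the callbacks for NULL, board files no longer need empty socket_init()/socket_suspend() stubs, which is what the pxa2xx removals above exploit. A stand-alone model of optional callbacks (struct and names illustrative):

#include <stdio.h>

struct ops {
        void (*socket_init)(void);
        void (*socket_suspend)(void);
};

static void socket_init_impl(void)
{
        printf("init\n");
}

int main(void)
{
        struct ops board = { .socket_init = socket_init_impl }; /* no suspend hook */

        if (board.socket_init)
                board.socket_init();
        if (board.socket_suspend)       /* NULL: simply skipped, no stub needed */
                board.socket_suspend();
        return 0;
}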
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 45e0191c35d..1e88d478532 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -769,4 +769,12 @@ config INTEL_OAKTRAIL
enable/disable the Camera, WiFi, BT etc. devices. If in doubt, say Y
here; it will only load on supported platforms.
+config SAMSUNG_Q10
+ tristate "Samsung Q10 Extras"
+ depends on SERIO_I8042
+ select BACKLIGHT_CLASS_DEVICE
+ ---help---
+ This driver provides support for backlight control on Samsung Q10
+ and related laptops, including Dell Latitude X200.
+
endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index afc1f832aa6..293a320d9fa 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
+obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 005417bd429..af2bb20cb2f 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -99,6 +99,7 @@ enum acer_wmi_event_ids {
static const struct key_entry acer_wmi_keymap[] = {
{KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */
{KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */
+ {KE_KEY, 0x04, {KEY_WLAN} }, /* WiFi */
{KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */
{KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */
{KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
@@ -304,6 +305,10 @@ static struct quirk_entry quirk_fujitsu_amilo_li_1718 = {
.wireless = 2,
};
+static struct quirk_entry quirk_lenovo_ideapad_s205 = {
+ .wireless = 3,
+};
+
/* The Aspire One has a dummy ACPI-WMI interface - disable it */
static struct dmi_system_id __devinitdata acer_blacklist[] = {
{
@@ -450,6 +455,15 @@ static struct dmi_system_id acer_quirks[] = {
},
.driver_data = &quirk_medion_md_98300,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Lenovo Ideapad S205",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "10382LG"),
+ },
+ .driver_data = &quirk_lenovo_ideapad_s205,
+ },
{}
};
@@ -542,6 +556,12 @@ struct wmi_interface *iface)
return AE_ERROR;
*value = result & 0x1;
return AE_OK;
+ case 3:
+ err = ec_read(0x78, &result);
+ if (err)
+ return AE_ERROR;
+ *value = result & 0x1;
+ return AE_OK;
default:
err = ec_read(0xA, &result);
if (err)
@@ -1156,9 +1176,9 @@ static acpi_status wmid3_set_device_status(u32 value, u16 device)
struct wmid3_gds_input_param params = {
.function_num = 0x1,
.hotkey_number = 0x01,
- .devices = ACER_WMID3_GDS_WIRELESS &
- ACER_WMID3_GDS_THREEG &
- ACER_WMID3_GDS_WIMAX &
+ .devices = ACER_WMID3_GDS_WIRELESS |
+ ACER_WMID3_GDS_THREEG |
+ ACER_WMID3_GDS_WIMAX |
ACER_WMID3_GDS_BLUETOOTH,
};
struct acpi_buffer input = {
@@ -1266,8 +1286,13 @@ static void acer_rfkill_update(struct work_struct *ignored)
acpi_status status;
status = get_u32(&state, ACER_CAP_WIRELESS);
- if (ACPI_SUCCESS(status))
- rfkill_set_sw_state(wireless_rfkill, !state);
+ if (ACPI_SUCCESS(status)) {
+ if (quirks->wireless == 3) {
+ rfkill_set_hw_state(wireless_rfkill, !state);
+ } else {
+ rfkill_set_sw_state(wireless_rfkill, !state);
+ }
+ }
if (has_cap(ACER_CAP_BLUETOOTH)) {
status = get_u32(&state, ACER_CAP_BLUETOOTH);
@@ -1400,6 +1425,9 @@ static ssize_t show_bool_threeg(struct device *dev,
{
u32 result; \
acpi_status status;
+
+ pr_info("This threeg sysfs will be removed in 2012"
+ " - used by: %s\n", current->comm);
if (wmi_has_guid(WMID_GUID3))
status = wmid3_get_device_status(&result,
ACER_WMID3_GDS_THREEG);
@@ -1415,8 +1443,10 @@ static ssize_t set_bool_threeg(struct device *dev,
{
u32 tmp = simple_strtoul(buf, NULL, 10);
acpi_status status = set_u32(tmp, ACER_CAP_THREEG);
- if (ACPI_FAILURE(status))
- return -EINVAL;
+ pr_info("This threeg sysfs will be removed in 2012"
+ " - used by: %s\n", current->comm);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
return count;
}
static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
@@ -1425,6 +1455,8 @@ static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
char *buf)
{
+ pr_info("This interface sysfs will be removed in 2012"
+ " - used by: %s\n", current->comm);
switch (interface->type) {
case ACER_AMW0:
return sprintf(buf, "AMW0\n");
@@ -1445,6 +1477,8 @@ static void acer_wmi_notify(u32 value, void *context)
union acpi_object *obj;
struct event_return_value return_value;
acpi_status status;
+ u16 device_state;
+ const struct key_entry *key;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
@@ -1472,23 +1506,32 @@ static void acer_wmi_notify(u32 value, void *context)
switch (return_value.function) {
case WMID_HOTKEY_EVENT:
- if (return_value.device_state) {
- u16 device_state = return_value.device_state;
- pr_debug("device state: 0x%x\n", device_state);
- if (has_cap(ACER_CAP_WIRELESS))
- rfkill_set_sw_state(wireless_rfkill,
- !(device_state & ACER_WMID3_GDS_WIRELESS));
- if (has_cap(ACER_CAP_BLUETOOTH))
- rfkill_set_sw_state(bluetooth_rfkill,
- !(device_state & ACER_WMID3_GDS_BLUETOOTH));
- if (has_cap(ACER_CAP_THREEG))
- rfkill_set_sw_state(threeg_rfkill,
- !(device_state & ACER_WMID3_GDS_THREEG));
- }
- if (!sparse_keymap_report_event(acer_wmi_input_dev,
- return_value.key_num, 1, true))
+ device_state = return_value.device_state;
+ pr_debug("device state: 0x%x\n", device_state);
+
+ key = sparse_keymap_entry_from_scancode(acer_wmi_input_dev,
+ return_value.key_num);
+ if (!key) {
pr_warn("Unknown key number - 0x%x\n",
return_value.key_num);
+ } else {
+ switch (key->keycode) {
+ case KEY_WLAN:
+ case KEY_BLUETOOTH:
+ if (has_cap(ACER_CAP_WIRELESS))
+ rfkill_set_sw_state(wireless_rfkill,
+ !(device_state & ACER_WMID3_GDS_WIRELESS));
+ if (has_cap(ACER_CAP_THREEG))
+ rfkill_set_sw_state(threeg_rfkill,
+ !(device_state & ACER_WMID3_GDS_THREEG));
+ if (has_cap(ACER_CAP_BLUETOOTH))
+ rfkill_set_sw_state(bluetooth_rfkill,
+ !(device_state & ACER_WMID3_GDS_BLUETOOTH));
+ break;
+ }
+ sparse_keymap_report_entry(acer_wmi_input_dev, key,
+ 1, true);
+ }
break;
default:
pr_warn("Unknown function number - %d - %d\n",
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index fca3489218b..760c6d7624f 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -182,6 +182,7 @@ static const struct bios_settings_t bios_tbl[] = {
{"Acer", "Aspire 1810T", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
{"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
{"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
+ {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
/* Acer 531 */
{"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} },
/* Gateway */
@@ -703,15 +704,15 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Feuerer");
MODULE_DESCRIPTION("Aspire One temperature and fan driver");
MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
-MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1410*:");
-MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1810*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:");
MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMU*:");
-MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:");
module_init(acerhdf_init);
module_exit(acerhdf_exit);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index d65df92e2ac..fa6d7ec68b2 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -70,11 +70,10 @@ MODULE_LICENSE("GPL");
* WAPF defines the behavior of the Fn+Fx wlan key
* The significance of values is yet to be found, but
* most of the time:
- * 0x0 will do nothing
- * 0x1 will allow to control the device with Fn+Fx key.
- * 0x4 will send an ACPI event (0x88) while pressing the Fn+Fx key
- * 0x5 like 0x1 or 0x4
- * So, if something doesn't work as you want, just try other values =)
+ * Bit | Bluetooth | WLAN
+ * 0 | Hardware | Hardware
+ * 1 | Hardware | Software
+ * 4 | Software | Software
*/
static uint wapf = 1;
module_param(wapf, uint, 0444);
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 0580d99b079..b0859d4183e 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -38,6 +38,24 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:"ASUS_NB_WMI_EVENT_GUID);
+/*
+ * WAPF defines the behavior of the Fn+Fx wlan key
+ * The significance of values is yet to be found, but
+ * most of the time:
+ * Bit | Bluetooth | WLAN
+ * 0 | Hardware | Hardware
+ * 1 | Hardware | Software
+ * 4 | Software | Software
+ */
+static uint wapf;
+module_param(wapf, uint, 0444);
+MODULE_PARM_DESC(wapf, "WAPF value");
+
+static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
+{
+ driver->wapf = wapf;
+}
+
static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
@@ -53,16 +71,16 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x51, { KEY_WWW } },
{ KE_KEY, 0x55, { KEY_CALC } },
{ KE_KEY, 0x5C, { KEY_F15 } }, /* Power Gear key */
- { KE_KEY, 0x5D, { KEY_WLAN } },
- { KE_KEY, 0x5E, { KEY_WLAN } },
- { KE_KEY, 0x5F, { KEY_WLAN } },
+ { KE_KEY, 0x5D, { KEY_WLAN } }, /* Wireless console Toggle */
+ { KE_KEY, 0x5E, { KEY_WLAN } }, /* Wireless console Enable */
+ { KE_KEY, 0x5F, { KEY_WLAN } }, /* Wireless console Disable */
{ KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
- { KE_KEY, 0x7E, { KEY_BLUETOOTH } },
{ KE_KEY, 0x7D, { KEY_BLUETOOTH } },
+ { KE_KEY, 0x7E, { KEY_BLUETOOTH } },
{ KE_KEY, 0x82, { KEY_CAMERA } },
{ KE_KEY, 0x88, { KEY_RFKILL } },
{ KE_KEY, 0x8A, { KEY_PROG1 } },
@@ -81,6 +99,7 @@ static struct asus_wmi_driver asus_nb_wmi_driver = {
.keymap = asus_nb_wmi_keymap,
.input_name = "Asus WMI hotkeys",
.input_phys = ASUS_NB_WMI_FILE "/input0",
+ .quirks = asus_nb_wmi_quirks,
};
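
asus-nb-wmi above gains a wapf module parameter and hands it to the shared asus-wmi core through its quirks() hook. A stand-alone model of that parameter hand-off (types and names illustrative):

#include <stdio.h>

struct wmi_driver {
        int wapf;
        void (*quirks)(struct wmi_driver *drv);
};

static int wapf_param = 1;                      /* stands in for module_param(wapf, ...) */

static void nb_quirks(struct wmi_driver *drv)
{
        drv->wapf = wapf_param;                 /* copy the parameter into the driver */
}

int main(void)
{
        struct wmi_driver drv = { .wapf = -1, .quirks = nb_quirks };

        if (drv.quirks)
                drv.quirks(&drv);
        printf("effective wapf: %d\n", drv.wapf);
        return 0;
}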
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 00460cb9587..95cba9ebf6c 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -44,6 +44,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
+#include <linux/thermal.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -66,6 +67,8 @@ MODULE_LICENSE("GPL");
#define NOTIFY_BRNUP_MAX 0x1f
#define NOTIFY_BRNDOWN_MIN 0x20
#define NOTIFY_BRNDOWN_MAX 0x2e
+#define NOTIFY_KBD_BRTUP 0xc4
+#define NOTIFY_KBD_BRTDWN 0xc5
/* WMI Methods */
#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
@@ -93,6 +96,7 @@ MODULE_LICENSE("GPL");
/* Wireless */
#define ASUS_WMI_DEVID_HW_SWITCH 0x00010001
#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002
+#define ASUS_WMI_DEVID_CWAP 0x00010003
#define ASUS_WMI_DEVID_WLAN 0x00010011
#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013
#define ASUS_WMI_DEVID_GPS 0x00010015
@@ -102,6 +106,12 @@ MODULE_LICENSE("GPL");
/* Leds */
/* 0x000200XX and 0x000400XX */
+#define ASUS_WMI_DEVID_LED1 0x00020011
+#define ASUS_WMI_DEVID_LED2 0x00020012
+#define ASUS_WMI_DEVID_LED3 0x00020013
+#define ASUS_WMI_DEVID_LED4 0x00020014
+#define ASUS_WMI_DEVID_LED5 0x00020015
+#define ASUS_WMI_DEVID_LED6 0x00020016
/* Backlight and Brightness */
#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
@@ -174,13 +184,18 @@ struct asus_wmi {
struct led_classdev tpd_led;
int tpd_led_wk;
+ struct led_classdev kbd_led;
+ int kbd_led_wk;
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
+ struct work_struct kbd_led_work;
struct asus_rfkill wlan;
struct asus_rfkill bluetooth;
struct asus_rfkill wimax;
struct asus_rfkill wwan3g;
+ struct asus_rfkill gps;
+ struct asus_rfkill uwb;
struct hotplug_slot *hotplug_slot;
struct mutex hotplug_lock;
@@ -205,6 +220,7 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
asus->inputdev->phys = asus->driver->input_phys;
asus->inputdev->id.bustype = BUS_HOST;
asus->inputdev->dev.parent = &asus->platform_device->dev;
+ set_bit(EV_REP, asus->inputdev->evbit);
err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL);
if (err)
@@ -359,30 +375,80 @@ static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
return read_tpd_led_state(asus);
}
-static int asus_wmi_led_init(struct asus_wmi *asus)
+static void kbd_led_update(struct work_struct *work)
{
- int rv;
+ int ctrl_param = 0;
+ struct asus_wmi *asus;
- if (read_tpd_led_state(asus) < 0)
- return 0;
+ asus = container_of(work, struct asus_wmi, kbd_led_work);
- asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
- if (!asus->led_workqueue)
- return -ENOMEM;
- INIT_WORK(&asus->tpd_led_work, tpd_led_update);
+ /*
+ * bits 0-2: level
+ * bit 7: light on/off
+ */
+ if (asus->kbd_led_wk > 0)
+ ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
- asus->tpd_led.name = "asus::touchpad";
- asus->tpd_led.brightness_set = tpd_led_set;
- asus->tpd_led.brightness_get = tpd_led_get;
- asus->tpd_led.max_brightness = 1;
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
+}
- rv = led_classdev_register(&asus->platform_device->dev, &asus->tpd_led);
- if (rv) {
- destroy_workqueue(asus->led_workqueue);
- return rv;
+static int kbd_led_read(struct asus_wmi *asus, int *level, int *env)
+{
+ int retval;
+
+ /*
+ * bits 0-2: level
+ * bit 7: light on/off
+ * bits 8-10: environment (0: dark, 1: normal, 2: light)
+ * bit 17: status unknown
+ */
+ retval = asus_wmi_get_devstate_bits(asus, ASUS_WMI_DEVID_KBD_BACKLIGHT,
+ 0xFFFF);
+
+ /* Unknown status is considered as off */
+ if (retval == 0x8000)
+ retval = 0;
+
+ if (retval >= 0) {
+ if (level)
+ *level = retval & 0x80 ? retval & 0x7F : 0;
+ if (env)
+ *env = (retval >> 8) & 0x7F;
+ retval = 0;
}
- return 0;
+ return retval;
+}
+
+static void kbd_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct asus_wmi *asus;
+
+ asus = container_of(led_cdev, struct asus_wmi, kbd_led);
+
+ if (value > asus->kbd_led.max_brightness)
+ value = asus->kbd_led.max_brightness;
+ else if (value < 0)
+ value = 0;
+
+ asus->kbd_led_wk = value;
+ queue_work(asus->led_workqueue, &asus->kbd_led_work);
+}
+
+static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
+{
+ struct asus_wmi *asus;
+ int retval, value;
+
+ asus = container_of(led_cdev, struct asus_wmi, kbd_led);
+
+ retval = kbd_led_read(asus, &value, NULL);
+
+ if (retval < 0)
+ return retval;
+
+ return value;
}
static void asus_wmi_led_exit(struct asus_wmi *asus)
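
A stand-alone sketch (made-up raw value) of the status decoding kbd_led_read() above performs on the keyboard-backlight device state: bits 0-2 carry the level, bit 7 the on/off flag, bits 8-10 the ambient-light environment.

#include <stdio.h>

int main(void)
{
        unsigned int raw = 0x0183;              /* illustrative register value */
        int on = !!(raw & 0x80);
        int level = on ? (raw & 0x7f) : 0;      /* same masking as kbd_led_read() */
        int env = (raw >> 8) & 0x7f;

        printf("backlight %s, level %d, environment %d\n",
               on ? "on" : "off", level, env);
        return 0;
}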
@@ -393,6 +459,48 @@ static void asus_wmi_led_exit(struct asus_wmi *asus)
destroy_workqueue(asus->led_workqueue);
}
+static int asus_wmi_led_init(struct asus_wmi *asus)
+{
+ int rv = 0;
+
+ asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
+ if (!asus->led_workqueue)
+ return -ENOMEM;
+
+ if (read_tpd_led_state(asus) >= 0) {
+ INIT_WORK(&asus->tpd_led_work, tpd_led_update);
+
+ asus->tpd_led.name = "asus::touchpad";
+ asus->tpd_led.brightness_set = tpd_led_set;
+ asus->tpd_led.brightness_get = tpd_led_get;
+ asus->tpd_led.max_brightness = 1;
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->tpd_led);
+ if (rv)
+ goto error;
+ }
+
+ if (kbd_led_read(asus, NULL, NULL) >= 0) {
+ INIT_WORK(&asus->kbd_led_work, kbd_led_update);
+
+ asus->kbd_led.name = "asus::kbd_backlight";
+ asus->kbd_led.brightness_set = kbd_led_set;
+ asus->kbd_led.brightness_get = kbd_led_get;
+ asus->kbd_led.max_brightness = 3;
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->kbd_led);
+ }
+
+error:
+ if (rv)
+ asus_wmi_led_exit(asus);
+
+ return rv;
+}
+
+
/*
* PCI hotplug (for wlan rfkill)
*/
@@ -729,6 +837,16 @@ static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
rfkill_destroy(asus->wwan3g.rfkill);
asus->wwan3g.rfkill = NULL;
}
+ if (asus->gps.rfkill) {
+ rfkill_unregister(asus->gps.rfkill);
+ rfkill_destroy(asus->gps.rfkill);
+ asus->gps.rfkill = NULL;
+ }
+ if (asus->uwb.rfkill) {
+ rfkill_unregister(asus->uwb.rfkill);
+ rfkill_destroy(asus->uwb.rfkill);
+ asus->uwb.rfkill = NULL;
+ }
}
static int asus_wmi_rfkill_init(struct asus_wmi *asus)
@@ -763,6 +881,18 @@ static int asus_wmi_rfkill_init(struct asus_wmi *asus)
if (result && result != -ENODEV)
goto exit;
+ result = asus_new_rfkill(asus, &asus->gps, "asus-gps",
+ RFKILL_TYPE_GPS, ASUS_WMI_DEVID_GPS);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
+ result = asus_new_rfkill(asus, &asus->uwb, "asus-uwb",
+ RFKILL_TYPE_UWB, ASUS_WMI_DEVID_UWB);
+
+ if (result && result != -ENODEV)
+ goto exit;
+
if (!asus->driver->hotplug_wireless)
goto exit;
@@ -797,8 +927,8 @@ exit:
* Hwmon device
*/
static ssize_t asus_hwmon_pwm1(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct asus_wmi *asus = dev_get_drvdata(dev);
u32 value;
@@ -809,7 +939,7 @@ static ssize_t asus_hwmon_pwm1(struct device *dev,
if (err < 0)
return err;
- value |= 0xFF;
+ value &= 0xFF;
if (value == 1) /* Low Speed */
value = 85;
@@ -825,7 +955,26 @@ static ssize_t asus_hwmon_pwm1(struct device *dev,
return sprintf(buf, "%d\n", value);
}
+static ssize_t asus_hwmon_temp1(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ u32 value;
+ int err;
+
+ err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_THERMAL_CTRL, &value);
+
+ if (err < 0)
+ return err;
+
+ value = KELVIN_TO_CELSIUS((value & 0xFFFF)) * 1000;
+
+ return sprintf(buf, "%d\n", value);
+}
+
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL, 0);
static ssize_t
show_name(struct device *dev, struct device_attribute *attr, char *buf)
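
asus_hwmon_temp1() above converts the WMI thermal reading for hwmon, which expects millidegrees Celsius. Assuming the firmware reports tenths of a kelvin (which is what the KELVIN_TO_CELSIUS() macro of this era expects), the conversion is shown by this stand-alone sketch:

#include <stdio.h>

static long kelvin_to_celsius(long deci_kelvin)  /* models KELVIN_TO_CELSIUS() */
{
        long d = deci_kelvin - 2732;

        return (d >= 0 ? d + 5 : d - 5) / 10;    /* rounded to whole degrees */
}

int main(void)
{
        unsigned int raw = 3132;                 /* 313.2 K, illustrative */
        long millicelsius = kelvin_to_celsius(raw) * 1000;

        printf("temp1_input: %ld\n", millicelsius); /* 40000 == 40.0 degC */
        return 0;
}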
@@ -836,12 +985,13 @@ static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_name.dev_attr.attr,
NULL
};
static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
- struct attribute *attr, int idx)
+ struct attribute *attr, int idx)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct platform_device *pdev = to_platform_device(dev->parent);
@@ -852,12 +1002,14 @@ static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
if (attr == &sensor_dev_attr_pwm1.dev_attr.attr)
dev_id = ASUS_WMI_DEVID_FAN_CTRL;
+ else if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr)
+ dev_id = ASUS_WMI_DEVID_THERMAL_CTRL;
if (dev_id != -1) {
int err = asus_wmi_get_devstate(asus, dev_id, &value);
if (err < 0)
- return err;
+ return 0; /* can't return negative here */
}
if (dev_id == ASUS_WMI_DEVID_FAN_CTRL) {
@@ -869,9 +1021,13 @@ static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
* - reserved bits are non-zero
* - sfun and presence bit are not set
*/
- if (value != ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000
+ if (value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000
|| (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT)))
ok = false;
+ } else if (dev_id == ASUS_WMI_DEVID_THERMAL_CTRL) {
+ /* If value is zero, something is clearly wrong */
+ if (value == 0)
+ ok = false;
}
return ok ? attr->mode : 0;
@@ -904,6 +1060,7 @@ static int asus_wmi_hwmon_init(struct asus_wmi *asus)
pr_err("Could not register asus hwmon device\n");
return PTR_ERR(hwmon);
}
+ dev_set_drvdata(hwmon, asus);
asus->hwmon_device = hwmon;
result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group);
if (result)
@@ -1025,6 +1182,7 @@ static int asus_wmi_backlight_init(struct asus_wmi *asus)
return power;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = max;
bd = backlight_device_register(asus->driver->name,
&asus->platform_device->dev, asus,
@@ -1059,6 +1217,8 @@ static void asus_wmi_notify(u32 value, void *context)
acpi_status status;
int code;
int orig_code;
+ unsigned int key_value = 1;
+ bool autorelease = 1;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
@@ -1074,6 +1234,13 @@ static void asus_wmi_notify(u32 value, void *context)
code = obj->integer.value;
orig_code = code;
+ if (asus->driver->key_filter) {
+ asus->driver->key_filter(asus->driver, &code, &key_value,
+ &autorelease);
+ if (code == ASUS_WMI_KEY_IGNORE)
+ goto exit;
+ }
+
if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
code = NOTIFY_BRNUP_MIN;
else if (code >= NOTIFY_BRNDOWN_MIN &&
@@ -1083,7 +1250,8 @@ static void asus_wmi_notify(u32 value, void *context)
if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) {
if (!acpi_video_backlight_support())
asus_wmi_backlight_notify(asus, orig_code);
- } else if (!sparse_keymap_report_event(asus->inputdev, code, 1, true))
+ } else if (!sparse_keymap_report_event(asus->inputdev, code,
+ key_value, autorelease))
pr_info("Unknown key %x pressed\n", code);
exit:
@@ -1163,14 +1331,18 @@ ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int value;
+ int value, rv;
if (!count || sscanf(buf, "%i", &value) != 1)
return -EINVAL;
if (value < 0 || value > 2)
return -EINVAL;
- return asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL);
+ rv = asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL);
+ if (rv < 0)
+ return rv;
+
+ return count;
}
static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv);
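
The store_cpufv() rework above also follows the sysfs convention that a store callback returns the number of bytes it consumed (normally count) on success and a negative errno on failure; returning the raw WMI status would make a successful write look as if it consumed nothing. A hedged sketch of that shape, with do_apply_value() as a stand-in helper:

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
{
        int value, err;

        if (!count || sscanf(buf, "%i", &value) != 1)
                return -EINVAL;

        err = do_apply_value(dev, value);       /* hypothetical helper */
        if (err < 0)
                return err;                     /* propagate the failure */

        return count;                           /* everything was consumed */
}
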
@@ -1233,7 +1405,7 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
/* We don't know yet what to do with this version... */
if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) {
- pr_info("BIOS WMI version: %d.%d", rv >> 8, rv & 0xFF);
+ pr_info("BIOS WMI version: %d.%d", rv >> 16, rv & 0xFF);
asus->spec = rv;
}
@@ -1265,6 +1437,12 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
return -ENODEV;
}
+ /* CWAP allows defining the behavior of the Fn+F2 key;
+ * this method doesn't seem to be present on Eee PCs */
+ if (asus->driver->wapf >= 0)
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_CWAP,
+ asus->driver->wapf, NULL);
+
return asus_wmi_sysfs_init(asus->platform_device);
}
@@ -1567,6 +1745,14 @@ static int asus_hotk_restore(struct device *device)
bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G);
rfkill_set_sw_state(asus->wwan3g.rfkill, bl);
}
+ if (asus->gps.rfkill) {
+ bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPS);
+ rfkill_set_sw_state(asus->gps.rfkill, bl);
+ }
+ if (asus->uwb.rfkill) {
+ bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_UWB);
+ rfkill_set_sw_state(asus->uwb.rfkill, bl);
+ }
return 0;
}
@@ -1603,7 +1789,7 @@ static int asus_wmi_probe(struct platform_device *pdev)
static bool used;
-int asus_wmi_register_driver(struct asus_wmi_driver *driver)
+int __init_or_module asus_wmi_register_driver(struct asus_wmi_driver *driver)
{
struct platform_driver *platform_driver;
struct platform_device *platform_device;
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index c044522c876..8147c10161c 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -29,12 +29,15 @@
#include <linux/platform_device.h>
+#define ASUS_WMI_KEY_IGNORE (-1)
+
struct module;
struct key_entry;
struct asus_wmi;
struct asus_wmi_driver {
bool hotplug_wireless;
+ int wapf;
const char *name;
struct module *owner;
@@ -44,6 +47,10 @@ struct asus_wmi_driver {
const struct key_entry *keymap;
const char *input_name;
const char *input_phys;
+ /* Returns the new code, value, and autorelease settings through its
+ * arguments. Set *code to ASUS_WMI_KEY_IGNORE if the event should be ignored. */
+ void (*key_filter) (struct asus_wmi_driver *driver, int *code,
+ unsigned int *value, bool *autorelease);
int (*probe) (struct platform_device *device);
void (*quirks) (struct asus_wmi_driver *driver);
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 3f204fde1b0..8877b836d27 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -1030,8 +1030,10 @@ static int __devinit compal_probe(struct platform_device *pdev)
initialize_fan_control_data(data);
err = sysfs_create_group(&pdev->dev.kobj, &compal_attribute_group);
- if (err)
+ if (err) {
+ kfree(data);
return err;
+ }
data->hwmon_dev = hwmon_device_register(&pdev->dev);
if (IS_ERR(data->hwmon_dev)) {
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index d3841de6a8c..f31fa4efa72 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -292,12 +292,9 @@ static int dell_rfkill_set(void *data, bool blocked)
dell_send_request(buffer, 17, 11);
/* If the hardware switch controls this radio, and the hardware
- switch is disabled, don't allow changing the software state.
- If the hardware switch is reported as not supported, always
- fire the SMI to toggle the killswitch. */
+ switch is disabled, don't allow changing the software state */
if ((hwswitch_state & BIT(hwswitch_bit)) &&
- !(buffer->output[1] & BIT(16)) &&
- (buffer->output[1] & BIT(0))) {
+ !(buffer->output[1] & BIT(16))) {
ret = -EINVAL;
goto out;
}
@@ -403,23 +400,6 @@ static const struct file_operations dell_debugfs_fops = {
static void dell_update_rfkill(struct work_struct *ignored)
{
- int status;
-
- get_buffer();
- dell_send_request(buffer, 17, 11);
- status = buffer->output[1];
- release_buffer();
-
- /* if hardware rfkill is not supported, set it explicitly */
- if (!(status & BIT(0))) {
- if (wifi_rfkill)
- dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17));
- if (bluetooth_rfkill)
- dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18));
- if (wwan_rfkill)
- dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19));
- }
-
if (wifi_rfkill)
dell_rfkill_query(wifi_rfkill, (void *)1);
if (bluetooth_rfkill)
@@ -560,11 +540,11 @@ static int dell_get_intensity(struct backlight_device *bd)
else
dell_send_request(buffer, 0, 1);
+ ret = buffer->output[1];
+
out:
release_buffer();
- if (ret)
- return ret;
- return buffer->output[1];
+ return ret;
}
static const struct backlight_ops dell_ops = {
@@ -632,7 +612,6 @@ static int __init dell_init(void)
if (!bufferpage)
goto fail_buffer;
buffer = page_address(bufferpage);
- mutex_init(&buffer_mutex);
ret = dell_setup_rfkill();
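
The dell_get_intensity() fix above copies buffer->output[1] into ret while the SMI buffer is still held; previously the value was read after release_buffer(), when another caller could already have reused the shared page. A small user-space illustration of the same copy-out-under-the-lock pattern (the pthread mutex and the names are only stand-ins for buffer_mutex and the SMI buffer):

#include <pthread.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_output;               /* stands in for buffer->output[1] */

static int read_intensity(void)
{
        int ret;

        pthread_mutex_lock(&buf_lock);
        /* ... the real code fills shared_output via an SMI request here ... */
        ret = shared_output;            /* copy out while still locked */
        pthread_mutex_unlock(&buf_lock);

        return ret;                     /* no shared access after unlock */
}

int main(void)
{
        shared_output = 42;             /* pretend the SMI filled it in */
        return read_intensity() == 42 ? 0 : 1;
}
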
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index ce790827e19..fa9a2171cc1 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -54,6 +54,8 @@ MODULE_ALIAS("wmi:"DELL_EVENT_GUID);
*/
static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
+ { KE_IGNORE, 0x003a, { KEY_CAPSLOCK } },
+
{ KE_KEY, 0xe045, { KEY_PROG1 } },
{ KE_KEY, 0xe009, { KEY_EJECTCD } },
@@ -85,6 +87,11 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
{ KE_IGNORE, 0xe013, { KEY_RESERVED } },
{ KE_IGNORE, 0xe020, { KEY_MUTE } },
+
+ /* Shortcut and audio panel keys */
+ { KE_IGNORE, 0xe025, { KEY_RESERVED } },
+ { KE_IGNORE, 0xe026, { KEY_RESERVED } },
+
{ KE_IGNORE, 0xe02e, { KEY_VOLUMEDOWN } },
{ KE_IGNORE, 0xe030, { KEY_VOLUMEUP } },
{ KE_IGNORE, 0xe033, { KEY_KBDILLUMUP } },
@@ -92,6 +99,9 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
{ KE_IGNORE, 0xe03a, { KEY_CAPSLOCK } },
{ KE_IGNORE, 0xe045, { KEY_NUMLOCK } },
{ KE_IGNORE, 0xe046, { KEY_SCROLLLOCK } },
+ { KE_IGNORE, 0xe0f7, { KEY_MUTE } },
+ { KE_IGNORE, 0xe0f8, { KEY_VOLUMEDOWN } },
+ { KE_IGNORE, 0xe0f9, { KEY_VOLUMEUP } },
{ KE_END, 0 }
};
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 4aa867a9b88..9f6e64302b4 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -56,6 +56,11 @@ MODULE_PARM_DESC(hotplug_wireless,
"If your laptop needs that, please report to "
"acpi4asus-user@lists.sourceforge.net.");
+/* Values for T101MT "Home" key */
+#define HOME_PRESS 0xe4
+#define HOME_HOLD 0xea
+#define HOME_RELEASE 0xe5
+
static const struct key_entry eeepc_wmi_keymap[] = {
/* Sleep already handled via generic ACPI code */
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
@@ -71,6 +76,7 @@ static const struct key_entry eeepc_wmi_keymap[] = {
{ KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */
{ KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */
+ { KE_KEY, HOME_PRESS, { KEY_CONFIG } }, /* Home/Express gate key */
{ KE_KEY, 0xe8, { KEY_SCREENLOCK } },
{ KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } },
{ KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } },
@@ -81,6 +87,25 @@ static const struct key_entry eeepc_wmi_keymap[] = {
{ KE_END, 0},
};
+static void eeepc_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
+ unsigned int *value, bool *autorelease)
+{
+ switch (*code) {
+ case HOME_PRESS:
+ *value = 1;
+ *autorelease = 0;
+ break;
+ case HOME_HOLD:
+ *code = ASUS_WMI_KEY_IGNORE;
+ break;
+ case HOME_RELEASE:
+ *code = HOME_PRESS;
+ *value = 0;
+ *autorelease = 0;
+ break;
+ }
+}
+
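
The filter above turns the T101MT Home key's three scancodes into one logical key: HOME_PRESS becomes an explicit press, HOME_HOLD repeats are dropped, and HOME_RELEASE is rewritten to a release of the same key, so asus-wmi can pass value and autorelease straight to sparse_keymap_report_event(). A stand-alone sketch of that mapping, runnable in user space (only the three scancode values come from the driver; the rest is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define HOME_PRESS   0xe4
#define HOME_HOLD    0xea
#define HOME_RELEASE 0xe5
#define KEY_IGNORE   (-1)

static void filter(int *code, unsigned int *value, bool *autorelease)
{
        switch (*code) {
        case HOME_PRESS:
                *value = 1;                     /* explicit press */
                *autorelease = false;
                break;
        case HOME_HOLD:
                *code = KEY_IGNORE;             /* drop auto-repeat events */
                break;
        case HOME_RELEASE:
                *code = HOME_PRESS;             /* release of the same key */
                *value = 0;
                *autorelease = false;
                break;
        }
}

int main(void)
{
        int codes[] = { HOME_PRESS, HOME_HOLD, HOME_HOLD, HOME_RELEASE };

        for (unsigned int i = 0; i < sizeof(codes) / sizeof(codes[0]); i++) {
                int code = codes[i];
                unsigned int value = 1;
                bool autorelease = true;

                filter(&code, &value, &autorelease);
                printf("in=0x%02x -> code=%d value=%u autorelease=%d\n",
                       (unsigned int)codes[i], code, value, autorelease);
        }
        return 0;
}
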
static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level,
void *context, void **retval)
{
@@ -141,6 +166,7 @@ static void eeepc_dmi_check(struct asus_wmi_driver *driver)
static void eeepc_wmi_quirks(struct asus_wmi_driver *driver)
{
driver->hotplug_wireless = hotplug_wireless;
+ driver->wapf = -1;
eeepc_dmi_check(driver);
}
@@ -151,6 +177,7 @@ static struct asus_wmi_driver asus_wmi_driver = {
.keymap = eeepc_wmi_keymap,
.input_name = "Eee PC WMI hotkeys",
.input_phys = EEEPC_WMI_FILE "/input0",
+ .key_filter = eeepc_wmi_key_filter,
.probe = eeepc_wmi_probe,
.quirks = eeepc_wmi_quirks,
};
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index f94017bcdd6..e2faa3cbb79 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -207,6 +207,7 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
};
struct acpi_buffer input = { sizeof(struct bios_args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ u32 rc;
if (WARN_ON(insize > sizeof(args.data)))
return -EINVAL;
@@ -224,13 +225,13 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
}
bios_return = (struct bios_return *)obj->buffer.pointer;
+ rc = bios_return->return_code;
- if (bios_return->return_code) {
- if (bios_return->return_code != HPWMI_RET_UNKNOWN_CMDTYPE)
- pr_warn("query 0x%x returned error 0x%x\n",
- query, bios_return->return_code);
+ if (rc) {
+ if (rc != HPWMI_RET_UNKNOWN_CMDTYPE)
+ pr_warn("query 0x%x returned error 0x%x\n", query, rc);
kfree(obj);
- return bios_return->return_code;
+ return rc;
}
if (!outsize) {
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index bfdda33feb2..0c595410e78 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -32,13 +32,22 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
#define IDEAPAD_RFKILL_DEV_NUM (3)
+#define CFG_BT_BIT (16)
+#define CFG_3G_BIT (17)
+#define CFG_WIFI_BIT (18)
+#define CFG_CAMERA_BIT (19)
+
struct ideapad_private {
struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
struct platform_device *platform_device;
struct input_dev *inputdev;
+ struct backlight_device *blightdev;
+ unsigned long cfg;
};
static acpi_handle ideapad_handle;
@@ -155,7 +164,7 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
}
/*
- * camera power
+ * sysfs
*/
static ssize_t show_ideapad_cam(struct device *dev,
struct device_attribute *attr,
@@ -186,6 +195,44 @@ static ssize_t store_ideapad_cam(struct device *dev,
static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam);
+static ssize_t show_ideapad_cfg(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%.8lX\n", priv->cfg);
+}
+
+static DEVICE_ATTR(cfg, 0444, show_ideapad_cfg, NULL);
+
+static struct attribute *ideapad_attributes[] = {
+ &dev_attr_camera_power.attr,
+ &dev_attr_cfg.attr,
+ NULL
+};
+
+static mode_t ideapad_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int idx)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ bool supported;
+
+ if (attr == &dev_attr_camera_power.attr)
+ supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg));
+ else
+ supported = true;
+
+ return supported ? attr->mode : 0;
+}
+
+static struct attribute_group ideapad_attribute_group = {
+ .is_visible = ideapad_is_visible,
+ .attrs = ideapad_attributes
+};
+
/*
* Rfkill
*/
@@ -197,9 +244,9 @@ struct ideapad_rfk_data {
};
const struct ideapad_rfk_data ideapad_rfk_data[] = {
- { "ideapad_wlan", 18, 0x15, RFKILL_TYPE_WLAN },
- { "ideapad_bluetooth", 16, 0x17, RFKILL_TYPE_BLUETOOTH },
- { "ideapad_3g", 17, 0x20, RFKILL_TYPE_WWAN },
+ { "ideapad_wlan", CFG_WIFI_BIT, 0x15, RFKILL_TYPE_WLAN },
+ { "ideapad_bluetooth", CFG_BT_BIT, 0x17, RFKILL_TYPE_BLUETOOTH },
+ { "ideapad_3g", CFG_3G_BIT, 0x20, RFKILL_TYPE_WWAN },
};
static int ideapad_rfk_set(void *data, bool blocked)
@@ -265,8 +312,7 @@ static int __devinit ideapad_register_rfkill(struct acpi_device *adevice,
return 0;
}
-static void __devexit ideapad_unregister_rfkill(struct acpi_device *adevice,
- int dev)
+static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
{
struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
@@ -280,15 +326,6 @@ static void __devexit ideapad_unregister_rfkill(struct acpi_device *adevice,
/*
* Platform device
*/
-static struct attribute *ideapad_attributes[] = {
- &dev_attr_camera_power.attr,
- NULL
-};
-
-static struct attribute_group ideapad_attribute_group = {
- .attrs = ideapad_attributes
-};
-
static int __devinit ideapad_platform_init(struct ideapad_private *priv)
{
int result;
@@ -369,7 +406,7 @@ err_free_dev:
return error;
}
-static void __devexit ideapad_input_exit(struct ideapad_private *priv)
+static void ideapad_input_exit(struct ideapad_private *priv)
{
sparse_keymap_free(priv->inputdev);
input_unregister_device(priv->inputdev);
@@ -383,6 +420,98 @@ static void ideapad_input_report(struct ideapad_private *priv,
}
/*
+ * backlight
+ */
+static int ideapad_backlight_get_brightness(struct backlight_device *blightdev)
+{
+ unsigned long now;
+
+ if (read_ec_data(ideapad_handle, 0x12, &now))
+ return -EIO;
+ return now;
+}
+
+static int ideapad_backlight_update_status(struct backlight_device *blightdev)
+{
+ if (write_ec_cmd(ideapad_handle, 0x13, blightdev->props.brightness))
+ return -EIO;
+ if (write_ec_cmd(ideapad_handle, 0x33,
+ blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1))
+ return -EIO;
+
+ return 0;
+}
+
+static const struct backlight_ops ideapad_backlight_ops = {
+ .get_brightness = ideapad_backlight_get_brightness,
+ .update_status = ideapad_backlight_update_status,
+};
+
+static int ideapad_backlight_init(struct ideapad_private *priv)
+{
+ struct backlight_device *blightdev;
+ struct backlight_properties props;
+ unsigned long max, now, power;
+
+ if (read_ec_data(ideapad_handle, 0x11, &max))
+ return -EIO;
+ if (read_ec_data(ideapad_handle, 0x12, &now))
+ return -EIO;
+ if (read_ec_data(ideapad_handle, 0x18, &power))
+ return -EIO;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = max;
+ props.type = BACKLIGHT_PLATFORM;
+ blightdev = backlight_device_register("ideapad",
+ &priv->platform_device->dev,
+ priv,
+ &ideapad_backlight_ops,
+ &props);
+ if (IS_ERR(blightdev)) {
+ pr_err("Could not register backlight device\n");
+ return PTR_ERR(blightdev);
+ }
+
+ priv->blightdev = blightdev;
+ blightdev->props.brightness = now;
+ blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+ backlight_update_status(blightdev);
+
+ return 0;
+}
+
+static void ideapad_backlight_exit(struct ideapad_private *priv)
+{
+ if (priv->blightdev)
+ backlight_device_unregister(priv->blightdev);
+ priv->blightdev = NULL;
+}
+
+static void ideapad_backlight_notify_power(struct ideapad_private *priv)
+{
+ unsigned long power;
+ struct backlight_device *blightdev = priv->blightdev;
+
+ if (read_ec_data(ideapad_handle, 0x18, &power))
+ return;
+ blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
+}
+
+static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
+{
+ unsigned long now;
+
+ /* if we control brightness via acpi video driver */
+ if (priv->blightdev == NULL) {
+ read_ec_data(ideapad_handle, 0x12, &now);
+ return;
+ }
+
+ backlight_force_update(priv->blightdev, BACKLIGHT_UPDATE_HOTKEY);
+}
+
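
As a reading aid, the EC offsets used by the new ideapad backlight hooks above can be summarized as follows (the enum and its names are illustrative; only the numeric values appear in the driver):

enum ideapad_backlight_ec_regs {                /* illustrative names only */
        EC_BL_MAX_BRIGHTNESS = 0x11,            /* read_ec_data: maximum level */
        EC_BL_BRIGHTNESS     = 0x12,            /* read_ec_data: current level */
        EC_BL_SET_BRIGHTNESS = 0x13,            /* write_ec_cmd: set level */
        EC_BL_POWER          = 0x18,            /* read_ec_data: power state */
        EC_BL_SET_POWER      = 0x33,            /* write_ec_cmd: 1 = on, 0 = off */
};
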
+/*
* module init/exit
*/
static const struct acpi_device_id ideapad_device_ids[] = {
@@ -393,10 +522,11 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
{
- int ret, i, cfg;
+ int ret, i;
+ unsigned long cfg;
struct ideapad_private *priv;
- if (read_method_int(adevice->handle, "_CFG", &cfg))
+ if (read_method_int(adevice->handle, "_CFG", (int *)&cfg))
return -ENODEV;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -404,6 +534,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
return -ENOMEM;
dev_set_drvdata(&adevice->dev, priv);
ideapad_handle = adevice->handle;
+ priv->cfg = cfg;
ret = ideapad_platform_init(priv);
if (ret)
@@ -414,15 +545,25 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
goto input_failed;
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
- if (test_bit(ideapad_rfk_data[i].cfgbit, (unsigned long *)&cfg))
+ if (test_bit(ideapad_rfk_data[i].cfgbit, &cfg))
ideapad_register_rfkill(adevice, i);
else
priv->rfk[i] = NULL;
}
ideapad_sync_rfk_state(adevice);
+ if (!acpi_video_backlight_support()) {
+ ret = ideapad_backlight_init(priv);
+ if (ret && ret != -ENODEV)
+ goto backlight_failed;
+ }
+
return 0;
+backlight_failed:
+ for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
+ ideapad_unregister_rfkill(adevice, i);
+ ideapad_input_exit(priv);
input_failed:
ideapad_platform_exit(priv);
platform_failed:
@@ -435,6 +576,7 @@ static int __devexit ideapad_acpi_remove(struct acpi_device *adevice, int type)
struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
int i;
+ ideapad_backlight_exit(priv);
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
ideapad_unregister_rfkill(adevice, i);
ideapad_input_exit(priv);
@@ -459,12 +601,19 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
vpc1 = (vpc2 << 8) | vpc1;
for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
if (test_bit(vpc_bit, &vpc1)) {
- if (vpc_bit == 9)
+ switch (vpc_bit) {
+ case 9:
ideapad_sync_rfk_state(adevice);
- else if (vpc_bit == 4)
- read_ec_data(handle, 0x12, &vpc2);
- else
+ break;
+ case 4:
+ ideapad_backlight_notify_brightness(priv);
+ break;
+ case 2:
+ ideapad_backlight_notify_power(priv);
+ break;
+ default:
ideapad_input_report(priv, vpc_bit);
+ }
}
}
}
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 5ffe7c39814..809a3ae943c 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -403,7 +403,7 @@ static void ips_cpu_raise(struct ips_driver *ips)
thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8);
- turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDC_OVR_EN;
+ turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
turbo_override &= ~TURBO_TDP_MASK;
@@ -438,7 +438,7 @@ static void ips_cpu_lower(struct ips_driver *ips)
thm_writew(THM_MPCPC, (new_limit * 10) / 8);
- turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDC_OVR_EN;
+ turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN;
wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
turbo_override &= ~TURBO_TDP_MASK;
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index 809adea4965..abddc83e9fd 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -477,6 +477,8 @@ static acpi_status intel_menlow_register_sensor(acpi_handle handle, u32 lvl,
return AE_ERROR;
}
+ return AE_OK;
+
aux1_not_found:
if (status == AE_NOT_FOUND)
return AE_OK;
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 3a578323122..ccd7b1f8351 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -493,20 +493,30 @@ static int mid_thermal_probe(struct platform_device *pdev)
/* Register each sensor with the generic thermal framework */
for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
+ struct thermal_device_info *td_info = initialize_sensor(i);
+
+ if (!td_info) {
+ ret = -ENOMEM;
+ goto err;
+ }
pinfo->tzd[i] = thermal_zone_device_register(name[i],
- 0, initialize_sensor(i), &tzd_ops, 0, 0, 0, 0);
- if (IS_ERR(pinfo->tzd[i]))
- goto reg_fail;
+ 0, td_info, &tzd_ops, 0, 0, 0, 0);
+ if (IS_ERR(pinfo->tzd[i])) {
+ kfree(td_info);
+ ret = PTR_ERR(pinfo->tzd[i]);
+ goto err;
+ }
}
pinfo->pdev = pdev;
platform_set_drvdata(pdev, pinfo);
return 0;
-reg_fail:
- ret = PTR_ERR(pinfo->tzd[i]);
- while (--i >= 0)
+err:
+ while (--i >= 0) {
+ kfree(pinfo->tzd[i]->devdata);
thermal_zone_device_unregister(pinfo->tzd[i]);
+ }
configure_adc(0);
kfree(pinfo);
return ret;
@@ -524,8 +534,10 @@ static int mid_thermal_remove(struct platform_device *pdev)
int i;
struct platform_info *pinfo = platform_get_drvdata(pdev);
- for (i = 0; i < MSIC_THERMAL_SENSORS; i++)
+ for (i = 0; i < MSIC_THERMAL_SENSORS; i++) {
+ kfree(pinfo->tzd[i]->devdata);
thermal_zone_device_unregister(pinfo->tzd[i]);
+ }
kfree(pinfo);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index e936364a609..7f88c7923fc 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -250,6 +250,7 @@ static int oaktrail_backlight_init(void)
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = OT_EC_BL_BRIGHTNESS_MAX;
bd = backlight_device_register(DRIVER_NAME,
&oaktrail_device->dev, NULL,
diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c
index bde47e9080c..c8a6aed4527 100644
--- a/drivers/platform/x86/intel_rar_register.c
+++ b/drivers/platform/x86/intel_rar_register.c
@@ -637,15 +637,13 @@ end_function:
return error;
}
-const struct pci_device_id rar_pci_id_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rar_pci_id_tbl) = {
{ PCI_VDEVICE(INTEL, 0x4110) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, rar_pci_id_tbl);
-const struct pci_device_id *my_id_table = rar_pci_id_tbl;
-
/* field for registering driver to PCI device */
static struct pci_driver rar_pci_driver = {
.name = "rar_register_driver",
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 940accbe28d..c86665369a2 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -725,7 +725,7 @@ static void ipc_remove(struct pci_dev *pdev)
intel_scu_devices_destroy();
}
-static const struct pci_device_id pci_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)},
{ 0,}
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 3ff629df9f0..f204643c505 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -538,6 +538,15 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
},
.callback = dmi_check_cb
},
+ {
+ .ident = "MSI U270",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "Micro-Star International Co., Ltd."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U270 series"),
+ },
+ .callback = dmi_check_cb
+ },
{ }
};
@@ -996,3 +1005,4 @@ MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N034:*");
MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N051:*");
MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N014:*");
MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnCR620:*");
+MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnU270series:*");
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index c832e3356cd..6f40bf202dc 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -272,6 +272,7 @@ static int __init msi_wmi_init(void)
err_free_backlight:
backlight_device_unregister(backlight);
err_free_input:
+ sparse_keymap_free(msi_wmi_input_dev);
input_unregister_device(msi_wmi_input_dev);
err_uninstall_notifier:
wmi_remove_notify_handler(MSIWMI_EVENT_GUID);
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index d347116d150..35916301104 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -521,6 +521,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
.callback = dmi_check_cb,
},
{
+ .ident = "N510",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N510"),
+ DMI_MATCH(DMI_BOARD_NAME, "N510"),
+ },
+ .callback = dmi_check_cb,
+ },
+ {
.ident = "X125",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
@@ -601,6 +611,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
.callback = dmi_check_cb,
},
{
+ .ident = "N150/N210/N220",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
+ DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
+ },
+ .callback = dmi_check_cb,
+ },
+ {
.ident = "N150/N210/N220/N230",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
new file mode 100644
index 00000000000..1e54ae74274
--- /dev/null
+++ b/drivers/platform/x86/samsung-q10.c
@@ -0,0 +1,196 @@
+/*
+ * Driver for Samsung Q10 and related laptops: controls the backlight
+ *
+ * Copyright (c) 2011 Frederick van der Wyck <fvanderwyck@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/i8042.h>
+#include <linux/dmi.h>
+
+#define SAMSUNGQ10_BL_MAX_INTENSITY 255
+#define SAMSUNGQ10_BL_DEFAULT_INTENSITY 185
+
+#define SAMSUNGQ10_BL_8042_CMD 0xbe
+#define SAMSUNGQ10_BL_8042_DATA { 0x89, 0x91 }
+
+static int samsungq10_bl_brightness;
+
+static bool force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force,
+ "Disable the DMI check and force the driver to be loaded");
+
+static int samsungq10_bl_set_intensity(struct backlight_device *bd)
+{
+
+ int brightness = bd->props.brightness;
+ unsigned char c[3] = SAMSUNGQ10_BL_8042_DATA;
+
+ c[2] = (unsigned char)brightness;
+ i8042_lock_chip();
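+ /* the 0x30 in the upper byte of the command word asks i8042_command()
+ * to send the three bytes in c[] and read nothing back */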
+ i8042_command(c, (0x30 << 8) | SAMSUNGQ10_BL_8042_CMD);
+ i8042_unlock_chip();
+ samsungq10_bl_brightness = brightness;
+
+ return 0;
+}
+
+static int samsungq10_bl_get_intensity(struct backlight_device *bd)
+{
+ return samsungq10_bl_brightness;
+}
+
+static const struct backlight_ops samsungq10_bl_ops = {
+ .get_brightness = samsungq10_bl_get_intensity,
+ .update_status = samsungq10_bl_set_intensity,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int samsungq10_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int samsungq10_resume(struct device *dev)
+{
+
+ struct backlight_device *bd = dev_get_drvdata(dev);
+
+ samsungq10_bl_set_intensity(bd);
+ return 0;
+}
+#else
+#define samsungq10_suspend NULL
+#define samsungq10_resume NULL
+#endif
+
+static SIMPLE_DEV_PM_OPS(samsungq10_pm_ops,
+ samsungq10_suspend, samsungq10_resume);
+
+static int __devinit samsungq10_probe(struct platform_device *pdev)
+{
+
+ struct backlight_properties props;
+ struct backlight_device *bd;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = SAMSUNGQ10_BL_MAX_INTENSITY;
+ bd = backlight_device_register("samsung", &pdev->dev, NULL,
+ &samsungq10_bl_ops, &props);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
+
+ platform_set_drvdata(pdev, bd);
+
+ bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY;
+ samsungq10_bl_set_intensity(bd);
+
+ return 0;
+}
+
+static int __devexit samsungq10_remove(struct platform_device *pdev)
+{
+
+ struct backlight_device *bd = platform_get_drvdata(pdev);
+
+ bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY;
+ samsungq10_bl_set_intensity(bd);
+
+ backlight_device_unregister(bd);
+
+ return 0;
+}
+
+static struct platform_driver samsungq10_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .pm = &samsungq10_pm_ops,
+ },
+ .probe = samsungq10_probe,
+ .remove = __devexit_p(samsungq10_remove),
+};
+
+static struct platform_device *samsungq10_device;
+
+static int __init dmi_check_callback(const struct dmi_system_id *id)
+{
+ printk(KERN_INFO KBUILD_MODNAME ": found model '%s'\n", id->ident);
+ return 1;
+}
+
+static struct dmi_system_id __initdata samsungq10_dmi_table[] = {
+ {
+ .ident = "Samsung Q10",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Samsung"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SQ10"),
+ },
+ .callback = dmi_check_callback,
+ },
+ {
+ .ident = "Samsung Q20",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG Electronics"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SENS Q20"),
+ },
+ .callback = dmi_check_callback,
+ },
+ {
+ .ident = "Samsung Q25",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG Electronics"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NQ25"),
+ },
+ .callback = dmi_check_callback,
+ },
+ {
+ .ident = "Dell Latitude X200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X200"),
+ },
+ .callback = dmi_check_callback,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(dmi, samsungq10_dmi_table);
+
+static int __init samsungq10_init(void)
+{
+ if (!force && !dmi_check_system(samsungq10_dmi_table))
+ return -ENODEV;
+
+ samsungq10_device = platform_create_bundle(&samsungq10_driver,
+ samsungq10_probe,
+ NULL, 0, NULL, 0);
+
+ if (IS_ERR(samsungq10_device))
+ return PTR_ERR(samsungq10_device);
+
+ return 0;
+}
+
+static void __exit samsungq10_exit(void)
+{
+ platform_device_unregister(samsungq10_device);
+ platform_driver_unregister(&samsungq10_driver);
+}
+
+module_init(samsungq10_init);
+module_exit(samsungq10_exit);
+
+MODULE_AUTHOR("Frederick van der Wyck <fvanderwyck@gmail.com>");
+MODULE_DESCRIPTION("Samsung Q10 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 77f6e707a2a..7bd829f247e 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -184,6 +184,10 @@ enum tpacpi_hkey_event_t {
/* Misc bay events */
TP_HKEY_EV_OPTDRV_EJ = 0x3006, /* opt. drive tray ejected */
+ TP_HKEY_EV_HOTPLUG_DOCK = 0x4010, /* docked into hotplug dock
+ or port replicator */
+ TP_HKEY_EV_HOTPLUG_UNDOCK = 0x4011, /* undocked from hotplug
+ dock or port replicator */
/* User-interface events */
TP_HKEY_EV_LID_CLOSE = 0x5001, /* laptop lid closed */
@@ -194,6 +198,10 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_PEN_REMOVED = 0x500c, /* tablet pen removed */
TP_HKEY_EV_BRGHT_CHANGED = 0x5010, /* backlight control event */
+ /* Key-related user-interface events */
+ TP_HKEY_EV_KEY_NUMLOCK = 0x6000, /* NumLock key pressed */
+ TP_HKEY_EV_KEY_FN = 0x6005, /* Fn key pressed? E420 */
+
/* Thermal events */
TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */
TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */
@@ -201,6 +209,10 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */
TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* thermal table changed */
+ TP_HKEY_EV_UNK_6040 = 0x6040, /* Related to AC change?
+ some sort of APM hint,
+ W520 */
+
/* Misc */
TP_HKEY_EV_RFKILL_CHANGED = 0x7000, /* rfkill switch changed */
};
@@ -3174,8 +3186,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
/* (assignments unknown, please report if found) */
+ KEY_UNKNOWN, KEY_UNKNOWN,
+
+ /*
+ * The mic mute button only sends 0x1a. It does not
+ * automatically mute the mic or change the mute light.
+ */
+ KEY_MICMUTE, /* 0x1a: Mic mute (since ?400 or so) */
+
+ /* (assignments unknown, please report if found) */
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
- KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN,
},
};
@@ -3513,6 +3534,34 @@ static bool hotkey_notify_wakeup(const u32 hkey,
return true;
}
+static bool hotkey_notify_dockevent(const u32 hkey,
+ bool *send_acpi_ev,
+ bool *ignore_acpi_ev)
+{
+ /* 0x4000-0x4FFF: dock-related events */
+ *send_acpi_ev = true;
+ *ignore_acpi_ev = false;
+
+ switch (hkey) {
+ case TP_HKEY_EV_UNDOCK_ACK:
+ /* ACPI undock operation completed after wakeup */
+ hotkey_autosleep_ack = 1;
+ pr_info("undocked\n");
+ hotkey_wakeup_hotunplug_complete_notify_change();
+ return true;
+
+ case TP_HKEY_EV_HOTPLUG_DOCK: /* docked to port replicator */
+ pr_info("docked into hotplug port replicator\n");
+ return true;
+ case TP_HKEY_EV_HOTPLUG_UNDOCK: /* undocked from port replicator */
+ pr_info("undocked from hotplug port replicator\n");
+ return true;
+
+ default:
+ return false;
+ }
+}
+
static bool hotkey_notify_usrevent(const u32 hkey,
bool *send_acpi_ev,
bool *ignore_acpi_ev)
@@ -3547,13 +3596,13 @@ static bool hotkey_notify_usrevent(const u32 hkey,
static void thermal_dump_all_sensors(void);
-static bool hotkey_notify_thermal(const u32 hkey,
+static bool hotkey_notify_6xxx(const u32 hkey,
bool *send_acpi_ev,
bool *ignore_acpi_ev)
{
bool known = true;
- /* 0x6000-0x6FFF: thermal alarms */
+ /* 0x6000-0x6FFF: thermal alarms/notices and keyboard events */
*send_acpi_ev = true;
*ignore_acpi_ev = false;
@@ -3582,8 +3631,17 @@ static bool hotkey_notify_thermal(const u32 hkey,
"a sensor reports something is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
break;
+
+ case TP_HKEY_EV_KEY_NUMLOCK:
+ case TP_HKEY_EV_KEY_FN:
+ /* Key press events; we just ignore them as long as the EC
+ * is still reporting them in the normal keyboard stream. */
+ *send_acpi_ev = false;
+ *ignore_acpi_ev = true;
+ return true;
+
default:
- pr_alert("THERMAL ALERT: unknown thermal alarm received\n");
+ pr_warn("unknown possible thermal alarm or keyboard event received\n");
known = false;
}
@@ -3652,15 +3710,9 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
}
break;
case 4:
- /* 0x4000-0x4FFF: dock-related wakeups */
- if (hkey == TP_HKEY_EV_UNDOCK_ACK) {
- hotkey_autosleep_ack = 1;
- pr_info("undocked\n");
- hotkey_wakeup_hotunplug_complete_notify_change();
- known_ev = true;
- } else {
- known_ev = false;
- }
+ /* 0x4000-0x4FFF: dock-related events */
+ known_ev = hotkey_notify_dockevent(hkey, &send_acpi_ev,
+ &ignore_acpi_ev);
break;
case 5:
/* 0x5000-0x5FFF: human interface helpers */
@@ -3668,8 +3720,9 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
&ignore_acpi_ev);
break;
case 6:
- /* 0x6000-0x6FFF: thermal alarms */
- known_ev = hotkey_notify_thermal(hkey, &send_acpi_ev,
+ /* 0x6000-0x6FFF: thermal alarms/notices and
+ * keyboard events */
+ known_ev = hotkey_notify_6xxx(hkey, &send_acpi_ev,
&ignore_acpi_ev);
break;
case 7:
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 100e4d9372f..bbf3edd85be 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -509,15 +509,15 @@ static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev,
struct acpi_resource_dma *p)
{
int i;
- unsigned char map = 0, flags;
+ unsigned char map = 0, flags = 0;
if (p->channel_count == 0)
- return;
+ flags |= IORESOURCE_DISABLED;
for (i = 0; i < p->channel_count; i++)
map |= 1 << p->channels[i];
- flags = dma_flags(dev, p->type, p->bus_master, p->transfer);
+ flags |= dma_flags(dev, p->type, p->bus_master, p->transfer);
pnp_register_dma_resource(dev, option_flags, map, flags);
}
@@ -527,17 +527,17 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev,
{
int i;
pnp_irq_mask_t map;
- unsigned char flags;
+ unsigned char flags = 0;
if (p->interrupt_count == 0)
- return;
+ flags |= IORESOURCE_DISABLED;
bitmap_zero(map.bits, PNP_IRQ_NR);
for (i = 0; i < p->interrupt_count; i++)
if (p->interrupts[i])
__set_bit(p->interrupts[i], map.bits);
- flags = irq_flags(p->triggering, p->polarity, p->sharable);
+ flags |= irq_flags(p->triggering, p->polarity, p->sharable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
@@ -547,10 +547,10 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
{
int i;
pnp_irq_mask_t map;
- unsigned char flags;
+ unsigned char flags = 0;
if (p->interrupt_count == 0)
- return;
+ flags |= IORESOURCE_DISABLED;
bitmap_zero(map.bits, PNP_IRQ_NR);
for (i = 0; i < p->interrupt_count; i++) {
@@ -564,7 +564,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
}
}
- flags = irq_flags(p->triggering, p->polarity, p->sharable);
+ flags |= irq_flags(p->triggering, p->polarity, p->sharable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
@@ -575,10 +575,10 @@ static __init void pnpacpi_parse_port_option(struct pnp_dev *dev,
unsigned char flags = 0;
if (io->address_length == 0)
- return;
+ flags |= IORESOURCE_DISABLED;
if (io->io_decode == ACPI_DECODE_16)
- flags = IORESOURCE_IO_16BIT_ADDR;
+ flags |= IORESOURCE_IO_16BIT_ADDR;
pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum,
io->alignment, io->address_length, flags);
}
@@ -587,11 +587,13 @@ static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_fixed_io *io)
{
+ unsigned char flags = 0;
+
if (io->address_length == 0)
- return;
+ flags |= IORESOURCE_DISABLED;
pnp_register_port_resource(dev, option_flags, io->address, io->address,
- 0, io->address_length, IORESOURCE_IO_FIXED);
+ 0, io->address_length, flags | IORESOURCE_IO_FIXED);
}
static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev,
@@ -601,10 +603,10 @@ static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev,
unsigned char flags = 0;
if (p->address_length == 0)
- return;
+ flags |= IORESOURCE_DISABLED;
if (p->write_protect == ACPI_READ_WRITE_MEMORY)
- flags = IORESOURCE_MEM_WRITEABLE;
+ flags |= IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
p->alignment, p->address_length, flags);
}
@@ -616,10 +618,10 @@ static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev,
unsigned char flags = 0;
if (p->address_length == 0)
- return;
+ flags |= IORESOURCE_DISABLED;
if (p->write_protect == ACPI_READ_WRITE_MEMORY)
- flags = IORESOURCE_MEM_WRITEABLE;
+ flags |= IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
p->alignment, p->address_length, flags);
}
@@ -631,10 +633,10 @@ static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev,
unsigned char flags = 0;
if (p->address_length == 0)
- return;
+ flags |= IORESOURCE_DISABLED;
if (p->write_protect == ACPI_READ_WRITE_MEMORY)
- flags = IORESOURCE_MEM_WRITEABLE;
+ flags |= IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->address, p->address,
0, p->address_length, flags);
}
@@ -655,18 +657,18 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
}
if (p->address_length == 0)
- return;
+ flags |= IORESOURCE_DISABLED;
if (p->resource_type == ACPI_MEMORY_RANGE) {
if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
- flags = IORESOURCE_MEM_WRITEABLE;
+ flags |= IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->minimum,
p->minimum, 0, p->address_length,
flags);
} else if (p->resource_type == ACPI_IO_RANGE)
pnp_register_port_resource(dev, option_flags, p->minimum,
p->minimum, 0, p->address_length,
- IORESOURCE_IO_FIXED);
+ flags | IORESOURCE_IO_FIXED);
}
static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
@@ -677,18 +679,18 @@ static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
unsigned char flags = 0;
if (p->address_length == 0)
- return;
+ flags |= IORESOURCE_DISABLED;
if (p->resource_type == ACPI_MEMORY_RANGE) {
if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
- flags = IORESOURCE_MEM_WRITEABLE;
+ flags |= IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->minimum,
p->minimum, 0, p->address_length,
flags);
} else if (p->resource_type == ACPI_IO_RANGE)
pnp_register_port_resource(dev, option_flags, p->minimum,
p->minimum, 0, p->address_length,
- IORESOURCE_IO_FIXED);
+ flags | IORESOURCE_IO_FIXED);
}
struct acpipnp_parse_option_s {
@@ -1018,7 +1020,7 @@ static void pnpacpi_encode_io(struct pnp_dev *dev,
io->minimum = p->start;
io->maximum = p->end;
io->alignment = 0; /* Correct? */
- io->address_length = p->end - p->start + 1;
+ io->address_length = resource_size(p);
} else {
io->minimum = 0;
io->address_length = 0;
@@ -1036,7 +1038,7 @@ static void pnpacpi_encode_fixed_io(struct pnp_dev *dev,
if (pnp_resource_enabled(p)) {
fixed_io->address = p->start;
- fixed_io->address_length = p->end - p->start + 1;
+ fixed_io->address_length = resource_size(p);
} else {
fixed_io->address = 0;
fixed_io->address_length = 0;
@@ -1059,7 +1061,7 @@ static void pnpacpi_encode_mem24(struct pnp_dev *dev,
memory24->minimum = p->start;
memory24->maximum = p->end;
memory24->alignment = 0;
- memory24->address_length = p->end - p->start + 1;
+ memory24->address_length = resource_size(p);
} else {
memory24->minimum = 0;
memory24->address_length = 0;
@@ -1083,7 +1085,7 @@ static void pnpacpi_encode_mem32(struct pnp_dev *dev,
memory32->minimum = p->start;
memory32->maximum = p->end;
memory32->alignment = 0;
- memory32->address_length = p->end - p->start + 1;
+ memory32->address_length = resource_size(p);
} else {
memory32->minimum = 0;
memory32->alignment = 0;
@@ -1106,7 +1108,7 @@ static void pnpacpi_encode_fixed_mem32(struct pnp_dev *dev,
p->flags & IORESOURCE_MEM_WRITEABLE ?
ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
fixed_memory32->address = p->start;
- fixed_memory32->address_length = p->end - p->start + 1;
+ fixed_memory32->address_length = resource_size(p);
} else {
fixed_memory32->address = 0;
fixed_memory32->address_length = 0;
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index cb1f47bfee9..cca2f9f9f3e 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -505,7 +505,7 @@ static void pnpbios_encode_mem(struct pnp_dev *dev, unsigned char *p,
if (pnp_resource_enabled(res)) {
base = res->start;
- len = res->end - res->start + 1;
+ len = resource_size(res);
} else {
base = 0;
len = 0;
@@ -529,7 +529,7 @@ static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p,
if (pnp_resource_enabled(res)) {
base = res->start;
- len = res->end - res->start + 1;
+ len = resource_size(res);
} else {
base = 0;
len = 0;
@@ -559,7 +559,7 @@ static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p,
if (pnp_resource_enabled(res)) {
base = res->start;
- len = res->end - res->start + 1;
+ len = resource_size(res);
} else {
base = 0;
len = 0;
@@ -617,7 +617,7 @@ static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p,
if (pnp_resource_enabled(res)) {
base = res->start;
- len = res->end - res->start + 1;
+ len = resource_size(res);
} else {
base = 0;
len = 0;
@@ -636,11 +636,11 @@ static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p,
struct resource *res)
{
unsigned long base = res->start;
- unsigned long len = res->end - res->start + 1;
+ unsigned long len = resource_size(res);
if (pnp_resource_enabled(res)) {
base = res->start;
- len = res->end - res->start + 1;
+ len = resource_size(res);
} else {
base = 0;
len = 0;
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index e57b50b3856..57de051a74b 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -235,4 +235,18 @@ config CHARGER_GPIO
This driver can be built as a module. If so, the module will be
called gpio-charger.
+config CHARGER_MAX8997
+ tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
+ depends on MFD_MAX8997 && REGULATOR_MAX8997
+ help
+ Say Y to enable support for the battery charger control sysfs and
+ platform data of MAX8997/MAX8966 PMICs.
+
+config CHARGER_MAX8998
+ tristate "Maxim MAX8998/LP3974 PMIC battery charger driver"
+ depends on MFD_MAX8998 && REGULATOR_MAX8998
+ help
+ Say Y to enable support for the battery charger control sysfs and
+ platform data of MAX8998/LP3974 PMICs.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 009a90fa8ac..b4af13dd8b6 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -36,3 +36,5 @@ obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
+obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
+obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index dc628cb2e76..8a612dec913 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -14,11 +14,11 @@
#include <linux/apm-emulation.h>
-#define PSY_PROP(psy, prop, val) psy->get_property(psy, \
- POWER_SUPPLY_PROP_##prop, val)
+#define PSY_PROP(psy, prop, val) (psy->get_property(psy, \
+ POWER_SUPPLY_PROP_##prop, val))
-#define _MPSY_PROP(prop, val) main_battery->get_property(main_battery, \
- prop, val)
+#define _MPSY_PROP(prop, val) (main_battery->get_property(main_battery, \
+ prop, val))
#define MPSY_PROP(prop, val) _MPSY_PROP(POWER_SUPPLY_PROP_##prop, val)
diff --git a/drivers/power/bq20z75.c b/drivers/power/bq20z75.c
index 506585e31a5..9c5e5beda3a 100644
--- a/drivers/power/bq20z75.c
+++ b/drivers/power/bq20z75.c
@@ -152,6 +152,10 @@ struct bq20z75_info {
bool gpio_detect;
bool enable_detection;
int irq;
+ int last_state;
+ int poll_time;
+ struct delayed_work work;
+ int ignore_changes;
};
static int bq20z75_read_word_data(struct i2c_client *client, u8 address)
@@ -279,6 +283,7 @@ static int bq20z75_get_battery_property(struct i2c_client *client,
int reg_offset, enum power_supply_property psp,
union power_supply_propval *val)
{
+ struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
s32 ret;
ret = bq20z75_read_word_data(client,
@@ -293,15 +298,24 @@ static int bq20z75_get_battery_property(struct i2c_client *client,
if (ret >= bq20z75_data[reg_offset].min_value &&
ret <= bq20z75_data[reg_offset].max_value) {
val->intval = ret;
- if (psp == POWER_SUPPLY_PROP_STATUS) {
- if (ret & BATTERY_FULL_CHARGED)
- val->intval = POWER_SUPPLY_STATUS_FULL;
- else if (ret & BATTERY_FULL_DISCHARGED)
- val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
- else if (ret & BATTERY_DISCHARGING)
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
- else
- val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ if (psp != POWER_SUPPLY_PROP_STATUS)
+ return 0;
+
+ if (ret & BATTERY_FULL_CHARGED)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else if (ret & BATTERY_FULL_DISCHARGED)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else if (ret & BATTERY_DISCHARGING)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+
+ if (bq20z75_device->poll_time == 0)
+ bq20z75_device->last_state = val->intval;
+ else if (bq20z75_device->last_state != val->intval) {
+ cancel_delayed_work_sync(&bq20z75_device->work);
+ power_supply_changed(&bq20z75_device->power_supply);
+ bq20z75_device->poll_time = 0;
}
} else {
if (psp == POWER_SUPPLY_PROP_STATUS)
@@ -545,6 +559,60 @@ static irqreturn_t bq20z75_irq(int irq, void *devid)
return IRQ_HANDLED;
}
+static void bq20z75_external_power_changed(struct power_supply *psy)
+{
+ struct bq20z75_info *bq20z75_device;
+
+ bq20z75_device = container_of(psy, struct bq20z75_info, power_supply);
+
+ if (bq20z75_device->ignore_changes > 0) {
+ bq20z75_device->ignore_changes--;
+ return;
+ }
+
+ /* cancel outstanding work */
+ cancel_delayed_work_sync(&bq20z75_device->work);
+
+ schedule_delayed_work(&bq20z75_device->work, HZ);
+ bq20z75_device->poll_time = bq20z75_device->pdata->poll_retry_count;
+}
+
+static void bq20z75_delayed_work(struct work_struct *work)
+{
+ struct bq20z75_info *bq20z75_device;
+ s32 ret;
+
+ bq20z75_device = container_of(work, struct bq20z75_info, work.work);
+
+ ret = bq20z75_read_word_data(bq20z75_device->client,
+ bq20z75_data[REG_STATUS].addr);
+ /* if the read failed, give up on this work */
+ if (ret < 0) {
+ bq20z75_device->poll_time = 0;
+ return;
+ }
+
+ if (ret & BATTERY_FULL_CHARGED)
+ ret = POWER_SUPPLY_STATUS_FULL;
+ else if (ret & BATTERY_FULL_DISCHARGED)
+ ret = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else if (ret & BATTERY_DISCHARGING)
+ ret = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ ret = POWER_SUPPLY_STATUS_CHARGING;
+
+ if (bq20z75_device->last_state != ret) {
+ bq20z75_device->poll_time = 0;
+ power_supply_changed(&bq20z75_device->power_supply);
+ return;
+ }
+ if (bq20z75_device->poll_time > 0) {
+ schedule_delayed_work(&bq20z75_device->work, HZ);
+ bq20z75_device->poll_time--;
+ return;
+ }
+}
+
static int __devinit bq20z75_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
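
Taken together, the new external_power_changed()/delayed-work pair implements a bounded poll: after a supply event the driver re-reads the decoded status roughly once a second, up to poll_retry_count times, and calls power_supply_changed() only when the result differs from last_state. A stand-alone model of that control flow, runnable in user space (the names and the fake status source are illustrative, not driver API):

#include <stdio.h>

/* Re-read the status up to 'retries' times; stop early when it changes. */
static int poll_until_changed(int last_state, int retries,
                              int (*read_status)(void))
{
        while (retries-- > 0) {
                int now = read_status();

                if (now != last_state)
                        return now;     /* driver would call power_supply_changed() */
        }
        return last_state;              /* retries exhausted, keep the old state */
}

static int fake_status(void)
{
        static int calls;

        return ++calls >= 3 ? 1 : 0;    /* status flips on the third read */
}

int main(void)
{
        printf("new state: %d\n", poll_until_changed(0, 5, fake_status));
        return 0;
}
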
@@ -566,6 +634,13 @@ static int __devinit bq20z75_probe(struct i2c_client *client,
bq20z75_device->power_supply.num_properties =
ARRAY_SIZE(bq20z75_properties);
bq20z75_device->power_supply.get_property = bq20z75_get_property;
+ /* Ignore the first external-power notification; it is generated
+ * by the power_supply_register() callback.
+ */
+ bq20z75_device->ignore_changes = 1;
+ bq20z75_device->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
+ bq20z75_device->power_supply.external_power_changed =
+ bq20z75_external_power_changed;
if (pdata) {
bq20z75_device->gpio_detect =
@@ -625,6 +700,10 @@ skip_gpio:
dev_info(&client->dev,
"%s: battery gas gauge device registered\n", client->name);
+ INIT_DELAYED_WORK(&bq20z75_device->work, bq20z75_delayed_work);
+
+ bq20z75_device->enable_detection = true;
+
return 0;
exit_psupply:
@@ -648,6 +727,9 @@ static int __devexit bq20z75_remove(struct i2c_client *client)
gpio_free(bq20z75_device->pdata->battery_detect);
power_supply_unregister(&bq20z75_device->power_supply);
+
+ cancel_delayed_work_sync(&bq20z75_device->work);
+
kfree(bq20z75_device);
bq20z75_device = NULL;
@@ -661,6 +743,9 @@ static int bq20z75_suspend(struct i2c_client *client,
struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
s32 ret;
+ if (bq20z75_device->poll_time > 0)
+ cancel_delayed_work_sync(&bq20z75_device->work);
+
/* write to manufacturer access with sleep command */
ret = bq20z75_write_word_data(client,
bq20z75_data[REG_MANUFACTURER_DATA].addr,
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 4d2dc4fa288..bfbce5de49d 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2009 Bluewater Systems Ltd
*
- * Author: Ryan Mallon <ryan@bluewatersys.com>
+ * Author: Ryan Mallon
*
* DS2786 added by Yulia Vilensky <vilensky@compulab.co.il>
*
@@ -416,6 +416,6 @@ static void __exit ds278x_exit(void)
}
module_exit(ds278x_exit);
-MODULE_AUTHOR("Ryan Mallon <ryan@bluewatersys.com>");
+MODULE_AUTHOR("Ryan Mallon");
MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauge IC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index 718f2c53782..a64b8854cfd 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -127,7 +127,7 @@ static int __devinit gpio_charger_probe(struct platform_device *pdev)
ret = request_any_context_irq(irq, gpio_charger_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
dev_name(&pdev->dev), charger);
- if (ret)
+ if (ret < 0)
dev_warn(&pdev->dev, "Failed to request irq: %d\n", ret);
else
gpio_charger->irq = irq;
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index c5c8805156c..98bfab35b8e 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -29,74 +29,6 @@
#include <linux/power_supply.h>
#include <linux/power/max17042_battery.h>
-enum max17042_register {
- MAX17042_STATUS = 0x00,
- MAX17042_VALRT_Th = 0x01,
- MAX17042_TALRT_Th = 0x02,
- MAX17042_SALRT_Th = 0x03,
- MAX17042_AtRate = 0x04,
- MAX17042_RepCap = 0x05,
- MAX17042_RepSOC = 0x06,
- MAX17042_Age = 0x07,
- MAX17042_TEMP = 0x08,
- MAX17042_VCELL = 0x09,
- MAX17042_Current = 0x0A,
- MAX17042_AvgCurrent = 0x0B,
- MAX17042_Qresidual = 0x0C,
- MAX17042_SOC = 0x0D,
- MAX17042_AvSOC = 0x0E,
- MAX17042_RemCap = 0x0F,
- MAX17402_FullCAP = 0x10,
- MAX17042_TTE = 0x11,
- MAX17042_V_empty = 0x12,
-
- MAX17042_RSLOW = 0x14,
-
- MAX17042_AvgTA = 0x16,
- MAX17042_Cycles = 0x17,
- MAX17042_DesignCap = 0x18,
- MAX17042_AvgVCELL = 0x19,
- MAX17042_MinMaxTemp = 0x1A,
- MAX17042_MinMaxVolt = 0x1B,
- MAX17042_MinMaxCurr = 0x1C,
- MAX17042_CONFIG = 0x1D,
- MAX17042_ICHGTerm = 0x1E,
- MAX17042_AvCap = 0x1F,
- MAX17042_ManName = 0x20,
- MAX17042_DevName = 0x21,
- MAX17042_DevChem = 0x22,
-
- MAX17042_TempNom = 0x24,
- MAX17042_TempCold = 0x25,
- MAX17042_TempHot = 0x26,
- MAX17042_AIN = 0x27,
- MAX17042_LearnCFG = 0x28,
- MAX17042_SHFTCFG = 0x29,
- MAX17042_RelaxCFG = 0x2A,
- MAX17042_MiscCFG = 0x2B,
- MAX17042_TGAIN = 0x2C,
- MAx17042_TOFF = 0x2D,
- MAX17042_CGAIN = 0x2E,
- MAX17042_COFF = 0x2F,
-
- MAX17042_Q_empty = 0x33,
- MAX17042_T_empty = 0x34,
-
- MAX17042_RCOMP0 = 0x38,
- MAX17042_TempCo = 0x39,
- MAX17042_Rx = 0x3A,
- MAX17042_T_empty0 = 0x3B,
- MAX17042_TaskPeriod = 0x3C,
- MAX17042_FSTAT = 0x3D,
-
- MAX17042_SHDNTIMER = 0x3F,
-
- MAX17042_VFRemCap = 0x4A,
-
- MAX17042_QH = 0x4D,
- MAX17042_QL = 0x4E,
-};
-
struct max17042_chip {
struct i2c_client *client;
struct power_supply battery;
@@ -123,10 +55,27 @@ static int max17042_read_reg(struct i2c_client *client, u8 reg)
return ret;
}
+static void max17042_set_reg(struct i2c_client *client,
+ struct max17042_reg_data *data, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ max17042_write_reg(client, data[i].addr, data[i].data);
+}
+
static enum power_supply_property max17042_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_AVG,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
};
static int max17042_get_property(struct power_supply *psy,
@@ -137,6 +86,30 @@ static int max17042_get_property(struct power_supply *psy,
struct max17042_chip, battery);
switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_STATUS);
+ if (val->intval & MAX17042_STATUS_BattAbsent)
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_Cycles);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_MinMaxVolt);
+ val->intval >>= 8;
+ val->intval *= 20000; /* Units of LSB = 20mV */
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_V_empty);
+ val->intval >>= 7;
+ val->intval *= 10000; /* Units of LSB = 10mV */
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
val->intval = max17042_read_reg(chip->client,
MAX17042_VCELL) * 83; /* 1000 / 12 = 83 */
@@ -149,6 +122,57 @@ static int max17042_get_property(struct power_supply *psy,
val->intval = max17042_read_reg(chip->client,
MAX17042_SOC) / 256;
break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_RepSOC);
+ if ((val->intval / 256) >= MAX17042_BATTERY_FULL)
+ val->intval = 1;
+ else if (val->intval >= 0)
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_TEMP);
+ /* The value is signed. */
+ if (val->intval & 0x8000) {
+ val->intval = (0x7fff & ~val->intval) + 1;
+ val->intval *= -1;
+ }
+ /* The value is converted into deci-centigrade scale */
+ /* Units of LSB = 1 / 256 degree Celsius */
+ val->intval = val->intval * 10 / 256;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ if (chip->pdata->enable_current_sense) {
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_Current);
+ if (val->intval & 0x8000) {
+ /* Negative */
+ val->intval = ~val->intval & 0x7fff;
+ val->intval++;
+ val->intval *= -1;
+ }
+ val->intval >>= 4;
+ val->intval *= 1000000 * 25 / chip->pdata->r_sns;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ if (chip->pdata->enable_current_sense) {
+ val->intval = max17042_read_reg(chip->client,
+ MAX17042_AvgCurrent);
+ if (val->intval & 0x8000) {
+ /* Negative */
+ val->intval = ~val->intval & 0x7fff;
+ val->intval++;
+ val->intval *= -1;
+ }
+ val->intval *= 1562500 / chip->pdata->r_sns;
+ } else {
+ return -EINVAL;
+ }
+ break;
default:
return -EINVAL;
}
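
The new property handlers above repeatedly decode raw 16-bit MAX17042 registers: two's-complement handling for temperature and current, and per-register LSB scaling (20 mV per LSB in the high byte of MinMaxVolt, 1/256 degree Celsius for TEMP reported in tenths of a degree). A small stand-alone sketch of the same arithmetic, runnable in user space:

#include <stdint.h>
#include <stdio.h>

/* Sign-extend a raw 16-bit register the same way the handlers above do. */
static int sign16(uint16_t raw)
{
        return raw & 0x8000 ? -(int)((0x7fff & ~raw) + 1) : raw;
}

static int temp_to_deci_celsius(uint16_t raw)
{
        return sign16(raw) * 10 / 256;  /* LSB = 1/256 C, result in 0.1 C */
}

static int minmaxvolt_max_to_uv(uint16_t raw)
{
        return (raw >> 8) * 20000;      /* high byte, LSB = 20 mV, result in uV */
}

int main(void)
{
        printf("%d\n", temp_to_deci_celsius(0x1900));   /* 25 C -> 250 */
        printf("%d\n", minmaxvolt_max_to_uv(0xd200));   /* 0xd2 * 20 mV = 4200000 */
        return 0;
}
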
@@ -180,18 +204,30 @@ static int __devinit max17042_probe(struct i2c_client *client,
chip->battery.properties = max17042_battery_props;
chip->battery.num_properties = ARRAY_SIZE(max17042_battery_props);
+ /* When current is not measured,
+ * CURRENT_NOW and CURRENT_AVG properties should be invisible. */
+ if (!chip->pdata->enable_current_sense)
+ chip->battery.num_properties -= 2;
+
ret = power_supply_register(&client->dev, &chip->battery);
if (ret) {
dev_err(&client->dev, "failed: power supply register\n");
- i2c_set_clientdata(client, NULL);
kfree(chip);
return ret;
}
+ /* Initialize registers according to values from the platform data */
+ if (chip->pdata->init_data)
+ max17042_set_reg(client, chip->pdata->init_data,
+ chip->pdata->num_init_data);
+
if (!chip->pdata->enable_current_sense) {
max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
max17042_write_reg(client, MAX17042_MiscCFG, 0x0003);
max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
+ } else {
+ if (chip->pdata->r_sns == 0)
+ chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;
}
return 0;
@@ -202,7 +238,6 @@ static int __devexit max17042_remove(struct i2c_client *client)
struct max17042_chip *chip = i2c_get_clientdata(client);
power_supply_unregister(&chip->battery);
- i2c_set_clientdata(client, NULL);
kfree(chip);
return 0;
}
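A minimal userspace sketch of the MAX17042 average-current conversion added above, kept outside the patch itself: it assumes a raw 16-bit register value and a sense resistor given in micro-ohms (the r_sns convention used by the platform data), and mirrors the driver's two's-complement fix-up and its 1562500 / r_sns scaling.

#include <stdint.h>
#include <stdio.h>

/* Convert a raw MAX17042_AvgCurrent reading to microamps. */
static int avg_current_to_uA(uint16_t raw, unsigned int r_sns_uohm)
{
	int val = raw;

	if (val & 0x8000) {			/* negative: undo two's complement */
		val = (~val & 0x7fff) + 1;
		val = -val;
	}
	/* LSB is 1.5625 uV across Rsense; the driver folds this into 1562500 / r_sns */
	return val * (int)(1562500 / r_sns_uohm);
}

int main(void)
{
	/* 0xff38 is -200 counts; with a 10 mOhm (10000 uOhm) sense resistor */
	printf("%d uA\n", avg_current_to_uA(0xff38, 10000));
	return 0;
}

The CURRENT_NOW branch follows the same pattern, just with its own >> 4 step and the 1000000 * 25 / r_sns scale factor shown in the hunk.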
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
index 33ff0e37809..a9b0209a2f5 100644
--- a/drivers/power/max8903_charger.c
+++ b/drivers/power/max8903_charger.c
@@ -28,7 +28,7 @@
#include <linux/power/max8903_charger.h>
struct max8903_data {
- struct max8903_pdata *pdata;
+ struct max8903_pdata pdata;
struct device *dev;
struct power_supply psy;
bool fault;
@@ -52,8 +52,8 @@ static int max8903_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
- if (data->pdata->chg) {
- if (gpio_get_value(data->pdata->chg) == 0)
+ if (data->pdata.chg) {
+ if (gpio_get_value(data->pdata.chg) == 0)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (data->usb_in || data->ta_in)
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -80,7 +80,7 @@ static int max8903_get_property(struct power_supply *psy,
static irqreturn_t max8903_dcin(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = data->pdata;
+ struct max8903_pdata *pdata = &data->pdata;
bool ta_in;
enum power_supply_type old_type;
@@ -121,7 +121,7 @@ static irqreturn_t max8903_dcin(int irq, void *_data)
static irqreturn_t max8903_usbin(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = data->pdata;
+ struct max8903_pdata *pdata = &data->pdata;
bool usb_in;
enum power_supply_type old_type;
@@ -160,7 +160,7 @@ static irqreturn_t max8903_usbin(int irq, void *_data)
static irqreturn_t max8903_fault(int irq, void *_data)
{
struct max8903_data *data = _data;
- struct max8903_pdata *pdata = data->pdata;
+ struct max8903_pdata *pdata = &data->pdata;
bool fault;
fault = gpio_get_value(pdata->flt) ? false : true;
@@ -193,7 +193,7 @@ static __devinit int max8903_probe(struct platform_device *pdev)
dev_err(dev, "Cannot allocate memory.\n");
return -ENOMEM;
}
- data->pdata = pdata;
+ memcpy(&data->pdata, pdata, sizeof(struct max8903_pdata));
data->dev = dev;
platform_set_drvdata(pdev, data);
@@ -349,7 +349,7 @@ static __devexit int max8903_remove(struct platform_device *pdev)
struct max8903_data *data = platform_get_drvdata(pdev);
if (data) {
- struct max8903_pdata *pdata = data->pdata;
+ struct max8903_pdata *pdata = &data->pdata;
if (pdata->flt)
free_irq(gpio_to_irq(pdata->flt), data);
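The max8903 change above replaces a borrowed pointer with an owned copy of the platform data. A small sketch of that pattern, with illustrative structure names rather than the real max8903 definitions:

#include <string.h>

struct pdata { int chg_gpio, flt_gpio; };	/* hypothetical fields */
struct drvdata { struct pdata pdata; };		/* owned copy, not a pointer */

static void drv_bind(struct drvdata *d, const struct pdata *p)
{
	/* Copy once at probe time, so later IRQ handlers never depend on
	 * the lifetime of the caller-supplied platform data. */
	memcpy(&d->pdata, p, sizeof(d->pdata));
}

int main(void)
{
	struct pdata p = { 1, 2 };
	struct drvdata d;

	drv_bind(&d, &p);
	return d.pdata.flt_gpio == 2 ? 0 : 1;
}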
diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c
new file mode 100644
index 00000000000..ffc5033ea9c
--- /dev/null
+++ b/drivers/power/max8997_charger.c
@@ -0,0 +1,207 @@
+/*
+ * max8997_charger.c - Power supply consumer driver for the Maxim 8997/8966
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+
+struct charger_data {
+ struct device *dev;
+ struct max8997_dev *iodev;
+ struct power_supply battery;
+};
+
+static enum power_supply_property max8997_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS, /* "FULL" or "NOT FULL" only. */
+ POWER_SUPPLY_PROP_PRESENT, /* the presence of battery */
+ POWER_SUPPLY_PROP_ONLINE, /* charger is active or not */
+};
+
+/* Note that the charger control is done by a current regulator "CHARGER" */
+static int max8997_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct charger_data *charger = container_of(psy,
+ struct charger_data, battery);
+ struct i2c_client *i2c = charger->iodev->i2c;
+ int ret;
+ u8 reg;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = 0;
+ ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
+ if (ret)
+ return ret;
+ if ((reg & (1 << 0)) == 0x1)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 0;
+ ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
+ if (ret)
+ return ret;
+ if ((reg & (1 << 2)) == 0x0)
+ val->intval = 1;
+
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = 0;
+ ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
+ if (ret)
+ return ret;
+ /* DCINOK */
+ if (reg & (1 << 1))
+ val->intval = 1;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static __devinit int max8997_battery_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct charger_data *charger;
+ struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+
+ if (!pdata)
+ return -EINVAL;
+
+ if (pdata->eoc_mA) {
+ int val = (pdata->eoc_mA - 50) / 10;
+ if (val < 0)
+ val = 0;
+ if (val > 0xf)
+ val = 0xf;
+
+ ret = max8997_update_reg(iodev->i2c,
+ MAX8997_REG_MBCCTRL5, val, 0xf);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot use i2c bus.\n");
+ return ret;
+ }
+ }
+
+ switch (pdata->timeout) {
+ case 5:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x2 << 4, 0x7 << 4);
+ break;
+ case 6:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x3 << 4, 0x7 << 4);
+ break;
+ case 7:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x4 << 4, 0x7 << 4);
+ break;
+ case 0:
+ ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
+ 0x7 << 4, 0x7 << 4);
+ break;
+ default:
+ dev_err(&pdev->dev, "incorrect timeout value (%d)\n",
+ pdata->timeout);
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot use i2c bus.\n");
+ return ret;
+ }
+
+ charger = kzalloc(sizeof(struct charger_data), GFP_KERNEL);
+ if (charger == NULL) {
+ dev_err(&pdev->dev, "Cannot allocate memory.\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, charger);
+
+ charger->battery.name = "max8997_pmic";
+ charger->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ charger->battery.get_property = max8997_battery_get_property;
+ charger->battery.properties = max8997_battery_props;
+ charger->battery.num_properties = ARRAY_SIZE(max8997_battery_props);
+
+ charger->dev = &pdev->dev;
+ charger->iodev = iodev;
+
+ ret = power_supply_register(&pdev->dev, &charger->battery);
+ if (ret) {
+ dev_err(&pdev->dev, "failed: power supply register\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ kfree(charger);
+ return ret;
+}
+
+static int __devexit max8997_battery_remove(struct platform_device *pdev)
+{
+ struct charger_data *charger = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&charger->battery);
+ kfree(charger);
+ return 0;
+}
+
+static const struct platform_device_id max8997_battery_id[] = {
+ { "max8997-battery", 0 },
+ { },
+};
+
+static struct platform_driver max8997_battery_driver = {
+ .driver = {
+ .name = "max8997-battery",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8997_battery_probe,
+ .remove = __devexit_p(max8997_battery_remove),
+ .id_table = max8997_battery_id,
+};
+
+static int __init max8997_battery_init(void)
+{
+ return platform_driver_register(&max8997_battery_driver);
+}
+subsys_initcall(max8997_battery_init);
+
+static void __exit max8997_battery_cleanup(void)
+{
+ platform_driver_unregister(&max8997_battery_driver);
+}
+module_exit(max8997_battery_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 8997/8966 battery control driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
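A compact sketch of the end-of-charge current encoding used in max8997_battery_probe() above, assuming the 50 mA base and 10 mA step implied by the code; a signed intermediate keeps the lower clamp meaningful, and the result lands in the 4-bit field written to MBCCTRL5.

#include <stdio.h>

static unsigned int eoc_ma_to_field(int eoc_mA)
{
	int val = (eoc_mA - 50) / 10;

	if (val < 0)
		val = 0;
	if (val > 0xf)
		val = 0xf;
	return (unsigned int)val;	/* written to the low 4 bits of MBCCTRL5 */
}

int main(void)
{
	printf("%u %u %u\n", eoc_ma_to_field(40), eoc_ma_to_field(120),
	       eoc_ma_to_field(400));	/* -> 0 7 15 */
	return 0;
}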
diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c
new file mode 100644
index 00000000000..ef8efadb58c
--- /dev/null
+++ b/drivers/power/max8998_charger.c
@@ -0,0 +1,219 @@
+/*
+ * max8998_charger.c - Power supply consumer driver for the Maxim 8998/LP3974
+ *
+ * Copyright (C) 2009-2010 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/mfd/max8998.h>
+#include <linux/mfd/max8998-private.h>
+
+struct max8998_battery_data {
+ struct device *dev;
+ struct max8998_dev *iodev;
+ struct power_supply battery;
+};
+
+static enum power_supply_property max8998_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT, /* the presence of battery */
+ POWER_SUPPLY_PROP_ONLINE, /* charger is active or not */
+};
+
+/* Note that the charger control is done by a current regulator "CHARGER" */
+static int max8998_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct max8998_battery_data *max8998 = container_of(psy,
+ struct max8998_battery_data, battery);
+ struct i2c_client *i2c = max8998->iodev->i2c;
+ int ret;
+ u8 reg;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ ret = max8998_read_reg(i2c, MAX8998_REG_STATUS2, &reg);
+ if (ret)
+ return ret;
+ if (reg & (1 << 4))
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = max8998_read_reg(i2c, MAX8998_REG_STATUS2, &reg);
+ if (ret)
+ return ret;
+ if (reg & (1 << 3))
+ val->intval = 0;
+ else
+ val->intval = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static __devinit int max8998_battery_probe(struct platform_device *pdev)
+{
+ struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max8998_battery_data *max8998;
+ struct i2c_client *i2c;
+ int ret = 0;
+
+ if (!pdata) {
+ dev_err(pdev->dev.parent, "No platform init data supplied\n");
+ return -ENODEV;
+ }
+
+ max8998 = kzalloc(sizeof(struct max8998_battery_data), GFP_KERNEL);
+ if (!max8998)
+ return -ENOMEM;
+
+ max8998->dev = &pdev->dev;
+ max8998->iodev = iodev;
+ platform_set_drvdata(pdev, max8998);
+ i2c = max8998->iodev->i2c;
+
+ /* Setup "End of Charge" */
+ /* If the EOC value is 0, keep the value already set by the
+  * bootloader or the hardware default. */
+ if (pdata->eoc >= 10 && pdata->eoc <= 45) {
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1,
+ (pdata->eoc / 5 - 2) << 5, 0x7 << 5);
+ } else if (pdata->eoc == 0) {
+ dev_dbg(max8998->dev,
+ "EOC value not set: leave it unchanged.\n");
+ } else {
+ dev_err(max8998->dev, "Invalid EOC value\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Setup Charge Restart Level */
+ switch (pdata->restart) {
+ case 100:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x1 << 3, 0x3 << 3);
+ break;
+ case 150:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x0 << 3, 0x3 << 3);
+ break;
+ case 200:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x2 << 3, 0x3 << 3);
+ break;
+ case -1:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR1, 0x3 << 3, 0x3 << 3);
+ break;
+ case 0:
+ dev_dbg(max8998->dev,
+ "Restart Level not set: leave it unchanged.\n");
+ break;
+ default:
+ dev_err(max8998->dev, "Invalid Restart Level\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Setup Charge Full Timeout */
+ switch (pdata->timeout) {
+ case 5:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x0 << 4, 0x3 << 4);
+ break;
+ case 6:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x1 << 4, 0x3 << 4);
+ break;
+ case 7:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x2 << 4, 0x3 << 4);
+ break;
+ case -1:
+ max8998_update_reg(i2c, MAX8998_REG_CHGR2, 0x3 << 4, 0x3 << 4);
+ break;
+ case 0:
+ dev_dbg(max8998->dev,
+ "Full Timeout not set: leave it unchanged.\n");
+ default:
+ dev_err(max8998->dev, "Invalid Full Timeout value\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ max8998->battery.name = "max8998_pmic";
+ max8998->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+ max8998->battery.get_property = max8998_battery_get_property;
+ max8998->battery.properties = max8998_battery_props;
+ max8998->battery.num_properties = ARRAY_SIZE(max8998_battery_props);
+
+ ret = power_supply_register(max8998->dev, &max8998->battery);
+ if (ret) {
+ dev_err(max8998->dev, "failed: power supply register\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ kfree(max8998);
+ return ret;
+}
+
+static int __devexit max8998_battery_remove(struct platform_device *pdev)
+{
+ struct max8998_battery_data *max8998 = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&max8998->battery);
+ kfree(max8998);
+
+ return 0;
+}
+
+static const struct platform_device_id max8998_battery_id[] = {
+ { "max8998-battery", TYPE_MAX8998 },
+ { },
+};
+
+static struct platform_driver max8998_battery_driver = {
+ .driver = {
+ .name = "max8998-battery",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8998_battery_probe,
+ .remove = __devexit_p(max8998_battery_remove),
+ .id_table = max8998_battery_id,
+};
+
+static int __init max8998_battery_init(void)
+{
+ return platform_driver_register(&max8998_battery_driver);
+}
+module_init(max8998_battery_init);
+
+static void __exit max8998_battery_cleanup(void)
+{
+ platform_driver_unregister(&max8998_battery_driver);
+}
+module_exit(max8998_battery_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 8998 battery control driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:max8998-battery");
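The max8998 probe above maps the platform "eoc" percentage straight into a register field. A small sketch of that encoding, assuming the 10..45 percent range and the bits [7:5] placement in CHGR1 shown in the code:

#include <stdio.h>

static int eoc_percent_to_chgr1(int eoc)
{
	if (eoc < 10 || eoc > 45)
		return -1;			/* caller treats this as invalid */
	return (eoc / 5 - 2) << 5;		/* 10% -> 0, 45% -> 7, at bits [7:5] */
}

int main(void)
{
	printf("0x%02x 0x%02x\n", eoc_percent_to_chgr1(10), eoc_percent_to_chgr1(45));
	return 0;
}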
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index d36c289aaef..d32d0d70f9b 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -20,6 +20,7 @@
#include <linux/s3c_adc_battery.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <plat/adc.h>
@@ -266,7 +267,7 @@ static irqreturn_t s3c_adc_bat_charged(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init s3c_adc_bat_probe(struct platform_device *pdev)
+static int __devinit s3c_adc_bat_probe(struct platform_device *pdev)
{
struct s3c_adc_client *client;
struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index 92c16e1677b..54b9198fa57 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -62,7 +62,7 @@
#define TWL4030_MSTATEC_COMPLETE4 0x0e
static bool allow_usb;
-module_param(allow_usb, bool, 1);
+module_param(allow_usb, bool, 0644);
MODULE_PARM_DESC(allow_usb, "Allow USB charge drawing default current");
struct twl4030_bci {
@@ -425,7 +425,7 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
{
struct twl4030_bci *bci;
int ret;
- int reg;
+ u32 reg;
bci = kzalloc(sizeof(*bci), GFP_KERNEL);
if (bci == NULL)
@@ -486,7 +486,7 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
}
/* Enable interrupts now. */
- reg = ~(TWL4030_ICHGLOW | TWL4030_ICHGEOC | TWL4030_TBATOR2 |
+ reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_ICHGEOC | TWL4030_TBATOR2 |
TWL4030_TBATOR1 | TWL4030_BATSTS);
ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
TWL4030_INTERRUPTS_BCIIMR1A);
@@ -495,7 +495,7 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
goto fail_unmask_interrupts;
}
- reg = ~(TWL4030_VBATOV | TWL4030_VBUSOV | TWL4030_ACCHGOV);
+ reg = ~(u32)(TWL4030_VBATOV | TWL4030_VBUSOV | TWL4030_ACCHGOV);
ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
TWL4030_INTERRUPTS_BCIIMR2A);
if (ret < 0)
@@ -572,7 +572,7 @@ static void __exit twl4030_bci_exit(void)
}
module_exit(twl4030_bci_exit);
-MODULE_AUTHOR("Gražydas Ignotas");
+MODULE_AUTHOR("Gražvydas Ignotas");
MODULE_DESCRIPTION("TWL4030 Battery Charger Interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:twl4030_bci");
diff --git a/drivers/power/wm831x_backup.c b/drivers/power/wm831x_backup.c
index 0fd130d80f5..e648cbea1e6 100644
--- a/drivers/power/wm831x_backup.c
+++ b/drivers/power/wm831x_backup.c
@@ -22,6 +22,7 @@
struct wm831x_backup {
struct wm831x *wm831x;
struct power_supply backup;
+ char name[20];
};
static int wm831x_backup_read_voltage(struct wm831x *wm831x,
@@ -163,6 +164,7 @@ static enum power_supply_property wm831x_backup_props[] = {
static __devinit int wm831x_backup_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
+ struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
struct wm831x_backup *devdata;
struct power_supply *backup;
int ret;
@@ -182,7 +184,14 @@ static __devinit int wm831x_backup_probe(struct platform_device *pdev)
*/
wm831x_config_backup(wm831x);
- backup->name = "wm831x-backup";
+ if (wm831x_pdata && wm831x_pdata->wm831x_num)
+ snprintf(devdata->name, sizeof(devdata->name),
+ "wm831x-backup.%d", wm831x_pdata->wm831x_num);
+ else
+ snprintf(devdata->name, sizeof(devdata->name),
+ "wm831x-backup");
+
+ backup->name = devdata->name;
backup->type = POWER_SUPPLY_TYPE_BATTERY;
backup->properties = wm831x_backup_props;
backup->num_properties = ARRAY_SIZE(wm831x_backup_props);
@@ -203,6 +212,7 @@ static __devexit int wm831x_backup_remove(struct platform_device *pdev)
struct wm831x_backup *devdata = platform_get_drvdata(pdev);
power_supply_unregister(&devdata->backup);
+ kfree(devdata->backup.name);
kfree(devdata);
return 0;
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index ddf8cf5f320..6cc2ca6427f 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -24,6 +24,9 @@ struct wm831x_power {
struct power_supply wall;
struct power_supply usb;
struct power_supply battery;
+ char wall_name[20];
+ char usb_name[20];
+ char battery_name[20];
};
static int wm831x_power_check_online(struct wm831x *wm831x, int supply,
@@ -486,6 +489,7 @@ static irqreturn_t wm831x_pwr_src_irq(int irq, void *data)
static __devinit int wm831x_power_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
+ struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
struct wm831x_power *power;
struct power_supply *usb;
struct power_supply *battery;
@@ -503,12 +507,28 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
battery = &power->battery;
wall = &power->wall;
+ if (wm831x_pdata && wm831x_pdata->wm831x_num) {
+ snprintf(power->wall_name, sizeof(power->wall_name),
+ "wm831x-wall.%d", wm831x_pdata->wm831x_num);
+ snprintf(power->battery_name, sizeof(power->battery_name),
+ "wm831x-battery.%d", wm831x_pdata->wm831x_num);
+ snprintf(power->usb_name, sizeof(power->usb_name),
+ "wm831x-usb.%d", wm831x_pdata->wm831x_num);
+ } else {
+ snprintf(power->wall_name, sizeof(power->wall_name),
+ "wm831x-wall");
+ snprintf(power->battery_name, sizeof(power->battery_name),
+ "wm831x-battery");
+ snprintf(power->usb_name, sizeof(power->usb_name),
+ "wm831x-usb");
+ }
+
/* We ignore configuration failures since we can still read back
* the status without enabling the charger.
*/
wm831x_config_battery(wm831x);
- wall->name = "wm831x-wall";
+ wall->name = power->wall_name;
wall->type = POWER_SUPPLY_TYPE_MAINS;
wall->properties = wm831x_wall_props;
wall->num_properties = ARRAY_SIZE(wm831x_wall_props);
@@ -517,7 +537,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
if (ret)
goto err_kmalloc;
- battery->name = "wm831x-battery";
+ battery->name = power->battery_name;
battery->properties = wm831x_bat_props;
battery->num_properties = ARRAY_SIZE(wm831x_bat_props);
battery->get_property = wm831x_bat_get_prop;
@@ -526,7 +546,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
if (ret)
goto err_wall;
- usb->name = "wm831x-usb",
+ usb->name = power->usb_name,
usb->type = POWER_SUPPLY_TYPE_USB;
usb->properties = wm831x_usb_props;
usb->num_properties = ARRAY_SIZE(wm831x_usb_props);
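A brief sketch of the per-instance supply naming added for wm831x above: when a non-zero instance number is supplied in the platform data the name gets a ".N" suffix, otherwise the bare name is kept. Buffer sizes mirror the 20-byte arrays in the driver; the helper name is illustrative.

#include <stdio.h>

static void supply_name(char *buf, size_t len, const char *base, int num)
{
	if (num)
		snprintf(buf, len, "%s.%d", base, num);
	else
		snprintf(buf, len, "%s", base);
}

int main(void)
{
	char name[20];

	supply_name(name, sizeof(name), "wm831x-wall", 2);
	puts(name);			/* wm831x-wall.2 */
	supply_name(name, sizeof(name), "wm831x-wall", 0);
	puts(name);			/* wm831x-wall */
	return 0;
}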
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index d7ed20f293d..c7fd2c0e3f2 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -235,6 +235,7 @@ config REGULATOR_TPS6105X
config REGULATOR_TPS65023
tristate "TI TPS65023 Power regulators"
depends on I2C
+ select REGMAP_I2C
help
This driver supports TPS65023 voltage regulator chips. TPS65023 provides
three step-down converters and two general-purpose LDO voltage regulators.
@@ -248,6 +249,12 @@ config REGULATOR_TPS6507X
three step-down converters and two general-purpose LDO voltage regulators.
It supports TI's software based Class-2 SmartReflex implementation.
+config REGULATOR_TPS65912
+ tristate "TI TPS65912 Power regulator"
+ depends on (MFD_TPS65912_I2C || MFD_TPS65912_SPI)
+ help
+ This driver supports the TPS65912 voltage regulator chip.
+
config REGULATOR_88PM8607
bool "Marvell 88PM8607 Power regulators"
depends on MFD_88PM860X=y
@@ -303,5 +310,12 @@ config REGULATOR_TPS65910
help
This driver supports TPS65910 voltage regulator chips.
+config REGULATOR_AAT2870
+ tristate "AnalogicTech AAT2870 Regulators"
+ depends on MFD_AAT2870_CORE
+ help
+ If you have an AnalogicTech AAT2870, say Y to enable the
+ regulator driver.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 3932d2ec38f..040d5aa6353 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -38,10 +38,12 @@ obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
+obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
+obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
new file mode 100644
index 00000000000..cd4104542f0
--- /dev/null
+++ b/drivers/regulator/aat2870-regulator.c
@@ -0,0 +1,232 @@
+/*
+ * linux/drivers/regulator/aat2870-regulator.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Author: Jin Park <jinyoungp@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/aat2870.h>
+
+struct aat2870_regulator {
+ struct platform_device *pdev;
+ struct regulator_desc desc;
+
+ const int *voltages; /* uV */
+
+ int min_uV;
+ int max_uV;
+
+ u8 enable_addr;
+ u8 enable_shift;
+ u8 enable_mask;
+
+ u8 voltage_addr;
+ u8 voltage_shift;
+ u8 voltage_mask;
+};
+
+static int aat2870_ldo_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+
+ return ri->voltages[selector];
+}
+
+static int aat2870_ldo_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+
+ return aat2870->update(aat2870, ri->voltage_addr, ri->voltage_mask,
+ (selector << ri->voltage_shift) & ri->voltage_mask);
+}
+
+static int aat2870_ldo_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+ u8 val;
+ int ret;
+
+ ret = aat2870->read(aat2870, ri->voltage_addr, &val);
+ if (ret)
+ return ret;
+
+ return (val & ri->voltage_mask) >> ri->voltage_shift;
+}
+
+static int aat2870_ldo_enable(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+
+ return aat2870->update(aat2870, ri->enable_addr, ri->enable_mask,
+ ri->enable_mask);
+}
+
+static int aat2870_ldo_disable(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+
+ return aat2870->update(aat2870, ri->enable_addr, ri->enable_mask, 0);
+}
+
+static int aat2870_ldo_is_enabled(struct regulator_dev *rdev)
+{
+ struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
+ struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
+ u8 val;
+ int ret;
+
+ ret = aat2870->read(aat2870, ri->enable_addr, &val);
+ if (ret)
+ return ret;
+
+ return val & ri->enable_mask ? 1 : 0;
+}
+
+static struct regulator_ops aat2870_ldo_ops = {
+ .list_voltage = aat2870_ldo_list_voltage,
+ .set_voltage_sel = aat2870_ldo_set_voltage_sel,
+ .get_voltage_sel = aat2870_ldo_get_voltage_sel,
+ .enable = aat2870_ldo_enable,
+ .disable = aat2870_ldo_disable,
+ .is_enabled = aat2870_ldo_is_enabled,
+};
+
+static const int aat2870_ldo_voltages[] = {
+ 1200000, 1300000, 1500000, 1600000,
+ 1800000, 2000000, 2200000, 2500000,
+ 2600000, 2700000, 2800000, 2900000,
+ 3000000, 3100000, 3200000, 3300000,
+};
+
+#define AAT2870_LDO(ids) \
+ { \
+ .desc = { \
+ .name = #ids, \
+ .id = AAT2870_ID_##ids, \
+ .n_voltages = ARRAY_SIZE(aat2870_ldo_voltages), \
+ .ops = &aat2870_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ .voltages = aat2870_ldo_voltages, \
+ .min_uV = 1200000, \
+ .max_uV = 3300000, \
+ }
+
+static struct aat2870_regulator aat2870_regulators[] = {
+ AAT2870_LDO(LDOA),
+ AAT2870_LDO(LDOB),
+ AAT2870_LDO(LDOC),
+ AAT2870_LDO(LDOD),
+};
+
+static struct aat2870_regulator *aat2870_get_regulator(int id)
+{
+ struct aat2870_regulator *ri = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aat2870_regulators); i++) {
+ ri = &aat2870_regulators[i];
+ if (ri->desc.id == id)
+ break;
+ }
+
+ if (!ri)
+ return NULL;
+
+ ri->enable_addr = AAT2870_LDO_EN;
+ ri->enable_shift = id - AAT2870_ID_LDOA;
+ ri->enable_mask = 0x1 << ri->enable_shift;
+
+ ri->voltage_addr = (id - AAT2870_ID_LDOA) / 2 ?
+ AAT2870_LDO_CD : AAT2870_LDO_AB;
+ ri->voltage_shift = (id - AAT2870_ID_LDOA) % 2 ? 0 : 4;
+ ri->voltage_mask = 0xF << ri->voltage_shift;
+
+ return ri;
+}
+
+static int aat2870_regulator_probe(struct platform_device *pdev)
+{
+ struct aat2870_regulator *ri;
+ struct regulator_dev *rdev;
+
+ ri = aat2870_get_regulator(pdev->id);
+ if (!ri) {
+ dev_err(&pdev->dev, "Invalid device ID, %d\n", pdev->id);
+ return -EINVAL;
+ }
+ ri->pdev = pdev;
+
+ rdev = regulator_register(&ri->desc, &pdev->dev,
+ pdev->dev.platform_data, ri);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "Failed to register regulator %s\n",
+ ri->desc.name);
+ return PTR_ERR(rdev);
+ }
+ platform_set_drvdata(pdev, rdev);
+
+ return 0;
+}
+
+static int __devexit aat2870_regulator_remove(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+ regulator_unregister(rdev);
+ return 0;
+}
+
+static struct platform_driver aat2870_regulator_driver = {
+ .driver = {
+ .name = "aat2870-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = aat2870_regulator_probe,
+ .remove = __devexit_p(aat2870_regulator_remove),
+};
+
+static int __init aat2870_regulator_init(void)
+{
+ return platform_driver_register(&aat2870_regulator_driver);
+}
+subsys_initcall(aat2870_regulator_init);
+
+static void __exit aat2870_regulator_exit(void)
+{
+ platform_driver_unregister(&aat2870_regulator_driver);
+}
+module_exit(aat2870_regulator_exit);
+
+MODULE_DESCRIPTION("AnalogicTech AAT2870 Regulator");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d3e38790906..d8e6a429e8b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -20,6 +20,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/async.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/suspend.h>
@@ -33,6 +34,8 @@
#include "dummy.h"
+#define rdev_crit(rdev, fmt, ...) \
+ pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
#define rdev_err(rdev, fmt, ...) \
pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
#define rdev_warn(rdev, fmt, ...) \
@@ -78,11 +81,13 @@ struct regulator {
char *supply_name;
struct device_attribute dev_attr;
struct regulator_dev *rdev;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
};
static int _regulator_is_enabled(struct regulator_dev *rdev);
-static int _regulator_disable(struct regulator_dev *rdev,
- struct regulator_dev **supply_rdev_ptr);
+static int _regulator_disable(struct regulator_dev *rdev);
static int _regulator_get_voltage(struct regulator_dev *rdev);
static int _regulator_get_current_limit(struct regulator_dev *rdev);
static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
@@ -90,6 +95,9 @@ static void _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV);
+static struct regulator *create_regulator(struct regulator_dev *rdev,
+ struct device *dev,
+ const char *supply_name);
static const char *rdev_get_name(struct regulator_dev *rdev)
{
@@ -143,8 +151,11 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
if (*min_uV < rdev->constraints->min_uV)
*min_uV = rdev->constraints->min_uV;
- if (*min_uV > *max_uV)
+ if (*min_uV > *max_uV) {
+ rdev_err(rdev, "unsupportable voltage range: %d-%duV\n",
+ *min_uV, *max_uV);
return -EINVAL;
+ }
return 0;
}
@@ -197,8 +208,11 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
if (*min_uA < rdev->constraints->min_uA)
*min_uA = rdev->constraints->min_uA;
- if (*min_uA > *max_uA)
+ if (*min_uA > *max_uA) {
+ rdev_err(rdev, "unsupportable current range: %d-%duA\n",
+ *min_uA, *max_uA);
return -EINVAL;
+ }
return 0;
}
@@ -213,6 +227,7 @@ static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
case REGULATOR_MODE_STANDBY:
break;
default:
+ rdev_err(rdev, "invalid mode %x specified\n", *mode);
return -EINVAL;
}
@@ -779,7 +794,6 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
if (ret < 0) {
rdev_err(rdev, "failed to apply %duV constraint\n",
rdev->constraints->min_uV);
- rdev->constraints = NULL;
return ret;
}
}
@@ -882,7 +896,6 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = suspend_prepare(rdev, rdev->constraints->initial_state);
if (ret < 0) {
rdev_err(rdev, "failed to set suspend state\n");
- rdev->constraints = NULL;
goto out;
}
}
@@ -909,13 +922,15 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = ops->enable(rdev);
if (ret < 0) {
rdev_err(rdev, "failed to enable\n");
- rdev->constraints = NULL;
goto out;
}
}
print_constraints(rdev);
+ return 0;
out:
+ kfree(rdev->constraints);
+ rdev->constraints = NULL;
return ret;
}
@@ -929,21 +944,20 @@ out:
* core if it's child is enabled.
*/
static int set_supply(struct regulator_dev *rdev,
- struct regulator_dev *supply_rdev)
+ struct regulator_dev *supply_rdev)
{
int err;
- err = sysfs_create_link(&rdev->dev.kobj, &supply_rdev->dev.kobj,
- "supply");
- if (err) {
- rdev_err(rdev, "could not add device link %s err %d\n",
- supply_rdev->dev.kobj.name, err);
- goto out;
+ rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
+
+ rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
+ if (IS_ERR(rdev->supply)) {
+ err = PTR_ERR(rdev->supply);
+ rdev->supply = NULL;
+ return err;
}
- rdev->supply = supply_rdev;
- list_add(&rdev->slist, &supply_rdev->supply_list);
-out:
- return err;
+
+ return 0;
}
/**
@@ -1032,7 +1046,7 @@ static void unset_regulator_supplies(struct regulator_dev *rdev)
}
}
-#define REG_STR_SIZE 32
+#define REG_STR_SIZE 64
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
@@ -1052,8 +1066,9 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
if (dev) {
/* create a 'requested_microamps_name' sysfs entry */
- size = scnprintf(buf, REG_STR_SIZE, "microamps_requested_%s",
- supply_name);
+ size = scnprintf(buf, REG_STR_SIZE,
+ "microamps_requested_%s-%s",
+ dev_name(dev), supply_name);
if (size >= REG_STR_SIZE)
goto overflow_err;
@@ -1088,7 +1103,28 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
dev->kobj.name, err);
goto link_name_err;
}
+ } else {
+ regulator->supply_name = kstrdup(supply_name, GFP_KERNEL);
+ if (regulator->supply_name == NULL)
+ goto attr_err;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ regulator->debugfs = debugfs_create_dir(regulator->supply_name,
+ rdev->debugfs);
+ if (IS_ERR_OR_NULL(regulator->debugfs)) {
+ rdev_warn(rdev, "Failed to create debugfs directory\n");
+ regulator->debugfs = NULL;
+ } else {
+ debugfs_create_u32("uA_load", 0444, regulator->debugfs,
+ &regulator->uA_load);
+ debugfs_create_u32("min_uV", 0444, regulator->debugfs,
+ &regulator->min_uV);
+ debugfs_create_u32("max_uV", 0444, regulator->debugfs,
+ &regulator->max_uV);
}
+#endif
+
mutex_unlock(&rdev->mutex);
return regulator;
link_name_err:
@@ -1267,13 +1303,17 @@ void regulator_put(struct regulator *regulator)
mutex_lock(&regulator_list_mutex);
rdev = regulator->rdev;
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(regulator->debugfs);
+#endif
+
/* remove any sysfs entries */
if (regulator->dev) {
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
- kfree(regulator->supply_name);
device_remove_file(regulator->dev, &regulator->dev_attr);
kfree(regulator->dev_attr.attr.name);
}
+ kfree(regulator->supply_name);
list_del(&regulator->list);
kfree(regulator);
@@ -1301,19 +1341,6 @@ static int _regulator_enable(struct regulator_dev *rdev)
{
int ret, delay;
- if (rdev->use_count == 0) {
- /* do we need to enable the supply regulator first */
- if (rdev->supply) {
- mutex_lock(&rdev->supply->mutex);
- ret = _regulator_enable(rdev->supply);
- mutex_unlock(&rdev->supply->mutex);
- if (ret < 0) {
- rdev_err(rdev, "failed to enable: %d\n", ret);
- return ret;
- }
- }
- }
-
/* check voltage and requested load before enabling */
if (rdev->constraints &&
(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS))
@@ -1388,19 +1415,27 @@ int regulator_enable(struct regulator *regulator)
struct regulator_dev *rdev = regulator->rdev;
int ret = 0;
+ if (rdev->supply) {
+ ret = regulator_enable(rdev->supply);
+ if (ret != 0)
+ return ret;
+ }
+
mutex_lock(&rdev->mutex);
ret = _regulator_enable(rdev);
mutex_unlock(&rdev->mutex);
+
+ if (ret != 0 && rdev->supply)
+ regulator_disable(rdev->supply);
+
return ret;
}
EXPORT_SYMBOL_GPL(regulator_enable);
/* locks held by regulator_disable() */
-static int _regulator_disable(struct regulator_dev *rdev,
- struct regulator_dev **supply_rdev_ptr)
+static int _regulator_disable(struct regulator_dev *rdev)
{
int ret = 0;
- *supply_rdev_ptr = NULL;
if (WARN(rdev->use_count <= 0,
"unbalanced disables for %s\n", rdev_get_name(rdev)))
@@ -1427,9 +1462,6 @@ static int _regulator_disable(struct regulator_dev *rdev,
NULL);
}
- /* decrease our supplies ref count and disable if required */
- *supply_rdev_ptr = rdev->supply;
-
rdev->use_count = 0;
} else if (rdev->use_count > 1) {
@@ -1440,6 +1472,7 @@ static int _regulator_disable(struct regulator_dev *rdev,
rdev->use_count--;
}
+
return ret;
}
@@ -1458,29 +1491,21 @@ static int _regulator_disable(struct regulator_dev *rdev,
int regulator_disable(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
- struct regulator_dev *supply_rdev = NULL;
int ret = 0;
mutex_lock(&rdev->mutex);
- ret = _regulator_disable(rdev, &supply_rdev);
+ ret = _regulator_disable(rdev);
mutex_unlock(&rdev->mutex);
- /* decrease our supplies ref count and disable if required */
- while (supply_rdev != NULL) {
- rdev = supply_rdev;
-
- mutex_lock(&rdev->mutex);
- _regulator_disable(rdev, &supply_rdev);
- mutex_unlock(&rdev->mutex);
- }
+ if (ret == 0 && rdev->supply)
+ regulator_disable(rdev->supply);
return ret;
}
EXPORT_SYMBOL_GPL(regulator_disable);
/* locks held by regulator_force_disable() */
-static int _regulator_force_disable(struct regulator_dev *rdev,
- struct regulator_dev **supply_rdev_ptr)
+static int _regulator_force_disable(struct regulator_dev *rdev)
{
int ret = 0;
@@ -1497,10 +1522,6 @@ static int _regulator_force_disable(struct regulator_dev *rdev,
REGULATOR_EVENT_DISABLE, NULL);
}
- /* decrease our supplies ref count and disable if required */
- *supply_rdev_ptr = rdev->supply;
-
- rdev->use_count = 0;
return ret;
}
@@ -1516,16 +1537,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev,
int regulator_force_disable(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
- struct regulator_dev *supply_rdev = NULL;
int ret;
mutex_lock(&rdev->mutex);
regulator->uA_load = 0;
- ret = _regulator_force_disable(rdev, &supply_rdev);
+ ret = _regulator_force_disable(regulator->rdev);
mutex_unlock(&rdev->mutex);
- if (supply_rdev)
- regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev)));
+ if (rdev->supply)
+ while (rdev->open_count--)
+ regulator_disable(rdev->supply);
return ret;
}
@@ -2136,7 +2157,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
/* get input voltage */
input_uV = 0;
if (rdev->supply)
- input_uV = _regulator_get_voltage(rdev->supply);
+ input_uV = regulator_get_voltage(rdev->supply);
if (input_uV <= 0)
input_uV = rdev->constraints->input_uV;
if (input_uV <= 0) {
@@ -2206,17 +2227,8 @@ EXPORT_SYMBOL_GPL(regulator_unregister_notifier);
static void _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
{
- struct regulator_dev *_rdev;
-
/* call rdev chain first */
blocking_notifier_call_chain(&rdev->notifier, event, NULL);
-
- /* now notify regulator we supply */
- list_for_each_entry(_rdev, &rdev->supply_list, slist) {
- mutex_lock(&_rdev->mutex);
- _notifier_call_chain(_rdev, event, data);
- mutex_unlock(&_rdev->mutex);
- }
}
/**
@@ -2264,6 +2276,13 @@ err:
}
EXPORT_SYMBOL_GPL(regulator_bulk_get);
+static void regulator_bulk_enable_async(void *data, async_cookie_t cookie)
+{
+ struct regulator_bulk_data *bulk = data;
+
+ bulk->ret = regulator_enable(bulk->consumer);
+}
+
/**
* regulator_bulk_enable - enable multiple regulator consumers
*
@@ -2279,21 +2298,33 @@ EXPORT_SYMBOL_GPL(regulator_bulk_get);
int regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers)
{
+ LIST_HEAD(async_domain);
int i;
- int ret;
+ int ret = 0;
+
+ for (i = 0; i < num_consumers; i++)
+ async_schedule_domain(regulator_bulk_enable_async,
+ &consumers[i], &async_domain);
+
+ async_synchronize_full_domain(&async_domain);
+ /* If any consumer failed we need to unwind any that succeeded */
for (i = 0; i < num_consumers; i++) {
- ret = regulator_enable(consumers[i].consumer);
- if (ret != 0)
+ if (consumers[i].ret != 0) {
+ ret = consumers[i].ret;
goto err;
+ }
}
return 0;
err:
- pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret);
- for (--i; i >= 0; --i)
- regulator_disable(consumers[i].consumer);
+ for (i = 0; i < num_consumers; i++)
+ if (consumers[i].ret == 0)
+ regulator_disable(consumers[i].consumer);
+ else
+ pr_err("Failed to enable %s: %d\n",
+ consumers[i].supply, consumers[i].ret);
return ret;
}
@@ -2589,9 +2620,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
rdev->owner = regulator_desc->owner;
rdev->desc = regulator_desc;
INIT_LIST_HEAD(&rdev->consumer_list);
- INIT_LIST_HEAD(&rdev->supply_list);
INIT_LIST_HEAD(&rdev->list);
- INIT_LIST_HEAD(&rdev->slist);
BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier);
/* preform any regulator specific init */
@@ -2672,6 +2701,7 @@ unset_supplies:
unset_regulator_supplies(rdev);
scrub:
+ kfree(rdev->constraints);
device_unregister(&rdev->dev);
/* device core frees rdev */
rdev = ERR_PTR(ret);
@@ -2703,7 +2733,7 @@ void regulator_unregister(struct regulator_dev *rdev)
unset_regulator_supplies(rdev);
list_del(&rdev->list);
if (rdev->supply)
- sysfs_remove_link(&rdev->dev.kobj, "supply");
+ regulator_put(rdev->supply);
device_unregister(&rdev->dev);
kfree(rdev->constraints);
mutex_unlock(&regulator_list_mutex);
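The regulator_bulk_enable() rework above starts every consumer (asynchronously in the kernel, via async_schedule_domain) and only then inspects the per-consumer results, unwinding the successful ones if anything failed. A sequential, userspace model of that unwind logic with stand-in enable/disable helpers:

#include <stdio.h>

struct consumer { const char *name; int fail; int ret; int enabled; };

static int enable_one(struct consumer *c)
{
	if (c->fail)
		return -1;
	c->enabled = 1;
	return 0;
}

static void disable_one(struct consumer *c) { c->enabled = 0; }

static int bulk_enable(struct consumer *c, int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++)		/* start everything first ... */
		c[i].ret = enable_one(&c[i]);

	for (i = 0; i < n; i++)		/* ... then check the results */
		if (c[i].ret != 0)
			ret = c[i].ret;

	if (ret)			/* unwind the ones that succeeded */
		for (i = 0; i < n; i++)
			if (c[i].ret == 0)
				disable_one(&c[i]);
	return ret;
}

int main(void)
{
	struct consumer c[3] = { {"a", 0}, {"b", 1}, {"c", 0} };

	printf("bulk_enable: %d\n", bulk_enable(c, 3));	/* fails, "a" and "c" get undone */
	return 0;
}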
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index e5f7b8fe51f..2bb8f451cc0 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -266,7 +266,7 @@ static struct regulator_ops db8500_regulator_switch_ops = {
* Regulator information
*/
static struct db8500_regulator_info
- db8500_regulator_info[DB8500_NUM_REGULATORS] = {
+db8500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VAPE] = {
.desc = {
.name = "db8500-vape",
@@ -492,11 +492,9 @@ static int __devinit db8500_regulator_probe(struct platform_device *pdev)
info->desc.name, err);
/* if failing, unregister all earlier regulators */
- i--;
- while (i >= 0) {
+ while (--i >= 0) {
info = &db8500_regulator_info[i];
regulator_unregister(info->rdev);
- i--;
}
return err;
}
@@ -536,13 +534,7 @@ static struct platform_driver db8500_regulator_driver = {
static int __init db8500_regulator_init(void)
{
- int ret;
-
- ret = platform_driver_register(&db8500_regulator_driver);
- if (ret < 0)
- return -ENODEV;
-
- return 0;
+ return platform_driver_register(&db8500_regulator_driver);
}
static void __exit db8500_regulator_exit(void)
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index c7410bde7b5..f6ef6694ab9 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -36,6 +36,29 @@ static struct regulator_desc dummy_desc = {
.ops = &dummy_ops,
};
+static int __devinit dummy_regulator_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
+ &dummy_initdata, NULL);
+ if (IS_ERR(dummy_regulator_rdev)) {
+ ret = PTR_ERR(dummy_regulator_rdev);
+ pr_err("Failed to register regulator: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver dummy_regulator_driver = {
+ .probe = dummy_regulator_probe,
+ .driver = {
+ .name = "reg-dummy",
+ .owner = THIS_MODULE,
+ },
+};
+
static struct platform_device *dummy_pdev;
void __init regulator_dummy_init(void)
@@ -55,12 +78,9 @@ void __init regulator_dummy_init(void)
return;
}
- dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
- &dummy_initdata, NULL);
- if (IS_ERR(dummy_regulator_rdev)) {
- ret = PTR_ERR(dummy_regulator_rdev);
- pr_err("Failed to register regulator: %d\n", ret);
+ ret = platform_driver_register(&dummy_regulator_driver);
+ if (ret != 0) {
+ pr_err("Failed to register dummy regulator driver: %d\n", ret);
platform_device_unregister(dummy_pdev);
- return;
}
}
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index daff7fd0e95..486ed8141fc 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -139,7 +139,7 @@ static int max8952_set_voltage(struct regulator_dev *rdev,
s8 vid = -1, i;
if (!gpio_is_valid(max8952->pdata->gpio_vid0) ||
- !gpio_is_valid(max8952->pdata->gpio_vid0)) {
+ !gpio_is_valid(max8952->pdata->gpio_vid1)) {
/* DVS not supported */
return -EPERM;
}
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 10d5a1d9768..ad6628ca94f 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -39,25 +39,28 @@ struct max8997_data {
struct regulator_dev **rdev;
int ramp_delay; /* in mV/us */
+ bool buck1_gpiodvs;
+ bool buck2_gpiodvs;
+ bool buck5_gpiodvs;
u8 buck1_vol[8];
u8 buck2_vol[8];
u8 buck5_vol[8];
+ int buck125_gpios[3];
int buck125_gpioindex;
+ bool ignore_gpiodvs_side_effect;
u8 saved_states[MAX8997_REG_MAX];
};
static inline void max8997_set_gpio(struct max8997_data *max8997)
{
- struct max8997_platform_data *pdata =
- dev_get_platdata(max8997->iodev->dev);
int set3 = (max8997->buck125_gpioindex) & 0x1;
int set2 = ((max8997->buck125_gpioindex) >> 1) & 0x1;
int set1 = ((max8997->buck125_gpioindex) >> 2) & 0x1;
- gpio_set_value(pdata->buck125_gpios[0], set1);
- gpio_set_value(pdata->buck125_gpios[1], set2);
- gpio_set_value(pdata->buck125_gpios[2], set3);
+ gpio_set_value(max8997->buck125_gpios[0], set1);
+ gpio_set_value(max8997->buck125_gpios[1], set2);
+ gpio_set_value(max8997->buck125_gpios[2], set3);
}
struct voltage_map_desc {
@@ -380,8 +383,6 @@ static int max8997_get_voltage_register(struct regulator_dev *rdev,
static int max8997_get_voltage(struct regulator_dev *rdev)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
- struct max8997_platform_data *pdata =
- dev_get_platdata(max8997->iodev->dev);
struct i2c_client *i2c = max8997->iodev->i2c;
int reg, shift, mask, ret;
int rid = max8997_get_rid(rdev);
@@ -391,9 +392,9 @@ static int max8997_get_voltage(struct regulator_dev *rdev)
if (ret)
return ret;
- if ((rid == MAX8997_BUCK1 && pdata->buck1_gpiodvs) ||
- (rid == MAX8997_BUCK2 && pdata->buck2_gpiodvs) ||
- (rid == MAX8997_BUCK5 && pdata->buck5_gpiodvs))
+ if ((rid == MAX8997_BUCK1 && max8997->buck1_gpiodvs) ||
+ (rid == MAX8997_BUCK2 && max8997->buck2_gpiodvs) ||
+ (rid == MAX8997_BUCK5 && max8997->buck5_gpiodvs))
reg += max8997->buck125_gpioindex;
ret = max8997_read_reg(i2c, reg, &val);
@@ -543,7 +544,8 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
rid == MAX8997_BUCK4 || rid == MAX8997_BUCK5) {
/* If the voltage is increasing */
if (org < i)
- udelay(desc->step * (i - org) / max8997->ramp_delay);
+ udelay(DIV_ROUND_UP(desc->step * (i - org),
+ max8997->ramp_delay));
}
return ret;
@@ -561,8 +563,6 @@ static int max8997_assess_side_effect(struct regulator_dev *rdev,
u8 new_val, int *best)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
- struct max8997_platform_data *pdata =
- dev_get_platdata(max8997->iodev->dev);
int rid = max8997_get_rid(rdev);
u8 *buckx_val[3];
bool buckx_gpiodvs[3];
@@ -589,9 +589,9 @@ static int max8997_assess_side_effect(struct regulator_dev *rdev,
buckx_val[0] = max8997->buck1_vol;
buckx_val[1] = max8997->buck2_vol;
buckx_val[2] = max8997->buck5_vol;
- buckx_gpiodvs[0] = pdata->buck1_gpiodvs;
- buckx_gpiodvs[1] = pdata->buck2_gpiodvs;
- buckx_gpiodvs[2] = pdata->buck5_gpiodvs;
+ buckx_gpiodvs[0] = max8997->buck1_gpiodvs;
+ buckx_gpiodvs[1] = max8997->buck2_gpiodvs;
+ buckx_gpiodvs[2] = max8997->buck5_gpiodvs;
for (i = 0; i < 8; i++) {
int others;
@@ -640,8 +640,6 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
- struct max8997_platform_data *pdata =
- dev_get_platdata(max8997->iodev->dev);
int rid = max8997_get_rid(rdev);
const struct voltage_map_desc *desc;
int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
@@ -653,15 +651,15 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
switch (rid) {
case MAX8997_BUCK1:
- if (pdata->buck1_gpiodvs)
+ if (max8997->buck1_gpiodvs)
gpio_dvs_mode = true;
break;
case MAX8997_BUCK2:
- if (pdata->buck2_gpiodvs)
+ if (max8997->buck2_gpiodvs)
gpio_dvs_mode = true;
break;
case MAX8997_BUCK5:
- if (pdata->buck5_gpiodvs)
+ if (max8997->buck5_gpiodvs)
gpio_dvs_mode = true;
break;
}
@@ -695,7 +693,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
new_idx = tmp_idx;
new_val = tmp_val;
- if (pdata->ignore_gpiodvs_side_effect == false)
+ if (max8997->ignore_gpiodvs_side_effect == false)
return -EINVAL;
dev_warn(&rdev->dev, "MAX8997 GPIO-DVS Side Effect Warning: GPIO SET:"
@@ -993,6 +991,11 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
i2c = max8997->iodev->i2c;
max8997->buck125_gpioindex = pdata->buck125_default_idx;
+ max8997->buck1_gpiodvs = pdata->buck1_gpiodvs;
+ max8997->buck2_gpiodvs = pdata->buck2_gpiodvs;
+ max8997->buck5_gpiodvs = pdata->buck5_gpiodvs;
+ memcpy(max8997->buck125_gpios, pdata->buck125_gpios, sizeof(int) * 3);
+ max8997->ignore_gpiodvs_side_effect = pdata->ignore_gpiodvs_side_effect;
for (i = 0; i < 8; i++) {
max8997->buck1_vol[i] = ret =
@@ -1124,6 +1127,10 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
0x3f);
}
+ /* Misc Settings */
+ max8997->ramp_delay = 10; /* set 10mV/us, which is the default */
+ max8997_write_reg(i2c, MAX8997_REG_BUCKRAMP, (0xf << 4) | 0x9);
+
for (i = 0; i < pdata->num_regulators; i++) {
const struct voltage_map_desc *desc;
int id = pdata->regulators[i].id;
@@ -1148,10 +1155,6 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
}
}
- /* Misc Settings */
- max8997->ramp_delay = 10; /* set 10mV/us, which is the default */
- max8997_write_reg(i2c, MAX8997_REG_BUCKRAMP, (0xf << 4) | 0x9);
-
return 0;
err:
for (i = 0; i < max8997->num_regulators; i++)
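One detail in the max8997 regulator changes above is the switch from plain division to DIV_ROUND_UP() when waiting out a voltage ramp, so the delay is never rounded down below the time the hardware actually needs. A tiny worked example; the step size and ramp rate are illustrative values, not taken from the chip datasheet.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int ramp_us(unsigned int step_mV, unsigned int nsteps,
			    unsigned int ramp_mV_per_us)
{
	return DIV_ROUND_UP(step_mV * nsteps, ramp_mV_per_us);
}

int main(void)
{
	/* 25 mV steps, 3 steps up, 10 mV/us ramp -> 8 us, not 7 */
	printf("%u us\n", ramp_us(25, 3, 10));
	return 0;
}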
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index fbddc15e181..701a5900f83 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -25,6 +25,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/regmap.h>
/* Register definitions */
#define TPS65023_REG_VERSION 0
@@ -125,93 +126,35 @@ struct tps_pmic {
struct i2c_client *client;
struct regulator_dev *rdev[TPS65023_NUM_REGULATOR];
const struct tps_info *info[TPS65023_NUM_REGULATOR];
- struct mutex io_lock;
+ struct regmap *regmap;
};
-static inline int tps_65023_read(struct tps_pmic *tps, u8 reg)
-{
- return i2c_smbus_read_byte_data(tps->client, reg);
-}
-
-static inline int tps_65023_write(struct tps_pmic *tps, u8 reg, u8 val)
-{
- return i2c_smbus_write_byte_data(tps->client, reg, val);
-}
-
static int tps_65023_set_bits(struct tps_pmic *tps, u8 reg, u8 mask)
{
- int err, data;
-
- mutex_lock(&tps->io_lock);
-
- data = tps_65023_read(tps, reg);
- if (data < 0) {
- dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
- err = data;
- goto out;
- }
-
- data |= mask;
- err = tps_65023_write(tps, reg, data);
- if (err)
- dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
-
-out:
- mutex_unlock(&tps->io_lock);
- return err;
+ return regmap_update_bits(tps->regmap, reg, mask, mask);
}
static int tps_65023_clear_bits(struct tps_pmic *tps, u8 reg, u8 mask)
{
- int err, data;
-
- mutex_lock(&tps->io_lock);
-
- data = tps_65023_read(tps, reg);
- if (data < 0) {
- dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
- err = data;
- goto out;
- }
-
- data &= ~mask;
-
- err = tps_65023_write(tps, reg, data);
- if (err)
- dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
-
-out:
- mutex_unlock(&tps->io_lock);
- return err;
-
+ return regmap_update_bits(tps->regmap, reg, mask, 0);
}
static int tps_65023_reg_read(struct tps_pmic *tps, u8 reg)
{
- int data;
+ unsigned int val;
+ int ret;
- mutex_lock(&tps->io_lock);
+ ret = regmap_read(tps->regmap, reg, &val);
- data = tps_65023_read(tps, reg);
- if (data < 0)
- dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg);
-
- mutex_unlock(&tps->io_lock);
- return data;
+ if (ret != 0)
+ return ret;
+ else
+ return val;
}
static int tps_65023_reg_write(struct tps_pmic *tps, u8 reg, u8 val)
{
- int err;
-
- mutex_lock(&tps->io_lock);
-
- err = tps_65023_write(tps, reg, val);
- if (err < 0)
- dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg);
-
- mutex_unlock(&tps->io_lock);
- return err;
+ return regmap_write(tps->regmap, reg, val);
}
static int tps65023_dcdc_is_enabled(struct regulator_dev *dev)
@@ -463,6 +406,11 @@ static struct regulator_ops tps65023_ldo_ops = {
.list_voltage = tps65023_ldo_list_voltage,
};
+static struct regmap_config tps65023_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
static int __devinit tps_65023_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -488,7 +436,13 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
if (!tps)
return -ENOMEM;
- mutex_init(&tps->io_lock);
+ tps->regmap = regmap_init_i2c(client, &tps65023_regmap_config);
+ if (IS_ERR(tps->regmap)) {
+ error = PTR_ERR(tps->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ error);
+ goto fail_alloc;
+ }
/* common for all regulators */
tps->client = client;
@@ -527,6 +481,8 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
while (--i >= 0)
regulator_unregister(tps->rdev[i]);
+ regmap_exit(tps->regmap);
+ fail_alloc:
kfree(tps);
return error;
}
@@ -545,6 +501,7 @@ static int __devexit tps_65023_remove(struct i2c_client *client)
for (i = 0; i < TPS65023_NUM_REGULATOR; i++)
regulator_unregister(tps->rdev[i]);
+ regmap_exit(tps->regmap);
kfree(tps);
return 0;
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 55dd4e6650d..66d2d60b436 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -49,7 +49,6 @@
#define TPS65911_REG_LDO7 11
#define TPS65911_REG_LDO8 12
-#define TPS65910_NUM_REGULATOR 13
#define TPS65910_SUPPLY_STATE_ENABLED 0x1
/* supported VIO voltages in milivolts */
@@ -264,11 +263,12 @@ static struct tps_info tps65911_regs[] = {
};
struct tps65910_reg {
- struct regulator_desc desc[TPS65910_NUM_REGULATOR];
+ struct regulator_desc *desc;
struct tps65910 *mfd;
- struct regulator_dev *rdev[TPS65910_NUM_REGULATOR];
- struct tps_info *info[TPS65910_NUM_REGULATOR];
+ struct regulator_dev **rdev;
+ struct tps_info **info;
struct mutex mutex;
+ int num_regulators;
int mode;
int (*get_ctrl_reg)(int);
};
@@ -759,8 +759,13 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
mult = (selector / VDD1_2_NUM_VOLTS) + 1;
volt = VDD1_2_MIN_VOLT +
(selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET;
+ break;
case TPS65911_REG_VDDCTRL:
volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
+ break;
+ default:
+ BUG();
+ return -EINVAL;
}
return volt * 100 * mult;
@@ -897,16 +902,42 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
switch(tps65910_chip_id(tps65910)) {
case TPS65910:
pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
+ pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
info = tps65910_regs;
+ break;
case TPS65911:
pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
+ pmic->num_regulators = ARRAY_SIZE(tps65911_regs);
info = tps65911_regs;
+ break;
default:
pr_err("Invalid tps chip version\n");
+ kfree(pmic);
return -ENODEV;
}
- for (i = 0; i < TPS65910_NUM_REGULATOR; i++, info++, reg_data++) {
+ pmic->desc = kcalloc(pmic->num_regulators,
+ sizeof(struct regulator_desc), GFP_KERNEL);
+ if (!pmic->desc) {
+ err = -ENOMEM;
+ goto err_free_pmic;
+ }
+
+ pmic->info = kcalloc(pmic->num_regulators,
+ sizeof(struct tps_info *), GFP_KERNEL);
+ if (!pmic->info) {
+ err = -ENOMEM;
+ goto err_free_desc;
+ }
+
+ pmic->rdev = kcalloc(pmic->num_regulators,
+ sizeof(struct regulator_dev *), GFP_KERNEL);
+ if (!pmic->rdev) {
+ err = -ENOMEM;
+ goto err_free_info;
+ }
+
+ for (i = 0; i < pmic->num_regulators; i++, info++, reg_data++) {
/* Register the regulators */
pmic->info[i] = info;
@@ -938,7 +969,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
"failed to register %s regulator\n",
pdev->name);
err = PTR_ERR(rdev);
- goto err;
+ goto err_unregister_regulator;
}
/* Save regulator for cleanup */
@@ -946,23 +977,31 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
}
return 0;
-err:
+err_unregister_regulator:
while (--i >= 0)
regulator_unregister(pmic->rdev[i]);
-
+ kfree(pmic->rdev);
+err_free_info:
+ kfree(pmic->info);
+err_free_desc:
+ kfree(pmic->desc);
+err_free_pmic:
kfree(pmic);
return err;
}
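The error path above unwinds in reverse allocation order, one label per successful step. A minimal userspace mirror of that idiom (malloc/free stand in for kcalloc/kfree; names are mine):

#include <stdlib.h>

static int example_probe(void)
{
	void *desc, *info, *rdev;

	desc = malloc(64);
	if (!desc)
		return -1;

	info = malloc(64);
	if (!info)
		goto err_free_desc;

	rdev = malloc(64);
	if (!rdev)
		goto err_free_info;

	/* A real probe would keep these; freed here only so the demo
	 * does not leak. */
	free(rdev);
	free(info);
	free(desc);
	return 0;

err_free_info:
	free(info);	/* undo only what succeeded, newest first */
err_free_desc:
	free(desc);
	return -1;
}

int main(void)
{
	return example_probe();
}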
static int __devexit tps65910_remove(struct platform_device *pdev)
{
- struct tps65910_reg *tps65910_reg = platform_get_drvdata(pdev);
+ struct tps65910_reg *pmic = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < TPS65910_NUM_REGULATOR; i++)
- regulator_unregister(tps65910_reg->rdev[i]);
+ for (i = 0; i < pmic->num_regulators; i++)
+ regulator_unregister(pmic->rdev[i]);
- kfree(tps65910_reg);
+ kfree(pmic->rdev);
+ kfree(pmic->info);
+ kfree(pmic->desc);
+ kfree(pmic);
return 0;
}
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
new file mode 100644
index 00000000000..3a9313e00fa
--- /dev/null
+++ b/drivers/regulator/tps65912-regulator.c
@@ -0,0 +1,800 @@
+/*
+ * tps65912.c -- TI tps65912
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on wm8350 implementation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65912.h>
+
+/* DCDC's */
+#define TPS65912_REG_DCDC1 0
+#define TPS65912_REG_DCDC2 1
+#define TPS65912_REG_DCDC3 2
+#define TPS65912_REG_DCDC4 3
+
+/* LDOs */
+#define TPS65912_REG_LDO1 4
+#define TPS65912_REG_LDO2 5
+#define TPS65912_REG_LDO3 6
+#define TPS65912_REG_LDO4 7
+#define TPS65912_REG_LDO5 8
+#define TPS65912_REG_LDO6 9
+#define TPS65912_REG_LDO7 10
+#define TPS65912_REG_LDO8 11
+#define TPS65912_REG_LDO9 12
+#define TPS65912_REG_LDO10 13
+
+#define TPS65912_MAX_REG_ID TPS65912_REG_LDO10
+
+/* Number of step-down converters available */
+#define TPS65912_NUM_DCDC 4
+
+/* Number of LDO voltage regulators available */
+#define TPS65912_NUM_LDO 10
+
+/* Number of total regulators available */
+#define TPS65912_NUM_REGULATOR (TPS65912_NUM_DCDC + TPS65912_NUM_LDO)
+
+#define TPS65912_REG_ENABLED 0x80
+#define OP_SELREG_MASK 0x40
+#define OP_SELREG_SHIFT 6
+
+struct tps_info {
+ const char *name;
+};
+
+static struct tps_info tps65912_regs[] = {
+ {
+ .name = "DCDC1",
+ },
+ {
+ .name = "DCDC2",
+ },
+ {
+ .name = "DCDC3",
+ },
+ {
+ .name = "DCDC4",
+ },
+ {
+ .name = "LDO1",
+ },
+ {
+ .name = "LDO2",
+ },
+ {
+ .name = "LDO3",
+ },
+ {
+ .name = "LDO4",
+ },
+ {
+ .name = "LDO5",
+ },
+ {
+ .name = "LDO6",
+ },
+ {
+ .name = "LDO7",
+ },
+ {
+ .name = "LDO8",
+ },
+ {
+ .name = "LDO9",
+ },
+ {
+ .name = "LDO10",
+ },
+};
+
+struct tps65912_reg {
+ struct regulator_desc desc[TPS65912_NUM_REGULATOR];
+ struct tps65912 *mfd;
+ struct regulator_dev *rdev[TPS65912_NUM_REGULATOR];
+ struct tps_info *info[TPS65912_NUM_REGULATOR];
+ /* for read/write access */
+ struct mutex io_lock;
+ int mode;
+ int (*get_ctrl_reg)(int);
+ int dcdc1_range;
+ int dcdc2_range;
+ int dcdc3_range;
+ int dcdc4_range;
+ int pwm_mode_reg;
+ int eco_reg;
+};
+
+static int tps65912_get_range(struct tps65912_reg *pmic, int id)
+{
+ struct tps65912 *mfd = pmic->mfd;
+
+ if (id > TPS65912_REG_DCDC4)
+ return 0;
+
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ pmic->dcdc1_range = tps65912_reg_read(mfd,
+ TPS65912_DCDC1_LIMIT);
+ if (pmic->dcdc1_range < 0)
+ return pmic->dcdc1_range;
+ pmic->dcdc1_range = (pmic->dcdc1_range &
+ DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
+ return pmic->dcdc1_range;
+ case TPS65912_REG_DCDC2:
+ pmic->dcdc2_range = tps65912_reg_read(mfd,
+ TPS65912_DCDC2_LIMIT);
+ if (pmic->dcdc2_range < 0)
+ return pmic->dcdc2_range;
+ pmic->dcdc2_range = (pmic->dcdc2_range &
+ DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
+ return pmic->dcdc2_range;
+ case TPS65912_REG_DCDC3:
+ pmic->dcdc3_range = tps65912_reg_read(mfd,
+ TPS65912_DCDC3_LIMIT);
+ if (pmic->dcdc3_range < 0)
+ return pmic->dcdc3_range;
+ pmic->dcdc3_range = (pmic->dcdc3_range &
+ DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
+ return pmic->dcdc3_range;
+ case TPS65912_REG_DCDC4:
+ pmic->dcdc4_range = tps65912_reg_read(mfd,
+ TPS65912_DCDC4_LIMIT);
+ if (pmic->dcdc4_range < 0)
+ return pmic->dcdc4_range;
+ pmic->dcdc4_range = (pmic->dcdc4_range &
+ DCDC_LIMIT_RANGE_MASK) >> DCDC_LIMIT_RANGE_SHIFT;
+ return pmic->dcdc4_range;
+ default:
+ return 0;
+ }
+}
+
+static unsigned long tps65912_vsel_to_uv_range0(u8 vsel)
+{
+ unsigned long uv;
+
+ uv = ((vsel * 12500) + 500000);
+ return uv;
+}
+
+static unsigned long tps65912_vsel_to_uv_range1(u8 vsel)
+{
+ unsigned long uv;
+
+ uv = ((vsel * 12500) + 700000);
+ return uv;
+}
+
+static unsigned long tps65912_vsel_to_uv_range2(u8 vsel)
+{
+ unsigned long uv;
+
+ uv = ((vsel * 25000) + 500000);
+ return uv;
+}
+
+static unsigned long tps65912_vsel_to_uv_range3(u8 vsel)
+{
+ unsigned long uv;
+
+ if (vsel == 0x3f)
+ uv = 3800000;
+ else
+ uv = ((vsel * 50000) + 500000);
+
+ return uv;
+}
+
+static unsigned long tps65912_vsel_to_uv_ldo(u8 vsel)
+{
+ unsigned long uv = 0;
+
+ if (vsel <= 32)
+ uv = ((vsel * 25000) + 800000);
+ else if (vsel > 32 && vsel <= 60)
+ uv = (((vsel - 32) * 50000) + 1600000);
+ else if (vsel > 60)
+ uv = (((vsel - 60) * 100000) + 3000000);
+
+ return uv;
+}
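Worked examples of the piecewise LDO mapping above, derived directly from the three formulas: vsel 10 -> 800000 + 10*25000 = 1050000 uV, vsel 40 -> 1600000 + (40-32)*50000 = 2000000 uV, vsel 63 -> 3000000 + (63-60)*100000 = 3300000 uV. A standalone check:

#include <stdio.h>

/* Same piecewise mapping as tps65912_vsel_to_uv_ldo(), reproduced only to
 * sanity-check a few selector values. */
static unsigned long ldo_vsel_to_uv(unsigned char vsel)
{
	if (vsel <= 32)
		return vsel * 25000 + 800000;
	if (vsel <= 60)
		return (vsel - 32) * 50000 + 1600000;
	return (vsel - 60) * 100000 + 3000000;
}

int main(void)
{
	printf("%lu\n", ldo_vsel_to_uv(10));	/* 1050000 */
	printf("%lu\n", ldo_vsel_to_uv(40));	/* 2000000 */
	printf("%lu\n", ldo_vsel_to_uv(63));	/* 3300000 */
	return 0;
}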
+
+static int tps65912_get_ctrl_register(int id)
+{
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ return TPS65912_DCDC1_AVS;
+ case TPS65912_REG_DCDC2:
+ return TPS65912_DCDC2_AVS;
+ case TPS65912_REG_DCDC3:
+ return TPS65912_DCDC3_AVS;
+ case TPS65912_REG_DCDC4:
+ return TPS65912_DCDC4_AVS;
+ case TPS65912_REG_LDO1:
+ return TPS65912_LDO1_AVS;
+ case TPS65912_REG_LDO2:
+ return TPS65912_LDO2_AVS;
+ case TPS65912_REG_LDO3:
+ return TPS65912_LDO3_AVS;
+ case TPS65912_REG_LDO4:
+ return TPS65912_LDO4_AVS;
+ case TPS65912_REG_LDO5:
+ return TPS65912_LDO5;
+ case TPS65912_REG_LDO6:
+ return TPS65912_LDO6;
+ case TPS65912_REG_LDO7:
+ return TPS65912_LDO7;
+ case TPS65912_REG_LDO8:
+ return TPS65912_LDO8;
+ case TPS65912_REG_LDO9:
+ return TPS65912_LDO9;
+ case TPS65912_REG_LDO10:
+ return TPS65912_LDO10;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tps65912_get_dcdc_sel_register(struct tps65912_reg *pmic, int id)
+{
+ struct tps65912 *mfd = pmic->mfd;
+ int opvsel = 0, sr = 0;
+ u8 reg = 0;
+
+ if (id < TPS65912_REG_DCDC1 || id > TPS65912_REG_DCDC4)
+ return -EINVAL;
+
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC1_OP);
+ sr = ((opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT);
+ if (sr)
+ reg = TPS65912_DCDC1_AVS;
+ else
+ reg = TPS65912_DCDC1_OP;
+ break;
+ case TPS65912_REG_DCDC2:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC2_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_DCDC2_AVS;
+ else
+ reg = TPS65912_DCDC2_OP;
+ break;
+ case TPS65912_REG_DCDC3:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC3_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_DCDC3_AVS;
+ else
+ reg = TPS65912_DCDC3_OP;
+ break;
+ case TPS65912_REG_DCDC4:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC4_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_DCDC4_AVS;
+ else
+ reg = TPS65912_DCDC4_OP;
+ break;
+ }
+ return reg;
+}
+
+static int tps65912_get_ldo_sel_register(struct tps65912_reg *pmic, int id)
+{
+ struct tps65912 *mfd = pmic->mfd;
+ int opvsel = 0, sr = 0;
+ u8 reg = 0;
+
+ if (id < TPS65912_REG_LDO1 || id > TPS65912_REG_LDO10)
+ return -EINVAL;
+
+ switch (id) {
+ case TPS65912_REG_LDO1:
+ opvsel = tps65912_reg_read(mfd, TPS65912_LDO1_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_LDO1_AVS;
+ else
+ reg = TPS65912_LDO1_OP;
+ break;
+ case TPS65912_REG_LDO2:
+ opvsel = tps65912_reg_read(mfd, TPS65912_LDO2_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_LDO2_AVS;
+ else
+ reg = TPS65912_LDO2_OP;
+ break;
+ case TPS65912_REG_LDO3:
+ opvsel = tps65912_reg_read(mfd, TPS65912_LDO3_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_LDO3_AVS;
+ else
+ reg = TPS65912_LDO3_OP;
+ break;
+ case TPS65912_REG_LDO4:
+ opvsel = tps65912_reg_read(mfd, TPS65912_LDO4_OP);
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ reg = TPS65912_LDO4_AVS;
+ else
+ reg = TPS65912_LDO4_OP;
+ break;
+ case TPS65912_REG_LDO5:
+ reg = TPS65912_LDO5;
+ break;
+ case TPS65912_REG_LDO6:
+ reg = TPS65912_LDO6;
+ break;
+ case TPS65912_REG_LDO7:
+ reg = TPS65912_LDO7;
+ break;
+ case TPS65912_REG_LDO8:
+ reg = TPS65912_LDO8;
+ break;
+ case TPS65912_REG_LDO9:
+ reg = TPS65912_LDO9;
+ break;
+ case TPS65912_REG_LDO10:
+ reg = TPS65912_LDO10;
+ break;
+ }
+
+ return reg;
+}
+
+static int tps65912_get_mode_registers(struct tps65912_reg *pmic, int id)
+{
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ pmic->pwm_mode_reg = TPS65912_DCDC1_CTRL;
+ pmic->eco_reg = TPS65912_DCDC1_AVS;
+ break;
+ case TPS65912_REG_DCDC2:
+ pmic->pwm_mode_reg = TPS65912_DCDC2_CTRL;
+ pmic->eco_reg = TPS65912_DCDC2_AVS;
+ break;
+ case TPS65912_REG_DCDC3:
+ pmic->pwm_mode_reg = TPS65912_DCDC3_CTRL;
+ pmic->eco_reg = TPS65912_DCDC3_AVS;
+ break;
+ case TPS65912_REG_DCDC4:
+ pmic->pwm_mode_reg = TPS65912_DCDC4_CTRL;
+ pmic->eco_reg = TPS65912_DCDC4_AVS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tps65912_reg_is_enabled(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int reg, value, id = rdev_get_id(dev);
+
+ if (id < TPS65912_REG_DCDC1 || id > TPS65912_REG_LDO10)
+ return -EINVAL;
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ value = tps65912_reg_read(mfd, reg);
+ if (value < 0)
+ return value;
+
+ return value & TPS65912_REG_ENABLED;
+}
+
+static int tps65912_reg_enable(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev);
+ int reg;
+
+ if (id < TPS65912_REG_DCDC1 || id > TPS65912_REG_LDO10)
+ return -EINVAL;
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ return tps65912_set_bits(mfd, reg, TPS65912_REG_ENABLED);
+}
+
+static int tps65912_reg_disable(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev), reg;
+
+ reg = pmic->get_ctrl_reg(id);
+ if (reg < 0)
+ return reg;
+
+ return tps65912_clear_bits(mfd, reg, TPS65912_REG_ENABLED);
+}
+
+static int tps65912_set_mode(struct regulator_dev *dev, unsigned int mode)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int pwm_mode, eco, id = rdev_get_id(dev);
+
+ tps65912_get_mode_registers(pmic, id);
+
+ pwm_mode = tps65912_reg_read(mfd, pmic->pwm_mode_reg);
+ eco = tps65912_reg_read(mfd, pmic->eco_reg);
+
+ pwm_mode &= DCDCCTRL_DCDC_MODE_MASK;
+ eco &= DCDC_AVS_ECO_MASK;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ /* Verify if mode already set */
+ if (pwm_mode && !eco)
+ break;
+ tps65912_set_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
+ tps65912_clear_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ case REGULATOR_MODE_IDLE:
+ if (!pwm_mode && !eco)
+ break;
+ tps65912_clear_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
+ tps65912_clear_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
+ break;
+ case REGULATOR_MODE_STANDBY:
+ if (!pwm_mode && eco)
+ break;
+ tps65912_clear_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
+ tps65912_set_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned int tps65912_get_mode(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int pwm_mode, eco, mode = 0, id = rdev_get_id(dev);
+
+ tps65912_get_mode_registers(pmic, id);
+
+ pwm_mode = tps65912_reg_read(mfd, pmic->pwm_mode_reg);
+ eco = tps65912_reg_read(mfd, pmic->eco_reg);
+
+ pwm_mode &= DCDCCTRL_DCDC_MODE_MASK;
+ eco &= DCDC_AVS_ECO_MASK;
+
+ if (pwm_mode && !eco)
+ mode = REGULATOR_MODE_FAST;
+ else if (!pwm_mode && !eco)
+ mode = REGULATOR_MODE_NORMAL;
+ else if (!pwm_mode && eco)
+ mode = REGULATOR_MODE_STANDBY;
+
+ return mode;
+}
+
+static int tps65912_get_voltage_dcdc(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev), voltage = 0, range;
+ int opvsel = 0, avsel = 0, sr, vsel;
+
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC1_OP);
+ avsel = tps65912_reg_read(mfd, TPS65912_DCDC1_AVS);
+ range = pmic->dcdc1_range;
+ break;
+ case TPS65912_REG_DCDC2:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC2_OP);
+ avsel = tps65912_reg_read(mfd, TPS65912_DCDC2_AVS);
+ range = pmic->dcdc2_range;
+ break;
+ case TPS65912_REG_DCDC3:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC3_OP);
+ avsel = tps65912_reg_read(mfd, TPS65912_DCDC3_AVS);
+ range = pmic->dcdc3_range;
+ break;
+ case TPS65912_REG_DCDC4:
+ opvsel = tps65912_reg_read(mfd, TPS65912_DCDC4_OP);
+ avsel = tps65912_reg_read(mfd, TPS65912_DCDC4_AVS);
+ range = pmic->dcdc4_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
+ if (sr)
+ vsel = avsel;
+ else
+ vsel = opvsel;
+ vsel &= 0x3F;
+
+ switch (range) {
+ case 0:
+ /* 0.5 - 1.2875V in 12.5mV steps */
+ voltage = tps65912_vsel_to_uv_range0(vsel);
+ break;
+ case 1:
+ /* 0.7 - 1.4875V in 12.5mV steps */
+ voltage = tps65912_vsel_to_uv_range1(vsel);
+ break;
+ case 2:
+ /* 0.5 - 2.075V in 25mV steps */
+ voltage = tps65912_vsel_to_uv_range2(vsel);
+ break;
+ case 3:
+ /* 0.5 - 3.8V in 50mV steps */
+ voltage = tps65912_vsel_to_uv_range3(vsel);
+ break;
+ }
+ return voltage;
+}
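For reference, the register selection above reduces to one bit: OP_SELREG (bit 6, mask 0x40) of the _OP register says whether the _OP or the _AVS copy of the 6-bit VSEL is live. A small illustration with made-up register values:

#include <stdio.h>

#define OP_SELREG_MASK	0x40
#define OP_SELREG_SHIFT	6

int main(void)
{
	/* Pretend values read from the _OP and _AVS registers. */
	unsigned int opvsel = 0x40 | 0x15;	/* OP_SELREG set, vsel 0x15 */
	unsigned int avsel  = 0x20;

	int sr = (opvsel & OP_SELREG_MASK) >> OP_SELREG_SHIFT;
	unsigned int vsel = (sr ? avsel : opvsel) & 0x3F;

	printf("active vsel = 0x%02x\n", vsel);	/* 0x20: the AVS copy */
	return 0;
}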
+
+static int tps65912_set_voltage_dcdc(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev);
+ int value;
+ u8 reg;
+
+ reg = tps65912_get_dcdc_sel_register(pmic, id);
+ value = tps65912_reg_read(mfd, reg);
+ value &= 0xC0;
+ return tps65912_reg_write(mfd, reg, selector | value);
+}
+
+static int tps65912_get_voltage_ldo(struct regulator_dev *dev)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev);
+ int vsel = 0;
+ u8 reg;
+
+ reg = tps65912_get_ldo_sel_register(pmic, id);
+ vsel = tps65912_reg_read(mfd, reg);
+ vsel &= 0x3F;
+
+ return tps65912_vsel_to_uv_ldo(vsel);
+}
+
+static int tps65912_set_voltage_ldo(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ struct tps65912 *mfd = pmic->mfd;
+ int id = rdev_get_id(dev), reg, value;
+
+ reg = tps65912_get_ldo_sel_register(pmic, id);
+ value = tps65912_reg_read(mfd, reg);
+ value &= 0xC0;
+ return tps65912_reg_write(mfd, reg, selector | value);
+}
+
+static int tps65912_list_voltage_dcdc(struct regulator_dev *dev,
+ unsigned selector)
+{
+ struct tps65912_reg *pmic = rdev_get_drvdata(dev);
+ int range, voltage = 0, id = rdev_get_id(dev);
+
+ switch (id) {
+ case TPS65912_REG_DCDC1:
+ range = pmic->dcdc1_range;
+ break;
+ case TPS65912_REG_DCDC2:
+ range = pmic->dcdc2_range;
+ break;
+ case TPS65912_REG_DCDC3:
+ range = pmic->dcdc3_range;
+ break;
+ case TPS65912_REG_DCDC4:
+ range = pmic->dcdc4_range;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (range) {
+ case 0:
+ /* 0.5 - 1.2875V in 12.5mV steps */
+ voltage = tps65912_vsel_to_uv_range0(selector);
+ break;
+ case 1:
+ /* 0.7 - 1.4875V in 12.5mV steps */
+ voltage = tps65912_vsel_to_uv_range1(selector);
+ break;
+ case 2:
+ /* 0.5 - 2.075V in 25mV steps */
+ voltage = tps65912_vsel_to_uv_range2(selector);
+ break;
+ case 3:
+ /* 0.5 - 3.8V in 50mV steps */
+ voltage = tps65912_vsel_to_uv_range3(selector);
+ break;
+ }
+ return voltage;
+}
+
+static int tps65912_list_voltage_ldo(struct regulator_dev *dev,
+ unsigned selector)
+{
+ int ldo = rdev_get_id(dev);
+
+ if (ldo < TPS65912_REG_LDO1 || ldo > TPS65912_REG_LDO10)
+ return -EINVAL;
+
+ return tps65912_vsel_to_uv_ldo(selector);
+}
+
+/* Operations permitted on DCDCx */
+static struct regulator_ops tps65912_ops_dcdc = {
+ .is_enabled = tps65912_reg_is_enabled,
+ .enable = tps65912_reg_enable,
+ .disable = tps65912_reg_disable,
+ .set_mode = tps65912_set_mode,
+ .get_mode = tps65912_get_mode,
+ .get_voltage = tps65912_get_voltage_dcdc,
+ .set_voltage_sel = tps65912_set_voltage_dcdc,
+ .list_voltage = tps65912_list_voltage_dcdc,
+};
+
+/* Operations permitted on LDOx */
+static struct regulator_ops tps65912_ops_ldo = {
+ .is_enabled = tps65912_reg_is_enabled,
+ .enable = tps65912_reg_enable,
+ .disable = tps65912_reg_disable,
+ .get_voltage = tps65912_get_voltage_ldo,
+ .set_voltage_sel = tps65912_set_voltage_ldo,
+ .list_voltage = tps65912_list_voltage_ldo,
+};
+
+static __devinit int tps65912_probe(struct platform_device *pdev)
+{
+ struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
+ struct tps_info *info;
+ struct regulator_init_data *reg_data;
+ struct regulator_dev *rdev;
+ struct tps65912_reg *pmic;
+ struct tps65912_board *pmic_plat_data;
+ int i, err;
+
+ pmic_plat_data = dev_get_platdata(tps65912->dev);
+ if (!pmic_plat_data)
+ return -EINVAL;
+
+ reg_data = pmic_plat_data->tps65912_pmic_init_data;
+
+ pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ mutex_init(&pmic->io_lock);
+ pmic->mfd = tps65912;
+ platform_set_drvdata(pdev, pmic);
+
+ pmic->get_ctrl_reg = &tps65912_get_ctrl_register;
+ info = tps65912_regs;
+
+ for (i = 0; i < TPS65912_NUM_REGULATOR; i++, info++, reg_data++) {
+ int range = 0;
+ /* Register the regulators */
+ pmic->info[i] = info;
+
+ pmic->desc[i].name = info->name;
+ pmic->desc[i].id = i;
+ pmic->desc[i].n_voltages = 64;
+ pmic->desc[i].ops = (i > TPS65912_REG_DCDC4 ?
+ &tps65912_ops_ldo : &tps65912_ops_dcdc);
+ pmic->desc[i].type = REGULATOR_VOLTAGE;
+ pmic->desc[i].owner = THIS_MODULE;
+ range = tps65912_get_range(pmic, i);
+ rdev = regulator_register(&pmic->desc[i],
+ tps65912->dev, reg_data, pmic);
+ if (IS_ERR(rdev)) {
+ dev_err(tps65912->dev,
+ "failed to register %s regulator\n",
+ pdev->name);
+ err = PTR_ERR(rdev);
+ goto err;
+ }
+
+ /* Save regulator for cleanup */
+ pmic->rdev[i] = rdev;
+ }
+ return 0;
+
+err:
+ while (--i >= 0)
+ regulator_unregister(pmic->rdev[i]);
+
+ kfree(pmic);
+ return err;
+}
+
+static int __devexit tps65912_remove(struct platform_device *pdev)
+{
+ struct tps65912_reg *tps65912_reg = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < TPS65912_NUM_REGULATOR; i++)
+ regulator_unregister(tps65912_reg->rdev[i]);
+
+ kfree(tps65912_reg);
+ return 0;
+}
+
+static struct platform_driver tps65912_driver = {
+ .driver = {
+ .name = "tps65912-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = tps65912_probe,
+ .remove = __devexit_p(tps65912_remove),
+};
+
+/**
+ * tps65912_init
+ *
+ * Module init function
+ */
+static int __init tps65912_init(void)
+{
+ return platform_driver_register(&tps65912_driver);
+}
+subsys_initcall(tps65912_init);
+
+/**
+ * tps65912_cleanup
+ *
+ * Module exit function
+ */
+static void __exit tps65912_cleanup(void)
+{
+ platform_driver_unregister(&tps65912_driver);
+}
+module_exit(tps65912_cleanup);
+
+MODULE_AUTHOR("Margarita Olaya Cabrera <magi@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS65912 voltage regulator driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65912-pmic");
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 87fe0f75a56..ee8747f4fa0 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -835,8 +835,8 @@ static struct regulator_ops twlsmps_ops = {
remap_conf) \
TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf, TWL4030, twl4030fixed_ops)
-#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay) \
- TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
+#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \
+ TWL_FIXED_LDO(label, offset, mVolts, 0x0, turnon_delay, \
0x0, TWL6030, twl6030fixed_ops)
#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \
@@ -856,24 +856,22 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
-#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
+#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
- .id = num, \
.min_mV = min_mVolts, \
.max_mV = max_mVolts, \
.desc = { \
.name = #label, \
.id = TWL6030_REG_##label, \
- .n_voltages = (max_mVolts - min_mVolts)/100, \
+ .n_voltages = (max_mVolts - min_mVolts)/100 + 1, \
.ops = &twl6030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
}
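The "+ 1" above matters because the count of selectable voltages includes both endpoints: a 1000..3300 mV LDO in 100 mV steps has (3300 - 1000)/100 = 23 steps but 24 voltages. A quick check:

#include <stdio.h>

int main(void)
{
	int min_mV = 1000, max_mV = 3300;

	/* old (off-by-one) count vs. fixed count */
	printf("%d %d\n", (max_mV - min_mV) / 100,
			  (max_mV - min_mV) / 100 + 1);	/* prints: 23 24 */
	return 0;
}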
-#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
+#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
- .id = num, \
.min_mV = min_mVolts, \
.max_mV = max_mVolts, \
.desc = { \
@@ -903,9 +901,8 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
-#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay) { \
+#define TWL6030_FIXED_RESOURCE(label, offset, turnon_delay) { \
.base = offset, \
- .id = num, \
.delay = turnon_delay, \
.desc = { \
.name = #label, \
@@ -916,9 +913,8 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
-#define TWL6025_ADJUSTABLE_SMPS(label, offset, num) { \
+#define TWL6025_ADJUSTABLE_SMPS(label, offset) { \
.base = offset, \
- .id = num, \
.min_mV = 600, \
.max_mV = 2100, \
.desc = { \
@@ -961,32 +957,32 @@ static struct twlreg_info twl_regs[] = {
/* 6030 REG with base as PMC Slave Misc : 0x0030 */
/* Turnon-delay and remap configuration values for 6030 are not
verified since the specification is not public */
- TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1),
- TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2),
- TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3),
- TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4),
- TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5),
- TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7),
- TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0),
- TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0),
- TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0),
- TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0),
- TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0),
+ TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300),
+ TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300),
+ TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0),
+ TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0),
+ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0),
+ TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0),
+ TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0),
/* 6025 are renamed compared to 6030 versions */
- TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300, 1),
- TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300, 2),
- TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300, 3),
- TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300, 4),
- TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300, 5),
- TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300, 7),
- TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300, 16),
- TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300, 17),
- TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300, 18),
-
- TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34, 1),
- TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10, 2),
- TWL6025_ADJUSTABLE_SMPS(VIO, 0x16, 3),
+ TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300),
+ TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300),
+
+ TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34),
+ TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10),
+ TWL6025_ADJUSTABLE_SMPS(VIO, 0x16),
};
static u8 twl_get_smps_offset(void)
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index a0982e80985..bd3531d8b2a 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -267,23 +267,6 @@ static int wm831x_buckv_select_min_voltage(struct regulator_dev *rdev,
return vsel;
}
-static int wm831x_buckv_select_max_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- u16 vsel;
-
- if (max_uV < 600000 || max_uV > 1800000)
- return -EINVAL;
-
- vsel = ((max_uV - 600000) / 12500) + 8;
-
- if (wm831x_buckv_list_voltage(rdev, vsel) < min_uV ||
- wm831x_buckv_list_voltage(rdev, vsel) < max_uV)
- return -EINVAL;
-
- return vsel;
-}
-
static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
@@ -338,28 +321,23 @@ static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
if (ret < 0)
return ret;
- /* Set the high voltage as the DVS voltage. This is optimised
- * for CPUfreq usage, most processors will keep the maximum
- * voltage constant and lower the minimum with the frequency. */
- vsel = wm831x_buckv_select_max_voltage(rdev, min_uV, max_uV);
- if (vsel < 0) {
- /* This should never happen - at worst the same vsel
- * should be chosen */
- WARN_ON(vsel < 0);
- return 0;
+ /*
+ * If this VSEL is higher than the last one we've seen then
+ * remember it as the DVS VSEL. This is optimised for CPUfreq
+ * usage where we want to get to the highest voltage very
+ * quickly.
+ */
+ if (vsel > dcdc->dvs_vsel) {
+ ret = wm831x_set_bits(wm831x, dvs_reg,
+ WM831X_DC1_DVS_VSEL_MASK,
+ dcdc->dvs_vsel);
+ if (ret == 0)
+ dcdc->dvs_vsel = vsel;
+ else
+ dev_warn(wm831x->dev,
+ "Failed to set DCDC DVS VSEL: %d\n", ret);
}
- /* Don't bother if it's the same VSEL we're already using */
- if (vsel == dcdc->on_vsel)
- return 0;
-
- ret = wm831x_set_bits(wm831x, dvs_reg, WM831X_DC1_DVS_VSEL_MASK, vsel);
- if (ret == 0)
- dcdc->dvs_vsel = vsel;
- else
- dev_warn(wm831x->dev, "Failed to set DCDC DVS VSEL: %d\n",
- ret);
-
return 0;
}
@@ -456,27 +434,6 @@ static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
if (!pdata || !pdata->dvs_gpio)
return;
- switch (pdata->dvs_control_src) {
- case 1:
- ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT;
- break;
- case 2:
- ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT;
- break;
- default:
- dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n",
- pdata->dvs_control_src, dcdc->name);
- return;
- }
-
- ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL,
- WM831X_DC1_DVS_SRC_MASK, ctrl);
- if (ret < 0) {
- dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n",
- dcdc->name, ret);
- return;
- }
-
ret = gpio_request(pdata->dvs_gpio, "DCDC DVS");
if (ret < 0) {
dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n",
@@ -498,17 +455,57 @@ static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
}
dcdc->dvs_gpio = pdata->dvs_gpio;
+
+ switch (pdata->dvs_control_src) {
+ case 1:
+ ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT;
+ break;
+ case 2:
+ ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT;
+ break;
+ default:
+ dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n",
+ pdata->dvs_control_src, dcdc->name);
+ return;
+ }
+
+ /* If DVS_VSEL is set to the minimum value then raise it to ON_VSEL
+ * to make bootstrapping a bit smoother.
+ */
+ if (!dcdc->dvs_vsel) {
+ ret = wm831x_set_bits(wm831x,
+ dcdc->base + WM831X_DCDC_DVS_CONTROL,
+ WM831X_DC1_DVS_VSEL_MASK, dcdc->on_vsel);
+ if (ret == 0)
+ dcdc->dvs_vsel = dcdc->on_vsel;
+ else
+ dev_warn(wm831x->dev, "Failed to set DVS_VSEL: %d\n",
+ ret);
+ }
+
+ ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL,
+ WM831X_DC1_DVS_SRC_MASK, ctrl);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n",
+ dcdc->name, ret);
+ }
}
static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->dcdc);
+ int id;
struct wm831x_dcdc *dcdc;
struct resource *res;
int ret, irq;
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
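The arithmetic above recovers a zero-based regulator index from the platform-device id. Assuming (my reading, not stated in this hunk) that a non-zero pdata->wm831x_num makes the parent MFD register its cells starting at id wm831x_num*10 + 1, the mapping works out as below:

#include <stdio.h>

/* Hypothetical worked example of the id calculation in the probes above. */
int main(void)
{
	int wm831x_num = 2;		/* second wm831x on the board */
	int pdev_id = 21;		/* assumed id of that chip's first DCDC cell */

	int base = wm831x_num ? wm831x_num * 10 + 1 : 0;
	int id = pdev_id - base;

	printf("DCDC%d -> pdata->dcdc[%d]\n", id + 1, id);	/* DCDC1 -> [0] */
	return 0;
}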
dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1);
if (pdata == NULL || pdata->dcdc[id] == NULL)
@@ -545,7 +542,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
}
dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK;
- ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG);
+ ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL);
if (ret < 0) {
dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret);
goto err;
@@ -709,11 +706,17 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->dcdc);
+ int id;
struct wm831x_dcdc *dcdc;
struct resource *res;
int ret, irq;
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1);
if (pdata == NULL || pdata->dcdc[id] == NULL)
@@ -1046,3 +1049,4 @@ MODULE_DESCRIPTION("WM831x DC-DC convertor driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-buckv");
MODULE_ALIAS("platform:wm831x-buckp");
+MODULE_ALIAS("platform:wm831x-epe");
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 2220cf8defb..6709710a059 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -310,11 +310,17 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->ldo);
+ int id;
struct wm831x_ldo *ldo;
struct resource *res;
int ret, irq;
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
if (pdata == NULL || pdata->ldo[id] == NULL)
@@ -574,11 +580,17 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->ldo);
+ int id;
struct wm831x_ldo *ldo;
struct resource *res;
int ret, irq;
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
if (pdata == NULL || pdata->ldo[id] == NULL)
@@ -764,11 +776,18 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int id = pdev->id % ARRAY_SIZE(pdata->ldo);
+ int id;
struct wm831x_ldo *ldo;
struct resource *res;
int ret;
+ if (pdata && pdata->wm831x_num)
+ id = (pdata->wm831x_num * 10) + 1;
+ else
+ id = 0;
+ id = pdev->id - id;
+
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
if (pdata == NULL || pdata->ldo[id] == NULL)
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 35b2958d510..1a6a690f24d 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -43,7 +43,7 @@ static int wm8994_ldo_enable(struct regulator_dev *rdev)
if (!ldo->enable)
return 0;
- gpio_set_value(ldo->enable, 1);
+ gpio_set_value_cansleep(ldo->enable, 1);
ldo->is_enabled = true;
return 0;
@@ -57,7 +57,7 @@ static int wm8994_ldo_disable(struct regulator_dev *rdev)
if (!ldo->enable)
return -EINVAL;
- gpio_set_value(ldo->enable, 0);
+ gpio_set_value_cansleep(ldo->enable, 0);
ldo->is_enabled = false;
return 0;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index ce2aabf5c55..5a538fc1cc8 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -981,11 +981,11 @@ config RTC_DRV_COH901331
config RTC_DRV_STMP
- tristate "Freescale STMP3xxx RTC"
- depends on ARCH_STMP3XXX
+ tristate "Freescale STMP3xxx/i.MX23/i.MX28 RTC"
+ depends on ARCH_MXS
help
If you say yes here you will get support for the onboard
- STMP3xxx RTC.
+ STMP3xxx/i.MX23/i.MX28 RTC.
This driver can also be built as a module. If so, the module
will be called rtc-stmp3xxx.
@@ -1006,10 +1006,10 @@ config RTC_DRV_MC13XXX
config RTC_DRV_MPC5121
tristate "Freescale MPC5121 built-in RTC"
- depends on PPC_MPC512x && RTC_CLASS
+ depends on PPC_MPC512x || PPC_MPC52xx
help
If you say yes here you will get support for the
- built-in RTC MPC5121.
+ built-in RTC on MPC5121 or on MPC5200.
This driver can also be built as a module. If so, the module
will be called rtc-mpc5121.
@@ -1034,6 +1034,16 @@ config RTC_DRV_LPC32XX
This driver can also be built as a module. If so, the module
will be called rtc-lpc32xx.
+config RTC_DRV_PM8XXX
+ tristate "Qualcomm PMIC8XXX RTC"
+ depends on MFD_PM8XXX
+ help
+ If you say yes here you get support for the
+ Qualcomm PMIC8XXX RTC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rtc-pm8xxx.
+
config RTC_DRV_TEGRA
tristate "NVIDIA Tegra Internal RTC driver"
depends on RTC_CLASS && ARCH_TEGRA
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 0ffefe877bf..6e6982335c1 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -77,6 +77,7 @@ obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o
obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o
obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
+obj-$(CONFIG_RTC_DRV_PM8XXX) += rtc-pm8xxx.o
obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
obj-$(CONFIG_RTC_DRV_PUV3) += rtc-puv3.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 4194e59e14c..01a7df5317c 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -41,20 +41,41 @@ static void rtc_device_release(struct device *dev)
* system's wall clock; restore it on resume().
*/
-static time_t oldtime;
-static struct timespec oldts;
+static struct timespec old_rtc, old_system, old_delta;
+
static int rtc_suspend(struct device *dev, pm_message_t mesg)
{
struct rtc_device *rtc = to_rtc_device(dev);
struct rtc_time tm;
-
+ struct timespec delta, delta_delta;
if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
return 0;
+ /* snapshot the current RTC and system time at suspend */
rtc_read_time(rtc, &tm);
- ktime_get_ts(&oldts);
- rtc_tm_to_time(&tm, &oldtime);
+ getnstimeofday(&old_system);
+ rtc_tm_to_time(&tm, &old_rtc.tv_sec);
+
+ /*
+ * To avoid drift caused by repeated suspend/resumes,
+ * which each can add ~1 second drift error,
+ * try to compensate so the difference in system time
+ * and rtc time stays close to constant.
+ */
+ delta = timespec_sub(old_system, old_rtc);
+ delta_delta = timespec_sub(delta, old_delta);
+ if (abs(delta_delta.tv_sec) >= 2) {
+ /*
+ * If delta_delta is too large, assume a time correction
+ * has occurred and set old_delta to the current delta.
+ */
+ old_delta = delta;
+ } else {
+ /* Otherwise try to adjust old_system to compensate */
+ old_system = timespec_sub(old_system, delta_delta);
+ }
return 0;
}
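A worked example of the compensation above (numbers invented): if the saved system-minus-RTC delta was 100.0 s and this suspend measures 100.4 s, delta_delta is 0.4 s, below the 2 s threshold, so old_system is nudged back by 0.4 s and the accumulated error does not grow; a one-hour jump instead means the clock was set, so old_delta is simply re-based. In plain C:

#include <stdio.h>

int main(void)
{
	double old_delta = 100.0;	/* previously saved system - rtc, seconds */
	double delta = 100.4;		/* measured at this suspend */
	double old_system = 5000.0;	/* snapshot of system time, seconds */

	double dd = delta - old_delta;
	double abs_dd = dd < 0 ? -dd : dd;

	if (abs_dd >= 2.0)
		old_delta = delta;	/* clock was set; re-base */
	else
		old_system -= dd;	/* absorb sub-2s RTC jitter */

	printf("old_system=%.1f old_delta=%.1f\n", old_system, old_delta);
	return 0;			/* old_system=4999.6 old_delta=100.0 */
}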
@@ -63,32 +84,42 @@ static int rtc_resume(struct device *dev)
{
struct rtc_device *rtc = to_rtc_device(dev);
struct rtc_time tm;
- time_t newtime;
- struct timespec time;
- struct timespec newts;
+ struct timespec new_system, new_rtc;
+ struct timespec sleep_time;
if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
return 0;
- ktime_get_ts(&newts);
+ /* snapshot the current rtc and system time at resume */
+ getnstimeofday(&new_system);
rtc_read_time(rtc, &tm);
if (rtc_valid_tm(&tm) != 0) {
pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev));
return 0;
}
- rtc_tm_to_time(&tm, &newtime);
- if (newtime <= oldtime) {
- if (newtime < oldtime)
+ rtc_tm_to_time(&tm, &new_rtc.tv_sec);
+ new_rtc.tv_nsec = 0;
+
+ if (new_rtc.tv_sec <= old_rtc.tv_sec) {
+ if (new_rtc.tv_sec < old_rtc.tv_sec)
pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
return 0;
}
- /* calculate the RTC time delta */
- set_normalized_timespec(&time, newtime - oldtime, 0);
- /* subtract kernel time between rtc_suspend to rtc_resume */
- time = timespec_sub(time, timespec_sub(newts, oldts));
+ /* calculate the RTC time delta (sleep time) */
+ sleep_time = timespec_sub(new_rtc, old_rtc);
+
+ /*
+ * Since these RTC suspend/resume handlers are not called
+ * at the very end of suspend or the start of resume,
+ * some run-time may pass on either side of the sleep time,
+ * so subtract the kernel run-time between rtc_suspend and rtc_resume
+ * to keep things accurate.
+ */
+ sleep_time = timespec_sub(sleep_time,
+ timespec_sub(new_system, old_system));
- timekeeping_inject_sleeptime(&time);
+ timekeeping_inject_sleeptime(&sleep_time);
return 0;
}
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index df68618f6db..44e91e598f8 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -636,6 +636,29 @@ void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task)
}
EXPORT_SYMBOL_GPL(rtc_irq_unregister);
+static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
+{
+ /*
+ * We always cancel the timer here first, because otherwise
+ * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
+ * when we manage to start the timer before the callback
+ * returns HRTIMER_RESTART.
+ *
+ * We cannot use hrtimer_cancel() here as a running callback
+ * could be blocked on rtc->irq_task_lock and hrtimer_cancel()
+ * would spin forever.
+ */
+ if (hrtimer_try_to_cancel(&rtc->pie_timer) < 0)
+ return -1;
+
+ if (enabled) {
+ ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq);
+
+ hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
+ }
+ return 0;
+}
+
/**
* rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs
* @rtc: the rtc device
@@ -651,21 +674,21 @@ int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled
int err = 0;
unsigned long flags;
+retry:
spin_lock_irqsave(&rtc->irq_task_lock, flags);
if (rtc->irq_task != NULL && task == NULL)
err = -EBUSY;
if (rtc->irq_task != task)
err = -EACCES;
-
- if (enabled) {
- ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
- hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
- } else {
- hrtimer_cancel(&rtc->pie_timer);
+ if (!err) {
+ if (rtc_update_hrtimer(rtc, enabled) < 0) {
+ spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
+ cpu_relax();
+ goto retry;
+ }
+ rtc->pie_enabled = enabled;
}
- rtc->pie_enabled = enabled;
spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
-
return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_state);
@@ -685,22 +708,20 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
int err = 0;
unsigned long flags;
- if (freq <= 0)
+ if (freq <= 0 || freq > RTC_MAX_FREQ)
return -EINVAL;
-
+retry:
spin_lock_irqsave(&rtc->irq_task_lock, flags);
if (rtc->irq_task != NULL && task == NULL)
err = -EBUSY;
if (rtc->irq_task != task)
err = -EACCES;
- if (err == 0) {
+ if (!err) {
rtc->irq_freq = freq;
- if (rtc->pie_enabled) {
- ktime_t period;
- hrtimer_cancel(&rtc->pie_timer);
- period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
- hrtimer_start(&rtc->pie_timer, period,
- HRTIMER_MODE_REL);
+ if (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0) {
+ spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
+ cpu_relax();
+ goto retry;
}
}
spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index e725d51e773..8dd08305aae 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -223,7 +223,7 @@ static int __init at32_rtc_probe(struct platform_device *pdev)
}
rtc->irq = irq;
- rtc->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ rtc->regs = ioremap(regs->start, resource_size(regs));
if (!rtc->regs) {
ret = -ENOMEM;
dev_dbg(&pdev->dev, "could not map I/O memory\n");
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 911e75cdc12..05beb6c1ca7 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -606,7 +606,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
* (needing ioremap etc), not i/o space resources like this ...
*/
ports = request_region(ports->start,
- ports->end + 1 - ports->start,
+ resource_size(ports),
driver_name);
if (!ports) {
dev_dbg(dev, "i/o registers already in use\n");
@@ -750,7 +750,7 @@ cleanup1:
cmos_rtc.dev = NULL;
rtc_device_unregister(cmos_rtc.rtc);
cleanup0:
- release_region(ports->start, ports->end + 1 - ports->start);
+ release_region(ports->start, resource_size(ports));
return retval;
}
@@ -779,7 +779,7 @@ static void __exit cmos_do_remove(struct device *dev)
cmos->rtc = NULL;
ports = cmos->iomem;
- release_region(ports->start, ports->end + 1 - ports->start);
+ release_region(ports->start, resource_size(ports));
cmos->iomem = NULL;
cmos->dev = NULL;
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index 47e681df31e..68e6caf2549 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -343,7 +343,7 @@ static int __devinit ds1286_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->size = res->end - res->start + 1;
+ priv->size = resource_size(res);
if (!request_mem_region(res->start, priv->size, pdev->name)) {
ret = -EBUSY;
goto out;
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 4724ba3acf1..b2005b44e4f 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -149,6 +149,7 @@ static const struct i2c_device_id ds1307_id[] = {
{ "ds1340", ds_1340 },
{ "ds3231", ds_3231 },
{ "m41t00", m41t00 },
+ { "pt7c4338", ds_1307 },
{ "rx8025", rx_8025 },
{ }
};
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index fbabc773dde..568ad30617e 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -490,7 +490,7 @@ ds1511_rtc_probe(struct platform_device *pdev)
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- pdata->size = res->end - res->start + 1;
+ pdata->size = resource_size(res);
if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
pdev->name))
return -EBUSY;
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 042630c90dd..d84a448dd75 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -173,7 +173,7 @@ static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- pdata->size = res->end - res->start + 1;
+ pdata->size = resource_size(res);
if (!devm_request_mem_region(&pdev->dev, res->start, pdata->size,
pdev->name))
return -EBUSY;
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index 7410875e583..8e2a24e33ed 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -154,7 +154,7 @@ static int __devinit m48t35_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->size = res->end - res->start + 1;
+ priv->size = resource_size(res);
/*
* kludge: remove the #ifndef after ioc3 resource
* conflicts are resolved
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 3978f4caf72..28365388fb6 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -433,7 +433,7 @@ static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
if (!m48t59->ioaddr) {
/* ioaddr not mapped externally */
- m48t59->ioaddr = ioremap(res->start, res->end - res->start + 1);
+ m48t59->ioaddr = ioremap(res->start, resource_size(res));
if (!m48t59->ioaddr)
goto out;
}
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 09ccd8d3ba2..da60915818b 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -3,6 +3,7 @@
*
* Copyright 2007, Domen Puncer <domen.puncer@telargo.com>
* Copyright 2008, Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2011, Dmitry Eremin-Solenikov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -145,6 +146,55 @@ static int mpc5121_rtc_set_time(struct device *dev, struct rtc_time *tm)
return 0;
}
+static int mpc5200_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
+ struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
+ int tmp;
+
+ tm->tm_sec = in_8(&regs->second);
+ tm->tm_min = in_8(&regs->minute);
+
+ /* 12 hour format? */
+ if (in_8(&regs->hour) & 0x20)
+ tm->tm_hour = (in_8(&regs->hour) >> 1) +
+ (in_8(&regs->hour) & 1 ? 12 : 0);
+ else
+ tm->tm_hour = in_8(&regs->hour);
+
+ tmp = in_8(&regs->wday_mday);
+ tm->tm_mday = tmp & 0x1f;
+ tm->tm_mon = in_8(&regs->month) - 1;
+ tm->tm_year = in_be16(&regs->year) - 1900;
+ tm->tm_wday = (tmp >> 5) % 7;
+ tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
+ tm->tm_isdst = 0;
+
+ return 0;
+}
+
+static int mpc5200_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
+ struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
+
+ mpc5121_rtc_update_smh(regs, tm);
+
+ /* date */
+ out_8(&regs->month_set, tm->tm_mon + 1);
+ out_8(&regs->weekday_set, tm->tm_wday ? tm->tm_wday : 7);
+ out_8(&regs->date_set, tm->tm_mday);
+ out_be16(&regs->year_set, tm->tm_year + 1900);
+
+ /* set date sequence */
+ out_8(&regs->set_date, 0x1);
+ out_8(&regs->set_date, 0x3);
+ out_8(&regs->set_date, 0x1);
+ out_8(&regs->set_date, 0x0);
+
+ return 0;
+}
+
static int mpc5121_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
@@ -248,11 +298,18 @@ static const struct rtc_class_ops mpc5121_rtc_ops = {
.alarm_irq_enable = mpc5121_rtc_alarm_irq_enable,
};
+static const struct rtc_class_ops mpc5200_rtc_ops = {
+ .read_time = mpc5200_rtc_read_time,
+ .set_time = mpc5200_rtc_set_time,
+ .read_alarm = mpc5121_rtc_read_alarm,
+ .set_alarm = mpc5121_rtc_set_alarm,
+ .alarm_irq_enable = mpc5121_rtc_alarm_irq_enable,
+};
+
static int __devinit mpc5121_rtc_probe(struct platform_device *op)
{
struct mpc5121_rtc_data *rtc;
int err = 0;
- u32 ka;
rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
if (!rtc)
@@ -287,15 +344,22 @@ static int __devinit mpc5121_rtc_probe(struct platform_device *op)
goto out_dispose2;
}
- ka = in_be32(&rtc->regs->keep_alive);
- if (ka & 0x02) {
- dev_warn(&op->dev,
- "mpc5121-rtc: Battery or oscillator failure!\n");
- out_be32(&rtc->regs->keep_alive, ka);
+ if (of_device_is_compatible(op->dev.of_node, "fsl,mpc5121-rtc")) {
+ u32 ka;
+ ka = in_be32(&rtc->regs->keep_alive);
+ if (ka & 0x02) {
+ dev_warn(&op->dev,
+ "mpc5121-rtc: Battery or oscillator failure!\n");
+ out_be32(&rtc->regs->keep_alive, ka);
+ }
+
+ rtc->rtc = rtc_device_register("mpc5121-rtc", &op->dev,
+ &mpc5121_rtc_ops, THIS_MODULE);
+ } else {
+ rtc->rtc = rtc_device_register("mpc5200-rtc", &op->dev,
+ &mpc5200_rtc_ops, THIS_MODULE);
}
- rtc->rtc = rtc_device_register("mpc5121-rtc", &op->dev,
- &mpc5121_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc->rtc)) {
err = PTR_ERR(rtc->rtc);
goto out_free_irq;
@@ -340,6 +404,7 @@ static int __devexit mpc5121_rtc_remove(struct platform_device *op)
static struct of_device_id mpc5121_rtc_match[] __devinitdata = {
{ .compatible = "fsl,mpc5121-rtc", },
+ { .compatible = "fsl,mpc5200-rtc", },
{},
};
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 0cec5650d56..d33544802a2 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -332,9 +332,8 @@ vrtc_mrst_do_probe(struct device *dev, struct resource *iomem, int rtc_irq)
if (!iomem)
return -ENODEV;
- iomem = request_mem_region(iomem->start,
- iomem->end + 1 - iomem->start,
- driver_name);
+ iomem = request_mem_region(iomem->start, resource_size(iomem),
+ driver_name);
if (!iomem) {
dev_dbg(dev, "i/o mem already in use.\n");
return -EBUSY;
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index bcae8dd4149..7789002bdd5 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -368,7 +368,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
pr_info("%s: already running\n", pdev->name);
/* force to 24 hour mode */
- new_ctrl = reg & ~(OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP);
+ new_ctrl = reg & (OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP);
new_ctrl |= OMAP_RTC_CTRL_STOP;
/* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE:
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
new file mode 100644
index 00000000000..d420e9d877e
--- /dev/null
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -0,0 +1,550 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pm8xxx/rtc.h>
+
+/* RTC Register offsets from RTC CTRL REG */
+#define PM8XXX_ALARM_CTRL_OFFSET 0x01
+#define PM8XXX_RTC_WRITE_OFFSET 0x02
+#define PM8XXX_RTC_READ_OFFSET 0x06
+#define PM8XXX_ALARM_RW_OFFSET 0x0A
+
+/* RTC_CTRL register bit fields */
+#define PM8xxx_RTC_ENABLE BIT(7)
+#define PM8xxx_RTC_ALARM_ENABLE BIT(1)
+#define PM8xxx_RTC_ALARM_CLEAR BIT(0)
+
+#define NUM_8_BIT_RTC_REGS 0x4
+
+/**
+ * struct pm8xxx_rtc - rtc driver internal structure
+ * @rtc: rtc device for this driver.
+ * @rtc_alarm_irq: rtc alarm irq number.
+ * @rtc_base: address of rtc control register.
+ * @rtc_read_base: base address of read registers.
+ * @rtc_write_base: base address of write registers.
+ * @alarm_rw_base: base address of alarm registers.
+ * @ctrl_reg: rtc control register.
+ * @rtc_dev: device structure.
+ * @ctrl_reg_lock: spinlock protecting access to ctrl_reg.
+ */
+struct pm8xxx_rtc {
+ struct rtc_device *rtc;
+ int rtc_alarm_irq;
+ int rtc_base;
+ int rtc_read_base;
+ int rtc_write_base;
+ int alarm_rw_base;
+ u8 ctrl_reg;
+ struct device *rtc_dev;
+ spinlock_t ctrl_reg_lock;
+};
+
+/*
+ * The RTC registers need to be read/written one byte at a time. This is a
+ * hardware limitation.
+ */
+static int pm8xxx_read_wrapper(struct pm8xxx_rtc *rtc_dd, u8 *rtc_val,
+ int base, int count)
+{
+ int i, rc;
+ struct device *parent = rtc_dd->rtc_dev->parent;
+
+ for (i = 0; i < count; i++) {
+ rc = pm8xxx_readb(parent, base + i, &rtc_val[i]);
+ if (rc < 0) {
+ dev_err(rtc_dd->rtc_dev, "PMIC read failed\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int pm8xxx_write_wrapper(struct pm8xxx_rtc *rtc_dd, u8 *rtc_val,
+ int base, int count)
+{
+ int i, rc;
+ struct device *parent = rtc_dd->rtc_dev->parent;
+
+ for (i = 0; i < count; i++) {
+ rc = pm8xxx_writeb(parent, base + i, rtc_val[i]);
+ if (rc < 0) {
+ dev_err(rtc_dd->rtc_dev, "PMIC write failed\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Steps to write the RTC registers.
+ * 1. Disable alarm if enabled.
+ * 2. Write 0x00 to LSB.
+ * 3. Write Byte[1], Byte[2], Byte[3] then Byte[0].
+ * 4. Enable alarm if disabled in step 1.
+ */
+static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ int rc, i;
+ unsigned long secs, irq_flags;
+ u8 value[NUM_8_BIT_RTC_REGS], reg = 0, alarm_enabled = 0, ctrl_reg;
+ struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ rtc_tm_to_time(tm, &secs);
+
+ dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
+
+ for (i = 0; i < NUM_8_BIT_RTC_REGS; i++) {
+ value[i] = secs & 0xFF;
+ secs >>= 8;
+ }
+
+ spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
+ ctrl_reg = rtc_dd->ctrl_reg;
+
+ if (ctrl_reg & PM8xxx_RTC_ALARM_ENABLE) {
+ alarm_enabled = 1;
+ ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
+ rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base,
+ 1);
+ if (rc < 0) {
+ dev_err(dev, "Write to RTC control register "
+ "failed\n");
+ goto rtc_rw_fail;
+ }
+ rtc_dd->ctrl_reg = ctrl_reg;
+ } else
+ spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+
+ /* Write 0 to Byte[0] */
+ reg = 0;
+ rc = pm8xxx_write_wrapper(rtc_dd, &reg, rtc_dd->rtc_write_base, 1);
+ if (rc < 0) {
+ dev_err(dev, "Write to RTC write data register failed\n");
+ goto rtc_rw_fail;
+ }
+
+ /* Write Byte[1], Byte[2], Byte[3] */
+ rc = pm8xxx_write_wrapper(rtc_dd, value + 1,
+ rtc_dd->rtc_write_base + 1, 3);
+ if (rc < 0) {
+ dev_err(dev, "Write to RTC write data register failed\n");
+ goto rtc_rw_fail;
+ }
+
+ /* Write Byte[0] */
+ rc = pm8xxx_write_wrapper(rtc_dd, value, rtc_dd->rtc_write_base, 1);
+ if (rc < 0) {
+ dev_err(dev, "Write to RTC write data register failed\n");
+ goto rtc_rw_fail;
+ }
+
+ if (alarm_enabled) {
+ ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE;
+ rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base,
+ 1);
+ if (rc < 0) {
+ dev_err(dev, "Write to RTC control register "
+ "failed\n");
+ goto rtc_rw_fail;
+ }
+ rtc_dd->ctrl_reg = ctrl_reg;
+ }
+
+rtc_rw_fail:
+ if (alarm_enabled)
+ spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+
+ return rc;
+}
+
+static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ int rc;
+ u8 value[NUM_8_BIT_RTC_REGS], reg;
+ unsigned long secs;
+ struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ rc = pm8xxx_read_wrapper(rtc_dd, value, rtc_dd->rtc_read_base,
+ NUM_8_BIT_RTC_REGS);
+ if (rc < 0) {
+ dev_err(dev, "RTC read data register failed\n");
+ return rc;
+ }
+
+ /*
+ * Read the LSB again and check if there has been a carry over.
+ * If there is, redo the read operation.
+ */
+ rc = pm8xxx_read_wrapper(rtc_dd, &reg, rtc_dd->rtc_read_base, 1);
+ if (rc < 0) {
+ dev_err(dev, "RTC read data register failed\n");
+ return rc;
+ }
+
+ if (unlikely(reg < value[0])) {
+ rc = pm8xxx_read_wrapper(rtc_dd, value,
+ rtc_dd->rtc_read_base, NUM_8_BIT_RTC_REGS);
+ if (rc < 0) {
+ dev_err(dev, "RTC read data register failed\n");
+ return rc;
+ }
+ }
+
+ secs = value[0] | (value[1] << 8) | (value[2] << 16) | (value[3] << 24);
+
+ rtc_time_to_tm(secs, tm);
+
+ rc = rtc_valid_tm(tm);
+ if (rc < 0) {
+ dev_err(dev, "Invalid time read from RTC\n");
+ return rc;
+ }
+
+ dev_dbg(dev, "secs = %lu, h:m:s == %d:%d:%d, d/m/y = %d/%d/%d\n",
+ secs, tm->tm_hour, tm->tm_min, tm->tm_sec,
+ tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+ return 0;
+}
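The LSB re-read above guards against the counter ticking mid-read: if the second LSB sample is smaller than the first, a carry rippled into the higher bytes while they were being fetched, so the assembled value may be inconsistent and the read is redone. A toy illustration with made-up samples:

#include <stdio.h>

int main(void)
{
	/* Bytes as they might be sampled, LSB first, while the counter
	 * ticks from 0x000011FF to 0x00001200 between byte reads. */
	unsigned char value[4] = { 0xFF, 0x12, 0x00, 0x00 };
	unsigned char lsb_again = 0x00;		/* second LSB read */

	unsigned long secs = value[0] | (value[1] << 8) |
			     ((unsigned long)value[2] << 16) |
			     ((unsigned long)value[3] << 24);

	printf("first pass: 0x%08lx\n", secs);	/* 0x000012FF: bogus */
	if (lsb_again < value[0])
		printf("carry detected, re-read all four bytes\n");
	return 0;
}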
+
+static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ int rc, i;
+ u8 value[NUM_8_BIT_RTC_REGS], ctrl_reg;
+ unsigned long secs, irq_flags;
+ struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ rtc_tm_to_time(&alarm->time, &secs);
+
+ for (i = 0; i < NUM_8_BIT_RTC_REGS; i++) {
+ value[i] = secs & 0xFF;
+ secs >>= 8;
+ }
+
+ spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
+
+ rc = pm8xxx_write_wrapper(rtc_dd, value, rtc_dd->alarm_rw_base,
+ NUM_8_BIT_RTC_REGS);
+ if (rc < 0) {
+ dev_err(dev, "Write to RTC ALARM register failed\n");
+ goto rtc_rw_fail;
+ }
+
+ ctrl_reg = rtc_dd->ctrl_reg;
+ ctrl_reg = alarm->enabled ? (ctrl_reg | PM8xxx_RTC_ALARM_ENABLE) :
+ (ctrl_reg & ~PM8xxx_RTC_ALARM_ENABLE);
+
+ rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1);
+ if (rc < 0) {
+ dev_err(dev, "Write to RTC control register failed\n");
+ goto rtc_rw_fail;
+ }
+
+ rtc_dd->ctrl_reg = ctrl_reg;
+
+ dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
+ alarm->time.tm_hour, alarm->time.tm_min,
+ alarm->time.tm_sec, alarm->time.tm_mday,
+ alarm->time.tm_mon, alarm->time.tm_year);
+rtc_rw_fail:
+ spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+ return rc;
+}
+
+static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ int rc;
+ u8 value[NUM_8_BIT_RTC_REGS];
+ unsigned long secs;
+ struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ rc = pm8xxx_read_wrapper(rtc_dd, value, rtc_dd->alarm_rw_base,
+ NUM_8_BIT_RTC_REGS);
+ if (rc < 0) {
+ dev_err(dev, "RTC alarm time read failed\n");
+ return rc;
+ }
+
+ secs = value[0] | (value[1] << 8) | (value[2] << 16) | (value[3] << 24);
+
+ rtc_time_to_tm(secs, &alarm->time);
+
+ rc = rtc_valid_tm(&alarm->time);
+ if (rc < 0) {
+ dev_err(dev, "Invalid alarm time read from RTC\n");
+ return rc;
+ }
+
+ dev_dbg(dev, "Alarm set for - h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
+ alarm->time.tm_hour, alarm->time.tm_min,
+ alarm->time.tm_sec, alarm->time.tm_mday,
+ alarm->time.tm_mon, alarm->time.tm_year);
+
+ return 0;
+}
+
+static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
+{
+ int rc;
+ unsigned long irq_flags;
+ struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+ u8 ctrl_reg;
+
+ spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
+ ctrl_reg = rtc_dd->ctrl_reg;
+ ctrl_reg = (enable) ? (ctrl_reg | PM8xxx_RTC_ALARM_ENABLE) :
+ (ctrl_reg & ~PM8xxx_RTC_ALARM_ENABLE);
+
+ rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1);
+ if (rc < 0) {
+ dev_err(dev, "Write to RTC control register failed\n");
+ goto rtc_rw_fail;
+ }
+
+ rtc_dd->ctrl_reg = ctrl_reg;
+
+rtc_rw_fail:
+ spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+ return rc;
+}
+
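+/* .set_time is filled in at probe time only when the platform allows RTC writes */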
+static struct rtc_class_ops pm8xxx_rtc_ops = {
+ .read_time = pm8xxx_rtc_read_time,
+ .set_alarm = pm8xxx_rtc_set_alarm,
+ .read_alarm = pm8xxx_rtc_read_alarm,
+ .alarm_irq_enable = pm8xxx_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
+{
+ struct pm8xxx_rtc *rtc_dd = dev_id;
+ u8 ctrl_reg;
+ int rc;
+ unsigned long irq_flags;
+
+ rtc_update_irq(rtc_dd->rtc, 1, RTC_IRQF | RTC_AF);
+
+ spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
+
+ /* Clear the alarm enable bit */
+ ctrl_reg = rtc_dd->ctrl_reg;
+ ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE;
+
+ rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+ dev_err(rtc_dd->rtc_dev, "Write to RTC control register "
+ "failed\n");
+ goto rtc_alarm_handled;
+ }
+
+ rtc_dd->ctrl_reg = ctrl_reg;
+ spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
+
+ /* Clear RTC alarm register */
+ rc = pm8xxx_read_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base +
+ PM8XXX_ALARM_CTRL_OFFSET, 1);
+ if (rc < 0) {
+ dev_err(rtc_dd->rtc_dev, "RTC Alarm control register read "
+ "failed\n");
+ goto rtc_alarm_handled;
+ }
+
+ ctrl_reg &= ~PM8xxx_RTC_ALARM_CLEAR;
+ rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base +
+ PM8XXX_ALARM_CTRL_OFFSET, 1);
+ if (rc < 0)
+ dev_err(rtc_dd->rtc_dev, "Write to RTC Alarm control register"
+ " failed\n");
+
+rtc_alarm_handled:
+ return IRQ_HANDLED;
+}
+
+static int __devinit pm8xxx_rtc_probe(struct platform_device *pdev)
+{
+ int rc;
+ u8 ctrl_reg;
+ bool rtc_write_enable = false;
+ struct pm8xxx_rtc *rtc_dd;
+ struct resource *rtc_resource;
+ const struct pm8xxx_rtc_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+
+ if (pdata != NULL)
+ rtc_write_enable = pdata->rtc_write_enable;
+
+ rtc_dd = kzalloc(sizeof(*rtc_dd), GFP_KERNEL);
+ if (rtc_dd == NULL) {
+ dev_err(&pdev->dev, "Unable to allocate memory!\n");
+ return -ENOMEM;
+ }
+
+ /* Initialise spinlock to protect RTC control register */
+ spin_lock_init(&rtc_dd->ctrl_reg_lock);
+
+ rtc_dd->rtc_alarm_irq = platform_get_irq(pdev, 0);
+ if (rtc_dd->rtc_alarm_irq < 0) {
+ dev_err(&pdev->dev, "Alarm IRQ resource absent!\n");
+ rc = -ENXIO;
+ goto fail_rtc_enable;
+ }
+
+ rtc_resource = platform_get_resource_byname(pdev, IORESOURCE_IO,
+ "pmic_rtc_base");
+ if (!(rtc_resource && rtc_resource->start)) {
+ dev_err(&pdev->dev, "RTC IO resource absent!\n");
+ rc = -ENXIO;
+ goto fail_rtc_enable;
+ }
+
+ rtc_dd->rtc_base = rtc_resource->start;
+
+ /* Setup RTC register addresses */
+ rtc_dd->rtc_write_base = rtc_dd->rtc_base + PM8XXX_RTC_WRITE_OFFSET;
+ rtc_dd->rtc_read_base = rtc_dd->rtc_base + PM8XXX_RTC_READ_OFFSET;
+ rtc_dd->alarm_rw_base = rtc_dd->rtc_base + PM8XXX_ALARM_RW_OFFSET;
+
+ rtc_dd->rtc_dev = &pdev->dev;
+
+ /* Check if the RTC is on, else turn it on */
+ rc = pm8xxx_read_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base, 1);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "RTC control register read failed!\n");
+ goto fail_rtc_enable;
+ }
+
+ if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) {
+ ctrl_reg |= PM8xxx_RTC_ENABLE;
+ rc = pm8xxx_write_wrapper(rtc_dd, &ctrl_reg, rtc_dd->rtc_base,
+ 1);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Write to RTC control register "
+ "failed\n");
+ goto fail_rtc_enable;
+ }
+ }
+
+ rtc_dd->ctrl_reg = ctrl_reg;
+ if (rtc_write_enable)
+ pm8xxx_rtc_ops.set_time = pm8xxx_rtc_set_time;
+
+ platform_set_drvdata(pdev, rtc_dd);
+
+ /* Register the RTC device */
+ rtc_dd->rtc = rtc_device_register("pm8xxx_rtc", &pdev->dev,
+ &pm8xxx_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc_dd->rtc)) {
+ dev_err(&pdev->dev, "%s: RTC registration failed (%ld)\n",
+ __func__, PTR_ERR(rtc_dd->rtc));
+ rc = PTR_ERR(rtc_dd->rtc);
+ goto fail_rtc_enable;
+ }
+
+ /* Request the alarm IRQ */
+ rc = request_any_context_irq(rtc_dd->rtc_alarm_irq,
+ pm8xxx_alarm_trigger, IRQF_TRIGGER_RISING,
+ "pm8xxx_rtc_alarm", rtc_dd);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Request IRQ failed (%d)\n", rc);
+ goto fail_req_irq;
+ }
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ dev_dbg(&pdev->dev, "Probe success !!\n");
+
+ return 0;
+
+fail_req_irq:
+ rtc_device_unregister(rtc_dd->rtc);
+fail_rtc_enable:
+ platform_set_drvdata(pdev, NULL);
+ kfree(rtc_dd);
+ return rc;
+}
+
+static int __devexit pm8xxx_rtc_remove(struct platform_device *pdev)
+{
+ struct pm8xxx_rtc *rtc_dd = platform_get_drvdata(pdev);
+
+ device_init_wakeup(&pdev->dev, 0);
+ free_irq(rtc_dd->rtc_alarm_irq, rtc_dd);
+ rtc_device_unregister(rtc_dd->rtc);
+ platform_set_drvdata(pdev, NULL);
+ kfree(rtc_dd);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pm8xxx_rtc_resume(struct device *dev)
+{
+ struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(rtc_dd->rtc_alarm_irq);
+
+ return 0;
+}
+
+static int pm8xxx_rtc_suspend(struct device *dev)
+{
+ struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(rtc_dd->rtc_alarm_irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pm8xxx_rtc_pm_ops, pm8xxx_rtc_suspend, pm8xxx_rtc_resume);
+
+static struct platform_driver pm8xxx_rtc_driver = {
+ .probe = pm8xxx_rtc_probe,
+ .remove = __devexit_p(pm8xxx_rtc_remove),
+ .driver = {
+ .name = PM8XXX_RTC_DEV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &pm8xxx_rtc_pm_ops,
+ },
+};
+
+static int __init pm8xxx_rtc_init(void)
+{
+ return platform_driver_register(&pm8xxx_rtc_driver);
+}
+module_init(pm8xxx_rtc_init);
+
+static void __exit pm8xxx_rtc_exit(void)
+{
+ platform_driver_unregister(&pm8xxx_rtc_driver);
+}
+module_exit(pm8xxx_rtc_exit);
+
+MODULE_ALIAS("platform:rtc-pm8xxx");
+MODULE_DESCRIPTION("PMIC8xxx RTC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anirudh Ghayal <aghayal@codeaurora.org>");
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index 46f14b82f3a..b3eba3cddd4 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -267,9 +267,8 @@ static int puv3_rtc_probe(struct platform_device *pdev)
return -ENOENT;
}
- puv3_rtc_mem = request_mem_region(res->start,
- res->end-res->start+1,
- pdev->name);
+ puv3_rtc_mem = request_mem_region(res->start, resource_size(res),
+ pdev->name);
if (puv3_rtc_mem == NULL) {
dev_err(&pdev->dev, "failed to reserve memory region\n");
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 16512ecae31..9329dbb9eba 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -57,11 +57,13 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
{
struct rtc_device *rdev = id;
+ clk_enable(rtc_clk);
rtc_update_irq(rdev, 1, RTC_AF | RTC_IRQF);
if (s3c_rtc_cpu_type == TYPE_S3C64XX)
writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP);
+ clk_disable(rtc_clk);
return IRQ_HANDLED;
}
@@ -69,11 +71,13 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id)
{
struct rtc_device *rdev = id;
+ clk_enable(rtc_clk);
rtc_update_irq(rdev, 1, RTC_PF | RTC_IRQF);
if (s3c_rtc_cpu_type == TYPE_S3C64XX)
writeb(S3C2410_INTP_TIC, s3c_rtc_base + S3C2410_INTP);
+ clk_disable(rtc_clk);
return IRQ_HANDLED;
}
@@ -84,12 +88,14 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
pr_debug("%s: aie=%d\n", __func__, enabled);
+ clk_enable(rtc_clk);
tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
if (enabled)
tmp |= S3C2410_RTCALM_ALMEN;
writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
+ clk_disable(rtc_clk);
return 0;
}
@@ -103,6 +109,7 @@ static int s3c_rtc_setfreq(struct device *dev, int freq)
if (!is_power_of_2(freq))
return -EINVAL;
+ clk_enable(rtc_clk);
spin_lock_irq(&s3c_rtc_pie_lock);
if (s3c_rtc_cpu_type == TYPE_S3C2410) {
@@ -114,6 +121,7 @@ static int s3c_rtc_setfreq(struct device *dev, int freq)
writel(tmp, s3c_rtc_base + S3C2410_TICNT);
spin_unlock_irq(&s3c_rtc_pie_lock);
+ clk_disable(rtc_clk);
return 0;
}
@@ -125,6 +133,7 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
unsigned int have_retried = 0;
void __iomem *base = s3c_rtc_base;
+ clk_enable(rtc_clk);
retry_get_time:
rtc_tm->tm_min = readb(base + S3C2410_RTCMIN);
rtc_tm->tm_hour = readb(base + S3C2410_RTCHOUR);
@@ -157,6 +166,7 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
rtc_tm->tm_year += 100;
rtc_tm->tm_mon -= 1;
+ clk_disable(rtc_clk);
return rtc_valid_tm(rtc_tm);
}
@@ -165,6 +175,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
void __iomem *base = s3c_rtc_base;
int year = tm->tm_year - 100;
+ clk_enable(rtc_clk);
pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -182,6 +193,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
writeb(bin2bcd(tm->tm_mday), base + S3C2410_RTCDATE);
writeb(bin2bcd(tm->tm_mon + 1), base + S3C2410_RTCMON);
writeb(bin2bcd(year), base + S3C2410_RTCYEAR);
+ clk_disable(rtc_clk);
return 0;
}
@@ -192,6 +204,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
void __iomem *base = s3c_rtc_base;
unsigned int alm_en;
+ clk_enable(rtc_clk);
alm_tm->tm_sec = readb(base + S3C2410_ALMSEC);
alm_tm->tm_min = readb(base + S3C2410_ALMMIN);
alm_tm->tm_hour = readb(base + S3C2410_ALMHOUR);
@@ -243,6 +256,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
else
alm_tm->tm_year = -1;
+ clk_disable(rtc_clk);
return 0;
}
@@ -252,6 +266,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
void __iomem *base = s3c_rtc_base;
unsigned int alrm_en;
+ clk_enable(rtc_clk);
pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
alrm->enabled,
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
@@ -282,6 +297,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
s3c_rtc_setaie(dev, alrm->enabled);
+ clk_disable(rtc_clk);
return 0;
}
@@ -289,6 +305,7 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
{
unsigned int ticnt;
+ clk_enable(rtc_clk);
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
ticnt = readw(s3c_rtc_base + S3C2410_RTCCON);
ticnt &= S3C64XX_RTCCON_TICEN;
@@ -298,6 +315,7 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
}
seq_printf(seq, "periodic_IRQ\t: %s\n", ticnt ? "yes" : "no");
+ clk_disable(rtc_clk);
return 0;
}
@@ -360,6 +378,7 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
if (s3c_rtc_base == NULL)
return;
+ clk_enable(rtc_clk);
if (!en) {
tmp = readw(base + S3C2410_RTCCON);
if (s3c_rtc_cpu_type == TYPE_S3C64XX)
@@ -399,6 +418,7 @@ static void s3c_rtc_enable(struct platform_device *pdev, int en)
base + S3C2410_RTCCON);
}
}
+ clk_disable(rtc_clk);
}
static int __devexit s3c_rtc_remove(struct platform_device *dev)
@@ -410,7 +430,6 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
s3c_rtc_setaie(&dev->dev, 0);
- clk_disable(rtc_clk);
clk_put(rtc_clk);
rtc_clk = NULL;
@@ -455,8 +474,7 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
return -ENOENT;
}
- s3c_rtc_mem = request_mem_region(res->start,
- res->end-res->start+1,
+ s3c_rtc_mem = request_mem_region(res->start, resource_size(res),
pdev->name);
if (s3c_rtc_mem == NULL) {
@@ -465,7 +483,7 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
goto err_nores;
}
- s3c_rtc_base = ioremap(res->start, res->end - res->start + 1);
+ s3c_rtc_base = ioremap(res->start, resource_size(res));
if (s3c_rtc_base == NULL) {
dev_err(&pdev->dev, "failed ioremap()\n");
ret = -EINVAL;
@@ -530,6 +548,8 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
s3c_rtc_setfreq(&pdev->dev, 1);
+ clk_disable(rtc_clk);
+
return 0;
err_nortc:
@@ -555,6 +575,7 @@ static int ticnt_save, ticnt_en_save;
static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
{
+ clk_enable(rtc_clk);
/* save TICNT for anyone using periodic interrupts */
ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
@@ -569,6 +590,7 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
else
dev_err(&pdev->dev, "enable_irq_wake failed\n");
}
+ clk_disable(rtc_clk);
return 0;
}
@@ -577,6 +599,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
{
unsigned int tmp;
+ clk_enable(rtc_clk);
s3c_rtc_enable(pdev, 1);
writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
@@ -588,6 +611,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
disable_irq_wake(s3c_rtc_alarmno);
wake_en = false;
}
+ clk_disable(rtc_clk);
return 0;
}
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 572e9534b59..7315068daa5 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -6,6 +6,7 @@
*
* Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ * Copyright 2011 Wolfram Sang, Pengutronix e.K.
*/
/*
@@ -18,21 +19,41 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/rtc.h>
#include <linux/slab.h>
-#include <mach/platform.h>
-#include <mach/stmp3xxx.h>
-#include <mach/regs-rtc.h>
+#include <mach/common.h>
+
+#define STMP3XXX_RTC_CTRL 0x0
+#define STMP3XXX_RTC_CTRL_SET 0x4
+#define STMP3XXX_RTC_CTRL_CLR 0x8
+#define STMP3XXX_RTC_CTRL_ALARM_IRQ_EN 0x00000001
+#define STMP3XXX_RTC_CTRL_ONEMSEC_IRQ_EN 0x00000002
+#define STMP3XXX_RTC_CTRL_ALARM_IRQ 0x00000004
+
+#define STMP3XXX_RTC_STAT 0x10
+#define STMP3XXX_RTC_STAT_STALE_SHIFT 16
+#define STMP3XXX_RTC_STAT_RTC_PRESENT 0x80000000
+
+#define STMP3XXX_RTC_SECONDS 0x30
+
+#define STMP3XXX_RTC_ALARM 0x40
+
+#define STMP3XXX_RTC_PERSISTENT0 0x60
+#define STMP3XXX_RTC_PERSISTENT0_SET 0x64
+#define STMP3XXX_RTC_PERSISTENT0_CLR 0x68
+#define STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN 0x00000002
+#define STMP3XXX_RTC_PERSISTENT0_ALARM_EN 0x00000004
+#define STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE 0x00000080
struct stmp3xxx_rtc_data {
struct rtc_device *rtc;
- unsigned irq_count;
void __iomem *io;
- int irq_alarm, irq_1msec;
+ int irq_alarm;
};
static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
@@ -42,8 +63,8 @@ static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
* NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0,
* 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS
*/
- while (__raw_readl(rtc_data->io + HW_RTC_STAT) &
- BF(0x80, RTC_STAT_STALE_REGS))
+ while (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
+ (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT))
cpu_relax();
}
@@ -53,7 +74,7 @@ static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
stmp3xxx_wait_time(rtc_data);
- rtc_time_to_tm(__raw_readl(rtc_data->io + HW_RTC_SECONDS), rtc_tm);
+ rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm);
return 0;
}
@@ -61,7 +82,7 @@ static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t)
{
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
- __raw_writel(t, rtc_data->io + HW_RTC_SECONDS);
+ writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS);
stmp3xxx_wait_time(rtc_data);
return 0;
}
@@ -70,47 +91,34 @@ static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t)
static irqreturn_t stmp3xxx_rtc_interrupt(int irq, void *dev_id)
{
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev_id);
- u32 status;
- u32 events = 0;
-
- status = __raw_readl(rtc_data->io + HW_RTC_CTRL) &
- (BM_RTC_CTRL_ALARM_IRQ | BM_RTC_CTRL_ONEMSEC_IRQ);
+ u32 status = readl(rtc_data->io + STMP3XXX_RTC_CTRL);
- if (status & BM_RTC_CTRL_ALARM_IRQ) {
- stmp3xxx_clearl(BM_RTC_CTRL_ALARM_IRQ,
- rtc_data->io + HW_RTC_CTRL);
- events |= RTC_AF | RTC_IRQF;
+ if (status & STMP3XXX_RTC_CTRL_ALARM_IRQ) {
+ writel(STMP3XXX_RTC_CTRL_ALARM_IRQ,
+ rtc_data->io + STMP3XXX_RTC_CTRL_CLR);
+ rtc_update_irq(rtc_data->rtc, 1, RTC_AF | RTC_IRQF);
+ return IRQ_HANDLED;
}
- if (status & BM_RTC_CTRL_ONEMSEC_IRQ) {
- stmp3xxx_clearl(BM_RTC_CTRL_ONEMSEC_IRQ,
- rtc_data->io + HW_RTC_CTRL);
- if (++rtc_data->irq_count % 1000 == 0) {
- events |= RTC_UF | RTC_IRQF;
- rtc_data->irq_count = 0;
- }
- }
-
- if (events)
- rtc_update_irq(rtc_data->rtc, 1, events);
-
- return IRQ_HANDLED;
+ return IRQ_NONE;
}
static int stmp3xxx_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
- void __iomem *p = rtc_data->io + HW_RTC_PERSISTENT0,
- *ctl = rtc_data->io + HW_RTC_CTRL;
if (enabled) {
- stmp3xxx_setl(BM_RTC_PERSISTENT0_ALARM_EN |
- BM_RTC_PERSISTENT0_ALARM_WAKE_EN, p);
- stmp3xxx_setl(BM_RTC_CTRL_ALARM_IRQ_EN, ctl);
+ writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN |
+ STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN,
+ rtc_data->io + STMP3XXX_RTC_PERSISTENT0_SET);
+ writel(STMP3XXX_RTC_CTRL_ALARM_IRQ_EN,
+ rtc_data->io + STMP3XXX_RTC_CTRL_SET);
} else {
- stmp3xxx_clearl(BM_RTC_PERSISTENT0_ALARM_EN |
- BM_RTC_PERSISTENT0_ALARM_WAKE_EN, p);
- stmp3xxx_clearl(BM_RTC_CTRL_ALARM_IRQ_EN, ctl);
+ writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN |
+ STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN,
+ rtc_data->io + STMP3XXX_RTC_PERSISTENT0_CLR);
+ writel(STMP3XXX_RTC_CTRL_ALARM_IRQ_EN,
+ rtc_data->io + STMP3XXX_RTC_CTRL_CLR);
}
return 0;
}
@@ -119,7 +127,7 @@ static int stmp3xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
- rtc_time_to_tm(__raw_readl(rtc_data->io + HW_RTC_ALARM), &alm->time);
+ rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_ALARM), &alm->time);
return 0;
}
@@ -129,7 +137,10 @@ static int stmp3xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
rtc_tm_to_time(&alm->time, &t);
- __raw_writel(t, rtc_data->io + HW_RTC_ALARM);
+ writel(t, rtc_data->io + STMP3XXX_RTC_ALARM);
+
+ stmp3xxx_alarm_irq_enable(dev, alm->enabled);
+
return 0;
}
@@ -149,11 +160,11 @@ static int stmp3xxx_rtc_remove(struct platform_device *pdev)
if (!rtc_data)
return 0;
- stmp3xxx_clearl(BM_RTC_CTRL_ONEMSEC_IRQ_EN | BM_RTC_CTRL_ALARM_IRQ_EN,
- rtc_data->io + HW_RTC_CTRL);
+ writel(STMP3XXX_RTC_CTRL_ALARM_IRQ_EN,
+ rtc_data->io + STMP3XXX_RTC_CTRL_CLR);
free_irq(rtc_data->irq_alarm, &pdev->dev);
- free_irq(rtc_data->irq_1msec, &pdev->dev);
rtc_device_unregister(rtc_data->rtc);
+ platform_set_drvdata(pdev, NULL);
iounmap(rtc_data->io);
kfree(rtc_data);
@@ -185,20 +196,26 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
}
rtc_data->irq_alarm = platform_get_irq(pdev, 0);
- rtc_data->irq_1msec = platform_get_irq(pdev, 1);
- if (!(__raw_readl(HW_RTC_STAT + rtc_data->io) &
- BM_RTC_STAT_RTC_PRESENT)) {
+ if (!(readl(STMP3XXX_RTC_STAT + rtc_data->io) &
+ STMP3XXX_RTC_STAT_RTC_PRESENT)) {
dev_err(&pdev->dev, "no device onboard\n");
err = -ENODEV;
goto out_remap;
}
- stmp3xxx_reset_block(rtc_data->io, true);
- stmp3xxx_clearl(BM_RTC_PERSISTENT0_ALARM_EN |
- BM_RTC_PERSISTENT0_ALARM_WAKE_EN |
- BM_RTC_PERSISTENT0_ALARM_WAKE,
- rtc_data->io + HW_RTC_PERSISTENT0);
+ platform_set_drvdata(pdev, rtc_data);
+
+ mxs_reset_block(rtc_data->io);
+ writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN |
+ STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN |
+ STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE,
+ rtc_data->io + STMP3XXX_RTC_PERSISTENT0_CLR);
+
+ writel(STMP3XXX_RTC_CTRL_ONEMSEC_IRQ_EN |
+ STMP3XXX_RTC_CTRL_ALARM_IRQ_EN,
+ rtc_data->io + STMP3XXX_RTC_CTRL_CLR);
+
rtc_data->rtc = rtc_device_register(pdev->name, &pdev->dev,
&stmp3xxx_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc_data->rtc)) {
@@ -206,33 +223,20 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
goto out_remap;
}
- rtc_data->irq_count = 0;
- err = request_irq(rtc_data->irq_alarm, stmp3xxx_rtc_interrupt,
- IRQF_DISABLED, "RTC alarm", &pdev->dev);
+ err = request_irq(rtc_data->irq_alarm, stmp3xxx_rtc_interrupt, 0,
+ "RTC alarm", &pdev->dev);
if (err) {
dev_err(&pdev->dev, "Cannot claim IRQ%d\n",
rtc_data->irq_alarm);
goto out_irq_alarm;
}
- err = request_irq(rtc_data->irq_1msec, stmp3xxx_rtc_interrupt,
- IRQF_DISABLED, "RTC tick", &pdev->dev);
- if (err) {
- dev_err(&pdev->dev, "Cannot claim IRQ%d\n",
- rtc_data->irq_1msec);
- goto out_irq1;
- }
-
- platform_set_drvdata(pdev, rtc_data);
return 0;
-out_irq1:
- free_irq(rtc_data->irq_alarm, &pdev->dev);
out_irq_alarm:
- stmp3xxx_clearl(BM_RTC_CTRL_ONEMSEC_IRQ_EN | BM_RTC_CTRL_ALARM_IRQ_EN,
- rtc_data->io + HW_RTC_CTRL);
rtc_device_unregister(rtc_data->rtc);
out_remap:
+ platform_set_drvdata(pdev, NULL);
iounmap(rtc_data->io);
out_free:
kfree(rtc_data);
@@ -249,11 +253,11 @@ static int stmp3xxx_rtc_resume(struct platform_device *dev)
{
struct stmp3xxx_rtc_data *rtc_data = platform_get_drvdata(dev);
- stmp3xxx_reset_block(rtc_data->io, true);
- stmp3xxx_clearl(BM_RTC_PERSISTENT0_ALARM_EN |
- BM_RTC_PERSISTENT0_ALARM_WAKE_EN |
- BM_RTC_PERSISTENT0_ALARM_WAKE,
- rtc_data->io + HW_RTC_PERSISTENT0);
+ mxs_reset_block(rtc_data->io);
+ writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN |
+ STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN |
+ STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE,
+ rtc_data->io + STMP3XXX_RTC_PERSISTENT0_CLR);
return 0;
}
#else
@@ -286,5 +290,6 @@ module_init(stmp3xxx_rtc_init);
module_exit(stmp3xxx_rtc_exit);
MODULE_DESCRIPTION("STMP3xxx RTC Driver");
-MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com>");
+MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com> and "
+ "Wolfram Sang <w.sang@pengutronix.de>");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 2fc31aac3f4..75259fe3860 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -343,7 +343,7 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
/* set context info. */
info->pdev = pdev;
- info->tegra_rtc_lock = __SPIN_LOCK_UNLOCKED(info->tegra_rtc_lock);
+ spin_lock_init(&info->tegra_rtc_lock);
platform_set_drvdata(pdev, info);
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index f9a2799c44d..9a81f778d6b 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -275,7 +275,7 @@ static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm)
goto out;
save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M;
- twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+ ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
if (ret < 0)
goto out;
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index b8bc862903a..f93f412423c 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -74,11 +74,12 @@
#define VT8500_RTC_CR_SM_SEC (1 << 3) /* 0: 1Hz/60, 1: 1Hz */
#define VT8500_RTC_CR_CALIB (1 << 4) /* Enable calibration */
+#define VT8500_RTC_IS_ALARM (1 << 0) /* Alarm interrupt status */
+
struct vt8500_rtc {
void __iomem *regbase;
struct resource *res;
int irq_alarm;
- int irq_hz;
struct rtc_device *rtc;
spinlock_t lock; /* Protects this structure */
};
@@ -97,13 +98,9 @@ static irqreturn_t vt8500_rtc_irq(int irq, void *dev_id)
spin_unlock(&vt8500_rtc->lock);
- if (isr & 1)
+ if (isr & VT8500_RTC_IS_ALARM)
events |= RTC_AF | RTC_IRQF;
- /* Only second/minute interrupts are supported */
- if (isr & 2)
- events |= RTC_UF | RTC_IRQF;
-
rtc_update_irq(vt8500_rtc->rtc, 1, events);
return IRQ_HANDLED;
@@ -166,8 +163,8 @@ static int vt8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->time.tm_sec = bcd2bin((alarm & TIME_SEC_MASK));
alrm->enabled = (alarm & ALARM_ENABLE_MASK) ? 1 : 0;
+ alrm->pending = (isr & VT8500_RTC_IS_ALARM) ? 1 : 0;
- alrm->pending = (isr & 1) ? 1 : 0;
return rtc_valid_tm(&alrm->time);
}
@@ -199,27 +196,12 @@ static int vt8500_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static int vt8500_update_irq_enable(struct device *dev, unsigned int enabled)
-{
- struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
- unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_CR);
-
- if (enabled)
- tmp |= VT8500_RTC_CR_SM_SEC | VT8500_RTC_CR_SM_ENABLE;
- else
- tmp &= ~VT8500_RTC_CR_SM_ENABLE;
-
- writel(tmp, vt8500_rtc->regbase + VT8500_RTC_CR);
- return 0;
-}
-
static const struct rtc_class_ops vt8500_rtc_ops = {
.read_time = vt8500_rtc_read_time,
.set_time = vt8500_rtc_set_time,
.read_alarm = vt8500_rtc_read_alarm,
.set_alarm = vt8500_rtc_set_alarm,
.alarm_irq_enable = vt8500_alarm_irq_enable,
- .update_irq_enable = vt8500_update_irq_enable,
};
static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
@@ -248,13 +230,6 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
goto err_free;
}
- vt8500_rtc->irq_hz = platform_get_irq(pdev, 1);
- if (vt8500_rtc->irq_hz < 0) {
- dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n");
- ret = -ENXIO;
- goto err_free;
- }
-
vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start,
resource_size(vt8500_rtc->res),
"vt8500-rtc");
@@ -272,9 +247,8 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
goto err_release;
}
- /* Enable the second/minute interrupt generation and enable RTC */
- writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H
- | VT8500_RTC_CR_SM_ENABLE | VT8500_RTC_CR_SM_SEC,
+ /* Enable RTC and set it to 24-hour mode */
+ writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H,
vt8500_rtc->regbase + VT8500_RTC_CR);
vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev,
@@ -286,26 +260,16 @@ static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
goto err_unmap;
}
- ret = request_irq(vt8500_rtc->irq_hz, vt8500_rtc_irq, 0,
- "rtc 1Hz", vt8500_rtc);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get irq %i, err %d\n",
- vt8500_rtc->irq_hz, ret);
- goto err_unreg;
- }
-
ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0,
"rtc alarm", vt8500_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "can't get irq %i, err %d\n",
vt8500_rtc->irq_alarm, ret);
- goto err_free_hz;
+ goto err_unreg;
}
return 0;
-err_free_hz:
- free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
err_unreg:
rtc_device_unregister(vt8500_rtc->rtc);
err_unmap:
@@ -323,7 +287,6 @@ static int __devexit vt8500_rtc_remove(struct platform_device *pdev)
struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev);
free_irq(vt8500_rtc->irq_alarm, vt8500_rtc);
- free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
rtc_device_unregister(vt8500_rtc->rtc);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 86b6f1cc1b1..a1d3ddba99c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -22,6 +22,9 @@
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
@@ -45,6 +48,7 @@
* SECTION: exported variables of dasd.c
*/
debug_info_t *dasd_debug_area;
+static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
@@ -71,6 +75,8 @@ static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
+static void dasd_profile_init(struct dasd_profile *, struct dentry *);
+static void dasd_profile_exit(struct dasd_profile *);
/*
* SECTION: Operations on the device structure.
@@ -121,7 +127,7 @@ struct dasd_device *dasd_alloc_device(void)
device->state = DASD_STATE_NEW;
device->target = DASD_STATE_NEW;
mutex_init(&device->state_mutex);
-
+ spin_lock_init(&device->profile.lock);
return device;
}
@@ -159,6 +165,7 @@ struct dasd_block *dasd_alloc_block(void)
init_timer(&block->timer);
block->timer.function = dasd_block_timeout;
block->timer.data = (unsigned long) block;
+ spin_lock_init(&block->profile.lock);
return block;
}
@@ -222,19 +229,44 @@ static int dasd_state_known_to_new(struct dasd_device *device)
return 0;
}
+static struct dentry *dasd_debugfs_setup(const char *name,
+ struct dentry *base_dentry)
+{
+ struct dentry *pde;
+
+ if (!base_dentry)
+ return NULL;
+ pde = debugfs_create_dir(name, base_dentry);
+ if (!pde || IS_ERR(pde))
+ return NULL;
+ return pde;
+}
+
/*
* Request the irq line for the device.
*/
static int dasd_state_known_to_basic(struct dasd_device *device)
{
+ struct dasd_block *block = device->block;
int rc;
/* Allocate and register gendisk structure. */
- if (device->block) {
- rc = dasd_gendisk_alloc(device->block);
+ if (block) {
+ rc = dasd_gendisk_alloc(block);
if (rc)
return rc;
- }
+ block->debugfs_dentry =
+ dasd_debugfs_setup(block->gdp->disk_name,
+ dasd_debugfs_root_entry);
+ dasd_profile_init(&block->profile, block->debugfs_dentry);
+ if (dasd_global_profile_level == DASD_PROFILE_ON)
+ dasd_profile_on(&device->block->profile);
+ }
+ device->debugfs_dentry =
+ dasd_debugfs_setup(dev_name(&device->cdev->dev),
+ dasd_debugfs_root_entry);
+ dasd_profile_init(&device->profile, device->debugfs_dentry);
+
/* register 'device' debug area, used for all DBF_DEV_XXX calls */
device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
8 * sizeof(long));
@@ -253,6 +285,9 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
{
int rc;
if (device->block) {
+ dasd_profile_exit(&device->block->profile);
+ if (device->block->debugfs_dentry)
+ debugfs_remove(device->block->debugfs_dentry);
dasd_gendisk_free(device->block);
dasd_block_clear_timer(device->block);
}
@@ -260,6 +295,9 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
if (rc)
return rc;
dasd_device_clear_timer(device);
+ dasd_profile_exit(&device->profile);
+ if (device->debugfs_dentry)
+ debugfs_remove(device->debugfs_dentry);
DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
if (device->debug_area != NULL) {
@@ -609,21 +647,13 @@ void dasd_enable_device(struct dasd_device *device)
/*
* SECTION: device operation (interrupt handler, start i/o, term i/o ...)
*/
-#ifdef CONFIG_DASD_PROFILE
-struct dasd_profile_info_t dasd_global_profile;
-unsigned int dasd_profile_level = DASD_PROFILE_OFF;
+unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
-/*
- * Increments counter in global and local profiling structures.
- */
-#define dasd_profile_counter(value, counter, block) \
-{ \
- int index; \
- for (index = 0; index < 31 && value >> (2+index); index++); \
- dasd_global_profile.counter[index]++; \
- block->profile.counter[index]++; \
-}
+#ifdef CONFIG_DASD_PROFILE
+struct dasd_profile_info dasd_global_profile_data;
+static struct dentry *dasd_global_profile_dentry;
+static struct dentry *dasd_debugfs_global_entry;
/*
* Add profiling information for cqr before execution.
@@ -634,30 +664,121 @@ static void dasd_profile_start(struct dasd_block *block,
{
struct list_head *l;
unsigned int counter;
-
- if (dasd_profile_level != DASD_PROFILE_ON)
- return;
+ struct dasd_device *device;
/* count the length of the chanq for statistics */
counter = 0;
- list_for_each(l, &block->ccw_queue)
- if (++counter >= 31)
- break;
- dasd_global_profile.dasd_io_nr_req[counter]++;
- block->profile.dasd_io_nr_req[counter]++;
+ if (dasd_global_profile_level || block->profile.data)
+ list_for_each(l, &block->ccw_queue)
+ if (++counter >= 31)
+ break;
+
+ if (dasd_global_profile_level) {
+ dasd_global_profile_data.dasd_io_nr_req[counter]++;
+ if (rq_data_dir(req) == READ)
+ dasd_global_profile_data.dasd_read_nr_req[counter]++;
+ }
+
+ spin_lock(&block->profile.lock);
+ if (block->profile.data) {
+ block->profile.data->dasd_io_nr_req[counter]++;
+ if (rq_data_dir(req) == READ)
+ block->profile.data->dasd_read_nr_req[counter]++;
+ }
+ spin_unlock(&block->profile.lock);
+
+ /*
+ * We count the request for the start device, even though it may run on
+ * some other device due to error recovery. This way we make sure that
+ * we count each request only once.
+ */
+ device = cqr->startdev;
+ if (device->profile.data) {
+ counter = 1; /* request is not yet queued on the start device */
+ list_for_each(l, &device->ccw_queue)
+ if (++counter >= 31)
+ break;
+ }
+ spin_lock(&device->profile.lock);
+ if (device->profile.data) {
+ device->profile.data->dasd_io_nr_req[counter]++;
+ if (rq_data_dir(req) == READ)
+ device->profile.data->dasd_read_nr_req[counter]++;
+ }
+ spin_unlock(&device->profile.lock);
}
/*
* Add profiling information for cqr after execution.
*/
+
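+/*
+ * dasd_profile_counter() turns a value into a histogram bucket index:
+ * bucket i roughly covers the range [2^(i+1), 2^(i+2)), capped at bucket
+ * 31, so the histograms below are logarithmic in sectors respectively
+ * microseconds.
+ */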
+#define dasd_profile_counter(value, index) \
+{ \
+ for (index = 0; index < 31 && value >> (2+index); index++) \
+ ; \
+}
+
+static void dasd_profile_end_add_data(struct dasd_profile_info *data,
+ int is_alias,
+ int is_tpm,
+ int is_read,
+ long sectors,
+ int sectors_ind,
+ int tottime_ind,
+ int tottimeps_ind,
+ int strtime_ind,
+ int irqtime_ind,
+ int irqtimeps_ind,
+ int endtime_ind)
+{
+ /* in case of an overflow, reset the whole profile */
+ if (data->dasd_io_reqs == UINT_MAX) {
+ memset(data, 0, sizeof(*data));
+ getnstimeofday(&data->starttod);
+ }
+ data->dasd_io_reqs++;
+ data->dasd_io_sects += sectors;
+ if (is_alias)
+ data->dasd_io_alias++;
+ if (is_tpm)
+ data->dasd_io_tpm++;
+
+ data->dasd_io_secs[sectors_ind]++;
+ data->dasd_io_times[tottime_ind]++;
+ data->dasd_io_timps[tottimeps_ind]++;
+ data->dasd_io_time1[strtime_ind]++;
+ data->dasd_io_time2[irqtime_ind]++;
+ data->dasd_io_time2ps[irqtimeps_ind]++;
+ data->dasd_io_time3[endtime_ind]++;
+
+ if (is_read) {
+ data->dasd_read_reqs++;
+ data->dasd_read_sects += sectors;
+ if (is_alias)
+ data->dasd_read_alias++;
+ if (is_tpm)
+ data->dasd_read_tpm++;
+ data->dasd_read_secs[sectors_ind]++;
+ data->dasd_read_times[tottime_ind]++;
+ data->dasd_read_time1[strtime_ind]++;
+ data->dasd_read_time2[irqtime_ind]++;
+ data->dasd_read_time3[endtime_ind]++;
+ }
+}
+
static void dasd_profile_end(struct dasd_block *block,
struct dasd_ccw_req *cqr,
struct request *req)
{
long strtime, irqtime, endtime, tottime; /* in microseconds */
long tottimeps, sectors;
+ struct dasd_device *device;
+ int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
+ int irqtime_ind, irqtimeps_ind, endtime_ind;
- if (dasd_profile_level != DASD_PROFILE_ON)
+ device = cqr->startdev;
+ if (!(dasd_global_profile_level ||
+ block->profile.data ||
+ device->profile.data))
return;
sectors = blk_rq_sectors(req);
@@ -672,29 +793,392 @@ static void dasd_profile_end(struct dasd_block *block,
tottime = ((cqr->endclk - cqr->buildclk) >> 12);
tottimeps = tottime / sectors;
- if (!dasd_global_profile.dasd_io_reqs)
- memset(&dasd_global_profile, 0,
- sizeof(struct dasd_profile_info_t));
- dasd_global_profile.dasd_io_reqs++;
- dasd_global_profile.dasd_io_sects += sectors;
-
- if (!block->profile.dasd_io_reqs)
- memset(&block->profile, 0,
- sizeof(struct dasd_profile_info_t));
- block->profile.dasd_io_reqs++;
- block->profile.dasd_io_sects += sectors;
-
- dasd_profile_counter(sectors, dasd_io_secs, block);
- dasd_profile_counter(tottime, dasd_io_times, block);
- dasd_profile_counter(tottimeps, dasd_io_timps, block);
- dasd_profile_counter(strtime, dasd_io_time1, block);
- dasd_profile_counter(irqtime, dasd_io_time2, block);
- dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
- dasd_profile_counter(endtime, dasd_io_time3, block);
+ dasd_profile_counter(sectors, sectors_ind);
+ dasd_profile_counter(tottime, tottime_ind);
+ dasd_profile_counter(tottimeps, tottimeps_ind);
+ dasd_profile_counter(strtime, strtime_ind);
+ dasd_profile_counter(irqtime, irqtime_ind);
+ dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
+ dasd_profile_counter(endtime, endtime_ind);
+
+ if (dasd_global_profile_level) {
+ dasd_profile_end_add_data(&dasd_global_profile_data,
+ cqr->startdev != block->base,
+ cqr->cpmode == 1,
+ rq_data_dir(req) == READ,
+ sectors, sectors_ind, tottime_ind,
+ tottimeps_ind, strtime_ind,
+ irqtime_ind, irqtimeps_ind,
+ endtime_ind);
+ }
+
+ spin_lock(&block->profile.lock);
+ if (block->profile.data)
+ dasd_profile_end_add_data(block->profile.data,
+ cqr->startdev != block->base,
+ cqr->cpmode == 1,
+ rq_data_dir(req) == READ,
+ sectors, sectors_ind, tottime_ind,
+ tottimeps_ind, strtime_ind,
+ irqtime_ind, irqtimeps_ind,
+ endtime_ind);
+ spin_unlock(&block->profile.lock);
+
+ spin_lock(&device->profile.lock);
+ if (device->profile.data)
+ dasd_profile_end_add_data(device->profile.data,
+ cqr->startdev != block->base,
+ cqr->cpmode == 1,
+ rq_data_dir(req) == READ,
+ sectors, sectors_ind, tottime_ind,
+ tottimeps_ind, strtime_ind,
+ irqtime_ind, irqtimeps_ind,
+ endtime_ind);
+ spin_unlock(&device->profile.lock);
+}
+
+void dasd_profile_reset(struct dasd_profile *profile)
+{
+ struct dasd_profile_info *data;
+
+ spin_lock_bh(&profile->lock);
+ data = profile->data;
+ if (!data) {
+ spin_unlock_bh(&profile->lock);
+ return;
+ }
+ memset(data, 0, sizeof(*data));
+ getnstimeofday(&data->starttod);
+ spin_unlock_bh(&profile->lock);
+}
+
+void dasd_global_profile_reset(void)
+{
+ memset(&dasd_global_profile_data, 0, sizeof(dasd_global_profile_data));
+ getnstimeofday(&dasd_global_profile_data.starttod);
+}
+
+int dasd_profile_on(struct dasd_profile *profile)
+{
+ struct dasd_profile_info *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ spin_lock_bh(&profile->lock);
+ if (profile->data) {
+ spin_unlock_bh(&profile->lock);
+ kfree(data);
+ return 0;
+ }
+ getnstimeofday(&data->starttod);
+ profile->data = data;
+ spin_unlock_bh(&profile->lock);
+ return 0;
+}
+
+void dasd_profile_off(struct dasd_profile *profile)
+{
+ spin_lock_bh(&profile->lock);
+ kfree(profile->data);
+ profile->data = NULL;
+ spin_unlock_bh(&profile->lock);
+}
+
+char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
+{
+ char *buffer;
+
+ buffer = vmalloc(user_len + 1);
+ if (buffer == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_user(buffer, user_buf, user_len) != 0) {
+ vfree(buffer);
+ return ERR_PTR(-EFAULT);
+ }
+ /* got the string, now strip linefeed. */
+ if (buffer[user_len - 1] == '\n')
+ buffer[user_len - 1] = 0;
+ else
+ buffer[user_len] = 0;
+ return buffer;
}
+
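+/*
+ * The per-device and per-block "statistics" debugfs files accept the
+ * keywords "reset", "on" and "off", e.g. (assuming debugfs is mounted
+ * in the usual place):
+ *   echo on > /sys/kernel/debug/dasd/<device or disk name>/statistics
+ */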
+static ssize_t dasd_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t user_len, loff_t *pos)
+{
+ char *buffer, *str;
+ int rc;
+ struct seq_file *m = (struct seq_file *)file->private_data;
+ struct dasd_profile *prof = m->private;
+
+ if (user_len > 65536)
+ user_len = 65536;
+ buffer = dasd_get_user_string(user_buf, user_len);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ str = skip_spaces(buffer);
+ rc = user_len;
+ if (strncmp(str, "reset", 5) == 0) {
+ dasd_profile_reset(prof);
+ } else if (strncmp(str, "on", 2) == 0) {
+ rc = dasd_profile_on(prof);
+ if (!rc)
+ rc = user_len;
+ } else if (strncmp(str, "off", 3) == 0) {
+ dasd_profile_off(prof);
+ } else
+ rc = -EINVAL;
+ vfree(buffer);
+ return rc;
+}
+
+static void dasd_stats_array(struct seq_file *m, unsigned int *array)
+{
+ int i;
+
+ for (i = 0; i < 32; i++)
+ seq_printf(m, "%u ", array[i]);
+ seq_putc(m, '\n');
+}
+
+static void dasd_stats_seq_print(struct seq_file *m,
+ struct dasd_profile_info *data)
+{
+ seq_printf(m, "start_time %ld.%09ld\n",
+ data->starttod.tv_sec, data->starttod.tv_nsec);
+ seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
+ seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
+ seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
+ seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
+ seq_printf(m, "histogram_sectors ");
+ dasd_stats_array(m, data->dasd_io_secs);
+ seq_printf(m, "histogram_io_times ");
+ dasd_stats_array(m, data->dasd_io_times);
+ seq_printf(m, "histogram_io_times_weighted ");
+ dasd_stats_array(m, data->dasd_io_timps);
+ seq_printf(m, "histogram_time_build_to_ssch ");
+ dasd_stats_array(m, data->dasd_io_time1);
+ seq_printf(m, "histogram_time_ssch_to_irq ");
+ dasd_stats_array(m, data->dasd_io_time2);
+ seq_printf(m, "histogram_time_ssch_to_irq_weighted ");
+ dasd_stats_array(m, data->dasd_io_time2ps);
+ seq_printf(m, "histogram_time_irq_to_end ");
+ dasd_stats_array(m, data->dasd_io_time3);
+ seq_printf(m, "histogram_ccw_queue_length ");
+ dasd_stats_array(m, data->dasd_io_nr_req);
+ seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
+ seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
+ seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
+ seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
+ seq_printf(m, "histogram_read_sectors ");
+ dasd_stats_array(m, data->dasd_read_secs);
+ seq_printf(m, "histogram_read_times ");
+ dasd_stats_array(m, data->dasd_read_times);
+ seq_printf(m, "histogram_read_time_build_to_ssch ");
+ dasd_stats_array(m, data->dasd_read_time1);
+ seq_printf(m, "histogram_read_time_ssch_to_irq ");
+ dasd_stats_array(m, data->dasd_read_time2);
+ seq_printf(m, "histogram_read_time_irq_to_end ");
+ dasd_stats_array(m, data->dasd_read_time3);
+ seq_printf(m, "histogram_read_ccw_queue_length ");
+ dasd_stats_array(m, data->dasd_read_nr_req);
+}
+
+static int dasd_stats_show(struct seq_file *m, void *v)
+{
+ struct dasd_profile *profile;
+ struct dasd_profile_info *data;
+
+ profile = m->private;
+ spin_lock_bh(&profile->lock);
+ data = profile->data;
+ if (!data) {
+ spin_unlock_bh(&profile->lock);
+ seq_printf(m, "disabled\n");
+ return 0;
+ }
+ dasd_stats_seq_print(m, data);
+ spin_unlock_bh(&profile->lock);
+ return 0;
+}
+
+static int dasd_stats_open(struct inode *inode, struct file *file)
+{
+ struct dasd_profile *profile = inode->i_private;
+ return single_open(file, dasd_stats_show, profile);
+}
+
+static const struct file_operations dasd_stats_raw_fops = {
+ .owner = THIS_MODULE,
+ .open = dasd_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = dasd_stats_write,
+};
+
+static ssize_t dasd_stats_global_write(struct file *file,
+ const char __user *user_buf,
+ size_t user_len, loff_t *pos)
+{
+ char *buffer, *str;
+ ssize_t rc;
+
+ if (user_len > 65536)
+ user_len = 65536;
+ buffer = dasd_get_user_string(user_buf, user_len);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+ str = skip_spaces(buffer);
+ rc = user_len;
+ if (strncmp(str, "reset", 5) == 0) {
+ dasd_global_profile_reset();
+ } else if (strncmp(str, "on", 2) == 0) {
+ dasd_global_profile_reset();
+ dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
+ } else if (strncmp(str, "off", 3) == 0) {
+ dasd_global_profile_level = DASD_PROFILE_OFF;
+ } else
+ rc = -EINVAL;
+ vfree(buffer);
+ return rc;
+}
+
+static int dasd_stats_global_show(struct seq_file *m, void *v)
+{
+ if (!dasd_global_profile_level) {
+ seq_printf(m, "disabled\n");
+ return 0;
+ }
+ dasd_stats_seq_print(m, &dasd_global_profile_data);
+ return 0;
+}
+
+static int dasd_stats_global_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dasd_stats_global_show, NULL);
+}
+
+static const struct file_operations dasd_stats_global_fops = {
+ .owner = THIS_MODULE,
+ .open = dasd_stats_global_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = dasd_stats_global_write,
+};
+
+static void dasd_profile_init(struct dasd_profile *profile,
+ struct dentry *base_dentry)
+{
+ mode_t mode;
+ struct dentry *pde;
+
+ if (!base_dentry)
+ return;
+ profile->dentry = NULL;
+ profile->data = NULL;
+ mode = (S_IRUSR | S_IWUSR | S_IFREG);
+ pde = debugfs_create_file("statistics", mode, base_dentry,
+ profile, &dasd_stats_raw_fops);
+ if (pde && !IS_ERR(pde))
+ profile->dentry = pde;
+ return;
+}
+
+static void dasd_profile_exit(struct dasd_profile *profile)
+{
+ dasd_profile_off(profile);
+ if (profile->dentry) {
+ debugfs_remove(profile->dentry);
+ profile->dentry = NULL;
+ }
+}
+
+static void dasd_statistics_removeroot(void)
+{
+ dasd_global_profile_level = DASD_PROFILE_OFF;
+ if (dasd_global_profile_dentry) {
+ debugfs_remove(dasd_global_profile_dentry);
+ dasd_global_profile_dentry = NULL;
+ }
+ if (dasd_debugfs_global_entry)
+ debugfs_remove(dasd_debugfs_global_entry);
+ if (dasd_debugfs_root_entry)
+ debugfs_remove(dasd_debugfs_root_entry);
+}
+
+static void dasd_statistics_createroot(void)
+{
+ mode_t mode;
+ struct dentry *pde;
+
+ dasd_debugfs_root_entry = NULL;
+ dasd_debugfs_global_entry = NULL;
+ dasd_global_profile_dentry = NULL;
+ pde = debugfs_create_dir("dasd", NULL);
+ if (!pde || IS_ERR(pde))
+ goto error;
+ dasd_debugfs_root_entry = pde;
+ pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
+ if (!pde || IS_ERR(pde))
+ goto error;
+ dasd_debugfs_global_entry = pde;
+
+ mode = (S_IRUSR | S_IWUSR | S_IFREG);
+ pde = debugfs_create_file("statistics", mode, dasd_debugfs_global_entry,
+ NULL, &dasd_stats_global_fops);
+ if (!pde || IS_ERR(pde))
+ goto error;
+ dasd_global_profile_dentry = pde;
+ return;
+
+error:
+ DBF_EVENT(DBF_ERR, "%s",
+ "Creation of the dasd debugfs interface failed");
+ dasd_statistics_removeroot();
+ return;
+}
+
#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)
+
+static void dasd_statistics_createroot(void)
+{
+ return;
+}
+
+static void dasd_statistics_removeroot(void)
+{
+ return;
+}
+
+int dasd_stats_generic_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "Statistics are not activated in this kernel\n");
+ return 0;
+}
+
+static void dasd_profile_init(struct dasd_profile *profile,
+ struct dentry *base_dentry)
+{
+ return;
+}
+
+static void dasd_profile_exit(struct dasd_profile *profile)
+{
+ return;
+}
+
+int dasd_profile_on(struct dasd_profile *profile)
+{
+ return 0;
+}
+
#endif /* CONFIG_DASD_PROFILE */
/*
@@ -2441,6 +2925,7 @@ dasd_exit(void)
debug_unregister(dasd_debug_area);
dasd_debug_area = NULL;
}
+ dasd_statistics_removeroot();
}
/*
@@ -2992,6 +3477,8 @@ static int __init dasd_init(void)
dasd_diag_discipline_pointer = NULL;
+ dasd_statistics_createroot();
+
rc = dasd_devmap_init();
if (rc)
goto failed;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 30fb979d684..6e835c9fdfc 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1461,6 +1461,15 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
"Read device characteristic failed, rc=%d", rc);
goto out_err3;
}
+
+ if ((device->features & DASD_FEATURE_USERAW) &&
+ !(private->rdc_data.facilities.RT_in_LR)) {
+ dev_err(&device->cdev->dev, "The storage server does not "
+ "support raw-track access\n");
+ rc = -EINVAL;
+ goto out_err3;
+ }
+
/* find the valid cylinder size */
if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
private->rdc_data.long_no_cyl)
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 77f778b7b07..16c5208c3dc 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index d1e4f2c1264..1dd12bd85a6 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -382,6 +382,41 @@ struct dasd_path {
__u8 npm;
};
+struct dasd_profile_info {
+ /* legacy part of profile data, as in dasd_profile_info_t */
+ unsigned int dasd_io_reqs; /* number of requests processed */
+ unsigned int dasd_io_sects; /* number of sectors processed */
+ unsigned int dasd_io_secs[32]; /* histogram of request's sizes */
+ unsigned int dasd_io_times[32]; /* histogram of requests' times */
+ unsigned int dasd_io_timps[32]; /* h. of requests' times per sector */
+ unsigned int dasd_io_time1[32]; /* hist. of time from build to start */
+ unsigned int dasd_io_time2[32]; /* hist. of time from start to irq */
+ unsigned int dasd_io_time2ps[32]; /* hist. of time from start to irq */
+ unsigned int dasd_io_time3[32]; /* hist. of time from irq to end */
+ unsigned int dasd_io_nr_req[32]; /* hist. of # of requests in chanq */
+
+ /* new data */
+ struct timespec starttod; /* time of start or last reset */
+ unsigned int dasd_io_alias; /* requests using an alias */
+ unsigned int dasd_io_tpm; /* requests using transport mode */
+ unsigned int dasd_read_reqs; /* total number of read requests */
+ unsigned int dasd_read_sects; /* total number of read sectors */
+ unsigned int dasd_read_alias; /* read request using an alias */
+ unsigned int dasd_read_tpm; /* read requests in transport mode */
+ unsigned int dasd_read_secs[32]; /* histogram of request's sizes */
+ unsigned int dasd_read_times[32]; /* histogram of requests' times */
+ unsigned int dasd_read_time1[32]; /* hist. time from build to start */
+ unsigned int dasd_read_time2[32]; /* hist. of time from start to irq */
+ unsigned int dasd_read_time3[32]; /* hist. of time from irq to end */
+ unsigned int dasd_read_nr_req[32]; /* hist. of # of requests in chanq */
+};
+
+struct dasd_profile {
+ struct dentry *dentry;
+ struct dasd_profile_info *data;
+ spinlock_t lock;
+};
+
struct dasd_device {
/* Block device stuff. */
struct dasd_block *block;
@@ -431,6 +466,9 @@ struct dasd_device {
/* default expiration time in s */
unsigned long default_expires;
+
+ struct dentry *debugfs_dentry;
+ struct dasd_profile profile;
};
struct dasd_block {
@@ -453,9 +491,8 @@ struct dasd_block {
struct tasklet_struct tasklet;
struct timer_list timer;
-#ifdef CONFIG_DASD_PROFILE
- struct dasd_profile_info_t profile;
-#endif
+ struct dentry *debugfs_dentry;
+ struct dasd_profile profile;
};
@@ -589,12 +626,13 @@ dasd_check_blocksize(int bsize)
}
/* externals in dasd.c */
-#define DASD_PROFILE_ON 1
-#define DASD_PROFILE_OFF 0
+#define DASD_PROFILE_OFF 0
+#define DASD_PROFILE_ON 1
+#define DASD_PROFILE_GLOBAL_ONLY 2
extern debug_info_t *dasd_debug_area;
-extern struct dasd_profile_info_t dasd_global_profile;
-extern unsigned int dasd_profile_level;
+extern struct dasd_profile_info dasd_global_profile_data;
+extern unsigned int dasd_global_profile_level;
extern const struct block_device_operations dasd_device_operations;
extern struct kmem_cache *dasd_page_cache;
@@ -662,6 +700,11 @@ void dasd_device_remove_stop_bits(struct dasd_device *, int);
int dasd_device_is_ro(struct dasd_device *);
+void dasd_profile_reset(struct dasd_profile *);
+int dasd_profile_on(struct dasd_profile *);
+void dasd_profile_off(struct dasd_profile *);
+void dasd_global_profile_reset(void);
+char *dasd_get_user_string(const char __user *, size_t);
/* externals in dasd_devmap.c */
extern int dasd_max_devindex;
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 72261e4c516..eb4e034378c 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -239,7 +239,7 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
*/
static int dasd_ioctl_reset_profile(struct dasd_block *block)
{
- memset(&block->profile, 0, sizeof(struct dasd_profile_info_t));
+ dasd_profile_reset(&block->profile);
return 0;
}
@@ -248,10 +248,40 @@ static int dasd_ioctl_reset_profile(struct dasd_block *block)
*/
static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
{
- if (dasd_profile_level == DASD_PROFILE_OFF)
+ struct dasd_profile_info_t *data;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_bh(&block->profile.lock);
+ if (block->profile.data) {
+ data->dasd_io_reqs = block->profile.data->dasd_io_reqs;
+ data->dasd_io_sects = block->profile.data->dasd_io_sects;
+ memcpy(data->dasd_io_secs, block->profile.data->dasd_io_secs,
+ sizeof(data->dasd_io_secs));
+ memcpy(data->dasd_io_times, block->profile.data->dasd_io_times,
+ sizeof(data->dasd_io_times));
+ memcpy(data->dasd_io_timps, block->profile.data->dasd_io_timps,
+ sizeof(data->dasd_io_timps));
+ memcpy(data->dasd_io_time1, block->profile.data->dasd_io_time1,
+ sizeof(data->dasd_io_time1));
+ memcpy(data->dasd_io_time2, block->profile.data->dasd_io_time2,
+ sizeof(data->dasd_io_time2));
+ memcpy(data->dasd_io_time2ps,
+ block->profile.data->dasd_io_time2ps,
+ sizeof(data->dasd_io_time2ps));
+ memcpy(data->dasd_io_time3, block->profile.data->dasd_io_time3,
+ sizeof(data->dasd_io_time3));
+ memcpy(data->dasd_io_nr_req,
+ block->profile.data->dasd_io_nr_req,
+ sizeof(data->dasd_io_nr_req));
+ spin_unlock_bh(&block->profile.lock);
+ } else {
+ spin_unlock_bh(&block->profile.lock);
return -EIO;
- if (copy_to_user(argp, &block->profile,
- sizeof(struct dasd_profile_info_t)))
+ }
+ if (copy_to_user(argp, data, sizeof(*data)))
return -EFAULT;
return 0;
}
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index c4a6a31bd9c..e12989fff4f 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -32,28 +32,6 @@ static struct proc_dir_entry *dasd_proc_root_entry = NULL;
static struct proc_dir_entry *dasd_devices_entry = NULL;
static struct proc_dir_entry *dasd_statistics_entry = NULL;
-#ifdef CONFIG_DASD_PROFILE
-static char *
-dasd_get_user_string(const char __user *user_buf, size_t user_len)
-{
- char *buffer;
-
- buffer = kmalloc(user_len + 1, GFP_KERNEL);
- if (buffer == NULL)
- return ERR_PTR(-ENOMEM);
- if (copy_from_user(buffer, user_buf, user_len) != 0) {
- kfree(buffer);
- return ERR_PTR(-EFAULT);
- }
- /* got the string, now strip linefeed. */
- if (buffer[user_len - 1] == '\n')
- buffer[user_len - 1] = 0;
- else
- buffer[user_len] = 0;
- return buffer;
-}
-#endif /* CONFIG_DASD_PROFILE */
-
static int
dasd_devices_show(struct seq_file *m, void *v)
{
@@ -167,6 +145,55 @@ static const struct file_operations dasd_devices_file_ops = {
};
#ifdef CONFIG_DASD_PROFILE
+static int dasd_stats_all_block_on(void)
+{
+ int i, rc;
+ struct dasd_device *device;
+
+ rc = 0;
+ for (i = 0; i < dasd_max_devindex; ++i) {
+ device = dasd_device_from_devindex(i);
+ if (IS_ERR(device))
+ continue;
+ if (device->block)
+ rc = dasd_profile_on(&device->block->profile);
+ dasd_put_device(device);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static void dasd_stats_all_block_off(void)
+{
+ int i;
+ struct dasd_device *device;
+
+ for (i = 0; i < dasd_max_devindex; ++i) {
+ device = dasd_device_from_devindex(i);
+ if (IS_ERR(device))
+ continue;
+ if (device->block)
+ dasd_profile_off(&device->block->profile);
+ dasd_put_device(device);
+ }
+}
+
+static void dasd_stats_all_block_reset(void)
+{
+ int i;
+ struct dasd_device *device;
+
+ for (i = 0; i < dasd_max_devindex; ++i) {
+ device = dasd_device_from_devindex(i);
+ if (IS_ERR(device))
+ continue;
+ if (device->block)
+ dasd_profile_reset(&device->block->profile);
+ dasd_put_device(device);
+ }
+}
+
static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int factor)
{
int i;
@@ -183,18 +210,18 @@ static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int f
static int dasd_stats_proc_show(struct seq_file *m, void *v)
{
#ifdef CONFIG_DASD_PROFILE
- struct dasd_profile_info_t *prof;
+ struct dasd_profile_info *prof;
int factor;
/* check for active profiling */
- if (dasd_profile_level == DASD_PROFILE_OFF) {
+ if (!dasd_global_profile_level) {
seq_printf(m, "Statistics are off - they might be "
"switched on using 'echo set on > "
"/proc/dasd/statistics'\n");
return 0;
}
+ prof = &dasd_global_profile_data;
- prof = &dasd_global_profile;
/* prevent counter 'overflow' on output */
for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
factor *= 10);
@@ -245,6 +272,7 @@ static ssize_t dasd_stats_proc_write(struct file *file,
{
#ifdef CONFIG_DASD_PROFILE
char *buffer, *str;
+ int rc;
if (user_len > 65536)
user_len = 65536;
@@ -259,32 +287,40 @@ static ssize_t dasd_stats_proc_write(struct file *file,
str = skip_spaces(str + 4);
if (strcmp(str, "on") == 0) {
/* switch on statistics profiling */
- dasd_profile_level = DASD_PROFILE_ON;
+ rc = dasd_stats_all_block_on();
+ if (rc) {
+ dasd_stats_all_block_off();
+ goto out_error;
+ }
+ dasd_global_profile_reset();
+ dasd_global_profile_level = DASD_PROFILE_ON;
pr_info("The statistics feature has been switched "
"on\n");
} else if (strcmp(str, "off") == 0) {
/* switch off and reset statistics profiling */
- memset(&dasd_global_profile,
- 0, sizeof (struct dasd_profile_info_t));
- dasd_profile_level = DASD_PROFILE_OFF;
+ dasd_global_profile_level = DASD_PROFILE_OFF;
+ dasd_global_profile_reset();
+ dasd_stats_all_block_off();
pr_info("The statistics feature has been switched "
"off\n");
} else
- goto out_error;
+ goto out_parse_error;
} else if (strncmp(str, "reset", 5) == 0) {
/* reset the statistics */
- memset(&dasd_global_profile, 0,
- sizeof (struct dasd_profile_info_t));
+ dasd_global_profile_reset();
+ dasd_stats_all_block_reset();
pr_info("The statistics have been reset\n");
} else
- goto out_error;
- kfree(buffer);
+ goto out_parse_error;
+ vfree(buffer);
return user_len;
-out_error:
+out_parse_error:
+ rc = -EINVAL;
pr_warning("%s is not a supported value for /proc/dasd/statistics\n",
str);
- kfree(buffer);
- return -EINVAL;
+out_error:
+ vfree(buffer);
+ return rc;
#else
pr_warning("/proc/dasd/statistics: is not activated in this kernel\n");
return user_len;
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index a4f117d9fdc..2c9a776bd63 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -116,9 +116,6 @@ config S390_TAPE
called tape390 and include all selected interfaces and
hardware drivers.
-comment "S/390 tape interface support"
- depends on S390_TAPE
-
comment "S/390 tape hardware support"
depends on S390_TAPE
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index 7ad30e72f86..5f9f929e891 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -82,12 +82,9 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write,
return -EFAULT;
} else {
len = *count;
- rc = copy_from_user(buf, buffer, sizeof(buf));
- if (rc != 0)
- return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
- if (strict_strtoul(buf, 0, &val) != 0)
- return -EINVAL;
+ rc = kstrtoul_from_user(buffer, len, 0, &val);
+ if (rc)
+ return rc;
if (val != 0 && val != 1)
return -EINVAL;
callhome_enabled = val;
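
The sclp_async hunk above replaces an open-coded copy_from_user() plus strict_strtoul() pair with kstrtoul_from_user(), which copies, terminates and parses the user buffer in one call. A minimal sketch of a write handler built on it; the flag name and handler shape are illustrative, not taken from the driver:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static unsigned long feature_enabled;

static ssize_t feature_write(const char __user *buf, size_t count)
{
	unsigned long val;
	int rc;

	/* base 0 accepts decimal, 0x... hex and 0... octal */
	rc = kstrtoul_from_user(buf, count, 0, &val);
	if (rc)
		return rc;
	if (val != 0 && val != 1)
		return -EINVAL;
	feature_enabled = val;
	return count;
}
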
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 05909a7df8b..a90a02c28d6 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -13,7 +13,7 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/reboot.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/sigp.h>
#include <asm/smp.h>
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index c837d7419a6..524d988d89d 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -21,7 +21,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 12ef9121d4f..11312f401c7 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -258,13 +258,13 @@ static int vmwdt_suspend(void)
if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
pr_err("The system cannot be suspended while the watchdog"
" is in use\n");
- return NOTIFY_BAD;
+ return notifier_from_errno(-EBUSY);
}
if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
clear_bit(VMWDT_OPEN, &vmwdt_is_open);
pr_err("The system cannot be suspended while the watchdog"
" is running\n");
- return NOTIFY_BAD;
+ return notifier_from_errno(-EBUSY);
}
return NOTIFY_DONE;
}
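
notifier_from_errno() folds a negative errno into a NOTIFY_STOP_MASK return value, so a caller that uses notifier_to_errno() gets the original error back instead of the opaque NOTIFY_BAD the old code returned. A minimal sketch of a notifier callback using it; the busy flag is a stand-in, not the watchdog state:

#include <linux/errno.h>
#include <linux/notifier.h>

static int device_busy;		/* assumed to be set while the device is open */

static int my_power_event(struct notifier_block *nb, unsigned long event,
			  void *unused)
{
	if (device_busy)
		return notifier_from_errno(-EBUSY);	/* caller can recover -EBUSY */
	return NOTIFY_DONE;
}
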
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index c47b25fd3f4..92d7324acb1 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -814,8 +814,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
mutex_unlock(&css->mutex);
continue;
}
- if (__chsc_do_secm(css, 0))
- ret = NOTIFY_BAD;
+ ret = __chsc_do_secm(css, 0);
+ ret = notifier_from_errno(ret);
mutex_unlock(&css->mutex);
}
break;
@@ -831,8 +831,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
mutex_unlock(&css->mutex);
continue;
}
- if (__chsc_do_secm(css, 1))
- ret = NOTIFY_BAD;
+ ret = __chsc_do_secm(css, 1);
+ ret = notifier_from_errno(ret);
mutex_unlock(&css->mutex);
}
/* search for subchannels, which appeared during hibernation */
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 7e297c7bb5f..0b7245c72d5 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -2,7 +2,7 @@
#define S390_DEVICE_H
#include <asm/ccwdev.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/notifier.h>
#include "io_sch.h"
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 7bc643f3f5a..e5c966462c5 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -14,6 +14,8 @@
#include "chsc.h"
#define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */
+#define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */
+#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */
#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
/*
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index f8b03a636e4..0e615cb912d 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -188,19 +188,13 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
struct qdio_irq *irq_ptr = seq->private;
struct qdio_q *q;
unsigned long val;
- char buf[8];
int ret, i;
if (!irq_ptr)
return 0;
- if (count >= sizeof(buf))
- return -EINVAL;
- if (copy_from_user(&buf, ubuf, count))
- return -EFAULT;
- buf[count] = 0;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret < 0)
+
+ ret = kstrtoul_from_user(ubuf, count, 10, &val);
+ if (ret)
return ret;
switch (val) {
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 570d4da1069..288c9140290 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -15,7 +15,7 @@
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
@@ -313,7 +313,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_WRITE;
u64 start_time = 0;
- int cc;
+ int retries = 0, cc;
if (is_qebsm(q)) {
schid = q->irq_ptr->sch_token;
@@ -325,6 +325,7 @@ again:
/* hipersocket busy condition */
if (unlikely(*busy_bit)) {
WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
+ retries++;
if (!start_time) {
start_time = get_clock();
@@ -333,6 +334,11 @@ again:
if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
+ if (retries) {
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
+ "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
+ DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
+ }
return cc;
}
@@ -728,13 +734,14 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
static int qdio_kick_outbound_q(struct qdio_q *q)
{
+ int retries = 0, cc;
unsigned int busy_bit;
- int cc;
if (!need_siga_out(q))
return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
+retry:
qperf_inc(q, siga_write);
cc = qdio_siga_output(q, &busy_bit);
@@ -743,7 +750,11 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
break;
case 2:
if (busy_bit) {
- DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
+ while (++retries < QDIO_BUSY_BIT_RETRIES) {
+ mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
+ goto retry;
+ }
+ DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
cc |= QDIO_ERROR_SIGA_BUSY;
} else
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
@@ -753,6 +764,10 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
break;
}
+ if (retries) {
+ DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
+ DBF_ERROR("count:%u", retries);
+ }
return cc;
}
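
The qdio hunks above turn a single SIGA-write attempt into a bounded retry loop: on a busy condition the code waits QDIO_BUSY_BIT_RETRY_DELAY milliseconds and retries up to QDIO_BUSY_BIT_RETRIES times (10 ms * 1000 = 10 s worst case) before flagging the error, and logs how many retries were needed. A stripped-down sketch of that control flow; issue() is a placeholder for the SIGA instruction, not a qdio function:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/printk.h>

#define RETRY_DELAY_MS	10
#define MAX_RETRIES	1000	/* 10 ms * 1000 = 10 s worst case */

static int issue_with_retry(int (*issue)(void))
{
	int retries = 0;
	int rc;

	do {
		rc = issue();
		if (rc != -EBUSY)
			break;
		mdelay(RETRY_DELAY_MS);		/* delay, then try again */
	} while (++retries < MAX_RETRIES);

	if (retries)
		pr_warn("command needed %d retries\n", retries);
	return rc;
}
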
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 5c4e741d822..2a1d4dfaf85 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -9,7 +9,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
@@ -95,9 +95,11 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
}
}
-static inline u32 shared_ind_set(void)
+static inline u32 clear_shared_ind(void)
{
- return q_indicators[TIQDIO_SHARED_IND].ind;
+ if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
+ return 0;
+ return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}
/**
@@ -107,7 +109,7 @@ static inline u32 shared_ind_set(void)
*/
static void tiqdio_thinint_handler(void *alsi, void *data)
{
- u32 si_used = shared_ind_set();
+ u32 si_used = clear_shared_ind();
struct qdio_q *q;
last_ai_time = S390_lowcore.int_clock;
@@ -150,13 +152,6 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
qperf_inc(q, adapter_int);
}
rcu_read_unlock();
-
- /*
- * If the shared indicator was used clear it now after all queues
- * were processed.
- */
- if (si_used && shared_ind_set())
- xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
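
clear_shared_ind() above reads and zeroes the shared indicator with a single atomic xchg(), so bits set by the adapter while the handler runs can no longer be lost; the old code cleared the word in a separate step after all queues were processed. A minimal sketch of the read-and-clear idiom; the indicator word and user counter are illustrative:

#include <linux/atomic.h>
#include <linux/types.h>

static u32 shared_ind;			/* set from adapter/interrupt context */
static atomic_t shared_users = ATOMIC_INIT(0);

static u32 take_shared_ind(void)
{
	/* fast path: nobody is registered on the shared indicator */
	if (!atomic_read(&shared_users))
		return 0;
	/* atomically fetch the pending bits and reset the word */
	return xchg(&shared_ind, 0);
}
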
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 16e4a25596e..b77ae519d79 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -6,6 +6,7 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* Felix Beck <felix.beck@de.ibm.com>
+ * Holger Dengler <hd@linux.vnet.ibm.com>
*
* Adjunct processor bus.
*
@@ -40,7 +41,7 @@
#include <linux/mutex.h>
#include <asm/reset.h>
#include <asm/airq.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/isc.h>
#include <linux/hrtimer.h>
@@ -222,47 +223,52 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind)
}
#endif
-static inline struct ap_queue_status __ap_4096_commands_available(ap_qid_t qid,
- int *support)
+#ifdef CONFIG_64BIT
+static inline struct ap_queue_status
+__ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
- register struct ap_queue_status reg1 asm ("1");
- register unsigned long reg2 asm ("2") = 0UL;
+ register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID;
+ register unsigned long reg2 asm ("2");
asm volatile(
".long 0xb2af0000\n"
- "0: la %1,0\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (reg0), "=d" (reg1), "=d" (reg2)
+ "0:\n"
+ EX_TABLE(0b, 0b)
+ : "+d" (reg0), "+d" (reg1), "=d" (reg2)
:
: "cc");
- if (reg2 & 0x6000000000000000ULL)
- *support = 1;
- else
- *support = 0;
-
+ *functions = (unsigned int)(reg2 >> 32);
return reg1;
}
+#endif
/**
- * ap_4096_commands_availablen(): Check for availability of 4096 bit RSA
- * support.
+ * ap_query_functions(): Query supported functions.
* @qid: The AP queue number
+ * @functions: Pointer to functions field.
*
- * Returns 1 if 4096 bit RSA keys are support fo the AP, returns 0 if not.
+ * Returns
+ * 0 on success.
+ * -ENODEV if queue not valid.
+ * -EBUSY if device busy.
+ * -EINVAL if query function is not supported
*/
-int ap_4096_commands_available(ap_qid_t qid)
+static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
+#ifdef CONFIG_64BIT
struct ap_queue_status status;
- int i, support = 0;
- status = __ap_4096_commands_available(qid, &support);
+ int i;
+ status = __ap_query_functions(qid, functions);
for (i = 0; i < AP_MAX_RESET; i++) {
+ if (ap_queue_status_invalid_test(&status))
+ return -ENODEV;
+
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
- return support;
+ return 0;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
break;
@@ -270,7 +276,7 @@ int ap_4096_commands_available(ap_qid_t qid)
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
case AP_RESPONSE_INVALID_ADDRESS:
- return 0;
+ return -ENODEV;
case AP_RESPONSE_OTHERWISE_CHANGED:
break;
default:
@@ -278,10 +284,31 @@ int ap_4096_commands_available(ap_qid_t qid)
}
if (i < AP_MAX_RESET - 1) {
udelay(5);
- status = __ap_4096_commands_available(qid, &support);
+ status = __ap_query_functions(qid, functions);
}
}
- return support;
+ return -EBUSY;
+#else
+ return -EINVAL;
+#endif
+}
+
+/**
+ * ap_4096_commands_available(): Check for availability of 4096 bit RSA
+ * support.
+ * @qid: The AP queue number
+ *
+ * Returns 1 if 4096 bit RSA keys are supported for the AP, returns 0 if not.
+ */
+int ap_4096_commands_available(ap_qid_t qid)
+{
+ unsigned int functions;
+
+ if (ap_query_functions(qid, &functions))
+ return 0;
+
+ return test_ap_facility(functions, 1) &&
+ test_ap_facility(functions, 2);
}
EXPORT_SYMBOL(ap_4096_commands_available);
@@ -1135,6 +1162,7 @@ static void ap_scan_bus(struct work_struct *unused)
struct device *dev;
ap_qid_t qid;
int queue_depth, device_type;
+ unsigned int device_functions;
int rc, i;
if (ap_select_domain() != 0)
@@ -1183,14 +1211,30 @@ static void ap_scan_bus(struct work_struct *unused)
INIT_LIST_HEAD(&ap_dev->list);
setup_timer(&ap_dev->timeout, ap_request_timeout,
(unsigned long) ap_dev);
- if (device_type == 0) {
+ switch (device_type) {
+ case 0:
if (ap_probe_device_type(ap_dev)) {
kfree(ap_dev);
continue;
}
- }
- else
+ break;
+ case 10:
+ if (ap_query_functions(qid, &device_functions)) {
+ kfree(ap_dev);
+ continue;
+ }
+ if (test_ap_facility(device_functions, 3))
+ ap_dev->device_type = AP_DEVICE_TYPE_CEX3C;
+ else if (test_ap_facility(device_functions, 4))
+ ap_dev->device_type = AP_DEVICE_TYPE_CEX3A;
+ else {
+ kfree(ap_dev);
+ continue;
+ }
+ break;
+ default:
ap_dev->device_type = device_type;
+ }
ap_dev->device.bus = &ap_bus_type;
ap_dev->device.parent = ap_root_device;
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 08b9738285b..d960a6309ee 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -6,6 +6,7 @@
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* Felix Beck <felix.beck@de.ibm.com>
+ * Holger Dengler <hd@linux.vnet.ibm.com>
*
* Adjunct processor bus header file.
*
@@ -72,7 +73,26 @@ struct ap_queue_status {
unsigned int int_enabled : 1;
unsigned int response_code : 8;
unsigned int pad2 : 16;
-};
+} __packed;
+
+#define AP_QUEUE_STATUS_INVALID \
+ { 1, 1, 1, 0xF, 1, 0xFF, 0xFFFF }
+
+static inline
+int ap_queue_status_invalid_test(struct ap_queue_status *status)
+{
+ struct ap_queue_status invalid = AP_QUEUE_STATUS_INVALID;
+ return !(memcmp(status, &invalid, sizeof(struct ap_queue_status)));
+}
+
+#define MAX_AP_FACILITY 31
+
+static inline int test_ap_facility(unsigned int function, unsigned int nr)
+{
+ if (nr > MAX_AP_FACILITY)
+ return 0;
+ return function & (unsigned int)(0x80000000 >> nr);
+}
#define AP_RESPONSE_NORMAL 0x00
#define AP_RESPONSE_Q_NOT_AVAIL 0x01
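
test_ap_facility() above treats the function mask returned by __ap_query_functions() as MSB-first: facility 0 corresponds to 0x80000000, facility 31 to 0x00000001, and any number above 31 reads as not installed; ap_4096_commands_available() then simply requires facilities 1 and 2. A small illustration of that bit numbering outside the driver (the mask value is made up):

#include <stdio.h>

/* MSB-first facility test, mirroring the helper added above */
static int facility_set(unsigned int mask, unsigned int nr)
{
	if (nr > 31)
		return 0;
	return (mask & (0x80000000u >> nr)) != 0;
}

int main(void)
{
	unsigned int mask = 0x60000000u;	/* facilities 1 and 2 set */

	printf("facility 1: %d\n", facility_set(mask, 1));	/* prints 1 */
	printf("facility 2: %d\n", facility_set(mask, 2));	/* prints 1 */
	printf("facility 3: %d\n", facility_set(mask, 3));	/* prints 0 */
	return 0;
}
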
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 8e65447f76b..88ad33ed5d3 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -36,7 +36,7 @@
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <linux/hw_random.h>
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 2176d00b395..da171b5f399 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -30,7 +30,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "ap_bus.h"
diff --git a/drivers/s390/crypto/zcrypt_mono.c b/drivers/s390/crypto/zcrypt_mono.c
index 44253fdd413..eb313c3fb2d 100644
--- a/drivers/s390/crypto/zcrypt_mono.c
+++ b/drivers/s390/crypto/zcrypt_mono.c
@@ -32,7 +32,7 @@
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/compat.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "ap_bus.h"
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 1afb69c75fe..d84816f144d 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -30,7 +30,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "ap_bus.h"
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index aa4c050a569..bdbdbe19299 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -30,7 +30,7 @@
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/err.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "ap_bus.h"
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 4f85eb725f4..dd4737808e0 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -31,7 +31,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "ap_bus.h"
diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h
index 5336120cddf..1fa07b0c11c 100644
--- a/drivers/s390/net/ctcm_mpc.h
+++ b/drivers/s390/net/ctcm_mpc.h
@@ -12,6 +12,7 @@
#ifndef _CTC_MPC_H_
#define _CTC_MPC_H_
+#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include "fsm.h"
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
index 1e8b235d95b..a4510cf5903 100644
--- a/drivers/s390/net/fsm.h
+++ b/drivers/s390/net/fsm.h
@@ -8,7 +8,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/string.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/**
* Define this to get debugging messages.
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d3cee33e554..26a4110eeb2 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -720,7 +720,7 @@ struct qeth_card {
wait_queue_head_t wait_q;
spinlock_t vlanlock;
spinlock_t mclock;
- struct vlan_group *vlangrp;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct list_head vid_list;
struct list_head mc_list;
struct work_struct kernel_thread_starter;
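
The qeth_core.h hunk above drops the struct vlan_group pointer in favour of a plain VLAN-id bitmap; the layer-3 code later sets and clears bits from the ndo_vlan_rx_add_vid/kill_vid callbacks and walks active IDs with for_each_set_bit(). A minimal sketch of that bookkeeping with a standalone structure, not the qeth_card layout:

#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/types.h>

struct my_port {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};

static void port_vlan_add(struct my_port *p, unsigned short vid)
{
	set_bit(vid, p->active_vlans);
}

static void port_vlan_kill(struct my_port *p, unsigned short vid)
{
	clear_bit(vid, p->active_vlans);
}

static unsigned int port_active_vlan_count(struct my_port *p)
{
	unsigned int n = 0;
	u16 vid;

	for_each_set_bit(vid, p->active_vlans, VLAN_N_VID)
		n++;
	return n;
}
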
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index dd08f7b42fb..4550573c25e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1097,7 +1097,6 @@ static int qeth_setup_card(struct qeth_card *card)
card->dev = NULL;
spin_lock_init(&card->vlanlock);
spin_lock_init(&card->mclock);
- card->vlangrp = NULL;
spin_lock_init(&card->lock);
spin_lock_init(&card->ip_lock);
spin_lock_init(&card->thread_mask_lock);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fd69da3fa6b..fafb8c29954 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -23,6 +24,7 @@
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/slab.h>
+#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/arp.h>
@@ -1696,16 +1698,18 @@ static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
static void qeth_l3_add_vlan_mc(struct qeth_card *card)
{
struct in_device *in_dev;
- struct vlan_group *vg;
- int i;
+ u16 vid;
QETH_CARD_TEXT(card, 4, "addmcvl");
- if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
+ if (!qeth_is_supported(card, IPA_FULL_VLAN))
return;
- vg = card->vlangrp;
- for (i = 0; i < VLAN_N_VID; i++) {
- struct net_device *netdev = vlan_group_get_device(vg, i);
+ for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
+ struct net_device *netdev;
+
+ rcu_read_lock();
+ netdev = __vlan_find_dev_deep(card->dev, vid);
+ rcu_read_unlock();
if (netdev == NULL ||
!(netdev->flags & IFF_UP))
continue;
@@ -1759,16 +1763,16 @@ static void qeth_l3_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
{
struct inet6_dev *in_dev;
- struct vlan_group *vg;
- int i;
+ u16 vid;
QETH_CARD_TEXT(card, 4, "admc6vl");
- if (!qeth_is_supported(card, IPA_FULL_VLAN) || (card->vlangrp == NULL))
+ if (!qeth_is_supported(card, IPA_FULL_VLAN))
return;
- vg = card->vlangrp;
- for (i = 0; i < VLAN_N_VID; i++) {
- struct net_device *netdev = vlan_group_get_device(vg, i);
+ for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
+ struct net_device *netdev;
+
+ netdev = __vlan_find_dev_deep(card->dev, vid);
if (netdev == NULL ||
!(netdev->flags & IFF_UP))
continue;
@@ -1806,10 +1810,12 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
struct in_device *in_dev;
struct in_ifaddr *ifa;
struct qeth_ipaddr *addr;
+ struct net_device *netdev;
QETH_CARD_TEXT(card, 4, "frvaddr4");
- in_dev = in_dev_get(vlan_group_get_device(card->vlangrp, vid));
+ netdev = __vlan_find_dev_deep(card->dev, vid);
+ in_dev = in_dev_get(netdev);
if (!in_dev)
return;
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
@@ -1832,10 +1838,12 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
struct inet6_dev *in6_dev;
struct inet6_ifaddr *ifa;
struct qeth_ipaddr *addr;
+ struct net_device *netdev;
QETH_CARD_TEXT(card, 4, "frvaddr6");
- in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
+ netdev = __vlan_find_dev_deep(card->dev, vid);
+ in6_dev = in6_dev_get(netdev);
if (!in6_dev)
return;
list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
@@ -1856,26 +1864,15 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
unsigned short vid)
{
- if (!card->vlangrp)
- return;
qeth_l3_free_vlan_addresses4(card, vid);
qeth_l3_free_vlan_addresses6(card, vid);
}
-static void qeth_l3_vlan_rx_register(struct net_device *dev,
- struct vlan_group *grp)
+static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_card *card = dev->ml_priv;
- unsigned long flags;
-
- QETH_CARD_TEXT(card, 4, "vlanreg");
- spin_lock_irqsave(&card->vlanlock, flags);
- card->vlangrp = grp;
- spin_unlock_irqrestore(&card->vlanlock, flags);
-}
-static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
-{
+ set_bit(vid, card->active_vlans);
return;
}
@@ -1892,7 +1889,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
spin_lock_irqsave(&card->vlanlock, flags);
/* unregister IP addresses of vlan device */
qeth_l3_free_vlan_addresses(card, vid);
- vlan_group_set_device(card->vlangrp, vid, NULL);
+ clear_bit(vid, card->active_vlans);
spin_unlock_irqrestore(&card->vlanlock, flags);
qeth_l3_set_multicast_list(card->dev);
}
@@ -2014,10 +2011,8 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
&vlan_tag);
len = skb->len;
if (is_vlan && !card->options.sniffer)
- vlan_gro_receive(&card->napi, card->vlangrp,
- vlan_tag, skb);
- else
- napi_gro_receive(&card->napi, skb);
+ __vlan_hwaccel_put_tag(skb, vlan_tag);
+ napi_gro_receive(&card->napi, skb);
break;
case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
skb->pkt_type = PACKET_HOST;
@@ -2118,15 +2113,15 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
struct qeth_card *card)
{
int rc = 0;
- struct vlan_group *vg;
- int i;
+ u16 vid;
- vg = card->vlangrp;
- if (!vg)
- return rc;
+ for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) {
+ struct net_device *netdev;
- for (i = 0; i < VLAN_N_VID; i++) {
- if (vlan_group_get_device(vg, i) == dev) {
+ rcu_read_lock();
+ netdev = __vlan_find_dev_deep(dev, vid);
+ rcu_read_unlock();
+ if (netdev == dev) {
rc = QETH_VLAN_CARD;
break;
}
@@ -2742,9 +2737,14 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
{
int cast_type = RTN_UNSPEC;
-
- if (skb_dst(skb) && skb_dst(skb)->neighbour) {
- cast_type = skb_dst(skb)->neighbour->type;
+ struct neighbour *n = NULL;
+ struct dst_entry *dst;
+
+ dst = skb_dst(skb);
+ if (dst)
+ n = dst_get_neighbour(dst);
+ if (n) {
+ cast_type = n->type;
if ((cast_type == RTN_BROADCAST) ||
(cast_type == RTN_MULTICAST) ||
(cast_type == RTN_ANYCAST))
@@ -2787,6 +2787,9 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb, int ipv, int cast_type)
{
+ struct neighbour *n = NULL;
+ struct dst_entry *dst;
+
memset(hdr, 0, sizeof(struct qeth_hdr));
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
hdr->hdr.l3.ext_flags = 0;
@@ -2795,7 +2798,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
* before we're going to overwrite this location with next hop ip.
* v6 uses passthrough, v4 sets the tag in the QDIO header.
*/
- if (card->vlangrp && vlan_tx_tag_present(skb)) {
+ if (vlan_tx_tag_present(skb)) {
if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
else
@@ -2804,13 +2807,16 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
}
hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
+ dst = skb_dst(skb);
+ if (dst)
+ n = dst_get_neighbour(dst);
if (ipv == 4) {
/* IPv4 */
hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
memset(hdr->hdr.l3.dest_addr, 0, 12);
- if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) {
+ if (n) {
*((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
- *((u32 *) skb_dst(skb)->neighbour->primary_key);
+ *((u32 *) n->primary_key);
} else {
/* fill in destination address used in ip header */
*((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
@@ -2821,9 +2827,9 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
if (card->info.type == QETH_CARD_TYPE_IQD)
hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
- if ((skb_dst(skb)) && (skb_dst(skb)->neighbour)) {
+ if (n) {
memcpy(hdr->hdr.l3.dest_addr,
- skb_dst(skb)->neighbour->primary_key, 16);
+ n->primary_key, 16);
} else {
/* fill in destination address used in ip header */
memcpy(hdr->hdr.l3.dest_addr,
@@ -2977,8 +2983,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_pull(new_skb, ETH_HLEN);
}
- if (ipv != 4 && card->vlangrp &&
- vlan_tx_tag_present(new_skb)) {
+ if (ipv != 4 && vlan_tx_tag_present(new_skb)) {
skb_push(new_skb, VLAN_HLEN);
skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
skb_copy_to_linear_data_offset(new_skb, 4,
@@ -3222,14 +3227,13 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = qeth_l3_set_multicast_list,
- .ndo_do_ioctl = qeth_l3_do_ioctl,
- .ndo_change_mtu = qeth_change_mtu,
- .ndo_fix_features = qeth_l3_fix_features,
- .ndo_set_features = qeth_l3_set_features,
- .ndo_vlan_rx_register = qeth_l3_vlan_rx_register,
+ .ndo_do_ioctl = qeth_l3_do_ioctl,
+ .ndo_change_mtu = qeth_change_mtu,
+ .ndo_fix_features = qeth_l3_fix_features,
+ .ndo_set_features = qeth_l3_set_features,
.ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
- .ndo_tx_timeout = qeth_tx_timeout,
+ .ndo_tx_timeout = qeth_tx_timeout,
};
static const struct net_device_ops qeth_l3_osa_netdev_ops = {
@@ -3239,14 +3243,13 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = qeth_l3_set_multicast_list,
- .ndo_do_ioctl = qeth_l3_do_ioctl,
- .ndo_change_mtu = qeth_change_mtu,
- .ndo_fix_features = qeth_l3_fix_features,
- .ndo_set_features = qeth_l3_set_features,
- .ndo_vlan_rx_register = qeth_l3_vlan_rx_register,
+ .ndo_do_ioctl = qeth_l3_do_ioctl,
+ .ndo_change_mtu = qeth_change_mtu,
+ .ndo_fix_features = qeth_l3_fix_features,
+ .ndo_set_features = qeth_l3_set_features,
.ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid,
- .ndo_tx_timeout = qeth_tx_timeout,
+ .ndo_tx_timeout = qeth_tx_timeout,
.ndo_neigh_setup = qeth_l3_neigh_setup,
};
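
Several qeth_l3 hunks above stop dereferencing skb_dst(skb)->neighbour directly and instead fetch the neighbour through dst_get_neighbour(), checking both the dst entry and the neighbour for NULL before touching n->type or n->primary_key. A condensed sketch of that access pattern; the helper name is illustrative:

#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/neighbour.h>

/* Return the neighbour type for a packet, or RTN_UNSPEC if unresolved. */
static int skb_neigh_type(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct neighbour *n = NULL;

	if (dst)
		n = dst_get_neighbour(dst);
	return n ? n->type : RTN_UNSPEC;
}
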
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 2a4991d6d4d..7cac873c738 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -13,7 +13,7 @@
#include <linux/slab.h>
#include <scsi/fc/fc_fcp.h>
#include <scsi/scsi_eh.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "zfcp_ext.h"
#include "zfcp_dbf.h"
#include "zfcp_fc.h"
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 740da446544..965a1fccd66 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -16,7 +16,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h> /* put_/get_user */
#include <asm/io.h>
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4a1f029c4fe..8d9dae89f06 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -830,6 +830,19 @@ config SCSI_GDTH
To compile this driver as a module, choose M here: the
module will be called gdth.
+config SCSI_ISCI
+ tristate "Intel(R) C600 Series Chipset SAS Controller"
+ depends on PCI && SCSI
+ depends on X86
+ # (temporary): known alpha quality driver
+ depends on EXPERIMENTAL
+ select SCSI_SAS_LIBSAS
+ ---help---
+ This driver supports the 6Gb/s SAS capabilities of the storage
+ control unit found in the Intel(R) C600 series chipset.
+
+ The experimental tag will be removed after the driver exits alpha.
+
config SCSI_GENERIC_NCR5380
tristate "Generic NCR5380/53c400 SCSI PIO support"
depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 7ad0b8a79ae..3c08f5352b2 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_SCSI_AACRAID) += aacraid/
obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o
obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/
obj-$(CONFIG_SCSI_PM8001) += pm8001/
+obj-$(CONFIG_SCSI_ISCI) += isci/
obj-$(CONFIG_SCSI_IPS) += ips.o
obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o
obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index c5169f01c1c..f17c92cf808 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -422,10 +422,19 @@ MODULE_PARM_DESC(aha152x1, "parameters for second controller");
#ifdef __ISAPNP__
static struct isapnp_device_id id_table[] __devinitdata = {
- { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
- ISAPNP_VENDOR('A','D','P'), ISAPNP_FUNCTION(0x1505), 0 },
- { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
- ISAPNP_VENDOR('A','D','P'), ISAPNP_FUNCTION(0x1530), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1502), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1505), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1510), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1515), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1520), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x2015), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1522), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x2215), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1530), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x3015), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1532), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x3215), 0 },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x6360), 0 },
{ ISAPNP_DEVICE_SINGLE_END, }
};
MODULE_DEVICE_TABLE(isapnp, id_table);
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index ea439f93ed8..2db79b469d9 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -892,6 +892,11 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
return 0;
}
+static void NCR5380_exit(struct Scsi_Host *instance)
+{
+ /* Empty, as we didn't schedule any delayed work */
+}
+
/*
* Function : int NCR5380_queue_command (Scsi_Cmnd *cmd,
* void (*done)(Scsi_Cmnd *))
@@ -914,7 +919,6 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
{
SETUP_HOSTDATA(cmd->device->host);
Scsi_Cmnd *tmp;
- int oldto;
unsigned long flags;
#if (NDEBUG & NDEBUG_NO_WRITE)
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 3e8658e2f15..04a154f87e3 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -730,6 +730,7 @@ int atari_scsi_release(struct Scsi_Host *sh)
free_irq(IRQ_TT_MFP_SCSI, sh);
if (atari_dma_buffer)
atari_stram_free(atari_dma_buffer);
+ NCR5380_exit(sh);
return 1;
}
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 94b9a07845d..0a9bdfa3d93 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -215,73 +215,62 @@ unlock:
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
struct beiscsi_hba *phba = data;
+ struct mgmt_session_info *boot_sess = &phba->boot_sess;
+ struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
char *str = buf;
int rc;
switch (type) {
case ISCSI_BOOT_TGT_NAME:
rc = sprintf(buf, "%.*s\n",
- (int)strlen(phba->boot_sess.target_name),
- (char *)&phba->boot_sess.target_name);
+ (int)strlen(boot_sess->target_name),
+ (char *)&boot_sess->target_name);
break;
case ISCSI_BOOT_TGT_IP_ADDR:
- if (phba->boot_sess.conn_list[0].dest_ipaddr.ip_type == 0x1)
+ if (boot_conn->dest_ipaddr.ip_type == 0x1)
rc = sprintf(buf, "%pI4\n",
- (char *)&phba->boot_sess.conn_list[0].
- dest_ipaddr.ip_address);
+ (char *)&boot_conn->dest_ipaddr.ip_address);
else
rc = sprintf(str, "%pI6\n",
- (char *)&phba->boot_sess.conn_list[0].
- dest_ipaddr.ip_address);
+ (char *)&boot_conn->dest_ipaddr.ip_address);
break;
case ISCSI_BOOT_TGT_PORT:
- rc = sprintf(str, "%d\n", phba->boot_sess.conn_list[0].
- dest_port);
+ rc = sprintf(str, "%d\n", boot_conn->dest_port);
break;
case ISCSI_BOOT_TGT_CHAP_NAME:
rc = sprintf(str, "%.*s\n",
- phba->boot_sess.conn_list[0].
- negotiated_login_options.auth_data.chap.
- target_chap_name_length,
- (char *)&phba->boot_sess.conn_list[0].
- negotiated_login_options.auth_data.chap.
- target_chap_name);
+ boot_conn->negotiated_login_options.auth_data.chap.
+ target_chap_name_length,
+ (char *)&boot_conn->negotiated_login_options.
+ auth_data.chap.target_chap_name);
break;
case ISCSI_BOOT_TGT_CHAP_SECRET:
rc = sprintf(str, "%.*s\n",
- phba->boot_sess.conn_list[0].
- negotiated_login_options.auth_data.chap.
- target_secret_length,
- (char *)&phba->boot_sess.conn_list[0].
- negotiated_login_options.auth_data.chap.
- target_secret);
-
+ boot_conn->negotiated_login_options.auth_data.chap.
+ target_secret_length,
+ (char *)&boot_conn->negotiated_login_options.
+ auth_data.chap.target_secret);
break;
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
rc = sprintf(str, "%.*s\n",
- phba->boot_sess.conn_list[0].
- negotiated_login_options.auth_data.chap.
- intr_chap_name_length,
- (char *)&phba->boot_sess.conn_list[0].
- negotiated_login_options.auth_data.chap.
- intr_chap_name);
-
+ boot_conn->negotiated_login_options.auth_data.chap.
+ intr_chap_name_length,
+ (char *)&boot_conn->negotiated_login_options.
+ auth_data.chap.intr_chap_name);
break;
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
- rc = sprintf(str, "%.*s\n",
- phba->boot_sess.conn_list[0].
- negotiated_login_options.auth_data.chap.
- intr_secret_length,
- (char *)&phba->boot_sess.conn_list[0].
- negotiated_login_options.auth_data.chap.
- intr_secret);
+ rc = sprintf(str, "%.*s\n",
+ boot_conn->negotiated_login_options.auth_data.chap.
+ intr_secret_length,
+ (char *)&boot_conn->negotiated_login_options.
+ auth_data.chap.intr_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = sprintf(str, "2\n");
+ rc = sprintf(str, "2\n");
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
- rc = sprintf(str, "0\n");
+ rc = sprintf(str, "0\n");
break;
default:
rc = -ENOSYS;
@@ -315,10 +304,10 @@ static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
- rc = sprintf(str, "2\n");
+ rc = sprintf(str, "2\n");
break;
case ISCSI_BOOT_ETH_INDEX:
- rc = sprintf(str, "0\n");
+ rc = sprintf(str, "0\n");
break;
case ISCSI_BOOT_ETH_MAC:
rc = beiscsi_get_macaddr(buf, phba);
@@ -391,40 +380,6 @@ static mode_t beiscsi_eth_get_attr_visibility(void *data, int type)
return rc;
}
-static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
-{
- struct iscsi_boot_kobj *boot_kobj;
-
- phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
- if (!phba->boot_kset)
- return -ENOMEM;
-
- /* get boot info using mgmt cmd */
- boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
- beiscsi_show_boot_tgt_info,
- beiscsi_tgt_get_attr_visibility);
- if (!boot_kobj)
- goto free_kset;
-
- boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
- beiscsi_show_boot_ini_info,
- beiscsi_ini_get_attr_visibility);
- if (!boot_kobj)
- goto free_kset;
-
- boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
- beiscsi_show_boot_eth_info,
- beiscsi_eth_get_attr_visibility);
- if (!boot_kobj)
- goto free_kset;
- return 0;
-
-free_kset:
- if (phba->boot_kset)
- iscsi_boot_destroy_kset(phba->boot_kset);
- return -ENOMEM;
-}
-
/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -483,14 +438,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
if (iscsi_host_add(shost, &phba->pcidev->dev))
goto free_devices;
- if (beiscsi_setup_boot_info(phba))
- /*
- * log error but continue, because we may not be using
- * iscsi boot.
- */
- shost_printk(KERN_ERR, phba->shost, "Could not set up "
- "iSCSI boot info.");
-
return phba;
free_devices:
@@ -3511,6 +3458,7 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
unsigned int tag, wrb_num;
unsigned short status, extd_status;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ int ret = -ENOMEM;
tag = beiscsi_get_boot_target(phba);
if (!tag) {
@@ -3535,8 +3483,7 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
boot_resp = embedded_payload(wrb);
if (boot_resp->boot_session_handle < 0) {
- printk(KERN_ERR "No Boot Session for this pci_func,"
- "session Hndl = %d\n", boot_resp->boot_session_handle);
+ shost_printk(KERN_INFO, phba->shost, "No Boot Session.\n");
return -ENXIO;
}
@@ -3574,14 +3521,70 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
wrb = queue_get_wrb(mccq, wrb_num);
free_mcc_tag(&phba->ctrl, tag);
session_resp = nonemb_cmd.va ;
+
memcpy(&phba->boot_sess, &session_resp->session_info,
sizeof(struct mgmt_session_info));
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
- return 0;
+ ret = 0;
+
boot_freemem:
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
+ return ret;
+}
+
+static void beiscsi_boot_release(void *data)
+{
+ struct beiscsi_hba *phba = data;
+
+ scsi_host_put(phba->shost);
+}
+
+static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
+{
+ struct iscsi_boot_kobj *boot_kobj;
+
+ /* get boot info using mgmt cmd */
+ if (beiscsi_get_boot_info(phba))
+ /* Try to see if we can carry on without this */
+ return 0;
+
+ phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
+ if (!phba->boot_kset)
+ return -ENOMEM;
+
+ /* get a ref because the show function will ref the phba */
+ if (!scsi_host_get(phba->shost))
+ goto free_kset;
+ boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
+ beiscsi_show_boot_tgt_info,
+ beiscsi_tgt_get_attr_visibility,
+ beiscsi_boot_release);
+ if (!boot_kobj)
+ goto put_shost;
+
+ if (!scsi_host_get(phba->shost))
+ goto free_kset;
+ boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
+ beiscsi_show_boot_ini_info,
+ beiscsi_ini_get_attr_visibility,
+ beiscsi_boot_release);
+ if (!boot_kobj)
+ goto put_shost;
+
+ if (!scsi_host_get(phba->shost))
+ goto free_kset;
+ boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
+ beiscsi_show_boot_eth_info,
+ beiscsi_eth_get_attr_visibility,
+ beiscsi_boot_release);
+ if (!boot_kobj)
+ goto put_shost;
+ return 0;
+
+put_shost:
+ scsi_host_put(phba->shost);
+free_kset:
+ iscsi_boot_destroy_kset(phba->boot_kset);
return -ENOMEM;
}
@@ -3963,11 +3966,10 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
}
memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
dw[offsetof(struct amap_pdu_data_out, lun) / 32],
- io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
+ &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
- cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
- lun[0]));
+ cpu_to_be16(*(unsigned short *)&io_task->cmd_bhs->iscsi_hdr.lun));
AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
io_task->pwrb_handle->wrb_index);
@@ -4150,8 +4152,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
phba->ctrl.mbox_mem_alloced.dma);
- if (phba->boot_kset)
- iscsi_boot_destroy_kset(phba->boot_kset);
+ iscsi_boot_destroy_kset(phba->boot_kset);
iscsi_host_remove(phba->shost);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
@@ -4310,11 +4311,15 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
goto free_blkenbld;
}
hwi_enable_intr(phba);
- ret = beiscsi_get_boot_info(phba);
- if (ret < 0) {
- shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
- "No Boot Devices !!!!!\n");
- }
+
+ if (beiscsi_setup_boot_info(phba))
+ /*
+ * log error but continue, because we may not be using
+ * iscsi boot.
+ */
+ shost_printk(KERN_ERR, phba->shost, "Could not set up "
+ "iSCSI boot info.");
+
SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
return 0;
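
beiscsi_setup_boot_info() above takes one scsi_host_get() reference per boot kobject, passes beiscsi_boot_release() as the release callback that later drops it, and unwinds through the put_shost/free_kset labels in reverse order when a step fails. A toy, self-contained sketch of that acquire-then-unwind shape; all names here are stand-ins, not the iscsi_boot or SCSI host API:

#include <stdio.h>

static int have_ref, have_kset;

static int make_kset(void)  { have_kset = 1; return 1; }
static void free_kset(void) { have_kset = 0; }
static int grab_ref(void)   { have_ref = 1; return 1; }
static void drop_ref(void)  { have_ref = 0; }
static int make_child(void) { return 0; }	/* pretend child creation fails */

static int setup(void)
{
	if (!make_kset())
		return -1;
	if (!grab_ref())		/* the child's release would drop this ref */
		goto out_kset;
	if (!make_child())
		goto out_ref;
	return 0;

out_ref:
	drop_ref();			/* undo in reverse order of acquisition */
out_kset:
	free_kset();
	return -1;
}

int main(void)
{
	printf("setup: %d (ref=%d kset=%d)\n", setup(), have_ref, have_kset);
	return 0;
}
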
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 081c171a1ed..5ce5170254c 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -397,7 +397,7 @@ struct amap_pdu_data_out {
};
struct be_cmd_bhs {
- struct iscsi_cmd iscsi_hdr;
+ struct iscsi_scsi_req iscsi_hdr;
unsigned char pad1[16];
struct pdu_data_out iscsi_data_pdu;
unsigned char pad2[BE_SENSE_INFO_SIZE -
@@ -428,7 +428,7 @@ struct be_nonio_bhs {
};
struct be_status_bhs {
- struct iscsi_cmd iscsi_hdr;
+ struct iscsi_scsi_req iscsi_hdr;
unsigned char pad1[16];
/**
* The plus 2 below is to hold the sense info length that gets
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
index 4ce6f494232..475cf925d5e 100644
--- a/drivers/scsi/bfa/Makefile
+++ b/drivers/scsi/bfa/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
-bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o
+bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o bfad_bsg.o
bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 7be6b5a8114..a796de93505 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -27,7 +27,7 @@
struct bfa_s;
typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
-typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
/*
* Interrupt message handlers
@@ -54,7 +54,8 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
+ bfa_reqq_pi((__bfa), (__reqq)))))
-#define bfa_reqq_produce(__bfa, __reqq) do { \
+#define bfa_reqq_produce(__bfa, __reqq, __mh) do { \
+ (__mh).mtag.h2i.qid = (__bfa)->iocfc.hw_qid[__reqq];\
(__bfa)->iocfc.req_cq_pi[__reqq]++; \
(__bfa)->iocfc.req_cq_pi[__reqq] &= \
((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
@@ -76,16 +77,6 @@ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
} while (0)
/*
- * Queue element to wait for room in request queue. FIFO order is
- * maintained when fullfilling requests.
- */
-struct bfa_reqq_wait_s {
- struct list_head qe;
- void (*qresume) (void *cbarg);
- void *cbarg;
-};
-
-/*
* Circular queue usage assignments
*/
enum {
@@ -128,21 +119,10 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
#define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe)
-
-/*
- * Generic BFA callback element.
- */
-struct bfa_cb_qe_s {
- struct list_head qe;
- bfa_cb_cbfn_t cbfn;
- bfa_boolean_t once;
- u32 rsvd;
- void *cbarg;
-};
-
#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do { \
(__hcb_qe)->cbfn = (__cbfn); \
(__hcb_qe)->cbarg = (__cbarg); \
+ (__hcb_qe)->pre_rmv = BFA_FALSE; \
list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
} while (0)
@@ -157,6 +137,11 @@ struct bfa_cb_qe_s {
} \
} while (0)
+#define bfa_cb_queue_status(__bfa, __hcb_qe, __status) do { \
+ (__hcb_qe)->fw_status = (__status); \
+ list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q); \
+} while (0)
+
#define bfa_cb_queue_done(__hcb_qe) do { \
(__hcb_qe)->once = BFA_FALSE; \
} while (0)
@@ -172,44 +157,14 @@ struct bfa_pciid_s {
extern char bfa_version[];
-/*
- * BFA memory resources
- */
-enum bfa_mem_type {
- BFA_MEM_TYPE_KVA = 1, /* Kernel Virtual Memory *(non-dma-able) */
- BFA_MEM_TYPE_DMA = 2, /* DMA-able memory */
- BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA,
-};
-
-struct bfa_mem_elem_s {
- enum bfa_mem_type mem_type; /* see enum bfa_mem_type */
- u32 mem_len; /* Total Length in Bytes */
- u8 *kva; /* kernel virtual address */
- u64 dma; /* dma address if DMA memory */
- u8 *kva_curp; /* kva allocation cursor */
- u64 dma_curp; /* dma allocation cursor */
-};
-
-struct bfa_meminfo_s {
- struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
-};
-#define bfa_meminfo_kva(_m) \
- ((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp)
-#define bfa_meminfo_dma_virt(_m) \
- ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp)
-#define bfa_meminfo_dma_phys(_m) \
- ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
-
struct bfa_iocfc_regs_s {
void __iomem *intr_status;
void __iomem *intr_mask;
void __iomem *cpe_q_pi[BFI_IOC_MAX_CQS];
void __iomem *cpe_q_ci[BFI_IOC_MAX_CQS];
- void __iomem *cpe_q_depth[BFI_IOC_MAX_CQS];
void __iomem *cpe_q_ctrl[BFI_IOC_MAX_CQS];
void __iomem *rme_q_ci[BFI_IOC_MAX_CQS];
void __iomem *rme_q_pi[BFI_IOC_MAX_CQS];
- void __iomem *rme_q_depth[BFI_IOC_MAX_CQS];
void __iomem *rme_q_ctrl[BFI_IOC_MAX_CQS];
};
@@ -229,27 +184,57 @@ struct bfa_msix_s {
struct bfa_hwif_s {
void (*hw_reginit)(struct bfa_s *bfa);
void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
- void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
+ void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
- void (*hw_msix_install)(struct bfa_s *bfa);
+ void (*hw_msix_ctrl_install)(struct bfa_s *bfa);
+ void (*hw_msix_queue_install)(struct bfa_s *bfa);
void (*hw_msix_uninstall)(struct bfa_s *bfa);
void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
u32 *nvecs, u32 *maxvec);
void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
u32 *end);
+ int cpe_vec_q0;
+ int rme_vec_q0;
};
typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
+struct bfa_faa_cbfn_s {
+ bfa_cb_iocfc_t faa_cbfn;
+ void *faa_cbarg;
+};
+
+#define BFA_FAA_ENABLED 1
+#define BFA_FAA_DISABLED 2
+
+/*
+ * FAA attributes
+ */
+struct bfa_faa_attr_s {
+ wwn_t faa;
+ u8 faa_state;
+ u8 pwwn_source;
+ u8 rsvd[6];
+};
+
+struct bfa_faa_args_s {
+ struct bfa_faa_attr_s *faa_attr;
+ struct bfa_faa_cbfn_s faa_cb;
+ u8 faa_state;
+ bfa_boolean_t busy;
+};
+
struct bfa_iocfc_s {
struct bfa_s *bfa;
struct bfa_iocfc_cfg_s cfg;
int action;
u32 req_cq_pi[BFI_IOC_MAX_CQS];
u32 rsp_cq_ci[BFI_IOC_MAX_CQS];
+ u8 hw_qid[BFI_IOC_MAX_CQS];
struct bfa_cb_qe_s init_hcb_qe;
struct bfa_cb_qe_s stop_hcb_qe;
struct bfa_cb_qe_s dis_hcb_qe;
+ struct bfa_cb_qe_s en_hcb_qe;
struct bfa_cb_qe_s stats_hcb_qe;
bfa_boolean_t cfgdone;
@@ -257,7 +242,6 @@ struct bfa_iocfc_s {
struct bfi_iocfc_cfg_s *cfginfo;
struct bfa_dma_s cfgrsp_dma;
struct bfi_iocfc_cfgrsp_s *cfgrsp;
- struct bfi_iocfc_cfg_reply_s *cfg_reply;
struct bfa_dma_s req_cq_ba[BFI_IOC_MAX_CQS];
struct bfa_dma_s req_cq_shadow_ci[BFI_IOC_MAX_CQS];
struct bfa_dma_s rsp_cq_ba[BFI_IOC_MAX_CQS];
@@ -267,18 +251,40 @@ struct bfa_iocfc_s {
bfa_cb_iocfc_t updateq_cbfn; /* bios callback function */
void *updateq_cbarg; /* bios callback arg */
u32 intr_mask;
+ struct bfa_faa_args_s faa_args;
+ struct bfa_mem_dma_s ioc_dma;
+ struct bfa_mem_dma_s iocfc_dma;
+ struct bfa_mem_dma_s reqq_dma[BFI_IOC_MAX_CQS];
+ struct bfa_mem_dma_s rspq_dma[BFI_IOC_MAX_CQS];
+ struct bfa_mem_kva_s kva_seg;
};
-#define bfa_lpuid(__bfa) \
- bfa_ioc_portid(&(__bfa)->ioc)
+#define BFA_MEM_IOC_DMA(_bfa) (&((_bfa)->iocfc.ioc_dma))
+#define BFA_MEM_IOCFC_DMA(_bfa) (&((_bfa)->iocfc.iocfc_dma))
+#define BFA_MEM_REQQ_DMA(_bfa, _qno) (&((_bfa)->iocfc.reqq_dma[(_qno)]))
+#define BFA_MEM_RSPQ_DMA(_bfa, _qno) (&((_bfa)->iocfc.rspq_dma[(_qno)]))
+#define BFA_MEM_IOCFC_KVA(_bfa) (&((_bfa)->iocfc.kva_seg))
+
+#define bfa_fn_lpu(__bfa) \
+ bfi_fn_lpu(bfa_ioc_pcifn(&(__bfa)->ioc), bfa_ioc_portid(&(__bfa)->ioc))
#define bfa_msix_init(__bfa, __nvecs) \
((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
-#define bfa_msix_install(__bfa) \
- ((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
+#define bfa_msix_ctrl_install(__bfa) \
+ ((__bfa)->iocfc.hwif.hw_msix_ctrl_install(__bfa))
+#define bfa_msix_queue_install(__bfa) \
+ ((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa))
#define bfa_msix_uninstall(__bfa) \
((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
-#define bfa_isr_mode_set(__bfa, __msix) \
- ((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
+#define bfa_isr_rspq_ack(__bfa, __queue, __ci) \
+ ((__bfa)->iocfc.hwif.hw_rspq_ack(__bfa, __queue, __ci))
+#define bfa_isr_reqq_ack(__bfa, __queue) do { \
+ if ((__bfa)->iocfc.hwif.hw_reqq_ack) \
+ (__bfa)->iocfc.hwif.hw_reqq_ack(__bfa, __queue); \
+} while (0)
+#define bfa_isr_mode_set(__bfa, __msix) do { \
+ if ((__bfa)->iocfc.hwif.hw_isr_mode_set) \
+ (__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix); \
+} while (0)
#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec) \
((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap, \
__nvecs, __maxvec))
@@ -290,17 +296,17 @@ struct bfa_iocfc_s {
/*
* FC specific IOC functions.
*/
-void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
- u32 *dm_len);
+void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa);
void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *meminfo,
struct bfa_pcidev_s *pcidev);
void bfa_iocfc_init(struct bfa_s *bfa);
void bfa_iocfc_start(struct bfa_s *bfa);
void bfa_iocfc_stop(struct bfa_s *bfa);
void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
-void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
+void bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa);
bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
void bfa_iocfc_reset_queues(struct bfa_s *bfa);
@@ -310,10 +316,10 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec);
void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
void bfa_hwcb_reginit(struct bfa_s *bfa);
-void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
-void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
-void bfa_hwcb_msix_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_queue_install(struct bfa_s *bfa);
void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
@@ -321,10 +327,13 @@ void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
u32 *end);
void bfa_hwct_reginit(struct bfa_s *bfa);
+void bfa_hwct2_reginit(struct bfa_s *bfa);
void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
-void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
+void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
-void bfa_hwct_msix_install(struct bfa_s *bfa);
+void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa);
+void bfa_hwct_msix_queue_install(struct bfa_s *bfa);
void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
@@ -373,11 +382,28 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
#define bfa_get_fw_clock_res(__bfa) \
((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
+/*
+ * lun mask macros return NULL when min cfg is enabled and there is
+ * no memory allocated for lunmask.
+ */
+#define bfa_get_lun_mask(__bfa) \
+ ((&(__bfa)->modules.dconf_mod)->min_cfg) ? NULL : \
+ (&(BFA_DCONF_MOD(__bfa)->dconf->lun_mask))
+
+#define bfa_get_lun_mask_list(_bfa) \
+ ((&(_bfa)->modules.dconf_mod)->min_cfg) ? NULL : \
+ (bfa_get_lun_mask(_bfa)->lun_list)
+
+#define bfa_get_lun_mask_status(_bfa) \
+ (((&(_bfa)->modules.dconf_mod)->min_cfg) \
+ ? BFA_LUNMASK_MINCFG : ((bfa_get_lun_mask(_bfa))->status))
+
void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *meminfo);
+ struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa);
void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo,
struct bfa_pcidev_s *pcidev);
@@ -402,7 +428,22 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
void bfa_iocfc_enable(struct bfa_s *bfa);
void bfa_iocfc_disable(struct bfa_s *bfa);
+void bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status);
#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \
bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
+struct bfa_cb_pending_q_s {
+ struct bfa_cb_qe_s hcb_qe;
+ void *data; /* Driver buffer */
+};
+
+/* Common macros to operate on pending stats/attr apis */
+#define bfa_pending_q_init(__qe, __cbfn, __cbarg, __data) do { \
+ bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \
+ (__qe)->hcb_qe.cbfn = (__cbfn); \
+ (__qe)->hcb_qe.cbarg = (__cbarg); \
+ (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \
+ (__qe)->data = (__data); \
+} while (0)
+
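bfa_pending_q_init() is what sets hcb_qe.pre_rmv, and bfa_comp_process() in bfa_core.c later in this patch keys on that flag to dequeue the element before invoking the callback with the firmware status. A hedged usage sketch follows; the function name and the caller-owned pending list are illustrative, and bfa_cb_cbfn_t is assumed to be the callback type stored in bfa_cb_qe_s:

	/* Illustrative only: arm a pending completion element and park it on a
	 * driver-owned wait list until the firmware response arrives. */
	static void
	example_pending_q_add(struct bfa_cb_pending_q_s *cb_qe,
			      struct list_head *pending_q,
			      bfa_cb_cbfn_t cbfn, void *cbarg, void *drv_buf)
	{
		bfa_pending_q_init(cb_qe, cbfn, cbarg, drv_buf);
		list_add_tail(&cb_qe->hcb_qe.qe, pending_q);
	}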
#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 91838c51fb7..4bd546bcc24 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -17,7 +17,7 @@
#include "bfad_drv.h"
#include "bfa_modules.h"
-#include "bfi_ctreg.h"
+#include "bfi_reg.h"
BFA_TRC_FILE(HAL, CORE);
@@ -25,13 +25,15 @@ BFA_TRC_FILE(HAL, CORE);
* BFA module list terminated by NULL
*/
static struct bfa_module_s *hal_mods[] = {
+ &hal_mod_fcdiag,
&hal_mod_sgpg,
&hal_mod_fcport,
&hal_mod_fcxp,
&hal_mod_lps,
&hal_mod_uf,
&hal_mod_rport,
- &hal_mod_fcpim,
+ &hal_mod_fcp,
+ &hal_mod_dconf,
NULL
};
@@ -41,7 +43,7 @@ static struct bfa_module_s *hal_mods[] = {
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
bfa_isr_unhandled, /* NONE */
bfa_isr_unhandled, /* BFI_MC_IOC */
- bfa_isr_unhandled, /* BFI_MC_DIAG */
+ bfa_fcdiag_intr, /* BFI_MC_DIAG */
bfa_isr_unhandled, /* BFI_MC_FLASH */
bfa_isr_unhandled, /* BFI_MC_CEE */
bfa_fcport_isr, /* BFI_MC_FCPORT */
@@ -51,7 +53,7 @@ static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
bfa_fcxp_isr, /* BFI_MC_FCXP */
bfa_lps_isr, /* BFI_MC_LPS */
bfa_rport_isr, /* BFI_MC_RPORT */
- bfa_itnim_isr, /* BFI_MC_ITNIM */
+ bfa_itn_isr, /* BFI_MC_ITN */
bfa_isr_unhandled, /* BFI_MC_IOIM_READ */
bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */
bfa_isr_unhandled, /* BFI_MC_IOIM_IO */
@@ -89,23 +91,78 @@ static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
static void
-bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
+bfa_com_port_attach(struct bfa_s *bfa)
{
struct bfa_port_s *port = &bfa->modules.port;
- u32 dm_len;
- u8 *dm_kva;
- u64 dm_pa;
+ struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
- dm_len = bfa_port_meminfo();
- dm_kva = bfa_meminfo_dma_virt(mi);
- dm_pa = bfa_meminfo_dma_phys(mi);
-
- memset(port, 0, sizeof(struct bfa_port_s));
bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
- bfa_port_mem_claim(port, dm_kva, dm_pa);
+ bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
+}
+
+/*
+ * ablk module attach
+ */
+static void
+bfa_com_ablk_attach(struct bfa_s *bfa)
+{
+ struct bfa_ablk_s *ablk = &bfa->modules.ablk;
+ struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
+
+ bfa_ablk_attach(ablk, &bfa->ioc);
+ bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
+}
+
+static void
+bfa_com_cee_attach(struct bfa_s *bfa)
+{
+ struct bfa_cee_s *cee = &bfa->modules.cee;
+ struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
+
+ cee->trcmod = bfa->trcmod;
+ bfa_cee_attach(cee, &bfa->ioc, bfa);
+ bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
+}
+
+static void
+bfa_com_sfp_attach(struct bfa_s *bfa)
+{
+ struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa);
+ struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
+
+ bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
+ bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
+}
+
+static void
+bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
+{
+ struct bfa_flash_s *flash = BFA_FLASH(bfa);
+ struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
+
+ bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
+ bfa_flash_memclaim(flash, flash_dma->kva_curp,
+ flash_dma->dma_curp, mincfg);
+}
+
+static void
+bfa_com_diag_attach(struct bfa_s *bfa)
+{
+ struct bfa_diag_s *diag = BFA_DIAG_MOD(bfa);
+ struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
+
+ bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
+ bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
+}
+
+static void
+bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
+{
+ struct bfa_phy_s *phy = BFA_PHY(bfa);
+ struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
- bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
- bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
+ bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
+ bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
}
/*
@@ -122,6 +179,7 @@ enum {
BFA_IOCFC_ACT_INIT = 1,
BFA_IOCFC_ACT_STOP = 2,
BFA_IOCFC_ACT_DISABLE = 3,
+ BFA_IOCFC_ACT_ENABLE = 4,
};
#define DEF_CFG_NUM_FABRICS 1
@@ -173,10 +231,88 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid)
}
}
+static inline void
+bfa_isr_rspq(struct bfa_s *bfa, int qid)
+{
+ struct bfi_msg_s *m;
+ u32 pi, ci;
+ struct list_head *waitq;
+
+ ci = bfa_rspq_ci(bfa, qid);
+ pi = bfa_rspq_pi(bfa, qid);
+
+ while (ci != pi) {
+ m = bfa_rspq_elem(bfa, qid, ci);
+ WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);
+
+ bfa_isrs[m->mhdr.msg_class] (bfa, m);
+ CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
+ }
+
+ /*
+ * acknowledge RME completions and update CI
+ */
+ bfa_isr_rspq_ack(bfa, qid, ci);
+
+ /*
+ * Resume any pending requests in the corresponding reqq.
+ */
+ waitq = bfa_reqq(bfa, qid);
+ if (!list_empty(waitq))
+ bfa_reqq_resume(bfa, qid);
+}
+
+static inline void
+bfa_isr_reqq(struct bfa_s *bfa, int qid)
+{
+ struct list_head *waitq;
+
+ bfa_isr_reqq_ack(bfa, qid);
+
+ /*
+ * Resume any pending requests in the corresponding reqq.
+ */
+ waitq = bfa_reqq(bfa, qid);
+ if (!list_empty(waitq))
+ bfa_reqq_resume(bfa, qid);
+}
+
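bfa_isr_rspq() above is a single-consumer circular-queue walk: consume entries from the shadow CI up to the firmware-posted PI, wrapping at the queue depth, then acknowledge once with the final CI. Below is a self-contained sketch of the same index arithmetic in plain C; all names and sizes are illustrative and not taken from the bfi headers (the driver's CQ_INCR does the equivalent wrap, assuming a power-of-two queue size):

	#include <stdio.h>

	#define Q_DEPTH 8	/* illustrative; the driver uses num_rspq_elems */

	/* Advance a circular-queue index, wrapping at the depth. */
	static unsigned int cq_incr(unsigned int idx, unsigned int depth)
	{
		return (idx + 1) % depth;
	}

	int main(void)
	{
		int ring[Q_DEPTH] = { 10, 11, 12, 13, 14, 15, 16, 17 };
		unsigned int ci = 6, pi = 2;	/* consumer trails producer across the wrap */

		while (ci != pi) {		/* drain everything the producer posted */
			printf("consume ring[%u] = %d\n", ci, ring[ci]);
			ci = cq_incr(ci, Q_DEPTH);
		}
		/* here the driver would write ci back and ack the RME interrupt */
		return 0;
	}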
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
- bfa_intx(bfa);
+ u32 intr, qintr;
+ int queue;
+
+ intr = readl(bfa->iocfc.bfa_regs.intr_status);
+ if (!intr)
+ return;
+
+ /*
+ * RME completion queue interrupt
+ */
+ qintr = intr & __HFN_INT_RME_MASK;
+ if (qintr && bfa->queue_process) {
+ for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+ bfa_isr_rspq(bfa, queue);
+ }
+
+ intr &= ~qintr;
+ if (!intr)
+ return;
+
+ /*
+ * CPE completion queue interrupt
+ */
+ qintr = intr & __HFN_INT_CPE_MASK;
+ if (qintr && bfa->queue_process) {
+ for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+ bfa_isr_reqq(bfa, queue);
+ }
+ intr &= ~qintr;
+ if (!intr)
+ return;
+
+ bfa_msix_lpu_err(bfa, intr);
}
bfa_boolean_t
@@ -186,20 +322,19 @@ bfa_intx(struct bfa_s *bfa)
int queue;
intr = readl(bfa->iocfc.bfa_regs.intr_status);
- if (!intr)
- return BFA_FALSE;
+
+ qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
+ if (qintr)
+ writel(qintr, bfa->iocfc.bfa_regs.intr_status);
/*
- * RME completion queue interrupt
+ * Unconditional RME completion queue interrupt
*/
- qintr = intr & __HFN_INT_RME_MASK;
- writel(qintr, bfa->iocfc.bfa_regs.intr_status);
-
- for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
- if (intr & (__HFN_INT_RME_Q0 << queue))
- bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
+ if (bfa->queue_process) {
+ for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+ bfa_isr_rspq(bfa, queue);
}
- intr &= ~qintr;
+
if (!intr)
return BFA_TRUE;
@@ -207,11 +342,9 @@ bfa_intx(struct bfa_s *bfa)
* CPE completion queue interrupt
*/
qintr = intr & __HFN_INT_CPE_MASK;
- writel(qintr, bfa->iocfc.bfa_regs.intr_status);
-
- for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
- if (intr & (__HFN_INT_CPE_Q0 << queue))
- bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
+ if (qintr && bfa->queue_process) {
+ for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
+ bfa_isr_reqq(bfa, queue);
}
intr &= ~qintr;
if (!intr)
@@ -225,32 +358,25 @@ bfa_intx(struct bfa_s *bfa)
void
bfa_isr_enable(struct bfa_s *bfa)
{
- u32 intr_unmask;
+ u32 umsk;
int pci_func = bfa_ioc_pcifn(&bfa->ioc);
bfa_trc(bfa, pci_func);
- bfa_msix_install(bfa);
- intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
- __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
- __HFN_INT_LL_HALT);
-
- if (pci_func == 0)
- intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
- __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
- __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
- __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
- __HFN_INT_MBOX_LPU0);
- else
- intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
- __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
- __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
- __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
- __HFN_INT_MBOX_LPU1);
-
- writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
- writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
- bfa->iocfc.intr_mask = ~intr_unmask;
+ bfa_msix_ctrl_install(bfa);
+
+ if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
+ umsk = __HFN_INT_ERR_MASK_CT2;
+ umsk |= pci_func == 0 ?
+ __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
+ } else {
+ umsk = __HFN_INT_ERR_MASK;
+ umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
+ }
+
+ writel(umsk, bfa->iocfc.bfa_regs.intr_status);
+ writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
+ bfa->iocfc.intr_mask = ~umsk;
bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
@@ -263,20 +389,9 @@ bfa_isr_disable(struct bfa_s *bfa)
}
void
-bfa_msix_reqq(struct bfa_s *bfa, int qid)
+bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
- struct list_head *waitq;
-
- qid &= (BFI_IOC_MAX_CQS - 1);
-
- bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
-
- /*
- * Resume any pending requests in the corresponding reqq.
- */
- waitq = bfa_reqq(bfa, qid);
- if (!list_empty(waitq))
- bfa_reqq_resume(bfa, qid);
+ bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}
void
@@ -290,57 +405,38 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
}
void
-bfa_msix_rspq(struct bfa_s *bfa, int qid)
+bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
- struct bfi_msg_s *m;
- u32 pi, ci;
- struct list_head *waitq;
-
- qid &= (BFI_IOC_MAX_CQS - 1);
-
- bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
-
- ci = bfa_rspq_ci(bfa, qid);
- pi = bfa_rspq_pi(bfa, qid);
-
- if (bfa->rme_process) {
- while (ci != pi) {
- m = bfa_rspq_elem(bfa, qid, ci);
- bfa_isrs[m->mhdr.msg_class] (bfa, m);
- CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
- }
- }
-
- /*
- * update CI
- */
- bfa_rspq_ci(bfa, qid) = pi;
- writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
- mmiowb();
-
- /*
- * Resume any pending requests in the corresponding reqq.
- */
- waitq = bfa_reqq(bfa, qid);
- if (!list_empty(waitq))
- bfa_reqq_resume(bfa, qid);
+ bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
u32 intr, curr_value;
+ bfa_boolean_t lpu_isr, halt_isr, pss_isr;
intr = readl(bfa->iocfc.bfa_regs.intr_status);
- if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
- bfa_ioc_mbox_isr(&bfa->ioc);
+ if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
+ halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
+ pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
+ lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
+ __HFN_INT_MBOX_LPU1_CT2);
+ intr &= __HFN_INT_ERR_MASK_CT2;
+ } else {
+ halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
+ (intr & __HFN_INT_LL_HALT) : 0;
+ pss_isr = intr & __HFN_INT_ERR_PSS;
+ lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
+ intr &= __HFN_INT_ERR_MASK;
+ }
- intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
- __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
+ if (lpu_isr)
+ bfa_ioc_mbox_isr(&bfa->ioc);
if (intr) {
- if (intr & __HFN_INT_LL_HALT) {
+ if (halt_isr) {
/*
* If LL_HALT bit is set then FW Init Halt LL Port
* Register needs to be cleared as well so Interrupt
@@ -351,7 +447,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
}
- if (intr & __HFN_INT_ERR_PSS) {
+ if (pss_isr) {
/*
* ERR_PSS bit needs to be cleared as well in case
* interrupts are shared so driver's interrupt handler is
@@ -359,7 +455,6 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
*/
curr_value = readl(
bfa->ioc.ioc_regs.pss_err_status_reg);
- curr_value &= __PSS_ERR_STATUS_SET;
writel(curr_value,
bfa->ioc.ioc_regs.pss_err_status_reg);
}
@@ -377,41 +472,6 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
* BFA IOC private functions
*/
-static void
-bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
-{
- int i, per_reqq_sz, per_rspq_sz;
-
- per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
- BFA_DMA_ALIGN_SZ);
- per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
- BFA_DMA_ALIGN_SZ);
-
- /*
- * Calculate CQ size
- */
- for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
- *dm_len = *dm_len + per_reqq_sz;
- *dm_len = *dm_len + per_rspq_sz;
- }
-
- /*
- * Calculate Shadow CI/PI size
- */
- for (i = 0; i < cfg->fwcfg.num_cqs; i++)
- *dm_len += (2 * BFA_CACHELINE_SZ);
-}
-
-static void
-bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
-{
- *dm_len +=
- BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
- *dm_len +=
- BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
- BFA_CACHELINE_SZ);
-}
-
/*
* Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
*/
@@ -433,8 +493,13 @@ bfa_iocfc_send_cfg(void *bfa_arg)
/*
* initialize IOC configuration info
*/
+ cfg_info->single_msix_vec = 0;
+ if (bfa->msix.nvecs == 1)
+ cfg_info->single_msix_vec = 1;
cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
cfg_info->num_cqs = cfg->fwcfg.num_cqs;
+ cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
+ cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);
bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
/*
@@ -469,7 +534,7 @@ bfa_iocfc_send_cfg(void *bfa_arg)
* dma map IOC configuration itself
*/
bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
- bfa_lpuid(bfa));
+ bfa_fn_lpu(bfa));
bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
@@ -491,26 +556,40 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
/*
* Initialize chip specific handlers.
*/
- if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
+ if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
iocfc->hwif.hw_reginit = bfa_hwct_reginit;
iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
- iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
+ iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
+ iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
+ iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
+ iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
} else {
iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
- iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
+ iocfc->hwif.hw_reqq_ack = NULL;
iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
- iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
+ iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
+ iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
+ iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
+ bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
+ iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
+ bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
+ }
+
+ if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
+ iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
+ iocfc->hwif.hw_isr_mode_set = NULL;
+ iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
}
iocfc->hwif.hw_reginit(bfa);
@@ -518,48 +597,42 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
}
static void
-bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *meminfo)
+bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
- u8 *dm_kva;
- u64 dm_pa;
- int i, per_reqq_sz, per_rspq_sz;
+ u8 *dm_kva = NULL;
+ u64 dm_pa = 0;
+ int i, per_reqq_sz, per_rspq_sz, dbgsz;
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
- int dbgsz;
+ struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
+ struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
+ struct bfa_mem_dma_s *reqq_dma, *rspq_dma;
- dm_kva = bfa_meminfo_dma_virt(meminfo);
- dm_pa = bfa_meminfo_dma_phys(meminfo);
+ /* First allocate dma memory for IOC */
+ bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
+ bfa_mem_dma_phys(ioc_dma));
- /*
- * First allocate dma memory for IOC.
- */
- bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
- dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
- dm_pa += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
-
- /*
- * Claim DMA-able memory for the request/response queues and for shadow
- * ci/pi registers
- */
+ /* Claim DMA-able memory for the request/response queues */
per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
- BFA_DMA_ALIGN_SZ);
+ BFA_DMA_ALIGN_SZ);
per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
- BFA_DMA_ALIGN_SZ);
+ BFA_DMA_ALIGN_SZ);
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
- iocfc->req_cq_ba[i].kva = dm_kva;
- iocfc->req_cq_ba[i].pa = dm_pa;
- memset(dm_kva, 0, per_reqq_sz);
- dm_kva += per_reqq_sz;
- dm_pa += per_reqq_sz;
-
- iocfc->rsp_cq_ba[i].kva = dm_kva;
- iocfc->rsp_cq_ba[i].pa = dm_pa;
- memset(dm_kva, 0, per_rspq_sz);
- dm_kva += per_rspq_sz;
- dm_pa += per_rspq_sz;
+ reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
+ iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
+ iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
+ memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);
+
+ rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
+ iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
+ iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
+ memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
}
+ /* Claim IOCFC dma memory - for shadow CI/PI */
+ dm_kva = bfa_mem_dma_virt(iocfc_dma);
+ dm_pa = bfa_mem_dma_phys(iocfc_dma);
+
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
iocfc->req_cq_shadow_ci[i].kva = dm_kva;
iocfc->req_cq_shadow_ci[i].pa = dm_pa;
@@ -572,36 +645,27 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
dm_pa += BFA_CACHELINE_SZ;
}
- /*
- * Claim DMA-able memory for the config info page
- */
+ /* Claim IOCFC dma memory - for the config info page */
bfa->iocfc.cfg_info.kva = dm_kva;
bfa->iocfc.cfg_info.pa = dm_pa;
bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
- /*
- * Claim DMA-able memory for the config response
- */
+ /* Claim IOCFC dma memory - for the config response */
bfa->iocfc.cfgrsp_dma.kva = dm_kva;
bfa->iocfc.cfgrsp_dma.pa = dm_pa;
bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
-
- dm_kva +=
- BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
- BFA_CACHELINE_SZ);
+ dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+ BFA_CACHELINE_SZ);
dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
- BFA_CACHELINE_SZ);
-
-
- bfa_meminfo_dma_virt(meminfo) = dm_kva;
- bfa_meminfo_dma_phys(meminfo) = dm_pa;
+ BFA_CACHELINE_SZ);
+ /* Claim IOCFC kva memory */
dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
if (dbgsz > 0) {
- bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
- bfa_meminfo_kva(meminfo) += dbgsz;
+ bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
+ bfa_mem_kva_curp(iocfc) += dbgsz;
}
}
@@ -613,7 +677,9 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
{
int i;
- bfa->rme_process = BFA_TRUE;
+ bfa->queue_process = BFA_TRUE;
+ for (i = 0; i < BFI_IOC_MAX_CQS; i++)
+ bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->start(bfa);
@@ -637,7 +703,7 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
struct bfa_s *bfa = bfa_arg;
if (complete) {
- if (bfa->iocfc.cfgdone)
+ if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
else
bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
@@ -660,6 +726,16 @@ bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
}
static void
+bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
+{
+ struct bfa_s *bfa = bfa_arg;
+ struct bfad_s *bfad = bfa->bfad;
+
+ if (compl)
+ complete(&bfad->enable_comp);
+}
+
+static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
struct bfa_s *bfa = bfa_arg;
@@ -669,6 +745,37 @@ bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
complete(&bfad->disable_comp);
}
+/**
+ * configure queue registers from firmware response
+ */
+static void
+bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
+{
+ int i;
+ struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
+ void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
+
+ for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
+ bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
+ r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
+ r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
+ r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
+ r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
+ r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
+ r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
+ }
+}
+
+static void
+bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
+{
+ bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
+ bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
+ bfa_rport_res_recfg(bfa, fwcfg->num_rports);
+ bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
+ bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
+}
+
/*
* Update BFA configuration from firmware configuration.
*/
@@ -681,6 +788,7 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
fwcfg->num_cqs = fwcfg->num_cqs;
fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
+ fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
@@ -689,14 +797,35 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
iocfc->cfgdone = BFA_TRUE;
/*
+ * configure queue register offsets as learnt from firmware
+ */
+ bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
+
+ /*
+ * Re-configure resources as learnt from Firmware
+ */
+ bfa_iocfc_res_recfg(bfa, fwcfg);
+
+ /*
+ * Install MSIX queue handlers
+ */
+ bfa_msix_queue_install(bfa);
+
+ /*
* Configuration is complete - initialize/start submodules
*/
bfa_fcport_init(bfa);
- if (iocfc->action == BFA_IOCFC_ACT_INIT)
- bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
- else
+ if (iocfc->action == BFA_IOCFC_ACT_INIT) {
+ if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
+ bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
+ bfa_iocfc_init_cb, bfa);
+ } else {
+ if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
+ bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
+ bfa_iocfc_enable_cb, bfa);
bfa_iocfc_start_submod(bfa);
+ }
}
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
@@ -711,6 +840,181 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa)
}
}
+/* Fabric Assigned Address specific functions */
+
+/*
+ * Check whether IOC is ready before sending command down
+ */
+static bfa_status_t
+bfa_faa_validate_request(struct bfa_s *bfa)
+{
+ enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
+ u32 card_type = bfa->ioc.attr->card_type;
+
+ if (bfa_ioc_is_operational(&bfa->ioc)) {
+ if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
+ return BFA_STATUS_FEATURE_NOT_SUPPORTED;
+ } else {
+ if (!bfa_ioc_is_acq_addr(&bfa->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+ }
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
+{
+ struct bfi_faa_en_dis_s faa_enable_req;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ bfa_status_t status;
+
+ iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
+ iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
+
+ status = bfa_faa_validate_request(bfa);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ if (iocfc->faa_args.busy == BFA_TRUE)
+ return BFA_STATUS_DEVBUSY;
+
+ if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
+ return BFA_STATUS_FAA_ENABLED;
+
+ if (bfa_fcport_is_trunk_enabled(bfa))
+ return BFA_STATUS_ERROR_TRUNK_ENABLED;
+
+ bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
+ iocfc->faa_args.busy = BFA_TRUE;
+
+ memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
+ bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
+ BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));
+
+ bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
+ sizeof(struct bfi_faa_en_dis_s));
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
+ void *cbarg)
+{
+ struct bfi_faa_en_dis_s faa_disable_req;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ bfa_status_t status;
+
+ iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
+ iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
+
+ status = bfa_faa_validate_request(bfa);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ if (iocfc->faa_args.busy == BFA_TRUE)
+ return BFA_STATUS_DEVBUSY;
+
+ if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
+ return BFA_STATUS_FAA_DISABLED;
+
+ bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
+ iocfc->faa_args.busy = BFA_TRUE;
+
+ memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
+ bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
+ BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));
+
+ bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
+ sizeof(struct bfi_faa_en_dis_s));
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
+ bfa_cb_iocfc_t cbfn, void *cbarg)
+{
+ struct bfi_faa_query_s faa_attr_req;
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ bfa_status_t status;
+
+ iocfc->faa_args.faa_attr = attr;
+ iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
+ iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
+
+ status = bfa_faa_validate_request(bfa);
+ if (status != BFA_STATUS_OK)
+ return status;
+
+ if (iocfc->faa_args.busy == BFA_TRUE)
+ return BFA_STATUS_DEVBUSY;
+
+ iocfc->faa_args.busy = BFA_TRUE;
+ memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
+ bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
+ BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));
+
+ bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
+ sizeof(struct bfi_faa_query_s));
+
+ return BFA_STATUS_OK;
+}
+
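The three FAA entry points above share one pattern: validate the request, reject overlapping calls via faa_args.busy, then post a mailbox message and return immediately; the reply handlers that follow complete the exchange through faa_cbfn. A hedged caller sketch is shown below; the (cbarg, status) callback signature is inferred from how the reply handlers invoke it, and the function names are illustrative only:

	/* Illustrative only: issue an FAA query and consume the async completion. */
	static void
	example_faa_query_done(void *cbarg, bfa_status_t status)
	{
		struct bfa_faa_attr_s *attr = cbarg;

		if (status == BFA_STATUS_OK)
			pr_info("FAA state %d\n", attr->faa_state);
	}

	static bfa_status_t
	example_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr)
	{
		return bfa_faa_query(bfa, attr, example_faa_query_done, attr);
	}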
+/*
+ * FAA enable response
+ */
+static void
+bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
+ struct bfi_faa_en_dis_rsp_s *rsp)
+{
+ void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
+ bfa_status_t status = rsp->status;
+
+ WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
+
+ iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
+ iocfc->faa_args.busy = BFA_FALSE;
+}
+
+/*
+ * FAA disable response
+ */
+static void
+bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
+ struct bfi_faa_en_dis_rsp_s *rsp)
+{
+ void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
+ bfa_status_t status = rsp->status;
+
+ WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
+
+ iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
+ iocfc->faa_args.busy = BFA_FALSE;
+}
+
+/*
+ * FAA query response
+ */
+static void
+bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
+ bfi_faa_query_rsp_t *rsp)
+{
+ void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
+
+ if (iocfc->faa_args.faa_attr) {
+ iocfc->faa_args.faa_attr->faa = rsp->faa;
+ iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
+ iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
+ }
+
+ WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
+
+ iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
+ iocfc->faa_args.busy = BFA_FALSE;
+}
+
/*
* IOC enable request is complete
*/
@@ -719,15 +1023,25 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
struct bfa_s *bfa = bfa_arg;
+ if (status == BFA_STATUS_FAA_ACQ_ADDR) {
+ bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
+ bfa_iocfc_init_cb, bfa);
+ return;
+ }
+
if (status != BFA_STATUS_OK) {
bfa_isr_disable(bfa);
if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
bfa_iocfc_init_cb, bfa);
+ else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
+ bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
+ bfa_iocfc_enable_cb, bfa);
return;
}
bfa_iocfc_send_cfg(bfa);
+ bfa_dconf_modinit(bfa);
}
/*
@@ -759,7 +1073,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
struct bfa_s *bfa = bfa_arg;
- bfa->rme_process = BFA_FALSE;
+ bfa->queue_process = BFA_FALSE;
bfa_isr_disable(bfa);
bfa_iocfc_disable_submod(bfa);
@@ -786,15 +1100,47 @@ bfa_iocfc_reset_cbfn(void *bfa_arg)
* Query IOC memory requirement information.
*/
void
-bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
- u32 *dm_len)
+bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa)
{
- /* dma memory for IOC */
- *dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
+ int q, per_reqq_sz, per_rspq_sz;
+ struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
+ struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
+ struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
+ u32 dm_len = 0;
+
+ /* dma memory setup for IOC */
+ bfa_mem_dma_setup(meminfo, ioc_dma,
+ BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));
+
+ /* dma memory setup for REQ/RSP queues */
+ per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
+ BFA_DMA_ALIGN_SZ);
+ per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
+ BFA_DMA_ALIGN_SZ);
- bfa_iocfc_fw_cfg_sz(cfg, dm_len);
- bfa_iocfc_cqs_sz(cfg, dm_len);
- *km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
+ for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
+ bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
+ per_reqq_sz);
+ bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
+ per_rspq_sz);
+ }
+
+ /* IOCFC dma memory - calculate Shadow CI/PI size */
+ for (q = 0; q < cfg->fwcfg.num_cqs; q++)
+ dm_len += (2 * BFA_CACHELINE_SZ);
+
+ /* IOCFC dma memory - calculate config info / rsp size */
+ dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
+ dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
+ BFA_CACHELINE_SZ);
+
+ /* dma memory setup for IOCFC */
+ bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);
+
+ /* kva memory setup for IOCFC */
+ bfa_mem_kva_setup(meminfo, iocfc_kva,
+ ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
}
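The IOCFC DMA segment sized above is simply the sum of the per-CQ shadow CI/PI cachelines plus the cacheline-rounded config-info and config-response structures. A self-contained arithmetic sketch follows; every constant below is illustrative and not taken from the bfa/bfi headers:

	#include <stdio.h>

	#define ROUNDUP(x, a)	((((x) + (a) - 1) / (a)) * (a))	/* same idea as BFA_ROUNDUP */

	int main(void)
	{
		unsigned int cacheline = 64;			/* illustrative cacheline size */
		unsigned int cfg_sz = 208, cfgrsp_sz = 1824;	/* illustrative struct sizes */
		unsigned int num_cqs = 4, dm_len = 0;

		dm_len += num_cqs * 2 * cacheline;		/* shadow CI + PI per CQ */
		dm_len += ROUNDUP(cfg_sz, cacheline);		/* config info page */
		dm_len += ROUNDUP(cfgrsp_sz, cacheline);	/* config response page */

		printf("IOCFC dma segment: %u bytes\n", dm_len);
		return 0;
	}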
/*
@@ -802,7 +1148,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
*/
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+ struct bfa_pcidev_s *pcidev)
{
int i;
struct bfa_ioc_s *ioc = &bfa->ioc;
@@ -815,17 +1161,11 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
ioc->trcmod = bfa->trcmod;
bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
- /*
- * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
- */
- if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
- bfa_ioc_set_fcmode(&bfa->ioc);
-
- bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
+ bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
- bfa_iocfc_mem_claim(bfa, cfg, meminfo);
+ bfa_iocfc_mem_claim(bfa, cfg);
INIT_LIST_HEAD(&bfa->timer_mod.timer_q);
INIT_LIST_HEAD(&bfa->comp_q);
@@ -863,8 +1203,10 @@ bfa_iocfc_stop(struct bfa_s *bfa)
{
bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
- bfa->rme_process = BFA_FALSE;
- bfa_ioc_disable(&bfa->ioc);
+ bfa->queue_process = BFA_FALSE;
+ bfa_dconf_modexit(bfa);
+ if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
+ bfa_ioc_disable(&bfa->ioc);
}
void
@@ -879,12 +1221,22 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
switch (msg->mh.msg_id) {
case BFI_IOCFC_I2H_CFG_REPLY:
- iocfc->cfg_reply = &msg->cfg_reply;
bfa_iocfc_cfgrsp(bfa);
break;
case BFI_IOCFC_I2H_UPDATEQ_RSP:
iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
break;
+ case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
+ bfa_faa_enable_reply(iocfc,
+ (struct bfi_faa_en_dis_rsp_s *)msg);
+ break;
+ case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
+ bfa_faa_disable_reply(iocfc,
+ (struct bfi_faa_en_dis_rsp_s *)msg);
+ break;
+ case BFI_IOCFC_I2H_FAA_QUERY_RSP:
+ bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
+ break;
default:
WARN_ON(1);
}
@@ -926,7 +1278,7 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
return BFA_STATUS_DEVBUSY;
bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
- bfa_lpuid(bfa));
+ bfa_fn_lpu(bfa));
m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
m->delay = iocfc->cfginfo->intr_attr.delay;
m->latency = iocfc->cfginfo->intr_attr.latency;
@@ -934,17 +1286,17 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
bfa_trc(bfa, attr->delay);
bfa_trc(bfa, attr->latency);
- bfa_reqq_produce(bfa, BFA_REQQ_IOC);
+ bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
return BFA_STATUS_OK;
}
void
-bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
+bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
{
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
- bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
+ bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
}
/*
* Enable IOC after it is disabled.
@@ -954,6 +1306,7 @@ bfa_iocfc_enable(struct bfa_s *bfa)
{
bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
"IOC Enable");
+ bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
bfa_ioc_enable(&bfa->ioc);
}
@@ -964,7 +1317,7 @@ bfa_iocfc_disable(struct bfa_s *bfa)
"IOC Disable");
bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
- bfa->rme_process = BFA_FALSE;
+ bfa->queue_process = BFA_FALSE;
bfa_ioc_disable(&bfa->ioc);
}
@@ -1033,33 +1386,49 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
* starting address for each block and provide the same
* structure as input parameter to bfa_attach() call.
*
+ * @param[in] bfa - pointer to the bfa structure, used to fetch the DMA and
+ * KVA memory information of the bfa sub-modules.
+ *
* @return void
*
* Special Considerations: @note
*/
void
-bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
+bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa)
{
int i;
- u32 km_len = 0, dm_len = 0;
+ struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
+ struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
+ struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
+ struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
+ struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
+ struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
+ struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
WARN_ON((cfg == NULL) || (meminfo == NULL));
memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
- meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
- BFA_MEM_TYPE_KVA;
- meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
- BFA_MEM_TYPE_DMA;
- bfa_iocfc_meminfo(cfg, &km_len, &dm_len);
-
- for (i = 0; hal_mods[i]; i++)
- hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
+ /* Initialize the DMA & KVA meminfo queues */
+ INIT_LIST_HEAD(&meminfo->dma_info.qe);
+ INIT_LIST_HEAD(&meminfo->kva_info.qe);
- dm_len += bfa_port_meminfo();
+ bfa_iocfc_meminfo(cfg, meminfo, bfa);
- meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
- meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
+ for (i = 0; hal_mods[i]; i++)
+ hal_mods[i]->meminfo(cfg, meminfo, bfa);
+
+ /* dma info setup */
+ bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
+ bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
+ bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
+ bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
+ bfa_mem_dma_setup(meminfo, flash_dma,
+ bfa_flash_meminfo(cfg->drvcfg.min_cfg));
+ bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
+ bfa_mem_dma_setup(meminfo, phy_dma,
+ bfa_phy_meminfo(cfg->drvcfg.min_cfg));
}
/*
@@ -1092,28 +1461,46 @@ void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
- int i;
- struct bfa_mem_elem_s *melem;
+ int i;
+ struct bfa_mem_dma_s *dma_info, *dma_elem;
+ struct bfa_mem_kva_s *kva_info, *kva_elem;
+ struct list_head *dm_qe, *km_qe;
bfa->fcs = BFA_FALSE;
WARN_ON((cfg == NULL) || (meminfo == NULL));
- /*
- * initialize all memory pointers for iterative allocation
- */
- for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
- melem = meminfo->meminfo + i;
- melem->kva_curp = melem->kva;
- melem->dma_curp = melem->dma;
+ /* Initialize memory pointers for iterative allocation */
+ dma_info = &meminfo->dma_info;
+ dma_info->kva_curp = dma_info->kva;
+ dma_info->dma_curp = dma_info->dma;
+
+ kva_info = &meminfo->kva_info;
+ kva_info->kva_curp = kva_info->kva;
+
+ list_for_each(dm_qe, &dma_info->qe) {
+ dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+ dma_elem->kva_curp = dma_elem->kva;
+ dma_elem->dma_curp = dma_elem->dma;
}
- bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);
+ list_for_each(km_qe, &kva_info->qe) {
+ kva_elem = (struct bfa_mem_kva_s *) km_qe;
+ kva_elem->kva_curp = kva_elem->kva;
+ }
- for (i = 0; hal_mods[i]; i++)
- hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);
+ bfa_iocfc_attach(bfa, bfad, cfg, pcidev);
- bfa_com_port_attach(bfa, meminfo);
+ for (i = 0; hal_mods[i]; i++)
+ hal_mods[i]->attach(bfa, bfad, cfg, pcidev);
+
+ bfa_com_port_attach(bfa);
+ bfa_com_ablk_attach(bfa);
+ bfa_com_cee_attach(bfa);
+ bfa_com_sfp_attach(bfa);
+ bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
+ bfa_com_diag_attach(bfa);
+ bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
}
/*
@@ -1152,10 +1539,17 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
struct list_head *qe;
struct list_head *qen;
struct bfa_cb_qe_s *hcb_qe;
+ bfa_cb_cbfn_status_t cbfn;
list_for_each_safe(qe, qen, comp_q) {
hcb_qe = (struct bfa_cb_qe_s *) qe;
- hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
+ if (hcb_qe->pre_rmv) {
+ /* qe is invalid after return, dequeue before cbfn() */
+ list_del(qe);
+ cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
+ cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
+ } else
+ hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
}
}
@@ -1168,10 +1562,20 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
while (!list_empty(comp_q)) {
bfa_q_deq(comp_q, &qe);
hcb_qe = (struct bfa_cb_qe_s *) qe;
+ WARN_ON(hcb_qe->pre_rmv);
hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
}
}
+void
+bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
+{
+ if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
+ if (bfa->iocfc.cfgdone == BFA_TRUE)
+ bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
+ bfa_iocfc_init_cb, bfa);
+ }
+}
/*
* Return the list of PCI vendor/device id lists supported by this
@@ -1215,6 +1619,7 @@ bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
+ cfg->fwcfg.num_fwtio_reqs = 0;
cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
@@ -1236,6 +1641,7 @@ bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
cfg->fwcfg.num_rports = BFA_RPORT_MIN;
+ cfg->fwcfg.num_fwtio_reqs = 0;
cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index d85f93aea46..7b3d235d20b 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -40,7 +40,12 @@ enum {
BFA_MFG_TYPE_ASTRA = 807, /* Astra mezz card */
BFA_MFG_TYPE_LIGHTNING_P0 = 902, /* Lightning mezz card - old */
BFA_MFG_TYPE_LIGHTNING = 1741, /* Lightning mezz card */
- BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */
+ BFA_MFG_TYPE_PROWLER_F = 1560, /* Prowler FC only cards */
+ BFA_MFG_TYPE_PROWLER_N = 1410, /* Prowler NIC only cards */
+ BFA_MFG_TYPE_PROWLER_C = 1710, /* Prowler CNA only cards */
+ BFA_MFG_TYPE_PROWLER_D = 1860, /* Prowler Dual cards */
+ BFA_MFG_TYPE_CHINOOK = 1867, /* Chinook cards */
+ BFA_MFG_TYPE_INVALID = 0, /* Invalid card type */
};
#pragma pack(1)
@@ -53,7 +58,8 @@ enum {
(type) == BFA_MFG_TYPE_WANCHESE || \
(type) == BFA_MFG_TYPE_ASTRA || \
(type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
- (type) == BFA_MFG_TYPE_LIGHTNING))
+ (type) == BFA_MFG_TYPE_LIGHTNING || \
+ (type) == BFA_MFG_TYPE_CHINOOK))
/*
* Check if the card having old wwn/mac handling
@@ -124,30 +130,60 @@ enum bfa_status {
BFA_STATUS_ETIMER = 5, /* Timer expired - Retry, if persists,
* contact support */
BFA_STATUS_EPROTOCOL = 6, /* Protocol error */
+ BFA_STATUS_SFP_UNSUPP = 10, /* Unsupported SFP - Replace SFP */
+ BFA_STATUS_UNKNOWN_VFID = 11, /* VF_ID not found */
+ BFA_STATUS_DATACORRUPTED = 12, /* Diag returned data corrupted */
BFA_STATUS_DEVBUSY = 13, /* Device busy - Retry operation */
+ BFA_STATUS_HDMA_FAILED = 16, /* Host DMA failed - contact support */
+ BFA_STATUS_FLASH_BAD_LEN = 17, /* Flash bad length */
BFA_STATUS_UNKNOWN_LWWN = 18, /* LPORT PWWN not found */
BFA_STATUS_UNKNOWN_RWWN = 19, /* RPORT PWWN not found */
BFA_STATUS_VPORT_EXISTS = 21, /* VPORT already exists */
BFA_STATUS_VPORT_MAX = 22, /* Reached max VPORT supported limit */
BFA_STATUS_UNSUPP_SPEED = 23, /* Invalid Speed Check speed setting */
BFA_STATUS_INVLD_DFSZ = 24, /* Invalid Max data field size */
+ BFA_STATUS_CMD_NOTSUPP = 26, /* Command/API not supported */
BFA_STATUS_FABRIC_RJT = 29, /* Reject from attached fabric */
+ BFA_STATUS_UNKNOWN_VWWN = 30, /* VPORT PWWN not found */
+ BFA_STATUS_PORT_OFFLINE = 34, /* Port is not online */
BFA_STATUS_VPORT_WWN_BP = 46, /* WWN is same as base port's WWN */
+ BFA_STATUS_PORT_NOT_DISABLED = 47, /* Port not disabled - disable port */
BFA_STATUS_NO_FCPIM_NEXUS = 52, /* No FCP Nexus exists with the rport */
BFA_STATUS_IOC_FAILURE = 56, /* IOC failure - Retry, if persists
* contact support */
BFA_STATUS_INVALID_WWN = 57, /* Invalid WWN */
+ BFA_STATUS_ADAPTER_ENABLED = 60, /* Adapter is not disabled */
+ BFA_STATUS_IOC_NON_OP = 61, /* IOC is not operational */
+ BFA_STATUS_VERSION_FAIL = 70, /* Application/Driver version mismatch */
BFA_STATUS_DIAG_BUSY = 71, /* diag busy */
+ BFA_STATUS_BEACON_ON = 72, /* Port Beacon already on */
BFA_STATUS_ENOFSAVE = 78, /* No saved firmware trace */
BFA_STATUS_IOC_DISABLED = 82, /* IOC is already disabled */
+ BFA_STATUS_NO_SFP_DEV = 89, /* No SFP device - check or replace SFP */
+ BFA_STATUS_MEMTEST_FAILED = 90, /* Memory test failed - contact support */
+ BFA_STATUS_LEDTEST_OP = 109, /* LED test is operating */
BFA_STATUS_INVALID_MAC = 134, /* Invalid MAC address */
BFA_STATUS_PBC = 154, /* Operation not allowed for pre-boot
* configuration */
+ BFA_STATUS_BAD_FWCFG = 156, /* Bad firmware configuration */
+ BFA_STATUS_INVALID_VENDOR = 158, /* Invalid switch vendor */
+ BFA_STATUS_SFP_NOT_READY = 159, /* SFP info is not ready. Retry */
BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
* this adapter */
BFA_STATUS_TRUNK_DISABLED = 165, /* Trunking is disabled on
* the adapter */
BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
+ BFA_STATUS_PHY_NOT_PRESENT = 183, /* PHY module not present */
+ BFA_STATUS_FEATURE_NOT_SUPPORTED = 192, /* Feature not supported */
+ BFA_STATUS_ENTRY_EXISTS = 193, /* Entry already exists */
+ BFA_STATUS_ENTRY_NOT_EXISTS = 194, /* Entry does not exist */
+ BFA_STATUS_NO_CHANGE = 195, /* Feature already in that state */
+ BFA_STATUS_FAA_ENABLED = 197, /* FAA is already enabled */
+ BFA_STATUS_FAA_DISABLED = 198, /* FAA is already disabled */
+ BFA_STATUS_FAA_ACQUIRED = 199, /* FAA is already acquired */
+ BFA_STATUS_FAA_ACQ_ADDR = 200, /* Acquiring addr */
+ BFA_STATUS_ERROR_TRUNK_ENABLED = 203, /* Trunk enabled on adapter */
+ BFA_STATUS_MAX_ENTRY_REACHED = 212, /* MAX entry reached */
BFA_STATUS_MAX_VAL /* Unknown error code */
};
#define bfa_status_t enum bfa_status
@@ -265,6 +301,8 @@ enum bfa_ioc_state {
BFA_IOC_DISABLED = 10, /* IOC is disabled */
BFA_IOC_FWMISMATCH = 11, /* IOC f/w different from drivers */
BFA_IOC_ENABLING = 12, /* IOC is being enabled */
+ BFA_IOC_HWFAIL = 13, /* PCI mapping doesn't exist */
+ BFA_IOC_ACQ_ADDR = 14, /* Acquiring addr from fabric */
};
/*
@@ -294,6 +332,7 @@ struct bfa_ioc_drv_stats_s {
u32 enable_reqs;
u32 disable_replies;
u32 enable_replies;
+ u32 rsvd;
};
/*
@@ -320,7 +359,143 @@ struct bfa_ioc_attr_s {
struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */
struct bfa_ioc_pci_attr_s pci_attr;
u8 port_id; /* port number */
- u8 rsvd[7]; /* 64bit align */
+ u8 port_mode; /* bfa_mode_s */
+ u8 cap_bm; /* capability */
+ u8 port_mode_cfg; /* bfa_mode_s */
+ u8 rsvd[4]; /* 64bit align */
+};
+
+/*
+ * AEN related definitions
+ */
+enum bfa_aen_category {
+ BFA_AEN_CAT_ADAPTER = 1,
+ BFA_AEN_CAT_PORT = 2,
+ BFA_AEN_CAT_LPORT = 3,
+ BFA_AEN_CAT_RPORT = 4,
+ BFA_AEN_CAT_ITNIM = 5,
+ BFA_AEN_CAT_AUDIT = 8,
+ BFA_AEN_CAT_IOC = 9,
+};
+
+/* BFA adapter level events */
+enum bfa_adapter_aen_event {
+ BFA_ADAPTER_AEN_ADD = 1, /* New Adapter found event */
+ BFA_ADAPTER_AEN_REMOVE = 2, /* Adapter removed event */
+};
+
+struct bfa_adapter_aen_data_s {
+ char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+ u32 nports; /* Number of NPorts */
+ wwn_t pwwn; /* WWN of one of its physical port */
+};
+
+/* BFA physical port Level events */
+enum bfa_port_aen_event {
+ BFA_PORT_AEN_ONLINE = 1, /* Physical Port online event */
+ BFA_PORT_AEN_OFFLINE = 2, /* Physical Port offline event */
+ BFA_PORT_AEN_RLIR = 3, /* RLIR event, not supported */
+ BFA_PORT_AEN_SFP_INSERT = 4, /* SFP inserted event */
+ BFA_PORT_AEN_SFP_REMOVE = 5, /* SFP removed event */
+ BFA_PORT_AEN_SFP_POM = 6, /* SFP POM event */
+ BFA_PORT_AEN_ENABLE = 7, /* Physical Port enable event */
+ BFA_PORT_AEN_DISABLE = 8, /* Physical Port disable event */
+ BFA_PORT_AEN_AUTH_ON = 9, /* Physical Port auth success event */
+ BFA_PORT_AEN_AUTH_OFF = 10, /* Physical Port auth fail event */
+ BFA_PORT_AEN_DISCONNECT = 11, /* Physical Port disconnect event */
+ BFA_PORT_AEN_QOS_NEG = 12, /* Base Port QOS negotiation event */
+ BFA_PORT_AEN_FABRIC_NAME_CHANGE = 13, /* Fabric Name/WWN change */
+ BFA_PORT_AEN_SFP_ACCESS_ERROR = 14, /* SFP read error event */
+ BFA_PORT_AEN_SFP_UNSUPPORT = 15, /* Unsupported SFP event */
+};
+
+enum bfa_port_aen_sfp_pom {
+ BFA_PORT_AEN_SFP_POM_GREEN = 1, /* Normal */
+ BFA_PORT_AEN_SFP_POM_AMBER = 2, /* Warning */
+ BFA_PORT_AEN_SFP_POM_RED = 3, /* Critical */
+ BFA_PORT_AEN_SFP_POM_MAX = BFA_PORT_AEN_SFP_POM_RED
+};
+
+struct bfa_port_aen_data_s {
+ wwn_t pwwn; /* WWN of the physical port */
+ wwn_t fwwn; /* WWN of the fabric port */
+ u32 phy_port_num; /* For SFP related events */
+ u16 ioc_type;
+ u16 level; /* Only transitions will be informed */
+ mac_t mac; /* MAC address of the ethernet port */
+ u16 rsvd;
+};
+
+/* BFA AEN logical port events */
+enum bfa_lport_aen_event {
+ BFA_LPORT_AEN_NEW = 1, /* LPort created event */
+ BFA_LPORT_AEN_DELETE = 2, /* LPort deleted event */
+ BFA_LPORT_AEN_ONLINE = 3, /* LPort online event */
+ BFA_LPORT_AEN_OFFLINE = 4, /* LPort offline event */
+ BFA_LPORT_AEN_DISCONNECT = 5, /* LPort disconnect event */
+ BFA_LPORT_AEN_NEW_PROP = 6, /* VPort created event */
+ BFA_LPORT_AEN_DELETE_PROP = 7, /* VPort deleted event */
+ BFA_LPORT_AEN_NEW_STANDARD = 8, /* VPort created event */
+ BFA_LPORT_AEN_DELETE_STANDARD = 9, /* VPort deleted event */
+ BFA_LPORT_AEN_NPIV_DUP_WWN = 10, /* VPort with duplicate WWN */
+ BFA_LPORT_AEN_NPIV_FABRIC_MAX = 11, /* Max NPIV in fabric/fport */
+ BFA_LPORT_AEN_NPIV_UNKNOWN = 12, /* Unknown NPIV Error code */
+};
+
+struct bfa_lport_aen_data_s {
+ u16 vf_id; /* vf_id of this logical port */
+ u16 roles; /* Logical port mode,IM/TM/IP etc */
+ u32 rsvd;
+ wwn_t ppwwn; /* WWN of its physical port */
+ wwn_t lpwwn; /* WWN of this logical port */
+};
+
+/* BFA ITNIM events */
+enum bfa_itnim_aen_event {
+ BFA_ITNIM_AEN_ONLINE = 1, /* Target online */
+ BFA_ITNIM_AEN_OFFLINE = 2, /* Target offline */
+ BFA_ITNIM_AEN_DISCONNECT = 3, /* Target disconnected */
+};
+
+struct bfa_itnim_aen_data_s {
+ u16 vf_id; /* vf_id of the IT nexus */
+ u16 rsvd[3];
+ wwn_t ppwwn; /* WWN of its physical port */
+ wwn_t lpwwn; /* WWN of logical port */
+ wwn_t rpwwn; /* WWN of remote(target) port */
+};
+
+/* BFA audit events */
+enum bfa_audit_aen_event {
+ BFA_AUDIT_AEN_AUTH_ENABLE = 1,
+ BFA_AUDIT_AEN_AUTH_DISABLE = 2,
+ BFA_AUDIT_AEN_FLASH_ERASE = 3,
+ BFA_AUDIT_AEN_FLASH_UPDATE = 4,
+};
+
+struct bfa_audit_aen_data_s {
+ wwn_t pwwn;
+ int partition_inst;
+ int partition_type;
+};
+
+/* BFA IOC level events */
+enum bfa_ioc_aen_event {
+ BFA_IOC_AEN_HBGOOD = 1, /* Heart Beat restore event */
+ BFA_IOC_AEN_HBFAIL = 2, /* Heart Beat failure event */
+ BFA_IOC_AEN_ENABLE = 3, /* IOC enabled event */
+ BFA_IOC_AEN_DISABLE = 4, /* IOC disabled event */
+ BFA_IOC_AEN_FWMISMATCH = 5, /* IOC firmware mismatch */
+ BFA_IOC_AEN_FWCFG_ERROR = 6, /* IOC firmware config error */
+ BFA_IOC_AEN_INVALID_VENDOR = 7,
+ BFA_IOC_AEN_INVALID_NWWN = 8, /* Zero NWWN */
+ BFA_IOC_AEN_INVALID_PWWN = 9 /* Zero PWWN */
+};
+
+struct bfa_ioc_aen_data_s {
+ wwn_t pwwn;
+ u16 ioc_type;
+ mac_t mac;
};
/*
@@ -337,6 +512,21 @@ struct bfa_ioc_attr_s {
#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
+/*
+ * Initial capability definition
+ */
+#define BFA_MFG_IC_FC 0x01
+#define BFA_MFG_IC_ETH 0x02
+
+/*
+ * Adapter capability mask definition
+ */
+#define BFA_CM_HBA 0x01
+#define BFA_CM_CNA 0x02
+#define BFA_CM_NIC 0x04
+#define BFA_CM_FC16G 0x08
+#define BFA_CM_SRIOV 0x10
+#define BFA_CM_MEZZ 0x20
#pragma pack(1)
@@ -344,31 +534,39 @@ struct bfa_ioc_attr_s {
* All numerical fields are in big-endian format.
*/
struct bfa_mfg_block_s {
- u8 version; /* manufacturing block version */
- u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */
- u16 mfgsize; /* mfg block size */
- u16 u16_chksum; /* old u16 checksum */
- char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
- char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
- u8 mfg_day; /* manufacturing day */
- u8 mfg_month; /* manufacturing month */
- u16 mfg_year; /* manufacturing year */
- wwn_t mfg_wwn; /* wwn base for this adapter */
- u8 num_wwn; /* number of wwns assigned */
- u8 mfg_speeds; /* speeds allowed for this adapter */
- u8 rsv[2];
- char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
- char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
- char
- supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
- char
- supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
- mac_t mfg_mac; /* mac address */
- u8 num_mac; /* number of mac addresses */
- u8 rsv2;
- u32 mfg_type; /* card type */
- u8 rsv3[108];
- u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
+ u8 version; /*!< manufacturing block version */
+ u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
+ u16 mfgsize; /*!< mfg block size */
+ u16 u16_chksum; /*!< old u16 checksum */
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+ u8 mfg_day; /*!< manufacturing day */
+ u8 mfg_month; /*!< manufacturing month */
+ u16 mfg_year; /*!< manufacturing year */
+ wwn_t mfg_wwn; /*!< wwn base for this adapter */
+ u8 num_wwn; /*!< number of wwns assigned */
+ u8 mfg_speeds; /*!< speeds allowed for this adapter */
+ u8 rsv[2];
+ char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+ char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+ char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+ char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+ mac_t mfg_mac; /*!< base mac address */
+ u8 num_mac; /*!< number of mac addresses */
+ u8 rsv2;
+ u32 card_type; /*!< card type */
+ char cap_nic; /*!< capability nic */
+ char cap_cna; /*!< capability cna */
+ char cap_hba; /*!< capability hba */
+ char cap_fc16g; /*!< capability fc 16g */
+ char cap_sriov; /*!< capability sriov */
+ char cap_mezz; /*!< capability mezz */
+ u8 rsv3;
+ u8 mfg_nports; /*!< number of ports */
+ char media[8]; /*!< xfi/xaui */
+ char initial_mode[8]; /*!< initial mode: hba/cna/nic */
+ u8 rsv4[84];
+ u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
};
#pragma pack()
@@ -386,17 +584,27 @@ enum {
BFA_PCI_DEVICE_ID_FC_8G1P = 0x17,
BFA_PCI_DEVICE_ID_CT = 0x14,
BFA_PCI_DEVICE_ID_CT_FC = 0x21,
+ BFA_PCI_DEVICE_ID_CT2 = 0x22,
};
-#define bfa_asic_id_ct(devid) \
- ((devid) == BFA_PCI_DEVICE_ID_CT || \
- (devid) == BFA_PCI_DEVICE_ID_CT_FC)
+#define bfa_asic_id_cb(__d) \
+ ((__d) == BFA_PCI_DEVICE_ID_FC_8G2P || \
+ (__d) == BFA_PCI_DEVICE_ID_FC_8G1P)
+#define bfa_asic_id_ct(__d) \
+ ((__d) == BFA_PCI_DEVICE_ID_CT || \
+ (__d) == BFA_PCI_DEVICE_ID_CT_FC)
+#define bfa_asic_id_ct2(__d) ((__d) == BFA_PCI_DEVICE_ID_CT2)
+#define bfa_asic_id_ctc(__d) \
+ (bfa_asic_id_ct(__d) || bfa_asic_id_ct2(__d))
/*
* PCI sub-system device and vendor ID information
*/
enum {
BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
+ BFA_PCI_CT2_SSID_FCoE = 0x22,
+ BFA_PCI_CT2_SSID_ETH = 0x23,
+ BFA_PCI_CT2_SSID_FC = 0x24,
};
/*
@@ -416,9 +624,7 @@ enum bfa_port_speed {
BFA_PORT_SPEED_8GBPS = 8,
BFA_PORT_SPEED_10GBPS = 10,
BFA_PORT_SPEED_16GBPS = 16,
- BFA_PORT_SPEED_AUTO =
- (BFA_PORT_SPEED_1GBPS | BFA_PORT_SPEED_2GBPS |
- BFA_PORT_SPEED_4GBPS | BFA_PORT_SPEED_8GBPS),
+ BFA_PORT_SPEED_AUTO = 0xf,
};
#define bfa_port_speed_t enum bfa_port_speed
@@ -454,6 +660,20 @@ struct bfa_boot_bootlun_s {
/*
* BOOT boot configuraton
*/
+struct bfa_boot_cfg_s {
+ u8 version;
+ u8 rsvd1;
+ u16 chksum;
+ u8 enable; /* enable/disable SAN boot */
+ u8 speed; /* boot speed settings */
+ u8 topology; /* boot topology setting */
+ u8 bootopt; /* bfa_boot_bootopt_t */
+ u32 nbluns; /* number of boot luns */
+ u32 rsvd2;
+ struct bfa_boot_bootlun_s blun[BFA_BOOT_BOOTLUN_MAX];
+ struct bfa_boot_bootlun_s blun_disc[BFA_BOOT_BOOTLUN_MAX];
+};
+
struct bfa_boot_pbc_s {
u8 enable; /* enable/disable SAN boot */
u8 speed; /* boot speed settings */
@@ -463,4 +683,470 @@ struct bfa_boot_pbc_s {
struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
};
+struct bfa_ethboot_cfg_s {
+ u8 version;
+ u8 rsvd1;
+ u16 chksum;
+ u8 enable; /* enable/disable Eth/PXE boot */
+ u8 rsvd2;
+ u16 vlan;
+};
+
+/*
+ * ASIC block configuration related structures
+ */
+#define BFA_ABLK_MAX_PORTS 2
+#define BFA_ABLK_MAX_PFS 16
+#define BFA_ABLK_MAX 2
+
+#pragma pack(1)
+enum bfa_mode_s {
+ BFA_MODE_HBA = 1,
+ BFA_MODE_CNA = 2,
+ BFA_MODE_NIC = 3
+};
+
+struct bfa_adapter_cfg_mode_s {
+ u16 max_pf;
+ u16 max_vf;
+ enum bfa_mode_s mode;
+};
+
+struct bfa_ablk_cfg_pf_s {
+ u16 pers;
+ u8 port_id;
+ u8 optrom;
+ u8 valid;
+ u8 sriov;
+ u8 max_vfs;
+ u8 rsvd[1];
+ u16 num_qpairs;
+ u16 num_vectors;
+ u32 bw;
+};
+
+struct bfa_ablk_cfg_port_s {
+ u8 mode;
+ u8 type;
+ u8 max_pfs;
+ u8 rsvd[5];
+};
+
+struct bfa_ablk_cfg_inst_s {
+ u8 nports;
+ u8 max_pfs;
+ u8 rsvd[6];
+ struct bfa_ablk_cfg_pf_s pf_cfg[BFA_ABLK_MAX_PFS];
+ struct bfa_ablk_cfg_port_s port_cfg[BFA_ABLK_MAX_PORTS];
+};
+
+struct bfa_ablk_cfg_s {
+ struct bfa_ablk_cfg_inst_s inst[BFA_ABLK_MAX];
+};
+
+
+/*
+ * SFP module specific
+ */
+#define SFP_DIAGMON_SIZE 10 /* num bytes of diag monitor data */
+
+/* SFP state change notification event */
+#define BFA_SFP_SCN_REMOVED 0
+#define BFA_SFP_SCN_INSERTED 1
+#define BFA_SFP_SCN_POM 2
+#define BFA_SFP_SCN_FAILED 3
+#define BFA_SFP_SCN_UNSUPPORT 4
+#define BFA_SFP_SCN_VALID 5
+
+enum bfa_defs_sfp_media_e {
+ BFA_SFP_MEDIA_UNKNOWN = 0x00,
+ BFA_SFP_MEDIA_CU = 0x01,
+ BFA_SFP_MEDIA_LW = 0x02,
+ BFA_SFP_MEDIA_SW = 0x03,
+ BFA_SFP_MEDIA_EL = 0x04,
+ BFA_SFP_MEDIA_UNSUPPORT = 0x05,
+};
+
+/*
+ * values for xmtr_tech above
+ */
+enum {
+ SFP_XMTR_TECH_CU = (1 << 0), /* copper FC-BaseT */
+ SFP_XMTR_TECH_CP = (1 << 1), /* copper passive */
+ SFP_XMTR_TECH_CA = (1 << 2), /* copper active */
+ SFP_XMTR_TECH_LL = (1 << 3), /* longwave laser */
+ SFP_XMTR_TECH_SL = (1 << 4), /* shortwave laser w/ OFC */
+ SFP_XMTR_TECH_SN = (1 << 5), /* shortwave laser w/o OFC */
+ SFP_XMTR_TECH_EL_INTRA = (1 << 6), /* elec intra-enclosure */
+ SFP_XMTR_TECH_EL_INTER = (1 << 7), /* elec inter-enclosure */
+ SFP_XMTR_TECH_LC = (1 << 8), /* longwave laser */
+ SFP_XMTR_TECH_SA = (1 << 9)
+};
+
+/*
+ * Serial ID: Data Fields -- Address A0h
+ * Basic ID field total 64 bytes
+ */
+struct sfp_srlid_base_s {
+ u8 id; /* 00: Identifier */
+ u8 extid; /* 01: Extended Identifier */
+ u8 connector; /* 02: Connector */
+ u8 xcvr[8]; /* 03-10: Transceiver */
+ u8 encoding; /* 11: Encoding */
+ u8 br_norm; /* 12: BR, Nominal */
+ u8 rate_id; /* 13: Rate Identifier */
+ u8 len_km; /* 14: Length single mode km */
+ u8 len_100m; /* 15: Length single mode 100m */
+ u8 len_om2; /* 16: Length om2 fiber 10m */
+ u8 len_om1; /* 17: Length om1 fiber 10m */
+ u8 len_cu; /* 18: Length copper 1m */
+ u8 len_om3; /* 19: Length om3 fiber 10m */
+ u8 vendor_name[16];/* 20-35 */
+ u8 unalloc1;
+ u8 vendor_oui[3]; /* 37-39 */
+ u8 vendor_pn[16]; /* 40-55 */
+ u8 vendor_rev[4]; /* 56-59 */
+ u8 wavelen[2]; /* 60-61 */
+ u8 unalloc2;
+ u8 cc_base; /* 63: check code for base id field */
+};
+
+/*
+ * Serial ID: Data Fields -- Address A0h
+ * Extended id field total 32 bytes
+ */
+struct sfp_srlid_ext_s {
+ u8 options[2];
+ u8 br_max;
+ u8 br_min;
+ u8 vendor_sn[16];
+ u8 date_code[8];
+ u8 diag_mon_type; /* 92: Diagnostic Monitoring type */
+ u8 en_options;
+ u8 sff_8472;
+ u8 cc_ext;
+};
+
+/*
+ * Diagnostic: Data Fields -- Address A2h
+ * Diagnostic and control/status base field total 96 bytes
+ */
+struct sfp_diag_base_s {
+ /*
+ * Alarm and warning Thresholds 40 bytes
+ */
+ u8 temp_high_alarm[2]; /* 00-01 */
+ u8 temp_low_alarm[2]; /* 02-03 */
+ u8 temp_high_warning[2]; /* 04-05 */
+ u8 temp_low_warning[2]; /* 06-07 */
+
+ u8 volt_high_alarm[2]; /* 08-09 */
+ u8 volt_low_alarm[2]; /* 10-11 */
+ u8 volt_high_warning[2]; /* 12-13 */
+ u8 volt_low_warning[2]; /* 14-15 */
+
+ u8 bias_high_alarm[2]; /* 16-17 */
+ u8 bias_low_alarm[2]; /* 18-19 */
+ u8 bias_high_warning[2]; /* 20-21 */
+ u8 bias_low_warning[2]; /* 22-23 */
+
+ u8 tx_pwr_high_alarm[2]; /* 24-25 */
+ u8 tx_pwr_low_alarm[2]; /* 26-27 */
+ u8 tx_pwr_high_warning[2]; /* 28-29 */
+ u8 tx_pwr_low_warning[2]; /* 30-31 */
+
+ u8 rx_pwr_high_alarm[2]; /* 32-33 */
+ u8 rx_pwr_low_alarm[2]; /* 34-35 */
+ u8 rx_pwr_high_warning[2]; /* 36-37 */
+ u8 rx_pwr_low_warning[2]; /* 38-39 */
+
+ u8 unallocate_1[16];
+
+ /*
+ * ext_cal_const[36]
+ */
+ u8 rx_pwr[20];
+ u8 tx_i[4];
+ u8 tx_pwr[4];
+ u8 temp[4];
+ u8 volt[4];
+ u8 unallocate_2[3];
+ u8 cc_dmi;
+};
+
+/*
+ * Diagnostic: Data Fields -- Address A2h
+ * Diagnostic and control/status extended field total 24 bytes
+ */
+struct sfp_diag_ext_s {
+ u8 diag[SFP_DIAGMON_SIZE];
+ u8 unalloc1[4];
+ u8 status_ctl;
+ u8 rsvd;
+ u8 alarm_flags[2];
+ u8 unalloc2[2];
+ u8 warning_flags[2];
+ u8 ext_status_ctl[2];
+};
+
+struct sfp_mem_s {
+ struct sfp_srlid_base_s srlid_base;
+ struct sfp_srlid_ext_s srlid_ext;
+ struct sfp_diag_base_s diag_base;
+ struct sfp_diag_ext_s diag_ext;
+};
+
+/*
+ * transceiver codes (SFF-8472 Rev 10.2 Table 3.5)
+ */
+union sfp_xcvr_e10g_code_u {
+ u8 b;
+ struct {
+#ifdef __BIGENDIAN
+ u8 e10g_unall:1; /* 10G Ethernet compliance */
+ u8 e10g_lrm:1;
+ u8 e10g_lr:1;
+ u8 e10g_sr:1;
+ u8 ib_sx:1; /* Infiniband compliance */
+ u8 ib_lx:1;
+ u8 ib_cu_a:1;
+ u8 ib_cu_p:1;
+#else
+ u8 ib_cu_p:1;
+ u8 ib_cu_a:1;
+ u8 ib_lx:1;
+ u8 ib_sx:1; /* Infiniband compliance */
+ u8 e10g_sr:1;
+ u8 e10g_lr:1;
+ u8 e10g_lrm:1;
+ u8 e10g_unall:1; /* 10G Ethernet compliance */
+#endif
+ } r;
+};
+
+union sfp_xcvr_so1_code_u {
+ u8 b;
+ struct {
+ u8 escon:2; /* ESCON compliance code */
+ u8 oc192_reach:1; /* SONET compliance code */
+ u8 so_reach:2;
+ u8 oc48_reach:3;
+ } r;
+};
+
+union sfp_xcvr_so2_code_u {
+ u8 b;
+ struct {
+ u8 reserved:1;
+ u8 oc12_reach:3; /* OC12 reach */
+ u8 reserved1:1;
+ u8 oc3_reach:3; /* OC3 reach */
+ } r;
+};
+
+union sfp_xcvr_eth_code_u {
+ u8 b;
+ struct {
+ u8 base_px:1;
+ u8 base_bx10:1;
+ u8 e100base_fx:1;
+ u8 e100base_lx:1;
+ u8 e1000base_t:1;
+ u8 e1000base_cx:1;
+ u8 e1000base_lx:1;
+ u8 e1000base_sx:1;
+ } r;
+};
+
+struct sfp_xcvr_fc1_code_s {
+ u8 link_len:5; /* FC link length */
+ u8 xmtr_tech2:3;
+ u8 xmtr_tech1:7; /* FC transmitter technology */
+ u8 reserved1:1;
+};
+
+union sfp_xcvr_fc2_code_u {
+ u8 b;
+ struct {
+ u8 tw_media:1; /* twin axial pair (tw) */
+ u8 tp_media:1; /* shielded twisted pair (sp) */
+ u8 mi_media:1; /* miniature coax (mi) */
+ u8 tv_media:1; /* video coax (tv) */
+ u8 m6_media:1; /* multimode, 62.5um (m6) */
+ u8 m5_media:1; /* multimode, 50um (m5) */
+ u8 reserved:1;
+ u8 sm_media:1; /* single mode (sm) */
+ } r;
+};
+
+union sfp_xcvr_fc3_code_u {
+ u8 b;
+ struct {
+#ifdef __BIGENDIAN
+ u8 rsv4:1;
+ u8 mb800:1; /* 800 Mbytes/sec */
+ u8 mb1600:1; /* 1600 Mbytes/sec */
+ u8 mb400:1; /* 400 Mbytes/sec */
+ u8 rsv2:1;
+ u8 mb200:1; /* 200 Mbytes/sec */
+ u8 rsv1:1;
+ u8 mb100:1; /* 100 Mbytes/sec */
+#else
+ u8 mb100:1; /* 100 Mbytes/sec */
+ u8 rsv1:1;
+ u8 mb200:1; /* 200 Mbytes/sec */
+ u8 rsv2:1;
+ u8 mb400:1; /* 400 Mbytes/sec */
+ u8 mb1600:1; /* 1600 Mbytes/sec */
+ u8 mb800:1; /* 800 Mbytes/sec */
+ u8 rsv4:1;
+#endif
+ } r;
+};
+
+struct sfp_xcvr_s {
+ union sfp_xcvr_e10g_code_u e10g;
+ union sfp_xcvr_so1_code_u so1;
+ union sfp_xcvr_so2_code_u so2;
+ union sfp_xcvr_eth_code_u eth;
+ struct sfp_xcvr_fc1_code_s fc1;
+ union sfp_xcvr_fc2_code_u fc2;
+ union sfp_xcvr_fc3_code_u fc3;
+};
+
+/*
+ * Flash module specific
+ */
+#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */
+#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */
+
+enum bfa_flash_part_type {
+ BFA_FLASH_PART_OPTROM = 1, /* option rom partition */
+ BFA_FLASH_PART_FWIMG = 2, /* firmware image partition */
+ BFA_FLASH_PART_FWCFG = 3, /* firmware tuneable config */
+ BFA_FLASH_PART_DRV = 4, /* IOC driver config */
+ BFA_FLASH_PART_BOOT = 5, /* boot config */
+ BFA_FLASH_PART_ASIC = 6, /* asic bootstrap configuration */
+ BFA_FLASH_PART_MFG = 7, /* manufacturing block partition */
+ BFA_FLASH_PART_OPTROM2 = 8, /* 2nd option rom partition */
+ BFA_FLASH_PART_VPD = 9, /* vpd data of OEM info */
+ BFA_FLASH_PART_PBC = 10, /* pre-boot config */
+ BFA_FLASH_PART_BOOTOVL = 11, /* boot overlay partition */
+ BFA_FLASH_PART_LOG = 12, /* firmware log partition */
+ BFA_FLASH_PART_PXECFG = 13, /* pxe boot config partition */
+ BFA_FLASH_PART_PXEOVL = 14, /* pxe boot overlay partition */
+ BFA_FLASH_PART_PORTCFG = 15, /* port cfg partition */
+ BFA_FLASH_PART_ASICBK = 16, /* asic backup partition */
+};
+
+/*
+ * flash partition attributes
+ */
+struct bfa_flash_part_attr_s {
+ u32 part_type; /* partition type */
+ u32 part_instance; /* partition instance */
+ u32 part_off; /* partition offset */
+ u32 part_size; /* partition size */
+ u32 part_len; /* partition content length */
+ u32 part_status; /* partition status */
+ char rsv[BFA_FLASH_PART_ENTRY_SIZE - 24];
+};
+
+/*
+ * flash attributes
+ */
+struct bfa_flash_attr_s {
+ u32 status; /* flash overall status */
+ u32 npart; /* num of partitions */
+ struct bfa_flash_part_attr_s part[BFA_FLASH_PART_MAX];
+};
+
+/*
+ * DIAG module specific
+ */
+#define LB_PATTERN_DEFAULT 0xB5B5B5B5
+#define QTEST_CNT_DEFAULT 10
+#define QTEST_PAT_DEFAULT LB_PATTERN_DEFAULT
+
+struct bfa_diag_memtest_s {
+ u8 algo;
+ u8 rsvd[7];
+};
+
+struct bfa_diag_memtest_result {
+ u32 status;
+ u32 addr;
+ u32 exp; /* expected value read from reg */
+ u32 act; /* actual value read from reg */
+ u32 err_status; /* error status reg */
+ u32 err_status1; /* extra error info reg */
+ u32 err_addr; /* error address reg */
+ u8 algo;
+ u8 rsv[3];
+};
+
+struct bfa_diag_loopback_result_s {
+ u32 numtxmfrm; /* no. of transmitted frames */
+ u32 numosffrm; /* no. of outstanding frames */
+ u32 numrcvfrm; /* no. of received good frames */
+ u32 badfrminf; /* mis-match info */
+ u32 badfrmnum; /* mis-match frame number */
+ u8 status; /* loopback test result */
+ u8 rsvd[3];
+};
+
+struct bfa_diag_ledtest_s {
+ u32 cmd; /* bfa_led_op_t */
+ u32 color; /* bfa_led_color_t */
+ u16 freq; /* no. of blinks every 10 secs */
+ u8 led; /* bitmap of LEDs to be tested */
+ u8 rsvd[5];
+};
+
+struct bfa_diag_loopback_s {
+ u32 loopcnt;
+ u32 pattern;
+ u8 lb_mode; /* bfa_port_opmode_t */
+ u8 speed; /* bfa_port_speed_t */
+ u8 rsvd[2];
+};
+
+/*
+ * PHY module specific
+ */
+enum bfa_phy_status_e {
+ BFA_PHY_STATUS_GOOD = 0, /* phy is good */
+ BFA_PHY_STATUS_NOT_PRESENT = 1, /* phy does not exist */
+ BFA_PHY_STATUS_BAD = 2, /* phy is bad */
+};
+
+/*
+ * phy attributes for phy query
+ */
+struct bfa_phy_attr_s {
+ u32 status; /* phy present/absent status */
+ u32 length; /* firmware length */
+ u32 fw_ver; /* firmware version */
+ u32 an_status; /* AN status */
+ u32 pma_pmd_status; /* PMA/PMD link status */
+ u32 pma_pmd_signal; /* PMA/PMD signal detect */
+ u32 pcs_status; /* PCS link status */
+};
+
+/*
+ * phy stats
+ */
+struct bfa_phy_stats_s {
+ u32 status; /* phy stats status */
+ u32 link_breaks; /* Num of link breaks after linkup */
+ u32 pma_pmd_fault; /* PMA/PMD fault */
+ u32 pcs_fault; /* PCS fault */
+ u32 speed_neg; /* Num of speed negotiation */
+ u32 tx_eq_training; /* Num of TX EQ training */
+ u32 tx_eq_timeout; /* Num of TX EQ timeout */
+ u32 crc_error; /* Num of CRC errors */
+};
+
+#pragma pack()
+
#endif /* __BFA_DEFS_H__ */
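As a reading aid for the new flash attribute structures added above, here is a
minimal sketch (not part of the patch; the helper name find_flash_part() is
hypothetical and only the bfa_defs.h definitions added here are assumed) of how
a caller holding a filled-in struct bfa_flash_attr_s could look up one
partition entry by type, e.g. BFA_FLASH_PART_FWIMG:

/* Illustrative only: walk the partition table returned by a flash query. */
static const struct bfa_flash_part_attr_s *
find_flash_part(const struct bfa_flash_attr_s *attr, u32 type)
{
	u32 i;

	for (i = 0; i < attr->npart && i < BFA_FLASH_PART_MAX; i++) {
		if (attr->part[i].part_type == type)
			return &attr->part[i];
	}
	return NULL;	/* partition of this type not present */
}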
diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h
index 191d34a58b9..3bbc583f65c 100644
--- a/drivers/scsi/bfa/bfa_defs_fcs.h
+++ b/drivers/scsi/bfa/bfa_defs_fcs.h
@@ -90,12 +90,14 @@ enum bfa_lport_role {
* FCS port configuration.
*/
struct bfa_lport_cfg_s {
- wwn_t pwwn; /* port wwn */
- wwn_t nwwn; /* node wwn */
- struct bfa_lport_symname_s sym_name; /* vm port symbolic name */
- bfa_boolean_t preboot_vp; /* vport created from PBC */
- enum bfa_lport_role roles; /* FCS port roles */
- u8 tag[16]; /* opaque tag from application */
+ wwn_t pwwn; /* port wwn */
+ wwn_t nwwn; /* node wwn */
+ struct bfa_lport_symname_s sym_name; /* vm port symbolic name */
+ enum bfa_lport_role roles; /* FCS port roles */
+ u32 rsvd;
+ bfa_boolean_t preboot_vp; /* vport created from PBC */
+ u8 tag[16]; /* opaque tag from application */
+ u8 padding[4];
};
/*
@@ -249,12 +251,13 @@ enum bfa_vport_state {
BFA_FCS_VPORT_FDISC_SEND = 2,
BFA_FCS_VPORT_FDISC = 3,
BFA_FCS_VPORT_FDISC_RETRY = 4,
- BFA_FCS_VPORT_ONLINE = 5,
- BFA_FCS_VPORT_DELETING = 6,
- BFA_FCS_VPORT_CLEANUP = 6,
- BFA_FCS_VPORT_LOGO_SEND = 7,
- BFA_FCS_VPORT_LOGO = 8,
- BFA_FCS_VPORT_ERROR = 9,
+ BFA_FCS_VPORT_FDISC_RSP_WAIT = 5,
+ BFA_FCS_VPORT_ONLINE = 6,
+ BFA_FCS_VPORT_DELETING = 7,
+ BFA_FCS_VPORT_CLEANUP = 8,
+ BFA_FCS_VPORT_LOGO_SEND = 9,
+ BFA_FCS_VPORT_LOGO = 10,
+ BFA_FCS_VPORT_ERROR = 11,
BFA_FCS_VPORT_MAX_STATE,
};
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 207f598877c..863c6ba7d5e 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -47,13 +47,12 @@ struct bfa_iocfc_fwcfg_s {
u16 num_rports; /* number of remote ports */
u16 num_ioim_reqs; /* number of IO reqs */
u16 num_tskim_reqs; /* task management requests */
- u16 num_iotm_reqs; /* number of TM IO reqs */
- u16 num_tsktm_reqs; /* TM task management requests*/
+ u16 num_fwtio_reqs; /* number of TM IO reqs in FW */
u16 num_fcxp_reqs; /* unassisted FC exchanges */
u16 num_uf_bufs; /* unsolicited recv buffers */
u8 num_cqs;
u8 fw_tick_res; /* FW clock resolution in ms */
- u8 rsvd[4];
+ u8 rsvd[2];
};
#pragma pack()
@@ -66,8 +65,12 @@ struct bfa_iocfc_drvcfg_s {
u16 ioc_recover; /* IOC recovery mode */
u16 min_cfg; /* minimum configuration */
u16 path_tov; /* device path timeout */
+ u16 num_tio_reqs; /*!< number of TM IO reqs */
+ u8 port_mode;
+ u8 rsvd_a;
bfa_boolean_t delay_comp; /* delay completion of
failed inflight IOs */
+ u16 num_ttsk_reqs; /* TM task management requests */
u32 rsvd;
};
@@ -82,7 +85,7 @@ struct bfa_iocfc_cfg_s {
/*
* IOC firmware IO stats
*/
-struct bfa_fw_io_stats_s {
+struct bfa_fw_ioim_stats_s {
u32 host_abort; /* IO aborted by host driver*/
u32 host_cleanup; /* IO clean up by host driver */
@@ -152,6 +155,54 @@ struct bfa_fw_io_stats_s {
*/
};
+struct bfa_fw_tio_stats_s {
+ u32 tio_conf_proc; /* TIO CONF processed */
+ u32 tio_conf_drop; /* TIO CONF dropped */
+ u32 tio_cleanup_req; /* TIO cleanup requested */
+ u32 tio_cleanup_comp; /* TIO cleanup completed */
+ u32 tio_abort_rsp; /* TIO abort response */
+ u32 tio_abort_rsp_comp; /* TIO abort rsp completed */
+ u32 tio_abts_req; /* TIO ABTS requested */
+ u32 tio_abts_ack; /* TIO ABTS ack-ed */
+ u32 tio_abts_ack_nocomp; /* TIO ABTS ack-ed but not completed */
+ u32 tio_abts_tmo; /* TIO ABTS timeout */
+ u32 tio_snsdata_dma; /* TIO sense data DMA */
+ u32 tio_rxwchan_wait; /* TIO waiting for RX wait channel */
+ u32 tio_rxwchan_avail; /* TIO RX wait channel available */
+ u32 tio_hit_bls; /* TIO IOH BLS event */
+ u32 tio_uf_recv; /* TIO received UF */
+ u32 tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */
+ u32 tio_wr_invalid_sm;/* TIO write reqst in wrong state machine */
+
+ u32 ds_rxwchan_wait; /* DS waiting for RX wait channel */
+ u32 ds_rxwchan_avail; /* DS RX wait channel available */
+ u32 ds_unaligned_rd; /* DS unaligned read */
+ u32 ds_rdcomp_invalid_sm; /* DS read completed in wrong state machine */
+ u32 ds_wrcomp_invalid_sm; /* DS write completed in wrong state machine */
+ u32 ds_flush_req; /* DS flush requested */
+ u32 ds_flush_comp; /* DS flush completed */
+ u32 ds_xfrdy_exp; /* DS XFER_RDY expired */
+ u32 ds_seq_cnt_err; /* DS seq cnt error */
+ u32 ds_seq_len_err; /* DS seq len error */
+ u32 ds_data_oor; /* DS data out of order */
+ u32 ds_hit_bls; /* DS hit BLS */
+ u32 ds_edtov_timer_exp; /* DS edtov expired */
+ u32 ds_cpu_owned; /* DS cpu owned */
+ u32 ds_hit_class2; /* DS hit class2 */
+ u32 ds_length_err; /* DS length error */
+ u32 ds_ro_ooo_err; /* DS relative offset out-of-order error */
+ u32 ds_rectov_timer_exp; /* DS rectov expired */
+ u32 ds_unexp_fr_err; /* DS unexp frame error */
+};
+
+/*
+ * IOC firmware IO stats
+ */
+struct bfa_fw_io_stats_s {
+ struct bfa_fw_ioim_stats_s ioim_stats;
+ struct bfa_fw_tio_stats_s tio_stats;
+};
+
/*
* IOC port firmware stats
*/
@@ -205,6 +256,7 @@ struct bfa_fw_port_lksm_stats_s {
u32 nos_tx; /* No. of times NOS tx started */
u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
+ u32 bbsc_lr; /* LKSM LR tx for credit recovery */
};
struct bfa_fw_port_snsm_stats_s {
@@ -216,6 +268,7 @@ struct bfa_fw_port_snsm_stats_s {
u32 error_resets; /* error resets initiated by upsm */
u32 sync_lost; /* Sync loss count */
u32 sig_lost; /* Signal loss count */
+ u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
};
struct bfa_fw_port_physm_stats_s {
@@ -266,8 +319,8 @@ struct bfa_fw_fcoe_stats_s {
* IOC firmware FCoE port stats
*/
struct bfa_fw_fcoe_port_stats_s {
- struct bfa_fw_fcoe_stats_s fcoe_stats;
- struct bfa_fw_fip_stats_s fip_stats;
+ struct bfa_fw_fcoe_stats_s fcoe_stats;
+ struct bfa_fw_fip_stats_s fip_stats;
};
/*
@@ -416,6 +469,7 @@ struct bfa_fw_stats_s {
* QoS states
*/
enum bfa_qos_state {
+ BFA_QOS_DISABLED = 0, /* QoS is disabled */
BFA_QOS_ONLINE = 1, /* QoS is online */
BFA_QOS_OFFLINE = 2, /* QoS is offline */
};
@@ -618,6 +672,12 @@ struct bfa_itnim_iostats_s {
u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
u32 tm_cleanups; /* TM cleanup requests */
u32 tm_cleanup_comps; /* TM cleanup completions */
+ u32 lm_lun_across_sg; /* LM lun is across sg data buf */
+ u32 lm_lun_not_sup; /* LM lun not supported */
+ u32 lm_rpl_data_changed; /* LM report-lun data changed */
+ u32 lm_wire_residue_changed; /* LM report-lun rsp residue changed */
+ u32 lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
+ u32 lm_lun_not_rdy; /* LM lun not ready */
};
/* Modify char* port_stt[] in bfal_port.c if a new state was added */
@@ -636,6 +696,7 @@ enum bfa_port_states {
BFA_PORT_ST_FWMISMATCH = 12,
BFA_PORT_ST_PREBOOT_DISABLED = 13,
BFA_PORT_ST_TOGGLING_QWAIT = 14,
+ BFA_PORT_ST_ACQ_ADDR = 15,
BFA_PORT_ST_MAX_STATE,
};
@@ -732,8 +793,51 @@ enum bfa_port_linkstate_rsn {
CEE_ISCSI_PRI_PFC_OFF = 42,
CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43
};
+
+#define MAX_LUN_MASK_CFG 16
+
+/*
+ * Initially the flash content may be all 0xff (unprogrammed). Enabling or
+ * disabling the LUN mask changes its state. While a REPORT LUNS command is
+ * being processed the state goes from BFA_IOIM_LUN_MASK_ACTIVE to
+ * BFA_IOIM_LUN_MASK_FETCHED and then back to BFA_IOIM_LUN_MASK_ACTIVE.
+ */
+enum bfa_ioim_lun_mask_state_s {
+ BFA_IOIM_LUN_MASK_INACTIVE = 0,
+ BFA_IOIM_LUN_MASK_ACTIVE = 1,
+ BFA_IOIM_LUN_MASK_FETCHED = 2,
+};
+
+enum bfa_lunmask_state_s {
+ BFA_LUNMASK_DISABLED = 0x00,
+ BFA_LUNMASK_ENABLED = 0x01,
+ BFA_LUNMASK_MINCFG = 0x02,
+ BFA_LUNMASK_UNINITIALIZED = 0xff,
+};
+
#pragma pack(1)
/*
+ * LUN mask configuration
+ */
+struct bfa_lun_mask_s {
+ wwn_t lp_wwn;
+ wwn_t rp_wwn;
+ struct scsi_lun lun;
+ u8 ua;
+ u8 rsvd[3];
+ u16 rp_tag;
+ u8 lp_tag;
+ u8 state;
+};
+
+#define MAX_LUN_MASK_CFG 16
+struct bfa_lunmask_cfg_s {
+ u32 status;
+ u32 rsvd;
+ struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG];
+};
+
+/*
* Physical port configuration
*/
struct bfa_port_cfg_s {
@@ -748,6 +852,10 @@ struct bfa_port_cfg_s {
u8 tx_bbcredit; /* transmit buffer credits */
u8 ratelimit; /* ratelimit enabled or not */
u8 trl_def_speed; /* ratelimit default speed */
+ u8 bb_scn; /* BB_SCN value from FLOGI Exchg */
+ u8 bb_scn_state; /* Config state of BB_SCN */
+ u8 faa_state; /* FAA enabled/disabled */
+ u8 rsvd[1];
u16 path_tov; /* device path timeout */
u16 q_depth; /* SCSI Queue depth */
};
@@ -783,7 +891,7 @@ struct bfa_port_attr_s {
enum bfa_port_topology topology; /* current topology */
bfa_boolean_t beacon; /* current beacon status */
bfa_boolean_t link_e2e_beacon; /* link beacon is on */
- bfa_boolean_t plog_enabled; /* portlog is enabled */
+ bfa_boolean_t bbsc_op_status; /* fc credit recovery oper state */
/*
* Dynamic field - info from FCS
@@ -792,12 +900,10 @@ struct bfa_port_attr_s {
enum bfa_port_type port_type; /* current topology */
u32 loopback; /* external loopback */
u32 authfail; /* auth fail state */
- bfa_boolean_t io_profile; /* get it from fcpim mod */
- u8 pad[4]; /* for 64-bit alignement */
/* FCoE specific */
u16 fcoe_vlan;
- u8 rsvd1[6];
+ u8 rsvd1[2];
};
/*
@@ -988,6 +1094,19 @@ struct bfa_itnim_ioprofile_s {
};
/*
+ * vHBA port attribute values.
+ */
+struct bfa_vhba_attr_s {
+ wwn_t nwwn; /* node wwn */
+ wwn_t pwwn; /* port wwn */
+ u32 pid; /* port ID */
+ bfa_boolean_t io_profile; /* get it from fcpim mod */
+ bfa_boolean_t plog_enabled; /* portlog is enabled */
+ u16 path_tov;
+ u8 rsvd[2];
+};
+
+/*
* FC physical port statistics.
*/
struct bfa_port_fc_stats_s {
@@ -1020,6 +1139,9 @@ struct bfa_port_fc_stats_s {
u64 bad_os_count; /* Invalid ordered sets */
u64 err_enc_out; /* Encoding err nonframe_8b10b */
u64 err_enc; /* Encoding err frame_8b10b */
+ u64 bbsc_frames_lost; /* Credit Recovery-Frames Lost */
+ u64 bbsc_credits_lost; /* Credit Recovery-Credits Lost */
+ u64 bbsc_link_resets; /* Credit Recovery-Link Resets */
};
/*
@@ -1078,4 +1200,131 @@ union bfa_port_stats_u {
struct bfa_port_eth_stats_s eth;
};
+struct bfa_port_cfg_mode_s {
+ u16 max_pf;
+ u16 max_vf;
+ enum bfa_mode_s mode;
+};
+
+#pragma pack(1)
+
+#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
+#define BFA_CEE_DCBX_MAX_PRIORITY (8)
+#define BFA_CEE_DCBX_MAX_PGID (8)
+
+struct bfa_cee_lldp_str_s {
+ u8 sub_type;
+ u8 len;
+ u8 rsvd[2];
+ u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
+};
+
+struct bfa_cee_lldp_cfg_s {
+ struct bfa_cee_lldp_str_s chassis_id;
+ struct bfa_cee_lldp_str_s port_id;
+ struct bfa_cee_lldp_str_s port_desc;
+ struct bfa_cee_lldp_str_s sys_name;
+ struct bfa_cee_lldp_str_s sys_desc;
+ struct bfa_cee_lldp_str_s mgmt_addr;
+ u16 time_to_live;
+ u16 enabled_system_cap;
+};
+
+/* CEE/DCBX parameters */
+struct bfa_cee_dcbx_cfg_s {
+ u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY];
+ u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID];
+ u8 pfc_primap; /* bitmap of priorities with PFC enabled */
+ u8 fcoe_primap; /* bitmap of priorities used for FCoE traffic */
+ u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
+ u8 dcbx_version; /* operating version:CEE or preCEE */
+ u8 lls_fcoe; /* FCoE Logical Link Status */
+ u8 lls_lan; /* LAN Logical Link Status */
+ u8 rsvd[2];
+};
+
+/* CEE Query */
+struct bfa_cee_attr_s {
+ u8 cee_status;
+ u8 error_reason;
+ struct bfa_cee_lldp_cfg_s lldp_remote;
+ struct bfa_cee_dcbx_cfg_s dcbx_remote;
+ mac_t src_mac;
+ u8 link_speed;
+ u8 nw_priority;
+ u8 filler[2];
+};
+
+/* LLDP/DCBX/CEE Statistics */
+struct bfa_cee_stats_s {
+ u32 lldp_tx_frames; /* LLDP Tx Frames */
+ u32 lldp_rx_frames; /* LLDP Rx Frames */
+ u32 lldp_rx_frames_invalid; /* LLDP Rx Frames invalid */
+ u32 lldp_rx_frames_new; /* LLDP Rx Frames new */
+ u32 lldp_tlvs_unrecognized; /* LLDP Rx unrecog. TLVs */
+ u32 lldp_rx_shutdown_tlvs; /* LLDP Rx shutdown TLVs */
+ u32 lldp_info_aged_out; /* LLDP remote info aged */
+ u32 dcbx_phylink_ups; /* DCBX phy link ups */
+ u32 dcbx_phylink_downs; /* DCBX phy link downs */
+ u32 dcbx_rx_tlvs; /* DCBX Rx TLVs */
+ u32 dcbx_rx_tlvs_invalid; /* DCBX Rx TLVs invalid */
+ u32 dcbx_control_tlv_error; /* DCBX control TLV errors */
+ u32 dcbx_feature_tlv_error; /* DCBX feature TLV errors */
+ u32 dcbx_cee_cfg_new; /* DCBX new CEE cfg rcvd */
+ u32 cee_status_down; /* DCB status down */
+ u32 cee_status_up; /* DCB status up */
+ u32 cee_hw_cfg_changed; /* DCB hw cfg changed */
+ u32 cee_rx_invalid_cfg; /* DCB invalid cfg */
+};
+
+#pragma pack()
+
+/*
+ * AEN related definitions
+ */
+#define BFAD_NL_VENDOR_ID (((u64)0x01 << SCSI_NL_VID_TYPE_SHIFT) \
+ | BFA_PCI_VENDOR_ID_BROCADE)
+
+/* BFA remote port events */
+enum bfa_rport_aen_event {
+ BFA_RPORT_AEN_ONLINE = 1, /* RPort online event */
+ BFA_RPORT_AEN_OFFLINE = 2, /* RPort offline event */
+ BFA_RPORT_AEN_DISCONNECT = 3, /* RPort disconnect event */
+ BFA_RPORT_AEN_QOS_PRIO = 4, /* QOS priority change event */
+ BFA_RPORT_AEN_QOS_FLOWID = 5, /* QOS flow Id change event */
+};
+
+struct bfa_rport_aen_data_s {
+ u16 vf_id; /* vf_id of this logical port */
+ u16 rsvd[3];
+ wwn_t ppwwn; /* WWN of its physical port */
+ wwn_t lpwwn; /* WWN of this logical port */
+ wwn_t rpwwn; /* WWN of this remote port */
+ union {
+ struct bfa_rport_qos_attr_s qos;
+ } priv;
+};
+
+union bfa_aen_data_u {
+ struct bfa_adapter_aen_data_s adapter;
+ struct bfa_port_aen_data_s port;
+ struct bfa_lport_aen_data_s lport;
+ struct bfa_rport_aen_data_s rport;
+ struct bfa_itnim_aen_data_s itnim;
+ struct bfa_audit_aen_data_s audit;
+ struct bfa_ioc_aen_data_s ioc;
+};
+
+#define BFA_AEN_MAX_ENTRY 512
+
+struct bfa_aen_entry_s {
+ struct list_head qe;
+ enum bfa_aen_category aen_category;
+ u32 aen_type;
+ union bfa_aen_data_u aen_data;
+ struct timeval aen_tv;
+ u32 seq_num;
+ u32 bfad_num;
+};
+
#endif /* __BFA_DEFS_SVC_H__ */
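To make the new LUN mask configuration layout above easier to follow, a short
sketch (not part of the patch; the helper name is hypothetical and only the
bfa_defs_svc.h definitions added here are assumed) that counts the entries of
a struct bfa_lunmask_cfg_s currently in the active state:

/* Illustrative only: count active entries in a LUN mask configuration. */
static int lunmask_active_count(const struct bfa_lunmask_cfg_s *cfg)
{
	int i, n = 0;

	if (cfg->status != BFA_LUNMASK_ENABLED)
		return 0;

	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if (cfg->lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE)
			n++;
	}
	return n;
}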
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index bf0067e0fd0..50b6a1c8619 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -56,6 +56,161 @@ struct scsi_cdb_s {
#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocation length */
+#define SCSI_SENSE_CUR_ERR 0x70
+#define SCSI_SENSE_DEF_ERR 0x71
+
+/*
+ * SCSI additional sense codes
+ */
+#define SCSI_ASC_LUN_NOT_READY 0x04
+#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
+#define SCSI_ASC_TOCC 0x3F
+
+/*
+ * SCSI additional sense code qualifiers
+ */
+#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */
+#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
+
+/*
+ * Methods of reporting informational exceptions
+ */
+#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */
+
+struct scsi_report_luns_data_s {
+ u32 lun_list_length; /* length of the LUN list, in bytes */
+ u32 reserved;
+ struct scsi_lun lun[1]; /* first LUN in lun list */
+};
+
+struct scsi_inquiry_vendor_s {
+ u8 vendor_id[8];
+};
+
+struct scsi_inquiry_prodid_s {
+ u8 product_id[16];
+};
+
+struct scsi_inquiry_prodrev_s {
+ u8 product_rev[4];
+};
+
+struct scsi_inquiry_data_s {
+#ifdef __BIG_ENDIAN
+ u8 peripheral_qual:3; /* peripheral qualifier */
+ u8 device_type:5; /* peripheral device type */
+ u8 rmb:1; /* removable medium bit */
+ u8 device_type_mod:7; /* device type modifier */
+ u8 version;
+ u8 aenc:1; /* async evt notification capability */
+ u8 trm_iop:1; /* terminate I/O process */
+ u8 norm_aca:1; /* normal ACA supported */
+ u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
+ u8 rsp_data_format:4;
+ u8 additional_len;
+ u8 sccs:1;
+ u8 reserved1:7;
+ u8 reserved2:1;
+ u8 enc_serv:1; /* enclosure service component */
+ u8 reserved3:1;
+ u8 multi_port:1; /* multi-port device */
+ u8 m_chngr:1; /* device in medium transport element */
+ u8 ack_req_q:1; /* SIP specific bit */
+ u8 addr32:1; /* SIP specific bit */
+ u8 addr16:1; /* SIP specific bit */
+ u8 rel_adr:1; /* relative address */
+ u8 w_bus32:1;
+ u8 w_bus16:1;
+ u8 synchronous:1;
+ u8 linked_commands:1;
+ u8 trans_dis:1;
+ u8 cmd_queue:1; /* command queueing supported */
+ u8 soft_reset:1; /* soft reset alternative (VS) */
+#else
+ u8 device_type:5; /* peripheral device type */
+ u8 peripheral_qual:3; /* peripheral qualifier */
+ u8 device_type_mod:7; /* device type modifier */
+ u8 rmb:1; /* removable medium bit */
+ u8 version;
+ u8 rsp_data_format:4;
+ u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
+ u8 norm_aca:1; /* normal ACA supported */
+ u8 terminate_iop:1;/* terminate I/O process */
+ u8 aenc:1; /* async evt notification capability */
+ u8 additional_len;
+ u8 reserved1:7;
+ u8 sccs:1;
+ u8 addr16:1; /* SIP specific bit */
+ u8 addr32:1; /* SIP specific bit */
+ u8 ack_req_q:1; /* SIP specific bit */
+ u8 m_chngr:1; /* device in medium transport element */
+ u8 multi_port:1; /* multi-port device */
+ u8 reserved3:1; /* TBD - Vendor Specific */
+ u8 enc_serv:1; /* enclosure service component */
+ u8 reserved2:1;
+ u8 soft_seset:1; /* soft reset alternative (VS) */
+ u8 cmd_queue:1; /* command queueing supported */
+ u8 trans_dis:1;
+ u8 linked_commands:1;
+ u8 synchronous:1;
+ u8 w_bus16:1;
+ u8 w_bus32:1;
+ u8 rel_adr:1; /* relative address */
+#endif
+ struct scsi_inquiry_vendor_s vendor_id;
+ struct scsi_inquiry_prodid_s product_id;
+ struct scsi_inquiry_prodrev_s product_rev;
+ u8 vendor_specific[20];
+ u8 reserved4[40];
+};
+
+/*
+ * SCSI sense data format
+ */
+struct scsi_sense_s {
+#ifdef __BIG_ENDIAN
+ u8 valid:1;
+ u8 rsp_code:7;
+#else
+ u8 rsp_code:7;
+ u8 valid:1;
+#endif
+ u8 seg_num;
+#ifdef __BIG_ENDIAN
+ u8 file_mark:1;
+ u8 eom:1; /* end of media */
+ u8 ili:1; /* incorrect length indicator */
+ u8 reserved:1;
+ u8 sense_key:4;
+#else
+ u8 sense_key:4;
+ u8 reserved:1;
+ u8 ili:1; /* incorrect length indicator */
+ u8 eom:1; /* end of media */
+ u8 file_mark:1;
+#endif
+ u8 information[4]; /* device-type or cmd specific info */
+ u8 add_sense_length; /* additional sense length */
+ u8 command_info[4];/* command specific information */
+ u8 asc; /* additional sense code */
+ u8 ascq; /* additional sense code qualifier */
+ u8 fru_code; /* field replaceable unit code */
+#ifdef __BIG_ENDIAN
+ u8 sksv:1; /* sense key specific valid */
+ u8 c_d:1; /* command/data bit */
+ u8 res1:2;
+ u8 bpv:1; /* bit pointer valid */
+ u8 bpointer:3; /* bit pointer */
+#else
+ u8 bpointer:3; /* bit pointer */
+ u8 bpv:1; /* bit pointer valid */
+ u8 res1:2;
+ u8 c_d:1; /* command/data bit */
+ u8 sksv:1; /* sense key specific valid */
+#endif
+ u8 fpointer[2]; /* field pointer */
+};
+
/*
* Fibre Channel Header Structure (FCHS) definition
*/
@@ -1021,7 +1176,7 @@ struct fc_symname_s {
#define FC_ED_TOV 2
#define FC_REC_TOV (FC_ED_TOV + 1)
#define FC_RA_TOV 10
-#define FC_ELS_TOV (2 * FC_RA_TOV)
+#define FC_ELS_TOV ((2 * FC_RA_TOV) + 1)
#define FC_FCCT_TOV (3 * FC_RA_TOV)
/*
@@ -1049,15 +1204,6 @@ struct fc_vft_s {
};
/*
- * FCP
- */
-enum {
- FCP_RJT = 0x01000000, /* SRR reject */
- FCP_SRR_ACCEPT = 0x02000000, /* SRR accept */
- FCP_SRR = 0x14000000, /* Sequence Retransmission Request */
-};
-
-/*
* FCP_CMND definitions
*/
#define FCP_CMND_CDB_LEN 16
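The scsi_sense_s layout and the SCSI_SENSE_*/SCSI_ASC_* constants added above
are used later in this patch (see __bfa_cb_ioim_lm_lun_not_sup() in
bfa_fcpim.c) to fabricate CHECK CONDITION responses for masked LUNs. A minimal
sketch (not part of the patch; the helper name is hypothetical and the sense
key value 0x5 is ILLEGAL REQUEST from the SCSI spec) of how such a fixed-format
sense buffer is filled:

/* Illustrative only: build a "LUN not supported" fixed-format sense buffer. */
static void fill_lun_not_sup_sense(struct scsi_sense_s *sns)
{
	memset(sns, 0, sizeof(*sns));
	sns->rsp_code = SCSI_SENSE_CUR_ERR;	/* current error, fixed format */
	sns->sense_key = 0x5;			/* ILLEGAL REQUEST */
	sns->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
	sns->add_sense_length = 0xa;		/* 10 additional sense bytes */
}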
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index b7e25345165..17b59b8b564 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -94,7 +94,6 @@ fcbuild_init(void)
*/
plogi_tmpl.csp.verhi = FC_PH_VER_PH_3;
plogi_tmpl.csp.verlo = FC_PH_VER_4_3;
- plogi_tmpl.csp.bbcred = cpu_to_be16(0x0004);
plogi_tmpl.csp.ciro = 0x1;
plogi_tmpl.csp.cisc = 0x0;
plogi_tmpl.csp.altbbcred = 0x0;
@@ -156,6 +155,22 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
*/
}
+static void
+fc_gsresp_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
+{
+ memset(fchs, 0, sizeof(struct fchs_s));
+
+ fchs->routing = FC_RTG_FC4_DEV_DATA;
+ fchs->cat_info = FC_CAT_SOLICIT_CTRL;
+ fchs->type = FC_TYPE_SERVICES;
+ fchs->f_ctl =
+ bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH |
+ FCTL_END_SEQ | FCTL_SI_XFER);
+ fchs->d_id = d_id;
+ fchs->s_id = s_id;
+ fchs->ox_id = ox_id;
+}
+
void
fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
{
@@ -207,7 +222,7 @@ fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
static u16
fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
__be16 ox_id, wwn_t port_name, wwn_t node_name,
- u16 pdu_size, u8 els_code)
+ u16 pdu_size, u16 bb_cr, u8 els_code)
{
struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
@@ -220,6 +235,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size);
+ plogi->csp.bbcred = cpu_to_be16(bb_cr);
memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
@@ -268,15 +284,17 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
u16
fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
__be16 ox_id, wwn_t port_name, wwn_t node_name,
- u16 pdu_size, u16 local_bb_credits)
+ u16 pdu_size, u16 local_bb_credits, u8 bb_scn)
{
u32 d_id = 0;
+ u16 bbscn_rxsz = (bb_scn << 12) | pdu_size;
memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
fc_els_rsp_build(fchs, d_id, s_id, ox_id);
flogi->els_cmd.els_code = FC_ELS_ACC;
- flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
+ flogi->class3.rxsz = cpu_to_be16(pdu_size);
+ flogi->csp.rxsz = cpu_to_be16(bbscn_rxsz); /* bb_scn/rxsz */
flogi->port_name = port_name;
flogi->node_name = node_name;
@@ -306,19 +324,19 @@ fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
u16
fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
u16 ox_id, wwn_t port_name, wwn_t node_name,
- u16 pdu_size)
+ u16 pdu_size, u16 bb_cr)
{
return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
- node_name, pdu_size, FC_ELS_PLOGI);
+ node_name, pdu_size, bb_cr, FC_ELS_PLOGI);
}
u16
fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
u16 ox_id, wwn_t port_name, wwn_t node_name,
- u16 pdu_size)
+ u16 pdu_size, u16 bb_cr)
{
return fc_plogi_x_build(fchs, pld, d_id, s_id, ox_id, port_name,
- node_name, pdu_size, FC_ELS_ACC);
+ node_name, pdu_size, bb_cr, FC_ELS_ACC);
}
enum fc_parse_status
@@ -1096,6 +1114,21 @@ fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
}
u16
+fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr,
+ u32 d_id, u32 s_id, u16 ox_id, u8 reason_code,
+ u8 reason_code_expl)
+{
+ fc_gsresp_fchdr_build(fchs, d_id, s_id, ox_id);
+
+ cthdr->cmd_rsp_code = cpu_to_be16(CT_RSP_REJECT);
+ cthdr->rev_id = CT_GS3_REVISION;
+
+ cthdr->reason_code = reason_code;
+ cthdr->exp_code = reason_code_expl;
+ return sizeof(struct ct_hdr_s);
+}
+
+u16
fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
u8 set_br_reg, u32 s_id, u16 ox_id)
{
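Worth noting for the fc_flogi_acc_build() change above: bb_scn is carried in
the top 4 bits of the common service parameter receive size, so the value put
on the wire is (bb_scn << 12) | pdu_size. A small sketch (not part of the
patch; the helper names are hypothetical and the 12-bit mask is added here
only for safety) of the packing and unpacking:

/* Illustrative only: pack/unpack bb_scn and the rx size in csp.rxsz. */
static inline u16 pack_bbscn_rxsz(u8 bb_scn, u16 pdu_size)
{
	return (u16)((bb_scn << 12) | (pdu_size & 0x0fff));
}

static inline u8 unpack_bbscn(u16 bbscn_rxsz)
{
	return (u8)(bbscn_rxsz >> 12);
}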
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
index ece51ec7620..42cd9d4da69 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.h
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -66,6 +66,9 @@ fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed speed)
case RPSC_OP_SPEED_8G:
return BFA_PORT_SPEED_8GBPS;
+ case RPSC_OP_SPEED_16G:
+ return BFA_PORT_SPEED_16GBPS;
+
case RPSC_OP_SPEED_10G:
return BFA_PORT_SPEED_10GBPS;
@@ -94,6 +97,9 @@ fc_bfa_speed_to_rpsc_operspeed(enum bfa_port_speed op_speed)
case BFA_PORT_SPEED_8GBPS:
return RPSC_OP_SPEED_8G;
+ case BFA_PORT_SPEED_16GBPS:
+ return RPSC_OP_SPEED_16G;
+
case BFA_PORT_SPEED_10GBPS:
return RPSC_OP_SPEED_10G;
@@ -141,11 +147,11 @@ u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
u32 s_id, __be16 ox_id,
wwn_t port_name, wwn_t node_name,
u16 pdu_size,
- u16 local_bb_credits);
+ u16 local_bb_credits, u8 bb_scn);
u16 fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id,
u32 s_id, u16 ox_id, wwn_t port_name,
- wwn_t node_name, u16 pdu_size);
+ wwn_t node_name, u16 pdu_size, u16 bb_cr);
enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs);
@@ -177,13 +183,17 @@ u16 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
u16 fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
u16 ox_id, u32 port_id);
+u16 fc_gs_rjt_build(struct fchs_s *fchs, struct ct_hdr_s *cthdr,
+ u32 d_id, u32 s_id, u16 ox_id,
+ u8 reason_code, u8 reason_code_expl);
+
u16 fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
u8 set_br_reg, u32 s_id, u16 ox_id);
u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
u32 s_id, u16 ox_id,
wwn_t port_name, wwn_t node_name,
- u16 pdu_size);
+ u16 pdu_size, u16 bb_cr);
u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name,
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index c0353cdca92..e07bd4745d8 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -19,12 +19,14 @@
#include "bfa_modules.h"
BFA_TRC_FILE(HAL, FCPIM);
-BFA_MODULE(fcpim);
/*
* BFA ITNIM Related definitions
*/
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
+static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
+static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
+static void bfa_ioim_lm_init(struct bfa_s *bfa);
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
@@ -58,6 +60,14 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
} \
} while (0)
+#define bfa_ioim_rp_wwn(__ioim) \
+ (((struct bfa_fcs_rport_s *) \
+ (__ioim)->itnim->rport->rport_drv)->pwwn)
+
+#define bfa_ioim_lp_wwn(__ioim) \
+ ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \
+ (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \
+
#define bfa_itnim_sler_cb(__itnim) do { \
if ((__itnim)->bfa->fcs) \
bfa_cb_itnim_sler((__itnim)->ditn); \
@@ -67,6 +77,18 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
} \
} while (0)
+enum bfa_ioim_lm_status {
+ BFA_IOIM_LM_PRESENT = 1,
+ BFA_IOIM_LM_LUN_NOT_SUP = 2,
+ BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
+ BFA_IOIM_LM_LUN_NOT_RDY = 4,
+};
+
+enum bfa_ioim_lm_ua_status {
+ BFA_IOIM_LM_UA_RESET = 0,
+ BFA_IOIM_LM_UA_SET = 1,
+};
+
/*
* itnim state machine event
*/
@@ -123,6 +145,9 @@ enum bfa_ioim_event {
BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
+ BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
+ BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */
+ BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
};
@@ -220,6 +245,9 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
+static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
/*
* forward declaration of BFA IO state machine
@@ -287,24 +315,16 @@ static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
* Compute and return memory needed by FCP(im) module.
*/
static void
-bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
- u32 *dm_len)
+bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
- bfa_itnim_meminfo(cfg, km_len, dm_len);
+ bfa_itnim_meminfo(cfg, km_len);
/*
* IO memory
*/
- if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
- cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
- else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
- cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
-
*km_len += cfg->fwcfg.num_ioim_reqs *
(sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
- *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
-
/*
* task management command memory
*/
@@ -315,52 +335,41 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
static void
-bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
+ struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
- struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+ struct bfa_fcpim_s *fcpim = &fcp->fcpim;
+ struct bfa_s *bfa = fcp->bfa;
bfa_trc(bfa, cfg->drvcfg.path_tov);
bfa_trc(bfa, cfg->fwcfg.num_rports);
bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
+ fcpim->fcp = fcp;
fcpim->bfa = bfa;
fcpim->num_itnims = cfg->fwcfg.num_rports;
- fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
fcpim->path_tov = cfg->drvcfg.path_tov;
fcpim->delay_comp = cfg->drvcfg.delay_comp;
fcpim->profile_comp = NULL;
fcpim->profile_start = NULL;
- bfa_itnim_attach(fcpim, meminfo);
- bfa_tskim_attach(fcpim, meminfo);
- bfa_ioim_attach(fcpim, meminfo);
+ bfa_itnim_attach(fcpim);
+ bfa_tskim_attach(fcpim);
+ bfa_ioim_attach(fcpim);
}
static void
-bfa_fcpim_detach(struct bfa_s *bfa)
+bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
-}
-
-static void
-bfa_fcpim_start(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_fcpim_stop(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_fcpim_iocdisable(struct bfa_s *bfa)
-{
- struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+ struct bfa_fcpim_s *fcpim = &fcp->fcpim;
struct bfa_itnim_s *itnim;
struct list_head *qe, *qen;
+ /* Enqueue unused ioim resources to free_q */
+ list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);
+
list_for_each_safe(qe, qen, &fcpim->itnim_q) {
itnim = (struct bfa_itnim_s *) qe;
bfa_itnim_iocdisable(itnim);
@@ -370,7 +379,7 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa)
void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
- struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
fcpim->path_tov = path_tov * 1000;
if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
@@ -380,15 +389,146 @@ bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
- struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
return fcpim->path_tov / 1000;
}
+#define bfa_fcpim_add_iostats(__l, __r, __stats) \
+ (__l->__stats += __r->__stats)
+
+void
+bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
+ struct bfa_itnim_iostats_s *rstats)
+{
+ bfa_fcpim_add_iostats(lstats, rstats, total_ios);
+ bfa_fcpim_add_iostats(lstats, rstats, qresumes);
+ bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
+ bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
+ bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
+ bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
+ bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
+ bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
+ bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
+ bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
+ bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
+ bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
+ bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
+ bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
+ bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
+ bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
+ bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
+ bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
+ bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
+ bfa_fcpim_add_iostats(lstats, rstats, onlines);
+ bfa_fcpim_add_iostats(lstats, rstats, offlines);
+ bfa_fcpim_add_iostats(lstats, rstats, creates);
+ bfa_fcpim_add_iostats(lstats, rstats, deletes);
+ bfa_fcpim_add_iostats(lstats, rstats, create_comps);
+ bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
+ bfa_fcpim_add_iostats(lstats, rstats, sler_events);
+ bfa_fcpim_add_iostats(lstats, rstats, fw_create);
+ bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
+ bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
+ bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_success);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
+ bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
+ bfa_fcpim_add_iostats(lstats, rstats, io_comps);
+ bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
+ bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
+ bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
+ bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
+ bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
+ bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
+ bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
+ bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
+ bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
+ bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
+}
+
+bfa_status_t
+bfa_fcpim_port_iostats(struct bfa_s *bfa,
+ struct bfa_itnim_iostats_s *stats, u8 lp_tag)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ struct list_head *qe, *qen;
+ struct bfa_itnim_s *itnim;
+
+ /* accumulate IO stats from itnim */
+ memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ if (itnim->rport->rport_info.lp_tag != lp_tag)
+ continue;
+ bfa_fcpim_add_stats(stats, &(itnim->stats));
+ }
+ return BFA_STATUS_OK;
+}
+
+void
+bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
+{
+ struct bfa_itnim_latency_s *io_lat =
+ &(ioim->itnim->ioprofile.io_latency);
+ u32 val, idx;
+
+ val = (u32)(jiffies - ioim->start_time);
+ idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
+ bfa_itnim_ioprofile_update(ioim->itnim, idx);
+
+ io_lat->count[idx]++;
+ io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
+ io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
+ io_lat->avg[idx] += val;
+}
+
+void
+bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
+{
+ ioim->start_time = jiffies;
+}
+
+bfa_status_t
+bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
+{
+ struct bfa_itnim_s *itnim;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ struct list_head *qe, *qen;
+
+ /* accumulate IO stats from itnim */
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ bfa_itnim_clear_stats(itnim);
+ }
+ fcpim->io_profile = BFA_TRUE;
+ fcpim->io_profile_start_time = time;
+ fcpim->profile_comp = bfa_ioim_profile_comp;
+ fcpim->profile_start = bfa_ioim_profile_start;
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcpim_profile_off(struct bfa_s *bfa)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ fcpim->io_profile = BFA_FALSE;
+ fcpim->io_profile_start_time = 0;
+ fcpim->profile_comp = NULL;
+ fcpim->profile_start = NULL;
+ return BFA_STATUS_OK;
+}
+
u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
- struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
return fcpim->q_depth;
}
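The bfa_ioim_profile_comp() helper above keeps per-bucket latency counters
(count, min, max and a running sum in avg). A short sketch (not part of the
patch; the helper name is hypothetical and the counter fields are assumed to
be u32, as their use above suggests) of how a mean latency per bucket can be
derived from those counters:

/* Illustrative only: mean latency for one I/O size bucket. */
static u32 ioprofile_mean_latency(const struct bfa_itnim_latency_s *io_lat,
				  u32 idx)
{
	if (io_lat->count[idx] == 0)
		return 0;
	return io_lat->avg[idx] / io_lat->count[idx];
}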
@@ -990,8 +1130,7 @@ bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
}
void
-bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
- u32 *dm_len)
+bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
/*
* ITN memory
@@ -1000,15 +1139,16 @@ bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
}
void
-bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
+bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
struct bfa_s *bfa = fcpim->bfa;
+ struct bfa_fcp_mod_s *fcp = fcpim->fcp;
struct bfa_itnim_s *itnim;
int i, j;
INIT_LIST_HEAD(&fcpim->itnim_q);
- itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
+ itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
fcpim->itnim_arr = itnim;
for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
@@ -1030,7 +1170,7 @@ bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
}
- bfa_meminfo_kva(minfo) = (u8 *) itnim;
+ bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}
void
@@ -1043,7 +1183,7 @@ bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
- struct bfi_itnim_create_req_s *m;
+ struct bfi_itn_create_req_s *m;
itnim->msg_no++;
@@ -1056,8 +1196,8 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
return BFA_FALSE;
}
- bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
- bfa_lpuid(itnim->bfa));
+ bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
+ bfa_fn_lpu(itnim->bfa));
m->fw_handle = itnim->rport->fw_handle;
m->class = FC_CLASS_3;
m->seq_rec = itnim->seq_rec;
@@ -1067,14 +1207,14 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
/*
* queue I/O message to firmware
*/
- bfa_reqq_produce(itnim->bfa, itnim->reqq);
+ bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
return BFA_TRUE;
}
static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
- struct bfi_itnim_delete_req_s *m;
+ struct bfi_itn_delete_req_s *m;
/*
* check for room in queue to send request now
@@ -1085,15 +1225,15 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
return BFA_FALSE;
}
- bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
- bfa_lpuid(itnim->bfa));
+ bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
+ bfa_fn_lpu(itnim->bfa));
m->fw_handle = itnim->rport->fw_handle;
bfa_stats(itnim, fw_delete);
/*
* queue I/O message to firmware
*/
- bfa_reqq_produce(itnim->bfa, itnim->reqq);
+ bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
return BFA_TRUE;
}
@@ -1224,7 +1364,7 @@ bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
- struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
fcpim->del_itn_stats.del_itn_iocomp_aborted +=
itnim->stats.iocomp_aborted;
fcpim->del_itn_stats.del_itn_iocomp_timedout +=
@@ -1250,8 +1390,8 @@ bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
- struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
- union bfi_itnim_i2h_msg_u msg;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ union bfi_itn_i2h_msg_u msg;
struct bfa_itnim_s *itnim;
bfa_trc(bfa, m->mhdr.msg_id);
@@ -1259,7 +1399,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
msg.msg = m;
switch (m->mhdr.msg_id) {
- case BFI_ITNIM_I2H_CREATE_RSP:
+ case BFI_ITN_I2H_CREATE_RSP:
itnim = BFA_ITNIM_FROM_TAG(fcpim,
msg.create_rsp->bfa_handle);
WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
@@ -1267,7 +1407,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
break;
- case BFI_ITNIM_I2H_DELETE_RSP:
+ case BFI_ITN_I2H_DELETE_RSP:
itnim = BFA_ITNIM_FROM_TAG(fcpim,
msg.delete_rsp->bfa_handle);
WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
@@ -1275,7 +1415,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
break;
- case BFI_ITNIM_I2H_SLER_EVENT:
+ case BFI_ITN_I2H_SLER_EVENT:
itnim = BFA_ITNIM_FROM_TAG(fcpim,
msg.sler_event->bfa_handle);
bfa_stats(itnim, sler_events);
@@ -1295,9 +1435,11 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
- struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
struct bfa_itnim_s *itnim;
+ bfa_itn_create(bfa, rport, bfa_itnim_isr);
+
itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
WARN_ON(itnim->rport != rport);
@@ -1347,6 +1489,26 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
+#define bfa_io_lat_clock_res_div HZ
+#define bfa_io_lat_clock_res_mul 1000
+bfa_status_t
+bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
+ struct bfa_itnim_ioprofile_s *ioprofile)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
+ if (!fcpim->io_profile)
+ return BFA_STATUS_IOPROFILE_OFF;
+
+ itnim->ioprofile.index = BFA_IOBUCKET_MAX;
+ itnim->ioprofile.io_profile_start_time =
+ bfa_io_profile_start_time(itnim->bfa);
+ itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
+ itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
+ *ioprofile = itnim->ioprofile;
+
+ return BFA_STATUS_OK;
+}
+
void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
@@ -1415,7 +1577,28 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
- __bfa_cb_ioim_abort, ioim);
+ __bfa_cb_ioim_abort, ioim);
+ break;
+
+ case BFA_IOIM_SM_LM_LUN_NOT_SUP:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_lm_lun_not_sup, ioim);
+ break;
+
+ case BFA_IOIM_SM_LM_RPL_DC:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_lm_rpl_dc, ioim);
+ break;
+
+ case BFA_IOIM_SM_LM_LUN_NOT_RDY:
+ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+ bfa_ioim_move_to_comp_q(ioim);
+ bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+ __bfa_cb_ioim_lm_lun_not_rdy, ioim);
break;
default:
@@ -1955,6 +2138,264 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
}
}
+/*
+ * This is called from bfa_fcpim_start after bfa_init(), once the driver has
+ * completed the flash read. It invalidates the stale contents of the LUN
+ * mask, i.e. the unit attention, rp tag and lp tag fields.
+ */
+static void
+bfa_ioim_lm_init(struct bfa_s *bfa)
+{
+ struct bfa_lun_mask_s *lunm_list;
+ int i;
+
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return;
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
+ lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
+ lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
+ }
+}
+
+/*
+ * Validate LUN for LUN masking
+ */
+static enum bfa_ioim_lm_status
+bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
+ struct bfa_rport_s *rp, struct scsi_lun lun)
+{
+ u8 i;
+ struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+ struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
+
+ if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
+ (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
+ return BFA_IOIM_LM_PRESENT;
+ }
+
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+
+ if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+ continue;
+
+ if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
+ scsilun_to_int((struct scsi_lun *)&lun))
+ && (rp->rport_tag == lun_list[i].rp_tag)
+ && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
+ lun_list[i].lp_tag)) {
+ bfa_trc(ioim->bfa, lun_list[i].rp_tag);
+ bfa_trc(ioim->bfa, lun_list[i].lp_tag);
+ bfa_trc(ioim->bfa, scsilun_to_int(
+ (struct scsi_lun *)&lun_list[i].lun));
+
+ if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
+ ((cdb->scsi_cdb[0] != INQUIRY) &&
+ (cdb->scsi_cdb[0] != REPORT_LUNS))) {
+ lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
+ return BFA_IOIM_LM_RPL_DATA_CHANGED;
+ }
+
+ if (cdb->scsi_cdb[0] == REPORT_LUNS)
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
+
+ return BFA_IOIM_LM_PRESENT;
+ }
+ }
+
+ if ((cdb->scsi_cdb[0] == INQUIRY) &&
+ (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
+ return BFA_IOIM_LM_PRESENT;
+ }
+
+ if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
+ return BFA_IOIM_LM_LUN_NOT_RDY;
+
+ return BFA_IOIM_LM_LUN_NOT_SUP;
+}
+
+static bfa_boolean_t
+bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
+{
+ return BFA_TRUE;
+}
+
+static void
+bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
+ int buf_lun_cnt)
+{
+ struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
+ struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
+ struct scsi_lun lun;
+ int i, j;
+
+ bfa_trc(ioim->bfa, buf_lun_cnt);
+ for (j = 0; j < buf_lun_cnt; j++) {
+ lun = *((struct scsi_lun *)(lun_data + j));
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+ continue;
+ if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
+ (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
+ (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
+ == scsilun_to_int((struct scsi_lun *)&lun))) {
+ lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
+ break;
+ }
+ } /* next lun in mask DB */
+ } /* next lun in buf */
+}
+
+static int
+bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
+ struct scsi_report_luns_data_s *rl)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+ struct scatterlist *sg = scsi_sglist(cmnd);
+ struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
+ struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
+ int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
+ int lun_across_sg_bytes, bytes_from_next_buf;
+ u64 last_lun, temp_last_lun;
+
+ /* fetch luns from the first sg element */
+ bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
+ (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
+
+ /* fetch luns from multiple sg elements */
+ scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
+ if (sgeid == 0) {
+ prev_sg_len = sg_dma_len(sg);
+ prev_rl_data = (struct scsi_lun *)
+ phys_to_virt(sg_dma_address(sg));
+ continue;
+ }
+
+ /* if the buf is having more data */
+ lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
+ if (lun_across_sg_bytes) {
+ bfa_trc(ioim->bfa, lun_across_sg_bytes);
+ bfa_stats(ioim->itnim, lm_lun_across_sg);
+ bytes_from_next_buf = sizeof(struct scsi_lun) -
+ lun_across_sg_bytes;
+
+ /* from next buf take higher bytes */
+ temp_last_lun = *((u64 *)
+ phys_to_virt(sg_dma_address(sg)));
+ last_lun |= temp_last_lun >>
+ (lun_across_sg_bytes * BITS_PER_BYTE);
+
+ /* from prev buf take higher bytes */
+ temp_last_lun = *((u64 *)(prev_rl_data +
+ (prev_sg_len - lun_across_sg_bytes)));
+ temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
+ last_lun = last_lun | (temp_last_lun <<
+ (bytes_from_next_buf * BITS_PER_BYTE));
+
+ bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
+ } else
+ bytes_from_next_buf = 0;
+
+ *pgdlen += sg_dma_len(sg);
+ prev_sg_len = sg_dma_len(sg);
+ prev_rl_data = (struct scsi_lun *)
+ phys_to_virt(sg_dma_address(sg));
+ bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
+ bytes_from_next_buf,
+ sg_dma_len(sg) / sizeof(struct scsi_lun));
+ }
+
+ /* update the report luns data - based on fetched luns */
+ sg = scsi_sglist(cmnd);
+ base_rl_data = (struct scsi_lun *)rl->lun;
+ base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
+ for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
+ base_rl_data[j] = lun_list[i].lun;
+ lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
+ j++;
+ lun_fetched_cnt++;
+ }
+
+ if (j > base_count) {
+ j = 0;
+ sg = sg_next(sg);
+ base_rl_data = (struct scsi_lun *)
+ phys_to_virt(sg_dma_address(sg));
+ base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
+ }
+ }
+
+ bfa_trc(ioim->bfa, lun_fetched_cnt);
+ return lun_fetched_cnt;
+}
+
+static bfa_boolean_t
+bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
+{
+ struct scsi_inquiry_data_s *inq;
+ struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
+
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
+ inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
+
+ bfa_trc(ioim->bfa, inq->device_type);
+ inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
+ return 0;
+}
+
+static bfa_boolean_t
+bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+ struct scatterlist *sg = scsi_sglist(cmnd);
+ struct bfi_ioim_rsp_s *m;
+ struct scsi_report_luns_data_s *rl = NULL;
+ int lun_count = 0, lun_fetched_cnt = 0;
+ u32 residue, pgdlen = 0;
+
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
+ if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
+ return BFA_TRUE;
+
+ m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
+ if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
+ return BFA_TRUE;
+
+ pgdlen = sg_dma_len(sg);
+ bfa_trc(ioim->bfa, pgdlen);
+ rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
+ lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
+ lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
+
+ if (lun_count == lun_fetched_cnt)
+ return BFA_TRUE;
+
+ bfa_trc(ioim->bfa, lun_count);
+ bfa_trc(ioim->bfa, lun_fetched_cnt);
+ bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
+
+ if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
+ rl->lun_list_length = cpu_to_be32(lun_fetched_cnt *
+ sizeof(struct scsi_lun));
+ else
+ bfa_stats(ioim->itnim, lm_small_buf_addresidue);
+
+ bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
+ bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
+
+ residue = be32_to_cpu(m->residue);
+ residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
+ bfa_stats(ioim->itnim, lm_wire_residue_changed);
+ m->residue = cpu_to_be32(residue);
+ bfa_trc(ioim->bfa, ioim->nsges);
+ return BFA_FALSE;
+}
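
The residue fix-up at the end of this handler is plain arithmetic: each LUN hidden by the mask removes sizeof(struct scsi_lun) (8 bytes) from the data returned to the host and adds the same amount to the residue. A small sketch with assumed example values, not driver data:

#include <stdio.h>

/* Model of the residue fix-up: the target reported `reported` LUNs but the
 * mask only exposes `exposed`, so the returned list shrinks and the residue
 * grows by (reported - exposed) * 8 bytes. */
int main(void)
{
	unsigned reported = 16, exposed = 4, residue = 0;
	unsigned lun_sz = 8;		/* sizeof(struct scsi_lun) */
	unsigned new_list_len;

	new_list_len = exposed * lun_sz;
	residue += (reported - exposed) * lun_sz;

	printf("list length %u bytes, residue %u bytes\n", new_list_len, residue);
	return 0;
}
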
static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
@@ -1991,7 +2432,8 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
m->sns_len) {
sns_len = m->sns_len;
- snsinfo = ioim->iosp->snsinfo;
+ snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
+ ioim->iotag);
}
/*
@@ -2013,6 +2455,299 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
}
static void
+__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_ioim_s *ioim = cbarg;
+ int sns_len = 0xD;
+ u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
+ struct scsi_sense_s *snsinfo;
+
+ if (!complete) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+ return;
+ }
+
+ snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
+ ioim->fcpim->fcp, ioim->iotag);
+ snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
+ snsinfo->add_sense_length = 0xa;
+ snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
+ snsinfo->sense_key = ILLEGAL_REQUEST;
+ bfa_trc(ioim->bfa, residue);
+ bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
+ SCSI_STATUS_CHECK_CONDITION, sns_len,
+ (u8 *)snsinfo, residue);
+}
+
+static void
+__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_ioim_s *ioim = cbarg;
+ int sns_len = 0xD;
+ u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
+ struct scsi_sense_s *snsinfo;
+
+ if (!complete) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+ return;
+ }
+
+ snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
+ ioim->iotag);
+ snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
+ snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
+ snsinfo->asc = SCSI_ASC_TOCC;
+ snsinfo->add_sense_length = 0x6;
+ snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
+ bfa_trc(ioim->bfa, residue);
+ bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
+ SCSI_STATUS_CHECK_CONDITION, sns_len,
+ (u8 *)snsinfo, residue);
+}
+
+static void
+__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
+{
+ struct bfa_ioim_s *ioim = cbarg;
+ int sns_len = 0xD;
+ u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
+ struct scsi_sense_s *snsinfo;
+
+ if (!complete) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+ return;
+ }
+
+ snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
+ ioim->fcpim->fcp, ioim->iotag);
+ snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
+ snsinfo->add_sense_length = 0xa;
+ snsinfo->sense_key = NOT_READY;
+ snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
+ snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
+ bfa_trc(ioim->bfa, residue);
+ bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
+ SCSI_STATUS_CHECK_CONDITION, sns_len,
+ (u8 *)snsinfo, residue);
+}
+
+void
+bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
+ u16 rp_tag, u8 lp_tag)
+{
+ struct bfa_lun_mask_s *lun_list;
+ u8 i;
+
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return;
+
+ lun_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
+ if ((lun_list[i].lp_wwn == lp_wwn) &&
+ (lun_list[i].rp_wwn == rp_wwn)) {
+ lun_list[i].rp_tag = rp_tag;
+ lun_list[i].lp_tag = lp_tag;
+ }
+ }
+ }
+}
+
+/*
+ * set UA for all active luns in LM DB
+ */
+static void
+bfa_ioim_lm_set_ua(struct bfa_s *bfa)
+{
+ struct bfa_lun_mask_s *lunm_list;
+ int i;
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+ continue;
+ lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+ }
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
+{
+ struct bfa_lunmask_cfg_s *lun_mask;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ if (bfa_get_lun_mask_status(bfa) == update)
+ return BFA_STATUS_NO_CHANGE;
+
+ lun_mask = bfa_get_lun_mask(bfa);
+ lun_mask->status = update;
+
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
+ bfa_ioim_lm_set_ua(bfa);
+
+ return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
+{
+ int i;
+ struct bfa_lun_mask_s *lunm_list;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
+ if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
+ bfa_rport_unset_lunmask(bfa,
+ BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
+ }
+ }
+
+ memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
+ return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
+{
+ struct bfa_lunmask_cfg_s *lun_mask;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ lun_mask = bfa_get_lun_mask(bfa);
+ memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
+ wwn_t rpwwn, struct scsi_lun lun)
+{
+ struct bfa_lun_mask_s *lunm_list;
+ struct bfa_rport_s *rp = NULL;
+ int i, free_index = MAX_LUN_MASK_CFG + 1;
+ struct bfa_fcs_lport_s *port = NULL;
+ struct bfa_fcs_rport_s *rp_fcs;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
+ vf_id, *pwwn);
+ if (port) {
+ *pwwn = port->port_cfg.pwwn;
+ rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
+ if (rp_fcs)
+ rp = rp_fcs->bfa_rport;
+ }
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ /* look for an existing entry and remember the last free slot */
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
+ free_index = i;
+ if ((lunm_list[i].lp_wwn == *pwwn) &&
+ (lunm_list[i].rp_wwn == rpwwn) &&
+ (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
+ scsilun_to_int((struct scsi_lun *)&lun)))
+ return BFA_STATUS_ENTRY_EXISTS;
+ }
+
+ if (free_index > MAX_LUN_MASK_CFG)
+ return BFA_STATUS_MAX_ENTRY_REACHED;
+
+ if (rp) {
+ lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
+ rp->rport_info.local_pid);
+ lunm_list[free_index].rp_tag = rp->rport_tag;
+ } else {
+ lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
+ lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
+ }
+
+ lunm_list[free_index].lp_wwn = *pwwn;
+ lunm_list[free_index].rp_wwn = rpwwn;
+ lunm_list[free_index].lun = lun;
+ lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
+
+ /* set unit attention for all lun mask entries of this rport */
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if ((lunm_list[i].lp_wwn == *pwwn) &&
+ (lunm_list[i].rp_wwn == rpwwn))
+ lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+ }
+
+ return bfa_dconf_update(bfa);
+}
+
+bfa_status_t
+bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
+ wwn_t rpwwn, struct scsi_lun lun)
+{
+ struct bfa_lun_mask_s *lunm_list;
+ struct bfa_rport_s *rp = NULL;
+ struct bfa_fcs_lport_s *port = NULL;
+ struct bfa_fcs_rport_s *rp_fcs;
+ int i;
+
+ /* in min cfg lunm_list could be NULL but no commands should run. */
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
+ return BFA_STATUS_FAILED;
+
+ bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
+ bfa_trc(bfa, *pwwn);
+ bfa_trc(bfa, rpwwn);
+ bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
+
+ if (*pwwn == 0) {
+ port = bfa_fcs_lookup_port(
+ &((struct bfad_s *)bfa->bfad)->bfa_fcs,
+ vf_id, *pwwn);
+ if (port) {
+ *pwwn = port->port_cfg.pwwn;
+ rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
+ if (rp_fcs)
+ rp = rp_fcs->bfa_rport;
+ }
+ }
+
+ lunm_list = bfa_get_lun_mask_list(bfa);
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if ((lunm_list[i].lp_wwn == *pwwn) &&
+ (lunm_list[i].rp_wwn == rpwwn) &&
+ (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
+ scsilun_to_int((struct scsi_lun *)&lun))) {
+ lunm_list[i].lp_wwn = 0;
+ lunm_list[i].rp_wwn = 0;
+ int_to_scsilun(0, &lunm_list[i].lun);
+ lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
+ if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
+ lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
+ lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
+ }
+ return bfa_dconf_update(bfa);
+ }
+ }
+
+ /* set unit attention for all lun mask entries of this rport */
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if ((lunm_list[i].lp_wwn == *pwwn) &&
+ (lunm_list[i].rp_wwn == rpwwn))
+ lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
+ }
+
+ return BFA_STATUS_ENTRY_NOT_EXISTS;
+}
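
Both bfa_fcpim_lunmask_add() and bfa_fcpim_lunmask_delete() above key the fixed-size mask table on the (logical-port WWN, remote-port WWN, LUN) triple and scan it linearly. A stripped-down model of that lookup, assuming an illustrative entry layout and table size rather than the driver's types:

#include <stddef.h>
#include <stdint.h>

#define MAX_ENTRIES 32	/* stand-in for MAX_LUN_MASK_CFG */

struct lm_entry {
	uint64_t lp_wwn;
	uint64_t rp_wwn;
	uint64_t lun;
	int	 active;
};

/* Return the index of the matching active entry, or -1 if not configured. */
static int lm_find(const struct lm_entry *tbl, uint64_t lp, uint64_t rp,
		   uint64_t lun)
{
	for (size_t i = 0; i < MAX_ENTRIES; i++)
		if (tbl[i].active && tbl[i].lp_wwn == lp &&
		    tbl[i].rp_wwn == rp && tbl[i].lun == lun)
			return (int)i;
	return -1;
}

int main(void)
{
	struct lm_entry tbl[MAX_ENTRIES] = {
		{ .lp_wwn = 0x10, .rp_wwn = 0x20, .lun = 0, .active = 1 },
	};

	return lm_find(tbl, 0x10, 0x20, 0) == 0 ? 0 : 1;
}
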
+
+static void
__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
{
struct bfa_ioim_s *ioim = cbarg;
@@ -2022,6 +2757,7 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
return;
}
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
0, 0, NULL, 0);
}
@@ -2037,6 +2773,7 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
return;
}
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
0, 0, NULL, 0);
}
@@ -2051,6 +2788,7 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
return;
}
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}
@@ -2189,12 +2927,12 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
*/
switch (m->cmnd.iodir) {
case FCP_IODIR_READ:
- bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
+ bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
bfa_stats(itnim, input_reqs);
ioim->itnim->stats.rd_throughput += fcp_dl;
break;
case FCP_IODIR_WRITE:
- bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
+ bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
bfa_stats(itnim, output_reqs);
ioim->itnim->stats.wr_throughput += fcp_dl;
break;
@@ -2202,16 +2940,16 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
bfa_stats(itnim, input_reqs);
bfa_stats(itnim, output_reqs);
default:
- bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
+ bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
}
if (itnim->seq_rec ||
(scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
- bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
+ bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
/*
* queue I/O message to firmware
*/
- bfa_reqq_produce(ioim->bfa, ioim->reqq);
+ bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
return BFA_TRUE;
}
@@ -2269,14 +3007,14 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
else
msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
- bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
+ bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
m->io_tag = cpu_to_be16(ioim->iotag);
m->abort_tag = ++ioim->abort_tag;
/*
* queue I/O message to firmware
*/
- bfa_reqq_produce(ioim->bfa, ioim->reqq);
+ bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
return BFA_TRUE;
}
@@ -2360,46 +3098,32 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
* Memory allocation and initialization.
*/
void
-bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
+bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
{
struct bfa_ioim_s *ioim;
+ struct bfa_fcp_mod_s *fcp = fcpim->fcp;
struct bfa_ioim_sp_s *iosp;
u16 i;
- u8 *snsinfo;
- u32 snsbufsz;
/*
* claim memory first
*/
- ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
+ ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
fcpim->ioim_arr = ioim;
- bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);
+ bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
- iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
+ iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
fcpim->ioim_sp_arr = iosp;
- bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
-
- /*
- * Claim DMA memory for per IO sense data.
- */
- snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
- fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
- bfa_meminfo_dma_phys(minfo) += snsbufsz;
-
- fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
- bfa_meminfo_dma_virt(minfo) += snsbufsz;
- snsinfo = fcpim->snsbase.kva;
- bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
+ bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
/*
* Initialize ioim free queues
*/
- INIT_LIST_HEAD(&fcpim->ioim_free_q);
INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
INIT_LIST_HEAD(&fcpim->ioim_comp_q);
- for (i = 0; i < fcpim->num_ioim_reqs;
- i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
+ for (i = 0; i < fcpim->fcp->num_ioim_reqs;
+ i++, ioim++, iosp++) {
/*
* initialize IOIM
*/
@@ -2408,22 +3132,20 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
ioim->bfa = fcpim->bfa;
ioim->fcpim = fcpim;
ioim->iosp = iosp;
- iosp->snsinfo = snsinfo;
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
INIT_LIST_HEAD(&ioim->sgpg_q);
bfa_reqq_winit(&ioim->iosp->reqq_wait,
bfa_ioim_qresume, ioim);
bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
bfa_ioim_sgpg_alloced, ioim);
bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
-
- list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}
}
void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
- struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
struct bfa_ioim_s *ioim;
u16 iotag;
@@ -2448,6 +3170,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
evt = BFA_IOIM_SM_DONE;
else
evt = BFA_IOIM_SM_COMP;
+ ioim->proc_rsp_data(ioim);
break;
case BFI_IOIM_STS_TIMEDOUT:
@@ -2483,6 +3206,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
if (rsp->abort_tag != ioim->abort_tag) {
bfa_trc(ioim->bfa, rsp->abort_tag);
bfa_trc(ioim->bfa, ioim->abort_tag);
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
return;
}
@@ -2501,13 +3225,14 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
WARN_ON(1);
}
+ ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_sm_send_event(ioim, evt);
}
void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
- struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
struct bfa_ioim_s *ioim;
u16 iotag;
@@ -2518,7 +3243,16 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
bfa_ioim_cb_profile_comp(fcpim, ioim);
- bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+
+ if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+ return;
+ }
+
+ if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+ else
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
}
/*
@@ -2573,18 +3307,21 @@ struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
struct bfa_itnim_s *itnim, u16 nsges)
{
- struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
struct bfa_ioim_s *ioim;
+ struct bfa_iotag_s *iotag = NULL;
/*
 * allocate IOIM resource
*/
- bfa_q_deq(&fcpim->ioim_free_q, &ioim);
- if (!ioim) {
+ bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
+ if (!iotag) {
bfa_stats(itnim, no_iotags);
return NULL;
}
+ ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
+
ioim->dio = dio;
ioim->itnim = itnim;
ioim->nsges = nsges;
@@ -2601,7 +3338,8 @@ bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
- struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
+ struct bfa_fcpim_s *fcpim = ioim->fcpim;
+ struct bfa_iotag_s *iotag;
if (ioim->nsgpgs > 0)
bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
@@ -2610,13 +3348,51 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
fcpim->ios_active--;
ioim->iotag &= BFA_IOIM_IOTAG_MASK;
+
+ WARN_ON(!(ioim->iotag <
+ (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
+ iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
+
+ if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
+ list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
+ else
+ list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
+
list_del(&ioim->qe);
- list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
+ struct bfa_lps_s *lps;
+ enum bfa_ioim_lm_status status;
+ struct scsi_lun scsilun;
+
+ if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
+ lps = BFA_IOIM_TO_LPS(ioim);
+ int_to_scsilun(cmnd->device->lun, &scsilun);
+ status = bfa_ioim_lm_check(ioim, lps,
+ ioim->itnim->rport, scsilun);
+ if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
+ bfa_stats(ioim->itnim, lm_lun_not_rdy);
+ return;
+ }
+
+ if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
+ bfa_stats(ioim->itnim, lm_lun_not_sup);
+ return;
+ }
+
+ if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
+ bfa_stats(ioim->itnim, lm_rpl_data_changed);
+ return;
+ }
+ }
+
bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
/*
@@ -3021,7 +3797,7 @@ bfa_tskim_send(struct bfa_tskim_s *tskim)
* build i/o request message next
*/
bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
- bfa_lpuid(tskim->bfa));
+ bfa_fn_lpu(tskim->bfa));
m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
m->itn_fhdl = tskim->itnim->rport->fw_handle;
@@ -3032,7 +3808,7 @@ bfa_tskim_send(struct bfa_tskim_s *tskim)
/*
* queue I/O message to firmware
*/
- bfa_reqq_produce(tskim->bfa, itnim->reqq);
+ bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
return BFA_TRUE;
}
@@ -3056,14 +3832,14 @@ bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
* build i/o request message next
*/
bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
- bfa_lpuid(tskim->bfa));
+ bfa_fn_lpu(tskim->bfa));
m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
/*
* queue I/O message to firmware
*/
- bfa_reqq_produce(tskim->bfa, itnim->reqq);
+ bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
return BFA_TRUE;
}
@@ -3129,14 +3905,16 @@ bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
* Memory allocation and initialization.
*/
void
-bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
+bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
{
struct bfa_tskim_s *tskim;
+ struct bfa_fcp_mod_s *fcp = fcpim->fcp;
u16 i;
INIT_LIST_HEAD(&fcpim->tskim_free_q);
+ INIT_LIST_HEAD(&fcpim->tskim_unused_q);
- tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
+ tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
fcpim->tskim_arr = tskim;
for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
@@ -3155,13 +3933,13 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
}
- bfa_meminfo_kva(minfo) = (u8 *) tskim;
+ bfa_mem_kva_curp(fcp) = (u8 *) tskim;
}
void
bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
- struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
struct bfa_tskim_s *tskim;
u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
@@ -3188,7 +3966,7 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
struct bfa_tskim_s *
bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
{
- struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
struct bfa_tskim_s *tskim;
bfa_q_deq(&fcpim->tskim_free_q, &tskim);
@@ -3233,3 +4011,221 @@ bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
list_add_tail(&tskim->qe, &itnim->tsk_q);
bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}
+
+void
+bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
+{
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
+ struct list_head *qe;
+ int i;
+
+ for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
+ bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
+ list_add_tail(qe, &fcpim->tskim_unused_q);
+ }
+}
+
+/* BFA FCP module - parent module for fcpim */
+
+BFA_MODULE(fcp);
+
+static void
+bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+ struct bfa_s *bfa)
+{
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+ struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
+ struct bfa_mem_dma_s *seg_ptr;
+ u16 nsegs, idx, per_seg_ios, num_io_req;
+ u32 km_len = 0;
+
+ /*
+ * Zero is an allowed config value for num_ioim_reqs and num_fwtio_reqs.
+ * If the values are non-zero, clamp them to the supported range.
+ */
+ if (cfg->fwcfg.num_ioim_reqs &&
+ cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
+ cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
+ else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
+ cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
+
+ if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
+ cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
+
+ num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+ if (num_io_req > BFA_IO_MAX) {
+ if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
+ cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
+ cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
+ } else if (cfg->fwcfg.num_fwtio_reqs)
+ cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
+ else
+ cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
+ }
+
+ bfa_fcpim_meminfo(cfg, &km_len);
+
+ num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+ km_len += num_io_req * sizeof(struct bfa_iotag_s);
+ km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
+
+ /* dma memory */
+ nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
+ per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
+
+ bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
+ if (num_io_req >= per_seg_ios) {
+ num_io_req -= per_seg_ios;
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ per_seg_ios * BFI_IOIM_SNSLEN);
+ } else
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ num_io_req * BFI_IOIM_SNSLEN);
+ }
+
+ /* kva memory */
+ bfa_mem_kva_setup(minfo, fcp_kva, km_len);
+}
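
The DMA sizing loop above spreads the per-I/O sense buffers across DMA segments, filling each segment up to its capacity and putting the remainder in the last one. A quick sketch of that carve-up with assumed example counts (per_seg and snslen are illustrative, not the BFI constants):

#include <stdio.h>

int main(void)
{
	unsigned num_io_req = 10, per_seg = 4, snslen = 256;

	while (num_io_req) {
		/* each segment holds at most per_seg sense buffers */
		unsigned in_this_seg = num_io_req >= per_seg ? per_seg : num_io_req;

		printf("segment of %u bytes\n", in_this_seg * snslen);
		num_io_req -= in_this_seg;
	}
	return 0;
}
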
+
+static void
+bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+ struct bfa_mem_dma_s *seg_ptr;
+ u16 idx, nsegs, num_io_req;
+
+ fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
+ fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
+ fcp->num_itns = cfg->fwcfg.num_rports;
+ fcp->bfa = bfa;
+
+ /*
+ * Set up the pool of snsbase addresses that is passed to the
+ * firmware as part of bfi_iocfc_cfg_s.
+ */
+ num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
+ nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
+
+ bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
+
+ if (!bfa_mem_dma_virt(seg_ptr))
+ break;
+
+ fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
+ fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
+ bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
+ }
+
+ bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
+
+ bfa_iotag_attach(fcp);
+
+ fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
+ bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
+ (fcp->num_itns * sizeof(struct bfa_itn_s));
+ memset(fcp->itn_arr, 0,
+ (fcp->num_itns * sizeof(struct bfa_itn_s)));
+}
+
+static void
+bfa_fcp_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcp_start(struct bfa_s *bfa)
+{
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+
+ /*
+ * bfa_init() with the flash read is complete; now invalidate the
+ * stale lun mask contents (unit attention, rp tag and lp tag).
+ */
+ bfa_ioim_lm_init(fcp->bfa);
+}
+
+static void
+bfa_fcp_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcp_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+
+ /* Enqueue unused ioim resources to free_q */
+ list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);
+
+ bfa_fcpim_iocdisable(fcp);
+}
+
+void
+bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw)
+{
+ struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
+ struct list_head *qe;
+ int i;
+
+ for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
+ bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
+ list_add_tail(qe, &mod->iotag_unused_q);
+ }
+}
+
+void
+bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
+ void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
+{
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+ struct bfa_itn_s *itn;
+
+ itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
+ itn->isr = isr;
+}
+
+/*
+ * Itn interrupt processing.
+ */
+void
+bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+ struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
+ union bfi_itn_i2h_msg_u msg;
+ struct bfa_itn_s *itn;
+
+ msg.msg = m;
+ itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
+
+ if (itn->isr)
+ itn->isr(bfa, m);
+ else
+ WARN_ON(1);
+}
+
+void
+bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
+{
+ struct bfa_iotag_s *iotag;
+ u16 num_io_req, i;
+
+ iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
+ fcp->iotag_arr = iotag;
+
+ INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
+ INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
+ INIT_LIST_HEAD(&fcp->iotag_unused_q);
+
+ num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
+ for (i = 0; i < num_io_req; i++, iotag++) {
+ memset(iotag, 0, sizeof(struct bfa_iotag_s));
+ iotag->tag = i;
+ if (i < fcp->num_ioim_reqs)
+ list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
+ else
+ list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
+ }
+
+ bfa_mem_kva_curp(fcp) = (u8 *) iotag;
+}
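
bfa_iotag_attach() seeds two free lists with pre-numbered tags; bfa_ioim_alloc() and bfa_ioim_free() earlier in the diff then treat them as a simple tag pool, popping a tag on allocation and pushing it back on completion. A user-space sketch of that pattern, with invented names and sizes and a plain LIFO array standing in for the kernel list_head queues:

#include <stdint.h>
#include <stdio.h>

#define NUM_TAGS 4	/* arbitrary example value */

static uint16_t free_tags[NUM_TAGS];
static int free_top;

static void pool_init(void)
{
	for (uint16_t t = 0; t < NUM_TAGS; t++)
		free_tags[free_top++] = t;	/* preload tags 0..NUM_TAGS-1 */
}

static int tag_alloc(void)
{
	return free_top ? free_tags[--free_top] : -1;	/* -1: pool exhausted */
}

static void tag_free(uint16_t tag)
{
	free_tags[free_top++] = tag;
}

int main(void)
{
	int t;

	pool_init();
	t = tag_alloc();
	printf("allocated tag %d\n", t);
	tag_free((uint16_t)t);
	return 0;
}
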
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 1e38dade842..1080bcb81cb 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -24,6 +24,34 @@
#include "bfa_defs_svc.h"
#include "bfa_cs.h"
+/* FCP module related definitions */
+#define BFA_IO_MAX BFI_IO_MAX
+#define BFA_FWTIO_MAX 2000
+
+struct bfa_fcp_mod_s;
+struct bfa_iotag_s {
+ struct list_head qe; /* queue element */
+ u16 tag; /* FW IO tag */
+};
+
+struct bfa_itn_s {
+ bfa_isr_func_t isr;
+};
+
+void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
+ void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
+void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m);
+void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp);
+void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw);
+
+#define BFA_FCP_MOD(_hal) (&(_hal)->modules.fcp_mod)
+#define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg))
+#define BFA_IOTAG_FROM_TAG(_fcp, _tag) \
+ (&(_fcp)->iotag_arr[(_tag & BFA_IOIM_IOTAG_MASK)])
+#define BFA_ITN_FROM_TAG(_fcp, _tag) \
+ ((_fcp)->itn_arr + ((_tag) & ((_fcp)->num_itns - 1)))
+#define BFA_SNSINFO_FROM_TAG(_fcp, _tag) \
+ bfa_mem_get_dmabuf_kva(_fcp, _tag, BFI_IOIM_SNSLEN)
#define BFA_ITNIM_MIN 32
#define BFA_ITNIM_MAX 1024
@@ -51,14 +79,22 @@ bfa_ioim_get_index(u32 n) {
if (n >= (1UL)<<22)
return BFA_IOBUCKET_MAX - 1;
n >>= 8;
- if (n >= (1UL)<<16)
- n >>= 16; pos += 16;
- if (n >= 1 << 8)
- n >>= 8; pos += 8;
- if (n >= 1 << 4)
- n >>= 4; pos += 4;
- if (n >= 1 << 2)
- n >>= 2; pos += 2;
+ if (n >= (1UL)<<16) {
+ n >>= 16;
+ pos += 16;
+ }
+ if (n >= 1 << 8) {
+ n >>= 8;
+ pos += 8;
+ }
+ if (n >= 1 << 4) {
+ n >>= 4;
+ pos += 4;
+ }
+ if (n >= 1 << 2) {
+ n >>= 2;
+ pos += 2;
+ }
if (n >= 1 << 1)
pos += 1;
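
The brace fix above does not change the logic: after the initial right shift by 8, the ladder is a branchy highest-set-bit search, so for a non-zero operand it yields the bit position floor(log2(n)) as the I/O latency bucket index. A quick illustrative check in plain C, not driver code:

#include <stdio.h>

/* Reference highest-set-bit position for n > 0 (equivalent to fls(n) - 1). */
static int msb_pos(unsigned n)
{
	int pos = 0;

	while (n >>= 1)
		pos++;
	return pos;
}

int main(void)
{
	for (unsigned n = 1; n <= 1u << 10; n <<= 1)
		printf("n=%u bucket=%d\n", n, msb_pos(n));
	return 0;
}
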
@@ -74,26 +110,26 @@ struct bfad_ioim_s;
struct bfad_tskim_s;
typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
+typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
-struct bfa_fcpim_mod_s {
+struct bfa_fcpim_s {
struct bfa_s *bfa;
+ struct bfa_fcp_mod_s *fcp;
struct bfa_itnim_s *itnim_arr;
struct bfa_ioim_s *ioim_arr;
struct bfa_ioim_sp_s *ioim_sp_arr;
struct bfa_tskim_s *tskim_arr;
- struct bfa_dma_s snsbase;
int num_itnims;
- int num_ioim_reqs;
int num_tskim_reqs;
u32 path_tov;
u16 q_depth;
u8 reqq; /* Request queue to be used */
- u8 rsvd;
+ u8 lun_masking_pending;
struct list_head itnim_q; /* queue of active itnim */
- struct list_head ioim_free_q; /* free IO resources */
struct list_head ioim_resfree_q; /* IOs waiting for f/w */
struct list_head ioim_comp_q; /* IO global comp Q */
struct list_head tskim_free_q;
+ struct list_head tskim_unused_q; /* Unused tskim Q */
u32 ios_active; /* current active IOs */
u32 delay_comp;
struct bfa_fcpim_del_itn_stats_s del_itn_stats;
@@ -104,6 +140,25 @@ struct bfa_fcpim_mod_s {
bfa_fcpim_profile_t profile_start;
};
+/* Max FCP dma segs required */
+#define BFA_FCP_DMA_SEGS BFI_IOIM_SNSBUF_SEGS
+
+struct bfa_fcp_mod_s {
+ struct bfa_s *bfa;
+ struct list_head iotag_ioim_free_q; /* free IO resources */
+ struct list_head iotag_tio_free_q; /* free IO resources */
+ struct list_head iotag_unused_q; /* unused IO resources*/
+ struct bfa_iotag_s *iotag_arr;
+ struct bfa_itn_s *itn_arr;
+ int num_ioim_reqs;
+ int num_fwtio_reqs;
+ int num_itns;
+ struct bfa_dma_s snsbase[BFA_FCP_DMA_SEGS];
+ struct bfa_fcpim_s fcpim;
+ struct bfa_mem_dma_s dma_seg[BFA_FCP_DMA_SEGS];
+ struct bfa_mem_kva_s kva_seg;
+};
+
/*
* BFA IO (initiator mode)
*/
@@ -111,7 +166,7 @@ struct bfa_ioim_s {
struct list_head qe; /* queue element */
bfa_sm_t sm; /* BFA ioim state machine */
struct bfa_s *bfa; /* BFA module */
- struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
+ struct bfa_fcpim_s *fcpim; /* parent fcpim module */
struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
struct bfad_ioim_s *dio; /* driver IO handle */
u16 iotag; /* FWI IO tag */
@@ -124,12 +179,13 @@ struct bfa_ioim_s {
bfa_cb_cbfn_t io_cbfn; /* IO completion handler */
struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */
u8 reqq; /* Request queue for I/O */
+ u8 mode; /* IO is passthrough or not */
u64 start_time; /* IO's Profile start val */
+ bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
};
struct bfa_ioim_sp_s {
struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */
- u8 *snsinfo; /* sense info for this IO */
struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */
struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
bfa_boolean_t abort_explicit; /* aborted by OS */
@@ -143,7 +199,7 @@ struct bfa_tskim_s {
struct list_head qe;
bfa_sm_t sm;
struct bfa_s *bfa; /* BFA module */
- struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */
+ struct bfa_fcpim_s *fcpim; /* parent fcpim module */
struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */
struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */
bfa_boolean_t notify; /* notify itnim on TM comp */
@@ -182,13 +238,13 @@ struct bfa_itnim_s {
struct bfa_wc_s wc; /* waiting counter */
struct bfa_timer_s timer; /* pending IO TOV */
struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
- struct bfa_fcpim_mod_s *fcpim; /* fcpim module */
+ struct bfa_fcpim_s *fcpim; /* fcpim module */
struct bfa_itnim_iostats_s stats;
struct bfa_itnim_ioprofile_s ioprofile;
};
#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
-#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
+#define BFA_FCPIM(_hal) (&(_hal)->modules.fcp_mod.fcpim)
#define BFA_IOIM_TAG_2_ID(_iotag) ((_iotag) & BFA_IOIM_IOTAG_MASK)
#define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \
(&fcpim->ioim_arr[(_iotag & BFA_IOIM_IOTAG_MASK)])
@@ -196,15 +252,19 @@ struct bfa_itnim_s {
(&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
#define bfa_io_profile_start_time(_bfa) \
- (_bfa->modules.fcpim_mod.io_profile_start_time)
+ ((_bfa)->modules.fcp_mod.fcpim.io_profile_start_time)
#define bfa_fcpim_get_io_profile(_bfa) \
- (_bfa->modules.fcpim_mod.io_profile)
+ ((_bfa)->modules.fcp_mod.fcpim.io_profile)
#define bfa_ioim_update_iotag(__ioim) do { \
uint16_t k = (__ioim)->iotag >> BFA_IOIM_RETRY_TAG_OFFSET; \
k++; (__ioim)->iotag &= BFA_IOIM_IOTAG_MASK; \
(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \
} while (0)
+#define BFA_IOIM_TO_LPS(__ioim) \
+ BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \
+ __ioim->itnim->rport->rport_info.lp_tag)
+
static inline bfa_boolean_t
bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
{
@@ -217,8 +277,7 @@ bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
/*
* function prototypes
*/
-void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
- struct bfa_meminfo_s *minfo);
+void bfa_ioim_attach(struct bfa_fcpim_s *fcpim);
void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_ioim_good_comp_isr(struct bfa_s *bfa,
struct bfi_msg_s *msg);
@@ -228,18 +287,15 @@ void bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
void bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
void bfa_ioim_tov(struct bfa_ioim_s *ioim);
-void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
- struct bfa_meminfo_s *minfo);
+void bfa_tskim_attach(struct bfa_fcpim_s *fcpim);
void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_tskim_iodone(struct bfa_tskim_s *tskim);
void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
void bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
+void bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw);
-void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
- u32 *dm_len);
-void bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim,
- struct bfa_meminfo_s *minfo);
-void bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim);
+void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len);
+void bfa_itnim_attach(struct bfa_fcpim_s *fcpim);
void bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
void bfa_itnim_iodone(struct bfa_itnim_s *itnim);
@@ -252,13 +308,19 @@ bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa);
u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa);
+bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
+ struct bfa_itnim_iostats_s *stats, u8 lp_tag);
+void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
+ struct bfa_itnim_iostats_s *itnim_stats);
+bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
+bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
#define bfa_fcpim_ioredirect_enabled(__bfa) \
- (((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect)
+ (((struct bfa_fcpim_s *)(BFA_FCPIM(__bfa)))->ioredirect)
#define bfa_fcpim_get_next_reqq(__bfa, __qid) \
{ \
- struct bfa_fcpim_mod_s *__fcpim = BFA_FCPIM_MOD(__bfa); \
+ struct bfa_fcpim_s *__fcpim = BFA_FCPIM(__bfa); \
__fcpim->reqq++; \
__fcpim->reqq &= (BFI_IOC_MAX_CQS - 1); \
*(__qid) = __fcpim->reqq; \
@@ -352,4 +414,14 @@ void bfa_tskim_start(struct bfa_tskim_s *tskim,
void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
enum bfi_tskim_status tsk_status);
+void bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn,
+ wwn_t rp_wwn, u16 rp_tag, u8 lp_tag);
+bfa_status_t bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 on_off);
+bfa_status_t bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf);
+bfa_status_t bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id,
+ wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
+bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id,
+ wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun);
+bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa);
+
#endif /* __BFA_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 9b43ca4b677..eaac57e1dde 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -20,6 +20,7 @@
*/
#include "bfad_drv.h"
+#include "bfad_im.h"
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"
@@ -92,25 +93,49 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
void
bfa_fcs_init(struct bfa_fcs_s *fcs)
{
- int i, npbc_vports;
+ int i;
struct bfa_fcs_mod_s *mod;
- struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
mod = &fcs_modules[i];
if (mod->modinit)
mod->modinit(fcs);
}
+}
+
+/*
+ * FCS config update - reset the pwwn/nwwn of the fabric base logical
+ * port with the values learned from the bfa_init firmware GETATTR request.
+ */
+void
+bfa_fcs_update_cfg(struct bfa_fcs_s *fcs)
+{
+ struct bfa_fcs_fabric_s *fabric = &fcs->fabric;
+ struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+ struct bfa_ioc_s *ioc = &fabric->fcs->bfa->ioc;
+
+ port_cfg->nwwn = ioc->attr->nwwn;
+ port_cfg->pwwn = ioc->attr->pwwn;
+}
+
+/*
+ * fcs pbc vport initialization
+ */
+void
+bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs)
+{
+ int i, npbc_vports;
+ struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];
+
/* Initialize pbc vports */
if (!fcs->min_cfg) {
npbc_vports =
- bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
+ bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
for (i = 0; i < npbc_vports; i++)
bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]);
}
}
-
/*
* brief
* FCS driver details initialization.
@@ -168,11 +193,14 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs)
#define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */
#define bfa_fcs_fabric_set_opertype(__fabric) do { \
- if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \
- == BFA_PORT_TOPOLOGY_P2P) \
+ if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \
+ == BFA_PORT_TOPOLOGY_P2P) { \
+ if (fabric->fab_type == BFA_FCS_FABRIC_SWITCHED) \
(__fabric)->oper_type = BFA_PORT_TYPE_NPORT; \
else \
- (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \
+ (__fabric)->oper_type = BFA_PORT_TYPE_P2P; \
+ } else \
+ (__fabric)->oper_type = BFA_PORT_TYPE_NLPORT; \
} while (0)
/*
@@ -196,6 +224,9 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rspfchs);
+static u8 bfa_fcs_fabric_oper_bbscn(struct bfa_fcs_fabric_s *fabric);
+static bfa_boolean_t bfa_fcs_fabric_is_bbscn_enabled(
+ struct bfa_fcs_fabric_s *fabric);
static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
enum bfa_fcs_fabric_event event);
@@ -269,8 +300,8 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
break;
case BFA_FCS_FABRIC_SM_DELETE:
- bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
- bfa_wc_down(&fabric->fcs->wc);
+ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+ bfa_fcs_fabric_delete(fabric);
break;
default:
@@ -322,7 +353,8 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
case BFA_FCS_FABRIC_SM_CONT_OP:
bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
- fabric->bb_credit);
+ fabric->bb_credit,
+ bfa_fcs_fabric_oper_bbscn(fabric));
fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
if (fabric->auth_reqd && fabric->is_auth) {
@@ -350,7 +382,8 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
case BFA_FCS_FABRIC_SM_NO_FABRIC:
fabric->fab_type = BFA_FCS_FABRIC_N2N;
bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
- fabric->bb_credit);
+ fabric->bb_credit,
+ bfa_fcs_fabric_oper_bbscn(fabric));
bfa_fcs_fabric_notify_online(fabric);
bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
break;
@@ -518,7 +551,11 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
case BFA_FCS_FABRIC_SM_NO_FABRIC:
bfa_trc(fabric->fcs, fabric->bb_credit);
bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
- fabric->bb_credit);
+ fabric->bb_credit,
+ bfa_fcs_fabric_oper_bbscn(fabric));
+ break;
+
+ case BFA_FCS_FABRIC_SM_RETRY_OP:
break;
default:
@@ -764,6 +801,10 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
case BFA_STATUS_FABRIC_RJT:
fabric->stats.flogi_rejects++;
+ if (fabric->lps->lsrjt_rsn == FC_LS_RJT_RSN_LOGICAL_ERROR &&
+ fabric->lps->lsrjt_expl == FC_LS_RJT_EXP_NO_ADDL_INFO)
+ fabric->fcs->bbscn_flogi_rjt = BFA_TRUE;
+
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
return;
@@ -793,6 +834,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
*/
fabric->bport.port_topo.pn2n.rem_port_wwn =
fabric->lps->pr_pwwn;
+ fabric->fab_type = BFA_FCS_FABRIC_N2N;
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
}
@@ -808,13 +850,17 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
{
struct bfa_s *bfa = fabric->fcs->bfa;
struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg;
- u8 alpa = 0;
+ u8 alpa = 0, bb_scn = 0;
if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
alpa = bfa_fcport_get_myalpa(bfa);
+ if (bfa_fcs_fabric_is_bbscn_enabled(fabric) &&
+ (!fabric->fcs->bbscn_flogi_rjt))
+ bb_scn = BFA_FCS_PORT_DEF_BB_SCN;
+
bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
- pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
+ pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd, bb_scn);
fabric->stats.flogi_sent++;
}
@@ -873,6 +919,40 @@ bfa_fcs_fabric_delay(void *cbarg)
}
/*
+ * Computes operating BB_SCN value
+ */
+static u8
+bfa_fcs_fabric_oper_bbscn(struct bfa_fcs_fabric_s *fabric)
+{
+ u8 pr_bbscn = fabric->lps->pr_bbscn;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fabric->fcs->bfa);
+
+ if (!(fcport->cfg.bb_scn_state && pr_bbscn))
+ return 0;
+
+ /* return max of local/remote bb_scn values */
+ return ((pr_bbscn > BFA_FCS_PORT_DEF_BB_SCN) ?
+ pr_bbscn : BFA_FCS_PORT_DEF_BB_SCN);
+}
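
bfa_fcs_fabric_oper_bbscn() above picks the operating BB_SCN as the larger of the peer-advertised value and the local default, and only when BB_SCN is enabled locally and the peer advertised a value. A tiny standalone sketch of that selection (DEF_BB_SCN mirrors BFA_FCS_PORT_DEF_BB_SCN; everything else is made up for the example):

#include <stdio.h>

#define DEF_BB_SCN 3

static unsigned oper_bbscn(int local_enabled, unsigned peer_bbscn)
{
	if (!local_enabled || !peer_bbscn)
		return 0;
	return peer_bbscn > DEF_BB_SCN ? peer_bbscn : DEF_BB_SCN;
}

int main(void)
{
	printf("%u\n", oper_bbscn(1, 2));	/* -> 3, local default wins */
	printf("%u\n", oper_bbscn(1, 5));	/* -> 5, peer value wins */
	printf("%u\n", oper_bbscn(0, 5));	/* -> 0, disabled locally */
	return 0;
}
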
+
+/*
+ * Check if BB_SCN can be enabled.
+ */
+static bfa_boolean_t
+bfa_fcs_fabric_is_bbscn_enabled(struct bfa_fcs_fabric_s *fabric)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fabric->fcs->bfa);
+
+ if (bfa_ioc_get_fcmode(&fabric->fcs->bfa->ioc) &&
+ fcport->cfg.bb_scn_state &&
+ !bfa_fcport_is_qos_enabled(fabric->fcs->bfa) &&
+ !bfa_fcport_is_trunk_enabled(fabric->fcs->bfa))
+ return BFA_TRUE;
+ else
+ return BFA_FALSE;
+}
+
+/*
* Delete all vports and wait for vport delete completions.
*/
static void
@@ -989,6 +1069,7 @@ void
bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
{
bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+ fabric->fcs->bbscn_flogi_rjt = BFA_FALSE;
bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
}
@@ -1192,6 +1273,7 @@ bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
}
fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred);
+ fabric->lps->pr_bbscn = (be16_to_cpu(flogi->csp.rxsz) >> 12);
bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
bport->port_topo.pn2n.reply_oxid = fchs->ox_id;
@@ -1224,9 +1306,10 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
n2n_port->reply_oxid, pcfg->pwwn,
pcfg->nwwn,
bfa_fcport_get_maxfrsize(bfa),
- bfa_fcport_get_rx_bbcredit(bfa));
+ bfa_fcport_get_rx_bbcredit(bfa),
+ bfa_fcs_fabric_oper_bbscn(fabric));
- bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->lp_tag,
+ bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->bfa_tag,
BFA_FALSE, FC_CLASS_3,
reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric,
FC_MAX_PDUSZ, 0);
@@ -1245,6 +1328,29 @@ bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_trc(fabric->fcs, status);
}
+
+/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_fabric_aen_post(struct bfa_fcs_lport_s *port,
+ enum bfa_port_aen_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.port.pwwn = bfa_fcs_lport_get_pwwn(port);
+ aen_entry->aen_data.port.fwwn = bfa_fcs_lport_get_fabric_name(port);
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_PORT, event);
+}
+
/*
*
* @param[in] fabric - fabric
@@ -1276,6 +1382,8 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
"Base port WWN = %s Fabric WWN = %s\n",
pwwn_ptr, fwwn_ptr);
+ bfa_fcs_fabric_aen_post(&fabric->bport,
+ BFA_PORT_AEN_FABRIC_NAME_CHANGE);
}
}
@@ -1298,6 +1406,45 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
}
/*
+ * Return the list of local logical ports present in the given VF.
+ *
+ * @param[in] vf vf for which logical ports are returned
+ * @param[out] lpwwn returned logical port wwn list
+ * @param[in,out] nlports in: size of the lpwwn list;
+ * out: number of elements returned,
+ * capped at the size passed in
+ */
+void
+bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
+{
+ struct list_head *qe;
+ struct bfa_fcs_vport_s *vport;
+ int i = 0;
+ struct bfa_fcs_s *fcs;
+
+ if (vf == NULL || lpwwn == NULL || *nlports == 0)
+ return;
+
+ fcs = vf->fcs;
+
+ bfa_trc(fcs, vf->vf_id);
+ bfa_trc(fcs, (uint32_t) *nlports);
+
+ lpwwn[i++] = vf->bport.port_cfg.pwwn;
+
+ list_for_each(qe, &vf->vport_q) {
+ if (i >= *nlports)
+ break;
+
+ vport = (struct bfa_fcs_vport_s *) qe;
+ lpwwn[i++] = vport->lport.port_cfg.pwwn;
+ }
+
+ bfa_trc(fcs, i);
+ *nlports = i;
+}
+
+/*
* BFA FCS PPORT ( physical port)
*/
static void
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 61cdce4bd91..e75e07d2591 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -254,6 +254,9 @@ struct bfa_fcs_fabric_s;
#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48
#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16
+/* bb_scn value in 2^bb_scn */
+#define BFA_FCS_PORT_DEF_BB_SCN 3
+
/*
* Get FC port ID for a logical port.
*/
@@ -379,6 +382,7 @@ void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport);
#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT 90 /* in secs */
#define BFA_FCS_RPORT_MAX_RETRIES (5)
@@ -420,6 +424,7 @@ struct bfa_fcs_rport_s {
enum fc_cos fc_cos; /* FC classes of service supp */
bfa_boolean_t cisc; /* CISC capable device */
bfa_boolean_t prlo; /* processing prlo or LOGO */
+ bfa_boolean_t plogi_pending; /* Rx Plogi Pending */
wwn_t pwwn; /* port wwn of rport */
wwn_t nwwn; /* node wwn of rport */
struct bfa_rport_symname_s psym_name; /* port symbolic name */
@@ -447,6 +452,8 @@ bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
/*
* bfa fcs rport API functions
*/
+void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
+ struct bfa_rport_attr_s *attr);
struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
wwn_t rpwwn);
struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
@@ -591,10 +598,21 @@ void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
struct fchs_s *fchs, u16 len);
-#define BFA_FCS_FDMI_SUPORTED_SPEEDS (FDMI_TRANS_SPEED_1G | \
- FDMI_TRANS_SPEED_2G | \
- FDMI_TRANS_SPEED_4G | \
- FDMI_TRANS_SPEED_8G)
+#define BFA_FCS_FDMI_SUPP_SPEEDS_4G (FDMI_TRANS_SPEED_1G | \
+ FDMI_TRANS_SPEED_2G | \
+ FDMI_TRANS_SPEED_4G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_8G (FDMI_TRANS_SPEED_1G | \
+ FDMI_TRANS_SPEED_2G | \
+ FDMI_TRANS_SPEED_4G | \
+ FDMI_TRANS_SPEED_8G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_16G (FDMI_TRANS_SPEED_2G | \
+ FDMI_TRANS_SPEED_4G | \
+ FDMI_TRANS_SPEED_8G | \
+ FDMI_TRANS_SPEED_16G)
+
+#define BFA_FCS_FDMI_SUPP_SPEEDS_10G FDMI_TRANS_SPEED_10G
/*
* HBA Attribute Block : BFA internal representation. Note : Some variable
@@ -649,12 +667,15 @@ struct bfa_fcs_s {
struct bfa_trc_mod_s *trcmod; /* tracing module */
bfa_boolean_t vf_enabled; /* VF mode is enabled */
bfa_boolean_t fdmi_enabled; /* FDMI is enabled */
+ bfa_boolean_t bbscn_enabled; /* Driver Config Parameter */
+ bfa_boolean_t bbscn_flogi_rjt;/* FLOGI reject due to BB_SCN */
bfa_boolean_t min_cfg; /* min cfg enabled/disabled */
u16 port_vfid; /* port default VF ID */
struct bfa_fcs_driver_info_s driver_info;
struct bfa_fcs_fabric_s fabric; /* base fabric state machine */
struct bfa_fcs_stats_s stats; /* FCS statistics */
struct bfa_wc_s wc; /* waiting counter */
+ int fcs_aen_seq;
};
/*
@@ -715,6 +736,8 @@ void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
struct bfad_s *bfad,
bfa_boolean_t min_cfg);
void bfa_fcs_init(struct bfa_fcs_s *fcs);
+void bfa_fcs_pbc_vport_init(struct bfa_fcs_s *fcs);
+void bfa_fcs_update_cfg(struct bfa_fcs_s *fcs);
void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
struct bfa_fcs_driver_info_s *driver_info);
void bfa_fcs_exit(struct bfa_fcs_s *fcs);
@@ -723,6 +746,7 @@ void bfa_fcs_exit(struct bfa_fcs_s *fcs);
* bfa fcs vf public functions
*/
bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
+void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
/*
* fabric protected interface functions
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index e7b49f4cb51..9272840a240 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -37,6 +37,8 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg,
struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs);
+static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_itnim_aen_event event);
/*
* fcs_itnim_sm FCS itnim state machine events
@@ -54,6 +56,7 @@ enum bfa_fcs_itnim_event {
BFA_FCS_ITNIM_SM_INITIATOR = 9, /* rport is initiator */
BFA_FCS_ITNIM_SM_DELETE = 10, /* delete event from rport */
BFA_FCS_ITNIM_SM_PRLO = 11, /* delete event from rport */
+ BFA_FCS_ITNIM_SM_RSP_NOT_SUPP = 12, /* cmd not supported rsp */
};
static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
@@ -178,6 +181,10 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
BFA_FCS_RETRY_TIMEOUT);
break;
+ case BFA_FCS_ITNIM_SM_RSP_NOT_SUPP:
+ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
+ break;
+
case BFA_FCS_ITNIM_SM_OFFLINE:
bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
bfa_fcxp_discard(itnim->fcxp);
@@ -264,6 +271,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Target (WWN = %s) is online for initiator (WWN = %s)\n",
rpwwn_buf, lpwwn_buf);
+ bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE);
break;
case BFA_FCS_ITNIM_SM_OFFLINE:
@@ -300,14 +308,17 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
bfa_itnim_offline(itnim->bfa_itnim);
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
wwn2str(rpwwn_buf, itnim->rport->pwwn);
- if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE)
+ if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"Target (WWN = %s) connectivity lost for "
"initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
- else
+ bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
+ } else {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Target (WWN = %s) offlined by initiator (WWN = %s)\n",
rpwwn_buf, lpwwn_buf);
+ bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
+ }
break;
case BFA_FCS_ITNIM_SM_DELETE:
@@ -377,6 +388,33 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
}
static void
+bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
+ enum bfa_itnim_aen_event event)
+{
+ struct bfa_fcs_rport_s *rport = itnim->rport;
+ struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ /* Don't post events for well known addresses */
+ if (BFA_FCS_PID_IS_WKA(rport->pid))
+ return;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.itnim.vf_id = rport->port->fabric->vf_id;
+ aen_entry->aen_data.itnim.ppwwn = bfa_fcs_lport_get_pwwn(
+ bfa_fcs_get_base_port(itnim->fcs));
+ aen_entry->aen_data.itnim.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
+ aen_entry->aen_data.itnim.rpwwn = rport->pwwn;
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_ITNIM, event);
+}
+
+static void
bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
@@ -447,6 +485,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
itnim->rport->scsi_function =
BFA_RPORT_INITIATOR;
itnim->stats.prli_rsp_acc++;
+ itnim->stats.initiator++;
bfa_sm_send_event(itnim,
BFA_FCS_ITNIM_SM_RSP_OK);
return;
@@ -472,6 +511,10 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_trc(itnim->fcs, ls_rjt->reason_code_expl);
itnim->stats.prli_rsp_rjt++;
+ if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) {
+ bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_NOT_SUPP);
+ return;
+ }
bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR);
}
}
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 1d6be8c1447..d4f951fe753 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -16,6 +16,7 @@
*/
#include "bfad_drv.h"
+#include "bfad_im.h"
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"
#include "bfa_fc.h"
@@ -74,6 +75,7 @@ enum bfa_fcs_lport_event {
BFA_FCS_PORT_SM_OFFLINE = 3,
BFA_FCS_PORT_SM_DELETE = 4,
BFA_FCS_PORT_SM_DELRPORT = 5,
+ BFA_FCS_PORT_SM_STOP = 6,
};
static void bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port,
@@ -86,6 +88,8 @@ static void bfa_fcs_lport_sm_offline(struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event);
static void bfa_fcs_lport_sm_deleting(struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event);
+static void bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
+ enum bfa_fcs_lport_event event);
static void
bfa_fcs_lport_sm_uninit(
@@ -123,6 +127,12 @@ bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
bfa_fcs_lport_deleted(port);
break;
+ case BFA_FCS_PORT_SM_STOP:
+ /* If vport - send completion call back */
+ if (port->vport)
+ bfa_fcs_vport_stop_comp(port->vport);
+ break;
+
case BFA_FCS_PORT_SM_OFFLINE:
break;
@@ -148,6 +158,23 @@ bfa_fcs_lport_sm_online(
bfa_fcs_lport_offline_actions(port);
break;
+ case BFA_FCS_PORT_SM_STOP:
+ __port_action[port->fabric->fab_type].offline(port);
+
+ if (port->num_rports == 0) {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+ /* If vport - send completion call back */
+ if (port->vport)
+ bfa_fcs_vport_stop_comp(port->vport);
+ } else {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
+ list_for_each_safe(qe, qen, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+ }
+ }
+ break;
+
case BFA_FCS_PORT_SM_DELETE:
__port_action[port->fabric->fab_type].offline(port);
@@ -189,6 +216,21 @@ bfa_fcs_lport_sm_offline(
bfa_fcs_lport_online_actions(port);
break;
+ case BFA_FCS_PORT_SM_STOP:
+ if (port->num_rports == 0) {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+ /* If vport - send completion call back */
+ if (port->vport)
+ bfa_fcs_vport_stop_comp(port->vport);
+ } else {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
+ list_for_each_safe(qe, qen, &port->rport_q) {
+ rport = (struct bfa_fcs_rport_s *) qe;
+ bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
+ }
+ }
+ break;
+
case BFA_FCS_PORT_SM_DELETE:
if (port->num_rports == 0) {
bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
@@ -212,6 +254,28 @@ bfa_fcs_lport_sm_offline(
}
static void
+bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
+ enum bfa_fcs_lport_event event)
+{
+ bfa_trc(port->fcs, port->port_cfg.pwwn);
+ bfa_trc(port->fcs, event);
+
+ switch (event) {
+ case BFA_FCS_PORT_SM_DELRPORT:
+ if (port->num_rports == 0) {
+ bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
+ /* If vport - send completion call back */
+ if (port->vport)
+ bfa_fcs_vport_stop_comp(port->vport);
+ }
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, event);
+ }
+}
+
+static void
bfa_fcs_lport_sm_deleting(
struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event)
@@ -237,6 +301,31 @@ bfa_fcs_lport_sm_deleting(
*/
/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port,
+ enum bfa_lport_aen_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
+ aen_entry->aen_data.lport.roles = port->port_cfg.roles;
+ aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
+ bfa_fcs_get_base_port(port->fcs));
+ aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_LPORT, event);
+}
+
+/*
* Send a LS reject
*/
static void
@@ -265,6 +354,40 @@ bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
}
/*
+ * Send a FCCT Reject
+ */
+static void
+bfa_fcs_lport_send_fcgs_rjt(struct bfa_fcs_lport_s *port,
+ struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl)
+{
+ struct fchs_s fchs;
+ struct bfa_fcxp_s *fcxp;
+ struct bfa_rport_s *bfa_rport = NULL;
+ int len;
+ struct ct_hdr_s *rx_cthdr = (struct ct_hdr_s *)(rx_fchs + 1);
+ struct ct_hdr_s *ct_hdr;
+
+ bfa_trc(port->fcs, rx_fchs->d_id);
+ bfa_trc(port->fcs, rx_fchs->s_id);
+
+ fcxp = bfa_fcs_fcxp_alloc(port->fcs);
+ if (!fcxp)
+ return;
+
+ ct_hdr = bfa_fcxp_get_reqbuf(fcxp);
+ ct_hdr->gs_type = rx_cthdr->gs_type;
+ ct_hdr->gs_sub_type = rx_cthdr->gs_sub_type;
+
+ len = fc_gs_rjt_build(&fchs, ct_hdr, rx_fchs->s_id,
+ bfa_fcs_lport_get_fcid(port),
+ rx_fchs->ox_id, reason_code, reason_code_expl);
+
+ bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
+ BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
+ FC_MAX_PDUSZ, 0);
+}
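/*
 * Sketch of filling a CT_IU reject the way bfa_fcs_lport_send_fcgs_rjt()
 * does above: echo the GS type/subtype from the request, set the response
 * code to CT Reject (0x8001 per FC-GS), and carry the reason code and
 * explanation.  The struct below is a simplified stand-in for the
 * driver's ct_hdr_s, not its real layout.
 */
#include <stdint.h>
#include <string.h>

struct ct_hdr {
	uint8_t  rev_id;
	uint8_t  gs_type;
	uint8_t  gs_sub_type;
	uint16_t cmd_rsp_code;	/* big-endian on the wire */
	uint8_t  reason_code;
	uint8_t  exp_code;
};

#define CT_RSP_REJECT	0x8001u

void ct_build_rjt(struct ct_hdr *rjt, const struct ct_hdr *req,
		  uint8_t reason, uint8_t expl)
{
	memset(rjt, 0, sizeof(*rjt));
	rjt->rev_id = req->rev_id;
	rjt->gs_type = req->gs_type;		/* echoed from the request */
	rjt->gs_sub_type = req->gs_sub_type;
	rjt->cmd_rsp_code = CT_RSP_REJECT;	/* swap to BE before sending */
	rjt->reason_code = reason;
	rjt->exp_code = expl;
}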
+
+/*
* Process incoming plogi from a remote port.
*/
static void
@@ -496,6 +619,7 @@ bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Logical port online: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
+ bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE);
bfad->bfad_flags |= BFAD_PORT_ONLINE;
}
@@ -514,14 +638,17 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
if (bfa_sm_cmp_state(port->fabric,
- bfa_fcs_fabric_sm_online) == BFA_TRUE)
+ bfa_fcs_fabric_sm_online) == BFA_TRUE) {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"Logical port lost fabric connectivity: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
- else
+ bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
+ } else {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Logical port taken offline: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
+ bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE);
+ }
list_for_each_safe(qe, qen, &port->rport_q) {
rport = (struct bfa_fcs_rport_s *) qe;
@@ -579,6 +706,7 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Logical port deleted: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
+ bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE);
/* Base port will be deleted by the OS driver */
if (port->vport) {
@@ -647,6 +775,16 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
bfa_fcs_lport_abts_acc(lport, fchs);
return;
}
+
+ if (fchs->type == FC_TYPE_SERVICES) {
+ /*
+ * Unhandled FC-GS frames. Send an FC-CT Reject
+ */
+ bfa_fcs_lport_send_fcgs_rjt(lport, fchs, CT_RSN_NOT_SUPP,
+ CT_NS_EXP_NOADDITIONAL);
+ return;
+ }
+
/*
* look for a matching remote port ID
*/
@@ -835,8 +973,8 @@ bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
lport->fcs = fcs;
lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
lport->vport = vport;
- lport->lp_tag = (vport) ? vport->lps->lp_tag :
- lport->fabric->lps->lp_tag;
+ lport->lp_tag = (vport) ? vport->lps->bfa_tag :
+ lport->fabric->lps->bfa_tag;
INIT_LIST_HEAD(&lport->rport_q);
lport->num_rports = 0;
@@ -866,6 +1004,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"New logical port created: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
+ bfa_fcs_lport_aen_post(lport, BFA_LPORT_AEN_NEW);
bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit);
bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
@@ -1074,6 +1213,8 @@ static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
struct bfa_fcs_fdmi_port_attr_s *port_attr);
+u32 bfa_fcs_fdmi_convert_speed(enum bfa_port_speed pport_speed);
+
/*
* fcs_fdmi_sm FCS FDMI state machine
*/
@@ -1672,7 +1813,7 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
memcpy(attr->value, fcs_hba_attr->driver_version, templen);
templen = fc_roundup(templen, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
- len += templen;;
+ len += templen;
count++;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
@@ -2160,12 +2301,36 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
/*
* Supported Speeds
*/
- port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPORTED_SPEEDS);
+ switch (pport_attr.speed_supported) {
+ case BFA_PORT_SPEED_16GBPS:
+ port_attr->supp_speed =
+ cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_16G);
+ break;
+
+ case BFA_PORT_SPEED_10GBPS:
+ port_attr->supp_speed =
+ cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_10G);
+ break;
+
+ case BFA_PORT_SPEED_8GBPS:
+ port_attr->supp_speed =
+ cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_8G);
+ break;
+
+ case BFA_PORT_SPEED_4GBPS:
+ port_attr->supp_speed =
+ cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_4G);
+ break;
+
+ default:
+ bfa_sm_fault(port->fcs, pport_attr.speed_supported);
+ }
/*
* Current Speed
*/
- port_attr->curr_speed = cpu_to_be32(pport_attr.speed);
+ port_attr->curr_speed = cpu_to_be32(
+ bfa_fcs_fdmi_convert_speed(pport_attr.speed));
/*
* Max PDU Size.
@@ -2186,6 +2351,41 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
}
+/*
+ * Convert BFA speed to FDMI format.
+ */
+u32
+bfa_fcs_fdmi_convert_speed(bfa_port_speed_t pport_speed)
+{
+ u32 ret;
+
+ switch (pport_speed) {
+ case BFA_PORT_SPEED_1GBPS:
+ case BFA_PORT_SPEED_2GBPS:
+ ret = pport_speed;
+ break;
+
+ case BFA_PORT_SPEED_4GBPS:
+ ret = FDMI_TRANS_SPEED_4G;
+ break;
+
+ case BFA_PORT_SPEED_8GBPS:
+ ret = FDMI_TRANS_SPEED_8G;
+ break;
+
+ case BFA_PORT_SPEED_10GBPS:
+ ret = FDMI_TRANS_SPEED_10G;
+ break;
+
+ case BFA_PORT_SPEED_16GBPS:
+ ret = FDMI_TRANS_SPEED_16G;
+ break;
+
+ default:
+ ret = FDMI_TRANS_SPEED_UNKNOWN;
+ }
+ return ret;
+}
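/*
 * Standalone mirror of the speed conversion above, with a small
 * self-check.  The FDMI_* bit values are placeholders standing in for
 * the driver's FDMI_TRANS_SPEED_* definitions.
 */
#include <assert.h>
#include <stdint.h>

enum { SPEED_1G = 1, SPEED_2G = 2, SPEED_4G = 4, SPEED_8G = 8,
       SPEED_10G = 10, SPEED_16G = 16 };

enum { FDMI_4G = 0x08, FDMI_8G = 0x10, FDMI_10G = 0x04,
       FDMI_16G = 0x20, FDMI_UNKNOWN = 0x8000 };

uint32_t fdmi_convert_speed(int speed)
{
	switch (speed) {
	case SPEED_1G:
	case SPEED_2G:
		return (uint32_t)speed;		/* 1G/2G pass through unchanged */
	case SPEED_4G:
		return FDMI_4G;
	case SPEED_8G:
		return FDMI_8G;
	case SPEED_10G:
		return FDMI_10G;
	case SPEED_16G:
		return FDMI_16G;
	default:
		return FDMI_UNKNOWN;
	}
}

int main(void)
{
	assert(fdmi_convert_speed(SPEED_8G) == FDMI_8G);
	assert(fdmi_convert_speed(0) == FDMI_UNKNOWN);
	return 0;
}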
void
bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms)
@@ -2829,7 +3029,8 @@ bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_hton3b(FC_MGMT_SERVER),
bfa_fcs_lport_get_fcid(port), 0,
port->port_cfg.pwwn, port->port_cfg.nwwn,
- bfa_fcport_get_maxfrsize(port->fcs->bfa));
+ bfa_fcport_get_maxfrsize(port->fcs->bfa),
+ bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs,
@@ -3573,7 +3774,7 @@ bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
bfa_trc(port->fcs, port->pid);
-fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
+ fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp) {
port->stats.ns_plogi_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
@@ -3586,7 +3787,8 @@ fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
bfa_hton3b(FC_NAME_SERVER),
bfa_fcs_lport_get_fcid(port), 0,
port->port_cfg.pwwn, port->port_cfg.nwwn,
- bfa_fcport_get_maxfrsize(port->fcs->bfa));
+ bfa_fcport_get_maxfrsize(port->fcs->bfa),
+ bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs,
@@ -4762,8 +4964,8 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
while (qe != qh) {
rport = (struct bfa_fcs_rport_s *) qe;
if ((bfa_ntoh3b(rport->pid) > 0xFFF000) ||
- (bfa_fcs_rport_get_state(rport) ==
- BFA_RPORT_OFFLINE)) {
+ (bfa_fcs_rport_get_state(rport) == BFA_RPORT_OFFLINE) ||
+ (rport->scsi_function != BFA_RPORT_TARGET)) {
qe = bfa_q_next(qe);
continue;
}
@@ -4776,17 +4978,15 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
bfa_fcport_get_ratelim_speed(port->fcs->bfa);
}
- if ((rport_speed == BFA_PORT_SPEED_8GBPS) ||
- (rport_speed > port_speed)) {
- max_speed = rport_speed;
- break;
- } else if (rport_speed > max_speed) {
+ if (rport_speed > max_speed)
max_speed = rport_speed;
- }
qe = bfa_q_next(qe);
}
+ if (max_speed > port_speed)
+ max_speed = port_speed;
+
bfa_trc(fcs, max_speed);
return max_speed;
}
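/*
 * Sketch of the reworked logic above: take the fastest eligible rport
 * speed, then clamp the result to the local port's own speed instead of
 * short-circuiting on 8G as the removed code did.  Plain integers stand
 * in for the driver's speed enums.
 */
#include <stdio.h>

unsigned int max_rport_speed(const unsigned int *rport_speeds, int n,
			     unsigned int port_speed)
{
	unsigned int max = 0;
	int i;

	for (i = 0; i < n; i++)
		if (rport_speeds[i] > max)
			max = rport_speeds[i];

	return max > port_speed ? port_speed : max;	/* clamp to local port */
}

int main(void)
{
	unsigned int speeds[] = { 4, 16, 8 };

	printf("%u\n", max_rport_speed(speeds, 3, 8));	/* prints 8 */
	return 0;
}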
@@ -4918,6 +5118,7 @@ enum bfa_fcs_vport_event {
BFA_FCS_VPORT_SM_DELCOMP = 11, /* lport delete completion */
BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12, /* Dup wwn error */
BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */
+ BFA_FCS_VPORT_SM_STOPCOMP = 14, /* vport stop completion */
};
static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
@@ -4930,6 +5131,8 @@ static void bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
@@ -4940,6 +5143,10 @@ static void bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
+static void bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event);
static struct bfa_sm_table_s vport_sm_table[] = {
{BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
@@ -4947,6 +5154,7 @@ static struct bfa_sm_table_s vport_sm_table[] = {
{BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
{BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC},
{BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY},
+ {BFA_SM(bfa_fcs_vport_sm_fdisc_rsp_wait), BFA_FCS_VPORT_FDISC_RSP_WAIT},
{BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE},
{BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING},
{BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP},
@@ -5042,6 +5250,11 @@ bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
bfa_fcs_vport_do_fdisc(vport);
break;
+ case BFA_FCS_VPORT_SM_STOP:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+ bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
+ break;
+
case BFA_FCS_VPORT_SM_OFFLINE:
/*
* This can happen if the vport couldn't be initialized
@@ -5070,9 +5283,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
switch (event) {
case BFA_FCS_VPORT_SM_DELETE:
- bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
- bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
- bfa_fcs_lport_delete(&vport->lport);
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_rsp_wait);
break;
case BFA_FCS_VPORT_SM_OFFLINE:
@@ -5140,6 +5351,41 @@ bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
}
/*
+ * FDISC is in progress and we got a vport delete request -
+ * this is a wait state; we wait for the FDISC response and
+ * transition to the appropriate state based on the response status.
+ */
+static void
+bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_RSP_OK:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
+ bfa_fcs_lport_delete(&vport->lport);
+ break;
+
+ case BFA_FCS_VPORT_SM_DELETE:
+ break;
+
+ case BFA_FCS_VPORT_SM_OFFLINE:
+ case BFA_FCS_VPORT_SM_RSP_ERROR:
+ case BFA_FCS_VPORT_SM_RSP_FAILED:
+ case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+ bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
+ bfa_fcs_lport_delete(&vport->lport);
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+/*
* Vport is online (FDISC is complete).
*/
static void
@@ -5155,6 +5401,11 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
bfa_fcs_lport_delete(&vport->lport);
break;
+ case BFA_FCS_VPORT_SM_STOP:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_stopping);
+ bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
+ break;
+
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
@@ -5167,6 +5418,32 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
}
/*
+ * Vport is being stopped - awaiting lport stop completion to send
+ * LOGO to fabric.
+ */
+static void
+bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_STOPCOMP:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo_for_stop);
+ bfa_fcs_vport_do_logo(vport);
+ break;
+
+ case BFA_FCS_VPORT_SM_OFFLINE:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+/*
* Vport is being deleted - awaiting lport delete completion to send
* LOGO to fabric.
*/
@@ -5236,6 +5513,10 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
bfa_fcs_vport_free(vport);
break;
+ case BFA_FCS_VPORT_SM_STOPCOMP:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
+ break;
+
case BFA_FCS_VPORT_SM_DELETE:
break;
@@ -5245,6 +5526,34 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
}
/*
+ * LOGO is sent to fabric. Vport stop is in progress. Lport stop cleanup
+ * is done.
+ */
+static void
+bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
+ enum bfa_fcs_vport_event event)
+{
+ bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
+ bfa_trc(__vport_fcs(vport), event);
+
+ switch (event) {
+ case BFA_FCS_VPORT_SM_OFFLINE:
+ bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
+ /*
+ * !!! fall through !!!
+ */
+
+ case BFA_FCS_VPORT_SM_RSP_OK:
+ case BFA_FCS_VPORT_SM_RSP_ERROR:
+ bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
+ break;
+
+ default:
+ bfa_sm_fault(__vport_fcs(vport), event);
+ }
+}
+
+/*
* LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
* is done.
*/
@@ -5282,6 +5591,31 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
* fcs_vport_private FCS virtual port private functions
*/
/*
+ * Send AEN notification
+ */
+static void
+bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port,
+ enum bfa_lport_aen_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
+ aen_entry->aen_data.lport.roles = port->port_cfg.roles;
+ aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
+ bfa_fcs_get_base_port(port->fcs));
+ aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_LPORT, event);
+}
+
+/*
* This routine will be called to send a FDISC command.
*/
static void
@@ -5308,8 +5642,11 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
- else
+ else {
+ bfa_fcs_vport_aen_post(&vport->lport,
+ BFA_LPORT_AEN_NPIV_DUP_WWN);
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
+ }
break;
case FC_LS_RJT_EXP_INSUFF_RES:
@@ -5319,11 +5656,17 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
*/
if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
- else
+ else {
+ bfa_fcs_vport_aen_post(&vport->lport,
+ BFA_LPORT_AEN_NPIV_FABRIC_MAX);
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
+ }
break;
default:
+ if (vport->fdisc_retries == 0)
+ bfa_fcs_vport_aen_post(&vport->lport,
+ BFA_LPORT_AEN_NPIV_UNKNOWN);
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
}
}
@@ -5391,7 +5734,10 @@ void
bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
{
vport->vport_stats.fab_online++;
- bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
+ if (bfa_fcs_fabric_npiv_capable(__vport_fabric(vport)))
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
+ else
+ vport->vport_stats.fab_no_npiv++;
}
/*
@@ -5422,6 +5768,15 @@ bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
}
/*
+ * Stop completion callback from associated lport
+ */
+void
+bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport)
+{
+ bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOPCOMP);
+}
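/*
 * Sketch of the completion hand-off above: when the lport finishes its
 * stop cleanup it calls back into the owning vport, which feeds a
 * STOPCOMP event into its own state machine.  Types and the event value
 * are illustrative stand-ins for the driver's bfa_sm_send_event() usage.
 */
struct vport;
typedef void (*vport_state_fn)(struct vport *v, int event);

struct vport {
	vport_state_fn state;
};

#define VPORT_EVT_STOPCOMP	14

void vport_stop_comp(struct vport *v)
{
	/* same shape as bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOPCOMP) */
	v->state(v, VPORT_EVT_STOPCOMP);
}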
+
+/*
* Delete completion callback from associated lport
*/
void
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index caaee6f0693..52628d5d3c9 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -20,6 +20,7 @@
*/
#include "bfad_drv.h"
+#include "bfad_im.h"
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"
@@ -262,6 +263,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_PLOGI_RCVD:
+ case RPSM_EVENT_PLOGI_COMP:
case RPSM_EVENT_SCN:
/*
* Ignore, SCN is possibly online notification.
@@ -470,6 +472,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_PRLO_RCVD:
+ case RPSM_EVENT_PLOGI_COMP:
break;
case RPSM_EVENT_LOGO_RCVD:
@@ -484,9 +487,9 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
break;
case RPSM_EVENT_PLOGI_RCVD:
- bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
+ rport->plogi_pending = BFA_TRUE;
+ bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
- bfa_fcs_rport_send_plogiacc(rport, NULL);
break;
case RPSM_EVENT_DELETE:
@@ -891,6 +894,18 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
switch (event) {
case RPSM_EVENT_HCB_OFFLINE:
+ if (bfa_fcs_lport_is_online(rport->port) &&
+ (rport->plogi_pending)) {
+ rport->plogi_pending = BFA_FALSE;
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_plogiacc_sending);
+ bfa_fcs_rport_send_plogiacc(rport, NULL);
+ break;
+ }
+ /*
+ * !! fall through !!
+ */
+
case RPSM_EVENT_ADDRESS_CHANGE:
if (bfa_fcs_lport_is_online(rport->port)) {
if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
@@ -921,6 +936,8 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
case RPSM_EVENT_SCN:
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
+ case RPSM_EVENT_PLOGI_RCVD:
+ case RPSM_EVENT_LOGO_IMP:
/*
* Ignore, already offline.
*/
@@ -957,10 +974,18 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
*/
if (bfa_fcs_lport_is_online(rport->port) &&
(!BFA_FCS_PID_IS_WKA(rport->pid))) {
- bfa_sm_set_state(rport,
- bfa_fcs_rport_sm_nsdisc_sending);
- rport->ns_retries = 0;
- bfa_fcs_rport_send_nsdisc(rport, NULL);
+ if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_nsdisc_sending);
+ rport->ns_retries = 0;
+ bfa_fcs_rport_send_nsdisc(rport, NULL);
+ } else {
+ /* For N2N Direct Attach, try to re-login */
+ bfa_sm_set_state(rport,
+ bfa_fcs_rport_sm_plogi_sending);
+ rport->plogi_retries = 0;
+ bfa_fcs_rport_send_plogi(rport, NULL);
+ }
} else {
/*
* if it is not a well known address, reset the
@@ -1356,7 +1381,8 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
bfa_fcs_lport_get_fcid(port), 0,
port->port_cfg.pwwn, port->port_cfg.nwwn,
- bfa_fcport_get_maxfrsize(port->fcs->bfa));
+ bfa_fcport_get_maxfrsize(port->fcs->bfa),
+ bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
@@ -1476,7 +1502,8 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
rport->pid, bfa_fcs_lport_get_fcid(port),
rport->reply_oxid, port->port_cfg.pwwn,
port->port_cfg.nwwn,
- bfa_fcport_get_maxfrsize(port->fcs->bfa));
+ bfa_fcport_get_maxfrsize(port->fcs->bfa),
+ bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
@@ -2015,6 +2042,35 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
}
static void
+bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
+ enum bfa_rport_aen_event event,
+ struct bfa_rport_aen_data_s *data)
+{
+ struct bfa_fcs_lport_s *port = rport->port;
+ struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ if (event == BFA_RPORT_AEN_QOS_PRIO)
+ aen_entry->aen_data.rport.priv.qos = data->priv.qos;
+ else if (event == BFA_RPORT_AEN_QOS_FLOWID)
+ aen_entry->aen_data.rport.priv.qos = data->priv.qos;
+
+ aen_entry->aen_data.rport.vf_id = rport->port->fabric->vf_id;
+ aen_entry->aen_data.rport.ppwwn = bfa_fcs_lport_get_pwwn(
+ bfa_fcs_get_base_port(rport->fcs));
+ aen_entry->aen_data.rport.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
+ aen_entry->aen_data.rport.rpwwn = rport->pwwn;
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
+ BFA_AEN_CAT_RPORT, event);
+}
+
+static void
bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
{
struct bfa_fcs_lport_s *port = rport->port;
@@ -2024,6 +2080,11 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
rport->stats.onlines++;
+ if ((!rport->pid) || (!rport->pwwn)) {
+ bfa_trc(rport->fcs, rport->pid);
+ bfa_sm_fault(rport->fcs, rport->pid);
+ }
+
if (bfa_fcs_lport_is_initiator(port)) {
bfa_fcs_itnim_rport_online(rport->itnim);
if (!BFA_FCS_PID_IS_WKA(rport->pid))
@@ -2032,10 +2093,12 @@ bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
wwn2str(rpwwn_buf, rport->pwwn);
- if (!BFA_FCS_PID_IS_WKA(rport->pid))
+ if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Remote port (WWN = %s) online for logical port (WWN = %s)\n",
rpwwn_buf, lpwwn_buf);
+ bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL);
+ }
}
static void
@@ -2047,20 +2110,26 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
char rpwwn_buf[BFA_STRING_32];
rport->stats.offlines++;
+ rport->plogi_pending = BFA_FALSE;
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
wwn2str(rpwwn_buf, rport->pwwn);
if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
- if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE)
+ if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"Remote port (WWN = %s) connectivity lost for "
"logical port (WWN = %s)\n",
rpwwn_buf, lpwwn_buf);
- else
+ bfa_fcs_rport_aen_post(rport,
+ BFA_RPORT_AEN_DISCONNECT, NULL);
+ } else {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Remote port (WWN = %s) offlined by "
"logical port (WWN = %s)\n",
rpwwn_buf, lpwwn_buf);
+ bfa_fcs_rport_aen_post(rport,
+ BFA_RPORT_AEN_OFFLINE, NULL);
+ }
}
if (bfa_fcs_lport_is_initiator(port)) {
@@ -2120,7 +2189,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred);
bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
- port->fabric->bb_credit);
+ port->fabric->bb_credit, 0);
}
}
@@ -2233,22 +2302,6 @@ bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
}
-static int
-wwn_compare(wwn_t wwn1, wwn_t wwn2)
-{
- u8 *b1 = (u8 *) &wwn1;
- u8 *b2 = (u8 *) &wwn2;
- int i;
-
- for (i = 0; i < sizeof(wwn_t); i++) {
- if (b1[i] < b2[i])
- return -1;
- if (b1[i] > b2[i])
- return 1;
- }
- return 0;
-}
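/*
 * The removed wwn_compare() above did a byte-wise, in-memory comparison
 * of two WWNs; the same ordering can be obtained with memcmp(), shown
 * here for reference (memcmp returns the same sign, not necessarily
 * exactly -1/0/1).  The patch drops the helper because incoming PLOGIs
 * are now always accepted instead of being arbitrated by WWN.
 */
#include <stdint.h>
#include <string.h>

typedef uint64_t wwn_t;		/* stand-in for the driver's wwn_t */

int wwn_compare(wwn_t wwn1, wwn_t wwn2)
{
	/* compares the bytes in memory order, like the removed loop */
	return memcmp(&wwn1, &wwn2, sizeof(wwn_t));
}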
-
/*
* Called by bport/vport to handle PLOGI received from an existing
* remote port.
@@ -2266,19 +2319,8 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
rport->reply_oxid = rx_fchs->ox_id;
bfa_trc(rport->fcs, rport->reply_oxid);
- /*
- * In Switched fabric topology,
- * PLOGI to each other. If our pwwn is smaller, ignore it,
- * if it is not a well known address.
- * If the link topology is N2N,
- * this Plogi should be accepted.
- */
- if ((wwn_compare(rport->port->port_cfg.pwwn, rport->pwwn) == -1) &&
- (bfa_fcs_fabric_is_switched(rport->port->fabric)) &&
- (!BFA_FCS_PID_IS_WKA(rport->pid))) {
- bfa_trc(rport->fcs, rport->pid);
- return;
- }
+ rport->pid = rx_fchs->s_id;
+ bfa_trc(rport->fcs, rport->pid);
rport->stats.plogi_rcvd++;
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
@@ -2361,8 +2403,11 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg,
struct bfa_rport_qos_attr_s new_qos_attr)
{
struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+ struct bfa_rport_aen_data_s aen_data;
bfa_trc(rport->fcs, rport->pwwn);
+ aen_data.priv.qos = new_qos_attr;
+ bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
}
/*
@@ -2385,8 +2430,11 @@ bfa_cb_rport_qos_scn_prio(void *cbarg,
struct bfa_rport_qos_attr_s new_qos_attr)
{
struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
+ struct bfa_rport_aen_data_s aen_data;
bfa_trc(rport->fcs, rport->pwwn);
+ aen_data.priv.qos = new_qos_attr;
+ bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data);
}
/*
@@ -2531,7 +2579,45 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD);
}
-
+void
+bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
+ struct bfa_rport_attr_s *rport_attr)
+{
+ struct bfa_rport_qos_attr_s qos_attr;
+ struct bfa_fcs_lport_s *port = rport->port;
+ bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
+
+ memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
+ memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s));
+
+ rport_attr->pid = rport->pid;
+ rport_attr->pwwn = rport->pwwn;
+ rport_attr->nwwn = rport->nwwn;
+ rport_attr->cos_supported = rport->fc_cos;
+ rport_attr->df_sz = rport->maxfrsize;
+ rport_attr->state = bfa_fcs_rport_get_state(rport);
+ rport_attr->fc_cos = rport->fc_cos;
+ rport_attr->cisc = rport->cisc;
+ rport_attr->scsi_function = rport->scsi_function;
+ rport_attr->curr_speed = rport->rpf.rpsc_speed;
+ rport_attr->assigned_speed = rport->rpf.assigned_speed;
+
+ qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
+ qos_attr.qos_flow_id =
+ cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
+ rport_attr->qos_attr = qos_attr;
+
+ rport_attr->trl_enforced = BFA_FALSE;
+ if (bfa_fcport_is_ratelim(port->fcs->bfa) &&
+ (rport->scsi_function == BFA_RPORT_TARGET)) {
+ if (rport_speed == BFA_PORT_SPEED_UNKNOWN)
+ rport_speed =
+ bfa_fcport_get_ratelim_speed(rport->fcs->bfa);
+
+ if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port))
+ rport_attr->trl_enforced = BFA_TRUE;
+ }
+}
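/*
 * Sketch of the target rate-limit (TRL) decision filled in above: a
 * remote target is flagged as rate-limit enforced when rate limiting is
 * enabled and its speed (or the configured default, if unknown) is below
 * the fastest target speed seen on the local port.  Plain ints replace
 * the driver's speed enums.
 */
#include <stdbool.h>

bool trl_enforced(bool ratelim_enabled, bool is_target,
		  unsigned int rport_speed, unsigned int default_speed,
		  unsigned int port_max_speed)
{
	if (!ratelim_enabled || !is_target)
		return false;

	if (rport_speed == 0)		/* unknown: use configured default */
		rport_speed = default_speed;

	return rport_speed < port_max_speed;
}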
/*
* Remote port implementation.
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index 977e681ec80..ea24d4c6e67 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -17,14 +17,14 @@
#include "bfad_drv.h"
#include "bfa_modules.h"
-#include "bfi_cbreg.h"
+#include "bfi_reg.h"
void
bfa_hwcb_reginit(struct bfa_s *bfa)
{
struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
- int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
+ int fn = bfa_ioc_pcifn(&bfa->ioc);
if (fn == 0) {
bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
@@ -33,29 +33,6 @@ bfa_hwcb_reginit(struct bfa_s *bfa)
bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK);
}
-
- for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
- /*
- * CPE registers
- */
- q = CPE_Q_NUM(fn, i);
- bfa_regs->cpe_q_pi[i] = (kva + CPE_Q_PI(q));
- bfa_regs->cpe_q_ci[i] = (kva + CPE_Q_CI(q));
- bfa_regs->cpe_q_depth[i] = (kva + CPE_Q_DEPTH(q));
-
- /*
- * RME registers
- */
- q = CPE_Q_NUM(fn, i);
- bfa_regs->rme_q_pi[i] = (kva + RME_Q_PI(q));
- bfa_regs->rme_q_ci[i] = (kva + RME_Q_CI(q));
- bfa_regs->rme_q_depth[i] = (kva + RME_Q_DEPTH(q));
- }
-}
-
-void
-bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
-{
}
static void
@@ -65,16 +42,36 @@ bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
bfa->iocfc.bfa_regs.intr_status);
}
-void
-bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
+/*
+ * Actions to respond to an RME interrupt on the Crossbow ASIC:
+ * - Write 1 to Interrupt Status register
+ * INTX - done in bfa_intx()
+ * MSIX - done in bfa_hwcb_rspq_ack_msix()
+ * - Update CI (only if new CI)
+ */
+static void
+bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)
{
+ writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
+ bfa->iocfc.bfa_regs.intr_status);
+
+ if (bfa_rspq_ci(bfa, rspq) == ci)
+ return;
+
+ bfa_rspq_ci(bfa, rspq) = ci;
+ writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+ mmiowb();
}
-static void
-bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
+void
+bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
{
- writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
- bfa->iocfc.bfa_regs.intr_status);
+ if (bfa_rspq_ci(bfa, rspq) == ci)
+ return;
+
+ bfa_rspq_ci(bfa, rspq) = ci;
+ writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+ mmiowb();
}
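/*
 * Sketch of the consumer-index acknowledge pattern introduced above:
 * skip the CI write entirely when the index has not moved, otherwise
 * record the new CI and write it to the queue's CI register.  writel()
 * is stubbed with a volatile pointer; in the driver the write targets a
 * memory-mapped register and is ordered with mmiowb() where needed.
 */
#include <stdint.h>

struct rspq {
	uint32_t ci;			/* software copy of consumer index */
	volatile uint32_t *ci_reg;	/* memory-mapped CI register */
};

static void writel_stub(uint32_t val, volatile uint32_t *reg)
{
	*reg = val;
}

void rspq_ack(struct rspq *q, uint32_t ci)
{
	if (q->ci == ci)		/* nothing consumed since last ack */
		return;

	q->ci = ci;
	writel_stub(ci, q->ci_reg);
}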
void
@@ -104,43 +101,71 @@ bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
}
/*
+ * Dummy interrupt handler for handling spurious interrupts.
+ */
+static void
+bfa_hwcb_msix_dummy(struct bfa_s *bfa, int vec)
+{
+}
+
+/*
* No special setup required for crossbow -- vector assignments are implicit.
*/
void
bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
{
- int i;
-
WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
bfa->msix.nvecs = nvecs;
- if (nvecs == 1) {
- for (i = 0; i < BFA_MSIX_CB_MAX; i++)
+ bfa_hwcb_msix_uninstall(bfa);
+}
+
+void
+bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa)
+{
+ int i;
+
+ if (bfa->msix.nvecs == 0)
+ return;
+
+ if (bfa->msix.nvecs == 1) {
+ for (i = BFI_MSIX_CPE_QMIN_CB; i < BFI_MSIX_CB_MAX; i++)
bfa->msix.handler[i] = bfa_msix_all;
return;
}
- for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q7; i++)
- bfa->msix.handler[i] = bfa_msix_reqq;
-
- for (i = BFA_MSIX_RME_Q0; i <= BFA_MSIX_RME_Q7; i++)
- bfa->msix.handler[i] = bfa_msix_rspq;
-
- for (; i < BFA_MSIX_CB_MAX; i++)
+ for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++)
bfa->msix.handler[i] = bfa_msix_lpu_err;
}
-/*
- * Crossbow -- dummy, interrupts are masked
- */
void
-bfa_hwcb_msix_install(struct bfa_s *bfa)
+bfa_hwcb_msix_queue_install(struct bfa_s *bfa)
{
+ int i;
+
+ if (bfa->msix.nvecs == 0)
+ return;
+
+ if (bfa->msix.nvecs == 1) {
+ for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
+ bfa->msix.handler[i] = bfa_msix_all;
+ return;
+ }
+
+ for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++)
+ bfa->msix.handler[i] = bfa_msix_reqq;
+
+ for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
+ bfa->msix.handler[i] = bfa_msix_rspq;
}
void
bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
{
+ int i;
+
+ for (i = 0; i < BFI_MSIX_CB_MAX; i++)
+ bfa->msix.handler[i] = bfa_hwcb_msix_dummy;
}
/*
@@ -149,13 +174,18 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
void
bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
{
- bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
- bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
+ if (msix) {
+ bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
+ bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
+ } else {
+ bfa->iocfc.hwif.hw_reqq_ack = NULL;
+ bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
+ }
}
void
bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
{
- *start = BFA_MSIX_RME_Q0;
- *end = BFA_MSIX_RME_Q7;
+ *start = BFI_MSIX_RME_QMIN_CB;
+ *end = BFI_MSIX_RME_QMAX_CB;
}
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 21018d98a07..637527f48b4 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -17,29 +17,10 @@
#include "bfad_drv.h"
#include "bfa_modules.h"
-#include "bfi_ctreg.h"
+#include "bfi_reg.h"
BFA_TRC_FILE(HAL, IOCFC_CT);
-static u32 __ct_msix_err_vec_reg[] = {
- HOST_MSIX_ERR_INDEX_FN0,
- HOST_MSIX_ERR_INDEX_FN1,
- HOST_MSIX_ERR_INDEX_FN2,
- HOST_MSIX_ERR_INDEX_FN3,
-};
-
-static void
-bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
-{
- int fn = bfa_ioc_pcifn(&bfa->ioc);
- void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
-
- if (msix)
- writel(vec, kva + __ct_msix_err_vec_reg[fn]);
- else
- writel(0, kva + __ct_msix_err_vec_reg[fn]);
-}
-
/*
* Dummy interrupt handler for handling spurious interrupt during chip-reinit.
*/
@@ -53,7 +34,7 @@ bfa_hwct_reginit(struct bfa_s *bfa)
{
struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
- int i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
+ int fn = bfa_ioc_pcifn(&bfa->ioc);
if (fn == 0) {
bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
@@ -62,26 +43,16 @@ bfa_hwct_reginit(struct bfa_s *bfa)
bfa_regs->intr_status = (kva + HOSTFN1_INT_STATUS);
bfa_regs->intr_mask = (kva + HOSTFN1_INT_MSK);
}
+}
- for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
- /*
- * CPE registers
- */
- q = CPE_Q_NUM(fn, i);
- bfa_regs->cpe_q_pi[i] = (kva + CPE_PI_PTR_Q(q << 5));
- bfa_regs->cpe_q_ci[i] = (kva + CPE_CI_PTR_Q(q << 5));
- bfa_regs->cpe_q_depth[i] = (kva + CPE_DEPTH_Q(q << 5));
- bfa_regs->cpe_q_ctrl[i] = (kva + CPE_QCTRL_Q(q << 5));
-
- /*
- * RME registers
- */
- q = CPE_Q_NUM(fn, i);
- bfa_regs->rme_q_pi[i] = (kva + RME_PI_PTR_Q(q << 5));
- bfa_regs->rme_q_ci[i] = (kva + RME_CI_PTR_Q(q << 5));
- bfa_regs->rme_q_depth[i] = (kva + RME_DEPTH_Q(q << 5));
- bfa_regs->rme_q_ctrl[i] = (kva + RME_QCTRL_Q(q << 5));
- }
+void
+bfa_hwct2_reginit(struct bfa_s *bfa)
+{
+ struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs;
+ void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
+
+ bfa_regs->intr_status = (kva + CT2_HOSTFN_INT_STATUS);
+ bfa_regs->intr_mask = (kva + CT2_HOSTFN_INTR_MASK);
}
void
@@ -93,22 +64,45 @@ bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
}
+/*
+ * Actions to respond to an RME interrupt on the Catapult ASIC:
+ * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
+ * - Acknowledge by writing to RME Queue Control register
+ * - Update CI
+ */
void
-bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
+bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
{
u32 r32;
r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
+
+ bfa_rspq_ci(bfa, rspq) = ci;
+ writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+ mmiowb();
+}
+
+/*
+ * Actions to respond to an RME interrupt on the Catapult2 ASIC:
+ * - Write 1 to Interrupt Status register (INTx only - done in bfa_intx())
+ * - Update CI
+ */
+void
+bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
+{
+ bfa_rspq_ci(bfa, rspq) = ci;
+ writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);
+ mmiowb();
}
void
bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
u32 *num_vecs, u32 *max_vec_bit)
{
- *msix_vecs_bmap = (1 << BFA_MSIX_CT_MAX) - 1;
- *max_vec_bit = (1 << (BFA_MSIX_CT_MAX - 1));
- *num_vecs = BFA_MSIX_CT_MAX;
+ *msix_vecs_bmap = (1 << BFI_MSIX_CT_MAX) - 1;
+ *max_vec_bit = (1 << (BFI_MSIX_CT_MAX - 1));
+ *num_vecs = BFI_MSIX_CT_MAX;
}
/*
@@ -117,7 +111,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
void
bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
{
- WARN_ON((nvecs != 1) && (nvecs != BFA_MSIX_CT_MAX));
+ WARN_ON((nvecs != 1) && (nvecs != BFI_MSIX_CT_MAX));
bfa_trc(bfa, nvecs);
bfa->msix.nvecs = nvecs;
@@ -125,7 +119,19 @@ bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
}
void
-bfa_hwct_msix_install(struct bfa_s *bfa)
+bfa_hwct_msix_ctrl_install(struct bfa_s *bfa)
+{
+ if (bfa->msix.nvecs == 0)
+ return;
+
+ if (bfa->msix.nvecs == 1)
+ bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all;
+ else
+ bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err;
+}
+
+void
+bfa_hwct_msix_queue_install(struct bfa_s *bfa)
{
int i;
@@ -133,19 +139,16 @@ bfa_hwct_msix_install(struct bfa_s *bfa)
return;
if (bfa->msix.nvecs == 1) {
- for (i = 0; i < BFA_MSIX_CT_MAX; i++)
+ for (i = BFI_MSIX_CPE_QMIN_CT; i < BFI_MSIX_CT_MAX; i++)
bfa->msix.handler[i] = bfa_msix_all;
return;
}
- for (i = BFA_MSIX_CPE_Q0; i <= BFA_MSIX_CPE_Q3; i++)
+ for (i = BFI_MSIX_CPE_QMIN_CT; i <= BFI_MSIX_CPE_QMAX_CT; i++)
bfa->msix.handler[i] = bfa_msix_reqq;
- for (; i <= BFA_MSIX_RME_Q3; i++)
+ for (i = BFI_MSIX_RME_QMIN_CT; i <= BFI_MSIX_RME_QMAX_CT; i++)
bfa->msix.handler[i] = bfa_msix_rspq;
-
- WARN_ON(i != BFA_MSIX_LPU_ERR);
- bfa->msix.handler[BFA_MSIX_LPU_ERR] = bfa_msix_lpu_err;
}
void
@@ -153,7 +156,7 @@ bfa_hwct_msix_uninstall(struct bfa_s *bfa)
{
int i;
- for (i = 0; i < BFA_MSIX_CT_MAX; i++)
+ for (i = 0; i < BFI_MSIX_CT_MAX; i++)
bfa->msix.handler[i] = bfa_hwct_msix_dummy;
}
@@ -164,13 +167,12 @@ void
bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
{
bfa_trc(bfa, 0);
- bfa_hwct_msix_lpu_err_set(bfa, msix, BFA_MSIX_LPU_ERR);
bfa_ioc_isr_mode_set(&bfa->ioc, msix);
}
void
bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end)
{
- *start = BFA_MSIX_RME_Q0;
- *end = BFA_MSIX_RME_Q3;
+ *start = BFI_MSIX_RME_QMIN_CT;
+ *end = BFI_MSIX_RME_QMAX_CT;
}
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 6c7e0339dda..1ac5aecf25a 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -16,8 +16,9 @@
*/
#include "bfad_drv.h"
+#include "bfad_im.h"
#include "bfa_ioc.h"
-#include "bfi_ctreg.h"
+#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"
@@ -29,8 +30,8 @@ BFA_TRC_FILE(CNA, IOC);
#define BFA_IOC_TOV 3000 /* msecs */
#define BFA_IOC_HWSEM_TOV 500 /* msecs */
#define BFA_IOC_HB_TOV 500 /* msecs */
-#define BFA_IOC_HWINIT_MAX 5
#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
+#define BFA_IOC_POLL_TOV BFA_TIMER_FREQ
#define bfa_ioc_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
@@ -79,14 +80,17 @@ bfa_boolean_t bfa_auto_recover = BFA_TRUE;
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
+static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
-static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
+static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
+ enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
@@ -105,11 +109,12 @@ enum ioc_event {
IOC_E_ENABLED = 5, /* f/w enabled */
IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */
IOC_E_DISABLED = 7, /* f/w disabled */
- IOC_E_INITFAILED = 8, /* failure notice by iocpf sm */
- IOC_E_PFFAILED = 9, /* failure notice by iocpf sm */
- IOC_E_HBFAIL = 10, /* heartbeat failure */
- IOC_E_HWERROR = 11, /* hardware error interrupt */
- IOC_E_TIMEOUT = 12, /* timeout */
+ IOC_E_PFFAILED = 8, /* failure notice by iocpf sm */
+ IOC_E_HBFAIL = 9, /* heartbeat failure */
+ IOC_E_HWERROR = 10, /* hardware error interrupt */
+ IOC_E_TIMEOUT = 11, /* timeout */
+ IOC_E_HWFAILED = 12, /* PCI mapping failure notice */
+ IOC_E_FWRSP_ACQ_ADDR = 13, /* Acquiring address */
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
@@ -121,6 +126,8 @@ bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
+bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);
static struct bfa_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
@@ -132,6 +139,8 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
+ {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
+ {BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
};
/*
@@ -143,9 +152,9 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
-#define bfa_iocpf_recovery_timer_start(__ioc) \
+#define bfa_iocpf_poll_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
- bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)
+ bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
#define bfa_sem_timer_start(__ioc) \
bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
@@ -157,6 +166,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
*/
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
+static void bfa_iocpf_poll_timeout(void *ioc_arg);
/*
* IOCPF state machine events
@@ -173,6 +183,7 @@ enum iocpf_event {
IOCPF_E_GETATTRFAIL = 9, /* init fail notice by ioc sm */
IOCPF_E_SEMLOCKED = 10, /* h/w semaphore is locked */
IOCPF_E_TIMEOUT = 11, /* f/w response timeout */
+ IOCPF_E_SEM_ERROR = 12, /* h/w sem mapping error */
};
/*
@@ -314,11 +325,16 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
/* !!! fall through !!! */
case IOC_E_HWERROR:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
break;
+ case IOC_E_HWFAILED:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+ break;
+
case IOC_E_DISABLE:
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
@@ -356,17 +372,23 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
case IOC_E_FWRSP_GETATTR:
bfa_ioc_timer_stop(ioc);
bfa_ioc_check_attr_wwns(ioc);
+ bfa_ioc_hb_monitor(ioc);
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
break;
+ case IOC_E_FWRSP_ACQ_ADDR:
+ bfa_ioc_timer_stop(ioc);
+ bfa_ioc_hb_monitor(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
break;
+
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_ioc_timer_stop(ioc);
/* !!! fall through !!! */
case IOC_E_TIMEOUT:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
- bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
break;
@@ -384,6 +406,50 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
}
}
+/*
+ * Acquiring address from fabric (entry function)
+ */
+static void
+bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
+{
+}
+
+/*
+ * Acquiring address from the fabric
+ */
+static void
+bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_FWRSP_GETATTR:
+ bfa_ioc_check_attr_wwns(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
+ break;
+
+ case IOC_E_PFFAILED:
+ case IOC_E_HWERROR:
+ bfa_hb_timer_stop(ioc);
+ case IOC_E_HBFAIL:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+ if (event != IOC_E_PFFAILED)
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
+ break;
+
+ case IOC_E_DISABLE:
+ bfa_hb_timer_stop(ioc);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
+ break;
+
+ case IOC_E_ENABLE:
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
@@ -391,8 +457,9 @@ bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
- bfa_ioc_hb_monitor(ioc);
+ bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}
static void
@@ -414,13 +481,13 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
bfa_hb_timer_stop(ioc);
/* !!! fall through !!! */
case IOC_E_HBFAIL:
- bfa_ioc_fail_notify(ioc);
-
if (ioc->iocpf.auto_recover)
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
else
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+ bfa_ioc_fail_notify(ioc);
+
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
break;
@@ -437,6 +504,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}
/*
@@ -461,6 +529,11 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
break;
+ case IOC_E_HWFAILED:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
+ bfa_ioc_disable_comp(ioc);
+ break;
+
default:
bfa_sm_fault(ioc, event);
}
@@ -525,12 +598,14 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
* Initialization retry failed.
*/
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
if (event != IOC_E_PFFAILED)
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
break;
- case IOC_E_INITFAILED:
- bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
+ case IOC_E_HWFAILED:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
break;
case IOC_E_ENABLE:
@@ -590,6 +665,35 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
}
}
+static void
+bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
+{
+ bfa_trc(ioc, 0);
+}
+
+static void
+bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
+{
+ bfa_trc(ioc, event);
+
+ switch (event) {
+ case IOC_E_ENABLE:
+ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+ break;
+
+ case IOC_E_DISABLE:
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ break;
+
+ case IOC_E_DETACH:
+ bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
+ break;
+
+ default:
+ bfa_sm_fault(ioc, event);
+ }
+}
+
/*
* IOCPF State Machine
*/
@@ -600,7 +704,7 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
- iocpf->retry_count = 0;
+ iocpf->fw_mismatch_notified = BFA_FALSE;
iocpf->auto_recover = bfa_auto_recover;
}
@@ -633,6 +737,28 @@ bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
+ struct bfi_ioc_image_hdr_s fwhdr;
+ u32 fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
+
+ /* h/w sem init */
+ if (fwstate == BFI_IOC_UNINIT)
+ goto sem_get;
+
+ bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
+
+ if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
+ goto sem_get;
+
+ bfa_trc(iocpf->ioc, fwstate);
+ bfa_trc(iocpf->ioc, fwhdr.exec);
+ writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
+
+ /*
+ * Try to lock and then unlock the semaphore.
+ */
+ readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
+ writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
+sem_get:
bfa_ioc_hw_sem_get(iocpf->ioc);
}
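/*
 * Sketch of the firmware-state check added to fwcheck_entry above: if the
 * firmware left a non-UNINIT state with a non-normal boot image, force
 * UNINIT and then do a read-then-write-1 on the hardware semaphore so a
 * stale lock from the previous owner is released before acquiring it.
 * The constants and register accesses are illustrative stand-ins.
 */
#include <stdint.h>

#define IOC_UNINIT		0u
#define FWBOOT_TYPE_NORMAL	0u

void fwcheck_prepare(volatile uint32_t *fwstate_reg,
		     volatile uint32_t *sem_reg, uint32_t fw_exec)
{
	if (*fwstate_reg == IOC_UNINIT)
		return;				/* nothing to clean up */

	if (fw_exec == FWBOOT_TYPE_NORMAL)
		return;

	*fwstate_reg = IOC_UNINIT;		/* force a clean init */

	(void)*sem_reg;				/* lock (read) ... */
	*sem_reg = 1;				/* ... then unlock (write 1) */
}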
@@ -650,7 +776,6 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
if (bfa_ioc_sync_start(ioc)) {
- iocpf->retry_count = 0;
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
} else {
@@ -664,6 +789,11 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
}
break;
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+ break;
+
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
@@ -689,10 +819,10 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
/*
* Call only the first time sm enters fwmismatch state.
*/
- if (iocpf->retry_count == 0)
+ if (iocpf->fw_mismatch_notified == BFA_FALSE)
bfa_ioc_pf_fwmismatch(iocpf->ioc);
- iocpf->retry_count++;
+ iocpf->fw_mismatch_notified = BFA_TRUE;
bfa_iocpf_timer_start(iocpf->ioc);
}
@@ -757,6 +887,11 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
}
break;
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+ break;
+
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
@@ -770,7 +905,7 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
- bfa_iocpf_timer_start(iocpf->ioc);
+ iocpf->poll_time = 0;
bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}
@@ -787,20 +922,12 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_FWREADY:
- bfa_iocpf_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
break;
- case IOCPF_E_INITFAIL:
- bfa_iocpf_timer_stop(ioc);
- /*
- * !!! fall through !!!
- */
-
case IOCPF_E_TIMEOUT:
writel(1, ioc->ioc_regs.ioc_sem_reg);
- if (event == IOCPF_E_TIMEOUT)
- bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+ bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
break;
@@ -820,6 +947,10 @@ static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
bfa_iocpf_timer_start(iocpf->ioc);
+ /*
+ * Enable Interrupts before sending fw IOC ENABLE cmd.
+ */
+ iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
bfa_ioc_send_enable(iocpf->ioc);
}
@@ -860,10 +991,6 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
break;
- case IOCPF_E_FWREADY:
- bfa_ioc_send_enable(ioc);
- break;
-
default:
bfa_sm_fault(ioc, event);
}
@@ -895,16 +1022,6 @@ bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
break;
- case IOCPF_E_FWREADY:
- if (bfa_ioc_is_operational(ioc)) {
- bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
- bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
- } else {
- bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
- bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
- }
- break;
-
default:
bfa_sm_fault(ioc, event);
}
@@ -929,7 +1046,6 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_FWRSP_DISABLE:
- case IOCPF_E_FWREADY:
bfa_iocpf_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
break;
@@ -976,6 +1092,11 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
break;
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+ break;
+
case IOCPF_E_FAIL:
break;
@@ -990,6 +1111,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
+ bfa_ioc_mbox_flush(iocpf->ioc);
bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}
@@ -1002,7 +1124,6 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_ENABLE:
- iocpf->retry_count = 0;
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
break;
@@ -1019,6 +1140,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
+ bfa_ioc_debug_save_ftrc(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
}
@@ -1035,20 +1157,15 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_SEMLOCKED:
bfa_ioc_notify_fail(ioc);
- bfa_ioc_sync_ack(ioc);
- iocpf->retry_count++;
- if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
- bfa_ioc_sync_leave(ioc);
- writel(1, ioc->ioc_regs.ioc_sem_reg);
- bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
- } else {
- if (bfa_ioc_sync_complete(ioc))
- bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
- else {
- writel(1, ioc->ioc_regs.ioc_sem_reg);
- bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
- }
- }
+ bfa_ioc_sync_leave(ioc);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
+ break;
+
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
break;
case IOCPF_E_DISABLE:
@@ -1073,7 +1190,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
- bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED);
+ bfa_trc(iocpf->ioc, 0);
}
/*
@@ -1112,7 +1229,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
/*
* Flush any queued up mailbox requests.
*/
- bfa_ioc_mbox_hbfail(iocpf->ioc);
+ bfa_ioc_mbox_flush(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
}
@@ -1126,11 +1243,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_SEMLOCKED:
- iocpf->retry_count = 0;
bfa_ioc_sync_ack(ioc);
bfa_ioc_notify_fail(ioc);
if (!iocpf->auto_recover) {
bfa_ioc_sync_leave(ioc);
+ writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
writel(1, ioc->ioc_regs.ioc_sem_reg);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
} else {
@@ -1143,6 +1260,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
}
break;
+ case IOCPF_E_SEM_ERROR:
+ bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
+ bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
+ break;
+
case IOCPF_E_DISABLE:
bfa_sem_timer_stop(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
@@ -1159,6 +1281,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
+ bfa_trc(iocpf->ioc, 0);
}
/*
@@ -1185,23 +1308,28 @@ bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
* BFA IOC private functions
*/
+/*
+ * Notify common modules registered for notification.
+ */
static void
-bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
+bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
- struct list_head *qe;
- struct bfa_ioc_hbfail_notify_s *notify;
+ struct bfa_ioc_notify_s *notify;
+ struct list_head *qe;
- ioc->cbfn->disable_cbfn(ioc->bfa);
-
- /*
- * Notify common modules registered for notification.
- */
- list_for_each(qe, &ioc->hb_notify_q) {
- notify = (struct bfa_ioc_hbfail_notify_s *) qe;
- notify->cbfn(notify->cbarg);
+ list_for_each(qe, &ioc->notify_q) {
+ notify = (struct bfa_ioc_notify_s *)qe;
+ notify->cbfn(notify->cbarg, event);
}
}
+static void
+bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
+{
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
+}
+
bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
@@ -1211,16 +1339,15 @@ bfa_ioc_sem_get(void __iomem *sem_reg)
r32 = readl(sem_reg);
- while (r32 && (cnt < BFA_SEM_SPINCNT)) {
+ while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
cnt++;
udelay(2);
r32 = readl(sem_reg);
}
- if (r32 == 0)
+ if (!(r32 & 1))
return BFA_TRUE;
- WARN_ON(cnt >= BFA_SEM_SPINCNT);
return BFA_FALSE;
}
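/*
 * Sketch of the reworked semaphore poll above: only bit 0 decides
 * ownership now (an all-ones read is handled separately as a PCI error
 * by the caller), and the spin gives up after a bounded number of tries.
 * The spin count is a placeholder and register access is a volatile
 * pointer instead of readl().
 */
#include <stdbool.h>
#include <stdint.h>

#define SEM_SPINCNT	20

bool sem_get(volatile uint32_t *sem_reg)
{
	uint32_t r32 = *sem_reg;
	int cnt = 0;

	while ((r32 & 1) && (cnt < SEM_SPINCNT)) {
		cnt++;
		/* udelay(2) in the driver between reads */
		r32 = *sem_reg;
	}

	return !(r32 & 1);	/* bit 0 clear == we own the semaphore */
}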
@@ -1234,7 +1361,12 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
* will return 1. Semaphore is released by writing 1 to the register
*/
r32 = readl(ioc->ioc_regs.ioc_sem_reg);
- if (r32 == 0) {
+ if (r32 == ~0) {
+ WARN_ON(r32 == ~0);
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
+ return;
+ }
+ if (!(r32 & 1)) {
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
return;
}
@@ -1343,7 +1475,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
int i;
drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
- bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+ bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
@@ -1369,7 +1501,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
bfa_ioc_fwver_get(ioc, &fwhdr);
drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
- bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
+ bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
if (fwhdr.signature != drv_fwhdr->signature) {
bfa_trc(ioc, fwhdr.signature);
@@ -1377,8 +1509,8 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
return BFA_FALSE;
}
- if (swab32(fwhdr.param) != boot_env) {
- bfa_trc(ioc, fwhdr.param);
+ if (swab32(fwhdr.bootenv) != boot_env) {
+ bfa_trc(ioc, fwhdr.bootenv);
bfa_trc(ioc, boot_env);
return BFA_FALSE;
}
@@ -1414,8 +1546,8 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
bfa_trc(ioc, ioc_fwstate);
- boot_type = BFI_BOOT_TYPE_NORMAL;
- boot_env = BFI_BOOT_LOADER_OS;
+ boot_type = BFI_FWBOOT_TYPE_NORMAL;
+ boot_env = BFI_FWBOOT_ENV_OS;
/*
* check if firmware is valid
@@ -1425,6 +1557,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
if (!fwvalid) {
bfa_ioc_boot(ioc, boot_type, boot_env);
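+		/* firmware was just booted; poll its init state rather than waiting for a ready mailbox event */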
+ bfa_ioc_poll_fwinit(ioc);
return;
}
@@ -1433,7 +1566,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
* just wait for an initialization completion interrupt.
*/
if (ioc_fwstate == BFI_IOC_INITING) {
- ioc->cbfn->reset_cbfn(ioc->bfa);
+ bfa_ioc_poll_fwinit(ioc);
return;
}
@@ -1452,7 +1585,6 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
* be flushed. Otherwise MSI-X interrupts are not delivered.
*/
bfa_ioc_msgflush(ioc);
- ioc->cbfn->reset_cbfn(ioc->bfa);
bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
return;
}
@@ -1461,6 +1593,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
* Initialize the h/w for any other states.
*/
bfa_ioc_boot(ioc, boot_type, boot_env);
+ bfa_ioc_poll_fwinit(ioc);
}
static void
@@ -1508,7 +1641,7 @@ bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
bfa_ioc_portid(ioc));
- enable_req.ioc_class = ioc->ioc_mc;
+ enable_req.clscode = cpu_to_be16(ioc->clscode);
do_gettimeofday(&tv);
enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
@@ -1572,25 +1705,26 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
u32 loff = 0;
u32 chunkno = 0;
u32 i;
+ u32 asicmode;
/*
* Initialize LMEM first before code download
*/
bfa_ioc_lmem_init(ioc);
- bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
- fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
+ bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
+ fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
pgoff = PSS_SMEM_PGOFF(loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
- for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
+ for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
- fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
+ fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
}
@@ -1616,11 +1750,15 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
ioc->ioc_regs.host_page_num_fn);
/*
- * Set boot type and boot param at the end.
- */
- bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
+ * Set boot type and device mode at the end.
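+	 * The device mode word packs the ASIC generation, ASIC mode and both
+	 * port modes into a single value written to SMEM below.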
+ */
+ asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
+ ioc->port0_mode, ioc->port1_mode);
+ bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
+ swab32(asicmode));
+ bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
swab32(boot_type));
- bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
+ bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
swab32(boot_env));
}
@@ -1636,6 +1774,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
attr->card_type = be32_to_cpu(attr->card_type);
attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
+ ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
@@ -1690,7 +1829,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
* Cleanup any pending requests.
*/
static void
-bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
+bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
{
struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
struct bfa_mbox_cmd_s *cmd;
@@ -1752,6 +1891,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
/*
* release semaphore.
*/
+ readl(ioc->ioc_regs.ioc_init_sem_reg);
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
bfa_trc(ioc, pgnum);
@@ -1808,6 +1948,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
/*
* release semaphore.
*/
+ readl(ioc->ioc_regs.ioc_init_sem_reg);
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
bfa_trc(ioc, pgnum);
return BFA_STATUS_OK;
@@ -1816,23 +1957,19 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
- struct list_head *qe;
- struct bfa_ioc_hbfail_notify_s *notify;
struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
/*
* Notify driver and common modules registered for notification.
*/
ioc->cbfn->hbfail_cbfn(ioc->bfa);
- list_for_each(qe, &ioc->hb_notify_q) {
- notify = (struct bfa_ioc_hbfail_notify_s *) qe;
- notify->cbfn(notify->cbarg);
- }
+ bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
bfa_ioc_debug_save_ftrc(ioc);
BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
"Heart Beat of IOC has failed\n");
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
}
@@ -1847,6 +1984,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
"Running firmware version is incompatible "
"with the driver version\n");
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
}
bfa_status_t
@@ -1864,6 +2002,7 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
/*
* release semaphore.
*/
+ readl(ioc->ioc_regs.ioc_init_sem_reg);
writel(1, ioc->ioc_regs.ioc_init_sem_reg);
return BFA_STATUS_OK;
@@ -1876,8 +2015,6 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
- void __iomem *rb;
-
bfa_ioc_stats(ioc, ioc_boots);
if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
@@ -1886,22 +2023,16 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
/*
* Initialize IOC state of all functions on a chip reset.
*/
- rb = ioc->pcidev.pci_bar_kva;
- if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
- writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
- writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
+ if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
+ writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
} else {
- writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
- writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
+ writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
}
bfa_ioc_msgflush(ioc);
bfa_ioc_download_fw(ioc, boot_type, boot_env);
-
- /*
- * Enable interrupts just before starting LPU
- */
- ioc->cbfn->reset_cbfn(ioc->bfa);
bfa_ioc_lpu_start(ioc);
}
@@ -1932,13 +2063,17 @@ bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
(r32 != BFI_IOC_MEMTEST));
}
-void
+bfa_boolean_t
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
__be32 *msgp = mbmsg;
u32 r32;
int i;
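+	/* bit 0 of the LPU mailbox command register indicates a pending message */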
+ r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
+ if ((r32 & 1) == 0)
+ return BFA_FALSE;
+
/*
* read the MBOX msg
*/
@@ -1954,6 +2089,8 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
*/
writel(1, ioc->ioc_regs.lpu_mbox_cmd);
readl(ioc->ioc_regs.lpu_mbox_cmd);
+
+ return BFA_TRUE;
}
void
@@ -1970,11 +2107,10 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
case BFI_IOC_I2H_HBEAT:
break;
- case BFI_IOC_I2H_READY_EVENT:
- bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
- break;
-
case BFI_IOC_I2H_ENABLE_REPLY:
+ ioc->port_mode = ioc->port_mode_cfg =
+ (enum bfa_mode_s)msg->fw_event.port_mode;
+ ioc->ad_cap_bm = msg->fw_event.cap_bm;
bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
break;
@@ -1986,6 +2122,10 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
bfa_ioc_getattr_reply(ioc);
break;
+ case BFI_IOC_I2H_ACQ_ADDR_REPLY:
+ bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
+ break;
+
default:
bfa_trc(ioc, msg->mh.msg_id);
WARN_ON(1);
@@ -2011,7 +2151,7 @@ bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
ioc->iocpf.ioc = ioc;
bfa_ioc_mbox_attach(ioc);
- INIT_LIST_HEAD(&ioc->hb_notify_q);
+ INIT_LIST_HEAD(&ioc->notify_q);
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(ioc, IOC_E_RESET);
@@ -2024,6 +2164,7 @@ void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
bfa_fsm_send_event(ioc, IOC_E_DETACH);
+ INIT_LIST_HEAD(&ioc->notify_q);
}
/*
@@ -2033,20 +2174,80 @@ bfa_ioc_detach(struct bfa_ioc_s *ioc)
*/
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
- enum bfi_mclass mc)
+ enum bfi_pcifn_class clscode)
{
- ioc->ioc_mc = mc;
+ ioc->clscode = clscode;
ioc->pcidev = *pcidev;
- ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
- ioc->cna = ioc->ctdev && !ioc->fcmode;
+
+ /*
+ * Initialize IOC and device personality
+ */
+ ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
+ ioc->asic_mode = BFI_ASIC_MODE_FC;
+
+ switch (pcidev->device_id) {
+ case BFA_PCI_DEVICE_ID_FC_8G1P:
+ case BFA_PCI_DEVICE_ID_FC_8G2P:
+ ioc->asic_gen = BFI_ASIC_GEN_CB;
+ ioc->fcmode = BFA_TRUE;
+ ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+ ioc->ad_cap_bm = BFA_CM_HBA;
+ break;
+
+ case BFA_PCI_DEVICE_ID_CT:
+ ioc->asic_gen = BFI_ASIC_GEN_CT;
+ ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+ ioc->asic_mode = BFI_ASIC_MODE_ETH;
+ ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
+ ioc->ad_cap_bm = BFA_CM_CNA;
+ break;
+
+ case BFA_PCI_DEVICE_ID_CT_FC:
+ ioc->asic_gen = BFI_ASIC_GEN_CT;
+ ioc->fcmode = BFA_TRUE;
+ ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+ ioc->ad_cap_bm = BFA_CM_HBA;
+ break;
+
+ case BFA_PCI_DEVICE_ID_CT2:
+ ioc->asic_gen = BFI_ASIC_GEN_CT2;
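+		/* CT2 personality is selected by the PCI class code and subsystem id */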
+ if (clscode == BFI_PCIFN_CLASS_FC &&
+ pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
+ ioc->asic_mode = BFI_ASIC_MODE_FC16;
+ ioc->fcmode = BFA_TRUE;
+ ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
+ ioc->ad_cap_bm = BFA_CM_HBA;
+ } else {
+ ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
+ ioc->asic_mode = BFI_ASIC_MODE_ETH;
+ if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
+ ioc->port_mode =
+ ioc->port_mode_cfg = BFA_MODE_CNA;
+ ioc->ad_cap_bm = BFA_CM_CNA;
+ } else {
+ ioc->port_mode =
+ ioc->port_mode_cfg = BFA_MODE_NIC;
+ ioc->ad_cap_bm = BFA_CM_NIC;
+ }
+ }
+ break;
+
+ default:
+ WARN_ON(1);
+ }
/*
* Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
*/
- if (ioc->ctdev)
- bfa_ioc_set_ct_hwif(ioc);
- else
+ if (ioc->asic_gen == BFI_ASIC_GEN_CB)
bfa_ioc_set_cb_hwif(ioc);
+ else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
+ bfa_ioc_set_ct_hwif(ioc);
+ else {
+ WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
+ bfa_ioc_set_ct2_hwif(ioc);
+ bfa_ioc_ct2_poweron(ioc);
+ }
bfa_ioc_map_port(ioc);
bfa_ioc_reg_init(ioc);
@@ -2172,36 +2373,38 @@ bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
struct bfi_mbmsg_s m;
int mc;
- bfa_ioc_msgget(ioc, &m);
+ if (bfa_ioc_msgget(ioc, &m)) {
+ /*
+ * Treat IOC message class as special.
+ */
+ mc = m.mh.msg_class;
+ if (mc == BFI_MC_IOC) {
+ bfa_ioc_isr(ioc, &m);
+ return;
+ }
- /*
- * Treat IOC message class as special.
- */
- mc = m.mh.msg_class;
- if (mc == BFI_MC_IOC) {
- bfa_ioc_isr(ioc, &m);
- return;
+ if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+ return;
+
+ mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}
- if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
- return;
+ bfa_ioc_lpu_read_stat(ioc);
- mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
+ /*
+ * Try to send pending mailbox commands
+ */
+ bfa_ioc_mbox_poll(ioc);
}
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
+ bfa_ioc_stats(ioc, ioc_hbfails);
+ ioc->stats.hb_count = ioc->hb_count;
bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
-void
-bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
-{
- ioc->fcmode = BFA_TRUE;
- ioc->port_id = bfa_ioc_pcifn(ioc);
-}
-
/*
* return true if IOC is disabled
*/
@@ -2213,6 +2416,15 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
}
/*
+ * Return TRUE if IOC is in acquiring address state
+ */
+bfa_boolean_t
+bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
+}
+
+/*
* return true if IOC firmware is different.
*/
bfa_boolean_t
@@ -2239,17 +2451,16 @@ bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
u32 ioc_state;
- void __iomem *rb = ioc->pcidev.pci_bar_kva;
if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
return BFA_FALSE;
- ioc_state = readl(rb + BFA_IOC0_STATE_REG);
+ ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
if (!bfa_ioc_state_disabled(ioc_state))
return BFA_FALSE;
if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
- ioc_state = readl(rb + BFA_IOC1_STATE_REG);
+ ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
if (!bfa_ioc_state_disabled(ioc_state))
return BFA_FALSE;
}
@@ -2308,24 +2519,21 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
- ad_attr->cna_capable = ioc->cna;
- ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna &&
- !ad_attr->is_mezz;
+ ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
+ ad_attr->trunk_capable = (ad_attr->nports > 1) &&
+ !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
}
enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
- if (!ioc->ctdev || ioc->fcmode)
- return BFA_IOC_TYPE_FC;
- else if (ioc->ioc_mc == BFI_MC_IOCFC)
- return BFA_IOC_TYPE_FCoE;
- else if (ioc->ioc_mc == BFI_MC_LL)
- return BFA_IOC_TYPE_LL;
- else {
- WARN_ON(ioc->ioc_mc != BFI_MC_LL);
+ if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
return BFA_IOC_TYPE_LL;
- }
+
+ WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
+
+ return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
+ ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}
void
@@ -2384,11 +2592,8 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
ioc_attr = ioc->attr;
- /*
- * model name
- */
snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
- BFA_MFG_NAME, ioc_attr->card_type);
+ BFA_MFG_NAME, ioc_attr->card_type);
}
enum bfa_ioc_state
@@ -2438,6 +2643,9 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
ioc_attr->state = bfa_ioc_get_state(ioc);
ioc_attr->port_id = ioc->port_id;
+ ioc_attr->port_mode = ioc->port_mode;
+ ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
+ ioc_attr->cap_bm = ioc->ad_cap_bm;
ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
@@ -2475,10 +2683,41 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
return m;
}
-bfa_boolean_t
-bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
+/*
+ * Send AEN notification
+ */
+void
+bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
- return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
+ struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+ enum bfa_ioc_type_e ioc_type;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ ioc_type = bfa_ioc_get_type(ioc);
+ switch (ioc_type) {
+ case BFA_IOC_TYPE_FC:
+ aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
+ break;
+ case BFA_IOC_TYPE_FCoE:
+ aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
+ aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
+ break;
+ case BFA_IOC_TYPE_LL:
+ aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
+ break;
+ default:
+ WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
+ break;
+ }
+
+ /* Send the AEN notification */
+ aen_entry->aen_data.ioc.ioc_type = ioc_type;
+ bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
+ BFA_AEN_CAT_IOC, event);
}
/*
@@ -2531,7 +2770,7 @@ bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
bfa_ioc_portid(ioc));
- req->ioc_class = ioc->ioc_mc;
+ req->clscode = cpu_to_be16(ioc->clscode);
bfa_ioc_mbox_queue(ioc, &cmd);
}
@@ -2673,6 +2912,7 @@ static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
bfa_ioc_stats(ioc, ioc_hbfails);
+ ioc->stats.hb_count = ioc->hb_count;
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
@@ -2681,6 +2921,10 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
return;
+ if (ioc->attr->nwwn == 0)
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
+ if (ioc->attr->pwwn == 0)
+ bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
}
/*
@@ -2703,6 +2947,34 @@ bfa_iocpf_sem_timeout(void *ioc_arg)
bfa_ioc_hw_sem_get(ioc);
}
+static void
+bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
+{
+ u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+ bfa_trc(ioc, fwstate);
+
+ if (fwstate == BFI_IOC_DISABLED) {
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
+ return;
+ }
+
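+	/* keep polling every BFA_IOC_POLL_TOV until the overall BFA_IOC_TOV timeout expires */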
+ if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
+ bfa_iocpf_timeout(ioc);
+ else {
+ ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
+ bfa_iocpf_poll_timer_start(ioc);
+ }
+}
+
+static void
+bfa_iocpf_poll_timeout(void *ioc_arg)
+{
+ struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
+
+ bfa_ioc_poll_fwinit(ioc);
+}
+
/*
* bfa timer function
*/
@@ -2770,3 +3042,2942 @@ bfa_timer_stop(struct bfa_timer_s *timer)
list_del(&timer->qe);
}
+
+/*
+ * ASIC block related
+ */
+static void
+bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
+{
+ struct bfa_ablk_cfg_inst_s *cfg_inst;
+ int i, j;
+ u16 be16;
+ u32 be32;
+
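+	/* firmware returns the ASIC block configuration in big-endian; convert each field in place */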
+ for (i = 0; i < BFA_ABLK_MAX; i++) {
+ cfg_inst = &cfg->inst[i];
+ for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
+ be16 = cfg_inst->pf_cfg[j].pers;
+ cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
+ be16 = cfg_inst->pf_cfg[j].num_qpairs;
+ cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
+ be16 = cfg_inst->pf_cfg[j].num_vectors;
+ cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
+ be32 = cfg_inst->pf_cfg[j].bw;
+			cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
+ }
+ }
+}
+
+static void
+bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
+{
+ struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
+ struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
+ bfa_ablk_cbfn_t cbfn;
+
+ WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
+ bfa_trc(ablk->ioc, msg->mh.msg_id);
+
+ switch (msg->mh.msg_id) {
+ case BFI_ABLK_I2H_QUERY:
+ if (rsp->status == BFA_STATUS_OK) {
+ memcpy(ablk->cfg, ablk->dma_addr.kva,
+ sizeof(struct bfa_ablk_cfg_s));
+ bfa_ablk_config_swap(ablk->cfg);
+ ablk->cfg = NULL;
+ }
+ break;
+
+ case BFI_ABLK_I2H_ADPT_CONFIG:
+ case BFI_ABLK_I2H_PORT_CONFIG:
+ /* update config port mode */
+ ablk->ioc->port_mode_cfg = rsp->port_mode;
+
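+		/* fall through */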
+ case BFI_ABLK_I2H_PF_DELETE:
+ case BFI_ABLK_I2H_PF_UPDATE:
+ case BFI_ABLK_I2H_OPTROM_ENABLE:
+ case BFI_ABLK_I2H_OPTROM_DISABLE:
+ /* No-op */
+ break;
+
+ case BFI_ABLK_I2H_PF_CREATE:
+ *(ablk->pcifn) = rsp->pcifn;
+ ablk->pcifn = NULL;
+ break;
+
+ default:
+ WARN_ON(1);
+ }
+
+ ablk->busy = BFA_FALSE;
+ if (ablk->cbfn) {
+ cbfn = ablk->cbfn;
+ ablk->cbfn = NULL;
+ cbfn(ablk->cbarg, rsp->status);
+ }
+}
+
+static void
+bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+ struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
+
+ bfa_trc(ablk->ioc, event);
+
+ switch (event) {
+ case BFA_IOC_E_ENABLED:
+ WARN_ON(ablk->busy != BFA_FALSE);
+ break;
+
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ /* Fail any pending requests */
+ ablk->pcifn = NULL;
+ if (ablk->busy) {
+ if (ablk->cbfn)
+ ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
+ ablk->cbfn = NULL;
+ ablk->busy = BFA_FALSE;
+ }
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+u32
+bfa_ablk_meminfo(void)
+{
+ return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
+}
+
+void
+bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
+{
+ ablk->dma_addr.kva = dma_kva;
+ ablk->dma_addr.pa = dma_pa;
+}
+
+void
+bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
+{
+ ablk->ioc = ioc;
+
+ bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
+ bfa_q_qe_init(&ablk->ioc_notify);
+ bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
+ list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
+}
+
+bfa_status_t
+bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
+ bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_query_s *m;
+
+ WARN_ON(!ablk_cfg);
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->cfg = ablk_cfg;
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
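+	/* only one ASIC block mailbox operation may be outstanding; busy is cleared in bfa_ablk_isr() */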
+
+ m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
+ bfa_ioc_portid(ablk->ioc));
+ bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
+ u8 port, enum bfi_pcifn_class personality, int bw,
+ bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_pf_req_s *m;
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->pcifn = pcifn;
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
+ bfa_ioc_portid(ablk->ioc));
+ m->pers = cpu_to_be16((u16)personality);
+ m->bw = cpu_to_be32(bw);
+ m->port = port;
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
+ bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_pf_req_s *m;
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
+ bfa_ioc_portid(ablk->ioc));
+ m->pcifn = (u8)pcifn;
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
+ int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_cfg_req_s *m;
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
+ bfa_ioc_portid(ablk->ioc));
+ m->mode = (u8)mode;
+ m->max_pf = (u8)max_pf;
+ m->max_vf = (u8)max_vf;
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
+ int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_cfg_req_s *m;
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
+ bfa_ioc_portid(ablk->ioc));
+ m->port = (u8)port;
+ m->mode = (u8)mode;
+ m->max_pf = (u8)max_pf;
+ m->max_vf = (u8)max_vf;
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
+ bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_pf_req_s *m;
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
+ bfa_ioc_portid(ablk->ioc));
+ m->pcifn = (u8)pcifn;
+ m->bw = cpu_to_be32(bw);
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_optrom_s *m;
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
+ bfa_ioc_portid(ablk->ioc));
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_ablk_h2i_optrom_s *m;
+
+ if (!bfa_ioc_is_operational(ablk->ioc)) {
+ bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+
+ if (ablk->busy) {
+ bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ ablk->cbfn = cbfn;
+ ablk->cbarg = cbarg;
+ ablk->busy = BFA_TRUE;
+
+ m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
+ bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
+ bfa_ioc_portid(ablk->ioc));
+ bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * SFP module specific
+ */
+
+/* forward declarations */
+static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
+static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
+static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
+ enum bfa_port_speed portspeed);
+
+static void
+bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
+{
+ bfa_trc(sfp, sfp->lock);
+ if (sfp->cbfn)
+ sfp->cbfn(sfp->cbarg, sfp->status);
+ sfp->lock = 0;
+ sfp->cbfn = NULL;
+}
+
+static void
+bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
+{
+ bfa_trc(sfp, sfp->portspeed);
+ if (sfp->media) {
+ bfa_sfp_media_get(sfp);
+ if (sfp->state_query_cbfn)
+ sfp->state_query_cbfn(sfp->state_query_cbarg,
+ sfp->status);
+ sfp->media = NULL;
+ }
+
+ if (sfp->portspeed) {
+ sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
+ if (sfp->state_query_cbfn)
+ sfp->state_query_cbfn(sfp->state_query_cbarg,
+ sfp->status);
+ sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
+ }
+
+ sfp->state_query_lock = 0;
+ sfp->state_query_cbfn = NULL;
+}
+
+/*
+ * IOC event handler.
+ */
+static void
+bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
+{
+ struct bfa_sfp_s *sfp = sfp_arg;
+
+ bfa_trc(sfp, event);
+ bfa_trc(sfp, sfp->lock);
+ bfa_trc(sfp, sfp->state_query_lock);
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (sfp->lock) {
+ sfp->status = BFA_STATUS_IOC_FAILURE;
+ bfa_cb_sfp_show(sfp);
+ }
+
+ if (sfp->state_query_lock) {
+ sfp->status = BFA_STATUS_IOC_FAILURE;
+ bfa_cb_sfp_state_query(sfp);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * SFP's State Change Notification post to AEN
+ */
+static void
+bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
+{
+ struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+ enum bfa_port_aen_event aen_evt = 0;
+
+ bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
+ ((u64)rsp->event));
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
+ aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
+ aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
+
+ switch (rsp->event) {
+ case BFA_SFP_SCN_INSERTED:
+ aen_evt = BFA_PORT_AEN_SFP_INSERT;
+ break;
+ case BFA_SFP_SCN_REMOVED:
+ aen_evt = BFA_PORT_AEN_SFP_REMOVE;
+ break;
+ case BFA_SFP_SCN_FAILED:
+ aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
+ break;
+ case BFA_SFP_SCN_UNSUPPORT:
+ aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
+ break;
+ case BFA_SFP_SCN_POM:
+ aen_evt = BFA_PORT_AEN_SFP_POM;
+ aen_entry->aen_data.port.level = rsp->pomlvl;
+ break;
+ default:
+ bfa_trc(sfp, rsp->event);
+ WARN_ON(1);
+ }
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
+ BFA_AEN_CAT_PORT, aen_evt);
+}
+
+/*
+ * SFP get data send
+ */
+static void
+bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
+{
+ struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
+
+ bfa_trc(sfp, req->memtype);
+
+ /* build host command */
+ bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
+ bfa_ioc_portid(sfp->ioc));
+
+ /* send mbox cmd */
+ bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
+}
+
+/*
+ * SFP is valid, read sfp data
+ */
+static void
+bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
+{
+ struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
+
+ WARN_ON(sfp->lock != 0);
+ bfa_trc(sfp, sfp->state);
+
+ sfp->lock = 1;
+ sfp->memtype = memtype;
+ req->memtype = memtype;
+
+ /* Setup SG list */
+ bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
+
+ bfa_sfp_getdata_send(sfp);
+}
+
+/*
+ * SFP scn handler
+ */
+static void
+bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
+{
+ struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
+
+ switch (rsp->event) {
+ case BFA_SFP_SCN_INSERTED:
+ sfp->state = BFA_SFP_STATE_INSERTED;
+ sfp->data_valid = 0;
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ break;
+ case BFA_SFP_SCN_REMOVED:
+ sfp->state = BFA_SFP_STATE_REMOVED;
+ sfp->data_valid = 0;
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ break;
+ case BFA_SFP_SCN_FAILED:
+ sfp->state = BFA_SFP_STATE_FAILED;
+ sfp->data_valid = 0;
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ break;
+ case BFA_SFP_SCN_UNSUPPORT:
+ sfp->state = BFA_SFP_STATE_UNSUPPORT;
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ if (!sfp->lock)
+ bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+ break;
+ case BFA_SFP_SCN_POM:
+ bfa_sfp_scn_aen_post(sfp, rsp);
+ break;
+ case BFA_SFP_SCN_VALID:
+ sfp->state = BFA_SFP_STATE_VALID;
+ if (!sfp->lock)
+ bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+ break;
+ default:
+ bfa_trc(sfp, rsp->event);
+ WARN_ON(1);
+ }
+}
+
+/*
+ * SFP show complete
+ */
+static void
+bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
+{
+ struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
+
+ if (!sfp->lock) {
+ /*
+ * receiving response after ioc failure
+ */
+ bfa_trc(sfp, sfp->lock);
+ return;
+ }
+
+ bfa_trc(sfp, rsp->status);
+ if (rsp->status == BFA_STATUS_OK) {
+ sfp->data_valid = 1;
+ if (sfp->state == BFA_SFP_STATE_VALID)
+ sfp->status = BFA_STATUS_OK;
+ else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
+ sfp->status = BFA_STATUS_SFP_UNSUPP;
+ else
+ bfa_trc(sfp, sfp->state);
+ } else {
+ sfp->data_valid = 0;
+ sfp->status = rsp->status;
+ /* sfpshow shouldn't change sfp state */
+ }
+
+ bfa_trc(sfp, sfp->memtype);
+ if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
+ bfa_trc(sfp, sfp->data_valid);
+ if (sfp->data_valid) {
+ u32 size = sizeof(struct sfp_mem_s);
+ u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
+ memcpy(des, sfp->dbuf_kva, size);
+ }
+ /*
+ * Queue completion callback.
+ */
+ bfa_cb_sfp_show(sfp);
+ } else
+ sfp->lock = 0;
+
+ bfa_trc(sfp, sfp->state_query_lock);
+ if (sfp->state_query_lock) {
+ sfp->state = rsp->state;
+ /* Complete callback */
+ bfa_cb_sfp_state_query(sfp);
+ }
+}
+
+/*
+ * SFP query fw sfp state
+ */
+static void
+bfa_sfp_state_query(struct bfa_sfp_s *sfp)
+{
+ struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
+
+ /* Should not be doing query if not in _INIT state */
+ WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
+ WARN_ON(sfp->state_query_lock != 0);
+ bfa_trc(sfp, sfp->state);
+
+ sfp->state_query_lock = 1;
+ req->memtype = 0;
+
+ if (!sfp->lock)
+ bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
+}
+
+static void
+bfa_sfp_media_get(struct bfa_sfp_s *sfp)
+{
+ enum bfa_defs_sfp_media_e *media = sfp->media;
+
+ *media = BFA_SFP_MEDIA_UNKNOWN;
+
+ if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
+ *media = BFA_SFP_MEDIA_UNSUPPORT;
+ else if (sfp->state == BFA_SFP_STATE_VALID) {
+ union sfp_xcvr_e10g_code_u e10g;
+ struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
+ u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
+ (sfpmem->srlid_base.xcvr[5] >> 1);
+
+ e10g.b = sfpmem->srlid_base.xcvr[0];
+ bfa_trc(sfp, e10g.b);
+ bfa_trc(sfp, xmtr_tech);
+ /* check fc transmitter tech */
+ if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
+ (xmtr_tech & SFP_XMTR_TECH_CP) ||
+ (xmtr_tech & SFP_XMTR_TECH_CA))
+ *media = BFA_SFP_MEDIA_CU;
+ else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
+ (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
+ *media = BFA_SFP_MEDIA_EL;
+ else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
+ (xmtr_tech & SFP_XMTR_TECH_LC))
+ *media = BFA_SFP_MEDIA_LW;
+ else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
+ (xmtr_tech & SFP_XMTR_TECH_SN) ||
+ (xmtr_tech & SFP_XMTR_TECH_SA))
+ *media = BFA_SFP_MEDIA_SW;
+		/* Check 10G Ethernet Compliance code */
+ else if (e10g.b & 0x10)
+ *media = BFA_SFP_MEDIA_SW;
+ else if (e10g.b & 0x60)
+ *media = BFA_SFP_MEDIA_LW;
+ else if (e10g.r.e10g_unall & 0x80)
+ *media = BFA_SFP_MEDIA_UNKNOWN;
+ else
+ bfa_trc(sfp, 0);
+ } else
+ bfa_trc(sfp, sfp->state);
+}
+
+static bfa_status_t
+bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
+{
+ struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
+ struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
+ union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
+ union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
+
+ if (portspeed == BFA_PORT_SPEED_10GBPS) {
+ if (e10g.r.e10g_sr || e10g.r.e10g_lr)
+ return BFA_STATUS_OK;
+ else {
+ bfa_trc(sfp, e10g.b);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+ }
+ if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
+ ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
+ ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
+ ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
+ ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
+ return BFA_STATUS_OK;
+ else {
+ bfa_trc(sfp, portspeed);
+ bfa_trc(sfp, fc3.b);
+ bfa_trc(sfp, e10g.b);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+}
+
+/*
+ * SFP hmbox handler
+ */
+void
+bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
+{
+ struct bfa_sfp_s *sfp = sfparg;
+
+ switch (msg->mh.msg_id) {
+ case BFI_SFP_I2H_SHOW:
+ bfa_sfp_show_comp(sfp, msg);
+ break;
+
+ case BFI_SFP_I2H_SCN:
+ bfa_sfp_scn(sfp, msg);
+ break;
+
+ default:
+ bfa_trc(sfp, msg->mh.msg_id);
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Return DMA memory needed by sfp module.
+ */
+u32
+bfa_sfp_meminfo(void)
+{
+ return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Attach virtual and physical memory for SFP.
+ */
+void
+bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
+ struct bfa_trc_mod_s *trcmod)
+{
+ sfp->dev = dev;
+ sfp->ioc = ioc;
+ sfp->trcmod = trcmod;
+
+ sfp->cbfn = NULL;
+ sfp->cbarg = NULL;
+ sfp->sfpmem = NULL;
+ sfp->lock = 0;
+ sfp->data_valid = 0;
+ sfp->state = BFA_SFP_STATE_INIT;
+ sfp->state_query_lock = 0;
+ sfp->state_query_cbfn = NULL;
+ sfp->state_query_cbarg = NULL;
+ sfp->media = NULL;
+ sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
+ sfp->is_elb = BFA_FALSE;
+
+ bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
+ bfa_q_qe_init(&sfp->ioc_notify);
+ bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
+ list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
+}
+
+/*
+ * Claim Memory for SFP
+ */
+void
+bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
+{
+ sfp->dbuf_kva = dm_kva;
+ sfp->dbuf_pa = dm_pa;
+ memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
+
+ dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
+ dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Show SFP eeprom content
+ *
+ * @param[in] sfp - bfa sfp module
+ *
+ * @param[out] sfpmem - sfp eeprom data
+ *
+ */
+bfa_status_t
+bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
+ bfa_cb_sfp_t cbfn, void *cbarg)
+{
+
+ if (!bfa_ioc_is_operational(sfp->ioc)) {
+ bfa_trc(sfp, 0);
+ return BFA_STATUS_IOC_NON_OP;
+ }
+
+ if (sfp->lock) {
+ bfa_trc(sfp, 0);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ sfp->cbfn = cbfn;
+ sfp->cbarg = cbarg;
+ sfp->sfpmem = sfpmem;
+
+ bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Return SFP Media type
+ *
+ * @param[in] sfp - bfa sfp module
+ *
+ * @param[out] media - detected sfp media type
+ *
+ */
+bfa_status_t
+bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
+ bfa_cb_sfp_t cbfn, void *cbarg)
+{
+ if (!bfa_ioc_is_operational(sfp->ioc)) {
+ bfa_trc(sfp, 0);
+ return BFA_STATUS_IOC_NON_OP;
+ }
+
+ sfp->media = media;
+ if (sfp->state == BFA_SFP_STATE_INIT) {
+ if (sfp->state_query_lock) {
+ bfa_trc(sfp, 0);
+ return BFA_STATUS_DEVBUSY;
+ } else {
+ sfp->state_query_cbfn = cbfn;
+ sfp->state_query_cbarg = cbarg;
+ bfa_sfp_state_query(sfp);
+ return BFA_STATUS_SFP_NOT_READY;
+ }
+ }
+
+ bfa_sfp_media_get(sfp);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Check if user set port speed is allowed by the SFP
+ *
+ * @param[in] sfp - bfa sfp module
+ * @param[in] portspeed - port speed from user
+ *
+ */
+bfa_status_t
+bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
+ bfa_cb_sfp_t cbfn, void *cbarg)
+{
+ WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
+
+ if (!bfa_ioc_is_operational(sfp->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+	/* For Mezz cards, all speeds are allowed */
+ if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
+ return BFA_STATUS_OK;
+
+ /* Check SFP state */
+ sfp->portspeed = portspeed;
+ if (sfp->state == BFA_SFP_STATE_INIT) {
+ if (sfp->state_query_lock) {
+ bfa_trc(sfp, 0);
+ return BFA_STATUS_DEVBUSY;
+ } else {
+ sfp->state_query_cbfn = cbfn;
+ sfp->state_query_cbarg = cbarg;
+ bfa_sfp_state_query(sfp);
+ return BFA_STATUS_SFP_NOT_READY;
+ }
+ }
+
+ if (sfp->state == BFA_SFP_STATE_REMOVED ||
+ sfp->state == BFA_SFP_STATE_FAILED) {
+ bfa_trc(sfp, sfp->state);
+ return BFA_STATUS_NO_SFP_DEV;
+ }
+
+ if (sfp->state == BFA_SFP_STATE_INSERTED) {
+ bfa_trc(sfp, sfp->state);
+ return BFA_STATUS_DEVBUSY; /* sfp is reading data */
+ }
+
+	/* For eloopback, all speeds are allowed */
+ if (sfp->is_elb)
+ return BFA_STATUS_OK;
+
+ return bfa_sfp_speed_valid(sfp, portspeed);
+}
+
+/*
+ * Flash module specific
+ */
+
+/*
+ * FLASH DMA buffer should be big enough to hold both MFG block and
+ * asic block (64k) at the same time, and should also be 2k aligned to
+ * avoid a write segment crossing a sector boundary.
+ */
+#define BFA_FLASH_SEG_SZ 2048
+#define BFA_FLASH_DMA_BUF_SZ \
+ BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
+
+static void
+bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
+ int inst, int type)
+{
+ struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
+ aen_entry->aen_data.audit.partition_inst = inst;
+ aen_entry->aen_data.audit.partition_type = type;
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
+ BFA_AEN_CAT_AUDIT, event);
+}
+
+static void
+bfa_flash_cb(struct bfa_flash_s *flash)
+{
+ flash->op_busy = 0;
+ if (flash->cbfn)
+ flash->cbfn(flash->cbarg, flash->status);
+}
+
+static void
+bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+ struct bfa_flash_s *flash = cbarg;
+
+ bfa_trc(flash, event);
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (flash->op_busy) {
+ flash->status = BFA_STATUS_IOC_FAILURE;
+ flash->cbfn(flash->cbarg, flash->status);
+ flash->op_busy = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Send flash attribute query request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_query_send(void *cbarg)
+{
+ struct bfa_flash_s *flash = cbarg;
+ struct bfi_flash_query_req_s *msg =
+ (struct bfi_flash_query_req_s *) flash->mb.msg;
+
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
+ flash->dbuf_pa);
+ bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+}
+
+/*
+ * Send flash write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_write_send(struct bfa_flash_s *flash)
+{
+ struct bfi_flash_write_req_s *msg =
+ (struct bfi_flash_write_req_s *) flash->mb.msg;
+ u32 len;
+
+ msg->type = be32_to_cpu(flash->type);
+ msg->instance = flash->instance;
+ msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
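+	/* send at most one DMA buffer worth of data per request; any remaining
+	 * data is sent from the WRITE_RSP handler in bfa_flash_intr() */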
+ len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+ flash->residue : BFA_FLASH_DMA_BUF_SZ;
+ msg->length = be32_to_cpu(len);
+
+ /* indicate if it's the last msg of the whole write operation */
+ msg->last = (len == flash->residue) ? 1 : 0;
+
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+ memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
+ bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+
+ flash->residue -= len;
+ flash->offset += len;
+}
+
+/*
+ * Send flash read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_read_send(void *cbarg)
+{
+ struct bfa_flash_s *flash = cbarg;
+ struct bfi_flash_read_req_s *msg =
+ (struct bfi_flash_read_req_s *) flash->mb.msg;
+ u32 len;
+
+ msg->type = be32_to_cpu(flash->type);
+ msg->instance = flash->instance;
+ msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+ len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+ flash->residue : BFA_FLASH_DMA_BUF_SZ;
+ msg->length = be32_to_cpu(len);
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+ bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+}
+
+/*
+ * Send flash erase request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_erase_send(void *cbarg)
+{
+ struct bfa_flash_s *flash = cbarg;
+ struct bfi_flash_erase_req_s *msg =
+ (struct bfi_flash_erase_req_s *) flash->mb.msg;
+
+ msg->type = be32_to_cpu(flash->type);
+ msg->instance = flash->instance;
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
+}
+
+/*
+ * Process flash response messages upon receiving interrupts.
+ *
+ * @param[in] flasharg - flash structure
+ * @param[in] msg - message structure
+ */
+static void
+bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
+{
+ struct bfa_flash_s *flash = flasharg;
+ u32 status;
+
+ union {
+ struct bfi_flash_query_rsp_s *query;
+ struct bfi_flash_erase_rsp_s *erase;
+ struct bfi_flash_write_rsp_s *write;
+ struct bfi_flash_read_rsp_s *read;
+ struct bfi_flash_event_s *event;
+ struct bfi_mbmsg_s *msg;
+ } m;
+
+ m.msg = msg;
+ bfa_trc(flash, msg->mh.msg_id);
+
+ if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
+ /* receiving response after ioc failure */
+ bfa_trc(flash, 0x9999);
+ return;
+ }
+
+ switch (msg->mh.msg_id) {
+ case BFI_FLASH_I2H_QUERY_RSP:
+ status = be32_to_cpu(m.query->status);
+ bfa_trc(flash, status);
+ if (status == BFA_STATUS_OK) {
+ u32 i;
+ struct bfa_flash_attr_s *attr, *f;
+
+ attr = (struct bfa_flash_attr_s *) flash->ubuf;
+ f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
+ attr->status = be32_to_cpu(f->status);
+ attr->npart = be32_to_cpu(f->npart);
+ bfa_trc(flash, attr->status);
+ bfa_trc(flash, attr->npart);
+ for (i = 0; i < attr->npart; i++) {
+ attr->part[i].part_type =
+ be32_to_cpu(f->part[i].part_type);
+ attr->part[i].part_instance =
+ be32_to_cpu(f->part[i].part_instance);
+ attr->part[i].part_off =
+ be32_to_cpu(f->part[i].part_off);
+ attr->part[i].part_size =
+ be32_to_cpu(f->part[i].part_size);
+ attr->part[i].part_len =
+ be32_to_cpu(f->part[i].part_len);
+ attr->part[i].part_status =
+ be32_to_cpu(f->part[i].part_status);
+ }
+ }
+ flash->status = status;
+ bfa_flash_cb(flash);
+ break;
+ case BFI_FLASH_I2H_ERASE_RSP:
+ status = be32_to_cpu(m.erase->status);
+ bfa_trc(flash, status);
+ flash->status = status;
+ bfa_flash_cb(flash);
+ break;
+ case BFI_FLASH_I2H_WRITE_RSP:
+ status = be32_to_cpu(m.write->status);
+ bfa_trc(flash, status);
+ if (status != BFA_STATUS_OK || flash->residue == 0) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else {
+ bfa_trc(flash, flash->offset);
+ bfa_flash_write_send(flash);
+ }
+ break;
+ case BFI_FLASH_I2H_READ_RSP:
+ status = be32_to_cpu(m.read->status);
+ bfa_trc(flash, status);
+ if (status != BFA_STATUS_OK) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else {
+ u32 len = be32_to_cpu(m.read->length);
+ bfa_trc(flash, flash->offset);
+ bfa_trc(flash, len);
+ memcpy(flash->ubuf + flash->offset,
+ flash->dbuf_kva, len);
+ flash->residue -= len;
+ flash->offset += len;
+ if (flash->residue == 0) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else
+ bfa_flash_read_send(flash);
+ }
+ break;
+ case BFI_FLASH_I2H_BOOT_VER_RSP:
+ break;
+ case BFI_FLASH_I2H_EVENT:
+ status = be32_to_cpu(m.event->status);
+ bfa_trc(flash, status);
+ if (status == BFA_STATUS_BAD_FWCFG)
+ bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
+ else if (status == BFA_STATUS_INVALID_VENDOR) {
+ u32 param;
+ param = be32_to_cpu(m.event->param);
+ bfa_trc(flash, param);
+ bfa_ioc_aen_post(flash->ioc,
+ BFA_IOC_AEN_INVALID_VENDOR);
+ }
+ break;
+
+ default:
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Flash memory info API.
+ *
+ * @param[in] mincfg - minimal cfg variable
+ */
+u32
+bfa_flash_meminfo(bfa_boolean_t mincfg)
+{
+ /* min driver doesn't need flash */
+ if (mincfg)
+ return 0;
+ return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Flash attach API.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] ioc - ioc structure
+ * @param[in] dev - device structure
+ * @param[in] trcmod - trace module
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
+ struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
+{
+ flash->ioc = ioc;
+ flash->trcmod = trcmod;
+ flash->cbfn = NULL;
+ flash->cbarg = NULL;
+ flash->op_busy = 0;
+
+ bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
+ bfa_q_qe_init(&flash->ioc_notify);
+ bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
+ list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
+
+ /* min driver doesn't need flash */
+ if (mincfg) {
+ flash->dbuf_kva = NULL;
+ flash->dbuf_pa = 0;
+ }
+}
+
+/*
+ * Claim memory for flash
+ *
+ * @param[in] flash - flash structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
+ bfa_boolean_t mincfg)
+{
+ if (mincfg)
+ return;
+
+ flash->dbuf_kva = dm_kva;
+ flash->dbuf_pa = dm_pa;
+ memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
+ dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+ dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Get flash attribute.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] attr - flash attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
+ bfa_cb_flash_t cbfn, void *cbarg)
+{
+ bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
+
+ if (!bfa_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (flash->op_busy) {
+ bfa_trc(flash, flash->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->ubuf = (u8 *) attr;
+ bfa_flash_query_send(flash);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Erase flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
+ u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
+{
+ bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
+ bfa_trc(flash, type);
+ bfa_trc(flash, instance);
+
+ if (!bfa_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (flash->op_busy) {
+ bfa_trc(flash, flash->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->type = type;
+ flash->instance = instance;
+
+ bfa_flash_erase_send(flash);
+ bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
+ instance, type);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Update flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
+ u8 instance, void *buf, u32 len, u32 offset,
+ bfa_cb_flash_t cbfn, void *cbarg)
+{
+ bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
+ bfa_trc(flash, type);
+ bfa_trc(flash, instance);
+ bfa_trc(flash, len);
+ bfa_trc(flash, offset);
+
+ if (!bfa_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /*
+	 * 'len' must be on a word (4-byte) boundary
+	 * 'offset' must be on a sector (16kb) boundary
+ */
+ if (!len || (len & 0x03) || (offset & 0x00003FFF))
+ return BFA_STATUS_FLASH_BAD_LEN;
+
+ if (type == BFA_FLASH_PART_MFG)
+ return BFA_STATUS_EINVAL;
+
+ if (flash->op_busy) {
+ bfa_trc(flash, flash->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->type = type;
+ flash->instance = instance;
+ flash->residue = len;
+ flash->offset = 0;
+ flash->addr_off = offset;
+ flash->ubuf = buf;
+
+ bfa_flash_write_send(flash);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Read flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
+ u8 instance, void *buf, u32 len, u32 offset,
+ bfa_cb_flash_t cbfn, void *cbarg)
+{
+ bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
+ bfa_trc(flash, type);
+ bfa_trc(flash, instance);
+ bfa_trc(flash, len);
+ bfa_trc(flash, offset);
+
+ if (!bfa_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /*
+	 * 'len' must be on a word (4-byte) boundary
+	 * 'offset' must be on a sector (16kb) boundary
+ */
+ if (!len || (len & 0x03) || (offset & 0x00003FFF))
+ return BFA_STATUS_FLASH_BAD_LEN;
+
+ if (flash->op_busy) {
+ bfa_trc(flash, flash->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->type = type;
+ flash->instance = instance;
+ flash->residue = len;
+ flash->offset = 0;
+ flash->addr_off = offset;
+ flash->ubuf = buf;
+ bfa_flash_read_send(flash);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * DIAG module specific
+ */
+
+#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
+#define BFA_DIAG_FWPING_TOV 1000 /* msec */
+
+/* IOC event handler */
+static void
+bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
+{
+ struct bfa_diag_s *diag = diag_arg;
+
+ bfa_trc(diag, event);
+ bfa_trc(diag, diag->block);
+ bfa_trc(diag, diag->fwping.lock);
+ bfa_trc(diag, diag->tsensor.lock);
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (diag->fwping.lock) {
+ diag->fwping.status = BFA_STATUS_IOC_FAILURE;
+ diag->fwping.cbfn(diag->fwping.cbarg,
+ diag->fwping.status);
+ diag->fwping.lock = 0;
+ }
+
+ if (diag->tsensor.lock) {
+ diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
+ diag->tsensor.cbfn(diag->tsensor.cbarg,
+ diag->tsensor.status);
+ diag->tsensor.lock = 0;
+ }
+
+ if (diag->block) {
+ if (diag->timer_active) {
+ bfa_timer_stop(&diag->timer);
+ diag->timer_active = 0;
+ }
+
+ diag->status = BFA_STATUS_IOC_FAILURE;
+ diag->cbfn(diag->cbarg, diag->status);
+ diag->block = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void
+bfa_diag_memtest_done(void *cbarg)
+{
+ struct bfa_diag_s *diag = cbarg;
+ struct bfa_ioc_s *ioc = diag->ioc;
+ struct bfa_diag_memtest_result *res = diag->result;
+ u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
+ u32 pgnum, pgoff, i;
+
+ pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+ pgoff = PSS_SMEM_PGOFF(loff);
+
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
+ sizeof(u32)); i++) {
+ /* read test result from smem */
+ *((u32 *) res + i) =
+ bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
+ loff += sizeof(u32);
+ }
+
+ /* Reset IOC fwstates to BFI_IOC_UNINIT */
+ bfa_ioc_reset_fwstate(ioc);
+
+ res->status = swab32(res->status);
+ bfa_trc(diag, res->status);
+
+ if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
+ diag->status = BFA_STATUS_OK;
+ else {
+ diag->status = BFA_STATUS_MEMTEST_FAILED;
+ res->addr = swab32(res->addr);
+ res->exp = swab32(res->exp);
+ res->act = swab32(res->act);
+ res->err_status = swab32(res->err_status);
+ res->err_status1 = swab32(res->err_status1);
+ res->err_addr = swab32(res->err_addr);
+ bfa_trc(diag, res->addr);
+ bfa_trc(diag, res->exp);
+ bfa_trc(diag, res->act);
+ bfa_trc(diag, res->err_status);
+ bfa_trc(diag, res->err_status1);
+ bfa_trc(diag, res->err_addr);
+ }
+ diag->timer_active = 0;
+ diag->cbfn(diag->cbarg, diag->status);
+ diag->block = 0;
+}
+
+/*
+ * Firmware ping
+ */
+
+/*
+ * Perform DMA test directly
+ */
+static void
+diag_fwping_send(struct bfa_diag_s *diag)
+{
+ struct bfi_diag_fwping_req_s *fwping_req;
+ u32 i;
+
+ bfa_trc(diag, diag->fwping.dbuf_pa);
+
+ /* fill DMA area with pattern */
+ for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
+ *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
+
+ /* Fill mbox msg */
+ fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
+
+ /* Setup SG list */
+ bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
+ diag->fwping.dbuf_pa);
+ /* Set up dma count */
+ fwping_req->count = cpu_to_be32(diag->fwping.count);
+ /* Set up data pattern */
+ fwping_req->data = diag->fwping.data;
+
+ /* build host command */
+ bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
+ bfa_ioc_portid(diag->ioc));
+
+ /* send mbox cmd */
+ bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
+}
+
+static void
+diag_fwping_comp(struct bfa_diag_s *diag,
+ struct bfi_diag_fwping_rsp_s *diag_rsp)
+{
+ u32 rsp_data = diag_rsp->data;
+ u8 rsp_dma_status = diag_rsp->dma_status;
+
+ bfa_trc(diag, rsp_data);
+ bfa_trc(diag, rsp_dma_status);
+
+ if (rsp_dma_status == BFA_STATUS_OK) {
+ u32 i, pat;
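+		/* an odd ping count is expected to leave the inverted data pattern in the buffer */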
+ pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
+ diag->fwping.data;
+ /* Check mbox data */
+ if (diag->fwping.data != rsp_data) {
+ bfa_trc(diag, rsp_data);
+ diag->fwping.result->dmastatus =
+ BFA_STATUS_DATACORRUPTED;
+ diag->fwping.status = BFA_STATUS_DATACORRUPTED;
+ diag->fwping.cbfn(diag->fwping.cbarg,
+ diag->fwping.status);
+ diag->fwping.lock = 0;
+ return;
+ }
+ /* Check dma pattern */
+ for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
+ if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
+ bfa_trc(diag, i);
+ bfa_trc(diag, pat);
+ bfa_trc(diag,
+ *((u32 *)diag->fwping.dbuf_kva + i));
+ diag->fwping.result->dmastatus =
+ BFA_STATUS_DATACORRUPTED;
+ diag->fwping.status = BFA_STATUS_DATACORRUPTED;
+ diag->fwping.cbfn(diag->fwping.cbarg,
+ diag->fwping.status);
+ diag->fwping.lock = 0;
+ return;
+ }
+ }
+ diag->fwping.result->dmastatus = BFA_STATUS_OK;
+ diag->fwping.status = BFA_STATUS_OK;
+ diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
+ diag->fwping.lock = 0;
+ } else {
+ diag->fwping.status = BFA_STATUS_HDMA_FAILED;
+ diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
+ diag->fwping.lock = 0;
+ }
+}
+
+/*
+ * Temperature Sensor
+ */
+
+static void
+diag_tempsensor_send(struct bfa_diag_s *diag)
+{
+ struct bfi_diag_ts_req_s *msg;
+
+ msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
+ bfa_trc(diag, msg->temp);
+ /* build host command */
+ bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
+ bfa_ioc_portid(diag->ioc));
+ /* send mbox cmd */
+ bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
+}
+
+static void
+diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
+{
+ if (!diag->tsensor.lock) {
+ /* receiving response after ioc failure */
+ bfa_trc(diag, diag->tsensor.lock);
+ return;
+ }
+
+ /*
+ * The ASIC junction temperature sensor is a register read operation,
+ * so it always returns OK.
+ */
+ diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
+ diag->tsensor.temp->ts_junc = rsp->ts_junc;
+ diag->tsensor.temp->ts_brd = rsp->ts_brd;
+ diag->tsensor.temp->status = BFA_STATUS_OK;
+
+ if (rsp->ts_brd) {
+ if (rsp->status == BFA_STATUS_OK) {
+ diag->tsensor.temp->brd_temp =
+ be16_to_cpu(rsp->brd_temp);
+ } else {
+ bfa_trc(diag, rsp->status);
+ diag->tsensor.temp->brd_temp = 0;
+ diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
+ }
+ }
+ bfa_trc(diag, rsp->ts_junc);
+ bfa_trc(diag, rsp->temp);
+ bfa_trc(diag, rsp->ts_brd);
+ bfa_trc(diag, rsp->brd_temp);
+ diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
+ diag->tsensor.lock = 0;
+}
+
+/*
+ * LED Test command
+ */
+static void
+diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
+{
+ struct bfi_diag_ledtest_req_s *msg;
+
+ msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
+ /* build host command */
+ bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
+ bfa_ioc_portid(diag->ioc));
+
+ /*
+ * convert the freq from N blinks per 10 sec to
+ * crossbow on-time value. We do it here because division is needed
+ */
+ if (ledtest->freq)
+ ledtest->freq = 500 / ledtest->freq;
+
+ if (ledtest->freq == 0)
+ ledtest->freq = 1;
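+ /* e.g. 10 blinks per 10 sec -> on-time 50; a zero result clamps to 1 */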
+
+ bfa_trc(diag, ledtest->freq);
+ /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
+ msg->cmd = (u8) ledtest->cmd;
+ msg->color = (u8) ledtest->color;
+ msg->portid = bfa_ioc_portid(diag->ioc);
+ msg->led = ledtest->led;
+ msg->freq = cpu_to_be16(ledtest->freq);
+
+ /* send mbox cmd */
+ bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
+}
+
+static void
+diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
+{
+ bfa_trc(diag, diag->ledtest.lock);
+ diag->ledtest.lock = BFA_FALSE;
+ /* no bfa_cb_queue is needed because driver is not waiting */
+}
+
+/*
+ * Port beaconing
+ */
+static void
+diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
+{
+ struct bfi_diag_portbeacon_req_s *msg;
+
+ msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
+ /* build host command */
+ bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
+ bfa_ioc_portid(diag->ioc));
+ msg->beacon = beacon;
+ msg->period = cpu_to_be32(sec);
+ /* send mbox cmd */
+ bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
+}
+
+static void
+diag_portbeacon_comp(struct bfa_diag_s *diag)
+{
+ bfa_trc(diag, diag->beacon.state);
+ diag->beacon.state = BFA_FALSE;
+ if (diag->cbfn_beacon)
+ diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
+}
+
+/*
+ * Diag mailbox (firmware to host) message handler
+ */
+void
+bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
+{
+ struct bfa_diag_s *diag = diagarg;
+
+ switch (msg->mh.msg_id) {
+ case BFI_DIAG_I2H_PORTBEACON:
+ diag_portbeacon_comp(diag);
+ break;
+ case BFI_DIAG_I2H_FWPING:
+ diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
+ break;
+ case BFI_DIAG_I2H_TEMPSENSOR:
+ diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
+ break;
+ case BFI_DIAG_I2H_LEDTEST:
+ diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
+ break;
+ default:
+ bfa_trc(diag, msg->mh.msg_id);
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Gen RAM Test
+ *
+ * @param[in] *diag - diag data struct
+ * @param[in] *memtest - mem test params input from upper layer,
+ * @param[in] pattern - mem test pattern
+ * @param[in] *result - mem test result
+ * @param[in] cbfn - mem test callback function
+ * @param[in] cbarg - callback function arg
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
+ u32 pattern, struct bfa_diag_memtest_result *result,
+ bfa_cb_diag_t cbfn, void *cbarg)
+{
+ bfa_trc(diag, pattern);
+
+ if (!bfa_ioc_adapter_is_disabled(diag->ioc))
+ return BFA_STATUS_ADAPTER_ENABLED;
+
+ /* check to see if there is another destructive diag cmd running */
+ if (diag->block) {
+ bfa_trc(diag, diag->block);
+ return BFA_STATUS_DEVBUSY;
+ } else
+ diag->block = 1;
+
+ diag->result = result;
+ diag->cbfn = cbfn;
+ diag->cbarg = cbarg;
+
+ /* download memtest code and take LPU0 out of reset */
+ bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
+
+ bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
+ bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
+ diag->timer_active = 1;
+ return BFA_STATUS_OK;
+}
+
+/*
+ * DIAG firmware ping command
+ *
+ * @param[in] *diag - diag data struct
+ * @param[in] cnt - dma loop count for testing PCIE
+ * @param[in] data - data pattern to pass in fw
+ * @param[in] *result - pointer to bfa_diag_results_fwping data struct
+ * @param[in] cbfn - callback function
+ * @param[in] *cbarg - callback function arg
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
+ struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
+ void *cbarg)
+{
+ bfa_trc(diag, cnt);
+ bfa_trc(diag, data);
+
+ if (!bfa_ioc_is_operational(diag->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
+ ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
+ return BFA_STATUS_CMD_NOTSUPP;
+
+ /* check to see if there is another destructive diag cmd running */
+ if (diag->block || diag->fwping.lock) {
+ bfa_trc(diag, diag->block);
+ bfa_trc(diag, diag->fwping.lock);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ /* Initialization */
+ diag->fwping.lock = 1;
+ diag->fwping.cbfn = cbfn;
+ diag->fwping.cbarg = cbarg;
+ diag->fwping.result = result;
+ diag->fwping.data = data;
+ diag->fwping.count = cnt;
+
+ /* Init test results */
+ diag->fwping.result->data = 0;
+ diag->fwping.result->status = BFA_STATUS_OK;
+
+ /* kick off the first ping */
+ diag_fwping_send(diag);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Read Temperature Sensor
+ *
+ * @param[in] *diag - diag data struct
+ * @param[in] *result - pointer to bfa_diag_results_tempsensor_s data struct
+ * @param[in] cbfn - callback function
+ * @param[in] *cbarg - callback function arg
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_diag_tsensor_query(struct bfa_diag_s *diag,
+ struct bfa_diag_results_tempsensor_s *result,
+ bfa_cb_diag_t cbfn, void *cbarg)
+{
+ /* check to see if there is a destructive diag cmd running */
+ if (diag->block || diag->tsensor.lock) {
+ bfa_trc(diag, diag->block);
+ bfa_trc(diag, diag->tsensor.lock);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ if (!bfa_ioc_is_operational(diag->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /* Init diag mod params */
+ diag->tsensor.lock = 1;
+ diag->tsensor.temp = result;
+ diag->tsensor.cbfn = cbfn;
+ diag->tsensor.cbarg = cbarg;
+
+ /* Send msg to fw */
+ diag_tempsensor_send(diag);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * LED Test command
+ *
+ * @param[in] *diag - diag data struct
+ * @param[in] *ledtest - pointer to ledtest data structure
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
+{
+ bfa_trc(diag, ledtest->cmd);
+
+ if (!bfa_ioc_is_operational(diag->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (diag->beacon.state)
+ return BFA_STATUS_BEACON_ON;
+
+ if (diag->ledtest.lock)
+ return BFA_STATUS_LEDTEST_OP;
+
+ /* Send msg to fw */
+ diag->ledtest.lock = BFA_TRUE;
+ diag_ledtest_send(diag, ledtest);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Port beaconing command
+ *
+ * @param[in] *diag - diag data struct
+ * @param[in] beacon - port beaconing 1:ON 0:OFF
+ * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
+ * @param[in] sec - beaconing duration in seconds
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
+ bfa_boolean_t link_e2e_beacon, uint32_t sec)
+{
+ bfa_trc(diag, beacon);
+ bfa_trc(diag, link_e2e_beacon);
+ bfa_trc(diag, sec);
+
+ if (!bfa_ioc_is_operational(diag->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (diag->ledtest.lock)
+ return BFA_STATUS_LEDTEST_OP;
+
+ if (diag->beacon.state && beacon) /* beacon already on */
+ return BFA_STATUS_BEACON_ON;
+
+ diag->beacon.state = beacon;
+ diag->beacon.link_e2e = link_e2e_beacon;
+ if (diag->cbfn_beacon)
+ diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
+
+ /* Send msg to fw */
+ diag_portbeacon_send(diag, beacon, sec);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Return DMA memory needed by diag module.
+ */
+u32
+bfa_diag_meminfo(void)
+{
+ return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Attach virtual and physical memory for Diag.
+ */
+void
+bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
+ bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
+{
+ diag->dev = dev;
+ diag->ioc = ioc;
+ diag->trcmod = trcmod;
+
+ diag->block = 0;
+ diag->cbfn = NULL;
+ diag->cbarg = NULL;
+ diag->result = NULL;
+ diag->cbfn_beacon = cbfn_beacon;
+
+ bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
+ bfa_q_qe_init(&diag->ioc_notify);
+ bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
+ list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
+}
+
+void
+bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
+{
+ diag->fwping.dbuf_kva = dm_kva;
+ diag->fwping.dbuf_pa = dm_pa;
+ memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
+}
+
+/*
+ * PHY module specific
+ */
+#define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
+#define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
+
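+/*
+ * Copy 'sz' bytes from 'ibuf' to 'obuf', converting each 32-bit word
+ * from big-endian to host byte order.
+ */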
+static void
+bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
+{
+ int i, m = sz >> 2;
+
+ for (i = 0; i < m; i++)
+ obuf[i] = be32_to_cpu(ibuf[i]);
+}
+
+static bfa_boolean_t
+bfa_phy_present(struct bfa_phy_s *phy)
+{
+ return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
+}
+
+static void
+bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+ struct bfa_phy_s *phy = cbarg;
+
+ bfa_trc(phy, event);
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (phy->op_busy) {
+ phy->status = BFA_STATUS_IOC_FAILURE;
+ phy->cbfn(phy->cbarg, phy->status);
+ phy->op_busy = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Send phy attribute query request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_query_send(void *cbarg)
+{
+ struct bfa_phy_s *phy = cbarg;
+ struct bfi_phy_query_req_s *msg =
+ (struct bfi_phy_query_req_s *) phy->mb.msg;
+
+ msg->instance = phy->instance;
+ bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
+ bfa_ioc_portid(phy->ioc));
+ bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
+ bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * Send phy write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_write_send(void *cbarg)
+{
+ struct bfa_phy_s *phy = cbarg;
+ struct bfi_phy_write_req_s *msg =
+ (struct bfi_phy_write_req_s *) phy->mb.msg;
+ u32 len;
+ u16 *buf, *dbuf;
+ int i, sz;
+
+ msg->instance = phy->instance;
+ msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
+ len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
+ phy->residue : BFA_PHY_DMA_BUF_SZ;
+ msg->length = cpu_to_be32(len);
+
+ /* indicate if it's the last msg of the whole write operation */
+ msg->last = (len == phy->residue) ? 1 : 0;
+
+ bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
+ bfa_ioc_portid(phy->ioc));
+ bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
+
+ buf = (u16 *) (phy->ubuf + phy->offset);
+ dbuf = (u16 *)phy->dbuf_kva;
+ sz = len >> 1;
+ /* copy user data into the DMA buffer, converting to big-endian */
+ for (i = 0; i < sz; i++)
+ dbuf[i] = cpu_to_be16(buf[i]);
+
+ bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+
+ phy->residue -= len;
+ phy->offset += len;
+}
+
+/*
+ * Send phy read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_read_send(void *cbarg)
+{
+ struct bfa_phy_s *phy = cbarg;
+ struct bfi_phy_read_req_s *msg =
+ (struct bfi_phy_read_req_s *) phy->mb.msg;
+ u32 len;
+
+ msg->instance = phy->instance;
+ msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
+ len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
+ phy->residue : BFA_PHY_DMA_BUF_SZ;
+ msg->length = cpu_to_be32(len);
+ bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
+ bfa_ioc_portid(phy->ioc));
+ bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
+ bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * Send phy stats request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_phy_stats_send(void *cbarg)
+{
+ struct bfa_phy_s *phy = cbarg;
+ struct bfi_phy_stats_req_s *msg =
+ (struct bfi_phy_stats_req_s *) phy->mb.msg;
+
+ msg->instance = phy->instance;
+ bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
+ bfa_ioc_portid(phy->ioc));
+ bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
+ bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
+}
+
+/*
+ * Phy memory info API.
+ *
+ * @param[in] mincfg - minimal cfg variable
+ */
+u32
+bfa_phy_meminfo(bfa_boolean_t mincfg)
+{
+ /* min driver doesn't need phy */
+ if (mincfg)
+ return 0;
+
+ return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Phy attach API.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] ioc - ioc structure
+ * @param[in] dev - device structure
+ * @param[in] trcmod - trace module
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
+ struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
+{
+ phy->ioc = ioc;
+ phy->trcmod = trcmod;
+ phy->cbfn = NULL;
+ phy->cbarg = NULL;
+ phy->op_busy = 0;
+
+ bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
+ bfa_q_qe_init(&phy->ioc_notify);
+ bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
+ list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
+
+ /* min driver doesn't need phy */
+ if (mincfg) {
+ phy->dbuf_kva = NULL;
+ phy->dbuf_pa = 0;
+ }
+}
+
+/*
+ * Claim memory for phy
+ *
+ * @param[in] phy - phy structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
+ bfa_boolean_t mincfg)
+{
+ if (mincfg)
+ return;
+
+ phy->dbuf_kva = dm_kva;
+ phy->dbuf_pa = dm_pa;
+ memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
+ dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+ dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
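+/*
+ * Return non-zero if the phy semaphore is held, i.e. the phy hardware
+ * is busy with another operation.
+ */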
+bfa_boolean_t
+bfa_phy_busy(struct bfa_ioc_s *ioc)
+{
+ void __iomem *rb;
+
+ rb = bfa_ioc_bar0(ioc);
+ return readl(rb + BFA_PHY_LOCK_STATUS);
+}
+
+/*
+ * Get phy attribute.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] attr - phy attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
+ struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
+{
+ bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
+ bfa_trc(phy, instance);
+
+ if (!bfa_phy_present(phy))
+ return BFA_STATUS_PHY_NOT_PRESENT;
+
+ if (!bfa_ioc_is_operational(phy->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+ bfa_trc(phy, phy->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ phy->op_busy = 1;
+ phy->cbfn = cbfn;
+ phy->cbarg = cbarg;
+ phy->instance = instance;
+ phy->ubuf = (uint8_t *) attr;
+ bfa_phy_query_send(phy);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Get phy stats.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] stats - pointer to phy stats
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
+ struct bfa_phy_stats_s *stats,
+ bfa_cb_phy_t cbfn, void *cbarg)
+{
+ bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
+ bfa_trc(phy, instance);
+
+ if (!bfa_phy_present(phy))
+ return BFA_STATUS_PHY_NOT_PRESENT;
+
+ if (!bfa_ioc_is_operational(phy->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+ bfa_trc(phy, phy->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ phy->op_busy = 1;
+ phy->cbfn = cbfn;
+ phy->cbarg = cbarg;
+ phy->instance = instance;
+ phy->ubuf = (u8 *) stats;
+ bfa_phy_stats_send(phy);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Update phy image.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_phy_t cbfn, void *cbarg)
+{
+ bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
+ bfa_trc(phy, instance);
+ bfa_trc(phy, len);
+ bfa_trc(phy, offset);
+
+ if (!bfa_phy_present(phy))
+ return BFA_STATUS_PHY_NOT_PRESENT;
+
+ if (!bfa_ioc_is_operational(phy->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /* 'len' must be on a word (4-byte) boundary */
+ if (!len || (len & 0x03))
+ return BFA_STATUS_FAILED;
+
+ if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+ bfa_trc(phy, phy->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ phy->op_busy = 1;
+ phy->cbfn = cbfn;
+ phy->cbarg = cbarg;
+ phy->instance = instance;
+ phy->residue = len;
+ phy->offset = 0;
+ phy->addr_off = offset;
+ phy->ubuf = buf;
+
+ bfa_phy_write_send(phy);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Read phy image.
+ *
+ * @param[in] phy - phy structure
+ * @param[in] instance - phy image instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_phy_t cbfn, void *cbarg)
+{
+ bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
+ bfa_trc(phy, instance);
+ bfa_trc(phy, len);
+ bfa_trc(phy, offset);
+
+ if (!bfa_phy_present(phy))
+ return BFA_STATUS_PHY_NOT_PRESENT;
+
+ if (!bfa_ioc_is_operational(phy->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /* 'len' must be on a word (4-byte) boundary */
+ if (!len || (len & 0x03))
+ return BFA_STATUS_FAILED;
+
+ if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
+ bfa_trc(phy, phy->op_busy);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ phy->op_busy = 1;
+ phy->cbfn = cbfn;
+ phy->cbarg = cbarg;
+ phy->instance = instance;
+ phy->residue = len;
+ phy->offset = 0;
+ phy->addr_off = offset;
+ phy->ubuf = buf;
+ bfa_phy_read_send(phy);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Process phy response messages upon receiving interrupts.
+ *
+ * @param[in] phyarg - phy structure
+ * @param[in] msg - message structure
+ */
+void
+bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
+{
+ struct bfa_phy_s *phy = phyarg;
+ u32 status;
+
+ union {
+ struct bfi_phy_query_rsp_s *query;
+ struct bfi_phy_stats_rsp_s *stats;
+ struct bfi_phy_write_rsp_s *write;
+ struct bfi_phy_read_rsp_s *read;
+ struct bfi_mbmsg_s *msg;
+ } m;
+
+ m.msg = msg;
+ bfa_trc(phy, msg->mh.msg_id);
+
+ if (!phy->op_busy) {
+ /* receiving response after ioc failure */
+ bfa_trc(phy, 0x9999);
+ return;
+ }
+
+ switch (msg->mh.msg_id) {
+ case BFI_PHY_I2H_QUERY_RSP:
+ status = be32_to_cpu(m.query->status);
+ bfa_trc(phy, status);
+
+ if (status == BFA_STATUS_OK) {
+ struct bfa_phy_attr_s *attr =
+ (struct bfa_phy_attr_s *) phy->ubuf;
+ bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
+ sizeof(struct bfa_phy_attr_s));
+ bfa_trc(phy, attr->status);
+ bfa_trc(phy, attr->length);
+ }
+
+ phy->status = status;
+ phy->op_busy = 0;
+ if (phy->cbfn)
+ phy->cbfn(phy->cbarg, phy->status);
+ break;
+ case BFI_PHY_I2H_STATS_RSP:
+ status = be32_to_cpu(m.stats->status);
+ bfa_trc(phy, status);
+
+ if (status == BFA_STATUS_OK) {
+ struct bfa_phy_stats_s *stats =
+ (struct bfa_phy_stats_s *) phy->ubuf;
+ bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
+ sizeof(struct bfa_phy_stats_s));
+ bfa_trc(phy, stats->status);
+ }
+
+ phy->status = status;
+ phy->op_busy = 0;
+ if (phy->cbfn)
+ phy->cbfn(phy->cbarg, phy->status);
+ break;
+ case BFI_PHY_I2H_WRITE_RSP:
+ status = be32_to_cpu(m.write->status);
+ bfa_trc(phy, status);
+
+ if (status != BFA_STATUS_OK || phy->residue == 0) {
+ phy->status = status;
+ phy->op_busy = 0;
+ if (phy->cbfn)
+ phy->cbfn(phy->cbarg, phy->status);
+ } else {
+ bfa_trc(phy, phy->offset);
+ bfa_phy_write_send(phy);
+ }
+ break;
+ case BFI_PHY_I2H_READ_RSP:
+ status = be32_to_cpu(m.read->status);
+ bfa_trc(phy, status);
+
+ if (status != BFA_STATUS_OK) {
+ phy->status = status;
+ phy->op_busy = 0;
+ if (phy->cbfn)
+ phy->cbfn(phy->cbarg, phy->status);
+ } else {
+ u32 len = be32_to_cpu(m.read->length);
+ u16 *buf = (u16 *)(phy->ubuf + phy->offset);
+ u16 *dbuf = (u16 *)phy->dbuf_kva;
+ int i, sz = len >> 1;
+
+ bfa_trc(phy, phy->offset);
+ bfa_trc(phy, len);
+
+ for (i = 0; i < sz; i++)
+ buf[i] = be16_to_cpu(dbuf[i]);
+
+ phy->residue -= len;
+ phy->offset += len;
+
+ if (phy->residue == 0) {
+ phy->status = status;
+ phy->op_busy = 0;
+ if (phy->cbfn)
+ phy->cbfn(phy->cbarg, phy->status);
+ } else
+ bfa_phy_read_send(phy);
+ }
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+/*
+ * DCONF module specific
+ */
+
+BFA_MODULE(dconf);
+
+/*
+ * DCONF state machine events
+ */
+enum bfa_dconf_event {
+ BFA_DCONF_SM_INIT = 1, /* dconf Init */
+ BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
+ BFA_DCONF_SM_WR = 3, /* binding change, map */
+ BFA_DCONF_SM_TIMEOUT = 4, /* dconf timer expiry */
+ BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
+ BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
+};
+
+/* forward declaration of DCONF state machine */
+static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event);
+
+static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
+static void bfa_dconf_timer(void *cbarg);
+static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
+static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
+
+/*
+ * Beginning state of the dconf module. Waiting for an event to start.
+ */
+static void
+bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+ bfa_status_t bfa_status;
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_INIT:
+ if (dconf->min_cfg) {
+ bfa_trc(dconf->bfa, dconf->min_cfg);
+ return;
+ }
+ bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
+ dconf->flashdone = BFA_FALSE;
+ bfa_trc(dconf->bfa, dconf->flashdone);
+ bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
+ BFA_FLASH_PART_DRV, dconf->instance,
+ dconf->dconf,
+ sizeof(struct bfa_dconf_s), 0,
+ bfa_dconf_init_cb, dconf->bfa);
+ if (bfa_status != BFA_STATUS_OK) {
+ bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ return;
+ }
+ break;
+ case BFA_DCONF_SM_EXIT:
+ dconf->flashdone = BFA_TRUE;
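+ /* fall through */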
+ case BFA_DCONF_SM_IOCDISABLE:
+ case BFA_DCONF_SM_WR:
+ case BFA_DCONF_SM_FLASH_COMP:
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * Read flash for dconf entries and make a call back to the driver once done.
+ */
+static void
+bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_FLASH_COMP:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+ break;
+ case BFA_DCONF_SM_TIMEOUT:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ dconf->flashdone = BFA_TRUE;
+ bfa_trc(dconf->bfa, dconf->flashdone);
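+ /* fall through */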
+ case BFA_DCONF_SM_IOCDISABLE:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * DCONF Module is in ready state. Has completed the initialization.
+ */
+static void
+bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_WR:
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ dconf->flashdone = BFA_TRUE;
+ bfa_trc(dconf->bfa, dconf->flashdone);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ break;
+ case BFA_DCONF_SM_INIT:
+ case BFA_DCONF_SM_IOCDISABLE:
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * entries are dirty, write back to the flash.
+ */
+
+static void
+bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_TIMEOUT:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
+ bfa_dconf_flash_write(dconf);
+ break;
+ case BFA_DCONF_SM_WR:
+ bfa_timer_stop(&dconf->timer);
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ bfa_timer_stop(&dconf->timer);
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
+ bfa_dconf_flash_write(dconf);
+ break;
+ case BFA_DCONF_SM_FLASH_COMP:
+ break;
+ case BFA_DCONF_SM_IOCDISABLE:
+ bfa_timer_stop(&dconf->timer);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * Sync the dconf entries to the flash.
+ */
+static void
+bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_IOCDISABLE:
+ case BFA_DCONF_SM_FLASH_COMP:
+ bfa_timer_stop(&dconf->timer);
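+ /* fall through */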
+ case BFA_DCONF_SM_TIMEOUT:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ dconf->flashdone = BFA_TRUE;
+ bfa_trc(dconf->bfa, dconf->flashdone);
+ bfa_ioc_disable(&dconf->bfa->ioc);
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+static void
+bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_FLASH_COMP:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
+ break;
+ case BFA_DCONF_SM_WR:
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
+ break;
+ case BFA_DCONF_SM_IOCDISABLE:
+ bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+static void
+bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
+ enum bfa_dconf_event event)
+{
+ bfa_trc(dconf->bfa, event);
+
+ switch (event) {
+ case BFA_DCONF_SM_INIT:
+ bfa_timer_start(dconf->bfa, &dconf->timer,
+ bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+ bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
+ break;
+ case BFA_DCONF_SM_EXIT:
+ dconf->flashdone = BFA_TRUE;
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+ break;
+ case BFA_DCONF_SM_IOCDISABLE:
+ break;
+ default:
+ bfa_sm_fault(dconf->bfa, event);
+ }
+}
+
+/*
+ * Compute memory needed by the dconf (driver config) module.
+ */
+static void
+bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa)
+{
+ struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
+
+ if (cfg->drvcfg.min_cfg)
+ bfa_mem_kva_setup(meminfo, dconf_kva,
+ sizeof(struct bfa_dconf_hdr_s));
+ else
+ bfa_mem_kva_setup(meminfo, dconf_kva,
+ sizeof(struct bfa_dconf_s));
+}
+
+static void
+bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+
+ dconf->bfad = bfad;
+ dconf->bfa = bfa;
+ dconf->instance = bfa->ioc.port_id;
+ bfa_trc(bfa, dconf->instance);
+
+ dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
+ if (cfg->drvcfg.min_cfg) {
+ bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
+ dconf->min_cfg = BFA_TRUE;
+ /*
+ * Set the flashdone flag to TRUE explicitly as no flash
+ * write will happen in min_cfg mode.
+ */
+ dconf->flashdone = BFA_TRUE;
+ } else {
+ dconf->min_cfg = BFA_FALSE;
+ bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
+ }
+
+ bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
+ bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
+}
+
+static void
+bfa_dconf_init_cb(void *arg, bfa_status_t status)
+{
+ struct bfa_s *bfa = arg;
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+
+ dconf->flashdone = BFA_TRUE;
+ bfa_trc(bfa, dconf->flashdone);
+ bfa_iocfc_cb_dconf_modinit(bfa, status);
+ if (status == BFA_STATUS_OK) {
+ bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
+ if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
+ dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
+ if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
+ dconf->dconf->hdr.version = BFI_DCONF_VERSION;
+ }
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+}
+
+void
+bfa_dconf_modinit(struct bfa_s *bfa)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
+}
+static void
+bfa_dconf_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_dconf_stop(struct bfa_s *bfa)
+{
+}
+
+static void bfa_dconf_timer(void *cbarg)
+{
+ struct bfa_dconf_mod_s *dconf = cbarg;
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
+}
+static void
+bfa_dconf_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
+}
+
+static void
+bfa_dconf_detach(struct bfa_s *bfa)
+{
+}
+
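+/*
+ * Write the cached dconf contents to the DRV flash partition;
+ * bfa_dconf_cbfn() is invoked on completion.
+ */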
+static bfa_status_t
+bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
+{
+ bfa_status_t bfa_status;
+ bfa_trc(dconf->bfa, 0);
+
+ bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
+ BFA_FLASH_PART_DRV, dconf->instance,
+ dconf->dconf, sizeof(struct bfa_dconf_s), 0,
+ bfa_dconf_cbfn, dconf);
+ if (bfa_status != BFA_STATUS_OK)
+ WARN_ON(bfa_status);
+ bfa_trc(dconf->bfa, bfa_status);
+
+ return bfa_status;
+}
+
+bfa_status_t
+bfa_dconf_update(struct bfa_s *bfa)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+ bfa_trc(dconf->bfa, 0);
+ if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
+ return BFA_STATUS_FAILED;
+
+ if (dconf->min_cfg) {
+ bfa_trc(dconf->bfa, dconf->min_cfg);
+ return BFA_STATUS_FAILED;
+ }
+
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_dconf_cbfn(void *arg, bfa_status_t status)
+{
+ struct bfa_dconf_mod_s *dconf = arg;
+ WARN_ON(status);
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
+}
+
+void
+bfa_dconf_modexit(struct bfa_s *bfa)
+{
+ struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
+ BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
+ bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
+ bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
+}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index c85182a704f..546d46b3710 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -85,12 +85,75 @@ struct bfa_sge_s {
#endif
/*
+ * BFA memory resources
+ */
+struct bfa_mem_dma_s {
+ struct list_head qe; /* Queue of DMA elements */
+ u32 mem_len; /* Total Length in Bytes */
+ u8 *kva; /* kernel virtual address */
+ u64 dma; /* dma address if DMA memory */
+ u8 *kva_curp; /* kva allocation cursor */
+ u64 dma_curp; /* dma allocation cursor */
+};
+#define bfa_mem_dma_t struct bfa_mem_dma_s
+
+struct bfa_mem_kva_s {
+ struct list_head qe; /* Queue of KVA elements */
+ u32 mem_len; /* Total Length in Bytes */
+ u8 *kva; /* kernel virtual address */
+ u8 *kva_curp; /* kva allocation cursor */
+};
+#define bfa_mem_kva_t struct bfa_mem_kva_s
+
+struct bfa_meminfo_s {
+ struct bfa_mem_dma_s dma_info;
+ struct bfa_mem_kva_s kva_info;
+};
+
+/* BFA memory segment setup macros */
+#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do { \
+ ((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz); \
+ if (_seg_sz) \
+ list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe, \
+ &(_meminfo)->dma_info.qe); \
+} while (0)
+
+#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do { \
+ ((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz); \
+ if (_seg_sz) \
+ list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe, \
+ &(_meminfo)->kva_info.qe); \
+} while (0)
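+
+/*
+ * Typical usage (sketch): a module's meminfo handler registers its KVA
+ * segment, e.g.
+ *	bfa_mem_kva_setup(meminfo, dconf_kva, sizeof(struct bfa_dconf_s));
+ */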
+
+/* BFA dma memory segments iterator */
+#define bfa_mem_dma_sptr(_mod, _i) (&(_mod)->dma_seg[(_i)])
+#define bfa_mem_dma_seg_iter(_mod, _sptr, _nr, _i) \
+ for (_i = 0, _sptr = bfa_mem_dma_sptr(_mod, _i); _i < (_nr); \
+ _i++, _sptr = bfa_mem_dma_sptr(_mod, _i))
+
+#define bfa_mem_kva_curp(_mod) ((_mod)->kva_seg.kva_curp)
+#define bfa_mem_dma_virt(_sptr) ((_sptr)->kva_curp)
+#define bfa_mem_dma_phys(_sptr) ((_sptr)->dma_curp)
+#define bfa_mem_dma_len(_sptr) ((_sptr)->mem_len)
+
+/* Get the corresponding dma buf kva for a req - from the tag */
+#define bfa_mem_get_dmabuf_kva(_mod, _tag, _rqsz) \
+ (((u8 *)(_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].kva_curp) +\
+ BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
+
+/* Get the corresponding dma buf pa for a req - from the tag */
+#define bfa_mem_get_dmabuf_pa(_mod, _tag, _rqsz) \
+ ((_mod)->dma_seg[BFI_MEM_SEG_FROM_TAG(_tag, _rqsz)].dma_curp + \
+ BFI_MEM_SEG_REQ_OFFSET(_tag, _rqsz) * (_rqsz))
+
+/*
* PCI device information required by IOC
*/
struct bfa_pcidev_s {
int pci_slot;
u8 pci_func;
u16 device_id;
+ u16 ssid;
void __iomem *pci_bar_kva;
};
@@ -112,18 +175,6 @@ struct bfa_dma_s {
#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
-
-#define bfa_dma_addr_set(dma_addr, pa) \
- __bfa_dma_addr_set(&dma_addr, (u64)pa)
-
-static inline void
-__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
-{
- dma_addr->a32.addr_lo = (__be32) pa;
- dma_addr->a32.addr_hi = (__be32) (pa >> 32);
-}
-
-
#define bfa_dma_be_addr_set(dma_addr, pa) \
__bfa_dma_be_addr_set(&dma_addr, (u64)pa)
static inline void
@@ -133,11 +184,22 @@ __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32);
}
+#define bfa_alen_set(__alen, __len, __pa) \
+ __bfa_alen_set(__alen, __len, (u64)__pa)
+
+static inline void
+__bfa_alen_set(struct bfi_alen_s *alen, u32 len, u64 pa)
+{
+ alen->al_len = cpu_to_be32(len);
+ bfa_dma_be_addr_set(alen->al_addr, pa);
+}
+
struct bfa_ioc_regs_s {
void __iomem *hfn_mbox_cmd;
void __iomem *hfn_mbox;
void __iomem *lpu_mbox_cmd;
void __iomem *lpu_mbox;
+ void __iomem *lpu_read_stat;
void __iomem *pss_ctl_reg;
void __iomem *pss_err_status_reg;
void __iomem *app_pll_fast_ctl_reg;
@@ -199,18 +261,26 @@ struct bfa_ioc_cbfn_s {
};
/*
- * Heartbeat failure notification queue element.
+ * IOC event notification mechanism.
*/
-struct bfa_ioc_hbfail_notify_s {
+enum bfa_ioc_event_e {
+ BFA_IOC_E_ENABLED = 1,
+ BFA_IOC_E_DISABLED = 2,
+ BFA_IOC_E_FAILED = 3,
+};
+
+typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event_e);
+
+struct bfa_ioc_notify_s {
struct list_head qe;
- bfa_ioc_hbfail_cbfn_t cbfn;
+ bfa_ioc_notify_cbfn_t cbfn;
void *cbarg;
};
/*
- * Initialize a heartbeat failure notification structure
+ * Initialize an IOC event notification structure
*/
-#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \
+#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
(__notify)->cbfn = (__cbfn); \
(__notify)->cbarg = (__cbarg); \
} while (0)
@@ -218,8 +288,9 @@ struct bfa_ioc_hbfail_notify_s {
struct bfa_iocpf_s {
bfa_fsm_t fsm;
struct bfa_ioc_s *ioc;
- u32 retry_count;
+ bfa_boolean_t fw_mismatch_notified;
bfa_boolean_t auto_recover;
+ u32 poll_time;
};
struct bfa_ioc_s {
@@ -231,17 +302,15 @@ struct bfa_ioc_s {
struct bfa_timer_s sem_timer;
struct bfa_timer_s hb_timer;
u32 hb_count;
- struct list_head hb_notify_q;
+ struct list_head notify_q;
void *dbg_fwsave;
int dbg_fwsave_len;
bfa_boolean_t dbg_fwsave_once;
- enum bfi_mclass ioc_mc;
+ enum bfi_pcifn_class clscode;
struct bfa_ioc_regs_s ioc_regs;
struct bfa_trc_mod_s *trcmod;
struct bfa_ioc_drv_stats_s stats;
bfa_boolean_t fcmode;
- bfa_boolean_t ctdev;
- bfa_boolean_t cna;
bfa_boolean_t pllinit;
bfa_boolean_t stats_busy; /* outstanding stats */
u8 port_id;
@@ -251,10 +320,18 @@ struct bfa_ioc_s {
struct bfa_ioc_mbox_mod_s mbox_mod;
struct bfa_ioc_hwif_s *ioc_hwif;
struct bfa_iocpf_s iocpf;
+ enum bfi_asic_gen asic_gen;
+ enum bfi_asic_mode asic_mode;
+ enum bfi_port_mode port0_mode;
+ enum bfi_port_mode port1_mode;
+ enum bfa_mode_s port_mode;
+ u8 ad_cap_bm; /* adapter cap bit mask */
+ u8 port_mode_cfg; /* config port mode */
+ int ioc_aen_seq;
};
struct bfa_ioc_hwif_s {
- bfa_status_t (*ioc_pll_init) (void __iomem *rb, bfa_boolean_t fcmode);
+ bfa_status_t (*ioc_pll_init) (void __iomem *rb, enum bfi_asic_mode m);
bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc);
void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc);
void (*ioc_reg_init) (struct bfa_ioc_s *ioc);
@@ -268,12 +345,400 @@ struct bfa_ioc_hwif_s {
void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
+ bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
+};
+
+/*
+ * Queue element to wait for room in request queue. FIFO order is
+ * maintained when fulfilling requests.
+ */
+struct bfa_reqq_wait_s {
+ struct list_head qe;
+ void (*qresume) (void *cbarg);
+ void *cbarg;
+};
+
+typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+
+/*
+ * Generic BFA callback element.
+ */
+struct bfa_cb_qe_s {
+ struct list_head qe;
+ bfa_cb_cbfn_t cbfn;
+ bfa_boolean_t once;
+ bfa_boolean_t pre_rmv; /* set for stack based qe(s) */
+ bfa_status_t fw_status; /* to access fw status in comp proc */
+ void *cbarg;
+};
+
+/*
+ * ASIC block configuration related
+ */
+
+typedef void (*bfa_ablk_cbfn_t)(void *, enum bfa_status);
+
+struct bfa_ablk_s {
+ struct bfa_ioc_s *ioc;
+ struct bfa_ablk_cfg_s *cfg;
+ u16 *pcifn;
+ struct bfa_dma_s dma_addr;
+ bfa_boolean_t busy;
+ struct bfa_mbox_cmd_s mb;
+ bfa_ablk_cbfn_t cbfn;
+ void *cbarg;
+ struct bfa_ioc_notify_s ioc_notify;
+ struct bfa_mem_dma_s ablk_dma;
+};
+#define BFA_MEM_ABLK_DMA(__bfa) (&((__bfa)->modules.ablk.ablk_dma))
+
+/*
+ * SFP module specific
+ */
+typedef void (*bfa_cb_sfp_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_sfp_s {
+ void *dev;
+ struct bfa_ioc_s *ioc;
+ struct bfa_trc_mod_s *trcmod;
+ struct sfp_mem_s *sfpmem;
+ bfa_cb_sfp_t cbfn;
+ void *cbarg;
+ enum bfi_sfp_mem_e memtype; /* mem access type */
+ u32 status;
+ struct bfa_mbox_cmd_s mbcmd;
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+ struct bfa_ioc_notify_s ioc_notify;
+ enum bfa_defs_sfp_media_e *media;
+ enum bfa_port_speed portspeed;
+ bfa_cb_sfp_t state_query_cbfn;
+ void *state_query_cbarg;
+ u8 lock;
+ u8 data_valid; /* data in dbuf is valid */
+ u8 state; /* sfp state */
+ u8 state_query_lock;
+ struct bfa_mem_dma_s sfp_dma;
+ u8 is_elb; /* eloopback */
+};
+
+#define BFA_SFP_MOD(__bfa) (&(__bfa)->modules.sfp)
+#define BFA_MEM_SFP_DMA(__bfa) (&(BFA_SFP_MOD(__bfa)->sfp_dma))
+
+u32 bfa_sfp_meminfo(void);
+
+void bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc,
+ void *dev, struct bfa_trc_mod_s *trcmod);
+
+void bfa_sfp_memclaim(struct bfa_sfp_s *diag, u8 *dm_kva, u64 dm_pa);
+void bfa_sfp_intr(void *bfaarg, struct bfi_mbmsg_s *msg);
+
+bfa_status_t bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
+ bfa_cb_sfp_t cbfn, void *cbarg);
+
+bfa_status_t bfa_sfp_media(struct bfa_sfp_s *sfp,
+ enum bfa_defs_sfp_media_e *media,
+ bfa_cb_sfp_t cbfn, void *cbarg);
+
+bfa_status_t bfa_sfp_speed(struct bfa_sfp_s *sfp,
+ enum bfa_port_speed portspeed,
+ bfa_cb_sfp_t cbfn, void *cbarg);
+
+/*
+ * Flash module specific
+ */
+typedef void (*bfa_cb_flash_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_flash_s {
+ struct bfa_ioc_s *ioc; /* back pointer to ioc */
+ struct bfa_trc_mod_s *trcmod;
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 op_busy; /* operation busy flag */
+ u32 residue; /* residual length */
+ u32 offset; /* offset */
+ bfa_status_t status; /* status */
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+ bfa_cb_flash_t cbfn; /* user callback function */
+ void *cbarg; /* user callback arg */
+ u8 *ubuf; /* user supplied buffer */
+ struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */
+ u32 addr_off; /* partition address offset */
+ struct bfa_mbox_cmd_s mb; /* mailbox */
+ struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
+ struct bfa_mem_dma_s flash_dma;
+};
+
+#define BFA_FLASH(__bfa) (&(__bfa)->modules.flash)
+#define BFA_MEM_FLASH_DMA(__bfa) (&(BFA_FLASH(__bfa)->flash_dma))
+
+bfa_status_t bfa_flash_get_attr(struct bfa_flash_s *flash,
+ struct bfa_flash_attr_s *attr,
+ bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_erase_part(struct bfa_flash_s *flash,
+ enum bfa_flash_part_type type, u8 instance,
+ bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_update_part(struct bfa_flash_s *flash,
+ enum bfa_flash_part_type type, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_flash_t cbfn, void *cbarg);
+bfa_status_t bfa_flash_read_part(struct bfa_flash_s *flash,
+ enum bfa_flash_part_type type, u8 instance, void *buf,
+ u32 len, u32 offset, bfa_cb_flash_t cbfn, void *cbarg);
+u32 bfa_flash_meminfo(bfa_boolean_t mincfg);
+void bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc,
+ void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
+void bfa_flash_memclaim(struct bfa_flash_s *flash,
+ u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
+
+/*
+ * DIAG module specific
+ */
+
+typedef void (*bfa_cb_diag_t) (void *cbarg, bfa_status_t status);
+typedef void (*bfa_cb_diag_beacon_t) (void *dev, bfa_boolean_t beacon,
+ bfa_boolean_t link_e2e_beacon);
+
+/*
+ * Firmware ping test results
+ */
+struct bfa_diag_results_fwping {
+ u32 data; /* store the corrupted data */
+ u32 status;
+ u32 dmastatus;
+ u8 rsvd[4];
+};
+
+struct bfa_diag_qtest_result_s {
+ u32 status;
+ u16 count; /* successful queue test count */
+ u8 queue;
+ u8 rsvd; /* 64-bit align */
+};
+
+/*
+ * Firmware ping test control data
+ */
+struct bfa_diag_fwping_s {
+ struct bfa_diag_results_fwping *result;
+ bfa_cb_diag_t cbfn;
+ void *cbarg;
+ u32 data;
+ u8 lock;
+ u8 rsv[3];
+ u32 status;
+ u32 count;
+ struct bfa_mbox_cmd_s mbcmd;
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+};
+
+/*
+ * Temperature sensor query results
+ */
+struct bfa_diag_results_tempsensor_s {
+ u32 status;
+ u16 temp; /* 10-bit A/D value */
+ u16 brd_temp; /* 9-bit board temp */
+ u8 ts_junc; /* show junction tempsensor */
+ u8 ts_brd; /* show board tempsensor */
+ u8 rsvd[6]; /* keep 8 bytes alignment */
+};
+
+struct bfa_diag_tsensor_s {
+ bfa_cb_diag_t cbfn;
+ void *cbarg;
+ struct bfa_diag_results_tempsensor_s *temp;
+ u8 lock;
+ u8 rsv[3];
+ u32 status;
+ struct bfa_mbox_cmd_s mbcmd;
+};
+
+struct bfa_diag_sfpshow_s {
+ struct sfp_mem_s *sfpmem;
+ bfa_cb_diag_t cbfn;
+ void *cbarg;
+ u8 lock;
+ u8 static_data;
+ u8 rsv[2];
+ u32 status;
+ struct bfa_mbox_cmd_s mbcmd;
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+};
+
+struct bfa_diag_led_s {
+ struct bfa_mbox_cmd_s mbcmd;
+ bfa_boolean_t lock; /* 1: ledtest is operating */
+};
+
+struct bfa_diag_beacon_s {
+ struct bfa_mbox_cmd_s mbcmd;
+ bfa_boolean_t state; /* port beacon state */
+ bfa_boolean_t link_e2e; /* link beacon state */
+};
+
+struct bfa_diag_s {
+ void *dev;
+ struct bfa_ioc_s *ioc;
+ struct bfa_trc_mod_s *trcmod;
+ struct bfa_diag_fwping_s fwping;
+ struct bfa_diag_tsensor_s tsensor;
+ struct bfa_diag_sfpshow_s sfpshow;
+ struct bfa_diag_led_s ledtest;
+ struct bfa_diag_beacon_s beacon;
+ void *result;
+ struct bfa_timer_s timer;
+ bfa_cb_diag_beacon_t cbfn_beacon;
+ bfa_cb_diag_t cbfn;
+ void *cbarg;
+ u8 block;
+ u8 timer_active;
+ u8 rsvd[2];
+ u32 status;
+ struct bfa_ioc_notify_s ioc_notify;
+ struct bfa_mem_dma_s diag_dma;
};
+#define BFA_DIAG_MOD(__bfa) (&(__bfa)->modules.diag_mod)
+#define BFA_MEM_DIAG_DMA(__bfa) (&(BFA_DIAG_MOD(__bfa)->diag_dma))
+
+u32 bfa_diag_meminfo(void);
+void bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa);
+void bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
+ bfa_cb_diag_beacon_t cbfn_beacon,
+ struct bfa_trc_mod_s *trcmod);
+bfa_status_t bfa_diag_reg_read(struct bfa_diag_s *diag, u32 offset,
+ u32 len, u32 *buf, u32 force);
+bfa_status_t bfa_diag_reg_write(struct bfa_diag_s *diag, u32 offset,
+ u32 len, u32 value, u32 force);
+bfa_status_t bfa_diag_tsensor_query(struct bfa_diag_s *diag,
+ struct bfa_diag_results_tempsensor_s *result,
+ bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt,
+ u32 pattern, struct bfa_diag_results_fwping *result,
+ bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t bfa_diag_sfpshow(struct bfa_diag_s *diag,
+ struct sfp_mem_s *sfpmem, u8 static_data,
+ bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t bfa_diag_memtest(struct bfa_diag_s *diag,
+ struct bfa_diag_memtest_s *memtest, u32 pattern,
+ struct bfa_diag_memtest_result *result,
+ bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t bfa_diag_ledtest(struct bfa_diag_s *diag,
+ struct bfa_diag_ledtest_s *ledtest);
+bfa_status_t bfa_diag_beacon_port(struct bfa_diag_s *diag,
+ bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon,
+ u32 sec);
+
+/*
+ * PHY module specific
+ */
+typedef void (*bfa_cb_phy_t) (void *cbarg, bfa_status_t status);
+
+struct bfa_phy_s {
+ struct bfa_ioc_s *ioc; /* back pointer to ioc */
+ struct bfa_trc_mod_s *trcmod; /* trace module */
+ u8 instance; /* port instance */
+ u8 op_busy; /* operation busy flag */
+ u8 rsv[2];
+ u32 residue; /* residual length */
+ u32 offset; /* offset */
+ bfa_status_t status; /* status */
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
+ bfa_cb_phy_t cbfn; /* user callback function */
+ void *cbarg; /* user callback arg */
+ u8 *ubuf; /* user supplied buffer */
+ struct bfa_cb_qe_s hcb_qe; /* comp: BFA callback qelem */
+ u32 addr_off; /* phy address offset */
+ struct bfa_mbox_cmd_s mb; /* mailbox */
+ struct bfa_ioc_notify_s ioc_notify; /* ioc event notify */
+ struct bfa_mem_dma_s phy_dma;
+};
+#define BFA_PHY(__bfa) (&(__bfa)->modules.phy)
+#define BFA_MEM_PHY_DMA(__bfa) (&(BFA_PHY(__bfa)->phy_dma))
+
+bfa_boolean_t bfa_phy_busy(struct bfa_ioc_s *ioc);
+bfa_status_t bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
+ struct bfa_phy_attr_s *attr,
+ bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
+ struct bfa_phy_stats_s *stats,
+ bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_phy_t cbfn, void *cbarg);
+bfa_status_t bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_phy_t cbfn, void *cbarg);
+
+u32 bfa_phy_meminfo(bfa_boolean_t mincfg);
+void bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc,
+ void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
+void bfa_phy_memclaim(struct bfa_phy_s *phy,
+ u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg);
+void bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg);
+
+/*
+ * Driver Config (dconf) specific
+ */
+#define BFI_DCONF_SIGNATURE 0xabcdabcd
+#define BFI_DCONF_VERSION 1
+
+#pragma pack(1)
+struct bfa_dconf_hdr_s {
+ u32 signature;
+ u32 version;
+};
+
+struct bfa_dconf_s {
+ struct bfa_dconf_hdr_s hdr;
+ struct bfa_lunmask_cfg_s lun_mask;
+};
+#pragma pack()
+
+struct bfa_dconf_mod_s {
+ bfa_sm_t sm;
+ u8 instance;
+ bfa_boolean_t flashdone;
+ bfa_boolean_t read_data_valid;
+ bfa_boolean_t min_cfg;
+ struct bfa_timer_s timer;
+ struct bfa_s *bfa;
+ void *bfad;
+ void *trcmod;
+ struct bfa_dconf_s *dconf;
+ struct bfa_mem_kva_s kva_seg;
+};
+
+#define BFA_DCONF_MOD(__bfa) \
+ (&(__bfa)->modules.dconf_mod)
+#define BFA_MEM_DCONF_KVA(__bfa) (&(BFA_DCONF_MOD(__bfa)->kva_seg))
+#define bfa_dconf_read_data_valid(__bfa) \
+ (BFA_DCONF_MOD(__bfa)->read_data_valid)
+#define BFA_DCONF_UPDATE_TOV 5000 /* dconf flash update timeout in msec */
+
+void bfa_dconf_modinit(struct bfa_s *bfa);
+void bfa_dconf_modexit(struct bfa_s *bfa);
+bfa_status_t bfa_dconf_update(struct bfa_s *bfa);
+
+/*
+ * IOC specific macros
+ */
#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
+#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen)
+#define bfa_ioc_is_cna(__ioc) \
+ ((bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_FCoE) || \
+ (bfa_ioc_get_type(__ioc) == BFA_IOC_TYPE_LL))
#define bfa_ioc_fetch_stats(__ioc, __stats) \
(((__stats)->drv_stats) = (__ioc)->stats)
#define bfa_ioc_clr_stats(__ioc) \
@@ -287,12 +752,9 @@ struct bfa_ioc_hwif_s {
#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
-#define BFA_IOC_FWIMG_TYPE(__ioc) \
- (((__ioc)->ctdev) ? \
- (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
- BFI_IMAGE_CB_FC)
-#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
- (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
+#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
+ ((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB) \
+ ? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
@@ -305,7 +767,7 @@ void bfa_ioc_mbox_register(struct bfa_ioc_s *ioc,
bfa_ioc_mbox_mcfunc_t *mcfuncs);
void bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc);
void bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len);
-void bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
+bfa_boolean_t bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg);
void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
@@ -315,40 +777,49 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
#define bfa_ioc_pll_init_asic(__ioc) \
((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
- (__ioc)->fcmode))
+ (__ioc)->asic_mode))
bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
-bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
-bfa_boolean_t bfa_ioc_ct_pll_init_complete(void __iomem *rb);
-bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
+bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
+bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
+bfa_status_t bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode);
-#define bfa_ioc_isr_mode_set(__ioc, __msix) \
- ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
+#define bfa_ioc_isr_mode_set(__ioc, __msix) do { \
+ if ((__ioc)->ioc_hwif->ioc_isr_mode_set) \
+ ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)); \
+} while (0)
#define bfa_ioc_ownership_reset(__ioc) \
((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
+#define bfa_ioc_get_fcmode(__ioc) ((__ioc)->fcmode)
+#define bfa_ioc_lpu_read_stat(__ioc) do { \
+ if ((__ioc)->ioc_hwif->ioc_lpu_read_stat) \
+ ((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \
+} while (0)
-
-void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc);
void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
void bfa_ioc_detach(struct bfa_ioc_s *ioc);
void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
- enum bfi_mclass mc);
+ enum bfi_pcifn_class clscode);
void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa);
void bfa_ioc_enable(struct bfa_ioc_s *ioc);
void bfa_ioc_disable(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,
- u32 boot_param);
+ u32 boot_env);
void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc);
@@ -372,17 +843,43 @@ bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
int *trclen);
bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
u32 *offset, int *buflen);
-void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
-bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
struct bfi_ioc_image_hdr_s *fwhdr);
bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
struct bfi_ioc_image_hdr_s *fwhdr);
+void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
/*
+ * asic block configuration related APIs
+ */
+u32 bfa_ablk_meminfo(void);
+void bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa);
+void bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc);
+bfa_status_t bfa_ablk_query(struct bfa_ablk_s *ablk,
+ struct bfa_ablk_cfg_s *ablk_cfg,
+ bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_adapter_config(struct bfa_ablk_s *ablk,
+ enum bfa_mode_s mode, int max_pf, int max_vf,
+ bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port,
+ enum bfa_mode_s mode, int max_pf, int max_vf,
+ bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
+ u8 port, enum bfi_pcifn_class personality, int bw,
+ bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
+ bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
+ bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk,
+ bfa_ablk_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk,
+ bfa_ablk_cbfn_t cbfn, void *cbarg);
+
+/*
* bfa mfg wwn API functions
*/
mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc);
@@ -391,50 +888,64 @@ mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
/*
* F/W Image Size & Chunk
*/
-extern u32 bfi_image_ct_fc_size;
-extern u32 bfi_image_ct_cna_size;
-extern u32 bfi_image_cb_fc_size;
-extern u32 *bfi_image_ct_fc;
-extern u32 *bfi_image_ct_cna;
-extern u32 *bfi_image_cb_fc;
+extern u32 bfi_image_cb_size;
+extern u32 bfi_image_ct_size;
+extern u32 bfi_image_ct2_size;
+extern u32 *bfi_image_cb;
+extern u32 *bfi_image_ct;
+extern u32 *bfi_image_ct2;
static inline u32 *
-bfi_image_ct_fc_get_chunk(u32 off)
-{ return (u32 *)(bfi_image_ct_fc + off); }
+bfi_image_cb_get_chunk(u32 off)
+{
+ return (u32 *)(bfi_image_cb + off);
+}
static inline u32 *
-bfi_image_ct_cna_get_chunk(u32 off)
-{ return (u32 *)(bfi_image_ct_cna + off); }
+bfi_image_ct_get_chunk(u32 off)
+{
+ return (u32 *)(bfi_image_ct + off);
+}
static inline u32 *
-bfi_image_cb_fc_get_chunk(u32 off)
-{ return (u32 *)(bfi_image_cb_fc + off); }
+bfi_image_ct2_get_chunk(u32 off)
+{
+ return (u32 *)(bfi_image_ct2 + off);
+}
static inline u32*
-bfa_cb_image_get_chunk(int type, u32 off)
+bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
{
- switch (type) {
- case BFI_IMAGE_CT_FC:
- return bfi_image_ct_fc_get_chunk(off); break;
- case BFI_IMAGE_CT_CNA:
- return bfi_image_ct_cna_get_chunk(off); break;
- case BFI_IMAGE_CB_FC:
- return bfi_image_cb_fc_get_chunk(off); break;
- default: return NULL;
+ switch (asic_gen) {
+ case BFI_ASIC_GEN_CB:
+ return bfi_image_cb_get_chunk(off);
+ break;
+ case BFI_ASIC_GEN_CT:
+ return bfi_image_ct_get_chunk(off);
+ break;
+ case BFI_ASIC_GEN_CT2:
+ return bfi_image_ct2_get_chunk(off);
+ break;
+ default:
+ return NULL;
}
}
static inline u32
-bfa_cb_image_get_size(int type)
+bfa_cb_image_get_size(enum bfi_asic_gen asic_gen)
{
- switch (type) {
- case BFI_IMAGE_CT_FC:
- return bfi_image_ct_fc_size; break;
- case BFI_IMAGE_CT_CNA:
- return bfi_image_ct_cna_size; break;
- case BFI_IMAGE_CB_FC:
- return bfi_image_cb_fc_size; break;
- default: return 0;
+ switch (asic_gen) {
+ case BFI_ASIC_GEN_CB:
+ return bfi_image_cb_size;
+ break;
+ case BFI_ASIC_GEN_CT:
+ return bfi_image_ct_size;
+ break;
+ case BFI_ASIC_GEN_CT2:
+ return bfi_image_ct2_size;
+ break;
+ default:
+ return 0;
}
}
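As a rough orientation, a hypothetical caller of the accessors above might walk a firmware image chunk by chunk as sketched below; the loop body and the chunk-size constant (taken from the driver's flash definitions) are assumptions and not part of this patch:

static void fwimg_walk(enum bfi_asic_gen asic_gen)
{
	u32 size = bfa_cb_image_get_size(asic_gen);
	u32 off;

	for (off = 0; off < size; off += BFI_FLASH_CHUNK_SZ_WORDS) {
		u32 *chunk = bfa_cb_image_get_chunk(asic_gen, off);

		/* copy 'chunk' to adapter memory here */
		(void)chunk;
	}
}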
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index 89ae4c8f95a..30df8a28471 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -17,7 +17,7 @@
#include "bfad_drv.h"
#include "bfa_ioc.h"
-#include "bfi_cbreg.h"
+#include "bfi_reg.h"
#include "bfa_defs.h"
BFA_TRC_FILE(CNA, IOC_CB);
@@ -69,21 +69,6 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
static bfa_boolean_t
bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
{
- struct bfi_ioc_image_hdr_s fwhdr;
- uint32_t fwstate = readl(ioc->ioc_regs.ioc_fwstate);
-
- if (fwstate == BFI_IOC_UNINIT)
- return BFA_TRUE;
-
- bfa_ioc_fwver_get(ioc, &fwhdr);
-
- if (swab32(fwhdr.exec) == BFI_BOOT_TYPE_NORMAL)
- return BFA_TRUE;
-
- bfa_trc(ioc, fwstate);
- bfa_trc(ioc, fwhdr.exec);
- writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
-
return BFA_TRUE;
}
@@ -98,7 +83,7 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
static void
bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc)
{
- writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+ writel(~0U, ioc->ioc_regs.err_set);
readl(ioc->ioc_regs.err_set);
}
@@ -152,8 +137,8 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
*/
ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
- ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_400_CTL_REG);
- ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_212_CTL_REG);
+ ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
+ ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
/*
* IOC semaphore registers and serialization
@@ -285,18 +270,18 @@ bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc)
}
bfa_status_t
-bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
+bfa_ioc_cb_pll_init(void __iomem *rb, enum bfi_asic_mode fcmode)
{
u32 pll_sclk, pll_fclk;
- pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN |
- __APP_PLL_212_P0_1(3U) |
- __APP_PLL_212_JITLMT0_1(3U) |
- __APP_PLL_212_CNTLMT0_1(3U);
- pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN |
- __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
- __APP_PLL_400_JITLMT0_1(3U) |
- __APP_PLL_400_CNTLMT0_1(3U);
+ pll_sclk = __APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN |
+ __APP_PLL_SCLK_P0_1(3U) |
+ __APP_PLL_SCLK_JITLMT0_1(3U) |
+ __APP_PLL_SCLK_CNTLMT0_1(3U);
+ pll_fclk = __APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN |
+ __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
+ __APP_PLL_LCLK_JITLMT0_1(3U) |
+ __APP_PLL_LCLK_CNTLMT0_1(3U);
writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
@@ -305,24 +290,24 @@ bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
- writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
- writel(__APP_PLL_212_BYPASS | __APP_PLL_212_LOGIC_SOFT_RESET,
- rb + APP_PLL_212_CTL_REG);
- writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
- writel(__APP_PLL_400_BYPASS | __APP_PLL_400_LOGIC_SOFT_RESET,
- rb + APP_PLL_400_CTL_REG);
+ writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
+ writel(__APP_PLL_SCLK_BYPASS | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_SCLK_CTL_REG);
+ writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
+ writel(__APP_PLL_LCLK_BYPASS | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_LCLK_CTL_REG);
udelay(2);
- writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
- writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
- writel(pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET,
- rb + APP_PLL_212_CTL_REG);
- writel(pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET,
- rb + APP_PLL_400_CTL_REG);
+ writel(__APP_PLL_SCLK_LOGIC_SOFT_RESET, rb + APP_PLL_SCLK_CTL_REG);
+ writel(__APP_PLL_LCLK_LOGIC_SOFT_RESET, rb + APP_PLL_LCLK_CTL_REG);
+ writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_SCLK_CTL_REG);
+ writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_LCLK_CTL_REG);
udelay(2000);
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
- writel(pll_sclk, (rb + APP_PLL_212_CTL_REG));
- writel(pll_fclk, (rb + APP_PLL_400_CTL_REG));
+ writel(pll_sclk, (rb + APP_PLL_SCLK_CTL_REG));
+ writel(pll_fclk, (rb + APP_PLL_LCLK_CTL_REG));
return BFA_STATUS_OK;
}
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 93612520f0d..d1b8f0caaa7 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -17,7 +17,7 @@
#include "bfad_drv.h"
#include "bfa_ioc.h"
-#include "bfi_ctreg.h"
+#include "bfi_reg.h"
#include "bfa_defs.h"
BFA_TRC_FILE(CNA, IOC_CT);
@@ -36,9 +36,6 @@ BFA_TRC_FILE(CNA, IOC_CT);
*/
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
-static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
-static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
-static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
@@ -48,29 +45,7 @@ static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
static struct bfa_ioc_hwif_s hwif_ct;
-
-/*
- * Called from bfa_ioc_attach() to map asic specific calls.
- */
-void
-bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
-{
- hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
- hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
- hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
- hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
- hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
- hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
- hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
- hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
- hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
- hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
- hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
- hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
- hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
-
- ioc->ioc_hwif = &hwif_ct;
-}
+static struct bfa_ioc_hwif_s hwif_ct2;
/*
* Return true if firmware of current driver matches the running firmware.
@@ -83,15 +58,9 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
struct bfi_ioc_image_hdr_s fwhdr;
/*
- * Firmware match check is relevant only for CNA.
- */
- if (!ioc->cna)
- return BFA_TRUE;
-
- /*
* If bios boot (flash based) -- do not increment usage count
*/
- if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+ if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
BFA_IOC_FWIMG_MINSZ)
return BFA_TRUE;
@@ -103,6 +72,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
*/
if (usecnt == 0) {
writel(1, ioc->ioc_regs.ioc_usage_reg);
+ readl(ioc->ioc_regs.ioc_usage_sem_reg);
writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
writel(0, ioc->ioc_regs.ioc_fail_sync);
bfa_trc(ioc, usecnt);
@@ -122,6 +92,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
*/
bfa_ioc_fwver_get(ioc, &fwhdr);
if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
+ readl(ioc->ioc_regs.ioc_usage_sem_reg);
writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
bfa_trc(ioc, usecnt);
return BFA_FALSE;
@@ -132,6 +103,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
*/
usecnt++;
writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
+ readl(ioc->ioc_regs.ioc_usage_sem_reg);
writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
bfa_trc(ioc, usecnt);
return BFA_TRUE;
@@ -143,15 +115,9 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
u32 usecnt;
/*
- * Firmware lock is relevant only for CNA.
- */
- if (!ioc->cna)
- return;
-
- /*
* If bios boot (flash based) -- do not decrement usage count
*/
- if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+ if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
BFA_IOC_FWIMG_MINSZ)
return;
@@ -166,6 +132,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
bfa_trc(ioc, usecnt);
+ readl(ioc->ioc_regs.ioc_usage_sem_reg);
writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
}
@@ -175,14 +142,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
{
- if (ioc->cna) {
+ if (bfa_ioc_is_cna(ioc)) {
writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
/* Wait for halt to take effect */
readl(ioc->ioc_regs.ll_halt);
readl(ioc->ioc_regs.alt_ll_halt);
} else {
- writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+ writel(~0U, ioc->ioc_regs.err_set);
readl(ioc->ioc_regs.err_set);
}
}
@@ -190,7 +157,7 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
/*
* Host to LPU mailbox message addresses
*/
-static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
@@ -200,21 +167,31 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
/*
* Host <-> LPU mailbox command/status registers - port 0
*/
-static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
- { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
- { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
- { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
- { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
+static struct { u32 hfn, lpu; } ct_p0reg[] = {
+ { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
+ { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
+ { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
+ { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};
/*
* Host <-> LPU mailbox command/status registers - port 1
*/
-static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
- { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
- { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
- { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
- { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
+static struct { u32 hfn, lpu; } ct_p1reg[] = {
+ { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
+ { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
+ { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
+ { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
+};
+
+static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
+ ct2_reg[] = {
+ { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
+ CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
+ CT2_HOSTFN_LPU0_READ_STAT},
+ { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
+ CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
+ CT2_HOSTFN_LPU1_READ_STAT},
};
static void
@@ -225,24 +202,24 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
rb = bfa_ioc_bar0(ioc);
- ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
- ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
- ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+ ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
if (ioc->port_id == 0) {
ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
- ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
- ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
} else {
ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
- ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
- ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
}
@@ -252,8 +229,8 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
*/
ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
- ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
- ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
+ ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
+ ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
/*
* IOC semaphore registers and serialization
@@ -276,6 +253,64 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
+static void
+bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
+{
+ void __iomem *rb;
+ int port = bfa_ioc_portid(ioc);
+
+ rb = bfa_ioc_bar0(ioc);
+
+ ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
+ ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
+ ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
+ ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
+ ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
+ ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
+
+ if (port == 0) {
+ ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
+ ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
+ } else {
+ ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
+ ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
+ ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
+ ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+ ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
+ }
+
+ /*
+ * PSS control registers
+ */
+ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+ ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+ ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
+ ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);
+
+ /*
+ * IOC semaphore registers and serialization
+ */
+ ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
+ ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
+ ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
+ ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
+ ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);
+
+ /*
+ * sram memory access
+ */
+ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+ ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+ /*
+ * err set reg : for notification of hb failure in fcmode
+ */
+ ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
/*
* Initialize IOC to port mapping.
*/
@@ -298,6 +333,19 @@ bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
bfa_trc(ioc, ioc->port_id);
}
+static void
+bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
+{
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
+
+ r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
+ ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
+
+ bfa_trc(ioc, bfa_ioc_pcifn(ioc));
+ bfa_trc(ioc, ioc->port_id);
+}
+
/*
* Set interrupt mode for a function: INTX or MSIX
*/
@@ -316,7 +364,7 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
/*
* If already in desired mode, do not change anything
*/
- if (!msix && mode)
+ if ((!msix && mode) || (msix && !mode))
return;
if (msix)
@@ -331,6 +379,20 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
writel(r32, rb + FNC_PERS_REG);
}
+bfa_boolean_t
+bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
+{
+ u32 r32;
+
+ r32 = readl(ioc->ioc_regs.lpu_read_stat);
+ if (r32) {
+ writel(1, ioc->ioc_regs.lpu_read_stat);
+ return BFA_TRUE;
+ }
+
+ return BFA_FALSE;
+}
+
/*
* Cleanup hw semaphore and usecnt registers
*/
@@ -338,9 +400,10 @@ static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{
- if (ioc->cna) {
+ if (bfa_ioc_is_cna(ioc)) {
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
writel(0, ioc->ioc_regs.ioc_usage_reg);
+ readl(ioc->ioc_regs.ioc_usage_sem_reg);
writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
}
@@ -449,32 +512,99 @@ bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
return BFA_FALSE;
}
+/**
+ * Set up the hwif entry points that are common to CT and CT2.
+ */
+static void
+bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
+{
+ hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
+ hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
+ hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
+ hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+ hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
+ hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
+ hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
+ hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
+ hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
+}
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);
+
+ hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
+ hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
+ hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
+ hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
+ ioc->ioc_hwif = &hwif_ct;
+}
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
+{
+ bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);
+
+ hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
+ hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
+ hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
+ hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
+ hwif_ct2.ioc_isr_mode_set = NULL;
+ ioc->ioc_hwif = &hwif_ct2;
+}
+
/*
- * Check the firmware state to know if pll_init has been completed already
+ * Workaround for MSI-X resource allocation for catapult-2 with no asic block
*/
-bfa_boolean_t
-bfa_ioc_ct_pll_init_complete(void __iomem *rb)
+#define HOSTFN_MSIX_DEFAULT 64
+#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
+#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
+#define __MSIX_VT_NUMVT__MK 0x003ff800
+#define __MSIX_VT_NUMVT__SH 11
+#define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH)
+#define __MSIX_VT_OFST_ 0x000007ff
+void
+bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
{
- if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
- (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
- return BFA_TRUE;
+ void __iomem *rb = ioc->pcidev.pci_bar_kva;
+ u32 r32;
- return BFA_FALSE;
+ r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
+ if (r32 & __MSIX_VT_NUMVT__MK) {
+ writel(r32 & __MSIX_VT_OFST_,
+ rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
+ return;
+ }
+
+ writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
+ HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
+ rb + HOSTFN_MSIX_VT_OFST_NUMVT);
+ writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
+ rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
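The __MSIX_VT_NUMVT_* macros above follow the driver's usual mask/shift pattern; a minimal sketch of reading and rebuilding the field (hypothetical helper names, not part of this patch):

static inline u32 msix_vt_numvt_rd(u32 r32)
{
	/* extract the number-of-vectors field */
	return (r32 & __MSIX_VT_NUMVT__MK) >> __MSIX_VT_NUMVT__SH;
}

static inline u32 msix_vt_numvt_wr(u32 r32, u32 numvt)
{
	/* replace the number-of-vectors field, keeping the offset bits */
	return (r32 & ~__MSIX_VT_NUMVT__MK) | __MSIX_VT_NUMVT_(numvt);
}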
bfa_status_t
-bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
+bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
u32 pll_sclk, pll_fclk, r32;
+ bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);
+
+ pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
+ __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
+ __APP_PLL_SCLK_JITLMT0_1(3U) |
+ __APP_PLL_SCLK_CNTLMT0_1(1U);
+ pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
+ __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
+ __APP_PLL_LCLK_JITLMT0_1(3U) |
+ __APP_PLL_LCLK_CNTLMT0_1(1U);
- pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
- __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
- __APP_PLL_312_JITLMT0_1(3U) |
- __APP_PLL_312_CNTLMT0_1(1U);
- pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
- __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
- __APP_PLL_425_JITLMT0_1(3U) |
- __APP_PLL_425_CNTLMT0_1(1U);
if (fcmode) {
writel(0, (rb + OP_MODE));
writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
@@ -491,20 +621,21 @@ bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
- writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET,
- rb + APP_PLL_312_CTL_REG);
- writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET,
- rb + APP_PLL_425_CTL_REG);
- writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
- rb + APP_PLL_312_CTL_REG);
- writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
- rb + APP_PLL_425_CTL_REG);
+ writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_SCLK_CTL_REG);
+ writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ rb + APP_PLL_LCLK_CTL_REG);
+ writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
+ __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
+ writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
+ __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
readl(rb + HOSTFN0_INT_MSK);
udelay(2000);
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
- writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG);
- writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG);
+ writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
+ writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
+
if (!fcmode) {
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
@@ -524,3 +655,206 @@ bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
writel(0, (rb + MBIST_CTL_REG));
return BFA_STATUS_OK;
}
+
+static void
+bfa_ioc_ct2_sclk_init(void __iomem *rb)
+{
+ u32 r32;
+
+ /*
+ * put s_clk PLL and PLL FSM in reset
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
+ r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
+ __APP_PLL_SCLK_LOGIC_SOFT_RESET);
+ writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * Ignore mode and program for the max clock (which is FC16)
+ * Firmware/NFC will do the PLL init appropriately
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
+ writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * while doing PLL init, don't clock-gate the ethernet subsystem
+ */
+ r32 = readl((rb + CT2_CHIP_MISC_PRG));
+ writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
+
+ r32 = readl((rb + CT2_PCIE_MISC_REG));
+ writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
+
+ /*
+ * set sclk value
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
+ __APP_PLL_SCLK_CLK_DIV2);
+ writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * poll for s_clk lock or delay 1ms
+ */
+ udelay(1000);
+}
+
+static void
+bfa_ioc_ct2_lclk_init(void __iomem *rb)
+{
+ u32 r32;
+
+ /*
+ * put l_clk PLL and PLL FSM in reset
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
+ r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
+ __APP_PLL_LCLK_LOGIC_SOFT_RESET);
+ writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+ * set LPU speed (set for FC16 which will work for other modes)
+ */
+ r32 = readl((rb + CT2_CHIP_MISC_PRG));
+ writel(r32, (rb + CT2_CHIP_MISC_PRG));
+
+ /*
+ * set LPU half speed (set for FC16 which will work for other modes)
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+ * set lclk for mode (set for FC16)
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
+ r32 |= 0x20c1731b;
+ writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+ * poll for l_clk lock or delay 1ms
+ */
+ udelay(1000);
+}
+
+static void
+bfa_ioc_ct2_mem_init(void __iomem *rb)
+{
+ u32 r32;
+
+ r32 = readl((rb + PSS_CTL_REG));
+ r32 &= ~__PSS_LMEM_RESET;
+ writel(r32, (rb + PSS_CTL_REG));
+ udelay(1000);
+
+ writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
+ udelay(1000);
+ writel(0, (rb + CT2_MBIST_CTL_REG));
+}
+
+void
+bfa_ioc_ct2_mac_reset(void __iomem *rb)
+{
+ u32 r32;
+
+ bfa_ioc_ct2_sclk_init(rb);
+ bfa_ioc_ct2_lclk_init(rb);
+
+ /*
+ * release soft reset on s_clk PLL
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * release soft reset on l_clk PLL
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /* put port0, port1 MAC & AHB in reset */
+ writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
+ rb + CT2_CSI_MAC_CONTROL_REG(0));
+ writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
+ rb + CT2_CSI_MAC_CONTROL_REG(1));
+}
+
+#define CT2_NFC_MAX_DELAY 1000
+bfa_status_t
+bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
+{
+ u32 wgn, r32;
+ int i;
+
+ /*
+ * Initialize PLL if not already done by NFC
+ */
+ wgn = readl(rb + CT2_WGN_STATUS);
+ if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
+ writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
+ for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
+ r32 = readl(rb + CT2_NFC_CSR_SET_REG);
+ if (r32 & __NFC_CONTROLLER_HALTED)
+ break;
+ udelay(1000);
+ }
+ }
+
+ /*
+ * Mask the interrupts and clear any
+ * pending interrupts.
+ */
+ writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
+ writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
+
+ r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ if (r32 == 1) {
+ writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
+ }
+ r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ if (r32 == 1) {
+ writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
+ }
+
+ bfa_ioc_ct2_mac_reset(rb);
+ bfa_ioc_ct2_sclk_init(rb);
+ bfa_ioc_ct2_lclk_init(rb);
+
+ /*
+ * release soft reset on s_clk PLL
+ */
+ r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
+ writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
+ (rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+ /*
+ * release soft reset on l_clk PLL
+ */
+ r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
+ writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
+ (rb + CT2_APP_PLL_LCLK_CTL_REG));
+
+ /*
+ * Announce flash device presence, if flash was corrupted.
+ */
+ if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
+ r32 = readl((rb + PSS_GPIO_OUT_REG));
+ writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
+ r32 = readl((rb + PSS_GPIO_OE_REG));
+ writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
+ }
+
+ bfa_ioc_ct2_mem_init(rb);
+
+ writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
+ writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
+ return BFA_STATUS_OK;
+}
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index ab79ff6fdee..2d36e482383 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -29,14 +29,22 @@
#include "bfa_port.h"
struct bfa_modules_s {
+ struct bfa_fcdiag_s fcdiag; /* fcdiag module */
struct bfa_fcport_s fcport; /* fc port module */
struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */
struct bfa_lps_mod_s lps_mod; /* fcxp module */
struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */
struct bfa_rport_mod_s rport_mod; /* remote port module */
- struct bfa_fcpim_mod_s fcpim_mod; /* FCP initiator module */
+ struct bfa_fcp_mod_s fcp_mod; /* FCP initiator module */
struct bfa_sgpg_mod_s sgpg_mod; /* SG page module */
struct bfa_port_s port; /* Physical port module */
+ struct bfa_ablk_s ablk; /* ASIC block config module */
+ struct bfa_cee_s cee; /* CEE Module */
+ struct bfa_sfp_s sfp; /* SFP module */
+ struct bfa_flash_s flash; /* flash module */
+ struct bfa_diag_s diag_mod; /* diagnostics module */
+ struct bfa_phy_s phy; /* phy module */
+ struct bfa_dconf_mod_s dconf_mod; /* DCONF common module */
};
/*
@@ -51,17 +59,16 @@ enum {
BFA_TRC_HAL_IOCFC_CB = 5,
};
-
/*
* Macro to define a new BFA module
*/
#define BFA_MODULE(__mod) \
static void bfa_ ## __mod ## _meminfo( \
- struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, \
- u32 *dm_len); \
+ struct bfa_iocfc_cfg_s *cfg, \
+ struct bfa_meminfo_s *meminfo, \
+ struct bfa_s *bfa); \
static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \
void *bfad, struct bfa_iocfc_cfg_s *cfg, \
- struct bfa_meminfo_s *meminfo, \
struct bfa_pcidev_s *pcidev); \
static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \
static void bfa_ ## __mod ## _start(struct bfa_s *bfa); \
@@ -87,11 +94,11 @@ enum {
* can leave entry points as NULL)
*/
struct bfa_module_s {
- void (*meminfo) (struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
- u32 *dm_len);
+ void (*meminfo) (struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa);
void (*attach) (struct bfa_s *bfa, void *bfad,
struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *meminfo,
struct bfa_pcidev_s *pcidev);
void (*detach) (struct bfa_s *bfa);
void (*start) (struct bfa_s *bfa);
@@ -109,19 +116,22 @@ struct bfa_s {
struct bfa_timer_mod_s timer_mod; /* timer module */
struct bfa_modules_s modules; /* BFA modules */
struct list_head comp_q; /* pending completions */
- bfa_boolean_t rme_process; /* RME processing enabled */
+ bfa_boolean_t queue_process; /* queue processing enabled */
struct list_head reqq_waitq[BFI_IOC_MAX_CQS];
bfa_boolean_t fcs; /* FCS is attached to BFA */
struct bfa_msix_s msix;
+ int bfa_aen_seq;
};
extern bfa_boolean_t bfa_auto_recover;
+extern struct bfa_module_s hal_mod_fcdiag;
extern struct bfa_module_s hal_mod_sgpg;
extern struct bfa_module_s hal_mod_fcport;
extern struct bfa_module_s hal_mod_fcxp;
extern struct bfa_module_s hal_mod_lps;
extern struct bfa_module_s hal_mod_uf;
extern struct bfa_module_s hal_mod_rport;
-extern struct bfa_module_s hal_mod_fcpim;
+extern struct bfa_module_s hal_mod_fcp;
+extern struct bfa_module_s hal_mod_dconf;
#endif /* __BFA_MODULES_H__ */
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index 3f8e9d6066e..95e4ad8759a 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -24,8 +24,6 @@
BFA_TRC_FILE(CNA, PORT);
-#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
-
static void
bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
{
@@ -236,6 +234,12 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
{
struct bfi_port_generic_req_s *m;
+ /* If port is PBC disabled, return error */
+ if (port->pbc_disabled) {
+ bfa_trc(port, BFA_STATUS_PBC);
+ return BFA_STATUS_PBC;
+ }
+
if (bfa_ioc_is_disabled(port->ioc)) {
bfa_trc(port, BFA_STATUS_IOC_DISABLED);
return BFA_STATUS_IOC_DISABLED;
@@ -280,6 +284,12 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
{
struct bfi_port_generic_req_s *m;
+ /* If port is PBC disabled, return error */
+ if (port->pbc_disabled) {
+ bfa_trc(port, BFA_STATUS_PBC);
+ return BFA_STATUS_PBC;
+ }
+
if (bfa_ioc_is_disabled(port->ioc)) {
bfa_trc(port, BFA_STATUS_IOC_DISABLED);
return BFA_STATUS_IOC_DISABLED;
@@ -387,32 +397,43 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
}
/*
- * bfa_port_hbfail()
+ * bfa_port_notify()
*
+ * Port module IOC event handler
*
* @param[in] Pointer to the Port module data structure.
+ * @param[in] IOC event structure
*
* @return void
*/
void
-bfa_port_hbfail(void *arg)
+bfa_port_notify(void *arg, enum bfa_ioc_event_e event)
{
struct bfa_port_s *port = (struct bfa_port_s *) arg;
- /* Fail any pending get_stats/clear_stats requests */
- if (port->stats_busy) {
- if (port->stats_cbfn)
- port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED);
- port->stats_cbfn = NULL;
- port->stats_busy = BFA_FALSE;
- }
-
- /* Clear any enable/disable is pending */
- if (port->endis_pending) {
- if (port->endis_cbfn)
- port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED);
- port->endis_cbfn = NULL;
- port->endis_pending = BFA_FALSE;
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ /* Fail any pending get_stats/clear_stats requests */
+ if (port->stats_busy) {
+ if (port->stats_cbfn)
+ port->stats_cbfn(port->stats_cbarg,
+ BFA_STATUS_FAILED);
+ port->stats_cbfn = NULL;
+ port->stats_busy = BFA_FALSE;
+ }
+
+ /* Clear any pending enable/disable request */
+ if (port->endis_pending) {
+ if (port->endis_cbfn)
+ port->endis_cbfn(port->endis_cbarg,
+ BFA_STATUS_FAILED);
+ port->endis_cbfn = NULL;
+ port->endis_pending = BFA_FALSE;
+ }
+ break;
+ default:
+ break;
}
}
@@ -445,10 +466,12 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
port->endis_pending = BFA_FALSE;
port->stats_cbfn = NULL;
port->endis_cbfn = NULL;
+ port->pbc_disabled = BFA_FALSE;
bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
- bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
- list_add_tail(&port->hbfail.qe, &port->ioc->hb_notify_q);
+ bfa_q_qe_init(&port->ioc_notify);
+ bfa_ioc_notify_init(&port->ioc_notify, bfa_port_notify, port);
+ list_add_tail(&port->ioc_notify.qe, &port->ioc->notify_q);
/*
* initialize time stamp for stats reset
@@ -458,3 +481,368 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
bfa_trc(port, 0);
}
+
+/*
+ * CEE module specific definitions
+ */
+
+/*
+ * bfa_cee_get_attr_isr()
+ *
+ * @brief CEE ISR for get-attributes responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+ struct bfa_cee_lldp_cfg_s *lldp_cfg = &cee->attr->lldp_remote;
+
+ cee->get_attr_status = status;
+ bfa_trc(cee, 0);
+ if (status == BFA_STATUS_OK) {
+ bfa_trc(cee, 0);
+ memcpy(cee->attr, cee->attr_dma.kva,
+ sizeof(struct bfa_cee_attr_s));
+ lldp_cfg->time_to_live = be16_to_cpu(lldp_cfg->time_to_live);
+ lldp_cfg->enabled_system_cap =
+ be16_to_cpu(lldp_cfg->enabled_system_cap);
+ }
+ cee->get_attr_pending = BFA_FALSE;
+ if (cee->cbfn.get_attr_cbfn) {
+ bfa_trc(cee, 0);
+ cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
+ }
+}
+
+/*
+ * bfa_cee_get_stats_isr()
+ *
+ * @brief CEE ISR for get-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+ u32 *buffer;
+ int i;
+
+ cee->get_stats_status = status;
+ bfa_trc(cee, 0);
+ if (status == BFA_STATUS_OK) {
+ bfa_trc(cee, 0);
+ memcpy(cee->stats, cee->stats_dma.kva,
+ sizeof(struct bfa_cee_stats_s));
+ /* swap the cee stats */
+ buffer = (u32 *)cee->stats;
+ for (i = 0; i < (sizeof(struct bfa_cee_stats_s) /
+ sizeof(u32)); i++)
+ buffer[i] = cpu_to_be32(buffer[i]);
+ }
+ cee->get_stats_pending = BFA_FALSE;
+ bfa_trc(cee, 0);
+ if (cee->cbfn.get_stats_cbfn) {
+ bfa_trc(cee, 0);
+ cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
+ }
+}
+
+/*
+ * bfa_cee_reset_stats_isr()
+ *
+ * @brief CEE ISR for reset-stats responses from f/w
+ *
+ * @param[in] cee - Pointer to the CEE module
+ * status - Return status from the f/w
+ *
+ * @return void
+ */
+static void
+bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
+{
+ cee->reset_stats_status = status;
+ cee->reset_stats_pending = BFA_FALSE;
+ if (cee->cbfn.reset_stats_cbfn)
+ cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
+}
+
+/*
+ * bfa_cee_meminfo()
+ *
+ * @brief Returns the size of the DMA memory needed by CEE module
+ *
+ * @param[in] void
+ *
+ * @return Size of DMA region
+ */
+u32
+bfa_cee_meminfo(void)
+{
+ return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ) +
+ BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * bfa_cee_mem_claim()
+ *
+ * @brief Initializes CEE DMA memory
+ *
+ * @param[in] cee CEE module pointer
+ * dma_kva Kernel Virtual Address of CEE DMA Memory
+ * dma_pa Physical Address of CEE DMA Memory
+ *
+ * @return void
+ */
+void
+bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa)
+{
+ cee->attr_dma.kva = dma_kva;
+ cee->attr_dma.pa = dma_pa;
+ cee->stats_dma.kva = dma_kva + BFA_ROUNDUP(
+ sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
+ cee->stats_dma.pa = dma_pa + BFA_ROUNDUP(
+ sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
+ cee->attr = (struct bfa_cee_attr_s *) dma_kva;
+ cee->stats = (struct bfa_cee_stats_s *) (dma_kva + BFA_ROUNDUP(
+ sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ));
+}
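For reference, bfa_cee_meminfo() and bfa_cee_mem_claim() above carve one DMA buffer into two back-to-back regions, each rounded up to BFA_DMA_ALIGN_SZ (offsets shown symbolically, not as literal values):

	dma_kva + 0                                           -> struct bfa_cee_attr_s
	dma_kva + BFA_ROUNDUP(sizeof(attr), BFA_DMA_ALIGN_SZ) -> struct bfa_cee_stats_s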
+
+/*
+ * bfa_cee_get_attr()
+ *
+ * @brief
+ * Send the request to the f/w to fetch CEE attributes.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr,
+ bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_cee_get_req_s *cmd;
+
+ WARN_ON((cee == NULL) || (cee->ioc == NULL));
+ bfa_trc(cee, 0);
+ if (!bfa_ioc_is_operational(cee->ioc)) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+ if (cee->get_attr_pending == BFA_TRUE) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_DEVBUSY;
+ }
+ cee->get_attr_pending = BFA_TRUE;
+ cmd = (struct bfi_cee_get_req_s *) cee->get_cfg_mb.msg;
+ cee->attr = attr;
+ cee->cbfn.get_attr_cbfn = cbfn;
+ cee->cbfn.get_attr_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+ bfa_ioc_portid(cee->ioc));
+ bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+ bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_get_stats()
+ *
+ * @brief
+ * Send the request to the f/w to fetch CEE statistics.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats,
+ bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_cee_get_req_s *cmd;
+
+ WARN_ON((cee == NULL) || (cee->ioc == NULL));
+
+ if (!bfa_ioc_is_operational(cee->ioc)) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+ if (cee->get_stats_pending == BFA_TRUE) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_DEVBUSY;
+ }
+ cee->get_stats_pending = BFA_TRUE;
+ cmd = (struct bfi_cee_get_req_s *) cee->get_stats_mb.msg;
+ cee->stats = stats;
+ cee->cbfn.get_stats_cbfn = cbfn;
+ cee->cbfn.get_stats_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
+ bfa_ioc_portid(cee->ioc));
+ bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
+ bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_reset_stats()
+ *
+ * @brief Clears CEE Stats in the f/w.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return Status
+ */
+
+bfa_status_t
+bfa_cee_reset_stats(struct bfa_cee_s *cee,
+ bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_cee_reset_stats_s *cmd;
+
+ WARN_ON((cee == NULL) || (cee->ioc == NULL));
+ if (!bfa_ioc_is_operational(cee->ioc)) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_IOC_FAILURE;
+ }
+ if (cee->reset_stats_pending == BFA_TRUE) {
+ bfa_trc(cee, 0);
+ return BFA_STATUS_DEVBUSY;
+ }
+ cee->reset_stats_pending = BFA_TRUE;
+ cmd = (struct bfi_cee_reset_stats_s *) cee->reset_stats_mb.msg;
+ cee->cbfn.reset_stats_cbfn = cbfn;
+ cee->cbfn.reset_stats_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
+ bfa_ioc_portid(cee->ioc));
+ bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * bfa_cee_isr()
+ *
+ * @brief Handles Mail-box interrupts for CEE module.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ *
+ * @return void
+ */
+
+void
+bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
+{
+ union bfi_cee_i2h_msg_u *msg;
+ struct bfi_cee_get_rsp_s *get_rsp;
+ struct bfa_cee_s *cee = (struct bfa_cee_s *) cbarg;
+ msg = (union bfi_cee_i2h_msg_u *) m;
+ get_rsp = (struct bfi_cee_get_rsp_s *) m;
+ bfa_trc(cee, msg->mh.msg_id);
+ switch (msg->mh.msg_id) {
+ case BFI_CEE_I2H_GET_CFG_RSP:
+ bfa_trc(cee, get_rsp->cmd_status);
+ bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
+ break;
+ case BFI_CEE_I2H_GET_STATS_RSP:
+ bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
+ break;
+ case BFI_CEE_I2H_RESET_STATS_RSP:
+ bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+/*
+ * bfa_cee_notify()
+ *
+ * @brief CEE module IOC event handler.
+ *
+ * @param[in] Pointer to the CEE module data structure.
+ * @param[in] IOC event type
+ *
+ * @return void
+ */
+
+void
+bfa_cee_notify(void *arg, enum bfa_ioc_event_e event)
+{
+ struct bfa_cee_s *cee = (struct bfa_cee_s *) arg;
+
+ bfa_trc(cee, event);
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (cee->get_attr_pending == BFA_TRUE) {
+ cee->get_attr_status = BFA_STATUS_FAILED;
+ cee->get_attr_pending = BFA_FALSE;
+ if (cee->cbfn.get_attr_cbfn) {
+ cee->cbfn.get_attr_cbfn(
+ cee->cbfn.get_attr_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ if (cee->get_stats_pending == BFA_TRUE) {
+ cee->get_stats_status = BFA_STATUS_FAILED;
+ cee->get_stats_pending = BFA_FALSE;
+ if (cee->cbfn.get_stats_cbfn) {
+ cee->cbfn.get_stats_cbfn(
+ cee->cbfn.get_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ if (cee->reset_stats_pending == BFA_TRUE) {
+ cee->reset_stats_status = BFA_STATUS_FAILED;
+ cee->reset_stats_pending = BFA_FALSE;
+ if (cee->cbfn.reset_stats_cbfn) {
+ cee->cbfn.reset_stats_cbfn(
+ cee->cbfn.reset_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * bfa_cee_attach()
+ *
+ * @brief CEE module-attach API
+ *
+ * @param[in] cee - Pointer to the CEE module data structure
+ * ioc - Pointer to the ioc module data structure
+ * dev - Pointer to the device driver module data structure
+ * The device driver specific mbox ISR functions have
+ * this pointer as one of the parameters.
+ *
+ * @return void
+ */
+void
+bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc,
+ void *dev)
+{
+ WARN_ON(cee == NULL);
+ cee->dev = dev;
+ cee->ioc = ioc;
+
+ bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
+ bfa_q_qe_init(&cee->ioc_notify);
+ bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
+ list_add_tail(&cee->ioc_notify.qe, &cee->ioc->notify_q);
+}
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
index c4ee9db6b47..947f897328d 100644
--- a/drivers/scsi/bfa/bfa_port.h
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -43,12 +43,16 @@ struct bfa_port_s {
bfa_port_endis_cbfn_t endis_cbfn;
void *endis_cbarg;
bfa_status_t endis_status;
- struct bfa_ioc_hbfail_notify_s hbfail;
+ struct bfa_ioc_notify_s ioc_notify;
+ bfa_boolean_t pbc_disabled;
+ struct bfa_mem_dma_s port_dma;
};
+#define BFA_MEM_PORT_DMA(__bfa) (&((__bfa)->modules.port.port_dma))
+
void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
void *dev, struct bfa_trc_mod_s *trcmod);
-void bfa_port_hbfail(void *arg);
+void bfa_port_notify(void *arg, enum bfa_ioc_event_e event);
bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
union bfa_port_stats_u *stats,
@@ -62,4 +66,58 @@ bfa_status_t bfa_port_disable(struct bfa_port_s *port,
u32 bfa_port_meminfo(void);
void bfa_port_mem_claim(struct bfa_port_s *port,
u8 *dma_kva, u64 dma_pa);
+
+/*
+ * CEE declaration
+ */
+typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, bfa_status_t status);
+typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, bfa_status_t status);
+typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, bfa_status_t status);
+
+struct bfa_cee_cbfn_s {
+ bfa_cee_get_attr_cbfn_t get_attr_cbfn;
+ void *get_attr_cbarg;
+ bfa_cee_get_stats_cbfn_t get_stats_cbfn;
+ void *get_stats_cbarg;
+ bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
+ void *reset_stats_cbarg;
+};
+
+struct bfa_cee_s {
+ void *dev;
+ bfa_boolean_t get_attr_pending;
+ bfa_boolean_t get_stats_pending;
+ bfa_boolean_t reset_stats_pending;
+ bfa_status_t get_attr_status;
+ bfa_status_t get_stats_status;
+ bfa_status_t reset_stats_status;
+ struct bfa_cee_cbfn_s cbfn;
+ struct bfa_ioc_notify_s ioc_notify;
+ struct bfa_trc_mod_s *trcmod;
+ struct bfa_cee_attr_s *attr;
+ struct bfa_cee_stats_s *stats;
+ struct bfa_dma_s attr_dma;
+ struct bfa_dma_s stats_dma;
+ struct bfa_ioc_s *ioc;
+ struct bfa_mbox_cmd_s get_cfg_mb;
+ struct bfa_mbox_cmd_s get_stats_mb;
+ struct bfa_mbox_cmd_s reset_stats_mb;
+ struct bfa_mem_dma_s cee_dma;
+};
+
+#define BFA_MEM_CEE_DMA(__bfa) (&((__bfa)->modules.cee.cee_dma))
+
+u32 bfa_cee_meminfo(void);
+void bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa);
+void bfa_cee_attach(struct bfa_cee_s *cee,
+ struct bfa_ioc_s *ioc, void *dev);
+bfa_status_t bfa_cee_get_attr(struct bfa_cee_s *cee,
+ struct bfa_cee_attr_s *attr,
+ bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_cee_get_stats(struct bfa_cee_s *cee,
+ struct bfa_cee_stats_s *stats,
+ bfa_cee_get_stats_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_cee_reset_stats(struct bfa_cee_s *cee,
+ bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg);
+
#endif /* __BFA_PORT_H__ */
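A minimal usage sketch of the CEE API declared above; the caller, callback, and buffer ownership shown here are hypothetical and not part of this patch:

static void drv_cee_attr_cb(void *dev, bfa_status_t status)
{
	/* on BFA_STATUS_OK the attr buffer passed below has been filled in */
}

static bfa_status_t
drv_cee_setup(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, void *dev,
	      u8 *dma_kva, u64 dma_pa, struct bfa_cee_attr_s *attr)
{
	bfa_cee_attach(cee, ioc, dev);
	bfa_cee_mem_claim(cee, dma_kva, dma_pa);	/* bfa_cee_meminfo() bytes */
	return bfa_cee_get_attr(cee, attr, drv_cee_attr_cb, dev);
}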
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 16d9a5f61c1..aa8a0eaf91f 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -16,11 +16,13 @@
*/
#include "bfad_drv.h"
+#include "bfad_im.h"
#include "bfa_plog.h"
#include "bfa_cs.h"
#include "bfa_modules.h"
BFA_TRC_FILE(HAL, FCXP);
+BFA_MODULE(fcdiag);
BFA_MODULE(fcxp);
BFA_MODULE(sgpg);
BFA_MODULE(lps);
@@ -113,11 +115,10 @@ static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
/*
* forward declarations for LPS functions
*/
-static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
- u32 *dm_len);
+static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *meminfo,
struct bfa_pcidev_s *pcidev);
static void bfa_lps_detach(struct bfa_s *bfa);
static void bfa_lps_start(struct bfa_s *bfa);
@@ -125,6 +126,7 @@ static void bfa_lps_stop(struct bfa_s *bfa);
static void bfa_lps_iocdisable(struct bfa_s *bfa);
static void bfa_lps_login_rsp(struct bfa_s *bfa,
struct bfi_lps_login_rsp_s *rsp);
+static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
static void bfa_lps_logout_rsp(struct bfa_s *bfa,
struct bfi_lps_logout_rsp_s *rsp);
static void bfa_lps_reqq_resume(void *lps_arg);
@@ -430,51 +432,17 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
*/
static void
-claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
-{
- u8 *dm_kva = NULL;
- u64 dm_pa;
- u32 buf_pool_sz;
-
- dm_kva = bfa_meminfo_dma_virt(mi);
- dm_pa = bfa_meminfo_dma_phys(mi);
-
- buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
-
- /*
- * Initialize the fcxp req payload list
- */
- mod->req_pld_list_kva = dm_kva;
- mod->req_pld_list_pa = dm_pa;
- dm_kva += buf_pool_sz;
- dm_pa += buf_pool_sz;
- memset(mod->req_pld_list_kva, 0, buf_pool_sz);
-
- /*
- * Initialize the fcxp rsp payload list
- */
- buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
- mod->rsp_pld_list_kva = dm_kva;
- mod->rsp_pld_list_pa = dm_pa;
- dm_kva += buf_pool_sz;
- dm_pa += buf_pool_sz;
- memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
-
- bfa_meminfo_dma_virt(mi) = dm_kva;
- bfa_meminfo_dma_phys(mi) = dm_pa;
-}
-
-static void
-claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
+claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
{
u16 i;
struct bfa_fcxp_s *fcxp;
- fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
+ fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
INIT_LIST_HEAD(&mod->fcxp_free_q);
INIT_LIST_HEAD(&mod->fcxp_active_q);
+ INIT_LIST_HEAD(&mod->fcxp_unused_q);
mod->fcxp_list = fcxp;
@@ -489,40 +457,53 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
fcxp = fcxp + 1;
}
- bfa_meminfo_kva(mi) = (void *)fcxp;
+ bfa_mem_kva_curp(mod) = (void *)fcxp;
}
static void
-bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
- u32 *dm_len)
+bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+ struct bfa_s *bfa)
{
- u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
+ struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
+ struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
+ struct bfa_mem_dma_s *seg_ptr;
+ u16 nsegs, idx, per_seg_fcxp;
+ u16 num_fcxps = cfg->fwcfg.num_fcxp_reqs;
+ u32 per_fcxp_sz;
- if (num_fcxp_reqs == 0)
+ if (num_fcxps == 0)
return;
- /*
- * Account for req/rsp payload
- */
- *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
if (cfg->drvcfg.min_cfg)
- *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
+ per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
else
- *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
+ per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;
- /*
- * Account for fcxp structs
- */
- *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
+ /* dma memory */
+ nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
+ per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);
+
+ bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
+ if (num_fcxps >= per_seg_fcxp) {
+ num_fcxps -= per_seg_fcxp;
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ per_seg_fcxp * per_fcxp_sz);
+ } else
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ num_fcxps * per_fcxp_sz);
+ }
+
+ /* kva memory */
+ bfa_mem_kva_setup(minfo, fcxp_kva,
+ cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}
static void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+ struct bfa_pcidev_s *pcidev)
{
struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
- memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
mod->bfa = bfa;
mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
@@ -535,8 +516,7 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
INIT_LIST_HEAD(&mod->wait_q);
- claim_fcxp_req_rsp_mem(mod, meminfo);
- claim_fcxps_mem(mod, meminfo);
+ claim_fcxps_mem(mod);
}
static void
@@ -561,6 +541,9 @@ bfa_fcxp_iocdisable(struct bfa_s *bfa)
struct bfa_fcxp_s *fcxp;
struct list_head *qe, *qen;
+ /* Enqueue unused fcxp resources to free_q */
+ list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);
+
list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
fcxp = (struct bfa_fcxp_s *) qe;
if (fcxp->caller == NULL) {
@@ -750,23 +733,6 @@ hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
}
static void
-hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
-{
- union bfi_addr_u sga_zero = { {0} };
-
- sge->sg_len = reqlen;
- sge->flags = BFI_SGE_DATA_LAST;
- bfa_dma_addr_set(sge[0].sga, req_pa);
- bfa_sge_to_be(sge);
- sge++;
-
- sge->sga = sga_zero;
- sge->sg_len = reqlen;
- sge->flags = BFI_SGE_PGDLEN;
- bfa_sge_to_be(sge);
-}
-
-static void
hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
struct fchs_s *fchs)
{
@@ -846,7 +812,7 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
struct bfa_rport_s *rport = reqi->bfa_rport;
bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
- bfa_lpuid(bfa));
+ bfa_fn_lpu(bfa));
send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
if (rport) {
@@ -860,7 +826,7 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
}
send_req->vf_id = cpu_to_be16(reqi->vf_id);
- send_req->lp_tag = reqi->lp_tag;
+ send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
send_req->class = reqi->class;
send_req->rsp_timeout = rspi->rsp_timeout;
send_req->cts = reqi->cts;
@@ -873,18 +839,16 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
* setup req sgles
*/
if (fcxp->use_ireqbuf == 1) {
- hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
+ bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
BFA_FCXP_REQ_PLD_PA(fcxp));
} else {
if (fcxp->nreq_sgles > 0) {
WARN_ON(fcxp->nreq_sgles != 1);
- hal_fcxp_set_local_sges(send_req->req_sge,
- reqi->req_tot_len,
- fcxp->req_sga_cbfn(fcxp->caller,
- 0));
+ bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
+ fcxp->req_sga_cbfn(fcxp->caller, 0));
} else {
WARN_ON(reqi->req_tot_len != 0);
- hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
+ bfa_alen_set(&send_req->rsp_alen, 0, 0);
}
}
@@ -894,25 +858,23 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
if (fcxp->use_irspbuf == 1) {
WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
- hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
+ bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
BFA_FCXP_RSP_PLD_PA(fcxp));
-
} else {
if (fcxp->nrsp_sgles > 0) {
WARN_ON(fcxp->nrsp_sgles != 1);
- hal_fcxp_set_local_sges(send_req->rsp_sge,
- rspi->rsp_maxlen,
- fcxp->rsp_sga_cbfn(fcxp->caller,
- 0));
+ bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
+ fcxp->rsp_sga_cbfn(fcxp->caller, 0));
+
} else {
WARN_ON(rspi->rsp_maxlen != 0);
- hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
+ bfa_alen_set(&send_req->rsp_alen, 0, 0);
}
}
hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
- bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
+ bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
@@ -978,8 +940,8 @@ bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
void *reqbuf;
WARN_ON(fcxp->use_ireqbuf != 1);
- reqbuf = ((u8 *)mod->req_pld_list_kva) +
- fcxp->fcxp_tag * mod->req_pld_sz;
+ reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
+ mod->req_pld_sz + mod->rsp_pld_sz);
return reqbuf;
}
@@ -1002,13 +964,15 @@ void *
bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
{
struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
- void *rspbuf;
+ void *fcxp_buf;
WARN_ON(fcxp->use_irspbuf != 1);
- rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
- fcxp->fcxp_tag * mod->rsp_pld_sz;
- return rspbuf;
+ fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
+ mod->req_pld_sz + mod->rsp_pld_sz);
+
+ /* fcxp_buf = req_buf followed by rsp_buf; add req_pld_sz to reach rsp_buf */
+ return ((u8 *) fcxp_buf) + mod->req_pld_sz;
}
/*
@@ -1181,6 +1145,18 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
return mod->rsp_pld_sz;
}
+void
+bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
+{
+ struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+ struct list_head *qe;
+ int i;
+
+ for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
+ bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
+ list_add_tail(qe, &mod->fcxp_unused_q);
+ }
+}
/*
* BFA LPS state machine functions
@@ -1192,7 +1168,7 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
- bfa_trc(lps->bfa, lps->lp_tag);
+ bfa_trc(lps->bfa, lps->bfa_tag);
bfa_trc(lps->bfa, event);
switch (event) {
@@ -1244,7 +1220,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
- bfa_trc(lps->bfa, lps->lp_tag);
+ bfa_trc(lps->bfa, lps->bfa_tag);
bfa_trc(lps->bfa, event);
switch (event) {
@@ -1278,6 +1254,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
break;
case BFA_LPS_SM_OFFLINE:
+ case BFA_LPS_SM_DELETE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
break;
@@ -1297,7 +1274,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
- bfa_trc(lps->bfa, lps->lp_tag);
+ bfa_trc(lps->bfa, lps->bfa_tag);
bfa_trc(lps->bfa, event);
switch (event) {
@@ -1306,6 +1283,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
break;
case BFA_LPS_SM_OFFLINE:
+ case BFA_LPS_SM_DELETE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
bfa_reqq_wcancel(&lps->wqe);
break;
@@ -1329,7 +1307,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
- bfa_trc(lps->bfa, lps->lp_tag);
+ bfa_trc(lps->bfa, lps->bfa_tag);
bfa_trc(lps->bfa, event);
switch (event) {
@@ -1378,7 +1356,7 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
- bfa_trc(lps->bfa, lps->lp_tag);
+ bfa_trc(lps->bfa, lps->bfa_tag);
bfa_trc(lps->bfa, event);
switch (event) {
@@ -1420,7 +1398,7 @@ bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
- bfa_trc(lps->bfa, lps->lp_tag);
+ bfa_trc(lps->bfa, lps->bfa_tag);
bfa_trc(lps->bfa, event);
switch (event) {
@@ -1430,6 +1408,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
break;
case BFA_LPS_SM_OFFLINE:
+ case BFA_LPS_SM_DELETE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
break;
@@ -1444,7 +1423,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
- bfa_trc(lps->bfa, lps->lp_tag);
+ bfa_trc(lps->bfa, lps->bfa_tag);
bfa_trc(lps->bfa, event);
switch (event) {
@@ -1454,6 +1433,7 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
break;
case BFA_LPS_SM_OFFLINE:
+ case BFA_LPS_SM_DELETE:
bfa_sm_set_state(lps, bfa_lps_sm_init);
bfa_reqq_wcancel(&lps->wqe);
break;
@@ -1473,13 +1453,17 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
* return memory requirement
*/
static void
-bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
- u32 *dm_len)
+bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+ struct bfa_s *bfa)
{
+ struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
+
if (cfg->drvcfg.min_cfg)
- *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
+ bfa_mem_kva_setup(minfo, lps_kva,
+ sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
else
- *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
+ bfa_mem_kva_setup(minfo, lps_kva,
+ sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
}
/*
@@ -1487,28 +1471,28 @@ bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
*/
static void
bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+ struct bfa_pcidev_s *pcidev)
{
struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
struct bfa_lps_s *lps;
int i;
- memset(mod, 0, sizeof(struct bfa_lps_mod_s));
mod->num_lps = BFA_LPS_MAX_LPORTS;
if (cfg->drvcfg.min_cfg)
mod->num_lps = BFA_LPS_MIN_LPORTS;
else
mod->num_lps = BFA_LPS_MAX_LPORTS;
- mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
+ mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
- bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
+ bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
INIT_LIST_HEAD(&mod->lps_free_q);
INIT_LIST_HEAD(&mod->lps_active_q);
+ INIT_LIST_HEAD(&mod->lps_login_q);
for (i = 0; i < mod->num_lps; i++, lps++) {
lps->bfa = bfa;
- lps->lp_tag = (u8) i;
+ lps->bfa_tag = (u8) i;
lps->reqq = BFA_REQQ_LPS;
bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
list_add_tail(&lps->qe, &mod->lps_free_q);
@@ -1544,6 +1528,11 @@ bfa_lps_iocdisable(struct bfa_s *bfa)
lps = (struct bfa_lps_s *) qe;
bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
}
+ list_for_each_safe(qe, qen, &mod->lps_login_q) {
+ lps = (struct bfa_lps_s *) qe;
+ bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
+ }
+ list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
}
/*
@@ -1555,12 +1544,13 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
struct bfa_lps_s *lps;
- WARN_ON(rsp->lp_tag >= mod->num_lps);
- lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
+ WARN_ON(rsp->bfa_tag >= mod->num_lps);
+ lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
lps->status = rsp->status;
switch (rsp->status) {
case BFA_STATUS_OK:
+ lps->fw_tag = rsp->fw_tag;
lps->fport = rsp->f_port;
if (lps->fport)
lps->lp_pid = rsp->lp_pid;
@@ -1572,6 +1562,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
lps->lp_mac = rsp->lp_mac;
lps->brcd_switch = rsp->brcd_switch;
lps->fcf_mac = rsp->fcf_mac;
+ lps->pr_bbscn = rsp->bb_scn;
break;
@@ -1586,14 +1577,46 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
break;
+ case BFA_STATUS_VPORT_MAX:
+		if (rsp->ext_status)
+ bfa_lps_no_res(lps, rsp->ext_status);
+ break;
+
default:
/* Nothing to do with other status */
break;
}
+ list_del(&lps->qe);
+ list_add_tail(&lps->qe, &mod->lps_active_q);
bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
+static void
+bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
+{
+ struct bfa_s *bfa = first_lps->bfa;
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
+ struct list_head *qe, *qe_next;
+ struct bfa_lps_s *lps;
+
+ bfa_trc(bfa, count);
+
+ qe = bfa_q_next(first_lps);
+
+ while (count && qe) {
+ qe_next = bfa_q_next(qe);
+ lps = (struct bfa_lps_s *)qe;
+ bfa_trc(bfa, lps->bfa_tag);
+ lps->status = first_lps->status;
+ list_del(&lps->qe);
+ list_add_tail(&lps->qe, &mod->lps_active_q);
+ bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
+ qe = qe_next;
+ count--;
+ }
+}
+
/*
* Firmware logout response
*/
@@ -1603,8 +1626,8 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
struct bfa_lps_s *lps;
- WARN_ON(rsp->lp_tag >= mod->num_lps);
- lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
+ WARN_ON(rsp->bfa_tag >= mod->num_lps);
+ lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
@@ -1618,7 +1641,7 @@ bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
struct bfa_lps_s *lps;
- lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
+ lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
}
@@ -1653,23 +1676,27 @@ bfa_lps_free(struct bfa_lps_s *lps)
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
struct bfi_lps_login_req_s *m;
m = bfa_reqq_next(lps->bfa, lps->reqq);
WARN_ON(!m);
bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
- bfa_lpuid(lps->bfa));
+ bfa_fn_lpu(lps->bfa));
- m->lp_tag = lps->lp_tag;
+ m->bfa_tag = lps->bfa_tag;
m->alpa = lps->alpa;
m->pdu_size = cpu_to_be16(lps->pdusz);
m->pwwn = lps->pwwn;
m->nwwn = lps->nwwn;
m->fdisc = lps->fdisc;
m->auth_en = lps->auth_en;
+ m->bb_scn = lps->bb_scn;
- bfa_reqq_produce(lps->bfa, lps->reqq);
+ bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
+ list_del(&lps->qe);
+ list_add_tail(&lps->qe, &mod->lps_login_q);
}
/*
@@ -1684,11 +1711,11 @@ bfa_lps_send_logout(struct bfa_lps_s *lps)
WARN_ON(!m);
bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
- bfa_lpuid(lps->bfa));
+ bfa_fn_lpu(lps->bfa));
- m->lp_tag = lps->lp_tag;
+ m->fw_tag = lps->fw_tag;
m->port_name = lps->pwwn;
- bfa_reqq_produce(lps->bfa, lps->reqq);
+ bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}
/*
@@ -1703,11 +1730,11 @@ bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
WARN_ON(!m);
bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
- bfa_lpuid(lps->bfa));
+ bfa_fn_lpu(lps->bfa));
- m->lp_tag = lps->lp_tag;
+ m->fw_tag = lps->fw_tag;
m->lp_pid = lps->lp_pid;
- bfa_reqq_produce(lps->bfa, lps->reqq);
+ bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}
/*
@@ -1859,7 +1886,7 @@ bfa_lps_delete(struct bfa_lps_s *lps)
*/
void
bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
- wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
+ wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
{
lps->uarg = uarg;
lps->alpa = alpa;
@@ -1868,6 +1895,7 @@ bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
lps->nwwn = nwwn;
lps->fdisc = BFA_FALSE;
lps->auth_en = auth_en;
+ lps->bb_scn = bb_scn;
bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
}
@@ -1898,6 +1926,13 @@ bfa_lps_fdisclogo(struct bfa_lps_s *lps)
bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
+u8
+bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
+{
+ struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
+
+ return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
+}
/*
* Return lport services tag given the pid
@@ -1911,7 +1946,7 @@ bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
if (lps->lp_pid == pid)
- return lps->lp_tag;
+ return lps->bfa_tag;
}
/* Return base port tag anyway */
@@ -1936,7 +1971,7 @@ bfa_lps_get_base_pid(struct bfa_s *bfa)
void
bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
{
- bfa_trc(lps->bfa, lps->lp_tag);
+ bfa_trc(lps->bfa, lps->bfa_tag);
bfa_trc(lps->bfa, n2n_pid);
lps->lp_pid = n2n_pid;
@@ -1955,15 +1990,15 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
msg.msg = m;
switch (m->mhdr.msg_id) {
- case BFI_LPS_H2I_LOGIN_RSP:
+ case BFI_LPS_I2H_LOGIN_RSP:
bfa_lps_login_rsp(bfa, msg.login_rsp);
break;
- case BFI_LPS_H2I_LOGOUT_RSP:
+ case BFI_LPS_I2H_LOGOUT_RSP:
bfa_lps_logout_rsp(bfa, msg.logout_rsp);
break;
- case BFI_LPS_H2I_CVL_EVENT:
+ case BFI_LPS_I2H_CVL_EVENT:
bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
break;
@@ -1973,6 +2008,24 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
}
}
+static void
+bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
+{
+ struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+ struct bfa_aen_entry_s *aen_entry;
+
+ bfad_get_aen_entry(bfad, aen_entry);
+ if (!aen_entry)
+ return;
+
+ aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
+ aen_entry->aen_data.port.pwwn = fcport->pwwn;
+
+ /* Send the AEN notification */
+ bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
+ BFA_AEN_CAT_PORT, event);
+}
+
/*
* FC PORT state machine functions
*/
@@ -2061,6 +2114,7 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
wwn2str(pwwn_buf, fcport->pwwn);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port disabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
break;
case BFA_FCPORT_SM_LINKUP:
@@ -2121,6 +2175,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
wwn2str(pwwn_buf, fcport->pwwn);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port disabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
break;
case BFA_FCPORT_SM_STOP:
@@ -2174,6 +2229,12 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
wwn2str(pwwn_buf, fcport->pwwn);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port online: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
+
+ /* If QoS is enabled and it is not online, send AEN */
+ if (fcport->cfg.qos_enabled &&
+ fcport->qos_attr.state != BFA_QOS_ONLINE)
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
break;
case BFA_FCPORT_SM_LINKDOWN:
@@ -2200,6 +2261,7 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
wwn2str(pwwn_buf, fcport->pwwn);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port disabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
break;
case BFA_FCPORT_SM_STOP:
@@ -2245,8 +2307,10 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
wwn2str(pwwn_buf, fcport->pwwn);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port offline: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port disabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
break;
case BFA_FCPORT_SM_LINKDOWN:
@@ -2256,26 +2320,32 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
wwn2str(pwwn_buf, fcport->pwwn);
- if (BFA_PORT_IS_DISABLED(fcport->bfa))
+ if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port offline: WWN = %s\n", pwwn_buf);
- else
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+ } else {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"Base port (WWN = %s) "
"lost fabric connectivity\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ }
break;
case BFA_FCPORT_SM_STOP:
bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
bfa_fcport_reset_linkinfo(fcport);
wwn2str(pwwn_buf, fcport->pwwn);
- if (BFA_PORT_IS_DISABLED(fcport->bfa))
+ if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port offline: WWN = %s\n", pwwn_buf);
- else
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+ } else {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"Base port (WWN = %s) "
"lost fabric connectivity\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ }
break;
case BFA_FCPORT_SM_HWFAIL:
@@ -2283,13 +2353,16 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
bfa_fcport_reset_linkinfo(fcport);
bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
wwn2str(pwwn_buf, fcport->pwwn);
- if (BFA_PORT_IS_DISABLED(fcport->bfa))
+ if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port offline: WWN = %s\n", pwwn_buf);
- else
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+ } else {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"Base port (WWN = %s) "
"lost fabric connectivity\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
+ }
break;
default:
@@ -2420,6 +2493,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
wwn2str(pwwn_buf, fcport->pwwn);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port enabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
break;
case BFA_FCPORT_SM_STOP:
@@ -2474,6 +2548,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
wwn2str(pwwn_buf, fcport->pwwn);
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Base port enabled: WWN = %s\n", pwwn_buf);
+ bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
break;
case BFA_FCPORT_SM_DISABLE:
@@ -2777,10 +2852,12 @@ bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
BFA_CACHELINE_SZ))
static void
-bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
- u32 *dm_len)
+bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+ struct bfa_s *bfa)
{
- *dm_len += FCPORT_STATS_DMA_SZ;
+ struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
+
+ bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
}
static void
@@ -2792,23 +2869,14 @@ bfa_fcport_qresume(void *cbarg)
}
static void
-bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
+bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
{
- u8 *dm_kva;
- u64 dm_pa;
-
- dm_kva = bfa_meminfo_dma_virt(meminfo);
- dm_pa = bfa_meminfo_dma_phys(meminfo);
-
- fcport->stats_kva = dm_kva;
- fcport->stats_pa = dm_pa;
- fcport->stats = (union bfa_fcport_stats_u *) dm_kva;
+ struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
- dm_kva += FCPORT_STATS_DMA_SZ;
- dm_pa += FCPORT_STATS_DMA_SZ;
-
- bfa_meminfo_dma_virt(meminfo) = dm_kva;
- bfa_meminfo_dma_phys(meminfo) = dm_pa;
+ fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
+ fcport->stats_pa = bfa_mem_dma_phys(fcport_dma);
+ fcport->stats = (union bfa_fcport_stats_u *)
+ bfa_mem_dma_virt(fcport_dma);
}
/*
@@ -2816,18 +2884,17 @@ bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
*/
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+ struct bfa_pcidev_s *pcidev)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
struct bfa_fcport_ln_s *ln = &fcport->ln;
struct timeval tv;
- memset(fcport, 0, sizeof(struct bfa_fcport_s));
fcport->bfa = bfa;
ln->fcport = fcport;
- bfa_fcport_mem_claim(fcport, meminfo);
+ bfa_fcport_mem_claim(fcport);
bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
@@ -2848,6 +2915,9 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
+ INIT_LIST_HEAD(&fcport->stats_pending_q);
+ INIT_LIST_HEAD(&fcport->statsclr_pending_q);
+
bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
@@ -2921,6 +2991,7 @@ bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
fcport->speed = BFA_PORT_SPEED_UNKNOWN;
fcport->topology = BFA_PORT_TOPOLOGY_NONE;
+ fcport->bbsc_op_state = BFA_FALSE;
}
/*
@@ -2948,7 +3019,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
}
bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
- bfa_lpuid(fcport->bfa));
+ bfa_fn_lpu(fcport->bfa));
m->nwwn = fcport->nwwn;
m->pwwn = fcport->pwwn;
m->port_cfg = fcport->cfg;
@@ -2962,7 +3033,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
/*
* queue I/O message to firmware
*/
- bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
return BFA_TRUE;
}
@@ -2991,13 +3062,13 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
}
bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
- bfa_lpuid(fcport->bfa));
+ bfa_fn_lpu(fcport->bfa));
m->msgtag = fcport->msgtag;
/*
* queue I/O message to firmware
*/
- bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
return BFA_TRUE;
}
@@ -3029,13 +3100,14 @@ bfa_fcport_send_txcredit(void *port_cbarg)
}
bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
- bfa_lpuid(fcport->bfa));
+ bfa_fn_lpu(fcport->bfa));
m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
+ m->bb_scn = fcport->cfg.bb_scn;
/*
* queue I/O message to firmware
*/
- bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
}
static void
@@ -3074,30 +3146,38 @@ bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
- struct bfa_fcport_s *fcport = cbarg;
+ struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
+ struct bfa_cb_pending_q_s *cb;
+ struct list_head *qe, *qen;
+ union bfa_fcport_stats_u *ret;
if (complete) {
- if (fcport->stats_status == BFA_STATUS_OK) {
- struct timeval tv;
-
- /* Swap FC QoS or FCoE stats */
- if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
- bfa_fcport_qos_stats_swap(
- &fcport->stats_ret->fcqos,
- &fcport->stats->fcqos);
- } else {
- bfa_fcport_fcoe_stats_swap(
- &fcport->stats_ret->fcoe,
- &fcport->stats->fcoe);
-
- do_gettimeofday(&tv);
- fcport->stats_ret->fcoe.secs_reset =
+ struct timeval tv;
+ if (fcport->stats_status == BFA_STATUS_OK)
+ do_gettimeofday(&tv);
+
+ list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
+ bfa_q_deq(&fcport->stats_pending_q, &qe);
+ cb = (struct bfa_cb_pending_q_s *)qe;
+ if (fcport->stats_status == BFA_STATUS_OK) {
+ ret = (union bfa_fcport_stats_u *)cb->data;
+ /* Swap FC QoS or FCoE stats */
+ if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
+ bfa_fcport_qos_stats_swap(&ret->fcqos,
+ &fcport->stats->fcqos);
+ else {
+ bfa_fcport_fcoe_stats_swap(&ret->fcoe,
+ &fcport->stats->fcoe);
+ ret->fcoe.secs_reset =
tv.tv_sec - fcport->stats_reset_time;
+ }
}
+ bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
+ fcport->stats_status);
}
- fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
+ fcport->stats_status = BFA_STATUS_OK;
} else {
- fcport->stats_busy = BFA_FALSE;
+ INIT_LIST_HEAD(&fcport->stats_pending_q);
fcport->stats_status = BFA_STATUS_OK;
}
}
@@ -3115,8 +3195,7 @@ bfa_fcport_stats_get_timeout(void *cbarg)
}
fcport->stats_status = BFA_STATUS_ETIMER;
- bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
- fcport);
+ __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
}
static void
@@ -3139,14 +3218,16 @@ bfa_fcport_send_stats_get(void *cbarg)
memset(msg, 0, sizeof(struct bfi_fcport_req_s));
bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
- bfa_lpuid(fcport->bfa));
- bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+ bfa_fn_lpu(fcport->bfa));
+ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
- struct bfa_fcport_s *fcport = cbarg;
+ struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+ struct bfa_cb_pending_q_s *cb;
+ struct list_head *qe, *qen;
if (complete) {
struct timeval tv;
@@ -3156,10 +3237,15 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
*/
do_gettimeofday(&tv);
fcport->stats_reset_time = tv.tv_sec;
-
- fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
+ list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
+ bfa_q_deq(&fcport->statsclr_pending_q, &qe);
+ cb = (struct bfa_cb_pending_q_s *)qe;
+ bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
+ fcport->stats_status);
+ }
+ fcport->stats_status = BFA_STATUS_OK;
} else {
- fcport->stats_busy = BFA_FALSE;
+ INIT_LIST_HEAD(&fcport->statsclr_pending_q);
fcport->stats_status = BFA_STATUS_OK;
}
}
@@ -3177,8 +3263,7 @@ bfa_fcport_stats_clr_timeout(void *cbarg)
}
fcport->stats_status = BFA_STATUS_ETIMER;
- bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
- __bfa_cb_fcport_stats_clr, fcport);
+ __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
}
static void
@@ -3201,8 +3286,8 @@ bfa_fcport_send_stats_clear(void *cbarg)
memset(msg, 0, sizeof(struct bfi_fcport_req_s));
bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
- bfa_lpuid(fcport->bfa));
- bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+ bfa_fn_lpu(fcport->bfa));
+ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
/*
@@ -3329,6 +3414,9 @@ bfa_fcport_init(struct bfa_s *bfa)
fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
+ if (bfa_fcport_is_pbcdisabled(bfa))
+ bfa->modules.port.pbc_disabled = BFA_TRUE;
+
WARN_ON(!fcport->cfg.maxfrsize);
WARN_ON(!fcport->cfg.rx_bbcredit);
WARN_ON(!fcport->speed_sup);
@@ -3371,6 +3459,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
fcport->use_flash_cfg = BFA_FALSE;
}
+ if (fcport->cfg.qos_enabled)
+ fcport->qos_attr.state = BFA_QOS_OFFLINE;
+ else
+ fcport->qos_attr.state = BFA_QOS_DISABLED;
+
bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
}
break;
@@ -3395,28 +3488,26 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
/*
* check for timer pop before processing the rsp
*/
- if (fcport->stats_busy == BFA_FALSE ||
- fcport->stats_status == BFA_STATUS_ETIMER)
+ if (list_empty(&fcport->stats_pending_q) ||
+ (fcport->stats_status == BFA_STATUS_ETIMER))
break;
bfa_timer_stop(&fcport->timer);
fcport->stats_status = i2hmsg.pstatsget_rsp->status;
- bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
- __bfa_cb_fcport_stats_get, fcport);
+ __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
break;
case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
/*
* check for timer pop before processing the rsp
*/
- if (fcport->stats_busy == BFA_FALSE ||
- fcport->stats_status == BFA_STATUS_ETIMER)
+ if (list_empty(&fcport->statsclr_pending_q) ||
+ (fcport->stats_status == BFA_STATUS_ETIMER))
break;
bfa_timer_stop(&fcport->timer);
fcport->stats_status = BFA_STATUS_OK;
- bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
- __bfa_cb_fcport_stats_clr, fcport);
+ __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
break;
case BFI_FCPORT_I2H_ENABLE_AEN:
@@ -3453,6 +3544,9 @@ bfa_fcport_enable(struct bfa_s *bfa)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ if (bfa_fcport_is_pbcdisabled(bfa))
+ return BFA_STATUS_PBC;
+
if (bfa_ioc_is_disabled(&bfa->ioc))
return BFA_STATUS_IOC_DISABLED;
@@ -3466,6 +3560,8 @@ bfa_fcport_enable(struct bfa_s *bfa)
bfa_status_t
bfa_fcport_disable(struct bfa_s *bfa)
{
+ if (bfa_fcport_is_pbcdisabled(bfa))
+ return BFA_STATUS_PBC;
if (bfa_ioc_is_disabled(&bfa->ioc))
return BFA_STATUS_IOC_DISABLED;
@@ -3474,6 +3570,21 @@ bfa_fcport_disable(struct bfa_s *bfa)
return BFA_STATUS_OK;
}
+/* If PBC is disabled on port, return error */
+bfa_status_t
+bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+ struct bfa_iocfc_s *iocfc = &bfa->iocfc;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
+
+ if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
+ bfa_trc(bfa, fcport->pwwn);
+ return BFA_STATUS_PBC;
+ }
+ return BFA_STATUS_OK;
+}
+
/*
* Configure port speed.
*/
@@ -3491,6 +3602,28 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
return BFA_STATUS_UNSUPP_SPEED;
}
+ /* For Mezz card, port speed entered needs to be checked */
+ if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) {
+ if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
+ /* For CT2, 1G is not supported */
+ if ((speed == BFA_PORT_SPEED_1GBPS) &&
+ (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
+ return BFA_STATUS_UNSUPP_SPEED;
+
+			/* Already checked for Auto Speed and Max Speed support */
+ if (!(speed == BFA_PORT_SPEED_1GBPS ||
+ speed == BFA_PORT_SPEED_2GBPS ||
+ speed == BFA_PORT_SPEED_4GBPS ||
+ speed == BFA_PORT_SPEED_8GBPS ||
+ speed == BFA_PORT_SPEED_16GBPS ||
+ speed == BFA_PORT_SPEED_AUTO))
+ return BFA_STATUS_UNSUPP_SPEED;
+ } else {
+ if (speed != BFA_PORT_SPEED_10GBPS)
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+ }
+
fcport->cfg.speed = speed;
return BFA_STATUS_OK;
@@ -3624,11 +3757,14 @@ bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
}
void
-bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
+bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
+ fcport->cfg.bb_scn = bb_scn;
+ if (bb_scn)
+ fcport->bbsc_op_state = BFA_TRUE;
bfa_fcport_send_txcredit(fcport);
}
@@ -3675,16 +3811,23 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
/* beacon attributes */
attr->beacon = fcport->beacon;
attr->link_e2e_beacon = fcport->link_e2e_beacon;
- attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
- attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);
attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
- if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
- attr->port_state = BFA_PORT_ST_IOCDIS;
- else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
- attr->port_state = BFA_PORT_ST_FWMISMATCH;
+ attr->bbsc_op_status = fcport->bbsc_op_state;
+
+ /* PBC Disabled State */
+ if (bfa_fcport_is_pbcdisabled(bfa))
+ attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
+ else {
+ if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
+ attr->port_state = BFA_PORT_ST_IOCDIS;
+ else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
+ attr->port_state = BFA_PORT_ST_FWMISMATCH;
+ else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc))
+ attr->port_state = BFA_PORT_ST_ACQ_ADDR;
+ }
/* FCoE vlan */
attr->fcoe_vlan = fcport->fcoe_vlan;
@@ -3696,25 +3839,25 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
* Fetch port statistics (FCQoS or FCoE).
*/
bfa_status_t
-bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
- bfa_cb_port_t cbfn, void *cbarg)
+bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- if (fcport->stats_busy) {
- bfa_trc(bfa, fcport->stats_busy);
- return BFA_STATUS_DEVBUSY;
- }
+ if (bfa_ioc_is_disabled(&bfa->ioc))
+ return BFA_STATUS_IOC_DISABLED;
- fcport->stats_busy = BFA_TRUE;
- fcport->stats_ret = stats;
- fcport->stats_cbfn = cbfn;
- fcport->stats_cbarg = cbarg;
+ if (!list_empty(&fcport->statsclr_pending_q))
+ return BFA_STATUS_DEVBUSY;
- bfa_fcport_send_stats_get(fcport);
+ if (list_empty(&fcport->stats_pending_q)) {
+ list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
+ bfa_fcport_send_stats_get(fcport);
+ bfa_timer_start(bfa, &fcport->timer,
+ bfa_fcport_stats_get_timeout,
+ fcport, BFA_FCPORT_STATS_TOV);
+ } else
+ list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
- bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
- fcport, BFA_FCPORT_STATS_TOV);
return BFA_STATUS_OK;
}
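With the rework above, bfa_fcport_get_stats() no longer takes a buffer plus callback pair; callers queue a struct bfa_cb_pending_q_s, several requests can be outstanding at once, and only the first one actually sends the firmware message. A caller-side sketch (the bfa_pending_q_init() helper and the callback cast are assumptions about the surrounding driver, not defined in this hunk):

/*
 * Sketch only.  The bfa_cb_pending_q_s element must stay valid until the
 * completion callback runs.
 */
static void
example_stats_done(void *cbarg, bfa_status_t status)
{
	/* the union bfa_fcport_stats_u passed as "data" has been filled */
}

static bfa_status_t
example_get_port_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
		       struct bfa_cb_pending_q_s *cb_qe)
{
	bfa_pending_q_init(cb_qe, (bfa_cb_cbfn_t)example_stats_done,
			   NULL, stats);	/* assumed helper */
	return bfa_fcport_get_stats(bfa, cb_qe);
}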
@@ -3722,27 +3865,25 @@ bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
* Reset port statistics (FCQoS or FCoE).
*/
bfa_status_t
-bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
+bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
- if (fcport->stats_busy) {
- bfa_trc(bfa, fcport->stats_busy);
+ if (!list_empty(&fcport->stats_pending_q))
return BFA_STATUS_DEVBUSY;
- }
-
- fcport->stats_busy = BFA_TRUE;
- fcport->stats_cbfn = cbfn;
- fcport->stats_cbarg = cbarg;
- bfa_fcport_send_stats_clear(fcport);
+ if (list_empty(&fcport->statsclr_pending_q)) {
+ list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
+ bfa_fcport_send_stats_clear(fcport);
+ bfa_timer_start(bfa, &fcport->timer,
+ bfa_fcport_stats_clr_timeout,
+ fcport, BFA_FCPORT_STATS_TOV);
+ } else
+ list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
- bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
- fcport, BFA_FCPORT_STATS_TOV);
return BFA_STATUS_OK;
}
-
/*
* Fetch port attributes.
*/
@@ -3766,6 +3907,18 @@ bfa_fcport_is_ratelim(struct bfa_s *bfa)
}
/*
+ * Enable/Disable FAA feature in port config
+ */
+void
+bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ bfa_trc(bfa, state);
+ fcport->cfg.faa_state = state;
+}
+
+/*
* Get default minimum ratelim speed
*/
enum bfa_port_speed
@@ -3778,6 +3931,22 @@ bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
}
+void
+bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
+ bfa_boolean_t link_e2e_beacon)
+{
+ struct bfa_s *bfa = dev;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ bfa_trc(bfa, beacon);
+ bfa_trc(bfa, link_e2e_beacon);
+ bfa_trc(bfa, fcport->beacon);
+ bfa_trc(bfa, fcport->link_e2e_beacon);
+
+ fcport->beacon = beacon;
+ fcport->link_e2e_beacon = link_e2e_beacon;
+}
+
bfa_boolean_t
bfa_fcport_is_linkup(struct bfa_s *bfa)
{
@@ -3797,6 +3966,14 @@ bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
return fcport->cfg.qos_enabled;
}
+bfa_boolean_t
+bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+ return fcport->cfg.trunked;
+}
+
/*
* Rport State machine functions
*/
@@ -4286,18 +4463,22 @@ bfa_rport_qresume(void *cbarg)
}
static void
-bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
- u32 *dm_len)
+bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+ struct bfa_s *bfa)
{
+ struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
+
if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
cfg->fwcfg.num_rports = BFA_RPORT_MIN;
- *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
+ /* kva memory */
+ bfa_mem_kva_setup(minfo, rport_kva,
+ cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
}
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+ struct bfa_pcidev_s *pcidev)
{
struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
struct bfa_rport_s *rp;
@@ -4305,8 +4486,9 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
INIT_LIST_HEAD(&mod->rp_free_q);
INIT_LIST_HEAD(&mod->rp_active_q);
+ INIT_LIST_HEAD(&mod->rp_unused_q);
- rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
+ rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
mod->rps_list = rp;
mod->num_rports = cfg->fwcfg.num_rports;
@@ -4331,7 +4513,7 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
/*
* consume memory
*/
- bfa_meminfo_kva(meminfo) = (u8 *) rp;
+ bfa_mem_kva_curp(mod) = (u8 *) rp;
}
static void
@@ -4356,6 +4538,9 @@ bfa_rport_iocdisable(struct bfa_s *bfa)
struct bfa_rport_s *rport;
struct list_head *qe, *qen;
+ /* Enqueue unused rport resources to free_q */
+ list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
+
list_for_each_safe(qe, qen, &mod->rp_active_q) {
rport = (struct bfa_rport_s *) qe;
bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
@@ -4399,11 +4584,11 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
}
bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
- bfa_lpuid(rp->bfa));
+ bfa_fn_lpu(rp->bfa));
m->bfa_handle = rp->rport_tag;
m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
m->pid = rp->rport_info.pid;
- m->lp_tag = rp->rport_info.lp_tag;
+ m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
m->local_pid = rp->rport_info.local_pid;
m->fc_class = rp->rport_info.fc_class;
m->vf_en = rp->rport_info.vf_en;
@@ -4413,7 +4598,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
/*
* queue I/O message to firmware
*/
- bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
+ bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
return BFA_TRUE;
}
@@ -4432,13 +4617,13 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
}
bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
- bfa_lpuid(rp->bfa));
+ bfa_fn_lpu(rp->bfa));
m->fw_handle = rp->fw_handle;
/*
* queue I/O message to firmware
*/
- bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
+ bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
return BFA_TRUE;
}
@@ -4457,14 +4642,14 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
}
bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
- bfa_lpuid(rp->bfa));
+ bfa_fn_lpu(rp->bfa));
m->fw_handle = rp->fw_handle;
m->speed = (u8)rp->rport_info.speed;
/*
* queue I/O message to firmware
*/
- bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
+ bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
return BFA_TRUE;
}
@@ -4492,6 +4677,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
rp->fw_handle = msg.create_rsp->fw_handle;
rp->qos_attr = msg.create_rsp->qos_attr;
+ bfa_rport_set_lunmask(bfa, rp);
WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
break;
@@ -4499,6 +4685,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
case BFI_RPORT_I2H_DELETE_RSP:
rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
+ bfa_rport_unset_lunmask(bfa, rp);
bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
break;
@@ -4514,7 +4701,18 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
}
}
+void
+bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
+{
+ struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
+ struct list_head *qe;
+ int i;
+ for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
+ bfa_q_deq_tail(&mod->rp_free_q, &qe);
+ list_add_tail(qe, &mod->rp_unused_q);
+ }
+}
/*
* bfa_rport_api
@@ -4568,6 +4766,37 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
+/* Set Rport LUN Mask */
+void
+bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
+{
+ struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
+ wwn_t lp_wwn, rp_wwn;
+ u8 lp_tag = (u8)rp->rport_info.lp_tag;
+
+ rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
+ lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
+
+ BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
+ rp->lun_mask = BFA_TRUE;
+ bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
+}
+
+/* Unset Rport LUN mask */
+void
+bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
+{
+ struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
+ wwn_t lp_wwn, rp_wwn;
+
+ rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
+ lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
+
+ BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
+ rp->lun_mask = BFA_FALSE;
+ bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
+ BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
+}
/*
* SGPG related functions
@@ -4577,26 +4806,51 @@ bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
* Compute and return memory needed by FCP(im) module.
*/
static void
-bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
- u32 *dm_len)
+bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+ struct bfa_s *bfa)
{
+ struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
+ struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
+ struct bfa_mem_dma_s *seg_ptr;
+ u16 nsegs, idx, per_seg_sgpg, num_sgpg;
+ u32 sgpg_sz = sizeof(struct bfi_sgpg_s);
+
if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
+ else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
+ cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;
- *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
- *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
-}
+ num_sgpg = cfg->drvcfg.num_sgpgs;
+
+ nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
+ per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);
+
+ bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
+ if (num_sgpg >= per_seg_sgpg) {
+ num_sgpg -= per_seg_sgpg;
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ per_seg_sgpg * sgpg_sz);
+ } else
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ num_sgpg * sgpg_sz);
+ }
+ /* kva memory */
+ bfa_mem_kva_setup(minfo, sgpg_kva,
+ cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
+}
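The reworked meminfo/attach paths above split one large DMA allocation across fixed-size segments and then carve per-entry buffers out of each segment; the unsolicited-frame buffers below use the same split. A self-contained sketch of that carve-out arithmetic (the 512-byte entry and 128 KiB segment sizes are illustrative assumptions, not the driver's real values):

/* Illustration only; entry and segment sizes are assumed. */
#include <stdio.h>

#define EX_ENTRY_SZ	512u		/* assumed per-entry size */
#define EX_SEG_SZ	(128u * 1024)	/* assumed DMA segment size */

int main(void)
{
	unsigned int num = 8192;			/* entries requested */
	unsigned int per_seg = EX_SEG_SZ / EX_ENTRY_SZ;	/* 256 per segment */
	unsigned int nsegs = (num + per_seg - 1) / per_seg;
	unsigned int seg;

	for (seg = 0; seg < nsegs; seg++) {
		unsigned int carved = (num >= per_seg) ? per_seg : num;

		num -= carved;
		printf("segment %u: %u entries\n", seg, carved);
	}
	return 0;
}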
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
+ struct bfa_pcidev_s *pcidev)
{
struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
- int i;
struct bfa_sgpg_s *hsgpg;
struct bfi_sgpg_s *sgpg;
u64 align_len;
+ struct bfa_mem_dma_s *seg_ptr;
+ u32 sgpg_sz = sizeof(struct bfi_sgpg_s);
+ u16 i, idx, nsegs, per_seg_sgpg, num_sgpg;
union {
u64 pa;
@@ -4608,39 +4862,45 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
- mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
- mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
- align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
- mod->sgpg_arr_pa += align_len;
- mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
- align_len);
- mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
- align_len);
-
- hsgpg = mod->hsgpg_arr;
- sgpg = mod->sgpg_arr;
- sgpg_pa.pa = mod->sgpg_arr_pa;
- mod->free_sgpgs = mod->num_sgpgs;
-
- WARN_ON(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1));
-
- for (i = 0; i < mod->num_sgpgs; i++) {
- memset(hsgpg, 0, sizeof(*hsgpg));
- memset(sgpg, 0, sizeof(*sgpg));
-
- hsgpg->sgpg = sgpg;
- sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
- hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
- list_add_tail(&hsgpg->qe, &mod->sgpg_q);
-
- hsgpg++;
- sgpg++;
- sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
+ mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
+
+ num_sgpg = cfg->drvcfg.num_sgpgs;
+ nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
+
+ /* dma/kva mem claim */
+ hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);
+
+ bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {
+
+ if (!bfa_mem_dma_virt(seg_ptr))
+ break;
+
+ align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
+ bfa_mem_dma_phys(seg_ptr);
+
+ sgpg = (struct bfi_sgpg_s *)
+ (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
+ sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
+ WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));
+
+ per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;
+
+ for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
+ memset(hsgpg, 0, sizeof(*hsgpg));
+ memset(sgpg, 0, sizeof(*sgpg));
+
+ hsgpg->sgpg = sgpg;
+ sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
+ hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
+ list_add_tail(&hsgpg->qe, &mod->sgpg_q);
+
+ sgpg++;
+ hsgpg++;
+ sgpg_pa.pa += sgpg_sz;
+ }
}
- bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
- bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
- bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
+ bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
static void
@@ -4782,31 +5042,13 @@ __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
}
static void
-claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
-{
- u32 uf_pb_tot_sz;
-
- ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
- ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
- uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
- BFA_DMA_ALIGN_SZ);
-
- bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
- bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
-
- memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
-}
-
-static void
-claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
+claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
{
struct bfi_uf_buf_post_s *uf_bp_msg;
- struct bfi_sge_s *sge;
- union bfi_addr_u sga_zero = { {0} };
u16 i;
u16 buf_len;
- ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
+ ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
uf_bp_msg = ufm->uf_buf_posts;
for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
@@ -4817,28 +5059,18 @@ claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
buf_len = sizeof(struct bfa_uf_buf_s);
uf_bp_msg->buf_len = cpu_to_be16(buf_len);
bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
- bfa_lpuid(ufm->bfa));
-
- sge = uf_bp_msg->sge;
- sge[0].sg_len = buf_len;
- sge[0].flags = BFI_SGE_DATA_LAST;
- bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
- bfa_sge_to_be(sge);
-
- sge[1].sg_len = buf_len;
- sge[1].flags = BFI_SGE_PGDLEN;
- sge[1].sga = sga_zero;
- bfa_sge_to_be(&sge[1]);
+ bfa_fn_lpu(ufm->bfa));
+ bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
}
/*
* advance pointer beyond consumed memory
*/
- bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
+ bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
}
static void
-claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
+claim_ufs(struct bfa_uf_mod_s *ufm)
{
u16 i;
struct bfa_uf_s *uf;
@@ -4846,7 +5078,7 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
/*
* Claim block of memory for UF list
*/
- ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
+ ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
/*
* Initialize UFs and queue it in UF free queue
@@ -4855,8 +5087,8 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
memset(uf, 0, sizeof(struct bfa_uf_s));
uf->bfa = ufm->bfa;
uf->uf_tag = i;
- uf->pb_len = sizeof(struct bfa_uf_buf_s);
- uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
+ uf->pb_len = BFA_PER_UF_DMA_SZ;
+ uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
uf->buf_pa = ufm_pbs_pa(ufm, i);
list_add_tail(&uf->qe, &ufm->uf_free_q);
}
@@ -4864,48 +5096,57 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
/*
* advance memory pointer
*/
- bfa_meminfo_kva(mi) = (u8 *) uf;
+ bfa_mem_kva_curp(ufm) = (u8 *) uf;
}
static void
-uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
+uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
- claim_uf_pbs(ufm, mi);
- claim_ufs(ufm, mi);
- claim_uf_post_msgs(ufm, mi);
+ claim_ufs(ufm);
+ claim_uf_post_msgs(ufm);
}
static void
-bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
+bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
+ struct bfa_s *bfa)
{
- u32 num_ufs = cfg->fwcfg.num_uf_bufs;
-
- /*
- * dma-able memory for UF posted bufs
- */
- *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
- BFA_DMA_ALIGN_SZ);
+ struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+ struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
+ u32 num_ufs = cfg->fwcfg.num_uf_bufs;
+ struct bfa_mem_dma_s *seg_ptr;
+ u16 nsegs, idx, per_seg_uf = 0;
+
+ nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
+ per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
+
+ bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
+ if (num_ufs >= per_seg_uf) {
+ num_ufs -= per_seg_uf;
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ per_seg_uf * BFA_PER_UF_DMA_SZ);
+ } else
+ bfa_mem_dma_setup(minfo, seg_ptr,
+ num_ufs * BFA_PER_UF_DMA_SZ);
+ }
- /*
- * kernel Virtual memory for UFs and UF buf post msg copies
- */
- *ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
- *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
+ /* kva memory */
+ bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
+ (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
}
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
- struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+ struct bfa_pcidev_s *pcidev)
{
struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
- memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
ufm->bfa = bfa;
ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
INIT_LIST_HEAD(&ufm->uf_free_q);
INIT_LIST_HEAD(&ufm->uf_posted_q);
+ INIT_LIST_HEAD(&ufm->uf_unused_q);
- uf_mem_claim(ufm, meminfo);
+ uf_mem_claim(ufm);
}
static void
@@ -4939,7 +5180,7 @@ bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
sizeof(struct bfi_uf_buf_post_s));
- bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
+ bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
bfa_trc(ufm->bfa, uf->uf_tag);
@@ -4963,11 +5204,15 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
{
struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
u16 uf_tag = m->buf_tag;
- struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
- u8 *buf = &uf_buf->d[0];
+ struct bfa_uf_buf_s *uf_buf;
+ uint8_t *buf;
struct fchs_s *fchs;
+ uf_buf = (struct bfa_uf_buf_s *)
+ bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
+ buf = &uf_buf->d[0];
+
m->frm_len = be16_to_cpu(m->frm_len);
m->xfr_len = be16_to_cpu(m->xfr_len);
@@ -5008,6 +5253,9 @@ bfa_uf_iocdisable(struct bfa_s *bfa)
struct bfa_uf_s *uf;
struct list_head *qe, *qen;
+ /* Enqueue unused uf resources to free_q */
+ list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
+
list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
uf = (struct bfa_uf_s *) qe;
list_del(&uf->qe);
@@ -5072,4 +5320,433 @@ bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
}
}
+void
+bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
+{
+ struct bfa_uf_mod_s *mod = BFA_UF_MOD(bfa);
+ struct list_head *qe;
+ int i;
+
+ for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
+ bfa_q_deq_tail(&mod->uf_free_q, &qe);
+ list_add_tail(qe, &mod->uf_unused_q);
+ }
+}
+
+/*
+ * BFA fcdiag module
+ */
+#define BFA_DIAG_QTEST_TOV 1000 /* msec */
+
+/*
+ * Update port diag-busy status based on the loopback lock
+ */
+static void
+bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
+{
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
+
+ if (fcdiag->lb.lock)
+ fcport->diag_busy = BFA_TRUE;
+ else
+ fcport->diag_busy = BFA_FALSE;
+}
+
+static void
+bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
+ struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+ struct bfa_pcidev_s *pcidev)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ fcdiag->bfa = bfa;
+ fcdiag->trcmod = bfa->trcmod;
+	/* The common DIAG attach bfa_diag_attach() claims all the memory */
+}
+
+static void
+bfa_fcdiag_iocdisable(struct bfa_s *bfa)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ bfa_trc(fcdiag, fcdiag->lb.lock);
+ if (fcdiag->lb.lock) {
+ fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
+ fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
+ fcdiag->lb.lock = 0;
+ bfa_fcdiag_set_busy_status(fcdiag);
+ }
+}
+
+static void
+bfa_fcdiag_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcdiag_queuetest_timeout(void *cbarg)
+{
+ struct bfa_fcdiag_s *fcdiag = cbarg;
+ struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
+
+ bfa_trc(fcdiag, fcdiag->qtest.all);
+ bfa_trc(fcdiag, fcdiag->qtest.count);
+
+ fcdiag->qtest.timer_active = 0;
+
+ res->status = BFA_STATUS_ETIMER;
+ res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
+ if (fcdiag->qtest.all)
+ res->queue = fcdiag->qtest.all;
+
+ bfa_trc(fcdiag, BFA_STATUS_ETIMER);
+ fcdiag->qtest.status = BFA_STATUS_ETIMER;
+ fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
+ fcdiag->qtest.lock = 0;
+}
+
+static bfa_status_t
+bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
+{
+ u32 i;
+ struct bfi_diag_qtest_req_s *req;
+
+ req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
+ if (!req)
+ return BFA_STATUS_DEVBUSY;
+
+ /* build host command */
+ bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
+ bfa_fn_lpu(fcdiag->bfa));
+
+ for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
+ req->data[i] = QTEST_PAT_DEFAULT;
+
+ bfa_trc(fcdiag, fcdiag->qtest.queue);
+ /* ring door bell */
+ bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
+ return BFA_STATUS_OK;
+}
+
+static void
+bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
+ bfi_diag_qtest_rsp_t *rsp)
+{
+ struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
+ bfa_status_t status = BFA_STATUS_OK;
+ int i;
+
+ /* Check timer, should still be active */
+ if (!fcdiag->qtest.timer_active) {
+ bfa_trc(fcdiag, fcdiag->qtest.timer_active);
+ return;
+ }
+
+ /* update count */
+ fcdiag->qtest.count--;
+
+ /* Check result */
+ for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
+ if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
+ res->status = BFA_STATUS_DATACORRUPTED;
+ break;
+ }
+ }
+
+ if (res->status == BFA_STATUS_OK) {
+ if (fcdiag->qtest.count > 0) {
+ status = bfa_fcdiag_queuetest_send(fcdiag);
+ if (status == BFA_STATUS_OK)
+ return;
+ else
+ res->status = status;
+ } else if (fcdiag->qtest.all > 0 &&
+ fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
+ fcdiag->qtest.count = QTEST_CNT_DEFAULT;
+ fcdiag->qtest.queue++;
+ status = bfa_fcdiag_queuetest_send(fcdiag);
+ if (status == BFA_STATUS_OK)
+ return;
+ else
+ res->status = status;
+ }
+ }
+
+	/* Stop the timer once all queues have completed */
+ if (fcdiag->qtest.timer_active) {
+ bfa_timer_stop(&fcdiag->qtest.timer);
+ fcdiag->qtest.timer_active = 0;
+ }
+ res->queue = fcdiag->qtest.queue;
+ res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
+ bfa_trc(fcdiag, res->count);
+ bfa_trc(fcdiag, res->status);
+ fcdiag->qtest.status = res->status;
+ fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
+ fcdiag->qtest.lock = 0;
+}
+
+static void
+bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
+ struct bfi_diag_lb_rsp_s *rsp)
+{
+ struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;
+
+ res->numtxmfrm = be32_to_cpu(rsp->res.numtxmfrm);
+ res->numosffrm = be32_to_cpu(rsp->res.numosffrm);
+ res->numrcvfrm = be32_to_cpu(rsp->res.numrcvfrm);
+ res->badfrminf = be32_to_cpu(rsp->res.badfrminf);
+ res->badfrmnum = be32_to_cpu(rsp->res.badfrmnum);
+ res->status = rsp->res.status;
+ fcdiag->lb.status = rsp->res.status;
+ bfa_trc(fcdiag, fcdiag->lb.status);
+ fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
+ fcdiag->lb.lock = 0;
+ bfa_fcdiag_set_busy_status(fcdiag);
+}
+
+static bfa_status_t
+bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
+ struct bfa_diag_loopback_s *loopback)
+{
+ struct bfi_diag_lb_req_s *lb_req;
+
+ lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
+ if (!lb_req)
+ return BFA_STATUS_DEVBUSY;
+
+ /* build host command */
+ bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
+ bfa_fn_lpu(fcdiag->bfa));
+
+ lb_req->lb_mode = loopback->lb_mode;
+ lb_req->speed = loopback->speed;
+ lb_req->loopcnt = loopback->loopcnt;
+ lb_req->pattern = loopback->pattern;
+
+ /* ring door bell */
+ bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
+
+ bfa_trc(fcdiag, loopback->lb_mode);
+ bfa_trc(fcdiag, loopback->speed);
+ bfa_trc(fcdiag, loopback->loopcnt);
+ bfa_trc(fcdiag, loopback->pattern);
+ return BFA_STATUS_OK;
+}
+
+/*
+ * cpe/rme intr handler
+ */
+void
+bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+
+ switch (msg->mhdr.msg_id) {
+ case BFI_DIAG_I2H_LOOPBACK:
+ bfa_fcdiag_loopback_comp(fcdiag,
+ (struct bfi_diag_lb_rsp_s *) msg);
+ break;
+ case BFI_DIAG_I2H_QTEST:
+ bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
+ break;
+ default:
+ bfa_trc(fcdiag, msg->mhdr.msg_id);
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Loopback test
+ *
+ * @param[in] *bfa - bfa data struct
+ * @param[in] opmode - port operation mode
+ * @param[in] speed - port speed
+ * @param[in] lpcnt - loop count
+ * @param[in] pat - pattern to build packet
+ * @param[in] *result - pointer to bfa_diag_loopback_result_t data struct
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback function arg
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
+ enum bfa_port_speed speed, u32 lpcnt, u32 pat,
+ struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
+ void *cbarg)
+{
+ struct bfa_diag_loopback_s loopback;
+ struct bfa_port_attr_s attr;
+ bfa_status_t status;
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+
+ if (!bfa_iocfc_is_operational(bfa))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /* if port is PBC disabled, return error */
+ if (bfa_fcport_is_pbcdisabled(bfa)) {
+ bfa_trc(fcdiag, BFA_STATUS_PBC);
+ return BFA_STATUS_PBC;
+ }
+
+ if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
+ bfa_trc(fcdiag, opmode);
+ return BFA_STATUS_PORT_NOT_DISABLED;
+ }
+
+ /*
+ * Check if input speed is supported by the port mode
+ */
+ if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
+ if (!(speed == BFA_PORT_SPEED_1GBPS ||
+ speed == BFA_PORT_SPEED_2GBPS ||
+ speed == BFA_PORT_SPEED_4GBPS ||
+ speed == BFA_PORT_SPEED_8GBPS ||
+ speed == BFA_PORT_SPEED_16GBPS ||
+ speed == BFA_PORT_SPEED_AUTO)) {
+ bfa_trc(fcdiag, speed);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+ bfa_fcport_get_attr(bfa, &attr);
+ bfa_trc(fcdiag, attr.speed_supported);
+ if (speed > attr.speed_supported)
+ return BFA_STATUS_UNSUPP_SPEED;
+ } else {
+ if (speed != BFA_PORT_SPEED_10GBPS) {
+ bfa_trc(fcdiag, speed);
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+ }
+
+ /* For Mezz card, port speed entered needs to be checked */
+ if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
+ if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
+ if ((speed == BFA_PORT_SPEED_1GBPS) &&
+ (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
+ return BFA_STATUS_UNSUPP_SPEED;
+ if (!(speed == BFA_PORT_SPEED_1GBPS ||
+ speed == BFA_PORT_SPEED_2GBPS ||
+ speed == BFA_PORT_SPEED_4GBPS ||
+ speed == BFA_PORT_SPEED_8GBPS ||
+ speed == BFA_PORT_SPEED_16GBPS ||
+ speed == BFA_PORT_SPEED_AUTO))
+ return BFA_STATUS_UNSUPP_SPEED;
+ } else {
+ if (speed != BFA_PORT_SPEED_10GBPS)
+ return BFA_STATUS_UNSUPP_SPEED;
+ }
+ }
+
+ /* check to see if there is another destructive diag cmd running */
+ if (fcdiag->lb.lock) {
+ bfa_trc(fcdiag, fcdiag->lb.lock);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ fcdiag->lb.lock = 1;
+ loopback.lb_mode = opmode;
+ loopback.speed = speed;
+ loopback.loopcnt = lpcnt;
+ loopback.pattern = pat;
+ fcdiag->lb.result = result;
+ fcdiag->lb.cbfn = cbfn;
+ fcdiag->lb.cbarg = cbarg;
+ memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
+ bfa_fcdiag_set_busy_status(fcdiag);
+
+ /* Send msg to fw */
+ status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
+ return status;
+}
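A caller-side sketch of the loopback API just added (names prefixed example_ are hypothetical; the BFA_PORT_OPMODE_LB_INT opmode and the bfa_cb_diag_t callback signature are assumptions about the surrounding driver). The port must already be disabled, or BFA_STATUS_PORT_NOT_DISABLED is returned:

/* Sketch only: run an internal loopback of 100 frames with a fixed pattern. */
static void
example_lb_done(void *cbarg, bfa_status_t status)
{
	/* cbarg is the bfa_diag_loopback_result_s filled by the completion */
}

static bfa_status_t
example_run_loopback(struct bfa_s *bfa,
		     struct bfa_diag_loopback_result_s *res)
{
	return bfa_fcdiag_loopback(bfa, BFA_PORT_OPMODE_LB_INT,
				   BFA_PORT_SPEED_8GBPS, 100, 0xA5A5A5A5,
				   res, example_lb_done, res);
}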
+
+/*
+ * DIAG queue test command
+ *
+ * @param[in] *bfa - bfa data struct
+ * @param[in] force - 1: don't do ioc op checking
+ * @param[in] queue - queue no. to test
+ * @param[in] *result - pointer to bfa_diag_qtest_result_t data struct
+ * @param[in] cbfn - callback function
+ * @param[in] *cbarg - callback function arg
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
+ struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
+ void *cbarg)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ bfa_status_t status;
+ bfa_trc(fcdiag, force);
+ bfa_trc(fcdiag, queue);
+
+ if (!force && !bfa_iocfc_is_operational(bfa))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /* check to see if there is another destructive diag cmd running */
+ if (fcdiag->qtest.lock) {
+ bfa_trc(fcdiag, fcdiag->qtest.lock);
+ return BFA_STATUS_DEVBUSY;
+ }
+
+ /* Initialization */
+ fcdiag->qtest.lock = 1;
+ fcdiag->qtest.cbfn = cbfn;
+ fcdiag->qtest.cbarg = cbarg;
+ fcdiag->qtest.result = result;
+ fcdiag->qtest.count = QTEST_CNT_DEFAULT;
+
+ /* Init test results */
+ fcdiag->qtest.result->status = BFA_STATUS_OK;
+ fcdiag->qtest.result->count = 0;
+
+ /* send */
+ if (queue < BFI_IOC_MAX_CQS) {
+ fcdiag->qtest.result->queue = (u8)queue;
+ fcdiag->qtest.queue = (u8)queue;
+ fcdiag->qtest.all = 0;
+ } else {
+ fcdiag->qtest.result->queue = 0;
+ fcdiag->qtest.queue = 0;
+ fcdiag->qtest.all = 1;
+ }
+ status = bfa_fcdiag_queuetest_send(fcdiag);
+
+ /* Start a timer */
+ if (status == BFA_STATUS_OK) {
+ bfa_timer_start(bfa, &fcdiag->qtest.timer,
+ bfa_fcdiag_queuetest_timeout, fcdiag,
+ BFA_DIAG_QTEST_TOV);
+ fcdiag->qtest.timer_active = 1;
+ }
+ return status;
+}
+/*
+ * DIAG PLB is running
+ *
+ * @param[in] *bfa - bfa data struct
+ *
+ * @param[out]
+ */
+bfa_status_t
+bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
+{
+ struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
+ return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
+}
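
For context, a minimal caller sketch for the bfa_fcdiag_loopback() entry point added above (the same completion pattern drives bfa_fcdiag_queuetest()). This is illustrative only and not part of the patch: it assumes the bfa/bfad headers are included, that bfa_cb_diag_t has the signature void (*)(void *cbarg, bfa_status_t status), and it omits the bfad_lock handling a real caller would add around the bfa call.

#include <linux/completion.h>

/* hypothetical completion context -- not from this patch */
struct lb_diag_ctx {
	struct completion comp;
	bfa_status_t status;
};

static void lb_diag_done(void *cbarg, bfa_status_t status)
{
	struct lb_diag_ctx *ctx = cbarg;

	ctx->status = status;			/* final status reported by bfa */
	complete(&ctx->comp);
}

static bfa_status_t lb_diag_run(struct bfa_s *bfa, enum bfa_port_opmode opmode,
				enum bfa_port_speed speed, u32 lpcnt, u32 pat,
				struct bfa_diag_loopback_result_s *result)
{
	struct lb_diag_ctx ctx;
	bfa_status_t rc;

	init_completion(&ctx.comp);

	/* port must be disabled first, else BFA_STATUS_PORT_NOT_DISABLED */
	rc = bfa_fcdiag_loopback(bfa, opmode, speed, lpcnt, pat,
				 result, lb_diag_done, &ctx);
	if (rc != BFA_STATUS_OK)
		return rc;

	wait_for_completion(&ctx.comp);		/* lb.cbfn fires on completion */
	return ctx.status;
}
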
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 5902a45c080..95adb86d376 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -26,6 +26,7 @@
* Scatter-gather DMA related defines
*/
#define BFA_SGPG_MIN (16)
+#define BFA_SGPG_MAX (8192)
/*
* Alignment macro for SG page allocation
@@ -54,17 +55,21 @@ struct bfa_sgpg_s {
*/
#define BFA_SGPG_NPAGE(_nsges) (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
+/* Max SGPG dma segs required */
+#define BFA_SGPG_DMA_SEGS \
+ BFI_MEM_DMA_NSEGS(BFA_SGPG_MAX, (uint32_t)sizeof(struct bfi_sgpg_s))
+
struct bfa_sgpg_mod_s {
struct bfa_s *bfa;
int num_sgpgs; /* number of SG pages */
int free_sgpgs; /* number of free SG pages */
- struct bfa_sgpg_s *hsgpg_arr; /* BFA SG page array */
- struct bfi_sgpg_s *sgpg_arr; /* actual SG page array */
- u64 sgpg_arr_pa; /* SG page array DMA addr */
struct list_head sgpg_q; /* queue of free SG pages */
struct list_head sgpg_wait_q; /* wait queue for SG pages */
+ struct bfa_mem_dma_s dma_seg[BFA_SGPG_DMA_SEGS];
+ struct bfa_mem_kva_s kva_seg;
};
#define BFA_SGPG_MOD(__bfa) (&(__bfa)->modules.sgpg_mod)
+#define BFA_MEM_SGPG_KVA(__bfa) (&(BFA_SGPG_MOD(__bfa)->kva_seg))
bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
int nsgpgs);
@@ -79,26 +84,32 @@ void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
* FCXP related defines
*/
#define BFA_FCXP_MIN (1)
+#define BFA_FCXP_MAX (256)
#define BFA_FCXP_MAX_IBUF_SZ (2 * 1024 + 256)
#define BFA_FCXP_MAX_LBUF_SZ (4 * 1024 + 256)
+/* Max FCXP dma segs required */
+#define BFA_FCXP_DMA_SEGS \
+ BFI_MEM_DMA_NSEGS(BFA_FCXP_MAX, \
+ (u32)BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ)
+
struct bfa_fcxp_mod_s {
struct bfa_s *bfa; /* backpointer to BFA */
struct bfa_fcxp_s *fcxp_list; /* array of FCXPs */
u16 num_fcxps; /* max num FCXP requests */
struct list_head fcxp_free_q; /* free FCXPs */
struct list_head fcxp_active_q; /* active FCXPs */
- void *req_pld_list_kva; /* list of FCXP req pld */
- u64 req_pld_list_pa; /* list of FCXP req pld */
- void *rsp_pld_list_kva; /* list of FCXP resp pld */
- u64 rsp_pld_list_pa; /* list of FCXP resp pld */
struct list_head wait_q; /* wait queue for free fcxp */
+ struct list_head fcxp_unused_q; /* unused fcxps */
u32 req_pld_sz;
u32 rsp_pld_sz;
+ struct bfa_mem_dma_s dma_seg[BFA_FCXP_DMA_SEGS];
+ struct bfa_mem_kva_s kva_seg;
};
#define BFA_FCXP_MOD(__bfa) (&(__bfa)->modules.fcxp_mod)
#define BFA_FCXP_FROM_TAG(__mod, __tag) (&(__mod)->fcxp_list[__tag])
+#define BFA_MEM_FCXP_KVA(__bfa) (&(BFA_FCXP_MOD(__bfa)->kva_seg))
typedef void (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
void *cb_arg, bfa_status_t req_status,
@@ -206,13 +217,15 @@ struct bfa_fcxp_wqe_s {
#define BFA_FCXP_RSP_FCHS(_fcxp) (&((_fcxp)->rsp_info.fchs))
#define BFA_FCXP_RSP_PLD(_fcxp) (bfa_fcxp_get_rspbuf(_fcxp))
-#define BFA_FCXP_REQ_PLD_PA(_fcxp) \
- ((_fcxp)->fcxp_mod->req_pld_list_pa + \
- ((_fcxp)->fcxp_mod->req_pld_sz * (_fcxp)->fcxp_tag))
+#define BFA_FCXP_REQ_PLD_PA(_fcxp) \
+ bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag, \
+ (_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz)
-#define BFA_FCXP_RSP_PLD_PA(_fcxp) \
- ((_fcxp)->fcxp_mod->rsp_pld_list_pa + \
- ((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag))
+/* fcxp_buf = req_buf + rsp_buf: add req_pld_sz to get to rsp_buf */
+#define BFA_FCXP_RSP_PLD_PA(_fcxp) \
+ (bfa_mem_get_dmabuf_pa((_fcxp)->fcxp_mod, (_fcxp)->fcxp_tag, \
+ (_fcxp)->fcxp_mod->req_pld_sz + (_fcxp)->fcxp_mod->rsp_pld_sz) + \
+ (_fcxp)->fcxp_mod->req_pld_sz)
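/*
 * Illustrative sketch (not part of the patch): with the macros above, each
 * FCXP tag now owns a single contiguous DMA buffer holding its request
 * payload followed by its response payload, so the response PA is simply
 * the request PA plus req_pld_sz.  Assuming attach-time cfg picks the
 * maximum sizes defined earlier in this header:
 *
 *	req_pld_sz = BFA_FCXP_MAX_IBUF_SZ = 2 * 1024 + 256 = 2304 bytes
 *	rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ = 4 * 1024 + 256 = 4352 bytes
 *	per-tag buffer size               = 2304 + 4352    = 6656 bytes
 *	BFA_FCXP_RSP_PLD_PA(_fcxp)        = BFA_FCXP_REQ_PLD_PA(_fcxp) + 2304
 */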
void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
@@ -238,10 +251,13 @@ struct bfa_rport_mod_s {
struct bfa_rport_s *rps_list; /* list of rports */
struct list_head rp_free_q; /* free bfa_rports */
struct list_head rp_active_q; /* free bfa_rports */
+ struct list_head rp_unused_q; /* unused bfa rports */
u16 num_rports; /* number of rports */
+ struct bfa_mem_kva_s kva_seg;
};
#define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod)
+#define BFA_MEM_RPORT_KVA(__bfa) (&(BFA_RPORT_MOD(__bfa)->kva_seg))
/*
* Convert rport tag to RPORT
@@ -254,6 +270,7 @@ struct bfa_rport_mod_s {
* protected functions
*/
void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw);
/*
* BFA rport information.
@@ -280,6 +297,7 @@ struct bfa_rport_s {
void *rport_drv; /* fcs/driver rport object */
u16 fw_handle; /* firmware rport handle */
u16 rport_tag; /* BFA rport tag */
+ u8 lun_mask; /* LUN mask flag */
struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */
struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */
struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */
@@ -298,7 +316,7 @@ struct bfa_rport_s {
*/
#define BFA_UF_MIN (4)
-
+#define BFA_UF_MAX (256)
struct bfa_uf_s {
struct list_head qe; /* queue element */
@@ -326,36 +344,41 @@ struct bfa_uf_s {
*/
typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf);
+#define BFA_UF_BUFSZ (2 * 1024 + 256)
+
+struct bfa_uf_buf_s {
+ u8 d[BFA_UF_BUFSZ];
+};
+
+#define BFA_PER_UF_DMA_SZ \
+ (u32)BFA_ROUNDUP(sizeof(struct bfa_uf_buf_s), BFA_DMA_ALIGN_SZ)
+
+/* Max UF dma segs required */
+#define BFA_UF_DMA_SEGS BFI_MEM_DMA_NSEGS(BFA_UF_MAX, BFA_PER_UF_DMA_SZ)
+
struct bfa_uf_mod_s {
struct bfa_s *bfa; /* back pointer to BFA */
struct bfa_uf_s *uf_list; /* array of UFs */
u16 num_ufs; /* num unsolicited rx frames */
struct list_head uf_free_q; /* free UFs */
struct list_head uf_posted_q; /* UFs posted to IOC */
- struct bfa_uf_buf_s *uf_pbs_kva; /* list UF bufs request pld */
- u64 uf_pbs_pa; /* phy addr for UF bufs */
+ struct list_head uf_unused_q; /* unused UFs */
struct bfi_uf_buf_post_s *uf_buf_posts;
/* pre-built UF post msgs */
bfa_cb_uf_recv_t ufrecv; /* uf recv handler function */
void *cbarg; /* uf receive handler arg */
+ struct bfa_mem_dma_s dma_seg[BFA_UF_DMA_SEGS];
+ struct bfa_mem_kva_s kva_seg;
};
#define BFA_UF_MOD(__bfa) (&(__bfa)->modules.uf_mod)
+#define BFA_MEM_UF_KVA(__bfa) (&(BFA_UF_MOD(__bfa)->kva_seg))
#define ufm_pbs_pa(_ufmod, _uftag) \
- ((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag))
+ bfa_mem_get_dmabuf_pa(_ufmod, _uftag, BFA_PER_UF_DMA_SZ)
void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-
-#define BFA_UF_BUFSZ (2 * 1024 + 256)
-
-/*
- * @todo private
- */
-struct bfa_uf_buf_s {
- u8 d[BFA_UF_BUFSZ];
-};
-
+void bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw);
/*
* LPS - bfa lport login/logout service interface
@@ -364,7 +387,8 @@ struct bfa_lps_s {
struct list_head qe; /* queue element */
struct bfa_s *bfa; /* parent bfa instance */
bfa_sm_t sm; /* finite state machine */
- u8 lp_tag; /* lport tag */
+ u8 bfa_tag; /* lport tag */
+ u8 fw_tag; /* lport fw tag */
u8 reqq; /* lport request queue */
u8 alpa; /* ALPA for loop topologies */
u32 lp_pid; /* lport port ID */
@@ -377,8 +401,11 @@ struct bfa_lps_s {
bfa_status_t status; /* login status */
u16 pdusz; /* max receive PDU size */
u16 pr_bbcred; /* BB_CREDIT from peer */
+ u8 pr_bbscn; /* BB_SCN from peer */
+ u8 bb_scn; /* local BB_SCN */
u8 lsrjt_rsn; /* LSRJT reason */
u8 lsrjt_expl; /* LSRJT explanation */
+ u8 lun_mask; /* LUN mask flag */
wwn_t pwwn; /* port wwn of lport */
wwn_t nwwn; /* node wwn of lport */
wwn_t pr_pwwn; /* port wwn of lport peer */
@@ -395,12 +422,15 @@ struct bfa_lps_s {
struct bfa_lps_mod_s {
struct list_head lps_free_q;
struct list_head lps_active_q;
+ struct list_head lps_login_q;
struct bfa_lps_s *lps_arr;
int num_lps;
+ struct bfa_mem_kva_s kva_seg;
};
#define BFA_LPS_MOD(__bfa) (&(__bfa)->modules.lps_mod)
#define BFA_LPS_FROM_TAG(__mod, __tag) (&(__mod)->lps_arr[__tag])
+#define BFA_MEM_LPS_KVA(__bfa) (&(BFA_LPS_MOD(__bfa)->kva_seg))
/*
* external functions
@@ -413,7 +443,6 @@ void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
*/
#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port))
-typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
/*
* Link notification data structure
@@ -467,21 +496,22 @@ struct bfa_fcport_s {
u8 *stats_kva;
u64 stats_pa;
union bfa_fcport_stats_u *stats;
- union bfa_fcport_stats_u *stats_ret; /* driver stats location */
bfa_status_t stats_status; /* stats/statsclr status */
- bfa_boolean_t stats_busy; /* outstanding stats/statsclr */
+ struct list_head stats_pending_q;
+ struct list_head statsclr_pending_q;
bfa_boolean_t stats_qfull;
u32 stats_reset_time; /* stats reset time stamp */
- bfa_cb_port_t stats_cbfn; /* driver callback function */
- void *stats_cbarg; /* *!< user callback arg */
bfa_boolean_t diag_busy; /* diag busy status */
bfa_boolean_t beacon; /* port beacon status */
bfa_boolean_t link_e2e_beacon; /* link beacon status */
+ bfa_boolean_t bbsc_op_state; /* Cred recov Oper State */
struct bfa_fcport_trunk_s trunk;
u16 fcoe_vlan;
+ struct bfa_mem_dma_s fcport_dma;
};
#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport)
+#define BFA_MEM_FCPORT_DMA(__bfa) (&(BFA_FCPORT_MOD(__bfa)->fcport_dma))
/*
* protected functions
@@ -515,15 +545,19 @@ void bfa_fcport_event_register(struct bfa_s *bfa,
bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
-void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
+void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn);
bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa);
+void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
+ bfa_boolean_t link_e2e_beacon);
bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa);
bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
- union bfa_fcport_stats_u *stats,
- bfa_cb_port_t cbfn, void *cbarg);
-bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
- void *cbarg);
+ struct bfa_cb_pending_q_s *cb);
+bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa,
+ struct bfa_cb_pending_q_s *cb);
bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
+bfa_boolean_t bfa_fcport_is_trunk_enabled(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_is_pbcdisabled(struct bfa_s *bfa);
+void bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state);
/*
* bfa rport API functions
@@ -542,6 +576,19 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
struct bfa_rport_qos_attr_s new_qos_attr);
/*
+ * Rport LUN masking related
+ */
+#define BFA_RPORT_TAG_INVALID 0xffff
+#define BFA_LP_TAG_INVALID 0xff
+void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
+void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
+bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s *rp);
+wwn_t bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
+struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
+ wwn_t *lpwwn, wwn_t rpwwn);
+void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
+
+/*
* bfa fcxp API functions
*/
struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
@@ -577,6 +624,7 @@ void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp);
u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp);
u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa);
+void bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw);
static inline void *
bfa_uf_get_frmbuf(struct bfa_uf_s *uf)
@@ -606,11 +654,12 @@ struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
void bfa_lps_delete(struct bfa_lps_s *lps);
void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
u16 pdusz, wwn_t pwwn, wwn_t nwwn,
- bfa_boolean_t auth_en);
+ bfa_boolean_t auth_en, u8 bb_scn);
void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
wwn_t pwwn, wwn_t nwwn);
void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, u32 n2n_pid);
+u8 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag);
u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
@@ -618,4 +667,57 @@ void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
+/* FAA specific APIs */
+bfa_status_t bfa_faa_enable(struct bfa_s *bfa,
+ bfa_cb_iocfc_t cbfn, void *cbarg);
+bfa_status_t bfa_faa_disable(struct bfa_s *bfa,
+ bfa_cb_iocfc_t cbfn, void *cbarg);
+bfa_status_t bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
+ bfa_cb_iocfc_t cbfn, void *cbarg);
+
+/*
+ * FC DIAG data structure
+ */
+struct bfa_fcdiag_qtest_s {
+ struct bfa_diag_qtest_result_s *result;
+ bfa_cb_diag_t cbfn;
+ void *cbarg;
+ struct bfa_timer_s timer;
+ u32 status;
+ u32 count;
+ u8 lock;
+ u8 queue;
+ u8 all;
+ u8 timer_active;
+};
+
+struct bfa_fcdiag_lb_s {
+ bfa_cb_diag_t cbfn;
+ void *cbarg;
+ void *result;
+ bfa_boolean_t lock;
+ u32 status;
+};
+
+struct bfa_fcdiag_s {
+ struct bfa_s *bfa; /* Back pointer to BFA */
+ struct bfa_trc_mod_s *trcmod;
+ struct bfa_fcdiag_lb_s lb;
+ struct bfa_fcdiag_qtest_s qtest;
+};
+
+#define BFA_FCDIAG_MOD(__bfa) (&(__bfa)->modules.fcdiag)
+
+void bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+bfa_status_t bfa_fcdiag_loopback(struct bfa_s *bfa,
+ enum bfa_port_opmode opmode,
+ enum bfa_port_speed speed, u32 lpcnt, u32 pat,
+ struct bfa_diag_loopback_result_s *result,
+ bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 ignore,
+ u32 queue, struct bfa_diag_qtest_result_s *result,
+ bfa_cb_diag_t cbfn, void *cbarg);
+bfa_status_t bfa_fcdiag_lb_is_running(struct bfa_s *bfa);
+
#endif /* __BFA_SVC_H__ */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 59b5e9b61d7..66fb72531b3 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -56,14 +56,15 @@ int fdmi_enable = BFA_TRUE;
int pcie_max_read_reqsz;
int bfa_debugfs_enable = 1;
int msix_disable_cb = 0, msix_disable_ct = 0;
+int max_xfer_size = BFAD_MAX_SECTORS >> 1;
/* Firmware related */
-u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
-u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;
+u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
+u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
-#define BFAD_FW_FILE_CT_FC "ctfw_fc.bin"
-#define BFAD_FW_FILE_CT_CNA "ctfw_cna.bin"
-#define BFAD_FW_FILE_CB_FC "cbfw_fc.bin"
+#define BFAD_FW_FILE_CB "cbfw.bin"
+#define BFAD_FW_FILE_CT "ctfw.bin"
+#define BFAD_FW_FILE_CT2 "ct2fw.bin"
static u32 *bfad_load_fwimg(struct pci_dev *pdev);
static void bfad_free_fwimg(void);
@@ -71,18 +72,18 @@ static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
u32 *bfi_image_size, char *fw_name);
static const char *msix_name_ct[] = {
+ "ctrl",
"cpe0", "cpe1", "cpe2", "cpe3",
- "rme0", "rme1", "rme2", "rme3",
- "ctrl" };
+ "rme0", "rme1", "rme2", "rme3" };
static const char *msix_name_cb[] = {
"cpe0", "cpe1", "cpe2", "cpe3",
"rme0", "rme1", "rme2", "rme3",
"eemc", "elpu0", "elpu1", "epss", "mlpu" };
-MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
-MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
-MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);
+MODULE_FIRMWARE(BFAD_FW_FILE_CB);
+MODULE_FIRMWARE(BFAD_FW_FILE_CT);
+MODULE_FIRMWARE(BFAD_FW_FILE_CT2);
module_param(os_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
@@ -144,6 +145,9 @@ MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
" Range[false:0|true:1]");
+module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(max_xfer_size, "default=32MB,"
+ " Range[64k|128k|256k|512k|1024k|2048k]");
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
@@ -527,28 +531,26 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
void
bfad_hal_mem_release(struct bfad_s *bfad)
{
- int i;
struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
- struct bfa_mem_elem_s *meminfo_elem;
-
- for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
- meminfo_elem = &hal_meminfo->meminfo[i];
- if (meminfo_elem->kva != NULL) {
- switch (meminfo_elem->mem_type) {
- case BFA_MEM_TYPE_KVA:
- vfree(meminfo_elem->kva);
- break;
- case BFA_MEM_TYPE_DMA:
- dma_free_coherent(&bfad->pcidev->dev,
- meminfo_elem->mem_len,
- meminfo_elem->kva,
- (dma_addr_t) meminfo_elem->dma);
- break;
- default:
- WARN_ON(1);
- break;
- }
- }
+ struct bfa_mem_dma_s *dma_info, *dma_elem;
+ struct bfa_mem_kva_s *kva_info, *kva_elem;
+ struct list_head *dm_qe, *km_qe;
+
+ dma_info = &hal_meminfo->dma_info;
+ kva_info = &hal_meminfo->kva_info;
+
+ /* Iterate through the KVA meminfo queue */
+ list_for_each(km_qe, &kva_info->qe) {
+ kva_elem = (struct bfa_mem_kva_s *) km_qe;
+ vfree(kva_elem->kva);
+ }
+
+ /* Iterate through the DMA meminfo queue */
+ list_for_each(dm_qe, &dma_info->qe) {
+ dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+ dma_free_coherent(&bfad->pcidev->dev,
+ dma_elem->mem_len, dma_elem->kva,
+ (dma_addr_t) dma_elem->dma);
}
memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
@@ -563,15 +565,15 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
if (num_tms > 0)
bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
- if (num_fcxps > 0)
+ if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX)
bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
- if (num_ufbufs > 0)
+ if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX)
bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
if (reqq_size > 0)
bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
if (rspq_size > 0)
bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
- if (num_sgpgs > 0)
+ if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX)
bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;
/*
@@ -591,85 +593,46 @@ bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
bfa_status_t
bfad_hal_mem_alloc(struct bfad_s *bfad)
{
- int i;
struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
- struct bfa_mem_elem_s *meminfo_elem;
- dma_addr_t phys_addr;
- void *kva;
+ struct bfa_mem_dma_s *dma_info, *dma_elem;
+ struct bfa_mem_kva_s *kva_info, *kva_elem;
+ struct list_head *dm_qe, *km_qe;
bfa_status_t rc = BFA_STATUS_OK;
- int retry_count = 0;
- int reset_value = 1;
- int min_num_sgpgs = 512;
+ dma_addr_t phys_addr;
bfa_cfg_get_default(&bfad->ioc_cfg);
-
-retry:
bfad_update_hal_cfg(&bfad->ioc_cfg);
bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
- bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);
-
- for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
- meminfo_elem = &hal_meminfo->meminfo[i];
- switch (meminfo_elem->mem_type) {
- case BFA_MEM_TYPE_KVA:
- kva = vmalloc(meminfo_elem->mem_len);
- if (kva == NULL) {
- bfad_hal_mem_release(bfad);
- rc = BFA_STATUS_ENOMEM;
- goto ext;
- }
- memset(kva, 0, meminfo_elem->mem_len);
- meminfo_elem->kva = kva;
- break;
- case BFA_MEM_TYPE_DMA:
- kva = dma_alloc_coherent(&bfad->pcidev->dev,
- meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
- if (kva == NULL) {
- bfad_hal_mem_release(bfad);
- /*
- * If we cannot allocate with default
- * num_sgpages try with half the value.
- */
- if (num_sgpgs > min_num_sgpgs) {
- printk(KERN_INFO
- "bfad[%d]: memory allocation failed"
- " with num_sgpgs: %d\n",
- bfad->inst_no, num_sgpgs);
- nextLowerInt(&num_sgpgs);
- printk(KERN_INFO
- "bfad[%d]: trying to allocate memory"
- " with num_sgpgs: %d\n",
- bfad->inst_no, num_sgpgs);
- retry_count++;
- goto retry;
- } else {
- if (num_sgpgs_parm > 0)
- num_sgpgs = num_sgpgs_parm;
- else {
- reset_value =
- (1 << retry_count);
- num_sgpgs *= reset_value;
- }
- rc = BFA_STATUS_ENOMEM;
- goto ext;
- }
- }
-
- if (num_sgpgs_parm > 0)
- num_sgpgs = num_sgpgs_parm;
- else {
- reset_value = (1 << retry_count);
- num_sgpgs *= reset_value;
- }
-
- memset(kva, 0, meminfo_elem->mem_len);
- meminfo_elem->kva = kva;
- meminfo_elem->dma = phys_addr;
- break;
- default:
- break;
+ bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa);
+
+ dma_info = &hal_meminfo->dma_info;
+ kva_info = &hal_meminfo->kva_info;
+
+ /* Iterate through the KVA meminfo queue */
+ list_for_each(km_qe, &kva_info->qe) {
+ kva_elem = (struct bfa_mem_kva_s *) km_qe;
+ kva_elem->kva = vmalloc(kva_elem->mem_len);
+ if (kva_elem->kva == NULL) {
+ bfad_hal_mem_release(bfad);
+ rc = BFA_STATUS_ENOMEM;
+ goto ext;
+ }
+ memset(kva_elem->kva, 0, kva_elem->mem_len);
+ }
+ /* Iterate through the DMA meminfo queue */
+ list_for_each(dm_qe, &dma_info->qe) {
+ dma_elem = (struct bfa_mem_dma_s *) dm_qe;
+ dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev,
+ dma_elem->mem_len,
+ &phys_addr, GFP_KERNEL);
+ if (dma_elem->kva == NULL) {
+ bfad_hal_mem_release(bfad);
+ rc = BFA_STATUS_ENOMEM;
+ goto ext;
}
+ dma_elem->dma = phys_addr;
+ memset(dma_elem->kva, 0, dma_elem->mem_len);
}
ext:
return rc;
@@ -780,13 +743,17 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
pci_set_master(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
goto out_release_region;
}
+ }
bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
if (bfad->pci_bar0_kva == NULL) {
printk(KERN_ERR "Fail to map bar0\n");
@@ -797,6 +764,7 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
bfad->hal_pcidev.device_id = pdev->device;
+ bfad->hal_pcidev.ssid = pdev->subsystem_device;
bfad->pci_name = pci_name(pdev);
bfad->pci_attr.vendor_id = pdev->vendor;
@@ -868,6 +836,7 @@ void
bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
{
pci_iounmap(pdev, bfad->pci_bar0_kva);
+ pci_iounmap(pdev, bfad->pci_bar2_kva);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -908,12 +877,29 @@ bfad_drv_init(struct bfad_s *bfad)
bfad->bfa_fcs.trcmod = bfad->trcmod;
bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
+ bfa_fcs_init(&bfad->bfa_fcs);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
+ /* configure base port */
+ rc = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
+ if (rc != BFA_STATUS_OK)
+ goto out_cfg_pport_fail;
+
return BFA_STATUS_OK;
+out_cfg_pport_fail:
+ /* fcs exit - on cfg pport failure */
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ init_completion(&bfad->comp);
+ bfad->pport.flags |= BFAD_PORT_DELETE;
+ bfa_fcs_exit(&bfad->bfa_fcs);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_for_completion(&bfad->comp);
+ /* bfa detach - free hal memory */
+ bfa_detach(&bfad->bfa);
+ bfad_hal_mem_release(bfad);
out_hal_mem_alloc_failure:
return BFA_STATUS_FAILED;
}
@@ -945,6 +931,7 @@ bfad_drv_start(struct bfad_s *bfad)
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_iocfc_start(&bfad->bfa);
+ bfa_fcs_pbc_vport_init(&bfad->bfa_fcs);
bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
bfad->bfad_flags |= BFAD_HAL_START_DONE;
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -1032,6 +1019,12 @@ bfad_start_ops(struct bfad_s *bfad) {
struct bfad_vport_s *vport, *vport_new;
struct bfa_fcs_driver_info_s driver_info;
+ /* Limit min/max xfer size to [64k-32MB] */
+ if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
+ max_xfer_size = BFAD_MIN_SECTORS >> 1;
+ if (max_xfer_size > BFAD_MAX_SECTORS >> 1)
+ max_xfer_size = BFAD_MAX_SECTORS >> 1;
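/*
 * Illustrative note (not from the patch): judging by the module parameter's
 * Range[...] strings, max_xfer_size is expressed in KB while
 * BFAD_MIN/MAX_SECTORS count 512-byte sectors, so the ">> 1" above converts
 * sectors to KB, e.g. 65536 sectors >> 1 = 32768 KB = 32 MB.
 */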
+
/* Fill the driver_info info to fcs*/
memset(&driver_info, 0, sizeof(driver_info));
strncpy(driver_info.version, BFAD_DRIVER_VERSION,
@@ -1049,19 +1042,19 @@ bfad_start_ops(struct bfad_s *bfad) {
strncpy(driver_info.os_device_name, bfad->pci_name,
sizeof(driver_info.os_device_name - 1));
- /* FCS INIT */
+ /* FCS driver info init */
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
- bfa_fcs_init(&bfad->bfa_fcs);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
- retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
- if (retval != BFA_STATUS_OK) {
- if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
- bfa_sm_set_state(bfad, bfad_sm_failed);
- bfad_stop(bfad);
- return BFA_STATUS_FAILED;
- }
+ /*
+ * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
+ * with values learned during bfa_init firmware GETATTR REQ.
+ */
+ bfa_fcs_update_cfg(&bfad->bfa_fcs);
+
+ /* Setup fc host fixed attribute if the lk supports */
+ bfad_fc_host_init(bfad->pport.im_port);
/* BFAD level FC4 IM specific resource allocation */
retval = bfad_im_probe(bfad);
@@ -1233,8 +1226,8 @@ bfad_install_msix_handler(struct bfad_s *bfad)
for (i = 0; i < bfad->nvec; i++) {
sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
bfad->pci_name,
- ((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ?
- msix_name_ct[i] : msix_name_cb[i]));
+ ((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ?
+ msix_name_cb[i] : msix_name_ct[i]));
error = request_irq(bfad->msix_tab[i].msix.vector,
(irq_handler_t) bfad_msix, 0,
@@ -1248,6 +1241,9 @@ bfad_install_msix_handler(struct bfad_s *bfad)
free_irq(bfad->msix_tab[j].msix.vector,
&bfad->msix_tab[j]);
+ bfad->bfad_flags &= ~BFAD_MSIX_ON;
+ pci_disable_msix(bfad->pcidev);
+
return 1;
}
}
@@ -1265,6 +1261,7 @@ bfad_setup_intr(struct bfad_s *bfad)
u32 mask = 0, i, num_bit = 0, max_bit = 0;
struct msix_entry msix_entries[MAX_MSIX_ENTRY];
struct pci_dev *pdev = bfad->pcidev;
+ u16 reg;
/* Call BFA to get the msix map for this PCI function. */
bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
@@ -1272,8 +1269,8 @@ bfad_setup_intr(struct bfad_s *bfad)
/* Set up the msix entry table */
bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
- if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
- (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {
+ if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
+ (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {
error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
if (error) {
@@ -1294,6 +1291,13 @@ bfad_setup_intr(struct bfad_s *bfad)
goto line_based;
}
+ /* Disable INTX in MSI-X mode */
+ pci_read_config_word(pdev, PCI_COMMAND, &reg);
+
+ if (!(reg & PCI_COMMAND_INTX_DISABLE))
+ pci_write_config_word(pdev, PCI_COMMAND,
+ reg | PCI_COMMAND_INTX_DISABLE);
+
/* Save the vectors */
for (i = 0; i < bfad->nvec; i++) {
bfa_trc(bfad, msix_entries[i].vector);
@@ -1315,6 +1319,7 @@ line_based:
/* Enable interrupt handler failed */
return 1;
}
+ bfad->bfad_flags |= BFAD_INTX_ON;
return error;
}
@@ -1331,7 +1336,7 @@ bfad_remove_intr(struct bfad_s *bfad)
pci_disable_msix(bfad->pcidev);
bfad->bfad_flags &= ~BFAD_MSIX_ON;
- } else {
+ } else if (bfad->bfad_flags & BFAD_INTX_ON) {
free_irq(bfad->pcidev->irq, bfad);
}
}
@@ -1343,7 +1348,7 @@ int
bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
struct bfad_s *bfad;
- int error = -ENODEV, retval;
+ int error = -ENODEV, retval, i;
/* For single port cards - only claim function 0 */
if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
@@ -1367,6 +1372,12 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
bfa_trc_init(bfad->trcmod);
bfa_trc(bfad, bfad_inst);
+ /* AEN INIT */
+ INIT_LIST_HEAD(&bfad->free_aen_q);
+ INIT_LIST_HEAD(&bfad->active_aen_q);
+ for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
+ list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);
+
if (!(bfad_load_fwimg(pdev))) {
kfree(bfad->trcmod);
goto out_alloc_trace_failure;
@@ -1501,6 +1512,14 @@ struct pci_device_id bfad_id_table[] = {
.class = (PCI_CLASS_SERIAL_FIBER << 8),
.class_mask = ~0,
},
+ {
+ .vendor = BFA_PCI_VENDOR_ID_BROCADE,
+ .device = BFA_PCI_DEVICE_ID_CT2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_SERIAL_FIBER << 8),
+ .class_mask = ~0,
+ },
{0, 0},
};
@@ -1594,33 +1613,33 @@ out:
static u32 *
bfad_load_fwimg(struct pci_dev *pdev)
{
- if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
- if (bfi_image_ct_fc_size == 0)
- bfad_read_firmware(pdev, &bfi_image_ct_fc,
- &bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
- return bfi_image_ct_fc;
- } else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
- if (bfi_image_ct_cna_size == 0)
- bfad_read_firmware(pdev, &bfi_image_ct_cna,
- &bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
- return bfi_image_ct_cna;
+ if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
+ if (bfi_image_ct2_size == 0)
+ bfad_read_firmware(pdev, &bfi_image_ct2,
+ &bfi_image_ct2_size, BFAD_FW_FILE_CT2);
+ return bfi_image_ct2;
+ } else if (bfa_asic_id_ct(pdev->device)) {
+ if (bfi_image_ct_size == 0)
+ bfad_read_firmware(pdev, &bfi_image_ct,
+ &bfi_image_ct_size, BFAD_FW_FILE_CT);
+ return bfi_image_ct;
} else {
- if (bfi_image_cb_fc_size == 0)
- bfad_read_firmware(pdev, &bfi_image_cb_fc,
- &bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
- return bfi_image_cb_fc;
+ if (bfi_image_cb_size == 0)
+ bfad_read_firmware(pdev, &bfi_image_cb,
+ &bfi_image_cb_size, BFAD_FW_FILE_CB);
+ return bfi_image_cb;
}
}
static void
bfad_free_fwimg(void)
{
- if (bfi_image_ct_fc_size && bfi_image_ct_fc)
- vfree(bfi_image_ct_fc);
- if (bfi_image_ct_cna_size && bfi_image_ct_cna)
- vfree(bfi_image_ct_cna);
- if (bfi_image_cb_fc_size && bfi_image_cb_fc)
- vfree(bfi_image_cb_fc);
+ if (bfi_image_ct2_size && bfi_image_ct2)
+ vfree(bfi_image_ct2);
+ if (bfi_image_ct_size && bfi_image_ct)
+ vfree(bfi_image_ct);
+ if (bfi_image_cb_size && bfi_image_cb)
+ vfree(bfi_image_cb);
}
module_init(bfad_init);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index a94ea423543..9d95844ab46 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -218,6 +218,9 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
case BFA_PORT_SPEED_10GBPS:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
+ case BFA_PORT_SPEED_16GBPS:
+ fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
+ break;
case BFA_PORT_SPEED_8GBPS:
fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
break;
@@ -580,6 +583,8 @@ struct fc_function_template bfad_im_fc_function_template = {
.vport_create = bfad_im_vport_create,
.vport_delete = bfad_im_vport_delete,
.vport_disable = bfad_im_vport_disable,
+ .bsg_request = bfad_im_bsg_request,
+ .bsg_timeout = bfad_im_bsg_timeout,
};
struct fc_function_template bfad_im_vport_fc_function_template = {
@@ -674,8 +679,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
struct bfad_s *bfad = im_port->bfad;
char model[BFA_ADAPTER_MODEL_NAME_LEN];
char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
+ int nports = 0;
bfa_get_adapter_model(&bfad->bfa, model);
+ nports = bfa_get_nports(&bfad->bfa);
if (!strcmp(model, "Brocade-425"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 4Gbps PCIe dual port FC HBA");
@@ -684,10 +691,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
"Brocade 8Gbps PCIe dual port FC HBA");
else if (!strcmp(model, "Brocade-42B"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
- "HP 4Gbps PCIe dual port FC HBA");
+ "Brocade 4Gbps PCIe dual port FC HBA for HP");
else if (!strcmp(model, "Brocade-82B"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
- "HP 8Gbps PCIe dual port FC HBA");
+ "Brocade 8Gbps PCIe dual port FC HBA for HP");
else if (!strcmp(model, "Brocade-1010"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 10Gbps single port CNA");
@@ -696,7 +703,7 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
"Brocade 10Gbps dual port CNA");
else if (!strcmp(model, "Brocade-1007"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
- "Brocade 10Gbps CNA");
+ "Brocade 10Gbps CNA for IBM Blade Center");
else if (!strcmp(model, "Brocade-415"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Brocade 4Gbps PCIe single port FC HBA");
@@ -705,17 +712,45 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
"Brocade 8Gbps PCIe single port FC HBA");
else if (!strcmp(model, "Brocade-41B"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
- "HP 4Gbps PCIe single port FC HBA");
+ "Brocade 4Gbps PCIe single port FC HBA for HP");
else if (!strcmp(model, "Brocade-81B"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
- "HP 8Gbps PCIe single port FC HBA");
+ "Brocade 8Gbps PCIe single port FC HBA for HP");
else if (!strcmp(model, "Brocade-804"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
- "HP Bladesystem C-class 8Gbps FC HBA");
- else if (!strcmp(model, "Brocade-902"))
+ "Brocade 8Gbps FC HBA for HP Bladesystem C-class");
+ else if (!strcmp(model, "Brocade-902") ||
+ !strcmp(model, "Brocade-1741"))
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
- "Brocade 10Gbps CNA");
- else
+ "Brocade 10Gbps CNA for Dell M-Series Blade Servers");
+ else if (strstr(model, "Brocade-1560")) {
+ if (nports == 1)
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 16Gbps PCIe single port FC HBA");
+ else
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 16Gbps PCIe dual port FC HBA");
+ } else if (strstr(model, "Brocade-1710")) {
+ if (nports == 1)
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 10Gbps single port CNA");
+ else
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 10Gbps dual port CNA");
+ } else if (strstr(model, "Brocade-1860")) {
+ if (nports == 1 && bfa_ioc_is_cna(&bfad->bfa.ioc))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 10Gbps single port CNA");
+ else if (nports == 1 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 16Gbps PCIe single port FC HBA");
+ else if (nports == 2 && bfa_ioc_is_cna(&bfad->bfa.ioc))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 10Gbps dual port CNA");
+ else if (nports == 2 && !bfa_ioc_is_cna(&bfad->bfa.ioc))
+ snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+ "Brocade 16Gbps PCIe dual port FC HBA");
+ } else
snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
"Invalid Model");
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
new file mode 100644
index 00000000000..06fc00caeb4
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -0,0 +1,3235 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfad_bsg.h"
+
+BFA_TRC_FILE(LDRV, BSG);
+
+int
+bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ int rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ /* If IOC is not in disabled state - return */
+ if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_IOC_FAILURE;
+ return rc;
+ }
+
+ init_completion(&bfad->enable_comp);
+ bfa_iocfc_enable(&bfad->bfa);
+ iocmd->status = BFA_STATUS_OK;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_for_completion(&bfad->enable_comp);
+
+ return rc;
+}
+
+int
+bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ int rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (bfad->disable_active) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return -EBUSY;
+ }
+
+ bfad->disable_active = BFA_TRUE;
+ init_completion(&bfad->disable_comp);
+ bfa_iocfc_disable(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ wait_for_completion(&bfad->disable_comp);
+ bfad->disable_active = BFA_FALSE;
+ iocmd->status = BFA_STATUS_OK;
+
+ return rc;
+}
+
+static int
+bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
+{
+ int i;
+ struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
+ struct bfad_im_port_s *im_port;
+ struct bfa_port_attr_s pattr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcport_get_attr(&bfad->bfa, &pattr);
+ iocmd->nwwn = pattr.nwwn;
+ iocmd->pwwn = pattr.pwwn;
+ iocmd->ioc_type = bfa_get_type(&bfad->bfa);
+ iocmd->mac = bfa_get_mac(&bfad->bfa);
+ iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
+ bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
+ iocmd->factorynwwn = pattr.factorynwwn;
+ iocmd->factorypwwn = pattr.factorypwwn;
+ iocmd->bfad_num = bfad->inst_no;
+ im_port = bfad->pport.im_port;
+ iocmd->host = im_port->shost->host_no;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ strcpy(iocmd->name, bfad->adapter_name);
+ strcpy(iocmd->port_name, bfad->port_name);
+ strcpy(iocmd->hwpath, bfad->pci_name);
+
+ /* set adapter hw path */
+ strcpy(iocmd->adapter_hwpath, bfad->pci_name);
+ i = strlen(iocmd->adapter_hwpath) - 1;
+ while (iocmd->adapter_hwpath[i] != '.')
+ i--;
+ iocmd->adapter_hwpath[i] = '\0';
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+static int
+bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ /* fill in driver attr info */
+ strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
+ strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
+ BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
+ strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
+ iocmd->ioc_attr.adapter_attr.fw_ver);
+ strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
+ iocmd->ioc_attr.adapter_attr.optrom_ver);
+
+ /* copy chip rev info first otherwise it will be overwritten */
+ memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
+ sizeof(bfad->pci_attr.chip_rev));
+ memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
+ sizeof(struct bfa_ioc_pci_attr_s));
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
+
+ bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_ioc_fwstats_s *iocmd =
+ (struct bfa_bsg_ioc_fwstats_s *)cmd;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_ioc_fwstats_s),
+ sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ goto out;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+out:
+ bfa_trc(bfad, 0x6666);
+ return 0;
+}
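/*
 * Illustrative sketch (not part of the patch) of the variable-length iocmd
 * layout the handler above relies on; bfad_chk_iocmd_sz() is assumed to
 * verify that the bsg payload can hold the header plus the trailing data:
 *
 *	bsg payload:  [ struct bfa_bsg_ioc_fwstats_s ][ struct bfa_fw_stats_s ]
 *	              ^ cmd                            ^ iocmd_bufptr
 *
 *	iocmd_bufptr = (char *)cmd + sizeof(struct bfa_bsg_ioc_fwstats_s)
 */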
+
+int
+bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ if (v_cmd == IOCMD_IOC_RESET_STATS) {
+ bfa_ioc_clear_stats(&bfad->bfa);
+ iocmd->status = BFA_STATUS_OK;
+ } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ }
+
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
+
+ if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
+ strcpy(bfad->adapter_name, iocmd->name);
+ else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
+ strcpy(bfad->port_name, iocmd->name);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
+
+ iocmd->status = BFA_STATUS_OK;
+ bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
+
+ return 0;
+}
+
+int
+bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ return 0;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ return 0;
+}
+
+int
+bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ return 0;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ return 0;
+}
+
+static int
+bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
+ struct bfa_lport_attr_s port_attr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
+ bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
+ iocmd->attr.pid = port_attr.pid;
+ else
+ iocmd->attr.pid = 0;
+
+ iocmd->attr.port_type = port_attr.port_type;
+ iocmd->attr.loopback = port_attr.loopback;
+ iocmd->attr.authfail = port_attr.authfail;
+ strncpy(iocmd->attr.port_symname.symname,
+ port_attr.port_cfg.sym_name.symname,
+ sizeof(port_attr.port_cfg.sym_name.symname));
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_port_stats_s),
+ sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
+ iocmd_bufptr, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ return 0;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ return 0;
+}
+
+int
+bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_PORT_CFG_TOPO)
+ cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
+ else if (v_cmd == IOCMD_PORT_CFG_SPEED)
+ cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
+ else if (v_cmd == IOCMD_PORT_CFG_ALPA)
+ cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
+ else if (v_cmd == IOCMD_PORT_CLR_ALPA)
+ cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
+ (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
+ if (v_cmd == IOCMD_PORT_BBSC_ENABLE)
+ fcport->cfg.bb_scn_state = BFA_TRUE;
+ else if (v_cmd == IOCMD_PORT_BBSC_DISABLE)
+ fcport->cfg.bb_scn_state = BFA_FALSE;
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+static int
+bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_bsg_lport_stats_s *iocmd =
+ (struct bfa_bsg_lport_stats_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_bsg_reset_stats_s *iocmd =
+ (struct bfa_bsg_reset_stats_s *)cmd;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+ struct list_head *qe, *qen;
+ struct bfa_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcs_lport_clear_stats(fcs_port);
+ /* clear IO stats from all active itnims */
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
+ continue;
+ bfa_itnim_clear_stats(itnim);
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_bsg_lport_iostats_s *iocmd =
+ (struct bfa_bsg_lport_iostats_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
+ fcs_port->lp_tag);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_lport_get_rports_s *iocmd =
+ (struct bfa_bsg_lport_get_rports_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ unsigned long flags;
+ void *iocmd_bufptr;
+
+ if (iocmd->nrports == 0)
+ return -EINVAL;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_lport_get_rports_s),
+ sizeof(wwn_t) * iocmd->nrports) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd +
+ sizeof(struct bfa_bsg_lport_get_rports_s);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, 0);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcs_lport_get_rports(fcs_port, (wwn_t *)iocmd_bufptr,
+ &iocmd->nrports);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_rport == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+static int
+bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_scsi_addr_s *iocmd =
+ (struct bfa_bsg_rport_scsi_addr_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *fcs_itnim;
+ struct bfad_itnim_s *drv_itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_itnim == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ drv_itnim = fcs_itnim->itnim_drv;
+
+ if (drv_itnim && drv_itnim->im_port)
+ iocmd->host = drv_itnim->im_port->shost->host_no;
+ else {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ iocmd->target = drv_itnim->scsi_tgt_id;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->bus = 0;
+ iocmd->lun = 0;
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_stats_s *iocmd =
+ (struct bfa_bsg_rport_stats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_rport == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
+ sizeof(struct bfa_rport_stats_s));
+ memcpy((void *)&iocmd->stats.hal_stats,
+ (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
+ sizeof(struct bfa_rport_hal_stats_s));
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_reset_stats_s *iocmd =
+ (struct bfa_bsg_rport_reset_stats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ struct bfa_rport_s *rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_rport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
+ rport = bfa_fcs_rport_get_halrport(fcs_rport);
+ memset(&rport->stats, 0, sizeof(rport->stats));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_set_speed_s *iocmd =
+ (struct bfa_bsg_rport_set_speed_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_rport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ fcs_rport->rpf.assigned_speed = iocmd->speed;
+ /* Set this speed in f/w only if the RPSC speed is not available */
+ if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
+ bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+ goto out;
+ }
+
+ bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct bfa_bsg_vport_stats_s *iocmd =
+ (struct bfa_bsg_vport_stats_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+ goto out;
+ }
+
+ memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
+ sizeof(struct bfa_vport_stats_s));
+ memcpy((void *)&iocmd->vport_stats.port_stats,
+ (void *)&fcs_vport->lport.stats,
+ sizeof(struct bfa_lport_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct bfa_bsg_reset_stats_s *iocmd =
+ (struct bfa_bsg_reset_stats_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+ goto out;
+ }
+
+ memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
+ memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
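+/*
+ * Return the list of logical port WWNs for the requested fabric (vf_id).
+ * The WWN array is written into the payload area that follows the
+ * bfa_bsg_fabric_get_lports_s header, after the caller-supplied nports
+ * and the bsg payload length have been validated.
+ */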
+static int
+bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_fabric_get_lports_s *iocmd =
+ (struct bfa_bsg_fabric_get_lports_s *)cmd;
+ bfa_fcs_vf_t *fcs_vf;
+ uint32_t nports = iocmd->nports;
+ unsigned long flags;
+ void *iocmd_bufptr;
+
+ if (nports == 0) {
+ iocmd->status = BFA_STATUS_EINVAL;
+ goto out;
+ }
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_fabric_get_lports_s),
+ sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ goto out;
+ }
+
+ iocmd_bufptr = (char *)iocmd +
+ sizeof(struct bfa_bsg_fabric_get_lports_s);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+ if (fcs_vf == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+ goto out;
+ }
+ bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->nports = nports;
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
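+/*
+ * Enable or disable target rate limiting on the base fabric port. If no
+ * default rate-limit speed has been configured yet, fall back to 1Gbps.
+ */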
+int
+bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ if (cmd == IOCMD_RATELIM_ENABLE)
+ fcport->cfg.ratelimit = BFA_TRUE;
+ else if (cmd == IOCMD_RATELIM_DISABLE)
+ fcport->cfg.ratelimit = BFA_FALSE;
+
+ if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
+ fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+
+ return 0;
+}
+
+int
+bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+ struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ /* Auto and speeds greater than the supported speed are invalid */
+ if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
+ (iocmd->speed > fcport->speed_sup)) {
+ iocmd->status = BFA_STATUS_UNSUPP_SPEED;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+ }
+
+ fcport->cfg.trl_def_speed = iocmd->speed;
+ iocmd->status = BFA_STATUS_OK;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_modstats_s *iocmd =
+ (struct bfa_bsg_fcpim_modstats_s *)cmd;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+ struct list_head *qe, *qen;
+ struct bfa_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ /* accumulate IO stats from itnim */
+ memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
+ (struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+ struct list_head *qe, *qen;
+ struct bfa_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ bfa_itnim_clear_stats(itnim);
+ }
+ memset(&fcpim->del_itn_stats, 0,
+ sizeof(struct bfa_fcpim_del_itn_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
+ (struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
+ sizeof(struct bfa_fcpim_del_itn_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
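+/*
+ * itnim handlers: resolve the logical port from (vf_id, lpwwn) and the
+ * initiator-target nexus from rpwwn before fetching attributes or
+ * reading/clearing its FCS and HAL statistics.
+ */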
+static int
+bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->lpwwn);
+ if (!fcs_port)
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ else
+ iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
+ iocmd->rpwwn, &iocmd->attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_itnim_iostats_s *iocmd =
+ (struct bfa_bsg_itnim_iostats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->lpwwn);
+ if (!fcs_port) {
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ bfa_trc(bfad, 0);
+ } else {
+ itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (itnim == NULL)
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ else {
+ iocmd->status = BFA_STATUS_OK;
+ memcpy((void *)&iocmd->iostats, (void *)
+ &(bfa_fcs_itnim_get_halitn(itnim)->stats),
+ sizeof(struct bfa_itnim_iostats_s));
+ }
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_reset_stats_s *iocmd =
+ (struct bfa_bsg_rport_reset_stats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (!fcs_port)
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ else {
+ itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (itnim == NULL)
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ else {
+ iocmd->status = BFA_STATUS_OK;
+ bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
+ bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
+ }
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_itnim_itnstats_s *iocmd =
+ (struct bfa_bsg_itnim_itnstats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->lpwwn);
+ if (!fcs_port) {
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ bfa_trc(bfad, 0);
+ } else {
+ itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (itnim == NULL)
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ else {
+ iocmd->status = BFA_STATUS_OK;
+ bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
+ &iocmd->itnstats);
+ }
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_enable(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_disable(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
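+/*
+ * The ablk (adapter block) handlers below follow a common pattern: issue
+ * the request under bfad_lock with bfad_hcb_comp()/&fcomp as the
+ * completion callback and, if the request was accepted, sleep on the
+ * completion, which is expected to carry the final status in fcomp.status.
+ */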
+int
+bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
+ &iocmd->pcifn_cfg,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
+ &iocmd->pcifn_id, iocmd->port,
+ iocmd->pcifn_class, iocmd->bandwidth,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
+ iocmd->pcifn_id,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
+ iocmd->pcifn_id, iocmd->bandwidth,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ bfa_trc(bfad, iocmd->status);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_adapter_cfg_mode_s *iocmd =
+ (struct bfa_bsg_adapter_cfg_mode_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
+ iocmd->cfg.mode, iocmd->cfg.max_pf,
+ iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_port_cfg_mode_s *iocmd =
+ (struct bfa_bsg_port_cfg_mode_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
+ iocmd->instance, iocmd->cfg.mode,
+ iocmd->cfg.max_pf, iocmd->cfg.max_vf,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
+ iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
+ bfad_hcb_comp, &fcomp);
+ else
+ iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+ struct bfad_hal_comp fcomp;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+ struct bfad_hal_comp fcomp;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
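+/*
+ * CEE attribute/statistics queries additionally serialize on bfad_mutex
+ * and return the CEE data in the buffer that follows the bsg header.
+ */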
+int
+bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+ struct bfa_bsg_cee_attr_s *iocmd =
+ (struct bfa_bsg_cee_attr_s *)cmd;
+ void *iocmd_bufptr;
+ struct bfad_hal_comp cee_comp;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_cee_attr_s),
+ sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
+
+ cee_comp.status = 0;
+ init_completion(&cee_comp.comp);
+ mutex_lock(&bfad_mutex);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
+ bfad_hcb_comp, &cee_comp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ mutex_unlock(&bfad_mutex);
+ bfa_trc(bfad, 0x5555);
+ goto out;
+ }
+ wait_for_completion(&cee_comp.comp);
+ mutex_unlock(&bfad_mutex);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_cee_stats_s *iocmd =
+ (struct bfa_bsg_cee_stats_s *)cmd;
+ void *iocmd_bufptr;
+ struct bfad_hal_comp cee_comp;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_cee_stats_s),
+ sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
+
+ cee_comp.status = 0;
+ init_completion(&cee_comp.comp);
+ mutex_lock(&bfad_mutex);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
+ bfad_hcb_comp, &cee_comp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ mutex_unlock(&bfad_mutex);
+ bfa_trc(bfad, 0x5555);
+ goto out;
+ }
+ wait_for_completion(&cee_comp.comp);
+ mutex_unlock(&bfad_mutex);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ bfa_trc(bfad, 0x5555);
+ return 0;
+}
+
+int
+bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_flash_attr_s *iocmd =
+ (struct bfa_bsg_flash_attr_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
+ iocmd->instance, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
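+/*
+ * Flash partition read/update: the transfer buffer of iocmd->bufsz bytes
+ * immediately follows struct bfa_bsg_flash_s in the bsg payload and is
+ * validated against payload_len before the flash operation is started.
+ */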
+int
+bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+ void *iocmd_bufptr;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_flash_s),
+ iocmd->bufsz) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+ iocmd->type, iocmd->instance, iocmd_bufptr,
+ iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_flash_s),
+ iocmd->bufsz) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
+ iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
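+/*
+ * Diagnostic handlers (temperature, memtest, loopback, fwping, queuetest,
+ * SFP, LED, beacon, loopback status): start the diag operation in the HAL
+ * and, where the operation completes asynchronously, wait for
+ * bfad_hcb_comp() to post the result.
+ */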
+int
+bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_get_temp_s *iocmd =
+ (struct bfa_bsg_diag_get_temp_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
+ &iocmd->result, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_memtest_s *iocmd =
+ (struct bfa_bsg_diag_memtest_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
+ &iocmd->memtest, iocmd->pat,
+ &iocmd->result, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_loopback_s *iocmd =
+ (struct bfa_bsg_diag_loopback_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
+ iocmd->speed, iocmd->lpcnt, iocmd->pat,
+ &iocmd->result, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_fwping_s *iocmd =
+ (struct bfa_bsg_diag_fwping_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
+ iocmd->pattern, &iocmd->result,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ bfa_trc(bfad, 0x77771);
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
+ iocmd->queue, &iocmd->result,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_sfp_show_s *iocmd =
+ (struct bfa_bsg_sfp_show_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ bfa_trc(bfad, iocmd->status);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
+ &iocmd->ledtest);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_beacon_s *iocmd =
+ (struct bfa_bsg_diag_beacon_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
+ iocmd->beacon, iocmd->link_e2e_beacon,
+ iocmd->second);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_lb_stat_s *iocmd =
+ (struct bfa_bsg_diag_lb_stat_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+
+ return 0;
+}
+
+int
+bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_phy_attr_s *iocmd =
+ (struct bfa_bsg_phy_attr_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
+ &iocmd->attr, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_phy_stats_s *iocmd =
+ (struct bfa_bsg_phy_stats_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
+ &iocmd->stats, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+ struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_phy_s),
+ iocmd->bufsz) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
+ iocmd->instance, iocmd_bufptr, iocmd->bufsz,
+ 0, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_vhba_attr_s *iocmd =
+ (struct bfa_bsg_vhba_attr_s *)cmd;
+ struct bfa_vhba_attr_s *attr = &iocmd->attr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ attr->pwwn = bfad->bfa.ioc.attr->pwwn;
+ attr->nwwn = bfad->bfa.ioc.attr->nwwn;
+ attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
+ attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
+ attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
+ iocmd->status = BFA_STATUS_OK;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+ struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
+ void *iocmd_bufptr;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_phy_s),
+ iocmd->bufsz) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
+ iocmd->instance, iocmd_bufptr, iocmd->bufsz,
+ 0, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
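+/*
+ * Copy the driver port log (struct bfa_plog_s) into the response buffer,
+ * provided the caller supplied a buffer large enough to hold it.
+ */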
+int
+bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
+ void *iocmd_bufptr;
+
+ if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
+ bfa_trc(bfad, sizeof(struct bfa_plog_s));
+ iocmd->status = BFA_STATUS_EINVAL;
+ goto out;
+ }
+
+ iocmd->status = BFA_STATUS_OK;
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
+ memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
+out:
+ return 0;
+}
+
+#define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
+int
+bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
+ BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
+ !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
+ !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
+ bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
+ iocmd->status = BFA_STATUS_EINVAL;
+ goto out;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
+ (u32 *)&iocmd->offset, &iocmd->bufsz);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR) {
+ bfad->plog_buf.head = bfad->plog_buf.tail = 0;
+ } else if (v_cmd == IOCMD_DEBUG_START_DTRC) {
+ bfa_trc_init(bfad->trcmod);
+ } else if (v_cmd == IOCMD_DEBUG_STOP_DTRC) {
+ bfa_trc_stop(bfad->trcmod);
+ }
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
+
+ if (iocmd->ctl == BFA_TRUE)
+ bfad->plog_buf.plog_enabled = 1;
+ else
+ bfad->plog_buf.plog_enabled = 0;
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_fcpim_profile_s *iocmd =
+ (struct bfa_bsg_fcpim_profile_s *)cmd;
+ struct timeval tv;
+ unsigned long flags;
+
+ do_gettimeofday(&tv);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
+ iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
+ else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
+ iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_itnim_ioprofile_s *iocmd =
+ (struct bfa_bsg_itnim_ioprofile_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->lpwwn);
+ if (!fcs_port)
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ else {
+ itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (itnim == NULL)
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ else
+ iocmd->status = bfa_itnim_get_ioprofile(
+ bfa_fcs_itnim_get_halitn(itnim),
+ &iocmd->ioprofile);
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
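+/*
+ * FC port statistics are collected through a pending-queue element set up
+ * with bfa_pending_q_init(); the queued callback is expected to complete
+ * fcomp and place the statistics in iocmd->stats.
+ */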
+int
+bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcport_stats_s *iocmd =
+ (struct bfa_bsg_fcport_stats_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+ &fcomp, &iocmd->stats);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
+ &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
+ &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
+ struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
+ pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
+ pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
+ memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
+ iocmd->status = BFA_STATUS_OK;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_PXECFG,
+ bfad->bfa.ioc.port_id, &iocmd->cfg,
+ sizeof(struct bfa_ethboot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_PXECFG,
+ bfad->bfa.ioc.port_id, &iocmd->cfg,
+ sizeof(struct bfa_ethboot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
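+/*
+ * Trunking is reconfigured by disabling the FC port, updating cfg.trunked
+ * and the reported trunk state, and re-enabling the port if
+ * bfa_fcport_is_disabled() reports it is not disabled.
+ */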
+int
+bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ if (v_cmd == IOCMD_TRUNK_ENABLE) {
+ trunk->attr.state = BFA_TRUNK_OFFLINE;
+ bfa_fcport_disable(&bfad->bfa);
+ fcport->cfg.trunked = BFA_TRUE;
+ } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
+ trunk->attr.state = BFA_TRUNK_DISABLED;
+ bfa_fcport_disable(&bfad->bfa);
+ fcport->cfg.trunked = BFA_FALSE;
+ }
+
+ if (!bfa_fcport_is_disabled(&bfad->bfa))
+ bfa_fcport_enable(&bfad->bfa);
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
+ sizeof(struct bfa_trunk_attr_s));
+ iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
+ if (v_cmd == IOCMD_QOS_ENABLE)
+ fcport->cfg.qos_enabled = BFA_TRUE;
+ else if (v_cmd == IOCMD_QOS_DISABLE)
+ fcport->cfg.qos_enabled = BFA_FALSE;
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->attr.state = fcport->qos_attr.state;
+ iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_qos_vc_attr_s *iocmd =
+ (struct bfa_bsg_qos_vc_attr_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
+ unsigned long flags;
+ u32 i = 0;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
+ iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
+ iocmd->attr.elp_opmode_flags =
+ be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
+
+ /* Individual VC info */
+ while (i < iocmd->attr.total_vc_count) {
+ iocmd->attr.vc_info[i].vc_credit =
+ bfa_vc_attr->vc_info[i].vc_credit;
+ iocmd->attr.vc_info[i].borrow_credit =
+ bfa_vc_attr->vc_info[i].borrow_credit;
+ iocmd->attr.vc_info[i].priority =
+ bfa_vc_attr->vc_info[i].priority;
+ i++;
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcport_stats_s *iocmd =
+ (struct bfa_bsg_fcport_stats_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+ &fcomp, &iocmd->stats);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+ iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+ &fcomp, NULL);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+ iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_vf_stats_s *iocmd =
+ (struct bfa_bsg_vf_stats_s *)cmd;
+ struct bfa_fcs_fabric_s *fcs_vf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+ if (fcs_vf == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+ goto out;
+ }
+ memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
+ sizeof(struct bfa_vf_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_vf_reset_stats_s *iocmd =
+ (struct bfa_bsg_vf_reset_stats_s *)cmd;
+ struct bfa_fcs_fabric_s *fcs_vf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+ if (fcs_vf == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+ goto out;
+ }
+ memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
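+/*
+ * LUN masking: enable, disable or clear the global LUN mask, query the
+ * current mask, or add/delete individual (lport, rport, lun) entries.
+ */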
+int
+bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+ iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
+ else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+ iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
+ else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+ iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
+ (struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
+ struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_fcpim_lunmask_s *iocmd =
+ (struct bfa_bsg_fcpim_lunmask_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
+ iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
+ &iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
+ else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
+ iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
+ iocmd->vf_id, &iocmd->pwwn,
+ iocmd->rpwwn, iocmd->lun);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
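+/*
+ * Dispatch a vendor-specific bsg command to its handler. Each handler
+ * returns 0 and reports the per-command result in the iocmd status field;
+ * only an unrecognized command code yields -EINVAL here.
+ */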
+static int
+bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
+ unsigned int payload_len)
+{
+ int rc = -EINVAL;
+
+ switch (cmd) {
+ case IOCMD_IOC_ENABLE:
+ rc = bfad_iocmd_ioc_enable(bfad, iocmd);
+ break;
+ case IOCMD_IOC_DISABLE:
+ rc = bfad_iocmd_ioc_disable(bfad, iocmd);
+ break;
+ case IOCMD_IOC_GET_INFO:
+ rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
+ break;
+ case IOCMD_IOC_GET_ATTR:
+ rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_IOC_GET_STATS:
+ rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_IOC_GET_FWSTATS:
+ rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_IOC_RESET_STATS:
+ case IOCMD_IOC_RESET_FWSTATS:
+ rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
+ break;
+ case IOCMD_IOC_SET_ADAPTER_NAME:
+ case IOCMD_IOC_SET_PORT_NAME:
+ rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
+ break;
+ case IOCMD_IOCFC_GET_ATTR:
+ rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_IOCFC_SET_INTR:
+ rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
+ break;
+ case IOCMD_PORT_ENABLE:
+ rc = bfad_iocmd_port_enable(bfad, iocmd);
+ break;
+ case IOCMD_PORT_DISABLE:
+ rc = bfad_iocmd_port_disable(bfad, iocmd);
+ break;
+ case IOCMD_PORT_GET_ATTR:
+ rc = bfad_iocmd_port_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_PORT_GET_STATS:
+ rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_PORT_RESET_STATS:
+ rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_PORT_CFG_TOPO:
+ case IOCMD_PORT_CFG_SPEED:
+ case IOCMD_PORT_CFG_ALPA:
+ case IOCMD_PORT_CLR_ALPA:
+ rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
+ break;
+ case IOCMD_PORT_CFG_MAXFRSZ:
+ rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
+ break;
+ case IOCMD_PORT_BBSC_ENABLE:
+ case IOCMD_PORT_BBSC_DISABLE:
+ rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd);
+ break;
+ case IOCMD_LPORT_GET_ATTR:
+ rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_LPORT_GET_STATS:
+ rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_LPORT_RESET_STATS:
+ rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_LPORT_GET_IOSTATS:
+ rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
+ break;
+ case IOCMD_LPORT_GET_RPORTS:
+ rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_RPORT_GET_ATTR:
+ rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_RPORT_GET_ADDR:
+ rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
+ break;
+ case IOCMD_RPORT_GET_STATS:
+ rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_RPORT_RESET_STATS:
+ rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
+ break;
+ case IOCMD_RPORT_SET_SPEED:
+ rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
+ break;
+ case IOCMD_VPORT_GET_ATTR:
+ rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_VPORT_GET_STATS:
+ rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_VPORT_RESET_STATS:
+ rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
+ break;
+ case IOCMD_FABRIC_GET_LPORTS:
+ rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_RATELIM_ENABLE:
+ case IOCMD_RATELIM_DISABLE:
+ rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
+ break;
+ case IOCMD_RATELIM_DEF_SPEED:
+ rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
+ break;
+ case IOCMD_FCPIM_FAILOVER:
+ rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_MODSTATS:
+ rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_MODSTATSCLR:
+ rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_DEL_ITN_STATS:
+ rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
+ break;
+ case IOCMD_ITNIM_GET_ATTR:
+ rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_ITNIM_GET_IOSTATS:
+ rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
+ break;
+ case IOCMD_ITNIM_RESET_STATS:
+ rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_ITNIM_GET_ITNSTATS:
+ rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
+ break;
+ case IOCMD_FCPORT_ENABLE:
+ rc = bfad_iocmd_fcport_enable(bfad, iocmd);
+ break;
+ case IOCMD_FCPORT_DISABLE:
+ rc = bfad_iocmd_fcport_disable(bfad, iocmd);
+ break;
+ case IOCMD_IOC_PCIFN_CFG:
+ rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
+ break;
+ case IOCMD_PCIFN_CREATE:
+ rc = bfad_iocmd_pcifn_create(bfad, iocmd);
+ break;
+ case IOCMD_PCIFN_DELETE:
+ rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
+ break;
+ case IOCMD_PCIFN_BW:
+ rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
+ break;
+ case IOCMD_ADAPTER_CFG_MODE:
+ rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
+ break;
+ case IOCMD_PORT_CFG_MODE:
+ rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
+ break;
+ case IOCMD_FLASH_ENABLE_OPTROM:
+ case IOCMD_FLASH_DISABLE_OPTROM:
+ rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
+ break;
+ case IOCMD_FAA_ENABLE:
+ rc = bfad_iocmd_faa_enable(bfad, iocmd);
+ break;
+ case IOCMD_FAA_DISABLE:
+ rc = bfad_iocmd_faa_disable(bfad, iocmd);
+ break;
+ case IOCMD_FAA_QUERY:
+ rc = bfad_iocmd_faa_query(bfad, iocmd);
+ break;
+ case IOCMD_CEE_GET_ATTR:
+ rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_CEE_GET_STATS:
+ rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_CEE_RESET_STATS:
+ rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_SFP_MEDIA:
+ rc = bfad_iocmd_sfp_media(bfad, iocmd);
+ break;
+ case IOCMD_SFP_SPEED:
+ rc = bfad_iocmd_sfp_speed(bfad, iocmd);
+ break;
+ case IOCMD_FLASH_GET_ATTR:
+ rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_FLASH_ERASE_PART:
+ rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
+ break;
+ case IOCMD_FLASH_UPDATE_PART:
+ rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_FLASH_READ_PART:
+ rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_DIAG_TEMP:
+ rc = bfad_iocmd_diag_temp(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_MEMTEST:
+ rc = bfad_iocmd_diag_memtest(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_LOOPBACK:
+ rc = bfad_iocmd_diag_loopback(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_FWPING:
+ rc = bfad_iocmd_diag_fwping(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_QUEUETEST:
+ rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_SFP:
+ rc = bfad_iocmd_diag_sfp(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_LED:
+ rc = bfad_iocmd_diag_led(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_BEACON_LPORT:
+ rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_LB_STAT:
+ rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
+ break;
+ case IOCMD_PHY_GET_ATTR:
+ rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_PHY_GET_STATS:
+ rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_PHY_UPDATE_FW:
+ rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_PHY_READ_FW:
+ rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_VHBA_QUERY:
+ rc = bfad_iocmd_vhba_query(bfad, iocmd);
+ break;
+ case IOCMD_DEBUG_PORTLOG:
+ rc = bfad_iocmd_porglog_get(bfad, iocmd);
+ break;
+ case IOCMD_DEBUG_FW_CORE:
+ rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_DEBUG_FW_STATE_CLR:
+ case IOCMD_DEBUG_PORTLOG_CLR:
+ case IOCMD_DEBUG_START_DTRC:
+ case IOCMD_DEBUG_STOP_DTRC:
+ rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
+ break;
+ case IOCMD_DEBUG_PORTLOG_CTL:
+ rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_PROFILE_ON:
+ case IOCMD_FCPIM_PROFILE_OFF:
+ rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
+ break;
+ case IOCMD_ITNIM_GET_IOPROFILE:
+ rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
+ break;
+ case IOCMD_FCPORT_GET_STATS:
+ rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_FCPORT_RESET_STATS:
+ rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_BOOT_CFG:
+ rc = bfad_iocmd_boot_cfg(bfad, iocmd);
+ break;
+ case IOCMD_BOOT_QUERY:
+ rc = bfad_iocmd_boot_query(bfad, iocmd);
+ break;
+ case IOCMD_PREBOOT_QUERY:
+ rc = bfad_iocmd_preboot_query(bfad, iocmd);
+ break;
+ case IOCMD_ETHBOOT_CFG:
+ rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
+ break;
+ case IOCMD_ETHBOOT_QUERY:
+ rc = bfad_iocmd_ethboot_query(bfad, iocmd);
+ break;
+ case IOCMD_TRUNK_ENABLE:
+ case IOCMD_TRUNK_DISABLE:
+ rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
+ break;
+ case IOCMD_TRUNK_GET_ATTR:
+ rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_QOS_ENABLE:
+ case IOCMD_QOS_DISABLE:
+ rc = bfad_iocmd_qos(bfad, iocmd, cmd);
+ break;
+ case IOCMD_QOS_GET_ATTR:
+ rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_QOS_GET_VC_ATTR:
+ rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
+ break;
+ case IOCMD_QOS_GET_STATS:
+ rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_QOS_RESET_STATS:
+ rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_VF_GET_STATS:
+ rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_VF_RESET_STATS:
+ rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_LUNMASK_ENABLE:
+ case IOCMD_FCPIM_LUNMASK_DISABLE:
+ case IOCMD_FCPIM_LUNMASK_CLEAR:
+ rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
+ break;
+ case IOCMD_FCPIM_LUNMASK_QUERY:
+ rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_LUNMASK_ADD:
+ case IOCMD_FCPIM_LUNMASK_DELETE:
+ rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
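+/*
+ * bsg vendor request entry point: copy the scattered request payload into
+ * a linear kernel buffer, run the IOCMD handler on it in place, then copy
+ * the same buffer back as the reply payload before completing the job.
+ */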
+static int
+bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
+{
+ uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) job->shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ void *payload_kbuf;
+ int rc = -EINVAL;
+
+ /* Allocate a temp buffer to hold the passed in user space command */
+ payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
+ if (!payload_kbuf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, payload_kbuf,
+ job->request_payload.payload_len);
+
+ /* Invoke IOCMD handler - to handle all the vendor command requests */
+ rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
+ job->request_payload.payload_len);
+ if (rc != BFA_STATUS_OK)
+ goto error;
+
+ /* Copy the response data to the job->reply_payload sg_list */
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ payload_kbuf,
+ job->reply_payload.payload_len);
+
+ /* free the command buffer */
+ kfree(payload_kbuf);
+
+ /* Fill the BSG job reply data */
+ job->reply_len = job->reply_payload.payload_len;
+ job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+ job->reply->result = rc;
+
+ job->job_done(job);
+ return rc;
+error:
+ /* free the command buffer */
+ kfree(payload_kbuf);
+out:
+ job->reply->result = rc;
+ job->reply_len = sizeof(uint32_t);
+ job->reply->reply_payload_rcv_len = 0;
+ return rc;
+}
+
+/* FC passthru call backs */
+u64
+bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+ struct bfa_sge_s *sge;
+ u64 addr;
+
+ sge = drv_fcxp->req_sge + sgeid;
+ addr = (u64)(size_t) sge->sg_addr;
+ return addr;
+}
+
+u32
+bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+ struct bfa_sge_s *sge;
+
+ sge = drv_fcxp->req_sge + sgeid;
+ return sge->sg_len;
+}
+
+u64
+bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+ struct bfa_sge_s *sge;
+ u64 addr;
+
+ sge = drv_fcxp->rsp_sge + sgeid;
+ addr = (u64)(size_t) sge->sg_addr;
+ return addr;
+}
+
+u32
+bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+ struct bfa_sge_s *sge;
+
+ sge = drv_fcxp->rsp_sge + sgeid;
+ return sge->sg_len;
+}
+
+void
+bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+
+ drv_fcxp->req_status = req_status;
+ drv_fcxp->rsp_len = rsp_len;
+
+ /* bfa_fcxp will be automatically freed by BFA */
+ drv_fcxp->bfa_fcxp = NULL;
+ complete(&drv_fcxp->comp);
+}
+
+struct bfad_buf_info *
+bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
+ uint32_t payload_len, uint32_t *num_sgles)
+{
+ struct bfad_buf_info *buf_base, *buf_info;
+ struct bfa_sge_s *sg_table;
+ int sge_num = 1;
+
+ buf_base = kzalloc((sizeof(struct bfad_buf_info) +
+ sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
+ if (!buf_base)
+ return NULL;
+
+ sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
+ (sizeof(struct bfad_buf_info) * sge_num));
+
+ /* Allocate dma coherent memory */
+ buf_info = buf_base;
+ buf_info->size = payload_len;
+ buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
+ &buf_info->phys, GFP_KERNEL);
+ if (!buf_info->virt)
+ goto out_free_mem;
+
+ /* copy the linear bsg buffer to buf_info */
+ memset(buf_info->virt, 0, buf_info->size);
+ memcpy(buf_info->virt, payload_kbuf, buf_info->size);
+
+ /*
+ * Setup SG table
+ */
+ sg_table->sg_len = buf_info->size;
+ sg_table->sg_addr = (void *)(size_t) buf_info->phys;
+
+ *num_sgles = sge_num;
+
+ return buf_base;
+
+out_free_mem:
+ kfree(buf_base);
+ return NULL;
+}
+
+void
+bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
+ uint32_t num_sgles)
+{
+ int i;
+ struct bfad_buf_info *buf_info = buf_base;
+
+ if (buf_base) {
+ for (i = 0; i < num_sgles; buf_info++, i++) {
+ if (buf_info->virt != NULL)
+ dma_free_coherent(&bfad->pcidev->dev,
+ buf_info->size, buf_info->virt,
+ buf_info->phys);
+ }
+ kfree(buf_base);
+ }
+}
+
+int
+bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
+ bfa_bsg_fcpt_t *bsg_fcpt)
+{
+ struct bfa_fcxp_s *hal_fcxp;
+ struct bfad_s *bfad = drv_fcxp->port->bfad;
+ unsigned long flags;
+ uint8_t lp_tag;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ /* Allocate bfa_fcxp structure */
+ hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
+ drv_fcxp->num_req_sgles,
+ drv_fcxp->num_rsp_sgles,
+ bfad_fcxp_get_req_sgaddr_cb,
+ bfad_fcxp_get_req_sglen_cb,
+ bfad_fcxp_get_rsp_sgaddr_cb,
+ bfad_fcxp_get_rsp_sglen_cb);
+ if (!hal_fcxp) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return BFA_STATUS_ENOMEM;
+ }
+
+ drv_fcxp->bfa_fcxp = hal_fcxp;
+
+ lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
+
+ bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
+ bsg_fcpt->cts, bsg_fcpt->cos,
+ job->request_payload.payload_len,
+ &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
+ job->reply_payload.payload_len, bsg_fcpt->tsecs);
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return BFA_STATUS_OK;
+}
+
+int
+bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
+{
+ struct bfa_bsg_data *bsg_data;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) job->shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ bfa_bsg_fcpt_t *bsg_fcpt;
+ struct bfad_fcxp *drv_fcxp;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ uint32_t command_type = job->request->msgcode;
+ unsigned long flags;
+ struct bfad_buf_info *rsp_buf_info;
+ void *req_kbuf = NULL, *rsp_kbuf = NULL;
+ int rc = -EINVAL;
+
+ job->reply_len = sizeof(uint32_t); /* At least uint32_t reply_len */
+ job->reply->reply_payload_rcv_len = 0;
+
+ /* Get the payload passed in from userspace */
+ bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
+ sizeof(struct fc_bsg_request));
+ if (bsg_data == NULL)
+ goto out;
+
+ /*
+ * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
+ * buffer of size bsg_data->payload_len
+ */
+ bsg_fcpt = (struct bfa_bsg_fcpt_s *)
+ kzalloc(bsg_data->payload_len, GFP_KERNEL);
+ if (!bsg_fcpt)
+ goto out;
+
+ if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
+ bsg_data->payload_len)) {
+ kfree(bsg_fcpt);
+ goto out;
+ }
+
+ drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
+ if (drv_fcxp == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
+ bsg_fcpt->lpwwn);
+ if (fcs_port == NULL) {
+ bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ goto out_free_mem;
+ }
+
+ /* Check if the port is online before sending FC Passthru cmd */
+ if (!bfa_fcs_lport_is_online(fcs_port)) {
+ bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ goto out_free_mem;
+ }
+
+ drv_fcxp->port = fcs_port->bfad_port;
+
+ if (drv_fcxp->port->bfad == 0)
+ drv_fcxp->port->bfad = bfad;
+
+ /* Fetch the bfa_rport - if nexus needed */
+ if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
+ command_type == FC_BSG_HST_CT) {
+ /* BSG HST commands: no nexus needed */
+ drv_fcxp->bfa_rport = NULL;
+
+ } else if (command_type == FC_BSG_RPT_ELS ||
+ command_type == FC_BSG_RPT_CT) {
+ /* BSG RPT commands: nexus needed */
+ fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
+ bsg_fcpt->dpwwn);
+ if (fcs_rport == NULL) {
+ bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ goto out_free_mem;
+ }
+
+ drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
+
+ } else { /* Unknown BSG msgcode; return -EINVAL */
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ goto out_free_mem;
+ }
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ /* allocate memory for req / rsp buffers */
+ req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
+ if (!req_kbuf) {
+ printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
+ bfad->pci_name);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
+ if (!rsp_kbuf) {
+ printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
+ bfad->pci_name);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ /* map req sg - copy the sg_list passed in to the linear buffer */
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, req_kbuf,
+ job->request_payload.payload_len);
+
+ drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
+ job->request_payload.payload_len,
+ &drv_fcxp->num_req_sgles);
+ if (!drv_fcxp->reqbuf_info) {
+ printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
+ bfad->pci_name);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ drv_fcxp->req_sge = (struct bfa_sge_s *)
+ (((uint8_t *)drv_fcxp->reqbuf_info) +
+ (sizeof(struct bfad_buf_info) *
+ drv_fcxp->num_req_sgles));
+
+ /* map rsp sg */
+ drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
+ job->reply_payload.payload_len,
+ &drv_fcxp->num_rsp_sgles);
+ if (!drv_fcxp->rspbuf_info) {
+ printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
+ bfad->pci_name);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
+ drv_fcxp->rsp_sge = (struct bfa_sge_s *)
+ (((uint8_t *)drv_fcxp->rspbuf_info) +
+ (sizeof(struct bfad_buf_info) *
+ drv_fcxp->num_rsp_sgles));
+
+ /* fcxp send */
+ init_completion(&drv_fcxp->comp);
+ rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
+ if (rc == BFA_STATUS_OK) {
+ wait_for_completion(&drv_fcxp->comp);
+ bsg_fcpt->status = drv_fcxp->req_status;
+ } else {
+ bsg_fcpt->status = rc;
+ goto out_free_mem;
+ }
+
+ /* fill the job->reply data */
+ if (drv_fcxp->req_status == BFA_STATUS_OK) {
+ job->reply_len = drv_fcxp->rsp_len;
+ job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
+ job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ } else {
+ job->reply->reply_payload_rcv_len =
+ sizeof(struct fc_bsg_ctels_reply);
+ job->reply_len = sizeof(uint32_t);
+ job->reply->reply_data.ctels_reply.status =
+ FC_CTELS_STATUS_REJECT;
+ }
+
+ /* Copy the response data to the reply_payload sg list */
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ (uint8_t *)rsp_buf_info->virt,
+ job->reply_payload.payload_len);
+
+out_free_mem:
+ bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
+ drv_fcxp->num_rsp_sgles);
+ bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
+ drv_fcxp->num_req_sgles);
+ kfree(req_kbuf);
+ kfree(rsp_kbuf);
+
+ /* Need a copy to user op */
+ if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt,
+ bsg_data->payload_len))
+ rc = -EIO;
+
+ kfree(bsg_fcpt);
+ kfree(drv_fcxp);
+out:
+ job->reply->result = rc;
+
+ if (rc == BFA_STATUS_OK)
+ job->job_done(job);
+
+ return rc;
+}
+
+int
+bfad_im_bsg_request(struct fc_bsg_job *job)
+{
+ uint32_t rc = BFA_STATUS_OK;
+
+ switch (job->request->msgcode) {
+ case FC_BSG_HST_VENDOR:
+ /* Process BSG HST Vendor requests */
+ rc = bfad_im_bsg_vendor_request(job);
+ break;
+ case FC_BSG_HST_ELS_NOLOGIN:
+ case FC_BSG_RPT_ELS:
+ case FC_BSG_HST_CT:
+ case FC_BSG_RPT_CT:
+ /* Process BSG ELS/CT commands */
+ rc = bfad_im_bsg_els_ct_request(job);
+ break;
+ default:
+ job->reply->result = rc = -EINVAL;
+ job->reply->reply_payload_rcv_len = 0;
+ break;
+ }
+
+ return rc;
+}
+
+int
+bfad_im_bsg_timeout(struct fc_bsg_job *job)
+{
+ /* Don't complete the BSG job request - return -EAGAIN
+ * to reset the bsg job timeout: for ELS/CT pass-through we
+ * already have a timer to track the request.
+ */
+ return -EAGAIN;
+}
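
For reference, a minimal sketch (not part of the patch) of the handler convention that bfad_iocmd_handler() dispatches to: each bfad_iocmd_*() routine is assumed to cast the linear payload buffer to its per-command bsg structure, do its work under bfad_lock, and report the outcome in the structure's status field so it travels back through the reply sg_list. bfad_iocmd_example() is a hypothetical name and the return value convention is inferred from the surrounding code.

static int
bfad_iocmd_example(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* query or configure adapter state here, under the driver lock */
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;	/* status is carried back in the payload */
	return 0;	/* assumed == BFA_STATUS_OK: payload is copied to the reply sg_list */
}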
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
new file mode 100644
index 00000000000..e859adb9aa9
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -0,0 +1,746 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef BFAD_BSG_H
+#define BFAD_BSG_H
+
+#include "bfa_defs.h"
+#include "bfa_defs_fcs.h"
+
+/* Definitions of vendor unique structures and command codes passed in
+ * using FC_BSG_HST_VENDOR message code.
+ */
+enum {
+ IOCMD_IOC_ENABLE = 0x1,
+ IOCMD_IOC_DISABLE,
+ IOCMD_IOC_GET_ATTR,
+ IOCMD_IOC_GET_INFO,
+ IOCMD_IOC_GET_STATS,
+ IOCMD_IOC_GET_FWSTATS,
+ IOCMD_IOC_RESET_STATS,
+ IOCMD_IOC_RESET_FWSTATS,
+ IOCMD_IOC_SET_ADAPTER_NAME,
+ IOCMD_IOC_SET_PORT_NAME,
+ IOCMD_IOCFC_GET_ATTR,
+ IOCMD_IOCFC_SET_INTR,
+ IOCMD_PORT_ENABLE,
+ IOCMD_PORT_DISABLE,
+ IOCMD_PORT_GET_ATTR,
+ IOCMD_PORT_GET_STATS,
+ IOCMD_PORT_RESET_STATS,
+ IOCMD_PORT_CFG_TOPO,
+ IOCMD_PORT_CFG_SPEED,
+ IOCMD_PORT_CFG_ALPA,
+ IOCMD_PORT_CFG_MAXFRSZ,
+ IOCMD_PORT_CLR_ALPA,
+ IOCMD_PORT_BBSC_ENABLE,
+ IOCMD_PORT_BBSC_DISABLE,
+ IOCMD_LPORT_GET_ATTR,
+ IOCMD_LPORT_GET_RPORTS,
+ IOCMD_LPORT_GET_STATS,
+ IOCMD_LPORT_RESET_STATS,
+ IOCMD_LPORT_GET_IOSTATS,
+ IOCMD_RPORT_GET_ATTR,
+ IOCMD_RPORT_GET_ADDR,
+ IOCMD_RPORT_GET_STATS,
+ IOCMD_RPORT_RESET_STATS,
+ IOCMD_RPORT_SET_SPEED,
+ IOCMD_VPORT_GET_ATTR,
+ IOCMD_VPORT_GET_STATS,
+ IOCMD_VPORT_RESET_STATS,
+ IOCMD_FABRIC_GET_LPORTS,
+ IOCMD_RATELIM_ENABLE,
+ IOCMD_RATELIM_DISABLE,
+ IOCMD_RATELIM_DEF_SPEED,
+ IOCMD_FCPIM_FAILOVER,
+ IOCMD_FCPIM_MODSTATS,
+ IOCMD_FCPIM_MODSTATSCLR,
+ IOCMD_FCPIM_DEL_ITN_STATS,
+ IOCMD_ITNIM_GET_ATTR,
+ IOCMD_ITNIM_GET_IOSTATS,
+ IOCMD_ITNIM_RESET_STATS,
+ IOCMD_ITNIM_GET_ITNSTATS,
+ IOCMD_IOC_PCIFN_CFG,
+ IOCMD_FCPORT_ENABLE,
+ IOCMD_FCPORT_DISABLE,
+ IOCMD_PCIFN_CREATE,
+ IOCMD_PCIFN_DELETE,
+ IOCMD_PCIFN_BW,
+ IOCMD_ADAPTER_CFG_MODE,
+ IOCMD_PORT_CFG_MODE,
+ IOCMD_FLASH_ENABLE_OPTROM,
+ IOCMD_FLASH_DISABLE_OPTROM,
+ IOCMD_FAA_ENABLE,
+ IOCMD_FAA_DISABLE,
+ IOCMD_FAA_QUERY,
+ IOCMD_CEE_GET_ATTR,
+ IOCMD_CEE_GET_STATS,
+ IOCMD_CEE_RESET_STATS,
+ IOCMD_SFP_MEDIA,
+ IOCMD_SFP_SPEED,
+ IOCMD_FLASH_GET_ATTR,
+ IOCMD_FLASH_ERASE_PART,
+ IOCMD_FLASH_UPDATE_PART,
+ IOCMD_FLASH_READ_PART,
+ IOCMD_DIAG_TEMP,
+ IOCMD_DIAG_MEMTEST,
+ IOCMD_DIAG_LOOPBACK,
+ IOCMD_DIAG_FWPING,
+ IOCMD_DIAG_QUEUETEST,
+ IOCMD_DIAG_SFP,
+ IOCMD_DIAG_LED,
+ IOCMD_DIAG_BEACON_LPORT,
+ IOCMD_DIAG_LB_STAT,
+ IOCMD_PHY_GET_ATTR,
+ IOCMD_PHY_GET_STATS,
+ IOCMD_PHY_UPDATE_FW,
+ IOCMD_PHY_READ_FW,
+ IOCMD_VHBA_QUERY,
+ IOCMD_DEBUG_PORTLOG,
+ IOCMD_DEBUG_FW_CORE,
+ IOCMD_DEBUG_FW_STATE_CLR,
+ IOCMD_DEBUG_PORTLOG_CLR,
+ IOCMD_DEBUG_START_DTRC,
+ IOCMD_DEBUG_STOP_DTRC,
+ IOCMD_DEBUG_PORTLOG_CTL,
+ IOCMD_FCPIM_PROFILE_ON,
+ IOCMD_FCPIM_PROFILE_OFF,
+ IOCMD_ITNIM_GET_IOPROFILE,
+ IOCMD_FCPORT_GET_STATS,
+ IOCMD_FCPORT_RESET_STATS,
+ IOCMD_BOOT_CFG,
+ IOCMD_BOOT_QUERY,
+ IOCMD_PREBOOT_QUERY,
+ IOCMD_ETHBOOT_CFG,
+ IOCMD_ETHBOOT_QUERY,
+ IOCMD_TRUNK_ENABLE,
+ IOCMD_TRUNK_DISABLE,
+ IOCMD_TRUNK_GET_ATTR,
+ IOCMD_QOS_ENABLE,
+ IOCMD_QOS_DISABLE,
+ IOCMD_QOS_GET_ATTR,
+ IOCMD_QOS_GET_VC_ATTR,
+ IOCMD_QOS_GET_STATS,
+ IOCMD_QOS_RESET_STATS,
+ IOCMD_VF_GET_STATS,
+ IOCMD_VF_RESET_STATS,
+ IOCMD_FCPIM_LUNMASK_ENABLE,
+ IOCMD_FCPIM_LUNMASK_DISABLE,
+ IOCMD_FCPIM_LUNMASK_CLEAR,
+ IOCMD_FCPIM_LUNMASK_QUERY,
+ IOCMD_FCPIM_LUNMASK_ADD,
+ IOCMD_FCPIM_LUNMASK_DELETE,
+};
+
+struct bfa_bsg_gen_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+};
+
+struct bfa_bsg_portlogctl_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ bfa_boolean_t ctl;
+ int inst_no;
+};
+
+struct bfa_bsg_fcpim_profile_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+};
+
+struct bfa_bsg_itnim_ioprofile_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t rpwwn;
+ struct bfa_itnim_ioprofile_s ioprofile;
+};
+
+struct bfa_bsg_fcport_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ union bfa_fcport_stats_u stats;
+};
+
+struct bfa_bsg_ioc_name_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ char name[BFA_ADAPTER_SYM_NAME_LEN];
+};
+
+struct bfa_bsg_ioc_info_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ char serialnum[64];
+ char hwpath[BFA_STRING_32];
+ char adapter_hwpath[BFA_STRING_32];
+ char guid[BFA_ADAPTER_SYM_NAME_LEN*2];
+ char name[BFA_ADAPTER_SYM_NAME_LEN];
+ char port_name[BFA_ADAPTER_SYM_NAME_LEN];
+ char eth_name[BFA_ADAPTER_SYM_NAME_LEN];
+ wwn_t pwwn;
+ wwn_t nwwn;
+ wwn_t factorypwwn;
+ wwn_t factorynwwn;
+ mac_t mac;
+ mac_t factory_mac; /* Factory mac address */
+ mac_t current_mac; /* Currently assigned mac address */
+ enum bfa_ioc_type_e ioc_type;
+ u16 pvid; /* Port vlan id */
+ u16 rsvd1;
+ u32 host;
+ u32 bandwidth; /* For PF support */
+ u32 rsvd2;
+};
+
+struct bfa_bsg_ioc_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_ioc_attr_s ioc_attr;
+};
+
+struct bfa_bsg_ioc_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_ioc_stats_s ioc_stats;
+};
+
+struct bfa_bsg_ioc_fwstats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 buf_size;
+ u32 rsvd1;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_iocfc_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_iocfc_attr_s iocfc_attr;
+};
+
+struct bfa_bsg_iocfc_intr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_iocfc_intr_attr_s attr;
+};
+
+struct bfa_bsg_port_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_port_attr_s attr;
+};
+
+struct bfa_bsg_port_cfg_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 param;
+ u32 rsvd1;
+};
+
+struct bfa_bsg_port_cfg_maxfrsize_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 maxfrsize;
+};
+
+struct bfa_bsg_port_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 buf_size;
+ u32 rsvd1;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_lport_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ struct bfa_lport_attr_s port_attr;
+};
+
+struct bfa_bsg_lport_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ struct bfa_lport_stats_s port_stats;
+};
+
+struct bfa_bsg_lport_iostats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ struct bfa_itnim_iostats_s iostats;
+};
+
+struct bfa_bsg_lport_get_rports_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ u64 rbuf_ptr;
+ u32 nrports;
+ u32 rsvd;
+};
+
+struct bfa_bsg_rport_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+ struct bfa_rport_attr_s attr;
+};
+
+struct bfa_bsg_rport_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+ struct bfa_rport_stats_s stats;
+};
+
+struct bfa_bsg_rport_scsi_addr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+ u32 host;
+ u32 bus;
+ u32 target;
+ u32 lun;
+};
+
+struct bfa_bsg_rport_reset_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+};
+
+struct bfa_bsg_rport_set_speed_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ enum bfa_port_speed speed;
+ u32 rsvd;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+};
+
+struct bfa_bsg_vport_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t vpwwn;
+ struct bfa_vport_attr_s vport_attr;
+};
+
+struct bfa_bsg_vport_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t vpwwn;
+ struct bfa_vport_stats_s vport_stats;
+};
+
+struct bfa_bsg_reset_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t vpwwn;
+};
+
+struct bfa_bsg_fabric_get_lports_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ u64 buf_ptr;
+ u32 nports;
+ u32 rsvd;
+};
+
+struct bfa_bsg_trl_speed_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ enum bfa_port_speed speed;
+};
+
+struct bfa_bsg_fcpim_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 param;
+};
+
+struct bfa_bsg_fcpim_modstats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ struct bfa_itnim_iostats_s modstats;
+};
+
+struct bfa_bsg_fcpim_del_itn_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ struct bfa_fcpim_del_itn_stats_s modstats;
+};
+
+struct bfa_bsg_fcpim_modstatsclr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+};
+
+struct bfa_bsg_itnim_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t rpwwn;
+ struct bfa_itnim_attr_s attr;
+};
+
+struct bfa_bsg_itnim_iostats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t rpwwn;
+ struct bfa_itnim_iostats_s iostats;
+};
+
+struct bfa_bsg_itnim_itnstats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t rpwwn;
+ struct bfa_itnim_stats_s itnstats;
+};
+
+struct bfa_bsg_pcifn_cfg_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_ablk_cfg_s pcifn_cfg;
+};
+
+struct bfa_bsg_pcifn_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 pcifn_id;
+ u32 bandwidth;
+ u8 port;
+ enum bfi_pcifn_class pcifn_class;
+ u8 rsvd[1];
+};
+
+struct bfa_bsg_adapter_cfg_mode_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_adapter_cfg_mode_s cfg;
+};
+
+struct bfa_bsg_port_cfg_mode_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 instance;
+ struct bfa_port_cfg_mode_s cfg;
+};
+
+struct bfa_bsg_faa_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_faa_attr_s faa_attr;
+};
+
+struct bfa_bsg_cee_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 buf_size;
+ u32 rsvd1;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_cee_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 buf_size;
+ u32 rsvd1;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_sfp_media_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ enum bfa_defs_sfp_media_e media;
+};
+
+struct bfa_bsg_sfp_speed_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ enum bfa_port_speed speed;
+};
+
+struct bfa_bsg_flash_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_flash_attr_s attr;
+};
+
+struct bfa_bsg_flash_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u8 instance;
+ u8 rsvd;
+ enum bfa_flash_part_type type;
+ int bufsz;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_diag_get_temp_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_diag_results_tempsensor_s result;
+};
+
+struct bfa_bsg_diag_memtest_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd[3];
+ u32 pat;
+ struct bfa_diag_memtest_result result;
+ struct bfa_diag_memtest_s memtest;
+};
+
+struct bfa_bsg_diag_loopback_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ enum bfa_port_opmode opmode;
+ enum bfa_port_speed speed;
+ u32 lpcnt;
+ u32 pat;
+ struct bfa_diag_loopback_result_s result;
+};
+
+struct bfa_bsg_diag_fwping_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 cnt;
+ u32 pattern;
+ struct bfa_diag_results_fwping result;
+};
+
+struct bfa_bsg_diag_qtest_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 force;
+ u32 queue;
+ struct bfa_diag_qtest_result_s result;
+};
+
+struct bfa_bsg_sfp_show_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct sfp_mem_s sfp;
+};
+
+struct bfa_bsg_diag_led_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_diag_ledtest_s ledtest;
+};
+
+struct bfa_bsg_diag_beacon_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ bfa_boolean_t beacon;
+ bfa_boolean_t link_e2e_beacon;
+ u32 second;
+};
+
+struct bfa_bsg_diag_lb_stat_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+};
+
+struct bfa_bsg_phy_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 instance;
+ struct bfa_phy_attr_s attr;
+};
+
+struct bfa_bsg_phy_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 instance;
+ u64 bufsz;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_debug_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 bufsz;
+ int inst_no;
+ u64 buf_ptr;
+ u64 offset;
+};
+
+struct bfa_bsg_phy_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 instance;
+ struct bfa_phy_stats_s stats;
+};
+
+struct bfa_bsg_vhba_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 pcifn_id;
+ struct bfa_vhba_attr_s attr;
+};
+
+struct bfa_bsg_boot_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_boot_cfg_s cfg;
+};
+
+struct bfa_bsg_preboot_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_boot_pbc_s cfg;
+};
+
+struct bfa_bsg_ethboot_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_ethboot_cfg_s cfg;
+};
+
+struct bfa_bsg_trunk_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_trunk_attr_s attr;
+};
+
+struct bfa_bsg_qos_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_qos_attr_s attr;
+};
+
+struct bfa_bsg_qos_vc_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_qos_vc_attr_s attr;
+};
+
+struct bfa_bsg_vf_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ struct bfa_vf_stats_s stats;
+};
+
+struct bfa_bsg_vf_reset_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+};
+
+struct bfa_bsg_fcpim_lunmask_query_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ struct bfa_lunmask_cfg_s lun_mask;
+};
+
+struct bfa_bsg_fcpim_lunmask_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+ struct scsi_lun lun;
+};
+
+struct bfa_bsg_fcpt_s {
+ bfa_status_t status;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t dpwwn;
+ u32 tsecs;
+ int cts;
+ enum fc_cos cos;
+ struct fchs_s fchs;
+};
+#define bfa_bsg_fcpt_t struct bfa_bsg_fcpt_s
+
+struct bfa_bsg_data {
+ int payload_len;
+ void *payload;
+};
+
+#define bfad_chk_iocmd_sz(__payload_len, __hdrsz, __bufsz) \
+ (((__payload_len) != ((__hdrsz) + (__bufsz))) ? \
+ BFA_STATUS_FAILED : BFA_STATUS_OK)
+
+#endif /* BFAD_BSG_H */
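
For reference, a minimal sketch (not part of the patch) of how a variable-length command would be expected to validate its payload with the bfad_chk_iocmd_sz() helper above: the passed-in payload must be exactly the fixed per-command header plus the trailing data buffer. The function name below is hypothetical.

static bfa_status_t
example_check_fwstats_sz(unsigned int payload_len, u32 buf_size)
{
	/* fixed header (struct bfa_bsg_ioc_fwstats_s) + trailing fw-stats buffer */
	return bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_ioc_fwstats_s), buf_size);
}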
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 48be0c54f2d..b412e0300dd 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -214,10 +214,10 @@ bfad_debugfs_read(struct file *file, char __user *buf,
#define BFA_REG_CT_ADDRSZ (0x40000)
#define BFA_REG_CB_ADDRSZ (0x20000)
-#define BFA_REG_ADDRSZ(__bfa) \
- ((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ? \
- BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)
-#define BFA_REG_ADDRMSK(__bfa) ((u32)(BFA_REG_ADDRSZ(__bfa) - 1))
+#define BFA_REG_ADDRSZ(__ioc) \
+ ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? \
+ BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ))
+#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1)
static bfa_status_t
bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
@@ -236,7 +236,7 @@ bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
return BFA_STATUS_EINVAL;
} else {
/* CB register space 64KB */
- if ((offset + (len<<2)) > BFA_REG_ADDRMSK(bfa))
+ if ((offset + (len<<2)) > BFA_REG_ADDRMSK(&bfa->ioc))
return BFA_STATUS_EINVAL;
}
return BFA_STATUS_OK;
@@ -317,7 +317,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
bfad->reglen = len << 2;
rb = bfa_ioc_bar0(ioc);
- addr &= BFA_REG_ADDRMSK(bfa);
+ addr &= BFA_REG_ADDRMSK(ioc);
/* offset and len sanity check */
rc = bfad_reg_offset_check(bfa, addr, len);
@@ -380,7 +380,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
}
kfree(kern_buf);
- addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */
+ addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */
/* offset and len sanity check */
rc = bfad_reg_offset_check(bfa, addr, 1);
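
The reworked macros above key the register-window size off the ASIC id (via bfa_asic_id_ctc()) instead of the PCI device id and take the IOC pointer directly. A one-line illustration (not part of the patch, helper name hypothetical) of how the mask wraps a user-supplied offset into that window:

static u32
example_clamp_reg_offset(struct bfa_ioc_s *ioc, u32 offset)
{
	return offset & BFA_REG_ADDRMSK(ioc);	/* window size - 1 */
}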
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 7f9ea90254c..bda999ad9f5 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -43,6 +43,7 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_transport.h>
+#include <scsi/scsi_bsg_fc.h>
#include "bfa_modules.h"
#include "bfa_fcs.h"
@@ -55,7 +56,7 @@
#ifdef BFA_DRIVER_VERSION
#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
#else
-#define BFAD_DRIVER_VERSION "2.3.2.3"
+#define BFAD_DRIVER_VERSION "3.0.2.2"
#endif
#define BFAD_PROTO_NAME FCPI_NAME
@@ -79,7 +80,7 @@
#define BFAD_HAL_INIT_FAIL 0x00000100
#define BFAD_FC4_PROBE_DONE 0x00000200
#define BFAD_PORT_DELETE 0x00000001
-
+#define BFAD_INTX_ON 0x00000400
/*
* BFAD related definition
*/
@@ -92,6 +93,8 @@
*/
#define BFAD_LUN_QUEUE_DEPTH 32
#define BFAD_IO_MAX_SGE SG_ALL
+#define BFAD_MIN_SECTORS 128 /* 64k */
+#define BFAD_MAX_SECTORS 0xFFFF /* 32 MB */
#define bfad_isr_t irq_handler_t
@@ -110,6 +113,7 @@ struct bfad_msix_s {
enum {
BFA_TRC_LDRV_BFAD = 1,
BFA_TRC_LDRV_IM = 2,
+ BFA_TRC_LDRV_BSG = 3,
};
enum bfad_port_pvb_type {
@@ -189,8 +193,10 @@ struct bfad_s {
struct bfa_pcidev_s hal_pcidev;
struct bfa_ioc_pci_attr_s pci_attr;
void __iomem *pci_bar0_kva;
+ void __iomem *pci_bar2_kva;
struct completion comp;
struct completion suspend;
+ struct completion enable_comp;
struct completion disable_comp;
bfa_boolean_t disable_active;
struct bfad_port_s pport; /* physical port of the BFAD */
@@ -218,6 +224,10 @@ struct bfad_s {
char *regdata;
u32 reglen;
struct dentry *bfad_dentry_files[5];
+ struct list_head free_aen_q;
+ struct list_head active_aen_q;
+ struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY];
+ spinlock_t bfad_aen_spinlock;
};
/* BFAD state machine events */
@@ -273,21 +283,6 @@ struct bfad_hal_comp {
struct completion comp;
};
-/*
- * Macro to obtain the immediate lower power
- * of two for the integer.
- */
-#define nextLowerInt(x) \
-do { \
- int __i; \
- (*x)--; \
- for (__i = 1; __i < (sizeof(int)*8); __i <<= 1) \
- (*x) = (*x) | (*x) >> __i; \
- (*x)++; \
- (*x) = (*x) >> 1; \
-} while (0)
-
-
#define BFA_LOG(level, bfad, mask, fmt, arg...) \
do { \
if (((mask) == 4) || (level[1] <= '4')) \
@@ -354,6 +349,7 @@ extern int msix_disable_ct;
extern int fdmi_enable;
extern int supported_fc4s;
extern int pcie_max_read_reqsz;
+extern int max_xfer_size;
extern int bfa_debugfs_enable;
extern struct mutex bfad_mutex;
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index c2b36179e8e..01312381639 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -175,21 +175,11 @@ bfad_im_info(struct Scsi_Host *shost)
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) shost->hostdata[0];
struct bfad_s *bfad = im_port->bfad;
- struct bfa_s *bfa = &bfad->bfa;
- struct bfa_ioc_s *ioc = &bfa->ioc;
- char model[BFA_ADAPTER_MODEL_NAME_LEN];
-
- bfa_get_adapter_model(bfa, model);
memset(bfa_buf, 0, sizeof(bfa_buf));
- if (ioc->ctdev && !ioc->fcmode)
- snprintf(bfa_buf, sizeof(bfa_buf),
- "Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s",
- model, bfad->pci_name, BFAD_DRIVER_VERSION);
- else
- snprintf(bfa_buf, sizeof(bfa_buf),
- "Brocade FC Adapter, " "model: %s hwpath: %s driver: %s",
- model, bfad->pci_name, BFAD_DRIVER_VERSION);
+ snprintf(bfa_buf, sizeof(bfa_buf),
+ "Brocade FC/FCOE Adapter, " "hwpath: %s driver: %s",
+ bfad->pci_name, BFAD_DRIVER_VERSION);
return bfa_buf;
}
@@ -572,9 +562,6 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
goto out_fc_rel;
}
- /* setup host fixed attribute if the lk supports */
- bfad_fc_host_init(im_port);
-
return 0;
out_fc_rel:
@@ -669,6 +656,31 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
+static void bfad_aen_im_notify_handler(struct work_struct *work)
+{
+ struct bfad_im_s *im =
+ container_of(work, struct bfad_im_s, aen_im_notify_work);
+ struct bfa_aen_entry_s *aen_entry;
+ struct bfad_s *bfad = im->bfad;
+ struct Scsi_Host *shost = bfad->pport.im_port->shost;
+ void *event_data;
+ unsigned long flags;
+
+ while (!list_empty(&bfad->active_aen_q)) {
+ spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
+ bfa_q_deq(&bfad->active_aen_q, &aen_entry);
+ spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
+ event_data = (char *)aen_entry + sizeof(struct list_head);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(struct bfa_aen_entry_s) -
+ sizeof(struct list_head),
+ (char *)event_data, BFAD_NL_VENDOR_ID);
+ spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
+ list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
+ spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
+ }
+}
+
bfa_status_t
bfad_im_probe(struct bfad_s *bfad)
{
@@ -689,6 +701,7 @@ bfad_im_probe(struct bfad_s *bfad)
rc = BFA_STATUS_FAILED;
}
+ INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
ext:
return rc;
}
@@ -713,6 +726,9 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
else
sht = &bfad_im_vport_template;
+ if (max_xfer_size != BFAD_MAX_SECTORS >> 1)
+ sht->max_sectors = max_xfer_size << 1;
+
sht->sg_tablesize = bfad->cfg_data.io_max_sge;
return scsi_host_alloc(sht, sizeof(unsigned long));
@@ -790,7 +806,8 @@ struct scsi_host_template bfad_im_scsi_host_template = {
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = bfad_im_host_attrs,
- .max_sectors = 0xFFFF,
+ .max_sectors = BFAD_MAX_SECTORS,
+ .vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
};
struct scsi_host_template bfad_im_vport_template = {
@@ -811,7 +828,7 @@ struct scsi_host_template bfad_im_vport_template = {
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = bfad_im_vport_attrs,
- .max_sectors = 0xFFFF,
+ .max_sectors = BFAD_MAX_SECTORS,
};
bfa_status_t
@@ -925,7 +942,10 @@ bfad_im_supported_speeds(struct bfa_s *bfa)
return 0;
bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
- if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
+ if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_16GBPS)
+ supported_speed |= FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
+ FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT;
+ else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
if (ioc_attr->adapter_attr.is_mezz) {
supported_speed |= FC_PORTSPEED_8GBIT |
FC_PORTSPEED_4GBIT |
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index c296c896851..004b6cf848d 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -115,8 +115,30 @@ struct bfad_im_s {
struct bfad_s *bfad;
struct workqueue_struct *drv_workq;
char drv_workq_name[KOBJ_NAME_LEN];
+ struct work_struct aen_im_notify_work;
};
+#define bfad_get_aen_entry(_drv, _entry) do { \
+ unsigned long _flags; \
+ spin_lock_irqsave(&(_drv)->bfad_aen_spinlock, _flags); \
+ bfa_q_deq(&(_drv)->free_aen_q, &(_entry)); \
+ if (_entry) \
+ list_add_tail(&(_entry)->qe, &(_drv)->active_aen_q); \
+ spin_unlock_irqrestore(&(_drv)->bfad_aen_spinlock, _flags); \
+} while (0)
+
+/* post fc_host vendor event */
+#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \
+ do_gettimeofday(&(_entry)->aen_tv); \
+ (_entry)->bfad_num = (_drv)->inst_no; \
+ (_entry)->seq_num = (_cnt); \
+ (_entry)->aen_category = (_cat); \
+ (_entry)->aen_type = (_evt); \
+ if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \
+ queue_work((_drv)->im->drv_workq, \
+ &(_drv)->im->aen_im_notify_work); \
+} while (0)
+
struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
struct bfad_s *);
bfa_status_t bfad_thread_workq(struct bfad_s *bfad);
@@ -141,4 +163,7 @@ extern struct device_attribute *bfad_im_vport_attrs[];
irqreturn_t bfad_intx(int irq, void *dev_id);
+int bfad_im_bsg_request(struct fc_bsg_job *job);
+int bfad_im_bsg_timeout(struct fc_bsg_job *job);
+
#endif
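
For reference, a sketch (not part of the patch) of how an event source would be expected to use the two AEN macros above: pull a free entry onto the active queue, fill in the event-specific data, then stamp and post it so aen_im_notify_work delivers it via fc_host_post_vendor_event(). The function name, sequence counter, and category/event arguments are placeholders.

static void
example_post_aen(struct bfad_s *bfad, int aen_cat, int aen_evt)
{
	struct bfa_aen_entry_s *aen_entry;
	static int seq;				/* illustration only */

	bfad_get_aen_entry(bfad, aen_entry);	/* NULL if free_aen_q is empty */
	if (!aen_entry)
		return;

	/* fill in the event-specific fields of *aen_entry here */

	bfad_im_post_vendor_event(aen_entry, bfad, ++seq, aen_cat, aen_evt);
}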
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 72b69a0c3b5..b2ba0b2e91b 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -23,17 +23,29 @@
#pragma pack(1)
+/* Per dma segment max size */
+#define BFI_MEM_DMA_SEG_SZ (131072)
+
+/* Get number of dma segments required */
+#define BFI_MEM_DMA_NSEGS(_num_reqs, _req_sz) \
+ ((u16)(((((_num_reqs) * (_req_sz)) + BFI_MEM_DMA_SEG_SZ - 1) & \
+ ~(BFI_MEM_DMA_SEG_SZ - 1)) / BFI_MEM_DMA_SEG_SZ))
+
+/* Get num dma reqs - that fit in a segment */
+#define BFI_MEM_NREQS_SEG(_rqsz) (BFI_MEM_DMA_SEG_SZ / (_rqsz))
+
+/* Get segment num from tag */
+#define BFI_MEM_SEG_FROM_TAG(_tag, _rqsz) ((_tag) / BFI_MEM_NREQS_SEG(_rqsz))
+
+/* Get dma req offset in a segment */
+#define BFI_MEM_SEG_REQ_OFFSET(_tag, _sz) \
+ ((_tag) - (BFI_MEM_SEG_FROM_TAG(_tag, _sz) * BFI_MEM_NREQS_SEG(_sz)))
+
/*
* BFI FW image type
*/
#define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
-enum {
- BFI_IMAGE_CB_FC,
- BFI_IMAGE_CT_FC,
- BFI_IMAGE_CT_CNA,
- BFI_IMAGE_MAX,
-};
/*
* Msg header common to all msgs
@@ -43,17 +55,20 @@ struct bfi_mhdr_s {
u8 msg_id; /* msg opcode with in the class */
union {
struct {
- u8 rsvd;
- u8 lpu_id; /* msg destination */
+ u8 qid;
+ u8 fn_lpu; /* msg destination */
} h2i;
u16 i2htok; /* token in msgs to host */
} mtag;
};
-#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
+#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu))
+#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1)
+
+#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do { \
(_mh).msg_class = (_mc); \
(_mh).msg_id = (_op); \
- (_mh).mtag.h2i.lpu_id = (_lpuid); \
+ (_mh).mtag.h2i.fn_lpu = (_fn_lpu); \
} while (0)
#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
@@ -101,7 +116,7 @@ union bfi_addr_u {
};
/*
- * Scatter Gather Element
+ * Scatter Gather Element used for fast-path IO requests
*/
struct bfi_sge_s {
#ifdef __BIG_ENDIAN
@@ -116,6 +131,14 @@ struct bfi_sge_s {
union bfi_addr_u sga;
};
+/**
+ * Generic DMA addr-len pair.
+ */
+struct bfi_alen_s {
+ union bfi_addr_u al_addr; /* DMA addr of buffer */
+ u32 al_len; /* length of buffer */
+};
+
/*
* Scatter Gather Page
*/
@@ -127,6 +150,12 @@ struct bfi_sgpg_s {
u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
};
+/* FCP module definitions */
+#define BFI_IO_MAX (2000)
+#define BFI_IOIM_SNSLEN (256)
+#define BFI_IOIM_SNSBUF_SEGS \
+ BFI_MEM_DMA_NSEGS(BFI_IO_MAX, BFI_IOIM_SNSLEN)
+
/*
* Large Message structure - 128 Bytes size Msgs
*/
@@ -149,18 +178,29 @@ struct bfi_mbmsg_s {
};
/*
+ * Supported PCI function class codes (personality)
+ */
+enum bfi_pcifn_class {
+ BFI_PCIFN_CLASS_FC = 0x0c04,
+ BFI_PCIFN_CLASS_ETH = 0x0200,
+};
+
+/*
* Message Classes
*/
enum bfi_mclass {
BFI_MC_IOC = 1, /* IO Controller (IOC) */
+ BFI_MC_DIAG = 2, /* Diagnostic Msgs */
+ BFI_MC_FLASH = 3, /* Flash message class */
+ BFI_MC_CEE = 4, /* CEE */
BFI_MC_FCPORT = 5, /* FC port */
BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */
- BFI_MC_LL = 7, /* Link Layer */
+ BFI_MC_ABLK = 7, /* ASIC block configuration */
BFI_MC_UF = 8, /* Unsolicited frame receive */
BFI_MC_FCXP = 9, /* FC Transport */
BFI_MC_LPS = 10, /* lport fc login services */
BFI_MC_RPORT = 11, /* Remote port */
- BFI_MC_ITNIM = 12, /* I-T nexus (Initiator mode) */
+ BFI_MC_ITN = 12, /* I-T nexus (Initiator mode) */
BFI_MC_IOIM_READ = 13, /* read IO (Initiator mode) */
BFI_MC_IOIM_WRITE = 14, /* write IO (Initiator mode) */
BFI_MC_IOIM_IO = 15, /* IO (Initiator mode) */
@@ -168,6 +208,8 @@ enum bfi_mclass {
BFI_MC_IOIM_IOCOM = 17, /* good IO completion */
BFI_MC_TSKIM = 18, /* Initiator Task management */
BFI_MC_PORT = 21, /* Physical port */
+ BFI_MC_SFP = 22, /* SFP module */
+ BFI_MC_PHY = 25, /* External PHY message class */
BFI_MC_MAX = 32
};
@@ -175,23 +217,28 @@ enum bfi_mclass {
#define BFI_IOC_MAX_CQS_ASIC 8
#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
-#define BFI_BOOT_TYPE_OFF 8
-#define BFI_BOOT_LOADER_OFF 12
-
-#define BFI_BOOT_TYPE_NORMAL 0
-#define BFI_BOOT_TYPE_FLASH 1
-#define BFI_BOOT_TYPE_MEMTEST 2
-
-#define BFI_BOOT_LOADER_OS 0
-#define BFI_BOOT_LOADER_BIOS 1
-#define BFI_BOOT_LOADER_UEFI 2
-
/*
*----------------------------------------------------------------------
* IOC
*----------------------------------------------------------------------
*/
+/*
+ * Different asic generations
+ */
+enum bfi_asic_gen {
+ BFI_ASIC_GEN_CB = 1, /* crossbow 8G FC */
+ BFI_ASIC_GEN_CT = 2, /* catapult 8G FC or 10G CNA */
+ BFI_ASIC_GEN_CT2 = 3, /* catapult-2 16G FC or 10G CNA */
+};
+
+enum bfi_asic_mode {
+ BFI_ASIC_MODE_FC = 1, /* FC up to 8G speed */
+ BFI_ASIC_MODE_FC16 = 2, /* FC up to 16G speed */
+ BFI_ASIC_MODE_ETH = 3, /* Ethernet ports */
+ BFI_ASIC_MODE_COMBO = 4, /* FC 16G and Ethernet 10G port */
+};
+
enum bfi_ioc_h2i_msgs {
BFI_IOC_H2I_ENABLE_REQ = 1,
BFI_IOC_H2I_DISABLE_REQ = 2,
@@ -204,8 +251,8 @@ enum bfi_ioc_i2h_msgs {
BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
- BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
- BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
+ BFI_IOC_I2H_HBEAT = BFA_I2HM(4),
+ BFI_IOC_I2H_ACQ_ADDR_REPLY = BFA_I2HM(5),
};
/*
@@ -220,7 +267,8 @@ struct bfi_ioc_attr_s {
wwn_t mfg_pwwn; /* Mfg port wwn */
wwn_t mfg_nwwn; /* Mfg node wwn */
mac_t mfg_mac; /* Mfg mac */
- u16 rsvd_a;
+ u8 port_mode; /* bfi_port_mode */
+ u8 rsvd_a;
wwn_t pwwn;
wwn_t nwwn;
mac_t mac; /* PBC or Mfg mac */
@@ -272,21 +320,33 @@ struct bfi_ioc_getattr_reply_s {
#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
#define BFI_IOC_MD5SUM_SZ 4
struct bfi_ioc_image_hdr_s {
- u32 signature; /* constant signature */
- u32 rsvd_a;
- u32 exec; /* exec vector */
- u32 param; /* parameters */
+ u32 signature; /* constant signature */
+ u8 asic_gen; /* asic generation */
+ u8 asic_mode;
+ u8 port0_mode; /* device mode for port 0 */
+ u8 port1_mode; /* device mode for port 1 */
+ u32 exec; /* exec vector */
+ u32 bootenv; /* firmware boot env */
u32 rsvd_b[4];
u32 md5sum[BFI_IOC_MD5SUM_SZ];
};
-/*
- * BFI_IOC_I2H_READY_EVENT message
- */
-struct bfi_ioc_rdy_event_s {
- struct bfi_mhdr_s mh; /* common msg header */
- u8 init_status; /* init event status */
- u8 rsvd[3];
+#define BFI_FWBOOT_DEVMODE_OFF 4
+#define BFI_FWBOOT_TYPE_OFF 8
+#define BFI_FWBOOT_ENV_OFF 12
+#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \
+ (((u32)(__asic_gen)) << 24 | \
+ ((u32)(__asic_mode)) << 16 | \
+ ((u32)(__p0_mode)) << 8 | \
+ ((u32)(__p1_mode)))
+
+#define BFI_FWBOOT_TYPE_NORMAL 0
+#define BFI_FWBOOT_TYPE_MEMTEST 2
+#define BFI_FWBOOT_ENV_OS 0
+
+enum bfi_port_mode {
+ BFI_PORT_MODE_FC = 1,
+ BFI_PORT_MODE_ETH = 2,
};
struct bfi_ioc_hbeat_s {
@@ -345,8 +405,8 @@ enum {
*/
struct bfi_ioc_ctrl_req_s {
struct bfi_mhdr_s mh;
- u8 ioc_class;
- u8 rsvd[3];
+ u16 clscode;
+ u16 rsvd;
u32 tv_sec;
};
#define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s;
@@ -358,7 +418,9 @@ struct bfi_ioc_ctrl_req_s {
struct bfi_ioc_ctrl_reply_s {
struct bfi_mhdr_s mh; /* Common msg header */
u8 status; /* enable/disable status */
- u8 rsvd[3];
+ u8 port_mode; /* bfa_mode_s */
+ u8 cap_bm; /* capability bit mask */
+ u8 rsvd;
};
#define bfi_ioc_enable_reply_t struct bfi_ioc_ctrl_reply_s;
#define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s;
@@ -380,7 +442,7 @@ union bfi_ioc_h2i_msg_u {
*/
union bfi_ioc_i2h_msg_u {
struct bfi_mhdr_s mh;
- struct bfi_ioc_rdy_event_s rdy_event;
+ struct bfi_ioc_ctrl_reply_s fw_event;
u32 mboxmsg[BFI_IOC_MSGSZ];
};
@@ -393,6 +455,7 @@ union bfi_ioc_i2h_msg_u {
#define BFI_PBC_MAX_BLUNS 8
#define BFI_PBC_MAX_VPORTS 16
+#define BFI_PBC_PORT_DISABLED 2
/*
* PBC boot lun configuration
@@ -574,6 +637,516 @@ union bfi_port_i2h_msg_u {
struct bfi_port_generic_rsp_s clearstats_rsp;
};
+/*
+ *----------------------------------------------------------------------
+ * ABLK
+ *----------------------------------------------------------------------
+ */
+enum bfi_ablk_h2i_msgs_e {
+ BFI_ABLK_H2I_QUERY = 1,
+ BFI_ABLK_H2I_ADPT_CONFIG = 2,
+ BFI_ABLK_H2I_PORT_CONFIG = 3,
+ BFI_ABLK_H2I_PF_CREATE = 4,
+ BFI_ABLK_H2I_PF_DELETE = 5,
+ BFI_ABLK_H2I_PF_UPDATE = 6,
+ BFI_ABLK_H2I_OPTROM_ENABLE = 7,
+ BFI_ABLK_H2I_OPTROM_DISABLE = 8,
+};
+
+enum bfi_ablk_i2h_msgs_e {
+ BFI_ABLK_I2H_QUERY = BFA_I2HM(BFI_ABLK_H2I_QUERY),
+ BFI_ABLK_I2H_ADPT_CONFIG = BFA_I2HM(BFI_ABLK_H2I_ADPT_CONFIG),
+ BFI_ABLK_I2H_PORT_CONFIG = BFA_I2HM(BFI_ABLK_H2I_PORT_CONFIG),
+ BFI_ABLK_I2H_PF_CREATE = BFA_I2HM(BFI_ABLK_H2I_PF_CREATE),
+ BFI_ABLK_I2H_PF_DELETE = BFA_I2HM(BFI_ABLK_H2I_PF_DELETE),
+ BFI_ABLK_I2H_PF_UPDATE = BFA_I2HM(BFI_ABLK_H2I_PF_UPDATE),
+ BFI_ABLK_I2H_OPTROM_ENABLE = BFA_I2HM(BFI_ABLK_H2I_OPTROM_ENABLE),
+ BFI_ABLK_I2H_OPTROM_DISABLE = BFA_I2HM(BFI_ABLK_H2I_OPTROM_DISABLE),
+};
+
+/* BFI_ABLK_H2I_QUERY */
+struct bfi_ablk_h2i_query_s {
+ struct bfi_mhdr_s mh;
+ union bfi_addr_u addr;
+};
+
+/* BFI_ABL_H2I_ADPT_CONFIG, BFI_ABLK_H2I_PORT_CONFIG */
+struct bfi_ablk_h2i_cfg_req_s {
+ struct bfi_mhdr_s mh;
+ u8 mode;
+ u8 port;
+ u8 max_pf;
+ u8 max_vf;
+};
+
+/*
+ * BFI_ABLK_H2I_PF_CREATE, BFI_ABLK_H2I_PF_DELETE,
+ */
+struct bfi_ablk_h2i_pf_req_s {
+ struct bfi_mhdr_s mh;
+ u8 pcifn;
+ u8 port;
+ u16 pers;
+ u32 bw;
+};
+
+/* BFI_ABLK_H2I_OPTROM_ENABLE, BFI_ABLK_H2I_OPTROM_DISABLE */
+struct bfi_ablk_h2i_optrom_s {
+ struct bfi_mhdr_s mh;
+};
+
+/*
+ * BFI_ABLK_I2H_QUERY
+ * BFI_ABLK_I2H_PORT_CONFIG
+ * BFI_ABLK_I2H_PF_CREATE
+ * BFI_ABLK_I2H_PF_DELETE
+ * BFI_ABLK_I2H_PF_UPDATE
+ * BFI_ABLK_I2H_OPTROM_ENABLE
+ * BFI_ABLK_I2H_OPTROM_DISABLE
+ */
+struct bfi_ablk_i2h_rsp_s {
+ struct bfi_mhdr_s mh;
+ u8 status;
+ u8 pcifn;
+ u8 port_mode;
+};
+
+
+/*
+ * CEE module specific messages
+ */
+
+/* Mailbox commands from host to firmware */
+enum bfi_cee_h2i_msgs_e {
+ BFI_CEE_H2I_GET_CFG_REQ = 1,
+ BFI_CEE_H2I_RESET_STATS = 2,
+ BFI_CEE_H2I_GET_STATS_REQ = 3,
+};
+
+enum bfi_cee_i2h_msgs_e {
+ BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
+ BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
+ BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
+};
+
+/*
+ * H2I command structure for resetting the stats
+ */
+struct bfi_cee_reset_stats_s {
+ struct bfi_mhdr_s mh;
+};
+
+/*
+ * Get configuration command from host
+ */
+struct bfi_cee_get_req_s {
+ struct bfi_mhdr_s mh;
+ union bfi_addr_u dma_addr;
+};
+
+/*
+ * Reply message from firmware
+ */
+struct bfi_cee_get_rsp_s {
+ struct bfi_mhdr_s mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+/*
+ * Reply message from firmware
+ */
+struct bfi_cee_stats_rsp_s {
+ struct bfi_mhdr_s mh;
+ u8 cmd_status;
+ u8 rsvd[3];
+};
+
+/* Mailbox message structures from firmware to host */
+union bfi_cee_i2h_msg_u {
+ struct bfi_mhdr_s mh;
+ struct bfi_cee_get_rsp_s get_rsp;
+ struct bfi_cee_stats_rsp_s stats_rsp;
+};
+
+/*
+ * SFP related
+ */
+
+enum bfi_sfp_h2i_e {
+ BFI_SFP_H2I_SHOW = 1,
+ BFI_SFP_H2I_SCN = 2,
+};
+
+enum bfi_sfp_i2h_e {
+ BFI_SFP_I2H_SHOW = BFA_I2HM(BFI_SFP_H2I_SHOW),
+ BFI_SFP_I2H_SCN = BFA_I2HM(BFI_SFP_H2I_SCN),
+};
+
+/*
+ * SFP state change notification
+ */
+struct bfi_sfp_scn_s {
+ struct bfi_mhdr_s mhr; /* host msg header */
+ u8 event;
+ u8 sfpid;
+ u8 pomlvl; /* pom level: normal/warning/alarm */
+ u8 is_elb; /* e-loopback */
+};
+
+/*
+ * SFP state
+ */
+enum bfa_sfp_stat_e {
+ BFA_SFP_STATE_INIT = 0, /* SFP state is uninit */
+ BFA_SFP_STATE_REMOVED = 1, /* SFP is removed */
+ BFA_SFP_STATE_INSERTED = 2, /* SFP is inserted */
+ BFA_SFP_STATE_VALID = 3, /* SFP is valid */
+ BFA_SFP_STATE_UNSUPPORT = 4, /* SFP is unsupported */
+ BFA_SFP_STATE_FAILED = 5, /* SFP i2c read failed */
+};
+
+/*
+ * SFP memory access type
+ */
+enum bfi_sfp_mem_e {
+ BFI_SFP_MEM_ALL = 0x1, /* access all data field */
+ BFI_SFP_MEM_DIAGEXT = 0x2, /* access diag ext data field only */
+};
+
+struct bfi_sfp_req_s {
+ struct bfi_mhdr_s mh;
+ u8 memtype;
+ u8 rsvd[3];
+ struct bfi_alen_s alen;
+};
+
+struct bfi_sfp_rsp_s {
+ struct bfi_mhdr_s mh;
+ u8 status;
+ u8 state;
+ u8 rsvd[2];
+};
+
+/*
+ * FLASH module specific
+ */
+enum bfi_flash_h2i_msgs {
+ BFI_FLASH_H2I_QUERY_REQ = 1,
+ BFI_FLASH_H2I_ERASE_REQ = 2,
+ BFI_FLASH_H2I_WRITE_REQ = 3,
+ BFI_FLASH_H2I_READ_REQ = 4,
+ BFI_FLASH_H2I_BOOT_VER_REQ = 5,
+};
+
+enum bfi_flash_i2h_msgs {
+ BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1),
+ BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2),
+ BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3),
+ BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4),
+ BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5),
+ BFI_FLASH_I2H_EVENT = BFA_I2HM(127),
+};
+
+/*
+ * Flash query request
+ */
+struct bfi_flash_query_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ struct bfi_alen_s alen;
+};
+
+/*
+ * Flash erase request
+ */
+struct bfi_flash_erase_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+};
+
+/*
+ * Flash write request
+ */
+struct bfi_flash_write_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ struct bfi_alen_s alen;
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 last;
+ u8 rsv[2];
+ u32 offset;
+ u32 length;
+};
+
+/*
+ * Flash read request
+ */
+struct bfi_flash_read_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 offset;
+ u32 length;
+ struct bfi_alen_s alen;
+};
+
+/*
+ * Flash query response
+ */
+struct bfi_flash_query_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 status;
+};
+
+/*
+ * Flash read response
+ */
+struct bfi_flash_read_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 status;
+ u32 length;
+};
+
+/*
+ * Flash write response
+ */
+struct bfi_flash_write_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 status;
+ u32 length;
+};
+
+/*
+ * Flash erase response
+ */
+struct bfi_flash_erase_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 status;
+};
+
+/*
+ * Flash event notification
+ */
+struct bfi_flash_event_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ bfa_status_t status;
+ u32 param;
+};
+
+/*
+ *----------------------------------------------------------------------
+ * DIAG
+ *----------------------------------------------------------------------
+ */
+enum bfi_diag_h2i {
+ BFI_DIAG_H2I_PORTBEACON = 1,
+ BFI_DIAG_H2I_LOOPBACK = 2,
+ BFI_DIAG_H2I_FWPING = 3,
+ BFI_DIAG_H2I_TEMPSENSOR = 4,
+ BFI_DIAG_H2I_LEDTEST = 5,
+ BFI_DIAG_H2I_QTEST = 6,
+};
+
+enum bfi_diag_i2h {
+ BFI_DIAG_I2H_PORTBEACON = BFA_I2HM(BFI_DIAG_H2I_PORTBEACON),
+ BFI_DIAG_I2H_LOOPBACK = BFA_I2HM(BFI_DIAG_H2I_LOOPBACK),
+ BFI_DIAG_I2H_FWPING = BFA_I2HM(BFI_DIAG_H2I_FWPING),
+ BFI_DIAG_I2H_TEMPSENSOR = BFA_I2HM(BFI_DIAG_H2I_TEMPSENSOR),
+ BFI_DIAG_I2H_LEDTEST = BFA_I2HM(BFI_DIAG_H2I_LEDTEST),
+ BFI_DIAG_I2H_QTEST = BFA_I2HM(BFI_DIAG_H2I_QTEST),
+};
+
+#define BFI_DIAG_MAX_SGES 2
+#define BFI_DIAG_DMA_BUF_SZ (2 * 1024)
+#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
+#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
+
+struct bfi_diag_lb_req_s {
+ struct bfi_mhdr_s mh;
+ u32 loopcnt;
+ u32 pattern;
+ u8 lb_mode; /*!< bfa_port_opmode_t */
+ u8 speed; /*!< bfa_port_speed_t */
+ u8 rsvd[2];
+};
+
+struct bfi_diag_lb_rsp_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ struct bfa_diag_loopback_result_s res; /* 16 bytes */
+};
+
+struct bfi_diag_fwping_req_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ struct bfi_alen_s alen; /* 12 bytes */
+ u32 data; /* user input data pattern */
+ u32 count; /* user input dma count */
+ u8 qtag; /* track CPE vc */
+ u8 rsv[3];
+};
+
+struct bfi_diag_fwping_rsp_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ u32 data; /* user input data pattern */
+ u8 qtag; /* track CPE vc */
+ u8 dma_status; /* dma status */
+ u8 rsv[2];
+};
+
+/*
+ * Temperature Sensor
+ */
+struct bfi_diag_ts_req_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ u16 temp; /* 10-bit A/D value */
+ u16 brd_temp; /* 9-bit board temp */
+ u8 status;
+ u8 ts_junc; /* show junction tempsensor */
+ u8 ts_brd; /* show board tempsensor */
+ u8 rsv;
+};
+#define bfi_diag_ts_rsp_t struct bfi_diag_ts_req_s
+
+struct bfi_diag_ledtest_req_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ u8 cmd;
+ u8 color;
+ u8 portid;
+ u8 led; /* bitmap of LEDs to be tested */
+ u16 freq; /* no. of blinks every 10 secs */
+ u8 rsv[2];
+};
+
+/* notify host led operation is done */
+struct bfi_diag_ledtest_rsp_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+};
+
+struct bfi_diag_portbeacon_req_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ u32 period; /* beaconing period */
+ u8 beacon; /* 1: beacon on */
+ u8 rsvd[3];
+};
+
+/* notify host the beacon is off */
+struct bfi_diag_portbeacon_rsp_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+};
+
+struct bfi_diag_qtest_req_s {
+ struct bfi_mhdr_s mh; /* 4 bytes */
+ u32 data[BFI_LMSG_PL_WSZ]; /* fill up tcm prefetch area */
+};
+#define bfi_diag_qtest_rsp_t struct bfi_diag_qtest_req_s
+
+/*
+ * PHY module specific
+ */
+enum bfi_phy_h2i_msgs_e {
+ BFI_PHY_H2I_QUERY_REQ = 1,
+ BFI_PHY_H2I_STATS_REQ = 2,
+ BFI_PHY_H2I_WRITE_REQ = 3,
+ BFI_PHY_H2I_READ_REQ = 4,
+};
+
+enum bfi_phy_i2h_msgs_e {
+ BFI_PHY_I2H_QUERY_RSP = BFA_I2HM(1),
+ BFI_PHY_I2H_STATS_RSP = BFA_I2HM(2),
+ BFI_PHY_I2H_WRITE_RSP = BFA_I2HM(3),
+ BFI_PHY_I2H_READ_RSP = BFA_I2HM(4),
+};
+
+/*
+ * External PHY query request
+ */
+struct bfi_phy_query_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u8 instance;
+ u8 rsv[3];
+ struct bfi_alen_s alen;
+};
+
+/*
+ * External PHY stats request
+ */
+struct bfi_phy_stats_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u8 instance;
+ u8 rsv[3];
+ struct bfi_alen_s alen;
+};
+
+/*
+ * External PHY write request
+ */
+struct bfi_phy_write_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u8 instance;
+ u8 last;
+ u8 rsv[2];
+ u32 offset;
+ u32 length;
+ struct bfi_alen_s alen;
+};
+
+/*
+ * External PHY read request
+ */
+struct bfi_phy_read_req_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u8 instance;
+ u8 rsv[3];
+ u32 offset;
+ u32 length;
+ struct bfi_alen_s alen;
+};
+
+/*
+ * External PHY query response
+ */
+struct bfi_phy_query_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 status;
+};
+
+/*
+ * External PHY stats response
+ */
+struct bfi_phy_stats_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 status;
+};
+
+/*
+ * External PHY read response
+ */
+struct bfi_phy_read_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 status;
+ u32 length;
+};
+
+/*
+ * External PHY write response
+ */
+struct bfi_phy_write_rsp_s {
+ struct bfi_mhdr_s mh; /* Common msg header */
+ u32 status;
+ u32 length;
+};
+
#pragma pack()
#endif /* __BFI_H__ */
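
A worked example (not part of the patch) of the DMA-segment helpers added near the top of bfi.h, using the FCP sense-buffer pool sizes also defined there: 2000 requests of 256 bytes (512000 bytes total) round up to 4 segments of 128 KB, 512 requests fit per segment, and a request tag maps to a (segment, offset) pair. The function name is hypothetical.

static void
example_sns_segments(void)
{
	u16 nsegs   = BFI_MEM_DMA_NSEGS(BFI_IO_MAX, BFI_IOIM_SNSLEN); /* 4 */
	u32 per_seg = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);	       /* 512 */
	u32 seg     = BFI_MEM_SEG_FROM_TAG(1000, BFI_IOIM_SNSLEN);    /* 1 */
	u32 off     = BFI_MEM_SEG_REQ_OFFSET(1000, BFI_IOIM_SNSLEN);  /* 488 */

	(void)nsegs; (void)per_seg; (void)seg; (void)off;
}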
diff --git a/drivers/scsi/bfa/bfi_cbreg.h b/drivers/scsi/bfa/bfi_cbreg.h
deleted file mode 100644
index 39ad42b66b5..00000000000
--- a/drivers/scsi/bfa/bfi_cbreg.h
+++ /dev/null
@@ -1,305 +0,0 @@
-
-/*
- * bfi_cbreg.h crossbow host block register definitions
- *
- * !!! Do not edit. Auto generated. !!!
- */
-
-#ifndef __BFI_CBREG_H__
-#define __BFI_CBREG_H__
-
-
-#define HOSTFN0_INT_STATUS 0x00014000
-#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
-#define __HOSTFN0_INT_STATUS_LVL_SH 20
-#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
-#define __HOSTFN0_INT_STATUS_P 0x000fffff
-#define HOSTFN0_INT_MSK 0x00014004
-#define HOST_PAGE_NUM_FN0 0x00014008
-#define __HOST_PAGE_NUM_FN 0x000001ff
-#define HOSTFN1_INT_STATUS 0x00014100
-#define __HOSTFN1_INT_STAT_LVL_MK 0x00f00000
-#define __HOSTFN1_INT_STAT_LVL_SH 20
-#define __HOSTFN1_INT_STAT_LVL(_v) ((_v) << __HOSTFN1_INT_STAT_LVL_SH)
-#define __HOSTFN1_INT_STAT_P 0x000fffff
-#define HOSTFN1_INT_MSK 0x00014104
-#define HOST_PAGE_NUM_FN1 0x00014108
-#define APP_PLL_400_CTL_REG 0x00014204
-#define __P_400_PLL_LOCK 0x80000000
-#define __APP_PLL_400_SRAM_USE_100MHZ 0x00100000
-#define __APP_PLL_400_RESET_TIMER_MK 0x000e0000
-#define __APP_PLL_400_RESET_TIMER_SH 17
-#define __APP_PLL_400_RESET_TIMER(_v) ((_v) << __APP_PLL_400_RESET_TIMER_SH)
-#define __APP_PLL_400_LOGIC_SOFT_RESET 0x00010000
-#define __APP_PLL_400_CNTLMT0_1_MK 0x0000c000
-#define __APP_PLL_400_CNTLMT0_1_SH 14
-#define __APP_PLL_400_CNTLMT0_1(_v) ((_v) << __APP_PLL_400_CNTLMT0_1_SH)
-#define __APP_PLL_400_JITLMT0_1_MK 0x00003000
-#define __APP_PLL_400_JITLMT0_1_SH 12
-#define __APP_PLL_400_JITLMT0_1(_v) ((_v) << __APP_PLL_400_JITLMT0_1_SH)
-#define __APP_PLL_400_HREF 0x00000800
-#define __APP_PLL_400_HDIV 0x00000400
-#define __APP_PLL_400_P0_1_MK 0x00000300
-#define __APP_PLL_400_P0_1_SH 8
-#define __APP_PLL_400_P0_1(_v) ((_v) << __APP_PLL_400_P0_1_SH)
-#define __APP_PLL_400_Z0_2_MK 0x000000e0
-#define __APP_PLL_400_Z0_2_SH 5
-#define __APP_PLL_400_Z0_2(_v) ((_v) << __APP_PLL_400_Z0_2_SH)
-#define __APP_PLL_400_RSEL200500 0x00000010
-#define __APP_PLL_400_ENARST 0x00000008
-#define __APP_PLL_400_BYPASS 0x00000004
-#define __APP_PLL_400_LRESETN 0x00000002
-#define __APP_PLL_400_ENABLE 0x00000001
-#define APP_PLL_212_CTL_REG 0x00014208
-#define __P_212_PLL_LOCK 0x80000000
-#define __APP_PLL_212_RESET_TIMER_MK 0x000e0000
-#define __APP_PLL_212_RESET_TIMER_SH 17
-#define __APP_PLL_212_RESET_TIMER(_v) ((_v) << __APP_PLL_212_RESET_TIMER_SH)
-#define __APP_PLL_212_LOGIC_SOFT_RESET 0x00010000
-#define __APP_PLL_212_CNTLMT0_1_MK 0x0000c000
-#define __APP_PLL_212_CNTLMT0_1_SH 14
-#define __APP_PLL_212_CNTLMT0_1(_v) ((_v) << __APP_PLL_212_CNTLMT0_1_SH)
-#define __APP_PLL_212_JITLMT0_1_MK 0x00003000
-#define __APP_PLL_212_JITLMT0_1_SH 12
-#define __APP_PLL_212_JITLMT0_1(_v) ((_v) << __APP_PLL_212_JITLMT0_1_SH)
-#define __APP_PLL_212_HREF 0x00000800
-#define __APP_PLL_212_HDIV 0x00000400
-#define __APP_PLL_212_P0_1_MK 0x00000300
-#define __APP_PLL_212_P0_1_SH 8
-#define __APP_PLL_212_P0_1(_v) ((_v) << __APP_PLL_212_P0_1_SH)
-#define __APP_PLL_212_Z0_2_MK 0x000000e0
-#define __APP_PLL_212_Z0_2_SH 5
-#define __APP_PLL_212_Z0_2(_v) ((_v) << __APP_PLL_212_Z0_2_SH)
-#define __APP_PLL_212_RSEL200500 0x00000010
-#define __APP_PLL_212_ENARST 0x00000008
-#define __APP_PLL_212_BYPASS 0x00000004
-#define __APP_PLL_212_LRESETN 0x00000002
-#define __APP_PLL_212_ENABLE 0x00000001
-#define HOST_SEM0_REG 0x00014230
-#define __HOST_SEMAPHORE 0x00000001
-#define HOST_SEM1_REG 0x00014234
-#define HOST_SEM2_REG 0x00014238
-#define HOST_SEM3_REG 0x0001423c
-#define HOST_SEM0_INFO_REG 0x00014240
-#define HOST_SEM1_INFO_REG 0x00014244
-#define HOST_SEM2_INFO_REG 0x00014248
-#define HOST_SEM3_INFO_REG 0x0001424c
-#define HOSTFN0_LPU0_CMD_STAT 0x00019000
-#define __HOSTFN0_LPU0_MBOX_INFO_MK 0xfffffffe
-#define __HOSTFN0_LPU0_MBOX_INFO_SH 1
-#define __HOSTFN0_LPU0_MBOX_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX_INFO_SH)
-#define __HOSTFN0_LPU0_MBOX_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN0_CMD_STAT 0x00019008
-#define __LPU0_HOSTFN0_MBOX_INFO_MK 0xfffffffe
-#define __LPU0_HOSTFN0_MBOX_INFO_SH 1
-#define __LPU0_HOSTFN0_MBOX_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX_INFO_SH)
-#define __LPU0_HOSTFN0_MBOX_CMD_STATUS 0x00000001
-#define HOSTFN1_LPU1_CMD_STAT 0x00019014
-#define __HOSTFN1_LPU1_MBOX_INFO_MK 0xfffffffe
-#define __HOSTFN1_LPU1_MBOX_INFO_SH 1
-#define __HOSTFN1_LPU1_MBOX_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX_INFO_SH)
-#define __HOSTFN1_LPU1_MBOX_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN1_CMD_STAT 0x0001901c
-#define __LPU1_HOSTFN1_MBOX_INFO_MK 0xfffffffe
-#define __LPU1_HOSTFN1_MBOX_INFO_SH 1
-#define __LPU1_HOSTFN1_MBOX_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX_INFO_SH)
-#define __LPU1_HOSTFN1_MBOX_CMD_STATUS 0x00000001
-#define CPE_Q0_DEPTH 0x00010014
-#define CPE_Q0_PI 0x0001001c
-#define CPE_Q0_CI 0x00010020
-#define CPE_Q1_DEPTH 0x00010034
-#define CPE_Q1_PI 0x0001003c
-#define CPE_Q1_CI 0x00010040
-#define CPE_Q2_DEPTH 0x00010054
-#define CPE_Q2_PI 0x0001005c
-#define CPE_Q2_CI 0x00010060
-#define CPE_Q3_DEPTH 0x00010074
-#define CPE_Q3_PI 0x0001007c
-#define CPE_Q3_CI 0x00010080
-#define CPE_Q4_DEPTH 0x00010094
-#define CPE_Q4_PI 0x0001009c
-#define CPE_Q4_CI 0x000100a0
-#define CPE_Q5_DEPTH 0x000100b4
-#define CPE_Q5_PI 0x000100bc
-#define CPE_Q5_CI 0x000100c0
-#define CPE_Q6_DEPTH 0x000100d4
-#define CPE_Q6_PI 0x000100dc
-#define CPE_Q6_CI 0x000100e0
-#define CPE_Q7_DEPTH 0x000100f4
-#define CPE_Q7_PI 0x000100fc
-#define CPE_Q7_CI 0x00010100
-#define RME_Q0_DEPTH 0x00011014
-#define RME_Q0_PI 0x0001101c
-#define RME_Q0_CI 0x00011020
-#define RME_Q1_DEPTH 0x00011034
-#define RME_Q1_PI 0x0001103c
-#define RME_Q1_CI 0x00011040
-#define RME_Q2_DEPTH 0x00011054
-#define RME_Q2_PI 0x0001105c
-#define RME_Q2_CI 0x00011060
-#define RME_Q3_DEPTH 0x00011074
-#define RME_Q3_PI 0x0001107c
-#define RME_Q3_CI 0x00011080
-#define RME_Q4_DEPTH 0x00011094
-#define RME_Q4_PI 0x0001109c
-#define RME_Q4_CI 0x000110a0
-#define RME_Q5_DEPTH 0x000110b4
-#define RME_Q5_PI 0x000110bc
-#define RME_Q5_CI 0x000110c0
-#define RME_Q6_DEPTH 0x000110d4
-#define RME_Q6_PI 0x000110dc
-#define RME_Q6_CI 0x000110e0
-#define RME_Q7_DEPTH 0x000110f4
-#define RME_Q7_PI 0x000110fc
-#define RME_Q7_CI 0x00011100
-#define PSS_CTL_REG 0x00018800
-#define __PSS_I2C_CLK_DIV_MK 0x00030000
-#define __PSS_I2C_CLK_DIV_SH 16
-#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
-#define __PSS_LMEM_INIT_DONE 0x00001000
-#define __PSS_LMEM_RESET 0x00000200
-#define __PSS_LMEM_INIT_EN 0x00000100
-#define __PSS_LPU1_RESET 0x00000002
-#define __PSS_LPU0_RESET 0x00000001
-#define PSS_ERR_STATUS_REG 0x00018810
-#define __PSS_LMEM1_CORR_ERR 0x00000800
-#define __PSS_LMEM0_CORR_ERR 0x00000400
-#define __PSS_LMEM1_UNCORR_ERR 0x00000200
-#define __PSS_LMEM0_UNCORR_ERR 0x00000100
-#define __PSS_BAL_PERR 0x00000080
-#define __PSS_DIP_IF_ERR 0x00000040
-#define __PSS_IOH_IF_ERR 0x00000020
-#define __PSS_TDS_IF_ERR 0x00000010
-#define __PSS_RDS_IF_ERR 0x00000008
-#define __PSS_SGM_IF_ERR 0x00000004
-#define __PSS_LPU1_RAM_ERR 0x00000002
-#define __PSS_LPU0_RAM_ERR 0x00000001
-#define ERR_SET_REG 0x00018818
-#define __PSS_ERR_STATUS_SET 0x00000fff
-
-
-/*
- * These definitions are either in error/missing in spec. Its auto-generated
- * from hard coded values in regparse.pl.
- */
-#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
-#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
-#define __EMPHPRE_AT_4G_FIX 0x00000003
-#define __SFP_TXRATE_EN_FIX 0x00000100
-#define __SFP_RXRATE_EN_FIX 0x00000080
-
-
-/*
- * These register definitions are auto-generated from hard coded values
- * in regparse.pl.
- */
-#define HOSTFN0_LPU_MBOX0_0 0x00019200
-#define HOSTFN1_LPU_MBOX0_8 0x00019260
-#define LPU_HOSTFN0_MBOX0_0 0x00019280
-#define LPU_HOSTFN1_MBOX0_8 0x000192e0
-
-
-/*
- * These register mapping definitions are auto-generated from mapping tables
- * in regparse.pl.
- */
-#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
-#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
-#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
-#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
-#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
-#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
-
-#define CPE_Q_DEPTH(__n) \
- (CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH))
-#define CPE_Q_PI(__n) \
- (CPE_Q0_PI + (__n) * (CPE_Q1_PI - CPE_Q0_PI))
-#define CPE_Q_CI(__n) \
- (CPE_Q0_CI + (__n) * (CPE_Q1_CI - CPE_Q0_CI))
-#define RME_Q_DEPTH(__n) \
- (RME_Q0_DEPTH + (__n) * (RME_Q1_DEPTH - RME_Q0_DEPTH))
-#define RME_Q_PI(__n) \
- (RME_Q0_PI + (__n) * (RME_Q1_PI - RME_Q0_PI))
-#define RME_Q_CI(__n) \
- (RME_Q0_CI + (__n) * (RME_Q1_CI - RME_Q0_CI))
-
-#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
-#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
-#define CPE_Q_MASK(__q) ((__q) & 0x3)
-#define RME_Q_MASK(__q) ((__q) & 0x3)
-
-
-/*
- * PCI MSI-X vector defines
- */
-enum {
- BFA_MSIX_CPE_Q0 = 0,
- BFA_MSIX_CPE_Q1 = 1,
- BFA_MSIX_CPE_Q2 = 2,
- BFA_MSIX_CPE_Q3 = 3,
- BFA_MSIX_CPE_Q4 = 4,
- BFA_MSIX_CPE_Q5 = 5,
- BFA_MSIX_CPE_Q6 = 6,
- BFA_MSIX_CPE_Q7 = 7,
- BFA_MSIX_RME_Q0 = 8,
- BFA_MSIX_RME_Q1 = 9,
- BFA_MSIX_RME_Q2 = 10,
- BFA_MSIX_RME_Q3 = 11,
- BFA_MSIX_RME_Q4 = 12,
- BFA_MSIX_RME_Q5 = 13,
- BFA_MSIX_RME_Q6 = 14,
- BFA_MSIX_RME_Q7 = 15,
- BFA_MSIX_ERR_EMC = 16,
- BFA_MSIX_ERR_LPU0 = 17,
- BFA_MSIX_ERR_LPU1 = 18,
- BFA_MSIX_ERR_PSS = 19,
- BFA_MSIX_MBOX_LPU0 = 20,
- BFA_MSIX_MBOX_LPU1 = 21,
- BFA_MSIX_CB_MAX = 22,
-};
-
-/*
- * And corresponding host interrupt status bit field defines
- */
-#define __HFN_INT_CPE_Q0 0x00000001U
-#define __HFN_INT_CPE_Q1 0x00000002U
-#define __HFN_INT_CPE_Q2 0x00000004U
-#define __HFN_INT_CPE_Q3 0x00000008U
-#define __HFN_INT_CPE_Q4 0x00000010U
-#define __HFN_INT_CPE_Q5 0x00000020U
-#define __HFN_INT_CPE_Q6 0x00000040U
-#define __HFN_INT_CPE_Q7 0x00000080U
-#define __HFN_INT_RME_Q0 0x00000100U
-#define __HFN_INT_RME_Q1 0x00000200U
-#define __HFN_INT_RME_Q2 0x00000400U
-#define __HFN_INT_RME_Q3 0x00000800U
-#define __HFN_INT_RME_Q4 0x00001000U
-#define __HFN_INT_RME_Q5 0x00002000U
-#define __HFN_INT_RME_Q6 0x00004000U
-#define __HFN_INT_RME_Q7 0x00008000U
-#define __HFN_INT_ERR_EMC 0x00010000U
-#define __HFN_INT_ERR_LPU0 0x00020000U
-#define __HFN_INT_ERR_LPU1 0x00040000U
-#define __HFN_INT_ERR_PSS 0x00080000U
-#define __HFN_INT_MBOX_LPU0 0x00100000U
-#define __HFN_INT_MBOX_LPU1 0x00200000U
-#define __HFN_INT_MBOX1_LPU0 0x00400000U
-#define __HFN_INT_MBOX1_LPU1 0x00800000U
-#define __HFN_INT_CPE_MASK 0x000000ffU
-#define __HFN_INT_RME_MASK 0x0000ff00U
-
-
-/*
- * crossbow memory map.
- */
-#define PSS_SMEM_PAGE_START 0x8000
-#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
-#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
-
-/*
- * End of crossbow memory map
- */
-
-
-#endif /* __BFI_CBREG_H__ */
-
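The per-queue macros in the deleted crossbow header are plain stride computations over the Q0/Q1 base addresses, and CPE_Q_NUM/CPE_Q_MASK pack and unpack a PCI function number with a per-function queue index. A worked expansion using only the values above (illustration, not part of the patch):

	/* stride: CPE_Q1_DEPTH - CPE_Q0_DEPTH = 0x00010034 - 0x00010014 = 0x20   */
	/* CPE_Q_DEPTH(2) = 0x00010014 + 2 * 0x20 = 0x00010054 == CPE_Q2_DEPTH    */
	unsigned int q  = CPE_Q_NUM(1, 3);   /* function 1, queue 3 -> (1 << 2) + 3 = 7 */
	unsigned int qi = CPE_Q_MASK(q);     /* recover the per-function index: 7 & 0x3 = 3 */
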
diff --git a/drivers/scsi/bfa/bfi_ctreg.h b/drivers/scsi/bfa/bfi_ctreg.h
deleted file mode 100644
index fc4ce4a5a18..00000000000
--- a/drivers/scsi/bfa/bfi_ctreg.h
+++ /dev/null
@@ -1,636 +0,0 @@
-
-/*
- * bfi_ctreg.h catapult host block register definitions
- *
- * !!! Do not edit. Auto generated. !!!
- */
-
-#ifndef __BFI_CTREG_H__
-#define __BFI_CTREG_H__
-
-
-#define HOSTFN0_LPU_MBOX0_0 0x00019200
-#define HOSTFN1_LPU_MBOX0_8 0x00019260
-#define LPU_HOSTFN0_MBOX0_0 0x00019280
-#define LPU_HOSTFN1_MBOX0_8 0x000192e0
-#define HOSTFN2_LPU_MBOX0_0 0x00019400
-#define HOSTFN3_LPU_MBOX0_8 0x00019460
-#define LPU_HOSTFN2_MBOX0_0 0x00019480
-#define LPU_HOSTFN3_MBOX0_8 0x000194e0
-#define HOSTFN0_INT_STATUS 0x00014000
-#define __HOSTFN0_HALT_OCCURRED 0x01000000
-#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
-#define __HOSTFN0_INT_STATUS_LVL_SH 20
-#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
-#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000
-#define __HOSTFN0_INT_STATUS_P_SH 16
-#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH)
-#define __HOSTFN0_INT_STATUS_F 0x0000ffff
-#define HOSTFN0_INT_MSK 0x00014004
-#define HOST_PAGE_NUM_FN0 0x00014008
-#define __HOST_PAGE_NUM_FN 0x000001ff
-#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c
-#define __MSIX_ERR_INDEX_FN 0x000001ff
-#define HOSTFN1_INT_STATUS 0x00014100
-#define __HOSTFN1_HALT_OCCURRED 0x01000000
-#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000
-#define __HOSTFN1_INT_STATUS_LVL_SH 20
-#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
-#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000
-#define __HOSTFN1_INT_STATUS_P_SH 16
-#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH)
-#define __HOSTFN1_INT_STATUS_F 0x0000ffff
-#define HOSTFN1_INT_MSK 0x00014104
-#define HOST_PAGE_NUM_FN1 0x00014108
-#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c
-#define APP_PLL_425_CTL_REG 0x00014204
-#define __P_425_PLL_LOCK 0x80000000
-#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000
-#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000
-#define __APP_PLL_425_RESET_TIMER_SH 17
-#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH)
-#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
-#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000
-#define __APP_PLL_425_CNTLMT0_1_SH 14
-#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
-#define __APP_PLL_425_JITLMT0_1_MK 0x00003000
-#define __APP_PLL_425_JITLMT0_1_SH 12
-#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH)
-#define __APP_PLL_425_HREF 0x00000800
-#define __APP_PLL_425_HDIV 0x00000400
-#define __APP_PLL_425_P0_1_MK 0x00000300
-#define __APP_PLL_425_P0_1_SH 8
-#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH)
-#define __APP_PLL_425_Z0_2_MK 0x000000e0
-#define __APP_PLL_425_Z0_2_SH 5
-#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH)
-#define __APP_PLL_425_RSEL200500 0x00000010
-#define __APP_PLL_425_ENARST 0x00000008
-#define __APP_PLL_425_BYPASS 0x00000004
-#define __APP_PLL_425_LRESETN 0x00000002
-#define __APP_PLL_425_ENABLE 0x00000001
-#define APP_PLL_312_CTL_REG 0x00014208
-#define __P_312_PLL_LOCK 0x80000000
-#define __ENABLE_MAC_AHB_1 0x00800000
-#define __ENABLE_MAC_AHB_0 0x00400000
-#define __ENABLE_MAC_1 0x00200000
-#define __ENABLE_MAC_0 0x00100000
-#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000
-#define __APP_PLL_312_RESET_TIMER_SH 17
-#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH)
-#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
-#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000
-#define __APP_PLL_312_CNTLMT0_1_SH 14
-#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
-#define __APP_PLL_312_JITLMT0_1_MK 0x00003000
-#define __APP_PLL_312_JITLMT0_1_SH 12
-#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH)
-#define __APP_PLL_312_HREF 0x00000800
-#define __APP_PLL_312_HDIV 0x00000400
-#define __APP_PLL_312_P0_1_MK 0x00000300
-#define __APP_PLL_312_P0_1_SH 8
-#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH)
-#define __APP_PLL_312_Z0_2_MK 0x000000e0
-#define __APP_PLL_312_Z0_2_SH 5
-#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH)
-#define __APP_PLL_312_RSEL200500 0x00000010
-#define __APP_PLL_312_ENARST 0x00000008
-#define __APP_PLL_312_BYPASS 0x00000004
-#define __APP_PLL_312_LRESETN 0x00000002
-#define __APP_PLL_312_ENABLE 0x00000001
-#define MBIST_CTL_REG 0x00014220
-#define __EDRAM_BISTR_START 0x00000004
-#define __MBIST_RESET 0x00000002
-#define __MBIST_START 0x00000001
-#define MBIST_STAT_REG 0x00014224
-#define __EDRAM_BISTR_STATUS 0x00000008
-#define __EDRAM_BISTR_DONE 0x00000004
-#define __MEM_BIT_STATUS 0x00000002
-#define __MBIST_DONE 0x00000001
-#define HOST_SEM0_REG 0x00014230
-#define __HOST_SEMAPHORE 0x00000001
-#define HOST_SEM1_REG 0x00014234
-#define HOST_SEM2_REG 0x00014238
-#define HOST_SEM3_REG 0x0001423c
-#define HOST_SEM0_INFO_REG 0x00014240
-#define HOST_SEM1_INFO_REG 0x00014244
-#define HOST_SEM2_INFO_REG 0x00014248
-#define HOST_SEM3_INFO_REG 0x0001424c
-#define ETH_MAC_SER_REG 0x00014288
-#define __APP_EMS_CKBUFAMPIN 0x00000020
-#define __APP_EMS_REFCLKSEL 0x00000010
-#define __APP_EMS_CMLCKSEL 0x00000008
-#define __APP_EMS_REFCKBUFEN2 0x00000004
-#define __APP_EMS_REFCKBUFEN1 0x00000002
-#define __APP_EMS_CHANNEL_SEL 0x00000001
-#define HOSTFN2_INT_STATUS 0x00014300
-#define __HOSTFN2_HALT_OCCURRED 0x01000000
-#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000
-#define __HOSTFN2_INT_STATUS_LVL_SH 20
-#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
-#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000
-#define __HOSTFN2_INT_STATUS_P_SH 16
-#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH)
-#define __HOSTFN2_INT_STATUS_F 0x0000ffff
-#define HOSTFN2_INT_MSK 0x00014304
-#define HOST_PAGE_NUM_FN2 0x00014308
-#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c
-#define HOSTFN3_INT_STATUS 0x00014400
-#define __HALT_OCCURRED 0x01000000
-#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000
-#define __HOSTFN3_INT_STATUS_LVL_SH 20
-#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
-#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000
-#define __HOSTFN3_INT_STATUS_P_SH 16
-#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH)
-#define __HOSTFN3_INT_STATUS_F 0x0000ffff
-#define HOSTFN3_INT_MSK 0x00014404
-#define HOST_PAGE_NUM_FN3 0x00014408
-#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c
-#define FNC_ID_REG 0x00014600
-#define __FUNCTION_NUMBER 0x00000007
-#define FNC_PERS_REG 0x00014604
-#define __F3_FUNCTION_ACTIVE 0x80000000
-#define __F3_FUNCTION_MODE 0x40000000
-#define __F3_PORT_MAP_MK 0x30000000
-#define __F3_PORT_MAP_SH 28
-#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
-#define __F3_VM_MODE 0x08000000
-#define __F3_INTX_STATUS_MK 0x07000000
-#define __F3_INTX_STATUS_SH 24
-#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
-#define __F2_FUNCTION_ACTIVE 0x00800000
-#define __F2_FUNCTION_MODE 0x00400000
-#define __F2_PORT_MAP_MK 0x00300000
-#define __F2_PORT_MAP_SH 20
-#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
-#define __F2_VM_MODE 0x00080000
-#define __F2_INTX_STATUS_MK 0x00070000
-#define __F2_INTX_STATUS_SH 16
-#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
-#define __F1_FUNCTION_ACTIVE 0x00008000
-#define __F1_FUNCTION_MODE 0x00004000
-#define __F1_PORT_MAP_MK 0x00003000
-#define __F1_PORT_MAP_SH 12
-#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
-#define __F1_VM_MODE 0x00000800
-#define __F1_INTX_STATUS_MK 0x00000700
-#define __F1_INTX_STATUS_SH 8
-#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
-#define __F0_FUNCTION_ACTIVE 0x00000080
-#define __F0_FUNCTION_MODE 0x00000040
-#define __F0_PORT_MAP_MK 0x00000030
-#define __F0_PORT_MAP_SH 4
-#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
-#define __F0_VM_MODE 0x00000008
-#define __F0_INTX_STATUS 0x00000007
-enum {
- __F0_INTX_STATUS_MSIX = 0x0,
- __F0_INTX_STATUS_INTA = 0x1,
- __F0_INTX_STATUS_INTB = 0x2,
- __F0_INTX_STATUS_INTC = 0x3,
- __F0_INTX_STATUS_INTD = 0x4,
-};
-#define OP_MODE 0x0001460c
-#define __APP_ETH_CLK_LOWSPEED 0x00000004
-#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
-#define __GLOBAL_FCOE_MODE 0x00000001
-#define HOST_SEM4_REG 0x00014610
-#define HOST_SEM5_REG 0x00014614
-#define HOST_SEM6_REG 0x00014618
-#define HOST_SEM7_REG 0x0001461c
-#define HOST_SEM4_INFO_REG 0x00014620
-#define HOST_SEM5_INFO_REG 0x00014624
-#define HOST_SEM6_INFO_REG 0x00014628
-#define HOST_SEM7_INFO_REG 0x0001462c
-#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000
-#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1
-#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004
-#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1
-#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008
-#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
-#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1
-#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c
-#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
-#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1
-#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010
-#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1
-#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014
-#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1
-#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018
-#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
-#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1
-#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c
-#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
-#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1
-#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150
-#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1
-#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154
-#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1
-#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN2_LPU1_MBOX0BOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158
-#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
-#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1
-#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c
-#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
-#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1
-#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160
-#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1
-#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
-#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
-#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164
-#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe
-#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1
-#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
-#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
-#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168
-#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
-#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1
-#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
-#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
-#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c
-#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
-#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1
-#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
-#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
-#define FW_INIT_HALT_P0 0x000191ac
-#define __FW_INIT_HALT_P 0x00000001
-#define FW_INIT_HALT_P1 0x000191bc
-#define CPE_PI_PTR_Q0 0x00038000
-#define __CPE_PI_UNUSED_MK 0xffff0000
-#define __CPE_PI_UNUSED_SH 16
-#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH)
-#define __CPE_PI_PTR 0x0000ffff
-#define CPE_PI_PTR_Q1 0x00038040
-#define CPE_CI_PTR_Q0 0x00038004
-#define __CPE_CI_UNUSED_MK 0xffff0000
-#define __CPE_CI_UNUSED_SH 16
-#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH)
-#define __CPE_CI_PTR 0x0000ffff
-#define CPE_CI_PTR_Q1 0x00038044
-#define CPE_DEPTH_Q0 0x00038008
-#define __CPE_DEPTH_UNUSED_MK 0xf8000000
-#define __CPE_DEPTH_UNUSED_SH 27
-#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH)
-#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000
-#define __CPE_MSIX_VEC_INDEX_SH 16
-#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH)
-#define __CPE_DEPTH 0x0000ffff
-#define CPE_DEPTH_Q1 0x00038048
-#define CPE_QCTRL_Q0 0x0003800c
-#define __CPE_CTRL_UNUSED30_MK 0xfc000000
-#define __CPE_CTRL_UNUSED30_SH 26
-#define __CPE_CTRL_UNUSED30(_v) ((_v) << __CPE_CTRL_UNUSED30_SH)
-#define __CPE_FUNC_INT_CTRL_MK 0x03000000
-#define __CPE_FUNC_INT_CTRL_SH 24
-#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH)
-enum {
- __CPE_FUNC_INT_CTRL_DISABLE = 0x0,
- __CPE_FUNC_INT_CTRL_F2NF = 0x1,
- __CPE_FUNC_INT_CTRL_3QUART = 0x2,
- __CPE_FUNC_INT_CTRL_HALF = 0x3,
-};
-#define __CPE_CTRL_UNUSED20_MK 0x00f00000
-#define __CPE_CTRL_UNUSED20_SH 20
-#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH)
-#define __CPE_SCI_TH_MK 0x000f0000
-#define __CPE_SCI_TH_SH 16
-#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH)
-#define __CPE_CTRL_UNUSED10_MK 0x0000c000
-#define __CPE_CTRL_UNUSED10_SH 14
-#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH)
-#define __CPE_ACK_PENDING 0x00002000
-#define __CPE_CTRL_UNUSED40_MK 0x00001c00
-#define __CPE_CTRL_UNUSED40_SH 10
-#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH)
-#define __CPE_PCIEID_MK 0x00000300
-#define __CPE_PCIEID_SH 8
-#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH)
-#define __CPE_CTRL_UNUSED00_MK 0x000000fe
-#define __CPE_CTRL_UNUSED00_SH 1
-#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH)
-#define __CPE_ESIZE 0x00000001
-#define CPE_QCTRL_Q1 0x0003804c
-#define __CPE_CTRL_UNUSED31_MK 0xfc000000
-#define __CPE_CTRL_UNUSED31_SH 26
-#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH)
-#define __CPE_CTRL_UNUSED21_MK 0x00f00000
-#define __CPE_CTRL_UNUSED21_SH 20
-#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH)
-#define __CPE_CTRL_UNUSED11_MK 0x0000c000
-#define __CPE_CTRL_UNUSED11_SH 14
-#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH)
-#define __CPE_CTRL_UNUSED41_MK 0x00001c00
-#define __CPE_CTRL_UNUSED41_SH 10
-#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH)
-#define __CPE_CTRL_UNUSED01_MK 0x000000fe
-#define __CPE_CTRL_UNUSED01_SH 1
-#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH)
-#define RME_PI_PTR_Q0 0x00038020
-#define __LATENCY_TIME_STAMP_MK 0xffff0000
-#define __LATENCY_TIME_STAMP_SH 16
-#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH)
-#define __RME_PI_PTR 0x0000ffff
-#define RME_PI_PTR_Q1 0x00038060
-#define RME_CI_PTR_Q0 0x00038024
-#define __DELAY_TIME_STAMP_MK 0xffff0000
-#define __DELAY_TIME_STAMP_SH 16
-#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH)
-#define __RME_CI_PTR 0x0000ffff
-#define RME_CI_PTR_Q1 0x00038064
-#define RME_DEPTH_Q0 0x00038028
-#define __RME_DEPTH_UNUSED_MK 0xf8000000
-#define __RME_DEPTH_UNUSED_SH 27
-#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH)
-#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000
-#define __RME_MSIX_VEC_INDEX_SH 16
-#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH)
-#define __RME_DEPTH 0x0000ffff
-#define RME_DEPTH_Q1 0x00038068
-#define RME_QCTRL_Q0 0x0003802c
-#define __RME_INT_LATENCY_TIMER_MK 0xff000000
-#define __RME_INT_LATENCY_TIMER_SH 24
-#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH)
-#define __RME_INT_DELAY_TIMER_MK 0x00ff0000
-#define __RME_INT_DELAY_TIMER_SH 16
-#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH)
-#define __RME_INT_DELAY_DISABLE 0x00008000
-#define __RME_DLY_DELAY_DISABLE 0x00004000
-#define __RME_ACK_PENDING 0x00002000
-#define __RME_FULL_INTERRUPT_DISABLE 0x00001000
-#define __RME_CTRL_UNUSED10_MK 0x00000c00
-#define __RME_CTRL_UNUSED10_SH 10
-#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH)
-#define __RME_PCIEID_MK 0x00000300
-#define __RME_PCIEID_SH 8
-#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH)
-#define __RME_CTRL_UNUSED00_MK 0x000000fe
-#define __RME_CTRL_UNUSED00_SH 1
-#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH)
-#define __RME_ESIZE 0x00000001
-#define RME_QCTRL_Q1 0x0003806c
-#define __RME_CTRL_UNUSED11_MK 0x00000c00
-#define __RME_CTRL_UNUSED11_SH 10
-#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH)
-#define __RME_CTRL_UNUSED01_MK 0x000000fe
-#define __RME_CTRL_UNUSED01_SH 1
-#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH)
-#define PSS_CTL_REG 0x00018800
-#define __PSS_I2C_CLK_DIV_MK 0x007f0000
-#define __PSS_I2C_CLK_DIV_SH 16
-#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
-#define __PSS_LMEM_INIT_DONE 0x00001000
-#define __PSS_LMEM_RESET 0x00000200
-#define __PSS_LMEM_INIT_EN 0x00000100
-#define __PSS_LPU1_RESET 0x00000002
-#define __PSS_LPU0_RESET 0x00000001
-#define PSS_ERR_STATUS_REG 0x00018810
-#define __PSS_LPU1_TCM_READ_ERR 0x00200000
-#define __PSS_LPU0_TCM_READ_ERR 0x00100000
-#define __PSS_LMEM5_CORR_ERR 0x00080000
-#define __PSS_LMEM4_CORR_ERR 0x00040000
-#define __PSS_LMEM3_CORR_ERR 0x00020000
-#define __PSS_LMEM2_CORR_ERR 0x00010000
-#define __PSS_LMEM1_CORR_ERR 0x00008000
-#define __PSS_LMEM0_CORR_ERR 0x00004000
-#define __PSS_LMEM5_UNCORR_ERR 0x00002000
-#define __PSS_LMEM4_UNCORR_ERR 0x00001000
-#define __PSS_LMEM3_UNCORR_ERR 0x00000800
-#define __PSS_LMEM2_UNCORR_ERR 0x00000400
-#define __PSS_LMEM1_UNCORR_ERR 0x00000200
-#define __PSS_LMEM0_UNCORR_ERR 0x00000100
-#define __PSS_BAL_PERR 0x00000080
-#define __PSS_DIP_IF_ERR 0x00000040
-#define __PSS_IOH_IF_ERR 0x00000020
-#define __PSS_TDS_IF_ERR 0x00000010
-#define __PSS_RDS_IF_ERR 0x00000008
-#define __PSS_SGM_IF_ERR 0x00000004
-#define __PSS_LPU1_RAM_ERR 0x00000002
-#define __PSS_LPU0_RAM_ERR 0x00000001
-#define ERR_SET_REG 0x00018818
-#define __PSS_ERR_STATUS_SET 0x003fffff
-#define PMM_1T_RESET_REG_P0 0x0002381c
-#define __PMM_1T_RESET_P 0x00000001
-#define PMM_1T_RESET_REG_P1 0x00023c1c
-#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
-#define __RXQ0_ADD_VECTORS_P 0x80000000
-#define __RXQ0_STOP_P 0x40000000
-#define __RXQ0_PRD_PTR_P 0x0000ffff
-#define HQM_QSET1_RXQ_DRBL_P0 0x00038080
-#define __RXQ1_ADD_VECTORS_P 0x80000000
-#define __RXQ1_STOP_P 0x40000000
-#define __RXQ1_PRD_PTR_P 0x0000ffff
-#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000
-#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080
-#define HQM_QSET0_TXQ_DRBL_P0 0x00038020
-#define __TXQ0_ADD_VECTORS_P 0x80000000
-#define __TXQ0_STOP_P 0x40000000
-#define __TXQ0_PRD_PTR_P 0x0000ffff
-#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0
-#define __TXQ1_ADD_VECTORS_P 0x80000000
-#define __TXQ1_STOP_P 0x40000000
-#define __TXQ1_PRD_PTR_P 0x0000ffff
-#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020
-#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0
-#define HQM_QSET0_IB_DRBL_1_P0 0x00038040
-#define __IB1_0_ACK_P 0x80000000
-#define __IB1_0_DISABLE_P 0x40000000
-#define __IB1_0_COALESCING_CFG_P_MK 0x00ff0000
-#define __IB1_0_COALESCING_CFG_P_SH 16
-#define __IB1_0_COALESCING_CFG_P(_v) ((_v) << __IB1_0_COALESCING_CFG_P_SH)
-#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
-#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0
-#define __IB1_1_ACK_P 0x80000000
-#define __IB1_1_DISABLE_P 0x40000000
-#define __IB1_1_COALESCING_CFG_P_MK 0x00ff0000
-#define __IB1_1_COALESCING_CFG_P_SH 16
-#define __IB1_1_COALESCING_CFG_P(_v) ((_v) << __IB1_1_COALESCING_CFG_P_SH)
-#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
-#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040
-#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0
-#define HQM_QSET0_IB_DRBL_2_P0 0x00038060
-#define __IB2_0_ACK_P 0x80000000
-#define __IB2_0_DISABLE_P 0x40000000
-#define __IB2_0_COALESCING_CFG_P_MK 0x00ff0000
-#define __IB2_0_COALESCING_CFG_P_SH 16
-#define __IB2_0_COALESCING_CFG_P(_v) ((_v) << __IB2_0_COALESCING_CFG_P_SH)
-#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
-#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0
-#define __IB2_1_ACK_P 0x80000000
-#define __IB2_1_DISABLE_P 0x40000000
-#define __IB2_1_COALESCING_CFG_P_MK 0x00ff0000
-#define __IB2_1_COALESCING_CFG_P_SH 16
-#define __IB2_1_COALESCING_CFG_P(_v) ((_v) << __IB2_1_COALESCING_CFG_P_SH)
-#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
-#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060
-#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0
-
-
-/*
- * These definitions are either in error/missing in spec. Its auto-generated
- * from hard coded values in regparse.pl.
- */
-#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
-#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
-#define __EMPHPRE_AT_4G_FIX 0x00000003
-#define __SFP_TXRATE_EN_FIX 0x00000100
-#define __SFP_RXRATE_EN_FIX 0x00000080
-
-
-/*
- * These register definitions are auto-generated from hard coded values
- * in regparse.pl.
- */
-
-
-/*
- * These register mapping definitions are auto-generated from mapping tables
- * in regparse.pl.
- */
-#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
-#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
-#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
-#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
-#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
-#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
-
-#define CPE_DEPTH_Q(__n) \
- (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
-#define CPE_QCTRL_Q(__n) \
- (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
-#define CPE_PI_PTR_Q(__n) \
- (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
-#define CPE_CI_PTR_Q(__n) \
- (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
-#define RME_DEPTH_Q(__n) \
- (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
-#define RME_QCTRL_Q(__n) \
- (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
-#define RME_PI_PTR_Q(__n) \
- (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
-#define RME_CI_PTR_Q(__n) \
- (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
-#define HQM_QSET_RXQ_DRBL_P0(__n) \
- (HQM_QSET0_RXQ_DRBL_P0 + (__n) * \
- (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
-#define HQM_QSET_TXQ_DRBL_P0(__n) \
- (HQM_QSET0_TXQ_DRBL_P0 + (__n) * \
- (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
-#define HQM_QSET_IB_DRBL_1_P0(__n) \
- (HQM_QSET0_IB_DRBL_1_P0 + (__n) * \
- (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
-#define HQM_QSET_IB_DRBL_2_P0(__n) \
- (HQM_QSET0_IB_DRBL_2_P0 + (__n) * \
- (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
-#define HQM_QSET_RXQ_DRBL_P1(__n) \
- (HQM_QSET0_RXQ_DRBL_P1 + (__n) * \
- (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
-#define HQM_QSET_TXQ_DRBL_P1(__n) \
- (HQM_QSET0_TXQ_DRBL_P1 + (__n) * \
- (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
-#define HQM_QSET_IB_DRBL_1_P1(__n) \
- (HQM_QSET0_IB_DRBL_1_P1 + (__n) * \
- (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
-#define HQM_QSET_IB_DRBL_2_P1(__n) \
- (HQM_QSET0_IB_DRBL_2_P1 + (__n) * \
- (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
-
-#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
-#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
-#define CPE_Q_MASK(__q) ((__q) & 0x3)
-#define RME_Q_MASK(__q) ((__q) & 0x3)
-
-
-/*
- * PCI MSI-X vector defines
- */
-enum {
- BFA_MSIX_CPE_Q0 = 0,
- BFA_MSIX_CPE_Q1 = 1,
- BFA_MSIX_CPE_Q2 = 2,
- BFA_MSIX_CPE_Q3 = 3,
- BFA_MSIX_RME_Q0 = 4,
- BFA_MSIX_RME_Q1 = 5,
- BFA_MSIX_RME_Q2 = 6,
- BFA_MSIX_RME_Q3 = 7,
- BFA_MSIX_LPU_ERR = 8,
- BFA_MSIX_CT_MAX = 9,
-};
-
-/*
- * And corresponding host interrupt status bit field defines
- */
-#define __HFN_INT_CPE_Q0 0x00000001U
-#define __HFN_INT_CPE_Q1 0x00000002U
-#define __HFN_INT_CPE_Q2 0x00000004U
-#define __HFN_INT_CPE_Q3 0x00000008U
-#define __HFN_INT_CPE_Q4 0x00000010U
-#define __HFN_INT_CPE_Q5 0x00000020U
-#define __HFN_INT_CPE_Q6 0x00000040U
-#define __HFN_INT_CPE_Q7 0x00000080U
-#define __HFN_INT_RME_Q0 0x00000100U
-#define __HFN_INT_RME_Q1 0x00000200U
-#define __HFN_INT_RME_Q2 0x00000400U
-#define __HFN_INT_RME_Q3 0x00000800U
-#define __HFN_INT_RME_Q4 0x00001000U
-#define __HFN_INT_RME_Q5 0x00002000U
-#define __HFN_INT_RME_Q6 0x00004000U
-#define __HFN_INT_RME_Q7 0x00008000U
-#define __HFN_INT_ERR_EMC 0x00010000U
-#define __HFN_INT_ERR_LPU0 0x00020000U
-#define __HFN_INT_ERR_LPU1 0x00040000U
-#define __HFN_INT_ERR_PSS 0x00080000U
-#define __HFN_INT_MBOX_LPU0 0x00100000U
-#define __HFN_INT_MBOX_LPU1 0x00200000U
-#define __HFN_INT_MBOX1_LPU0 0x00400000U
-#define __HFN_INT_MBOX1_LPU1 0x00800000U
-#define __HFN_INT_LL_HALT 0x01000000U
-#define __HFN_INT_CPE_MASK 0x000000ffU
-#define __HFN_INT_RME_MASK 0x0000ff00U
-
-
-/*
- * catapult memory map.
- */
-#define LL_PGN_HQM0 0x0096
-#define LL_PGN_HQM1 0x0097
-#define PSS_SMEM_PAGE_START 0x8000
-#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
-#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
-
-/*
- * End of catapult memory map
- */
-
-
-#endif /* __BFI_CTREG_H__ */
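Both deleted register headers end with the same shared-memory paging macros: they take a base page number (_pg0, e.g. PSS_SMEM_PAGE_START) and a byte address _ma, and split the address on a 32 KB (0x8000-byte) page boundary into a page number (_ma >> 15) and a page offset (_ma & 0x7fff). A worked example with the definitions above (illustration only):

	/* PSS_SMEM_PGNUM(PSS_SMEM_PAGE_START, 0x12345)
	 *   = 0x8000 + (0x12345 >> 15) = 0x8000 + 2 = 0x8002
	 * PSS_SMEM_PGOFF(0x12345) = 0x12345 & 0x7fff = 0x2345
	 */
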
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index 19e888a5755..0d9f1fb50db 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -28,11 +28,17 @@ enum bfi_iocfc_h2i_msgs {
BFI_IOCFC_H2I_CFG_REQ = 1,
BFI_IOCFC_H2I_SET_INTR_REQ = 2,
BFI_IOCFC_H2I_UPDATEQ_REQ = 3,
+ BFI_IOCFC_H2I_FAA_ENABLE_REQ = 4,
+ BFI_IOCFC_H2I_FAA_DISABLE_REQ = 5,
+ BFI_IOCFC_H2I_FAA_QUERY_REQ = 6,
};
enum bfi_iocfc_i2h_msgs {
BFI_IOCFC_I2H_CFG_REPLY = BFA_I2HM(1),
BFI_IOCFC_I2H_UPDATEQ_RSP = BFA_I2HM(3),
+ BFI_IOCFC_I2H_FAA_ENABLE_RSP = BFA_I2HM(4),
+ BFI_IOCFC_I2H_FAA_DISABLE_RSP = BFA_I2HM(5),
+ BFI_IOCFC_I2H_FAA_QUERY_RSP = BFA_I2HM(6),
};
struct bfi_iocfc_cfg_s {
@@ -40,6 +46,12 @@ struct bfi_iocfc_cfg_s {
u8 sense_buf_len; /* SCSI sense length */
u16 rsvd_1;
u32 endian_sig; /* endian signature of host */
+ u8 rsvd_2;
+ u8 single_msix_vec;
+ u8 rsvd[2];
+ __be16 num_ioim_reqs;
+ __be16 num_fwtio_reqs;
+
/*
* Request and response circular queue base addresses, size and
@@ -54,7 +66,8 @@ struct bfi_iocfc_cfg_s {
union bfi_addr_u stats_addr; /* DMA-able address for stats */
union bfi_addr_u cfgrsp_addr; /* config response dma address */
- union bfi_addr_u ioim_snsbase; /* IO sense buffer base address */
+ union bfi_addr_u ioim_snsbase[BFI_IOIM_SNSBUF_SEGS];
+ /* IO sense buf base addr segments */
struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */
};
@@ -68,11 +81,25 @@ struct bfi_iocfc_bootwwns {
u8 rsvd[7];
};
+/**
+ * Queue configuration response from firmware
+ */
+struct bfi_iocfc_qreg_s {
+ u32 cpe_q_ci_off[BFI_IOC_MAX_CQS];
+ u32 cpe_q_pi_off[BFI_IOC_MAX_CQS];
+ u32 cpe_qctl_off[BFI_IOC_MAX_CQS];
+ u32 rme_q_ci_off[BFI_IOC_MAX_CQS];
+ u32 rme_q_pi_off[BFI_IOC_MAX_CQS];
+ u32 rme_qctl_off[BFI_IOC_MAX_CQS];
+ u8 hw_qid[BFI_IOC_MAX_CQS];
+};
+
struct bfi_iocfc_cfgrsp_s {
struct bfa_iocfc_fwcfg_s fwcfg;
struct bfa_iocfc_intr_attr_s intr_attr;
struct bfi_iocfc_bootwwns bootwwns;
struct bfi_pbc_s pbc_cfg;
+ struct bfi_iocfc_qreg_s qreg;
};
/*
@@ -150,6 +177,37 @@ union bfi_iocfc_i2h_msg_u {
u32 mboxmsg[BFI_IOC_MSGSZ];
};
+/*
+ * BFI_IOCFC_H2I_FAA_ENABLE_REQ, BFI_IOCFC_H2I_FAA_DISABLE_REQ message
+ */
+struct bfi_faa_en_dis_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+};
+
+/*
+ * BFI_IOCFC_H2I_FAA_QUERY_REQ message
+ */
+struct bfi_faa_query_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 faa_status; /* FAA status */
+ u8 addr_source; /* PWWN source */
+ u8 rsvd[2];
+ wwn_t faa; /* Fabric acquired PWWN */
+};
+
+/*
+ * BFI_IOCFC_I2H_FAA_ENABLE_RSP, BFI_IOCFC_I2H_FAA_DISABLE_RSP message
+ */
+struct bfi_faa_en_dis_rsp_s {
+ struct bfi_mhdr_s mh; /* common msg header */
+ u8 status; /* FAA enable/disable status */
+ u8 rsvd[3];
+};
+
+/*
+ * BFI_IOCFC_I2H_FAA_QUERY_RSP message
+ */
+#define bfi_faa_query_rsp_t struct bfi_faa_query_s
enum bfi_fcport_h2i {
BFI_FCPORT_H2I_ENABLE_REQ = (1),
@@ -213,7 +271,8 @@ struct bfi_fcport_enable_req_s {
struct bfi_fcport_set_svc_params_req_s {
struct bfi_mhdr_s mh; /* msg header */
__be16 tx_bbcredit; /* Tx credits */
- u16 rsvd;
+ u8 bb_scn; /* BB_SC FC credit recovery */
+ u8 rsvd;
};
/*
@@ -293,12 +352,12 @@ struct bfi_fcxp_send_req_s {
u8 class; /* FC class used for req/rsp */
u8 rsp_timeout; /* timeout in secs, 0-no response */
u8 cts; /* continue sequence */
- u8 lp_tag; /* lport tag */
+ u8 lp_fwtag; /* lport tag */
struct fchs_s fchs; /* request FC header structure */
__be32 req_len; /* request payload length */
__be32 rsp_maxlen; /* max response length expected */
- struct bfi_sge_s req_sge[BFA_FCXP_MAX_SGES]; /* request buf */
- struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */
+ struct bfi_alen_s req_alen; /* request buffer */
+ struct bfi_alen_s rsp_alen; /* response buffer */
};
/*
@@ -328,7 +387,7 @@ struct bfi_uf_buf_post_s {
struct bfi_mhdr_s mh; /* Common msg header */
u16 buf_tag; /* buffer tag */
__be16 buf_len; /* total buffer length */
- struct bfi_sge_s sge[BFA_UF_MAX_SGES]; /* buffer DMA SGEs */
+ struct bfi_alen_s alen; /* buffer address/len pair */
};
struct bfi_uf_frm_rcvd_s {
@@ -346,26 +405,27 @@ enum bfi_lps_h2i_msgs {
};
enum bfi_lps_i2h_msgs {
- BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1),
- BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2),
- BFI_LPS_H2I_CVL_EVENT = BFA_I2HM(3),
+ BFI_LPS_I2H_LOGIN_RSP = BFA_I2HM(1),
+ BFI_LPS_I2H_LOGOUT_RSP = BFA_I2HM(2),
+ BFI_LPS_I2H_CVL_EVENT = BFA_I2HM(3),
};
struct bfi_lps_login_req_s {
struct bfi_mhdr_s mh; /* common msg header */
- u8 lp_tag;
+ u8 bfa_tag;
u8 alpa;
__be16 pdu_size;
wwn_t pwwn;
wwn_t nwwn;
u8 fdisc;
u8 auth_en;
- u8 rsvd[2];
+ u8 lps_role;
+ u8 bb_scn;
};
struct bfi_lps_login_rsp_s {
struct bfi_mhdr_s mh; /* common msg header */
- u8 lp_tag;
+ u8 fw_tag;
u8 status;
u8 lsrjt_rsn;
u8 lsrjt_expl;
@@ -380,31 +440,33 @@ struct bfi_lps_login_rsp_s {
mac_t fcf_mac;
u8 ext_status;
u8 brcd_switch; /* attached peer is brcd switch */
+ u8 bb_scn; /* attached port's bb_scn */
+ u8 bfa_tag;
};
struct bfi_lps_logout_req_s {
struct bfi_mhdr_s mh; /* common msg header */
- u8 lp_tag;
+ u8 fw_tag;
u8 rsvd[3];
wwn_t port_name;
};
struct bfi_lps_logout_rsp_s {
struct bfi_mhdr_s mh; /* common msg header */
- u8 lp_tag;
+ u8 bfa_tag;
u8 status;
u8 rsvd[2];
};
struct bfi_lps_cvl_event_s {
struct bfi_mhdr_s mh; /* common msg header */
- u8 lp_tag;
+ u8 bfa_tag;
u8 rsvd[3];
};
struct bfi_lps_n2n_pid_req_s {
struct bfi_mhdr_s mh; /* common msg header */
- u8 lp_tag;
+ u8 fw_tag;
u32 lp_pid:24;
};
@@ -439,7 +501,7 @@ struct bfi_rport_create_req_s {
u16 bfa_handle; /* host rport handle */
__be16 max_frmsz; /* max rcv pdu size */
u32 pid:24, /* remote port ID */
- lp_tag:8; /* local port tag */
+ lp_fwtag:8; /* local port tag */
u32 local_pid:24, /* local port ID */
cisc:8;
u8 fc_class; /* supported FC classes */
@@ -502,62 +564,63 @@ union bfi_rport_i2h_msg_u {
* Initiator mode I-T nexus interface defines.
*/
-enum bfi_itnim_h2i {
- BFI_ITNIM_H2I_CREATE_REQ = 1, /* i-t nexus creation */
- BFI_ITNIM_H2I_DELETE_REQ = 2, /* i-t nexus deletion */
+enum bfi_itn_h2i {
+ BFI_ITN_H2I_CREATE_REQ = 1, /* i-t nexus creation */
+ BFI_ITN_H2I_DELETE_REQ = 2, /* i-t nexus deletion */
};
-enum bfi_itnim_i2h {
- BFI_ITNIM_I2H_CREATE_RSP = BFA_I2HM(1),
- BFI_ITNIM_I2H_DELETE_RSP = BFA_I2HM(2),
- BFI_ITNIM_I2H_SLER_EVENT = BFA_I2HM(3),
+enum bfi_itn_i2h {
+ BFI_ITN_I2H_CREATE_RSP = BFA_I2HM(1),
+ BFI_ITN_I2H_DELETE_RSP = BFA_I2HM(2),
+ BFI_ITN_I2H_SLER_EVENT = BFA_I2HM(3),
};
-struct bfi_itnim_create_req_s {
+struct bfi_itn_create_req_s {
struct bfi_mhdr_s mh; /* common msg header */
u16 fw_handle; /* f/w handle for itnim */
u8 class; /* FC class for IO */
u8 seq_rec; /* sequence recovery support */
u8 msg_no; /* seq id of the msg */
+ u8 role;
};
-struct bfi_itnim_create_rsp_s {
+struct bfi_itn_create_rsp_s {
struct bfi_mhdr_s mh; /* common msg header */
u16 bfa_handle; /* bfa handle for itnim */
u8 status; /* fcp request status */
u8 seq_id; /* seq id of the msg */
};
-struct bfi_itnim_delete_req_s {
+struct bfi_itn_delete_req_s {
struct bfi_mhdr_s mh; /* common msg header */
u16 fw_handle; /* f/w itnim handle */
u8 seq_id; /* seq id of the msg */
u8 rsvd;
};
-struct bfi_itnim_delete_rsp_s {
+struct bfi_itn_delete_rsp_s {
struct bfi_mhdr_s mh; /* common msg header */
u16 bfa_handle; /* bfa handle for itnim */
u8 status; /* fcp request status */
u8 seq_id; /* seq id of the msg */
};
-struct bfi_itnim_sler_event_s {
+struct bfi_itn_sler_event_s {
struct bfi_mhdr_s mh; /* common msg header */
u16 bfa_handle; /* bfa handle for itnim */
u16 rsvd;
};
-union bfi_itnim_h2i_msg_u {
- struct bfi_itnim_create_req_s *create_req;
- struct bfi_itnim_delete_req_s *delete_req;
+union bfi_itn_h2i_msg_u {
+ struct bfi_itn_create_req_s *create_req;
+ struct bfi_itn_delete_req_s *delete_req;
struct bfi_msg_s *msg;
};
-union bfi_itnim_i2h_msg_u {
- struct bfi_itnim_create_rsp_s *create_rsp;
- struct bfi_itnim_delete_rsp_s *delete_rsp;
- struct bfi_itnim_sler_event_s *sler_event;
+union bfi_itn_i2h_msg_u {
+ struct bfi_itn_create_rsp_s *create_rsp;
+ struct bfi_itn_delete_rsp_s *delete_rsp;
+ struct bfi_itn_sler_event_s *sler_event;
struct bfi_msg_s *msg;
};
@@ -693,7 +756,6 @@ enum bfi_ioim_status {
BFI_IOIM_STS_PATHTOV = 8,
};
-#define BFI_IOIM_SNSLEN (256)
/*
* I/O response message
*/
@@ -772,4 +834,27 @@ struct bfi_tskim_rsp_s {
#pragma pack()
+/*
+ * Crossbow PCI MSI-X vector defines
+ */
+enum {
+ BFI_MSIX_CPE_QMIN_CB = 0,
+ BFI_MSIX_CPE_QMAX_CB = 7,
+ BFI_MSIX_RME_QMIN_CB = 8,
+ BFI_MSIX_RME_QMAX_CB = 15,
+ BFI_MSIX_CB_MAX = 22,
+};
+
+/*
+ * Catapult FC PCI MSI-X vector defines
+ */
+enum {
+ BFI_MSIX_LPU_ERR_CT = 0,
+ BFI_MSIX_CPE_QMIN_CT = 1,
+ BFI_MSIX_CPE_QMAX_CT = 4,
+ BFI_MSIX_RME_QMIN_CT = 5,
+ BFI_MSIX_RME_QMAX_CT = 8,
+ BFI_MSIX_CT_MAX = 9,
+};
+
#endif /* __BFI_MS_H__ */
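The two MSI-X enums added at the end of bfi_ms.h describe contiguous vector ranges per ASIC: on Crossbow the CPE queues own vectors 0-7 and the RME queues 8-15, while on Catapult FC vector 0 is the LPU error vector and the CPE/RME queues follow at 1-4 and 5-8. A minimal sketch of turning a queue index into its vector with these ranges (the helper name is hypothetical, not from the patch):

	static int bfi_cpe_msix_vec_ct(int qidx)
	{
		/* Catapult FC: CPE queue 0..3 -> vectors BFI_MSIX_CPE_QMIN_CT..QMAX_CT (1..4) */
		return BFI_MSIX_CPE_QMIN_CT + qidx;
	}
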
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
new file mode 100644
index 00000000000..d892064b64a
--- /dev/null
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * bfi_reg.h ASIC register defines for all Brocade adapter ASICs
+ */
+
+#ifndef __BFI_REG_H__
+#define __BFI_REG_H__
+
+#define HOSTFN0_INT_STATUS 0x00014000 /* cb/ct */
+#define HOSTFN1_INT_STATUS 0x00014100 /* cb/ct */
+#define HOSTFN2_INT_STATUS 0x00014300 /* ct */
+#define HOSTFN3_INT_STATUS 0x00014400 /* ct */
+#define HOSTFN0_INT_MSK 0x00014004 /* cb/ct */
+#define HOSTFN1_INT_MSK 0x00014104 /* cb/ct */
+#define HOSTFN2_INT_MSK 0x00014304 /* ct */
+#define HOSTFN3_INT_MSK 0x00014404 /* ct */
+
+#define HOST_PAGE_NUM_FN0 0x00014008 /* cb/ct */
+#define HOST_PAGE_NUM_FN1 0x00014108 /* cb/ct */
+#define HOST_PAGE_NUM_FN2 0x00014308 /* ct */
+#define HOST_PAGE_NUM_FN3 0x00014408 /* ct */
+
+#define APP_PLL_LCLK_CTL_REG 0x00014204 /* cb/ct */
+#define __P_LCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_LCLK_SRAM_USE_100MHZ 0x00100000
+#define __APP_PLL_LCLK_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_LCLK_RESET_TIMER_SH 17
+#define __APP_PLL_LCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_LCLK_RESET_TIMER_SH)
+#define __APP_PLL_LCLK_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_LCLK_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_LCLK_CNTLMT0_1_SH 14
+#define __APP_PLL_LCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH)
+#define __APP_PLL_LCLK_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_LCLK_JITLMT0_1_SH 12
+#define __APP_PLL_LCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_LCLK_JITLMT0_1_SH)
+#define __APP_PLL_LCLK_HREF 0x00000800
+#define __APP_PLL_LCLK_HDIV 0x00000400
+#define __APP_PLL_LCLK_P0_1_MK 0x00000300
+#define __APP_PLL_LCLK_P0_1_SH 8
+#define __APP_PLL_LCLK_P0_1(_v) ((_v) << __APP_PLL_LCLK_P0_1_SH)
+#define __APP_PLL_LCLK_Z0_2_MK 0x000000e0
+#define __APP_PLL_LCLK_Z0_2_SH 5
+#define __APP_PLL_LCLK_Z0_2(_v) ((_v) << __APP_PLL_LCLK_Z0_2_SH)
+#define __APP_PLL_LCLK_RSEL200500 0x00000010
+#define __APP_PLL_LCLK_ENARST 0x00000008
+#define __APP_PLL_LCLK_BYPASS 0x00000004
+#define __APP_PLL_LCLK_LRESETN 0x00000002
+#define __APP_PLL_LCLK_ENABLE 0x00000001
+#define APP_PLL_SCLK_CTL_REG 0x00014208 /* cb/ct */
+#define __P_SCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_SCLK_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_SCLK_RESET_TIMER_SH 17
+#define __APP_PLL_SCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_SCLK_RESET_TIMER_SH)
+#define __APP_PLL_SCLK_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_SCLK_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_SCLK_CNTLMT0_1_SH 14
+#define __APP_PLL_SCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH)
+#define __APP_PLL_SCLK_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_SCLK_JITLMT0_1_SH 12
+#define __APP_PLL_SCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_SCLK_JITLMT0_1_SH)
+#define __APP_PLL_SCLK_HREF 0x00000800
+#define __APP_PLL_SCLK_HDIV 0x00000400
+#define __APP_PLL_SCLK_P0_1_MK 0x00000300
+#define __APP_PLL_SCLK_P0_1_SH 8
+#define __APP_PLL_SCLK_P0_1(_v) ((_v) << __APP_PLL_SCLK_P0_1_SH)
+#define __APP_PLL_SCLK_Z0_2_MK 0x000000e0
+#define __APP_PLL_SCLK_Z0_2_SH 5
+#define __APP_PLL_SCLK_Z0_2(_v) ((_v) << __APP_PLL_SCLK_Z0_2_SH)
+#define __APP_PLL_SCLK_RSEL200500 0x00000010
+#define __APP_PLL_SCLK_ENARST 0x00000008
+#define __APP_PLL_SCLK_BYPASS 0x00000004
+#define __APP_PLL_SCLK_LRESETN 0x00000002
+#define __APP_PLL_SCLK_ENABLE 0x00000001
+#define __ENABLE_MAC_AHB_1 0x00800000 /* ct */
+#define __ENABLE_MAC_AHB_0 0x00400000 /* ct */
+#define __ENABLE_MAC_1 0x00200000 /* ct */
+#define __ENABLE_MAC_0 0x00100000 /* ct */
+
+#define HOST_SEM0_REG 0x00014230 /* cb/ct */
+#define HOST_SEM1_REG 0x00014234 /* cb/ct */
+#define HOST_SEM2_REG 0x00014238 /* cb/ct */
+#define HOST_SEM3_REG 0x0001423c /* cb/ct */
+#define HOST_SEM4_REG 0x00014610 /* cb/ct */
+#define HOST_SEM5_REG 0x00014614 /* cb/ct */
+#define HOST_SEM6_REG 0x00014618 /* cb/ct */
+#define HOST_SEM7_REG 0x0001461c /* cb/ct */
+#define HOST_SEM0_INFO_REG 0x00014240 /* cb/ct */
+#define HOST_SEM1_INFO_REG 0x00014244 /* cb/ct */
+#define HOST_SEM2_INFO_REG 0x00014248 /* cb/ct */
+#define HOST_SEM3_INFO_REG 0x0001424c /* cb/ct */
+#define HOST_SEM4_INFO_REG 0x00014620 /* cb/ct */
+#define HOST_SEM5_INFO_REG 0x00014624 /* cb/ct */
+#define HOST_SEM6_INFO_REG 0x00014628 /* cb/ct */
+#define HOST_SEM7_INFO_REG 0x0001462c /* cb/ct */
+
+#define HOSTFN0_LPU0_CMD_STAT 0x00019000 /* cb/ct */
+#define HOSTFN0_LPU1_CMD_STAT 0x00019004 /* cb/ct */
+#define HOSTFN1_LPU0_CMD_STAT 0x00019010 /* cb/ct */
+#define HOSTFN1_LPU1_CMD_STAT 0x00019014 /* cb/ct */
+#define HOSTFN2_LPU0_CMD_STAT 0x00019150 /* ct */
+#define HOSTFN2_LPU1_CMD_STAT 0x00019154 /* ct */
+#define HOSTFN3_LPU0_CMD_STAT 0x00019160 /* ct */
+#define HOSTFN3_LPU1_CMD_STAT 0x00019164 /* ct */
+#define LPU0_HOSTFN0_CMD_STAT 0x00019008 /* cb/ct */
+#define LPU1_HOSTFN0_CMD_STAT 0x0001900c /* cb/ct */
+#define LPU0_HOSTFN1_CMD_STAT 0x00019018 /* cb/ct */
+#define LPU1_HOSTFN1_CMD_STAT 0x0001901c /* cb/ct */
+#define LPU0_HOSTFN2_CMD_STAT 0x00019158 /* ct */
+#define LPU1_HOSTFN2_CMD_STAT 0x0001915c /* ct */
+#define LPU0_HOSTFN3_CMD_STAT 0x00019168 /* ct */
+#define LPU1_HOSTFN3_CMD_STAT 0x0001916c /* ct */
+
+#define PSS_CTL_REG 0x00018800 /* cb/ct */
+#define __PSS_I2C_CLK_DIV_MK 0x007f0000
+#define __PSS_I2C_CLK_DIV_SH 16
+#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
+#define __PSS_LMEM_INIT_DONE 0x00001000
+#define __PSS_LMEM_RESET 0x00000200
+#define __PSS_LMEM_INIT_EN 0x00000100
+#define __PSS_LPU1_RESET 0x00000002
+#define __PSS_LPU0_RESET 0x00000001
+#define PSS_ERR_STATUS_REG 0x00018810 /* cb/ct */
+#define ERR_SET_REG 0x00018818 /* cb/ct */
+#define PSS_GPIO_OUT_REG 0x000188c0 /* cb/ct */
+#define __PSS_GPIO_OUT_REG 0x00000fff
+#define PSS_GPIO_OE_REG 0x000188c8 /* cb/ct */
+#define __PSS_GPIO_OE_REG 0x000000ff
+
+#define HOSTFN0_LPU_MBOX0_0 0x00019200 /* cb/ct */
+#define HOSTFN1_LPU_MBOX0_8 0x00019260 /* cb/ct */
+#define LPU_HOSTFN0_MBOX0_0 0x00019280 /* cb/ct */
+#define LPU_HOSTFN1_MBOX0_8 0x000192e0 /* cb/ct */
+#define HOSTFN2_LPU_MBOX0_0 0x00019400 /* ct */
+#define HOSTFN3_LPU_MBOX0_8 0x00019460 /* ct */
+#define LPU_HOSTFN2_MBOX0_0 0x00019480 /* ct */
+#define LPU_HOSTFN3_MBOX0_8 0x000194e0 /* ct */
+
+#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c /* ct */
+
+#define MBIST_CTL_REG 0x00014220 /* ct */
+#define __EDRAM_BISTR_START 0x00000004
+#define MBIST_STAT_REG 0x00014224 /* ct */
+#define ETH_MAC_SER_REG 0x00014288 /* ct */
+#define __APP_EMS_CKBUFAMPIN 0x00000020
+#define __APP_EMS_REFCLKSEL 0x00000010
+#define __APP_EMS_CMLCKSEL 0x00000008
+#define __APP_EMS_REFCKBUFEN2 0x00000004
+#define __APP_EMS_REFCKBUFEN1 0x00000002
+#define __APP_EMS_CHANNEL_SEL 0x00000001
+#define FNC_PERS_REG 0x00014604 /* ct */
+#define __F3_FUNCTION_ACTIVE 0x80000000
+#define __F3_FUNCTION_MODE 0x40000000
+#define __F3_PORT_MAP_MK 0x30000000
+#define __F3_PORT_MAP_SH 28
+#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
+#define __F3_VM_MODE 0x08000000
+#define __F3_INTX_STATUS_MK 0x07000000
+#define __F3_INTX_STATUS_SH 24
+#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
+#define __F2_FUNCTION_ACTIVE 0x00800000
+#define __F2_FUNCTION_MODE 0x00400000
+#define __F2_PORT_MAP_MK 0x00300000
+#define __F2_PORT_MAP_SH 20
+#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
+#define __F2_VM_MODE 0x00080000
+#define __F2_INTX_STATUS_MK 0x00070000
+#define __F2_INTX_STATUS_SH 16
+#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
+#define __F1_FUNCTION_ACTIVE 0x00008000
+#define __F1_FUNCTION_MODE 0x00004000
+#define __F1_PORT_MAP_MK 0x00003000
+#define __F1_PORT_MAP_SH 12
+#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
+#define __F1_VM_MODE 0x00000800
+#define __F1_INTX_STATUS_MK 0x00000700
+#define __F1_INTX_STATUS_SH 8
+#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
+#define __F0_FUNCTION_ACTIVE 0x00000080
+#define __F0_FUNCTION_MODE 0x00000040
+#define __F0_PORT_MAP_MK 0x00000030
+#define __F0_PORT_MAP_SH 4
+#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
+#define __F0_VM_MODE 0x00000008
+#define __F0_INTX_STATUS 0x00000007
+enum {
+ __F0_INTX_STATUS_MSIX = 0x0,
+ __F0_INTX_STATUS_INTA = 0x1,
+ __F0_INTX_STATUS_INTB = 0x2,
+ __F0_INTX_STATUS_INTC = 0x3,
+ __F0_INTX_STATUS_INTD = 0x4,
+};
+
+#define OP_MODE 0x0001460c /* ct */
+#define __APP_ETH_CLK_LOWSPEED 0x00000004
+#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
+#define __GLOBAL_FCOE_MODE 0x00000001
+#define FW_INIT_HALT_P0 0x000191ac /* ct */
+#define __FW_INIT_HALT_P 0x00000001
+#define FW_INIT_HALT_P1 0x000191bc /* ct */
+#define PMM_1T_RESET_REG_P0 0x0002381c /* ct */
+#define __PMM_1T_RESET_P 0x00000001
+#define PMM_1T_RESET_REG_P1 0x00023c1c /* ct */
+
+/**
+ * Catapult-2 specific defines
+ */
+#define CT2_PCI_CPQ_BASE 0x00030000
+#define CT2_PCI_APP_BASE 0x00030100
+#define CT2_PCI_ETH_BASE 0x00030400
+
+/*
+ * APP block registers
+ */
+#define CT2_HOSTFN_INT_STATUS (CT2_PCI_APP_BASE + 0x00)
+#define CT2_HOSTFN_INTR_MASK (CT2_PCI_APP_BASE + 0x04)
+#define CT2_HOSTFN_PERSONALITY0 (CT2_PCI_APP_BASE + 0x08)
+#define __PME_STATUS_ 0x00200000
+#define __PF_VF_BAR_SIZE_MODE__MK 0x00180000
+#define __PF_VF_BAR_SIZE_MODE__SH 19
+#define __PF_VF_BAR_SIZE_MODE_(_v) ((_v) << __PF_VF_BAR_SIZE_MODE__SH)
+#define __FC_LL_PORT_MAP__MK 0x00060000
+#define __FC_LL_PORT_MAP__SH 17
+#define __FC_LL_PORT_MAP_(_v) ((_v) << __FC_LL_PORT_MAP__SH)
+#define __PF_VF_ACTIVE_ 0x00010000
+#define __PF_VF_CFG_RDY_ 0x00008000
+#define __PF_VF_ENABLE_ 0x00004000
+#define __PF_DRIVER_ACTIVE_ 0x00002000
+#define __PF_PME_SEND_ENABLE_ 0x00001000
+#define __PF_EXROM_OFFSET__MK 0x00000ff0
+#define __PF_EXROM_OFFSET__SH 4
+#define __PF_EXROM_OFFSET_(_v) ((_v) << __PF_EXROM_OFFSET__SH)
+#define __FC_LL_MODE_ 0x00000008
+#define __PF_INTX_PIN_ 0x00000007
+#define CT2_HOSTFN_PERSONALITY1 (CT2_PCI_APP_BASE + 0x0C)
+#define __PF_NUM_QUEUES1__MK 0xff000000
+#define __PF_NUM_QUEUES1__SH 24
+#define __PF_NUM_QUEUES1_(_v) ((_v) << __PF_NUM_QUEUES1__SH)
+#define __PF_VF_QUE_OFFSET1__MK 0x00ff0000
+#define __PF_VF_QUE_OFFSET1__SH 16
+#define __PF_VF_QUE_OFFSET1_(_v) ((_v) << __PF_VF_QUE_OFFSET1__SH)
+#define __PF_VF_NUM_QUEUES__MK 0x0000ff00
+#define __PF_VF_NUM_QUEUES__SH 8
+#define __PF_VF_NUM_QUEUES_(_v) ((_v) << __PF_VF_NUM_QUEUES__SH)
+#define __PF_VF_QUE_OFFSET_ 0x000000ff
+#define CT2_HOSTFN_PAGE_NUM (CT2_PCI_APP_BASE + 0x18)
+#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38)
+
+/*
+ * Catapult-2 CPQ block registers
+ */
+#define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00)
+#define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20)
+#define CT2_LPU0_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x40)
+#define CT2_LPU1_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x60)
+#define CT2_HOSTFN_LPU0_CMD_STAT (CT2_PCI_CPQ_BASE + 0x80)
+#define CT2_HOSTFN_LPU1_CMD_STAT (CT2_PCI_CPQ_BASE + 0x84)
+#define CT2_LPU0_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x88)
+#define CT2_LPU1_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x8c)
+#define CT2_HOSTFN_LPU0_READ_STAT (CT2_PCI_CPQ_BASE + 0x90)
+#define CT2_HOSTFN_LPU1_READ_STAT (CT2_PCI_CPQ_BASE + 0x94)
+#define CT2_LPU0_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x98)
+#define CT2_LPU1_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x9C)
+#define CT2_HOST_SEM0_REG 0x000148f0
+#define CT2_HOST_SEM1_REG 0x000148f4
+#define CT2_HOST_SEM2_REG 0x000148f8
+#define CT2_HOST_SEM3_REG 0x000148fc
+#define CT2_HOST_SEM4_REG 0x00014900
+#define CT2_HOST_SEM5_REG 0x00014904
+#define CT2_HOST_SEM6_REG 0x00014908
+#define CT2_HOST_SEM7_REG 0x0001490c
+#define CT2_HOST_SEM0_INFO_REG 0x000148b0
+#define CT2_HOST_SEM1_INFO_REG 0x000148b4
+#define CT2_HOST_SEM2_INFO_REG 0x000148b8
+#define CT2_HOST_SEM3_INFO_REG 0x000148bc
+#define CT2_HOST_SEM4_INFO_REG 0x000148c0
+#define CT2_HOST_SEM5_INFO_REG 0x000148c4
+#define CT2_HOST_SEM6_INFO_REG 0x000148c8
+#define CT2_HOST_SEM7_INFO_REG 0x000148cc
+
+#define CT2_APP_PLL_LCLK_CTL_REG 0x00014808
+#define __APP_LPUCLK_HALFSPEED 0x40000000
+#define __APP_PLL_LCLK_LOAD 0x20000000
+#define __APP_PLL_LCLK_FBCNT_MK 0x1fe00000
+#define __APP_PLL_LCLK_FBCNT_SH 21
+#define __APP_PLL_LCLK_FBCNT(_v) ((_v) << __APP_PLL_LCLK_FBCNT_SH)
+enum {
+ __APP_PLL_LCLK_FBCNT_425_MHZ = 6,
+ __APP_PLL_LCLK_FBCNT_468_MHZ = 4,
+};
+#define __APP_PLL_LCLK_EXTFB 0x00000800
+#define __APP_PLL_LCLK_ENOUTS 0x00000400
+#define __APP_PLL_LCLK_RATE 0x00000010
+#define CT2_APP_PLL_SCLK_CTL_REG 0x0001480c
+#define __P_SCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_SCLK_REFCLK_SEL 0x40000000
+#define __APP_PLL_SCLK_CLK_DIV2 0x20000000
+#define __APP_PLL_SCLK_LOAD 0x10000000
+#define __APP_PLL_SCLK_FBCNT_MK 0x0ff00000
+#define __APP_PLL_SCLK_FBCNT_SH 20
+#define __APP_PLL_SCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH)
+enum {
+ __APP_PLL_SCLK_FBCNT_NORM = 6,
+ __APP_PLL_SCLK_FBCNT_10G_FC = 10,
+};
+#define __APP_PLL_SCLK_EXTFB 0x00000800
+#define __APP_PLL_SCLK_ENOUTS 0x00000400
+#define __APP_PLL_SCLK_RATE 0x00000010
+#define CT2_PCIE_MISC_REG 0x00014804
+#define __ETH_CLK_ENABLE_PORT1 0x00000010
+#define CT2_CHIP_MISC_PRG 0x000148a4
+#define __ETH_CLK_ENABLE_PORT0 0x00004000
+#define __APP_LPU_SPEED 0x00000002
+#define CT2_MBIST_STAT_REG 0x00014818
+#define CT2_MBIST_CTL_REG 0x0001481c
+#define CT2_PMM_1T_CONTROL_REG_P0 0x0002381c
+#define __PMM_1T_PNDB_P 0x00000002
+#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c
+#define CT2_WGN_STATUS 0x00014990
+#define __WGN_READY 0x00000400
+#define __GLBL_PF_VF_CFG_RDY 0x00000200
+#define CT2_NFC_CSR_SET_REG 0x00027424
+#define __HALT_NFC_CONTROLLER 0x00000002
+#define __NFC_CONTROLLER_HALTED 0x00001000
+
+#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0
+#define __CSI_MAC_RESET 0x00000010
+#define __CSI_MAC_AHB_RESET 0x00000008
+#define CT2_CSI_MAC1_CONTROL_REG 0x000270d4
+#define CT2_CSI_MAC_CONTROL_REG(__n) \
+ (CT2_CSI_MAC0_CONTROL_REG + \
+ (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG))
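The parameterized form above derives each port's CSI MAC control register from the 4-byte stride between the port 0 and port 1 addresses. As a rough sketch (not taken from this patch), a reset helper might use it like this, assuming an already ioremap'ed register base `rb`:

static void csi_macs_assert_reset(void __iomem *rb)
{
        int port;
        u32 r32;

        for (port = 0; port < 2; port++) {
                /* CT2_CSI_MAC_CONTROL_REG(0) == 0x000270d0,
                 * CT2_CSI_MAC_CONTROL_REG(1) == 0x000270d4 */
                r32 = readl(rb + CT2_CSI_MAC_CONTROL_REG(port));
                writel(r32 | __CSI_MAC_RESET | __CSI_MAC_AHB_RESET,
                       rb + CT2_CSI_MAC_CONTROL_REG(port));
        }
}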
+
+/*
+ * Name semaphore registers based on usage
+ */
+#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
+#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
+#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
+#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
+#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
+
+/*
+ * CT2 semaphore register locations changed
+ */
+#define CT2_BFA_IOC0_HBEAT_REG CT2_HOST_SEM0_INFO_REG
+#define CT2_BFA_IOC0_STATE_REG CT2_HOST_SEM1_INFO_REG
+#define CT2_BFA_IOC1_HBEAT_REG CT2_HOST_SEM2_INFO_REG
+#define CT2_BFA_IOC1_STATE_REG CT2_HOST_SEM3_INFO_REG
+#define CT2_BFA_FW_USE_COUNT CT2_HOST_SEM4_INFO_REG
+#define CT2_BFA_IOC_FAIL_SYNC CT2_HOST_SEM5_INFO_REG
+
+#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
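CPE_Q_NUM() and RME_Q_NUM() flatten a (host function, per-function queue) pair into a single queue index, four queues per function, and that index is also the bit position used by the per-queue interrupt defines below. A hedged sketch of the mapping, with helper names invented here:

/* Sketch: fn=1, q=2 gives flat queue 6, i.e. __HFN_INT_CPE_Q6 / __HFN_INT_RME_Q6. */
static inline u32 hfn_cpe_intr_bit(int fn, int q)
{
        return __HFN_INT_CPE_Q0 << CPE_Q_NUM(fn, q);
}

static inline u32 hfn_rme_intr_bit(int fn, int q)
{
        return __HFN_INT_RME_Q0 << RME_Q_NUM(fn, q);
}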
+
+/*
+ * And corresponding host interrupt status bit field defines
+ */
+#define __HFN_INT_CPE_Q0 0x00000001U
+#define __HFN_INT_CPE_Q1 0x00000002U
+#define __HFN_INT_CPE_Q2 0x00000004U
+#define __HFN_INT_CPE_Q3 0x00000008U
+#define __HFN_INT_CPE_Q4 0x00000010U
+#define __HFN_INT_CPE_Q5 0x00000020U
+#define __HFN_INT_CPE_Q6 0x00000040U
+#define __HFN_INT_CPE_Q7 0x00000080U
+#define __HFN_INT_RME_Q0 0x00000100U
+#define __HFN_INT_RME_Q1 0x00000200U
+#define __HFN_INT_RME_Q2 0x00000400U
+#define __HFN_INT_RME_Q3 0x00000800U
+#define __HFN_INT_RME_Q4 0x00001000U
+#define __HFN_INT_RME_Q5 0x00002000U
+#define __HFN_INT_RME_Q6 0x00004000U
+#define __HFN_INT_RME_Q7 0x00008000U
+#define __HFN_INT_ERR_EMC 0x00010000U
+#define __HFN_INT_ERR_LPU0 0x00020000U
+#define __HFN_INT_ERR_LPU1 0x00040000U
+#define __HFN_INT_ERR_PSS 0x00080000U
+#define __HFN_INT_MBOX_LPU0 0x00100000U
+#define __HFN_INT_MBOX_LPU1 0x00200000U
+#define __HFN_INT_MBOX1_LPU0 0x00400000U
+#define __HFN_INT_MBOX1_LPU1 0x00800000U
+#define __HFN_INT_LL_HALT 0x01000000U
+#define __HFN_INT_CPE_MASK 0x000000ffU
+#define __HFN_INT_RME_MASK 0x0000ff00U
+#define __HFN_INT_ERR_MASK \
+ (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \
+ __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT)
+#define __HFN_INT_FN0_MASK \
+ (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+ __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+ __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0)
+#define __HFN_INT_FN1_MASK \
+ (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+ __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+ __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1)
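The composite masks let an interrupt handler split one raw status word into chip-level errors and per-function CPE/RME queue work. A minimal sketch of that classification, assuming the caller already read the status register; the real ISR lives in the driver proper:

static void hfn_classify_intr(u32 intr_status, int is_fn1,
                              u32 *err_bits, u32 *cpe_bits, u32 *rme_bits)
{
        u32 qmask = is_fn1 ? __HFN_INT_FN1_MASK : __HFN_INT_FN0_MASK;

        *err_bits = intr_status & __HFN_INT_ERR_MASK;
        *cpe_bits = intr_status & qmask & __HFN_INT_CPE_MASK;
        *rme_bits = intr_status & qmask & __HFN_INT_RME_MASK;
}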
+
+/*
+ * Host interrupt status defines for catapult-2
+ */
+#define __HFN_INT_MBOX_LPU0_CT2 0x00010000U
+#define __HFN_INT_MBOX_LPU1_CT2 0x00020000U
+#define __HFN_INT_ERR_PSS_CT2 0x00040000U
+#define __HFN_INT_ERR_LPU0_CT2 0x00080000U
+#define __HFN_INT_ERR_LPU1_CT2 0x00100000U
+#define __HFN_INT_CPQ_HALT_CT2 0x00200000U
+#define __HFN_INT_ERR_WGN_CT2 0x00400000U
+#define __HFN_INT_ERR_LEHRX_CT2 0x00800000U
+#define __HFN_INT_ERR_LEHTX_CT2 0x01000000U
+#define __HFN_INT_ERR_MASK_CT2 \
+ (__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \
+ __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \
+ __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \
+ __HFN_INT_ERR_LEHTX_CT2)
+#define __HFN_INT_FN0_MASK_CT2 \
+ (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+ __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+ __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2)
+#define __HFN_INT_FN1_MASK_CT2 \
+ (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+ __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+ __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2)
+
+/*
+ * asic memory map.
+ */
+#define PSS_SMEM_PAGE_START 0x8000
+#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
+#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
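Shared memory is addressed through 32 KB pages: the page number is the linear offset shifted right by 15 (added to a base page) and the in-page offset is the low 15 bits. A small sketch with a worked value, assuming a hypothetical flat offset of 0x12345:

/* PSS_SMEM_PGNUM(PSS_SMEM_PAGE_START, 0x12345) == 0x8000 + 2 == 0x8002
 * PSS_SMEM_PGOFF(0x12345)                      == 0x2345 */
static inline void smem_locate(u32 pg0, u32 ma, u32 *pg, u32 *off)
{
        *pg  = PSS_SMEM_PGNUM(pg0, ma);
        *off = PSS_SMEM_PGOFF(ma);
}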
+
+#endif /* __BFI_REG_H__ */
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
index 97a61b4d81b..e1f1e3448f9 100644
--- a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -19,6 +19,23 @@ struct b577xx_doorbell_hdr {
/*
* doorbell message sent to the chip
*/
+struct b577xx_doorbell {
+#if defined(__BIG_ENDIAN)
+ u16 zero_fill2;
+ u8 zero_fill1;
+ struct b577xx_doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct b577xx_doorbell_hdr header;
+ u8 zero_fill1;
+ u16 zero_fill2;
+#endif
+};
+
+
+
+/*
+ * doorbell message sent to the chip
+ */
struct b577xx_doorbell_set_prod {
#if defined(__BIG_ENDIAN)
u16 prod;
@@ -39,106 +56,63 @@ struct regpair {
/*
- * Fixed size structure in order to plant it in Union structure
+ * ABTS info $$KEEP_ENDIANNESS$$
*/
-struct fcoe_abts_rsp_union {
- u32 r_ctl;
- u32 abts_rsp_payload[7];
+struct fcoe_abts_info {
+ __le16 aborted_task_id;
+ __le16 reserved0;
+ __le32 reserved1;
};
/*
- * 4 regs size
+ * Fixed size structure in order to plant it in Union structure
+ * $$KEEP_ENDIANNESS$$
*/
-struct fcoe_bd_ctx {
- u32 buf_addr_hi;
- u32 buf_addr_lo;
-#if defined(__BIG_ENDIAN)
- u16 rsrv0;
- u16 buf_len;
-#elif defined(__LITTLE_ENDIAN)
- u16 buf_len;
- u16 rsrv0;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 rsrv1;
- u16 flags;
-#elif defined(__LITTLE_ENDIAN)
- u16 flags;
- u16 rsrv1;
-#endif
+struct fcoe_abts_rsp_union {
+ u8 r_ctl;
+ u8 rsrv[3];
+ __le32 abts_rsp_payload[7];
};
-struct fcoe_cleanup_flow_info {
-#if defined(__BIG_ENDIAN)
- u16 reserved1;
- u16 task_id;
-#elif defined(__LITTLE_ENDIAN)
- u16 task_id;
- u16 reserved1;
-#endif
- u32 reserved2[7];
+/*
+ * 4 regs size $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_bd_ctx {
+ __le32 buf_addr_hi;
+ __le32 buf_addr_lo;
+ __le16 buf_len;
+ __le16 rsrv0;
+ __le16 flags;
+ __le16 rsrv1;
};
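With the $$KEEP_ENDIANNESS$$ conversion the buffer descriptor is fixed little-endian on the wire, so the host fills it with cpu_to_le*() conversions instead of relying on #ifdef'd field order. A minimal sketch, assuming the caller already has a mapped DMA address and length; the flag encoding is left to the caller:

static void fcoe_fill_bd(struct fcoe_bd_ctx *bd, dma_addr_t addr,
                         u16 len, u16 flags)
{
        bd->buf_addr_lo = cpu_to_le32(lower_32_bits(addr));
        bd->buf_addr_hi = cpu_to_le32(upper_32_bits(addr));
        bd->buf_len     = cpu_to_le16(len);
        bd->flags       = cpu_to_le16(flags);
        bd->rsrv0       = 0;
        bd->rsrv1       = 0;
}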
-struct fcoe_fcp_cmd_payload {
- u32 opaque[8];
-};
-
-struct fcoe_fc_hdr {
-#if defined(__BIG_ENDIAN)
- u8 cs_ctl;
- u8 s_id[3];
-#elif defined(__LITTLE_ENDIAN)
- u8 s_id[3];
- u8 cs_ctl;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 r_ctl;
- u8 d_id[3];
-#elif defined(__LITTLE_ENDIAN)
- u8 d_id[3];
- u8 r_ctl;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 seq_id;
- u8 df_ctl;
- u16 seq_cnt;
-#elif defined(__LITTLE_ENDIAN)
- u16 seq_cnt;
- u8 df_ctl;
- u8 seq_id;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 type;
- u8 f_ctl[3];
-#elif defined(__LITTLE_ENDIAN)
- u8 f_ctl[3];
- u8 type;
-#endif
- u32 parameters;
-#if defined(__BIG_ENDIAN)
- u16 ox_id;
- u16 rx_id;
-#elif defined(__LITTLE_ENDIAN)
- u16 rx_id;
- u16 ox_id;
-#endif
+/*
+ * FCoE cached sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cached_sge_ctx {
+ struct regpair cur_buf_addr;
+ __le16 cur_buf_rem;
+ __le16 second_buf_rem;
+ struct regpair second_buf_addr;
};
-struct fcoe_fc_frame {
- struct fcoe_fc_hdr fc_hdr;
- u32 reserved0[2];
-};
-union fcoe_cmd_flow_info {
- struct fcoe_fcp_cmd_payload fcp_cmd_payload;
- struct fcoe_fc_frame mp_fc_frame;
+/*
+ * Cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cleanup_info {
+ __le16 cleaned_task_id;
+ __le16 rolled_tx_seq_cnt;
+ __le32 rolled_tx_data_offset;
};
-
+/*
+ * Fcp RSP flags $$KEEP_ENDIANNESS$$
+ */
struct fcoe_fcp_rsp_flags {
u8 flags;
#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
@@ -155,95 +129,168 @@ struct fcoe_fcp_rsp_flags {
#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
};
-
+/*
+ * Fcp RSP payload $$KEEP_ENDIANNESS$$
+ */
struct fcoe_fcp_rsp_payload {
struct regpair reserved0;
- u32 fcp_resid;
-#if defined(__BIG_ENDIAN)
- u16 retry_delay_timer;
- struct fcoe_fcp_rsp_flags fcp_flags;
- u8 scsi_status_code;
-#elif defined(__LITTLE_ENDIAN)
+ __le32 fcp_resid;
u8 scsi_status_code;
struct fcoe_fcp_rsp_flags fcp_flags;
- u16 retry_delay_timer;
-#endif
- u32 fcp_rsp_len;
- u32 fcp_sns_len;
+ __le16 retry_delay_timer;
+ __le32 fcp_rsp_len;
+ __le32 fcp_sns_len;
};
-
/*
* Fixed size structure in order to plant it in Union structure
+ * $$KEEP_ENDIANNESS$$
*/
struct fcoe_fcp_rsp_union {
struct fcoe_fcp_rsp_payload payload;
struct regpair reserved0;
};
+/*
+ * FC header $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_hdr {
+ u8 s_id[3];
+ u8 cs_ctl;
+ u8 d_id[3];
+ u8 r_ctl;
+ __le16 seq_cnt;
+ u8 df_ctl;
+ u8 seq_id;
+ u8 f_ctl[3];
+ u8 type;
+ __le32 parameters;
+ __le16 rx_id;
+ __le16 ox_id;
+};
-struct fcoe_fcp_xfr_rdy_payload {
- u32 burst_len;
- u32 data_ro;
+/*
+ * FC header union $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mp_rsp_union {
+ struct fcoe_fc_hdr fc_hdr;
+ __le32 mp_payload_len;
+ __le32 rsrv;
};
-struct fcoe_read_flow_info {
- struct fcoe_fc_hdr fc_data_in_hdr;
- u32 reserved[2];
+/*
+ * Completion information $$KEEP_ENDIANNESS$$
+ */
+union fcoe_comp_flow_info {
+ struct fcoe_fcp_rsp_union fcp_rsp;
+ struct fcoe_abts_rsp_union abts_rsp;
+ struct fcoe_mp_rsp_union mp_rsp;
+ __le32 opaque[8];
};
-struct fcoe_write_flow_info {
- struct fcoe_fc_hdr fc_data_out_hdr;
- struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload;
+
+/*
+ * External ABTS info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_abts_info {
+ __le32 rsrv0[6];
+ struct fcoe_abts_info ctx;
};
-union fcoe_rsp_flow_info {
- struct fcoe_fcp_rsp_union fcp_rsp;
- struct fcoe_abts_rsp_union abts_rsp;
+
+/*
+ * External cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_cleanup_info {
+ __le32 rsrv0[6];
+ struct fcoe_cleanup_info ctx;
};
+
/*
- * 32 bytes used for general purposes
+ * Fcoe FW Tx sequence context $$KEEP_ENDIANNESS$$
*/
-union fcoe_general_task_ctx {
- union fcoe_cmd_flow_info cmd_info;
- struct fcoe_read_flow_info read_info;
- struct fcoe_write_flow_info write_info;
- union fcoe_rsp_flow_info rsp_info;
- struct fcoe_cleanup_flow_info cleanup_info;
- u32 comp_info[8];
+struct fcoe_fw_tx_seq_ctx {
+ __le32 data_offset;
+ __le16 seq_cnt;
+ __le16 rsrv0;
+};
+
+/*
+ * Fcoe external FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_fw_tx_seq_ctx {
+ __le32 rsrv0[6];
+ struct fcoe_fw_tx_seq_ctx ctx;
+};
+
+
+/*
+ * FCoE multiple sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mul_sges_ctx {
+ struct regpair cur_sge_addr;
+ __le16 cur_sge_off;
+ u8 cur_sge_idx;
+ u8 sgl_size;
+};
+
+/*
+ * FCoE external multiple sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_mul_sges_ctx {
+ struct fcoe_mul_sges_ctx mul_sgl;
+ struct regpair rsrv0;
};
/*
- * FCoE KCQ CQE parameters
+ * FCP CMD payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_cmd_payload {
+ __le32 opaque[8];
+};
+
+
+
+
+
+/*
+ * Fcp xfr rdy payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_xfr_rdy_payload {
+ __le32 burst_len;
+ __le32 data_ro;
+};
+
+
+/*
+ * FC frame $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_frame {
+ struct fcoe_fc_hdr fc_hdr;
+ __le32 reserved0[2];
+};
+
+
+
+
+/*
+ * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$
*/
union fcoe_kcqe_params {
- u32 reserved0[4];
+ __le32 reserved0[4];
};
/*
- * FCoE KCQ CQE
+ * FCoE KCQ CQE $$KEEP_ENDIANNESS$$
*/
struct fcoe_kcqe {
- u32 fcoe_conn_id;
- u32 completion_status;
- u32 fcoe_conn_context_id;
+ __le32 fcoe_conn_id;
+ __le32 completion_status;
+ __le32 fcoe_conn_context_id;
union fcoe_kcqe_params params;
-#if defined(__BIG_ENDIAN)
- u8 flags;
-#define FCOE_KCQE_RESERVED0 (0x7<<0)
-#define FCOE_KCQE_RESERVED0_SHIFT 0
-#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
-#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
-#define FCOE_KCQE_LAYER_CODE (0x7<<4)
-#define FCOE_KCQE_LAYER_CODE_SHIFT 4
-#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
-#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
- u8 op_code;
- u16 qe_self_seq;
-#elif defined(__LITTLE_ENDIAN)
- u16 qe_self_seq;
+ __le16 qe_self_seq;
u8 op_code;
u8 flags;
#define FCOE_KCQE_RESERVED0 (0x7<<0)
@@ -254,23 +301,14 @@ struct fcoe_kcqe {
#define FCOE_KCQE_LAYER_CODE_SHIFT 4
#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
-#endif
};
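The flags byte packs a layer code and completion attributes, extracted with the usual mask-and-shift pairs. A hedged example of checking the layer code; whether the completion layer value matches FCOE_KWQE_LAYER_CODE (7, from bnx2fc_constants.h) is an assumption here, the mask-and-shift mechanics are the point:

static bool fcoe_kcqe_is_fcoe_layer(const struct fcoe_kcqe *kcqe)
{
        u8 layer = (kcqe->flags & FCOE_KCQE_LAYER_CODE) >>
                   FCOE_KCQE_LAYER_CODE_SHIFT;

        return layer == FCOE_KWQE_LAYER_CODE;
}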
+
+
/*
- * FCoE KWQE header
+ * FCoE KWQE header $$KEEP_ENDIANNESS$$
*/
struct fcoe_kwqe_header {
-#if defined(__BIG_ENDIAN)
- u8 flags;
-#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
-#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
-#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
-#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
-#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
-#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
- u8 op_code;
-#elif defined(__LITTLE_ENDIAN)
u8 op_code;
u8 flags;
#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
@@ -279,50 +317,23 @@ struct fcoe_kwqe_header {
#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
-#endif
};
/*
- * FCoE firmware init request 1
+ * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$
*/
struct fcoe_kwqe_init1 {
-#if defined(__BIG_ENDIAN)
+ __le16 num_tasks;
struct fcoe_kwqe_header hdr;
- u16 num_tasks;
-#elif defined(__LITTLE_ENDIAN)
- u16 num_tasks;
- struct fcoe_kwqe_header hdr;
-#endif
- u32 task_list_pbl_addr_lo;
- u32 task_list_pbl_addr_hi;
- u32 dummy_buffer_addr_lo;
- u32 dummy_buffer_addr_hi;
-#if defined(__BIG_ENDIAN)
- u16 rq_num_wqes;
- u16 sq_num_wqes;
-#elif defined(__LITTLE_ENDIAN)
- u16 sq_num_wqes;
- u16 rq_num_wqes;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 cq_num_wqes;
- u16 rq_buffer_log_size;
-#elif defined(__LITTLE_ENDIAN)
- u16 rq_buffer_log_size;
- u16 cq_num_wqes;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 flags;
-#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
-#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
-#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
-#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
-#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
-#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
- u8 num_sessions_log;
- u16 mtu;
-#elif defined(__LITTLE_ENDIAN)
- u16 mtu;
+ __le32 task_list_pbl_addr_lo;
+ __le32 task_list_pbl_addr_hi;
+ __le32 dummy_buffer_addr_lo;
+ __le32 dummy_buffer_addr_hi;
+ __le16 sq_num_wqes;
+ __le16 rq_num_wqes;
+ __le16 rq_buffer_log_size;
+ __le16 cq_num_wqes;
+ __le16 mtu;
u8 num_sessions_log;
u8 flags;
#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
@@ -331,113 +342,73 @@ struct fcoe_kwqe_init1 {
#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
-#endif
};
/*
- * FCoE firmware init request 2
+ * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$
*/
struct fcoe_kwqe_init2 {
-#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
- u16 reserved0;
+ u8 hsi_major_version;
+ u8 hsi_minor_version;
struct fcoe_kwqe_header hdr;
-#endif
- u32 hash_tbl_pbl_addr_lo;
- u32 hash_tbl_pbl_addr_hi;
- u32 t2_hash_tbl_addr_lo;
- u32 t2_hash_tbl_addr_hi;
- u32 t2_ptr_hash_tbl_addr_lo;
- u32 t2_ptr_hash_tbl_addr_hi;
- u32 free_list_count;
+ __le32 hash_tbl_pbl_addr_lo;
+ __le32 hash_tbl_pbl_addr_hi;
+ __le32 t2_hash_tbl_addr_lo;
+ __le32 t2_hash_tbl_addr_hi;
+ __le32 t2_ptr_hash_tbl_addr_lo;
+ __le32 t2_ptr_hash_tbl_addr_hi;
+ __le32 free_list_count;
};
/*
- * FCoE firmware init request 3
+ * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$
*/
struct fcoe_kwqe_init3 {
-#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
- u16 reserved0;
+ __le16 reserved0;
struct fcoe_kwqe_header hdr;
-#endif
- u32 error_bit_map_lo;
- u32 error_bit_map_hi;
-#if defined(__BIG_ENDIAN)
- u8 reserved21[3];
- u8 cached_session_enable;
-#elif defined(__LITTLE_ENDIAN)
- u8 cached_session_enable;
+ __le32 error_bit_map_lo;
+ __le32 error_bit_map_hi;
+ u8 perf_config;
u8 reserved21[3];
-#endif
- u32 reserved2[4];
+ __le32 reserved2[4];
};
/*
- * FCoE connection offload request 1
+ * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$
*/
struct fcoe_kwqe_conn_offload1 {
-#if defined(__BIG_ENDIAN)
+ __le16 fcoe_conn_id;
struct fcoe_kwqe_header hdr;
- u16 fcoe_conn_id;
-#elif defined(__LITTLE_ENDIAN)
- u16 fcoe_conn_id;
- struct fcoe_kwqe_header hdr;
-#endif
- u32 sq_addr_lo;
- u32 sq_addr_hi;
- u32 rq_pbl_addr_lo;
- u32 rq_pbl_addr_hi;
- u32 rq_first_pbe_addr_lo;
- u32 rq_first_pbe_addr_hi;
-#if defined(__BIG_ENDIAN)
- u16 reserved0;
- u16 rq_prod;
-#elif defined(__LITTLE_ENDIAN)
- u16 rq_prod;
- u16 reserved0;
-#endif
+ __le32 sq_addr_lo;
+ __le32 sq_addr_hi;
+ __le32 rq_pbl_addr_lo;
+ __le32 rq_pbl_addr_hi;
+ __le32 rq_first_pbe_addr_lo;
+ __le32 rq_first_pbe_addr_hi;
+ __le16 rq_prod;
+ __le16 reserved0;
};
/*
- * FCoE connection offload request 2
+ * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$
*/
struct fcoe_kwqe_conn_offload2 {
-#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 tx_max_fc_pay_len;
-#elif defined(__LITTLE_ENDIAN)
- u16 tx_max_fc_pay_len;
+ __le16 tx_max_fc_pay_len;
struct fcoe_kwqe_header hdr;
-#endif
- u32 cq_addr_lo;
- u32 cq_addr_hi;
- u32 xferq_addr_lo;
- u32 xferq_addr_hi;
- u32 conn_db_addr_lo;
- u32 conn_db_addr_hi;
- u32 reserved1;
+ __le32 cq_addr_lo;
+ __le32 cq_addr_hi;
+ __le32 xferq_addr_lo;
+ __le32 xferq_addr_hi;
+ __le32 conn_db_addr_lo;
+ __le32 conn_db_addr_hi;
+ __le32 reserved1;
};
/*
- * FCoE connection offload request 3
+ * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$
*/
struct fcoe_kwqe_conn_offload3 {
-#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 vlan_tag;
-#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
-#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
-#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
-#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
-#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
-#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
-#elif defined(__LITTLE_ENDIAN)
- u16 vlan_tag;
+ __le16 vlan_tag;
#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
@@ -445,34 +416,8 @@ struct fcoe_kwqe_conn_offload3 {
#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
struct fcoe_kwqe_header hdr;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 tx_max_conc_seqs_c3;
- u8 s_id[3];
-#elif defined(__LITTLE_ENDIAN)
u8 s_id[3];
u8 tx_max_conc_seqs_c3;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 flags;
-#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
-#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
-#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
-#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
-#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
-#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
-#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
- u8 d_id[3];
-#elif defined(__LITTLE_ENDIAN)
u8 d_id[3];
u8 flags;
#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
@@ -491,79 +436,44 @@ struct fcoe_kwqe_conn_offload3 {
#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
-#endif
- u32 reserved;
- u32 confq_first_pbe_addr_lo;
- u32 confq_first_pbe_addr_hi;
-#if defined(__BIG_ENDIAN)
- u16 rx_max_fc_pay_len;
- u16 tx_total_conc_seqs;
-#elif defined(__LITTLE_ENDIAN)
- u16 tx_total_conc_seqs;
- u16 rx_max_fc_pay_len;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 rx_open_seqs_exch_c3;
- u8 rx_max_conc_seqs_c3;
- u16 rx_total_conc_seqs;
-#elif defined(__LITTLE_ENDIAN)
- u16 rx_total_conc_seqs;
+ __le32 reserved;
+ __le32 confq_first_pbe_addr_lo;
+ __le32 confq_first_pbe_addr_hi;
+ __le16 tx_total_conc_seqs;
+ __le16 rx_max_fc_pay_len;
+ __le16 rx_total_conc_seqs;
u8 rx_max_conc_seqs_c3;
u8 rx_open_seqs_exch_c3;
-#endif
};
/*
- * FCoE connection offload request 4
+ * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$
*/
struct fcoe_kwqe_conn_offload4 {
-#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u8 reserved2;
- u8 e_d_tov_timer_val;
-#elif defined(__LITTLE_ENDIAN)
u8 e_d_tov_timer_val;
u8 reserved2;
struct fcoe_kwqe_header hdr;
-#endif
- u8 src_mac_addr_lo32[4];
-#if defined(__BIG_ENDIAN)
- u8 dst_mac_addr_hi16[2];
- u8 src_mac_addr_hi16[2];
-#elif defined(__LITTLE_ENDIAN)
- u8 src_mac_addr_hi16[2];
- u8 dst_mac_addr_hi16[2];
-#endif
- u8 dst_mac_addr_lo32[4];
- u32 lcq_addr_lo;
- u32 lcq_addr_hi;
- u32 confq_pbl_base_addr_lo;
- u32 confq_pbl_base_addr_hi;
+ u8 src_mac_addr_lo[2];
+ u8 src_mac_addr_mid[2];
+ u8 src_mac_addr_hi[2];
+ u8 dst_mac_addr_hi[2];
+ u8 dst_mac_addr_lo[2];
+ u8 dst_mac_addr_mid[2];
+ __le32 lcq_addr_lo;
+ __le32 lcq_addr_hi;
+ __le32 confq_pbl_base_addr_lo;
+ __le32 confq_pbl_base_addr_hi;
};
/*
- * FCoE connection enable request
+ * FCoE connection enable request $$KEEP_ENDIANNESS$$
*/
struct fcoe_kwqe_conn_enable_disable {
-#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
- u16 reserved0;
+ __le16 reserved0;
struct fcoe_kwqe_header hdr;
-#endif
- u8 src_mac_addr_lo32[4];
-#if defined(__BIG_ENDIAN)
- u16 vlan_tag;
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
-#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
- u8 src_mac_addr_hi16[2];
-#elif defined(__LITTLE_ENDIAN)
- u8 src_mac_addr_hi16[2];
+ u8 src_mac_addr_lo[2];
+ u8 src_mac_addr_mid[2];
+ u8 src_mac_addr_hi[2];
u16 vlan_tag;
#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
@@ -571,82 +481,52 @@ struct fcoe_kwqe_conn_enable_disable {
#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
-#endif
- u8 dst_mac_addr_lo32[4];
-#if defined(__BIG_ENDIAN)
- u16 reserved1;
- u8 dst_mac_addr_hi16[2];
-#elif defined(__LITTLE_ENDIAN)
- u8 dst_mac_addr_hi16[2];
- u16 reserved1;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 vlan_flag;
- u8 s_id[3];
-#elif defined(__LITTLE_ENDIAN)
+ u8 dst_mac_addr_lo[2];
+ u8 dst_mac_addr_mid[2];
+ u8 dst_mac_addr_hi[2];
+ __le16 reserved1;
u8 s_id[3];
u8 vlan_flag;
-#endif
-#if defined(__BIG_ENDIAN)
- u8 reserved3;
- u8 d_id[3];
-#elif defined(__LITTLE_ENDIAN)
u8 d_id[3];
u8 reserved3;
-#endif
- u32 context_id;
- u32 conn_id;
- u32 reserved4;
+ __le32 context_id;
+ __le32 conn_id;
+ __le32 reserved4;
};
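The vlan_tag field carries a 12-bit VLAN ID, the CFI bit and a 3-bit priority in one 16-bit word, packed with the masks and shifts defined inside the structure. A sketch with caller-supplied values and the CFI bit left clear:

static u16 fcoe_pack_vlan_tag(u16 vid, u8 prio)
{
        return (vid & FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID) |
               ((u16)(prio & 0x7) <<
                FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT);
}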
/*
- * FCoE connection destroy request
+ * FCoE connection destroy request $$KEEP_ENDIANNESS$$
*/
struct fcoe_kwqe_conn_destroy {
-#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
- u16 reserved0;
+ __le16 reserved0;
struct fcoe_kwqe_header hdr;
-#endif
- u32 context_id;
- u32 conn_id;
- u32 reserved1[5];
+ __le32 context_id;
+ __le32 conn_id;
+ __le32 reserved1[5];
};
/*
- * FCoe destroy request
+ * FCoE destroy request $$KEEP_ENDIANNESS$$
*/
struct fcoe_kwqe_destroy {
-#if defined(__BIG_ENDIAN)
- struct fcoe_kwqe_header hdr;
- u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
- u16 reserved0;
+ __le16 reserved0;
struct fcoe_kwqe_header hdr;
-#endif
- u32 reserved1[7];
+ __le32 reserved1[7];
};
/*
- * FCoe statistics request
+ * FCoE statistics request $$KEEP_ENDIANNESS$$
*/
struct fcoe_kwqe_stat {
-#if defined(__BIG_ENDIAN)
+ __le16 reserved0;
struct fcoe_kwqe_header hdr;
- u16 reserved0;
-#elif defined(__LITTLE_ENDIAN)
- u16 reserved0;
- struct fcoe_kwqe_header hdr;
-#endif
- u32 stat_params_addr_lo;
- u32 stat_params_addr_hi;
- u32 reserved1[5];
+ __le32 stat_params_addr_lo;
+ __le32 stat_params_addr_hi;
+ __le32 reserved1[5];
};
/*
- * FCoE KWQ WQE
+ * FCoE KWQ WQE $$KEEP_ENDIANNESS$$
*/
union fcoe_kwqe {
struct fcoe_kwqe_init1 init1;
@@ -662,19 +542,42 @@ union fcoe_kwqe {
struct fcoe_kwqe_stat statistics;
};
-struct fcoe_mul_sges_ctx {
- struct regpair cur_sge_addr;
-#if defined(__BIG_ENDIAN)
- u8 sgl_size;
- u8 cur_sge_idx;
- u16 cur_sge_off;
-#elif defined(__LITTLE_ENDIAN)
- u16 cur_sge_off;
- u8 cur_sge_idx;
- u8 sgl_size;
-#endif
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/*
+ * TX SGL context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_sgl_union_ctx {
+ struct fcoe_cached_sge_ctx cached_sge;
+ struct fcoe_ext_mul_sges_ctx sgl;
+ __le32 opaque[5];
};
+/*
+ * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_read_flow_info {
+ union fcoe_sgl_union_ctx sgl_ctx;
+ __le32 rsrv0[3];
+};
+
+
+/*
+ * Fcoe stat context $$KEEP_ENDIANNESS$$
+ */
struct fcoe_s_stat_ctx {
u8 flags;
#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
@@ -693,51 +596,34 @@ struct fcoe_s_stat_ctx {
#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
};
-struct fcoe_seq_ctx {
-#if defined(__BIG_ENDIAN)
- u16 low_seq_cnt;
- struct fcoe_s_stat_ctx s_stat;
- u8 seq_id;
-#elif defined(__LITTLE_ENDIAN)
+/*
+ * Fcoe rx seq context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_rx_seq_ctx {
u8 seq_id;
struct fcoe_s_stat_ctx s_stat;
- u16 low_seq_cnt;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 err_seq_cnt;
- u16 high_seq_cnt;
-#elif defined(__LITTLE_ENDIAN)
- u16 high_seq_cnt;
- u16 err_seq_cnt;
-#endif
- u32 low_exp_ro;
- u32 high_exp_ro;
+ __le16 seq_cnt;
+ __le32 low_exp_ro;
+ __le32 high_exp_ro;
};
-struct fcoe_single_sge_ctx {
- struct regpair cur_buf_addr;
-#if defined(__BIG_ENDIAN)
- u16 reserved0;
- u16 cur_buf_rem;
-#elif defined(__LITTLE_ENDIAN)
- u16 cur_buf_rem;
- u16 reserved0;
-#endif
-};
-
-union fcoe_sgl_ctx {
- struct fcoe_single_sge_ctx single_sge;
- struct fcoe_mul_sges_ctx mul_sges;
+/*
+ * Fcoe rx_wr union context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_rx_wr_union_ctx {
+ struct fcoe_read_flow_info read_info;
+ union fcoe_comp_flow_info comp_info;
+ __le32 opaque[8];
};
/*
- * FCoE SQ element
+ * FCoE SQ element $$KEEP_ENDIANNESS$$
*/
struct fcoe_sqe {
- u16 wqe;
+ __le16 wqe;
#define FCOE_SQE_TASK_ID (0x7FFF<<0)
#define FCOE_SQE_TASK_ID_SHIFT 0
#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
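Each SQ entry is a single 16-bit word: a 15-bit task id plus a toggle bit that flips on every wrap of the send queue. A hedged sketch of filling one entry, with the toggle state supplied by the caller:

static void fcoe_fill_sqe(struct fcoe_sqe *sqe, u16 xid, bool toggle)
{
        u16 wqe = (xid & FCOE_SQE_TASK_ID) |
                  (toggle ? FCOE_SQE_TOGGLE_BIT : 0);

        sqe->wqe = cpu_to_le16(wqe);
}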
@@ -746,135 +632,141 @@ struct fcoe_sqe {
-struct fcoe_task_ctx_entry_tx_only {
- union fcoe_sgl_ctx sgl_ctx;
+/*
+ * 14 regs $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_only {
+ union fcoe_sgl_union_ctx sgl_ctx;
+ __le32 rsrv0;
};
-struct fcoe_task_ctx_entry_txwr_rxrd {
-#if defined(__BIG_ENDIAN)
- u16 verify_tx_seq;
+/*
+ * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$
+ */
+union fcoe_tx_wr_rx_rd_union_ctx {
+ struct fcoe_fc_frame tx_frame;
+ struct fcoe_fcp_cmd_payload fcp_cmd;
+ struct fcoe_ext_cleanup_info cleanup;
+ struct fcoe_ext_abts_info abts;
+ struct fcoe_ext_fw_tx_seq_ctx tx_seq;
+ __le32 opaque[8];
+};
+
+/*
+ * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd_const {
u8 init_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
- u8 tx_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
-#elif defined(__LITTLE_ENDIAN)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7
u8 tx_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
- u8 init_flags;
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
-#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
- u16 verify_tx_seq;
-#endif
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7
+ __le16 rsrv3;
+ __le32 verify_tx_seq;
};
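init_flags packs the task type, device type, class and cached-SGE count into one byte through the mask/shift pairs above. A sketch of composing just the task type and class bits; the constants come from bnx2fc_constants.h and the remaining bits are left clear for simplicity:

static u8 fcoe_tce_pack_init_flags(u8 task_type, u8 class_type)
{
        return ((task_type << FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT) &
                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE) |
               ((class_type << FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT) &
                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE);
}

A call such as fcoe_tce_pack_init_flags(FCOE_TASK_TYPE_WRITE, FCOE_TASK_CLASS_TYPE_3) would describe a class-3 write task.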
/*
- * Common section. Both TX and RX processing might write and read from it in
- * different flows
+ * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$
*/
-struct fcoe_task_ctx_entry_tx_rx_cmn {
- u32 data_2_trns;
- union fcoe_general_task_ctx general;
-#if defined(__BIG_ENDIAN)
- u16 tx_low_seq_cnt;
- struct fcoe_s_stat_ctx tx_s_stat;
- u8 tx_seq_id;
-#elif defined(__LITTLE_ENDIAN)
- u8 tx_seq_id;
- struct fcoe_s_stat_ctx tx_s_stat;
- u16 tx_low_seq_cnt;
-#endif
- u32 common_flags;
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29)
-#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29
-};
-
-struct fcoe_task_ctx_entry_rxwr_txrd {
-#if defined(__BIG_ENDIAN)
- u16 rx_id;
- u16 rx_flags;
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
-#elif defined(__LITTLE_ENDIAN)
- u16 rx_flags;
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
-#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
- u16 rx_id;
-#endif
+struct fcoe_tce_tx_wr_rx_rd {
+ union fcoe_tx_wr_rx_rd_union_ctx union_ctx;
+ struct fcoe_tce_tx_wr_rx_rd_const const_ctx;
};
-struct fcoe_task_ctx_entry_rx_only {
- struct fcoe_seq_ctx seq_ctx;
- struct fcoe_seq_ctx ooo_seq_ctx;
- u32 rsrv3;
- union fcoe_sgl_ctx sgl_ctx;
+/*
+ * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_const {
+ __le32 data_2_trns;
+ __le32 init_flags;
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24
+};
+
+/*
+ * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_var {
+ __le16 rx_flags;
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME (0x1<<12)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15
+ __le16 rx_id;
+ struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy;
+};
+
+/*
+ * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd {
+ struct fcoe_tce_rx_wr_tx_rd_const const_ctx;
+ struct fcoe_tce_rx_wr_tx_rd_var var_ctx;
+};
+
+/*
+ * tce_rx_only $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_only {
+ struct fcoe_rx_seq_ctx rx_seq_ctx;
+ union fcoe_rx_wr_union_ctx union_ctx;
};
+/*
+ * task_ctx_entry $$KEEP_ENDIANNESS$$
+ */
struct fcoe_task_ctx_entry {
- struct fcoe_task_ctx_entry_tx_only tx_wr_only;
- struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
- struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
- struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
- struct fcoe_task_ctx_entry_rx_only rx_wr_only;
- u32 reserved[4];
+ struct fcoe_tce_tx_only txwr_only;
+ struct fcoe_tce_tx_wr_rx_rd txwr_rxrd;
+ struct fcoe_tce_rx_wr_tx_rd rxwr_txrd;
+ struct fcoe_tce_rx_only rxwr_only;
};
+
+
+
+
+
+
+
+
/*
- * FCoE XFRQ element
+ * FCoE XFRQ element $$KEEP_ENDIANNESS$$
*/
struct fcoe_xfrqe {
- u16 wqe;
+ __le16 wqe;
#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
#define FCOE_XFRQE_TASK_ID_SHIFT 0
#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
@@ -883,22 +775,31 @@ struct fcoe_xfrqe {
/*
- * FCoE CONFQ element
+ * fcoe rx doorbell message sent to the chip $$KEEP_ENDIANNESS$$
+ */
+struct b577xx_fcoe_rx_doorbell {
+ struct b577xx_doorbell_hdr hdr;
+ u8 params;
+#define B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM (0x1F<<0)
+#define B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT 0
+#define B577XX_FCOE_RX_DOORBELL_OPCODE (0x7<<5)
+#define B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT 5
+ __le16 doorbell_cq_cons;
+};
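Ringing the RX doorbell means packing an opcode and an arm count into params and the current CQ consumer index into doorbell_cq_cons. A sketch of that packing; the actual opcode and arm values used by the driver are not shown in this hunk, so they are caller-supplied assumptions here:

static void fcoe_ring_rx_doorbell(struct b577xx_fcoe_rx_doorbell *db,
                                  u8 opcode, u8 arm, u16 cq_cons)
{
        db->params = (arm & B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM) |
                     ((opcode << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT) &
                      B577XX_FCOE_RX_DOORBELL_OPCODE);
        db->doorbell_cq_cons = cpu_to_le16(cq_cons);
}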
+
+
+/*
+ * FCoE CONFQ element $$KEEP_ENDIANNESS$$
*/
struct fcoe_confqe {
-#if defined(__BIG_ENDIAN)
- u16 rx_id;
- u16 ox_id;
-#elif defined(__LITTLE_ENDIAN)
- u16 ox_id;
- u16 rx_id;
-#endif
- u32 param;
+ __le16 ox_id;
+ __le16 rx_id;
+ __le32 param;
};
/*
- * FCoE connection data base
+ * FCoE connection data base
*/
struct fcoe_conn_db {
#if defined(__BIG_ENDIAN)
@@ -914,10 +815,10 @@ struct fcoe_conn_db {
/*
- * FCoE CQ element
+ * FCoE CQ element $$KEEP_ENDIANNESS$$
*/
struct fcoe_cqe {
- u16 wqe;
+ __le16 wqe;
#define FCOE_CQE_CQE_INFO (0x3FFF<<0)
#define FCOE_CQE_CQE_INFO_SHIFT 0
#define FCOE_CQE_CQE_TYPE (0x1<<14)
@@ -928,61 +829,46 @@ struct fcoe_cqe {
/*
- * FCoE error/warning resporting entry
+ * FCoE error/warning reporting entry $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_partial_err_report_entry {
+ __le32 err_warn_bitmap_lo;
+ __le32 err_warn_bitmap_hi;
+ __le32 tx_buf_off;
+ __le32 rx_buf_off;
+};
+
+/*
+ * FCoE error/warning reporting entry $$KEEP_ENDIANNESS$$
*/
struct fcoe_err_report_entry {
- u32 err_warn_bitmap_lo;
- u32 err_warn_bitmap_hi;
- u32 tx_buf_off;
- u32 rx_buf_off;
+ struct fcoe_partial_err_report_entry data;
struct fcoe_fc_hdr fc_hdr;
};
/*
- * FCoE hash table entry (32 bytes)
+ * FCoE hash table entry (32 bytes) $$KEEP_ENDIANNESS$$
*/
struct fcoe_hash_table_entry {
-#if defined(__BIG_ENDIAN)
- u8 d_id_0;
- u8 s_id_2;
- u8 s_id_1;
- u8 s_id_0;
-#elif defined(__LITTLE_ENDIAN)
u8 s_id_0;
u8 s_id_1;
u8 s_id_2;
u8 d_id_0;
-#endif
-#if defined(__BIG_ENDIAN)
- u16 dst_mac_addr_hi;
- u8 d_id_2;
- u8 d_id_1;
-#elif defined(__LITTLE_ENDIAN)
u8 d_id_1;
u8 d_id_2;
- u16 dst_mac_addr_hi;
-#endif
- u32 dst_mac_addr_lo;
-#if defined(__BIG_ENDIAN)
- u16 vlan_id;
- u16 src_mac_addr_hi;
-#elif defined(__LITTLE_ENDIAN)
- u16 src_mac_addr_hi;
- u16 vlan_id;
-#endif
- u32 src_mac_addr_lo;
-#if defined(__BIG_ENDIAN)
- u16 reserved1;
- u8 reserved0;
- u8 vlan_flag;
-#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_mac_addr_hi;
+ __le16 dst_mac_addr_mid;
+ __le16 dst_mac_addr_lo;
+ __le16 src_mac_addr_hi;
+ __le16 vlan_id;
+ __le16 src_mac_addr_lo;
+ __le16 src_mac_addr_mid;
u8 vlan_flag;
u8 reserved0;
- u16 reserved1;
-#endif
- u32 reserved2;
- u32 field_id;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 field_id;
#define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0)
#define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0
#define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24)
@@ -991,11 +877,27 @@ struct fcoe_hash_table_entry {
#define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31
};
+
/*
- * FCoE pending work request CQE
+ * FCoE LCQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_lcqe {
+ __le32 wqe;
+#define FCOE_LCQE_TASK_ID (0xFFFF<<0)
+#define FCOE_LCQE_TASK_ID_SHIFT 0
+#define FCOE_LCQE_LCQE_TYPE (0xFF<<16)
+#define FCOE_LCQE_LCQE_TYPE_SHIFT 16
+#define FCOE_LCQE_RESERVED (0xFF<<24)
+#define FCOE_LCQE_RESERVED_SHIFT 24
+};
+
+
+
+/*
+ * FCoE pending work request CQE $$KEEP_ENDIANNESS$$
*/
struct fcoe_pend_wq_cqe {
- u16 wqe;
+ __le16 wqe;
#define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0)
#define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0
#define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14)
@@ -1006,53 +908,61 @@ struct fcoe_pend_wq_cqe {
/*
- * FCoE RX statistics parameters section#0
+ * FCoE RX statistics parameters section#0 $$KEEP_ENDIANNESS$$
*/
struct fcoe_rx_stat_params_section0 {
- u32 fcoe_ver_cnt;
- u32 fcoe_rx_pkt_cnt;
- u32 fcoe_rx_byte_cnt;
- u32 fcoe_rx_drop_pkt_cnt;
+ __le32 fcoe_rx_pkt_cnt;
+ __le32 fcoe_rx_byte_cnt;
};
/*
- * FCoE RX statistics parameters section#1
+ * FCoE RX statistics parameters section#1 $$KEEP_ENDIANNESS$$
*/
struct fcoe_rx_stat_params_section1 {
- u32 fc_crc_cnt;
- u32 eofa_del_cnt;
- u32 miss_frame_cnt;
- u32 seq_timeout_cnt;
- u32 drop_seq_cnt;
- u32 fcoe_rx_drop_pkt_cnt;
- u32 fcp_rx_pkt_cnt;
- u32 reserved0;
+ __le32 fcoe_ver_cnt;
+ __le32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_rx_stat_params_section2 {
+ __le32 fc_crc_cnt;
+ __le32 eofa_del_cnt;
+ __le32 miss_frame_cnt;
+ __le32 seq_timeout_cnt;
+ __le32 drop_seq_cnt;
+ __le32 fcoe_rx_drop_pkt_cnt;
+ __le32 fcp_rx_pkt_cnt;
+ __le32 reserved0;
};
/*
- * FCoE TX statistics parameters
+ * FCoE TX statistics parameters $$KEEP_ENDIANNESS$$
*/
struct fcoe_tx_stat_params {
- u32 fcoe_tx_pkt_cnt;
- u32 fcoe_tx_byte_cnt;
- u32 fcp_tx_pkt_cnt;
- u32 reserved0;
+ __le32 fcoe_tx_pkt_cnt;
+ __le32 fcoe_tx_byte_cnt;
+ __le32 fcp_tx_pkt_cnt;
+ __le32 reserved0;
};
/*
- * FCoE statistics parameters
+ * FCoE statistics parameters $$KEEP_ENDIANNESS$$
*/
struct fcoe_statistics_params {
struct fcoe_tx_stat_params tx_stat;
struct fcoe_rx_stat_params_section0 rx_stat0;
struct fcoe_rx_stat_params_section1 rx_stat1;
+ struct fcoe_rx_stat_params_section2 rx_stat2;
};
/*
- * FCoE t2 hash table entry (64 bytes)
+ * FCoE t2 hash table entry (64 bytes) $$KEEP_ENDIANNESS$$
*/
struct fcoe_t2_hash_table_entry {
struct fcoe_hash_table_entry data;
@@ -1060,11 +970,13 @@ struct fcoe_t2_hash_table_entry {
struct regpair reserved0[3];
};
+
+
/*
- * FCoE unsolicited CQE
+ * FCoE unsolicited CQE $$KEEP_ENDIANNESS$$
*/
struct fcoe_unsolicited_cqe {
- u16 wqe;
+ __le16 wqe;
#define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0)
#define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0
#define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2)
@@ -1075,6 +987,4 @@ struct fcoe_unsolicited_cqe {
#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15
};
-
-
#endif /* __57XX_FCOE_HSI_LINUX_LE__ */
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 0a404bfb44f..42228ca5a9d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -2,7 +2,7 @@
#define _BNX2FC_H_
/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
*
- * Copyright (c) 2008 - 2010 Broadcom Corporation
+ * Copyright (c) 2008 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -62,7 +62,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.1"
+#define BNX2FC_VERSION "1.0.4"
#define PFX "bnx2fc: "
@@ -141,6 +141,10 @@
#define BNX2FC_RNID_HBA 0x7
+#define SRR_RETRY_COUNT 5
+#define REC_RETRY_COUNT 1
+#define BNX2FC_NUM_ERR_BITS 63
+
/* bnx2fc driver uses only one instance of fcoe_percpu_s */
extern struct fcoe_percpu_s bnx2fc_global;
@@ -152,20 +156,14 @@ struct bnx2fc_percpu_s {
spinlock_t fp_work_lock;
};
-
struct bnx2fc_hba {
- struct list_head link;
+ struct list_head list;
struct cnic_dev *cnic;
struct pci_dev *pcidev;
- struct net_device *netdev;
struct net_device *phys_dev;
unsigned long reg_with_cnic;
#define BNX2FC_CNIC_REGISTERED 1
- struct packet_type fcoe_packet_type;
- struct packet_type fip_packet_type;
struct bnx2fc_cmd_mgr *cmd_mgr;
- struct workqueue_struct *timer_work_queue;
- struct kref kref;
spinlock_t hba_lock;
struct mutex hba_mutex;
unsigned long adapter_state;
@@ -173,14 +171,9 @@ struct bnx2fc_hba {
#define ADAPTER_STATE_GOING_DOWN 1
#define ADAPTER_STATE_LINK_DOWN 2
#define ADAPTER_STATE_READY 3
- u32 flags;
- unsigned long init_done;
- #define BNX2FC_FW_INIT_DONE 0
- #define BNX2FC_CTLR_INIT_DONE 1
- #define BNX2FC_CREATE_DONE 2
- struct fcoe_ctlr ctlr;
- u8 vlan_enabled;
- int vlan_id;
+ unsigned long flags;
+ #define BNX2FC_FLAG_FW_INIT_DONE 0
+ #define BNX2FC_FLAG_DESTROY_CMPL 1
u32 next_conn_id;
struct fcoe_task_ctx_entry **task_ctx;
dma_addr_t *task_ctx_dma;
@@ -199,38 +192,46 @@ struct bnx2fc_hba {
char *dummy_buffer;
dma_addr_t dummy_buf_dma;
+ /* Active list of offloaded sessions */
+ struct bnx2fc_rport **tgt_ofld_list;
+
+ /* statistics */
struct fcoe_statistics_params *stats_buffer;
dma_addr_t stats_buf_dma;
-
- /*
- * PCI related info.
- */
- u16 pci_did;
- u16 pci_vid;
- u16 pci_sdid;
- u16 pci_svid;
- u16 pci_func;
- u16 pci_devno;
-
- struct task_struct *l2_thread;
-
- /* linkdown handling */
- wait_queue_head_t shutdown_wait;
- int wait_for_link_down;
+ struct completion stat_req_done;
/*destroy handling */
struct timer_list destroy_timer;
wait_queue_head_t destroy_wait;
- /* Active list of offloaded sessions */
- struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS];
+ /* linkdown handling */
+ wait_queue_head_t shutdown_wait;
+ int wait_for_link_down;
int num_ofld_sess;
+ struct list_head vports;
+};
- /* statistics */
- struct completion stat_req_done;
+struct bnx2fc_interface {
+ struct list_head list;
+ unsigned long if_flags;
+ #define BNX2FC_CTLR_INIT_DONE 0
+ struct bnx2fc_hba *hba;
+ struct net_device *netdev;
+ struct packet_type fcoe_packet_type;
+ struct packet_type fip_packet_type;
+ struct workqueue_struct *timer_work_queue;
+ struct kref kref;
+ struct fcoe_ctlr ctlr;
+ u8 vlan_enabled;
+ int vlan_id;
};
-#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr)
+#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
+
+struct bnx2fc_lport {
+ struct list_head list;
+ struct fc_lport *lport;
+};
struct bnx2fc_cmd_mgr {
struct bnx2fc_hba *hba;
@@ -247,9 +248,11 @@ struct bnx2fc_rport {
struct fc_rport_priv *rdata;
void __iomem *ctx_base;
#define DPM_TRIGER_TYPE 0x40
+ u32 io_timeout;
u32 fcoe_conn_id;
u32 context_id;
u32 sid;
+ int dev_type;
unsigned long flags;
#define BNX2FC_FLAG_SESSION_READY 0x1
@@ -257,14 +260,18 @@ struct bnx2fc_rport {
#define BNX2FC_FLAG_DISABLED 0x3
#define BNX2FC_FLAG_DESTROYED 0x4
#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5
-#define BNX2FC_FLAG_DESTROY_CMPL 0x6
-#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7
-#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8
-#define BNX2FC_FLAG_EXPL_LOGO 0x9
+#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x6
+#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7
+#define BNX2FC_FLAG_EXPL_LOGO 0x8
+ u8 src_addr[ETH_ALEN];
u32 max_sqes;
u32 max_rqes;
u32 max_cqes;
+ atomic_t free_sqes;
+
+ struct b577xx_doorbell_set_prod sq_db;
+ struct b577xx_fcoe_rx_doorbell rx_db;
struct fcoe_sqe *sq;
dma_addr_t sq_dma;
@@ -274,7 +281,7 @@ struct bnx2fc_rport {
struct fcoe_cqe *cq;
dma_addr_t cq_dma;
- u32 cq_cons_idx;
+ u16 cq_cons_idx;
u8 cq_curr_toggle_bit;
u32 cq_mem_size;
@@ -317,12 +324,9 @@ struct bnx2fc_rport {
spinlock_t cq_lock;
atomic_t num_active_ios;
u32 flush_in_prog;
- unsigned long work_time_slice;
unsigned long timestamp;
struct list_head free_task_list;
struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
- atomic_t pi;
- atomic_t ci;
struct list_head active_cmd_queue;
struct list_head els_queue;
struct list_head io_retire_queue;
@@ -357,6 +361,8 @@ struct bnx2fc_els_cb_arg {
struct bnx2fc_cmd *aborted_io_req;
struct bnx2fc_cmd *io_req;
u16 l2_oxid;
+ u32 offset;
+ enum fc_rctl r_ctl;
};
/* bnx2fc command structure */
@@ -370,6 +376,7 @@ struct bnx2fc_cmd {
#define BNX2FC_ABTS 3
#define BNX2FC_ELS 4
#define BNX2FC_CLEANUP 5
+#define BNX2FC_SEQ_CLEANUP 6
u8 io_req_flags;
struct kref refcount;
struct fcoe_port *port;
@@ -383,6 +390,7 @@ struct bnx2fc_cmd {
struct completion tm_done;
int wait_for_comp;
u16 xid;
+ struct fcoe_err_report_entry err_entry;
struct fcoe_task_ctx_entry *task;
struct io_bdt *bd_tbl;
struct fcp_rsp *rsp;
@@ -399,6 +407,12 @@ struct bnx2fc_cmd {
#define BNX2FC_FLAG_IO_COMPL 0x9
#define BNX2FC_FLAG_ELS_DONE 0xa
#define BNX2FC_FLAG_ELS_TIMEOUT 0xb
+#define BNX2FC_FLAG_CMD_LOST 0xc
+#define BNX2FC_FLAG_SRR_SENT 0xd
+ u8 rec_retry;
+ u8 srr_retry;
+ u32 srr_offset;
+ u8 srr_rctl;
u32 fcp_resid;
u32 fcp_rsp_len;
u32 fcp_sns_len;
@@ -423,11 +437,13 @@ struct bnx2fc_work {
struct bnx2fc_unsol_els {
struct fc_lport *lport;
struct fc_frame *fp;
+ struct bnx2fc_hba *hba;
struct work_struct unsol_els_work;
};
+struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
void bnx2fc_cmd_release(struct kref *ref);
int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
@@ -465,6 +481,10 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u16 orig_xid);
+void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnup_req,
+ struct fcoe_task_ctx_entry *task,
+ struct bnx2fc_cmd *orig_io_req,
+ u32 offset);
void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task);
void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
@@ -505,6 +525,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
struct fc_frame *,
void *),
void *arg, u32 timeout);
+void bnx2fc_arm_cq(struct bnx2fc_rport *tgt);
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
@@ -513,5 +534,13 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
unsigned char *buf,
u32 frame_len, u16 l2_oxid);
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
+int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req);
+int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req);
+int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl);
+void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 rx_state);
+int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
+ enum fc_rctl r_ctl);
#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h
index fe7769173c4..399cda047a7 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_constants.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h
@@ -5,6 +5,12 @@
* This file defines HSI constants for the FCoE flows
*/
+/* Current FCoE HSI version number composed of two fields (16 bit) */
+/* A change here implies that the previous HSI is broken */
+#define FCOE_HSI_MAJOR_VERSION (1)
+/* A change here does not break the previous HSI */
+#define FCOE_HSI_MINOR_VERSION (1)
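These two constants feed the hsi_major_version/hsi_minor_version bytes added to struct fcoe_kwqe_init2 earlier in this series, which lets the firmware reject a mismatched driver with FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION. A sketch of the driver-side fill-in, with the rest of the init2 setup omitted:

static void fcoe_init2_set_hsi_version(struct fcoe_kwqe_init2 *init2)
{
        init2->hsi_major_version = FCOE_HSI_MAJOR_VERSION;
        init2->hsi_minor_version = FCOE_HSI_MINOR_VERSION;
}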
+
/* KWQ/KCQ FCoE layer code */
#define FCOE_KWQE_LAYER_CODE (7)
@@ -40,21 +46,62 @@
#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4)
#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5)
+#define FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION (0x6)
+
+/* CQE type */
+#define FCOE_PENDING_CQE_TYPE 0
+#define FCOE_UNSOLIC_CQE_TYPE 1
/* Unsolicited CQE type */
#define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0
#define FCOE_ERROR_DETECTION_CQE_TYPE 1
#define FCOE_WARNING_DETECTION_CQE_TYPE 2
+/* E_D_TOV timer resolution in ms */
+#define FCOE_E_D_TOV_TIMER_RESOLUTION_MS (20)
+
+/* E_D_TOV timer resolution for SDM (4 micro) */
+#define FCOE_E_D_TOV_SDM_TIMER_RESOLUTION \
+ (FCOE_E_D_TOV_TIMER_RESOLUTION_MS * 1000 / 4)
+
+/* REC timer resolution in ms */
+#define FCOE_REC_TIMER_RESOLUTION_MS (20)
+
+/* REC timer resolution for SDM (4 micro) */
+#define FCOE_REC_SDM_TIMER_RESOLUTION (FCOE_REC_TIMER_RESOLUTION_MS * 1000 / 4)
+
+/* E_D_TOV timer default wraparound value (2 sec) in 20 ms resolution */
+#define FCOE_E_D_TOV_DEFAULT_WRAPAROUND_VAL \
+ (2000 / FCOE_E_D_TOV_TIMER_RESOLUTION_MS)
+
+/* REC_TOV timer default wraparound value (3 sec) in 20 ms resolution */
+#define FCOE_REC_TOV_DEFAULT_WRAPAROUND_VAL \
+ (3000 / FCOE_REC_TIMER_RESOLUTION_MS)
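Spelled out, the derived values are 5000 SDM ticks for both SDM resolutions (20 ms expressed in 4 us ticks: 20 * 1000 / 4), a wraparound of 100 resolution units for the 2 s E_D_TOV default and 150 for the 3 s REC_TOV default. A compile-time check sketch of that arithmetic:

static inline void fcoe_timer_constants_check(void)
{
        BUILD_BUG_ON(FCOE_E_D_TOV_SDM_TIMER_RESOLUTION != 5000);
        BUILD_BUG_ON(FCOE_REC_SDM_TIMER_RESOLUTION != 5000);
        BUILD_BUG_ON(FCOE_E_D_TOV_DEFAULT_WRAPAROUND_VAL != 100);
        BUILD_BUG_ON(FCOE_REC_TOV_DEFAULT_WRAPAROUND_VAL != 150);
}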
+
+#define FCOE_NUM_OF_TIMER_TASKS (8 * 1024)
+
+#define FCOE_NUM_OF_CACHED_TASKS_TIMER (8)
+
/* Task context constants */
+/******** Remove FCP_CMD write tce sleep ***********************/
+/* In case timer services are required then shall be updated by Xstorm after
+ * start processing the task. In case no timer facilities are required then the
+ * driver would initialize the state to this value
+ *
+#define FCOE_TASK_TX_STATE_NORMAL 0
+ * After driver has initialize the task in case timer services required *
+#define FCOE_TASK_TX_STATE_INIT 1
+******** Remove FCP_CMD write tce sleep ***********************/
/* After driver has initialize the task in case timer services required */
#define FCOE_TASK_TX_STATE_INIT 0
/* In case timer services are required then shall be updated by Xstorm after
* start processing the task. In case no timer facilities are required then the
- * driver would initialize the state to this value */
+ * driver would initialize the state to this value
+ */
#define FCOE_TASK_TX_STATE_NORMAL 1
/* Task is under abort procedure. Updated in order to stop processing of
- * pending WQEs on this task */
+ * pending WQEs on this task
+ */
#define FCOE_TASK_TX_STATE_ABORT 2
/* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */
#define FCOE_TASK_TX_STATE_ERROR 3
@@ -66,17 +113,8 @@
#define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6
/* For sequence cleanup request task */
#define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7
-/* Mark task as aborted and indicate that ABTS was not transmitted */
-#define FCOE_TASK_TX_STATE_BEFORE_ABTS_TX 8
-/* Mark task as aborted and indicate that ABTS was transmitted */
-#define FCOE_TASK_TX_STATE_AFTER_ABTS_TX 9
/* For completion the ABTS task. */
-#define FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED 10
-/* Mark task as aborted and indicate that Exchange cleanup was not transmitted
- */
-#define FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX 11
-/* Mark task as aborted and indicate that Exchange cleanup was transmitted */
-#define FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX 12
+#define FCOE_TASK_TX_STATE_ABTS_TX 8
#define FCOE_TASK_RX_STATE_NORMAL 0
#define FCOE_TASK_RX_STATE_COMPLETED 1
@@ -86,25 +124,25 @@
#define FCOE_TASK_RX_STATE_WARNING 3
/* For E_D_T_TOV timer expiration in Ustorm */
#define FCOE_TASK_RX_STATE_ERROR 4
-/* ABTS ACC arrived wait for local completion to finally complete the task. */
-#define FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED 5
-/* local completion arrived wait for ABTS ACC to finally complete the task. */
-#define FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED 6
+/* FW only: First visit at rx-path, part of the abts round trip */
+#define FCOE_TASK_RX_STATE_ABTS_IN_PROCESS 5
+/* FW only: Second visit at rx-path, after ABTS frame transmitted */
+#define FCOE_TASK_RX_STATE_ABTS_TRANSMITTED 6
/* Special completion indication in case of task was aborted. */
#define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7
-/* Special completion indication in case of task was cleaned. */
-#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 8
-/* Special completion indication (in task requested the exchange cleanup) in
- * case cleaned task is in non-valid. */
-#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 9
+/* FW only: First visit at rx-path, part of the cleanup round trip */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_IN_PROCESS 8
+/* FW only: Special completion indication in case of task was cleaned. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 9
+/* Not in use: Special completion indication (the task requested the exchange
+ * cleanup) in case the cleaned task is non-valid.
+ */
+#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 10
/* Special completion indication (in task requested the sequence cleanup) in
- * case cleaned task was already returned to normal. */
-#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 10
-/* Exchange cleanup arrived wait until xfer will be handled to finally
- * complete the task. */
-#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED 11
-/* Xfer handled, wait for exchange cleanup to finally complete the task. */
-#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER 12
+ * case cleaned task was already returned to normal.
+ */
+#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 11
+
#define FCOE_TASK_TYPE_WRITE 0
#define FCOE_TASK_TYPE_READ 1
@@ -120,11 +158,40 @@
#define FCOE_TASK_CLASS_TYPE_3 0
#define FCOE_TASK_CLASS_TYPE_2 1
+/* FCoE/FC packet fields */
+#define FCOE_ETH_TYPE 0x8906
+
+/* FCoE maximum elements in hash table */
+#define FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW 8
+
+/* FCoE half of the elements in hash table */
+#define FCOE_HALF_ELEMENTS_IN_HASH_TABLE_ROW \
+ (FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW / 2)
+
+/* FCoE number of cached T2 entries */
+#define T_FCOE_NUMBER_OF_CACHED_T2_ENTRIES (4)
+
+/* FCoE hash table chunk size */
+#define FCOE_HASH_TBL_CHUNK_SIZE 16384
+
/* Everest FCoE connection type */
#define B577XX_FCOE_CONNECTION_TYPE 4
-/* Error codes for Error Reporting in fast path flows */
-/* XFER error codes */
+/* FCoE number of rows (in log). This number derives
+ * from the maximum connections supported which is 2048.
+ * TBA: Need a different constant for E2
+ */
+#define FCOE_MAX_NUM_SESSIONS_LOG 11
+
+#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12
+
+/* Error codes for Error Reporting in slow path flows */
+#define FCOE_SLOW_PATH_ERROR_CODE_TOO_MANY_FUNCS 0
+#define FCOE_SLOW_PATH_ERROR_CODE_NO_LICENSE 1
+
+/* Error codes for Error Reporting in fast path flows
+ * XFER error codes
+ */
#define FCOE_ERROR_CODE_XFER_OOO_RO 0
#define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1
#define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2
@@ -155,17 +222,17 @@
#define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23
#define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24
#define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25
-#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26
-#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27
+#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26
+#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27
#define FCOE_ERROR_CODE_DATA_FCTL 28
/* Middle path error codes */
-#define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS 29
+#define FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE 29
#define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30
#define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31
#define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32
#define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33
-#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL 34
+#define FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL 34
#define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35
#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36
@@ -173,7 +240,7 @@
#define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37
#define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38
#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39
-#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40
#define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41
/* Common error codes */
@@ -185,7 +252,7 @@
#define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47
#define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48
#define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49
-#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50
+#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50
/* Unsolicited Rx error codes */
#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51
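The error-code blocks above partition the fast-path completion status space into XFER/DATA, middle path, ABTS reply, common and unsolicited-Rx groups. As a reading aid only (not part of the patch), here is a minimal sketch of how a driver-side helper could bucket a firmware error code for logging; the helper name is hypothetical and the sketch assumes this header and the kernel's basic types are in scope:

/* Hypothetical helper -- illustrates the grouping of the codes above. */
static const char *bnx2fc_err_code_group(u8 err_code)
{
	if (err_code <= FCOE_ERROR_CODE_DATA_FCTL)
		return "xfer/data";	/* codes 0 through 28 */
	if (err_code <= FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL)
		return "middle path";	/* codes 29 through 36 */
	if (err_code <= FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH)
		return "abts reply";	/* codes 37 through 41 */
	if (err_code <= FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED)
		return "common";	/* remaining codes through 50 */
	return "unsolicited rx";	/* code 51 and above */
}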
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
index 7f6aff68cc5..3416d9a746c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_debug.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -21,21 +21,21 @@ extern unsigned int bnx2fc_debug_level;
#define BNX2FC_ELS_DBG(fmt, arg...) \
BNX2FC_CHK_LOGGING(LOG_ELS, \
- printk(KERN_ALERT PFX fmt, ##arg))
+ printk(KERN_INFO PFX fmt, ##arg))
#define BNX2FC_MISC_DBG(fmt, arg...) \
BNX2FC_CHK_LOGGING(LOG_MISC, \
- printk(KERN_ALERT PFX fmt, ##arg))
+ printk(KERN_INFO PFX fmt, ##arg))
#define BNX2FC_IO_DBG(io_req, fmt, arg...) \
do { \
if (!io_req || !io_req->port || !io_req->port->lport || \
!io_req->port->lport->host) \
BNX2FC_CHK_LOGGING(LOG_IO, \
- printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
else \
BNX2FC_CHK_LOGGING(LOG_IO, \
- shost_printk(KERN_ALERT, \
+ shost_printk(KERN_INFO, \
(io_req)->port->lport->host, \
PFX "xid:0x%x " fmt, \
(io_req)->xid, ##arg)); \
@@ -46,10 +46,10 @@ extern unsigned int bnx2fc_debug_level;
if (!tgt || !tgt->port || !tgt->port->lport || \
!tgt->port->lport->host || !tgt->rport) \
BNX2FC_CHK_LOGGING(LOG_TGT, \
- printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
else \
BNX2FC_CHK_LOGGING(LOG_TGT, \
- shost_printk(KERN_ALERT, \
+ shost_printk(KERN_INFO, \
(tgt)->port->lport->host, \
PFX "port:%x " fmt, \
(tgt)->rport->port_id, ##arg)); \
@@ -60,10 +60,10 @@ extern unsigned int bnx2fc_debug_level;
do { \
if (!lport || !lport->host) \
BNX2FC_CHK_LOGGING(LOG_HBA, \
- printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
else \
BNX2FC_CHK_LOGGING(LOG_HBA, \
- shost_printk(KERN_ALERT, lport->host, \
+ shost_printk(KERN_INFO, lport->host, \
PFX fmt, ##arg)); \
} while (0)
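All of the logging macros above gate on a LOG_* bit in bnx2fc_debug_level and, with this change, emit at KERN_INFO instead of KERN_ALERT. A short usage sketch, not part of the patch; the io_req and netdev variables are assumed to exist in the caller:

	/* only printed when LOG_MISC / LOG_IO are set in bnx2fc_debug_level */
	BNX2FC_MISC_DBG("link up on %s\n", netdev->name);
	BNX2FC_IO_DBG(io_req, "ABTS issued\n");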
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 52c358427ce..d66dcbd0df1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -3,7 +3,7 @@
* This file contains helper routines that handle ELS requests
* and responses.
*
- * Copyright (c) 2008 - 2010 Broadcom Corporation
+ * Copyright (c) 2008 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -83,7 +83,7 @@ int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
rrq.rrq_cmd = ELS_RRQ;
hton24(rrq.rrq_s_id, sid);
rrq.rrq_ox_id = htons(aborted_io_req->xid);
- rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id);
+ rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);
retry_rrq:
rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
@@ -253,13 +253,417 @@ int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
return rc;
}
+void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr, *fh;
+ struct bnx2fc_cmd *srr_req;
+ struct bnx2fc_cmd *orig_io_req;
+ struct fc_frame *fp;
+ unsigned char *buf;
+ void *resp_buf;
+ u32 resp_len, hdr_len;
+ u8 opcode;
+ int rc = 0;
+
+ orig_io_req = cb_arg->aborted_io_req;
+ srr_req = cb_arg->io_req;
+ if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x completed\n",
+ orig_io_req->xid);
+ goto srr_compl_done;
+ }
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(srr_req, "rec abts in prog "
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ goto srr_compl_done;
+ }
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
+ /* SRR timed out */
+ BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ rc = bnx2fc_initiate_abts(srr_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
+ "failed. issue cleanup\n");
+ bnx2fc_initiate_cleanup(srr_req);
+ }
+ orig_io_req->srr_retry++;
+ if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_srr(orig_io_req,
+ orig_io_req->srr_offset,
+ orig_io_req->srr_rctl);
+ spin_lock_bh(&tgt->tgt_lock);
+ if (!rc)
+ goto srr_compl_done;
+ }
+
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
+ "failed xid = 0x%x. issue cleanup\n",
+ orig_io_req->xid);
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ goto srr_compl_done;
+ }
+ mp_req = &(srr_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+ resp_len = mp_req->resp_len;
+ resp_buf = mp_req->resp_buf;
+
+ hdr_len = sizeof(*fc_hdr);
+ buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!buf) {
+ printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
+ goto srr_compl_done;
+ }
+ memcpy(buf, fc_hdr, hdr_len);
+ memcpy(buf + hdr_len, resp_buf, resp_len);
+
+ fp = fc_frame_alloc(NULL, resp_len);
+ if (!fp) {
+ printk(KERN_ERR PFX "fc_frame_alloc failure\n");
+ goto free_buf;
+ }
+
+ fh = (struct fc_frame_header *) fc_frame_header_get(fp);
+ /* Copy FC Frame header and payload into the frame */
+ memcpy(fh, buf, hdr_len + resp_len);
+
+ opcode = fc_frame_payload_op(fp);
+ switch (opcode) {
+ case ELS_LS_ACC:
+ BNX2FC_IO_DBG(srr_req, "SRR success\n");
+ break;
+ case ELS_LS_RJT:
+ BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
+ "failed xid = 0x%x. issue cleanup\n",
+ orig_io_req->xid);
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ break;
+ default:
+ BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
+ opcode);
+ break;
+ }
+ fc_frame_free(fp);
+free_buf:
+ kfree(buf);
+srr_compl_done:
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+}
+
+void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_cmd *orig_io_req, *new_io_req;
+ struct bnx2fc_cmd *rec_req;
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr, *fh;
+ struct fc_els_ls_rjt *rjt;
+ struct fc_els_rec_acc *acc;
+ struct bnx2fc_rport *tgt;
+ struct fcoe_err_report_entry *err_entry;
+ struct scsi_cmnd *sc_cmd;
+ enum fc_rctl r_ctl;
+ unsigned char *buf;
+ void *resp_buf;
+ struct fc_frame *fp;
+ u8 opcode;
+ u32 offset;
+ u32 e_stat;
+ u32 resp_len, hdr_len;
+ int rc = 0;
+ bool send_seq_clnp = false;
+ bool abort_io = false;
+
+ BNX2FC_MISC_DBG("Entered rec_compl callback\n");
+ rec_req = cb_arg->io_req;
+ orig_io_req = cb_arg->aborted_io_req;
+ BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x\n", orig_io_req->xid);
+ tgt = orig_io_req->tgt;
+
+ if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(rec_req, "completed "
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ goto rec_compl_done;
+ }
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+ BNX2FC_IO_DBG(rec_req, "abts in prog "
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ goto rec_compl_done;
+ }
+ /* Handle REC timeout case */
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
+ BNX2FC_IO_DBG(rec_req, "timed out, abort "
+ "orig_io - 0x%x\n",
+ orig_io_req->xid);
+ /* els req timed out. send abts for els */
+ rc = bnx2fc_initiate_abts(rec_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
+ "failed. issue cleanup\n");
+ bnx2fc_initiate_cleanup(rec_req);
+ }
+ orig_io_req->rec_retry++;
+ /* REC timed out. Retry REC if possible, else ABTS the orig IO req */
+ if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_rec(orig_io_req);
+ spin_lock_bh(&tgt->tgt_lock);
+ if (!rc)
+ goto rec_compl_done;
+ }
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
+ "failed xid = 0x%x. issue cleanup\n",
+ orig_io_req->xid);
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ goto rec_compl_done;
+ }
+ mp_req = &(rec_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+ resp_len = mp_req->resp_len;
+ acc = resp_buf = mp_req->resp_buf;
+
+ hdr_len = sizeof(*fc_hdr);
+
+ buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!buf) {
+ printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
+ goto rec_compl_done;
+ }
+ memcpy(buf, fc_hdr, hdr_len);
+ memcpy(buf + hdr_len, resp_buf, resp_len);
+
+ fp = fc_frame_alloc(NULL, resp_len);
+ if (!fp) {
+ printk(KERN_ERR PFX "fc_frame_alloc failure\n");
+ goto free_buf;
+ }
+
+ fh = (struct fc_frame_header *) fc_frame_header_get(fp);
+ /* Copy FC Frame header and payload into the frame */
+ memcpy(fh, buf, hdr_len + resp_len);
+
+ opcode = fc_frame_payload_op(fp);
+ if (opcode == ELS_LS_RJT) {
+ BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ if ((rjt->er_reason == ELS_RJT_LOGIC ||
+ rjt->er_reason == ELS_RJT_UNAB) &&
+ rjt->er_explan == ELS_EXPL_OXID_RXID) {
+ BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
+ new_io_req = bnx2fc_cmd_alloc(tgt);
+ if (!new_io_req)
+ goto abort_io;
+ new_io_req->sc_cmd = orig_io_req->sc_cmd;
+ /* cleanup orig_io_req that is with the FW */
+ set_bit(BNX2FC_FLAG_CMD_LOST,
+ &orig_io_req->req_flags);
+ bnx2fc_initiate_cleanup(orig_io_req);
+ /* Post a new IO req with the same sc_cmd */
+ BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_post_io_req(tgt, new_io_req);
+ spin_lock_bh(&tgt->tgt_lock);
+ if (!rc)
+ goto free_frame;
+ BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
+ }
+abort_io:
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
+ "failed. issue cleanup\n");
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ } else if (opcode == ELS_LS_ACC) {
+ /* REVISIT: Check if the exchange is already aborted */
+ offset = ntohl(acc->reca_fc4value);
+ e_stat = ntohl(acc->reca_e_stat);
+ if (e_stat & ESB_ST_SEQ_INIT) {
+ BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
+ goto free_frame;
+ }
+ BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
+ e_stat, offset);
+ /* Seq initiative is with us */
+ err_entry = (struct fcoe_err_report_entry *)
+ &orig_io_req->err_entry;
+ sc_cmd = orig_io_req->sc_cmd;
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ /* SCSI WRITE command */
+ if (offset == orig_io_req->data_xfer_len) {
+ BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
+ /* FCP_RSP lost */
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ offset = 0;
+ } else {
+ /* start transmitting from offset */
+ BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
+ send_seq_clnp = true;
+ r_ctl = FC_RCTL_DD_DATA_DESC;
+ if (bnx2fc_initiate_seq_cleanup(orig_io_req,
+ offset, r_ctl))
+ abort_io = true;
+ /* XFER_RDY */
+ }
+ } else {
+ /* SCSI READ command */
+ if (err_entry->data.rx_buf_off ==
+ orig_io_req->data_xfer_len) {
+ /* FCP_RSP lost */
+ BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ offset = 0;
+ } else {
+ /* request retransmission from this offset */
+ send_seq_clnp = true;
+ offset = err_entry->data.rx_buf_off;
+ BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
+ /* FCP_DATA lost */
+ r_ctl = FC_RCTL_DD_SOL_DATA;
+ if (bnx2fc_initiate_seq_cleanup(orig_io_req,
+ offset, r_ctl))
+ abort_io = true;
+ }
+ }
+ if (abort_io) {
+ rc = bnx2fc_initiate_abts(orig_io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
+ " failed. issue cleanup\n");
+ bnx2fc_initiate_cleanup(orig_io_req);
+ }
+ } else if (!send_seq_clnp) {
+ BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (rc) {
+ BNX2FC_IO_DBG(rec_req, "Unable to send SRR,"
+ " IO will abort\n");
+ }
+ }
+ }
+free_frame:
+ fc_frame_free(fp);
+free_buf:
+ kfree(buf);
+rec_compl_done:
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+ kfree(cb_arg);
+}
+
+int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
+{
+ struct fc_els_rec rec;
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ u32 sid = tgt->sid;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
+ memset(&rec, 0, sizeof(rec));
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
+ rc = -ENOMEM;
+ goto rec_err;
+ }
+ kref_get(&orig_io_req->refcount);
+
+ cb_arg->aborted_io_req = orig_io_req;
+
+ rec.rec_cmd = ELS_REC;
+ hton24(rec.rec_s_id, sid);
+ rec.rec_ox_id = htons(orig_io_req->xid);
+ rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
+
+ rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
+ bnx2fc_rec_compl, cb_arg,
+ r_a_tov);
+rec_err:
+ if (rc) {
+ BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ kfree(cb_arg);
+ }
+ return rc;
+}
+
+int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
+{
+ struct fcp_srr srr;
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
+ memset(&srr, 0, sizeof(srr));
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
+ rc = -ENOMEM;
+ goto srr_err;
+ }
+ kref_get(&orig_io_req->refcount);
+
+ cb_arg->aborted_io_req = orig_io_req;
+
+ srr.srr_op = ELS_SRR;
+ srr.srr_ox_id = htons(orig_io_req->xid);
+ srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
+ srr.srr_rel_off = htonl(offset);
+ srr.srr_r_ctl = r_ctl;
+ orig_io_req->srr_offset = offset;
+ orig_io_req->srr_rctl = r_ctl;
+
+ rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
+ bnx2fc_srr_compl, cb_arg,
+ r_a_tov);
+srr_err:
+ if (rc) {
+ BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ kfree(cb_arg);
+ } else
+ set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);
+
+ return rc;
+}
+
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
void *data, u32 data_len,
void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
struct fcoe_port *port = tgt->port;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
struct fc_rport *rport = tgt->rport;
struct fc_lport *lport = port->lport;
struct bnx2fc_cmd *els_req;
@@ -274,12 +678,12 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
rc = fc_remote_port_chkready(rport);
if (rc) {
- printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op);
+ printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
rc = -EINVAL;
goto els_err;
}
if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
- printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op);
+ printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
rc = -EINVAL;
goto els_err;
}
@@ -305,7 +709,7 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
rc = bnx2fc_init_mp_req(els_req);
if (rc == FAILED) {
- printk(KERN_ALERT PFX "ELS MP request init failed\n");
+ printk(KERN_ERR PFX "ELS MP request init failed\n");
spin_lock_bh(&tgt->tgt_lock);
kref_put(&els_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
@@ -324,7 +728,7 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
memcpy(mp_req->req_buf, data, data_len);
} else {
- printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op);
+ printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
els_req->cb_func = NULL;
els_req->cb_arg = NULL;
spin_lock_bh(&tgt->tgt_lock);
@@ -342,9 +746,14 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
did = tgt->rport->port_id;
sid = tgt->sid;
- __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
- FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
- FC_FC_SEQ_INIT, 0);
+ if (op == ELS_SRR)
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
+ FC_TYPE_FCP, FC_FC_FIRST_SEQ |
+ FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+ else
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
+ FC_TYPE_ELS, FC_FC_FIRST_SEQ |
+ FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
/* Obtain exchange id */
xid = els_req->xid;
@@ -352,7 +761,8 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
index = xid % BNX2FC_TASKS_PER_PAGE;
/* Initialize task context for this IO request */
- task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
task = &(task_page[index]);
bnx2fc_init_mp_task(els_req, task);
@@ -417,12 +827,13 @@ void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
hdr = (u64 *)fc_hdr;
temp_hdr = (u64 *)
- &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+ &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
hdr[0] = cpu_to_be64(temp_hdr[0]);
hdr[1] = cpu_to_be64(temp_hdr[1]);
hdr[2] = cpu_to_be64(temp_hdr[2]);
- mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+ mp_req->resp_len =
+ task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
/* Parse ELS response */
if ((els_req->cb_func) && (els_req->cb_arg)) {
@@ -495,8 +906,8 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
void *arg, u32 timeout)
{
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
- struct fcoe_ctlr *fip = &hba->ctlr;
+ struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *fip = &interface->ctlr;
struct fc_frame_header *fh = fc_frame_header_get(fp);
switch (op) {
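The new REC/SRR machinery in bnx2fc_els.c above works roughly as follows: a REC is sent for the troubled exchange; an LS_RJT carrying the OX_ID/RX_ID explanation is treated as a lost command (the SCSI command is re-posted and the stale exchange cleaned up), while an LS_ACC is inspected to decide whether only the FCP_RSP was lost (send SRR) or data/XFER_RDY must be recovered (sequence cleanup at the reported offset). Every failure path falls back to ABTS and, if that also fails, an explicit cleanup; REC and SRR timeouts are retried up to REC_RETRY_COUNT/SRR_RETRY_COUNT before the same fallback. Note also that SRR is an FCP FC-4 link service, which is why bnx2fc_initiate_els() fills its header with FC_RCTL_ELS4_REQ/FC_TYPE_FCP rather than the usual FC_RCTL_ELS_REQ/FC_TYPE_ELS. As a reading aid only (the names below are invented, not driver API), the LS_ACC decision distilled into a small function:

/* Sketch: mirrors the REC accept handling in bnx2fc_rec_compl() above. */
enum rec_acc_action { REC_SEND_SRR, REC_SEQ_CLEANUP };

static enum rec_acc_action rec_acc_decide(bool is_write, u32 rec_offset,
					  u32 data_xfer_len, u32 rx_buf_off)
{
	if (is_write)
		/* everything was sent, so only the FCP_RSP went missing */
		return rec_offset == data_xfer_len ? REC_SEND_SRR :
						     REC_SEQ_CLEANUP;
	/* read: all data received means FCP_RSP lost, else retransmit data */
	return rx_buf_off == data_xfer_len ? REC_SEND_SRR : REC_SEQ_CLEANUP;
}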
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index ab255fbc7f3..7cb2cd48b17 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -3,7 +3,7 @@
* cnic modules to create FCoE instances, send/receive non-offloaded
* FIP/FCoE packets, listen to link events etc.
*
- * Copyright (c) 2008 - 2010 Broadcom Corporation
+ * Copyright (c) 2008 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -15,13 +15,14 @@
#include "bnx2fc.h"
static struct list_head adapter_list;
+static struct list_head if_list;
static u32 adapter_count;
static DEFINE_MUTEX(bnx2fc_dev_lock);
DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Mar 17, 2011"
+#define DRV_MODULE_RELDATE "Jun 23, 2011"
static char version[] __devinitdata =
@@ -61,7 +62,7 @@ static int bnx2fc_disable(struct net_device *netdev);
static void bnx2fc_recv_frame(struct sk_buff *skb);
-static void bnx2fc_start_disc(struct bnx2fc_hba *hba);
+static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
static int bnx2fc_net_config(struct fc_lport *lp);
static int bnx2fc_lport_config(struct fc_lport *lport);
@@ -70,18 +71,20 @@ static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
-static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
struct device *parent, int npiv);
static void bnx2fc_destroy_work(struct work_struct *work);
static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
+static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
+ *phys_dev);
static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
static void bnx2fc_port_shutdown(struct fc_lport *lport);
-static void bnx2fc_stop(struct bnx2fc_hba *hba);
+static void bnx2fc_stop(struct bnx2fc_interface *interface);
static int __init bnx2fc_mod_init(void);
static void __exit bnx2fc_mod_exit(void);
@@ -142,7 +145,8 @@ static void bnx2fc_abort_io(struct fc_lport *lport)
static void bnx2fc_cleanup(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct bnx2fc_rport *tgt;
int i;
@@ -219,7 +223,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
struct fcoe_crc_eof *cp;
struct sk_buff *skb;
struct fc_frame_header *fh;
- struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
+ struct bnx2fc_hba *hba;
struct fcoe_port *port;
struct fcoe_hdr *hp;
struct bnx2fc_rport *tgt;
@@ -230,7 +235,8 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
int wlen, rc = 0;
port = (struct fcoe_port *)lport_priv(lport);
- hba = port->priv;
+ interface = port->priv;
+ hba = interface->hba;
fh = fc_frame_header_get(fp);
@@ -242,12 +248,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
- if (!hba->ctlr.sel_fcf) {
+ if (!interface->ctlr.sel_fcf) {
BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
kfree_skb(skb);
return -EINVAL;
}
- if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb))
+ if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb))
return 0;
}
@@ -316,19 +322,19 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_reset_network_header(skb);
skb->mac_len = elen;
skb->protocol = htons(ETH_P_FCOE);
- skb->dev = hba->netdev;
+ skb->dev = interface->netdev;
/* fill up mac and fcoe headers */
eh = eth_hdr(skb);
eh->h_proto = htons(ETH_P_FCOE);
- if (hba->ctlr.map_dest)
+ if (interface->ctlr.map_dest)
fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
else
/* insert GW address */
- memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN);
+ memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN);
- if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN))
- memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN);
+ if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN))
+ memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN);
else
memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
@@ -377,22 +383,23 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *olddev)
{
struct fc_lport *lport;
- struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
struct fc_frame_header *fh;
struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg;
unsigned short oxid;
- hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type);
- lport = hba->ctlr.lp;
+ interface = container_of(ptype, struct bnx2fc_interface,
+ fcoe_packet_type);
+ lport = interface->ctlr.lp;
if (unlikely(lport == NULL)) {
- printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n");
+ printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
goto err;
}
if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
- printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n");
+ printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
goto err;
}
@@ -411,7 +418,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
fr = fcoe_dev_from_skb(skb);
fr->fr_dev = lport;
- fr->ptype = ptype;
bg = &bnx2fc_global;
spin_lock_bh(&bg->fcoe_rx_list.lock);
@@ -469,7 +475,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
fr = fcoe_dev_from_skb(skb);
lport = fr->fr_dev;
if (unlikely(lport == NULL)) {
- printk(KERN_ALERT PFX "Invalid lport struct\n");
+ printk(KERN_ERR PFX "Invalid lport struct\n");
kfree_skb(skb);
return;
}
@@ -594,7 +600,8 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
struct fc_host_statistics *bnx2fc_stats;
struct fc_lport *lport = shost_priv(shost);
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct fcoe_statistics_params *fw_stats;
int rc = 0;
@@ -612,7 +619,7 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
return bnx2fc_stats;
}
- bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt;
+ bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat2.fc_crc_cnt;
bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
@@ -631,7 +638,7 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
{
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
struct Scsi_Host *shost = lport->host;
int rc = 0;
@@ -654,7 +661,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
BNX2FC_NAME, BNX2FC_VERSION,
- hba->netdev->name);
+ interface->netdev->name);
return 0;
}
@@ -662,8 +669,8 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
static void bnx2fc_link_speed_update(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
- struct net_device *netdev = hba->netdev;
+ struct bnx2fc_interface *interface = port->priv;
+ struct net_device *netdev = interface->netdev;
struct ethtool_cmd ecmd;
if (!dev_ethtool_get_settings(netdev, &ecmd)) {
@@ -679,6 +686,9 @@ static void bnx2fc_link_speed_update(struct fc_lport *lport)
case SPEED_1000:
lport->link_speed = FC_PORTSPEED_1GBIT;
break;
+ case SPEED_2500:
+ lport->link_speed = FC_PORTSPEED_2GBIT;
+ break;
case SPEED_10000:
lport->link_speed = FC_PORTSPEED_10GBIT;
break;
@@ -688,7 +698,8 @@ static void bnx2fc_link_speed_update(struct fc_lport *lport)
static int bnx2fc_link_ok(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct net_device *dev = hba->phys_dev;
int rc = 0;
@@ -710,7 +721,7 @@ static int bnx2fc_link_ok(struct fc_lport *lport)
*/
void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
{
- if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
+ if (test_bit(__LINK_STATE_NOCARRIER, &hba->phys_dev->state))
set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
else
clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
@@ -719,11 +730,13 @@ void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
static int bnx2fc_net_config(struct fc_lport *lport)
{
struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
struct fcoe_port *port;
u64 wwnn, wwpn;
port = lport_priv(lport);
- hba = port->priv;
+ interface = port->priv;
+ hba = interface->hba;
/* require support for get_pauseparam ethtool op. */
if (!hba->phys_dev->ethtool_ops ||
@@ -740,11 +753,11 @@ static int bnx2fc_net_config(struct fc_lport *lport)
bnx2fc_link_speed_update(lport);
if (!lport->vport) {
- wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0);
+ wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 1, 0);
BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
fc_set_wwnn(lport, wwnn);
- wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0);
+ wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 2, 0);
BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
fc_set_wwpn(lport, wwpn);
}
@@ -756,9 +769,9 @@ static void bnx2fc_destroy_timer(unsigned long data)
{
struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
- BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR:bnx2fc_destroy_timer - "
+ BNX2FC_MISC_DBG("ERROR:bnx2fc_destroy_timer - "
"Destroy compl not received!!\n");
- hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+ set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
wake_up_interruptible(&hba->destroy_wait);
}
@@ -767,57 +780,44 @@ static void bnx2fc_destroy_timer(unsigned long data)
*
* @context: adapter structure pointer
* @event: event type
+ * @vlan_id: vlan id - associated vlan id with this event
*
* Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN, NETDEV_CHANGE and
* NETDEV_CHANGE_MTU events
*/
-static void bnx2fc_indicate_netevent(void *context, unsigned long event)
+static void bnx2fc_indicate_netevent(void *context, unsigned long event,
+ u16 vlan_id)
{
struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
- struct fc_lport *lport = hba->ctlr.lp;
+ struct fc_lport *lport;
struct fc_lport *vport;
+ struct bnx2fc_interface *interface;
+ int wait_for_upload = 0;
u32 link_possible = 1;
- if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
- BNX2FC_MISC_DBG("driver not ready. event=%s %ld\n",
- hba->netdev->name, event);
+ /* Ignore vlans for now */
+ if (vlan_id != 0)
return;
- }
-
- /*
- * ASSUMPTION:
- * indicate_netevent cannot be called from cnic unless bnx2fc
- * does register_device
- */
- BUG_ON(!lport);
-
- BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n",
- hba->netdev->name, event);
switch (event) {
case NETDEV_UP:
- BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n",
- hba->adapter_state);
if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
printk(KERN_ERR "indicate_netevent: "\
- "adapter is not UP!!\n");
+ "hba is not UP!!\n");
break;
case NETDEV_DOWN:
- BNX2FC_HBA_DBG(lport, "Port down\n");
clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
link_possible = 0;
break;
case NETDEV_GOING_DOWN:
- BNX2FC_HBA_DBG(lport, "Port going down\n");
set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
link_possible = 0;
break;
case NETDEV_CHANGE:
- BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n");
break;
default:
@@ -825,15 +825,22 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event)
return;
}
- bnx2fc_link_speed_update(lport);
+ mutex_lock(&bnx2fc_dev_lock);
+ list_for_each_entry(interface, &if_list, list) {
- if (link_possible && !bnx2fc_link_ok(lport)) {
- printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n");
- fcoe_ctlr_link_up(&hba->ctlr);
- } else {
- printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n");
- if (fcoe_ctlr_link_down(&hba->ctlr)) {
- clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ if (interface->hba != hba)
+ continue;
+
+ lport = interface->ctlr.lp;
+ BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
+ interface->netdev->name, event);
+
+ bnx2fc_link_speed_update(lport);
+
+ if (link_possible && !bnx2fc_link_ok(lport)) {
+ printk(KERN_ERR "indicate_netevent: ctlr_link_up\n");
+ fcoe_ctlr_link_up(&interface->ctlr);
+ } else if (fcoe_ctlr_link_down(&interface->ctlr)) {
mutex_lock(&lport->lp_mutex);
list_for_each_entry(vport, &lport->vports, list)
fc_host_port_type(vport->host) =
@@ -844,24 +851,26 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event)
get_cpu())->LinkFailureCount++;
put_cpu();
fcoe_clean_pending_queue(lport);
+ wait_for_upload = 1;
+ }
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
- init_waitqueue_head(&hba->shutdown_wait);
- BNX2FC_HBA_DBG(lport, "indicate_netevent "
- "num_ofld_sess = %d\n",
- hba->num_ofld_sess);
- hba->wait_for_link_down = 1;
- BNX2FC_HBA_DBG(lport, "waiting for uploads to "
- "compl proc = %s\n",
- current->comm);
- wait_event_interruptible(hba->shutdown_wait,
- (hba->num_ofld_sess == 0));
- BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n",
+ if (wait_for_upload) {
+ clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ init_waitqueue_head(&hba->shutdown_wait);
+ BNX2FC_MISC_DBG("indicate_netevent "
+ "num_ofld_sess = %d\n",
hba->num_ofld_sess);
- hba->wait_for_link_down = 0;
+ hba->wait_for_link_down = 1;
+ wait_event_interruptible(hba->shutdown_wait,
+ (hba->num_ofld_sess == 0));
+ BNX2FC_MISC_DBG("wakeup - num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ hba->wait_for_link_down = 0;
- if (signal_pending(current))
- flush_signals(current);
- }
+ if (signal_pending(current))
+ flush_signals(current);
}
}
@@ -880,23 +889,12 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
static int bnx2fc_em_config(struct fc_lport *lport)
{
- struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
-
if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
FCOE_MAX_XID, NULL)) {
printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
return -ENOMEM;
}
- hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
- BNX2FC_MAX_XID);
-
- if (!hba->cmd_mgr) {
- printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
- fc_exch_mgr_free(lport);
- return -ENOMEM;
- }
return 0;
}
@@ -909,11 +907,8 @@ static int bnx2fc_lport_config(struct fc_lport *lport)
lport->e_d_tov = 2 * 1000;
lport->r_a_tov = 10 * 1000;
- /* REVISIT: enable when supporting tape devices
lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
- */
- lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS);
lport->does_npiv = 1;
memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
@@ -943,9 +938,10 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype,
struct net_device *orig_dev)
{
- struct bnx2fc_hba *hba;
- hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type);
- fcoe_ctlr_recv(&hba->ctlr, skb);
+ struct bnx2fc_interface *interface;
+ interface = container_of(ptype, struct bnx2fc_interface,
+ fip_packet_type);
+ fcoe_ctlr_recv(&interface->ctlr, skb);
return 0;
}
@@ -996,17 +992,17 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
struct Scsi_Host *shost = vport_to_shost(vport);
struct fc_lport *n_port = shost_priv(shost);
struct fcoe_port *port = lport_priv(n_port);
- struct bnx2fc_hba *hba = port->priv;
- struct net_device *netdev = hba->netdev;
+ struct bnx2fc_interface *interface = port->priv;
+ struct net_device *netdev = interface->netdev;
struct fc_lport *vn_port;
- if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
printk(KERN_ERR PFX "vn ports cannot be created on"
- "this hba\n");
+ " this interface\n");
return -EIO;
}
mutex_lock(&bnx2fc_dev_lock);
- vn_port = bnx2fc_if_create(hba, &vport->dev, 1);
+ vn_port = bnx2fc_if_create(interface, &vport->dev, 1);
mutex_unlock(&bnx2fc_dev_lock);
if (IS_ERR(vn_port)) {
@@ -1056,10 +1052,10 @@ static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
}
-static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
+static int bnx2fc_netdev_setup(struct bnx2fc_interface *interface)
{
- struct net_device *netdev = hba->netdev;
- struct net_device *physdev = hba->phys_dev;
+ struct net_device *netdev = interface->netdev;
+ struct net_device *physdev = interface->hba->phys_dev;
struct netdev_hw_addr *ha;
int sel_san_mac = 0;
@@ -1074,7 +1070,8 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
(is_valid_ether_addr(ha->addr))) {
- memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
+ memcpy(interface->ctlr.ctl_src_addr, ha->addr,
+ ETH_ALEN);
sel_san_mac = 1;
BNX2FC_MISC_DBG("Found SAN MAC\n");
}
@@ -1084,15 +1081,15 @@ static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
if (!sel_san_mac)
return -ENODEV;
- hba->fip_packet_type.func = bnx2fc_fip_recv;
- hba->fip_packet_type.type = htons(ETH_P_FIP);
- hba->fip_packet_type.dev = netdev;
- dev_add_pack(&hba->fip_packet_type);
+ interface->fip_packet_type.func = bnx2fc_fip_recv;
+ interface->fip_packet_type.type = htons(ETH_P_FIP);
+ interface->fip_packet_type.dev = netdev;
+ dev_add_pack(&interface->fip_packet_type);
- hba->fcoe_packet_type.func = bnx2fc_rcv;
- hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
- hba->fcoe_packet_type.dev = netdev;
- dev_add_pack(&hba->fcoe_packet_type);
+ interface->fcoe_packet_type.func = bnx2fc_rcv;
+ interface->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+ interface->fcoe_packet_type.dev = netdev;
+ dev_add_pack(&interface->fcoe_packet_type);
return 0;
}
@@ -1128,53 +1125,54 @@ static void bnx2fc_release_transport(void)
static void bnx2fc_interface_release(struct kref *kref)
{
- struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
struct net_device *netdev;
- struct net_device *phys_dev;
- hba = container_of(kref, struct bnx2fc_hba, kref);
+ interface = container_of(kref, struct bnx2fc_interface, kref);
BNX2FC_MISC_DBG("Interface is being released\n");
- netdev = hba->netdev;
- phys_dev = hba->phys_dev;
+ netdev = interface->netdev;
/* tear-down FIP controller */
- if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done))
- fcoe_ctlr_destroy(&hba->ctlr);
+ if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
+ fcoe_ctlr_destroy(&interface->ctlr);
+
+ kfree(interface);
- /* Free the command manager */
- if (hba->cmd_mgr) {
- bnx2fc_cmd_mgr_free(hba->cmd_mgr);
- hba->cmd_mgr = NULL;
- }
dev_put(netdev);
module_put(THIS_MODULE);
}
-static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba)
+static inline void bnx2fc_interface_get(struct bnx2fc_interface *interface)
{
- kref_get(&hba->kref);
+ kref_get(&interface->kref);
}
-static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba)
+static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface)
{
- kref_put(&hba->kref, bnx2fc_interface_release);
+ kref_put(&interface->kref, bnx2fc_interface_release);
}
-static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba)
+static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
{
+ /* Free the command manager */
+ if (hba->cmd_mgr) {
+ bnx2fc_cmd_mgr_free(hba->cmd_mgr);
+ hba->cmd_mgr = NULL;
+ }
+ kfree(hba->tgt_ofld_list);
bnx2fc_unbind_pcidev(hba);
kfree(hba);
}
/**
- * bnx2fc_interface_create - create a new fcoe instance
+ * bnx2fc_hba_create - create a new bnx2fc hba
*
* @cnic: pointer to cnic device
*
- * Creates a new FCoE instance on the given device which include allocating
- * hba structure, scsi_host and lport structures.
+ * Creates a new FCoE hba on the given device.
+ *
*/
-static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
+static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
{
struct bnx2fc_hba *hba;
int rc;
@@ -1189,64 +1187,83 @@ static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
hba->cnic = cnic;
rc = bnx2fc_bind_pcidev(hba);
- if (rc)
+ if (rc) {
+ printk(KERN_ERR PFX "create_adapter: bind error\n");
goto bind_err;
+ }
hba->phys_dev = cnic->netdev;
- /* will get overwritten after we do vlan discovery */
- hba->netdev = hba->phys_dev;
+ hba->next_conn_id = 0;
+
+ hba->tgt_ofld_list =
+ kzalloc(sizeof(struct bnx2fc_rport *) * BNX2FC_NUM_MAX_SESS,
+ GFP_KERNEL);
+ if (!hba->tgt_ofld_list) {
+ printk(KERN_ERR PFX "Unable to allocate tgt offload list\n");
+ goto tgtofld_err;
+ }
+
+ hba->num_ofld_sess = 0;
+
+ hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
+ BNX2FC_MAX_XID);
+ if (!hba->cmd_mgr) {
+ printk(KERN_ERR PFX "hba_create: bnx2fc_cmd_mgr_alloc failed\n");
+ goto cmgr_err;
+ }
init_waitqueue_head(&hba->shutdown_wait);
init_waitqueue_head(&hba->destroy_wait);
+ INIT_LIST_HEAD(&hba->vports);
return hba;
+
+cmgr_err:
+ kfree(hba->tgt_ofld_list);
+tgtofld_err:
+ bnx2fc_unbind_pcidev(hba);
bind_err:
- printk(KERN_ERR PFX "create_interface: bind error\n");
kfree(hba);
return NULL;
}
-static int bnx2fc_interface_setup(struct bnx2fc_hba *hba,
- enum fip_state fip_mode)
+struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
+ struct net_device *netdev,
+ enum fip_state fip_mode)
{
+ struct bnx2fc_interface *interface;
int rc = 0;
- struct net_device *netdev = hba->netdev;
- struct fcoe_ctlr *fip = &hba->ctlr;
+ interface = kzalloc(sizeof(*interface), GFP_KERNEL);
+ if (!interface) {
+ printk(KERN_ERR PFX "Unable to allocate interface structure\n");
+ return NULL;
+ }
dev_hold(netdev);
- kref_init(&hba->kref);
-
- hba->flags = 0;
+ kref_init(&interface->kref);
+ interface->hba = hba;
+ interface->netdev = netdev;
/* Initialize FIP */
- memset(fip, 0, sizeof(*fip));
- fcoe_ctlr_init(fip, fip_mode);
- hba->ctlr.send = bnx2fc_fip_send;
- hba->ctlr.update_mac = bnx2fc_update_src_mac;
- hba->ctlr.get_src_addr = bnx2fc_get_src_mac;
- set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
-
- rc = bnx2fc_netdev_setup(hba);
- if (rc)
- goto setup_err;
+ fcoe_ctlr_init(&interface->ctlr, fip_mode);
+ interface->ctlr.send = bnx2fc_fip_send;
+ interface->ctlr.update_mac = bnx2fc_update_src_mac;
+ interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
+ set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
- hba->next_conn_id = 0;
+ rc = bnx2fc_netdev_setup(interface);
+ if (!rc)
+ return interface;
- memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list));
- hba->num_ofld_sess = 0;
-
- return 0;
-
-setup_err:
- fcoe_ctlr_destroy(&hba->ctlr);
+ fcoe_ctlr_destroy(&interface->ctlr);
dev_put(netdev);
- bnx2fc_interface_put(hba);
- return rc;
+ kfree(interface);
+ return NULL;
}
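/*
 * Not part of the patch -- a reading aid reconstructing the ownership
 * model that the hba/interface split above introduces:
 *
 *   cnic_dev         1:1  bnx2fc_hba        owns the FW state, cmd_mgr,
 *                                           tgt_ofld_list and the
 *                                           hba->vports list of
 *                                           bnx2fc_lport entries
 *   bnx2fc_hba       1:N  bnx2fc_interface  one per FCoE netdev; owns the
 *                                           fcoe_ctlr and the FIP/FCoE
 *                                           packet handlers
 *   bnx2fc_interface 1:N  fc_lport          the base lport plus any NPIV
 *                                           vports created on top of it
 */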
/**
* bnx2fc_if_create - Create FCoE instance on a given interface
*
- * @hba: FCoE interface to create a local port on
+ * @interface: FCoE interface to create a local port on
* @parent: Device pointer to be the parent in sysfs for the SCSI host
* @npiv: Indicates if the port is vport or not
*
@@ -1254,15 +1271,23 @@ setup_err:
*
* Returns: Allocated fc_lport or an error pointer
*/
-static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
struct device *parent, int npiv)
{
struct fc_lport *lport, *n_port;
struct fcoe_port *port;
struct Scsi_Host *shost;
struct fc_vport *vport = dev_to_vport(parent);
+ struct bnx2fc_lport *blport;
+ struct bnx2fc_hba *hba;
int rc = 0;
+ blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
+ if (!blport) {
+ BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n");
+ return NULL;
+ }
+
/* Allocate Scsi_Host structure */
if (!npiv)
lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port));
@@ -1271,12 +1296,12 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
if (!lport) {
printk(KERN_ERR PFX "could not allocate scsi host structure\n");
- return NULL;
+ goto free_blport;
}
shost = lport->host;
port = lport_priv(lport);
port->lport = lport;
- port->priv = hba;
+ port->priv = interface;
INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
/* Configure fcoe_port */
@@ -1300,7 +1325,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
rc = bnx2fc_shost_config(lport, parent);
if (rc) {
printk(KERN_ERR PFX "Couldn't configure shost for %s\n",
- hba->netdev->name);
+ interface->netdev->name);
goto lp_config_err;
}
@@ -1326,30 +1351,38 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
goto shost_err;
}
- bnx2fc_interface_get(hba);
+ bnx2fc_interface_get(interface);
+
+ hba = interface->hba;
+ spin_lock_bh(&hba->hba_lock);
+ blport->lport = lport;
+ list_add_tail(&blport->list, &hba->vports);
+ spin_unlock_bh(&hba->hba_lock);
+
return lport;
shost_err:
scsi_remove_host(shost);
lp_config_err:
scsi_host_put(lport->host);
+free_blport:
+ kfree(blport);
return NULL;
}
-static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba)
+static void bnx2fc_netdev_cleanup(struct bnx2fc_interface *interface)
{
/* Don't listen for Ethernet packets anymore */
- __dev_remove_pack(&hba->fcoe_packet_type);
- __dev_remove_pack(&hba->fip_packet_type);
+ __dev_remove_pack(&interface->fcoe_packet_type);
+ __dev_remove_pack(&interface->fip_packet_type);
synchronize_net();
}
-static void bnx2fc_if_destroy(struct fc_lport *lport)
+static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba)
{
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_lport *blport, *tmp;
- BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
/* Stop the transmit retry timer */
del_timer_sync(&port->timer);
@@ -1372,10 +1405,17 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
/* Free memory used by statistical counters */
fc_lport_free_stats(lport);
+ spin_lock_bh(&hba->hba_lock);
+ list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
+ if (blport->lport == lport) {
+ list_del(&blport->list);
+ kfree(blport);
+ }
+ }
+ spin_unlock_bh(&hba->hba_lock);
+
/* Release Scsi_Host */
scsi_host_put(lport->host);
-
- bnx2fc_interface_put(hba);
}
/**
@@ -1390,46 +1430,31 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
*/
static int bnx2fc_destroy(struct net_device *netdev)
{
- struct bnx2fc_hba *hba = NULL;
- struct net_device *phys_dev;
+ struct bnx2fc_interface *interface = NULL;
+ struct bnx2fc_hba *hba;
+ struct fc_lport *lport;
int rc = 0;
rtnl_lock();
-
mutex_lock(&bnx2fc_dev_lock);
- /* obtain physical netdev */
- if (netdev->priv_flags & IFF_802_1Q_VLAN)
- phys_dev = vlan_dev_real_dev(netdev);
- else {
- printk(KERN_ERR PFX "Not a vlan device\n");
- rc = -ENODEV;
- goto netdev_err;
- }
- hba = bnx2fc_hba_lookup(phys_dev);
- if (!hba || !hba->ctlr.lp) {
+ interface = bnx2fc_interface_lookup(netdev);
+ if (!interface || !interface->ctlr.lp) {
rc = -ENODEV;
- printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n");
+ printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
goto netdev_err;
}
- if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
- printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n");
- goto netdev_err;
- }
-
- bnx2fc_netdev_cleanup(hba);
-
- bnx2fc_stop(hba);
-
- bnx2fc_if_destroy(hba->ctlr.lp);
+ hba = interface->hba;
- destroy_workqueue(hba->timer_work_queue);
+ bnx2fc_netdev_cleanup(interface);
+ lport = interface->ctlr.lp;
+ bnx2fc_stop(interface);
+ list_del(&interface->list);
+ destroy_workqueue(interface->timer_work_queue);
+ bnx2fc_interface_put(interface);
+ bnx2fc_if_destroy(lport, hba);
- if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
- bnx2fc_fw_destroy(hba);
-
- clear_bit(BNX2FC_CREATE_DONE, &hba->init_done);
netdev_err:
mutex_unlock(&bnx2fc_dev_lock);
rtnl_unlock();
@@ -1440,16 +1465,20 @@ static void bnx2fc_destroy_work(struct work_struct *work)
{
struct fcoe_port *port;
struct fc_lport *lport;
+ struct bnx2fc_interface *interface;
+ struct bnx2fc_hba *hba;
port = container_of(work, struct fcoe_port, destroy_work);
lport = port->lport;
+ interface = port->priv;
+ hba = interface->hba;
BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
bnx2fc_port_shutdown(lport);
rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
- bnx2fc_if_destroy(lport);
+ bnx2fc_if_destroy(lport, hba);
mutex_unlock(&bnx2fc_dev_lock);
rtnl_unlock();
}
@@ -1521,28 +1550,27 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
static void bnx2fc_ulp_start(void *handle)
{
struct bnx2fc_hba *hba = handle;
- struct fc_lport *lport = hba->ctlr.lp;
+ struct bnx2fc_interface *interface;
+ struct fc_lport *lport;
- BNX2FC_MISC_DBG("Entered %s\n", __func__);
mutex_lock(&bnx2fc_dev_lock);
- if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
- goto start_disc;
-
- if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done))
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
bnx2fc_fw_init(hba);
-start_disc:
- mutex_unlock(&bnx2fc_dev_lock);
-
BNX2FC_MISC_DBG("bnx2fc started.\n");
- /* Kick off Fabric discovery*/
- if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
- printk(KERN_ERR PFX "ulp_init: start discovery\n");
- lport->tt.frame_send = bnx2fc_xmit;
- bnx2fc_start_disc(hba);
+ list_for_each_entry(interface, &if_list, list) {
+ if (interface->hba == hba) {
+ lport = interface->ctlr.lp;
+ /* Kick off Fabric discovery*/
+ printk(KERN_ERR PFX "ulp_init: start discovery\n");
+ lport->tt.frame_send = bnx2fc_xmit;
+ bnx2fc_start_disc(interface);
+ }
}
+
+ mutex_unlock(&bnx2fc_dev_lock);
}
static void bnx2fc_port_shutdown(struct fc_lport *lport)
@@ -1552,37 +1580,25 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
fc_lport_destroy(lport);
}
-static void bnx2fc_stop(struct bnx2fc_hba *hba)
+static void bnx2fc_stop(struct bnx2fc_interface *interface)
{
struct fc_lport *lport;
struct fc_lport *vport;
- BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__,
- hba->init_done);
- if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) &&
- test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
- lport = hba->ctlr.lp;
- bnx2fc_port_shutdown(lport);
- BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d "
- "offloaded sessions\n",
- hba->num_ofld_sess);
- wait_event_interruptible(hba->shutdown_wait,
- (hba->num_ofld_sess == 0));
- mutex_lock(&lport->lp_mutex);
- list_for_each_entry(vport, &lport->vports, list)
- fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN;
- mutex_unlock(&lport->lp_mutex);
- fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
- fcoe_ctlr_link_down(&hba->ctlr);
- fcoe_clean_pending_queue(lport);
-
- mutex_lock(&hba->hba_mutex);
- clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
- clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
+ return;
- clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
- mutex_unlock(&hba->hba_mutex);
- }
+ lport = interface->ctlr.lp;
+ bnx2fc_port_shutdown(lport);
+
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+ fcoe_ctlr_link_down(&interface->ctlr);
+ fcoe_clean_pending_queue(lport);
}
static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
@@ -1621,8 +1637,7 @@ static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
}
- /* Mark HBA to indicate that the FW INIT is done */
- set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done);
+ set_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags);
return 0;
err_unbind:
@@ -1633,7 +1648,7 @@ err_out:
static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
{
- if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) {
if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
init_timer(&hba->destroy_timer);
hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
@@ -1642,8 +1657,8 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
hba->destroy_timer.data = (unsigned long)hba;
add_timer(&hba->destroy_timer);
wait_event_interruptible(hba->destroy_wait,
- (hba->flags &
- BNX2FC_FLAG_DESTROY_CMPL));
+ test_bit(BNX2FC_FLAG_DESTROY_CMPL,
+ &hba->flags));
/* This should never happen */
if (signal_pending(current))
flush_signals(current);
@@ -1664,40 +1679,57 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
*/
static void bnx2fc_ulp_stop(void *handle)
{
- struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle;
+ struct bnx2fc_hba *hba = handle;
+ struct bnx2fc_interface *interface;
printk(KERN_ERR "ULP_STOP\n");
mutex_lock(&bnx2fc_dev_lock);
- bnx2fc_stop(hba);
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags))
+ goto exit;
+ list_for_each_entry(interface, &if_list, list) {
+ if (interface->hba == hba)
+ bnx2fc_stop(interface);
+ }
+ BUG_ON(hba->num_ofld_sess != 0);
+
+ mutex_lock(&hba->hba_mutex);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_GOING_DOWN,
+ &hba->adapter_state);
+
+ clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ mutex_unlock(&hba->hba_mutex);
+
bnx2fc_fw_destroy(hba);
+exit:
mutex_unlock(&bnx2fc_dev_lock);
}
-static void bnx2fc_start_disc(struct bnx2fc_hba *hba)
+static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
{
struct fc_lport *lport;
int wait_cnt = 0;
BNX2FC_MISC_DBG("Entered %s\n", __func__);
/* Kick off FIP/FLOGI */
- if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
printk(KERN_ERR PFX "Init not done yet\n");
return;
}
- lport = hba->ctlr.lp;
+ lport = interface->ctlr.lp;
BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
if (!bnx2fc_link_ok(lport)) {
BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
- fcoe_ctlr_link_up(&hba->ctlr);
+ fcoe_ctlr_link_up(&interface->ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
- set_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
}
/* wait for the FCF to be selected before issuing FLOGI */
- while (!hba->ctlr.sel_fcf) {
+ while (!interface->ctlr.sel_fcf) {
msleep(250);
/* give up after 3 secs */
if (++wait_cnt > 12)
@@ -1723,15 +1755,15 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
BNX2FC_MISC_DBG("Entered %s\n", __func__);
/* bnx2fc works only when bnx2x is loaded */
- if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) ||
+ (dev->max_fcoe_conn == 0)) {
printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
- " flags: %lx\n",
- dev->netdev->name, dev->flags);
+ " flags: %lx fcoe_conn: %d\n",
+ dev->netdev->name, dev->flags, dev->max_fcoe_conn);
return;
}
- /* Configure FCoE interface */
- hba = bnx2fc_interface_create(dev);
+ hba = bnx2fc_hba_create(dev);
if (!hba) {
printk(KERN_ERR PFX "hba initialization failed\n");
return;
@@ -1739,7 +1771,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
/* Add HBA to the adapter list */
mutex_lock(&bnx2fc_dev_lock);
- list_add_tail(&hba->link, &adapter_list);
+ list_add_tail(&hba->list, &adapter_list);
adapter_count++;
mutex_unlock(&bnx2fc_dev_lock);
@@ -1747,7 +1779,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
rc = dev->register_device(dev, CNIC_ULP_FCOE,
(void *) hba);
if (rc)
- printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc);
+ printk(KERN_ERR PFX "register_device failed, rc = %d\n", rc);
else
set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
}
@@ -1755,52 +1787,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
static int bnx2fc_disable(struct net_device *netdev)
{
- struct bnx2fc_hba *hba;
- struct net_device *phys_dev;
- struct ethtool_drvinfo drvinfo;
+ struct bnx2fc_interface *interface;
int rc = 0;
rtnl_lock();
-
mutex_lock(&bnx2fc_dev_lock);
- /* obtain physical netdev */
- if (netdev->priv_flags & IFF_802_1Q_VLAN)
- phys_dev = vlan_dev_real_dev(netdev);
- else {
- printk(KERN_ERR PFX "Not a vlan device\n");
- rc = -ENODEV;
- goto nodev;
- }
-
- /* verify if the physical device is a netxtreme2 device */
- if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
- memset(&drvinfo, 0, sizeof(drvinfo));
- phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
- if (strcmp(drvinfo.driver, "bnx2x")) {
- printk(KERN_ERR PFX "Not a netxtreme2 device\n");
- rc = -ENODEV;
- goto nodev;
- }
- } else {
- printk(KERN_ERR PFX "unable to obtain drv_info\n");
- rc = -ENODEV;
- goto nodev;
- }
-
- printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n");
-
- /* obtain hba and initialize rest of the structure */
- hba = bnx2fc_hba_lookup(phys_dev);
- if (!hba || !hba->ctlr.lp) {
+ interface = bnx2fc_interface_lookup(netdev);
+ if (!interface || !interface->ctlr.lp) {
rc = -ENODEV;
- printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n");
+ printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
} else {
- fcoe_ctlr_link_down(&hba->ctlr);
- fcoe_clean_pending_queue(hba->ctlr.lp);
+ fcoe_ctlr_link_down(&interface->ctlr);
+ fcoe_clean_pending_queue(interface->ctlr.lp);
}
-nodev:
mutex_unlock(&bnx2fc_dev_lock);
rtnl_unlock();
return rc;
@@ -1809,48 +1810,19 @@ nodev:
static int bnx2fc_enable(struct net_device *netdev)
{
- struct bnx2fc_hba *hba;
- struct net_device *phys_dev;
- struct ethtool_drvinfo drvinfo;
+ struct bnx2fc_interface *interface;
int rc = 0;
rtnl_lock();
-
- BNX2FC_MISC_DBG("Entered %s\n", __func__);
mutex_lock(&bnx2fc_dev_lock);
- /* obtain physical netdev */
- if (netdev->priv_flags & IFF_802_1Q_VLAN)
- phys_dev = vlan_dev_real_dev(netdev);
- else {
- printk(KERN_ERR PFX "Not a vlan device\n");
+ interface = bnx2fc_interface_lookup(netdev);
+ if (!interface || !interface->ctlr.lp) {
rc = -ENODEV;
- goto nodev;
- }
- /* verify if the physical device is a netxtreme2 device */
- if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
- memset(&drvinfo, 0, sizeof(drvinfo));
- phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
- if (strcmp(drvinfo.driver, "bnx2x")) {
- printk(KERN_ERR PFX "Not a netxtreme2 device\n");
- rc = -ENODEV;
- goto nodev;
- }
- } else {
- printk(KERN_ERR PFX "unable to obtain drv_info\n");
- rc = -ENODEV;
- goto nodev;
- }
+ printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
+ } else if (!bnx2fc_link_ok(interface->ctlr.lp))
+ fcoe_ctlr_link_up(&interface->ctlr);
- /* obtain hba and initialize rest of the structure */
- hba = bnx2fc_hba_lookup(phys_dev);
- if (!hba || !hba->ctlr.lp) {
- rc = -ENODEV;
- printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n");
- } else if (!bnx2fc_link_ok(hba->ctlr.lp))
- fcoe_ctlr_link_up(&hba->ctlr);
-
-nodev:
mutex_unlock(&bnx2fc_dev_lock);
rtnl_unlock();
return rc;
@@ -1868,6 +1840,7 @@ nodev:
*/
static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
{
+ struct bnx2fc_interface *interface;
struct bnx2fc_hba *hba;
struct net_device *phys_dev;
struct fc_lport *lport;
@@ -1903,7 +1876,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
memset(&drvinfo, 0, sizeof(drvinfo));
phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
- if (strcmp(drvinfo.driver, "bnx2x")) {
+ if (strncmp(drvinfo.driver, "bnx2x", strlen("bnx2x"))) {
printk(KERN_ERR PFX "Not a netxtreme2 device\n");
rc = -EINVAL;
goto netdev_err;
@@ -1914,7 +1887,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
goto netdev_err;
}
- /* obtain hba and initialize rest of the structure */
+ /* obtain interface and initialize rest of the structure */
hba = bnx2fc_hba_lookup(phys_dev);
if (!hba) {
rc = -ENODEV;
@@ -1922,67 +1895,61 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
goto netdev_err;
}
- if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
- rc = bnx2fc_fw_init(hba);
- if (rc)
- goto netdev_err;
- }
-
- if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ if (bnx2fc_interface_lookup(netdev)) {
rc = -EEXIST;
goto netdev_err;
}
- /* update netdev with vlan netdev */
- hba->netdev = netdev;
- hba->vlan_id = vlan_id;
- hba->vlan_enabled = 1;
-
- rc = bnx2fc_interface_setup(hba, fip_mode);
- if (rc) {
- printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n");
+ interface = bnx2fc_interface_create(hba, netdev, fip_mode);
+ if (!interface) {
+ printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
goto ifput_err;
}
- hba->timer_work_queue =
+ interface->vlan_id = vlan_id;
+ interface->vlan_enabled = 1;
+
+ interface->timer_work_queue =
create_singlethread_workqueue("bnx2fc_timer_wq");
- if (!hba->timer_work_queue) {
+ if (!interface->timer_work_queue) {
printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
rc = -EINVAL;
goto ifput_err;
}
- lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0);
+ lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0);
if (!lport) {
printk(KERN_ERR PFX "Failed to create interface (%s)\n",
netdev->name);
- bnx2fc_netdev_cleanup(hba);
+ bnx2fc_netdev_cleanup(interface);
rc = -EINVAL;
goto if_create_err;
}
+ /* Add interface to if_list */
+ list_add_tail(&interface->list, &if_list);
+
lport->boot_time = jiffies;
/* Make this master N_port */
- hba->ctlr.lp = lport;
+ interface->ctlr.lp = lport;
- set_bit(BNX2FC_CREATE_DONE, &hba->init_done);
- printk(KERN_ERR PFX "create: START DISC\n");
- bnx2fc_start_disc(hba);
+ BNX2FC_HBA_DBG(lport, "create: START DISC\n");
+ bnx2fc_start_disc(interface);
/*
* Release from kref_init in bnx2fc_interface_setup, on success
* lport should be holding a reference taken in bnx2fc_if_create
*/
- bnx2fc_interface_put(hba);
+ bnx2fc_interface_put(interface);
/* put netdev that was held while calling dev_get_by_name */
mutex_unlock(&bnx2fc_dev_lock);
rtnl_unlock();
return 0;
if_create_err:
- destroy_workqueue(hba->timer_work_queue);
+ destroy_workqueue(interface->timer_work_queue);
ifput_err:
- bnx2fc_interface_put(hba);
+ bnx2fc_interface_put(interface);
netdev_err:
module_put(THIS_MODULE);
mod_err:
@@ -1992,7 +1959,7 @@ mod_err:
}
/**
- * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance
+ * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance
*
* @cnic: Pointer to cnic device instance
*
@@ -2012,19 +1979,30 @@ static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
return NULL;
}
-static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
+static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
+ *netdev)
+{
+ struct bnx2fc_interface *interface;
+
+ /* Called with bnx2fc_dev_lock held */
+ list_for_each_entry(interface, &if_list, list) {
+ if (interface->netdev == netdev)
+ return interface;
+ }
+ return NULL;
+}
+
+static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device
+ *phys_dev)
{
- struct list_head *list;
- struct list_head *temp;
struct bnx2fc_hba *hba;
/* Called with bnx2fc_dev_lock held */
- list_for_each_safe(list, temp, &adapter_list) {
- hba = (struct bnx2fc_hba *)list;
+ list_for_each_entry(hba, &adapter_list, list) {
if (hba->phys_dev == phys_dev)
return hba;
}
- printk(KERN_ERR PFX "hba_lookup: hba NULL\n");
+ printk(KERN_ERR PFX "adapter_lookup: hba NULL\n");
return NULL;
}
@@ -2036,6 +2014,8 @@ static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
static void bnx2fc_ulp_exit(struct cnic_dev *dev)
{
struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface, *tmp;
+ struct fc_lport *lport;
BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
@@ -2054,13 +2034,20 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
return;
}
- list_del_init(&hba->link);
+ list_del_init(&hba->list);
adapter_count--;
- if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ list_for_each_entry_safe(interface, tmp, &if_list, list) {
/* destroy not called yet, move to quiesced list */
- bnx2fc_netdev_cleanup(hba);
- bnx2fc_if_destroy(hba->ctlr.lp);
+ if (interface->hba == hba) {
+ bnx2fc_netdev_cleanup(interface);
+ bnx2fc_stop(interface);
+
+ list_del(&interface->list);
+ lport = interface->ctlr.lp;
+ bnx2fc_interface_put(interface);
+ bnx2fc_if_destroy(lport, hba);
+ }
}
mutex_unlock(&bnx2fc_dev_lock);
@@ -2068,7 +2055,7 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
/* unregister cnic device */
if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
- bnx2fc_interface_destroy(hba);
+ bnx2fc_hba_destroy(hba);
}
/**
@@ -2224,6 +2211,7 @@ static int __init bnx2fc_mod_init(void)
}
INIT_LIST_HEAD(&adapter_list);
+ INIT_LIST_HEAD(&if_list);
mutex_init(&bnx2fc_dev_lock);
adapter_count = 0;
@@ -2301,16 +2289,17 @@ static void __exit bnx2fc_mod_exit(void)
mutex_unlock(&bnx2fc_dev_lock);
/* Unregister with cnic */
- list_for_each_entry_safe(hba, next, &to_be_deleted, link) {
- list_del_init(&hba->link);
- printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n",
- hba, atomic_read(&hba->kref.refcount));
+ list_for_each_entry_safe(hba, next, &to_be_deleted, list) {
+ list_del_init(&hba->list);
+ printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p\n",
+ hba);
bnx2fc_ulp_stop(hba);
/* unregister cnic device */
if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
&hba->reg_with_cnic))
- hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
- bnx2fc_interface_destroy(hba);
+ hba->cnic->unregister_device(hba->cnic,
+ CNIC_ULP_FCOE);
+ bnx2fc_hba_destroy(hba);
}
cnic_unregister_driver(CNIC_ULP_FCOE);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index f756d5f85c7..72cfb14acd3 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -2,7 +2,7 @@
* This file contains the code for the low level functions that interact
* with the 57712 FCoE firmware.
*
- * Copyright (c) 2008 - 2010 Broadcom Corporation
+ * Copyright (c) 2008 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -23,7 +23,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
- struct fcoe_kcqe *conn_destroy);
+ struct fcoe_kcqe *destroy_kcqe);
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
@@ -67,7 +67,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
int rc = 0;
if (!hba->cnic) {
- printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
+ printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
return -ENODEV;
}
@@ -100,6 +100,10 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+ fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
+ fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
+
+
fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
((u64) hba->hash_tbl_pbl_dma >> 32);
@@ -122,6 +126,7 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
fcoe_init3.error_bit_map_lo = 0xffffffff;
fcoe_init3.error_bit_map_hi = 0xffffffff;
+ fcoe_init3.perf_config = 1;
kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
@@ -161,7 +166,8 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
struct fc_lport *lport = port->lport;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct kwqe *kwqe_arr[4];
struct fcoe_kwqe_conn_offload1 ofld_req1;
struct fcoe_kwqe_conn_offload2 ofld_req2;
@@ -223,7 +229,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
ofld_req3.hdr.flags =
(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
- ofld_req3.vlan_tag = hba->vlan_id <<
+ ofld_req3.vlan_tag = interface->vlan_id <<
FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
@@ -273,8 +279,20 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
+ /*
+ * Info from PRLI response, this info is used for sequence level error
+ * recovery support
+ */
+ if (tgt->dev_type == TYPE_TAPE) {
+ ofld_req3.flags |= 1 <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
+ ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
+ ? 1 : 0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
+ }
+
/* vlan flag */
- ofld_req3.flags |= (hba->vlan_enabled <<
+ ofld_req3.flags |= (interface->vlan_enabled <<
FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
/* C2_VALID and ACK flags are not set as they are not supported */
@@ -289,19 +307,20 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
- ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5];
+ ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5];
/* local mac */
- ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4];
- ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3];
- ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2];
- ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1];
- ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0];
- ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
- ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
- ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
- ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
- ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
- ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+ ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4];
+ ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3];
+ ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
+ ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
+ ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
+ ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
+ /* fcf mac */
+ ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
+ ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
+ ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
+ ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
+ ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
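
The offload request above now carries the local and FCF MAC addresses as three 2-byte fields (lo/mid/hi) filled in reverse byte order, replacing the old lo32/hi16 pair. A stand-alone sketch of that packing; the mac_fields struct is a hypothetical stand-in for the KWQE fields, not the firmware structure itself.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the lo/mid/hi 2-byte MAC fields used above. */
struct mac_fields {
        uint8_t lo[2];   /* bytes 5, 4 of the MAC */
        uint8_t mid[2];  /* bytes 3, 2 */
        uint8_t hi[2];   /* bytes 1, 0 */
};

static void mac_to_fields(const uint8_t mac[6], struct mac_fields *f)
{
        f->lo[0]  = mac[5];
        f->lo[1]  = mac[4];
        f->mid[0] = mac[3];
        f->mid[1] = mac[2];
        f->hi[0]  = mac[1];
        f->hi[1]  = mac[0];
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
        struct mac_fields f;

        mac_to_fields(mac, &f);
        printf("hi=%02x:%02x mid=%02x:%02x lo=%02x:%02x\n",
               f.hi[0], f.hi[1], f.mid[0], f.mid[1], f.lo[0], f.lo[1]);
        return 0;
}
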
@@ -331,7 +350,8 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
struct kwqe *kwqe_arr[2];
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct fcoe_kwqe_conn_enable_disable enbl_req;
struct fc_lport *lport = port->lport;
struct fc_rport *rport = tgt->rport;
@@ -345,20 +365,21 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
enbl_req.hdr.flags =
(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
- enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
+ enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5];
/* local mac */
- enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
- enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
- enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
- enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
- enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
-
- enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
- enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
- enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
- enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
- enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
- enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+ enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4];
+ enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3];
+ enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2];
+ enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1];
+ enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
+ memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
+
+ enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
+ enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
+ enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
+ enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
+ enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
+ enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
port_id = fc_host_port_id(lport->host);
if (port_id != tgt->sid) {
@@ -374,10 +395,10 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
enbl_req.d_id[0] = (port_id & 0x000000FF);
enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
- enbl_req.vlan_tag = hba->vlan_id <<
+ enbl_req.vlan_tag = interface->vlan_id <<
FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
- enbl_req.vlan_flag = hba->vlan_enabled;
+ enbl_req.vlan_flag = interface->vlan_enabled;
enbl_req.context_id = tgt->context_id;
enbl_req.conn_id = tgt->fcoe_conn_id;
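
The s_id/d_id assignments above hand the 24-bit Fibre Channel address to the firmware one byte at a time, least-significant byte first. A small self-contained sketch of the split and the reverse reassembly; the function names are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Split a 24-bit FC address into three bytes, LSB first (as d_id[0..2] above). */
static void fc_id_split(uint32_t port_id, uint8_t out[3])
{
        out[0] = port_id & 0x000000FF;
        out[1] = (port_id & 0x0000FF00) >> 8;
        out[2] = (port_id & 0x00FF0000) >> 16;
}

/* Reassemble the 24-bit address from the byte array. */
static uint32_t fc_id_join(const uint8_t in[3])
{
        return (uint32_t)in[0] | ((uint32_t)in[1] << 8) | ((uint32_t)in[2] << 16);
}

int main(void)
{
        uint8_t d_id[3];

        fc_id_split(0x6503E8, d_id);
        printf("bytes: %02x %02x %02x, rejoined: 0x%06x\n",
               d_id[0], d_id[1], d_id[2], fc_id_join(d_id));
        return 0;
}
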
@@ -397,7 +418,8 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct fcoe_kwqe_conn_enable_disable disable_req;
struct kwqe *kwqe_arr[2];
struct fc_rport *rport = tgt->rport;
@@ -411,18 +433,19 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
disable_req.hdr.flags =
(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
- disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
- disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
- disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
- disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
- disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
+ disable_req.src_mac_addr_lo[0] = tgt->src_addr[5];
+ disable_req.src_mac_addr_lo[1] = tgt->src_addr[4];
+ disable_req.src_mac_addr_mid[0] = tgt->src_addr[3];
+ disable_req.src_mac_addr_mid[1] = tgt->src_addr[2];
+ disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
+ disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
- disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
- disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
- disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
- disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
- disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
- disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+ disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
+ disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
+ disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
+ disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
+ disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
+ disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
port_id = tgt->sid;
disable_req.s_id[0] = (port_id & 0x000000FF);
@@ -436,11 +459,11 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
disable_req.context_id = tgt->context_id;
disable_req.conn_id = tgt->fcoe_conn_id;
- disable_req.vlan_tag = hba->vlan_id <<
+ disable_req.vlan_tag = interface->vlan_id <<
FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
disable_req.vlan_tag |=
3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
- disable_req.vlan_flag = hba->vlan_enabled;
+ disable_req.vlan_flag = interface->vlan_enabled;
kwqe_arr[0] = (struct kwqe *) &disable_req;
@@ -480,16 +503,36 @@ int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
return rc;
}
+static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
+{
+ struct bnx2fc_lport *blport;
+
+ spin_lock_bh(&hba->hba_lock);
+ list_for_each_entry(blport, &hba->vports, list) {
+ if (blport->lport == lport) {
+ spin_unlock_bh(&hba->hba_lock);
+ return true;
+ }
+ }
+ spin_unlock_bh(&hba->hba_lock);
+ return false;
+
+}
+
+
static void bnx2fc_unsol_els_work(struct work_struct *work)
{
struct bnx2fc_unsol_els *unsol_els;
struct fc_lport *lport;
+ struct bnx2fc_hba *hba;
struct fc_frame *fp;
unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
lport = unsol_els->lport;
fp = unsol_els->fp;
- fc_exch_recv(lport, fp);
+ hba = unsol_els->hba;
+ if (is_valid_lport(hba, lport))
+ fc_exch_recv(lport, fp);
kfree(unsol_els);
}
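
The new is_valid_lport()/bnx2fc_unsol_els_work() pairing above only touches the lport if it is still on the hba's vport list, guarding deferred work against an lport that was torn down after the work item was queued. A rough user-space sketch of that pattern, assuming a plain mutex-protected singly linked list in place of hba->vports and hba_lock; all type and variable names here are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct lport { int id; };

struct vport_entry {
        struct lport *lport;
        struct vport_entry *next;
};

static struct vport_entry *vports;          /* head of the registered list */
static pthread_mutex_t vports_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return true only if @lport is still on the registered list. Deferred work
 * uses this to avoid dereferencing an lport that has already gone away. */
static bool is_valid_lport(struct lport *lport)
{
        struct vport_entry *e;
        bool found = false;

        pthread_mutex_lock(&vports_lock);
        for (e = vports; e; e = e->next) {
                if (e->lport == lport) {
                        found = true;
                        break;
                }
        }
        pthread_mutex_unlock(&vports_lock);
        return found;
}

int main(void)
{
        struct lport a = { 1 };
        struct vport_entry ea = { &a, NULL };
        struct lport stale = { 2 };

        vports = &ea;
        printf("a valid: %d, stale valid: %d\n",
               is_valid_lport(&a), is_valid_lport(&stale));
        return 0;
}
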
@@ -499,6 +542,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
{
struct fcoe_port *port = tgt->port;
struct fc_lport *lport = port->lport;
+ struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_unsol_els *unsol_els;
struct fc_frame_header *fh;
struct fc_frame *fp;
@@ -559,6 +603,7 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
fr_eof(fp) = FC_EOF_T;
fr_crc(fp) = cpu_to_le32(~crc);
unsol_els->lport = lport;
+ unsol_els->hba = interface->hba;
unsol_els->fp = fp;
INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
@@ -580,9 +625,12 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
u32 frame_len, len;
struct bnx2fc_cmd *io_req = NULL;
struct fcoe_task_ctx_entry *task, *task_page;
- struct bnx2fc_hba *hba = tgt->port->priv;
+ struct bnx2fc_interface *interface = tgt->port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
int task_idx, index;
int rc = 0;
+ u64 err_warn_bit_map;
+ u8 err_warn = 0xff;
BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
@@ -640,44 +688,48 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
xid = err_entry->fc_hdr.ox_id;
BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
- err_entry->err_warn_bitmap_hi,
- err_entry->err_warn_bitmap_lo);
+ err_entry->data.err_warn_bitmap_hi,
+ err_entry->data.err_warn_bitmap_lo);
BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
- err_entry->tx_buf_off, err_entry->rx_buf_off);
+ err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
- bnx2fc_return_rqe(tgt, 1);
if (xid > BNX2FC_MAX_XID) {
BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
xid);
- spin_unlock_bh(&tgt->tgt_lock);
- break;
+ goto ret_err_rqe;
}
task_idx = xid / BNX2FC_TASKS_PER_PAGE;
index = xid % BNX2FC_TASKS_PER_PAGE;
task_page = (struct fcoe_task_ctx_entry *)
- hba->task_ctx[task_idx];
+ hba->task_ctx[task_idx];
task = &(task_page[index]);
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
- if (!io_req) {
- spin_unlock_bh(&tgt->tgt_lock);
- break;
- }
+ if (!io_req)
+ goto ret_err_rqe;
if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
- spin_unlock_bh(&tgt->tgt_lock);
- break;
+ goto ret_err_rqe;
}
if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
&io_req->req_flags)) {
BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
"progress.. ignore unsol err\n");
- spin_unlock_bh(&tgt->tgt_lock);
- break;
+ goto ret_err_rqe;
+ }
+
+ err_warn_bit_map = (u64)
+ ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
+ (u64)err_entry->data.err_warn_bitmap_lo;
+ for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
+ if (err_warn_bit_map & (u64)((u64)1 << i)) {
+ err_warn = i;
+ break;
+ }
}
/*
@@ -687,26 +739,61 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
* logging out the target, when the ABTS eventually
* times out.
*/
- if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
- &io_req->req_flags)) {
- /*
- * Cancel the timeout_work, as we received IO
- * completion with FW error.
- */
- if (cancel_delayed_work(&io_req->timeout_work))
- kref_put(&io_req->refcount,
- bnx2fc_cmd_release); /* timer hold */
-
- rc = bnx2fc_initiate_abts(io_req);
- if (rc != SUCCESS) {
- BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
- "failed. issue cleanup\n");
- rc = bnx2fc_initiate_cleanup(io_req);
- BUG_ON(rc);
- }
- } else
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
"in ABTS processing\n", xid);
+ goto ret_err_rqe;
+ }
+ BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
+ if (tgt->dev_type != TYPE_TAPE)
+ goto skip_rec;
+ switch (err_warn) {
+ case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
+ case FCOE_ERROR_CODE_DATA_OOO_RO:
+ case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
+ case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
+ case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
+ case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
+ BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
+ xid);
+ memset(&io_req->err_entry, 0,
+ sizeof(struct fcoe_err_report_entry));
+ memcpy(&io_req->err_entry, err_entry,
+ sizeof(struct fcoe_err_report_entry));
+ if (!test_bit(BNX2FC_FLAG_SRR_SENT,
+ &io_req->req_flags)) {
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_rec(io_req);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (rc)
+ goto skip_rec;
+ } else
+ printk(KERN_ERR PFX "SRR in progress\n");
+ goto ret_err_rqe;
+ break;
+ default:
+ break;
+ }
+
+skip_rec:
+ set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
+ /*
+ * Cancel the timeout_work, as we received IO
+ * completion with FW error.
+ */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc != SUCCESS) {
+ printk(KERN_ERR PFX "err_warn: initiate_abts "
+ "failed xid = 0x%x. issue cleanup\n",
+ io_req->xid);
+ bnx2fc_initiate_cleanup(io_req);
+ }
+ret_err_rqe:
+ bnx2fc_return_rqe(tgt, 1);
spin_unlock_bh(&tgt->tgt_lock);
break;
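
The err_warn handling above folds the two 32-bit halves of the error/warning bitmap into a u64 and records the index of the lowest set bit; note that the 1 being shifted must itself be 64-bit wide, otherwise bits 32-63 can never be reached. A compact sketch of that scan, with a local constant standing in for BNX2FC_NUM_ERR_BITS.

#include <stdint.h>
#include <stdio.h>

#define NUM_ERR_BITS 64   /* assumed stand-in for BNX2FC_NUM_ERR_BITS */

/* Combine the hi/lo halves and return the index of the lowest set bit,
 * or 0xff if the map is empty, mirroring the loop in the hunk above. */
static uint8_t first_err_warn(uint32_t bitmap_hi, uint32_t bitmap_lo)
{
        uint64_t map = ((uint64_t)bitmap_hi << 32) | (uint64_t)bitmap_lo;
        int i;

        for (i = 0; i < NUM_ERR_BITS; i++)
                if (map & ((uint64_t)1 << i))
                        return (uint8_t)i;
        return 0xff;
}

int main(void)
{
        /* bit 34 set in the high word, bit 3 set in the low word -> 3 wins */
        printf("err_warn = %u\n", first_err_warn(1u << 2, 1u << 3));
        return 0;
}
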
@@ -722,11 +809,52 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
- err_entry->err_warn_bitmap_hi,
- err_entry->err_warn_bitmap_lo);
+ err_entry->data.err_warn_bitmap_hi,
+ err_entry->data.err_warn_bitmap_lo);
BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
- err_entry->tx_buf_off, err_entry->rx_buf_off);
+ err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
+
+ if (xid > BNX2FC_MAX_XID) {
+ BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
+ goto ret_warn_rqe;
+ }
+
+ err_warn_bit_map = (u64)
+ ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
+ (u64)err_entry->data.err_warn_bitmap_lo;
+ for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
+ if (err_warn_bit_map & (u64) (1 << i)) {
+ err_warn = i;
+ break;
+ }
+ }
+ BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+ if (!io_req)
+ goto ret_warn_rqe;
+
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
+ printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
+ goto ret_warn_rqe;
+ }
+
+ memset(&io_req->err_entry, 0,
+ sizeof(struct fcoe_err_report_entry));
+ memcpy(&io_req->err_entry, err_entry,
+ sizeof(struct fcoe_err_report_entry));
+ if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
+ /* REC_TOV is not a warning code */
+ BUG_ON(1);
+ else
+ BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
+ret_warn_rqe:
bnx2fc_return_rqe(tgt, 1);
spin_unlock_bh(&tgt->tgt_lock);
break;
@@ -742,7 +870,8 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
struct fcoe_task_ctx_entry *task;
struct fcoe_task_ctx_entry *task_page;
struct fcoe_port *port = tgt->port;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct bnx2fc_cmd *io_req;
int task_idx, index;
u16 xid;
@@ -753,7 +882,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
spin_lock_bh(&tgt->tgt_lock);
xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
if (xid >= BNX2FC_MAX_TASKS) {
- printk(KERN_ALERT PFX "ERROR:xid out of range\n");
+ printk(KERN_ERR PFX "ERROR:xid out of range\n");
spin_unlock_bh(&tgt->tgt_lock);
return;
}
@@ -762,9 +891,9 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
task = &(task_page[index]);
- num_rq = ((task->rx_wr_tx_rd.rx_flags &
- FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
- FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);
+ num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
+ FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
+ FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
@@ -777,22 +906,19 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
/* Timestamp IO completion time */
cmd_type = io_req->cmd_type;
- /* optimized completion path */
- if (cmd_type == BNX2FC_SCSI_CMD) {
- rx_state = ((task->rx_wr_tx_rd.rx_flags &
- FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
- FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);
+ rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
+ FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
+ FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);
+ /* Process other IO completion types */
+ switch (cmd_type) {
+ case BNX2FC_SCSI_CMD:
if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
spin_unlock_bh(&tgt->tgt_lock);
return;
}
- }
- /* Process other IO completion types */
- switch (cmd_type) {
- case BNX2FC_SCSI_CMD:
if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
bnx2fc_process_abts_compl(io_req, task, num_rq);
else if (rx_state ==
@@ -819,8 +945,16 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
break;
case BNX2FC_ELS:
- BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
- bnx2fc_process_els_compl(io_req, task, num_rq);
+ if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
+ bnx2fc_process_els_compl(io_req, task, num_rq);
+ else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
+ bnx2fc_process_abts_compl(io_req, task, num_rq);
+ else if (rx_state ==
+ FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
+ bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+ else
+ printk(KERN_ERR PFX "Invalid rx state = %d\n",
+ rx_state);
break;
case BNX2FC_CLEANUP:
@@ -828,6 +962,13 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
kref_put(&io_req->refcount, bnx2fc_cmd_release);
break;
+ case BNX2FC_SEQ_CLEANUP:
+ BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
+ io_req->xid);
+ bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ break;
+
default:
printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
break;
@@ -835,6 +976,20 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
spin_unlock_bh(&tgt->tgt_lock);
}
+void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
+{
+ struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
+ u32 msg;
+
+ wmb();
+ rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
+ FCOE_CQE_TOGGLE_BIT_SHIFT);
+ msg = *((u32 *)rx_db);
+ writel(cpu_to_le32(msg), tgt->ctx_base);
+ mmiowb();
+
+}
+
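
bnx2fc_arm_cq() above (and bnx2fc_ring_doorbell() further down) builds the doorbell value by folding the current toggle bit into the consumer/producer index before writing it to the chip. A tiny sketch of that packing, assuming for illustration that the toggle bit sits at bit 15 as it does in the SQ doorbell case.

#include <stdint.h>
#include <stdio.h>

#define TOGGLE_BIT_SHIFT 15   /* assumed position for illustration */

/* Build the 16-bit doorbell value: index in the low bits, toggle on top. */
static uint16_t doorbell_value(uint16_t idx, unsigned int toggle)
{
        return (uint16_t)(idx | ((toggle & 1) << TOGGLE_BIT_SHIFT));
}

int main(void)
{
        printf("db = 0x%04x\n", doorbell_value(42, 1));
        return 0;
}
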
struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
{
struct bnx2fc_work *work;
@@ -853,8 +1008,8 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
struct fcoe_cqe *cq;
u32 cq_cons;
struct fcoe_cqe *cqe;
+ u32 num_free_sqes = 0;
u16 wqe;
- bool more_cqes_found = false;
/*
* cq_lock is a low contention lock used to protect
@@ -872,62 +1027,53 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
cq_cons = tgt->cq_cons_idx;
cqe = &cq[cq_cons];
- do {
- more_cqes_found ^= true;
-
- while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
- (tgt->cq_curr_toggle_bit <<
- FCOE_CQE_TOGGLE_BIT_SHIFT)) {
+ while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
+ (tgt->cq_curr_toggle_bit <<
+ FCOE_CQE_TOGGLE_BIT_SHIFT)) {
- /* new entry on the cq */
- if (wqe & FCOE_CQE_CQE_TYPE) {
- /* Unsolicited event notification */
- bnx2fc_process_unsol_compl(tgt, wqe);
- } else {
- struct bnx2fc_work *work = NULL;
- struct bnx2fc_percpu_s *fps = NULL;
- unsigned int cpu = wqe % num_possible_cpus();
-
- fps = &per_cpu(bnx2fc_percpu, cpu);
- spin_lock_bh(&fps->fp_work_lock);
- if (unlikely(!fps->iothread))
- goto unlock;
-
- work = bnx2fc_alloc_work(tgt, wqe);
- if (work)
- list_add_tail(&work->list,
- &fps->work_list);
+ /* new entry on the cq */
+ if (wqe & FCOE_CQE_CQE_TYPE) {
+ /* Unsolicited event notification */
+ bnx2fc_process_unsol_compl(tgt, wqe);
+ } else {
+ /* Pending work request completion */
+ struct bnx2fc_work *work = NULL;
+ struct bnx2fc_percpu_s *fps = NULL;
+ unsigned int cpu = wqe % num_possible_cpus();
+
+ fps = &per_cpu(bnx2fc_percpu, cpu);
+ spin_lock_bh(&fps->fp_work_lock);
+ if (unlikely(!fps->iothread))
+ goto unlock;
+
+ work = bnx2fc_alloc_work(tgt, wqe);
+ if (work)
+ list_add_tail(&work->list,
+ &fps->work_list);
unlock:
- spin_unlock_bh(&fps->fp_work_lock);
+ spin_unlock_bh(&fps->fp_work_lock);
- /* Pending work request completion */
- if (fps->iothread && work)
- wake_up_process(fps->iothread);
- else
- bnx2fc_process_cq_compl(tgt, wqe);
- }
- cqe++;
- tgt->cq_cons_idx++;
-
- if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
- tgt->cq_cons_idx = 0;
- cqe = cq;
- tgt->cq_curr_toggle_bit =
- 1 - tgt->cq_curr_toggle_bit;
- }
+ /* Pending work request completion */
+ if (fps->iothread && work)
+ wake_up_process(fps->iothread);
+ else
+ bnx2fc_process_cq_compl(tgt, wqe);
}
- /* Re-arm CQ */
- if (more_cqes_found) {
- tgt->conn_db->cq_arm.lo = -1;
- wmb();
+ cqe++;
+ tgt->cq_cons_idx++;
+ num_free_sqes++;
+
+ if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
+ tgt->cq_cons_idx = 0;
+ cqe = cq;
+ tgt->cq_curr_toggle_bit =
+ 1 - tgt->cq_curr_toggle_bit;
}
- } while (more_cqes_found);
-
- /*
- * Commit tgt->cq_cons_idx change to the memory
- * spin_lock implies full memory barrier, no need to smp_wmb
- */
-
+ }
+ if (num_free_sqes) {
+ bnx2fc_arm_cq(tgt);
+ atomic_add(num_free_sqes, &tgt->free_sqes);
+ }
spin_unlock_bh(&tgt->cq_lock);
return 0;
}
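
The rewritten completion loop above drains CQEs whose toggle bit matches the consumer's current toggle, flips the toggle each time the ring wraps, and counts the entries so the freed SQEs can be returned and the CQ re-armed in one go. A self-contained model of that consumption scheme; the ring size, field layout and names below are illustrative, not the hardware's.

#include <stdint.h>
#include <stdio.h>

#define CQ_WQES_MAX 8                 /* small ring for illustration */
#define TOGGLE_BIT_SHIFT 15
#define TOGGLE_BIT (1u << TOGGLE_BIT_SHIFT)

/* Hypothetical consumer state mirroring tgt->cq_cons_idx / cq_curr_toggle_bit. */
struct cq_state {
        uint16_t cq[CQ_WQES_MAX];     /* each entry carries a toggle bit + payload */
        uint16_t cons_idx;
        unsigned int curr_toggle;
};

/* Drain every entry whose toggle bit matches the consumer's current toggle;
 * flip the toggle on wrap-around, as the rewritten loop above does. */
static int drain_cq(struct cq_state *s)
{
        int drained = 0;

        while (((s->cq[s->cons_idx] & TOGGLE_BIT) >> TOGGLE_BIT_SHIFT) ==
               s->curr_toggle) {
                /* process s->cq[s->cons_idx] & ~TOGGLE_BIT here */
                drained++;
                if (++s->cons_idx == CQ_WQES_MAX) {
                        s->cons_idx = 0;
                        s->curr_toggle = 1 - s->curr_toggle;
                }
        }
        return drained;
}

int main(void)
{
        struct cq_state s = { .cons_idx = 0, .curr_toggle = 1 };
        int i;

        /* producer posted 5 entries with the toggle bit set */
        for (i = 0; i < 5; i++)
                s.cq[i] = TOGGLE_BIT | (uint16_t)i;
        printf("drained %d entries\n", drain_cq(&s));
        return 0;
}
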
@@ -947,7 +1093,7 @@ static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
if (!tgt) {
- printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
+ printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
return;
}
@@ -968,6 +1114,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
{
struct bnx2fc_rport *tgt;
struct fcoe_port *port;
+ struct bnx2fc_interface *interface;
u32 conn_id;
u32 context_id;
int rc;
@@ -982,8 +1129,9 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
ofld_kcqe->fcoe_conn_context_id);
port = tgt->port;
- if (hba != tgt->port->priv) {
- printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+ interface = tgt->port->priv;
+ if (hba != interface->hba) {
+ printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
goto ofld_cmpl_err;
}
/*
@@ -1004,7 +1152,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
/* now enable the session */
rc = bnx2fc_send_session_enable_req(port, tgt);
if (rc) {
- printk(KERN_ALERT PFX "enable session failed\n");
+ printk(KERN_ERR PFX "enable session failed\n");
goto ofld_cmpl_err;
}
}
@@ -1027,6 +1175,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
struct fcoe_kcqe *ofld_kcqe)
{
struct bnx2fc_rport *tgt;
+ struct bnx2fc_interface *interface;
u32 conn_id;
u32 context_id;
@@ -1034,7 +1183,7 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
conn_id = ofld_kcqe->fcoe_conn_id;
tgt = hba->tgt_ofld_list[conn_id];
if (!tgt) {
- printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
+ printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
return;
}
@@ -1046,16 +1195,17 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
* and enable
*/
if (tgt->context_id != context_id) {
- printk(KERN_ALERT PFX "context id mis-match\n");
+ printk(KERN_ERR PFX "context id mis-match\n");
return;
}
- if (hba != tgt->port->priv) {
- printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+ interface = tgt->port->priv;
+ if (hba != interface->hba) {
+ printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
goto enbl_cmpl_err;
}
- if (ofld_kcqe->completion_status) {
+ if (ofld_kcqe->completion_status)
goto enbl_cmpl_err;
- } else {
+ else {
/* enable successful - rport ready for issuing IOs */
set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
@@ -1078,14 +1228,14 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
conn_id = disable_kcqe->fcoe_conn_id;
tgt = hba->tgt_ofld_list[conn_id];
if (!tgt) {
- printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
+ printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
return;
}
BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
if (disable_kcqe->completion_status) {
- printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
+ printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
disable_kcqe->completion_status);
return;
} else {
@@ -1107,14 +1257,14 @@ static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
conn_id = destroy_kcqe->fcoe_conn_id;
tgt = hba->tgt_ofld_list[conn_id];
if (!tgt) {
- printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
+ printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
return;
}
BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
if (destroy_kcqe->completion_status) {
- printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
+ printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
destroy_kcqe->completion_status);
return;
} else {
@@ -1141,7 +1291,12 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
printk(KERN_ERR PFX "init_failure due to NIC error\n");
break;
-
+ case FCOE_KCQE_COMPLETION_STATUS_ERROR:
+ printk(KERN_ERR PFX "init failure due to compl status err\n");
+ break;
+ case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
+ printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
+ break;
default:
printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
}
@@ -1200,7 +1355,7 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
} else {
printk(KERN_ERR PFX "DESTROY success\n");
}
- hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+ set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
wake_up_interruptible(&hba->destroy_wait);
break;
@@ -1222,7 +1377,7 @@ void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
case FCOE_KCQE_OPCODE_FCOE_ERROR:
/* fall thru */
default:
- printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
+ printk(KERN_ERR PFX "unknown opcode 0x%x\n",
kcqe->op_code);
}
}
@@ -1247,21 +1402,14 @@ void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
{
- struct b577xx_doorbell_set_prod ev_doorbell;
+ struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
u32 msg;
wmb();
-
- memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
- ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;
-
- ev_doorbell.prod = tgt->sq_prod_idx |
+ sq_db->prod = tgt->sq_prod_idx |
(tgt->sq_curr_toggle_bit << 15);
- ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
- B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
- msg = *((u32 *)&ev_doorbell);
+ msg = *((u32 *)sq_db);
writel(cpu_to_le32(msg), tgt->ctx_base);
-
mmiowb();
}
@@ -1272,7 +1420,8 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
struct fcoe_port *port = tgt->port;
u32 reg_off;
resource_size_t reg_base;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
reg_base = pci_resource_start(hba->pcidev,
BNX2X_DOORBELL_PCI_BAR);
@@ -1311,6 +1460,96 @@ void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
tgt->conn_db->rq_prod = tgt->rq_prod_idx;
}
+void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
+ struct fcoe_task_ctx_entry *task,
+ struct bnx2fc_cmd *orig_io_req,
+ u32 offset)
+{
+ struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
+ struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
+ struct bnx2fc_interface *interface = tgt->port->priv;
+ struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
+ struct fcoe_task_ctx_entry *orig_task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fcoe_ext_mul_sges_ctx *sgl;
+ u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
+ u8 orig_task_type;
+ u16 orig_xid = orig_io_req->xid;
+ u32 context_id = tgt->context_id;
+ u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
+ u32 orig_offset = offset;
+ int bd_count;
+ int orig_task_idx, index;
+ int i;
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+ orig_task_type = FCOE_TASK_TYPE_WRITE;
+ else
+ orig_task_type = FCOE_TASK_TYPE_READ;
+
+ /* Tx flags */
+ task->txwr_rxrd.const_ctx.tx_flags =
+ FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
+ /* init flags */
+ task->txwr_rxrd.const_ctx.init_flags = task_type <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+ task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+ task->rxwr_txrd.const_ctx.init_flags = context_id <<
+ FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+ task->rxwr_txrd.const_ctx.init_flags = context_id <<
+ FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+
+ task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
+
+ task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
+ task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
+
+ bd_count = orig_io_req->bd_tbl->bd_valid;
+
+ /* obtain the appropriate bd entry from relative offset */
+ for (i = 0; i < bd_count; i++) {
+ if (offset < bd[i].buf_len)
+ break;
+ offset -= bd[i].buf_len;
+ }
+ phys_addr += (i * sizeof(struct fcoe_bd_ctx));
+
+ if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
+ (u32)phys_addr;
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
+ (u32)((u64)phys_addr >> 32);
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
+ bd_count;
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
+ offset; /* adjusted offset */
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
+ } else {
+ orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
+ index = orig_xid % BNX2FC_TASKS_PER_PAGE;
+
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[orig_task_idx];
+ orig_task = &(task_page[index]);
+
+ /* Multiple SGEs were used for this IO */
+ sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
+ sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
+ sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
+ sgl->mul_sgl.sgl_size = bd_count;
+ sgl->mul_sgl.cur_sge_off = offset; /*adjusted offset */
+ sgl->mul_sgl.cur_sge_idx = i;
+
+ memset(&task->rxwr_only.rx_seq_ctx, 0,
+ sizeof(struct fcoe_rx_seq_ctx));
+ task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
+ task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
+ }
+}
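
bnx2fc_init_seq_cleanup_task() above converts the relative offset reported for the failed sequence into a (BD index, in-BD offset) pair by walking the buffer descriptor table, then points cur_sge_idx/cur_sge_off at that resume position. The walk in isolation, with a trimmed-down BD entry type standing in for struct fcoe_bd_ctx:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical BD entry: only the length matters for the offset walk. */
struct bd_entry {
        uint32_t buf_len;
};

/* Given a relative byte offset into the I/O, find the BD that contains it
 * and the remaining offset inside that BD. */
static int find_bd_for_offset(const struct bd_entry *bd, int bd_count,
                              uint32_t *offset)
{
        int i;

        for (i = 0; i < bd_count; i++) {
                if (*offset < bd[i].buf_len)
                        break;
                *offset -= bd[i].buf_len;
        }
        return i;   /* bd_count means the offset was past the end */
}

int main(void)
{
        struct bd_entry bds[3] = { { 4096 }, { 4096 }, { 2048 } };
        uint32_t off = 5000;
        int idx = find_bd_for_offset(bds, 3, &off);

        printf("offset 5000 -> bd %d, in-bd offset %u\n", idx, off);
        return 0;
}
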
void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u16 orig_xid)
@@ -1322,18 +1561,31 @@ void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
/* Tx Write Rx Read */
- task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
- FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
- task->tx_wr_rx_rd.init_flags = task_type <<
- FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
- task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
- FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
- /* Common */
- task->cmn.common_flags = context_id <<
- FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
- task->cmn.general.cleanup_info.task_id = orig_xid;
-
-
+ /* init flags */
+ task->txwr_rxrd.const_ctx.init_flags = task_type <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+ task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+ if (tgt->dev_type == TYPE_TAPE)
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_TAPE <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ else
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_DISK <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
+
+ /* Tx flags */
+ task->txwr_rxrd.const_ctx.tx_flags =
+ FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
+
+ /* Rx Read Tx Write */
+ task->rxwr_txrd.const_ctx.init_flags = context_id <<
+ FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+ task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
+ FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
}
void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
@@ -1342,6 +1594,7 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
struct bnx2fc_rport *tgt = io_req->tgt;
struct fc_frame_header *fc_hdr;
+ struct fcoe_ext_mul_sges_ctx *sgl;
u8 task_type = 0;
u64 *hdr;
u64 temp_hdr[3];
@@ -1367,47 +1620,54 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
/* Tx only */
if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
(task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
- task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
(u32)mp_req->mp_req_bd_dma;
- task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
(u32)((u64)mp_req->mp_req_bd_dma >> 32);
- task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
- BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
- (unsigned long long)mp_req->mp_req_bd_dma);
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
}
/* Tx Write Rx Read */
- task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
- FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
- task->tx_wr_rx_rd.init_flags = task_type <<
- FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
- task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
- FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
- task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
- FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
-
- /* Common */
- task->cmn.data_2_trns = io_req->data_xfer_len;
- context_id = tgt->context_id;
- task->cmn.common_flags = context_id <<
- FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
- task->cmn.common_flags |= 1 <<
- FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
- task->cmn.common_flags |= 1 <<
- FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
+ /* init flags */
+ task->txwr_rxrd.const_ctx.init_flags = task_type <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+ if (tgt->dev_type == TYPE_TAPE)
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_TAPE <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ else
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_DISK <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+
+ /* tx flags */
+ task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
/* Rx Write Tx Read */
+ task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
+
+ /* rx flags */
+ task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
+ FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
+
+ context_id = tgt->context_id;
+ task->rxwr_txrd.const_ctx.init_flags = context_id <<
+ FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+
fc_hdr = &(mp_req->req_fc_hdr);
if (task_type == FCOE_TASK_TYPE_MIDPATH) {
fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
fc_hdr->fh_rx_id = htons(0xffff);
- task->rx_wr_tx_rd.rx_id = 0xffff;
+ task->rxwr_txrd.var_ctx.rx_id = 0xffff;
} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
}
/* Fill FC Header into middle path buffer */
- hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+ hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
hdr[0] = cpu_to_be64(temp_hdr[0]);
hdr[1] = cpu_to_be64(temp_hdr[1]);
@@ -1415,12 +1675,12 @@ void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
/* Rx Only */
if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+ sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
- task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
- (u32)mp_req->mp_resp_bd_dma;
- task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
+ sgl->mul_sgl.cur_sge_addr.hi =
(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
- task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
+ sgl->mul_sgl.sgl_size = 1;
}
}
@@ -1431,6 +1691,9 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
struct io_bdt *bd_tbl = io_req->bd_tbl;
struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fcoe_cached_sge_ctx *cached_sge;
+ struct fcoe_ext_mul_sges_ctx *sgl;
+ int dev_type = tgt->dev_type;
u64 *fcp_cmnd;
u64 tmp_fcp_cmnd[4];
u32 context_id;
@@ -1448,48 +1711,54 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
task_type = FCOE_TASK_TYPE_READ;
/* Tx only */
+ bd_count = bd_tbl->bd_valid;
if (task_type == FCOE_TASK_TYPE_WRITE) {
- task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
- (u32)bd_tbl->bd_tbl_dma;
- task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
- (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
- task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
- bd_tbl->bd_valid;
+ if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
+ struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+ task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
+ fcoe_bd_tbl->buf_addr_lo;
+ task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
+ fcoe_bd_tbl->buf_addr_hi;
+ task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
+ fcoe_bd_tbl->buf_len;
+
+ task->txwr_rxrd.const_ctx.init_flags |= 1 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
+ } else {
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
+ (u32)bd_tbl->bd_tbl_dma;
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
+ bd_tbl->bd_valid;
+ }
}
/*Tx Write Rx Read */
/* Init state to NORMAL */
- task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
- FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
- task->tx_wr_rx_rd.init_flags = task_type <<
- FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
- task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
- FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
- task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
- FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
-
- /* Common */
- task->cmn.data_2_trns = io_req->data_xfer_len;
- context_id = tgt->context_id;
- task->cmn.common_flags = context_id <<
- FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
- task->cmn.common_flags |= 1 <<
- FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
- task->cmn.common_flags |= 1 <<
- FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
-
- /* Set initiative ownership */
- task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;
+ task->txwr_rxrd.const_ctx.init_flags |= task_type <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+ if (dev_type == TYPE_TAPE)
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_TAPE <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ else
+ task->txwr_rxrd.const_ctx.init_flags |=
+ FCOE_TASK_DEV_TYPE_DISK <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
+ task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+ /* tx flags */
+ task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
/* Set initial seq counter */
- task->cmn.tx_low_seq_cnt = 1;
-
- /* Set state to "waiting for the first packet" */
- task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;
+ task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
/* Fill FCP_CMND IU */
fcp_cmnd = (u64 *)
- task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
+ task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
/* swap fcp_cmnd */
@@ -1501,33 +1770,61 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
}
/* Rx Write Tx Read */
- task->rx_wr_tx_rd.rx_id = 0xffff;
+ task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
- /* Rx Only */
- if (task_type == FCOE_TASK_TYPE_READ) {
+ context_id = tgt->context_id;
+ task->rxwr_txrd.const_ctx.init_flags = context_id <<
+ FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+
+ /* rx flags */
+ /* Set state to "waiting for the first packet" */
+ task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
+ FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
+
+ task->rxwr_txrd.var_ctx.rx_id = 0xffff;
- bd_count = bd_tbl->bd_valid;
+ /* Rx Only */
+ cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
+ sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
+ bd_count = bd_tbl->bd_valid;
+ if (task_type == FCOE_TASK_TYPE_READ &&
+ dev_type == TYPE_DISK) {
if (bd_count == 1) {
struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
- task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
- fcoe_bd_tbl->buf_addr_lo;
- task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
- fcoe_bd_tbl->buf_addr_hi;
- task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
- fcoe_bd_tbl->buf_len;
- task->tx_wr_rx_rd.init_flags |= 1 <<
- FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
+ cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
+ cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
+ cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
+ task->txwr_rxrd.const_ctx.init_flags |= 1 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
+ } else if (bd_count == 2) {
+ struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+ cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
+ cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
+ cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
+
+ fcoe_bd_tbl++;
+ cached_sge->second_buf_addr.lo =
+ fcoe_bd_tbl->buf_addr_lo;
+ cached_sge->second_buf_addr.hi =
+ fcoe_bd_tbl->buf_addr_hi;
+ cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
+ task->txwr_rxrd.const_ctx.init_flags |= 1 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
} else {
- task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
- (u32)bd_tbl->bd_tbl_dma;
- task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
+ sgl->mul_sgl.cur_sge_addr.hi =
(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
- task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
- bd_tbl->bd_valid;
+ sgl->mul_sgl.sgl_size = bd_count;
}
+ } else {
+ sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
+ sgl->mul_sgl.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ sgl->mul_sgl.sgl_size = bd_count;
}
}
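
The bnx2fc_init_task() changes above pick between the cached-SGE fast path and the regular multi-SGL pointer: disk writes may use the cached SGE for a single BD, disk reads for one or two BDs, and tape devices or larger BD counts always take the multi-SGL path. A compact restatement of that decision; the enum and function below are illustrative only.

#include <stdbool.h>
#include <stdio.h>

enum sge_mode { SGE_CACHED, SGE_MULTI };

/* Thresholds follow the hunk above: cached SGE only for disk I/O with one
 * BD on writes, or one or two BDs on reads; everything else is multi-SGL. */
static enum sge_mode pick_sge_mode(bool is_disk, bool is_read, int bd_count)
{
        int cached_max = is_read ? 2 : 1;

        if (is_disk && bd_count > 0 && bd_count <= cached_max)
                return SGE_CACHED;
        return SGE_MULTI;
}

int main(void)
{
        printf("disk write, 1 bd : %s\n",
               pick_sge_mode(true, false, 1) == SGE_CACHED ? "cached" : "multi");
        printf("disk read,  2 bds: %s\n",
               pick_sge_mode(true, true, 2) == SGE_CACHED ? "cached" : "multi");
        printf("tape read,  1 bd : %s\n",
               pick_sge_mode(false, true, 1) == SGE_CACHED ? "cached" : "multi");
        return 0;
}
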
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index b5b5c346d77..6cc3789075b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1,7 +1,7 @@
/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
* IO manager and SCSI IO processing.
*
- * Copyright (c) 2008 - 2010 Broadcom Corporation
+ * Copyright (c) 2008 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,8 +18,6 @@ static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
-static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
- struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
@@ -29,10 +27,11 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
unsigned int timer_msec)
{
- struct bnx2fc_hba *hba = io_req->port->priv;
+ struct bnx2fc_interface *interface = io_req->port->priv;
- if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work,
- msecs_to_jiffies(timer_msec)))
+ if (queue_delayed_work(interface->timer_work_queue,
+ &io_req->timeout_work,
+ msecs_to_jiffies(timer_msec)))
kref_get(&io_req->refcount);
}
@@ -217,6 +216,11 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
return;
BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
+ if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
+ /* Do not call scsi done for this IO */
+ return;
+ }
+
bnx2fc_unmap_sg_list(io_req);
io_req->sc_cmd = NULL;
if (!sc_cmd) {
@@ -419,12 +423,13 @@ free_cmgr:
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
struct fcoe_port *port = tgt->port;
- struct bnx2fc_hba *hba = port->priv;
- struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
struct bnx2fc_cmd *io_req;
struct list_head *listp;
struct io_bdt *bd_tbl;
int index = RESERVE_FREE_LIST_INDEX;
+ u32 free_sqes;
u32 max_sqes;
u16 xid;
@@ -445,8 +450,10 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
* cmgr lock
*/
spin_lock_bh(&cmd_mgr->free_list_lock[index]);
+ free_sqes = atomic_read(&tgt->free_sqes);
if ((list_empty(&(cmd_mgr->free_list[index]))) ||
- (tgt->num_active_ios.counter >= max_sqes)) {
+ (tgt->num_active_ios.counter >= max_sqes) ||
+ (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
"ios(%d):sqes(%d)\n",
tgt->num_active_ios.counter, tgt->max_sqes);
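
For reference, the admission check added above refuses an allocation once the snapshot of free SQ WQEs falls to the share reserved away from this command type. A minimal standalone sketch of the same arithmetic follows; the function name and the value 256 are illustrative stand-ins, not taken from the driver headers:

#include <stdbool.h>

#define SQ_WQES_MAX 256	/* stand-in for BNX2FC_SQ_WQES_MAX */

/*
 * Mirrors "(free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)": with max_sqes as
 * the budget granted to this command type, allocation is refused once
 * free_sqes <= SQ_WQES_MAX - max_sqes, i.e. once only the entries reserved
 * for other command types remain free.
 */
static bool sq_has_room(unsigned int free_sqes, unsigned int max_sqes)
{
	return free_sqes + max_sqes > SQ_WQES_MAX;
}
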
@@ -463,6 +470,7 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
xid = io_req->xid;
cmd_mgr->cmds[xid] = io_req;
atomic_inc(&tgt->num_active_ios);
+ atomic_dec(&tgt->free_sqes);
spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
INIT_LIST_HEAD(&io_req->link);
@@ -481,14 +489,16 @@ struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
kref_init(&io_req->refcount);
return io_req;
}
-static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
+
+struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
{
struct fcoe_port *port = tgt->port;
- struct bnx2fc_hba *hba = port->priv;
- struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
struct bnx2fc_cmd *io_req;
struct list_head *listp;
struct io_bdt *bd_tbl;
+ u32 free_sqes;
u32 max_sqes;
u16 xid;
int index = get_cpu();
@@ -499,8 +509,10 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
* cmgr lock
*/
spin_lock_bh(&cmd_mgr->free_list_lock[index]);
+ free_sqes = atomic_read(&tgt->free_sqes);
if ((list_empty(&cmd_mgr->free_list[index])) ||
- (tgt->num_active_ios.counter >= max_sqes)) {
+ (tgt->num_active_ios.counter >= max_sqes) ||
+ (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
put_cpu();
return NULL;
@@ -513,6 +525,7 @@ static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
xid = io_req->xid;
cmd_mgr->cmds[xid] = io_req;
atomic_inc(&tgt->num_active_ios);
+ atomic_dec(&tgt->free_sqes);
spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
put_cpu();
@@ -562,7 +575,8 @@ void bnx2fc_cmd_release(struct kref *ref)
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
- struct bnx2fc_hba *hba = io_req->port->priv;
+ struct bnx2fc_interface *interface = io_req->port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
size_t sz = sizeof(struct fcoe_bd_ctx);
/* clear tm flags */
@@ -598,7 +612,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
struct bnx2fc_mp_req *mp_req;
struct fcoe_bd_ctx *mp_req_bd;
struct fcoe_bd_ctx *mp_resp_bd;
- struct bnx2fc_hba *hba = io_req->port->priv;
+ struct bnx2fc_interface *interface = io_req->port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
dma_addr_t addr;
size_t sz;
@@ -674,7 +689,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
struct fc_rport_libfc_priv *rp = rport->dd_data;
struct fcoe_port *port;
- struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
struct bnx2fc_rport *tgt;
struct bnx2fc_cmd *io_req;
struct bnx2fc_mp_req *tm_req;
@@ -691,10 +706,10 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
lport = shost_priv(host);
port = lport_priv(lport);
- hba = port->priv;
+ interface = port->priv;
if (rport == NULL) {
- printk(KERN_ALERT PFX "device_reset: rport is NULL\n");
+ printk(KERN_ERR PFX "device_reset: rport is NULL\n");
rc = FAILED;
goto tmf_err;
}
@@ -737,7 +752,9 @@ retry_tmf:
rc = bnx2fc_init_mp_req(io_req);
if (rc == FAILED) {
printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
+ spin_lock_bh(&tgt->tgt_lock);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
goto tmf_err;
}
@@ -766,7 +783,8 @@ retry_tmf:
index = xid % BNX2FC_TASKS_PER_PAGE;
/* Initialize task context for this IO request */
- task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
task = &(task_page[index]);
bnx2fc_init_mp_task(io_req, task);
@@ -798,10 +816,10 @@ retry_tmf:
spin_unlock_bh(&tgt->tgt_lock);
if (!rc) {
- printk(KERN_ERR PFX "task mgmt command failed...\n");
+ BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
rc = FAILED;
} else {
- printk(KERN_ERR PFX "task mgmt command success...\n");
+ BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
rc = SUCCESS;
}
tmf_err:
@@ -814,7 +832,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
struct bnx2fc_rport *tgt = io_req->tgt;
struct fc_rport *rport = tgt->rport;
struct fc_rport_priv *rdata = tgt->rdata;
- struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
struct fcoe_port *port;
struct bnx2fc_cmd *abts_io_req;
struct fcoe_task_ctx_entry *task;
@@ -831,7 +849,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
port = io_req->port;
- hba = port->priv;
+ interface = port->priv;
lport = port->lport;
if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
@@ -841,7 +859,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
}
if (rport == NULL) {
- printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n");
+ printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
rc = FAILED;
goto abts_err;
}
@@ -873,7 +891,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
/* Obtain oxid and rxid for the original exchange to be aborted */
fc_hdr->fh_ox_id = htons(io_req->xid);
- fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id);
+ fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);
sid = tgt->sid;
did = rport->port_id;
@@ -888,7 +906,8 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
index = xid % BNX2FC_TASKS_PER_PAGE;
/* Initialize task context for this IO request */
- task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
task = &(task_page[index]);
bnx2fc_init_mp_task(abts_io_req, task);
@@ -916,11 +935,81 @@ abts_err:
return rc;
}
+int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
+ enum fc_rctl r_ctl)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+ struct bnx2fc_interface *interface;
+ struct fcoe_port *port;
+ struct bnx2fc_cmd *seq_clnp_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ int task_idx, index;
+ u16 xid;
+ int rc = 0;
+
+ BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
+ orig_io_req->xid);
+ kref_get(&orig_io_req->refcount);
+
+ port = orig_io_req->port;
+ interface = port->priv;
+ lport = port->lport;
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
+ rc = -ENOMEM;
+ goto cleanup_err;
+ }
+
+ seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
+ if (!seq_clnp_req) {
+ printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+ rc = -ENOMEM;
+ kfree(cb_arg);
+ goto cleanup_err;
+ }
+ /* Initialize rest of io_req fields */
+ seq_clnp_req->sc_cmd = NULL;
+ seq_clnp_req->port = port;
+ seq_clnp_req->tgt = tgt;
+ seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */
+
+ xid = seq_clnp_req->xid;
+
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ cb_arg->aborted_io_req = orig_io_req;
+ cb_arg->io_req = seq_clnp_req;
+ cb_arg->r_ctl = r_ctl;
+ cb_arg->offset = offset;
+ seq_clnp_req->cb_arg = cb_arg;
+
+ printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
+ bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);
+
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+cleanup_err:
+ return rc;
+}
+
int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
struct fc_lport *lport;
struct bnx2fc_rport *tgt = io_req->tgt;
- struct bnx2fc_hba *hba;
+ struct bnx2fc_interface *interface;
struct fcoe_port *port;
struct bnx2fc_cmd *cleanup_io_req;
struct fcoe_task_ctx_entry *task;
@@ -933,7 +1022,7 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
port = io_req->port;
- hba = port->priv;
+ interface = port->priv;
lport = port->lport;
cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
@@ -955,7 +1044,8 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
index = xid % BNX2FC_TASKS_PER_PAGE;
/* Initialize task context for this IO request */
- task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
task = &(task_page[index]);
orig_xid = io_req->xid;
@@ -1023,7 +1113,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
lport = shost_priv(sc_cmd->device->host);
if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
- printk(KERN_ALERT PFX "eh_abort: link not ready\n");
+ printk(KERN_ERR PFX "eh_abort: link not ready\n");
return rc;
}
@@ -1054,7 +1144,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
* io_req is no longer in the active_q.
*/
if (tgt->flush_in_prog) {
- printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
"flush in progress\n", io_req->xid);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
@@ -1062,7 +1152,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
}
if (io_req->on_active_queue == 0) {
- printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
"not on active_q\n", io_req->xid);
/*
* This condition can happen only due to the FW bug,
@@ -1100,7 +1190,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
rc = bnx2fc_initiate_abts(io_req);
} else {
- printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
"already in abts processing\n", io_req->xid);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
@@ -1141,6 +1231,42 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
return rc;
}
+void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 rx_state)
+{
+ struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
+ struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
+ u32 offset = cb_arg->offset;
+ enum fc_rctl r_ctl = cb_arg->r_ctl;
+ int rc = 0;
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+
+ BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x"
+ "cmd_type = %d\n",
+ seq_clnp_req->xid, seq_clnp_req->cmd_type);
+
+ if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
+ printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
+ seq_clnp_req->xid);
+ goto free_cb_arg;
+ }
+ kref_get(&orig_io_req->refcount);
+
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (rc)
+ printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
+ " IO will abort\n");
+ seq_clnp_req->cb_arg = NULL;
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+free_cb_arg:
+ kfree(cb_arg);
+ return;
+}
+
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u8 num_rq)
@@ -1189,7 +1315,7 @@ void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
- r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl;
+ r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;
switch (r_ctl) {
case FC_RCTL_BA_ACC:
@@ -1344,12 +1470,13 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
fc_hdr = &(tm_req->resp_fc_hdr);
hdr = (u64 *)fc_hdr;
temp_hdr = (u64 *)
- &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+ &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
hdr[0] = cpu_to_be64(temp_hdr[0]);
hdr[1] = cpu_to_be64(temp_hdr[1]);
hdr[2] = cpu_to_be64(temp_hdr[2]);
- tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+ tm_req->resp_len =
+ task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;
rsp_buf = tm_req->resp_buf;
@@ -1369,7 +1496,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
fc_hdr->fh_r_ctl);
}
if (!sc_cmd->SCp.ptr) {
- printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n");
+ printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
return;
}
switch (io_req->fcp_status) {
@@ -1401,7 +1528,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
io_req->on_tmf_queue = 0;
} else {
- printk(KERN_ALERT PFX "Command not on active_cmd_queue!\n");
+ printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
return;
}
@@ -1588,7 +1715,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
/* Invalid sense length. */
- printk(KERN_ALERT PFX "invalid sns length %d\n",
+ printk(KERN_ERR PFX "invalid sns length %d\n",
rq_buff_len);
/* reset rq_buff_len */
rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
@@ -1724,7 +1851,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
/* Fetch fcp_rsp from task context and perform cmd completion */
fcp_rsp = (struct fcoe_fcp_rsp_payload *)
- &(task->cmn.general.rsp_info.fcp_rsp.payload);
+ &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
/* parse fcp_rsp and obtain sense data from RQ if available */
bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
@@ -1734,7 +1861,6 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
printk(KERN_ERR PFX "SCp.ptr is NULL\n");
return;
}
- io_req->sc_cmd = NULL;
if (io_req->on_active_queue) {
list_del_init(&io_req->link);
@@ -1754,6 +1880,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
}
bnx2fc_unmap_sg_list(io_req);
+ io_req->sc_cmd = NULL;
switch (io_req->fcp_status) {
case FC_GOOD:
@@ -1771,7 +1898,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
scsi_set_resid(sc_cmd, io_req->fcp_resid);
break;
default:
- printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n",
+ printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
io_req->fcp_status);
break;
}
@@ -1780,14 +1907,15 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
-static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
struct bnx2fc_cmd *io_req)
{
struct fcoe_task_ctx_entry *task;
struct fcoe_task_ctx_entry *task_page;
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
struct fcoe_port *port = tgt->port;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct fc_lport *lport = port->lport;
struct fcoe_dev_stats *stats;
int task_idx, index;
@@ -1845,7 +1973,8 @@ static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
}
/* Time IO req */
- bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
+ if (tgt->io_timeout)
+ bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
/* Obtain free SQ entry */
bnx2fc_add_2_sq(tgt, xid);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index a2e3830bd26..d5311b577cc 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -2,7 +2,7 @@
* Handles operations such as session offload/upload etc, and manages
* session resources such as connection id and qp resources.
*
- * Copyright (c) 2008 - 2010 Broadcom Corporation
+ * Copyright (c) 2008 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -65,7 +65,8 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
{
struct fc_lport *lport = rdata->local_port;
struct fc_rport *rport = rdata->rport;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
int rval;
int i = 0;
@@ -133,6 +134,8 @@ retry_ofld:
/* upload will take care of cleaning up sess resc */
lport->tt.rport_logoff(rdata);
}
+ /* Arm CQ */
+ bnx2fc_arm_cq(tgt);
return;
ofld_err:
@@ -235,7 +238,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
static void bnx2fc_upload_session(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
tgt->num_active_ios.counter);
@@ -314,7 +318,10 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
{
struct fc_rport *rport = rdata->rport;
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
+ struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
tgt->rport = rport;
tgt->rdata = rdata;
@@ -335,6 +342,7 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
+ atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX);
/* Initialize the toggle bit */
tgt->sq_curr_toggle_bit = 1;
@@ -345,7 +353,25 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
tgt->rq_cons_idx = 0;
atomic_set(&tgt->num_active_ios, 0);
- tgt->work_time_slice = 2;
+ if (rdata->flags & FC_RP_FLAGS_RETRY) {
+ tgt->dev_type = TYPE_TAPE;
+ tgt->io_timeout = 0; /* use default ULP timeout */
+ } else {
+ tgt->dev_type = TYPE_DISK;
+ tgt->io_timeout = BNX2FC_IO_TIMEOUT;
+ }
+
+ /* initialize sq doorbell */
+ sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
+ sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
+ B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
+ /* initialize rx doorbell */
+ rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) |
+ (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) |
+ (B577XX_FCOE_CONNECTION_TYPE <<
+ B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT));
+ rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) |
+ (0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT);
spin_lock_init(&tgt->tgt_lock);
spin_lock_init(&tgt->cq_lock);
@@ -377,7 +403,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
enum fc_rport_event event)
{
struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct fc_rport *rport = rdata->rport;
struct fc_rport_libfc_priv *rp;
struct bnx2fc_rport *tgt;
@@ -388,7 +415,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
switch (event) {
case RPORT_EV_READY:
if (!rport) {
- printk(KERN_ALERT PFX "rport is NULL: ERROR!\n");
+ printk(KERN_ERR PFX "rport is NULL: ERROR!\n");
break;
}
@@ -400,7 +427,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
* We should not come here, as lport will
* take care of fabric login
*/
- printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n",
+ printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n",
rdata->ids.port_id);
break;
}
@@ -468,7 +495,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
break;
if (!rport) {
- printk(KERN_ALERT PFX "%x - rport not created Yet!!\n",
+ printk(KERN_INFO PFX "%x - rport not created Yet!!\n",
port_id);
break;
}
@@ -522,7 +549,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
u32 port_id)
{
- struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
struct bnx2fc_rport *tgt;
struct fc_rport_priv *rdata;
int i;
@@ -537,7 +565,7 @@ struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
"obtained\n");
return tgt;
} else {
- printk(KERN_ERR PFX "rport 0x%x "
+ BNX2FC_TGT_DBG(tgt, "rport 0x%x "
"is in DELETED state\n",
rdata->ids.port_id);
return NULL;
@@ -618,7 +646,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
&tgt->sq_dma, GFP_KERNEL);
if (!tgt->sq) {
- printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n",
+ printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
tgt->sq_mem_size);
goto mem_alloc_failure;
}
@@ -631,7 +659,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
&tgt->cq_dma, GFP_KERNEL);
if (!tgt->cq) {
- printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n",
+ printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
tgt->cq_mem_size);
goto mem_alloc_failure;
}
@@ -644,7 +672,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
&tgt->rq_dma, GFP_KERNEL);
if (!tgt->rq) {
- printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n",
+ printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
tgt->rq_mem_size);
goto mem_alloc_failure;
}
@@ -656,7 +684,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
&tgt->rq_pbl_dma, GFP_KERNEL);
if (!tgt->rq_pbl) {
- printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n",
+ printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
tgt->rq_pbl_size);
goto mem_alloc_failure;
}
@@ -682,7 +710,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
&tgt->xferq_dma, GFP_KERNEL);
if (!tgt->xferq) {
- printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n",
+ printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
tgt->xferq_mem_size);
goto mem_alloc_failure;
}
@@ -696,7 +724,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
&tgt->confq_dma, GFP_KERNEL);
if (!tgt->confq) {
- printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n",
+ printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
tgt->confq_mem_size);
goto mem_alloc_failure;
}
@@ -711,7 +739,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->confq_pbl_size,
&tgt->confq_pbl_dma, GFP_KERNEL);
if (!tgt->confq_pbl) {
- printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n",
+ printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
tgt->confq_pbl_size);
goto mem_alloc_failure;
}
@@ -736,7 +764,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->conn_db_mem_size,
&tgt->conn_db_dma, GFP_KERNEL);
if (!tgt->conn_db) {
- printk(KERN_ALERT PFX "unable to allocate conn_db %d\n",
+ printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
tgt->conn_db_mem_size);
goto mem_alloc_failure;
}
@@ -752,14 +780,12 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
&tgt->lcq_dma, GFP_KERNEL);
if (!tgt->lcq) {
- printk(KERN_ALERT PFX "unable to allocate lcq %d\n",
+ printk(KERN_ERR PFX "unable to allocate lcq %d\n",
tgt->lcq_mem_size);
goto mem_alloc_failure;
}
memset(tgt->lcq, 0, tgt->lcq_mem_size);
- /* Arm CQ */
- tgt->conn_db->cq_arm.lo = -1;
tgt->conn_db->rq_prod = 0x8000;
return 0;
@@ -787,6 +813,8 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
iounmap(tgt->ctx_base);
tgt->ctx_base = NULL;
}
+
+ spin_lock_bh(&tgt->cq_lock);
/* Free LCQ */
if (tgt->lcq) {
dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
@@ -828,17 +856,16 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
tgt->rq = NULL;
}
/* Free CQ */
- spin_lock_bh(&tgt->cq_lock);
if (tgt->cq) {
dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
tgt->cq, tgt->cq_dma);
tgt->cq = NULL;
}
- spin_unlock_bh(&tgt->cq_lock);
/* Free SQ */
if (tgt->sq) {
dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
tgt->sq, tgt->sq_dma);
tgt->sq = NULL;
}
+ spin_unlock_bh(&tgt->cq_lock);
}
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 30e6bdbd65a..57515f1f169 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -1,6 +1,6 @@
/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
*
- * Copyright (c) 2006 - 2010 Broadcom Corporation
+ * Copyright (c) 2006 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -125,7 +125,7 @@
/* SQ/RQ/CQ DB structure sizes */
#define ISCSI_SQ_DB_SIZE (16)
-#define ISCSI_RQ_DB_SIZE (16)
+#define ISCSI_RQ_DB_SIZE (64)
#define ISCSI_CQ_DB_SIZE (80)
#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index dad6c8a3431..72118db89a2 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -1,6 +1,6 @@
/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
*
- * Copyright (c) 2006 - 2010 Broadcom Corporation
+ * Copyright (c) 2006 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -707,8 +707,10 @@ struct iscsi_kwqe_conn_update {
#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
-#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4)
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6
#elif defined(__LITTLE_ENDIAN)
u8 conn_flags;
#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
@@ -719,8 +721,10 @@ struct iscsi_kwqe_conn_update {
#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
-#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE (0x3<<4)
+#define ISCSI_KWQE_CONN_UPDATE_OOO_SUPPORT_MODE_SHIFT 4
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 6
u8 reserved2;
u8 max_outstanding_r2ts;
u8 session_error_recovery_level;
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 6bdd25a93db..dc5700765db 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -1,6 +1,6 @@
/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2010 Broadcom Corporation
+ * Copyright (c) 2006 - 2011 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
@@ -22,11 +22,14 @@
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/in.h>
#include <linux/kfifo.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
+#include <linux/kthread.h>
+#include <linux/cpu.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -202,10 +205,13 @@ struct io_bdt {
/**
* bnx2i_cmd - iscsi command structure
*
+ * @hdr: iSCSI header
+ * @conn: iscsi_conn pointer
* @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd
* @sg: SG list
* @io_tbl: buffer descriptor (BD) table
* @bd_tbl_dma: buffer descriptor (BD) table's dma address
+ * @req: bnx2i specific command request struct
*/
struct bnx2i_cmd {
struct iscsi_hdr hdr;
@@ -229,6 +235,7 @@ struct bnx2i_cmd {
* @gen_pdu: login/nopout/logout pdu resources
* @violation_notified: bit mask used to track iscsi error/warning messages
* already printed out
+ * @work_cnt: keeps track of the number of outstanding work items
*
* iSCSI connection structure
*/
@@ -252,6 +259,8 @@ struct bnx2i_conn {
*/
struct generic_pdu_resc gen_pdu;
u64 violation_notified;
+
+ atomic_t work_cnt;
};
@@ -478,7 +487,7 @@ struct bnx2i_5771x_cq_db {
struct bnx2i_5771x_sq_rq_db {
u16 prod_idx;
- u8 reserved0[14]; /* Pad structure size to 16 bytes */
+ u8 reserved0[62]; /* Pad structure size to 64 bytes */
};
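
The larger reserved0[] keeps the SQ/RQ doorbell structure the same size as the new 64-byte ISCSI_RQ_DB_SIZE defined earlier in this patch. A standalone compile-time check of that relationship, as a sketch (the struct name below is hypothetical; only the field layout is copied from above):

#include <stdint.h>

#define ISCSI_RQ_DB_SIZE 64		/* value from 57xx_iscsi_constants.h above */

struct sq_rq_db_sketch {		/* mirrors bnx2i_5771x_sq_rq_db */
	uint16_t prod_idx;
	uint8_t  reserved0[62];		/* pad structure size to 64 bytes */
};

/* Fails to compile if the padding ever drifts from the doorbell size. */
_Static_assert(sizeof(struct sq_rq_db_sketch) == ISCSI_RQ_DB_SIZE,
	       "SQ/RQ doorbell struct must stay ISCSI_RQ_DB_SIZE bytes");
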
@@ -661,7 +670,6 @@ enum {
* @hba: adapter to which this connection belongs
* @conn: iscsi connection this EP is linked to
* @cls_ep: associated iSCSI endpoint pointer
- * @sess: iscsi session this EP is linked to
* @cm_sk: cnic sock struct
* @hba_age: age to detect if 'iscsid' issues ep_disconnect()
* after HBA reset is completed by bnx2i/cnic/bnx2
@@ -687,7 +695,7 @@ struct bnx2i_endpoint {
u32 hba_age;
u32 state;
unsigned long timestamp;
- int num_active_cmds;
+ atomic_t num_active_cmds;
u32 ec_shift;
struct qp_info qp;
@@ -700,6 +708,19 @@ struct bnx2i_endpoint {
};
+struct bnx2i_work {
+ struct list_head list;
+ struct iscsi_session *session;
+ struct bnx2i_conn *bnx2i_conn;
+ struct cqe cqe;
+};
+
+struct bnx2i_percpu_s {
+ struct task_struct *iothread;
+ struct list_head work_list;
+ spinlock_t p_work_lock;
+};
+
/* Global variables */
extern unsigned int error_mask1, error_mask2;
@@ -783,7 +804,7 @@ extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
struct bnx2i_hba *hba, u32 iscsi_cid);
extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
-extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
+extern int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
extern int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep);
@@ -793,4 +814,8 @@ extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
+extern int bnx2i_percpu_io_thread(void *arg);
+extern int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe);
#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 5c54a2d9b83..9ae80cd5953 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1,6 +1,6 @@
/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2010 Broadcom Corporation
+ * Copyright (c) 2006 - 2011 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
@@ -17,6 +17,8 @@
#include <scsi/libiscsi.h>
#include "bnx2i.h"
+DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
+
/**
* bnx2i_get_cid_num - get cid from ep
* @ep: endpoint pointer
@@ -131,16 +133,16 @@ static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
* the driver. EQ event is generated when CQ index is hit or at least 1 CQ is
* outstanding and on chip timer expires
*/
-void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
+int bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
{
struct bnx2i_5771x_cq_db *cq_db;
u16 cq_index;
- u16 next_index;
+ u16 next_index = 0;
u32 num_active_cmds;
/* Coalesce CQ entries only on 10G devices */
if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
- return;
+ return 0;
/* Do not update CQ DB multiple times before firmware writes
* '0xFFFF' to CQDB->SQN field. Deviation may cause spurious
@@ -150,16 +152,17 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
if (action != CNIC_ARM_CQE_FP)
if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
- return;
+ return 0;
if (action == CNIC_ARM_CQE || action == CNIC_ARM_CQE_FP) {
- num_active_cmds = ep->num_active_cmds;
+ num_active_cmds = atomic_read(&ep->num_active_cmds);
if (num_active_cmds <= event_coal_min)
next_index = 1;
- else
- next_index = event_coal_min +
- ((num_active_cmds - event_coal_min) >>
- ep->ec_shift);
+ else {
+ next_index = num_active_cmds >> ep->ec_shift;
+ if (next_index > num_active_cmds - event_coal_min)
+ next_index = num_active_cmds - event_coal_min;
+ }
if (!next_index)
next_index = 1;
cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
@@ -170,6 +173,7 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
cq_db->sqn[0] = cq_index;
}
+ return next_index;
}
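
The reworked coalescing heuristic arms the CQ num_active_cmds >> ec_shift entries ahead, clamped so that no more than num_active_cmds - event_coal_min completions are coalesced. A small worked example of the same computation under assumed inputs (event_coal_min = 24 matches the module default shown later in this patch; ec_shift = 2 is only an assumption here):

/* Sketch of the new next_index computation, parameters passed explicitly. */
static unsigned int coal_next_index(unsigned int num_active_cmds,
				    unsigned int event_coal_min,
				    unsigned int ec_shift)
{
	unsigned int next_index;

	if (num_active_cmds <= event_coal_min)
		return 1;

	next_index = num_active_cmds >> ec_shift;
	if (next_index > num_active_cmds - event_coal_min)
		next_index = num_active_cmds - event_coal_min;
	if (!next_index)
		next_index = 1;
	return next_index;
}

/* e.g. 128 active commands with ec_shift 2: 128 >> 2 = 32, and since
 * 32 <= 128 - 24 no clamp applies, so the CQ is armed 32 completions ahead. */
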
@@ -265,7 +269,7 @@ static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
struct bnx2i_5771x_sq_rq_db *sq_db;
struct bnx2i_endpoint *ep = bnx2i_conn->ep;
- ep->num_active_cmds++;
+ atomic_inc(&ep->num_active_cmds);
wmb(); /* flush SQ WQE memory before the doorbell is rung */
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
@@ -328,11 +332,11 @@ int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
{
struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_login_request *login_wqe;
- struct iscsi_login *login_hdr;
+ struct iscsi_login_req *login_hdr;
u32 dword;
bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
- login_hdr = (struct iscsi_login *)task->hdr;
+ login_hdr = (struct iscsi_login_req *)task->hdr;
login_wqe = (struct bnx2i_login_request *)
bnx2i_conn->ep->qp.sq_prod_qe;
@@ -430,7 +434,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
default:
tmfabort_wqe->ref_itt = RESERVED_ITT;
}
- memcpy(scsi_lun, tmfabort_hdr->lun, sizeof(struct scsi_lun));
+ memcpy(scsi_lun, &tmfabort_hdr->lun, sizeof(struct scsi_lun));
tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
@@ -547,7 +551,7 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
nopout_wqe->op_code = nopout_hdr->opcode;
nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
- memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
+ memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8);
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
u32 tmp = nopout_wqe->lun[0];
@@ -1331,24 +1335,25 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
/**
* bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion.
- * @conn: iscsi connection
+ * @session: iscsi session
+ * @bnx2i_conn: bnx2i connection
* @cqe: pointer to newly DMA'ed CQE entry for processing
*
* process SCSI CMD Response CQE & complete the request to SCSI-ML
*/
-static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
- struct bnx2i_conn *bnx2i_conn,
- struct cqe *cqe)
+int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
{
struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
struct bnx2i_cmd_response *resp_cqe;
struct bnx2i_cmd *bnx2i_cmd;
struct iscsi_task *task;
- struct iscsi_cmd_rsp *hdr;
+ struct iscsi_scsi_rsp *hdr;
u32 datalen = 0;
resp_cqe = (struct bnx2i_cmd_response *)cqe;
- spin_lock(&session->lock);
+ spin_lock_bh(&session->lock);
task = iscsi_itt_to_task(conn,
resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
if (!task)
@@ -1371,7 +1376,7 @@ static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
}
bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
- hdr = (struct iscsi_cmd_rsp *)task->hdr;
+ hdr = (struct iscsi_scsi_rsp *)task->hdr;
resp_cqe = (struct bnx2i_cmd_response *)cqe;
hdr->opcode = resp_cqe->op_code;
hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
@@ -1409,7 +1414,7 @@ done:
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
conn->data, datalen);
fail:
- spin_unlock(&session->lock);
+ spin_unlock_bh(&session->lock);
return 0;
}
@@ -1711,7 +1716,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
hdr->flags = ISCSI_FLAG_CMD_FINAL;
hdr->itt = task->hdr->itt;
hdr->ttt = cpu_to_be32(nop_in->ttt);
- memcpy(hdr->lun, nop_in->lun, 8);
+ memcpy(&hdr->lun, nop_in->lun, 8);
}
done:
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
@@ -1754,7 +1759,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session,
resp_hdr->opcode = async_cqe->op_code;
resp_hdr->flags = 0x80;
- memcpy(resp_hdr->lun, async_cqe->lun, 8);
+ memcpy(&resp_hdr->lun, async_cqe->lun, 8);
resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);
@@ -1836,21 +1841,136 @@ static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
}
+/**
+ * bnx2i_percpu_io_thread - thread per cpu for ios
+ *
+ * @arg: ptr to bnx2i_percpu_s structure
+ */
+int bnx2i_percpu_io_thread(void *arg)
+{
+ struct bnx2i_percpu_s *p = arg;
+ struct bnx2i_work *work, *tmp;
+ LIST_HEAD(work_list);
+
+ set_user_nice(current, -20);
+
+ while (!kthread_should_stop()) {
+ spin_lock_bh(&p->p_work_lock);
+ while (!list_empty(&p->work_list)) {
+ list_splice_init(&p->work_list, &work_list);
+ spin_unlock_bh(&p->p_work_lock);
+
+ list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_del_init(&work->list);
+ /* work allocated in the bh, freed here */
+ bnx2i_process_scsi_cmd_resp(work->session,
+ work->bnx2i_conn,
+ &work->cqe);
+ atomic_dec(&work->bnx2i_conn->work_cnt);
+ kfree(work);
+ }
+ spin_lock_bh(&p->p_work_lock);
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_bh(&p->p_work_lock);
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+
+/**
+ * bnx2i_queue_scsi_cmd_resp - queue cmd completion to the percpu thread
+ * @bnx2i_conn: bnx2i connection
+ *
+ * this function is called by generic KCQ handler to queue all pending cmd
+ * completion CQEs
+ *
+ * The implementation is to queue the cmd response based on the
+ * last recorded command for the given connection. The
+ * cpu_id gets recorded upon task_xmit. No out-of-order completion!
+ */
+static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct bnx2i_nop_in_msg *cqe)
+{
+ struct bnx2i_work *bnx2i_work = NULL;
+ struct bnx2i_percpu_s *p = NULL;
+ struct iscsi_task *task;
+ struct scsi_cmnd *sc;
+ int rc = 0;
+ int cpu;
+
+ spin_lock(&session->lock);
+ task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
+ cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
+ if (!task) {
+ spin_unlock(&session->lock);
+ return -EINVAL;
+ }
+ sc = task->sc;
+ spin_unlock(&session->lock);
+
+ if (!blk_rq_cpu_valid(sc->request))
+ cpu = smp_processor_id();
+ else
+ cpu = sc->request->cpu;
+
+ p = &per_cpu(bnx2i_percpu, cpu);
+ spin_lock(&p->p_work_lock);
+ if (unlikely(!p->iothread)) {
+ rc = -EINVAL;
+ goto err;
+ }
+ /* Alloc a work item and copy the cqe into it */
+ bnx2i_work = kzalloc(sizeof(struct bnx2i_work), GFP_ATOMIC);
+ if (bnx2i_work) {
+ INIT_LIST_HEAD(&bnx2i_work->list);
+ bnx2i_work->session = session;
+ bnx2i_work->bnx2i_conn = bnx2i_conn;
+ memcpy(&bnx2i_work->cqe, cqe, sizeof(struct cqe));
+ list_add_tail(&bnx2i_work->list, &p->work_list);
+ atomic_inc(&bnx2i_conn->work_cnt);
+ wake_up_process(p->iothread);
+ spin_unlock(&p->p_work_lock);
+ goto done;
+ } else
+ rc = -ENOMEM;
+err:
+ spin_unlock(&p->p_work_lock);
+ bnx2i_process_scsi_cmd_resp(session, bnx2i_conn, (struct cqe *)cqe);
+done:
+ return rc;
+}
+
/**
* bnx2i_process_new_cqes - process newly DMA'ed CQE's
- * @bnx2i_conn: iscsi connection
+ * @bnx2i_conn: bnx2i connection
*
* this function is called by generic KCQ handler to process all pending CQE's
*/
-static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
+static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
{
struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct qp_info *qp = &bnx2i_conn->ep->qp;
+ struct qp_info *qp;
struct bnx2i_nop_in_msg *nopin;
int tgt_async_msg;
+ int cqe_cnt = 0;
+
+ if (bnx2i_conn->ep == NULL)
+ return 0;
+
+ qp = &bnx2i_conn->ep->qp;
+ if (!qp->cq_virt) {
+ printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!",
+ bnx2i_conn->hba->netdev->name);
+ goto out;
+ }
while (1) {
nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
@@ -1873,8 +1993,9 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
switch (nopin->op_code) {
case ISCSI_OP_SCSI_CMD_RSP:
case ISCSI_OP_SCSI_DATA_IN:
- bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
- qp->cq_cons_qe);
+ /* Run the kthread engine only for data cmds
+ All other cmds will be completed in this bh! */
+ bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin);
break;
case ISCSI_OP_LOGIN_RSP:
bnx2i_process_login_resp(session, bnx2i_conn,
@@ -1918,13 +2039,21 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
nopin->op_code);
}
- if (!tgt_async_msg)
- bnx2i_conn->ep->num_active_cmds--;
+ if (!tgt_async_msg) {
+ if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
+ printk(KERN_ALERT "bnx2i (%s): no active cmd! "
+ "op 0x%x\n",
+ bnx2i_conn->hba->netdev->name,
+ nopin->op_code);
+ else
+ atomic_dec(&bnx2i_conn->ep->num_active_cmds);
+ }
cqe_out:
/* clear out in production version only, till beta keep opcode
* field intact, will be helpful in debugging (context dump)
* nopin->op_code = 0;
*/
+ cqe_cnt++;
qp->cqe_exp_seq_sn++;
if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
@@ -1937,6 +2066,8 @@ cqe_out:
qp->cq_cons_idx++;
}
}
+out:
+ return cqe_cnt;
}
/**
@@ -1952,6 +2083,7 @@ static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
{
struct bnx2i_conn *bnx2i_conn;
u32 iscsi_cid;
+ int nxt_idx;
iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
@@ -1964,9 +2096,12 @@ static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
return;
}
+
bnx2i_process_new_cqes(bnx2i_conn);
- bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
- bnx2i_process_new_cqes(bnx2i_conn);
+ nxt_idx = bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep,
+ CNIC_ARM_CQE_FP);
+ if (nxt_idx && nxt_idx == bnx2i_process_new_cqes(bnx2i_conn))
+ bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE_FP);
}
@@ -2312,7 +2447,7 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid "
"opcode\n", hba->netdev->name);
else if (ofld_kcqe->completion_status ==
- ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY)
+ ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY)
/* error status code valid only for 5771x chipset */
ep->state = EP_STATE_OFLD_FAILED_CID_BUSY;
else
@@ -2386,14 +2521,20 @@ static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
* bnx2i_indicate_netevent - Generic netdev event handler
* @context: adapter structure pointer
* @event: event type
+ * @vlan_id: vlans id - associated vlan id with this event
*
* Handles four netdev events, NETDEV_UP, NETDEV_DOWN,
* NETDEV_GOING_DOWN and NETDEV_CHANGE
*/
-static void bnx2i_indicate_netevent(void *context, unsigned long event)
+static void bnx2i_indicate_netevent(void *context, unsigned long event,
+ u16 vlan_id)
{
struct bnx2i_hba *hba = context;
+ /* Ignore all netevents coming from vlans */
+ if (vlan_id != 0)
+ return;
+
switch (event) {
case NETDEV_UP:
if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
@@ -2511,7 +2652,7 @@ static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
static int bnx2i_send_nl_mesg(void *context, u32 msg_type,
- char *buf, u16 buflen)
+ char *buf, u16 buflen)
{
struct bnx2i_hba *hba = context;
int rc;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 6adbdc34a9a..1a947f1b972 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -1,6 +1,6 @@
/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2010 Broadcom Corporation
+ * Copyright (c) 2006 - 2011 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
#define DRV_MODULE_NAME "bnx2i"
-#define DRV_MODULE_VERSION "2.6.2.3"
-#define DRV_MODULE_RELDATE "Dec 31, 2010"
+#define DRV_MODULE_VERSION "2.7.0.3"
+#define DRV_MODULE_RELDATE "Jun 15, 2011"
static char version[] __devinitdata =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -30,7 +30,7 @@ MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and "
"Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711/57712"
- " iSCSI Driver");
+ "/57800/57810/57840 iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -40,7 +40,7 @@ unsigned int event_coal_min = 24;
module_param(event_coal_min, int, 0664);
MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands");
-unsigned int event_coal_div = 1;
+unsigned int event_coal_div = 2;
module_param(event_coal_div, int, 0664);
MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
@@ -66,6 +66,15 @@ MODULE_PARM_DESC(rq_size, "Configure RQ size");
u64 iscsi_error_mask = 0x00;
+DEFINE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
+
+static int bnx2i_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu);
+/* notification function for CPU hotplug events */
+static struct notifier_block bnx2i_cpu_notifier = {
+ .notifier_call = bnx2i_cpu_callback,
+};
+
/**
* bnx2i_identify_device - identifies NetXtreme II device type
@@ -88,11 +97,20 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
(hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
- } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711E ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57712 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57712E)
+ } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711E ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57712 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57712E ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57800 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57800_MF ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57800_VF ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57810 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57810_MF ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57810_VF ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57840 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57840_MF ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57840_VF)
set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
else
printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
@@ -163,21 +181,14 @@ void bnx2i_start(void *handle)
struct bnx2i_hba *hba = handle;
int i = HZ;
- if (!hba->cnic->max_iscsi_conn) {
- printk(KERN_ALERT "bnx2i: dev %s does not support "
- "iSCSI\n", hba->netdev->name);
+ /*
+ * We should never register devices that don't support iSCSI
+ * (see bnx2i_init_one), so something is wrong if we try to
+ * start an iSCSI adapter on hardware with 0 supported iSCSI
+ * connections
+ */
+ BUG_ON(!hba->cnic->max_iscsi_conn);
- if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
- mutex_lock(&bnx2i_dev_lock);
- list_del_init(&hba->link);
- adapter_count--;
- hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
- clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
- mutex_unlock(&bnx2i_dev_lock);
- bnx2i_free_hba(hba);
- }
- return;
- }
bnx2i_send_fw_iscsi_init_msg(hba);
while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
msleep(BNX2I_INIT_POLL_TIME);
@@ -281,6 +292,13 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
int rc;
mutex_lock(&bnx2i_dev_lock);
+ if (!cnic->max_iscsi_conn) {
+ printk(KERN_ALERT "bnx2i: dev %s does not support "
+ "iSCSI\n", hba->netdev->name);
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
hba->cnic = cnic;
rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
if (!rc) {
@@ -298,6 +316,7 @@ static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
else
printk(KERN_ERR "bnx2i dev reg, unknown error, %d\n", rc);
+out:
mutex_unlock(&bnx2i_dev_lock);
return rc;
@@ -362,6 +381,91 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
/**
+ * bnx2i_percpu_thread_create - Create a receive thread for an
+ * online CPU
+ *
+ * @cpu: cpu index for the online cpu
+ */
+static void bnx2i_percpu_thread_create(unsigned int cpu)
+{
+ struct bnx2i_percpu_s *p;
+ struct task_struct *thread;
+
+ p = &per_cpu(bnx2i_percpu, cpu);
+
+ thread = kthread_create(bnx2i_percpu_io_thread, (void *)p,
+ "bnx2i_thread/%d", cpu);
+ /* bind thread to the cpu */
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ }
+}
+
+
+static void bnx2i_percpu_thread_destroy(unsigned int cpu)
+{
+ struct bnx2i_percpu_s *p;
+ struct task_struct *thread;
+ struct bnx2i_work *work, *tmp;
+
+ /* Prevent any new work from being queued for this CPU */
+ p = &per_cpu(bnx2i_percpu, cpu);
+ spin_lock_bh(&p->p_work_lock);
+ thread = p->iothread;
+ p->iothread = NULL;
+
+ /* Free all work in the list */
+ list_for_each_entry_safe(work, tmp, &p->work_list, list) {
+ list_del_init(&work->list);
+ bnx2i_process_scsi_cmd_resp(work->session,
+ work->bnx2i_conn, &work->cqe);
+ kfree(work);
+ }
+
+ spin_unlock_bh(&p->p_work_lock);
+ if (thread)
+ kthread_stop(thread);
+}
+
+
+/**
+ * bnx2i_cpu_callback - Handler for CPU hotplug events
+ *
+ * @nfb: The callback data block
+ * @action: The event triggering the callback
+ * @hcpu: The index of the CPU that the event is for
+ *
+ * This creates or destroys per-CPU data for iSCSI
+ *
+ * Returns NOTIFY_OK always.
+ */
+static int bnx2i_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ printk(KERN_INFO "bnx2i: CPU %x online: Create Rx thread\n",
+ cpu);
+ bnx2i_percpu_thread_create(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ printk(KERN_INFO "CPU %x offline: Remove Rx thread\n", cpu);
+ bnx2i_percpu_thread_destroy(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+
+/**
* bnx2i_mod_init - module init entry point
*
* initialize any driver wide global data structures such as endpoint pool,
@@ -371,6 +475,8 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
static int __init bnx2i_mod_init(void)
{
int err;
+ unsigned cpu = 0;
+ struct bnx2i_percpu_s *p;
printk(KERN_INFO "%s", version);
@@ -393,6 +499,20 @@ static int __init bnx2i_mod_init(void)
goto unreg_xport;
}
+ /* Create percpu kernel threads to handle iSCSI I/O completions */
+ for_each_possible_cpu(cpu) {
+ p = &per_cpu(bnx2i_percpu, cpu);
+ INIT_LIST_HEAD(&p->work_list);
+ spin_lock_init(&p->p_work_lock);
+ p->iothread = NULL;
+ }
+
+ for_each_online_cpu(cpu)
+ bnx2i_percpu_thread_create(cpu);
+
+ /* Initialize per CPU interrupt thread */
+ register_hotcpu_notifier(&bnx2i_cpu_notifier);
+
return 0;
unreg_xport:
@@ -413,6 +533,7 @@ out:
static void __exit bnx2i_mod_exit(void)
{
struct bnx2i_hba *hba;
+ unsigned cpu = 0;
mutex_lock(&bnx2i_dev_lock);
while (!list_empty(&adapter_list)) {
@@ -430,6 +551,11 @@ static void __exit bnx2i_mod_exit(void)
}
mutex_unlock(&bnx2i_dev_lock);
+ unregister_hotcpu_notifier(&bnx2i_cpu_notifier);
+
+ for_each_online_cpu(cpu)
+ bnx2i_percpu_thread_destroy(cpu);
+
iscsi_unregister_transport(&bnx2i_iscsi_transport);
cnic_unregister_driver(CNIC_ULP_ISCSI);
}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 041928b23cb..cffd4d75df5 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1,7 +1,7 @@
/*
* bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2010 Broadcom Corporation
+ * Copyright (c) 2006 - 2011 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
@@ -27,6 +27,7 @@ static struct scsi_host_template bnx2i_host_template;
*/
static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
+DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);
static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
{
@@ -1212,9 +1213,10 @@ static int bnx2i_task_xmit(struct iscsi_task *task)
struct bnx2i_conn *bnx2i_conn = conn->dd_data;
struct scsi_cmnd *sc = task->sc;
struct bnx2i_cmd *cmd = task->dd_data;
- struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
- if (bnx2i_conn->ep->num_active_cmds + 1 > hba->max_sqes)
+ if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 >
+ hba->max_sqes)
return -ENOMEM;
/*
@@ -1354,6 +1356,9 @@ bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
bnx2i_conn = conn->dd_data;
bnx2i_conn->cls_conn = cls_conn;
bnx2i_conn->hba = hba;
+
+ atomic_set(&bnx2i_conn->work_cnt, 0);
+
/* 'ep' ptr will be assigned in bind() call */
bnx2i_conn->ep = NULL;
init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
@@ -1457,11 +1462,34 @@ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
struct bnx2i_conn *bnx2i_conn = conn->dd_data;
struct Scsi_Host *shost;
struct bnx2i_hba *hba;
+ struct bnx2i_work *work, *tmp;
+ unsigned cpu = 0;
+ struct bnx2i_percpu_s *p;
shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
hba = iscsi_host_priv(shost);
bnx2i_conn_free_login_resources(hba, bnx2i_conn);
+
+ if (atomic_read(&bnx2i_conn->work_cnt)) {
+ for_each_online_cpu(cpu) {
+ p = &per_cpu(bnx2i_percpu, cpu);
+ spin_lock_bh(&p->p_work_lock);
+ list_for_each_entry_safe(work, tmp,
+ &p->work_list, list) {
+ if (work->session == conn->session &&
+ work->bnx2i_conn == bnx2i_conn) {
+ list_del_init(&work->list);
+ kfree(work);
+ if (!atomic_dec_and_test(
+ &bnx2i_conn->work_cnt))
+ break;
+ }
+ }
+ spin_unlock_bh(&p->p_work_lock);
+ }
+ }
+
iscsi_conn_teardown(cls_conn);
}
@@ -1769,7 +1797,7 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
}
bnx2i_ep = ep->dd_data;
- bnx2i_ep->num_active_cmds = 0;
+ atomic_set(&bnx2i_ep->num_active_cmds, 0);
iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
if (iscsi_cid == -1) {
printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
@@ -2163,9 +2191,9 @@ static struct scsi_host_template bnx2i_host_template = {
.eh_device_reset_handler = iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.change_queue_depth = iscsi_change_queue_depth,
- .can_queue = 1024,
+ .can_queue = 2048,
.max_sectors = 127,
- .cmd_per_lun = 24,
+ .cmd_per_lun = 128,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = ISCSI_MAX_BDS_PER_CMD,
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index 9174196d903..83a77f7244d 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -1,6 +1,6 @@
/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2004 - 2010 Broadcom Corporation
+ * Copyright (c) 2004 - 2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index fc2cdb62f53..bd22041e278 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -985,7 +985,7 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->saddr.sin_addr.s_addr = chba->ipv4addr;
csk->rss_qid = 0;
- csk->l2t = t3_l2t_get(t3dev, dst->neighbour, ndev);
+ csk->l2t = t3_l2t_get(t3dev, dst_get_neighbour(dst), ndev);
if (!csk->l2t) {
pr_err("NO l2t available.\n");
return -EINVAL;
@@ -1245,7 +1245,7 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi;
struct ulp_iscsi_info uinfo;
unsigned int pgsz_factor[4];
- int err;
+ int i, err;
if (ddp) {
kref_get(&ddp->refcnt);
@@ -1271,6 +1271,8 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
cxgbi_ddp_page_size_factor(pgsz_factor);
+ for (i = 0; i < 4; i++)
+ uinfo.pgsz_factor[i] = pgsz_factor[i];
uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT);
err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index f3a4cd7cf78..ae13c4993aa 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1160,7 +1160,7 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
- csk->l2t = cxgb4_l2t_get(lldi->l2t, csk->dst->neighbour, ndev, 0);
+ csk->l2t = cxgb4_l2t_get(lldi->l2t, dst_get_neighbour(csk->dst), ndev, 0);
if (!csk->l2t) {
pr_err("%s, cannot alloc l2t.\n", ndev->name);
goto rel_resource;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index a2a9c7c6c64..77ac217ad5c 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -492,7 +492,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
goto err_out;
}
dst = &rt->dst;
- ndev = dst->neighbour->dev;
+ ndev = dst_get_neighbour(dst)->dev;
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
pr_info("multi-cast route %pI4, port %u, dev %s.\n",
@@ -506,7 +506,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
mtu = ndev->mtu;
pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
- dst->neighbour->dev->name, ndev->name, mtu);
+ dst_get_neighbour(dst)->dev->name, ndev->name, mtu);
}
cdev = cxgbi_device_find_by_netdev(ndev, &port);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index e7fc70d6b47..27c9d65d54a 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -35,7 +35,7 @@
* mode page were taken from the LSI RDAC 2.4 GPL'd
* driver, and then converted to Linux conventions.
*/
-#define RDAC_QUIESCENCE_TIME 20;
+#define RDAC_QUIESCENCE_TIME 20
/*
* Page Codes
*/
@@ -128,25 +128,7 @@ struct c4_inquiry {
u8 reserved[2];
};
-struct rdac_controller {
- u8 subsys_id[SUBSYS_ID_LEN];
- u8 slot_id[SLOT_ID_LEN];
- int use_ms10;
- struct kref kref;
- struct list_head node; /* list of all controllers */
- union {
- struct rdac_pg_legacy legacy;
- struct rdac_pg_expanded expanded;
- } mode_select;
- u8 index;
- u8 array_name[ARRAY_LABEL_LEN];
- spinlock_t ms_lock;
- int ms_queued;
- struct work_struct ms_work;
- struct scsi_device *ms_sdev;
- struct list_head ms_head;
-};
-
+#define UNIQUE_ID_LEN 16
struct c8_inquiry {
u8 peripheral_info;
u8 page_code; /* 0xC8 */
@@ -159,12 +141,31 @@ struct c8_inquiry {
u8 vol_user_label_len;
u8 vol_user_label[60];
u8 array_uniq_id_len;
- u8 array_unique_id[16];
+ u8 array_unique_id[UNIQUE_ID_LEN];
u8 array_user_label_len;
u8 array_user_label[60];
u8 lun[8];
};
+struct rdac_controller {
+ u8 array_id[UNIQUE_ID_LEN];
+ int use_ms10;
+ struct kref kref;
+ struct list_head node; /* list of all controllers */
+ union {
+ struct rdac_pg_legacy legacy;
+ struct rdac_pg_expanded expanded;
+ } mode_select;
+ u8 index;
+ u8 array_name[ARRAY_LABEL_LEN];
+ struct Scsi_Host *host;
+ spinlock_t ms_lock;
+ int ms_queued;
+ struct work_struct ms_work;
+ struct scsi_device *ms_sdev;
+ struct list_head ms_head;
+};
+
struct c2_inquiry {
u8 peripheral_info;
u8 page_code; /* 0xC2 */
@@ -369,16 +370,17 @@ static void release_controller(struct kref *kref)
kfree(ctlr);
}
-static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
- char *array_name)
+static struct rdac_controller *get_controller(int index, char *array_name,
+ u8 *array_id, struct scsi_device *sdev)
{
struct rdac_controller *ctlr, *tmp;
spin_lock(&list_lock);
list_for_each_entry(tmp, &ctlr_list, node) {
- if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
- (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
+ if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
+ (tmp->index == index) &&
+ (tmp->host == sdev->host)) {
kref_get(&tmp->kref);
spin_unlock(&list_lock);
return tmp;
@@ -389,16 +391,11 @@ static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
goto done;
/* initialize fields of controller */
- memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
- memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
+ memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
+ ctlr->index = index;
+ ctlr->host = sdev->host;
memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);
- /* update the controller index */
- if (slot_id[1] == 0x31)
- ctlr->index = 0;
- else
- ctlr->index = 1;
-
kref_init(&ctlr->kref);
ctlr->use_ms10 = -1;
ctlr->ms_queued = 0;
@@ -444,7 +441,7 @@ done:
}
static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
- char *array_name)
+ char *array_name, u8 *array_id)
{
int err, i;
struct c8_inquiry *inqp;
@@ -463,6 +460,8 @@ static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
*(array_name+i) = inqp->array_user_label[(2*i)+1];
*(array_name+ARRAY_LABEL_LEN-1) = '\0';
+ memset(array_id, 0, UNIQUE_ID_LEN);
+ memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
}
return err;
}
@@ -504,16 +503,20 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
}
static int initialize_controller(struct scsi_device *sdev,
- struct rdac_dh_data *h, char *array_name)
+ struct rdac_dh_data *h, char *array_name, u8 *array_id)
{
- int err;
+ int err, index;
struct c4_inquiry *inqp;
err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
if (err == SCSI_DH_OK) {
inqp = &h->inq.c4;
- h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id,
- array_name);
+ /* get the controller index */
+ if (inqp->slot_id[1] == 0x31)
+ index = 0;
+ else
+ index = 1;
+ h->ctlr = get_controller(index, array_name, array_id, sdev);
if (!h->ctlr)
err = SCSI_DH_RES_TEMP_UNAVAIL;
}
@@ -835,6 +838,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
unsigned long flags;
int err;
char array_name[ARRAY_LABEL_LEN];
+ char array_id[UNIQUE_ID_LEN];
scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ sizeof(*h) , GFP_KERNEL);
@@ -849,11 +853,11 @@ static int rdac_bus_attach(struct scsi_device *sdev)
h->lun = UNINITIALIZED_LUN;
h->state = RDAC_STATE_ACTIVE;
- err = get_lun_info(sdev, h, array_name);
+ err = get_lun_info(sdev, h, array_name, array_id);
if (err != SCSI_DH_OK)
goto failed;
- err = initialize_controller(sdev, h, array_name);
+ err = initialize_controller(sdev, h, array_name, array_id);
if (err != SCSI_DH_OK)
goto failed;
diff --git a/drivers/scsi/dpt/dpti_i2o.h b/drivers/scsi/dpt/dpti_i2o.h
index 179ad77f6cc..bd9e31e1624 100644
--- a/drivers/scsi/dpt/dpti_i2o.h
+++ b/drivers/scsi/dpt/dpti_i2o.h
@@ -22,7 +22,7 @@
#include <linux/i2o-dev.h>
#include <linux/notifier.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/*
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 155d7b9bdea..ba710e350ac 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -99,7 +99,8 @@ static void fcoe_destroy_work(struct work_struct *);
static int fcoe_ddp_setup(struct fc_lport *, u16, struct scatterlist *,
unsigned int);
static int fcoe_ddp_done(struct fc_lport *, u16);
-
+static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
+ unsigned int);
static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
static bool fcoe_match(struct net_device *netdev);
@@ -143,6 +144,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
.frame_send = fcoe_xmit,
.ddp_setup = fcoe_ddp_setup,
.ddp_done = fcoe_ddp_done,
+ .ddp_target = fcoe_ddp_target,
.elsct_send = fcoe_elsct_send,
.get_lesb = fcoe_get_lesb,
.lport_set_port_id = fcoe_set_port_id,
@@ -429,21 +431,6 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
struct fcoe_ctlr *fip = &fcoe->ctlr;
u8 flogi_maddr[ETH_ALEN];
const struct net_device_ops *ops;
- struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
-
- FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
-
- /* Logout of the fabric */
- fc_fabric_logoff(fcoe->ctlr.lp);
-
- /* Cleanup the fc_lport */
- fc_lport_destroy(fcoe->ctlr.lp);
-
- /* Stop the transmit retry timer */
- del_timer_sync(&port->timer);
-
- /* Free existing transmit skbs */
- fcoe_clean_pending_queue(fcoe->ctlr.lp);
/*
* Don't listen for Ethernet packets anymore.
@@ -466,9 +453,6 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
} else
dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
- if (!is_zero_ether_addr(port->data_src_addr))
- dev_uc_del(netdev, port->data_src_addr);
-
/* Tell the LLD we are done w/ FCoE */
ops = netdev->netdev_ops;
if (ops->ndo_fcoe_disable) {
@@ -476,6 +460,8 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
FCOE_NETDEV_DBG(netdev, "Failed to disable FCoE"
" specific feature for LLD.\n");
}
+
+ /* Release the self-reference taken during fcoe_interface_create() */
fcoe_interface_put(fcoe);
}
@@ -501,6 +487,19 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
}
/**
+ * fcoe_port_send() - Send an Ethernet-encapsulated FIP/FCoE frame
+ * @port: The FCoE port
+ * @skb: The FIP/FCoE packet to be sent
+ */
+static void fcoe_port_send(struct fcoe_port *port, struct sk_buff *skb)
+{
+ if (port->fcoe_pending_queue.qlen)
+ fcoe_check_wait_queue(port->lport, skb);
+ else if (fcoe_start_io(skb))
+ fcoe_check_wait_queue(port->lport, skb);
+}
+
+/**
* fcoe_fip_send() - Send an Ethernet-encapsulated FIP frame
* @fip: The FCoE controller
* @skb: The FIP packet to be sent
@@ -508,7 +507,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
skb->dev = fcoe_from_ctlr(fip)->netdev;
- dev_queue_xmit(skb);
+ fcoe_port_send(lport_priv(fip->lp), skb);
}
/**
@@ -749,12 +748,27 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
* The offload EM that this routine is associated with will handle any
* packets that are for SCSI read requests.
*
+ * This has been enhanced to work when the FCoE stack is operating in target
+ * mode.
+ *
* Returns: True for read types I/O, otherwise returns false.
*/
bool fcoe_oem_match(struct fc_frame *fp)
{
- return fc_fcp_is_read(fr_fsp(fp)) &&
- (fr_fsp(fp)->data_len > fcoe_ddp_min);
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fcp_cmnd *fcp;
+
+ if (fc_fcp_is_read(fr_fsp(fp)) &&
+ (fr_fsp(fp)->data_len > fcoe_ddp_min))
+ return true;
+ else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
+ fcp = fc_frame_payload_get(fp, sizeof(*fcp));
+ if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
+ fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
+ (fcp->fc_flags & FCP_CFL_WRDATA))
+ return true;
+ }
+ return false;
}
/**
@@ -844,6 +858,32 @@ skip_oem:
*/
static void fcoe_if_destroy(struct fc_lport *lport)
{
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->priv;
+ struct net_device *netdev = fcoe->netdev;
+
+ FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
+
+ /* Logout of the fabric */
+ fc_fabric_logoff(lport);
+
+ /* Cleanup the fc_lport */
+ fc_lport_destroy(lport);
+
+ /* Stop the transmit retry timer */
+ del_timer_sync(&port->timer);
+
+ /* Free existing transmit skbs */
+ fcoe_clean_pending_queue(lport);
+
+ rtnl_lock();
+ if (!is_zero_ether_addr(port->data_src_addr))
+ dev_uc_del(netdev, port->data_src_addr);
+ rtnl_unlock();
+
+ /* Release reference held in fcoe_if_create() */
+ fcoe_interface_put(fcoe);
+
/* Free queued packets for the per-CPU receive threads */
fcoe_percpu_clean(lport);
@@ -887,6 +927,28 @@ static int fcoe_ddp_setup(struct fc_lport *lport, u16 xid,
}
/**
+ * fcoe_ddp_target() - Call a LLD's ddp_target through the net device
+ * @lport: The local port to setup DDP for
+ * @xid: The exchange ID for this DDP transfer
+ * @sgl: The scatterlist describing this transfer
+ * @sgc: The number of sg items
+ *
+ * Returns: 0 if the DDP context was not configured
+ */
+static int fcoe_ddp_target(struct fc_lport *lport, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ struct net_device *netdev = fcoe_netdev(lport);
+
+ if (netdev->netdev_ops->ndo_fcoe_ddp_target)
+ return netdev->netdev_ops->ndo_fcoe_ddp_target(netdev, xid,
+ sgl, sgc);
+
+ return 0;
+}
+
+
+/**
* fcoe_ddp_done() - Call a LLD's ddp_done through the net device
* @lport: The local port to complete DDP on
* @xid: The exchange ID for this DDP transfer
@@ -1206,6 +1268,26 @@ static int fcoe_cpu_callback(struct notifier_block *nfb,
}
/**
+ * fcoe_select_cpu() - Selects CPU to handle post-processing of incoming
+ * command.
+ *
+ * This routine selects the next CPU based on the cpumask to distribute
+ * incoming requests in a round-robin fashion.
+ *
+ * Returns: int CPU number
+ */
+static inline unsigned int fcoe_select_cpu(void)
+{
+ static unsigned int selected_cpu;
+
+ selected_cpu = cpumask_next(selected_cpu, cpu_online_mask);
+ if (selected_cpu >= nr_cpu_ids)
+ selected_cpu = cpumask_first(cpu_online_mask);
+
+ return selected_cpu;
+}
+
+/**
* fcoe_rcv() - Receive packets from a net device
* @skb: The received packet
* @netdev: The net device that the packet was received on
@@ -1271,18 +1353,25 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
fr = fcoe_dev_from_skb(skb);
fr->fr_dev = lport;
- fr->ptype = ptype;
/*
* In case the incoming frame's exchange is originated from
* the initiator, then received frame's exchange id is ANDed
* with fc_cpu_mask bits to get the same cpu on which exchange
- * was originated, otherwise just use the current cpu.
+ * was originated, otherwise select cpu using rx exchange id
+ * or fcoe_select_cpu().
*/
if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
- else
- cpu = smp_processor_id();
+ else {
+ if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)
+ cpu = fcoe_select_cpu();
+ else
+ cpu = ntohs(fh->fh_rx_id) & fc_cpu_mask;
+ }
+
+ if (cpu >= nr_cpu_ids)
+ goto err;
fps = &per_cpu(fcoe_percpu, cpu);
spin_lock_bh(&fps->fcoe_rx_list.lock);
@@ -1482,11 +1571,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* send down to lld */
fr_dev(fp) = lport;
- if (port->fcoe_pending_queue.qlen)
- fcoe_check_wait_queue(lport, skb);
- else if (fcoe_start_io(skb))
- fcoe_check_wait_queue(lport, skb);
-
+ fcoe_port_send(port, skb);
return 0;
}
@@ -1733,7 +1818,6 @@ static int fcoe_device_notification(struct notifier_block *notifier,
case NETDEV_UNREGISTER:
list_del(&fcoe->list);
port = lport_priv(fcoe->ctlr.lp);
- fcoe_interface_cleanup(fcoe);
queue_work(fcoe_wq, &port->destroy_work);
goto out;
break;
@@ -1827,22 +1911,22 @@ static int fcoe_destroy(struct net_device *netdev)
{
struct fcoe_interface *fcoe;
struct fc_lport *lport;
+ struct fcoe_port *port;
int rc = 0;
mutex_lock(&fcoe_config_mutex);
rtnl_lock();
fcoe = fcoe_hostlist_lookup_port(netdev);
if (!fcoe) {
- rtnl_unlock();
rc = -ENODEV;
goto out_nodev;
}
lport = fcoe->ctlr.lp;
+ port = lport_priv(lport);
list_del(&fcoe->list);
- fcoe_interface_cleanup(fcoe);
- rtnl_unlock();
- fcoe_if_destroy(lport);
+ queue_work(fcoe_wq, &port->destroy_work);
out_nodev:
+ rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
return rc;
}
@@ -1854,10 +1938,25 @@ out_nodev:
static void fcoe_destroy_work(struct work_struct *work)
{
struct fcoe_port *port;
+ struct fcoe_interface *fcoe;
+ int npiv = 0;
port = container_of(work, struct fcoe_port, destroy_work);
mutex_lock(&fcoe_config_mutex);
+
+ /* set if this is an NPIV port */
+ npiv = port->lport->vport ? 1 : 0;
+
+ fcoe = port->priv;
fcoe_if_destroy(port->lport);
+
+ /* Do not tear down the fcoe interface for NPIV port */
+ if (!npiv) {
+ rtnl_lock();
+ fcoe_interface_cleanup(fcoe);
+ rtnl_unlock();
+ }
+
mutex_unlock(&fcoe_config_mutex);
}
@@ -1886,7 +1985,7 @@ static bool fcoe_match(struct net_device *netdev)
*/
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
{
- int rc;
+ int rc = 0;
struct fcoe_interface *fcoe;
struct fc_lport *lport;
@@ -1911,7 +2010,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
netdev->name);
rc = -EIO;
fcoe_interface_cleanup(fcoe);
- goto out_free;
+ goto out_nodev;
}
/* Make this the "master" N_Port */
@@ -1926,17 +2025,6 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
if (!fcoe_link_ok(lport))
fcoe_ctlr_link_up(&fcoe->ctlr);
- /*
- * Release from init in fcoe_interface_create(), on success lport
- * should be holding a reference taken in fcoe_if_create().
- */
- fcoe_interface_put(fcoe);
- rtnl_unlock();
- mutex_unlock(&fcoe_config_mutex);
-
- return 0;
-out_free:
- fcoe_interface_put(fcoe);
out_nodev:
rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
@@ -2218,7 +2306,6 @@ static void __exit fcoe_exit(void)
list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
list_del(&fcoe->list);
port = lport_priv(fcoe->ctlr.lp);
- fcoe_interface_cleanup(fcoe);
queue_work(fcoe_wq, &port->destroy_work);
}
rtnl_unlock();
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 671cde9d406..95a5ba29320 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -37,7 +37,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.5.0.1"
+#define DRV_VERSION "1.5.0.2"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index bb63f1a1f80..fc98eb61e76 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -388,17 +388,6 @@ static void fnic_iounmap(struct fnic *fnic)
iounmap(fnic->bar0.vaddr);
}
-/*
- * Allocate element for mempools requiring GFP_DMA flag.
- * Otherwise, checks in kmem_flagcheck() hit BUG_ON().
- */
-static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data)
-{
- struct kmem_cache *mem = pool_data;
-
- return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA);
-}
-
/**
* fnic_get_mac() - get assigned data MAC address for FIP code.
* @lport: local port.
@@ -603,14 +592,12 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
if (!fnic->io_req_pool)
goto err_out_free_resources;
- pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
- fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
+ pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
if (!pool)
goto err_out_free_ioreq_pool;
fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
- pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
- fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
+ pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
if (!pool)
goto err_out_free_dflt_pool;
fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
@@ -876,7 +863,7 @@ static int __init fnic_init_module(void)
len = sizeof(struct fnic_dflt_sgl_list);
fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
- SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
+ SLAB_HWCACHE_ALIGN,
NULL);
if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
@@ -888,7 +875,7 @@ static int __init fnic_init_module(void)
len = sizeof(struct fnic_sgl_list);
fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
- SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
+ SLAB_HWCACHE_ALIGN,
NULL);
if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 538b31c2cf5..c40ce52ed7c 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -406,7 +406,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
if (sg_count) {
io_req->sgl_list =
mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
- GFP_ATOMIC | GFP_DMA);
+ GFP_ATOMIC);
if (!io_req->sgl_list) {
ret = SCSI_MLQUEUE_HOST_BUSY;
scsi_dma_unmap(sc);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c6c0434d803..ec61bdb833a 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -46,7 +46,7 @@
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/kthread.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
@@ -1037,6 +1037,7 @@ static void complete_scsi_command(struct CommandList *cp)
unsigned char sense_key;
unsigned char asc; /* additional sense code */
unsigned char ascq; /* additional sense code qualifier */
+ unsigned long sense_data_size;
ei = cp->err_info;
cmd = (struct scsi_cmnd *) cp->scsi_cmd;
@@ -1051,10 +1052,14 @@ static void complete_scsi_command(struct CommandList *cp)
cmd->result |= ei->ScsiStatus;
/* copy the sense data whether we need to or not. */
- memcpy(cmd->sense_buffer, ei->SenseInfo,
- ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
- SCSI_SENSE_BUFFERSIZE :
- ei->SenseLen);
+ if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
+ sense_data_size = SCSI_SENSE_BUFFERSIZE;
+ else
+ sense_data_size = sizeof(ei->SenseInfo);
+ if (ei->SenseLen < sense_data_size)
+ sense_data_size = ei->SenseLen;
+
+ memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
scsi_set_resid(cmd, ei->ResidualCnt);
if (ei->CommandStatus == 0) {
@@ -1214,8 +1219,8 @@ static void complete_scsi_command(struct CommandList *cp)
dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
break;
case CMD_UNSOLICITED_ABORT:
- cmd->result = DID_RESET << 16;
- dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited "
+ cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
+ dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
"abort\n", cp);
break;
case CMD_TIMEOUT:
@@ -2580,7 +2585,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->SG[0].Ext = 0; /* we are not chaining*/
}
hpsa_scsi_do_simple_cmd_core(h, c);
- hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+ if (iocommand.buf_size > 0)
+ hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
/* Copy the error information out */
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6d8dcd4dd06..7f53ceaa723 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -214,7 +214,7 @@ static void SA5_submit_command(struct ctlr_info *h,
dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr,
c->Header.Tag.lower);
writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
- (void) readl(h->vaddr + SA5_REQUEST_PORT_OFFSET);
+ (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
h->commands_outstanding++;
if (h->commands_outstanding > h->max_outstanding)
h->max_outstanding = h->commands_outstanding;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b7650613b8c..bdfa223a7db 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4306,8 +4306,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
spin_lock_irqsave(vhost->host->host_lock, flags);
if (rc == H_CLOSED)
vio_enable_interrupts(to_vio_dev(vhost->dev));
- else if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
- (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+ if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+ (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
}
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 888086c4e70..8d636301e32 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -8778,14 +8778,14 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
if (rc != PCIBIOS_SUCCESSFUL) {
dev_err(&pdev->dev, "Failed to save PCI config space\n");
rc = -EIO;
- goto cleanup_nomem;
+ goto out_msi_disable;
}
if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
- goto cleanup_nomem;
+ goto out_msi_disable;
if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
- goto cleanup_nomem;
+ goto out_msi_disable;
if (ioa_cfg->sis64)
ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
@@ -8800,7 +8800,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
if (rc < 0) {
dev_err(&pdev->dev,
"Couldn't allocate enough memory for device driver!\n");
- goto cleanup_nomem;
+ goto out_msi_disable;
}
/*
@@ -8845,10 +8845,10 @@ out:
cleanup_nolog:
ipr_free_mem(ioa_cfg);
-cleanup_nomem:
- iounmap(ipr_regs);
out_msi_disable:
pci_disable_msi(pdev);
+cleanup_nomem:
+ iounmap(ipr_regs);
out_release_regions:
pci_release_regions(pdev);
out_scsi_host_put:
diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile
new file mode 100644
index 00000000000..3359e10e0d8
--- /dev/null
+++ b/drivers/scsi/isci/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_SCSI_ISCI) += isci.o
+isci-objs := init.o phy.o request.o \
+ remote_device.o port.o \
+ host.o task.o probe_roms.o \
+ remote_node_context.o \
+ remote_node_table.o \
+ unsolicited_frame_control.o \
+ port_config.o \
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
new file mode 100644
index 00000000000..5f54461cabc
--- /dev/null
+++ b/drivers/scsi/isci/firmware/Makefile
@@ -0,0 +1,19 @@
+# Makefile for create_fw
+#
+CC=gcc
+CFLAGS=-c -Wall -O2 -g
+LDFLAGS=
+SOURCES=create_fw.c
+OBJECTS=$(SOURCES:.c=.o)
+EXECUTABLE=create_fw
+
+all: $(SOURCES) $(EXECUTABLE)
+
+$(EXECUTABLE): $(OBJECTS)
+ $(CC) $(LDFLAGS) $(OBJECTS) -o $@
+
+.c.o:
+ $(CC) $(CFLAGS) $< -o $@
+
+clean:
+ rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
new file mode 100644
index 00000000000..8056d2bd233
--- /dev/null
+++ b/drivers/scsi/isci/firmware/README
@@ -0,0 +1,36 @@
+This defines the temporary binary blob we are to pass to the SCU
+driver to emulate the binary firmware that we will eventually be
+able to access via NVRAM on the SCU controller.
+
+The current size of the binary blob is expected to be 149 bytes or larger
+
+Header Types:
+0x1: Phy Masks
+0x2: Phy Gens
+0x3: SAS Addrs
+0xff: End of Data
+
+ID string - u8[12]: "#SCU MAGIC#\0"
+Version - u8: 1
+SubVersion - u8: 0
+
+Header Type - u8: 0x1
+Size - u8: 8
+Phy Mask - u32[8]
+
+Header Type - u8: 0x2
+Size - u8: 8
+Phy Gen - u32[8]
+
+Header Type - u8: 0x3
+Size - u8: 8
+Sas Addr - u64[8]
+
+Header Type - u8: 0xff
+
+
+==============================================================================
+
+Place isci_firmware.bin in /lib/firmware
+Be sure to recreate the initramfs image to include the firmware.
+
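For orientation, a minimal C sketch of the record layout the README describes (illustration only, not part of the patch; the struct and field names are assumptions, and the in-tree create_fw tool below writes a struct isci_orom instead):

#include <stdint.h>

/* Fixed prologue: ID string, version, sub-version (per the README above). */
struct scu_fw_blob_prologue {
	uint8_t id[12];		/* "#SCU MAGIC#\0" */
	uint8_t version;	/* 1 */
	uint8_t sub_version;	/* 0 */
};

/* One tagged record; records repeat until the 0xff end-of-data type. */
struct scu_fw_blob_record {
	uint8_t type;	/* 0x1 phy masks, 0x2 phy gens, 0x3 SAS addrs, 0xff end of data */
	uint8_t size;	/* entry count for the payload, 8 in the layout above */
	/* payload follows: u32[size] for types 0x1/0x2, u64[size] for type 0x3 */
};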
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
new file mode 100644
index 00000000000..c7a2887a7e9
--- /dev/null
+++ b/drivers/scsi/isci/firmware/create_fw.c
@@ -0,0 +1,99 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <asm/types.h>
+#include <strings.h>
+#include <stdint.h>
+
+#include "create_fw.h"
+#include "../probe_roms.h"
+
+int write_blob(struct isci_orom *isci_orom)
+{
+ FILE *fd;
+ int err;
+ size_t count;
+
+ fd = fopen(blob_name, "w+");
+ if (!fd) {
+ perror("Open file for write failed");
+ return -EIO;
+ }
+
+ count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
+ if (count != 1) {
+ perror("Write data failed");
+ fclose(fd);
+ return -EIO;
+ }
+
+ fclose(fd);
+
+ return 0;
+}
+
+void set_binary_values(struct isci_orom *isci_orom)
+{
+ int ctrl_idx, phy_idx, port_idx;
+
+ /* setting OROM signature */
+ strncpy(isci_orom->hdr.signature, sig, strlen(sig));
+ isci_orom->hdr.version = version;
+ isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
+ isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
+ isci_orom->hdr.num_elements = num_elements;
+
+ for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
+ isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
+ isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
+ max_num_concurrent_dev_spin_up;
+ isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
+ enable_ssc;
+
+ for (port_idx = 0; port_idx < 4; port_idx++)
+ isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
+ phy_mask[ctrl_idx][port_idx];
+
+ for (phy_idx = 0; phy_idx < 4; phy_idx++) {
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
+ (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
+ (__u32)(sas_addr[ctrl_idx][phy_idx]);
+
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
+ afe_tx_amp_control0;
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
+ afe_tx_amp_control1;
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
+ afe_tx_amp_control2;
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
+ afe_tx_amp_control3;
+ }
+ }
+}
+
+int main(void)
+{
+ int err;
+ struct isci_orom *isci_orom;
+
+ isci_orom = malloc(sizeof(struct isci_orom));
+ if (!isci_orom)
+ return -ENOMEM;
+ memset(isci_orom, 0, sizeof(struct isci_orom));
+
+ set_binary_values(isci_orom);
+
+ err = write_blob(isci_orom);
+ if (err < 0) {
+ free(isci_orom);
+ return err;
+ }
+
+ free(isci_orom);
+ return 0;
+}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
new file mode 100644
index 00000000000..5f298828d22
--- /dev/null
+++ b/drivers/scsi/isci/firmware/create_fw.h
@@ -0,0 +1,77 @@
+#ifndef _CREATE_FW_H_
+#define _CREATE_FW_H_
+#include "../probe_roms.h"
+
+
+/* we are configuring for 2 SCUs */
+static const int num_elements = 2;
+
+/*
+ * For all defined arrays:
+ * elements 0-3 are for SCU0, ports 0-3
+ * elements 4-7 are for SCU1, ports 0-3
+ *
+ * valid configurations for one SCU are:
+ * P0 P1 P2 P3
+ * ----------------
+ * 0xF,0x0,0x0,0x0 # 1 x4 port
+ * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
+ * # ports
+ * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
+ * # port
+ * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
+ * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
+ *
+ * if there is a port/phy on which you do not wish to override the default
+ * values, use the value assigned to UNINIT_PARAM (255).
+ */
+
+/* discovery mode type (port auto config mode by default ) */
+
+/*
+ * if there is a port/phy on which you do not wish to override the default
+ * values, use the value "0000000000000000". SAS address of zero's is
+ * considered invalid and will not be used.
+ */
+#ifdef MPC
+static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
+static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
+ {1, 2, 4, 8} };
+static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
+ 0x5FCFFFFFF0000002ULL,
+ 0x5FCFFFFFF0000003ULL,
+ 0x5FCFFFFFF0000004ULL },
+ { 0x5FCFFFFFF0000005ULL,
+ 0x5FCFFFFFF0000006ULL,
+ 0x5FCFFFFFF0000007ULL,
+ 0x5FCFFFFFF0000008ULL } };
+#else /* APC (default) */
+static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+static const __u8 phy_mask[2][4];
+static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
+ 0x5FCFFFFF00000001ULL,
+ 0x5FCFFFFF00000001ULL,
+ 0x5FCFFFFF00000001ULL },
+ { 0x5FCFFFFF00000002ULL,
+ 0x5FCFFFFF00000002ULL,
+ 0x5FCFFFFF00000002ULL,
+ 0x5FCFFFFF00000002ULL } };
+#endif
+
+/* Maximum number of concurrent device spin up */
+static const int max_num_concurrent_dev_spin_up = 1;
+
+/* enable of ssc operation */
+static const int enable_ssc;
+
+/* AFE_TX_AMP_CONTROL */
+static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
+static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
+static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
+static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
+
+static const char blob_name[] = "isci_firmware.bin";
+static const char sig[] = "ISCUOEMB";
+static const unsigned char version = 0x10;
+
+#endif
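To make the phy_mask convention above concrete, here is a small stand-alone sketch (illustration only, not part of the patch); the example masks come from the valid-configuration list in the comment above, everything else is assumed:

#include <stdio.h>

int main(void)
{
	/* 0x3,0x0,0xC,0x0: phys 0 and 1 form a x2 port, phys 2 and 3 form a x2 port */
	const unsigned char masks[4] = { 0x3, 0x0, 0xC, 0x0 };
	int port, phy;

	for (port = 0; port < 4; port++)
		for (phy = 0; phy < 4; phy++)
			if (masks[port] & (1 << phy))
				printf("port %d uses phy %d\n", port, phy);
	return 0;
}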
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
new file mode 100644
index 00000000000..26072f1e985
--- /dev/null
+++ b/drivers/scsi/isci/host.c
@@ -0,0 +1,2751 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <scsi/sas.h>
+#include "host.h"
+#include "isci.h"
+#include "port.h"
+#include "host.h"
+#include "probe_roms.h"
+#include "remote_device.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "registers.h"
+#include "scu_remote_node_context.h"
+#include "scu_task_context.h"
+
+#define SCU_CONTEXT_RAM_INIT_STALL_TIME 200
+
+#define smu_max_ports(dcc_value) \
+ (\
+ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
+ )
+
+#define smu_max_task_contexts(dcc_value) \
+ (\
+ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
+ )
+
+#define smu_max_rncs(dcc_value) \
+ (\
+ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
+ )
+
+#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
+
+/**
+ *
+ *
+ * The number of milliseconds to wait while a given phy is consuming power
+ * before allowing another set of phys to consume power. Ultimately, this will
+ * be specified by OEM parameter.
+ */
+#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
+
+/**
+ * NORMALIZE_PUT_POINTER() -
+ *
+ * This macro will normalize the completion queue put pointer so its value can
+ * be used as an array index
+ */
+#define NORMALIZE_PUT_POINTER(x) \
+ ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
+
+
+/**
+ * NORMALIZE_EVENT_POINTER() -
+ *
+ * This macro will normalize the completion queue event entry so its value can
+ * be used as an index.
+ */
+#define NORMALIZE_EVENT_POINTER(x) \
+ (\
+ ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
+ >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
+ )
+
+/**
+ * NORMALIZE_GET_POINTER() -
+ *
+ * This macro will normalize the completion queue get pointer so its value can
+ * be used as an index into an array
+ */
+#define NORMALIZE_GET_POINTER(x) \
+ ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
+
+/**
+ * NORMALIZE_GET_POINTER_CYCLE_BIT() -
+ *
+ * This macro will normalize the completion queue cycle pointer so it matches
+ * the completion queue cycle bit
+ */
+#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
+ ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
+
+/**
+ * COMPLETION_QUEUE_CYCLE_BIT() -
+ *
+ * This macro will return the cycle bit of the completion queue entry
+ */
+#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
+
+/* Init the state machine and call the state entry function (if any) */
+void sci_init_sm(struct sci_base_state_machine *sm,
+ const struct sci_base_state *state_table, u32 initial_state)
+{
+ sci_state_transition_t handler;
+
+ sm->initial_state_id = initial_state;
+ sm->previous_state_id = initial_state;
+ sm->current_state_id = initial_state;
+ sm->state_table = state_table;
+
+ handler = sm->state_table[initial_state].enter_state;
+ if (handler)
+ handler(sm);
+}
+
+/* Call the state exit fn, update the current state, call the state entry fn */
+void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
+{
+ sci_state_transition_t handler;
+
+ handler = sm->state_table[sm->current_state_id].exit_state;
+ if (handler)
+ handler(sm);
+
+ sm->previous_state_id = sm->current_state_id;
+ sm->current_state_id = next_state;
+
+ handler = sm->state_table[sm->current_state_id].enter_state;
+ if (handler)
+ handler(sm);
+}
+
+static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
+{
+ u32 get_value = ihost->completion_queue_get;
+ u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
+
+ if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
+ COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
+ return true;
+
+ return false;
+}
+
+static bool sci_controller_isr(struct isci_host *ihost)
+{
+ if (sci_controller_completion_queue_has_entries(ihost)) {
+ return true;
+ } else {
+ /*
+ * We have a spurious interrupt; it could be that we have already
+ * emptied the completion queue from a previous interrupt. */
+ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+
+ /*
+ * There is a race in the hardware that could cause us not to be notified
+ * of an interrupt completion if we do not take this step. We will mask
+ * then unmask the interrupts so that if another interrupt is pending after
+ * the clearing of the interrupt source we get the next interrupt message. */
+ writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+ }
+
+ return false;
+}
+
+irqreturn_t isci_msix_isr(int vec, void *data)
+{
+ struct isci_host *ihost = data;
+
+ if (sci_controller_isr(ihost))
+ tasklet_schedule(&ihost->completion_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static bool sci_controller_error_isr(struct isci_host *ihost)
+{
+ u32 interrupt_status;
+
+ interrupt_status =
+ readl(&ihost->smu_registers->interrupt_status);
+ interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
+
+ if (interrupt_status != 0) {
+ /*
+ * There is an error interrupt pending so let it through and handle
+ * in the callback */
+ return true;
+ }
+
+ /*
+ * There is a race in the hardware that could cause us not to be notified
+ * of an interrupt completion if we do not take this step. We will mask
+ * then unmask the error interrupts so if there was another interrupt
+ * pending we will be notified.
+ * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
+ writel(0xff, &ihost->smu_registers->interrupt_mask);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+
+ return false;
+}
+
+static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
+{
+ u32 index = SCU_GET_COMPLETION_INDEX(ent);
+ struct isci_request *ireq = ihost->reqs[index];
+
+ /* Make sure that we really want to process this IO request */
+ if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
+ ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
+ ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
+ /* Yep, this is a valid io request; pass it along to the
+ * io request handler
+ */
+ sci_io_request_tc_completion(ireq, ent);
+}
+
+static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
+{
+ u32 index;
+ struct isci_request *ireq;
+ struct isci_remote_device *idev;
+
+ index = SCU_GET_COMPLETION_INDEX(ent);
+
+ switch (scu_get_command_request_type(ent)) {
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
+ ireq = ihost->reqs[index];
+ dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
+ __func__, ent, ireq);
+ /* @todo For a post TC operation we need to fail the IO
+ * request
+ */
+ break;
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
+ idev = ihost->device_table[index];
+ dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
+ __func__, ent, idev);
+ /* @todo For a port RNC operation we need to fail the
+ * device
+ */
+ break;
+ default:
+ dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
+ __func__, ent);
+ break;
+ }
+}
+
+static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
+{
+ u32 index;
+ u32 frame_index;
+
+ struct scu_unsolicited_frame_header *frame_header;
+ struct isci_phy *iphy;
+ struct isci_remote_device *idev;
+
+ enum sci_status result = SCI_FAILURE;
+
+ frame_index = SCU_GET_FRAME_INDEX(ent);
+
+ frame_header = ihost->uf_control.buffers.array[frame_index].header;
+ ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
+
+ if (SCU_GET_FRAME_ERROR(ent)) {
+ /*
+ * / @todo If the IAF frame or SIGNATURE FIS frame has an error will
+ * / this cause a problem? We expect the phy initialization will
+ * / fail if there is an error in the frame. */
+ sci_controller_release_frame(ihost, frame_index);
+ return;
+ }
+
+ if (frame_header->is_address_frame) {
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+ iphy = &ihost->phys[index];
+ result = sci_phy_frame_handler(iphy, frame_index);
+ } else {
+
+ index = SCU_GET_COMPLETION_INDEX(ent);
+
+ if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ /*
+ * This is a signature fis or a frame from a direct attached SATA
+ * device that has not yet been created. In either case forward
+ * the frame to the PE and let it take care of the frame data. */
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+ iphy = &ihost->phys[index];
+ result = sci_phy_frame_handler(iphy, frame_index);
+ } else {
+ if (index < ihost->remote_node_entries)
+ idev = ihost->device_table[index];
+ else
+ idev = NULL;
+
+ if (idev != NULL)
+ result = sci_remote_device_frame_handler(idev, frame_index);
+ else
+ sci_controller_release_frame(ihost, frame_index);
+ }
+ }
+
+ if (result != SCI_SUCCESS) {
+ /*
+ * / @todo Is there any reason to report some additional error message
+ * / when we get this failure notification? */
+ }
+}
+
+static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
+{
+ struct isci_remote_device *idev;
+ struct isci_request *ireq;
+ struct isci_phy *iphy;
+ u32 index;
+
+ index = SCU_GET_COMPLETION_INDEX(ent);
+
+ switch (scu_get_event_type(ent)) {
+ case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
+ /* / @todo The driver did something wrong and we need to fix the condition. */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received SMU command error "
+ "0x%x\n",
+ __func__,
+ ihost,
+ ent);
+ break;
+
+ case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
+ case SCU_EVENT_TYPE_SMU_ERROR:
+ case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
+ /*
+ * / @todo This is a hardware failure and it's likely that we want to
+ * / reset the controller. */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received fatal controller "
+ "event 0x%x\n",
+ __func__,
+ ihost,
+ ent);
+ break;
+
+ case SCU_EVENT_TYPE_TRANSPORT_ERROR:
+ ireq = ihost->reqs[index];
+ sci_io_request_event_handler(ireq, ent);
+ break;
+
+ case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+ switch (scu_get_event_specifier(ent)) {
+ case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
+ case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
+ ireq = ihost->reqs[index];
+ if (ireq != NULL)
+ sci_io_request_event_handler(ireq, ent);
+ else
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received "
+ "event 0x%x for io request object "
+ "that doesn't exist.\n",
+ __func__,
+ ihost,
+ ent);
+
+ break;
+
+ case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
+ idev = ihost->device_table[index];
+ if (idev != NULL)
+ sci_remote_device_event_handler(idev, ent);
+ else
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received "
+ "event 0x%x for remote device object "
+ "that doesn't exist.\n",
+ __func__,
+ ihost,
+ ent);
+
+ break;
+ }
+ break;
+
+ case SCU_EVENT_TYPE_BROADCAST_CHANGE:
+ /*
+ * direct the broadcast change event to the phy first and then let
+ * the phy redirect the broadcast change to the port object */
+ case SCU_EVENT_TYPE_ERR_CNT_EVENT:
+ /*
+ * direct error counter event to the phy object since that is where
+ * we get the event notification. This is a type 4 event. */
+ case SCU_EVENT_TYPE_OSSP_EVENT:
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+ iphy = &ihost->phys[index];
+ sci_phy_event_handler(iphy, ent);
+ break;
+
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ case SCU_EVENT_TYPE_RNC_OPS_MISC:
+ if (index < ihost->remote_node_entries) {
+ idev = ihost->device_table[index];
+
+ if (idev != NULL)
+ sci_remote_device_event_handler(idev, ent);
+ } else
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received event 0x%x "
+ "for remote device object 0x%0x that doesn't "
+ "exist.\n",
+ __func__,
+ ihost,
+ ent,
+ index);
+
+ break;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller received unknown event code %x\n",
+ __func__,
+ ent);
+ break;
+ }
+}
+
+static void sci_controller_process_completions(struct isci_host *ihost)
+{
+ u32 completion_count = 0;
+ u32 ent;
+ u32 get_index;
+ u32 get_cycle;
+ u32 event_get;
+ u32 event_cycle;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completion queue beginning get:0x%08x\n",
+ __func__,
+ ihost->completion_queue_get);
+
+ /* Get the component parts of the completion queue */
+ get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
+ get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
+
+ event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
+ event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
+
+ while (
+ NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
+ == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
+ ) {
+ completion_count++;
+
+ ent = ihost->completion_queue[get_index];
+
+ /* increment the get pointer and check for rollover to toggle the cycle bit */
+ get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
+ (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
+ get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completion queue entry:0x%08x\n",
+ __func__,
+ ent);
+
+ switch (SCU_GET_COMPLETION_TYPE(ent)) {
+ case SCU_COMPLETION_TYPE_TASK:
+ sci_controller_task_completion(ihost, ent);
+ break;
+
+ case SCU_COMPLETION_TYPE_SDMA:
+ sci_controller_sdma_completion(ihost, ent);
+ break;
+
+ case SCU_COMPLETION_TYPE_UFI:
+ sci_controller_unsolicited_frame(ihost, ent);
+ break;
+
+ case SCU_COMPLETION_TYPE_EVENT:
+ case SCU_COMPLETION_TYPE_NOTIFY: {
+ event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
+ (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
+ event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
+
+ sci_controller_event_completion(ihost, ent);
+ break;
+ }
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller received unknown "
+ "completion type %x\n",
+ __func__,
+ ent);
+ break;
+ }
+ }
+
+ /* Update the get register if we completed one or more entries */
+ if (completion_count > 0) {
+ ihost->completion_queue_get =
+ SMU_CQGR_GEN_BIT(ENABLE) |
+ SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
+ event_cycle |
+ SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
+ get_cycle |
+ SMU_CQGR_GEN_VAL(POINTER, get_index);
+
+ writel(ihost->completion_queue_get,
+ &ihost->smu_registers->completion_queue_get);
+
+ }
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completion queue ending get:0x%08x\n",
+ __func__,
+ ihost->completion_queue_get);
+
+}
+
+static void sci_controller_error_handler(struct isci_host *ihost)
+{
+ u32 interrupt_status;
+
+ interrupt_status =
+ readl(&ihost->smu_registers->interrupt_status);
+
+ if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
+ sci_controller_completion_queue_has_entries(ihost)) {
+
+ sci_controller_process_completions(ihost);
+ writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
+ } else {
+ dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
+ interrupt_status);
+
+ sci_change_state(&ihost->sm, SCIC_FAILED);
+
+ return;
+ }
+
+ /* If we don't process any completions I am not sure that we want to do this.
+ * We are in the middle of a hardware fault and should probably be reset.
+ */
+ writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+irqreturn_t isci_intx_isr(int vec, void *data)
+{
+ irqreturn_t ret = IRQ_NONE;
+ struct isci_host *ihost = data;
+
+ if (sci_controller_isr(ihost)) {
+ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+ tasklet_schedule(&ihost->completion_tasklet);
+ ret = IRQ_HANDLED;
+ } else if (sci_controller_error_isr(ihost)) {
+ spin_lock(&ihost->scic_lock);
+ sci_controller_error_handler(ihost);
+ spin_unlock(&ihost->scic_lock);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+irqreturn_t isci_error_isr(int vec, void *data)
+{
+ struct isci_host *ihost = data;
+
+ if (sci_controller_error_isr(ihost))
+ sci_controller_error_handler(ihost);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * isci_host_start_complete() - This function is called by the core library,
+ * through the ISCI Module, to indicate controller start status.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @completion_status: This parameter specifies the completion status from the
+ * core library.
+ *
+ */
+static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
+{
+ if (completion_status != SCI_SUCCESS)
+ dev_info(&ihost->pdev->dev,
+ "controller start timed out, continuing...\n");
+ isci_host_change_state(ihost, isci_ready);
+ clear_bit(IHOST_START_PENDING, &ihost->flags);
+ wake_up(&ihost->eventq);
+}
+
+int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+
+ if (test_bit(IHOST_START_PENDING, &ihost->flags))
+ return 0;
+
+ /* todo: use sas_flush_discovery once it is upstream */
+ scsi_flush_work(shost);
+
+ scsi_flush_work(shost);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: ihost->status = %d, time = %ld\n",
+ __func__, isci_host_get_state(ihost), time);
+
+ return 1;
+
+}
+
+/**
+ * sci_controller_get_suggested_start_timeout() - This method returns the
+ * suggested sci_controller_start() timeout amount. The user is free to
+ * use any timeout value, but this method provides the suggested minimum
+ * start timeout value. The returned value is based upon empirical
+ * information determined as a result of interoperability testing.
+ * @controller: the handle to the controller object for which to return the
+ * suggested start timeout.
+ *
+ * This method returns the number of milliseconds for the suggested start
+ * operation timeout.
+ */
+static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
+{
+ /* Validate the user supplied parameters. */
+ if (!ihost)
+ return 0;
+
+ /*
+ * The suggested minimum timeout value for a controller start operation:
+ *
+ * Signature FIS Timeout
+ * + Phy Start Timeout
+ * + Number of Phy Spin Up Intervals
+ * ---------------------------------
+ * Number of milliseconds for the controller start operation.
+ *
+ * NOTE: The number of phy spin up intervals will be equivalent
+ * to the number of phys divided by the number phys allowed
+ * per interval - 1 (once OEM parameters are supported).
+ * Currently we assume only 1 phy per interval. */
+
+ return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+ + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
+ + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+}
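As a worked example of the sum above (illustration only; the signature FIS timeout value and SCI_MAX_PHYS are assumed here, they are not defined in this hunk):

/*
 * Assuming SCIC_SDS_SIGNATURE_FIS_TIMEOUT == 1000 ms and SCI_MAX_PHYS == 4,
 * the suggested start timeout works out to
 *     1000 + 100 + (4 - 1) * 500 = 2600 ms
 * using SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT (100) and
 * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL (500) defined earlier in this file.
 */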
+
+static void sci_controller_enable_interrupts(struct isci_host *ihost)
+{
+ BUG_ON(ihost->smu_registers == NULL);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+void sci_controller_disable_interrupts(struct isci_host *ihost)
+{
+ BUG_ON(ihost->smu_registers == NULL);
+ writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
+}
+
+static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
+{
+ u32 port_task_scheduler_value;
+
+ port_task_scheduler_value =
+ readl(&ihost->scu_registers->peg0.ptsg.control);
+ port_task_scheduler_value |=
+ (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
+ SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
+ writel(port_task_scheduler_value,
+ &ihost->scu_registers->peg0.ptsg.control);
+}
+
+static void sci_controller_assign_task_entries(struct isci_host *ihost)
+{
+ u32 task_assignment;
+
+ /*
+ * Assign all the TCs to function 0
+ * TODO: Do we actually need to read this register to write it back?
+ */
+
+ task_assignment =
+ readl(&ihost->smu_registers->task_context_assignment[0]);
+
+ task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
+ (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
+ (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
+
+ writel(task_assignment,
+ &ihost->smu_registers->task_context_assignment[0]);
+
+}
+
+static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
+{
+ u32 index;
+ u32 completion_queue_control_value;
+ u32 completion_queue_get_value;
+ u32 completion_queue_put_value;
+
+ ihost->completion_queue_get = 0;
+
+ completion_queue_control_value =
+ (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
+ SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
+
+ writel(completion_queue_control_value,
+ &ihost->smu_registers->completion_queue_control);
+
+
+ /* Set the completion queue get pointer and enable the queue */
+ completion_queue_get_value = (
+ (SMU_CQGR_GEN_VAL(POINTER, 0))
+ | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
+ | (SMU_CQGR_GEN_BIT(ENABLE))
+ | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
+ );
+
+ writel(completion_queue_get_value,
+ &ihost->smu_registers->completion_queue_get);
+
+ /* Set the completion queue put pointer */
+ completion_queue_put_value = (
+ (SMU_CQPR_GEN_VAL(POINTER, 0))
+ | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
+ );
+
+ writel(completion_queue_put_value,
+ &ihost->smu_registers->completion_queue_put);
+
+ /* Initialize the cycle bit of the completion queue entries */
+ for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
+ /*
+ * If get.cycle_bit != completion_queue.cycle_bit
+ * it's not a valid completion queue entry
+ * so at system start all entries are invalid */
+ ihost->completion_queue[index] = 0x80000000;
+ }
+}
+
+static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
+{
+ u32 frame_queue_control_value;
+ u32 frame_queue_get_value;
+ u32 frame_queue_put_value;
+
+ /* Write the queue size */
+ frame_queue_control_value =
+ SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
+
+ writel(frame_queue_control_value,
+ &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
+
+ /* Setup the get pointer for the unsolicited frame queue */
+ frame_queue_get_value = (
+ SCU_UFQGP_GEN_VAL(POINTER, 0)
+ | SCU_UFQGP_GEN_BIT(ENABLE_BIT)
+ );
+
+ writel(frame_queue_get_value,
+ &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+ /* Setup the put pointer for the unsolicited frame queue */
+ frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
+ writel(frame_queue_put_value,
+ &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
+}
+
+static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
+{
+ if (ihost->sm.current_state_id == SCIC_STARTING) {
+ /*
+ * We move into the ready state, because some of the phys/ports
+ * may be up and operational.
+ */
+ sci_change_state(&ihost->sm, SCIC_READY);
+
+ isci_host_start_complete(ihost, status);
+ }
+}
+
+static bool is_phy_starting(struct isci_phy *iphy)
+{
+ enum sci_phy_states state;
+
+ state = iphy->sm.current_state_id;
+ switch (state) {
+ case SCI_PHY_STARTING:
+ case SCI_PHY_SUB_INITIAL:
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_IAF_UF:
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+ case SCI_PHY_SUB_FINAL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * sci_controller_start_next_phy - start phy
+ * @ihost: controller
+ *
+ * If all the phys have been started, then attempt to transition the
+ * controller to the READY state and inform the user
+ * (sci_cb_controller_start_complete()).
+ */
+static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
+{
+ struct sci_oem_params *oem = &ihost->oem_parameters;
+ struct isci_phy *iphy;
+ enum sci_status status;
+
+ status = SCI_SUCCESS;
+
+ if (ihost->phy_startup_timer_pending)
+ return status;
+
+ if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
+ bool is_controller_start_complete = true;
+ u32 state;
+ u8 index;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ iphy = &ihost->phys[index];
+ state = iphy->sm.current_state_id;
+
+ if (!phy_get_non_dummy_port(iphy))
+ continue;
+
+ /* The controller start operation is complete iff:
+ * - all links have been given an opportunity to start
+ * - each link either has no indication of a connected
+ * device, or has a connected device that has finished
+ * the link training process.
+ */
+ if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
+ (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
+ (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
+ is_controller_start_complete = false;
+ break;
+ }
+ }
+
+ /*
+ * The controller has successfully finished the start process.
+ * Inform the SCI Core user and transition to the READY state. */
+ if (is_controller_start_complete == true) {
+ sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
+ sci_del_timer(&ihost->phy_timer);
+ ihost->phy_startup_timer_pending = false;
+ }
+ } else {
+ iphy = &ihost->phys[ihost->next_phy_to_start];
+
+ if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+ if (phy_get_non_dummy_port(iphy) == NULL) {
+ ihost->next_phy_to_start++;
+
+ /* Caution: recursion ahead, be forewarned.
+ *
+ * The phy was never added to a port in MPC mode,
+ * so start the next phy in sequence. This phy
+ * will never go link up and will not draw power;
+ * the OEM parameters either configured the phy
+ * incorrectly for the port or it was never
+ * assigned to a port.
+ */
+ return sci_controller_start_next_phy(ihost);
+ }
+ }
+
+ status = sci_phy_start(iphy);
+
+ if (status == SCI_SUCCESS) {
+ sci_mod_timer(&ihost->phy_timer,
+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
+ ihost->phy_startup_timer_pending = true;
+ } else {
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed "
+ "to stop phy %d because of status "
+ "%d.\n",
+ __func__,
+ ihost->phys[ihost->next_phy_to_start].phy_index,
+ status);
+ }
+
+ ihost->next_phy_to_start++;
+ }
+
+ return status;
+}
+
+static void phy_startup_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
+ unsigned long flags;
+ enum sci_status status;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ ihost->phy_startup_timer_pending = false;
+
+ do {
+ status = sci_controller_start_next_phy(ihost);
+ } while (status != SCI_SUCCESS);
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static u16 isci_tci_active(struct isci_host *ihost)
+{
+ return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
+
+static enum sci_status sci_controller_start(struct isci_host *ihost,
+ u32 timeout)
+{
+ enum sci_status result;
+ u16 index;
+
+ if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
+ dev_warn(&ihost->pdev->dev,
+ "SCIC Controller start operation requested in "
+ "invalid state\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ /* Build the TCi free pool */
+ BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
+ ihost->tci_head = 0;
+ ihost->tci_tail = 0;
+ for (index = 0; index < ihost->task_context_entries; index++)
+ isci_tci_free(ihost, index);
+
+ /* Build the RNi free pool */
+ sci_remote_node_table_initialize(&ihost->available_remote_nodes,
+ ihost->remote_node_entries);
+
+ /*
+ * Before anything else let's make sure we will not be
+ * interrupted by the hardware.
+ */
+ sci_controller_disable_interrupts(ihost);
+
+ /* Enable the port task scheduler */
+ sci_controller_enable_port_task_scheduler(ihost);
+
+ /* Assign all the task entries to ihost physical function */
+ sci_controller_assign_task_entries(ihost);
+
+ /* Now initialize the completion queue */
+ sci_controller_initialize_completion_queue(ihost);
+
+ /* Initialize the unsolicited frame queue for use */
+ sci_controller_initialize_unsolicited_frame_queue(ihost);
+
+ /* Start all of the ports on this controller */
+ for (index = 0; index < ihost->logical_port_entries; index++) {
+ struct isci_port *iport = &ihost->ports[index];
+
+ result = sci_port_start(iport);
+ if (result)
+ return result;
+ }
+
+ sci_controller_start_next_phy(ihost);
+
+ sci_mod_timer(&ihost->timer, timeout);
+
+ sci_change_state(&ihost->sm, SCIC_STARTING);
+
+ return SCI_SUCCESS;
+}
+
+void isci_host_scan_start(struct Scsi_Host *shost)
+{
+ struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+ unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
+
+ set_bit(IHOST_START_PENDING, &ihost->flags);
+
+ spin_lock_irq(&ihost->scic_lock);
+ sci_controller_start(ihost, tmo);
+ sci_controller_enable_interrupts(ihost);
+ spin_unlock_irq(&ihost->scic_lock);
+}
+
+static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
+{
+ isci_host_change_state(ihost, isci_stopped);
+ sci_controller_disable_interrupts(ihost);
+ clear_bit(IHOST_STOP_PENDING, &ihost->flags);
+ wake_up(&ihost->eventq);
+}
+
+static void sci_controller_completion_handler(struct isci_host *ihost)
+{
+ /* Empty out the completion queue */
+ if (sci_controller_completion_queue_has_entries(ihost))
+ sci_controller_process_completions(ihost);
+
+ /* Clear the interrupt and enable all interrupts again */
+ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+ /* Could we write the value of SMU_ISR_COMPLETION? */
+ writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+/**
+ * isci_host_completion_routine() - This function is the delayed service
+ * routine that calls the sci core library's completion handler. It's
+ * scheduled as a tasklet from the interrupt service routine when interrupts
+ * are in use, or set as the timeout function in polled mode.
+ * @data: This parameter specifies the ISCI host object
+ *
+ */
+static void isci_host_completion_routine(unsigned long data)
+{
+ struct isci_host *ihost = (struct isci_host *)data;
+ struct list_head completed_request_list;
+ struct list_head errored_request_list;
+ struct list_head *current_position;
+ struct list_head *next_position;
+ struct isci_request *request;
+ struct isci_request *next_request;
+ struct sas_task *task;
+
+ INIT_LIST_HEAD(&completed_request_list);
+ INIT_LIST_HEAD(&errored_request_list);
+
+ spin_lock_irq(&ihost->scic_lock);
+
+ sci_controller_completion_handler(ihost);
+
+ /* Take the lists of completed I/Os from the host. */
+
+ list_splice_init(&ihost->requests_to_complete,
+ &completed_request_list);
+
+ /* Take the list of errored I/Os from the host. */
+ list_splice_init(&ihost->requests_to_errorback,
+ &errored_request_list);
+
+ spin_unlock_irq(&ihost->scic_lock);
+
+ /* Process any completions in the lists. */
+ list_for_each_safe(current_position, next_position,
+ &completed_request_list) {
+
+ request = list_entry(current_position, struct isci_request,
+ completed_node);
+ task = isci_request_access_task(request);
+
+ /* Normal notification (task_done) */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Normal - request/task = %p/%p\n",
+ __func__,
+ request,
+ task);
+
+ /* Return the task to libsas */
+ if (task != NULL) {
+
+ task->lldd_task = NULL;
+ if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+
+ /* If the task is already in the abort path,
+ * the task_done callback cannot be called.
+ */
+ task->task_done(task);
+ }
+ }
+
+ spin_lock_irq(&ihost->scic_lock);
+ isci_free_tag(ihost, request->io_tag);
+ spin_unlock_irq(&ihost->scic_lock);
+ }
+ list_for_each_entry_safe(request, next_request, &errored_request_list,
+ completed_node) {
+
+ task = isci_request_access_task(request);
+
+ /* Use sas_task_abort */
+ dev_warn(&ihost->pdev->dev,
+ "%s: Error - request/task = %p/%p\n",
+ __func__,
+ request,
+ task);
+
+ if (task != NULL) {
+
+ /* Put the task into the abort path if it's not there
+ * already.
+ */
+ if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
+ sas_task_abort(task);
+
+ } else {
+ /* This is a case where the request has completed with a
+ * status such that it needed further target servicing,
+ * but the sas_task reference has already been removed
+ * from the request. Since it was errored, it was not
+ * being aborted, so there is nothing to do except free
+ * it.
+ */
+
+ spin_lock_irq(&ihost->scic_lock);
+ /* Remove the request from the remote device's list
+ * of pending requests.
+ */
+ list_del_init(&request->dev_node);
+ isci_free_tag(ihost, request->io_tag);
+ spin_unlock_irq(&ihost->scic_lock);
+ }
+ }
+
+}
+
+/**
+ * sci_controller_stop() - This method will stop an individual controller
+ * object. This method will invoke the associated user callback upon
+ * completion. The completion callback is called when the following
+ * conditions are met:
+ * -# the method return status is SCI_SUCCESS.
+ * -# the controller has been quiesced.
+ * This method will ensure that all IO requests are quiesced, phys are
+ * stopped, and all additional operation by the hardware is halted.
+ * @controller: the handle to the controller object to stop.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ * stop operation should complete.
+ *
+ * The controller must be in the STARTED or STOPPED state. Indicate if the
+ * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
+ * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
+ * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
+ * controller is not either in the STARTED or STOPPED states.
+ */
+static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
+{
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev,
+ "SCIC Controller stop operation requested in "
+ "invalid state\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_mod_timer(&ihost->timer, timeout);
+ sci_change_state(&ihost->sm, SCIC_STOPPING);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_reset() - This method will reset the supplied core
+ * controller regardless of the state of said controller. This operation is
+ * considered destructive. In other words, all current operations are wiped
+ * out. No IO completions for outstanding devices occur. Outstanding IO
+ * requests are not aborted or completed at the actual remote device.
+ * @controller: the handle to the controller object to reset.
+ *
+ * Indicate if the controller reset method succeeded or failed in some way.
+ * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
+ * the controller reset operation is unable to complete.
+ */
+static enum sci_status sci_controller_reset(struct isci_host *ihost)
+{
+ switch (ihost->sm.current_state_id) {
+ case SCIC_RESET:
+ case SCIC_READY:
+ case SCIC_STOPPED:
+ case SCIC_FAILED:
+ /*
+ * The reset operation is not a graceful cleanup, just
+ * perform the state transition.
+ */
+ sci_change_state(&ihost->sm, SCIC_RESETTING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "SCIC Controller reset operation requested in "
+ "invalid state\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+void isci_host_deinit(struct isci_host *ihost)
+{
+ int i;
+
+ isci_host_change_state(ihost, isci_stopping);
+ for (i = 0; i < SCI_MAX_PORTS; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+ struct isci_remote_device *idev, *d;
+
+ list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
+ if (test_bit(IDEV_ALLOCATED, &idev->flags))
+ isci_remote_device_stop(ihost, idev);
+ }
+ }
+
+ set_bit(IHOST_STOP_PENDING, &ihost->flags);
+
+ spin_lock_irq(&ihost->scic_lock);
+ sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
+ spin_unlock_irq(&ihost->scic_lock);
+
+ wait_for_stop(ihost);
+ sci_controller_reset(ihost);
+
+ /* Cancel any/all outstanding port timers */
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+ del_timer_sync(&iport->timer.timer);
+ }
+
+ /* Cancel any/all outstanding phy timers */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ struct isci_phy *iphy = &ihost->phys[i];
+ del_timer_sync(&iphy->sata_timer.timer);
+ }
+
+ del_timer_sync(&ihost->port_agent.timer.timer);
+
+ del_timer_sync(&ihost->power_control.timer.timer);
+
+ del_timer_sync(&ihost->timer.timer);
+
+ del_timer_sync(&ihost->phy_timer.timer);
+}
+
+static void __iomem *scu_base(struct isci_host *isci_host)
+{
+ struct pci_dev *pdev = isci_host->pdev;
+ int id = isci_host->id;
+
+ return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
+}
+
+static void __iomem *smu_base(struct isci_host *isci_host)
+{
+ struct pci_dev *pdev = isci_host->pdev;
+ int id = isci_host->id;
+
+ return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
+}
+
+static void isci_user_parameters_get(struct sci_user_parameters *u)
+{
+ int i;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ struct sci_phy_user_params *u_phy = &u->phys[i];
+
+ u_phy->max_speed_generation = phy_gen;
+
+ /* we are not exporting these for now */
+ u_phy->align_insertion_frequency = 0x7f;
+ u_phy->in_connection_align_insertion_frequency = 0xff;
+ u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
+ }
+
+ u->stp_inactivity_timeout = stp_inactive_to;
+ u->ssp_inactivity_timeout = ssp_inactive_to;
+ u->stp_max_occupancy_timeout = stp_max_occ_to;
+ u->ssp_max_occupancy_timeout = ssp_max_occ_to;
+ u->no_outbound_task_timeout = no_outbound_task_to;
+ u->max_number_concurrent_device_spin_up = max_concurr_spinup;
+}
+
+static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
+static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_del_timer(&ihost->timer);
+}
+
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
+#define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
+#define INTERRUPT_COALESCE_NUMBER_MAX 256
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
+
+/**
+ * sci_controller_set_interrupt_coalescence() - This method allows the user to
+ * configure the interrupt coalescence.
+ * @controller: This parameter represents the handle to the controller object
+ * for which its interrupt coalesce register is overridden.
+ * @coalesce_number: Used to control the number of entries in the Completion
+ * Queue before an interrupt is generated. If the number of entries exceeds
+ * this number, an interrupt will be generated. The valid range of the input
+ * is [0, 256]. A setting of 0 results in coalescing being disabled.
+ * @coalesce_timeout: Timeout value in microseconds. The valid range of the
+ * input is [0, 2700000]. A setting of 0 is allowed and results in no
+ * interrupt coalescing timeout.
+ *
+ * Indicate if the user successfully set the interrupt coalesce parameters.
+ * SCI_SUCCESS The user successfully updated the interrupt coalescence.
+ * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
+ */
+static enum sci_status
+sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
+ u32 coalesce_number,
+ u32 coalesce_timeout)
+{
+ u8 timeout_encode = 0;
+ u32 min = 0;
+ u32 max = 0;
+
+ /* Check if the input parameters fall in the range. */
+ if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ /*
+ * Defined encoding for interrupt coalescing timeout:
+ * Value Min Max Units
+ * ----- --- --- -----
+ * 0 - - Disabled
+ * 1 13.3 20.0 ns
+ * 2 26.7 40.0
+ * 3 53.3 80.0
+ * 4 106.7 160.0
+ * 5 213.3 320.0
+ * 6 426.7 640.0
+ * 7 853.3 1280.0
+ * 8 1.7 2.6 us
+ * 9 3.4 5.1
+ * 10 6.8 10.2
+ * 11 13.7 20.5
+ * 12 27.3 41.0
+ * 13 54.6 81.9
+ * 14 109.2 163.8
+ * 15 218.5 327.7
+ * 16 436.9 655.4
+ * 17 873.8 1310.7
+ * 18 1.7 2.6 ms
+ * 19 3.5 5.2
+ * 20 7.0 10.5
+ * 21 14.0 21.0
+ * 22 28.0 41.9
+ * 23 55.9 83.9
+ * 24 111.8 167.8
+ * 25 223.7 335.5
+ * 26 447.4 671.1
+ * 27 894.8 1342.2
+ * 28 1.8 2.7 s
+ * Others Undefined */
+
+ /*
+ * Use the table above to decide the encode of interrupt coalescing timeout
+ * value for register writing. */
+ if (coalesce_timeout == 0)
+ timeout_encode = 0;
+ else {
+ /* make the timeout value in unit of (10 ns). */
+ coalesce_timeout = coalesce_timeout * 100;
+ min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
+ max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
+
+ /* get the encode of timeout for register writing. */
+ for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
+ timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
+ timeout_encode++) {
+ if (min <= coalesce_timeout && max > coalesce_timeout)
+ break;
+ else if (coalesce_timeout >= max && coalesce_timeout < min * 2
+ && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
+ if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
+ break;
+ else {
+ timeout_encode++;
+ break;
+ }
+ } else {
+ max = max * 2;
+ min = min * 2;
+ }
+ }
+
+ if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
+ /* the value is out of range. */
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+ }
+
+ writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
+ SMU_ICC_GEN_VAL(TIMER, timeout_encode),
+ &ihost->smu_registers->interrupt_coalesce_control);
+
+
+ ihost->interrupt_coalesce_number = (u16)coalesce_number;
+ ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
+
+ return SCI_SUCCESS;
+}
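+
+/*
+ * Illustrative note (not part of the driver logic): a minimal worked example
+ * of the encoding loop above, assuming a requested coalesce_timeout of 250 us
+ * (the default programmed in sci_controller_ready_state_enter() below). The
+ * value is first scaled to 10 ns units (25000); min/max start at 85/128 for
+ * encode 7 and double each iteration, so the loop exits at encode 15 where
+ * 21760 <= 25000 < 32768, i.e. the 218.5 us .. 327.7 us row of the table.
+ */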
+
+
+static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ /* set the default interrupt coalescence number and timeout value. */
+ sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
+}
+
+static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ /* disable interrupt coalescence. */
+ sci_controller_set_interrupt_coalescence(ihost, 0, 0);
+}
+
+static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
+{
+ u32 index;
+ enum sci_status status;
+ enum sci_status phy_status;
+
+ status = SCI_SUCCESS;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ phy_status = sci_phy_stop(&ihost->phys[index]);
+
+ if (phy_status != SCI_SUCCESS &&
+ phy_status != SCI_FAILURE_INVALID_STATE) {
+ status = SCI_FAILURE;
+
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed to stop "
+ "phy %d because of status %d.\n",
+ __func__,
+ ihost->phys[index].phy_index, phy_status);
+ }
+ }
+
+ return status;
+}
+
+static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
+{
+ u32 index;
+ enum sci_status port_status;
+ enum sci_status status = SCI_SUCCESS;
+
+ for (index = 0; index < ihost->logical_port_entries; index++) {
+ struct isci_port *iport = &ihost->ports[index];
+
+ port_status = sci_port_stop(iport);
+
+ if ((port_status != SCI_SUCCESS) &&
+ (port_status != SCI_FAILURE_INVALID_STATE)) {
+ status = SCI_FAILURE;
+
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed to "
+ "stop port %d because of status %d.\n",
+ __func__,
+ iport->logical_port_index,
+ port_status);
+ }
+ }
+
+ return status;
+}
+
+static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
+{
+ u32 index;
+ enum sci_status status;
+ enum sci_status device_status;
+
+ status = SCI_SUCCESS;
+
+ for (index = 0; index < ihost->remote_node_entries; index++) {
+ if (ihost->device_table[index] != NULL) {
+ /* / @todo What timeout value do we want to provide to this request? */
+ device_status = sci_remote_device_stop(ihost->device_table[index], 0);
+
+ if ((device_status != SCI_SUCCESS) &&
+ (device_status != SCI_FAILURE_INVALID_STATE)) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed "
+ "to stop device 0x%p because of "
+ "status %d.\n",
+ __func__,
+ ihost->device_table[index], device_status);
+ }
+ }
+ }
+
+ return status;
+}
+
+static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ /* Stop all of the components for this controller */
+ sci_controller_stop_phys(ihost);
+ sci_controller_stop_ports(ihost);
+ sci_controller_stop_devices(ihost);
+}
+
+static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_del_timer(&ihost->timer);
+}
+
+static void sci_controller_reset_hardware(struct isci_host *ihost)
+{
+ /* Disable interrupts so we don't take any spurious interrupts */
+ sci_controller_disable_interrupts(ihost);
+
+ /* Reset the SCU */
+ writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
+
+ /* Delay for 1ms before clearing the CQP and UFQPR. */
+ udelay(1000);
+
+ /* The write to the CQGR clears the CQP */
+ writel(0x00000000, &ihost->smu_registers->completion_queue_get);
+
+ /* The write to the UFQGP clears the UFQPR */
+ writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+}
+
+static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_controller_reset_hardware(ihost);
+ sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
+static const struct sci_base_state sci_controller_state_table[] = {
+ [SCIC_INITIAL] = {
+ .enter_state = sci_controller_initial_state_enter,
+ },
+ [SCIC_RESET] = {},
+ [SCIC_INITIALIZING] = {},
+ [SCIC_INITIALIZED] = {},
+ [SCIC_STARTING] = {
+ .exit_state = sci_controller_starting_state_exit,
+ },
+ [SCIC_READY] = {
+ .enter_state = sci_controller_ready_state_enter,
+ .exit_state = sci_controller_ready_state_exit,
+ },
+ [SCIC_RESETTING] = {
+ .enter_state = sci_controller_resetting_state_enter,
+ },
+ [SCIC_STOPPING] = {
+ .enter_state = sci_controller_stopping_state_enter,
+ .exit_state = sci_controller_stopping_state_exit,
+ },
+ [SCIC_STOPPED] = {},
+ [SCIC_FAILED] = {}
+};
+
+static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
+{
+ /* these defaults are overridden by the platform / firmware */
+ u16 index;
+
+ /* Default to APC mode. */
+ ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+
+ /* Default to at most one concurrent device spin up. */
+ ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
+
+ /* Default to no SSC operation. */
+ ihost->oem_parameters.controller.do_enable_ssc = false;
+
+ /* Initialize all of the port parameter information to narrow ports. */
+ for (index = 0; index < SCI_MAX_PORTS; index++) {
+ ihost->oem_parameters.ports[index].phy_mask = 0;
+ }
+
+ /* Initialize all of the phy parameter information. */
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ /* Default to 6G (i.e. Gen 3) for now. */
+ ihost->user_parameters.phys[index].max_speed_generation = 3;
+
+ /* the frequencies cannot be 0 */
+ ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
+ ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
+ ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
+
+ /*
+ * Previous Vitesse-based expanders had an arbitration issue that
+ * is worked around by having the upper 32-bits of the SAS address
+ * be a value greater than the Vitesse company identifier.
+ * Hence, the usage of 0x5FCFFFFF. */
+ ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
+ ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
+ }
+
+ ihost->user_parameters.stp_inactivity_timeout = 5;
+ ihost->user_parameters.ssp_inactivity_timeout = 5;
+ ihost->user_parameters.stp_max_occupancy_timeout = 5;
+ ihost->user_parameters.ssp_max_occupancy_timeout = 20;
+ ihost->user_parameters.no_outbound_task_timeout = 20;
+}
+
+static void controller_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
+ struct sci_base_state_machine *sm = &ihost->sm;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ if (sm->current_state_id == SCIC_STARTING)
+ sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
+ else if (sm->current_state_id == SCIC_STOPPING) {
+ sci_change_state(sm, SCIC_FAILED);
+ isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
+ } else /* / @todo Now what do we want to do in this case? */
+ dev_err(&ihost->pdev->dev,
+ "%s: Controller timer fired when controller was not "
+ "in a state being timed.\n",
+ __func__);
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static enum sci_status sci_controller_construct(struct isci_host *ihost,
+ void __iomem *scu_base,
+ void __iomem *smu_base)
+{
+ u8 i;
+
+ sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
+
+ ihost->scu_registers = scu_base;
+ ihost->smu_registers = smu_base;
+
+ sci_port_configuration_agent_construct(&ihost->port_agent);
+
+ /* Construct the ports for this controller */
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ sci_port_construct(&ihost->ports[i], i, ihost);
+ sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
+
+ /* Construct the phys for this controller */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ /* Add all the PHYs to the dummy port */
+ sci_phy_construct(&ihost->phys[i],
+ &ihost->ports[SCI_MAX_PORTS], i);
+ }
+
+ ihost->invalid_phy_mask = 0;
+
+ sci_init_timer(&ihost->timer, controller_timeout);
+
+ /* Initialize the User and OEM parameters to default values. */
+ sci_controller_set_default_config_parameters(ihost);
+
+ return sci_controller_reset(ihost);
+}
+
+int sci_oem_parameters_validate(struct sci_oem_params *oem)
+{
+ int i;
+
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ if (oem->phys[i].sas_address.high == 0 &&
+ oem->phys[i].sas_address.low == 0)
+ return -EINVAL;
+
+ if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ if (oem->ports[i].phy_mask != 0)
+ return -EINVAL;
+ } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+ u8 phy_mask = 0;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ phy_mask |= oem->ports[i].phy_mask;
+
+ if (phy_mask == 0)
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
+{
+ u32 state = ihost->sm.current_state_id;
+
+ if (state == SCIC_RESET ||
+ state == SCIC_INITIALIZING ||
+ state == SCIC_INITIALIZED) {
+
+ if (sci_oem_parameters_validate(&ihost->oem_parameters))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INVALID_STATE;
+}
+
+static void power_control_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
+ struct isci_phy *iphy;
+ unsigned long flags;
+ u8 i;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ ihost->power_control.phys_granted_power = 0;
+
+ if (ihost->power_control.phys_waiting == 0) {
+ ihost->power_control.timer_started = false;
+ goto done;
+ }
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+
+ if (ihost->power_control.phys_waiting == 0)
+ break;
+
+ iphy = ihost->power_control.requesters[i];
+ if (iphy == NULL)
+ continue;
+
+ if (ihost->power_control.phys_granted_power >=
+ ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
+ break;
+
+ ihost->power_control.requesters[i] = NULL;
+ ihost->power_control.phys_waiting--;
+ ihost->power_control.phys_granted_power++;
+ sci_phy_consume_power_handler(iphy);
+ }
+
+ /*
+ * It doesn't matter if the power list is empty, we need to start the
+ * timer in case another phy becomes ready.
+ */
+ sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+ ihost->power_control.timer_started = true;
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+ struct isci_phy *iphy)
+{
+ BUG_ON(iphy == NULL);
+
+ if (ihost->power_control.phys_granted_power <
+ ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
+ ihost->power_control.phys_granted_power++;
+ sci_phy_consume_power_handler(iphy);
+
+ /*
+ * stop and start the power_control timer. When the timer fires,
+ * phys_granted_power will be reset to 0
+ */
+ if (ihost->power_control.timer_started)
+ sci_del_timer(&ihost->power_control.timer);
+
+ sci_mod_timer(&ihost->power_control.timer,
+ SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+ ihost->power_control.timer_started = true;
+
+ } else {
+ /* Add the phy in the waiting list */
+ ihost->power_control.requesters[iphy->phy_index] = iphy;
+ ihost->power_control.phys_waiting++;
+ }
+}
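+
+/*
+ * Illustrative note (not part of the driver logic): with the default
+ * max_concurrent_dev_spin_up of 1 (see
+ * sci_controller_set_default_config_parameters()), the first phy requesting
+ * power is granted it immediately, while a second phy arriving within the
+ * same SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL is parked in requesters[]
+ * and only consumes power once power_control_timeout() fires and resets
+ * phys_granted_power.
+ */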
+
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+ struct isci_phy *iphy)
+{
+ BUG_ON(iphy == NULL);
+
+ if (ihost->power_control.requesters[iphy->phy_index])
+ ihost->power_control.phys_waiting--;
+
+ ihost->power_control.requesters[iphy->phy_index] = NULL;
+}
+
+#define AFE_REGISTER_WRITE_DELAY 10
+
+/* Initialize the AFE for this phy index. We need to read the AFE setup from
+ * the OEM parameters
+ */
+static void sci_controller_afe_initialization(struct isci_host *ihost)
+{
+ const struct sci_oem_params *oem = &ihost->oem_parameters;
+ struct pci_dev *pdev = ihost->pdev;
+ u32 afe_status;
+ u32 phy_id;
+
+ /* Clear DFX Status registers */
+ writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ if (is_b0(pdev)) {
+ /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
+ * Timer, PM Stagger Timer */
+ writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /* Configure bias currents to normal */
+ if (is_a2(pdev))
+ writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
+ else if (is_b0(pdev) || is_c0(pdev))
+ writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
+
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Enable PLL */
+ if (is_b0(pdev) || is_c0(pdev))
+ writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
+ else
+ writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
+
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Wait for the PLL to lock */
+ do {
+ afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } while ((afe_status & 0x00001000) == 0);
+
+ if (is_a2(pdev)) {
+ /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
+ writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+ const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+
+ if (is_b0(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } else if (is_c0(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /*
+ * All defaults, except the Receive Word Alignment/Comma Detect
+ * Enable....(0xe800) */
+ writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } else {
+ /*
+ * All defaults, except the Receive Word Alignment/Comma Detect
+ * Enable....(0xe800) */
+ writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /*
+ * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+ * & increase TX int & ext bias 20%....(0xe85c) */
+ if (is_a2(pdev))
+ writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ else if (is_b0(pdev)) {
+ /* Power down TX and RX (PWRDNTX and PWRDNRX) */
+ writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /*
+ * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+ * & increase TX int & ext bias 20%....(0xe85c) */
+ writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ } else {
+ writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /*
+ * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+ * & increase TX int & ext bias 20%....(0xe85c) */
+ writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ }
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ if (is_a2(pdev)) {
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /*
+ * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
+ * RDD=0x0(RX Detect Enabled) ....(0xe800) */
+ writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Leave DFE/FFE on */
+ if (is_a2(pdev))
+ writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ else if (is_b0(pdev)) {
+ writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ } else {
+ writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ }
+
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control0,
+ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control1,
+ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control2,
+ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control3,
+ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /* Transfer control to the PEs */
+ writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+}
+
+static void sci_controller_initialize_power_control(struct isci_host *ihost)
+{
+ sci_init_timer(&ihost->power_control.timer, power_control_timeout);
+
+ memset(ihost->power_control.requesters, 0,
+ sizeof(ihost->power_control.requesters));
+
+ ihost->power_control.phys_waiting = 0;
+ ihost->power_control.phys_granted_power = 0;
+}
+
+static enum sci_status sci_controller_initialize(struct isci_host *ihost)
+{
+ struct sci_base_state_machine *sm = &ihost->sm;
+ enum sci_status result = SCI_FAILURE;
+ unsigned long i, state, val;
+
+ if (ihost->sm.current_state_id != SCIC_RESET) {
+ dev_warn(&ihost->pdev->dev,
+ "SCIC Controller initialize operation requested "
+ "in invalid state\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(sm, SCIC_INITIALIZING);
+
+ sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
+
+ ihost->next_phy_to_start = 0;
+ ihost->phy_startup_timer_pending = false;
+
+ sci_controller_initialize_power_control(ihost);
+
+ /*
+ * There is nothing to do here for B0 since we do not have to
+ * program the AFE registers.
+ * / @todo The AFE settings are supposed to be correct for the B0 but
+ * / presently they seem to be wrong. */
+ sci_controller_afe_initialization(ihost);
+
+
+ /* Take the hardware out of reset */
+ writel(0, &ihost->smu_registers->soft_reset_control);
+
+ /*
+ * / @todo Provide meaningful error code for hardware failure
+ * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
+ for (i = 100; i >= 1; i--) {
+ u32 status;
+
+ /* Loop until the hardware reports success */
+ udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
+ status = readl(&ihost->smu_registers->control_status);
+
+ if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
+ break;
+ }
+ if (i == 0)
+ goto out;
+
+ /*
+ * Determine the actual device capacities that the
+ * hardware will support */
+ val = readl(&ihost->smu_registers->device_context_capacity);
+
+ /* Record the smaller of the two capacity values */
+ ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
+ ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
+ ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
+
+ /*
+ * Make all PEs that are unassigned match up with the
+ * logical ports
+ */
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct scu_port_task_scheduler_group_registers __iomem
+ *ptsg = &ihost->scu_registers->peg0.ptsg;
+
+ writel(i, &ptsg->protocol_engine[i]);
+ }
+
+ /* Initialize hardware PCI Relaxed ordering in DMA engines */
+ val = readl(&ihost->scu_registers->sdma.pdma_configuration);
+ val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+ writel(val, &ihost->scu_registers->sdma.pdma_configuration);
+
+ val = readl(&ihost->scu_registers->sdma.cdma_configuration);
+ val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+ writel(val, &ihost->scu_registers->sdma.cdma_configuration);
+
+ /*
+ * Initialize the PHYs before the PORTs because the PHY registers
+ * are accessed during the port initialization.
+ */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ result = sci_phy_initialize(&ihost->phys[i],
+ &ihost->scu_registers->peg0.pe[i].tl,
+ &ihost->scu_registers->peg0.pe[i].ll);
+ if (result != SCI_SUCCESS)
+ goto out;
+ }
+
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+
+ iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
+ iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
+ iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
+ }
+
+ result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
+
+ out:
+ /* Advance the controller state machine */
+ if (result == SCI_SUCCESS)
+ state = SCIC_INITIALIZED;
+ else
+ state = SCIC_FAILED;
+ sci_change_state(sm, state);
+
+ return result;
+}
+
+static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
+ struct sci_user_parameters *sci_parms)
+{
+ u32 state = ihost->sm.current_state_id;
+
+ if (state == SCIC_RESET ||
+ state == SCIC_INITIALIZING ||
+ state == SCIC_INITIALIZED) {
+ u16 index;
+
+ /*
+ * Validate the user parameters. If they are not legal, then
+ * return a failure.
+ */
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct sci_phy_user_params *user_phy;
+
+ user_phy = &sci_parms->phys[index];
+
+ if (!((user_phy->max_speed_generation <=
+ SCIC_SDS_PARM_MAX_SPEED) &&
+ (user_phy->max_speed_generation >
+ SCIC_SDS_PARM_NO_SPEED)))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ if (user_phy->in_connection_align_insertion_frequency <
+ 3)
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ if ((user_phy->in_connection_align_insertion_frequency <
+ 3) ||
+ (user_phy->align_insertion_frequency == 0) ||
+ (user_phy->
+ notify_enable_spin_up_insertion_frequency ==
+ 0))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+ }
+
+ if ((sci_parms->stp_inactivity_timeout == 0) ||
+ (sci_parms->ssp_inactivity_timeout == 0) ||
+ (sci_parms->stp_max_occupancy_timeout == 0) ||
+ (sci_parms->ssp_max_occupancy_timeout == 0) ||
+ (sci_parms->no_outbound_task_timeout == 0))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INVALID_STATE;
+}
+
+static int sci_controller_mem_init(struct isci_host *ihost)
+{
+ struct device *dev = &ihost->pdev->dev;
+ dma_addr_t dma;
+ size_t size;
+ int err;
+
+ size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
+ ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+ if (!ihost->completion_queue)
+ return -ENOMEM;
+
+ writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
+ writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
+
+ size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
+ ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
+ GFP_KERNEL);
+ if (!ihost->remote_node_context_table)
+ return -ENOMEM;
+
+ writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
+ writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
+
+ size = ihost->task_context_entries * sizeof(struct scu_task_context);
+ ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+ if (!ihost->task_context_table)
+ return -ENOMEM;
+
+ ihost->task_context_dma = dma;
+ writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
+ writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
+
+ err = sci_unsolicited_frame_control_construct(ihost);
+ if (err)
+ return err;
+
+ /*
+ * Inform the silicon as to the location of the UF headers and
+ * address table.
+ */
+ writel(lower_32_bits(ihost->uf_control.headers.physical_address),
+ &ihost->scu_registers->sdma.uf_header_base_address_lower);
+ writel(upper_32_bits(ihost->uf_control.headers.physical_address),
+ &ihost->scu_registers->sdma.uf_header_base_address_upper);
+
+ writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
+ &ihost->scu_registers->sdma.uf_address_table_lower);
+ writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
+ &ihost->scu_registers->sdma.uf_address_table_upper);
+
+ return 0;
+}
+
+int isci_host_init(struct isci_host *ihost)
+{
+ int err = 0, i;
+ enum sci_status status;
+ struct sci_user_parameters sci_user_params;
+ struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+
+ spin_lock_init(&ihost->state_lock);
+ spin_lock_init(&ihost->scic_lock);
+ init_waitqueue_head(&ihost->eventq);
+
+ isci_host_change_state(ihost, isci_starting);
+
+ status = sci_controller_construct(ihost, scu_base(ihost),
+ smu_base(ihost));
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: sci_controller_construct failed - status = %x\n",
+ __func__,
+ status);
+ return -ENODEV;
+ }
+
+ ihost->sas_ha.dev = &ihost->pdev->dev;
+ ihost->sas_ha.lldd_ha = ihost;
+
+ /*
+ * grab initial values stored in the controller object for OEM and USER
+ * parameters
+ */
+ isci_user_parameters_get(&sci_user_params);
+ status = sci_user_parameters_set(ihost, &sci_user_params);
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: sci_user_parameters_set failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ /* grab any OEM parameters specified in orom */
+ if (pci_info->orom) {
+ status = isci_parse_oem_parameters(&ihost->oem_parameters,
+ pci_info->orom,
+ ihost->id);
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "parsing firmware oem parameters failed\n");
+ return -EINVAL;
+ }
+ }
+
+ status = sci_oem_parameters_set(ihost);
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: sci_oem_parameters_set failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ tasklet_init(&ihost->completion_tasklet,
+ isci_host_completion_routine, (unsigned long)ihost);
+
+ INIT_LIST_HEAD(&ihost->requests_to_complete);
+ INIT_LIST_HEAD(&ihost->requests_to_errorback);
+
+ spin_lock_irq(&ihost->scic_lock);
+ status = sci_controller_initialize(ihost);
+ spin_unlock_irq(&ihost->scic_lock);
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: sci_controller_initialize failed -"
+ " status = 0x%x\n",
+ __func__, status);
+ return -ENODEV;
+ }
+
+ err = sci_controller_mem_init(ihost);
+ if (err)
+ return err;
+
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ isci_port_init(&ihost->ports[i], ihost, i);
+
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ isci_phy_init(&ihost->phys[i], ihost, i);
+
+ for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+ struct isci_remote_device *idev = &ihost->devices[i];
+
+ INIT_LIST_HEAD(&idev->reqs_in_process);
+ INIT_LIST_HEAD(&idev->node);
+ }
+
+ for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+ struct isci_request *ireq;
+ dma_addr_t dma;
+
+ ireq = dmam_alloc_coherent(&ihost->pdev->dev,
+ sizeof(struct isci_request), &dma,
+ GFP_KERNEL);
+ if (!ireq)
+ return -ENOMEM;
+
+ ireq->tc = &ihost->task_context_table[i];
+ ireq->owning_controller = ihost;
+ spin_lock_init(&ireq->state_lock);
+ ireq->request_daddr = dma;
+ ireq->isci_host = ihost;
+ ihost->reqs[i] = ireq;
+ }
+
+ return 0;
+}
+
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ switch (ihost->sm.current_state_id) {
+ case SCIC_STARTING:
+ sci_del_timer(&ihost->phy_timer);
+ ihost->phy_startup_timer_pending = false;
+ ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+ iport, iphy);
+ sci_controller_start_next_phy(ihost);
+ break;
+ case SCIC_READY:
+ ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+ iport, iphy);
+ break;
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCIC Controller linkup event from phy %d in "
+ "unexpected state %d\n", __func__, iphy->phy_index,
+ ihost->sm.current_state_id);
+ }
+}
+
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ switch (ihost->sm.current_state_id) {
+ case SCIC_STARTING:
+ case SCIC_READY:
+ ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
+ iport, iphy);
+ break;
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCIC Controller linkdown event from phy %d in "
+ "unexpected state %d\n",
+ __func__,
+ iphy->phy_index,
+ ihost->sm.current_state_id);
+ }
+}
+
+static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
+{
+ u32 index;
+
+ for (index = 0; index < ihost->remote_node_entries; index++) {
+ if ((ihost->device_table[index] != NULL) &&
+ (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
+ return true;
+ }
+
+ return false;
+}
+
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ if (ihost->sm.current_state_id != SCIC_STOPPING) {
+ dev_dbg(&ihost->pdev->dev,
+ "SCIC Controller 0x%p remote device stopped event "
+ "from device 0x%p in unexpected state %d\n",
+ ihost, idev,
+ ihost->sm.current_state_id);
+ return;
+ }
+
+ if (!sci_controller_has_remote_devices_stopping(ihost))
+ sci_change_state(&ihost->sm, SCIC_STOPPED);
+}
+
+void sci_controller_post_request(struct isci_host *ihost, u32 request)
+{
+ dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
+ __func__, ihost->id, request);
+
+ writel(request, &ihost->smu_registers->post_context_port);
+}
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
+{
+ u16 task_index;
+ u16 task_sequence;
+
+ task_index = ISCI_TAG_TCI(io_tag);
+
+ if (task_index < ihost->task_context_entries) {
+ struct isci_request *ireq = ihost->reqs[task_index];
+
+ if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
+ task_sequence = ISCI_TAG_SEQ(io_tag);
+
+ if (task_sequence == ihost->io_request_sequence[task_index])
+ return ireq;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * This method allocates a remote node index and reserves the remote node
+ * context space for use. This method can fail if there are no more remote
+ * node indexes available.
+ * @ihost: This is the controller object which contains the set of
+ * free remote node ids
+ * @idev: This is the device object which is requesting a remote node
+ * id
+ * @node_id: This is the remote node id that is assigned to the device if one
+ * is available
+ *
+ * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no remote
+ * node indexes available.
+ */
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 *node_id)
+{
+ u16 node_index;
+ u32 remote_node_count = sci_remote_device_node_count(idev);
+
+ node_index = sci_remote_node_table_allocate_remote_node(
+ &ihost->available_remote_nodes, remote_node_count
+ );
+
+ if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ ihost->device_table[node_index] = idev;
+
+ *node_id = node_index;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+}
+
+void sci_controller_free_remote_node_context(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 node_id)
+{
+ u32 remote_node_count = sci_remote_device_node_count(idev);
+
+ if (ihost->device_table[node_id] == idev) {
+ ihost->device_table[node_id] = NULL;
+
+ sci_remote_node_table_release_remote_node_index(
+ &ihost->available_remote_nodes, remote_node_count, node_id
+ );
+ }
+}
+
+void sci_controller_copy_sata_response(void *response_buffer,
+ void *frame_header,
+ void *frame_buffer)
+{
+ /* XXX type safety? */
+ memcpy(response_buffer, frame_header, sizeof(u32));
+
+ memcpy(response_buffer + sizeof(u32),
+ frame_buffer,
+ sizeof(struct dev_to_host_fis) - sizeof(u32));
+}
+
+void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
+{
+ if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
+ writel(ihost->uf_control.get,
+ &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+}
+
+void isci_tci_free(struct isci_host *ihost, u16 tci)
+{
+ u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
+
+ ihost->tci_pool[tail] = tci;
+ ihost->tci_tail = tail + 1;
+}
+
+static u16 isci_tci_alloc(struct isci_host *ihost)
+{
+ u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
+ u16 tci = ihost->tci_pool[head];
+
+ ihost->tci_head = head + 1;
+ return tci;
+}
+
+static u16 isci_tci_space(struct isci_host *ihost)
+{
+ return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
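+
+/*
+ * Illustrative note (not part of the driver logic): tci_head and tci_tail are
+ * free-running counters over the tci_pool[] ring; isci_tci_alloc()/free() and
+ * the CIRC_SPACE()/CIRC_CNT() helpers all mask with SCI_MAX_IO_REQUESTS - 1,
+ * which assumes SCI_MAX_IO_REQUESTS is a power of two.
+ */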
+
+u16 isci_alloc_tag(struct isci_host *ihost)
+{
+ if (isci_tci_space(ihost)) {
+ u16 tci = isci_tci_alloc(ihost);
+ u8 seq = ihost->io_request_sequence[tci];
+
+ return ISCI_TAG(seq, tci);
+ }
+
+ return SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
+{
+ u16 tci = ISCI_TAG_TCI(io_tag);
+ u16 seq = ISCI_TAG_SEQ(io_tag);
+
+ /* prevent tail from passing head */
+ if (isci_tci_active(ihost) == 0)
+ return SCI_FAILURE_INVALID_IO_TAG;
+
+ if (seq == ihost->io_request_sequence[tci]) {
+ ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
+
+ isci_tci_free(ihost, tci);
+
+ return SCI_SUCCESS;
+ }
+ return SCI_FAILURE_INVALID_IO_TAG;
+}
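+
+/*
+ * Illustrative note (not part of the driver logic): an io_tag packs a per-TCI
+ * sequence number together with the task context index, roughly:
+ *
+ *	u16 tag = isci_alloc_tag(ihost);
+ *	u16 tci = ISCI_TAG_TCI(tag);	// index into ihost->reqs[]
+ *	u16 seq = ISCI_TAG_SEQ(tag);	// must match io_request_sequence[tci]
+ *	...
+ *	isci_free_tag(ihost, tag);	// bumps the sequence, recycles the TCI
+ *
+ * A stale tag (sequence mismatch) is rejected both here and in
+ * sci_request_by_tag() above.
+ */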
+
+enum sci_status sci_controller_start_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_remote_device_start_io(ihost, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+ sci_controller_post_request(ihost, ireq->post_context);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ /* terminate an ongoing (i.e. started) core IO request. This does not
+ * abort the IO request at the target, but rather removes the IO
+ * request from the host controller.
+ */
+ enum sci_status status;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev,
+ "invalid state to terminate request\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_io_request_terminate(ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ /*
+ * Utilize the original post context command and OR in the POST_TC_ABORT
+ * request sub-type.
+ */
+ sci_controller_post_request(ihost,
+ ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_complete_io() - This method will perform core specific
+ * completion operations for an IO request. After this method is invoked,
+ * the user should consider the IO request as invalid until it is properly
+ * reused (i.e. re-constructed).
+ * @ihost: The handle to the controller object for which to complete the
+ * IO request.
+ * @idev: The handle to the remote device object for which to complete
+ * the IO request.
+ * @ireq: the handle to the io request object to complete.
+ */
+enum sci_status sci_controller_complete_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+ u16 index;
+
+ switch (ihost->sm.current_state_id) {
+ case SCIC_STOPPING:
+ /* XXX: Implement this function */
+ return SCI_FAILURE;
+ case SCIC_READY:
+ status = sci_remote_device_complete_io(ihost, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ index = ISCI_TAG_TCI(ireq->io_tag);
+ clear_bit(IREQ_ACTIVE, &ireq->flags);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+}
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+ sci_controller_post_request(ihost, ireq->post_context);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_start_task() - This method is called by the SCIC user to
+ * send/start a framework task management request.
+ * @controller: the handle to the controller object for which to start the task
+ * management request.
+ * @remote_device: the handle to the remote device object for which to start
+ * the task management request.
+ * @task_request: the handle to the task request object to start.
+ */
+enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller starting task from invalid "
+ "state\n",
+ __func__);
+ return SCI_TASK_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_remote_device_start_task(ihost, idev, ireq);
+ switch (status) {
+ case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+
+ /*
+ * We will let the framework know this task request started successfully,
+ * although the core is still working on starting the request (it will post
+ * the TC when the RNC is resumed).
+ */
+ return SCI_SUCCESS;
+ case SCI_SUCCESS:
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+ sci_controller_post_request(ihost, ireq->post_context);
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
new file mode 100644
index 00000000000..062101a39f7
--- /dev/null
+++ b/drivers/scsi/isci/host.h
@@ -0,0 +1,542 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _SCI_HOST_H_
+#define _SCI_HOST_H_
+
+#include "remote_device.h"
+#include "phy.h"
+#include "isci.h"
+#include "remote_node_table.h"
+#include "registers.h"
+#include "unsolicited_frame_control.h"
+#include "probe_roms.h"
+
+struct isci_request;
+struct scu_task_context;
+
+
+/**
+ * struct sci_power_control -
+ *
+ * This structure defines the fields for managing power control for direct
+ * attached disk devices.
+ */
+struct sci_power_control {
+ /**
+ * This field is set when the power control timer is running and cleared when
+ * it is not.
+ */
+ bool timer_started;
+
+ /**
+	 * Timer to control when the direct attached disks can consume power.
+ */
+ struct sci_timer timer;
+
+ /**
+ * This field is used to keep track of how many phys are put into the
+ * requesters field.
+ */
+ u8 phys_waiting;
+
+ /**
+	 * This field is used to keep track of how many phys have been granted permission to consume power.
+ */
+ u8 phys_granted_power;
+
+ /**
+ * This field is an array of phys that we are waiting on. The phys are direct
+ * mapped into requesters via struct sci_phy.phy_index
+ */
+ struct isci_phy *requesters[SCI_MAX_PHYS];
+
+};
+
+struct sci_port_configuration_agent;
+typedef void (*port_config_fn)(struct isci_host *,
+ struct sci_port_configuration_agent *,
+ struct isci_port *, struct isci_phy *);
+
+struct sci_port_configuration_agent {
+ u16 phy_configured_mask;
+ u16 phy_ready_mask;
+ struct {
+ u8 min_index;
+ u8 max_index;
+ } phy_valid_port_range[SCI_MAX_PHYS];
+ bool timer_pending;
+ port_config_fn link_up_handler;
+ port_config_fn link_down_handler;
+ struct sci_timer timer;
+};
+
+/**
+ * isci_host - primary host/controller object
+ * @timer: timeout start/stop operations
+ * @device_table: rni (hw remote node index) to remote device lookup table
+ * @available_remote_nodes: rni allocator
+ * @power_control: manage device spin up
+ * @io_request_sequence: generation number for tci's (task contexts)
+ * @task_context_table: hw task context table
+ * @remote_node_context_table: hw remote node context table
+ * @completion_queue: hw-producer driver-consumer communication ring
+ * @completion_queue_get: tracks the driver 'head' of the ring to notify hw
+ * @logical_port_entries: min({driver|silicon}-supported-port-count)
+ * @remote_node_entries: min({driver|silicon}-supported-node-count)
+ * @task_context_entries: min({driver|silicon}-supported-task-count)
+ * @phy_timer: phy startup timer
+ * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for
+ * the phy index is set so further notifications are not
+ * made. Once the phy reports link up and is made part of a
+ * port then this bit is cleared.
+ *
+ */
+struct isci_host {
+ struct sci_base_state_machine sm;
+ /* XXX can we time this externally */
+ struct sci_timer timer;
+ /* XXX drop reference module params directly */
+ struct sci_user_parameters user_parameters;
+ /* XXX no need to be a union */
+ struct sci_oem_params oem_parameters;
+ struct sci_port_configuration_agent port_agent;
+ struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
+ struct sci_remote_node_table available_remote_nodes;
+ struct sci_power_control power_control;
+ u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
+ struct scu_task_context *task_context_table;
+ dma_addr_t task_context_dma;
+ union scu_remote_node_context *remote_node_context_table;
+ u32 *completion_queue;
+ u32 completion_queue_get;
+ u32 logical_port_entries;
+ u32 remote_node_entries;
+ u32 task_context_entries;
+ struct sci_unsolicited_frame_control uf_control;
+
+ /* phy startup */
+ struct sci_timer phy_timer;
+ /* XXX kill */
+ bool phy_startup_timer_pending;
+ u32 next_phy_to_start;
+ /* XXX convert to unsigned long and use bitops */
+ u8 invalid_phy_mask;
+
+ /* TODO attempt dynamic interrupt coalescing scheme */
+ u16 interrupt_coalesce_number;
+ u32 interrupt_coalesce_timeout;
+ struct smu_registers __iomem *smu_registers;
+ struct scu_registers __iomem *scu_registers;
+
+ u16 tci_head;
+ u16 tci_tail;
+ u16 tci_pool[SCI_MAX_IO_REQUESTS];
+
+ int id; /* unique within a given pci device */
+ struct isci_phy phys[SCI_MAX_PHYS];
+ struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
+ struct sas_ha_struct sas_ha;
+
+ spinlock_t state_lock;
+ struct pci_dev *pdev;
+ enum isci_status status;
+ #define IHOST_START_PENDING 0
+ #define IHOST_STOP_PENDING 1
+ unsigned long flags;
+ wait_queue_head_t eventq;
+ struct Scsi_Host *shost;
+ struct tasklet_struct completion_tasklet;
+ struct list_head requests_to_complete;
+ struct list_head requests_to_errorback;
+ spinlock_t scic_lock;
+ struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
+ struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
+};
+
+/**
+ * enum sci_controller_states - This enumeration depicts all the states
+ * for the common controller state machine.
+ */
+enum sci_controller_states {
+ /**
+ * Simply the initial state for the base controller state machine.
+ */
+ SCIC_INITIAL = 0,
+
+ /**
+ * This state indicates that the controller is reset. The memory for
+	 * the controller is in its initial state, but the controller requires
+ * initialization.
+ * This state is entered from the INITIAL state.
+ * This state is entered from the RESETTING state.
+ */
+ SCIC_RESET,
+
+ /**
+ * This state is typically an action state that indicates the controller
+ * is in the process of initialization. In this state no new IO operations
+ * are permitted.
+ * This state is entered from the RESET state.
+ */
+ SCIC_INITIALIZING,
+
+ /**
+ * This state indicates that the controller has been successfully
+ * initialized. In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZING state.
+ */
+ SCIC_INITIALIZED,
+
+ /**
+	 * This state indicates that the controller is in the process of becoming
+ * ready (i.e. starting). In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZED state.
+ */
+ SCIC_STARTING,
+
+ /**
+ * This state indicates the controller is now ready. Thus, the user
+ * is able to perform IO operations on the controller.
+ * This state is entered from the STARTING state.
+ */
+ SCIC_READY,
+
+ /**
+ * This state is typically an action state that indicates the controller
+ * is in the process of resetting. Thus, the user is unable to perform
+ * IO operations on the controller. A reset is considered destructive in
+ * most cases.
+ * This state is entered from the READY state.
+ * This state is entered from the FAILED state.
+ * This state is entered from the STOPPED state.
+ */
+ SCIC_RESETTING,
+
+ /**
+ * This state indicates that the controller is in the process of stopping.
+ * In this state no new IO operations are permitted, but existing IO
+ * operations are allowed to complete.
+ * This state is entered from the READY state.
+ */
+ SCIC_STOPPING,
+
+ /**
+ * This state indicates that the controller has successfully been stopped.
+ * In this state no new IO operations are permitted.
+ * This state is entered from the STOPPING state.
+ */
+ SCIC_STOPPED,
+
+ /**
+ * This state indicates that the controller could not successfully be
+ * initialized. In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZING state.
+ * This state is entered from the STARTING state.
+ * This state is entered from the STOPPING state.
+ * This state is entered from the RESETTING state.
+ */
+ SCIC_FAILED,
+};
+
+/**
+ * struct isci_pci_info - This structure represents the PCI function containing the
+ * controllers. Depending on PCI SKU, there could be up to 2 controllers in
+ * the PCI function.
+ */
+#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
+
+struct isci_pci_info {
+ struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
+ struct isci_host *hosts[SCI_MAX_CONTROLLERS];
+ struct isci_orom *orom;
+};
+
+static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
+{
+ return pci_get_drvdata(pdev);
+}
+
+#define for_each_isci_host(id, ihost, pdev) \
+ for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
+ id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
+ ihost = to_pci_info(pdev)->hosts[++id])
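A minimal usage sketch (the wrapper function and log message are hypothetical; the macro itself is defined above and used later in init.c): walk every controller behind one PCI function, stopping at the first empty slot:

static void example_log_hosts(struct pci_dev *pdev)
{
	struct isci_host *ihost;
	int id;

	/* iterates to_pci_info(pdev)->hosts[] until a NULL entry or the array end */
	for_each_isci_host(id, ihost, pdev)
		dev_info(&pdev->dev, "isci controller %d present\n", ihost->id);
}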
+
+static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
+{
+ return isci_host->status;
+}
+
+static inline void isci_host_change_state(struct isci_host *isci_host,
+ enum isci_status status)
+{
+ unsigned long flags;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_host = %p, state = 0x%x",
+ __func__,
+ isci_host,
+ status);
+ spin_lock_irqsave(&isci_host->state_lock, flags);
+ isci_host->status = status;
+ spin_unlock_irqrestore(&isci_host->state_lock, flags);
+}
+
+static inline void wait_for_start(struct isci_host *ihost)
+{
+ wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
+}
+
+static inline void wait_for_stop(struct isci_host *ihost)
+{
+ wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
+}
+
+static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
+}
+
+static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
+}
+
+static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
+{
+ return dev->port->ha->lldd_ha;
+}
+
+/* we always use protocol engine group zero */
+#define ISCI_PEG 0
+
+/* see sci_controller_io_tag_allocate|free for how seq and tci are built */
+#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
+
+/* these are returned by the hardware, so sanitize them */
+#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
+#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
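As a worked illustration of the tag layout (values chosen arbitrarily): with SCI_MAX_SEQ = 16 and SCI_MAX_IO_REQUESTS = 256, the sequence number occupies the top four bits and the task context index the low byte:

	u16 tag = ISCI_TAG(3, 0x2a);	/* 0x302a */
	u8 seq  = ISCI_TAG_SEQ(tag);	/* 3 */
	u16 tci = ISCI_TAG_TCI(tag);	/* 0x2a */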
+
+/* expander attached sata devices require 3 rnc slots */
+static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
+{
+ struct domain_device *dev = idev->domain_dev;
+
+ if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
+ !idev->is_direct_attached)
+ return SCU_STP_REMOTE_NODE_COUNT;
+ return SCU_SSP_REMOTE_NODE_COUNT;
+}
+
+/**
+ * sci_controller_clear_invalid_phy() -
+ *
+ * This macro will clear the bit in the invalid phy mask for this controller
+ * object. This is used to control messages reported for invalid link up
+ * notifications.
+ */
+#define sci_controller_clear_invalid_phy(controller, phy) \
+ ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
+
+static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
+{
+
+ if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
+ return NULL;
+
+ return &iphy->isci_port->isci_host->pdev->dev;
+}
+
+static inline struct device *sciport_to_dev(struct isci_port *iport)
+{
+
+ if (!iport || !iport->isci_host)
+ return NULL;
+
+ return &iport->isci_host->pdev->dev;
+}
+
+static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
+{
+ if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
+ return NULL;
+
+ return &idev->isci_port->isci_host->pdev->dev;
+}
+
+static inline bool is_a2(struct pci_dev *pdev)
+{
+	return pdev->revision < 4;
+}
+
+static inline bool is_b0(struct pci_dev *pdev)
+{
+	return pdev->revision == 4;
+}
+
+static inline bool is_c0(struct pci_dev *pdev)
+{
+	return pdev->revision >= 5;
+}
+
+void sci_controller_post_request(struct isci_host *ihost,
+ u32 request);
+void sci_controller_release_frame(struct isci_host *ihost,
+ u32 frame_index);
+void sci_controller_copy_sata_response(void *response_buffer,
+ void *frame_header,
+ void *frame_buffer);
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 *node_id);
+void sci_controller_free_remote_node_context(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 node_id);
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost,
+ u16 io_tag);
+
+void sci_controller_power_control_queue_insert(
+ struct isci_host *ihost,
+ struct isci_phy *iphy);
+
+void sci_controller_power_control_queue_remove(
+ struct isci_host *ihost,
+ struct isci_phy *iphy);
+
+void sci_controller_link_up(
+ struct isci_host *ihost,
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+void sci_controller_link_down(
+ struct isci_host *ihost,
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+void sci_controller_remote_device_stopped(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+void sci_controller_copy_task_context(
+ struct isci_host *ihost,
+ struct isci_request *ireq);
+
+void sci_controller_register_setup(struct isci_host *ihost);
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq);
+int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
+void isci_host_scan_start(struct Scsi_Host *);
+u16 isci_alloc_tag(struct isci_host *ihost);
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
+void isci_tci_free(struct isci_host *ihost, u16 tci);
+
+int isci_host_init(struct isci_host *);
+
+void isci_host_init_controller_names(
+ struct isci_host *isci_host,
+ unsigned int controller_idx);
+
+void isci_host_deinit(
+ struct isci_host *);
+
+void isci_host_port_link_up(
+ struct isci_host *,
+ struct isci_port *,
+ struct isci_phy *);
+int isci_host_dev_found(struct domain_device *);
+
+void isci_host_remote_device_start_complete(
+ struct isci_host *,
+ struct isci_remote_device *,
+ enum sci_status);
+
+void sci_controller_disable_interrupts(
+ struct isci_host *ihost);
+
+enum sci_status sci_controller_start_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_task_status sci_controller_start_task(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_controller_terminate_request(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_controller_complete_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+void sci_port_configuration_agent_construct(
+ struct sci_port_configuration_agent *port_agent);
+
+enum sci_status sci_port_configuration_agent_initialize(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent);
+#endif
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
new file mode 100644
index 00000000000..61e0d09e2b5
--- /dev/null
+++ b/drivers/scsi/isci/init.c
@@ -0,0 +1,565 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/efi.h>
+#include <asm/string.h>
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
+static struct scsi_transport_template *isci_transport_template;
+
+static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
+ { PCI_VDEVICE(INTEL, 0x1D61),},
+ { PCI_VDEVICE(INTEL, 0x1D63),},
+ { PCI_VDEVICE(INTEL, 0x1D65),},
+ { PCI_VDEVICE(INTEL, 0x1D67),},
+ { PCI_VDEVICE(INTEL, 0x1D69),},
+ { PCI_VDEVICE(INTEL, 0x1D6B),},
+ { PCI_VDEVICE(INTEL, 0x1D60),},
+ { PCI_VDEVICE(INTEL, 0x1D62),},
+ { PCI_VDEVICE(INTEL, 0x1D64),},
+ { PCI_VDEVICE(INTEL, 0x1D66),},
+ { PCI_VDEVICE(INTEL, 0x1D68),},
+ { PCI_VDEVICE(INTEL, 0x1D6A),},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, isci_id_table);
+
+/* linux isci specific settings */
+
+unsigned char no_outbound_task_to = 20;
+module_param(no_outbound_task_to, byte, 0);
+MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
+
+u16 ssp_max_occ_to = 20;
+module_param(ssp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
+
+u16 stp_max_occ_to = 5;
+module_param(stp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
+
+u16 ssp_inactive_to = 5;
+module_param(ssp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
+
+u16 stp_inactive_to = 5;
+module_param(stp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
+
+unsigned char phy_gen = 3;
+module_param(phy_gen, byte, 0);
+MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
+
+unsigned char max_concurr_spinup = 1;
+module_param(max_concurr_spinup, byte, 0);
+MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
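These settings can be overridden at module load time; as an illustrative example, the negotiated link rate could be capped at 3.0Gbps with:

	modprobe isci phy_gen=2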
+
+static struct scsi_host_template isci_sht = {
+
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .proc_name = DRV_NAME,
+ .queuecommand = sas_queuecommand,
+ .target_alloc = sas_target_alloc,
+ .slave_configure = sas_slave_configure,
+ .slave_destroy = sas_slave_destroy,
+ .scan_finished = isci_host_scan_finished,
+ .scan_start = isci_host_scan_start,
+ .change_queue_depth = sas_change_queue_depth,
+ .change_queue_type = sas_change_queue_type,
+ .bios_param = sas_bios_param,
+ .can_queue = ISCI_CAN_QUEUE_VAL,
+ .cmd_per_lun = 1,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_bus_reset_handler = isci_bus_reset_handler,
+ .slave_alloc = sas_slave_alloc,
+ .target_destroy = sas_target_destroy,
+ .ioctl = sas_ioctl,
+};
+
+static struct sas_domain_function_template isci_transport_ops = {
+
+ /* The class calls these to notify the LLDD of an event. */
+ .lldd_port_formed = isci_port_formed,
+ .lldd_port_deformed = isci_port_deformed,
+
+ /* The class calls these when a device is found or gone. */
+ .lldd_dev_found = isci_remote_device_found,
+ .lldd_dev_gone = isci_remote_device_gone,
+
+ .lldd_execute_task = isci_task_execute_task,
+ /* Task Management Functions. Must be called from process context. */
+ .lldd_abort_task = isci_task_abort_task,
+ .lldd_abort_task_set = isci_task_abort_task_set,
+ .lldd_clear_aca = isci_task_clear_aca,
+ .lldd_clear_task_set = isci_task_clear_task_set,
+ .lldd_I_T_nexus_reset = isci_task_I_T_nexus_reset,
+ .lldd_lu_reset = isci_task_lu_reset,
+ .lldd_query_task = isci_task_query_task,
+
+ /* Port and Adapter management */
+ .lldd_clear_nexus_port = isci_task_clear_nexus_port,
+ .lldd_clear_nexus_ha = isci_task_clear_nexus_ha,
+
+ /* Phy management */
+ .lldd_control_phy = isci_phy_control,
+};
+
+
+/******************************************************************************
+* P R O T E C T E D M E T H O D S
+******************************************************************************/
+
+
+
+/**
+ * isci_register_sas_ha() - This method initializes various lldd
+ * specific members of the sas_ha struct and calls the libsas
+ * sas_register_ha() function.
+ * @isci_host: This parameter specifies the lldd specific wrapper for the
+ * libsas sas_ha struct.
+ *
+ * This method returns an error code indicating success or failure. The user
+ * should check for a possible memory allocation error (-ENOMEM); otherwise, a
+ * zero return indicates success.
+ */
+static int isci_register_sas_ha(struct isci_host *isci_host)
+{
+ int i;
+ struct sas_ha_struct *sas_ha = &(isci_host->sas_ha);
+ struct asd_sas_phy **sas_phys;
+ struct asd_sas_port **sas_ports;
+
+ sas_phys = devm_kzalloc(&isci_host->pdev->dev,
+ SCI_MAX_PHYS * sizeof(void *),
+ GFP_KERNEL);
+ if (!sas_phys)
+ return -ENOMEM;
+
+ sas_ports = devm_kzalloc(&isci_host->pdev->dev,
+ SCI_MAX_PORTS * sizeof(void *),
+ GFP_KERNEL);
+ if (!sas_ports)
+ return -ENOMEM;
+
+ /*----------------- Libsas Initialization Stuff----------------------
+ * Set various fields in the sas_ha struct:
+ */
+
+ sas_ha->sas_ha_name = DRV_NAME;
+ sas_ha->lldd_module = THIS_MODULE;
+ sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0];
+
+ /* set the array of phy and port structs. */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ sas_phys[i] = &isci_host->phys[i].sas_phy;
+ sas_ports[i] = &isci_host->ports[i].sas_port;
+ }
+
+ sas_ha->sas_phy = sas_phys;
+ sas_ha->sas_port = sas_ports;
+ sas_ha->num_phys = SCI_MAX_PHYS;
+
+ sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL;
+ sas_ha->lldd_max_execute_num = 1;
+ sas_ha->strict_wide_ports = 1;
+
+ sas_register_ha(sas_ha);
+
+ return 0;
+}
+
+static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
+}
+
+static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
+
+static void isci_unregister(struct isci_host *isci_host)
+{
+ struct Scsi_Host *shost;
+
+ if (!isci_host)
+ return;
+
+ shost = isci_host->shost;
+ device_remove_file(&shost->shost_dev, &dev_attr_isci_id);
+
+ sas_unregister_ha(&isci_host->sas_ha);
+
+ sas_remove_host(isci_host->shost);
+ scsi_remove_host(isci_host->shost);
+ scsi_host_put(isci_host->shost);
+}
+
+static int __devinit isci_pci_init(struct pci_dev *pdev)
+{
+ int err, bar_num, bar_mask = 0;
+ void __iomem * const *iomap;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed enable PCI device %s!\n",
+ pci_name(pdev));
+ return err;
+ }
+
+ for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
+ bar_mask |= 1 << (bar_num * 2);
+
+ err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
+ if (err)
+ return err;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int num_controllers(struct pci_dev *pdev)
+{
+ /* bar size alone can tell us if we are running with a dual controller
+ * part, no need to trust revision ids that might be under broken firmware
+ * control
+ */
+ resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2);
+ resource_size_t smu_bar_size = pci_resource_len(pdev, SCI_SMU_BAR*2);
+
+ if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS &&
+ smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS)
+ return SCI_MAX_CONTROLLERS;
+ else
+ return 1;
+}
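With the BAR sizes defined in isci.h (SCI_SMU_BAR_SIZE = 16K, SCI_SCU_BAR_SIZE = 4M, SCI_MAX_CONTROLLERS = 2), a dual-controller part must therefore expose at least a 32K SMU BAR and an 8M SCU BAR; anything smaller is treated as a single-controller function.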
+
+static int isci_setup_interrupts(struct pci_dev *pdev)
+{
+ int err, i, num_msix;
+ struct isci_host *ihost;
+ struct isci_pci_info *pci_info = to_pci_info(pdev);
+
+ /*
+ * Determine the number of vectors associated with this
+ * PCI function.
+ */
+ num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
+
+ for (i = 0; i < num_msix; i++)
+ pci_info->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(pdev, pci_info->msix_entries, num_msix);
+ if (err)
+ goto intx;
+
+ for (i = 0; i < num_msix; i++) {
+ int id = i / SCI_NUM_MSI_X_INT;
+ struct msix_entry *msix = &pci_info->msix_entries[i];
+ irq_handler_t isr;
+
+ ihost = pci_info->hosts[id];
+ /* odd numbered vectors are error interrupts */
+ if (i & 1)
+ isr = isci_error_isr;
+ else
+ isr = isci_msix_isr;
+
+ err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
+ DRV_NAME"-msix", ihost);
+ if (!err)
+ continue;
+
+ dev_info(&pdev->dev, "msix setup failed falling back to intx\n");
+ while (i--) {
+ id = i / SCI_NUM_MSI_X_INT;
+ ihost = pci_info->hosts[id];
+ msix = &pci_info->msix_entries[i];
+ devm_free_irq(&pdev->dev, msix->vector, ihost);
+ }
+ pci_disable_msix(pdev);
+ goto intx;
+ }
+ return 0;
+
+ intx:
+ for_each_isci_host(i, ihost, pdev) {
+ err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
+ IRQF_SHARED, DRV_NAME"-intx", ihost);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
+{
+ struct isci_host *isci_host;
+ struct Scsi_Host *shost;
+ int err;
+
+ isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
+ if (!isci_host)
+ return NULL;
+
+ isci_host->pdev = pdev;
+ isci_host->id = id;
+
+ shost = scsi_host_alloc(&isci_sht, sizeof(void *));
+ if (!shost)
+ return NULL;
+ isci_host->shost = shost;
+
+ err = isci_host_init(isci_host);
+ if (err)
+ goto err_shost;
+
+ SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
+ isci_host->sas_ha.core.shost = shost;
+ shost->transportt = isci_transport_template;
+
+ shost->max_id = ~0;
+ shost->max_lun = ~0;
+ shost->max_cmd_len = MAX_COMMAND_SIZE;
+
+ err = scsi_add_host(shost, &pdev->dev);
+ if (err)
+ goto err_shost;
+
+ err = isci_register_sas_ha(isci_host);
+ if (err)
+ goto err_shost_remove;
+
+ err = device_create_file(&shost->shost_dev, &dev_attr_isci_id);
+ if (err)
+ goto err_unregister_ha;
+
+ return isci_host;
+
+ err_unregister_ha:
+ sas_unregister_ha(&(isci_host->sas_ha));
+ err_shost_remove:
+ scsi_remove_host(shost);
+ err_shost:
+ scsi_host_put(shost);
+
+ return NULL;
+}
+
+static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct isci_pci_info *pci_info;
+ int err, i;
+ struct isci_host *isci_host;
+ const struct firmware *fw = NULL;
+ struct isci_orom *orom = NULL;
+ char *source = "(platform)";
+
+ dev_info(&pdev->dev, "driver configured for rev: %d silicon\n",
+ pdev->revision);
+
+ pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+ pci_set_drvdata(pdev, pci_info);
+
+ if (efi_enabled)
+ orom = isci_get_efi_var(pdev);
+
+ if (!orom)
+ orom = isci_request_oprom(pdev);
+
+ for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
+ if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+ dev_warn(&pdev->dev,
+ "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
+ devm_kfree(&pdev->dev, orom);
+ orom = NULL;
+ break;
+ }
+ }
+
+ if (!orom) {
+ source = "(firmware)";
+ orom = isci_request_firmware(pdev, fw);
+ if (!orom) {
+ /* TODO convert this to WARN_TAINT_ONCE once the
+ * orom/efi parameter support is widely available
+ */
+ dev_warn(&pdev->dev,
+ "Loading user firmware failed, using default "
+ "values\n");
+ dev_warn(&pdev->dev,
+ "Default OEM configuration being used: 4 "
+ "narrow ports, and default SAS Addresses\n");
+ }
+ }
+
+ if (orom)
+ dev_info(&pdev->dev,
+ "OEM SAS parameters (version: %u.%u) loaded %s\n",
+ (orom->hdr.version & 0xf0) >> 4,
+ (orom->hdr.version & 0xf), source);
+
+ pci_info->orom = orom;
+
+ err = isci_pci_init(pdev);
+ if (err)
+ return err;
+
+ for (i = 0; i < num_controllers(pdev); i++) {
+ struct isci_host *h = isci_host_alloc(pdev, i);
+
+ if (!h) {
+ err = -ENOMEM;
+ goto err_host_alloc;
+ }
+ pci_info->hosts[i] = h;
+ }
+
+ err = isci_setup_interrupts(pdev);
+ if (err)
+ goto err_host_alloc;
+
+ for_each_isci_host(i, isci_host, pdev)
+ scsi_scan_host(isci_host->shost);
+
+ return 0;
+
+ err_host_alloc:
+ for_each_isci_host(i, isci_host, pdev)
+ isci_unregister(isci_host);
+ return err;
+}
+
+static void __devexit isci_pci_remove(struct pci_dev *pdev)
+{
+ struct isci_host *ihost;
+ int i;
+
+ for_each_isci_host(i, ihost, pdev) {
+ isci_unregister(ihost);
+ isci_host_deinit(ihost);
+ sci_controller_disable_interrupts(ihost);
+ }
+}
+
+static struct pci_driver isci_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = isci_id_table,
+ .probe = isci_pci_probe,
+ .remove = __devexit_p(isci_pci_remove),
+};
+
+static __init int isci_init(void)
+{
+ int err;
+
+ pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME);
+
+ isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
+ if (!isci_transport_template)
+ return -ENOMEM;
+
+ err = pci_register_driver(&isci_pci_driver);
+ if (err)
+ sas_release_transport(isci_transport_template);
+
+ return err;
+}
+
+static __exit void isci_exit(void)
+{
+ pci_unregister_driver(&isci_pci_driver);
+ sas_release_transport(isci_transport_template);
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(ISCI_FW_NAME);
+module_init(isci_init);
+module_exit(isci_exit);
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
new file mode 100644
index 00000000000..d1de63312e7
--- /dev/null
+++ b/drivers/scsi/isci/isci.h
@@ -0,0 +1,538 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ISCI_H__
+#define __ISCI_H__
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#define DRV_NAME "isci"
+#define SCI_PCI_BAR_COUNT 2
+#define SCI_NUM_MSI_X_INT 2
+#define SCI_SMU_BAR 0
+#define SCI_SMU_BAR_SIZE (16*1024)
+#define SCI_SCU_BAR 1
+#define SCI_SCU_BAR_SIZE (4*1024*1024)
+#define SCI_IO_SPACE_BAR0 2
+#define SCI_IO_SPACE_BAR1 3
+#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? */
+#define SCIC_CONTROLLER_STOP_TIMEOUT 5000
+
+#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF
+
+#define SCI_MAX_PHYS (4UL)
+#define SCI_MAX_PORTS SCI_MAX_PHYS
+#define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */
+#define SCI_MAX_REMOTE_DEVICES (256UL)
+#define SCI_MAX_IO_REQUESTS (256UL)
+#define SCI_MAX_SEQ (16)
+#define SCI_MAX_MSIX_MESSAGES (2)
+#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */
+#define SCI_MAX_CONTROLLERS 2
+#define SCI_MAX_DOMAINS SCI_MAX_PORTS
+
+#define SCU_MAX_CRITICAL_NOTIFICATIONS (384)
+#define SCU_MAX_EVENTS_SHIFT (7)
+#define SCU_MAX_EVENTS (1 << SCU_MAX_EVENTS_SHIFT)
+#define SCU_MAX_UNSOLICITED_FRAMES (128)
+#define SCU_MAX_COMPLETION_QUEUE_SCRATCH (128)
+#define SCU_MAX_COMPLETION_QUEUE_ENTRIES (SCU_MAX_CRITICAL_NOTIFICATIONS \
+ + SCU_MAX_EVENTS \
+ + SCU_MAX_UNSOLICITED_FRAMES \
+ + SCI_MAX_IO_REQUESTS \
+ + SCU_MAX_COMPLETION_QUEUE_SCRATCH)
+#define SCU_MAX_COMPLETION_QUEUE_SHIFT (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
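With the values above, SCU_MAX_COMPLETION_QUEUE_ENTRIES works out to 384 + 128 + 128 + 256 + 128 = 1024 entries, so SCU_MAX_COMPLETION_QUEUE_SHIFT evaluates to ilog2(1024) = 10.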
+
+#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
+#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024)
+#define SCU_INVALID_FRAME_INDEX (0xFFFF)
+
+#define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF)
+#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH (0x00FFFFFF)
+
+static inline void check_sizes(void)
+{
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_EVENTS);
+ BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES <= 8);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_UNSOLICITED_FRAMES);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_COMPLETION_QUEUE_ENTRIES);
+ BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES > SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_IO_REQUESTS);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_SEQ);
+}
+
+/**
+ * enum sci_status - This is the general return status enumeration for non-IO,
+ * non-task management related SCI interface methods.
+ *
+ *
+ */
+enum sci_status {
+ /**
+ * This member indicates successful completion.
+ */
+ SCI_SUCCESS = 0,
+
+ /**
+ * This value indicates that the calling method completed successfully,
+	 * but that the IO may have completed before having its start method
+ * invoked. This occurs during SAT translation for requests that do
+ * not require an IO to the target or for any other requests that may
+ * be completed without having to submit IO.
+ */
+ SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+
+ /**
+	 * This value indicates that the SCU hardware returned an early response
+	 * because the IO request specified more data than is returned by the
+ * target device (mode pages, inquiry data, etc.). The completion routine
+ * will handle this case to get the actual number of bytes transferred.
+ */
+ SCI_SUCCESS_IO_DONE_EARLY,
+
+ /**
+ * This member indicates that the object for which a state change is
+ * being requested is already in said state.
+ */
+ SCI_WARNING_ALREADY_IN_STATE,
+
+ /**
+	 * This member indicates that the interrupt coalescence timer may cause SAS
+ * specification compliance issues (i.e. SMP target mode response
+ * frames must be returned within 1.9 milliseconds).
+ */
+ SCI_WARNING_TIMER_CONFLICT,
+
+ /**
+	 * This field indicates that a sequence of actions is not yet complete. Mostly,
+ * this status is used when multiple ATA commands are needed in a SATI translation.
+ */
+ SCI_WARNING_SEQUENCE_INCOMPLETE,
+
+ /**
+ * This member indicates that there was a general failure.
+ */
+ SCI_FAILURE,
+
+ /**
+ * This member indicates that the SCI implementation is unable to complete
+	 * an operation due to a critical flaw that prevents any further operation
+ * (i.e. an invalid pointer).
+ */
+ SCI_FATAL_ERROR,
+
+ /**
+ * This member indicates the calling function failed, because the state
+ * of the controller is in a state that prevents successful completion.
+ */
+ SCI_FAILURE_INVALID_STATE,
+
+ /**
+ * This member indicates the calling function failed, because there is
+ * insufficient resources/memory to complete the request.
+ */
+ SCI_FAILURE_INSUFFICIENT_RESOURCES,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * controller object required for the operation can't be located.
+ */
+ SCI_FAILURE_CONTROLLER_NOT_FOUND,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * discovered controller type is not supported by the library.
+ */
+ SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested initialization data version isn't supported.
+ */
+ SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested configuration of SAS Phys into SAS Ports is not supported.
+ */
+ SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested protocol is not supported by the remote device, port,
+ * or controller.
+ */
+ SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested information type is not supported by the SCI implementation.
+ */
+ SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * device already exists.
+ */
+ SCI_FAILURE_DEVICE_EXISTS,
+
+ /**
+ * This member indicates the calling function failed, because adding
+ * a phy to the object is not possible.
+ */
+ SCI_FAILURE_ADDING_PHY_UNSUPPORTED,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested information type is not supported by the SCI implementation.
+ */
+ SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD,
+
+ /**
+ * This member indicates the calling function failed, because the SCI
+ * implementation does not support the supplied time limit.
+ */
+ SCI_FAILURE_UNSUPPORTED_TIME_LIMIT,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain the specified Phy.
+ */
+ SCI_FAILURE_INVALID_PHY,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain the specified Port.
+ */
+ SCI_FAILURE_INVALID_PORT,
+
+ /**
+	 * This member indicates the calling method was partly successful.
+	 * The port was reset, but not all phys in the port are operational.
+ */
+ SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS,
+
+ /**
+	 * This member indicates that the calling method failed.
+	 * The port reset did not complete because none of the phys are operational.
+ */
+ SCI_FAILURE_RESET_PORT_FAILURE,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain the specified remote device.
+ */
+ SCI_FAILURE_INVALID_REMOTE_DEVICE,
+
+ /**
+ * This member indicates the calling method failed, because the remote
+ * device is in a bad state and requires a reset.
+ */
+ SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain or support the specified IO tag.
+ */
+ SCI_FAILURE_INVALID_IO_TAG,
+
+ /**
+ * This member indicates that the operation failed and the user should
+ * check the response data associated with the IO.
+ */
+ SCI_FAILURE_IO_RESPONSE_VALID,
+
+ /**
+ * This member indicates that the operation failed, the failure is
+ * controller implementation specific, and the response data associated
+ * with the request is not valid. You can query for the controller
+ * specific error information via sci_controller_get_request_status()
+ */
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+
+ /**
+	 * This member indicates that the operation failed because the
+ * user requested this IO to be terminated.
+ */
+ SCI_FAILURE_IO_TERMINATED,
+
+ /**
+ * This member indicates that the operation failed and the associated
+ * request requires a SCSI abort task to be sent to the target.
+ */
+ SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+
+ /**
+ * This member indicates that the operation failed because the supplied
+ * device could not be located.
+ */
+ SCI_FAILURE_DEVICE_NOT_FOUND,
+
+ /**
+ * This member indicates that the operation failed because the
+ * objects association is required and is not correctly set.
+ */
+ SCI_FAILURE_INVALID_ASSOCIATION,
+
+ /**
+ * This member indicates that the operation failed, because a timeout
+ * occurred.
+ */
+ SCI_FAILURE_TIMEOUT,
+
+ /**
+ * This member indicates that the operation failed, because the user
+ * specified a value that is either invalid or not supported.
+ */
+ SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+ /**
+ * This value indicates that the operation failed, because the number
+ * of messages (MSI-X) is not supported.
+ */
+ SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT,
+
+ /**
+ * This value indicates that the method failed due to a lack of
+ * available NCQ tags.
+ */
+ SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+
+ /**
+ * This value indicates that a protocol violation has occurred on the
+ * link.
+ */
+ SCI_FAILURE_PROTOCOL_VIOLATION,
+
+ /**
+ * This value indicates a failure condition that retry may help to clear.
+ */
+ SCI_FAILURE_RETRY_REQUIRED,
+
+ /**
+	 * This field indicates that the retry limit was reached when a retry was attempted.
+ */
+ SCI_FAILURE_RETRY_LIMIT_REACHED,
+
+ /**
+ * This member indicates the calling method was partly successful.
+	 * Mostly, this status is used when a LUN_RESET issued to an expander-attached
+	 * STP device in the READY NCQ substate needs to have its RNC suspended/resumed
+	 * before posting the TC.
+ */
+ SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS,
+
+ /**
+	 * This field indicates an illegal phy connection based on the routing attributes
+	 * of the two expander phys attached to each other.
+ */
+ SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION,
+
+ /**
+	 * This field indicates a CONFIG ROUTE INFO command has a response with function result
+	 * INDEX DOES NOT EXIST, which usually means the maximum route index was exceeded.
+ */
+ SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX,
+
+ /**
+ * This value indicates that an unsupported PCI device ID has been
+ * specified. This indicates that attempts to invoke
+ * sci_library_allocate_controller() will fail.
+ */
+ SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID
+
+};
+
+/**
+ * enum sci_io_status - This enumeration depicts all of the possible IO
+ * completion status values. Each value in this enumeration maps directly
+ * to a value in the enum sci_status enumeration. Please refer to that
+ * enumeration for detailed comments concerning what the status represents.
+ *
+ * Add the API to retrieve the SCU status from the core. Check to see that the
+ * following status are properly handled: - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL
+ * - SCI_IO_FAILURE_INVALID_IO_TAG
+ */
+enum sci_io_status {
+ SCI_IO_SUCCESS = SCI_SUCCESS,
+ SCI_IO_FAILURE = SCI_FAILURE,
+ SCI_IO_SUCCESS_COMPLETE_BEFORE_START = SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+ SCI_IO_SUCCESS_IO_DONE_EARLY = SCI_SUCCESS_IO_DONE_EARLY,
+ SCI_IO_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE,
+ SCI_IO_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+ SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+ SCI_IO_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID,
+ SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+ SCI_IO_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED,
+ SCI_IO_FAILURE_REQUIRES_SCSI_ABORT = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+ SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+ SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+ SCI_IO_FAILURE_PROTOCOL_VIOLATION = SCI_FAILURE_PROTOCOL_VIOLATION,
+
+ SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+ SCI_IO_FAILURE_RETRY_REQUIRED = SCI_FAILURE_RETRY_REQUIRED,
+ SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED,
+ SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE
+};
+
+/**
+ * enum sci_task_status - This enumeration depicts all of the possible task
+ * completion status values. Each value in this enumeration maps directly
+ * to a value in the enum sci_status enumeration. Please refer to that
+ * enumeration for detailed comments concerning what the status represents.
+ *
+ * Check to see that the following status are properly handled:
+ */
+enum sci_task_status {
+ SCI_TASK_SUCCESS = SCI_SUCCESS,
+ SCI_TASK_FAILURE = SCI_FAILURE,
+ SCI_TASK_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE,
+ SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+ SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+ SCI_TASK_FAILURE_INVALID_TAG = SCI_FAILURE_INVALID_IO_TAG,
+ SCI_TASK_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID,
+ SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+ SCI_TASK_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED,
+ SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+ SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+ SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS
+
+};
+
+/**
+ * sci_swab32_cpy - convert between scsi and scu-hardware byte format
+ * @dest: receive the 4-byte endian swapped version of src
+ * @src: word aligned source buffer
+ *
+ * scu hardware handles SSP/SMP control, response, and unidentified
+ * frames in "big endian dword" order. Regardless of host endian this
+ * is always a swab32()-per-dword conversion of the standard definition,
+ * i.e. single byte fields swapped and multi-byte fields in little-
+ * endian
+ */
+static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
+{
+ u32 *dest = _dest, *src = _src;
+
+ while (--word_cnt >= 0)
+ dest[word_cnt] = swab32(src[word_cnt]);
+}
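A minimal illustration (buffer contents hypothetical): copying two dwords byte-reverses each dword independently, regardless of host endianness:

	u8 src[8] __aligned(4) = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
	u8 dest[8] __aligned(4);

	sci_swab32_cpy(dest, src, 2);	/* word_cnt is in dwords */
	/* dest now holds: 03 02 01 00 07 06 05 04 */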
+
+extern unsigned char no_outbound_task_to;
+extern u16 ssp_max_occ_to;
+extern u16 stp_max_occ_to;
+extern u16 ssp_inactive_to;
+extern u16 stp_inactive_to;
+extern unsigned char phy_gen;
+extern unsigned char max_concurr_spinup;
+
+irqreturn_t isci_msix_isr(int vec, void *data);
+irqreturn_t isci_intx_isr(int vec, void *data);
+irqreturn_t isci_error_isr(int vec, void *data);
+
+/*
+ * Each timer is associated with a cancellation flag that is set when
+ * del_timer() is called and checked in the timer callback function. This
+ * is needed since del_timer_sync() cannot be called with sci_lock held.
+ * For deinit however, del_timer_sync() is used without holding the lock.
+ */
+struct sci_timer {
+ struct timer_list timer;
+ bool cancel;
+};
+
+static inline
+void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long))
+{
+ tmr->timer.function = fn;
+ tmr->timer.data = (unsigned long) tmr;
+ tmr->cancel = 0;
+ init_timer(&tmr->timer);
+}
+
+static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
+{
+ tmr->cancel = 0;
+ mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
+}
+
+static inline void sci_del_timer(struct sci_timer *tmr)
+{
+ tmr->cancel = 1;
+ del_timer(&tmr->timer);
+}
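A minimal sketch of the intended pattern (the callback and setup function are hypothetical; the helpers are the ones defined above): the callback checks the cancel flag so that an expiry already queued when sci_del_timer() ran becomes a no-op:

static void example_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;

	if (tmr->cancel)
		return;	/* sci_del_timer() already ran; ignore this expiry */
	/* ... handle the timeout, typically under the host lock ... */
}

static void example_arm(struct sci_timer *tmr)
{
	sci_init_timer(tmr, example_timeout);
	sci_mod_timer(tmr, 250);	/* fire in ~250 ms */
}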
+
+struct sci_base_state_machine {
+ const struct sci_base_state *state_table;
+ u32 initial_state_id;
+ u32 current_state_id;
+ u32 previous_state_id;
+};
+
+typedef void (*sci_state_transition_t)(struct sci_base_state_machine *sm);
+
+struct sci_base_state {
+ sci_state_transition_t enter_state; /* Called on state entry */
+ sci_state_transition_t exit_state; /* Called on state exit */
+};
+
+extern void sci_init_sm(struct sci_base_state_machine *sm,
+ const struct sci_base_state *state_table,
+ u32 initial_state);
+extern void sci_change_state(struct sci_base_state_machine *sm, u32 next_state);
+#endif /* __ISCI_H__ */
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
new file mode 100644
index 00000000000..79313a7a235
--- /dev/null
+++ b/drivers/scsi/isci/phy.c
@@ -0,0 +1,1312 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "host.h"
+#include "phy.h"
+#include "scu_event_codes.h"
+#include "probe_roms.h"
+
+/* Maximum arbitration wait time in micro-seconds */
+#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700)
+
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
+{
+ return iphy->max_negotiated_speed;
+}
+
+static enum sci_status
+sci_phy_transport_layer_initialization(struct isci_phy *iphy,
+ struct scu_transport_layer_registers __iomem *reg)
+{
+ u32 tl_control;
+
+ iphy->transport_layer_registers = reg;
+
+ writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX,
+ &iphy->transport_layer_registers->stp_rni);
+
+ /*
+ * Hardware team recommends that we enable the STP prefetch for all
+ * transports
+ */
+ tl_control = readl(&iphy->transport_layer_registers->control);
+ tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH);
+ writel(tl_control, &iphy->transport_layer_registers->control);
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+sci_phy_link_layer_initialization(struct isci_phy *iphy,
+ struct scu_link_layer_registers __iomem *reg)
+{
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+ int phy_idx = iphy->phy_index;
+ struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
+ struct sci_phy_oem_params *phy_oem =
+ &ihost->oem_parameters.phys[phy_idx];
+ u32 phy_configuration;
+ struct sci_phy_cap phy_cap;
+ u32 parity_check = 0;
+ u32 parity_count = 0;
+ u32 llctl, link_rate;
+ u32 clksm_value = 0;
+
+ iphy->link_layer_registers = reg;
+
+ /* Set our IDENTIFY frame data */
+ #define SCI_END_DEVICE 0x01
+
+ writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) |
+ SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) |
+ SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
+ SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
+ SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
+ &iphy->link_layer_registers->transmit_identification);
+
+ /* Write the device SAS Address */
+ writel(0xFEDCBA98,
+ &iphy->link_layer_registers->sas_device_name_high);
+ writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
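+ /* Note: the device name programmed above is a fixed placeholder value
+ * in the high dword; only the low dword (the phy index) differs from
+ * phy to phy.
+ */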
+
+ /* Write the source SAS Address */
+ writel(phy_oem->sas_address.high,
+ &iphy->link_layer_registers->source_sas_address_high);
+ writel(phy_oem->sas_address.low,
+ &iphy->link_layer_registers->source_sas_address_low);
+
+ /* Clear and Set the PHY Identifier */
+ writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
+ writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
+ &iphy->link_layer_registers->identify_frame_phy_id);
+
+ /* Change the initial state of the phy configuration register */
+ phy_configuration =
+ readl(&iphy->link_layer_registers->phy_configuration);
+
+ /* Hold OOB state machine in reset */
+ phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ writel(phy_configuration,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Configure the SNW capabilities */
+ phy_cap.all = 0;
+ phy_cap.start = 1;
+ phy_cap.gen3_no_ssc = 1;
+ phy_cap.gen2_no_ssc = 1;
+ phy_cap.gen1_no_ssc = 1;
+ if (ihost->oem_parameters.controller.do_enable_ssc == true) {
+ phy_cap.gen3_ssc = 1;
+ phy_cap.gen2_ssc = 1;
+ phy_cap.gen1_ssc = 1;
+ }
+
+ /*
+ * The SAS specification indicates that the phy_capabilities that
+ * are transmitted shall have an even parity. Calculate the parity. */
+ parity_check = phy_cap.all;
+ while (parity_check != 0) {
+ if (parity_check & 0x1)
+ parity_count++;
+ parity_check >>= 1;
+ }
+
+ /*
+ * If parity indicates there are an odd number of bits set, then
+ * set the parity bit to 1 in the phy capabilities. */
+ if ((parity_count % 2) != 0)
+ phy_cap.parity = 1;
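+ /* For example, with only the start and the three no-SSC rate bits set
+ * the capabilities word has four bits set (already even) and parity
+ * stays 0; enabling SSC adds three more bits (seven total, odd), so
+ * the parity bit is set to restore even parity.
+ */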
+
+ writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
+
+ /* Set the enable spinup period but disable the ability to send
+ * notify enable spinup
+ */
+ writel(SCU_ENSPINUP_GEN_VAL(COUNT,
+ phy_user->notify_enable_spin_up_insertion_frequency),
+ &iphy->link_layer_registers->notify_enable_spinup_control);
+
+ /* Write the ALIGN insertion frequency for the connected phy and,
+ * independently, for the general (connection state independent) case
+ */
+ clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED,
+ phy_user->in_connection_align_insertion_frequency);
+
+ clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
+ phy_user->align_insertion_frequency);
+
+ writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
+
+ /* @todo Provide a way to write this register correctly */
+ writel(0x02108421,
+ &iphy->link_layer_registers->afe_lookup_table_control);
+
+ llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
+ (u8)ihost->user_parameters.no_outbound_task_timeout);
+
+ switch (phy_user->max_speed_generation) {
+ case SCIC_SDS_PARM_GEN3_SPEED:
+ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3;
+ break;
+ case SCIC_SDS_PARM_GEN2_SPEED:
+ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2;
+ break;
+ default:
+ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1;
+ break;
+ }
+ llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
+ writel(llctl, &iphy->link_layer_registers->link_layer_control);
+
+ if (is_a2(ihost->pdev)) {
+ /* Program the max ARB time for the PHY to 700us so we inter-operate with
+ * the PMC expander which shuts down PHYs if the expander PHY generates too
+ * many breaks. This time value will guarantee that the initiator PHY will
+ * generate the break.
+ */
+ writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
+ &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
+ }
+
+ /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
+ writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
+
+ /* We can exit the initial state to the stopped state */
+ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+ return SCI_SUCCESS;
+}
+
+static void phy_sata_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: SCIC SDS Phy 0x%p did not receive signature fis before "
+ "timeout.\n",
+ __func__,
+ iphy);
+
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/**
+ * phy_get_non_dummy_port() - This method returns the port currently
+ * containing this phy. If the phy is currently contained by the dummy
+ * port, then the phy is considered to not be part of a port.
+ * @iphy: This parameter specifies the phy for which to retrieve the
+ * containing port.
+ *
+ * Return: a handle/pointer to the port containing the phy, or NULL if the
+ * phy is not part of a real port (i.e. it is contained in the dummy port).
+ */
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
+{
+ struct isci_port *iport = iphy->owning_port;
+
+ if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT)
+ return NULL;
+
+ return iphy->owning_port;
+}
+
+/**
+ * sci_phy_set_port() - This method will assign a port to the phy object.
+ * @iphy: This parameter specifies the phy for which to assign the port.
+ * @iport: This parameter specifies the port being assigned to the phy.
+ */
+void sci_phy_set_port(
+ struct isci_phy *iphy,
+ struct isci_port *iport)
+{
+ iphy->owning_port = iport;
+
+ if (iphy->bcn_received_while_port_unassigned) {
+ iphy->bcn_received_while_port_unassigned = false;
+ sci_port_broadcast_change_received(iphy->owning_port, iphy);
+ }
+}
+
+enum sci_status sci_phy_initialize(struct isci_phy *iphy,
+ struct scu_transport_layer_registers __iomem *tl,
+ struct scu_link_layer_registers __iomem *ll)
+{
+ /* Perform the initialization of the TL hardware */
+ sci_phy_transport_layer_initialization(iphy, tl);
+
+ /* Perform the initialization of the PE hardware */
+ sci_phy_link_layer_initialization(iphy, ll);
+
+ /* There is nothing that needs to be done in this state just
+ * transition to the stopped state
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_phy_setup_transport() - This method assigns the direct attached
+ * device ID for this phy.
+ * @iphy: The phy for which the direct attached device id is to be assigned.
+ * @device_id: The direct attached device ID to assign to the phy. This
+ * will either be the RNi for the device or an invalid RNi if there is
+ * no current device assigned to the phy.
+ */
+void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id)
+{
+ u32 tl_control;
+
+ writel(device_id, &iphy->transport_layer_registers->stp_rni);
+
+ /*
+ * The read should guarantee that the first write gets posted
+ * before the next write
+ */
+ tl_control = readl(&iphy->transport_layer_registers->control);
+ tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE);
+ writel(tl_control, &iphy->transport_layer_registers->control);
+}
+
+static void sci_phy_suspend(struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+void sci_phy_resume(struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+}
+
+void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+ sas->high = readl(&iphy->link_layer_registers->source_sas_address_high);
+ sas->low = readl(&iphy->link_layer_registers->source_sas_address_low);
+}
+
+void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+ struct sas_identify_frame *iaf;
+
+ iaf = &iphy->frame_rcvd.iaf;
+ memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE);
+}
+
+void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto)
+{
+ proto->all = readl(&iphy->link_layer_registers->transmit_identification);
+}
+
+enum sci_status sci_phy_start(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ if (state != SCI_PHY_STOPPED) {
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_stop(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PHY_SUB_INITIAL:
+ case SCI_PHY_SUB_AWAIT_OSSP_EN:
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+ case SCI_PHY_SUB_FINAL:
+ case SCI_PHY_READY:
+ break;
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_reset(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ if (state != SCI_PHY_READY) {
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(&iphy->sm, SCI_PHY_RESETTING);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PHY_SUB_AWAIT_SAS_POWER: {
+ u32 enable_spinup;
+
+ enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+ enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE);
+ writel(enable_spinup, &iphy->link_layer_registers->notify_enable_spinup_control);
+
+ /* Change state to the final state this substate machine has run to completion */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+ return SCI_SUCCESS;
+ }
+ case SCI_PHY_SUB_AWAIT_SATA_POWER: {
+ u32 scu_sas_pcfg_value;
+
+ /* Release the spinup hold state and reset the OOB state machine */
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value &=
+ ~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Now restart the OOB operation */
+ scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Change state to the final state this substate machine has run to completion */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN);
+
+ return SCI_SUCCESS;
+ }
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
+{
+ /* continue the link training for the phy as if it were a SAS PHY
+ * instead of a SATA PHY. This is done because the completion queue had a SAS
+ * PHY DETECTED event when the state machine was expecting a SATA PHY event.
+ */
+ u32 phy_control;
+
+ phy_control = readl(&iphy->link_layer_registers->phy_configuration);
+ phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
+ writel(phy_control,
+ &iphy->link_layer_registers->phy_configuration);
+
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
+
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
+}
+
+static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
+{
+ /* This method continues the link training for the phy as if it were a SATA PHY
+ * instead of a SAS PHY. This is done because the completion queue had a SATA
+ * SPINUP HOLD event when the state machine was expecting a SAS PHY event.
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
+
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+}
+
+/**
+ * sci_phy_complete_link_training - perform processing common to
+ * all protocols upon completion of link training.
+ * @iphy: This parameter specifies the phy object for which link training
+ * has completed.
+ * @max_link_rate: This parameter specifies the maximum link rate to be
+ * associated with this phy.
+ * @next_state: This parameter specifies the next state for the phy's starting
+ * sub-state machine.
+ *
+ */
+static void sci_phy_complete_link_training(struct isci_phy *iphy,
+ enum sas_linkrate max_link_rate,
+ u32 next_state)
+{
+ iphy->max_negotiated_speed = max_link_rate;
+
+ sci_change_state(&iphy->sm, next_state);
+}
+
+enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PHY_SUB_AWAIT_OSSP_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ sci_phy_start_sas_link_training(iphy);
+ iphy->is_in_link_training = true;
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ sci_phy_start_sata_link_training(iphy);
+ iphy->is_in_link_training = true;
+ break;
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__,
+ event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /*
+ * Why is this being reported again by the controller?
+ * We would re-enter this state so just stay here */
+ break;
+ case SCU_EVENT_SAS_15:
+ case SCU_EVENT_SAS_15_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
+ break;
+ case SCU_EVENT_SAS_30:
+ case SCU_EVENT_SAS_30_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
+ break;
+ case SCU_EVENT_SAS_60:
+ case SCU_EVENT_SAS_60_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /*
+ * We were doing SAS PHY link training and received a SATA PHY
+ * event; continue OOB/SN as if this were a SATA PHY.
+ */
+ sci_phy_start_sata_link_training(iphy);
+ break;
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__, event_code);
+
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_IAF_UF:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /* Backup the state machine */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /* We were doing SAS PHY link training and received a
+ * SATA PHY event; continue OOB/SN as if this were a
+ * SATA PHY.
+ */
+ sci_phy_start_sata_link_training(iphy);
+ break;
+ case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+ case SCU_EVENT_LINK_FAILURE:
+ case SCU_EVENT_HARD_RESET_RECEIVED:
+ /* Start the oob/sn state machine over again */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__, event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received unexpected "
+ "event_code %x\n",
+ __func__,
+ event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /* These events are received every 10ms and are
+ * expected while in this state
+ */
+ break;
+
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /* There has been a change in the phy type before OOB/SN for
+ * the SATA phy finished; start down the SAS link training path.
+ */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__, event_code);
+
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /* These events might be received since we don't know how many may be in
+ * the completion queue while waiting for power
+ */
+ break;
+ case SCU_EVENT_SATA_PHY_DETECTED:
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+
+ /* We have received the SATA PHY notification change state */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
+ break;
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /* There has been a change in the phy type before OOB/SN for
+ * the SATA phy finished; start down the SAS link training path.
+ */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__,
+ event_code);
+
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SATA_PHY_DETECTED:
+ /*
+ * The hardware reports multiple SATA PHY detected events;
+ * ignore the extras.
+ */
+ break;
+ case SCU_EVENT_SATA_15:
+ case SCU_EVENT_SATA_15_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ break;
+ case SCU_EVENT_SATA_30:
+ case SCU_EVENT_SATA_30_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ break;
+ case SCU_EVENT_SATA_60:
+ case SCU_EVENT_SATA_60_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ break;
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /*
+ * There has been a change in the phy type before OOB/SN for
+ * the SATA phy finished; start down the SAS link training path.
+ */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__, event_code);
+
+ return SCI_FAILURE;
+ }
+
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SATA_PHY_DETECTED:
+ /* Backup the state machine */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
+ break;
+
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__,
+ event_code);
+
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_READY:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_BROADCAST_CHANGE:
+ /* Broadcast change received. Notify the port. */
+ if (phy_get_non_dummy_port(iphy) != NULL)
+ sci_port_broadcast_change_received(iphy->owning_port, iphy);
+ else
+ iphy->bcn_received_while_port_unassigned = true;
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%sP SCIC PHY 0x%p ready state machine received "
+ "unexpected event_code %x\n",
+ __func__, iphy, event_code);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_RESETTING:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_HARD_RESET_TRANSMITTED:
+ /* The hard reset has been transmitted; change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: SCIC PHY 0x%p resetting state machine received "
+ "unexpected event_code %x\n",
+ __func__, iphy, event_code);
+
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ return SCI_SUCCESS;
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+ enum sci_status result;
+ unsigned long flags;
+
+ switch (state) {
+ case SCI_PHY_SUB_AWAIT_IAF_UF: {
+ u32 *frame_words;
+ struct sas_identify_frame iaf;
+
+ result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_words);
+
+ if (result != SCI_SUCCESS)
+ return result;
+
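+ /* sci_swab32_cpy() byte swaps the received frame into the local iaf
+ * copy one 32-bit word at a time; the element count here is the IAF
+ * size expressed in dwords.
+ */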
+ sci_swab32_cpy(&iaf, frame_words, sizeof(iaf) / sizeof(u32));
+ if (iaf.frame_type == 0) {
+ u32 state;
+
+ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+ memcpy(&iphy->frame_rcvd.iaf, &iaf, sizeof(iaf));
+ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+ if (iaf.smp_tport) {
+ /* We got the IAF for an expander PHY; go to the final
+ * state since there are no power requirements for
+ * expander phys.
+ */
+ state = SCI_PHY_SUB_FINAL;
+ } else {
+ /* We got the IAF; we can now go to the await spinup
+ * semaphore state
+ */
+ state = SCI_PHY_SUB_AWAIT_SAS_POWER;
+ }
+ sci_change_state(&iphy->sm, state);
+ result = SCI_SUCCESS;
+ } else
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected frame id %x\n",
+ __func__, frame_index);
+
+ sci_controller_release_frame(ihost, frame_index);
+ return result;
+ }
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: {
+ struct dev_to_host_fis *frame_header;
+ u32 *fis_frame_data;
+
+ result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (result != SCI_SUCCESS)
+ return result;
+
+ if ((frame_header->fis_type == FIS_REGD2H) &&
+ !(frame_header->status & ATA_BUSY)) {
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&fis_frame_data);
+
+ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+ sci_controller_copy_sata_response(&iphy->frame_rcvd.fis,
+ frame_header,
+ fis_frame_data);
+ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+ /* got the signature FIS; we can now go to the final substate */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+ result = SCI_SUCCESS;
+ } else
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected frame id %x\n",
+ __func__, frame_index);
+
+ /* Regardless of the result we are done with this frame */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return result;
+ }
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+}
+
+static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ /* This is just a temporary state; go off to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN);
+}
+
+static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_insert(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_remove(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_insert(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_remove(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+}
+
+static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+}
+
+static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ if (sci_port_link_detected(iphy->owning_port, iphy)) {
+
+ /*
+ * Clear the PE suspend condition so we can actually
+ * receive SIG FIS
+ * The hardware will not respond to the XRDY until the PE
+ * suspend condition is cleared.
+ */
+ sci_phy_resume(iphy);
+
+ sci_mod_timer(&iphy->sata_timer,
+ SCIC_SDS_SIGNATURE_FIS_TIMEOUT);
+ } else
+ iphy->is_in_link_training = false;
+}
+
+static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ /* State machine has run to completion so exit out and change
+ * the base state machine to the ready state
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_READY);
+}
+
+/**
+ * scu_link_layer_stop_protocol_engine() - stop the phy's protocol engine
+ * @iphy: This is the struct isci_phy object to stop.
+ *
+ * This method will stop the struct isci_phy object. This does not reset
+ * the protocol engine; it just suspends it and places it in a state where
+ * it will not cause the end device to power up.
+ */
+static void scu_link_layer_stop_protocol_engine(
+ struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+ u32 enable_spinup_value;
+
+ /* Suspend the protocol engine and place it in a sata spinup hold state */
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value |=
+ (SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) |
+ SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD));
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Disable the notify enable spinup primitives */
+ enable_spinup_value = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+ enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE);
+ writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
+}
+
+/**
+ * scu_link_layer_start_oob() - This method will start the OOB/SN state
+ * machine for this struct isci_phy object.
+ * @iphy: the phy for which to start OOB/SN.
+ */
+static void scu_link_layer_start_oob(
+ struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ scu_sas_pcfg_value &=
+ ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+}
+
+/**
+ * scu_link_layer_tx_hard_reset() - This method will transmit a hard reset
+ * request on the specified phy.
+ * @iphy: the phy on which to transmit the hard reset.
+ *
+ * The SCU hardware requires that we reset the OOB state machine and set the
+ * hard reset bit in the phy configuration register. We then must start OOB
+ * over with the hard reset bit set.
+ */
+static void scu_link_layer_tx_hard_reset(
+ struct isci_phy *iphy)
+{
+ u32 phy_configuration_value;
+
+ /*
+ * SAS Phys must wait for the HARD_RESET_TX event notification to transition
+ * to the starting state. */
+ phy_configuration_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ phy_configuration_value |=
+ (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(OOB_RESET));
+ writel(phy_configuration_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Now take the OOB state machine out of reset */
+ phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ writel(phy_configuration_value,
+ &iphy->link_layer_registers->phy_configuration);
+}
+
+static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_port *iport = iphy->owning_port;
+ struct isci_host *ihost = iport->owning_controller;
+
+ /*
+ * @todo We need to get to the controller to place this PE in a
+ * reset state
+ */
+ sci_del_timer(&iphy->sata_timer);
+
+ scu_link_layer_stop_protocol_engine(iphy);
+
+ if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
+ sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
+}
+
+static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_port *iport = iphy->owning_port;
+ struct isci_host *ihost = iport->owning_controller;
+
+ scu_link_layer_stop_protocol_engine(iphy);
+ scu_link_layer_start_oob(iphy);
+
+ /* We don't know what kind of phy we are going to be just yet */
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+ iphy->bcn_received_while_port_unassigned = false;
+
+ if (iphy->sm.previous_state_id == SCI_PHY_READY)
+ sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
+
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
+}
+
+static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_port *iport = iphy->owning_port;
+ struct isci_host *ihost = iport->owning_controller;
+
+ sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy);
+}
+
+static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_phy_suspend(iphy);
+}
+
+static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ /* The phy is being reset, therefore deactivate it from the port. In
+ * the resetting state we don't notify the user regarding link up and
+ * link down notifications
+ */
+ sci_port_deactivate_phy(iphy->owning_port, iphy, false);
+
+ if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ scu_link_layer_tx_hard_reset(iphy);
+ } else {
+ /* The SCU does not need to have a discrete reset state so
+ * just go back to the starting state.
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ }
+}
+
+static const struct sci_base_state sci_phy_state_table[] = {
+ [SCI_PHY_INITIAL] = { },
+ [SCI_PHY_STOPPED] = {
+ .enter_state = sci_phy_stopped_state_enter,
+ },
+ [SCI_PHY_STARTING] = {
+ .enter_state = sci_phy_starting_state_enter,
+ },
+ [SCI_PHY_SUB_INITIAL] = {
+ .enter_state = sci_phy_starting_initial_substate_enter,
+ },
+ [SCI_PHY_SUB_AWAIT_OSSP_EN] = { },
+ [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { },
+ [SCI_PHY_SUB_AWAIT_IAF_UF] = { },
+ [SCI_PHY_SUB_AWAIT_SAS_POWER] = {
+ .enter_state = sci_phy_starting_await_sas_power_substate_enter,
+ .exit_state = sci_phy_starting_await_sas_power_substate_exit,
+ },
+ [SCI_PHY_SUB_AWAIT_SATA_POWER] = {
+ .enter_state = sci_phy_starting_await_sata_power_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_power_substate_exit
+ },
+ [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = {
+ .enter_state = sci_phy_starting_await_sata_phy_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_phy_substate_exit
+ },
+ [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = {
+ .enter_state = sci_phy_starting_await_sata_speed_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_speed_substate_exit
+ },
+ [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = {
+ .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter,
+ .exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit
+ },
+ [SCI_PHY_SUB_FINAL] = {
+ .enter_state = sci_phy_starting_final_substate_enter,
+ },
+ [SCI_PHY_READY] = {
+ .enter_state = sci_phy_ready_state_enter,
+ .exit_state = sci_phy_ready_state_exit,
+ },
+ [SCI_PHY_RESETTING] = {
+ .enter_state = sci_phy_resetting_state_enter,
+ },
+ [SCI_PHY_FINAL] = { },
+};
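+/* The state table above is bound to a phy's state machine by
+ * sci_phy_construct(), which starts the machine in SCI_PHY_INITIAL.
+ */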
+
+void sci_phy_construct(struct isci_phy *iphy,
+ struct isci_port *iport, u8 phy_index)
+{
+ sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL);
+
+ /* Copy the rest of the input data to our locals */
+ iphy->owning_port = iport;
+ iphy->phy_index = phy_index;
+ iphy->bcn_received_while_port_unassigned = false;
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+ iphy->link_layer_registers = NULL;
+ iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
+
+ /* Create the SIGNATURE FIS Timeout timer for this phy */
+ sci_init_timer(&iphy->sata_timer, phy_sata_timeout);
+}
+
+void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
+{
+ struct sci_oem_params *oem = &ihost->oem_parameters;
+ u64 sci_sas_addr;
+ __be64 sas_addr;
+
+ sci_sas_addr = oem->phys[index].sas_address.high;
+ sci_sas_addr <<= 32;
+ sci_sas_addr |= oem->phys[index].sas_address.low;
+ sas_addr = cpu_to_be64(sci_sas_addr);
+ memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
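+ /* The OEM parameters store the SAS address as two 32-bit halves in
+ * CPU order; the value is converted to big-endian byte order here and
+ * stored in the byte-array form (iphy->sas_addr) that is later handed
+ * to libsas via sas_phy.sas_addr.
+ */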
+
+ iphy->isci_port = NULL;
+ iphy->sas_phy.enabled = 0;
+ iphy->sas_phy.id = index;
+ iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
+ iphy->sas_phy.frame_rcvd = (u8 *)&iphy->frame_rcvd;
+ iphy->sas_phy.ha = &ihost->sas_ha;
+ iphy->sas_phy.lldd_phy = iphy;
+ iphy->sas_phy.enabled = 1;
+ iphy->sas_phy.class = SAS;
+ iphy->sas_phy.iproto = SAS_PROTOCOL_ALL;
+ iphy->sas_phy.tproto = 0;
+ iphy->sas_phy.type = PHY_TYPE_PHYSICAL;
+ iphy->sas_phy.role = PHY_ROLE_INITIATOR;
+ iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED;
+ iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN;
+ memset(&iphy->frame_rcvd, 0, sizeof(iphy->frame_rcvd));
+}
+
+
+/**
+ * isci_phy_control() - This function is one of the SAS Domain Template
+ * functions. This is a phy management function.
+ * @sas_phy: This parameter specifies the sas phy being controlled.
+ * @func: This parameter specifies the phy control function being invoked.
+ * @buf: This parameter is specific to the phy function being invoked.
+ *
+ * Return: status, zero indicates success.
+ */
+int isci_phy_control(struct asd_sas_phy *sas_phy,
+ enum phy_func func,
+ void *buf)
+{
+ int ret = 0;
+ struct isci_phy *iphy = sas_phy->lldd_phy;
+ struct isci_port *iport = iphy->isci_port;
+ struct isci_host *ihost = sas_phy->ha->lldd_ha;
+ unsigned long flags;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
+ __func__, sas_phy, func, buf, iphy, iport);
+
+ switch (func) {
+ case PHY_FUNC_DISABLE:
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ sci_phy_stop(iphy);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ break;
+
+ case PHY_FUNC_LINK_RESET:
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ sci_phy_stop(iphy);
+ sci_phy_start(iphy);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ break;
+
+ case PHY_FUNC_HARD_RESET:
+ if (!iport)
+ return -ENODEV;
+
+ /* Perform the port reset. */
+ ret = isci_port_perform_hard_reset(ihost, iport, iphy);
+
+ break;
+
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: phy %p; func %d NOT IMPLEMENTED!\n",
+ __func__, sas_phy, func);
+ ret = -ENOSYS;
+ break;
+ }
+ return ret;
+}
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
new file mode 100644
index 00000000000..67699c8e321
--- /dev/null
+++ b/drivers/scsi/isci/phy.h
@@ -0,0 +1,504 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_PHY_H_
+#define _ISCI_PHY_H_
+
+#include <scsi/sas.h>
+#include <scsi/libsas.h>
+#include "isci.h"
+#include "sas.h"
+
+/* This is the timeout value for the SATA phy to wait for a SIGNATURE FIS
+ * before restarting the starting state machine. Technically, the old parallel
+ * ATA specification required up to 30 seconds for a device to issue its
+ * signature FIS as a result of a soft reset. Now we see that devices respond
+ * generally within 15 seconds, but we'll use 25 for now.
+ */
+#define SCIC_SDS_SIGNATURE_FIS_TIMEOUT 25000
+
+/* This is the timeout for the SATA OOB/SN because the hardware does not
+ * recognize a hot plug after OOB signal but before the SN signals. We need to
+ * make sure after a hotplug timeout if we have not received the speed event
+ * notification from the hardware that we restart the hardware OOB state
+ * machine.
+ */
+#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250
+
+enum sci_phy_protocol {
+ SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
+ SCIC_SDS_PHY_PROTOCOL_SAS,
+ SCIC_SDS_PHY_PROTOCOL_SATA,
+ SCIC_SDS_MAX_PHY_PROTOCOLS
+};
+
+/**
+ * isci_phy - hba local phy infrastructure
+ * @sm: phy state machine
+ * @protocol: attached device protocol
+ * @phy_index: physical index relative to the controller (0-3)
+ * @bcn_received_while_port_unassigned: bcn to report after port association
+ * @sata_timer: timeout SATA signature FIS arrival
+ */
+struct isci_phy {
+ struct sci_base_state_machine sm;
+ struct isci_port *owning_port;
+ enum sas_linkrate max_negotiated_speed;
+ enum sci_phy_protocol protocol;
+ u8 phy_index;
+ bool bcn_received_while_port_unassigned;
+ bool is_in_link_training;
+ struct sci_timer sata_timer;
+ struct scu_transport_layer_registers __iomem *transport_layer_registers;
+ struct scu_link_layer_registers __iomem *link_layer_registers;
+ struct asd_sas_phy sas_phy;
+ struct isci_port *isci_port;
+ u8 sas_addr[SAS_ADDR_SIZE];
+ union {
+ struct sas_identify_frame iaf;
+ struct dev_to_host_fis fis;
+ } frame_rcvd;
+};
+
+static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy)
+{
+ struct isci_phy *iphy = container_of(sas_phy, typeof(*iphy), sas_phy);
+
+ return iphy;
+}
+
+struct sci_phy_cap {
+ union {
+ struct {
+ /*
+ * The SAS specification indicates the start bit shall
+ * always be set to 1. This implementation will have the
+ * start bit set to 0 if the PHY CAPABILITIES were either
+ * not received or speed negotiation failed.
+ */
+ u8 start:1;
+ u8 tx_ssc_type:1;
+ u8 res1:2;
+ u8 req_logical_linkrate:4;
+
+ u32 gen1_no_ssc:1;
+ u32 gen1_ssc:1;
+ u32 gen2_no_ssc:1;
+ u32 gen2_ssc:1;
+ u32 gen3_no_ssc:1;
+ u32 gen3_ssc:1;
+ u32 res2:17;
+ u32 parity:1;
+ };
+ u32 all;
+ };
+} __packed;
+
+/* this data structure reflects the link layer transmit identification reg */
+struct sci_phy_proto {
+ union {
+ struct {
+ u16 _r_a:1;
+ u16 smp_iport:1;
+ u16 stp_iport:1;
+ u16 ssp_iport:1;
+ u16 _r_b:4;
+ u16 _r_c:1;
+ u16 smp_tport:1;
+ u16 stp_tport:1;
+ u16 ssp_tport:1;
+ u16 _r_d:4;
+ };
+ u16 all;
+ };
+} __packed;
+
+
+/**
+ * struct sci_phy_properties - This structure defines the properties common to
+ * all phys that can be retrieved.
+ *
+ *
+ */
+struct sci_phy_properties {
+ /**
+ * This field specifies the port that currently contains the
+ * supplied phy. This field may be set to NULL
+ * if the phy is not currently contained in a port.
+ */
+ struct isci_port *iport;
+
+ /**
+ * This field specifies the link rate at which the phy is
+ * currently operating.
+ */
+ enum sas_linkrate negotiated_link_rate;
+
+ /**
+ * This field specifies the index of the phy in relation to other
+ * phys within the controller. This index is zero relative.
+ */
+ u8 index;
+};
+
+/**
+ * struct sci_sas_phy_properties - This structure defines the properties,
+ * specific to a SAS phy, that can be retrieved.
+ *
+ *
+ */
+struct sci_sas_phy_properties {
+ /**
+ * This field delineates the Identify Address Frame received
+ * from the remote end point.
+ */
+ struct sas_identify_frame rcvd_iaf;
+
+ /**
+ * This field delineates the Phy capabilities structure received
+ * from the remote end point.
+ */
+ struct sci_phy_cap rcvd_cap;
+
+};
+
+/**
+ * struct sci_sata_phy_properties - This structure defines the properties,
+ * specific to a SATA phy, that can be retrieved.
+ *
+ *
+ */
+struct sci_sata_phy_properties {
+ /**
+ * This field delineates the signature FIS received from the
+ * attached target.
+ */
+ struct dev_to_host_fis signature_fis;
+
+ /**
+ * This field specifies to the user if a port selector is connected
+ * on the specified phy.
+ */
+ bool is_port_selector_present;
+
+};
+
+/**
+ * enum sci_phy_counter_id - This enumeration depicts the various pieces of
+ * optional information that can be retrieved for a specific phy.
+ *
+ *
+ */
+enum sci_phy_counter_id {
+ /**
+ * This PHY information field tracks the number of frames received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME,
+
+ /**
+ * This PHY information field tracks the number of frames transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_FRAME,
+
+ /**
+ * This PHY information field tracks the number of DWORDs received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_WORD,
+
+ /**
+ * This PHY information field tracks the number of DWORDs transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD,
+
+ /**
+ * This PHY information field tracks the number of times DWORD
+ * synchronization was lost.
+ */
+ SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR,
+
+ /**
+ * This PHY information field tracks the number of received DWORDs with
+ * running disparity errors.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR,
+
+ /**
+ * This PHY information field tracks the number of received frames with a
+ * CRC error (not including short or truncated frames).
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR,
+
+ /**
+ * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
+ * primitives received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
+ * primitives transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of times the inactivity
+ * timer for connections on the phy has been utilized.
+ */
+ SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED,
+
+ /**
+ * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
+ * primitives received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
+ * primitives transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of CREDIT BLOCKED
+ * primitives received.
+ * @note Depending on remote device implementation, credit blocks
+ * may occur regularly.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED,
+
+ /**
+ * This PHY information field contains the number of short frames
+ * received. A short frame is simply a frame smaller than what is
+ * allowed by either the SAS or SATA specification.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME,
+
+ /**
+ * This PHY information field contains the number of frames received after
+ * credit has been exhausted.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT,
+
+ /**
+ * This PHY information field contains the number of frames received after
+ * a DONE has been received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE,
+
+ /**
+ * This PHY information field contains the number of times the phy
+ * failed to achieve DWORD synchronization during speed negotiation.
+ */
+ SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
+};
+
+enum sci_phy_states {
+ /**
+ * Simply the initial state for the base domain state machine.
+ */
+ SCI_PHY_INITIAL,
+
+ /**
+ * This state indicates that the phy has successfully been stopped.
+ * In this state no new IO operations are permitted on this phy.
+ * This state is entered from the INITIAL state.
+ * This state is entered from the STARTING state.
+ * This state is entered from the READY state.
+ * This state is entered from the RESETTING state.
+ */
+ SCI_PHY_STOPPED,
+
+ /**
+ * This state indicates that the phy is in the process of becoming
+ * ready. In this state no new IO operations are permitted on this phy.
+ * This state is entered from the STOPPED state.
+ * This state is entered from the READY state.
+ * This state is entered from the RESETTING state.
+ */
+ SCI_PHY_STARTING,
+
+ /**
+ * Initial state
+ */
+ SCI_PHY_SUB_INITIAL,
+
+ /**
+ * Wait state for the hardware OSSP event type notification
+ */
+ SCI_PHY_SUB_AWAIT_OSSP_EN,
+
+ /**
+ * Wait state for the PHY speed notification
+ */
+ SCI_PHY_SUB_AWAIT_SAS_SPEED_EN,
+
+ /**
+ * Wait state for the IAF Unsolicited frame notification
+ */
+ SCI_PHY_SUB_AWAIT_IAF_UF,
+
+ /**
+ * Wait state for the request to consume power
+ */
+ SCI_PHY_SUB_AWAIT_SAS_POWER,
+
+ /**
+ * Wait state for request to consume power
+ */
+ SCI_PHY_SUB_AWAIT_SATA_POWER,
+
+ /**
+ * Wait state for the SATA PHY notification
+ */
+ SCI_PHY_SUB_AWAIT_SATA_PHY_EN,
+
+ /**
+ * Wait for the SATA PHY speed notification
+ */
+ SCI_PHY_SUB_AWAIT_SATA_SPEED_EN,
+
+ /**
+ * Wait state for the SIGNATURE FIS unsolicited frame notification
+ */
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF,
+
+ /**
+ * Exit state for this state machine
+ */
+ SCI_PHY_SUB_FINAL,
+
+ /**
+ * This state indicates that the phy is now ready. Thus, the user
+ * is able to perform IO operations utilizing this phy as long as it
+ * is currently part of a valid port.
+ * This state is entered from the STARTING state.
+ */
+ SCI_PHY_READY,
+
+ /**
+ * This state indicates that the phy is in the process of being reset.
+ * In this state no new IO operations are permitted on this phy.
+ * This state is entered from the READY state.
+ */
+ SCI_PHY_RESETTING,
+
+ /**
+ * Simply the final state for the base phy state machine.
+ */
+ SCI_PHY_FINAL,
+};
+
+void sci_phy_construct(
+ struct isci_phy *iphy,
+ struct isci_port *iport,
+ u8 phy_index);
+
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy);
+
+void sci_phy_set_port(
+ struct isci_phy *iphy,
+ struct isci_port *iport);
+
+enum sci_status sci_phy_initialize(
+ struct isci_phy *iphy,
+ struct scu_transport_layer_registers __iomem *transport_layer_registers,
+ struct scu_link_layer_registers __iomem *link_layer_registers);
+
+enum sci_status sci_phy_start(
+ struct isci_phy *iphy);
+
+enum sci_status sci_phy_stop(
+ struct isci_phy *iphy);
+
+enum sci_status sci_phy_reset(
+ struct isci_phy *iphy);
+
+void sci_phy_resume(
+ struct isci_phy *iphy);
+
+void sci_phy_setup_transport(
+ struct isci_phy *iphy,
+ u32 device_id);
+
+enum sci_status sci_phy_event_handler(
+ struct isci_phy *iphy,
+ u32 event_code);
+
+enum sci_status sci_phy_frame_handler(
+ struct isci_phy *iphy,
+ u32 frame_index);
+
+enum sci_status sci_phy_consume_power_handler(
+ struct isci_phy *iphy);
+
+void sci_phy_get_sas_address(
+ struct isci_phy *iphy,
+ struct sci_sas_address *sas_address);
+
+void sci_phy_get_attached_sas_address(
+ struct isci_phy *iphy,
+ struct sci_sas_address *sas_address);
+
+struct sci_phy_proto;
+void sci_phy_get_protocols(
+ struct isci_phy *iphy,
+ struct sci_phy_proto *protocols);
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy);
+
+struct isci_host;
+void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index);
+int isci_phy_control(struct asd_sas_phy *phy, enum phy_func func, void *buf);
+
+#endif /* !defined(_ISCI_PHY_H_) */
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
new file mode 100644
index 00000000000..8f6f9b77e41
--- /dev/null
+++ b/drivers/scsi/isci/port.c
@@ -0,0 +1,1757 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "port.h"
+#include "request.h"
+
+#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
+#define SCU_DUMMY_INDEX (0xFFFF)
+
+static void isci_port_change_state(struct isci_port *iport, enum isci_status status)
+{
+ unsigned long flags;
+
+ dev_dbg(&iport->isci_host->pdev->dev,
+ "%s: iport = %p, state = 0x%x\n",
+ __func__, iport, status);
+
+ /* XXX pointless lock */
+ spin_lock_irqsave(&iport->state_lock, flags);
+ iport->status = status;
+ spin_unlock_irqrestore(&iport->state_lock, flags);
+}
+
+static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
+{
+ u8 index;
+
+ proto->all = 0;
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct isci_phy *iphy = iport->phy_table[index];
+
+ if (!iphy)
+ continue;
+ sci_phy_get_protocols(iphy, proto);
+ }
+}
+
+static u32 sci_port_get_phys(struct isci_port *iport)
+{
+ u32 index;
+ u32 mask;
+
+ mask = 0;
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index])
+ mask |= (1 << index);
+
+ return mask;
+}
+
+/**
+ * sci_port_get_properties() - This method simply returns the properties
+ * regarding the port, such as: physical index, protocols, sas address, etc.
+ * @iport: this parameter specifies the port for which to retrieve the
+ * properties.
+ * @prop: This parameter specifies the properties structure into which to
+ * copy the requested information.
+ *
+ * Return: SCI_SUCCESS if the specified port was valid, or
+ * SCI_FAILURE_INVALID_PORT if the specified port is not valid. When
+ * SCI_FAILURE_INVALID_PORT is returned, no data is copied to the
+ * properties output parameter.
+ */
+static enum sci_status sci_port_get_properties(struct isci_port *iport,
+ struct sci_port_properties *prop)
+{
+ if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
+ return SCI_FAILURE_INVALID_PORT;
+
+ prop->index = iport->logical_port_index;
+ prop->phy_mask = sci_port_get_phys(iport);
+ sci_port_get_sas_address(iport, &prop->local.sas_address);
+ sci_port_get_protocols(iport, &prop->local.protocols);
+ sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
+
+ return SCI_SUCCESS;
+}
+
+static void sci_port_bcn_enable(struct isci_port *iport)
+{
+ struct isci_phy *iphy;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
+ iphy = iport->phy_table[i];
+ if (!iphy)
+ continue;
+ val = readl(&iphy->link_layer_registers->link_layer_control);
+ /* the bit is write-1-to-clear: writing back the value just read clears it */
+ writel(val, &iphy->link_layer_registers->link_layer_control);
+ }
+}
+
+/* called under sci_lock to stabilize phy:port associations */
+void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport)
+{
+ int i;
+
+ clear_bit(IPORT_BCN_BLOCKED, &iport->flags);
+ wake_up(&ihost->eventq);
+
+ if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
+ struct isci_phy *iphy = iport->phy_table[i];
+
+ if (!iphy)
+ continue;
+
+ ihost->sas_ha.notify_port_event(&iphy->sas_phy,
+ PORTE_BROADCAST_RCVD);
+ break;
+ }
+}
+
+static void isci_port_bc_change_received(struct isci_host *ihost,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: disabled BCN; isci_phy = %p, sas_phy = %p\n",
+ __func__, iphy, &iphy->sas_phy);
+ set_bit(IPORT_BCN_PENDING, &iport->flags);
+ atomic_inc(&iport->event);
+ wake_up(&ihost->eventq);
+ } else {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_phy = %p, sas_phy = %p\n",
+ __func__, iphy, &iphy->sas_phy);
+
+ ihost->sas_ha.notify_port_event(&iphy->sas_phy,
+ PORTE_BROADCAST_RCVD);
+ }
+ sci_port_bcn_enable(iport);
+}
+
+static void isci_port_link_up(struct isci_host *isci_host,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ unsigned long flags;
+ struct sci_port_properties properties;
+ unsigned long success = true;
+
+ BUG_ON(iphy->isci_port != NULL);
+
+ iphy->isci_port = iport;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p\n",
+ __func__, iport);
+
+ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+ isci_port_change_state(iphy->isci_port, isci_starting);
+
+ sci_port_get_properties(iport, &properties);
+
+ if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
+ u64 attached_sas_address;
+
+ iphy->sas_phy.oob_mode = SATA_OOB_MODE;
+ iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);
+
+ /*
+ * For direct-attached SATA devices, the SCI core will
+ * automagically assign a SAS address to the end device
+ * for the purpose of creating a port. This SAS address
+ * will not be the same as assigned to the PHY and needs
+ * to be obtained from struct sci_port_properties properties.
+ */
+ attached_sas_address = properties.remote.sas_address.high;
+ attached_sas_address <<= 32;
+ attached_sas_address |= properties.remote.sas_address.low;
+ swab64s(&attached_sas_address);
+
+ memcpy(&iphy->sas_phy.attached_sas_addr,
+ &attached_sas_address, sizeof(attached_sas_address));
+ } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ iphy->sas_phy.oob_mode = SAS_OOB_MODE;
+ iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
+
+ /* Copy the attached SAS address from the IAF */
+ memcpy(iphy->sas_phy.attached_sas_addr,
+ iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
+ } else {
+ dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
+ success = false;
+ }
+
+ iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);
+
+ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+ /* Notify libsas that we have an address frame, if indeed
+ * we've found an SSP, SMP, or STP target */
+ if (success)
+ isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
+ PORTE_BYTES_DMAED);
+}
+
+
+/**
+ * isci_port_link_down() - This function is called by the sci core when a link
+ * becomes inactive.
+ * @isci_host: This parameter specifies the isci host object.
+ * @isci_phy: This parameter specifies the isci phy whose link has gone down.
+ * @isci_port: This parameter specifies the isci port that contained the link.
+ *
+ */
+static void isci_port_link_down(struct isci_host *isci_host,
+ struct isci_phy *isci_phy,
+ struct isci_port *isci_port)
+{
+ struct isci_remote_device *isci_device;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p\n", __func__, isci_port);
+
+ if (isci_port) {
+
+ /* check to see if this is the last phy on this port. */
+ if (isci_phy->sas_phy.port &&
+ isci_phy->sas_phy.port->num_phys == 1) {
+ atomic_inc(&isci_port->event);
+ isci_port_bcn_enable(isci_host, isci_port);
+
+ /* change the state for all devices on this port. The
+ * next task sent to this device will be returned as
+ * SAS_TASK_UNDELIVERED, and the scsi mid layer will
+ * remove the target
+ */
+ list_for_each_entry(isci_device,
+ &isci_port->remote_dev_list,
+ node) {
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p\n",
+ __func__, isci_device);
+ set_bit(IDEV_GONE, &isci_device->flags);
+ }
+ }
+ isci_port_change_state(isci_port, isci_stopping);
+ }
+
+ /* Notify libsas of the broken link; this will trigger calls to our
+ * isci_port_deformed and isci_dev_gone functions.
+ */
+ sas_phy_disconnected(&isci_phy->sas_phy);
+ isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+
+ isci_phy->isci_port = NULL;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p - Done\n", __func__, isci_port);
+}
+
+
+/**
+ * isci_port_ready() - This function is called by the sci core when a link
+ * becomes ready.
+ * @isci_host: This parameter specifies the isci host object.
+ * @port: This parameter specifies the sci port with the active link.
+ *
+ */
+static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
+{
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p\n", __func__, isci_port);
+
+ complete_all(&isci_port->start_complete);
+ isci_port_change_state(isci_port, isci_ready);
+ return;
+}
+
+/**
+ * isci_port_not_ready() - This function is called by the sci core when a link
+ * is not ready. All remote devices on this link will be removed if they are
+ * in the stopping state.
+ * @isci_host: This parameter specifies the isci host object.
+ * @port: This parameter specifies the sci port with the active link.
+ *
+ */
+static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
+{
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p\n", __func__, isci_port);
+}
+
+static void isci_port_stop_complete(struct isci_host *ihost,
+ struct isci_port *iport,
+ enum sci_status completion_status)
+{
+ dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
+}
+
+/**
+ * isci_port_hard_reset_complete() - This function is called by the sci core
+ * when the hard reset complete notification has been received.
+ * @port: This parameter specifies the sci port with the active link.
+ * @completion_status: This parameter specifies the core status for the reset
+ * process.
+ *
+ */
+static void isci_port_hard_reset_complete(struct isci_port *isci_port,
+ enum sci_status completion_status)
+{
+ dev_dbg(&isci_port->isci_host->pdev->dev,
+ "%s: isci_port = %p, completion_status=%x\n",
+ __func__, isci_port, completion_status);
+
+ /* Save the status of the hard reset from the port. */
+ isci_port->hard_reset_status = completion_status;
+
+ complete_all(&isci_port->hard_reset_complete);
+}
+
+/* This method returns true if the specified phy can be assigned to this port.
+ * The phys allowed for each port are:
+ *   Port 0 - phys 3, 2, 1, 0
+ *   Port 1 - phy 1
+ *   Port 2 - phys 3, 2
+ *   Port 3 - phy 3
+ * This method doesn't preclude all configurations. It merely ensures that a
+ * phy is part of the allowable set of phy identifiers for that port. For
+ * example, one could assign phy 3 to port 0 and no other phys. Please refer
+ * to sci_port_is_phy_mask_valid() for information regarding whether the
+ * phy_mask for a port can be supported.
+ *
+ * Returns true if this is a valid phy assignment for the port, false if it
+ * is not.
+ */
+bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ struct sci_user_parameters *user = &ihost->user_parameters;
+
+ /* Initialize to invalid value. */
+ u32 existing_phy_index = SCI_MAX_PHYS;
+ u32 index;
+
+ if ((iport->physical_port_index == 1) && (phy_index != 1))
+ return false;
+
+ if (iport->physical_port_index == 3 && phy_index != 3)
+ return false;
+
+ if (iport->physical_port_index == 2 &&
+ (phy_index == 0 || phy_index == 1))
+ return false;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index] && index != phy_index)
+ existing_phy_index = index;
+
+ /* Ensure that all of the phys in the port are capable of
+ * operating at the same maximum link rate.
+ */
+ if (existing_phy_index < SCI_MAX_PHYS &&
+ user->phys[phy_index].max_speed_generation !=
+ user->phys[existing_phy_index].max_speed_generation)
+ return false;
+
+ return true;
+}
+
+/**
+ * sci_port_is_phy_mask_valid() - determine whether the port's phy mask can
+ *    be supported.
+ * @iport: This is the port object for which to determine if the phy mask
+ *    can be supported.
+ * @phy_mask: The candidate phy mask for this port.
+ *
+ * This method returns true if the port's phy mask can be supported by the
+ * SCU. The valid PHY mask configurations for each port are:
+ *   Port 0 - [[3 2] 1] 0
+ *   Port 1 - [1]
+ *   Port 2 - [[3] 2]
+ *   Port 3 - [3]
+ *
+ * Return: true if this is a supportable phy mask for the port, false if it
+ * is not.
+ */
+static bool sci_port_is_phy_mask_valid(
+ struct isci_port *iport,
+ u32 phy_mask)
+{
+ if (iport->physical_port_index == 0) {
+ if (((phy_mask & 0x0F) == 0x0F)
+ || ((phy_mask & 0x03) == 0x03)
+ || ((phy_mask & 0x01) == 0x01)
+ || (phy_mask == 0))
+ return true;
+ } else if (iport->physical_port_index == 1) {
+ if (((phy_mask & 0x02) == 0x02)
+ || (phy_mask == 0))
+ return true;
+ } else if (iport->physical_port_index == 2) {
+ if (((phy_mask & 0x0C) == 0x0C)
+ || ((phy_mask & 0x04) == 0x04)
+ || (phy_mask == 0))
+ return true;
+ } else if (iport->physical_port_index == 3) {
+ if (((phy_mask & 0x08) == 0x08)
+ || (phy_mask == 0))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * This method retrieves a currently active (i.e. connected) phy contained in
+ * the port. Currently, the lowest order phy that is connected is returned.
+ *
+ * Returns a pointer to the struct isci_phy that is active in the port, or
+ * NULL if there are no currently active (i.e. connected to a remote end
+ * point) phys contained in the port.
+ */
+static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
+{
+ u32 index;
+ struct isci_phy *iphy;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ /* Ensure that the phy is both part of the port and currently
+ * connected to the remote end-point.
+ */
+ iphy = iport->phy_table[index];
+ if (iphy && sci_port_active_phy(iport, iphy))
+ return iphy;
+ }
+
+ return NULL;
+}
+
+static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+ /* Check to see if we can add this phy to the port; that means the
+ * phy is not yet part of a port and the port does not already have
+ * a phy assigned to this phy index.
+ */
+ if (!iport->phy_table[iphy->phy_index] &&
+ !phy_get_non_dummy_port(iphy) &&
+ sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+ /* Phy is being added in the stopped state so we are in MPC mode
+ * make logical port index = physical port index
+ */
+ iport->logical_port_index = iport->physical_port_index;
+ iport->phy_table[iphy->phy_index] = iphy;
+ sci_phy_set_port(iphy, iport);
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE;
+}
+
+static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+ /* Make sure that this phy is part of this port */
+ if (iport->phy_table[iphy->phy_index] == iphy &&
+ phy_get_non_dummy_port(iphy) == iport) {
+ struct isci_host *ihost = iport->owning_controller;
+
+ /* Yep it is assigned to this port so remove it */
+ sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
+ iport->phy_table[iphy->phy_index] = NULL;
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE;
+}
+
+void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
+{
+ u32 index;
+
+ sas->high = 0;
+ sas->low = 0;
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index])
+ sci_phy_get_sas_address(iport->phy_table[index], sas);
+}
+
+void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
+{
+ struct isci_phy *iphy;
+
+ /*
+ * Ensure that the phy is both part of the port and currently
+ * connected to the remote end-point.
+ */
+ iphy = sci_port_get_a_connected_phy(iport);
+ if (iphy) {
+ if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
+ sci_phy_get_attached_sas_address(iphy, sas);
+ } else {
+ sci_phy_get_sas_address(iphy, sas);
+ sas->low += iphy->phy_index;
+ }
+ } else {
+ sas->high = 0;
+ sas->low = 0;
+ }
+}
+
+/**
+ * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
+ *
+ * @sci_port: logical port on which we need to create the remote node context
+ * @rni: remote node index for this remote node context.
+ *
+ * This routine will construct a dummy remote node context data structure.
+ * This structure will be posted to the hardware to work around a scheduler
+ * error in the hardware.
+ */
+static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
+{
+ union scu_remote_node_context *rnc;
+
+ rnc = &iport->owning_controller->remote_node_context_table[rni];
+
+ memset(rnc, 0, sizeof(union scu_remote_node_context));
+
+ rnc->ssp.remote_sas_address_hi = 0;
+ rnc->ssp.remote_sas_address_lo = 0;
+
+ rnc->ssp.remote_node_index = rni;
+ rnc->ssp.remote_node_port_width = 1;
+ rnc->ssp.logical_port_index = iport->physical_port_index;
+
+ rnc->ssp.nexus_loss_timer_enable = false;
+ rnc->ssp.check_bit = false;
+ rnc->ssp.is_valid = true;
+ rnc->ssp.is_remote_node_context = true;
+ rnc->ssp.function_number = 0;
+ rnc->ssp.arbitration_wait_time = 0;
+}
+
+/*
+ * Construct a dummy task context data structure. This
+ * structure will be posted to the hardware to work around a scheduler error
+ * in the hardware.
+ */
+static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ struct scu_task_context *task_context;
+
+ task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+ memset(task_context, 0, sizeof(struct scu_task_context));
+
+ task_context->initiator_request = 1;
+ task_context->connection_rate = 1;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+ task_context->task_index = ISCI_TAG_TCI(tag);
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+ task_context->remote_node_index = iport->reserved_rni;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->task_phase = 0x01;
+}
+
+static void sci_port_destroy_dummy_resources(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
+ isci_free_tag(ihost, iport->reserved_tag);
+
+ if (iport->reserved_rni != SCU_DUMMY_INDEX)
+ sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
+ 1, iport->reserved_rni);
+
+ iport->reserved_rni = SCU_DUMMY_INDEX;
+ iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
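+/* Hand the remote device id to the transport layer of every phy that is
+ * currently active in this port.
+ */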
+void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
+{
+ u8 index;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if (iport->active_phy_mask & (1 << index))
+ sci_phy_setup_transport(iport->phy_table[index], device_id);
+ }
+}
+
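+/* Make a phy an active member of the port: resume the phy (non-SATA only),
+ * record it in the active phy mask and, when requested, report link up to
+ * the upper layer.
+ */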
+static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
+ bool do_notify_user)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
+ sci_phy_resume(iphy);
+
+ iport->active_phy_mask |= 1 << iphy->phy_index;
+
+ sci_controller_clear_invalid_phy(ihost, iphy);
+
+ if (do_notify_user == true)
+ isci_port_link_up(ihost, iport, iphy);
+}
+
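+/* Remove a phy from the port's active phy mask, reset its negotiated link
+ * rate and, when requested, report link down to the upper layer.
+ */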
+void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
+ bool do_notify_user)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ iport->active_phy_mask &= ~(1 << iphy->phy_index);
+
+ iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
+
+ /* Re-assign the phy back to the LP as if it were a narrow port */
+ writel(iphy->phy_index,
+ &iport->port_pe_configuration_register[iphy->phy_index]);
+
+ if (do_notify_user == true)
+ isci_port_link_down(ihost, iphy, iport);
+}
+
+static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ /*
+ * Check to see if we have already reported this link as bad and if
+ * not go ahead and tell the SCI_USER that we have discovered an
+ * invalid link.
+ */
+ if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
+ ihost->invalid_phy_mask |= 1 << iphy->phy_index;
+ dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
+ }
+}
+
+static bool is_port_ready_state(enum sci_port_states state)
+{
+ switch (state) {
+ case SCI_PORT_READY:
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ case SCI_PORT_SUB_CONFIGURING:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* flag dummy rnc handling when exiting a ready state */
+static void port_state_machine_change(struct isci_port *iport,
+ enum sci_port_states state)
+{
+ struct sci_base_state_machine *sm = &iport->sm;
+ enum sci_port_states old_state = sm->current_state_id;
+
+ if (is_port_ready_state(old_state) && !is_port_ready_state(state))
+ iport->ready_exit = true;
+
+ sci_change_state(sm, state);
+ iport->ready_exit = false;
+}
+
+/**
+ * sci_port_general_link_up_handler - phy can be assigned to port?
+ * @iport: the port which has a phy that has gone link up.
+ * @iphy: This is the struct isci_phy object that has gone link up.
+ * @do_notify_user: This parameter specifies whether to inform the user (via
+ *    isci_port_link_up()) as to the fact that a new phy has become ready.
+ *
+ * Determine if this phy can be assigned to this port. If the phy is not a
+ * valid PHY for this port then the function will notify the user. A PHY can
+ * only be part of a port if its attached SAS ADDRESS is the same as that of
+ * all other PHYs in the same port.
+ */
+static void sci_port_general_link_up_handler(struct isci_port *iport,
+ struct isci_phy *iphy,
+ bool do_notify_user)
+{
+ struct sci_sas_address port_sas_address;
+ struct sci_sas_address phy_sas_address;
+
+ sci_port_get_attached_sas_address(iport, &port_sas_address);
+ sci_phy_get_attached_sas_address(iphy, &phy_sas_address);
+
+ /* If the SAS address of the new phy matches the SAS address of
+ * other phys in the port OR this is the first phy in the port,
+ * then activate the phy and allow it to be used for operations
+ * in this port.
+ */
+ if ((phy_sas_address.high == port_sas_address.high &&
+ phy_sas_address.low == port_sas_address.low) ||
+ iport->active_phy_mask == 0) {
+ struct sci_base_state_machine *sm = &iport->sm;
+
+ sci_port_activate_phy(iport, iphy, do_notify_user);
+ if (sm->current_state_id == SCI_PORT_RESETTING)
+ port_state_machine_change(iport, SCI_PORT_READY);
+ } else
+ sci_port_invalid_link_up(iport, iphy);
+}
+
+
+
+/**
+ * sci_port_is_wide() - report whether the port is a wide port.
+ * @iport: The port for which the wide port condition is to be checked.
+ *
+ * Returns false if the port has exactly one phy object assigned; returns
+ * true if there are no phys or more than one phy assigned (i.e. this is a
+ * wide port rather than a narrow port).
+ */
+static bool sci_port_is_wide(struct isci_port *iport)
+{
+ u32 index;
+ u32 phy_count = 0;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if (iport->phy_table[index] != NULL) {
+ phy_count++;
+ }
+ }
+
+ return phy_count != 1;
+}
+
+/**
+ * sci_port_link_detected() - This method is called by the PHY object when the
+ * link is detected. If the port wants the PHY to continue on to the link up
+ * state then the port layer must return true. If the port object returns
+ * false the phy object must halt its attempt to go link up.
+ * @iport: The port associated with the phy object.
+ * @iphy: The phy object that is trying to go link up.
+ *
+ * Returns true if this phy can continue on to the ready state, false if it
+ * can not. This notification is in place for wide ports and direct attached
+ * phys. Since there are no wide ported SATA devices this could become an
+ * invalid port configuration.
+ */
+bool sci_port_link_detected(
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
+ (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
+ sci_port_is_wide(iport)) {
+ sci_port_invalid_link_up(iport, iphy);
+
+ return false;
+ }
+
+ return true;
+}
+
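+/* Port timer expiry handler: the action taken depends on the state the port
+ * is in when the timer fires (resetting, stopped, stopping, or ready).
+ */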
+static void port_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
+ struct isci_host *ihost = iport->owning_controller;
+ unsigned long flags;
+ u32 current_state;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ current_state = iport->sm.current_state_id;
+
+ if (current_state == SCI_PORT_RESETTING) {
+ /* if the port is still in the resetting state then the timeout
+ * fired before the reset completed.
+ */
+ port_state_machine_change(iport, SCI_PORT_FAILED);
+ } else if (current_state == SCI_PORT_STOPPED) {
+ /* if the port is stopped then the start request failed. In this
+ * case stay in the stopped state.
+ */
+ dev_err(sciport_to_dev(iport),
+ "%s: SCIC Port 0x%p failed to stop before tiemout.\n",
+ __func__,
+ iport);
+ } else if (current_state == SCI_PORT_STOPPING) {
+ /* if the port is still stopping then the stop has not completed */
+ isci_port_stop_complete(iport->owning_controller,
+ iport,
+ SCI_FAILURE_TIMEOUT);
+ } else {
+ /* The port is in the ready state and we have a timer
+ * reporting a timeout; this should not happen.
+ */
+ dev_err(sciport_to_dev(iport),
+ "%s: SCIC Port 0x%p is processing a timeout operation "
+ "in state %d.\n", __func__, iport, current_state);
+ }
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/* --------------------------------------------------------------------------- */
+
+/**
+ * sci_port_update_viit_entry() - This function updates the hardware's VIIT
+ * entry for this port.
+ */
+static void sci_port_update_viit_entry(struct isci_port *iport)
+{
+ struct sci_sas_address sas_address;
+
+ sci_port_get_sas_address(iport, &sas_address);
+
+ writel(sas_address.high,
+ &iport->viit_registers->initiator_sas_address_hi);
+ writel(sas_address.low,
+ &iport->viit_registers->initiator_sas_address_lo);
+
+ /* This value gets cleared just in case it is not already cleared */
+ writel(0, &iport->viit_registers->reserved);
+
+ /* We are required to update the status register last */
+ writel(SCU_VIIT_ENTRY_ID_VIIT |
+ SCU_VIIT_IPPT_INITIATOR |
+ ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
+ SCU_VIIT_STATUS_ALL_VALID,
+ &iport->viit_registers->status);
+}
+
+enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
+{
+ u16 index;
+ struct isci_phy *iphy;
+ enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;
+
+ /*
+ * Loop through all of the phys in this port and find the phy with the
+ * lowest maximum link rate. */
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ iphy = iport->phy_table[index];
+ if (iphy && sci_port_active_phy(iport, iphy) &&
+ iphy->max_negotiated_speed < max_allowed_speed)
+ max_allowed_speed = iphy->max_negotiated_speed;
+ }
+
+ return max_allowed_speed;
+}
+
+static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+/**
+ * sci_port_post_dummy_request() - post dummy/workaround request
+ * @sci_port: port to post task
+ *
+ * Prevent the hardware scheduler from posting new requests to the front
+ * of the scheduler queue, which would cause a starvation problem for
+ * currently ongoing requests.
+ *
+ */
+static void sci_port_post_dummy_request(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u16 tag = iport->reserved_tag;
+ struct scu_task_context *tc;
+ u32 command;
+
+ tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+ tc->abort = 0;
+
+ command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
+ ISCI_TAG_TCI(tag);
+
+ sci_controller_post_request(ihost, command);
+}
+
+/**
+ * sci_port_abort_dummy_request() - This routine will abort the dummy request.
+ * This allows the hardware to power down parts of the silicon to save power.
+ *
+ * @iport: The port on which the task must be aborted.
+ *
+ */
+static void sci_port_abort_dummy_request(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u16 tag = iport->reserved_tag;
+ struct scu_task_context *tc;
+ u32 command;
+
+ tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+ tc->abort = 1;
+
+ command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
+ iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
+ ISCI_TAG_TCI(tag);
+
+ sci_controller_post_request(ihost, command);
+}
+
+/**
+ * sci_port_resume_port_task_scheduler() - resume the port task scheduler
+ * for this port object.
+ * @iport: This is the struct isci_port object to resume.
+ */
+static void
+sci_port_resume_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_port_suspend_port_task_scheduler(iport);
+
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
+
+ if (iport->active_phy_mask != 0) {
+ /* At least one of the phys on the port is ready */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+ }
+}
+
+static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
+{
+ u32 index;
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+
+ isci_port_ready(ihost, iport);
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if (iport->phy_table[index]) {
+ writel(iport->physical_port_index,
+ &iport->port_pe_configuration_register[
+ iport->phy_table[index]->phy_index]);
+ }
+ }
+
+ sci_port_update_viit_entry(iport);
+
+ sci_port_resume_port_task_scheduler(iport);
+
+ /*
+ * Post the dummy task for the port so the hardware can schedule
+ * io correctly
+ */
+ sci_port_post_dummy_request(iport);
+}
+
+static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u8 phys_index = iport->physical_port_index;
+ union scu_remote_node_context *rnc;
+ u16 rni = iport->reserved_rni;
+ u32 command;
+
+ rnc = &ihost->remote_node_context_table[rni];
+
+ rnc->ssp.is_valid = false;
+
+ /* ensure the preceding tc abort request has reached the
+ * controller and give it ample time to act before posting the rnc
+ * invalidate
+ */
+ readl(&ihost->smu_registers->interrupt_status); /* flush */
+ udelay(10);
+
+ command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
+ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+ sci_controller_post_request(ihost, command);
+}
+
+/**
+ *
+ * @sm: The port state machine, embedded in a struct isci_port object.
+ *
+ * This method will perform the actions required by the struct isci_port on
+ * exiting the SCI_PORT_SUB_OPERATIONAL state. It aborts the outstanding
+ * dummy request and reports the port as not ready.
+ */
+static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+
+ /*
+ * Kill the dummy task for this port; if it has not yet posted,
+ * the hardware will treat this as a NOP and just return abort
+ * complete.
+ */
+ sci_port_abort_dummy_request(iport);
+
+ isci_port_not_ready(ihost, iport);
+
+ if (iport->ready_exit)
+ sci_port_invalidate_dummy_remote_node(iport);
+}
+
+static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+
+ if (iport->active_phy_mask == 0) {
+ isci_port_not_ready(ihost, iport);
+
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_WAITING);
+ } else if (iport->started_request_count == 0)
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+}
+
+static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_port_suspend_port_task_scheduler(iport);
+ if (iport->ready_exit)
+ sci_port_invalidate_dummy_remote_node(iport);
+}
+
+enum sci_status sci_port_start(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ enum sci_status status = SCI_SUCCESS;
+ enum sci_port_states state;
+ u32 phy_mask;
+
+ state = iport->sm.current_state_id;
+ if (state != SCI_PORT_STOPPED) {
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ if (iport->assigned_device_count > 0) {
+ /* TODO This is a start failure operation because
+ * there are still devices assigned to this port.
+ * There must be no devices assigned to a port on a
+ * start operation.
+ */
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ if (iport->reserved_rni == SCU_DUMMY_INDEX) {
+ u16 rni = sci_remote_node_table_allocate_remote_node(
+ &ihost->available_remote_nodes, 1);
+
+ if (rni != SCU_DUMMY_INDEX)
+ sci_port_construct_dummy_rnc(iport, rni);
+ else
+ status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ iport->reserved_rni = rni;
+ }
+
+ if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+ u16 tag;
+
+ tag = isci_alloc_tag(ihost);
+ if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+ status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ else
+ sci_port_construct_dummy_task(iport, tag);
+ iport->reserved_tag = tag;
+ }
+
+ if (status == SCI_SUCCESS) {
+ phy_mask = sci_port_get_phys(iport);
+
+ /*
+ * There are one or more phys assigned to this port. Make sure
+ * the port's phy mask is in fact legal and supported by the
+ * silicon.
+ */
+ if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
+ port_state_machine_change(iport,
+ SCI_PORT_READY);
+
+ return SCI_SUCCESS;
+ }
+ status = SCI_FAILURE;
+ }
+
+ if (status != SCI_SUCCESS)
+ sci_port_destroy_dummy_resources(iport);
+
+ return status;
+}
+
+enum sci_status sci_port_stop(struct isci_port *iport)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_STOPPED:
+ return SCI_SUCCESS;
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ case SCI_PORT_SUB_CONFIGURING:
+ case SCI_PORT_RESETTING:
+ port_state_machine_change(iport,
+ SCI_PORT_STOPPING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
+{
+ enum sci_status status = SCI_FAILURE_INVALID_PHY;
+ struct isci_phy *iphy = NULL;
+ enum sci_port_states state;
+ u32 phy_index;
+
+ state = iport->sm.current_state_id;
+ if (state != SCI_PORT_SUB_OPERATIONAL) {
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ /* Select a phy on which we can send the hard reset request. */
+ for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
+ iphy = iport->phy_table[phy_index];
+ if (iphy && !sci_port_active_phy(iport, iphy)) {
+ /*
+ * We found a phy but it is not ready; select a
+ * different phy.
+ */
+ iphy = NULL;
+ }
+ }
+
+ /* If we have a phy then go ahead and start the reset procedure */
+ if (!iphy)
+ return status;
+ status = sci_phy_reset(iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_mod_timer(&iport->timer, timeout);
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;
+
+ port_state_machine_change(iport, SCI_PORT_RESETTING);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_port_add_phy() -
+ * @iport: This parameter specifies the port to which the phy will be added.
+ * @iphy: This parameter is the phy which is to be added to the port.
+ *
+ * This method will add a PHY to the selected port. It returns an enum
+ * sci_status: SCI_SUCCESS if the phy has been added to the port; any other
+ * status indicates a failure to add the phy to the port.
+ */
+enum sci_status sci_port_add_phy(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_status status;
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_STOPPED: {
+ struct sci_sas_address port_sas_address;
+
+ /* Read the port assigned SAS Address if there is one */
+ sci_port_get_sas_address(iport, &port_sas_address);
+
+ if (port_sas_address.high != 0 && port_sas_address.low != 0) {
+ struct sci_sas_address phy_sas_address;
+
+ /* Make sure that the PHY SAS Address matches the SAS Address
+ * for this port
+ */
+ sci_phy_get_sas_address(iphy, &phy_sas_address);
+
+ if (port_sas_address.high != phy_sas_address.high ||
+ port_sas_address.low != phy_sas_address.low)
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+ return sci_port_set_phy(iport, iphy);
+ }
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ status = sci_port_set_phy(iport, iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_port_general_link_up_handler(iport, iphy, true);
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
+ port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
+
+ return status;
+ case SCI_PORT_SUB_CONFIGURING:
+ status = sci_port_set_phy(iport, iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+ sci_port_general_link_up_handler(iport, iphy, true);
+
+ /* Re-enter the configuring state since this may be the last phy in
+ * the port.
+ */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_CONFIGURING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+/**
+ * sci_port_remove_phy() -
+ * @iport: This parameter specifies the port from which the phy will be removed.
+ * @iphy: This parameter is the phy which is to be removed from the port.
+ *
+ * This method will remove the PHY from the selected PORT. It returns an enum
+ * sci_status: SCI_SUCCESS if the phy has been removed from the port; any
+ * other status indicates a failure to remove the phy from the port.
+ */
+enum sci_status sci_port_remove_phy(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_status status;
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PORT_STOPPED:
+ return sci_port_clear_phy(iport, iphy);
+ case SCI_PORT_SUB_OPERATIONAL:
+ status = sci_port_clear_phy(iport, iphy);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_port_deactivate_phy(iport, iphy, true);
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_CONFIGURING);
+ return SCI_SUCCESS;
+ case SCI_PORT_SUB_CONFIGURING:
+ status = sci_port_clear_phy(iport, iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+ sci_port_deactivate_phy(iport, iphy, true);
+
+ /* Re-enter the configuring state since this may be the last phy in
+ * the port
+ */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_CONFIGURING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_link_up(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_SUB_WAITING:
+ /* Since this is the first phy going link up for the port we
+ * can just enable it and continue
+ */
+ sci_port_activate_phy(iport, iphy, true);
+
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+ return SCI_SUCCESS;
+ case SCI_PORT_SUB_OPERATIONAL:
+ sci_port_general_link_up_handler(iport, iphy, true);
+ return SCI_SUCCESS;
+ case SCI_PORT_RESETTING:
+ /* TODO We should make sure that the phy that has gone
+ * link up is the same one on which we sent the reset. It is
+ * possible that the phy on which we sent the reset is not the
+ * one that has gone link up and we want to make sure that
+ * phy being reset comes back. Consider the case where a
+ * reset is sent, but before the hardware processes the reset it
+ * gets a link up on the port because of a hot plug event.
+ * Because of the reset request this phy will go link down
+ * almost immediately.
+ */
+
+ /* In the resetting state we don't notify the user regarding
+ * link up and link down notifications.
+ */
+ sci_port_general_link_up_handler(iport, iphy, false);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_link_down(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_SUB_OPERATIONAL:
+ sci_port_deactivate_phy(iport, iphy, true);
+
+ /* If there are no active phys left in the port, then
+ * transition the port to the WAITING state until such time
+ * as a phy goes link up
+ */
+ if (iport->active_phy_mask == 0)
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_WAITING);
+ return SCI_SUCCESS;
+ case SCI_PORT_RESETTING:
+ /* In the resetting state we don't notify the user regarding
+ * link up and link down notifications. */
+ sci_port_deactivate_phy(iport, iphy, false);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_start_io(struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_SUB_WAITING:
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_PORT_SUB_OPERATIONAL:
+ iport->started_request_count++;
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_complete_io(struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_STOPPED:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_PORT_STOPPING:
+ sci_port_decrement_request_count(iport);
+
+ if (iport->started_request_count == 0)
+ port_state_machine_change(iport,
+ SCI_PORT_STOPPED);
+ break;
+ case SCI_PORT_READY:
+ case SCI_PORT_RESETTING:
+ case SCI_PORT_FAILED:
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ sci_port_decrement_request_count(iport);
+ break;
+ case SCI_PORT_SUB_CONFIGURING:
+ sci_port_decrement_request_count(iport);
+ if (iport->started_request_count == 0) {
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+ }
+ break;
+ }
+ return SCI_SUCCESS;
+}
+
+static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ /* enable the port task scheduler in a suspended state */
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value &=
+ ~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
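+/* Post the dummy remote node context for this port and then immediately
+ * suspend it; the context exists only as part of the hardware scheduler
+ * workaround.
+ */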
+static void sci_port_post_dummy_remote_node(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u8 phys_index = iport->physical_port_index;
+ union scu_remote_node_context *rnc;
+ u16 rni = iport->reserved_rni;
+ u32 command;
+
+ rnc = &ihost->remote_node_context_table[rni];
+ rnc->ssp.is_valid = true;
+
+ command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
+ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+ sci_controller_post_request(ihost, command);
+
+ /* ensure hardware has seen the post rnc command and give it
+ * ample time to act before sending the suspend
+ */
+ readl(&ihost->smu_registers->interrupt_status); /* flush */
+ udelay(10);
+
+ command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
+ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+ sci_controller_post_request(ihost, command);
+}
+
+static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
+ /*
+ * If we enter this state because of a request to stop
+ * the port then we want to disable the hardware's port
+ * task scheduler. */
+ sci_port_disable_port_task_scheduler(iport);
+ }
+}
+
+static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ /* Enable and suspend the port task scheduler */
+ sci_port_enable_port_task_scheduler(iport);
+}
+
+static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+ u32 prev_state;
+
+ prev_state = iport->sm.previous_state_id;
+ if (prev_state == SCI_PORT_RESETTING)
+ isci_port_hard_reset_complete(iport, SCI_SUCCESS);
+ else
+ isci_port_not_ready(ihost, iport);
+
+ /* Post and suspend the dummy remote node context for this port. */
+ sci_port_post_dummy_remote_node(iport);
+
+ /* Start the ready substate machine */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_WAITING);
+}
+
+static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_del_timer(&iport->timer);
+}
+
+static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_del_timer(&iport->timer);
+
+ sci_port_destroy_dummy_resources(iport);
+}
+
+static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
+}
+
+/* --------------------------------------------------------------------------- */
+
+static const struct sci_base_state sci_port_state_table[] = {
+ [SCI_PORT_STOPPED] = {
+ .enter_state = sci_port_stopped_state_enter,
+ .exit_state = sci_port_stopped_state_exit
+ },
+ [SCI_PORT_STOPPING] = {
+ .exit_state = sci_port_stopping_state_exit
+ },
+ [SCI_PORT_READY] = {
+ .enter_state = sci_port_ready_state_enter,
+ },
+ [SCI_PORT_SUB_WAITING] = {
+ .enter_state = sci_port_ready_substate_waiting_enter,
+ },
+ [SCI_PORT_SUB_OPERATIONAL] = {
+ .enter_state = sci_port_ready_substate_operational_enter,
+ .exit_state = sci_port_ready_substate_operational_exit
+ },
+ [SCI_PORT_SUB_CONFIGURING] = {
+ .enter_state = sci_port_ready_substate_configuring_enter,
+ .exit_state = sci_port_ready_substate_configuring_exit
+ },
+ [SCI_PORT_RESETTING] = {
+ .exit_state = sci_port_resetting_state_exit
+ },
+ [SCI_PORT_FAILED] = {
+ .enter_state = sci_port_failed_state_enter,
+ }
+};
+
+void sci_port_construct(struct isci_port *iport, u8 index,
+ struct isci_host *ihost)
+{
+ sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);
+
+ iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
+ iport->physical_port_index = index;
+ iport->active_phy_mask = 0;
+ iport->ready_exit = false;
+
+ iport->owning_controller = ihost;
+
+ iport->started_request_count = 0;
+ iport->assigned_device_count = 0;
+
+ iport->reserved_rni = SCU_DUMMY_INDEX;
+ iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+
+ sci_init_timer(&iport->timer, port_timeout);
+
+ iport->port_task_scheduler_registers = NULL;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ iport->phy_table[index] = NULL;
+}
+
+void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
+{
+ INIT_LIST_HEAD(&iport->remote_dev_list);
+ INIT_LIST_HEAD(&iport->domain_dev_list);
+ spin_lock_init(&iport->state_lock);
+ init_completion(&iport->start_complete);
+ iport->isci_host = ihost;
+ isci_port_change_state(iport, isci_freed);
+ atomic_set(&iport->event, 0);
+}
+
+/**
+ * isci_port_get_state() - This function gets the status of the port object.
+ * @isci_port: This parameter points to the isci_port object
+ *
+ * Return: the status of the object as an isci_status enum.
+ */
+enum isci_status isci_port_get_state(
+ struct isci_port *isci_port)
+{
+ return isci_port->status;
+}
+
+void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ /* notify the user. */
+ isci_port_bc_change_received(ihost, iport, iphy);
+}
+
+int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ unsigned long flags;
+ enum sci_status status;
+ int idx, ret = TMF_RESP_FUNC_COMPLETE;
+
+ dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
+ __func__, iport);
+
+ init_completion(&iport->hard_reset_complete);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+ status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
+
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (status == SCI_SUCCESS) {
+ wait_for_completion(&iport->hard_reset_complete);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: iport = %p; hard reset completion\n",
+ __func__, iport);
+
+ if (iport->hard_reset_status != SCI_SUCCESS)
+ ret = TMF_RESP_FUNC_FAILED;
+ } else {
+ ret = TMF_RESP_FUNC_FAILED;
+
+ dev_err(&ihost->pdev->dev,
+ "%s: iport = %p; sci_port_hard_reset call"
+ " failed 0x%x\n",
+ __func__, iport, status);
+
+ }
+
+ /* If the hard reset for the port has failed, consider this
+ * the same as link failures on all phys in the port.
+ */
+ if (ret != TMF_RESP_FUNC_COMPLETE) {
+
+ dev_err(&ihost->pdev->dev,
+ "%s: iport = %p; hard reset failed "
+ "(0x%x) - driving explicit link fail for all phys\n",
+ __func__, iport, iport->hard_reset_status);
+
+ /* Down all phys in the port. */
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ for (idx = 0; idx < SCI_MAX_PHYS; ++idx) {
+ struct isci_phy *iphy = iport->phy_table[idx];
+
+ if (!iphy)
+ continue;
+ sci_phy_stop(iphy);
+ sci_phy_start(iphy);
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ }
+ return ret;
+}
+
+/**
+ * isci_port_deformed() - This function is called by libsas when a port becomes
+ * inactive.
+ * @phy: This parameter specifies the libsas phy with the inactive port.
+ *
+ */
+void isci_port_deformed(struct asd_sas_phy *phy)
+{
+ pr_debug("%s: sas_phy = %p\n", __func__, phy);
+}
+
+/**
+ * isci_port_formed() - This function is called by libsas when a port becomes
+ * active.
+ * @phy: This parameter specifies the libsas phy with the active port.
+ *
+ */
+void isci_port_formed(struct asd_sas_phy *phy)
+{
+ pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port);
+}
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
new file mode 100644
index 00000000000..b50ecd4e8f9
--- /dev/null
+++ b/drivers/scsi/isci/port.h
@@ -0,0 +1,306 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_PORT_H_
+#define _ISCI_PORT_H_
+
+#include <scsi/libsas.h>
+#include "isci.h"
+#include "sas.h"
+#include "phy.h"
+
+#define SCIC_SDS_DUMMY_PORT 0xFF
+
+struct isci_phy;
+struct isci_host;
+
+enum isci_status {
+ isci_freed = 0x00,
+ isci_starting = 0x01,
+ isci_ready = 0x02,
+ isci_ready_for_io = 0x03,
+ isci_stopping = 0x04,
+ isci_stopped = 0x05,
+};
+
+/**
+ * struct isci_port - isci direct attached sas port object
+ * @event: counts bcns and port stop events (for bcn filtering)
+ * @ready_exit: several states constitute 'ready'. When exiting ready we
+ * need to take extra port-teardown actions that are
+ * skipped when exiting to another 'ready' state.
+ * @logical_port_index: software port index
+ * @physical_port_index: hardware port index
+ * @active_phy_mask: identifies phy members
+ * @reserved_tag: reserved task tag for the port task scheduler workaround
+ * @reserved_rni: reserved remote node index for the port task scheduler
+ * workaround
+ * @started_request_count: reference count for outstanding commands
+ * @not_ready_reason: reason code recorded during state transitions
+ * @timer: timeout start/stop operations
+ */
+struct isci_port {
+ enum isci_status status;
+ #define IPORT_BCN_BLOCKED 0
+ #define IPORT_BCN_PENDING 1
+ unsigned long flags;
+ atomic_t event;
+ struct isci_host *isci_host;
+ struct asd_sas_port sas_port;
+ struct list_head remote_dev_list;
+ spinlock_t state_lock;
+ struct list_head domain_dev_list;
+ struct completion start_complete;
+ struct completion hard_reset_complete;
+ enum sci_status hard_reset_status;
+ struct sci_base_state_machine sm;
+ bool ready_exit;
+ u8 logical_port_index;
+ u8 physical_port_index;
+ u8 active_phy_mask;
+ u16 reserved_rni;
+ u16 reserved_tag;
+ u32 started_request_count;
+ u32 assigned_device_count;
+ u32 not_ready_reason;
+ struct isci_phy *phy_table[SCI_MAX_PHYS];
+ struct isci_host *owning_controller;
+ struct sci_timer timer;
+ struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers;
+ /* XXX rework: only one register, no need to replicate per-port */
+ u32 __iomem *port_pe_configuration_register;
+ struct scu_viit_entry __iomem *viit_registers;
+};
+
+enum sci_port_not_ready_reason_code {
+ SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS,
+ SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED,
+ SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION,
+ SCIC_PORT_NOT_READY_RECONFIGURING,
+
+ SCIC_PORT_NOT_READY_REASON_CODE_MAX
+};
+
+struct sci_port_end_point_properties {
+ struct sci_sas_address sas_address;
+ struct sci_phy_proto protocols;
+};
+
+struct sci_port_properties {
+ u32 index;
+ struct sci_port_end_point_properties local;
+ struct sci_port_end_point_properties remote;
+ u32 phy_mask;
+};
+
+/**
+ * enum sci_port_states - This enumeration depicts all the states for the
+ * common port state machine.
+ *
+ *
+ */
+enum sci_port_states {
+ /**
+ * This state indicates that the port has successfully been stopped.
+ * In this state no new IO operations are permitted.
+ * This state is entered from the STOPPING state.
+ */
+ SCI_PORT_STOPPED,
+
+ /**
+ * This state indicates that the port is in the process of stopping.
+ * In this state no new IO operations are permitted, but existing IO
+ * operations are allowed to complete.
+ * This state is entered from the READY state.
+ */
+ SCI_PORT_STOPPING,
+
+ /**
+ * This state indicates the port is now ready. Thus, the user is
+ * able to perform IO operations on this port.
+ * This state is entered from the STARTING state.
+ */
+ SCI_PORT_READY,
+
+ /**
+ * The substate where the port is started and ready but has no
+ * active phys.
+ */
+ SCI_PORT_SUB_WAITING,
+
+ /**
+ * The substate where the port is started and ready and there is
+ * at least one phy operational.
+ */
+ SCI_PORT_SUB_OPERATIONAL,
+
+ /**
+ * The substate where the port is started and there was an
+ * add/remove phy event. This state is only used in Automatic
+ * Port Configuration Mode (APC)
+ */
+ SCI_PORT_SUB_CONFIGURING,
+
+ /**
+ * This state indicates the port is in the process of performing a hard
+ * reset. Thus, the user is unable to perform IO operations on this
+ * port.
+ * This state is entered from the READY state.
+ */
+ SCI_PORT_RESETTING,
+
+ /**
+ * This state indicates the port has failed a reset request. This state
+ * is entered when a port reset request times out.
+ * This state is entered from the RESETTING state.
+ */
+ SCI_PORT_FAILED,
+
+
+};
+
+static inline void sci_port_decrement_request_count(struct isci_port *iport)
+{
+ if (WARN_ONCE(iport->started_request_count == 0,
+ "%s: tried to decrement started_request_count past 0!?",
+ __func__))
+ /* pass */;
+ else
+ iport->started_request_count--;
+}
+
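+/* Test whether @phy is currently an active member of @port. */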
+#define sci_port_active_phy(port, phy) \
+ (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0)
+
+void sci_port_construct(
+ struct isci_port *iport,
+ u8 port_index,
+ struct isci_host *ihost);
+
+enum sci_status sci_port_start(struct isci_port *iport);
+enum sci_status sci_port_stop(struct isci_port *iport);
+
+enum sci_status sci_port_add_phy(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+enum sci_status sci_port_remove_phy(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+void sci_port_setup_transports(
+ struct isci_port *iport,
+ u32 device_id);
+
+void isci_port_bcn_enable(struct isci_host *, struct isci_port *);
+
+void sci_port_deactivate_phy(
+ struct isci_port *iport,
+ struct isci_phy *iphy,
+ bool do_notify_user);
+
+bool sci_port_link_detected(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+enum sci_status sci_port_link_up(struct isci_port *iport,
+ struct isci_phy *iphy);
+enum sci_status sci_port_link_down(struct isci_port *iport,
+ struct isci_phy *iphy);
+
+struct isci_request;
+struct isci_remote_device;
+enum sci_status sci_port_start_io(
+ struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_port_complete_io(
+ struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sas_linkrate sci_port_get_max_allowed_speed(
+ struct isci_port *iport);
+
+void sci_port_broadcast_change_received(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+bool sci_port_is_valid_phy_assignment(
+ struct isci_port *iport,
+ u32 phy_index);
+
+void sci_port_get_sas_address(
+ struct isci_port *iport,
+ struct sci_sas_address *sas_address);
+
+void sci_port_get_attached_sas_address(
+ struct isci_port *iport,
+ struct sci_sas_address *sas_address);
+
+enum isci_status isci_port_get_state(
+ struct isci_port *isci_port);
+
+void isci_port_formed(struct asd_sas_phy *);
+void isci_port_deformed(struct asd_sas_phy *);
+
+void isci_port_init(
+ struct isci_port *port,
+ struct isci_host *host,
+ int index);
+
+int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy);
+#endif /* !defined(_ISCI_PORT_H_) */
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
new file mode 100644
index 00000000000..486b113c634
--- /dev/null
+++ b/drivers/scsi/isci/port_config.c
@@ -0,0 +1,754 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+
+#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
+#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100)
+
+enum SCIC_SDS_APC_ACTIVITY {
+ SCIC_SDS_APC_SKIP_PHY,
+ SCIC_SDS_APC_ADD_PHY,
+ SCIC_SDS_APC_START_TIMER,
+
+ SCIC_SDS_APC_ACTIVITY_MAX
+};
+
+/*
+ * ******************************************************************************
+ * General port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ * sci_sas_address_compare - compare two SAS addresses
+ * @address_one: the first SAS address to be compared.
+ * @address_two: the second SAS address to be compared.
+ *
+ * Compare the two SAS addresses in strcmp() fashion: return a value > 0 if
+ * address one is greater than address two, a value < 0 if address one is
+ * less than address two, and 0 if the two addresses are identical.
+ */
+static s32 sci_sas_address_compare(
+ struct sci_sas_address address_one,
+ struct sci_sas_address address_two)
+{
+ if (address_one.high > address_two.high) {
+ return 1;
+ } else if (address_one.high < address_two.high) {
+ return -1;
+ } else if (address_one.low > address_two.low) {
+ return 1;
+ } else if (address_one.low < address_two.low) {
+ return -1;
+ }
+
+ /* The two SAS Address must be identical */
+ return 0;
+}
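+
+/*
+ * Illustrative sketch (hypothetical helper): the comparator above follows
+ * the strcmp() convention, so equality and ordering checks both reduce to
+ * a sign test on its result.
+ */
+static inline bool sketch_sas_address_equal(struct sci_sas_address a,
+ struct sci_sas_address b)
+{
+ return sci_sas_address_compare(a, b) == 0;
+}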
+
+/**
+ * sci_port_configuration_agent_find_port - find the port matching a phy
+ * @ihost: the controller object used for the port search.
+ * @iphy: the phy object to match.
+ *
+ * This routine will find a matching port for the phy: a port matches when
+ * the port and phy both have the same broadcast (local) SAS address and the
+ * same received (attached) SAS address. Return the matching port, or NULL
+ * if there is no matching port for the phy.
+ */
+static struct isci_port *sci_port_configuration_agent_find_port(
+ struct isci_host *ihost,
+ struct isci_phy *iphy)
+{
+ u8 i;
+ struct sci_sas_address port_sas_address;
+ struct sci_sas_address port_attached_device_address;
+ struct sci_sas_address phy_sas_address;
+ struct sci_sas_address phy_attached_device_address;
+
+ /*
+ * Since this phy can be a member of a wide port, check to see if one or
+ * more phys match the local and attached SAS addresses of this phy, in
+ * which case it should participate in the same port.
+ */
+ sci_phy_get_sas_address(iphy, &phy_sas_address);
+ sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
+
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+
+ sci_port_get_sas_address(iport, &port_sas_address);
+ sci_port_get_attached_sas_address(iport, &port_attached_device_address);
+
+ if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 &&
+ sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0)
+ return iport;
+ }
+
+ return NULL;
+}
+
+/**
+ * sci_port_configuration_agent_validate_ports - validate the port layout
+ * @ihost: the controller object that contains the port agent.
+ * @port_agent: the port configuration agent for the controller.
+ *
+ * This routine will validate that the port configuration is correct for the
+ * SCU hardware. The SCU hardware allows the following logical port (LP) to
+ * phy engine (PE) groupings:
+ *   LP0 -> (PE0), (PE0, PE1), (PE0, PE1, PE2, PE3)
+ *   LP1 -> (PE1)
+ *   LP2 -> (PE2), (PE2, PE3)
+ *   LP3 -> (PE3)
+ * Return SCI_SUCCESS if the port configuration is valid for this port
+ * configuration agent, SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION if it is
+ * not.
+ */
+static enum sci_status sci_port_configuration_agent_validate_ports(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ struct sci_sas_address first_address;
+ struct sci_sas_address second_address;
+
+ /*
+ * Sanity check the max ranges for all the phys; the max index
+ * is always equal to the port range index */
+ if (port_agent->phy_valid_port_range[0].max_index != 0 ||
+ port_agent->phy_valid_port_range[1].max_index != 1 ||
+ port_agent->phy_valid_port_range[2].max_index != 2 ||
+ port_agent->phy_valid_port_range[3].max_index != 3)
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+
+ /*
+ * This is a request to configure a single x4 port or at least attempt
+ * to make all the phys into a single port */
+ if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+ port_agent->phy_valid_port_range[1].min_index == 0 &&
+ port_agent->phy_valid_port_range[2].min_index == 0 &&
+ port_agent->phy_valid_port_range[3].min_index == 0)
+ return SCI_SUCCESS;
+
+ /*
+ * This is a degenerate case where phy 1 and phy 2 are assigned
+ * to the same port; this is explicitly disallowed by the hardware
+ * unless they are part of the same x4 port, and that condition was
+ * already checked above. */
+ if (port_agent->phy_valid_port_range[2].min_index == 1) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /*
+ * PE0 and PE3 can never have the same SAS Address unless they
+ * are part of the same x4 wide port and we have already checked
+ * for this condition. */
+ sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /*
+ * If PE0 and PE1 are configured into two x1 ports, make sure that the
+ * SAS addresses for PE0 and PE2 are different, since they cannot be
+ * part of the same port. */
+ if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+ port_agent->phy_valid_port_range[1].min_index == 1) {
+ sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[2], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+ }
+
+ /*
+ * If PE2 and PE3 are configured into two x1 ports, make sure that the
+ * SAS addresses for PE1 and PE3 are different, since they cannot be
+ * part of the same port. */
+ if (port_agent->phy_valid_port_range[2].min_index == 2 &&
+ port_agent->phy_valid_port_range[3].min_index == 3) {
+ sci_phy_get_sas_address(&ihost->phys[1], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+ }
+
+ return SCI_SUCCESS;
+}
+
+/*
+ * ******************************************************************************
+ * Manual port configuration agent routines
+ * ****************************************************************************** */
+
+/* verify all of the phys in the same port are using the same SAS address */
+static enum sci_status
+sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ u32 phy_mask;
+ u32 assigned_phy_mask;
+ struct sci_sas_address sas_address;
+ struct sci_sas_address phy_assigned_address;
+ u8 port_index;
+ u8 phy_index;
+
+ assigned_phy_mask = 0;
+ sas_address.high = 0;
+ sas_address.low = 0;
+
+ for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) {
+ phy_mask = ihost->oem_parameters.ports[port_index].phy_mask;
+
+ if (!phy_mask)
+ continue;
+ /*
+ * Make sure at least one of the phys was not already assigned to
+ * a different port. */
+ if ((phy_mask & ~assigned_phy_mask) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /* Find the starting phy index for this round through the loop */
+ for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
+ if ((phy_mask & (1 << phy_index)) == 0)
+ continue;
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &sas_address);
+
+ /*
+ * The phy_index can be used as the starting point for the
+ * port range since the hardware starts all logical ports
+ * the same as the PE index. */
+ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+ if (phy_index != port_index) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ break;
+ }
+
+ /*
+ * See how many additional phys are being added to this logical port.
+ * Note: We have not moved the current phy_index so we will actually
+ * compare the starting phy with itself.
+ * This is expected and required to add the phy to the port. */
+ while (phy_index < SCI_MAX_PHYS) {
+ if ((phy_mask & (1 << phy_index)) == 0) {
+ phy_index++;
+ continue;
+ }
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &phy_assigned_address);
+
+ if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) {
+ /*
+ * The phy mask specified that this phy is part of the same port
+ * as the starting phy, but it is not, so fail this configuration */
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+ sci_port_add_phy(&ihost->ports[port_index],
+ &ihost->phys[phy_index]);
+
+ assigned_phy_mask |= (1 << phy_index);
+ phy_index++;
+ }
+ }
+
+ return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
+
+static void mpc_agent_timeout(unsigned long data)
+{
+ u8 index;
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_port_configuration_agent *port_agent;
+ struct isci_host *ihost;
+ unsigned long flags;
+ u16 configure_phy_mask;
+
+ port_agent = container_of(tmr, typeof(*port_agent), timer);
+ ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ port_agent->timer_pending = false;
+
+ /* Find the mask of phys that are reported ready but not yet configured into a port */
+ configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct isci_phy *iphy = &ihost->phys[index];
+
+ if (configure_phy_mask & (1 << index)) {
+ port_agent->link_up_handler(ihost, port_agent,
+ phy_get_non_dummy_port(iphy),
+ iphy);
+ }
+ }
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static void sci_mpc_agent_link_up(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ /* If the port is NULL then the phy was not assigned to a port.
+ * This is because the phy was not given the same SAS Address as
+ * the other PHYs in the port.
+ */
+ if (!iport)
+ return;
+
+ port_agent->phy_ready_mask |= (1 << iphy->phy_index);
+ sci_port_link_up(iport, iphy);
+ if ((iport->active_phy_mask & (1 << iphy->phy_index)))
+ port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+}
+
+/**
+ * sci_mpc_agent_link_down - handle mpc link down events
+ * @ihost: the controller object that receives the link down notification.
+ * @port_agent: the port configuration agent for the controller.
+ * @iport: the port object associated with the phy; NULL if there is no
+ * associated port. The port is only invalid if the phy was never part
+ * of this port, which happens when the phy is not broadcasting the same
+ * SAS address as the other phys in the assigned port.
+ * @iphy: the phy object which has gone link down.
+ *
+ * This function handles the manual port configuration link down
+ * notifications. Since all ports and phys are associated at initialization
+ * time, we simply notify the port object of the link down event. If this
+ * phy is not associated with a port, no action is taken. It is unclear
+ * whether a link down notification can arrive from a phy that has no
+ * associated port.
+ */
+static void sci_mpc_agent_link_down(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ if (iport != NULL) {
+ /*
+ * If we can form a new port from the remainder of the phys
+ * then we want to start the timer to allow the SCI User to
+ * cleanup old devices and rediscover the port before
+ * rebuilding the port with the phys that remain in the ready
+ * state.
+ */
+ port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+ port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+
+ /*
+ * Check to see if there are more phys waiting to be
+ * configured into a port. If there are allow the SCI User
+ * to tear down this port, if necessary, and then reconstruct
+ * the port after the timeout.
+ */
+ if ((port_agent->phy_configured_mask == 0x0000) &&
+ (port_agent->phy_ready_mask != 0x0000) &&
+ !port_agent->timer_pending) {
+ port_agent->timer_pending = true;
+
+ sci_mod_timer(&port_agent->timer,
+ SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT);
+ }
+
+ sci_port_link_down(iport, iphy);
+ }
+}
+
+/* verify phys are assigned a valid SAS address for automatic port
+ * configuration mode.
+ */
+static enum sci_status
+sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ u8 phy_index;
+ u8 port_index;
+ struct sci_sas_address sas_address;
+ struct sci_sas_address phy_assigned_address;
+
+ phy_index = 0;
+
+ while (phy_index < SCI_MAX_PHYS) {
+ port_index = phy_index;
+
+ /* Get the assigned SAS Address for the first PHY on the controller. */
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &sas_address);
+
+ while (++phy_index < SCI_MAX_PHYS) {
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &phy_assigned_address);
+
+ /* Verify each of the SAS address are all the same for every PHY */
+ if (sci_sas_address_compare(sas_address, phy_assigned_address) == 0) {
+ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+ } else {
+ port_agent->phy_valid_port_range[phy_index].min_index = phy_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+ break;
+ }
+ }
+ }
+
+ return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
+
+static void sci_apc_agent_configure_ports(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_phy *iphy,
+ bool start_timer)
+{
+ u8 port_index;
+ enum sci_status status;
+ struct isci_port *iport;
+ enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY;
+
+ iport = sci_port_configuration_agent_find_port(ihost, iphy);
+
+ if (iport) {
+ if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index))
+ apc_activity = SCIC_SDS_APC_ADD_PHY;
+ else
+ apc_activity = SCIC_SDS_APC_SKIP_PHY;
+ } else {
+ /*
+ * There is no matching port for this PHY, so let's search through the
+ * ports and see if we can add the PHY to its own port, or maybe start
+ * the timer and wait to see if a wider port can be made.
+ *
+ * Note the break when we reach the condition of port id == phy id. */
+ for (port_index = port_agent->phy_valid_port_range[iphy->phy_index].min_index;
+ port_index <= port_agent->phy_valid_port_range[iphy->phy_index].max_index;
+ port_index++) {
+
+ iport = &ihost->ports[port_index];
+
+ /* First we must make sure that this PHY can be added to this Port. */
+ if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+ /*
+ * Port contains a PHY with a greater PHY ID than the current
+ * PHY that has gone link up. This phy can not be part of any
+ * port so skip it and move on. */
+ if (iport->active_phy_mask > (1 << iphy->phy_index)) {
+ apc_activity = SCIC_SDS_APC_SKIP_PHY;
+ break;
+ }
+
+ /*
+ * We have reached the end of our Port list and have not found
+ * any reason why we should not either add the PHY to the port
+ * or wait for more phys to become active. */
+ if (iport->physical_port_index == iphy->phy_index) {
+ /*
+ * The port either has no active PHYs, or only active PHYs with
+ * a lower PHY id than this PHY; a port with a higher active PHY
+ * id would already have been skipped above. */
+ if (apc_activity != SCIC_SDS_APC_START_TIMER) {
+ apc_activity = SCIC_SDS_APC_ADD_PHY;
+ }
+
+ break;
+ }
+
+ /*
+ * The current port has no active PHYs and this PHY could be part
+ * of this port. Since we don't know yet, set up to start the
+ * timer and see if there is a better configuration. */
+ if (iport->active_phy_mask == 0) {
+ apc_activity = SCIC_SDS_APC_START_TIMER;
+ }
+ } else if (iport->active_phy_mask != 0) {
+ /*
+ * The Port has an active phy and the current Phy can not
+ * participate in this port so skip the PHY and see if
+ * there is a better configuration. */
+ apc_activity = SCIC_SDS_APC_SKIP_PHY;
+ }
+ }
+ }
+
+ /*
+ * Check to see if the start timer operations should instead map to an
+ * add phy operation. This happens when we have been waiting to
+ * add a phy to a port but could not because the automatic port
+ * configuration engine had a choice of possible ports for the phy.
+ * Since we have gone through a timeout we are going to restrict the
+ * choice to the smallest possible port. */
+ if (!start_timer && (apc_activity == SCIC_SDS_APC_START_TIMER)) {
+ apc_activity = SCIC_SDS_APC_ADD_PHY;
+ }
+
+ switch (apc_activity) {
+ case SCIC_SDS_APC_ADD_PHY:
+ status = sci_port_add_phy(iport, iphy);
+
+ if (status == SCI_SUCCESS) {
+ port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+ }
+ break;
+
+ case SCIC_SDS_APC_START_TIMER:
+ /*
+ * This can occur for either a link down event, or a link
+ * up event where we cannot yet tell the port to which a
+ * phy belongs.
+ */
+ if (port_agent->timer_pending)
+ sci_del_timer(&port_agent->timer);
+
+ port_agent->timer_pending = true;
+ sci_mod_timer(&port_agent->timer,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+ break;
+
+ case SCIC_SDS_APC_SKIP_PHY:
+ default:
+ /* do nothing; the PHY cannot be made part of a port at this time. */
+ break;
+ }
+}
+
+/**
+ * sci_apc_agent_link_up - handle apc link up events
+ * @ihost: the controller object that receives the link up notification.
+ * @port_agent: the port configuration agent for the controller.
+ * @iport: the port object associated with the phy; NULL if there is no
+ * associated port.
+ * @iphy: the phy object which has gone link up.
+ *
+ * This method handles the automatic port configuration for link up
+ * notifications.
+ */
+static void sci_apc_agent_link_up(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ u8 phy_index = iphy->phy_index;
+
+ if (!iport) {
+ /* the phy is not yet part of a port */
+ port_agent->phy_ready_mask |= 1 << phy_index;
+ sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+ } else {
+ /* the phy is already part of the port */
+ u32 port_state = iport->sm.current_state_id;
+
+ /* If the port's state is resetting, then the link up is from
+ * a port hard reset; in this case we need to tell the port
+ * that link up was received.
+ */
+ BUG_ON(port_state != SCI_PORT_RESETTING);
+ port_agent->phy_ready_mask |= 1 << phy_index;
+ sci_port_link_up(iport, iphy);
+ }
+}
+
+/**
+ * sci_apc_agent_link_down - handle apc link down events
+ * @ihost: the controller object that receives the link down notification.
+ * @port_agent: the port configuration agent for the controller.
+ * @iport: the port object associated with the phy; NULL if there is no
+ * associated port.
+ * @iphy: the phy object which has gone link down.
+ *
+ * This method handles the automatic port configuration link down
+ * notifications. If the phy is not associated with a port, no action is
+ * taken.
+ */
+static void sci_apc_agent_link_down(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+
+ if (!iport)
+ return;
+ if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) {
+ enum sci_status status;
+
+ status = sci_port_remove_phy(iport, iphy);
+
+ if (status == SCI_SUCCESS)
+ port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+ }
+}
+
+/* configure the phys into ports when the timer fires */
+static void apc_agent_timeout(unsigned long data)
+{
+ u32 index;
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_port_configuration_agent *port_agent;
+ struct isci_host *ihost;
+ unsigned long flags;
+ u16 configure_phy_mask;
+
+ port_agent = container_of(tmr, typeof(*port_agent), timer);
+ ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ port_agent->timer_pending = false;
+
+ configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+ if (!configure_phy_mask)
+ goto done;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if ((configure_phy_mask & (1 << index)) == 0)
+ continue;
+
+ sci_apc_agent_configure_ports(ihost, port_agent,
+ &ihost->phys[index], false);
+ }
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/*
+ * ******************************************************************************
+ * Public port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ * sci_port_configuration_agent_construct - construct the port agent
+ * @port_agent: the port configuration agent to construct.
+ *
+ * This method will construct the port configuration agent for operation.
+ * This call is universal for both manual port configuration and automatic
+ * port configuration modes.
+ */
+void sci_port_configuration_agent_construct(
+ struct sci_port_configuration_agent *port_agent)
+{
+ u32 index;
+
+ port_agent->phy_configured_mask = 0x00;
+ port_agent->phy_ready_mask = 0x00;
+
+ port_agent->link_up_handler = NULL;
+ port_agent->link_down_handler = NULL;
+
+ port_agent->timer_pending = false;
+
+ for (index = 0; index < SCI_MAX_PORTS; index++) {
+ port_agent->phy_valid_port_range[index].min_index = 0;
+ port_agent->phy_valid_port_range[index].max_index = 0;
+ }
+}
+
+enum sci_status sci_port_configuration_agent_initialize(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ enum sci_status status;
+ enum sci_port_configuration_mode mode;
+
+ mode = ihost->oem_parameters.controller.mode_type;
+
+ if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+ status = sci_mpc_agent_validate_phy_configuration(
+ ihost, port_agent);
+
+ port_agent->link_up_handler = sci_mpc_agent_link_up;
+ port_agent->link_down_handler = sci_mpc_agent_link_down;
+
+ sci_init_timer(&port_agent->timer, mpc_agent_timeout);
+ } else {
+ status = sci_apc_agent_validate_phy_configuration(
+ ihost, port_agent);
+
+ port_agent->link_up_handler = sci_apc_agent_link_up;
+ port_agent->link_down_handler = sci_apc_agent_link_down;
+
+ sci_init_timer(&port_agent->timer, apc_agent_timeout);
+ }
+
+ return status;
+}
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
new file mode 100644
index 00000000000..b5f4341de24
--- /dev/null
+++ b/drivers/scsi/isci/probe_roms.c
@@ -0,0 +1,243 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ */
+
+/* probe_roms - scan for oem parameters */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/uaccess.h>
+#include <linux/efi.h>
+#include <asm/probe_roms.h>
+
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
+static efi_char16_t isci_efivar_name[] = {
+ 'R', 's', 't', 'S', 'c', 'u', 'O'
+};
+
+struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
+{
+ void __iomem *oprom = pci_map_biosrom(pdev);
+ struct isci_orom *rom = NULL;
+ size_t len, i;
+ int j;
+ char oem_sig[4];
+ struct isci_oem_hdr oem_hdr;
+ u8 *tmp, sum;
+
+ if (!oprom)
+ return NULL;
+
+ len = pci_biosrom_size(pdev);
+ rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
+ if (!rom) {
+ dev_warn(&pdev->dev,
+ "Unable to allocate memory for orom\n");
+ return NULL;
+ }
+
+ for (i = 0; i < len && rom; i += ISCI_OEM_SIG_SIZE) {
+ memcpy_fromio(oem_sig, oprom + i, ISCI_OEM_SIG_SIZE);
+
+ /* we think we found the OEM table */
+ if (memcmp(oem_sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) == 0) {
+ size_t copy_len;
+
+ memcpy_fromio(&oem_hdr, oprom + i, sizeof(oem_hdr));
+
+ copy_len = min(oem_hdr.len - sizeof(oem_hdr),
+ sizeof(*rom));
+
+ memcpy_fromio(rom,
+ oprom + i + sizeof(oem_hdr),
+ copy_len);
+
+ /* calculate checksum */
+ tmp = (u8 *)&oem_hdr;
+ for (j = 0, sum = 0; j < sizeof(oem_hdr); j++, tmp++)
+ sum += *tmp;
+
+ tmp = (u8 *)rom;
+ for (j = 0; j < sizeof(*rom); j++, tmp++)
+ sum += *tmp;
+
+ if (sum != 0) {
+ dev_warn(&pdev->dev,
+ "OEM table checksum failed\n");
+ continue;
+ }
+
+ /* keep going if that's not the oem param table */
+ if (memcmp(rom->hdr.signature,
+ ISCI_ROM_SIG,
+ ISCI_ROM_SIG_SIZE) != 0)
+ continue;
+
+ dev_info(&pdev->dev,
+ "OEM parameter table found in OROM\n");
+ break;
+ }
+ }
+
+ if (i >= len) {
+ dev_err(&pdev->dev, "oprom parse error\n");
+ devm_kfree(&pdev->dev, rom);
+ rom = NULL;
+ }
+ pci_unmap_biosrom(oprom);
+
+ return rom;
+}
+
+enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
+ struct isci_orom *orom, int scu_index)
+{
+ /* check for valid inputs */
+ if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
+ scu_index > orom->hdr.num_elements || !oem)
+ return -EINVAL;
+
+ *oem = orom->ctrl[scu_index];
+ return 0;
+}
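+
+/*
+ * Illustrative sketch (hypothetical caller, error handling elided): how the
+ * OROM scan above and the per-controller parameter extraction are meant to
+ * be combined.
+ */
+static void sketch_load_oem_params(struct pci_dev *pdev,
+ struct sci_oem_params *oem, int scu_index)
+{
+ struct isci_orom *orom = isci_request_oprom(pdev);
+
+ if (orom)
+ isci_parse_oem_parameters(oem, orom, scu_index);
+}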
+
+struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
+{
+ struct isci_orom *orom = NULL, *data;
+ int i, j;
+
+ if (request_firmware(&fw, ISCI_FW_NAME, &pdev->dev) != 0)
+ return NULL;
+
+ if (fw->size < sizeof(*orom))
+ goto out;
+
+ data = (struct isci_orom *)fw->data;
+
+ if (strncmp(ISCI_ROM_SIG, data->hdr.signature,
+ strlen(ISCI_ROM_SIG)) != 0)
+ goto out;
+
+ orom = devm_kzalloc(&pdev->dev, fw->size, GFP_KERNEL);
+ if (!orom)
+ goto out;
+
+ memcpy(orom, fw->data, fw->size);
+
+ if (is_c0(pdev))
+ goto out;
+
+ /*
+ * deprecated: override default amp_control for pre-production
+ * silicon revisions
+ */
+ for (i = 0; i < ARRAY_SIZE(orom->ctrl); i++)
+ for (j = 0; j < ARRAY_SIZE(orom->ctrl[i].phys); j++) {
+ orom->ctrl[i].phys[j].afe_tx_amp_control0 = 0xe7c03;
+ orom->ctrl[i].phys[j].afe_tx_amp_control1 = 0xe7c03;
+ orom->ctrl[i].phys[j].afe_tx_amp_control2 = 0xe7c03;
+ orom->ctrl[i].phys[j].afe_tx_amp_control3 = 0xe7c03;
+ }
+ out:
+ release_firmware(fw);
+
+ return orom;
+}
+
+static struct efi *get_efi(void)
+{
+#ifdef CONFIG_EFI
+ return &efi;
+#else
+ return NULL;
+#endif
+}
+
+struct isci_orom *isci_get_efi_var(struct pci_dev *pdev)
+{
+ efi_status_t status;
+ struct isci_orom *rom;
+ struct isci_oem_hdr *oem_hdr;
+ u8 *tmp, sum;
+ int j;
+ unsigned long data_len;
+ u8 *efi_data;
+ u32 efi_attrib = 0;
+
+ data_len = 1024;
+ efi_data = devm_kzalloc(&pdev->dev, data_len, GFP_KERNEL);
+ if (!efi_data) {
+ dev_warn(&pdev->dev,
+ "Unable to allocate memory for EFI data\n");
+ return NULL;
+ }
+
+ rom = (struct isci_orom *)(efi_data + sizeof(struct isci_oem_hdr));
+
+ if (get_efi())
+ status = get_efi()->get_variable(isci_efivar_name,
+ &ISCI_EFI_VENDOR_GUID,
+ &efi_attrib,
+ &data_len,
+ efi_data);
+ else
+ status = EFI_NOT_FOUND;
+
+ if (status != EFI_SUCCESS) {
+ dev_warn(&pdev->dev,
+ "Unable to obtain EFI var data for OEM parms\n");
+ return NULL;
+ }
+
+ oem_hdr = (struct isci_oem_hdr *)efi_data;
+
+ if (memcmp(oem_hdr->sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) != 0) {
+ dev_warn(&pdev->dev,
+ "Invalid OEM header signature\n");
+ return NULL;
+ }
+
+ /* calculate checksum */
+ tmp = (u8 *)efi_data;
+ for (j = 0, sum = 0; j < (sizeof(*oem_hdr) + sizeof(*rom)); j++, tmp++)
+ sum += *tmp;
+
+ if (sum != 0) {
+ dev_warn(&pdev->dev,
+ "OEM table checksum failed\n");
+ return NULL;
+ }
+
+ if (memcmp(rom->hdr.signature,
+ ISCI_ROM_SIG,
+ ISCI_ROM_SIG_SIZE) != 0) {
+ dev_warn(&pdev->dev,
+ "Invalid OEM table signature\n");
+ return NULL;
+ }
+
+ return rom;
+}
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
new file mode 100644
index 00000000000..dc007e692f4
--- /dev/null
+++ b/drivers/scsi/isci/probe_roms.h
@@ -0,0 +1,249 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_PROBE_ROMS_H_
+#define _ISCI_PROBE_ROMS_H_
+
+#ifdef __KERNEL__
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/efi.h>
+#include "isci.h"
+
+#define SCIC_SDS_PARM_NO_SPEED 0
+
+/* generation 1 (i.e. 1.5 Gb/s) */
+#define SCIC_SDS_PARM_GEN1_SPEED 1
+
+/* generation 2 (i.e. 3.0 Gb/s) */
+#define SCIC_SDS_PARM_GEN2_SPEED 2
+
+/* generation 3 (i.e. 6.0 Gb/s) */
+#define SCIC_SDS_PARM_GEN3_SPEED 3
+#define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED
+
+/* parameters that can be set by module parameters */
+struct sci_user_parameters {
+ struct sci_phy_user_params {
+ /**
+ * This field specifies the NOTIFY (ENABLE SPIN UP) primitive
+ * insertion frequency for this phy index.
+ */
+ u32 notify_enable_spin_up_insertion_frequency;
+
+ /**
+ * This field specifies the number of transmitted DWORDs within which
+ * to transmit a single ALIGN primitive. This value applies regardless
+ * of what type of device is attached or connection state. A value of
+ * 0 indicates that no ALIGN primitives will be inserted.
+ */
+ u16 align_insertion_frequency;
+
+ /**
+ * This field specifies the number of transmitted DWORDs within which
+ * to transmit 2 ALIGN primitives. This applies for SAS connections
+ * only. A minimum value of 3 is required for this field.
+ */
+ u16 in_connection_align_insertion_frequency;
+
+ /**
+ * This field indicates the maximum speed generation to be utilized
+ * by phys in the supplied port.
+ * - A value of 1 indicates generation 1 (i.e. 1.5 Gb/s).
+ * - A value of 2 indicates generation 2 (i.e. 3.0 Gb/s).
+ * - A value of 3 indicates generation 3 (i.e. 6.0 Gb/s).
+ */
+ u8 max_speed_generation;
+
+ } phys[SCI_MAX_PHYS];
+
+ /**
+ * This field specifies the maximum number of direct attached devices
+ * that can have power supplied to them simultaneously.
+ */
+ u8 max_number_concurrent_device_spin_up;
+
+ /**
+ * This field specifies the number of seconds to allow a phy to consume
+ * power before yielding to another phy.
+ *
+ */
+ u8 phy_spin_up_delay_interval;
+
+ /**
+ * These timer values specify how long a link will remain open with no
+ * activity, in increments of a microsecond; the increment becomes 100
+ * microseconds if the uppermost bit is set.
+ *
+ */
+ u16 stp_inactivity_timeout;
+ u16 ssp_inactivity_timeout;
+
+ /**
+ * These timer values specify how long a link will remain open in increments
+ * of 100 microseconds.
+ *
+ */
+ u16 stp_max_occupancy_timeout;
+ u16 ssp_max_occupancy_timeout;
+
+ /**
+ * This timer value specifies how long a link will remain open with no
+ * outbound traffic in increments of a microsecond.
+ *
+ */
+ u8 no_outbound_task_timeout;
+
+};
+
+#define SCIC_SDS_PARM_PHY_MASK_MIN 0x0
+#define SCIC_SDS_PARM_PHY_MASK_MAX 0xF
+#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
+
+struct sci_oem_params;
+int sci_oem_parameters_validate(struct sci_oem_params *oem);
+
+struct isci_orom;
+struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
+enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
+ struct isci_orom *orom, int scu_index);
+struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
+struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
+
+struct isci_oem_hdr {
+ u8 sig[4];
+ u8 rev_major;
+ u8 rev_minor;
+ u16 len;
+ u8 checksum;
+ u8 reserved1;
+ u16 reserved2;
+} __attribute__ ((packed));
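+
+/*
+ * Illustrative sketch (hypothetical helper): the OEM block checksum
+ * convention used in probe_roms.c is a simple byte sum over the header
+ * plus parameter table that must come out to zero.
+ */
+static inline u8 sketch_oem_block_sum(const u8 *p, size_t len)
+{
+ u8 sum = 0;
+
+ while (len--)
+ sum += *p++;
+ return sum;
+}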
+
+#else
+#define SCI_MAX_PORTS 4
+#define SCI_MAX_PHYS 4
+#define SCI_MAX_CONTROLLERS 2
+#endif
+
+#define ISCI_FW_NAME "isci/isci_firmware.bin"
+
+#define ROMSIGNATURE 0xaa55
+
+#define ISCI_OEM_SIG "$OEM"
+#define ISCI_OEM_SIG_SIZE 4
+#define ISCI_ROM_SIG "ISCUOEMB"
+#define ISCI_ROM_SIG_SIZE 8
+
+#define ISCI_EFI_VENDOR_GUID \
+ EFI_GUID(0x193dfefa, 0xa445, 0x4302, 0x99, 0xd8, 0xef, 0x3a, 0xad, \
+ 0x1a, 0x04, 0xc6)
+#define ISCI_EFI_VAR_NAME "RstScuO"
+
+/* Allowed PORT configuration modes
+ *
+ * APC (Automatic PORT configuration) mode is defined by the OEM
+ * configuration parameters providing no PHY_MASK parameters for any PORT,
+ * i.e. there are no phys assigned to any of the ports at start.
+ *
+ * MPC (Manual PORT configuration) mode is defined by the OEM configuration
+ * parameters providing a PHY_MASK value for any PORT. It is assumed that
+ * any PORT with no PHY_MASK is an invalid port; not all PHYs must be
+ * assigned. A PORT_PHY mask that assigns just a single PHY to a port, with
+ * no other PHYs assigned, is sufficient to declare manual PORT
+ * configuration. (See the illustrative sketch after struct sci_oem_params
+ * below.)
+ */
+enum sci_port_configuration_mode {
+ SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0,
+ SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1
+};
+
+struct sci_bios_oem_param_block_hdr {
+ uint8_t signature[ISCI_ROM_SIG_SIZE];
+ uint16_t total_block_length;
+ uint8_t hdr_length;
+ uint8_t version;
+ uint8_t preboot_source;
+ uint8_t num_elements;
+ uint16_t element_length;
+ uint8_t reserved[8];
+} __attribute__ ((packed));
+
+struct sci_oem_params {
+ struct {
+ uint8_t mode_type;
+ uint8_t max_concurrent_dev_spin_up;
+ uint8_t do_enable_ssc;
+ uint8_t reserved;
+ } controller;
+
+ struct {
+ uint8_t phy_mask;
+ } ports[SCI_MAX_PORTS];
+
+ struct sci_phy_oem_params {
+ struct {
+ uint32_t high;
+ uint32_t low;
+ } sas_address;
+
+ uint32_t afe_tx_amp_control0;
+ uint32_t afe_tx_amp_control1;
+ uint32_t afe_tx_amp_control2;
+ uint32_t afe_tx_amp_control3;
+ } phys[SCI_MAX_PHYS];
+} __attribute__ ((packed));
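+
+/*
+ * Illustrative sketch (hypothetical helper): one way to read the APC/MPC
+ * convention described above enum sci_port_configuration_mode from these
+ * OEM parameters; the driver itself consumes controller.mode_type directly.
+ */
+static inline enum sci_port_configuration_mode
+sketch_oem_config_mode(const struct sci_oem_params *oem)
+{
+ int i;
+
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ if (oem->ports[i].phy_mask != 0)
+ return SCIC_PORT_MANUAL_CONFIGURATION_MODE;
+
+ return SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+}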
+
+struct isci_orom {
+ struct sci_bios_oem_param_block_hdr hdr;
+ struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS];
+} __attribute__ ((packed));
+
+#endif
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
new file mode 100644
index 00000000000..9b266c7428e
--- /dev/null
+++ b/drivers/scsi/isci/registers.h
@@ -0,0 +1,1934 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_REGISTERS_H_
+#define _SCU_REGISTERS_H_
+
+/**
+ * This file contains the constants and structures for the SCU memory mapped
+ * registers.
+ *
+ *
+ */
+
+#define SCU_VIIT_ENTRY_ID_MASK (0xC0000000)
+#define SCU_VIIT_ENTRY_ID_SHIFT (30)
+
+#define SCU_VIIT_ENTRY_FUNCTION_MASK (0x0FF00000)
+#define SCU_VIIT_ENTRY_FUNCTION_SHIFT (20)
+
+#define SCU_VIIT_ENTRY_IPPTMODE_MASK (0x0001F800)
+#define SCU_VIIT_ENTRY_IPPTMODE_SHIFT (12)
+
+#define SCU_VIIT_ENTRY_LPVIE_MASK (0x00000F00)
+#define SCU_VIIT_ENTRY_LPVIE_SHIFT (8)
+
+#define SCU_VIIT_ENTRY_STATUS_MASK (0x000000FF)
+#define SCU_VIIT_ENTRY_STATUS_SHIFT (0)
+
+#define SCU_VIIT_ENTRY_ID_INVALID (0 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_VIIT (1 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_IIT (2 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_VIRT_EXP (3 << SCU_VIIT_ENTRY_ID_SHIFT)
+
+#define SCU_VIIT_IPPT_SSP_INITIATOR (0x01 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_SMP_INITIATOR (0x02 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_STP_INITIATOR (0x04 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_INITIATOR \
+ (\
+ SCU_VIIT_IPPT_SSP_INITIATOR \
+ | SCU_VIIT_IPPT_SMP_INITIATOR \
+ | SCU_VIIT_IPPT_STP_INITIATOR \
+ )
+
+#define SCU_VIIT_STATUS_RNC_VALID (0x01 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_ADDRESS_VALID (0x02 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_RNI_VALID (0x04 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_ALL_VALID \
+ (\
+ SCU_VIIT_STATUS_RNC_VALID \
+ | SCU_VIIT_STATUS_ADDRESS_VALID \
+ | SCU_VIIT_STATUS_RNI_VALID \
+ )
+
+#define SCU_VIIT_IPPT_SMP_TARGET (0x10 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+
+/**
+ * struct scu_viit_entry - This is the SCU Virtual Initiator Table Entry
+ *
+ *
+ */
+struct scu_viit_entry {
+ /**
+ * This must be encoded according to the type of initiator being constructed
+ * for this port.
+ */
+ u32 status;
+
+ /**
+ * Virtual initiator high SAS Address
+ */
+ u32 initiator_sas_address_hi;
+
+ /**
+ * Virtual initiator low SAS Address
+ */
+ u32 initiator_sas_address_lo;
+
+ /**
+ * This must be 0
+ */
+ u32 reserved;
+
+};
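+
+/*
+ * Illustrative sketch (hypothetical helper): composing the status word of
+ * a VIIT entry from the field definitions above; @lp_mask stands for the
+ * logical-port enable bits destined for the LPVIE field.
+ */
+static inline u32 sketch_viit_status(u32 lp_mask)
+{
+ return SCU_VIIT_ENTRY_ID_VIIT
+ | SCU_VIIT_IPPT_INITIATOR
+ | ((lp_mask << SCU_VIIT_ENTRY_LPVIE_SHIFT) &
+ SCU_VIIT_ENTRY_LPVIE_MASK)
+ | SCU_VIIT_STATUS_ALL_VALID;
+}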
+
+
+/* IIT Status Defines */
+#define SCU_IIT_ENTRY_ID_MASK (0xC0000000)
+#define SCU_IIT_ENTRY_ID_SHIFT (30)
+
+#define SCU_IIT_ENTRY_STATUS_UPDATE_MASK (0x20000000)
+#define SCU_IIT_ENTRY_STATUS_UPDATE_SHIFT (29)
+
+#define SCU_IIT_ENTRY_LPI_MASK (0x00000F00)
+#define SCU_IIT_ENTRY_LPI_SHIFT (8)
+
+#define SCU_IIT_ENTRY_STATUS_MASK (0x000000FF)
+#define SCU_IIT_ENTRY_STATUS_SHIFT (0)
+
+/* IIT Remote Initiator Defines */
+#define SCU_IIT_ENTRY_REMOTE_TAG_MASK (0x0000FFFF)
+#define SCU_IIT_ENTRY_REMOTE_TAG_SHIFT (0)
+
+#define SCU_IIT_ENTRY_REMOTE_RNC_MASK (0x0FFF0000)
+#define SCU_IIT_ENTRY_REMOTE_RNC_SHIFT (16)
+
+#define SCU_IIT_ENTRY_ID_INVALID (0 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_VIIT (1 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_IIT (2 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_VIRT_EXP (3 << SCU_IIT_ENTRY_ID_SHIFT)
+
+/**
+ * struct scu_iit_entry - This will be implemented later when we support
+ * virtual functions
+ *
+ *
+ */
+struct scu_iit_entry {
+ u32 status;
+ u32 remote_initiator_sas_address_hi;
+ u32 remote_initiator_sas_address_lo;
+ u32 remote_initiator;
+
+};
+
+/* Generate a value for an SCU register */
+#define SCU_GEN_VALUE(name, value) \
+ (((value) << name ## _SHIFT) & (name ## _MASK))
+
+/*
+ * Generate a bit value for an SCU register
+ * Make sure that the register MASK is just a single bit */
+#define SCU_GEN_BIT(name) \
+ SCU_GEN_VALUE(name, ((u32)1))
+
+#define SCU_SET_BIT(name, reg_value) \
+ ((reg_value) | SCU_GEN_BIT(name))
+
+#define SCU_CLEAR_BIT(name, reg_value) \
+ ((reg_value) & ~(SCU_GEN_BIT(name)))
+
+/*
+ * *****************************************************************************
+ * Unions for bitfield definitions of SCU Registers
+ * SMU Post Context Port
+ * ***************************************************************************** */
+#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_SHIFT (0)
+#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_MASK (0x00000FFF)
+#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_SHIFT (12)
+#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_MASK (0x0000F000)
+#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_SHIFT (16)
+#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_MASK (0x00030000)
+#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_SHIFT (18)
+#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_MASK (0x00FC0000)
+#define SMU_POST_CONTEXT_PORT_RESERVED_MASK (0xFF000000)
+
+#define SMU_PCP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_POST_CONTEXT_PORT_ ## name, value)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_STATUS_COMPLETION_SHIFT (31)
+#define SMU_INTERRUPT_STATUS_COMPLETION_MASK (0x80000000)
+#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_SHIFT (1)
+#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_MASK (0x00000002)
+#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_SHIFT (0)
+#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_MASK (0x00000001)
+#define SMU_INTERRUPT_STATUS_RESERVED_MASK (0x7FFFFFFC)
+
+#define SMU_ISR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_INTERRUPT_STATUS_ ## name)
+
+#define SMU_ISR_QUEUE_ERROR SMU_ISR_GEN_BIT(QUEUE_ERROR)
+#define SMU_ISR_QUEUE_SUSPEND SMU_ISR_GEN_BIT(QUEUE_SUSPEND)
+#define SMU_ISR_COMPLETION SMU_ISR_GEN_BIT(COMPLETION)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_MASK_COMPLETION_SHIFT (31)
+#define SMU_INTERRUPT_MASK_COMPLETION_MASK (0x80000000)
+#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_SHIFT (1)
+#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_MASK (0x00000002)
+#define SMU_INTERRUPT_MASK_QUEUE_ERROR_SHIFT (0)
+#define SMU_INTERRUPT_MASK_QUEUE_ERROR_MASK (0x00000001)
+#define SMU_INTERRUPT_MASK_RESERVED_MASK (0x7FFFFFFC)
+
+#define SMU_IMR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_INTERRUPT_MASK_ ## name)
+
+#define SMU_IMR_QUEUE_ERROR SMU_IMR_GEN_BIT(QUEUE_ERROR)
+#define SMU_IMR_QUEUE_SUSPEND SMU_IMR_GEN_BIT(QUEUE_SUSPEND)
+#define SMU_IMR_COMPLETION SMU_IMR_GEN_BIT(COMPLETION)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_SHIFT (0)
+#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_MASK (0x0000001F)
+#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_SHIFT (8)
+#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_MASK (0x0000FF00)
+#define SMU_INTERRUPT_COALESCING_CONTROL_RESERVED_MASK (0xFFFF00E0)
+
+#define SMU_ICC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_ ## name, value)
+
+/* ***************************************************************************** */
+#define SMU_TASK_CONTEXT_RANGE_START_SHIFT (0)
+#define SMU_TASK_CONTEXT_RANGE_START_MASK (0x00000FFF)
+#define SMU_TASK_CONTEXT_RANGE_ENDING_SHIFT (16)
+#define SMU_TASK_CONTEXT_RANGE_ENDING_MASK (0x0FFF0000)
+#define SMU_TASK_CONTEXT_RANGE_ENABLE_SHIFT (31)
+#define SMU_TASK_CONTEXT_RANGE_ENABLE_MASK (0x80000000)
+#define SMU_TASK_CONTEXT_RANGE_RESERVED_MASK (0x7000F000)
+
+#define SMU_TCR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_TASK_CONTEXT_RANGE_ ## name, value)
+
+#define SMU_TCR_GEN_BIT(name, value) \
+ SCU_GEN_BIT(SMU_TASK_CONTEXT_RANGE_ ## name)
+
+/* ***************************************************************************** */
+
+#define SMU_COMPLETION_QUEUE_PUT_POINTER_SHIFT (0)
+#define SMU_COMPLETION_QUEUE_PUT_POINTER_MASK (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_SHIFT (15)
+#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_MASK (0x00008000)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_SHIFT (16)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_MASK (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_SHIFT (26)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_MASK (0x04000000)
+#define SMU_COMPLETION_QUEUE_PUT_RESERVED_MASK (0xF8004000)
+
+#define SMU_CQPR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_PUT_ ## name, value)
+
+#define SMU_CQPR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_COMPLETION_QUEUE_PUT_ ## name)
+
+/* ***************************************************************************** */
+
+#define SMU_COMPLETION_QUEUE_GET_POINTER_SHIFT (0)
+#define SMU_COMPLETION_QUEUE_GET_POINTER_MASK (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT (15)
+#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_MASK (0x00008000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT (16)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT (26)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_MASK (0x04000000)
+#define SMU_COMPLETION_QUEUE_GET_ENABLE_SHIFT (30)
+#define SMU_COMPLETION_QUEUE_GET_ENABLE_MASK (0x40000000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_SHIFT (31)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_MASK (0x80000000)
+#define SMU_COMPLETION_QUEUE_GET_RESERVED_MASK (0x38004000)
+
+#define SMU_CQGR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_GET_ ## name, value)
+
+#define SMU_CQGR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_COMPLETION_QUEUE_GET_ ## name)
+
+#define SMU_CQGR_CYCLE_BIT \
+ SMU_CQGR_GEN_BIT(CYCLE_BIT)
+
+#define SMU_CQGR_EVENT_CYCLE_BIT \
+ SMU_CQGR_GEN_BIT(EVENT_CYCLE_BIT)
+
+#define SMU_CQGR_GET_POINTER_SET(value) \
+ SMU_CQGR_GEN_VAL(POINTER, value)
+
+
+/* ***************************************************************************** */
+#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_SHIFT (0)
+#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_MASK (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_SHIFT (16)
+#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_MASK (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_CONTROL_RESERVED_MASK (0xFC00C000)
+
+#define SMU_CQC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_CONTROL_ ## name, value)
+
+#define SMU_CQC_QUEUE_LIMIT_SET(value) \
+ SMU_CQC_GEN_VAL(QUEUE_LIMIT, value)
+
+#define SMU_CQC_EVENT_LIMIT_SET(value) \
+ SMU_CQC_GEN_VAL(EVENT_LIMIT, value)
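+
+/*
+ * Illustrative sketch (hypothetical helper): the generator macros above are
+ * meant to be OR-ed together into a single register value, e.g. for the
+ * completion queue control register.
+ */
+static inline u32 sketch_completion_queue_control(u32 queue_limit, u32 event_limit)
+{
+ return SMU_CQC_QUEUE_LIMIT_SET(queue_limit) |
+ SMU_CQC_EVENT_LIMIT_SET(event_limit);
+}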
+
+
+/* ***************************************************************************** */
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT (0)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK (0x00000FFF)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT (12)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK (0x00007000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT (15)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK (0x07FF8000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT (27)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK (0x08000000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_RESERVED_MASK (0xF0000000)
+
+#define SMU_DCC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_DEVICE_CONTEXT_CAPACITY_ ## name, value)
+
+#define SMU_DCC_GET_MAX_PEG(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT \
+ )
+
+#define SMU_DCC_GET_MAX_LP(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \
+ )
+
+#define SMU_DCC_GET_MAX_TC(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT \
+ )
+
+#define SMU_DCC_GET_MAX_RNC(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
+ )
+
+/* -------------------------------------------------------------------------- */
+
+#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0)
+#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_MASK (0x00000001)
+#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_SHIFT (1)
+#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_MASK (0x00000002)
+#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_SHIFT (16)
+#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_MASK (0x00010000)
+#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_SHIFT (17)
+#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_MASK (0x00020000)
+#define SMU_CONTROL_STATUS_RESERVED_MASK (0xFFFCFFFC)
+
+#define SMU_SMUCSR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_CONTROL_STATUS_ ## name)
+
+#define SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
+ (SMU_SMUCSR_GEN_BIT(SCHEDULER_RAM_INIT_COMPLETED))
+
+#define SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
+ (SMU_SMUCSR_GEN_BIT(CONTEXT_RAM_INIT_COMPLETED))
+
+#define SCU_RAM_INIT_COMPLETED \
+ (\
+ SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
+ | SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
+ )
+
+/* -------------------------------------------------------------------------- */
+
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_SHIFT (0)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_MASK (0x00000001)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_SHIFT (1)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_MASK (0x00000002)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_SHIFT (2)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_MASK (0x00000004)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_SHIFT (3)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_MASK (0x00000008)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_SHIFT (8)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_MASK (0x00000100)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_SHIFT (9)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_MASK (0x00000200)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_SHIFT (10)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK (0x00000400)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_SHIFT (11)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_MASK (0x00000800)
+
+#define SMU_RESET_PROTOCOL_ENGINE(peg, pe) \
+ ((1 << (pe)) << ((peg) * 8))
+
+#define SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
+ (\
+ SMU_RESET_PROTOCOL_ENGINE(peg, 0) \
+ | SMU_RESET_PROTOCOL_ENGINE(peg, 1) \
+ | SMU_RESET_PROTOCOL_ENGINE(peg, 2) \
+ | SMU_RESET_PROTOCOL_ENGINE(peg, 3) \
+ )
+
+#define SMU_RESET_ALL_PROTOCOL_ENGINES() \
+ (\
+ SMU_RESET_PEG_PROTOCOL_ENGINES(0) \
+ | SMU_RESET_PEG_PROTOCOL_ENGINES(1) \
+ )
+
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_SHIFT (16)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_MASK (0x00010000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_SHIFT (17)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_MASK (0x00020000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_SHIFT (18)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_MASK (0x00040000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_SHIFT (19)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_MASK (0x00080000)
+
+#define SMU_RESET_WIDE_PORT_QUEUE(peg, wide_port) \
+ ((1 << ((wide_port) / 2)) << ((peg) * 2) << 16)
+
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_SHIFT (20)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_MASK (0x00100000)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_SHIFT (21)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_MASK (0x00200000)
+#define SMU_SOFTRESET_CONTROL_RESET_SCU_SHIFT (22)
+#define SMU_SOFTRESET_CONTROL_RESET_SCU_MASK (0x00400000)
+
+/*
+ * It seems reasonable that resetting a protocol engine group should also
+ * reset all of the protocol engines within that group. */
+#define SMU_RESET_PROTOCOL_ENGINE_GROUP(peg) \
+ (\
+ (1 << ((peg) + 20)) \
+ | SMU_RESET_WIDE_PORT_QUEUE(peg, 0) \
+ | SMU_RESET_WIDE_PORT_QUEUE(peg, 1) \
+ | SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
+ )
+
+#define SMU_RESET_ALL_PROTOCOL_ENGINE_GROUPS() \
+ (\
+ SMU_RESET_PROTOCOL_ENGINE_GROUP(0) \
+ | SMU_RESET_PROTOCOL_ENGINE_GROUP(1) \
+ )
+
+#define SMU_RESET_SCU() (0xFFFFFFFF)
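+/*
+ * Illustration only: the reset helpers above compose into a single value for
+ * the soft reset control register (SCUSRCR, declared later in this file);
+ * "smu" is a hypothetical register pointer and this is not the driver's
+ * actual reset sequence.
+ *
+ *	writel(SMU_RESET_ALL_PROTOCOL_ENGINE_GROUPS(),
+ *	       &smu->soft_reset_control);
+ */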
+
+
+
+/* ***************************************************************************** */
+#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_SHIFT (0)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_MASK (0x00000FFF)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_SHIFT (16)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_MASK (0x0FFF0000)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_SHIFT (31)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_MASK (0x80000000)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RESERVED_MASK (0x7000F000)
+
+#define SMU_TCA_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name, value)
+
+#define SMU_TCA_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_RESERVED_MASK (0xFFFFF000)
+
+#define SCU_UFQC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_ ## name, value)
+
+#define SCU_UFQC_QUEUE_SIZE_SET(value) \
+ SCU_UFQC_GEN_VAL(QUEUE_SIZE, value)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_SHIFT (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_MASK (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_RESERVED_MASK (0xFFFFE000)
+
+#define SCU_UFQPP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name, value)
+
+#define SCU_UFQPP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_SHIFT (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_MASK (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_SHIFT (31)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_MASK (0x80000000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_RESERVED_MASK (0x7FFFE000)
+
+#define SCU_UFQGP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name, value)
+
+#define SCU_UFQGP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name)
+
+#define SCU_UFQGP_CYCLE_BIT(value) \
+ ((value) ? SCU_UFQGP_GEN_BIT(CYCLE_BIT) : 0)
+
+#define SCU_UFQGP_GET_POINTER(value) \
+ SCU_UFQGP_GEN_VAL(POINTER, value)
+
+#define SCU_UFQGP_ENABLE(value) \
+ (SCU_UFQGP_GEN_BIT(ENABLE_BIT) | (value))
+
+#define SCU_UFQGP_DISABLE(value) \
+ (~SCU_UFQGP_GEN_BIT(ENABLE_BIT) & (value))
+
+#define SCU_UFQGP_VALUE(bit, value) \
+ (SCU_UFQGP_CYCLE_BIT(bit) | SCU_UFQGP_GET_POINTER(value))
+
+/* ***************************************************************************** */
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SHIFT (0)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_MASK (0x0000FFFF)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (16)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00010000)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_SHIFT (17)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_MASK (0x00020000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_SHIFT (18)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_MASK (0x00040000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_SHIFT (19)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_MASK (0x00080000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_SHIFT (20)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_MASK (0x00100000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_SHIFT (21)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_MASK (0x00200000)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_SHIFT (22)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_MASK (0x00400000)
+#define SCU_PDMA_CONFIGURATION_RESERVED_MASK (0xFF800000)
+
+#define SCU_PDMACR_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_PDMA_CONFIGURATION_ ## name, value)
+
+#define SCU_PDMACR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PDMA_CONFIGURATION_ ## name)
+
+#define SCU_PDMACR_BE_GEN_BIT(name) \
+ SCU_PDMACR_GEN_BIT(BIG_ENDIAN_CONTROL_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (8)
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00000100)
+
+#define SCU_CDMACR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_CDMA_CONFIGURATION_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SCU Link Layer Registers
+ * ***************************************************************************** */
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_SHIFT (0)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_MASK (0x000000FF)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_SHIFT (8)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_MASK (0x0000FF00)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_SHIFT (16)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_MASK (0x00FF0000)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_SHIFT (24)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_MASK (0xFF000000)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_REQUIRED_MASK (0x00000000)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_DEFAULT_MASK (0x7D00676F)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_RESERVED_MASK (0x00FF0000)
+
+#define SCU_SAS_SPDTOV_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_ ## name, value)
+
+
+#define SCU_LINK_STATUS_DWORD_SYNC_AQUIRED_SHIFT (2)
+#define SCU_LINK_STATUS_DWORD_SYNC_AQUIRED_MASK (0x00000004)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_SHIFT (4)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_MASK (0x00000010)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_SHIFT (5)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_MASK (0x00000020)
+#define SCU_LINK_STATUS_RESERVED_MASK (0xFFFFFFCD)
+
+#define SCU_SAS_LLSTA_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_STATUS_ ## name)
+
+
+/* TODO: Where is the SATA_PSELTOV register? */
+
+/*
+ * *****************************************************************************
+ * * SCU SAS Maximum Arbitration Wait Time Timeout Register
+ * ***************************************************************************** */
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_SHIFT (0)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_MASK (0x00007FFF)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_SHIFT (15)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_MASK (0x00008000)
+
+#define SCU_SAS_MAWTTOV_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name, value)
+
+#define SCU_SAS_MAWTTOV_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name)
+
+
+/*
+ * TODO: Where is the SAS_LNKTOV register?
+ * TODO: Where is the SAS_PHYTOV register? */
+
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT (1)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_MASK (0x00000002)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_SHIFT (2)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_MASK (0x00000004)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_SHIFT (3)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_MASK (0x00000008)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_SHIFT (8)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_MASK (0x00000100)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_SHIFT (9)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_MASK (0x00000200)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_SHIFT (10)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_MASK (0x00000400)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_SHIFT (11)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_MASK (0x00000800)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_SHIFT (16)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_MASK (0x000F0000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_SHIFT (24)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_MASK (0x0F000000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_SHIFT (28)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_MASK (0x70000000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_RESERVED_MASK (0x80F0F1F1)
+
+#define SCU_SAS_TIID_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name, value)
+
+#define SCU_SAS_TIID_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name)
+
+/* SAS Identify Frame PHY Identifier Register */
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_SHIFT (16)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_MASK (0x00010000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_SHIFT (17)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_MASK (0x00020000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_SHIFT (18)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_MASK (0x00040000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_SHIFT (24)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_MASK (0xFF000000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_RESERVED_MASK (0x00F800FF)
+
+#define SCU_SAS_TIPID_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name, value)
+
+#define SCU_SAS_TIPID_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name)
+
+
+#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_SHIFT (4)
+#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_MASK (0x00000010)
+#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_SHIFT (6)
+#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_MASK (0x00000040)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_SHIFT (7)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_MASK (0x00000080)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_SHIFT (8)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_MASK (0x00000100)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_SHIFT (9)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_MASK (0x00000200)
+#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_SHIFT (11)
+#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_MASK (0x00000800)
+#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_SHIFT (12)
+#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_MASK (0x00001000)
+#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_SHIFT (13)
+#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_MASK (0x00002000)
+#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_SHIFT (14)
+#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_MASK (0x00004000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_SHIFT (15)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_MASK (0x00008000)
+#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_SHIFT (23)
+#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_MASK (0x00800000)
+#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_SHIFT (27)
+#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_MASK (0x08000000)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_SHIFT (28)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_MASK (0x10000000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_SHIFT (29)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_MASK (0x20000000)
+#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_SHIFT (30)
+#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_MASK (0x40000000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_SHIFT (31)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_MASK (0x80000000)
+#define SCU_SAS_PHY_CONFIGURATION_REQUIRED_MASK (0x0100000F)
+#define SCU_SAS_PHY_CONFIGURATION_DEFAULT_MASK (0x4180100F)
+#define SCU_SAS_PHY_CONFIGURATION_RESERVED_MASK (0x00000000)
+
+#define SCU_SAS_PCFG_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_PHY_CONFIGURATION_ ## name)
+
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_SHIFT (0)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_MASK (0x000007FF)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_SHIFT (16)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_MASK (0x00ff0000)
+
+#define SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_##name, value)
+
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_SHIFT (0)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_MASK (0x0003FFFF)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_SHIFT (31)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_MASK (0x80000000)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_RESERVED_MASK (0x7FFC0000)
+
+#define SCU_ENSPINUP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name, value)
+
+#define SCU_ENSPINUP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name)
+
+
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_SHIFT (1)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_MASK (0x00000002)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_SHIFT (4)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_MASK (0x000000F0)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_SHIFT (8)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_MASK (0x00000100)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_SHIFT (9)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_MASK (0x00000201)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_SHIFT (10)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_MASK (0x00000401)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_SHIFT (11)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_MASK (0x00000801)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_SHIFT (12)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_MASK (0x00001001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_SHIFT (13)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_MASK (0x00002001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_SHIFT (31)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_MASK (0x80000000)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_DEFAULT_MASK (0x00003F01)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_REQUIRED_MASK (0x00000001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RESERVED_MASK (0x7FFFC00D)
+
+#define SCU_SAS_PHYCAP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name, value)
+
+#define SCU_SAS_PHYCAP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name)
+
+
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_SHIFT (0)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_MASK (0x000000FF)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_SHIFT (31)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_MASK (0x80000000)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_RESERVED_MASK (0x7FFFFF00)
+
+#define SCU_PSZGCR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name, value)
+
+#define SCU_PSZGCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name)
+
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_SHIFT (1)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_MASK (0x00000002)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_SHIFT (2)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_MASK (0x00000004)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_SHIFT (4)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_MASK (0x00000010)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_SHIFT (5)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_MASK (0x00000020)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_SHIFT (16)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_MASK (0x00030000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_SHIFT (19)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_MASK (0x00080000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_SHIFT (20)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_MASK (0x00300000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_SHIFT (23)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_MASK (0x00800000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_SHIFT (24)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_MASK (0x03000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_SHIFT (27)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_MASK (0x08000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_SHIFT (28)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_MASK (0x30000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_SHIFT (31)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_MASK (0x80000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_RESERVED_MASK (0x4444FFC9)
+
+#define SCU_PEG_SCUVZECR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name, val)
+
+#define SCU_PEG_SCUVZECR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name)
+
+
+/*
+ * *****************************************************************************
+ * * Port Task Scheduler registers shift and mask values
+ * ***************************************************************************** */
+#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_SHIFT (0)
+#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_MASK (0x0000FFFF)
+#define SCU_PTSG_CONTROL_TASK_TIMEOUT_SHIFT (16)
+#define SCU_PTSG_CONTROL_TASK_TIMEOUT_MASK (0x00FF0000)
+#define SCU_PTSG_CONTROL_PTSG_ENABLE_SHIFT (24)
+#define SCU_PTSG_CONTROL_PTSG_ENABLE_MASK (0x01000000)
+#define SCU_PTSG_CONTROL_ETM_ENABLE_SHIFT (25)
+#define SCU_PTSG_CONTROL_ETM_ENABLE_MASK (0x02000000)
+#define SCU_PTSG_CONTROL_DEFAULT_MASK (0x00020002)
+#define SCU_PTSG_CONTROL_REQUIRED_MASK (0x00000000)
+#define SCU_PTSG_CONTROL_RESERVED_MASK (0xFC000000)
+
+#define SCU_PTSGCR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PTSG_CONTROL_ ## name, val)
+
+#define SCU_PTSGCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PTSG_CONTROL_ ## name)
+
+
+/* ***************************************************************************** */
+#define SCU_PTSG_REAL_TIME_CLOCK_SHIFT (0)
+#define SCU_PTSG_REAL_TIME_CLOCK_MASK (0x0000FFFF)
+#define SCU_PTSG_REAL_TIME_CLOCK_RESERVED_MASK (0xFFFF0000)
+
+#define SCU_RTCR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PTSG_ ## name, val)
+
+
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_SHIFT (0)
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_MASK (0x00FFFFFF)
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_RESERVED_MASK (0xFF000000)
+
+#define SCU_RTCCR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PTSG_REAL_TIME_CLOCK_CONTROL_ ## name, val)
+
+
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_SHIFT (0)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_MASK (0x00000001)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_SHIFT (1)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_MASK (0x00000002)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_RESERVED_MASK (0xFFFFFFFC)
+
+#define SCU_PTSxCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ ## name)
+
+
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_SHIFT (0)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_MASK (0x00000001)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_SHIFT (1)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_MASK (0x00000002)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_SHIFT (2)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_MASK (0x00000004)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_RESERVED_MASK (0xFFFFFFF8)
+
+#define SCU_PTSxSR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name)
+
+
+/*
+ * *****************************************************************************
+ * * SGPIO Register shift and mask values
+ * ***************************************************************************** */
+#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_SHIFT (0)
+#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_MASK (0x00000001)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_SHIFT (1)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_MASK (0x00000002)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_SHIFT (2)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_MASK (0x00000004)
+#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_SHIFT (15)
+#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_MASK (0x00008000)
+#define SCU_SGPIO_CONTROL_SGPIO_RESERVED_MASK (0xFFFF7FF8)
+
+#define SCU_SGICRx_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SGPIO_CONTROL_SGPIO_ ## name)
+
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_SHIFT (0)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_MASK (0x0000000F)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_SHIFT (4)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_MASK (0x000000F0)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_SHIFT (8)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_MASK (0x00000F00)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_SHIFT (12)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_MASK (0x0000F000)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_RESERVED_MASK (0xFFFF0000)
+
+#define SCU_SGPBRx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_ ## name, value)
+
+#define SCU_SGPIO_START_DRIVE_LOWER_R0_SHIFT (0)
+#define SCU_SGPIO_START_DRIVE_LOWER_R0_MASK (0x00000003)
+#define SCU_SGPIO_START_DRIVE_LOWER_R1_SHIFT (4)
+#define SCU_SGPIO_START_DRIVE_LOWER_R1_MASK (0x00000030)
+#define SCU_SGPIO_START_DRIVE_LOWER_R2_SHIFT (8)
+#define SCU_SGPIO_START_DRIVE_LOWER_R2_MASK (0x00000300)
+#define SCU_SGPIO_START_DRIVE_LOWER_R3_SHIFT (12)
+#define SCU_SGPIO_START_DRIVE_LOWER_R3_MASK (0x00003000)
+#define SCU_SGPIO_START_DRIVE_LOWER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSDLRx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value)
+
+#define SCU_SGPIO_START_DRIVE_UPPER_R0_SHIFT (0)
+#define SCU_SGPIO_START_DRIVE_UPPER_R0_MASK (0x00000003)
+#define SCU_SGPIO_START_DRIVE_UPPER_R1_SHIFT (4)
+#define SCU_SGPIO_START_DRIVE_UPPER_R1_MASK (0x00000030)
+#define SCU_SGPIO_START_DRIVE_UPPER_R2_SHIFT (8)
+#define SCU_SGPIO_START_DRIVE_UPPER_R2_MASK (0x00000300)
+#define SCU_SGPIO_START_DRIVE_UPPER_R3_SHIFT (12)
+#define SCU_SGPIO_START_DRIVE_UPPER_R3_MASK (0x00003000)
+#define SCU_SGPIO_START_DRIVE_UPPER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSDURx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_UPPER_ ## name, value)
+
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_SHIFT (0)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_MASK (0x00000003)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_SHIFT (4)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_MASK (0x00000030)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_SHIFT (8)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_MASK (0x00000300)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_SHIFT (12)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_MASK (0x00003000)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSIDLRx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
+
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_SHIFT (0)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_MASK (0x00000003)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_SHIFT (4)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_MASK (0x00000030)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_SHIFT (8)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_MASK (0x00000300)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_SHIFT (12)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_MASK (0x00003000)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSIDURx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_ ## name, value)
+
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_SHIFT (0)
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_MASK (0x0000000F)
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_RESERVED_MASK (0xFFFFFFF0)
+
+#define SCU_SGVSCR_GEN_VAL(value) \
+ SCU_GEN_VALUE(SCU_SGPIO_VENDOR_SPECIFIC_CODE, value)
+
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_SHIFT (0)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_MASK (0x00000003)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_SHIFT (2)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_MASK (0x00000004)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_SHIFT (3)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_MASK (0x00000008)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_SHIFT (4)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_MASK (0x00000030)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_SHIFT (6)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_MASK (0x00000040)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_SHIFT (7)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_MASK (0x00000080)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_SHIFT (8)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_MASK (0x00000300)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_SHIFT (10)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_MASK (0x00000400)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_SHIFT (11)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_MASK (0x00000800)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_RESERVED_MASK (0xFFFFF000)
+
+#define SCU_SGODSR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name, value)
+
+#define SCU_SGODSR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SMU Registers
+ * ***************************************************************************** */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SMU Registers
+ * These register offsets are relative to BAR0.
+ *
+ * To calculate the offset for other functions use
+ * BAR0 + FN# * SystemPageSize * 2
+ *
+ * The TCA is only accessible from FN#0 (the physical function) and the
+ * entry for each function is programmed at
+ * (BAR0 + SCU_SMU_TCA_OFFSET + (FN# * 0x04)), i.e.
+ * TCA0 for FN#0 is at BAR0 + 0x0400
+ * TCA1 for FN#1 is at BAR0 + 0x0404
+ * etc.
+ * ----------------------------------------------------------------------------
+ * Accessible to all FN#s */
+#define SCU_SMU_PCP_OFFSET 0x0000
+#define SCU_SMU_AMR_OFFSET 0x0004
+#define SCU_SMU_ISR_OFFSET 0x0010
+#define SCU_SMU_IMR_OFFSET 0x0014
+#define SCU_SMU_ICC_OFFSET 0x0018
+#define SCU_SMU_HTTLBAR_OFFSET 0x0020
+#define SCU_SMU_HTTUBAR_OFFSET 0x0024
+#define SCU_SMU_TCR_OFFSET 0x0028
+#define SCU_SMU_CQLBAR_OFFSET 0x0030
+#define SCU_SMU_CQUBAR_OFFSET 0x0034
+#define SCU_SMU_CQPR_OFFSET 0x0040
+#define SCU_SMU_CQGR_OFFSET 0x0044
+#define SCU_SMU_CQC_OFFSET 0x0048
+/* Accessible to FN#0 only */
+#define SCU_SMU_RNCLBAR_OFFSET 0x0080
+#define SCU_SMU_RNCUBAR_OFFSET 0x0084
+#define SCU_SMU_DCC_OFFSET 0x0090
+#define SCU_SMU_DFC_OFFSET 0x0094
+#define SCU_SMU_SMUCSR_OFFSET 0x0098
+#define SCU_SMU_SCUSRCR_OFFSET 0x009C
+#define SCU_SMU_SMAW_OFFSET 0x00A0
+#define SCU_SMU_SMDW_OFFSET 0x00A4
+/* Accessible to FN#0 only */
+#define SCU_SMU_TCA_OFFSET 0x0400
+/* Accessible to all FN#s */
+#define SCU_SMU_MT_MLAR0_OFFSET 0x2000
+#define SCU_SMU_MT_MUAR0_OFFSET 0x2004
+#define SCU_SMU_MT_MDR0_OFFSET 0x2008
+#define SCU_SMU_MT_VCR0_OFFSET 0x200C
+#define SCU_SMU_MT_MLAR1_OFFSET 0x2010
+#define SCU_SMU_MT_MUAR1_OFFSET 0x2014
+#define SCU_SMU_MT_MDR1_OFFSET 0x2018
+#define SCU_SMU_MT_VCR1_OFFSET 0x201C
+#define SCU_SMU_MPBA_OFFSET 0x3000
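+/*
+ * Illustration only: the per-function address math described in the comment
+ * above, written out as a sketch; "fn", "bar0" and "page_size" are
+ * hypothetical locals.
+ *
+ *	void __iomem *fn_base = bar0 + fn * page_size * 2;
+ *	u32 tca_offset = SCU_SMU_TCA_OFFSET + (fn * 0x04);
+ */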
+
+/**
+ * struct smu_registers - These are the SMU registers
+ *
+ *
+ */
+struct smu_registers {
+/* 0x0000 PCP */
+ u32 post_context_port;
+/* 0x0004 AMR */
+ u32 address_modifier;
+ u32 reserved_08;
+ u32 reserved_0C;
+/* 0x0010 ISR */
+ u32 interrupt_status;
+/* 0x0014 IMR */
+ u32 interrupt_mask;
+/* 0x0018 ICC */
+ u32 interrupt_coalesce_control;
+ u32 reserved_1C;
+/* 0x0020 HTTLBAR */
+ u32 host_task_table_lower;
+/* 0x0024 HTTUBAR */
+ u32 host_task_table_upper;
+/* 0x0028 TCR */
+ u32 task_context_range;
+ u32 reserved_2C;
+/* 0x0030 CQLBAR */
+ u32 completion_queue_lower;
+/* 0x0034 CQUBAR */
+ u32 completion_queue_upper;
+ u32 reserved_38;
+ u32 reserved_3C;
+/* 0x0040 CQPR */
+ u32 completion_queue_put;
+/* 0x0044 CQGR */
+ u32 completion_queue_get;
+/* 0x0048 CQC */
+ u32 completion_queue_control;
+ u32 reserved_4C;
+ u32 reserved_5x[4];
+ u32 reserved_6x[4];
+ u32 reserved_7x[4];
+/*
+ * Accessible to FN#0 only
+ * 0x0080 RNCLBAR */
+ u32 remote_node_context_lower;
+/* 0x0084 RNCUBAR */
+ u32 remote_node_context_upper;
+ u32 reserved_88;
+ u32 reserved_8C;
+/* 0x0090 DCC */
+ u32 device_context_capacity;
+/* 0x0094 DFC */
+ u32 device_function_capacity;
+/* 0x0098 SMUCSR */
+ u32 control_status;
+/* 0x009C SCUSRCR */
+ u32 soft_reset_control;
+/* 0x00A0 SMAW */
+ u32 mmr_address_window;
+/* 0x00A4 SMDW */
+ u32 mmr_data_window;
+ u32 reserved_A8;
+ u32 reserved_AC;
+/* A whole bunch of reserved space */
+ u32 reserved_Bx[4];
+ u32 reserved_Cx[4];
+ u32 reserved_Dx[4];
+ u32 reserved_Ex[4];
+ u32 reserved_Fx[4];
+ u32 reserved_1xx[64];
+ u32 reserved_2xx[64];
+ u32 reserved_3xx[64];
+/*
+ * Accessible to FN#0 only
+ * 0x0400 TCA */
+ u32 task_context_assignment[256];
+/* MSI-X registers not included */
+};
+
+/*
+ * *****************************************************************************
+ * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_BASE 0x6000
+#define SCU_SDMA_PUFATLHAR_OFFSET 0x0000
+#define SCU_SDMA_PUFATUHAR_OFFSET 0x0004
+#define SCU_SDMA_UFLHBAR_OFFSET 0x0008
+#define SCU_SDMA_UFUHBAR_OFFSET 0x000C
+#define SCU_SDMA_UFQC_OFFSET 0x0010
+#define SCU_SDMA_UFQPP_OFFSET 0x0014
+#define SCU_SDMA_UFQGP_OFFSET 0x0018
+#define SCU_SDMA_PDMACR_OFFSET 0x001C
+#define SCU_SDMA_CDMACR_OFFSET 0x0080
+
+/**
+ * struct scu_sdma_registers - These are the SCU SDMA Registers
+ *
+ *
+ */
+struct scu_sdma_registers {
+/* 0x0000 PUFATLHAR */
+ u32 uf_address_table_lower;
+/* 0x0004 PUFATUHAR */
+ u32 uf_address_table_upper;
+/* 0x0008 UFLHBAR */
+ u32 uf_header_base_address_lower;
+/* 0x000C UFUHBAR */
+ u32 uf_header_base_address_upper;
+/* 0x0010 UFQC */
+ u32 unsolicited_frame_queue_control;
+/* 0x0014 UFQPP */
+ u32 unsolicited_frame_put_pointer;
+/* 0x0018 UFQGP */
+ u32 unsolicited_frame_get_pointer;
+/* 0x001C PDMACR */
+ u32 pdma_configuration;
+/* Reserved until offset 0x80 */
+ u32 reserved_0020_007C[0x18];
+/* 0x0080 CDMACR */
+ u32 cdma_configuration;
+/* Remainder SDMA register space */
+ u32 reserved_0084_0400[0xDF];
+
+};
+
+/*
+ * *****************************************************************************
+ * * SCU Link Registers
+ * ***************************************************************************** */
+#define SCU_PEG0_OFFSET 0x0000
+#define SCU_PEG1_OFFSET 0x8000
+
+#define SCU_TL0_OFFSET 0x0000
+#define SCU_TL1_OFFSET 0x0400
+#define SCU_TL2_OFFSET 0x0800
+#define SCU_TL3_OFFSET 0x0C00
+
+#define SCU_LL_OFFSET 0x0080
+#define SCU_LL0_OFFSET (SCU_TL0_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL1_OFFSET (SCU_TL1_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL2_OFFSET (SCU_TL2_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL3_OFFSET (SCU_TL3_OFFSET + SCU_LL_OFFSET)
+
+/* Transport Layer Offsets (PEG + TL) */
+#define SCU_TLCR_OFFSET 0x0000
+#define SCU_TLADTR_OFFSET 0x0004
+#define SCU_TLTTMR_OFFSET 0x0008
+#define SCU_TLEECR0_OFFSET 0x000C
+#define SCU_STPTLDARNI_OFFSET 0x0010
+
+
+#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_SHIFT (0)
+#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_MASK (0x00000001)
+#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_SHIFT (1)
+#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_MASK (0x00000002)
+#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_SHIFT (3)
+#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_MASK (0x00000008)
+#define SCU_TLCR_CMD_NAK_STATUS_CODE_SHIFT (4)
+#define SCU_TLCR_CMD_NAK_STATUS_CODE_MASK (0x00000010)
+#define SCU_TLCR_RESERVED_MASK (0xFFFFFFEB)
+
+#define SCU_TLCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_TLCR_ ## name)
+
+/**
+ * struct scu_transport_layer_registers - These are the SCU Transport Layer
+ * registers
+ *
+ *
+ */
+struct scu_transport_layer_registers {
+ /* 0x0000 TLCR */
+ u32 control;
+ /* 0x0004 TLADTR */
+ u32 arbitration_delay_timer;
+ /* 0x0008 TLTTMR */
+ u32 timer_test_mode;
+ /* 0x000C reserved */
+ u32 reserved_0C;
+ /* 0x0010 STPTLDARNI */
+ u32 stp_rni;
+ /* 0x0014 TLFEWPORCTRL */
+ u32 tlfe_wpo_read_control;
+ /* 0x0018 TLFEWPORDATA */
+ u32 tlfe_wpo_read_data;
+ /* 0x001C RXTLSSCSR1 */
+ u32 rxtl_single_step_control_status_1;
+ /* 0x0020 RXTLSSCSR2 */
+ u32 rxtl_single_step_control_status_2;
+ /* 0x0024 AWTRDDCR */
+ u32 tlfe_awt_retry_delay_debug_control;
+ /* Remainder of TL memory space */
+ u32 reserved_0028_007F[0x16];
+
+};
+
+/* Protocol Engine Group Registers */
+#define SCU_SCUVZECRx_OFFSET 0x1080
+
+/* Link Layer Offsets (PEG + TL + LL) */
+#define SCU_SAS_SPDTOV_OFFSET 0x0000
+#define SCU_SAS_LLSTA_OFFSET 0x0004
+#define SCU_SATA_PSELTOV_OFFSET 0x0008
+#define SCU_SAS_TIMETOV_OFFSET 0x0010
+#define SCU_SAS_LOSTOT_OFFSET 0x0014
+#define SCU_SAS_LNKTOV_OFFSET 0x0018
+#define SCU_SAS_PHYTOV_OFFSET 0x001C
+#define SCU_SAS_AFERCNT_OFFSET 0x0020
+#define SCU_SAS_WERCNT_OFFSET 0x0024
+#define SCU_SAS_TIID_OFFSET 0x0028
+#define SCU_SAS_TIDNH_OFFSET 0x002C
+#define SCU_SAS_TIDNL_OFFSET 0x0030
+#define SCU_SAS_TISSAH_OFFSET 0x0034
+#define SCU_SAS_TISSAL_OFFSET 0x0038
+#define SCU_SAS_TIPID_OFFSET 0x003C
+#define SCU_SAS_TIRES2_OFFSET 0x0040
+#define SCU_SAS_ADRSTA_OFFSET 0x0044
+#define SCU_SAS_MAWTTOV_OFFSET 0x0048
+#define SCU_SAS_FRPLDFIL_OFFSET 0x0054
+#define SCU_SAS_RFCNT_OFFSET 0x0060
+#define SCU_SAS_TFCNT_OFFSET 0x0064
+#define SCU_SAS_RFDCNT_OFFSET 0x0068
+#define SCU_SAS_TFDCNT_OFFSET 0x006C
+#define SCU_SAS_LERCNT_OFFSET 0x0070
+#define SCU_SAS_RDISERRCNT_OFFSET 0x0074
+#define SCU_SAS_CRERCNT_OFFSET 0x0078
+#define SCU_STPCTL_OFFSET 0x007C
+#define SCU_SAS_PCFG_OFFSET 0x0080
+#define SCU_SAS_CLKSM_OFFSET 0x0084
+#define SCU_SAS_TXCOMWAKE_OFFSET 0x0088
+#define SCU_SAS_TXCOMINIT_OFFSET 0x008C
+#define SCU_SAS_TXCOMSAS_OFFSET 0x0090
+#define SCU_SAS_COMINIT_OFFSET 0x0094
+#define SCU_SAS_COMWAKE_OFFSET 0x0098
+#define SCU_SAS_COMSAS_OFFSET 0x009C
+#define SCU_SAS_SFERCNT_OFFSET 0x00A0
+#define SCU_SAS_CDFERCNT_OFFSET 0x00A4
+#define SCU_SAS_DNFERCNT_OFFSET 0x00A8
+#define SCU_SAS_PRSTERCNT_OFFSET 0x00AC
+#define SCU_SAS_CNTCTL_OFFSET 0x00B0
+#define SCU_SAS_SSPTOV_OFFSET 0x00B4
+#define SCU_FTCTL_OFFSET 0x00B8
+#define SCU_FRCTL_OFFSET 0x00BC
+#define SCU_FTWMRK_OFFSET 0x00C0
+#define SCU_ENSPINUP_OFFSET 0x00C4
+#define SCU_SAS_TRNTOV_OFFSET 0x00C8
+#define SCU_SAS_PHYCAP_OFFSET 0x00CC
+#define SCU_SAS_PHYCTL_OFFSET 0x00D0
+#define SCU_SAS_LLCTL_OFFSET 0x00D8
+#define SCU_AFE_XCVRCR_OFFSET 0x00DC
+#define SCU_AFE_LUTCR_OFFSET 0x00E0
+
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2 (1)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3 (2)
+#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_SHIFT (2)
+#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_MASK (0x000003FC)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_SHIFT (16)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_MASK (0x00010000)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_SHIFT (17)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_MASK (0x00020000)
+#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_SHIFT (24)
+#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_MASK (0xFF000000)
+#define SCU_SAS_LINK_LAYER_CONTROL_RESERVED (0x00FCFC00)
+
+#define SCU_SAS_LLCTL_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_CONTROL_ ## name, value)
+
+#define SCU_SAS_LLCTL_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
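+/*
+ * Illustration only: a hedged example of building a link layer control value
+ * with the helpers above; the GEN2 rate choice, the timeout of 20 and the
+ * "llr" pointer (struct scu_link_layer_registers, defined later in this
+ * file) are made-up examples, not the driver's actual settings.
+ *
+ *	u32 llctl = SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE,
+ *			SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2) |
+ *		    SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT, 20);
+ *	writel(llctl, &llr->link_layer_control);
+ */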
+
+
+/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */
+#define SCU_PSZGCR_OFFSET 0x00E4
+#define SCU_SAS_RECPHYCAP_OFFSET 0x00E8
+/* #define SCU_TX_LUTSEL_OFFSET 0x00B8 */
+
+#define SCU_SAS_PTxC_OFFSET 0x00D4 /* Same offset as SAS_TCTSTM */
+
+/**
+ * struct scu_link_layer_registers - SCU Link Layer Registers
+ *
+ *
+ */
+struct scu_link_layer_registers {
+/* 0x0000 SAS_SPDTOV */
+ u32 speed_negotiation_timers;
+/* 0x0004 SAS_LLSTA */
+ u32 link_layer_status;
+/* 0x0008 SATA_PSELTOV */
+ u32 port_selector_timeout;
+ u32 reserved0C;
+/* 0x0010 SAS_TIMETOV */
+ u32 timeout_unit_value;
+/* 0x0014 SAS_RCDTOV */
+ u32 rcd_timeout;
+/* 0x0018 SAS_LNKTOV */
+ u32 link_timer_timeouts;
+/* 0x001C SAS_PHYTOV */
+ u32 sas_phy_timeouts;
+/* 0x0020 SAS_AFERCNT */
+ u32 received_address_frame_error_counter;
+/* 0x0024 SAS_WERCNT */
+ u32 invalid_dword_counter;
+/* 0x0028 SAS_TIID */
+ u32 transmit_identification;
+/* 0x002C SAS_TIDNH */
+ u32 sas_device_name_high;
+/* 0x0030 SAS_TIDNL */
+ u32 sas_device_name_low;
+/* 0x0034 SAS_TISSAH */
+ u32 source_sas_address_high;
+/* 0x0038 SAS_TISSAL */
+ u32 source_sas_address_low;
+/* 0x003C SAS_TIPID */
+ u32 identify_frame_phy_id;
+/* 0x0040 SAS_TIRES2 */
+ u32 identify_frame_reserved;
+/* 0x0044 SAS_ADRSTA */
+ u32 received_address_frame;
+/* 0x0048 SAS_MAWTTOV */
+ u32 maximum_arbitration_wait_timer_timeout;
+/* 0x004C SAS_PTxC */
+ u32 transmit_primitive;
+/* 0x0050 SAS_RORES */
+ u32 error_counter_event_notification_control;
+/* 0x0054 SAS_FRPLDFIL */
+ u32 frxq_payload_fill_threshold;
+/* 0x0058 SAS_LLHANG_TOT */
+ u32 link_layer_hang_detection_timeout;
+ u32 reserved_5C;
+/* 0x0060 SAS_RFCNT */
+ u32 received_frame_count;
+/* 0x0064 SAS_TFCNT */
+ u32 transmit_frame_count;
+/* 0x0068 SAS_RFDCNT */
+ u32 received_dword_count;
+/* 0x006C SAS_TFDCNT */
+ u32 transmit_dword_count;
+/* 0x0070 SAS_LERCNT */
+ u32 loss_of_sync_error_count;
+/* 0x0074 SAS_RDISERRCNT */
+ u32 running_disparity_error_count;
+/* 0x0078 SAS_CRERCNT */
+ u32 received_frame_crc_error_count;
+/* 0x007C STPCTL */
+ u32 stp_control;
+/* 0x0080 SAS_PCFG */
+ u32 phy_configuration;
+/* 0x0084 SAS_CLKSM */
+ u32 clock_skew_management;
+/* 0x0088 SAS_TXCOMWAKE */
+ u32 transmit_comwake_signal;
+/* 0x008C SAS_TXCOMINIT */
+ u32 transmit_cominit_signal;
+/* 0x0090 SAS_TXCOMSAS */
+ u32 transmit_comsas_signal;
+/* 0x0094 SAS_COMINIT */
+ u32 cominit_control;
+/* 0x0098 SAS_COMWAKE */
+ u32 comwake_control;
+/* 0x009C SAS_COMSAS */
+ u32 comsas_control;
+/* 0x00A0 SAS_SFERCNT */
+ u32 received_short_frame_count;
+/* 0x00A4 SAS_CDFERCNT */
+ u32 received_frame_without_credit_count;
+/* 0x00A8 SAS_DNFERCNT */
+ u32 received_frame_after_done_count;
+/* 0x00AC SAS_PRSTERCNT */
+ u32 phy_reset_problem_count;
+/* 0x00B0 SAS_CNTCTL */
+ u32 counter_control;
+/* 0x00B4 SAS_SSPTOV */
+ u32 ssp_timer_timeout_values;
+/* 0x00B8 FTCTL */
+ u32 ftx_control;
+/* 0x00BC FRCTL */
+ u32 frx_control;
+/* 0x00C0 FTWMRK */
+ u32 ftx_watermark;
+/* 0x00C4 ENSPINUP */
+ u32 notify_enable_spinup_control;
+/* 0x00C8 SAS_TRNTOV */
+ u32 sas_training_sequence_timer_values;
+/* 0x00CC SAS_PHYCAP */
+ u32 phy_capabilities;
+/* 0x00D0 SAS_PHYCTL */
+ u32 phy_control;
+ u32 reserved_d4;
+/* 0x00D8 LLCTL */
+ u32 link_layer_control;
+/* 0x00DC AFE_XCVRCR */
+ u32 afe_xcvr_control;
+/* 0x00E0 AFE_LUTCR */
+ u32 afe_lookup_table_control;
+/* 0x00E4 PSZGCR */
+ u32 phy_source_zone_group_control;
+/* 0x00E8 SAS_RECPHYCAP */
+ u32 receive_phycap;
+ u32 reserved_ec;
+/* 0x00F0 SNAFERXRSTCTL */
+ u32 speed_negotiation_afe_rx_reset_control;
+/* 0x00F4 SAS_SSIPMCTL */
+ u32 power_management_control;
+/* 0x00F8 SAS_PSPREQ_PRIM */
+ u32 sas_pm_partial_request_primitive;
+/* 0x00FC SAS_PSSREQ_PRIM */
+ u32 sas_pm_slumber_request_primitive;
+/* 0x0100 SAS_PPSACK_PRIM */
+ u32 sas_pm_ack_primitive_register;
+/* 0x0104 SAS_PSNAK_PRIM */
+ u32 sas_pm_nak_primitive_register;
+/* 0x0108 SAS_SSIPMTOV */
+ u32 sas_primitive_timeout;
+ u32 reserved_10c;
+/* 0x0110 - 0x011C PLAPRDCTRLxREG */
+ u32 pla_product_control[4];
+/* 0x0120 PLAPRDSUMREG */
+ u32 pla_product_sum;
+/* 0x0124 PLACONTROLREG */
+ u32 pla_control;
+/* Remainder of memory space 896 bytes */
+ u32 reserved_0128_037f[0x96];
+
+};
+
+/*
+ * 0x00D4 SAS_PTxC (same offset as SAS_TCTSTM)
+ * u32 primitive_transmit_control; */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SGPIO
+ * ---------------------------------------------------------------------------- */
+#define SCU_SGPIO_OFFSET 0x1400
+
+/* #define SCU_SGPIO_OFFSET 0x6000 // later moves to 0x1400 see HSD 652625 */
+#define SCU_SGPIO_SGICR_OFFSET 0x0000
+#define SCU_SGPIO_SGPBR_OFFSET 0x0004
+#define SCU_SGPIO_SGSDLR_OFFSET 0x0008
+#define SCU_SGPIO_SGSDUR_OFFSET 0x000C
+#define SCU_SGPIO_SGSIDLR_OFFSET 0x0010
+#define SCU_SGPIO_SGSIDUR_OFFSET 0x0014
+#define SCU_SGPIO_SGVSCR_OFFSET 0x0018
+/* Address from 0x0820 to 0x083C */
+#define SCU_SGPIO_SGODSR_OFFSET 0x0020
+
+/**
+ * struct scu_sgpio_registers - SCU SGPIO Registers
+ *
+ *
+ */
+struct scu_sgpio_registers {
+/* 0x0000 SGPIO_SGICR */
+ u32 interface_control;
+/* 0x0004 SGPIO_SGPBR */
+ u32 blink_rate;
+/* 0x0008 SGPIO_SGSDLR */
+ u32 start_drive_lower;
+/* 0x000C SGPIO_SGSDUR */
+ u32 start_drive_upper;
+/* 0x0010 SGPIO_SGSIDLR */
+ u32 serial_input_lower;
+/* 0x0014 SGPIO_SGSIDUR */
+ u32 serial_input_upper;
+/* 0x0018 SGPIO_SGVSCR */
+ u32 vendor_specific_code;
+/* 0x0020 SGPIO_SGODSR */
+ u32 ouput_data_select[8];
+/* Remainder of memory space 256 bytes */
+ u32 reserved_1444_14ff[0x31];
+
+};
+
+/*
+ * *****************************************************************************
+ * * Defines for VIIT entry offsets
+ * * Access additional entries by SCU_VIIT_BASE + index * 0x10
+ * ***************************************************************************** */
+#define SCU_VIIT_BASE 0x1c00
+
+struct scu_viit_registers {
+ u32 registers[256];
+};
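+/*
+ * Illustration only: per the comment above, entry "index" of the VIIT sits
+ * at SCU_VIIT_BASE + index * 0x10; a hedged sketch of the address math with
+ * "index" as a hypothetical local.
+ *
+ *	u32 viit_entry_offset = SCU_VIIT_BASE + (index * 0x10);
+ */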
+
+/*
+ * *****************************************************************************
+ * * SCU PORT TASK SCHEDULER REGISTERS
+ * ***************************************************************************** */
+
+#define SCU_PTSG_BASE 0x1000
+
+#define SCU_PTSG_PTSGCR_OFFSET 0x0000
+#define SCU_PTSG_RTCR_OFFSET 0x0004
+#define SCU_PTSG_RTCCR_OFFSET 0x0008
+#define SCU_PTSG_PTS0CR_OFFSET 0x0010
+#define SCU_PTSG_PTS0SR_OFFSET 0x0014
+#define SCU_PTSG_PTS1CR_OFFSET 0x0018
+#define SCU_PTSG_PTS1SR_OFFSET 0x001C
+#define SCU_PTSG_PTS2CR_OFFSET 0x0020
+#define SCU_PTSG_PTS2SR_OFFSET 0x0024
+#define SCU_PTSG_PTS3CR_OFFSET 0x0028
+#define SCU_PTSG_PTS3SR_OFFSET 0x002C
+#define SCU_PTSG_PCSPE0CR_OFFSET 0x0030
+#define SCU_PTSG_PCSPE1CR_OFFSET 0x0034
+#define SCU_PTSG_PCSPE2CR_OFFSET 0x0038
+#define SCU_PTSG_PCSPE3CR_OFFSET 0x003C
+#define SCU_PTSG_ETMTSCCR_OFFSET 0x0040
+#define SCU_PTSG_ETMRNSCCR_OFFSET 0x0044
+
+/**
+ * struct scu_port_task_scheduler_registers - These are the control/status
+ * pairs for each Port Task Scheduler.
+ *
+ *
+ */
+struct scu_port_task_scheduler_registers {
+ u32 control;
+ u32 status;
+};
+
+/**
+ * struct scu_port_task_scheduler_group_registers - These are the PORT Task
+ * Scheduler registers
+ *
+ *
+ */
+struct scu_port_task_scheduler_group_registers {
+/* 0x0000 PTSGCR */
+ u32 control;
+/* 0x0004 RTCR */
+ u32 real_time_clock;
+/* 0x0008 RTCCR */
+ u32 real_time_clock_control;
+/* 0x000C */
+ u32 reserved_0C;
+/*
+ * 0x0010 PTS0CR
+ * 0x0014 PTS0SR
+ * 0x0018 PTS1CR
+ * 0x001C PTS1SR
+ * 0x0020 PTS2CR
+ * 0x0024 PTS2SR
+ * 0x0028 PTS3CR
+ * 0x002C PTS3SR */
+ struct scu_port_task_scheduler_registers port[4];
+/*
+ * 0x0030 PCSPE0CR
+ * 0x0034 PCSPE1CR
+ * 0x0038 PCSPE2CR
+ * 0x003C PCSPE3CR */
+ u32 protocol_engine[4];
+/* 0x0040 ETMTSCCR */
+ u32 tc_scanning_interval_control;
+/* 0x0044 ETMRNSCCR */
+ u32 rnc_scanning_interval_control;
+/* Remainder of memory space 128 bytes */
+ u32 reserved_1048_107f[0x0E];
+
+};
+
+#define SCU_PTSG_SCUVZECR_OFFSET 0x003C
+
+/*
+ * *****************************************************************************
+ * * AFE REGISTERS
+ * ***************************************************************************** */
+#define SCU_AFE_MMR_BASE 0xE000
+
+/*
+ * AFE 0 is at offset 0x0800
+ * AFE 1 is at offset 0x0900
+ * AFE 2 is at offset 0x0a00
+ * AFE 3 is at offset 0x0b00 */
+struct scu_afe_transceiver {
+ /* 0x0000 AFE_XCVR_CTRL0 */
+ u32 afe_xcvr_control0;
+ /* 0x0004 AFE_XCVR_CTRL1 */
+ u32 afe_xcvr_control1;
+ /* 0x0008 */
+ u32 reserved_0008;
+ /* 0x000c afe_dfx_rx_control0 */
+ u32 afe_dfx_rx_control0;
+ /* 0x0010 AFE_DFX_RX_CTRL1 */
+ u32 afe_dfx_rx_control1;
+ /* 0x0014 */
+ u32 reserved_0014;
+ /* 0x0018 AFE_DFX_RX_STS0 */
+ u32 afe_dfx_rx_status0;
+ /* 0x001c AFE_DFX_RX_STS1 */
+ u32 afe_dfx_rx_status1;
+ /* 0x0020 */
+ u32 reserved_0020;
+ /* 0x0024 AFE_TX_CTRL */
+ u32 afe_tx_control;
+ /* 0x0028 AFE_TX_AMP_CTRL0 */
+ u32 afe_tx_amp_control0;
+ /* 0x002c AFE_TX_AMP_CTRL1 */
+ u32 afe_tx_amp_control1;
+ /* 0x0030 AFE_TX_AMP_CTRL2 */
+ u32 afe_tx_amp_control2;
+ /* 0x0034 AFE_TX_AMP_CTRL3 */
+ u32 afe_tx_amp_control3;
+ /* 0x0038 afe_tx_ssc_control */
+ u32 afe_tx_ssc_control;
+ /* 0x003c */
+ u32 reserved_003c;
+ /* 0x0040 AFE_RX_SSC_CTRL0 */
+ u32 afe_rx_ssc_control0;
+ /* 0x0044 AFE_RX_SSC_CTRL1 */
+ u32 afe_rx_ssc_control1;
+ /* 0x0048 AFE_RX_SSC_CTRL2 */
+ u32 afe_rx_ssc_control2;
+ /* 0x004c AFE_RX_EQ_STS0 */
+ u32 afe_rx_eq_status0;
+ /* 0x0050 AFE_RX_EQ_STS1 */
+ u32 afe_rx_eq_status1;
+ /* 0x0054 AFE_RX_CDR_STS */
+ u32 afe_rx_cdr_status;
+ /* 0x0058 */
+ u32 reserved_0058;
+ /* 0x005c AFE_CHAN_CTRL */
+ u32 afe_channel_control;
+ /* 0x0060-0x006c */
+ u32 reserved_0060_006c[0x04];
+ /* 0x0070 AFE_XCVR_EC_STS0 */
+ u32 afe_xcvr_error_capture_status0;
+ /* 0x0074 AFE_XCVR_EC_STS1 */
+ u32 afe_xcvr_error_capture_status1;
+ /* 0x0078 AFE_XCVR_EC_STS2 */
+ u32 afe_xcvr_error_capture_status2;
+ /* 0x007c afe_xcvr_ec_status3 */
+ u32 afe_xcvr_error_capture_status3;
+ /* 0x0080 AFE_XCVR_EC_STS4 */
+ u32 afe_xcvr_error_capture_status4;
+ /* 0x0084 AFE_XCVR_EC_STS5 */
+ u32 afe_xcvr_error_capture_status5;
+ /* 0x0088-0x00fc */
+ u32 reserved_008c_00fc[0x1e];
+};
+
+/**
+ * struct scu_afe_registers - AFE Registers
+ *
+ *
+ */
+struct scu_afe_registers {
+ /* 0Xe000 AFE_BIAS_CTRL */
+ u32 afe_bias_control;
+ u32 reserved_0004;
+ /* 0x0008 AFE_PLL_CTRL0 */
+ u32 afe_pll_control0;
+ /* 0x000c AFE_PLL_CTRL1 */
+ u32 afe_pll_control1;
+ /* 0x0010 AFE_PLL_CTRL2 */
+ u32 afe_pll_control2;
+ /* 0x0014 AFE_CB_STS */
+ u32 afe_common_block_status;
+ /* 0x0018-0x007c */
+ u32 reserved_18_7c[0x1a];
+ /* 0x0080 AFE_PMSN_MCTRL0 */
+ u32 afe_pmsn_master_control0;
+ /* 0x0084 AFE_PMSN_MCTRL1 */
+ u32 afe_pmsn_master_control1;
+ /* 0x0088 AFE_PMSN_MCTRL2 */
+ u32 afe_pmsn_master_control2;
+ /* 0x008C-0x00fc */
+ u32 reserved_008c_00fc[0x1D];
+ /* 0x0100 AFE_DFX_MST_CTRL0 */
+ u32 afe_dfx_master_control0;
+ /* 0x0104 AFE_DFX_MST_CTRL1 */
+ u32 afe_dfx_master_control1;
+ /* 0x0108 AFE_DFX_DCL_CTRL */
+ u32 afe_dfx_dcl_control;
+ /* 0x010c AFE_DFX_DMON_CTRL */
+ u32 afe_dfx_digital_monitor_control;
+ /* 0x0110 AFE_DFX_AMONP_CTRL */
+ u32 afe_dfx_analog_p_monitor_control;
+ /* 0x0114 AFE_DFX_AMONN_CTRL */
+ u32 afe_dfx_analog_n_monitor_control;
+ /* 0x0118 AFE_DFX_NTL_STS */
+ u32 afe_dfx_ntl_status;
+ /* 0x011c AFE_DFX_FIFO_STS0 */
+ u32 afe_dfx_fifo_status0;
+ /* 0x0120 AFE_DFX_FIFO_STS1 */
+ u32 afe_dfx_fifo_status1;
+ /* 0x0124 AFE_DFX_MPAT_CTRL */
+ u32 afe_dfx_master_pattern_control;
+ /* 0x0128 AFE_DFX_P0_CTRL */
+ u32 afe_dfx_p0_control;
+ /* 0x012c-0x01a8 AFE_DFX_P0_DRx */
+ u32 afe_dfx_p0_data[32];
+ /* 0x01ac */
+ u32 reserved_01ac;
+ /* 0x01b0-0x020c AFE_DFX_P0_IRx */
+ u32 afe_dfx_p0_instruction[24];
+ /* 0x0210 */
+ u32 reserved_0210;
+ /* 0x0214 AFE_DFX_P1_CTRL */
+ u32 afe_dfx_p1_control;
+ /* 0x0218-0x0254 AFE_DFX_P1_DRx */
+ u32 afe_dfx_p1_data[16];
+ /* 0x0258-0x029c */
+ u32 reserved_0258_029c[0x12];
+ /* 0x02a0-0x02bc AFE_DFX_P1_IRx */
+ u32 afe_dfx_p1_instruction[8];
+ /* 0x02c0-0x2fc */
+ u32 reserved_02c0_02fc[0x10];
+ /* 0x0300 AFE_DFX_TX_PMSN_CTRL */
+ u32 afe_dfx_tx_pmsn_control;
+ /* 0x0304 AFE_DFX_RX_PMSN_CTRL */
+ u32 afe_dfx_rx_pmsn_control;
+ u32 reserved_0308;
+ /* 0x030c AFE_DFX_NOA_CTRL0 */
+ u32 afe_dfx_noa_control0;
+ /* 0x0310 AFE_DFX_NOA_CTRL1 */
+ u32 afe_dfx_noa_control1;
+ /* 0x0314 AFE_DFX_NOA_CTRL2 */
+ u32 afe_dfx_noa_control2;
+ /* 0x0318 AFE_DFX_NOA_CTRL3 */
+ u32 afe_dfx_noa_control3;
+ /* 0x031c AFE_DFX_NOA_CTRL4 */
+ u32 afe_dfx_noa_control4;
+ /* 0x0320 AFE_DFX_NOA_CTRL5 */
+ u32 afe_dfx_noa_control5;
+ /* 0x0324 AFE_DFX_NOA_CTRL6 */
+ u32 afe_dfx_noa_control6;
+ /* 0x0328 AFE_DFX_NOA_CTRL7 */
+ u32 afe_dfx_noa_control7;
+ /* 0x032c-0x07fc */
+ u32 reserved_032c_07fc[0x135];
+
+ /* 0x0800-0x0bfc */
+ struct scu_afe_transceiver scu_afe_xcvr[4];
+
+ /* 0x0c00-0x0ffc */
+ u32 reserved_0c00_0ffc[0x0100];
+};
+
+struct scu_protocol_engine_group_registers {
+ u32 table[0xE0];
+};
+
+
+struct scu_viit_iit {
+ u32 table[256];
+};
+
+/**
+ * Placeholder for the zone partition table information. Zoning will not be
+ * included in the 1.1 release.
+ *
+ *
+ */
+struct scu_zone_partition_table {
+ u32 table[2048];
+};
+
+/**
+ * Placeholder for the CRAM register since I am not sure if we need to
+ * read/write to these registers as yet.
+ *
+ *
+ */
+struct scu_completion_ram {
+ u32 ram[128];
+};
+
+/**
+ * Placeholder for the FBRAM registers since I am not sure if we need to
+ * read/write to these registers as yet.
+ *
+ *
+ */
+struct scu_frame_buffer_ram {
+ u32 ram[128];
+};
+
+#define scu_scratch_ram_SIZE_IN_DWORDS 256
+
+/**
+ * Placeholder for the scratch RAM registers.
+ *
+ *
+ */
+struct scu_scratch_ram {
+ u32 ram[scu_scratch_ram_SIZE_IN_DWORDS];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_protocol_engine_partition {
+ u32 reserved[64];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_hub_partition {
+ u32 reserved[64];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_host_interface_partition {
+ u32 reserved[64];
+};
+
+/**
+ * struct transport_link_layer_pair - The SCU hardware pairs up the TL
+ * registers with the LL registers, so we must place them adjacent to form
+ * the array of registers in the PEG.
+ *
+ *
+ */
+struct transport_link_layer_pair {
+ struct scu_transport_layer_registers tl;
+ struct scu_link_layer_registers ll;
+};
+
+/**
+ * struct scu_peg_registers - SCU Protocol Engine Memory mapped register space.
+ * These registers are unique to each protocol engine group. There can be
+ * at most two PEG for a single SCU part.
+ *
+ *
+ */
+struct scu_peg_registers {
+ struct transport_link_layer_pair pe[4];
+ struct scu_port_task_scheduler_group_registers ptsg;
+ struct scu_protocol_engine_group_registers peg;
+ struct scu_sgpio_registers sgpio;
+ u32 reserved_01500_1BFF[0x1C0];
+ struct scu_viit_entry viit[64];
+ struct scu_zone_partition_table zpt0;
+ struct scu_zone_partition_table zpt1;
+};
+
+/**
+ * struct scu_registers - SCU registers, including both PEG registers if we
+ * turn on that compile option. All of these registers are in the memory
+ * mapped space returned from BAR1.
+ *
+ *
+ */
+struct scu_registers {
+ /* 0x0000 - PEG 0 */
+ struct scu_peg_registers peg0;
+
+ /* 0x6000 - SDMA and Miscellaneous */
+ struct scu_sdma_registers sdma;
+ struct scu_completion_ram cram;
+ struct scu_frame_buffer_ram fbram;
+ u32 reserved_6800_69FF[0x80];
+ struct noa_protocol_engine_partition noa_pe;
+ struct noa_hub_partition noa_hub;
+ struct noa_host_interface_partition noa_if;
+ u32 reserved_6d00_7fff[0x4c0];
+
+ /* 0x8000 - PEG 1 */
+ struct scu_peg_registers peg1;
+
+ /* 0xE000 - AFE Registers */
+ struct scu_afe_registers afe;
+
+ /* 0xF000 - reserved */
+ u32 reserved_f000_211fff[0x80c00];
+
+ /* 0x212000 - scratch RAM */
+ struct scu_scratch_ram scratch_ram;
+};
+
+#endif /* _SCU_REGISTERS_HEADER_ */
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
new file mode 100644
index 00000000000..b6e6368c266
--- /dev/null
+++ b/drivers/scsi/isci/remote_device.c
@@ -0,0 +1,1501 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <scsi/sas.h>
+#include "isci.h"
+#include "port.h"
+#include "remote_device.h"
+#include "request.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "task.h"
+
+/**
+ * isci_remote_device_not_ready() - This function is called by the ihost when
+ *    the remote device is not ready. We mark the isci device as not ready
+ *    for IO and, for NCQ errors, terminate its outstanding requests.
+ * @isci_host: This parameter specifies the isci host object.
+ * @isci_device: This parameter specifies the remote device
+ *
+ * sci_lock is held on entrance to this function.
+ */
+static void isci_remote_device_not_ready(struct isci_host *ihost,
+ struct isci_remote_device *idev, u32 reason)
+{
+ struct isci_request *ireq;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ switch (reason) {
+ case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
+ set_bit(IDEV_GONE, &idev->flags);
+ break;
+ case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
+ set_bit(IDEV_IO_NCQERROR, &idev->flags);
+
+ /* Kill all outstanding requests for the device. */
+ list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p request = %p\n",
+ __func__, idev, ireq);
+
+ sci_controller_terminate_request(ihost,
+ idev,
+ ireq);
+ }
+ /* Fall through into the default case... */
+ default:
+ clear_bit(IDEV_IO_READY, &idev->flags);
+ break;
+ }
+}
+
+/**
+ * isci_remote_device_ready() - This function is called by the ihost when the
+ * remote device is ready. We mark the isci device as ready and signal the
+ *    waiting process.
+ * @ihost: our valid isci_host
+ * @idev: remote device
+ *
+ */
+static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p\n", __func__, idev);
+
+ clear_bit(IDEV_IO_NCQERROR, &idev->flags);
+ set_bit(IDEV_IO_READY, &idev->flags);
+ if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
+ wake_up(&ihost->eventq);
+}
+
+/* called once the remote node context is ready to be freed.
+ * The remote device can now report that its stop operation is complete.
+ */
+static void rnc_destruct_done(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+
+ BUG_ON(idev->started_request_count != 0);
+ sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
+{
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ enum sci_status status = SCI_SUCCESS;
+ u32 i;
+
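+	/* Walk the controller's request pool and terminate every active
+	 * request targeted at this device; note any failure but keep going.
+	 */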
+ for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+ struct isci_request *ireq = ihost->reqs[i];
+ enum sci_status s;
+
+ if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
+ ireq->target_device != idev)
+ continue;
+
+ s = sci_controller_terminate_request(ihost, idev, ireq);
+ if (s != SCI_SUCCESS)
+ status = s;
+ }
+
+ return status;
+}
+
+enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
+ u32 timeout)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_STOPPED:
+ return SCI_SUCCESS;
+ case SCI_DEV_STARTING:
+ /* device not started so there had better be no requests */
+ BUG_ON(idev->started_request_count != 0);
+ sci_remote_node_context_destruct(&idev->rnc,
+ rnc_destruct_done, idev);
+ /* Transition to the stopping state and wait for the
+ * remote node to complete being posted and invalidated.
+ */
+ sci_change_state(sm, SCI_DEV_STOPPING);
+ return SCI_SUCCESS;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ sci_change_state(sm, SCI_DEV_STOPPING);
+ if (idev->started_request_count == 0) {
+ sci_remote_node_context_destruct(&idev->rnc,
+ rnc_destruct_done, idev);
+ return SCI_SUCCESS;
+ } else
+ return sci_remote_device_terminate_requests(idev);
+ break;
+ case SCI_DEV_STOPPING:
+ /* All requests should have been terminated, but if there is an
+ * attempt to stop a device already in the stopping state, then
+ * try again to terminate.
+ */
+ return sci_remote_device_terminate_requests(idev);
+ case SCI_DEV_RESETTING:
+ sci_change_state(sm, SCI_DEV_STOPPING);
+ return SCI_SUCCESS;
+ }
+}
+
+enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ sci_change_state(sm, SCI_DEV_RESETTING);
+ return SCI_SUCCESS;
+ }
+}
+
+enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ if (state != SCI_DEV_RESETTING) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(sm, SCI_DEV_READY);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+ u32 suspend_type)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ if (state != SCI_STP_DEV_CMD) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ return sci_remote_node_context_suspend(&idev->rnc,
+ suspend_type, NULL, NULL);
+}
+
+enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
+ u32 frame_index)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_IDLE:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ /* Return the frame back to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING: {
+ struct isci_request *ireq;
+ struct ssp_frame_hdr hdr;
+ void *frame_header;
+ ssize_t word_cnt;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ &frame_header);
+ if (status != SCI_SUCCESS)
+ return status;
+
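+		/* Copy the frame header out of the unsolicited frame buffer,
+		 * byte-swapping each dword, so the SSP tag can be used to
+		 * look up the owning request.
+		 */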
+ word_cnt = sizeof(hdr) / sizeof(u32);
+ sci_swab32_cpy(&hdr, frame_header, word_cnt);
+
+ ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
+ if (ireq && ireq->target_device == idev) {
+ /* The IO request is now in charge of releasing the frame */
+ status = sci_io_request_frame_handler(ireq, frame_index);
+ } else {
+			/* We could not map this tag to a valid IO
+			 * request. Just toss the frame and continue.
+ */
+ sci_controller_release_frame(ihost, frame_index);
+ }
+ break;
+ }
+ case SCI_STP_DEV_NCQ: {
+ struct dev_to_host_fis *hdr;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&hdr);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (hdr->fis_type == FIS_SETDEVBITS &&
+ (hdr->status & ATA_ERR)) {
+ idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
+
+ /* TODO Check sactive and complete associated IO if any. */
+ sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
+ } else if (hdr->fis_type == FIS_REGD2H &&
+ (hdr->status & ATA_ERR)) {
+ /*
+ * Some devices return D2H FIS when an NCQ error is detected.
+			 * Treat this like an SDB error FIS and record it as the not-ready reason.
+ */
+ idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
+ sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
+ } else
+ status = SCI_FAILURE;
+
+ sci_controller_release_frame(ihost, frame_index);
+ break;
+ }
+ case SCI_STP_DEV_CMD:
+ case SCI_SMP_DEV_CMD:
+ /* The device does not process any UF received from the hardware while
+ * in this state. All unsolicited frames are forwarded to the io request
+ * object.
+ */
+ status = sci_io_request_frame_handler(idev->working_request, frame_index);
+ break;
+ }
+
+ return status;
+}
+
+static bool is_remote_device_ready(struct isci_remote_device *idev)
+{
+
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ switch (state) {
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
+ u32 event_code)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ enum sci_status status;
+
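+	/* RNC-related events are delegated to the remote node context state
+	 * machine; an IT nexus timeout suspends the RNC; anything else is
+	 * unexpected in this handler.
+	 */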
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TYPE_RNC_OPS_MISC:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
+ break;
+ case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+ if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
+ status = SCI_SUCCESS;
+
+ /* Suspend the associated RNC */
+ sci_remote_node_context_suspend(&idev->rnc,
+ SCI_SOFTWARE_SUSPENSION,
+ NULL, NULL);
+
+ dev_dbg(scirdev_to_dev(idev),
+ "%s: device: %p event code: %x: %s\n",
+ __func__, idev, event_code,
+ is_remote_device_ready(idev)
+ ? "I_T_Nexus_Timeout event"
+ : "I_T_Nexus_Timeout event in wrong state");
+
+ break;
+ }
+ /* Else, fall through and treat as unhandled... */
+ default:
+ dev_dbg(scirdev_to_dev(idev),
+ "%s: device: %p event code: %x: %s\n",
+ __func__, idev, event_code,
+ is_remote_device_ready(idev)
+ ? "unexpected event"
+ : "unexpected event in wrong state");
+ status = SCI_FAILURE_INVALID_STATE;
+ break;
+ }
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (state == SCI_STP_DEV_IDLE) {
+
+		/* Suspension events are handled specifically in this state:
+		 * we resume the RNC right away.
+ */
+ if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+ scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
+ status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+ }
+
+ return status;
+}
+
+static void sci_remote_device_start_request(struct isci_remote_device *idev,
+ struct isci_request *ireq,
+ enum sci_status status)
+{
+ struct isci_port *iport = idev->owning_port;
+
+ /* cleanup requests that failed after starting on the port */
+ if (status != SCI_SUCCESS)
+ sci_port_complete_io(iport, idev, ireq);
+ else {
+ kref_get(&idev->kref);
+ idev->started_request_count++;
+ }
+}
+
+enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_port *iport = idev->owning_port;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ /* attempt to start an io request for this device object. The remote
+		 * device object will issue the start request for the io and, if
+		 * successful, it will start the request on the port object and
+		 * then increment its own request count.
+ */
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ break;
+ case SCI_STP_DEV_IDLE: {
+		/* Handle the start io operation for a sata device that is in
+		 * the command idle state: evaluate the type of IO request to
+		 * be started; if it is an NCQ request, change to the NCQ
+		 * substate; if it is any other command, change to the CMD
+		 * substate.
+ *
+ * If this is a softreset we may want to have a different
+ * substate.
+ */
+ enum sci_remote_device_states new_state;
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ if (task->ata_task.use_ncq)
+ new_state = SCI_STP_DEV_NCQ;
+ else {
+ idev->working_request = ireq;
+ new_state = SCI_STP_DEV_CMD;
+ }
+ sci_change_state(sm, new_state);
+ break;
+ }
+ case SCI_STP_DEV_NCQ: {
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ if (task->ata_task.use_ncq) {
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ } else
+ return SCI_FAILURE_INVALID_STATE;
+ break;
+ }
+ case SCI_STP_DEV_AWAIT_RESET:
+ return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ case SCI_SMP_DEV_IDLE:
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ idev->working_request = ireq;
+ sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
+ break;
+ case SCI_STP_DEV_CMD:
+ case SCI_SMP_DEV_CMD:
+		/* The device is already handling a command; it cannot accept
+		 * new commands until this one is complete.
+ */
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_remote_device_start_request(idev, ireq, status);
+ return status;
+}
+
+static enum sci_status common_complete_io(struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+
+ status = sci_request_complete(ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_port_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
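+	/* Drop the started-request count taken in
+	 * sci_remote_device_start_request().
+	 */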
+ sci_remote_device_decrement_request_count(idev);
+ return status;
+}
+
+enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_port *iport = idev->owning_port;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_IDLE:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_DEV_RESETTING:
+ status = common_complete_io(iport, idev, ireq);
+ break;
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ status = common_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+		/* This request caused a hardware error; the device needs a LUN
+		 * reset. Force the state machine into the AWAIT_RESET state so
+		 * the remaining IOs can reach the RNC state handler, where they
+		 * will be completed by the RNC with a status of
+		 * "DEVICE_RESET_REQUIRED" instead of "INVALID STATE".
+ */
+ sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
+ } else if (idev->started_request_count == 0)
+ sci_change_state(sm, SCI_STP_DEV_IDLE);
+ break;
+ case SCI_SMP_DEV_CMD:
+ status = common_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+ sci_change_state(sm, SCI_SMP_DEV_IDLE);
+ break;
+ case SCI_DEV_STOPPING:
+ status = common_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ if (idev->started_request_count == 0)
+ sci_remote_node_context_destruct(&idev->rnc,
+ rnc_destruct_done,
+ idev);
+ break;
+ }
+
+ if (status != SCI_SUCCESS)
+ dev_err(scirdev_to_dev(idev),
+ "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
+ "could not complete\n", __func__, iport,
+ idev, ireq, status);
+ else
+ isci_put_device(idev);
+
+ return status;
+}
+
+static void sci_remote_device_continue_request(void *dev)
+{
+ struct isci_remote_device *idev = dev;
+
+ /* we need to check if this request is still valid to continue. */
+ if (idev->working_request)
+ sci_controller_continue_io(idev->working_request);
+}
+
+enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_port *iport = idev->owning_port;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ goto out;
+
+ status = sci_request_start(ireq);
+ if (status != SCI_SUCCESS)
+ goto out;
+
+ /* Note: If the remote device state is not IDLE this will
+ * replace the request that probably resulted in the task
+ * management request.
+ */
+ idev->working_request = ireq;
+ sci_change_state(sm, SCI_STP_DEV_CMD);
+
+ /* The remote node context must cleanup the TCi to NCQ mapping
+ * table. The only way to do this correctly is to either write
+ * to the TLCR register or to invalidate and repost the RNC. In
+ * either case the remote node context state machine will take
+ * the correct action when the remote node context is suspended
+ * and later resumed.
+ */
+ sci_remote_node_context_suspend(&idev->rnc,
+ SCI_SOFTWARE_SUSPENSION, NULL, NULL);
+ sci_remote_node_context_resume(&idev->rnc,
+ sci_remote_device_continue_request,
+ idev);
+
+ out:
+ sci_remote_device_start_request(idev, ireq, status);
+ /* We need to let the controller start request handler know that
+ * it can't post TC yet. We will provide a callback function to
+ * post TC when RNC gets resumed.
+ */
+ return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
+ case SCI_DEV_READY:
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ break;
+ }
+ sci_remote_device_start_request(idev, ireq, status);
+
+ return status;
+}
+
+void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
+{
+ struct isci_port *iport = idev->owning_port;
+ u32 context;
+
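+	/* Build the post context from the request type, the protocol engine
+	 * group, the logical port index, and this device's remote node index.
+	 */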
+ context = request |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ idev->rnc.remote_node_index;
+
+ sci_controller_post_request(iport->owning_controller, context);
+}
+
+/* called once the remote node context has transitioned to a
+ * ready state. This is the indication that the remote device object can also
+ * transition to ready.
+ */
+static void remote_device_resume_done(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+
+ if (is_remote_device_ready(idev))
+ return;
+
+ /* go 'ready' if we are not already in a ready state */
+ sci_change_state(&idev->sm, SCI_DEV_READY);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+	/* For NCQ operation we do not issue an isci_remote_device_not_ready().
+ * As a result, avoid sending the ready notification.
+ */
+ if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
+ isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ /* Initial state is a transitional state to the stopped state */
+ sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+/**
+ * sci_remote_device_destruct() - free remote node context and destruct
+ * @remote_device: This parameter specifies the remote device to be destructed.
+ *
+ * Remote device objects are a limited resource. As such, they must be
+ * protected. Thus calls to construct and destruct are mutually exclusive and
+ * non-reentrant. The return value indicates whether the device was
+ * successfully destructed or whether some failure occurred.
+ * SCI_SUCCESS is returned if the device was successfully destructed.
+ * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied device
+ * isn't valid (e.g. it has already been destroyed, the handle isn't
+ * valid, etc.).
+ */
+static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_host *ihost;
+
+ if (state != SCI_DEV_STOPPED) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ ihost = idev->owning_port->owning_controller;
+ sci_controller_free_remote_node_context(ihost, idev,
+ idev->rnc.remote_node_index);
+ idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+ sci_change_state(sm, SCI_DEV_FINAL);
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device to be freed.
+ *
+ */
+static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+	/* There should not be any outstanding IOs. All paths to
+	 * here should go through isci_remote_device_nuke_requests.
+	 * If we hit this condition, we will need a way to complete
+	 * io requests in process. */
+ BUG_ON(!list_empty(&idev->reqs_in_process));
+
+ sci_remote_device_destruct(idev);
+ list_del_init(&idev->node);
+ isci_put_device(idev);
+}
+
+static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ u32 prev_state;
+
+ /* If we are entering from the stopping state let the SCI User know that
+ * the stop operation has completed.
+ */
+ prev_state = idev->sm.previous_state_id;
+ if (prev_state == SCI_DEV_STOPPING)
+ isci_remote_device_deconstruct(ihost, idev);
+
+ sci_controller_remote_device_stopped(ihost, idev);
+}
+
+static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
+}
+
+static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ struct domain_device *dev = idev->domain_dev;
+
+ if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
+ sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
+ } else if (dev_is_expander(dev)) {
+ sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
+ } else
+ isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct domain_device *dev = idev->domain_dev;
+
+ if (dev->dev_type == SAS_END_DEV) {
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
+ }
+}
+
+static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ sci_remote_node_context_suspend(
+ &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
+}
+
+static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ idev->working_request = NULL;
+ if (sci_remote_node_context_is_ready(&idev->rnc)) {
+ /*
+ * Since the RNC is ready, it's alright to finish completion
+ * processing (e.g. signal the remote device is ready). */
+ sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
+ } else {
+ sci_remote_node_context_resume(&idev->rnc,
+ sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
+ idev);
+ }
+}
+
+static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ BUG_ON(idev->working_request == NULL);
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
+}
+
+static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
+ isci_remote_device_not_ready(ihost, idev,
+ idev->not_ready_reason);
+}
+
+static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ BUG_ON(idev->working_request == NULL);
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
+}
+
+static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ idev->working_request = NULL;
+}
+
+static const struct sci_base_state sci_remote_device_state_table[] = {
+ [SCI_DEV_INITIAL] = {
+ .enter_state = sci_remote_device_initial_state_enter,
+ },
+ [SCI_DEV_STOPPED] = {
+ .enter_state = sci_remote_device_stopped_state_enter,
+ },
+ [SCI_DEV_STARTING] = {
+ .enter_state = sci_remote_device_starting_state_enter,
+ },
+ [SCI_DEV_READY] = {
+ .enter_state = sci_remote_device_ready_state_enter,
+ .exit_state = sci_remote_device_ready_state_exit
+ },
+ [SCI_STP_DEV_IDLE] = {
+ .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
+ },
+ [SCI_STP_DEV_CMD] = {
+ .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
+ },
+ [SCI_STP_DEV_NCQ] = { },
+ [SCI_STP_DEV_NCQ_ERROR] = {
+ .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
+ },
+ [SCI_STP_DEV_AWAIT_RESET] = { },
+ [SCI_SMP_DEV_IDLE] = {
+ .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
+ },
+ [SCI_SMP_DEV_CMD] = {
+ .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
+ .exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
+ },
+ [SCI_DEV_STOPPING] = { },
+ [SCI_DEV_FAILED] = { },
+ [SCI_DEV_RESETTING] = {
+ .enter_state = sci_remote_device_resetting_state_enter,
+ .exit_state = sci_remote_device_resetting_state_exit
+ },
+ [SCI_DEV_FINAL] = { },
+};
+
+/**
+ * sci_remote_device_construct() - common construction
+ * @iport: SAS/SATA port through which this device is accessed.
+ * @idev: remote device to construct
+ *
+ * This routine just performs benign initialization and does not
+ * allocate the remote_node_context which is left to
+ * sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
+ * frees the remote_node_context(s) for the device.
+ */
+static void sci_remote_device_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ idev->owning_port = iport;
+ idev->started_request_count = 0;
+
+ sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
+
+ sci_remote_node_context_construct(&idev->rnc,
+ SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+/**
+ * sci_remote_device_da_construct() - construct direct attached device.
+ *
+ * The information (e.g. IAF, Signature FIS, etc.) necessary to build
+ * the device is known to the SCI Core since it is contained in the
+ * sci_phy object. Remote node context(s) is/are a global resource
+ * allocated by this routine, freed by sci_remote_device_destruct().
+ *
+ * Returns:
+ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
+ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
+ * sata-only controller instance.
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
+ */
+static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ enum sci_status status;
+ struct domain_device *dev = idev->domain_dev;
+
+ sci_remote_device_construct(iport, idev);
+
+ /*
+	 * This information is required to determine how many remote node context
+ * entries will be needed to store the remote node.
+ */
+ idev->is_direct_attached = true;
+ status = sci_controller_allocate_remote_node_context(iport->owning_controller,
+ idev,
+ &idev->rnc.remote_node_index);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
+ (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
+ /* pass */;
+ else
+ return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+ idev->connection_rate = sci_port_get_max_allowed_speed(iport);
+
+	/* @todo Should I assign the port width by reading all of the phys on the port? */
+ idev->device_port_width = 1;
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_remote_device_ea_construct() - construct expander attached device
+ *
+ * Remote node context(s) is/are a global resource allocated by this
+ * routine, freed by sci_remote_device_destruct().
+ *
+ * Returns:
+ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
+ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
+ * sata-only controller instance.
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
+ */
+static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status;
+
+ sci_remote_device_construct(iport, idev);
+
+ status = sci_controller_allocate_remote_node_context(iport->owning_controller,
+ idev,
+ &idev->rnc.remote_node_index);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
+ (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
+ /* pass */;
+ else
+ return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+ /*
+ * For SAS-2 the physical link rate is actually a logical link
+ * rate that incorporates multiplexing. The SCU doesn't
+ * incorporate multiplexing and for the purposes of the
+	 * connection the logical link rate is the same as the
+ * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
+ * one another, so this code works for both situations. */
+ idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
+ dev->linkrate);
+
+	/* @todo Should I assign the port width by reading all of the phys on the port? */
+ idev->device_port_width = 1;
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_remote_device_start() - This method will start the supplied remote
+ * device. This method enables normal IO requests to flow through to the
+ * remote device.
+ * @remote_device: This parameter specifies the device to be started.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ * start operation should complete.
+ *
+ * Returns: an indication of whether the device was successfully started.
+ * SCI_SUCCESS is returned if the device was successfully started.
+ * SCI_FAILURE_INVALID_PHY is returned if the user attempts to start the
+ * device when no phys have been added to it.
+ */
+static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
+ u32 timeout)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ enum sci_status status;
+
+ if (state != SCI_DEV_STOPPED) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_remote_node_context_resume(&idev->rnc,
+ remote_device_resume_done,
+ idev);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_change_state(sm, SCI_DEV_STARTING);
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status isci_remote_device_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ struct isci_host *ihost = iport->isci_host;
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status;
+
+ if (dev->parent && dev_is_expander(dev->parent))
+ status = sci_remote_device_ea_construct(iport, idev);
+ else
+ status = sci_remote_device_da_construct(iport, idev);
+
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
+ __func__, status);
+
+ return status;
+ }
+
+ /* start the device. */
+ status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
+
+ if (status != SCI_SUCCESS)
+ dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
+ status);
+
+ return status;
+}
+
+void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ DECLARE_COMPLETION_ONSTACK(aborted_task_completion);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p\n", __func__, idev);
+
+ /* Cleanup all requests pending for this device. */
+ isci_terminate_pending_requests(ihost, idev);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p, done\n", __func__, idev);
+}
+
+/**
+ * isci_remote_device_alloc() - This function allocates the isci_remote_device
+ *    used when a libsas dev_found message is received.
+ * @ihost: This parameter specifies the isci host object.
+ * @iport: This parameter specifies the isci_port connected to this device.
+ *
+ * pointer to new isci_remote_device.
+ */
+static struct isci_remote_device *
+isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
+{
+ struct isci_remote_device *idev;
+ int i;
+
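+	/* Claim the first free slot in the host's device table; the
+	 * IDEV_ALLOCATED flag doubles as the allocation marker.
+	 */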
+ for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+ idev = &ihost->devices[i];
+ if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
+ break;
+ }
+
+ if (i >= SCI_MAX_REMOTE_DEVICES) {
+ dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
+ return NULL;
+ }
+
+ if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
+ return NULL;
+
+ if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
+ return NULL;
+
+ return idev;
+}
+
+void isci_remote_device_release(struct kref *kref)
+{
+ struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
+ struct isci_host *ihost = idev->isci_port->isci_host;
+
+ idev->domain_dev = NULL;
+ idev->isci_port = NULL;
+ clear_bit(IDEV_START_PENDING, &idev->flags);
+ clear_bit(IDEV_STOP_PENDING, &idev->flags);
+ clear_bit(IDEV_IO_READY, &idev->flags);
+ clear_bit(IDEV_GONE, &idev->flags);
+ clear_bit(IDEV_EH, &idev->flags);
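+	/* Make sure the flag clears above are visible before IDEV_ALLOCATED
+	 * is cleared, so a re-allocation of this slot sees a clean device.
+	 */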
+ smp_mb__before_clear_bit();
+ clear_bit(IDEV_ALLOCATED, &idev->flags);
+ wake_up(&ihost->eventq);
+}
+
+/**
+ * isci_remote_device_stop() - This function is called internally to stop the
+ * remote device.
+ * @isci_host: This parameter specifies the isci host object.
+ * @isci_device: This parameter specifies the remote device.
+ *
+ * The status of the ihost request to stop.
+ */
+enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ enum sci_status status;
+ unsigned long flags;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
+ set_bit(IDEV_GONE, &idev->flags);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Kill all outstanding requests. */
+ isci_remote_device_nuke_requests(ihost, idev);
+
+ set_bit(IDEV_STOP_PENDING, &idev->flags);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ status = sci_remote_device_stop(idev, 50);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Wait for the stop complete callback. */
+ if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
+ /* nothing to wait for */;
+ else
+ wait_for_device_stop(ihost, idev);
+
+ return status;
+}
+
+/**
+ * isci_remote_device_gone() - This function is called by libsas when a domain
+ * device is removed.
+ * @domain_device: This parameter specifies the libsas domain device.
+ *
+ */
+void isci_remote_device_gone(struct domain_device *dev)
+{
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev = dev->lldd_dev;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
+ __func__, dev, idev, idev->isci_port);
+
+ isci_remote_device_stop(ihost, idev);
+}
+
+
+/**
+ * isci_remote_device_found() - This function is called by libsas when a remote
+ *    device is discovered. A remote device object is created and started. The
+ * function then sleeps until the sci core device started message is
+ * received.
+ * @domain_device: This parameter specifies the libsas domain device.
+ *
+ * status, zero indicates success.
+ */
+int isci_remote_device_found(struct domain_device *domain_dev)
+{
+ struct isci_host *isci_host = dev_to_ihost(domain_dev);
+ struct isci_port *isci_port;
+ struct isci_phy *isci_phy;
+ struct asd_sas_port *sas_port;
+ struct asd_sas_phy *sas_phy;
+ struct isci_remote_device *isci_device;
+ enum sci_status status;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: domain_device = %p\n", __func__, domain_dev);
+
+ wait_for_start(isci_host);
+
+ sas_port = domain_dev->port;
+ sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
+ port_phy_el);
+ isci_phy = to_iphy(sas_phy);
+ isci_port = isci_phy->isci_port;
+
+ /* we are being called for a device on this port,
+ * so it has to come up eventually
+ */
+ wait_for_completion(&isci_port->start_complete);
+
+ if ((isci_stopping == isci_port_get_state(isci_port)) ||
+ (isci_stopped == isci_port_get_state(isci_port)))
+ return -ENODEV;
+
+ isci_device = isci_remote_device_alloc(isci_host, isci_port);
+ if (!isci_device)
+ return -ENODEV;
+
+ kref_init(&isci_device->kref);
+ INIT_LIST_HEAD(&isci_device->node);
+
+ spin_lock_irq(&isci_host->scic_lock);
+ isci_device->domain_dev = domain_dev;
+ isci_device->isci_port = isci_port;
+ list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
+
+ set_bit(IDEV_START_PENDING, &isci_device->flags);
+ status = isci_remote_device_construct(isci_port, isci_device);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p\n",
+ __func__, isci_device);
+
+ if (status == SCI_SUCCESS) {
+ /* device came up, advertise it to the world */
+ domain_dev->lldd_dev = isci_device;
+ } else
+ isci_put_device(isci_device);
+ spin_unlock_irq(&isci_host->scic_lock);
+
+ /* wait for the device ready callback. */
+ wait_for_device_start(isci_host, isci_device);
+
+ return status == SCI_SUCCESS ? 0 : -ENODEV;
+}
+/**
+ * isci_device_is_reset_pending() - This function will check if there is any
+ * pending reset condition on the device.
+ * @isci_host: This parameter specifies the isci host object.
+ * @isci_device: This parameter is the isci_device object.
+ *
+ * Returns true if there is a reset pending for the device.
+ */
+bool isci_device_is_reset_pending(
+ struct isci_host *isci_host,
+ struct isci_remote_device *isci_device)
+{
+ struct isci_request *isci_request;
+ struct isci_request *tmp_req;
+ bool reset_is_pending = false;
+ unsigned long flags;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p\n", __func__, isci_device);
+
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+
+ /* Check for reset on all pending requests. */
+ list_for_each_entry_safe(isci_request, tmp_req,
+ &isci_device->reqs_in_process, dev_node) {
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p request = %p\n",
+ __func__, isci_device, isci_request);
+
+ if (isci_request->ttype == io_task) {
+ struct sas_task *task = isci_request_access_task(
+ isci_request);
+
+ spin_lock(&task->task_state_lock);
+ if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
+ reset_is_pending = true;
+ spin_unlock(&task->task_state_lock);
+ }
+ }
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p reset_is_pending = %d\n",
+ __func__, isci_device, reset_is_pending);
+
+ return reset_is_pending;
+}
+
+/**
+ * isci_device_clear_reset_pending() - This function will clear any pending
+ *    reset condition flags on the device.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter is the isci_device object.
+ */
+void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ struct isci_request *isci_request;
+ struct isci_request *tmp_req;
+ unsigned long flags = 0;
+
+ dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n",
+ __func__, idev, ihost);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ /* Clear reset pending on all pending requests. */
+ list_for_each_entry_safe(isci_request, tmp_req,
+ &idev->reqs_in_process, dev_node) {
+ dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n",
+ __func__, idev, isci_request);
+
+ if (isci_request->ttype == io_task) {
+
+ unsigned long flags2;
+ struct sas_task *task = isci_request_access_task(
+ isci_request);
+
+ spin_lock_irqsave(&task->task_state_lock, flags2);
+ task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, flags2);
+ }
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
new file mode 100644
index 00000000000..57ccfc3d6ad
--- /dev/null
+++ b/drivers/scsi/isci/remote_device.h
@@ -0,0 +1,352 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_REMOTE_DEVICE_H_
+#define _ISCI_REMOTE_DEVICE_H_
+#include <scsi/libsas.h>
+#include <linux/kref.h>
+#include "scu_remote_node_context.h"
+#include "remote_node_context.h"
+#include "port.h"
+
+enum sci_remote_device_not_ready_reason_code {
+ SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED,
+ SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX
+};
+
+/**
+ * struct isci_remote_device - isci representation of a sas expander / end point
+ * @device_port_width: hw setting for number of simultaneous connections
+ * @connection_rate: per-taskcontext connection rate for this device
+ * @working_request: SATA requests have no tag, so for unaccelerated
+ * protocols we need a method to associate unsolicited
+ * frames with a pending request
+ */
+struct isci_remote_device {
+ #define IDEV_START_PENDING 0
+ #define IDEV_STOP_PENDING 1
+ #define IDEV_ALLOCATED 2
+ #define IDEV_EH 3
+ #define IDEV_GONE 4
+ #define IDEV_IO_READY 5
+ #define IDEV_IO_NCQERROR 6
+ unsigned long flags;
+ struct kref kref;
+ struct isci_port *isci_port;
+ struct domain_device *domain_dev;
+ struct list_head node;
+ struct list_head reqs_in_process;
+ struct sci_base_state_machine sm;
+ u32 device_port_width;
+ enum sas_linkrate connection_rate;
+ bool is_direct_attached;
+ struct isci_port *owning_port;
+ struct sci_remote_node_context rnc;
+ /* XXX unify with device reference counting and delete */
+ u32 started_request_count;
+ struct isci_request *working_request;
+ u32 not_ready_reason;
+};
+
+#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
+
+/* device reference routines must be called under sci_lock */
+static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
+{
+ struct isci_remote_device *idev = dev->lldd_dev;
+
+ if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
+ kref_get(&idev->kref);
+ return idev;
+ }
+
+ return NULL;
+}
+
+void isci_remote_device_release(struct kref *kref);
+static inline void isci_put_device(struct isci_remote_device *idev)
+{
+ if (idev)
+ kref_put(&idev->kref, isci_remote_device_release);
+}
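+
+/* Typical usage (a sketch; names follow this driver's conventions): look the
+ * device up under ihost->scic_lock and drop the reference with
+ * isci_put_device() when finished, e.g.
+ *
+ *	spin_lock_irqsave(&ihost->scic_lock, flags);
+ *	idev = isci_lookup_device(dev);
+ *	spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ *	if (idev) {
+ *		... issue requests against idev ...
+ *		isci_put_device(idev);
+ *	}
+ */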
+
+enum sci_status isci_remote_device_stop(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+void isci_remote_device_nuke_requests(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+void isci_remote_device_gone(struct domain_device *domain_dev);
+int isci_remote_device_found(struct domain_device *domain_dev);
+bool isci_device_is_reset_pending(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+void isci_device_clear_reset_pending(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+/**
+ * sci_remote_device_stop() - This method will stop both transmission and
+ * reception of link activity for the supplied remote device. This method
+ * disables normal IO requests from flowing through to the remote device.
+ * @remote_device: This parameter specifies the device to be stopped.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ * stop operation should complete.
+ *
+ * Returns: an indication of whether the device was successfully stopped.
+ * SCI_SUCCESS is returned if transmission and reception for the device
+ * were successfully stopped.
+ */
+enum sci_status sci_remote_device_stop(
+ struct isci_remote_device *idev,
+ u32 timeout);
+
+/**
+ * sci_remote_device_reset() - This method will reset the device making it
+ * ready for operation. This method must be called anytime the device is
+ *    reset, either through an SMP phy control or a port hard reset request.
+ * @remote_device: This parameter specifies the device to be reset.
+ *
+ * This method does not actually cause the device hardware to be reset. This
+ * method resets the software object so that it will be operational after a
+ * device hardware reset completes.
+ *
+ * Returns: an indication of whether the device reset was accepted.
+ * SCI_SUCCESS is returned if the device reset was started.
+ */
+enum sci_status sci_remote_device_reset(
+ struct isci_remote_device *idev);
+
+/**
+ * sci_remote_device_reset_complete() - This method informs the device object
+ * that the reset operation is complete and the device can resume operation
+ * again.
+ * @remote_device: This parameter specifies the device which is to be informed
+ * of the reset complete operation.
+ *
+ * Returns: SCI_SUCCESS to indicate that the device is resuming operation.
+ */
+enum sci_status sci_remote_device_reset_complete(
+ struct isci_remote_device *idev);
+
+/**
+ * enum sci_remote_device_states - This enumeration depicts all the states
+ * for the common remote device state machine.
+ *
+ *
+ */
+enum sci_remote_device_states {
+ /**
+ * Simply the initial state for the base remote device state machine.
+ */
+ SCI_DEV_INITIAL,
+
+ /**
+ * This state indicates that the remote device has successfully been
+ * stopped. In this state no new IO operations are permitted.
+ * This state is entered from the INITIAL state.
+ * This state is entered from the STOPPING state.
+ */
+ SCI_DEV_STOPPED,
+
+ /**
+	 * This state indicates that the remote device is in the process of
+ * becoming ready (i.e. starting). In this state no new IO operations
+ * are permitted.
+ * This state is entered from the STOPPED state.
+ */
+ SCI_DEV_STARTING,
+
+ /**
+ * This state indicates the remote device is now ready. Thus, the user
+ * is able to perform IO operations on the remote device.
+ * This state is entered from the STARTING state.
+ */
+ SCI_DEV_READY,
+
+ /**
+	 * This is the idle substate for the STP remote device.  When there are no
+	 * active IO requests for the device, it is in this state.
+ */
+ SCI_STP_DEV_IDLE,
+
+ /**
+	 * This is the command state for the STP remote device.  This state is
+ * entered when the device is processing a non-NCQ command. The device object
+ * will fail any new start IO requests until this command is complete.
+ */
+ SCI_STP_DEV_CMD,
+
+ /**
+ * This is the NCQ state for the STP remote device. This state is entered
+	 * when the device is processing an NCQ request.  It will remain in this state
+	 * as long as there are one or more NCQ requests being processed.
+ */
+ SCI_STP_DEV_NCQ,
+
+ /**
+ * This is the NCQ error state for the STP remote device. This state is
+ * entered when an SDB error FIS is received by the device object while in the
+ * NCQ state. The device object will only accept a READ LOG command while in
+ * this state.
+ */
+ SCI_STP_DEV_NCQ_ERROR,
+
+ /**
+ * This is the READY substate in which the device is waiting for a RESET task
+ * in order to recover from a hardware specific error.
+ */
+ SCI_STP_DEV_AWAIT_RESET,
+
+ /**
+ * This is the idle substate for the SMP remote device. This is the normal
+ * operational state for an SMP remote device when no request is being
+ * processed.
+ */
+ SCI_SMP_DEV_IDLE,
+
+ /**
+ * This is the command substate for the SMP remote device. This is the state
+ * that the device is placed in while an SMP request is being processed.
+ */
+ SCI_SMP_DEV_CMD,
+
+ /**
+ * This state indicates that the remote device is in the process of
+ * stopping. In this state no new IO operations are permitted, but
+ * existing IO operations are allowed to complete.
+ * This state is entered from the READY state.
+ * This state is entered from the FAILED state.
+ */
+ SCI_DEV_STOPPING,
+
+ /**
+ * This state indicates that the remote device has failed.
+ * In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZING state.
+ * This state is entered from the READY state.
+ */
+ SCI_DEV_FAILED,
+
+ /**
+ * This state indicates the device is being reset.
+ * In this state no new IO operations are permitted.
+ * This state is entered from the READY state.
+ */
+ SCI_DEV_RESETTING,
+
+ /**
+ * Simply the final state for the base remote device state machine.
+ */
+ SCI_DEV_FINAL,
+};
+
+static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
+{
+ struct isci_remote_device *idev;
+
+ idev = container_of(rnc, typeof(*idev), rnc);
+
+ return idev;
+}
+
+static inline bool dev_is_expander(struct domain_device *dev)
+{
+ return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
+}
+
+static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
+{
+ /* XXX delete this voodoo when converting to the top-level device
+ * reference count
+ */
+ if (WARN_ONCE(idev->started_request_count == 0,
+ "%s: tried to decrement started_request_count past 0!?",
+ __func__))
+ /* pass */;
+ else
+ idev->started_request_count--;
+}
+
+enum sci_status sci_remote_device_frame_handler(
+ struct isci_remote_device *idev,
+ u32 frame_index);
+
+enum sci_status sci_remote_device_event_handler(
+ struct isci_remote_device *idev,
+ u32 event_code);
+
+enum sci_status sci_remote_device_start_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_remote_device_start_task(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_remote_device_complete_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_remote_device_suspend(
+ struct isci_remote_device *idev,
+ u32 suspend_type);
+
+void sci_remote_device_post_request(
+ struct isci_remote_device *idev,
+ u32 request);
+
+#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
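A hedged illustration of how the prototypes above fit together (the helper below is hypothetical and not part of the patch): sci_remote_device_reset() only prepares the software object, so a caller brackets the actual hardware reset with it and sci_remote_device_reset_complete().

/* Hypothetical caller sketch, not part of the patch. */
static void example_device_reset_flow(struct isci_remote_device *idev)
{
	/* prepare the software object so it survives the hardware reset */
	if (sci_remote_device_reset(idev) != SCI_SUCCESS)
		return;

	/* ... perform the SMP phy control or port hard reset here ... */

	/* let the device object resume normal operation */
	sci_remote_device_reset_complete(idev);
}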
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
new file mode 100644
index 00000000000..748e8339d1e
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -0,0 +1,627 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+#include "isci.h"
+#include "remote_device.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "scu_task_context.h"
+
+
+/**
+ *
+ * @sci_rnc: The RNC for which the posted request is being made.
+ *
+ * This method will return true if the RNC is not in the initial state; in all
+ * other states the RNC is considered active. The destroy request of the state
+ * machine drives the RNC back to the initial state. If the state machine
+ * changes then this routine will also have to be changed. Returns bool: true
+ * if the state machine is not in the initial state, false if it is.
+ */
+
+/**
+ *
+ * @sci_rnc: The state of the remote node context object to check.
+ *
+ * This method will return true if the remote node context is in the READY
+ * state, otherwise it will return false. Returns bool: true if the remote
+ * node context is in the ready state, false if it is not.
+ */
+bool sci_remote_node_context_is_ready(
+ struct sci_remote_node_context *sci_rnc)
+{
+ u32 current_state = sci_rnc->sm.current_state_id;
+
+ if (current_state == SCI_RNC_READY) {
+ return true;
+ }
+
+ return false;
+}
+
+static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
+{
+ if (id < ihost->remote_node_entries &&
+ ihost->device_table[id])
+ return &ihost->remote_node_context_table[id];
+
+ return NULL;
+}
+
+static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
+{
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct domain_device *dev = idev->domain_dev;
+ int rni = sci_rnc->remote_node_index;
+ union scu_remote_node_context *rnc;
+ struct isci_host *ihost;
+ __le64 sas_addr;
+
+ ihost = idev->owning_port->owning_controller;
+ rnc = sci_rnc_by_id(ihost, rni);
+
+ memset(rnc, 0, sizeof(union scu_remote_node_context)
+ * sci_remote_device_node_count(idev));
+
+ rnc->ssp.remote_node_index = rni;
+ rnc->ssp.remote_node_port_width = idev->device_port_width;
+ rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;
+
+ /* sas address is __be64, context ram format is __le64 */
+ sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
+ rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
+ rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);
+
+ rnc->ssp.nexus_loss_timer_enable = true;
+ rnc->ssp.check_bit = false;
+ rnc->ssp.is_valid = false;
+ rnc->ssp.is_remote_node_context = true;
+ rnc->ssp.function_number = 0;
+
+ rnc->ssp.arbitration_wait_time = 0;
+
+ if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ rnc->ssp.connection_occupancy_timeout =
+ ihost->user_parameters.stp_max_occupancy_timeout;
+ rnc->ssp.connection_inactivity_timeout =
+ ihost->user_parameters.stp_inactivity_timeout;
+ } else {
+ rnc->ssp.connection_occupancy_timeout =
+ ihost->user_parameters.ssp_max_occupancy_timeout;
+ rnc->ssp.connection_inactivity_timeout =
+ ihost->user_parameters.ssp_inactivity_timeout;
+ }
+
+ rnc->ssp.initial_arbitration_wait_time = 0;
+
+ /* Open Address Frame Parameters */
+ rnc->ssp.oaf_connection_rate = idev->connection_rate;
+ rnc->ssp.oaf_features = 0;
+ rnc->ssp.oaf_source_zone_group = 0;
+ rnc->ssp.oaf_more_compatibility_features = 0;
+}
+
+/**
+ *
+ * @sci_rnc:
+ * @callback:
+ * @callback_parameter:
+ *
+ * This method will setup the remote node context object so it will transition
+ * to its ready state. If the remote node context is already setup to
+ * transition to its final state then this function does nothing. none
+ */
+static void sci_remote_node_context_setup_to_resume(
+ struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback callback,
+ void *callback_parameter)
+{
+ if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) {
+ sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY;
+ sci_rnc->user_callback = callback;
+ sci_rnc->user_cookie = callback_parameter;
+ }
+}
+
+static void sci_remote_node_context_setup_to_destory(
+ struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback callback,
+ void *callback_parameter)
+{
+ sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL;
+ sci_rnc->user_callback = callback;
+ sci_rnc->user_cookie = callback_parameter;
+}
+
+/**
+ *
+ *
+ * This method just calls the user callback function and then resets the
+ * callback.
+ */
+static void sci_remote_node_context_notify_user(
+ struct sci_remote_node_context *rnc)
+{
+ if (rnc->user_callback != NULL) {
+ (*rnc->user_callback)(rnc->user_cookie);
+
+ rnc->user_callback = NULL;
+ rnc->user_cookie = NULL;
+ }
+}
+
+static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
+{
+ if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
+ sci_remote_node_context_resume(rnc, rnc->user_callback,
+ rnc->user_cookie);
+}
+
+static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
+{
+ union scu_remote_node_context *rnc_buffer;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct domain_device *dev = idev->domain_dev;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
+
+ rnc_buffer->ssp.is_valid = true;
+
+ if (!idev->is_direct_attached &&
+ (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
+ } else {
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
+
+ if (idev->is_direct_attached)
+ sci_port_setup_transports(idev->owning_port,
+ sci_rnc->remote_node_index);
+ }
+}
+
+static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
+{
+ union scu_remote_node_context *rnc_buffer;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
+
+ rnc_buffer->ssp.is_valid = false;
+
+ sci_remote_device_post_request(rnc_to_dev(sci_rnc),
+ SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
+}
+
+static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ /* Check to see if we have gotten back to the initial state because
+ * someone requested to destroy the remote node context object.
+ */
+ if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
+ rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+ sci_remote_node_context_notify_user(rnc);
+ }
+}
+
+static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
+
+ sci_remote_node_context_validate_context_buffer(sci_rnc);
+}
+
+static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ sci_remote_node_context_invalidate_context_buffer(rnc);
+}
+
+static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct isci_remote_device *idev;
+ struct domain_device *dev;
+
+ idev = rnc_to_dev(rnc);
+ dev = idev->domain_dev;
+
+ /*
+ * For direct attached SATA devices we need to clear the TLCR
+ * NCQ to TCi tag mapping on the phy and in cases where we
+ * resume because of a target reset we also need to update
+ * the STPTLDARNI register with the RNi of the device
+ */
+ if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
+ idev->is_direct_attached)
+ sci_port_setup_transports(idev->owning_port,
+ rnc->remote_node_index);
+
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
+}
+
+static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+
+ if (rnc->user_callback)
+ sci_remote_node_context_notify_user(rnc);
+}
+
+static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ sci_remote_node_context_continue_state_transitions(rnc);
+}
+
+static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ sci_remote_node_context_continue_state_transitions(rnc);
+}
+
+static const struct sci_base_state sci_remote_node_context_state_table[] = {
+ [SCI_RNC_INITIAL] = {
+ .enter_state = sci_remote_node_context_initial_state_enter,
+ },
+ [SCI_RNC_POSTING] = {
+ .enter_state = sci_remote_node_context_posting_state_enter,
+ },
+ [SCI_RNC_INVALIDATING] = {
+ .enter_state = sci_remote_node_context_invalidating_state_enter,
+ },
+ [SCI_RNC_RESUMING] = {
+ .enter_state = sci_remote_node_context_resuming_state_enter,
+ },
+ [SCI_RNC_READY] = {
+ .enter_state = sci_remote_node_context_ready_state_enter,
+ },
+ [SCI_RNC_TX_SUSPENDED] = {
+ .enter_state = sci_remote_node_context_tx_suspended_state_enter,
+ },
+ [SCI_RNC_TX_RX_SUSPENDED] = {
+ .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
+ },
+ [SCI_RNC_AWAIT_SUSPENSION] = { },
+};
+
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
+ u16 remote_node_index)
+{
+ memset(rnc, 0, sizeof(struct sci_remote_node_context));
+
+ rnc->remote_node_index = remote_node_index;
+ rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+
+ sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
+}
+
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
+ u32 event_code)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_POSTING:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_POST_RNC_COMPLETE:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
+ break;
+ default:
+ goto out;
+ }
+ break;
+ case SCI_RNC_INVALIDATING:
+ if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
+ if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
+ state = SCI_RNC_INITIAL;
+ else
+ state = SCI_RNC_POSTING;
+ sci_change_state(&sci_rnc->sm, state);
+ } else {
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ /* We really don't care if the hardware is going to suspend
+ * the device since it's being invalidated anyway */
+ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: SCIC Remote Node Context 0x%p was "
+ "suspeneded by hardware while being "
+ "invalidated.\n", __func__, sci_rnc);
+ break;
+ default:
+ goto out;
+ }
+ }
+ break;
+ case SCI_RNC_RESUMING:
+ if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
+ sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
+ } else {
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ /* We really don't care if the hardware is going to suspend
+ * the device since it's being resumed anyway */
+ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: SCIC Remote Node Context 0x%p was "
+ "suspeneded by hardware while being resumed.\n",
+ __func__, sci_rnc);
+ break;
+ default:
+ goto out;
+ }
+ }
+ break;
+ case SCI_RNC_READY:
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TL_RNC_SUSPEND_TX:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
+ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ break;
+ case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
+ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ break;
+ default:
+ goto out;
+ }
+ break;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TL_RNC_SUSPEND_TX:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
+ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ break;
+ case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
+ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ break;
+ default:
+ goto out;
+ }
+ break;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ return SCI_SUCCESS;
+
+ out:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: code: %#x state: %d\n", __func__, event_code, state);
+ return SCI_FAILURE;
+
+}
+
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_INVALIDATING:
+ sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
+ return SCI_SUCCESS;
+ case SCI_RNC_POSTING:
+ case SCI_RNC_RESUMING:
+ case SCI_RNC_READY:
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ case SCI_RNC_AWAIT_SUSPENSION:
+ sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
+ return SCI_SUCCESS;
+ case SCI_RNC_INITIAL:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ /* We have decided that the destruct request on the remote node context
+ * can not fail since it is either in the initial/destroyed state or it
+ * can be destroyed.
+ */
+ return SCI_SUCCESS;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
+ u32 suspend_type,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ if (state != SCI_RNC_READY) {
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_rnc->user_callback = cb_fn;
+ sci_rnc->user_cookie = cb_p;
+ sci_rnc->suspension_code = suspend_type;
+
+ if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
+ sci_remote_device_post_request(rnc_to_dev(sci_rnc),
+ SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
+ }
+
+ sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_INITIAL:
+ if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+ return SCI_FAILURE_INVALID_STATE;
+
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_construct_buffer(sci_rnc);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
+ return SCI_SUCCESS;
+ case SCI_RNC_POSTING:
+ case SCI_RNC_INVALIDATING:
+ case SCI_RNC_RESUMING:
+ if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
+ return SCI_FAILURE_INVALID_STATE;
+
+ sci_rnc->user_callback = cb_fn;
+ sci_rnc->user_cookie = cb_p;
+ return SCI_SUCCESS;
+ case SCI_RNC_TX_SUSPENDED: {
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct domain_device *dev = idev->domain_dev;
+
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+
+ /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
+ if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev))
+ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+ else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ if (idev->is_direct_attached) {
+ /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */
+ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+ } else {
+ sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
+ }
+ } else
+ return SCI_FAILURE;
+ return SCI_SUCCESS;
+ }
+ case SCI_RNC_TX_RX_SUSPENDED:
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+
+ switch (state) {
+ case SCI_RNC_READY:
+ return SCI_SUCCESS;
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ case SCI_RNC_AWAIT_SUSPENSION:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ default:
+ break;
+ }
+ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: requested to start IO while still resuming, %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+}
+
+enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_RESUMING:
+ case SCI_RNC_READY:
+ case SCI_RNC_AWAIT_SUSPENSION:
+ return SCI_SUCCESS;
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ sci_remote_node_context_resume(sci_rnc, NULL, NULL);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
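As a hedged usage sketch (the helper and callback below are hypothetical, not part of the patch), a caller resumes an RNC by handing sci_remote_node_context_resume() a completion callback; the callback fires from the ready/initial state entry paths above once the transition finishes.

/* Hypothetical usage sketch, not part of the patch. */
static void example_rnc_resumed(void *cookie)
{
	struct isci_remote_device *idev = cookie;

	dev_dbg(scirdev_to_dev(idev), "%s: rnc is ready again\n", __func__);
}

static enum sci_status example_resume_device_rnc(struct isci_remote_device *idev)
{
	return sci_remote_node_context_resume(&idev->rnc,
					      example_rnc_resumed, idev);
}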
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
new file mode 100644
index 00000000000..41580ad1252
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -0,0 +1,224 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
+#define _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
+
+/**
+ * This file contains the structures, constants, and prototypes associated with
+ * the remote node context in the silicon. It exists to model and manage
+ * the remote node context in the silicon.
+ *
+ *
+ */
+
+#include "isci.h"
+
+/**
+ *
+ *
+ * This constant represents an invalid remote device id. It is used to program
+ * the STPDARNI register so the driver knows when it has received a SIGNATURE
+ * FIS from the SCU.
+ */
+#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF
+
+#define SCU_HARDWARE_SUSPENSION (0)
+#define SCI_SOFTWARE_SUSPENSION (1)
+
+struct isci_request;
+struct isci_remote_device;
+struct sci_remote_node_context;
+
+typedef void (*scics_sds_remote_node_context_callback)(void *);
+
+/**
+ * This is the enumeration of the remote node context states.
+ */
+enum scis_sds_remote_node_context_states {
+ /**
+ * This state is the initial state for a remote node context. On a resume
+ * request the remote node context will transition to the posting state.
+ */
+ SCI_RNC_INITIAL,
+
+ /**
+ * This is a transition state that posts the RNi to the hardware. Once the RNC
+ * is posted the remote node context will be made ready.
+ */
+ SCI_RNC_POSTING,
+
+ /**
+ * This is a transition state that will post an RNC invalidate to the
+ * hardware. Once the invalidate is complete the remote node context will
+ * transition to the posting state.
+ */
+ SCI_RNC_INVALIDATING,
+
+ /**
+ * This is a transition state that will post an RNC resume to the hardware.
+ * Once the event notification of resume complete is received the remote node
+ * context will transition to the ready state.
+ */
+ SCI_RNC_RESUMING,
+
+ /**
+ * This is the state that the remote node context must be in to accept io
+ * request operations.
+ */
+ SCI_RNC_READY,
+
+ /**
+ * This is the state that the remote node context transitions to when it gets
+ * a TX suspend notification from the hardware.
+ */
+ SCI_RNC_TX_SUSPENDED,
+
+ /**
+ * This is the state that the remote node context transitions to when it gets
+ * a TX RX suspend notification from the hardware.
+ */
+ SCI_RNC_TX_RX_SUSPENDED,
+
+ /**
+ * This state is a wait state for the remote node context that waits for a
+ * suspend notification from the hardware. This state is entered when either
+ * there is a request to suspend the remote node context or when there is a TC
+ * completion where the remote node will be suspended by the hardware.
+ */
+ SCI_RNC_AWAIT_SUSPENSION
+};
+
+/**
+ *
+ *
+ * This enumeration is used to define the end destination state for the remote
+ * node context.
+ */
+enum sci_remote_node_context_destination_state {
+ SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED,
+ SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY,
+ SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL
+};
+
+/**
+ * struct sci_remote_node_context - This structure contains the data
+ * associated with the remote node context object. The remote node context
+ * (RNC) object models the remote device information necessary to manage
+ * the silicon RNC.
+ */
+struct sci_remote_node_context {
+ /**
+ * This field indicates the remote node index (RNI) associated with
+ * this RNC.
+ */
+ u16 remote_node_index;
+
+ /**
+ * This field is the recorded suspension code or the reason for the remote node
+ * context suspension.
+ */
+ u32 suspension_code;
+
+ /**
+ * This field indicates the destination state toward which the remote node
+ * context is transitioning. A READY destination state can cause an automatic
+ * resume on receiving a suspension notification.
+ */
+ enum sci_remote_node_context_destination_state destination_state;
+
+ /**
+ * This field contains the callback function that the user requested to be
+ * called when the requested state transition is complete.
+ */
+ scics_sds_remote_node_context_callback user_callback;
+
+ /**
+ * This field contains the parameter that is called when the user requested
+ * state transition is completed.
+ */
+ void *user_cookie;
+
+ /**
+ * This field contains the data for the object's state machine.
+ */
+ struct sci_base_state_machine sm;
+};
+
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
+ u16 remote_node_index);
+
+
+bool sci_remote_node_context_is_ready(
+ struct sci_remote_node_context *sci_rnc);
+
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
+ u32 event_code);
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback callback,
+ void *callback_parameter);
+enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
+ u32 suspend_type,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p);
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p);
+enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq);
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq);
+
+#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c
new file mode 100644
index 00000000000..301b3141945
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_table.c
@@ -0,0 +1,598 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
+ * public, protected, and private methods.
+ *
+ *
+ */
+#include "remote_node_table.h"
+#include "remote_node_context.h"
+
+/**
+ *
+ * @remote_node_table: This is the remote node index table from which the
+ * selection will be made.
+ * @group_table_index: This is the index to the group table from which to
+ * search for an available selection.
+ *
+ * This routine will find the bit position, in absolute bit terms, of the next
+ * available group in the selected group table. If there are available bits in
+ * the first u32 then it is just the bit position. Returns u32: the absolute
+ * bit position for an available group.
+ */
+static u32 sci_remote_node_table_get_group_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index)
+{
+ u32 dword_index;
+ u32 *group_table;
+ u32 bit_index;
+
+ group_table = remote_node_table->remote_node_groups[group_table_index];
+
+ for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) {
+ if (group_table[dword_index] != 0) {
+ for (bit_index = 0; bit_index < 32; bit_index++) {
+ if ((group_table[dword_index] & (1 << bit_index)) != 0) {
+ return (dword_index * 32) + bit_index;
+ }
+ }
+ }
+ }
+
+ return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
+}
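/* Editorial sketch, not part of the patch: the scan above is a plain
 * find-first-set over the selected group bitmap, so an equivalent (and
 * hypothetical) formulation could use the kernel's ffs() helper.
 */
static u32 example_get_group_index(struct sci_remote_node_table *remote_node_table,
				   u32 group_table_index)
{
	u32 *group_table = remote_node_table->remote_node_groups[group_table_index];
	u32 dword_index;

	for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++)
		if (group_table[dword_index])
			/* ffs() is 1-based, hence the -1 */
			return dword_index * 32 + ffs(group_table[dword_index]) - 1;

	return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
}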
+
+/**
+ *
+ * @remote_node_table: This is the remote node table in which to clear the
+ * selector.
+ * @group_table_index: This is the remote node selector in which the change
+ * will be made.
+ * @group_index: This is the bit index in the table to be modified.
+ *
+ * This method will clear the group index entry in the specified group index
+ * table. none
+ */
+static void sci_remote_node_table_clear_group_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index,
+ u32 group_index)
+{
+ u32 dword_index;
+ u32 bit_index;
+ u32 *group_table;
+
+ BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
+ BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
+
+ dword_index = group_index / 32;
+ bit_index = group_index % 32;
+ group_table = remote_node_table->remote_node_groups[group_table_index];
+
+ group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index);
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table in which to set the
+ * selector.
+ * @group_table_index: This is the remote node selector in which the change
+ * will be made.
+ * @group_index: This is the bit position in the table to be modified.
+ *
+ * This method will set the group index bit entry in the specified group index
+ * table. none
+ */
+static void sci_remote_node_table_set_group_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index,
+ u32 group_index)
+{
+ u32 dword_index;
+ u32 bit_index;
+ u32 *group_table;
+
+ BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
+ BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
+
+ dword_index = group_index / 32;
+ bit_index = group_index % 32;
+ group_table = remote_node_table->remote_node_groups[group_table_index];
+
+ group_table[dword_index] = group_table[dword_index] | (1 << bit_index);
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table in which to modify
+ * the remote node availability.
+ * @remote_node_index: This is the remote node index that is being returned to
+ * the table.
+ *
+ * This method will set the remote node to available in the remote node allocation
+ * table. none
+ */
+static void sci_remote_node_table_set_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 slot_normalized;
+ u32 slot_position;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
+ slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
+
+ remote_node_table->available_remote_nodes[dword_location] |=
+ 1 << (slot_normalized + slot_position);
+}
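/*
 * Worked example (editorial, not part of the patch): with
 * SCIC_SDS_REMOTE_NODES_PER_DWORD = 24 and SCU_STP_REMOTE_NODE_COUNT = 3,
 * remote_node_index = 29 maps to:
 *
 *   dword_location  = 29 / 24     = 1
 *   dword_remainder = 29 % 24     = 5
 *   slot_normalized = (5 / 3) * 4 = 4   (nibble offset within the dword)
 *   slot_position   = 29 % 3      = 2   (bit within the nibble)
 *
 * so bit 6 of available_remote_nodes[1] is set, i.e. RNi 29 is the third
 * entry of the second nibble in the second dword.
 */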
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which to clear
+ * the available remote node bit.
+ * @remote_node_index: This is the remote node index which is to be cleared
+ * from the table.
+ *
+ * This method clears the remote node index from the table of available remote
+ * nodes. none
+ */
+static void sci_remote_node_table_clear_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 slot_position;
+ u32 slot_normalized;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
+ slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
+
+ remote_node_table->available_remote_nodes[dword_location] &=
+ ~(1 << (slot_normalized + slot_position));
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table from which the slot will be
+ * cleared.
+ * @group_index: The index for the slot that is to be cleared.
+ *
+ * This method clears the entire table slot at the specified slot index. none
+ */
+static void sci_remote_node_table_clear_group(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 dword_value;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+ dword_value = remote_node_table->available_remote_nodes[dword_location];
+ dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+ remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ *
+ * @remote_node_table:
+ *
+ * This method sets an entire remote node group in the remote node table.
+ */
+static void sci_remote_node_table_set_group(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 dword_value;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+ dword_value = remote_node_table->available_remote_nodes[dword_location];
+ dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+ remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table for which the group
+ * value is to be returned.
+ * @group_index: This is the group index to use to find the group value.
+ *
+ * This method will return the group value for the specified group index. The
+ * bit values at the specified remote node group index.
+ */
+static u8 sci_remote_node_table_get_group_value(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 dword_value;
+
+ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+ dword_value = remote_node_table->available_remote_nodes[dword_location];
+ dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+ dword_value = dword_value >> (dword_remainder * 4);
+
+ return (u8)dword_value;
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table which is to be initialized.
+ * @remote_node_entries: The number of entries to put in the table.
+ *
+ * This method will initialize the remote node table for use. none
+ */
+void sci_remote_node_table_initialize(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_entries)
+{
+ u32 index;
+
+ /*
+ * Initialize the raw data. We could improve the speed by only initializing
+ * those entries that we are actually going to use */
+ memset(
+ remote_node_table->available_remote_nodes,
+ 0x00,
+ sizeof(remote_node_table->available_remote_nodes)
+ );
+
+ memset(
+ remote_node_table->remote_node_groups,
+ 0x00,
+ sizeof(remote_node_table->remote_node_groups)
+ );
+
+ /* Initialize the available remote node sets */
+ remote_node_table->available_nodes_array_size = (u16)
+ (remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD)
+ + ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0);
+
+
+ /* Initialize each full DWORD to a FULL SET of remote nodes */
+ for (index = 0; index < remote_node_entries; index++) {
+ sci_remote_node_table_set_node_index(remote_node_table, index);
+ }
+
+ remote_node_table->group_array_size = (u16)
+ (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32))
+ + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0);
+
+ for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) {
+ /*
+ * These are all guaranteed to be full slot values so fill them in the
+ * available sets of 3 remote nodes */
+ sci_remote_node_table_set_group_index(remote_node_table, 2, index);
+ }
+
+ /* Now fill in any remainders that we may find */
+ if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
+ sci_remote_node_table_set_group_index(remote_node_table, 1, index);
+ } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
+ sci_remote_node_table_set_group_index(remote_node_table, 0, index);
+ }
+}
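/*
 * Worked example (editorial, not part of the patch): for a hypothetical
 * remote_node_entries = 8 the initialization above yields:
 *
 *   available_nodes_array_size = 8 / 24 + 1 = 1
 *   group_array_size           = 8 / 96 + 1 = 1
 *   available_remote_nodes[0]  = 0x377   (RNis 0-7 marked available)
 *
 * The two full triples (groups 0 and 1) are recorded in the triple selector
 * remote_node_groups[2], and the leftover pair (group 2, RNis 6-7) is
 * recorded in the dual selector remote_node_groups[1].
 */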
+
+/**
+ *
+ * @remote_node_table: The remote node table from which to allocate a
+ * remote node.
+ * @group_table_index: The group index that is to be used for the search.
+ *
+ * This method will allocate a single RNi from the remote node table. The
+ * table index will determine from which remote node group table to search.
+ * This search may fail and another group node table can be specified. The
+ * function is designed to allow a search of the available single remote node
+ * group up to the triple remote node group. If an entry is found in the
+ * specified table the remote node is removed and the remote node groups are
+ * updated. The RNi value or an invalid remote node context if an RNi can not
+ * be found.
+ */
+static u16 sci_remote_node_table_allocate_single_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index)
+{
+ u8 index;
+ u8 group_value;
+ u32 group_index;
+ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+ group_index = sci_remote_node_table_get_group_index(
+ remote_node_table, group_table_index);
+
+ /* Proceed only if an available group was found in this table selector */
+ if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
+ group_value = sci_remote_node_table_get_group_value(
+ remote_node_table, group_index);
+
+ for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
+ if (((1 << index) & group_value) != 0) {
+ /* We have selected a bit now clear it */
+ remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
+ + index);
+
+ sci_remote_node_table_clear_group_index(
+ remote_node_table, group_table_index, group_index
+ );
+
+ sci_remote_node_table_clear_node_index(
+ remote_node_table, remote_node_index
+ );
+
+ if (group_table_index > 0) {
+ sci_remote_node_table_set_group_index(
+ remote_node_table, group_table_index - 1, group_index
+ );
+ }
+
+ break;
+ }
+ }
+ }
+
+ return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which to allocate the
+ * remote node entries.
+ * @group_table_index: This is the group table index which must equal two (2)
+ * for this operation.
+ *
+ * This method will allocate three consecutive remote node context entries. If
+ * there are no remaining triple entries the function will return a failure.
+ * The remote node index that represents three consecutive remote node entries
+ * or an invalid remote node context if none can be found.
+ */
+static u16 sci_remote_node_table_allocate_triple_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index)
+{
+ u32 group_index;
+ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+ group_index = sci_remote_node_table_get_group_index(
+ remote_node_table, group_table_index);
+
+ if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
+ remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;
+
+ sci_remote_node_table_clear_group_index(
+ remote_node_table, group_table_index, group_index
+ );
+
+ sci_remote_node_table_clear_group(
+ remote_node_table, group_index
+ );
+ }
+
+ return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which the remote node
+ * allocation is to take place.
+ * @remote_node_count: This is the remote node count which is one of
+ * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).
+ *
+ * This method will allocate a remote node that matches the remote node count
+ * specified by the caller. Valid values for remote node count are
+ * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3). Returns u16:
+ * the remote node index that is allocated or an invalid remote node index.
+ */
+u16 sci_remote_node_table_allocate_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count)
+{
+ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+ if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
+ remote_node_index =
+ sci_remote_node_table_allocate_single_remote_node(
+ remote_node_table, 0);
+
+ if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ remote_node_index =
+ sci_remote_node_table_allocate_single_remote_node(
+ remote_node_table, 1);
+ }
+
+ if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ remote_node_index =
+ sci_remote_node_table_allocate_single_remote_node(
+ remote_node_table, 2);
+ }
+ } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
+ remote_node_index =
+ sci_remote_node_table_allocate_triple_remote_node(
+ remote_node_table, 2);
+ }
+
+ return remote_node_index;
+}
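/* Hypothetical usage sketch, not part of the patch: reserving an RNi triple
 * for an STP device.  SCI_FAILURE stands in for whatever more specific status
 * a real caller would return when the table is exhausted.
 */
static enum sci_status example_reserve_stp_rni(struct sci_remote_node_table *remote_node_table,
					       u16 *rni_out)
{
	u16 rni = sci_remote_node_table_allocate_remote_node(
			remote_node_table, SCU_STP_REMOTE_NODE_COUNT);

	if (rni == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
		return SCI_FAILURE;

	*rni_out = rni;
	return SCI_SUCCESS;
}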
+
+/**
+ *
+ * @remote_node_table:
+ *
+ * This method will free a single remote node index back to the remote node
+ * table. This routine will update the remote node groups
+ */
+static void sci_remote_node_table_release_single_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u16 remote_node_index)
+{
+ u32 group_index;
+ u8 group_value;
+
+ group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
+
+ group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);
+
+ /*
+ * Assert that we are not trying to add an entry to a slot that is already
+ * full. */
+ BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);
+
+ if (group_value == 0x00) {
+ /*
+ * There are no entries in this slot so it must be added to the single
+ * slot table. */
+ sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
+ } else if ((group_value & (group_value - 1)) == 0) {
+ /*
+ * There is only one entry in this slot so it must be moved from the
+ * single slot table to the dual slot table */
+ sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
+ sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
+ } else {
+ /*
+ * There are two entries in the slot so it must be moved from the dual
+ * slot table to the triple slot table. */
+ sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
+ sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
+ }
+
+ sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table to which the remote node
+ * index is to be freed.
+ *
+ * This method will release a group of three consecutive remote nodes back to
+ * the free remote nodes.
+ */
+static void sci_remote_node_table_release_triple_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u16 remote_node_index)
+{
+ u32 group_index;
+
+ group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
+
+ sci_remote_node_table_set_group_index(
+ remote_node_table, 2, group_index
+ );
+
+ sci_remote_node_table_set_group(remote_node_table, group_index);
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table to which the remote node index is
+ * to be freed.
+ * @remote_node_count: This is the count of consecutive remote nodes that are
+ * to be freed.
+ *
+ * This method will release the remote node index back into the remote node
+ * table free pool.
+ */
+void sci_remote_node_table_release_remote_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count,
+ u16 remote_node_index)
+{
+ if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
+ sci_remote_node_table_release_single_remote_node(
+ remote_node_table, remote_node_index);
+ } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
+ sci_remote_node_table_release_triple_remote_node(
+ remote_node_table, remote_node_index);
+ }
+}
+
diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h
new file mode 100644
index 00000000000..721ab982d2a
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_table.h
@@ -0,0 +1,188 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_REMOTE_NODE_TABLE_H_
+#define _SCIC_SDS_REMOTE_NODE_TABLE_H_
+
+#include "isci.h"
+
+/**
+ *
+ *
+ * Remote node sets are sets of remote node indices in the remote node table.
+ * The SCU hardware requires that STP remote node entries take three
+ * consecutive remote node indices, so the table is arranged in sets of three.
+ * The bits are used as 0111 0111 to make a byte, and the bits define the set
+ * of three remote nodes to use as a sequence.
+ */
+#define SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE 2
+
+/**
+ *
+ *
+ * Since the remote node table is organized as DWORDs, take the remote node
+ * sets in bytes and represent them in DWORDs. The lowest ordered bits are the
+ * ones used when a full DWORD is not in use, i.e.
+ * 0000 0000 0000 0000 0111 0111 0111 0111 if only a single WORD is in use in
+ * the DWORD.
+ */
+#define SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD \
+ (sizeof(u32) * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
+/**
+ *
+ *
+ * This is a count of the number of remote nodes that can be represented in a
+ * byte.
+ */
+#define SCIC_SDS_REMOTE_NODES_PER_BYTE \
+ (SCU_STP_REMOTE_NODE_COUNT * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
+
+/**
+ *
+ *
+ * This is a count of the number of remote nodes that can be represented in a
+ * DWORD.
+ */
+#define SCIC_SDS_REMOTE_NODES_PER_DWORD \
+ (sizeof(u32) * SCIC_SDS_REMOTE_NODES_PER_BYTE)
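+
+/*
+ * For illustration: with SCU_STP_REMOTE_NODE_COUNT == 3 and two sets per
+ * byte, each byte of the availability bitmap covers 3 * 2 = 6 remote node
+ * indices and each DWORD covers 4 * 6 = 24, which is the divisor used when
+ * sizing the available_remote_nodes[] array below.
+ */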
+
+/**
+ *
+ *
+ * This is the number of bits in a remote node group
+ */
+#define SCIC_SDS_REMOTE_NODES_BITS_PER_GROUP 4
+
+#define SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX (0xFFFFFFFF)
+#define SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE (0x07)
+#define SCIC_SDS_REMOTE_NODE_TABLE_EMPTY_SLOT_VALUE (0x00)
+
+/**
+ *
+ *
+ * Expander attached SATA remote node count
+ */
+#define SCU_STP_REMOTE_NODE_COUNT 3
+
+/**
+ *
+ *
+ * Expander or direct attached SSP remote node count
+ */
+#define SCU_SSP_REMOTE_NODE_COUNT 1
+
+/**
+ *
+ *
+ * Direct attached STP remote node count
+ */
+#define SCU_SATA_REMOTE_NODE_COUNT 1
+
+/**
+ * struct sci_remote_node_table - bookkeeping for available remote node indices
+ *
+ *
+ */
+struct sci_remote_node_table {
+ /**
+ * This field contains the array size in dwords
+ */
+ u16 available_nodes_array_size;
+
+ /**
+ * This field contains the array size of the remote_node_groups table
+ */
+ u16 group_array_size;
+
+ /**
+ * This field is the array of available remote node entries in bits.
+ * Because of the way STP remote node data is allocated on the SCU hardware,
+ * the remote nodes must occupy three consecutive remote node context
+ * entries. For ease of allocation and de-allocation we have broken the
+ * sets of three into a single nibble. When the STP RNi is allocated, all
+ * of the bits in the nibble are cleared. This math results in a table size
+ * of MAX_REMOTE_NODES / CONSECUTIVE RNi ENTRIES for STP / 2 entries per byte.
+ */
+ u32 available_remote_nodes[
+ (SCI_MAX_REMOTE_DEVICES / SCIC_SDS_REMOTE_NODES_PER_DWORD)
+ + ((SCI_MAX_REMOTE_DEVICES % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0)];
+
+ /**
+ * This field is the nibble selector for the above table. There are three
+ * possible selectors, one each for fast lookup when trying to find one, two
+ * or three remote node entries.
+ */
+ u32 remote_node_groups[
+ SCU_STP_REMOTE_NODE_COUNT][
+ (SCI_MAX_REMOTE_DEVICES / (32 * SCU_STP_REMOTE_NODE_COUNT))
+ + ((SCI_MAX_REMOTE_DEVICES % (32 * SCU_STP_REMOTE_NODE_COUNT)) != 0)];
+
+};
+
+/* --------------------------------------------------------------------------- */
+
+void sci_remote_node_table_initialize(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_entries);
+
+u16 sci_remote_node_table_allocate_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count);
+
+void sci_remote_node_table_release_remote_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count,
+ u16 remote_node_index);
+
+#endif /* _SCIC_SDS_REMOTE_NODE_TABLE_H_ */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
new file mode 100644
index 00000000000..a46e07ac789
--- /dev/null
+++ b/drivers/scsi/isci/request.c
@@ -0,0 +1,3391 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "task.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "sas.h"
+
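+/* The first two SGL element pairs for a request are embedded in the task
+ * context itself (sgl_pair_ab and sgl_pair_cd); any additional pairs come
+ * from the request's external sg_table, hence the "idx - 2" indexing below.
+ */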
+static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
+ int idx)
+{
+ if (idx == 0)
+ return &ireq->tc->sgl_pair_ab;
+ else if (idx == 1)
+ return &ireq->tc->sgl_pair_cd;
+ else if (idx < 0)
+ return NULL;
+ else
+ return &ireq->sg_table[idx - 2];
+}
+
+static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
+ struct isci_request *ireq, u32 idx)
+{
+ u32 offset;
+
+ if (idx == 0) {
+ offset = (void *) &ireq->tc->sgl_pair_ab -
+ (void *) &ihost->task_context_table[0];
+ return ihost->task_context_dma + offset;
+ } else if (idx == 1) {
+ offset = (void *) &ireq->tc->sgl_pair_cd -
+ (void *) &ihost->task_context_table[0];
+ return ihost->task_context_dma + offset;
+ }
+
+ return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
+}
+
+static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
+{
+ e->length = sg_dma_len(sg);
+ e->address_upper = upper_32_bits(sg_dma_address(sg));
+ e->address_lower = lower_32_bits(sg_dma_address(sg));
+ e->address_modifier = 0;
+}
+
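+/* Walk the sas_task scatterlist and fill in the SCU SGL element pairs,
+ * chaining each pair to the next by its DMA address.  A request without a
+ * scatterlist gets its single buffer mapped directly into element A.
+ */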
+static void sci_request_build_sgl(struct isci_request *ireq)
+{
+ struct isci_host *ihost = ireq->isci_host;
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct scatterlist *sg = NULL;
+ dma_addr_t dma_addr;
+ u32 sg_idx = 0;
+ struct scu_sgl_element_pair *scu_sg = NULL;
+ struct scu_sgl_element_pair *prev_sg = NULL;
+
+ if (task->num_scatter > 0) {
+ sg = task->scatter;
+
+ while (sg) {
+ scu_sg = to_sgl_element_pair(ireq, sg_idx);
+ init_sgl_element(&scu_sg->A, sg);
+ sg = sg_next(sg);
+ if (sg) {
+ init_sgl_element(&scu_sg->B, sg);
+ sg = sg_next(sg);
+ } else
+ memset(&scu_sg->B, 0, sizeof(scu_sg->B));
+
+ if (prev_sg) {
+ dma_addr = to_sgl_element_pair_dma(ihost,
+ ireq,
+ sg_idx);
+
+ prev_sg->next_pair_upper =
+ upper_32_bits(dma_addr);
+ prev_sg->next_pair_lower =
+ lower_32_bits(dma_addr);
+ }
+
+ prev_sg = scu_sg;
+ sg_idx++;
+ }
+ } else { /* handle when no sg */
+ scu_sg = to_sgl_element_pair(ireq, sg_idx);
+
+ dma_addr = dma_map_single(&ihost->pdev->dev,
+ task->scatter,
+ task->total_xfer_len,
+ task->data_dir);
+
+ ireq->zero_scatter_daddr = dma_addr;
+
+ scu_sg->A.length = task->total_xfer_len;
+ scu_sg->A.address_upper = upper_32_bits(dma_addr);
+ scu_sg->A.address_lower = lower_32_bits(dma_addr);
+ }
+
+ if (scu_sg) {
+ scu_sg->next_pair_upper = 0;
+ scu_sg->next_pair_lower = 0;
+ }
+}
+
+static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
+{
+ struct ssp_cmd_iu *cmd_iu;
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ cmd_iu = &ireq->ssp.cmd;
+
+ memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
+ cmd_iu->add_cdb_len = 0;
+ cmd_iu->_r_a = 0;
+ cmd_iu->_r_b = 0;
+ cmd_iu->en_fburst = 0; /* unsupported */
+ cmd_iu->task_prio = task->ssp_task.task_prio;
+ cmd_iu->task_attr = task->ssp_task.task_attr;
+ cmd_iu->_r_c = 0;
+
+ sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
+ sizeof(task->ssp_task.cdb) / sizeof(u32));
+}
+
+static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
+{
+ struct ssp_task_iu *task_iu;
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+ task_iu = &ireq->ssp.tmf;
+
+ memset(task_iu, 0, sizeof(struct ssp_task_iu));
+
+ memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
+
+ task_iu->task_func = isci_tmf->tmf_code;
+ task_iu->task_tag =
+ (ireq->ttype == tmf_task) ?
+ isci_tmf->io_tag :
+ SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+/**
+ * This method will fill in the SCU Task Context for any type of SSP request.
+ * @sci_req:
+ * @task_context:
+ *
+ */
+static void scu_ssp_reqeust_construct_task_context(
+ struct isci_request *ireq,
+ struct scu_task_context *task_context)
+{
+ dma_addr_t dma_addr;
+ struct isci_remote_device *idev;
+ struct isci_port *iport;
+
+ idev = ireq->target_device;
+ iport = idev->owning_port;
+
+ /* Fill in the TC with its required data */
+ task_context->abort = 0;
+ task_context->priority = 0;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = idev->connection_rate;
+ task_context->protocol_engine_index = ISCI_PEG;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ task_context->remote_node_index = idev->rnc.remote_node_index;
+ task_context->command_code = 0;
+
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 0;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ task_context->address_modifier = 0;
+
+ /* task_context->type.ssp.tag = ireq->io_tag; */
+ task_context->task_phase = 0x01;
+
+ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ ISCI_TAG_TCI(ireq->io_tag));
+
+ /*
+ * Copy the physical address for the command buffer to the
+ * SCU Task Context
+ */
+ dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
+
+ task_context->command_iu_upper = upper_32_bits(dma_addr);
+ task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+ /*
+ * Copy the physical address for the response buffer to the
+ * SCU Task Context
+ */
+ dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
+
+ task_context->response_iu_upper = upper_32_bits(dma_addr);
+ task_context->response_iu_lower = lower_32_bits(dma_addr);
+}
+
+/**
+ * This method will fill in the SCU Task Context for an SSP IO request.
+ * @sci_req:
+ *
+ */
+static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
+ enum dma_data_direction dir,
+ u32 len)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ scu_ssp_reqeust_construct_task_context(ireq, task_context);
+
+ task_context->ssp_command_iu_length =
+ sizeof(struct ssp_cmd_iu) / sizeof(u32);
+ task_context->type.ssp.frame_type = SSP_COMMAND;
+
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ case DMA_NONE:
+ default:
+ task_context->task_type = SCU_TASK_TYPE_IOREAD;
+ break;
+ case DMA_TO_DEVICE:
+ task_context->task_type = SCU_TASK_TYPE_IOWRITE;
+ break;
+ }
+
+ task_context->transfer_length_bytes = len;
+
+ if (task_context->transfer_length_bytes > 0)
+ sci_request_build_sgl(ireq);
+}
+
+/**
+ * This method will fill in the SCU Task Context for an SSP Task request.
+ * The following important settings are utilized:
+ *   -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
+ *      request is issued ahead of other tasks destined for the same
+ *      Remote Node.
+ *   -# task_type == SCU_TASK_TYPE_IOREAD.  This simply indicates that a
+ *      normal request type (i.e. non-raw frame) is being utilized to
+ *      perform task management.
+ *   -# control_frame == 1.  This ensures that the proper endianness is
+ *      set so that the bytes are transmitted in the right order for a
+ *      task frame.
+ * @sci_req: This parameter specifies the task request object being
+ * constructed.
+ *
+ */
+static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ scu_ssp_reqeust_construct_task_context(ireq, task_context);
+
+ task_context->control_frame = 1;
+ task_context->priority = SCU_TASK_PRIORITY_HIGH;
+ task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
+ task_context->transfer_length_bytes = 0;
+ task_context->type.ssp.frame_type = SSP_TASK;
+ task_context->ssp_command_iu_length =
+ sizeof(struct ssp_task_iu) / sizeof(u32);
+}
+
+/**
+ * This method will fill in the SCU Task Context for any type of SATA
+ * request. This is called from the various SATA constructors.
+ * @sci_req: The general IO request object which is to be used in
+ * constructing the SCU task context.
+ * @task_context: The buffer pointer for the SCU task context which is being
+ * constructed.
+ *
+ * The general IO request construction is complete. The buffer assignment for
+ * the command buffer is complete. Revisit task context construction to
+ * determine what is common for SSP/SMP/STP task context structures.
+ */
+static void scu_sata_reqeust_construct_task_context(
+ struct isci_request *ireq,
+ struct scu_task_context *task_context)
+{
+ dma_addr_t dma_addr;
+ struct isci_remote_device *idev;
+ struct isci_port *iport;
+
+ idev = ireq->target_device;
+ iport = idev->owning_port;
+
+ /* Fill in the TC with its required data */
+ task_context->abort = 0;
+ task_context->priority = SCU_TASK_PRIORITY_NORMAL;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = idev->connection_rate;
+ task_context->protocol_engine_index = ISCI_PEG;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ task_context->remote_node_index = idev->rnc.remote_node_index;
+ task_context->command_code = 0;
+
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 0;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ task_context->address_modifier = 0;
+ task_context->task_phase = 0x01;
+
+ task_context->ssp_command_iu_length =
+ (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
+
+ /* Set the first word of the H2D REG FIS */
+ task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
+
+ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ ISCI_TAG_TCI(ireq->io_tag));
+ /*
+ * Copy the physical address for the command buffer to the SCU Task
+ * Context. We must offset the command buffer by 4 bytes because the
+ * first 4 bytes are transferred in the body of the TC.
+ */
+ dma_addr = sci_io_request_get_dma_addr(ireq,
+ ((char *) &ireq->stp.cmd) +
+ sizeof(u32));
+
+ task_context->command_iu_upper = upper_32_bits(dma_addr);
+ task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+ /* SATA Requests do not have a response buffer */
+ task_context->response_iu_upper = 0;
+ task_context->response_iu_lower = 0;
+}
+
+static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ scu_sata_reqeust_construct_task_context(ireq, task_context);
+
+ task_context->control_frame = 0;
+ task_context->priority = SCU_TASK_PRIORITY_NORMAL;
+ task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
+ task_context->type.stp.fis_type = FIS_REGH2D;
+ task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
+}
+
+static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
+ bool copy_rx_frame)
+{
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+
+ scu_stp_raw_request_construct_task_context(ireq);
+
+ stp_req->status = 0;
+ stp_req->sgl.offset = 0;
+ stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
+
+ if (copy_rx_frame) {
+ sci_request_build_sgl(ireq);
+ stp_req->sgl.index = 0;
+ } else {
+ /* The user does not want the data copied to the SGL buffer location */
+ stp_req->sgl.index = -1;
+ }
+
+ return SCI_SUCCESS;
+}
+
+/**
+ *
+ * @sci_req: This parameter specifies the request to be constructed as an
+ * optimized request.
+ * @optimized_task_type: This parameter specifies whether the request is to be
+ *    a UDMA request or an NCQ request.  A value of 0 indicates UDMA; a value
+ *    of 1 indicates NCQ.
+ *
+ * This method will perform request construction common to all types of STP
+ * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
+ * returns an indication as to whether the construction was successful.
+ */
+static void sci_stp_optimized_request_construct(struct isci_request *ireq,
+ u8 optimized_task_type,
+ u32 len,
+ enum dma_data_direction dir)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ /* Build the STP task context structure */
+ scu_sata_reqeust_construct_task_context(ireq, task_context);
+
+ /* Copy over the SGL elements */
+ sci_request_build_sgl(ireq);
+
+ /* Copy over the number of bytes to be transferred */
+ task_context->transfer_length_bytes = len;
+
+ if (dir == DMA_TO_DEVICE) {
+ /*
+ * The difference between the DMA IN and DMA OUT request task type
+ * values is consistent with the difference between FPDMA READ
+ * and FPDMA WRITE values. Add the supplied task type parameter
+ * to this difference to set the task type properly for this
+ * DATA OUT (WRITE) case. */
+ task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
+ - SCU_TASK_TYPE_DMA_IN);
+ } else {
+ /*
+ * For the DATA IN (READ) case, simply save the supplied
+ * optimized task type. */
+ task_context->task_type = optimized_task_type;
+ }
+}
+
+
+
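+/* Select the STP/SATA construction path: soft-reset TMFs and non-data
+ * commands use a raw H2D register FIS, NCQ and DMA commands use the
+ * silicon-optimized path, and anything else falls back to PIO.
+ */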
+static enum sci_status
+sci_io_request_construct_sata(struct isci_request *ireq,
+ u32 len,
+ enum dma_data_direction dir,
+ bool copy)
+{
+ enum sci_status status = SCI_SUCCESS;
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ /* check for management protocols */
+ if (ireq->ttype == tmf_task) {
+ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+ if (tmf->tmf_code == isci_tmf_sata_srst_high ||
+ tmf->tmf_code == isci_tmf_sata_srst_low) {
+ scu_stp_raw_request_construct_task_context(ireq);
+ return SCI_SUCCESS;
+ } else {
+ dev_err(&ireq->owning_controller->pdev->dev,
+ "%s: Request 0x%p received un-handled SAT "
+ "management protocol 0x%x.\n",
+ __func__, ireq, tmf->tmf_code);
+
+ return SCI_FAILURE;
+ }
+ }
+
+ if (!sas_protocol_ata(task->task_proto)) {
+ dev_err(&ireq->owning_controller->pdev->dev,
+ "%s: Non-ATA protocol in SATA path: 0x%x\n",
+ __func__,
+ task->task_proto);
+ return SCI_FAILURE;
+
+ }
+
+ /* non data */
+ if (task->data_dir == DMA_NONE) {
+ scu_stp_raw_request_construct_task_context(ireq);
+ return SCI_SUCCESS;
+ }
+
+ /* NCQ */
+ if (task->ata_task.use_ncq) {
+ sci_stp_optimized_request_construct(ireq,
+ SCU_TASK_TYPE_FPDMAQ_READ,
+ len, dir);
+ return SCI_SUCCESS;
+ }
+
+ /* DMA */
+ if (task->ata_task.dma_xfer) {
+ sci_stp_optimized_request_construct(ireq,
+ SCU_TASK_TYPE_DMA_IN,
+ len, dir);
+ return SCI_SUCCESS;
+ } else /* PIO */
+ return sci_stp_pio_request_construct(ireq, copy);
+
+ return status;
+}
+
+static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ ireq->protocol = SCIC_SSP_PROTOCOL;
+
+ scu_ssp_io_request_construct_task_context(ireq,
+ task->data_dir,
+ task->total_xfer_len);
+
+ sci_io_request_build_ssp_command_iu(ireq);
+
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_task_request_construct_ssp(
+ struct isci_request *ireq)
+{
+ /* Construct the SSP Task SCU Task Context */
+ scu_ssp_task_request_construct_task_context(ireq);
+
+ /* Fill in the SSP Task IU */
+ sci_task_request_build_ssp_task_iu(ireq);
+
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
+{
+ enum sci_status status;
+ bool copy = false;
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ ireq->protocol = SCIC_STP_PROTOCOL;
+
+ copy = (task->data_dir == DMA_NONE) ? false : true;
+
+ status = sci_io_request_construct_sata(ireq,
+ task->total_xfer_len,
+ task->data_dir,
+ copy);
+
+ if (status == SCI_SUCCESS)
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return status;
+}
+
+enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ /* check for management protocols */
+ if (ireq->ttype == tmf_task) {
+ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+ if (tmf->tmf_code == isci_tmf_sata_srst_high ||
+ tmf->tmf_code == isci_tmf_sata_srst_low) {
+ scu_stp_raw_request_construct_task_context(ireq);
+ } else {
+ dev_err(&ireq->owning_controller->pdev->dev,
+ "%s: Request 0x%p received un-handled SAT "
+ "Protocol 0x%x.\n",
+ __func__, ireq, tmf->tmf_code);
+
+ return SCI_FAILURE;
+ }
+ }
+
+ if (status != SCI_SUCCESS)
+ return status;
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return status;
+}
+
+/**
+ * sci_req_tx_bytes - bytes transferred when reply underruns request
+ * @sci_req: request that was terminated early
+ */
+#define SCU_TASK_CONTEXT_SRAM 0x200000
+static u32 sci_req_tx_bytes(struct isci_request *ireq)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+ u32 ret_val = 0;
+
+ if (readl(&ihost->smu_registers->address_modifier) == 0) {
+ void __iomem *scu_reg_base = ihost->scu_registers;
+
+ /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
+ * BAR1 is the scu_registers
+ * 0x20002C = 0x200000 + 0x2c
+ * = start of task context SRAM + offset of (type.ssp.data_offset)
+ * TCi is the io_tag of struct sci_request
+ */
+ ret_val = readl(scu_reg_base +
+ (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
+ ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
+ }
+
+ return ret_val;
+}
+
+enum sci_status sci_request_start(struct isci_request *ireq)
+{
+ enum sci_base_request_states state;
+ struct scu_task_context *tc = ireq->tc;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+ if (state != SCI_REQ_CONSTRUCTED) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC IO Request requested to start while in wrong "
+ "state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
+
+ switch (tc->protocol_type) {
+ case SCU_TASK_CONTEXT_PROTOCOL_SMP:
+ case SCU_TASK_CONTEXT_PROTOCOL_SSP:
+ /* SSP/SMP Frame */
+ tc->type.ssp.tag = ireq->io_tag;
+ tc->type.ssp.target_port_transfer_tag = 0xFFFF;
+ break;
+
+ case SCU_TASK_CONTEXT_PROTOCOL_STP:
+ /* STP/SATA Frame
+ * tc->type.stp.ncq_tag = ireq->ncq_tag;
+ */
+ break;
+
+ case SCU_TASK_CONTEXT_PROTOCOL_NONE:
+ /* TODO: When do we set no protocol type? */
+ break;
+
+ default:
+ /* This should never happen since we build the IO
+ * requests */
+ break;
+ }
+
+ /* Add to the post_context the io tag value */
+ ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
+
+ /* Everything is good go ahead and change state */
+ sci_change_state(&ireq->sm, SCI_REQ_STARTED);
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status
+sci_io_request_terminate(struct isci_request *ireq)
+{
+ enum sci_base_request_states state;
+
+ state = ireq->sm.current_state_id;
+
+ switch (state) {
+ case SCI_REQ_CONSTRUCTED:
+ ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+ ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+ case SCI_REQ_STARTED:
+ case SCI_REQ_TASK_WAIT_TC_COMP:
+ case SCI_REQ_SMP_WAIT_RESP:
+ case SCI_REQ_SMP_WAIT_TC_COMP:
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ case SCI_REQ_STP_UDMA_WAIT_D2H:
+ case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ case SCI_REQ_STP_NON_DATA_WAIT_D2H:
+ case SCI_REQ_STP_PIO_WAIT_H2D:
+ case SCI_REQ_STP_PIO_WAIT_FRAME:
+ case SCI_REQ_STP_PIO_DATA_IN:
+ case SCI_REQ_STP_PIO_DATA_OUT:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
+ sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
+ return SCI_SUCCESS;
+ case SCI_REQ_TASK_WAIT_TC_RESP:
+ sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+ case SCI_REQ_ABORTING:
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+ case SCI_REQ_COMPLETED:
+ default:
+ dev_warn(&ireq->owning_controller->pdev->dev,
+ "%s: SCIC IO Request requested to abort while in wrong "
+ "state %d\n",
+ __func__,
+ ireq->sm.current_state_id);
+ break;
+ }
+
+ return SCI_FAILURE_INVALID_STATE;
+}
+
+enum sci_status sci_request_complete(struct isci_request *ireq)
+{
+ enum sci_base_request_states state;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+ if (WARN_ONCE(state != SCI_REQ_COMPLETED,
+ "isci: request completion from wrong state (%d)\n", state))
+ return SCI_FAILURE_INVALID_STATE;
+
+ if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
+ sci_controller_release_frame(ihost,
+ ireq->saved_rx_frame_index);
+
+ /* XXX can we just stop the machine and remove the 'final' state? */
+ sci_change_state(&ireq->sm, SCI_REQ_FINAL);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
+ u32 event_code)
+{
+ enum sci_base_request_states state;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+
+ if (state != SCI_REQ_STP_PIO_DATA_IN) {
+ dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
+ __func__, event_code, state);
+
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ switch (scu_get_event_specifier(event_code)) {
+ case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
+ /* We are waiting for data and the SCU has signaled R_ERR for the
+ * data frame.  Go back to waiting for the D2H Register FIS.
+ */
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ return SCI_SUCCESS;
+ default:
+ dev_err(&ihost->pdev->dev,
+ "%s: pio request unexpected event %#x\n",
+ __func__, event_code);
+
+ /* TODO Should we fail the PIO request when we get an
+ * unexpected event?
+ */
+ return SCI_FAILURE;
+ }
+}
+
+/*
+ * This function copies response data for requests returning response data
+ * instead of sense data.
+ * @sci_req: This parameter specifies the request object for which to copy
+ * the response data.
+ */
+static void sci_io_request_copy_response(struct isci_request *ireq)
+{
+ void *resp_buf;
+ u32 len;
+ struct ssp_response_iu *ssp_response;
+ struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+ ssp_response = &ireq->ssp.rsp;
+
+ resp_buf = &isci_tmf->resp.resp_iu;
+
+ len = min_t(u32,
+ SSP_RESP_IU_MAX_SIZE,
+ be32_to_cpu(ssp_response->response_data_len));
+
+ memcpy(resp_buf, ssp_response->resp_data, len);
+}
+
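+/* Translate a task context completion code for a request in the STARTED
+ * state into scu_status/sci_status values and move the request to the
+ * COMPLETED state.
+ */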
+static enum sci_status
+request_started_state_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ struct ssp_response_iu *resp_iu;
+ u8 datapres;
+
+ /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000
+ * to determine SDMA status
+ */
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
+ /* There are times when the SCU hardware will return an early
+ * response because the io request specified more data than is
+ * returned by the target device (mode pages, inquiry data,
+ * etc.). We must check the response stats to see if this is
+ * truly a failed request or a good request that just got
+ * completed early.
+ */
+ struct ssp_response_iu *resp = &ireq->ssp.rsp;
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ sci_swab32_cpy(&ireq->ssp.rsp,
+ &ireq->ssp.rsp,
+ word_cnt);
+
+ if (resp->status == 0) {
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
+ } else {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ }
+ break;
+ }
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ sci_swab32_cpy(&ireq->ssp.rsp,
+ &ireq->ssp.rsp,
+ word_cnt);
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ break;
+ }
+
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
+ /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
+ * guaranteed to be received before this completion status is
+ * posted?
+ */
+ resp_iu = &ireq->ssp.rsp;
+ datapres = resp_iu->datapres;
+
+ if (datapres == 1 || datapres == 2) {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ } else {
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ }
+ break;
+ /* only stp device gets suspended. */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
+ if (ireq->protocol == SCIC_STP_PROTOCOL) {
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ } else {
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ }
+ break;
+
+ /* both stp/ssp devices get suspended */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ break;
+
+ /* neither ssp nor stp gets suspended. */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
+ default:
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ break;
+ }
+
+ /*
+ * TODO: This is probably wrong for ACK/NAK timeout conditions
+ */
+
+ /* In all cases we will treat this as the completion of the IO req. */
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+request_aborting_state_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
+ case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
+ ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+ ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+
+ default:
+ /* Unless we get some strange error wait for the task abort to complete
+ * TODO: Should there be a state change for this completion?
+ */
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+ /* Currently, the decision is to simply allow the task request
+ * to timeout if the task IU wasn't received successfully.
+ * There is a potential for receiving multiple task responses if
+ * we decide to send the task IU again.
+ */
+ dev_warn(&ireq->owning_controller->pdev->dev,
+ "%s: TaskRequest:0x%p CompletionCode:%x - "
+ "ACK/NAK timeout\n", __func__, ireq,
+ completion_code);
+
+ sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+ break;
+ default:
+ /*
+ * All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_response_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ /* In the AWAIT RESPONSE state, any TC completion is
+ * unexpected, but if the TC has success status, we
+ * complete the IO anyway.
+ */
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+ /* These statuses have been seen in a specific LSI
+ * expander, which sometimes is not able to send an SMP
+ * response within 2 ms. This causes our hardware to break
+ * the connection and set the TC completion with one of
+ * these SMP_XXX_XX_ERR statuses. For this type of error,
+ * we ask the ihost user to retry the request.
+ */
+ ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
+ ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ default:
+ /* All other completion status cause the IO to be complete. If a NAK
+ * was received, then it is up to the user to retry the request
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
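+/* Advance the PIO SGL cursor: step from element A to element B of the
+ * current pair, or on to element A of the next pair, returning NULL once
+ * the zero-terminated chain is exhausted.
+ */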
+static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
+{
+ struct scu_sgl_element *sgl;
+ struct scu_sgl_element_pair *sgl_pair;
+ struct isci_request *ireq = to_ireq(stp_req);
+ struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
+
+ sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
+ if (!sgl_pair)
+ sgl = NULL;
+ else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
+ if (sgl_pair->B.address_lower == 0 &&
+ sgl_pair->B.address_upper == 0) {
+ sgl = NULL;
+ } else {
+ pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
+ sgl = &sgl_pair->B;
+ }
+ } else {
+ if (sgl_pair->next_pair_lower == 0 &&
+ sgl_pair->next_pair_upper == 0) {
+ sgl = NULL;
+ } else {
+ pio_sgl->index++;
+ pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
+ sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
+ sgl = &sgl_pair->A;
+ }
+ }
+
+ return sgl;
+}
+
+static enum sci_status
+stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
+
+/* Transmit a DATA FIS from (current sgl + offset) for the input parameter
+ * length.  The current sgl and offset are already stored in the IO request.
+ */
+static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
+ struct isci_request *ireq,
+ u32 length)
+{
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+ struct scu_task_context *task_context = ireq->tc;
+ struct scu_sgl_element_pair *sgl_pair;
+ struct scu_sgl_element *current_sgl;
+
+ /* Recycle the TC and reconstruct it for sending out a DATA FIS containing
+ * the data from current_sgl+offset for the input length
+ */
+ sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
+ if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
+ current_sgl = &sgl_pair->A;
+ else
+ current_sgl = &sgl_pair->B;
+
+ /* update the TC */
+ task_context->command_iu_upper = current_sgl->address_upper;
+ task_context->command_iu_lower = current_sgl->address_lower;
+ task_context->transfer_length_bytes = length;
+ task_context->type.stp.fis_type = FIS_DATA;
+
+ /* send the new TC out. */
+ return sci_controller_continue_io(ireq);
+}
+
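+/* Send the next chunk of a PIO data-out transfer: either the remainder of
+ * the current SGL element (then advance to the next element) or, when less
+ * data is pending than the element holds, just the remaining pio_len.
+ */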
+static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
+{
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+ struct scu_sgl_element_pair *sgl_pair;
+ struct scu_sgl_element *sgl;
+ enum sci_status status;
+ u32 offset;
+ u32 len = 0;
+
+ offset = stp_req->sgl.offset;
+ sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
+ if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
+ return SCI_FAILURE;
+
+ if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
+ sgl = &sgl_pair->A;
+ len = sgl_pair->A.length - offset;
+ } else {
+ sgl = &sgl_pair->B;
+ len = sgl_pair->B.length - offset;
+ }
+
+ if (stp_req->pio_len == 0)
+ return SCI_SUCCESS;
+
+ if (stp_req->pio_len >= len) {
+ status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
+ if (status != SCI_SUCCESS)
+ return status;
+ stp_req->pio_len -= len;
+
+ /* update the current sgl, offset and save for future */
+ sgl = pio_sgl_next(stp_req);
+ offset = 0;
+ } else if (stp_req->pio_len < len) {
+ sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
+
+ /* Sgl offset will be adjusted and saved for future */
+ offset += stp_req->pio_len;
+ sgl->address_lower += stp_req->pio_len;
+ stp_req->pio_len = 0;
+ }
+
+ stp_req->sgl.offset = offset;
+
+ return status;
+}
+
+/**
+ *
+ * @stp_request: The request that is used for the SGL processing.
+ * @data_buffer: The buffer of data to be copied.
+ * @length: The length of the data transfer.
+ *
+ * Copy the data from the buffer for the length specified to the IO request SGL
+ * specified data region. enum sci_status
+ */
+static enum sci_status
+sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
+ u8 *data_buf, u32 len)
+{
+ struct isci_request *ireq;
+ u8 *src_addr;
+ int copy_len;
+ struct sas_task *task;
+ struct scatterlist *sg;
+ void *kaddr;
+ int total_len = len;
+
+ ireq = to_ireq(stp_req);
+ task = isci_request_access_task(ireq);
+ src_addr = data_buf;
+
+ if (task->num_scatter > 0) {
+ sg = task->scatter;
+
+ while (total_len > 0) {
+ struct page *page = sg_page(sg);
+
+ copy_len = min_t(int, total_len, sg_dma_len(sg));
+ kaddr = kmap_atomic(page, KM_IRQ0);
+ memcpy(kaddr + sg->offset, src_addr, copy_len);
+ kunmap_atomic(kaddr, KM_IRQ0);
+ total_len -= copy_len;
+ src_addr += copy_len;
+ sg = sg_next(sg);
+ }
+ } else {
+ BUG_ON(task->total_xfer_len < total_len);
+ memcpy(task->scatter, src_addr, total_len);
+ }
+
+ return SCI_SUCCESS;
+}
+
+/**
+ *
+ * @sci_req: The PIO DATA IN request that is to receive the data.
+ * @data_buffer: The buffer to copy from.
+ *
+ * Copy the data buffer to the io request data region. enum sci_status
+ */
+static enum sci_status sci_stp_request_pio_data_in_copy_data(
+ struct isci_stp_request *stp_req,
+ u8 *data_buffer)
+{
+ enum sci_status status;
+
+ /*
+ * If there is less than 1K remaining in the transfer request,
+ * copy just the data for the transfer */
+ if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
+ status = sci_stp_request_pio_data_in_copy_data_buffer(
+ stp_req, data_buffer, stp_req->pio_len);
+
+ if (status == SCI_SUCCESS)
+ stp_req->pio_len = 0;
+ } else {
+ /* We are transferring the whole frame so copy */
+ status = sci_stp_request_pio_data_in_copy_data_buffer(
+ stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
+
+ if (status == SCI_SUCCESS)
+ stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
+ }
+
+ return status;
+}
+
+static enum sci_status
+stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
+static enum sci_status
+pio_data_out_tx_done_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+ bool all_frames_transferred = false;
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ /* Transmit data */
+ if (stp_req->pio_len != 0) {
+ status = sci_stp_request_pio_data_out_transmit_data(ireq);
+ if (status == SCI_SUCCESS) {
+ if (stp_req->pio_len == 0)
+ all_frames_transferred = true;
+ }
+ } else if (stp_req->pio_len == 0) {
+ /*
+ * this will happen if all the data is written the
+ * first time after the PIO Setup FIS is received
+ */
+ all_frames_transferred = true;
+ }
+
+ /* all data transferred. */
+ if (all_frames_transferred) {
+ /*
+ * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
+ * and wait for a PIO Setup FIS or a D2H Register FIS. */
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ }
+ break;
+
+ default:
+ /*
+ * All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
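+/* Common handling for unsolicited frames received in the UDMA states: if
+ * the frame is a D2H Register FIS, copy it into the request's response
+ * area; the frame is always released back to the controller.
+ */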
+static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
+ u32 frame_index)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+ struct dev_to_host_fis *frame_header;
+ enum sci_status status;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if ((status == SCI_SUCCESS) &&
+ (frame_header->fis_type == FIS_REGD2H)) {
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+ }
+
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+}
+
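+/* Unsolicited frame dispatcher: decode the received frame according to the
+ * request's current state (SSP response, SMP response, D2H / PIO Setup /
+ * Data FIS) and update the request status accordingly.
+ */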
+enum sci_status
+sci_io_request_frame_handler(struct isci_request *ireq,
+ u32 frame_index)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+ enum sci_base_request_states state;
+ enum sci_status status;
+ ssize_t word_cnt;
+
+ state = ireq->sm.current_state_id;
+ switch (state) {
+ case SCI_REQ_STARTED: {
+ struct ssp_frame_hdr ssp_hdr;
+ void *frame_header;
+
+ sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ &frame_header);
+
+ word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
+ sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
+
+ if (ssp_hdr.frame_type == SSP_RESPONSE) {
+ struct ssp_response_iu *resp_iu;
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&resp_iu);
+
+ sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
+
+ resp_iu = &ireq->ssp.rsp;
+
+ if (resp_iu->datapres == 0x01 ||
+ resp_iu->datapres == 0x02) {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ } else {
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ }
+ } else {
+ /* not a response frame, why did it get forwarded? */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p received unexpected "
+ "frame %d type 0x%02x\n", __func__, ireq,
+ frame_index, ssp_hdr.frame_type);
+ }
+
+ /*
+ * In any case we are done with this frame buffer return it to
+ * the controller
+ */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return SCI_SUCCESS;
+ }
+
+ case SCI_REQ_TASK_WAIT_TC_RESP:
+ sci_io_request_copy_response(ireq);
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_SUCCESS;
+
+ case SCI_REQ_SMP_WAIT_RESP: {
+ struct smp_resp *rsp_hdr = &ireq->smp.rsp;
+ void *frame_header;
+
+ sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ &frame_header);
+
+ /* byte swap the header. */
+ word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
+ sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
+
+ if (rsp_hdr->frame_type == SMP_RESPONSE) {
+ void *smp_resp;
+
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ &smp_resp);
+
+ word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
+ sizeof(u32);
+
+ sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
+ smp_resp, word_cnt);
+
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
+ } else {
+ /*
+ * This was not a response frame; why did it get
+ * forwarded?
+ */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC SMP Request 0x%p received unexpected "
+ "frame %d type 0x%02x\n",
+ __func__,
+ ireq,
+ frame_index,
+ rsp_hdr->frame_type);
+
+ ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ }
+
+ sci_controller_release_frame(ihost, frame_index);
+
+ return SCI_SUCCESS;
+ }
+
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ return sci_stp_request_udma_general_frame_handler(ireq,
+ frame_index);
+
+ case SCI_REQ_STP_UDMA_WAIT_D2H:
+ /* Use the general frame handler to copy the response data */
+ status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+
+ case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
+
+ return status;
+ }
+
+ switch (frame_header->fis_type) {
+ case FIS_REGD2H:
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ /* The command has completed with error */
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ break;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: IO Request:0x%p Frame Id:%d protocol "
+ "violation occurred\n", __func__, stp_req,
+ frame_index);
+
+ ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
+ ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
+ break;
+ }
+
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+ /* Frame has been decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+ }
+
+ case SCI_REQ_STP_PIO_WAIT_FRAME: {
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__, stp_req, frame_index, status);
+ return status;
+ }
+
+ switch (frame_header->fis_type) {
+ case FIS_PIO_SETUP:
+ /* Get from the frame buffer the PIO Setup Data */
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ /* Get the data from the PIO Setup.  The SCU hardware
+ * returns the first word in the frame_header and the rest
+ * of the data is in the frame buffer, so we need to
+ * back up one dword
+ */
+
+ /* transfer_count: first 16bits in the 4th dword */
+ stp_req->pio_len = frame_buffer[3] & 0xffff;
+
+ /* status: 4th byte in the 3rd dword */
+ stp_req->status = (frame_buffer[2] >> 24) & 0xff;
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ ireq->stp.rsp.status = stp_req->status;
+
+ /* The next state is dependent on whether the
+ * request was PIO Data-in or Data out
+ */
+ if (task->data_dir == DMA_FROM_DEVICE) {
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
+ } else if (task->data_dir == DMA_TO_DEVICE) {
+ /* Transmit data */
+ status = sci_stp_request_pio_data_out_transmit_data(ireq);
+ if (status != SCI_SUCCESS)
+ break;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
+ }
+ break;
+
+ case FIS_SETDEVBITS:
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ break;
+
+ case FIS_REGD2H:
+ if (frame_header->status & ATA_BUSY) {
+ /*
+ * Now why is the drive sending a D2H Register
+ * FIS when it is still busy? Do nothing since
+ * we are still in the right state.
+ */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCIC PIO Request 0x%p received "
+ "D2H Register FIS with BSY status "
+ "0x%x\n",
+ __func__,
+ stp_req,
+ frame_header->status);
+ break;
+ }
+
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.req,
+ frame_header,
+ frame_buffer);
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+
+ default:
+ /* FIXME: what do we do here? */
+ break;
+ }
+
+ /* Frame is decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+ }
+
+ case SCI_REQ_STP_PIO_DATA_IN: {
+ struct dev_to_host_fis *frame_header;
+ struct sata_fis_data *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
+ return status;
+ }
+
+ if (frame_header->fis_type != FIS_DATA) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC PIO Request 0x%p received frame %d "
+ "with fis type 0x%02x when expecting a data "
+ "fis.\n",
+ __func__,
+ stp_req,
+ frame_index,
+ frame_header->fis_type);
+
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+ /* Frame is decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+ return status;
+ }
+
+ if (stp_req->sgl.index < 0) {
+ ireq->saved_rx_frame_index = frame_index;
+ stp_req->pio_len = 0;
+ } else {
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ status = sci_stp_request_pio_data_in_copy_data(stp_req,
+ (u8 *)frame_buffer);
+
+ /* Frame is decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+ }
+
+ /* Check for the end of the transfer, are there more
+ * bytes remaining for this data transfer
+ */
+ if (status != SCI_SUCCESS || stp_req->pio_len != 0)
+ return status;
+
+ if ((stp_req->status & ATA_BUSY) == 0) {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ } else {
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ }
+ return status;
+ }
+
+ case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
+ return status;
+ }
+
+ switch (frame_header->fis_type) {
+ case FIS_REGD2H:
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ /* The command has completed with error */
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ break;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: IO Request:0x%p Frame Id:%d protocol "
+ "violation occurred\n",
+ __func__,
+ stp_req,
+ frame_index);
+
+ ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
+ ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
+ break;
+ }
+
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+		/* Frame has been decoded; return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+ }
+ case SCI_REQ_ABORTING:
+ /*
+ * TODO: Is it even possible to get an unsolicited frame in the
+ * aborting state?
+ */
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_SUCCESS;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC IO Request given unexpected frame %x while "
+ "in state %d\n",
+ __func__,
+ frame_index,
+ state);
+
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+		/* We must check the response buffer to see if the D2H
+ * Register FIS was received before we got the TC
+ * completion.
+ */
+ if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
+ sci_remote_device_suspend(ireq->target_device,
+ SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ } else {
+ /* If we have an error completion status for the
+ * TC then we can expect a D2H register FIS from
+ * the device so we must change state to wait
+ * for it
+ */
+ sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
+ }
+ break;
+
+ /* TODO Check to see if any of these completion status need to
+ * wait for the device to host register fis.
+ */
+ /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
+ * - this comes only for B0
+ */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
+ sci_remote_device_suspend(ireq->target_device,
+ SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+ /* Fall through to the default case */
+ default:
+ /* All other completion status cause the IO to be complete. */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
+static enum sci_status
+stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
+ break;
+
+ default:
+ /*
+ * All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be complete. If
+ * a NAK was received, then it is up to the user to retry the
+ * request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status
+sci_io_request_tc_completion(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_base_request_states state;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+
+ switch (state) {
+ case SCI_REQ_STARTED:
+ return request_started_state_tc_event(ireq, completion_code);
+
+ case SCI_REQ_TASK_WAIT_TC_COMP:
+ return ssp_task_request_await_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_SMP_WAIT_RESP:
+ return smp_request_await_response_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_SMP_WAIT_TC_COMP:
+ return smp_request_await_tc_event(ireq, completion_code);
+
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ return stp_request_udma_await_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ return stp_request_non_data_await_h2d_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_PIO_WAIT_H2D:
+ return stp_request_pio_await_h2d_completion_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_PIO_DATA_OUT:
+ return pio_data_out_tx_done_tc_event(ireq, completion_code);
+
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
+ return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
+ return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_ABORTING:
+ return request_aborting_state_tc_event(ireq,
+ completion_code);
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC IO Request given task completion "
+ "notification %x while in wrong state %d\n",
+ __func__,
+ completion_code,
+ state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+/**
+ * isci_request_process_response_iu() - This function sets the status and
+ * response iu, in the task struct, from the request object for the upper
+ * layer driver.
+ * @task: This parameter is the task struct from the upper layer driver.
+ * @resp_iu: This parameter points to the response iu of the completed request.
+ * @dev: This parameter specifies the linux device struct.
+ *
+ * none.
+ */
+static void isci_request_process_response_iu(
+ struct sas_task *task,
+ struct ssp_response_iu *resp_iu,
+ struct device *dev)
+{
+ dev_dbg(dev,
+ "%s: resp_iu = %p "
+ "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
+ "resp_iu->response_data_len = %x, "
+		"resp_iu->sense_data_len = %x\nresponse data: ",
+ __func__,
+ resp_iu,
+ resp_iu->status,
+ resp_iu->datapres,
+ resp_iu->response_data_len,
+ resp_iu->sense_data_len);
+
+ task->task_status.stat = resp_iu->status;
+
+ /* libsas updates the task status fields based on the response iu. */
+ sas_ssp_task_response(dev, task, resp_iu);
+}
+
+/**
+ * isci_request_set_open_reject_status() - This function prepares the I/O
+ * completion for OPEN_REJECT conditions.
+ * @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the sas_task being completed for the upper layer.
+ * @response_ptr: This parameter specifies the service response for the I/O.
+ * @status_ptr: This parameter specifies the exec status for the I/O.
+ * @complete_to_host_ptr: This parameter specifies the action to be taken by
+ * the LLDD with respect to completing this request or forcing an abort
+ * condition on the I/O.
+ * @open_rej_reason: This parameter specifies the encoded reason for the
+ * abandon-class reject.
+ *
+ * none.
+ */
+static void isci_request_set_open_reject_status(
+ struct isci_request *request,
+ struct sas_task *task,
+ enum service_response *response_ptr,
+ enum exec_status *status_ptr,
+ enum isci_completion_selection *complete_to_host_ptr,
+ enum sas_open_rej_reason open_rej_reason)
+{
+ /* Task in the target is done. */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ *response_ptr = SAS_TASK_UNDELIVERED;
+ *status_ptr = SAS_OPEN_REJECT;
+ *complete_to_host_ptr = isci_perform_normal_io_completion;
+ task->task_status.open_rej_reason = open_rej_reason;
+}
+
+/**
+ * isci_request_handle_controller_specific_errors() - This function decodes
+ * controller-specific I/O completion error conditions.
+ * @idev: This parameter is the remote device on which the I/O completed.
+ * @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the sas_task associated with the completed request.
+ * @response_ptr: This parameter specifies the service response for the I/O.
+ * @status_ptr: This parameter specifies the exec status for the I/O.
+ * @complete_to_host_ptr: This parameter specifies the action to be taken by
+ * the LLDD with respect to completing this request or forcing an abort
+ * condition on the I/O.
+ *
+ * none.
+ */
+static void isci_request_handle_controller_specific_errors(
+ struct isci_remote_device *idev,
+ struct isci_request *request,
+ struct sas_task *task,
+ enum service_response *response_ptr,
+ enum exec_status *status_ptr,
+ enum isci_completion_selection *complete_to_host_ptr)
+{
+ unsigned int cstatus;
+
+ cstatus = request->scu_status;
+
+ dev_dbg(&request->isci_host->pdev->dev,
+ "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
+ "- controller status = 0x%x\n",
+ __func__, request, cstatus);
+
+ /* Decode the controller-specific errors; most
+ * important is to recognize those conditions in which
+ * the target may still have a task outstanding that
+ * must be aborted.
+ *
+	 * Note that there are SCU completion codes named in the
+	 * decode below that SCIC has already handled in some way
+	 * other than as a controller-specific completion code;
+	 * these are left in the decode below for completeness' sake.
+ */
+ switch (cstatus) {
+ case SCU_TASK_DONE_DMASETUP_DIRERR:
+ /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
+ case SCU_TASK_DONE_XFERCNT_ERR:
+ /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
+ if (task->task_proto == SAS_PROTOCOL_SMP) {
+ /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
+ *response_ptr = SAS_TASK_COMPLETE;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ *status_ptr = SAS_DEVICE_UNKNOWN;
+ else
+ *status_ptr = SAS_ABORTED_TASK;
+
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr =
+ isci_perform_normal_io_completion;
+ } else {
+ /* Task in the target is not done. */
+ *response_ptr = SAS_TASK_UNDELIVERED;
+
+ if (!idev)
+ *status_ptr = SAS_DEVICE_UNKNOWN;
+ else
+ *status_ptr = SAM_STAT_TASK_ABORTED;
+
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr =
+ isci_perform_error_io_completion;
+ }
+
+ break;
+
+ case SCU_TASK_DONE_CRC_ERR:
+ case SCU_TASK_DONE_NAK_CMD_ERR:
+ case SCU_TASK_DONE_EXCESS_DATA:
+ case SCU_TASK_DONE_UNEXP_FIS:
+ /* Also SCU_TASK_DONE_UNEXP_RESP: */
+ case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
+ case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
+ case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
+ /* These are conditions in which the target
+ * has completed the task, so that no cleanup
+ * is necessary.
+ */
+ *response_ptr = SAS_TASK_COMPLETE;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ *status_ptr = SAS_DEVICE_UNKNOWN;
+ else
+ *status_ptr = SAS_ABORTED_TASK;
+
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr = isci_perform_normal_io_completion;
+ break;
+
+
+ /* Note that the only open reject completion codes seen here will be
+ * abandon-class codes; all others are automatically retried in the SCU.
+ */
+ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+
+ /* Note - the return of AB0 will change when
+ * libsas implements detection of zone violations.
+ */
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_RESV_AB0);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_RESV_AB1);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_RESV_AB2);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_RESV_AB3);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_BAD_DEST);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_STP_NORES);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_EPROTO);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_CONN_RATE);
+ break;
+
+ case SCU_TASK_DONE_LL_R_ERR:
+ /* Also SCU_TASK_DONE_ACK_NAK_TO: */
+ case SCU_TASK_DONE_LL_PERR:
+ case SCU_TASK_DONE_LL_SY_TERM:
+ /* Also SCU_TASK_DONE_NAK_ERR:*/
+ case SCU_TASK_DONE_LL_LF_TERM:
+ /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
+ case SCU_TASK_DONE_LL_ABORT_ERR:
+ case SCU_TASK_DONE_SEQ_INV_TYPE:
+ /* Also SCU_TASK_DONE_UNEXP_XR: */
+ case SCU_TASK_DONE_XR_IU_LEN_ERR:
+ case SCU_TASK_DONE_INV_FIS_LEN:
+ /* Also SCU_TASK_DONE_XR_WD_LEN: */
+ case SCU_TASK_DONE_SDMA_ERR:
+ case SCU_TASK_DONE_OFFSET_ERR:
+ case SCU_TASK_DONE_MAX_PLD_ERR:
+ case SCU_TASK_DONE_LF_ERR:
+ case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
+ case SCU_TASK_DONE_SMP_LL_RX_ERR:
+ case SCU_TASK_DONE_UNEXP_DATA:
+ case SCU_TASK_DONE_UNEXP_SDBFIS:
+ case SCU_TASK_DONE_REG_ERR:
+ case SCU_TASK_DONE_SDB_ERR:
+ case SCU_TASK_DONE_TASK_ABORT:
+ default:
+ /* Task in the target is not done. */
+ *response_ptr = SAS_TASK_UNDELIVERED;
+ *status_ptr = SAM_STAT_TASK_ABORTED;
+
+ if (task->task_proto == SAS_PROTOCOL_SMP) {
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr = isci_perform_normal_io_completion;
+ } else {
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr = isci_perform_error_io_completion;
+ }
+ break;
+ }
+}
+
+/**
+ * isci_task_save_for_upper_layer_completion() - This function saves the
+ * request for later completion to the upper layer driver.
 + * @host: This parameter is a pointer to the host on which the request
+ * should be queued (either as an error or success).
+ * @request: This parameter is the completed request.
+ * @response: This parameter is the response code for the completed task.
+ * @status: This parameter is the status code for the completed task.
+ * @task_notification_selection: This parameter specifies how the completion
+ *    is reported to libsas (normal, aborted, or error path).
+ *
+ * none.
+ */
+static void isci_task_save_for_upper_layer_completion(
+ struct isci_host *host,
+ struct isci_request *request,
+ enum service_response response,
+ enum exec_status status,
+ enum isci_completion_selection task_notification_selection)
+{
+ struct sas_task *task = isci_request_access_task(request);
+
+ task_notification_selection
+ = isci_task_set_completion_status(task, response, status,
+ task_notification_selection);
+
+ /* Tasks aborted specifically by a call to the lldd_abort_task
+ * function should not be completed to the host in the regular path.
+ */
+ switch (task_notification_selection) {
+
+ case isci_perform_normal_io_completion:
+
+ /* Normal notification (task_done) */
+ dev_dbg(&host->pdev->dev,
+ "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
+ __func__,
+ task,
+ task->task_status.resp, response,
+ task->task_status.stat, status);
+ /* Add to the completed list. */
+ list_add(&request->completed_node,
+ &host->requests_to_complete);
+
+ /* Take the request off the device's pending request list. */
+ list_del_init(&request->dev_node);
+ break;
+
+ case isci_perform_aborted_io_completion:
+ /* No notification to libsas because this request is
+ * already in the abort path.
+ */
+ dev_dbg(&host->pdev->dev,
+ "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
+ __func__,
+ task,
+ task->task_status.resp, response,
+ task->task_status.stat, status);
+
+ /* Wake up whatever process was waiting for this
+ * request to complete.
+ */
+ WARN_ON(request->io_request_completion == NULL);
+
+ if (request->io_request_completion != NULL) {
+
+ /* Signal whoever is waiting that this
+ * request is complete.
+ */
+ complete(request->io_request_completion);
+ }
+ break;
+
+ case isci_perform_error_io_completion:
+ /* Use sas_task_abort */
+ dev_dbg(&host->pdev->dev,
+ "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
+ __func__,
+ task,
+ task->task_status.resp, response,
+ task->task_status.stat, status);
+ /* Add to the aborted list. */
+ list_add(&request->completed_node,
+ &host->requests_to_errorback);
+ break;
+
+ default:
+ dev_dbg(&host->pdev->dev,
+ "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
+ __func__,
+ task,
+ task->task_status.resp, response,
+ task->task_status.stat, status);
+
+ /* Add to the error to libsas list. */
+ list_add(&request->completed_node,
+ &host->requests_to_errorback);
+ break;
+ }
+}
+
+static void isci_request_process_stp_response(struct sas_task *task,
+ void *response_buffer)
+{
+ struct dev_to_host_fis *d2h_reg_fis = response_buffer;
+ struct task_status_struct *ts = &task->task_status;
+ struct ata_task_resp *resp = (void *)&ts->buf[0];
+
+ resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6));
+ memcpy(&resp->ending_fis[0], response_buffer + 16, 24);
+ ts->buf_valid_size = sizeof(*resp);
+
+	/*
+	 * If the device fault bit is set in the status register, then
+	 * report a protocol response so the ending FIS is examined;
+	 * otherwise report good status.
+	 */
+ if (d2h_reg_fis->status & ATA_DF)
+ ts->stat = SAS_PROTO_RESPONSE;
+ else
+ ts->stat = SAM_STAT_GOOD;
+
+ ts->resp = SAS_TASK_COMPLETE;
+}
+
+static void isci_request_io_request_complete(struct isci_host *ihost,
+ struct isci_request *request,
+ enum sci_io_status completion_status)
+{
+ struct sas_task *task = isci_request_access_task(request);
+ struct ssp_response_iu *resp_iu;
+ void *resp_buf;
+ unsigned long task_flags;
+ struct isci_remote_device *idev = isci_lookup_device(task->dev);
+ enum service_response response = SAS_TASK_UNDELIVERED;
+ enum exec_status status = SAS_ABORTED_TASK;
+ enum isci_request_status request_status;
+ enum isci_completion_selection complete_to_host
+ = isci_perform_normal_io_completion;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: request = %p, task = %p,\n"
+ "task->data_dir = %d completion_status = 0x%x\n",
+ __func__,
+ request,
+ task,
+ task->data_dir,
+ completion_status);
+
+ spin_lock(&request->state_lock);
+ request_status = request->status;
+
+ /* Decode the request status. Note that if the request has been
+ * aborted by a task management function, we don't care
+ * what the status is.
+ */
+ switch (request_status) {
+
+ case aborted:
+ /* "aborted" indicates that the request was aborted by a task
+ * management function, since once a task management request is
+		 * performed by the device, the request only completes because
+ * of the subsequent driver terminate.
+ *
+ * Aborted also means an external thread is explicitly managing
+ * this request, so that we do not complete it up the stack.
+ *
+ * The target is still there (since the TMF was successful).
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = SAS_TASK_COMPLETE;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ complete_to_host = isci_perform_aborted_io_completion;
+ /* This was an aborted request. */
+
+ spin_unlock(&request->state_lock);
+ break;
+
+ case aborting:
+ /* aborting means that the task management function tried and
+ * failed to abort the request. We need to note the request
+ * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
+ * target as down.
+ *
+ * Aborting also means an external thread is explicitly managing
+ * this request, so that we do not complete it up the stack.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = SAS_TASK_UNDELIVERED;
+
+ if (!idev)
+			/* The device has been/is being stopped.  Note that
+ * we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_PHY_DOWN;
+
+ complete_to_host = isci_perform_aborted_io_completion;
+
+ /* This was an aborted request. */
+
+ spin_unlock(&request->state_lock);
+ break;
+
+ case terminating:
+
+		/* This was a terminated request.  This happens when
+ * the I/O is being terminated because of an action on
+ * the device (reset, tear down, etc.), and the I/O needs
+ * to be completed up the stack.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = SAS_TASK_UNDELIVERED;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ complete_to_host = isci_perform_aborted_io_completion;
+
+ /* This was a terminated request. */
+
+ spin_unlock(&request->state_lock);
+ break;
+
+ case dead:
+ /* This was a terminated request that timed-out during the
+ * termination process. There is no task to complete to
+ * libsas.
+ */
+ complete_to_host = isci_perform_normal_io_completion;
+ spin_unlock(&request->state_lock);
+ break;
+
+ default:
+
+ /* The request is done from an SCU HW perspective. */
+ request->status = completed;
+
+ spin_unlock(&request->state_lock);
+
+ /* This is an active request being completed from the core. */
+ switch (completion_status) {
+
+ case SCI_IO_FAILURE_RESPONSE_VALID:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
+ __func__,
+ request,
+ task);
+
+ if (sas_protocol_ata(task->task_proto)) {
+ resp_buf = &request->stp.rsp;
+ isci_request_process_stp_response(task,
+ resp_buf);
+ } else if (SAS_PROTOCOL_SSP == task->task_proto) {
+
+ /* crack the iu response buffer. */
+ resp_iu = &request->ssp.rsp;
+ isci_request_process_response_iu(task, resp_iu,
+ &ihost->pdev->dev);
+
+ } else if (SAS_PROTOCOL_SMP == task->task_proto) {
+
+ dev_err(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
+ "SAS_PROTOCOL_SMP protocol\n",
+ __func__);
+
+ } else
+ dev_err(&ihost->pdev->dev,
+ "%s: unknown protocol\n", __func__);
+
+ /* use the task status set in the task struct by the
+ * isci_request_process_response_iu call.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = task->task_status.resp;
+ status = task->task_status.stat;
+ break;
+
+ case SCI_IO_SUCCESS:
+ case SCI_IO_SUCCESS_IO_DONE_EARLY:
+
+ response = SAS_TASK_COMPLETE;
+ status = SAM_STAT_GOOD;
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ if (task->task_proto == SAS_PROTOCOL_SMP) {
+ void *rsp = &request->smp.rsp;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SMP protocol completion\n",
+ __func__);
+
+ sg_copy_from_buffer(
+ &task->smp_task.smp_resp, 1,
+ rsp, sizeof(struct smp_resp));
+ } else if (completion_status
+ == SCI_IO_SUCCESS_IO_DONE_EARLY) {
+
+ /* This was an SSP / STP / SATA transfer.
+ * There is a possibility that less data than
+ * the maximum was transferred.
+ */
+ u32 transferred_length = sci_req_tx_bytes(request);
+
+ task->task_status.residual
+ = task->total_xfer_len - transferred_length;
+
+ /* If there were residual bytes, call this an
+ * underrun.
+ */
+ if (task->task_status.residual != 0)
+ status = SAS_DATA_UNDERRUN;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
+ __func__,
+ status);
+
+ } else
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_SUCCESS\n",
+ __func__);
+
+ break;
+
+ case SCI_IO_FAILURE_TERMINATED:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
+ __func__,
+ request,
+ task);
+
+ /* The request was terminated explicitly. No handling
+ * is needed in the SCSI error handler path.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = SAS_TASK_UNDELIVERED;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ complete_to_host = isci_perform_normal_io_completion;
+ break;
+
+ case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
+
+ isci_request_handle_controller_specific_errors(
+ idev, request, task, &response, &status,
+ &complete_to_host);
+
+ break;
+
+ case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
+ /* This is a special case, in that the I/O completion
+ * is telling us that the device needs a reset.
+ * In order for the device reset condition to be
+ * noticed, the I/O has to be handled in the error
+ * handler. Set the reset flag and cause the
+ * SCSI error thread to be scheduled.
+ */
+ spin_lock_irqsave(&task->task_state_lock, task_flags);
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, task_flags);
+
+ /* Fail the I/O. */
+ response = SAS_TASK_UNDELIVERED;
+ status = SAM_STAT_TASK_ABORTED;
+
+ complete_to_host = isci_perform_error_io_completion;
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
+
+ case SCI_FAILURE_RETRY_REQUIRED:
+
+ /* Fail the I/O so it can be retried. */
+ response = SAS_TASK_UNDELIVERED;
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ complete_to_host = isci_perform_normal_io_completion;
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
+
+
+ default:
+ /* Catch any otherwise unhandled error codes here. */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: invalid completion code: 0x%x - "
+ "isci_request = %p\n",
+ __func__, completion_status, request);
+
+ response = SAS_TASK_UNDELIVERED;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ if (SAS_PROTOCOL_SMP == task->task_proto) {
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ complete_to_host = isci_perform_normal_io_completion;
+ } else {
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ complete_to_host = isci_perform_error_io_completion;
+ }
+ break;
+ }
+ break;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ if (task->data_dir == DMA_NONE)
+ break;
+ if (task->num_scatter == 0)
+ /* 0 indicates a single dma address */
+ dma_unmap_single(&ihost->pdev->dev,
+ request->zero_scatter_daddr,
+ task->total_xfer_len, task->data_dir);
+ else /* unmap the sgl dma addresses */
+ dma_unmap_sg(&ihost->pdev->dev, task->scatter,
+ request->num_sg_entries, task->data_dir);
+ break;
+ case SAS_PROTOCOL_SMP: {
+ struct scatterlist *sg = &task->smp_task.smp_req;
+ struct smp_req *smp_req;
+ void *kaddr;
+
+ dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
+
+ /* need to swab it back in case the command buffer is re-used */
+ kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+ smp_req = kaddr + sg->offset;
+ sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
+ kunmap_atomic(kaddr, KM_IRQ0);
+ break;
+ }
+ default:
+ break;
+ }
+
+ /* Put the completed request on the correct list */
+ isci_task_save_for_upper_layer_completion(ihost, request, response,
+ status, complete_to_host
+ );
+
+ /* complete the io request to the core. */
+ sci_controller_complete_io(ihost, request->target_device, request);
+ isci_put_device(idev);
+
+ /* set terminated handle so it cannot be completed or
+ * terminated again, and to cause any calls into abort
+ * task to recognize the already completed case.
+ */
+ set_bit(IREQ_TERMINATED, &request->flags);
+}
+
+static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+ struct domain_device *dev = ireq->target_device->domain_dev;
+ struct sas_task *task;
+
+ /* XXX as hch said always creating an internal sas_task for tmf
+ * requests would simplify the driver
+ */
+ task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
+
+ /* all unaccelerated request types (non ssp or ncq) handled with
+ * substates
+ */
+ if (!task && dev->dev_type == SAS_END_DEV) {
+ sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
+ } else if (!task &&
+ (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
+ isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
+ sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
+ } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
+ sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
+ } else if (task && sas_protocol_ata(task->task_proto) &&
+ !task->ata_task.use_ncq) {
+ u32 state;
+
+ if (task->data_dir == DMA_NONE)
+ state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
+ else if (task->ata_task.dma_xfer)
+ state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
+ else /* PIO */
+ state = SCI_REQ_STP_PIO_WAIT_H2D;
+
+ sci_change_state(sm, state);
+ }
+}
+
+static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+ struct isci_host *ihost = ireq->owning_controller;
+
+ /* Tell the SCI_USER that the IO request is complete */
+ if (!test_bit(IREQ_TMF, &ireq->flags))
+ isci_request_io_request_complete(ihost, ireq,
+ ireq->sci_status);
+ else
+ isci_task_request_complete(ihost, ireq, ireq->sci_status);
+}
+
+static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ /* Setting the abort bit in the Task Context is required by the silicon. */
+ ireq->tc->abort = 1;
+}
+
+static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+ struct scu_task_context *tc = ireq->tc;
+ struct host_to_dev_fis *h2d_fis;
+ enum sci_status status;
+
+ /* Clear the SRST bit */
+ h2d_fis = &ireq->stp.cmd;
+ h2d_fis->control = 0;
+
+ /* Clear the TC control bit */
+ tc->control_frame = 0;
+
+ status = sci_controller_continue_io(ireq);
+ WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
+}
+
+static const struct sci_base_state sci_request_state_table[] = {
+ [SCI_REQ_INIT] = { },
+ [SCI_REQ_CONSTRUCTED] = { },
+ [SCI_REQ_STARTED] = {
+ .enter_state = sci_request_started_state_enter,
+ },
+ [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
+ .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
+ },
+ [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
+ [SCI_REQ_STP_PIO_WAIT_H2D] = {
+ .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
+ },
+ [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
+ [SCI_REQ_STP_PIO_DATA_IN] = { },
+ [SCI_REQ_STP_PIO_DATA_OUT] = { },
+ [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
+ [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
+ [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
+ .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
+ },
+ [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
+ .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
+ },
+ [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
+ [SCI_REQ_TASK_WAIT_TC_COMP] = { },
+ [SCI_REQ_TASK_WAIT_TC_RESP] = { },
+ [SCI_REQ_SMP_WAIT_RESP] = { },
+ [SCI_REQ_SMP_WAIT_TC_COMP] = { },
+ [SCI_REQ_COMPLETED] = {
+ .enter_state = sci_request_completed_state_enter,
+ },
+ [SCI_REQ_ABORTING] = {
+ .enter_state = sci_request_aborting_state_enter,
+ },
+ [SCI_REQ_FINAL] = { },
+};
+
+static void
+sci_general_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
+
+ ireq->target_device = idev;
+ ireq->protocol = SCIC_NO_PROTOCOL;
+ ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
+
+ ireq->sci_status = SCI_SUCCESS;
+ ireq->scu_status = 0;
+ ireq->post_context = 0xFFFFFFFF;
+}
+
+static enum sci_status
+sci_io_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status = SCI_SUCCESS;
+
+ /* Build the common part of the request */
+ sci_general_request_construct(ihost, idev, ireq);
+
+ if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+ return SCI_FAILURE_INVALID_REMOTE_DEVICE;
+
+ if (dev->dev_type == SAS_END_DEV)
+ /* pass */;
+ else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
+ memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
+ else if (dev_is_expander(dev))
+ /* pass */;
+ else
+ return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+ memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
+
+ return status;
+}
+
+enum sci_status sci_task_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 io_tag, struct isci_request *ireq)
+{
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status = SCI_SUCCESS;
+
+ /* Build the common part of the request */
+ sci_general_request_construct(ihost, idev, ireq);
+
+ if (dev->dev_type == SAS_END_DEV ||
+ dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ set_bit(IREQ_TMF, &ireq->flags);
+ memset(ireq->tc, 0, sizeof(struct scu_task_context));
+ } else
+ status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+ return status;
+}
+
+static enum sci_status isci_request_ssp_request_construct(
+ struct isci_request *request)
+{
+ enum sci_status status;
+
+ dev_dbg(&request->isci_host->pdev->dev,
+ "%s: request = %p\n",
+ __func__,
+ request);
+ status = sci_io_request_construct_basic_ssp(request);
+ return status;
+}
+
+static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct host_to_dev_fis *fis = &ireq->stp.cmd;
+ struct ata_queued_cmd *qc = task->uldd_task;
+ enum sci_status status;
+
+ dev_dbg(&ireq->isci_host->pdev->dev,
+ "%s: ireq = %p\n",
+ __func__,
+ ireq);
+
+ memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+ if (!task->ata_task.device_control_reg_update)
+ fis->flags |= 0x80;
+ fis->flags &= 0xF0;
+
+ status = sci_io_request_construct_basic_sata(ireq);
+
+ if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ)) {
+ fis->sector_count = qc->tag << 3;
+ ireq->tc->type.stp.ncq_tag = qc->tag;
+ }
+
+ return status;
+}
+
+static enum sci_status
+sci_io_request_construct_smp(struct device *dev,
+ struct isci_request *ireq,
+ struct sas_task *task)
+{
+ struct scatterlist *sg = &task->smp_task.smp_req;
+ struct isci_remote_device *idev;
+ struct scu_task_context *task_context;
+ struct isci_port *iport;
+ struct smp_req *smp_req;
+ void *kaddr;
+ u8 req_len;
+ u32 cmd;
+
+ kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+ smp_req = kaddr + sg->offset;
+ /*
+	 * Look at the SMP request's header fields; for certain SAS 1.x SMP
+ * functions under SAS 2.0, a zero request length really indicates
+ * a non-zero default length.
+ */
+ if (smp_req->req_len == 0) {
+ switch (smp_req->func) {
+ case SMP_DISCOVER:
+ case SMP_REPORT_PHY_ERR_LOG:
+ case SMP_REPORT_PHY_SATA:
+ case SMP_REPORT_ROUTE_INFO:
+ smp_req->req_len = 2;
+ break;
+ case SMP_CONF_ROUTE_INFO:
+ case SMP_PHY_CONTROL:
+ case SMP_PHY_TEST_FUNCTION:
+ smp_req->req_len = 9;
+ break;
+ /* Default - zero is a valid default for 2.0. */
+ }
+ }
+ req_len = smp_req->req_len;
+ sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
+ cmd = *(u32 *) smp_req;
+ kunmap_atomic(kaddr, KM_IRQ0);
+
+ if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
+ return SCI_FAILURE;
+
+ ireq->protocol = SCIC_SMP_PROTOCOL;
+
+ /* byte swap the smp request. */
+
+ task_context = ireq->tc;
+
+ idev = ireq->target_device;
+ iport = idev->owning_port;
+
+ /*
+	 * Fill in the TC with its required data
+ * 00h
+ */
+ task_context->priority = 0;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = idev->connection_rate;
+ task_context->protocol_engine_index = ISCI_PEG;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
+ task_context->abort = 0;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ /* 04h */
+ task_context->remote_node_index = idev->rnc.remote_node_index;
+ task_context->command_code = 0;
+ task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
+
+ /* 08h */
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 1;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ /* 0ch */
+ task_context->address_modifier = 0;
+
+ /* 10h */
+ task_context->ssp_command_iu_length = req_len;
+
+ /* 14h */
+ task_context->transfer_length_bytes = 0;
+
+ /*
+ * 18h ~ 30h, protocol specific
+	 * since the command IU has been built by the framework at this point,
+	 * we just copy the first DWord from the command IU to this location. */
+ memcpy(&task_context->type.smp, &cmd, sizeof(u32));
+
+ /*
+ * 40h
+ * "For SMP you could program it to zero. We would prefer that way
+ * so that done code will be consistent." - Venki
+ */
+ task_context->task_phase = 0;
+
+ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ ISCI_TAG_TCI(ireq->io_tag));
+ /*
+	 * Copy the physical address of the command buffer to the SCU Task
+	 * Context; the command buffer should not contain the command header.
+ */
+ task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
+ task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
+
+ /* SMP response comes as UF, so no need to set response IU address. */
+ task_context->response_iu_upper = 0;
+ task_context->response_iu_lower = 0;
+
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+/*
+ * isci_smp_request_build() - This function builds the smp request.
+ * @ireq: This parameter points to the isci_request allocated in the
+ * request construct function.
+ *
+ * SCI_SUCCESS on successful completion, or a specific failure code.
+ */
+static enum sci_status isci_smp_request_build(struct isci_request *ireq)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct device *dev = &ireq->isci_host->pdev->dev;
+ enum sci_status status = SCI_FAILURE;
+
+ status = sci_io_request_construct_smp(dev, ireq, task);
+ if (status != SCI_SUCCESS)
+ dev_dbg(&ireq->isci_host->pdev->dev,
+ "%s: failed with status = %d\n",
+ __func__,
+ status);
+
+ return status;
+}
+
+/**
+ * isci_io_request_build() - This function builds the io request object.
+ * @ihost: This parameter specifies the ISCI host object
+ * @request: This parameter points to the isci_request object allocated in the
+ * request construct function.
+ * @idev: This parameter is the handle for the sci core's remote device
+ * object that is the destination for this request.
+ *
+ * SCI_SUCCESS on successful completion, or a specific failure code.
+ */
+static enum sci_status isci_io_request_build(struct isci_host *ihost,
+ struct isci_request *request,
+ struct isci_remote_device *idev)
+{
+ enum sci_status status = SCI_SUCCESS;
+ struct sas_task *task = isci_request_access_task(request);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = 0x%p; request = %p, "
+ "num_scatter = %d\n",
+ __func__,
+ idev,
+ request,
+ task->num_scatter);
+
+ /* map the sgl addresses, if present.
+ * libata does the mapping for sata devices
+ * before we get the request.
+ */
+ if (task->num_scatter &&
+ !sas_protocol_ata(task->task_proto) &&
+ !(SAS_PROTOCOL_SMP & task->task_proto)) {
+
+ request->num_sg_entries = dma_map_sg(
+ &ihost->pdev->dev,
+ task->scatter,
+ task->num_scatter,
+ task->data_dir
+ );
+
+ if (request->num_sg_entries == 0)
+ return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ }
+
+ status = sci_io_request_construct(ihost, idev, request);
+
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: failed request construct\n",
+ __func__);
+ return SCI_FAILURE;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SMP:
+ status = isci_smp_request_build(request);
+ break;
+ case SAS_PROTOCOL_SSP:
+ status = isci_request_ssp_request_construct(request);
+ break;
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ status = isci_request_stp_request_construct(request);
+ break;
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: unknown protocol\n", __func__);
+ return SCI_FAILURE;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
+{
+ struct isci_request *ireq;
+
+ ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
+ ireq->io_tag = tag;
+ ireq->io_request_completion = NULL;
+ ireq->flags = 0;
+ ireq->num_sg_entries = 0;
+ INIT_LIST_HEAD(&ireq->completed_node);
+ INIT_LIST_HEAD(&ireq->dev_node);
+ isci_request_change_state(ireq, allocated);
+
+ return ireq;
+}
+
+static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+ struct sas_task *task,
+ u16 tag)
+{
+ struct isci_request *ireq;
+
+ ireq = isci_request_from_tag(ihost, tag);
+ ireq->ttype_ptr.io_task_ptr = task;
+ ireq->ttype = io_task;
+ task->lldd_task = ireq;
+
+ return ireq;
+}
+
+struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
+ struct isci_tmf *isci_tmf,
+ u16 tag)
+{
+ struct isci_request *ireq;
+
+ ireq = isci_request_from_tag(ihost, tag);
+ ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
+ ireq->ttype = tmf_task;
+
+ return ireq;
+}
+
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+ struct sas_task *task, u16 tag)
+{
+ enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+ struct isci_request *ireq;
+ unsigned long flags;
+ int ret = 0;
+
+ /* do common allocation and init of request object. */
+ ireq = isci_io_request_from_tag(ihost, task, tag);
+
+ status = isci_io_request_build(ihost, ireq, idev);
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: request_construct failed - status = 0x%x\n",
+ __func__,
+ status);
+ return status;
+ }
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
+
+ if (isci_task_is_ncq_recovery(task)) {
+
+ /* The device is in an NCQ recovery state. Issue the
+ * request on the task side. Note that it will
+ * complete on the I/O request side because the
+			 * request was built that way (i.e. the IREQ_TMF
+			 * flag is not set).
+ */
+ status = sci_controller_start_task(ihost,
+ idev,
+ ireq);
+ } else {
+ status = SCI_FAILURE;
+ }
+ } else {
+ /* send the request, let the core assign the IO TAG. */
+ status = sci_controller_start_io(ihost, idev,
+ ireq);
+ }
+
+ if (status != SCI_SUCCESS &&
+ status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: failed request start (0x%x)\n",
+ __func__, status);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ return status;
+ }
+
+ /* Either I/O started OK, or the core has signaled that
+ * the device needs a target reset.
+ *
+ * In either case, hold onto the I/O for later.
+ *
+	 * Update its status and add it to the list in the
+ * remote device object.
+ */
+ list_add(&ireq->dev_node, &idev->reqs_in_process);
+
+ if (status == SCI_SUCCESS) {
+ isci_request_change_state(ireq, started);
+ } else {
+ /* The request did not really start in the
+ * hardware, so clear the request handle
+ * here so no terminations will be done.
+ */
+ set_bit(IREQ_TERMINATED, &ireq->flags);
+ isci_request_change_state(ireq, completed);
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (status ==
+ SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ /* Signal libsas that we need the SCSI error
+ * handler thread to work on this I/O and that
+ * we want a device reset.
+ */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* Cause this task to be scheduled in the SCSI error
+ * handler thread.
+ */
+ isci_execpath_callback(ihost, task,
+ sas_task_abort);
+
+ /* Change the status, since we are holding
+ * the I/O until it is managed by the SCSI
+ * error handler.
+ */
+ status = SCI_SUCCESS;
+ }
+
+ return ret;
+}
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
new file mode 100644
index 00000000000..7a1d5a9778e
--- /dev/null
+++ b/drivers/scsi/isci/request.h
@@ -0,0 +1,448 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_REQUEST_H_
+#define _ISCI_REQUEST_H_
+
+#include "isci.h"
+#include "host.h"
+#include "scu_task_context.h"
+
+/**
+ * enum isci_request_status - This enum defines the possible states of an I/O
+ * request.
+ *
+ *
+ */
+enum isci_request_status {
+ unallocated = 0x00,
+ allocated = 0x01,
+ started = 0x02,
+ completed = 0x03,
+ aborting = 0x04,
+ aborted = 0x05,
+ terminating = 0x06,
+ dead = 0x07
+};
+
+enum task_type {
+ io_task = 0,
+ tmf_task = 1
+};
+
+enum sci_request_protocol {
+ SCIC_NO_PROTOCOL,
+ SCIC_SMP_PROTOCOL,
+ SCIC_SSP_PROTOCOL,
+ SCIC_STP_PROTOCOL
+}; /* XXX remove me, use sas_task.{dev|task_proto} instead */
+
+/**
+ * struct isci_stp_request - extra request infrastructure to handle pio/atapi protocol
+ * @pio_len - number of bytes requested at PIO setup
+ * @status - pio setup ending status value to tell us if we need
+ * to wait for another fis or if the transfer is complete. Upon
+ * receipt of a d2h fis this will be the status field of that fis.
+ * @sgl - track pio transfer progress as we iterate through the sgl
+ * @device_cdb_len - atapi device advertises its transfer constraints at setup
+ */
+struct isci_stp_request {
+ u32 pio_len;
+ u8 status;
+
+ struct isci_stp_pio_sgl {
+ int index;
+ u8 set;
+ u32 offset;
+ } sgl;
+ u32 device_cdb_len;
+};
+
+struct isci_request {
+ enum isci_request_status status;
+ #define IREQ_COMPLETE_IN_TARGET 0
+ #define IREQ_TERMINATED 1
+ #define IREQ_TMF 2
+ #define IREQ_ACTIVE 3
+ unsigned long flags;
+ /* XXX kill ttype and ttype_ptr, allocate full sas_task */
+ enum task_type ttype;
+ union ttype_ptr_union {
+ struct sas_task *io_task_ptr; /* When ttype==io_task */
+ struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
+ } ttype_ptr;
+ struct isci_host *isci_host;
+ /* For use in the requests_to_{complete|abort} lists: */
+ struct list_head completed_node;
+ /* For use in the reqs_in_process list: */
+ struct list_head dev_node;
+ spinlock_t state_lock;
+ dma_addr_t request_daddr;
+ dma_addr_t zero_scatter_daddr;
+ unsigned int num_sg_entries;
+ /* Note: "io_request_completion" is completed in two different ways
+ * depending on whether this is a TMF or regular request.
+ * - TMF requests are completed in the thread that started them;
+ * - regular requests are completed in the request completion callback
+ * function.
+ * This difference in operation allows the aborter of a TMF request
+ * to be sure that once the TMF request completes, the I/O that the
+ * TMF was aborting is guaranteed to have completed.
+ *
+ * XXX kill io_request_completion
+ */
+ struct completion *io_request_completion;
+ struct sci_base_state_machine sm;
+ struct isci_host *owning_controller;
+ struct isci_remote_device *target_device;
+ u16 io_tag;
+ enum sci_request_protocol protocol;
+ u32 scu_status; /* hardware result */
+ u32 sci_status; /* upper layer disposition */
+ u32 post_context;
+ struct scu_task_context *tc;
+ /* could be larger with sg chaining */
+ #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
+ struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
+ /* This field is a pointer to the stored rx frame data. It is used in
+ * STP internal requests and SMP response frames. If this field is
+ * non-NULL the saved frame must be released on IO request completion.
+ */
+ u32 saved_rx_frame_index;
+
+ union {
+ struct {
+ union {
+ struct ssp_cmd_iu cmd;
+ struct ssp_task_iu tmf;
+ };
+ union {
+ struct ssp_response_iu rsp;
+ u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
+ };
+ } ssp;
+ struct {
+ struct smp_resp rsp;
+ } smp;
+ struct {
+ struct isci_stp_request req;
+ struct host_to_dev_fis cmd;
+ struct dev_to_host_fis rsp;
+ } stp;
+ };
+};
+
+static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
+{
+ struct isci_request *ireq;
+
+ ireq = container_of(stp_req, typeof(*ireq), stp.req);
+ return ireq;
+}
+
+/**
+ * enum sci_base_request_states - This enumeration depicts all the states for
+ * the common request state machine.
+ *
+ *
+ */
+enum sci_base_request_states {
+ /*
+ * Simply the initial state for the base request state machine.
+ */
+ SCI_REQ_INIT,
+
+ /*
+ * This state indicates that the request has been constructed.
+ * This state is entered from the INITIAL state.
+ */
+ SCI_REQ_CONSTRUCTED,
+
+ /*
+ * This state indicates that the request has been started. This state
+ * is entered from the CONSTRUCTED state.
+ */
+ SCI_REQ_STARTED,
+
+ SCI_REQ_STP_UDMA_WAIT_TC_COMP,
+ SCI_REQ_STP_UDMA_WAIT_D2H,
+
+ SCI_REQ_STP_NON_DATA_WAIT_H2D,
+ SCI_REQ_STP_NON_DATA_WAIT_D2H,
+
+ SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
+ SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
+ SCI_REQ_STP_SOFT_RESET_WAIT_D2H,
+
+ /*
+ * While in this state the IO request object is waiting for the TC
+ * completion notification for the H2D Register FIS
+ */
+ SCI_REQ_STP_PIO_WAIT_H2D,
+
+ /*
+ * While in this state the IO request object is waiting for either a
+ * PIO Setup FIS or a D2H register FIS. The type of frame received is
+ * based on the result of the prior frame and line conditions.
+ */
+ SCI_REQ_STP_PIO_WAIT_FRAME,
+
+ /*
+ * While in this state the IO request object is waiting for a DATA
+ * frame from the device.
+ */
+ SCI_REQ_STP_PIO_DATA_IN,
+
+ /*
+ * While in this state the IO request object is waiting to transmit
+ * the next data frame to the device.
+ */
+ SCI_REQ_STP_PIO_DATA_OUT,
+
+ /*
+ * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
+ * task management request is waiting for the transmission of the
+ * initial frame (i.e. command, task, etc.).
+ */
+ SCI_REQ_TASK_WAIT_TC_COMP,
+
+ /*
+ * This sub-state indicates that the started task management request
+ * is waiting for the reception of an unsolicited frame
+ * (i.e. response IU).
+ */
+ SCI_REQ_TASK_WAIT_TC_RESP,
+
+ /*
+ * This sub-state indicates that the started task management request
+ * is waiting for the reception of an unsolicited frame
+ * (i.e. response IU).
+ */
+ SCI_REQ_SMP_WAIT_RESP,
+
+ /*
+ * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
+ * request is waiting for the transmission of the initial frame
+ * (i.e. command, task, etc.).
+ */
+ SCI_REQ_SMP_WAIT_TC_COMP,
+
+ /*
+ * This state indicates that the request has completed.
+ * This state is entered from the STARTED state. This state is entered
+ * from the ABORTING state.
+ */
+ SCI_REQ_COMPLETED,
+
+ /*
+ * This state indicates that the request is in the process of being
+ * terminated/aborted.
+ * This state is entered from the CONSTRUCTED state.
+ * This state is entered from the STARTED state.
+ */
+ SCI_REQ_ABORTING,
+
+ /*
+ * Simply the final state for the base request state machine.
+ */
+ SCI_REQ_FINAL,
+};
+
+enum sci_status sci_request_start(struct isci_request *ireq);
+enum sci_status sci_io_request_terminate(struct isci_request *ireq);
+enum sci_status
+sci_io_request_event_handler(struct isci_request *ireq,
+ u32 event_code);
+enum sci_status
+sci_io_request_frame_handler(struct isci_request *ireq,
+ u32 frame_index);
+enum sci_status
+sci_task_request_terminate(struct isci_request *ireq);
+extern enum sci_status
+sci_request_complete(struct isci_request *ireq);
+extern enum sci_status
+sci_io_request_tc_completion(struct isci_request *ireq, u32 code);
+
+/* XXX open code in caller */
+static inline dma_addr_t
+sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
+{
+	char *requested_addr = (char *)virt_addr;
+ char *base_addr = (char *)ireq;
+
+ BUG_ON(requested_addr < base_addr);
+ BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
+
+ return ireq->request_daddr + (requested_addr - base_addr);
+}
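
A minimal usage sketch of the helper above: it converts the kernel virtual address of any member embedded in the isci_request into the corresponding bus address by adding that member's offset to request_daddr. Assuming ireq points at a request whose backing memory is mapped at ireq->request_daddr, the DMA address of the embedded SSP response buffer could be obtained as:

	/* bus address of ireq->ssp.rsp = base bus address + member offset */
	dma_addr_t rsp_dma = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
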
+
+/**
+ * isci_request_change_state() - This function sets the status of the request
+ * object.
+ * @isci_request: This parameter points to the isci_request object
+ * @status: This parameter is the new status of the object
+ *
+ */
+static inline enum isci_request_status
+isci_request_change_state(struct isci_request *isci_request,
+ enum isci_request_status status)
+{
+ enum isci_request_status old_state;
+ unsigned long flags;
+
+	BUG_ON(isci_request == NULL);
+
+	dev_dbg(&isci_request->isci_host->pdev->dev,
+		"%s: isci_request = %p, state = 0x%x\n",
+		__func__,
+		isci_request,
+		status);
+
+ spin_lock_irqsave(&isci_request->state_lock, flags);
+ old_state = isci_request->status;
+ isci_request->status = status;
+ spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+ return old_state;
+}
+
+/**
+ * isci_request_change_started_to_newstate() - This function changes the status
+ *    of the request object to newstate if it is currently started or aborting.
+ * @isci_request: This parameter points to the isci_request object
+ * @completion_ptr: This parameter is saved as the kernel completion structure
+ *    signalled when the old request completes.
+ * @newstate: This parameter is the new status of the object
+ *
+ * Returns the request state previous to any change.
+ */
+static inline enum isci_request_status
+isci_request_change_started_to_newstate(struct isci_request *isci_request,
+ struct completion *completion_ptr,
+ enum isci_request_status newstate)
+{
+ enum isci_request_status old_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isci_request->state_lock, flags);
+
+ old_state = isci_request->status;
+
+ if (old_state == started || old_state == aborting) {
+ BUG_ON(isci_request->io_request_completion != NULL);
+
+ isci_request->io_request_completion = completion_ptr;
+ isci_request->status = newstate;
+ }
+
+ spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+ dev_dbg(&isci_request->isci_host->pdev->dev,
+ "%s: isci_request = %p, old_state = 0x%x\n",
+ __func__,
+ isci_request,
+ old_state);
+
+ return old_state;
+}
+
+/**
+ * isci_request_change_started_to_aborted() - This function sets the status of
+ *    the request object to aborted if it is currently started or aborting.
+ * @isci_request: This parameter points to the isci_request object
+ * @completion_ptr: This parameter is saved as the kernel completion structure
+ *    signalled when the old request completes.
+ *
+ * Returns the request state previous to any change.
+ */
+static inline enum isci_request_status
+isci_request_change_started_to_aborted(struct isci_request *isci_request,
+ struct completion *completion_ptr)
+{
+ return isci_request_change_started_to_newstate(isci_request,
+ completion_ptr,
+ aborted);
+}
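
A sketch of how a caller might use this, assuming an on-stack completion and the standard kernel completion API; the wait only makes sense if the request really was in the started state:

	DECLARE_COMPLETION_ONSTACK(aborted_done);
	enum isci_request_status old_state;

	old_state = isci_request_change_started_to_aborted(ireq, &aborted_done);
	if (old_state == started)
		/* signalled by the completion path of the old request */
		wait_for_completion(&aborted_done);
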
+
+#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
+
+#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
+
+struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
+ struct isci_tmf *isci_tmf,
+ u16 tag);
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+ struct sas_task *task, u16 tag);
+void isci_terminate_pending_requests(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+enum sci_status
+sci_task_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 io_tag,
+ struct isci_request *ireq);
+enum sci_status
+sci_task_request_construct_ssp(struct isci_request *ireq);
+enum sci_status
+sci_task_request_construct_sata(struct isci_request *ireq);
+void sci_smp_request_copy_response(struct isci_request *ireq);
+
+static inline int isci_task_is_ncq_recovery(struct sas_task *task)
+{
+ return (sas_protocol_ata(task->task_proto) &&
+ task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT &&
+ task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
+}
+
+#endif /* !defined(_ISCI_REQUEST_H_) */
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
new file mode 100644
index 00000000000..462b15174d3
--- /dev/null
+++ b/drivers/scsi/isci/sas.h
@@ -0,0 +1,219 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCI_SAS_H_
+#define _SCI_SAS_H_
+
+#include <linux/kernel.h>
+
+/*
+ * SATA FIS Types: these constants depict the various SATA FIS types defined
+ * in the serial ATA specification.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+#define FIS_REGH2D 0x27
+#define FIS_REGD2H 0x34
+#define FIS_SETDEVBITS 0xA1
+#define FIS_DMA_ACTIVATE 0x39
+#define FIS_DMA_SETUP 0x41
+#define FIS_BIST_ACTIVATE 0x58
+#define FIS_PIO_SETUP 0x5F
+#define FIS_DATA 0x46
+
+/**************************************************************************/
+#define SSP_RESP_IU_MAX_SIZE 280
+
+/*
+ * contents of the SSP COMMAND INFORMATION UNIT.
+ * For specific information on each of these individual fields please
+ * reference the SAS specification SSP transport layer section.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+struct ssp_cmd_iu {
+ u8 LUN[8];
+ u8 add_cdb_len:6;
+ u8 _r_a:2;
+ u8 _r_b;
+ u8 en_fburst:1;
+ u8 task_prio:4;
+ u8 task_attr:3;
+ u8 _r_c;
+
+ u8 cdb[16];
+} __packed;
+
+/*
+ * contents of the SSP TASK INFORMATION UNIT.
+ * For specific information on each of these individual fields please
+ * reference the SAS specification SSP transport layer section.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+struct ssp_task_iu {
+ u8 LUN[8];
+ u8 _r_a;
+ u8 task_func;
+ u8 _r_b[4];
+ u16 task_tag;
+ u8 _r_c[12];
+} __packed;
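
A filling sketch for the task IU above, assuming the TMF_ABORT_TASK function code from <scsi/sas.h> and caller-provided lun/tag values (byte ordering of the tag is left to whatever the surrounding transmit path expects):

	struct ssp_task_iu tmf = { };

	memcpy(tmf.LUN, lun, sizeof(tmf.LUN));
	tmf.task_func = TMF_ABORT_TASK;
	tmf.task_tag = tag;	/* tag of the task being managed */
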
+
+
+/*
+ * struct smp_req_phy_id - This structure defines the contents of
+ * an SMP Request that is composed of the struct smp_req header and a
+ * phy identifier.
+ * Examples: SMP_REQUEST_DISCOVER, SMP_REQUEST_REPORT_PHY_SATA.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_phy_id {
+ u8 _r_a[4]; /* bytes 4-7 */
+
+ u8 ign_zone_grp:1; /* byte 8 */
+ u8 _r_b:7;
+
+ u8 phy_id; /* byte 9 */
+ u8 _r_c; /* byte 10 */
+ u8 _r_d; /* byte 11 */
+} __packed;
+
+/*
+ * struct smp_req_config_route_info - This structure defines the
+ * contents of an SMP Configure Route Information request.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_conf_rtinfo {
+ u16 exp_change_cnt; /* bytes 4-5 */
+ u8 exp_rt_idx_hi; /* byte 6 */
+ u8 exp_rt_idx; /* byte 7 */
+
+ u8 _r_a; /* byte 8 */
+ u8 phy_id; /* byte 9 */
+ u16 _r_b; /* bytes 10-11 */
+
+ u8 _r_c:7; /* byte 12 */
+ u8 dis_rt_entry:1;
+ u8 _r_d[3]; /* bytes 13-15 */
+
+ u8 rt_sas_addr[8]; /* bytes 16-23 */
+ u8 _r_e[16]; /* bytes 24-39 */
+} __packed;
+
+/*
+ * struct smp_req_phycntl - This structure defines the contents of an
+ * SMP Phy Controller request.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_phycntl {
+ u16 exp_change_cnt; /* byte 4-5 */
+
+ u8 _r_a[3]; /* bytes 6-8 */
+
+ u8 phy_id; /* byte 9 */
+ u8 phy_op; /* byte 10 */
+
+ u8 upd_pathway:1; /* byte 11 */
+ u8 _r_b:7;
+
+ u8 _r_c[12]; /* byte 12-23 */
+
+ u8 att_dev_name[8]; /* byte 24-31 */
+
+ u8 _r_d:4; /* byte 32 */
+ u8 min_linkrate:4;
+
+ u8 _r_e:4; /* byte 33 */
+ u8 max_linkrate:4;
+
+ u8 _r_f[2]; /* byte 34-35 */
+
+ u8 pathway:4; /* byte 36 */
+ u8 _r_g:4;
+
+ u8 _r_h[3]; /* bytes 37-39 */
+} __packed;
+
+/*
+ * struct smp_req - This structure simply unionizes the existing request
+ * structures into a common request type.
+ *
+ * XXX: This data structure may need to go to scsi/sas.h
+ */
+struct smp_req {
+ u8 type; /* byte 0 */
+ u8 func; /* byte 1 */
+ u8 alloc_resp_len; /* byte 2 */
+ u8 req_len; /* byte 3 */
+ u8 req_data[0];
+} __packed;
+
+#define SMP_RESP_HDR_SZ 4
+
+/*
+ * struct sci_sas_address - This structure depicts how a SAS address is
+ * represented by SCI.
+ * XXX convert this to u8 [SAS_ADDR_SIZE] like the rest of libsas
+ *
+ */
+struct sci_sas_address {
+ u32 high;
+ u32 low;
+};
+#endif
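
A framing sketch for the SMP request header above, assuming the SMP_REQUEST frame type and SMP_DISCOVER function code from <scsi/sas.h> and a caller-supplied buffer large enough for the header plus the phy-id payload; buf and phy_id are placeholders:

	struct smp_req *hdr = buf;
	struct smp_req_phy_id *body = (struct smp_req_phy_id *)hdr->req_data;

	memset(buf, 0, sizeof(*hdr) + sizeof(*body));
	hdr->type = SMP_REQUEST;
	hdr->func = SMP_DISCOVER;
	hdr->req_len = sizeof(*body) / 4;	/* additional dwords after the header */
	body->phy_id = phy_id;			/* expander phy to discover */
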
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
new file mode 100644
index 00000000000..c8b329c695f
--- /dev/null
+++ b/drivers/scsi/isci/scu_completion_codes.h
@@ -0,0 +1,283 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_COMPLETION_CODES_HEADER_
+#define _SCU_COMPLETION_CODES_HEADER_
+
+/**
+ * This file contains the constants and macros for the SCU hardware completion
+ * codes.
+ *
+ *
+ */
+
+#define SCU_COMPLETION_TYPE_SHIFT 28
+#define SCU_COMPLETION_TYPE_MASK 0x70000000
+
+/**
+ * SCU_COMPLETION_TYPE() -
+ *
+ * This macro constructs an SCU completion type
+ */
+#define SCU_COMPLETION_TYPE(type) \
+ ((u32)(type) << SCU_COMPLETION_TYPE_SHIFT)
+
+/**
+ * SCU_COMPLETION_TYPE_xxx -
+ *
+ * These macros define the individual SCU completion types.
+ */
+#define SCU_COMPLETION_TYPE_TASK SCU_COMPLETION_TYPE(0)
+#define SCU_COMPLETION_TYPE_SDMA SCU_COMPLETION_TYPE(1)
+#define SCU_COMPLETION_TYPE_UFI SCU_COMPLETION_TYPE(2)
+#define SCU_COMPLETION_TYPE_EVENT SCU_COMPLETION_TYPE(3)
+#define SCU_COMPLETION_TYPE_NOTIFY SCU_COMPLETION_TYPE(4)
+
+/**
+ *
+ *
+ * These constants provide the shift and mask values for the various parts of
+ * an SCU completion code.
+ */
+#define SCU_COMPLETION_STATUS_MASK 0x0FFC0000
+#define SCU_COMPLETION_TL_STATUS_MASK 0x0FC00000
+#define SCU_COMPLETION_TL_STATUS_SHIFT 22
+#define SCU_COMPLETION_SDMA_STATUS_MASK 0x003C0000
+#define SCU_COMPLETION_PEG_MASK 0x00010000
+#define SCU_COMPLETION_PORT_MASK 0x00007000
+#define SCU_COMPLETION_PE_MASK SCU_COMPLETION_PORT_MASK
+#define SCU_COMPLETION_PE_SHIFT 12
+#define SCU_COMPLETION_INDEX_MASK 0x00000FFF
+
+/**
+ * SCU_GET_COMPLETION_TYPE() -
+ *
+ * This macro returns the SCU completion type.
+ */
+#define SCU_GET_COMPLETION_TYPE(completion_code) \
+ ((completion_code) & SCU_COMPLETION_TYPE_MASK)
+
+/**
+ * SCU_GET_COMPLETION_STATUS() -
+ *
+ * This macro returns the SCU completion status.
+ */
+#define SCU_GET_COMPLETION_STATUS(completion_code) \
+ ((completion_code) & SCU_COMPLETION_STATUS_MASK)
+
+/**
+ * SCU_GET_COMPLETION_TL_STATUS() -
+ *
+ * This macro returns the transport layer completion status.
+ */
+#define SCU_GET_COMPLETION_TL_STATUS(completion_code) \
+ ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK)
+
+/**
+ * SCU_MAKE_COMPLETION_STATUS() -
+ *
+ * This macro takes a completion code and performs the shift and mask
+ * operations to turn it into a value that can be compared to the result of
+ * SCU_GET_COMPLETION_TL_STATUS().
+ */
+#define SCU_MAKE_COMPLETION_STATUS(completion_code) \
+ ((u32)(completion_code) << SCU_COMPLETION_TL_STATUS_SHIFT)
+
+/**
+ * SCU_NORMALIZE_COMPLETION_STATUS() -
+ *
+ * This macro takes a SCU_GET_COMPLETION_TL_STATUS and normalizes it for a
+ * return code.
+ */
+#define SCU_NORMALIZE_COMPLETION_STATUS(completion_code) \
+ (\
+ ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK) \
+ >> SCU_COMPLETION_TL_STATUS_SHIFT \
+ )
+
+/**
+ * SCU_GET_COMPLETION_SDMA_STATUS() -
+ *
+ * This macro returns the SDMA completion status.
+ */
+#define SCU_GET_COMPLETION_SDMA_STATUS(completion_code) \
+ ((completion_code) & SCU_COMPLETION_SDMA_STATUS_MASK)
+
+/**
+ * SCU_GET_COMPLETION_PEG() -
+ *
+ * This macro returns the Protocol Engine Group from the completion code.
+ */
+#define SCU_GET_COMPLETION_PEG(completion_code) \
+ ((completion_code) & SCU_COMPLETION_PEG_MASK)
+
+/**
+ * SCU_GET_COMPLETION_PORT() -
+ *
+ * This macro returns the logical port index from the completion code.
+ */
+#define SCU_GET_COMPLETION_PORT(completion_code) \
+ ((completion_code) & SCU_COMPLETION_PORT_MASK)
+
+/**
+ * SCU_GET_PROTOCOL_ENGINE_INDEX() -
+ *
+ * This macro returns the PE index from the completion code.
+ */
+#define SCU_GET_PROTOCOL_ENGINE_INDEX(completion_code) \
+ (((completion_code) & SCU_COMPLETION_PE_MASK) >> SCU_COMPLETION_PE_SHIFT)
+
+/**
+ * SCU_GET_COMPLETION_INDEX() -
+ *
+ * This macro returns the index of the completion which is either a TCi or an
+ * RNi depending on the completion type.
+ */
+#define SCU_GET_COMPLETION_INDEX(completion_code) \
+ ((completion_code) & SCU_COMPLETION_INDEX_MASK)
+
+#define SCU_UNSOLICITED_FRAME_MASK 0x0FFF0000
+#define SCU_UNSOLICITED_FRAME_SHIFT 16
+
+/**
+ * SCU_GET_FRAME_INDEX() -
+ *
+ * This macro returns a normalized frame index from an unsolicited frame
+ * completion.
+ */
+#define SCU_GET_FRAME_INDEX(completion_code) \
+ (\
+ ((completion_code) & SCU_UNSOLICITED_FRAME_MASK) \
+ >> SCU_UNSOLICITED_FRAME_SHIFT \
+ )
+
+#define SCU_UNSOLICITED_FRAME_ERROR_MASK 0x00008000
+
+/**
+ * SCU_GET_FRAME_ERROR() -
+ *
+ * This macro returns a zero (0) value if there is no frame error otherwise it
+ * returns non-zero (!0).
+ */
+#define SCU_GET_FRAME_ERROR(completion_code) \
+ ((completion_code) & SCU_UNSOLICITED_FRAME_ERROR_MASK)
+
+/**
+ *
+ *
+ * These constants represent normalized completion codes which must be shifted
+ * left by SCU_COMPLETION_TL_STATUS_SHIFT to match them with the hardware
+ * completion code. On a compiler where immediate constants are only 16 bits
+ * wide (the size of an int), shifting them that far would completely lose the
+ * value. To ensure each value is the 32-bit value we want, each immediate
+ * value must be cast to a u32.
+ */
+#define SCU_TASK_DONE_GOOD ((u32)0x00)
+#define SCU_TASK_DONE_CRC_ERR ((u32)0x14)
+#define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14)
+#define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15)
+#define SCU_TASK_DONE_NAK_CMD_ERR ((u32)0x16)
+#define SCU_TASK_DONE_CMD_LL_R_ERR ((u32)0x16)
+#define SCU_TASK_DONE_LL_R_ERR ((u32)0x17)
+#define SCU_TASK_DONE_ACK_NAK_TO ((u32)0x17)
+#define SCU_TASK_DONE_LL_PERR ((u32)0x18)
+#define SCU_TASK_DONE_LL_SY_TERM ((u32)0x19)
+#define SCU_TASK_DONE_NAK_ERR ((u32)0x19)
+#define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A)
+#define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A)
+#define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B)
+#define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B)
+#define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C)
+#define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C)
+#define SCU_TASK_DONE_INV_FIS_TYPE ((u32)0x1D)
+#define SCU_TASK_DONE_XR_IU_LEN_ERR ((u32)0x1D)
+#define SCU_TASK_DONE_INV_FIS_LEN ((u32)0x1E)
+#define SCU_TASK_DONE_XR_WD_LEN ((u32)0x1E)
+#define SCU_TASK_DONE_SDMA_ERR ((u32)0x1F)
+#define SCU_TASK_DONE_OFFSET_ERR ((u32)0x20)
+#define SCU_TASK_DONE_MAX_PLD_ERR ((u32)0x21)
+#define SCU_TASK_DONE_EXCESS_DATA ((u32)0x22)
+#define SCU_TASK_DONE_LF_ERR ((u32)0x23)
+#define SCU_TASK_DONE_UNEXP_FIS ((u32)0x24)
+#define SCU_TASK_DONE_UNEXP_RESP ((u32)0x24)
+#define SCU_TASK_DONE_EARLY_RESP ((u32)0x25)
+#define SCU_TASK_DONE_SMP_RESP_TO_ERR ((u32)0x26)
+#define SCU_TASK_DONE_DMASETUP_DIRERR ((u32)0x27)
+#define SCU_TASK_DONE_SMP_UFI_ERR ((u32)0x27)
+#define SCU_TASK_DONE_XFERCNT_ERR ((u32)0x28)
+#define SCU_TASK_DONE_SMP_FRM_TYPE_ERR ((u32)0x28)
+#define SCU_TASK_DONE_SMP_LL_RX_ERR ((u32)0x29)
+#define SCU_TASK_DONE_RESP_LEN_ERR ((u32)0x2A)
+#define SCU_TASK_DONE_UNEXP_DATA ((u32)0x2B)
+#define SCU_TASK_DONE_OPEN_FAIL ((u32)0x2C)
+#define SCU_TASK_DONE_UNEXP_SDBFIS ((u32)0x2D)
+#define SCU_TASK_DONE_REG_ERR ((u32)0x2E)
+#define SCU_TASK_DONE_SDB_ERR ((u32)0x2F)
+#define SCU_TASK_DONE_TASK_ABORT ((u32)0x30)
+#define SCU_TASK_DONE_CMD_SDMA_ERR ((u32)0x32)
+#define SCU_TASK_DONE_CMD_LL_ABORT_ERR ((u32)0x33)
+#define SCU_TASK_OPEN_REJECT_WRONG_DESTINATION ((u32)0x34)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1 ((u32)0x35)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2 ((u32)0x36)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3 ((u32)0x37)
+#define SCU_TASK_OPEN_REJECT_BAD_DESTINATION ((u32)0x38)
+#define SCU_TASK_OPEN_REJECT_ZONE_VIOLATION ((u32)0x39)
+#define SCU_TASK_DONE_VIIT_ENTRY_NV ((u32)0x3A)
+#define SCU_TASK_DONE_IIT_ENTRY_NV ((u32)0x3B)
+#define SCU_TASK_DONE_RNCNV_OUTBOUND ((u32)0x3C)
+#define SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY ((u32)0x3D)
+#define SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED ((u32)0x3E)
+#define SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED ((u32)0x3F)
+
+#endif /* _SCU_COMPLETION_CODES_HEADER_ */
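
A decoding sketch tying the macros above together; raw_completion, handle_good_completion() and handle_task_error() are placeholders for the caller's completion source and handlers:

	u32 code = raw_completion;	/* 32-bit code read from the completion queue */

	if (SCU_GET_COMPLETION_TYPE(code) == SCU_COMPLETION_TYPE_TASK) {
		switch (SCU_GET_COMPLETION_TL_STATUS(code)) {
		case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
			/* SCU_GET_COMPLETION_INDEX() yields the TCi here */
			handle_good_completion(SCU_GET_COMPLETION_INDEX(code));
			break;
		default:
			handle_task_error(code);
			break;
		}
	}
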
diff --git a/drivers/scsi/isci/scu_event_codes.h b/drivers/scsi/isci/scu_event_codes.h
new file mode 100644
index 00000000000..36a945ad572
--- /dev/null
+++ b/drivers/scsi/isci/scu_event_codes.h
@@ -0,0 +1,336 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_EVENT_CODES_HEADER__
+#define __SCU_EVENT_CODES_HEADER__
+
+/**
+ * This file contains the constants and macros for the SCU event codes.
+ *
+ *
+ */
+
+#define SCU_EVENT_TYPE_CODE_SHIFT 24
+#define SCU_EVENT_TYPE_CODE_MASK 0x0F000000
+
+#define SCU_EVENT_SPECIFIC_CODE_SHIFT 18
+#define SCU_EVENT_SPECIFIC_CODE_MASK 0x00FC0000
+
+#define SCU_EVENT_CODE_MASK \
+ (SCU_EVENT_TYPE_CODE_MASK | SCU_EVENT_SPECIFIC_CODE_MASK)
+
+/**
+ * SCU_EVENT_TYPE() -
+ *
+ * This macro constructs an SCU event type from the type value.
+ */
+#define SCU_EVENT_TYPE(type) \
+ ((u32)(type) << SCU_EVENT_TYPE_CODE_SHIFT)
+
+/**
+ * SCU_EVENT_SPECIFIC() -
+ *
+ * This macro constructs an SCU event specifier from the code value.
+ */
+#define SCU_EVENT_SPECIFIC(code) \
+ ((u32)(code) << SCU_EVENT_SPECIFIC_CODE_SHIFT)
+
+/**
+ * SCU_EVENT_MESSAGE() -
+ *
+ * This macro combines an SCU event type and an SCU event specifier
+ * from the type and code values.
+ */
+#define SCU_EVENT_MESSAGE(type, code) \
+ ((type) | SCU_EVENT_SPECIFIC(code))
+
+/**
+ * SCU_EVENT_TYPE() -
+ *
+ * SCU_EVENT_TYPES
+ */
+#define SCU_EVENT_TYPE_SMU_COMMAND_ERROR SCU_EVENT_TYPE(0x08)
+#define SCU_EVENT_TYPE_SMU_PCQ_ERROR SCU_EVENT_TYPE(0x09)
+#define SCU_EVENT_TYPE_SMU_ERROR SCU_EVENT_TYPE(0x00)
+#define SCU_EVENT_TYPE_TRANSPORT_ERROR SCU_EVENT_TYPE(0x01)
+#define SCU_EVENT_TYPE_BROADCAST_CHANGE SCU_EVENT_TYPE(0x02)
+#define SCU_EVENT_TYPE_OSSP_EVENT SCU_EVENT_TYPE(0x03)
+#define SCU_EVENT_TYPE_FATAL_MEMORY_ERROR SCU_EVENT_TYPE(0x0F)
+#define SCU_EVENT_TYPE_RNC_SUSPEND_TX SCU_EVENT_TYPE(0x04)
+#define SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX SCU_EVENT_TYPE(0x05)
+#define SCU_EVENT_TYPE_RNC_OPS_MISC SCU_EVENT_TYPE(0x06)
+#define SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT SCU_EVENT_TYPE(0x07)
+#define SCU_EVENT_TYPE_ERR_CNT_EVENT SCU_EVENT_TYPE(0x0A)
+
+/**
+ *
+ *
+ * SCU_EVENT_SPECIFIERS
+ */
+#define SCU_EVENT_SPECIFIER_DRIVER_SUSPEND 0x20
+#define SCU_EVENT_SPECIFIER_RNC_RELEASE 0x00
+
+/**
+ *
+ *
+ * SMU_COMMAND_EVENTS
+ */
+#define SCU_EVENT_INVALID_CONTEXT_COMMAND \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_COMMAND_ERROR, 0x00)
+
+/**
+ *
+ *
+ * SMU_PCQ_EVENTS
+ */
+#define SCU_EVENT_UNCORRECTABLE_PCQ_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_PCQ_ERROR, 0x00)
+
+/**
+ *
+ *
+ * SMU_EVENTS
+ */
+#define SCU_EVENT_UNCORRECTABLE_REGISTER_WRITE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x02)
+#define SCU_EVENT_UNCORRECTABLE_REGISTER_READ \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x03)
+#define SCU_EVENT_PCIE_INTERFACE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x04)
+#define SCU_EVENT_FUNCTION_LEVEL_RESET \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x05)
+
+/**
+ *
+ *
+ * TRANSPORT_LEVEL_ERRORS
+ */
+#define SCU_EVENT_ACK_NAK_TIMEOUT_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_TRANSPORT_ERROR, 0x00)
+
+/**
+ *
+ *
+ * BROADCAST_CHANGE_EVENTS
+ */
+#define SCU_EVENT_BROADCAST_CHANGE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x01)
+#define SCU_EVENT_BROADCAST_RESERVED0 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x02)
+#define SCU_EVENT_BROADCAST_RESERVED1 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x03)
+#define SCU_EVENT_BROADCAST_SES \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x04)
+#define SCU_EVENT_BROADCAST_EXPANDER \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x05)
+#define SCU_EVENT_BROADCAST_AEN \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x06)
+#define SCU_EVENT_BROADCAST_RESERVED3 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x07)
+#define SCU_EVENT_BROADCAST_RESERVED4 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x08)
+#define SCU_EVENT_PE_SUSPENDED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x09)
+
+/**
+ *
+ *
+ * OSSP_EVENTS
+ */
+#define SCU_EVENT_PORT_SELECTOR_DETECTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x10)
+#define SCU_EVENT_SENT_PORT_SELECTION \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x11)
+#define SCU_EVENT_HARD_RESET_TRANSMITTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x12)
+#define SCU_EVENT_HARD_RESET_RECEIVED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x13)
+#define SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x15)
+#define SCU_EVENT_LINK_FAILURE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x16)
+#define SCU_EVENT_SATA_SPINUP_HOLD \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x17)
+#define SCU_EVENT_SAS_15_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x18)
+#define SCU_EVENT_SAS_15 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x19)
+#define SCU_EVENT_SAS_30_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1A)
+#define SCU_EVENT_SAS_30 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1B)
+#define SCU_EVENT_SAS_60_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1C)
+#define SCU_EVENT_SAS_60 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1D)
+#define SCU_EVENT_SATA_15_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1E)
+#define SCU_EVENT_SATA_15 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1F)
+#define SCU_EVENT_SATA_30_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x20)
+#define SCU_EVENT_SATA_30 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x21)
+#define SCU_EVENT_SATA_60_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x22)
+#define SCU_EVENT_SATA_60 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x23)
+#define SCU_EVENT_SAS_PHY_DETECTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x24)
+#define SCU_EVENT_SATA_PHY_DETECTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x25)
+
+/**
+ *
+ *
+ * FATAL_INTERNAL_MEMORY_ERROR_EVENTS
+ */
+#define SCU_EVENT_TSC_RNSC_UNCORRECTABLE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x00)
+#define SCU_EVENT_TC_RNC_UNCORRECTABLE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x01)
+#define SCU_EVENT_ZPT_UNCORRECTABLE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x02)
+
+/**
+ *
+ *
+ * REMOTE_NODE_SUSPEND_EVENTS
+ */
+#define SCU_EVENT_TL_RNC_SUSPEND_TX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x00)
+#define SCU_EVENT_TL_RNC_SUSPEND_TX_RX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x00)
+#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x20)
+#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX_RX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x20)
+
+/**
+ *
+ *
+ * REMOTE_NODE_MISC_EVENTS
+ */
+#define SCU_EVENT_POST_RCN_RELEASE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, SCU_EVENT_SPECIFIER_RNC_RELEASE)
+#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_ENABLE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x01)
+#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_DISABLE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x02)
+#define SCU_EVENT_POST_RNC_COMPLETE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x03)
+#define SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x04)
+
+/**
+ *
+ *
+ * ERROR_COUNT_EVENT
+ */
+#define SCU_EVENT_RX_CREDIT_BLOCKED_RECEIVED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x00)
+#define SCU_EVENT_TX_DONE_CREDIT_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x01)
+#define SCU_EVENT_RX_DONE_CREDIT_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x02)
+
+/**
+ * scu_get_event_type() -
+ *
+ * This macro returns the SCU event type from the event code.
+ */
+#define scu_get_event_type(event_code) \
+ ((event_code) & SCU_EVENT_TYPE_CODE_MASK)
+
+/**
+ * scu_get_event_specifier() -
+ *
+ * This macro returns the SCU event specifier from the event code.
+	 * This sub-state indicates that the started SMP request is waiting
+	 * for the reception of an unsolicited frame (i.e. the SMP response
+	 * frame).
+
+/**
+ * scu_get_event_code() -
+ *
+ * This macro returns the combined SCU event type and SCU event specifier from
+ * the event code.
+ */
+#define scu_get_event_code(event_code) \
+ ((event_code) & SCU_EVENT_CODE_MASK)
+
+
+/**
+ *
+ *
+ * PTS_SCHEDULE_EVENT
+ */
+#define SCU_EVENT_SMP_RESPONSE_NO_PE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x00)
+#define SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE \
+ scu_get_event_specifier(SCU_EVENT_SMP_RESPONSE_NO_PE)
+
+#define SCU_EVENT_TASK_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x01)
+#define SCU_EVENT_SPECIFIC_TASK_TIMEOUT \
+ scu_get_event_specifier(SCU_EVENT_TASK_TIMEOUT)
+
+#define SCU_EVENT_IT_NEXUS_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x02)
+#define SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT \
+ scu_get_event_specifier(SCU_EVENT_IT_NEXUS_TIMEOUT)
+
+
+#endif /* __SCU_EVENT_CODES_HEADER__ */
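
A decoding sketch for the event macros above; raw_event and handle_link_failure() are placeholders:

	u32 event_code = raw_event;	/* event dword from the hardware */

	if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_OSSP_EVENT &&
	    scu_get_event_code(event_code) == SCU_EVENT_LINK_FAILURE)
		handle_link_failure();
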
diff --git a/drivers/scsi/isci/scu_remote_node_context.h b/drivers/scsi/isci/scu_remote_node_context.h
new file mode 100644
index 00000000000..33745adc826
--- /dev/null
+++ b/drivers/scsi/isci/scu_remote_node_context.h
@@ -0,0 +1,229 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_REMOTE_NODE_CONTEXT_HEADER__
+#define __SCU_REMOTE_NODE_CONTEXT_HEADER__
+
+/**
+ * This file contains the structures and constants used by the SCU hardware to
+ * describe a remote node context.
+ *
+ *
+ */
+
+/**
+ * struct ssp_remote_node_context - This structure contains the SCU hardware
+ * definition for an SSP remote node.
+ *
+ *
+ */
+struct ssp_remote_node_context {
+ /* WORD 0 */
+
+ /**
+ * This field is the remote node index assigned for this remote node. All
+ * remote nodes must have a unique remote node index. The value of the remote
+ * node index can not exceed the maximum number of remote nodes reported in
+ * the SCU device context capacity register.
+ */
+ u32 remote_node_index:12;
+ u32 reserved0_1:4;
+
+ /**
+ * This field tells the SCU hardware how many simultaneous connections that
+ * this remote node will support.
+ */
+ u32 remote_node_port_width:4;
+
+ /**
+ * This field tells the SCU hardware which logical port to associate with this
+ * remote node.
+ */
+ u32 logical_port_index:3;
+ u32 reserved0_2:5;
+
+ /**
+ * This field will enable the I_T nexus loss timer for this remote node.
+ */
+ u32 nexus_loss_timer_enable:1;
+
+ /**
+	 * This field is for driver debug only and is not used.
+ */
+ u32 check_bit:1;
+
+ /**
+ * This field must be set to true when the hardware DMAs the remote node
+ * context to the hardware SRAM. When the remote node is being invalidated
+ * this field must be set to false.
+ */
+ u32 is_valid:1;
+
+ /**
+ * This field must be set to true.
+ */
+ u32 is_remote_node_context:1;
+
+ /* WORD 1 - 2 */
+
+ /**
+ * This is the low word of the remote device SAS Address
+ */
+ u32 remote_sas_address_lo;
+
+ /**
+ * This field is the high word of the remote device SAS Address
+ */
+ u32 remote_sas_address_hi;
+
+ /* WORD 3 */
+ /**
+	 * This field represents the function number assigned to this remote device.
+ * This value must match the virtual function number that is being used to
+ * communicate to the device.
+ */
+ u32 function_number:8;
+ u32 reserved3_1:8;
+
+ /**
+ * This field provides the driver a way to cheat on the arbitration wait time
+ * for this remote node.
+ */
+ u32 arbitration_wait_time:16;
+
+ /* WORD 4 */
+ /**
+ * This field tells the SCU hardware how long this device may occupy the
+ * connection before it must be closed.
+ */
+ u32 connection_occupancy_timeout:16;
+
+ /**
+ * This field tells the SCU hardware how long to maintain a connection when
+ * there are no frames being transmitted on the link.
+ */
+ u32 connection_inactivity_timeout:16;
+
+ /* WORD 5 */
+ /**
+ * This field allows the driver to cheat on the arbitration wait time for this
+ * remote node.
+ */
+ u32 initial_arbitration_wait_time:16;
+
+ /**
+	 * This field tells the hardware what to program for the connection rate in
+ * the open address frame. See the SAS spec for valid values.
+ */
+ u32 oaf_connection_rate:4;
+
+ /**
+ * This field tells the SCU hardware what to program for the features in the
+ * open address frame. See the SAS spec for valid values.
+ */
+ u32 oaf_features:4;
+
+ /**
+ * This field tells the SCU hardware what to use for the source zone group in
+ * the open address frame. See the SAS spec for more details on zoning.
+ */
+ u32 oaf_source_zone_group:8;
+
+ /* WORD 6 */
+ /**
+	 * This field tells the SCU hardware what to use for the more compatible
+	 * features field in the open address frame. See the SAS spec for details.
+ */
+ u32 oaf_more_compatibility_features;
+
+ /* WORD 7 */
+ u32 reserved7;
+
+};
+
+/**
+ * struct stp_remote_node_context - This structure contains the SCU hardware
+ * definition for a STP remote node.
+ *
+ * STP Targets are not yet supported so this definition is a placeholder until
+ * we do support them.
+ */
+struct stp_remote_node_context {
+ /**
+ * Placeholder data for the STP remote node.
+ */
+ u32 data[8];
+
+};
+
+/**
+ * This union combines the SAS and SATA remote node definitions.
+ *
+ * union scu_remote_node_context
+ */
+union scu_remote_node_context {
+ /**
+ * SSP Remote Node
+ */
+ struct ssp_remote_node_context ssp;
+
+ /**
+ * STP Remote Node
+ */
+ struct stp_remote_node_context stp;
+
+};
+
+#endif /* __SCU_REMOTE_NODE_CONTEXT_HEADER__ */
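
A population sketch for the SSP remote node context before it is posted to the hardware; rnc (a union scu_remote_node_context pointer), rni, port_index, hi, lo and connection_rate are placeholders for values the driver derives from the discovered device and its port:

	memset(rnc, 0, sizeof(*rnc));
	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = 1;
	rnc->ssp.logical_port_index = port_index;
	rnc->ssp.remote_sas_address_hi = hi;
	rnc->ssp.remote_sas_address_lo = lo;
	rnc->ssp.nexus_loss_timer_enable = true;
	rnc->ssp.is_valid = true;
	rnc->ssp.is_remote_node_context = true;
	rnc->ssp.oaf_connection_rate = connection_rate;
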
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
new file mode 100644
index 00000000000..7df87d92328
--- /dev/null
+++ b/drivers/scsi/isci/scu_task_context.h
@@ -0,0 +1,942 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_TASK_CONTEXT_H_
+#define _SCU_TASK_CONTEXT_H_
+
+/**
+ * This file contains the structures and constants for the SCU hardware task
+ * context.
+ *
+ *
+ */
+
+
+/**
+ * enum scu_ssp_task_type - This enumeration defines the various SSP task
+ * types the SCU hardware will accept. The definition for the various task
+ * types the SCU hardware will accept can be found in the DS specification.
+ *
+ *
+ */
+typedef enum {
+ SCU_TASK_TYPE_IOREAD, /* /< IO READ direction or no direction */
+ SCU_TASK_TYPE_IOWRITE, /* /< IO Write direction */
+ SCU_TASK_TYPE_SMP_REQUEST, /* /< SMP Request type */
+	SCU_TASK_TYPE_RESPONSE,	/* /< Driver generated response frame (target mode) */
+ SCU_TASK_TYPE_RAW_FRAME, /* /< Raw frame request type */
+ SCU_TASK_TYPE_PRIMITIVE /* /< Request for a primitive to be transmitted */
+} scu_ssp_task_type;
+
+/**
+ * enum scu_sata_task_type - This enumeration defines the various SATA task
+ * types the SCU hardware will accept. The definition for the various task
+ * types the SCU hardware will accept can be found in the DS specification.
+ *
+ *
+ */
+typedef enum {
+ SCU_TASK_TYPE_DMA_IN, /* /< Read request */
+ SCU_TASK_TYPE_FPDMAQ_READ, /* /< NCQ read request */
+ SCU_TASK_TYPE_PACKET_DMA_IN, /* /< Packet read request */
+ SCU_TASK_TYPE_SATA_RAW_FRAME, /* /< Raw frame request */
+ RESERVED_4,
+ RESERVED_5,
+ RESERVED_6,
+ RESERVED_7,
+ SCU_TASK_TYPE_DMA_OUT, /* /< Write request */
+ SCU_TASK_TYPE_FPDMAQ_WRITE, /* /< NCQ write Request */
+ SCU_TASK_TYPE_PACKET_DMA_OUT /* /< Packet write request */
+} scu_sata_task_type;
+
+
+/**
+ *
+ *
+ * SCU_CONTEXT_TYPE
+ */
+#define SCU_TASK_CONTEXT_TYPE 0
+#define SCU_RNC_CONTEXT_TYPE 1
+
+/**
+ *
+ *
+ * SCU_TASK_CONTEXT_VALIDITY
+ */
+#define SCU_TASK_CONTEXT_INVALID 0
+#define SCU_TASK_CONTEXT_VALID 1
+
+/**
+ *
+ *
+ * SCU_COMMAND_CODE
+ */
+#define SCU_COMMAND_CODE_INITIATOR_NEW_TASK 0
+#define SCU_COMMAND_CODE_ACTIVE_TASK 1
+#define SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK 2
+#define SCU_COMMAND_CODE_TARGET_RAW_FRAMES 3
+
+/**
+ *
+ *
+ * SCU_TASK_PRIORITY
+ */
+/**
+ *
+ *
+ * This priority is used when no specific priority is requested for this request.
+ */
+#define SCU_TASK_PRIORITY_NORMAL 0
+
+/**
+ *
+ *
+ * This priority indicates that the task should be scheduled to the head of the
+ * queue. The task will NOT be executed if the TX is suspended for the remote
+ * node.
+ */
+#define SCU_TASK_PRIORITY_HEAD_OF_Q 1
+
+/**
+ *
+ *
+ * This priority indicates that the task will be executed before all
+ * SCU_TASK_PRIORITY_NORMAL and SCU_TASK_PRIORITY_HEAD_OF_Q tasks. The task
+ * WILL be executed if the TX is suspended for the remote node.
+ */
+#define SCU_TASK_PRIORITY_HIGH 2
+
+/**
+ *
+ *
+ * This task priority is reserved and should not be used.
+ */
+#define SCU_TASK_PRIORITY_RESERVED 3
+
+#define SCU_TASK_INITIATOR_MODE 1
+#define SCU_TASK_TARGET_MODE 0
+
+#define SCU_TASK_REGULAR 0
+#define SCU_TASK_ABORTED 1
+
+/* direction bit definition */
+/**
+ *
+ *
+ * SATA_DIRECTION
+ */
+#define SCU_SATA_WRITE_DATA_DIRECTION 0
+#define SCU_SATA_READ_DATA_DIRECTION 1
+
+/**
+ *
+ *
+ * SCU_COMMAND_CONTEXT_MACROS These macros provide the mask and shift
+ * operations to construct the various SCU commands
+ */
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT 21
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK 0x00E00000
+#define scu_get_command_request_type(x) \
+ ((x) & SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT 18
+#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK 0x001C0000
+#define scu_get_command_request_subtype(x) \
+ ((x) & SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK \
+ (\
+ SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK \
+ | SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK \
+ )
+#define scu_get_command_request_full_type(x) \
+ ((x) & SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT 16
+#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK 0x00010000
+#define scu_get_command_protocl_engine_group(x) \
+ ((x) & SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK)
+
+#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT 12
+#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK 0x00007000
+#define scu_get_command_reqeust_logical_port(x) \
+ ((x) & SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK)
+
+
+#define MAKE_SCU_CONTEXT_COMMAND_TYPE(type) \
+ ((u32)(type) << SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT)
+
+/**
+ * MAKE_SCU_CONTEXT_COMMAND_TYPE() -
+ *
+ * SCU_COMMAND_TYPES These constants provide the grouping of the different SCU
+ * command types.
+ */
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(0)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(1)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(2)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(3)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(6)
+
+#define MAKE_SCU_CONTEXT_COMMAND_REQUEST(type, command) \
+ ((type) | ((command) << SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT))
+
+/**
+ *
+ *
+ * SCU_REQUEST_TYPES These constants are the various request types that can be
+ * posted to the SCU hardware.
+ */
+#define SCU_CONTEXT_COMMAND_REQUST_POST_TC \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 0))
+
+#define SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 1))
+
+#define SCU_CONTEXT_COMMAND_REQUST_DUMP_TC \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_32 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_96 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 2))
+
+#define SCU_CONTEXT_COMMAND_DUMP_RNC_32 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_DUMP_RNC_96 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_RESUME \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 2))
+
+#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_ENABLE \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 3))
+
+#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_DISABLE \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 4))
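
A composition sketch for a post-queue command word, combining one of the request types above with the protocol engine group, logical port and TCi at the shifts defined earlier; peg, port and tci are placeholders:

	u32 post = SCU_CONTEXT_COMMAND_REQUST_POST_TC |
		   (peg << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
		   (port << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
		   tci;
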
+
+/**
+ *
+ *
+ * SCU_TASK_CONTEXT_PROTOCOL: SCU task context protocol types; these are used
+ * to program the SCU task context protocol field in word 0x00.
+ */
+#define SCU_TASK_CONTEXT_PROTOCOL_SMP 0x00
+#define SCU_TASK_CONTEXT_PROTOCOL_SSP 0x01
+#define SCU_TASK_CONTEXT_PROTOCOL_STP 0x02
+#define SCU_TASK_CONTEXT_PROTOCOL_NONE 0x07
+
+/**
+ * struct ssp_task_context - This is the SCU hardware definition for an SSP
+ * request.
+ *
+ *
+ */
+struct ssp_task_context {
+ /* OFFSET 0x18 */
+ u32 reserved00:24;
+ u32 frame_type:8;
+
+ /* OFFSET 0x1C */
+ u32 reserved01;
+
+ /* OFFSET 0x20 */
+ u32 fill_bytes:2;
+ u32 reserved02:6;
+ u32 changing_data_pointer:1;
+ u32 retransmit:1;
+ u32 retry_data_frame:1;
+ u32 tlr_control:2;
+ u32 reserved03:19;
+
+ /* OFFSET 0x24 */
+ u32 uiRsvd4;
+
+ /* OFFSET 0x28 */
+ u32 target_port_transfer_tag:16;
+ u32 tag:16;
+
+ /* OFFSET 0x2C */
+ u32 data_offset;
+};
+
+/**
+ * struct stp_task_context - This is the SCU hardware definition for an STP
+ * request.
+ *
+ *
+ */
+struct stp_task_context {
+ /* OFFSET 0x18 */
+ u32 fis_type:8;
+ u32 pm_port:4;
+ u32 reserved0:3;
+ u32 control:1;
+ u32 command:8;
+ u32 features:8;
+
+ /* OFFSET 0x1C */
+ u32 reserved1;
+
+ /* OFFSET 0x20 */
+ u32 reserved2;
+
+ /* OFFSET 0x24 */
+ u32 reserved3;
+
+ /* OFFSET 0x28 */
+ u32 ncq_tag:5;
+ u32 reserved4:27;
+
+ /* OFFSET 0x2C */
+ u32 data_offset; /* TODO: What is this used for? */
+};
+
+/**
+ * struct smp_task_context - This is the SCU hardware definition for an SMP
+ * request.
+ *
+ *
+ */
+struct smp_task_context {
+ /* OFFSET 0x18 */
+ u32 response_length:8;
+ u32 function_result:8;
+ u32 function:8;
+ u32 frame_type:8;
+
+ /* OFFSET 0x1C */
+ u32 smp_response_ufi:12;
+ u32 reserved1:20;
+
+ /* OFFSET 0x20 */
+ u32 reserved2;
+
+ /* OFFSET 0x24 */
+ u32 reserved3;
+
+ /* OFFSET 0x28 */
+ u32 reserved4;
+
+ /* OFFSET 0x2C */
+ u32 reserved5;
+};
+
+/**
+ * struct primitive_task_context - This is the SCU hardware definition used
+ * when the driver wants to send a primitive on the link.
+ *
+ *
+ */
+struct primitive_task_context {
+ /* OFFSET 0x18 */
+ /**
+ * This field is the control word and it must be 0.
+ */
+ u32 control; /* /< must be set to 0 */
+
+ /* OFFSET 0x1C */
+ /**
+ * This field specifies the primitive that is to be transmitted.
+ */
+ u32 sequence;
+
+ /* OFFSET 0x20 */
+ u32 reserved0;
+
+ /* OFFSET 0x24 */
+ u32 reserved1;
+
+ /* OFFSET 0x28 */
+ u32 reserved2;
+
+ /* OFFSET 0x2C */
+ u32 reserved3;
+};
+
+/**
+ * The union of the protocols that can be selected in the SCU task context
+ * field.
+ *
+ * protocol_context
+ */
+union protocol_context {
+ struct ssp_task_context ssp;
+ struct stp_task_context stp;
+ struct smp_task_context smp;
+ struct primitive_task_context primitive;
+ u32 words[6];
+};
+
+/**
+ * struct scu_sgl_element - This structure represents a single SCU defined SGL
+ * element. SCU SGLs contain a 64 bit address and a 24 bit transfer length,
+ * so the maximum data transfer per element is 2^24 - 1 bytes. The SGL
+ * cannot cross a 4GB boundary.
+ *
+ * struct scu_sgl_element
+ */
+struct scu_sgl_element {
+ /**
+ * This field is the upper 32 bits of the 64 bit physical address.
+ */
+ u32 address_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit physical address.
+ */
+ u32 address_lower;
+
+ /**
+ * This field is the number of bytes to transfer.
+ */
+ u32 length:24;
+
+ /**
+ * This field is the address modifier to be used when a virtual function is
+ * requesting a data transfer.
+ */
+ u32 address_modifier:8;
+
+};
+
+#define SCU_SGL_ELEMENT_PAIR_A 0
+#define SCU_SGL_ELEMENT_PAIR_B 1
+
+/**
+ * struct scu_sgl_element_pair - This structure is the SCU hardware definition
+ * of a pair of SGL elements. The SCU hardware always works on SGL pairs.
+ * They are referred to in the DS specification as SGL A and SGL B. Each SGL
+ * pair is followed by the address of the next pair.
+ *
+ *
+ */
+struct scu_sgl_element_pair {
+ /* OFFSET 0x60-0x68 */
+ /**
+ * This field is the SGL element A of the SGL pair.
+ */
+ struct scu_sgl_element A;
+
+ /* OFFSET 0x6C-0x74 */
+ /**
+ * This field is the SGL element B of the SGL pair.
+ */
+ struct scu_sgl_element B;
+
+ /* OFFSET 0x78-0x7C */
+ /**
+ * This field is the upper 32 bits of the 64 bit address to the next SGL
+ * element pair.
+ */
+ u32 next_pair_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit address to the next SGL
+ * element pair.
+ */
+ u32 next_pair_lower;
+
+};
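
A minimal helper sketch for filling one SGL element from a dma_addr_t/length pair, assuming upper_32_bits()/lower_32_bits() from <linux/kernel.h>; sgl_element_set() itself is hypothetical:

	static void sgl_element_set(struct scu_sgl_element *e,
				    dma_addr_t addr, u32 nbytes)
	{
		e->address_upper = upper_32_bits(addr);
		e->address_lower = lower_32_bits(addr);
		e->length = nbytes;		/* must fit in the 24-bit field */
		e->address_modifier = 0;	/* no virtual function modifier */
	}
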
+
+/**
+ * struct transport_snapshot - This structure is the SCU hardware scratch area
+ * for the task context. This is set to 0 by the driver but can be read by
+ * issuing a dump TC request to the SCU.
+ *
+ *
+ */
+struct transport_snapshot {
+ /* OFFSET 0x48 */
+ u32 xfer_rdy_write_data_length;
+
+ /* OFFSET 0x4C */
+ u32 data_offset;
+
+ /* OFFSET 0x50 */
+ u32 data_transfer_size:24;
+ u32 reserved_50_0:8;
+
+ /* OFFSET 0x54 */
+ u32 next_initiator_write_data_offset;
+
+ /* OFFSET 0x58 */
+ u32 next_initiator_write_data_xfer_size:24;
+ u32 reserved_58_0:8;
+};
+
+/**
+ * struct scu_task_context - This structure defines the contents of the SCU
+ * silicon task context. It lays out all of the fields according to the
+ * expected order and location for the Storage Controller unit.
+ *
+ *
+ */
+struct scu_task_context {
+ /* OFFSET 0x00 ------ */
+ /**
+ * This field must be encoded to one of the valid SCU task priority values
+ * - SCU_TASK_PRIORITY_NORMAL
+ * - SCU_TASK_PRIORITY_HEAD_OF_Q
+ * - SCU_TASK_PRIORITY_HIGH
+ */
+ u32 priority:2;
+
+ /**
+ * This field must be set to true if this is an initiator generated request.
+ * Until target mode is supported all task requests are initiator requests.
+ */
+ u32 initiator_request:1;
+
+ /**
+ * This field must be set to one of the valid connection rates valid values
+ * are 0x8, 0x9, and 0xA.
+ */
+ u32 connection_rate:4;
+
+ /**
+	 * This field must be programmed when generating an SMP response since the SMP
+ * connection remains open until the SMP response is generated.
+ */
+ u32 protocol_engine_index:3;
+
+ /**
+ * This field must contain the logical port for the task request.
+ */
+ u32 logical_port_index:3;
+
+ /**
+ * This field must be set to one of the SCU_TASK_CONTEXT_PROTOCOL values
+ * - SCU_TASK_CONTEXT_PROTOCOL_SMP
+ * - SCU_TASK_CONTEXT_PROTOCOL_SSP
+ * - SCU_TASK_CONTEXT_PROTOCOL_STP
+ * - SCU_TASK_CONTEXT_PROTOCOL_NONE
+ */
+ u32 protocol_type:3;
+
+ /**
+	 * This field must be set to the TCi allocated for this task
+ */
+ u32 task_index:12;
+
+ /**
+ * This field is reserved and must be set to 0x00
+ */
+ u32 reserved_00_0:1;
+
+ /**
+ * For a normal task request this must be set to 0. If this is an abort of
+ * this task request it must be set to 1.
+ */
+ u32 abort:1;
+
+ /**
+ * This field must be set to true for the SCU hardware to process the task.
+ */
+ u32 valid:1;
+
+ /**
+ * This field must be set to SCU_TASK_CONTEXT_TYPE
+ */
+ u32 context_type:1;
+
+ /* OFFSET 0x04 */
+ /**
+ * This field contains the RNi that is the target of this request.
+ */
+ u32 remote_node_index:12;
+
+ /**
+ * This field is programmed only if this is a mirrored request (which we do
+ * not use), in which case it is the RNi for the mirrored target.
+ */
+ u32 mirrored_node_index:12;
+
+ /**
+ * This field is programmed with the direction of the SATA request
+ * - SCU_SATA_WRITE_DATA_DIRECTION
+ * - SCU_SATA_READ_DATA_DIRECTION
+ */
+ u32 sata_direction:1;
+
+ /**
+ * This field is programmed with one of the following SCU_COMMAND_CODE
+ * - SCU_COMMAND_CODE_INITIATOR_NEW_TASK
+ * - SCU_COMMAND_CODE_ACTIVE_TASK
+ * - SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK
+ * - SCU_COMMAND_CODE_TARGET_RAW_FRAMES
+ */
+ u32 command_code:2;
+
+ /**
+ * This field is set to true if the remote node should be suspended.
+ * This bit is only valid for SSP & SMP target devices.
+ */
+ u32 suspend_node:1;
+
+ /**
+ * This field is programmed with one of the following command type codes
+ *
+ * For SAS requests use the scu_ssp_task_type
+ * - SCU_TASK_TYPE_IOREAD
+ * - SCU_TASK_TYPE_IOWRITE
+ * - SCU_TASK_TYPE_SMP_REQUEST
+ * - SCU_TASK_TYPE_RESPONSE
+ * - SCU_TASK_TYPE_RAW_FRAME
+ * - SCU_TASK_TYPE_PRIMITIVE
+ *
+ * For SATA requests use the scu_sata_task_type
+ * - SCU_TASK_TYPE_DMA_IN
+ * - SCU_TASK_TYPE_FPDMAQ_READ
+ * - SCU_TASK_TYPE_PACKET_DMA_IN
+ * - SCU_TASK_TYPE_SATA_RAW_FRAME
+ * - SCU_TASK_TYPE_DMA_OUT
+ * - SCU_TASK_TYPE_FPDMAQ_WRITE
+ * - SCU_TASK_TYPE_PACKET_DMA_OUT
+ */
+ u32 task_type:4;
+
+ /* OFFSET 0x08 */
+ /**
+ * This field is reserved and must be set to 0x00
+ */
+ u32 link_layer_control:8; /* presently all reserved */
+
+ /**
+ * This field is set to true when TLR is to be enabled
+ */
+ u32 ssp_tlr_enable:1;
+
+ /**
+ * This field specifies whether the SCU DMAs a response frame to host
+ * memory for good response frames when operating in target mode.
+ */
+ u32 dma_ssp_target_good_response:1;
+
+ /**
+ * This field indicates if the SCU should DMA the response frame to
+ * host memory.
+ */
+ u32 do_not_dma_ssp_good_response:1;
+
+ /**
+ * This field is set to true when strict ordering is to be enabled
+ */
+ u32 strict_ordering:1;
+
+ /**
+ * This field indicates the type of endianness to be utilized for the
+ * frame. Command, task, and response frames utilize control_frame
+ * set to 1.
+ */
+ u32 control_frame:1;
+
+ /**
+ * This field is reserved and the driver should set it to 0x00
+ */
+ u32 tl_control_reserved:3;
+
+ /**
+ * This field is set to true when the SCU hardware task timeout control is to
+ * be enabled
+ */
+ u32 timeout_enable:1;
+
+ /**
+ * This field is reserved and the driver should set it to 0x00
+ */
+ u32 pts_control_reserved:7;
+
+ /**
+ * This field should be set to true when block guard is to be enabled
+ */
+ u32 block_guard_enable:1;
+
+ /**
+ * This field is reserved and the driver should set it to 0x00
+ */
+ u32 sdma_control_reserved:7;
+
+ /* OFFSET 0x0C */
+ /**
+ * This field is the address modifier for this io request; it should be
+ * programmed with the virtual function that is making the request.
+ */
+ u32 address_modifier:16;
+
+ /**
+ * @todo Do we support mirrored SMP response frames?
+ */
+ u32 mirrored_protocol_engine:3; /* mirrored protocol Engine Index */
+
+ /**
+ * If this is a mirrored request the logical port index for the mirrored RNi
+ * must be programmed.
+ */
+ u32 mirrored_logical_port:4; /* mirrored local port index */
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_0C_0:8;
+
+ /**
+ * This field must be set to true if the mirrored request processing is to be
+ * enabled.
+ */
+ u32 mirror_request_enable:1; /* Mirrored request Enable */
+
+ /* OFFSET 0x10 */
+ /**
+ * This field is the command iu length in dwords
+ */
+ u32 ssp_command_iu_length:8;
+
+ /**
+ * This is the target TLR enable bit; it must be set to 0 when creating the
+ * task context.
+ */
+ u32 xfer_ready_tlr_enable:1;
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_10_0:7;
+
+ /**
+ * This is the maximum burst size that the SCU hardware will send in one
+ * connection; its value is (N x 512) and N must be a multiple of 2. If the
+ * value is 0x00 then maximum burst size is disabled.
+ */
+ u32 ssp_max_burst_size:16;
+
+ /* OFFSET 0x14 */
+ /**
+ * This field is set to the number of bytes to be transferred in the request.
+ */
+ u32 transfer_length_bytes:24; /* In terms of bytes */
+
+ /**
+ * This field is reserved and the driver should set it to 0x00
+ */
+ u32 reserved_14_0:8;
+
+ /* OFFSET 0x18-0x2C */
+ /**
+ * This union provides for the protocol specific part of the SCU Task Context.
+ */
+ union protocol_context type;
+
+ /* OFFSET 0x30-0x34 */
+ /**
+ * This field is the upper 32 bits of the 64 bit physical address of the
+ * command iu buffer
+ */
+ u32 command_iu_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit physical address of the
+ * command iu buffer
+ */
+ u32 command_iu_lower;
+
+ /* OFFSET 0x38-0x3C */
+ /**
+ * This field is the upper 32 bits of the 64 bit physical address of the
+ * response iu buffer
+ */
+ u32 response_iu_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit physical address of the
+ * response iu buffer
+ */
+ u32 response_iu_lower;
+
+ /* OFFSET 0x40 */
+ /**
+ * This field is set to the task phase of the SCU hardware. The driver must
+ * set this to 0x01
+ */
+ u32 task_phase:8;
+
+ /**
+ * This field is set to the transport layer task status. The driver must set
+ * this to 0x00
+ */
+ u32 task_status:8;
+
+ /**
+ * This field is used during initiator write TLR
+ */
+ u32 previous_extended_tag:4;
+
+ /**
+ * This field is set to the maximum number of retries for an STP non-data FIS
+ */
+ u32 stp_retry_count:2;
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_40_1:2;
+
+ /**
+ * This field is used by the SCU TL to determine when to take a snapshot when
+ * transmitting read data frames.
+ * - 0x00 The entire IO
+ * - 0x01 32k
+ * - 0x02 64k
+ * - 0x04 128k
+ * - 0x08 256k
+ */
+ u32 ssp_tlr_threshold:4;
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_40_2:4;
+
+ /* OFFSET 0x44 */
+ u32 write_data_length; /* read only set to 0 */
+
+ /* OFFSET 0x48-0x58 */
+ struct transport_snapshot snapshot; /* read only set to 0 */
+
+ /* OFFSET 0x5C */
+ u32 block_protection_enable:1;
+ u32 block_size:2;
+ u32 block_protection_function:2;
+ u32 reserved_5C_0:9;
+ u32 active_sgl_element:2; /* read only set to 0 */
+ u32 sgl_exhausted:1; /* read only set to 0 */
+ u32 payload_data_transfer_error:4; /* read only set to 0 */
+ u32 frame_buffer_offset:11; /* read only set to 0 */
+
+ /* OFFSET 0x60-0x7C */
+ /**
+ * This field is the first SGL element pair found in the TC data structure.
+ */
+ struct scu_sgl_element_pair sgl_pair_ab;
+ /* OFFSET 0x80-0x9C */
+ /**
+ * This field is the second SGL element pair found in the TC data structure.
+ */
+ struct scu_sgl_element_pair sgl_pair_cd;
+
+ /* OFFSET 0xA0-BC */
+ struct scu_sgl_element_pair sgl_snapshot_ac;
+
+ /* OFFSET 0xC0 */
+ u32 active_sgl_element_pair; /* read only set to 0 */
+
+ /* OFFSET 0xC4-0xCC */
+ u32 reserved_C4_CC[3];
+
+ /* OFFSET 0xD0 */
+ u32 intermediate_crc_value:16;
+ u32 initial_crc_seed:16;
+
+ /* OFFSET 0xD4 */
+ u32 application_tag_for_verify:16;
+ u32 application_tag_for_generate:16;
+
+ /* OFFSET 0xD8 */
+ u32 reference_tag_seed_for_verify_function;
+
+ /* OFFSET 0xDC */
+ u32 reserved_DC;
+
+ /* OFFSET 0xE0 */
+ u32 reserved_E0_0:16;
+ u32 application_tag_mask_for_generate:16;
+
+ /* OFFSET 0xE4 */
+ u32 block_protection_control:16;
+ u32 application_tag_mask_for_verify:16;
+
+ /* OFFSET 0xE8 */
+ u32 block_protection_error:8;
+ u32 reserved_E8_0:24;
+
+ /* OFFSET 0xEC */
+ u32 reference_tag_seed_for_verify;
+
+ /* OFFSET 0xF0 */
+ u32 intermediate_crc_valid_snapshot:16;
+ u32 reserved_F0_0:16;
+
+ /* OFFSET 0xF4 */
+ u32 reference_tag_seed_for_verify_function_snapshot;
+
+ /* OFFSET 0xF8 */
+ u32 snapshot_of_reserved_dword_DC_of_tc;
+
+ /* OFFSET 0xFC */
+ u32 reference_tag_seed_for_generate_function_snapshot;
+
+};
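+
+/*
+ * Minimal sketch, assuming only the per-field requirements documented above
+ * (the 'tc' pointer and surrounding setup are hypothetical): before handing
+ * a task context to the hardware the driver must at least program
+ *
+ *	tc->priority          = SCU_TASK_PRIORITY_NORMAL;
+ *	tc->initiator_request = 1;
+ *	tc->context_type      = SCU_TASK_CONTEXT_TYPE;
+ *	tc->task_phase        = 0x01;
+ *	tc->task_status       = 0x00;
+ *	tc->abort             = 0;
+ *	tc->valid             = 1;
+ *
+ * where 'valid' is what marks the TC as processable by the SCU hardware.
+ */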
+
+#endif /* _SCU_TASK_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
new file mode 100644
index 00000000000..d6bcdd013dc
--- /dev/null
+++ b/drivers/scsi/isci/task.c
@@ -0,0 +1,1676 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/completion.h>
+#include <linux/irqflags.h>
+#include "sas.h"
+#include <scsi/libsas.h>
+#include "remote_device.h"
+#include "remote_node_context.h"
+#include "isci.h"
+#include "request.h"
+#include "task.h"
+#include "host.h"
+
+/**
+* isci_task_refuse() - complete the request to the upper layer driver in
+* the case where an I/O needs to be completed back in the submit path.
+* @ihost: host on which the request was queued
+* @task: request to complete
+* @response: response code for the completed task.
+* @status: status code for the completed task.
+*
+*/
+static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
+ enum service_response response,
+ enum exec_status status)
+
+{
+ enum isci_completion_selection disposition;
+
+ disposition = isci_perform_normal_io_completion;
+ disposition = isci_task_set_completion_status(task, response, status,
+ disposition);
+
+ /* Tasks aborted specifically by a call to the lldd_abort_task
+ * function should not be completed to the host in the regular path.
+ */
+ switch (disposition) {
+ case isci_perform_normal_io_completion:
+ /* Normal notification (task_done) */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Normal - task = %p, response=%d, "
+ "status=%d\n",
+ __func__, task, response, status);
+
+ task->lldd_task = NULL;
+
+ isci_execpath_callback(ihost, task, task->task_done);
+ break;
+
+ case isci_perform_aborted_io_completion:
+ /*
+ * No notification because this request is already in the
+ * abort path.
+ */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Aborted - task = %p, response=%d, "
+ "status=%d\n",
+ __func__, task, response, status);
+ break;
+
+ case isci_perform_error_io_completion:
+ /* Use sas_task_abort */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Error - task = %p, response=%d, "
+ "status=%d\n",
+ __func__, task, response, status);
+
+ isci_execpath_callback(ihost, task, sas_task_abort);
+ break;
+
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci task notification default case!",
+ __func__);
+ sas_task_abort(task);
+ break;
+ }
+}
+
+#define for_each_sas_task(num, task) \
+ for (; num > 0; num--,\
+ task = list_entry(task->list.next, struct sas_task, list))
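+/* libsas hands lldd_execute_task a chain of 'num' tasks linked through
+ * task->list; the macro above simply walks that chain (a single task is
+ * just one iteration).
+ */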
+
+
+static inline int isci_device_io_ready(struct isci_remote_device *idev,
+ struct sas_task *task)
+{
+ return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
+ (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
+ isci_task_is_ncq_recovery(task))
+ : 0;
+}
+/**
+ * isci_task_execute_task() - This function is one of the SAS Domain Template
+ * functions. This function is called by libsas to send a task down to
+ * hardware.
+ * @task: This parameter specifies the SAS task to send.
+ * @num: This parameter specifies the number of tasks to queue.
+ * @gfp_flags: This parameter specifies the context of this call.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
+{
+ struct isci_host *ihost = dev_to_ihost(task->dev);
+ struct isci_remote_device *idev;
+ unsigned long flags;
+ bool io_ready;
+ u16 tag;
+
+ dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
+
+ for_each_sas_task(num, task) {
+ enum sci_status status = SCI_FAILURE;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_lookup_device(task->dev);
+ io_ready = isci_device_io_ready(idev, task);
+ tag = isci_alloc_tag(ihost);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ dev_dbg(&ihost->pdev->dev,
+ "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
+ task, num, task->dev, idev, idev ? idev->flags : 0,
+ task->uldd_task);
+
+ if (!idev) {
+ isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
+ SAS_DEVICE_UNKNOWN);
+ } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+ /* Indicate QUEUE_FULL so that the scsi midlayer
+ * retries.
+ */
+ isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
+ SAS_QUEUE_FULL);
+ } else {
+ /* There is a device and it's ready for I/O. */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+
+ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ /* The I/O was aborted. */
+ spin_unlock_irqrestore(&task->task_state_lock,
+ flags);
+
+ isci_task_refuse(ihost, task,
+ SAS_TASK_UNDELIVERED,
+ SAM_STAT_TASK_ABORTED);
+ } else {
+ task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* build and send the request. */
+ status = isci_request_execute(ihost, idev, task, tag);
+
+ if (status != SCI_SUCCESS) {
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ /* Did not really start this command. */
+ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* Indicate QUEUE_FULL so that the scsi
+ * midlayer retries. If the request
+ * failed for remote device reasons,
+ * it gets returned as
+ * SAS_TASK_UNDELIVERED next time
+ * through.
+ */
+ isci_task_refuse(ihost, task,
+ SAS_TASK_COMPLETE,
+ SAS_QUEUE_FULL);
+ }
+ }
+ }
+ if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ /* command never hit the device, so just free
+ * the tci and skip the sequence increment
+ */
+ isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ }
+ isci_put_device(idev);
+ }
+ return 0;
+}
+
+static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
+{
+ struct isci_tmf *isci_tmf;
+ enum sci_status status;
+
+ if (tmf_task != ireq->ttype)
+ return SCI_FAILURE;
+
+ isci_tmf = isci_request_access_tmf(ireq);
+
+ switch (isci_tmf->tmf_code) {
+
+ case isci_tmf_sata_srst_high:
+ case isci_tmf_sata_srst_low: {
+ struct host_to_dev_fis *fis = &ireq->stp.cmd;
+
+ memset(fis, 0, sizeof(*fis));
+
+ fis->fis_type = 0x27;
+ fis->flags &= ~0x80;
+ fis->flags &= 0xF0;
+ if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
+ fis->control |= ATA_SRST;
+ else
+ fis->control &= ~ATA_SRST;
+ break;
+ }
+ /* other management commands go here... */
+ default:
+ return SCI_FAILURE;
+ }
+
+ /* core builds the protocol specific request
+ * based on the h2d fis.
+ */
+ status = sci_task_request_construct_sata(ireq);
+
+ return status;
+}
+
+static struct isci_request *isci_task_request_build(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 tag, struct isci_tmf *isci_tmf)
+{
+ enum sci_status status = SCI_FAILURE;
+ struct isci_request *ireq = NULL;
+ struct domain_device *dev;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_tmf = %p\n", __func__, isci_tmf);
+
+ dev = idev->domain_dev;
+
+ /* do common allocation and init of request object. */
+ ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
+ if (!ireq)
+ return NULL;
+
+ /* let the core do its construction. */
+ status = sci_task_request_construct(ihost, idev, tag,
+ ireq);
+
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: sci_task_request_construct failed - "
+ "status = 0x%x\n",
+ __func__,
+ status);
+ return NULL;
+ }
+
+ /* XXX convert to get this from task->tproto like other drivers */
+ if (dev->dev_type == SAS_END_DEV) {
+ isci_tmf->proto = SAS_PROTOCOL_SSP;
+ status = sci_task_request_construct_ssp(ireq);
+ if (status != SCI_SUCCESS)
+ return NULL;
+ }
+
+ if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ isci_tmf->proto = SAS_PROTOCOL_SATA;
+ status = isci_sata_management_task_request_build(ireq);
+
+ if (status != SCI_SUCCESS)
+ return NULL;
+ }
+ return ireq;
+}
+
+static int isci_task_execute_tmf(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_tmf *tmf, unsigned long timeout_ms)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ enum sci_task_status status = SCI_TASK_FAILURE;
+ struct isci_request *ireq;
+ int ret = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+ unsigned long timeleft;
+ u16 tag;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ tag = isci_alloc_tag(ihost);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+ return ret;
+
+ /* sanity check, return TMF_RESP_FUNC_FAILED
+ * if the device is not there and ready.
+ */
+ if (!idev ||
+ (!test_bit(IDEV_IO_READY, &idev->flags) &&
+ !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p not ready (%#lx)\n",
+ __func__,
+ idev, idev ? idev->flags : 0);
+ goto err_tci;
+ } else
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p\n",
+ __func__, idev);
+
+ /* Assign the pointer to the TMF's completion kernel wait structure. */
+ tmf->complete = &completion;
+
+ ireq = isci_task_request_build(ihost, idev, tag, tmf);
+ if (!ireq)
+ goto err_tci;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ /* start the TMF io. */
+ status = sci_controller_start_task(ihost, idev, ireq);
+
+ if (status != SCI_TASK_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: start_io failed - status = 0x%x, request = %p\n",
+ __func__,
+ status,
+ ireq);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ goto err_tci;
+ }
+
+ if (tmf->cb_state_func != NULL)
+ tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
+
+ isci_request_change_state(ireq, started);
+
+ /* add the request to the remote device request list. */
+ list_add(&ireq->dev_node, &idev->reqs_in_process);
+
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Wait for the TMF to complete, or a timeout. */
+ timeleft = wait_for_completion_timeout(&completion,
+ msecs_to_jiffies(timeout_ms));
+
+ if (timeleft == 0) {
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmf->cb_state_func != NULL)
+ tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
+
+ sci_controller_terminate_request(ihost,
+ idev,
+ ireq);
+
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ wait_for_completion(tmf->complete);
+ }
+
+ isci_print_tmf(tmf);
+
+ if (tmf->status == SCI_SUCCESS)
+ ret = TMF_RESP_FUNC_COMPLETE;
+ else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: tmf.status == "
+ "SCI_FAILURE_IO_RESPONSE_VALID\n",
+ __func__);
+ ret = TMF_RESP_FUNC_COMPLETE;
+ }
+ /* Else - leave the default "failed" status alone. */
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completed request = %p\n",
+ __func__,
+ ireq);
+
+ return ret;
+
+ err_tci:
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return ret;
+}
+
+static void isci_task_build_tmf(struct isci_tmf *tmf,
+ enum isci_tmf_function_codes code,
+ void (*tmf_sent_cb)(enum isci_tmf_cb_state,
+ struct isci_tmf *,
+ void *),
+ void *cb_data)
+{
+ memset(tmf, 0, sizeof(*tmf));
+
+ tmf->tmf_code = code;
+ tmf->cb_state_func = tmf_sent_cb;
+ tmf->cb_data = cb_data;
+}
+
+static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
+ enum isci_tmf_function_codes code,
+ void (*tmf_sent_cb)(enum isci_tmf_cb_state,
+ struct isci_tmf *,
+ void *),
+ struct isci_request *old_request)
+{
+ isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
+ tmf->io_tag = old_request->io_tag;
+}
+
+/**
+ * isci_task_validate_request_to_abort() - This function checks the given I/O
+ * against the "started" state. If the request is still "started", its
+ * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
+ * BEFORE CALLING THIS FUNCTION.
+ * @isci_request: This parameter specifies the request object to control.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @isci_device: This is the device to which the request is pending.
+ * @aborted_io_completion: This is a completion structure that will be added to
+ * the request in case it is changed to aborting; this completion is
+ * triggered when the request is fully completed.
+ *
+ * Either "started" on successful change of the task status to "aborted", or
+ * "unallocated" if the task cannot be controlled.
+ */
+static enum isci_request_status isci_task_validate_request_to_abort(
+ struct isci_request *isci_request,
+ struct isci_host *isci_host,
+ struct isci_remote_device *isci_device,
+ struct completion *aborted_io_completion)
+{
+ enum isci_request_status old_state = unallocated;
+
+ /* Only abort the task if it's in the
+ * device's request_in_process list
+ */
+ if (isci_request && !list_empty(&isci_request->dev_node)) {
+ old_state = isci_request_change_started_to_aborted(
+ isci_request, aborted_io_completion);
+
+ }
+
+ return old_state;
+}
+
+/**
+* isci_request_cleanup_completed_loiterer() - This function will take care of
+* the final cleanup on any request which has been explicitly terminated.
+* @isci_host: This parameter specifies the ISCI host object
+* @isci_device: This is the device to which the request is pending.
+* @isci_request: This parameter specifies the terminated request object.
+* @task: This parameter is the libsas I/O request.
+*/
+static void isci_request_cleanup_completed_loiterer(
+ struct isci_host *isci_host,
+ struct isci_remote_device *isci_device,
+ struct isci_request *isci_request,
+ struct sas_task *task)
+{
+ unsigned long flags;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device=%p, request=%p, task=%p\n",
+ __func__, isci_device, isci_request, task);
+
+ if (task != NULL) {
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->lldd_task = NULL;
+
+ task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
+
+ isci_set_task_doneflags(task);
+
+ /* If this task is not in the abort path, call task_done. */
+ if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ task->task_done(task);
+ } else
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ }
+
+ if (isci_request != NULL) {
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+ list_del_init(&isci_request->dev_node);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ }
+}
+
+/**
+ * isci_terminate_request_core() - This function will terminate the given
+ * request, and wait for it to complete. This function must only be called
+ * from a thread that can wait. Note that the request is terminated and
+ * completed (back to the host, if started there).
+ * @ihost: This SCU.
+ * @idev: The target.
+ * @isci_request: The I/O request to be terminated.
+ *
+ */
+static void isci_terminate_request_core(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *isci_request)
+{
+ enum sci_status status = SCI_SUCCESS;
+ bool was_terminated = false;
+ bool needs_cleanup_handling = false;
+ enum isci_request_status request_status;
+ unsigned long flags;
+ unsigned long termination_completed = 1;
+ struct completion *io_request_completion;
+ struct sas_task *task;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: device = %p; request = %p\n",
+ __func__, idev, isci_request);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ io_request_completion = isci_request->io_request_completion;
+
+ task = (isci_request->ttype == io_task)
+ ? isci_request_access_task(isci_request)
+ : NULL;
+
+ /* Note that we are not going to control
+ * the target to abort the request.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
+
+ /* Make sure the request wasn't just sitting around signalling
+ * device condition (if the request handle is NULL, then the
+ * request completed but needed additional handling here).
+ */
+ if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+ was_terminated = true;
+ needs_cleanup_handling = true;
+ status = sci_controller_terminate_request(ihost,
+ idev,
+ isci_request);
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /*
+ * The only time the request to terminate will
+ * fail is when the io request is completed and
+ * being aborted.
+ */
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: sci_controller_terminate_request"
+ " returned = 0x%x\n",
+ __func__, status);
+
+ isci_request->io_request_completion = NULL;
+
+ } else {
+ if (was_terminated) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: before completion wait (%p/%p)\n",
+ __func__, isci_request, io_request_completion);
+
+ /* Wait here for the request to complete. */
+ #define TERMINATION_TIMEOUT_MSEC 500
+ termination_completed
+ = wait_for_completion_timeout(
+ io_request_completion,
+ msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
+
+ if (!termination_completed) {
+
+ /* The request to terminate has timed out. */
+ spin_lock_irqsave(&ihost->scic_lock,
+ flags);
+
+ /* Check for state changes. */
+ if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+
+ /* The best we can do is to have the
+ * request die a silent death if it
+ * ever really completes.
+ *
+ * Set the request state to "dead",
+ * and clear the task pointer so that
+ * an actual completion event callback
+ * doesn't do anything.
+ */
+ isci_request->status = dead;
+ isci_request->io_request_completion
+ = NULL;
+
+ if (isci_request->ttype == io_task) {
+
+ /* Break links with the
+ * sas_task.
+ */
+ isci_request->ttype_ptr.io_task_ptr
+ = NULL;
+ }
+ } else
+ termination_completed = 1;
+
+ spin_unlock_irqrestore(&ihost->scic_lock,
+ flags);
+
+ if (!termination_completed) {
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: *** Timeout waiting for "
+ "termination(%p/%p)\n",
+ __func__, io_request_completion,
+ isci_request);
+
+ /* The request can no longer be referenced
+ * safely since it may go away if the
+ * termination ever really does complete.
+ */
+ isci_request = NULL;
+ }
+ }
+ if (termination_completed)
+ dev_dbg(&ihost->pdev->dev,
+ "%s: after completion wait (%p/%p)\n",
+ __func__, isci_request, io_request_completion);
+ }
+
+ if (termination_completed) {
+
+ isci_request->io_request_completion = NULL;
+
+ /* Peek at the status of the request. This will tell
+ * us if there was special handling on the request such that it
+ * needs to be detached and freed here.
+ */
+ spin_lock_irqsave(&isci_request->state_lock, flags);
+ request_status = isci_request->status;
+
+ if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
+ && ((request_status == aborted)
+ || (request_status == aborting)
+ || (request_status == terminating)
+ || (request_status == completed)
+ || (request_status == dead)
+ )
+ ) {
+
+ /* The completion routine won't free a request in
+ * the aborted/aborting/etc. states, so we do
+ * it here.
+ */
+ needs_cleanup_handling = true;
+ }
+ spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+ }
+ if (needs_cleanup_handling)
+ isci_request_cleanup_completed_loiterer(
+ ihost, idev, isci_request, task);
+ }
+}
+
+/**
+ * isci_terminate_pending_requests() - This function will change all of the
+ * requests on the given device's state to "aborting", will terminate the
+ * requests, and wait for them to complete. This function must only be
+ * called from a thread that can wait. Note that the requests are all
+ * terminated and completed (back to the host, if started there).
+ * @ihost: This parameter specifies the SCU.
+ * @idev: This parameter specifies the target.
+ *
+ */
+void isci_terminate_pending_requests(struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ struct completion request_completion;
+ enum isci_request_status old_state;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ list_splice_init(&idev->reqs_in_process, &list);
+
+ /* assumes that isci_terminate_request_core deletes from the list */
+ while (!list_empty(&list)) {
+ struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
+
+ /* Change state to "terminating" if it is currently
+ * "started".
+ */
+ old_state = isci_request_change_started_to_newstate(ireq,
+ &request_completion,
+ terminating);
+ switch (old_state) {
+ case started:
+ case completed:
+ case aborting:
+ break;
+ default:
+ /* termination in progress, or otherwise dispositioned.
+ * We know the request was on 'list' so should be safe
+ * to move it back to reqs_in_process
+ */
+ list_move(&ireq->dev_node, &idev->reqs_in_process);
+ ireq = NULL;
+ break;
+ }
+
+ if (!ireq)
+ continue;
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ init_completion(&request_completion);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev=%p request=%p; task=%p old_state=%d\n",
+ __func__, idev, ireq,
+ ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL,
+ old_state);
+
+ /* If the old_state is started:
+ * This request was not already being aborted. If it had been,
+ * then the aborting I/O (ie. the TMF request) would not be in
+ * the aborting state, and thus would be terminated here. Note
+ * that since the TMF completion's call to the kernel function
+ * "complete()" does not happen until the pending I/O request
+ * terminate fully completes, we do not have to implement a
+ * special wait here for already aborting requests - the
+ * termination of the TMF request will force the request
+ * to finish its already-started termination.
+ *
+ * If old_state == completed:
+ * This request completed from the SCU hardware perspective
+ * and now just needs cleaning up in terms of freeing the
+ * request and potentially calling up to libsas.
+ *
+ * If old_state == aborting:
+ * This request has already gone through a TMF timeout, but may
+ * not have been terminated; needs cleaning up at least.
+ */
+ isci_terminate_request_core(ihost, idev, ireq);
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/**
+ * isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain
+ * Template functions.
+ * @lun: This parameter specifies the lun to be reset.
+ *
+ * status, zero indicates success.
+ */
+static int isci_task_send_lu_reset_sas(
+ struct isci_host *isci_host,
+ struct isci_remote_device *isci_device,
+ u8 *lun)
+{
+ struct isci_tmf tmf;
+ int ret = TMF_RESP_FUNC_FAILED;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_host = %p, isci_device = %p\n",
+ __func__, isci_host, isci_device);
+ /* Send the LUN reset to the target. By the time the call returns,
+ * the TMF has either fully executed in the target (in which case the
+ * return value is "TMF_RESP_FUNC_COMPLETE"), or the request timed out or
+ * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
+ */
+ isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
+
+ #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
+ ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
+
+ if (ret == TMF_RESP_FUNC_COMPLETE)
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: %p: TMF_LU_RESET passed\n",
+ __func__, isci_device);
+ else
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: %p: TMF_LU_RESET failed (%x)\n",
+ __func__, isci_device, ret);
+
+ return ret;
+}
+
+static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
+ struct isci_remote_device *idev, u8 *lun)
+{
+ int ret = TMF_RESP_FUNC_FAILED;
+ struct isci_tmf tmf;
+
+ /* Send the soft reset to the target */
+ #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
+ isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
+
+ ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
+
+ if (ret != TMF_RESP_FUNC_COMPLETE) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Assert SRST failed (%p) = %x",
+ __func__, idev, ret);
+
+ /* Return the failure so that the LUN reset is escalated
+ * to a target reset.
+ */
+ }
+ return ret;
+}
+
+/**
+ * isci_task_lu_reset() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functions called by libsas,
+ * to reset the given lun. Note the assumption that while this call is
+ * executing, no I/O will be sent by the host to the device.
+ * @lun: This parameter specifies the lun to be reset.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
+{
+ struct isci_host *isci_host = dev_to_ihost(domain_device);
+ struct isci_remote_device *isci_device;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+ isci_device = isci_lookup_device(domain_device);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
+ __func__, domain_device, isci_host, isci_device);
+
+ if (isci_device)
+ set_bit(IDEV_EH, &isci_device->flags);
+
+ /* If there is a device reset pending on any request in the
+ * device's list, fail this LUN reset request in order to
+ * escalate to the device reset.
+ */
+ if (!isci_device ||
+ isci_device_is_reset_pending(isci_host, isci_device)) {
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: No dev (%p), or "
+ "RESET PENDING: domain_device=%p\n",
+ __func__, isci_device, domain_device);
+ ret = TMF_RESP_FUNC_FAILED;
+ goto out;
+ }
+
+ /* Send the task management part of the reset. */
+ if (sas_protocol_ata(domain_device->tproto)) {
+ ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
+ } else
+ ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
+
+ /* If the LUN reset worked, all the I/O can now be terminated. */
+ if (ret == TMF_RESP_FUNC_COMPLETE)
+ /* Terminate all I/O now. */
+ isci_terminate_pending_requests(isci_host,
+ isci_device);
+
+ out:
+ isci_put_device(isci_device);
+ return ret;
+}
+
+
+/* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
+int isci_task_clear_nexus_port(struct asd_sas_port *port)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+
+int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+/* Task Management Functions. Must be called from process context. */
+
+/**
+ * isci_abort_task_process_cb() - This is a helper function for the abort task
+ * TMF command. It manages the request state with respect to the successful
+ * transmission / completion of the abort task request.
+ * @cb_state: This parameter specifies when this function was called - after
+ * the TMF request has been started and after it has timed-out.
+ * @tmf: This parameter specifies the TMF in progress.
+ *
+ *
+ */
+static void isci_abort_task_process_cb(
+ enum isci_tmf_cb_state cb_state,
+ struct isci_tmf *tmf,
+ void *cb_data)
+{
+ struct isci_request *old_request;
+
+ old_request = (struct isci_request *)cb_data;
+
+ dev_dbg(&old_request->isci_host->pdev->dev,
+ "%s: tmf=%p, old_request=%p\n",
+ __func__, tmf, old_request);
+
+ switch (cb_state) {
+
+ case isci_tmf_started:
+ /* The TMF has been started. Nothing to do here, since the
+ * request state was already set to "aborted" by the abort
+ * task function.
+ */
+ if ((old_request->status != aborted)
+ && (old_request->status != completed))
+ dev_dbg(&old_request->isci_host->pdev->dev,
+ "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
+ __func__, old_request->status, tmf, old_request);
+ break;
+
+ case isci_tmf_timed_out:
+
+ /* Set the task's state to "aborting", since the abort task
+ * function thread set it to "aborted" (above) in anticipation
+ * of the task management request working correctly. Since the
+ * timeout has now fired, the TMF request failed. We set the
+ * state such that the request completion will indicate the
+ * device is no longer present.
+ */
+ isci_request_change_state(old_request, aborting);
+ break;
+
+ default:
+ dev_dbg(&old_request->isci_host->pdev->dev,
+ "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
+ __func__, cb_state, tmf, old_request);
+ break;
+ }
+}
+
+/**
+ * isci_task_abort_task() - This function is one of the SAS Domain Template
+ * functions. This function is called by libsas to abort a specified task.
+ * @task: This parameter specifies the SAS task to abort.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_abort_task(struct sas_task *task)
+{
+ struct isci_host *isci_host = dev_to_ihost(task->dev);
+ DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
+ struct isci_request *old_request = NULL;
+ enum isci_request_status old_state;
+ struct isci_remote_device *isci_device = NULL;
+ struct isci_tmf tmf;
+ int ret = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+ bool any_dev_reset = false;
+
+ /* Get the isci_request reference from the task. Note that
+ * this check does not depend on the pending request list
+ * in the device, because tasks driving resets may land here
+ * after completion in the core.
+ */
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+ spin_lock(&task->task_state_lock);
+
+ old_request = task->lldd_task;
+
+ /* If task is already done, the request isn't valid */
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
+ (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
+ old_request)
+ isci_device = isci_lookup_device(task->dev);
+
+ spin_unlock(&task->task_state_lock);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: task = %p\n", __func__, task);
+
+ if (!isci_device || !old_request)
+ goto out;
+
+ set_bit(IDEV_EH, &isci_device->flags);
+
+ /* This version of the driver will fail abort requests for
+ * SATA/STP. Failing the abort request this way will cause the
+ * SCSI error handler thread to escalate to LUN reset
+ */
+ if (sas_protocol_ata(task->task_proto)) {
+ dev_dbg(&isci_host->pdev->dev,
+ " task %p is for a STP/SATA device;"
+ " returning TMF_RESP_FUNC_FAILED\n"
+ " to cause a LUN reset...\n", task);
+ goto out;
+ }
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: old_request == %p\n", __func__, old_request);
+
+ any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+
+ any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
+
+ /* If the extraction of the request reference from the task
+ * failed, then the request has been completed (or if there is a
+ * pending reset then this abort request function must be failed
+ * in order to escalate to the target reset).
+ */
+ if ((old_request == NULL) || any_dev_reset) {
+
+ /* If the device reset task flag is set, fail the task
+ * management request. Otherwise, the original request
+ * has completed.
+ */
+ if (any_dev_reset) {
+
+ /* Turn off the task's DONE to make sure this
+ * task is escalated to a target reset.
+ */
+ task->task_state_flags &= ~SAS_TASK_STATE_DONE;
+
+ /* Make the reset happen as soon as possible. */
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* Fail the task management request in order to
+ * escalate to the target reset.
+ */
+ ret = TMF_RESP_FUNC_FAILED;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: Failing task abort in order to "
+ "escalate to target reset because\n"
+ "SAS_TASK_NEED_DEV_RESET is set for "
+ "task %p on dev %p\n",
+ __func__, task, isci_device);
+
+
+ } else {
+ /* The request has already completed and there
+ * is nothing to do here other than to set the task
+ * done bit, and indicate that the task abort function
+ * was successful.
+ */
+ isci_set_task_doneflags(task);
+
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ ret = TMF_RESP_FUNC_COMPLETE;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: abort task not needed for %p\n",
+ __func__, task);
+ }
+ goto out;
+ } else {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ }
+
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+
+ /* Check the request status and change to "aborted" if currently
+ * "started"; if so, set the I/O kernel completion
+ * struct that will be triggered when the request completes.
+ */
+ old_state = isci_task_validate_request_to_abort(
+ old_request, isci_host, isci_device,
+ &aborted_io_completion);
+ if ((old_state != started) &&
+ (old_state != completed) &&
+ (old_state != aborting)) {
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ /* The request was already being handled by someone else (because
+ * they got to set the state away from started).
+ */
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: device = %p; old_request %p already being aborted\n",
+ __func__,
+ isci_device, old_request);
+ ret = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+ if (task->task_proto == SAS_PROTOCOL_SMP ||
+ test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: SMP request (%d)"
+ " or complete_in_target (%d), thus no TMF\n",
+ __func__, (task->task_proto == SAS_PROTOCOL_SMP),
+ test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
+
+ /* Set the state on the task. */
+ isci_task_all_done(task);
+
+ ret = TMF_RESP_FUNC_COMPLETE;
+
+ /* Stopping and SMP devices are not sent a TMF, and are not
+ * reset, but the outstanding I/O request is terminated below.
+ */
+ } else {
+ /* Fill in the tmf structure */
+ isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
+ isci_abort_task_process_cb,
+ old_request);
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
+ ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
+ ISCI_ABORT_TASK_TIMEOUT_MS);
+
+ if (ret != TMF_RESP_FUNC_COMPLETE)
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_task_send_tmf failed\n",
+ __func__);
+ }
+ if (ret == TMF_RESP_FUNC_COMPLETE) {
+ set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
+
+ /* Clean up the request on our side, and wait for the aborted
+ * I/O to complete.
+ */
+ isci_terminate_request_core(isci_host, isci_device, old_request);
+ }
+
+ /* Make sure we do not leave a reference to aborted_io_completion */
+ old_request->io_request_completion = NULL;
+ out:
+ isci_put_device(isci_device);
+ return ret;
+}
+
+/**
+ * isci_task_abort_task_set() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functions called by libsas,
+ * to abort all tasks for the given lun.
+ * @d_device: This parameter specifies the domain device associated with this
+ * request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_abort_task_set(
+ struct domain_device *d_device,
+ u8 *lun)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+/**
+ * isci_task_clear_aca() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functions called by libsas.
+ * @d_device: This parameter specifies the domain device associated with this
+ * request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_clear_aca(
+ struct domain_device *d_device,
+ u8 *lun)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+
+/**
+ * isci_task_clear_task_set() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functions called by libsas.
+ * @d_device: This parameter specifies the domain device associated with this
+ * request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_clear_task_set(
+ struct domain_device *d_device,
+ u8 *lun)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+/**
+ * isci_task_query_task() - This function is implemented to cause libsas to
+ * correctly escalate the failed abort to a LUN or target reset (this is
+ * because the sas_scsi_find_task libsas function does not correctly interpret
+ * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is
+ * returned, libsas turns this into a LUN reset; when FUNC_FAILED is
+ * returned, libsas will turn this into a target reset
+ * @task: This parameter specifies the sas task being queried.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_query_task(
+ struct sas_task *task)
+{
+ /* See if there is a pending device reset for this device. */
+ if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
+ return TMF_RESP_FUNC_FAILED;
+ else
+ return TMF_RESP_FUNC_SUCC;
+}
+
+/*
+ * isci_task_request_complete() - This function is called by the sci core when
+ * a task request completes.
+ * @ihost: This parameter specifies the ISCI host object
+ * @ireq: This parameter is the completed isci_request object.
+ * @completion_status: This parameter specifies the completion status from the
+ * sci core.
+ *
+ * none.
+ */
+void
+isci_task_request_complete(struct isci_host *ihost,
+ struct isci_request *ireq,
+ enum sci_task_status completion_status)
+{
+ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+ struct completion *tmf_complete;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: request = %p, status=%d\n",
+ __func__, ireq, completion_status);
+
+ isci_request_change_state(ireq, completed);
+
+ tmf->status = completion_status;
+ set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
+
+ if (tmf->proto == SAS_PROTOCOL_SSP) {
+ memcpy(&tmf->resp.resp_iu,
+ &ireq->ssp.rsp,
+ SSP_RESP_IU_MAX_SIZE);
+ } else if (tmf->proto == SAS_PROTOCOL_SATA) {
+ memcpy(&tmf->resp.d2h_fis,
+ &ireq->stp.rsp,
+ sizeof(struct dev_to_host_fis));
+ }
+
+ /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
+ tmf_complete = tmf->complete;
+
+ sci_controller_complete_io(ihost, ireq->target_device, ireq);
+ /* set the 'terminated' flag to make sure it cannot be terminated
+ * or completed again.
+ */
+ set_bit(IREQ_TERMINATED, &ireq->flags);
+
+ isci_request_change_state(ireq, unallocated);
+ list_del_init(&ireq->dev_node);
+
+ /* The task management part completes last. */
+ complete(tmf_complete);
+}
+
+static void isci_smp_task_timedout(unsigned long _task)
+{
+ struct sas_task *task = (void *) _task;
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ complete(&task->completion);
+}
+
+static void isci_smp_task_done(struct sas_task *task)
+{
+ if (!del_timer(&task->timer))
+ return;
+ complete(&task->completion);
+}
+
+static struct sas_task *isci_alloc_task(void)
+{
+ struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
+
+ if (task) {
+ INIT_LIST_HEAD(&task->list);
+ spin_lock_init(&task->task_state_lock);
+ task->task_state_flags = SAS_TASK_STATE_PENDING;
+ init_timer(&task->timer);
+ init_completion(&task->completion);
+ }
+
+ return task;
+}
+
+static void isci_free_task(struct isci_host *ihost, struct sas_task *task)
+{
+ if (task) {
+ BUG_ON(!list_empty(&task->list));
+ kfree(task);
+ }
+}
+
+static int isci_smp_execute_task(struct isci_host *ihost,
+ struct domain_device *dev, void *req,
+ int req_size, void *resp, int resp_size)
+{
+ int res, retry;
+ struct sas_task *task = NULL;
+
+ for (retry = 0; retry < 3; retry++) {
+ task = isci_alloc_task();
+ if (!task)
+ return -ENOMEM;
+
+ task->dev = dev;
+ task->task_proto = dev->tproto;
+ sg_init_one(&task->smp_task.smp_req, req, req_size);
+ sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
+
+ task->task_done = isci_smp_task_done;
+
+ task->timer.data = (unsigned long) task;
+ task->timer.function = isci_smp_task_timedout;
+ task->timer.expires = jiffies + 10*HZ;
+ add_timer(&task->timer);
+
+ res = isci_task_execute_task(task, 1, GFP_KERNEL);
+
+ if (res) {
+ del_timer(&task->timer);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: executing SMP task failed:%d\n",
+ __func__, res);
+ goto ex_err;
+ }
+
+ wait_for_completion(&task->completion);
+ res = -ECOMM;
+ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: smp task timed out or aborted\n",
+ __func__);
+ isci_task_abort_task(task);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SMP task aborted and not done\n",
+ __func__);
+ goto ex_err;
+ }
+ }
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAM_STAT_GOOD) {
+ res = 0;
+ break;
+ }
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_UNDERRUN) {
+ /* no error, but return the number of bytes of
+ * underrun */
+ res = task->task_status.residual;
+ break;
+ }
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_OVERRUN) {
+ res = -EMSGSIZE;
+ break;
+ } else {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: task to dev %016llx response: 0x%x "
+ "status 0x%x\n", __func__,
+ SAS_ADDR(dev->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
+ isci_free_task(ihost, task);
+ task = NULL;
+ }
+ }
+ex_err:
+ BUG_ON(retry == 3 && task != NULL);
+ isci_free_task(ihost, task);
+ return res;
+}
+
+#define DISCOVER_REQ_SIZE 16
+#define DISCOVER_RESP_SIZE 56
+
+int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost,
+ struct domain_device *dev,
+ int phy_id, int *adt)
+{
+ struct smp_resp *disc_resp;
+ u8 *disc_req;
+ int res;
+
+ disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL);
+ if (!disc_resp)
+ return -ENOMEM;
+
+ disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL);
+ if (disc_req) {
+ disc_req[0] = SMP_REQUEST;
+ disc_req[1] = SMP_DISCOVER;
+ disc_req[9] = phy_id;
+ } else {
+ kfree(disc_resp);
+ return -ENOMEM;
+ }
+ res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE,
+ disc_resp, DISCOVER_RESP_SIZE);
+ if (!res) {
+ if (disc_resp->result != SMP_RESP_FUNC_ACC)
+ res = disc_resp->result;
+ else
+ *adt = disc_resp->disc.attached_dev_type;
+ }
+ kfree(disc_req);
+ kfree(disc_resp);
+
+ return res;
+}
+
+static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num)
+{
+ struct domain_device *dev = idev->domain_dev;
+ struct isci_port *iport = idev->isci_port;
+ struct isci_host *ihost = iport->isci_host;
+ int res, iteration = 0, attached_device_type;
+ #define STP_WAIT_MSECS 25000
+ unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS);
+ unsigned long deadline = jiffies + tmo;
+ enum {
+ SMP_PHYWAIT_PHYDOWN,
+ SMP_PHYWAIT_PHYUP,
+ SMP_PHYWAIT_DONE
+ } phy_state = SMP_PHYWAIT_PHYDOWN;
+
+ /* While there is time, wait for the phy to go away and come back */
+ while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) {
+ int event = atomic_read(&iport->event);
+
+ ++iteration;
+
+ tmo = wait_event_timeout(ihost->eventq,
+ event != atomic_read(&iport->event) ||
+ !test_bit(IPORT_BCN_BLOCKED, &iport->flags),
+ tmo);
+ /* link down, stop polling */
+ if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags))
+ break;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: iport %p, iteration %d,"
+ " phase %d: time_remaining %lu, bcns = %d\n",
+ __func__, iport, iteration, phy_state,
+ tmo, test_bit(IPORT_BCN_PENDING, &iport->flags));
+
+ res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num,
+ &attached_device_type);
+ tmo = deadline - jiffies;
+
+ if (res) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: iteration %d, phase %d:"
+ " SMP error=%d, time_remaining=%lu\n",
+ __func__, iteration, phy_state, res, tmo);
+ break;
+ }
+ dev_dbg(&ihost->pdev->dev,
+ "%s: iport %p, iteration %d,"
+ " phase %d: time_remaining %lu, bcns = %d, "
+ "attdevtype = %x\n",
+ __func__, iport, iteration, phy_state,
+ tmo, test_bit(IPORT_BCN_PENDING, &iport->flags),
+ attached_device_type);
+
+ switch (phy_state) {
+ case SMP_PHYWAIT_PHYDOWN:
+ /* Has the device gone away? */
+ if (!attached_device_type)
+ phy_state = SMP_PHYWAIT_PHYUP;
+
+ break;
+
+ case SMP_PHYWAIT_PHYUP:
+ /* Has the device come back? */
+ if (attached_device_type)
+ phy_state = SMP_PHYWAIT_DONE;
+ break;
+
+ case SMP_PHYWAIT_DONE:
+ break;
+ }
+
+ }
+ dev_dbg(&ihost->pdev->dev, "%s: done\n", __func__);
+}
+
+static int isci_reset_device(struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
+ struct isci_port *iport = idev->isci_port;
+ enum sci_status status;
+ unsigned long flags;
+ int rc;
+
+ dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ status = sci_remote_device_reset(idev);
+ if (status != SCI_SUCCESS) {
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: sci_remote_device_reset(%p) returned %d!\n",
+ __func__, idev, status);
+
+ return TMF_RESP_FUNC_FAILED;
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Make sure all pending requests are able to be fully terminated. */
+ isci_device_clear_reset_pending(ihost, idev);
+
+ /* If this is a device on an expander, disable BCN processing. */
+ if (!scsi_is_sas_phy_local(phy))
+ set_bit(IPORT_BCN_BLOCKED, &iport->flags);
+
+ rc = sas_phy_reset(phy, true);
+
+ /* Terminate in-progress I/O now. */
+ isci_remote_device_nuke_requests(ihost, idev);
+
+ /* Since all pending TCs have been cleaned, resume the RNC. */
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ status = sci_remote_device_reset_complete(idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* If this is a device on an expander, bring the phy back up. */
+ if (!scsi_is_sas_phy_local(phy)) {
+ /* A phy reset will cause the device to go away then reappear.
+ * Since libsas will take action on incoming BCNs (eg. remove
+ * a device going through an SMP phy-control driven reset),
+ * we need to wait until the phy comes back up before letting
+ * discovery proceed in libsas.
+ */
+ isci_wait_for_smp_phy_reset(idev, phy->number);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ isci_port_bcn_enable(ihost, idev->isci_port);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ }
+
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: sci_remote_device_reset_complete(%p) "
+ "returned %d!\n", __func__, idev, status);
+ }
+
+ dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
+
+ return rc;
+}
+
+int isci_task_I_T_nexus_reset(struct domain_device *dev)
+{
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_lookup_device(dev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
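+	/* If the device is already gone or is not in error handling,
+	 * there is nothing to reset; report success to libsas.
+	 */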
+ if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
+ ret = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+
+ ret = isci_reset_device(ihost, idev);
+ out:
+ isci_put_device(idev);
+ return ret;
+}
+
+int isci_bus_reset_handler(struct scsi_cmnd *cmd)
+{
+ struct domain_device *dev = sdev_to_domain_dev(cmd->device);
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_lookup_device(dev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (!idev) {
+ ret = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+
+ ret = isci_reset_device(ihost, idev);
+ out:
+ isci_put_device(idev);
+ return ret;
+}
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
new file mode 100644
index 00000000000..4a7fa90287e
--- /dev/null
+++ b/drivers/scsi/isci/task.h
@@ -0,0 +1,367 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_TASK_H_
+#define _ISCI_TASK_H_
+
+#include <scsi/sas_ata.h>
+#include "host.h"
+
+struct isci_request;
+
+/**
+ * enum isci_tmf_cb_state - This enum defines the possible states in which the
+ * TMF callback function is invoked during the TMF execution process.
+ *
+ *
+ */
+enum isci_tmf_cb_state {
+
+ isci_tmf_init_state = 0,
+ isci_tmf_started,
+ isci_tmf_timed_out
+};
+
+/**
+ * enum isci_tmf_function_codes - This enum defines the possible preparations
+ * of task management requests.
+ *
+ *
+ */
+enum isci_tmf_function_codes {
+
+ isci_tmf_func_none = 0,
+ isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
+ isci_tmf_ssp_lun_reset = TMF_LU_RESET,
+ isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */
+ isci_tmf_sata_srst_low = TMF_LU_RESET + 0x101 /* Non SCSI */
+};
+/**
+ * struct isci_tmf - This class represents the task management object which
+ * acts as an interface to libsas for processing task management requests
+ *
+ *
+ */
+struct isci_tmf {
+
+ struct completion *complete;
+ enum sas_protocol proto;
+ union {
+ struct ssp_response_iu resp_iu;
+ struct dev_to_host_fis d2h_fis;
+ u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
+ } resp;
+ unsigned char lun[8];
+ u16 io_tag;
+ struct isci_remote_device *device;
+ enum isci_tmf_function_codes tmf_code;
+ int status;
+
+ /* The optional callback function allows the user process to
+ * track the TMF transmit / timeout conditions.
+ */
+ void (*cb_state_func)(
+ enum isci_tmf_cb_state,
+ struct isci_tmf *, void *);
+ void *cb_data;
+
+};
+
+static inline void isci_print_tmf(struct isci_tmf *tmf)
+{
+ if (SAS_PROTOCOL_SATA == tmf->proto)
+ dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+ "%s: status = %x\n"
+ "tmf->resp.d2h_fis.status = %x\n"
+ "tmf->resp.d2h_fis.error = %x\n",
+ __func__,
+ tmf->status,
+ tmf->resp.d2h_fis.status,
+ tmf->resp.d2h_fis.error);
+ else
+ dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+ "%s: status = %x\n"
+ "tmf->resp.resp_iu.data_present = %x\n"
+ "tmf->resp.resp_iu.status = %x\n"
+ "tmf->resp.resp_iu.data_length = %x\n"
+ "tmf->resp.resp_iu.data[0] = %x\n"
+ "tmf->resp.resp_iu.data[1] = %x\n"
+ "tmf->resp.resp_iu.data[2] = %x\n"
+ "tmf->resp.resp_iu.data[3] = %x\n",
+ __func__,
+ tmf->status,
+ tmf->resp.resp_iu.datapres,
+ tmf->resp.resp_iu.status,
+ be32_to_cpu(tmf->resp.resp_iu.response_data_len),
+ tmf->resp.resp_iu.resp_data[0],
+ tmf->resp.resp_iu.resp_data[1],
+ tmf->resp.resp_iu.resp_data[2],
+ tmf->resp.resp_iu.resp_data[3]);
+}
+
+
+int isci_task_execute_task(
+ struct sas_task *task,
+ int num,
+ gfp_t gfp_flags);
+
+int isci_task_abort_task(
+ struct sas_task *task);
+
+int isci_task_abort_task_set(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_clear_aca(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_clear_task_set(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_query_task(
+ struct sas_task *task);
+
+int isci_task_lu_reset(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_clear_nexus_port(
+ struct asd_sas_port *port);
+
+int isci_task_clear_nexus_ha(
+ struct sas_ha_struct *ha);
+
+int isci_task_I_T_nexus_reset(
+ struct domain_device *d_device);
+
+void isci_task_request_complete(
+ struct isci_host *isci_host,
+ struct isci_request *request,
+ enum sci_task_status completion_status);
+
+u16 isci_task_ssp_request_get_io_tag_to_manage(
+ struct isci_request *request);
+
+u8 isci_task_ssp_request_get_function(
+ struct isci_request *request);
+
+
+void *isci_task_ssp_request_get_response_data_address(
+ struct isci_request *request);
+
+u32 isci_task_ssp_request_get_response_data_length(
+ struct isci_request *request);
+
+int isci_queuecommand(
+ struct scsi_cmnd *scsi_cmd,
+ void (*donefunc)(struct scsi_cmnd *));
+
+int isci_bus_reset_handler(struct scsi_cmnd *cmd);
+
+/**
+ * enum isci_completion_selection - This enum defines the possible actions to
+ * take with respect to a given request's notification back to libsas.
+ *
+ *
+ */
+enum isci_completion_selection {
+
+ isci_perform_normal_io_completion, /* Normal notify (task_done) */
+ isci_perform_aborted_io_completion, /* No notification. */
+ isci_perform_error_io_completion /* Use sas_task_abort */
+};
+
+static inline void isci_set_task_doneflags(
+ struct sas_task *task)
+{
+	/* Since no further action will be taken on this task,
+ * make sure to mark it complete from the lldd perspective.
+ */
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+}
+/**
+ * isci_task_all_done() - This function updates the task state bits to
+ *    indicate the LLDD is done with the task.
+ *
+ *
+ */
+static inline void isci_task_all_done(
+ struct sas_task *task)
+{
+ unsigned long flags;
+
+	/* Since no further action will be taken on this task,
+ * make sure to mark it complete from the lldd perspective.
+ */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ isci_set_task_doneflags(task);
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+}
+
+/**
+ * isci_task_set_completion_status() - This function sets the completion status
+ * for the request.
+ * @task: This parameter is the completed request.
+ * @response: This parameter is the response code for the completed task.
+ * @status: This parameter is the status code for the completed task.
+ *
+ * @return The new notification mode for the request.
+ */
+static inline enum isci_completion_selection
+isci_task_set_completion_status(
+ struct sas_task *task,
+ enum service_response response,
+ enum exec_status status,
+ enum isci_completion_selection task_notification_selection)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+
+ /* If a device reset is being indicated, make sure the I/O
+ * is in the error path.
+ */
+ if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
+ /* Fail the I/O to make sure it goes into the error path. */
+ response = SAS_TASK_UNDELIVERED;
+ status = SAM_STAT_TASK_ABORTED;
+
+ task_notification_selection = isci_perform_error_io_completion;
+ }
+ task->task_status.resp = response;
+ task->task_status.stat = status;
+
+ switch (task_notification_selection) {
+
+ case isci_perform_error_io_completion:
+
+ if (task->task_proto == SAS_PROTOCOL_SMP) {
+ /* There is no error escalation in the SMP case.
+ * Convert to a normal completion to avoid the
+ * timeout in the discovery path and to let the
+ * next action take place quickly.
+ */
+ task_notification_selection
+ = isci_perform_normal_io_completion;
+
+ /* Fall through to the normal case... */
+ } else {
+ /* Use sas_task_abort */
+ /* Leave SAS_TASK_STATE_DONE clear
+ * Leave SAS_TASK_AT_INITIATOR set.
+ */
+ break;
+ }
+
+ case isci_perform_aborted_io_completion:
+ /* This path can occur with task-managed requests as well as
+ * requests terminated because of LUN or device resets.
+ */
+ /* Fall through to the normal case... */
+ case isci_perform_normal_io_completion:
+ /* Normal notification (task_done) */
+ isci_set_task_doneflags(task);
+ break;
+ default:
+ WARN_ONCE(1, "unknown task_notification_selection: %d\n",
+ task_notification_selection);
+ break;
+ }
+
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ return task_notification_selection;
+
+}
+/**
+ * isci_execpath_callback() - This function is called from the task
+ * execute path when the task needs to call back libsas about a submit-time
+ * task failure. The callback occurs either through the task's done function
+ * or through sas_task_abort. In the case of regular non-discovery SATA/STP I/O
+ * requests, libsas takes the host lock before calling execute task. Therefore
+ * in this situation the host lock must be managed before calling the func.
+ *
+ * @ihost: This parameter is the controller to which the I/O request was sent.
+ * @task: This parameter is the I/O request.
+ * @func: This parameter is the function to call in the correct context.
+ */
+static inline void isci_execpath_callback(struct isci_host *ihost,
+ struct sas_task *task,
+ void (*func)(struct sas_task *))
+{
+ struct domain_device *dev = task->dev;
+
+ if (dev_is_sata(dev) && task->uldd_task) {
+ unsigned long flags;
+
+ /* Since we are still in the submit path, and since
+ * libsas takes the host lock on behalf of SATA
+ * devices before I/O starts (in the non-discovery case),
+ * we need to unlock before we can call the callback function.
+ */
+ raw_local_irq_save(flags);
+ spin_unlock(dev->sata_dev.ap->lock);
+ func(task);
+ spin_lock(dev->sata_dev.ap->lock);
+ raw_local_irq_restore(flags);
+ } else
+ func(task);
+}
+#endif /* !defined(_ISCI_TASK_H_) */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
new file mode 100644
index 00000000000..e9e1e2abacb
--- /dev/null
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -0,0 +1,225 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+#include "unsolicited_frame_control.h"
+#include "registers.h"
+
+int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
+{
+ struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
+ struct sci_unsolicited_frame *uf;
+ u32 buf_len, header_len, i;
+ dma_addr_t dma;
+ size_t size;
+ void *virt;
+
+ /*
+ * Prepare all of the memory sizes for the UF headers, UF address
+ * table, and UF buffers themselves.
+ */
+ buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+ header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
+ size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t);
+
+ /*
+ * The Unsolicited Frame buffers are set at the start of the UF
+ * memory descriptor entry. The headers and address table will be
+ * placed after the buffers.
+ */
+ virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ /*
+ * Program the location of the UF header table into the SCU.
+ * Notes:
+ * - The address must align on a 64-byte boundary. Guaranteed to be
+	 *   on a 64-byte boundary already, since unsolicited frames are on
+	 *   a 1KB boundary.
+ * - Program unused header entries to overlap with the last
+ * unsolicited frame. The silicon will never DMA to these unused
+ * headers, since we program the UF address table pointers to
+ * NULL.
+ */
+ uf_control->headers.physical_address = dma + buf_len;
+ uf_control->headers.array = virt + buf_len;
+
+ /*
+ * Program the location of the UF address table into the SCU.
+ * Notes:
+	 * - The address must align on a 64-bit boundary. Guaranteed to be on a
+	 *   64-byte boundary already since the headers above start on a 64-byte
+	 *   boundary and each header is 64 bytes in size.
+ */
+ uf_control->address_table.physical_address = dma + buf_len + header_len;
+ uf_control->address_table.array = virt + buf_len + header_len;
+ uf_control->get = 0;
+
+ /*
+ * UF buffer requirements are:
+ * - The last entry in the UF queue is not NULL.
+ * - There is a power of 2 number of entries (NULL or not-NULL)
+ * programmed into the queue.
+ * - Aligned on a 1KB boundary. */
+
+ /*
+ * Program the actual used UF buffers into the UF address table and
+ * the controller's array of UFs.
+ */
+ for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) {
+ uf = &uf_control->buffers.array[i];
+
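+		/* Point the hardware UF address table entry at this
+		 * frame's DMA buffer.
+		 */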
+ uf_control->address_table.array[i] = dma;
+
+ uf->buffer = virt;
+ uf->header = &uf_control->headers.array[i];
+ uf->state = UNSOLICITED_FRAME_EMPTY;
+
+ /*
+ * Increment the address of the physical and virtual memory
+ * pointers. Everything is aligned on 1k boundary with an
+ * increment of 1k.
+ */
+ virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+ dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+ }
+
+ return 0;
+}
+
+enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_header)
+{
+ if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
+		/* Skip the first word in the frame since this is a control word used
+ * by the hardware.
+ */
+ *frame_header = &uf_control->buffers.array[frame_index].header->data;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+}
+
+enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_buffer)
+{
+ if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
+ *frame_buffer = uf_control->buffers.array[frame_index].buffer;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+}
+
+bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index)
+{
+ u32 frame_get;
+ u32 frame_cycle;
+
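+	/* The software get pointer encodes the frame index in its low bits
+	 * and uses the SCU_MAX_UNSOLICITED_FRAMES bit as a queue cycle
+	 * (wrap) indicator.
+	 */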
+ frame_get = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1);
+ frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES;
+
+ /*
+ * In the event there are NULL entries in the UF table, we need to
+ * advance the get pointer in order to find out if this frame should
+ * be released (i.e. update the get pointer)
+ */
+ while (lower_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
+ upper_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
+ frame_get < SCU_MAX_UNSOLICITED_FRAMES)
+ frame_get++;
+
+ /*
+ * The table has a NULL entry as it's last element. This is
+	 * The table has a NULL entry as its last element.  This is
+ */
+ BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES);
+ if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES)
+ return false;
+
+ uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED;
+
+ if (frame_get != frame_index) {
+ /*
+ * Frames remain in use until we advance the get pointer
+ * so there is nothing we can do here
+ */
+ return false;
+ }
+
+ /*
+ * The frame index is equal to the current get pointer so we
+	 * can now free up all of the frame entries that have been released.
+ */
+ while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) {
+ uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY;
+
+ if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) {
+ frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES;
+ frame_get = 0;
+ } else
+ frame_get++;
+ }
+
+ uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get;
+
+ return true;
+}
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
new file mode 100644
index 00000000000..31cb9506f52
--- /dev/null
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -0,0 +1,278 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
+#define _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
+
+#include "isci.h"
+
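+/* Number of DWORDs of link header data captured in each unsolicited
+ * frame header.
+ */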
+#define SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS 15
+
+/**
+ * struct scu_unsolicited_frame_header -
+ *
+ * This structure delineates the format of an unsolicited frame header. The
+ * first DWORD are UF attributes defined by the silicon architecture. The data
+ * depicts actual header information received on the link.
+ */
+struct scu_unsolicited_frame_header {
+ /**
+ * This field indicates if there is an Initiator Index Table entry with
+ * which this header is associated.
+ */
+ u32 iit_exists:1;
+
+ /**
+ * This field simply indicates the protocol type (i.e. SSP, STP, SMP).
+ */
+ u32 protocol_type:3;
+
+ /**
+ * This field indicates if the frame is an address frame (IAF or OAF)
+	 * or if it is an information unit frame.
+ */
+ u32 is_address_frame:1;
+
+ /**
+ * This field simply indicates the connection rate at which the frame
+ * was received.
+ */
+ u32 connection_rate:4;
+
+ u32 reserved:23;
+
+ /**
+ * This field represents the actual header data received on the link.
+ */
+ u32 data[SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS];
+
+};
+
+
+
+/**
+ * enum unsolicited_frame_state -
+ *
+ * This enumeration represents the current unsolicited frame state. The
+ * controller object cannot update the hardware unsolicited frame put pointer
+ * unless it has already processed the prior unsolicited frames.
+ */
+enum unsolicited_frame_state {
+ /**
+ * This state is when the frame is empty and not in use. It is
+ * different from the released state in that the hardware could DMA
+ * data to this frame buffer.
+ */
+ UNSOLICITED_FRAME_EMPTY,
+
+ /**
+	 * This state is set when the frame buffer is in use by some
+ * object in the system.
+ */
+ UNSOLICITED_FRAME_IN_USE,
+
+ /**
+ * This state is set when the frame is returned to the free pool
+ * but one or more frames prior to this one are still in use.
+	 * Once all of the frames before this one are freed, it will go to
+ * the empty state.
+ */
+ UNSOLICITED_FRAME_RELEASED,
+
+ UNSOLICITED_FRAME_MAX_STATES
+};
+
+/**
+ * struct sci_unsolicited_frame -
+ *
+ * This is the unsolicited frame data structure; it acts as the container for
+ * the current frame state, frame header and frame buffer.
+ */
+struct sci_unsolicited_frame {
+ /**
+ * This field contains the current frame state
+ */
+ enum unsolicited_frame_state state;
+
+ /**
+ * This field points to the frame header data.
+ */
+ struct scu_unsolicited_frame_header *header;
+
+ /**
+ * This field points to the frame buffer data.
+ */
+ void *buffer;
+
+};
+
+/**
+ * struct sci_uf_header_array -
+ *
+ * This structure contains all of the unsolicited frame header information.
+ */
+struct sci_uf_header_array {
+ /**
+	 * This field represents a virtual pointer to the start
+	 * address of the UF header array.
+ */
+ struct scu_unsolicited_frame_header *array;
+
+ /**
+ * This field specifies the physical address location for the UF
+	 * header array.
+ */
+ dma_addr_t physical_address;
+
+};
+
+/**
+ * struct sci_uf_buffer_array -
+ *
+ * This structure contains all of the unsolicited frame buffer (actual payload)
+ * information.
+ */
+struct sci_uf_buffer_array {
+ /**
+	 * This field is the unsolicited frame data; it is used to manage
+ * the data for the unsolicited frame requests. It also represents
+ * the virtual address location that corresponds to the
+ * physical_address field.
+ */
+ struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES];
+
+ /**
+ * This field specifies the physical address location for the UF
+ * buffer array.
+ */
+ dma_addr_t physical_address;
+};
+
+/**
+ * struct sci_uf_address_table_array -
+ *
+ * This object maintains all of the unsolicited frame address table specific
+ * data. The address table is a collection of 64-bit pointers that point to
+ * 1KB buffers into which the silicon will DMA unsolicited frames.
+ */
+struct sci_uf_address_table_array {
+ /**
+ * This field represents a virtual pointer that refers to the
+ * starting address of the UF address table.
+ * 64-bit pointers are required by the hardware.
+ */
+ dma_addr_t *array;
+
+ /**
+ * This field specifies the physical address location for the UF
+ * address table.
+ */
+ dma_addr_t physical_address;
+
+};
+
+/**
+ * struct sci_unsolicited_frame_control -
+ *
+ * This object contains all of the data necessary to handle unsolicited frames.
+ */
+struct sci_unsolicited_frame_control {
+ /**
+ * This field is the software copy of the unsolicited frame queue
+ * get pointer. The controller object writes this value to the
+	 * hardware to let it post more unsolicited frame entries.
+ */
+ u32 get;
+
+ /**
+ * This field contains all of the unsolicited frame header
+ * specific fields.
+ */
+ struct sci_uf_header_array headers;
+
+ /**
+ * This field contains all of the unsolicited frame buffer
+ * specific fields.
+ */
+ struct sci_uf_buffer_array buffers;
+
+ /**
+ * This field contains all of the unsolicited frame address table
+ * specific fields.
+ */
+ struct sci_uf_address_table_array address_table;
+
+};
+
+struct isci_host;
+
+int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
+
+enum sci_status sci_unsolicited_frame_control_get_header(
+ struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_header);
+
+enum sci_status sci_unsolicited_frame_control_get_buffer(
+ struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_buffer);
+
+bool sci_unsolicited_frame_control_release_frame(
+ struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index);
+
+#endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index df6bff7366c..89700cbca16 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -64,7 +64,8 @@ static void iscsi_boot_kobj_release(struct kobject *kobj)
struct iscsi_boot_kobj *boot_kobj =
container_of(kobj, struct iscsi_boot_kobj, kobj);
- kfree(boot_kobj->data);
+ if (boot_kobj->release)
+ boot_kobj->release(boot_kobj->data);
kfree(boot_kobj);
}
@@ -305,7 +306,8 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
struct attribute_group *attr_group,
const char *name, int index, void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type))
+ mode_t (*is_visible) (void *data, int type),
+ void (*release) (void *data))
{
struct iscsi_boot_kobj *boot_kobj;
@@ -323,6 +325,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
boot_kobj->data = data;
boot_kobj->show = show;
boot_kobj->is_visible = is_visible;
+ boot_kobj->release = release;
if (sysfs_create_group(&boot_kobj->kobj, attr_group)) {
/*
@@ -331,7 +334,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
* the boot kobj was not setup and the normal release
* path is not being run.
*/
- boot_kobj->data = NULL;
+ boot_kobj->release = NULL;
kobject_put(&boot_kobj->kobj);
return NULL;
}
@@ -357,6 +360,7 @@ static void iscsi_boot_remove_kobj(struct iscsi_boot_kobj *boot_kobj)
* @data: driver specific data for target
* @show: attr show function
* @is_visible: attr visibility function
+ * @release: release function
*
* Note: The boot sysfs lib will free the data passed in for the caller
* when all refs to the target kobject have been released.
@@ -365,10 +369,12 @@ struct iscsi_boot_kobj *
iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type))
+ mode_t (*is_visible) (void *data, int type),
+ void (*release) (void *data))
{
return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_target_attr_group,
- "target%d", index, data, show, is_visible);
+ "target%d", index, data, show, is_visible,
+ release);
}
EXPORT_SYMBOL_GPL(iscsi_boot_create_target);
@@ -379,6 +385,7 @@ EXPORT_SYMBOL_GPL(iscsi_boot_create_target);
* @data: driver specific data
* @show: attr show function
* @is_visible: attr visibility function
+ * @release: release function
*
* Note: The boot sysfs lib will free the data passed in for the caller
* when all refs to the initiator kobject have been released.
@@ -387,12 +394,13 @@ struct iscsi_boot_kobj *
iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type))
+ mode_t (*is_visible) (void *data, int type),
+ void (*release) (void *data))
{
return iscsi_boot_create_kobj(boot_kset,
&iscsi_boot_initiator_attr_group,
"initiator", index, data, show,
- is_visible);
+ is_visible, release);
}
EXPORT_SYMBOL_GPL(iscsi_boot_create_initiator);
@@ -403,6 +411,7 @@ EXPORT_SYMBOL_GPL(iscsi_boot_create_initiator);
* @data: driver specific data
* @show: attr show function
* @is_visible: attr visibility function
+ * @release: release function
*
* Note: The boot sysfs lib will free the data passed in for the caller
* when all refs to the ethernet kobject have been released.
@@ -411,12 +420,13 @@ struct iscsi_boot_kobj *
iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type))
+ mode_t (*is_visible) (void *data, int type),
+ void (*release) (void *data))
{
return iscsi_boot_create_kobj(boot_kset,
&iscsi_boot_ethernet_attr_group,
"ethernet%d", index, data, show,
- is_visible);
+ is_visible, release);
}
EXPORT_SYMBOL_GPL(iscsi_boot_create_ethernet);
@@ -472,6 +482,9 @@ void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset)
{
struct iscsi_boot_kobj *boot_kobj, *tmp_kobj;
+ if (!boot_kset)
+ return;
+
list_for_each_entry_safe(boot_kobj, tmp_kobj,
&boot_kset->kobj_list, list)
iscsi_boot_remove_kobj(boot_kobj);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 3df985305f6..7724414588f 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -107,10 +107,12 @@ static int iscsi_sw_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
* If the socket is in CLOSE or CLOSE_WAIT we should
* not close the connection if there is still some
* data pending.
+ *
+ * Must be called with sk_callback_lock.
*/
static inline int iscsi_sw_sk_state_check(struct sock *sk)
{
- struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
+ struct iscsi_conn *conn = sk->sk_user_data;
if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
!atomic_read(&sk->sk_rmem_alloc)) {
@@ -123,11 +125,17 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
{
- struct iscsi_conn *conn = sk->sk_user_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_conn *conn;
+ struct iscsi_tcp_conn *tcp_conn;
read_descriptor_t rd_desc;
read_lock(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (!conn) {
+ read_unlock(&sk->sk_callback_lock);
+ return;
+ }
+ tcp_conn = conn->dd_data;
/*
* Use rd_desc to pass 'conn' to iscsi_tcp_recv.
@@ -141,11 +149,10 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
iscsi_sw_sk_state_check(sk);
- read_unlock(&sk->sk_callback_lock);
-
/* If we had to (atomically) map a highmem page,
* unmap it now. */
iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
+ read_unlock(&sk->sk_callback_lock);
}
static void iscsi_sw_tcp_state_change(struct sock *sk)
@@ -157,8 +164,11 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
void (*old_state_change)(struct sock *);
read_lock(&sk->sk_callback_lock);
-
- conn = (struct iscsi_conn*)sk->sk_user_data;
+ conn = sk->sk_user_data;
+ if (!conn) {
+ read_unlock(&sk->sk_callback_lock);
+ return;
+ }
session = conn->session;
iscsi_sw_sk_state_check(sk);
@@ -178,11 +188,25 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
**/
static void iscsi_sw_tcp_write_space(struct sock *sk)
{
- struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct iscsi_conn *conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ void (*old_write_space)(struct sock *);
+
+ read_lock_bh(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (!conn) {
+ read_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+
+ tcp_conn = conn->dd_data;
+ tcp_sw_conn = tcp_conn->dd_data;
+ old_write_space = tcp_sw_conn->old_write_space;
+ read_unlock_bh(&sk->sk_callback_lock);
+
+ old_write_space(sk);
- tcp_sw_conn->old_write_space(sk);
ISCSI_SW_TCP_DBG(conn, "iscsi_write_space\n");
iscsi_conn_queue_work(conn);
}
@@ -592,20 +616,17 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
/* userspace may have goofed up and not bound us */
if (!sock)
return;
- /*
- * Make sure our recv side is stopped.
- * Older tools called conn stop before ep_disconnect
- * so IO could still be coming in.
- */
- write_lock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
sock->sk->sk_err = EIO;
wake_up_interruptible(sk_sleep(sock->sk));
- iscsi_conn_stop(cls_conn, flag);
+ /* stop xmit side */
+ iscsi_suspend_tx(conn);
+
+ /* stop recv side and release socket */
iscsi_sw_tcp_release_conn(conn);
+
+ iscsi_conn_stop(cls_conn, flag);
}
static int
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 3b8a6451ea2..01ff082dc34 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -802,10 +802,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
spin_lock_bh(&pool->lock);
ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
- if (ep) {
+ if (ep && ep->xid == xid)
fc_exch_hold(ep);
- WARN_ON(ep->xid != xid);
- }
spin_unlock_bh(&pool->lock);
}
return ep;
@@ -965,8 +963,30 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
sp = &ep->seq;
if (sp->id != fh->fh_seq_id) {
atomic_inc(&mp->stats.seq_not_found);
- reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
- goto rel;
+ if (f_ctl & FC_FC_END_SEQ) {
+ /*
+			 * Update the sequence id based on the incoming
+			 * last frame of the sequence exchange. This is
+			 * needed for an FCoE target where DDP has been
+			 * used; in that case the stack is notified only
+			 * about the last frame's header (payload header).
+			 * The "seq_id" in that frame header was allocated
+			 * by the initiator and is totally different from
+			 * the "seq_id" allocated when the XFER_RDY was
+			 * sent by the target. Avoid the false negative,
+			 * which would otherwise result in the RSP never
+			 * being sent and the write request on the other
+			 * end never finishing.
+ */
+ spin_lock_bh(&ep->ex_lock);
+ sp->ssb_stat |= SSB_ST_RESP;
+ sp->id = fh->fh_seq_id;
+ spin_unlock_bh(&ep->ex_lock);
+ } else {
+ /* sequence/exch should exist */
+ reject = FC_RJT_SEQ_ID;
+ goto rel;
+ }
}
}
WARN_ON(ep != fc_seq_exch(sp));
@@ -2443,8 +2463,11 @@ int fc_setup_exch_mgr(void)
fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
if (!fc_exch_workqueue)
- return -ENOMEM;
+ goto err;
return 0;
+err:
+ kmem_cache_destroy(fc_em_cachep);
+ return -ENOMEM;
}
/**
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 9cd2149519a..afb63c84314 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -498,7 +498,7 @@ crc_err:
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->ErrorFrames++;
/* per cpu count, not total count, but OK for limit */
- if (stats->InvalidCRCCount++ < 5)
+ if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
printk(KERN_WARNING "libfc: CRC error on data "
"frame for port (%6.6x)\n",
lport->port_id);
@@ -690,7 +690,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
}
/**
- * fc_fcp_abts_resp() - Send an ABTS response
+ * fc_fcp_abts_resp() - Receive an ABTS response
* @fsp: The FCP packet that is being aborted
* @fp: The response frame
*/
@@ -730,7 +730,7 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
}
/**
- * fc_fcp_recv() - Reveive an FCP frame
+ * fc_fcp_recv() - Receive an FCP frame
* @seq: The sequence the frame is on
* @fp: The received frame
* @arg: The related FCP packet
@@ -1084,6 +1084,7 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
if (unlikely(rc)) {
spin_lock_irqsave(&si->scsi_queue_lock, flags);
+ fsp->cmd->SCp.ptr = NULL;
list_del(&fsp->list);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
}
@@ -1645,12 +1646,10 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
struct fc_seq *seq;
struct fcp_srr *srr;
struct fc_frame *fp;
- u8 cdb_op;
unsigned int rec_tov;
rport = fsp->rport;
rpriv = rport->dd_data;
- cdb_op = fsp->cdb_cmd.fc_cdb[0];
if (!(rpriv->flags & FC_RP_FLAGS_RETRY) ||
rpriv->rp_state != RPORT_ST_READY)
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 389ab80aef0..e55ed9cf23f 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1025,6 +1025,8 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
}
fc_lport_state_enter(lport, LPORT_ST_RESET);
+ fc_host_post_event(lport->host, fc_get_event_number(),
+ FCH_EVT_LIPRESET, 0);
fc_vports_linkchange(lport);
fc_lport_reset_locked(lport);
if (lport->link_up)
@@ -1350,7 +1352,6 @@ static void fc_lport_timeout(struct work_struct *work)
WARN_ON(1);
break;
case LPORT_ST_READY:
- WARN_ON(1);
break;
case LPORT_ST_RESET:
break;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 49e1ccca09d..760db761944 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -153,18 +153,6 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
}
/**
- * fc_rport_free_rcu() - Free a remote port
- * @rcu: The rcu_head structure inside the remote port
- */
-static void fc_rport_free_rcu(struct rcu_head *rcu)
-{
- struct fc_rport_priv *rdata;
-
- rdata = container_of(rcu, struct fc_rport_priv, rcu);
- kfree(rdata);
-}
-
-/**
* fc_rport_destroy() - Free a remote port after last reference is released
* @kref: The remote port's kref
*/
@@ -173,7 +161,7 @@ static void fc_rport_destroy(struct kref *kref)
struct fc_rport_priv *rdata;
rdata = container_of(kref, struct fc_rport_priv, kref);
- call_rcu(&rdata->rcu, fc_rport_free_rcu);
+ kfree_rcu(rdata, rcu);
}
/**
@@ -801,6 +789,20 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
switch (rdata->rp_state) {
case RPORT_ST_INIT:
+ /*
+		 * If the FLOGI request was received on an rport that is in
+		 * the INIT state (it has not transitioned to FLOGI because
+		 * either the fc_rport timeout function didn't trigger or
+		 * this end hasn't received a beacon yet from the other
+		 * end), then and only then allow the rport state machine
+		 * to continue; otherwise fall through, which causes the
+		 * code to send a reject response.
+		 * NOTE: Not checking for FIP->state such as VNMP_UP or
+		 * VNMP_CLAIM because if the FIP state is not one of those,
+		 * the rport wouldn't have been created and 'rport_lookup'
+		 * would have failed anyway in that case.
+ */
+ if (lport->point_to_multipoint)
+ break;
case RPORT_ST_DELETE:
mutex_unlock(&rdata->rp_mutex);
rjt_data.reason = ELS_RJT_FIP;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 0c550d5b913..256a999d010 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -84,22 +84,6 @@ MODULE_PARM_DESC(debug_libiscsi_eh,
__func__, ##arg); \
} while (0);
-/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
-#define SNA32_CHECK 2147483648UL
-
-static int iscsi_sna_lt(u32 n1, u32 n2)
-{
- return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
- (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
-}
-
-/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
-static int iscsi_sna_lte(u32 n1, u32 n2)
-{
- return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
- (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
-}
-
inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
{
struct Scsi_Host *shost = conn->session->host;
@@ -169,7 +153,7 @@ void iscsi_prep_data_out_pdu(struct iscsi_task *task, struct iscsi_r2t_info *r2t
hdr->datasn = cpu_to_be32(r2t->datasn);
r2t->datasn++;
hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
- memcpy(hdr->lun, task->lun, sizeof(hdr->lun));
+ hdr->lun = task->lun;
hdr->itt = task->hdr_itt;
hdr->exp_statsn = r2t->exp_statsn;
hdr->offset = cpu_to_be32(r2t->data_offset + r2t->sent);
@@ -296,7 +280,7 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
/*
* Allow PDUs for unrelated LUNs
*/
- hdr_lun = scsilun_to_int((struct scsi_lun *)tmf->lun);
+ hdr_lun = scsilun_to_int(&tmf->lun);
if (hdr_lun != task->sc->device->lun)
return 0;
/* fall through */
@@ -360,7 +344,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
struct iscsi_conn *conn = task->conn;
struct iscsi_session *session = conn->session;
struct scsi_cmnd *sc = task->sc;
- struct iscsi_cmd *hdr;
+ struct iscsi_scsi_req *hdr;
unsigned hdrlength, cmd_len;
itt_t itt;
int rc;
@@ -374,7 +358,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
if (rc)
return rc;
}
- hdr = (struct iscsi_cmd *) task->hdr;
+ hdr = (struct iscsi_scsi_req *)task->hdr;
itt = hdr->itt;
memset(hdr, 0, sizeof(*hdr));
@@ -389,8 +373,8 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
return rc;
hdr->opcode = ISCSI_OP_SCSI_CMD;
hdr->flags = ISCSI_ATTR_SIMPLE;
- int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
- memcpy(task->lun, hdr->lun, sizeof(task->lun));
+ int_to_scsilun(sc->device->lun, &hdr->lun);
+ task->lun = hdr->lun;
hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
cmd_len = sc->cmd_len;
if (cmd_len < ISCSI_CDB_SIZE)
@@ -830,7 +814,7 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
struct iscsi_task *task, char *data,
int datalen)
{
- struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
+ struct iscsi_scsi_rsp *rhdr = (struct iscsi_scsi_rsp *)hdr;
struct iscsi_session *session = conn->session;
struct scsi_cmnd *sc = task->sc;
@@ -968,7 +952,7 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
hdr.flags = ISCSI_FLAG_CMD_FINAL;
if (rhdr) {
- memcpy(hdr.lun, rhdr->lun, 8);
+ hdr.lun = rhdr->lun;
hdr.ttt = rhdr->ttt;
hdr.itt = RESERVED_ITT;
} else
@@ -2092,7 +2076,7 @@ static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
- memcpy(hdr->lun, task->lun, sizeof(hdr->lun));
+ hdr->lun = task->lun;
hdr->rtt = task->hdr_itt;
hdr->refcmdsn = task->cmdsn;
}
@@ -2233,7 +2217,7 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
- int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+ int_to_scsilun(sc->device->lun, &hdr->lun);
hdr->rtt = RESERVED_ITT;
}
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index e98ae33f129..09b232fd9a1 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -1084,7 +1084,8 @@ iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
struct iscsi_cls_conn *cls_conn;
struct iscsi_tcp_conn *tcp_conn;
- cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
+ cls_conn = iscsi_conn_setup(cls_session,
+ sizeof(*tcp_conn) + dd_data_size, conn_idx);
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
@@ -1096,22 +1097,13 @@ iscsi_tcp_conn_setup(struct iscsi_cls_session *cls_session, int dd_data_size,
tcp_conn = conn->dd_data;
tcp_conn->iscsi_conn = conn;
-
- tcp_conn->dd_data = kzalloc(dd_data_size, GFP_KERNEL);
- if (!tcp_conn->dd_data) {
- iscsi_conn_teardown(cls_conn);
- return NULL;
- }
+ tcp_conn->dd_data = conn->dd_data + sizeof(*tcp_conn);
return cls_conn;
}
EXPORT_SYMBOL_GPL(iscsi_tcp_conn_setup);
void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn)
{
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-
- kfree(tcp_conn->dd_data);
iscsi_conn_teardown(cls_conn);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_conn_teardown);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 874e29d9533..f84084bba2f 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -849,6 +849,9 @@ static struct domain_device *sas_ex_discover_expander(
res = sas_discover_expander(child);
if (res) {
+ spin_lock_irq(&parent->port->dev_list_lock);
+ list_del(&child->dev_list_node);
+ spin_unlock_irq(&parent->port->dev_list_lock);
kfree(child);
return NULL;
}
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 8ec2c86a49d..c088a36d1f3 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -20,6 +20,11 @@
*******************************************************************/
#include <scsi/scsi_host.h>
+
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
+#define CONFIG_SCSI_LPFC_DEBUG_FS
+#endif
+
struct lpfc_sli2_slim;
#define LPFC_PCI_DEV_LP 0x1
@@ -465,9 +470,10 @@ enum intr_type_t {
struct unsol_rcv_ct_ctx {
uint32_t ctxt_id;
uint32_t SID;
- uint32_t oxid;
uint32_t flags;
#define UNSOL_VALID 0x00000001
+ uint16_t oxid;
+ uint16_t rxid;
};
#define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/
@@ -674,6 +680,9 @@ struct lpfc_hba {
uint32_t cfg_enable_rrq;
uint32_t cfg_topology;
uint32_t cfg_link_speed;
+#define LPFC_FCF_FOV 1 /* Fast fcf failover */
+#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
+ uint32_t cfg_fcf_failover_policy;
uint32_t cfg_cr_delay;
uint32_t cfg_cr_count;
uint32_t cfg_multi_ring_support;
@@ -845,9 +854,13 @@ struct lpfc_hba {
/* iDiag debugfs sub-directory */
struct dentry *idiag_root;
struct dentry *idiag_pci_cfg;
+ struct dentry *idiag_bar_acc;
struct dentry *idiag_que_info;
struct dentry *idiag_que_acc;
struct dentry *idiag_drb_acc;
+ struct dentry *idiag_ctl_acc;
+ struct dentry *idiag_mbx_acc;
+ struct dentry *idiag_ext_acc;
#endif
/* Used for deferred freeing of ELS data buffers */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 135a53baa73..2542f1f8bf8 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -755,6 +755,47 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register readiness
+ * @phba: lpfc_hba pointer.
+ *
+ * Description:
+ * Wait on the SLI port status register of an SLI4 interface type-2 device
+ * for readiness after performing a firmware reset.
+ *
+ * Returns:
+ * zero for success
+ **/
+static int
+lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
+{
+ struct lpfc_register portstat_reg;
+ int i;
+
+
+ lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0);
+
+ /* wait for the SLI port firmware ready after firmware reset */
+ for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
+ msleep(10);
+ lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0);
+ if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
+ continue;
+ if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
+ continue;
+ if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
+ continue;
+ break;
+ }
+
+ if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
+ return 0;
+ else
+ return -EIO;
+}
+
+/**
* lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
* @phba: lpfc_hba pointer.
*
@@ -769,6 +810,7 @@ static ssize_t
lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
{
struct completion online_compl;
+ struct pci_dev *pdev = phba->pcidev;
uint32_t reg_val;
int status = 0;
int rc;
@@ -781,6 +823,14 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
LPFC_SLI_INTF_IF_TYPE_2))
return -EPERM;
+ if (!pdev->is_physfn)
+ return -EPERM;
+
+ /* Disable SR-IOV virtual functions if enabled */
+ if (phba->cfg_sriov_nr_virtfn) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
if (status != 0)
@@ -805,7 +855,10 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
/* delay driver action following IF_TYPE_2 reset */
- msleep(100);
+ rc = lpfc_sli4_pdev_status_reg_wait(phba);
+
+ if (rc)
+ return -EIO;
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &status, &online_compl,
@@ -895,6 +948,10 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
if (!phba->cfg_enable_hba_reset)
return -EACCES;
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3050 lpfc_board_mode set to %s\n", buf);
+
init_completion(&online_compl);
if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
@@ -1290,6 +1347,10 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
if (phba->sli_rev == LPFC_SLI_REV4)
val = 0;
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3051 lpfc_poll changed from %d to %d\n",
+ phba->cfg_poll, val);
+
spin_lock_irq(&phba->hbalock);
old_val = phba->cfg_poll;
@@ -1414,80 +1475,10 @@ lpfc_sriov_hw_max_virtfn_show(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- struct pci_dev *pdev = phba->pcidev;
- union lpfc_sli4_cfg_shdr *shdr;
- uint32_t shdr_status, shdr_add_status;
- LPFC_MBOXQ_t *mboxq;
- struct lpfc_mbx_get_prof_cfg *get_prof_cfg;
- struct lpfc_rsrc_desc_pcie *desc;
- uint32_t max_nr_virtfn;
- uint32_t desc_count;
- int length, rc, i;
-
- if ((phba->sli_rev < LPFC_SLI_REV4) ||
- (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
- LPFC_SLI_INTF_IF_TYPE_2))
- return -EPERM;
-
- if (!pdev->is_physfn)
- return snprintf(buf, PAGE_SIZE, "%d\n", 0);
-
- mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mboxq)
- return -ENOMEM;
-
- /* get the maximum number of virtfn support by physfn */
- length = (sizeof(struct lpfc_mbx_get_prof_cfg) -
- sizeof(struct lpfc_sli4_cfg_mhdr));
- lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
- LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG,
- length, LPFC_SLI4_MBX_EMBED);
- shdr = (union lpfc_sli4_cfg_shdr *)
- &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
- bf_set(lpfc_mbox_hdr_pf_num, &shdr->request,
- phba->sli4_hba.iov.pf_number + 1);
-
- get_prof_cfg = &mboxq->u.mqe.un.get_prof_cfg;
- bf_set(lpfc_mbx_get_prof_cfg_prof_tp, &get_prof_cfg->u.request,
- LPFC_CFG_TYPE_CURRENT_ACTIVE);
-
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
- lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
-
- if (rc != MBX_TIMEOUT) {
- /* check return status */
- shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
- shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
- &shdr->response);
- if (shdr_status || shdr_add_status || rc)
- goto error_out;
-
- } else
- goto error_out;
-
- desc_count = get_prof_cfg->u.response.prof_cfg.rsrc_desc_count;
-
- for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
- desc = (struct lpfc_rsrc_desc_pcie *)
- &get_prof_cfg->u.response.prof_cfg.desc[i];
- if (LPFC_RSRC_DESC_TYPE_PCIE ==
- bf_get(lpfc_rsrc_desc_pcie_type, desc)) {
- max_nr_virtfn = bf_get(lpfc_rsrc_desc_pcie_nr_virtfn,
- desc);
- break;
- }
- }
-
- if (i < LPFC_RSRC_DESC_MAX_NUM) {
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
- return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
- }
+ uint16_t max_nr_virtfn;
-error_out:
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
- return -EIO;
+ max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
+ return snprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
}
/**
@@ -1605,6 +1596,9 @@ static int \
lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
{ \
if (val >= minval && val <= maxval) {\
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
+ "3052 lpfc_" #attr " changed from %d to %d\n", \
+ phba->cfg_##attr, val); \
phba->cfg_##attr = val;\
return 0;\
}\
@@ -1762,6 +1756,9 @@ static int \
lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
{ \
if (val >= minval && val <= maxval) {\
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
+ "3053 lpfc_" #attr " changed from %d to %d\n", \
+ vport->cfg_##attr, val); \
vport->cfg_##attr = val;\
return 0;\
}\
@@ -2196,6 +2193,9 @@ lpfc_param_show(enable_npiv);
lpfc_param_init(enable_npiv, 1, 0, 1);
static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
+LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
+ "FCF Fast failover=1 Priority failover=2");
+
int lpfc_enable_rrq;
module_param(lpfc_enable_rrq, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
@@ -2678,6 +2678,9 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
if (nolip)
return strlen(buf);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3054 lpfc_topology changed from %d to %d\n",
+ prev_val, val);
err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
if (err) {
phba->cfg_topology = prev_val;
@@ -3101,6 +3104,10 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
if (sscanf(val_buf, "%i", &val) != 1)
return -EINVAL;
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3055 lpfc_link_speed changed from %d to %d %s\n",
+ phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
+
if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
@@ -3678,7 +3685,9 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
# - Default will result in registering capabilities for all profiles.
#
*/
-unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION;
+unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION;
module_param(lpfc_prot_mask, uint, S_IRUGO);
MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
@@ -3769,6 +3778,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fdmi_on,
&dev_attr_lpfc_max_luns,
&dev_attr_lpfc_enable_npiv,
+ &dev_attr_lpfc_fcf_failover_policy,
&dev_attr_lpfc_enable_rrq,
&dev_attr_nport_evt_cnt,
&dev_attr_board_mode,
@@ -4989,6 +4999,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_link_speed_init(phba, lpfc_link_speed);
lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
+ lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 7fb0ba4cbfa..6760c69f525 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -42,6 +42,7 @@
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
+#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
@@ -960,8 +961,10 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
evt_dat->immed_dat].oxid,
phba->ct_ctx[
evt_dat->immed_dat].SID);
+ phba->ct_ctx[evt_dat->immed_dat].rxid =
+ piocbq->iocb.ulpContext;
phba->ct_ctx[evt_dat->immed_dat].oxid =
- piocbq->iocb.ulpContext;
+ piocbq->iocb.unsli3.rcvsli3.ox_id;
phba->ct_ctx[evt_dat->immed_dat].SID =
piocbq->iocb.un.rcvels.remoteID;
phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
@@ -1312,7 +1315,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
- icmd->ulpContext = phba->ct_ctx[tag].oxid;
+ icmd->ulpContext = phba->ct_ctx[tag].rxid;
+ icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
if (!ndlp) {
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
@@ -1337,9 +1341,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
goto issue_ct_rsp_exit;
}
- icmd->un.ulpWord[3] = ndlp->nlp_rpi;
- if (phba->sli_rev == LPFC_SLI_REV4)
- icmd->ulpContext =
+ icmd->un.ulpWord[3] =
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
/* The exchange is done, mark the entry as invalid */
@@ -1351,8 +1353,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
/* Xmit CT response on exchange <xid> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
- icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
+ "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
+ icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
ctiocb->iocb_cmpl = NULL;
ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
@@ -1471,13 +1473,12 @@ send_mgmt_rsp_exit:
/**
* lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
* @phba: Pointer to HBA context object.
- * @job: LPFC_BSG_VENDOR_DIAG_MODE
*
* This function is responsible for preparing driver for diag loopback
* on device.
*/
static int
-lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
struct Scsi_Host *shost;
@@ -1521,7 +1522,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba, struct fc_bsg_job *job)
/**
* lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
* @phba: Pointer to HBA context object.
- * @job: LPFC_BSG_VENDOR_DIAG_MODE
*
* This function is responsible for driver exit processing of setting up
* diag loopback mode on device.
@@ -1567,7 +1567,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
uint32_t link_flags;
uint32_t timeout;
LPFC_MBOXQ_t *pmboxq;
- int mbxstatus;
+ int mbxstatus = MBX_SUCCESS;
int i = 0;
int rc = 0;
@@ -1586,7 +1586,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
goto job_error;
}
- rc = lpfc_bsg_diag_mode_enter(phba, job);
+ rc = lpfc_bsg_diag_mode_enter(phba);
if (rc)
goto job_error;
@@ -1741,7 +1741,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
uint32_t link_flags, timeout, req_len, alloc_len;
struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
LPFC_MBOXQ_t *pmboxq = NULL;
- int mbxstatus, i, rc = 0;
+ int mbxstatus = MBX_SUCCESS, i, rc = 0;
/* no data to return just the return code */
job->reply->reply_payload_rcv_len = 0;
@@ -1758,7 +1758,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
goto job_error;
}
- rc = lpfc_bsg_diag_mode_enter(phba, job);
+ rc = lpfc_bsg_diag_mode_enter(phba);
if (rc)
goto job_error;
@@ -1982,7 +1982,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
goto job_error;
}
- rc = lpfc_bsg_diag_mode_enter(phba, job);
+ rc = lpfc_bsg_diag_mode_enter(phba);
if (rc)
goto job_error;
@@ -3178,6 +3178,11 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
"(x%x/x%x) complete bsg job done, bsize:%d\n",
phba->mbox_ext_buf_ctx.nembType,
phba->mbox_ext_buf_ctx.mboxType, size);
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
+ phba->mbox_ext_buf_ctx.nembType,
+ phba->mbox_ext_buf_ctx.mboxType,
+ dma_ebuf, sta_pos_addr,
+ phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
} else
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
@@ -3430,6 +3435,10 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
"ext_buf_cnt:%d\n", ext_buf_cnt);
}
+ /* before dma descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
+ sta_pre_addr, dmabuf, ext_buf_cnt);
+
/* reject non-embedded mailbox command with none external buffer */
if (ext_buf_cnt == 0) {
rc = -EPERM;
@@ -3477,6 +3486,10 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
}
+ /* after dma descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
+ sta_pos_addr, dmabuf, ext_buf_cnt);
+
/* construct base driver mbox command */
pmb = &pmboxq->u.mb;
pmbx = (uint8_t *)dmabuf->virt;
@@ -3511,7 +3524,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2947 Issued SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
- return 1;
+ return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2948 Failed to issue SLI_CONFIG ext-buffer "
@@ -3549,7 +3562,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
LPFC_MBOXQ_t *pmboxq = NULL;
MAILBOX_t *pmb;
uint8_t *mbx;
- int rc = 0, i;
+ int rc = SLI_CONFIG_NOT_HANDLED, i;
mbox_req =
(struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
@@ -3591,12 +3604,20 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
"ext_buf_cnt:%d\n", ext_buf_cnt);
}
+ /* before dma buffer descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
+ sta_pre_addr, dmabuf, ext_buf_cnt);
+
if (ext_buf_cnt == 0)
return -EPERM;
/* for the first external buffer */
lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
+ /* after dma descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
+ sta_pos_addr, dmabuf, ext_buf_cnt);
+
/* log for looking forward */
for (i = 1; i < ext_buf_cnt; i++) {
if (nemb_tp == nemb_mse)
@@ -3660,7 +3681,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2955 Issued SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
- return 1;
+ return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2956 Failed to issue SLI_CONFIG ext-buffer "
@@ -3668,6 +3689,11 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
rc = -EPIPE;
}
+ /* wait for additional external buffers */
+ job->reply->result = 0;
+ job->job_done(job);
+ return SLI_CONFIG_HANDLED;
+
job_error:
if (pmboxq)
mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -3840,6 +3866,12 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
struct lpfc_dmabuf, list);
list_del_init(&dmabuf->list);
+
+ /* after dma buffer descriptor setup */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
+ mbox_rd, dma_ebuf, sta_pos_addr,
+ dmabuf, index);
+
pbuf = (uint8_t *)dmabuf->virt;
job->reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->reply_payload.sg_list,
@@ -3922,6 +3954,11 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
dmabuf);
list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
+ /* after write dma buffer */
+ lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
+ mbox_wr, dma_ebuf, sta_pos_addr,
+ dmabuf, index);
+
if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2968 SLI_CONFIG ext-buffer wr all %d "
@@ -3959,7 +3996,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
"2969 Issued SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
- return 1;
+ return SLI_CONFIG_HANDLED;
}
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
"2970 Failed to issue SLI_CONFIG ext-buffer "
@@ -4039,14 +4076,14 @@ lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
struct lpfc_dmabuf *dmabuf)
{
struct dfc_mbox_req *mbox_req;
- int rc;
+ int rc = SLI_CONFIG_NOT_HANDLED;
mbox_req =
(struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
/* mbox command with/without single external buffer */
if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
- return SLI_CONFIG_NOT_HANDLED;
+ return rc;
/* mbox command and first external buffer */
if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
@@ -4249,7 +4286,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
* mailbox extension size
*/
if ((transmit_length > receive_length) ||
- (transmit_length > MAILBOX_EXT_SIZE)) {
+ (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
rc = -ERANGE;
goto job_done;
}
@@ -4272,7 +4309,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* receive length cannot be greater than mailbox
* extension size
*/
- if (receive_length > MAILBOX_EXT_SIZE) {
+ if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
rc = -ERANGE;
goto job_done;
}
@@ -4306,7 +4343,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
/* bde size cannot be greater than mailbox ext size */
- if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
+ if (bde->tus.f.bdeSize >
+ BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
rc = -ERANGE;
goto job_done;
}
@@ -4332,7 +4370,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
* mailbox extension size
*/
if ((receive_length == 0) ||
- (receive_length > MAILBOX_EXT_SIZE)) {
+ (receive_length >
+ BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
rc = -ERANGE;
goto job_done;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index fc20c247f36..a6db6aef133 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -235,9 +235,11 @@ int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
+void lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *, uint16_t);
int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
+void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);
int lpfc_mem_alloc(struct lpfc_hba *, int align);
void lpfc_mem_free(struct lpfc_hba *);
@@ -371,6 +373,10 @@ extern struct lpfc_hbq_init *lpfc_hbq_defs[];
/* SLI4 if_type 2 externs. */
int lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *);
int lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *);
+int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t,
+ uint16_t *, uint16_t *);
+int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
+ uint16_t *, uint16_t *);
/* externs BlockGuard */
extern char *_dump_buf_data;
@@ -432,10 +438,16 @@ void lpfc_handle_rrq_active(struct lpfc_hba *);
int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
uint16_t, uint16_t, uint16_t);
+uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t);
void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
uint32_t);
+void lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *, enum nemb_type,
+ enum mbox_type, enum dma_type, enum sta_type,
+ struct lpfc_dmabuf *, uint32_t);
+void lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *, MAILBOX_t *);
int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
/* functions to support SR-IOV */
int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
+uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index ffe82d169b4..a0424dd90e4 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -48,6 +48,7 @@
#include "lpfc_version.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
+#include "lpfc_bsg.h"
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/*
@@ -135,7 +136,11 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
int i, index, len, enable;
uint32_t ms;
struct lpfc_debugfs_trc *dtp;
- char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE];
+ char *buffer;
+
+ buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return 0;
enable = lpfc_debugfs_enable;
lpfc_debugfs_enable = 0;
@@ -167,6 +172,8 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
}
lpfc_debugfs_enable = enable;
+ kfree(buffer);
+
return len;
}
@@ -195,8 +202,11 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
int i, index, len, enable;
uint32_t ms;
struct lpfc_debugfs_trc *dtp;
- char buffer[LPFC_DEBUG_TRC_ENTRY_SIZE];
+ char *buffer;
+ buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return 0;
enable = lpfc_debugfs_enable;
lpfc_debugfs_enable = 0;
@@ -228,6 +238,8 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
}
lpfc_debugfs_enable = enable;
+ kfree(buffer);
+
return len;
}
@@ -378,7 +390,11 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
int len = 0;
int i, off;
uint32_t *ptr;
- char buffer[1024];
+ char *buffer;
+
+ buffer = kmalloc(1024, GFP_KERNEL);
+ if (!buffer)
+ return 0;
off = 0;
spin_lock_irq(&phba->hbalock);
@@ -407,6 +423,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
}
spin_unlock_irq(&phba->hbalock);
+ kfree(buffer);
+
return len;
}
@@ -1147,7 +1165,8 @@ static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes,
{
char mybuf[64];
char *pbuf, *step_str;
- int bsize, i;
+ int i;
+ size_t bsize;
/* Protect copy from user */
if (!access_ok(VERIFY_READ, buf, nbytes))
@@ -1326,8 +1345,8 @@ lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
return 0;
if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
- where = idiag.cmd.data[0];
- count = idiag.cmd.data[1];
+ where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+ count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
} else
return 0;
@@ -1372,6 +1391,11 @@ pcicfg_browse:
len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
"%08x ", u32val);
offset += sizeof(uint32_t);
+ if (offset >= LPFC_PCI_CFG_SIZE) {
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_CFG_SIZE-len, "\n");
+ break;
+ }
index -= sizeof(uint32_t);
if (!index)
len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
@@ -1384,8 +1408,11 @@ pcicfg_browse:
}
/* Set up the offset for next portion of pci cfg read */
- idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE;
- if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE)
+ if (index == 0) {
+ idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE;
+ if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE)
+ idiag.offset.last_rd = 0;
+ } else
idiag.offset.last_rd = 0;
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
@@ -1438,8 +1465,8 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
if (rc != LPFC_PCI_CFG_RD_CMD_ARG)
goto error_out;
/* Read command from PCI config space, set up command fields */
- where = idiag.cmd.data[0];
- count = idiag.cmd.data[1];
+ where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+ count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
if (count == LPFC_PCI_CFG_BROWSE) {
if (where % sizeof(uint32_t))
goto error_out;
@@ -1474,9 +1501,9 @@ lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
if (rc != LPFC_PCI_CFG_WR_CMD_ARG)
goto error_out;
/* Write command to PCI config space, read-modify-write */
- where = idiag.cmd.data[0];
- count = idiag.cmd.data[1];
- value = idiag.cmd.data[2];
+ where = idiag.cmd.data[IDIAG_PCICFG_WHERE_INDX];
+ count = idiag.cmd.data[IDIAG_PCICFG_COUNT_INDX];
+ value = idiag.cmd.data[IDIAG_PCICFG_VALUE_INDX];
/* Sanity checks */
if ((count != sizeof(uint8_t)) &&
(count != sizeof(uint16_t)) &&
@@ -1569,6 +1596,292 @@ error_out:
}
/**
+ * lpfc_idiag_baracc_read - idiag debugfs pci bar access read
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba pci bar memory mapped space
+ * according to the idiag command, and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_baracc_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ int offset_label, offset, offset_run, len = 0, index;
+ int bar_num, acc_range, bar_size;
+ char *pbuffer;
+ void __iomem *mem_mapped_bar;
+ uint32_t if_type;
+ struct pci_dev *pdev;
+ uint32_t u32val;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_PCI_BAR_RD_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
+ bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
+ offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
+ acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
+ bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
+ } else
+ return 0;
+
+ if (acc_range == 0)
+ return 0;
+
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (bar_num == IDIAG_BARACC_BAR_0)
+ mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+ else if (bar_num == IDIAG_BARACC_BAR_1)
+ mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
+ else if (bar_num == IDIAG_BARACC_BAR_2)
+ mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
+ else
+ return 0;
+ } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (bar_num == IDIAG_BARACC_BAR_0)
+ mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+ else
+ return 0;
+ } else
+ return 0;
+
+ /* Read single PCI bar space register */
+ if (acc_range == SINGLE_WORD) {
+ offset_run = offset;
+ u32val = readl(mem_mapped_bar + offset_run);
+ len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ "%05x: %08x\n", offset_run, u32val);
+ } else
+ goto baracc_browse;
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+baracc_browse:
+
+ /* Browse all PCI bar space registers */
+ offset_label = idiag.offset.last_rd;
+ offset_run = offset_label;
+
+ /* Read PCI bar memory mapped space */
+ len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ "%05x: ", offset_label);
+ index = LPFC_PCI_BAR_RD_SIZE;
+ while (index > 0) {
+ u32val = readl(mem_mapped_bar + offset_run);
+ len += snprintf(pbuffer+len, LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ "%08x ", u32val);
+ offset_run += sizeof(uint32_t);
+ if (acc_range == LPFC_PCI_BAR_BROWSE) {
+ if (offset_run >= bar_size) {
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+ break;
+ }
+ } else {
+ if (offset_run >= offset +
+ (acc_range * sizeof(uint32_t))) {
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+ break;
+ }
+ }
+ index -= sizeof(uint32_t);
+ if (!index)
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_BAR_RD_BUF_SIZE-len, "\n");
+ else if (!(index % (8 * sizeof(uint32_t)))) {
+ offset_label += (8 * sizeof(uint32_t));
+ len += snprintf(pbuffer+len,
+ LPFC_PCI_BAR_RD_BUF_SIZE-len,
+ "\n%05x: ", offset_label);
+ }
+ }
+
+ /* Set up the offset for next portion of pci bar read */
+ if (index == 0) {
+ idiag.offset.last_rd += LPFC_PCI_BAR_RD_SIZE;
+ if (acc_range == LPFC_PCI_BAR_BROWSE) {
+ if (idiag.offset.last_rd >= bar_size)
+ idiag.offset.last_rd = 0;
+ } else {
+ if (offset_run >= offset +
+ (acc_range * sizeof(uint32_t)))
+ idiag.offset.last_rd = offset;
+ }
+ } else {
+ if (acc_range == LPFC_PCI_BAR_BROWSE)
+ idiag.offset.last_rd = 0;
+ else
+ idiag.offset.last_rd = offset;
+ }
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_baracc_write - Syntax check and set up idiag bar access commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and
+ * then performs the syntax check for a PCI bar memory mapped space read or
+ * write command accordingly. In the case of a PCI bar memory mapped space
+ * read command, it sets up the command in the idiag command struct for
+ * the debugfs read operation. In the case of a PCI bar memory mapped space
+ * write operation, it executes the write into the PCI bar memory mapped
+ * space accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to
+ * user space.
+ */
+static ssize_t
+lpfc_idiag_baracc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t bar_num, bar_size, offset, value, acc_range;
+ struct pci_dev *pdev;
+ void __iomem *mem_mapped_bar;
+ uint32_t if_type;
+ uint32_t u32val;
+ int rc;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return -EFAULT;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+ bar_num = idiag.cmd.data[IDIAG_BARACC_BAR_NUM_INDX];
+
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if ((bar_num != IDIAG_BARACC_BAR_0) &&
+ (bar_num != IDIAG_BARACC_BAR_1) &&
+ (bar_num != IDIAG_BARACC_BAR_2))
+ goto error_out;
+ } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (bar_num != IDIAG_BARACC_BAR_0)
+ goto error_out;
+ } else
+ goto error_out;
+
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (bar_num == IDIAG_BARACC_BAR_0) {
+ idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+ LPFC_PCI_IF0_BAR0_SIZE;
+ mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+ } else if (bar_num == IDIAG_BARACC_BAR_1) {
+ idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+ LPFC_PCI_IF0_BAR1_SIZE;
+ mem_mapped_bar = phba->sli4_hba.ctrl_regs_memmap_p;
+ } else if (bar_num == IDIAG_BARACC_BAR_2) {
+ idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+ LPFC_PCI_IF0_BAR2_SIZE;
+ mem_mapped_bar = phba->sli4_hba.drbl_regs_memmap_p;
+ } else
+ goto error_out;
+ } else if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (bar_num == IDIAG_BARACC_BAR_0) {
+ idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX] =
+ LPFC_PCI_IF2_BAR0_SIZE;
+ mem_mapped_bar = phba->sli4_hba.conf_regs_memmap_p;
+ } else
+ goto error_out;
+ } else
+ goto error_out;
+
+ offset = idiag.cmd.data[IDIAG_BARACC_OFF_SET_INDX];
+ if (offset % sizeof(uint32_t))
+ goto error_out;
+
+ bar_size = idiag.cmd.data[IDIAG_BARACC_BAR_SZE_INDX];
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_RD) {
+ /* Sanity check on PCI config read command line arguments */
+ if (rc != LPFC_PCI_BAR_RD_CMD_ARG)
+ goto error_out;
+ acc_range = idiag.cmd.data[IDIAG_BARACC_ACC_MOD_INDX];
+ if (acc_range == LPFC_PCI_BAR_BROWSE) {
+ if (offset > bar_size - sizeof(uint32_t))
+ goto error_out;
+ /* Starting offset to browse */
+ idiag.offset.last_rd = offset;
+ } else if (acc_range > SINGLE_WORD) {
+ if (offset + acc_range * sizeof(uint32_t) > bar_size)
+ goto error_out;
+ /* Starting offset to browse */
+ idiag.offset.last_rd = offset;
+ } else if (acc_range != SINGLE_WORD)
+ goto error_out;
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
+ /* Sanity check on PCI bar write command line arguments */
+ if (rc != LPFC_PCI_BAR_WR_CMD_ARG)
+ goto error_out;
+ /* Write command to PCI bar space, read-modify-write */
+ acc_range = SINGLE_WORD;
+ value = idiag.cmd.data[IDIAG_BARACC_REG_VAL_INDX];
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_WR) {
+ writel(value, mem_mapped_bar + offset);
+ readl(mem_mapped_bar + offset);
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_ST) {
+ u32val = readl(mem_mapped_bar + offset);
+ u32val |= value;
+ writel(u32val, mem_mapped_bar + offset);
+ readl(mem_mapped_bar + offset);
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_BARACC_CL) {
+ u32val = readl(mem_mapped_bar + offset);
+ u32val &= ~value;
+ writel(u32val, mem_mapped_bar + offset);
+ readl(mem_mapped_bar + offset);
+ }
+ } else
+ /* All other opcodes are illegal for now */
+ goto error_out;
+
+ return nbytes;
+error_out:
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
* lpfc_idiag_queinfo_read - idiag debugfs read queue information
* @file: The file pointer to read from.
* @buf: The buffer to copy the data to.
@@ -1870,8 +2183,8 @@ lpfc_idiag_queacc_read(struct file *file, char __user *buf, size_t nbytes,
return 0;
if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_RD) {
- index = idiag.cmd.data[2];
- count = idiag.cmd.data[3];
+ index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
+ count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
pque = (struct lpfc_queue *)idiag.ptr_private;
} else
return 0;
@@ -1943,12 +2256,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
return rc;
/* Get and sanity check on command fields */
- quetp = idiag.cmd.data[0];
- queid = idiag.cmd.data[1];
- index = idiag.cmd.data[2];
- count = idiag.cmd.data[3];
- offset = idiag.cmd.data[4];
- value = idiag.cmd.data[5];
+ quetp = idiag.cmd.data[IDIAG_QUEACC_QUETP_INDX];
+ queid = idiag.cmd.data[IDIAG_QUEACC_QUEID_INDX];
+ index = idiag.cmd.data[IDIAG_QUEACC_INDEX_INDX];
+ count = idiag.cmd.data[IDIAG_QUEACC_COUNT_INDX];
+ offset = idiag.cmd.data[IDIAG_QUEACC_OFFST_INDX];
+ value = idiag.cmd.data[IDIAG_QUEACC_VALUE_INDX];
/* Sanity check on command line arguments */
if (idiag.cmd.opcode == LPFC_IDIAG_CMD_QUEACC_WR ||
@@ -2217,7 +2530,7 @@ lpfc_idiag_drbacc_read(struct file *file, char __user *buf, size_t nbytes,
return 0;
if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_RD)
- drb_reg_id = idiag.cmd.data[0];
+ drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
else
return 0;
@@ -2256,7 +2569,7 @@ lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
{
struct lpfc_debug *debug = file->private_data;
struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
- uint32_t drb_reg_id, value, reg_val;
+ uint32_t drb_reg_id, value, reg_val = 0;
void __iomem *drb_reg;
int rc;
@@ -2268,8 +2581,8 @@ lpfc_idiag_drbacc_write(struct file *file, const char __user *buf,
return rc;
/* Sanity check on command line arguments */
- drb_reg_id = idiag.cmd.data[0];
- value = idiag.cmd.data[1];
+ drb_reg_id = idiag.cmd.data[IDIAG_DRBACC_REGID_INDX];
+ value = idiag.cmd.data[IDIAG_DRBACC_VALUE_INDX];
if (idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_WR ||
idiag.cmd.opcode == LPFC_IDIAG_CMD_DRBACC_ST ||
@@ -2329,6 +2642,679 @@ error_out:
return -EINVAL;
}
+/**
+ * lpfc_idiag_ctlacc_read_reg - idiag debugfs read a control register
+ * @phba: The pointer to hba structure.
+ * @pbuffer: The pointer to the buffer to copy the data to.
+ * @len: The length of data already copied into @pbuffer.
+ * @ctlregid: The id of the control register to read.
+ *
+ * Description:
+ * This routine reads a control register and copies its content to the
+ * user buffer pointed to by @pbuffer.
+ *
+ * Returns:
+ * This function returns the amount of data that was copied into @pbuffer.
+ **/
+static int
+lpfc_idiag_ctlacc_read_reg(struct lpfc_hba *phba, char *pbuffer,
+ int len, uint32_t ctlregid)
+{
+
+ if (!pbuffer)
+ return 0;
+
+ switch (ctlregid) {
+ case LPFC_CTL_PORT_SEM:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port SemReg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_SEM_OFFSET));
+ break;
+ case LPFC_CTL_PORT_STA:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port StaReg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_STA_OFFSET));
+ break;
+ case LPFC_CTL_PORT_CTL:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port CtlReg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_CTL_OFFSET));
+ break;
+ case LPFC_CTL_PORT_ER1:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port Er1Reg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER1_OFFSET));
+ break;
+ case LPFC_CTL_PORT_ER2:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "Port Er2Reg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER2_OFFSET));
+ break;
+ case LPFC_CTL_PDEV_CTL:
+ len += snprintf(pbuffer+len, LPFC_CTL_ACC_BUF_SIZE-len,
+ "PDev CtlReg: 0x%08x\n",
+ readl(phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET));
+ break;
+ default:
+ break;
+ }
+ return len;
+}
+
+/**
+ * lpfc_idiag_ctlacc_read - idiag debugfs read port and device control register
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba port and device registers according
+ * to the idiag command, and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_ctlacc_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t ctl_reg_id, i;
+ char *pbuffer;
+ int len = 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_CTL_ACC_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD)
+ ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
+ else
+ return 0;
+
+ if (ctl_reg_id == LPFC_CTL_ACC_ALL)
+ for (i = 1; i <= LPFC_CTL_MAX; i++)
+ len = lpfc_idiag_ctlacc_read_reg(phba,
+ pbuffer, len, i);
+ else
+ len = lpfc_idiag_ctlacc_read_reg(phba,
+ pbuffer, len, ctl_reg_id);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_ctlacc_write - Syntax check and set up idiag ctlacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and then
+ * performs the syntax check for a port and device control register read (dump)
+ * or write (set) command accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_ctlacc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t ctl_reg_id, value, reg_val = 0;
+ void __iomem *ctl_reg;
+ int rc;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ /* Sanity check on command line arguments */
+ ctl_reg_id = idiag.cmd.data[IDIAG_CTLACC_REGID_INDX];
+ value = idiag.cmd.data[IDIAG_CTLACC_VALUE_INDX];
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+ if (rc != LPFC_CTL_ACC_WR_CMD_ARG)
+ goto error_out;
+ if (ctl_reg_id > LPFC_CTL_MAX)
+ goto error_out;
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_RD) {
+ if (rc != LPFC_CTL_ACC_RD_CMD_ARG)
+ goto error_out;
+ if ((ctl_reg_id > LPFC_CTL_MAX) &&
+ (ctl_reg_id != LPFC_CTL_ACC_ALL))
+ goto error_out;
+ } else
+ goto error_out;
+
+ /* Perform the write access operation */
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+ switch (ctl_reg_id) {
+ case LPFC_CTL_PORT_SEM:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_SEM_OFFSET;
+ break;
+ case LPFC_CTL_PORT_STA:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_STA_OFFSET;
+ break;
+ case LPFC_CTL_PORT_CTL:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_CTL_OFFSET;
+ break;
+ case LPFC_CTL_PORT_ER1:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER1_OFFSET;
+ break;
+ case LPFC_CTL_PORT_ER2:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER2_OFFSET;
+ break;
+ case LPFC_CTL_PDEV_CTL:
+ ctl_reg = phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PDEV_CTL_OFFSET;
+ break;
+ default:
+ goto error_out;
+ }
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_WR)
+ reg_val = value;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_ST) {
+ reg_val = readl(ctl_reg);
+ reg_val |= value;
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_CTLACC_CL) {
+ reg_val = readl(ctl_reg);
+ reg_val &= ~value;
+ }
+ writel(reg_val, ctl_reg);
+ readl(ctl_reg); /* flush */
+ }
+ return nbytes;
+
+error_out:
+ /* Clean out command structure on command error out */
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_mbxacc_get_setup - idiag debugfs get mailbox access setup
+ * @phba: Pointer to HBA context object.
+ * @pbuffer: Pointer to data buffer.
+ *
+ * Description:
+ * This routine gets the driver mailbox access debugfs setup information.
+ *
+ * Returns:
+ * This function returns the overall length of the driver mailbox access
+ * setup information copied into @pbuffer.
+ **/
+static int
+lpfc_idiag_mbxacc_get_setup(struct lpfc_hba *phba, char *pbuffer)
+{
+ uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
+ int len = 0;
+
+ mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+ mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+ mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+ mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+ len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ "mbx_dump_map: 0x%08x\n", mbx_dump_map);
+ len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ "mbx_dump_cnt: %04d\n", mbx_dump_cnt);
+ len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ "mbx_word_cnt: %04d\n", mbx_word_cnt);
+ len += snprintf(pbuffer+len, LPFC_MBX_ACC_BUF_SIZE-len,
+ "mbx_mbox_cmd: 0x%02x\n", mbx_mbox_cmd);
+
+ return len;
+}
+
+/**
+ * lpfc_idiag_mbxacc_read - idiag debugfs read on mailbox access
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the @phba driver mailbox access debugfs setup
+ * information and copies it to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_mbxacc_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ char *pbuffer;
+ int len = 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_MBX_ACC_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if ((idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP) &&
+ (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP))
+ return 0;
+
+ len = lpfc_idiag_mbxacc_get_setup(phba, pbuffer);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_mbxacc_write - Syntax check and set up idiag mbxacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and then
+ * performs the syntax check for a driver mailbox command (dump) and sets up
+ * the necessary states in the idiag command struct accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_mbxacc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ uint32_t mbx_dump_map, mbx_dump_cnt, mbx_word_cnt, mbx_mbox_cmd;
+ int rc;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ /* Sanity check on command line arguments */
+ mbx_mbox_cmd = idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+ mbx_dump_map = idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+ mbx_dump_cnt = idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+ mbx_word_cnt = idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_MBXACC_DP) {
+ if (!(mbx_dump_map & LPFC_MBX_DMP_MBX_ALL))
+ goto error_out;
+ if ((mbx_dump_map & ~LPFC_MBX_DMP_MBX_ALL) &&
+ (mbx_dump_map != LPFC_MBX_DMP_ALL))
+ goto error_out;
+ if (mbx_word_cnt > sizeof(MAILBOX_t))
+ goto error_out;
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_BSG_MBXACC_DP) {
+ if (!(mbx_dump_map & LPFC_BSG_DMP_MBX_ALL))
+ goto error_out;
+ if ((mbx_dump_map & ~LPFC_BSG_DMP_MBX_ALL) &&
+ (mbx_dump_map != LPFC_MBX_DMP_ALL))
+ goto error_out;
+ if (mbx_word_cnt > (BSG_MBOX_SIZE)/4)
+ goto error_out;
+ if (mbx_mbox_cmd != 0x9b)
+ goto error_out;
+ } else
+ goto error_out;
+
+ if (mbx_word_cnt == 0)
+ goto error_out;
+ if (rc != LPFC_MBX_DMP_ARG)
+ goto error_out;
+ if (mbx_mbox_cmd & ~0xff)
+ goto error_out;
+
+ /* condition for stop mailbox dump */
+ if (mbx_dump_cnt == 0)
+ goto reset_out;
+
+ return nbytes;
+
+reset_out:
+ /* Clean out command structure on command error out */
+ memset(&idiag, 0, sizeof(idiag));
+ return nbytes;
+
+error_out:
+ /* Clean out command structure on command error out */
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_extacc_avail_get - get the available extents information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: length of data already copied into the internal buffer.
+ *
+ * Description:
+ * This routine is to get the available extent information.
+ *
+ * Returns:
+ * overall length of the data copied into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_avail_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+ uint16_t ext_cnt, ext_size;
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\nAvailable Extents Information:\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tPort Available VPI extents: ");
+ lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VPI,
+ &ext_cnt, &ext_size);
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tPort Available VFI extents: ");
+ lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_VFI,
+ &ext_cnt, &ext_size);
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tPort Available RPI extents: ");
+ lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
+ &ext_cnt, &ext_size);
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tPort Available XRI extents: ");
+ lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
+ &ext_cnt, &ext_size);
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Count %3d, Size %3d\n", ext_cnt, ext_size);
+
+ return len;
+}
+
+/**
+ * lpfc_idiag_extacc_alloc_get - get the allocated extents information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: length of data already copied into the internal buffer.
+ *
+ * Description:
+ * This routine is to get the allocated extent information.
+ *
+ * Returns:
+ * overall length of the data copied into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_alloc_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+ uint16_t ext_cnt, ext_size;
+ int rc;
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\nAllocated Extents Information:\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tHost Allocated VPI extents: ");
+ rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VPI,
+ &ext_cnt, &ext_size);
+ if (!rc)
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Port %d Extent %3d, Size %3d\n",
+ phba->brd_no, ext_cnt, ext_size);
+ else
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "N/A\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tHost Allocated VFI extents: ");
+ rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_VFI,
+ &ext_cnt, &ext_size);
+ if (!rc)
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Port %d Extent %3d, Size %3d\n",
+ phba->brd_no, ext_cnt, ext_size);
+ else
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "N/A\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tHost Allocated RPI extents: ");
+ rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_RPI,
+ &ext_cnt, &ext_size);
+ if (!rc)
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Port %d Extent %3d, Size %3d\n",
+ phba->brd_no, ext_cnt, ext_size);
+ else
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "N/A\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tHost Allocated XRI extents: ");
+ rc = lpfc_sli4_get_allocated_extnts(phba, LPFC_RSC_TYPE_FCOE_XRI,
+ &ext_cnt, &ext_size);
+ if (!rc)
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "Port %d Extent %3d, Size %3d\n",
+ phba->brd_no, ext_cnt, ext_size);
+ else
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "N/A\n");
+
+ return len;
+}
+
+/**
+ * lpfc_idiag_extacc_drivr_get - get driver extent information
+ * @phba: pointer to lpfc hba data structure.
+ * @pbuffer: pointer to internal buffer.
+ * @len: length of data already copied into the internal buffer.
+ *
+ * Description:
+ * This routine is to get the driver extent information.
+ *
+ * Returns:
+ * overall length of the data copied into the internal buffer.
+ **/
+static int
+lpfc_idiag_extacc_drivr_get(struct lpfc_hba *phba, char *pbuffer, int len)
+{
+ struct lpfc_rsrc_blks *rsrc_blks;
+ int index;
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\nDriver Extents Information:\n");
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tVPI extents:\n");
+ index = 0;
+ list_for_each_entry(rsrc_blks, &phba->lpfc_vpi_blk_list, list) {
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\t\tBlock %3d: Start %4d, Count %4d\n",
+ index, rsrc_blks->rsrc_start,
+ rsrc_blks->rsrc_size);
+ index++;
+ }
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tVFI extents:\n");
+ index = 0;
+ list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_vfi_blk_list,
+ list) {
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\t\tBlock %3d: Start %4d, Count %4d\n",
+ index, rsrc_blks->rsrc_start,
+ rsrc_blks->rsrc_size);
+ index++;
+ }
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tRPI extents:\n");
+ index = 0;
+ list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_rpi_blk_list,
+ list) {
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\t\tBlock %3d: Start %4d, Count %4d\n",
+ index, rsrc_blks->rsrc_start,
+ rsrc_blks->rsrc_size);
+ index++;
+ }
+
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\tXRI extents:\n");
+ index = 0;
+ list_for_each_entry(rsrc_blks, &phba->sli4_hba.lpfc_xri_blk_list,
+ list) {
+ len += snprintf(pbuffer+len, LPFC_EXT_ACC_BUF_SIZE-len,
+ "\t\tBlock %3d: Start %4d, Count %4d\n",
+ index, rsrc_blks->rsrc_start,
+ rsrc_blks->rsrc_size);
+ index++;
+ }
+
+ return len;
+}
+
+/**
+ * lpfc_idiag_extacc_write - Syntax check and set up idiag extacc commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and then
+ * performs the syntax check for extent information access commands and sets
+ * up the necessary states in the idiag command struct accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to
+ * user space.
+ **/
+static ssize_t
+lpfc_idiag_extacc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ uint32_t ext_map;
+ int rc;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc < 0)
+ return rc;
+
+ ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
+
+ if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
+ goto error_out;
+ if (rc != LPFC_EXT_ACC_CMD_ARG)
+ goto error_out;
+ if (!(ext_map & LPFC_EXT_ACC_ALL))
+ goto error_out;
+
+ return nbytes;
+error_out:
+ /* Clean out command structure on command error out */
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_extacc_read - idiag debugfs read access to extent information
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the proper extent information according to
+ * the idiag command, and copies to user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_extacc_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ char *pbuffer;
+ uint32_t ext_map;
+ int len = 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_EXT_ACC_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+ if (*ppos)
+ return 0;
+ if (idiag.cmd.opcode != LPFC_IDIAG_CMD_EXTACC_RD)
+ return 0;
+
+ ext_map = idiag.cmd.data[IDIAG_EXTACC_EXMAP_INDX];
+ if (ext_map & LPFC_EXT_ACC_AVAIL)
+ len = lpfc_idiag_extacc_avail_get(phba, pbuffer, len);
+ if (ext_map & LPFC_EXT_ACC_ALLOC)
+ len = lpfc_idiag_extacc_alloc_get(phba, pbuffer, len);
+ if (ext_map & LPFC_EXT_ACC_DRIVR)
+ len = lpfc_idiag_extacc_drivr_get(phba, pbuffer, len);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
#undef lpfc_debugfs_op_disc_trc
static const struct file_operations lpfc_debugfs_op_disc_trc = {
.owner = THIS_MODULE,
@@ -2419,6 +3405,16 @@ static const struct file_operations lpfc_idiag_op_pciCfg = {
.release = lpfc_idiag_cmd_release,
};
+#undef lpfc_idiag_op_barAcc
+static const struct file_operations lpfc_idiag_op_barAcc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_baracc_read,
+ .write = lpfc_idiag_baracc_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
#undef lpfc_idiag_op_queInfo
static const struct file_operations lpfc_idiag_op_queInfo = {
.owner = THIS_MODULE,
@@ -2427,7 +3423,7 @@ static const struct file_operations lpfc_idiag_op_queInfo = {
.release = lpfc_idiag_release,
};
-#undef lpfc_idiag_op_queacc
+#undef lpfc_idiag_op_queAcc
static const struct file_operations lpfc_idiag_op_queAcc = {
.owner = THIS_MODULE,
.open = lpfc_idiag_open,
@@ -2437,7 +3433,7 @@ static const struct file_operations lpfc_idiag_op_queAcc = {
.release = lpfc_idiag_cmd_release,
};
-#undef lpfc_idiag_op_drbacc
+#undef lpfc_idiag_op_drbAcc
static const struct file_operations lpfc_idiag_op_drbAcc = {
.owner = THIS_MODULE,
.open = lpfc_idiag_open,
@@ -2447,8 +3443,234 @@ static const struct file_operations lpfc_idiag_op_drbAcc = {
.release = lpfc_idiag_cmd_release,
};
+#undef lpfc_idiag_op_ctlAcc
+static const struct file_operations lpfc_idiag_op_ctlAcc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_ctlacc_read,
+ .write = lpfc_idiag_ctlacc_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_mbxAcc
+static const struct file_operations lpfc_idiag_op_mbxAcc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_mbxacc_read,
+ .write = lpfc_idiag_mbxacc_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_extAcc
+static const struct file_operations lpfc_idiag_op_extAcc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_extacc_read,
+ .write = lpfc_idiag_extacc_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
#endif
+/**
+ * lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * Description:
+ * This routine dumps a bsg pass-through non-embedded mailbox command with
+ * an external buffer.
+ **/
+void
+lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
+ enum mbox_type mbox_tp, enum dma_type dma_tp,
+ enum sta_type sta_tp,
+ struct lpfc_dmabuf *dmabuf, uint32_t ext_buf)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint32_t *mbx_mbox_cmd, *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt;
+ char line_buf[LPFC_MBX_ACC_LBUF_SZ];
+ int len = 0;
+ uint32_t do_dump = 0;
+ uint32_t *pword;
+ uint32_t i;
+
+ if (idiag.cmd.opcode != LPFC_IDIAG_BSG_MBXACC_DP)
+ return;
+
+ mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+ mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+ mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+ mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+ if (!(*mbx_dump_map & LPFC_MBX_DMP_ALL) ||
+ (*mbx_dump_cnt == 0) ||
+ (*mbx_word_cnt == 0))
+ return;
+
+ if (*mbx_mbox_cmd != 0x9B)
+ return;
+
+ if ((mbox_tp == mbox_rd) && (dma_tp == dma_mbox)) {
+ if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_MBX) {
+ do_dump |= LPFC_BSG_DMP_MBX_RD_MBX;
+ printk(KERN_ERR "\nRead mbox command (x%x), "
+ "nemb:0x%x, extbuf_cnt:%d:\n",
+ sta_tp, nemb_tp, ext_buf);
+ }
+ }
+ if ((mbox_tp == mbox_rd) && (dma_tp == dma_ebuf)) {
+ if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_BUF) {
+ do_dump |= LPFC_BSG_DMP_MBX_RD_BUF;
+ printk(KERN_ERR "\nRead mbox buffer (x%x), "
+ "nemb:0x%x, extbuf_seq:%d:\n",
+ sta_tp, nemb_tp, ext_buf);
+ }
+ }
+ if ((mbox_tp == mbox_wr) && (dma_tp == dma_mbox)) {
+ if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_MBX) {
+ do_dump |= LPFC_BSG_DMP_MBX_WR_MBX;
+ printk(KERN_ERR "\nWrite mbox command (x%x), "
+ "nemb:0x%x, extbuf_cnt:%d:\n",
+ sta_tp, nemb_tp, ext_buf);
+ }
+ }
+ if ((mbox_tp == mbox_wr) && (dma_tp == dma_ebuf)) {
+ if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_BUF) {
+ do_dump |= LPFC_BSG_DMP_MBX_WR_BUF;
+ printk(KERN_ERR "\nWrite mbox buffer (x%x), "
+ "nemb:0x%x, extbuf_seq:%d:\n",
+ sta_tp, nemb_tp, ext_buf);
+ }
+ }
+
+ /* dump buffer content */
+ if (do_dump) {
+ pword = (uint32_t *)dmabuf->virt;
+ for (i = 0; i < *mbx_word_cnt; i++) {
+ if (!(i % 8)) {
+ if (i != 0)
+ printk(KERN_ERR "%s\n", line_buf);
+ len = 0;
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len,
+ "%03d: ", i);
+ }
+ len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+ "%08x ", (uint32_t)*pword);
+ pword++;
+ }
+ if ((i - 1) % 8)
+ printk(KERN_ERR "%s\n", line_buf);
+ (*mbx_dump_cnt)--;
+ }
+
+ /* Clean out command structure on reaching dump count */
+ if (*mbx_dump_cnt == 0)
+ memset(&idiag, 0, sizeof(idiag));
+ return;
+#endif
+}
+
+/**
+ * lpfc_idiag_mbxacc_dump_issue_mbox - idiag debugfs dump issue mailbox command
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: Pointer to a DMA buffer descriptor.
+ *
+ * Description:
+ * This routine dumps a pass-through non-embedded mailbox command from the
+ * issue mailbox command path.
+ **/
+void
+lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint32_t *mbx_dump_map, *mbx_dump_cnt, *mbx_word_cnt, *mbx_mbox_cmd;
+ char line_buf[LPFC_MBX_ACC_LBUF_SZ];
+ int len = 0;
+ uint32_t *pword;
+ uint8_t *pbyte;
+ uint32_t i, j;
+
+ if (idiag.cmd.opcode != LPFC_IDIAG_CMD_MBXACC_DP)
+ return;
+
+ mbx_mbox_cmd = &idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX];
+ mbx_dump_map = &idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX];
+ mbx_dump_cnt = &idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX];
+ mbx_word_cnt = &idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX];
+
+ if (!(*mbx_dump_map & LPFC_MBX_DMP_MBX_ALL) ||
+ (*mbx_dump_cnt == 0) ||
+ (*mbx_word_cnt == 0))
+ return;
+
+ if ((*mbx_mbox_cmd != LPFC_MBX_ALL_CMD) &&
+ (*mbx_mbox_cmd != pmbox->mbxCommand))
+ return;
+
+ /* dump buffer content */
+ if (*mbx_dump_map & LPFC_MBX_DMP_MBX_WORD) {
+ printk(KERN_ERR "Mailbox command:0x%x dump by word:\n",
+ pmbox->mbxCommand);
+ pword = (uint32_t *)pmbox;
+ for (i = 0; i < *mbx_word_cnt; i++) {
+ if (!(i % 8)) {
+ if (i != 0)
+ printk(KERN_ERR "%s\n", line_buf);
+ len = 0;
+ memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len,
+ "%03d: ", i);
+ }
+ len += snprintf(line_buf+len, LPFC_MBX_ACC_LBUF_SZ-len,
+ "%08x ",
+ ((uint32_t)*pword) & 0xffffffff);
+ pword++;
+ }
+ if ((i - 1) % 8)
+ printk(KERN_ERR "%s\n", line_buf);
+ printk(KERN_ERR "\n");
+ }
+ if (*mbx_dump_map & LPFC_MBX_DMP_MBX_BYTE) {
+ printk(KERN_ERR "Mailbox command:0x%x dump by byte:\n",
+ pmbox->mbxCommand);
+ pbyte = (uint8_t *)pmbox;
+ for (i = 0; i < *mbx_word_cnt; i++) {
+ if (!(i % 8)) {
+ if (i != 0)
+ printk(KERN_ERR "%s\n", line_buf);
+ len = 0;
+ memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len,
+ "%03d: ", i);
+ }
+ for (j = 0; j < 4; j++) {
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len,
+ "%02x",
+ ((uint8_t)*pbyte) & 0xff);
+ pbyte++;
+ }
+ len += snprintf(line_buf+len,
+ LPFC_MBX_ACC_LBUF_SZ-len, " ");
+ }
+ if ((i - 1) % 8)
+ printk(KERN_ERR "%s\n", line_buf);
+ printk(KERN_ERR "\n");
+ }
+ (*mbx_dump_cnt)--;
+
+ /* Clean out command structure on reaching dump count */
+ if (*mbx_dump_cnt == 0)
+ memset(&idiag, 0, sizeof(idiag));
+ return;
+#endif
+}
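
Editorial note: this dump only fires once the shared idiag command structure has been armed with the mailbox-access dump opcode. A hedged sketch of the state the mbxAcc debugfs write handler would leave behind (illustrative values; the actual parsing lives in the write routine added by this patch):

	/* Hedged sketch: arm a one-shot, word-and-byte dump of every
	 * mailbox command issued through lpfc_sli_issue_mbox_s4().
	 */
	idiag.cmd.opcode = LPFC_IDIAG_CMD_MBXACC_DP;
	idiag.cmd.data[IDIAG_MBXACC_MBCMD_INDX] = LPFC_MBX_ALL_CMD;
	idiag.cmd.data[IDIAG_MBXACC_DPMAP_INDX] = LPFC_MBX_DMP_MBX_ALL;
	idiag.cmd.data[IDIAG_MBXACC_DPCNT_INDX] = 1;	/* dump one command  */
	idiag.cmd.data[IDIAG_MBXACC_WDCNT_INDX] = 32;	/* words per command */
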
+
/**
* lpfc_debugfs_initialize - Initialize debugfs for a vport
* @vport: The vport pointer to initialize.
@@ -2672,7 +3894,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
vport, &lpfc_debugfs_op_nodelist);
if (!vport->debug_nodelist) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0409 Can't create debugfs nodelist\n");
+ "2985 Can't create debugfs nodelist\n");
goto debug_failed;
}
@@ -2709,6 +3931,20 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
idiag.offset.last_rd = 0;
}
+ /* iDiag PCI BAR access */
+ snprintf(name, sizeof(name), "barAcc");
+ if (!phba->idiag_bar_acc) {
+ phba->idiag_bar_acc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_barAcc);
+ if (!phba->idiag_bar_acc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3056 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ idiag.offset.last_rd = 0;
+ }
+
/* iDiag get PCI function queue information */
snprintf(name, sizeof(name), "queInfo");
if (!phba->idiag_que_info) {
@@ -2748,6 +3984,50 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
}
+ /* iDiag access PCI function control registers */
+ snprintf(name, sizeof(name), "ctlAcc");
+ if (!phba->idiag_ctl_acc) {
+ phba->idiag_ctl_acc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc);
+ if (!phba->idiag_ctl_acc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2981 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+
+ /* iDiag access mbox commands */
+ snprintf(name, sizeof(name), "mbxAcc");
+ if (!phba->idiag_mbx_acc) {
+ phba->idiag_mbx_acc =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc);
+ if (!phba->idiag_mbx_acc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2980 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+
+ /* iDiag extents access commands */
+ if (phba->sli4_hba.extents_in_use) {
+ snprintf(name, sizeof(name), "extAcc");
+ if (!phba->idiag_ext_acc) {
+ phba->idiag_ext_acc =
+ debugfs_create_file(name,
+ S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba,
+ &lpfc_idiag_op_extAcc);
+ if (!phba->idiag_ext_acc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2986 Cant create "
+ "idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+ }
+
debug_failed:
return;
#endif
@@ -2782,7 +4062,6 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(vport->debug_nodelist); /* nodelist */
vport->debug_nodelist = NULL;
}
-
if (vport->vport_debugfs_root) {
debugfs_remove(vport->vport_debugfs_root); /* vportX */
vport->vport_debugfs_root = NULL;
@@ -2826,6 +4105,21 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
* iDiag release
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (phba->idiag_ext_acc) {
+ /* iDiag extAcc */
+ debugfs_remove(phba->idiag_ext_acc);
+ phba->idiag_ext_acc = NULL;
+ }
+ if (phba->idiag_mbx_acc) {
+ /* iDiag mbxAcc */
+ debugfs_remove(phba->idiag_mbx_acc);
+ phba->idiag_mbx_acc = NULL;
+ }
+ if (phba->idiag_ctl_acc) {
+ /* iDiag ctlAcc */
+ debugfs_remove(phba->idiag_ctl_acc);
+ phba->idiag_ctl_acc = NULL;
+ }
if (phba->idiag_drb_acc) {
/* iDiag drbAcc */
debugfs_remove(phba->idiag_drb_acc);
@@ -2841,6 +4135,11 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(phba->idiag_que_info);
phba->idiag_que_info = NULL;
}
+ if (phba->idiag_bar_acc) {
+ /* iDiag barAcc */
+ debugfs_remove(phba->idiag_bar_acc);
+ phba->idiag_bar_acc = NULL;
+ }
if (phba->idiag_pci_cfg) {
/* iDiag pciCfg */
debugfs_remove(phba->idiag_pci_cfg);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 6525a5e62d2..f83bd944edd 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -39,14 +39,51 @@
/* hbqinfo output buffer size */
#define LPFC_HBQINFO_SIZE 8192
+/*
+ * For SLI4 iDiag debugfs diagnostics tool
+ */
+
/* pciConf */
#define LPFC_PCI_CFG_BROWSE 0xffff
#define LPFC_PCI_CFG_RD_CMD_ARG 2
#define LPFC_PCI_CFG_WR_CMD_ARG 3
#define LPFC_PCI_CFG_SIZE 4096
-#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2)
#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
+#define IDIAG_PCICFG_WHERE_INDX 0
+#define IDIAG_PCICFG_COUNT_INDX 1
+#define IDIAG_PCICFG_VALUE_INDX 2
+
+/* barAcc */
+#define LPFC_PCI_BAR_BROWSE 0xffff
+#define LPFC_PCI_BAR_RD_CMD_ARG 3
+#define LPFC_PCI_BAR_WR_CMD_ARG 3
+
+#define LPFC_PCI_IF0_BAR0_SIZE (1024 * 16)
+#define LPFC_PCI_IF0_BAR1_SIZE (1024 * 128)
+#define LPFC_PCI_IF0_BAR2_SIZE (1024 * 128)
+#define LPFC_PCI_IF2_BAR0_SIZE (1024 * 32)
+
+#define LPFC_PCI_BAR_RD_BUF_SIZE 4096
+#define LPFC_PCI_BAR_RD_SIZE (LPFC_PCI_BAR_RD_BUF_SIZE/4)
+
+#define LPFC_PCI_IF0_BAR0_RD_SIZE (LPFC_PCI_IF0_BAR0_SIZE/4)
+#define LPFC_PCI_IF0_BAR1_RD_SIZE (LPFC_PCI_IF0_BAR1_SIZE/4)
+#define LPFC_PCI_IF0_BAR2_RD_SIZE (LPFC_PCI_IF0_BAR2_SIZE/4)
+#define LPFC_PCI_IF2_BAR0_RD_SIZE (LPFC_PCI_IF2_BAR0_SIZE/4)
+
+#define IDIAG_BARACC_BAR_NUM_INDX 0
+#define IDIAG_BARACC_OFF_SET_INDX 1
+#define IDIAG_BARACC_ACC_MOD_INDX 2
+#define IDIAG_BARACC_REG_VAL_INDX 2
+#define IDIAG_BARACC_BAR_SZE_INDX 3
+
+#define IDIAG_BARACC_BAR_0 0
+#define IDIAG_BARACC_BAR_1 1
+#define IDIAG_BARACC_BAR_2 2
+
+#define SINGLE_WORD 1
+
/* queue info */
#define LPFC_QUE_INFO_GET_BUF_SIZE 4096
@@ -63,7 +100,14 @@
#define LPFC_IDIAG_WQ 4
#define LPFC_IDIAG_RQ 5
-/* doorbell acc */
+#define IDIAG_QUEACC_QUETP_INDX 0
+#define IDIAG_QUEACC_QUEID_INDX 1
+#define IDIAG_QUEACC_INDEX_INDX 2
+#define IDIAG_QUEACC_COUNT_INDX 3
+#define IDIAG_QUEACC_OFFST_INDX 4
+#define IDIAG_QUEACC_VALUE_INDX 5
+
+/* doorbell register acc */
#define LPFC_DRB_ACC_ALL 0xffff
#define LPFC_DRB_ACC_RD_CMD_ARG 1
#define LPFC_DRB_ACC_WR_CMD_ARG 2
@@ -76,6 +120,67 @@
#define LPFC_DRB_MAX 4
+#define IDIAG_DRBACC_REGID_INDX 0
+#define IDIAG_DRBACC_VALUE_INDX 1
+
+/* control register acc */
+#define LPFC_CTL_ACC_ALL 0xffff
+#define LPFC_CTL_ACC_RD_CMD_ARG 1
+#define LPFC_CTL_ACC_WR_CMD_ARG 2
+#define LPFC_CTL_ACC_BUF_SIZE 256
+
+#define LPFC_CTL_PORT_SEM 1
+#define LPFC_CTL_PORT_STA 2
+#define LPFC_CTL_PORT_CTL 3
+#define LPFC_CTL_PORT_ER1 4
+#define LPFC_CTL_PORT_ER2 5
+#define LPFC_CTL_PDEV_CTL 6
+
+#define LPFC_CTL_MAX 6
+
+#define IDIAG_CTLACC_REGID_INDX 0
+#define IDIAG_CTLACC_VALUE_INDX 1
+
+/* mailbox access */
+#define LPFC_MBX_DMP_ARG 4
+
+#define LPFC_MBX_ACC_BUF_SIZE 512
+#define LPFC_MBX_ACC_LBUF_SZ 128
+
+#define LPFC_MBX_DMP_MBX_WORD 0x00000001
+#define LPFC_MBX_DMP_MBX_BYTE 0x00000002
+#define LPFC_MBX_DMP_MBX_ALL (LPFC_MBX_DMP_MBX_WORD | LPFC_MBX_DMP_MBX_BYTE)
+
+#define LPFC_BSG_DMP_MBX_RD_MBX 0x00000001
+#define LPFC_BSG_DMP_MBX_RD_BUF 0x00000002
+#define LPFC_BSG_DMP_MBX_WR_MBX 0x00000004
+#define LPFC_BSG_DMP_MBX_WR_BUF 0x00000008
+#define LPFC_BSG_DMP_MBX_ALL (LPFC_BSG_DMP_MBX_RD_MBX | \
+ LPFC_BSG_DMP_MBX_RD_BUF | \
+ LPFC_BSG_DMP_MBX_WR_MBX | \
+ LPFC_BSG_DMP_MBX_WR_BUF)
+
+#define LPFC_MBX_DMP_ALL 0xffff
+#define LPFC_MBX_ALL_CMD 0xff
+
+#define IDIAG_MBXACC_MBCMD_INDX 0
+#define IDIAG_MBXACC_DPMAP_INDX 1
+#define IDIAG_MBXACC_DPCNT_INDX 2
+#define IDIAG_MBXACC_WDCNT_INDX 3
+
+/* extents access */
+#define LPFC_EXT_ACC_CMD_ARG 1
+#define LPFC_EXT_ACC_BUF_SIZE 4096
+
+#define LPFC_EXT_ACC_AVAIL 0x1
+#define LPFC_EXT_ACC_ALLOC 0x2
+#define LPFC_EXT_ACC_DRIVR 0x4
+#define LPFC_EXT_ACC_ALL (LPFC_EXT_ACC_DRIVR | \
+ LPFC_EXT_ACC_AVAIL | \
+ LPFC_EXT_ACC_ALLOC)
+
+#define IDIAG_EXTACC_EXMAP_INDX 0
+
#define SIZE_U8 sizeof(uint8_t)
#define SIZE_U16 sizeof(uint16_t)
#define SIZE_U32 sizeof(uint32_t)
@@ -110,6 +215,11 @@ struct lpfc_idiag_cmd {
#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
+#define LPFC_IDIAG_CMD_BARACC_RD 0x00000008
+#define LPFC_IDIAG_CMD_BARACC_WR 0x00000009
+#define LPFC_IDIAG_CMD_BARACC_ST 0x0000000a
+#define LPFC_IDIAG_CMD_BARACC_CL 0x0000000b
+
#define LPFC_IDIAG_CMD_QUEACC_RD 0x00000011
#define LPFC_IDIAG_CMD_QUEACC_WR 0x00000012
#define LPFC_IDIAG_CMD_QUEACC_ST 0x00000013
@@ -119,6 +229,17 @@ struct lpfc_idiag_cmd {
#define LPFC_IDIAG_CMD_DRBACC_WR 0x00000022
#define LPFC_IDIAG_CMD_DRBACC_ST 0x00000023
#define LPFC_IDIAG_CMD_DRBACC_CL 0x00000024
+
+#define LPFC_IDIAG_CMD_CTLACC_RD 0x00000031
+#define LPFC_IDIAG_CMD_CTLACC_WR 0x00000032
+#define LPFC_IDIAG_CMD_CTLACC_ST 0x00000033
+#define LPFC_IDIAG_CMD_CTLACC_CL 0x00000034
+
+#define LPFC_IDIAG_CMD_MBXACC_DP 0x00000041
+#define LPFC_IDIAG_BSG_MBXACC_DP 0x00000042
+
+#define LPFC_IDIAG_CMD_EXTACC_RD 0x00000051
+
uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
};
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 32a084534f3..023da0e00d3 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -647,21 +647,15 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
lpfc_cleanup_pending_mbox(vport);
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev == LPFC_SLI_REV4) {
lpfc_sli4_unreg_all_rpis(vport);
-
- if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
lpfc_mbx_unreg_vpi(vport);
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
- spin_unlock_irq(shost->host_lock);
- }
- /*
- * If VPI is unreged, driver need to do INIT_VPI
- * before re-registering
- */
- if (phba->sli_rev == LPFC_SLI_REV4) {
- spin_lock_irq(shost->host_lock);
+ /*
+ * If VPI is unreged, the driver needs to do INIT_VPI
+ * before re-registering
+ */
vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
}
@@ -880,6 +874,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
phba->fcf.current_rec.fcf_indx,
irsp->ulpStatus, irsp->un.ulpWord[4],
irsp->ulpTimeout);
+ lpfc_sli4_set_fcf_flogi_fail(phba,
+ phba->fcf.current_rec.fcf_indx);
fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
if (rc)
@@ -1096,11 +1092,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Set the fcfi to the fcfi we registered with */
elsiocb->iocb.ulpContext = phba->fcf.fcfi;
}
- } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
- sp->cmn.request_multiple_Nport = 1;
- /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
- icmd->ulpCt_h = 1;
- icmd->ulpCt_l = 0;
+ } else {
+ if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+ sp->cmn.request_multiple_Nport = 1;
+ /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
+ icmd->ulpCt_h = 1;
+ icmd->ulpCt_l = 0;
+ } else
+ sp->cmn.request_multiple_Nport = 0;
}
if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
@@ -3656,7 +3655,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
}
icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t);
@@ -3673,7 +3673,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
return 1;
icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
if (mbox)
@@ -3695,7 +3696,8 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
return 1;
icmd = &elsiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
@@ -3781,7 +3783,8 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
icmd = &elsiocb->iocb;
oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
@@ -3853,7 +3856,8 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
icmd = &elsiocb->iocb;
oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
/* Xmit ADISC ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
@@ -3931,7 +3935,9 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
icmd = &elsiocb->iocb;
oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
/* Xmit PRLI ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0131 Xmit PRLI ACC response tag x%x xri x%x, "
@@ -4035,7 +4041,9 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
icmd = &elsiocb->iocb;
oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
+
/* Xmit RNID ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0132 Xmit RNID ACC response tag x%x xri x%x\n",
@@ -4163,7 +4171,9 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
if (!elsiocb)
return 1;
- elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */
+ elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
+ elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
+
/* Xmit ECHO ACC response tag <ulpIoTag> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2876 Xmit ECHO ACC response tag x%x xri x%x\n",
@@ -5054,13 +5064,15 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_nodelist *ndlp;
- uint16_t xri;
+ uint16_t oxid;
+ uint16_t rxid;
uint32_t cmdsize;
mb = &pmb->u.mb;
ndlp = (struct lpfc_nodelist *) pmb->context2;
- xri = (uint16_t) ((unsigned long)(pmb->context1));
+ rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
+ oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
pmb->context1 = NULL;
pmb->context2 = NULL;
@@ -5082,7 +5094,8 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
icmd = &elsiocb->iocb;
- icmd->ulpContext = xri;
+ icmd->ulpContext = rxid;
+ icmd->unsli3.rcvsli3.ox_id = oxid;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -5137,13 +5150,16 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint8_t *pcmd;
struct lpfc_iocbq *elsiocb;
struct lpfc_nodelist *ndlp;
- uint16_t xri, status;
+ uint16_t status;
+ uint16_t oxid;
+ uint16_t rxid;
uint32_t cmdsize;
mb = &pmb->u.mb;
ndlp = (struct lpfc_nodelist *) pmb->context2;
- xri = (uint16_t) ((unsigned long)(pmb->context1));
+ rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
+ oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
pmb->context1 = NULL;
pmb->context2 = NULL;
@@ -5165,7 +5181,8 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
icmd = &elsiocb->iocb;
- icmd->ulpContext = xri;
+ icmd->ulpContext = rxid;
+ icmd->unsli3.rcvsli3.ox_id = oxid;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -5238,8 +5255,9 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
- mbox->context1 =
- (void *)((unsigned long) cmdiocb->iocb.ulpContext);
+ mbox->context1 = (void *)((unsigned long)
+ ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
+ cmdiocb->iocb.ulpContext)); /* rx_id */
mbox->context2 = lpfc_nlp_get(ndlp);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
@@ -5314,7 +5332,8 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
pcmd += sizeof(uint32_t); /* Skip past command */
/* use the command's xri in the response */
- elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;
+ elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
+ elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
rtv_rsp = (struct RTV_RSP *)pcmd;
@@ -5399,8 +5418,9 @@ lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
if (mbox) {
lpfc_read_lnk_stat(phba, mbox);
- mbox->context1 =
- (void *)((unsigned long) cmdiocb->iocb.ulpContext);
+ mbox->context1 = (void *)((unsigned long)
+ ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
+ cmdiocb->iocb.ulpContext)); /* rx_id */
mbox->context2 = lpfc_nlp_get(ndlp);
mbox->vport = vport;
mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
@@ -5554,7 +5574,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
icmd = &elsiocb->iocb;
oldcmd = &oldiocb->iocb;
- icmd->ulpContext = oldcmd->ulpContext; /* Xri */
+ icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
+ icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
@@ -6586,7 +6607,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
{
struct lpfc_vport *vport;
unsigned long flags;
- int i;
+ int i = 0;
/* The physical ports are always vpi 0 - translate is unnecessary. */
if (vpi > 0) {
@@ -6609,7 +6630,7 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
spin_lock_irqsave(&phba->hbalock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
- if (vport->vpi == vpi) {
+ if (vport->vpi == i) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return vport;
}
@@ -7787,6 +7808,7 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+ uint16_t lxri = 0;
struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
unsigned long iflag = 0;
@@ -7815,7 +7837,12 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
}
}
spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
- sglq_entry = __lpfc_get_active_sglq(phba, xri);
+ lxri = lpfc_sli4_xri_inrange(phba, xri);
+ if (lxri == NO_XRI) {
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+ }
+ sglq_entry = __lpfc_get_active_sglq(phba, lxri);
if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 18d0dbfda2b..0b47adf9fee 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1109,6 +1109,28 @@ out:
return;
}
+/**
+ * lpfc_sli4_clear_fcf_rr_bmask
+ * @phba: pointer to the struct lpfc_hba for this port.
+ * This function resets the round robin bit mask and clears the
+ * fcf priority list. The list deletions are done while holding the
+ * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
+ * from the lpfc_fcf_pri record.
+ **/
+void
+lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
+{
+ struct lpfc_fcf_pri *fcf_pri;
+ struct lpfc_fcf_pri *next_fcf_pri;
+ memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry_safe(fcf_pri, next_fcf_pri,
+ &phba->fcf.fcf_pri_list, list) {
+ list_del_init(&fcf_pri->list);
+ fcf_pri->fcf_rec.flag = 0;
+ }
+ spin_unlock_irq(&phba->hbalock);
+}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
@@ -1130,7 +1152,8 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
spin_unlock_irq(&phba->hbalock);
/* If there is a pending FCoE event, restart FCF table scan. */
- if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
+ if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
+ lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
goto fail_out;
/* Mark successful completion of FCF table scan */
@@ -1250,6 +1273,30 @@ lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
}
/**
+ * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: Index for the lpfc_fcf_record.
+ * @new_fcf_record: pointer to hba fcf record.
+ *
+ * This routine updates the driver FCF priority record from the new HBA FCF
+ * record. This routine is called with the host lock held.
+ **/
+static void
+__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
+ struct fcf_record *new_fcf_record
+ )
+{
+ struct lpfc_fcf_pri *fcf_pri;
+
+ fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ fcf_pri->fcf_rec.fcf_index = fcf_index;
+ /* FCF record priority */
+ fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
+
+}
+
+/**
* lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
* @fcf: pointer to driver fcf record.
* @new_fcf_record: pointer to fcf record.
@@ -1332,6 +1379,9 @@ __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
fcf_rec->addr_mode = addr_mode;
fcf_rec->vlan_id = vlan_id;
fcf_rec->flag |= (flag | RECORD_VALID);
+ __lpfc_update_fcf_record_pri(phba,
+ bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
+ new_fcf_record);
}
/**
@@ -1834,6 +1884,8 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
return false;
if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
return false;
+ if (fcf_rec->priority != new_fcf_record->fip_priority)
+ return false;
return true;
}
@@ -1897,6 +1949,152 @@ stop_flogi_current_fcf:
}
/**
+ * lpfc_sli4_fcf_pri_list_del
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to delete
+ * This routine checks the on-list flag of the fcf_index to be deleted.
+ * If it is on the list then it is removed from the list and the flag
+ * is cleared. This routine grabs the hbalock before removing the fcf
+ * record from the list.
+ **/
+static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
+ uint16_t fcf_index)
+{
+ struct lpfc_fcf_pri *new_fcf_pri;
+
+ new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3058 deleting idx x%x pri x%x flg x%x\n",
+ fcf_index, new_fcf_pri->fcf_rec.priority,
+ new_fcf_pri->fcf_rec.flag);
+ spin_lock_irq(&phba->hbalock);
+ if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
+ if (phba->fcf.current_rec.priority ==
+ new_fcf_pri->fcf_rec.priority)
+ phba->fcf.eligible_fcf_cnt--;
+ list_del_init(&new_fcf_pri->list);
+ new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
+ }
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_set_fcf_flogi_fail
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to update
+ * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
+ * flag so that the round robin selection for the particular priority level
+ * will try a different fcf record that does not have this bit set.
+ * If the fcf record is re-read for any reason this flag is cleared before
+ * adding it to the priority list.
+ **/
+void
+lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+ struct lpfc_fcf_pri *new_fcf_pri;
+ new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ spin_lock_irq(&phba->hbalock);
+ new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_fcf_pri_list_add
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: the index of the fcf record to add
+ * @new_fcf_record: pointer to the fcf record being added.
+ * This routine checks the priority of the fcf_index to be added.
+ * If it is a lower priority than the current head of the fcf_pri list
+ * then it is added to the list in the right order.
+ * If it is the same priority as the current head of the list then it
+ * is added to the head of the list and its bit in the rr_bmask is set.
+ * If the fcf_index to be added is of a higher priority than the current
+ * head of the list then the rr_bmask is cleared, its bit is set in the
+ * rr_bmask and it is added to the head of the list.
+ * returns:
+ * 0=success 1=failure
+ **/
+int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
+ struct fcf_record *new_fcf_record)
+{
+ uint16_t current_fcf_pri;
+ uint16_t last_index;
+ struct lpfc_fcf_pri *fcf_pri;
+ struct lpfc_fcf_pri *next_fcf_pri;
+ struct lpfc_fcf_pri *new_fcf_pri;
+ int ret;
+
+ new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3059 adding idx x%x pri x%x flg x%x\n",
+ fcf_index, new_fcf_record->fip_priority,
+ new_fcf_pri->fcf_rec.flag);
+ spin_lock_irq(&phba->hbalock);
+ if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
+ list_del_init(&new_fcf_pri->list);
+ new_fcf_pri->fcf_rec.fcf_index = fcf_index;
+ new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
+ if (list_empty(&phba->fcf.fcf_pri_list)) {
+ list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
+ ret = lpfc_sli4_fcf_rr_index_set(phba,
+ new_fcf_pri->fcf_rec.fcf_index);
+ goto out;
+ }
+
+ last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX);
+ if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ ret = 0; /* Empty rr list */
+ goto out;
+ }
+ current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
+ if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
+ list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
+ if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
+ memset(phba->fcf.fcf_rr_bmask, 0,
+ sizeof(*phba->fcf.fcf_rr_bmask));
+ /* fcfs_at_this_priority_level = 1; */
+ phba->fcf.eligible_fcf_cnt = 1;
+ } else
+ /* fcfs_at_this_priority_level++; */
+ phba->fcf.eligible_fcf_cnt++;
+ ret = lpfc_sli4_fcf_rr_index_set(phba,
+ new_fcf_pri->fcf_rec.fcf_index);
+ goto out;
+ }
+
+ list_for_each_entry_safe(fcf_pri, next_fcf_pri,
+ &phba->fcf.fcf_pri_list, list) {
+ if (new_fcf_pri->fcf_rec.priority <=
+ fcf_pri->fcf_rec.priority) {
+ if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
+ list_add(&new_fcf_pri->list,
+ &phba->fcf.fcf_pri_list);
+ else
+ list_add(&new_fcf_pri->list,
+ &((struct lpfc_fcf_pri *)
+ fcf_pri->list.prev)->list);
+ ret = 0;
+ goto out;
+ } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
+ || new_fcf_pri->fcf_rec.priority <
+ next_fcf_pri->fcf_rec.priority) {
+ list_add(&new_fcf_pri->list, &fcf_pri->list);
+ ret = 0;
+ goto out;
+ }
+ if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
+ continue;
+
+ }
+ ret = 1;
+out:
+ /* we use = instead of |= to clear the FLOGI_FAILED flag. */
+ new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
+ spin_unlock_irq(&phba->hbalock);
+ return ret;
+}
+
+/**
* lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to mailbox object.
@@ -1958,6 +2156,9 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* record for roundrobin FCF failover.
*/
if (!rc) {
+ lpfc_sli4_fcf_pri_list_del(phba,
+ bf_get(lpfc_fcf_record_fcf_index,
+ new_fcf_record));
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2781 FCF (x%x) failed connection "
"list check: (x%x/x%x)\n",
@@ -2005,7 +2206,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
goto read_next_fcf;
} else {
fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
- rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
+ rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
+ new_fcf_record);
if (rc)
goto read_next_fcf;
}
@@ -2018,7 +2220,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
*/
spin_lock_irq(&phba->hbalock);
if (phba->fcf.fcf_flag & FCF_IN_USE) {
- if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
+ if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
+ lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
new_fcf_record, vlan_id)) {
if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
phba->fcf.current_rec.fcf_indx) {
@@ -2232,7 +2435,8 @@ read_next_fcf:
(phba->fcf.fcf_flag & FCF_REDISC_PEND))
return;
- if (phba->fcf.fcf_flag & FCF_IN_USE) {
+ if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
+ phba->fcf.fcf_flag & FCF_IN_USE) {
/*
* In case the current in-use FCF record no
* longer existed during FCF discovery that
@@ -2247,7 +2451,6 @@ read_next_fcf:
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag |= FCF_REDISC_FOV;
spin_unlock_irq(&phba->hbalock);
- lpfc_sli4_mbox_cmd_free(phba, mboxq);
lpfc_sli4_fcf_scan_read_fcf_rec(phba,
LPFC_FCOE_FCF_GET_FIRST);
return;
@@ -2424,7 +2627,8 @@ lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
/* Update the eligible FCF record index bmask */
fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
- rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
+
+ rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
out:
lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -2645,6 +2849,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
vport->vpi_state |= LPFC_VPI_REGISTERED;
vport->fc_flag |= FC_VFI_REGISTERED;
vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+ vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
@@ -2893,8 +3098,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
goto out;
}
/* Reset FCF roundrobin bmask for new discovery */
- memset(phba->fcf.fcf_rr_bmask, 0,
- sizeof(*phba->fcf.fcf_rr_bmask));
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
}
return;
@@ -5592,7 +5796,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
/* Reset FCF roundrobin bmask for new discovery */
- memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 9059524cf22..046edc4ab35 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -2955,18 +2955,18 @@ typedef struct _SLI2_RDSC {
typedef struct _PCB {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t type:8;
-#define TYPE_NATIVE_SLI2 0x01;
+#define TYPE_NATIVE_SLI2 0x01
uint32_t feature:8;
-#define FEATURE_INITIAL_SLI2 0x01;
+#define FEATURE_INITIAL_SLI2 0x01
uint32_t rsvd:12;
uint32_t maxRing:4;
#else /* __LITTLE_ENDIAN_BITFIELD */
uint32_t maxRing:4;
uint32_t rsvd:12;
uint32_t feature:8;
-#define FEATURE_INITIAL_SLI2 0x01;
+#define FEATURE_INITIAL_SLI2 0x01
uint32_t type:8;
-#define TYPE_NATIVE_SLI2 0x01;
+#define TYPE_NATIVE_SLI2 0x01
#endif
uint32_t mailBoxSize;
@@ -3470,11 +3470,16 @@ typedef struct {
or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
struct rcv_sli3 {
- uint32_t word8Rsvd;
#ifdef __BIG_ENDIAN_BITFIELD
+ uint16_t ox_id;
+ uint16_t seq_cnt;
+
uint16_t vpi;
uint16_t word9Rsvd;
#else /* __LITTLE_ENDIAN */
+ uint16_t seq_cnt;
+ uint16_t ox_id;
+
uint16_t word9Rsvd;
uint16_t vpi;
#endif
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 11e26a26b5d..7f8003b5181 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -170,15 +170,8 @@ struct lpfc_sli_intf {
#define LPFC_PCI_FUNC3 3
#define LPFC_PCI_FUNC4 4
-/* SLI4 interface type-2 control register offsets */
-#define LPFC_CTL_PORT_SEM_OFFSET 0x400
-#define LPFC_CTL_PORT_STA_OFFSET 0x404
-#define LPFC_CTL_PORT_CTL_OFFSET 0x408
-#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
-#define LPFC_CTL_PORT_ER2_OFFSET 0x410
+/* SLI4 interface type-2 PDEV_CTL register */
#define LPFC_CTL_PDEV_CTL_OFFSET 0x414
-
-/* Some SLI4 interface type-2 PDEV_CTL register bits */
#define LPFC_CTL_PDEV_CTL_DRST 0x00000001
#define LPFC_CTL_PDEV_CTL_FRST 0x00000002
#define LPFC_CTL_PDEV_CTL_DD 0x00000004
@@ -337,6 +330,7 @@ struct lpfc_cqe {
#define CQE_CODE_RELEASE_WQE 0x2
#define CQE_CODE_RECEIVE 0x4
#define CQE_CODE_XRI_ABORTED 0x5
+#define CQE_CODE_RECEIVE_V1 0x9
/* completion queue entry for wqe completions */
struct lpfc_wcqe_complete {
@@ -440,7 +434,10 @@ struct lpfc_rcqe {
#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
- uint32_t reserved1;
+ uint32_t word1;
+#define lpfc_rcqe_fcf_id_v1_SHIFT 0
+#define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F
+#define lpfc_rcqe_fcf_id_v1_WORD word1
uint32_t word2;
#define lpfc_rcqe_length_SHIFT 16
#define lpfc_rcqe_length_MASK 0x0000FFFF
@@ -451,6 +448,9 @@ struct lpfc_rcqe {
#define lpfc_rcqe_fcf_id_SHIFT 0
#define lpfc_rcqe_fcf_id_MASK 0x0000003F
#define lpfc_rcqe_fcf_id_WORD word2
+#define lpfc_rcqe_rq_id_v1_SHIFT 0
+#define lpfc_rcqe_rq_id_v1_MASK 0x0000FFFF
+#define lpfc_rcqe_rq_id_v1_WORD word2
uint32_t word3;
#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT
#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK
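
Editorial note: a hedged sketch of how a receive-CQE consumer might choose between the original and the new V1 field layouts based on the completion code. lpfc_cqe_code and the v0 lpfc_rcqe_rq_id accessor are assumed to be defined elsewhere in lpfc_hw4.h, and rcqe stands in for a struct lpfc_rcqe pointer; this is illustrative, not code from the patch.

	/* Hedged sketch: pick v1 RQ-id/FCF-id accessors for RECEIVE_V1 CQEs. */
	uint32_t rq_id, fcf_id;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) {
		rq_id  = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
		fcf_id = bf_get(lpfc_rcqe_fcf_id_v1, rcqe);
	} else {
		rq_id  = bf_get(lpfc_rcqe_rq_id, rcqe);
		fcf_id = bf_get(lpfc_rcqe_fcf_id, rcqe);
	}
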
@@ -515,7 +515,7 @@ struct lpfc_register {
/* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */
#define LPFC_SLI_INTF 0x0058
-#define LPFC_SLIPORT_IF2_SMPHR 0x0400
+#define LPFC_CTL_PORT_SEM_OFFSET 0x400
#define lpfc_port_smphr_perr_SHIFT 31
#define lpfc_port_smphr_perr_MASK 0x1
#define lpfc_port_smphr_perr_WORD word0
@@ -575,7 +575,7 @@ struct lpfc_register {
#define LPFC_POST_STAGE_PORT_READY 0xC000
#define LPFC_POST_STAGE_PORT_UE 0xF000
-#define LPFC_SLIPORT_STATUS 0x0404
+#define LPFC_CTL_PORT_STA_OFFSET 0x404
#define lpfc_sliport_status_err_SHIFT 31
#define lpfc_sliport_status_err_MASK 0x1
#define lpfc_sliport_status_err_WORD word0
@@ -593,7 +593,7 @@ struct lpfc_register {
#define lpfc_sliport_status_rdy_WORD word0
#define MAX_IF_TYPE_2_RESETS 1000
-#define LPFC_SLIPORT_CNTRL 0x0408
+#define LPFC_CTL_PORT_CTL_OFFSET 0x408
#define lpfc_sliport_ctrl_end_SHIFT 30
#define lpfc_sliport_ctrl_end_MASK 0x1
#define lpfc_sliport_ctrl_end_WORD word0
@@ -604,8 +604,8 @@ struct lpfc_register {
#define lpfc_sliport_ctrl_ip_WORD word0
#define LPFC_SLIPORT_INIT_PORT 1
-#define LPFC_SLIPORT_ERR_1 0x040C
-#define LPFC_SLIPORT_ERR_2 0x0410
+#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
+#define LPFC_CTL_PORT_ER2_OFFSET 0x410
/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
* reside in BAR 2.
@@ -3198,6 +3198,8 @@ struct lpfc_grp_hdr {
#define lpfc_grp_hdr_id_MASK 0x000000FF
#define lpfc_grp_hdr_id_WORD word2
uint8_t rev_name[128];
+ uint8_t date[12];
+ uint8_t revision[32];
};
#define FCP_COMMAND 0x0
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 148b98ddbb1..a3c820083c3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2927,6 +2927,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
sizeof fc_host_symbolic_name(shost));
fc_host_supported_speeds(shost) = 0;
+ if (phba->lmt & LMT_16Gb)
+ fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
if (phba->lmt & LMT_10Gb)
fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
if (phba->lmt & LMT_8Gb)
@@ -3632,8 +3634,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
lpfc_sli4_fcf_dead_failthrough(phba);
} else {
/* Reset FCF roundrobin bmask for new discovery */
- memset(phba->fcf.fcf_rr_bmask, 0,
- sizeof(*phba->fcf.fcf_rr_bmask));
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
/*
* Handling fast FCF failover to a DEAD FCF event is
* considered equalivant to receiving CVL to all vports.
@@ -3647,7 +3648,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
vport = lpfc_find_vport_by_vpid(phba,
- acqe_fip->index - phba->vpi_base);
+ acqe_fip->index);
ndlp = lpfc_sli4_perform_vport_cvl(vport);
if (!ndlp)
break;
@@ -3719,8 +3720,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
* Reset FCF roundrobin bmask for new
* discovery.
*/
- memset(phba->fcf.fcf_rr_bmask, 0,
- sizeof(*phba->fcf.fcf_rr_bmask));
+ lpfc_sli4_clear_fcf_rr_bmask(phba);
}
break;
default:
@@ -4035,6 +4035,34 @@ lpfc_reset_hba(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function reads the PCI SR-IOV extended capability of the physical
+ * function and returns the total number of virtual functions the device
+ * supports. It returns 0 if the device is not an SR-IOV physical function
+ * or does not advertise the SR-IOV capability.
+ **/
+uint16_t
+lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
+{
+ struct pci_dev *pdev = phba->pcidev;
+ uint16_t nr_virtfn;
+ int pos;
+
+ if (!pdev->is_physfn)
+ return 0;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos == 0)
+ return 0;
+
+ pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
+ return nr_virtfn;
+}
+
+/**
* lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
* @phba: pointer to lpfc hba data structure.
* @nr_vfn: number of virtual functions to be enabled.
@@ -4049,8 +4077,17 @@ int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
struct pci_dev *pdev = phba->pcidev;
+ uint16_t max_nr_vfn;
int rc;
+ max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
+ if (nr_vfn > max_nr_vfn) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3057 Requested vfs (%d) greater than "
+ "supported vfs (%d)", nr_vfn, max_nr_vfn);
+ return -EINVAL;
+ }
+
rc = pci_enable_sriov(pdev, nr_vfn);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -4516,7 +4553,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
}
}
- return rc;
+ return 0;
out_free_fcp_eq_hdl:
kfree(phba->sli4_hba.fcp_eq_hdl);
@@ -4966,17 +5003,14 @@ out_free_mem:
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to post rpi header templates to the
- * HBA consistent with the SLI-4 interface spec. This routine
+ * port for those SLI4 ports that do not support extents. This routine
* posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers.
- * No locks are held here because this is an initialization routine
- * called only from probe or lpfc_online when interrupts are not
- * enabled and the driver is reinitializing the device.
+ * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
+ * and should be called only when interrupts are disabled.
*
* Return codes
* 0 - successful
- * -ENOMEM - No available memory
- * -EIO - The mailbox failed to complete successfully.
+ * -ERROR - otherwise.
**/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
@@ -5687,17 +5721,22 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
break;
case LPFC_SLI_INTF_IF_TYPE_2:
phba->sli4_hba.u.if_type2.ERR1regaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER1_OFFSET;
phba->sli4_hba.u.if_type2.ERR2regaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_ER2_OFFSET;
phba->sli4_hba.u.if_type2.CTRLregaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_CTL_OFFSET;
phba->sli4_hba.u.if_type2.STATUSregaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_STA_OFFSET;
phba->sli4_hba.SLIINTFregaddr =
phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
phba->sli4_hba.PSMPHRregaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_SEM_OFFSET;
phba->sli4_hba.RQDBregaddr =
phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
phba->sli4_hba.WQDBregaddr =
@@ -8859,11 +8898,11 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
return -EINVAL;
}
lpfc_decode_firmware_rev(phba, fwrev, 1);
- if (strncmp(fwrev, image->rev_name, strnlen(fwrev, 16))) {
+ if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3023 Updating Firmware. Current Version:%s "
"New Version:%s\n",
- fwrev, image->rev_name);
+ fwrev, image->revision);
for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
GFP_KERNEL);
@@ -8892,9 +8931,9 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
fw->size - offset);
break;
}
- temp_offset += SLI4_PAGE_SIZE;
memcpy(dmabuf->virt, fw->data + temp_offset,
SLI4_PAGE_SIZE);
+ temp_offset += SLI4_PAGE_SIZE;
}
rc = lpfc_wr_object(phba, &dma_buffer_list,
(fw->size - offset), &offset);
@@ -9005,6 +9044,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
}
INIT_LIST_HEAD(&phba->active_rrq_list);
+ INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
/* Set up common device driver resources */
error = lpfc_setup_driver_resource_phase2(phba);
@@ -9112,7 +9152,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
-
return 0;
out_disable_intr:
@@ -9483,6 +9522,13 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
}
pci_restore_state(pdev);
+
+ /*
+ * The newer kernel behavior of pci_restore_state() clears the device's
+ * saved_state flag, so the restored state must be saved again.
+ */
+ pci_save_state(pdev);
+
if (pdev->is_busmaster)
pci_set_master(pdev);
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 55676702835..83450cc5c4d 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2031,7 +2031,7 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
bf_set(lpfc_init_vfi_vp, init_vfi, 1);
bf_set(lpfc_init_vfi_vfi, init_vfi,
vport->phba->sli4_hba.vfi_ids[vport->vfi]);
- bf_set(lpfc_init_vpi_vpi, init_vfi,
+ bf_set(lpfc_init_vfi_vpi, init_vfi,
vport->phba->vpi_ids[vport->vpi]);
bf_set(lpfc_init_vfi_fcfi, init_vfi,
vport->phba->fcf.fcfi);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3ccc97496eb..eadd241eeff 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1302,13 +1302,13 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
case SCSI_PROT_NORMAL:
default:
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9063 BLKGRD: Bad op/guard:%d/%d combination\n",
- scsi_get_prot_op(sc), guard_type);
+ "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
+ scsi_get_prot_op(sc));
ret = 1;
break;
}
- } else if (guard_type == SHOST_DIX_GUARD_CRC) {
+ } else {
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_READ_STRIP:
case SCSI_PROT_WRITE_INSERT:
@@ -1324,17 +1324,18 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
+ *txop = BG_OP_IN_CRC_OUT_NODIF;
+ *rxop = BG_OP_IN_NODIF_OUT_CRC;
+ break;
+
case SCSI_PROT_NORMAL:
default:
lpfc_printf_log(phba, KERN_ERR, LOG_BG,
- "9075 BLKGRD: Bad op/guard:%d/%d combination\n",
- scsi_get_prot_op(sc), guard_type);
+ "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
+ scsi_get_prot_op(sc));
ret = 1;
break;
}
- } else {
- /* unsupported format */
- BUG();
}
return ret;
@@ -1352,45 +1353,6 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
return sc->device->sector_size;
}
-/**
- * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
- * @sc: in: SCSI command
- * @apptagmask: out: app tag mask
- * @apptagval: out: app tag value
- * @reftag: out: ref tag (reference tag)
- *
- * Description:
- * Extract DIF parameters from the command if possible. Otherwise,
- * use default parameters.
- *
- **/
-static inline void
-lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
- uint16_t *apptagval, uint32_t *reftag)
-{
- struct scsi_dif_tuple *spt;
- unsigned char op = scsi_get_prot_op(sc);
- unsigned int protcnt = scsi_prot_sg_count(sc);
- static int cnt;
-
- if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
- op == SCSI_PROT_WRITE_PASS)) {
-
- cnt++;
- spt = page_address(sg_page(scsi_prot_sglist(sc))) +
- scsi_prot_sglist(sc)[0].offset;
- *apptagmask = 0;
- *apptagval = 0;
- *reftag = cpu_to_be32(spt->ref_tag);
-
- } else {
- /* SBC defines ref tag to be lower 32bits of LBA */
- *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
- *apptagmask = 0;
- *apptagval = 0;
- }
-}
-
/*
* This function sets up buffer list for protection groups of
* type LPFC_PG_TYPE_NO_DIF
@@ -1427,9 +1389,8 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
dma_addr_t physaddr;
int i = 0, num_bde = 0, status;
int datadir = sc->sc_data_direction;
- unsigned blksize;
uint32_t reftag;
- uint16_t apptagmask, apptagval;
+ unsigned blksize;
uint8_t txop, rxop;
status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
@@ -1438,17 +1399,16 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* extract some info from the scsi command for pde*/
blksize = lpfc_cmd_blksize(sc);
- lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
+ reftag = scsi_get_lba(sc) & 0xffffffff;
/* setup PDE5 with what we have */
pde5 = (struct lpfc_pde5 *) bpl;
memset(pde5, 0, sizeof(struct lpfc_pde5));
bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
- pde5->reftag = reftag;
/* Endianness conversion if necessary for PDE5 */
pde5->word0 = cpu_to_le32(pde5->word0);
- pde5->reftag = cpu_to_le32(pde5->reftag);
+ pde5->reftag = cpu_to_le32(reftag);
/* advance bpl and increment bde count */
num_bde++;
@@ -1463,10 +1423,10 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
if (datadir == DMA_FROM_DEVICE) {
bf_set(pde6_ce, pde6, 1);
bf_set(pde6_re, pde6, 1);
- bf_set(pde6_ae, pde6, 1);
}
bf_set(pde6_ai, pde6, 1);
- bf_set(pde6_apptagval, pde6, apptagval);
+ bf_set(pde6_ae, pde6, 0);
+ bf_set(pde6_apptagval, pde6, 0);
/* Endianness conversion if necessary for PDE6 */
pde6->word0 = cpu_to_le32(pde6->word0);
@@ -1551,7 +1511,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
unsigned char pgdone = 0, alldone = 0;
unsigned blksize;
uint32_t reftag;
- uint16_t apptagmask, apptagval;
uint8_t txop, rxop;
int num_bde = 0;
@@ -1571,7 +1530,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
/* extract some info from the scsi command */
blksize = lpfc_cmd_blksize(sc);
- lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
+ reftag = scsi_get_lba(sc) & 0xffffffff;
split_offset = 0;
do {
@@ -1579,11 +1538,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
pde5 = (struct lpfc_pde5 *) bpl;
memset(pde5, 0, sizeof(struct lpfc_pde5));
bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
- pde5->reftag = reftag;
/* Endianness conversion if necessary for PDE5 */
pde5->word0 = cpu_to_le32(pde5->word0);
- pde5->reftag = cpu_to_le32(pde5->reftag);
+ pde5->reftag = cpu_to_le32(reftag);
/* advance bpl and increment bde count */
num_bde++;
@@ -1597,9 +1555,9 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bf_set(pde6_oprx, pde6, rxop);
bf_set(pde6_ce, pde6, 1);
bf_set(pde6_re, pde6, 1);
- bf_set(pde6_ae, pde6, 1);
bf_set(pde6_ai, pde6, 1);
- bf_set(pde6_apptagval, pde6, apptagval);
+ bf_set(pde6_ae, pde6, 0);
+ bf_set(pde6_apptagval, pde6, 0);
/* Endianness conversion if necessary for PDE6 */
pde6->word0 = cpu_to_le32(pde6->word0);
@@ -1621,8 +1579,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
memset(pde7, 0, sizeof(struct lpfc_pde7));
bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
- pde7->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr));
- pde7->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr));
+ pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
+ pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
protgrp_blks = protgroup_len / 8;
protgrp_bytes = protgrp_blks * blksize;
@@ -1632,7 +1590,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
protgroup_offset += protgroup_remainder;
protgrp_blks = protgroup_remainder / 8;
- protgrp_bytes = protgroup_remainder * blksize;
+ protgrp_bytes = protgrp_blks * blksize;
} else {
protgroup_offset = 0;
curr_prot++;
@@ -2006,16 +1964,21 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
/*
* setup sense data descriptor 0 per SPC-4 as an information
- * field, and put the failing LBA in it
+ * field, and put the failing LBA in it.
+ * This code assumes there was also a guard/app/ref tag error
+ * indication.
*/
- cmd->sense_buffer[8] = 0; /* Information */
- cmd->sense_buffer[9] = 0xa; /* Add. length */
+ cmd->sense_buffer[7] = 0xc; /* Additional sense length */
+ cmd->sense_buffer[8] = 0; /* Information descriptor type */
+ cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
+ cmd->sense_buffer[10] = 0x80; /* Validity bit */
bghm /= cmd->device->sector_size;
failing_sector = scsi_get_lba(cmd);
failing_sector += bghm;
- put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
+ /* Descriptor Information */
+ put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
}
if (!ret) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 98999bbd8cb..8b799f047a9 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -560,7 +560,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
if (rrq) {
rrq->send_rrq = send_rrq;
- rrq->xritag = phba->sli4_hba.xri_ids[xritag];
+ rrq->xritag = xritag;
rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
rrq->ndlp = ndlp;
rrq->nlp_DID = ndlp->nlp_DID;
@@ -2452,7 +2452,8 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* search continue save q for same XRI */
list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
- if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
+ if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
+ saveq->iocb.unsli3.rcvsli3.ox_id) {
list_add_tail(&saveq->list, &iocbq->list);
found = 1;
break;
@@ -3355,6 +3356,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
irspiocbq);
break;
case CQE_CODE_RECEIVE:
+ case CQE_CODE_RECEIVE_V1:
dmabuf = container_of(cq_event, struct hbq_dmabuf,
cq_event);
lpfc_sli4_handle_received_buffer(phba, dmabuf);
@@ -4712,10 +4714,15 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
* lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
* @phba: Pointer to HBA context object.
* @type: The resource extent type.
+ * @extnt_count: buffer to hold port available extent count.
+ * @extnt_size: buffer to hold element count per extent.
*
- * This function allocates all SLI4 resource identifiers.
+ * This function calls the port and retrieves the number of available
+ * extents and their size for a particular extent type.
+ *
+ * Returns: 0 if successful. Nonzero otherwise.
**/
-static int
+int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
uint16_t *extnt_count, uint16_t *extnt_size)
{
@@ -4892,7 +4899,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
req_len, *emb);
if (alloc_len < req_len) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "9000 Allocated DMA memory size (x%x) is "
+ "2982 Allocated DMA memory size (x%x) is "
"less than the requested DMA memory "
"size (x%x)\n", alloc_len, req_len);
return -ENOMEM;
@@ -5506,6 +5513,154 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
+ * @phba: Pointer to HBA context object.
+ * @type: The resource extent type.
+ * @extnt_count: buffer to hold port extent count response
+ * @extnt_size: buffer to hold port extent size response.
+ *
+ * This function calls the port to read the host allocated extents
+ * for a particular type.
+ **/
+int
+lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
+ uint16_t *extnt_cnt, uint16_t *extnt_size)
+{
+ bool emb;
+ int rc = 0;
+ uint16_t curr_blks = 0;
+ uint32_t req_len, emb_len;
+ uint32_t alloc_len, mbox_tmo;
+ struct list_head *blk_list_head;
+ struct lpfc_rsrc_blks *rsrc_blk;
+ LPFC_MBOXQ_t *mbox;
+ void *virtaddr = NULL;
+ struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
+ struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ switch (type) {
+ case LPFC_RSC_TYPE_FCOE_VPI:
+ blk_list_head = &phba->lpfc_vpi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_XRI:
+ blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_VFI:
+ blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
+ break;
+ case LPFC_RSC_TYPE_FCOE_RPI:
+ blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
+ break;
+ default:
+ return -EIO;
+ }
+
+ /* Count the number of extents currently allocated for this type. */
+ list_for_each_entry(rsrc_blk, blk_list_head, list) {
+ if (curr_blks == 0) {
+ /*
+ * The GET_ALLOCATED mailbox does not return the size,
+ * just the count. The size should be just the size
+ * stored in the current allocated block and all sizes
+ * for an extent type are the same so set the return
+ * value now.
+ */
+ *extnt_size = rsrc_blk->rsrc_size;
+ }
+ curr_blks++;
+ }
+
+ /* Calculate the total requested length of the dma memory. */
+ req_len = curr_blks * sizeof(uint16_t);
+
+ /*
+ * Calculate the size of an embedded mailbox. The uint32_t
+ * accounts for extents-specific word.
+ */
+ emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
+ sizeof(uint32_t);
+
+ /*
+ * Presume the allocation and response will fit into an embedded
+ * mailbox. If not true, reconfigure to a non-embedded mailbox.
+ */
+ emb = LPFC_SLI4_MBX_EMBED;
+ req_len = emb_len;
+ if (req_len > emb_len) {
+ req_len = curr_blks * sizeof(uint16_t) +
+ sizeof(union lpfc_sli4_cfg_shdr) +
+ sizeof(uint32_t);
+ emb = LPFC_SLI4_MBX_NEMBED;
+ }
+
+ mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
+
+ alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
+ req_len, emb);
+ if (alloc_len < req_len) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2983 Allocated DMA memory size (x%x) is "
+ "less than the requested DMA memory "
+ "size (x%x)\n", alloc_len, req_len);
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+ rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+
+ if (unlikely(rc)) {
+ rc = -EIO;
+ goto err_exit;
+ }
+
+ /*
+ * Figure out where the response is located. Then get local pointers
+ * to the response data. The port does not guarantee to respond to
+ * all extent count requests, so update the local variable with the
+ * allocated count from the port.
+ */
+ if (emb == LPFC_SLI4_MBX_EMBED) {
+ rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
+ shdr = &rsrc_ext->header.cfg_shdr;
+ *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
+ } else {
+ virtaddr = mbox->sge_array->addr[0];
+ n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
+ shdr = &n_rsrc->cfg_shdr;
+ *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
+ }
+
+ if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+ "2984 Failed to read allocated resources "
+ "for type %d - Status 0x%x Add'l Status 0x%x.\n",
+ type,
+ bf_get(lpfc_mbox_hdr_status, &shdr->response),
+ bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
+ rc = -EIO;
+ goto err_exit;
+ }
+ err_exit:
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return rc;
+}
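Editorial aside, not part of the patch: the function above chooses between an embedded and a non-embedded SLI4 config mailbox purely from the size of the expected response. Below is a minimal, standalone C sketch of that sizing decision; the frame and header sizes (MBOX_FRAME_BYTES, MBOX_HEADER_BYTES) and the helper name extent_req_len are illustrative stand-ins, not lpfc identifiers.

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative sizes only; the real values come from the SLI4 headers. */
    #define MBOX_FRAME_BYTES  256u   /* hypothetical embedded mailbox frame */
    #define MBOX_HEADER_BYTES  20u   /* hypothetical common header size     */

    /*
     * Return the payload length to request for 'count' 16-bit extent IDs and
     * report whether that fits in the embedded mailbox frame; if it does not,
     * the larger non-embedded length must come from external DMA pages.
     */
    static uint32_t extent_req_len(uint16_t count, int *embedded)
    {
        uint32_t emb_len = MBOX_FRAME_BYTES - MBOX_HEADER_BYTES -
                           (uint32_t)sizeof(uint32_t); /* extents-specific word */
        uint32_t req_len = (uint32_t)(count * sizeof(uint16_t));

        if (req_len <= emb_len) {
            *embedded = 1;
            return emb_len;            /* embedded: request the whole frame  */
        }
        *embedded = 0;                 /* non-embedded: IDs + header + word  */
        return req_len + MBOX_HEADER_BYTES + (uint32_t)sizeof(uint32_t);
    }

    int main(void)
    {
        int emb;
        uint32_t len;

        len = extent_req_len(16, &emb);
        printf("16 extents  -> %u bytes (embedded=%d)\n", (unsigned)len, emb);
        len = extent_req_len(512, &emb);
        printf("512 extents -> %u bytes (embedded=%d)\n", (unsigned)len, emb);
        return 0;
    }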
+
+/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
*
@@ -5837,6 +5992,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"Advanced Error Reporting (AER)\n");
phba->cfg_aer_support = 0;
}
+ rc = 0;
}
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
@@ -6634,6 +6790,9 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
unsigned long iflags;
int rc;
+ /* dump from issue mailbox command if setup */
+ lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
+
rc = lpfc_mbox_dev_check(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
@@ -7318,12 +7477,12 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
- break;
+ break;
case CMD_XMIT_SEQUENCE64_CX:
bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
iocbq->iocb.un.ulpWord[3]);
bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
- iocbq->iocb.ulpContext);
+ iocbq->iocb.unsli3.rcvsli3.ox_id);
/* The entire sequence is transmitted for this IOCB */
xmit_len = total_len;
cmnd = CMD_XMIT_SEQUENCE64_CR;
@@ -7341,7 +7500,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
wqe->xmit_sequence.xmit_len = xmit_len;
command_type = OTHER_COMMAND;
- break;
+ break;
case CMD_XMIT_BCAST64_CN:
/* word3 iocb=iotag32 wqe=seq_payload_len */
wqe->xmit_bcast64.seq_payload_len = xmit_len;
@@ -7355,7 +7514,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
LPFC_WQE_LENLOC_WORD3);
bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
- break;
+ break;
case CMD_FCP_IWRITE64_CR:
command_type = FCP_COMMAND_DATA_OUT;
/* word3 iocb=iotag wqe=payload_offset_len */
@@ -7375,7 +7534,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
LPFC_WQE_LENLOC_WORD4);
bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
- break;
+ break;
case CMD_FCP_IREAD64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
@@ -7394,7 +7553,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
LPFC_WQE_LENLOC_WORD4);
bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
- break;
+ break;
case CMD_FCP_ICMND64_CR:
/* word3 iocb=IO_TAG wqe=reserved */
wqe->fcp_icmd.rsrvd3 = 0;
@@ -7407,7 +7566,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
- break;
+ break;
case CMD_GEN_REQUEST64_CR:
/* For this command calculate the xmit length of the
* request bde.
@@ -7442,7 +7601,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
command_type = OTHER_COMMAND;
- break;
+ break;
case CMD_XMIT_ELS_RSP64_CX:
ndlp = (struct lpfc_nodelist *)iocbq->context1;
/* words0-2 BDE memcpy */
@@ -7457,7 +7616,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
- iocbq->iocb.ulpContext);
+ iocbq->iocb.unsli3.rcvsli3.ox_id);
if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
phba->vpi_ids[iocbq->vport->vpi]);
@@ -7470,7 +7629,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
command_type = OTHER_COMMAND;
- break;
+ break;
case CMD_CLOSE_XRI_CN:
case CMD_ABORT_XRI_CN:
case CMD_ABORT_XRI_CX:
@@ -7509,7 +7668,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
cmnd = CMD_ABORT_XRI_CX;
command_type = OTHER_COMMAND;
xritag = 0;
- break;
+ break;
case CMD_XMIT_BLS_RSP64_CX:
/* As BLS ABTS RSP WQE is very different from other WQEs,
* we re-construct this WQE here based on information in
@@ -7553,7 +7712,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
}
- break;
+ break;
case CMD_XRI_ABORTED_CX:
case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
@@ -7565,7 +7724,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
"2014 Invalid command 0x%x\n",
iocbq->iocb.ulpCommand);
return IOCB_ERROR;
- break;
+ break;
}
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
@@ -10481,10 +10640,14 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
struct hbq_dmabuf *dma_buf;
- uint32_t status;
+ uint32_t status, rq_id;
unsigned long iflags;
- if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
+ if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
+ rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
+ else
+ rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
+ if (rq_id != hrq->queue_id)
goto out;
status = bf_get(lpfc_rcqe_status, rcqe);
@@ -10563,6 +10726,7 @@ lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
(struct sli4_wcqe_xri_aborted *)&cqevt);
break;
case CQE_CODE_RECEIVE:
+ case CQE_CODE_RECEIVE_V1:
/* Process the RQ event */
phba->last_completion_time = jiffies;
workposted = lpfc_sli4_sp_handle_rcqe(phba,
@@ -12345,19 +12509,18 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
}
/**
- * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
+ * lpfc_sli4_alloc_xri - Get an available rpi in the device's range
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to post rpi header templates to the
- * port for those SLI4 ports that do not support extents. This routine
- * posts a PAGE_SIZE memory region to the port to hold up to
- * PAGE_SIZE modulo 64 rpi context headers. This is an initialization routine
- * and should be called only when interrupts are disabled.
+ * HBA consistent with the SLI-4 interface spec. This routine
+ * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
+ * SLI4_PAGE_SIZE modulo 64 rpi context headers.
*
- * Return codes
- * 0 - successful
- * -ERROR - otherwise.
- */
+ * Returns
+ * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
+ * LPFC_RPI_ALLOC_ERROR if no rpis are available.
+ **/
uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
@@ -13406,7 +13569,7 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
* This function validates the xri maps to the known range of XRIs allocated an
* used by the driver.
**/
-static uint16_t
+uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
uint16_t xri)
{
@@ -13643,10 +13806,12 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
+ struct hbq_dmabuf *hbq_buf;
struct lpfc_dmabuf *d_buf, *n_buf;
struct lpfc_iocbq *first_iocbq, *iocbq;
struct fc_frame_header *fc_hdr;
uint32_t sid;
+ uint32_t len, tot_len;
struct ulp_bde64 *pbde;
fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
@@ -13655,6 +13820,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
lpfc_update_rcv_time_stamp(vport);
/* get the Remote Port's SID */
sid = sli4_sid_from_fc_hdr(fc_hdr);
+ tot_len = 0;
/* Get an iocbq struct to fill in. */
first_iocbq = lpfc_sli_get_iocbq(vport->phba);
if (first_iocbq) {
@@ -13662,9 +13828,12 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
- first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
- /* iocbq is prepped for internal consumption. Logical vpi. */
- first_iocbq->iocb.unsli3.rcvsli3.vpi = vport->vpi;
+ first_iocbq->iocb.ulpContext = NO_XRI;
+ first_iocbq->iocb.unsli3.rcvsli3.ox_id =
+ be16_to_cpu(fc_hdr->fh_ox_id);
+ /* iocbq is prepped for internal consumption. Physical vpi. */
+ first_iocbq->iocb.unsli3.rcvsli3.vpi =
+ vport->phba->vpi_ids[vport->vpi];
/* put the first buffer into the first IOCBq */
first_iocbq->context2 = &seq_dmabuf->dbuf;
first_iocbq->context3 = NULL;
@@ -13672,9 +13841,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.un.rcvels.remoteID = sid;
- first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
- bf_get(lpfc_rcqe_length,
+ tot_len = bf_get(lpfc_rcqe_length,
&seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+ first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
}
iocbq = first_iocbq;
/*
@@ -13692,9 +13861,13 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
pbde = (struct ulp_bde64 *)
&iocbq->iocb.unsli3.sli3Words[4];
pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
- first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
- bf_get(lpfc_rcqe_length,
- &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
+ /* We need to get the size out of the right CQE */
+ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ len = bf_get(lpfc_rcqe_length,
+ &hbq_buf->cq_event.cqe.rcqe_cmpl);
+ iocbq->iocb.unsli3.rcvsli3.acc_len += len;
+ tot_len += len;
} else {
iocbq = lpfc_sli_get_iocbq(vport->phba);
if (!iocbq) {
@@ -13712,9 +13885,14 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
iocbq->iocb.ulpBdeCount = 1;
iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
- first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
- bf_get(lpfc_rcqe_length,
- &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
+ /* We need to get the size out of the right CQE */
+ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ len = bf_get(lpfc_rcqe_length,
+ &hbq_buf->cq_event.cqe.rcqe_cmpl);
+ tot_len += len;
+ iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
+
iocbq->iocb.un.rcvels.remoteID = sid;
list_add_tail(&iocbq->list, &first_iocbq->list);
}
@@ -13787,7 +13965,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
lpfc_in_buf_free(phba, &dmabuf->dbuf);
return;
}
- fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
+ if ((bf_get(lpfc_cqe_code,
+ &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
+ fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
+ &dmabuf->cq_event.cqe.rcqe_cmpl);
+ else
+ fcfi = bf_get(lpfc_rcqe_fcf_id,
+ &dmabuf->cq_event.cqe.rcqe_cmpl);
vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
/* throw out the frame */
@@ -14451,6 +14635,92 @@ fail_fcf_read:
}
/**
+ * lpfc_check_next_fcf_pri_level
+ * @phba: pointer to the lpfc_hba struct for this port.
+ * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
+ * routine when the rr_bmask is empty. The FCF indices are put into the
+ * rr_bmask based on their priority level, starting from the highest priority
+ * and working down to the lowest. The most likely FCF candidate will be in
+ * the highest priority group. When this routine is called it searches the
+ * fcf_pri list for the next lowest priority group and repopulates the
+ * rr_bmask with only those fcf_indexes.
+ * returns:
+ * 1=success 0=failure
+ **/
+int
+lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
+{
+ uint16_t next_fcf_pri;
+ uint16_t last_index;
+ struct lpfc_fcf_pri *fcf_pri;
+ int rc;
+ int ret = 0;
+
+ last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX);
+ lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+ "3060 Last IDX %d\n", last_index);
+ if (list_empty(&phba->fcf.fcf_pri_list)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+ "3061 Last IDX %d\n", last_index);
+ return 0; /* Empty rr list */
+ }
+ next_fcf_pri = 0;
+ /*
+ * Clear the rr_bmask and set all of the bits that are at this
+ * priority.
+ */
+ memset(phba->fcf.fcf_rr_bmask, 0,
+ sizeof(*phba->fcf.fcf_rr_bmask));
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
+ continue;
+ /*
+ * the first priority level that has not had a FLOGI
+ * failure will be the highest.
+ */
+ if (!next_fcf_pri)
+ next_fcf_pri = fcf_pri->fcf_rec.priority;
+ spin_unlock_irq(&phba->hbalock);
+ if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
+ rc = lpfc_sli4_fcf_rr_index_set(phba,
+ fcf_pri->fcf_rec.fcf_index);
+ if (rc)
+ return 0;
+ }
+ spin_lock_irq(&phba->hbalock);
+ }
+ /*
+ * If next_fcf_pri was not set above and the list is not empty, then
+ * FLOGI has failed on all of them. So reset the FLOGI failed flag
+ * and start at the beginning.
+ */
+ if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
+ list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
+ /*
+ * the first priority level that has not had a FLOGI
+ * failure will be the highest.
+ */
+ if (!next_fcf_pri)
+ next_fcf_pri = fcf_pri->fcf_rec.priority;
+ spin_unlock_irq(&phba->hbalock);
+ if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
+ rc = lpfc_sli4_fcf_rr_index_set(phba,
+ fcf_pri->fcf_rec.fcf_index);
+ if (rc)
+ return 0;
+ }
+ spin_lock_irq(&phba->hbalock);
+ }
+ } else
+ ret = 1;
+ spin_unlock_irq(&phba->hbalock);
+
+ return ret;
+}
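As a reading aid only: the priority-failover routine above relies on fcf_pri_list being kept in priority order and, when the round-robin bitmask runs dry, repopulates it from the best priority level that has not failed FLOGI, resetting the failure flags if every level has failed. A self-contained sketch of that selection follows, with hypothetical types and names (struct fcf_rec, repopulate_rr_bmask); it is not driver code.

    #include <stdio.h>
    #include <stdint.h>

    struct fcf_rec {
        uint16_t index;        /* FCF table index (kept < 32 here)      */
        uint32_t priority;     /* lower value = higher priority         */
        int      flogi_failed; /* set once FLOGI to this FCF has failed */
    };

    /*
     * The list is assumed to be kept in priority order, so the first entry
     * that has not failed FLOGI defines the level to repopulate with.  If
     * every entry has failed, clear the failure flags and start over, which
     * mirrors the driver's fallback path.
     */
    static int repopulate_rr_bmask(struct fcf_rec *list, int n, uint32_t *bmask)
    {
        uint32_t level = 0;
        int i, have_level = 0;

        *bmask = 0;
        for (i = 0; i < n; i++) {
            if (list[i].flogi_failed)
                continue;
            if (!have_level) {
                level = list[i].priority;
                have_level = 1;
            }
            if (list[i].priority == level)
                *bmask |= 1u << list[i].index;
        }
        if (have_level)
            return 1;
        for (i = 0; i < n; i++)        /* all failed: reset and retry once */
            list[i].flogi_failed = 0;
        return n ? repopulate_rr_bmask(list, n, bmask) : 0;
    }

    int main(void)
    {
        struct fcf_rec list[] = { { 2, 1, 1 }, { 5, 1, 0 }, { 7, 3, 0 } };
        uint32_t bmask;

        repopulate_rr_bmask(list, 3, &bmask);
        printf("rr_bmask = 0x%08x\n", (unsigned)bmask);  /* prints 0x00000020 */
        return 0;
    }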
+/**
* lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
* @phba: pointer to lpfc hba data structure.
*
@@ -14466,6 +14736,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
uint16_t next_fcf_index;
/* Search start from next bit of currently registered FCF index */
+next_priority:
next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
LPFC_SLI4_FCF_TBL_INDX_MAX;
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
@@ -14473,17 +14744,46 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
next_fcf_index);
/* Wrap around condition on phba->fcf.fcf_rr_bmask */
- if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ /*
+ * If we have wrapped then we need to clear the bits that
+ * have been tested so that we can detect when we should
+ * change the priority level.
+ */
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+ }
+
/* Check roundrobin failover list empty condition */
- if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
+ next_fcf_index == phba->fcf.current_rec.fcf_indx) {
+ /*
+ * If the next fcf index is not found, check if there are lower
+ * priority level FCFs in the fcf_priority list.
+ * Set up the rr_bmask with all of the available fcf bits
+ * at that level and continue the selection process.
+ */
+ if (lpfc_check_next_fcf_pri_level(phba))
+ goto next_priority;
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2844 No roundrobin failover FCF available\n");
- return LPFC_FCOE_FCF_NEXT_NONE;
+ if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+ return LPFC_FCOE_FCF_NEXT_NONE;
+ else {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+ "3063 Only FCF available idx %d, flag %x\n",
+ next_fcf_index,
+ phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
+ return next_fcf_index;
+ }
}
+ if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
+ phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
+ LPFC_FCF_FLOGI_FAILED)
+ goto next_priority;
+
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
"2845 Get next roundrobin failover FCF (x%x)\n",
next_fcf_index);
@@ -14535,6 +14835,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
+ struct lpfc_fcf_pri *fcf_pri;
if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
"2762 FCF (x%x) reached driver's book "
@@ -14543,6 +14844,14 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
return;
}
/* Clear the eligible FCF record index bmask */
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+ if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
+ list_del_init(&fcf_pri->list);
+ break;
+ }
+ }
+ spin_unlock_irq(&phba->hbalock);
clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 4b1703554a2..19bb87ae859 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -81,6 +81,8 @@
(fc_hdr)->fh_f_ctl[1] << 8 | \
(fc_hdr)->fh_f_ctl[2])
+#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
+
enum lpfc_sli4_queue_type {
LPFC_EQ,
LPFC_GCQ,
@@ -157,6 +159,25 @@ struct lpfc_fcf_rec {
#define RECORD_VALID 0x02
};
+struct lpfc_fcf_pri_rec {
+ uint16_t fcf_index;
+#define LPFC_FCF_ON_PRI_LIST 0x0001
+#define LPFC_FCF_FLOGI_FAILED 0x0002
+ uint16_t flag;
+ uint32_t priority;
+};
+
+struct lpfc_fcf_pri {
+ struct list_head list;
+ struct lpfc_fcf_pri_rec fcf_rec;
+};
+
+/*
+ * Maximum FCF table index, it is for driver internal book keeping, it
+ * just needs to be no less than the supported HBA's FCF table size.
+ */
+#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
+
struct lpfc_fcf {
uint16_t fcfi;
uint32_t fcf_flag;
@@ -176,15 +197,13 @@ struct lpfc_fcf {
uint32_t eligible_fcf_cnt;
struct lpfc_fcf_rec current_rec;
struct lpfc_fcf_rec failover_rec;
+ struct list_head fcf_pri_list;
+ struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX];
+ uint32_t current_fcf_scan_pri;
struct timer_list redisc_wait;
unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
};
-/*
- * Maximum FCF table index, it is for driver internal book keeping, it
- * just needs to be no less than the supported HBA's FCF table size.
- */
-#define LPFC_SLI4_FCF_TBL_INDX_MAX 32
#define LPFC_REGION23_SIGNATURE "RG23"
#define LPFC_REGION23_VERSION 1
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c03921b1232..c1e0ae94d9f 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.23"
+#define LPFC_DRIVER_VERSION "8.3.25"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index bf2a1c51629..af3a6af97cc 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -215,13 +215,6 @@ static int __init mac_scsi_setup(char *str) {
__setup("mac5380=", mac_scsi_setup);
/*
- * If you want to find the instance with (k)gdb ...
- */
-#if NDEBUG
-static struct Scsi_Host *default_instance;
-#endif
-
-/*
* Function : int macscsi_detect(struct scsi_host_template * tpnt)
*
* Purpose : initializes mac NCR5380 driver based on the
@@ -233,7 +226,7 @@ static struct Scsi_Host *default_instance;
*
*/
-int macscsi_detect(struct scsi_host_template * tpnt)
+int __init macscsi_detect(struct scsi_host_template * tpnt)
{
static int called = 0;
int flags = 0;
@@ -268,10 +261,7 @@ int macscsi_detect(struct scsi_host_template * tpnt)
/* Once we support multiple 5380s (e.g. DuoDock) we'll do
something different here */
instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
-#if NDEBUG
- default_instance = instance;
-#endif
-
+
if (macintosh_config->ident == MAC_MODEL_IIFX) {
mac_scsi_regp = via1+0x8000;
mac_scsi_drq = via1+0xE000;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 7370c084b17..3948a00d81f 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.05.38-rc1"
-#define MEGASAS_RELDATE "May. 11, 2011"
-#define MEGASAS_EXT_VERSION "Wed. May. 11 17:00:00 PDT 2011"
+#define MEGASAS_VERSION "00.00.05.40-rc1"
+#define MEGASAS_RELDATE "Jul. 26, 2011"
+#define MEGASAS_EXT_VERSION "Tue. Jul. 26 17:00:00 PDT 2011"
/*
* Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 2d8cdce7b2f..776d0198866 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.05.38-rc1
+ * Version : v00.00.05.40-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -54,6 +54,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
@@ -2057,6 +2058,20 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
}
}
+static int megasas_change_queue_depth(struct scsi_device *sdev,
+ int queue_depth, int reason)
+{
+ if (reason != SCSI_QDEPTH_DEFAULT)
+ return -EOPNOTSUPP;
+
+ if (queue_depth > sdev->host->can_queue)
+ queue_depth = sdev->host->can_queue;
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
+ queue_depth);
+
+ return queue_depth;
+}
+
/*
* Scsi host template for megaraid_sas driver
*/
@@ -2074,6 +2089,7 @@ static struct scsi_host_template megasas_template = {
.eh_timed_out = megasas_reset_timer,
.bios_param = megasas_bios_param,
.use_clustering = ENABLE_CLUSTERING,
+ .change_queue_depth = megasas_change_queue_depth,
};
/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 8fe3a45794f..5a5af1fe758 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -288,7 +288,6 @@ u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock,
/* Get dev handle from Pd */
*pDevHandle = MR_PdDevHandleGet(pd, map);
}
- retval = FALSE;
}
*pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index a3e60385787..3105d5e8d90 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.17
+ * mpi2.h Version: 02.00.18
*
* Version History
* ---------------
@@ -64,6 +64,8 @@
* 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
* Added alternative defines for the SGE Direction bit.
* 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
* --------------------------------------------------------------------------
*/
@@ -89,7 +91,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x11)
+#define MPI2_HEADER_VERSION_UNIT (0x12)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -1060,10 +1062,14 @@ typedef struct _MPI2_IEEE_SGE_UNION
#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03)
#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
+ /* IEEE Simple Element only */
#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
+ /* IEEE Simple Element only */
#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
-
+ /* IEEE Simple Element only */
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR (0x03)
+ /* IEEE Chain Element only */
/****************************************************************************
* IEEE SGE operation Macros
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index f5b9c766e28..61475a6480e 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.16
+ * mpi2_cnfg.h Version: 02.00.17
*
* Version History
* ---------------
@@ -127,6 +127,13 @@
* Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
* 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
* defines.
+ * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to
+ * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for
+ * the Pinout field.
+ * Added BoardTemperature and BoardTemperatureUnits fields
+ * to MPI2_CONFIG_PAGE_IO_UNIT_7.
+ * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
+ * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
* --------------------------------------------------------------------------
*/
@@ -210,6 +217,7 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION
#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17)
#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19)
+#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A)
/*****************************************************************************
@@ -612,23 +620,31 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO
U32 Pinout; /* 0x00 */
U8 Connector[16]; /* 0x04 */
U8 Location; /* 0x14 */
- U8 Reserved1; /* 0x15 */
+ U8 ReceptacleID; /* 0x15 */
U16 Slot; /* 0x16 */
U32 Reserved2; /* 0x18 */
} MPI2_MANPAGE7_CONNECTOR_INFO, MPI2_POINTER PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
Mpi2ManPage7ConnectorInfo_t, MPI2_POINTER pMpi2ManPage7ConnectorInfo_t;
/* defines for the Pinout field */
-#define MPI2_MANPAGE7_PINOUT_SFF_8484_L4 (0x00080000)
-#define MPI2_MANPAGE7_PINOUT_SFF_8484_L3 (0x00040000)
-#define MPI2_MANPAGE7_PINOUT_SFF_8484_L2 (0x00020000)
-#define MPI2_MANPAGE7_PINOUT_SFF_8484_L1 (0x00010000)
-#define MPI2_MANPAGE7_PINOUT_SFF_8470_L4 (0x00000800)
-#define MPI2_MANPAGE7_PINOUT_SFF_8470_L3 (0x00000400)
-#define MPI2_MANPAGE7_PINOUT_SFF_8470_L2 (0x00000200)
-#define MPI2_MANPAGE7_PINOUT_SFF_8470_L1 (0x00000100)
-#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x00000002)
-#define MPI2_MANPAGE7_PINOUT_CONNECTION_UNKNOWN (0x00000001)
+#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00)
+#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8)
+
+#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF)
+#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00)
+#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01)
+#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02)
+#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03)
+#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04)
+#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07)
+#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08)
+#define MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C)
+#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D)
/* defines for the Location field */
#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01)
@@ -662,7 +678,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7
MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_7,
Mpi2ManufacturingPage7_t, MPI2_POINTER pMpi2ManufacturingPage7_t;
-#define MPI2_MANUFACTURING7_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
/* defines for the Flags field */
#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
@@ -849,11 +865,13 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
U16 IOCTemperature; /* 0x10 */
U8 IOCTemperatureUnits; /* 0x12 */
U8 IOCSpeed; /* 0x13 */
- U32 Reserved3; /* 0x14 */
+ U16 BoardTemperature; /* 0x14 */
+ U8 BoardTemperatureUnits; /* 0x16 */
+ U8 Reserved3; /* 0x17 */
} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t;
-#define MPI2_IOUNITPAGE7_PAGEVERSION (0x01)
+#define MPI2_IOUNITPAGE7_PAGEVERSION (0x02)
/* defines for IO Unit Page 7 PCIeWidth field */
#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01)
@@ -881,7 +899,6 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008)
#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004)
-
/* defines for IO Unit Page 7 IOCTemperatureUnits field */
#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00)
#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01)
@@ -893,6 +910,11 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04)
#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08)
+/* defines for IO Unit Page 7 BoardTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02)
+
/****************************************************************************
@@ -2799,5 +2821,25 @@ typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 {
#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03)
+/****************************************************************************
+* Extended Manufacturing Config Pages
+****************************************************************************/
+
+/*
+ * Generic structure to use for product-specific extended manufacturing pages
+ * (currently Extended Manufacturing Page 40 through Extended Manufacturing
+ * Page 60).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /* 0x00 */
+ U32 ProductSpecificInfo; /* 0x08 */
+} MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ MPI2_POINTER PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ Mpi2ExtManufacturingPagePS_t,
+ MPI2_POINTER pMpi2ExtManufacturingPagePS_t;
+
+/* PageVersion should be provided by product-specific code */
+
#endif
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 165454d5259..de90162413c 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -6,7 +6,7 @@
* Title: MPI SCSI initiator mode messages and structures
* Creation Date: June 23, 2006
*
- * mpi2_init.h Version: 02.00.10
+ * mpi2_init.h Version: 02.00.11
*
* Version History
* ---------------
@@ -33,6 +33,7 @@
* Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
* 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
* 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
+ * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
* --------------------------------------------------------------------------
*/
@@ -139,6 +140,9 @@ typedef struct _MPI2_SCSI_IO_REQUEST
#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4)
#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0)
+/* number of SGLOffset fields */
+#define MPI2_SCSIIO_NUM_SGLOFFSETS (4)
+
/* SCSI IO IoFlags bits */
/* Large CDB Address Space */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 761cbdb8a03..1f0c190d336 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.15
+ * mpi2_ioc.h Version: 02.00.16
*
* Version History
* ---------------
@@ -103,6 +103,7 @@
* defines.
* 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
* Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
+ * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
* --------------------------------------------------------------------------
*/
@@ -1032,6 +1033,7 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST
#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
/* FWDownload TransactionContext Element */
typedef struct _MPI2_FW_DOWNLOAD_TCSGE
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index efa0255491c..83035bd1c48 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -94,7 +94,7 @@ module_param(diag_buffer_enable, int, 0);
MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
"(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
-int mpt2sas_fwfault_debug;
+static int mpt2sas_fwfault_debug;
MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
"and halt firmware - (default=0)");
@@ -857,7 +857,7 @@ _base_interrupt(int irq, void *bus_id)
completed_cmds = 0;
cb_idx = 0xFF;
do {
- rd.word = rpf->Words;
+ rd.word = le64_to_cpu(rpf->Words);
if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
goto out;
reply = 0;
@@ -906,7 +906,7 @@ _base_interrupt(int irq, void *bus_id)
next:
- rpf->Words = ULLONG_MAX;
+ rpf->Words = cpu_to_le64(ULLONG_MAX);
ioc->reply_post_host_index = (ioc->reply_post_host_index ==
(ioc->reply_post_queue_depth - 1)) ? 0 :
ioc->reply_post_host_index + 1;
@@ -1740,9 +1740,11 @@ _base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
static void
_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
{
- if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_INTEL &&
- ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008) {
+ if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
+ return;
+ switch (ioc->pdev->device) {
+ case MPI2_MFGPAGE_DEVID_SAS2008:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_INTEL_RMS2LL080_SSDID:
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
@@ -1752,7 +1754,20 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
MPT2SAS_INTEL_RMS2LL040_BRANDING);
break;
+ default:
+ break;
+ }
+ case MPI2_MFGPAGE_DEVID_SAS2308_2:
+ switch (ioc->pdev->subsystem_device) {
+ case MPT2SAS_INTEL_RS25GB008_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RS25GB008_BRANDING);
+ break;
+ default:
+ break;
}
+ default:
+ break;
}
}
@@ -1817,7 +1832,9 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
char desc[16];
u8 revision;
u32 iounit_pg1_flags;
+ u32 bios_version;
+ bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
strncpy(desc, ioc->manu_pg0.ChipName, 16);
printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
@@ -1828,10 +1845,10 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
(ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
ioc->facts.FWVersion.Word & 0x000000FF,
revision,
- (ioc->bios_pg3.BiosVersion & 0xFF000000) >> 24,
- (ioc->bios_pg3.BiosVersion & 0x00FF0000) >> 16,
- (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8,
- ioc->bios_pg3.BiosVersion & 0x000000FF);
+ (bios_version & 0xFF000000) >> 24,
+ (bios_version & 0x00FF0000) >> 16,
+ (bios_version & 0x0000FF00) >> 8,
+ bios_version & 0x000000FF);
_base_display_dell_branding(ioc);
_base_display_intel_branding(ioc);
@@ -2150,7 +2167,7 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
static int
_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
{
- Mpi2IOCFactsReply_t *facts;
+ struct mpt2sas_facts *facts;
u32 queue_size, queue_diff;
u16 max_sge_elements;
u16 num_of_reply_frames;
@@ -2783,7 +2800,7 @@ _base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
int i;
u8 failed;
u16 dummy;
- u32 *mfp;
+ __le32 *mfp;
/* make sure doorbell is not in use */
if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
@@ -2871,7 +2888,7 @@ _base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
writel(0, &ioc->chip->HostInterruptStatus);
if (ioc->logging_level & MPT_DEBUG_INIT) {
- mfp = (u32 *)reply;
+ mfp = (__le32 *)reply;
printk(KERN_INFO "\toffset:data\n");
for (i = 0; i < reply_bytes/4; i++)
printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
@@ -3097,7 +3114,8 @@ static int
_base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
{
Mpi2PortFactsRequest_t mpi_request;
- Mpi2PortFactsReply_t mpi_reply, *pfacts;
+ Mpi2PortFactsReply_t mpi_reply;
+ struct mpt2sas_port_facts *pfacts;
int mpi_reply_sz, mpi_request_sz, r;
dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
@@ -3139,7 +3157,8 @@ static int
_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
{
Mpi2IOCFactsRequest_t mpi_request;
- Mpi2IOCFactsReply_t mpi_reply, *facts;
+ Mpi2IOCFactsReply_t mpi_reply;
+ struct mpt2sas_facts *facts;
int mpi_reply_sz, mpi_request_sz, r;
dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
@@ -3225,17 +3244,6 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
- /* In MPI Revision I (0xA), the SystemReplyFrameSize(offset 0x18) was
- * removed and made reserved. For those with older firmware will need
- * this fix. It was decided that the Reply and Request frame sizes are
- * the same.
- */
- if ((ioc->facts.HeaderVersion >> 8) < 0xA) {
- mpi_request.Reserved7 = cpu_to_le16(ioc->reply_sz);
-/* mpi_request.SystemReplyFrameSize =
- * cpu_to_le16(ioc->reply_sz);
- */
- }
mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
mpi_request.ReplyDescriptorPostQueueDepth =
@@ -3243,25 +3251,17 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
mpi_request.ReplyFreeQueueDepth =
cpu_to_le16(ioc->reply_free_queue_depth);
-#if BITS_PER_LONG > 32
mpi_request.SenseBufferAddressHigh =
- cpu_to_le32(ioc->sense_dma >> 32);
+ cpu_to_le32((u64)ioc->sense_dma >> 32);
mpi_request.SystemReplyAddressHigh =
- cpu_to_le32(ioc->reply_dma >> 32);
+ cpu_to_le32((u64)ioc->reply_dma >> 32);
mpi_request.SystemRequestFrameBaseAddress =
- cpu_to_le64(ioc->request_dma);
+ cpu_to_le64((u64)ioc->request_dma);
mpi_request.ReplyFreeQueueAddress =
- cpu_to_le64(ioc->reply_free_dma);
+ cpu_to_le64((u64)ioc->reply_free_dma);
mpi_request.ReplyDescriptorPostQueueAddress =
- cpu_to_le64(ioc->reply_post_free_dma);
-#else
- mpi_request.SystemRequestFrameBaseAddress =
- cpu_to_le32(ioc->request_dma);
- mpi_request.ReplyFreeQueueAddress =
- cpu_to_le32(ioc->reply_free_dma);
- mpi_request.ReplyDescriptorPostQueueAddress =
- cpu_to_le32(ioc->reply_post_free_dma);
-#endif
+ cpu_to_le64((u64)ioc->reply_post_free_dma);
+
/* This time stamp specifies number of milliseconds
* since epoch ~ midnight January 1, 1970.
@@ -3271,10 +3271,10 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
(current_time.tv_usec / 1000));
if (ioc->logging_level & MPT_DEBUG_INIT) {
- u32 *mfp;
+ __le32 *mfp;
int i;
- mfp = (u32 *)&mpi_request;
+ mfp = (__le32 *)&mpi_request;
printk(KERN_INFO "\toffset:data\n");
for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
printk(KERN_INFO "\t[0x%02x]:%08x\n", i*4,
@@ -3759,7 +3759,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
/* initialize Reply Post Free Queue */
for (i = 0; i < ioc->reply_post_queue_depth; i++)
- ioc->reply_post_free[i].Words = ULLONG_MAX;
+ ioc->reply_post_free[i].Words = cpu_to_le64(ULLONG_MAX);
r = _base_send_ioc_init(ioc, sleep_flag);
if (r)
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index dcc289c2545..8d5be2120c6 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,11 +69,11 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "08.100.00.02"
-#define MPT2SAS_MAJOR_VERSION 08
+#define MPT2SAS_DRIVER_VERSION "09.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 09
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
-#define MPT2SAS_RELEASE_VERSION 02
+#define MPT2SAS_RELEASE_VERSION 00
/*
* Set MPT2SAS_SG_DEPTH value based on user input.
@@ -161,12 +161,15 @@
"Intel Integrated RAID Module RMS2LL080"
#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
"Intel Integrated RAID Module RMS2LL040"
+#define MPT2SAS_INTEL_RS25GB008_BRANDING \
+ "Intel(R) RAID Controller RS25GB008"
/*
* Intel HBA SSDIDs
*/
#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
+#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
/*
@@ -541,6 +544,63 @@ struct _tr_list {
typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
+/* IOC Facts and Port Facts converted from little endian to cpu */
+union mpi2_version_union {
+ MPI2_VERSION_STRUCT Struct;
+ u32 Word;
+};
+
+struct mpt2sas_facts {
+ u16 MsgVersion;
+ u16 HeaderVersion;
+ u8 IOCNumber;
+ u8 VP_ID;
+ u8 VF_ID;
+ u16 IOCExceptions;
+ u16 IOCStatus;
+ u32 IOCLogInfo;
+ u8 MaxChainDepth;
+ u8 WhoInit;
+ u8 NumberOfPorts;
+ u8 MaxMSIxVectors;
+ u16 RequestCredit;
+ u16 ProductID;
+ u32 IOCCapabilities;
+ union mpi2_version_union FWVersion;
+ u16 IOCRequestFrameSize;
+ u16 Reserved3;
+ u16 MaxInitiators;
+ u16 MaxTargets;
+ u16 MaxSasExpanders;
+ u16 MaxEnclosures;
+ u16 ProtocolFlags;
+ u16 HighPriorityCredit;
+ u16 MaxReplyDescriptorPostQueueDepth;
+ u8 ReplyFrameSize;
+ u8 MaxVolumes;
+ u16 MaxDevHandle;
+ u16 MaxPersistentEntries;
+ u16 MinDevHandle;
+};
+
+struct mpt2sas_port_facts {
+ u8 PortNumber;
+ u8 VP_ID;
+ u8 VF_ID;
+ u8 PortType;
+ u16 MaxPostedCmdBuffers;
+};
+
+/**
+ * enum mutex_type - task management mutex type
+ * @TM_MUTEX_OFF: mutex is not required because the calling function is acquiring it
+ * @TM_MUTEX_ON: mutex is required
+ */
+enum mutex_type {
+ TM_MUTEX_OFF = 0,
+ TM_MUTEX_ON = 1,
+};
+
/**
* struct MPT2SAS_ADAPTER - per adapter struct
* @list: ioc_list
@@ -703,6 +763,7 @@ struct MPT2SAS_ADAPTER {
/* misc flags */
int aen_event_read_flag;
u8 broadcast_aen_busy;
+ u16 broadcast_aen_pending;
u8 shost_recovery;
struct mutex reset_in_progress_mutex;
@@ -749,8 +810,8 @@ struct MPT2SAS_ADAPTER {
u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
/* static config pages */
- Mpi2IOCFactsReply_t facts;
- Mpi2PortFactsReply_t *pfacts;
+ struct mpt2sas_facts facts;
+ struct mpt2sas_port_facts *pfacts;
Mpi2ManufacturingPage0_t manu_pg0;
Mpi2BiosPage2_t bios_pg2;
Mpi2BiosPage3_t bios_pg3;
@@ -840,7 +901,7 @@ struct MPT2SAS_ADAPTER {
/* reply free queue */
u16 reply_free_queue_depth;
- u32 *reply_free;
+ __le32 *reply_free;
dma_addr_t reply_free_dma;
struct dma_pool *reply_free_dma_pool;
u32 reply_free_host_index;
@@ -932,8 +993,8 @@ void mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc);
u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task,
- ulong timeout, struct scsi_cmnd *scmd);
+ uint channel, uint id, uint lun, u8 type, u16 smid_task,
+ ulong timeout, unsigned long serial_number, enum mutex_type m_type);
void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 437c2d94c45..38ed0260959 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -994,7 +994,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
mpt2sas_scsih_issue_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10,
- NULL);
+ 0, TM_MUTEX_ON);
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
} else
mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
@@ -2706,13 +2706,13 @@ static DEVICE_ATTR(ioc_reset_count, S_IRUGO,
_ctl_ioc_reset_count_show, NULL);
struct DIAG_BUFFER_START {
- u32 Size;
- u32 DiagVersion;
+ __le32 Size;
+ __le32 DiagVersion;
u8 BufferType;
u8 Reserved[3];
- u32 Reserved1;
- u32 Reserved2;
- u32 Reserved3;
+ __le32 Reserved1;
+ __le32 Reserved2;
+ __le32 Reserved3;
};
/**
* _ctl_host_trace_buffer_size_show - host buffer size (trace only)
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index 3dcddfeb6f4..9731f8e661b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -164,7 +164,7 @@ static inline void
_debug_dump_mf(void *mpi_request, int sz)
{
int i;
- u32 *mfp = (u32 *)mpi_request;
+ __le32 *mfp = (__le32 *)mpi_request;
printk(KERN_INFO "mf:\n\t");
for (i = 0; i < sz; i++) {
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index a7dbc6825f5..6abd2fcc43e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -94,6 +94,10 @@ static u32 logging_level;
MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info "
"(default=0)");
+static ushort max_sectors = 0xFFFF;
+module_param(max_sectors, ushort, 0);
+MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 8192 default=8192");
+
/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPT2SAS_MAX_LUN (16895)
static int max_lun = MPT2SAS_MAX_LUN;
@@ -1956,7 +1960,7 @@ _scsih_slave_configure(struct scsi_device *sdev)
case MPI2_RAID_VOL_TYPE_RAID1E:
qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
if (ioc->manu_pg10.OEMIdentifier &&
- (ioc->manu_pg10.GenericFlags0 &
+ (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
MFG10_GF0_R10_DISPLAY) &&
!(raid_device->num_pds % 2))
r_level = "RAID10";
@@ -2236,6 +2240,8 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
* @timeout: timeout in seconds
+ * @serial_number: the serial_number from scmd
+ * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
* Context: user
*
* A generic API for sending task management requests to firmware.
@@ -2247,17 +2253,18 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
int
mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
- struct scsi_cmnd *scmd)
+ unsigned long serial_number, enum mutex_type m_type)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
u16 smid = 0;
u32 ioc_state;
unsigned long timeleft;
- struct scsi_cmnd *scmd_lookup;
+ struct scsiio_tracker *scsi_lookup = NULL;
int rc;
- mutex_lock(&ioc->tm_cmds.mutex);
+ if (m_type == TM_MUTEX_ON)
+ mutex_lock(&ioc->tm_cmds.mutex);
if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
__func__, ioc->name);
@@ -2277,18 +2284,18 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
if (ioc_state & MPI2_DOORBELL_USED) {
dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "unexpected doorbell "
"active!\n", ioc->name));
- mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
- rc = SUCCESS;
+ rc = (!rc) ? SUCCESS : FAILED;
goto err_out;
}
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
mpt2sas_base_fault_info(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
- mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
- rc = SUCCESS;
+ rc = (!rc) ? SUCCESS : FAILED;
goto err_out;
}
@@ -2300,6 +2307,9 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
goto err_out;
}
+ if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+ scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
+
dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "sending tm: handle(0x%04x),"
" task_type(0x%02x), smid(%d)\n", ioc->name, handle, type,
smid_task));
@@ -2307,6 +2317,7 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
ioc->tm_cmds.smid = smid;
memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = type;
@@ -2322,9 +2333,9 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
_debug_dump_mf(mpi_request,
sizeof(Mpi2SCSITaskManagementRequest_t)/4);
if (!(ioc->tm_cmds.status & MPT2_CMD_RESET)) {
- mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
- rc = SUCCESS;
+ rc = (!rc) ? SUCCESS : FAILED;
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
mpt2sas_scsih_clear_tm_flag(ioc, handle);
goto err_out;
@@ -2346,20 +2357,12 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
}
}
- /* sanity check:
- * Check to see the commands were terminated.
- * This is only needed for eh callbacks, hence the scmd check.
- */
- rc = FAILED;
- if (scmd == NULL)
- goto bypass_sanity_checks;
switch (type) {
case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
- scmd_lookup = _scsih_scsi_lookup_get(ioc, smid_task);
- if (scmd_lookup)
- rc = FAILED;
- else
- rc = SUCCESS;
+ rc = SUCCESS;
+ if (scsi_lookup->scmd == NULL)
+ break;
+ rc = FAILED;
break;
case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
@@ -2369,24 +2372,31 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
rc = SUCCESS;
break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
rc = FAILED;
else
rc = SUCCESS;
break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+ rc = SUCCESS;
+ break;
+ default:
+ rc = FAILED;
+ break;
}
- bypass_sanity_checks:
-
mpt2sas_scsih_clear_tm_flag(ioc, handle);
ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
- mutex_unlock(&ioc->tm_cmds.mutex);
+ if (m_type == TM_MUTEX_ON)
+ mutex_unlock(&ioc->tm_cmds.mutex);
return rc;
err_out:
- mutex_unlock(&ioc->tm_cmds.mutex);
+ if (m_type == TM_MUTEX_ON)
+ mutex_unlock(&ioc->tm_cmds.mutex);
return rc;
}
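Side note for readers: the new serial_number/m_type arguments above let the broadcast-primitive handler, which already holds tm_cmds.mutex for its whole scan, call the TM routine with TM_MUTEX_OFF and avoid taking the mutex twice. A minimal userspace sketch of that conditional-locking pattern, using pthreads and made-up names (issue_tm, broadcast_scan), not the driver's API:

    #include <pthread.h>
    #include <stdio.h>

    enum mutex_type { TM_MUTEX_OFF = 0, TM_MUTEX_ON = 1 };

    static pthread_mutex_t tm_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Hypothetical worker: takes the lock only when the caller does not own it. */
    static int issue_tm(int handle, enum mutex_type m_type)
    {
        if (m_type == TM_MUTEX_ON)
            pthread_mutex_lock(&tm_mutex);

        printf("issuing TM for handle 0x%04x\n", (unsigned)handle);

        if (m_type == TM_MUTEX_ON)
            pthread_mutex_unlock(&tm_mutex);
        return 0;
    }

    /* Caller that serializes a whole batch of TMs under the same mutex. */
    static void broadcast_scan(void)
    {
        pthread_mutex_lock(&tm_mutex);
        for (int h = 1; h <= 3; h++)
            issue_tm(h, TM_MUTEX_OFF);    /* avoid taking the lock twice */
        pthread_mutex_unlock(&tm_mutex);
    }

    int main(void)
    {
        issue_tm(0x9, TM_MUTEX_ON);       /* standalone caller takes the lock */
        broadcast_scan();
        return 0;
    }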
@@ -2496,7 +2506,8 @@ _scsih_abort(struct scsi_cmnd *scmd)
handle = sas_device_priv_data->sas_target->handle;
r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, scmd);
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+ scmd->serial_number, TM_MUTEX_ON);
out:
sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
@@ -2557,7 +2568,8 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, scmd);
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
+ TM_MUTEX_ON);
out:
sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
@@ -2617,7 +2629,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
- 30, scmd);
+ 30, 0, TM_MUTEX_ON);
out:
starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
@@ -2750,6 +2762,31 @@ _scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
}
/**
+ * _scsih_ublock_io_all_device - unblock every device
+ * @ioc: per adapter object
+ *
+ * change the device state from block to running
+ */
+static void
+_scsih_ublock_io_all_device(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (!sas_device_priv_data->block)
+ continue;
+ sas_device_priv_data->block = 0;
+ dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_running, "
+ "handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle));
+ scsi_internal_device_unblock(sdev);
+ }
+}
+/**
* _scsih_ublock_io_device - set the device state to SDEV_RUNNING
* @ioc: per adapter object
* @handle: device handle
@@ -2779,6 +2816,34 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u16 handle)
}
/**
+ * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During device pull we need to appropriately set the sdev state.
+ */
+static void
+_scsih_block_io_all_device(struct MPT2SAS_ADAPTER *ioc)
+{
+ struct MPT2SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->block)
+ continue;
+ sas_device_priv_data->block = 1;
+ dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_blocked, "
+ "handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle));
+ scsi_internal_device_block(sdev);
+ }
+}
+
+
+/**
* _scsih_block_io_device - set the device state to SDEV_BLOCK
* @ioc: per adapter object
* @handle: device handle
@@ -3698,7 +3763,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
return 0;
}
- if (ioc->pci_error_recovery) {
+ if (ioc->pci_error_recovery || ioc->remove_host) {
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
@@ -4193,6 +4258,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
u32 log_info;
struct MPT2SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
+ unsigned long flags;
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
@@ -4217,6 +4283,9 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* the failed direct I/O should be redirected to volume
*/
if (_scsih_scsi_direct_io_get(ioc, smid)) {
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ ioc->scsi_lookup[smid - 1].scmd = scmd;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
_scsih_scsi_direct_io_set(ioc, smid, 0);
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
mpi_request->DevHandle =
@@ -4598,7 +4667,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
Mpi2SasEnclosurePage0_t enclosure_pg0;
u32 ioc_status;
u16 parent_handle;
- __le64 sas_address, sas_address_parent = 0;
+ u64 sas_address, sas_address_parent = 0;
int i;
unsigned long flags;
struct _sas_port *mpt2sas_port = NULL;
@@ -5380,9 +5449,10 @@ _scsih_sas_device_status_change_event_debug(struct MPT2SAS_ADAPTER *ioc,
break;
}
printk(MPT2SAS_INFO_FMT "device status change: (%s)\n"
- "\thandle(0x%04x), sas address(0x%016llx)", ioc->name,
- reason_str, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->SASAddress));
+ "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
+ ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
+ (unsigned long long)le64_to_cpu(event_data->SASAddress),
+ le16_to_cpu(event_data->TaskTag));
if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
printk(MPT2SAS_INFO_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
event_data->ASC, event_data->ASCQ);
@@ -5404,7 +5474,7 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
{
struct MPT2SAS_TARGET *target_priv_data;
struct _sas_device *sas_device;
- __le64 sas_address;
+ u64 sas_address;
unsigned long flags;
Mpi2EventDataSasDeviceStatusChange_t *event_data =
fw_event->event_data;
@@ -5522,25 +5592,38 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
u32 termination_count;
u32 query_count;
Mpi2SCSITaskManagementReply_t *mpi_reply;
-#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
-#endif
u16 ioc_status;
unsigned long flags;
int r;
+ u8 max_retries = 0;
+ u8 task_abort_retries;
+
+ mutex_lock(&ioc->tm_cmds.mutex);
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: phy number(%d), "
+ "width(%d)\n", ioc->name, __func__, event_data->PhyNum,
+ event_data->PortWidth));
- dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primitive: "
- "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
- event_data->PortWidth));
- dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
- __func__));
+ _scsih_block_io_all_device(ioc);
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- ioc->broadcast_aen_busy = 0;
+ mpi_reply = ioc->tm_cmds.reply;
+broadcast_aen_retry:
+
+ /* sanity checks for retrying this loop */
+ if (max_retries++ == 5) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: giving up\n",
+ ioc->name, __func__));
+ goto out;
+ } else if (max_retries > 1)
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: %d retry\n",
+ ioc->name, __func__, max_retries - 1));
+
termination_count = 0;
query_count = 0;
- mpi_reply = ioc->tm_cmds.reply;
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
+ if (ioc->ioc_reset_in_progress_status)
+ goto out;
scmd = _scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
@@ -5561,34 +5644,90 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
lun = sas_device_priv_data->lun;
query_count++;
+ if (ioc->ioc_reset_in_progress_status)
+ goto out;
+
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
- ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+ r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0,
+ TM_MUTEX_OFF);
+ if (r == FAILED) {
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt2sas_scsih_issue_tm: FAILED when sending "
+ "QUERY_TASK: scmd(%p)\n", scmd);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
& MPI2_IOCSTATUS_MASK;
- if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
- (mpi_reply->ResponseCode ==
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ sdev_printk(KERN_WARNING, sdev, "query task: FAILED "
+ "with IOCSTATUS(0x%04x), scmd(%p)\n", ioc_status,
+ scmd);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+
+ /* see if IO is still owned by IOC and target */
+ if (mpi_reply->ResponseCode ==
MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
mpi_reply->ResponseCode ==
- MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) {
+ MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
continue;
}
+ task_abort_retries = 0;
+ tm_retry:
+ if (task_abort_retries++ == 60) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+ "%s: ABORT_TASK: giving up\n", ioc->name,
+ __func__));
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+
+ if (ioc->ioc_reset_in_progress_status)
+ goto out_no_lock;
+
r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
- scmd);
- if (r == FAILED)
- sdev_printk(KERN_WARNING, sdev, "task abort: FAILED "
+ scmd->serial_number, TM_MUTEX_OFF);
+ if (r == FAILED) {
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : "
"scmd(%p)\n", scmd);
+ goto tm_retry;
+ }
+
+ if (task_abort_retries > 1)
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt2sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
+ " scmd(%p)\n",
+ task_abort_retries - 1, scmd);
+
termination_count += le32_to_cpu(mpi_reply->TerminationCount);
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
}
+
+ if (ioc->broadcast_aen_pending) {
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: loop back due to"
+ " pending AEN\n", ioc->name, __func__));
+ ioc->broadcast_aen_pending = 0;
+ goto broadcast_aen_retry;
+ }
+
+ out:
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ out_no_lock:
- dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
+ dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
"%s - exit, query_count = %d termination_count = %d\n",
ioc->name, __func__, query_count, termination_count));
+
+ ioc->broadcast_aen_busy = 0;
+ if (!ioc->ioc_reset_in_progress_status)
+ _scsih_ublock_io_all_device(ioc);
+ mutex_unlock(&ioc->tm_cmds.mutex);
}
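
The reworked broadcast primitive handler above bounds its work with two counters: at most five passes over the outstanding commands (max_retries) and at most sixty ABORT_TASK attempts per command (task_abort_retries). A failed QUERY_TASK restarts the whole pass, while a failed abort retries only that command until its own budget runs out. The following minimal userspace C sketch models that two-level bounded-retry shape only; the names and the simulated failures are hypothetical, not driver code.

	#include <stdbool.h>
	#include <stdio.h>

	/* hypothetical stand-ins for the firmware calls; they just simulate failure */
	static bool query_task(int smid) { return smid % 7 != 0; }
	static bool abort_task(int smid) { return smid % 13 != 0; }

	static void handle_broadcast_event(int depth)
	{
		int max_retries = 0;

	retry_event:
		if (max_retries++ == 5) {		/* outer bound: give up */
			printf("giving up after %d passes\n", max_retries - 1);
			return;
		}

		for (int smid = 1; smid <= depth; smid++) {
			if (!query_task(smid))
				goto retry_event;	/* failed query: re-scan everything */

			int task_abort_retries = 0;
	abort_retry:
			if (task_abort_retries++ == 60)
				goto retry_event;	/* inner bound exhausted */
			if (!abort_task(smid))
				goto abort_retry;	/* retry just this command */
		}
		printf("all %d commands handled in %d pass(es)\n", depth, max_retries);
	}

	int main(void)
	{
		handle_broadcast_event(16);
		return 0;
	}
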
/**
@@ -6566,7 +6705,7 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
Mpi2ExpanderPage0_t expander_pg0;
Mpi2ConfigReply_t mpi_reply;
u16 ioc_status;
- __le64 sas_address;
+ u64 sas_address;
u16 handle;
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__);
@@ -6862,10 +7001,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
mpi_reply->EventData;
if (baen_data->Primitive !=
- MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT ||
- ioc->broadcast_aen_busy)
+ MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
return 1;
- ioc->broadcast_aen_busy = 1;
+
+ if (ioc->broadcast_aen_busy) {
+ ioc->broadcast_aen_pending++;
+ return 1;
+ } else
+ ioc->broadcast_aen_busy = 1;
break;
}
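
The change above stops dropping broadcast AENs that arrive while one is already being serviced: the event callback counts them in broadcast_aen_pending, and the worker loops back once more before clearing broadcast_aen_busy. A tiny userspace model of that busy/pending coalescing pattern follows; all names are hypothetical.

	#include <stdio.h>

	static int busy, pending;

	/* interrupt side: either claim the work or remember that more arrived */
	static void event_callback(void)
	{
		if (busy)
			pending++;
		else
			busy = 1;
	}

	/* worker side: keep going while events were coalesced behind us */
	static void event_worker(void)
	{
	process:
		printf("processing one broadcast event\n");
		if (pending) {
			pending = 0;
			goto process;	/* loop back for the coalesced events */
		}
		busy = 0;
	}

	int main(void)
	{
		event_callback();	/* first event claims busy             */
		event_callback();	/* second event is coalesced (pending) */
		event_worker();		/* processes twice, then clears busy   */
		return 0;
	}
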
@@ -7211,7 +7354,6 @@ _scsih_remove(struct pci_dev *pdev)
}
sas_remove_host(shost);
- _scsih_shutdown(pdev);
list_del(&ioc->list);
scsi_remove_host(shost);
scsi_host_put(shost);
@@ -7436,6 +7578,25 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->transportt = mpt2sas_transport_template;
shost->unique_id = ioc->id;
+ if (max_sectors != 0xFFFF) {
+ if (max_sectors < 64) {
+ shost->max_sectors = 64;
+ printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
+ "for max_sectors, range is 64 to 8192. Assigning "
+ "value of 64.\n", ioc->name, max_sectors);
+ } else if (max_sectors > 8192) {
+ shost->max_sectors = 8192;
+ printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
+ "for max_sectors, range is 64 to 8192. Assigning "
+ "default value of 8192.\n", ioc->name,
+ max_sectors);
+ } else {
+ shost->max_sectors = max_sectors & 0xFFFE;
+ printk(MPT2SAS_INFO_FMT "The max_sectors value is "
+ "set to %d\n", ioc->name, shost->max_sectors);
+ }
+ }
+
if ((scsi_add_host(shost, &pdev->dev))) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
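
The new max_sectors handling clamps the module parameter to the 64 to 8192 range, rounds an in-range value down to an even number with the & 0xFFFE mask, and leaves the default untouched when the parameter still holds the 0xFFFF sentinel. A small sketch of just that validation logic (userspace, hypothetical helper name):

	#include <stdio.h>

	static unsigned clamp_max_sectors(unsigned requested, unsigned current_max)
	{
		if (requested == 0xFFFF)	/* parameter left at its sentinel: keep default */
			return current_max;
		if (requested < 64)
			return 64;
		if (requested > 8192)
			return 8192;
		return requested & 0xFFFE;	/* round down to an even sector count */
	}

	int main(void)
	{
		printf("%u %u %u %u\n",
		       clamp_max_sectors(0xFFFF, 256),	/* 256: default untouched  */
		       clamp_max_sectors(10, 256),	/* 64: below the minimum   */
		       clamp_max_sectors(9000, 256),	/* 8192: above the maximum */
		       clamp_max_sectors(1025, 256));	/* 1024: rounded to even   */
		return 0;
	}
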
@@ -7505,7 +7666,7 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
- u32 device_state;
+ pci_power_t device_state;
mpt2sas_base_stop_watchdog(ioc);
scsi_block_requests(shost);
@@ -7532,7 +7693,7 @@ _scsih_resume(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
- u32 device_state = pdev->current_state;
+ pci_power_t device_state = pdev->current_state;
int r;
printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, previous "
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index cb1cdecbe0f..15c79802621 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -299,7 +299,6 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
void *data_out = NULL;
dma_addr_t data_out_dma;
u32 sz;
- u64 *sas_address_le;
u16 wait_state_count;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
@@ -372,8 +371,7 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
mpi_request->PhysicalPort = 0xFF;
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
- sas_address_le = (u64 *)&mpi_request->SASAddress;
- *sas_address_le = cpu_to_le64(sas_address);
+ mpi_request->SASAddress = cpu_to_le64(sas_address);
mpi_request->RequestDataLength =
cpu_to_le16(sizeof(struct rep_manu_request));
psge = &mpi_request->SGL;
@@ -1049,14 +1047,14 @@ struct phy_error_log_reply{
u8 function; /* 0x11 */
u8 function_result;
u8 response_length;
- u16 expander_change_count;
+ __be16 expander_change_count;
u8 reserved_1[3];
u8 phy_identifier;
u8 reserved_2[2];
- u32 invalid_dword;
- u32 running_disparity_error;
- u32 loss_of_dword_sync;
- u32 phy_reset_problem;
+ __be32 invalid_dword;
+ __be32 running_disparity_error;
+ __be32 loss_of_dword_sync;
+ __be32 phy_reset_problem;
};
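
Marking the SMP reply counters as __be16/__be32 documents that they arrive big-endian and lets endianness checkers such as sparse flag accesses that skip the byte-order conversion. The same discipline can be modelled in plain userspace C with explicit conversions; the sketch below uses the socket byte-order helpers and is not the driver's structure:

	#include <arpa/inet.h>	/* htons/ntohl: network order is big-endian */
	#include <stdint.h>
	#include <stdio.h>

	/* wire format: counters are stored big-endian, as in the SMP reply */
	struct phy_error_counters {
		uint16_t expander_change_count;	/* big-endian on the wire */
		uint32_t invalid_dword;		/* big-endian on the wire */
	};

	int main(void)
	{
		/* pretend these bytes just arrived from the expander */
		struct phy_error_counters wire = {
			.expander_change_count = htons(3),
			.invalid_dword = htonl(0x12345678),
		};

		/* every read goes through an explicit conversion */
		printf("change count %u, invalid dwords 0x%08x\n",
		       (unsigned)ntohs(wire.expander_change_count),
		       (unsigned)ntohl(wire.invalid_dword));
		return 0;
	}
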
/**
@@ -1085,7 +1083,6 @@ _transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
void *data_out = NULL;
dma_addr_t data_out_dma;
u32 sz;
- u64 *sas_address_le;
u16 wait_state_count;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
@@ -1160,8 +1157,7 @@ _transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
mpi_request->PhysicalPort = 0xFF;
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
- sas_address_le = (u64 *)&mpi_request->SASAddress;
- *sas_address_le = cpu_to_le64(phy->identify.sas_address);
+ mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
mpi_request->RequestDataLength =
cpu_to_le16(sizeof(struct phy_error_log_request));
psge = &mpi_request->SGL;
@@ -1406,7 +1402,6 @@ _transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
void *data_out = NULL;
dma_addr_t data_out_dma;
u32 sz;
- u64 *sas_address_le;
u16 wait_state_count;
if (ioc->shost_recovery) {
@@ -1486,8 +1481,7 @@ _transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
mpi_request->PhysicalPort = 0xFF;
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
- sas_address_le = (u64 *)&mpi_request->SASAddress;
- *sas_address_le = cpu_to_le64(phy->identify.sas_address);
+ mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
mpi_request->RequestDataLength =
cpu_to_le16(sizeof(struct phy_error_log_request));
psge = &mpi_request->SGL;
@@ -1914,7 +1908,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
mpi_request->PhysicalPort = 0xFF;
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
- *((u64 *)&mpi_request->SASAddress) = (rphy) ?
+ mpi_request->SASAddress = (rphy) ?
cpu_to_le64(rphy->identify.sas_address) :
cpu_to_le64(ioc->sas_hba.sas_address);
mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
index c82b012aba3..78f7e20a0c1 100644
--- a/drivers/scsi/mvsas/Kconfig
+++ b/drivers/scsi/mvsas/Kconfig
@@ -3,7 +3,7 @@
#
# Copyright 2007 Red Hat, Inc.
# Copyright 2008 Marvell. <kewei@marvell.com>
-# Copyright 2009-20011 Marvell. <yuxiangl@marvell.com>
+# Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
#
# This file is licensed under GPLv2.
#
@@ -41,3 +41,10 @@ config SCSI_MVSAS_DEBUG
help
Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode,
the driver prints some messages to the console.
+config SCSI_MVSAS_TASKLET
+ bool "Support for interrupt tasklet"
+ default n
+ depends on SCSI_MVSAS
+ help
+	  Compiles the 88SE64xx/88SE94xx driver in interrupt tasklet mode. In this
+	  mode, the interrupt handler will schedule a tasklet.
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index 13c96048139..8ba47229049 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -33,7 +33,6 @@ static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
u32 reg;
struct mvs_phy *phy = &mvi->phy[i];
- /* TODO check & save device type */
reg = mr32(MVS_GBL_PORT_TYPE);
phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
if (reg & MODE_SAS_SATA & (1 << i))
@@ -48,7 +47,7 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
u32 tmp;
tmp = mr32(MVS_PCS);
- if (mvi->chip->n_phy <= 4)
+ if (mvi->chip->n_phy <= MVS_SOC_PORTS)
tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
else
tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
@@ -58,24 +57,16 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
+ int i;
mvs_phy_hacks(mvi);
if (!(mvi->flags & MVF_FLAG_SOC)) {
- /* TEST - for phy decoding error, adjust voltage levels */
- mw32(MVS_P0_VSR_ADDR + 0, 0x8);
- mw32(MVS_P0_VSR_DATA + 0, 0x2F0);
-
- mw32(MVS_P0_VSR_ADDR + 8, 0x8);
- mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
-
- mw32(MVS_P0_VSR_ADDR + 16, 0x8);
- mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
-
- mw32(MVS_P0_VSR_ADDR + 24, 0x8);
- mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
+ for (i = 0; i < MVS_SOC_PORTS; i++) {
+ mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE8);
+ mvs_write_port_vsr_data(mvi, i, 0x2F0);
+ }
} else {
- int i;
/* disable auto port detection */
mw32(MVS_GBL_PORT_TYPE, 0);
for (i = 0; i < mvi->chip->n_phy; i++) {
@@ -95,7 +86,7 @@ static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
u32 reg, tmp;
if (!(mvi->flags & MVF_FLAG_SOC)) {
- if (phy_id < 4)
+ if (phy_id < MVS_SOC_PORTS)
pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
else
pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
@@ -104,13 +95,13 @@ static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
reg = mr32(MVS_PHY_CTL);
tmp = reg;
- if (phy_id < 4)
+ if (phy_id < MVS_SOC_PORTS)
tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
else
- tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;
+ tmp |= (1U << (phy_id - MVS_SOC_PORTS)) << PCTL_LINK_OFFS;
if (!(mvi->flags & MVF_FLAG_SOC)) {
- if (phy_id < 4) {
+ if (phy_id < MVS_SOC_PORTS) {
pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
mdelay(10);
pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
@@ -133,9 +124,9 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
tmp &= ~PHYEV_RDY_CH;
mvs_write_port_irq_stat(mvi, phy_id, tmp);
tmp = mvs_read_phy_ctl(mvi, phy_id);
- if (hard == 1)
+ if (hard == MVS_HARD_RESET)
tmp |= PHY_RST_HARD;
- else if (hard == 0)
+ else if (hard == MVS_SOFT_RESET)
tmp |= PHY_RST;
mvs_write_phy_ctl(mvi, phy_id, tmp);
if (hard) {
@@ -321,6 +312,11 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
/* init phys */
mvs_64xx_phy_hacks(mvi);
+ tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
+ tmp &= 0x0000ffff;
+ tmp |= 0x00fa0000;
+ mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
+
/* enable auto port detection */
mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
@@ -346,7 +342,7 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
mvs_64xx_enable_xmt(mvi, i);
- mvs_64xx_phy_reset(mvi, i, 1);
+ mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET);
msleep(500);
mvs_64xx_detect_porttype(mvi, i);
}
@@ -377,13 +373,7 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
mvs_update_phyinfo(mvi, i, 1);
}
- /* FIXME: update wide port bitmaps */
-
/* little endian for open address and command table, etc. */
- /*
- * it seems that ( from the spec ) turning on big-endian won't
- * do us any good on big-endian machines, need further confirmation
- */
cctl = mr32(MVS_CTL);
cctl |= CCTL_ENDIAN_CMD;
cctl |= CCTL_ENDIAN_DATA;
@@ -394,15 +384,19 @@ static int __devinit mvs_64xx_init(struct mvs_info *mvi)
/* reset CMD queue */
tmp = mr32(MVS_PCS);
tmp |= PCS_CMD_RST;
+ tmp &= ~PCS_SELF_CLEAR;
mw32(MVS_PCS, tmp);
- /* interrupt coalescing may cause missing HW interrput in some case,
- * and the max count is 0x1ff, while our max slot is 0x200,
+ /*
+ * the max count is 0x1ff, while our max slot is 0x200,
* it will make count 0.
*/
tmp = 0;
- mw32(MVS_INT_COAL, tmp);
+ if (MVS_CHIP_SLOT_SZ > 0x1ff)
+ mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
+ else
+ mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
- tmp = 0x100;
+ tmp = 0x10000 | interrupt_coalescing;
mw32(MVS_INT_COAL_TMOUT, tmp);
/* ladies and gentlemen, start your engines */
@@ -477,13 +471,11 @@ static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
/* clear CMD_CMPLT ASAP */
mw32_f(MVS_INT_STAT, CINT_DONE);
-#ifndef MVS_USE_TASKLET
+
spin_lock(&mvi->lock);
-#endif
mvs_int_full(mvi);
-#ifndef MVS_USE_TASKLET
spin_unlock(&mvi->lock);
-#endif
+
return IRQ_HANDLED;
}
@@ -630,7 +622,6 @@ static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
{
u32 tmp;
struct mvs_phy *phy = &mvi->phy[i];
- /* workaround for HW phy decoding error on 1.5g disk drive */
mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
tmp = mvs_read_port_vsr_data(mvi, i);
if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
@@ -661,7 +652,7 @@ void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
tmp |= lrmax;
}
mvs_write_phy_ctl(mvi, phy_id, tmp);
- mvs_64xx_phy_reset(mvi, phy_id, 1);
+ mvs_64xx_phy_reset(mvi, phy_id, MVS_HARD_RESET);
}
static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
@@ -744,11 +735,13 @@ int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
return -1;
}
-#ifndef DISABLE_HOTPLUG_DMA_FIX
-void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
+void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
+ int buf_len, int from, void *prd)
{
int i;
struct mvs_prd *buf_prd = prd;
+ dma_addr_t buf_dma = mvi->bulk_buffer_dma;
+
buf_prd += from;
for (i = 0; i < MAX_SG_ENTRY - from; i++) {
buf_prd->addr = cpu_to_le64(buf_dma);
@@ -756,7 +749,28 @@ void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
++buf_prd;
}
}
-#endif
+
+static void mvs_64xx_tune_interrupt(struct mvs_info *mvi, u32 time)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp = 0;
+ /*
+ * the max count is 0x1ff, while our max slot is 0x200,
+ * it will make count 0.
+ */
+ if (time == 0) {
+ mw32(MVS_INT_COAL, 0);
+ mw32(MVS_INT_COAL_TMOUT, 0x10000);
+ } else {
+ if (MVS_CHIP_SLOT_SZ > 0x1ff)
+ mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
+ else
+ mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
+
+ tmp = 0x10000 | time;
+ mw32(MVS_INT_COAL_TMOUT, tmp);
+ }
+}
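
Both the init path and the new mvs_64xx_tune_interrupt helper program the same pair of values: the coalescing count is the slot depth capped at 0x1ff (the field is nine bits wide) with the enable bit OR'ed in, and the timeout register takes 0x10000 | time, with time == 0 switching coalescing off. A userspace sketch of that computation; COAL_EN and the slot count are assumed values for illustration:

	#include <stdio.h>

	#define COAL_EN		0x10000u	/* assumed enable bit, for illustration */
	#define CHIP_SLOT_SZ	0x200u		/* 512 command slots */

	static void compute_coalescing(unsigned time_us, unsigned *coal, unsigned *tmout)
	{
		if (time_us == 0) {		/* coalescing off: interrupt per command */
			*coal = 0;
			*tmout = 0x10000;
			return;
		}
		/* the count field is only 9 bits wide, so cap it at 0x1ff */
		*coal = (CHIP_SLOT_SZ > 0x1ff ? 0x1ff : CHIP_SLOT_SZ) | COAL_EN;
		*tmout = 0x10000 | time_us;	/* enable bit plus timeout in microseconds */
	}

	int main(void)
	{
		unsigned coal, tmout;

		compute_coalescing(0x80, &coal, &tmout);	/* the 128 us default */
		printf("MVS_INT_COAL=0x%x MVS_INT_COAL_TMOUT=0x%x\n", coal, tmout);
		return 0;
	}
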
const struct mvs_dispatch mvs_64xx_dispatch = {
"mv64xx",
@@ -780,7 +794,6 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
mvs_write_port_irq_stat,
mvs_read_port_irq_mask,
mvs_write_port_irq_mask,
- mvs_get_sas_addr,
mvs_64xx_command_active,
mvs_64xx_clear_srs_irq,
mvs_64xx_issue_stop,
@@ -808,8 +821,8 @@ const struct mvs_dispatch mvs_64xx_dispatch = {
mvs_64xx_spi_buildcmd,
mvs_64xx_spi_issuecmd,
mvs_64xx_spi_waitdataready,
-#ifndef DISABLE_HOTPLUG_DMA_FIX
mvs_64xx_fix_dma,
-#endif
+ mvs_64xx_tune_interrupt,
+ NULL,
};
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 78162c3c36e..3501291618f 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -48,6 +48,216 @@ static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
}
}
+void set_phy_tuning(struct mvs_info *mvi, int phy_id,
+ struct phy_tuning phy_tuning)
+{
+ u32 tmp, setting_0 = 0, setting_1 = 0;
+ u8 i;
+
+ /* Remap information for B0 chip:
+ *
+ * R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient)
+ * R0Dh -> R118h[31:16] (Generation 1 Setting 0)
+ * R0Eh -> R11Ch[15:0] (Generation 1 Setting 1)
+ * R0Fh -> R11Ch[31:16] (Generation 2 Setting 0)
+ * R10h -> R120h[15:0] (Generation 2 Setting 1)
+ * R11h -> R120h[31:16] (Generation 3 Setting 0)
+ * R12h -> R124h[15:0] (Generation 3 Setting 1)
+ * R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved))
+ */
+
+ /* A0 has a different set of registers */
+ if (mvi->pdev->revision == VANIR_A0_REV)
+ return;
+
+ for (i = 0; i < 3; i++) {
+ /* loop 3 times, set Gen 1, Gen 2, Gen 3 */
+ switch (i) {
+ case 0:
+ setting_0 = GENERATION_1_SETTING;
+ setting_1 = GENERATION_1_2_SETTING;
+ break;
+ case 1:
+ setting_0 = GENERATION_1_2_SETTING;
+ setting_1 = GENERATION_2_3_SETTING;
+ break;
+ case 2:
+ setting_0 = GENERATION_2_3_SETTING;
+ setting_1 = GENERATION_3_4_SETTING;
+ break;
+ }
+
+ /* Set:
+ *
+ * Transmitter Emphasis Enable
+ * Transmitter Emphasis Amplitude
+ * Transmitter Amplitude
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, setting_0);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~(0xFBE << 16);
+ tmp |= (((phy_tuning.trans_emp_en << 11) |
+ (phy_tuning.trans_emp_amp << 7) |
+ (phy_tuning.trans_amp << 1)) << 16);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+
+ /* Set Transmitter Amplitude Adjust */
+ mvs_write_port_vsr_addr(mvi, phy_id, setting_1);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~(0xC000);
+ tmp |= (phy_tuning.trans_amp_adj << 14);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+ }
+}
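
set_phy_tuning() programs each generation's setting-0 register with a read-modify-write: it clears the transmitter tuning bits (mask 0xFBE << 16) and re-inserts the emphasis-enable, emphasis-amplitude and amplitude fields at bit offsets 11, 7 and 1 of the upper half-word. A standalone sketch of that packing step, using the 0x6/0x1A fallback defaults as example inputs:

	#include <stdio.h>

	/* re-insert the three transmitter fields at offsets 11, 7 and 1
	 * of the upper half-word, mirroring the driver's packing */
	static unsigned pack_setting0(unsigned reg, unsigned emp_en,
				      unsigned emp_amp, unsigned amp)
	{
		reg &= ~(0xFBEu << 16);		/* clear the tuning bits being rewritten */
		reg |= ((emp_en << 11) | (emp_amp << 7) | (amp << 1)) << 16;
		return reg;
	}

	int main(void)
	{
		unsigned before = 0xFFFFFFFFu;
		/* 0x6 and 0x1A are the fallback defaults used when the
		 * HBA info page is blank */
		unsigned after = pack_setting0(before, 0, 0x6, 0x1A);

		printf("0x%08x -> 0x%08x\n", before, after);
		return 0;
	}
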
+
+void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
+ struct ffe_control ffe)
+{
+ u32 tmp;
+
+ /* Don't run this if A0/B0 */
+ if ((mvi->pdev->revision == VANIR_A0_REV)
+ || (mvi->pdev->revision == VANIR_B0_REV))
+ return;
+
+ /* FFE Resistor and Capacitor */
+ /* R10Ch DFE Resolution Control/Squelch and FFE Setting
+ *
+ * FFE_FORCE [7]
+ * FFE_RES_SEL [6:4]
+ * FFE_CAP_SEL [3:0]
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~0xFF;
+
+ /* Read from HBA_Info_Page */
+ tmp |= ((0x1 << 7) |
+ (ffe.ffe_rss_sel << 4) |
+ (ffe.ffe_cap_sel << 0));
+
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+
+ /* R064h PHY Mode Register 1
+ *
+ * DFE_DIS 18
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~0x40001;
+ /* Hard coding */
+ /* No defines in HBA_Info_Page */
+ tmp |= (0 << 18);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+
+ /* R110h DFE F0-F1 Coefficient Control/DFE Update Control
+ *
+ * DFE_UPDATE_EN [11:6]
+ * DFE_FX_FORCE [5:0]
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~0xFFF;
+ /* Hard coding */
+ /* No defines in HBA_Info_Page */
+ tmp |= ((0x3F << 6) | (0x0 << 0));
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+
+ /* R1A0h Interface and Digital Reference Clock Control/Reserved_50h
+ *
+ * FFE_TRAIN_EN 3
+ */
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp &= ~0x8;
+ /* Hard coding */
+ /* No defines in HBA_Info_Page */
+ tmp |= (0 << 3);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp);
+}
+
+/*Notice: this function must be called when phy is disabled*/
+void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
+{
+ union reg_phy_cfg phy_cfg, phy_cfg_tmp;
+ mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
+ phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id);
+ phy_cfg.v = 0;
+ phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy;
+ phy_cfg.u.sas_support = 1;
+ phy_cfg.u.sata_support = 1;
+ phy_cfg.u.sata_host_mode = 1;
+
+ switch (rate) {
+ case 0x0:
+ /* support 1.5 Gbps */
+ phy_cfg.u.speed_support = 1;
+ phy_cfg.u.snw_3_support = 0;
+ phy_cfg.u.tx_lnk_parity = 1;
+ phy_cfg.u.tx_spt_phs_lnk_rate = 0x30;
+ break;
+ case 0x1:
+
+ /* support 1.5, 3.0 Gbps */
+ phy_cfg.u.speed_support = 3;
+ phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c;
+ phy_cfg.u.tx_lgcl_lnk_rate = 0x08;
+ break;
+ case 0x2:
+ default:
+ /* support 1.5, 3.0, 6.0 Gbps */
+ phy_cfg.u.speed_support = 7;
+ phy_cfg.u.snw_3_support = 1;
+ phy_cfg.u.tx_lnk_parity = 1;
+ phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f;
+ phy_cfg.u.tx_lgcl_lnk_rate = 0x09;
+ break;
+ }
+ mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v);
+}
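
set_phy_rate() builds the VSR_PHY_MODE2 value through union reg_phy_cfg so the capability bits get names instead of open-coded shifts. A reduced userspace sketch of the union-with-bitfields idiom follows; the field subset is illustrative only and the exact layout is compiler-defined:

	#include <stdint.h>
	#include <stdio.h>

	/* illustrative subset of the phy configuration word */
	union phy_cfg {
		uint32_t v;
		struct {
			uint32_t phy_reset:1;
			uint32_t sas_support:1;
			uint32_t sata_support:1;
			uint32_t sata_host_mode:1;
			uint32_t speed_support:3;	/* one bit per generation */
			uint32_t rest:25;		/* remaining fields omitted */
		} u;
	};

	int main(void)
	{
		union phy_cfg cfg = { .v = 0 };

		/* name the bits instead of open-coding shifts and masks */
		cfg.u.sas_support = 1;
		cfg.u.sata_support = 1;
		cfg.u.sata_host_mode = 1;
		cfg.u.speed_support = 7;	/* 1.5, 3.0 and 6.0 Gbps */

		/* the resulting word depends on the ABI's bitfield allocation */
		printf("register value 0x%08x\n", (unsigned)cfg.v);
		return 0;
	}
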
+
+static void __devinit
+mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
+{
+ u32 temp;
+ temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
+ if (temp == 0xFFFFFFFFL) {
+ mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6;
+ mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A;
+ mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3;
+ }
+
+ temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]);
+ if (temp == 0xFFL) {
+ switch (mvi->pdev->revision) {
+ case VANIR_A0_REV:
+ case VANIR_B0_REV:
+ mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
+ mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7;
+ break;
+ case VANIR_C0_REV:
+ case VANIR_C1_REV:
+ case VANIR_C2_REV:
+ default:
+ mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
+ mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC;
+ break;
+ }
+ }
+
+ temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]);
+ if (temp == 0xFFL)
+ /*set default phy_rate = 6Gbps*/
+ mvi->hba_info_param.phy_rate[phy_id] = 0x2;
+
+ set_phy_tuning(mvi, phy_id,
+ mvi->hba_info_param.phy_tuning[phy_id]);
+ set_phy_ffe_tuning(mvi, phy_id,
+ mvi->hba_info_param.ffe_ctl[phy_id]);
+ set_phy_rate(mvi, phy_id,
+ mvi->hba_info_param.phy_rate[phy_id]);
+}
+
static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
void __iomem *regs = mvi->regs;
@@ -61,7 +271,14 @@ static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
{
u32 tmp;
-
+ u32 delay = 5000;
+ if (hard == MVS_PHY_TUNE) {
+ mvs_write_port_cfg_addr(mvi, phy_id, PHYR_SATA_CTL);
+ tmp = mvs_read_port_cfg_data(mvi, phy_id);
+ mvs_write_port_cfg_data(mvi, phy_id, tmp|0x20000000);
+ mvs_write_port_cfg_data(mvi, phy_id, tmp|0x100000);
+ return;
+ }
tmp = mvs_read_port_irq_stat(mvi, phy_id);
tmp &= ~PHYEV_RDY_CH;
mvs_write_port_irq_stat(mvi, phy_id, tmp);
@@ -71,12 +288,15 @@ static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
mvs_write_phy_ctl(mvi, phy_id, tmp);
do {
tmp = mvs_read_phy_ctl(mvi, phy_id);
- } while (tmp & PHY_RST_HARD);
+ udelay(10);
+ delay--;
+ } while ((tmp & PHY_RST_HARD) && delay);
+ if (!delay)
+ mv_dprintk("phy hard reset failed.\n");
} else {
- mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
- tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
tmp |= PHY_RST;
- mvs_write_port_vsr_data(mvi, phy_id, tmp);
+ mvs_write_phy_ctl(mvi, phy_id, tmp);
}
}
@@ -90,12 +310,25 @@ static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
{
- mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
- mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
- mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
- mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
+ u32 tmp;
+ u8 revision = 0;
+
+ revision = mvi->pdev->revision;
+ if (revision == VANIR_A0_REV) {
+ mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
+ mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
+ }
+ if (revision == VANIR_B0_REV) {
+ mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL);
+ mvs_write_port_vsr_data(mvi, phy_id, 0x08001006);
+ mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
+ mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f);
+ }
+
mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
- mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
+ tmp = mvs_read_port_vsr_data(mvi, phy_id);
+ tmp |= bit(0);
+ mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff);
}
static int __devinit mvs_94xx_init(struct mvs_info *mvi)
@@ -103,7 +336,9 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
void __iomem *regs = mvi->regs;
int i;
u32 tmp, cctl;
+ u8 revision;
+ revision = mvi->pdev->revision;
mvs_show_pcie_usage(mvi);
if (mvi->flags & MVF_FLAG_SOC) {
tmp = mr32(MVS_PHY_CTL);
@@ -133,6 +368,28 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
msleep(100);
}
+ /* disable Multiplexing, enable phy implemented */
+ mw32(MVS_PORTS_IMP, 0xFF);
+
+ if (revision == VANIR_A0_REV) {
+ mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET);
+ mw32(MVS_PA_VSR_PORT, 0x00018080);
+ }
+ mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2);
+ if (revision == VANIR_A0_REV || revision == VANIR_B0_REV)
+ /* set 6G/3G/1.5G, multiplexing, without SSC */
+ mw32(MVS_PA_VSR_PORT, 0x0084d4fe);
+ else
+ /* set 6G/3G/1.5G, multiplexing, with and without SSC */
+ mw32(MVS_PA_VSR_PORT, 0x0084fffe);
+
+ if (revision == VANIR_B0_REV) {
+ mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL);
+ mw32(MVS_PA_VSR_PORT, 0x08001006);
+ mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA);
+ mw32(MVS_PA_VSR_PORT, 0x0000705f);
+ }
+
/* reset control */
mw32(MVS_PCS, 0); /* MVS_PCS */
mw32(MVS_STP_REG_SET_0, 0);
@@ -141,17 +398,8 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
/* init phys */
mvs_phy_hacks(mvi);
- /* disable Multiplexing, enable phy implemented */
- mw32(MVS_PORTS_IMP, 0xFF);
-
-
- mw32(MVS_PA_VSR_ADDR, 0x00000104);
- mw32(MVS_PA_VSR_PORT, 0x00018080);
- mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
- mw32(MVS_PA_VSR_PORT, 0x0084ffff);
-
/* set LED blink when IO*/
- mw32(MVS_PA_VSR_ADDR, 0x00000030);
+ mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED);
tmp = mr32(MVS_PA_VSR_PORT);
tmp &= 0xFFFF00FF;
tmp |= 0x00003300;
@@ -175,12 +423,13 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
mvs_94xx_phy_disable(mvi, i);
/* set phy local SAS address */
mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
- (mvi->phy[i].dev_sas_addr));
+ cpu_to_le64(mvi->phy[i].dev_sas_addr));
mvs_94xx_enable_xmt(mvi, i);
+ mvs_94xx_config_reg_from_hba(mvi, i);
mvs_94xx_phy_enable(mvi, i);
- mvs_94xx_phy_reset(mvi, i, 1);
+ mvs_94xx_phy_reset(mvi, i, PHY_RST_HARD);
msleep(500);
mvs_94xx_detect_porttype(mvi, i);
}
@@ -211,16 +460,9 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
mvs_update_phyinfo(mvi, i, 1);
}
- /* FIXME: update wide port bitmaps */
-
/* little endian for open address and command table, etc. */
- /*
- * it seems that ( from the spec ) turning on big-endian won't
- * do us any good on big-endian machines, need further confirmation
- */
cctl = mr32(MVS_CTL);
cctl |= CCTL_ENDIAN_CMD;
- cctl |= CCTL_ENDIAN_DATA;
cctl &= ~CCTL_ENDIAN_OPEN;
cctl |= CCTL_ENDIAN_RSP;
mw32_f(MVS_CTL, cctl);
@@ -228,15 +470,20 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
/* reset CMD queue */
tmp = mr32(MVS_PCS);
tmp |= PCS_CMD_RST;
+ tmp &= ~PCS_SELF_CLEAR;
mw32(MVS_PCS, tmp);
- /* interrupt coalescing may cause missing HW interrput in some case,
- * and the max count is 0x1ff, while our max slot is 0x200,
+ /*
+ * the max count is 0x1ff, while our max slot is 0x200,
* it will make count 0.
*/
tmp = 0;
- mw32(MVS_INT_COAL, tmp);
+ if (MVS_CHIP_SLOT_SZ > 0x1ff)
+ mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
+ else
+ mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);
- tmp = 0x100;
+ /* default interrupt coalescing time is 128us */
+ tmp = 0x10000 | interrupt_coalescing;
mw32(MVS_INT_COAL_TMOUT, tmp);
/* ladies and gentlemen, start your engines */
@@ -249,7 +496,7 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
/* enable completion queue interrupt */
tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
- CINT_DMA_PCIE);
+ CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR);
tmp |= CINT_PHY_MASK;
mw32(MVS_INT_MASK, tmp);
@@ -332,13 +579,10 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
((stat & IRQ_SAS_B) && mvi->id == 1)) {
mw32_f(MVS_INT_STAT, CINT_DONE);
- #ifndef MVS_USE_TASKLET
+
spin_lock(&mvi->lock);
- #endif
mvs_int_full(mvi);
- #ifndef MVS_USE_TASKLET
spin_unlock(&mvi->lock);
- #endif
}
return IRQ_HANDLED;
}
@@ -346,10 +590,48 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
u32 tmp;
- mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
- do {
- tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
- } while (tmp & 1 << (slot_idx % 32));
+ mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
+ mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
+ 1 << (slot_idx % 32));
+ do {
+ tmp = mvs_cr32(mvi,
+ MVS_COMMAND_ACTIVE + (slot_idx >> 3));
+ } while (tmp & 1 << (slot_idx % 32));
+ }
+}
+
+void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
+{
+ void __iomem *regs = mvi->regs;
+ u32 tmp;
+
+ if (clear_all) {
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+ if (tmp) {
+ mv_dprintk("check SRS 0 %08X.\n", tmp);
+ mw32(MVS_INT_STAT_SRS_0, tmp);
+ }
+ tmp = mr32(MVS_INT_STAT_SRS_1);
+ if (tmp) {
+ mv_dprintk("check SRS 1 %08X.\n", tmp);
+ mw32(MVS_INT_STAT_SRS_1, tmp);
+ }
+ } else {
+ if (reg_set > 31)
+ tmp = mr32(MVS_INT_STAT_SRS_1);
+ else
+ tmp = mr32(MVS_INT_STAT_SRS_0);
+
+ if (tmp & (1 << (reg_set % 32))) {
+ mv_dprintk("register set 0x%x was stopped.\n", reg_set);
+ if (reg_set > 31)
+ mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32));
+ else
+ mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
+ }
+ }
}
static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
@@ -357,37 +639,56 @@ static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
{
void __iomem *regs = mvi->regs;
u32 tmp;
+ mvs_94xx_clear_srs_irq(mvi, 0, 1);
- if (type == PORT_TYPE_SATA) {
- tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
- mw32(MVS_INT_STAT_SRS_0, tmp);
- }
- mw32(MVS_INT_STAT, CINT_CI_STOP);
+ tmp = mr32(MVS_INT_STAT);
+ mw32(MVS_INT_STAT, tmp | CINT_CI_STOP);
tmp = mr32(MVS_PCS) | 0xFF00;
mw32(MVS_PCS, tmp);
}
+static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi)
+{
+ void __iomem *regs = mvi->regs;
+ u32 err_0, err_1;
+ u8 i;
+ struct mvs_device *device;
+
+ err_0 = mr32(MVS_NON_NCQ_ERR_0);
+ err_1 = mr32(MVS_NON_NCQ_ERR_1);
+
+ mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n",
+ err_0, err_1);
+ for (i = 0; i < 32; i++) {
+ if (err_0 & bit(i)) {
+ device = mvs_find_dev_by_reg_set(mvi, i);
+ if (device)
+ mvs_release_task(mvi, device->sas_device);
+ }
+ if (err_1 & bit(i)) {
+ device = mvs_find_dev_by_reg_set(mvi, i+32);
+ if (device)
+ mvs_release_task(mvi, device->sas_device);
+ }
+ }
+
+ mw32(MVS_NON_NCQ_ERR_0, err_0);
+ mw32(MVS_NON_NCQ_ERR_1, err_1);
+}
+
static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
{
void __iomem *regs = mvi->regs;
- u32 tmp;
u8 reg_set = *tfs;
if (*tfs == MVS_ID_NOT_MAPPED)
return;
mvi->sata_reg_set &= ~bit(reg_set);
- if (reg_set < 32) {
+ if (reg_set < 32)
w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
- tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
- if (tmp)
- mw32(MVS_INT_STAT_SRS_0, tmp);
- } else {
- w_reg_set_enable(reg_set, mvi->sata_reg_set);
- tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
- if (tmp)
- mw32(MVS_INT_STAT_SRS_1, tmp);
- }
+ else
+ w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32));
*tfs = MVS_ID_NOT_MAPPED;
@@ -403,7 +704,7 @@ static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
return 0;
i = mv_ffc64(mvi->sata_reg_set);
- if (i > 32) {
+ if (i >= 32) {
mvi->sata_reg_set |= bit(i);
w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
*tfs = i;
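
The 94xx tracks its SATA register sets in a 64-bit bitmap while the hardware exposes two 32-bit enable registers, so the allocator takes the first clear bit and writes whichever half it falls in; changing the test to i >= 32 makes bit 32 land in the second register rather than the first. A userspace sketch of that split-bitmap allocation, with the register writes replaced by printf:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t reg_set_map;	/* one bit per SATA register set */

	static int find_first_clear(uint64_t map)
	{
		for (int i = 0; i < 64; i++)
			if (!(map & ((uint64_t)1 << i)))
				return i;
		return -1;
	}

	static int assign_reg_set(void)
	{
		int i = find_first_clear(reg_set_map);

		if (i < 0)
			return -1;		/* all register sets in use */
		reg_set_map |= (uint64_t)1 << i;
		if (i >= 32)			/* bits 32..63: second enable register */
			printf("write 0x%08x to enable register 1\n",
			       (unsigned)(reg_set_map >> 32));
		else				/* bits 0..31: first enable register */
			printf("write 0x%08x to enable register 0\n",
			       (unsigned)reg_set_map);
		return i;
	}

	int main(void)
	{
		reg_set_map = 0xFFFFFFFFull;	/* low half already exhausted */
		printf("allocated register set %d\n", assign_reg_set());
		return 0;
	}
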
@@ -422,9 +723,12 @@ static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
int i;
struct scatterlist *sg;
struct mvs_prd *buf_prd = prd;
+ struct mvs_prd_imt im_len;
+ *(u32 *)&im_len = 0;
for_each_sg(scatter, sg, nr, i) {
buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
- buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
+ im_len.len = sg_dma_len(sg);
+ buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
buf_prd++;
}
}
@@ -433,7 +737,7 @@ static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
{
u32 phy_st;
phy_st = mvs_read_phy_ctl(mvi, i);
- if (phy_st & PHY_READY_MASK) /* phy ready */
+ if (phy_st & PHY_READY_MASK)
return 1;
return 0;
}
@@ -447,7 +751,7 @@ static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
for (i = 0; i < 7; i++) {
mvs_write_port_cfg_addr(mvi, port_id,
CONFIG_ID_FRAME0 + i * 4);
- id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
+ id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
}
memcpy(id, id_frame, 28);
}
@@ -458,15 +762,13 @@ static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
int i;
u32 id_frame[7];
- /* mvs_hexdump(28, (u8 *)id_frame, 0); */
for (i = 0; i < 7; i++) {
mvs_write_port_cfg_addr(mvi, port_id,
CONFIG_ATT_ID_FRAME0 + i * 4);
- id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
+ id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
mv_dprintk("94xx phy %d atta frame %d %x.\n",
port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
}
- /* mvs_hexdump(28, (u8 *)id_frame, 0); */
memcpy(id, id_frame, 28);
}
@@ -526,7 +828,18 @@ static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
struct sas_phy_linkrates *rates)
{
- /* TODO */
+ u32 lrmax = 0;
+ u32 tmp;
+
+ tmp = mvs_read_phy_ctl(mvi, phy_id);
+ lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12;
+
+ if (lrmax) {
+ tmp &= ~(0x3 << 12);
+ tmp |= lrmax;
+ }
+ mvs_write_phy_ctl(mvi, phy_id, tmp);
+ mvs_94xx_phy_reset(mvi, phy_id, PHY_RST_HARD);
}
static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
@@ -603,27 +916,59 @@ int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
return -1;
}
-#ifndef DISABLE_HOTPLUG_DMA_FIX
-void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
+void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
+ int buf_len, int from, void *prd)
{
int i;
struct mvs_prd *buf_prd = prd;
+ dma_addr_t buf_dma;
+ struct mvs_prd_imt im_len;
+
+ *(u32 *)&im_len = 0;
buf_prd += from;
- for (i = 0; i < MAX_SG_ENTRY - from; i++) {
- buf_prd->addr = cpu_to_le64(buf_dma);
- buf_prd->im_len.len = cpu_to_le32(buf_len);
- ++buf_prd;
+
+#define PRD_CHAINED_ENTRY 0x01
+ if ((mvi->pdev->revision == VANIR_A0_REV) ||
+ (mvi->pdev->revision == VANIR_B0_REV))
+ buf_dma = (phy_mask <= 0x08) ?
+ mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1;
+ else
+ return;
+
+ for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) {
+ if (i == MAX_SG_ENTRY - 1) {
+ buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1));
+ im_len.len = 2;
+ im_len.misc_ctl = PRD_CHAINED_ENTRY;
+ } else {
+ buf_prd->addr = cpu_to_le64(buf_dma);
+ im_len.len = buf_len;
+ }
+ buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
}
}
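
The reworked fix_dma still points every unused PRD slot at a per-controller bulk buffer (chosen by phy mask on A0/B0 parts), but the final slot becomes a chained descriptor: length 2, misc_ctl set to PRD_CHAINED_ENTRY, and an address referring back to the previous entry. A simplified userspace sketch of that table layout; the types, sizes and addresses are stand-ins, not the hardware format:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_SG_ENTRY		8	/* shrunk from 255 for the demo    */
	#define PRD_CHAINED_ENTRY	0x01	/* flag value taken from the patch */

	struct prd {
		uint64_t addr;		/* buffer address                   */
		uint32_t len;		/* length (22 bits in the hardware) */
		uint32_t misc_ctl;	/* kept as a separate field here    */
	};

	static void fix_dma(struct prd *table, int from,
			    uint64_t bulk_dma, uint32_t buf_len)
	{
		for (int i = from; i < MAX_SG_ENTRY; i++) {
			if (i == MAX_SG_ENTRY - 1) {
				/* last slot chains back to the previous entry */
				table[i].addr = (uint64_t)(uintptr_t)&table[i - 1];
				table[i].len = 2;
				table[i].misc_ctl = PRD_CHAINED_ENTRY;
			} else {
				/* every other unused slot targets the bounce buffer */
				table[i].addr = bulk_dma;
				table[i].len = buf_len;
				table[i].misc_ctl = 0;
			}
		}
	}

	int main(void)
	{
		struct prd table[MAX_SG_ENTRY] = { 0 };

		fix_dma(table, 3, 0x1000, 4096);	/* 3 real SG entries in use */
		for (int i = 3; i < MAX_SG_ENTRY; i++)
			printf("prd[%d]: addr=0x%llx len=%u ctl=%u\n", i,
			       (unsigned long long)table[i].addr,
			       (unsigned)table[i].len, (unsigned)table[i].misc_ctl);
		return 0;
	}
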
-#endif
-/*
- * FIXME JEJB: temporary nop clear_srs_irq to make 94xx still work
- * with 64xx fixes
- */
-static void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set,
- u8 clear_all)
+static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time)
{
+ void __iomem *regs = mvi->regs;
+ u32 tmp = 0;
+ /*
+ * the max count is 0x1ff, while our max slot is 0x200,
+ * it will make count 0.
+ */
+ if (time == 0) {
+ mw32(MVS_INT_COAL, 0);
+ mw32(MVS_INT_COAL_TMOUT, 0x10000);
+ } else {
+ if (MVS_CHIP_SLOT_SZ > 0x1ff)
+ mw32(MVS_INT_COAL, 0x1ff|COAL_EN);
+ else
+ mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN);
+
+ tmp = 0x10000 | time;
+ mw32(MVS_INT_COAL_TMOUT, tmp);
+ }
+
}
const struct mvs_dispatch mvs_94xx_dispatch = {
@@ -648,7 +993,6 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
mvs_write_port_irq_stat,
mvs_read_port_irq_mask,
mvs_write_port_irq_mask,
- mvs_get_sas_addr,
mvs_94xx_command_active,
mvs_94xx_clear_srs_irq,
mvs_94xx_issue_stop,
@@ -676,8 +1020,8 @@ const struct mvs_dispatch mvs_94xx_dispatch = {
mvs_94xx_spi_buildcmd,
mvs_94xx_spi_issuecmd,
mvs_94xx_spi_waitdataready,
-#ifndef DISABLE_HOTPLUG_DMA_FIX
mvs_94xx_fix_dma,
-#endif
+ mvs_94xx_tune_interrupt,
+ mvs_94xx_non_spec_ncq_error,
};
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
index 8835befe2c0..8f7eb4f2114 100644
--- a/drivers/scsi/mvsas/mv_94xx.h
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -30,6 +30,14 @@
#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS
+enum VANIR_REVISION_ID {
+ VANIR_A0_REV = 0xA0,
+ VANIR_B0_REV = 0x01,
+ VANIR_C0_REV = 0x02,
+ VANIR_C1_REV = 0x03,
+ VANIR_C2_REV = 0xC2,
+};
+
enum hw_registers {
MVS_GBL_CTL = 0x04, /* global control */
MVS_GBL_INT_STAT = 0x00, /* global irq status */
@@ -101,6 +109,7 @@ enum hw_registers {
MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */
MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */
MVS_PA_VSR_PORT = 0x294, /* All port VSR data */
+ MVS_COMMAND_ACTIVE = 0x300,
};
enum pci_cfg_registers {
@@ -112,26 +121,29 @@ enum pci_cfg_registers {
/* SAS/SATA Vendor Specific Port Registers */
enum sas_sata_vsp_regs {
- VSR_PHY_STAT = 0x00 * 4, /* Phy Status */
- VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */
- VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */
- VSR_PHY_MODE3 = 0x03 * 4, /* pll */
- VSR_PHY_MODE4 = 0x04 * 4, /* VCO */
- VSR_PHY_MODE5 = 0x05 * 4, /* Rx */
- VSR_PHY_MODE6 = 0x06 * 4, /* CDR */
- VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */
- VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */
- VSR_PHY_MODE9 = 0x09 * 4, /* Test */
- VSR_PHY_MODE10 = 0x0A * 4, /* Power */
- VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */
- VSR_PHY_VS0 = 0x0C * 4, /* Vednor Specific 0 */
- VSR_PHY_VS1 = 0x0D * 4, /* Vednor Specific 1 */
+ VSR_PHY_STAT = 0x00 * 4, /* Phy Interrupt Status */
+ VSR_PHY_MODE1 = 0x01 * 4, /* phy Interrupt Enable */
+ VSR_PHY_MODE2 = 0x02 * 4, /* Phy Configuration */
+ VSR_PHY_MODE3 = 0x03 * 4, /* Phy Status */
+ VSR_PHY_MODE4 = 0x04 * 4, /* Phy Counter 0 */
+ VSR_PHY_MODE5 = 0x05 * 4, /* Phy Counter 1 */
+ VSR_PHY_MODE6 = 0x06 * 4, /* Event Counter Control */
+ VSR_PHY_MODE7 = 0x07 * 4, /* Event Counter Select */
+ VSR_PHY_MODE8 = 0x08 * 4, /* Event Counter 0 */
+ VSR_PHY_MODE9 = 0x09 * 4, /* Event Counter 1 */
+ VSR_PHY_MODE10 = 0x0A * 4, /* Event Counter 2 */
+ VSR_PHY_MODE11 = 0x0B * 4, /* Event Counter 3 */
+ VSR_PHY_ACT_LED = 0x0C * 4, /* Activity LED control */
+
+ VSR_PHY_FFE_CONTROL = 0x10C,
+ VSR_PHY_DFE_UPDATE_CRTL = 0x110,
+ VSR_REF_CLOCK_CRTL = 0x1A0,
};
enum chip_register_bits {
PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
- PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
- PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
+ PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 12),
+ PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
(0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
};
@@ -169,22 +181,75 @@ enum pci_interrupt_cause {
IRQ_PCIE_ERR = (1 << 31),
};
+union reg_phy_cfg {
+ u32 v;
+ struct {
+ u32 phy_reset:1;
+ u32 sas_support:1;
+ u32 sata_support:1;
+ u32 sata_host_mode:1;
+ /*
+ * bit 2: 6Gbps support
+ * bit 1: 3Gbps support
+ * bit 0: 1.5Gbps support
+ */
+ u32 speed_support:3;
+ u32 snw_3_support:1;
+ u32 tx_lnk_parity:1;
+ /*
+ * bit 5: G1 (1.5Gbps) Without SSC
+ * bit 4: G1 (1.5Gbps) with SSC
+ * bit 3: G2 (3.0Gbps) Without SSC
+ * bit 2: G2 (3.0Gbps) with SSC
+ * bit 1: G3 (6.0Gbps) without SSC
+ * bit 0: G3 (6.0Gbps) with SSC
+ */
+ u32 tx_spt_phs_lnk_rate:6;
+ /* 8h: 1.5Gbps 9h: 3Gbps Ah: 6Gbps */
+ u32 tx_lgcl_lnk_rate:4;
+ u32 tx_ssc_type:1;
+ u32 sata_spin_up_spt:1;
+ u32 sata_spin_up_en:1;
+ u32 bypass_oob:1;
+ u32 disable_phy:1;
+ u32 rsvd:8;
+ } u;
+};
+
#define MAX_SG_ENTRY 255
struct mvs_prd_imt {
+#ifndef __BIG_ENDIAN
__le32 len:22;
u8 _r_a:2;
u8 misc_ctl:4;
u8 inter_sel:4;
+#else
+ u32 inter_sel:4;
+ u32 misc_ctl:4;
+ u32 _r_a:2;
+ u32 len:22;
+#endif
};
struct mvs_prd {
/* 64-bit buffer address */
__le64 addr;
/* 22-bit length */
- struct mvs_prd_imt im_len;
+ __le32 im_len;
} __attribute__ ((packed));
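
mvs_prd.im_len becomes a raw __le32 and mvs_prd_imt gains an explicit big-endian field order, because C bitfields are allocated from opposite ends on little- and big-endian ABIs; the driver assembles the bitfields in host order and byte-swaps the whole word once when storing it. A userspace sketch of that assemble-then-store pattern, where memcpy stands in for the *(u32 *) punning and the byte-order macros are the usual GCC/Clang predefines:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* field order must be reversed on a big-endian ABI, as in the patch */
	struct prd_imt {
	#if !defined(__BYTE_ORDER__) || __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		uint32_t len:22;
		uint32_t _r_a:2;
		uint32_t misc_ctl:4;
		uint32_t inter_sel:4;
	#else
		uint32_t inter_sel:4;
		uint32_t misc_ctl:4;
		uint32_t _r_a:2;
		uint32_t len:22;
	#endif
	};

	int main(void)
	{
		struct prd_imt im_len;
		uint32_t word;

		memset(&im_len, 0, sizeof(im_len));	/* the *(u32 *)&im_len = 0 idiom */
		im_len.len = 4096;
		im_len.misc_ctl = 0x1;

		/* assemble in host order, convert once when storing
		 * (cpu_to_le32() in the driver); memcpy avoids the cast */
		memcpy(&word, &im_len, sizeof(word));
		printf("im_len word = 0x%08x\n", (unsigned)word);
		return 0;
	}
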
+/*
+ * these registers are accessed through port vendor
+ * specific address/data registers
+ */
+enum sas_sata_phy_regs {
+ GENERATION_1_SETTING = 0x118,
+ GENERATION_1_2_SETTING = 0x11C,
+ GENERATION_2_3_SETTING = 0x120,
+ GENERATION_3_4_SETTING = 0x124,
+};
+
#define SPI_CTRL_REG_94XX 0xc800
#define SPI_ADDR_REG_94XX 0xc804
#define SPI_WR_DATA_REG_94XX 0xc808
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
index 1753a6fc42d..bcc408042ce 100644
--- a/drivers/scsi/mvsas/mv_chips.h
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -164,7 +164,6 @@ static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
{
u32 tmp;
- /* workaround for SATA R-ERR, to ignore phy glitch */
tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
tmp &= ~(1 << 9);
tmp |= (1 << 10);
@@ -179,23 +178,10 @@ static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
tmp |= 0x3fff;
mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
- /* workaround for WDTIMEOUT , set to 550 ms */
mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
/* not to halt for different port op during wideport link change */
mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
-
- /* workaround for Seagate disk not-found OOB sequence, recv
- * COMINIT before sending out COMWAKE */
- tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
- tmp &= 0x0000ffff;
- tmp |= 0x00fa0000;
- mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
-
- tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
- tmp &= 0x1fffffff;
- tmp |= (2U << 29); /* 8 ms retry */
- mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
}
static inline void mvs_int_sata(struct mvs_info *mvi)
@@ -223,6 +209,9 @@ static inline void mvs_int_full(struct mvs_info *mvi)
mvs_int_port(mvi, i, tmp);
}
+ if (stat & CINT_NON_SPEC_NCQ_ERROR)
+ MVS_CHIP_DISP->non_spec_ncq_error(mvi);
+
if (stat & CINT_SRS)
mvs_int_sata(mvi);
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index bc00c940743..dec7cadb748 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -43,7 +43,6 @@ enum chip_flavors {
/* driver compile-time configuration */
enum driver_configuration {
- MVS_SLOTS = 512, /* command slots */
MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
/* software requires power-of-2
@@ -56,8 +55,7 @@ enum driver_configuration {
MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
MVS_OAF_SZ = 64, /* Open address frame buffer size */
- MVS_QUEUE_SIZE = 32, /* Support Queue depth */
- MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */
+ MVS_QUEUE_SIZE = 64, /* Support Queue depth */
MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2,
};
@@ -144,6 +142,7 @@ enum hw_register_bits {
CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
CINT_MEM = (1U << 26), /* int mem parity err */
CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
+ CINT_NON_SPEC_NCQ_ERROR = (1U << 25), /* Non specific NCQ error */
CINT_SRS = (1U << 3), /* SRS event */
CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
CINT_DONE = (1U << 0), /* cmd completion */
@@ -161,7 +160,7 @@ enum hw_register_bits {
TXQ_CMD_SSP = 1, /* SSP protocol */
TXQ_CMD_SMP = 2, /* SMP protocol */
TXQ_CMD_STP = 3, /* STP/SATA protocol */
- TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
+ TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP target free list */
TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
TXQ_MODE_TARGET = 0,
@@ -391,15 +390,15 @@ enum sas_cmd_port_registers {
};
enum mvs_info_flags {
- MVF_MSI = (1U << 0), /* MSI is enabled */
MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */
};
enum mvs_event_flags {
- PHY_PLUG_EVENT = (3U),
+ PHY_PLUG_EVENT = (3U),
PHY_PLUG_IN = (1U << 0), /* phy plug in */
PHY_PLUG_OUT = (1U << 1), /* phy plug out */
+ EXP_BRCT_CHG = (1U << 2), /* broadcast change */
};
enum mvs_port_type {
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 90b636611cd..4e9af66fd1d 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -34,22 +34,25 @@ MODULE_PARM_DESC(collector, "\n"
"\tThe mvsas SAS LLDD supports both modes.\n"
"\tDefault: 1 (Direct Mode).\n");
+int interrupt_coalescing = 0x80;
+
static struct scsi_transport_template *mvs_stt;
struct kmem_cache *mvs_task_list_cache;
static const struct mvs_chip_info mvs_chips[] = {
- [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
- [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
- [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
- [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
- [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
- [chip_9445] = { 1, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, },
- [chip_9485] = { 2, 4, 0x800, 17, 64, 11, &mvs_94xx_dispatch, },
- [chip_1300] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, },
- [chip_1320] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, },
+ [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
+ [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
+ [chip_6485] = { 1, 8, 0x800, 33, 32, 6, 10, &mvs_64xx_dispatch, },
+ [chip_9180] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
+ [chip_9480] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
+ [chip_9445] = { 1, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
+ [chip_9485] = { 2, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
+ [chip_1300] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
+ [chip_1320] = { 2, 4, 0x800, 17, 64, 8, 9, &mvs_94xx_dispatch, },
};
+struct device_attribute *mvst_host_attrs[];
+
#define SOC_SAS_NUM 2
-#define SG_MX 64
static struct scsi_host_template mvs_sht = {
.module = THIS_MODULE,
@@ -66,7 +69,7 @@ static struct scsi_host_template mvs_sht = {
.can_queue = 1,
.cmd_per_lun = 1,
.this_id = -1,
- .sg_tablesize = SG_MX,
+ .sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
@@ -74,6 +77,7 @@ static struct scsi_host_template mvs_sht = {
.slave_alloc = mvs_slave_alloc,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+ .shost_attrs = mvst_host_attrs,
};
static struct sas_domain_function_template mvs_transport_ops = {
@@ -100,6 +104,7 @@ static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
phy->mvi = mvi;
+ phy->port = NULL;
init_timer(&phy->timer);
sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
sas_phy->class = SAS;
@@ -128,7 +133,7 @@ static void mvs_free(struct mvs_info *mvi)
if (mvi->flags & MVF_FLAG_SOC)
slot_nr = MVS_SOC_SLOTS;
else
- slot_nr = MVS_SLOTS;
+ slot_nr = MVS_CHIP_SLOT_SZ;
if (mvi->dma_pool)
pci_pool_destroy(mvi->dma_pool);
@@ -148,25 +153,26 @@ static void mvs_free(struct mvs_info *mvi)
dma_free_coherent(mvi->dev,
sizeof(*mvi->slot) * slot_nr,
mvi->slot, mvi->slot_dma);
-#ifndef DISABLE_HOTPLUG_DMA_FIX
+
if (mvi->bulk_buffer)
dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
mvi->bulk_buffer, mvi->bulk_buffer_dma);
-#endif
+ if (mvi->bulk_buffer1)
+ dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
+ mvi->bulk_buffer1, mvi->bulk_buffer_dma1);
MVS_CHIP_DISP->chip_iounmap(mvi);
if (mvi->shost)
scsi_host_put(mvi->shost);
list_for_each_entry(mwq, &mvi->wq_list, entry)
cancel_delayed_work(&mwq->work_q);
+ kfree(mvi->tags);
kfree(mvi);
}
-#ifdef MVS_USE_TASKLET
-struct tasklet_struct mv_tasklet;
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
static void mvs_tasklet(unsigned long opaque)
{
- unsigned long flags;
u32 stat;
u16 core_nr, i = 0;
@@ -179,35 +185,49 @@ static void mvs_tasklet(unsigned long opaque)
if (unlikely(!mvi))
BUG_ON(1);
+ stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq);
+ if (!stat)
+ goto out;
+
for (i = 0; i < core_nr; i++) {
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
- stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
- if (stat)
- MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
+ MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat);
}
+out:
+ MVS_CHIP_DISP->interrupt_enable(mvi);
}
#endif
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
- u32 core_nr, i = 0;
+ u32 core_nr;
u32 stat;
struct mvs_info *mvi;
struct sas_ha_struct *sha = opaque;
+#ifndef CONFIG_SCSI_MVSAS_TASKLET
+ u32 i;
+#endif
core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
if (unlikely(!mvi))
return IRQ_NONE;
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ MVS_CHIP_DISP->interrupt_disable(mvi);
+#endif
stat = MVS_CHIP_DISP->isr_status(mvi, irq);
- if (!stat)
+ if (!stat) {
+ #ifdef CONFIG_SCSI_MVSAS_TASKLET
+ MVS_CHIP_DISP->interrupt_enable(mvi);
+ #endif
return IRQ_NONE;
+ }
-#ifdef MVS_USE_TASKLET
- tasklet_schedule(&mv_tasklet);
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
#else
for (i = 0; i < core_nr; i++) {
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
@@ -225,7 +245,7 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
if (mvi->flags & MVF_FLAG_SOC)
slot_nr = MVS_SOC_SLOTS;
else
- slot_nr = MVS_SLOTS;
+ slot_nr = MVS_CHIP_SLOT_SZ;
spin_lock_init(&mvi->lock);
for (i = 0; i < mvi->chip->n_phy; i++) {
@@ -273,13 +293,18 @@ static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
goto err_out;
memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
-#ifndef DISABLE_HOTPLUG_DMA_FIX
mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
TRASH_BUCKET_SIZE,
&mvi->bulk_buffer_dma, GFP_KERNEL);
if (!mvi->bulk_buffer)
goto err_out;
-#endif
+
+ mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev,
+ TRASH_BUCKET_SIZE,
+ &mvi->bulk_buffer_dma1, GFP_KERNEL);
+ if (!mvi->bulk_buffer1)
+ goto err_out;
+
sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 16, 0);
if (!mvi->dma_pool) {
@@ -354,11 +379,12 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct Scsi_Host *shost, unsigned int id)
{
- struct mvs_info *mvi;
+ struct mvs_info *mvi = NULL;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
- mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
- GFP_KERNEL);
+ mvi = kzalloc(sizeof(*mvi) +
+ (1L << mvs_chips[ent->driver_data].slot_width) *
+ sizeof(struct mvs_slot_info), GFP_KERNEL);
if (!mvi)
return NULL;
@@ -367,7 +393,6 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
mvi->chip_id = ent->driver_data;
mvi->chip = &mvs_chips[mvi->chip_id];
INIT_LIST_HEAD(&mvi->wq_list);
- mvi->irq = pdev->irq;
((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
@@ -375,9 +400,10 @@ static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
mvi->id = id;
mvi->sas = sha;
mvi->shost = shost;
-#ifdef MVS_USE_TASKLET
- tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
-#endif
+
+ mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ>>3, GFP_KERNEL);
+ if (!mvi->tags)
+ goto err_out;
if (MVS_CHIP_DISP->chip_ioremap(mvi))
goto err_out;
@@ -388,7 +414,6 @@ err_out:
return NULL;
}
-/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
int rc;
@@ -450,7 +475,7 @@ static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
shost->transportt = mvs_stt;
- shost->max_id = 128;
+ shost->max_id = MVS_MAX_DEVICES;
shost->max_lun = ~0;
shost->max_channel = 1;
shost->max_cmd_len = 16;
@@ -493,11 +518,12 @@ static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
if (mvi->flags & MVF_FLAG_SOC)
can_queue = MVS_SOC_CAN_QUEUE;
else
- can_queue = MVS_CAN_QUEUE;
+ can_queue = MVS_CHIP_SLOT_SZ;
sha->lldd_queue_size = can_queue;
+ shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
shost->can_queue = can_queue;
- mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
+ mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
sha->core.shost = mvi->shost;
}
@@ -518,6 +544,7 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
{
unsigned int rc, nhost = 0;
struct mvs_info *mvi;
+ struct mvs_prv_info *mpi;
irq_handler_t irq_handler = mvs_interrupt;
struct Scsi_Host *shost = NULL;
const struct mvs_chip_info *chip;
@@ -569,6 +596,9 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
goto err_out_regions;
}
+ memset(&mvi->hba_info_param, 0xFF,
+ sizeof(struct hba_info_page));
+
mvs_init_sas_add(mvi);
mvi->instance = nhost;
@@ -579,8 +609,9 @@ static int __devinit mvs_pci_init(struct pci_dev *pdev,
}
nhost++;
} while (nhost < chip->n_host);
-#ifdef MVS_USE_TASKLET
- tasklet_init(&mv_tasklet, mvs_tasklet,
+ mpi = (struct mvs_prv_info *)(SHOST_TO_SAS_HA(shost)->lldd_ha);
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ tasklet_init(&(mpi->mv_tasklet), mvs_tasklet,
(unsigned long)SHOST_TO_SAS_HA(shost));
#endif
@@ -625,8 +656,8 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
-#ifdef MVS_USE_TASKLET
- tasklet_kill(&mv_tasklet);
+#ifdef CONFIG_SCSI_MVSAS_TASKLET
+ tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
#endif
pci_set_drvdata(pdev, NULL);
@@ -635,7 +666,7 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
scsi_remove_host(mvi->shost);
MVS_CHIP_DISP->interrupt_disable(mvi);
- free_irq(mvi->irq, sha);
+ free_irq(mvi->pdev->irq, sha);
for (i = 0; i < core_nr; i++) {
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
mvs_free(mvi);
@@ -703,6 +734,70 @@ static struct pci_driver mvs_pci_driver = {
.remove = __devexit_p(mvs_pci_remove),
};
+static ssize_t
+mvs_show_driver_version(struct device *cdev,
+ struct device_attribute *attr, char *buffer)
+{
+ return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION);
+}
+
+static DEVICE_ATTR(driver_version,
+ S_IRUGO,
+ mvs_show_driver_version,
+ NULL);
+
+static ssize_t
+mvs_store_interrupt_coalescing(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buffer, size_t size)
+{
+ int val = 0;
+ struct mvs_info *mvi = NULL;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ u8 i, core_nr;
+ if (buffer == NULL)
+ return size;
+
+ if (sscanf(buffer, "%d", &val) != 1)
+ return -EINVAL;
+
+ if (val >= 0x10000) {
+ mv_dprintk("interrupt coalescing timer %d us is"
+ "too long\n", val);
+ return strlen(buffer);
+ }
+
+ interrupt_coalescing = val;
+
+ core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+ if (unlikely(!mvi))
+ return -EINVAL;
+
+ for (i = 0; i < core_nr; i++) {
+ mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+ if (MVS_CHIP_DISP->tune_interrupt)
+ MVS_CHIP_DISP->tune_interrupt(mvi,
+ interrupt_coalescing);
+ }
+ mv_dprintk("set interrupt coalescing time to %d us\n",
+ interrupt_coalescing);
+ return strlen(buffer);
+}
+
+static ssize_t mvs_show_interrupt_coalescing(struct device *cdev,
+ struct device_attribute *attr, char *buffer)
+{
+ return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing);
+}
+
+static DEVICE_ATTR(interrupt_coalescing,
+ S_IRUGO|S_IWUSR,
+ mvs_show_interrupt_coalescing,
+ mvs_store_interrupt_coalescing);
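
The new interrupt_coalescing store handler parses a decimal value, refuses anything that does not parse or that exceeds 16 bits (the width of the timeout field), and only then pushes the value to every controller through the tune_interrupt dispatch hook. Below is a userspace sketch of just the parse-and-validate step; it folds both error cases into a -1 return, which is a simplification rather than the sysfs semantics:

	#include <stdio.h>
	#include <stdlib.h>

	/* returns the accepted coalescing time in microseconds, or -1 */
	static int parse_coalescing(const char *buffer)
	{
		char *end;
		long val = strtol(buffer, &end, 10);

		if (end == buffer)		/* nothing numeric was supplied */
			return -1;
		if (val < 0 || val >= 0x10000)	/* timeout field is 16 bits wide */
			return -1;
		return (int)val;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       parse_coalescing("128"),		/* accepted           */
		       parse_coalescing("65536"),	/* too long, rejected */
		       parse_coalescing("oops"));	/* not a number       */
		return 0;
	}
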
+
/* task handler */
struct task_struct *mvs_th;
static int __init mvs_init(void)
@@ -739,6 +834,12 @@ static void __exit mvs_exit(void)
kmem_cache_destroy(mvs_task_list_cache);
}
+struct device_attribute *mvst_host_attrs[] = {
+ &dev_attr_driver_version,
+ &dev_attr_interrupt_coalescing,
+ NULL,
+};
+
module_init(mvs_init);
module_exit(mvs_exit);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 0ef27425c44..4958fefff36 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -38,7 +38,7 @@ static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
- void *bitmap = &mvi->tags;
+ void *bitmap = mvi->tags;
clear_bit(tag, bitmap);
}
@@ -49,14 +49,14 @@ void mvs_tag_free(struct mvs_info *mvi, u32 tag)
void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
{
- void *bitmap = &mvi->tags;
+ void *bitmap = mvi->tags;
set_bit(tag, bitmap);
}
inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
unsigned int index, tag;
- void *bitmap = &mvi->tags;
+ void *bitmap = mvi->tags;
index = find_first_zero_bit(bitmap, mvi->tags_num);
tag = index;
@@ -74,126 +74,6 @@ void mvs_tag_init(struct mvs_info *mvi)
mvs_tag_clear(mvi, i);
}
-void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
-{
- u32 i;
- u32 run;
- u32 offset;
-
- offset = 0;
- while (size) {
- printk(KERN_DEBUG"%08X : ", baseaddr + offset);
- if (size >= 16)
- run = 16;
- else
- run = size;
- size -= run;
- for (i = 0; i < 16; i++) {
- if (i < run)
- printk(KERN_DEBUG"%02X ", (u32)data[i]);
- else
- printk(KERN_DEBUG" ");
- }
- printk(KERN_DEBUG": ");
- for (i = 0; i < run; i++)
- printk(KERN_DEBUG"%c",
- isalnum(data[i]) ? data[i] : '.');
- printk(KERN_DEBUG"\n");
- data = &data[16];
- offset += run;
- }
- printk(KERN_DEBUG"\n");
-}
-
-#if (_MV_DUMP > 1)
-static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
- enum sas_protocol proto)
-{
- u32 offset;
- struct mvs_slot_info *slot = &mvi->slot_info[tag];
-
- offset = slot->cmd_size + MVS_OAF_SZ +
- MVS_CHIP_DISP->prd_size() * slot->n_elem;
- dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
- tag);
- mvs_hexdump(32, (u8 *) slot->response,
- (u32) slot->buf_dma + offset);
-}
-#endif
-
-static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
- enum sas_protocol proto)
-{
-#if (_MV_DUMP > 1)
- u32 sz, w_ptr;
- u64 addr;
- struct mvs_slot_info *slot = &mvi->slot_info[tag];
-
- /*Delivery Queue */
- sz = MVS_CHIP_SLOT_SZ;
- w_ptr = slot->tx;
- addr = mvi->tx_dma;
- dev_printk(KERN_DEBUG, mvi->dev,
- "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
- dev_printk(KERN_DEBUG, mvi->dev,
- "Delivery Queue Base Address=0x%llX (PA)"
- "(tx_dma=0x%llX), Entry=%04d\n",
- addr, (unsigned long long)mvi->tx_dma, w_ptr);
- mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
- (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
- /*Command List */
- addr = mvi->slot_dma;
- dev_printk(KERN_DEBUG, mvi->dev,
- "Command List Base Address=0x%llX (PA)"
- "(slot_dma=0x%llX), Header=%03d\n",
- addr, (unsigned long long)slot->buf_dma, tag);
- dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
- /*mvs_cmd_hdr */
- mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
- (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
- /*1.command table area */
- dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
- mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
- /*2.open address frame area */
- dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
- mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
- (u32) slot->buf_dma + slot->cmd_size);
- /*3.status buffer */
- mvs_hba_sb_dump(mvi, tag, proto);
- /*4.PRD table */
- dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
- mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
- (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
- (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
-#endif
-}
-
-static void mvs_hba_cq_dump(struct mvs_info *mvi)
-{
-#if (_MV_DUMP > 2)
- u64 addr;
- void __iomem *regs = mvi->regs;
- u32 entry = mvi->rx_cons + 1;
- u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
-
- /*Completion Queue */
- addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
- dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
- mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
- dev_printk(KERN_DEBUG, mvi->dev,
- "Completion List Base Address=0x%llX (PA), "
- "CQ_Entry=%04d, CQ_WP=0x%08X\n",
- addr, entry - 1, mvi->rx[0]);
- mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
- mvi->rx_dma + sizeof(u32) * entry);
-#endif
-}
-
-void mvs_get_sas_addr(void *buf, u32 buflen)
-{
- /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
-}
-
struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
{
unsigned long i = 0, j = 0, hi = 0;
@@ -222,7 +102,6 @@ struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
}
-/* FIXME */
int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
{
unsigned long i = 0, j = 0, n = 0, num = 0;
@@ -253,6 +132,20 @@ int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
return num;
}
+struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
+ u8 reg_set)
+{
+ u32 dev_no;
+ for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
+ if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
+ continue;
+
+ if (mvi->devices[dev_no].taskfileset == reg_set)
+ return &mvi->devices[dev_no];
+ }
+ return NULL;
+}
+
static inline void mvs_free_reg_set(struct mvs_info *mvi,
struct mvs_device *dev)
{
@@ -283,7 +176,6 @@ void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
}
}
-/* FIXME: locking? */
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata)
{
@@ -309,12 +201,12 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
if (tmp & PHY_RST_HARD)
break;
- MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
+ MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
break;
case PHY_FUNC_LINK_RESET:
MVS_CHIP_DISP->phy_enable(mvi, phy_id);
- MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
+ MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
break;
case PHY_FUNC_DISABLE:
@@ -406,14 +298,10 @@ int mvs_slave_configure(struct scsi_device *sdev)
if (ret)
return ret;
- if (dev_is_sata(dev)) {
- /* may set PIO mode */
- #if MV_DISABLE_NCQ
- struct ata_port *ap = dev->sata_dev.ap;
- struct ata_device *adev = ap->link.device;
- adev->flags |= ATA_DFLAG_NCQ_OFF;
- scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
- #endif
+ if (!dev_is_sata(dev)) {
+ sas_change_queue_depth(sdev,
+ MVS_QUEUE_SIZE,
+ SCSI_QDEPTH_DEFAULT);
}
return 0;
}
@@ -424,6 +312,7 @@ void mvs_scan_start(struct Scsi_Host *shost)
unsigned short core_nr;
struct mvs_info *mvi;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct mvs_prv_info *mvs_prv = sha->lldd_ha;
core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
@@ -432,15 +321,17 @@ void mvs_scan_start(struct Scsi_Host *shost)
for (i = 0; i < mvi->chip->n_phy; ++i)
mvs_bytes_dmaed(mvi, i);
}
+ mvs_prv->scan_finished = 1;
}
int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
- /* give the phy enabling interrupt event time to come in (1s
- * is empirically about all it takes) */
- if (time < HZ)
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ struct mvs_prv_info *mvs_prv = sha->lldd_ha;
+
+ if (mvs_prv->scan_finished == 0)
return 0;
- /* Wait for discovery to finish */
+
scsi_flush_work(shost);
return 1;
}
@@ -461,10 +352,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
void *buf_prd;
struct mvs_slot_info *slot = &mvi->slot_info[tag];
u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
-#if _MV_DUMP
- u8 *buf_cmd;
- void *from;
-#endif
+
/*
* DMA-map SMP request, response buffers
*/
@@ -496,15 +384,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
buf_tmp = slot->buf;
buf_tmp_dma = slot->buf_dma;
-#if _MV_DUMP
- buf_cmd = buf_tmp;
- hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
- buf_tmp += req_len;
- buf_tmp_dma += req_len;
- slot->cmd_size = req_len;
-#else
hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
-#endif
/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
buf_oaf = buf_tmp;
@@ -553,12 +433,6 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
/* fill in PRD (scatter/gather) table, if any */
MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
-#if _MV_DUMP
- /* copy cmd table */
- from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
- memcpy(buf_cmd, from + sg_req->offset, req_len);
- kunmap_atomic(from, KM_IRQ0);
-#endif
return 0;
err_out_2:
@@ -616,14 +490,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
-#ifndef DISABLE_HOTPLUG_DMA_FIX
if (task->data_dir == DMA_FROM_DEVICE)
flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
else
flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
-#else
- flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
-#endif
+
if (task->ata_task.use_ncq)
flags |= MCH_FPDMA;
if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
@@ -631,11 +502,8 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
flags |= MCH_ATAPI;
}
- /* FIXME: fill in port multiplier number */
-
hdr->flags = cpu_to_le32(flags);
- /* FIXME: the low order order 5 bits for the TAG if enable NCQ */
if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
else
@@ -657,9 +525,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
buf_tmp += MVS_ATA_CMD_SZ;
buf_tmp_dma += MVS_ATA_CMD_SZ;
-#if _MV_DUMP
- slot->cmd_size = MVS_ATA_CMD_SZ;
-#endif
/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
/* used for STP. unused for SATA? */
@@ -682,9 +547,6 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
buf_tmp_dma += i;
/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
- /* FIXME: probably unused, for SATA. kept here just in case
- * we get a STP/SATA error information record
- */
slot->response = buf_tmp;
hdr->status_buf = cpu_to_le64(buf_tmp_dma);
if (mvi->flags & MVF_FLAG_SOC)
@@ -715,11 +577,11 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
/* fill in PRD (scatter/gather) table, if any */
MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
-#ifndef DISABLE_HOTPLUG_DMA_FIX
+
if (task->data_dir == DMA_FROM_DEVICE)
- MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
+ MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
-#endif
+
return 0;
}
@@ -761,6 +623,9 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
}
if (is_tmf)
flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
+ else
+ flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
+
hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
hdr->tags = cpu_to_le32(tag);
hdr->data_len = cpu_to_le32(task->total_xfer_len);
@@ -777,9 +642,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
buf_tmp += MVS_SSP_CMD_SZ;
buf_tmp_dma += MVS_SSP_CMD_SZ;
-#if _MV_DUMP
- slot->cmd_size = MVS_SSP_CMD_SZ;
-#endif
/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
buf_oaf = buf_tmp;
@@ -986,7 +848,6 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock(&task->task_state_lock);
- mvs_hba_memory_dump(mvi, tag, task->task_proto);
mvi_dev->running_req++;
++(*pass);
mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
@@ -1189,9 +1050,9 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
mvs_slot_free(mvi, slot_idx);
}
-static void mvs_update_wideport(struct mvs_info *mvi, int i)
+static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
{
- struct mvs_phy *phy = &mvi->phy[i];
+ struct mvs_phy *phy = &mvi->phy[phy_no];
struct mvs_port *port = phy->port;
int j, no;
@@ -1246,18 +1107,17 @@ static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
return NULL;
MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
- s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+ s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
- s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+ s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
- s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+ s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
- s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+ s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
- /* Workaround: take some ATAPI devices for ATA */
if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
@@ -1269,6 +1129,13 @@ static u32 mvs_is_sig_fis_received(u32 irq_status)
return irq_status & PHYEV_SIG_FIS;
}
+static void mvs_sig_remove_timer(struct mvs_phy *phy)
+{
+ if (phy->timer.function)
+ del_timer(&phy->timer);
+ phy->timer.function = NULL;
+}
+
void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
{
struct mvs_phy *phy = &mvi->phy[i];
@@ -1291,6 +1158,7 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
if (phy->phy_type & PORT_TYPE_SATA) {
phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
if (mvs_is_sig_fis_received(phy->irq_status)) {
+ mvs_sig_remove_timer(phy);
phy->phy_attached = 1;
phy->att_dev_sas_addr =
i + mvi->id * mvi->chip->n_phy;
@@ -1308,7 +1176,6 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
tmp | PHYEV_SIG_FIS);
phy->phy_attached = 0;
phy->phy_type &= ~PORT_TYPE_SATA;
- MVS_CHIP_DISP->phy_reset(mvi, i, 0);
goto out_done;
}
} else if (phy->phy_type & PORT_TYPE_SAS
@@ -1334,9 +1201,9 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
if (MVS_CHIP_DISP->phy_work_around)
MVS_CHIP_DISP->phy_work_around(mvi, i);
}
- mv_dprintk("port %d attach dev info is %x\n",
+ mv_dprintk("phy %d attach dev info is %x\n",
i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
- mv_dprintk("port %d attach sas addr is %llx\n",
+ mv_dprintk("phy %d attach sas addr is %llx\n",
i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
out_done:
if (get_st)
@@ -1361,10 +1228,10 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
}
hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
- if (sas_port->id >= mvi->chip->n_phy)
- port = &mvi->port[sas_port->id - mvi->chip->n_phy];
+ if (i >= mvi->chip->n_phy)
+ port = &mvi->port[i - mvi->chip->n_phy];
else
- port = &mvi->port[sas_port->id];
+ port = &mvi->port[i];
if (lock)
spin_lock_irqsave(&mvi->lock, flags);
port->port_attached = 1;
@@ -1393,7 +1260,7 @@ static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
return;
}
list_for_each_entry(dev, &port->dev_list, dev_list_node)
- mvs_do_release_task(phy->mvi, phy_no, NULL);
+ mvs_do_release_task(phy->mvi, phy_no, dev);
}
@@ -1457,6 +1324,7 @@ int mvs_dev_found_notify(struct domain_device *dev, int lock)
mvi_device->dev_status = MVS_DEV_NORMAL;
mvi_device->dev_type = dev->dev_type;
mvi_device->mvi_info = mvi;
+ mvi_device->sas_device = dev;
if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
int phy_id;
u8 phy_num = parent_dev->ex_dev.num_phys;
@@ -1508,6 +1376,7 @@ void mvs_dev_gone_notify(struct domain_device *dev)
mv_dprintk("found dev has gone.\n");
}
dev->lldd_dev = NULL;
+ mvi_dev->sas_device = NULL;
spin_unlock_irqrestore(&mvi->lock, flags);
}
@@ -1555,7 +1424,6 @@ static void mvs_tmf_timedout(unsigned long data)
complete(&task->completion);
}
-/* XXX */
#define MVS_TASK_TIMEOUT 20
static int mvs_exec_internal_tmf_task(struct domain_device *dev,
void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
@@ -1588,7 +1456,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
}
wait_for_completion(&task->completion);
- res = -TMF_RESP_FUNC_FAILED;
+ res = TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
@@ -1638,11 +1506,10 @@ static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
u8 *lun, struct mvs_tmf_task *tmf)
{
struct sas_ssp_task ssp_task;
- DECLARE_COMPLETION_ONSTACK(completion);
if (!(dev->tproto & SAS_PROTOCOL_SSP))
return TMF_RESP_FUNC_ESUPP;
- strncpy((u8 *)&ssp_task.LUN, lun, 8);
+ memcpy(ssp_task.LUN, lun, 8);
return mvs_exec_internal_tmf_task(dev, &ssp_task,
sizeof(ssp_task), tmf);
@@ -1666,7 +1533,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
int mvs_lu_reset(struct domain_device *dev, u8 *lun)
{
unsigned long flags;
- int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED;
+ int rc = TMF_RESP_FUNC_FAILED;
struct mvs_tmf_task tmf_task;
struct mvs_device * mvi_dev = dev->lldd_dev;
struct mvs_info *mvi = mvi_dev->mvi_info;
@@ -1675,10 +1542,8 @@ int mvs_lu_reset(struct domain_device *dev, u8 *lun)
mvi_dev->dev_status = MVS_DEV_EH;
rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
if (rc == TMF_RESP_FUNC_COMPLETE) {
- num = mvs_find_dev_phyno(dev, phyno);
spin_lock_irqsave(&mvi->lock, flags);
- for (i = 0; i < num; i++)
- mvs_release_task(mvi, dev);
+ mvs_release_task(mvi, dev);
spin_unlock_irqrestore(&mvi->lock, flags);
}
/* If failed, fall-through I_T_Nexus reset */
@@ -1696,11 +1561,12 @@ int mvs_I_T_nexus_reset(struct domain_device *dev)
if (mvi_dev->dev_status != MVS_DEV_EH)
return TMF_RESP_FUNC_COMPLETE;
+ else
+ mvi_dev->dev_status = MVS_DEV_NORMAL;
rc = mvs_debug_I_T_nexus_reset(dev);
mv_printk("%s for device[%x]:rc= %d\n",
__func__, mvi_dev->device_id, rc);
- /* housekeeper */
spin_lock_irqsave(&mvi->lock, flags);
mvs_release_task(mvi, dev);
spin_unlock_irqrestore(&mvi->lock, flags);
@@ -1739,9 +1605,6 @@ int mvs_query_task(struct sas_task *task)
case TMF_RESP_FUNC_FAILED:
case TMF_RESP_FUNC_COMPLETE:
break;
- default:
- rc = TMF_RESP_FUNC_COMPLETE;
- break;
}
}
mv_printk("%s:rc= %d\n", __func__, rc);
@@ -1761,8 +1624,8 @@ int mvs_abort_task(struct sas_task *task)
u32 tag;
if (!mvi_dev) {
- mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__);
- rc = TMF_RESP_FUNC_FAILED;
+ mv_printk("Device has removed\n");
+ return TMF_RESP_FUNC_FAILED;
}
mvi = mvi_dev->mvi_info;
@@ -1807,25 +1670,17 @@ int mvs_abort_task(struct sas_task *task)
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
- /* to do free register_set */
if (SATA_DEV == dev->dev_type) {
struct mvs_slot_info *slot = task->lldd_task;
- struct task_status_struct *tstat;
u32 slot_idx = (u32)(slot - mvi->slot_info);
- tstat = &task->task_status;
- mv_dprintk(KERN_DEBUG "mv_abort_task() mvi=%p task=%p "
+ mv_dprintk("mvs_abort_task() mvi=%p task=%p "
"slot=%p slot_idx=x%x\n",
mvi, task, slot, slot_idx);
- tstat->stat = SAS_ABORTED_TASK;
- if (mvi_dev && mvi_dev->running_req)
- mvi_dev->running_req--;
- if (sas_protocol_ata(task->task_proto))
- mvs_free_reg_set(mvi, mvi_dev);
+ mvs_tmf_timedout((unsigned long)task);
mvs_slot_task_free(mvi, task, slot, slot_idx);
- return -1;
+ rc = TMF_RESP_FUNC_COMPLETE;
+ goto out;
}
- } else {
- /* SMP */
}
out:
@@ -1891,12 +1746,63 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
return stat;
}
+void mvs_set_sense(u8 *buffer, int len, int d_sense,
+ int key, int asc, int ascq)
+{
+ memset(buffer, 0, len);
+
+ if (d_sense) {
+ /* Descriptor format */
+ if (len < 4) {
+ mv_printk("Length %d of sense buffer too small to "
+ "fit sense %x:%x:%x", len, key, asc, ascq);
+ }
+
+ buffer[0] = 0x72; /* Response Code */
+ if (len > 1)
+ buffer[1] = key; /* Sense Key */
+ if (len > 2)
+ buffer[2] = asc; /* ASC */
+ if (len > 3)
+ buffer[3] = ascq; /* ASCQ */
+ } else {
+ if (len < 14) {
+ mv_printk("Length %d of sense buffer too small to "
+ "fit sense %x:%x:%x", len, key, asc, ascq);
+ }
+
+ buffer[0] = 0x70; /* Response Code */
+ if (len > 2)
+ buffer[2] = key; /* Sense Key */
+ if (len > 7)
+ buffer[7] = 0x0a; /* Additional Sense Length */
+ if (len > 12)
+ buffer[12] = asc; /* ASC */
+ if (len > 13)
+ buffer[13] = ascq; /* ASCQ */
+ }
+
+ return;
+}
+
+void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
+ u8 key, u8 asc, u8 asc_q)
+{
+ iu->datapres = 2;
+ iu->response_data_len = 0;
+ iu->sense_data_len = 17;
+ iu->status = 02;
+ mvs_set_sense(iu->sense_data, 17, 0,
+ key, asc, asc_q);
+}
+
static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
u32 slot_idx)
{
struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
int stat;
- u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
+ u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
+ u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
u32 tfs = 0;
enum mvs_port_type type = PORT_TYPE_SAS;
@@ -1908,8 +1814,19 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
stat = SAM_STAT_CHECK_CONDITION;
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
+ {
stat = SAS_ABORTED_TASK;
+ if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
+ struct ssp_response_iu *iu = slot->response +
+ sizeof(struct mvs_err_info);
+ mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01);
+ sas_ssp_task_response(mvi->dev, task, iu);
+ stat = SAM_STAT_CHECK_CONDITION;
+ }
+ if (err_dw1 & bit(31))
+ mv_printk("reuse same slot, retry command.\n");
break;
+ }
case SAS_PROTOCOL_SMP:
stat = SAM_STAT_CHECK_CONDITION;
break;
@@ -1918,10 +1835,8 @@ static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
{
- if (err_dw0 == 0x80400002)
- mv_printk("find reserved error, why?\n");
-
task->ata_task.use_ncq = 0;
+ stat = SAS_PROTO_RESPONSE;
mvs_sata_done(mvi, task, slot_idx, err_dw0);
}
break;
@@ -1945,8 +1860,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
void *to;
enum exec_status sts;
- if (mvi->exp_req)
- mvi->exp_req--;
if (unlikely(!task || !task->lldd_task || !task->dev))
return -1;
@@ -1954,8 +1867,6 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
dev = task->dev;
mvi_dev = dev->lldd_dev;
- mvs_hba_cq_dump(mvi);
-
spin_lock(&task->task_state_lock);
task->task_state_flags &=
~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
@@ -1978,6 +1889,7 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
return -1;
}
+ /* when no device is attached, go ahead and complete via error handling */
if (unlikely(!mvi_dev || flags)) {
if (!mvi_dev)
mv_dprintk("port has not device.\n");
@@ -1987,6 +1899,9 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
/* error info record present */
if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
+ mv_dprintk("port %d slot %d rx_desc %X has error info"
+ "%016llX.\n", slot->port->sas_port.id, slot_idx,
+ rx_desc, (u64)(*(u64 *)slot->response));
tstat->stat = mvs_slot_err(mvi, task, slot_idx);
tstat->resp = SAS_TASK_COMPLETE;
goto out;
@@ -2048,8 +1963,7 @@ out:
spin_unlock(&mvi->lock);
if (task->task_done)
task->task_done(task);
- else
- mv_dprintk("why has not task_done.\n");
+
spin_lock(&mvi->lock);
return sts;
@@ -2092,7 +2006,6 @@ void mvs_release_task(struct mvs_info *mvi,
struct domain_device *dev)
{
int i, phyno[WIDE_PORT_MAX_PHY], num;
- /* housekeeper */
num = mvs_find_dev_phyno(dev, phyno);
for (i = 0; i < num; i++)
mvs_do_release_task(mvi, phyno[i], dev);
@@ -2111,13 +2024,13 @@ static void mvs_work_queue(struct work_struct *work)
struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
struct mvs_info *mvi = mwq->mvi;
unsigned long flags;
+ u32 phy_no = (unsigned long) mwq->data;
+ struct sas_ha_struct *sas_ha = mvi->sas;
+ struct mvs_phy *phy = &mvi->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
spin_lock_irqsave(&mvi->lock, flags);
if (mwq->handler & PHY_PLUG_EVENT) {
- u32 phy_no = (unsigned long) mwq->data;
- struct sas_ha_struct *sas_ha = mvi->sas;
- struct mvs_phy *phy = &mvi->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
if (phy->phy_event & PHY_PLUG_OUT) {
u32 tmp;
@@ -2139,6 +2052,11 @@ static void mvs_work_queue(struct work_struct *work)
mv_dprintk("phy%d Attached Device\n", phy_no);
}
}
+ } else if (mwq->handler & EXP_BRCT_CHG) {
+ phy->phy_event &= ~EXP_BRCT_CHG;
+ sas_ha->notify_port_event(sas_phy,
+ PORTE_BROADCAST_RCVD);
+ mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
}
list_del(&mwq->entry);
spin_unlock_irqrestore(&mvi->lock, flags);
@@ -2174,29 +2092,21 @@ static void mvs_sig_time_out(unsigned long tphy)
if (&mvi->phy[phy_no] == phy) {
mv_dprintk("Get signature time out, reset phy %d\n",
phy_no+mvi->id*mvi->chip->n_phy);
- MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
+ MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
}
}
}
-static void mvs_sig_remove_timer(struct mvs_phy *phy)
-{
- if (phy->timer.function)
- del_timer(&phy->timer);
- phy->timer.function = NULL;
-}
-
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
u32 tmp;
- struct sas_ha_struct *sas_ha = mvi->sas;
struct mvs_phy *phy = &mvi->phy[phy_no];
- struct asd_sas_phy *sas_phy = &phy->sas_phy;
phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
- mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
+ MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
+ mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
- mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
+ mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
phy->irq_status);
/*
@@ -2205,11 +2115,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
*/
if (phy->irq_status & PHYEV_DCDR_ERR) {
- mv_dprintk("port %d STP decoding error.\n",
+ mv_dprintk("phy %d STP decoding error.\n",
phy_no + mvi->id*mvi->chip->n_phy);
}
if (phy->irq_status & PHYEV_POOF) {
+ mdelay(500);
if (!(phy->phy_event & PHY_PLUG_OUT)) {
int dev_sata = phy->phy_type & PORT_TYPE_SATA;
int ready;
@@ -2220,17 +2131,13 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
(void *)(unsigned long)phy_no,
PHY_PLUG_EVENT);
ready = mvs_is_phy_ready(mvi, phy_no);
- if (!ready)
- mv_dprintk("phy%d Unplug Notice\n",
- phy_no +
- mvi->id * mvi->chip->n_phy);
if (ready || dev_sata) {
if (MVS_CHIP_DISP->stp_reset)
MVS_CHIP_DISP->stp_reset(mvi,
phy_no);
else
MVS_CHIP_DISP->phy_reset(mvi,
- phy_no, 0);
+ phy_no, MVS_SOFT_RESET);
return;
}
}
@@ -2243,13 +2150,12 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
if (phy->timer.function == NULL) {
phy->timer.data = (unsigned long)phy;
phy->timer.function = mvs_sig_time_out;
- phy->timer.expires = jiffies + 10*HZ;
+ phy->timer.expires = jiffies + 5*HZ;
add_timer(&phy->timer);
}
}
if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
- mvs_sig_remove_timer(phy);
mv_dprintk("notify plug in on phy[%d]\n", phy_no);
if (phy->phy_status) {
mdelay(10);
@@ -2263,14 +2169,14 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
}
mvs_update_phyinfo(mvi, phy_no, 0);
if (phy->phy_type & PORT_TYPE_SAS) {
- MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2);
+ MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
mdelay(10);
}
mvs_bytes_dmaed(mvi, phy_no);
/* whether driver is going to handle hot plug */
if (phy->phy_event & PHY_PLUG_OUT) {
- mvs_port_notify_formed(sas_phy, 0);
+ mvs_port_notify_formed(&phy->sas_phy, 0);
phy->phy_event &= ~PHY_PLUG_OUT;
}
} else {
@@ -2278,13 +2184,11 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
phy_no + mvi->id*mvi->chip->n_phy);
}
} else if (phy->irq_status & PHYEV_BROAD_CH) {
- mv_dprintk("port %d broadcast change.\n",
+ mv_dprintk("phy %d broadcast change.\n",
phy_no + mvi->id*mvi->chip->n_phy);
- /* exception for Samsung disk drive*/
- mdelay(1000);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
+ EXP_BRCT_CHG);
}
- MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
}
int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 1367d8b9350..44d7885a4a1 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -48,12 +48,8 @@
#define DRV_NAME "mvsas"
#define DRV_VERSION "0.8.2"
-#define _MV_DUMP 0
#define MVS_ID_NOT_MAPPED 0x7f
-/* #define DISABLE_HOTPLUG_DMA_FIX */
-// #define MAX_EXP_RUNNING_REQ 2
#define WIDE_PORT_MAX_PHY 4
-#define MV_DISABLE_NCQ 0
#define mv_printk(fmt, arg ...) \
printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
#ifdef MV_DEBUG
@@ -64,6 +60,7 @@
#endif
#define MV_MAX_U32 0xffffffff
+extern int interrupt_coalescing;
extern struct mvs_tgt_initiator mvs_tgt;
extern struct mvs_info *tgt_mvi;
extern const struct mvs_dispatch mvs_64xx_dispatch;
@@ -99,6 +96,11 @@ enum dev_status {
MVS_DEV_EH = 0x1,
};
+enum dev_reset {
+ MVS_SOFT_RESET = 0,
+ MVS_HARD_RESET = 1,
+ MVS_PHY_TUNE = 2,
+};
struct mvs_info;
@@ -130,7 +132,6 @@ struct mvs_dispatch {
u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
- void (*get_sas_addr)(void *buf, u32 buflen);
void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
void (*clear_srs_irq)(struct mvs_info *mvi, u8 reg_set, u8 clear_all);
void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
@@ -167,9 +168,10 @@ struct mvs_dispatch {
);
int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
-#ifndef DISABLE_HOTPLUG_DMA_FIX
- void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd);
-#endif
+ void (*dma_fix)(struct mvs_info *mvi, u32 phy_mask,
+ int buf_len, int from, void *prd);
+ void (*tune_interrupt)(struct mvs_info *mvi, u32 time);
+ void (*non_spec_ncq_error)(struct mvs_info *mvi);
};
@@ -179,9 +181,11 @@ struct mvs_chip_info {
u32 fis_offs;
u32 fis_count;
u32 srs_sz;
+ u32 sg_width;
u32 slot_width;
const struct mvs_dispatch *dispatch;
};
+#define MVS_MAX_SG (1U << mvi->chip->sg_width)
#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
#define MVS_RX_FISL_SZ \
(mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
@@ -249,6 +253,73 @@ struct mvs_device {
u16 reserved;
};
+/* Generate PHY tuning parameters */
+struct phy_tuning {
+ /* 1 bit, transmitter emphasis enable */
+ u8 trans_emp_en:1;
+ /* 4 bits, transmitter emphasis amplitude */
+ u8 trans_emp_amp:4;
+ /* 3 bits, reserved space */
+ u8 Reserved_2bit_1:3;
+ /* 5 bits, transmitter amplitude */
+ u8 trans_amp:5;
+ /* 2 bits, transmitter amplitude adjust */
+ u8 trans_amp_adj:2;
+ /* 1 bit, reserved space */
+ u8 resv_2bit_2:1;
+ /* 2 bytes, reserved space */
+ u8 reserved[2];
+};
+
+struct ffe_control {
+ /* 4 bits, FFE Capacitor Select (value range 0~F) */
+ u8 ffe_cap_sel:4;
+ /* 3 bits, FFE Resistor Select (value range 0~7) */
+ u8 ffe_rss_sel:3;
+ /* 1 bit reserve*/
+ u8 reserved:1;
+};
+
+/*
+ * HBA_Info_Page is saved in Flash/NVRAM, total 256 bytes.
+ * The data area is valid only when Signature="MRVL".
+ * If any member is filled with 0xFF, that member is invalid.
+ */
+struct hba_info_page {
+ /* Dword 0 */
+ /* 4 bytes, structure signature, should be "MRVL" at first initialization */
+ u8 signature[4];
+
+ /* Dword 1-13 */
+ u32 reserved1[13];
+
+ /* Dword 14-29 */
+ /* 64 bytes, SAS address for each port */
+ u64 sas_addr[8];
+
+ /* Dword 30-31 */
+ /* 8 bytes for vanir 8-port PHY FFE setting
+ * BIT 0~3 : FFE Capacitor select(value range 0~F)
+ * BIT 4~6 : FFE Resistor select(value range 0~7)
+ * BIT 7: reserve.
+ */
+
+ struct ffe_control ffe_ctl[8];
+ /* Dword 32 -43 */
+ u32 reserved2[12];
+
+ /* Dword 44-45 */
+ /* 8 bytes, 0: 1.5G, 1: 3.0G, should be 0x01 at first initialization */
+ u8 phy_rate[8];
+
+ /* Dword 46-53 */
+ /* 32 bytes, PHY tuning parameters for each PHY*/
+ struct phy_tuning phy_tuning[8];
+
+ /* Dword 54-63 */
+ u32 reserved3[10];
+}; /* total 256 bytes */
+
struct mvs_slot_info {
struct list_head entry;
union {
@@ -264,9 +335,6 @@ struct mvs_slot_info {
*/
void *buf;
dma_addr_t buf_dma;
-#if _MV_DUMP
- u32 cmd_size;
-#endif
void *response;
struct mvs_port *port;
struct mvs_device *device;
@@ -320,12 +388,10 @@ struct mvs_info {
const struct mvs_chip_info *chip;
int tags_num;
- DECLARE_BITMAP(tags, MVS_SLOTS);
+ unsigned long *tags;
/* further per-slot information */
struct mvs_phy phy[MVS_MAX_PHYS];
struct mvs_port port[MVS_MAX_PHYS];
- u32 irq;
- u32 exp_req;
u32 id;
u64 sata_reg_set;
struct list_head *hba_list;
@@ -337,12 +403,13 @@ struct mvs_info {
u32 flashsectSize;
void *addon;
+ struct hba_info_page hba_info_param;
struct mvs_device devices[MVS_MAX_DEVICES];
-#ifndef DISABLE_HOTPLUG_DMA_FIX
void *bulk_buffer;
dma_addr_t bulk_buffer_dma;
+ void *bulk_buffer1;
+ dma_addr_t bulk_buffer_dma1;
#define TRASH_BUCKET_SIZE 0x20000
-#endif
void *dma_pool;
struct mvs_slot_info slot_info[0];
};
@@ -350,8 +417,10 @@ struct mvs_info {
struct mvs_prv_info{
u8 n_host;
u8 n_phy;
- u16 reserve;
+ u8 scan_finished;
+ u8 reserve;
struct mvs_info *mvi[2];
+ struct tasklet_struct mv_tasklet;
};
struct mvs_wq {
@@ -415,6 +484,6 @@ void mvs_do_release_task(struct mvs_info *mvi, int phy_no,
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
-void mvs_hexdump(u32 size, u8 *data, u32 baseaddr);
+struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi, u8 reg_set);
#endif
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index aa05e661d11..b97c8ab0c20 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -54,7 +54,7 @@
#include <scsi/libsas.h>
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "pm8001_defs.h"
#define DRV_NAME "pm8001"
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index fca6a895307..d079f9a3c6b 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3871,6 +3871,9 @@ static long pmcraid_ioctl_passthrough(
pmcraid_err("couldn't build passthrough ioadls\n");
goto out_free_buffer;
}
+ } else if (request_size < 0) {
+ rc = -EINVAL;
+ goto out_free_buffer;
}
/* If data is being written into the device, copy the data from user
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 532313e0725..7836eb01c7f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -42,8 +42,8 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
int reading;
if (IS_QLA82XX(ha)) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Firmware dump not supported for ISP82xx\n"));
+ ql_dbg(ql_dbg_user, vha, 0x705b,
+ "Firmware dump not supported for ISP82xx\n");
return count;
}
@@ -56,7 +56,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
if (!ha->fw_dump_reading)
break;
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x705d,
"Firmware dump cleared on (%ld).\n", vha->host_no);
ha->fw_dump_reading = 0;
@@ -66,7 +66,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
if (ha->fw_dumped && !ha->fw_dump_reading) {
ha->fw_dump_reading = 1;
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x705e,
"Raw firmware dump ready for read on (%ld).\n",
vha->host_no);
}
@@ -148,7 +148,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
}
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x705f,
"HBA not online, failing NVRAM update.\n");
return -EAGAIN;
}
@@ -158,6 +158,8 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
count);
+ ql_dbg(ql_dbg_user, vha, 0x7060,
+ "Setting ISP_ABORT_NEEDED\n");
/* NVRAM settings take effect immediately. */
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@@ -255,9 +257,9 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_state = QLA_SWAITING;
- DEBUG2(qla_printk(KERN_INFO, ha,
+ ql_dbg(ql_dbg_user, vha, 0x7061,
"Freeing flash region allocation -- 0x%x bytes.\n",
- ha->optrom_region_size));
+ ha->optrom_region_size);
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
@@ -273,7 +275,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_state = QLA_SREADING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
if (ha->optrom_buffer == NULL) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7062,
"Unable to allocate memory for optrom retrieval "
"(%x).\n", ha->optrom_region_size);
@@ -282,14 +284,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "HBA not online, failing NVRAM update.\n");
+ ql_log(ql_log_warn, vha, 0x7063,
+ "HBA not online, failing NVRAM update.\n");
return -EAGAIN;
}
- DEBUG2(qla_printk(KERN_INFO, ha,
+ ql_dbg(ql_dbg_user, vha, 0x7064,
"Reading flash region -- 0x%x/0x%x.\n",
- ha->optrom_region_start, ha->optrom_region_size));
+ ha->optrom_region_start, ha->optrom_region_size);
memset(ha->optrom_buffer, 0, ha->optrom_region_size);
ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
@@ -328,7 +330,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
valid = 1;
if (!valid) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7065,
"Invalid start region 0x%x/0x%x.\n", start, size);
return -EINVAL;
}
@@ -340,17 +342,17 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_state = QLA_SWRITING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
if (ha->optrom_buffer == NULL) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7066,
"Unable to allocate memory for optrom update "
- "(%x).\n", ha->optrom_region_size);
+ "(%x)\n", ha->optrom_region_size);
ha->optrom_state = QLA_SWAITING;
return count;
}
- DEBUG2(qla_printk(KERN_INFO, ha,
+ ql_dbg(ql_dbg_user, vha, 0x7067,
"Staging flash region write -- 0x%x/0x%x.\n",
- ha->optrom_region_start, ha->optrom_region_size));
+ ha->optrom_region_start, ha->optrom_region_size);
memset(ha->optrom_buffer, 0, ha->optrom_region_size);
break;
@@ -359,14 +361,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
break;
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7068,
"HBA not online, failing flash update.\n");
return -EAGAIN;
}
- DEBUG2(qla_printk(KERN_INFO, ha,
+ ql_dbg(ql_dbg_user, vha, 0x7069,
"Writing flash region -- 0x%x/0x%x.\n",
- ha->optrom_region_start, ha->optrom_region_size));
+ ha->optrom_region_start, ha->optrom_region_size);
ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
@@ -425,7 +427,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
return 0;
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x706a,
"HBA not online, failing VPD update.\n");
return -EAGAIN;
}
@@ -440,7 +442,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
tmp_data = vmalloc(256);
if (!tmp_data) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x706b,
"Unable to allocate memory for VPD information update.\n");
goto done;
}
@@ -480,7 +482,7 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->sfp_data_dma);
if (!ha->sfp_data) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x706c,
"Unable to allocate memory for SFP read-data.\n");
return 0;
}
@@ -499,9 +501,10 @@ do_read:
rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
addr, offset, SFP_BLOCK_SIZE, 0);
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x706d,
"Unable to read SFP data (%x/%x/%x).\n", rval,
addr, offset);
+
count = 0;
break;
}
@@ -538,8 +541,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
type = simple_strtol(buf, NULL, 10);
switch (type) {
case 0x2025c:
- qla_printk(KERN_INFO, ha,
- "Issuing ISP reset on (%ld).\n", vha->host_no);
+ ql_log(ql_log_info, vha, 0x706e,
+ "Issuing ISP reset.\n");
scsi_block_requests(vha->host);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -551,8 +554,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
if (!IS_QLA81XX(ha))
break;
- qla_printk(KERN_INFO, ha,
- "Issuing MPI reset on (%ld).\n", vha->host_no);
+ ql_log(ql_log_info, vha, 0x706f,
+ "Issuing MPI reset.\n");
/* Make sure FC side is not in reset */
qla2x00_wait_for_hba_online(vha);
@@ -560,20 +563,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
/* Issue MPI reset */
scsi_block_requests(vha->host);
if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
- qla_printk(KERN_WARNING, ha,
- "MPI reset failed on (%ld).\n", vha->host_no);
+ ql_log(ql_log_warn, vha, 0x7070,
+ "MPI reset failed.\n");
scsi_unblock_requests(vha->host);
break;
case 0x2025e:
if (!IS_QLA82XX(ha) || vha != base_vha) {
- qla_printk(KERN_INFO, ha,
- "FCoE ctx reset not supported for host%ld.\n",
- vha->host_no);
+ ql_log(ql_log_info, vha, 0x7071,
+ "FCoE ctx reset no supported.\n");
return count;
}
- qla_printk(KERN_INFO, ha,
- "Issuing FCoE CTX reset on host%ld.\n", vha->host_no);
+ ql_log(ql_log_info, vha, 0x7072,
+ "Issuing FCoE ctx reset.\n");
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_fcoe_ctx_reset(vha);
@@ -611,8 +613,8 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->edc_data_dma);
if (!ha->edc_data) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Unable to allocate memory for EDC write.\n"));
+ ql_log(ql_log_warn, vha, 0x7073,
+ "Unable to allocate memory for EDC write.\n");
return 0;
}
}
@@ -631,9 +633,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
- rval, dev, adr, opt, len, buf[8]));
+ ql_log(ql_log_warn, vha, 0x7074,
+ "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n",
+ rval, dev, adr, opt, len, buf[8]);
return 0;
}
@@ -669,8 +671,8 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->edc_data_dma);
if (!ha->edc_data) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Unable to allocate memory for EDC status.\n"));
+ ql_log(ql_log_warn, vha, 0x708c,
+ "Unable to allocate memory for EDC status.\n");
return 0;
}
}
@@ -688,9 +690,9 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
- rval, dev, adr, opt, len));
+ ql_log(ql_log_info, vha, 0x7075,
+ "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
+ rval, dev, adr, opt, len);
return 0;
}
@@ -749,7 +751,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
&ha->xgmac_data_dma, GFP_KERNEL);
if (!ha->xgmac_data) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7076,
"Unable to allocate memory for XGMAC read-data.\n");
return 0;
}
@@ -761,7 +763,7 @@ do_read:
rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
XGMAC_DATA_SIZE, &actual_size);
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7077,
"Unable to read XGMAC data (%x).\n", rval);
count = 0;
}
@@ -801,7 +803,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
&ha->dcbx_tlv_dma, GFP_KERNEL);
if (!ha->dcbx_tlv) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7078,
"Unable to allocate memory for DCBX TLV read-data.\n");
return 0;
}
@@ -813,8 +815,8 @@ do_read:
rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
DCBX_TLV_DATA_SIZE);
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Unable to read DCBX TLV data (%x).\n", rval);
+ ql_log(ql_log_warn, vha, 0x7079,
+ "Unable to read DCBX TLV (%x).\n", rval);
count = 0;
}
@@ -869,9 +871,13 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
iter->attr);
if (ret)
- qla_printk(KERN_INFO, vha->hw,
- "Unable to create sysfs %s binary attribute "
- "(%d).\n", iter->name, ret);
+ ql_log(ql_log_warn, vha, 0x00f3,
+ "Unable to create sysfs %s binary attribute (%d).\n",
+ iter->name, ret);
+ else
+ ql_dbg(ql_dbg_init, vha, 0x00f4,
+ "Successfully created sysfs %s binary attribure.\n",
+ iter->name);
}
}
@@ -1126,7 +1132,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
return -EPERM;
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x707a,
"Abort ISP active -- ignoring beacon request.\n");
return -EBUSY;
}
@@ -1322,9 +1328,8 @@ qla2x00_thermal_temp_show(struct device *dev,
temp = frac = 0;
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
- DEBUG2_3_11(printk(KERN_WARNING
- "%s(%ld): isp reset in progress.\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x707b,
+ "ISP reset active.\n");
else if (!vha->hw->flags.eeh_busy)
rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
if (rval != QLA_SUCCESS)
@@ -1343,8 +1348,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
- DEBUG2_3_11(printk("%s(%ld): isp reset in progress.\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x707c,
+ "ISP reset active.\n");
else if (!vha->hw->flags.eeh_busy)
rval = qla2x00_get_firmware_state(vha, state);
if (rval != QLA_SUCCESS)
@@ -1645,8 +1650,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
if (stats == NULL) {
- DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
- __func__, base_vha->host_no));
+ ql_log(ql_log_warn, vha, 0x707d,
+ "Failed to allocate memory for stats.\n");
goto done;
}
memset(stats, 0, DMA_POOL_SIZE);
@@ -1746,15 +1751,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
ret = qla24xx_vport_create_req_sanity_check(fc_vport);
if (ret) {
- DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
- "status %x\n", ret));
+ ql_log(ql_log_warn, vha, 0x707e,
+ "Vport sanity check failed, status %x\n", ret);
return (ret);
}
vha = qla24xx_create_vhost(fc_vport);
if (vha == NULL) {
- DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n",
- vha));
+ ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
return FC_VPORT_FAILED;
}
if (disable) {
@@ -1764,8 +1768,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
atomic_set(&vha->vp_state, VP_FAILED);
/* ready to create vport */
- qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
- vha->vp_idx);
+ ql_log(ql_log_info, vha, 0x7080,
+ "VP entry id %d assigned.\n", vha->vp_idx);
/* initialized vport states */
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1775,8 +1779,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
/* Don't retry or attempt login of this virtual port */
- DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
- base_vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x7081,
+ "Vport loop state is not UP.\n");
atomic_set(&vha->loop_state, LOOP_DEAD);
if (!disable)
fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
@@ -1785,9 +1789,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) {
vha->flags.difdix_supported = 1;
- DEBUG18(qla_printk(KERN_INFO, ha,
- "Registering for DIF/DIX type 1 and 3"
- " protection.\n"));
+ ql_dbg(ql_dbg_user, vha, 0x7082,
+ "Registered for DIF/DIX type 1 and 3 protection.\n");
scsi_host_set_prot(vha->host,
SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
@@ -1802,8 +1805,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
&ha->pdev->dev)) {
- DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
- vha->host_no, vha->vp_idx));
+ ql_dbg(ql_dbg_user, vha, 0x7083,
+ "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
goto vport_create_failed_2;
}
@@ -1820,6 +1823,10 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
if (ha->flags.cpu_affinity_enabled) {
req = ha->req_q_map[1];
+ ql_dbg(ql_dbg_multiq, vha, 0xc000,
+ "Request queue %p attached with "
+ "VP[%d], cpu affinity =%d\n",
+ req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
goto vport_queue;
} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
goto vport_queue;
@@ -1836,13 +1843,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
qos);
if (!ret)
- qla_printk(KERN_WARNING, ha,
- "Can't create request queue for vp_idx:%d\n",
- vha->vp_idx);
+ ql_log(ql_log_warn, vha, 0x7084,
+ "Can't create request queue for VP[%d]\n",
+ vha->vp_idx);
else {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
- ret, qos, vha->vp_idx));
+ ql_dbg(ql_dbg_multiq, vha, 0xc001,
+ "Request Que:%d Q0s: %d) created for VP[%d]\n",
+ ret, qos, vha->vp_idx);
+ ql_dbg(ql_dbg_user, vha, 0x7085,
+ "Request Que:%d Q0s: %d) created for VP[%d]\n",
+ ret, qos, vha->vp_idx);
req = ha->req_q_map[ret];
}
}
@@ -1882,12 +1892,13 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
if (vha->timer_active) {
qla2x00_vp_stop_timer(vha);
- DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
- " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
+ ql_dbg(ql_dbg_user, vha, 0x7086,
+ "Timer for the VP[%d] has stopped\n", vha->vp_idx);
}
/* No pending activities shall be there on the vha now */
- DEBUG(msleep(random32()%10)); /* Just to see if something falls on
+ if (ql2xextended_error_logging & ql_dbg_user)
+ msleep(random32()%10); /* Just to see if something falls on
* the net we have placed below */
BUG_ON(atomic_read(&vha->vref_count));
@@ -1901,12 +1912,12 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
- qla_printk(KERN_WARNING, ha,
- "Queue delete failed.\n");
+ ql_log(ql_log_warn, vha, 0x7087,
+ "Queue delete failed.\n");
}
scsi_host_put(vha->host);
- qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
+ ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 8c10e2c4928..07d1767cd26 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -36,7 +36,8 @@ done:
}
int
-qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
+qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
+ struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
int i, ret, num_valid;
uint8_t *bcode;
@@ -51,18 +52,17 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
if (bcode_val == 0xFFFFFFFF) {
/* No FCP Priority config data in flash */
- DEBUG2(printk(KERN_INFO
- "%s: No FCP priority config data.\n",
- __func__));
+ ql_dbg(ql_dbg_user, vha, 0x7051,
+ "No FCP Priority config data.\n");
return 0;
}
if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
bcode[3] != 'S') {
/* Invalid FCP priority data header*/
- DEBUG2(printk(KERN_ERR
- "%s: Invalid FCP Priority data header. bcode=0x%x\n",
- __func__, bcode_val));
+ ql_dbg(ql_dbg_user, vha, 0x7052,
+ "Invalid FCP Priority data header. bcode=0x%x.\n",
+ bcode_val);
return 0;
}
if (flag != 1)
@@ -77,15 +77,14 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
if (num_valid == 0) {
/* No valid FCP priority data entries */
- DEBUG2(printk(KERN_ERR
- "%s: No valid FCP Priority data entries.\n",
- __func__));
+ ql_dbg(ql_dbg_user, vha, 0x7053,
+ "No valid FCP Priority data entries.\n");
ret = 0;
} else {
/* FCP priority data is valid */
- DEBUG2(printk(KERN_INFO
- "%s: Valid FCP priority data. num entries = %d\n",
- __func__, num_valid));
+ ql_dbg(ql_dbg_user, vha, 0x7054,
+ "Valid FCP priority data. num entries = %d.\n",
+ num_valid);
}
return ret;
@@ -182,10 +181,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
if (!ha->fcp_prio_cfg) {
ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
if (!ha->fcp_prio_cfg) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate memory "
- "for fcp prio config data (%x).\n",
- FCP_PRIO_CFG_SIZE);
+ ql_log(ql_log_warn, vha, 0x7050,
+ "Unable to allocate memory for fcp prio "
+ "config data (%x).\n", FCP_PRIO_CFG_SIZE);
bsg_job->reply->result = (DID_ERROR << 16);
ret = -ENOMEM;
goto exit_fcp_prio_cfg;
@@ -198,9 +196,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
FCP_PRIO_CFG_SIZE);
/* validate fcp priority data */
- if (!qla24xx_fcp_prio_cfg_valid(
- (struct qla_fcp_prio_cfg *)
- ha->fcp_prio_cfg, 1)) {
+
+ if (!qla24xx_fcp_prio_cfg_valid(vha,
+ (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
bsg_job->reply->result = (DID_ERROR << 16);
ret = -EINVAL;
/* If buffer was invalidatic int
@@ -256,9 +254,8 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
/* pass through is supported only for ISP 4Gb or higher */
if (!IS_FWI2_CAPABLE(ha)) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld):ELS passthru not supported for ISP23xx based "
- "adapters\n", vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x7001,
+ "ELS passthru not supported for ISP23xx based adapters.\n");
rval = -EPERM;
goto done;
}
@@ -266,11 +263,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
/* Multiple SG's are not supported for ELS requests */
if (bsg_job->request_payload.sg_cnt > 1 ||
bsg_job->reply_payload.sg_cnt > 1) {
- DEBUG2(printk(KERN_INFO
- "multiple SG's are not supported for ELS requests"
- " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt,
- bsg_job->reply_payload.sg_cnt));
+ ql_dbg(ql_dbg_user, vha, 0x7002,
+ "Multiple SG's are not suppored for ELS requests, "
+ "request_sg_cnt=%x reply_sg_cnt=%x.\n",
+ bsg_job->request_payload.sg_cnt,
+ bsg_job->reply_payload.sg_cnt);
rval = -EPERM;
goto done;
}
@@ -281,9 +278,9 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
* if not perform fabric login
*/
if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "failed to login port %06X for ELS passthru\n",
- fcport->d_id.b24));
+ ql_dbg(ql_dbg_user, vha, 0x7003,
+ "Failed to login port %06X for ELS passthru.\n",
+ fcport->d_id.b24);
rval = -EIO;
goto done;
}
@@ -314,8 +311,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "host not online\n"));
+ ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
rval = -EIO;
goto done;
}
@@ -337,12 +333,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts \
- [request_sg_cnt: %x dma_request_sg_cnt: %x\
- reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+ ql_log(ql_log_warn, vha, 0x7008,
+ "dma mapping resulted in different sg counts, "
+ "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
+ "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
+ req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@@ -363,15 +358,16 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
"bsg_els_rpt" : "bsg_els_hst");
els->u.bsg_job = bsg_job;
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
- "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
- bsg_job->request->rqst_data.h_els.command_code,
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_user, vha, 0x700a,
+ "bsg rqst type: %s els type: %x - loop-id=%x "
+ "portid=%-2x%02x%02x.\n", type,
+ bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x700e,
+ "qla2x00_start_sp failed = %d\n", rval);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
@@ -411,6 +407,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!req_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x700f,
+ "dma_map_sg return %d for request\n", req_sg_cnt);
rval = -ENOMEM;
goto done;
}
@@ -418,24 +416,25 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (!rsp_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x7010,
+ "dma_map_sg return %d for reply\n", rsp_sg_cnt);
rval = -ENOMEM;
goto done;
}
if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "[request_sg_cnt: %x dma_request_sg_cnt: %x\
- reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
+ ql_log(ql_log_warn, vha, 0x7011,
+ "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
+ "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
+ req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "host not online\n"));
+ ql_log(ql_log_warn, vha, 0x7012,
+ "Host is not online.\n");
rval = -EIO;
goto done_unmap_sg;
}
@@ -451,8 +450,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
loop_id = vha->mgmt_svr_loop_id;
break;
default:
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Unknown loop id: %x\n", loop_id));
+ ql_dbg(ql_dbg_user, vha, 0x7013,
+ "Unknown loop id: %x.\n", loop_id);
rval = -EINVAL;
goto done_unmap_sg;
}
@@ -464,6 +463,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
*/
fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
+ ql_log(ql_log_warn, vha, 0x7014,
+ "Failed to allocate fcport.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@@ -479,6 +480,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
/* Alloc SRB structure */
sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
if (!sp) {
+ ql_log(ql_log_warn, vha, 0x7015,
+ "qla2x00_get_ctx_bsg_sp failed.\n");
rval = -ENOMEM;
goto done_free_fcport;
}
@@ -488,15 +491,17 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
ct->name = "bsg_ct";
ct->u.bsg_job = bsg_job;
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
- "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
- (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_user, vha, 0x7016,
+ "bsg rqst type: %s else type: %x - "
+ "loop-id=%x portid=%02x%02x%02x.\n", type,
+ (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7017,
+ "qla2x00_start_sp failed=%d.\n", rval);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
@@ -535,9 +540,8 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
ha->notify_dcbx_comp = 1;
ret = qla81xx_set_port_config(vha, new_config);
if (ret != QLA_SUCCESS) {
- DEBUG2(printk(KERN_ERR
- "%s(%lu): Set port config failed\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7021,
+ "set port config failed.\n");
ha->notify_dcbx_comp = 0;
rval = -EINVAL;
goto done_set_internal;
@@ -545,11 +549,11 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
/* Wait for DCBX complete event */
if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "State change notificaition not received.\n"));
+ ql_dbg(ql_dbg_user, vha, 0x7022,
+ "State change notification not received.\n");
} else
- DEBUG2(qla_printk(KERN_INFO, ha,
- "State change RECEIVED\n"));
+ ql_dbg(ql_dbg_user, vha, 0x7023,
+ "State change received.\n");
ha->notify_dcbx_comp = 0;
@@ -581,9 +585,8 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
ha->notify_dcbx_comp = wait;
ret = qla81xx_set_port_config(vha, new_config);
if (ret != QLA_SUCCESS) {
- DEBUG2(printk(KERN_ERR
- "%s(%lu): Set port config failed\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7025,
+ "Set port config failed.\n");
ha->notify_dcbx_comp = 0;
rval = -EINVAL;
goto done_reset_internal;
@@ -592,14 +595,14 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
/* Wait for DCBX complete event */
if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
(20 * HZ))) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "State change notificaition not received.\n"));
+ ql_dbg(ql_dbg_user, vha, 0x7026,
+ "State change notification not received.\n");
ha->notify_dcbx_comp = 0;
rval = -EINVAL;
goto done_reset_internal;
} else
- DEBUG2(qla_printk(KERN_INFO, ha,
- "State change RECEIVED\n"));
+ ql_dbg(ql_dbg_user, vha, 0x7027,
+ "State change received.\n");
ha->notify_dcbx_comp = 0;
}
@@ -629,11 +632,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n");
return -EBUSY;
+ }
if (!vha->flags.online) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
+ ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
return -EIO;
}
@@ -641,26 +646,31 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
DMA_TO_DEVICE);
- if (!elreq.req_sg_cnt)
+ if (!elreq.req_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x701a,
+ "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
return -ENOMEM;
+ }
elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
DMA_FROM_DEVICE);
if (!elreq.rsp_sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x701b,
+ "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
rval = -ENOMEM;
goto done_unmap_req_sg;
}
if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts "
- "[request_sg_cnt: %x dma_request_sg_cnt: %x "
- "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
- bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
- bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
+ ql_log(ql_log_warn, vha, 0x701c,
+ "dma mapping resulted in different sg counts, "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
+ bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
+ bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@@ -668,8 +678,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
&req_data_dma, GFP_KERNEL);
if (!req_data) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
- "failed for host=%lu\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x701d,
+ "dma alloc failed for req_data.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@@ -677,8 +687,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
&rsp_data_dma, GFP_KERNEL);
if (!rsp_data) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
- "failed for host=%lu\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7004,
+ "dma alloc failed for rsp_data.\n");
rval = -ENOMEM;
goto done_free_dma_req;
}
@@ -699,8 +709,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
elreq.options == EXTERNAL_LOOPBACK) {
type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
+ ql_dbg(ql_dbg_user, vha, 0x701e,
+ "BSG request type: %s.\n", type);
command_sent = INT_DEF_LB_ECHO_CMD;
rval = qla2x00_echo_test(vha, &elreq, response);
} else {
@@ -708,9 +718,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
memset(config, 0, sizeof(config));
memset(new_config, 0, sizeof(new_config));
if (qla81xx_get_port_config(vha, config)) {
- DEBUG2(printk(KERN_ERR
- "%s(%lu): Get port config failed\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x701f,
+ "Get port config failed.\n");
bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
rval = -EPERM;
@@ -718,11 +727,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
}
if (elreq.options != EXTERNAL_LOOPBACK) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Internal: current port config = %x\n",
- config[0]));
+ ql_dbg(ql_dbg_user, vha, 0x7020,
+ "Internal: curent port config = %x\n",
+ config[0]);
if (qla81xx_set_internal_loopback(vha, config,
new_config)) {
+ ql_log(ql_log_warn, vha, 0x7024,
+ "Internal loopback failed.\n");
bsg_job->reply->reply_payload_rcv_len =
0;
bsg_job->reply->result =
@@ -746,9 +757,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
}
type = "FC_BSG_HST_VENDOR_LOOPBACK";
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) bsg rqst type: %s\n",
- vha->host_no, type));
+ ql_dbg(ql_dbg_user, vha, 0x7028,
+ "BSG request type: %s.\n", type);
command_sent = INT_DEF_LB_LOOPBACK_CMD;
rval = qla2x00_loopback_test(vha, &elreq, response);
@@ -763,17 +773,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (response[0] == MBS_COMMAND_ERROR &&
response[1] == MBS_LB_RESET) {
- DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
- "ISP\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7029,
+ "MBX command error, Aborting ISP.\n");
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_chip_reset(vha);
/* Also reset the MPI */
if (qla81xx_restart_mpi_firmware(vha) !=
QLA_SUCCESS) {
- qla_printk(KERN_INFO, ha,
- "MPI reset failed for host%ld.\n",
- vha->host_no);
+ ql_log(ql_log_warn, vha, 0x702a,
+ "MPI reset failed.\n");
}
bsg_job->reply->reply_payload_rcv_len = 0;
@@ -783,17 +792,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
}
} else {
type = "FC_BSG_HST_VENDOR_LOOPBACK";
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) bsg rqst type: %s\n",
- vha->host_no, type));
+ ql_dbg(ql_dbg_user, vha, 0x702b,
+ "BSG request type: %s.\n", type);
command_sent = INT_DEF_LB_LOOPBACK_CMD;
rval = qla2x00_loopback_test(vha, &elreq, response);
}
}
if (rval) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request %s failed\n", vha->host_no, type));
+ ql_log(ql_log_warn, vha, 0x702c,
+ "Vendor request %s failed.\n", type);
fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
sizeof(struct fc_bsg_reply);
@@ -805,8 +813,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request %s completed\n", vha->host_no, type));
+ ql_dbg(ql_dbg_user, vha, 0x702d,
+ "Vendor request %s completed.\n", type);
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
sizeof(response) + sizeof(uint8_t);
@@ -851,12 +859,13 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n");
return -EBUSY;
+ }
if (!IS_QLA84XX(ha)) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
- "exiting.\n", vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
return -EINVAL;
}
@@ -865,14 +874,14 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
if (rval) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request 84xx reset failed\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7030,
+ "Vendor request 84xx reset failed.\n");
rval = bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request 84xx reset completed\n", vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x7031,
+ "Vendor request 84xx reset completed.\n");
bsg_job->reply->result = DID_OK;
}
@@ -902,21 +911,24 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
return -EBUSY;
if (!IS_QLA84XX(ha)) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
- "exiting.\n", vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x7032,
+ "Not 84xx, exiting.\n");
return -EINVAL;
}
sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
- if (!sg_cnt)
+ if (!sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x7033,
+ "dma_map_sg returned %d for request.\n", sg_cnt);
return -ENOMEM;
+ }
if (sg_cnt != bsg_job->request_payload.sg_cnt) {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts "
- "request_sg_cnt: %x dma_request_sg_cnt: %x ",
- bsg_job->request_payload.sg_cnt, sg_cnt));
+ ql_log(ql_log_warn, vha, 0x7034,
+ "DMA mapping resulted in different sg counts, "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
+ bsg_job->request_payload.sg_cnt, sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@@ -925,8 +937,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
&fw_dma, GFP_KERNEL);
if (!fw_buf) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
- "failed for host=%lu\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7035,
+ "DMA alloc failed for fw_buf.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@@ -936,8 +948,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
if (!mn) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
- "failed for host=%lu\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7036,
+ "DMA alloc failed for fw buffer.\n");
rval = -ENOMEM;
goto done_free_fw_buf;
}
@@ -965,15 +977,15 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
if (rval) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request 84xx updatefw failed\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7037,
+ "Vendor request 84xx updatefw failed.\n");
rval = bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request 84xx updatefw completed\n", vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x7038,
+ "Vendor request 84xx updatefw completed.\n");
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
bsg_job->reply->result = DID_OK;
@@ -1009,27 +1021,30 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ ql_log(ql_log_warn, vha, 0x7039,
+ "Abort active or needed.\n");
return -EBUSY;
+ }
if (!IS_QLA84XX(ha)) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
- "exiting.\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x703a,
+ "Not 84xx, exiting.\n");
return -EINVAL;
}
ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
sizeof(struct fc_bsg_request));
if (!ql84_mgmt) {
- DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x703b,
+ "MGMT header not provided, exiting.\n");
return -EINVAL;
}
mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
if (!mn) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
- "failed for host=%lu\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x703c,
+ "DMA alloc failed for fw buffer.\n");
return -ENOMEM;
}
@@ -1044,6 +1059,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (!sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x703d,
+ "dma_map_sg returned %d for reply.\n", sg_cnt);
rval = -ENOMEM;
goto exit_mgmt;
}
@@ -1051,10 +1068,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
dma_direction = DMA_FROM_DEVICE;
if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts "
- "reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
- bsg_job->reply_payload.sg_cnt, sg_cnt));
+ ql_log(ql_log_warn, vha, 0x703e,
+ "DMA mapping resulted in different sg counts, "
+ "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
+ bsg_job->reply_payload.sg_cnt, sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@@ -1064,9 +1081,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
&mgmt_dma, GFP_KERNEL);
if (!mgmt_b) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
- "failed for host=%lu\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x703f,
+ "DMA alloc failed for mgmt_b.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@@ -1094,6 +1110,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!sg_cnt) {
+ ql_log(ql_log_warn, vha, 0x7040,
+ "dma_map_sg returned %d.\n", sg_cnt);
rval = -ENOMEM;
goto exit_mgmt;
}
@@ -1101,10 +1119,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
dma_direction = DMA_TO_DEVICE;
if (sg_cnt != bsg_job->request_payload.sg_cnt) {
- DEBUG2(printk(KERN_INFO
- "dma mapping resulted in different sg counts "
- "request_sg_cnt: %x dma_request_sg_cnt: %x ",
- bsg_job->request_payload.sg_cnt, sg_cnt));
+ ql_log(ql_log_warn, vha, 0x7041,
+ "DMA mapping resulted in different sg counts, "
+ "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
+ bsg_job->request_payload.sg_cnt, sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@@ -1113,9 +1131,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
&mgmt_dma, GFP_KERNEL);
if (!mgmt_b) {
- DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
- "failed for host=%lu\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7042,
+ "DMA alloc failed for mgmt_b.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@@ -1156,15 +1173,15 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
if (rval) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request 84xx mgmt failed\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7043,
+ "Vendor request 84xx mgmt failed.\n");
rval = bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
- DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
- "request 84xx mgmt completed\n", vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x7044,
+ "Vendor request 84xx mgmt completed.\n");
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
bsg_job->reply->result = DID_OK;
@@ -1204,7 +1221,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
{
struct Scsi_Host *host = bsg_job->shost;
scsi_qla_host_t *vha = shost_priv(host);
- struct qla_hw_data *ha = vha->hw;
int rval = 0;
struct qla_port_param *port_param = NULL;
fc_port_t *fcport = NULL;
@@ -1215,26 +1231,27 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+ ql_log(ql_log_warn, vha, 0x7045, "Abort active or needed.\n");
return -EBUSY;
+ }
if (!IS_IIDMA_CAPABLE(vha->hw)) {
- DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
- "supported\n", __func__, vha->host_no));
+ ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
return -EINVAL;
}
port_param = (struct qla_port_param *)((char *)bsg_job->request +
sizeof(struct fc_bsg_request));
if (!port_param) {
- DEBUG2(printk("%s(%ld): port_param header not provided, "
- "exiting.\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7047,
+ "port_param header not provided.\n");
return -EINVAL;
}
if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
- DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7048,
+ "Invalid destination type.\n");
return -EINVAL;
}
@@ -1249,21 +1266,20 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
}
if (!fcport) {
- DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7049,
+ "Failed to find port.\n");
return -EINVAL;
}
if (atomic_read(&fcport->state) != FCS_ONLINE) {
- DEBUG2(printk(KERN_ERR "%s(%ld): Port not online\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x704a,
+ "Port is not online.\n");
return -EINVAL;
}
if (fcport->flags & FCF_LOGIN_NEEDED) {
- DEBUG2(printk(KERN_ERR "%s(%ld): Remote port not logged in, "
- "flags = 0x%x\n",
- __func__, vha->host_no, fcport->flags));
+ ql_log(ql_log_warn, vha, 0x704b,
+ "Remote port not logged in flags = 0x%x.\n", fcport->flags);
return -EINVAL;
}
@@ -1275,15 +1291,13 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
&port_param->speed, mb);
if (rval) {
- DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
- "%02x%02x%02x%02x%02x%02x%02x%02x -- "
- "%04x %x %04x %04x.\n",
- vha->host_no, fcport->port_name[0],
- fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7], rval,
- fcport->fp_speed, mb[0], mb[1]));
+ ql_log(ql_log_warn, vha, 0x704c,
+ "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
+ "%04x %x %04x %04x.\n", fcport->port_name[0],
+ fcport->port_name[1], fcport->port_name[2],
+ fcport->port_name[3], fcport->port_name[4],
+ fcport->port_name[5], fcport->port_name[6],
+ fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
@@ -1307,11 +1321,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
}
static int
-qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
+qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
uint8_t is_update)
{
uint32_t start = 0;
int valid = 0;
+ struct qla_hw_data *ha = vha->hw;
bsg_job->reply->reply_payload_rcv_len = 0;
@@ -1319,14 +1334,20 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
return -EINVAL;
start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
- if (start > ha->optrom_size)
+ if (start > ha->optrom_size) {
+ ql_log(ql_log_warn, vha, 0x7055,
+ "start %d > optrom_size %d.\n", start, ha->optrom_size);
return -EINVAL;
+ }
- if (ha->optrom_state != QLA_SWAITING)
+ if (ha->optrom_state != QLA_SWAITING) {
+ ql_log(ql_log_info, vha, 0x7056,
+ "optrom_state %d.\n", ha->optrom_state);
return -EBUSY;
+ }
ha->optrom_region_start = start;
-
+ ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
if (is_update) {
if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
valid = 1;
@@ -1337,9 +1358,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
IS_QLA8XXX_TYPE(ha))
valid = 1;
if (!valid) {
- qla_printk(KERN_WARNING, ha,
- "Invalid start region 0x%x/0x%x.\n",
- start, bsg_job->request_payload.payload_len);
+ ql_log(ql_log_warn, vha, 0x7058,
+ "Invalid start region 0x%x/0x%x.\n", start,
+ bsg_job->request_payload.payload_len);
return -EINVAL;
}
@@ -1358,9 +1379,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
if (!ha->optrom_buffer) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7059,
"Read: Unable to allocate memory for optrom retrieval "
- "(%x).\n", ha->optrom_region_size);
+ "(%x)\n", ha->optrom_region_size);
ha->optrom_state = QLA_SWAITING;
return -ENOMEM;
@@ -1378,7 +1399,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
struct qla_hw_data *ha = vha->hw;
int rval = 0;
- rval = qla2x00_optrom_setup(bsg_job, ha, 0);
+ rval = qla2x00_optrom_setup(bsg_job, vha, 0);
if (rval)
return rval;
@@ -1406,7 +1427,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
struct qla_hw_data *ha = vha->hw;
int rval = 0;
- rval = qla2x00_optrom_setup(bsg_job, ha, 1);
+ rval = qla2x00_optrom_setup(bsg_job, vha, 1);
if (rval)
return rval;
@@ -1464,6 +1485,23 @@ int
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
int ret = -EINVAL;
+ struct fc_rport *rport;
+ fc_port_t *fcport = NULL;
+ struct Scsi_Host *host;
+ scsi_qla_host_t *vha;
+
+ if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+ rport = bsg_job->rport;
+ fcport = *(fc_port_t **) rport->dd_data;
+ host = rport_to_shost(rport);
+ vha = shost_priv(host);
+ } else {
+ host = bsg_job->shost;
+ vha = shost_priv(host);
+ }
+
+ ql_dbg(ql_dbg_user, vha, 0x7000,
+ "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);
switch (bsg_job->request->msgcode) {
case FC_BSG_RPT_ELS:
@@ -1480,7 +1518,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
case FC_BSG_HST_DEL_RPORT:
case FC_BSG_RPT_CT:
default:
- DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
+ ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
break;
}
return ret;
@@ -1514,17 +1552,15 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
&& (sp_bsg->u.bsg_job == bsg_job)) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx "
- "abort_command failed\n",
- vha->host_no));
+ ql_log(ql_log_warn, vha, 0x7089,
+ "mbx abort_command "
+ "failed.\n");
bsg_job->req->errors =
bsg_job->reply->result = -EIO;
} else {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx "
- "abort_command success\n",
- vha->host_no));
+ ql_dbg(ql_dbg_user, vha, 0x708a,
+ "mbx abort_command "
+ "success.\n");
bsg_job->req->errors =
bsg_job->reply->result = 0;
}
@@ -1535,8 +1571,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
}
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld) SRB not found to abort\n", vha->host_no));
+ ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index c53719a9a74..2155071f310 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -4,10 +4,36 @@
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
+
+/*
+ * Table for showing the current message id in use for particular level
+ * Change this table for addition of log/debug messages.
+ * -----------------------------------------------------
+ * | Level | Last Value Used |
+ * -----------------------------------------------------
+ * | Module Init and Probe | 0x0116 |
+ * | Mailbox commands | 0x111e |
+ * | Device Discovery | 0x2083 |
+ * | Queue Command and IO tracing | 0x302e |
+ * | DPC Thread | 0x401c |
+ * | Async Events | 0x5059 |
+ * | Timer Routines | 0x600d |
+ * | User Space Interactions | 0x709c |
+ * | Task Management | 0x8043 |
+ * | AER/EEH | 0x900f |
+ * | Virtual Port | 0xa007 |
+ * | ISP82XX Specific | 0xb027 |
+ * | MultiQ | 0xc00b |
+ * | Misc | 0xd00b |
+ * -----------------------------------------------------
+ */
+
#include "qla_def.h"
#include <linux/delay.h>
+static uint32_t ql_dbg_offset = 0x800;
+
static inline void
qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
{
@@ -383,11 +409,11 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
struct qla_hw_data *ha = vha->hw;
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Failed to dump firmware (%x)!!!\n", rval);
+ ql_log(ql_log_warn, vha, 0xd000,
+ "Failed to dump firmware (%x).\n", rval);
ha->fw_dumped = 0;
} else {
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0xd001,
"Firmware dump saved to temp buffer (%ld/%p).\n",
vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
@@ -419,15 +445,16 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
- qla_printk(KERN_WARNING, ha,
- "No buffer available for dump!!!\n");
+ ql_log(ql_log_warn, vha, 0xd002,
+ "No buffer available for dump.\n");
goto qla2300_fw_dump_failed;
}
if (ha->fw_dumped) {
- qla_printk(KERN_WARNING, ha,
- "Firmware has been previously dumped (%p) -- ignoring "
- "request...\n", ha->fw_dump);
+ ql_log(ql_log_warn, vha, 0xd003,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
goto qla2300_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp23;
@@ -582,15 +609,16 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
- qla_printk(KERN_WARNING, ha,
- "No buffer available for dump!!!\n");
+ ql_log(ql_log_warn, vha, 0xd004,
+ "No buffer available for dump.\n");
goto qla2100_fw_dump_failed;
}
if (ha->fw_dumped) {
- qla_printk(KERN_WARNING, ha,
- "Firmware has been previously dumped (%p) -- ignoring "
- "request...\n", ha->fw_dump);
+ ql_log(ql_log_warn, vha, 0xd005,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
goto qla2100_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp21;
@@ -779,15 +807,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
- qla_printk(KERN_WARNING, ha,
- "No buffer available for dump!!!\n");
+ ql_log(ql_log_warn, vha, 0xd006,
+ "No buffer available for dump.\n");
goto qla24xx_fw_dump_failed;
}
if (ha->fw_dumped) {
- qla_printk(KERN_WARNING, ha,
- "Firmware has been previously dumped (%p) -- ignoring "
- "request...\n", ha->fw_dump);
+ ql_log(ql_log_warn, vha, 0xd007,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
goto qla24xx_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp24;
@@ -1017,15 +1046,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
- qla_printk(KERN_WARNING, ha,
- "No buffer available for dump!!!\n");
+ ql_log(ql_log_warn, vha, 0xd008,
+ "No buffer available for dump.\n");
goto qla25xx_fw_dump_failed;
}
if (ha->fw_dumped) {
- qla_printk(KERN_WARNING, ha,
- "Firmware has been previously dumped (%p) -- ignoring "
- "request...\n", ha->fw_dump);
+ ql_log(ql_log_warn, vha, 0xd009,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
goto qla25xx_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp25;
@@ -1328,15 +1358,16 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
- qla_printk(KERN_WARNING, ha,
- "No buffer available for dump!!!\n");
+ ql_log(ql_log_warn, vha, 0xd00a,
+ "No buffer available for dump.\n");
goto qla81xx_fw_dump_failed;
}
if (ha->fw_dumped) {
- qla_printk(KERN_WARNING, ha,
- "Firmware has been previously dumped (%p) -- ignoring "
- "request...\n", ha->fw_dump);
+ ql_log(ql_log_warn, vha, 0xd00b,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n",
+ ha->fw_dump);
goto qla81xx_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp81;
@@ -1619,106 +1650,255 @@ qla81xx_fw_dump_failed:
/****************************************************************************/
/* Driver Debug Functions. */
/****************************************************************************/
-
+/*
+ * This function is for formatting and logging debug information.
+ * It is to be used when vha is available. It formats the message
+ * and logs it to the messages file.
+ * parameters:
+ * level: The level of the debug messages to be printed.
+ * If the ql2xextended_error_logging value is correctly set,
+ * this message will appear in the messages file.
+ * vha: Pointer to the scsi_qla_host_t.
+ * id: This is a unique identifier for the level. It identifies the
+ * part of the code from where the message originated.
+ * msg: The message to be displayed.
+ */
void
-qla2x00_dump_regs(scsi_qla_host_t *vha)
-{
- int i;
- struct qla_hw_data *ha = vha->hw;
- struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
- uint16_t __iomem *mbx_reg;
+ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...)
+{
+
+ char pbuf[QL_DBG_BUF_LEN];
+ va_list ap;
+ uint32_t len;
+ struct pci_dev *pdev = NULL;
+
+ memset(pbuf, 0, QL_DBG_BUF_LEN);
+
+ va_start(ap, msg);
+
+ if ((level & ql2xextended_error_logging) == level) {
+ if (vha != NULL) {
+ pdev = vha->hw->pdev;
+ /* <module-name> <pci-name> <msg-id>:<host> Message */
+ sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
+ dev_name(&(pdev->dev)), id + ql_dbg_offset,
+ vha->host_no);
+ } else
+ sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
+ "0000:00:00.0", id + ql_dbg_offset);
+
+ len = strlen(pbuf);
+ vsprintf(pbuf+len, msg, ap);
+ pr_warning("%s", pbuf);
+ }
- mbx_reg = IS_FWI2_CAPABLE(ha) ? &reg24->mailbox0:
- MAILBOX_REG(ha, reg, 0);
+ va_end(ap);
- printk("Mailbox registers:\n");
- for (i = 0; i < 6; i++)
- printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i,
- RD_REG_WORD(mbx_reg++));
}
-
+/*
+ * This function is for formatting and logging debug information.
+ * It is to be used when vha is not available and pci is available,
+ * i.e., before host allocation. It formats the message and logs it
+ * to the messages file.
+ * parameters:
+ * level: The level of the debug messages to be printed.
+ * If the ql2xextended_error_logging value is correctly set,
+ * this message will appear in the messages file.
+ * pdev: Pointer to the struct pci_dev.
+ * id: This is a unique id for the level. It identifies the part
+ * of the code from where the message originated.
+ * msg: The message to be displayed.
+ */
void
-qla2x00_dump_buffer(uint8_t * b, uint32_t size)
-{
- uint32_t cnt;
- uint8_t c;
+ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...)
+{
- printk(" 0 1 2 3 4 5 6 7 8 9 "
- "Ah Bh Ch Dh Eh Fh\n");
- printk("----------------------------------------"
- "----------------------\n");
-
- for (cnt = 0; cnt < size;) {
- c = *b++;
- printk("%02x",(uint32_t) c);
- cnt++;
- if (!(cnt % 16))
- printk("\n");
- else
- printk(" ");
+ char pbuf[QL_DBG_BUF_LEN];
+ va_list ap;
+ uint32_t len;
+
+ if (pdev == NULL)
+ return;
+
+ memset(pbuf, 0, QL_DBG_BUF_LEN);
+
+ va_start(ap, msg);
+
+ if ((level & ql2xextended_error_logging) == level) {
+ /* <module-name> <dev-name>:<msg-id> Message */
+ sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
+ dev_name(&(pdev->dev)), id + ql_dbg_offset);
+
+ len = strlen(pbuf);
+ vsprintf(pbuf+len, msg, ap);
+ pr_warning("%s", pbuf);
}
- if (cnt % 16)
- printk("\n");
+
+ va_end(ap);
+
}
+/*
+ * This function is for formatting and logging log messages.
+ * It is to be used when vha is available. It formats the message
+ * and logs it to the messages file. All the messages will be logged
+ * irrespective of the value of ql2xextended_error_logging.
+ * parameters:
+ * level: The level of the log messages to be printed in the
+ * messages file.
+ * vha: Pointer to the scsi_qla_host_t
+ * id: This is a unique id for the level. It identifies the
+ * part of the code from where the message originated.
+ * msg: The message to be displayed.
+ */
void
-qla2x00_dump_buffer_zipped(uint8_t *b, uint32_t size)
-{
- uint32_t cnt;
- uint8_t c;
- uint8_t last16[16], cur16[16];
- uint32_t lc = 0, num_same16 = 0, j;
+ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...)
+{
- printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 "
- "Ah Bh Ch Dh Eh Fh\n");
- printk(KERN_DEBUG "----------------------------------------"
- "----------------------\n");
+ char pbuf[QL_DBG_BUF_LEN];
+ va_list ap;
+ uint32_t len;
+ struct pci_dev *pdev = NULL;
- for (cnt = 0; cnt < size;) {
- c = *b++;
+ memset(pbuf, 0, QL_DBG_BUF_LEN);
- cur16[lc++] = c;
+ va_start(ap, msg);
- cnt++;
- if (cnt % 16)
- continue;
-
- /* We have 16 now */
- lc = 0;
- if (num_same16 == 0) {
- memcpy(last16, cur16, 16);
- num_same16++;
- continue;
+ if (level <= ql_errlev) {
+ if (vha != NULL) {
+ pdev = vha->hw->pdev;
+ /* <module-name> <msg-id>:<host> Message */
+ sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
+ dev_name(&(pdev->dev)), id, vha->host_no);
+ } else
+ sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
+ "0000:00:00.0", id);
+
+ len = strlen(pbuf);
+ vsprintf(pbuf+len, msg, ap);
+
+ switch (level) {
+ case 0: /* FATAL LOG */
+ pr_crit("%s", pbuf);
+ break;
+ case 1:
+ pr_err("%s", pbuf);
+ break;
+ case 2:
+ pr_warn("%s", pbuf);
+ break;
+ default:
+ pr_info("%s", pbuf);
+ break;
}
- if (memcmp(cur16, last16, 16) == 0) {
- num_same16++;
- continue;
+ }
+
+ va_end(ap);
+}
+
+/*
+ * This function is for formatting and logging log messages.
+ * It is to be used when vha is not available and pci is available,
+ * i.e., before host allocation. It formats the message and logs
+ * it to the messages file. All the messages are logged irrespective
+ * of the value of ql2xextended_error_logging.
+ * parameters:
+ * level: The level of the log messages to be printed in the
+ * messages file.
+ * pdev: Pointer to the struct pci_dev.
+ * id: This is a unique id for the level. It identifies the
+ * part of the code from where the message originated.
+ * msg: The message to be displayed.
+ */
+void
+ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...)
+{
+
+ char pbuf[QL_DBG_BUF_LEN];
+ va_list ap;
+ uint32_t len;
+
+ if (pdev == NULL)
+ return;
+
+ memset(pbuf, 0, QL_DBG_BUF_LEN);
+
+ va_start(ap, msg);
+
+ if (level <= ql_errlev) {
+ /* <module-name> <dev-name>:<msg-id> Message */
+ sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
+ dev_name(&(pdev->dev)), id);
+
+ len = strlen(pbuf);
+ vsprintf(pbuf+len, msg, ap);
+ switch (level) {
+ case 0: /* FATAL LOG */
+ pr_crit("%s", pbuf);
+ break;
+ case 1:
+ pr_err("%s", pbuf);
+ break;
+ case 2:
+ pr_warn("%s", pbuf);
+ break;
+ default:
+ pr_info("%s", pbuf);
+ break;
}
- for (j = 0; j < 16; j++)
- printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
- printk(KERN_DEBUG "\n");
-
- if (num_same16 > 1)
- printk(KERN_DEBUG "> prev pattern repeats (%u)"
- "more times\n", num_same16-1);
- memcpy(last16, cur16, 16);
- num_same16 = 1;
}
- if (num_same16) {
- for (j = 0; j < 16; j++)
- printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
- printk(KERN_DEBUG "\n");
+ va_end(ap);
+}
- if (num_same16 > 1)
- printk(KERN_DEBUG "> prev pattern repeats (%u)"
- "more times\n", num_same16-1);
+void
+ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
+{
+ int i;
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
+ struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
+ uint16_t __iomem *mbx_reg;
+
+ if ((level & ql2xextended_error_logging) == level) {
+
+ if (IS_QLA82XX(ha))
+ mbx_reg = &reg82->mailbox_in[0];
+ else if (IS_FWI2_CAPABLE(ha))
+ mbx_reg = &reg24->mailbox0;
+ else
+ mbx_reg = MAILBOX_REG(ha, reg, 0);
+
+ ql_dbg(level, vha, id, "Mailbox registers:\n");
+ for (i = 0; i < 6; i++)
+ ql_dbg(level, vha, id,
+ "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
}
- if (lc) {
- for (j = 0; j < lc; j++)
- printk(KERN_DEBUG "%02x ", (uint32_t)cur16[j]);
- printk(KERN_DEBUG "\n");
+}
+
+
+void
+ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
+ uint8_t *b, uint32_t size)
+{
+ uint32_t cnt;
+ uint8_t c;
+ if ((level & ql2xextended_error_logging) == level) {
+
+ ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
+ "9 Ah Bh Ch Dh Eh Fh\n");
+ ql_dbg(level, vha, id, "----------------------------------"
+ "----------------------------\n");
+
+ ql_dbg(level, vha, id, "");
+ for (cnt = 0; cnt < size;) {
+ c = *b++;
+ printk("%02x", (uint32_t) c);
+ cnt++;
+ if (!(cnt % 16))
+ printk("\n");
+ else
+ printk(" ");
+ }
+ if (cnt % 16)
+ ql_dbg(level, vha, id, "\n");
}
}
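
The helpers added above give every message a uniform prefix and a single
filtering point. A sketch of what one call resolves to (the PCI address and
host number below are placeholders, not values from the patch):

	/*
	 * ql_dbg(ql_dbg_user, vha, 0x7000, "Entered %s.\n", __func__);
	 *
	 * is emitted only when
	 * (ql_dbg_user & ql2xextended_error_logging) == ql_dbg_user,
	 * and then appears in the log as:
	 *
	 *   qla2xxx [0000:05:00.0]-7800:0: Entered qla24xx_bsg_request.
	 *
	 * i.e. QL_MSGHDR, the PCI device name, the message id plus the
	 * 0x800 ql_dbg_offset, and the host number. ql_log() prints the id
	 * unmodified and is filtered by ql_errlev instead of the debug mask.
	 */
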
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 930414541ec..98a377b9901 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -8,146 +8,6 @@
#include "qla_def.h"
/*
- * Driver debug definitions.
- */
-/* #define QL_DEBUG_LEVEL_1 */ /* Output register accesses to COM1 */
-/* #define QL_DEBUG_LEVEL_2 */ /* Output error msgs to COM1 */
-/* #define QL_DEBUG_LEVEL_3 */ /* Output function trace msgs to COM1 */
-/* #define QL_DEBUG_LEVEL_4 */ /* Output NVRAM trace msgs to COM1 */
-/* #define QL_DEBUG_LEVEL_5 */ /* Output ring trace msgs to COM1 */
-/* #define QL_DEBUG_LEVEL_6 */ /* Output WATCHDOG timer trace to COM1 */
-/* #define QL_DEBUG_LEVEL_7 */ /* Output RISC load trace msgs to COM1 */
-/* #define QL_DEBUG_LEVEL_8 */ /* Output ring saturation msgs to COM1 */
-/* #define QL_DEBUG_LEVEL_9 */ /* Output IOCTL trace msgs */
-/* #define QL_DEBUG_LEVEL_10 */ /* Output IOCTL error msgs */
-/* #define QL_DEBUG_LEVEL_11 */ /* Output Mbx Cmd trace msgs */
-/* #define QL_DEBUG_LEVEL_12 */ /* Output IP trace msgs */
-/* #define QL_DEBUG_LEVEL_13 */ /* Output fdmi function trace msgs */
-/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
-/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
-/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
-/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
-/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
-
-/*
-* Macros use for debugging the driver.
-*/
-
-#define DEBUG(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-
-#if defined(QL_DEBUG_LEVEL_1)
-#define DEBUG1(x) do {x;} while (0)
-#else
-#define DEBUG1(x) do {} while (0)
-#endif
-
-#define DEBUG2(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-#define DEBUG2_3(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-#define DEBUG2_3_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-#define DEBUG2_9_10(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-#define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-#define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-#define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-#define DEBUG2_17(x) do { if (ql2xextended_error_logging) { x; } } while (0)
-
-#if defined(QL_DEBUG_LEVEL_3)
-#define DEBUG3(x) do {x;} while (0)
-#define DEBUG3_11(x) do {x;} while (0)
-#else
-#define DEBUG3(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_4)
-#define DEBUG4(x) do {x;} while (0)
-#else
-#define DEBUG4(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_5)
-#define DEBUG5(x) do {x;} while (0)
-#else
-#define DEBUG5(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_7)
-#define DEBUG7(x) do {x;} while (0)
-#else
-#define DEBUG7(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_9)
-#define DEBUG9(x) do {x;} while (0)
-#define DEBUG9_10(x) do {x;} while (0)
-#else
-#define DEBUG9(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_10)
-#define DEBUG10(x) do {x;} while (0)
-#define DEBUG9_10(x) do {x;} while (0)
-#else
-#define DEBUG10(x) do {} while (0)
- #if !defined(DEBUG9_10)
- #define DEBUG9_10(x) do {} while (0)
- #endif
-#endif
-
-#if defined(QL_DEBUG_LEVEL_11)
-#define DEBUG11(x) do{x;} while(0)
-#if !defined(DEBUG3_11)
-#define DEBUG3_11(x) do{x;} while(0)
-#endif
-#else
-#define DEBUG11(x) do{} while(0)
- #if !defined(QL_DEBUG_LEVEL_3)
- #define DEBUG3_11(x) do{} while(0)
- #endif
-#endif
-
-#if defined(QL_DEBUG_LEVEL_12)
-#define DEBUG12(x) do {x;} while (0)
-#else
-#define DEBUG12(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_13)
-#define DEBUG13(x) do {x;} while (0)
-#else
-#define DEBUG13(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_14)
-#define DEBUG14(x) do {x;} while (0)
-#else
-#define DEBUG14(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_15)
-#define DEBUG15(x) do {x;} while (0)
-#else
-#define DEBUG15(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_16)
-#define DEBUG16(x) do {x;} while (0)
-#else
-#define DEBUG16(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_17)
-#define DEBUG17(x) do {x;} while (0)
-#else
-#define DEBUG17(x) do {} while (0)
-#endif
-
-#if defined(QL_DEBUG_LEVEL_18)
-#define DEBUG18(x) do {if (ql2xextended_error_logging) x; } while (0)
-#else
-#define DEBUG18(x) do {} while (0)
-#endif
-
-
-/*
* Firmware Dump structure definition
*/
@@ -370,3 +230,50 @@ struct qla2xxx_fw_dump {
struct qla81xx_fw_dump isp81;
} isp;
};
+
+#define QL_MSGHDR "qla2xxx"
+
+#define ql_log_fatal 0 /* display fatal errors */
+#define ql_log_warn 1 /* display critical errors */
+#define ql_log_info 2 /* display all recovered errors */
+#define ql_log_all 3 /* This value is only used by ql_errlev.
+ * No messages will use this value.
+ * This should always be the highest value
+ * of all the log levels.
+ */
+
+extern int ql_errlev;
+
+void
+ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
+void
+ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
+
+void
+ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
+void
+ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
+
+/* Debug Levels */
+/* 0x40000000 is the maximum value any debug level can have,
+ * as ql2xextended_error_logging is of type signed int.
+ */
+#define ql_dbg_init 0x40000000 /* Init Debug */
+#define ql_dbg_mbx 0x20000000 /* MBX Debug */
+#define ql_dbg_disc 0x10000000 /* Device Discovery Debug */
+#define ql_dbg_io 0x08000000 /* IO Tracing Debug */
+#define ql_dbg_dpc 0x04000000 /* DPC Thread Debug */
+#define ql_dbg_async 0x02000000 /* Async events Debug */
+#define ql_dbg_timer 0x01000000 /* Timer Debug */
+#define ql_dbg_user 0x00800000 /* User Space Interactions Debug */
+#define ql_dbg_taskm 0x00400000 /* Task Management Debug */
+#define ql_dbg_aer 0x00200000 /* AER/EEH Debug */
+#define ql_dbg_multiq 0x00100000 /* MultiQ Debug */
+#define ql_dbg_p3p 0x00080000 /* P3P specific Debug */
+#define ql_dbg_vport 0x00040000 /* Virtual Port Debug */
+#define ql_dbg_buffer 0x00020000 /* For dumping the buffer/regs */
+#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
+ * covered by the upper categories
+ */
+
+#define QL_DBG_BUF_LEN 512
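
The debug category bits above are meant to be OR-ed together into
ql2xextended_error_logging; ql_dbg() prints only when every bit of its level
argument is present in that mask. A hedged sketch of the behaviour (the mask
value is only an example):

	/* enable user-space and discovery debugging only:
	 * ql_dbg_user | ql_dbg_disc == 0x00800000 | 0x10000000 == 0x10800000
	 */

	/* printed: ql_dbg_user is fully contained in the mask */
	ql_dbg(ql_dbg_user, vha, 0x7000, "user-space trace.\n");

	/* suppressed: ql_dbg_buffer is not in the mask, so
	 * (level & ql2xextended_error_logging) != level
	 */
	ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2078, "dump follows.\n");
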
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index a5a4e1275bf..0b4c2b794c6 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -64,7 +64,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
/* Pause tracing to flush FCE buffers. */
rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
if (rval)
- qla_printk(KERN_WARNING, ha,
+ ql_dbg(ql_dbg_user, vha, 0x705c,
"DebugFS: Unable to disable FCE (%d).\n", rval);
ha->flags.fce_enabled = 0;
@@ -92,7 +92,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
ha->fce_mb, &ha->fce_bufs);
if (rval) {
- qla_printk(KERN_WARNING, ha,
+ ql_dbg(ql_dbg_user, vha, 0x700d,
"DebugFS: Unable to reinitialize FCE (%d).\n", rval);
ha->flags.fce_enabled = 0;
}
@@ -125,8 +125,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
atomic_set(&qla2x00_dfs_root_count, 0);
qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
if (!qla2x00_dfs_root) {
- qla_printk(KERN_NOTICE, ha,
- "DebugFS: Unable to create root directory.\n");
+ ql_log(ql_log_warn, vha, 0x00f7,
+ "Unable to create debugfs root directory.\n");
goto out;
}
@@ -137,8 +137,8 @@ create_dir:
mutex_init(&ha->fce_mutex);
ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
if (!ha->dfs_dir) {
- qla_printk(KERN_NOTICE, ha,
- "DebugFS: Unable to create ha directory.\n");
+ ql_log(ql_log_warn, vha, 0x00f8,
+ "Unable to create debugfs ha directory.\n");
goto out;
}
@@ -148,8 +148,8 @@ create_nodes:
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
&dfs_fce_ops);
if (!ha->dfs_fce) {
- qla_printk(KERN_NOTICE, ha,
- "DebugFS: Unable to fce node.\n");
+ ql_log(ql_log_warn, vha, 0x00f9,
+ "Unable to create debugfs fce node.\n");
goto out;
}
out:
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b381224ae4..29b1a3e2823 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -185,7 +185,7 @@ extern int qla24xx_start_scsi(srb_t *sp);
int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
uint16_t, uint16_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
-extern uint16_t qla24xx_calc_iocbs(uint16_t);
+extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
extern int qla24xx_dif_start_scsi(srb_t *);
@@ -439,6 +439,9 @@ extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
extern void qla2x00_dump_regs(scsi_qla_host_t *);
extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
+extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
+extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
+ uint8_t *, uint32_t);
/*
* Global Function Prototypes in qla_gs.c source file.
@@ -478,7 +481,8 @@ extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16
extern int qla2x00_echo_test(scsi_qla_host_t *,
struct msg_echo_lb *, uint16_t *);
extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
-extern int qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *, uint8_t);
+extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
+ struct qla_fcp_prio_cfg *, uint8_t);
/*
* Global Function Prototypes in qla_dfs.c source file.
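
The qla_gbl.h hunk above also exports the new dump helpers, which take a debug
category and a message id ahead of the data, matching the call sites in the
qla_gs.c hunks that follow. A minimal sketch of the calling convention (0x20ff
is a placeholder id; the buffer arguments mirror the real call sites below):

	/* dump a CT response header at the discovery + buffer level */
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ff,
	    (uint8_t *)&ct_rsp->header, sizeof(struct ct_rsp_hdr));

	/* dump the mailbox registers at the same level */
	ql_dump_regs(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ff);
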
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 8cd9066ad90..37937aa3c3b 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -121,11 +121,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
rval = QLA_FUNCTION_FAILED;
if (ms_pkt->entry_status != 0) {
- DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status "
- "(%x) on port_id: %02x%02x%02x.\n",
- vha->host_no, routine, ms_pkt->entry_status,
- vha->d_id.b.domain, vha->d_id.b.area,
- vha->d_id.b.al_pa));
+ ql_dbg(ql_dbg_disc, vha, 0x2031,
+ "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
+ routine, ms_pkt->entry_status, vha->d_id.b.domain,
+ vha->d_id.b.area, vha->d_id.b.al_pa);
} else {
if (IS_FWI2_CAPABLE(ha))
comp_status = le16_to_cpu(
@@ -138,24 +137,24 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
case CS_DATA_OVERRUN: /* Overrun? */
if (ct_rsp->header.response !=
__constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
- DEBUG2_3(printk("scsi(%ld): %s failed, "
- "rejected request on port_id: %02x%02x%02x\n",
- vha->host_no, routine,
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
+ "%s failed rejected request on port_id: "
+ "%02x%02x%02x.\n", routine,
vha->d_id.b.domain, vha->d_id.b.area,
- vha->d_id.b.al_pa));
- DEBUG2_3(qla2x00_dump_buffer(
- (uint8_t *)&ct_rsp->header,
- sizeof(struct ct_rsp_hdr)));
+ vha->d_id.b.al_pa);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
+ 0x2078, (uint8_t *)&ct_rsp->header,
+ sizeof(struct ct_rsp_hdr));
rval = QLA_INVALID_COMMAND;
} else
rval = QLA_SUCCESS;
break;
default:
- DEBUG2_3(printk("scsi(%ld): %s failed, completion "
- "status (%x) on port_id: %02x%02x%02x.\n",
- vha->host_no, routine, comp_status,
+ ql_dbg(ql_dbg_disc, vha, 0x2033,
+ "%s failed, completion status (%x) on port_id: "
+ "%02x%02x%02x.\n", routine, comp_status,
vha->d_id.b.domain, vha->d_id.b.area,
- vha->d_id.b.al_pa));
+ vha->d_id.b.al_pa);
break;
}
}
@@ -202,8 +201,8 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2062,
+ "GA_NXT issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@@ -222,11 +221,10 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
fcport->d_id.b.domain = 0xf0;
- DEBUG2_3(printk("scsi(%ld): GA_NXT entry - "
- "nn %02x%02x%02x%02x%02x%02x%02x%02x "
+ ql_dbg(ql_dbg_disc, vha, 0x2063,
+ "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
- "portid=%02x%02x%02x.\n",
- vha->host_no,
+ "port_id=%02x%02x%02x.\n",
fcport->node_name[0], fcport->node_name[1],
fcport->node_name[2], fcport->node_name[3],
fcport->node_name[4], fcport->node_name[5],
@@ -236,7 +234,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->port_name[4], fcport->port_name[5],
fcport->port_name[6], fcport->port_name[7],
fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
+ fcport->d_id.b.al_pa);
}
return (rval);
@@ -287,8 +285,8 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2055,
+ "GID_PT issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@@ -364,8 +362,8 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed "
- "(%d).\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2056,
+ "GPN_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GPN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@@ -424,8 +422,8 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed "
- "(%d).\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2057,
+ "GNN_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GNN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@@ -434,11 +432,10 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
memcpy(list[i].node_name,
ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
- DEBUG2_3(printk("scsi(%ld): GID_PT entry - "
- "nn %02x%02x%02x%02x%02x%02x%02x%02x "
- "pn %02x%02x%02x%02x%02x%02x%02x%02x "
+ ql_dbg(ql_dbg_disc, vha, 0x2058,
+ "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x "
+ "pn %02x%02x%02x%02x%02x%02x%02X%02x "
"portid=%02x%02x%02x.\n",
- vha->host_no,
list[i].node_name[0], list[i].node_name[1],
list[i].node_name[2], list[i].node_name[3],
list[i].node_name[4], list[i].node_name[5],
@@ -448,7 +445,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
list[i].port_name[4], list[i].port_name[5],
list[i].port_name[6], list[i].port_name[7],
list[i].d_id.b.domain, list[i].d_id.b.area,
- list[i].d_id.b.al_pa));
+ list[i].d_id.b.al_pa);
}
/* Last device exit. */
@@ -499,14 +496,14 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2043,
+ "RFT_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2044,
+ "RFT_ID exiting normally.\n");
}
return (rval);
@@ -528,8 +525,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
struct ct_sns_rsp *ct_rsp;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on "
- "ISP2100/ISP2200.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2046,
+ "RFF_ID call not supported on ISP2100/ISP2200.\n");
return (QLA_SUCCESS);
}
@@ -556,14 +553,14 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2047,
+ "RFF_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2048,
+ "RFF_ID exiting normally.\n");
}
return (rval);
@@ -609,14 +606,14 @@ qla2x00_rnn_id(scsi_qla_host_t *vha)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x204d,
+ "RNN_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x204e,
+ "RNN_ID exiting normally.\n");
}
return (rval);
@@ -647,8 +644,8 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
struct ct_sns_rsp *ct_rsp;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on "
- "ISP2100/ISP2200.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2050,
+ "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
return (QLA_SUCCESS);
}
@@ -682,14 +679,14 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2051,
+ "RSNN_NN issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2052,
+ "RSNN_NN exiting normally.\n");
}
return (rval);
@@ -757,13 +754,14 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x205f,
+ "GA_NXT Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gan_data[8] != 0x80 ||
sns_cmd->p.gan_data[9] != 0x02) {
- DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, "
- "ga_nxt_rsp:\n", vha->host_no));
- DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16));
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d,
+ "GA_NXT failed, rejected request ga_nxt_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
+ sns_cmd->p.gan_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
/* Populate fc_port_t entry. */
@@ -778,11 +776,10 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
fcport->d_id.b.domain = 0xf0;
- DEBUG2_3(printk("scsi(%ld): GA_NXT entry - "
- "nn %02x%02x%02x%02x%02x%02x%02x%02x "
+ ql_dbg(ql_dbg_disc, vha, 0x2061,
+ "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
- "portid=%02x%02x%02x.\n",
- vha->host_no,
+ "port_id=%02x%02x%02x.\n",
fcport->node_name[0], fcport->node_name[1],
fcport->node_name[2], fcport->node_name[3],
fcport->node_name[4], fcport->node_name[5],
@@ -792,7 +789,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->port_name[4], fcport->port_name[5],
fcport->port_name[6], fcport->port_name[7],
fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
+ fcport->d_id.b.al_pa);
}
return (rval);
@@ -831,13 +828,14 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x206d,
+ "GID_PT Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gid_data[8] != 0x80 ||
sns_cmd->p.gid_data[9] != 0x02) {
- DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, "
- "gid_rsp:\n", vha->host_no));
- DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16));
+ ql_dbg(ql_dbg_disc, vha, 0x202f,
+ "GID_PT failed, rejected request, gid_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
+ sns_cmd->p.gid_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
/* Set port IDs in switch info list. */
@@ -900,13 +898,14 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed "
- "(%d).\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2032,
+ "GPN_ID Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gpn_data[8] != 0x80 ||
sns_cmd->p.gpn_data[9] != 0x02) {
- DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected "
- "request, gpn_rsp:\n", vha->host_no));
- DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16));
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
+ "GPN_ID failed, rejected request, gpn_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc, vha, 0x207f,
+ sns_cmd->p.gpn_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
/* Save portname */
@@ -955,24 +954,24 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed "
- "(%d).\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x203f,
+ "GNN_ID Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gnn_data[8] != 0x80 ||
sns_cmd->p.gnn_data[9] != 0x02) {
- DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected "
- "request, gnn_rsp:\n", vha->host_no));
- DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16));
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
+ "GNN_ID failed, rejected request, gnn_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
+ sns_cmd->p.gnn_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
/* Save nodename */
memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
WWN_SIZE);
- DEBUG2_3(printk("scsi(%ld): GID_PT entry - "
- "nn %02x%02x%02x%02x%02x%02x%02x%02x "
+ ql_dbg(ql_dbg_disc, vha, 0x206e,
+ "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
- "portid=%02x%02x%02x.\n",
- vha->host_no,
+ "port_id=%02x%02x%02x.\n",
list[i].node_name[0], list[i].node_name[1],
list[i].node_name[2], list[i].node_name[3],
list[i].node_name[4], list[i].node_name[5],
@@ -982,7 +981,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
list[i].port_name[4], list[i].port_name[5],
list[i].port_name[6], list[i].port_name[7],
list[i].d_id.b.domain, list[i].d_id.b.area,
- list[i].d_id.b.al_pa));
+ list[i].d_id.b.al_pa);
}
/* Last device exit. */
@@ -1025,17 +1024,18 @@ qla2x00_sns_rft_id(scsi_qla_host_t *vha)
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2060,
+ "RFT_ID Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.rft_data[8] != 0x80 ||
sns_cmd->p.rft_data[9] != 0x02) {
- DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, "
- "rft_rsp:\n", vha->host_no));
- DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16));
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
+ "RFT_ID failed, rejected request rft_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
+ sns_cmd->p.rft_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2073,
+ "RFT_ID exiting normally.\n");
}
return (rval);
@@ -1081,17 +1081,18 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x204a,
+ "RNN_ID Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.rnn_data[8] != 0x80 ||
sns_cmd->p.rnn_data[9] != 0x02) {
- DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, "
- "rnn_rsp:\n", vha->host_no));
- DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16));
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
+ "RNN_ID failed, rejected request, rnn_rsp:\n");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
+ sns_cmd->p.rnn_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x204c,
+ "RNN_ID exiting normally.\n");
}
return (rval);
@@ -1116,10 +1117,10 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
mb, BIT_1|BIT_0);
if (mb[0] != MBS_COMMAND_COMPLETE) {
- DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
- "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
- __func__, vha->host_no, vha->mgmt_svr_loop_id, mb[0], mb[1],
- mb[2], mb[6], mb[7]));
+ ql_dbg(ql_dbg_disc, vha, 0x2024,
+ "Failed management_server login: loopid=%x mb[0]=%x "
+ "mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
+ vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]);
ret = QLA_FUNCTION_FAILED;
} else
vha->flags.management_server_logged_in = 1;
@@ -1292,11 +1293,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
size += 4 + WWN_SIZE;
- DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n",
- __func__, vha->host_no,
- eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2],
- eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5],
- eiter->a.node_name[6], eiter->a.node_name[7]));
+ ql_dbg(ql_dbg_disc, vha, 0x2025,
+ "NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
+ eiter->a.node_name[0], eiter->a.node_name[1],
+ eiter->a.node_name[2], eiter->a.node_name[3],
+ eiter->a.node_name[4], eiter->a.node_name[5],
+ eiter->a.node_name[6], eiter->a.node_name[7]);
/* Manufacturer. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1307,8 +1309,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, vha->host_no,
- eiter->a.manufacturer));
+ ql_dbg(ql_dbg_disc, vha, 0x2026,
+ "Manufacturer = %s.\n", eiter->a.manufacturer);
/* Serial number. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1320,8 +1322,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, vha->host_no,
- eiter->a.serial_num));
+ ql_dbg(ql_dbg_disc, vha, 0x2027,
+ "Serial no. = %s.\n", eiter->a.serial_num);
/* Model name. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1332,8 +1334,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, vha->host_no,
- eiter->a.model));
+ ql_dbg(ql_dbg_disc, vha, 0x2028,
+ "Model Name = %s.\n", eiter->a.model);
/* Model description. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1345,8 +1347,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, vha->host_no,
- eiter->a.model_desc));
+ ql_dbg(ql_dbg_disc, vha, 0x2029,
+ "Model Desc = %s.\n", eiter->a.model_desc);
/* Hardware version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1357,8 +1359,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, vha->host_no,
- eiter->a.hw_version));
+ ql_dbg(ql_dbg_disc, vha, 0x202a,
+ "Hardware ver = %s.\n", eiter->a.hw_version);
/* Driver version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1369,8 +1371,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, vha->host_no,
- eiter->a.driver_version));
+ ql_dbg(ql_dbg_disc, vha, 0x202b,
+ "Driver ver = %s.\n", eiter->a.driver_version);
/* Option ROM version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1381,8 +1383,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, vha->host_no,
- eiter->a.orom_version));
+ ql_dbg(ql_dbg_disc, vha, 0x202c,
+ "Optrom vers = %s.\n", eiter->a.orom_version);
/* Firmware version */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1393,44 +1395,46 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, vha->host_no,
- eiter->a.fw_version));
+ ql_dbg(ql_dbg_disc, vha, 0x202d,
+ "Firmware vers = %s.\n", eiter->a.fw_version);
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
- DEBUG13(printk("%s(%ld): RHBA identifier="
- "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
- vha->host_no, ct_req->req.rhba.hba_identifier[0],
+ ql_dbg(ql_dbg_disc, vha, 0x202e,
+ "RHBA identifier = "
+ "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n",
+ ct_req->req.rhba.hba_identifier[0],
ct_req->req.rhba.hba_identifier[1],
ct_req->req.rhba.hba_identifier[2],
ct_req->req.rhba.hba_identifier[3],
ct_req->req.rhba.hba_identifier[4],
ct_req->req.rhba.hba_identifier[5],
ct_req->req.rhba.hba_identifier[6],
- ct_req->req.rhba.hba_identifier[7], size));
- DEBUG13(qla2x00_dump_buffer(entries, size));
+ ct_req->req.rhba.hba_identifier[7], size);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
+ entries, size);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2030,
+ "RHBA issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
ct_rsp->header.explanation_code ==
CT_EXPL_ALREADY_REGISTERED) {
- DEBUG2_13(printk("%s(%ld): HBA already registered.\n",
- __func__, vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2034,
+ "HBA already registered.\n");
rval = QLA_ALREADY_REGISTERED;
}
} else {
- DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2035,
+ "RHBA exiting normally.\n");
}
return rval;
@@ -1464,26 +1468,26 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
/* Prepare FDMI command arguments -- portname. */
memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
- DEBUG13(printk("%s(%ld): DHBA portname="
- "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, vha->host_no,
+ ql_dbg(ql_dbg_disc, vha, 0x2036,
+ "DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
- ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]));
+ ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2037,
+ "DHBA issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2038,
+ "DHBA exiting normally.\n");
}
return rval;
@@ -1534,9 +1538,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->a.fc4_types[2] = 0x01;
size += 4 + 32;
- DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__,
- vha->host_no, eiter->a.fc4_types[2],
- eiter->a.fc4_types[1]));
+ ql_dbg(ql_dbg_disc, vha, 0x2039,
+ "FC4_TYPES=%02x %02x.\n",
+ eiter->a.fc4_types[2],
+ eiter->a.fc4_types[1]);
/* Supported speed. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1561,8 +1566,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
FDMI_PORT_SPEED_1GB);
size += 4 + 4;
- DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, vha->host_no,
- eiter->a.sup_speed));
+ ql_dbg(ql_dbg_disc, vha, 0x203a,
+ "Supported_Speed=%x.\n", eiter->a.sup_speed);
/* Current speed. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1596,8 +1601,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
}
size += 4 + 4;
- DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, vha->host_no,
- eiter->a.cur_speed));
+ ql_dbg(ql_dbg_disc, vha, 0x203b,
+ "Current_Speed=%x.\n", eiter->a.cur_speed);
/* Max frame size. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1609,8 +1614,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
size += 4 + 4;
- DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, vha->host_no,
- eiter->a.max_frame_size));
+ ql_dbg(ql_dbg_disc, vha, 0x203c,
+ "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
/* OS device name. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1621,8 +1626,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, vha->host_no,
- eiter->a.os_dev_name));
+ ql_dbg(ql_dbg_disc, vha, 0x204b,
+ "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
/* Hostname. */
if (strlen(fc_host_system_hostname(vha->host))) {
@@ -1637,35 +1642,36 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__,
- vha->host_no, eiter->a.host_name));
+ ql_dbg(ql_dbg_disc, vha, 0x203d,
+ "HostName=%s.\n", eiter->a.host_name);
}
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
- DEBUG13(printk("%s(%ld): RPA portname="
- "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
- vha->host_no, ct_req->req.rpa.port_name[0],
- ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2],
- ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4],
- ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6],
- ct_req->req.rpa.port_name[7], size));
- DEBUG13(qla2x00_dump_buffer(entries, size));
+ ql_dbg(ql_dbg_disc, vha, 0x203e,
+ "RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n",
+ ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1],
+ ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3],
+ ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5],
+ ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7],
+ size);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
+ entries, size);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2040,
+ "RPA issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("scsi(%ld): RPA exiting normally.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2041,
+ "RPA exiting nornally.\n");
}
return rval;
@@ -1749,8 +1755,8 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB "
- "failed (%d).\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2023,
+ "GFPN_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GFPN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@@ -1860,8 +1866,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB "
- "failed (%d).\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2059,
+ "GPSC issue IOCB failed (%d).\n", rval);
} else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GPSC")) != QLA_SUCCESS) {
/* FM command unsupported? */
@@ -1870,9 +1876,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
CT_REASON_INVALID_COMMAND_CODE ||
ct_rsp->header.reason_code ==
CT_REASON_COMMAND_UNSUPPORTED)) {
- DEBUG2(printk("scsi(%ld): GPSC command "
- "unsupported, disabling query...\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x205a,
+ "GPSC command unsupported, disabling "
+ "query.\n");
ha->flags.gpsc_supported = 0;
rval = QLA_FUNCTION_FAILED;
break;
@@ -1898,9 +1904,10 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
break;
}
- DEBUG2_3(printk("scsi(%ld): GPSC ext entry - "
- "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
- "speed=%04x.\n", vha->host_no,
+ ql_dbg(ql_dbg_disc, vha, 0x205b,
+ "GPSC ext entry - fpn "
+ "%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
+ "speed=%04x.\n",
list[i].fabric_port_name[0],
list[i].fabric_port_name[1],
list[i].fabric_port_name[2],
@@ -1910,7 +1917,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
list[i].fabric_port_name[6],
list[i].fabric_port_name[7],
be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
- be16_to_cpu(ct_rsp->rsp.gpsc.speed)));
+ be16_to_cpu(ct_rsp->rsp.gpsc.speed));
}
/* Last device exit. */
@@ -1968,14 +1975,12 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
- DEBUG2_3(printk(KERN_INFO
- "scsi(%ld): GFF_ID issue IOCB failed "
- "(%d).\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x205c,
+ "GFF_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GFF_ID") != QLA_SUCCESS) {
- DEBUG2_3(printk(KERN_INFO
- "scsi(%ld): GFF_ID IOCB status had a "
- "failure status code\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x205d,
+ "GFF_ID IOCB status had a failure status code.\n");
} else {
fcp_scsi_features =
ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 920b76bfbb9..def694271bf 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -153,11 +153,10 @@ qla2x00_async_iocb_timeout(srb_t *sp)
fc_port_t *fcport = sp->fcport;
struct srb_ctx *ctx = sp->ctx;
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s timeout - portid=%02x%02x%02x.\n",
- fcport->vha->host_no, sp->handle,
- ctx->name, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
+ "Async-%s timeout - portid=%02x%02x%02x.\n",
+ ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
fcport->flags &= ~FCF_ASYNC_SENT;
if (ctx->type == SRB_LOGIN_CMD) {
@@ -211,11 +210,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
if (rval != QLA_SUCCESS)
goto done_free_sp;
- DEBUG2(printk(KERN_DEBUG
- "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x "
- "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
- fcport->login_retry));
+ ql_dbg(ql_dbg_disc, vha, 0x2072,
+ "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n",
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, fcport->login_retry);
return rval;
done_free_sp:
@@ -259,10 +257,10 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
if (rval != QLA_SUCCESS)
goto done_free_sp;
- DEBUG2(printk(KERN_DEBUG
- "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
- fcport->vha->host_no, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_disc, vha, 0x2070,
+ "Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -309,11 +307,10 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
if (rval != QLA_SUCCESS)
goto done_free_sp;
- DEBUG2(printk(KERN_DEBUG
- "scsi(%ld:%x): Async-adisc - loop-id=%x portid=%02x%02x%02x.\n",
- fcport->vha->host_no, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
-
+ ql_dbg(ql_dbg_disc, vha, 0x206f,
+ "Async-adisc - loopid=%x portid=%02x%02x%02x.\n",
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -362,11 +359,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
if (rval != QLA_SUCCESS)
goto done_free_sp;
- DEBUG2(printk(KERN_DEBUG
- "scsi(%ld:%x): Async-tmf - loop-id=%x portid=%02x%02x%02x.\n",
- fcport->vha->host_no, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
-
+ ql_dbg(ql_dbg_taskm, vha, 0x802f,
+ "Async-tmf loop-id=%x portid=%02x%02x%02x.\n",
+ fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -471,9 +467,8 @@ qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
- DEBUG2_3_11(printk(KERN_WARNING
- "%s(%ld): TM IOCB failed (%x).\n",
- __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_taskm, vha, 0x8030,
+ "TM IOCB failed (%x).\n", rval);
}
return;
@@ -519,11 +514,12 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
set_bit(0, ha->req_qid_map);
set_bit(0, ha->rsp_qid_map);
- qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
+ ql_log(ql_log_info, vha, 0x0040,
+ "Configuring PCI space...\n");
rval = ha->isp_ops->pci_config(vha);
if (rval) {
- DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
- vha->host_no));
+ ql_log(ql_log_warn, vha, 0x0044,
+ "Unable to configure PCI space.\n");
return (rval);
}
@@ -531,20 +527,21 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
rval = qla2xxx_get_flash_info(vha);
if (rval) {
- DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
- vha->host_no));
+ ql_log(ql_log_fatal, vha, 0x004f,
+ "Unable to validate FLASH data.\n");
return (rval);
}
ha->isp_ops->get_flash_version(vha, req->ring);
-
- qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
+ ql_log(ql_log_info, vha, 0x0061,
+ "Configure NVRAM parameters...\n");
ha->isp_ops->nvram_config(vha);
if (ha->flags.disable_serdes) {
/* Mask HBA via NVRAM settings? */
- qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
+ ql_log(ql_log_info, vha, 0x0077,
+ "Masking HBA WWPN "
"%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
vha->port_name[0], vha->port_name[1],
vha->port_name[2], vha->port_name[3],
@@ -553,7 +550,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
}
- qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
+ ql_log(ql_log_info, vha, 0x0078,
+ "Verifying loaded RISC code...\n");
if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
rval = ha->isp_ops->chip_diag(vha);
@@ -567,7 +565,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (IS_QLA84XX(ha)) {
ha->cs84xx = qla84xx_get_chip(vha);
if (!ha->cs84xx) {
- qla_printk(KERN_ERR, ha,
+ ql_log(ql_log_warn, vha, 0x00d0,
"Unable to configure ISP84XX.\n");
return QLA_FUNCTION_FAILED;
}
@@ -579,8 +577,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
/* Issue verify 84xx FW IOCB to complete 84xx initialization */
rval = qla84xx_init_chip(vha);
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_ERR, ha,
- "Unable to initialize ISP84XX.\n");
+ ql_log(ql_log_warn, vha, 0x00d4,
+ "Unable to initialize ISP84XX.\n");
qla84xx_put_chip(vha);
}
}
@@ -797,9 +795,7 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
rval = QLA_FUNCTION_FAILED;
if (ha->flags.disable_risc_code_load) {
- DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
- vha->host_no));
- qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
+ ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
/* Verify checksum of loaded RISC code. */
rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
@@ -810,10 +806,9 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
}
}
- if (rval) {
- DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
- vha->host_no));
- }
+ if (rval)
+ ql_dbg(ql_dbg_init, vha, 0x007a,
+ "**** Load RISC code ****.\n");
return (rval);
}
@@ -1105,8 +1100,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
/* Assume a failed state */
rval = QLA_FUNCTION_FAILED;
- DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
- vha->host_no, (u_long)&reg->flash_address));
+ ql_dbg(ql_dbg_init, vha, 0x007b,
+ "Testing device at %lx.\n", (u_long)&reg->flash_address);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1128,8 +1123,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
if (!cnt)
goto chip_diag_failed;
- DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
- vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x007c,
+ "Reset register cleared by chip reset.\n");
/* Reset RISC processor. */
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -1150,7 +1145,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
goto chip_diag_failed;
/* Check product ID of chip */
- DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
mb[1] = RD_MAILBOX_REG(ha, reg, 1);
mb[2] = RD_MAILBOX_REG(ha, reg, 2);
@@ -1158,8 +1153,9 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
mb[3] != PROD_ID_3) {
- qla_printk(KERN_WARNING, ha,
- "Wrong product ID = 0x%x,0x%x,0x%x\n", mb[1], mb[2], mb[3]);
+ ql_log(ql_log_warn, vha, 0x0062,
+ "Wrong product ID = 0x%x,0x%x,0x%x.\n",
+ mb[1], mb[2], mb[3]);
goto chip_diag_failed;
}
@@ -1178,8 +1174,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
if (IS_QLA2200(ha) &&
RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
/* Limit firmware transfer size with a 2200A */
- DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A chip.\n");
ha->device_type |= DT_ISP2200A;
ha->fw_transfer_size = 128;
@@ -1188,24 +1183,20 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
/* Wrap Incoming Mailboxes Test. */
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
rval = qla2x00_mbx_reg_test(vha);
- if (rval) {
- DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
- vha->host_no));
- qla_printk(KERN_WARNING, ha,
- "Failed mailbox send register test\n");
- }
- else {
+ if (rval)
+ ql_log(ql_log_warn, vha, 0x0080,
+ "Failed mailbox send register test.\n");
+ else
/* Flag a successful rval */
rval = QLA_SUCCESS;
- }
spin_lock_irqsave(&ha->hardware_lock, flags);
chip_diag_failed:
if (rval)
- DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
- "****\n", vha->host_no));
+ ql_log(ql_log_info, vha, 0x0081,
+ "Chip diagnostics **** FAILED ****.\n");
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1232,10 +1223,8 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
rval = qla2x00_mbx_reg_test(vha);
if (rval) {
- DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
- vha->host_no));
- qla_printk(KERN_WARNING, ha,
- "Failed mailbox send register test\n");
+ ql_log(ql_log_warn, vha, 0x0082,
+ "Failed mailbox send register test.\n");
} else {
/* Flag a successful rval */
rval = QLA_SUCCESS;
@@ -1257,8 +1246,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
struct rsp_que *rsp = ha->rsp_q_map[0];
if (ha->fw_dump) {
- qla_printk(KERN_WARNING, ha,
- "Firmware dump previously allocated.\n");
+ ql_dbg(ql_dbg_init, vha, 0x00bd,
+ "Firmware dump already allocated.\n");
return;
}
@@ -1288,8 +1277,9 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
GFP_KERNEL);
if (!tc) {
- qla_printk(KERN_WARNING, ha, "Unable to allocate "
- "(%d KB) for FCE.\n", FCE_SIZE / 1024);
+ ql_log(ql_log_warn, vha, 0x00be,
+ "Unable to allocate (%d KB) for FCE.\n",
+ FCE_SIZE / 1024);
goto try_eft;
}
@@ -1297,16 +1287,15 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
ha->fce_mb, &ha->fce_bufs);
if (rval) {
- qla_printk(KERN_WARNING, ha, "Unable to initialize "
- "FCE (%d).\n", rval);
+ ql_log(ql_log_warn, vha, 0x00bf,
+ "Unable to initialize FCE (%d).\n", rval);
dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
tc_dma);
ha->flags.fce_enabled = 0;
goto try_eft;
}
-
- qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
- FCE_SIZE / 1024);
+ ql_log(ql_log_info, vha, 0x00c0,
+ "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
ha->flags.fce_enabled = 1;
@@ -1317,23 +1306,23 @@ try_eft:
tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
GFP_KERNEL);
if (!tc) {
- qla_printk(KERN_WARNING, ha, "Unable to allocate "
- "(%d KB) for EFT.\n", EFT_SIZE / 1024);
+ ql_log(ql_log_warn, vha, 0x00c1,
+ "Unable to allocate (%d KB) for EFT.\n",
+ EFT_SIZE / 1024);
goto cont_alloc;
}
memset(tc, 0, EFT_SIZE);
rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
if (rval) {
- qla_printk(KERN_WARNING, ha, "Unable to initialize "
- "EFT (%d).\n", rval);
+ ql_log(ql_log_warn, vha, 0x00c2,
+ "Unable to initialize EFT (%d).\n", rval);
dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
tc_dma);
goto cont_alloc;
}
-
- qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
- EFT_SIZE / 1024);
+ ql_log(ql_log_info, vha, 0x00c3,
+ "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
eft_size = EFT_SIZE;
ha->eft_dma = tc_dma;
@@ -1350,8 +1339,9 @@ cont_alloc:
ha->fw_dump = vmalloc(dump_size);
if (!ha->fw_dump) {
- qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
- "firmware dump!!!\n", dump_size / 1024);
+ ql_log(ql_log_warn, vha, 0x00c4,
+ "Unable to allocate (%d KB) for firmware dump.\n",
+ dump_size / 1024);
if (ha->fce) {
dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
@@ -1368,8 +1358,8 @@ cont_alloc:
}
return;
}
- qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
- dump_size / 1024);
+ ql_log(ql_log_info, vha, 0x00c5,
+ "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
ha->fw_dump_len = dump_size;
ha->fw_dump->signature[0] = 'Q';
@@ -1398,23 +1388,21 @@ qla81xx_mpi_sync(scsi_qla_host_t *vha)
int rval;
uint16_t dc;
uint32_t dw;
- struct qla_hw_data *ha = vha->hw;
if (!IS_QLA81XX(vha->hw))
return QLA_SUCCESS;
rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "Sync-MPI: Unable to acquire semaphore.\n"));
+ ql_log(ql_log_warn, vha, 0x0105,
+ "Unable to acquire semaphore.\n");
goto done;
}
pci_read_config_word(vha->hw->pdev, 0x54, &dc);
rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "Sync-MPI: Unable to read sync.\n"));
+ ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
goto done_release;
}
@@ -1426,15 +1414,14 @@ qla81xx_mpi_sync(scsi_qla_host_t *vha)
dw |= dc;
rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "Sync-MPI: Unable to gain sync.\n"));
+ ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
}
done_release:
rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
if (rval != QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "Sync-MPI: Unable to release semaphore.\n"));
+ ql_log(ql_log_warn, vha, 0x006d,
+ "Unable to release semaphore.\n");
}
done:
@@ -1479,14 +1466,14 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
/* Load firmware sequences */
rval = ha->isp_ops->load_risc(vha, &srisc_address);
if (rval == QLA_SUCCESS) {
- DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
- "code.\n", vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x00c9,
+ "Verifying Checksum of loaded RISC code.\n");
rval = qla2x00_verify_checksum(vha, srisc_address);
if (rval == QLA_SUCCESS) {
/* Start firmware execution. */
- DEBUG(printk("scsi(%ld): Checksum OK, start "
- "firmware.\n", vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x00ca,
+ "Starting firmware.\n");
rval = qla2x00_execute_fw(vha, srisc_address);
/* Retrieve firmware information. */
@@ -1522,9 +1509,9 @@ enable_82xx_npiv:
}
}
} else {
- DEBUG2(printk(KERN_INFO
- "scsi(%ld): ISP Firmware failed checksum.\n",
- vha->host_no));
+ ql_log(ql_log_fatal, vha, 0x00cd,
+ "ISP Firmware failed checksum.\n");
+ goto failed;
}
}
@@ -1549,7 +1536,7 @@ enable_82xx_npiv:
ha->flags.fac_supported = 1;
ha->fdt_block_size = size << 2;
} else {
- qla_printk(KERN_ERR, ha,
+ ql_log(ql_log_warn, vha, 0x00ce,
"Unsupported FAC firmware (%d.%02d.%02d).\n",
ha->fw_major_version, ha->fw_minor_version,
ha->fw_subminor_version);
@@ -1557,8 +1544,8 @@ enable_82xx_npiv:
}
failed:
if (rval) {
- DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
- vha->host_no));
+ ql_log(ql_log_fatal, vha, 0x00cf,
+ "Setup chip ****FAILED****.\n");
}
return (rval);
@@ -1608,10 +1595,11 @@ qla2x00_update_fw_options(scsi_qla_host_t *vha)
return;
/* Serial Link options. */
- DEBUG3(printk("scsi(%ld): Serial link options:\n",
- vha->host_no));
- DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
- sizeof(ha->fw_seriallink_options)));
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
+ "Serial link options.\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
+ (uint8_t *)&ha->fw_seriallink_options,
+ sizeof(ha->fw_seriallink_options));
ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
if (ha->fw_seriallink_options[3] & BIT_2) {
@@ -1688,7 +1676,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
le16_to_cpu(ha->fw_seriallink_options24[2]),
le16_to_cpu(ha->fw_seriallink_options24[3]));
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x0104,
"Unable to update Serial Link options (%x).\n", rval);
}
}
@@ -1746,8 +1734,9 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->rid = __constant_cpu_to_le16(rid);
if (ha->flags.msix_enabled) {
msix = &ha->msix_entries[1];
- DEBUG2_17(printk(KERN_INFO
- "Registering vector 0x%x for base que\n", msix->entry));
+ ql_dbg(ql_dbg_init, vha, 0x00fd,
+ "Registering vector 0x%x for base que.\n",
+ msix->entry);
icb->msix = cpu_to_le16(msix->entry);
}
/* Use alternate PCI bus number */
@@ -1764,8 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->firmware_options_2 &=
__constant_cpu_to_le32(~BIT_22);
ha->flags.disable_msix_handshake = 1;
- qla_printk(KERN_INFO, ha,
- "MSIX Handshake Disable Mode turned on\n");
+ ql_dbg(ql_dbg_init, vha, 0x00fe,
+ "MSIX Handshake Disable Mode turned on.\n");
} else {
icb->firmware_options_2 |=
__constant_cpu_to_le32(BIT_22);
@@ -1850,7 +1839,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
/* Update any ISP specific firmware options before initialization. */
ha->isp_ops->update_fw_options(vha);
- DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
if (ha->flags.npiv_supported) {
if (ha->operating_mode == LOOP)
@@ -1866,11 +1855,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
if (rval) {
- DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
- vha->host_no));
+ ql_log(ql_log_fatal, vha, 0x00d2,
+ "Init Firmware **** FAILED ****.\n");
} else {
- DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_init, vha, 0x00d3,
+ "Init Firmware -- success.\n");
}
return (rval);
@@ -1913,10 +1902,8 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
/* Wait for ISP to finish LIP */
if (!vha->flags.init_done)
- qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");
-
- DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
- vha->host_no));
+ ql_log(ql_log_info, vha, 0x801e,
+ "Waiting for LIP to complete.\n");
do {
rval = qla2x00_get_firmware_state(vha, state);
@@ -1925,30 +1912,35 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
vha->device_flags &= ~DFLG_NO_CABLE;
}
if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
- DEBUG16(printk("scsi(%ld): fw_state=%x "
- "84xx=%x.\n", vha->host_no, state[0],
- state[2]));
+ ql_dbg(ql_dbg_taskm, vha, 0x801f,
+ "fw_state=%x 84xx=%x.\n", state[0],
+ state[2]);
if ((state[2] & FSTATE_LOGGED_IN) &&
(state[2] & FSTATE_WAITING_FOR_VERIFY)) {
- DEBUG16(printk("scsi(%ld): Sending "
- "verify iocb.\n", vha->host_no));
+ ql_dbg(ql_dbg_taskm, vha, 0x8028,
+ "Sending verify iocb.\n");
cs84xx_time = jiffies;
rval = qla84xx_init_chip(vha);
- if (rval != QLA_SUCCESS)
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn,
+ vha, 0x8043,
+ "Init chip failed.\n");
break;
+ }
/* Add time taken to initialize. */
cs84xx_time = jiffies - cs84xx_time;
wtime += cs84xx_time;
mtime += cs84xx_time;
- DEBUG16(printk("scsi(%ld): Increasing "
- "wait time by %ld. New time %ld\n",
- vha->host_no, cs84xx_time, wtime));
+ ql_dbg(ql_dbg_taskm, vha, 0x8042,
+ "Increasing wait time by %ld. "
+ "New time %ld.\n", cs84xx_time,
+ wtime);
}
} else if (state[0] == FSTATE_READY) {
- DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
- vha->host_no));
+ ql_dbg(ql_dbg_taskm, vha, 0x8037,
+ "F/W Ready - OK.\n");
qla2x00_get_retry_cnt(vha, &ha->retry_count,
&ha->login_timeout, &ha->r_a_tov);
@@ -1965,7 +1957,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
* other than Wait for Login.
*/
if (time_after_eq(jiffies, mtime)) {
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x8038,
"Cable is unplugged...\n");
vha->device_flags |= DFLG_NO_CABLE;
@@ -1985,17 +1977,17 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
/* Delay for a while */
msleep(500);
- DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
- vha->host_no, state[0], jiffies));
+ ql_dbg(ql_dbg_taskm, vha, 0x8039,
+ "fw_state=%x curr time=%lx.\n", state[0], jiffies);
} while (1);
- DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
- vha->host_no, state[0], state[1], state[2], state[3], state[4],
- jiffies));
+ ql_dbg(ql_dbg_taskm, vha, 0x803a,
+ "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
+ state[1], state[2], state[3], state[4], jiffies);
if (rval) {
- DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
- vha->host_no));
+ ql_log(ql_log_warn, vha, 0x803b,
+ "Firmware ready **** FAILED ****.\n");
}
return (rval);
@@ -2034,19 +2026,19 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
IS_QLA8XXX_TYPE(ha) ||
(rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
- DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
- __func__, vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2008,
+ "Loop is in a transition state.\n");
} else {
- qla_printk(KERN_WARNING, ha,
- "ERROR -- Unable to get host loop ID.\n");
+ ql_log(ql_log_warn, vha, 0x2009,
+ "Unable to get host loop ID.\n");
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
return (rval);
}
if (topo == 4) {
- qla_printk(KERN_INFO, ha,
- "Cannot get topology - retrying.\n");
+ ql_log(ql_log_info, vha, 0x200a,
+ "Cannot get topology - retrying.\n");
return (QLA_FUNCTION_FAILED);
}
@@ -2059,31 +2051,27 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
switch (topo) {
case 0:
- DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
ha->current_topology = ISP_CFG_NL;
strcpy(connect_type, "(Loop)");
break;
case 1:
- DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
ha->switch_cap = sw_cap;
ha->current_topology = ISP_CFG_FL;
strcpy(connect_type, "(FL_Port)");
break;
case 2:
- DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
ha->operating_mode = P2P;
ha->current_topology = ISP_CFG_N;
strcpy(connect_type, "(N_Port-to-N_Port)");
break;
case 3:
- DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
ha->switch_cap = sw_cap;
ha->operating_mode = P2P;
ha->current_topology = ISP_CFG_F;
@@ -2091,9 +2079,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
break;
default:
- DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
- "Using NL.\n",
- vha->host_no, topo));
+ ql_dbg(ql_dbg_disc, vha, 0x200f,
+ "HBA in unknown topology %x, using NL.\n", topo);
ha->current_topology = ISP_CFG_NL;
strcpy(connect_type, "(Loop)");
break;
@@ -2106,14 +2093,16 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
vha->d_id.b.al_pa = al_pa;
if (!vha->flags.init_done)
- qla_printk(KERN_INFO, ha,
- "Topology - %s, Host Loop address 0x%x\n",
+ ql_log(ql_log_info, vha, 0x2010,
+ "Topology - %s, Host Loop address 0x%x.\n",
connect_type, vha->loop_id);
if (rval) {
- DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x2011,
+ "%s FAILED\n", __func__);
} else {
- DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2012,
+ "%s success\n", __func__);
}
return(rval);
@@ -2227,18 +2216,22 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
chksum += *ptr++;
- DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
- DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
+ "Contents of NVRAM.\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
+ (uint8_t *)nv, ha->nvram_size);
/* Bad NVRAM data, set defaults parameters. */
if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
/* Reset NVRAM data. */
- qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
- "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
- nv->nvram_version);
- qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
- "invalid -- WWPN) defaults.\n");
+ ql_log(ql_log_warn, vha, 0x0064,
+ "Inconisistent NVRAM "
+ "detected: checksum=0x%x id=%c version=0x%x.\n",
+ chksum, nv->id[0], nv->nvram_version);
+ ql_log(ql_log_warn, vha, 0x0065,
+ "Falling back to "
+ "functioning (yet invalid -- WWPN) defaults.\n");
/*
* Set default initialization control block.
@@ -2382,8 +2375,13 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
/*
* Set host adapter parameters.
*/
+
+ /*
+ * BIT_7 in the host-parameters section allows for modification to
+ * internal driver logging.
+ */
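+ /*
+ * ql2xextended_error_logging is a bitmask of ql_dbg_* message
+ * categories (see the "& ql_dbg_disc" test in
+ * qla2x00_configure_local_loop); 0x7fffffff enables every category.
+ */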
if (nv->host_p[0] & BIT_7)
- ql2xextended_error_logging = 1;
+ ql2xextended_error_logging = 0x7fffffff;
ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
/* Always load RISC code on non ISP2[12]00 chips. */
if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
@@ -2488,10 +2486,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
if (ha->zio_mode != QLA_ZIO_DISABLED) {
ha->zio_mode = QLA_ZIO_MODE_6;
- DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
- "delay (%d us).\n", vha->host_no, ha->zio_mode,
- ha->zio_timer * 100));
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x0068,
"ZIO mode %d enabled; timer delay (%d us).\n",
ha->zio_mode, ha->zio_timer * 100);
@@ -2502,8 +2497,8 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
}
if (rval) {
- DEBUG2_3(printk(KERN_WARNING
- "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x0069,
+ "NVRAM configuration failed.\n");
}
return (rval);
}
@@ -2574,15 +2569,15 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
rval = qla2x00_configure_hba(vha);
if (rval != QLA_SUCCESS) {
- DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2013,
+ "Unable to configure HBA.\n");
return (rval);
}
}
save_flags = flags = vha->dpc_flags;
- DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
- vha->host_no, flags));
+ ql_dbg(ql_dbg_disc, vha, 0x2014,
+ "Configure loop -- dpc flags = 0x%lx.\n", flags);
/*
* If we have both an RSCN and PORT UPDATE pending then handle them
@@ -2619,15 +2614,21 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
}
if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
- if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2015,
+ "Loop resync needed, failing.\n");
rval = QLA_FUNCTION_FAILED;
+ }
else
rval = qla2x00_configure_local_loop(vha);
}
if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
- if (LOOP_TRANSITION(vha))
+ if (LOOP_TRANSITION(vha)) {
+ ql_dbg(ql_dbg_disc, vha, 0x201e,
+ "Needs RSCN update and loop transition.\n");
rval = QLA_FUNCTION_FAILED;
+ }
else
rval = qla2x00_configure_fabric(vha);
}
@@ -2638,16 +2639,17 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
rval = QLA_FUNCTION_FAILED;
} else {
atomic_set(&vha->loop_state, LOOP_READY);
-
- DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2069,
+ "LOOP READY.\n");
}
}
if (rval) {
- DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
- __func__, vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x206a,
+ "%s *** FAILED ***.\n", __func__);
} else {
- DEBUG3(printk("%s: exiting normally\n", __func__));
+ ql_dbg(ql_dbg_disc, vha, 0x206b,
+ "%s: exiting normally.\n", __func__);
}
/* Restore state if a resync event occurred during processing */
@@ -2695,8 +2697,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport = NULL;
entries = MAX_FIBRE_DEVICES;
- DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
- DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));
+ ql_dbg(ql_dbg_disc, vha, 0x2016,
+ "Getting FCAL position map.\n");
+ if (ql2xextended_error_logging & ql_dbg_disc)
+ qla2x00_get_fcal_position_map(vha, NULL);
/* Get list of logged in devices. */
memset(ha->gid_list, 0, GID_LIST_SIZE);
@@ -2705,14 +2709,17 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS)
goto cleanup_allocation;
- DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
- vha->host_no, entries));
- DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
- entries * sizeof(struct gid_list_info)));
+ ql_dbg(ql_dbg_disc, vha, 0x2017,
+ "Entries in ID list (%d).\n", entries);
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
+ (uint8_t *)ha->gid_list,
+ entries * sizeof(struct gid_list_info));
/* Allocate temporary fcport for any new fcports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
+ ql_log(ql_log_warn, vha, 0x2018,
+ "Memory allocation failed for fcport.\n");
rval = QLA_MEMORY_ALLOC_FAILED;
goto cleanup_allocation;
}
@@ -2726,9 +2733,9 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
fcport->port_type != FCT_BROADCAST &&
(fcport->flags & FCF_FABRIC_DEVICE) == 0) {
- DEBUG(printk("scsi(%ld): Marking port lost, "
- "loop_id=0x%04x\n",
- vha->host_no, fcport->loop_id));
+ ql_dbg(ql_dbg_disc, vha, 0x2019,
+ "Marking port lost loop_id=0x%04x.\n",
+ fcport->loop_id);
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
}
@@ -2769,12 +2776,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport->vp_idx = vha->vp_idx;
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
- DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
- "information -- get_port_database=%x, "
- "loop_id=0x%04x\n",
- vha->host_no, rval2, new_fcport->loop_id));
- DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
- vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x201a,
+ "Failed to retrieve fcport information "
+ "-- get_port_database=%x, loop_id=0x%04x.\n",
+ rval2, new_fcport->loop_id);
+ ql_dbg(ql_dbg_disc, vha, 0x201b,
+ "Scheduling resync.\n");
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
continue;
}
@@ -2810,6 +2817,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
fcport = new_fcport;
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
+ ql_log(ql_log_warn, vha, 0x201c,
+ "Failed to allocate memory for fcport.\n");
rval = QLA_MEMORY_ALLOC_FAILED;
goto cleanup_allocation;
}
@@ -2828,8 +2837,8 @@ cleanup_allocation:
kfree(new_fcport);
if (rval != QLA_SUCCESS) {
- DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
- "rval=%x\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x201d,
+ "Configure local loop error exit: rval=%x.\n", rval);
}
return (rval);
@@ -2858,27 +2867,27 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
mb);
if (rval != QLA_SUCCESS) {
- DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
- "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
- vha->host_no, fcport->port_name[0], fcport->port_name[1],
+ ql_dbg(ql_dbg_disc, vha, 0x2004,
+ "Unable to adjust iIDMA "
+ "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x "
+ "%04x.\n", fcport->port_name[0], fcport->port_name[1],
fcport->port_name[2], fcport->port_name[3],
fcport->port_name[4], fcport->port_name[5],
fcport->port_name[6], fcport->port_name[7], rval,
- fcport->fp_speed, mb[0], mb[1]));
+ fcport->fp_speed, mb[0], mb[1]);
} else {
link_speed = link_speeds[LS_UNKNOWN];
if (fcport->fp_speed < 5)
link_speed = link_speeds[fcport->fp_speed];
else if (fcport->fp_speed == 0x13)
link_speed = link_speeds[5];
- DEBUG2(qla_printk(KERN_INFO, ha,
- "iIDMA adjusted to %s GB/s on "
- "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
- link_speed, fcport->port_name[0],
- fcport->port_name[1], fcport->port_name[2],
- fcport->port_name[3], fcport->port_name[4],
- fcport->port_name[5], fcport->port_name[6],
- fcport->port_name[7]));
+ ql_dbg(ql_dbg_disc, vha, 0x2005,
+ "iIDMA adjusted to %s GB/s "
+ "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
+ fcport->port_name[0], fcport->port_name[1],
+ fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5],
+ fcport->port_name[6], fcport->port_name[7]);
}
}
@@ -2887,7 +2896,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
struct fc_rport_identifiers rport_ids;
struct fc_rport *rport;
- struct qla_hw_data *ha = vha->hw;
unsigned long flags;
qla2x00_rport_del(fcport);
@@ -2899,8 +2907,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
if (!rport) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate fc remote port!\n");
+ ql_log(ql_log_warn, vha, 0x2006,
+ "Unable to allocate fc remote port.\n");
return;
}
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -2975,8 +2983,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
loop_id = SNS_FL_PORT;
rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
if (rval != QLA_SUCCESS) {
- DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
- "Port\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x201f,
+ "MBX_GET_PORT_NAME failed, No FL Port.\n");
vha->device_flags &= ~SWITCH_FOUND;
return (QLA_SUCCESS);
@@ -3003,32 +3011,32 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
0xfc, mb, BIT_1 | BIT_0);
if (mb[0] != MBS_COMMAND_COMPLETE) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
- "mb[2]=%x mb[6]=%x mb[7]=%x\n", loop_id,
- mb[0], mb[1], mb[2], mb[6], mb[7]));
+ ql_dbg(ql_dbg_disc, vha, 0x2042,
+ "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
+ "mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
+ mb[2], mb[6], mb[7]);
return (QLA_SUCCESS);
}
if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
if (qla2x00_rft_id(vha)) {
/* EMPTY */
- DEBUG2(printk("scsi(%ld): Register FC-4 "
- "TYPE failed.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2045,
+ "Register FC-4 TYPE failed.\n");
}
if (qla2x00_rff_id(vha)) {
/* EMPTY */
- DEBUG2(printk("scsi(%ld): Register FC-4 "
- "Features failed.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2049,
+ "Register FC-4 Features failed.\n");
}
if (qla2x00_rnn_id(vha)) {
/* EMPTY */
- DEBUG2(printk("scsi(%ld): Register Node Name "
- "failed.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Register Node Name failed.\n");
} else if (qla2x00_rsnn_nn(vha)) {
/* EMPTY */
- DEBUG2(printk("scsi(%ld): Register Symbolic "
- "Node Name failed.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2053,
+ "Register Symobilic Node Name failed.\n");
}
}
@@ -3132,8 +3140,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
if (rval) {
- DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
- "rval=%d\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_disc, vha, 0x2068,
+ "Configure fabric error exit rval=%d.\n", rval);
}
return (rval);
@@ -3175,8 +3183,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL);
if (!swl) {
/*EMPTY*/
- DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
- "on GA_NXT\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2054,
+ "GID_PT allocations failed, fallback on GA_NXT.\n");
} else {
if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
kfree(swl);
@@ -3201,6 +3209,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
/* Allocate temporary fcport for any new fcports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
+ ql_log(ql_log_warn, vha, 0x205e,
+ "Failed to allocate memory for fcport.\n");
kfree(swl);
return (QLA_MEMORY_ALLOC_FAILED);
}
@@ -3247,9 +3257,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
/* Send GA_NXT to the switch */
rval = qla2x00_ga_nxt(vha, new_fcport);
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "SNS scan failed -- assuming zero-entry "
- "result...\n");
+ ql_log(ql_log_warn, vha, 0x2064,
+ "SNS scan failed -- assuming "
+ "zero-entry result.\n");
list_for_each_entry_safe(fcport, fcptemp,
new_fcports, list) {
list_del(&fcport->list);
@@ -3265,9 +3275,11 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
wrap.b24 = new_fcport->d_id.b24;
first_dev = 0;
} else if (new_fcport->d_id.b24 == wrap.b24) {
- DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
- vha->host_no, new_fcport->d_id.b.domain,
- new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "Device wrap (%02x%02x%02x).\n",
+ new_fcport->d_id.b.domain,
+ new_fcport->d_id.b.area,
+ new_fcport->d_id.b.al_pa);
break;
}
@@ -3372,6 +3384,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
nxt_d_id.b24 = new_fcport->d_id.b24;
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
+ ql_log(ql_log_warn, vha, 0x2066,
+ "Memory allocation failed for fcport.\n");
kfree(swl);
return (QLA_MEMORY_ALLOC_FAILED);
}
@@ -3501,10 +3515,10 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
d_id.b.area = MSB(LSW(rscn_entry));
d_id.b.al_pa = LSB(LSW(rscn_entry));
- DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
- "[%02x/%02x%02x%02x].\n",
- vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
- d_id.b.area, d_id.b.al_pa));
+ ql_dbg(ql_dbg_disc, vha, 0x2020,
+ "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n",
+ vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area,
+ d_id.b.al_pa);
vha->rscn_out_ptr++;
if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
@@ -3520,17 +3534,17 @@ qla2x00_device_resync(scsi_qla_host_t *vha)
if (rscn_entry != vha->rscn_queue[rscn_out_iter])
break;
- DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
- "entry found at [%d].\n", vha->host_no,
- rscn_out_iter));
+ ql_dbg(ql_dbg_disc, vha, 0x2021,
+ "Skipping duplicate RSCN queue entry found at "
+ "[%d].\n", rscn_out_iter);
vha->rscn_out_ptr = rscn_out_iter;
}
/* Queue overflow, set switch default case. */
if (vha->flags.rscn_queue_overflow) {
- DEBUG(printk("scsi(%ld): device_resync: rscn "
- "overflow.\n", vha->host_no));
+ ql_dbg(ql_dbg_disc, vha, 0x2022,
+ "device_resync: rscn overflow.\n");
format = 3;
vha->flags.rscn_queue_overflow = 0;
@@ -3659,10 +3673,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
tmp_loopid = 0;
for (;;) {
- DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
- "for port %02x%02x%02x.\n",
- vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_disc, vha, 0x2000,
+ "Trying Fabric Login w/loop id 0x%04x for port "
+ "%02x%02x%02x.\n",
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
/* Login fcport on switch. */
ha->isp_ops->fabric_login(vha, fcport->loop_id,
@@ -3680,10 +3695,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
tmp_loopid = fcport->loop_id;
fcport->loop_id = mb[1];
- DEBUG(printk("Fabric Login: port in use - next "
- "loop id=0x%04x, port Id=%02x%02x%02x.\n",
+ ql_dbg(ql_dbg_disc, vha, 0x2001,
+ "Fabric Login: port in use - next loop "
+ "id=0x%04x, port id= %02x%02x%02x.\n",
fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa));
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
} else if (mb[0] == MBS_COMMAND_COMPLETE) {
/*
@@ -3744,11 +3760,11 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
/*
* unrecoverable / not handled error
*/
- DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
- "loop_id=%x jiffies=%lx.\n",
- __func__, vha->host_no, mb[0],
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
+ ql_dbg(ql_dbg_disc, vha, 0x2002,
+ "Failed=%x port_id=%02x%02x%02x loop_id=%x "
+ "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ fcport->loop_id, jiffies);
*next_loopid = fcport->loop_id;
ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3852,7 +3868,8 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
return (QLA_FUNCTION_FAILED);
if (rval)
- DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
+ ql_dbg(ql_dbg_disc, vha, 0x206c,
+ "%s *** FAILED ***.\n", __func__);
return (rval);
}
@@ -3929,8 +3946,8 @@ qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp;
- qla_printk(KERN_INFO, ha,
- "Performing ISP error recovery - ha= %p.\n", ha);
+ ql_dbg(ql_dbg_p3p, vha, 0xb002,
+ "Performing ISP error recovery - ha=%p.\n", ha);
atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -3964,8 +3981,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
ha->qla_stats.total_isp_aborts++;
- qla_printk(KERN_INFO, ha,
- "Performing ISP error recovery - ha= %p.\n", ha);
+ ql_log(ql_log_info, vha, 0x00af,
+ "Performing ISP error recovery - ha=%p.\n", ha);
/* For ISP82XX, reset_chip is just disabling interrupts.
* Driver waits for the completion of the commands.
@@ -4016,6 +4033,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
/* Make sure for ISP 82XX IO DMA is complete */
if (IS_QLA82XX(ha)) {
qla82xx_chip_reset_cleanup(vha);
+ ql_log(ql_log_info, vha, 0x00b4,
+ "Done chip reset cleanup.\n");
/* Done waiting for pending commands.
* Reset the online flag.
@@ -4097,7 +4116,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
ha->fce_dma, ha->fce_bufs, ha->fce_mb,
&ha->fce_bufs);
if (rval) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x8033,
"Unable to reinitialize FCE "
"(%d).\n", rval);
ha->flags.fce_enabled = 0;
@@ -4109,7 +4128,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
rval = qla2x00_enable_eft_trace(vha,
ha->eft_dma, EFT_NUM_BUFFERS);
if (rval) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x8034,
"Unable to reinitialize EFT "
"(%d).\n", rval);
}
@@ -4118,9 +4137,9 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
vha->flags.online = 1;
if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
if (ha->isp_abort_cnt == 0) {
- qla_printk(KERN_WARNING, ha,
- "ISP error recovery failed - "
- "board disabled\n");
+ ql_log(ql_log_fatal, vha, 0x8035,
+ "ISP error recover failed - "
+ "board disabled.\n");
/*
* The next call disables the board
* completely.
@@ -4132,16 +4151,16 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
status = 0;
} else { /* schedule another ISP abort */
ha->isp_abort_cnt--;
- DEBUG(printk("qla%ld: ISP abort - "
- "retry remaining %d\n",
- vha->host_no, ha->isp_abort_cnt));
+ ql_dbg(ql_dbg_taskm, vha, 0x8020,
+ "ISP abort - retry remaining %d.\n",
+ ha->isp_abort_cnt);
status = 1;
}
} else {
ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
- DEBUG(printk("qla2x00(%ld): ISP error recovery "
- "- retrying (%d) more times\n",
- vha->host_no, ha->isp_abort_cnt));
+ ql_dbg(ql_dbg_taskm, vha, 0x8021,
+ "ISP error recovery - retrying (%d) "
+ "more times.\n", ha->isp_abort_cnt);
set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
status = 1;
}
@@ -4150,9 +4169,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
}
if (!status) {
- DEBUG(printk(KERN_INFO
- "qla2x00_abort_isp(%ld): succeeded.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry(vp, &ha->vp_list, list) {
@@ -4169,8 +4186,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
- qla_printk(KERN_INFO, ha,
- "qla2x00_abort_isp: **** FAILED ****\n");
+ ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n", __func__);
}
return(status);
@@ -4211,8 +4227,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
status = qla2x00_fw_ready(vha);
if (!status) {
- DEBUG(printk("%s(): Start configure loop, "
- "status = %d\n", __func__, status));
+ ql_dbg(ql_dbg_taskm, vha, 0x8031,
+ "Start configure loop status = %d.\n", status);
/* Issue a marker after FW becomes ready. */
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -4234,9 +4250,8 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
if ((vha->device_flags & DFLG_NO_CABLE))
status = 0;
- DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
- __func__,
- status));
+ ql_dbg(ql_dbg_taskm, vha, 0x8032,
+ "Configure loop done, status = 0x%x.\n", status);
}
return (status);
}
@@ -4256,13 +4271,13 @@ qla25xx_init_queues(struct qla_hw_data *ha)
rsp->options &= ~BIT_0;
ret = qla25xx_init_rsp_que(base_vha, rsp);
if (ret != QLA_SUCCESS)
- DEBUG2_17(printk(KERN_WARNING
- "%s Rsp que:%d init failed\n", __func__,
- rsp->id));
+ ql_dbg(ql_dbg_init, base_vha, 0x00ff,
+ "%s Rsp que: %d init failed.\n",
+ __func__, rsp->id);
else
- DEBUG2_17(printk(KERN_INFO
- "%s Rsp que:%d inited\n", __func__,
- rsp->id));
+ ql_dbg(ql_dbg_init, base_vha, 0x0100,
+ "%s Rsp que: %d inited.\n",
+ __func__, rsp->id);
}
}
for (i = 1; i < ha->max_req_queues; i++) {
@@ -4272,13 +4287,13 @@ qla25xx_init_queues(struct qla_hw_data *ha)
req->options &= ~BIT_0;
ret = qla25xx_init_req_que(base_vha, req);
if (ret != QLA_SUCCESS)
- DEBUG2_17(printk(KERN_WARNING
- "%s Req que:%d init failed\n", __func__,
- req->id));
+ ql_dbg(ql_dbg_init, base_vha, 0x0101,
+ "%s Req que: %d init failed.\n",
+ __func__, req->id);
else
- DEBUG2_17(printk(KERN_WARNING
- "%s Req que:%d inited\n", __func__,
- req->id));
+ ql_dbg(ql_dbg_init, base_vha, 0x0102,
+ "%s Req que: %d inited.\n",
+ __func__, req->id);
}
}
return ret;
@@ -4397,19 +4412,22 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
chksum += le32_to_cpu(*dptr++);
- DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
- DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
+ "Contents of NVRAM\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
+ (uint8_t *)nv, ha->nvram_size);
/* Bad NVRAM data, set defaults parameters. */
if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
|| nv->id[3] != ' ' ||
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
/* Reset NVRAM data. */
- qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
- "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
- le16_to_cpu(nv->nvram_version));
- qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
- "invalid -- WWPN) defaults.\n");
+ ql_log(ql_log_warn, vha, 0x006b,
+ "Inconsistent NVRAM detected: checksum=0x%x id=%c "
+ "version=0x%x.\n", chksum, nv->id[0], le16_to_cpu(nv->nvram_version));
+ ql_log(ql_log_warn, vha, 0x006c,
+ "Falling back to functioning (yet invalid -- WWPN) "
+ "defaults.\n");
/*
* Set default initialization control block.
@@ -4587,10 +4605,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
if (ha->zio_mode != QLA_ZIO_DISABLED) {
ha->zio_mode = QLA_ZIO_MODE_6;
- DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
- "(%d us).\n", vha->host_no, ha->zio_mode,
- ha->zio_timer * 100));
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x006f,
"ZIO mode %d enabled; timer delay (%d us).\n",
ha->zio_mode, ha->zio_timer * 100);
@@ -4601,8 +4616,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
}
if (rval) {
- DEBUG2_3(printk(KERN_WARNING
- "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x0070,
+ "NVRAM configuration failed.\n");
}
return (rval);
}
@@ -4620,8 +4635,8 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
- qla_printk(KERN_INFO, ha,
- "FW: Loading from flash (%x)...\n", faddr);
+ ql_dbg(ql_dbg_init, vha, 0x008b,
+ "Loading firmware from flash (%x).\n", faddr);
rval = QLA_SUCCESS;
@@ -4637,11 +4652,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
- qla_printk(KERN_WARNING, ha,
- "Unable to verify integrity of flash firmware image!\n");
- qla_printk(KERN_WARNING, ha,
- "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
- dcode[1], dcode[2], dcode[3]);
+ ql_log(ql_log_fatal, vha, 0x008c,
+ "Unable to verify the integrity of flash firmware "
+ "image.\n");
+ ql_log(ql_log_fatal, vha, 0x008d,
+ "Firmware data: %08x %08x %08x %08x.\n",
+ dcode[0], dcode[1], dcode[2], dcode[3]);
return QLA_FUNCTION_FAILED;
}
@@ -4660,9 +4676,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
if (dlen > risc_size)
dlen = risc_size;
- DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
- "addr %x, number of dwords 0x%x, offset 0x%x.\n",
- vha->host_no, risc_addr, dlen, faddr));
+ ql_dbg(ql_dbg_init, vha, 0x008e,
+ "Loading risc segment@ risc addr %x "
+ "number of dwords 0x%x offset 0x%x.\n",
+ risc_addr, dlen, faddr);
qla24xx_read_flash_data(vha, dcode, faddr, dlen);
for (i = 0; i < dlen; i++)
@@ -4671,12 +4688,9 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
rval = qla2x00_load_ram(vha, req->dma, risc_addr,
dlen);
if (rval) {
- DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
- "segment %d of firmware\n", vha->host_no,
- fragment));
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Failed to load segment %d of "
- "firmware\n", fragment);
+ ql_log(ql_log_fatal, vha, 0x008f,
+ "Failed to load segment %d of firmware.\n",
+ fragment);
break;
}
@@ -4709,9 +4723,10 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
/* Load firmware blob. */
blob = qla2x00_request_firmware(vha);
if (!blob) {
- qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
- qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
- "from: " QLA_FW_URL ".\n");
+ ql_log(ql_log_info, vha, 0x0083,
+ "Firmware image unavailable.\n");
+ ql_log(ql_log_info, vha, 0x0084,
+ "Firmware images can be retrieved from: " QLA_FW_URL ".\n");
return QLA_FUNCTION_FAILED;
}
@@ -4724,8 +4739,8 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
/* Validate firmware image by checking version. */
if (blob->fw->size < 8 * sizeof(uint16_t)) {
- qla_printk(KERN_WARNING, ha,
- "Unable to verify integrity of firmware image (%Zd)!\n",
+ ql_log(ql_log_fatal, vha, 0x0085,
+ "Unable to verify integrity of firmware image (%Zd).\n",
blob->fw->size);
goto fail_fw_integrity;
}
@@ -4734,11 +4749,11 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
wcode[2] == 0 && wcode[3] == 0)) {
- qla_printk(KERN_WARNING, ha,
- "Unable to verify integrity of firmware image!\n");
- qla_printk(KERN_WARNING, ha,
- "Firmware data: %04x %04x %04x %04x!\n", wcode[0],
- wcode[1], wcode[2], wcode[3]);
+ ql_log(ql_log_fatal, vha, 0x0086,
+ "Unable to verify integrity of firmware image.\n");
+ ql_log(ql_log_fatal, vha, 0x0087,
+ "Firmware data: %04x %04x %04x %04x.\n",
+ wcode[0], wcode[1], wcode[2], wcode[3]);
goto fail_fw_integrity;
}
@@ -4751,9 +4766,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
/* Validate firmware image size. */
fwclen += risc_size * sizeof(uint16_t);
if (blob->fw->size < fwclen) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_fatal, vha, 0x0088,
"Unable to verify integrity of firmware image "
- "(%Zd)!\n", blob->fw->size);
+ "(%Zd).\n", blob->fw->size);
goto fail_fw_integrity;
}
@@ -4762,10 +4777,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
wlen = (uint16_t)(ha->fw_transfer_size >> 1);
if (wlen > risc_size)
wlen = risc_size;
-
- DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
- "addr %x, number of words 0x%x.\n", vha->host_no,
- risc_addr, wlen));
+ ql_dbg(ql_dbg_init, vha, 0x0089,
+ "Loading risc segment@ risc addr %x number of "
+ "words 0x%x.\n", risc_addr, wlen);
for (i = 0; i < wlen; i++)
wcode[i] = swab16(fwcode[i]);
@@ -4773,12 +4787,9 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
rval = qla2x00_load_ram(vha, req->dma, risc_addr,
wlen);
if (rval) {
- DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
- "segment %d of firmware\n", vha->host_no,
- fragment));
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Failed to load segment %d of "
- "firmware\n", fragment);
+ ql_log(ql_log_fatal, vha, 0x008a,
+ "Failed to load segment %d of firmware.\n",
+ fragment);
break;
}
@@ -4814,15 +4825,17 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
/* Load firmware blob. */
blob = qla2x00_request_firmware(vha);
if (!blob) {
- qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
- qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
- "from: " QLA_FW_URL ".\n");
+ ql_log(ql_log_warn, vha, 0x0090,
+ "Fimware image unavailable.\n");
+ ql_log(ql_log_warn, vha, 0x0091,
+ "Firmware images can be retrieved from: "
+ QLA_FW_URL ".\n");
return QLA_FUNCTION_FAILED;
}
- qla_printk(KERN_INFO, ha,
- "FW: Loading via request-firmware...\n");
+ ql_log(ql_log_info, vha, 0x0092,
+ "Loading via request-firmware.\n");
rval = QLA_SUCCESS;
@@ -4834,8 +4847,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
/* Validate firmware image by checking version. */
if (blob->fw->size < 8 * sizeof(uint32_t)) {
- qla_printk(KERN_WARNING, ha,
- "Unable to verify integrity of firmware image (%Zd)!\n",
+ ql_log(ql_log_fatal, vha, 0x0093,
+ "Unable to verify integrity of firmware image (%Zd).\n",
blob->fw->size);
goto fail_fw_integrity;
}
@@ -4845,11 +4858,12 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
- qla_printk(KERN_WARNING, ha,
- "Unable to verify integrity of firmware image!\n");
- qla_printk(KERN_WARNING, ha,
- "Firmware data: %08x %08x %08x %08x!\n", dcode[0],
- dcode[1], dcode[2], dcode[3]);
+ ql_log(ql_log_fatal, vha, 0x0094,
+ "Unable to verify integrity of firmware image (%Zd).\n",
+ blob->fw->size);
+ ql_log(ql_log_fatal, vha, 0x0095,
+ "Firmware data: %08x %08x %08x %08x.\n",
+ dcode[0], dcode[1], dcode[2], dcode[3]);
goto fail_fw_integrity;
}
@@ -4861,9 +4875,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
/* Validate firmware image size. */
fwclen += risc_size * sizeof(uint32_t);
if (blob->fw->size < fwclen) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_fatal, vha, 0x0096,
"Unable to verify integrity of firmware image "
- "(%Zd)!\n", blob->fw->size);
+ "(%Zd).\n", blob->fw->size);
goto fail_fw_integrity;
}
@@ -4874,9 +4888,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
if (dlen > risc_size)
dlen = risc_size;
- DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
- "addr %x, number of dwords 0x%x.\n", vha->host_no,
- risc_addr, dlen));
+ ql_dbg(ql_dbg_init, vha, 0x0097,
+ "Loading risc segment@ risc addr %x "
+ "number of dwords 0x%x.\n", risc_addr, dlen);
for (i = 0; i < dlen; i++)
dcode[i] = swab32(fwcode[i]);
@@ -4884,12 +4898,9 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
rval = qla2x00_load_ram(vha, req->dma, risc_addr,
dlen);
if (rval) {
- DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
- "segment %d of firmware\n", vha->host_no,
- fragment));
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Failed to load segment %d of "
- "firmware\n", fragment);
+ ql_log(ql_log_fatal, vha, 0x0098,
+ "Failed to load segment %d of firmware.\n",
+ fragment);
break;
}
@@ -4953,14 +4964,13 @@ try_blob_fw:
if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
return rval;
- qla_printk(KERN_ERR, ha,
- "FW: Attempting to fallback to golden firmware...\n");
+ ql_log(ql_log_info, vha, 0x0099,
+ "Attempting to fallback to golden firmware.\n");
rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
if (rval != QLA_SUCCESS)
return rval;
- qla_printk(KERN_ERR, ha,
- "FW: Please update operational firmware...\n");
+ ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
ha->flags.running_gold_fw = 1;
return rval;
@@ -4987,8 +4997,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
continue;
if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
continue;
- qla_printk(KERN_INFO, ha,
- "Attempting retry of stop-firmware command...\n");
+ ql_log(ql_log_info, vha, 0x8015,
+ "Attempting retry of stop-firmware command.\n");
ret = qla2x00_stop_firmware(vha);
}
}
@@ -5023,10 +5033,10 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
/* Login to SNS first */
ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
if (mb[0] != MBS_COMMAND_COMPLETE) {
- DEBUG15(qla_printk(KERN_INFO, ha,
- "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
- "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS,
- mb[0], mb[1], mb[2], mb[6], mb[7]));
+ ql_dbg(ql_dbg_init, vha, 0x0103,
+ "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
+ "mb[6]=%x mb[7]=%x.\n",
+ NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
return (QLA_FUNCTION_FAILED);
}
@@ -5146,19 +5156,23 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
chksum += le32_to_cpu(*dptr++);
- DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
- DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
+ "Contents of NVRAM:\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
+ (uint8_t *)nv, ha->nvram_size);
/* Bad NVRAM data, set defaults parameters. */
if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
|| nv->id[3] != ' ' ||
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
/* Reset NVRAM data. */
- qla_printk(KERN_WARNING, ha, "Inconsistent NVRAM detected: "
- "checksum=0x%x id=%c version=0x%x.\n", chksum, nv->id[0],
+ ql_log(ql_log_info, vha, 0x0073,
+ "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+ "version=0x%x.\n", chksum, nv->id[0],
le16_to_cpu(nv->nvram_version));
- qla_printk(KERN_WARNING, ha, "Falling back to functioning (yet "
- "invalid -- WWPN) defaults.\n");
+ ql_log(ql_log_info, vha, 0x0074,
+ "Falling back to functioning (yet invalid -- WWPN) "
+ "defaults.\n");
/*
* Set default initialization control block.
@@ -5350,12 +5364,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
if (ha->zio_mode != QLA_ZIO_DISABLED) {
ha->zio_mode = QLA_ZIO_MODE_6;
- DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
- "(%d us).\n", vha->host_no, ha->zio_mode,
- ha->zio_timer * 100));
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x0075,
"ZIO mode %d enabled; timer delay (%d us).\n",
- ha->zio_mode, ha->zio_timer * 100);
+ ha->zio_mode,
+ ha->zio_timer * 100);
icb->firmware_options_2 |= cpu_to_le32(
(uint32_t)ha->zio_mode);
@@ -5364,8 +5376,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
}
if (rval) {
- DEBUG2_3(printk(KERN_WARNING
- "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x0076,
+ "NVRAM configuration failed.\n");
}
return (rval);
}
@@ -5388,9 +5400,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
status = qla2x00_fw_ready(vha);
if (!status) {
- qla_printk(KERN_INFO, ha,
- "%s(): Start configure loop, "
- "status = %d\n", __func__, status);
+ ql_log(ql_log_info, vha, 0x803c,
+ "Start configure loop, status =%d.\n", status);
/* Issue a marker after FW becomes ready. */
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -5412,9 +5423,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
if ((vha->device_flags & DFLG_NO_CABLE))
status = 0;
- qla_printk(KERN_INFO, ha,
- "%s(): Configure loop done, status = 0x%x\n",
- __func__, status);
+ ql_log(ql_log_info, vha, 0x803d,
+ "Configure loop done, status = 0x%x.\n", status);
}
if (!status) {
@@ -5450,9 +5460,9 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
ha->fce_dma, ha->fce_bufs, ha->fce_mb,
&ha->fce_bufs);
if (rval) {
- qla_printk(KERN_WARNING, ha,
- "Unable to reinitialize FCE "
- "(%d).\n", rval);
+ ql_log(ql_log_warn, vha, 0x803e,
+ "Unable to reinitialize FCE (%d).\n",
+ rval);
ha->flags.fce_enabled = 0;
}
}
@@ -5462,17 +5472,16 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
rval = qla2x00_enable_eft_trace(vha,
ha->eft_dma, EFT_NUM_BUFFERS);
if (rval) {
- qla_printk(KERN_WARNING, ha,
- "Unable to reinitialize EFT "
- "(%d).\n", rval);
+ ql_log(ql_log_warn, vha, 0x803f,
+ "Unable to reinitialize EFT (%d).\n",
+ rval);
}
}
}
if (!status) {
- DEBUG(printk(KERN_INFO
- "qla82xx_restart_isp(%ld): succeeded.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_taskm, vha, 0x8040,
+ "qla82xx_restart_isp succeeded.\n");
spin_lock_irqsave(&ha->vport_slock, flags);
list_for_each_entry(vp, &ha->vp_list, list) {
@@ -5489,8 +5498,8 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
- qla_printk(KERN_INFO, ha,
- "qla82xx_restart_isp: **** FAILED ****\n");
+ ql_log(ql_log_warn, vha, 0x8041,
+ "qla82xx_restart_isp **** FAILED ****.\n");
}
return status;
@@ -5640,9 +5649,8 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
if (ret == QLA_SUCCESS)
fcport->fcp_prio = priority;
else
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld): Unable to activate fcp priority, "
- " ret=0x%x\n", vha->host_no, ret));
+ ql_dbg(ql_dbg_user, vha, 0x704f,
+ "Unable to activate fcp priority, ret=0x%x.\n", ret);
return ret;
}
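The qla_init.c hunks above all apply one conversion pattern: compile-time DEBUG*()/qla_printk() messages keyed on vha->host_no become ql_dbg()/ql_log() calls that take a debug category or log level, the scsi_qla_host pointer, and a unique hex message id. A minimal before/after sketch, drawn directly from calls that appear in the hunks above (shown out of diff context, not a standalone compilable unit):

	/* before: DEBUG wrapper / qla_printk(), host identified via host_no */
	DEBUG2(printk("scsi(%ld): Register FC-4 TYPE failed.\n", vha->host_no));
	qla_printk(KERN_WARNING, ha, "Unable to allocate fc remote port!\n");

	/* after: category or level, vha, unique hex message id, message */
	ql_dbg(ql_dbg_disc, vha, 0x2045, "Register FC-4 TYPE failed.\n");
	ql_log(ql_log_warn, vha, 0x2006, "Unable to allocate fc remote port.\n");

	/* buffer dumps add ql_dbg_buffer to the category */
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
	    (uint8_t *)nv, ha->nvram_size);

The same scheme continues in the qla_inline.h, qla_iocb.c and qla_isr.c hunks that follow.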
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 4c8167e11f6..d2e904bc21c 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -94,11 +94,11 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
/* Don't print state transitions during initial allocation of fcport */
if (old_state && old_state != state) {
- DEBUG(qla_printk(KERN_WARNING, fcport->vha->hw,
- "scsi(%ld): FCPort state transitioned from %s to %s - "
- "portid=%02x%02x%02x.\n", fcport->vha->host_no,
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
+ "FCPort state transitioned from %s to %s - "
+ "portid=%02x%02x%02x.\n",
port_state_str[old_state], port_state_str[state],
fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
+ fcport->d_id.b.al_pa);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 7bac3cd109d..49d6906af88 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -150,7 +150,8 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
/* We only support T10 DIF right now */
if (guard != SHOST_DIX_GUARD_CRC) {
- DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
+ ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
+ "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
return 0;
}
@@ -343,9 +344,10 @@ qla2x00_start_scsi(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
- != QLA_SUCCESS)
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS) {
return (QLA_FUNCTION_FAILED);
+ }
vha->marker_needed = 0;
}
@@ -490,8 +492,8 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
mrk24 = NULL;
mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
if (mrk == NULL) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
- __func__, base_vha->host_no));
+ ql_log(ql_log_warn, base_vha, 0x3026,
+ "Failed to allocate Marker IOCB.\n");
return (QLA_FUNCTION_FAILED);
}
@@ -547,9 +549,10 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
- DEBUG5(printk("%s(): IOCB data:\n", __func__));
- DEBUG5(qla2x00_dump_buffer(
- (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
+ "IOCB data:\n");
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
+ (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
/* Adjust ring index. */
req->ring_index++;
@@ -604,7 +607,7 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
* Returns the number of IOCB entries needed to store @dsds.
*/
inline uint16_t
-qla24xx_calc_iocbs(uint16_t dsds)
+qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
uint16_t iocbs;
@@ -614,8 +617,6 @@ qla24xx_calc_iocbs(uint16_t dsds)
if ((dsds - 1) % 5)
iocbs++;
}
- DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
- __func__, iocbs));
return iocbs;
}
@@ -712,6 +713,7 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
unsigned int protcnt)
{
struct sd_dif_tuple *spt;
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
unsigned char op = scsi_get_prot_op(cmd);
switch (scsi_get_prot_type(cmd)) {
@@ -768,9 +770,9 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
op == SCSI_PROT_WRITE_PASS)) {
spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
scsi_prot_sglist(cmd)[0].offset;
- DEBUG18(printk(KERN_DEBUG
- "%s(): LBA from user %p, lba = 0x%x\n",
- __func__, spt, (int)spt->ref_tag));
+ ql_dbg(ql_dbg_io, vha, 0x3008,
+ "LBA from user %p, lba = 0x%x for cmd=%p.\n",
+ spt, (int)spt->ref_tag, cmd);
pkt->ref_tag = swab32(spt->ref_tag);
pkt->app_tag_mask[0] = 0x0;
pkt->app_tag_mask[1] = 0x0;
@@ -789,11 +791,11 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
break;
}
- DEBUG18(printk(KERN_DEBUG
- "%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
- " app tag = 0x%x, prot SG count %d , cmd lba 0x%x,"
- " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
- (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
+ ql_dbg(ql_dbg_io, vha, 0x3009,
+ "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
+ "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
+ pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
+ scsi_get_prot_type(cmd), cmd);
}
@@ -809,6 +811,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
uint32_t *cur_dsd = dsd;
int i;
uint16_t used_dsds = tot_dsds;
+ scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
uint8_t *cp;
@@ -853,9 +856,10 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
cur_dsd = (uint32_t *)next_dsd;
}
sle_dma = sg_dma_address(sg);
- DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
- " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma),
- MSD(sle_dma), sg_dma_len(sg)));
+ ql_dbg(ql_dbg_io, vha, 0x300a,
+ "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
+ i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
+ sp->cmd);
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
@@ -863,8 +867,8 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
cp = page_address(sg_page(sg)) + sg->offset;
- DEBUG18(printk("%s(): User Data buffer= %p:\n",
- __func__ , cp));
+ ql_dbg(ql_dbg_io, vha, 0x300b,
+ "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
}
}
/* Null termination */
@@ -888,7 +892,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
struct scsi_cmnd *cmd;
uint32_t *cur_dsd = dsd;
uint16_t used_dsds = tot_dsds;
-
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
uint8_t *cp;
@@ -935,10 +939,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
}
sle_dma = sg_dma_address(sg);
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
- DEBUG18(printk(KERN_DEBUG
- "%s(): %p, sg entry %d - addr =0x%x"
- "0x%x, len =%d\n", __func__ , cur_dsd, i,
- LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
+ ql_dbg(ql_dbg_io, vha, 0x3027,
+ "%s(): %p, sg_entry %d - "
+ "addr=0x%x0x%x, len=%d.\n",
+ __func__, cur_dsd, i,
+ LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
}
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
@@ -946,8 +951,9 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
cp = page_address(sg_page(sg)) + sg->offset;
- DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
- __func__ , cp));
+ ql_dbg(ql_dbg_io, vha, 0x3028,
+ "%s(): Protection Data buffer = %p.\n", __func__,
+ cp);
}
avail_dsds--;
}
@@ -996,22 +1002,16 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
*((uint32_t *)(&cmd_pkt->entry_type)) =
__constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
+ vha = sp->fcport->vha;
+ ha = vha->hw;
+
/* No data transfer */
data_bytes = scsi_bufflen(cmd);
if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
- DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
- __func__, data_bytes));
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return QLA_SUCCESS;
}
- vha = sp->fcport->vha;
- ha = vha->hw;
-
- DEBUG18(printk(KERN_DEBUG
- "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
- vha->host_no, sp, scsi_get_prot_op(sp->cmd)));
-
cmd_pkt->vp_index = sp->fcport->vp_idx;
/* Set transfer direction */
@@ -1056,8 +1056,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
/* Determine SCSI command length -- align to 4 byte boundary */
if (cmd->cmd_len > 16) {
- DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
- __func__));
additional_fcpcdb_len = cmd->cmd_len - 16;
if ((cmd->cmd_len % 4) != 0) {
/* SCSI cmd > 16 bytes must be multiple of 4 */
@@ -1108,11 +1106,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
- DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
- "entries %d, data bytes %d, Protection entries %d\n",
- __func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds),
- data_bytes, tot_prot_dsds));
-
/* Compute dif len and adjust data len to incude protection */
total_bytes = data_bytes;
dif_bytes = 0;
@@ -1150,14 +1143,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
additional_fcpcdb_len);
*fcp_dl = htonl(total_bytes);
- DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
- " = 0x%x (%d), dat block size =0x%x (%d)\n", __func__,
- vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
- crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));
-
if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
- DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
- __func__, data_bytes));
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return QLA_SUCCESS;
}
@@ -1182,8 +1168,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
return QLA_SUCCESS;
crc_queuing_error:
- DEBUG18(qla_printk(KERN_INFO, ha,
- "CMD sent FAILED crc_q error:sp = %p\n", sp));
/* Cleanup will be performed by the caller */
return QLA_FUNCTION_FAILED;
@@ -1225,8 +1209,8 @@ qla24xx_start_scsi(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
- != QLA_SUCCESS)
+ if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS)
return QLA_FUNCTION_FAILED;
vha->marker_needed = 0;
}
@@ -1243,8 +1227,9 @@ qla24xx_start_scsi(srb_t *sp)
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == MAX_OUTSTANDING_COMMANDS) {
goto queuing_error;
+ }
/* Map the sg table so we have an accurate count of sg entries needed */
if (scsi_sg_count(cmd)) {
@@ -1256,8 +1241,7 @@ qla24xx_start_scsi(srb_t *sp)
nseg = 0;
tot_dsds = nseg;
-
- req_cnt = qla24xx_calc_iocbs(tot_dsds);
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
@@ -1322,7 +1306,6 @@ qla24xx_start_scsi(srb_t *sp)
/* Specify response queue number where completion should happen */
cmd_pkt->entry_status = (uint8_t) rsp->id;
wmb();
-
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
@@ -1534,9 +1517,6 @@ queuing_error:
/* Cleanup will be performed by the caller (queuecommand) */
spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- DEBUG18(qla_printk(KERN_INFO, ha,
- "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
return QLA_FUNCTION_FAILED;
}
@@ -1581,8 +1561,11 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == MAX_OUTSTANDING_COMMANDS) {
+ ql_log(ql_log_warn, vha, 0x700b,
+ "No room on oustanding cmd array.\n");
goto queuing_error;
+ }
/* Prep command array. */
req->current_outstanding_cmd = handle;
@@ -1999,8 +1982,11 @@ qla2x00_start_sp(srb_t *sp)
rval = QLA_FUNCTION_FAILED;
spin_lock_irqsave(&ha->hardware_lock, flags);
pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
- if (!pkt)
+ if (!pkt) {
+ ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
+ "qla2x00_alloc_iocbs failed.\n");
goto done;
+ }
rval = QLA_SUCCESS;
switch (ctx->type) {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 1b60a95adb5..b16b7725dee 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -45,7 +45,7 @@ qla2100_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return (IRQ_NONE);
}
@@ -63,7 +63,7 @@ qla2100_intr_handler(int irq, void *dev_id)
/*
* Issue a "HARD" reset in order for the RISC interrupt
- * bit to be cleared. Schedule a big hammmer to get
+ * bit to be cleared. Schedule a big hammer to get
* out of the RISC PAUSED state.
*/
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -91,9 +91,9 @@ qla2100_intr_handler(int irq, void *dev_id)
qla2x00_async_event(vha, rsp, mb);
} else {
/*EMPTY*/
- DEBUG2(printk("scsi(%ld): Unrecognized "
- "interrupt type (%d).\n",
- vha->host_no, mb[0]));
+ ql_dbg(ql_dbg_async, vha, 0x5025,
+ "Unrecognized interrupt type (%d).\n",
+ mb[0]);
}
/* Release mailbox registers. */
WRT_REG_WORD(&reg->semaphore, 0);
@@ -142,7 +142,7 @@ qla2300_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return (IRQ_NONE);
}
@@ -160,16 +160,18 @@ qla2300_intr_handler(int irq, void *dev_id)
hccr = RD_REG_WORD(&reg->hccr);
if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
- qla_printk(KERN_INFO, ha, "Parity error -- "
- "HCCR=%x, Dumping firmware!\n", hccr);
+ ql_log(ql_log_warn, vha, 0x5026,
+ "Parity error -- HCCR=%x, Dumping "
+ "firmware.\n", hccr);
else
- qla_printk(KERN_INFO, ha, "RISC paused -- "
- "HCCR=%x, Dumping firmware!\n", hccr);
+ ql_log(ql_log_warn, vha, 0x5027,
+ "RISC paused -- HCCR=%x, Dumping "
+ "firmware.\n", hccr);
/*
* Issue a "HARD" reset in order for the RISC
* interrupt bit to be cleared. Schedule a big
- * hammmer to get out of the RISC PAUSED state.
+ * hammer to get out of the RISC PAUSED state.
*/
WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
RD_REG_WORD(&reg->hccr);
@@ -213,9 +215,8 @@ qla2300_intr_handler(int irq, void *dev_id)
qla2x00_async_event(vha, rsp, mb);
break;
default:
- DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
- "(%d).\n",
- vha->host_no, stat & 0xff));
+ ql_dbg(ql_dbg_async, vha, 0x5028,
+ "Unrecognized interrupt type (%d).\n", stat & 0xff);
break;
}
WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
@@ -262,11 +263,11 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
}
if (ha->mcp) {
- DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
- __func__, vha->host_no, ha->mcp->mb[0]));
+ ql_dbg(ql_dbg_async, vha, 0x5000,
+ "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
} else {
- DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
- __func__, vha->host_no));
+ ql_dbg(ql_dbg_async, vha, 0x5001,
+ "MBX pointer ERROR.\n");
}
}
@@ -285,22 +286,24 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
mb[cnt] = RD_REG_WORD(wptr);
- DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- "
- "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no,
- event[aen & 0xff],
- mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]));
+ ql_dbg(ql_dbg_async, vha, 0x5021,
+ "Inter-Driver Commucation %s -- "
+ "%04x %04x %04x %04x %04x %04x %04x.\n",
+ event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
+ mb[4], mb[5], mb[6]);
/* Acknowledgement needed? [Notify && non-zero timeout]. */
timeout = (descr >> 8) & 0xf;
if (aen != MBA_IDC_NOTIFY || !timeout)
return;
- DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- "
- "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout));
+ ql_dbg(ql_dbg_async, vha, 0x5022,
+ "Inter-Driver Communication %s -- ACK timeout=%d.\n",
+ event[aen & 0xff], timeout);
rval = qla2x00_post_idc_ack_work(vha, mb);
if (rval != QLA_SUCCESS)
- qla_printk(KERN_WARNING, vha->hw,
+ ql_log(ql_log_warn, vha, 0x5023,
"IDC failed to post ACK.\n");
}
@@ -393,15 +396,15 @@ skip_rio:
break;
case MBA_RESET: /* Reset */
- DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_async, vha, 0x5002,
+ "Asynchronous RESET.\n");
set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
break;
case MBA_SYSTEM_ERR: /* System Error */
mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_warn, vha, 0x5003,
"ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
"mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
@@ -409,7 +412,7 @@ skip_rio:
if (IS_FWI2_CAPABLE(ha)) {
if (mb[1] == 0 && mb[2] == 0) {
- qla_printk(KERN_ERR, ha,
+ ql_log(ql_log_fatal, vha, 0x5004,
"Unrecoverable Hardware Error: adapter "
"marked OFFLINE!\n");
vha->flags.online = 0;
@@ -422,7 +425,7 @@ skip_rio:
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
} else if (mb[1] == 0) {
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_fatal, vha, 0x5005,
"Unrecoverable Hardware Error: adapter marked "
"OFFLINE!\n");
vha->flags.online = 0;
@@ -431,31 +434,27 @@ skip_rio:
break;
case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
- DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n",
- vha->host_no, mb[1]));
- qla_printk(KERN_WARNING, ha,
- "ISP Request Transfer Error (%x).\n", mb[1]);
+ ql_log(ql_log_warn, vha, 0x5006,
+ "ISP Request Transfer Error (%x).\n", mb[1]);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
- DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
- vha->host_no));
- qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
+ ql_log(ql_log_warn, vha, 0x5007,
+ "ISP Response Transfer Error.\n");
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
- DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_async, vha, 0x5008,
+ "Asynchronous WAKEUP_THRES.\n");
break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
- DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
- mb[1]));
- qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
+ ql_log(ql_log_info, vha, 0x5009,
+ "LIP occurred (%x).\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -488,10 +487,8 @@ skip_rio:
ha->link_data_rate = mb[1];
}
- DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
- vha->host_no, link_speed));
- qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
- link_speed);
+ ql_log(ql_log_info, vha, 0x500a,
+ "LOOP UP detected (%s Gbps).\n", link_speed);
vha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -500,12 +497,9 @@ skip_rio:
case MBA_LOOP_DOWN: /* Loop Down Event */
mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
- DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
- "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3],
- mbx));
- qla_printk(KERN_INFO, ha,
- "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3],
- mbx);
+ ql_log(ql_log_info, vha, 0x500b,
+ "LOOP DOWN detected (%x %x %x %x).\n",
+ mb[1], mb[2], mb[3], mbx);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -525,9 +519,7 @@ skip_rio:
break;
case MBA_LIP_RESET: /* LIP reset occurred */
- DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
- vha->host_no, mb[1]));
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x500c,
"LIP reset occurred (%x).\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -554,14 +546,15 @@ skip_rio:
break;
if (IS_QLA8XXX_TYPE(ha)) {
- DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
- "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x500d,
+ "DCBX Completed -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
if (ha->notify_dcbx_comp)
complete(&ha->dcbx_comp);
} else
- DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
- "received.\n", vha->host_no));
+ ql_dbg(ql_dbg_async, vha, 0x500e,
+ "Asynchronous P2P MODE received.\n");
/*
* Until there's a transition from loop down to loop up, treat
@@ -594,10 +587,7 @@ skip_rio:
if (IS_QLA2100(ha))
break;
- DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
- "received.\n",
- vha->host_no));
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_info, vha, 0x500f,
"Configuration change detected: value=%x.\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -640,11 +630,9 @@ skip_rio:
/* Global event -- port logout or port unavailable. */
if (mb[1] == 0xffff && mb[2] == 0x7) {
- DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
- vha->host_no));
- DEBUG(printk(KERN_INFO
- "scsi(%ld): Port unavailable %04x %04x %04x.\n",
- vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x5010,
+ "Port unavailable %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -674,17 +662,15 @@ skip_rio:
atomic_set(&vha->loop_down_timer, 0);
if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
atomic_read(&vha->loop_state) != LOOP_DEAD) {
- DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
- "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
- mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x5011,
+ "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
+ mb[1], mb[2], mb[3]);
break;
}
- DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
- vha->host_no));
- DEBUG(printk(KERN_INFO
- "scsi(%ld): Port database changed %04x %04x %04x.\n",
- vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x5012,
+ "Port database changed %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
/*
* Mark all devices as missing so we will login again.
@@ -707,20 +693,17 @@ skip_rio:
if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
break;
- DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
- vha->host_no));
- DEBUG(printk(KERN_INFO
- "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
- vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x5013,
+ "RSCN database changed -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
| vha->d_id.b.al_pa;
if (rscn_entry == host_pid) {
- DEBUG(printk(KERN_INFO
- "scsi(%ld): Ignoring RSCN update to local host "
- "port ID (%06x)\n",
- vha->host_no, host_pid));
+ ql_dbg(ql_dbg_async, vha, 0x5014,
+ "Ignoring RSCN update to local host "
+ "port ID (%06x).\n", host_pid);
break;
}
@@ -747,8 +730,8 @@ skip_rio:
/* case MBA_RIO_RESPONSE: */
case MBA_ZIO_RESPONSE:
- DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_async, vha, 0x5015,
+ "[R|Z]IO update completion.\n");
if (IS_FWI2_CAPABLE(ha))
qla24xx_process_response_queue(vha, rsp);
@@ -757,61 +740,68 @@ skip_rio:
break;
case MBA_DISCARD_RND_FRAME:
- DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
- "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x5016,
+ "Discard RND Frame -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
break;
case MBA_TRACE_NOTIFICATION:
- DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
- vha->host_no, mb[1], mb[2]));
+ ql_dbg(ql_dbg_async, vha, 0x5017,
+ "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
break;
case MBA_ISP84XX_ALERT:
- DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
- "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x5018,
+ "ISP84XX Alert Notification -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
switch (mb[1]) {
case A84_PANIC_RECOVERY:
- qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
- "%04x %04x\n", mb[2], mb[3]);
+ ql_log(ql_log_info, vha, 0x5019,
+ "Alert 84XX: panic recovery %04x %04x.\n",
+ mb[2], mb[3]);
break;
case A84_OP_LOGIN_COMPLETE:
ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
- DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
- "firmware version %x\n", ha->cs84xx->op_fw_version));
+ ql_log(ql_log_info, vha, 0x501a,
+ "Alert 84XX: firmware version %x.\n",
+ ha->cs84xx->op_fw_version);
break;
case A84_DIAG_LOGIN_COMPLETE:
ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
- DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
- "diagnostic firmware version %x\n",
- ha->cs84xx->diag_fw_version));
+ ql_log(ql_log_info, vha, 0x501b,
+ "Alert 84XX: diagnostic firmware version %x.\n",
+ ha->cs84xx->diag_fw_version);
break;
case A84_GOLD_LOGIN_COMPLETE:
ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
ha->cs84xx->fw_update = 1;
- DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
- "firmware version %x\n",
- ha->cs84xx->gold_fw_version));
+ ql_log(ql_log_info, vha, 0x501c,
+ "Alert 84XX: gold firmware version %x.\n",
+ ha->cs84xx->gold_fw_version);
break;
default:
- qla_printk(KERN_ERR, ha,
- "Alert 84xx: Invalid Alert %04x %04x %04x\n",
+ ql_log(ql_log_warn, vha, 0x501d,
+ "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
}
spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
break;
case MBA_DCBX_START:
- DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
- vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x501e,
+ "DCBX Started -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
break;
case MBA_DCBX_PARAM_UPDATE:
- DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
- "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x501f,
+ "DCBX Parameters Updated -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
break;
case MBA_FCF_CONF_ERR:
- DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
- "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
+ ql_dbg(ql_dbg_async, vha, 0x5020,
+ "FCF Configuration Error -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
break;
case MBA_IDC_COMPLETE:
case MBA_IDC_NOTIFY:
@@ -838,10 +828,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
/* Validate handle. */
if (index >= MAX_OUTSTANDING_COMMANDS) {
- DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
- vha->host_no, index));
- qla_printk(KERN_WARNING, ha,
- "Invalid SCSI completion handle %d.\n", index);
+ ql_log(ql_log_warn, vha, 0x3014,
+ "Invalid SCSI command index (%x).\n", index);
if (IS_QLA82XX(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -859,10 +847,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
sp->cmd->result = DID_OK << 16;
qla2x00_sp_compl(ha, sp);
} else {
- DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
- " handle(0x%x)\n", vha->host_no, req->id, index));
- qla_printk(KERN_WARNING, ha,
- "Invalid ISP SCSI completion handle\n");
+ ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
if (IS_QLA82XX(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -882,8 +867,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
index = LSW(pkt->handle);
if (index >= MAX_OUTSTANDING_COMMANDS) {
- qla_printk(KERN_WARNING, ha,
- "%s: Invalid completion handle (%x).\n", func, index);
+ ql_log(ql_log_warn, vha, 0x5031,
+ "Invalid command index (%x).\n", index);
if (IS_QLA82XX(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
@@ -892,15 +877,13 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
}
sp = req->outstanding_cmds[index];
if (!sp) {
- qla_printk(KERN_WARNING, ha,
- "%s: Invalid completion handle (%x) -- timed-out.\n", func,
- index);
+ ql_log(ql_log_warn, vha, 0x5032,
+ "Invalid completion handle (%x) -- timed-out.\n", index);
return sp;
}
if (sp->handle != index) {
- qla_printk(KERN_WARNING, ha,
- "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle,
- index);
+ ql_log(ql_log_warn, vha, 0x5033,
+ "SRB handle (%x) mismatch %x.\n", sp->handle, index);
return NULL;
}
@@ -937,17 +920,17 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED : 0;
if (mbx->entry_status) {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s error entry - portid=%02x%02x%02x "
+ ql_dbg(ql_dbg_async, vha, 0x5043,
+ "Async-%s error entry - portid=%02x%02x%02x "
"entry-status=%x status=%x state-flag=%x "
"status-flags=%x.\n",
- fcport->vha->host_no, sp->handle, type,
- fcport->d_id.b.domain, fcport->d_id.b.area,
+ type, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mbx->entry_status,
le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
- le16_to_cpu(mbx->status_flags)));
+ le16_to_cpu(mbx->status_flags));
- DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057,
+ (uint8_t *)mbx, sizeof(*mbx));
goto logio_done;
}
@@ -957,12 +940,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
status = 0;
if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
- DEBUG2(printk(KERN_DEBUG
- "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x "
- "mbx1=%x.\n",
- fcport->vha->host_no, sp->handle, type,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)));
+ ql_dbg(ql_dbg_async, vha, 0x5045,
+ "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n",
+ type, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1));
data[0] = MBS_COMMAND_COMPLETE;
if (ctx->type == SRB_LOGIN_CMD) {
@@ -987,14 +968,14 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
}
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x status=%x "
+ ql_log(ql_log_warn, vha, 0x5046,
+ "Async-%s failed - portid=%02x%02x%02x status=%x "
"mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
- fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain,
+ type, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
- le16_to_cpu(mbx->mb7)));
+ le16_to_cpu(mbx->mb7));
logio_done:
lio->done(sp);
@@ -1025,9 +1006,8 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
type = "ct pass-through";
break;
default:
- qla_printk(KERN_WARNING, ha,
- "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
- sp_bsg->type);
+ ql_log(ql_log_warn, vha, 0x5047,
+ "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
return;
}
@@ -1045,20 +1025,20 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
bsg_job->reply->reply_payload_rcv_len =
le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld): CT pass-through-%s error "
+ ql_log(ql_log_warn, vha, 0x5048,
+ "CT pass-through-%s error "
"comp_status-status=0x%x total_byte = 0x%x.\n",
- vha->host_no, type, comp_status,
- bsg_job->reply->reply_payload_rcv_len));
+ type, comp_status,
+ bsg_job->reply->reply_payload_rcv_len);
} else {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld): CT pass-through-%s error "
- "comp_status-status=0x%x.\n",
- vha->host_no, type, comp_status));
+ ql_log(ql_log_warn, vha, 0x5049,
+ "CT pass-through-%s error "
+ "comp_status-status=0x%x.\n", type, comp_status);
bsg_job->reply->result = DID_ERROR << 16;
bsg_job->reply->reply_payload_rcv_len = 0;
}
- DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058,
+ (uint8_t *)pkt, sizeof(*pkt));
} else {
bsg_job->reply->result = DID_OK << 16;
bsg_job->reply->reply_payload_rcv_len =
@@ -1110,9 +1090,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
type = "ct pass-through";
break;
default:
- qla_printk(KERN_WARNING, ha,
- "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
- sp_bsg->type);
+ ql_log(ql_log_warn, vha, 0x503e,
+ "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
return;
}
@@ -1132,27 +1111,31 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
bsg_job->reply->reply_payload_rcv_len =
le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
+ ql_log(ql_log_info, vha, 0x503f,
+ "ELS-CT pass-through-%s error comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
- vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2],
- le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count)));
+ type, comp_status, fw_status[1], fw_status[2],
+ le16_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->total_byte_count));
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
else {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
+ ql_log(ql_log_info, vha, 0x5040,
+ "ELS-CT pass-through-%s error comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x.\n",
- vha->host_no, sp->handle, type, comp_status,
- le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1),
- le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2)));
+ type, comp_status,
+ le16_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->error_subcode_1),
+ le16_to_cpu(((struct els_sts_entry_24xx *)
+ pkt)->error_subcode_2));
bsg_job->reply->result = DID_ERROR << 16;
bsg_job->reply->reply_payload_rcv_len = 0;
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
}
- DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
+ (uint8_t *)pkt, sizeof(*pkt));
}
else {
bsg_job->reply->result = DID_OK << 16;
@@ -1201,25 +1184,24 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED : 0;
if (logio->entry_status) {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s error entry - "
+ ql_log(ql_log_warn, vha, 0x5034,
+ "Async-%s error entry - "
"portid=%02x%02x%02x entry-status=%x.\n",
- fcport->vha->host_no, sp->handle, type,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, logio->entry_status));
- DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
+ type, fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, logio->entry_status);
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059,
+ (uint8_t *)logio, sizeof(*logio));
goto logio_done;
}
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
- DEBUG2(printk(KERN_DEBUG
- "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x "
+ ql_dbg(ql_dbg_async, vha, 0x5036,
+ "Async-%s complete - portid=%02x%02x%02x "
"iop0=%x.\n",
- fcport->vha->host_no, sp->handle, type,
- fcport->d_id.b.domain, fcport->d_id.b.area,
+ type, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa,
- le32_to_cpu(logio->io_parameter[0])));
+ le32_to_cpu(logio->io_parameter[0]));
data[0] = MBS_COMMAND_COMPLETE;
if (ctx->type != SRB_LOGIN_CMD)
@@ -1256,14 +1238,14 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
}
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x comp=%x "
+ ql_dbg(ql_dbg_async, vha, 0x5037,
+ "Async-%s failed - portid=%02x%02x%02x comp=%x "
"iop0=%x iop1=%x.\n",
- fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain,
+ type, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le16_to_cpu(logio->comp_status),
le32_to_cpu(logio->io_parameter[0]),
- le32_to_cpu(logio->io_parameter[1])));
+ le32_to_cpu(logio->io_parameter[1]));
logio_done:
lio->done(sp);
@@ -1292,38 +1274,34 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
fcport = sp->fcport;
if (sts->entry_status) {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s error - entry-status(%x).\n",
- fcport->vha->host_no, sp->handle, type,
- sts->entry_status));
+ ql_log(ql_log_warn, vha, 0x5038,
+ "Async-%s error - entry-status(%x).\n",
+ type, sts->entry_status);
} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s error - completion status(%x).\n",
- fcport->vha->host_no, sp->handle, type,
- sts->comp_status));
+ ql_log(ql_log_warn, vha, 0x5039,
+ "Async-%s error - completion status(%x).\n",
+ type, sts->comp_status);
} else if (!(le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID)) {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s error - no response info(%x).\n",
- fcport->vha->host_no, sp->handle, type,
- sts->scsi_status));
+ ql_log(ql_log_warn, vha, 0x503a,
+ "Async-%s error - no response info(%x).\n",
+ type, sts->scsi_status);
} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s error - not enough response(%d).\n",
- fcport->vha->host_no, sp->handle, type,
- sts->rsp_data_len));
+ ql_log(ql_log_warn, vha, 0x503b,
+ "Async-%s error - not enough response(%d).\n",
+ type, sts->rsp_data_len);
} else if (sts->data[3]) {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld:%x): Async-%s error - response(%x).\n",
- fcport->vha->host_no, sp->handle, type,
- sts->data[3]));
+ ql_log(ql_log_warn, vha, 0x503c,
+ "Async-%s error - response(%x).\n",
+ type, sts->data[3]);
} else {
error = 0;
}
if (error) {
iocb->u.tmf.data = error;
- DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, sizeof(*sts)));
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
+ (uint8_t *)sts, sizeof(*sts));
}
iocb->done(sp);
@@ -1360,8 +1338,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
}
if (pkt->entry_status != 0) {
- DEBUG3(printk(KERN_INFO
- "scsi(%ld): Process error entry.\n", vha->host_no));
+ ql_log(ql_log_warn, vha, 0x5035,
+ "Process error entry.\n");
qla2x00_error_entry(vha, rsp, pkt);
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1399,10 +1377,10 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
break;
default:
/* Type Not Supported. */
- DEBUG4(printk(KERN_WARNING
- "scsi(%ld): Received unknown response pkt type %x "
+ ql_log(ql_log_warn, vha, 0x504a,
+ "Received unknown response pkt type %x "
"entry status=%x.\n",
- vha->host_no, pkt->entry_type, pkt->entry_status));
+ pkt->entry_type, pkt->entry_status);
break;
}
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1418,6 +1396,7 @@ static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
uint32_t sense_len, struct rsp_que *rsp)
{
+ struct scsi_qla_host *vha = sp->fcport->vha;
struct scsi_cmnd *cp = sp->cmd;
if (sense_len >= SCSI_SENSE_BUFFERSIZE)
@@ -1435,11 +1414,13 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
if (sp->request_sense_length != 0)
rsp->status_srb = sp;
- DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
- "cmd=%p\n", __func__, sp->fcport->vha->host_no,
- cp->device->channel, cp->device->id, cp->device->lun, cp));
+ ql_dbg(ql_dbg_io, vha, 0x301c,
+ "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n",
+ sp->fcport->vha->host_no, cp->device->channel, cp->device->id,
+ cp->device->lun, cp);
if (sense_len)
- DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
+ cp->sense_buffer, sense_len);
}
struct scsi_dif_tuple {
@@ -1457,6 +1438,7 @@ struct scsi_dif_tuple {
static inline void
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
+ struct scsi_qla_host *vha = sp->fcport->vha;
struct scsi_cmnd *cmd = sp->cmd;
struct scsi_dif_tuple *ep =
(struct scsi_dif_tuple *)&sts24->data[20];
@@ -1473,15 +1455,15 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
e_guard = be16_to_cpu(ep->guard);
a_guard = be16_to_cpu(ap->guard);
- DEBUG18(printk(KERN_DEBUG
- "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24));
+ ql_dbg(ql_dbg_io, vha, 0x3023,
+ "iocb(s) %p Returned STATUS.\n", sts24);
- DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
+ ql_dbg(ql_dbg_io, vha, 0x3024,
+ "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
" tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
- " tag=0x%x, act guard=0x%x, exp guard=0x%x\n",
+ " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
- a_app_tag, e_app_tag, a_guard, e_guard));
-
+ a_app_tag, e_app_tag, a_guard, e_guard);
/* check guard */
if (e_guard != a_guard) {
@@ -1569,9 +1551,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
sp = NULL;
if (sp == NULL) {
- qla_printk(KERN_WARNING, ha,
- "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no,
- sts->handle);
+ ql_log(ql_log_warn, vha, 0x3017,
+ "Invalid status handle (0x%x).\n", sts->handle);
if (IS_QLA82XX(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -1582,9 +1563,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
cp = sp->cmd;
if (cp == NULL) {
- qla_printk(KERN_WARNING, ha,
- "scsi(%ld): Command already returned (0x%x/%p).\n",
- vha->host_no, sts->handle, sp);
+ ql_log(ql_log_warn, vha, 0x3018,
+ "Command already returned (0x%x/%p).\n",
+ sts->handle, sp);
return;
}
@@ -1629,10 +1610,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
par_sense_len -= rsp_info_len;
}
if (rsp_info_len > 3 && rsp_info[3]) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): FCP I/O protocol failure "
- "(0x%x/0x%x).\n", vha->host_no, cp->device->id,
- cp->device->lun, rsp_info_len, rsp_info[3]));
+ ql_log(ql_log_warn, vha, 0x3019,
+ "FCP I/O protocol failure (0x%x/0x%x).\n",
+ rsp_info_len, rsp_info[3]);
cp->result = DID_BUS_BUSY << 16;
goto out;
@@ -1661,11 +1641,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (!lscsi_status &&
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): Mid-layer underflow "
+ ql_log(ql_log_warn, vha, 0x301a,
+ "Mid-layer underflow "
"detected (0x%x of 0x%x bytes).\n",
- vha->host_no, cp->device->id,
- cp->device->lun, resid, scsi_bufflen(cp));
+ resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16;
break;
@@ -1674,9 +1653,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
cp->result = DID_OK << 16 | lscsi_status;
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
- vha->host_no, cp->device->id, cp->device->lun));
+ ql_log(ql_log_warn, vha, 0x301b,
+ "QUEUE FULL detected.\n");
break;
}
logit = 0;
@@ -1697,11 +1675,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
scsi_set_resid(cp, resid);
if (scsi_status & SS_RESIDUAL_UNDER) {
if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d) Dropped frame(s) detected "
- "(0x%x of 0x%x bytes).\n", vha->host_no,
- cp->device->id, cp->device->lun, resid,
- scsi_bufflen(cp)));
+ ql_log(ql_log_warn, vha, 0x301d,
+ "Dropped frame(s) detected "
+ "(0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16 | lscsi_status;
break;
@@ -1710,20 +1687,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (!lscsi_status &&
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): Mid-layer underflow "
+ ql_log(ql_log_warn, vha, 0x301e,
+ "Mid-layer underflow "
"detected (0x%x of 0x%x bytes).\n",
- vha->host_no, cp->device->id,
- cp->device->lun, resid, scsi_bufflen(cp));
+ resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16;
break;
}
} else {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
- "of 0x%x bytes).\n", vha->host_no, cp->device->id,
- cp->device->lun, resid, scsi_bufflen(cp)));
+ ql_log(ql_log_warn, vha, 0x301f,
+ "Dropped frame(s) detected (0x%x "
+ "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
@@ -1739,10 +1714,8 @@ check_scsi_status:
*/
if (lscsi_status != 0) {
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
- vha->host_no, cp->device->id,
- cp->device->lun));
+ ql_log(ql_log_warn, vha, 0x3020,
+ "QUEUE FULL detected.\n");
logit = 1;
break;
}
@@ -1781,10 +1754,9 @@ check_scsi_status:
break;
}
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n",
- vha->host_no, cp->device->id, cp->device->lun,
- atomic_read(&fcport->state)));
+ ql_dbg(ql_dbg_io, vha, 0x3021,
+ "Port down status: port-state=0x%x.\n",
+ atomic_read(&fcport->state));
if (atomic_read(&fcport->state) == FCS_ONLINE)
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
@@ -1804,15 +1776,13 @@ check_scsi_status:
out:
if (logit)
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
- "portid=%02x%02x%02x oxid=0x%x cdb=%02x%02x%02x len=0x%x "
- "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
- cp->device->id, cp->device->lun, comp_status, scsi_status,
- cp->result, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, ox_id, cp->cmnd[0], cp->cmnd[1],
- cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, resid_len,
- fw_resid_len));
+ ql_dbg(ql_dbg_io, vha, 0x3022,
+ "FCP command status: 0x%x-0x%x (0x%x) "
+ "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
+ "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
+ comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0],
+ cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
+ resid_len, fw_resid_len);
if (rsp->status_srb == NULL)
qla2x00_sp_compl(ha, sp);
@@ -1830,16 +1800,15 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
uint8_t sense_sz = 0;
struct qla_hw_data *ha = rsp->hw;
+ struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
srb_t *sp = rsp->status_srb;
struct scsi_cmnd *cp;
if (sp != NULL && sp->request_sense_length != 0) {
cp = sp->cmd;
if (cp == NULL) {
- DEBUG2(printk("%s(): Cmd already returned back to OS "
- "sp=%p.\n", __func__, sp));
- qla_printk(KERN_INFO, ha,
- "cmd is NULL: already returned to OS (sp=%p)\n",
+ ql_log(ql_log_warn, vha, 0x3025,
+ "cmd is NULL: already returned to OS (sp=%p).\n",
sp);
rsp->status_srb = NULL;
@@ -1856,7 +1825,8 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
if (IS_FWI2_CAPABLE(ha))
host_to_fcp_swap(pkt->data, sizeof(pkt->data));
memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
- DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
+ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
+ sp->request_sense_ptr, sense_sz);
sp->request_sense_ptr += sense_sz;
sp->request_sense_length -= sense_sz;
@@ -1882,21 +1852,25 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
uint32_t handle = LSW(pkt->handle);
uint16_t que = MSW(pkt->handle);
struct req_que *req = ha->req_q_map[que];
-#if defined(QL_DEBUG_LEVEL_2)
+
if (pkt->entry_status & RF_INV_E_ORDER)
- qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
+ ql_dbg(ql_dbg_async, vha, 0x502a,
+ "Invalid Entry Order.\n");
else if (pkt->entry_status & RF_INV_E_COUNT)
- qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
+ ql_dbg(ql_dbg_async, vha, 0x502b,
+ "Invalid Entry Count.\n");
else if (pkt->entry_status & RF_INV_E_PARAM)
- qla_printk(KERN_ERR, ha,
- "%s: Invalid Entry Parameter\n", __func__);
+ ql_dbg(ql_dbg_async, vha, 0x502c,
+ "Invalid Entry Parameter.\n");
else if (pkt->entry_status & RF_INV_E_TYPE)
- qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
+ ql_dbg(ql_dbg_async, vha, 0x502d,
+ "Invalid Entry Type.\n");
else if (pkt->entry_status & RF_BUSY)
- qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
+ ql_dbg(ql_dbg_async, vha, 0x502e,
+ "Busy.\n");
else
- qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
-#endif
+ ql_dbg(ql_dbg_async, vha, 0x502f,
+ "UNKNOWN flag error.\n");
/* Validate handle. */
if (handle < MAX_OUTSTANDING_COMMANDS)
@@ -1923,10 +1897,8 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
|| pkt->entry_type == COMMAND_TYPE_6) {
- DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
- vha->host_no));
- qla_printk(KERN_WARNING, ha,
- "Error entry - invalid handle\n");
+ ql_log(ql_log_warn, vha, 0x5030,
+ "Error entry - invalid handle.\n");
if (IS_QLA82XX(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -1960,11 +1932,11 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
}
if (ha->mcp) {
- DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
- __func__, vha->host_no, ha->mcp->mb[0]));
+ ql_dbg(ql_dbg_async, vha, 0x504d,
+ "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
} else {
- DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
- __func__, vha->host_no));
+ ql_dbg(ql_dbg_async, vha, 0x504e,
+ "MBX pointer ERROR.\n");
}
}
@@ -1993,8 +1965,8 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
}
if (pkt->entry_status != 0) {
- DEBUG3(printk(KERN_INFO
- "scsi(%ld): Process error entry.\n", vha->host_no));
+ ql_dbg(ql_dbg_async, vha, 0x5029,
+ "Process error entry.\n");
qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2030,10 +2002,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
break;
default:
/* Type Not Supported. */
- DEBUG4(printk(KERN_WARNING
- "scsi(%ld): Received unknown response pkt type %x "
+ ql_dbg(ql_dbg_async, vha, 0x5042,
+ "Received unknown response pkt type %x "
"entry status=%x.\n",
- vha->host_no, pkt->entry_type, pkt->entry_status));
+ pkt->entry_type, pkt->entry_status);
break;
}
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2088,7 +2060,8 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
next_test:
if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
- qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
+ ql_log(ql_log_info, vha, 0x504c,
+ "Additional code -- 0x55AA.\n");
done:
WRT_REG_DWORD(&reg->iobase_window, 0x0000);
@@ -2121,7 +2094,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
@@ -2142,8 +2115,9 @@ qla24xx_intr_handler(int irq, void *dev_id)
hccr = RD_REG_DWORD(&reg->hccr);
- qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
- "Dumping firmware!\n", hccr);
+ ql_log(ql_log_warn, vha, 0x504b,
+ "RISC paused -- HCCR=%x, Dumping firmware.\n",
+ hccr);
qla2xxx_check_risc_status(vha);
@@ -2174,9 +2148,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
default:
- DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
- "(%d).\n",
- vha->host_no, stat & 0xff));
+ ql_dbg(ql_dbg_async, vha, 0x504f,
+ "Unrecognized interrupt type (%d).\n", stat * 0xff);
break;
}
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -2205,7 +2178,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2235,7 +2208,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2268,8 +2241,8 @@ qla24xx_msix_default(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- DEBUG(printk(
- "%s(): NULL response queue pointer\n", __func__));
+ printk(KERN_INFO
+ "%s(): NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2286,8 +2259,9 @@ qla24xx_msix_default(int irq, void *dev_id)
hccr = RD_REG_DWORD(&reg->hccr);
- qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
- "Dumping firmware!\n", hccr);
+ ql_log(ql_log_info, vha, 0x5050,
+ "RISC paused -- HCCR=%x, Dumping firmware.\n",
+ hccr);
qla2xxx_check_risc_status(vha);
@@ -2318,9 +2292,8 @@ qla24xx_msix_default(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
default:
- DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
- "(%d).\n",
- vha->host_no, stat & 0xff));
+ ql_dbg(ql_dbg_async, vha, 0x5051,
+ "Unrecognized interrupt type (%d).\n", stat & 0xff);
break;
}
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -2358,6 +2331,7 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
{
int i;
struct qla_msix_entry *qentry;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
@@ -2368,6 +2342,8 @@ qla24xx_disable_msix(struct qla_hw_data *ha)
kfree(ha->msix_entries);
ha->msix_entries = NULL;
ha->flags.msix_enabled = 0;
+ ql_dbg(ql_dbg_init, vha, 0x0042,
+ "Disabled the MSI.\n");
}
static int
@@ -2377,11 +2353,15 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
int i, ret;
struct msix_entry *entries;
struct qla_msix_entry *qentry;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
GFP_KERNEL);
- if (!entries)
+ if (!entries) {
+ ql_log(ql_log_warn, vha, 0x00bc,
+ "Failed to allocate memory for msix_entry.\n");
return -ENOMEM;
+ }
for (i = 0; i < ha->msix_count; i++)
entries[i].entry = i;
@@ -2391,16 +2371,18 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
if (ret < MIN_MSIX_COUNT)
goto msix_failed;
- qla_printk(KERN_WARNING, ha,
- "MSI-X: Failed to enable support -- %d/%d\n"
- " Retry with %d vectors\n", ha->msix_count, ret, ret);
+ ql_log(ql_log_warn, vha, 0x00c6,
+ "MSI-X: Failed to enable support "
+ "-- %d/%d\n Retry with %d vectors.\n",
+ ha->msix_count, ret, ret);
ha->msix_count = ret;
ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
if (ret) {
msix_failed:
- qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
- " support, giving up -- %d/%d\n",
- ha->msix_count, ret);
+ ql_log(ql_log_fatal, vha, 0x00c7,
+ "MSI-X: Failed to enable support, "
+ "giving up -- %d/%d.\n",
+ ha->msix_count, ret);
goto msix_out;
}
ha->max_rsp_queues = ha->msix_count - 1;
@@ -2408,6 +2390,8 @@ msix_failed:
ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
ha->msix_count, GFP_KERNEL);
if (!ha->msix_entries) {
+ ql_log(ql_log_fatal, vha, 0x00c8,
+ "Failed to allocate memory for ha->msix_entries.\n");
ret = -ENOMEM;
goto msix_out;
}
@@ -2434,9 +2418,9 @@ msix_failed:
0, msix_entries[i].name, rsp);
}
if (ret) {
- qla_printk(KERN_WARNING, ha,
- "MSI-X: Unable to register handler -- %x/%d.\n",
- qentry->vector, ret);
+ ql_log(ql_log_fatal, vha, 0x00cb,
+ "MSI-X: unable to register handler -- %x/%d.\n",
+ qentry->vector, ret);
qla24xx_disable_msix(ha);
ha->mqenable = 0;
goto msix_out;
@@ -2449,6 +2433,12 @@ msix_failed:
/* Enable MSI-X vector for response queue update for queue 0 */
if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
ha->mqenable = 1;
+ ql_dbg(ql_dbg_multiq, vha, 0xc005,
+ "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
+ ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
+ ql_dbg(ql_dbg_init, vha, 0x0055,
+ "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
+ ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
msix_out:
kfree(entries);
@@ -2460,6 +2450,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
int ret;
device_reg_t __iomem *reg = ha->iobase;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
@@ -2470,30 +2461,30 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
(ha->pdev->subsystem_device == 0x7040 ||
ha->pdev->subsystem_device == 0x7041 ||
ha->pdev->subsystem_device == 0x1705)) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
+ ql_log(ql_log_warn, vha, 0x0034,
+ "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
ha->pdev->subsystem_vendor,
- ha->pdev->subsystem_device));
+ ha->pdev->subsystem_device);
goto skip_msi;
}
if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
!QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
- ha->pdev->revision, ha->fw_attributes));
+ ql_log(ql_log_warn, vha, 0x0035,
+ "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
+ ha->pdev->revision, ha->fw_attributes);
goto skip_msix;
}
ret = qla24xx_enable_msix(ha, rsp);
if (!ret) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
- ha->fw_attributes));
+ ql_dbg(ql_dbg_init, vha, 0x0036,
+ "MSI-X: Enabled (0x%X, 0x%X).\n",
+ ha->chip_revision, ha->fw_attributes);
goto clear_risc_ints;
}
- qla_printk(KERN_WARNING, ha,
- "MSI-X: Falling back-to MSI mode -- %d.\n", ret);
+ ql_log(ql_log_info, vha, 0x0037,
+ "MSI-X Falling back-to MSI mode -%d.\n", ret);
skip_msix:
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
@@ -2502,18 +2493,19 @@ skip_msix:
ret = pci_enable_msi(ha->pdev);
if (!ret) {
- DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
+ ql_dbg(ql_dbg_init, vha, 0x0038,
+ "MSI: Enabled.\n");
ha->flags.msi_enabled = 1;
} else
- qla_printk(KERN_WARNING, ha,
- "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
+ ql_log(ql_log_warn, vha, 0x0039,
+ "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
skip_msi:
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
ha->flags.msi_enabled ? 0 : IRQF_SHARED,
QLA2XXX_DRIVER_NAME, rsp);
if (ret) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x003a,
"Failed to reserve interrupt %d already in use.\n",
ha->pdev->irq);
goto fail;
@@ -2563,13 +2555,14 @@ int qla25xx_request_irq(struct rsp_que *rsp)
struct qla_hw_data *ha = rsp->hw;
struct qla_init_msix_entry *intr = &msix_entries[2];
struct qla_msix_entry *msix = rsp->msix;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
int ret;
ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
if (ret) {
- qla_printk(KERN_WARNING, ha,
- "MSI-X: Unable to register handler -- %x/%d.\n",
- msix->vector, ret);
+ ql_log(ql_log_fatal, vha, 0x00e6,
+ "MSI-X: Unable to register handler -- %x/%d.\n",
+ msix->vector, ret);
return ret;
}
msix->have_irq = 1;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index c26f0acdfec..f7604ea1af8 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -46,14 +46,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- if (ha->pdev->error_state > pci_channel_io_frozen)
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__);
+
+ if (ha->pdev->error_state > pci_channel_io_frozen) {
+ ql_log(ql_log_warn, base_vha, 0x1001,
+ "error_state is greater than pci_channel_io_frozen, "
+ "exiting.\n");
return QLA_FUNCTION_TIMEOUT;
+ }
if (vha->device_flags & DFLG_DEV_FAILED) {
- DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
- "%s(%ld): Device in failed state, "
- "timeout MBX Exiting.\n",
- __func__, base_vha->host_no));
+ ql_log(ql_log_warn, base_vha, 0x1002,
+ "Device in failed state, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -63,17 +67,18 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_SUCCESS;
abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
- DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
if (ha->flags.pci_channel_io_perm_failure) {
- DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX "
- "Exiting.\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, base_vha, 0x1003,
+ "Perm failure on EEH timeout MBX, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
if (ha->flags.isp82xx_fw_hung) {
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+ ql_log(ql_log_warn, base_vha, 0x1004,
+ "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
rval = QLA_FUNCTION_FAILED;
goto premature_exit;
}
@@ -85,8 +90,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
*/
if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
/* Timeout occurred. Return error. */
- DEBUG2_3_11(printk("%s(%ld): cmd access timeout. "
- "Exiting.\n", __func__, base_vha->host_no));
+ ql_log(ql_log_warn, base_vha, 0x1005,
+ "Cmd access timeout, Exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -94,8 +99,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Save mailbox command for debug */
ha->mcp = mcp;
- DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n",
- base_vha->host_no, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1006,
+ "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -123,27 +128,30 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
iptr++;
}
-#if defined(QL_DEBUG_LEVEL_1)
- printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n",
- __func__, base_vha->host_no);
- qla2x00_dump_buffer((uint8_t *)mcp->mb, 16);
- printk("\n");
- qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16);
- printk("\n");
- qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8);
- printk("\n");
- printk("%s(%ld): I/O address = %p.\n", __func__, base_vha->host_no,
- optr);
- qla2x00_dump_regs(base_vha);
-#endif
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111,
+ "Loaded MBX registers (displayed in bytes) =.\n");
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112,
+ (uint8_t *)mcp->mb, 16);
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113,
+ ".\n");
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114,
+ ((uint8_t *)mcp->mb + 0x10), 16);
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115,
+ ".\n");
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116,
+ ((uint8_t *)mcp->mb + 0x20), 8);
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117,
+ "I/O Address = %p.\n", optr);
+ ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e);
/* Issue set host interrupt command to send cmd out. */
ha->flags.mbox_int = 0;
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
/* Unlock mbx registers and wait for interrupt */
- DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. "
- "jiffies=%lx.\n", __func__, base_vha->host_no, jiffies));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x100f,
+ "Going to unlock irq & waiting for interrupts. "
+ "jiffies=%lx.\n", jiffies);
/* Wait for mbx cmd completion until timeout */
@@ -155,9 +163,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
- DEBUG2_3_11(printk(KERN_INFO
- "%s(%ld): Pending Mailbox timeout. "
- "Exiting.\n", __func__, base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
+ "Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
}
@@ -173,17 +180,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
} else {
- DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
- base_vha->host_no, command));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1011,
+ "Cmd=%x Polling Mode.\n", command);
if (IS_QLA82XX(ha)) {
if (RD_REG_DWORD(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
- DEBUG2_3_11(printk(KERN_INFO
- "%s(%ld): Pending Mailbox timeout. "
- "Exiting.\n", __func__, base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
+ "Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
}
@@ -207,17 +213,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
command == MBC_LOAD_RISC_RAM_EXTENDED))
msleep(10);
} /* while */
- DEBUG17(qla_printk(KERN_WARNING, ha,
- "Waited %d sec\n",
- (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1013,
+ "Waited %d sec.\n",
+ (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
}
/* Check whether we timed out */
if (ha->flags.mbox_int) {
uint16_t *iptr2;
- DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__,
- base_vha->host_no, command));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1014,
+ "Cmd=%x completed.\n", command);
/* Got interrupt. Clear the flag. */
ha->flags.mbox_int = 0;
@@ -229,6 +235,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ha->mcp = NULL;
rval = QLA_FUNCTION_FAILED;
+ ql_log(ql_log_warn, base_vha, 0x1015,
+ "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
goto premature_exit;
}
@@ -249,8 +257,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
}
} else {
-#if defined(QL_DEBUG_LEVEL_2) || defined(QL_DEBUG_LEVEL_3) || \
- defined(QL_DEBUG_LEVEL_11)
uint16_t mb0;
uint32_t ictrl;
@@ -261,14 +267,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
ictrl = RD_REG_WORD(&reg->isp.ictrl);
}
- printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n",
- __func__, base_vha->host_no, command);
- printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__,
- base_vha->host_no, ictrl, jiffies);
- printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__,
- base_vha->host_no, mb0);
- qla2x00_dump_regs(base_vha);
-#endif
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119,
+ "MBX Command timeout for cmd %x.\n", command);
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a,
+ "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b,
+ "mb[0] = 0x%x.\n", mb0);
+ ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019);
rval = QLA_FUNCTION_TIMEOUT;
}
@@ -279,8 +284,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->mcp = NULL;
if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
- DEBUG11(printk("%s(%ld): checking for additional resp "
- "interrupt.\n", __func__, base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x101a,
+ "Checking for additional resp interrupt.\n");
/* polling mode for non isp_abort commands. */
qla2x00_poll(ha->rsp_q_map[0]);
@@ -291,38 +296,32 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
ha->flags.eeh_busy) {
/* not in dpc. schedule it for dpc to take over. */
- DEBUG(printk("%s(%ld): timeout schedule "
- "isp_abort_needed.\n", __func__,
- base_vha->host_no));
- DEBUG2_3_11(printk("%s(%ld): timeout schedule "
- "isp_abort_needed.\n", __func__,
- base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x101b,
+ "Timeout, schedule isp_abort_needed.\n");
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
- qla_printk(KERN_WARNING, ha,
- "Mailbox command timeout occurred. "
- "Scheduling ISP " "abort. eeh_busy: 0x%x\n",
- ha->flags.eeh_busy);
+ ql_log(ql_log_info, base_vha, 0x101c,
+ "Mailbox cmd timeout occured. "
+ "Scheduling ISP abort eeh_busy=0x%x.\n",
+ ha->flags.eeh_busy);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
} else if (!abort_active) {
/* call abort directly since we are in the DPC thread */
- DEBUG(printk("%s(%ld): timeout calling abort_isp\n",
- __func__, base_vha->host_no));
- DEBUG2_3_11(printk("%s(%ld): timeout calling "
- "abort_isp\n", __func__, base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x101d,
+ "Timeout, calling abort_isp.\n");
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
- qla_printk(KERN_WARNING, ha,
- "Mailbox command timeout occurred. "
- "Issuing ISP abort.\n");
+ ql_log(ql_log_info, base_vha, 0x101e,
+ "Mailbox cmd timeout occured. "
+ "Scheduling ISP abort.\n");
set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -332,11 +331,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
&vha->dpc_flags);
}
clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
- DEBUG(printk("%s(%ld): finished abort_isp\n",
- __func__, vha->host_no));
- DEBUG2_3_11(printk(
- "%s(%ld): finished abort_isp\n",
- __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x101f,
+ "Finished abort_isp.\n");
}
}
}
@@ -346,12 +342,11 @@ premature_exit:
complete(&ha->mbx_cmd_comp);
if (rval) {
- DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, "
- "mbx2=%x, cmd=%x ****\n", __func__, base_vha->host_no,
- mcp->mb[0], mcp->mb[1], mcp->mb[2], command));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
+ "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n",
+ mcp->mb[0], mcp->mb[1], mcp->mb[2], command);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__,
- base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
}
return rval;
@@ -366,7 +361,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -397,10 +392,10 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1023,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
}
return rval;
@@ -430,7 +425,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
mcp->out_mb = MBX_0;
@@ -461,15 +456,14 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1026,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
if (IS_FWI2_CAPABLE(ha)) {
- DEBUG11(printk("%s(%ld): done exchanges=%x.\n",
- __func__, vha->host_no, mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1027,
+ "Done exchanges=%x.\n", mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__,
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
}
}
@@ -501,7 +495,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
mcp->out_mb = MBX_0;
@@ -535,11 +529,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
failed:
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
}
return rval;
}
@@ -565,7 +558,7 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
mcp->out_mb = MBX_0;
@@ -576,15 +569,14 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
} else {
fwopts[0] = mcp->mb[0];
fwopts[1] = mcp->mb[1];
fwopts[2] = mcp->mb[2];
fwopts[3] = mcp->mb[3];
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
}
return rval;
@@ -612,7 +604,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
mcp->mb[1] = fwopts[1];
@@ -636,11 +628,11 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__,
- vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1030,
+ "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
}
return rval;
@@ -668,7 +660,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
mcp->mb[1] = 0xAAAA;
@@ -695,12 +687,10 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
}
return rval;
@@ -728,7 +718,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_VERIFY_CHECKSUM;
mcp->out_mb = MBX_0;
@@ -749,11 +739,11 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__,
- vha->host_no, rval, IS_FWI2_CAPABLE(vha->hw) ?
- (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1036,
+ "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
+ (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
}
return rval;
@@ -785,6 +775,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
+
mcp->mb[0] = MBC_IOCB_COMMAND_A64;
mcp->mb[1] = 0;
mcp->mb[2] = MSW(phys_addr);
@@ -799,14 +791,14 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
} else {
sts_entry_t *sts_entry = (sts_entry_t *) buffer;
/* Mask reserved bits. */
sts_entry->entry_status &=
IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
+ ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
}
return rval;
@@ -847,7 +839,7 @@ qla2x00_abort_command(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
- DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -876,11 +868,9 @@ qla2x00_abort_command(srb_t *sp)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
} else {
- DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
}
return rval;
@@ -896,10 +886,11 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
struct req_que *req;
struct rsp_que *rsp;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
-
l = l;
vha = fcport->vha;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
+
req = vha->hw->req_q_map[0];
rsp = req->rsp;
mcp->mb[0] = MBC_ABORT_TARGET;
@@ -919,18 +910,17 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
}
/* Issue marker IOCB. */
rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
MK_SYNC_ID);
if (rval2 != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
- "(%x).\n", __func__, vha->host_no, rval2));
+ ql_dbg(ql_dbg_mbx, vha, 0x1040,
+ "Failed to issue marker IOCB (%x).\n", rval2);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
}
return rval;
@@ -946,9 +936,10 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
struct req_que *req;
struct rsp_que *rsp;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
-
vha = fcport->vha;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
+
req = vha->hw->req_q_map[0];
rsp = req->rsp;
mcp->mb[0] = MBC_LUN_RESET;
@@ -966,18 +957,17 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
}
/* Issue marker IOCB. */
rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
MK_SYNC_ID_LUN);
if (rval2 != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
- "(%x).\n", __func__, vha->host_no, rval2));
+ ql_dbg(ql_dbg_mbx, vha, 0x1044,
+ "Failed to issue marker IOCB (%x).\n", rval2);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
}
return rval;
@@ -1011,8 +1001,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
mcp->mb[9] = vha->vp_idx;
@@ -1038,11 +1027,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
} else {
- DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
if (IS_QLA8XXX_TYPE(vha->hw)) {
vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1083,8 +1070,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_RETRY_COUNT;
mcp->out_mb = MBX_0;
@@ -1095,8 +1081,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n",
- vha->host_no, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x104a,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
/* Convert returned data and check our values. */
*r_a_tov = mcp->mb[3] / 2;
@@ -1107,8 +1093,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
*tov = ratov;
}
- DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d "
- "ratov=%d.\n", vha->host_no, mcp->mb[3], ratov));
+ ql_dbg(ql_dbg_mbx, vha, 0x104b,
+ "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
}
return rval;
@@ -1139,8 +1125,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
if (IS_QLA82XX(ha) && ql2xdbwr)
qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1174,13 +1159,11 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x "
- "mb0=%x.\n",
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x104d,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
}
return rval;
@@ -1213,13 +1196,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
dma_addr_t pd_dma;
struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
pd24 = NULL;
pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Port Database "
- "structure.\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x1050,
+ "Failed to allocate port database structure.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -1261,12 +1244,10 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
/* Check for logged in state. */
if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
pd24->last_login_state != PDS_PRLI_COMPLETE) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld): Unable to verify login-state (%x/%x) "
- " - portid=%02x%02x%02x.\n", vha->host_no,
- pd24->current_login_state, pd24->last_login_state,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_mbx, vha, 0x1051,
+ "Unable to verify login-state (%x/%x) for "
+ "loop_id %x.\n", pd24->current_login_state,
+ pd24->last_login_state, fcport->loop_id);
rval = QLA_FUNCTION_FAILED;
goto gpd_error_out;
}
@@ -1290,12 +1271,11 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
/* Check for logged in state. */
if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "scsi(%ld): Unable to verify login-state (%x/%x) "
- " - portid=%02x%02x%02x.\n", vha->host_no,
- pd->master_state, pd->slave_state,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa));
+ ql_dbg(ql_dbg_mbx, vha, 0x100a,
+ "Unable to verify login-state (%x/%x) - "
+ "portid=%02x%02x%02x.\n", pd->master_state,
+ pd->slave_state, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
rval = QLA_FUNCTION_FAILED;
goto gpd_error_out;
}
@@ -1325,10 +1305,11 @@ gpd_error_out:
dma_pool_free(ha->s_dma_pool, pd, pd_dma);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1052,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
+ mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
}
return rval;
@@ -1357,8 +1338,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
@@ -1381,12 +1361,10 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): "
- "failed=%x.\n", vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
}
return rval;
@@ -1418,8 +1396,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_PORT_NAME;
mcp->mb[9] = vha->vp_idx;
@@ -1439,8 +1416,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
} else {
if (name != NULL) {
/* This function returns name in big endian. */
@@ -1454,8 +1430,7 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
name[7] = LSB(mcp->mb[7]);
}
- DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
}
return rval;
@@ -1483,7 +1458,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
if (IS_QLA8XXX_TYPE(vha->hw)) {
/* Logout across all FCFs. */
@@ -1517,11 +1492,10 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n",
- __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
}
return rval;
@@ -1553,12 +1527,11 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
- DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total "
- "tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout,
- mcp->tov));
+ ql_dbg(ql_dbg_mbx, vha, 0x105e,
+ "Retry cnt=%d ratov=%d total tov=%d.\n",
+ vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
mcp->mb[0] = MBC_SEND_SNS_COMMAND;
mcp->mb[1] = cmd_size;
@@ -1575,13 +1548,12 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
- "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
- DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
- "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x105f,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
}
return rval;
@@ -1600,7 +1572,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
struct req_que *req;
struct rsp_que *rsp;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
if (ha->flags.cpu_affinity_enabled)
req = ha->req_q_map[0];
@@ -1610,8 +1582,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x1062,
+ "Failed to allocate login IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1631,21 +1603,21 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
lg->vp_index = vha->vp_idx;
rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
- "(%x).\n", __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1063,
+ "Failed to issue login IOCB (%x).\n", rval);
} else if (lg->entry_status != 0) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, vha->host_no,
- lg->entry_status));
+ ql_dbg(ql_dbg_mbx, vha, 0x1064,
+ "Failed to complete IOCB -- error status (%x).\n",
+ lg->entry_status);
rval = QLA_FUNCTION_FAILED;
} else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
iop[0] = le32_to_cpu(lg->io_parameter[0]);
iop[1] = le32_to_cpu(lg->io_parameter[1]);
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x) ioparam=%x/%x.\n", __func__,
- vha->host_no, le16_to_cpu(lg->comp_status), iop[0],
- iop[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1065,
+ "Failed to complete IOCB -- completion status (%x) "
+ "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
+ iop[0], iop[1]);
switch (iop[0]) {
case LSC_SCODE_PORTID_USED:
@@ -1673,7 +1645,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
break;
}
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
iop[0] = le32_to_cpu(lg->io_parameter[0]);
@@ -1728,7 +1700,7 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1771,13 +1743,12 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
rval = QLA_SUCCESS;
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x "
- "mb[0]=%x mb[1]=%x mb[2]=%x.\n", vha->host_no, rval,
- mcp->mb[0], mcp->mb[1], mcp->mb[2]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1068,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
}
return rval;
@@ -1808,13 +1779,13 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
+
if (IS_FWI2_CAPABLE(ha))
return qla24xx_login_fabric(vha, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mb_ret, opt);
- DEBUG3(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
if (HAS_EXTENDED_IDS(ha))
mcp->mb[1] = fcport->loop_id;
@@ -1845,15 +1816,12 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
rval = QLA_SUCCESS;
- DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
- "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
- mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
- DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
- "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
- mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
+ ql_dbg(ql_dbg_mbx, vha, 0x106b,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
} else {
/*EMPTY*/
- DEBUG3(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
}
return (rval);
@@ -1870,12 +1838,12 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
struct req_que *req;
struct rsp_que *rsp;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x106e,
+ "Failed to allocate logout IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1899,22 +1867,22 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
- "(%x).\n", __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x106f,
+ "Failed to issue logout IOCB (%x).\n", rval);
} else if (lg->entry_status != 0) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, vha->host_no,
- lg->entry_status));
+ ql_dbg(ql_dbg_mbx, vha, 0x1070,
+ "Failed to complete IOCB -- error status (%x).\n",
+ lg->entry_status);
rval = QLA_FUNCTION_FAILED;
} else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
- DEBUG2_3_11(printk("%s(%ld %d): failed to complete IOCB "
- "-- completion status (%x) ioparam=%x/%x.\n", __func__,
- vha->host_no, vha->vp_idx, le16_to_cpu(lg->comp_status),
+ ql_dbg(ql_dbg_mbx, vha, 0x1071,
+ "Failed to complete IOCB -- completion status (%x) "
+ "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
le32_to_cpu(lg->io_parameter[0]),
- le32_to_cpu(lg->io_parameter[1])));
+ le32_to_cpu(lg->io_parameter[1]));
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1946,8 +1914,7 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
mcp->out_mb = MBX_1|MBX_0;
@@ -1966,12 +1933,11 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x "
- "mbx1=%x.\n", vha->host_no, rval, mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1074,
+ "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
}
return rval;
@@ -1999,8 +1965,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2014,12 +1979,10 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
}
return rval;
@@ -2045,8 +2008,7 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
if (id_list == NULL)
return QLA_FUNCTION_FAILED;
@@ -2075,12 +2037,10 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n",
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
} else {
*entries = mcp->mb[1];
- DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
}
return rval;
@@ -2108,7 +2068,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
mcp->out_mb = MBX_0;
@@ -2121,14 +2081,14 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__,
- vha->host_no, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x107d,
+ "Failed mb[0]=%x.\n", mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x "
- "mb7=%x mb10=%x mb11=%x mb12=%x.\n", __func__,
- vha->host_no, mcp->mb[1], mcp->mb[2], mcp->mb[3],
- mcp->mb[6], mcp->mb[7], mcp->mb[10], mcp->mb[11],
- mcp->mb[12]));
+ ql_dbg(ql_dbg_mbx, vha, 0x107e,
+ "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
+ "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
+ mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
+ mcp->mb[11], mcp->mb[12]);
if (cur_xchg_cnt)
*cur_xchg_cnt = mcp->mb[3];
@@ -2147,7 +2107,6 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
return (rval);
}
-#if defined(QL_DEBUG_LEVEL_3)
/*
* qla2x00_get_fcal_position_map
* Get FCAL (LILP) position map using mailbox command
@@ -2172,10 +2131,12 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
dma_addr_t pmap_dma;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
+
pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
if (pmap == NULL) {
- DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x1080,
+ "Memory alloc failed.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
memset(pmap, 0, FCAL_MAP_SIZE);
@@ -2193,10 +2154,11 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval == QLA_SUCCESS) {
- DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map "
- "size (%x)\n", __func__, vha->host_no, mcp->mb[0],
- mcp->mb[1], (unsigned)pmap[0]));
- DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1));
+ ql_dbg(ql_dbg_mbx, vha, 0x1081,
+ "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
+ mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
+ pmap, pmap[0] + 1);
if (pos_map)
memcpy(pos_map, pmap, FCAL_MAP_SIZE);
@@ -2204,15 +2166,13 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
}
return rval;
}
-#endif
/*
* qla2x00_get_link_status
@@ -2237,7 +2197,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
uint32_t *siter, *diter, dwords;
struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_LINK_STATUS;
mcp->mb[2] = MSW(stats_dma);
@@ -2266,11 +2226,12 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
if (rval == QLA_SUCCESS) {
if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
- DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
- __func__, vha->host_no, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1085,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
rval = QLA_FUNCTION_FAILED;
} else {
/* Copy over data -- firmware data is LE. */
+ ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
dwords = offsetof(struct link_statistics, unused1) / 4;
siter = diter = &stats->link_fail_cnt;
while (dwords--)
@@ -2278,8 +2239,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
}
} else {
/* Failed. */
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
}
return rval;
@@ -2294,7 +2254,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
mbx_cmd_t *mcp = &mc;
uint32_t *siter, *diter, dwords;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
mcp->mb[2] = MSW(stats_dma);
@@ -2312,10 +2272,11 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
if (rval == QLA_SUCCESS) {
if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
- DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
- __func__, vha->host_no, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1089,
+ "Failed mb[0]=%x.\n", mcp->mb[0]);
rval = QLA_FUNCTION_FAILED;
} else {
+ ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
/* Copy over data -- firmware data is LE. */
dwords = sizeof(struct link_statistics) / 4;
siter = diter = &stats->link_fail_cnt;
@@ -2324,8 +2285,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
}
} else {
/* Failed. */
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
}
return rval;
@@ -2345,7 +2305,7 @@ qla24xx_abort_command(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2360,8 +2320,8 @@ qla24xx_abort_command(srb_t *sp)
abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
if (abt == NULL) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n",
- __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x108d,
+ "Failed to allocate abort IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
memset(abt, 0, sizeof(struct abort_entry_24xx));
@@ -2380,20 +2340,20 @@ qla24xx_abort_command(srb_t *sp)
rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n",
- __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x108e,
+ "Failed to issue IOCB (%x).\n", rval);
} else if (abt->entry_status != 0) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, vha->host_no,
- abt->entry_status));
+ ql_dbg(ql_dbg_mbx, vha, 0x108f,
+ "Failed to complete IOCB -- error status (%x).\n",
+ abt->entry_status);
rval = QLA_FUNCTION_FAILED;
} else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x).\n", __func__, vha->host_no,
- le16_to_cpu(abt->nport_handle)));
+ ql_dbg(ql_dbg_mbx, vha, 0x1090,
+ "Failed to complete IOCB -- completion status (%x).\n",
+ le16_to_cpu(abt->nport_handle));
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2421,19 +2381,20 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
struct req_que *req;
struct rsp_que *rsp;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
-
vha = fcport->vha;
ha = vha->hw;
req = vha->req;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
+
if (ha->flags.cpu_affinity_enabled)
rsp = ha->rsp_q_map[tag + 1];
else
rsp = req->rsp;
tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
if (tsk == NULL) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
- "IOCB.\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x1093,
+ "Failed to allocate task management IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
@@ -2457,30 +2418,30 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
sts = &tsk->p.sts;
rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB "
- "(%x).\n", __func__, vha->host_no, name, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1094,
+ "Failed to issue %s reset IOCB (%x).\n", name, rval);
} else if (sts->entry_status != 0) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, vha->host_no,
- sts->entry_status));
+ ql_dbg(ql_dbg_mbx, vha, 0x1095,
+ "Failed to complete IOCB -- error status (%x).\n",
+ sts->entry_status);
rval = QLA_FUNCTION_FAILED;
} else if (sts->comp_status !=
__constant_cpu_to_le16(CS_COMPLETE)) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x).\n", __func__,
- vha->host_no, le16_to_cpu(sts->comp_status)));
+ ql_dbg(ql_dbg_mbx, vha, 0x1096,
+ "Failed to complete IOCB -- completion status (%x).\n",
+ le16_to_cpu(sts->comp_status));
rval = QLA_FUNCTION_FAILED;
} else if (le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID) {
if (le32_to_cpu(sts->rsp_data_len) < 4) {
- DEBUG2_3_11(printk("%s(%ld): ignoring inconsistent "
- "data length -- not enough response info (%d).\n",
- __func__, vha->host_no,
- le32_to_cpu(sts->rsp_data_len)));
+ ql_dbg(ql_dbg_mbx, vha, 0x1097,
+ "Ignoring inconsistent data length -- not enough "
+ "response info (%d).\n",
+ le32_to_cpu(sts->rsp_data_len));
} else if (sts->data[3]) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- response (%x).\n", __func__,
- vha->host_no, sts->data[3]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1098,
+ "Failed to complete IOCB -- response (%x).\n",
+ sts->data[3]);
rval = QLA_FUNCTION_FAILED;
}
}
@@ -2489,10 +2450,10 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
if (rval2 != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
- "(%x).\n", __func__, vha->host_no, rval2));
+ ql_dbg(ql_dbg_mbx, vha, 0x1099,
+ "Failed to issue marker IOCB (%x).\n", rval2);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2533,7 +2494,7 @@ qla2x00_system_error(scsi_qla_host_t *vha)
if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
mcp->out_mb = MBX_0;
@@ -2543,10 +2504,9 @@ qla2x00_system_error(scsi_qla_host_t *vha)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
}
return rval;
@@ -2566,7 +2526,7 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SERDES_PARAMS;
mcp->mb[1] = BIT_0;
@@ -2581,11 +2541,11 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
if (rval != QLA_SUCCESS) {
/*EMPTY*/
- DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x109f,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
/*EMPTY*/
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
}
return rval;
@@ -2601,7 +2561,7 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_STOP_FIRMWARE;
mcp->out_mb = MBX_0;
@@ -2611,12 +2571,11 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
if (mcp->mb[0] == MBS_INVALID_COMMAND)
rval = QLA_INVALID_COMMAND;
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
}
return rval;
@@ -2630,14 +2589,14 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(vha->hw->pdev)))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_TRACE_CONTROL;
mcp->mb[1] = TC_EFT_ENABLE;
mcp->mb[2] = LSW(eft_dma);
@@ -2652,10 +2611,11 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a5,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
}
return rval;
@@ -2668,14 +2628,14 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(vha->hw->pdev)))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_TRACE_CONTROL;
mcp->mb[1] = TC_EFT_DISABLE;
mcp->out_mb = MBX_1|MBX_0;
@@ -2684,10 +2644,11 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a8,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
}
return rval;
@@ -2701,14 +2662,14 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
+
if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
return QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(vha->hw->pdev)))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_TRACE_CONTROL;
mcp->mb[1] = TC_FCE_ENABLE;
mcp->mb[2] = LSW(fce_dma);
@@ -2727,10 +2688,11 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ab,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
if (mb)
memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2748,14 +2710,14 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
if (unlikely(pci_channel_offline(vha->hw->pdev)))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_TRACE_CONTROL;
mcp->mb[1] = TC_FCE_DISABLE;
mcp->mb[2] = TC_FCE_DISABLE_TRACE;
@@ -2766,10 +2728,11 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ae,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
if (wr)
*wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2794,11 +2757,11 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
+
if (!IS_IIDMA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = mcp->mb[3] = 0;
@@ -2817,10 +2780,9 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
}
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
if (port_speed)
*port_speed = mcp->mb[3];
}
@@ -2836,11 +2798,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
+
if (!IS_IIDMA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = BIT_0;
@@ -2863,10 +2825,9 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
}
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__);
}
return rval;
@@ -2882,33 +2843,36 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
scsi_qla_host_t *vp;
unsigned long flags;
+ ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
+
if (rptid_entry->entry_status != 0)
return;
if (rptid_entry->format == 0) {
- DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
- " number of VPs acquired %d\n", __func__, vha->host_no,
- MSB(le16_to_cpu(rptid_entry->vp_count)),
- LSB(le16_to_cpu(rptid_entry->vp_count))));
- DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__,
- rptid_entry->port_id[2], rptid_entry->port_id[1],
- rptid_entry->port_id[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10b7,
+ "Format 0 : Number of VPs setup %d, number of "
+ "VPs acquired %d.\n",
+ MSB(le16_to_cpu(rptid_entry->vp_count)),
+ LSB(le16_to_cpu(rptid_entry->vp_count)));
+ ql_dbg(ql_dbg_mbx, vha, 0x10b8,
+ "Primary port id %02x%02x%02x.\n",
+ rptid_entry->port_id[2], rptid_entry->port_id[1],
+ rptid_entry->port_id[0]);
} else if (rptid_entry->format == 1) {
vp_idx = LSB(stat);
- DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
- "- status %d - "
- "with port id %02x%02x%02x\n", __func__, vha->host_no,
- vp_idx, MSB(stat),
+ ql_dbg(ql_dbg_mbx, vha, 0x10b9,
+ "Format 1: VP[%d] enabled - status %d - with "
+ "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
rptid_entry->port_id[2], rptid_entry->port_id[1],
- rptid_entry->port_id[0]));
+ rptid_entry->port_id[0]);
vp = vha;
if (vp_idx == 0 && (MSB(stat) != 1))
goto reg_needed;
if (MSB(stat) == 1) {
- DEBUG2(printk("scsi(%ld): Could not acquire ID for "
- "VP[%d].\n", vha->host_no, vp_idx));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ba,
+ "Could not acquire ID for VP[%d].\n", vp_idx);
return;
}
@@ -2963,10 +2927,12 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
/* This can be called by the parent */
+ ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
+
vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
if (!vpmod) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP "
- "IOCB.\n", __func__, vha->host_no));
+ ql_log(ql_log_warn, vha, 0x10bc,
+ "Failed to allocate modify VP IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
@@ -2983,22 +2949,21 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB"
- "(%x).\n", __func__, base_vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10bd,
+ "Failed to issue VP config IOCB (%x).\n", rval);
} else if (vpmod->comp_status != 0) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, base_vha->host_no,
- vpmod->comp_status));
+ ql_dbg(ql_dbg_mbx, vha, 0x10be,
+ "Failed to complete IOCB -- error status (%x).\n",
+ vpmod->comp_status);
rval = QLA_FUNCTION_FAILED;
} else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x).\n", __func__, base_vha->host_no,
- le16_to_cpu(vpmod->comp_status)));
+ ql_dbg(ql_dbg_mbx, vha, 0x10bf,
+ "Failed to complete IOCB -- completion status (%x).\n",
+ le16_to_cpu(vpmod->comp_status));
rval = QLA_FUNCTION_FAILED;
} else {
/* EMPTY */
- DEBUG11(printk("%s(%ld): done.\n", __func__,
- base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
}
dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3032,17 +2997,16 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
int vp_index = vha->vp_idx;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__,
- vha->host_no, vp_index));
+ ql_dbg(ql_dbg_mbx, vha, 0x10c1,
+ "Entered %s enabling index %d.\n", __func__, vp_index);
if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
return QLA_PARAMETER_ERROR;
vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
if (!vce) {
- DEBUG2_3(printk("%s(%ld): "
- "failed to allocate VP Control IOCB.\n", __func__,
- base_vha->host_no));
+ ql_log(ql_log_warn, vha, 0x10c2,
+ "Failed to allocate VP control IOCB.\n");
return QLA_MEMORY_ALLOC_FAILED;
}
memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
@@ -3063,28 +3027,20 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB"
- "(%x).\n", __func__, base_vha->host_no, rval));
- printk("%s(%ld): failed to issue VP control IOCB"
- "(%x).\n", __func__, base_vha->host_no, rval);
+ ql_dbg(ql_dbg_mbx, vha, 0x10c3,
+ "Failed to issue VP control IOCB (%x).\n", rval);
} else if (vce->entry_status != 0) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, base_vha->host_no,
- vce->entry_status));
- printk("%s(%ld): failed to complete IOCB "
- "-- error status (%x).\n", __func__, base_vha->host_no,
+ ql_dbg(ql_dbg_mbx, vha, 0x10c4,
+ "Failed to complete IOCB -- error status (%x).\n",
vce->entry_status);
rval = QLA_FUNCTION_FAILED;
} else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x).\n", __func__, base_vha->host_no,
- le16_to_cpu(vce->comp_status)));
- printk("%s(%ld): failed to complete IOCB "
- "-- completion status (%x).\n", __func__, base_vha->host_no,
+ ql_dbg(ql_dbg_mbx, vha, 0x10c5,
+ "Failed to complet IOCB -- completion status (%x).\n",
le16_to_cpu(vce->comp_status));
rval = QLA_FUNCTION_FAILED;
} else {
- DEBUG2(printk("%s(%ld): done.\n", __func__, base_vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3121,6 +3077,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
+
/*
* This command is implicitly executed by firmware during login for the
* physical hosts
@@ -3155,7 +3113,7 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3186,10 +3144,10 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1008,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__);
}
return rval;
@@ -3214,12 +3172,10 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
unsigned long flags;
struct qla_hw_data *ha = vha->hw;
- DEBUG16(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
if (mn == NULL) {
- DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX "
- "IOCB.\n", __func__, vha->host_no));
return QLA_MEMORY_ALLOC_FAILED;
}
@@ -3237,43 +3193,43 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
mn->p.req.entry_count = 1;
mn->p.req.options = cpu_to_le16(options);
- DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__,
- vha->host_no));
- DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
- sizeof(*mn)));
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
+ "Dump of Verify Request.\n");
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
+ (uint8_t *)mn, sizeof(*mn));
rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
if (rval != QLA_SUCCESS) {
- DEBUG2_16(printk("%s(%ld): failed to issue Verify "
- "IOCB (%x).\n", __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10cb,
+ "Failed to issue verify IOCB (%x).\n", rval);
goto verify_done;
}
- DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__,
- vha->host_no));
- DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
- sizeof(*mn)));
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
+ "Dump of Verify Response.\n");
+ ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
+ (uint8_t *)mn, sizeof(*mn));
status[0] = le16_to_cpu(mn->p.rsp.comp_status);
status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
le16_to_cpu(mn->p.rsp.failure_code) : 0;
- DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__,
- vha->host_no, status[0], status[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ce,
+ "cs=%x fc=%x.\n", status[0], status[1]);
if (status[0] != CS_COMPLETE) {
rval = QLA_FUNCTION_FAILED;
if (!(options & VCO_DONT_UPDATE_FW)) {
- DEBUG2_16(printk("%s(%ld): Firmware update "
- "failed. Retrying without update "
- "firmware.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10cf,
+ "Firmware update failed. Retrying "
+ "without update firmware.\n");
options |= VCO_DONT_UPDATE_FW;
options &= ~VCO_FORCE_UPDATE;
retry = 1;
}
} else {
- DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n",
- __func__, vha->host_no,
- le32_to_cpu(mn->p.rsp.fw_ver)));
+ ql_dbg(ql_dbg_mbx, vha, 0x10d0,
+ "Firmware updated to %x.\n",
+ le32_to_cpu(mn->p.rsp.fw_ver));
/* NOTE: we only update OP firmware. */
spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
@@ -3288,10 +3244,9 @@ verify_done:
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
if (rval != QLA_SUCCESS) {
- DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
} else {
- DEBUG16(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__);
}
return rval;
@@ -3307,6 +3262,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
struct device_reg_25xxmq __iomem *reg;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
+
mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
mcp->mb[1] = req->options;
mcp->mb[2] = MSW(LSD(req->dma));
@@ -3344,9 +3301,13 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
rval = qla2x00_mailbox_command(vha, mcp);
- if (rval != QLA_SUCCESS)
- DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x mb0=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0]));
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10d4,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__);
+ }
+
return rval;
}
@@ -3360,6 +3321,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
struct device_reg_25xxmq __iomem *reg;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
+
mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
mcp->mb[1] = rsp->options;
mcp->mb[2] = MSW(LSD(rsp->dma));
@@ -3393,10 +3356,13 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
rval = qla2x00_mailbox_command(vha, mcp);
- if (rval != QLA_SUCCESS)
- DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x "
- "mb0=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10d7,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__);
+ }
+
return rval;
}
@@ -3407,7 +3373,7 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_IDC_ACK;
memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3418,10 +3384,10 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10da,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__);
}
return rval;
@@ -3434,11 +3400,11 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
+
if (!IS_QLA81XX(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
mcp->out_mb = MBX_1|MBX_0;
@@ -3448,10 +3414,11 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10dd,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
*sector_size = mcp->mb[1];
}
@@ -3468,7 +3435,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
if (!IS_QLA81XX(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3480,10 +3447,11 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e0,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__);
}
return rval;
@@ -3499,7 +3467,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
if (!IS_QLA81XX(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3514,11 +3482,11 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
- "mb[2]=%x.\n", __func__, vha->host_no, rval, mcp->mb[0],
- mcp->mb[1], mcp->mb[2]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e3,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__);
}
return rval;
@@ -3531,7 +3499,7 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_RESTART_MPI_FW;
mcp->out_mb = MBX_0;
@@ -3541,10 +3509,11 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e6,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__);
}
return rval;
@@ -3559,11 +3528,11 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
if (len == 1)
opt |= BIT_0;
@@ -3586,10 +3555,10 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
*sfp = mcp->mb[1];
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10e9,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__);
}
return rval;
@@ -3604,11 +3573,11 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
if (len == 1)
opt |= BIT_0;
@@ -3631,10 +3600,10 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ec,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__);
}
return rval;
@@ -3648,11 +3617,11 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
+
if (!IS_QLA8XXX_TYPE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_GET_XGMAC_STATS;
mcp->mb[2] = MSW(stats_dma);
mcp->mb[3] = LSW(stats_dma);
@@ -3666,11 +3635,12 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
- "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
- mcp->mb[0], mcp->mb[1], mcp->mb[2]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ef,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
+
*actual_size = mcp->mb[2] << 2;
}
@@ -3686,11 +3656,11 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
+
if (!IS_QLA8XXX_TYPE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_GET_DCBX_PARAMS;
mcp->mb[1] = 0;
mcp->mb[2] = MSW(tlv_dma);
@@ -3705,11 +3675,11 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
- "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
- mcp->mb[0], mcp->mb[1], mcp->mb[2]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f2,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__);
}
return rval;
@@ -3722,11 +3692,11 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_READ_RAM_EXTENDED;
mcp->mb[1] = LSW(risc_addr);
mcp->mb[8] = MSW(risc_addr);
@@ -3736,10 +3706,10 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f5,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
*data = mcp->mb[3] << 16 | mcp->mb[2];
}
@@ -3755,7 +3725,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mbx_cmd_t *mcp = &mc;
uint32_t iter_cnt = 0x1;
- DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3794,15 +3764,12 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2(printk(KERN_WARNING
- "(%ld): failed=%x mb[0]=0x%x "
- "mb[1]=0x%x mb[2]=0x%x mb[3]=0x%x mb[18]=0x%x "
- "mb[19]=0x%x.\n",
- vha->host_no, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
- mcp->mb[3], mcp->mb[18], mcp->mb[19]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f8,
+ "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
+ "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
+ mcp->mb[3], mcp->mb[18], mcp->mb[19]);
} else {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld): done.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
}
/* Copy mailbox information */
@@ -3819,7 +3786,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk("scsi(%ld): entered.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3858,12 +3825,11 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2(printk(KERN_WARNING
- "(%ld): failed=%x mb[0]=0x%x mb[1]=0x%x.\n",
- vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
+ ql_dbg(ql_dbg_mbx, vha, 0x10fb,
+ "Failed=%x mb[0]=%x mb[1]=%x.\n",
+ rval, mcp->mb[0], mcp->mb[1]);
} else {
- DEBUG2(printk(KERN_WARNING
- "scsi(%ld): done.\n", vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
}
/* Copy mailbox information */
@@ -3872,14 +3838,14 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
}
int
-qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
+qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG16(printk("%s(%ld): enable_diag=%d entered.\n", __func__,
- ha->host_no, enable_diagnostic));
+ ql_dbg(ql_dbg_mbx, vha, 0x10fd,
+ "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
mcp->mb[0] = MBC_ISP84XX_RESET;
mcp->mb[1] = enable_diagnostic;
@@ -3887,13 +3853,12 @@ qla84xx_reset_chip(scsi_qla_host_t *ha, uint16_t enable_diagnostic)
mcp->in_mb = MBX_1|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
- rval = qla2x00_mailbox_command(ha, mcp);
+ rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS)
- DEBUG16(printk("%s(%ld): failed=%x.\n", __func__, ha->host_no,
- rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
else
- DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__);
return rval;
}
@@ -3905,11 +3870,11 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
mcp->mb[1] = LSW(risc_addr);
mcp->mb[2] = LSW(data);
@@ -3921,10 +3886,10 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1101,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__);
}
return rval;
@@ -3941,8 +3906,7 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
rval = QLA_SUCCESS;
- DEBUG11(qla_printk(KERN_INFO, ha,
- "%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
@@ -3982,11 +3946,10 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
rval = QLA_FUNCTION_FAILED;
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
- __func__, vha->host_no, rval, mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1104,
+ "Failed=%x mb[0]=%x.\n", rval, mb[0]);
} else {
- DEBUG11(printk(KERN_INFO
- "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__);
}
return rval;
@@ -3999,12 +3962,11 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(qla_printk(KERN_INFO, ha,
- "%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_DATA_RATE;
mcp->mb[1] = 0;
mcp->out_mb = MBX_1|MBX_0;
@@ -4013,11 +3975,10 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
- __func__, vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1107,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(printk(KERN_INFO
- "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
if (mcp->mb[1] != 0x7)
ha->link_data_rate = mcp->mb[1];
}
@@ -4033,8 +3994,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk(KERN_INFO
- "%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
if (!IS_QLA81XX(ha))
return QLA_FUNCTION_FAILED;
@@ -4047,15 +4007,13 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk(KERN_WARNING
- "%s(%ld): failed=%x (%x).\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x110a,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
/* Copy all bits to preserve original value */
memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
- DEBUG11(printk(KERN_INFO
- "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__);
}
return rval;
}
@@ -4067,8 +4025,7 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- DEBUG11(printk(KERN_INFO
- "%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SET_PORT_CONFIG;
/* Copy all bits to preserve original setting */
@@ -4080,12 +4037,10 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk(KERN_WARNING
- "%s(%ld): failed=%x (%x).\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x110d,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else
- DEBUG11(printk(KERN_INFO
- "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__);
return rval;
}
@@ -4100,12 +4055,11 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
+
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(printk(KERN_INFO
- "%s(%ld): entered.\n", __func__, vha->host_no));
-
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
if (ha->flags.fcp_prio_enabled)
@@ -4127,12 +4081,9 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
}
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk(KERN_WARNING
- "%s(%ld): failed=%x.\n", __func__,
- vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
} else {
- DEBUG11(printk(KERN_INFO
- "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__);
}
return rval;
@@ -4145,13 +4096,12 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
uint8_t byte;
struct qla_hw_data *ha = vha->hw;
- DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
/* Integer part */
rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk(KERN_WARNING
- "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
ha->flags.thermal_supported = 0;
goto fail;
}
@@ -4160,14 +4110,13 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
/* Fraction part */
rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(printk(KERN_WARNING
- "%s(%ld): failed=%x.\n", __func__, vha->host_no, rval));
+ ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
ha->flags.thermal_supported = 0;
goto fail;
}
*frac = (byte >> 6) * 25;
- DEBUG11(printk(KERN_INFO "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__);
fail:
return rval;
}
@@ -4180,12 +4129,11 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
+
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(qla_printk(KERN_INFO, ha,
- "%s(%ld): entered.\n", __func__, vha->host_no));
-
memset(mcp, 0, sizeof(mbx_cmd_t));
mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
mcp->mb[1] = 1;
@@ -4197,12 +4145,10 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
- "%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x1016,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(qla_printk(KERN_INFO, ha,
- "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__);
}
return rval;
@@ -4216,12 +4162,11 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
+ ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
+
if (!IS_QLA82XX(ha))
return QLA_FUNCTION_FAILED;
- DEBUG11(qla_printk(KERN_INFO, ha,
- "%s(%ld): entered.\n", __func__, vha->host_no));
-
memset(mcp, 0, sizeof(mbx_cmd_t));
mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
mcp->mb[1] = 0;
@@ -4233,12 +4178,10 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- DEBUG2_3_11(qla_printk(KERN_WARNING, ha,
- "%s(%ld): failed=%x mb[0]=%x.\n", __func__,
- vha->host_no, rval, mcp->mb[0]));
+ ql_dbg(ql_dbg_mbx, vha, 0x100c,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- DEBUG11(qla_printk(KERN_INFO, ha,
- "%s(%ld): done.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
}
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 5e343919aca..c706ed37000 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -36,8 +36,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
mutex_lock(&ha->vport_lock);
vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
if (vp_id > ha->max_npiv_vports) {
- DEBUG15(printk ("vp_id %d is bigger than max-supported %d.\n",
- vp_id, ha->max_npiv_vports));
+ ql_dbg(ql_dbg_vport, vha, 0xa000,
+ "vp_id %d is bigger than max-supported %d.\n",
+ vp_id, ha->max_npiv_vports);
mutex_unlock(&ha->vport_lock);
return vp_id;
}
@@ -131,9 +132,9 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
fc_port_t *fcport;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- DEBUG15(printk("scsi(%ld): Marking port dead, "
- "loop_id=0x%04x :%x\n",
- vha->host_no, fcport->loop_id, fcport->vp_idx));
+ ql_dbg(ql_dbg_vport, vha, 0xa001,
+ "Marking port dead, loop_id=0x%04x : %x.\n",
+ fcport->loop_id, fcport->vp_idx);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -187,13 +188,13 @@ qla24xx_enable_vp(scsi_qla_host_t *vha)
goto enable_failed;
}
- DEBUG15(qla_printk(KERN_INFO, ha,
- "Virtual port with id: %d - Enabled\n", vha->vp_idx));
+ ql_dbg(ql_dbg_taskm, vha, 0x801a,
+ "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
return 0;
enable_failed:
- DEBUG15(qla_printk(KERN_INFO, ha,
- "Virtual port with id: %d - Disabled\n", vha->vp_idx));
+ ql_dbg(ql_dbg_taskm, vha, 0x801b,
+ "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
return 1;
}
@@ -205,12 +206,12 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
fc_vport = vha->fc_vport;
- DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
- vha->host_no, __func__));
+ ql_dbg(ql_dbg_vport, vha, 0xa002,
+ "%s: change request #3.\n", __func__);
ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
if (ret != QLA_SUCCESS) {
- DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
- "receiving of RSCN requests: 0x%x\n", ret));
+ ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
+ "receiving of RSCN requests: 0x%x.\n", ret);
return;
} else {
/* Corresponds to SCR enabled */
@@ -248,9 +249,9 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
case MBA_CHG_IN_CONNECTION:
case MBA_PORT_UPDATE:
case MBA_RSCN_UPDATE:
- DEBUG15(printk("scsi(%ld)%s: Async_event for"
- " VP[%d], mb = 0x%x, vha=%p\n",
- vha->host_no, __func__, i, *mb, vha));
+ ql_dbg(ql_dbg_async, vha, 0x5024,
+ "Async_event for VP[%d], mb=0x%x vha=%p.\n",
+ i, *mb, vha);
qla2x00_async_event(vha, rsp, mb);
break;
}
@@ -286,37 +287,49 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
- DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
- vha->host_no, vha->vp_idx));
+ ql_dbg(ql_dbg_taskm, vha, 0x801d,
+ "Scheduling enable of Vport %d.\n", vha->vp_idx);
return qla24xx_enable_vp(vha);
}
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
+ ql_dbg(ql_dbg_dpc, vha, 0x4012,
+ "Entering %s.\n", __func__);
+ ql_dbg(ql_dbg_dpc, vha, 0x4013,
+ "vp_flags: 0x%lx.\n", vha->vp_flags);
+
qla2x00_do_work(vha);
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
/* VP acquired. complete port configuration */
+ ql_dbg(ql_dbg_dpc, vha, 0x4014,
+ "Configure VP scheduled.\n");
qla24xx_configure_vp(vha);
+ ql_dbg(ql_dbg_dpc, vha, 0x4015,
+ "Configure VP end.\n");
return 0;
}
if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
+ ql_dbg(ql_dbg_dpc, vha, 0x4016,
+ "FCPort update scheduled.\n");
qla2x00_update_fcports(vha);
clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, vha, 0x4017,
+ "FCPort update end.\n");
}
if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
!test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
atomic_read(&vha->loop_state) != LOOP_DOWN) {
- DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
- vha->host_no));
+ ql_dbg(ql_dbg_dpc, vha, 0x4018,
+ "Relogin needed scheduled.\n");
qla2x00_relogin(vha);
-
- DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
- vha->host_no));
+ ql_dbg(ql_dbg_dpc, vha, 0x4019,
+ "Relogin needed end.\n");
}
if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
@@ -326,11 +339,17 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
+ ql_dbg(ql_dbg_dpc, vha, 0x401a,
+ "Loop resync scheduled.\n");
qla2x00_loop_resync(vha);
clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, vha, 0x401b,
+ "Loop resync end.\n");
}
}
+ ql_dbg(ql_dbg_dpc, vha, 0x401c,
+ "Exiting %s.\n", __func__);
return 0;
}
@@ -396,9 +415,10 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
/* Check up max-npiv-supports */
if (ha->num_vhosts > ha->max_npiv_vports) {
- DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than "
- "max_npv_vports %ud.\n", base_vha->host_no,
- ha->num_vhosts, ha->max_npiv_vports));
+ ql_dbg(ql_dbg_vport, vha, 0xa004,
+ "num_vhosts %ud is bigger "
+ "than max_npiv_vports %ud.\n",
+ ha->num_vhosts, ha->max_npiv_vports);
return VPCERR_UNSUPPORTED;
}
return 0;
@@ -415,7 +435,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha = qla2x00_create_host(sht, ha);
if (!vha) {
- DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
+ ql_log(ql_log_warn, vha, 0xa005,
+ "scsi_host_alloc() failed for vport.\n");
return(NULL);
}
@@ -429,8 +450,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha->device_flags = 0;
vha->vp_idx = qla24xx_allocate_vp_id(vha);
if (vha->vp_idx > ha->max_npiv_vports) {
- DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
- vha->host_no));
+ ql_dbg(ql_dbg_vport, vha, 0xa006,
+ "Couldn't allocate vp_id.\n");
goto create_vhost_failed;
}
vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
@@ -461,8 +482,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
host->max_id = MAX_TARGETS_2200;
host->transportt = qla2xxx_transport_vport_template;
- DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
- vha->host_no, vha));
+ ql_dbg(ql_dbg_vport, vha, 0xa007,
+ "Detect vport hba %ld at address = %p.\n",
+ vha->host_no, vha);
vha->flags.init_done = 1;
@@ -567,9 +589,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
if (req) {
ret = qla25xx_delete_req_que(vha, req);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Couldn't delete req que %d\n",
- req->id);
+ ql_log(ql_log_warn, vha, 0x00ea,
+ "Couldn't delete req que %d.\n",
+ req->id);
return ret;
}
}
@@ -581,9 +603,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
if (rsp) {
ret = qla25xx_delete_rsp_que(vha, rsp);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Couldn't delete rsp que %d\n",
- rsp->id);
+ ql_log(ql_log_warn, vha, 0x00eb,
+ "Couldn't delete rsp que %d.\n",
+ rsp->id);
return ret;
}
}
@@ -604,8 +626,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
if (req == NULL) {
- qla_printk(KERN_WARNING, ha, "could not allocate memory"
- "for request que\n");
+ ql_log(ql_log_fatal, base_vha, 0x00d9,
+ "Failed to allocate memory for request queue.\n");
goto failed;
}
@@ -614,8 +636,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
(req->length + 1) * sizeof(request_t),
&req->dma, GFP_KERNEL);
if (req->ring == NULL) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - request_ring\n");
+ ql_log(ql_log_fatal, base_vha, 0x00da,
+ "Failed to allocte memory for request_ring.\n");
goto que_failed;
}
@@ -623,8 +645,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
if (que_id >= ha->max_req_queues) {
mutex_unlock(&ha->vport_lock);
- qla_printk(KERN_INFO, ha, "No resources to create "
- "additional request queue\n");
+ ql_log(ql_log_warn, base_vha, 0x00db,
+ "No resources to create additional request queue.\n");
goto que_failed;
}
set_bit(que_id, ha->req_qid_map);
@@ -633,6 +655,12 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->vp_idx = vp_idx;
req->qos = qos;
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
+ "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
+ que_id, req->rid, req->vp_idx, req->qos);
+ ql_dbg(ql_dbg_init, base_vha, 0x00dc,
+ "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
+ que_id, req->rid, req->vp_idx, req->qos);
if (rsp_que < 0)
req->rsp = NULL;
else
@@ -645,6 +673,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
options |= BIT_5;
req->options = options;
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
+ "options=0x%x.\n", req->options);
+ ql_dbg(ql_dbg_init, base_vha, 0x00dd,
+ "options=0x%x.\n", req->options);
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
req->outstanding_cmds[cnt] = NULL;
req->current_outstanding_cmd = 1;
@@ -656,10 +688,21 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
reg = ISP_QUE_REG(ha, que_id);
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
mutex_unlock(&ha->vport_lock);
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
+ "ring_ptr=%p ring_index=%d, "
+ "cnt=%d id=%d max_q_depth=%d.\n",
+ req->ring_ptr, req->ring_index,
+ req->cnt, req->id, req->max_q_depth);
+ ql_dbg(ql_dbg_init, base_vha, 0x00de,
+ "ring_ptr=%p ring_index=%d, "
+ "cnt=%d id=%d max_q_depth=%d.\n",
+ req->ring_ptr, req->ring_index, req->cnt,
+ req->id, req->max_q_depth);
ret = qla25xx_init_req_que(base_vha, req);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
+ ql_log(ql_log_fatal, base_vha, 0x00df,
+ "%s failed.\n", __func__);
mutex_lock(&ha->vport_lock);
clear_bit(que_id, ha->req_qid_map);
mutex_unlock(&ha->vport_lock);
@@ -700,8 +743,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
if (rsp == NULL) {
- qla_printk(KERN_WARNING, ha, "could not allocate memory for"
- " response que\n");
+ ql_log(ql_log_warn, base_vha, 0x0066,
+ "Failed to allocate memory for response queue.\n");
goto failed;
}
@@ -710,8 +753,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
(rsp->length + 1) * sizeof(response_t),
&rsp->dma, GFP_KERNEL);
if (rsp->ring == NULL) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - response_ring\n");
+ ql_log(ql_log_warn, base_vha, 0x00e1,
+ "Failed to allocate memory for response ring.\n");
goto que_failed;
}
@@ -719,8 +762,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
if (que_id >= ha->max_rsp_queues) {
mutex_unlock(&ha->vport_lock);
- qla_printk(KERN_INFO, ha, "No resources to create "
- "additional response queue\n");
+ ql_log(ql_log_warn, base_vha, 0x00e2,
+ "No resources to create additional request queue.\n");
goto que_failed;
}
set_bit(que_id, ha->rsp_qid_map);
@@ -728,12 +771,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
if (ha->flags.msix_enabled)
rsp->msix = &ha->msix_entries[que_id + 1];
else
- qla_printk(KERN_WARNING, ha, "msix not enabled\n");
+ ql_log(ql_log_warn, base_vha, 0x00e3,
+ "MSIX not enalbled.\n");
ha->rsp_q_map[que_id] = rsp;
rsp->rid = rid;
rsp->vp_idx = vp_idx;
rsp->hw = ha;
+ ql_dbg(ql_dbg_init, base_vha, 0x00e4,
+ "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
+ que_id, rsp->rid, rsp->vp_idx, rsp->hw);
/* Use alternate PCI bus number */
if (MSB(rsp->rid))
options |= BIT_4;
@@ -750,6 +797,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
mutex_unlock(&ha->vport_lock);
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+ rsp->options, rsp->id, rsp->rsp_q_in,
+ rsp->rsp_q_out);
+ ql_dbg(ql_dbg_init, base_vha, 0x00e5,
+ "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
+ rsp->options, rsp->id, rsp->rsp_q_in,
+ rsp->rsp_q_out);
ret = qla25xx_request_irq(rsp);
if (ret)
@@ -757,7 +812,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
ret = qla25xx_init_rsp_que(base_vha, rsp);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
+ ql_log(ql_log_fatal, base_vha, 0x00e7,
+ "%s failed.\n", __func__);
mutex_lock(&ha->vport_lock);
clear_bit(que_id, ha->rsp_qid_map);
mutex_unlock(&ha->vport_lock);
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index e1138bcc834..5cbf33a50b1 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -348,6 +348,7 @@ static void
qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
{
u32 win_read;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ha->crb_win = CRB_HI(*off);
writel(ha->crb_win,
@@ -358,9 +359,10 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
*/
win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
if (win_read != ha->crb_win) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "%s: Written crbwin (0x%x) != Read crbwin (0x%x), "
- "off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
+ ql_dbg(ql_dbg_p3p, vha, 0xb000,
+ "%s: Written crbwin (0x%x) "
+ "!= Read crbwin (0x%x), off=0x%lx.\n",
+ __func__, ha->crb_win, win_read, *off);
}
*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
@@ -368,6 +370,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
static inline unsigned long
qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* See if we are currently pointing to the region we want to use next */
if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
/* No need to change window. PCIX and PCIEregs are in both
@@ -398,9 +401,10 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
return off;
}
/* strange address given */
- qla_printk(KERN_WARNING, ha,
- "%s: Warning: unm_nic_pci_set_crbwindow called with"
- " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off);
+ ql_dbg(ql_dbg_p3p, vha, 0xb001,
+ "%x: Warning: unm_nic_pci_set_crbwindow "
+ "called with an unknown address(%llx).\n",
+ QLA2XXX_DRIVER_NAME, off);
return off;
}
@@ -563,6 +567,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
{
int window;
u32 win_read;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
QLA82XX_ADDR_DDR_NET_MAX)) {
@@ -574,8 +579,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
win_read = qla82xx_rd_32(ha,
ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
if ((win_read << 17) != window) {
- qla_printk(KERN_WARNING, ha,
- "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
+ ql_dbg(ql_dbg_p3p, vha, 0xb003,
+ "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n",
__func__, window, win_read);
}
addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
@@ -583,7 +588,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
QLA82XX_ADDR_OCM0_MAX)) {
unsigned int temp1;
if ((addr & 0x00ff800) == 0xff800) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0xb004,
"%s: QM access not handled.\n", __func__);
addr = -1UL;
}
@@ -596,8 +601,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
temp1 = ((window & 0x1FF) << 7) |
((window & 0x0FFFE0000) >> 17);
if (win_read != temp1) {
- qla_printk(KERN_WARNING, ha,
- "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
+ ql_log(ql_log_warn, vha, 0xb005,
+ "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x).\n",
__func__, temp1, win_read);
}
addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
@@ -612,8 +617,8 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
win_read = qla82xx_rd_32(ha,
ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
if (win_read != window) {
- qla_printk(KERN_WARNING, ha,
- "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n",
+ ql_log(ql_log_warn, vha, 0xb006,
+ "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n",
__func__, window, win_read);
}
addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
@@ -624,9 +629,9 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
*/
if ((qla82xx_pci_set_window_warning_count++ < 8) ||
(qla82xx_pci_set_window_warning_count%64 == 0)) {
- qla_printk(KERN_WARNING, ha,
- "%s: Warning:%s Unknown address range!\n", __func__,
- QLA2XXX_DRIVER_NAME);
+ ql_log(ql_log_warn, vha, 0xb007,
+ "%s: Warning:%s Unknown address range!.\n",
+ __func__, QLA2XXX_DRIVER_NAME);
}
addr = -1UL;
}
@@ -671,6 +676,7 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
uint8_t *mem_ptr = NULL;
unsigned long mem_base;
unsigned long mem_page;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
write_lock_irqsave(&ha->hw_lock, flags);
@@ -682,9 +688,10 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
if ((start == -1UL) ||
(qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
write_unlock_irqrestore(&ha->hw_lock, flags);
- qla_printk(KERN_ERR, ha,
- "%s out of bound pci memory access. "
- "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
+ ql_log(ql_log_fatal, vha, 0xb008,
+ "%s out of bound pci memory "
+ "access, offset is 0x%llx.\n",
+ QLA2XXX_DRIVER_NAME, off);
return -1;
}
@@ -741,6 +748,7 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
uint8_t *mem_ptr = NULL;
unsigned long mem_base;
unsigned long mem_page;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
write_lock_irqsave(&ha->hw_lock, flags);
@@ -752,9 +760,10 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
if ((start == -1UL) ||
(qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
write_unlock_irqrestore(&ha->hw_lock, flags);
- qla_printk(KERN_ERR, ha,
- "%s out of bound pci memory access. "
- "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
+ ql_log(ql_log_fatal, vha, 0xb009,
+ "%s out of bount memory "
+ "access, offset is 0x%llx.\n",
+ QLA2XXX_DRIVER_NAME, off);
return -1;
}
@@ -855,15 +864,16 @@ qla82xx_wait_rom_busy(struct qla_hw_data *ha)
{
long timeout = 0;
long done = 0 ;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while (done == 0) {
done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
done &= 4;
timeout++;
if (timeout >= rom_max_timeout) {
- DEBUG(qla_printk(KERN_INFO, ha,
- "%s: Timeout reached waiting for rom busy",
- QLA2XXX_DRIVER_NAME));
+ ql_dbg(ql_dbg_p3p, vha, 0xb00a,
+ "%s: Timeout reached waiting for rom busy.\n",
+ QLA2XXX_DRIVER_NAME);
return -1;
}
}
@@ -875,15 +885,16 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
{
long timeout = 0;
long done = 0 ;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while (done == 0) {
done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
done &= 2;
timeout++;
if (timeout >= rom_max_timeout) {
- DEBUG(qla_printk(KERN_INFO, ha,
- "%s: Timeout reached waiting for rom done",
- QLA2XXX_DRIVER_NAME));
+ ql_dbg(ql_dbg_p3p, vha, 0xb00b,
+ "%s: Timeout reached waiting for rom done.\n",
+ QLA2XXX_DRIVER_NAME);
return -1;
}
}
@@ -893,15 +904,16 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
static int
qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
qla82xx_wait_rom_busy(ha);
if (qla82xx_wait_rom_done(ha)) {
- qla_printk(KERN_WARNING, ha,
- "%s: Error waiting for rom done\n",
- QLA2XXX_DRIVER_NAME);
+ ql_log(ql_log_fatal, vha, 0x00ba,
+ "Error waiting for rom done.\n");
return -1;
}
/* Reset abyte_cnt and dummy_byte_cnt */
@@ -917,6 +929,7 @@ static int
qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
int ret, loops = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
udelay(100);
@@ -924,9 +937,8 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
loops++;
}
if (loops >= 50000) {
- qla_printk(KERN_INFO, ha,
- "%s: qla82xx_rom_lock failed\n",
- QLA2XXX_DRIVER_NAME);
+ ql_log(ql_log_fatal, vha, 0x00b9,
+ "Failed to aquire SEM2 lock.\n");
return -1;
}
ret = qla82xx_do_rom_fast_read(ha, addr, valp);
@@ -937,11 +949,12 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
static int
qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
qla82xx_wait_rom_busy(ha);
if (qla82xx_wait_rom_done(ha)) {
- qla_printk(KERN_WARNING, ha,
- "Error waiting for rom done\n");
+ ql_log(ql_log_warn, vha, 0xb00c,
+ "Error waiting for rom done.\n");
return -1;
}
*val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
@@ -955,6 +968,7 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
uint32_t done = 1 ;
uint32_t val;
int ret = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
while ((done != 0) && (ret == 0)) {
@@ -964,8 +978,8 @@ qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
udelay(10);
cond_resched();
if (timeout >= 50000) {
- qla_printk(KERN_WARNING, ha,
- "Timeout reached waiting for write finish");
+ ql_log(ql_log_warn, vha, 0xb00d,
+ "Timeout reached waiting for write finish.\n");
return -1;
}
}
@@ -992,13 +1006,14 @@ qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
static int
qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
if (qla82xx_flash_set_write_enable(ha))
return -1;
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
if (qla82xx_wait_rom_done(ha)) {
- qla_printk(KERN_WARNING, ha,
- "Error waiting for rom done\n");
+ ql_log(ql_log_warn, vha, 0xb00e,
+ "Error waiting for rom done.\n");
return -1;
}
return qla82xx_flash_wait_write_finish(ha);
@@ -1007,10 +1022,11 @@ qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
static int
qla82xx_write_disable_flash(struct qla_hw_data *ha)
{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
if (qla82xx_wait_rom_done(ha)) {
- qla_printk(KERN_WARNING, ha,
- "Error waiting for rom done\n");
+ ql_log(ql_log_warn, vha, 0xb00f,
+ "Error waiting for rom done.\n");
return -1;
}
return 0;
@@ -1020,13 +1036,16 @@ static int
ql82xx_rom_lock_d(struct qla_hw_data *ha)
{
int loops = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
udelay(100);
cond_resched();
loops++;
}
if (loops >= 50000) {
- qla_printk(KERN_WARNING, ha, "ROM lock failed\n");
+ ql_log(ql_log_warn, vha, 0xb010,
+ "ROM lock failed.\n");
return -1;
}
return 0;;
@@ -1037,10 +1056,12 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
uint32_t data)
{
int ret = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ret = ql82xx_rom_lock_d(ha);
if (ret < 0) {
- qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ ql_log(ql_log_warn, vha, 0xb011,
+ "ROM lock failed.\n");
return ret;
}
@@ -1053,8 +1074,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
qla82xx_wait_rom_busy(ha);
if (qla82xx_wait_rom_done(ha)) {
- qla_printk(KERN_WARNING, ha,
- "Error waiting for rom done\n");
+ ql_log(ql_log_warn, vha, 0xb012,
+ "Error waiting for rom done.\n");
ret = -1;
goto done_write;
}
@@ -1159,8 +1180,8 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
*/
if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
qla82xx_rom_fast_read(ha, 4, &n) != 0) {
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Reading crb_init area: n: %08x\n", n);
+ ql_log(ql_log_fatal, vha, 0x006e,
+ "Error Reading crb_init area: n: %08x.\n", n);
return -1;
}
@@ -1172,20 +1193,18 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
/* number of addr/value pair should not exceed 1024 enteries */
if (n >= 1024) {
- qla_printk(KERN_WARNING, ha,
- "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
- QLA2XXX_DRIVER_NAME, __func__, n);
+ ql_log(ql_log_fatal, vha, 0x0071,
+ "Card flash not initialized:n=0x%x.\n", n);
return -1;
}
- qla_printk(KERN_INFO, ha,
- "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n);
+ ql_log(ql_log_info, vha, 0x0072,
+ "%d CRB init values found in ROM.\n", n);
buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
if (buf == NULL) {
- qla_printk(KERN_WARNING, ha,
- "%s: [ERROR] Unable to malloc memory.\n",
- QLA2XXX_DRIVER_NAME);
+ ql_log(ql_log_fatal, vha, 0x010c,
+ "Unable to allocate memory.\n");
return -1;
}
@@ -1236,9 +1255,8 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
continue;
if (off == ADDR_ERROR) {
- qla_printk(KERN_WARNING, ha,
- "%s: [ERROR] Unknown addr: 0x%08lx\n",
- QLA2XXX_DRIVER_NAME, buf[i].addr);
+ ql_log(ql_log_fatal, vha, 0x0116,
+ "Unknow addr: 0x%08lx.\n", buf[i].addr);
continue;
}
@@ -1370,7 +1388,7 @@ qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
if (j >= MAX_CTL_CHECK) {
if (printk_ratelimit())
dev_err(&ha->pdev->dev,
- "failed to write through agent\n");
+ "failed to write through agent.\n");
ret = -1;
break;
}
@@ -1460,7 +1478,7 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
if (j >= MAX_CTL_CHECK) {
if (printk_ratelimit())
dev_err(&ha->pdev->dev,
- "failed to read through agent\n");
+ "failed to read through agent.\n");
break;
}
@@ -1633,17 +1651,15 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
uint32_t len = 0;
if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
- qla_printk(KERN_WARNING, ha,
- "Failed to reserve selected regions (%s)\n",
- pci_name(ha->pdev));
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x000c,
+ "Failed to reserver selected regions.\n");
goto iospace_error_exit;
}
/* Use MMIO operations for all accesses. */
if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
- qla_printk(KERN_ERR, ha,
- "region #0 not an MMIO resource (%s), aborting\n",
- pci_name(ha->pdev));
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x000d,
+ "Region #0 not an MMIO resource, aborting.\n");
goto iospace_error_exit;
}
@@ -1651,9 +1667,8 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
ha->nx_pcibase =
(unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
if (!ha->nx_pcibase) {
- qla_printk(KERN_ERR, ha,
- "cannot remap pcibase MMIO (%s), aborting\n",
- pci_name(ha->pdev));
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
+ "Cannot remap pcibase MMIO, aborting.\n");
pci_release_regions(ha->pdev);
goto iospace_error_exit;
}
@@ -1667,9 +1682,8 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
(unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
(ha->pdev->devfn << 12)), 4);
if (!ha->nxdb_wr_ptr) {
- qla_printk(KERN_ERR, ha,
- "cannot remap MMIO (%s), aborting\n",
- pci_name(ha->pdev));
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
+ "Cannot remap MMIO, aborting.\n");
pci_release_regions(ha->pdev);
goto iospace_error_exit;
}
@@ -1687,6 +1701,16 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
ha->max_req_queues = ha->max_rsp_queues = 1;
ha->msix_count = ha->max_rsp_queues + 1;
+ ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
+ "nx_pci_base=%p iobase=%p "
+ "max_req_queues=%d msix_count=%d.\n",
+ ha->nx_pcibase, ha->iobase,
+ ha->max_req_queues, ha->msix_count);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
+ "nx_pci_base=%p iobase=%p "
+ "max_req_queues=%d msix_count=%d.\n",
+ ha->nx_pcibase, ha->iobase,
+ ha->max_req_queues, ha->msix_count);
return 0;
iospace_error_exit:
@@ -1712,6 +1736,9 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
pci_set_master(ha->pdev);
ret = pci_set_mwi(ha->pdev);
ha->chip_revision = ha->pdev->revision;
+ ql_dbg(ql_dbg_init, vha, 0x0043,
+ "Chip revision:%ld.\n",
+ ha->chip_revision);
return 0;
}
@@ -1877,6 +1904,7 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
{
u32 val = 0;
int retries = 60;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
do {
read_lock(&ha->hw_lock);
@@ -1892,15 +1920,15 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
default:
break;
}
- qla_printk(KERN_WARNING, ha,
- "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n",
- val, retries);
+ ql_log(ql_log_info, vha, 0x00a8,
+ "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n",
+ val, retries);
msleep(500);
} while (--retries);
- qla_printk(KERN_INFO, ha,
+ ql_log(ql_log_fatal, vha, 0x00a9,
"Cmd Peg initialization failed: 0x%x.\n", val);
val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
@@ -1915,6 +1943,7 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
{
u32 val = 0;
int retries = 60;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
do {
read_lock(&ha->hw_lock);
@@ -1930,17 +1959,16 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
default:
break;
}
-
- qla_printk(KERN_WARNING, ha,
- "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n",
- val, retries);
+ ql_log(ql_log_info, vha, 0x00ab,
+ "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n",
+ val, retries);
msleep(500);
} while (--retries);
- qla_printk(KERN_INFO, ha,
- "Rcv Peg initialization failed: 0x%x.\n", val);
+ ql_log(ql_log_fatal, vha, 0x00ac,
+ "Rcv Peg initializatin failed: 0x%x.\n", val);
read_lock(&ha->hw_lock);
qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
read_unlock(&ha->hw_lock);
@@ -1989,13 +2017,11 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
}
if (ha->mcp) {
- DEBUG3_11(printk(KERN_INFO "%s(%ld): "
- "Got mailbox completion. cmd=%x.\n",
- __func__, vha->host_no, ha->mcp->mb[0]));
+ ql_dbg(ql_dbg_async, vha, 0x5052,
+ "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
} else {
- qla_printk(KERN_INFO, ha,
- "%s(%ld): MBX pointer ERROR!\n",
- __func__, vha->host_no);
+ ql_dbg(ql_dbg_async, vha, 0x5053,
+ "MBX pointer ERROR.\n");
}
}
@@ -2019,13 +2045,13 @@ qla82xx_intr_handler(int irq, void *dev_id)
int status = 0, status1 = 0;
unsigned long flags;
unsigned long iter;
- uint32_t stat;
+ uint32_t stat = 0;
uint16_t mb[4];
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2075,9 +2101,9 @@ qla82xx_intr_handler(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
default:
- DEBUG2(printk("scsi(%ld): "
- " Unrecognized interrupt type (%d).\n",
- vha->host_no, stat & 0xff));
+ ql_dbg(ql_dbg_async, vha, 0x5054,
+ "Unrecognized interrupt type (%d).\n",
+ stat & 0xff);
break;
}
}
@@ -2089,8 +2115,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
#ifdef QL_DEBUG_LEVEL_17
if (!irq && ha->flags.eeh_busy)
- qla_printk(KERN_WARNING, ha,
- "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
+ ql_log(ql_log_warn, vha, 0x503d,
+ "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
#endif
@@ -2111,13 +2137,13 @@ qla82xx_msix_default(int irq, void *dev_id)
struct device_reg_82xx __iomem *reg;
int status = 0;
unsigned long flags;
- uint32_t stat;
+ uint32_t stat = 0;
uint16_t mb[4];
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
ha = rsp->hw;
@@ -2149,9 +2175,9 @@ qla82xx_msix_default(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
default:
- DEBUG2(printk("scsi(%ld): "
- " Unrecognized interrupt type (%d).\n",
- vha->host_no, stat & 0xff));
+ ql_dbg(ql_dbg_async, vha, 0x5041,
+ "Unrecognized interrupt type (%d).\n",
+ stat & 0xff);
break;
}
}
@@ -2162,9 +2188,9 @@ qla82xx_msix_default(int irq, void *dev_id)
#ifdef QL_DEBUG_LEVEL_17
if (!irq && ha->flags.eeh_busy)
- qla_printk(KERN_WARNING, ha,
- "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
- status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
+ ql_log(ql_log_warn, vha, 0x5044,
+ "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
+ status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
#endif
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
@@ -2186,7 +2212,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
@@ -2215,7 +2241,7 @@ qla82xx_poll(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
printk(KERN_INFO
- "%s(): NULL response queue pointer\n", __func__);
+ "%s(): NULL response queue pointer.\n", __func__);
return;
}
ha = rsp->hw;
@@ -2245,9 +2271,9 @@ qla82xx_poll(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
default:
- DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
- "(%d).\n",
- vha->host_no, stat & 0xff));
+ ql_dbg(ql_dbg_p3p, vha, 0xb013,
+ "Unrecognized interrupt type (%d).\n",
+ stat & 0xff);
break;
}
}
@@ -2347,9 +2373,8 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha)
drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
}
drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
- qla_printk(KERN_INFO, ha,
- "%s(%ld):drv_state = 0x%x\n",
- __func__, vha->host_no, drv_state);
+ ql_log(ql_log_info, vha, 0x00bb,
+ "drv_state = 0x%x.\n", drv_state);
qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
}
@@ -2392,8 +2417,8 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
- qla_printk(KERN_ERR, ha,
- "%s: Error during CRB Initialization\n", __func__);
+ ql_log(ql_log_fatal, vha, 0x009f,
+ "Error during CRB initialization.\n");
return QLA_FUNCTION_FAILED;
}
udelay(500);
@@ -2411,27 +2436,27 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
if (ql2xfwloadbin == 2)
goto try_blob_fw;
- qla_printk(KERN_INFO, ha,
- "Attempting to load firmware from flash\n");
+ ql_log(ql_log_info, vha, 0x00a0,
+ "Attempting to load firmware from flash.\n");
if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
- qla_printk(KERN_ERR, ha,
- "Firmware loaded successfully from flash\n");
+ ql_log(ql_log_info, vha, 0x00a1,
+ "Firmware loaded successully from flash.\n");
return QLA_SUCCESS;
} else {
- qla_printk(KERN_ERR, ha,
- "Firmware load from flash failed\n");
+ ql_log(ql_log_warn, vha, 0x0108,
+ "Firmware load from flash failed.\n");
}
try_blob_fw:
- qla_printk(KERN_INFO, ha,
- "Attempting to load firmware from blob\n");
+ ql_log(ql_log_info, vha, 0x00a2,
+ "Attempting to load firmware from blob.\n");
/* Load firmware blob. */
blob = ha->hablob = qla2x00_request_firmware(vha);
if (!blob) {
- qla_printk(KERN_ERR, ha,
- "Firmware image not present.\n");
+ ql_log(ql_log_fatal, vha, 0x00a3,
+ "Firmware image not preset.\n");
goto fw_load_failed;
}
@@ -2441,20 +2466,19 @@ try_blob_fw:
/* Fallback to URI format */
if (qla82xx_validate_firmware_blob(vha,
QLA82XX_UNIFIED_ROMIMAGE)) {
- qla_printk(KERN_ERR, ha,
- "No valid firmware image found!!!");
+ ql_log(ql_log_fatal, vha, 0x00a4,
+ "No valid firmware image found.\n");
return QLA_FUNCTION_FAILED;
}
}
if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
- qla_printk(KERN_ERR, ha,
- "%s: Firmware loaded successfully "
- " from binary blob\n", __func__);
+ ql_log(ql_log_info, vha, 0x00a5,
+ "Firmware loaded successfully from binary blob.\n");
return QLA_SUCCESS;
} else {
- qla_printk(KERN_ERR, ha,
- "Firmware load failed from binary blob\n");
+ ql_log(ql_log_fatal, vha, 0x00a6,
+ "Firmware load failed for binary blob.\n");
blob->fw = NULL;
blob = NULL;
goto fw_load_failed;
@@ -2486,15 +2510,15 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
- qla_printk(KERN_INFO, ha,
- "%s: Error trying to start fw!\n", __func__);
+ ql_log(ql_log_fatal, vha, 0x00a7,
+ "Error trying to start fw.\n");
return QLA_FUNCTION_FAILED;
}
/* Handshake with the card before we register the devices. */
if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
- qla_printk(KERN_INFO, ha,
- "%s: Error during card handshake!\n", __func__);
+ ql_log(ql_log_fatal, vha, 0x00aa,
+ "Error during card handshake.\n");
return QLA_FUNCTION_FAILED;
}
@@ -2663,8 +2687,11 @@ qla82xx_start_scsi(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
if (qla2x00_marker(vha, req,
- rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
+ rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x300c,
+ "qla2x00_marker failed for cmd=%p.\n", cmd);
return QLA_FUNCTION_FAILED;
+ }
vha->marker_needed = 0;
}
@@ -2701,8 +2728,13 @@ qla82xx_start_scsi(srb_t *sp)
uint16_t i;
more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
- if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN)
+ if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+ ql_dbg(ql_dbg_io, vha, 0x300d,
+ "Num of DSD list %d is than %d for cmd=%p.\n",
+ more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+ cmd);
goto queuing_error;
+ }
if (more_dsd_lists <= ha->gbl_dsd_avail)
goto sufficient_dsds;
@@ -2711,13 +2743,20 @@ qla82xx_start_scsi(srb_t *sp)
for (i = 0; i < more_dsd_lists; i++) {
dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
- if (!dsd_ptr)
+ if (!dsd_ptr) {
+ ql_log(ql_log_fatal, vha, 0x300e,
+ "Failed to allocate memory for dsd_dma "
+ "for cmd=%p.\n", cmd);
goto queuing_error;
+ }
dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
if (!dsd_ptr->dsd_addr) {
kfree(dsd_ptr);
+ ql_log(ql_log_fatal, vha, 0x300f,
+ "Failed to allocate memory for dsd_addr "
+ "for cmd=%p.\n", cmd);
goto queuing_error;
}
list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
@@ -2742,17 +2781,16 @@ sufficient_dsds:
ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
if (!sp->ctx) {
- DEBUG(printk(KERN_INFO
- "%s(%ld): failed to allocate"
- " ctx.\n", __func__, vha->host_no));
+ ql_log(ql_log_fatal, vha, 0x3010,
+ "Failed to allocate ctx for cmd=%p.\n", cmd);
goto queuing_error;
}
memset(ctx, 0, sizeof(struct ct6_dsd));
ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
GFP_ATOMIC, &ctx->fcp_cmnd_dma);
if (!ctx->fcp_cmnd) {
- DEBUG2_3(printk("%s(%ld): failed to allocate"
- " fcp_cmnd.\n", __func__, vha->host_no));
+ ql_log(ql_log_fatal, vha, 0x3011,
+ "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
goto queuing_error_fcp_cmnd;
}
@@ -2766,6 +2804,9 @@ sufficient_dsds:
/* SCSI command bigger than 16 bytes must be
* multiple of 4
*/
+ ql_log(ql_log_warn, vha, 0x3012,
+ "scsi cmd len %d not multiple of 4 "
+ "for cmd=%p.\n", cmd->cmd_len, cmd);
goto queuing_error_fcp_cmnd;
}
ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
@@ -2845,7 +2886,7 @@ sufficient_dsds:
cmd_pkt->entry_status = (uint8_t) rsp->id;
} else {
struct cmd_type_7 *cmd_pkt;
- req_cnt = qla24xx_calc_iocbs(tot_dsds);
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = (uint16_t)RD_REG_DWORD_RELAXED(
&reg->req_q_out[0]);
@@ -2979,8 +3020,8 @@ qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
/* Dword reads to flash. */
for (i = 0; i < length/4; i++, faddr += 4) {
if (qla82xx_rom_fast_read(ha, faddr, &val)) {
- qla_printk(KERN_WARNING, ha,
- "Do ROM fast read failed\n");
+ ql_log(ql_log_warn, vha, 0x0106,
+ "Do ROM fast read failed.\n");
goto done_read;
}
dwptr[i] = __constant_cpu_to_le32(val);
@@ -2994,10 +3035,12 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
{
int ret;
uint32_t val;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ret = ql82xx_rom_lock_d(ha);
if (ret < 0) {
- qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ ql_log(ql_log_warn, vha, 0xb014,
+ "ROM Lock failed.\n");
return ret;
}
@@ -3013,7 +3056,8 @@ qla82xx_unprotect_flash(struct qla_hw_data *ha)
}
if (qla82xx_write_disable_flash(ha) != 0)
- qla_printk(KERN_WARNING, ha, "Write disable failed\n");
+ ql_log(ql_log_warn, vha, 0xb015,
+ "Write disable failed.\n");
done_unprotect:
qla82xx_rom_unlock(ha);
@@ -3025,10 +3069,12 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
{
int ret;
uint32_t val;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ret = ql82xx_rom_lock_d(ha);
if (ret < 0) {
- qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ ql_log(ql_log_warn, vha, 0xb016,
+ "ROM Lock failed.\n");
return ret;
}
@@ -3040,10 +3086,12 @@ qla82xx_protect_flash(struct qla_hw_data *ha)
/* LOCK all sectors */
ret = qla82xx_write_status_reg(ha, val);
if (ret < 0)
- qla_printk(KERN_WARNING, ha, "Write status register failed\n");
+ ql_log(ql_log_warn, vha, 0xb017,
+ "Write status register failed.\n");
if (qla82xx_write_disable_flash(ha) != 0)
- qla_printk(KERN_WARNING, ha, "Write disable failed\n");
+ ql_log(ql_log_warn, vha, 0xb018,
+ "Write disable failed.\n");
done_protect:
qla82xx_rom_unlock(ha);
return ret;
@@ -3053,10 +3101,12 @@ static int
qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
{
int ret = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ret = ql82xx_rom_lock_d(ha);
if (ret < 0) {
- qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
+ ql_log(ql_log_warn, vha, 0xb019,
+ "ROM Lock failed.\n");
return ret;
}
@@ -3066,8 +3116,8 @@ qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
if (qla82xx_wait_rom_done(ha)) {
- qla_printk(KERN_WARNING, ha,
- "Error waiting for rom done\n");
+ ql_log(ql_log_warn, vha, 0xb01a,
+ "Error waiting for rom done.\n");
ret = -1;
goto done;
}
@@ -3110,10 +3160,10 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
&optrom_dma, GFP_KERNEL);
if (!optrom) {
- qla_printk(KERN_DEBUG, ha,
- "Unable to allocate memory for optrom "
- "burst write (%x KB).\n",
- OPTROM_BURST_SIZE / 1024);
+ ql_log(ql_log_warn, vha, 0xb01b,
+ "Unable to allocate memory "
+ "for optron burst write (%x KB).\n",
+ OPTROM_BURST_SIZE / 1024);
}
}
@@ -3122,8 +3172,8 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
ret = qla82xx_unprotect_flash(ha);
if (ret) {
- qla_printk(KERN_WARNING, ha,
- "Unable to unprotect flash for update.\n");
+ ql_log(ql_log_warn, vha, 0xb01c,
+ "Unable to unprotect flash for update.\n");
goto write_done;
}
@@ -3133,9 +3183,9 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
ret = qla82xx_erase_sector(ha, faddr);
if (ret) {
- DEBUG9(qla_printk(KERN_ERR, ha,
- "Unable to erase sector: "
- "address=%x.\n", faddr));
+ ql_log(ql_log_warn, vha, 0xb01d,
+ "Unable to erase sector: address=%x.\n",
+ faddr);
break;
}
}
@@ -3149,12 +3199,12 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
(ha->flash_data_off | faddr),
OPTROM_BURST_DWORDS);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0xb01e,
"Unable to burst-write optrom segment "
"(%x/%x/%llx).\n", ret,
(ha->flash_data_off | faddr),
(unsigned long long)optrom_dma);
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0xb01f,
"Reverting to slow-write.\n");
dma_free_coherent(&ha->pdev->dev,
@@ -3171,16 +3221,16 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
ret = qla82xx_write_flash_dword(ha, faddr,
cpu_to_le32(*dwptr));
if (ret) {
- DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program"
- "flash address=%x data=%x.\n", __func__,
- ha->host_no, faddr, *dwptr));
+ ql_dbg(ql_dbg_p3p, vha, 0xb020,
+ "Unable to program flash address=%x data=%x.\n",
+ faddr, *dwptr);
break;
}
}
ret = qla82xx_protect_flash(ha);
if (ret)
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0xb021,
"Unable to protect flash after update.\n");
write_done:
if (optrom)
@@ -3244,9 +3294,12 @@ qla82xx_start_iocbs(srb_t *sp)
void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
if (qla82xx_rom_lock(ha))
/* Someone else is holding the lock. */
- qla_printk(KERN_INFO, ha, "Resetting rom_lock\n");
+ ql_log(ql_log_info, vha, 0xb022,
+ "Resetting rom_lock.\n");
/*
* Either we got the lock, or someone
@@ -3313,7 +3366,8 @@ qla82xx_device_bootstrap(scsi_qla_host_t *vha)
dev_initialize:
/* set to DEV_INITIALIZING */
- qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
+ ql_log(ql_log_info, vha, 0x009e,
+ "HW State: INITIALIZING.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
/* Driver that sets device state to initializating sets IDC version */
@@ -3324,14 +3378,16 @@ dev_initialize:
qla82xx_idc_lock(ha);
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ ql_log(ql_log_fatal, vha, 0x00ad,
+ "HW State: FAILED.\n");
qla82xx_clear_drv_active(ha);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
return rval;
}
dev_ready:
- qla_printk(KERN_INFO, ha, "HW State: READY\n");
+ ql_log(ql_log_info, vha, 0x00ae,
+ "HW State: READY.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
return QLA_SUCCESS;
@@ -3376,15 +3432,15 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
/* quiescence timeout, other functions didn't ack
* changing the state to DEV_READY
*/
- qla_printk(KERN_INFO, ha,
- "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME);
- qla_printk(KERN_INFO, ha,
- "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active,
- drv_state);
+ ql_log(ql_log_info, vha, 0xb023,
+ "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
+ ql_log(ql_log_info, vha, 0xb024,
+ "DRV_ACTIVE:%d DRV_STATE:%d.\n",
+ drv_active, drv_state);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA82XX_DEV_READY);
- qla_printk(KERN_INFO, ha,
- "HW State: DEV_READY\n");
+ QLA82XX_DEV_READY);
+ ql_log(ql_log_info, vha, 0xb025,
+ "HW State: DEV_READY.\n");
qla82xx_idc_unlock(ha);
qla2x00_perform_loop_resync(vha);
qla82xx_idc_lock(ha);
@@ -3404,7 +3460,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
/* everyone acked so set the state to DEV_QUIESCENCE */
if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
- qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n");
+ ql_log(ql_log_info, vha, 0xb026,
+ "HW State: DEV_QUIESCENT.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
}
}
@@ -3441,7 +3498,8 @@ qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
/* Disable the board */
- qla_printk(KERN_INFO, ha, "Disabling the board\n");
+ ql_log(ql_log_fatal, vha, 0x00b8,
+ "Disabling the board.\n");
qla82xx_idc_lock(ha);
qla82xx_clear_drv_active(ha);
@@ -3492,8 +3550,8 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
while (drv_state != drv_active) {
if (time_after_eq(jiffies, reset_timeout)) {
- qla_printk(KERN_INFO, ha,
- "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME);
+ ql_log(ql_log_warn, vha, 0x00b5,
+ "Reset timeout.\n");
break;
}
qla82xx_idc_unlock(ha);
@@ -3504,12 +3562,15 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
}
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ ql_log(ql_log_info, vha, 0x00b6,
+ "Device state is 0x%x = %s.\n",
+ dev_state,
+ dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
/* Force to DEV_COLD unless someone else is starting a reset */
if (dev_state != QLA82XX_DEV_INITIALIZING) {
- qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
+ ql_log(ql_log_info, vha, 0x00b7,
+ "HW State: COLD/RE-INIT.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
}
}
@@ -3523,8 +3584,12 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
QLA82XX_PEG_ALIVE_COUNTER);
/* all 0xff, assume AER/EEH in progress, ignore */
- if (fw_heartbeat_counter == 0xffffffff)
+ if (fw_heartbeat_counter == 0xffffffff) {
+ ql_dbg(ql_dbg_timer, vha, 0x6003,
+ "FW heartbeat counter is 0xffffffff, "
+ "returning status=%d.\n", status);
return status;
+ }
if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
vha->seconds_since_last_heartbeat++;
/* FW not alive after 2 seconds */
@@ -3535,6 +3600,9 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
} else
vha->seconds_since_last_heartbeat = 0;
vha->fw_heartbeat_counter = fw_heartbeat_counter;
+ if (status)
+ ql_dbg(ql_dbg_timer, vha, 0x6004,
+ "Returning status=%d.\n", status);
return status;
}
@@ -3565,8 +3633,10 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
old_dev_state = dev_state;
- qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ ql_log(ql_log_info, vha, 0x009b,
+ "Device state is 0x%x = %s.\n",
+ dev_state,
+ dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -3574,9 +3644,8 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
while (1) {
if (time_after_eq(jiffies, dev_init_timeout)) {
- DEBUG(qla_printk(KERN_INFO, ha,
- "%s: device init failed!\n",
- QLA2XXX_DRIVER_NAME));
+ ql_log(ql_log_fatal, vha, 0x009c,
+ "Device init failed.\n");
rval = QLA_FUNCTION_FAILED;
break;
}
@@ -3586,10 +3655,11 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
old_dev_state = dev_state;
}
if (loopcount < 5) {
- qla_printk(KERN_INFO, ha,
- "2:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ?
- qdev_state[dev_state] : "Unknown");
+ ql_log(ql_log_info, vha, 0x009d,
+ "Device state is 0x%x = %s.\n",
+ dev_state,
+ dev_state < MAX_STATES ? qdev_state[dev_state] :
+ "Unknown");
}
switch (dev_state) {
@@ -3656,29 +3726,26 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
- qla_printk(KERN_WARNING, ha,
- "scsi(%ld) %s: Adapter reset needed!\n",
- vha->host_no, __func__);
+ ql_log(ql_log_warn, vha, 0x6001,
+ "Adapter reset needed.\n");
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
- DEBUG(qla_printk(KERN_INFO, ha,
- "scsi(%ld) %s - detected quiescence needed\n",
- vha->host_no, __func__));
+ ql_log(ql_log_warn, vha, 0x6002,
+ "Quiescent needed.\n");
set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
} else {
if (qla82xx_check_fw_alive(vha)) {
halt_status = qla82xx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
- qla_printk(KERN_INFO, ha,
- "scsi(%ld): %s, Dumping hw/fw registers:\n "
- " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n "
- " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n "
- " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n "
- " PEG_NET_4_PC: 0x%x\n",
- vha->host_no, __func__, halt_status,
+ ql_dbg(ql_dbg_timer, vha, 0x6005,
+ "dumping hw/fw registers:.\n "
+ " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
+ " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
+ " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n "
+ " PEG_NET_4_PC: 0x%x.\n", halt_status,
qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
qla82xx_rd_32(ha,
QLA82XX_CRB_PEG_NET_0 + 0x3c),
@@ -3694,9 +3761,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
set_bit(ISP_UNRECOVERABLE,
&vha->dpc_flags);
} else {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld): %s - detect abort needed\n",
- vha->host_no, __func__);
+ ql_log(ql_log_info, vha, 0x6006,
+ "Detect abort needed.\n");
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
}
@@ -3704,10 +3770,10 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
ha->flags.isp82xx_fw_hung = 1;
if (ha->flags.mbox_busy) {
ha->flags.mbox_int = 1;
- DEBUG2(qla_printk(KERN_ERR, ha,
- "scsi(%ld) Due to fw hung, doing "
+ ql_log(ql_log_warn, vha, 0x6007,
+ "Due to FW hung, doing "
"premature completion of mbx "
- "command\n", vha->host_no));
+ "command.\n");
if (test_bit(MBX_INTR_WAIT,
&ha->mbx_cmd_flags))
complete(&ha->mbx_intr_comp);
@@ -3742,9 +3808,8 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
uint32_t dev_state;
if (vha->device_flags & DFLG_DEV_FAILED) {
- qla_printk(KERN_WARNING, ha,
- "%s(%ld): Device in failed state, "
- "Exiting.\n", __func__, vha->host_no);
+ ql_log(ql_log_warn, vha, 0x8024,
+ "Device in failed state, exiting.\n");
return QLA_SUCCESS;
}
ha->flags.isp82xx_reset_hdlr_active = 1;
@@ -3752,13 +3817,14 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
qla82xx_idc_lock(ha);
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
if (dev_state == QLA82XX_DEV_READY) {
- qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
+ ql_log(ql_log_info, vha, 0x8025,
+ "HW State: NEED RESET.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_NEED_RESET);
} else
- qla_printk(KERN_INFO, ha, "HW State: %s\n",
- dev_state < MAX_STATES ?
- qdev_state[dev_state] : "Unknown");
+ ql_log(ql_log_info, vha, 0x8026,
+ "Hw State: %s.\n", dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown");
qla82xx_idc_unlock(ha);
rval = qla82xx_device_state_handler(vha);
@@ -3777,9 +3843,9 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
vha->flags.online = 1;
if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
if (ha->isp_abort_cnt == 0) {
- qla_printk(KERN_WARNING, ha,
- "ISP error recovery failed - "
- "board disabled\n");
+ ql_log(ql_log_warn, vha, 0x8027,
+ "ISP error recover failed - board "
+ "disabled.\n");
/*
* The next call disables the board
* completely.
@@ -3791,16 +3857,16 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
rval = QLA_SUCCESS;
} else { /* schedule another ISP abort */
ha->isp_abort_cnt--;
- DEBUG(qla_printk(KERN_INFO, ha,
- "qla%ld: ISP abort - retry remaining %d\n",
- vha->host_no, ha->isp_abort_cnt));
+ ql_log(ql_log_warn, vha, 0x8036,
+ "ISP abort - retry remaining %d.\n",
+ ha->isp_abort_cnt);
rval = QLA_FUNCTION_FAILED;
}
} else {
ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
- DEBUG(qla_printk(KERN_INFO, ha,
- "(%ld): ISP error recovery - retrying (%d) "
- "more times\n", vha->host_no, ha->isp_abort_cnt));
+ ql_dbg(ql_dbg_taskm, vha, 0x8029,
+ "ISP error recovery - retrying (%d) more times.\n",
+ ha->isp_abort_cnt);
set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
rval = QLA_FUNCTION_FAILED;
}
@@ -3872,8 +3938,8 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
break;
}
}
- DEBUG2(printk(KERN_INFO
- "%s status=%d\n", __func__, status));
+ ql_dbg(ql_dbg_p3p, vha, 0xb027,
+ "%s status=%d.\n", status);
return status;
}
@@ -3902,6 +3968,9 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
}
}
}
+ ql_dbg(ql_dbg_init, vha, 0x00b0,
+ "Entered %s fw_hung=%d.\n",
+ __func__, ha->flags.isp82xx_fw_hung);
/* Abort all commands gracefully if fw NOT hung */
if (!ha->flags.isp82xx_fw_hung) {
@@ -3922,13 +3991,13 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
spin_unlock_irqrestore(
&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx abort command failed in %s\n",
- vha->host_no, __func__);
+ ql_log(ql_log_info, vha,
+ 0x00b1,
+ "mbx abort failed.\n");
} else {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld): mbx abort command success in %s\n",
- vha->host_no, __func__);
+ ql_log(ql_log_info, vha,
+ 0x00b2,
+ "mbx abort success.\n");
}
spin_lock_irqsave(&ha->hardware_lock, flags);
}
@@ -3940,8 +4009,9 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
/* Wait for pending cmds (physical and virtual) to complete */
if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
WAIT_HOST) == QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Done wait for pending commands\n"));
+ ql_dbg(ql_dbg_init, vha, 0x00b3,
+ "Done wait for "
+ "pending commands.\n");
}
}
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f461925a9df..e02df276804 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -35,6 +35,10 @@ static struct kmem_cache *srb_cachep;
* CT6 CTX allocation cache
*/
static struct kmem_cache *ctx_cachep;
+/*
+ * error level for logging
+ */
+int ql_errlev = ql_log_all;
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
@@ -69,8 +73,17 @@ MODULE_PARM_DESC(ql2xallocfwdump,
int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
- "Option to enable extended error logging, "
- "Default is 0 - no logging. 1 - log errors.");
+ "Option to enable extended error logging,\n"
+ "\t\tDefault is 0 - no logging. 0x40000000 - Module Init & Probe.\n"
+ "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
+ "\t\t0x08000000 - IO tracing. 0x04000000 - DPC Thread.\n"
+ "\t\t0x02000000 - Async events. 0x01000000 - Timer routines.\n"
+ "\t\t0x00800000 - User space. 0x00400000 - Task Management.\n"
+ "\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
+ "\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
+ "\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
+ "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
+ "\t\tDo LOGICAL OR of the value to enable more than one level");
int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
@@ -128,8 +141,8 @@ MODULE_PARM_DESC(ql2xmultique_tag,
int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfwloadbin,
- "Option to specify location from which to load ISP firmware:\n"
- " 2 -- load firmware via the request_firmware() (hotplug)\n"
+ "Option to specify location from which to load ISP firmware:.\n"
+ " 2 -- load firmware via the request_firmware() (hotplug).\n"
" interface.\n"
" 1 -- load firmware from flash.\n"
" 0 -- use default semantics.\n");
@@ -143,7 +156,7 @@ MODULE_PARM_DESC(ql2xetsenable,
int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO);
MODULE_PARM_DESC(ql2xdbwr,
- "Option to specify scheme for request queue posting\n"
+ "Option to specify scheme for request queue posting.\n"
" 0 -- Regular doorbell.\n"
" 1 -- CAMRAM doorbell (faster).\n");
@@ -168,7 +181,7 @@ MODULE_PARM_DESC(ql2xasynctmfenable,
int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO);
MODULE_PARM_DESC(ql2xdontresethba,
- "Option to specify reset behaviour\n"
+ "Option to specify reset behaviour.\n"
" 0 (Default) -- Reset on failure.\n"
" 1 -- Do not reset on failure.\n");
@@ -247,8 +260,11 @@ static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
/* Currently used for 82XX only. */
- if (vha->device_flags & DFLG_DEV_FAILED)
+ if (vha->device_flags & DFLG_DEV_FAILED) {
+ ql_dbg(ql_dbg_timer, vha, 0x600d,
+ "Device in a failed state, returning.\n");
return;
+ }
mod_timer(&vha->timer, jiffies + interval * HZ);
}
@@ -273,19 +289,20 @@ static void qla2x00_sp_free_dma(srb_t *);
/* -------------------------------------------------------------------------- */
static int qla2x00_alloc_queues(struct qla_hw_data *ha)
{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
GFP_KERNEL);
if (!ha->req_q_map) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate memory for request queue ptrs\n");
+ ql_log(ql_log_fatal, vha, 0x003b,
+ "Unable to allocate memory for request queue ptrs.\n");
goto fail_req_map;
}
ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
GFP_KERNEL);
if (!ha->rsp_q_map) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate memory for response queue ptrs\n");
+ ql_log(ql_log_fatal, vha, 0x003c,
+ "Unable to allocate memory for response queue ptrs.\n");
goto fail_rsp_map;
}
set_bit(0, ha->rsp_qid_map);
@@ -349,8 +366,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
if (!(ha->fw_attributes & BIT_6)) {
- qla_printk(KERN_INFO, ha,
- "Firmware is not multi-queue capable\n");
+ ql_log(ql_log_warn, vha, 0x00d8,
+ "Firmware is not multi-queue capable.\n");
goto fail;
}
if (ql2xmultique_tag) {
@@ -359,8 +376,8 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
req = qla25xx_create_req_que(ha, options, 0, 0, -1,
QLA_DEFAULT_QUE_QOS);
if (!req) {
- qla_printk(KERN_WARNING, ha,
- "Can't create request queue\n");
+ ql_log(ql_log_warn, vha, 0x00e0,
+ "Failed to create request queue.\n");
goto fail;
}
ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
@@ -369,17 +386,20 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
for (ques = 1; ques < ha->max_rsp_queues; ques++) {
ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
if (!ret) {
- qla_printk(KERN_WARNING, ha,
- "Response Queue create failed\n");
+ ql_log(ql_log_warn, vha, 0x00e8,
+ "Failed to create response queue.\n");
goto fail2;
}
}
ha->flags.cpu_affinity_enabled = 1;
-
- DEBUG2(qla_printk(KERN_INFO, ha,
- "CPU affinity mode enabled, no. of response"
- " queues:%d, no. of request queues:%d\n",
- ha->max_rsp_queues, ha->max_req_queues));
+ ql_dbg(ql_dbg_multiq, vha, 0xc007,
+ "CPU affinity mode enalbed, "
+ "no. of response queues:%d no. of request queues:%d.\n",
+ ha->max_rsp_queues, ha->max_req_queues);
+ ql_dbg(ql_dbg_init, vha, 0x00e9,
+ "CPU affinity mode enalbed, "
+ "no. of response queues:%d no. of request queues:%d.\n",
+ ha->max_rsp_queues, ha->max_req_queues);
}
return 0;
fail2:
@@ -526,8 +546,11 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
struct qla_hw_data *ha = vha->hw;
sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
- if (!sp)
+ if (!sp) {
+ ql_log(ql_log_warn, vha, 0x3006,
+ "Memory allocation failed for sp.\n");
return sp;
+ }
atomic_set(&sp->ref_count, 1);
sp->fcport = fcport;
@@ -551,30 +574,43 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
int rval;
if (ha->flags.eeh_busy) {
- if (ha->flags.pci_channel_io_perm_failure)
+ if (ha->flags.pci_channel_io_perm_failure) {
+ ql_dbg(ql_dbg_io, vha, 0x3001,
+ "PCI Channel IO permanent failure, exiting "
+ "cmd=%p.\n", cmd);
cmd->result = DID_NO_CONNECT << 16;
- else
+ } else {
+ ql_dbg(ql_dbg_io, vha, 0x3002,
+ "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
cmd->result = DID_REQUEUE << 16;
+ }
goto qc24_fail_command;
}
rval = fc_remote_port_chkready(rport);
if (rval) {
cmd->result = rval;
+ ql_dbg(ql_dbg_io, vha, 0x3003,
+ "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
+ cmd, rval);
goto qc24_fail_command;
}
if (!vha->flags.difdix_supported &&
scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
- DEBUG2(qla_printk(KERN_ERR, ha,
- "DIF Cap Not Reg, fail DIF capable cmd's:%x\n",
- cmd->cmnd[0]));
+ ql_dbg(ql_dbg_io, vha, 0x3004,
+ "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
+ cmd);
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+ ql_dbg(ql_dbg_io, vha, 0x3005,
+ "Returning DNC, fcport_state=%d loop_state=%d.\n",
+ atomic_read(&fcport->state),
+ atomic_read(&base_vha->loop_state));
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
@@ -586,8 +622,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_host_busy;
rval = ha->isp_ops->start_scsi(sp);
- if (rval != QLA_SUCCESS)
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_io, vha, 0x3013,
+ "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
goto qc24_host_busy_free_sp;
+ }
return 0;
@@ -630,7 +669,8 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
int ret = QLA_SUCCESS;
if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
- DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
+ ql_dbg(ql_dbg_taskm, vha, 0x8005,
+ "Return:eh_wait.\n");
return ret;
}
@@ -723,7 +763,8 @@ qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
else
return_status = QLA_FUNCTION_FAILED;
- DEBUG2(printk("%s return_status=%d\n", __func__, return_status));
+ ql_dbg(ql_dbg_taskm, vha, 0x8019,
+ "%s return status=%d.\n", __func__, return_status);
return return_status;
}
@@ -831,10 +872,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
int wait = 0;
struct qla_hw_data *ha = vha->hw;
+ ql_dbg(ql_dbg_taskm, vha, 0x8000,
+ "Entered %s for cmd=%p.\n", __func__, cmd);
if (!CMD_SP(cmd))
return SUCCESS;
ret = fc_block_scsi_eh(cmd);
+ ql_dbg(ql_dbg_taskm, vha, 0x8001,
+ "Return value of fc_block_scsi_eh=%d.\n", ret);
if (ret != 0)
return ret;
ret = SUCCESS;
@@ -849,20 +894,19 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return SUCCESS;
}
- DEBUG2(printk("%s(%ld): aborting sp %p from RISC.",
- __func__, vha->host_no, sp));
+ ql_dbg(ql_dbg_taskm, vha, 0x8002,
+ "Aborting sp=%p cmd=%p from RISC ", sp, cmd);
/* Get a reference to the sp and drop the lock.*/
sp_get(sp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
- DEBUG2(printk("%s(%ld): abort_command "
- "mbx failed.\n", __func__, vha->host_no));
- ret = FAILED;
+ ql_dbg(ql_dbg_taskm, vha, 0x8003,
+ "Abort command mbx failed for cmd=%p.\n", cmd);
} else {
- DEBUG3(printk("%s(%ld): abort_command "
- "mbx success.\n", __func__, vha->host_no));
+ ql_dbg(ql_dbg_taskm, vha, 0x8004,
+ "Abort command mbx success.\n");
wait = 1;
}
qla2x00_sp_compl(ha, sp);
@@ -870,16 +914,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
/* Wait for the command to be returned. */
if (wait) {
if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
- qla_printk(KERN_ERR, ha,
- "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n",
- vha->host_no, id, lun, ret);
+ ql_log(ql_log_warn, vha, 0x8006,
+ "Abort handler timed out for cmd=%p.\n", cmd);
ret = FAILED;
}
}
- qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n",
- vha->host_no, id, lun, wait, ret);
+ ql_log(ql_log_info, vha, 0x801c,
+ "Abort command issued -- %d %x.\n", wait, ret);
return ret;
}
@@ -947,40 +989,59 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
int err;
- if (!fcport)
+ if (!fcport) {
+ ql_log(ql_log_warn, vha, 0x8007,
+ "fcport is NULL.\n");
return FAILED;
+ }
err = fc_block_scsi_eh(cmd);
+ ql_dbg(ql_dbg_taskm, vha, 0x8008,
+ "fc_block_scsi_eh ret=%d.\n", err);
if (err != 0)
return err;
- qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
- vha->host_no, cmd->device->id, cmd->device->lun, name);
+ ql_log(ql_log_info, vha, 0x8009,
+ "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
+ cmd->device->id, cmd->device->lun, cmd);
err = 0;
- if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800a,
+ "Wait for hba online failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
+ }
err = 1;
- if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
+ if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800b,
+ "Wait for loop ready failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
+ }
err = 2;
if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
- != QLA_SUCCESS)
+ != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800c,
+ "do_reset failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
+ }
err = 3;
if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
- cmd->device->lun, type) != QLA_SUCCESS)
+ cmd->device->lun, type) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x800d,
+ "wait for peding cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
+ }
- qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
- vha->host_no, cmd->device->id, cmd->device->lun, name);
+ ql_log(ql_log_info, vha, 0x800e,
+ "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
+ cmd->device->id, cmd->device->lun, cmd);
return SUCCESS;
eh_reset_failed:
- qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n"
- , vha->host_no, cmd->device->id, cmd->device->lun, name,
- reset_errors[err]);
+ ql_log(ql_log_info, vha, 0x800f,
+ "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
+ reset_errors[err], cmd->device->id, cmd->device->lun);
return FAILED;
}
@@ -1030,19 +1091,25 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
id = cmd->device->id;
lun = cmd->device->lun;
- if (!fcport)
+ if (!fcport) {
+ ql_log(ql_log_warn, vha, 0x8010,
+ "fcport is NULL.\n");
return ret;
+ }
ret = fc_block_scsi_eh(cmd);
+ ql_dbg(ql_dbg_taskm, vha, 0x8011,
+ "fc_block_scsi_eh ret=%d.\n", ret);
if (ret != 0)
return ret;
ret = FAILED;
- qla_printk(KERN_INFO, vha->hw,
- "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
+ ql_log(ql_log_info, vha, 0x8012,
+ "BUS RESET ISSUED for id %d lun %d.\n", id, lun);
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
- DEBUG2(printk("%s failed:board disabled\n",__func__));
+ ql_log(ql_log_fatal, vha, 0x8013,
+ "Wait for hba online failed board disabled.\n");
goto eh_bus_reset_done;
}
@@ -1055,12 +1122,15 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
/* Flush outstanding commands. */
if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
- QLA_SUCCESS)
+ QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x8014,
+ "Wait for pending commands failed.\n");
ret = FAILED;
+ }
eh_bus_reset_done:
- qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
- (ret == FAILED) ? "failed" : "succeeded");
+ ql_log(ql_log_warn, vha, 0x802b,
+ "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEDED");
return ret;
}
@@ -1093,16 +1163,21 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
id = cmd->device->id;
lun = cmd->device->lun;
- if (!fcport)
+ if (!fcport) {
+ ql_log(ql_log_warn, vha, 0x8016,
+ "fcport is NULL.\n");
return ret;
+ }
ret = fc_block_scsi_eh(cmd);
+ ql_dbg(ql_dbg_taskm, vha, 0x8017,
+ "fc_block_scsi_eh ret=%d.\n", ret);
if (ret != 0)
return ret;
ret = FAILED;
- qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
+ ql_log(ql_log_info, vha, 0x8018,
+ "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun);
if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
goto eh_host_reset_lock;
@@ -1137,8 +1212,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
/* failed. schedule dpc to try */
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
- if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
+ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x802a,
+ "wait for hba online failed.\n");
goto eh_host_reset_lock;
+ }
}
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
}
@@ -1149,7 +1227,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
ret = SUCCESS;
eh_host_reset_lock:
- qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
+ qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__,
(ret == FAILED) ? "failed" : "succeeded");
return ret;
@@ -1179,9 +1257,9 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
ret = ha->isp_ops->target_reset(fcport, 0, 0);
if (ret != QLA_SUCCESS) {
- DEBUG2_3(printk("%s(%ld): bus_reset failed: "
- "target_reset=%d d_id=%x.\n", __func__,
- vha->host_no, ret, fcport->d_id.b24));
+ ql_dbg(ql_dbg_taskm, vha, 0x802c,
+ "Bus Reset failed: Target Reset=%d "
+ "d_id=%x.\n", ret, fcport->d_id.b24);
}
}
}
@@ -1189,9 +1267,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) {
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
- DEBUG2_3(printk("%s(%ld): failed: "
- "full_login_lip=%d.\n", __func__, vha->host_no,
- ret));
+ ql_dbg(ql_dbg_taskm, vha, 0x802d,
+ "full_login_lip=%d.\n", ret);
}
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -1202,8 +1279,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
if (ha->flags.enable_lip_reset) {
ret = qla2x00_lip_reset(vha);
if (ret != QLA_SUCCESS) {
- DEBUG2_3(printk("%s(%ld): failed: "
- "lip_reset=%d.\n", __func__, vha->host_no, ret));
+ ql_dbg(ql_dbg_taskm, vha, 0x802e,
+ "lip_reset failed (%d).\n", ret);
} else
qla2x00_wait_for_loop_ready(vha);
}
@@ -1302,17 +1379,17 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
if (!scsi_track_queue_full(sdev, qdepth))
return;
- DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
- "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
- fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
- sdev->queue_depth));
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
+ "Queue depth adjusted-down "
+ "to %d for scsi(%ld:%d:%d:%d).\n",
+ sdev->queue_depth, fcport->vha->host_no,
+ sdev->channel, sdev->id, sdev->lun);
}
static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
{
fc_port_t *fcport = sdev->hostdata;
struct scsi_qla_host *vha = fcport->vha;
- struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
req = vha->req;
@@ -1327,10 +1404,11 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
else
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
- DEBUG2(qla_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
- fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
- sdev->queue_depth));
+ ql_dbg(ql_dbg_io, vha, 0x302a,
+ "Queue depth adjusted-up to %d for "
+ "scsi(%ld:%d:%d:%d).\n",
+ sdev->queue_depth, fcport->vha->host_no,
+ sdev->channel, sdev->id, sdev->lun);
}
static int
@@ -1776,6 +1854,9 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->flags.port0 = 1;
else
ha->flags.port0 = 0;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
+ "device_type=0x%x port=%d fw_srisc_address=%p.\n",
+ ha->device_type, ha->flags.port0, ha->fw_srisc_address);
}
static int
@@ -1790,10 +1871,9 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
if (pci_request_selected_regions(ha->pdev, ha->bars,
QLA2XXX_DRIVER_NAME)) {
- qla_printk(KERN_WARNING, ha,
- "Failed to reserve PIO/MMIO regions (%s)\n",
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
+ "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
pci_name(ha->pdev));
-
goto iospace_error_exit;
}
if (!(ha->bars & 1))
@@ -1803,39 +1883,42 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
pio = pci_resource_start(ha->pdev, 0);
if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
- qla_printk(KERN_WARNING, ha,
- "Invalid PCI I/O region size (%s)...\n",
- pci_name(ha->pdev));
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
+ "Invalid pci I/O region size (%s).\n",
+ pci_name(ha->pdev));
pio = 0;
}
} else {
- qla_printk(KERN_WARNING, ha,
- "region #0 not a PIO resource (%s)...\n",
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
+ "Region #0 no a PIO resource (%s).\n",
pci_name(ha->pdev));
pio = 0;
}
ha->pio_address = pio;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
+ "PIO address=%p.\n",
+ ha->pio_address);
skip_pio:
/* Use MMIO operations for all accesses. */
if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
- qla_printk(KERN_ERR, ha,
- "region #1 not an MMIO resource (%s), aborting\n",
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
+ "Region #1 not an MMIO resource (%s), aborting.\n",
pci_name(ha->pdev));
goto iospace_error_exit;
}
if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
- qla_printk(KERN_ERR, ha,
- "Invalid PCI mem region size (%s), aborting\n",
- pci_name(ha->pdev));
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
+ "Invalid PCI mem region size (%s), aborting.\n",
+ pci_name(ha->pdev));
goto iospace_error_exit;
}
ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
if (!ha->iobase) {
- qla_printk(KERN_ERR, ha,
- "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
-
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
+ "Cannot remap MMIO (%s), aborting.\n",
+ pci_name(ha->pdev));
goto iospace_error_exit;
}
@@ -1849,6 +1932,8 @@ skip_pio:
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
pci_resource_len(ha->pdev, 3));
if (ha->mqiobase) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
+ "MQIO Base=%p.\n", ha->mqiobase);
/* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
ha->msix_count = msix;
@@ -1861,17 +1946,24 @@ skip_pio:
ha->max_req_queues = 2;
} else if (ql2xmaxqueues > 1) {
ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
- QLA_MQ_SIZE : ql2xmaxqueues;
- DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
- " of request queues:%d\n", ha->max_req_queues));
+ QLA_MQ_SIZE : ql2xmaxqueues;
+ ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
}
- qla_printk(KERN_INFO, ha,
- "MSI-X vector count: %d\n", msix);
+ ql_log_pci(ql_log_info, ha->pdev, 0x001a,
+ "MSI-X vector count: %d.\n", msix);
} else
- qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
+ ql_log_pci(ql_log_info, ha->pdev, 0x001b,
+ "BAR 3 not enabled.\n");
mqiobase_exit:
ha->msix_count = ha->max_rsp_queues + 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
+ "MSIX Count:%d.\n", ha->msix_count);
return (0);
iospace_error_exit:
@@ -1935,7 +2027,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
+ ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
+ "Mem only adapter.\n");
}
+ ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
+ "Bars=%d.\n", bars);
if (mem_only) {
if (pci_enable_device_mem(pdev))
@@ -1950,9 +2046,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
if (!ha) {
- DEBUG(printk("Unable to allocate memory for ha\n"));
+ ql_log_pci(ql_log_fatal, pdev, 0x0009,
+ "Unable to allocate memory for ha.\n");
goto probe_out;
}
+ ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
+ "Memory allocated for ha=%p.\n", ha);
ha->pdev = pdev;
/* Clear our data area */
@@ -1974,10 +2073,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto probe_hw_failed;
- qla_printk(KERN_INFO, ha,
- "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
- ha->iobase);
-
+ ql_log_pci(ql_log_info, pdev, 0x001d,
+ "Found an ISP%04X irq %d iobase 0x%p.\n",
+ pdev->device, pdev->irq, ha->iobase);
ha->prev_topology = 0;
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
@@ -2078,7 +2176,18 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
}
-
+ ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
+ "mbx_count=%d, req_length=%d, "
+ "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
+ "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, .\n",
+ ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
+ ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
+ ha->nvram_npiv_size);
+ ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
+ "isp_ops=%p, flash_conf_off=%d, "
+ "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
+ ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
+ ha->nvram_conf_off, ha->nvram_data_off);
mutex_init(&ha->vport_lock);
init_completion(&ha->mbx_cmd_comp);
complete(&ha->mbx_cmd_comp);
@@ -2088,10 +2197,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
set_bit(0, (unsigned long *) ha->vp_idx_map);
qla2x00_config_dma_addressing(ha);
+ ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
+ "64 Bit addressing is %s.\n",
+ ha->flags.enable_64bit_addressing ? "enabled" :
+ "disabled");
ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
if (!ret) {
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Failed to allocate memory for adapter\n");
+ ql_log_pci(ql_log_fatal, pdev, 0x0031,
+ "Failed to allocate memory for adapter, aborting.\n");
goto probe_hw_failed;
}
@@ -2103,9 +2216,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
base_vha = qla2x00_create_host(sht, ha);
if (!base_vha) {
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Failed to allocate memory for scsi_host\n");
-
ret = -ENOMEM;
qla2x00_mem_free(ha);
qla2x00_free_req_que(ha, req);
@@ -2132,7 +2242,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!IS_QLA82XX(ha))
host->sg_tablesize = QLA_SG_ALL;
}
-
+ ql_dbg(ql_dbg_init, base_vha, 0x0032,
+ "can_queue=%d, req=%p, "
+ "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
+ host->can_queue, base_vha->req,
+ base_vha->mgmt_svr_loop_id, host->sg_tablesize);
host->max_id = max_id;
host->this_id = 255;
host->cmd_per_lun = 3;
@@ -2146,6 +2260,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->transportt = qla2xxx_transport_template;
sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
+ ql_dbg(ql_dbg_init, base_vha, 0x0033,
+ "max_id=%d this_id=%d "
+ "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
+ "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id,
+ host->this_id, host->cmd_per_lun, host->unique_id,
+ host->max_cmd_len, host->max_channel, host->max_lun,
+ host->transportt, sht->vendor_id);
+
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
@@ -2156,9 +2278,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Alloc arrays of request and response ring ptrs */
que_init:
if (!qla2x00_alloc_queues(ha)) {
- qla_printk(KERN_WARNING, ha,
- "[ERROR] Failed to allocate memory for queue"
- " pointers\n");
+ ql_log(ql_log_fatal, base_vha, 0x003d,
+ "Failed to allocate memory for queue pointers.. aborting.\n");
goto probe_init_failed;
}
@@ -2186,20 +2307,33 @@ que_init:
rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
}
- if (qla2x00_initialize_adapter(base_vha)) {
- qla_printk(KERN_WARNING, ha,
- "Failed to initialize adapter\n");
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
+ "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
+ ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
+ ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
+ "req->req_q_in=%p req->req_q_out=%p "
+ "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
+ req->req_q_in, req->req_q_out,
+ rsp->rsp_q_in, rsp->rsp_q_out);
+ ql_dbg(ql_dbg_init, base_vha, 0x003e,
+ "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
+ ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
+ ql_dbg(ql_dbg_init, base_vha, 0x003f,
+ "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
+ req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
- DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
- "Adapter flags %x.\n",
- base_vha->host_no, base_vha->device_flags));
+ if (qla2x00_initialize_adapter(base_vha)) {
+ ql_log(ql_log_fatal, base_vha, 0x00d6,
+ "Failed to initialize adapter - Adapter flags %x.\n",
+ base_vha->device_flags);
if (IS_QLA82XX(ha)) {
qla82xx_idc_lock(ha);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
qla82xx_idc_unlock(ha);
- qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ ql_log(ql_log_fatal, base_vha, 0x00d7,
+ "HW State: FAILED.\n");
}
ret = -ENODEV;
@@ -2208,9 +2342,8 @@ que_init:
if (ha->mqenable) {
if (qla25xx_setup_mode(base_vha)) {
- qla_printk(KERN_WARNING, ha,
- "Can't create queues, falling back to single"
- " queue mode\n");
+ ql_log(ql_log_warn, base_vha, 0x00ec,
+ "Failed to create queues, falling back to single queue mode.\n");
goto que_init;
}
}
@@ -2222,13 +2355,15 @@ que_init:
* Startup the kernel thread for this host adapter
*/
ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
- "%s_dpc", base_vha->host_str);
+ "%s_dpc", base_vha->host_str);
if (IS_ERR(ha->dpc_thread)) {
- qla_printk(KERN_WARNING, ha,
- "Unable to start DPC thread!\n");
+ ql_log(ql_log_fatal, base_vha, 0x00ed,
+ "Failed to start DPC thread.\n");
ret = PTR_ERR(ha->dpc_thread);
goto probe_failed;
}
+ ql_dbg(ql_dbg_init, base_vha, 0x00ee,
+ "DPC thread started successfully.\n");
skip_dpc:
list_add_tail(&base_vha->list, &ha->vp_list);
@@ -2236,16 +2371,18 @@ skip_dpc:
/* Initialized the timer */
qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
-
- DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
- base_vha->host_no, ha));
+ ql_dbg(ql_dbg_init, base_vha, 0x00ef,
+ "Started qla2x00_timer with "
+ "interval=%d.\n", WATCH_INTERVAL);
+ ql_dbg(ql_dbg_init, base_vha, 0x00f0,
+ "Detected hba at address=%p.\n",
+ ha);
if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) {
base_vha->flags.difdix_supported = 1;
- DEBUG18(qla_printk(KERN_INFO, ha,
- "Registering for DIF/DIX type 1 and 3"
- " protection.\n"));
+ ql_dbg(ql_dbg_init, base_vha, 0x00f1,
+ "Registering for DIF/DIX type 1 and 3 protection.\n");
scsi_host_set_prot(host,
SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
@@ -2267,6 +2404,9 @@ skip_dpc:
base_vha->flags.init_done = 1;
base_vha->flags.online = 1;
+ ql_dbg(ql_dbg_init, base_vha, 0x00f2,
+ "Init done and hba is online.\n");
+
scsi_scan_host(host);
qla2x00_alloc_sysfs_attr(base_vha);
@@ -2275,14 +2415,17 @@ skip_dpc:
qla2x00_dfs_setup(base_vha);
- qla_printk(KERN_INFO, ha, "\n"
- " QLogic Fibre Channel HBA Driver: %s\n"
- " QLogic %s - %s\n"
- " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
- qla2x00_version_str, ha->model_number,
- ha->model_desc ? ha->model_desc : "", pdev->device,
- ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
- ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
+ ql_log(ql_log_info, base_vha, 0x00fa,
+ "QLogic Fibre Channed HBA Driver: %s.\n",
+ qla2x00_version_str);
+ ql_log(ql_log_info, base_vha, 0x00fb,
+ "QLogic %s - %s.\n",
+ ha->model_number, ha->model_desc ? ha->model_desc : "");
+ ql_log(ql_log_info, base_vha, 0x00fc,
+ "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
+ pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
+ pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
+ base_vha->host_no,
ha->isp_ops->fw_version_str(base_vha, fw_str));
return 0;
@@ -2580,20 +2723,15 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
fcport->login_retry = vha->hw->login_retry_count;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- DEBUG(printk("scsi(%ld): Port login retry: "
+ ql_dbg(ql_dbg_disc, vha, 0x2067,
+ "Port login retry "
"%02x%02x%02x%02x%02x%02x%02x%02x, "
- "id = 0x%04x retry cnt=%d\n",
- vha->host_no,
- fcport->port_name[0],
- fcport->port_name[1],
- fcport->port_name[2],
- fcport->port_name[3],
- fcport->port_name[4],
- fcport->port_name[5],
- fcport->port_name[6],
- fcport->port_name[7],
- fcport->loop_id,
- fcport->login_retry));
+ "id = 0x%04x retry cnt=%d.\n",
+ fcport->port_name[0], fcport->port_name[1],
+ fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5],
+ fcport->port_name[6], fcport->port_name[7],
+ fcport->loop_id, fcport->login_retry);
}
}
@@ -2676,6 +2814,9 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ctx_cachep);
if (!ha->ctx_mempool)
goto fail_free_srb_mempool;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
+ "ctx_cachep=%p ctx_mempool=%p.\n",
+ ctx_cachep, ha->ctx_mempool);
}
/* Get memory for cached NVRAM */
@@ -2690,22 +2831,29 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->s_dma_pool)
goto fail_free_nvram;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
+ "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
+ ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
+
if (IS_QLA82XX(ha) || ql2xenabledif) {
ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
DSD_LIST_DMA_POOL_SIZE, 8, 0);
if (!ha->dl_dma_pool) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - dl_dma_pool\n");
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
+ "Failed to allocate memory for dl_dma_pool.\n");
goto fail_s_dma_pool;
}
ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
FCP_CMND_DMA_POOL_SIZE, 8, 0);
if (!ha->fcp_cmnd_dma_pool) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - fcp_cmnd_dma_pool\n");
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
+ "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
goto fail_dl_dma_pool;
}
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
+ "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n",
+ ha->dl_dma_pool, ha->fcp_cmnd_dma_pool);
}
/* Allocate memory for SNS commands */
@@ -2715,6 +2863,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
if (!ha->sns_cmd)
goto fail_dma_pool;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
+ "sns_cmd.\n", ha->sns_cmd);
} else {
/* Get consistent memory allocated for MS IOCB */
ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -2726,12 +2876,16 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
if (!ha->ct_sns)
goto fail_free_ms_iocb;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
+ "ms_iocb=%p ct_sns=%p.\n",
+ ha->ms_iocb, ha->ct_sns);
}
/* Allocate memory for request ring */
*req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
if (!*req) {
- DEBUG(printk("Unable to allocate memory for req\n"));
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
+ "Failed to allocate memory for req.\n");
goto fail_req;
}
(*req)->length = req_len;
@@ -2739,14 +2893,15 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
((*req)->length + 1) * sizeof(request_t),
&(*req)->dma, GFP_KERNEL);
if (!(*req)->ring) {
- DEBUG(printk("Unable to allocate memory for req_ring\n"));
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
+ "Failed to allocate memory for req_ring.\n");
goto fail_req_ring;
}
/* Allocate memory for response ring */
*rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
if (!*rsp) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate memory for rsp\n");
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
+ "Failed to allocate memory for rsp.\n");
goto fail_rsp;
}
(*rsp)->hw = ha;
@@ -2755,19 +2910,24 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
((*rsp)->length + 1) * sizeof(response_t),
&(*rsp)->dma, GFP_KERNEL);
if (!(*rsp)->ring) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate memory for rsp_ring\n");
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
+ "Failed to allocate memory for rsp_ring.\n");
goto fail_rsp_ring;
}
(*req)->rsp = *rsp;
(*rsp)->req = *req;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
+ "req=%p req->length=%d req->ring=%p rsp=%p "
+ "rsp->length=%d rsp->ring=%p.\n",
+ *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
+ (*rsp)->ring);
/* Allocate memory for NVRAM data for vports */
if (ha->nvram_npiv_size) {
ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
- ha->nvram_npiv_size, GFP_KERNEL);
+ ha->nvram_npiv_size, GFP_KERNEL);
if (!ha->npiv_info) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate memory for npiv info\n");
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
+ "Failed to allocate memory for npiv_info.\n");
goto fail_npiv_info;
}
} else
@@ -2779,6 +2939,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
&ha->ex_init_cb_dma);
if (!ha->ex_init_cb)
goto fail_ex_init_cb;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
+ "ex_init_cb=%p.\n", ha->ex_init_cb);
}
INIT_LIST_HEAD(&ha->gbl_dsd_list);
@@ -2789,6 +2951,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
&ha->async_pd_dma);
if (!ha->async_pd)
goto fail_async_pd;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
+ "async_pd=%p.\n", ha->async_pd);
}
INIT_LIST_HEAD(&ha->vp_list);
@@ -2854,7 +3018,8 @@ fail_free_init_cb:
ha->init_cb = NULL;
ha->init_cb_dma = 0;
fail:
- DEBUG(printk("%s: Memory allocation failure\n", __func__));
+ ql_log(ql_log_fatal, NULL, 0x0030,
+ "Memory allocation failure.\n");
return -ENOMEM;
}
@@ -3003,8 +3168,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
if (host == NULL) {
- printk(KERN_WARNING
- "qla2xxx: Couldn't allocate host from scsi layer!\n");
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
+ "Failed to allocate host from the scsi layer, aborting.\n");
goto fail;
}
@@ -3023,6 +3188,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
spin_lock_init(&vha->work_lock);
sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
+ ql_dbg(ql_dbg_init, vha, 0x0041,
+ "Allocated the host=%p hw=%p vha=%p dev_name=%s",
+ vha->host, vha->hw, vha,
+ dev_name(&(ha->pdev->dev)));
+
return vha;
fail:
@@ -3264,18 +3434,18 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
if (status == QLA_SUCCESS) {
fcport->old_loop_id = fcport->loop_id;
- DEBUG(printk("scsi(%ld): port login OK: logged "
- "in ID 0x%x\n", vha->host_no, fcport->loop_id));
+ ql_dbg(ql_dbg_disc, vha, 0x2003,
+ "Port login OK: logged in ID 0x%x.\n",
+ fcport->loop_id);
qla2x00_update_fcport(vha, fcport);
} else if (status == 1) {
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
/* retry the login again */
- DEBUG(printk("scsi(%ld): Retrying"
- " %d login again loop_id 0x%x\n",
- vha->host_no, fcport->login_retry,
- fcport->loop_id));
+ ql_dbg(ql_dbg_disc, vha, 0x2007,
+ "Retrying %d login again loop_id 0x%x.\n",
+ fcport->login_retry, fcport->loop_id);
} else {
fcport->login_retry = 0;
}
@@ -3315,26 +3485,27 @@ qla2x00_do_dpc(void *data)
set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
- DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
+ "DPC handler sleeping.\n");
schedule();
__set_current_state(TASK_RUNNING);
- DEBUG3(printk("qla2x00: DPC handler waking up\n"));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
+ "DPC handler waking up.\n");
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
+ "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
/* Initialization not yet finished. Don't do anything yet. */
if (!base_vha->flags.init_done)
continue;
if (ha->flags.eeh_busy) {
- DEBUG17(qla_printk(KERN_WARNING, ha,
- "qla2x00_do_dpc: dpc_flags: %lx\n",
- base_vha->dpc_flags));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
+ "eeh_busy=%d.\n", ha->flags.eeh_busy);
continue;
}
- DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
-
ha->dpc_active = 1;
if (ha->flags.mbox_busy) {
@@ -3351,8 +3522,8 @@ qla2x00_do_dpc(void *data)
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
qla82xx_idc_unlock(ha);
- qla_printk(KERN_INFO, ha,
- "HW State: FAILED\n");
+ ql_log(ql_log_info, base_vha, 0x4004,
+ "HW State: FAILED.\n");
qla82xx_device_state_handler(base_vha);
continue;
}
@@ -3360,10 +3531,8 @@ qla2x00_do_dpc(void *data)
if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
&base_vha->dpc_flags)) {
- DEBUG(printk(KERN_INFO
- "scsi(%ld): dpc: sched "
- "qla82xx_fcoe_ctx_reset ha = %p\n",
- base_vha->host_no, ha));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
+ "FCoE context reset scheduled.\n");
if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
if (qla82xx_fcoe_ctx_reset(base_vha)) {
@@ -3377,18 +3546,16 @@ qla2x00_do_dpc(void *data)
&base_vha->dpc_flags);
}
- DEBUG(printk("scsi(%ld): dpc:"
- " qla82xx_fcoe_ctx_reset end\n",
- base_vha->host_no));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
+ "FCoE context reset end.\n");
}
}
if (test_and_clear_bit(ISP_ABORT_NEEDED,
&base_vha->dpc_flags)) {
- DEBUG(printk("scsi(%ld): dpc: sched "
- "qla2x00_abort_isp ha = %p\n",
- base_vha->host_no, ha));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
+ "ISP abort scheduled.\n");
if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
@@ -3401,8 +3568,8 @@ qla2x00_do_dpc(void *data)
&base_vha->dpc_flags);
}
- DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
- base_vha->host_no));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
+ "ISP abort end.\n");
}
if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
@@ -3411,9 +3578,8 @@ qla2x00_do_dpc(void *data)
}
if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
- DEBUG(printk(KERN_INFO "scsi(%ld): dpc: sched "
- "qla2x00_quiesce_needed ha = %p\n",
- base_vha->host_no, ha));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
+ "Quiescence mode scheduled.\n");
qla82xx_device_state_handler(base_vha);
clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags);
if (!ha->flags.quiesce_owner) {
@@ -3423,17 +3589,20 @@ qla2x00_do_dpc(void *data)
qla82xx_clear_qsnt_ready(base_vha);
qla82xx_idc_unlock(ha);
}
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
+ "Quiescence mode end.\n");
}
if (test_and_clear_bit(RESET_MARKER_NEEDED,
&base_vha->dpc_flags) &&
(!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
- DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
- base_vha->host_no));
-
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
+ "Reset marker scheduled.\n");
qla2x00_rst_aen(base_vha);
clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
+ "Reset marker end.\n");
}
/* Retry each device up to login retry count */
@@ -3442,19 +3611,18 @@ qla2x00_do_dpc(void *data)
!test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
- DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
- base_vha->host_no));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
+ "Relogin scheduled.\n");
qla2x00_relogin(base_vha);
-
- DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
- base_vha->host_no));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
+ "Relogin end.\n");
}
if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
&base_vha->dpc_flags)) {
- DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
- base_vha->host_no));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
+ "Loop resync scheduled.\n");
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
&base_vha->dpc_flags))) {
@@ -3465,8 +3633,8 @@ qla2x00_do_dpc(void *data)
&base_vha->dpc_flags);
}
- DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
- base_vha->host_no));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
+ "Loop resync end.\n");
}
if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
@@ -3489,7 +3657,8 @@ qla2x00_do_dpc(void *data)
} /* End of while(1) */
__set_current_state(TASK_RUNNING);
- DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
+ "DPC handler exiting.\n");
/*
* Make sure that nobody tries to wake us up again.
@@ -3596,9 +3765,11 @@ void
qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
{
if (atomic_read(&sp->ref_count) == 0) {
- DEBUG2(qla_printk(KERN_WARNING, ha,
- "SP reference-count to ZERO -- sp=%p\n", sp));
- DEBUG2(BUG());
+ ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
+ "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
+ sp, sp->cmd);
+ if (ql2xextended_error_logging & ql_dbg_io)
+ BUG();
return;
}
if (!atomic_dec_and_test(&sp->ref_count))
@@ -3626,6 +3797,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
struct req_que *req;
if (ha->flags.eeh_busy) {
+ ql_dbg(ql_dbg_timer, vha, 0x6000,
+ "EEH = %d, restarting timer.\n",
+ ha->flags.eeh_busy);
qla2x00_restart_timer(vha, WATCH_INTERVAL);
return;
}
@@ -3650,9 +3824,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
if (atomic_read(&vha->loop_down_timer) ==
vha->loop_down_abort_time) {
- DEBUG(printk("scsi(%ld): Loop Down - aborting the "
- "queues before time expire\n",
- vha->host_no));
+ ql_log(ql_log_info, vha, 0x6008,
+ "Loop down - aborting the queues before time expires.\n");
if (!IS_QLA2100(ha) && vha->link_down_timeout)
atomic_set(&vha->loop_state, LOOP_DEAD);
@@ -3697,10 +3870,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* if the loop has been down for 4 minutes, reinit adapter */
if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
if (!(vha->device_flags & DFLG_NO_CABLE)) {
- DEBUG(printk("scsi(%ld): Loop down - "
- "aborting ISP.\n",
- vha->host_no));
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x6009,
"Loop down - aborting ISP.\n");
if (IS_QLA82XX(ha))
@@ -3711,9 +3881,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
&vha->dpc_flags);
}
}
- DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
- vha->host_no,
- atomic_read(&vha->loop_down_timer)));
+ ql_dbg(ql_dbg_timer, vha, 0x600a,
+ "Loop down - seconds remaining %d.\n",
+ atomic_read(&vha->loop_down_timer));
}
/* Check if beacon LED needs to be blinked for physical host only */
@@ -3736,8 +3906,27 @@ qla2x00_timer(scsi_qla_host_t *vha)
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
+ ql_dbg(ql_dbg_timer, vha, 0x600b,
+ "isp_abort_needed=%d loop_resync_needed=%d "
+ "fcport_update_needed=%d start_dpc=%d "
+ "reset_marker_needed=%d",
+ test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
+ test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
+ start_dpc,
+ test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
+ ql_dbg(ql_dbg_timer, vha, 0x600c,
+ "beacon_blink_needed=%d isp_unrecoverable=%d "
+ "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
+ "relogin_needed=%d.\n",
+ test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
+ test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
+ test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
+ test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
qla2xxx_wake_dpc(vha);
+ }
qla2x00_restart_timer(vha, WATCH_INTERVAL);
}
@@ -3806,8 +3995,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
goto out;
if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
- DEBUG2(printk("scsi(%ld): Failed to load firmware image "
- "(%s).\n", vha->host_no, blob->name));
+ ql_log(ql_log_warn, vha, 0x0063,
+ "Failed to load firmware image (%s).\n", blob->name);
blob->fw = NULL;
blob = NULL;
goto out;
@@ -3836,8 +4025,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
scsi_qla_host_t *vha = pci_get_drvdata(pdev);
struct qla_hw_data *ha = vha->hw;
- DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
- state));
+ ql_dbg(ql_dbg_aer, vha, 0x9000,
+ "PCI error detected, state %x.\n", state);
switch (state) {
case pci_channel_io_normal:
@@ -3850,9 +4039,9 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
ha->flags.isp82xx_fw_hung = 1;
if (ha->flags.mbox_busy) {
ha->flags.mbox_int = 1;
- DEBUG2(qla_printk(KERN_ERR, ha,
- "Due to pci channel io frozen, doing premature "
- "completion of mbx command\n"));
+ ql_dbg(ql_dbg_aer, vha, 0x9001,
+ "Due to pci channel io frozen, doing premature "
+ "completion of mbx command.\n");
complete(&ha->mbx_intr_comp);
}
}
@@ -3900,8 +4089,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (risc_paused) {
- qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
- "Dumping firmware!\n");
+ ql_log(ql_log_info, base_vha, 0x9003,
+ "RISC paused -- mmio_enabled, Dumping firmware.\n");
ha->isp_ops->fw_dump(base_vha, 0);
return PCI_ERS_RESULT_NEED_RESET;
@@ -3917,8 +4106,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
int fn;
struct pci_dev *other_pdev = NULL;
- DEBUG17(qla_printk(KERN_INFO, ha,
- "scsi(%ld): In qla82xx_error_recovery\n", base_vha->host_no));
+ ql_dbg(ql_dbg_aer, base_vha, 0x9006,
+ "Entered %s.\n", __func__);
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
@@ -3932,8 +4121,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
fn = PCI_FUNC(ha->pdev->devfn);
while (fn > 0) {
fn--;
- DEBUG17(qla_printk(KERN_INFO, ha,
- "Finding pci device at function = 0x%x\n", fn));
+ ql_dbg(ql_dbg_aer, base_vha, 0x9007,
+ "Finding pci device at function = 0x%x.\n", fn);
other_pdev =
pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus),
ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn),
@@ -3942,9 +4131,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
if (!other_pdev)
continue;
if (atomic_read(&other_pdev->enable_cnt)) {
- DEBUG17(qla_printk(KERN_INFO, ha,
- "Found PCI func available and enabled at 0x%x\n",
- fn));
+ ql_dbg(ql_dbg_aer, base_vha, 0x9008,
+ "Found PCI func available and enable at 0x%x.\n",
+ fn);
pci_dev_put(other_pdev);
break;
}
@@ -3953,8 +4142,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
if (!fn) {
/* Reset owner */
- DEBUG17(qla_printk(KERN_INFO, ha,
- "This devfn is reset owner = 0x%x\n", ha->pdev->devfn));
+ ql_dbg(ql_dbg_aer, base_vha, 0x9009,
+ "This devfn is reset owner = 0x%x.\n",
+ ha->pdev->devfn);
qla82xx_idc_lock(ha);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
@@ -3964,8 +4154,8 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
QLA82XX_IDC_VERSION);
drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
- DEBUG17(qla_printk(KERN_INFO, ha,
- "drv_active = 0x%x\n", drv_active));
+ ql_dbg(ql_dbg_aer, base_vha, 0x900a,
+ "drv_active = 0x%x.\n", drv_active);
qla82xx_idc_unlock(ha);
/* Reset if device is not already reset
@@ -3978,12 +4168,14 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
qla82xx_idc_lock(ha);
if (rval != QLA_SUCCESS) {
- qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
+ ql_log(ql_log_info, base_vha, 0x900b,
+ "HW State: FAILED.\n");
qla82xx_clear_drv_active(ha);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
} else {
- qla_printk(KERN_INFO, ha, "HW State: READY\n");
+ ql_log(ql_log_info, base_vha, 0x900c,
+ "HW State: READY.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_READY);
qla82xx_idc_unlock(ha);
@@ -3996,8 +4188,9 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
}
qla82xx_idc_unlock(ha);
} else {
- DEBUG17(qla_printk(KERN_INFO, ha,
- "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn));
+ ql_dbg(ql_dbg_aer, base_vha, 0x900d,
+ "This devfn is not reset owner = 0x%x.\n",
+ ha->pdev->devfn);
if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
ha->flags.isp82xx_fw_hung = 0;
@@ -4021,7 +4214,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
struct rsp_que *rsp;
int rc, retries = 10;
- DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
+ ql_dbg(ql_dbg_aer, base_vha, 0x9004,
+ "Slot Reset.\n");
/* Workaround: qla2xxx driver which access hardware earlier
* needs error state to be pci_channel_io_online.
@@ -4042,7 +4236,7 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
rc = pci_enable_device(pdev);
if (rc) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, base_vha, 0x9005,
"Can't re-enable PCI device after reset.\n");
goto exit_slot_reset;
}
@@ -4072,8 +4266,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
exit_slot_reset:
- DEBUG17(qla_printk(KERN_WARNING, ha,
- "slot_reset-return:ret=%x\n", ret));
+ ql_dbg(ql_dbg_aer, base_vha, 0x900e,
+ "slot_reset return %x.\n", ret);
return ret;
}
@@ -4085,13 +4279,13 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
struct qla_hw_data *ha = base_vha->hw;
int ret;
- DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
+ ql_dbg(ql_dbg_aer, base_vha, 0x900f,
+ "pci_resume.\n");
ret = qla2x00_wait_for_hba_online(base_vha);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_ERR, ha,
- "the device failed to resume I/O "
- "from slot/link_reset");
+ ql_log(ql_log_fatal, base_vha, 0x9002,
+ "The device failed to resume I/O from slot/link_reset.\n");
}
pci_cleanup_aer_uncorrect_error_status(pdev);
@@ -4155,8 +4349,8 @@ qla2x00_module_init(void)
srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (srb_cachep == NULL) {
- printk(KERN_ERR
- "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
+ ql_log(ql_log_fatal, NULL, 0x0001,
+ "Unable to allocate SRB cache...Failing load!.\n");
return -ENOMEM;
}
@@ -4169,13 +4363,15 @@ qla2x00_module_init(void)
fc_attach_transport(&qla2xxx_transport_functions);
if (!qla2xxx_transport_template) {
kmem_cache_destroy(srb_cachep);
+ ql_log(ql_log_fatal, NULL, 0x0002,
+ "fc_attach_transport failed...Failing load!.\n");
return -ENODEV;
}
apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
if (apidev_major < 0) {
- printk(KERN_WARNING "qla2xxx: Unable to register char device "
- "%s\n", QLA2XXX_APIDEV);
+ ql_log(ql_log_fatal, NULL, 0x0003,
+ "Unable to register char device %s.\n", QLA2XXX_APIDEV);
}
qla2xxx_transport_vport_template =
@@ -4183,16 +4379,21 @@ qla2x00_module_init(void)
if (!qla2xxx_transport_vport_template) {
kmem_cache_destroy(srb_cachep);
fc_release_transport(qla2xxx_transport_template);
+ ql_log(ql_log_fatal, NULL, 0x0004,
+ "fc_attach_transport vport failed...Failing load!.\n");
return -ENODEV;
}
-
- printk(KERN_INFO "QLogic Fibre Channel HBA Driver: %s\n",
+ ql_log(ql_log_info, NULL, 0x0005,
+ "QLogic Fibre Channel HBA Driver: %s.\n",
qla2x00_version_str);
ret = pci_register_driver(&qla2xxx_pci_driver);
if (ret) {
kmem_cache_destroy(srb_cachep);
fc_release_transport(qla2xxx_transport_template);
fc_release_transport(qla2xxx_transport_vport_template);
+ ql_log(ql_log_fatal, NULL, 0x0006,
+ "pci_register_driver failed...ret=%d Failing load!.\n",
+ ret);
}
return ret;
}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 693647661ed..eff13563c82 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -189,6 +189,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
uint16_t word;
uint32_t nv_cmd, wait_cnt;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
qla2x00_nv_write(ha, NVR_DATA_OUT);
qla2x00_nv_write(ha, 0);
@@ -220,8 +221,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
- DEBUG9_10(qla_printk(KERN_WARNING, ha,
- "NVRAM didn't go ready...\n"));
+ ql_dbg(ql_dbg_user, vha, 0x708d,
+ "NVRAM didn't go ready...\n");
break;
}
NVRAM_DELAY();
@@ -308,6 +309,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t word, wait_cnt;
uint16_t wprot, wprot_old;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* Clear NVRAM write protection. */
ret = QLA_FUNCTION_FAILED;
@@ -350,8 +352,8 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
- DEBUG9_10(qla_printk(KERN_WARNING, ha,
- "NVRAM didn't go ready...\n"));
+ ql_dbg(ql_dbg_user, vha, 0x708e,
+ "NVRAM didn't go ready...\n");
break;
}
NVRAM_DELAY();
@@ -371,6 +373,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t word, wait_cnt;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
if (stat != QLA_SUCCESS)
return;
@@ -409,8 +412,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
- DEBUG9_10(qla_printk(KERN_WARNING, ha,
- "NVRAM didn't go ready...\n"));
+ ql_dbg(ql_dbg_user, vha, 0x708f,
+ "NVRAM didn't go ready...\n");
break;
}
NVRAM_DELAY();
@@ -607,9 +610,10 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
for (chksum = 0; cnt; cnt--)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
- qla_printk(KERN_ERR, ha,
+ ql_log(ql_log_fatal, vha, 0x0045,
"Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
- qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location));
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e,
+ buf, sizeof(struct qla_flt_location));
return QLA_FUNCTION_FAILED;
}
@@ -618,7 +622,9 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
*start = (le16_to_cpu(fltl->start_hi) << 16 |
le16_to_cpu(fltl->start_lo)) >> 2;
end:
- DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
+ ql_dbg(ql_dbg_init, vha, 0x0046,
+ "FLTL[%s] = 0x%x.\n",
+ loc, *start);
return QLA_SUCCESS;
}
@@ -685,10 +691,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
if (*wptr == __constant_cpu_to_le16(0xffff))
goto no_flash_data;
if (flt->version != __constant_cpu_to_le16(1)) {
- DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: "
- "version=0x%x length=0x%x checksum=0x%x.\n",
+ ql_log(ql_log_warn, vha, 0x0047,
+ "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
- le16_to_cpu(flt->checksum)));
+ le16_to_cpu(flt->checksum));
goto no_flash_data;
}
@@ -696,10 +702,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
for (chksum = 0; cnt; cnt--)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
- DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
- "version=0x%x length=0x%x checksum=0x%x.\n",
+ ql_log(ql_log_fatal, vha, 0x0048,
+ "Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
- chksum));
+ le16_to_cpu(flt->checksum));
goto no_flash_data;
}
@@ -708,10 +714,11 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
for ( ; cnt; cnt--, region++) {
/* Store addresses as DWORD offsets. */
start = le32_to_cpu(region->start) >> 2;
-
- DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
- "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
- le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));
+ ql_dbg(ql_dbg_init, vha, 0x0049,
+ "FLT[%02x]: start=0x%x "
+ "end=0x%x size=0x%x.\n", le32_to_cpu(region->code),
+ start, le32_to_cpu(region->end) >> 2,
+ le32_to_cpu(region->size));
switch (le32_to_cpu(region->code) & 0xff) {
case FLT_REG_FW:
@@ -796,12 +803,16 @@ no_flash_data:
ha->flt_region_npiv_conf = ha->flags.port0 ?
def_npiv_conf0[def] : def_npiv_conf1[def];
done:
- DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
- "vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x "
- "npiv=0x%x. fcp_prio_cfg=0x%x\n", loc, ha->flt_region_boot,
- ha->flt_region_fw, ha->flt_region_vpd_nvram, ha->flt_region_vpd,
- ha->flt_region_nvram, ha->flt_region_fdt, ha->flt_region_flt,
- ha->flt_region_npiv_conf, ha->flt_region_fcp_prio));
+ ql_dbg(ql_dbg_init, vha, 0x004a,
+ "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x.\n",
+ loc, ha->flt_region_boot,
+ ha->flt_region_fw, ha->flt_region_vpd_nvram,
+ ha->flt_region_vpd);
+ ql_dbg(ql_dbg_init, vha, 0x004b,
+ "nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
+ ha->flt_region_nvram,
+ ha->flt_region_fdt, ha->flt_region_flt,
+ ha->flt_region_npiv_conf, ha->flt_region_fcp_prio);
}
static void
@@ -833,10 +844,12 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
cnt++)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
- DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FDT detected: "
- "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0],
- le16_to_cpu(fdt->version)));
- DEBUG9(qla2x00_dump_buffer((uint8_t *)fdt, sizeof(*fdt)));
+ ql_dbg(ql_dbg_init, vha, 0x004c,
+ "Inconsistent FDT detected:"
+ " checksum=0x%x id=%c version0x%x.\n", chksum,
+ fdt->sig[0], le16_to_cpu(fdt->version));
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113,
+ (uint8_t *)fdt, sizeof(*fdt));
goto no_flash_data;
}
@@ -890,11 +903,12 @@ no_flash_data:
break;
}
done:
- DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
- "pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
+ ql_dbg(ql_dbg_init, vha, 0x004d,
+ "FDT[%x]: (0x%x/0x%x) erase=0x%x "
+ "pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
- ha->fdt_unprotect_sec_cmd, ha->fdt_wrt_disable,
- ha->fdt_block_size));
+ ha->fdt_unprotect_sec_cmd, ha->fdt_wrt_disable, ha->fdt_block_size);
+
}
static void
@@ -919,6 +933,10 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
ha->nx_reset_timeout = le32_to_cpu(*wptr);
}
+ ql_dbg(ql_dbg_init, vha, 0x004e,
+ "nx_dev_init_timeout=%d "
+ "nx_reset_timeout=%d.\n", ha->nx_dev_init_timeout,
+ ha->nx_reset_timeout);
return;
}
@@ -963,17 +981,18 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
if (hdr.version == __constant_cpu_to_le16(0xffff))
return;
if (hdr.version != __constant_cpu_to_le16(1)) {
- DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config "
+ ql_dbg(ql_dbg_user, vha, 0x7090,
+ "Unsupported NPIV-Config "
"detected: version=0x%x entries=0x%x checksum=0x%x.\n",
le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
- le16_to_cpu(hdr.checksum)));
+ le16_to_cpu(hdr.checksum));
return;
}
data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
if (!data) {
- DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to "
- "allocate memory.\n"));
+ ql_log(ql_log_warn, vha, 0x7091,
+ "Unable to allocate memory for data.\n");
return;
}
@@ -985,10 +1004,11 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
for (wptr = data, chksum = 0; cnt; cnt--)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
- DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config "
+ ql_dbg(ql_dbg_user, vha, 0x7092,
+ "Inconsistent NPIV-Config "
"detected: version=0x%x entries=0x%x checksum=0x%x.\n",
le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
- chksum));
+ le16_to_cpu(hdr.checksum));
goto done;
}
@@ -1014,21 +1034,22 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
vid.port_name = wwn_to_u64(entry->port_name);
vid.node_name = wwn_to_u64(entry->node_name);
- DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
- "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
- (unsigned long long)vid.port_name,
- (unsigned long long)vid.node_name,
- le16_to_cpu(entry->vf_id),
- entry->q_qos, entry->f_qos));
+ ql_dbg(ql_dbg_user, vha, 0x7093,
+ "NPIV[%02x]: wwpn=%llx "
+ "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
+ (unsigned long long)vid.port_name,
+ (unsigned long long)vid.node_name,
+ le16_to_cpu(entry->vf_id),
+ entry->q_qos, entry->f_qos);
if (i < QLA_PRECONFIG_VPORTS) {
vport = fc_vport_create(vha->host, 0, &vid);
if (!vport)
- qla_printk(KERN_INFO, ha,
- "NPIV-Config: Failed to create vport [%02x]: "
- "wwpn=%llx wwnn=%llx.\n", cnt,
- (unsigned long long)vid.port_name,
- (unsigned long long)vid.node_name);
+ ql_log(ql_log_warn, vha, 0x7094,
+ "NPIV-Config Failed to create vport [%02x]: "
+ "wwpn=%llx wwnn=%llx.\n", cnt,
+ (unsigned long long)vid.port_name,
+ (unsigned long long)vid.node_name);
}
}
done:
@@ -1127,9 +1148,10 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
&optrom_dma, GFP_KERNEL);
if (!optrom) {
- qla_printk(KERN_DEBUG, ha,
- "Unable to allocate memory for optrom burst write "
- "(%x KB).\n", OPTROM_BURST_SIZE / 1024);
+ ql_log(ql_log_warn, vha, 0x7095,
+ "Unable to allocate "
+ "memory for optrom burst write (%x KB).\n",
+ OPTROM_BURST_SIZE / 1024);
}
}
@@ -1138,7 +1160,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
ret = qla24xx_unprotect_flash(vha);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7096,
"Unable to unprotect flash for update.\n");
goto done;
}
@@ -1156,9 +1178,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
0xff0000) | ((fdata >> 16) & 0xff));
ret = qla24xx_erase_sector(vha, fdata);
if (ret != QLA_SUCCESS) {
- DEBUG9(qla_printk(KERN_WARNING, ha,
- "Unable to erase sector: address=%x.\n",
- faddr));
+ ql_dbg(ql_dbg_user, vha, 0x7007,
+ "Unable to erase erase sector: address=%x.\n",
+ faddr);
break;
}
}
@@ -1172,12 +1194,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
flash_data_addr(ha, faddr),
OPTROM_BURST_DWORDS);
if (ret != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7097,
"Unable to burst-write optrom segment "
"(%x/%x/%llx).\n", ret,
flash_data_addr(ha, faddr),
(unsigned long long)optrom_dma);
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7098,
"Reverting to slow-write.\n");
dma_free_coherent(&ha->pdev->dev,
@@ -1194,9 +1216,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
ret = qla24xx_write_flash_dword(ha,
flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
if (ret != QLA_SUCCESS) {
- DEBUG9(printk("%s(%ld) Unable to program flash "
- "address=%x data=%x.\n", __func__,
- vha->host_no, faddr, *dwptr));
+ ql_dbg(ql_dbg_user, vha, 0x7006,
+ "Unable to program flash address=%x data=%x.\n",
+ faddr, *dwptr);
break;
}
@@ -1211,7 +1233,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
ret = qla24xx_protect_flash(vha);
if (ret != QLA_SUCCESS)
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7099,
"Unable to protect flash after update.\n");
done:
if (optrom)
@@ -1324,9 +1346,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
ret = qla24xx_write_flash_dword(ha,
nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
if (ret != QLA_SUCCESS) {
- DEBUG9(qla_printk(KERN_WARNING, ha,
+ ql_dbg(ql_dbg_user, vha, 0x709a,
"Unable to program nvram address=%x data=%x.\n",
- naddr, *dwptr));
+ naddr, *dwptr);
break;
}
}
@@ -1476,7 +1498,7 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x709b,
"Unable to update fw options (beacon on).\n");
return QLA_FUNCTION_FAILED;
}
@@ -1541,7 +1563,7 @@ qla2x00_beacon_off(struct scsi_qla_host *vha)
rval = qla2x00_set_fw_options(vha, ha->fw_options);
if (rval != QLA_SUCCESS)
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x709c,
"Unable to update fw options (beacon off).\n");
return rval;
}
@@ -1616,7 +1638,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
if (qla2x00_get_fw_options(vha, ha->fw_options) !=
QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x7009,
"Unable to update fw options (beacon on).\n");
return QLA_FUNCTION_FAILED;
}
@@ -1670,14 +1692,14 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Unable to update fw options (beacon off).\n");
+ ql_log(ql_log_warn, vha, 0x704d,
+ "Unable to update fw options (beacon on).\n");
return QLA_FUNCTION_FAILED;
}
if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
- qla_printk(KERN_WARNING, ha,
- "Unable to get fw options (beacon off).\n");
+ ql_log(ql_log_warn, vha, 0x704e,
+ "Unable to update fw options (beacon on).\n");
return QLA_FUNCTION_FAILED;
}
@@ -2389,10 +2411,9 @@ try_fast:
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
&optrom_dma, GFP_KERNEL);
if (!optrom) {
- qla_printk(KERN_DEBUG, ha,
- "Unable to allocate memory for optrom burst read "
- "(%x KB).\n", OPTROM_BURST_SIZE / 1024);
-
+ ql_log(ql_log_warn, vha, 0x00cc,
+ "Unable to allocate memory for optrom burst read (%x KB).\n",
+ OPTROM_BURST_SIZE / 1024);
goto slow_read;
}
@@ -2407,12 +2428,11 @@ try_fast:
rval = qla2x00_dump_ram(vha, optrom_dma,
flash_data_addr(ha, faddr), burst);
if (rval) {
- qla_printk(KERN_WARNING, ha,
- "Unable to burst-read optrom segment "
- "(%x/%x/%llx).\n", rval,
- flash_data_addr(ha, faddr),
+ ql_log(ql_log_warn, vha, 0x00f5,
+ "Unable to burst-read optrom segment (%x/%x/%llx).\n",
+ rval, flash_data_addr(ha, faddr),
(unsigned long long)optrom_dma);
- qla_printk(KERN_WARNING, ha,
+ ql_log(ql_log_warn, vha, 0x00f6,
"Reverting to slow-read.\n");
dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
@@ -2556,8 +2576,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
/* No signature */
- DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
- "signature.\n"));
+ ql_log(ql_log_fatal, vha, 0x0050,
+ "No matching ROM signature.\n");
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2573,8 +2593,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
/* Incorrect header. */
- DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
- "found pcir_adr=%x.\n", pcids));
+ ql_log(ql_log_fatal, vha, 0x0051,
+ "PCI data struct not found pcir_adr=%x.\n", pcids);
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2588,8 +2608,9 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
qla2x00_read_flash_byte(ha, pcids + 0x12);
ha->bios_revision[1] =
qla2x00_read_flash_byte(ha, pcids + 0x13);
- DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
- ha->bios_revision[1], ha->bios_revision[0]));
+ ql_dbg(ql_dbg_init, vha, 0x0052,
+ "Read BIOS %d.%d.\n",
+ ha->bios_revision[1], ha->bios_revision[0]);
break;
case ROM_CODE_TYPE_FCODE:
/* Open Firmware standard for PCI (FCode). */
@@ -2602,12 +2623,14 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
qla2x00_read_flash_byte(ha, pcids + 0x12);
ha->efi_revision[1] =
qla2x00_read_flash_byte(ha, pcids + 0x13);
- DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
- ha->efi_revision[1], ha->efi_revision[0]));
+ ql_dbg(ql_dbg_init, vha, 0x0053,
+ "Read EFI %d.%d.\n",
+ ha->efi_revision[1], ha->efi_revision[0]);
break;
default:
- DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
- "type %x at pcids %x.\n", code_type, pcids));
+ ql_log(ql_log_warn, vha, 0x0054,
+ "Unrecognized code type %x at pcids %x.\n",
+ code_type, pcids);
break;
}
@@ -2627,21 +2650,28 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
8);
- DEBUG3(qla_printk(KERN_DEBUG, ha, "dumping fw ver from "
- "flash:\n"));
- DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8));
+ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010a,
+ "Dumping fw "
+ "ver from flash:.\n");
+ ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b,
+ (uint8_t *)dbyte, 8);
if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
dcode[2] == 0xffff && dcode[3] == 0xffff) ||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
- DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
- "revision at %x.\n", ha->flt_region_fw * 4));
+ ql_log(ql_log_warn, vha, 0x0057,
+ "Unrecognized fw revision at %x.\n",
+ ha->flt_region_fw * 4);
} else {
/* values are in big endian */
ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3];
ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5];
+ ql_dbg(ql_dbg_init, vha, 0x0058,
+ "FW Version: "
+ "%d.%d.%d.\n", ha->fw_revision[0],
+ ha->fw_revision[1], ha->fw_revision[2]);
}
}
@@ -2683,8 +2713,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
bcode = mbuf + (pcihdr % 4);
if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
/* No signature */
- DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
- "signature.\n"));
+ ql_log(ql_log_fatal, vha, 0x0059,
+ "No matching ROM signature.\n");
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2699,8 +2729,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
bcode[0x2] != 'I' || bcode[0x3] != 'R') {
/* Incorrect header. */
- DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
- "found pcir_adr=%x.\n", pcids));
+ ql_log(ql_log_fatal, vha, 0x005a,
+ "PCI data struct not found pcir_adr=%x.\n", pcids);
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2712,26 +2742,30 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
/* Intel x86, PC-AT compatible. */
ha->bios_revision[0] = bcode[0x12];
ha->bios_revision[1] = bcode[0x13];
- DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
- ha->bios_revision[1], ha->bios_revision[0]));
+ ql_dbg(ql_dbg_init, vha, 0x005b,
+ "Read BIOS %d.%d.\n",
+ ha->bios_revision[1], ha->bios_revision[0]);
break;
case ROM_CODE_TYPE_FCODE:
/* Open Firmware standard for PCI (FCode). */
ha->fcode_revision[0] = bcode[0x12];
ha->fcode_revision[1] = bcode[0x13];
- DEBUG3(qla_printk(KERN_DEBUG, ha, "read FCODE %d.%d.\n",
- ha->fcode_revision[1], ha->fcode_revision[0]));
+ ql_dbg(ql_dbg_init, vha, 0x005c,
+ "Read FCODE %d.%d.\n",
+ ha->fcode_revision[1], ha->fcode_revision[0]);
break;
case ROM_CODE_TYPE_EFI:
/* Extensible Firmware Interface (EFI). */
ha->efi_revision[0] = bcode[0x12];
ha->efi_revision[1] = bcode[0x13];
- DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
- ha->efi_revision[1], ha->efi_revision[0]));
+ ql_dbg(ql_dbg_init, vha, 0x005d,
+ "Read EFI %d.%d.\n",
+ ha->efi_revision[1], ha->efi_revision[0]);
break;
default:
- DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
- "type %x at pcids %x.\n", code_type, pcids));
+ ql_log(ql_log_warn, vha, 0x005e,
+ "Unrecognized code type %x at pcids %x.\n",
+ code_type, pcids);
break;
}
@@ -2753,13 +2787,18 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
- DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
- "revision at %x.\n", ha->flt_region_fw * 4));
+ ql_log(ql_log_warn, vha, 0x005f,
+ "Unrecognized fw revision at %x.\n",
+ ha->flt_region_fw * 4);
} else {
ha->fw_revision[0] = dcode[0];
ha->fw_revision[1] = dcode[1];
ha->fw_revision[2] = dcode[2];
ha->fw_revision[3] = dcode[3];
+ ql_dbg(ql_dbg_init, vha, 0x0060,
+ "Firmware revision %d.%d.%d.%d.\n",
+ ha->fw_revision[0], ha->fw_revision[1],
+ ha->fw_revision[2], ha->fw_revision[3]);
}
/* Check for golden firmware and get version if available */
@@ -2775,9 +2814,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF &&
dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "%s(%ld): Unrecognized golden fw at 0x%x.\n",
- __func__, vha->host_no, ha->flt_region_gold_fw * 4));
+ ql_log(ql_log_warn, vha, 0x0056,
+ "Unrecognized golden fw at 0x%x.\n",
+ ha->flt_region_gold_fw * 4);
return ret;
}
@@ -2843,9 +2882,9 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
if (!ha->fcp_prio_cfg) {
ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
if (!ha->fcp_prio_cfg) {
- qla_printk(KERN_WARNING, ha,
- "Unable to allocate memory for fcp priority data "
- "(%x).\n", FCP_PRIO_CFG_SIZE);
+ ql_log(ql_log_warn, vha, 0x00d5,
+ "Unable to allocate memory for fcp priorty data (%x).\n",
+ FCP_PRIO_CFG_SIZE);
return QLA_FUNCTION_FAILED;
}
}
@@ -2857,7 +2896,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
- if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 0))
+ if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0))
goto fail;
/* read remaining FCP CMD config data from flash */
@@ -2869,7 +2908,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
fcp_prio_addr << 2, (len < max_len ? len : max_len));
/* revalidate the entire FCP priority config data, including entries */
- if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 1))
+ if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1))
goto fail;
ha->flags.fcp_prio_enabled = 1;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 82e9e5c0476..cf8dfab9489 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -197,6 +197,7 @@ static struct {
{"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
{"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},
+ {"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN},
{"IOMEGA", "Io20S *F", NULL, BLIST_KEY},
{"INSITE", "Floptical F*8I", NULL, BLIST_KEY},
{"INSITE", "I325VM", NULL, BLIST_KEY},
@@ -243,6 +244,7 @@ static struct {
{"Tornado-", "F4", "*", BLIST_NOREPORTLUN},
{"TOSHIBA", "CDROM", NULL, BLIST_ISROM},
{"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
+ {"Traxdata", "CDR4120", NULL, BLIST_NOLUN}, /* locks up */
{"USB2.0", "SMARTMEDIA/XD", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36},
{"WangDAT", "Model 2600", "01.7", BLIST_SELECT_NO_ATN},
{"WangDAT", "Model 3200", "02.2", BLIST_SELECT_NO_ATN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ec1803a4872..fc3f168decb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -137,6 +137,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
host->host_blocked = host->max_host_blocked;
break;
case SCSI_MLQUEUE_DEVICE_BUSY:
+ case SCSI_MLQUEUE_EH_RETRY:
device->device_blocked = device->max_device_blocked;
break;
case SCSI_MLQUEUE_TARGET_BUSY:
@@ -213,6 +214,8 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int ret = DRIVER_ERROR << 24;
req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
+ if (!req)
+ return ret;
if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
buffer, bufflen, __GFP_WAIT))
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index d70e91ae60a..d82a023a901 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -144,9 +144,9 @@ int scsi_autopm_get_device(struct scsi_device *sdev)
int err;
err = pm_runtime_get_sync(&sdev->sdev_gendev);
- if (err < 0)
+ if (err < 0 && err != -EACCES)
pm_runtime_put_sync(&sdev->sdev_gendev);
- else if (err > 0)
+ else
err = 0;
return err;
}
@@ -173,9 +173,9 @@ int scsi_autopm_get_host(struct Scsi_Host *shost)
int err;
err = pm_runtime_get_sync(&shost->shost_gendev);
- if (err < 0)
+ if (err < 0 && err != -EACCES)
pm_runtime_put_sync(&shost->shost_gendev);
- else if (err > 0)
+ else
err = 0;
return err;
}
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 8a172d4f456..5fbeadd9681 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -597,6 +597,28 @@ static DEVICE_ATTR(signalling, S_IRUGO,
show_spi_host_signalling,
store_spi_host_signalling);
+static ssize_t show_spi_host_width(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+
+ return sprintf(buf, "%s\n", shost->max_id == 16 ? "wide" : "narrow");
+}
+static DEVICE_ATTR(host_width, S_IRUGO,
+ show_spi_host_width, NULL);
+
+static ssize_t show_spi_host_hba_id(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = transport_class_to_shost(cdev);
+
+ return sprintf(buf, "%d\n", shost->this_id);
+}
+static DEVICE_ATTR(hba_id, S_IRUGO,
+ show_spi_host_hba_id, NULL);
+
#define DV_SET(x, y) \
if(i->f->set_##x) \
i->f->set_##x(sdev->sdev_target, y)
@@ -1380,6 +1402,8 @@ static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class,
static struct attribute *host_attributes[] = {
&dev_attr_signalling.attr,
+ &dev_attr_host_width.attr,
+ &dev_attr_hba_id.attr,
NULL
};
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index eb7a3e85304..eba183c428c 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -160,6 +160,10 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev,
return NULL;
}
+/* For device slot and array device slot elements, byte 3 bit 6
+ * is "fault sensed" while byte 3 bit 5 is "fault reqstd". As this
+ * code stands these bits are shifted 4 positions right so in
+ * sysfs they will appear as bits 2 and 1 respectively. Strange. */
static void ses_get_fault(struct enclosure_device *edev,
struct enclosure_component *ecomp)
{
@@ -181,7 +185,7 @@ static int ses_set_fault(struct enclosure_device *edev,
/* zero is disabled */
break;
case ENCLOSURE_SETTING_ENABLED:
- desc[2] = 0x02;
+ desc[3] = 0x20;
break;
default:
/* SES doesn't do the SGPIO blink settings */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 4778e270716..5fc97d2ba2f 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -221,14 +221,33 @@ static unsigned int sr_check_events(struct cdrom_device_info *cdi,
return 0;
events = sr_get_events(cd->device);
+ cd->get_event_changed |= events & DISK_EVENT_MEDIA_CHANGE;
+
+ /*
+ * If earlier GET_EVENT_STATUS_NOTIFICATION and TUR did not agree
+ * several times in a row, rely on TUR only for this likely
+ * broken device, to prevent generating incorrect media changed
+ * events for every open().
+ */
+ if (cd->ignore_get_event) {
+ events &= ~DISK_EVENT_MEDIA_CHANGE;
+ goto do_tur;
+ }
+
/*
* GET_EVENT_STATUS_NOTIFICATION is enough unless MEDIA_CHANGE
* is being cleared. Note that there are devices which hang
* if asked to execute TUR repeatedly.
*/
- if (!(clearing & DISK_EVENT_MEDIA_CHANGE))
- goto skip_tur;
+ if (cd->device->changed) {
+ events |= DISK_EVENT_MEDIA_CHANGE;
+ cd->device->changed = 0;
+ cd->tur_changed = true;
+ }
+ if (!(clearing & DISK_EVENT_MEDIA_CHANGE))
+ return events;
+do_tur:
/* let's see whether the media is there with TUR */
last_present = cd->media_present;
ret = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
@@ -242,12 +261,31 @@ static unsigned int sr_check_events(struct cdrom_device_info *cdi,
(scsi_sense_valid(&sshdr) && sshdr.asc != 0x3a);
if (last_present != cd->media_present)
- events |= DISK_EVENT_MEDIA_CHANGE;
-skip_tur:
+ cd->device->changed = 1;
+
if (cd->device->changed) {
events |= DISK_EVENT_MEDIA_CHANGE;
cd->device->changed = 0;
+ cd->tur_changed = true;
+ }
+
+ if (cd->ignore_get_event)
+ return events;
+
+ /* check whether GET_EVENT is reporting spurious MEDIA_CHANGE */
+ if (!cd->tur_changed) {
+ if (cd->get_event_changed) {
+ if (cd->tur_mismatch++ > 8) {
+ sdev_printk(KERN_WARNING, cd->device,
+ "GET_EVENT and TUR disagree continuously, suppress GET_EVENT events\n");
+ cd->ignore_get_event = true;
+ }
+ } else {
+ cd->tur_mismatch = 0;
+ }
}
+ cd->tur_changed = false;
+ cd->get_event_changed = false;
return events;
}
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index e036f1dc83c..37c8f6b1751 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -41,6 +41,13 @@ typedef struct scsi_cd {
unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */
unsigned readcd_cdda:1; /* reading audio data using READ_CD */
unsigned media_present:1; /* media is present */
+
+ /* GET_EVENT spurious event handling, blk layer guarantees exclusion */
+ int tur_mismatch; /* nr of get_event TUR mismatches */
+ bool tur_changed:1; /* changed according to TUR */
+ bool get_event_changed:1; /* changed according to GET_EVENT */
+ bool ignore_get_event:1; /* GET_EVENT is unreliable, use TUR */
+
struct cdrom_device_info cdi;
/* We hold gendisk and scsi_device references on probe and use
* the refs on this kref to decide when to release them */
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 07eaef1c722..7e12a2e4e0a 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -49,13 +49,6 @@
* inside the execution of NCR5380_intr(), leading to recursive
* calls.
*
- * - I've added a function merge_contiguous_buffers() that tries to
- * merge scatter-gather buffers that are located at contiguous
- * physical addresses and can be processed with the same DMA setup.
- * Since most scatter-gather operations work on a page (4K) of
- * 4 buffers (1K), in more than 90% of all cases three interrupts and
- * DMA setup actions are saved.
- *
* - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA
* and USLEEP, because these were messing up readability and will never be
* needed for Atari SCSI.
@@ -266,8 +259,9 @@ static struct scsi_host_template *the_template = NULL;
(struct NCR5380_hostdata *)(in)->hostdata
#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata)
-#define NEXT(cmd) (*(struct scsi_cmnd **)&((cmd)->host_scribble))
-#define NEXTADDR(cmd) ((struct scsi_cmnd **)&((cmd)->host_scribble))
+#define NEXT(cmd) ((struct scsi_cmnd *)(cmd)->host_scribble)
+#define SET_NEXT(cmd, next) ((cmd)->host_scribble = (void *)(next))
+#define NEXTADDR(cmd) ((struct scsi_cmnd **)&((cmd)->host_scribble))
#define HOSTNO instance->host_no
#define H_NO(cmd) (cmd)->device->host->host_no
@@ -459,47 +453,6 @@ static void free_all_tags( void )
/*
- * Function: void merge_contiguous_buffers(struct scsi_cmnd *cmd)
- *
- * Purpose: Try to merge several scatter-gather requests into one DMA
- * transfer. This is possible if the scatter buffers lie on
- * physical contiguous addresses.
- *
- * Parameters: struct scsi_cmnd *cmd
- * The command to work on. The first scatter buffer's data are
- * assumed to be already transferred into ptr/this_residual.
- */
-
-static void merge_contiguous_buffers(struct scsi_cmnd *cmd)
-{
- unsigned long endaddr;
-#if (NDEBUG & NDEBUG_MERGING)
- unsigned long oldlen = cmd->SCp.this_residual;
- int cnt = 1;
-#endif
-
- for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
- cmd->SCp.buffers_residual &&
- virt_to_phys(SGADDR(&(cmd->SCp.buffer[1]))) == endaddr; ) {
-
- MER_PRINTK("VTOP(%p) == %08lx -> merging\n",
- SGADDR(&(cmd->SCp.buffer[1])), endaddr);
-#if (NDEBUG & NDEBUG_MERGING)
- ++cnt;
-#endif
- ++cmd->SCp.buffer;
- --cmd->SCp.buffers_residual;
- cmd->SCp.this_residual += cmd->SCp.buffer->length;
- endaddr += cmd->SCp.buffer->length;
- }
-#if (NDEBUG & NDEBUG_MERGING)
- if (oldlen != cmd->SCp.this_residual)
- MER_PRINTK("merged %d buffers from %p, new length %08x\n",
- cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
-#endif
-}
-
-/*
* Function : void initialize_SCp(struct scsi_cmnd *cmd)
*
* Purpose : initialize the saved data pointers for cmd to point to the
@@ -520,11 +473,6 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
cmd->SCp.this_residual = cmd->SCp.buffer->length;
-
- /* ++roman: Try to merge some scatter-buffers if they are at
- * contiguous physical addresses.
- */
-// merge_contiguous_buffers( cmd );
} else {
cmd->SCp.buffer = NULL;
cmd->SCp.buffers_residual = 0;
@@ -841,7 +789,7 @@ static char *lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, char *pos, char *buffer,
*
*/
-static int NCR5380_init (struct Scsi_Host *instance, int flags)
+static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
{
int i;
SETUP_HOSTDATA(instance);
@@ -889,6 +837,11 @@ static int NCR5380_init (struct Scsi_Host *instance, int flags)
return 0;
}
+static void NCR5380_exit(struct Scsi_Host *instance)
+{
+ /* Empty, as we didn't schedule any delayed work */
+}
+
/*
* Function : int NCR5380_queue_command (struct scsi_cmnd *cmd,
* void (*done)(struct scsi_cmnd *))
@@ -962,7 +915,7 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
* in a queue
*/
- NEXT(cmd) = NULL;
+ SET_NEXT(cmd, NULL);
cmd->scsi_done = done;
cmd->result = 0;
@@ -990,14 +943,14 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
*/
if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
LIST(cmd, hostdata->issue_queue);
- NEXT(cmd) = hostdata->issue_queue;
+ SET_NEXT(cmd, hostdata->issue_queue);
hostdata->issue_queue = cmd;
} else {
for (tmp = (struct scsi_cmnd *)hostdata->issue_queue;
NEXT(tmp); tmp = NEXT(tmp))
;
LIST(cmd, tmp);
- NEXT(tmp) = cmd;
+ SET_NEXT(tmp, cmd);
}
local_irq_restore(flags);
@@ -1105,12 +1058,12 @@ static void NCR5380_main (struct work_struct *bl)
local_irq_disable();
if (prev) {
REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
- NEXT(prev) = NEXT(tmp);
+ SET_NEXT(prev, NEXT(tmp));
} else {
REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp));
hostdata->issue_queue = NEXT(tmp);
}
- NEXT(tmp) = NULL;
+ SET_NEXT(tmp, NULL);
/* reenable interrupts after finding one */
local_irq_restore(flags);
@@ -1144,7 +1097,7 @@ static void NCR5380_main (struct work_struct *bl)
} else {
local_irq_disable();
LIST(tmp, hostdata->issue_queue);
- NEXT(tmp) = hostdata->issue_queue;
+ SET_NEXT(tmp, hostdata->issue_queue);
hostdata->issue_queue = tmp;
#ifdef SUPPORT_TAGS
cmd_free_tag( tmp );
@@ -1439,7 +1392,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
local_irq_restore(flags);
/* Wait for arbitration logic to complete */
-#if NCR_TIMEOUT
+#ifdef NCR_TIMEOUT
{
unsigned long timeout = jiffies + 2*NCR_TIMEOUT;
@@ -2070,11 +2023,6 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
cmd->SCp.ptr = SGADDR(cmd->SCp.buffer);
-
- /* ++roman: Try to merge some scatter-buffers if
- * they are at contiguous physical addresses.
- */
-// merge_contiguous_buffers( cmd );
INF_PRINTK("scsi%d: %d bytes and %d buffers left\n",
HOSTNO, cmd->SCp.this_residual,
cmd->SCp.buffers_residual);
@@ -2274,7 +2222,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
local_irq_save(flags);
LIST(cmd,hostdata->issue_queue);
- NEXT(cmd) = hostdata->issue_queue;
+ SET_NEXT(cmd, hostdata->issue_queue);
hostdata->issue_queue = (struct scsi_cmnd *) cmd;
local_irq_restore(flags);
QU_PRINTK("scsi%d: REQUEST SENSE added to head of "
@@ -2330,7 +2278,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
local_irq_save(flags);
cmd->device->disconnect = 1;
LIST(cmd,hostdata->disconnected_queue);
- NEXT(cmd) = hostdata->disconnected_queue;
+ SET_NEXT(cmd, hostdata->disconnected_queue);
hostdata->connected = NULL;
hostdata->disconnected_queue = cmd;
local_irq_restore(flags);
@@ -2589,12 +2537,12 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
) {
if (prev) {
REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
- NEXT(prev) = NEXT(tmp);
+ SET_NEXT(prev, NEXT(tmp));
} else {
REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp));
hostdata->disconnected_queue = NEXT(tmp);
}
- NEXT(tmp) = NULL;
+ SET_NEXT(tmp, NULL);
break;
}
}
@@ -2762,7 +2710,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (cmd == tmp) {
REMOVE(5, *prev, tmp, NEXT(tmp));
(*prev) = NEXT(tmp);
- NEXT(tmp) = NULL;
+ SET_NEXT(tmp, NULL);
tmp->result = DID_ABORT << 16;
local_irq_restore(flags);
ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n",
@@ -2835,7 +2783,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (cmd == tmp) {
REMOVE(5, *prev, tmp, NEXT(tmp));
*prev = NEXT(tmp);
- NEXT(tmp) = NULL;
+ SET_NEXT(tmp, NULL);
tmp->result = DID_ABORT << 16;
/* We must unlock the tag/LUN immediately here, since the
* target goes to BUS FREE and doesn't send us another
@@ -2943,7 +2891,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
for (i = 0; (cmd = disconnected_queue); ++i) {
disconnected_queue = NEXT(cmd);
- NEXT(cmd) = NULL;
+ SET_NEXT(cmd, NULL);
cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
cmd->scsi_done( cmd );
}
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 613f5880d13..baf7328de95 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -70,6 +70,12 @@
#include <asm/idprom.h>
#include <asm/machines.h>
+#define NDEBUG 0
+
+#define NDEBUG_ABORT 0x00100000
+#define NDEBUG_TAGS 0x00200000
+#define NDEBUG_MERGING 0x00400000
+
/* dma on! */
#define REAL_DMA
@@ -86,8 +92,6 @@ static void NCR5380_print(struct Scsi_Host *instance);
/*#define RESET_BOOT */
#define DRIVER_SETUP
-#define NDEBUG 0
-
/*
* BUG can be used to trigger a strange code-size related hang on 2.1 kernels
*/
@@ -195,7 +199,7 @@ static struct Scsi_Host *default_instance;
*
*/
-int sun3scsi_detect(struct scsi_host_template * tpnt)
+int __init sun3scsi_detect(struct scsi_host_template * tpnt)
{
unsigned long ioaddr;
static int called = 0;
@@ -314,6 +318,7 @@ int sun3scsi_release (struct Scsi_Host *shpnt)
iounmap((void *)sun3_scsi_regp);
+ NCR5380_exit(shpnt);
return 0;
}
diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c
index 7c526b8e30a..fbba78e5722 100644
--- a/drivers/scsi/sun3_scsi_vme.c
+++ b/drivers/scsi/sun3_scsi_vme.c
@@ -39,6 +39,12 @@
/* dma on! */
#define REAL_DMA
+#define NDEBUG 0
+
+#define NDEBUG_ABORT 0x00100000
+#define NDEBUG_TAGS 0x00200000
+#define NDEBUG_MERGING 0x00400000
+
#include "scsi.h"
#include "initio.h"
#include <scsi/scsi_host.h>
@@ -50,8 +56,6 @@ extern int sun3_map_test(unsigned long, char *);
/*#define RESET_BOOT */
#define DRIVER_SETUP
-#define NDEBUG 0
-
/*
* BUG can be used to trigger a strange code-size related hang on 2.1 kernels
*/
@@ -137,7 +141,7 @@ static struct Scsi_Host *default_instance;
*
*/
-static int sun3scsi_detect(struct scsi_host_template * tpnt)
+static int __init sun3scsi_detect(struct scsi_host_template * tpnt)
{
unsigned long ioaddr, irq = 0;
static int called = 0;
@@ -283,6 +287,7 @@ int sun3scsi_release (struct Scsi_Host *shpnt)
iounmap((void *)sun3_scsi_regp);
+ NCR5380_exit(shpnt);
return 0;
}
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index 7e9c39951ec..dc8d022c07a 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -34,6 +34,9 @@ static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);
+/* clock disable operations are not passed on to hardware during boot */
+static int allow_disable;
+
void clk_rate_table_build(struct clk *clk,
struct cpufreq_frequency_table *freq_table,
int nr_freqs,
@@ -228,7 +231,7 @@ static void __clk_disable(struct clk *clk)
return;
if (!(--clk->usecount)) {
- if (likely(clk->ops && clk->ops->disable))
+ if (likely(allow_disable && clk->ops && clk->ops->disable))
clk->ops->disable(clk);
if (likely(clk->parent))
__clk_disable(clk->parent);
@@ -393,7 +396,7 @@ int clk_register(struct clk *clk)
{
int ret;
- if (clk == NULL || IS_ERR(clk))
+ if (IS_ERR_OR_NULL(clk))
return -EINVAL;
/*
@@ -670,7 +673,7 @@ static struct dentry *clk_debugfs_root;
static int clk_debugfs_register_one(struct clk *c)
{
int err;
- struct dentry *d, *child, *child_tmp;
+ struct dentry *d;
struct clk *pa = c->parent;
char s[255];
char *p = s;
@@ -699,10 +702,7 @@ static int clk_debugfs_register_one(struct clk *c)
return 0;
err_out:
- d = c->dentry;
- list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
- debugfs_remove(child);
- debugfs_remove(c->dentry);
+ debugfs_remove_recursive(c->dentry);
return err;
}
@@ -747,3 +747,25 @@ err_out:
return err;
}
late_initcall(clk_debugfs_init);
+
+static int __init clk_late_init(void)
+{
+ unsigned long flags;
+ struct clk *clk;
+
+ /* disable all clocks with zero use count */
+ mutex_lock(&clock_list_sem);
+ spin_lock_irqsave(&clock_lock, flags);
+
+ list_for_each_entry(clk, &clock_list, node)
+ if (!clk->usecount && clk->ops && clk->ops->disable)
+ clk->ops->disable(clk);
+
+ /* from now on allow clock disable operations */
+ allow_disable = 1;
+
+ spin_unlock_irqrestore(&clock_lock, flags);
+ mutex_unlock(&clock_list_sem);
+ return 0;
+}
+late_initcall(clk_late_init);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index de35c3ad8a6..52e2900d9d8 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -86,9 +86,6 @@ config SPI_BFIN_SPORT
help
Enable support for a SPI bus via the Blackfin SPORT peripheral.
- This driver can also be built as a module. If so, the module
- will be called spi_bfin_sport.
-
config SPI_AU1550
tristate "Au1550/Au12x0 SPI Controller"
depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL
@@ -97,9 +94,6 @@ config SPI_AU1550
If you say yes to this option, support will be included for the
Au1550 SPI controller (may also work with Au1200,Au1210,Au1250).
- This driver can also be built as a module. If so, the module
- will be called au1550_spi.
-
config SPI_BITBANG
tristate "Utilities for Bitbanging SPI masters"
help
@@ -130,9 +124,6 @@ config SPI_COLDFIRE_QSPI
This enables support for the Coldfire QSPI controller in master
mode.
- This driver can also be built as a module. If so, the module
- will be called coldfire_qspi.
-
config SPI_DAVINCI
tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
depends on SPI_MASTER && ARCH_DAVINCI
@@ -140,9 +131,6 @@ config SPI_DAVINCI
help
SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
- This driver can also be built as a module. The module will be called
- davinci_spi.
-
config SPI_EP93XX
tristate "Cirrus Logic EP93xx SPI controller"
depends on ARCH_EP93XX
@@ -150,9 +138,6 @@ config SPI_EP93XX
This enables using the Cirrus EP93xx SPI controller in master
mode.
- To compile this driver as a module, choose M here. The module will be
- called ep93xx_spi.
-
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
depends on GENERIC_GPIO
@@ -169,21 +154,6 @@ config SPI_GPIO
GPIO operations, you should be able to leverage that for better
speed with a custom version of this driver; see the source code.
-config SPI_IMX_VER_IMX1
- def_bool y if SOC_IMX1
-
-config SPI_IMX_VER_0_0
- def_bool y if SOC_IMX21 || SOC_IMX27
-
-config SPI_IMX_VER_0_4
- def_bool y if SOC_IMX31
-
-config SPI_IMX_VER_0_7
- def_bool y if ARCH_MX25 || SOC_IMX35 || SOC_IMX51 || SOC_IMX53
-
-config SPI_IMX_VER_2_3
- def_bool y if SOC_IMX51 || SOC_IMX53
-
config SPI_IMX
tristate "Freescale i.MX SPI controllers"
depends on ARCH_MXC
@@ -328,16 +298,6 @@ config SPI_S3C24XX_FIQ
no free DMA channels, or when doing transfers that required both
TX and RX data paths.
-config SPI_S3C24XX_GPIO
- tristate "Samsung S3C24XX series SPI by GPIO"
- depends on ARCH_S3C2410 && EXPERIMENTAL
- select SPI_BITBANG
- help
- SPI driver for Samsung S3C24XX series ARM SoCs using
- GPIO lines to provide the SPI bus. This can be used where
- the inbuilt hardware cannot provide the transfer mode, or
- where the board is using non hardware connected pins.
-
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
depends on (ARCH_S3C64XX || ARCH_S5P64X0)
@@ -385,16 +345,16 @@ config SPI_TI_SSP
This selects an SPI master implementation using a TI sequencer
serial port.
- To compile this driver as a module, choose M here: the
- module will be called ti-ssp-spi.
-
config SPI_TOPCLIFF_PCH
- tristate "Topcliff PCH SPI Controller"
+ tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH SPI controller"
depends on PCI
help
SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
used in some x86 embedded processors.
+ This driver also supports the ML7213, a companion chip for the
+ Atom E6xx series and compatible with the Intel EG20T PCH.
+
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
depends on GENERIC_GPIO && CPU_TX49XX
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 0f8c69b6b19..61c3261c388 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -7,68 +7,55 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
# small core, mostly translating board-specific
# config declarations into driver model code
obj-$(CONFIG_SPI_MASTER) += spi.o
+obj-$(CONFIG_SPI_SPIDEV) += spidev.o
# SPI master controller drivers (bus)
-obj-$(CONFIG_SPI_ALTERA) += spi_altera.o
-obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o
-obj-$(CONFIG_SPI_ATH79) += ath79_spi.o
-obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
-obj-$(CONFIG_SPI_BFIN_SPORT) += spi_bfin_sport.o
-obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
-obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
-obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
-obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o
-obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o
-obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o
-obj-$(CONFIG_SPI_DW_PCI) += dw_spi_midpci.o
-dw_spi_midpci-objs := dw_spi_pci.o dw_spi_mid.o
-obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o
-obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o
-obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
-obj-$(CONFIG_SPI_IMX) += spi_imx.o
-obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
-obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
-obj-$(CONFIG_SPI_PXA2XX_PCI) += pxa2xx_spi_pci.o
-obj-$(CONFIG_SPI_OC_TINY) += spi_oc_tiny.o
-obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
-obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
-obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o
-obj-$(CONFIG_SPI_ORION) += orion_spi.o
-obj-$(CONFIG_SPI_PL022) += amba-pl022.o
-obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o
-obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
-obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o
-obj-$(CONFIG_SPI_FSL_LIB) += spi_fsl_lib.o
-obj-$(CONFIG_SPI_FSL_ESPI) += spi_fsl_espi.o
-obj-$(CONFIG_SPI_FSL_SPI) += spi_fsl_spi.o
-obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
-obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
-obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o
-obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o
-obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o
-obj-$(CONFIG_SPI_TI_SSP) += ti-ssp-spi.o
-obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o
-obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
-obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
-obj-$(CONFIG_SPI_SH) += spi_sh.o
-obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
-obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
-obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
-obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o
+obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
+obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
+obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
+obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
+obj-$(CONFIG_SPI_BFIN) += spi-bfin5xx.o
+obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
+obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
+obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
+obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
+obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
+obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
+obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
+obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o
+spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o
+obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
+obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
+obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
+obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
+obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
+obj-$(CONFIG_SPI_IMX) += spi-imx.o
+obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
+obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
+obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
+obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
+obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
+obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
+obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o
+obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o
+obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
+obj-$(CONFIG_SPI_ORION) += spi-orion.o
+obj-$(CONFIG_SPI_PL022) += spi-pl022.o
+obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
+obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx.o
+obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
+obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
+spi-s3c24xx-hw-y := spi-s3c24xx.o
+spi-s3c24xx-hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi-s3c24xx-fiq.o
+obj-$(CONFIG_SPI_S3C64XX) += spi-s3c64xx.o
+obj-$(CONFIG_SPI_SH) += spi-sh.o
+obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
+obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
+obj-$(CONFIG_SPI_STMP3XXX) += spi-stmp.o
+obj-$(CONFIG_SPI_TEGRA) += spi-tegra.o
+obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o
+obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
+obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
+obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
+obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
-# special build for s3c24xx spi driver with fiq support
-spi_s3c24xx_hw-y := spi_s3c24xx.o
-spi_s3c24xx_hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi_s3c24xx_fiq.o
-
-# ... add above this line ...
-
-# SPI protocol drivers (device/link on bus)
-obj-$(CONFIG_SPI_SPIDEV) += spidev.o
-obj-$(CONFIG_SPI_TLE62X0) += tle62x0.o
-# ... add above this line ...
-
-# SPI slave controller drivers (upstream link)
-# ... add above this line ...
-
-# SPI slave drivers (protocol for that link)
-# ... add above this line ...
diff --git a/drivers/spi/atmel_spi.h b/drivers/spi/atmel_spi.h
deleted file mode 100644
index 6e06b6ad3a4..00000000000
--- a/drivers/spi/atmel_spi.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Register definitions for Atmel Serial Peripheral Interface (SPI)
- *
- * Copyright (C) 2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ATMEL_SPI_H__
-#define __ATMEL_SPI_H__
-
-/* SPI register offsets */
-#define SPI_CR 0x0000
-#define SPI_MR 0x0004
-#define SPI_RDR 0x0008
-#define SPI_TDR 0x000c
-#define SPI_SR 0x0010
-#define SPI_IER 0x0014
-#define SPI_IDR 0x0018
-#define SPI_IMR 0x001c
-#define SPI_CSR0 0x0030
-#define SPI_CSR1 0x0034
-#define SPI_CSR2 0x0038
-#define SPI_CSR3 0x003c
-#define SPI_RPR 0x0100
-#define SPI_RCR 0x0104
-#define SPI_TPR 0x0108
-#define SPI_TCR 0x010c
-#define SPI_RNPR 0x0110
-#define SPI_RNCR 0x0114
-#define SPI_TNPR 0x0118
-#define SPI_TNCR 0x011c
-#define SPI_PTCR 0x0120
-#define SPI_PTSR 0x0124
-
-/* Bitfields in CR */
-#define SPI_SPIEN_OFFSET 0
-#define SPI_SPIEN_SIZE 1
-#define SPI_SPIDIS_OFFSET 1
-#define SPI_SPIDIS_SIZE 1
-#define SPI_SWRST_OFFSET 7
-#define SPI_SWRST_SIZE 1
-#define SPI_LASTXFER_OFFSET 24
-#define SPI_LASTXFER_SIZE 1
-
-/* Bitfields in MR */
-#define SPI_MSTR_OFFSET 0
-#define SPI_MSTR_SIZE 1
-#define SPI_PS_OFFSET 1
-#define SPI_PS_SIZE 1
-#define SPI_PCSDEC_OFFSET 2
-#define SPI_PCSDEC_SIZE 1
-#define SPI_FDIV_OFFSET 3
-#define SPI_FDIV_SIZE 1
-#define SPI_MODFDIS_OFFSET 4
-#define SPI_MODFDIS_SIZE 1
-#define SPI_LLB_OFFSET 7
-#define SPI_LLB_SIZE 1
-#define SPI_PCS_OFFSET 16
-#define SPI_PCS_SIZE 4
-#define SPI_DLYBCS_OFFSET 24
-#define SPI_DLYBCS_SIZE 8
-
-/* Bitfields in RDR */
-#define SPI_RD_OFFSET 0
-#define SPI_RD_SIZE 16
-
-/* Bitfields in TDR */
-#define SPI_TD_OFFSET 0
-#define SPI_TD_SIZE 16
-
-/* Bitfields in SR */
-#define SPI_RDRF_OFFSET 0
-#define SPI_RDRF_SIZE 1
-#define SPI_TDRE_OFFSET 1
-#define SPI_TDRE_SIZE 1
-#define SPI_MODF_OFFSET 2
-#define SPI_MODF_SIZE 1
-#define SPI_OVRES_OFFSET 3
-#define SPI_OVRES_SIZE 1
-#define SPI_ENDRX_OFFSET 4
-#define SPI_ENDRX_SIZE 1
-#define SPI_ENDTX_OFFSET 5
-#define SPI_ENDTX_SIZE 1
-#define SPI_RXBUFF_OFFSET 6
-#define SPI_RXBUFF_SIZE 1
-#define SPI_TXBUFE_OFFSET 7
-#define SPI_TXBUFE_SIZE 1
-#define SPI_NSSR_OFFSET 8
-#define SPI_NSSR_SIZE 1
-#define SPI_TXEMPTY_OFFSET 9
-#define SPI_TXEMPTY_SIZE 1
-#define SPI_SPIENS_OFFSET 16
-#define SPI_SPIENS_SIZE 1
-
-/* Bitfields in CSR0 */
-#define SPI_CPOL_OFFSET 0
-#define SPI_CPOL_SIZE 1
-#define SPI_NCPHA_OFFSET 1
-#define SPI_NCPHA_SIZE 1
-#define SPI_CSAAT_OFFSET 3
-#define SPI_CSAAT_SIZE 1
-#define SPI_BITS_OFFSET 4
-#define SPI_BITS_SIZE 4
-#define SPI_SCBR_OFFSET 8
-#define SPI_SCBR_SIZE 8
-#define SPI_DLYBS_OFFSET 16
-#define SPI_DLYBS_SIZE 8
-#define SPI_DLYBCT_OFFSET 24
-#define SPI_DLYBCT_SIZE 8
-
-/* Bitfields in RCR */
-#define SPI_RXCTR_OFFSET 0
-#define SPI_RXCTR_SIZE 16
-
-/* Bitfields in TCR */
-#define SPI_TXCTR_OFFSET 0
-#define SPI_TXCTR_SIZE 16
-
-/* Bitfields in RNCR */
-#define SPI_RXNCR_OFFSET 0
-#define SPI_RXNCR_SIZE 16
-
-/* Bitfields in TNCR */
-#define SPI_TXNCR_OFFSET 0
-#define SPI_TXNCR_SIZE 16
-
-/* Bitfields in PTCR */
-#define SPI_RXTEN_OFFSET 0
-#define SPI_RXTEN_SIZE 1
-#define SPI_RXTDIS_OFFSET 1
-#define SPI_RXTDIS_SIZE 1
-#define SPI_TXTEN_OFFSET 8
-#define SPI_TXTEN_SIZE 1
-#define SPI_TXTDIS_OFFSET 9
-#define SPI_TXTDIS_SIZE 1
-
-/* Constants for BITS */
-#define SPI_BITS_8_BPT 0
-#define SPI_BITS_9_BPT 1
-#define SPI_BITS_10_BPT 2
-#define SPI_BITS_11_BPT 3
-#define SPI_BITS_12_BPT 4
-#define SPI_BITS_13_BPT 5
-#define SPI_BITS_14_BPT 6
-#define SPI_BITS_15_BPT 7
-#define SPI_BITS_16_BPT 8
-
-/* Bit manipulation macros */
-#define SPI_BIT(name) \
- (1 << SPI_##name##_OFFSET)
-#define SPI_BF(name,value) \
- (((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
-#define SPI_BFEXT(name,value) \
- (((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
-#define SPI_BFINS(name,value,old) \
- ( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
- | SPI_BF(name,value))
-
-/* Register access macros */
-#define spi_readl(port,reg) \
- __raw_readl((port)->regs + SPI_##reg)
-#define spi_writel(port,reg,value) \
- __raw_writel((value), (port)->regs + SPI_##reg)
-
-#endif /* __ATMEL_SPI_H__ */
diff --git a/drivers/spi/spi_altera.c b/drivers/spi/spi-altera.c
index 4813a63ce6f..4813a63ce6f 100644
--- a/drivers/spi/spi_altera.c
+++ b/drivers/spi/spi-altera.c
diff --git a/drivers/spi/ath79_spi.c b/drivers/spi/spi-ath79.c
index fcff810ea3b..03019bf5a5e 100644
--- a/drivers/spi/ath79_spi.c
+++ b/drivers/spi/spi-ath79.c
@@ -232,7 +232,7 @@ static __devinit int ath79_spi_probe(struct platform_device *pdev)
goto err_put_master;
}
- sp->base = ioremap(r->start, r->end - r->start + 1);
+ sp->base = ioremap(r->start, resource_size(r));
if (!sp->base) {
ret = -ENXIO;
goto err_put_master;
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/spi-atmel.c
index 08711e9202a..82dee9a6c0d 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/spi-atmel.c
@@ -25,7 +25,160 @@
#include <mach/gpio.h>
#include <mach/cpu.h>
-#include "atmel_spi.h"
+/* SPI register offsets */
+#define SPI_CR 0x0000
+#define SPI_MR 0x0004
+#define SPI_RDR 0x0008
+#define SPI_TDR 0x000c
+#define SPI_SR 0x0010
+#define SPI_IER 0x0014
+#define SPI_IDR 0x0018
+#define SPI_IMR 0x001c
+#define SPI_CSR0 0x0030
+#define SPI_CSR1 0x0034
+#define SPI_CSR2 0x0038
+#define SPI_CSR3 0x003c
+#define SPI_RPR 0x0100
+#define SPI_RCR 0x0104
+#define SPI_TPR 0x0108
+#define SPI_TCR 0x010c
+#define SPI_RNPR 0x0110
+#define SPI_RNCR 0x0114
+#define SPI_TNPR 0x0118
+#define SPI_TNCR 0x011c
+#define SPI_PTCR 0x0120
+#define SPI_PTSR 0x0124
+
+/* Bitfields in CR */
+#define SPI_SPIEN_OFFSET 0
+#define SPI_SPIEN_SIZE 1
+#define SPI_SPIDIS_OFFSET 1
+#define SPI_SPIDIS_SIZE 1
+#define SPI_SWRST_OFFSET 7
+#define SPI_SWRST_SIZE 1
+#define SPI_LASTXFER_OFFSET 24
+#define SPI_LASTXFER_SIZE 1
+
+/* Bitfields in MR */
+#define SPI_MSTR_OFFSET 0
+#define SPI_MSTR_SIZE 1
+#define SPI_PS_OFFSET 1
+#define SPI_PS_SIZE 1
+#define SPI_PCSDEC_OFFSET 2
+#define SPI_PCSDEC_SIZE 1
+#define SPI_FDIV_OFFSET 3
+#define SPI_FDIV_SIZE 1
+#define SPI_MODFDIS_OFFSET 4
+#define SPI_MODFDIS_SIZE 1
+#define SPI_LLB_OFFSET 7
+#define SPI_LLB_SIZE 1
+#define SPI_PCS_OFFSET 16
+#define SPI_PCS_SIZE 4
+#define SPI_DLYBCS_OFFSET 24
+#define SPI_DLYBCS_SIZE 8
+
+/* Bitfields in RDR */
+#define SPI_RD_OFFSET 0
+#define SPI_RD_SIZE 16
+
+/* Bitfields in TDR */
+#define SPI_TD_OFFSET 0
+#define SPI_TD_SIZE 16
+
+/* Bitfields in SR */
+#define SPI_RDRF_OFFSET 0
+#define SPI_RDRF_SIZE 1
+#define SPI_TDRE_OFFSET 1
+#define SPI_TDRE_SIZE 1
+#define SPI_MODF_OFFSET 2
+#define SPI_MODF_SIZE 1
+#define SPI_OVRES_OFFSET 3
+#define SPI_OVRES_SIZE 1
+#define SPI_ENDRX_OFFSET 4
+#define SPI_ENDRX_SIZE 1
+#define SPI_ENDTX_OFFSET 5
+#define SPI_ENDTX_SIZE 1
+#define SPI_RXBUFF_OFFSET 6
+#define SPI_RXBUFF_SIZE 1
+#define SPI_TXBUFE_OFFSET 7
+#define SPI_TXBUFE_SIZE 1
+#define SPI_NSSR_OFFSET 8
+#define SPI_NSSR_SIZE 1
+#define SPI_TXEMPTY_OFFSET 9
+#define SPI_TXEMPTY_SIZE 1
+#define SPI_SPIENS_OFFSET 16
+#define SPI_SPIENS_SIZE 1
+
+/* Bitfields in CSR0 */
+#define SPI_CPOL_OFFSET 0
+#define SPI_CPOL_SIZE 1
+#define SPI_NCPHA_OFFSET 1
+#define SPI_NCPHA_SIZE 1
+#define SPI_CSAAT_OFFSET 3
+#define SPI_CSAAT_SIZE 1
+#define SPI_BITS_OFFSET 4
+#define SPI_BITS_SIZE 4
+#define SPI_SCBR_OFFSET 8
+#define SPI_SCBR_SIZE 8
+#define SPI_DLYBS_OFFSET 16
+#define SPI_DLYBS_SIZE 8
+#define SPI_DLYBCT_OFFSET 24
+#define SPI_DLYBCT_SIZE 8
+
+/* Bitfields in RCR */
+#define SPI_RXCTR_OFFSET 0
+#define SPI_RXCTR_SIZE 16
+
+/* Bitfields in TCR */
+#define SPI_TXCTR_OFFSET 0
+#define SPI_TXCTR_SIZE 16
+
+/* Bitfields in RNCR */
+#define SPI_RXNCR_OFFSET 0
+#define SPI_RXNCR_SIZE 16
+
+/* Bitfields in TNCR */
+#define SPI_TXNCR_OFFSET 0
+#define SPI_TXNCR_SIZE 16
+
+/* Bitfields in PTCR */
+#define SPI_RXTEN_OFFSET 0
+#define SPI_RXTEN_SIZE 1
+#define SPI_RXTDIS_OFFSET 1
+#define SPI_RXTDIS_SIZE 1
+#define SPI_TXTEN_OFFSET 8
+#define SPI_TXTEN_SIZE 1
+#define SPI_TXTDIS_OFFSET 9
+#define SPI_TXTDIS_SIZE 1
+
+/* Constants for BITS */
+#define SPI_BITS_8_BPT 0
+#define SPI_BITS_9_BPT 1
+#define SPI_BITS_10_BPT 2
+#define SPI_BITS_11_BPT 3
+#define SPI_BITS_12_BPT 4
+#define SPI_BITS_13_BPT 5
+#define SPI_BITS_14_BPT 6
+#define SPI_BITS_15_BPT 7
+#define SPI_BITS_16_BPT 8
+
+/* Bit manipulation macros */
+#define SPI_BIT(name) \
+ (1 << SPI_##name##_OFFSET)
+#define SPI_BF(name,value) \
+ (((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
+#define SPI_BFEXT(name,value) \
+ (((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
+#define SPI_BFINS(name,value,old) \
+ ( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
+ | SPI_BF(name,value))
+
+/* Register access macros */
+#define spi_readl(port,reg) \
+ __raw_readl((port)->regs + SPI_##reg)
+#define spi_writel(port,reg,value) \
+ __raw_writel((value), (port)->regs + SPI_##reg)
+
/*
* The core SPI transfer engine just talks to a register bank to set up
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/spi-au1550.c
index b50563d320e..bddee5f516b 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/spi-au1550.c
@@ -1,5 +1,5 @@
/*
- * au1550_spi.c - au1550 psc spi controller driver
+ * au1550 psc spi controller driver
* may work also with au1200, au1210, au1250
* will not work on au1000, au1100 and au1500 (no full spi controller there)
*
diff --git a/drivers/spi/spi_bfin_sport.c b/drivers/spi/spi-bfin-sport.c
index e557ff617b1..e557ff617b1 100644
--- a/drivers/spi/spi_bfin_sport.c
+++ b/drivers/spi/spi-bfin-sport.c
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index cc880c95e7d..b8d25f2b703 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -58,7 +58,7 @@ struct bfin_spi_master_data {
struct spi_master *master;
/* Regs base of SPI controller */
- void __iomem *regs_base;
+ struct bfin_spi_regs __iomem *regs;
/* Pin request list */
u16 *pin_req;
@@ -122,34 +122,14 @@ struct bfin_spi_slave_data {
const struct bfin_spi_transfer_ops *ops;
};
-#define DEFINE_SPI_REG(reg, off) \
-static inline u16 read_##reg(struct bfin_spi_master_data *drv_data) \
- { return bfin_read16(drv_data->regs_base + off); } \
-static inline void write_##reg(struct bfin_spi_master_data *drv_data, u16 v) \
- { bfin_write16(drv_data->regs_base + off, v); }
-
-DEFINE_SPI_REG(CTRL, 0x00)
-DEFINE_SPI_REG(FLAG, 0x04)
-DEFINE_SPI_REG(STAT, 0x08)
-DEFINE_SPI_REG(TDBR, 0x0C)
-DEFINE_SPI_REG(RDBR, 0x10)
-DEFINE_SPI_REG(BAUD, 0x14)
-DEFINE_SPI_REG(SHAW, 0x18)
-
static void bfin_spi_enable(struct bfin_spi_master_data *drv_data)
{
- u16 cr;
-
- cr = read_CTRL(drv_data);
- write_CTRL(drv_data, (cr | BIT_CTL_ENABLE));
+ bfin_write_or(&drv_data->regs->ctl, BIT_CTL_ENABLE);
}
static void bfin_spi_disable(struct bfin_spi_master_data *drv_data)
{
- u16 cr;
-
- cr = read_CTRL(drv_data);
- write_CTRL(drv_data, (cr & (~BIT_CTL_ENABLE)));
+ bfin_write_and(&drv_data->regs->ctl, ~BIT_CTL_ENABLE);
}
/* Calculate the SPI_BAUD register value based on input HZ */
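The hunk above drops the generated read_*/write_* accessors in favour of a struct bfin_spi_regs layout plus the Blackfin bfin_read()/bfin_write() helpers, with bfin_write_or()/bfin_write_and() doing the read-modify-write in a single call. A rough sketch of what those two helpers amount to (hypothetical names; the real implementations live in the Blackfin arch headers and may differ in detail):

	/* sketch only: set or clear bits in a 16-bit MMIO register */
	static inline void example_write_or(volatile u16 __iomem *reg, u16 bits)
	{
		bfin_write(reg, bfin_read(reg) | bits);
	}

	static inline void example_write_and(volatile u16 __iomem *reg, u16 mask)
	{
		bfin_write(reg, bfin_read(reg) & mask);
	}
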
@@ -172,10 +152,10 @@ static int bfin_spi_flush(struct bfin_spi_master_data *drv_data)
unsigned long limit = loops_per_jiffy << 1;
/* wait for stop and clear stat */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && --limit)
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF) && --limit)
cpu_relax();
- write_STAT(drv_data, BIT_STAT_CLR);
+ bfin_write(&drv_data->regs->stat, BIT_STAT_CLR);
return limit;
}
@@ -183,29 +163,19 @@ static int bfin_spi_flush(struct bfin_spi_master_data *drv_data)
/* Chip select operation functions for cs_change flag */
static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip)
{
- if (likely(chip->chip_select_num < MAX_CTRL_CS)) {
- u16 flag = read_FLAG(drv_data);
-
- flag &= ~chip->flag;
-
- write_FLAG(drv_data, flag);
- } else {
+ if (likely(chip->chip_select_num < MAX_CTRL_CS))
+ bfin_write_and(&drv_data->regs->flg, ~chip->flag);
+ else
gpio_set_value(chip->cs_gpio, 0);
- }
}
static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data,
struct bfin_spi_slave_data *chip)
{
- if (likely(chip->chip_select_num < MAX_CTRL_CS)) {
- u16 flag = read_FLAG(drv_data);
-
- flag |= chip->flag;
-
- write_FLAG(drv_data, flag);
- } else {
+ if (likely(chip->chip_select_num < MAX_CTRL_CS))
+ bfin_write_or(&drv_data->regs->flg, chip->flag);
+ else
gpio_set_value(chip->cs_gpio, 1);
- }
/* Move delay here for consistency */
if (chip->cs_chg_udelay)
@@ -216,25 +186,15 @@ static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data,
static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data,
struct bfin_spi_slave_data *chip)
{
- if (chip->chip_select_num < MAX_CTRL_CS) {
- u16 flag = read_FLAG(drv_data);
-
- flag |= (chip->flag >> 8);
-
- write_FLAG(drv_data, flag);
- }
+ if (chip->chip_select_num < MAX_CTRL_CS)
+ bfin_write_or(&drv_data->regs->flg, chip->flag >> 8);
}
static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data,
struct bfin_spi_slave_data *chip)
{
- if (chip->chip_select_num < MAX_CTRL_CS) {
- u16 flag = read_FLAG(drv_data);
-
- flag &= ~(chip->flag >> 8);
-
- write_FLAG(drv_data, flag);
- }
+ if (chip->chip_select_num < MAX_CTRL_CS)
+ bfin_write_and(&drv_data->regs->flg, ~(chip->flag >> 8));
}
/* stop controller and re-config current chip */
@@ -243,15 +203,15 @@ static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data)
struct bfin_spi_slave_data *chip = drv_data->cur_chip;
/* Clear status and disable clock */
- write_STAT(drv_data, BIT_STAT_CLR);
+ bfin_write(&drv_data->regs->stat, BIT_STAT_CLR);
bfin_spi_disable(drv_data);
dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n");
SSYNC();
/* Load the registers */
- write_CTRL(drv_data, chip->ctl_reg);
- write_BAUD(drv_data, chip->baud);
+ bfin_write(&drv_data->regs->ctl, chip->ctl_reg);
+ bfin_write(&drv_data->regs->baud, chip->baud);
bfin_spi_enable(drv_data);
bfin_spi_cs_active(drv_data, chip);
@@ -260,7 +220,7 @@ static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data)
/* used to kick off transfer in rx mode and read unwanted RX data */
static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data)
{
- (void) read_RDBR(drv_data);
+ (void) bfin_read(&drv_data->regs->rdbr);
}
static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data)
@@ -269,10 +229,10 @@ static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data)
bfin_spi_dummy_read(drv_data);
while (drv_data->tx < drv_data->tx_end) {
- write_TDBR(drv_data, (*(u8 *) (drv_data->tx++)));
+ bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++)));
/* wait until transfer finished.
checking SPIF or TXS may not guarantee transfer completion */
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
cpu_relax();
/* discard RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
@@ -287,10 +247,10 @@ static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data)
bfin_spi_dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end) {
- write_TDBR(drv_data, tx_val);
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ bfin_write(&drv_data->regs->tdbr, tx_val);
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
cpu_relax();
- *(u8 *) (drv_data->rx++) = read_RDBR(drv_data);
+ *(u8 *) (drv_data->rx++) = bfin_read(&drv_data->regs->rdbr);
}
}
@@ -300,10 +260,10 @@ static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data)
bfin_spi_dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end) {
- write_TDBR(drv_data, (*(u8 *) (drv_data->tx++)));
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++)));
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
cpu_relax();
- *(u8 *) (drv_data->rx++) = read_RDBR(drv_data);
+ *(u8 *) (drv_data->rx++) = bfin_read(&drv_data->regs->rdbr);
}
}
@@ -319,11 +279,11 @@ static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data)
bfin_spi_dummy_read(drv_data);
while (drv_data->tx < drv_data->tx_end) {
- write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
+ bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx)));
drv_data->tx += 2;
/* wait until transfer finished.
checking SPIF or TXS may not guarantee transfer completion */
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
cpu_relax();
/* discard RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
@@ -338,10 +298,10 @@ static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data)
bfin_spi_dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end) {
- write_TDBR(drv_data, tx_val);
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ bfin_write(&drv_data->regs->tdbr, tx_val);
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
cpu_relax();
- *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
+ *(u16 *) (drv_data->rx) = bfin_read(&drv_data->regs->rdbr);
drv_data->rx += 2;
}
}
@@ -352,11 +312,11 @@ static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data)
bfin_spi_dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end) {
- write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
+ bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx)));
drv_data->tx += 2;
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
cpu_relax();
- *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
+ *(u16 *) (drv_data->rx) = bfin_read(&drv_data->regs->rdbr);
drv_data->rx += 2;
}
}
@@ -428,7 +388,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
int loop = 0;
/* wait until transfer finished. */
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
cpu_relax();
if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) ||
@@ -439,11 +399,11 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
if (n_bytes % 2) {
u16 *buf = (u16 *)drv_data->rx;
for (loop = 0; loop < n_bytes / 2; loop++)
- *buf++ = read_RDBR(drv_data);
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
} else {
u8 *buf = (u8 *)drv_data->rx;
for (loop = 0; loop < n_bytes; loop++)
- *buf++ = read_RDBR(drv_data);
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
}
drv_data->rx += n_bytes;
}
@@ -468,15 +428,15 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
u16 *buf = (u16 *)drv_data->rx;
u16 *buf2 = (u16 *)drv_data->tx;
for (loop = 0; loop < n_bytes / 2; loop++) {
- *buf++ = read_RDBR(drv_data);
- write_TDBR(drv_data, *buf2++);
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, *buf2++);
}
} else {
u8 *buf = (u8 *)drv_data->rx;
u8 *buf2 = (u8 *)drv_data->tx;
for (loop = 0; loop < n_bytes; loop++) {
- *buf++ = read_RDBR(drv_data);
- write_TDBR(drv_data, *buf2++);
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, *buf2++);
}
}
} else if (drv_data->rx) {
@@ -485,14 +445,14 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
if (n_bytes % 2) {
u16 *buf = (u16 *)drv_data->rx;
for (loop = 0; loop < n_bytes / 2; loop++) {
- *buf++ = read_RDBR(drv_data);
- write_TDBR(drv_data, chip->idle_tx_val);
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val);
}
} else {
u8 *buf = (u8 *)drv_data->rx;
for (loop = 0; loop < n_bytes; loop++) {
- *buf++ = read_RDBR(drv_data);
- write_TDBR(drv_data, chip->idle_tx_val);
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val);
}
}
} else if (drv_data->tx) {
@@ -501,14 +461,14 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
if (n_bytes % 2) {
u16 *buf = (u16 *)drv_data->tx;
for (loop = 0; loop < n_bytes / 2; loop++) {
- read_RDBR(drv_data);
- write_TDBR(drv_data, *buf++);
+ bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, *buf++);
}
} else {
u8 *buf = (u8 *)drv_data->tx;
for (loop = 0; loop < n_bytes; loop++) {
- read_RDBR(drv_data);
- write_TDBR(drv_data, *buf++);
+ bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, *buf++);
}
}
}
@@ -528,19 +488,19 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
struct spi_message *msg = drv_data->cur_msg;
unsigned long timeout;
unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel);
- u16 spistat = read_STAT(drv_data);
+ u16 spistat = bfin_read(&drv_data->regs->stat);
dev_dbg(&drv_data->pdev->dev,
"in dma_irq_handler dmastat:0x%x spistat:0x%x\n",
dmastat, spistat);
if (drv_data->rx != NULL) {
- u16 cr = read_CTRL(drv_data);
+ u16 cr = bfin_read(&drv_data->regs->ctl);
/* discard old RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
- write_CTRL(drv_data, cr & ~BIT_CTL_ENABLE); /* Disable SPI */
- write_CTRL(drv_data, cr & ~BIT_CTL_TIMOD); /* Restore State */
- write_STAT(drv_data, BIT_STAT_CLR); /* Clear Status */
+ bfin_write(&drv_data->regs->ctl, cr & ~BIT_CTL_ENABLE); /* Disable SPI */
+ bfin_write(&drv_data->regs->ctl, cr & ~BIT_CTL_TIMOD); /* Restore State */
+ bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); /* Clear Status */
}
clear_dma_irqstat(drv_data->dma_channel);
@@ -552,17 +512,17 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
* register until it goes low for 2 successive reads
*/
if (drv_data->tx != NULL) {
- while ((read_STAT(drv_data) & BIT_STAT_TXS) ||
- (read_STAT(drv_data) & BIT_STAT_TXS))
+ while ((bfin_read(&drv_data->regs->stat) & BIT_STAT_TXS) ||
+ (bfin_read(&drv_data->regs->stat) & BIT_STAT_TXS))
cpu_relax();
}
dev_dbg(&drv_data->pdev->dev,
"in dma_irq_handler dmastat:0x%x spistat:0x%x\n",
- dmastat, read_STAT(drv_data));
+ dmastat, bfin_read(&drv_data->regs->stat));
timeout = jiffies + HZ;
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF))
if (!time_before(jiffies, timeout)) {
dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF");
break;
@@ -699,9 +659,9 @@ static void bfin_spi_pump_transfers(unsigned long data)
bfin_spi_giveback(drv_data);
return;
}
- cr = read_CTRL(drv_data) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE);
+ cr = bfin_read(&drv_data->regs->ctl) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE);
cr |= cr_width;
- write_CTRL(drv_data, cr);
+ bfin_write(&drv_data->regs->ctl, cr);
dev_dbg(&drv_data->pdev->dev,
"transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n",
@@ -712,11 +672,11 @@ static void bfin_spi_pump_transfers(unsigned long data)
/* Speed setup (surely valid because already checked) */
if (transfer->speed_hz)
- write_BAUD(drv_data, hz_to_spi_baud(transfer->speed_hz));
+ bfin_write(&drv_data->regs->baud, hz_to_spi_baud(transfer->speed_hz));
else
- write_BAUD(drv_data, chip->baud);
+ bfin_write(&drv_data->regs->baud, chip->baud);
- write_STAT(drv_data, BIT_STAT_CLR);
+ bfin_write(&drv_data->regs->stat, BIT_STAT_CLR);
bfin_spi_cs_active(drv_data, chip);
dev_dbg(&drv_data->pdev->dev,
@@ -749,7 +709,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
}
/* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF))
cpu_relax();
/* dirty hack for autobuffer DMA mode */
@@ -766,7 +726,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
enable_dma(drv_data->dma_channel);
/* start SPI transfer */
- write_CTRL(drv_data, cr | BIT_CTL_TIMOD_DMA_TX);
+ bfin_write(&drv_data->regs->ctl, cr | BIT_CTL_TIMOD_DMA_TX);
/* just return here, there can only be one transfer
* in this mode
@@ -821,7 +781,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
set_dma_config(drv_data->dma_channel, dma_config);
local_irq_save(flags);
SSYNC();
- write_CTRL(drv_data, cr);
+ bfin_write(&drv_data->regs->ctl, cr);
enable_dma(drv_data->dma_channel);
dma_enable_irq(drv_data->dma_channel);
local_irq_restore(flags);
@@ -835,7 +795,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
* problems with setting up the output value in TDBR prior to the
* start of the transfer.
*/
- write_CTRL(drv_data, cr | BIT_CTL_TXMOD);
+ bfin_write(&drv_data->regs->ctl, cr | BIT_CTL_TXMOD);
if (chip->pio_interrupt) {
/* SPI irq should have been disabled by now */
@@ -845,19 +805,19 @@ static void bfin_spi_pump_transfers(unsigned long data)
/* start transfer */
if (drv_data->tx == NULL)
- write_TDBR(drv_data, chip->idle_tx_val);
+ bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val);
else {
int loop;
if (bits_per_word % 16 == 0) {
u16 *buf = (u16 *)drv_data->tx;
for (loop = 0; loop < bits_per_word / 16;
loop++) {
- write_TDBR(drv_data, *buf++);
+ bfin_write(&drv_data->regs->tdbr, *buf++);
}
} else if (bits_per_word % 8 == 0) {
u8 *buf = (u8 *)drv_data->tx;
for (loop = 0; loop < bits_per_word / 8; loop++)
- write_TDBR(drv_data, *buf++);
+ bfin_write(&drv_data->regs->tdbr, *buf++);
}
drv_data->tx += drv_data->n_bytes;
@@ -1005,7 +965,7 @@ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg)
#define MAX_SPI_SSEL 7
-static u16 ssel[][MAX_SPI_SSEL] = {
+static const u16 ssel[][MAX_SPI_SSEL] = {
{P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3,
P_SPI0_SSEL4, P_SPI0_SSEL5,
P_SPI0_SSEL6, P_SPI0_SSEL7},
@@ -1226,7 +1186,7 @@ static void bfin_spi_cleanup(struct spi_device *spi)
spi_set_ctldata(spi, NULL);
}
-static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data)
+static int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data)
{
INIT_LIST_HEAD(&drv_data->queue);
spin_lock_init(&drv_data->lock);
@@ -1248,7 +1208,7 @@ static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data)
return 0;
}
-static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data)
+static int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data)
{
unsigned long flags;
@@ -1270,7 +1230,7 @@ static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data)
return 0;
}
-static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
+static int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
{
unsigned long flags;
unsigned limit = 500;
@@ -1299,7 +1259,7 @@ static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
return status;
}
-static inline int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data)
+static int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data)
{
int status;
@@ -1353,8 +1313,8 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
goto out_error_get_res;
}
- drv_data->regs_base = ioremap(res->start, resource_size(res));
- if (drv_data->regs_base == NULL) {
+ drv_data->regs = ioremap(res->start, resource_size(res));
+ if (drv_data->regs == NULL) {
dev_err(dev, "Cannot map IO\n");
status = -ENXIO;
goto out_error_ioremap;
@@ -1397,8 +1357,8 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
/* Reset SPI registers. If these registers were used by the boot loader,
* the sky may fall on your head if you enable the dma controller.
*/
- write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER);
- write_FLAG(drv_data, 0xFF00);
+ bfin_write(&drv_data->regs->ctl, BIT_CTL_CPHA | BIT_CTL_MASTER);
+ bfin_write(&drv_data->regs->flg, 0xFF00);
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
@@ -1408,15 +1368,15 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
goto out_error_queue_alloc;
}
- dev_info(dev, "%s, Version %s, regs_base@%p, dma channel@%d\n",
- DRV_DESC, DRV_VERSION, drv_data->regs_base,
+ dev_info(dev, "%s, Version %s, regs@%p, dma channel@%d\n",
+ DRV_DESC, DRV_VERSION, drv_data->regs,
drv_data->dma_channel);
return status;
out_error_queue_alloc:
bfin_spi_destroy_queue(drv_data);
out_error_free_io:
- iounmap((void *) drv_data->regs_base);
+ iounmap(drv_data->regs);
out_error_ioremap:
out_error_get_res:
spi_master_put(master);
@@ -1473,14 +1433,14 @@ static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state)
if (status != 0)
return status;
- drv_data->ctrl_reg = read_CTRL(drv_data);
- drv_data->flag_reg = read_FLAG(drv_data);
+ drv_data->ctrl_reg = bfin_read(&drv_data->regs->ctl);
+ drv_data->flag_reg = bfin_read(&drv_data->regs->flg);
/*
* reset SPI_CTL and SPI_FLG registers
*/
- write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER);
- write_FLAG(drv_data, 0xFF00);
+ bfin_write(&drv_data->regs->ctl, BIT_CTL_CPHA | BIT_CTL_MASTER);
+ bfin_write(&drv_data->regs->flg, 0xFF00);
return 0;
}
@@ -1490,8 +1450,8 @@ static int bfin_spi_resume(struct platform_device *pdev)
struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
int status = 0;
- write_CTRL(drv_data, drv_data->ctrl_reg);
- write_FLAG(drv_data, drv_data->flag_reg);
+ bfin_write(&drv_data->regs->ctl, drv_data->ctrl_reg);
+ bfin_write(&drv_data->regs->flg, drv_data->flag_reg);
/* Start the queue running */
status = bfin_spi_start_queue(drv_data);
diff --git a/drivers/spi/spi_bitbang_txrx.h b/drivers/spi/spi-bitbang-txrx.h
index c16bf853c3e..c16bf853c3e 100644
--- a/drivers/spi/spi_bitbang_txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi-bitbang.c
index 14a63f6010d..02d57fbba29 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -1,5 +1,5 @@
/*
- * spi_bitbang.c - polling/bitbanging SPI master controller driver utilities
+ * polling/bitbanging SPI master controller driver utilities
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -68,7 +68,7 @@ static unsigned bitbang_txrx_8(
unsigned ns,
struct spi_transfer *t
) {
- unsigned bits = spi->bits_per_word;
+ unsigned bits = t->bits_per_word ? : spi->bits_per_word;
unsigned count = t->len;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
@@ -94,7 +94,7 @@ static unsigned bitbang_txrx_16(
unsigned ns,
struct spi_transfer *t
) {
- unsigned bits = spi->bits_per_word;
+ unsigned bits = t->bits_per_word ? : spi->bits_per_word;
unsigned count = t->len;
const u16 *tx = t->tx_buf;
u16 *rx = t->rx_buf;
@@ -120,7 +120,7 @@ static unsigned bitbang_txrx_32(
unsigned ns,
struct spi_transfer *t
) {
- unsigned bits = spi->bits_per_word;
+ unsigned bits = t->bits_per_word ? : spi->bits_per_word;
unsigned count = t->len;
const u32 *tx = t->tx_buf;
u32 *rx = t->rx_buf;
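The repeated one-line change above uses the GNU "?:" shorthand: a non-zero t->bits_per_word on an individual spi_transfer now overrides the device-wide spi->bits_per_word for that transfer only. Written out without the extension, plus a hypothetical transfer (buffer name is illustrative) that overrides an 8-bit device default:

	/* equivalent expansion of the shorthand */
	unsigned bits = t->bits_per_word ? t->bits_per_word : spi->bits_per_word;

	/* hypothetical usage: one 16-bit transfer on an otherwise 8-bit device */
	struct spi_transfer xfer = {
		.tx_buf        = buf,
		.len           = 4,
		.bits_per_word = 16,	/* used instead of spi->bits_per_word */
	};
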
diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi-butterfly.c
index 0d4ceba3b59..9f907ec52de 100644
--- a/drivers/spi/spi_butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -1,5 +1,5 @@
/*
- * spi_butterfly.c - parport-to-butterfly adapter
+ * parport-to-butterfly adapter
*
* Copyright (C) 2005 David Brownell
*
@@ -149,7 +149,7 @@ static void butterfly_chipselect(struct spi_device *spi, int value)
#define spidelay(X) do{}while(0)
//#define spidelay ndelay
-#include "spi_bitbang_txrx.h"
+#include "spi-bitbang-txrx.h"
static u32
butterfly_txrx_word_mode0(struct spi_device *spi,
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/spi-coldfire-qspi.c
index ae2cd1c1fda..ae2cd1c1fda 100644
--- a/drivers/spi/coldfire_qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/spi-davinci.c
index 1f0ed8005c9..1f0ed8005c9 100644
--- a/drivers/spi/davinci_spi.c
+++ b/drivers/spi/spi-davinci.c
diff --git a/drivers/spi/dw_spi_mid.c b/drivers/spi/spi-dw-mid.c
index 489178243d8..130e55537db 100644
--- a/drivers/spi/dw_spi_mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -1,5 +1,5 @@
/*
- * dw_spi_mid.c - special handling for DW core on Intel MID platform
+ * Special handling for DW core on Intel MID platform
*
* Copyright (c) 2009, Intel Corporation.
*
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include "dw_spi.h"
+#include "spi-dw.h"
#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/intel_mid_dma.h>
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/spi-dw-mmio.c
index e0e813dad15..34eb66501db 100644
--- a/drivers/spi/dw_spi_mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -1,5 +1,5 @@
/*
- * dw_spi_mmio.c - Memory-mapped interface driver for DW SPI Core
+ * Memory-mapped interface driver for DW SPI Core
*
* Copyright (c) 2010, Octasic semiconductor.
*
@@ -16,7 +16,7 @@
#include <linux/spi/spi.h>
#include <linux/scatterlist.h>
-#include "dw_spi.h"
+#include "spi-dw.h"
#define DRIVER_NAME "dw_spi_mmio"
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/spi-dw-pci.c
index ad260aa5e52..c5f37f03ac8 100644
--- a/drivers/spi/dw_spi_pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -1,5 +1,5 @@
/*
- * dw_spi_pci.c - PCI interface driver for DW SPI Core
+ * PCI interface driver for DW SPI Core
*
* Copyright (c) 2009, Intel Corporation.
*
@@ -22,7 +22,7 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include "dw_spi.h"
+#include "spi-dw.h"
#define DRIVER_NAME "dw_spi_pci"
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/spi-dw.c
index 919fa9d9e16..857cd30b44b 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/spi-dw.c
@@ -1,5 +1,5 @@
/*
- * dw_spi.c - Designware SPI core controller driver (refer pxa2xx_spi.c)
+ * Designware SPI core controller driver (refer pxa2xx_spi.c)
*
* Copyright (c) 2009, Intel Corporation.
*
@@ -24,7 +24,7 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include "dw_spi.h"
+#include "spi-dw.h"
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
@@ -818,9 +818,11 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
dws->prev_chip = NULL;
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
+ snprintf(dws->name, sizeof(dws->name), "dw_spi%d",
+ dws->bus_num);
ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
- "dw_spi", dws);
+ dws->name, dws);
if (ret < 0) {
dev_err(&master->dev, "can not get IRQ\n");
goto err_free_master;
diff --git a/drivers/spi/dw_spi.h b/drivers/spi/spi-dw.h
index 7a5e78d2a5c..8b7b07bf6c3 100644
--- a/drivers/spi/dw_spi.h
+++ b/drivers/spi/spi-dw.h
@@ -96,6 +96,7 @@ struct dw_spi {
struct spi_device *cur_dev;
struct device *parent_dev;
enum dw_ssi_type type;
+ char name[16];
void __iomem *regs;
unsigned long paddr;
diff --git a/drivers/spi/ep93xx_spi.c b/drivers/spi/spi-ep93xx.c
index d3570071e98..1cf645479bf 100644
--- a/drivers/spi/ep93xx_spi.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -1,7 +1,7 @@
/*
* Driver for Cirrus Logic EP93xx SPI controller.
*
- * Copyright (c) 2010 Mika Westerberg
+ * Copyright (C) 2010-2011 Mika Westerberg
*
* Explicit FIFO handling code was inspired by amba-pl022 driver.
*
@@ -21,13 +21,16 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
+#include <linux/scatterlist.h>
#include <linux/spi/spi.h>
+#include <mach/dma.h>
#include <mach/ep93xx_spi.h>
#define SSPCR0 0x0000
@@ -71,6 +74,7 @@
* @pdev: pointer to platform device
* @clk: clock for the controller
* @regs_base: pointer to ioremap()'d registers
+ * @sspdr_phys: physical address of the SSPDR register
* @irq: IRQ number used by the driver
* @min_rate: minimum clock rate (in Hz) supported by the controller
* @max_rate: maximum clock rate (in Hz) supported by the controller
@@ -84,6 +88,14 @@
* @rx: current byte in transfer to receive
* @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
* frame decreases this level and sending one frame increases it.
+ * @dma_rx: RX DMA channel
+ * @dma_tx: TX DMA channel
+ * @dma_rx_data: RX parameters passed to the DMA engine
+ * @dma_tx_data: TX parameters passed to the DMA engine
+ * @rx_sgt: sg table for RX transfers
+ * @tx_sgt: sg table for TX transfers
+ * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
+ * the client
*
* This structure holds EP93xx SPI controller specific information. When
* @running is %true, driver accepts transfer requests from protocol drivers.
@@ -100,6 +112,7 @@ struct ep93xx_spi {
const struct platform_device *pdev;
struct clk *clk;
void __iomem *regs_base;
+ unsigned long sspdr_phys;
int irq;
unsigned long min_rate;
unsigned long max_rate;
@@ -112,6 +125,13 @@ struct ep93xx_spi {
size_t tx;
size_t rx;
size_t fifo_level;
+ struct dma_chan *dma_rx;
+ struct dma_chan *dma_tx;
+ struct ep93xx_dma_data dma_rx_data;
+ struct ep93xx_dma_data dma_tx_data;
+ struct sg_table rx_sgt;
+ struct sg_table tx_sgt;
+ void *zeropage;
};
/**
@@ -496,14 +516,195 @@ static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
espi->fifo_level++;
}
- if (espi->rx == t->len) {
- msg->actual_length += t->len;
+ if (espi->rx == t->len)
return 0;
- }
return -EINPROGRESS;
}
+static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
+{
+ /*
+ * Now everything is set up for the current transfer. We prime the TX
+ * FIFO, enable interrupts, and wait for the transfer to complete.
+ */
+ if (ep93xx_spi_read_write(espi)) {
+ ep93xx_spi_enable_interrupts(espi);
+ wait_for_completion(&espi->wait);
+ }
+}
+
+/**
+ * ep93xx_spi_dma_prepare() - prepares a DMA transfer
+ * @espi: ep93xx SPI controller struct
+ * @dir: DMA transfer direction
+ *
+ * Function configures the DMA, maps the buffer and prepares the DMA
+ * descriptor. Returns a valid DMA descriptor on success and an ERR_PTR()
+ * in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
+{
+ struct spi_transfer *t = espi->current_msg->state;
+ struct dma_async_tx_descriptor *txd;
+ enum dma_slave_buswidth buswidth;
+ struct dma_slave_config conf;
+ struct scatterlist *sg;
+ struct sg_table *sgt;
+ struct dma_chan *chan;
+ const void *buf, *pbuf;
+ size_t len = t->len;
+ int i, ret, nents;
+
+ if (bits_per_word(espi) > 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.direction = dir;
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = espi->dma_rx;
+ buf = t->rx_buf;
+ sgt = &espi->rx_sgt;
+
+ conf.src_addr = espi->sspdr_phys;
+ conf.src_addr_width = buswidth;
+ } else {
+ chan = espi->dma_tx;
+ buf = t->tx_buf;
+ sgt = &espi->tx_sgt;
+
+ conf.dst_addr = espi->sspdr_phys;
+ conf.dst_addr_width = buswidth;
+ }
+
+ ret = dmaengine_slave_config(chan, &conf);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * We need to split the transfer into PAGE_SIZE'd chunks. This is
+ * because we are using @espi->zeropage to provide a zero RX buffer
+ * for the TX transfers and we have only allocated one page for that.
+ *
+ * For performance reasons we allocate a new sg_table only when
+ * needed. Otherwise we will re-use the current one. Eventually the
+ * last sg_table is released in ep93xx_spi_release_dma().
+ */
+
+ nents = DIV_ROUND_UP(len, PAGE_SIZE);
+ if (nents != sgt->nents) {
+ sg_free_table(sgt);
+
+ ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ pbuf = buf;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = min_t(size_t, len, PAGE_SIZE);
+
+ if (buf) {
+ sg_set_page(sg, virt_to_page(pbuf), bytes,
+ offset_in_page(pbuf));
+ } else {
+ sg_set_page(sg, virt_to_page(espi->zeropage),
+ bytes, 0);
+ }
+
+ pbuf += bytes;
+ len -= bytes;
+ }
+
+ if (WARN_ON(len)) {
+ dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+ if (!nents)
+ return ERR_PTR(-ENOMEM);
+
+ txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
+ dir, DMA_CTRL_ACK);
+ if (!txd) {
+ dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+ return ERR_PTR(-ENOMEM);
+ }
+ return txd;
+}
+
+/**
+ * ep93xx_spi_dma_finish() - finishes a DMA transfer
+ * @espi: ep93xx SPI controller struct
+ * @dir: DMA transfer direction
+ *
+ * Function finishes the DMA transfer. After this, the DMA buffer is
+ * unmapped.
+ */
+static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
+ enum dma_data_direction dir)
+{
+ struct dma_chan *chan;
+ struct sg_table *sgt;
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = espi->dma_rx;
+ sgt = &espi->rx_sgt;
+ } else {
+ chan = espi->dma_tx;
+ sgt = &espi->tx_sgt;
+ }
+
+ dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+}
+
+static void ep93xx_spi_dma_callback(void *callback_param)
+{
+ complete(callback_param);
+}
+
+static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
+{
+ struct spi_message *msg = espi->current_msg;
+ struct dma_async_tx_descriptor *rxd, *txd;
+
+ rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE);
+ if (IS_ERR(rxd)) {
+ dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
+ msg->status = PTR_ERR(rxd);
+ return;
+ }
+
+ txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE);
+ if (IS_ERR(txd)) {
+ ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+ dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
+ msg->status = PTR_ERR(txd);
+ return;
+ }
+
+ /* We are ready when RX is done */
+ rxd->callback = ep93xx_spi_dma_callback;
+ rxd->callback_param = &espi->wait;
+
+ /* Now submit both descriptors and wait until they finish */
+ dmaengine_submit(rxd);
+ dmaengine_submit(txd);
+
+ dma_async_issue_pending(espi->dma_rx);
+ dma_async_issue_pending(espi->dma_tx);
+
+ wait_for_completion(&espi->wait);
+
+ ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE);
+ ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+}
+
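To make the chunking in ep93xx_spi_dma_prepare() above concrete (illustrative numbers, assuming 4 KiB pages): a 10240-byte transfer gives nents = DIV_ROUND_UP(10240, 4096) = 3, and the loop fills scatterlist entries of 4096, 4096 and 2048 bytes; for a TX-only transfer every RX entry points at the same single zeropage. A stand-alone sketch of the same arithmetic:

	/* sketch: how a transfer is split into page-sized sg chunks */
	size_t len = 10240, left = len;
	int nents = DIV_ROUND_UP(len, PAGE_SIZE);	/* 3 with 4 KiB pages */
	while (left) {
		size_t bytes = min_t(size_t, left, PAGE_SIZE);	/* 4096, 4096, 2048 */
		left -= bytes;
	}
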
/**
* ep93xx_spi_process_transfer() - processes one SPI transfer
* @espi: ep93xx SPI controller struct
@@ -556,13 +757,14 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
espi->tx = 0;
/*
- * Now everything is set up for the current transfer. We prime the TX
- * FIFO, enable interrupts, and wait for the transfer to complete.
+ * There is no point in setting up DMA for transfers which will
+ * fit into the FIFO and can be transferred with a single interrupt.
+ * So in these cases we use PIO and don't bother with DMA.
*/
- if (ep93xx_spi_read_write(espi)) {
- ep93xx_spi_enable_interrupts(espi);
- wait_for_completion(&espi->wait);
- }
+ if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
+ ep93xx_spi_dma_transfer(espi);
+ else
+ ep93xx_spi_pio_transfer(espi);
/*
* In case of error during transmit, we bail out from processing
@@ -571,6 +773,8 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
if (msg->status)
return;
+ msg->actual_length += t->len;
+
/*
* After this transfer is finished, perform any possible
* post-transfer actions requested by the protocol driver.
@@ -752,6 +956,75 @@ static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
+{
+ if (ep93xx_dma_chan_is_m2p(chan))
+ return false;
+
+ chan->private = filter_param;
+ return true;
+}
+
+static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
+{
+ dma_cap_mask_t mask;
+ int ret;
+
+ espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!espi->zeropage)
+ return -ENOMEM;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ espi->dma_rx_data.port = EP93XX_DMA_SSP;
+ espi->dma_rx_data.direction = DMA_FROM_DEVICE;
+ espi->dma_rx_data.name = "ep93xx-spi-rx";
+
+ espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+ &espi->dma_rx_data);
+ if (!espi->dma_rx) {
+ ret = -ENODEV;
+ goto fail_free_page;
+ }
+
+ espi->dma_tx_data.port = EP93XX_DMA_SSP;
+ espi->dma_tx_data.direction = DMA_TO_DEVICE;
+ espi->dma_tx_data.name = "ep93xx-spi-tx";
+
+ espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+ &espi->dma_tx_data);
+ if (!espi->dma_tx) {
+ ret = -ENODEV;
+ goto fail_release_rx;
+ }
+
+ return 0;
+
+fail_release_rx:
+ dma_release_channel(espi->dma_rx);
+ espi->dma_rx = NULL;
+fail_free_page:
+ free_page((unsigned long)espi->zeropage);
+
+ return ret;
+}
+
+static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
+{
+ if (espi->dma_rx) {
+ dma_release_channel(espi->dma_rx);
+ sg_free_table(&espi->rx_sgt);
+ }
+ if (espi->dma_tx) {
+ dma_release_channel(espi->dma_tx);
+ sg_free_table(&espi->tx_sgt);
+ }
+
+ if (espi->zeropage)
+ free_page((unsigned long)espi->zeropage);
+}
+
static int __init ep93xx_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -818,6 +1091,7 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
goto fail_put_clock;
}
+ espi->sspdr_phys = res->start + SSPDR;
espi->regs_base = ioremap(res->start, resource_size(res));
if (!espi->regs_base) {
dev_err(&pdev->dev, "failed to map resources\n");
@@ -832,10 +1106,13 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
goto fail_unmap_regs;
}
+ if (info->use_dma && ep93xx_spi_setup_dma(espi))
+ dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
+
espi->wq = create_singlethread_workqueue("ep93xx_spid");
if (!espi->wq) {
dev_err(&pdev->dev, "unable to create workqueue\n");
- goto fail_free_irq;
+ goto fail_free_dma;
}
INIT_WORK(&espi->msg_work, ep93xx_spi_work);
INIT_LIST_HEAD(&espi->msg_queue);
@@ -857,7 +1134,8 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
fail_free_queue:
destroy_workqueue(espi->wq);
-fail_free_irq:
+fail_free_dma:
+ ep93xx_spi_release_dma(espi);
free_irq(espi->irq, espi);
fail_unmap_regs:
iounmap(espi->regs_base);
@@ -901,6 +1179,7 @@ static int __exit ep93xx_spi_remove(struct platform_device *pdev)
}
spin_unlock_irq(&espi->lock);
+ ep93xx_spi_release_dma(espi);
free_irq(espi->irq, espi);
iounmap(espi->regs_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/spi/spi_fsl_espi.c b/drivers/spi/spi-fsl-espi.c
index 496f895a002..54e499d5f92 100644
--- a/drivers/spi/spi_fsl_espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -22,7 +22,7 @@
#include <linux/err.h>
#include <sysdev/fsl_soc.h>
-#include "spi_fsl_lib.h"
+#include "spi-fsl-lib.h"
/* eSPI Controller registers */
struct fsl_espi_reg {
diff --git a/drivers/spi/spi_fsl_lib.c b/drivers/spi/spi-fsl-lib.c
index ff59f42ae99..2674fad7f68 100644
--- a/drivers/spi/spi_fsl_lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -25,7 +25,7 @@
#include <linux/of_spi.h>
#include <sysdev/fsl_soc.h>
-#include "spi_fsl_lib.h"
+#include "spi-fsl-lib.h"
#define MPC8XXX_SPI_RX_BUF(type) \
void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
diff --git a/drivers/spi/spi_fsl_lib.h b/drivers/spi/spi-fsl-lib.h
index cbe881b9ea7..cbe881b9ea7 100644
--- a/drivers/spi/spi_fsl_lib.h
+++ b/drivers/spi/spi-fsl-lib.h
diff --git a/drivers/spi/spi_fsl_spi.c b/drivers/spi/spi-fsl-spi.c
index 7963c9b4956..d2407558773 100644
--- a/drivers/spi/spi_fsl_spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -37,7 +37,7 @@
#include <asm/cpm.h>
#include <asm/qe.h>
-#include "spi_fsl_lib.h"
+#include "spi-fsl-lib.h"
/* CPM1 and CPM2 are mutually exclusive. */
#ifdef CONFIG_CPM1
@@ -684,7 +684,7 @@ static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
struct device_node *np = dev->of_node;
const u32 *iprop;
int size;
- unsigned long spi_base_ofs;
+ void __iomem *spi_base;
unsigned long pram_ofs = -ENOMEM;
/* Can't use of_address_to_resource(), QE muram isn't at 0. */
@@ -702,33 +702,27 @@ static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
return pram_ofs;
}
- /* CPM1 and CPM2 pram must be at a fixed addr. */
- if (!iprop || size != sizeof(*iprop) * 4)
- return -ENOMEM;
-
- spi_base_ofs = cpm_muram_alloc_fixed(iprop[2], 2);
- if (IS_ERR_VALUE(spi_base_ofs))
- return -ENOMEM;
+ spi_base = of_iomap(np, 1);
+ if (spi_base == NULL)
+ return -EINVAL;
if (mspi->flags & SPI_CPM2) {
pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
- if (!IS_ERR_VALUE(pram_ofs)) {
- u16 __iomem *spi_base = cpm_muram_addr(spi_base_ofs);
-
- out_be16(spi_base, pram_ofs);
- }
+ out_be16(spi_base, pram_ofs);
} else {
- struct spi_pram __iomem *pram = cpm_muram_addr(spi_base_ofs);
+ struct spi_pram __iomem *pram = spi_base;
u16 rpbase = in_be16(&pram->rpbase);
/* Microcode relocation patch applied? */
if (rpbase)
pram_ofs = rpbase;
- else
- return spi_base_ofs;
+ else {
+ pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
+ out_be16(spi_base, pram_ofs);
+ }
}
- cpm_muram_free(spi_base_ofs);
+ iounmap(spi_base);
return pram_ofs;
}
diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi-gpio.c
index 63e51b011d5..0e88ab74549 100644
--- a/drivers/spi/spi_gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -1,5 +1,5 @@
/*
- * spi_gpio.c - SPI master driver using generic bitbanged GPIO
+ * SPI master driver using generic bitbanged GPIO
*
* Copyright (C) 2006,2008 David Brownell
*
@@ -69,7 +69,7 @@ struct spi_gpio {
* #define SPI_MOSI_GPIO 120
* #define SPI_SCK_GPIO 121
* #define SPI_N_CHIPSEL 4
- * #include "spi_gpio.c"
+ * #include "spi-gpio.c"
*/
#ifndef DRIVER_NAME
@@ -127,7 +127,7 @@ static inline int getmiso(const struct spi_device *spi)
*/
#define spidelay(nsecs) do {} while (0)
-#include "spi_bitbang_txrx.h"
+#include "spi-bitbang-txrx.h"
/*
* These functions can leverage inline expansion of GPIO calls to shrink
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi-imx.c
index 69d6dba67c1..8ac6542aedc 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi-imx.c
@@ -34,6 +34,9 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <mach/spi.h>
@@ -45,9 +48,6 @@
#define MXC_CSPIINT 0x0c
#define MXC_RESET 0x1c
-#define MX3_CSPISTAT 0x14
-#define MX3_CSPISTAT_RR (1 << 3)
-
/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
@@ -60,12 +60,12 @@ struct spi_imx_config {
};
enum spi_imx_devtype {
- SPI_IMX_VER_IMX1,
- SPI_IMX_VER_0_0,
- SPI_IMX_VER_0_4,
- SPI_IMX_VER_0_5,
- SPI_IMX_VER_0_7,
- SPI_IMX_VER_2_3,
+ IMX1_CSPI,
+ IMX21_CSPI,
+ IMX27_CSPI,
+ IMX31_CSPI,
+ IMX35_CSPI, /* CSPI on all i.mx except above */
+ IMX51_ECSPI, /* ECSPI on i.mx51 and later */
};
struct spi_imx_data;
@@ -76,7 +76,7 @@ struct spi_imx_devtype_data {
void (*trigger)(struct spi_imx_data *);
int (*rx_available)(struct spi_imx_data *);
void (*reset)(struct spi_imx_data *);
- unsigned int fifosize;
+ enum spi_imx_devtype devtype;
};
struct spi_imx_data {
@@ -87,7 +87,6 @@ struct spi_imx_data {
int irq;
struct clk *clk;
unsigned long spi_clk;
- int *chipselect;
unsigned int count;
void (*tx)(struct spi_imx_data *);
@@ -96,9 +95,25 @@ struct spi_imx_data {
const void *tx_buf;
unsigned int txfifo; /* number of words pushed in tx FIFO */
- struct spi_imx_devtype_data devtype_data;
+ struct spi_imx_devtype_data *devtype_data;
+ int chipselect[0];
};
+static inline int is_imx27_cspi(struct spi_imx_data *d)
+{
+ return d->devtype_data->devtype == IMX27_CSPI;
+}
+
+static inline int is_imx35_cspi(struct spi_imx_data *d)
+{
+ return d->devtype_data->devtype == IMX35_CSPI;
+}
+
+static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d)
+{
+ return (d->devtype_data->devtype == IMX51_ECSPI) ? 64 : 8;
+}
+
#define MXC_SPI_BUF_RX(type) \
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \
{ \
@@ -140,14 +155,9 @@ static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
- unsigned int fspi)
+ unsigned int fspi, unsigned int max)
{
- int i, max;
-
- if (cpu_is_mx21())
- max = 18;
- else
- max = 16;
+ int i;
for (i = 2; i < max; i++)
if (fspi * mxc_clkdivs[i] >= fin)
@@ -171,30 +181,30 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
return 7;
}
-#define SPI_IMX2_3_CTRL 0x08
-#define SPI_IMX2_3_CTRL_ENABLE (1 << 0)
-#define SPI_IMX2_3_CTRL_XCH (1 << 2)
-#define SPI_IMX2_3_CTRL_MODE_MASK (0xf << 4)
-#define SPI_IMX2_3_CTRL_POSTDIV_OFFSET 8
-#define SPI_IMX2_3_CTRL_PREDIV_OFFSET 12
-#define SPI_IMX2_3_CTRL_CS(cs) ((cs) << 18)
-#define SPI_IMX2_3_CTRL_BL_OFFSET 20
+#define MX51_ECSPI_CTRL 0x08
+#define MX51_ECSPI_CTRL_ENABLE (1 << 0)
+#define MX51_ECSPI_CTRL_XCH (1 << 2)
+#define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
+#define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
+#define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
+#define MX51_ECSPI_CTRL_CS(cs) ((cs) << 18)
+#define MX51_ECSPI_CTRL_BL_OFFSET 20
-#define SPI_IMX2_3_CONFIG 0x0c
-#define SPI_IMX2_3_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0))
-#define SPI_IMX2_3_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4))
-#define SPI_IMX2_3_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8))
-#define SPI_IMX2_3_CONFIG_SSBPOL(cs) (1 << ((cs) + 12))
+#define MX51_ECSPI_CONFIG 0x0c
+#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0))
+#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4))
+#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8))
+#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12))
-#define SPI_IMX2_3_INT 0x10
-#define SPI_IMX2_3_INT_TEEN (1 << 0)
-#define SPI_IMX2_3_INT_RREN (1 << 3)
+#define MX51_ECSPI_INT 0x10
+#define MX51_ECSPI_INT_TEEN (1 << 0)
+#define MX51_ECSPI_INT_RREN (1 << 3)
-#define SPI_IMX2_3_STAT 0x18
-#define SPI_IMX2_3_STAT_RR (1 << 3)
+#define MX51_ECSPI_STAT 0x18
+#define MX51_ECSPI_STAT_RR (1 << 3)
/* MX51 eCSPI */
-static unsigned int spi_imx2_3_clkdiv(unsigned int fin, unsigned int fspi)
+static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi)
{
/*
* there are two 4-bit dividers, the pre-divider divides by
@@ -222,36 +232,36 @@ static unsigned int spi_imx2_3_clkdiv(unsigned int fin, unsigned int fspi)
pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
__func__, fin, fspi, post, pre);
- return (pre << SPI_IMX2_3_CTRL_PREDIV_OFFSET) |
- (post << SPI_IMX2_3_CTRL_POSTDIV_OFFSET);
+ return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
+ (post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
-static void __maybe_unused spi_imx2_3_intctrl(struct spi_imx_data *spi_imx, int enable)
+static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
unsigned val = 0;
if (enable & MXC_INT_TE)
- val |= SPI_IMX2_3_INT_TEEN;
+ val |= MX51_ECSPI_INT_TEEN;
if (enable & MXC_INT_RR)
- val |= SPI_IMX2_3_INT_RREN;
+ val |= MX51_ECSPI_INT_RREN;
- writel(val, spi_imx->base + SPI_IMX2_3_INT);
+ writel(val, spi_imx->base + MX51_ECSPI_INT);
}
-static void __maybe_unused spi_imx2_3_trigger(struct spi_imx_data *spi_imx)
+static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
u32 reg;
- reg = readl(spi_imx->base + SPI_IMX2_3_CTRL);
- reg |= SPI_IMX2_3_CTRL_XCH;
- writel(reg, spi_imx->base + SPI_IMX2_3_CTRL);
+ reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ reg |= MX51_ECSPI_CTRL_XCH;
+ writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
-static int __maybe_unused spi_imx2_3_config(struct spi_imx_data *spi_imx,
+static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
struct spi_imx_config *config)
{
- u32 ctrl = SPI_IMX2_3_CTRL_ENABLE, cfg = 0;
+ u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
/*
* The hardware seems to have a race condition when changing modes. The
@@ -260,42 +270,42 @@ static int __maybe_unused spi_imx2_3_config(struct spi_imx_data *spi_imx,
* the same time.
* So set master mode for all channels as we do not support slave mode.
*/
- ctrl |= SPI_IMX2_3_CTRL_MODE_MASK;
+ ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
/* set clock speed */
- ctrl |= spi_imx2_3_clkdiv(spi_imx->spi_clk, config->speed_hz);
+ ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz);
/* set chip select to use */
- ctrl |= SPI_IMX2_3_CTRL_CS(config->cs);
+ ctrl |= MX51_ECSPI_CTRL_CS(config->cs);
- ctrl |= (config->bpw - 1) << SPI_IMX2_3_CTRL_BL_OFFSET;
+ ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
- cfg |= SPI_IMX2_3_CONFIG_SBBCTRL(config->cs);
+ cfg |= MX51_ECSPI_CONFIG_SBBCTRL(config->cs);
if (config->mode & SPI_CPHA)
- cfg |= SPI_IMX2_3_CONFIG_SCLKPHA(config->cs);
+ cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
if (config->mode & SPI_CPOL)
- cfg |= SPI_IMX2_3_CONFIG_SCLKPOL(config->cs);
+ cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
if (config->mode & SPI_CS_HIGH)
- cfg |= SPI_IMX2_3_CONFIG_SSBPOL(config->cs);
+ cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
- writel(ctrl, spi_imx->base + SPI_IMX2_3_CTRL);
- writel(cfg, spi_imx->base + SPI_IMX2_3_CONFIG);
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+ writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
return 0;
}
-static int __maybe_unused spi_imx2_3_rx_available(struct spi_imx_data *spi_imx)
+static int __maybe_unused mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
- return readl(spi_imx->base + SPI_IMX2_3_STAT) & SPI_IMX2_3_STAT_RR;
+ return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}
-static void __maybe_unused spi_imx2_3_reset(struct spi_imx_data *spi_imx)
+static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
/* drain receive buffer */
- while (spi_imx2_3_rx_available(spi_imx))
+ while (mx51_ecspi_rx_available(spi_imx))
readl(spi_imx->base + MXC_CSPIRXDATA);
}
@@ -343,32 +353,7 @@ static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
-static int __maybe_unused spi_imx0_4_config(struct spi_imx_data *spi_imx,
- struct spi_imx_config *config)
-{
- unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
- int cs = spi_imx->chipselect[config->cs];
-
- reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
- MX31_CSPICTRL_DR_SHIFT;
-
- reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
-
- if (config->mode & SPI_CPHA)
- reg |= MX31_CSPICTRL_PHA;
- if (config->mode & SPI_CPOL)
- reg |= MX31_CSPICTRL_POL;
- if (config->mode & SPI_CS_HIGH)
- reg |= MX31_CSPICTRL_SSPOL;
- if (cs < 0)
- reg |= (cs + 32) << MX31_CSPICTRL_CS_SHIFT;
-
- writel(reg, spi_imx->base + MXC_CSPICTRL);
-
- return 0;
-}
-
-static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx,
+static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx,
struct spi_imx_config *config)
{
unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
@@ -377,8 +362,12 @@ static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx,
reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
MX31_CSPICTRL_DR_SHIFT;
- reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
- reg |= MX31_CSPICTRL_SSCTL;
+ if (is_imx35_cspi(spi_imx)) {
+ reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
+ reg |= MX31_CSPICTRL_SSCTL;
+ } else {
+ reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
+ }
if (config->mode & SPI_CPHA)
reg |= MX31_CSPICTRL_PHA;
@@ -387,7 +376,9 @@ static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx,
if (config->mode & SPI_CS_HIGH)
reg |= MX31_CSPICTRL_SSPOL;
if (cs < 0)
- reg |= (cs + 32) << MX35_CSPICTRL_CS_SHIFT;
+ reg |= (cs + 32) <<
+ (is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
+ MX31_CSPICTRL_CS_SHIFT);
writel(reg, spi_imx->base + MXC_CSPICTRL);
@@ -399,77 +390,78 @@ static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx)
return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}
-static void __maybe_unused spi_imx0_4_reset(struct spi_imx_data *spi_imx)
+static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx)
{
/* drain receive buffer */
- while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR)
+ while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
readl(spi_imx->base + MXC_CSPIRXDATA);
}
-#define MX27_INTREG_RR (1 << 4)
-#define MX27_INTREG_TEEN (1 << 9)
-#define MX27_INTREG_RREN (1 << 13)
+#define MX21_INTREG_RR (1 << 4)
+#define MX21_INTREG_TEEN (1 << 9)
+#define MX21_INTREG_RREN (1 << 13)
-#define MX27_CSPICTRL_POL (1 << 5)
-#define MX27_CSPICTRL_PHA (1 << 6)
-#define MX27_CSPICTRL_SSPOL (1 << 8)
-#define MX27_CSPICTRL_XCH (1 << 9)
-#define MX27_CSPICTRL_ENABLE (1 << 10)
-#define MX27_CSPICTRL_MASTER (1 << 11)
-#define MX27_CSPICTRL_DR_SHIFT 14
-#define MX27_CSPICTRL_CS_SHIFT 19
+#define MX21_CSPICTRL_POL (1 << 5)
+#define MX21_CSPICTRL_PHA (1 << 6)
+#define MX21_CSPICTRL_SSPOL (1 << 8)
+#define MX21_CSPICTRL_XCH (1 << 9)
+#define MX21_CSPICTRL_ENABLE (1 << 10)
+#define MX21_CSPICTRL_MASTER (1 << 11)
+#define MX21_CSPICTRL_DR_SHIFT 14
+#define MX21_CSPICTRL_CS_SHIFT 19
-static void __maybe_unused mx27_intctrl(struct spi_imx_data *spi_imx, int enable)
+static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
unsigned int val = 0;
if (enable & MXC_INT_TE)
- val |= MX27_INTREG_TEEN;
+ val |= MX21_INTREG_TEEN;
if (enable & MXC_INT_RR)
- val |= MX27_INTREG_RREN;
+ val |= MX21_INTREG_RREN;
writel(val, spi_imx->base + MXC_CSPIINT);
}
-static void __maybe_unused mx27_trigger(struct spi_imx_data *spi_imx)
+static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx)
{
unsigned int reg;
reg = readl(spi_imx->base + MXC_CSPICTRL);
- reg |= MX27_CSPICTRL_XCH;
+ reg |= MX21_CSPICTRL_XCH;
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
-static int __maybe_unused mx27_config(struct spi_imx_data *spi_imx,
+static int __maybe_unused mx21_config(struct spi_imx_data *spi_imx,
struct spi_imx_config *config)
{
- unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER;
+ unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
int cs = spi_imx->chipselect[config->cs];
+ unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
- reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) <<
- MX27_CSPICTRL_DR_SHIFT;
+ reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
+ MX21_CSPICTRL_DR_SHIFT;
reg |= config->bpw - 1;
if (config->mode & SPI_CPHA)
- reg |= MX27_CSPICTRL_PHA;
+ reg |= MX21_CSPICTRL_PHA;
if (config->mode & SPI_CPOL)
- reg |= MX27_CSPICTRL_POL;
+ reg |= MX21_CSPICTRL_POL;
if (config->mode & SPI_CS_HIGH)
- reg |= MX27_CSPICTRL_SSPOL;
+ reg |= MX21_CSPICTRL_SSPOL;
if (cs < 0)
- reg |= (cs + 32) << MX27_CSPICTRL_CS_SHIFT;
+ reg |= (cs + 32) << MX21_CSPICTRL_CS_SHIFT;
writel(reg, spi_imx->base + MXC_CSPICTRL);
return 0;
}
-static int __maybe_unused mx27_rx_available(struct spi_imx_data *spi_imx)
+static int __maybe_unused mx21_rx_available(struct spi_imx_data *spi_imx)
{
- return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR;
+ return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}
-static void __maybe_unused spi_imx0_0_reset(struct spi_imx_data *spi_imx)
+static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx)
{
writel(1, spi_imx->base + MXC_RESET);
}
@@ -535,61 +527,94 @@ static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx)
writel(1, spi_imx->base + MXC_RESET);
}
-/*
- * These version numbers are taken from the Freescale driver. Unfortunately it
- * doesn't support i.MX1, so this entry doesn't match the scheme. :-(
- */
-static struct spi_imx_devtype_data spi_imx_devtype_data[] __devinitdata = {
-#ifdef CONFIG_SPI_IMX_VER_IMX1
- [SPI_IMX_VER_IMX1] = {
- .intctrl = mx1_intctrl,
- .config = mx1_config,
- .trigger = mx1_trigger,
- .rx_available = mx1_rx_available,
- .reset = mx1_reset,
- .fifosize = 8,
- },
-#endif
-#ifdef CONFIG_SPI_IMX_VER_0_0
- [SPI_IMX_VER_0_0] = {
- .intctrl = mx27_intctrl,
- .config = mx27_config,
- .trigger = mx27_trigger,
- .rx_available = mx27_rx_available,
- .reset = spi_imx0_0_reset,
- .fifosize = 8,
- },
-#endif
-#ifdef CONFIG_SPI_IMX_VER_0_4
- [SPI_IMX_VER_0_4] = {
- .intctrl = mx31_intctrl,
- .config = spi_imx0_4_config,
- .trigger = mx31_trigger,
- .rx_available = mx31_rx_available,
- .reset = spi_imx0_4_reset,
- .fifosize = 8,
- },
-#endif
-#ifdef CONFIG_SPI_IMX_VER_0_7
- [SPI_IMX_VER_0_7] = {
- .intctrl = mx31_intctrl,
- .config = spi_imx0_7_config,
- .trigger = mx31_trigger,
- .rx_available = mx31_rx_available,
- .reset = spi_imx0_4_reset,
- .fifosize = 8,
- },
-#endif
-#ifdef CONFIG_SPI_IMX_VER_2_3
- [SPI_IMX_VER_2_3] = {
- .intctrl = spi_imx2_3_intctrl,
- .config = spi_imx2_3_config,
- .trigger = spi_imx2_3_trigger,
- .rx_available = spi_imx2_3_rx_available,
- .reset = spi_imx2_3_reset,
- .fifosize = 64,
- },
-#endif
+static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
+ .intctrl = mx1_intctrl,
+ .config = mx1_config,
+ .trigger = mx1_trigger,
+ .rx_available = mx1_rx_available,
+ .reset = mx1_reset,
+ .devtype = IMX1_CSPI,
+};
+
+static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
+ .intctrl = mx21_intctrl,
+ .config = mx21_config,
+ .trigger = mx21_trigger,
+ .rx_available = mx21_rx_available,
+ .reset = mx21_reset,
+ .devtype = IMX21_CSPI,
+};
+
+static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
+ /* i.mx27 cspi shares its functions with the i.mx21 one */
+ .intctrl = mx21_intctrl,
+ .config = mx21_config,
+ .trigger = mx21_trigger,
+ .rx_available = mx21_rx_available,
+ .reset = mx21_reset,
+ .devtype = IMX27_CSPI,
+};
+
+static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
+ .intctrl = mx31_intctrl,
+ .config = mx31_config,
+ .trigger = mx31_trigger,
+ .rx_available = mx31_rx_available,
+ .reset = mx31_reset,
+ .devtype = IMX31_CSPI,
+};
+
+static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
+ /* i.mx35 and later cspi share their functions with the i.mx31 one */
+ .intctrl = mx31_intctrl,
+ .config = mx31_config,
+ .trigger = mx31_trigger,
+ .rx_available = mx31_rx_available,
+ .reset = mx31_reset,
+ .devtype = IMX35_CSPI,
+};
+
+static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
+ .intctrl = mx51_ecspi_intctrl,
+ .config = mx51_ecspi_config,
+ .trigger = mx51_ecspi_trigger,
+ .rx_available = mx51_ecspi_rx_available,
+ .reset = mx51_ecspi_reset,
+ .devtype = IMX51_ECSPI,
+};
+
+static struct platform_device_id spi_imx_devtype[] = {
+ {
+ .name = "imx1-cspi",
+ .driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
+ }, {
+ .name = "imx21-cspi",
+ .driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
+ }, {
+ .name = "imx27-cspi",
+ .driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
+ }, {
+ .name = "imx31-cspi",
+ .driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
+ }, {
+ .name = "imx35-cspi",
+ .driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
+ }, {
+ .name = "imx51-ecspi",
+ .driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct of_device_id spi_imx_dt_ids[] = {
+ { .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
+ { .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
+ { .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
+ { .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
+ { .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
+ { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
+ { /* sentinel */ }
};
static void spi_imx_chipselect(struct spi_device *spi, int is_active)
@@ -607,21 +632,21 @@ static void spi_imx_chipselect(struct spi_device *spi, int is_active)
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
- while (spi_imx->txfifo < spi_imx->devtype_data.fifosize) {
+ while (spi_imx->txfifo < spi_imx_get_fifosize(spi_imx)) {
if (!spi_imx->count)
break;
spi_imx->tx(spi_imx);
spi_imx->txfifo++;
}
- spi_imx->devtype_data.trigger(spi_imx);
+ spi_imx->devtype_data->trigger(spi_imx);
}
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
struct spi_imx_data *spi_imx = dev_id;
- while (spi_imx->devtype_data.rx_available(spi_imx)) {
+ while (spi_imx->devtype_data->rx_available(spi_imx)) {
spi_imx->rx(spi_imx);
spi_imx->txfifo--;
}
@@ -635,12 +660,12 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id)
/* No data left to push, but still waiting for rx data,
* enable receive data available interrupt.
*/
- spi_imx->devtype_data.intctrl(
+ spi_imx->devtype_data->intctrl(
spi_imx, MXC_INT_RR);
return IRQ_HANDLED;
}
- spi_imx->devtype_data.intctrl(spi_imx, 0);
+ spi_imx->devtype_data->intctrl(spi_imx, 0);
complete(&spi_imx->xfer_done);
return IRQ_HANDLED;
@@ -677,7 +702,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
} else
BUG();
- spi_imx->devtype_data.config(spi_imx, &config);
+ spi_imx->devtype_data->config(spi_imx, &config);
return 0;
}
@@ -696,7 +721,7 @@ static int spi_imx_transfer(struct spi_device *spi,
spi_imx_push(spi_imx);
- spi_imx->devtype_data.intctrl(spi_imx, MXC_INT_TE);
+ spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);
wait_for_completion(&spi_imx->xfer_done);
@@ -723,72 +748,47 @@ static void spi_imx_cleanup(struct spi_device *spi)
{
}
-static struct platform_device_id spi_imx_devtype[] = {
- {
- .name = "imx1-cspi",
- .driver_data = SPI_IMX_VER_IMX1,
- }, {
- .name = "imx21-cspi",
- .driver_data = SPI_IMX_VER_0_0,
- }, {
- .name = "imx25-cspi",
- .driver_data = SPI_IMX_VER_0_7,
- }, {
- .name = "imx27-cspi",
- .driver_data = SPI_IMX_VER_0_0,
- }, {
- .name = "imx31-cspi",
- .driver_data = SPI_IMX_VER_0_4,
- }, {
- .name = "imx35-cspi",
- .driver_data = SPI_IMX_VER_0_7,
- }, {
- .name = "imx51-cspi",
- .driver_data = SPI_IMX_VER_0_7,
- }, {
- .name = "imx51-ecspi",
- .driver_data = SPI_IMX_VER_2_3,
- }, {
- .name = "imx53-cspi",
- .driver_data = SPI_IMX_VER_0_7,
- }, {
- .name = "imx53-ecspi",
- .driver_data = SPI_IMX_VER_2_3,
- }, {
- /* sentinel */
- }
-};
-
static int __devinit spi_imx_probe(struct platform_device *pdev)
{
- struct spi_imx_master *mxc_platform_info;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(spi_imx_dt_ids, &pdev->dev);
+ struct spi_imx_master *mxc_platform_info =
+ dev_get_platdata(&pdev->dev);
struct spi_master *master;
struct spi_imx_data *spi_imx;
struct resource *res;
- int i, ret;
+ int i, ret, num_cs;
- mxc_platform_info = dev_get_platdata(&pdev->dev);
- if (!mxc_platform_info) {
+ if (!np && !mxc_platform_info) {
dev_err(&pdev->dev, "can't get the platform data\n");
return -EINVAL;
}
- master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
+ ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
+ if (ret < 0)
+ num_cs = mxc_platform_info->num_chipselect;
+
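+ /* reserve room after spi_imx_data for one chip-select GPIO number per CS */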
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
if (!master)
return -ENOMEM;
platform_set_drvdata(pdev, master);
master->bus_num = pdev->id;
- master->num_chipselect = mxc_platform_info->num_chipselect;
+ master->num_chipselect = num_cs;
spi_imx = spi_master_get_devdata(master);
spi_imx->bitbang.master = spi_master_get(master);
- spi_imx->chipselect = mxc_platform_info->chipselect;
for (i = 0; i < master->num_chipselect; i++) {
- if (spi_imx->chipselect[i] < 0)
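+ /* take each chip-select GPIO from the DT "cs-gpios" property, falling back to platform data */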
+ int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
+ if (cs_gpio < 0)
+ cs_gpio = mxc_platform_info->chipselect[i];
+ if (cs_gpio < 0)
continue;
+ spi_imx->chipselect[i] = cs_gpio;
ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
if (ret) {
while (i > 0) {
@@ -810,8 +810,8 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
init_completion(&spi_imx->xfer_done);
- spi_imx->devtype_data =
- spi_imx_devtype_data[pdev->id_entry->driver_data];
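+ /* devtype data comes from the OF match when probed via DT, else from the platform id table */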
+ spi_imx->devtype_data = of_id ? of_id->data :
+ (struct spi_imx_devtype_data *) pdev->id_entry->driver_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -854,10 +854,11 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
clk_enable(spi_imx->clk);
spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
- spi_imx->devtype_data.reset(spi_imx);
+ spi_imx->devtype_data->reset(spi_imx);
- spi_imx->devtype_data.intctrl(spi_imx, 0);
+ spi_imx->devtype_data->intctrl(spi_imx, 0);
+ master->dev.of_node = pdev->dev.of_node;
ret = spi_bitbang_start(&spi_imx->bitbang);
if (ret) {
dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
@@ -920,6 +921,7 @@ static struct platform_driver spi_imx_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = spi_imx_dt_ids,
},
.id_table = spi_imx_devtype,
.probe = spi_imx_probe,
diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi-lm70llp.c
index 7746a41ab6d..933eb9d9ddd 100644
--- a/drivers/spi/spi_lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -1,5 +1,5 @@
/*
- * spi_lm70llp.c - driver for LM70EVAL-LLP board for the LM70 sensor
+ * Driver for LM70EVAL-LLP board for the LM70 sensor
*
* Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
*
@@ -174,7 +174,7 @@ static inline int getmiso(struct spi_device *s)
}
/*--------------------------------------------------------------------*/
-#include "spi_bitbang_txrx.h"
+#include "spi-bitbang-txrx.h"
static void lm70_chipselect(struct spi_device *spi, int value)
{
diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/spi-mpc512x-psc.c
index 6a5b4238fb6..6a5b4238fb6 100644
--- a/drivers/spi/mpc512x_psc_spi.c
+++ b/drivers/spi/spi-mpc512x-psc.c
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/spi-mpc52xx-psc.c
index e30baf0852a..e30baf0852a 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/spi-mpc52xx.c
index 015a974bed7..015a974bed7 100644
--- a/drivers/spi/mpc52xx_spi.c
+++ b/drivers/spi/spi-mpc52xx.c
diff --git a/drivers/spi/spi_nuc900.c b/drivers/spi/spi-nuc900.c
index 3cd15f690f1..c0a6ce81f9c 100644
--- a/drivers/spi/spi_nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -1,5 +1,4 @@
-/* linux/drivers/spi/spi_nuc900.c
- *
+/*
* Copyright (c) 2009 Nuvoton technology.
* Wan ZongShun <mcuos.com@gmail.com>
*
@@ -7,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
-*/
+ */
#include <linux/init.h>
#include <linux/spinlock.h>
diff --git a/drivers/spi/spi_oc_tiny.c b/drivers/spi/spi-oc-tiny.c
index f1bde66cea1..f1bde66cea1 100644
--- a/drivers/spi/spi_oc_tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
diff --git a/drivers/spi/omap_spi_100k.c b/drivers/spi/spi-omap-100k.c
index 9bd1c92ad96..9bd1c92ad96 100644
--- a/drivers/spi/omap_spi_100k.c
+++ b/drivers/spi/spi-omap-100k.c
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/spi-omap-uwire.c
index 160d3266205..00a8e9d7dbe 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -1,5 +1,5 @@
/*
- * omap_uwire.c -- MicroWire interface driver for OMAP
+ * MicroWire interface driver for OMAP
*
* Copyright 2003 MontaVista Software Inc. <source@mvista.com>
*
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 969cdd2fe12..fde3a2d4f12 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1116,8 +1116,8 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
status = -ENODEV;
goto err1;
}
- if (!request_mem_region(r->start, (r->end - r->start) + 1,
- dev_name(&pdev->dev))) {
+ if (!request_mem_region(r->start, resource_size(r),
+ dev_name(&pdev->dev))) {
status = -EBUSY;
goto err1;
}
@@ -1125,7 +1125,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
r->start += pdata->regs_offset;
r->end += pdata->regs_offset;
mcspi->phys = r->start;
- mcspi->base = ioremap(r->start, r->end - r->start + 1);
+ mcspi->base = ioremap(r->start, resource_size(r));
if (!mcspi->base) {
dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
status = -ENOMEM;
@@ -1190,7 +1190,7 @@ err4:
err3:
kfree(mcspi->dma_channels);
err2:
- release_mem_region(r->start, (r->end - r->start) + 1);
+ release_mem_region(r->start, resource_size(r));
iounmap(mcspi->base);
err1:
return status;
@@ -1210,7 +1210,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
omap2_mcspi_disable_clocks(mcspi);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, (r->end - r->start) + 1);
+ release_mem_region(r->start, resource_size(r));
base = mcspi->base;
spi_unregister_master(master);
diff --git a/drivers/spi/orion_spi.c b/drivers/spi/spi-orion.c
index 0b677dc041a..9421a390a5e 100644
--- a/drivers/spi/orion_spi.c
+++ b/drivers/spi/spi-orion.c
@@ -1,5 +1,5 @@
/*
- * orion_spi.c -- Marvell Orion SPI controller driver
+ * Marvell Orion SPI controller driver
*
* Author: Shadi Ammouri <shadi@marvell.com>
* Copyright (C) 2007-2008 Marvell Ltd.
@@ -489,7 +489,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
goto out;
}
- if (!request_mem_region(r->start, (r->end - r->start) + 1,
+ if (!request_mem_region(r->start, resource_size(r),
dev_name(&pdev->dev))) {
status = -EBUSY;
goto out;
@@ -511,7 +511,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
return status;
out_rel_mem:
- release_mem_region(r->start, (r->end - r->start) + 1);
+ release_mem_region(r->start, resource_size(r));
out:
spi_master_put(master);
@@ -531,7 +531,7 @@ static int __exit orion_spi_remove(struct platform_device *pdev)
cancel_work_sync(&spi->work);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, (r->end - r->start) + 1);
+ release_mem_region(r->start, resource_size(r));
spi_unregister_master(master);
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/spi-pl022.c
index d18ce9e946d..730b4a37b82 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1,6 +1,4 @@
/*
- * drivers/spi/amba-pl022.c
- *
* A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
*
* Copyright (C) 2008-2009 ST-Ericsson AB
@@ -42,6 +40,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
+#include <linux/pm_runtime.h>
/*
* This macro is used to define some register default values.
@@ -383,6 +382,8 @@ struct pl022 {
enum ssp_reading read;
enum ssp_writing write;
u32 exp_fifo_level;
+ enum ssp_rx_level_trig rx_lev_trig;
+ enum ssp_tx_level_trig tx_lev_trig;
/* DMA settings */
#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_rx_channel;
@@ -517,6 +518,7 @@ static void giveback(struct pl022 *pl022)
clk_disable(pl022->clk);
amba_pclk_disable(pl022->adev);
amba_vcore_disable(pl022->adev);
+ pm_runtime_put(&pl022->adev->dev);
}
/**
@@ -909,12 +911,10 @@ static int configure_dma(struct pl022 *pl022)
struct dma_slave_config rx_conf = {
.src_addr = SSP_DR(pl022->phybase),
.direction = DMA_FROM_DEVICE,
- .src_maxburst = pl022->vendor->fifodepth >> 1,
};
struct dma_slave_config tx_conf = {
.dst_addr = SSP_DR(pl022->phybase),
.direction = DMA_TO_DEVICE,
- .dst_maxburst = pl022->vendor->fifodepth >> 1,
};
unsigned int pages;
int ret;
@@ -928,6 +928,54 @@ static int configure_dma(struct pl022 *pl022)
if (!rxchan || !txchan)
return -ENODEV;
+ /*
+ * If supplied, the DMA burst size should equal the FIFO trigger level.
+ * Notice that the DMA engine uses a one-to-one mapping. Since we cannot
+ * trigger on 2 elements, this needs an explicit mapping rather than a
+ * calculation.
+ */
+ switch (pl022->rx_lev_trig) {
+ case SSP_RX_1_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 1;
+ break;
+ case SSP_RX_4_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 4;
+ break;
+ case SSP_RX_8_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 8;
+ break;
+ case SSP_RX_16_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 16;
+ break;
+ case SSP_RX_32_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 32;
+ break;
+ default:
+ rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1;
+ break;
+ }
+
+ switch (pl022->tx_lev_trig) {
+ case SSP_TX_1_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 1;
+ break;
+ case SSP_TX_4_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 4;
+ break;
+ case SSP_TX_8_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 8;
+ break;
+ case SSP_TX_16_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 16;
+ break;
+ case SSP_TX_32_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 32;
+ break;
+ default:
+ tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1;
+ break;
+ }
+
switch (pl022->read) {
case READING_NULL:
/* Use the same as for writing */
@@ -1496,6 +1544,7 @@ static void pump_messages(struct work_struct *work)
* and core will be disabled when giveback() is called in each method
* (poll/interrupt/DMA)
*/
+ pm_runtime_get_sync(&pl022->adev->dev);
amba_vcore_enable(pl022->adev);
amba_pclk_enable(pl022->adev);
clk_enable(pl022->clk);
@@ -1629,17 +1678,57 @@ static int verify_controller_parameters(struct pl022 *pl022,
"Communication mode is configured incorrectly\n");
return -EINVAL;
}
- if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
- || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
+ switch (chip_info->rx_lev_trig) {
+ case SSP_RX_1_OR_MORE_ELEM:
+ case SSP_RX_4_OR_MORE_ELEM:
+ case SSP_RX_8_OR_MORE_ELEM:
+ /* These are always OK, all variants can handle this */
+ break;
+ case SSP_RX_16_OR_MORE_ELEM:
+ if (pl022->vendor->fifodepth < 16) {
+ dev_err(&pl022->adev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ case SSP_RX_32_OR_MORE_ELEM:
+ if (pl022->vendor->fifodepth < 32) {
+ dev_err(&pl022->adev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ default:
dev_err(&pl022->adev->dev,
"RX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
+ break;
}
- if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
- || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
+ switch (chip_info->tx_lev_trig) {
+ case SSP_TX_1_OR_MORE_EMPTY_LOC:
+ case SSP_TX_4_OR_MORE_EMPTY_LOC:
+ case SSP_TX_8_OR_MORE_EMPTY_LOC:
+ /* These are always OK, all variants can handle this */
+ break;
+ case SSP_TX_16_OR_MORE_EMPTY_LOC:
+ if (pl022->vendor->fifodepth < 16) {
+ dev_err(&pl022->adev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ case SSP_TX_32_OR_MORE_EMPTY_LOC:
+ if (pl022->vendor->fifodepth < 32) {
+ dev_err(&pl022->adev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ default:
dev_err(&pl022->adev->dev,
"TX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
+ break;
}
if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
if ((chip_info->ctrl_len < SSP_BITS_4)
@@ -1874,6 +1963,9 @@ static int pl022_setup(struct spi_device *spi)
goto err_config_params;
}
+ pl022->rx_lev_trig = chip_info->rx_lev_trig;
+ pl022->tx_lev_trig = chip_info->tx_lev_trig;
+
/* Now set controller state based on controller data */
chip->xfer_type = chip_info->com_mode;
if (!chip_info->cs_control) {
@@ -2094,6 +2186,8 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
}
printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
adev->res.start, pl022->virtbase);
+ pm_runtime_enable(dev);
+ pm_runtime_resume(dev);
pl022->clk = clk_get(&adev->dev, NULL);
if (IS_ERR(pl022->clk)) {
@@ -2155,6 +2249,7 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
destroy_queue(pl022);
pl022_dma_remove(pl022);
free_irq(adev->irq[0], pl022);
+ pm_runtime_disable(&adev->dev);
err_no_irq:
clk_put(pl022->clk);
err_no_clk:
@@ -2172,17 +2267,13 @@ static int __devexit
pl022_remove(struct amba_device *adev)
{
struct pl022 *pl022 = amba_get_drvdata(adev);
- int status = 0;
+
if (!pl022)
return 0;
/* Remove the queue */
- status = destroy_queue(pl022);
- if (status != 0) {
- dev_err(&adev->dev,
- "queue remove failed (%d)\n", status);
- return status;
- }
+ if (destroy_queue(pl022) != 0)
+ dev_err(&adev->dev, "queue remove failed\n");
load_ssp_default_config(pl022);
pl022_dma_remove(pl022);
free_irq(adev->irq[0], pl022);
@@ -2194,7 +2285,6 @@ pl022_remove(struct amba_device *adev)
spi_unregister_master(pl022->master);
spi_master_put(pl022->master);
amba_set_drvdata(adev, NULL);
- dev_dbg(&adev->dev, "remove succeeded\n");
return 0;
}
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 2a298c02919..b267fd901e5 100644
--- a/drivers/spi/spi_ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -502,7 +502,7 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
goto free_gpios;
}
hw->mapbase = resource.start;
- hw->mapsize = resource.end - resource.start + 1;
+ hw->mapsize = resource_size(&resource);
/* Sanity check */
if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) {
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 378e504f89e..378e504f89e 100644
--- a/drivers/spi/pxa2xx_spi_pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/spi-pxa2xx.c
index dc25bee8d33..dc25bee8d33 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/spi-pxa2xx.c
diff --git a/drivers/spi/spi_s3c24xx_fiq.S b/drivers/spi/spi-s3c24xx-fiq.S
index 3793cae361d..059f2dc1fda 100644
--- a/drivers/spi/spi_s3c24xx_fiq.S
+++ b/drivers/spi/spi-s3c24xx-fiq.S
@@ -17,7 +17,7 @@
#include <mach/regs-irq.h>
#include <plat/regs-spi.h>
-#include "spi_s3c24xx_fiq.h"
+#include "spi-s3c24xx-fiq.h"
.text
diff --git a/drivers/spi/spi_s3c24xx_fiq.h b/drivers/spi/spi-s3c24xx-fiq.h
index a5950bb25b5..a5950bb25b5 100644
--- a/drivers/spi/spi_s3c24xx_fiq.h
+++ b/drivers/spi/spi-s3c24xx-fiq.h
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 1a5fcabfd56..1996ac57ef9 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -1,5 +1,4 @@
-/* linux/drivers/spi/spi_s3c24xx.c
- *
+/*
* Copyright (c) 2006 Ben Dooks
* Copyright 2006-2009 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
@@ -32,7 +31,7 @@
#include <plat/fiq.h>
#include <asm/fiq.h>
-#include "spi_s3c24xx_fiq.h"
+#include "spi-s3c24xx-fiq.h"
/**
* s3c24xx_spi_devstate - per device data
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 795828b90f4..595dacc7645 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1,5 +1,4 @@
-/* linux/drivers/spi/spi_s3c64xx.c
- *
+/*
* Copyright (C) 2009 Samsung Electronics Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
*
@@ -116,9 +115,7 @@
(((i)->fifo_lvl_mask + 1))) \
? 1 : 0)
-#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \
- (((i)->fifo_lvl_mask + 1) << 1)) \
- ? 1 : 0)
+#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & (1 << (i)->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi-sh-msiof.c
index e00d94b2225..e00d94b2225 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi-sh-sci.c
index 5c643916119..e7779c09f6e 100644
--- a/drivers/spi/spi_sh_sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -78,7 +78,7 @@ static inline u32 getmiso(struct spi_device *dev)
#define spidelay(x) ndelay(x)
-#include "spi_bitbang_txrx.h"
+#include "spi-bitbang-txrx.h"
static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
diff --git a/drivers/spi/spi_sh.c b/drivers/spi/spi-sh.c
index 9eedd71ad89..9eedd71ad89 100644
--- a/drivers/spi/spi_sh.c
+++ b/drivers/spi/spi-sh.c
diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi-stmp.c
index fadff76eb7e..fadff76eb7e 100644
--- a/drivers/spi/spi_stmp.c
+++ b/drivers/spi/spi-stmp.c
diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi-tegra.c
index 6c3aa6ecaad..a5a6302dc8e 100644
--- a/drivers/spi/spi_tegra.c
+++ b/drivers/spi/spi-tegra.c
@@ -498,14 +498,14 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
goto err0;
}
- if (!request_mem_region(r->start, (r->end - r->start) + 1,
+ if (!request_mem_region(r->start, resource_size(r),
dev_name(&pdev->dev))) {
ret = -EBUSY;
goto err0;
}
tspi->phys = r->start;
- tspi->base = ioremap(r->start, r->end - r->start + 1);
+ tspi->base = ioremap(r->start, resource_size(r));
if (!tspi->base) {
dev_err(&pdev->dev, "can't ioremap iomem\n");
ret = -ENOMEM;
@@ -546,6 +546,7 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
tspi->rx_dma_req.dev = tspi;
+ master->dev.of_node = pdev->dev.of_node;
ret = spi_register_master(master);
if (ret < 0)
@@ -563,7 +564,7 @@ err3:
err2:
iounmap(tspi->base);
err1:
- release_mem_region(r->start, (r->end - r->start) + 1);
+ release_mem_region(r->start, resource_size(r));
err0:
spi_master_put(master);
return ret;
@@ -588,17 +589,28 @@ static int __devexit spi_tegra_remove(struct platform_device *pdev)
iounmap(tspi->base);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, (r->end - r->start) + 1);
+ release_mem_region(r->start, resource_size(r));
return 0;
}
MODULE_ALIAS("platform:spi_tegra");
+#ifdef CONFIG_OF
+static struct of_device_id spi_tegra_of_match_table[] __devinitdata = {
+ { .compatible = "nvidia,tegra20-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spi_tegra_of_match_table);
+#else /* CONFIG_OF */
+#define spi_tegra_of_match_table NULL
+#endif /* CONFIG_OF */
+
static struct platform_driver spi_tegra_driver = {
.driver = {
.name = "spi_tegra",
.owner = THIS_MODULE,
+ .of_match_table = spi_tegra_of_match_table,
},
.remove = __devexit_p(spi_tegra_remove),
};
diff --git a/drivers/spi/ti-ssp-spi.c b/drivers/spi/spi-ti-ssp.c
index ee22795c797..ee22795c797 100644
--- a/drivers/spi/ti-ssp-spi.c
+++ b/drivers/spi/spi-ti-ssp.c
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/spi-tle62x0.c
index 32a40876532..940e73d1cf0 100644
--- a/drivers/spi/tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -1,5 +1,5 @@
/*
- * tle62x0.c -- support Infineon TLE62x0 driver chips
+ * Support Infineon TLE62x0 driver chips
*
* Copyright (c) 2007 Simtec Electronics
* Ben Dooks, <ben@simtec.co.uk>
diff --git a/drivers/spi/spi_topcliff_pch.c b/drivers/spi/spi-topcliff-pch.c
index 79e48d45113..1d23f383186 100644
--- a/drivers/spi/spi_topcliff_pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -26,6 +26,10 @@
#include <linux/spi/spidev.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <linux/dmaengine.h>
+#include <linux/pch_dma.h>
/* Register offsets */
#define PCH_SPCR 0x00 /* SPI control register */
@@ -35,6 +39,7 @@
#define PCH_SPDRR 0x10 /* SPI read data register */
#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */
#define PCH_SRST 0x1C /* SPI reset register */
+#define PCH_ADDRESS_SIZE 0x20
#define PCH_SPSR_TFD 0x000007C0
#define PCH_SPSR_RFD 0x0000F800
@@ -52,8 +57,6 @@
#define STATUS_EXITING 2
#define PCH_SLEEP_TIME 10
-#define PCH_ADDRESS_SIZE 0x20
-
#define SSN_LOW 0x02U
#define SSN_NO_CONTROL 0x00U
#define PCH_MAX_CS 0xFF
@@ -73,22 +76,57 @@
#define SPSR_TFI_BIT (1 << 0)
#define SPSR_RFI_BIT (1 << 1)
#define SPSR_FI_BIT (1 << 2)
+#define SPSR_ORF_BIT (1 << 3)
#define SPBRR_SIZE_BIT (1 << 10)
-#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
+#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\
+ SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
#define SPCR_RFIC_FIELD 20
#define SPCR_TFIC_FIELD 16
-#define SPSR_INT_BITS 0x1F
-#define MASK_SPBRR_SPBR_BITS (~((1 << 10) - 1))
-#define MASK_RFIC_SPCR_BITS (~(0xf << 20))
-#define MASK_TFIC_SPCR_BITS (~(0xf000f << 12))
+#define MASK_SPBRR_SPBR_BITS ((1 << 10) - 1)
+#define MASK_RFIC_SPCR_BITS (0xf << SPCR_RFIC_FIELD)
+#define MASK_TFIC_SPCR_BITS (0xf << SPCR_TFIC_FIELD)
#define PCH_CLOCK_HZ 50000000
#define PCH_MAX_SPBR 1023
+/* Definitions for ML7213/ML7223 by OKI SEMICONDUCTOR */
+#define PCI_VENDOR_ID_ROHM 0x10DB
+#define PCI_DEVICE_ID_ML7213_SPI 0x802c
+#define PCI_DEVICE_ID_ML7223_SPI 0x800F
+/*
+ * Maximum number of SPI instances per device:
+ * Intel EG20T PCH : 1ch
+ * OKI SEMICONDUCTOR ML7213 IOH : 2ch
+ * OKI SEMICONDUCTOR ML7223 IOH : 1ch
+ */
+#define PCH_SPI_MAX_DEV 2
+
+#define PCH_BUF_SIZE 4096
+#define PCH_DMA_TRANS_SIZE 12
+
+static int use_dma = 1;
+
+struct pch_spi_dma_ctrl {
+ struct dma_async_tx_descriptor *desc_tx;
+ struct dma_async_tx_descriptor *desc_rx;
+ struct pch_dma_slave param_tx;
+ struct pch_dma_slave param_rx;
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+ struct scatterlist *sg_tx_p;
+ struct scatterlist *sg_rx_p;
+ struct scatterlist sg_tx;
+ struct scatterlist sg_rx;
+ int nent;
+ void *tx_buf_virt;
+ void *rx_buf_virt;
+ dma_addr_t tx_buf_dma;
+ dma_addr_t rx_buf_dma;
+};
/**
* struct pch_spi_data - Holds the SPI channel specific details
* @io_remap_addr: The remapped PCI base address
@@ -121,9 +159,13 @@
* @cur_trans: The current transfer that this SPI driver is
* handling
* @board_dat: Reference to the SPI device data structure
+ * @plat_dev: platform_device structure
+ * @ch: SPI channel number
+ * @irq_reg_sts: Status of IRQ registration
*/
struct pch_spi_data {
void __iomem *io_remap_addr;
+ unsigned long io_base_addr;
struct spi_master *master;
struct work_struct work;
struct workqueue_struct *wk;
@@ -144,27 +186,36 @@ struct pch_spi_data {
struct spi_message *current_msg;
struct spi_transfer *cur_trans;
struct pch_spi_board_data *board_dat;
+ struct platform_device *plat_dev;
+ int ch;
+ struct pch_spi_dma_ctrl dma;
+ int use_dma;
+ u8 irq_reg_sts;
};
/**
* struct pch_spi_board_data - Holds the SPI device specific details
* @pdev: Pointer to the PCI device
- * @irq_reg_sts: Status of IRQ registration
- * @pci_req_sts: Status of pci_request_regions
* @suspend_sts: Status of suspend
- * @data: Pointer to SPI channel data structure
+ * @num: The number of SPI device instance
*/
struct pch_spi_board_data {
struct pci_dev *pdev;
- u8 irq_reg_sts;
- u8 pci_req_sts;
u8 suspend_sts;
- struct pch_spi_data *data;
+ int num;
+};
+
+struct pch_pd_dev_save {
+ int num;
+ struct platform_device *pd_save[PCH_SPI_MAX_DEV];
+ struct pch_spi_board_data *board_dat;
};
static struct pci_device_id pch_spi_pcidev_id[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_GE_SPI)},
- {0,}
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
+ { }
};
/**
@@ -251,10 +302,10 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */
/* reset rx threshold */
- reg_spcr_val &= MASK_RFIC_SPCR_BITS;
+ reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);
- iowrite32(((reg_spcr_val) &= (~(SPCR_RFIE_BIT))),
- (io_remap_addr + PCH_SPCR));
+
+ iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
}
/* update counts */
@@ -265,12 +316,15 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
/* if transfer complete interrupt */
if (reg_spsr_val & SPSR_FI_BIT) {
- /* disable FI & RFI interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
- SPCR_FIE_BIT | SPCR_RFIE_BIT);
+ if (tx_index < bpw_len)
+ dev_err(&data->master->dev,
+ "%s : Transfer is not completed", __func__);
+ /* disable interrupts */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
/* transfer is completed;inform pch_spi_process_messages */
data->transfer_complete = true;
+ data->transfer_active = false;
wake_up(&data->wait);
}
}
@@ -283,24 +337,28 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
static irqreturn_t pch_spi_handler(int irq, void *dev_id)
{
u32 reg_spsr_val;
- struct pch_spi_data *data;
void __iomem *spsr;
void __iomem *io_remap_addr;
irqreturn_t ret = IRQ_NONE;
- struct pch_spi_board_data *board_dat = dev_id;
+ struct pch_spi_data *data = dev_id;
+ struct pch_spi_board_data *board_dat = data->board_dat;
if (board_dat->suspend_sts) {
dev_dbg(&board_dat->pdev->dev,
"%s returning due to suspend\n", __func__);
return IRQ_NONE;
}
+ if (data->use_dma)
+ return IRQ_NONE;
- data = board_dat->data;
io_remap_addr = data->io_remap_addr;
spsr = io_remap_addr + PCH_SPSR;
reg_spsr_val = ioread32(spsr);
+ if (reg_spsr_val & SPSR_ORF_BIT)
+ dev_err(&board_dat->pdev->dev, "%s Over run error", __func__);
+
/* Check if the interrupt is for SPI device */
if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
@@ -326,7 +384,7 @@ static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
if (n_spbr > PCH_MAX_SPBR)
n_spbr = PCH_MAX_SPBR;
- pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, ~MASK_SPBRR_SPBR_BITS);
+ pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
}
/**
@@ -435,26 +493,27 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
dev_dbg(&pspi->dev, "%s Transfer List not empty. "
"Transfer Speed is set.\n", __func__);
+ spin_lock_irqsave(&data->lock, flags);
/* validate Tx/Rx buffers and Transfer length */
list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
if (!transfer->tx_buf && !transfer->rx_buf) {
dev_err(&pspi->dev,
"%s Tx and Rx buffer NULL\n", __func__);
retval = -EINVAL;
- goto err_out;
+ goto err_return_spinlock;
}
if (!transfer->len) {
dev_err(&pspi->dev, "%s Transfer length invalid\n",
__func__);
retval = -EINVAL;
- goto err_out;
+ goto err_return_spinlock;
}
dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length"
" valid\n", __func__);
- /* if baud rate hs been specified validate the same */
+ /* if baud rate has been specified validate the same */
if (transfer->speed_hz > PCH_MAX_BAUDRATE)
transfer->speed_hz = PCH_MAX_BAUDRATE;
@@ -465,25 +524,24 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
retval = -EINVAL;
dev_err(&pspi->dev,
"%s Invalid bits per word\n", __func__);
- goto err_out;
+ goto err_return_spinlock;
}
}
}
-
- spin_lock_irqsave(&data->lock, flags);
+ spin_unlock_irqrestore(&data->lock, flags);
/* We won't process any messages if we have been asked to terminate */
if (data->status == STATUS_EXITING) {
dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
retval = -ESHUTDOWN;
- goto err_return_spinlock;
+ goto err_out;
}
/* If suspended ,return -EINVAL */
if (data->board_dat->suspend_sts) {
dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
retval = -EINVAL;
- goto err_return_spinlock;
+ goto err_out;
}
/* set status of message */
@@ -491,9 +549,11 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);
pmsg->status = -EINPROGRESS;
-
+ spin_lock_irqsave(&data->lock, flags);
/* add message to queue */
list_add_tail(&pmsg->queue, &data->queue);
+ spin_unlock_irqrestore(&data->lock, flags);
+
dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);
/* schedule work queue to run */
@@ -502,11 +562,13 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
retval = 0;
-err_return_spinlock:
- spin_unlock_irqrestore(&data->lock, flags);
err_out:
dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
return retval;
+err_return_spinlock:
+ dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return retval;
}
static inline void pch_spi_select_chip(struct pch_spi_data *data,
@@ -527,8 +589,7 @@ static inline void pch_spi_select_chip(struct pch_spi_data *data,
pch_spi_setup_transfer(pspi);
}
-static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw,
- struct spi_message **ppmsg)
+static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
{
int size;
u32 n_writes;
@@ -537,8 +598,6 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw,
const u8 *tx_buf;
const u16 *tx_sbuf;
- pmsg = *ppmsg;
-
/* set baud rate if needed */
if (data->cur_trans->speed_hz) {
dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
@@ -621,10 +680,9 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw,
data->transfer_active = true;
}
-
-static void pch_spi_nomore_transfer(struct pch_spi_data *data,
- struct spi_message *pmsg)
+static void pch_spi_nomore_transfer(struct pch_spi_data *data)
{
+ struct spi_message *pmsg;
dev_dbg(&data->master->dev, "%s called\n", __func__);
/* Invoke complete callback
* [To the spi core..indicating end of transfer] */
@@ -675,29 +733,21 @@ static void pch_spi_nomore_transfer(struct pch_spi_data *data,
static void pch_spi_set_ir(struct pch_spi_data *data)
{
- /* enable interrupts */
- if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) {
+ /* enable interrupts, set threshold, enable SPI */
+ if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
/* set receive threshold to PCH_RX_THOLD */
pch_spi_setclr_reg(data->master, PCH_SPCR,
- PCH_RX_THOLD << SPCR_RFIC_FIELD,
- ~MASK_RFIC_SPCR_BITS);
- /* enable FI and RFI interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR,
- SPCR_RFIE_BIT | SPCR_FIE_BIT, 0);
- } else {
+ PCH_RX_THOLD << SPCR_RFIC_FIELD |
+ SPCR_FIE_BIT | SPCR_RFIE_BIT |
+ SPCR_ORIE_BIT | SPCR_SPE_BIT,
+ MASK_RFIC_SPCR_BITS | PCH_ALL);
+ else
/* set receive threshold to maximum */
pch_spi_setclr_reg(data->master, PCH_SPCR,
- PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD,
- ~MASK_TFIC_SPCR_BITS);
- /* enable FI interrupt */
- pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_FIE_BIT, 0);
- }
-
- dev_dbg(&data->master->dev,
- "%s:invoking pch_spi_set_enable to enable SPI\n", __func__);
-
- /* SPI set enable */
- pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, SPCR_SPE_BIT, 0);
+ PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
+ SPCR_FIE_BIT | SPCR_ORIE_BIT |
+ SPCR_SPE_BIT,
+ MASK_RFIC_SPCR_BITS | PCH_ALL);
/* Wait until the transfer completes; go to sleep after
initiating the transfer. */
@@ -710,15 +760,13 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
dev_dbg(&data->master->dev,
"%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
- data->transfer_active = false;
- dev_dbg(&data->master->dev,
- "%s set data->transfer_active = false\n", __func__);
-
/* clear all interrupts */
pch_spi_writereg(data->master, PCH_SPSR,
pch_spi_readreg(data->master, PCH_SPSR));
- /* disable interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+ /* Disable interrupts and SPI transfer */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
+ /* clear FIFO */
+ pch_spi_clear_fifo(data->master);
}
static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
@@ -742,6 +790,327 @@ static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
}
}
+static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
+{
+ int j;
+ u8 *rx_buf;
+ u16 *rx_sbuf;
+ const u8 *rx_dma_buf;
+ const u16 *rx_dma_sbuf;
+
+ /* copy Rx Data */
+ if (!data->cur_trans->rx_buf)
+ return;
+
+ if (bpw == 8) {
+ rx_buf = data->cur_trans->rx_buf;
+ rx_dma_buf = data->dma.rx_buf_virt;
+ for (j = 0; j < data->bpw_len; j++)
+ *rx_buf++ = *rx_dma_buf++ & 0xFF;
+ } else {
+ rx_sbuf = data->cur_trans->rx_buf;
+ rx_dma_sbuf = data->dma.rx_buf_virt;
+ for (j = 0; j < data->bpw_len; j++)
+ *rx_sbuf++ = *rx_dma_sbuf++;
+ }
+}
+
+static void pch_spi_start_transfer(struct pch_spi_data *data)
+{
+ struct pch_spi_dma_ctrl *dma;
+ unsigned long flags;
+
+ dma = &data->dma;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* disable interrupts, SPI set enable */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* Wait until the transfer completes; go to sleep after
+ initiating the transfer. */
+ dev_dbg(&data->master->dev,
+ "%s:waiting for transfer to get over\n", __func__);
+ wait_event_interruptible(data->wait, data->transfer_complete);
+
+ dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
+ DMA_FROM_DEVICE);
+ async_tx_ack(dma->desc_rx);
+ async_tx_ack(dma->desc_tx);
+ kfree(dma->sg_tx_p);
+ kfree(dma->sg_rx_p);
+
+ spin_lock_irqsave(&data->lock, flags);
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
+ dev_dbg(&data->master->dev,
+ "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
+
+ /* clear fifo threshold, disable interrupts, disable SPI transfer */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
+ MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
+ SPCR_SPE_BIT);
+ /* clear all interrupts */
+ pch_spi_writereg(data->master, PCH_SPSR,
+ pch_spi_readreg(data->master, PCH_SPSR));
+ /* clear FIFO */
+ pch_spi_clear_fifo(data->master);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void pch_dma_rx_complete(void *arg)
+{
+ struct pch_spi_data *data = arg;
+
+ /* transfer is completed; inform pch_spi_process_messages_dma */
+ data->transfer_complete = true;
+ wake_up_interruptible(&data->wait);
+}
+
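+/* dma_request_channel() filter: match both the requested channel id and the owning DMA device */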
+static bool pch_spi_filter(struct dma_chan *chan, void *slave)
+{
+ struct pch_dma_slave *param = slave;
+
+ if ((chan->chan_id == param->chan_id) &&
+ (param->dma_dev == chan->device->dev)) {
+ chan->private = param;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ struct pci_dev *dma_dev;
+ struct pch_dma_slave *param;
+ struct pch_spi_dma_ctrl *dma;
+ unsigned int width;
+
+ if (bpw == 8)
+ width = PCH_DMA_WIDTH_1_BYTE;
+ else
+ width = PCH_DMA_WIDTH_2_BYTES;
+
+ dma = &data->dma;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Get DMA's dev information */
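+ /* the DMA controller is looked up at a fixed PCI location: bus 2, device 12, function 0 */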
+ dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(12, 0));
+
+ /* Set Tx DMA */
+ param = &dma->param_tx;
+ param->dma_dev = &dma_dev->dev;
+ param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */
+ param->tx_reg = data->io_base_addr + PCH_SPDWR;
+ param->width = width;
+ chan = dma_request_channel(mask, pch_spi_filter, param);
+ if (!chan) {
+ dev_err(&data->master->dev,
+ "ERROR: dma_request_channel FAILS(Tx)\n");
+ data->use_dma = 0;
+ return;
+ }
+ dma->chan_tx = chan;
+
+ /* Set Rx DMA */
+ param = &dma->param_rx;
+ param->dma_dev = &dma_dev->dev;
+ param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */
+ param->rx_reg = data->io_base_addr + PCH_SPDRR;
+ param->width = width;
+ chan = dma_request_channel(mask, pch_spi_filter, param);
+ if (!chan) {
+ dev_err(&data->master->dev,
+ "ERROR: dma_request_channel FAILS(Rx)\n");
+ dma_release_channel(dma->chan_tx);
+ dma->chan_tx = NULL;
+ data->use_dma = 0;
+ return;
+ }
+ dma->chan_rx = chan;
+}
+
+static void pch_spi_release_dma(struct pch_spi_data *data)
+{
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+ if (dma->chan_tx) {
+ dma_release_channel(dma->chan_tx);
+ dma->chan_tx = NULL;
+ }
+ if (dma->chan_rx) {
+ dma_release_channel(dma->chan_rx);
+ dma->chan_rx = NULL;
+ }
+ return;
+}
+
+static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
+{
+ const u8 *tx_buf;
+ const u16 *tx_sbuf;
+ u8 *tx_dma_buf;
+ u16 *tx_dma_sbuf;
+ struct scatterlist *sg;
+ struct dma_async_tx_descriptor *desc_tx;
+ struct dma_async_tx_descriptor *desc_rx;
+ int num;
+ int i;
+ int size;
+ int rem;
+ unsigned long flags;
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+
+ /* set baud rate if needed */
+ if (data->cur_trans->speed_hz) {
+ dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
+ spin_lock_irqsave(&data->lock, flags);
+ pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
+ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ /* set bits per word if needed */
+ if (data->cur_trans->bits_per_word &&
+ (data->current_msg->spi->bits_per_word !=
+ data->cur_trans->bits_per_word)) {
+ dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
+ spin_lock_irqsave(&data->lock, flags);
+ pch_spi_set_bits_per_word(data->master,
+ data->cur_trans->bits_per_word);
+ spin_unlock_irqrestore(&data->lock, flags);
+ *bpw = data->cur_trans->bits_per_word;
+ } else {
+ *bpw = data->current_msg->spi->bits_per_word;
+ }
+ data->bpw_len = data->cur_trans->len / (*bpw / 8);
+
+ /* copy Tx Data */
+ if (data->cur_trans->tx_buf != NULL) {
+ if (*bpw == 8) {
+ tx_buf = data->cur_trans->tx_buf;
+ tx_dma_buf = dma->tx_buf_virt;
+ for (i = 0; i < data->bpw_len; i++)
+ *tx_dma_buf++ = *tx_buf++;
+ } else {
+ tx_sbuf = data->cur_trans->tx_buf;
+ tx_dma_sbuf = dma->tx_buf_virt;
+ for (i = 0; i < data->bpw_len; i++)
+ *tx_dma_sbuf++ = *tx_sbuf++;
+ }
+ }
+ if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
+ size = PCH_DMA_TRANS_SIZE;
+ rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
+ } else {
+ num = 1;
+ size = data->bpw_len;
+ rem = data->bpw_len;
+ }
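+ /* e.g. bpw_len = 30 with PCH_DMA_TRANS_SIZE = 12 gives num = 3, size = 12, rem = 6 */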
+ dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
+ __func__, num, size, rem);
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* set receive fifo threshold and transmit fifo threshold */
+ pch_spi_setclr_reg(data->master, PCH_SPCR,
+ ((size - 1) << SPCR_RFIC_FIELD) |
+ ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) <<
+ SPCR_TFIC_FIELD),
+ MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* RX */
+ dma->sg_rx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
+ sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
+ /* offset, length setting */
+ sg = dma->sg_rx_p;
+ for (i = 0; i < num; i++, sg++) {
+ if (i == 0) {
+ sg->offset = 0;
+ sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
+ sg->offset);
+ sg_dma_len(sg) = rem;
+ } else {
+ sg->offset = rem + size * (i - 1);
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
+ sg->offset);
+ sg_dma_len(sg) = size;
+ }
+ sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
+ }
+ sg = dma->sg_rx_p;
+ desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg,
+ num, DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx) {
+ dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
+ __func__);
+ return;
+ }
+ dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
+ desc_rx->callback = pch_dma_rx_complete;
+ desc_rx->callback_param = data;
+ dma->nent = num;
+ dma->desc_rx = desc_rx;
+
+ /* TX */
+ dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
+ sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
+ /* offset, length setting */
+ sg = dma->sg_tx_p;
+ for (i = 0; i < num; i++, sg++) {
+ if (i == 0) {
+ sg->offset = 0;
+ sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
+ sg->offset);
+ sg_dma_len(sg) = rem;
+ } else {
+ sg->offset = rem + size * (i - 1);
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
+ sg->offset);
+ sg_dma_len(sg) = size;
+ }
+ sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
+ }
+ sg = dma->sg_tx_p;
+ desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx,
+ sg, num, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
+ __func__);
+ return;
+ }
+ dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
+ desc_tx->callback = NULL;
+ desc_tx->callback_param = data;
+ dma->nent = num;
+ dma->desc_tx = desc_tx;
+
+ dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
+ "0x2 to SSNXCR\n", __func__);
+
+ spin_lock_irqsave(&data->lock, flags);
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
+ desc_rx->tx_submit(desc_rx);
+ desc_tx->tx_submit(desc_tx);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* reset transfer complete flag */
+ data->transfer_complete = false;
+}
static void pch_spi_process_messages(struct work_struct *pwork)
{
@@ -753,13 +1122,10 @@ static void pch_spi_process_messages(struct work_struct *pwork)
dev_dbg(&data->master->dev, "%s data initialized\n", __func__);
spin_lock(&data->lock);
-
/* check if suspend has been initiated;if yes flush queue */
if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
- dev_dbg(&data->master->dev,
- "%s suspend/remove initiated,flushing queue\n",
- __func__);
-
+ dev_dbg(&data->master->dev, "%s suspend/remove initiated,"
+ "flushing queue\n", __func__);
list_for_each_entry(pmsg, data->queue.next, queue) {
pmsg->status = -EIO;
@@ -793,53 +1159,42 @@ static void pch_spi_process_messages(struct work_struct *pwork)
spin_unlock(&data->lock);
+ if (data->use_dma)
+ pch_spi_request_dma(data,
+ data->current_msg->spi->bits_per_word);
do {
/* If we are already processing a message get the next
transfer structure from the message otherwise retrieve
the 1st transfer request from the message. */
spin_lock(&data->lock);
-
if (data->cur_trans == NULL) {
data->cur_trans =
- list_entry(data->current_msg->transfers.
- next, struct spi_transfer,
- transfer_list);
- dev_dbg(&data->master->dev,
- "%s :Getting 1st transfer message\n", __func__);
+ list_entry(data->current_msg->transfers.next,
+ struct spi_transfer, transfer_list);
+ dev_dbg(&data->master->dev, "%s "
+ ":Getting 1st transfer message\n", __func__);
} else {
data->cur_trans =
- list_entry(data->cur_trans->transfer_list.next,
- struct spi_transfer,
- transfer_list);
- dev_dbg(&data->master->dev,
- "%s :Getting next transfer message\n",
- __func__);
+ list_entry(data->cur_trans->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ dev_dbg(&data->master->dev, "%s "
+ ":Getting next transfer message\n", __func__);
}
-
spin_unlock(&data->lock);
- pch_spi_set_tx(data, &bpw, &pmsg);
-
- /* Control interrupt*/
- pch_spi_set_ir(data);
-
- /* Disable SPI transfer */
- pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, 0,
- SPCR_SPE_BIT);
-
- /* clear FIFO */
- pch_spi_clear_fifo(data->master);
-
- /* copy Rx Data */
- pch_spi_copy_rx_data(data, bpw);
-
- /* free memory */
- kfree(data->pkt_rx_buff);
- data->pkt_rx_buff = NULL;
-
- kfree(data->pkt_tx_buff);
- data->pkt_tx_buff = NULL;
-
+ if (data->use_dma) {
+ pch_spi_handle_dma(data, &bpw);
+ pch_spi_start_transfer(data);
+ pch_spi_copy_rx_data_for_dma(data, bpw);
+ } else {
+ pch_spi_set_tx(data, &bpw);
+ pch_spi_set_ir(data);
+ pch_spi_copy_rx_data(data, bpw);
+ kfree(data->pkt_rx_buff);
+ data->pkt_rx_buff = NULL;
+ kfree(data->pkt_tx_buff);
+ data->pkt_tx_buff = NULL;
+ }
/* increment message count */
data->current_msg->actual_length += data->cur_trans->len;
@@ -860,125 +1215,60 @@ static void pch_spi_process_messages(struct work_struct *pwork)
/* No more transfer in this message. */
if ((data->cur_trans->transfer_list.next) ==
&(data->current_msg->transfers)) {
- pch_spi_nomore_transfer(data, pmsg);
+ pch_spi_nomore_transfer(data);
}
spin_unlock(&data->lock);
} while (data->cur_trans != NULL);
+
+ if (data->use_dma)
+ pch_spi_release_dma(data);
}
-static void pch_spi_free_resources(struct pch_spi_board_data *board_dat)
+static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
{
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
/* free workqueue */
- if (board_dat->data->wk != NULL) {
- destroy_workqueue(board_dat->data->wk);
- board_dat->data->wk = NULL;
+ if (data->wk != NULL) {
+ destroy_workqueue(data->wk);
+ data->wk = NULL;
dev_dbg(&board_dat->pdev->dev,
"%s destroy_workqueue invoked successfully\n",
__func__);
}
-
- /* disable interrupts & free IRQ */
- if (board_dat->irq_reg_sts) {
- /* disable interrupts */
- pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0,
- PCH_ALL);
-
- /* free IRQ */
- free_irq(board_dat->pdev->irq, board_dat);
-
- dev_dbg(&board_dat->pdev->dev,
- "%s free_irq invoked successfully\n", __func__);
-
- board_dat->irq_reg_sts = false;
- }
-
- /* unmap PCI base address */
- if (board_dat->data->io_remap_addr != 0) {
- pci_iounmap(board_dat->pdev, board_dat->data->io_remap_addr);
-
- board_dat->data->io_remap_addr = 0;
-
- dev_dbg(&board_dat->pdev->dev,
- "%s pci_iounmap invoked successfully\n", __func__);
- }
-
- /* release PCI region */
- if (board_dat->pci_req_sts) {
- pci_release_regions(board_dat->pdev);
- dev_dbg(&board_dat->pdev->dev,
- "%s pci_release_regions invoked successfully\n",
- __func__);
- board_dat->pci_req_sts = false;
- }
}
-static int pch_spi_get_resources(struct pch_spi_board_data *board_dat)
+static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
{
- void __iomem *io_remap_addr;
- int retval;
+ int retval = 0;
+
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
/* create workqueue */
- board_dat->data->wk = create_singlethread_workqueue(KBUILD_MODNAME);
- if (!board_dat->data->wk) {
+ data->wk = create_singlethread_workqueue(KBUILD_MODNAME);
+ if (!data->wk) {
dev_err(&board_dat->pdev->dev,
"%s create_singlet hread_workqueue failed\n", __func__);
retval = -EBUSY;
goto err_return;
}
- dev_dbg(&board_dat->pdev->dev,
- "%s create_singlethread_workqueue success\n", __func__);
-
- retval = pci_request_regions(board_dat->pdev, KBUILD_MODNAME);
- if (retval != 0) {
- dev_err(&board_dat->pdev->dev,
- "%s request_region failed\n", __func__);
- goto err_return;
- }
-
- board_dat->pci_req_sts = true;
-
- io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
- if (io_remap_addr == 0) {
- dev_err(&board_dat->pdev->dev,
- "%s pci_iomap failed\n", __func__);
- retval = -ENOMEM;
- goto err_return;
- }
-
- /* calculate base address for all channels */
- board_dat->data->io_remap_addr = io_remap_addr;
-
/* reset PCH SPI h/w */
- pch_spi_reset(board_dat->data->master);
+ pch_spi_reset(data->master);
dev_dbg(&board_dat->pdev->dev,
"%s pch_spi_reset invoked successfully\n", __func__);
- /* register IRQ */
- retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
- IRQF_SHARED, KBUILD_MODNAME, board_dat);
- if (retval != 0) {
- dev_err(&board_dat->pdev->dev,
- "%s request_irq failed\n", __func__);
- goto err_return;
- }
-
- dev_dbg(&board_dat->pdev->dev, "%s request_irq returned=%d\n",
- __func__, retval);
-
- board_dat->irq_reg_sts = true;
dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
err_return:
if (retval != 0) {
dev_err(&board_dat->pdev->dev,
"%s FAIL:invoking pch_spi_free_resources\n", __func__);
- pch_spi_free_resources(board_dat);
+ pch_spi_free_resources(board_dat, data);
}
dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval);
@@ -986,255 +1276,387 @@ err_return:
return retval;
}
-static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
{
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+ if (dma->tx_buf_dma)
+ dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
+ dma->tx_buf_virt, dma->tx_buf_dma);
+ if (dma->rx_buf_dma)
+ dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
+ dma->rx_buf_virt, dma->rx_buf_dma);
+ return;
+}
- struct spi_master *master;
-
- struct pch_spi_board_data *board_dat;
- int retval;
-
- dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
-
- /* allocate memory for private data */
- board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
- if (board_dat == NULL) {
- dev_err(&pdev->dev,
- " %s memory allocation for private data failed\n",
- __func__);
- retval = -ENOMEM;
- goto err_kmalloc;
- }
+static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
+{
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+ /* Get Consistent memory for Tx DMA */
+ dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
+ PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
+ /* Get Consistent memory for Rx DMA */
+ dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
+ PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
+}
- dev_dbg(&pdev->dev,
- "%s memory allocation for private data success\n", __func__);
+static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
+{
+ int ret;
+ struct spi_master *master;
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
+ struct pch_spi_data *data;
- /* enable PCI device */
- retval = pci_enable_device(pdev);
- if (retval != 0) {
- dev_err(&pdev->dev, "%s pci_enable_device FAILED\n", __func__);
+ dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);
- goto err_pci_en_device;
+ master = spi_alloc_master(&board_dat->pdev->dev,
+ sizeof(struct pch_spi_data));
+ if (!master) {
+ dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
+ plat_dev->id);
+ return -ENOMEM;
}
- dev_dbg(&pdev->dev, "%s pci_enable_device returned=%d\n",
- __func__, retval);
+ data = spi_master_get_devdata(master);
+ data->master = master;
- board_dat->pdev = pdev;
+ platform_set_drvdata(plat_dev, data);
- /* alllocate memory for SPI master */
- master = spi_alloc_master(&pdev->dev, sizeof(struct pch_spi_data));
- if (master == NULL) {
- retval = -ENOMEM;
- dev_err(&pdev->dev, "%s Fail.\n", __func__);
- goto err_spi_alloc_master;
+ /* base address + address offset */
+ data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
+ PCH_ADDRESS_SIZE * plat_dev->id;
+ data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0) +
+ PCH_ADDRESS_SIZE * plat_dev->id;
+ if (!data->io_remap_addr) {
+ dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
+ ret = -ENOMEM;
+ goto err_pci_iomap;
}
- dev_dbg(&pdev->dev,
- "%s spi_alloc_master returned non NULL\n", __func__);
+ dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
+ plat_dev->id, data->io_remap_addr);
/* initialize members of SPI master */
master->bus_num = -1;
master->num_chipselect = PCH_MAX_CS;
master->setup = pch_spi_setup;
master->transfer = pch_spi_transfer;
- dev_dbg(&pdev->dev,
- "%s transfer member of SPI master initialized\n", __func__);
-
- board_dat->data = spi_master_get_devdata(master);
-
- board_dat->data->master = master;
- board_dat->data->n_curnt_chip = 255;
- board_dat->data->board_dat = board_dat;
- board_dat->data->status = STATUS_RUNNING;
-
- INIT_LIST_HEAD(&board_dat->data->queue);
- spin_lock_init(&board_dat->data->lock);
- INIT_WORK(&board_dat->data->work, pch_spi_process_messages);
- init_waitqueue_head(&board_dat->data->wait);
- /* allocate resources for PCH SPI */
- retval = pch_spi_get_resources(board_dat);
- if (retval) {
- dev_err(&pdev->dev, "%s fail(retval=%d)\n", __func__, retval);
+ data->board_dat = board_dat;
+ data->plat_dev = plat_dev;
+ data->n_curnt_chip = 255;
+ data->status = STATUS_RUNNING;
+ data->ch = plat_dev->id;
+ data->use_dma = use_dma;
+
+ INIT_LIST_HEAD(&data->queue);
+ spin_lock_init(&data->lock);
+ INIT_WORK(&data->work, pch_spi_process_messages);
+ init_waitqueue_head(&data->wait);
+
+ ret = pch_spi_get_resources(board_dat, data);
+ if (ret) {
+ dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
goto err_spi_get_resources;
}
- dev_dbg(&pdev->dev, "%s pch_spi_get_resources returned=%d\n",
- __func__, retval);
-
- /* save private data in dev */
- pci_set_drvdata(pdev, board_dat);
- dev_dbg(&pdev->dev, "%s invoked pci_set_drvdata\n", __func__);
+ ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
+ IRQF_SHARED, KBUILD_MODNAME, data);
+ if (ret) {
+ dev_err(&plat_dev->dev,
+ "%s request_irq failed\n", __func__);
+ goto err_request_irq;
+ }
+ data->irq_reg_sts = true;
- /* set master mode */
pch_spi_set_master_mode(master);
- dev_dbg(&pdev->dev,
- "%s invoked pch_spi_set_master_mode\n", __func__);
- /* Register the controller with the SPI core. */
- retval = spi_register_master(master);
- if (retval != 0) {
- dev_err(&pdev->dev,
+ ret = spi_register_master(master);
+ if (ret != 0) {
+ dev_err(&plat_dev->dev,
"%s spi_register_master FAILED\n", __func__);
- goto err_spi_reg_master;
+ goto err_spi_register_master;
}
- dev_dbg(&pdev->dev, "%s spi_register_master returned=%d\n",
- __func__, retval);
-
+ if (use_dma) {
+ dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
+ pch_alloc_dma_buf(board_dat, data);
+ }
return 0;
-err_spi_reg_master:
- spi_unregister_master(master);
+err_spi_register_master:
+ free_irq(board_dat->pdev->irq, board_dat);
+err_request_irq:
+ pch_spi_free_resources(board_dat, data);
err_spi_get_resources:
-err_spi_alloc_master:
+ pci_iounmap(board_dat->pdev, data->io_remap_addr);
+err_pci_iomap:
spi_master_put(master);
- pci_disable_device(pdev);
-err_pci_en_device:
- kfree(board_dat);
-err_kmalloc:
- return retval;
+
+ return ret;
}
-static void pch_spi_remove(struct pci_dev *pdev)
+static int __devexit pch_spi_pd_remove(struct platform_device *plat_dev)
{
- struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev);
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
+ struct pch_spi_data *data = platform_get_drvdata(plat_dev);
int count;
+ unsigned long flags;
- dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+ dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
+ __func__, plat_dev->id, board_dat->pdev->irq);
- if (!board_dat) {
- dev_err(&pdev->dev,
- "%s pci_get_drvdata returned NULL\n", __func__);
- return;
- }
+ if (use_dma)
+ pch_free_dma_buf(board_dat, data);
/* check for any pending messages; no action is taken if the queue
* is still full; but at least we tried. Unload anyway */
count = 500;
- spin_lock(&board_dat->data->lock);
- board_dat->data->status = STATUS_EXITING;
- while ((list_empty(&board_dat->data->queue) == 0) && --count) {
+ spin_lock_irqsave(&data->lock, flags);
+ data->status = STATUS_EXITING;
+ while ((list_empty(&data->queue) == 0) && --count) {
dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
__func__);
- spin_unlock(&board_dat->data->lock);
+ spin_unlock_irqrestore(&data->lock, flags);
msleep(PCH_SLEEP_TIME);
- spin_lock(&board_dat->data->lock);
+ spin_lock_irqsave(&data->lock, flags);
}
- spin_unlock(&board_dat->data->lock);
-
- /* Free resources allocated for PCH SPI */
- pch_spi_free_resources(board_dat);
-
- spi_unregister_master(board_dat->data->master);
-
- /* free memory for private data */
- kfree(board_dat);
+ spin_unlock_irqrestore(&data->lock, flags);
- pci_set_drvdata(pdev, NULL);
+ pch_spi_free_resources(board_dat, data);
+ /* disable interrupts & free IRQ */
+ if (data->irq_reg_sts) {
+ /* disable interrupts */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+ data->irq_reg_sts = false;
+ free_irq(board_dat->pdev->irq, data);
+ }
- /* disable PCI device */
- pci_disable_device(pdev);
+ pci_iounmap(board_dat->pdev, data->io_remap_addr);
+ spi_unregister_master(data->master);
+ spi_master_put(data->master);
+ platform_set_drvdata(plat_dev, NULL);
- dev_dbg(&pdev->dev, "%s invoked pci_disable_device\n", __func__);
+ return 0;
}
-
#ifdef CONFIG_PM
-static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
+static int pch_spi_pd_suspend(struct platform_device *pd_dev,
+ pm_message_t state)
{
u8 count;
- int retval;
-
- struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev);
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
+ struct pch_spi_data *data = platform_get_drvdata(pd_dev);
- dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+ dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);
if (!board_dat) {
- dev_err(&pdev->dev,
+ dev_err(&pd_dev->dev,
"%s pci_get_drvdata returned NULL\n", __func__);
return -EFAULT;
}
- retval = 0;
- board_dat->suspend_sts = true;
-
/* check if the current message is processed:
Only after that's done will the transfer be suspended */
count = 255;
while ((--count) > 0) {
- if (!(board_dat->data->bcurrent_msg_processing)) {
- dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_"
- "msg_processing = false\n", __func__);
+ if (!(data->bcurrent_msg_processing))
break;
- } else {
- dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_msg_"
- "processing = true\n", __func__);
- }
msleep(PCH_SLEEP_TIME);
}
/* Free IRQ */
- if (board_dat->irq_reg_sts) {
+ if (data->irq_reg_sts) {
/* disable all interrupts */
- pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0,
- PCH_ALL);
- pch_spi_reset(board_dat->data->master);
-
- free_irq(board_dat->pdev->irq, board_dat);
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+ pch_spi_reset(data->master);
+ free_irq(board_dat->pdev->irq, data);
- board_dat->irq_reg_sts = false;
- dev_dbg(&pdev->dev,
+ data->irq_reg_sts = false;
+ dev_dbg(&pd_dev->dev,
"%s free_irq invoked successfully.\n", __func__);
}
+ return 0;
+}
+
+static int pch_spi_pd_resume(struct platform_device *pd_dev)
+{
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
+ struct pch_spi_data *data = platform_get_drvdata(pd_dev);
+ int retval;
+
+ if (!board_dat) {
+ dev_err(&pd_dev->dev,
+ "%s pci_get_drvdata returned NULL\n", __func__);
+ return -EFAULT;
+ }
+
+ if (!data->irq_reg_sts) {
+ /* register IRQ */
+ retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
+ IRQF_SHARED, KBUILD_MODNAME, data);
+ if (retval < 0) {
+ dev_err(&pd_dev->dev,
+ "%s request_irq failed\n", __func__);
+ return retval;
+ }
+
+ /* reset PCH SPI h/w */
+ pch_spi_reset(data->master);
+ pch_spi_set_master_mode(data->master);
+ data->irq_reg_sts = true;
+ }
+ return 0;
+}
+#else
+#define pch_spi_pd_suspend NULL
+#define pch_spi_pd_resume NULL
+#endif
+
+static struct platform_driver pch_spi_pd_driver = {
+ .driver = {
+ .name = "pch-spi",
+ .owner = THIS_MODULE,
+ },
+ .probe = pch_spi_pd_probe,
+ .remove = __devexit_p(pch_spi_pd_remove),
+ .suspend = pch_spi_pd_suspend,
+ .resume = pch_spi_pd_resume
+};
+
+static int __devinit pch_spi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct pch_spi_board_data *board_dat;
+ struct platform_device *pd_dev = NULL;
+ int retval;
+ int i;
+ struct pch_pd_dev_save *pd_dev_save;
+
+ pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL);
+ if (!pd_dev_save) {
+ dev_err(&pdev->dev, "%s Can't allocate pd_dev_sav\n", __func__);
+ return -ENOMEM;
+ }
+
+ board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
+ if (!board_dat) {
+ dev_err(&pdev->dev, "%s Can't allocate board_dat\n", __func__);
+ retval = -ENOMEM;
+ goto err_no_mem;
+ }
+
+ retval = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (retval) {
+ dev_err(&pdev->dev, "%s request_region failed\n", __func__);
+ goto pci_request_regions;
+ }
+
+ board_dat->pdev = pdev;
+ board_dat->num = id->driver_data;
+ pd_dev_save->num = id->driver_data;
+ pd_dev_save->board_dat = board_dat;
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
+ goto pci_enable_device;
+ }
+
+ for (i = 0; i < board_dat->num; i++) {
+ pd_dev = platform_device_alloc("pch-spi", i);
+ if (!pd_dev) {
+ dev_err(&pdev->dev, "platform_device_alloc failed\n");
+ goto err_platform_device;
+ }
+ pd_dev_save->pd_save[i] = pd_dev;
+ pd_dev->dev.parent = &pdev->dev;
+
+ retval = platform_device_add_data(pd_dev, board_dat,
+ sizeof(*board_dat));
+ if (retval) {
+ dev_err(&pdev->dev,
+ "platform_device_add_data failed\n");
+ platform_device_put(pd_dev);
+ goto err_platform_device;
+ }
+
+ retval = platform_device_add(pd_dev);
+ if (retval) {
+ dev_err(&pdev->dev, "platform_device_add failed\n");
+ platform_device_put(pd_dev);
+ goto err_platform_device;
+ }
+ }
+
+ pci_set_drvdata(pdev, pd_dev_save);
+
+ return 0;
+
+err_platform_device:
+ pci_disable_device(pdev);
+pci_enable_device:
+ pci_release_regions(pdev);
+pci_request_regions:
+ kfree(board_dat);
+err_no_mem:
+ kfree(pd_dev_save);
+
+ return retval;
+}
+
+static void __devexit pch_spi_remove(struct pci_dev *pdev)
+{
+ int i;
+ struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);
+
+ for (i = 0; i < pd_dev_save->num; i++)
+ platform_device_unregister(pd_dev_save->pd_save[i]);
+
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ kfree(pd_dev_save->board_dat);
+ kfree(pd_dev_save);
+}
+
+#ifdef CONFIG_PM
+static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int retval;
+ struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+
+ pd_dev_save->board_dat->suspend_sts = true;
+
/* save config space */
retval = pci_save_state(pdev);
-
if (retval == 0) {
- dev_dbg(&pdev->dev, "%s pci_save_state returned=%d\n",
- __func__, retval);
- /* disable PM notifications */
pci_enable_wake(pdev, PCI_D3hot, 0);
- dev_dbg(&pdev->dev,
- "%s pci_enable_wake invoked successfully\n", __func__);
- /* disable PCI device */
pci_disable_device(pdev);
- dev_dbg(&pdev->dev,
- "%s pci_disable_device invoked successfully\n",
- __func__);
- /* move device to D3hot state */
pci_set_power_state(pdev, PCI_D3hot);
- dev_dbg(&pdev->dev,
- "%s pci_set_power_state invoked successfully\n",
- __func__);
} else {
dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__);
}
- dev_dbg(&pdev->dev, "%s return=%d\n", __func__, retval);
-
return retval;
}
static int pch_spi_resume(struct pci_dev *pdev)
{
int retval;
-
- struct pch_spi_board_data *board = pci_get_drvdata(pdev);
+ struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
- if (!board) {
- dev_err(&pdev->dev,
- "%s pci_get_drvdata returned NULL\n", __func__);
- return -EFAULT;
- }
-
- /* move device to DO power state */
pci_set_power_state(pdev, PCI_D0);
-
- /* restore state */
pci_restore_state(pdev);
retval = pci_enable_device(pdev);
@@ -1242,34 +1664,12 @@ static int pch_spi_resume(struct pci_dev *pdev)
dev_err(&pdev->dev,
"%s pci_enable_device failed\n", __func__);
} else {
- /* disable PM notifications */
pci_enable_wake(pdev, PCI_D3hot, 0);
- /* register IRQ handler */
- if (!board->irq_reg_sts) {
- /* register IRQ */
- retval = request_irq(board->pdev->irq, pch_spi_handler,
- IRQF_SHARED, KBUILD_MODNAME,
- board);
- if (retval < 0) {
- dev_err(&pdev->dev,
- "%s request_irq failed\n", __func__);
- return retval;
- }
- board->irq_reg_sts = true;
-
- /* reset PCH SPI h/w */
- pch_spi_reset(board->data->master);
- pch_spi_set_master_mode(board->data->master);
-
- /* set suspend status to false */
- board->suspend_sts = false;
-
- }
+ /* set suspend status to false */
+ pd_dev_save->board_dat->suspend_sts = false;
}
- dev_dbg(&pdev->dev, "%s returning=%d\n", __func__, retval);
-
return retval;
}
#else
@@ -1289,15 +1689,29 @@ static struct pci_driver pch_spi_pcidev = {
static int __init pch_spi_init(void)
{
- return pci_register_driver(&pch_spi_pcidev);
+ int ret;
+ ret = platform_driver_register(&pch_spi_pd_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&pch_spi_pcidev);
+ if (ret)
+ return ret;
+
+ return 0;
}
module_init(pch_spi_init);
static void __exit pch_spi_exit(void)
{
pci_unregister_driver(&pch_spi_pcidev);
+ platform_driver_unregister(&pch_spi_pd_driver);
}
module_exit(pch_spi_exit);
+module_param(use_dma, int, 0644);
+MODULE_PARM_DESC(use_dma,
+		 "Pass 1 to use DMA for data transfers, 0 otherwise; default 1");
+
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Topcliff PCH SPI PCI Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7xxx IOH SPI Driver");
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi-txx9.c
index dfa024b633e..f0a2ab0428a 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -1,5 +1,5 @@
/*
- * spi_txx9.c - TXx9 SPI controller driver.
+ * TXx9 SPI controller driver.
*
* Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c
* Copyright (C) 2000-2001 Toshiba Corporation
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/spi-xilinx.c
index 4d2c75df886..4d2c75df886 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/spi-xilinx.c
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 2e13a14bba3..4d1b9f517ce 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1,5 +1,5 @@
/*
- * spi.c - SPI init/core code
+ * SPI init/core code
*
* Copyright (C) 2005 David Brownell
*
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c
deleted file mode 100644
index be991359bf9..00000000000
--- a/drivers/spi/spi_s3c24xx_gpio.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/* linux/drivers/spi/spi_s3c24xx_gpio.c
- *
- * Copyright (c) 2006 Ben Dooks
- * Copyright (c) 2006 Simtec Electronics
- *
- * S3C24XX GPIO based SPI driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
-
-#include <mach/regs-gpio.h>
-#include <mach/spi-gpio.h>
-#include <mach/hardware.h>
-
-struct s3c2410_spigpio {
- struct spi_bitbang bitbang;
-
- struct s3c2410_spigpio_info *info;
- struct platform_device *dev;
-};
-
-static inline struct s3c2410_spigpio *spidev_to_sg(struct spi_device *spi)
-{
- return spi_master_get_devdata(spi->master);
-}
-
-static inline void setsck(struct spi_device *dev, int on)
-{
- struct s3c2410_spigpio *sg = spidev_to_sg(dev);
- s3c2410_gpio_setpin(sg->info->pin_clk, on ? 1 : 0);
-}
-
-static inline void setmosi(struct spi_device *dev, int on)
-{
- struct s3c2410_spigpio *sg = spidev_to_sg(dev);
- s3c2410_gpio_setpin(sg->info->pin_mosi, on ? 1 : 0);
-}
-
-static inline u32 getmiso(struct spi_device *dev)
-{
- struct s3c2410_spigpio *sg = spidev_to_sg(dev);
- return s3c2410_gpio_getpin(sg->info->pin_miso) ? 1 : 0;
-}
-
-#define spidelay(x) ndelay(x)
-
-#include "spi_bitbang_txrx.h"
-
-
-static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
-{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
-}
-
-static u32 s3c2410_spigpio_txrx_mode1(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
-{
- return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits);
-}
-
-static u32 s3c2410_spigpio_txrx_mode2(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
-{
- return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits);
-}
-
-static u32 s3c2410_spigpio_txrx_mode3(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
-{
- return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits);
-}
-
-
-static void s3c2410_spigpio_chipselect(struct spi_device *dev, int value)
-{
- struct s3c2410_spigpio *sg = spidev_to_sg(dev);
-
- if (sg->info && sg->info->chip_select)
- (sg->info->chip_select)(sg->info, value);
-}
-
-static int s3c2410_spigpio_probe(struct platform_device *dev)
-{
- struct s3c2410_spigpio_info *info;
- struct spi_master *master;
- struct s3c2410_spigpio *sp;
- int ret;
-
- master = spi_alloc_master(&dev->dev, sizeof(struct s3c2410_spigpio));
- if (master == NULL) {
- dev_err(&dev->dev, "failed to allocate spi master\n");
- ret = -ENOMEM;
- goto err;
- }
-
- sp = spi_master_get_devdata(master);
-
- platform_set_drvdata(dev, sp);
-
- /* copy in the plkatform data */
- info = sp->info = dev->dev.platform_data;
-
- /* setup spi bitbang adaptor */
- sp->bitbang.master = spi_master_get(master);
- sp->bitbang.master->bus_num = info->bus_num;
- sp->bitbang.master->num_chipselect = info->num_chipselect;
- sp->bitbang.chipselect = s3c2410_spigpio_chipselect;
-
- sp->bitbang.txrx_word[SPI_MODE_0] = s3c2410_spigpio_txrx_mode0;
- sp->bitbang.txrx_word[SPI_MODE_1] = s3c2410_spigpio_txrx_mode1;
- sp->bitbang.txrx_word[SPI_MODE_2] = s3c2410_spigpio_txrx_mode2;
- sp->bitbang.txrx_word[SPI_MODE_3] = s3c2410_spigpio_txrx_mode3;
-
- /* set state of spi pins, always assume that the clock is
- * available, but do check the MOSI and MISO. */
- s3c2410_gpio_setpin(info->pin_clk, 0);
- s3c2410_gpio_cfgpin(info->pin_clk, S3C2410_GPIO_OUTPUT);
-
- if (info->pin_mosi < S3C2410_GPH10) {
- s3c2410_gpio_setpin(info->pin_mosi, 0);
- s3c2410_gpio_cfgpin(info->pin_mosi, S3C2410_GPIO_OUTPUT);
- }
-
- if (info->pin_miso != S3C2410_GPA0 && info->pin_miso < S3C2410_GPH10)
- s3c2410_gpio_cfgpin(info->pin_miso, S3C2410_GPIO_INPUT);
-
- ret = spi_bitbang_start(&sp->bitbang);
- if (ret)
- goto err_no_bitbang;
-
- return 0;
-
- err_no_bitbang:
- spi_master_put(sp->bitbang.master);
- err:
- return ret;
-
-}
-
-static int s3c2410_spigpio_remove(struct platform_device *dev)
-{
- struct s3c2410_spigpio *sp = platform_get_drvdata(dev);
-
- spi_bitbang_stop(&sp->bitbang);
- spi_master_put(sp->bitbang.master);
-
- return 0;
-}
-
-/* all gpio should be held over suspend/resume, so we should
- * not need to deal with this
-*/
-
-#define s3c2410_spigpio_suspend NULL
-#define s3c2410_spigpio_resume NULL
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:spi_s3c24xx_gpio");
-
-static struct platform_driver s3c2410_spigpio_drv = {
- .probe = s3c2410_spigpio_probe,
- .remove = s3c2410_spigpio_remove,
- .suspend = s3c2410_spigpio_suspend,
- .resume = s3c2410_spigpio_resume,
- .driver = {
- .name = "spi_s3c24xx_gpio",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s3c2410_spigpio_init(void)
-{
- return platform_driver_register(&s3c2410_spigpio_drv);
-}
-
-static void __exit s3c2410_spigpio_exit(void)
-{
- platform_driver_unregister(&s3c2410_spigpio_drv);
-}
-
-module_init(s3c2410_spigpio_init);
-module_exit(s3c2410_spigpio_exit);
-
-MODULE_DESCRIPTION("S3C24XX SPI Driver");
-MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index d9fd8621136..830adbed1d7 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -1,5 +1,5 @@
/*
- * spidev.c -- simple synchronous userspace interface to SPI devices
+ * Simple synchronous userspace interface to SPI devices
*
* Copyright (C) 2006 SWAPP
* Andrea Paterniani <a.paterniani@swapp-eng.it>
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index 744d3f6e470..bf53e44c82a 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -5,7 +5,7 @@
* because of its small size we include it in the SSB core
* instead of creating a standalone module.
*
- * Copyright 2007 Michael Buesch <mb@bu3sch.de>
+ * Copyright 2007 Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 06d15b6f221..5d9c97c2479 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -3,7 +3,7 @@
* Broadcom ChipCommon core driver
*
* Copyright 2005, Broadcom Corporation
- * Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index 305ade7825f..52901c14c68 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -2,7 +2,7 @@
* Sonics Silicon Backplane
* Broadcom ChipCommon Power Management Unit driver
*
- * Copyright 2009, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2009, Michael Buesch <m@bues.ch>
* Copyright 2007, Broadcom Corporation
*
* Licensed under the GNU/GPL. See COPYING for details.
@@ -417,9 +417,9 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
u32 min_msk = 0, max_msk = 0;
unsigned int i;
const struct pmu_res_updown_tab_entry *updown_tab = NULL;
- unsigned int updown_tab_size;
+ unsigned int updown_tab_size = 0;
const struct pmu_res_depend_tab_entry *depend_tab = NULL;
- unsigned int depend_tab_size;
+ unsigned int depend_tab_size = 0;
switch (bus->chip_id) {
case 0x4312:
diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c
index c3e1d3e6d61..dc47f30e9cf 100644
--- a/drivers/ssb/driver_extif.c
+++ b/drivers/ssb/driver_extif.c
@@ -3,7 +3,7 @@
* Broadcom EXTIF core driver
*
* Copyright 2005, Broadcom Corporation
- * Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
* Copyright 2006, 2007, Felix Fietkau <nbd@openwrt.org>
* Copyright 2007, Aurelien Jarno <aurelien@aurel32.net>
*
diff --git a/drivers/ssb/driver_gige.c b/drivers/ssb/driver_gige.c
index 5ba92a2719a..3adb98dad70 100644
--- a/drivers/ssb/driver_gige.c
+++ b/drivers/ssb/driver_gige.c
@@ -3,7 +3,7 @@
* Broadcom Gigabit Ethernet core driver
*
* Copyright 2008, Broadcom Corporation
- * Copyright 2008, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2008, Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
@@ -106,8 +106,9 @@ void gige_pcicfg_write32(struct ssb_gige *dev,
gige_write32(dev, SSB_GIGE_PCICFG + offset, value);
}
-static int ssb_gige_pci_read_config(struct pci_bus *bus, unsigned int devfn,
- int reg, int size, u32 *val)
+static int __devinit ssb_gige_pci_read_config(struct pci_bus *bus,
+ unsigned int devfn, int reg,
+ int size, u32 *val)
{
struct ssb_gige *dev = container_of(bus->ops, struct ssb_gige, pci_ops);
unsigned long flags;
@@ -136,8 +137,9 @@ static int ssb_gige_pci_read_config(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_SUCCESSFUL;
}
-static int ssb_gige_pci_write_config(struct pci_bus *bus, unsigned int devfn,
- int reg, int size, u32 val)
+static int __devinit ssb_gige_pci_write_config(struct pci_bus *bus,
+ unsigned int devfn, int reg,
+ int size, u32 val)
{
struct ssb_gige *dev = container_of(bus->ops, struct ssb_gige, pci_ops);
unsigned long flags;
@@ -166,7 +168,8 @@ static int ssb_gige_pci_write_config(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_SUCCESSFUL;
}
-static int ssb_gige_probe(struct ssb_device *sdev, const struct ssb_device_id *id)
+static int __devinit ssb_gige_probe(struct ssb_device *sdev,
+ const struct ssb_device_id *id)
{
struct ssb_gige *dev;
u32 base, tmslow, tmshigh;
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 97efce184a8..ced50156859 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -3,7 +3,7 @@
* Broadcom MIPS core driver
*
* Copyright 2005, Broadcom Corporation
- * Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 2a20dabec76..e6ac3177fbb 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -3,7 +3,7 @@
* Broadcom PCI-core driver
*
* Copyright 2005, Broadcom Corporation
- * Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
@@ -314,7 +314,7 @@ int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return ssb_mips_irq(extpci_core->dev) + 2;
}
-static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
+static void __devinit ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
{
u32 val;
@@ -379,7 +379,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
register_pci_controller(&ssb_pcicore_controller);
}
-static int pcicore_is_in_hostmode(struct ssb_pcicore *pc)
+static int __devinit pcicore_is_in_hostmode(struct ssb_pcicore *pc)
{
struct ssb_bus *bus = pc->dev->bus;
u16 chipid_top;
@@ -412,7 +412,7 @@ static int pcicore_is_in_hostmode(struct ssb_pcicore *pc)
* Workarounds.
**************************************************/
-static void ssb_pcicore_fix_sprom_core_index(struct ssb_pcicore *pc)
+static void __devinit ssb_pcicore_fix_sprom_core_index(struct ssb_pcicore *pc)
{
u16 tmp = pcicore_read16(pc, SSB_PCICORE_SPROM(0));
if (((tmp & 0xF000) >> 12) != pc->dev->core_index) {
@@ -514,13 +514,22 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
* Generic and Clientmode operation code.
**************************************************/
-static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
+static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
{
+ ssb_pcicore_fix_sprom_core_index(pc);
+
/* Disable PCI interrupts. */
ssb_write32(pc->dev, SSB_INTVEC, 0);
+
+ /* Additional PCIe always once-executed workarounds */
+ if (pc->dev->id.coreid == SSB_DEV_PCIE) {
+ ssb_pcicore_serdes_workaround(pc);
+ /* TODO: ASPM */
+ /* TODO: Clock Request Update */
+ }
}
-void ssb_pcicore_init(struct ssb_pcicore *pc)
+void __devinit ssb_pcicore_init(struct ssb_pcicore *pc)
{
struct ssb_device *dev = pc->dev;
@@ -529,8 +538,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
if (!ssb_device_is_enabled(dev))
ssb_device_enable(dev, 0);
- ssb_pcicore_fix_sprom_core_index(pc);
-
#ifdef CONFIG_SSB_PCICORE_HOSTMODE
pc->hostmode = pcicore_is_in_hostmode(pc);
if (pc->hostmode)
@@ -538,13 +545,6 @@ void ssb_pcicore_init(struct ssb_pcicore *pc)
#endif /* CONFIG_SSB_PCICORE_HOSTMODE */
if (!pc->hostmode)
ssb_pcicore_init_clientmode(pc);
-
- /* Additional PCIe always once-executed workarounds */
- if (dev->id.coreid == SSB_DEV_PCIE) {
- ssb_pcicore_serdes_workaround(pc);
- /* TODO: ASPM */
- /* TODO: Clock Request Update */
- }
}
static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)
diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c
index a0e0d246b59..eec3e267be4 100644
--- a/drivers/ssb/embedded.c
+++ b/drivers/ssb/embedded.c
@@ -3,7 +3,7 @@
* Embedded systems support code
*
* Copyright 2005-2008, Broadcom Corporation
- * Copyright 2006-2008, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2006-2008, Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index f8a13f86321..29c7d4f9d1a 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -3,7 +3,7 @@
* Subsystem core
*
* Copyright 2005, Broadcom Corporation
- * Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
@@ -557,7 +557,7 @@ error:
}
/* Needs ssb_buses_lock() */
-static int ssb_attach_queued_buses(void)
+static int __devinit ssb_attach_queued_buses(void)
{
struct ssb_bus *bus, *n;
int err = 0;
@@ -768,9 +768,9 @@ out:
return err;
}
-static int ssb_bus_register(struct ssb_bus *bus,
- ssb_invariants_func_t get_invariants,
- unsigned long baseaddr)
+static int __devinit ssb_bus_register(struct ssb_bus *bus,
+ ssb_invariants_func_t get_invariants,
+ unsigned long baseaddr)
{
int err;
@@ -851,8 +851,8 @@ err_disable_xtal:
}
#ifdef CONFIG_SSB_PCIHOST
-int ssb_bus_pcibus_register(struct ssb_bus *bus,
- struct pci_dev *host_pci)
+int __devinit ssb_bus_pcibus_register(struct ssb_bus *bus,
+ struct pci_dev *host_pci)
{
int err;
@@ -875,9 +875,9 @@ EXPORT_SYMBOL(ssb_bus_pcibus_register);
#endif /* CONFIG_SSB_PCIHOST */
#ifdef CONFIG_SSB_PCMCIAHOST
-int ssb_bus_pcmciabus_register(struct ssb_bus *bus,
- struct pcmcia_device *pcmcia_dev,
- unsigned long baseaddr)
+int __devinit ssb_bus_pcmciabus_register(struct ssb_bus *bus,
+ struct pcmcia_device *pcmcia_dev,
+ unsigned long baseaddr)
{
int err;
@@ -897,8 +897,9 @@ EXPORT_SYMBOL(ssb_bus_pcmciabus_register);
#endif /* CONFIG_SSB_PCMCIAHOST */
#ifdef CONFIG_SSB_SDIOHOST
-int ssb_bus_sdiobus_register(struct ssb_bus *bus, struct sdio_func *func,
- unsigned int quirks)
+int __devinit ssb_bus_sdiobus_register(struct ssb_bus *bus,
+ struct sdio_func *func,
+ unsigned int quirks)
{
int err;
@@ -918,9 +919,9 @@ int ssb_bus_sdiobus_register(struct ssb_bus *bus, struct sdio_func *func,
EXPORT_SYMBOL(ssb_bus_sdiobus_register);
#endif /* CONFIG_SSB_PCMCIAHOST */
-int ssb_bus_ssbbus_register(struct ssb_bus *bus,
- unsigned long baseaddr,
- ssb_invariants_func_t get_invariants)
+int __devinit ssb_bus_ssbbus_register(struct ssb_bus *bus,
+ unsigned long baseaddr,
+ ssb_invariants_func_t get_invariants)
{
int err;
@@ -1001,8 +1002,8 @@ u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m)
switch (plltype) {
case SSB_PLLTYPE_6: /* 100/200 or 120/240 only */
if (m & SSB_CHIPCO_CLK_T6_MMASK)
- return SSB_CHIPCO_CLK_T6_M0;
- return SSB_CHIPCO_CLK_T6_M1;
+ return SSB_CHIPCO_CLK_T6_M1;
+ return SSB_CHIPCO_CLK_T6_M0;
case SSB_PLLTYPE_1: /* 48Mhz base, 3 dividers */
case SSB_PLLTYPE_3: /* 25Mhz, 2 dividers */
case SSB_PLLTYPE_4: /* 48Mhz, 4 dividers */
@@ -1265,7 +1266,10 @@ u32 ssb_dma_translation(struct ssb_device *dev)
case SSB_BUSTYPE_SSB:
return 0;
case SSB_BUSTYPE_PCI:
- return SSB_PCI_DMA;
+ if (ssb_read32(dev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64)
+ return SSB_PCIE_DMA_H32;
+ else
+ return SSB_PCI_DMA;
default:
__ssb_dma_not_implemented(dev);
}
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 7ad48585c5e..34c3bab90b9 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -1,7 +1,7 @@
/*
* Sonics Silicon Backplane PCI-Hostbus related functions.
*
- * Copyright (C) 2005-2006 Michael Buesch <mb@bu3sch.de>
+ * Copyright (C) 2005-2006 Michael Buesch <m@bues.ch>
* Copyright (C) 2005 Martin Langer <martin-langer@gmx.de>
* Copyright (C) 2005 Stefano Brivio <st3@riseup.net>
* Copyright (C) 2005 Danny van Dyk <kugelfang@gentoo.org>
@@ -734,12 +734,9 @@ out_free:
static void ssb_pci_get_boardinfo(struct ssb_bus *bus,
struct ssb_boardinfo *bi)
{
- pci_read_config_word(bus->host_pci, PCI_SUBSYSTEM_VENDOR_ID,
- &bi->vendor);
- pci_read_config_word(bus->host_pci, PCI_SUBSYSTEM_ID,
- &bi->type);
- pci_read_config_word(bus->host_pci, PCI_REVISION_ID,
- &bi->rev);
+ bi->vendor = bus->host_pci->subsystem_vendor;
+ bi->type = bus->host_pci->subsystem_device;
+ bi->rev = bus->host_pci->revision;
}
int ssb_pci_get_invariants(struct ssb_bus *bus,
diff --git a/drivers/ssb/pcihost_wrapper.c b/drivers/ssb/pcihost_wrapper.c
index f6c8c81a002..116a8116984 100644
--- a/drivers/ssb/pcihost_wrapper.c
+++ b/drivers/ssb/pcihost_wrapper.c
@@ -6,7 +6,7 @@
* Copyright (c) 2005 Stefano Brivio <st3@riseup.net>
* Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
* Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
- * Copyright (c) 2005-2007 Michael Buesch <mbuesch@freenet.de>
+ * Copyright (c) 2005-2007 Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
@@ -53,8 +53,8 @@ static int ssb_pcihost_resume(struct pci_dev *dev)
# define ssb_pcihost_resume NULL
#endif /* CONFIG_PM */
-static int ssb_pcihost_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int __devinit ssb_pcihost_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
{
struct ssb_bus *ssb;
int err = -ENOMEM;
@@ -110,7 +110,7 @@ static void ssb_pcihost_remove(struct pci_dev *dev)
pci_set_drvdata(dev, NULL);
}
-int ssb_pcihost_register(struct pci_driver *driver)
+int __devinit ssb_pcihost_register(struct pci_driver *driver)
{
driver->probe = ssb_pcihost_probe;
driver->remove = ssb_pcihost_remove;
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index f8533795ee7..c821c6b2a6a 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -3,7 +3,7 @@
* PCMCIA-Hostbus related functions
*
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
- * Copyright 2007-2008 Michael Buesch <mb@bu3sch.de>
+ * Copyright 2007-2008 Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 45e5babd396..3e844874631 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -2,7 +2,7 @@
* Sonics Silicon Backplane
* Bus scanning
*
- * Copyright (C) 2005-2007 Michael Buesch <mb@bu3sch.de>
+ * Copyright (C) 2005-2007 Michael Buesch <m@bues.ch>
* Copyright (C) 2005 Martin Langer <martin-langer@gmx.de>
* Copyright (C) 2005 Stefano Brivio <st3@riseup.net>
* Copyright (C) 2005 Danny van Dyk <kugelfang@gentoo.org>
@@ -310,8 +310,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
} else {
if (bus->bustype == SSB_BUSTYPE_PCI) {
bus->chip_id = pcidev_to_chipid(bus->host_pci);
- pci_read_config_byte(bus->host_pci, PCI_REVISION_ID,
- &bus->chip_rev);
+ bus->chip_rev = bus->host_pci->revision;
bus->chip_package = 0;
} else {
bus->chip_id = 0x4710;
diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c
index 65a6080cb02..63fd709038c 100644
--- a/drivers/ssb/sdio.c
+++ b/drivers/ssb/sdio.c
@@ -6,7 +6,7 @@
*
* Based on drivers/ssb/pcmcia.c
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
- * Copyright 2007-2008 Michael Buesch <mb@bu3sch.de>
+ * Copyright 2007-2008 Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index 45ff0e3a382..80d366fcf8d 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -2,7 +2,7 @@
* Sonics Silicon Backplane
* Common SPROM support routines
*
- * Copyright (C) 2005-2008 Michael Buesch <mb@bu3sch.de>
+ * Copyright (C) 2005-2008 Michael Buesch <m@bues.ch>
* Copyright (C) 2005 Martin Langer <martin-langer@gmx.de>
* Copyright (C) 2005 Stefano Brivio <st3@riseup.net>
* Copyright (C) 2005 Danny van Dyk <kugelfang@gentoo.org>
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 196284dc2f3..06c9081d596 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -24,10 +24,6 @@ menuconfig STAGING
if STAGING
-source "drivers/staging/tty/Kconfig"
-
-source "drivers/staging/generic_serial/Kconfig"
-
source "drivers/staging/et131x/Kconfig"
source "drivers/staging/slicoss/Kconfig"
@@ -100,8 +96,6 @@ source "drivers/staging/sep/Kconfig"
source "drivers/staging/iio/Kconfig"
-source "drivers/staging/cs5535_gpio/Kconfig"
-
source "drivers/staging/zram/Kconfig"
source "drivers/staging/zcache/Kconfig"
@@ -120,8 +114,6 @@ source "drivers/staging/cxt1e1/Kconfig"
source "drivers/staging/xgifb/Kconfig"
-source "drivers/staging/msm/Kconfig"
-
source "drivers/staging/lirc/Kconfig"
source "drivers/staging/easycap/Kconfig"
@@ -132,8 +124,6 @@ source "drivers/staging/tidspbridge/Kconfig"
source "drivers/staging/quickstart/Kconfig"
-source "drivers/staging/westbridge/Kconfig"
-
source "drivers/staging/sbe-2t3e3/Kconfig"
source "drivers/staging/ath6kl/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index fa41b9c2378..f3c5e33bb26 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -3,8 +3,6 @@
# fix for build system bug...
obj-$(CONFIG_STAGING) += staging.o
-obj-y += tty/
-obj-y += generic_serial/
obj-$(CONFIG_ET131X) += et131x/
obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
@@ -41,7 +39,6 @@ obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
-obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio/
obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_XVMALLOC) += zram/
obj-$(CONFIG_ZCACHE) += zcache/
@@ -52,12 +49,10 @@ obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
obj-$(CONFIG_CRYSTALHD) += crystalhd/
obj-$(CONFIG_CXT1E1) += cxt1e1/
obj-$(CONFIG_FB_XGI) += xgifb/
-obj-$(CONFIG_MSM_STAGING) += msm/
obj-$(CONFIG_EASYCAP) += easycap/
obj-$(CONFIG_SOLO6X10) += solo6x10/
obj-$(CONFIG_TIDSPBRIDGE) += tidspbridge/
obj-$(CONFIG_ACPI_QUICKSTART) += quickstart/
-obj-$(CONFIG_WESTBRIDGE_ASTORIA) += westbridge/astoria/
obj-$(CONFIG_SBE_2T3E3) += sbe-2t3e3/
obj-$(CONFIG_ATH6K_LEGACY) += ath6kl/
obj-$(CONFIG_USB_ENESTORAGE) += keucr/
diff --git a/drivers/staging/altera-stapl/Kconfig b/drivers/staging/altera-stapl/Kconfig
index 7f01d8e9399..b6537321ed7 100644
--- a/drivers/staging/altera-stapl/Kconfig
+++ b/drivers/staging/altera-stapl/Kconfig
@@ -1,5 +1,3 @@
-comment "Altera FPGA firmware download module"
-
config ALTERA_STAPL
tristate "Altera FPGA firmware download module"
depends on I2C
diff --git a/drivers/staging/altera-stapl/Makefile b/drivers/staging/altera-stapl/Makefile
index 055f61ee781..ddeede3c4b9 100644
--- a/drivers/staging/altera-stapl/Makefile
+++ b/drivers/staging/altera-stapl/Makefile
@@ -1,3 +1,3 @@
-altera-stapl-objs = altera-lpt.o altera-jtag.o altera-comp.o altera.o
+altera-stapl-y := altera-lpt.o altera-jtag.o altera-comp.o altera.o
obj-$(CONFIG_ALTERA_STAPL) += altera-stapl.o
diff --git a/drivers/staging/altera-stapl/altera.c b/drivers/staging/altera-stapl/altera.c
index 9cd5e76880c..8d73a864273 100644
--- a/drivers/staging/altera-stapl/altera.c
+++ b/drivers/staging/altera-stapl/altera.c
@@ -2430,16 +2430,23 @@ int altera_init(struct altera_config *config, const struct firmware *fw)
int index = 0;
s32 offset = 0L;
s32 error_address = 0L;
+ int retval = 0;
- key = kzalloc(33 * sizeof(char), GFP_KERNEL);
- if (!key)
- return -ENOMEM;
- value = kzalloc(257 * sizeof(char), GFP_KERNEL);
- if (!value)
- return -ENOMEM;
+ key = kzalloc(33, GFP_KERNEL);
+ if (!key) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ value = kzalloc(257, GFP_KERNEL);
+ if (!value) {
+ retval = -ENOMEM;
+ goto free_key;
+ }
astate = kzalloc(sizeof(struct altera_state), GFP_KERNEL);
- if (!astate)
- return -ENOMEM;
+ if (!astate) {
+ retval = -ENOMEM;
+ goto free_value;
+ }
astate->config = config;
if (!astate->config->jtag_io) {
@@ -2518,10 +2525,12 @@ int altera_init(struct altera_config *config, const struct firmware *fw)
} else if (exec_result)
printk(KERN_ERR "%s: error %d\n", __func__, exec_result);
- kfree(key);
- kfree(value);
kfree(astate);
-
- return 0;
+free_value:
+ kfree(value);
+free_key:
+ kfree(key);
+out:
+ return retval;
}
EXPORT_SYMBOL(altera_init);
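
The altera_init hunk above replaces three independent early returns with a single unwind path so partially allocated buffers are always freed. A generic sketch of that goto-cleanup idiom, with illustrative names only:

/* Illustrative sketch of goto-based unwinding; not the module's code. */
#include <linux/slab.h>

static int example_init(void)
{
	char *key, *value;
	int retval = 0;

	key = kzalloc(33, GFP_KERNEL);
	if (!key)
		return -ENOMEM;		/* nothing to unwind yet */

	value = kzalloc(257, GFP_KERNEL);
	if (!value) {
		retval = -ENOMEM;
		goto free_key;		/* free only what was already allocated */
	}

	/* ... work with key and value ... */

	kfree(value);
free_key:
	kfree(key);
	return retval;
}
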
diff --git a/drivers/staging/ath6kl/TODO b/drivers/staging/ath6kl/TODO
index 019df4b471e..7be4b46ebb5 100644
--- a/drivers/staging/ath6kl/TODO
+++ b/drivers/staging/ath6kl/TODO
@@ -1,7 +1,7 @@
TODO:
We are working hard on cleaning up the driver. There's sooooooooo much todo
-so instead of editign this file please use the wiki:
+so instead of editing this file please use the wiki:
http://wireless.kernel.org/en/users/Drivers/ath6kl
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
index c01c0cb0af4..b99a11a9dd6 100644
--- a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
+++ b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c
@@ -812,7 +812,7 @@ int AthCreateCommandList(struct ps_cmd_packet **HciPacketList, u32 *numPackets)
for(count = 0; count < Patch_Count; count++) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Freeing Patch Buffer %d \r\n",count));
- kfree(RamPatch[Patch_Count].Data);
+ kfree(RamPatch[count].Data);
}
for(count = 0; count < Tag_Count; count++) {
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
index 48dd9e36359..32ee39ad00d 100644
--- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
@@ -954,9 +954,13 @@ ar6000_transfer_bin_file(struct ar6_softc *ar, AR6K_BIN_FILE file, u32 address,
const char *filename;
const struct firmware *fw_entry;
u32 fw_entry_size;
+ u8 **buf;
+ size_t *buf_len;
switch (file) {
case AR6K_OTP_FILE:
+ buf = &ar->fw_otp;
+ buf_len = &ar->fw_otp_len;
if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
filename = AR6003_REV1_OTP_FILE;
} else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
@@ -970,6 +974,8 @@ ar6000_transfer_bin_file(struct ar6_softc *ar, AR6K_BIN_FILE file, u32 address,
break;
case AR6K_FIRMWARE_FILE:
+ buf = &ar->fw;
+ buf_len = &ar->fw_len;
if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
filename = AR6003_REV1_FIRMWARE_FILE;
} else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
@@ -1028,6 +1034,8 @@ ar6000_transfer_bin_file(struct ar6_softc *ar, AR6K_BIN_FILE file, u32 address,
break;
case AR6K_PATCH_FILE:
+ buf = &ar->fw_patch;
+ buf_len = &ar->fw_patch_len;
if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
filename = AR6003_REV1_PATCH_FILE;
} else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
@@ -1041,6 +1049,8 @@ ar6000_transfer_bin_file(struct ar6_softc *ar, AR6K_BIN_FILE file, u32 address,
break;
case AR6K_BOARD_DATA_FILE:
+ buf = &ar->fw_data;
+ buf_len = &ar->fw_data_len;
if (ar->arVersion.target_ver == AR6003_REV1_VERSION) {
filename = AR6003_REV1_BOARD_DATA_FILE;
} else if (ar->arVersion.target_ver == AR6003_REV2_VERSION) {
@@ -1057,23 +1067,29 @@ ar6000_transfer_bin_file(struct ar6_softc *ar, AR6K_BIN_FILE file, u32 address,
AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Unknown file type: %d\n", file));
return A_ERROR;
}
- if ((A_REQUEST_FIRMWARE(&fw_entry, filename, ((struct device *)ar->osDevInfo.pOSDevice))) != 0)
- {
- AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to get %s\n", filename));
- return A_ENOENT;
+
+ if (*buf == NULL) {
+ if ((A_REQUEST_FIRMWARE(&fw_entry, filename, ((struct device *)ar->osDevInfo.pOSDevice))) != 0) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Failed to get %s\n", filename));
+ return A_ENOENT;
+ }
+
+ *buf = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+ *buf_len = fw_entry->size;
+ A_RELEASE_FIRMWARE(fw_entry);
}
#ifdef SOFTMAC_FILE_USED
- if (file==AR6K_BOARD_DATA_FILE && fw_entry->data) {
- ar6000_softmac_update(ar, (u8 *)fw_entry->data, fw_entry->size);
+ if (file==AR6K_BOARD_DATA_FILE && *buf_len) {
+ ar6000_softmac_update(ar, *buf, *buf_len);
}
#endif
- fw_entry_size = fw_entry->size;
+ fw_entry_size = *buf_len;
/* Load extended board data for AR6003 */
- if ((file==AR6K_BOARD_DATA_FILE) && (fw_entry->data)) {
+ if ((file==AR6K_BOARD_DATA_FILE) && *buf) {
u32 board_ext_address;
u32 board_ext_data_size;
u32 board_data_size;
@@ -1089,14 +1105,13 @@ ar6000_transfer_bin_file(struct ar6_softc *ar, AR6K_BIN_FILE file, u32 address,
AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("Board extended Data download address: 0x%x\n", board_ext_address));
/* check whether the target has allocated memory for extended board data and file contains extended board data */
- if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) {
+ if ((board_ext_address) && (*buf_len == (board_data_size + board_ext_data_size))) {
u32 param;
- status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (u8 *)(fw_entry->data + board_data_size), board_ext_data_size);
+ status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (u8 *)(*buf + board_data_size), board_ext_data_size);
if (status) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__));
- A_RELEASE_FIRMWARE(fw_entry);
return A_ERROR;
}
@@ -1110,17 +1125,16 @@ ar6000_transfer_bin_file(struct ar6_softc *ar, AR6K_BIN_FILE file, u32 address,
}
if (compressed) {
- status = BMIFastDownload(ar->arHifDevice, address, (u8 *)fw_entry->data, fw_entry_size);
+ status = BMIFastDownload(ar->arHifDevice, address, *buf, fw_entry_size);
} else {
- status = BMIWriteMemory(ar->arHifDevice, address, (u8 *)fw_entry->data, fw_entry_size);
+ status = BMIWriteMemory(ar->arHifDevice, address, *buf, fw_entry_size);
}
if (status) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__));
- A_RELEASE_FIRMWARE(fw_entry);
return A_ERROR;
}
- A_RELEASE_FIRMWARE(fw_entry);
+
return 0;
}
@@ -2088,6 +2102,11 @@ ar6000_destroy(struct net_device *dev, unsigned int unregister)
ar6000_remove_ap_interface();
#endif /*CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT */
+ kfree(ar->fw_otp);
+ kfree(ar->fw);
+ kfree(ar->fw_patch);
+ kfree(ar->fw_data);
+
AR_DEBUG_PRINTF(ATH_DEBUG_INFO,("-ar6000_destroy \n"));
}
@@ -4114,6 +4133,13 @@ ar6000_ready_event(void *devt, u8 *datap, u8 phyCap, u32 sw_ver, u32 abi_ver)
ar->arVersion.wlan_ver = sw_ver;
ar->arVersion.abi_ver = abi_ver;
+ snprintf(ar->wdev->wiphy->fw_version, sizeof(ar->wdev->wiphy->fw_version),
+ "%u:%u:%u:%u",
+ (ar->arVersion.wlan_ver & 0xf0000000) >> 28,
+ (ar->arVersion.wlan_ver & 0x0f000000) >> 24,
+ (ar->arVersion.wlan_ver & 0x00ff0000) >> 16,
+ (ar->arVersion.wlan_ver & 0x0000ffff));
+
/* Indicate to the waiting thread that the ready event was received */
ar->arWmiReady = true;
wake_up(&arEvent);
@@ -6179,6 +6205,7 @@ int ar6000_create_ap_interface(struct ar6_softc *ar, char *ap_ifname)
ether_setup(dev);
init_netdev(dev, ap_ifname);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
if (register_netdev(dev)) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("ar6000_create_ap_interface: register_netdev failed\n"));
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c
index d3a774dbb7e..5fdda4aa2fe 100644
--- a/drivers/staging/ath6kl/os/linux/cfg80211.c
+++ b/drivers/staging/ath6kl/os/linux/cfg80211.c
@@ -24,6 +24,7 @@
#include <linux/wireless.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
+#include <net/netlink.h>
#include "ar6000_drv.h"
@@ -867,26 +868,31 @@ ar6k_cfg80211_scanComplete_event(struct ar6_softc *ar, int status)
AR_DEBUG_PRINTF(ATH_DEBUG_INFO, ("%s: status %d\n", __func__, status));
- if(ar->scan_request)
- {
- /* Translate data to cfg80211 mgmt format */
- if (ar->arWmi)
- wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy);
+ if (!ar->scan_request)
+ return;
+
+ if ((status == A_ECANCELED) || (status == A_EBUSY)) {
+ cfg80211_scan_done(ar->scan_request, true);
+ goto out;
+ }
- cfg80211_scan_done(ar->scan_request,
- ((status & A_ECANCELED) || (status & A_EBUSY)) ? true : false);
+ /* Translate data to cfg80211 mgmt format */
+ wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy);
- if(ar->scan_request->n_ssids &&
- ar->scan_request->ssids[0].ssid_len) {
+ cfg80211_scan_done(ar->scan_request, false);
+
+ if(ar->scan_request->n_ssids &&
+ ar->scan_request->ssids[0].ssid_len) {
u8 i;
for (i = 0; i < ar->scan_request->n_ssids; i++) {
- wmi_probedSsid_cmd(ar->arWmi, i+1, DISABLE_SSID_FLAG,
- 0, NULL);
+ wmi_probedSsid_cmd(ar->arWmi, i+1, DISABLE_SSID_FLAG,
+ 0, NULL);
}
- }
- ar->scan_request = NULL;
}
+
+out:
+ ar->scan_request = NULL;
}
static int
@@ -1453,6 +1459,159 @@ ar6k_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
return 0;
}
+#ifdef CONFIG_NL80211_TESTMODE
+enum ar6k_testmode_attr {
+ __AR6K_TM_ATTR_INVALID = 0,
+ AR6K_TM_ATTR_CMD = 1,
+ AR6K_TM_ATTR_DATA = 2,
+
+ /* keep last */
+ __AR6K_TM_ATTR_AFTER_LAST,
+ AR6K_TM_ATTR_MAX = __AR6K_TM_ATTR_AFTER_LAST - 1
+};
+
+enum ar6k_testmode_cmd {
+ AR6K_TM_CMD_TCMD = 0,
+ AR6K_TM_CMD_RX_REPORT = 1,
+};
+
+#define AR6K_TM_DATA_MAX_LEN 5000
+
+static const struct nla_policy ar6k_testmode_policy[AR6K_TM_ATTR_MAX + 1] = {
+ [AR6K_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [AR6K_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = AR6K_TM_DATA_MAX_LEN },
+};
+
+void ar6000_testmode_rx_report_event(struct ar6_softc *ar, void *buf,
+ int buf_len)
+{
+ if (down_interruptible(&ar->arSem))
+ return;
+
+ kfree(ar->tcmd_rx_report);
+
+ ar->tcmd_rx_report = kmemdup(buf, buf_len, GFP_KERNEL);
+ ar->tcmd_rx_report_len = buf_len;
+
+ up(&ar->arSem);
+
+ wake_up(&arEvent);
+}
+
+static int ar6000_testmode_rx_report(struct ar6_softc *ar, void *buf,
+ int buf_len, struct sk_buff *skb)
+{
+ int ret = 0;
+ long left;
+
+ if (down_interruptible(&ar->arSem))
+ return -ERESTARTSYS;
+
+ if (ar->arWmiReady == false) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (ar->bIsDestroyProgress) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ WARN_ON(ar->tcmd_rx_report != NULL);
+ WARN_ON(ar->tcmd_rx_report_len > 0);
+
+ if (wmi_test_cmd(ar->arWmi, buf, buf_len) < 0) {
+ up(&ar->arSem);
+ return -EIO;
+ }
+
+ left = wait_event_interruptible_timeout(arEvent,
+ ar->tcmd_rx_report != NULL,
+ wmitimeout * HZ);
+
+ if (left == 0) {
+ ret = -ETIMEDOUT;
+ goto out;
+ } else if (left < 0) {
+ ret = left;
+ goto out;
+ }
+
+ if (ar->tcmd_rx_report == NULL || ar->tcmd_rx_report_len == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ NLA_PUT(skb, AR6K_TM_ATTR_DATA, ar->tcmd_rx_report_len,
+ ar->tcmd_rx_report);
+
+ kfree(ar->tcmd_rx_report);
+ ar->tcmd_rx_report = NULL;
+
+out:
+ up(&ar->arSem);
+
+ return ret;
+
+nla_put_failure:
+ ret = -ENOBUFS;
+ goto out;
+}
+
+static int ar6k_testmode_cmd(struct wiphy *wiphy, void *data, int len)
+{
+ struct ar6_softc *ar = wiphy_priv(wiphy);
+ struct nlattr *tb[AR6K_TM_ATTR_MAX + 1];
+ int err, buf_len, reply_len;
+ struct sk_buff *skb;
+ void *buf;
+
+ err = nla_parse(tb, AR6K_TM_ATTR_MAX, data, len,
+ ar6k_testmode_policy);
+ if (err)
+ return err;
+
+ if (!tb[AR6K_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[AR6K_TM_ATTR_CMD])) {
+ case AR6K_TM_CMD_TCMD:
+ if (!tb[AR6K_TM_ATTR_DATA])
+ return -EINVAL;
+
+ buf = nla_data(tb[AR6K_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[AR6K_TM_ATTR_DATA]);
+
+ wmi_test_cmd(ar->arWmi, buf, buf_len);
+
+ return 0;
+
+ break;
+ case AR6K_TM_CMD_RX_REPORT:
+ if (!tb[AR6K_TM_ATTR_DATA])
+ return -EINVAL;
+
+ buf = nla_data(tb[AR6K_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[AR6K_TM_ATTR_DATA]);
+
+ reply_len = nla_total_size(AR6K_TM_DATA_MAX_LEN);
+ skb = cfg80211_testmode_alloc_reply_skb(wiphy, reply_len);
+ if (!skb)
+ return -ENOMEM;
+
+ err = ar6000_testmode_rx_report(ar, buf, buf_len, skb);
+ if (err < 0) {
+ kfree_skb(skb);
+ return err;
+ }
+
+ return cfg80211_testmode_reply(skb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+#endif
static const
u32 cipher_suites[] = {
@@ -1607,6 +1766,28 @@ static int ar6k_get_station(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
+static int ar6k_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct ar6_softc *ar = ar6k_priv(netdev);
+ return wmi_setPmkid_cmd(ar->arWmi, pmksa->bssid, pmksa->pmkid, true);
+}
+
+static int ar6k_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_pmksa *pmksa)
+{
+ struct ar6_softc *ar = ar6k_priv(netdev);
+ return wmi_setPmkid_cmd(ar->arWmi, pmksa->bssid, pmksa->pmkid, false);
+}
+
+static int ar6k_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
+{
+ struct ar6_softc *ar = ar6k_priv(netdev);
+ if (ar->arConnected)
+ return wmi_setPmkid_cmd(ar->arWmi, ar->arBssid, NULL, false);
+ return 0;
+}
+
static struct
cfg80211_ops ar6k_cfg80211_ops = {
.change_virtual_intf = ar6k_cfg80211_change_iface,
@@ -1628,6 +1809,10 @@ cfg80211_ops ar6k_cfg80211_ops = {
.join_ibss = ar6k_cfg80211_join_ibss,
.leave_ibss = ar6k_cfg80211_leave_ibss,
.get_station = ar6k_get_station,
+ .set_pmksa = ar6k_set_pmksa,
+ .del_pmksa = ar6k_del_pmksa,
+ .flush_pmksa = ar6k_flush_pmksa,
+ CFG80211_TESTMODE_CMD(ar6k_testmode_cmd)
};
struct wireless_dev *
diff --git a/drivers/staging/ath6kl/os/linux/include/ar6000_drv.h b/drivers/staging/ath6kl/os/linux/include/ar6000_drv.h
index 22453b0873e..80cef77738f 100644
--- a/drivers/staging/ath6kl/os/linux/include/ar6000_drv.h
+++ b/drivers/staging/ath6kl/os/linux/include/ar6000_drv.h
@@ -52,7 +52,6 @@
#include "aggr_recv_api.h"
#include <host_version.h>
#include <linux/rtnetlink.h>
-#include <linux/init.h>
#include <linux/moduleparam.h>
#include "ar6000_api.h"
#ifdef CONFIG_HOST_TCMD_SUPPORT
@@ -546,15 +545,9 @@ struct ar6_softc {
s8 arMaxRetries;
u8 arPhyCapability;
#ifdef CONFIG_HOST_TCMD_SUPPORT
- u8 tcmdRxReport;
- u32 tcmdRxTotalPkt;
- s32 tcmdRxRssi;
- u32 tcmdPm;
u32 arTargetMode;
- u32 tcmdRxcrcErrPkt;
- u32 tcmdRxsecErrPkt;
- u16 tcmdRateCnt[TCMD_MAX_RATES];
- u16 tcmdRateCntShortGuard[TCMD_MAX_RATES];
+ void *tcmd_rx_report;
+ int tcmd_rx_report_len;
#endif
AR6000_WLAN_STATE arWlanState;
struct ar_node_mapping arNodeMap[MAX_NODE_NUM];
@@ -651,6 +644,15 @@ struct ar6_softc {
void *arApDev;
#endif
u8 arAutoAuthStage;
+
+ u8 *fw_otp;
+ size_t fw_otp_len;
+ u8 *fw;
+ size_t fw_len;
+ u8 *fw_patch;
+ size_t fw_patch_len;
+ u8 *fw_data;
+ size_t fw_data_len;
};
#ifdef CONFIG_AP_VIRTUAL_ADAPTER_SUPPORT
diff --git a/drivers/staging/ath6kl/os/linux/include/cfg80211.h b/drivers/staging/ath6kl/os/linux/include/cfg80211.h
index 1a6ae97c6b0..d5253207b19 100644
--- a/drivers/staging/ath6kl/os/linux/include/cfg80211.h
+++ b/drivers/staging/ath6kl/os/linux/include/cfg80211.h
@@ -41,6 +41,17 @@ void ar6k_cfg80211_disconnect_event(struct ar6_softc *ar, u8 reason,
void ar6k_cfg80211_tkip_micerr_event(struct ar6_softc *ar, u8 keyid, bool ismcast);
+#ifdef CONFIG_NL80211_TESTMODE
+void ar6000_testmode_rx_report_event(struct ar6_softc *ar, void *buf,
+ int buf_len);
+#else
+static inline void ar6000_testmode_rx_report_event(struct ar6_softc *ar,
+ void *buf, int buf_len)
+{
+}
+#endif
+
+
#endif /* _AR6K_CFG80211_H_ */
diff --git a/drivers/staging/ath6kl/os/linux/include/config_linux.h b/drivers/staging/ath6kl/os/linux/include/config_linux.h
index d4030e26b20..dbbe1a00b92 100644
--- a/drivers/staging/ath6kl/os/linux/include/config_linux.h
+++ b/drivers/staging/ath6kl/os/linux/include/config_linux.h
@@ -28,8 +28,6 @@
extern "C" {
#endif
-#include <linux/version.h>
-
/*
* Host side Test Command support
*/
diff --git a/drivers/staging/ath6kl/os/linux/include/ieee80211_ioctl.h b/drivers/staging/ath6kl/os/linux/include/ieee80211_ioctl.h
index 769a4801431..e6e96de3fc6 100644
--- a/drivers/staging/ath6kl/os/linux/include/ieee80211_ioctl.h
+++ b/drivers/staging/ath6kl/os/linux/include/ieee80211_ioctl.h
@@ -24,8 +24,6 @@
#ifndef _IEEE80211_IOCTL_H_
#define _IEEE80211_IOCTL_H_
-#include <linux/version.h>
-
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/drivers/staging/ath6kl/os/linux/include/osapi_linux.h b/drivers/staging/ath6kl/os/linux/include/osapi_linux.h
index 07078b49583..41f43730772 100644
--- a/drivers/staging/ath6kl/os/linux/include/osapi_linux.h
+++ b/drivers/staging/ath6kl/os/linux/include/osapi_linux.h
@@ -29,7 +29,6 @@
#ifdef __KERNEL__
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/staging/ath6kl/wmi/wmi.c b/drivers/staging/ath6kl/wmi/wmi.c
index 4a17f99ea14..c7b5e5cf9df 100644
--- a/drivers/staging/ath6kl/wmi/wmi.c
+++ b/drivers/staging/ath6kl/wmi/wmi.c
@@ -41,6 +41,7 @@
#include "a_debug.h"
#include "dbglog_api.h"
#include "roaming.h"
+#include "cfg80211.h"
#define ATH_DEBUG_WMI ATH_DEBUG_MAKE_MODULE_MASK(0)
@@ -4465,10 +4466,9 @@ wmi_verify_tspec_params(WMI_CREATE_PSTREAM_CMD *pCmd, int tspecCompliance)
static int
wmi_tcmd_test_report_rx(struct wmi_t *wmip, u8 *datap, int len)
{
+ ar6000_testmode_rx_report_event(wmip->wmi_devt, datap, len);
- A_DPRINTF(DBG_WMI, (DBGFMT "Enter\n", DBGARG));
-
- return 0;
+ return 0;
}
#endif /* CONFIG_HOST_TCMD_SUPPORT*/
diff --git a/drivers/staging/bcm/Debug.h b/drivers/staging/bcm/Debug.h
index 3138729cf34..420382d1cac 100644
--- a/drivers/staging/bcm/Debug.h
+++ b/drivers/staging/bcm/Debug.h
@@ -131,7 +131,7 @@ DriverEntry.c, bcmfwup.c, ChipDetectTask.c, HaltnReset.c, InterfaceDDR.c */
// TODO - put PHS_SEND in Tx PHS_RECEIVE in Rx path ?
#define PHS_SEND (OTHERS<<16)
-#define PHS_RECIEVE (OTHERS<<17)
+#define PHS_RECEIVE (OTHERS<<17)
#define PHS_MODULE (OTHERS<<18)
#define INTF_INIT (OTHERS<<19)
diff --git a/drivers/staging/bcm/InterfaceRx.c b/drivers/staging/bcm/InterfaceRx.c
index 806ef5d1852..d495828a731 100644
--- a/drivers/staging/bcm/InterfaceRx.c
+++ b/drivers/staging/bcm/InterfaceRx.c
@@ -157,7 +157,7 @@ static void read_bulk_callback(struct urb *urb)
{
/* Moving ahead by ETH_HLEN to the data ptr as received from FW */
skb_pull(skb, ETH_HLEN);
- PHSRecieve(Adapter, pLeader->Vcid, skb, &skb->len,
+ PHSReceive(Adapter, pLeader->Vcid, skb, &skb->len,
NULL,bHeaderSupressionEnabled);
if(!Adapter->PackInfo[QueueIndex].bEthCSSupport)
@@ -229,7 +229,7 @@ static int ReceiveRcb(PS_INTERFACE_ADAPTER psIntfAdapter, PUSB_RCB pRcb)
/*
Function: InterfaceRx
-Description: This is the hardware specific Function for Recieveing
+Description: This is the hardware specific Function for Receiving
data packet/control packets from the device.
Input parameters: IN PMINI_ADAPTER Adapter - Miniport Adapter Context
diff --git a/drivers/staging/bcm/Macros.h b/drivers/staging/bcm/Macros.h
index feb351578c8..916bebb7ed5 100644
--- a/drivers/staging/bcm/Macros.h
+++ b/drivers/staging/bcm/Macros.h
@@ -176,8 +176,8 @@ enum enLinkStatus {
PHY_SYNC_ACHIVED = 2,
LINKUP_IN_PROGRESS = 3,
LINKUP_DONE = 4,
- DREG_RECIEVED = 5,
- LINK_STATUS_RESET_RECIEVED = 6,
+ DREG_RECEIVED = 5,
+ LINK_STATUS_RESET_RECEIVED = 6,
PERIODIC_WAKE_UP_NOTIFICATION_FRM_FW = 7,
LINK_SHUTDOWN_REQ_FROM_FIRMWARE = 8,
COMPLETE_WAKE_UP_NOTIFICATION_FRM_FW =9
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
index c5003b62234..c5b3a3666bc 100644
--- a/drivers/staging/bcm/Misc.c
+++ b/drivers/staging/bcm/Misc.c
@@ -1447,16 +1447,10 @@ static void convertEndian(B_UINT8 rwFlag, PUINT puiBuffer, UINT uiByteCount)
int rdm(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t sSize)
{
- INT uiRetVal =0;
-
- uiRetVal = Adapter->interface_rdm(Adapter->pvInterfaceAdapter,
+ return Adapter->interface_rdm(Adapter->pvInterfaceAdapter,
uiAddress, pucBuff, sSize);
-
- if(uiRetVal < 0)
- return uiRetVal;
-
- return uiRetVal;
}
+
int wrm(PMINI_ADAPTER Adapter, UINT uiAddress, PCHAR pucBuff, size_t sSize)
{
int iRetVal;
diff --git a/drivers/staging/bcm/PHSModule.c b/drivers/staging/bcm/PHSModule.c
index d1ca1912a74..4aa2b71a40e 100644
--- a/drivers/staging/bcm/PHSModule.c
+++ b/drivers/staging/bcm/PHSModule.c
@@ -209,7 +209,7 @@ int PHSTransmit(PMINI_ADAPTER Adapter,
return STATUS_SUCCESS;
}
-int PHSRecieve(PMINI_ADAPTER Adapter,
+int PHSReceive(PMINI_ADAPTER Adapter,
USHORT usVcid,
struct sk_buff *packet,
UINT *punPacketLen,
@@ -223,7 +223,7 @@ int PHSRecieve(PMINI_ADAPTER Adapter,
UINT TotalBytesAdded = 0;
if(!bHeaderSuppressionEnabled)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECIEVE,DBG_LVL_ALL,"\nPhs Disabled for incoming packet");
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,"\nPhs Disabled for incoming packet");
return ulPhsStatus;
}
@@ -238,7 +238,7 @@ int PHSRecieve(PMINI_ADAPTER Adapter,
&nTotalsupressedPktHdrBytes,
&nStandardPktHdrLen);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECIEVE,DBG_LVL_ALL,"\nSupressed PktHdrLen : 0x%x Restored PktHdrLen : 0x%x",
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,"\nSupressed PktHdrLen : 0x%x Restored PktHdrLen : 0x%x",
nTotalsupressedPktHdrBytes,nStandardPktHdrLen);
if(ulPhsStatus != STATUS_PHS_COMPRESSED)
@@ -786,14 +786,14 @@ ULONG PhsDeCompress(IN void* pvContext,
if(pDeviceExtension == NULL)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECIEVE,DBG_LVL_ALL,"Invalid Device Extension\n");
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,"Invalid Device Extension\n");
return ERR_PHS_INVALID_DEVICE_EXETENSION;
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECIEVE,DBG_LVL_ALL,"Restoring header \n");
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,"Restoring header\n");
phsi = *((unsigned char *)(pvInputBuffer));
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECIEVE,DBG_LVL_ALL,"PHSI To Be Used For restore : %x \n",phsi);
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,"PHSI To Be Used For restore : %x\n",phsi);
if(phsi == UNCOMPRESSED_PACKET )
{
return STATUS_PHS_NOCOMPRESSION;
@@ -804,7 +804,7 @@ ULONG PhsDeCompress(IN void* pvContext,
uiVcid,&pstServiceFlowEntry);
if(nSFIndex == PHS_INVALID_TABLE_INDEX)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECIEVE,DBG_LVL_ALL,"SFID Match Failed During Lookup\n");
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,"SFID Match Failed During Lookup\n");
return ERR_SF_MATCH_FAIL;
}
@@ -1417,7 +1417,7 @@ int phs_decompress(unsigned char *in_buf,unsigned char *out_buf,
int in_buf_len = *header_size-1;
PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
in_buf++;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECIEVE,DBG_LVL_ALL,"====>\n");
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,"====>\n");
*header_size = 0;
if((decomp_phs_rules == NULL ))
@@ -1425,7 +1425,7 @@ int phs_decompress(unsigned char *in_buf,unsigned char *out_buf,
tmp_memb = decomp_phs_rules;
- //BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECIEVE,DBG_LVL_ALL,"\nDECOMP:In phs_decompress PHSI 1 %d",phsi));
+ //BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,"\nDECOMP:In phs_decompress PHSI 1 %d",phsi));
//*header_size = tmp_memb->u8PHSFLength;
phss = tmp_memb->u8PHSS;
phsf = tmp_memb->u8PHSF;
@@ -1433,7 +1433,7 @@ int phs_decompress(unsigned char *in_buf,unsigned char *out_buf,
if(phss > MAX_PHS_LENGTHS)
phss = MAX_PHS_LENGTHS;
- //BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECIEVE,DBG_LVL_ALL,"\nDECOMP:In phs_decompress PHSI %d phss %d index %d",phsi,phss,index));
+ //BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,"\nDECOMP:In phs_decompress PHSI %d phss %d index %d",phsi,phss,index));
while((phss > 0) && (size < in_buf_len))
{
bit = ((*phsm << i)& SUPPRESS);
@@ -1441,13 +1441,13 @@ int phs_decompress(unsigned char *in_buf,unsigned char *out_buf,
if(bit == SUPPRESS)
{
*out_buf = *phsf;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECIEVE,DBG_LVL_ALL,"\nDECOMP:In phss %d phsf %d ouput %d",
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,"\nDECOMP:In phss %d phsf %d output %d",
phss,*phsf,*out_buf);
}
else
{
*out_buf = *in_buf;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECIEVE,DBG_LVL_ALL,"\nDECOMP:In phss %d input %d ouput %d",
+ BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,"\nDECOMP:In phss %d input %d output %d",
phss,*in_buf,*out_buf);
in_buf++;
size++;
diff --git a/drivers/staging/bcm/PHSModule.h b/drivers/staging/bcm/PHSModule.h
index 0dd05a7c55d..c629585d0a8 100644
--- a/drivers/staging/bcm/PHSModule.h
+++ b/drivers/staging/bcm/PHSModule.h
@@ -9,7 +9,7 @@ int PHSTransmit(PMINI_ADAPTER Adapter,
PUINT PacketLen,
UCHAR bEthCSSupport);
-int PHSRecieve(PMINI_ADAPTER Adapter,
+int PHSReceive(PMINI_ADAPTER Adapter,
USHORT usVcid,
struct sk_buff *packet,
UINT *punPacketLen,
diff --git a/drivers/staging/bcm/headers.h b/drivers/staging/bcm/headers.h
index 1148e5e22eb..947d0632568 100644
--- a/drivers/staging/bcm/headers.h
+++ b/drivers/staging/bcm/headers.h
@@ -20,25 +20,22 @@
#include <linux/file.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
-#include <net/ip.h>
#include <linux/wait.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
-
-#include <linux/version.h>
#include <linux/stddef.h>
-#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/unistd.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
-#include <asm/uaccess.h>
#include <linux/kthread.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/usb.h>
+#include <asm/uaccess.h>
+#include <net/ip.h>
#include "Typedefs.h"
#include "Version.h"
@@ -61,7 +58,6 @@
#include "Queue.h"
#include "vendorspecificextn.h"
-
#include "InterfaceMacros.h"
#include "InterfaceAdapter.h"
#include "InterfaceIsr.h"
diff --git a/drivers/staging/bcm/sort.c b/drivers/staging/bcm/sort.c
index fc5d07aec3d..63c966a0254 100644
--- a/drivers/staging/bcm/sort.c
+++ b/drivers/staging/bcm/sort.c
@@ -1,4 +1,5 @@
#include "headers.h"
+#include <linux/sort.h>
/*
* File Name: sort.c
@@ -10,54 +11,42 @@
* Copyright (c) 2007 Beceem Communications Pvt. Ltd
*/
+static int compare_packet_info(void const *a, void const *b)
+{
+ PacketInfo const *pa = a;
+ PacketInfo const *pb = b;
+
+ if (!pa->bValid || !pb->bValid)
+ return 0;
+
+ return pa->u8TrafficPriority - pb->u8TrafficPriority;
+}
+
VOID SortPackInfo(PMINI_ADAPTER Adapter)
{
- UINT nIndex1;
- UINT nIndex2;
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "<=======");
-
- for(nIndex1 = 0; nIndex1 < NO_OF_QUEUES -2 ; nIndex1++)
- {
- for(nIndex2 = nIndex1 + 1 ; nIndex2 < NO_OF_QUEUES -1 ; nIndex2++)
- {
- if(Adapter->PackInfo[nIndex1].bValid && Adapter->PackInfo[nIndex2].bValid)
- {
- if(Adapter->PackInfo[nIndex2].u8TrafficPriority <
- Adapter->PackInfo[nIndex1].u8TrafficPriority)
- {
- PacketInfo stTemppackInfo = Adapter->PackInfo[nIndex2];
- Adapter->PackInfo[nIndex2] = Adapter->PackInfo[nIndex1];
- Adapter->PackInfo[nIndex1] = stTemppackInfo;
-
- }
- }
- }
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
+ DBG_LVL_ALL, "<=======");
+
+ sort(Adapter->PackInfo, NO_OF_QUEUES, sizeof(PacketInfo),
+ compare_packet_info, NULL);
+}
+
+static int compare_classifiers(void const *a, void const *b)
+{
+ S_CLASSIFIER_RULE const *pa = a;
+ S_CLASSIFIER_RULE const *pb = b;
+
+ if (!pa->bUsed || !pb->bUsed)
+ return 0;
+
+ return pa->u8ClassifierRulePriority - pb->u8ClassifierRulePriority;
}
VOID SortClassifiers(PMINI_ADAPTER Adapter)
{
- UINT nIndex1;
- UINT nIndex2;
-
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "<=======");
-
- for(nIndex1 = 0; nIndex1 < MAX_CLASSIFIERS -1 ; nIndex1++)
- {
- for(nIndex2 = nIndex1 + 1 ; nIndex2 < MAX_CLASSIFIERS ; nIndex2++)
- {
- if(Adapter->astClassifierTable[nIndex1].bUsed && Adapter->astClassifierTable[nIndex2].bUsed)
- {
- if(Adapter->astClassifierTable[nIndex2].u8ClassifierRulePriority <
- Adapter->astClassifierTable[nIndex1].u8ClassifierRulePriority)
- {
- S_CLASSIFIER_RULE stTempClassifierRule = Adapter->astClassifierTable[nIndex2];
- Adapter->astClassifierTable[nIndex2] = Adapter->astClassifierTable[nIndex1];
- Adapter->astClassifierTable[nIndex1] = stTempClassifierRule;
-
- }
- }
- }
- }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG,
+ DBG_LVL_ALL, "<=======");
+
+ sort(Adapter->astClassifierTable, MAX_CLASSIFIERS,
+ sizeof(S_CLASSIFIER_RULE), compare_classifiers, NULL);
}
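
Note: the sort.c rewrite above replaces two open-coded O(n^2) swap loops with the kernel's sort() helper from <linux/sort.h>, which uses the same comparator contract as C's qsort() plus an optional swap callback (NULL selects the default). A self-contained userspace sketch of the comparator-driven approach, with a simplified stand-in for PacketInfo:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdbool.h>

	struct pack_info {
		bool valid;			/* stand-in for bValid */
		unsigned int traffic_priority;	/* stand-in for u8TrafficPriority */
	};

	/* Entries that are not valid compare equal, so only live queues are ordered. */
	static int compare_pack_info(const void *a, const void *b)
	{
		const struct pack_info *pa = a;
		const struct pack_info *pb = b;

		if (!pa->valid || !pb->valid)
			return 0;

		return (int)pa->traffic_priority - (int)pb->traffic_priority;
	}

	int main(void)
	{
		struct pack_info q[] = {
			{ true, 3 }, { true, 1 }, { false, 0 }, { true, 2 },
		};
		size_t i, n = sizeof(q) / sizeof(q[0]);

		qsort(q, n, sizeof(q[0]), compare_pack_info);

		for (i = 0; i < n; i++)
			printf("valid=%d prio=%u\n", (int)q[i].valid, q[i].traffic_priority);
		return 0;
	}

One caveat: returning 0 for unused slots leaves their relative order up to the implementation, and the kernel's sort() is a heap sort (not stable), whereas the original nested loops only swapped pairs in which both entries were valid/used.
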
diff --git a/drivers/staging/brcm80211/Kconfig b/drivers/staging/brcm80211/Kconfig
index f4cf9b23481..379cf16e89f 100644
--- a/drivers/staging/brcm80211/Kconfig
+++ b/drivers/staging/brcm80211/Kconfig
@@ -7,6 +7,7 @@ config BRCMSMAC
default n
depends on PCI
depends on WLAN && MAC80211
+ depends on X86 || MIPS
select BRCMUTIL
select FW_LOADER
select CRC_CCITT
@@ -20,6 +21,7 @@ config BRCMFMAC
default n
depends on MMC
depends on WLAN && CFG80211
+ depends on X86 || MIPS
select BRCMUTIL
select FW_LOADER
select WIRELESS_EXT
diff --git a/drivers/staging/brcm80211/Makefile b/drivers/staging/brcm80211/Makefile
index e7b3f27847c..8b01f5e7ba2 100644
--- a/drivers/staging/brcm80211/Makefile
+++ b/drivers/staging/brcm80211/Makefile
@@ -17,8 +17,8 @@
# common flags
subdir-ccflags-y := -DBCMDMA32
-subdir-ccflags-$(CONFIG_BRCMDBG) += -DBCMDBG -DBCMDBG_ASSERT
+subdir-ccflags-$(CONFIG_BRCMDBG) += -DBCMDBG
-obj-$(CONFIG_BRCMUTIL) += util/
+obj-$(CONFIG_BRCMUTIL) += brcmutil/
obj-$(CONFIG_BRCMFMAC) += brcmfmac/
obj-$(CONFIG_BRCMSMAC) += brcmsmac/
diff --git a/drivers/staging/brcm80211/README b/drivers/staging/brcm80211/README
index 8ad558675bd..bb86b1b3e58 100644
--- a/drivers/staging/brcm80211/README
+++ b/drivers/staging/brcm80211/README
@@ -1,64 +1 @@
-Broadcom brcmsmac (mac80211-based softmac PCIe) and brcmfmac (SDIO) drivers.
-
-Completely open source host drivers, no binary object files.
-
-Support for the following chips:
-===============================
-
- brcmsmac (PCIe)
- Name Device ID
- BCM4313 0x4727
- BCM43224 0x4353
- BCM43225 0x4357
-
- brcmfmac (SDIO)
- Name
- BCM4329
-
-Both brcmsmac and brcmfmac drivers require firmware files that need to be
-separately downloaded.
-
-Firmware
-======================
-Firmware is available from the Linux firmware repository at:
-
- git://git.kernel.org/pub/scm/linux/kernel/git/dwmw2/linux-firmware.git
- http://git.kernel.org/?p=linux/kernel/git/dwmw2/linux-firmware.git
- https://git.kernel.org/?p=linux/kernel/git/dwmw2/linux-firmware.git
-
-
-===============================================================
-Broadcom brcmsmac driver
-===============================================================
-- Support for both 32 and 64 bit Linux kernels
-
-
-Firmware installation
-======================
-Copy brcm/bcm43xx-0.fw and brcm/bcm43xx_hdr-0.fw to
-/lib/firmware/brcm (or wherever firmware is normally installed
-on your system).
-
-
-===============================================================
-Broadcom brcmfmac driver
-===============================================================
-- Support for 32 bit Linux kernel, 64 bit untested
-
-
-Firmware installation
-======================
-Copy brcm/bcm4329-fullmac-4.bin and brcm/bcm4329-fullmac-4.txt
-to /lib/firmware/brcm (or wherever firmware is normally installed on your
-system).
-
-
-Contact Info:
-=============
-Brett Rudley brudley@broadcom.com
-Henry Ptasinski henryp@broadcom.com
-Dowan Kim dowan@broadcom.com
-Roland Vossen rvossen@broadcom.com
-Arend van Spriel arend@broadcom.com
-
-For more info, refer to: http://linuxwireless.org/en/users/Drivers/brcm80211
+refer to: http://linuxwireless.org/en/users/Drivers/brcm80211
diff --git a/drivers/staging/brcm80211/TODO b/drivers/staging/brcm80211/TODO
index e9c1393a2b9..e2e2ef9bd7a 100644
--- a/drivers/staging/brcm80211/TODO
+++ b/drivers/staging/brcm80211/TODO
@@ -2,14 +2,12 @@ To Do List for Broadcom Mac80211 driver before getting in mainline
Bugs
====
-- Oops on AMPDU traffic, to be solved by new ucode (currently under test)
+- none known at this moment
-brcmfmac and brcmsmac
+brcmfmac
=====================
-- ASSERTS not allowed in mainline, replace by warning + error handling
-- Replace printk and WL_ERROR() with proper routines
+- ASSERTS deprecated in mainline, replace by warning + error handling
-brcmfmac
+brcm80211 info page
=====================
-- Replace driver's proprietary ssb interface with generic kernel ssb module
-- Build and test on 64 bit linux kernel
+http://linuxwireless.org/en/users/Drivers/brcm80211
diff --git a/drivers/staging/brcm80211/brcmfmac/Makefile b/drivers/staging/brcm80211/brcmfmac/Makefile
index c5ec562c364..da3c8057590 100644
--- a/drivers/staging/brcm80211/brcmfmac/Makefile
+++ b/drivers/staging/brcm80211/brcmfmac/Makefile
@@ -16,23 +16,11 @@
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ccflags-y := \
- -DARP_OFFLOAD_SUPPORT \
- -DBCMLXSDMMC \
- -DBCMPLATFORM_BUS \
- -DBCMSDIO \
- -DBDC \
- -DBRCM_FULLMAC \
- -DDHD_FIRSTREAD=64 \
- -DDHD_SCHED \
- -DDHD_SDALIGN=64 \
- -DEMBEDDED_PLATFORM \
- -DMAX_HDR_READ=64 \
- -DMMC_SDIO_ABORT \
- -DPKT_FILTER_SUPPORT \
- -DSHOW_EVENTS \
- -DTOE
+ -DBRCMF_FIRSTREAD=64 \
+ -DBRCMF_SDALIGN=64 \
+ -DMAX_HDR_READ=64
-ccflags-$(CONFIG_BRCMDBG) += -DDHD_DEBUG
+ccflags-$(CONFIG_BRCMDBG) += -DSHOW_EVENTS
ccflags-y += \
-Idrivers/staging/brcm80211/brcmfmac \
@@ -40,17 +28,12 @@ ccflags-y += \
DHDOFILES = \
wl_cfg80211.o \
- wl_iw.o \
dhd_cdc.o \
dhd_common.o \
- dhd_custom_gpio.o \
dhd_sdio.o \
dhd_linux.o \
- dhd_linux_sched.o \
bcmsdh.o \
- bcmsdh_linux.o \
- bcmsdh_sdmmc.o \
- bcmsdh_sdmmc_linux.o
+ bcmsdh_sdmmc.o
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += $(DHDOFILES)
diff --git a/drivers/staging/brcm80211/brcmfmac/README b/drivers/staging/brcm80211/brcmfmac/README
deleted file mode 100644
index 139597f9cb0..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/README
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
diff --git a/drivers/staging/brcm80211/brcmfmac/aiutils.c b/drivers/staging/brcm80211/brcmfmac/aiutils.c
deleted file mode 100644
index e64808648ce..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/aiutils.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../util/aiutils.c"
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmcdc.h b/drivers/staging/brcm80211/brcmfmac/bcmcdc.h
deleted file mode 100644
index ed4c4a517ec..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/bcmcdc.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#include <linux/if_ether.h>
-
-typedef struct cdc_ioctl {
- u32 cmd; /* ioctl command value */
- u32 len; /* lower 16: output buflen; upper 16:
- input buflen (excludes header) */
- u32 flags; /* flag defns given below */
- u32 status; /* status code returned from the device */
-} cdc_ioctl_t;
-
-/* Max valid buffer size that can be sent to the dongle */
-#define CDC_MAX_MSG_SIZE (ETH_FRAME_LEN+ETH_FCS_LEN)
-
-/* len field is divided into input and output buffer lengths */
-#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF /* maximum or expected
- response length, */
- /* excluding IOCTL header */
-#define CDCL_IOC_OUTLEN_SHIFT 0
-#define CDCL_IOC_INLEN_MASK 0xFFFF0000 /* input buffer length,
- excluding IOCTL header */
-#define CDCL_IOC_INLEN_SHIFT 16
-
-/* CDC flag definitions */
-#define CDCF_IOC_ERROR 0x01 /* 0=success, 1=ioctl cmd failed */
-#define CDCF_IOC_SET 0x02 /* 0=get, 1=set cmd */
-#define CDCF_IOC_IF_MASK 0xF000 /* I/F index */
-#define CDCF_IOC_IF_SHIFT 12
-#define CDCF_IOC_ID_MASK 0xFFFF0000 /* used to uniquely id an ioctl
- req/resp pairing */
-#define CDCF_IOC_ID_SHIFT 16 /* # of bits of shift for ID Mask */
-
-#define CDC_IOC_IF_IDX(flags) \
- (((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)
-#define CDC_IOC_ID(flags) \
- (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
-
-#define CDC_GET_IF_IDX(hdr) \
- ((int)((((hdr)->flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT))
-#define CDC_SET_IF_IDX(hdr, idx) \
- ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | \
- ((idx) << CDCF_IOC_IF_SHIFT)))
-
-/*
- * BDC header
- *
- * The BDC header is used on data packets to convey priority across USB.
- */
-
-#define BDC_HEADER_LEN 4
-
-#define BDC_PROTO_VER 1 /* Protocol version */
-
-#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
-#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
-
-#define BDC_FLAG__UNUSED 0x03 /* Unassigned */
-#define BDC_FLAG_SUM_GOOD 0x04 /* Dongle has verified good
- RX checksums */
-#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums */
-
-#define BDC_PRIORITY_MASK 0x7
-
-#define BDC_FLAG2_FC_FLAG 0x10 /* flag to indicate if pkt contains */
- /* FLOW CONTROL info only */
-#define BDC_PRIORITY_FC_SHIFT 4 /* flow control info shift */
-
-#define BDC_FLAG2_IF_MASK 0x0f /* APSTA: interface on which the
- packet was received */
-#define BDC_FLAG2_IF_SHIFT 0
-
-#define BDC_GET_IF_IDX(hdr) \
- ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
-#define BDC_SET_IF_IDX(hdr, idx) \
- ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | \
- ((idx) << BDC_FLAG2_IF_SHIFT)))
-
-struct bdc_header {
- u8 flags; /* Flags */
- u8 priority; /* 802.1d Priority 0:2 bits, 4:7 flow
- control info for usb */
- u8 flags2;
- u8 rssi;
-};
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmchip.h b/drivers/staging/brcm80211/brcmfmac/bcmchip.h
index c0d4c3bf6d4..d7d3afd5a10 100644
--- a/drivers/staging/brcm80211/brcmfmac/bcmchip.h
+++ b/drivers/staging/brcm80211/brcmfmac/bcmchip.h
@@ -17,12 +17,6 @@
#ifndef _bcmchip_h_
#define _bcmchip_h_
-/* Core reg address translation */
-#define CORE_CC_REG(base, field) (base + offsetof(chipcregs_t, field))
-#define CORE_BUS_REG(base, field) (base + offsetof(sdpcmd_regs_t, field))
-#define CORE_SB(base, field) \
- (base + SBCONFIGOFF + offsetof(sbconfig_t, field))
-
/* bcm4329 */
/* SDIO device core, ID 0x829 */
#define BCM4329_CORE_BUS_BASE 0x18011000
@@ -31,5 +25,8 @@
/* ARM Cortex M3 core, ID 0x82a */
#define BCM4329_CORE_ARM_BASE 0x18002000
#define BCM4329_RAMSIZE 0x48000
+/* firmware name */
+#define BCM4329_FW_NAME "brcm/bcm4329-fullmac-4.bin"
+#define BCM4329_NV_NAME "brcm/bcm4329-fullmac-4.txt"
#endif /* _bcmchip_h_ */
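
Note: bcmchip.h now carries the firmware and NVRAM file names matching the install paths from the trimmed README (brcm/bcm4329-fullmac-4.bin and .txt under /lib/firmware). A hedged sketch of how such a name is typically consumed through the kernel firmware loader; load_bcm4329_fw() and the dev pointer are illustrative, not this driver's actual code path:

	#include <linux/firmware.h>
	#include <linux/device.h>

	/* Sketch only: request the image named by BCM4329_FW_NAME and hand its
	 * contents to a (hypothetical) download routine. */
	static int load_bcm4329_fw(struct device *dev)
	{
		const struct firmware *fw;
		int err;

		err = request_firmware(&fw, BCM4329_FW_NAME, dev);
		if (err)
			return err;	/* e.g. file not present under /lib/firmware */

		/* ... push fw->data (fw->size bytes) to the dongle here ... */

		release_firmware(fw);
		return 0;
	}
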
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdbus.h b/drivers/staging/brcm80211/brcmfmac/bcmsdbus.h
deleted file mode 100644
index 53c32915acc..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdbus.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _sdio_api_h_
-#define _sdio_api_h_
-
-#define SDIOH_API_RC_SUCCESS (0x00)
-#define SDIOH_API_RC_FAIL (0x01)
-#define SDIOH_API_SUCCESS(status) (status == 0)
-
-#define SDIOH_READ 0 /* Read request */
-#define SDIOH_WRITE 1 /* Write request */
-
-#define SDIOH_DATA_FIX 0 /* Fixed addressing */
-#define SDIOH_DATA_INC 1 /* Incremental addressing */
-
-#define SDIOH_CMD_TYPE_NORMAL 0 /* Normal command */
-#define SDIOH_CMD_TYPE_APPEND 1 /* Append command */
-#define SDIOH_CMD_TYPE_CUTTHRU 2 /* Cut-through command */
-
-#define SDIOH_DATA_PIO 0 /* PIO mode */
-#define SDIOH_DATA_DMA 1 /* DMA mode */
-
-typedef int SDIOH_API_RC;
-
-/* SDio Host structure */
-typedef struct sdioh_info sdioh_info_t;
-
-/* callback function, taking one arg */
-typedef void (*sdioh_cb_fn_t) (void *);
-
-/* attach, return handler on success, NULL if failed.
- * The handler shall be provided by all subsequent calls. No local cache
- * cfghdl points to the starting address of pci device mapped memory
- */
-extern sdioh_info_t *sdioh_attach(void *cfghdl, uint irq);
-extern SDIOH_API_RC sdioh_detach(sdioh_info_t *si);
-extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si,
- sdioh_cb_fn_t fn, void *argh);
-extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
-
-/* query whether SD interrupt is enabled or not */
-extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *si, bool *onoff);
-
-/* enable or disable SD interrupt */
-extern SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *si, bool enable_disable);
-
-#if defined(DHD_DEBUG)
-extern bool sdioh_interrupt_pending(sdioh_info_t *si);
-#endif
-
-extern int sdioh_claim_host_and_lock(sdioh_info_t *si);
-extern int sdioh_release_host_and_unlock(sdioh_info_t *si);
-
-/* read or write one byte using cmd52 */
-extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc,
- uint addr, u8 *byte);
-
-/* read or write 2/4 bytes using cmd53 */
-extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *si, uint cmd_type,
- uint rw, uint fnc, uint addr,
- u32 *word, uint nbyte);
-
-/* read or write any buffer using cmd53 */
-extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *si, uint pio_dma,
- uint fix_inc, uint rw, uint fnc_num,
- u32 addr, uint regwidth,
- u32 buflen, u8 *buffer,
- struct sk_buff *pkt);
-
-/* get cis data */
-extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, u8 *cis,
- u32 length);
-
-extern SDIOH_API_RC sdioh_cfg_read(sdioh_info_t *si, uint fuc, u32 addr,
- u8 *data);
-extern SDIOH_API_RC sdioh_cfg_write(sdioh_info_t *si, uint fuc, u32 addr,
- u8 *data);
-
-/* query number of io functions */
-extern uint sdioh_query_iofnum(sdioh_info_t *si);
-
-/* handle iovars */
-extern int sdioh_iovar_op(sdioh_info_t *si, const char *name,
- void *params, int plen, void *arg, int len, bool set);
-
-/* Issue abort to the specified function and clear controller as needed */
-extern int sdioh_abort(sdioh_info_t *si, uint fnc);
-
-/* Start and Stop SDIO without re-enumerating the SD card. */
-extern int sdioh_start(sdioh_info_t *si, int stage);
-extern int sdioh_stop(sdioh_info_t *si);
-
-/* Reset and re-initialize the device */
-extern int sdioh_sdio_reset(sdioh_info_t *si);
-
-/* Helper function */
-void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
-
-#endif /* _sdio_api_h_ */
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh.c
index 3750fcf5a87..f4e72ed126b 100644
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/staging/brcm80211/brcmfmac/bcmsdh.c
@@ -13,29 +13,59 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-/* ****************** BCMSDH Interface Functions *************************** */
+/* ****************** SDIO CARD Interface Functions **************************/
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/pci.h>
#include <linux/pci_ids.h>
-#include <bcmdefs.h>
-#include <bcmdevs.h>
-#include <bcmutils.h>
-#include <hndsoc.h>
-
-#include <bcmsdh.h> /* BRCM API for SDIO
- clients (such as wl, dhd) */
-#include <bcmsdbus.h> /* common SDIO/controller interface */
-#include <sbsdio.h> /* BRCM sdio device core */
-
-#include <sdio.h> /* sdio spec */
-#include "dngl_stats.h"
+#include <linux/sched.h>
+#include <linux/completion.h>
+
+#include <defs.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include <soc.h>
#include "dhd.h"
+#include "dhd_bus.h"
+#include "sdio_host.h"
#define SDIOH_API_ACCESS_RETRY_LIMIT 2
-const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
-struct bcmsdh_info {
+#define BRCMF_SD_ERROR_VAL 0x0001 /* Error */
+#define BRCMF_SD_INFO_VAL 0x0002 /* Info */
+
+
+#ifdef BCMDBG
+#define BRCMF_SD_ERROR(x) \
+ do { \
+ if ((brcmf_sdio_msglevel & BRCMF_SD_ERROR_VAL) && \
+ net_ratelimit()) \
+ printk x; \
+ } while (0)
+#define BRCMF_SD_INFO(x) \
+ do { \
+ if ((brcmf_sdio_msglevel & BRCMF_SD_INFO_VAL) && \
+ net_ratelimit()) \
+ printk x; \
+ } while (0)
+#else /* BCMDBG */
+#define BRCMF_SD_ERROR(x)
+#define BRCMF_SD_INFO(x)
+#endif /* BCMDBG */
+
+/* debugging macros */
+#define SDLX_MSG(x)
+
+#define SDIOH_CMD_TYPE_NORMAL 0 /* Normal command */
+#define SDIOH_CMD_TYPE_APPEND 1 /* Append command */
+#define SDIOH_CMD_TYPE_CUTTHRU 2 /* Cut-through command */
+
+#define SDIOH_DATA_PIO 0 /* PIO mode */
+#define SDIOH_DATA_DMA 1 /* DMA mode */
+
+struct brcmf_sdio_card {
bool init_success; /* underlying driver successfully attached */
void *sdioh; /* handler for sdioh */
u32 vendevid; /* Target Vendor and Device ID on SD bus */
@@ -43,282 +73,232 @@ struct bcmsdh_info {
reg_read/reg_write call */
u32 sbwad; /* Save backplane window address */
};
+
+/**
+ * SDIO Host Controller info
+ */
+struct sdio_hc {
+ struct sdio_hc *next;
+ struct device *dev; /* platform device handle */
+ void *regs; /* SDIO Host Controller address */
+ struct brcmf_sdio_card *card;
+ void *ch;
+ unsigned int oob_irq;
+ unsigned long oob_flags; /* OOB Host specification
+ as edge and etc */
+ bool oob_irq_registered;
+};
+
/* local copy of bcm sd handler */
-bcmsdh_info_t *l_bcmsdh;
+static struct brcmf_sdio_card *l_card;
-#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
-extern int sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
+const uint brcmf_sdio_msglevel = BRCMF_SD_ERROR_VAL;
-void bcmsdh_enable_hw_oob_intr(bcmsdh_info_t *sdh, bool enable)
-{
- sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
-}
-#endif
+static struct sdio_hc *sdhcinfo;
+
+/* driver info, initialized when brcmf_sdio_register is called */
+static struct brcmf_sdioh_driver drvinfo = { NULL, NULL };
+
+/* Module parameters specific to each host-controller driver */
+
+module_param(sd_msglevel, uint, 0);
-bcmsdh_info_t *bcmsdh_attach(void *cfghdl, void **regsva, uint irq)
+extern uint sd_f2_blocksize;
+module_param(sd_f2_blocksize, int, 0);
+
+/* forward declarations */
+int brcmf_sdio_probe(struct device *dev);
+EXPORT_SYMBOL(brcmf_sdio_probe);
+
+int brcmf_sdio_remove(struct device *dev);
+EXPORT_SYMBOL(brcmf_sdio_remove);
+
+struct brcmf_sdio_card*
+brcmf_sdcard_attach(void *cfghdl, u32 *regsva, uint irq)
{
- bcmsdh_info_t *bcmsdh;
+ struct brcmf_sdio_card *card;
- bcmsdh = kzalloc(sizeof(bcmsdh_info_t), GFP_ATOMIC);
- if (bcmsdh == NULL) {
- BCMSDH_ERROR(("bcmsdh_attach: out of memory"));
+ card = kzalloc(sizeof(struct brcmf_sdio_card), GFP_ATOMIC);
+ if (card == NULL) {
+ BRCMF_SD_ERROR(("sdcard_attach: out of memory"));
return NULL;
}
/* save the handler locally */
- l_bcmsdh = bcmsdh;
+ l_card = card;
- bcmsdh->sdioh = sdioh_attach(cfghdl, irq);
- if (!bcmsdh->sdioh) {
- bcmsdh_detach(bcmsdh);
+ card->sdioh = brcmf_sdioh_attach(cfghdl, irq);
+ if (!card->sdioh) {
+ brcmf_sdcard_detach(card);
return NULL;
}
- bcmsdh->init_success = true;
+ card->init_success = true;
- *regsva = (u32 *) SI_ENUM_BASE;
+ *regsva = SI_ENUM_BASE;
/* Report the BAR, to fix if needed */
- bcmsdh->sbwad = SI_ENUM_BASE;
- return bcmsdh;
+ card->sbwad = SI_ENUM_BASE;
+ return card;
}
-int bcmsdh_detach(void *sdh)
+int brcmf_sdcard_detach(struct brcmf_sdio_card *card)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
-
- if (bcmsdh != NULL) {
- if (bcmsdh->sdioh) {
- sdioh_detach(bcmsdh->sdioh);
- bcmsdh->sdioh = NULL;
+ if (card != NULL) {
+ if (card->sdioh) {
+ brcmf_sdioh_detach(card->sdioh);
+ card->sdioh = NULL;
}
- kfree(bcmsdh);
+ kfree(card);
}
- l_bcmsdh = NULL;
+ l_card = NULL;
return 0;
}
int
-bcmsdh_iovar_op(void *sdh, const char *name,
+brcmf_sdcard_iovar_op(struct brcmf_sdio_card *card, const char *name,
void *params, int plen, void *arg, int len, bool set)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
-}
-
-bool bcmsdh_intr_query(void *sdh)
-{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
- bool on;
-
- ASSERT(bcmsdh);
- status = sdioh_interrupt_query(bcmsdh->sdioh, &on);
- if (SDIOH_API_SUCCESS(status))
- return false;
- else
- return on;
-}
-
-int bcmsdh_intr_enable(void *sdh)
-{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
- ASSERT(bcmsdh);
-
- status = sdioh_interrupt_set(bcmsdh->sdioh, true);
- return SDIOH_API_SUCCESS(status) ? 0 : -EIO;
+ return brcmf_sdioh_iovar_op(card->sdioh, name, params, plen, arg,
+ len, set);
}
-int bcmsdh_intr_disable(void *sdh)
+int brcmf_sdcard_intr_enable(struct brcmf_sdio_card *card)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
- ASSERT(bcmsdh);
-
- status = sdioh_interrupt_set(bcmsdh->sdioh, false);
- return SDIOH_API_SUCCESS(status) ? 0 : -EIO;
+ return brcmf_sdioh_interrupt_set(card->sdioh, true);
}
-int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+int brcmf_sdcard_intr_disable(struct brcmf_sdio_card *card)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
- ASSERT(bcmsdh);
-
- status = sdioh_interrupt_register(bcmsdh->sdioh, fn, argh);
- return SDIOH_API_SUCCESS(status) ? 0 : -EIO;
+ return brcmf_sdioh_interrupt_set(card->sdioh, false);
}
-int bcmsdh_intr_dereg(void *sdh)
+int brcmf_sdcard_intr_reg(struct brcmf_sdio_card *card,
+ void (*fn)(void *), void *argh)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
- ASSERT(bcmsdh);
-
- status = sdioh_interrupt_deregister(bcmsdh->sdioh);
- return SDIOH_API_SUCCESS(status) ? 0 : -EIO;
+ return brcmf_sdioh_interrupt_register(card->sdioh, fn, argh);
}
-#if defined(DHD_DEBUG)
-bool bcmsdh_intr_pending(void *sdh)
+int brcmf_sdcard_intr_dereg(struct brcmf_sdio_card *card)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
-
- ASSERT(sdh);
- return sdioh_interrupt_pending(bcmsdh->sdioh);
+ return brcmf_sdioh_interrupt_deregister(card->sdioh);
}
-#endif
-int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
+u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_card *card, uint fnc_num, u32 addr,
+ int *err)
{
- ASSERT(sdh);
-
- /* don't support yet */
- return -ENOTSUPP;
-}
-
-u8 bcmsdh_cfg_read(void *sdh, uint fnc_num, u32 addr, int *err)
-{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
-#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ int status;
s32 retry = 0;
-#endif
u8 data = 0;
- if (!bcmsdh)
- bcmsdh = l_bcmsdh;
-
- ASSERT(bcmsdh->init_success);
+ if (!card)
+ card = l_card;
-#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
do {
if (retry) /* wait for 1 ms till bus get settled down */
udelay(1000);
-#endif
status =
- sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr,
+ brcmf_sdioh_cfg_read(card->sdioh, fnc_num, addr,
(u8 *) &data);
-#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
- } while (!SDIOH_API_SUCCESS(status)
+ } while (status != 0
&& (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
-#endif
if (err)
- *err = (SDIOH_API_SUCCESS(status) ? 0 : -EIO);
+ *err = status;
- BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, u8data = 0x%x\n",
+ BRCMF_SD_INFO(("%s:fun = %d, addr = 0x%x, u8data = 0x%x\n",
__func__, fnc_num, addr, data));
return data;
}
void
-bcmsdh_cfg_write(void *sdh, uint fnc_num, u32 addr, u8 data, int *err)
+brcmf_sdcard_cfg_write(struct brcmf_sdio_card *card, uint fnc_num, u32 addr,
+ u8 data, int *err)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
-#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
+ int status;
s32 retry = 0;
-#endif
- if (!bcmsdh)
- bcmsdh = l_bcmsdh;
+ if (!card)
+ card = l_card;
- ASSERT(bcmsdh->init_success);
-
-#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
do {
if (retry) /* wait for 1 ms till bus get settled down */
udelay(1000);
-#endif
status =
- sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr,
+ brcmf_sdioh_cfg_write(card->sdioh, fnc_num, addr,
(u8 *) &data);
-#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
- } while (!SDIOH_API_SUCCESS(status)
+ } while (status != 0
&& (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
-#endif
if (err)
- *err = SDIOH_API_SUCCESS(status) ? 0 : -EIO;
+ *err = status;
- BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, u8data = 0x%x\n",
+ BRCMF_SD_INFO(("%s:fun = %d, addr = 0x%x, u8data = 0x%x\n",
__func__, fnc_num, addr, data));
}
-u32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, u32 addr, int *err)
+u32 brcmf_sdcard_cfg_read_word(struct brcmf_sdio_card *card, uint fnc_num,
+ u32 addr, int *err)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
+ int status;
u32 data = 0;
- if (!bcmsdh)
- bcmsdh = l_bcmsdh;
-
- ASSERT(bcmsdh->init_success);
+ if (!card)
+ card = l_card;
- status =
- sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL, SDIOH_READ,
- fnc_num, addr, &data, 4);
+ status = brcmf_sdioh_request_word(card->sdioh, SDIOH_CMD_TYPE_NORMAL,
+ SDIOH_READ, fnc_num, addr, &data, 4);
if (err)
- *err = (SDIOH_API_SUCCESS(status) ? 0 : -EIO);
+ *err = status;
- BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, u32data = 0x%x\n",
+ BRCMF_SD_INFO(("%s:fun = %d, addr = 0x%x, u32data = 0x%x\n",
__func__, fnc_num, addr, data));
return data;
}
void
-bcmsdh_cfg_write_word(void *sdh, uint fnc_num, u32 addr, u32 data,
- int *err)
+brcmf_sdcard_cfg_write_word(struct brcmf_sdio_card *card, uint fnc_num,
+ u32 addr, u32 data, int *err)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
-
- if (!bcmsdh)
- bcmsdh = l_bcmsdh;
+ int status;
- ASSERT(bcmsdh->init_success);
+ if (!card)
+ card = l_card;
status =
- sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+ brcmf_sdioh_request_word(card->sdioh, SDIOH_CMD_TYPE_NORMAL,
SDIOH_WRITE, fnc_num, addr, &data, 4);
if (err)
- *err = (SDIOH_API_SUCCESS(status) ? 0 : -EIO);
+ *err = status;
- BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, u32data = 0x%x\n",
+ BRCMF_SD_INFO(("%s:fun = %d, addr = 0x%x, u32data = 0x%x\n",
__func__, fnc_num, addr, data));
}
-int bcmsdh_cis_read(void *sdh, uint func, u8 * cis, uint length)
+int brcmf_sdcard_cis_read(struct brcmf_sdio_card *card, uint func, u8 * cis,
+ uint length)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
+ int status;
u8 *tmp_buf, *tmp_ptr;
u8 *ptr;
bool ascii = func & ~0xf;
func &= 0x7;
- if (!bcmsdh)
- bcmsdh = l_bcmsdh;
+ if (!card)
+ card = l_card;
- ASSERT(bcmsdh->init_success);
- ASSERT(cis);
- ASSERT(length <= SBSDIO_CIS_SIZE_LIMIT);
-
- status = sdioh_cis_read(bcmsdh->sdioh, func, cis, length);
+ status = brcmf_sdioh_cis_read(card->sdioh, func, cis, length);
if (ascii) {
/* Move binary bits to tmp and format them
into the provided buffer. */
tmp_buf = kmalloc(length, GFP_ATOMIC);
if (tmp_buf == NULL) {
- BCMSDH_ERROR(("%s: out of memory\n", __func__));
+ BRCMF_SD_ERROR(("%s: out of memory\n", __func__));
return -ENOMEM;
}
memcpy(tmp_buf, cis, length);
@@ -331,60 +311,60 @@ int bcmsdh_cis_read(void *sdh, uint func, u8 * cis, uint length)
kfree(tmp_buf);
}
- return SDIOH_API_SUCCESS(status) ? 0 : -EIO;
+ return status;
}
-static int bcmsdhsdio_set_sbaddr_window(void *sdh, u32 address)
+static int
+brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_card *card, u32 address)
{
int err = 0;
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
(address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
if (!err)
- bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
- (address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_SBADDRMID,
+ (address >> 16) & SBSDIO_SBADDRMID_MASK,
+ &err);
if (!err)
- bcmsdh_cfg_write(bcmsdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
- (address >> 24) & SBSDIO_SBADDRHIGH_MASK,
- &err);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_SBADDRHIGH,
+ (address >> 24) & SBSDIO_SBADDRHIGH_MASK,
+ &err);
return err;
}
-u32 bcmsdh_reg_read(void *sdh, u32 addr, uint size)
+u32 brcmf_sdcard_reg_read(struct brcmf_sdio_card *card, u32 addr, uint size)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
+ int status;
u32 word = 0;
uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
- BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, ", __func__, addr));
-
- if (!bcmsdh)
- bcmsdh = l_bcmsdh;
+ BRCMF_SD_INFO(("%s:fun = 1, addr = 0x%x, ", __func__, addr));
- ASSERT(bcmsdh->init_success);
+ if (!card)
+ card = l_card;
- if (bar0 != bcmsdh->sbwad) {
- if (bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0))
+ if (bar0 != card->sbwad) {
+ if (brcmf_sdcard_set_sbaddr_window(card, bar0))
return 0xFFFFFFFF;
- bcmsdh->sbwad = bar0;
+ card->sbwad = bar0;
}
addr &= SBSDIO_SB_OFT_ADDR_MASK;
if (size == 4)
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
- status = sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+ status = brcmf_sdioh_request_word(card->sdioh, SDIOH_CMD_TYPE_NORMAL,
SDIOH_READ, SDIO_FUNC_1, addr, &word, size);
- bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+ card->regfail = (status != 0);
- BCMSDH_INFO(("u32data = 0x%x\n", word));
+ BRCMF_SD_INFO(("u32data = 0x%x\n", word));
/* if ok, return appropriately masked word */
- if (SDIOH_API_SUCCESS(status)) {
+ if (status == 0) {
switch (size) {
case sizeof(u8):
return word & 0xff;
@@ -393,90 +373,86 @@ u32 bcmsdh_reg_read(void *sdh, u32 addr, uint size)
case sizeof(u32):
return word;
default:
- bcmsdh->regfail = true;
+ card->regfail = true;
}
}
/* otherwise, bad sdio access or invalid size */
- BCMSDH_ERROR(("%s: error reading addr 0x%04x size %d\n", __func__,
+ BRCMF_SD_ERROR(("%s: error reading addr 0x%04x size %d\n", __func__,
addr, size));
return 0xFFFFFFFF;
}
-u32 bcmsdh_reg_write(void *sdh, u32 addr, uint size, u32 data)
+u32 brcmf_sdcard_reg_write(struct brcmf_sdio_card *card, u32 addr, uint size,
+ u32 data)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
+ int status;
uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
- BCMSDH_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
+ BRCMF_SD_INFO(("%s:fun = 1, addr = 0x%x, uint%ddata = 0x%x\n",
__func__, addr, size * 8, data));
- if (!bcmsdh)
- bcmsdh = l_bcmsdh;
-
- ASSERT(bcmsdh->init_success);
+ if (!card)
+ card = l_card;
- if (bar0 != bcmsdh->sbwad) {
- err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0);
+ if (bar0 != card->sbwad) {
+ err = brcmf_sdcard_set_sbaddr_window(card, bar0);
if (err)
return err;
- bcmsdh->sbwad = bar0;
+ card->sbwad = bar0;
}
addr &= SBSDIO_SB_OFT_ADDR_MASK;
if (size == 4)
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
status =
- sdioh_request_word(bcmsdh->sdioh, SDIOH_CMD_TYPE_NORMAL,
+ brcmf_sdioh_request_word(card->sdioh, SDIOH_CMD_TYPE_NORMAL,
SDIOH_WRITE, SDIO_FUNC_1, addr, &data, size);
- bcmsdh->regfail = !(SDIOH_API_SUCCESS(status));
+ card->regfail = (status != 0);
- if (SDIOH_API_SUCCESS(status))
+ if (status == 0)
return 0;
- BCMSDH_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
+ BRCMF_SD_ERROR(("%s: error writing 0x%08x to addr 0x%04x size %d\n",
__func__, data, addr, size));
return 0xFFFFFFFF;
}
-bool bcmsdh_regfail(void *sdh)
+bool brcmf_sdcard_regfail(struct brcmf_sdio_card *card)
{
- return ((bcmsdh_info_t *) sdh)->regfail;
+ return card->regfail;
}
int
-bcmsdh_recv_buf(void *sdh, u32 addr, uint fn, uint flags,
- u8 *buf, uint nbytes, struct sk_buff *pkt,
- bcmsdh_cmplt_fn_t complete, void *handle)
-{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
+brcmf_sdcard_recv_buf(struct brcmf_sdio_card *card, u32 addr, uint fn,
+ uint flags,
+ u8 *buf, uint nbytes, struct sk_buff *pkt,
+ void (*complete)(void *handle, int status,
+ bool sync_waiting),
+ void *handle)
+{
+ int status;
uint incr_fix;
uint width;
uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
- ASSERT(bcmsdh);
- ASSERT(bcmsdh->init_success);
-
- BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+ BRCMF_SD_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
__func__, fn, addr, nbytes));
/* Async not implemented yet */
- ASSERT(!(flags & SDIO_REQ_ASYNC));
if (flags & SDIO_REQ_ASYNC)
return -ENOTSUPP;
- if (bar0 != bcmsdh->sbwad) {
- err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0);
+ if (bar0 != card->sbwad) {
+ err = brcmf_sdcard_set_sbaddr_window(card, bar0);
if (err)
return err;
- bcmsdh->sbwad = bar0;
+ card->sbwad = bar0;
}
addr &= SBSDIO_SB_OFT_ADDR_MASK;
@@ -486,42 +462,37 @@ bcmsdh_recv_buf(void *sdh, u32 addr, uint fn, uint flags,
if (width == 4)
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
- status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
- SDIOH_READ, fn, addr, width, nbytes, buf,
- pkt);
+ status = brcmf_sdioh_request_buffer(card->sdioh, SDIOH_DATA_PIO,
+ incr_fix, SDIOH_READ, fn, addr, width, nbytes, buf, pkt);
- return SDIOH_API_SUCCESS(status) ? 0 : -EIO;
+ return status;
}
int
-bcmsdh_send_buf(void *sdh, u32 addr, uint fn, uint flags,
- u8 *buf, uint nbytes, void *pkt,
- bcmsdh_cmplt_fn_t complete, void *handle)
+brcmf_sdcard_send_buf(struct brcmf_sdio_card *card, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes, void *pkt,
+ void (*complete)(void *handle, int status,
+ bool sync_waiting),
+ void *handle)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
uint incr_fix;
uint width;
uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
- ASSERT(bcmsdh);
- ASSERT(bcmsdh->init_success);
-
- BCMSDH_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
+ BRCMF_SD_INFO(("%s:fun = %d, addr = 0x%x, size = %d\n",
__func__, fn, addr, nbytes));
/* Async not implemented yet */
- ASSERT(!(flags & SDIO_REQ_ASYNC));
if (flags & SDIO_REQ_ASYNC)
return -ENOTSUPP;
- if (bar0 != bcmsdh->sbwad) {
- err = bcmsdhsdio_set_sbaddr_window(bcmsdh, bar0);
+ if (bar0 != card->sbwad) {
+ err = brcmf_sdcard_set_sbaddr_window(card, bar0);
if (err)
return err;
- bcmsdh->sbwad = bar0;
+ card->sbwad = bar0;
}
addr &= SBSDIO_SB_OFT_ADDR_MASK;
@@ -531,101 +502,141 @@ bcmsdh_send_buf(void *sdh, u32 addr, uint fn, uint flags,
if (width == 4)
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
- status = sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, incr_fix,
- SDIOH_WRITE, fn, addr, width, nbytes, buf,
- pkt);
-
- return SDIOH_API_SUCCESS(status) ? 0 : -EIO;
+ return brcmf_sdioh_request_buffer(card->sdioh, SDIOH_DATA_PIO,
+ incr_fix, SDIOH_WRITE, fn, addr, width, nbytes, buf, pkt);
}
-int bcmsdh_rwdata(void *sdh, uint rw, u32 addr, u8 *buf, uint nbytes)
+int brcmf_sdcard_rwdata(struct brcmf_sdio_card *card, uint rw, u32 addr,
+ u8 *buf, uint nbytes)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- SDIOH_API_RC status;
-
- ASSERT(bcmsdh);
- ASSERT(bcmsdh->init_success);
- ASSERT((addr & SBSDIO_SBWINDOW_MASK) == 0);
-
addr &= SBSDIO_SB_OFT_ADDR_MASK;
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
- status =
- sdioh_request_buffer(bcmsdh->sdioh, SDIOH_DATA_PIO, SDIOH_DATA_INC,
- (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
- addr, 4, nbytes, buf, NULL);
-
- return SDIOH_API_SUCCESS(status) ? 0 : -EIO;
+ return brcmf_sdioh_request_buffer(card->sdioh, SDIOH_DATA_PIO,
+ SDIOH_DATA_INC, (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
+ addr, 4, nbytes, buf, NULL);
}
-int bcmsdh_abort(void *sdh, uint fn)
+int brcmf_sdcard_abort(struct brcmf_sdio_card *card, uint fn)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
-
- return sdioh_abort(bcmsdh->sdioh, fn);
+ return brcmf_sdioh_abort(card->sdioh, fn);
}
-int bcmsdh_start(void *sdh, int stage)
+int brcmf_sdcard_query_device(struct brcmf_sdio_card *card)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
-
- return sdioh_start(bcmsdh->sdioh, stage);
+ card->vendevid = (PCI_VENDOR_ID_BROADCOM << 16) | 0;
+ return card->vendevid;
}
-int bcmsdh_stop(void *sdh)
+u32 brcmf_sdcard_cur_sbwad(struct brcmf_sdio_card *card)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
+ if (!card)
+ card = l_card;
- return sdioh_stop(bcmsdh->sdioh);
+ return card->sbwad;
}
-int bcmsdh_query_device(void *sdh)
+int brcmf_sdio_probe(struct device *dev)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
- bcmsdh->vendevid = (PCI_VENDOR_ID_BROADCOM << 16) | 0;
- return bcmsdh->vendevid;
-}
+ struct sdio_hc *sdhc = NULL;
+ u32 regs = 0;
+ struct brcmf_sdio_card *card = NULL;
+ int irq = 0;
+ u32 vendevid;
+ unsigned long irq_flags = 0;
-uint bcmsdh_query_iofnum(void *sdh)
-{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
+ /* allocate SDIO Host Controller state info */
+ sdhc = kzalloc(sizeof(struct sdio_hc), GFP_ATOMIC);
+ if (!sdhc) {
+ SDLX_MSG(("%s: out of memory\n", __func__));
+ goto err;
+ }
+ sdhc->dev = (void *)dev;
- if (!bcmsdh)
- bcmsdh = l_bcmsdh;
+ card = brcmf_sdcard_attach((void *)0, &regs, irq);
+ if (!card) {
+ SDLX_MSG(("%s: attach failed\n", __func__));
+ goto err;
+ }
- return sdioh_query_iofnum(bcmsdh->sdioh);
-}
+ sdhc->card = card;
+ sdhc->oob_irq = irq;
+ sdhc->oob_flags = irq_flags;
+ sdhc->oob_irq_registered = false; /* to make sure.. */
+
+ /* chain SDIO Host Controller info together */
+ sdhc->next = sdhcinfo;
+ sdhcinfo = sdhc;
+ /* Read the vendor/device ID from the CIS */
+ vendevid = brcmf_sdcard_query_device(card);
+
+ /* try to attach to the target device */
+ sdhc->ch = drvinfo.attach((vendevid >> 16), (vendevid & 0xFFFF),
+ 0, 0, 0, 0, regs, card);
+ if (!sdhc->ch) {
+ SDLX_MSG(("%s: device attach failed\n", __func__));
+ goto err;
+ }
-int bcmsdh_reset(bcmsdh_info_t *sdh)
-{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
+ return 0;
- return sdioh_sdio_reset(bcmsdh->sdioh);
-}
+ /* error handling */
+err:
+ if (sdhc) {
+ if (sdhc->card)
+ brcmf_sdcard_detach(sdhc->card);
+ kfree(sdhc);
+ }
-void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh)
-{
- ASSERT(sdh);
- return sdh->sdioh;
+ return -ENODEV;
}
-/* Function to pass device-status bits to DHD. */
-u32 bcmsdh_get_dstatus(void *sdh)
+int brcmf_sdio_remove(struct device *dev)
{
+ struct sdio_hc *sdhc, *prev;
+
+ sdhc = sdhcinfo;
+ drvinfo.detach(sdhc->ch);
+ brcmf_sdcard_detach(sdhc->card);
+ /* find the SDIO Host Controller state for this pdev
+ and take it out from the list */
+ for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
+ if (sdhc->dev == (void *)dev) {
+ if (prev)
+ prev->next = sdhc->next;
+ else
+ sdhcinfo = NULL;
+ break;
+ }
+ prev = sdhc;
+ }
+ if (!sdhc) {
+ SDLX_MSG(("%s: failed\n", __func__));
+ return 0;
+ }
+
+ /* release SDIO Host Controller info */
+ kfree(sdhc);
return 0;
}
-u32 bcmsdh_cur_sbwad(void *sdh)
+int brcmf_sdio_register(struct brcmf_sdioh_driver *driver)
{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *) sdh;
+ drvinfo = *driver;
- if (!bcmsdh)
- bcmsdh = l_bcmsdh;
+ SDLX_MSG(("Linux Kernel SDIO/MMC Driver\n"));
+ return brcmf_sdio_function_init();
+}
- return bcmsdh->sbwad;
+void brcmf_sdio_unregister(void)
+{
+ brcmf_sdio_function_cleanup();
}
-void bcmsdh_chipinfo(void *sdh, u32 chip, u32 chiprev)
+void brcmf_sdio_wdtmr_enable(bool enable)
{
- return;
+ if (enable)
+ brcmf_sdbrcm_wd_timer(sdhcinfo->ch, brcmf_watchdog_ms);
+ else
+ brcmf_sdbrcm_wd_timer(sdhcinfo->ch, 0);
}
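
Note: the rewritten brcmf_sdcard_cfg_read()/_write() above keep the bounded retry loop (SDIOH_API_ACCESS_RETRY_LIMIT, with roughly 1 ms of settle time between attempts) but drop the surrounding #ifdefs and ASSERT() calls and report a plain int status instead of mapping through SDIOH_API_SUCCESS(). A generic sketch of that retry pattern; io_read_byte() and delay_1ms() are hypothetical stand-ins for brcmf_sdioh_cfg_read() and udelay(1000):

	#define ACCESS_RETRY_LIMIT 2

	static unsigned char cfg_read_with_retry(void *handle, unsigned int addr,
			int *err,
			int (*io_read_byte)(void *h, unsigned int addr, unsigned char *val),
			void (*delay_1ms)(void))
	{
		unsigned char data = 0;
		int status;
		int retry = 0;

		do {
			if (retry)		/* give the bus ~1 ms to settle */
				delay_1ms();
			status = io_read_byte(handle, addr, &data);
		} while (status != 0 && retry++ < ACCESS_RETRY_LIMIT);

		if (err)
			*err = status;	/* 0 on success, negative errno otherwise */
		return data;
	}
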
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c
deleted file mode 100644
index 465f623760f..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh_linux.c
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/**
- * @file bcmsdh_linux.c
- */
-
-#define __UNDEF_NO_VERSION__
-
-#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/completion.h>
-
-#include <pcicfg.h>
-#include <bcmdefs.h>
-#include <bcmdevs.h>
-#include <bcmutils.h>
-
-#if defined(OOB_INTR_ONLY)
-#include <linux/irq.h>
-extern void dhdsdio_isr(void *args);
-#include <dngl_stats.h>
-#include <dhd.h>
-#endif /* defined(OOB_INTR_ONLY) */
-#if defined(CONFIG_MACH_SANDGATE2G) || defined(CONFIG_MACH_LOGICPD_PXA270)
-#if !defined(BCMPLATFORM_BUS)
-#define BCMPLATFORM_BUS
-#endif /* !defined(BCMPLATFORM_BUS) */
-
-#include <linux/platform_device.h>
-#endif /* CONFIG_MACH_SANDGATE2G */
-
-#include "dngl_stats.h"
-#include "dhd.h"
-
-/**
- * SDIO Host Controller info
- */
-typedef struct bcmsdh_hc bcmsdh_hc_t;
-
-struct bcmsdh_hc {
- bcmsdh_hc_t *next;
-#ifdef BCMPLATFORM_BUS
- struct device *dev; /* platform device handle */
-#else
- struct pci_dev *dev; /* pci device handle */
-#endif /* BCMPLATFORM_BUS */
- void *regs; /* SDIO Host Controller address */
- bcmsdh_info_t *sdh; /* SDIO Host Controller handle */
- void *ch;
- unsigned int oob_irq;
- unsigned long oob_flags; /* OOB Host specifiction
- as edge and etc */
- bool oob_irq_registered;
-#if defined(OOB_INTR_ONLY)
- spinlock_t irq_lock;
-#endif
-};
-static bcmsdh_hc_t *sdhcinfo;
-
-/* driver info, initialized when bcmsdh_register is called */
-static bcmsdh_driver_t drvinfo = { NULL, NULL };
-
-/* debugging macros */
-#define SDLX_MSG(x)
-
-/**
- * Checks to see if vendor and device IDs match a supported SDIO Host Controller.
- */
-bool bcmsdh_chipmatch(u16 vendor, u16 device)
-{
- /* Add other vendors and devices as required */
-
-#ifdef BCMSDIOH_STD
- /* Check for Arasan host controller */
- if (vendor == VENDOR_SI_IMAGE)
- return true;
-
- /* Check for BRCM 27XX Standard host controller */
- if (device == BCM27XX_SDIOH_ID && vendor == PCI_VENDOR_ID_BROADCOM)
- return true;
-
- /* Check for BRCM Standard host controller */
- if (device == SDIOH_FPGA_ID && vendor == PCI_VENDOR_ID_BROADCOM)
- return true;
-
- /* Check for TI PCIxx21 Standard host controller */
- if (device == PCIXX21_SDIOH_ID && vendor == VENDOR_TI)
- return true;
-
- if (device == PCIXX21_SDIOH0_ID && vendor == VENDOR_TI)
- return true;
-
- /* Ricoh R5C822 Standard SDIO Host */
- if (device == R5C822_SDIOH_ID && vendor == VENDOR_RICOH)
- return true;
-
- /* JMicron Standard SDIO Host */
- if (device == JMICRON_SDIOH_ID && vendor == VENDOR_JMICRON)
- return true;
-#endif /* BCMSDIOH_STD */
-#ifdef BCMSDIOH_SPI
- /* This is the PciSpiHost. */
- if (device == SPIH_FPGA_ID && vendor == PCI_VENDOR_ID_BROADCOM) {
- return true;
- }
-#endif /* BCMSDIOH_SPI */
-
- return false;
-}
-
-#if defined(BCMPLATFORM_BUS)
-#if defined(BCMLXSDMMC)
-/* forward declarations */
-int bcmsdh_probe(struct device *dev);
-EXPORT_SYMBOL(bcmsdh_probe);
-
-int bcmsdh_remove(struct device *dev);
-EXPORT_SYMBOL(bcmsdh_remove);
-
-#else
-/* forward declarations */
-static int __devinit bcmsdh_probe(struct device *dev);
-static int __devexit bcmsdh_remove(struct device *dev);
-#endif /* BCMLXSDMMC */
-
-#ifndef BCMLXSDMMC
-static
-#endif /* BCMLXSDMMC */
-int bcmsdh_probe(struct device *dev)
-{
- bcmsdh_hc_t *sdhc = NULL;
- unsigned long regs = 0;
- bcmsdh_info_t *sdh = NULL;
-#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
- struct platform_device *pdev;
- struct resource *r;
-#endif /* BCMLXSDMMC */
- int irq = 0;
- u32 vendevid;
- unsigned long irq_flags = 0;
-
-#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
- pdev = to_platform_device(dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
- if (!r || irq == NO_IRQ)
- return -ENXIO;
-#endif /* BCMLXSDMMC */
-
-#if defined(OOB_INTR_ONLY)
-#ifdef HW_OOB
- irq_flags =
- IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL |
- IORESOURCE_IRQ_SHAREABLE;
-#else
- irq_flags = IRQF_TRIGGER_FALLING;
-#endif /* HW_OOB */
- irq = dhd_customer_oob_irq_map(&irq_flags);
- if (irq < 0) {
- SDLX_MSG(("%s: Host irq is not defined\n", __func__));
- return 1;
- }
-#endif /* defined(OOB_INTR_ONLY) */
- /* allocate SDIO Host Controller state info */
- sdhc = kzalloc(sizeof(bcmsdh_hc_t), GFP_ATOMIC);
- if (!sdhc) {
- SDLX_MSG(("%s: out of memory\n", __func__));
- goto err;
- }
- sdhc->dev = (void *)dev;
-
-#ifdef BCMLXSDMMC
- sdh = bcmsdh_attach((void *)0, (void **)&regs, irq);
- if (!sdh) {
- SDLX_MSG(("%s: bcmsdh_attach failed\n", __func__));
- goto err;
- }
-#else
- sdh = bcmsdh_attach((void *)r->start, (void **)&regs, irq);
- if (!sdh) {
- SDLX_MSG(("%s: bcmsdh_attach failed\n", __func__));
- goto err;
- }
-#endif /* BCMLXSDMMC */
- sdhc->sdh = sdh;
- sdhc->oob_irq = irq;
- sdhc->oob_flags = irq_flags;
- sdhc->oob_irq_registered = false; /* to make sure.. */
-#if defined(OOB_INTR_ONLY)
- spin_lock_init(&sdhc->irq_lock);
-#endif
-
- /* chain SDIO Host Controller info together */
- sdhc->next = sdhcinfo;
- sdhcinfo = sdhc;
- /* Read the vendor/device ID from the CIS */
- vendevid = bcmsdh_query_device(sdh);
-
- /* try to attach to the target device */
- sdhc->ch = drvinfo.attach((vendevid >> 16), (vendevid & 0xFFFF),
- 0, 0, 0, 0, (void *)regs, sdh);
- if (!sdhc->ch) {
- SDLX_MSG(("%s: device attach failed\n", __func__));
- goto err;
- }
-
- return 0;
-
- /* error handling */
-err:
- if (sdhc) {
- if (sdhc->sdh)
- bcmsdh_detach(sdhc->sdh);
- kfree(sdhc);
- }
-
- return -ENODEV;
-}
-
-#ifndef BCMLXSDMMC
-static
-#endif /* BCMLXSDMMC */
-int bcmsdh_remove(struct device *dev)
-{
- bcmsdh_hc_t *sdhc, *prev;
-
- sdhc = sdhcinfo;
- drvinfo.detach(sdhc->ch);
- bcmsdh_detach(sdhc->sdh);
- /* find the SDIO Host Controller state for this pdev
- and take it out from the list */
- for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
- if (sdhc->dev == (void *)dev) {
- if (prev)
- prev->next = sdhc->next;
- else
- sdhcinfo = NULL;
- break;
- }
- prev = sdhc;
- }
- if (!sdhc) {
- SDLX_MSG(("%s: failed\n", __func__));
- return 0;
- }
-
- /* release SDIO Host Controller info */
- kfree(sdhc);
-
-#if !defined(BCMLXSDMMC)
- dev_set_drvdata(dev, NULL);
-#endif /* !defined(BCMLXSDMMC) */
-
- return 0;
-}
-#endif /* BCMPLATFORM_BUS */
-
-extern int sdio_function_init(void);
-
-int bcmsdh_register(bcmsdh_driver_t *driver)
-{
- drvinfo = *driver;
-
- SDLX_MSG(("Linux Kernel SDIO/MMC Driver\n"));
- return sdio_function_init();
-}
-
-extern void sdio_function_cleanup(void);
-
-void bcmsdh_unregister(void)
-{
- sdio_function_cleanup();
-}
-
-#if defined(OOB_INTR_ONLY)
-void bcmsdh_oob_intr_set(bool enable)
-{
- static bool curstate = 1;
- unsigned long flags;
-
- spin_lock_irqsave(&sdhcinfo->irq_lock, flags);
- if (curstate != enable) {
- if (enable)
- enable_irq(sdhcinfo->oob_irq);
- else
- disable_irq_nosync(sdhcinfo->oob_irq);
- curstate = enable;
- }
- spin_unlock_irqrestore(&sdhcinfo->irq_lock, flags);
-}
-
-static irqreturn_t wlan_oob_irq(int irq, void *dev_id)
-{
- dhd_pub_t *dhdp;
-
- dhdp = (dhd_pub_t *) dev_get_drvdata(sdhcinfo->dev);
-
- bcmsdh_oob_intr_set(0);
-
- if (dhdp == NULL) {
- SDLX_MSG(("Out of band GPIO interrupt fired way too early\n"));
- return IRQ_HANDLED;
- }
-
- dhdsdio_isr((void *)dhdp->bus);
-
- return IRQ_HANDLED;
-}
-
-int bcmsdh_register_oob_intr(void *dhdp)
-{
- int error = 0;
-
- SDLX_MSG(("%s Enter\n", __func__));
-
- sdhcinfo->oob_flags =
- IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL |
- IORESOURCE_IRQ_SHAREABLE;
- dev_set_drvdata(sdhcinfo->dev, dhdp);
-
- if (!sdhcinfo->oob_irq_registered) {
- SDLX_MSG(("%s IRQ=%d Type=%X\n", __func__,
- (int)sdhcinfo->oob_irq, (int)sdhcinfo->oob_flags));
- /* Refer to customer Host IRQ docs about
- proper irqflags definition */
- error =
- request_irq(sdhcinfo->oob_irq, wlan_oob_irq,
- sdhcinfo->oob_flags, "bcmsdh_sdmmc", NULL);
- if (error)
- return -ENODEV;
-
- irq_set_irq_wake(sdhcinfo->oob_irq, 1);
- sdhcinfo->oob_irq_registered = true;
- }
-
- return 0;
-}
-
-void bcmsdh_unregister_oob_intr(void)
-{
- SDLX_MSG(("%s: Enter\n", __func__));
-
- irq_set_irq_wake(sdhcinfo->oob_irq, 0);
- disable_irq(sdhcinfo->oob_irq); /* just in case.. */
- free_irq(sdhcinfo->oob_irq, NULL);
- sdhcinfo->oob_irq_registered = false;
-}
-#endif /* defined(OOB_INTR_ONLY) */
-/* Module parameters specific to each host-controller driver */
-
-extern uint sd_msglevel; /* Debug message level */
-module_param(sd_msglevel, uint, 0);
-
-extern uint sd_power; /* 0 = SD Power OFF,
- 1 = SD Power ON. */
-module_param(sd_power, uint, 0);
-
-extern uint sd_clock; /* SD Clock Control, 0 = SD Clock OFF,
- 1 = SD Clock ON */
-module_param(sd_clock, uint, 0);
-
-extern uint sd_divisor; /* Divisor (-1 means external clock) */
-module_param(sd_divisor, uint, 0);
-
-extern uint sd_sdmode; /* Default is SD4, 0=SPI, 1=SD1, 2=SD4 */
-module_param(sd_sdmode, uint, 0);
-
-extern uint sd_hiok; /* Ok to use hi-speed mode */
-module_param(sd_hiok, uint, 0);
-
-extern uint sd_f2_blocksize;
-module_param(sd_f2_blocksize, int, 0);
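
Note: both the new bcmsdh.c above and the bcmsdh_sdmmc.c hunk below define level-gated debug macros (BRCMF_SD_ERROR()/BRCMF_SD_INFO() and sd_err()/sd_info()/...) that compile away entirely when BCMDBG is unset and rate-limit their printk output when it is set. A standalone sketch of the pattern; the MYDRV_ names, the DEBUG define and the printf body are illustrative (the driver itself uses BCMDBG, printk and net_ratelimit()):

	#include <stdio.h>

	#define MYDRV_ERROR_VAL 0x0001	/* Error */
	#define MYDRV_INFO_VAL  0x0002	/* Info */

	unsigned int mydrv_msglevel = MYDRV_ERROR_VAL;

	#ifdef DEBUG
	#define MYDRV_ERROR(x) \
		do { \
			if (mydrv_msglevel & MYDRV_ERROR_VAL) \
				printf x; \
		} while (0)
	#define MYDRV_INFO(x) \
		do { \
			if (mydrv_msglevel & MYDRV_INFO_VAL) \
				printf x; \
		} while (0)
	#else
	#define MYDRV_ERROR(x)
	#define MYDRV_INFO(x)
	#endif

	int main(void)
	{
		MYDRV_ERROR(("read failed at 0x%x\n", 0x1000u));	/* printed with -DDEBUG */
		MYDRV_INFO(("probe ok\n"));	/* filtered out: INFO bit not set */
		return 0;
	}

The double parentheses at the call site let the macro forward a full printf-style argument list even though it is declared with a single parameter.
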
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index c0ffbd35e0c..38bd9ba3096 100644
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -15,64 +15,179 @@
*/
#include <linux/types.h>
#include <linux/netdevice.h>
-#include <bcmdefs.h>
-#include <bcmdevs.h>
-#include <bcmutils.h>
-#include <sdio.h> /* SDIO Device and Protocol Specs */
-#include <sdioh.h> /* SDIO Host Controller Specification */
-#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
-#include <sdiovar.h> /* ioctl/iovars */
-
+#include <linux/mmc/sdio.h>
#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/card.h>
#include <linux/suspend.h>
+#include <linux/errno.h>
+#include <linux/sched.h> /* request_irq() */
+#include <net/cfg80211.h>
-#include <dngl_stats.h>
-#include <dhd.h>
+#include <defs.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include "sdio_host.h"
+#include "dhd.h"
+#include "dhd_dbg.h"
+#include "wl_cfg80211.h"
-#include "bcmsdh_sdmmc.h"
+#define BLOCK_SIZE_64 64
+#define BLOCK_SIZE_512 512
+#define BLOCK_SIZE_4318 64
+#define BLOCK_SIZE_4328 512
-extern int sdio_function_init(void);
-extern void sdio_function_cleanup(void);
+/* private bus modes */
+#define SDIOH_MODE_SD4 2
-#if !defined(OOB_INTR_ONLY)
-static void IRQHandler(struct sdio_func *func);
-static void IRQHandlerF2(struct sdio_func *func);
-#endif /* !defined(OOB_INTR_ONLY) */
-static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, u32 regaddr);
-extern int sdio_reset_comm(struct mmc_card *card);
+#define CLIENT_INTR 0x100 /* Get rid of this! */
-extern PBCMSDH_SDMMC_INSTANCE gInstance;
+#if !defined(SDIO_VENDOR_ID_BROADCOM)
+#define SDIO_VENDOR_ID_BROADCOM 0x02d0
+#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
-uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
-uint sd_f2_blocksize = 512; /* Default blocksize */
+#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000
+
+#define DMA_ALIGN_MASK 0x03
-uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)
+#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB 0x0492 /* BCM94325SDGWB */
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325)
+#define SDIO_DEVICE_ID_BROADCOM_4325 0x0493
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4329)
+#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4319)
+#define SDIO_DEVICE_ID_BROADCOM_4319 0x4319
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4319) */
+
+/* Common msglevel constants */
+#define SDH_ERROR_VAL 0x0001 /* Error */
+#define SDH_TRACE_VAL 0x0002 /* Trace */
+#define SDH_INFO_VAL 0x0004 /* Info */
+#define SDH_DEBUG_VAL 0x0008 /* Debug */
+#define SDH_DATA_VAL 0x0010 /* Data */
+#define SDH_CTRL_VAL 0x0020 /* Control Regs */
+#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */
+#define SDH_DMA_VAL 0x0080 /* DMA */
+
+#ifdef BCMDBG
+#define sd_err(x) \
+ do { \
+ if ((sd_msglevel & SDH_ERROR_VAL) && net_ratelimit()) \
+ printk x; \
+ } while (0)
+#define sd_trace(x) \
+ do { \
+ if ((sd_msglevel & SDH_TRACE_VAL) && net_ratelimit()) \
+ printk x; \
+ } while (0)
+#define sd_info(x) \
+ do { \
+ if ((sd_msglevel & SDH_INFO_VAL) && net_ratelimit()) \
+ printk x; \
+ } while (0)
+#define sd_debug(x) \
+ do { \
+ if ((sd_msglevel & SDH_DEBUG_VAL) && net_ratelimit()) \
+ printk x; \
+ } while (0)
+#define sd_data(x) \
+ do { \
+ if ((sd_msglevel & SDH_DATA_VAL) && net_ratelimit()) \
+ printk x; \
+ } while (0)
+#define sd_ctrl(x) \
+ do { \
+ if ((sd_msglevel & SDH_CTRL_VAL) && net_ratelimit()) \
+ printk x; \
+ } while (0)
+#else
+#define sd_err(x)
+#define sd_trace(x)
+#define sd_info(x)
+#define sd_debug(x)
+#define sd_data(x)
+#define sd_ctrl(x)
+#endif
+
+struct sdos_info {
+ struct sdioh_info *sd;
+ spinlock_t lock;
+};
+
+static void brcmf_sdioh_irqhandler(struct sdio_func *func);
+static void brcmf_sdioh_irqhandler_f2(struct sdio_func *func);
+static int brcmf_sdioh_get_cisaddr(struct sdioh_info *sd, u32 regaddr);
+static int brcmf_ops_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id);
+static void brcmf_ops_sdio_remove(struct sdio_func *func);
+
+#ifdef CONFIG_PM
+static int brcmf_sdio_suspend(struct device *dev);
+static int brcmf_sdio_resume(struct device *dev);
+#endif /* CONFIG_PM */
+
+uint sd_f2_blocksize = 512; /* Default blocksize */
-uint sd_power = 1; /* Default to SD Slot powered ON */
-uint sd_clock = 1; /* Default to SD Clock turned ON */
-uint sd_hiok = false; /* Don't use hi-speed mode by default */
uint sd_msglevel = 0x01;
-uint sd_use_dma = true;
-DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
-DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
-DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
-DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
-#define DMA_ALIGN_MASK 0x03
+/* module param defaults */
+static int clockoverride;
-int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, u32 regaddr,
- int regsize, u32 *data);
+module_param(clockoverride, int, 0644);
+MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
-void sdioh_sdio_set_host_pm_flags(int flag)
-{
- if (sdio_set_host_pm_flags(gInstance->func[1], flag))
- printk(KERN_ERR "%s: Failed to set pm_flags 0x%08x\n",\
- __func__, (unsigned int)flag);
-}
+struct brcmf_sdmmc_instance *gInstance;
+
+struct device sdmmc_dev;
+
+/* devices we support, null terminated */
+static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT)},
+ {SDIO_DEVICE
+ (SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319)},
+ { /* end: all zeroes */ },
+};
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops brcmf_sdio_pm_ops = {
+ .suspend = brcmf_sdio_suspend,
+ .resume = brcmf_sdio_resume,
+};
+#endif /* CONFIG_PM */
+
+static struct sdio_driver brcmf_sdmmc_driver = {
+ .probe = brcmf_ops_sdio_probe,
+ .remove = brcmf_ops_sdio_remove,
+ .name = "brcmfmac",
+ .id_table = brcmf_sdmmc_ids,
+#ifdef CONFIG_PM
+ .drv = {
+ .pm = &brcmf_sdio_pm_ops,
+ },
+#endif /* CONFIG_PM */
+};
+
+MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
+
+BRCMF_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
+BRCMF_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
+BRCMF_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
+BRCMF_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
-static int sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
+static int
+brcmf_sdioh_card_regread(struct sdioh_info *sd, int func, u32 regaddr,
+ int regsize, u32 *data);
+
+static int brcmf_sdioh_enablefuncs(struct sdioh_info *sd)
{
int err_ret;
u32 fbraddr;
@@ -81,16 +196,16 @@ static int sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
sd_trace(("%s\n", __func__));
/* Get the Card's common CIS address */
- sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
+ sd->com_cis_ptr = brcmf_sdioh_get_cisaddr(sd, SDIO_CCCR_CIS);
sd->func_cis_ptr[0] = sd->com_cis_ptr;
sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __func__,
sd->com_cis_ptr));
/* Get the Card's function CIS (for each function) */
- for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
+ for (fbraddr = SDIO_FBR_BASE(1), func = 1;
func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
sd->func_cis_ptr[func] =
- sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
+ brcmf_sdioh_get_cisaddr(sd, SDIO_FBR_CIS + fbraddr);
sd_info(("%s: Function %d CIS Ptr = 0x%x\n", __func__, func,
sd->func_cis_ptr[func]));
}
@@ -104,8 +219,8 @@ static int sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
err_ret = sdio_enable_func(gInstance->func[1]);
sdio_release_host(gInstance->func[1]);
if (err_ret) {
- sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x",
- err_ret));
+ sd_err(("brcmf_sdioh_enablefuncs: Failed to enable F1 "
+ "Err: 0x%08x\n", err_ret));
}
return false;
@@ -114,9 +229,9 @@ static int sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
/*
* Public entry points & extern's
*/
-sdioh_info_t *sdioh_attach(void *bar0, uint irq)
+struct sdioh_info *brcmf_sdioh_attach(void *bar0, uint irq)
{
- sdioh_info_t *sd;
+ struct sdioh_info *sd;
int err_ret;
sd_trace(("%s\n", __func__));
@@ -126,19 +241,18 @@ sdioh_info_t *sdioh_attach(void *bar0, uint irq)
return NULL;
}
- sd = kzalloc(sizeof(sdioh_info_t), GFP_ATOMIC);
+ sd = kzalloc(sizeof(struct sdioh_info), GFP_ATOMIC);
if (sd == NULL) {
sd_err(("sdioh_attach: out of memory\n"));
return NULL;
}
- if (sdioh_sdmmc_osinit(sd) != 0) {
+ if (brcmf_sdioh_osinit(sd) != 0) {
sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __func__));
kfree(sd);
return NULL;
}
sd->num_funcs = 2;
- sd->sd_blockmode = true;
sd->use_client_ints = true;
sd->client_block_size[0] = 64;
@@ -150,7 +264,7 @@ sdioh_info_t *sdioh_attach(void *bar0, uint irq)
sd->client_block_size[1] = 64;
err_ret = sdio_set_block_size(gInstance->func[1], 64);
if (err_ret)
- sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
+ sd_err(("brcmf_sdioh_attach: Failed to set F1 blocksize\n"));
/* Release host controller F1 */
sdio_release_host(gInstance->func[1]);
@@ -163,20 +277,20 @@ sdioh_info_t *sdioh_attach(void *bar0, uint irq)
err_ret =
sdio_set_block_size(gInstance->func[2], sd_f2_blocksize);
if (err_ret)
- sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize "
- "to %d\n", sd_f2_blocksize));
+ sd_err(("brcmf_sdioh_attach: Failed to set F2 blocksize"
+ " to %d\n", sd_f2_blocksize));
/* Release host controller F2 */
sdio_release_host(gInstance->func[2]);
}
- sdioh_sdmmc_card_enablefuncs(sd);
+ brcmf_sdioh_enablefuncs(sd);
sd_trace(("%s: Done\n", __func__));
return sd;
}
-extern SDIOH_API_RC sdioh_detach(sdioh_info_t *sd)
+extern int brcmf_sdioh_detach(struct sdioh_info *sd)
{
sd_trace(("%s\n", __func__));
@@ -193,92 +307,25 @@ extern SDIOH_API_RC sdioh_detach(sdioh_info_t *sd)
sdio_release_host(gInstance->func[1]);
/* deregister irq */
- sdioh_sdmmc_osfree(sd);
+ brcmf_sdioh_osfree(sd);
kfree(sd);
}
- return SDIOH_API_RC_SUCCESS;
-}
-
-#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
-
-extern SDIOH_API_RC sdioh_enable_func_intr(void)
-{
- u8 reg;
- int err;
-
- if (gInstance->func[0]) {
- sdio_claim_host(gInstance->func[0]);
-
- reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
- if (err) {
- sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n",
- __func__, err));
- sdio_release_host(gInstance->func[0]);
- return SDIOH_API_RC_FAIL;
- }
-
- /* Enable F1 and F2 interrupts, set master enable */
- reg |=
- (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN |
- INTR_CTL_MASTER_EN);
-
- sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
- sdio_release_host(gInstance->func[0]);
-
- if (err) {
- sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n",
- __func__, err));
- return SDIOH_API_RC_FAIL;
- }
- }
-
- return SDIOH_API_RC_SUCCESS;
-}
-
-extern SDIOH_API_RC sdioh_disable_func_intr(void)
-{
- u8 reg;
- int err;
-
- if (gInstance->func[0]) {
- sdio_claim_host(gInstance->func[0]);
- reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
- if (err) {
- sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n",
- __func__, err));
- sdio_release_host(gInstance->func[0]);
- return SDIOH_API_RC_FAIL;
- }
-
- reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
- /* Disable master interrupt with the last function interrupt */
- if (!(reg & 0xFE))
- reg = 0;
- sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
-
- sdio_release_host(gInstance->func[0]);
- if (err) {
- sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n",
- __func__, err));
- return SDIOH_API_RC_FAIL;
- }
- }
- return SDIOH_API_RC_SUCCESS;
+ return 0;
}
-#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
/* Configure callback to client when we receive client interrupt */
-extern SDIOH_API_RC
-sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
+extern int
+brcmf_sdioh_interrupt_register(struct sdioh_info *sd, void (*fn)(void *),
+ void *argh)
{
sd_trace(("%s: Entering\n", __func__));
if (fn == NULL) {
sd_err(("%s: interrupt handler is NULL, not registering\n",
__func__));
- return SDIOH_API_RC_FAIL;
+ return -EINVAL;
}
-#if !defined(OOB_INTR_ONLY)
+
sd->intr_handler = fn;
sd->intr_handler_arg = argh;
sd->intr_handler_valid = true;
@@ -286,26 +333,23 @@ sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
/* register and unmask irq */
if (gInstance->func[2]) {
sdio_claim_host(gInstance->func[2]);
- sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
+ sdio_claim_irq(gInstance->func[2], brcmf_sdioh_irqhandler_f2);
sdio_release_host(gInstance->func[2]);
}
if (gInstance->func[1]) {
sdio_claim_host(gInstance->func[1]);
- sdio_claim_irq(gInstance->func[1], IRQHandler);
+ sdio_claim_irq(gInstance->func[1], brcmf_sdioh_irqhandler);
sdio_release_host(gInstance->func[1]);
}
-#elif defined(HW_OOB)
- sdioh_enable_func_intr();
-#endif /* defined(OOB_INTR_ONLY) */
- return SDIOH_API_RC_SUCCESS;
+
+ return 0;
}
-extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *sd)
+extern int brcmf_sdioh_interrupt_deregister(struct sdioh_info *sd)
{
sd_trace(("%s: Entering\n", __func__));
-#if !defined(OOB_INTR_ONLY)
if (gInstance->func[1]) {
/* register and unmask irq */
sdio_claim_host(gInstance->func[1]);
@@ -324,73 +368,28 @@ extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *sd)
sd->intr_handler_valid = false;
sd->intr_handler = NULL;
sd->intr_handler_arg = NULL;
-#elif defined(HW_OOB)
- sdioh_disable_func_intr();
-#endif /* !defined(OOB_INTR_ONLY) */
- return SDIOH_API_RC_SUCCESS;
-}
-
-extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
-{
- sd_trace(("%s: Entering\n", __func__));
- *onoff = sd->client_intr_enabled;
- return SDIOH_API_RC_SUCCESS;
-}
-#if defined(DHD_DEBUG)
-extern bool sdioh_interrupt_pending(sdioh_info_t *sd)
-{
return 0;
}
-#endif
-
-uint sdioh_query_iofnum(sdioh_info_t *sd)
-{
- return sd->num_funcs;
-}
/* IOVar table */
enum {
IOV_MSGLEVEL = 1,
- IOV_BLOCKMODE,
IOV_BLOCKSIZE,
- IOV_DMA,
IOV_USEINTS,
IOV_NUMINTS,
- IOV_NUMLOCALINTS,
- IOV_HOSTREG,
IOV_DEVREG,
- IOV_DIVISOR,
- IOV_SDMODE,
- IOV_HISPEED,
IOV_HCIREGS,
- IOV_POWER,
- IOV_CLOCK,
IOV_RXCHAIN
};
-const bcm_iovar_t sdioh_iovars[] = {
+const struct brcmu_iovar sdioh_iovars[] = {
{"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0},
- {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0},
{"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0},/* ((fn << 16) |
size) */
- {"sd_dma", IOV_DMA, 0, IOVT_BOOL, 0},
{"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0},
{"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0},
- {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0},
- {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t)}
- ,
- {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t)}
- ,
- {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0}
- ,
- {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0}
- ,
- {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0}
- ,
- {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}
- ,
- {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0}
+ {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(struct brcmf_sdreg)}
,
{"sd_rxchain", IOV_RXCHAIN, 0, IOVT_BOOL, 0}
,
@@ -398,33 +397,37 @@ const bcm_iovar_t sdioh_iovars[] = {
};
int
-sdioh_iovar_op(sdioh_info_t *si, const char *name,
- void *params, int plen, void *arg, int len, bool set)
+brcmf_sdioh_iovar_op(struct sdioh_info *si, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
{
- const bcm_iovar_t *vi = NULL;
+ const struct brcmu_iovar *vi = NULL;
int bcmerror = 0;
int val_size;
s32 int_val = 0;
bool bool_val;
u32 actionid;
- ASSERT(name);
- ASSERT(len >= 0);
+ if (name == NULL || len <= 0)
+ return -EINVAL;
+
+ /* Set does not take qualifiers */
+ if (set && (params || plen))
+ return -EINVAL;
- /* Get must have return space; Set does not take qualifiers */
- ASSERT(set || (arg && len));
- ASSERT(!set || (!params && !plen));
+ /* Get must have return space;*/
+ if (!set && !(arg && len))
+ return -EINVAL;
sd_trace(("%s: Enter (%s %s)\n", __func__, (set ? "set" : "get"),
name));
- vi = bcm_iovar_lookup(sdioh_iovars, name);
+ vi = brcmu_iovar_lookup(sdioh_iovars, name);
if (vi == NULL) {
bcmerror = -ENOTSUPP;
goto exit;
}
- bcmerror = bcm_iovar_lencheck(vi, arg, len, set);
+ bcmerror = brcmu_iovar_lencheck(vi, arg, len, set);
if (bcmerror != 0)
goto exit;
@@ -457,16 +460,6 @@ sdioh_iovar_op(sdioh_info_t *si, const char *name,
sd_msglevel = int_val;
break;
- case IOV_GVAL(IOV_BLOCKMODE):
- int_val = (s32) si->sd_blockmode;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_BLOCKMODE):
- si->sd_blockmode = (bool) int_val;
- /* Haven't figured out how to make non-block mode with DMA */
- break;
-
case IOV_GVAL(IOV_BLOCKSIZE):
if ((u32) int_val > si->num_funcs) {
bcmerror = -EINVAL;
@@ -518,15 +511,6 @@ sdioh_iovar_op(sdioh_info_t *si, const char *name,
memcpy(arg, &int_val, val_size);
break;
- case IOV_GVAL(IOV_DMA):
- int_val = (s32) si->sd_use_dma;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_DMA):
- si->sd_use_dma = (bool) int_val;
- break;
-
case IOV_GVAL(IOV_USEINTS):
int_val = (s32) si->use_client_ints;
memcpy(arg, &int_val, val_size);
@@ -541,117 +525,18 @@ sdioh_iovar_op(sdioh_info_t *si, const char *name,
break;
- case IOV_GVAL(IOV_DIVISOR):
- int_val = (u32) sd_divisor;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_DIVISOR):
- sd_divisor = int_val;
- break;
-
- case IOV_GVAL(IOV_POWER):
- int_val = (u32) sd_power;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_POWER):
- sd_power = int_val;
- break;
-
- case IOV_GVAL(IOV_CLOCK):
- int_val = (u32) sd_clock;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_CLOCK):
- sd_clock = int_val;
- break;
-
- case IOV_GVAL(IOV_SDMODE):
- int_val = (u32) sd_sdmode;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_SDMODE):
- sd_sdmode = int_val;
- break;
-
- case IOV_GVAL(IOV_HISPEED):
- int_val = (u32) sd_hiok;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_HISPEED):
- sd_hiok = int_val;
- break;
-
case IOV_GVAL(IOV_NUMINTS):
int_val = (s32) si->intrcount;
memcpy(arg, &int_val, val_size);
break;
- case IOV_GVAL(IOV_NUMLOCALINTS):
- int_val = (s32) 0;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_GVAL(IOV_HOSTREG):
- {
- sdreg_t *sd_ptr = (sdreg_t *) params;
-
- if (sd_ptr->offset < SD_SysAddr
- || sd_ptr->offset > SD_MaxCurCap) {
- sd_err(("%s: bad offset 0x%x\n", __func__,
- sd_ptr->offset));
- bcmerror = -EINVAL;
- break;
- }
-
- sd_trace(("%s: rreg%d at offset %d\n", __func__,
- (sd_ptr->offset & 1) ? 8
- : ((sd_ptr->offset & 2) ? 16 : 32),
- sd_ptr->offset));
- if (sd_ptr->offset & 1)
- int_val = 8; /* sdioh_sdmmc_rreg8(si,
- sd_ptr->offset); */
- else if (sd_ptr->offset & 2)
- int_val = 16; /* sdioh_sdmmc_rreg16(si,
- sd_ptr->offset); */
- else
- int_val = 32; /* sdioh_sdmmc_rreg(si,
- sd_ptr->offset); */
-
- memcpy(arg, &int_val, sizeof(int_val));
- break;
- }
-
- case IOV_SVAL(IOV_HOSTREG):
- {
- sdreg_t *sd_ptr = (sdreg_t *) params;
-
- if (sd_ptr->offset < SD_SysAddr
- || sd_ptr->offset > SD_MaxCurCap) {
- sd_err(("%s: bad offset 0x%x\n", __func__,
- sd_ptr->offset));
- bcmerror = -EINVAL;
- break;
- }
-
- sd_trace(("%s: wreg%d value 0x%08x at offset %d\n",
- __func__, sd_ptr->value,
- (sd_ptr->offset & 1) ? 8
- : ((sd_ptr->offset & 2) ? 16 : 32),
- sd_ptr->offset));
- break;
- }
-
case IOV_GVAL(IOV_DEVREG):
{
- sdreg_t *sd_ptr = (sdreg_t *) params;
+ struct brcmf_sdreg *sd_ptr =
+ (struct brcmf_sdreg *) params;
u8 data = 0;
- if (sdioh_cfg_read
+ if (brcmf_sdioh_cfg_read
(si, sd_ptr->func, sd_ptr->offset, &data)) {
bcmerror = -EIO;
break;
@@ -664,10 +549,11 @@ sdioh_iovar_op(sdioh_info_t *si, const char *name,
case IOV_SVAL(IOV_DEVREG):
{
- sdreg_t *sd_ptr = (sdreg_t *) params;
+ struct brcmf_sdreg *sd_ptr =
+ (struct brcmf_sdreg *) params;
u8 data = (u8) sd_ptr->value;
- if (sdioh_cfg_write
+ if (brcmf_sdioh_cfg_write
(si, sd_ptr->func, sd_ptr->offset, &data)) {
bcmerror = -EIO;
break;
@@ -684,50 +570,32 @@ exit:
return bcmerror;
}
-#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
-
-SDIOH_API_RC sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
-{
- SDIOH_API_RC status;
- u8 data;
-
- if (enable)
- data = 3; /* enable hw oob interrupt */
- else
- data = 4; /* disable hw oob interrupt */
- data |= 4; /* Active HIGH */
-
- status = sdioh_request_byte(sd, SDIOH_WRITE, 0, 0xf2, &data);
- return status;
-}
-#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
-
-extern SDIOH_API_RC
-sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, u32 addr, u8 *data)
+extern int
+brcmf_sdioh_cfg_read(struct sdioh_info *sd, uint fnc_num, u32 addr, u8 *data)
{
- SDIOH_API_RC status;
- /* No lock needed since sdioh_request_byte does locking */
- status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
+ int status;
+ /* No lock needed since brcmf_sdioh_request_byte does locking */
+ status = brcmf_sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
return status;
}
-extern SDIOH_API_RC
-sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, u32 addr, u8 *data)
+extern int
+brcmf_sdioh_cfg_write(struct sdioh_info *sd, uint fnc_num, u32 addr, u8 *data)
{
- /* No lock needed since sdioh_request_byte does locking */
- SDIOH_API_RC status;
- status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
+ /* No lock needed since brcmf_sdioh_request_byte does locking */
+ int status;
+ status = brcmf_sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
return status;
}
-static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, u32 regaddr)
+static int brcmf_sdioh_get_cisaddr(struct sdioh_info *sd, u32 regaddr)
{
/* read 24 bits and return valid 17 bit addr */
int i;
u32 scratch, regdata;
u8 *ptr = (u8 *)&scratch;
for (i = 0; i < 3; i++) {
- if ((sdioh_sdmmc_card_regread(sd, 0, regaddr, 1, &regdata)) !=
+ if ((brcmf_sdioh_card_regread(sd, 0, regaddr, 1, &regdata)) !=
SUCCESS)
sd_err(("%s: Can't read!\n", __func__));
@@ -741,8 +609,8 @@ static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, u32 regaddr)
return scratch;
}
-extern SDIOH_API_RC
-sdioh_cis_read(sdioh_info_t *sd, uint func, u8 *cisd, u32 length)
+extern int
+brcmf_sdioh_cis_read(struct sdioh_info *sd, uint func, u8 *cisd, u32 length)
{
u32 count;
int offset;
@@ -754,7 +622,7 @@ sdioh_cis_read(sdioh_info_t *sd, uint func, u8 *cisd, u32 length)
if (!sd->func_cis_ptr[func]) {
memset(cis, 0, length);
sd_err(("%s: no func_cis_ptr[%d]\n", __func__, func));
- return SDIOH_API_RC_FAIL;
+ return -ENOTSUPP;
}
sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __func__, func,
@@ -762,37 +630,37 @@ sdioh_cis_read(sdioh_info_t *sd, uint func, u8 *cisd, u32 length)
for (count = 0; count < length; count++) {
offset = sd->func_cis_ptr[func] + count;
- if (sdioh_sdmmc_card_regread(sd, 0, offset, 1, &foo) < 0) {
+ if (brcmf_sdioh_card_regread(sd, 0, offset, 1, &foo) < 0) {
sd_err(("%s: regread failed: Can't read CIS\n",
__func__));
- return SDIOH_API_RC_FAIL;
+ return -EIO;
}
*cis = (u8) (foo & 0xff);
cis++;
}
- return SDIOH_API_RC_SUCCESS;
+ return 0;
}
-extern SDIOH_API_RC
-sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr,
- u8 *byte)
+extern int
+brcmf_sdioh_request_byte(struct sdioh_info *sd, uint rw, uint func,
+ uint regaddr, u8 *byte)
{
int err_ret;
sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __func__, rw, func,
regaddr));
- DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
- DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ BRCMF_PM_RESUME_WAIT(sdioh_request_byte_wait);
+ BRCMF_PM_RESUME_RETURN_ERROR(-EIO);
if (rw) { /* CMD52 Write */
if (func == 0) {
/* Can only directly write to some F0 registers.
* Handle F2 enable
* as a special case.
*/
- if (regaddr == SDIOD_CCCR_IOEN) {
+ if (regaddr == SDIO_CCCR_IOEx) {
if (gInstance->func[2]) {
sdio_claim_host(gInstance->func[2]);
if (*byte & SDIO_FUNC_ENABLE_2) {
@@ -801,7 +669,9 @@ sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr,
sdio_enable_func
(gInstance->func[2]);
if (err_ret)
- sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
+ sd_err(("request_byte: "
+ "enable F2 "
+ "failed:%d\n",
err_ret));
} else {
/* Disable Function 2 */
@@ -809,15 +679,16 @@ sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr,
sdio_disable_func
(gInstance->func[2]);
if (err_ret)
- sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
+ sd_err(("request_byte: "
+ "Disab F2 "
+ "failed:%d\n",
err_ret));
}
sdio_release_host(gInstance->func[2]);
}
}
-#if defined(MMC_SDIO_ABORT)
/* to allow abort command through F1 */
- else if (regaddr == SDIOD_CCCR_IOABORT) {
+ else if (regaddr == SDIO_CCCR_ABORT) {
sdio_claim_host(gInstance->func[func]);
/*
* this sdio_f0_writeb() can be replaced
@@ -828,10 +699,8 @@ sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr,
sdio_writeb(gInstance->func[func], *byte,
regaddr, &err_ret);
sdio_release_host(gInstance->func[func]);
- }
-#endif /* MMC_SDIO_ABORT */
- else if (regaddr < 0xF0) {
- sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write "
+ } else if (regaddr < 0xF0) {
+ sd_err(("brcmf: F0 Wr:0x%02x: write "
"disallowed\n", regaddr));
} else {
/* Claim host controller, perform F0 write,
@@ -867,29 +736,29 @@ sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr,
}
if (err_ret)
- sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, "
+ sd_err(("brcmf: Failed to %s byte F%d:@0x%05x=%02x, "
"Err: %d\n", rw ? "Write" : "Read", func, regaddr,
*byte, err_ret));
- return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+ return err_ret;
}
-extern SDIOH_API_RC
-sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func,
- uint addr, u32 *word, uint nbytes)
+extern int
+brcmf_sdioh_request_word(struct sdioh_info *sd, uint cmd_type, uint rw,
+ uint func, uint addr, u32 *word, uint nbytes)
{
- int err_ret = SDIOH_API_RC_FAIL;
+ int err_ret = -EIO;
if (func == 0) {
sd_err(("%s: Only CMD52 allowed to F0.\n", __func__));
- return SDIOH_API_RC_FAIL;
+ return -EINVAL;
}
sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
__func__, cmd_type, rw, func, addr, nbytes));
- DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
- DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ BRCMF_PM_RESUME_WAIT(sdioh_request_word_wait);
+ BRCMF_PM_RESUME_RETURN_ERROR(-EIO);
/* Claim host controller */
sdio_claim_host(gInstance->func[func]);
@@ -920,16 +789,16 @@ sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func,
sdio_release_host(gInstance->func[func]);
if (err_ret) {
- sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
+ sd_err(("brcmf: Failed to %s word, Err: 0x%08x\n",
rw ? "Write" : "Read", err_ret));
}
- return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+ return err_ret;
}
-static SDIOH_API_RC
-sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
- uint addr, struct sk_buff *pkt)
+static int
+brcmf_sdioh_request_packet(struct sdioh_info *sd, uint fix_inc, uint write,
+ uint func, uint addr, struct sk_buff *pkt)
{
bool fifo = (fix_inc == SDIOH_DATA_FIX);
u32 SGCount = 0;
@@ -939,9 +808,8 @@ sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
sd_trace(("%s: Enter\n", __func__));
- ASSERT(pkt);
- DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
- DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ BRCMF_PM_RESUME_WAIT(sdioh_request_packet_wait);
+ BRCMF_PM_RESUME_RETURN_ERROR(-EIO);
/* Claim host controller */
sdio_claim_host(gInstance->func[func]);
@@ -950,21 +818,6 @@ sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
pkt_len += 3;
pkt_len &= 0xFFFFFFFC;
-#ifdef CONFIG_MMC_MSM7X00A
- if ((pkt_len % 64) == 32) {
- sd_trace(("%s: Rounding up TX packet +=32\n",
- __func__));
- pkt_len += 32;
- }
-#endif /* CONFIG_MMC_MSM7X00A */
- /* Make sure the packet is aligned properly.
- * If it isn't, then this
- * is the fault of sdioh_request_buffer() which
- * is supposed to give
- * us something we can work with.
- */
- ASSERT(((u32) (pkt->data) & DMA_ALIGN_MASK) == 0);
-
if ((write) && (!fifo)) {
err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
((u8 *) (pnext->data)),
@@ -1005,7 +858,7 @@ sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
sdio_release_host(gInstance->func[func]);
sd_trace(("%s: Exit\n", __func__));
- return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
+ return err_ret;
}
/*
@@ -1023,114 +876,101 @@ sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
* aligned packet.
*
*/
-extern SDIOH_API_RC
-sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write,
- uint func, uint addr, uint reg_width, uint buflen_u,
- u8 *buffer, struct sk_buff *pkt)
+extern int
+brcmf_sdioh_request_buffer(struct sdioh_info *sd, uint pio_dma, uint fix_inc,
+ uint write, uint func, uint addr, uint reg_width,
+ uint buflen_u, u8 *buffer, struct sk_buff *pkt)
{
- SDIOH_API_RC Status;
+ int Status;
struct sk_buff *mypkt = NULL;
sd_trace(("%s: Enter\n", __func__));
- DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
- DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
+ BRCMF_PM_RESUME_WAIT(sdioh_request_buffer_wait);
+ BRCMF_PM_RESUME_RETURN_ERROR(-EIO);
/* Case 1: we don't have a packet. */
if (pkt == NULL) {
sd_data(("%s: Creating new %s Packet, len=%d\n",
__func__, write ? "TX" : "RX", buflen_u));
- mypkt = bcm_pkt_buf_get_skb(buflen_u);
+ mypkt = brcmu_pkt_buf_get_skb(buflen_u);
if (!mypkt) {
- sd_err(("%s: bcm_pkt_buf_get_skb failed: len %d\n",
+ sd_err(("%s: brcmu_pkt_buf_get_skb failed: len %d\n",
__func__, buflen_u));
- return SDIOH_API_RC_FAIL;
+ return -EIO;
}
/* For a write, copy the buffer data into the packet. */
if (write)
memcpy(mypkt->data, buffer, buflen_u);
- Status =
- sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
+ Status = brcmf_sdioh_request_packet(sd, fix_inc, write, func,
+ addr, mypkt);
/* For a read, copy the packet data back to the buffer. */
if (!write)
memcpy(buffer, mypkt->data, buflen_u);
- bcm_pkt_buf_free_skb(mypkt);
- } else if (((u32) (pkt->data) & DMA_ALIGN_MASK) != 0) {
- /* Case 2: We have a packet, but it is unaligned. */
-
- /* In this case, we cannot have a chain. */
- ASSERT(pkt->next == NULL);
-
+ brcmu_pkt_buf_free_skb(mypkt);
+ } else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) {
+ /*
+ * Case 2: We have a packet, but it is unaligned.
+ * In this case, we cannot have a chain (pkt->next == NULL)
+ */
sd_data(("%s: Creating aligned %s Packet, len=%d\n",
__func__, write ? "TX" : "RX", pkt->len));
- mypkt = bcm_pkt_buf_get_skb(pkt->len);
+ mypkt = brcmu_pkt_buf_get_skb(pkt->len);
if (!mypkt) {
- sd_err(("%s: bcm_pkt_buf_get_skb failed: len %d\n",
+ sd_err(("%s: brcmu_pkt_buf_get_skb failed: len %d\n",
__func__, pkt->len));
- return SDIOH_API_RC_FAIL;
+ return -EIO;
}
/* For a write, copy the buffer data into the packet. */
if (write)
memcpy(mypkt->data, pkt->data, pkt->len);
- Status =
- sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);
+ Status = brcmf_sdioh_request_packet(sd, fix_inc, write, func,
+ addr, mypkt);
/* For a read, copy the packet data back to the buffer. */
if (!write)
memcpy(pkt->data, mypkt->data, mypkt->len);
- bcm_pkt_buf_free_skb(mypkt);
+ brcmu_pkt_buf_free_skb(mypkt);
} else { /* case 3: We have a packet and
it is aligned. */
sd_data(("%s: Aligned %s Packet, direct DMA\n",
__func__, write ? "Tx" : "Rx"));
- Status =
- sdioh_request_packet(sd, fix_inc, write, func, addr, pkt);
+ Status = brcmf_sdioh_request_packet(sd, fix_inc, write, func,
+ addr, pkt);
}
return Status;
}
/* this function performs "abort" for both of host & device */
-extern int sdioh_abort(sdioh_info_t *sd, uint func)
+extern int brcmf_sdioh_abort(struct sdioh_info *sd, uint func)
{
-#if defined(MMC_SDIO_ABORT)
char t_func = (char)func;
-#endif /* defined(MMC_SDIO_ABORT) */
sd_trace(("%s: Enter\n", __func__));
-#if defined(MMC_SDIO_ABORT)
- /* issue abort cmd52 command through F1 */
- sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT,
+ /* issue abort cmd52 command through F0 */
+ brcmf_sdioh_request_byte(sd, SDIOH_WRITE, SDIO_FUNC_0, SDIO_CCCR_ABORT,
&t_func);
-#endif /* defined(MMC_SDIO_ABORT) */
-
- sd_trace(("%s: Exit\n", __func__));
- return SDIOH_API_RC_SUCCESS;
-}
-/* Reset and re-initialize the device */
-int sdioh_sdio_reset(sdioh_info_t *si)
-{
- sd_trace(("%s: Enter\n", __func__));
sd_trace(("%s: Exit\n", __func__));
- return SDIOH_API_RC_SUCCESS;
+ return 0;
}
/* Disable device interrupt */
-void sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
+void brcmf_sdioh_dev_intr_off(struct sdioh_info *sd)
{
sd_trace(("%s: %d\n", __func__, sd->use_client_ints));
sd->intmask &= ~CLIENT_INTR;
}
/* Enable device interrupt */
-void sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
+void brcmf_sdioh_dev_intr_on(struct sdioh_info *sd)
{
sd_trace(("%s: %d\n", __func__, sd->use_client_ints));
sd->intmask |= CLIENT_INTR;
@@ -1138,19 +978,19 @@ void sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
/* Read client card reg */
int
-sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, u32 regaddr,
+brcmf_sdioh_card_regread(struct sdioh_info *sd, int func, u32 regaddr,
int regsize, u32 *data)
{
if ((func == 0) || (regsize == 1)) {
u8 temp = 0;
- sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
+ brcmf_sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
*data = temp;
*data &= 0xff;
sd_data(("%s: byte read data=0x%02x\n", __func__, *data));
} else {
- sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data,
+ brcmf_sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data,
regsize);
if (regsize == 2)
*data &= 0xffff;
@@ -1161,25 +1001,20 @@ sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, u32 regaddr,
return SUCCESS;
}
-#if !defined(OOB_INTR_ONLY)
-/* bcmsdh_sdmmc interrupt handler */
-static void IRQHandler(struct sdio_func *func)
+static void brcmf_sdioh_irqhandler(struct sdio_func *func)
{
- sdioh_info_t *sd;
+ struct sdioh_info *sd;
- sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n"));
+ sd_trace(("brcmf: ***IRQHandler\n"));
sd = gInstance->sd;
- ASSERT(sd != NULL);
sdio_release_host(gInstance->func[0]);
if (sd->use_client_ints) {
sd->intrcount++;
- ASSERT(sd->intr_handler);
- ASSERT(sd->intr_handler_arg);
(sd->intr_handler) (sd->intr_handler_arg);
} else {
- sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
+ sd_err(("brcmf: ***IRQHandler\n"));
sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
__func__, sd->client_intr_enabled, sd->intr_handler));
@@ -1188,52 +1023,174 @@ static void IRQHandler(struct sdio_func *func)
sdio_claim_host(gInstance->func[0]);
}
-/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
-static void IRQHandlerF2(struct sdio_func *func)
+/* interrupt handler for F2 (dummy handler) */
+static void brcmf_sdioh_irqhandler_f2(struct sdio_func *func)
{
- sdioh_info_t *sd;
+ struct sdioh_info *sd;
- sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
+ sd_trace(("brcmf: ***IRQHandlerF2\n"));
sd = gInstance->sd;
+}
+
+static int brcmf_ops_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int ret = 0;
+ static struct sdio_func sdio_func_0;
+ sd_trace(("sdio_probe: %s Enter\n", __func__));
+ sd_trace(("sdio_probe: func->class=%x\n", func->class));
+ sd_trace(("sdio_vendor: 0x%04x\n", func->vendor));
+ sd_trace(("sdio_device: 0x%04x\n", func->device));
+ sd_trace(("Function#: 0x%04x\n", func->num));
+
+ if (func->num == 1) {
+ sdio_func_0.num = 0;
+ sdio_func_0.card = func->card;
+ gInstance->func[0] = &sdio_func_0;
+ if (func->device == 0x4) { /* 4318 */
+ gInstance->func[2] = NULL;
+ sd_trace(("NIC found, calling brcmf_sdio_probe...\n"));
+ ret = brcmf_sdio_probe(&sdmmc_dev);
+ }
+ }
- ASSERT(sd != NULL);
+ gInstance->func[func->num] = func;
+
+ if (func->num == 2) {
+ brcmf_cfg80211_sdio_func(func);
+ sd_trace(("F2 found, calling brcmf_sdio_probe...\n"));
+ ret = brcmf_sdio_probe(&sdmmc_dev);
+ }
+
+ return ret;
}
-#endif /* !defined(OOB_INTR_ONLY) */
-#ifdef NOTUSED
-/* Write client card reg */
-static int
-sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, u32 regaddr,
- int regsize, u32 data)
+static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
+ sd_trace(("%s Enter\n", __func__));
+ sd_info(("func->class=%x\n", func->class));
+ sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+ sd_info(("sdio_device: 0x%04x\n", func->device));
+ sd_info(("Function#: 0x%04x\n", func->num));
+
+ if (func->num == 2) {
+ sd_trace(("F2 found, calling brcmf_sdio_remove...\n"));
+ brcmf_sdio_remove(&sdmmc_dev);
+ }
+}
- if ((func == 0) || (regsize == 1)) {
- u8 temp;
- temp = data & 0xff;
- sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &temp);
- sd_data(("%s: byte write data=0x%02x\n", __func__, data));
- } else {
- if (regsize == 2)
- data &= 0xffff;
+#ifdef CONFIG_PM
+static int brcmf_sdio_suspend(struct device *dev)
+{
+ mmc_pm_flag_t sdio_flags;
+ int ret = 0;
- sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, &data,
- regsize);
+ sd_trace(("%s\n", __func__));
- sd_data(("%s: word write data=0x%08x\n", __func__, data));
+ sdio_flags = sdio_get_host_pm_caps(gInstance->func[1]);
+ if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
+ sd_err(("Host can't keep power while suspended\n"));
+ return -EINVAL;
}
- return SUCCESS;
+ ret = sdio_set_host_pm_flags(gInstance->func[1], MMC_PM_KEEP_POWER);
+ if (ret) {
+ sd_err(("Failed to set pm_flags\n"));
+ return ret;
+ }
+
+ brcmf_sdio_wdtmr_enable(false);
+
+ return ret;
+}
+
+static int brcmf_sdio_resume(struct device *dev)
+{
+ brcmf_sdio_wdtmr_enable(true);
+ return 0;
}
-#endif /* NOTUSED */
+#endif /* CONFIG_PM */
-int sdioh_start(sdioh_info_t *si, int stage)
+int brcmf_sdioh_osinit(struct sdioh_info *sd)
{
+ struct sdos_info *sdos;
+
+ sdos = kmalloc(sizeof(struct sdos_info), GFP_ATOMIC);
+ sd->sdos_info = (void *)sdos;
+ if (sdos == NULL)
+ return -ENOMEM;
+
+ sdos->sd = sd;
+ spin_lock_init(&sdos->lock);
return 0;
}
-int sdioh_stop(sdioh_info_t *si)
+void brcmf_sdioh_osfree(struct sdioh_info *sd)
+{
+ struct sdos_info *sdos;
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+ kfree(sdos);
+}
+
+/* Interrupt enable/disable */
+int brcmf_sdioh_interrupt_set(struct sdioh_info *sd, bool enable)
{
+ unsigned long flags;
+ struct sdos_info *sdos;
+
+ sd_trace(("%s: %s\n", __func__, enable ? "Enabling" : "Disabling"));
+
+ sdos = (struct sdos_info *)sd->sdos_info;
+
+ if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
+ sd_err(("%s: no handler registered, will not enable\n",
+ __func__));
+ return -EINVAL;
+ }
+
+ /* Ensure atomicity for enable/disable calls */
+ spin_lock_irqsave(&sdos->lock, flags);
+
+ sd->client_intr_enabled = enable;
+ if (enable)
+ brcmf_sdioh_dev_intr_on(sd);
+ else
+ brcmf_sdioh_dev_intr_off(sd);
+
+ spin_unlock_irqrestore(&sdos->lock, flags);
+
return 0;
}
+
+/*
+ * module init
+*/
+int brcmf_sdio_function_init(void)
+{
+ int error = 0;
+ sd_trace(("brcmf_sdio_function_init: %s Enter\n", __func__));
+
+ gInstance = kzalloc(sizeof(struct brcmf_sdmmc_instance), GFP_KERNEL);
+ if (!gInstance)
+ return -ENOMEM;
+
+ memset(&sdmmc_dev, 0, sizeof(sdmmc_dev));
+ error = sdio_register_driver(&brcmf_sdmmc_driver);
+
+ return error;
+}
+
+/*
+ * module cleanup
+*/
+void brcmf_sdio_function_cleanup(void)
+{
+ sd_trace(("%s Enter\n", __func__));
+
+ sdio_unregister_driver(&brcmf_sdmmc_driver);
+
+ kfree(gInstance);
+}
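
As a hedged sketch (not taken from the patch itself): one plausible way the renamed entry points brcmf_sdio_function_init() and brcmf_sdio_function_cleanup() shown above could be wired into module load and unload. In the real driver this wiring lives elsewhere in the brcmfmac sources, so the wrapper names and the MODULE_LICENSE string below are illustrative assumptions, not part of the change.

#include <linux/module.h>
#include <linux/init.h>

/* Entry points provided by bcmsdh_sdmmc.c above */
extern int brcmf_sdio_function_init(void);
extern void brcmf_sdio_function_cleanup(void);

static int __init brcmf_sdio_mod_init(void)
{
	/* allocates gInstance and registers brcmf_sdmmc_driver
	 * with the MMC/SDIO core via sdio_register_driver()
	 */
	return brcmf_sdio_function_init();
}

static void __exit brcmf_sdio_mod_exit(void)
{
	/* unregisters the SDIO driver and frees gInstance */
	brcmf_sdio_function_cleanup();
}

module_init(brcmf_sdio_mod_init);
module_exit(brcmf_sdio_mod_exit);
MODULE_LICENSE("Dual BSD/GPL");	/* illustrative; the real license macro is set elsewhere */
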
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.h b/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.h
deleted file mode 100644
index 3ef42b31849..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __BCMSDH_SDMMC_H__
-#define __BCMSDH_SDMMC_H__
-
-#ifdef BCMDBG
-#define sd_err(x) \
- do { \
- if ((sd_msglevel & SDH_ERROR_VAL) && net_ratelimit()) \
- printk x; \
- } while (0)
-#define sd_trace(x) \
- do { \
- if ((sd_msglevel & SDH_TRACE_VAL) && net_ratelimit()) \
- printk x; \
- } while (0)
-#define sd_info(x) \
- do { \
- if ((sd_msglevel & SDH_INFO_VAL) && net_ratelimit()) \
- printk x; \
- } while (0)
-#define sd_debug(x) \
- do { \
- if ((sd_msglevel & SDH_DEBUG_VAL) && net_ratelimit()) \
- printk x; \
- } while (0)
-#define sd_data(x) \
- do { \
- if ((sd_msglevel & SDH_DATA_VAL) && net_ratelimit()) \
- printk x; \
- } while (0)
-#define sd_ctrl(x) \
- do { \
- if ((sd_msglevel & SDH_CTRL_VAL) && net_ratelimit()) \
- printk x; \
- } while (0)
-#else
-#define sd_err(x)
-#define sd_trace(x)
-#define sd_info(x)
-#define sd_debug(x)
-#define sd_data(x)
-#define sd_ctrl(x)
-#endif
-
-/* Allocate/init/free per-OS private data */
-extern int sdioh_sdmmc_osinit(sdioh_info_t *sd);
-extern void sdioh_sdmmc_osfree(sdioh_info_t *sd);
-
-#define BLOCK_SIZE_64 64
-#define BLOCK_SIZE_512 512
-#define BLOCK_SIZE_4318 64
-#define BLOCK_SIZE_4328 512
-
-/* internal return code */
-#define SUCCESS 0
-#define ERROR 1
-
-/* private bus modes */
-#define SDIOH_MODE_SD4 2
-#define CLIENT_INTR 0x100 /* Get rid of this! */
-
-struct sdioh_info {
- struct osl_info *osh; /* osh handler */
- bool client_intr_enabled; /* interrupt connnected flag */
- bool intr_handler_valid; /* client driver interrupt handler valid */
- sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
- void *intr_handler_arg; /* argument to call interrupt handler */
- u16 intmask; /* Current active interrupts */
- void *sdos_info; /* Pointer to per-OS private data */
-
- uint irq; /* Client irq */
- int intrcount; /* Client interrupts */
- bool sd_use_dma; /* DMA on CMD53 */
- bool sd_blockmode; /* sd_blockmode == false => 64 Byte Cmd 53s. */
- /* Must be on for sd_multiblock to be effective */
- bool use_client_ints; /* If this is false, make sure to restore */
- int sd_mode; /* SD1/SD4/SPI */
- int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
- u8 num_funcs; /* Supported funcs on client */
- u32 com_cis_ptr;
- u32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
- uint max_dma_len;
- uint max_dma_descriptors; /* DMA Descriptors supported by this controller. */
- /* SDDMA_DESCRIPTOR SGList[32]; *//* Scatter/Gather DMA List */
-};
-
-/************************************************************
- * Internal interfaces: per-port references into bcmsdh_sdmmc.c
- */
-
-/* Global message bits */
-extern uint sd_msglevel;
-
-/* OS-independent interrupt handler */
-extern bool check_client_intr(sdioh_info_t *sd);
-
-/* Core interrupt enable/disable of device interrupts */
-extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
-extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
-
-/**************************************************************
- * Internal interfaces: bcmsdh_sdmmc.c references to per-port code
- */
-
-/* Register mapping routines */
-extern u32 *sdioh_sdmmc_reg_map(s32 addr, int size);
-extern void sdioh_sdmmc_reg_unmap(s32 addr, int size);
-
-/* Interrupt (de)registration routines */
-extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
-extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd);
-
-typedef struct _BCMSDH_SDMMC_INSTANCE {
- sdioh_info_t *sd;
- struct sdio_func *func[SDIOD_MAX_IOFUNCS];
- u32 host_claimed;
-} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE;
-
-#endif /* __BCMSDH_SDMMC_H__ */
diff --git a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc_linux.c b/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc_linux.c
deleted file mode 100644
index 2792a4dfe65..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/bcmsdh_sdmmc_linux.c
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#include <linux/types.h>
-#include <linux/sched.h> /* request_irq() */
-#include <linux/netdevice.h>
-#include <bcmdefs.h>
-#include <bcmutils.h>
-#include <sdio.h> /* SDIO Specs */
-#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
-#include <sdiovar.h> /* to get msglevel bit values */
-
-#include <linux/mmc/core.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-
-#include "dngl_stats.h"
-#include "dhd.h"
-
-#if !defined(SDIO_VENDOR_ID_BROADCOM)
-#define SDIO_VENDOR_ID_BROADCOM 0x02d0
-#endif /* !defined(SDIO_VENDOR_ID_BROADCOM) */
-
-#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000
-
-#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)
-#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB 0x0492 /* BCM94325SDGWB */
-#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */
-#if !defined(SDIO_DEVICE_ID_BROADCOM_4325)
-#define SDIO_DEVICE_ID_BROADCOM_4325 0x0493
-#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */
-#if !defined(SDIO_DEVICE_ID_BROADCOM_4329)
-#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
-#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
-#if !defined(SDIO_DEVICE_ID_BROADCOM_4319)
-#define SDIO_DEVICE_ID_BROADCOM_4319 0x4319
-#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
-
-#include <bcmsdh_sdmmc.h>
-
-#include <dhd_dbg.h>
-#include <wl_cfg80211.h>
-
-extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
-extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
-
-int sdio_function_init(void);
-void sdio_function_cleanup(void);
-
-/* module param defaults */
-static int clockoverride;
-
-module_param(clockoverride, int, 0644);
-MODULE_PARM_DESC(clockoverride, "SDIO card clock override");
-
-PBCMSDH_SDMMC_INSTANCE gInstance;
-
-/* Maximum number of bcmsdh_sdmmc devices supported by driver */
-#define BCMSDH_SDMMC_MAX_DEVICES 1
-
-extern int bcmsdh_probe(struct device *dev);
-extern int bcmsdh_remove(struct device *dev);
-struct device sdmmc_dev;
-
-static int bcmsdh_sdmmc_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- int ret = 0;
- static struct sdio_func sdio_func_0;
- sd_trace(("bcmsdh_sdmmc: %s Enter\n", __func__));
- sd_trace(("sdio_bcmsdh: func->class=%x\n", func->class));
- sd_trace(("sdio_vendor: 0x%04x\n", func->vendor));
- sd_trace(("sdio_device: 0x%04x\n", func->device));
- sd_trace(("Function#: 0x%04x\n", func->num));
-
- if (func->num == 1) {
- sdio_func_0.num = 0;
- sdio_func_0.card = func->card;
- gInstance->func[0] = &sdio_func_0;
- if (func->device == 0x4) { /* 4318 */
- gInstance->func[2] = NULL;
- sd_trace(("NIC found, calling bcmsdh_probe...\n"));
- ret = bcmsdh_probe(&sdmmc_dev);
- }
- }
-
- gInstance->func[func->num] = func;
-
- if (func->num == 2) {
- wl_cfg80211_sdio_func(func);
- sd_trace(("F2 found, calling bcmsdh_probe...\n"));
- ret = bcmsdh_probe(&sdmmc_dev);
- }
-
- return ret;
-}
-
-static void bcmsdh_sdmmc_remove(struct sdio_func *func)
-{
- sd_trace(("bcmsdh_sdmmc: %s Enter\n", __func__));
- sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
- sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
- sd_info(("sdio_device: 0x%04x\n", func->device));
- sd_info(("Function#: 0x%04x\n", func->num));
-
- if (func->num == 2) {
- sd_trace(("F2 found, calling bcmsdh_remove...\n"));
- bcmsdh_remove(&sdmmc_dev);
- }
-}
-
-/* devices we support, null terminated */
-static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT)},
- {SDIO_DEVICE
- (SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
- {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319)},
- { /* end: all zeroes */ },
-};
-
-MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
-
-static struct sdio_driver bcmsdh_sdmmc_driver = {
- .probe = bcmsdh_sdmmc_probe,
- .remove = bcmsdh_sdmmc_remove,
- .name = "brcmfmac",
- .id_table = bcmsdh_sdmmc_ids,
-};
-
-struct sdos_info {
- sdioh_info_t *sd;
- spinlock_t lock;
-};
-
-int sdioh_sdmmc_osinit(sdioh_info_t *sd)
-{
- struct sdos_info *sdos;
-
- sdos = kmalloc(sizeof(struct sdos_info), GFP_ATOMIC);
- sd->sdos_info = (void *)sdos;
- if (sdos == NULL)
- return -ENOMEM;
-
- sdos->sd = sd;
- spin_lock_init(&sdos->lock);
- return 0;
-}
-
-void sdioh_sdmmc_osfree(sdioh_info_t *sd)
-{
- struct sdos_info *sdos;
- ASSERT(sd && sd->sdos_info);
-
- sdos = (struct sdos_info *)sd->sdos_info;
- kfree(sdos);
-}
-
-/* Interrupt enable/disable */
-SDIOH_API_RC sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
-{
- unsigned long flags;
- struct sdos_info *sdos;
-
- sd_trace(("%s: %s\n", __func__, enable ? "Enabling" : "Disabling"));
-
- sdos = (struct sdos_info *)sd->sdos_info;
- ASSERT(sdos);
-
-#if !defined(OOB_INTR_ONLY)
- if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
- sd_err(("%s: no handler registered, will not enable\n",
- __func__));
- return SDIOH_API_RC_FAIL;
- }
-#endif /* !defined(OOB_INTR_ONLY) */
-
- /* Ensure atomicity for enable/disable calls */
- spin_lock_irqsave(&sdos->lock, flags);
-
- sd->client_intr_enabled = enable;
- if (enable)
- sdioh_sdmmc_devintr_on(sd);
- else
- sdioh_sdmmc_devintr_off(sd);
-
- spin_unlock_irqrestore(&sdos->lock, flags);
-
- return SDIOH_API_RC_SUCCESS;
-}
-
-/*
- * module init
-*/
-int sdio_function_init(void)
-{
- int error = 0;
- sd_trace(("bcmsdh_sdmmc: %s Enter\n", __func__));
-
- gInstance = kzalloc(sizeof(BCMSDH_SDMMC_INSTANCE), GFP_KERNEL);
- if (!gInstance)
- return -ENOMEM;
-
- memset(&sdmmc_dev, 0, sizeof(sdmmc_dev));
- error = sdio_register_driver(&bcmsdh_sdmmc_driver);
-
- return error;
-}
-
-/*
- * module cleanup
-*/
-extern int bcmsdh_remove(struct device *dev);
-void sdio_function_cleanup(void)
-{
- sd_trace(("%s Enter\n", __func__));
-
- sdio_unregister_driver(&bcmsdh_sdmmc_driver);
-
- kfree(gInstance);
-}
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd.h b/drivers/staging/brcm80211/brcmfmac/dhd.h
index a726b493ea8..82bf04df16d 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd.h
+++ b/drivers/staging/brcm80211/brcmfmac/dhd.h
@@ -18,51 +18,539 @@
* Common types *
*/
-#ifndef _dhd_h_
-#define _dhd_h_
-
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/random.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/suspend.h>
-#include <asm/uaccess.h>
-#include <asm/unaligned.h>
-/* The kernel threading is sdio-specific */
-
-#include <wlioctl.h>
-
-/* Forward decls */
-struct dhd_bus;
-struct dhd_prot;
-struct dhd_info;
+#ifndef _BRCMF_H_
+#define _BRCMF_H_
+
+#define BRCMF_VERSION_STR "4.218.248.5"
+
+#define BRCMF_C_IOCTL_SMLEN 256 /* "small" ioctl buffer required */
+#define BRCMF_C_IOCTL_MEDLEN 1536 /* "med" ioctl buffer required */
+#define BRCMF_C_IOCTL_MAXLEN 8192
+
+#define BRCMF_C_UP 2
+#define BRCMF_C_SET_PROMISC 10
+#define BRCMF_C_GET_RATE 12
+#define BRCMF_C_GET_INFRA 19
+#define BRCMF_C_SET_INFRA 20
+#define BRCMF_C_GET_AUTH 21
+#define BRCMF_C_SET_AUTH 22
+#define BRCMF_C_GET_BSSID 23
+#define BRCMF_C_GET_SSID 25
+#define BRCMF_C_SET_SSID 26
+#define BRCMF_C_GET_CHANNEL 29
+#define BRCMF_C_GET_SRL 31
+#define BRCMF_C_GET_LRL 33
+#define BRCMF_C_GET_RADIO 37
+#define BRCMF_C_SET_RADIO 38
+#define BRCMF_C_GET_PHYTYPE 39
+#define BRCMF_C_SET_KEY 45
+#define BRCMF_C_SET_PASSIVE_SCAN 49
+#define BRCMF_C_SCAN 50
+#define BRCMF_C_SCAN_RESULTS 51
+#define BRCMF_C_DISASSOC 52
+#define BRCMF_C_REASSOC 53
+#define BRCMF_C_SET_ROAM_TRIGGER 55
+#define BRCMF_C_SET_ROAM_DELTA 57
+#define BRCMF_C_GET_DTIMPRD 77
+#define BRCMF_C_SET_COUNTRY 84
+#define BRCMF_C_GET_PM 85
+#define BRCMF_C_SET_PM 86
+#define BRCMF_C_GET_AP 117
+#define BRCMF_C_SET_AP 118
+#define BRCMF_C_GET_RSSI 127
+#define BRCMF_C_GET_WSEC 133
+#define BRCMF_C_SET_WSEC 134
+#define BRCMF_C_GET_PHY_NOISE 135
+#define BRCMF_C_GET_BSS_INFO 136
+#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
+#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
+#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201
+#define BRCMF_C_GET_VALID_CHANNELS 217
+#define BRCMF_C_GET_KEY_PRIMARY 235
+#define BRCMF_C_SET_KEY_PRIMARY 236
+#define BRCMF_C_SET_SCAN_PASSIVE_TIME 258
+#define BRCMF_C_GET_VAR 262
+#define BRCMF_C_SET_VAR 263
+
+/* phy types (returned by WLC_GET_PHYTPE) */
+#define WLC_PHY_TYPE_A 0
+#define WLC_PHY_TYPE_B 1
+#define WLC_PHY_TYPE_G 2
+#define WLC_PHY_TYPE_N 4
+#define WLC_PHY_TYPE_LP 5
+#define WLC_PHY_TYPE_SSN 6
+#define WLC_PHY_TYPE_HT 7
+#define WLC_PHY_TYPE_LCN 8
+#define WLC_PHY_TYPE_NULL 0xf
+
+#define BRCMF_PKT_FILTER_FIXED_LEN offsetof(struct brcmf_pkt_filter, u)
+#define BRCMF_PKT_FILTER_PATTERN_FIXED_LEN \
+ offsetof(struct brcmf_pkt_filter_pattern, mask_and_pattern)
+
+#define BRCMF_EVENTING_MASK_LEN 16
+
+#define TOE_TX_CSUM_OL 0x00000001
+#define TOE_RX_CSUM_OL 0x00000002
+
+/* maximum channels returned by the get valid channels iovar */
+#define WL_NUMCHANNELS 64
+
+#define BRCMF_BSS_INFO_VERSION 108 /* current ver of brcmf_bss_info struct */
+
+/* size of brcmf_scan_params not including variable length array */
+#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
+
+/* masks for channel and ssid count */
+#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
+
+#define BRCMF_SCAN_ACTION_START 1
+#define BRCMF_SCAN_ACTION_CONTINUE 2
+#define WL_SCAN_ACTION_ABORT 3
+
+#define BRCMF_ISCAN_REQ_VERSION 1
+
+/* brcmf_iscan_results status values */
+#define BRCMF_SCAN_RESULTS_SUCCESS 0
+#define BRCMF_SCAN_RESULTS_PARTIAL 1
+#define BRCMF_SCAN_RESULTS_PENDING 2
+#define BRCMF_SCAN_RESULTS_ABORTED 3
+#define BRCMF_SCAN_RESULTS_NO_MEM 4
+
+#define WL_SOFT_KEY (1 << 0) /* Indicates this key is using soft encrypt */
+#define BRCMF_PRIMARY_KEY (1 << 1) /* primary (ie tx) key */
+#define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */
+#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */
+#define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for an IBSS PEER */
+
+/* For supporting multiple interfaces */
+#define BRCMF_MAX_IFS 16
+#define BRCMF_DEL_IF -0xe
+#define BRCMF_BAD_IF -0xf
+
+#define DOT11_BSSTYPE_ANY 2
+#define DOT11_MAX_DEFAULT_KEYS 4
+
+#define BRCMF_EVENT_MSG_LINK 0x01
+#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
+#define BRCMF_EVENT_MSG_GROUP 0x04
+
+struct brcmf_event_msg {
+ u16 version;
+ u16 flags;
+ u32 event_type;
+ u32 status;
+ u32 reason;
+ u32 auth_type;
+ u32 datalen;
+ u8 addr[ETH_ALEN];
+ char ifname[IFNAMSIZ];
+} __packed;
+
+struct brcm_ethhdr {
+ u16 subtype;
+ u16 length;
+ u8 version;
+ u8 oui[3];
+ u16 usr_subtype;
+} __packed;
+
+struct brcmf_event {
+ struct ethhdr eth;
+ struct brcm_ethhdr hdr;
+ struct brcmf_event_msg msg;
+} __packed;
+
+struct dngl_stats {
+ unsigned long rx_packets; /* total packets received */
+ unsigned long tx_packets; /* total packets transmitted */
+ unsigned long rx_bytes; /* total bytes received */
+ unsigned long tx_bytes; /* total bytes transmitted */
+ unsigned long rx_errors; /* bad packets received */
+ unsigned long tx_errors; /* packet transmit problems */
+ unsigned long rx_dropped; /* packets dropped by dongle */
+ unsigned long tx_dropped; /* packets dropped by dongle */
+ unsigned long multicast; /* multicast packets received */
+};
+
+#define BRCMF_E_SET_SSID 0
+#define BRCMF_E_JOIN 1
+#define BRCMF_E_START 2
+#define BRCMF_E_AUTH 3
+#define BRCMF_E_AUTH_IND 4
+#define BRCMF_E_DEAUTH 5
+#define BRCMF_E_DEAUTH_IND 6
+#define BRCMF_E_ASSOC 7
+#define BRCMF_E_ASSOC_IND 8
+#define BRCMF_E_REASSOC 9
+#define BRCMF_E_REASSOC_IND 10
+#define BRCMF_E_DISASSOC 11
+#define BRCMF_E_DISASSOC_IND 12
+#define BRCMF_E_QUIET_START 13
+#define BRCMF_E_QUIET_END 14
+#define BRCMF_E_BEACON_RX 15
+#define BRCMF_E_LINK 16
+#define BRCMF_E_MIC_ERROR 17
+#define BRCMF_E_NDIS_LINK 18
+#define BRCMF_E_ROAM 19
+#define BRCMF_E_TXFAIL 20
+#define BRCMF_E_PMKID_CACHE 21
+#define BRCMF_E_RETROGRADE_TSF 22
+#define BRCMF_E_PRUNE 23
+#define BRCMF_E_AUTOAUTH 24
+#define BRCMF_E_EAPOL_MSG 25
+#define BRCMF_E_SCAN_COMPLETE 26
+#define BRCMF_E_ADDTS_IND 27
+#define BRCMF_E_DELTS_IND 28
+#define BRCMF_E_BCNSENT_IND 29
+#define BRCMF_E_BCNRX_MSG 30
+#define BRCMF_E_BCNLOST_MSG 31
+#define BRCMF_E_ROAM_PREP 32
+#define BRCMF_E_PFN_NET_FOUND 33
+#define BRCMF_E_PFN_NET_LOST 34
+#define BRCMF_E_RESET_COMPLETE 35
+#define BRCMF_E_JOIN_START 36
+#define BRCMF_E_ROAM_START 37
+#define BRCMF_E_ASSOC_START 38
+#define BRCMF_E_IBSS_ASSOC 39
+#define BRCMF_E_RADIO 40
+#define BRCMF_E_PSM_WATCHDOG 41
+#define BRCMF_E_PROBREQ_MSG 44
+#define BRCMF_E_SCAN_CONFIRM_IND 45
+#define BRCMF_E_PSK_SUP 46
+#define BRCMF_E_COUNTRY_CODE_CHANGED 47
+#define BRCMF_E_EXCEEDED_MEDIUM_TIME 48
+#define BRCMF_E_ICV_ERROR 49
+#define BRCMF_E_UNICAST_DECODE_ERROR 50
+#define BRCMF_E_MULTICAST_DECODE_ERROR 51
+#define BRCMF_E_TRACE 52
+#define BRCMF_E_IF 54
+#define BRCMF_E_RSSI 56
+#define BRCMF_E_PFN_SCAN_COMPLETE 57
+#define BRCMF_E_EXTLOG_MSG 58
+#define BRCMF_E_ACTION_FRAME 59
+#define BRCMF_E_ACTION_FRAME_COMPLETE 60
+#define BRCMF_E_PRE_ASSOC_IND 61
+#define BRCMF_E_PRE_REASSOC_IND 62
+#define BRCMF_E_CHANNEL_ADOPTED 63
+#define BRCMF_E_AP_STARTED 64
+#define BRCMF_E_DFS_AP_STOP 65
+#define BRCMF_E_DFS_AP_RESUME 66
+#define BRCMF_E_RESERVED1 67
+#define BRCMF_E_RESERVED2 68
+#define BRCMF_E_ESCAN_RESULT 69
+#define BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70
+#define BRCMF_E_DCS_REQUEST 73
+
+#define BRCMF_E_FIFO_CREDIT_MAP 74
+
+#define BRCMF_E_LAST 75
+
+#define BRCMF_E_STATUS_SUCCESS 0
+#define BRCMF_E_STATUS_FAIL 1
+#define BRCMF_E_STATUS_TIMEOUT 2
+#define BRCMF_E_STATUS_NO_NETWORKS 3
+#define BRCMF_E_STATUS_ABORT 4
+#define BRCMF_E_STATUS_NO_ACK 5
+#define BRCMF_E_STATUS_UNSOLICITED 6
+#define BRCMF_E_STATUS_ATTEMPT 7
+#define BRCMF_E_STATUS_PARTIAL 8
+#define BRCMF_E_STATUS_NEWSCAN 9
+#define BRCMF_E_STATUS_NEWASSOC 10
+#define BRCMF_E_STATUS_11HQUIET 11
+#define BRCMF_E_STATUS_SUPPRESS 12
+#define BRCMF_E_STATUS_NOCHANS 13
+#define BRCMF_E_STATUS_CS_ABORT 15
+#define BRCMF_E_STATUS_ERROR 16
+
+#define BRCMF_E_REASON_INITIAL_ASSOC 0
+#define BRCMF_E_REASON_LOW_RSSI 1
+#define BRCMF_E_REASON_DEAUTH 2
+#define BRCMF_E_REASON_DISASSOC 3
+#define BRCMF_E_REASON_BCNS_LOST 4
+#define BRCMF_E_REASON_MINTXRATE 9
+#define BRCMF_E_REASON_TXFAIL 10
+
+#define BRCMF_E_REASON_FAST_ROAM_FAILED 5
+#define BRCMF_E_REASON_DIRECTED_ROAM 6
+#define BRCMF_E_REASON_TSPEC_REJECTED 7
+#define BRCMF_E_REASON_BETTER_AP 8
+
+#define BRCMF_E_PRUNE_ENCR_MISMATCH 1
+#define BRCMF_E_PRUNE_BCAST_BSSID 2
+#define BRCMF_E_PRUNE_MAC_DENY 3
+#define BRCMF_E_PRUNE_MAC_NA 4
+#define BRCMF_E_PRUNE_REG_PASSV 5
+#define BRCMF_E_PRUNE_SPCT_MGMT 6
+#define BRCMF_E_PRUNE_RADAR 7
+#define BRCMF_E_RSN_MISMATCH 8
+#define BRCMF_E_PRUNE_NO_COMMON_RATES 9
+#define BRCMF_E_PRUNE_BASIC_RATES 10
+#define BRCMF_E_PRUNE_CIPHER_NA 12
+#define BRCMF_E_PRUNE_KNOWN_STA 13
+#define BRCMF_E_PRUNE_WDS_PEER 15
+#define BRCMF_E_PRUNE_QBSS_LOAD 16
+#define BRCMF_E_PRUNE_HOME_AP 17
+
+#define BRCMF_E_SUP_OTHER 0
+#define BRCMF_E_SUP_DECRYPT_KEY_DATA 1
+#define BRCMF_E_SUP_BAD_UCAST_WEP128 2
+#define BRCMF_E_SUP_BAD_UCAST_WEP40 3
+#define BRCMF_E_SUP_UNSUP_KEY_LEN 4
+#define BRCMF_E_SUP_PW_KEY_CIPHER 5
+#define BRCMF_E_SUP_MSG3_TOO_MANY_IE 6
+#define BRCMF_E_SUP_MSG3_IE_MISMATCH 7
+#define BRCMF_E_SUP_NO_INSTALL_FLAG 8
+#define BRCMF_E_SUP_MSG3_NO_GTK 9
+#define BRCMF_E_SUP_GRP_KEY_CIPHER 10
+#define BRCMF_E_SUP_GRP_MSG1_NO_GTK 11
+#define BRCMF_E_SUP_GTK_DECRYPT_FAIL 12
+#define BRCMF_E_SUP_SEND_FAIL 13
+#define BRCMF_E_SUP_DEAUTH 14
+
+#define BRCMF_E_IF_ADD 1
+#define BRCMF_E_IF_DEL 2
+#define BRCMF_E_IF_CHANGE 3
+
+#define BRCMF_E_IF_ROLE_STA 0
+#define BRCMF_E_IF_ROLE_AP 1
+#define BRCMF_E_IF_ROLE_WDS 2
+
+#define BRCMF_E_LINK_BCN_LOSS 1
+#define BRCMF_E_LINK_DISASSOC 2
+#define BRCMF_E_LINK_ASSOC_REC 3
+#define BRCMF_E_LINK_BSSCFG_DIS 4
/* The level of bus communication with the dongle */
-enum dhd_bus_state {
- DHD_BUS_DOWN, /* Not ready for frame transfers */
- DHD_BUS_LOAD, /* Download access only (CPU reset) */
- DHD_BUS_DATA /* Ready for frame transfers */
+enum brcmf_bus_state {
+ BRCMF_BUS_DOWN, /* Not ready for frame transfers */
+ BRCMF_BUS_LOAD, /* Download access only (CPU reset) */
+ BRCMF_BUS_DATA /* Ready for frame transfers */
+};
+
+/* Pattern matching filter. Specifies an offset within received packets to
+ * start matching, the pattern to match, the size of the pattern, and a bitmask
+ * that indicates which bits within the pattern should be matched.
+ */
+struct brcmf_pkt_filter_pattern {
+ u32 offset; /* Offset within received packet to start pattern matching.
+ * Offset '0' is the first byte of the ethernet header.
+ */
+ u32 size_bytes; /* Size of the pattern. Bitmask must be the same size. */
+ u8 mask_and_pattern[1]; /* Variable length mask and pattern data. mask starts
+ * at offset 0. Pattern immediately follows mask.
+ */
+};
+
+/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
+struct brcmf_pkt_filter {
+ u32 id; /* Unique filter id, specified by app. */
+ u32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). */
+ u32 negate_match; /* Negate the result of filter matches */
+ union { /* Filter definitions */
+ struct brcmf_pkt_filter_pattern pattern; /* Filter pattern */
+ } u;
+};
+
+/* IOVAR "pkt_filter_enable" parameter. */
+struct brcmf_pkt_filter_enable {
+ u32 id; /* Unique filter id */
+ u32 enable; /* Enable/disable bool */
+};
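
A hedged sketch of how this is used in practice (the helper below is hypothetical, not part of the patch): because the mask and the pattern are each size_bytes long and stored back to back in mask_and_pattern[], a "pkt_filter_add" payload has to be sized as follows.

	/* Hypothetical helper: bytes needed for a "pkt_filter_add" payload
	 * whose mask and pattern are each pattern_len (== size_bytes) long.
	 * offsetof() comes from <linux/stddef.h>.
	 */
	static size_t brcmf_pkt_filter_size(u32 pattern_len)
	{
		return offsetof(struct brcmf_pkt_filter,
				u.pattern.mask_and_pattern) + 2 * pattern_len;
	}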
+
+/* BSS info structure
+ * Applications MUST CHECK the ie_offset and length fields to access the IEs
+ * and the next bss_info structure in a vector (in struct brcmf_scan_results)
+ */
+struct brcmf_bss_info {
+ u32 version; /* version field */
+ u32 length; /* byte length of data in this record,
+ * starting at version and including IEs
+ */
+ u8 BSSID[ETH_ALEN];
+ u16 beacon_period; /* units are Kusec */
+ u16 capability; /* Capability information */
+ u8 SSID_len;
+ u8 SSID[32];
+ struct {
+ uint count; /* # rates in this set */
+ u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
+ } rateset; /* supported rates */
+ chanspec_t chanspec; /* chanspec for bss */
+ u16 atim_window; /* units are Kusec */
+ u8 dtim_period; /* DTIM period */
+ s16 RSSI; /* receive signal strength (in dBm) */
+ s8 phy_noise; /* noise (in dBm) */
+
+ u8 n_cap; /* BSS is 802.11N Capable */
+ u32 nbss_cap; /* 802.11N BSS Capabilities (based on HT_CAP_*) */
+ u8 ctl_ch; /* 802.11N BSS control channel number */
+ u32 reserved32[1]; /* Reserved for expansion of BSS properties */
+ u8 flags; /* flags */
+ u8 reserved[3]; /* Reserved for expansion of BSS properties */
+ u8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
+
+ u16 ie_offset; /* offset at which IEs start, from beginning */
+ u32 ie_length; /* byte length of Information Elements */
+ s16 SNR; /* average SNR during frame reception */
+ /* Add new fields here */
+ /* variable length Information Elements */
+};
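
To make the warning above concrete, here is a minimal sketch (hypothetical helpers, not in the patch) of walking a record: the IEs begin ie_offset bytes from the top of the record, and the next record in a result vector begins length bytes after the current one. Byte-order conversion is omitted.

	/* Hypothetical helpers: locate the IEs of a record and step to the
	 * next record in a scan-result vector.
	 */
	static const u8 *brcmf_bss_info_ies(const struct brcmf_bss_info *bi)
	{
		return (const u8 *)bi + bi->ie_offset; /* ie_length bytes */
	}

	static const struct brcmf_bss_info *
	brcmf_next_bss_info(const struct brcmf_bss_info *bi)
	{
		/* 'length' covers the whole record including trailing IEs */
		return (const struct brcmf_bss_info *)
			((const u8 *)bi + bi->length);
	}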
+
+struct brcmf_ssid {
+ u32 SSID_len;
+ unsigned char SSID[32];
+};
+
+struct brcmf_scan_params {
+ struct brcmf_ssid ssid; /* default: {0, ""} */
+ u8 bssid[ETH_ALEN]; /* default: bcast */
+ s8 bss_type; /* default: any,
+ * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
+ */
+ u8 scan_type; /* flags, 0 use default */
+ s32 nprobes; /* -1 use default, number of probes per channel */
+ s32 active_time; /* -1 use default, dwell time per channel for
+ * active scanning
+ */
+ s32 passive_time; /* -1 use default, dwell time per channel
+ * for passive scanning
+ */
+ s32 home_time; /* -1 use default, dwell time for the home channel
+ * between channel scans
+ */
+ s32 channel_num; /* count of channels and ssids that follow
+ *
+ * low half is count of channels in
+ * channel_list, 0 means default (use all
+ * available channels)
+ *
+ * high half is entries in struct brcmf_ssid
+ * array that follows channel_list, aligned for
+ * s32 (4 bytes) meaning an odd channel count
+ * implies a 2-byte pad between end of
+ * channel_list and first ssid
+ *
+ * if ssid count is zero, single ssid in the
+ * fixed parameter portion is assumed, otherwise
+ * ssid in the fixed portion is ignored
+ */
+ u16 channel_list[1]; /* list of chanspecs */
+};
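
A short worked example of the channel_num encoding just described (the helper is an assumption, not in the patch): channel count in the low 16 bits, ssid count in the high 16 bits.

	static u32 brcmf_scan_channel_num(u16 nchannels, u16 nssids)
	{
		/* low half: channels in channel_list; high half: trailing
		 * struct brcmf_ssid entries
		 */
		return ((u32)nssids << 16) | nchannels;
	}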
+
+/* incremental scan struct */
+struct brcmf_iscan_params {
+ u32 version;
+ u16 action;
+ u16 scan_duration;
+ struct brcmf_scan_params params;
+};
+
+/* 3 fields + size of brcmf_scan_params, not including variable length array */
+#define BRCMF_ISCAN_PARAMS_FIXED_SIZE \
+ (offsetof(struct brcmf_iscan_params, params) + \
+ sizeof(struct brcmf_ssid))
+
+struct brcmf_scan_results {
+ u32 buflen;
+ u32 version;
+ u32 count;
+ struct brcmf_bss_info bss_info[1];
+};
+
+/* used for association with a specific BSSID and chanspec list */
+struct brcmf_assoc_params {
+ u8 bssid[ETH_ALEN]; /* 00:00:00:00:00:00: broadcast scan */
+ s32 chanspec_num; /* 0: all available channels,
+ * otherwise count of chanspecs in chanspec_list
+ */
+ chanspec_t chanspec_list[1]; /* list of chanspecs */
+};
+#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
+ (sizeof(struct brcmf_assoc_params) - sizeof(chanspec_t))
+
+/* used for join with or without a specific bssid and channel list */
+struct brcmf_join_params {
+ struct brcmf_ssid ssid;
+ struct brcmf_assoc_params params;
+};
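
As a sketch under the definitions above (the helper is hypothetical), a join request carrying n chanspecs occupies the fixed ssid, the fixed assoc parameters, and one chanspec per entry:

	static size_t brcmf_join_params_size(u32 chanspec_num)
	{
		return sizeof(struct brcmf_ssid) +
		       BRCMF_ASSOC_PARAMS_FIXED_SIZE +
		       chanspec_num * sizeof(chanspec_t);
	}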
+
+/* size of brcmf_scan_results not including variable length array */
+#define BRCMF_SCAN_RESULTS_FIXED_SIZE \
+ (sizeof(struct brcmf_scan_results) - sizeof(struct brcmf_bss_info))
+
+/* incremental scan results struct */
+struct brcmf_iscan_results {
+ u32 status;
+ struct brcmf_scan_results results;
+};
+
+/* size of brcmf_iscan_results not including variable length array */
+#define BRCMF_ISCAN_RESULTS_FIXED_SIZE \
+ (BRCMF_SCAN_RESULTS_FIXED_SIZE + \
+ offsetof(struct brcmf_iscan_results, results))
+
+struct brcmf_wsec_key {
+ u32 index; /* key index */
+ u32 len; /* key length */
+ u8 data[WLAN_MAX_KEY_LEN]; /* key data */
+ u32 pad_1[18];
+ u32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+ u32 flags; /* misc flags */
+ u32 pad_2[2];
+ int pad_3;
+ int iv_initialized; /* has IV been initialized already? */
+ int pad_4;
+ /* Rx IV */
+ struct {
+ u32 hi; /* upper 32 bits of IV */
+ u16 lo; /* lower 16 bits of IV */
+ } rxiv;
+ u32 pad_5[2];
+ u8 ea[ETH_ALEN]; /* per station */
+};
+
+/* Used to get specific STA parameters */
+struct brcmf_scb_val {
+ u32 val;
+ u8 ea[ETH_ALEN];
+};
+
+/* channel encoding */
+struct brcmf_channel_info {
+ int hw_channel;
+ int target_channel;
+ int scan_channel;
};
+/* Linux network driver ioctl encoding */
+struct brcmf_ioctl {
+ uint cmd; /* common ioctl definition */
+ void *buf; /* pointer to user buffer */
+ uint len; /* length of user buffer */
+ u8 set; /* get or set request (optional) */
+ uint used; /* bytes read or written (optional) */
+ uint needed; /* bytes needed (optional) */
+};
+
+/* Forward decls for struct brcmf_pub (see below) */
+struct brcmf_bus; /* device bus info */
+struct brcmf_proto; /* device communication protocol info */
+struct brcmf_info; /* device driver info */
+
/* Common structure for module and instance linkage */
-typedef struct dhd_pub {
+struct brcmf_pub {
/* Linkage ponters */
- struct dhd_bus *bus; /* Bus module handle */
- struct dhd_prot *prot; /* Protocol module handle */
- struct dhd_info *info; /* Info module handle */
+ struct brcmf_bus *bus;
+ struct brcmf_proto *prot;
+ struct brcmf_info *info;
- /* Internal dhd items */
+ /* Internal brcmf items */
bool up; /* Driver up/down (to OS) */
bool txoff; /* Transmit flow-controlled */
bool dongle_reset; /* true = DEVRESET put dongle into reset */
- enum dhd_bus_state busstate;
- uint hdrlen; /* Total DHD header length (proto + bus) */
+ enum brcmf_bus_state busstate;
+ uint hdrlen; /* Total BRCMF header length (proto + bus) */
uint maxctl; /* Max size rxctl request from proto to bus */
uint rxsz; /* Rx buffer size bus module should use */
u8 wme_dp; /* wme discard priority */
@@ -71,7 +559,7 @@ typedef struct dhd_pub {
bool iswl; /* Dongle-resident driver is wl */
unsigned long drv_version; /* Version of dongle-resident driver */
u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
- dngl_stats_t dstats; /* Stats for dongle-based data */
+ struct dngl_stats dstats; /* Stats for dongle-based data */
/* Additional stats for the bus level */
unsigned long tx_packets; /* Data packets sent to dongle */
@@ -88,7 +576,7 @@ typedef struct dhd_pub {
unsigned long rx_dropped; /* Packets dropped locally (no memory) */
unsigned long rx_flushed; /* Packets flushed due to
unscheduled sendup thread */
- unsigned long wd_dpc_sched; /* Number of times dhd dpc scheduled by
+ unsigned long wd_dpc_sched; /* Number of times dpc scheduled by
watchdog timer */
unsigned long rx_readahead_cnt; /* Number of packets where header read-ahead
@@ -108,40 +596,51 @@ typedef struct dhd_pub {
int suspend_disable_flag; /* "1" to disable all extra powersaving
during suspend */
int in_suspend; /* flag set to 1 when early suspend called */
-#ifdef PNO_SUPPORT
- int pno_enable; /* pno status : "1" is pno enable */
-#endif /* PNO_SUPPORT */
int dtim_skip; /* dtim skip , default 0 means wake each dtim */
/* Pkt filter defination */
char *pktfilter[100];
int pktfilter_count;
- u8 country_code[WLC_CNTRY_BUF_SZ];
- char eventmask[WL_EVENTING_MASK_LEN];
+ u8 country_code[BRCM_CNTRY_BUF_SZ];
+ char eventmask[BRCMF_EVENTING_MASK_LEN];
+
+};
+
+struct brcmf_if_event {
+ u8 ifidx;
+ u8 action;
+ u8 flags;
+ u8 bssidx;
+};
+
+struct brcmf_timeout {
+ u32 limit; /* Expiration time (usec) */
+ u32 increment; /* Current expiration increment (usec) */
+ u32 elapsed; /* Current elapsed time (usec) */
+ u32 tick; /* O/S tick time (usec) */
+};
-} dhd_pub_t;
+struct bcmevent_name {
+ uint event;
+ const char *name;
+};
#if defined(CONFIG_PM_SLEEP)
-extern atomic_t dhd_mmc_suspend;
-#define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
-#define _DHD_PM_RESUME_WAIT(a, b) do { \
+extern atomic_t brcmf_mmc_suspend;
+#define BRCMF_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+#define _BRCMF_PM_RESUME_WAIT(a, b) do { \
int retry = 0; \
- while (atomic_read(&dhd_mmc_suspend) && retry++ != b) { \
+ while (atomic_read(&brcmf_mmc_suspend) && retry++ != b) { \
wait_event_timeout(a, false, HZ/100); \
} \
} while (0)
-#define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 30)
-#define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0)
-#define DHD_PM_RESUME_RETURN_ERROR(a) \
- do { if (atomic_read(&dhd_mmc_suspend)) return a; } while (0)
-#define DHD_PM_RESUME_RETURN do { \
- if (atomic_read(&dhd_mmc_suspend)) \
- return; \
- } while (0)
+#define BRCMF_PM_RESUME_WAIT(a) _BRCMF_PM_RESUME_WAIT(a, 30)
+#define BRCMF_PM_RESUME_RETURN_ERROR(a) \
+ do { if (atomic_read(&brcmf_mmc_suspend)) return a; } while (0)
-#define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
-#define SPINWAIT_SLEEP(a, exp, us) do { \
+#define BRCMF_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
+#define BRCMF_SPINWAIT_SLEEP(a, exp, us) do { \
uint countdown = (us) + 9999; \
while ((exp) && (countdown >= 10000)) { \
wait_event_timeout(a, false, HZ/100); \
@@ -151,14 +650,12 @@ extern atomic_t dhd_mmc_suspend;
#else
-#define DHD_PM_RESUME_WAIT_INIT(a)
-#define DHD_PM_RESUME_WAIT(a)
-#define DHD_PM_RESUME_WAIT_FOREVER(a)
-#define DHD_PM_RESUME_RETURN_ERROR(a)
-#define DHD_PM_RESUME_RETURN
+#define BRCMF_PM_RESUME_WAIT_INIT(a)
+#define BRCMF_PM_RESUME_WAIT(a)
+#define BRCMF_PM_RESUME_RETURN_ERROR(a)
-#define DHD_SPINWAIT_SLEEP_INIT(a)
-#define SPINWAIT_SLEEP(a, exp, us) do { \
+#define BRCMF_SPINWAIT_SLEEP_INIT(a)
+#define BRCMF_SPINWAIT_SLEEP(a, exp, us) do { \
uint countdown = (us) + 9; \
while ((exp) && (countdown >= 10)) { \
udelay(10); \
@@ -167,29 +664,82 @@ extern atomic_t dhd_mmc_suspend;
} while (0)
#endif /* defined(CONFIG_PM_SLEEP) */
-#define DHD_IF_VIF 0x01 /* Virtual IF (Hidden from user) */
-static inline void MUTEX_LOCK_INIT(dhd_pub_t *dhdp)
+/*
+ * Insmod parameters for debug/test
+ */
+
+/* Use interrupts */
+extern uint brcmf_intr;
+
+/* Use polling */
+extern uint brcmf_poll;
+
+/* ARP offload agent mode */
+extern uint brcmf_arp_mode;
+
+/* ARP offload enable */
+extern uint brcmf_arp_enable;
+
+/* Pkt filter enable control */
+extern uint brcmf_pkt_filter_enable;
+
+/* Pkt filter init setup */
+extern uint brcmf_pkt_filter_init;
+
+/* Pkt filter mode control */
+extern uint brcmf_master_mode;
+
+/* Roaming mode control */
+extern uint brcmf_roam;
+
+/* Radio state control */
+extern uint brcmf_radio_up;
+
+/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
+extern int brcmf_idletime;
+#define BRCMF_IDLETIME_TICKS 1
+
+/* SDIO Drive Strength */
+extern uint brcmf_sdiod_drive_strength;
+
+/* Override to force tx queueing all the time */
+extern uint brcmf_force_tx_queueing;
+
+#ifdef SDTEST
+/* Echo packet generator (SDIO), pkts/s */
+extern uint brcmf_pktgen;
+
+/* Echo packet len (0 => sawtooth, max 1800) */
+extern uint brcmf_pktgen_len;
+#define BRCMF_MAX_PKTGEN_LEN 1800
+#endif
+
+extern const struct bcmevent_name bcmevent_names[];
+extern const int bcmevent_names_size;
+
+
+static inline void MUTEX_LOCK_INIT(struct brcmf_pub *drvr)
{
}
-static inline void MUTEX_LOCK(dhd_pub_t *dhdp)
+static inline void MUTEX_LOCK(struct brcmf_pub *drvr)
{
}
-static inline void MUTEX_UNLOCK(dhd_pub_t *dhdp)
+static inline void MUTEX_UNLOCK(struct brcmf_pub *drvr)
{
}
-static inline void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t *dhdp)
+static inline void MUTEX_LOCK_SOFTAP_SET_INIT(struct brcmf_pub *drvr)
{
}
-static inline void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t *dhdp)
+static inline void MUTEX_LOCK_SOFTAP_SET(struct brcmf_pub *drvr)
{
}
-static inline void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t *dhdp)
+static inline void MUTEX_UNLOCK_SOFTAP_SET(struct brcmf_pub *drvr)
{
}
@@ -205,210 +755,150 @@ static inline void MUTEX_UNLOCK_WL_SCAN_SET(void)
{
}
-typedef struct dhd_if_event {
- u8 ifidx;
- u8 action;
- u8 flags;
- u8 bssidx;
-} dhd_if_event_t;
-
-/*
- * Exported from dhd OS modules (dhd_linux/dhd_ndis)
- */
-
/* Indication from bus module regarding presence/insertion of dongle.
- * Return dhd_pub_t pointer, used as handle to OS module in later calls.
+ * Return struct brcmf_pub pointer, used as handle to OS module in later calls.
* Returned structure should have bus and prot pointers filled in.
* bus_hdrlen specifies required headroom for bus module header.
*/
-extern dhd_pub_t *dhd_attach(struct dhd_bus *bus,
- uint bus_hdrlen);
-extern int dhd_net_attach(dhd_pub_t *dhdp, int idx);
+extern struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus,
+ uint bus_hdrlen);
+extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
+extern int brcmf_netdev_wait_pend8021x(struct net_device *dev);
/* Indication from bus module regarding removal/absence of dongle */
-extern void dhd_detach(dhd_pub_t *dhdp);
+extern void brcmf_detach(struct brcmf_pub *drvr);
/* Indication from bus module to change flow-control state */
-extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
+extern void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool on);
-extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q,
+extern bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
struct sk_buff *pkt, int prec);
/* Receive frame for delivery to OS. Callee disposes of rxp. */
-extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx,
+extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
struct sk_buff *rxp, int numpkt);
/* Return pointer to interface name */
-extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
-
-/* Request scheduling of the bus dpc */
-extern void dhd_sched_dpc(dhd_pub_t *dhdp);
+extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
/* Notify tx completion */
-extern void dhd_txcomplete(dhd_pub_t *dhdp, struct sk_buff *txp, bool success);
+extern void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp,
+ bool success);
/* Query ioctl */
-extern int dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf,
- uint len);
+extern int brcmf_proto_cdc_query_ioctl(struct brcmf_pub *drvr, int ifidx,
+ uint cmd, void *buf, uint len);
/* OS independent layer functions */
-extern int dhd_os_proto_block(dhd_pub_t *pub);
-extern int dhd_os_proto_unblock(dhd_pub_t *pub);
-extern int dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition,
+extern int brcmf_os_proto_block(struct brcmf_pub *drvr);
+extern int brcmf_os_proto_unblock(struct brcmf_pub *drvr);
+extern int brcmf_os_ioctl_resp_wait(struct brcmf_pub *drvr, uint *condition,
bool *pending);
-extern int dhd_os_ioctl_resp_wake(dhd_pub_t *pub);
-extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
-extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
-extern void *dhd_os_open_image(char *filename);
-extern int dhd_os_get_image_block(char *buf, int len, void *image);
-extern void dhd_os_close_image(void *image);
-extern void dhd_os_wd_timer(void *bus, uint wdtick);
-extern void dhd_os_sdlock(dhd_pub_t *pub);
-extern void dhd_os_sdunlock(dhd_pub_t *pub);
-extern void dhd_os_sdlock_txq(dhd_pub_t *pub);
-extern void dhd_os_sdunlock_txq(dhd_pub_t *pub);
-extern void dhd_os_sdlock_rxq(dhd_pub_t *pub);
-extern void dhd_os_sdunlock_rxq(dhd_pub_t *pub);
-extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t *pub);
-extern void dhd_customer_gpio_wlan_ctrl(int onoff);
-extern int dhd_custom_get_mac_address(unsigned char *buf);
-extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t *pub);
-extern void dhd_os_sdlock_eventq(dhd_pub_t *pub);
-extern void dhd_os_sdunlock_eventq(dhd_pub_t *pub);
-#ifdef DHD_DEBUG
-extern int write_to_file(dhd_pub_t *dhd, u8 *buf, int size);
-#endif /* DHD_DEBUG */
-#if defined(OOB_INTR_ONLY)
-extern int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr);
-#endif /* defined(OOB_INTR_ONLY) */
-extern void dhd_os_sdtxlock(dhd_pub_t *pub);
-extern void dhd_os_sdtxunlock(dhd_pub_t *pub);
-
-int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
-
-typedef struct {
- u32 limit; /* Expiration time (usec) */
- u32 increment; /* Current expiration increment (usec) */
- u32 elapsed; /* Current elapsed time (usec) */
- u32 tick; /* O/S tick time (usec) */
-} dhd_timeout_t;
+extern int brcmf_os_ioctl_resp_wake(struct brcmf_pub *drvr);
+extern unsigned int brcmf_os_get_ioctl_resp_timeout(void);
+extern void brcmf_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
+#ifdef BCMDBG
+extern int brcmf_write_to_file(struct brcmf_pub *drvr, u8 *buf, int size);
+#endif /* BCMDBG */
-extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec);
-extern int dhd_timeout_expired(dhd_timeout_t *tmo);
+extern void brcmf_timeout_start(struct brcmf_timeout *tmo, uint usec);
+extern int brcmf_timeout_expired(struct brcmf_timeout *tmo);
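
A usage sketch for the two timeout helpers declared above (the wrapper and the 2-second budget are assumptions, not from the patch):

	static int brcmf_wait_for_cond(bool *cond)
	{
		struct brcmf_timeout tmo;

		brcmf_timeout_start(&tmo, 2 * 1000 * 1000);	/* usec */
		while (!*cond && !brcmf_timeout_expired(&tmo))
			cpu_relax();
		return *cond ? 0 : -ETIMEDOUT;
	}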
-extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
-extern u8 *dhd_bssidx2bssid(dhd_pub_t *dhd, int idx);
-extern int wl_host_event(struct dhd_info *dhd, int *idx, void *pktdata,
- wl_event_msg_t *, void **data_ptr);
+extern int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name);
+extern int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *idx,
+ void *pktdata, struct brcmf_event_msg *,
+ void **data_ptr);
-extern void dhd_common_init(void);
+extern void brcmf_c_init(void);
-extern int dhd_add_if(struct dhd_info *dhd, int ifidx, void *handle,
+extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, void *handle,
char *name, u8 *mac_addr, u32 flags, u8 bssidx);
-extern void dhd_del_if(struct dhd_info *dhd, int ifidx);
-
-extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char *name);
-extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
-
-extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
-extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, unsigned char * cp,
- int len);
+extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx);
/* Send packet to dongle via data channel */
-extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, struct sk_buff *pkt);
-
-/* Send event to host */
-extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event,
- void *data);
-extern int dhd_bus_devreset(dhd_pub_t *dhdp, u8 flag);
-extern uint dhd_bus_status(dhd_pub_t *dhdp);
-extern int dhd_bus_start(dhd_pub_t *dhdp);
-
-enum cust_gpio_modes {
- WLAN_RESET_ON,
- WLAN_RESET_OFF,
- WLAN_POWER_ON,
- WLAN_POWER_OFF
+extern int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx,
+ struct sk_buff *pkt);
+
+extern int brcmf_bus_devreset(struct brcmf_pub *drvr, u8 flag);
+extern int brcmf_bus_start(struct brcmf_pub *drvr);
+
+extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
+extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
+ int enable, int master_mode);
+
+/* Linux network driver ioctl encoding */
+struct brcmf_c_ioctl {
+ uint cmd; /* common ioctl definition */
+ void *buf; /* pointer to user buffer */
+ uint len; /* length of user buffer */
+ bool set; /* get or set request (optional) */
+ uint used; /* bytes read or written (optional) */
+ uint needed; /* bytes needed (optional) */
+ uint driver; /* to identify target driver */
};
-/*
- * Insmod parameters for debug/test
- */
-
-/* Watchdog timer interval */
-extern uint dhd_watchdog_ms;
-
-#if defined(DHD_DEBUG)
-/* Console output poll interval */
-extern uint dhd_console_ms;
-#endif /* defined(DHD_DEBUG) */
-
-/* Use interrupts */
-extern uint dhd_intr;
-
-/* Use polling */
-extern uint dhd_poll;
-
-/* ARP offload agent mode */
-extern uint dhd_arp_mode;
-
-/* ARP offload enable */
-extern uint dhd_arp_enable;
-
-/* Pkt filte enable control */
-extern uint dhd_pkt_filter_enable;
-
-/* Pkt filter init setup */
-extern uint dhd_pkt_filter_init;
-
-/* Pkt filter mode control */
-extern uint dhd_master_mode;
-/* Roaming mode control */
-extern uint dhd_roam;
-
-/* Roaming mode control */
-extern uint dhd_radio_up;
-
-/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
-extern int dhd_idletime;
-#define DHD_IDLETIME_TICKS 1
-
-/* SDIO Drive Strength */
-extern uint dhd_sdiod_drive_strength;
-
-/* Override to force tx queueing all the time */
-extern uint dhd_force_tx_queueing;
+/* per-driver magic numbers */
+#define BRCMF_IOCTL_MAGIC 0x00444944
+
+/* bump this number if you change the ioctl interface */
+#define BRCMF_IOCTL_VERSION 1
+#define BRCMF_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
+
+/* common ioctl definitions */
+#define BRCMF_GET_MAGIC 0
+#define BRCMF_GET_VERSION 1
+#define BRCMF_GET_VAR 2
+#define BRCMF_SET_VAR 3
+
+/* message levels */
+#define BRCMF_ERROR_VAL 0x0001
+#define BRCMF_TRACE_VAL 0x0002
+#define BRCMF_INFO_VAL 0x0004
+#define BRCMF_DATA_VAL 0x0008
+#define BRCMF_CTL_VAL 0x0010
+#define BRCMF_TIMER_VAL 0x0020
+#define BRCMF_HDRS_VAL 0x0040
+#define BRCMF_BYTES_VAL 0x0080
+#define BRCMF_INTR_VAL 0x0100
+#define BRCMF_GLOM_VAL 0x0400
+#define BRCMF_EVENT_VAL 0x0800
+#define BRCMF_BTA_VAL 0x1000
+#define BRCMF_ISCAN_VAL 0x2000
#ifdef SDTEST
-/* Echo packet generator (SDIO), pkts/s */
-extern uint dhd_pktgen;
-
-/* Echo packet len (0 => sawtooth, max 1800) */
-extern uint dhd_pktgen_len;
-#define MAX_PKTGEN_LEN 1800
-#endif
-
-/* optionally set by a module_param_string() */
-#define MOD_PARAM_PATHLEN 2048
-extern char fw_path[MOD_PARAM_PATHLEN];
-extern char nv_path[MOD_PARAM_PATHLEN];
+/* For pktgen iovar */
+struct brcmf_pktgen {
+ uint version; /* To allow structure change tracking */
+ uint freq; /* Max ticks between tx/rx attempts */
+ uint count; /* Test packets to send/rcv each attempt */
+ uint print; /* Print counts every <print> attempts */
+ uint total; /* Total packets (or bursts) */
+ uint minlen; /* Minimum length of packets to send */
+ uint maxlen; /* Maximum length of packets to send */
+ uint numsent; /* Count of test packets sent */
+ uint numrcvd; /* Count of test packets received */
+ uint numfail; /* Count of test send failures */
+ uint mode; /* Test mode (type of test packets) */
+ uint stop; /* Stop after this many tx failures */
+};
-/* For supporting multiple interfaces */
-#define DHD_MAX_IFS 16
-#define DHD_DEL_IF -0xe
-#define DHD_BAD_IF -0xf
+/* Version in case structure changes */
+#define BRCMF_PKTGEN_VERSION 2
-extern void dhd_wait_for_event(dhd_pub_t *dhd, bool * lockvar);
-extern void dhd_wait_event_wakeup(dhd_pub_t *dhd);
+/* Type of test packets to use */
+#define BRCMF_PKTGEN_ECHO 1 /* Send echo requests */
+#define BRCMF_PKTGEN_SEND 2 /* Send discard packets */
+#define BRCMF_PKTGEN_RXBURST 3 /* Request dongle send N packets */
+#define BRCMF_PKTGEN_RECV 4 /* Continuous rx from continuous
+ tx dongle */
+#endif /* SDTEST */
-extern u32 g_assert_type;
+/* Enter idle immediately (no timeout) */
+#define BRCMF_IDLE_IMMEDIATE (-1)
-#ifdef BCMDBG
-#define ASSERT(exp) \
- do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
-extern void osl_assert(char *exp, char *file, int line);
-#else
-#define ASSERT(exp) do {} while (0)
-#endif /* defined(BCMDBG) */
+/* Values for idleclock iovar: other values are the sd_divisor to use
+ when idle */
+#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
+ when idle */
-#endif /* _dhd_h_ */
+#endif /* _BRCMF_H_ */
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_bus.h b/drivers/staging/brcm80211/brcmfmac/dhd_bus.h
index 065f1aeb6ca..653cf0daa0e 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_bus.h
@@ -14,69 +14,65 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _dhd_bus_h_
-#define _dhd_bus_h_
+#ifndef _BRCMF_BUS_H_
+#define _BRCMF_BUS_H_
+
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#ifndef BRCMF_SDALIGN
+#define BRCMF_SDALIGN 32
+#endif
+#if !ISPOWEROF2(BRCMF_SDALIGN)
+#error BRCMF_SDALIGN is not a power of 2!
+#endif
/*
- * Exported from dhd bus module (dhd_usb, dhd_sdio)
+ * Exported from brcmf bus module (brcmf_usb, brcmf_sdio)
*/
-/* Indicate (dis)interest in finding dongles. */
-extern int dhd_bus_register(void);
-extern void dhd_bus_unregister(void);
+/* dongle ram module parameter */
+extern int brcmf_dongle_memsize;
+
+/* Tx/Rx bounds module parameters */
+extern uint brcmf_txbound;
+extern uint brcmf_rxbound;
-/* Download firmware image and nvram image */
-extern bool dhd_bus_download_firmware(struct dhd_bus *bus,
- char *fw_path, char *nv_path);
+/* Watchdog timer interval */
+extern uint brcmf_watchdog_ms;
+
+/* Indicate (dis)interest in finding dongles. */
+extern int brcmf_bus_register(void);
+extern void brcmf_bus_unregister(void);
/* Stop bus module: clear pending frames, disable data flow */
-extern void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex);
+extern void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus, bool enforce_mutex);
/* Initialize bus module: prepare for communication w/dongle */
-extern int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex);
+extern int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr, bool enforce_mutex);
/* Send a data frame to the dongle. Callee disposes of txp. */
-extern int dhd_bus_txdata(struct dhd_bus *bus, struct sk_buff *txp);
+extern int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *txp);
/* Send/receive a control message to/from the dongle.
* Expects caller to enforce a single outstanding transaction.
*/
-extern int dhd_bus_txctl(struct dhd_bus *bus, unsigned char *msg, uint msglen);
-extern int dhd_bus_rxctl(struct dhd_bus *bus, unsigned char *msg, uint msglen);
+extern int
+brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
-/* Watchdog timer function */
-extern bool dhd_bus_watchdog(dhd_pub_t *dhd);
-
-#ifdef DHD_DEBUG
-/* Device console input function */
-extern int dhd_bus_console_in(dhd_pub_t *dhd, unsigned char *msg, uint msglen);
-#endif /* DHD_DEBUG */
-
-/* Deferred processing for the bus, return true requests reschedule */
-extern bool dhd_bus_dpc(struct dhd_bus *bus);
-extern void dhd_bus_isr(bool *InterruptRecognized,
- bool *QueueMiniportHandleInterrupt, void *arg);
+extern int
+brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
/* Check for and handle local prot-specific iovar commands */
-extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+extern int brcmf_sdbrcm_bus_iovar_op(struct brcmf_pub *drvr, const char *name,
void *params, int plen, void *arg, int len,
bool set);
/* Add bus dump output to a buffer */
-extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+extern void brcmf_sdbrcm_bus_dump(struct brcmf_pub *drvr,
+ struct brcmu_strbuf *strbuf);
/* Clear any bus counters */
-extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
-
-/* return the dongle chipid */
-extern uint dhd_bus_chip(struct dhd_bus *bus);
-
-/* Set user-specified nvram parameters. */
-extern void dhd_bus_set_nvram_params(struct dhd_bus *bus,
- const char *nvram_params);
+extern void brcmf_bus_clearcounts(struct brcmf_pub *drvr);
-extern void *dhd_bus_pub(struct dhd_bus *bus);
-extern void *dhd_bus_txq(struct dhd_bus *bus);
-extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
+extern void brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick);
-#endif /* _dhd_bus_h_ */
+#endif /* _BRCMF_BUS_H_ */
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c b/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c
index ba5a5cb7eed..345acabe935 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_cdc.c
@@ -16,33 +16,71 @@
#include <linux/types.h>
#include <linux/netdevice.h>
-#include <bcmdefs.h>
-
-#include <bcmutils.h>
-#include <bcmcdc.h>
-
-#include <dngl_stats.h>
-#include <dhd.h>
-#include <dhd_proto.h>
-#include <dhd_bus.h>
-#include <dhd_dbg.h>
-#ifdef CUSTOMER_HW2
-int wifi_get_mac_addr(unsigned char *buf);
-#endif
-
-extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
-
-/* Packet alignment for most efficient SDIO (can change based on platform) */
-#ifndef DHD_SDALIGN
-#define DHD_SDALIGN 32
-#endif
-#if !ISPOWEROF2(DHD_SDALIGN)
-#error DHD_SDALIGN is not a power of 2!
-#endif
+#include <linux/sched.h>
+#include <defs.h>
+
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+
+#include "dhd.h"
+#include "dhd_proto.h"
+#include "dhd_bus.h"
+#include "dhd_dbg.h"
+
+struct brcmf_proto_cdc_ioctl {
+ u32 cmd; /* ioctl command value */
+ u32 len; /* lower 16: output buflen;
+ * upper 16: input buflen (excludes header) */
+ u32 flags; /* flag defns given below */
+ u32 status; /* status code returned from the device */
+};
+
+/* Max valid buffer size that can be sent to the dongle */
+#define CDC_MAX_MSG_SIZE (ETH_FRAME_LEN+ETH_FCS_LEN)
+
+/* CDC flag definitions */
+#define CDCF_IOC_ERROR 0x01 /* 1=ioctl cmd failed */
+#define CDCF_IOC_SET 0x02 /* 0=get, 1=set cmd */
+#define CDCF_IOC_IF_MASK 0xF000 /* I/F index */
+#define CDCF_IOC_IF_SHIFT 12
+#define CDCF_IOC_ID_MASK 0xFFFF0000 /* id an ioctl pairing */
+#define CDCF_IOC_ID_SHIFT 16 /* ID Mask shift bits */
+#define CDC_IOC_ID(flags) \
+ (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
+#define CDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | \
+ ((idx) << CDCF_IOC_IF_SHIFT)))
+
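
To make the flag layout concrete, a sketch of how a request's flags word could be composed before the response is matched back via CDC_IOC_ID() (the helper is hypothetical, not in the patch):

	static u32 cdc_compose_flags(u16 reqid, int ifidx, bool set)
	{
		u32 flags = (u32)reqid << CDCF_IOC_ID_SHIFT; /* pairs cmd/resp */

		flags |= ((u32)ifidx << CDCF_IOC_IF_SHIFT) & CDCF_IOC_IF_MASK;
		if (set)
			flags |= CDCF_IOC_SET;	/* otherwise a "get" */
		return flags;
	}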
+/*
+ * BDC header - Broadcom specific extension of CDC.
+ * Used on data packets to convey priority across USB.
+ */
+#define BDC_HEADER_LEN 4
+#define BDC_PROTO_VER 1 /* Protocol version */
+#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
+#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
+#define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */
+#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums */
+#define BDC_PRIORITY_MASK 0x7
+#define BDC_FLAG2_IF_MASK 0x0f /* packet rx interface in APSTA */
+#define BDC_FLAG2_IF_SHIFT 0
+
+#define BDC_GET_IF_IDX(hdr) \
+ ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
+#define BDC_SET_IF_IDX(hdr, idx) \
+ ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | \
+ ((idx) << BDC_FLAG2_IF_SHIFT)))
+
+struct brcmf_proto_bdc_header {
+ u8 flags;
+ u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */
+ u8 flags2;
+ u8 rssi;
+};
+
#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
-#define BUS_HEADER_LEN (16+DHD_SDALIGN) /* Must be atleast SDPCM_RESERVE
- * defined in dhd_sdio.c
+#define BUS_HEADER_LEN (16+BRCMF_SDALIGN) /* Must be at least SDPCM_RESERVE
* (amount of header tha might be added)
* plus any space that might be needed
* for alignment padding.
@@ -51,21 +89,22 @@ extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
* round off at the end of buffer
*/
-typedef struct dhd_prot {
+struct brcmf_proto {
u16 reqid;
u8 pending;
u32 lastcmd;
u8 bus_header[BUS_HEADER_LEN];
- cdc_ioctl_t msg;
- unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];
-} dhd_prot_t;
+ struct brcmf_proto_cdc_ioctl msg;
+ unsigned char buf[BRCMF_C_IOCTL_MAXLEN + ROUND_UP_MARGIN];
+};
-static int dhdcdc_msg(dhd_pub_t *dhd)
+static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
{
- dhd_prot_t *prot = dhd->prot;
- int len = le32_to_cpu(prot->msg.len) + sizeof(cdc_ioctl_t);
+ struct brcmf_proto *prot = drvr->prot;
+ int len = le32_to_cpu(prot->msg.len) +
+ sizeof(struct brcmf_proto_cdc_ioctl);
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
/* NOTE : cdc->msg.len holds the desired length of the buffer to be
* returned. Only up to CDC_MAX_MSG_SIZE of this buffer area
@@ -75,20 +114,21 @@ static int dhdcdc_msg(dhd_pub_t *dhd)
len = CDC_MAX_MSG_SIZE;
/* Send request */
- return dhd_bus_txctl(dhd->bus, (unsigned char *)&prot->msg, len);
+ return brcmf_sdbrcm_bus_txctl(drvr->bus, (unsigned char *)&prot->msg,
+ len);
}
-static int dhdcdc_cmplt(dhd_pub_t *dhd, u32 id, u32 len)
+static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
{
int ret;
- dhd_prot_t *prot = dhd->prot;
+ struct brcmf_proto *prot = drvr->prot;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
do {
- ret =
- dhd_bus_rxctl(dhd->bus, (unsigned char *)&prot->msg,
- len + sizeof(cdc_ioctl_t));
+ ret = brcmf_sdbrcm_bus_rxctl(drvr->bus,
+ (unsigned char *)&prot->msg,
+ len + sizeof(struct brcmf_proto_cdc_ioctl));
if (ret < 0)
break;
} while (CDC_IOC_ID(le32_to_cpu(prot->msg.flags)) != id);
@@ -97,30 +137,31 @@ static int dhdcdc_cmplt(dhd_pub_t *dhd, u32 id, u32 len)
}
int
-dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len)
+brcmf_proto_cdc_query_ioctl(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len)
{
- dhd_prot_t *prot = dhd->prot;
- cdc_ioctl_t *msg = &prot->msg;
+ struct brcmf_proto *prot = drvr->prot;
+ struct brcmf_proto_cdc_ioctl *msg = &prot->msg;
void *info;
int ret = 0, retries = 0;
u32 id, flags = 0;
- DHD_TRACE(("%s: Enter\n", __func__));
- DHD_CTL(("%s: cmd %d len %d\n", __func__, cmd, len));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
+ BRCMF_CTL(("%s: cmd %d len %d\n", __func__, cmd, len));
/* Respond "bcmerror" and "bcmerrorstr" with local cache */
- if (cmd == WLC_GET_VAR && buf) {
+ if (cmd == BRCMF_C_GET_VAR && buf) {
if (!strcmp((char *)buf, "bcmerrorstr")) {
strncpy((char *)buf, "bcm_error",
BCME_STRLEN);
goto done;
} else if (!strcmp((char *)buf, "bcmerror")) {
- *(int *)buf = dhd->dongle_error;
+ *(int *)buf = drvr->dongle_error;
goto done;
}
}
- memset(msg, 0, sizeof(cdc_ioctl_t));
+ memset(msg, 0, sizeof(struct brcmf_proto_cdc_ioctl));
msg->cmd = cpu_to_le32(cmd);
msg->len = cpu_to_le32(len);
@@ -131,16 +172,16 @@ dhdcdc_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len)
if (buf)
memcpy(prot->buf, buf, len);
- ret = dhdcdc_msg(dhd);
+ ret = brcmf_proto_cdc_msg(drvr);
if (ret < 0) {
- DHD_ERROR(("dhdcdc_query_ioctl: dhdcdc_msg failed w/status "
- "%d\n", ret));
+ BRCMF_ERROR(("brcmf_proto_cdc_query_ioctl: brcmf_proto_cdc_msg "
+ "failed w/status %d\n", ret));
goto done;
}
retry:
/* wait for interrupt and get first fragment */
- ret = dhdcdc_cmplt(dhd, prot->reqid, len);
+ ret = brcmf_proto_cdc_cmplt(drvr, prot->reqid, len);
if (ret < 0)
goto done;
@@ -150,8 +191,9 @@ retry:
if ((id < prot->reqid) && (++retries < RETRIES))
goto retry;
if (id != prot->reqid) {
- DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
- dhd_ifname(dhd, ifidx), __func__, id, prot->reqid));
+ BRCMF_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+ brcmf_ifname(drvr, ifidx), __func__, id,
+ prot->reqid));
ret = -EINVAL;
goto done;
}
@@ -170,24 +212,25 @@ retry:
if (flags & CDCF_IOC_ERROR) {
ret = le32_to_cpu(msg->status);
/* Cache error from dongle */
- dhd->dongle_error = ret;
+ drvr->dongle_error = ret;
}
done:
return ret;
}
-int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len)
+int brcmf_proto_cdc_set_ioctl(struct brcmf_pub *drvr, int ifidx, uint cmd,
+ void *buf, uint len)
{
- dhd_prot_t *prot = dhd->prot;
- cdc_ioctl_t *msg = &prot->msg;
+ struct brcmf_proto *prot = drvr->prot;
+ struct brcmf_proto_cdc_ioctl *msg = &prot->msg;
int ret = 0;
u32 flags, id;
- DHD_TRACE(("%s: Enter\n", __func__));
- DHD_CTL(("%s: cmd %d len %d\n", __func__, cmd, len));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
+ BRCMF_CTL(("%s: cmd %d len %d\n", __func__, cmd, len));
- memset(msg, 0, sizeof(cdc_ioctl_t));
+ memset(msg, 0, sizeof(struct brcmf_proto_cdc_ioctl));
msg->cmd = cpu_to_le32(cmd);
msg->len = cpu_to_le32(len);
@@ -198,11 +241,11 @@ int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len)
if (buf)
memcpy(prot->buf, buf, len);
- ret = dhdcdc_msg(dhd);
+ ret = brcmf_proto_cdc_msg(drvr);
if (ret < 0)
goto done;
- ret = dhdcdc_cmplt(dhd, prot->reqid, len);
+ ret = brcmf_proto_cdc_cmplt(drvr, prot->reqid, len);
if (ret < 0)
goto done;
@@ -210,8 +253,9 @@ int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len)
id = (flags & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT;
if (id != prot->reqid) {
- DHD_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
- dhd_ifname(dhd, ifidx), __func__, id, prot->reqid));
+ BRCMF_ERROR(("%s: %s: unexpected request id %d (expected %d)\n",
+ brcmf_ifname(drvr, ifidx), __func__, id,
+ prot->reqid));
ret = -EINVAL;
goto done;
}
@@ -220,41 +264,40 @@ int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len)
if (flags & CDCF_IOC_ERROR) {
ret = le32_to_cpu(msg->status);
/* Cache error from dongle */
- dhd->dongle_error = ret;
+ drvr->dongle_error = ret;
}
done:
return ret;
}
-extern int dhd_bus_interface(struct dhd_bus *bus, uint arg, void *arg2);
int
-dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
+brcmf_proto_ioctl(struct brcmf_pub *drvr, int ifidx, struct brcmf_ioctl *ioc,
+ void *buf, int len)
{
- dhd_prot_t *prot = dhd->prot;
+ struct brcmf_proto *prot = drvr->prot;
int ret = -1;
- if (dhd->busstate == DHD_BUS_DOWN) {
- DHD_ERROR(("%s : bus is down. we have nothing to do\n",
- __func__));
+ if (drvr->busstate == BRCMF_BUS_DOWN) {
+ BRCMF_ERROR(("%s : bus is down. we have nothing to do\n",
+ __func__));
return ret;
}
- dhd_os_proto_block(dhd);
-
- DHD_TRACE(("%s: Enter\n", __func__));
+ brcmf_os_proto_block(drvr);
- ASSERT(len <= WLC_IOCTL_MAXLEN);
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- if (len > WLC_IOCTL_MAXLEN)
+ if (len > BRCMF_C_IOCTL_MAXLEN)
goto done;
if (prot->pending == true) {
- DHD_TRACE(("CDC packet is pending!!!! cmd=0x%x (%lu) "
- "lastcmd=0x%x (%lu)\n",
- ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
- (unsigned long)prot->lastcmd));
- if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR))
- DHD_TRACE(("iovar cmd=%s\n", (char *)buf));
+ BRCMF_TRACE(("CDC packet is pending!!!! cmd=0x%x (%lu) "
+ "lastcmd=0x%x (%lu)\n",
+ ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+ (unsigned long)prot->lastcmd));
+ if ((ioc->cmd == BRCMF_C_SET_VAR) ||
+ (ioc->cmd == BRCMF_C_GET_VAR))
+ BRCMF_TRACE(("iovar cmd=%s\n", (char *)buf));
goto done;
}
@@ -262,36 +305,39 @@ dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
prot->pending = true;
prot->lastcmd = ioc->cmd;
if (ioc->set)
- ret = dhdcdc_set_ioctl(dhd, ifidx, ioc->cmd, buf, len);
+ ret = brcmf_proto_cdc_set_ioctl(drvr, ifidx, ioc->cmd,
+ buf, len);
else {
- ret = dhdcdc_query_ioctl(dhd, ifidx, ioc->cmd, buf, len);
+ ret = brcmf_proto_cdc_query_ioctl(drvr, ifidx, ioc->cmd,
+ buf, len);
if (ret > 0)
- ioc->used = ret - sizeof(cdc_ioctl_t);
+ ioc->used = ret - sizeof(struct brcmf_proto_cdc_ioctl);
}
/* Too many programs assume ioctl() returns 0 on success */
if (ret >= 0)
ret = 0;
else {
- cdc_ioctl_t *msg = &prot->msg;
+ struct brcmf_proto_cdc_ioctl *msg = &prot->msg;
/* len == needed when set/query fails from dongle */
ioc->needed = le32_to_cpu(msg->len);
}
/* Intercept the wme_dp ioctl here */
- if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
+ if (!ret && ioc->cmd == BRCMF_C_SET_VAR &&
+ !strcmp(buf, "wme_dp")) {
int slen, val = 0;
slen = strlen("wme_dp") + 1;
if (len >= (int)(slen + sizeof(int)))
memcpy(&val, (char *)buf + slen, sizeof(int));
- dhd->wme_dp = (u8) le32_to_cpu(val);
+ drvr->wme_dp = (u8) le32_to_cpu(val);
}
prot->pending = false;
done:
- dhd_os_proto_unblock(dhd);
+ brcmf_os_proto_unblock(drvr);
return ret;
}
@@ -302,35 +348,23 @@ done:
(((struct sk_buff *)(skb))->ip_summed = \
((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
-/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because
- skb->ip_summed is overloaded */
-
-int
-dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
- void *params, int plen, void *arg, int len, bool set)
-{
- return -ENOTSUPP;
-}
-
-void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+void brcmf_proto_dump(struct brcmf_pub *drvr, struct brcmu_strbuf *strbuf)
{
- bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
+ brcmu_bprintf(strbuf, "Protocol CDC: reqid %d\n", drvr->prot->reqid);
}
-void dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, struct sk_buff *pktbuf)
+void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
+ struct sk_buff *pktbuf)
{
-#ifdef BDC
- struct bdc_header *h;
-#endif /* BDC */
+ struct brcmf_proto_bdc_header *h;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
-#ifdef BDC
/* Push BDC header used to convey priority for buses that don't */
skb_push(pktbuf, BDC_HEADER_LEN);
- h = (struct bdc_header *)(pktbuf->data);
+ h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);
if (PKTSUMNEEDED(pktbuf))
@@ -339,79 +373,74 @@ void dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, struct sk_buff *pktbuf)
h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
h->flags2 = 0;
h->rssi = 0;
-#endif /* BDC */
BDC_SET_IF_IDX(h, ifidx);
}
-int dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, struct sk_buff *pktbuf)
+int brcmf_proto_hdrpull(struct brcmf_pub *drvr, int *ifidx,
+ struct sk_buff *pktbuf)
{
-#ifdef BDC
- struct bdc_header *h;
-#endif
+ struct brcmf_proto_bdc_header *h;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
-#ifdef BDC
/* Pop BDC header used to convey priority for buses that don't */
if (pktbuf->len < BDC_HEADER_LEN) {
- DHD_ERROR(("%s: rx data too short (%d < %d)\n", __func__,
- pktbuf->len, BDC_HEADER_LEN));
+ BRCMF_ERROR(("%s: rx data too short (%d < %d)\n", __func__,
+ pktbuf->len, BDC_HEADER_LEN));
return -EBADE;
}
- h = (struct bdc_header *)(pktbuf->data);
+ h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
*ifidx = BDC_GET_IF_IDX(h);
- if (*ifidx >= DHD_MAX_IFS) {
- DHD_ERROR(("%s: rx data ifnum out of range (%d)\n",
- __func__, *ifidx));
+ if (*ifidx >= BRCMF_MAX_IFS) {
+ BRCMF_ERROR(("%s: rx data ifnum out of range (%d)\n",
+ __func__, *ifidx));
return -EBADE;
}
if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) !=
BDC_PROTO_VER) {
- DHD_ERROR(("%s: non-BDC packet received, flags 0x%x\n",
- dhd_ifname(dhd, *ifidx), h->flags));
+ BRCMF_ERROR(("%s: non-BDC packet received, flags 0x%x\n",
+ brcmf_ifname(drvr, *ifidx), h->flags));
return -EBADE;
}
if (h->flags & BDC_FLAG_SUM_GOOD) {
- DHD_INFO(("%s: BDC packet received with good rx-csum, "
- "flags 0x%x\n",
- dhd_ifname(dhd, *ifidx), h->flags));
+ BRCMF_INFO(("%s: BDC packet received with good rx-csum, "
+ "flags 0x%x\n",
+ brcmf_ifname(drvr, *ifidx), h->flags));
PKTSETSUMGOOD(pktbuf, true);
}
pktbuf->priority = h->priority & BDC_PRIORITY_MASK;
skb_pull(pktbuf, BDC_HEADER_LEN);
-#endif /* BDC */
return 0;
}
-int dhd_prot_attach(dhd_pub_t *dhd)
+int brcmf_proto_attach(struct brcmf_pub *drvr)
{
- dhd_prot_t *cdc;
+ struct brcmf_proto *cdc;
- cdc = kzalloc(sizeof(dhd_prot_t), GFP_ATOMIC);
+ cdc = kzalloc(sizeof(struct brcmf_proto), GFP_ATOMIC);
if (!cdc) {
- DHD_ERROR(("%s: kmalloc failed\n", __func__));
+ BRCMF_ERROR(("%s: kmalloc failed\n", __func__));
goto fail;
}
/* ensure that the msg buf directly follows the cdc msg struct */
if ((unsigned long)(&cdc->msg + 1) != (unsigned long)cdc->buf) {
- DHD_ERROR(("dhd_prot_t is not correctly defined\n"));
+ BRCMF_ERROR(("struct brcmf_proto is not correctly defined\n"));
goto fail;
}
- dhd->prot = cdc;
-#ifdef BDC
- dhd->hdrlen += BDC_HEADER_LEN;
-#endif
- dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN;
+ drvr->prot = cdc;
+ drvr->hdrlen += BDC_HEADER_LEN;
+ drvr->maxctl = BRCMF_C_IOCTL_MAXLEN +
+ sizeof(struct brcmf_proto_cdc_ioctl) + ROUND_UP_MARGIN;
return 0;
fail:
@@ -420,55 +449,54 @@ fail:
}
/* ~NOTE~ What if another thread is waiting on the semaphore? Holding it? */
-void dhd_prot_detach(dhd_pub_t *dhd)
+void brcmf_proto_detach(struct brcmf_pub *drvr)
{
- kfree(dhd->prot);
- dhd->prot = NULL;
+ kfree(drvr->prot);
+ drvr->prot = NULL;
}
-void dhd_prot_dstats(dhd_pub_t *dhd)
+void brcmf_proto_dstats(struct brcmf_pub *drvr)
{
/* No stats from dongle added yet, copy bus stats */
- dhd->dstats.tx_packets = dhd->tx_packets;
- dhd->dstats.tx_errors = dhd->tx_errors;
- dhd->dstats.rx_packets = dhd->rx_packets;
- dhd->dstats.rx_errors = dhd->rx_errors;
- dhd->dstats.rx_dropped = dhd->rx_dropped;
- dhd->dstats.multicast = dhd->rx_multicast;
+ drvr->dstats.tx_packets = drvr->tx_packets;
+ drvr->dstats.tx_errors = drvr->tx_errors;
+ drvr->dstats.rx_packets = drvr->rx_packets;
+ drvr->dstats.rx_errors = drvr->rx_errors;
+ drvr->dstats.rx_dropped = drvr->rx_dropped;
+ drvr->dstats.multicast = drvr->rx_multicast;
return;
}
-int dhd_prot_init(dhd_pub_t *dhd)
+int brcmf_proto_init(struct brcmf_pub *drvr)
{
int ret = 0;
char buf[128];
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- dhd_os_proto_block(dhd);
+ brcmf_os_proto_block(drvr);
/* Get the device MAC address */
strcpy(buf, "cur_etheraddr");
- ret = dhdcdc_query_ioctl(dhd, 0, WLC_GET_VAR, buf, sizeof(buf));
+ ret = brcmf_proto_cdc_query_ioctl(drvr, 0, BRCMF_C_GET_VAR,
+ buf, sizeof(buf));
if (ret < 0) {
- dhd_os_proto_unblock(dhd);
+ brcmf_os_proto_unblock(drvr);
return ret;
}
- memcpy(dhd->mac, buf, ETH_ALEN);
+ memcpy(drvr->mac, buf, ETH_ALEN);
- dhd_os_proto_unblock(dhd);
+ brcmf_os_proto_unblock(drvr);
-#ifdef EMBEDDED_PLATFORM
- ret = dhd_preinit_ioctls(dhd);
-#endif /* EMBEDDED_PLATFORM */
+ ret = brcmf_c_preinit_ioctls(drvr);
/* Always assumes wl for now */
- dhd->iswl = true;
+ drvr->iswl = true;
return ret;
}
-void dhd_prot_stop(dhd_pub_t *dhd)
+void brcmf_proto_stop(struct brcmf_pub *drvr)
{
/* Nothing to do for CDC */
}
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_common.c b/drivers/staging/brcm80211/brcmfmac/dhd_common.c
index 0bfb93c0075..fdec4683c42 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_common.c
@@ -15,64 +15,41 @@
*/
#include <linux/kernel.h>
#include <linux/string.h>
-#include <bcmdefs.h>
+#include <linux/sched.h>
#include <linux/netdevice.h>
-#include <bcmutils.h>
-#include <dngl_stats.h>
-#include <dhd.h>
-#include <dhd_bus.h>
-#include <dhd_proto.h>
-#include <dhd_dbg.h>
-#include <msgtrace.h>
-#include <wlioctl.h>
-
-int dhd_msg_level;
-char fw_path[MOD_PARAM_PATHLEN];
-char nv_path[MOD_PARAM_PATHLEN];
-
-/* Last connection success/failure status */
-u32 dhd_conn_event;
-u32 dhd_conn_status;
-u32 dhd_conn_reason;
-
-extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf,
- uint len);
-extern void dhd_ind_scan_confirm(void *h, bool status);
-extern int dhd_wl_ioctl(dhd_pub_t *dhd, uint cmd, char *buf, uint buflen);
-void dhd_iscan_lock(void);
-void dhd_iscan_unlock(void);
-
-/* Packet alignment for most efficient SDIO (can change based on platform) */
-#ifndef DHD_SDALIGN
-#define DHD_SDALIGN 32
-#endif
-#if !ISPOWEROF2(DHD_SDALIGN)
-#error DHD_SDALIGN is not a power of 2!
-#endif
-
-#define EPI_VERSION_STR "4.218.248.5"
-#ifdef DHD_DEBUG
-const char dhd_version[] =
-"Dongle Host Driver, version " EPI_VERSION_STR "\nCompiled on " __DATE__
+#include <asm/unaligned.h>
+#include <defs.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_proto.h"
+#include "dhd_dbg.h"
+
+#define BRCM_OUI "\x00\x10\x18"
+#define DOT11_OUI_LEN 3
+#define BCMILCP_BCM_SUBTYPE_EVENT 1
+#define PKTFILTER_BUF_SIZE 2048
+
+int brcmf_msg_level;
+
+#define MSGTRACE_VERSION 1
+
+#ifdef BCMDBG
+const char brcmf_version[] =
+"Dongle Host Driver, version " BRCMF_VERSION_STR "\nCompiled on " __DATE__
" at " __TIME__;
#else
-const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR;
+const char brcmf_version[] = "Dongle Host Driver, version " BRCMF_VERSION_STR;
#endif
-void dhd_set_timer(void *bus, uint wdtick);
-
/* IOVar table */
enum {
IOV_VERSION = 1,
IOV_MSGLEVEL,
IOV_BCMERRORSTR,
IOV_BCMERROR,
- IOV_WDTICK,
IOV_DUMP,
-#ifdef DHD_DEBUG
- IOV_CONS,
- IOV_DCONSOLE_POLL,
-#endif
IOV_CLEARCOUNTS,
IOV_LOGDUMP,
IOV_LOGCAL,
@@ -82,27 +59,19 @@ enum {
IOV_LAST
};
-const bcm_iovar_t dhd_iovars[] = {
- {"version", IOV_VERSION, 0, IOVT_BUFFER, sizeof(dhd_version)}
+const struct brcmu_iovar brcmf_iovars[] = {
+ {"version", IOV_VERSION, 0, IOVT_BUFFER, sizeof(brcmf_version)}
,
-#ifdef DHD_DEBUG
+#ifdef BCMDBG
{"msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0}
,
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
{"bcmerrorstr", IOV_BCMERRORSTR, 0, IOVT_BUFFER, BCME_STRLEN}
,
{"bcmerror", IOV_BCMERROR, 0, IOVT_INT8, 0}
,
- {"wdtick", IOV_WDTICK, 0, IOVT_UINT32, 0}
- ,
- {"dump", IOV_DUMP, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN}
- ,
-#ifdef DHD_DEBUG
- {"dconpoll", IOV_DCONSOLE_POLL, 0, IOVT_UINT32, 0}
+ {"dump", IOV_DUMP, 0, IOVT_BUFFER, BRCMF_IOCTL_MAXLEN}
,
- {"cons", IOV_CONS, 0, IOVT_BUFFER, 0}
- ,
-#endif
{"clearcounts", IOV_CLEARCOUNTS, 0, IOVT_VOID, 0}
,
{"gpioob", IOV_GPIOOB, 0, IOVT_UINT32, 0}
@@ -112,7 +81,23 @@ const bcm_iovar_t dhd_iovars[] = {
{NULL, 0, 0, 0, 0}
};
-void dhd_common_init(void)
+/* Message trace header */
+struct msgtrace_hdr {
+ u8 version;
+ u8 spare;
+ u16 len; /* Len of the trace */
+ u32 seqnum; /* Sequence number of message. Useful
+ * if the message has been lost
+ * because of DMA error or a bus reset
+ * (ex: SDIO Func2)
+ */
+ u32 discarded_bytes; /* Number of discarded bytes because of
+ trace overflow */
+ u32 discarded_printf; /* Number of discarded printf
+ because of trace overflow */
+} __packed;
+
+void brcmf_c_init(void)
{
/* Init global variables at run-time, not as part of the declaration.
* This is required to support init/de-init of the driver.
@@ -122,87 +107,77 @@ void dhd_common_init(void)
* first time that the driver is initialized vs subsequent
* initializations.
*/
- dhd_msg_level = DHD_ERROR_VAL;
-#ifdef CONFIG_BCM4329_FW_PATH
- strncpy(fw_path, CONFIG_BCM4329_FW_PATH, MOD_PARAM_PATHLEN - 1);
-#else
- fw_path[0] = '\0';
-#endif
-#ifdef CONFIG_BCM4329_NVRAM_PATH
- strncpy(nv_path, CONFIG_BCM4329_NVRAM_PATH, MOD_PARAM_PATHLEN - 1);
-#else
- nv_path[0] = '\0';
-#endif
+ brcmf_msg_level = BRCMF_ERROR_VAL;
}
-static int dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
+static int brcmf_c_dump(struct brcmf_pub *drvr, char *buf, int buflen)
{
- struct bcmstrbuf b;
- struct bcmstrbuf *strbuf = &b;
-
- bcm_binit(strbuf, buf, buflen);
-
- /* Base DHD info */
- bcm_bprintf(strbuf, "%s\n", dhd_version);
- bcm_bprintf(strbuf, "\n");
- bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
- dhdp->up, dhdp->txoff, dhdp->busstate);
- bcm_bprintf(strbuf, "pub.hdrlen %d pub.maxctl %d pub.rxsz %d\n",
- dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
- bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %pM\n",
- dhdp->iswl, dhdp->drv_version, &dhdp->mac);
- bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %d\n", dhdp->bcmerror,
- dhdp->tickcnt);
-
- bcm_bprintf(strbuf, "dongle stats:\n");
- bcm_bprintf(strbuf,
+ struct brcmu_strbuf b;
+ struct brcmu_strbuf *strbuf = &b;
+
+ brcmu_binit(strbuf, buf, buflen);
+
+ /* Base info */
+ brcmu_bprintf(strbuf, "%s\n", brcmf_version);
+ brcmu_bprintf(strbuf, "\n");
+ brcmu_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
+ drvr->up, drvr->txoff, drvr->busstate);
+ brcmu_bprintf(strbuf, "pub.hdrlen %d pub.maxctl %d pub.rxsz %d\n",
+ drvr->hdrlen, drvr->maxctl, drvr->rxsz);
+ brcmu_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %pM\n",
+ drvr->iswl, drvr->drv_version, &drvr->mac);
+ brcmu_bprintf(strbuf, "pub.bcmerror %d tickcnt %d\n", drvr->bcmerror,
+ drvr->tickcnt);
+
+ brcmu_bprintf(strbuf, "dongle stats:\n");
+ brcmu_bprintf(strbuf,
"tx_packets %ld tx_bytes %ld tx_errors %ld tx_dropped %ld\n",
- dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
- dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
- bcm_bprintf(strbuf,
+ drvr->dstats.tx_packets, drvr->dstats.tx_bytes,
+ drvr->dstats.tx_errors, drvr->dstats.tx_dropped);
+ brcmu_bprintf(strbuf,
"rx_packets %ld rx_bytes %ld rx_errors %ld rx_dropped %ld\n",
- dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
- dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
- bcm_bprintf(strbuf, "multicast %ld\n", dhdp->dstats.multicast);
-
- bcm_bprintf(strbuf, "bus stats:\n");
- bcm_bprintf(strbuf, "tx_packets %ld tx_multicast %ld tx_errors %ld\n",
- dhdp->tx_packets, dhdp->tx_multicast, dhdp->tx_errors);
- bcm_bprintf(strbuf, "tx_ctlpkts %ld tx_ctlerrs %ld\n",
- dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
- bcm_bprintf(strbuf, "rx_packets %ld rx_multicast %ld rx_errors %ld\n",
- dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
- bcm_bprintf(strbuf,
+ drvr->dstats.rx_packets, drvr->dstats.rx_bytes,
+ drvr->dstats.rx_errors, drvr->dstats.rx_dropped);
+ brcmu_bprintf(strbuf, "multicast %ld\n", drvr->dstats.multicast);
+
+ brcmu_bprintf(strbuf, "bus stats:\n");
+ brcmu_bprintf(strbuf, "tx_packets %ld tx_multicast %ld tx_errors %ld\n",
+ drvr->tx_packets, drvr->tx_multicast, drvr->tx_errors);
+ brcmu_bprintf(strbuf, "tx_ctlpkts %ld tx_ctlerrs %ld\n",
+ drvr->tx_ctlpkts, drvr->tx_ctlerrs);
+ brcmu_bprintf(strbuf, "rx_packets %ld rx_multicast %ld rx_errors %ld\n",
+ drvr->rx_packets, drvr->rx_multicast, drvr->rx_errors);
+ brcmu_bprintf(strbuf,
"rx_ctlpkts %ld rx_ctlerrs %ld rx_dropped %ld rx_flushed %ld\n",
- dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped,
- dhdp->rx_flushed);
- bcm_bprintf(strbuf,
+ drvr->rx_ctlpkts, drvr->rx_ctlerrs, drvr->rx_dropped,
+ drvr->rx_flushed);
+ brcmu_bprintf(strbuf,
"rx_readahead_cnt %ld tx_realloc %ld fc_packets %ld\n",
- dhdp->rx_readahead_cnt, dhdp->tx_realloc, dhdp->fc_packets);
- bcm_bprintf(strbuf, "wd_dpc_sched %ld\n", dhdp->wd_dpc_sched);
- bcm_bprintf(strbuf, "\n");
+ drvr->rx_readahead_cnt, drvr->tx_realloc, drvr->fc_packets);
+ brcmu_bprintf(strbuf, "wd_dpc_sched %ld\n", drvr->wd_dpc_sched);
+ brcmu_bprintf(strbuf, "\n");
/* Add any prot info */
- dhd_prot_dump(dhdp, strbuf);
- bcm_bprintf(strbuf, "\n");
+ brcmf_proto_dump(drvr, strbuf);
+ brcmu_bprintf(strbuf, "\n");
/* Add any bus info */
- dhd_bus_dump(dhdp, strbuf);
+ brcmf_sdbrcm_bus_dump(drvr, strbuf);
return !strbuf->size ? -EOVERFLOW : 0;
}
static int
-dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, u32 actionid,
- const char *name, void *params, int plen, void *arg, int len,
- int val_size)
+brcmf_c_doiovar(struct brcmf_pub *drvr, const struct brcmu_iovar *vi,
+ u32 actionid, const char *name, void *params, int plen,
+ void *arg, int len, int val_size)
{
int bcmerror = 0;
s32 int_val = 0;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid));
+ bcmerror = brcmu_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid));
if (bcmerror != 0)
goto exit;
@@ -212,16 +187,16 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, u32 actionid,
switch (actionid) {
case IOV_GVAL(IOV_VERSION):
/* Need to have checked buffer length */
- strncpy((char *)arg, dhd_version, len);
+ strncpy((char *)arg, brcmf_version, len);
break;
case IOV_GVAL(IOV_MSGLEVEL):
- int_val = (s32) dhd_msg_level;
+ int_val = (s32) brcmf_msg_level;
memcpy(arg, &int_val, val_size);
break;
case IOV_SVAL(IOV_MSGLEVEL):
- dhd_msg_level = int_val;
+ brcmf_msg_level = int_val;
break;
case IOV_GVAL(IOV_BCMERRORSTR):
@@ -231,58 +206,29 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, u32 actionid,
break;
case IOV_GVAL(IOV_BCMERROR):
- int_val = (s32) dhd_pub->bcmerror;
+ int_val = (s32) drvr->bcmerror;
memcpy(arg, &int_val, val_size);
break;
- case IOV_GVAL(IOV_WDTICK):
- int_val = (s32) dhd_watchdog_ms;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_WDTICK):
- if (!dhd_pub->up) {
- bcmerror = -ENOLINK;
- break;
- }
- dhd_os_wd_timer(dhd_pub, (uint) int_val);
- break;
-
case IOV_GVAL(IOV_DUMP):
- bcmerror = dhd_dump(dhd_pub, arg, len);
- break;
-
-#ifdef DHD_DEBUG
- case IOV_GVAL(IOV_DCONSOLE_POLL):
- int_val = (s32) dhd_console_ms;
- memcpy(arg, &int_val, val_size);
- break;
-
- case IOV_SVAL(IOV_DCONSOLE_POLL):
- dhd_console_ms = (uint) int_val;
- break;
-
- case IOV_SVAL(IOV_CONS):
- if (len > 0)
- bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
+ bcmerror = brcmf_c_dump(drvr, arg, len);
break;
-#endif
case IOV_SVAL(IOV_CLEARCOUNTS):
- dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
- dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
- dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
- dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
- dhd_pub->rx_dropped = 0;
- dhd_pub->rx_readahead_cnt = 0;
- dhd_pub->tx_realloc = 0;
- dhd_pub->wd_dpc_sched = 0;
- memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
- dhd_bus_clearcounts(dhd_pub);
+ drvr->tx_packets = drvr->rx_packets = 0;
+ drvr->tx_errors = drvr->rx_errors = 0;
+ drvr->tx_ctlpkts = drvr->rx_ctlpkts = 0;
+ drvr->tx_ctlerrs = drvr->rx_ctlerrs = 0;
+ drvr->rx_dropped = 0;
+ drvr->rx_readahead_cnt = 0;
+ drvr->tx_realloc = 0;
+ drvr->wd_dpc_sched = 0;
+ memset(&drvr->dstats, 0, sizeof(drvr->dstats));
+ brcmf_bus_clearcounts(drvr);
break;
case IOV_GVAL(IOV_IOCTLTIMEOUT):{
- int_val = (s32) dhd_os_get_ioctl_resp_timeout();
+ int_val = (s32) brcmf_os_get_ioctl_resp_timeout();
memcpy(arg, &int_val, sizeof(int_val));
break;
}
@@ -291,7 +237,7 @@ dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, u32 actionid,
if (int_val <= 0)
bcmerror = -EINVAL;
else
- dhd_os_set_ioctl_resp_timeout((unsigned int)
+ brcmf_os_set_ioctl_resp_timeout((unsigned int)
int_val);
break;
}
@@ -305,8 +251,8 @@ exit:
return bcmerror;
}
-bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, struct sk_buff *pkt,
- int prec)
+bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
+ struct sk_buff *pkt, int prec)
{
struct sk_buff *p;
int eprec = -1; /* precedence to evict from */
@@ -316,7 +262,7 @@ bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, struct sk_buff *pkt,
* exceeding total queue length
*/
if (!pktq_pfull(q, prec) && !pktq_full(q)) {
- bcm_pktq_penq(q, prec, pkt);
+ brcmu_pktq_penq(q, prec, pkt);
return true;
}
@@ -324,8 +270,7 @@ bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, struct sk_buff *pkt,
if (pktq_pfull(q, prec))
eprec = prec;
else if (pktq_full(q)) {
- p = bcm_pktq_peek_tail(q, &eprec);
- ASSERT(p);
+ p = brcmu_pktq_peek_tail(q, &eprec);
if (eprec > prec)
return false;
}
@@ -333,60 +278,58 @@ bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, struct sk_buff *pkt,
/* Evict if needed */
if (eprec >= 0) {
/* Detect queueing to unconfigured precedence */
- ASSERT(!pktq_pempty(q, eprec));
- discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
+ discard_oldest = AC_BITMAP_TST(drvr->wme_dp, eprec);
if (eprec == prec && !discard_oldest)
return false; /* refuse newer (incoming) packet */
/* Evict packet according to discard policy */
- p = discard_oldest ? bcm_pktq_pdeq(q, eprec) :
- bcm_pktq_pdeq_tail(q, eprec);
+ p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) :
+ brcmu_pktq_pdeq_tail(q, eprec);
if (p == NULL) {
- DHD_ERROR(("%s: bcm_pktq_penq() failed, oldest %d.",
- __func__, discard_oldest));
- ASSERT(p);
+ BRCMF_ERROR(("%s: brcmu_pktq_penq() failed, oldest %d.",
+ __func__, discard_oldest));
}
-
- bcm_pkt_buf_free_skb(p);
+ brcmu_pkt_buf_free_skb(p);
}
/* Enqueue */
- p = bcm_pktq_penq(q, prec, pkt);
+ p = brcmu_pktq_penq(q, prec, pkt);
if (p == NULL) {
- DHD_ERROR(("%s: bcm_pktq_penq() failed.", __func__));
- ASSERT(p);
+ BRCMF_ERROR(("%s: brcmu_pktq_penq() failed.", __func__));
}
- return true;
+ return p != NULL;
}
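brcmf_c_prec_enq() above implements a small precedence-based eviction policy: enqueue on the fast path if there is room, otherwise pick a precedence to evict from and consult the per-AC discard bitmap (drvr->wme_dp) to decide between dropping an already-queued frame or refusing the incoming one. A condensed sketch of just that decision, with hypothetical names (the real code operates on struct pktq via the brcmu_pktq_* helpers):

    /* Sketch only: the eviction decision made above, condensed.
     *   prec_full  - per-precedence queue for 'prec' is full
     *   queue_full - the queue as a whole is at its limit
     *   tail_prec  - precedence reported by brcmu_pktq_peek_tail()
     *   wme_dp     - per-AC discard-policy bitmap (drvr->wme_dp)
     */
    enum evict_action { ENQUEUE, DROP_OLDEST, DROP_NEWEST, REFUSE_NEW };

    static enum evict_action prec_enq_policy(bool prec_full, bool queue_full,
                                             int prec, int tail_prec, u8 wme_dp)
    {
            int eprec;
            bool discard_oldest;

            if (!prec_full && !queue_full)
                    return ENQUEUE;             /* fast path: room available */

            if (prec_full)
                    eprec = prec;               /* evict within own precedence */
            else {
                    eprec = tail_prec;          /* whole queue full: look at tail */
                    if (eprec > prec)
                            return REFUSE_NEW;  /* only higher-precedence frames queued */
            }

            discard_oldest = !!(wme_dp & (1 << eprec));
            if (eprec == prec && !discard_oldest)
                    return REFUSE_NEW;          /* policy says keep what is queued */

            return discard_oldest ? DROP_OLDEST : DROP_NEWEST;
    }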
static int
-dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
+brcmf_c_iovar_op(struct brcmf_pub *drvr, const char *name,
void *params, int plen, void *arg, int len, bool set)
{
int bcmerror = 0;
int val_size;
- const bcm_iovar_t *vi = NULL;
+ const struct brcmu_iovar *vi = NULL;
u32 actionid;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- ASSERT(name);
- ASSERT(len >= 0);
+ if (name == NULL || len <= 0)
+ return -EINVAL;
- /* Get MUST have return space */
- ASSERT(set || (arg && len));
+ /* Set does not take qualifiers */
+ if (set && (params || plen))
+ return -EINVAL;
- /* Set does NOT take qualifiers */
- ASSERT(!set || (!params && !plen));
+ /* Get must have return space */
+ if (!set && !(arg && len))
+ return -EINVAL;
- vi = bcm_iovar_lookup(dhd_iovars, name);
+ vi = brcmu_iovar_lookup(brcmf_iovars, name);
if (vi == NULL) {
bcmerror = -ENOTSUPP;
goto exit;
}
- DHD_CTL(("%s: %s %s, len %d plen %d\n", __func__,
- name, (set ? "set" : "get"), len, plen));
+ BRCMF_CTL(("%s: %s %s, len %d plen %d\n", __func__,
+ name, (set ? "set" : "get"), len, plen));
/* set up 'params' pointer in case this is a set command so that
* the convenience int and bool code can be common to set and get
@@ -406,39 +349,40 @@ dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
bcmerror =
- dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len,
+ brcmf_c_doiovar(drvr, vi, actionid, name, params, plen, arg, len,
val_size);
exit:
return bcmerror;
}
-int dhd_ioctl(dhd_pub_t *dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
+int brcmf_c_ioctl(struct brcmf_pub *drvr, struct brcmf_c_ioctl *ioc, void *buf,
+ uint buflen)
{
int bcmerror = 0;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
if (!buf)
return -EINVAL;
switch (ioc->cmd) {
- case DHD_GET_MAGIC:
+ case BRCMF_GET_MAGIC:
if (buflen < sizeof(int))
bcmerror = -EOVERFLOW;
else
- *(int *)buf = DHD_IOCTL_MAGIC;
+ *(int *)buf = BRCMF_IOCTL_MAGIC;
break;
- case DHD_GET_VERSION:
+ case BRCMF_GET_VERSION:
if (buflen < sizeof(int))
bcmerror = -EOVERFLOW;
else
- *(int *)buf = DHD_IOCTL_VERSION;
+ *(int *)buf = BRCMF_IOCTL_VERSION;
break;
- case DHD_GET_VAR:
- case DHD_SET_VAR:{
+ case BRCMF_GET_VAR:
+ case BRCMF_SET_VAR:{
char *arg;
uint arglen;
@@ -456,38 +400,25 @@ int dhd_ioctl(dhd_pub_t *dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
arg++, arglen--;
/* call with the appropriate arguments */
- if (ioc->cmd == DHD_GET_VAR)
- bcmerror =
- dhd_iovar_op(dhd_pub, buf, arg, arglen, buf,
- buflen, IOV_GET);
+ if (ioc->cmd == BRCMF_GET_VAR)
+ bcmerror = brcmf_c_iovar_op(drvr, buf, arg,
+ arglen, buf, buflen, IOV_GET);
else
bcmerror =
- dhd_iovar_op(dhd_pub, buf, NULL, 0, arg,
- arglen, IOV_SET);
- if (bcmerror != -ENOTSUPP)
- break;
-
- /* not in generic table, try protocol module */
- if (ioc->cmd == DHD_GET_VAR)
- bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
- arglen, buf,
- buflen, IOV_GET);
- else
- bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
- NULL, 0, arg,
- arglen, IOV_SET);
+ brcmf_c_iovar_op(drvr, buf, NULL, 0, arg,
+ arglen, IOV_SET);
if (bcmerror != -ENOTSUPP)
break;
/* if still not found, try bus module */
- if (ioc->cmd == DHD_GET_VAR)
- bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
- arg, arglen, buf,
- buflen, IOV_GET);
+ if (ioc->cmd == BRCMF_GET_VAR)
+ bcmerror = brcmf_sdbrcm_bus_iovar_op(drvr,
+ buf, arg, arglen, buf, buflen,
+ IOV_GET);
else
- bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
- NULL, 0, arg,
- arglen, IOV_SET);
+ bcmerror = brcmf_sdbrcm_bus_iovar_op(drvr,
+ buf, NULL, 0, arg, arglen,
+ IOV_SET);
break;
}
@@ -500,7 +431,8 @@ int dhd_ioctl(dhd_pub_t *dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
}
#ifdef SHOW_EVENTS
-static void wl_show_host_event(wl_event_msg_t *event, void *event_data)
+static void
+brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
{
uint i, status, reason;
bool group = false, flush_txq = false, link = false;
@@ -512,62 +444,62 @@ static void wl_show_host_event(wl_event_msg_t *event, void *event_data)
char *event_name;
} event_names[] = {
{
- WLC_E_SET_SSID, "SET_SSID"}, {
- WLC_E_JOIN, "JOIN"}, {
- WLC_E_START, "START"}, {
- WLC_E_AUTH, "AUTH"}, {
- WLC_E_AUTH_IND, "AUTH_IND"}, {
- WLC_E_DEAUTH, "DEAUTH"}, {
- WLC_E_DEAUTH_IND, "DEAUTH_IND"}, {
- WLC_E_ASSOC, "ASSOC"}, {
- WLC_E_ASSOC_IND, "ASSOC_IND"}, {
- WLC_E_REASSOC, "REASSOC"}, {
- WLC_E_REASSOC_IND, "REASSOC_IND"}, {
- WLC_E_DISASSOC, "DISASSOC"}, {
- WLC_E_DISASSOC_IND, "DISASSOC_IND"}, {
- WLC_E_QUIET_START, "START_QUIET"}, {
- WLC_E_QUIET_END, "END_QUIET"}, {
- WLC_E_BEACON_RX, "BEACON_RX"}, {
- WLC_E_LINK, "LINK"}, {
- WLC_E_MIC_ERROR, "MIC_ERROR"}, {
- WLC_E_NDIS_LINK, "NDIS_LINK"}, {
- WLC_E_ROAM, "ROAM"}, {
- WLC_E_TXFAIL, "TXFAIL"}, {
- WLC_E_PMKID_CACHE, "PMKID_CACHE"}, {
- WLC_E_RETROGRADE_TSF, "RETROGRADE_TSF"}, {
- WLC_E_PRUNE, "PRUNE"}, {
- WLC_E_AUTOAUTH, "AUTOAUTH"}, {
- WLC_E_EAPOL_MSG, "EAPOL_MSG"}, {
- WLC_E_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
- WLC_E_ADDTS_IND, "ADDTS_IND"}, {
- WLC_E_DELTS_IND, "DELTS_IND"}, {
- WLC_E_BCNSENT_IND, "BCNSENT_IND"}, {
- WLC_E_BCNRX_MSG, "BCNRX_MSG"}, {
- WLC_E_BCNLOST_MSG, "BCNLOST_MSG"}, {
- WLC_E_ROAM_PREP, "ROAM_PREP"}, {
- WLC_E_PFN_NET_FOUND, "PNO_NET_FOUND"}, {
- WLC_E_PFN_NET_LOST, "PNO_NET_LOST"}, {
- WLC_E_RESET_COMPLETE, "RESET_COMPLETE"}, {
- WLC_E_JOIN_START, "JOIN_START"}, {
- WLC_E_ROAM_START, "ROAM_START"}, {
- WLC_E_ASSOC_START, "ASSOC_START"}, {
- WLC_E_IBSS_ASSOC, "IBSS_ASSOC"}, {
- WLC_E_RADIO, "RADIO"}, {
- WLC_E_PSM_WATCHDOG, "PSM_WATCHDOG"}, {
- WLC_E_PROBREQ_MSG, "PROBREQ_MSG"}, {
- WLC_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND"}, {
- WLC_E_PSK_SUP, "PSK_SUP"}, {
- WLC_E_COUNTRY_CODE_CHANGED, "COUNTRY_CODE_CHANGED"}, {
- WLC_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME"}, {
- WLC_E_ICV_ERROR, "ICV_ERROR"}, {
- WLC_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR"}, {
- WLC_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR"}, {
- WLC_E_TRACE, "TRACE"}, {
- WLC_E_ACTION_FRAME, "ACTION FRAME"}, {
- WLC_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, {
- WLC_E_IF, "IF"}, {
- WLC_E_RSSI, "RSSI"}, {
- WLC_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}
+ BRCMF_E_SET_SSID, "SET_SSID"}, {
+ BRCMF_E_JOIN, "JOIN"}, {
+ BRCMF_E_START, "START"}, {
+ BRCMF_E_AUTH, "AUTH"}, {
+ BRCMF_E_AUTH_IND, "AUTH_IND"}, {
+ BRCMF_E_DEAUTH, "DEAUTH"}, {
+ BRCMF_E_DEAUTH_IND, "DEAUTH_IND"}, {
+ BRCMF_E_ASSOC, "ASSOC"}, {
+ BRCMF_E_ASSOC_IND, "ASSOC_IND"}, {
+ BRCMF_E_REASSOC, "REASSOC"}, {
+ BRCMF_E_REASSOC_IND, "REASSOC_IND"}, {
+ BRCMF_E_DISASSOC, "DISASSOC"}, {
+ BRCMF_E_DISASSOC_IND, "DISASSOC_IND"}, {
+ BRCMF_E_QUIET_START, "START_QUIET"}, {
+ BRCMF_E_QUIET_END, "END_QUIET"}, {
+ BRCMF_E_BEACON_RX, "BEACON_RX"}, {
+ BRCMF_E_LINK, "LINK"}, {
+ BRCMF_E_MIC_ERROR, "MIC_ERROR"}, {
+ BRCMF_E_NDIS_LINK, "NDIS_LINK"}, {
+ BRCMF_E_ROAM, "ROAM"}, {
+ BRCMF_E_TXFAIL, "TXFAIL"}, {
+ BRCMF_E_PMKID_CACHE, "PMKID_CACHE"}, {
+ BRCMF_E_RETROGRADE_TSF, "RETROGRADE_TSF"}, {
+ BRCMF_E_PRUNE, "PRUNE"}, {
+ BRCMF_E_AUTOAUTH, "AUTOAUTH"}, {
+ BRCMF_E_EAPOL_MSG, "EAPOL_MSG"}, {
+ BRCMF_E_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
+ BRCMF_E_ADDTS_IND, "ADDTS_IND"}, {
+ BRCMF_E_DELTS_IND, "DELTS_IND"}, {
+ BRCMF_E_BCNSENT_IND, "BCNSENT_IND"}, {
+ BRCMF_E_BCNRX_MSG, "BCNRX_MSG"}, {
+ BRCMF_E_BCNLOST_MSG, "BCNLOST_MSG"}, {
+ BRCMF_E_ROAM_PREP, "ROAM_PREP"}, {
+ BRCMF_E_PFN_NET_FOUND, "PNO_NET_FOUND"}, {
+ BRCMF_E_PFN_NET_LOST, "PNO_NET_LOST"}, {
+ BRCMF_E_RESET_COMPLETE, "RESET_COMPLETE"}, {
+ BRCMF_E_JOIN_START, "JOIN_START"}, {
+ BRCMF_E_ROAM_START, "ROAM_START"}, {
+ BRCMF_E_ASSOC_START, "ASSOC_START"}, {
+ BRCMF_E_IBSS_ASSOC, "IBSS_ASSOC"}, {
+ BRCMF_E_RADIO, "RADIO"}, {
+ BRCMF_E_PSM_WATCHDOG, "PSM_WATCHDOG"}, {
+ BRCMF_E_PROBREQ_MSG, "PROBREQ_MSG"}, {
+ BRCMF_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND"}, {
+ BRCMF_E_PSK_SUP, "PSK_SUP"}, {
+ BRCMF_E_COUNTRY_CODE_CHANGED, "COUNTRY_CODE_CHANGED"}, {
+ BRCMF_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME"}, {
+ BRCMF_E_ICV_ERROR, "ICV_ERROR"}, {
+ BRCMF_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR"}, {
+ BRCMF_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR"}, {
+ BRCMF_E_TRACE, "TRACE"}, {
+ BRCMF_E_ACTION_FRAME, "ACTION FRAME"}, {
+ BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, {
+ BRCMF_E_IF, "IF"}, {
+ BRCMF_E_RSSI, "RSSI"}, {
+ BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}
};
uint event_type, flags, auth_type, datalen;
event_type = be32_to_cpu(event->event_type);
@@ -585,54 +517,55 @@ static void wl_show_host_event(wl_event_msg_t *event, void *event_data)
event_name = event_names[i].event_name;
}
- DHD_EVENT(("EVENT: %s, event ID = %d\n", event_name, event_type));
- DHD_EVENT(("flags 0x%04x, status %d, reason %d, auth_type %d MAC %s\n",
- flags, status, reason, auth_type, eabuf));
+ BRCMF_EVENT(("EVENT: %s, event ID = %d\n", event_name, event_type));
+ BRCMF_EVENT(("flags 0x%04x, status %d, reason %d, auth_type %d"
+ " MAC %s\n", flags, status, reason, auth_type, eabuf));
- if (flags & WLC_EVENT_MSG_LINK)
+ if (flags & BRCMF_EVENT_MSG_LINK)
link = true;
- if (flags & WLC_EVENT_MSG_GROUP)
+ if (flags & BRCMF_EVENT_MSG_GROUP)
group = true;
- if (flags & WLC_EVENT_MSG_FLUSHTXQ)
+ if (flags & BRCMF_EVENT_MSG_FLUSHTXQ)
flush_txq = true;
switch (event_type) {
- case WLC_E_START:
- case WLC_E_DEAUTH:
- case WLC_E_DISASSOC:
- DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ case BRCMF_E_START:
+ case BRCMF_E_DEAUTH:
+ case BRCMF_E_DISASSOC:
+ BRCMF_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
break;
- case WLC_E_ASSOC_IND:
- case WLC_E_REASSOC_IND:
- DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ case BRCMF_E_ASSOC_IND:
+ case BRCMF_E_REASSOC_IND:
+ BRCMF_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
break;
- case WLC_E_ASSOC:
- case WLC_E_REASSOC:
- if (status == WLC_E_STATUS_SUCCESS) {
- DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n",
- event_name, eabuf));
- } else if (status == WLC_E_STATUS_TIMEOUT) {
- DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n",
- event_name, eabuf));
- } else if (status == WLC_E_STATUS_FAIL) {
- DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
- event_name, eabuf, (int)reason));
+ case BRCMF_E_ASSOC:
+ case BRCMF_E_REASSOC:
+ if (status == BRCMF_E_STATUS_SUCCESS) {
+ BRCMF_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n",
+ event_name, eabuf));
+ } else if (status == BRCMF_E_STATUS_TIMEOUT) {
+ BRCMF_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n",
+ event_name, eabuf));
+ } else if (status == BRCMF_E_STATUS_FAIL) {
+ BRCMF_EVENT(("MACEVENT: %s, MAC %s, FAILURE,"
+ " reason %d\n", event_name, eabuf,
+ (int)reason));
} else {
- DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status "
- "%d\n", event_name, eabuf, (int)status));
+ BRCMF_EVENT(("MACEVENT: %s, MAC %s, unexpected status "
+ "%d\n", event_name, eabuf, (int)status));
}
break;
- case WLC_E_DEAUTH_IND:
- case WLC_E_DISASSOC_IND:
- DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name,
- eabuf, (int)reason));
+ case BRCMF_E_DEAUTH_IND:
+ case BRCMF_E_DISASSOC_IND:
+ BRCMF_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name,
+ eabuf, (int)reason));
break;
- case WLC_E_AUTH:
- case WLC_E_AUTH_IND:
+ case BRCMF_E_AUTH:
+ case BRCMF_E_AUTH_IND:
if (auth_type == WLAN_AUTH_OPEN)
auth_str = "Open System";
else if (auth_type == WLAN_AUTH_SHARED_KEY)
@@ -641,102 +574,102 @@ static void wl_show_host_event(wl_event_msg_t *event, void *event_data)
sprintf(err_msg, "AUTH unknown: %d", (int)auth_type);
auth_str = err_msg;
}
- if (event_type == WLC_E_AUTH_IND) {
- DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name,
- eabuf, auth_str));
- } else if (status == WLC_E_STATUS_SUCCESS) {
- DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
- event_name, eabuf, auth_str));
- } else if (status == WLC_E_STATUS_TIMEOUT) {
- DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
- event_name, eabuf, auth_str));
- } else if (status == WLC_E_STATUS_FAIL) {
- DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, "
- "reason %d\n",
- event_name, eabuf, auth_str, (int)reason));
+ if (event_type == BRCMF_E_AUTH_IND) {
+ BRCMF_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name,
+ eabuf, auth_str));
+ } else if (status == BRCMF_E_STATUS_SUCCESS) {
+ BRCMF_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
+ event_name, eabuf, auth_str));
+ } else if (status == BRCMF_E_STATUS_TIMEOUT) {
+ BRCMF_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
+ event_name, eabuf, auth_str));
+ } else if (status == BRCMF_E_STATUS_FAIL) {
+ BRCMF_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, "
+ "reason %d\n",
+ event_name, eabuf, auth_str, (int)reason));
}
break;
- case WLC_E_JOIN:
- case WLC_E_ROAM:
- case WLC_E_SET_SSID:
- if (status == WLC_E_STATUS_SUCCESS) {
- DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name,
- eabuf));
- } else if (status == WLC_E_STATUS_FAIL) {
- DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
- } else if (status == WLC_E_STATUS_NO_NETWORKS) {
- DHD_EVENT(("MACEVENT: %s, no networks found\n",
- event_name));
+ case BRCMF_E_JOIN:
+ case BRCMF_E_ROAM:
+ case BRCMF_E_SET_SSID:
+ if (status == BRCMF_E_STATUS_SUCCESS) {
+ BRCMF_EVENT(("MACEVENT: %s, MAC %s\n", event_name,
+ eabuf));
+ } else if (status == BRCMF_E_STATUS_FAIL) {
+ BRCMF_EVENT(("MACEVENT: %s, failed\n", event_name));
+ } else if (status == BRCMF_E_STATUS_NO_NETWORKS) {
+ BRCMF_EVENT(("MACEVENT: %s, no networks found\n",
+ event_name));
} else {
- DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
- event_name, (int)status));
+ BRCMF_EVENT(("MACEVENT: %s, unexpected status %d\n",
+ event_name, (int)status));
}
break;
- case WLC_E_BEACON_RX:
- if (status == WLC_E_STATUS_SUCCESS) {
- DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
- } else if (status == WLC_E_STATUS_FAIL) {
- DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
+ case BRCMF_E_BEACON_RX:
+ if (status == BRCMF_E_STATUS_SUCCESS) {
+ BRCMF_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
+ } else if (status == BRCMF_E_STATUS_FAIL) {
+ BRCMF_EVENT(("MACEVENT: %s, FAIL\n", event_name));
} else {
- DHD_EVENT(("MACEVENT: %s, status %d\n", event_name,
- status));
+ BRCMF_EVENT(("MACEVENT: %s, status %d\n", event_name,
+ status));
}
break;
- case WLC_E_LINK:
- DHD_EVENT(("MACEVENT: %s %s\n", event_name,
- link ? "UP" : "DOWN"));
+ case BRCMF_E_LINK:
+ BRCMF_EVENT(("MACEVENT: %s %s\n", event_name,
+ link ? "UP" : "DOWN"));
break;
- case WLC_E_MIC_ERROR:
- DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
- event_name, eabuf, group, flush_txq));
+ case BRCMF_E_MIC_ERROR:
+ BRCMF_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
+ event_name, eabuf, group, flush_txq));
break;
- case WLC_E_ICV_ERROR:
- case WLC_E_UNICAST_DECODE_ERROR:
- case WLC_E_MULTICAST_DECODE_ERROR:
- DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ case BRCMF_E_ICV_ERROR:
+ case BRCMF_E_UNICAST_DECODE_ERROR:
+ case BRCMF_E_MULTICAST_DECODE_ERROR:
+ BRCMF_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
break;
- case WLC_E_TXFAIL:
- DHD_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf));
+ case BRCMF_E_TXFAIL:
+ BRCMF_EVENT(("MACEVENT: %s, RA %s\n", event_name, eabuf));
break;
- case WLC_E_SCAN_COMPLETE:
- case WLC_E_PMKID_CACHE:
- DHD_EVENT(("MACEVENT: %s\n", event_name));
+ case BRCMF_E_SCAN_COMPLETE:
+ case BRCMF_E_PMKID_CACHE:
+ BRCMF_EVENT(("MACEVENT: %s\n", event_name));
break;
- case WLC_E_PFN_NET_FOUND:
- case WLC_E_PFN_NET_LOST:
- case WLC_E_PFN_SCAN_COMPLETE:
- DHD_EVENT(("PNOEVENT: %s\n", event_name));
+ case BRCMF_E_PFN_NET_FOUND:
+ case BRCMF_E_PFN_NET_LOST:
+ case BRCMF_E_PFN_SCAN_COMPLETE:
+ BRCMF_EVENT(("PNOEVENT: %s\n", event_name));
break;
- case WLC_E_PSK_SUP:
- case WLC_E_PRUNE:
- DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
+ case BRCMF_E_PSK_SUP:
+ case BRCMF_E_PRUNE:
+ BRCMF_EVENT(("MACEVENT: %s, status %d, reason %d\n",
event_name, (int)status, (int)reason));
break;
- case WLC_E_TRACE:
+ case BRCMF_E_TRACE:
{
static u32 seqnum_prev;
- msgtrace_hdr_t hdr;
+ struct msgtrace_hdr hdr;
u32 nblost;
char *s, *p;
buf = (unsigned char *) event_data;
- memcpy(&hdr, buf, MSGTRACE_HDRLEN);
+ memcpy(&hdr, buf, sizeof(struct msgtrace_hdr));
if (hdr.version != MSGTRACE_VERSION) {
- DHD_ERROR(
+ BRCMF_ERROR(
("\nMACEVENT: %s [unsupported version --> "
- "dhd version:%d dongle version:%d]\n",
+ "brcmf version:%d dongle version:%d]\n",
event_name, MSGTRACE_VERSION, hdr.version)
);
/* Reset datalen to avoid display below */
@@ -745,11 +678,12 @@ static void wl_show_host_event(wl_event_msg_t *event, void *event_data)
}
/* There are 2 bytes available at the end of data */
- buf[MSGTRACE_HDRLEN + be16_to_cpu(hdr.len)] = '\0';
+ *(buf + sizeof(struct msgtrace_hdr)
+ + be16_to_cpu(hdr.len)) = '\0';
if (be32_to_cpu(hdr.discarded_bytes)
|| be32_to_cpu(hdr.discarded_printf)) {
- DHD_ERROR(
+ BRCMF_ERROR(
("\nWLC_E_TRACE: [Discarded traces in dongle -->"
"discarded_bytes %d discarded_printf %d]\n",
be32_to_cpu(hdr.discarded_bytes),
@@ -758,7 +692,7 @@ static void wl_show_host_event(wl_event_msg_t *event, void *event_data)
nblost = be32_to_cpu(hdr.seqnum) - seqnum_prev - 1;
if (nblost > 0) {
- DHD_ERROR(
+ BRCMF_ERROR(
("\nWLC_E_TRACE: [Event lost --> seqnum %d nblost %d\n",
be32_to_cpu(hdr.seqnum), nblost));
}
@@ -768,7 +702,7 @@ static void wl_show_host_event(wl_event_msg_t *event, void *event_data)
* avoid displaying a big
* printf (issue with Linux printk)
*/
- p = (char *)&buf[MSGTRACE_HDRLEN];
+ p = (char *)&buf[sizeof(struct msgtrace_hdr)];
while ((s = strstr(p, "\n")) != NULL) {
*s = '\0';
printk(KERN_DEBUG"%s\n", p);
@@ -781,49 +715,49 @@ static void wl_show_host_event(wl_event_msg_t *event, void *event_data)
}
break;
- case WLC_E_RSSI:
- DHD_EVENT(("MACEVENT: %s %d\n", event_name,
- be32_to_cpu(*((int *)event_data))));
+ case BRCMF_E_RSSI:
+ BRCMF_EVENT(("MACEVENT: %s %d\n", event_name,
+ be32_to_cpu(*((int *)event_data))));
break;
default:
- DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, "
- "auth %d\n", event_name, event_type, eabuf,
- (int)status, (int)reason, (int)auth_type));
+ BRCMF_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, "
+ "auth %d\n", event_name, event_type, eabuf,
+ (int)status, (int)reason, (int)auth_type));
break;
}
/* show any appended data */
if (datalen) {
buf = (unsigned char *) event_data;
- DHD_EVENT((" data (%d) : ", datalen));
+ BRCMF_EVENT((" data (%d) : ", datalen));
for (i = 0; i < datalen; i++)
- DHD_EVENT((" 0x%02x ", *buf++));
- DHD_EVENT(("\n"));
+ BRCMF_EVENT((" 0x%02x ", *buf++));
+ BRCMF_EVENT(("\n"));
}
}
#endif /* SHOW_EVENTS */
int
-wl_host_event(struct dhd_info *dhd, int *ifidx, void *pktdata,
- wl_event_msg_t *event, void **data_ptr)
+brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
+ struct brcmf_event_msg *event, void **data_ptr)
{
/* check whether packet is a BRCM event pkt */
- bcm_event_t *pvt_data = (bcm_event_t *) pktdata;
+ struct brcmf_event *pvt_data = (struct brcmf_event *) pktdata;
char *event_data;
u32 type, status;
u16 flags;
int evlen;
- if (memcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
- DHD_ERROR(("%s: mismatched OUI, bailing\n", __func__));
+ if (memcmp(BRCM_OUI, &pvt_data->hdr.oui[0], DOT11_OUI_LEN)) {
+ BRCMF_ERROR(("%s: mismatched OUI, bailing\n", __func__));
return -EBADE;
}
/* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
- if (get_unaligned_be16(&pvt_data->bcm_hdr.usr_subtype) !=
+ if (get_unaligned_be16(&pvt_data->hdr.usr_subtype) !=
BCMILCP_BCM_SUBTYPE_EVENT) {
- DHD_ERROR(("%s: mismatched subtype, bailing\n", __func__));
+ BRCMF_ERROR(("%s: mismatched subtype, bailing\n", __func__));
return -EBADE;
}
@@ -831,93 +765,87 @@ wl_host_event(struct dhd_info *dhd, int *ifidx, void *pktdata,
event_data = *data_ptr;
/* memcpy since BRCM event pkt may be unaligned. */
- memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t));
+ memcpy(event, &pvt_data->msg, sizeof(struct brcmf_event_msg));
type = get_unaligned_be32(&event->event_type);
flags = get_unaligned_be16(&event->flags);
status = get_unaligned_be32(&event->status);
- evlen = get_unaligned_be32(&event->datalen) + sizeof(bcm_event_t);
+ evlen = get_unaligned_be32(&event->datalen) +
+ sizeof(struct brcmf_event);
switch (type) {
- case WLC_E_IF:
+ case BRCMF_E_IF:
{
- dhd_if_event_t *ifevent = (dhd_if_event_t *) event_data;
- DHD_TRACE(("%s: if event\n", __func__));
+ struct brcmf_if_event *ifevent =
+ (struct brcmf_if_event *) event_data;
+ BRCMF_TRACE(("%s: if event\n", __func__));
if (ifevent->ifidx > 0 &&
- ifevent->ifidx < DHD_MAX_IFS) {
- if (ifevent->action == WLC_E_IF_ADD)
- dhd_add_if(dhd, ifevent->ifidx,
+ ifevent->ifidx < BRCMF_MAX_IFS) {
+ if (ifevent->action == BRCMF_E_IF_ADD)
+ brcmf_add_if(drvr_priv, ifevent->ifidx,
NULL, event->ifname,
pvt_data->eth.h_dest,
ifevent->flags,
ifevent->bssidx);
else
- dhd_del_if(dhd, ifevent->ifidx);
+ brcmf_del_if(drvr_priv, ifevent->ifidx);
} else {
- DHD_ERROR(("%s: Invalid ifidx %d for %s\n",
- __func__, ifevent->ifidx,
- event->ifname));
+ BRCMF_ERROR(("%s: Invalid ifidx %d for %s\n",
+ __func__, ifevent->ifidx,
+ event->ifname));
}
}
/* send up the if event: btamp user needs it */
- *ifidx = dhd_ifname2idx(dhd, event->ifname);
- /* push up to external supp/auth */
- dhd_event(dhd, (char *)pvt_data, evlen, *ifidx);
+ *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
break;
-#ifdef P2P
- case WLC_E_NDIS_LINK:
- break;
-#endif
- /* fall through */
/* These are what external supplicant/authenticator wants */
- case WLC_E_LINK:
- case WLC_E_ASSOC_IND:
- case WLC_E_REASSOC_IND:
- case WLC_E_DISASSOC_IND:
- case WLC_E_MIC_ERROR:
+ case BRCMF_E_LINK:
+ case BRCMF_E_ASSOC_IND:
+ case BRCMF_E_REASSOC_IND:
+ case BRCMF_E_DISASSOC_IND:
+ case BRCMF_E_MIC_ERROR:
default:
/* Fall through: this should get _everything_ */
- *ifidx = dhd_ifname2idx(dhd, event->ifname);
- /* push up to external supp/auth */
- dhd_event(dhd, (char *)pvt_data, evlen, *ifidx);
- DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
- __func__, type, flags, status));
+ *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
+ BRCMF_TRACE(("%s: MAC event %d, flags %x, status %x\n",
+ __func__, type, flags, status));
- /* put it back to WLC_E_NDIS_LINK */
- if (type == WLC_E_NDIS_LINK) {
+ /* put it back to BRCMF_E_NDIS_LINK */
+ if (type == BRCMF_E_NDIS_LINK) {
u32 temp;
temp = get_unaligned_be32(&event->event_type);
- DHD_TRACE(("Converted to WLC_E_LINK type %d\n", temp));
+ BRCMF_TRACE(("Converted to WLC_E_LINK type %d\n",
+ temp));
- temp = be32_to_cpu(WLC_E_NDIS_LINK);
- memcpy((void *)(&pvt_data->event.event_type), &temp,
- sizeof(pvt_data->event.event_type));
+ temp = be32_to_cpu(BRCMF_E_NDIS_LINK);
+ memcpy((void *)(&pvt_data->msg.event_type), &temp,
+ sizeof(pvt_data->msg.event_type));
}
break;
}
#ifdef SHOW_EVENTS
- wl_show_host_event(event, event_data);
+ brcmf_c_show_host_event(event, event_data);
#endif /* SHOW_EVENTS */
return 0;
}
/* Convert user's input in hex pattern to byte-size mask */
-static int wl_pattern_atoh(char *src, char *dst)
+static int brcmf_c_pattern_atoh(char *src, char *dst)
{
int i;
if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) {
- DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
+ BRCMF_ERROR(("Mask invalid format. Needs to start with 0x\n"));
return -1;
}
src = src + 2; /* Skip past 0x */
if (strlen(src) % 2 != 0) {
- DHD_ERROR(("Mask invalid format. Length must be even.\n"));
+ BRCMF_ERROR(("Mask invalid format. Length must be even.\n"));
return -1;
}
for (i = 0; *src != '\0'; i++) {
@@ -931,7 +859,7 @@ static int wl_pattern_atoh(char *src, char *dst)
}
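brcmf_c_pattern_atoh() above expects a "0x..."-prefixed, even-length hex string and returns the number of converted bytes (or -1 on a malformed string). A hypothetical caller-side sketch, mirroring the size checks the offload code below performs on a mask/pattern pair:

    /* Sketch only: validate a mask/pattern pair the way the offload code
     * below does. 'scratch' must be large enough for both byte strings.
     * brcmf_c_pattern_atoh() returns the converted byte count, or -1.
     */
    static int check_mask_and_pattern(char *mask, char *pattern, char *scratch)
    {
            int mask_size, pattern_size;

            mask_size = brcmf_c_pattern_atoh(mask, scratch);
            if (mask_size < 0)
                    return -EINVAL;             /* not "0x..." or odd length */

            pattern_size = brcmf_c_pattern_atoh(pattern, scratch + mask_size);
            if (pattern_size < 0 || pattern_size != mask_size)
                    return -EINVAL;             /* sizes must match */

            return mask_size;                   /* e.g. "0x00ff" -> 2 bytes 00 ff */
    }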
void
-dhd_pktfilter_offload_enable(dhd_pub_t *dhd, char *arg, int enable,
+brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg, int enable,
int master_mode)
{
char *argv[8];
@@ -942,12 +870,12 @@ dhd_pktfilter_offload_enable(dhd_pub_t *dhd, char *arg, int enable,
char *arg_save = 0, *arg_org = 0;
int rc;
char buf[128];
- wl_pkt_filter_enable_t enable_parm;
- wl_pkt_filter_enable_t *pkt_filterp;
+ struct brcmf_pkt_filter_enable enable_parm;
+ struct brcmf_pkt_filter_enable *pkt_filterp;
arg_save = kmalloc(strlen(arg) + 1, GFP_ATOMIC);
if (!arg_save) {
- DHD_ERROR(("%s: kmalloc failed\n", __func__));
+ BRCMF_ERROR(("%s: kmalloc failed\n", __func__));
goto fail;
}
arg_org = arg_save;
@@ -957,7 +885,7 @@ dhd_pktfilter_offload_enable(dhd_pub_t *dhd, char *arg, int enable,
i = 0;
if (NULL == argv[i]) {
- DHD_ERROR(("No args provided\n"));
+ BRCMF_ERROR(("No args provided\n"));
goto fail;
}
@@ -967,7 +895,7 @@ dhd_pktfilter_offload_enable(dhd_pub_t *dhd, char *arg, int enable,
buf[str_len] = '\0';
buf_len = str_len + 1;
- pkt_filterp = (wl_pkt_filter_enable_t *) (buf + str_len + 1);
+ pkt_filterp = (struct brcmf_pkt_filter_enable *) (buf + str_len + 1);
/* Parse packet filter id. */
enable_parm.id = simple_strtoul(argv[i], NULL, 0);
@@ -979,33 +907,34 @@ dhd_pktfilter_offload_enable(dhd_pub_t *dhd, char *arg, int enable,
memcpy((char *)pkt_filterp, &enable_parm, sizeof(enable_parm));
/* Enable/disable the specified filter. */
- rc = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, buf_len);
+ rc = brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, buf, buf_len);
rc = rc >= 0 ? 0 : rc;
if (rc)
- DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
- __func__, arg, rc));
+ BRCMF_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+ __func__, arg, rc));
else
- DHD_TRACE(("%s: successfully added pktfilter %s\n",
- __func__, arg));
+ BRCMF_TRACE(("%s: successfully added pktfilter %s\n",
+ __func__, arg));
/* Control the master mode */
- bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf,
+ brcmu_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf,
sizeof(buf));
- rc = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, sizeof(buf));
+ rc = brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, buf,
+ sizeof(buf));
rc = rc >= 0 ? 0 : rc;
if (rc)
- DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
- __func__, arg, rc));
+ BRCMF_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+ __func__, arg, rc));
fail:
kfree(arg_org);
}
-void dhd_pktfilter_offload_set(dhd_pub_t *dhd, char *arg)
+void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg)
{
const char *str;
- wl_pkt_filter_t pkt_filter;
- wl_pkt_filter_t *pkt_filterp;
+ struct brcmf_pkt_filter pkt_filter;
+ struct brcmf_pkt_filter *pkt_filterp;
int buf_len;
int str_len;
int rc;
@@ -1014,29 +943,22 @@ void dhd_pktfilter_offload_set(dhd_pub_t *dhd, char *arg)
char *argv[8], *buf = 0;
int i = 0;
char *arg_save = 0, *arg_org = 0;
-#define BUF_SIZE 2048
arg_save = kmalloc(strlen(arg) + 1, GFP_ATOMIC);
if (!arg_save) {
- DHD_ERROR(("%s: kmalloc failed\n", __func__));
+ BRCMF_ERROR(("%s: kmalloc failed\n", __func__));
goto fail;
}
arg_org = arg_save;
- buf = kmalloc(BUF_SIZE, GFP_ATOMIC);
+ buf = kmalloc(PKTFILTER_BUF_SIZE, GFP_ATOMIC);
if (!buf) {
- DHD_ERROR(("%s: kmalloc failed\n", __func__));
+ BRCMF_ERROR(("%s: kmalloc failed\n", __func__));
goto fail;
}
- memcpy(arg_save, arg, strlen(arg) + 1);
-
- if (strlen(arg) > BUF_SIZE) {
- DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg),
- (int)sizeof(buf)));
- goto fail;
- }
+ strcpy(arg_save, arg);
argv[i] = strsep(&arg_save, " ");
while (argv[i++])
@@ -1044,23 +966,22 @@ void dhd_pktfilter_offload_set(dhd_pub_t *dhd, char *arg)
i = 0;
if (NULL == argv[i]) {
- DHD_ERROR(("No args provided\n"));
+ BRCMF_ERROR(("No args provided\n"));
goto fail;
}
str = "pkt_filter_add";
+ strcpy(buf, str);
str_len = strlen(str);
- strncpy(buf, str, str_len);
- buf[str_len] = '\0';
buf_len = str_len + 1;
- pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
+ pkt_filterp = (struct brcmf_pkt_filter *) (buf + str_len + 1);
/* Parse packet filter id. */
pkt_filter.id = simple_strtoul(argv[i], NULL, 0);
if (NULL == argv[++i]) {
- DHD_ERROR(("Polarity not provided\n"));
+ BRCMF_ERROR(("Polarity not provided\n"));
goto fail;
}
@@ -1068,7 +989,7 @@ void dhd_pktfilter_offload_set(dhd_pub_t *dhd, char *arg)
pkt_filter.negate_match = simple_strtoul(argv[i], NULL, 0);
if (NULL == argv[++i]) {
- DHD_ERROR(("Filter type not provided\n"));
+ BRCMF_ERROR(("Filter type not provided\n"));
goto fail;
}
@@ -1076,7 +997,7 @@ void dhd_pktfilter_offload_set(dhd_pub_t *dhd, char *arg)
pkt_filter.type = simple_strtoul(argv[i], NULL, 0);
if (NULL == argv[++i]) {
- DHD_ERROR(("Offset not provided\n"));
+ BRCMF_ERROR(("Offset not provided\n"));
goto fail;
}
@@ -1084,34 +1005,34 @@ void dhd_pktfilter_offload_set(dhd_pub_t *dhd, char *arg)
pkt_filter.u.pattern.offset = simple_strtoul(argv[i], NULL, 0);
if (NULL == argv[++i]) {
- DHD_ERROR(("Bitmask not provided\n"));
+ BRCMF_ERROR(("Bitmask not provided\n"));
goto fail;
}
/* Parse pattern filter mask. */
mask_size =
- wl_pattern_atoh
+ brcmf_c_pattern_atoh
(argv[i], (char *)pkt_filterp->u.pattern.mask_and_pattern);
if (NULL == argv[++i]) {
- DHD_ERROR(("Pattern not provided\n"));
+ BRCMF_ERROR(("Pattern not provided\n"));
goto fail;
}
/* Parse pattern filter pattern. */
pattern_size =
- wl_pattern_atoh(argv[i],
+ brcmf_c_pattern_atoh(argv[i],
(char *)&pkt_filterp->u.pattern.
mask_and_pattern[mask_size]);
if (mask_size != pattern_size) {
- DHD_ERROR(("Mask and pattern not the same size\n"));
+ BRCMF_ERROR(("Mask and pattern not the same size\n"));
goto fail;
}
pkt_filter.u.pattern.size_bytes = mask_size;
- buf_len += WL_PKT_FILTER_FIXED_LEN;
- buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
+ buf_len += BRCMF_PKT_FILTER_FIXED_LEN;
+ buf_len += (BRCMF_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
/* Keep-alive attributes are set in local
* variable (keep_alive_pkt), and
@@ -1120,17 +1041,17 @@ void dhd_pktfilter_offload_set(dhd_pub_t *dhd, char *arg)
*/
memcpy((char *)pkt_filterp,
&pkt_filter,
- WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
+ BRCMF_PKT_FILTER_FIXED_LEN + BRCMF_PKT_FILTER_PATTERN_FIXED_LEN);
- rc = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, buf_len);
+ rc = brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, buf, buf_len);
rc = rc >= 0 ? 0 : rc;
if (rc)
- DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
- __func__, arg, rc));
+ BRCMF_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
+ __func__, arg, rc));
else
- DHD_TRACE(("%s: successfully added pktfilter %s\n",
- __func__, arg));
+ BRCMF_TRACE(("%s: successfully added pktfilter %s\n",
+ __func__, arg));
fail:
kfree(arg_org);
@@ -1138,711 +1059,138 @@ fail:
kfree(buf);
}
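Both packet-filter helpers above build the firmware buffer the same way: the iovar name, its terminating NUL, then the binary filter structure packed directly behind it, with buf_len covering all three. A minimal sketch of that layout step, using a hypothetical helper (fill_filter_iovar is not part of the driver):

    /* Sketch only: iovar buffer layout used by both filter helpers above.
     *
     *   | iovar name | '\0' | binary parameter block |
     *   |<-------------- buf_len ------------------->|
     */
    static int fill_filter_iovar(char *buf, size_t bufsz, const char *name,
                                 const void *parm, size_t parm_len)
    {
            size_t str_len = strlen(name);

            if (bufsz < str_len + 1 + parm_len)
                    return -EOVERFLOW;

            strcpy(buf, name);                          /* name + '\0' */
            memcpy(buf + str_len + 1, parm, parm_len);  /* e.g. struct brcmf_pkt_filter */
            return str_len + 1 + parm_len;              /* buf_len for BRCMF_C_SET_VAR */
    }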
-void dhd_arp_offload_set(dhd_pub_t *dhd, int arp_mode)
+void brcmf_c_arp_offload_set(struct brcmf_pub *drvr, int arp_mode)
{
char iovbuf[32];
int retcode;
- bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
- retcode = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+ brcmu_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
+ retcode = brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR,
+ iovbuf, sizeof(iovbuf));
retcode = retcode >= 0 ? 0 : retcode;
if (retcode)
- DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, "
- "retcode = %d\n", __func__, arp_mode, retcode));
+ BRCMF_TRACE(("%s: failed to set ARP offload mode to 0x%x, "
+ "retcode = %d\n", __func__, arp_mode, retcode));
else
- DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
- __func__, arp_mode));
+ BRCMF_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
+ __func__, arp_mode));
}
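This setter, brcmf_c_arp_offload_enable() below it, and most of the firmware tuning in brcmf_c_preinit_ioctls() follow one pattern: pack a named value with brcmu_mkiovar() and push it to the dongle with a BRCMF_C_SET_VAR ioctl. A condensed sketch of that pattern as a hypothetical helper (brcmf_c_set_u32_iovar does not exist in the driver):

    /* Sketch only: the shared "mkiovar + SET_VAR" pattern. */
    static int brcmf_c_set_u32_iovar(struct brcmf_pub *drvr,
                                     const char *name, u32 val)
    {
            char iovbuf[32];
            int ret;

            brcmu_mkiovar(name, (char *)&val, sizeof(val),
                          iovbuf, sizeof(iovbuf));
            ret = brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR,
                                            iovbuf, sizeof(iovbuf));
            return ret < 0 ? ret : 0;
    }

    /* e.g. brcmf_c_set_u32_iovar(drvr, "arpoe", 1) would mirror
     * brcmf_c_arp_offload_enable(drvr, 1) below.
     */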
-void dhd_arp_offload_enable(dhd_pub_t *dhd, int arp_enable)
+void brcmf_c_arp_offload_enable(struct brcmf_pub *drvr, int arp_enable)
{
char iovbuf[32];
int retcode;
- bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
- retcode = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+ brcmu_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
+ retcode = brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR,
+ iovbuf, sizeof(iovbuf));
retcode = retcode >= 0 ? 0 : retcode;
if (retcode)
- DHD_TRACE(("%s: failed to enabe ARP offload to %d, "
- "retcode = %d\n", __func__, arp_enable, retcode));
+ BRCMF_TRACE(("%s: failed to enabe ARP offload to %d, "
+ "retcode = %d\n", __func__, arp_enable, retcode));
else
- DHD_TRACE(("%s: successfully enabed ARP offload to %d\n",
- __func__, arp_enable));
+ BRCMF_TRACE(("%s: successfully enabed ARP offload to %d\n",
+ __func__, arp_enable));
}
-int dhd_preinit_ioctls(dhd_pub_t *dhd)
+int brcmf_c_preinit_ioctls(struct brcmf_pub *drvr)
{
- char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for
+ char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for
"event_msgs" + '\0' + bitvec */
uint up = 0;
char buf[128], *ptr;
uint power_mode = PM_FAST;
- u32 dongle_align = DHD_SDALIGN;
+ u32 dongle_align = BRCMF_SDALIGN;
u32 glom = 0;
uint bcn_timeout = 3;
int scan_assoc_time = 40;
int scan_unassoc_time = 40;
-#ifdef GET_CUSTOM_MAC_ENABLE
- int ret = 0;
- u8 ea_addr[ETH_ALEN];
-#endif /* GET_CUSTOM_MAC_ENABLE */
-
- dhd_os_proto_block(dhd);
-
-#ifdef GET_CUSTOM_MAC_ENABLE
- /* Read MAC address from external customer place
- ** NOTE that default mac address has to be present in
- ** otp or nvram file to bring up
- ** firmware but unique per board mac address maybe provided by
- ** customer code
- */
- ret = dhd_custom_get_mac_address(ea_addr);
- if (!ret) {
- bcm_mkiovar("cur_etheraddr", (void *)ea_addr, ETH_ALEN,
- buf, sizeof(buf));
- ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, buf, sizeof(buf));
- if (ret < 0) {
- DHD_ERROR(("%s: can't set MAC address , error=%d\n",
- __func__, ret));
- } else
- memcpy(dhd->mac.octet, (void *)&ea_addr,
- ETH_ALEN);
- }
-#endif /* GET_CUSTOM_MAC_ENABLE */
+ int i;
+
+ brcmf_os_proto_block(drvr);
/* Set Country code */
- if (dhd->country_code[0] != 0) {
- if (dhdcdc_set_ioctl(dhd, 0, WLC_SET_COUNTRY,
- dhd->country_code,
- sizeof(dhd->country_code)) < 0) {
- DHD_ERROR(("%s: country code setting failed\n",
- __func__));
+ if (drvr->country_code[0] != 0) {
+ if (brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_COUNTRY,
+ drvr->country_code,
+ sizeof(drvr->country_code)) < 0) {
+ BRCMF_ERROR(("%s: country code setting failed\n",
+ __func__));
}
}
/* query for 'ver' to get version info from firmware */
memset(buf, 0, sizeof(buf));
ptr = buf;
- bcm_mkiovar("ver", 0, 0, buf, sizeof(buf));
- dhdcdc_query_ioctl(dhd, 0, WLC_GET_VAR, buf, sizeof(buf));
+ brcmu_mkiovar("ver", 0, 0, buf, sizeof(buf));
+ brcmf_proto_cdc_query_ioctl(drvr, 0, BRCMF_C_GET_VAR, buf, sizeof(buf));
strsep(&ptr, "\n");
/* Print fw version info */
- DHD_ERROR(("Firmware version = %s\n", buf));
+ BRCMF_ERROR(("Firmware version = %s\n", buf));
/* Set PowerSave mode */
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_PM, (char *)&power_mode,
+ brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_PM, (char *)&power_mode,
sizeof(power_mode));
/* Match Host and Dongle rx alignment */
- bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf,
+ brcmu_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf,
sizeof(iovbuf));
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+ brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
+ sizeof(iovbuf));
/* disable glom option by default */
- bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+ brcmu_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+ brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
+ sizeof(iovbuf));
/* Set up a timeout to report link down if beacons are lost and
roaming is off */
- bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf,
+ brcmu_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf,
sizeof(iovbuf));
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+ brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
+ sizeof(iovbuf));
/* Enable/disable built-in roaming so that the external supplicant can
take care of roaming */
- bcm_mkiovar("roam_off", (char *)&dhd_roam, 4, iovbuf, sizeof(iovbuf));
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+ brcmu_mkiovar("roam_off", (char *)&brcmf_roam, 4,
+ iovbuf, sizeof(iovbuf));
+ brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
+ sizeof(iovbuf));
/* Force STA UP */
- if (dhd_radio_up)
- dhdcdc_set_ioctl(dhd, 0, WLC_UP, (char *)&up, sizeof(up));
+ if (brcmf_radio_up)
+ brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_UP, (char *)&up,
+ sizeof(up));
/* Setup event_msgs */
- bcm_mkiovar("event_msgs", dhd->eventmask, WL_EVENTING_MASK_LEN, iovbuf,
- sizeof(iovbuf));
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+ brcmu_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
+ iovbuf, sizeof(iovbuf));
+ brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
+ sizeof(iovbuf));
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_SCAN_CHANNEL_TIME,
+ brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_SCAN_CHANNEL_TIME,
(char *)&scan_assoc_time, sizeof(scan_assoc_time));
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_SCAN_UNASSOC_TIME,
+ brcmf_proto_cdc_set_ioctl(drvr, 0, BRCMF_C_SET_SCAN_UNASSOC_TIME,
(char *)&scan_unassoc_time, sizeof(scan_unassoc_time));
-#ifdef ARP_OFFLOAD_SUPPORT
/* Set and enable ARP offload feature */
- if (dhd_arp_enable)
- dhd_arp_offload_set(dhd, dhd_arp_mode);
- dhd_arp_offload_enable(dhd, dhd_arp_enable);
-#endif /* ARP_OFFLOAD_SUPPORT */
-
-#ifdef PKT_FILTER_SUPPORT
- {
- int i;
- /* Set up pkt filter */
- if (dhd_pkt_filter_enable) {
- for (i = 0; i < dhd->pktfilter_count; i++) {
- dhd_pktfilter_offload_set(dhd,
- dhd->pktfilter[i]);
- dhd_pktfilter_offload_enable(dhd,
- dhd->pktfilter[i],
- dhd_pkt_filter_init,
- dhd_master_mode);
- }
- }
- }
-#endif /* PKT_FILTER_SUPPORT */
-
- dhd_os_proto_unblock(dhd);
-
- return 0;
-}
-
-#ifdef SIMPLE_ISCAN
-uint iscan_thread_id;
-iscan_buf_t *iscan_chain;
-
-iscan_buf_t *dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
-{
- iscan_buf_t *iscanbuf_alloc = 0;
- iscan_buf_t *iscanbuf_head;
-
- dhd_iscan_lock();
-
- iscanbuf_alloc = kmalloc(sizeof(iscan_buf_t), GFP_ATOMIC);
- if (iscanbuf_alloc == NULL)
- goto fail;
-
- iscanbuf_alloc->next = NULL;
- iscanbuf_head = *iscanbuf;
-
- DHD_ISCAN(("%s: addr of allocated node = 0x%X"
- "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
- __func__, iscanbuf_alloc, iscanbuf_head, dhd));
-
- if (iscanbuf_head == NULL) {
- *iscanbuf = iscanbuf_alloc;
- DHD_ISCAN(("%s: Head is allocated\n", __func__));
- goto fail;
- }
-
- while (iscanbuf_head->next)
- iscanbuf_head = iscanbuf_head->next;
-
- iscanbuf_head->next = iscanbuf_alloc;
-
-fail:
- dhd_iscan_unlock();
- return iscanbuf_alloc;
-}
-
-void dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
-{
- iscan_buf_t *iscanbuf_free = 0;
- iscan_buf_t *iscanbuf_prv = 0;
- iscan_buf_t *iscanbuf_cur = iscan_chain;
- dhd_pub_t *dhd = dhd_bus_pub(dhdp);
-
- dhd_iscan_lock();
- /* If iscan_delete is null then delete the entire
- * chain or else delete specific one provided
- */
- if (!iscan_delete) {
- while (iscanbuf_cur) {
- iscanbuf_free = iscanbuf_cur;
- iscanbuf_cur = iscanbuf_cur->next;
- iscanbuf_free->next = 0;
- kfree(iscanbuf_free);
- }
- iscan_chain = 0;
- } else {
- while (iscanbuf_cur) {
- if (iscanbuf_cur == iscan_delete)
- break;
- iscanbuf_prv = iscanbuf_cur;
- iscanbuf_cur = iscanbuf_cur->next;
- }
- if (iscanbuf_prv)
- iscanbuf_prv->next = iscan_delete->next;
-
- iscan_delete->next = 0;
- kfree(iscan_delete);
-
- if (!iscanbuf_prv)
- iscan_chain = 0;
- }
- dhd_iscan_unlock();
-}
-
-iscan_buf_t *dhd_iscan_result_buf(void)
-{
- return iscan_chain;
-}
-
-/*
-* print scan cache
-* print partial iscan_skip list differently
-*/
-int dhd_iscan_print_cache(iscan_buf_t *iscan_skip)
-{
- int i = 0, l = 0;
- iscan_buf_t *iscan_cur;
- wl_iscan_results_t *list;
- wl_scan_results_t *results;
- wl_bss_info_t UNALIGNED *bi;
-
- dhd_iscan_lock();
-
- iscan_cur = dhd_iscan_result_buf();
-
- while (iscan_cur) {
- list = (wl_iscan_results_t *)iscan_cur->iscan_buf;
- if (!list)
- break;
-
- results = (wl_scan_results_t *)&list->results;
- if (!results)
- break;
-
- if (results->version != WL_BSS_INFO_VERSION) {
- DHD_ISCAN(("%s: results->version %d != "
- "WL_BSS_INFO_VERSION\n",
- __func__, results->version));
- goto done;
- }
-
- bi = results->bss_info;
- for (i = 0; i < results->count; i++) {
- if (!bi)
- break;
-
- DHD_ISCAN(("%s[%2.2d:%2.2d] %X:%X:%X:%X:%X:%X\n",
- iscan_cur != iscan_skip ? "BSS" : "bss", l,
- i, bi->BSSID.octet[0], bi->BSSID.octet[1],
- bi->BSSID.octet[2], bi->BSSID.octet[3],
- bi->BSSID.octet[4], bi->BSSID.octet[5]));
-
- bi = (wl_bss_info_t *)((unsigned long)bi + bi->length);
- }
- iscan_cur = iscan_cur->next;
- l++;
- }
-
-done:
- dhd_iscan_unlock();
- return 0;
-}
-
-/*
-* delete disappeared AP from specific scan cache but skip partial
-* list in iscan_skip
-*/
-int dhd_iscan_delete_bss(void *dhdp, void *addr, iscan_buf_t *iscan_skip)
-{
- int i = 0, j = 0, l = 0;
- iscan_buf_t *iscan_cur;
- wl_iscan_results_t *list;
- wl_scan_results_t *results;
- wl_bss_info_t UNALIGNED *bi, *bi_new, *bi_next;
-
- unsigned char *s_addr = addr;
-
- dhd_iscan_lock();
- DHD_ISCAN(("%s: BSS to remove %X:%X:%X:%X:%X:%X\n",
- __func__, s_addr[0], s_addr[1], s_addr[2],
- s_addr[3], s_addr[4], s_addr[5]));
-
- iscan_cur = dhd_iscan_result_buf();
-
- while (iscan_cur) {
- if (iscan_cur != iscan_skip) {
- list = (wl_iscan_results_t *)iscan_cur->iscan_buf;
- if (!list)
- break;
-
- results = (wl_scan_results_t *)&list->results;
- if (!results)
- break;
-
- if (results->version != WL_BSS_INFO_VERSION) {
- DHD_ERROR(("%s: results->version %d != "
- "WL_BSS_INFO_VERSION\n",
- __func__, results->version));
- goto done;
- }
-
- bi = results->bss_info;
- for (i = 0; i < results->count; i++) {
- if (!bi)
- break;
-
- if (!memcmp
- (bi->BSSID.octet, addr, ETH_ALEN)) {
- DHD_ISCAN(("%s: Del BSS[%2.2d:%2.2d] "
- "%X:%X:%X:%X:%X:%X\n",
- __func__, l, i, bi->BSSID.octet[0],
- bi->BSSID.octet[1], bi->BSSID.octet[2],
- bi->BSSID.octet[3], bi->BSSID.octet[4],
- bi->BSSID.octet[5]));
-
- bi_new = bi;
- bi = (wl_bss_info_t *)((unsigned long)
- bi + bi->length);
-/*
- if(bi && bi_new) {
- memcpy(bi_new, bi, results->buflen -
- bi_new->length);
- results->buflen -= bi_new->length;
- }
-*/
- results->buflen -= bi_new->length;
- results->count--;
-
- for (j = i; j < results->count; j++) {
- if (bi && bi_new) {
- DHD_ISCAN(("%s: Moved up BSS[%2.2d:%2.2d]" "%X:%X:%X:%X:%X:%X\n",
- __func__, l, j,
- bi->BSSID.octet[0],
- bi->BSSID.octet[1],
- bi->BSSID.octet[2],
- bi->BSSID.octet[3],
- bi->BSSID.octet[4],
- bi->BSSID.octet[5]));
-
- bi_next =
- (wl_bss_info_t *)((unsigned long)bi +
- bi->length);
- memcpy(bi_new, bi,
- bi->length);
- bi_new =
- (wl_bss_info_t *)((unsigned long)bi_new +
- bi_new->
- length);
- bi = bi_next;
- }
- }
-
- if (results->count == 0) {
- /* Prune now empty partial
- scan list */
- dhd_iscan_free_buf(dhdp,
- iscan_cur);
- goto done;
- }
- break;
- }
- bi = (wl_bss_info_t *)((unsigned long)bi +
- bi->length);
- }
+ if (brcmf_arp_enable)
+ brcmf_c_arp_offload_set(drvr, brcmf_arp_mode);
+ brcmf_c_arp_offload_enable(drvr, brcmf_arp_enable);
+
+ /* Set up pkt filter */
+ if (brcmf_pkt_filter_enable) {
+ for (i = 0; i < drvr->pktfilter_count; i++) {
+ brcmf_c_pktfilter_offload_set(drvr,
+ drvr->pktfilter[i]);
+ brcmf_c_pktfilter_offload_enable(drvr,
+ drvr->pktfilter[i],
+ brcmf_pkt_filter_init,
+ brcmf_master_mode);
}
- iscan_cur = iscan_cur->next;
- l++;
- }
-
-done:
- dhd_iscan_unlock();
- return 0;
-}
-
-int dhd_iscan_remove_duplicates(void *dhdp, iscan_buf_t *iscan_cur)
-{
- int i = 0;
- wl_iscan_results_t *list;
- wl_scan_results_t *results;
- wl_bss_info_t UNALIGNED *bi, *bi_new, *bi_next;
-
- dhd_iscan_lock();
-
- DHD_ISCAN(("%s: Scan cache before delete\n", __func__));
- dhd_iscan_print_cache(iscan_cur);
-
- if (!iscan_cur)
- goto done;
-
- list = (wl_iscan_results_t *)iscan_cur->iscan_buf;
- if (!list)
- goto done;
-
- results = (wl_scan_results_t *)&list->results;
- if (!results)
- goto done;
-
- if (results->version != WL_BSS_INFO_VERSION) {
- DHD_ERROR(("%s: results->version %d != WL_BSS_INFO_VERSION\n",
- __func__, results->version));
- goto done;
}
- bi = results->bss_info;
- for (i = 0; i < results->count; i++) {
- if (!bi)
- break;
-
- DHD_ISCAN(("%s: Find dups for BSS[%2.2d] %X:%X:%X:%X:%X:%X\n",
- __func__, i, bi->BSSID.octet[0],
- bi->BSSID.octet[1], bi->BSSID.octet[2],
- bi->BSSID.octet[3], bi->BSSID.octet[4],
- bi->BSSID.octet[5]));
-
- dhd_iscan_delete_bss(dhdp, bi->BSSID.octet, iscan_cur);
+ brcmf_os_proto_unblock(drvr);
- bi = (wl_bss_info_t *)((unsigned long)bi + bi->length);
- }
-
-done:
- DHD_ISCAN(("%s: Scan cache after delete\n", __func__));
- dhd_iscan_print_cache(iscan_cur);
- dhd_iscan_unlock();
return 0;
}
-
-void dhd_iscan_ind_scan_confirm(void *dhdp, bool status)
-{
-
- dhd_ind_scan_confirm(dhdp, status);
-}
-
-int dhd_iscan_request(void *dhdp, u16 action)
-{
- int rc;
- wl_iscan_params_t params;
- dhd_pub_t *dhd = dhd_bus_pub(dhdp);
- char buf[WLC_IOCTL_SMLEN];
-
- memset(&params, 0, sizeof(wl_iscan_params_t));
- memcpy(&params.params.bssid, &ether_bcast, ETH_ALEN);
-
- params.params.bss_type = DOT11_BSSTYPE_ANY;
- params.params.scan_type = DOT11_SCANTYPE_ACTIVE;
-
- params.params.nprobes = -1;
- params.params.active_time = -1;
- params.params.passive_time = -1;
- params.params.home_time = -1;
- params.params.channel_num = 0;
-
- params.version = ISCAN_REQ_VERSION;
- params.action = action;
- params.scan_duration = 0;
-
- bcm_mkiovar("iscan", (char *)&params, sizeof(wl_iscan_params_t), buf,
- WLC_IOCTL_SMLEN);
- rc = dhd_wl_ioctl(dhdp, WLC_SET_VAR, buf, WLC_IOCTL_SMLEN);
-
- return rc;
-}
-
-static int dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
-{
- wl_iscan_results_t *list_buf;
- wl_iscan_results_t list;
- wl_scan_results_t *results;
- iscan_buf_t *iscan_cur;
- int status = -1;
- dhd_pub_t *dhd = dhd_bus_pub(dhdp);
- int rc;
-
- iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
- if (!iscan_cur) {
- DHD_ERROR(("%s: Failed to allocate node\n", __func__));
- dhd_iscan_free_buf(dhdp, 0);
- dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
- goto fail;
- }
-
- dhd_iscan_lock();
-
- memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
- list_buf = (wl_iscan_results_t *) iscan_cur->iscan_buf;
- results = &list_buf->results;
- results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
- results->version = 0;
- results->count = 0;
-
- memset(&list, 0, sizeof(list));
- list.results.buflen = WLC_IW_ISCAN_MAXLEN;
- bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
- iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
- rc = dhd_wl_ioctl(dhdp, WLC_GET_VAR, iscan_cur->iscan_buf,
- WLC_IW_ISCAN_MAXLEN);
-
- results->buflen = results->buflen;
- results->version = results->version;
- *scan_count = results->count = results->count;
- status = list_buf->status;
-
- dhd_iscan_unlock();
-
- if (!(*scan_count))
- dhd_iscan_free_buf(dhdp, iscan_cur);
- else
- dhd_iscan_remove_duplicates(dhdp, iscan_cur);
-
-fail:
- return status;
-}
-#endif /* SIMPLE_ISCAN */
-
-#ifdef PNO_SUPPORT
-int dhd_pno_clean(dhd_pub_t *dhd)
-{
- char iovbuf[128];
- int pfn_enabled = 0;
- int iov_len = 0;
- int ret;
-
- /* Disable pfn */
- iov_len =
- bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf, sizeof(iovbuf));
- ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
- if (ret >= 0) {
- /* clear pfn */
- iov_len = bcm_mkiovar("pfnclear", 0, 0, iovbuf, sizeof(iovbuf));
- if (iov_len) {
- ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf,
- iov_len);
- if (ret < 0) {
- DHD_ERROR(("%s failed code %d\n", __func__,
- ret));
- }
- } else {
- ret = -1;
- DHD_ERROR(("%s failed code %d\n", __func__, iov_len));
- }
- } else
- DHD_ERROR(("%s failed code %d\n", __func__, ret));
-
- return ret;
-}
-
-int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled)
-{
- char iovbuf[128];
- int ret = -1;
-
- if ((!dhd) && ((pfn_enabled != 0) || (pfn_enabled != 1))) {
- DHD_ERROR(("%s error exit\n", __func__));
- return ret;
- }
-
- /* Enable/disable PNO */
- ret = bcm_mkiovar("pfn", (char *)&pfn_enabled, 4, iovbuf,
- sizeof(iovbuf));
- if (ret > 0) {
- ret = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf,
- sizeof(iovbuf));
- if (ret < 0) {
- DHD_ERROR(("%s failed for error=%d\n", __func__, ret));
- return ret;
- } else {
- dhd->pno_enable = pfn_enabled;
- DHD_TRACE(("%s set pno as %d\n", __func__,
- dhd->pno_enable));
- }
- } else
- DHD_ERROR(("%s failed err=%d\n", __func__, ret));
-
- return ret;
-}
-
-/* Function to execute combined scan */
-int
-dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t *ssids_local, int nssid, unsigned char scan_fr)
-{
- int err = -1;
- char iovbuf[128];
- int k, i;
- wl_pfn_param_t pfn_param;
- wl_pfn_t pfn_element;
-
- DHD_TRACE(("%s nssid=%d nchan=%d\n", __func__, nssid, scan_fr));
-
- if ((!dhd) && (!ssids_local)) {
- DHD_ERROR(("%s error exit\n", __func__));
- err = -1;
- }
-
- /* Check for broadcast ssid */
- for (k = 0; k < nssid; k++) {
- if (!ssids_local[k].SSID_len) {
- DHD_ERROR(("%d: Broadcast SSID is ilegal for PNO "
- "setting\n", k));
- return err;
- }
- }
-/* #define PNO_DUMP 1 */
-#ifdef PNO_DUMP
- {
- int j;
- for (j = 0; j < nssid; j++) {
- DHD_ERROR(("%d: scan for %s size =%d\n", j,
- ssids_local[j].SSID,
- ssids_local[j].SSID_len));
- }
- }
-#endif /* PNO_DUMP */
-
- /* clean up everything */
- err = dhd_pno_clean(dhd);
- if (err < 0) {
- DHD_ERROR(("%s failed error=%d\n", __func__, err));
- return err;
- }
- memset(&pfn_param, 0, sizeof(pfn_param));
- memset(&pfn_element, 0, sizeof(pfn_element));
-
- /* set pfn parameters */
- pfn_param.version = PFN_VERSION;
- pfn_param.flags = (PFN_LIST_ORDER << SORT_CRITERIA_BIT);
-
- /* set up pno scan fr */
- if (scan_fr != 0)
- pfn_param.scan_freq = scan_fr;
-
- bcm_mkiovar("pfn_set", (char *)&pfn_param, sizeof(pfn_param), iovbuf,
- sizeof(iovbuf));
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
-
- /* set all pfn ssid */
- for (i = 0; i < nssid; i++) {
-
- pfn_element.bss_type = DOT11_BSSTYPE_INFRASTRUCTURE;
- pfn_element.auth = WLAN_AUTH_OPEN;
- pfn_element.wpa_auth = WPA_AUTH_PFN_ANY;
- pfn_element.wsec = 0;
- pfn_element.infra = 1;
-
- memcpy((char *)pfn_element.ssid.SSID, ssids_local[i].SSID,
- ssids_local[i].SSID_len);
- pfn_element.ssid.SSID_len = ssids_local[i].SSID_len;
-
- err = bcm_mkiovar("pfn_add", (char *)&pfn_element,
- sizeof(pfn_element), iovbuf, sizeof(iovbuf));
- if (err > 0) {
- err = dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf,
- sizeof(iovbuf));
- if (err < 0) {
- DHD_ERROR(("%s failed for i=%d error=%d\n",
- __func__, i, err));
- return err;
- }
- } else
- DHD_ERROR(("%s failed err=%d\n", __func__, err));
- }
-
- /* Enable PNO */
- /* dhd_pno_enable(dhd, 1); */
- return err;
-}
-
-int dhd_pno_get_status(dhd_pub_t *dhd)
-{
- int ret = -1;
-
- if (!dhd)
- return ret;
- else
- return dhd->pno_enable;
-}
-
-#endif /* PNO_SUPPORT */
-
-/* Android ComboSCAN support */
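The deleted PNO helpers above share one pattern: pack an iovar (its name plus a payload) into a scratch buffer with bcm_mkiovar(), then hand that buffer to the dongle through dhdcdc_set_ioctl(..., WLC_SET_VAR, ...). Below is a minimal sketch of the packing that pattern assumes (a NUL-terminated name followed by the raw payload); the helper name and bounds check are illustrative only, not the driver's actual bcm_mkiovar().

#include <string.h>	/* memcpy/strlen; <linux/string.h> in kernel context */

/* Illustrative sketch: pack "<name>\0<payload>" into buf, the layout the
 * WLC_SET_VAR calls above rely on.  Returns the total iovar length, or 0
 * if the buffer is too small -- mirroring how the callers test the return. */
static unsigned int example_mkiovar(const char *name, const void *data,
				    unsigned int datalen,
				    char *buf, unsigned int buflen)
{
	unsigned int namelen = strlen(name) + 1;	/* keep the NUL */

	if (namelen + datalen > buflen)
		return 0;

	memcpy(buf, name, namelen);
	if (datalen)
		memcpy(buf + namelen, data, datalen);

	return namelen + datalen;
}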
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_custom_gpio.c b/drivers/staging/brcm80211/brcmfmac/dhd_custom_gpio.c
deleted file mode 100644
index 1cf6c5dc2bb..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/dhd_custom_gpio.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/netdevice.h>
-#include <bcmutils.h>
-
-#include <dngl_stats.h>
-#include <dhd.h>
-
-#include <wlioctl.h>
-#include <wl_iw.h>
-
-#define WL_ERROR(fmt, args...) printk(fmt, ##args)
-#define WL_TRACE(fmt, args...) no_printk(fmt, ##args)
-
-#ifdef CUSTOMER_HW
-extern void bcm_wlan_power_off(int);
-extern void bcm_wlan_power_on(int);
-#endif /* CUSTOMER_HW */
-#ifdef CUSTOMER_HW2
-int wifi_set_carddetect(int on);
-int wifi_set_power(int on, unsigned long msec);
-int wifi_get_irq_number(unsigned long *irq_flags_ptr);
-#endif
-
-#if defined(OOB_INTR_ONLY)
-
-#if defined(BCMLXSDMMC)
-extern int sdioh_mmc_irq(int irq);
-#endif /* (BCMLXSDMMC) */
-
-#ifdef CUSTOMER_HW3
-#include <mach/gpio.h>
-#endif
-
-/* Customer specific Host GPIO definition */
-static int dhd_oob_gpio_num = -1; /* GG 19 */
-
-module_param(dhd_oob_gpio_num, int, 0644);
-MODULE_PARM_DESC(dhd_oob_gpio_num, "DHD oob gpio number");
-
-int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr)
-{
- int host_oob_irq = 0;
-
-#ifdef CUSTOMER_HW2
- host_oob_irq = wifi_get_irq_number(irq_flags_ptr);
-
-#else /* for NOT CUSTOMER_HW2 */
-#if defined(CUSTOM_OOB_GPIO_NUM)
- if (dhd_oob_gpio_num < 0)
- dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
-#endif
-
- if (dhd_oob_gpio_num < 0) {
- WL_ERROR("%s: ERROR customer specific Host GPIO is NOT defined\n",
- __func__);
- return dhd_oob_gpio_num;
- }
-
- WL_ERROR("%s: customer specific Host GPIO number is (%d)\n",
- __func__, dhd_oob_gpio_num);
-
-#if defined CUSTOMER_HW
- host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num);
-#elif defined CUSTOMER_HW3
- gpio_request(dhd_oob_gpio_num, "oob irq");
- host_oob_irq = gpio_to_irq(dhd_oob_gpio_num);
- gpio_direction_input(dhd_oob_gpio_num);
-#endif /* CUSTOMER_HW */
-#endif /* CUSTOMER_HW2 */
-
- return host_oob_irq;
-}
-#endif /* defined(OOB_INTR_ONLY) */
-
-/* Customer function to control hw specific wlan gpios */
-void dhd_customer_gpio_wlan_ctrl(int onoff)
-{
- switch (onoff) {
- case WLAN_RESET_OFF:
- WL_TRACE("%s: call customer specific GPIO to insert WLAN RESET\n",
- __func__);
-#ifdef CUSTOMER_HW
- bcm_wlan_power_off(2);
-#endif /* CUSTOMER_HW */
-#ifdef CUSTOMER_HW2
- wifi_set_power(0, 0);
-#endif
- WL_ERROR("=========== WLAN placed in RESET ========\n");
- break;
-
- case WLAN_RESET_ON:
- WL_TRACE("%s: callc customer specific GPIO to remove WLAN RESET\n",
- __func__);
-#ifdef CUSTOMER_HW
- bcm_wlan_power_on(2);
-#endif /* CUSTOMER_HW */
-#ifdef CUSTOMER_HW2
- wifi_set_power(1, 0);
-#endif
- WL_ERROR("=========== WLAN going back to live ========\n");
- break;
-
- case WLAN_POWER_OFF:
- WL_TRACE("%s: call customer specific GPIO to turn off WL_REG_ON\n",
- __func__);
-#ifdef CUSTOMER_HW
- bcm_wlan_power_off(1);
-#endif /* CUSTOMER_HW */
- break;
-
- case WLAN_POWER_ON:
- WL_TRACE("%s: call customer specific GPIO to turn on WL_REG_ON\n",
- __func__);
-#ifdef CUSTOMER_HW
- bcm_wlan_power_on(1);
-#endif /* CUSTOMER_HW */
- /* Let the customer power supply stabilize */
- udelay(200);
- break;
- }
-}
-
-#ifdef GET_CUSTOM_MAC_ENABLE
-/* Function to get custom MAC address */
-int dhd_custom_get_mac_address(unsigned char *buf)
-{
- WL_TRACE("%s Enter\n", __func__);
- if (!buf)
- return -EINVAL;
-
- /* Customer access to MAC address stored outside of DHD driver */
-
-#ifdef EXAMPLE_GET_MAC
- /* EXAMPLE code */
- {
- u8 ea_example[ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0xFF};
- memcpy(buf, ea_example, ETH_ALEN);
- }
-#endif /* EXAMPLE_GET_MAC */
-
- return 0;
-}
-#endif /* GET_CUSTOM_MAC_ENABLE */
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_dbg.h b/drivers/staging/brcm80211/brcmfmac/dhd_dbg.h
index 0817f1348e0..5be4d7a609c 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_dbg.h
@@ -14,90 +14,57 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _dhd_dbg_
-#define _dhd_dbg_
+#ifndef _BRCMF_DBG_H_
+#define _BRCMF_DBG_H_
-#if defined(DHD_DEBUG)
+#if defined(BCMDBG)
-#define DHD_ERROR(args) \
- do {if ((dhd_msg_level & DHD_ERROR_VAL) && (net_ratelimit())) \
+#define BRCMF_ERROR(args) \
+ do {if ((brcmf_msg_level & BRCMF_ERROR_VAL) && (net_ratelimit())) \
printk args; } while (0)
-#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) \
+#define BRCMF_TRACE(args) do {if (brcmf_msg_level & BRCMF_TRACE_VAL) \
printk args; } while (0)
-#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) \
+#define BRCMF_INFO(args) do {if (brcmf_msg_level & BRCMF_INFO_VAL) \
printk args; } while (0)
-#define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) \
+#define BRCMF_DATA(args) do {if (brcmf_msg_level & BRCMF_DATA_VAL) \
printk args; } while (0)
-#define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) \
+#define BRCMF_CTL(args) do {if (brcmf_msg_level & BRCMF_CTL_VAL) \
printk args; } while (0)
-#define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) \
+#define BRCMF_TIMER(args) do {if (brcmf_msg_level & BRCMF_TIMER_VAL) \
printk args; } while (0)
-#define DHD_HDRS(args) do {if (dhd_msg_level & DHD_HDRS_VAL) \
+#define BRCMF_INTR(args) do {if (brcmf_msg_level & BRCMF_INTR_VAL) \
printk args; } while (0)
-#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) \
+#define BRCMF_GLOM(args) do {if (brcmf_msg_level & BRCMF_GLOM_VAL) \
printk args; } while (0)
-#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) \
+#define BRCMF_EVENT(args) do {if (brcmf_msg_level & BRCMF_EVENT_VAL) \
printk args; } while (0)
-#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) \
- printk args; } while (0)
-#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) \
- printk args; } while (0)
-#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) \
- printk args; } while (0)
-#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) \
- printk args; } while (0)
-
-#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL)
-#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL)
-#define DHD_INFO_ON() (dhd_msg_level & DHD_INFO_VAL)
-#define DHD_DATA_ON() (dhd_msg_level & DHD_DATA_VAL)
-#define DHD_CTL_ON() (dhd_msg_level & DHD_CTL_VAL)
-#define DHD_TIMER_ON() (dhd_msg_level & DHD_TIMER_VAL)
-#define DHD_HDRS_ON() (dhd_msg_level & DHD_HDRS_VAL)
-#define DHD_BYTES_ON() (dhd_msg_level & DHD_BYTES_VAL)
-#define DHD_INTR_ON() (dhd_msg_level & DHD_INTR_VAL)
-#define DHD_GLOM_ON() (dhd_msg_level & DHD_GLOM_VAL)
-#define DHD_EVENT_ON() (dhd_msg_level & DHD_EVENT_VAL)
-#define DHD_BTA_ON() (dhd_msg_level & DHD_BTA_VAL)
-#define DHD_ISCAN_ON() (dhd_msg_level & DHD_ISCAN_VAL)
-#else /* (defined BCMDBG) || (defined DHD_DEBUG) */
+#define BRCMF_DATA_ON() (brcmf_msg_level & BRCMF_DATA_VAL)
+#define BRCMF_CTL_ON() (brcmf_msg_level & BRCMF_CTL_VAL)
+#define BRCMF_HDRS_ON() (brcmf_msg_level & BRCMF_HDRS_VAL)
+#define BRCMF_BYTES_ON() (brcmf_msg_level & BRCMF_BYTES_VAL)
+#define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL)
-#define DHD_ERROR(args) do {if (net_ratelimit()) printk args; } while (0)
-#define DHD_TRACE(args)
-#define DHD_INFO(args)
-#define DHD_DATA(args)
-#define DHD_CTL(args)
-#define DHD_TIMER(args)
-#define DHD_HDRS(args)
-#define DHD_BYTES(args)
-#define DHD_INTR(args)
-#define DHD_GLOM(args)
-#define DHD_EVENT(args)
-#define DHD_BTA(args)
-#define DHD_ISCAN(args)
+#else /* !defined(BCMDBG) */
-#define DHD_ERROR_ON() 0
-#define DHD_TRACE_ON() 0
-#define DHD_INFO_ON() 0
-#define DHD_DATA_ON() 0
-#define DHD_CTL_ON() 0
-#define DHD_TIMER_ON() 0
-#define DHD_HDRS_ON() 0
-#define DHD_BYTES_ON() 0
-#define DHD_INTR_ON() 0
-#define DHD_GLOM_ON() 0
-#define DHD_EVENT_ON() 0
-#define DHD_BTA_ON() 0
-#define DHD_ISCAN_ON() 0
-#endif /* defined(DHD_DEBUG) */
+#define BRCMF_ERROR(args) do {if (net_ratelimit()) printk args; } while (0)
+#define BRCMF_TRACE(args)
+#define BRCMF_INFO(args)
+#define BRCMF_DATA(args)
+#define BRCMF_CTL(args)
+#define BRCMF_TIMER(args)
+#define BRCMF_INTR(args)
+#define BRCMF_GLOM(args)
+#define BRCMF_EVENT(args)
-#define DHD_LOG(args)
+#define BRCMF_DATA_ON() 0
+#define BRCMF_CTL_ON() 0
+#define BRCMF_HDRS_ON() 0
+#define BRCMF_BYTES_ON() 0
+#define BRCMF_GLOM_ON() 0
-#define DHD_NONE(args)
-extern int dhd_msg_level;
+#endif /* defined(BCMDBG) */
-/* Defines msg bits */
-#include <dhdioctl.h>
+extern int brcmf_msg_level;
-#endif /* _dhd_dbg_ */
+#endif /* _BRCMF_DBG_H_ */
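The renamed macros keep the existing calling convention: the whole printk() argument list is wrapped in an extra pair of parentheses so it travels through a single macro parameter, and each message class is gated on a bit in brcmf_msg_level. A short usage sketch, assuming this header is included; the function name and the ifidx check are illustrative.

/* Illustrative usage of the double-parenthesis convention defined above.
 * Only classes whose bit is set in brcmf_msg_level produce output, e.g.
 * brcmf_msg_level = BRCMF_ERROR_VAL | BRCMF_TRACE_VAL; */
static void example_trace_usage(int ifidx)
{
	BRCMF_TRACE(("%s: Enter\n", __func__));

	if (ifidx < 0)
		BRCMF_ERROR(("%s: bad ifidx %d\n", __func__, ifidx));
}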
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
index f356c564cfb..05dada98eb6 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
@@ -14,9 +14,6 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifdef CONFIG_WIFI_CONTROL_FUNC
-#include <linux/platform_device.h>
-#endif
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
@@ -31,177 +28,34 @@
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
-#include <bcmdefs.h>
-#include <bcmutils.h>
-
-#include <dngl_stats.h>
-#include <dhd.h>
-#include <dhd_bus.h>
-#include <dhd_proto.h>
-#include <dhd_dbg.h>
-
-#include <wl_cfg80211.h>
-
-#define EPI_VERSION_STR "4.218.248.5"
-#define ETH_P_BRCM 0x886c
-
-#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
-#include <linux/wifi_tiwlan.h>
-
-struct semaphore wifi_control_sem;
-
-struct dhd_bus *g_bus;
-
-static struct wifi_platform_data *wifi_control_data;
-static struct resource *wifi_irqres;
-
-int wifi_get_irq_number(unsigned long *irq_flags_ptr)
-{
- if (wifi_irqres) {
- *irq_flags_ptr = wifi_irqres->flags & IRQF_TRIGGER_MASK;
- return (int)wifi_irqres->start;
- }
-#ifdef CUSTOM_OOB_GPIO_NUM
- return CUSTOM_OOB_GPIO_NUM;
-#else
- return -1;
-#endif
-}
-
-int wifi_set_carddetect(int on)
-{
- printk(KERN_ERR "%s = %d\n", __func__, on);
- if (wifi_control_data && wifi_control_data->set_carddetect)
- wifi_control_data->set_carddetect(on);
- return 0;
-}
-
-int wifi_set_power(int on, unsigned long msec)
-{
- printk(KERN_ERR "%s = %d\n", __func__, on);
- if (wifi_control_data && wifi_control_data->set_power)
- wifi_control_data->set_power(on);
- if (msec)
- mdelay(msec);
- return 0;
-}
-
-int wifi_set_reset(int on, unsigned long msec)
-{
- printk(KERN_ERR "%s = %d\n", __func__, on);
- if (wifi_control_data && wifi_control_data->set_reset)
- wifi_control_data->set_reset(on);
- if (msec)
- mdelay(msec);
- return 0;
-}
-
-static int wifi_probe(struct platform_device *pdev)
-{
- struct wifi_platform_data *wifi_ctrl =
- (struct wifi_platform_data *)(pdev->dev.platform_data);
-
- printk(KERN_ERR "## %s\n", __func__);
- wifi_irqres =
- platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- "bcm4329_wlan_irq");
- wifi_control_data = wifi_ctrl;
-
- wifi_set_power(1, 0); /* Power On */
- wifi_set_carddetect(1); /* CardDetect (0->1) */
-
- up(&wifi_control_sem);
- return 0;
-}
-
-static int wifi_remove(struct platform_device *pdev)
-{
- struct wifi_platform_data *wifi_ctrl =
- (struct wifi_platform_data *)(pdev->dev.platform_data);
-
- printk(KERN_ERR "## %s\n", __func__);
- wifi_control_data = wifi_ctrl;
-
- wifi_set_carddetect(0); /* CardDetect (1->0) */
- wifi_set_power(0, 0); /* Power Off */
-
- up(&wifi_control_sem);
- return 0;
-}
-
-static int wifi_suspend(struct platform_device *pdev, pm_message_t state)
-{
- DHD_TRACE(("##> %s\n", __func__));
- return 0;
-}
-
-static int wifi_resume(struct platform_device *pdev)
-{
- DHD_TRACE(("##> %s\n", __func__));
- return 0;
-}
-
-static struct platform_driver wifi_device = {
- .probe = wifi_probe,
- .remove = wifi_remove,
- .suspend = wifi_suspend,
- .resume = wifi_resume,
- .driver = {
- .name = KBUILD_MODNAME,
- }
-};
-
-int wifi_add_dev(void)
-{
- DHD_TRACE(("## Calling platform_driver_register\n"));
- return platform_driver_register(&wifi_device);
-}
-
-void wifi_del_dev(void)
-{
- DHD_TRACE(("## Unregister platform_driver_register\n"));
- platform_driver_unregister(&wifi_device);
-}
-#endif /* defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */
+#include <linux/interrupt.h>
+#include <linux/hardirq.h>
+#include <net/cfg80211.h>
+#include <defs.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_proto.h"
+#include "dhd_dbg.h"
+#include "wl_cfg80211.h"
+#include "bcmchip.h"
#if defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
-atomic_t dhd_mmc_suspend;
-DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
+atomic_t brcmf_mmc_suspend;
#endif /* defined(CONFIG_PM_SLEEP) */
-#if defined(OOB_INTR_ONLY)
-extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
-#endif /* defined(OOB_INTR_ONLY) */
-
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN fullmac cards");
MODULE_LICENSE("Dual BSD/GPL");
-#define DRV_MODULE_NAME "brcmfmac"
-
-/* Linux wireless extension support */
-#if defined(CONFIG_WIRELESS_EXT)
-#include <wl_iw.h>
-extern wl_iw_extra_params_t g_wl_iw_params;
-#endif /* defined(CONFIG_WIRELESS_EXT) */
-
-#if defined(CONFIG_HAS_EARLYSUSPEND)
-#include <linux/earlysuspend.h>
-extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf,
- uint len);
-#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
-
-#ifdef PKT_FILTER_SUPPORT
-extern void dhd_pktfilter_offload_set(dhd_pub_t *dhd, char *arg);
-extern void dhd_pktfilter_offload_enable(dhd_pub_t *dhd, char *arg, int enable,
- int master_mode);
-#endif
/* Interface control information */
-typedef struct dhd_if {
- struct dhd_info *info; /* back pointer to dhd_info */
+struct brcmf_if {
+ struct brcmf_info *info; /* back pointer to brcmf_info */
/* OS/stack specifics */
struct net_device *net;
struct net_device_stats stats;
@@ -212,33 +66,17 @@ typedef struct dhd_if {
bool attached; /* Delayed attachment when unset */
bool txflowcontrol; /* Per interface flow control indicator */
char name[IFNAMSIZ]; /* linux interface name */
-} dhd_if_t;
+};
/* Local private structure (extension of pub) */
-typedef struct dhd_info {
-#if defined(CONFIG_WIRELESS_EXT)
- wl_iw_t iw; /* wireless extensions state (must be first) */
-#endif /* defined(CONFIG_WIRELESS_EXT) */
-
- dhd_pub_t pub;
+struct brcmf_info {
+ struct brcmf_pub pub;
/* OS/stack specifics */
- dhd_if_t *iflist[DHD_MAX_IFS];
+ struct brcmf_if *iflist[BRCMF_MAX_IFS];
struct semaphore proto_sem;
wait_queue_head_t ioctl_resp_wait;
- struct timer_list timer;
- bool wd_timer_valid;
- struct tasklet_struct tasklet;
- spinlock_t sdlock;
- spinlock_t txqlock;
- /* Thread based operation */
- bool threads_only;
- struct semaphore sdsem;
- struct task_struct *watchdog_tsk;
- struct semaphore watchdog_sem;
- struct task_struct *dpc_tsk;
- struct semaphore dpc_sem;
/* Thread to issue ioctl for multicast */
struct task_struct *sysioc_tsk;
@@ -246,83 +84,44 @@ typedef struct dhd_info {
bool set_multicast;
bool set_macaddress;
u8 macvalue[ETH_ALEN];
- wait_queue_head_t ctrl_wait;
atomic_t pend_8021x_cnt;
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
- struct early_suspend early_suspend;
-#endif /* CONFIG_HAS_EARLYSUSPEND */
-} dhd_info_t;
-
-/* Definitions to provide path to the firmware and nvram
- * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
- */
-char firmware_path[MOD_PARAM_PATHLEN];
-char nvram_path[MOD_PARAM_PATHLEN];
-
-/* load firmware and/or nvram values from the filesystem */
-module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0);
-module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0);
+};
/* Error bits */
-module_param(dhd_msg_level, int, 0);
+module_param(brcmf_msg_level, int, 0);
/* Spawn a thread for system ioctls (set mac, set mcast) */
-uint dhd_sysioc = true;
-module_param(dhd_sysioc, uint, 0);
-
-/* Watchdog interval */
-uint dhd_watchdog_ms = 10;
-module_param(dhd_watchdog_ms, uint, 0);
-
-#ifdef DHD_DEBUG
-/* Console poll interval */
-uint dhd_console_ms;
-module_param(dhd_console_ms, uint, 0);
-#endif /* DHD_DEBUG */
+uint brcmf_sysioc = true;
+module_param(brcmf_sysioc, uint, 0);
/* ARP offload agent mode : Enable ARP Host Auto-Reply
and ARP Peer Auto-Reply */
-uint dhd_arp_mode = 0xb;
-module_param(dhd_arp_mode, uint, 0);
+uint brcmf_arp_mode = 0xb;
+module_param(brcmf_arp_mode, uint, 0);
/* ARP offload enable */
-uint dhd_arp_enable = true;
-module_param(dhd_arp_enable, uint, 0);
+uint brcmf_arp_enable = true;
+module_param(brcmf_arp_enable, uint, 0);
/* Global Pkt filter enable control */
-uint dhd_pkt_filter_enable = true;
-module_param(dhd_pkt_filter_enable, uint, 0);
+uint brcmf_pkt_filter_enable = true;
+module_param(brcmf_pkt_filter_enable, uint, 0);
/* Pkt filter init setup */
-uint dhd_pkt_filter_init;
-module_param(dhd_pkt_filter_init, uint, 0);
+uint brcmf_pkt_filter_init;
+module_param(brcmf_pkt_filter_init, uint, 0);
/* Pkt filter mode control */
-uint dhd_master_mode = true;
-module_param(dhd_master_mode, uint, 1);
-
-/* Watchdog thread priority, -1 to use kernel timer */
-int dhd_watchdog_prio = 97;
-module_param(dhd_watchdog_prio, int, 0);
-
-/* DPC thread priority, -1 to use tasklet */
-int dhd_dpc_prio = 98;
-module_param(dhd_dpc_prio, int, 0);
+uint brcmf_master_mode = true;
+module_param(brcmf_master_mode, uint, 0);
-/* DPC thread priority, -1 to use tasklet */
-extern int dhd_dongle_memsize;
-module_param(dhd_dongle_memsize, int, 0);
+module_param(brcmf_dongle_memsize, int, 0);
/* Control fw roaming */
-#ifdef CUSTOMER_HW2
-uint dhd_roam;
-#else
-uint dhd_roam = 1;
-#endif
+uint brcmf_roam = 1;
/* Control radio state */
-uint dhd_radio_up = 1;
+uint brcmf_radio_up = 1;
/* Network interface name */
char iface_name[IFNAMSIZ] = "wlan";
@@ -331,252 +130,58 @@ module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
/* The following are specific to the SDIO dongle */
/* IOCTL response timeout */
-int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+int brcmf_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
/* Idle timeout for backplane clock */
-int dhd_idletime = DHD_IDLETIME_TICKS;
-module_param(dhd_idletime, int, 0);
+int brcmf_idletime = BRCMF_IDLETIME_TICKS;
+module_param(brcmf_idletime, int, 0);
/* Use polling */
-uint dhd_poll = false;
-module_param(dhd_poll, uint, 0);
-
-/* Use cfg80211 */
-uint dhd_cfg80211 = true;
-module_param(dhd_cfg80211, uint, 0);
+uint brcmf_poll;
+module_param(brcmf_poll, uint, 0);
/* Use interrupts */
-uint dhd_intr = true;
-module_param(dhd_intr, uint, 0);
+uint brcmf_intr = true;
+module_param(brcmf_intr, uint, 0);
/* SDIO Drive Strength (in milliamps) */
-uint dhd_sdiod_drive_strength = 6;
-module_param(dhd_sdiod_drive_strength, uint, 0);
+uint brcmf_sdiod_drive_strength = 6;
+module_param(brcmf_sdiod_drive_strength, uint, 0);
/* Tx/Rx bounds */
-extern uint dhd_txbound;
-extern uint dhd_rxbound;
-module_param(dhd_txbound, uint, 0);
-module_param(dhd_rxbound, uint, 0);
-
-/* Deferred transmits */
-extern uint dhd_deferred_tx;
-module_param(dhd_deferred_tx, uint, 0);
+module_param(brcmf_txbound, uint, 0);
+module_param(brcmf_rxbound, uint, 0);
#ifdef SDTEST
/* Echo packet generator (pkts/s) */
-uint dhd_pktgen;
-module_param(dhd_pktgen, uint, 0);
+uint brcmf_pktgen;
+module_param(brcmf_pktgen, uint, 0);
/* Echo packet len (0 => sawtooth, max 2040) */
-uint dhd_pktgen_len;
-module_param(dhd_pktgen_len, uint, 0);
-#endif
-
-#define FAVORITE_WIFI_CP (!!dhd_cfg80211)
-#define IS_CFG80211_FAVORITE() FAVORITE_WIFI_CP
-#define DBG_CFG80211_GET() ((dhd_cfg80211 & WL_DBG_MASK) >> 1)
-#define NO_FW_REQ() (dhd_cfg80211 & 0x80)
-
-/* Version string to report */
-#ifdef DHD_DEBUG
-#define DHD_COMPILED "\nCompiled in " SRCBASE
-#else
-#define DHD_COMPILED
+uint brcmf_pktgen_len;
+module_param(brcmf_pktgen_len, uint, 0);
#endif
-static void dhd_dpc(unsigned long data);
-/* forward decl */
-extern int dhd_wait_pend8021x(struct net_device *dev);
-
-#ifdef TOE
-#ifndef BDC
-#error TOE requires BDC
-#endif /* !BDC */
-static int dhd_toe_get(dhd_info_t *dhd, int idx, u32 *toe_ol);
-static int dhd_toe_set(dhd_info_t *dhd, int idx, u32 toe_ol);
-#endif /* TOE */
-
-static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
- wl_event_msg_t *event_ptr, void **data_ptr);
-
-#if defined(CONFIG_PM_SLEEP)
-static int dhd_sleep_pm_callback(struct notifier_block *nfb,
- unsigned long action, void *ignored)
-{
- switch (action) {
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- atomic_set(&dhd_mmc_suspend, true);
- return NOTIFY_OK;
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- atomic_set(&dhd_mmc_suspend, false);
- return NOTIFY_OK;
- }
- return 0;
-}
-
-static struct notifier_block dhd_sleep_pm_notifier = {
- .notifier_call = dhd_sleep_pm_callback,
- .priority = 0
-};
-
-extern int register_pm_notifier(struct notifier_block *nb);
-extern int unregister_pm_notifier(struct notifier_block *nb);
-#endif /* defined(CONFIG_PM_SLEEP) */
- /* && defined(DHD_GPL) */
-static void dhd_set_packet_filter(int value, dhd_pub_t *dhd)
-{
-#ifdef PKT_FILTER_SUPPORT
- DHD_TRACE(("%s: %d\n", __func__, value));
- /* 1 - Enable packet filter, only allow unicast packet to send up */
- /* 0 - Disable packet filter */
- if (dhd_pkt_filter_enable) {
- int i;
-
- for (i = 0; i < dhd->pktfilter_count; i++) {
- dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
- dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
- value, dhd_master_mode);
- }
- }
-#endif
-}
-
-#if defined(CONFIG_HAS_EARLYSUSPEND)
-static int dhd_set_suspend(int value, dhd_pub_t *dhd)
-{
- int power_mode = PM_MAX;
- /* wl_pkt_filter_enable_t enable_parm; */
- char iovbuf[32];
- int bcn_li_dtim = 3;
-#ifdef CUSTOMER_HW2
- uint roamvar = 1;
-#endif /* CUSTOMER_HW2 */
-
- DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
- __func__, value, dhd->in_suspend));
-
- if (dhd && dhd->up) {
- if (value && dhd->in_suspend) {
-
- /* Kernel suspended */
- DHD_TRACE(("%s: force extra Suspend setting\n",
- __func__));
-
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_PM,
- (char *)&power_mode,
- sizeof(power_mode));
-
- /* Enable packet filter, only allow unicast
- packet to send up */
- dhd_set_packet_filter(1, dhd);
-
- /* if dtim skip setup as default force it
- * to wake each third dtim
- * for better power saving.
- * Note that side effect is chance to miss BC/MC
- * packet
- */
- if ((dhd->dtim_skip == 0) || (dhd->dtim_skip == 1))
- bcn_li_dtim = 3;
- else
- bcn_li_dtim = dhd->dtim_skip;
- bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim,
- 4, iovbuf, sizeof(iovbuf));
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf,
- sizeof(iovbuf));
-#ifdef CUSTOMER_HW2
- /* Disable built-in roaming to allow the
- * supplicant to take care of roaming
- */
- bcm_mkiovar("roam_off", (char *)&roamvar, 4,
- iovbuf, sizeof(iovbuf));
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf,
- sizeof(iovbuf));
-#endif /* CUSTOMER_HW2 */
- } else {
-
- /* Kernel resumed */
- DHD_TRACE(("%s: Remove extra suspend setting\n",
- __func__));
-
- power_mode = PM_FAST;
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_PM,
- (char *)&power_mode,
- sizeof(power_mode));
-
- /* disable pkt filter */
- dhd_set_packet_filter(0, dhd);
-
- /* restore pre-suspend setting for dtim_skip */
- bcm_mkiovar("bcn_li_dtim", (char *)&dhd->dtim_skip,
- 4, iovbuf, sizeof(iovbuf));
-
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf,
- sizeof(iovbuf));
-#ifdef CUSTOMER_HW2
- roamvar = 0;
- bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf,
- sizeof(iovbuf));
- dhdcdc_set_ioctl(dhd, 0, WLC_SET_VAR, iovbuf,
- sizeof(iovbuf));
-#endif /* CUSTOMER_HW2 */
- }
- }
-
- return 0;
-}
-
-static void dhd_suspend_resume_helper(struct dhd_info *dhd, int val)
-{
- dhd_pub_t *dhdp = &dhd->pub;
-
- dhd_os_proto_block(dhdp);
- /* Set flag when early suspend was called */
- dhdp->in_suspend = val;
- if (!dhdp->suspend_disable_flag)
- dhd_set_suspend(val, dhdp);
- dhd_os_proto_unblock(dhdp);
-}
-
-static void dhd_early_suspend(struct early_suspend *h)
-{
- struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
-
- DHD_TRACE(("%s: enter\n", __func__));
-
- if (dhd)
- dhd_suspend_resume_helper(dhd, 1);
-
-}
-
-static void dhd_late_resume(struct early_suspend *h)
-{
- struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
-
- DHD_TRACE(("%s: enter\n", __func__));
-
- if (dhd)
- dhd_suspend_resume_helper(dhd, 0);
-}
-#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
+static int brcmf_toe_get(struct brcmf_info *drvr_priv, int idx, u32 *toe_ol);
+static int brcmf_toe_set(struct brcmf_info *drvr_priv, int idx, u32 toe_ol);
+static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
+ struct brcmf_event_msg *event_ptr,
+ void **data_ptr);
/*
* Generalized timeout mechanism. Uses spin sleep with exponential
* back-off until
* the sleep time reaches one jiffy, then switches over to task delay. Usage:
*
- * dhd_timeout_start(&tmo, usec);
- * while (!dhd_timeout_expired(&tmo))
+ * brcmf_timeout_start(&tmo, usec);
+ * while (!brcmf_timeout_expired(&tmo))
* if (poll_something())
* break;
- * if (dhd_timeout_expired(&tmo))
+ * if (brcmf_timeout_expired(&tmo))
* fatal();
*/
-void dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
+void brcmf_timeout_start(struct brcmf_timeout *tmo, uint usec)
{
tmo->limit = usec;
tmo->increment = 0;
@@ -584,7 +189,7 @@ void dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
tmo->tick = 1000000 / HZ;
}
-int dhd_timeout_expired(dhd_timeout_t *tmo)
+int brcmf_timeout_expired(struct brcmf_timeout *tmo)
{
/* Does nothing on the first call */
if (tmo->increment == 0) {
@@ -621,74 +226,68 @@ int dhd_timeout_expired(dhd_timeout_t *tmo)
return 0;
}
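The block comment above already gives the intended call pattern; for concreteness, here is a sketch of polling a flag with the renamed helpers. The polled condition, the 100 ms budget, the error code, and the usual kernel headers (bool, -ETIMEDOUT) are assumed for illustration.

#include <linux/errno.h>	/* -ETIMEDOUT (assumed available) */

/* Illustrative only: poll a condition for up to 100 ms using the helpers
 * renamed above, following the usage shown in their comment block. */
static int example_wait_ready(volatile bool *ready)
{
	struct brcmf_timeout tmo;

	brcmf_timeout_start(&tmo, 100000);	/* budget in microseconds */
	while (!brcmf_timeout_expired(&tmo))
		if (*ready)
			return 0;		/* condition met in time */

	return -ETIMEDOUT;			/* budget exhausted */
}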
-static int dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
+static int brcmf_net2idx(struct brcmf_info *drvr_priv, struct net_device *net)
{
int i = 0;
- ASSERT(dhd);
- while (i < DHD_MAX_IFS) {
- if (dhd->iflist[i] && (dhd->iflist[i]->net == net))
+ while (i < BRCMF_MAX_IFS) {
+ if (drvr_priv->iflist[i] && (drvr_priv->iflist[i]->net == net))
return i;
i++;
}
- return DHD_BAD_IF;
+ return BRCMF_BAD_IF;
}
-int dhd_ifname2idx(dhd_info_t *dhd, char *name)
+int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
{
- int i = DHD_MAX_IFS;
-
- ASSERT(dhd);
+ int i = BRCMF_MAX_IFS;
if (name == NULL || *name == '\0')
return 0;
while (--i > 0)
- if (dhd->iflist[i]
- && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ))
+ if (drvr_priv->iflist[i]
+ && !strncmp(drvr_priv->iflist[i]->name, name, IFNAMSIZ))
break;
- DHD_TRACE(("%s: return idx %d for \"%s\"\n", __func__, i, name));
+ BRCMF_TRACE(("%s: return idx %d for \"%s\"\n", __func__, i, name));
return i; /* default - the primary interface */
}
-char *dhd_ifname(dhd_pub_t *dhdp, int ifidx)
+char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
- dhd_info_t *dhd = (dhd_info_t *) dhdp->info;
-
- ASSERT(dhd);
+ struct brcmf_info *drvr_priv = drvr->info;
- if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
- DHD_ERROR(("%s: ifidx %d out of range\n", __func__, ifidx));
+ if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
+ BRCMF_ERROR(("%s: ifidx %d out of range\n", __func__, ifidx));
return "<if_bad>";
}
- if (dhd->iflist[ifidx] == NULL) {
- DHD_ERROR(("%s: null i/f %d\n", __func__, ifidx));
+ if (drvr_priv->iflist[ifidx] == NULL) {
+ BRCMF_ERROR(("%s: null i/f %d\n", __func__, ifidx));
return "<if_null>";
}
- if (dhd->iflist[ifidx]->net)
- return dhd->iflist[ifidx]->net->name;
+ if (drvr_priv->iflist[ifidx]->net)
+ return drvr_priv->iflist[ifidx]->net->name;
return "<if_none>";
}
-static void _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
+static void _brcmf_set_multicast_list(struct brcmf_info *drvr_priv, int ifidx)
{
struct net_device *dev;
struct netdev_hw_addr *ha;
u32 allmulti, cnt;
- wl_ioctl_t ioc;
+ struct brcmf_ioctl ioc;
char *buf, *bufp;
uint buflen;
int ret;
- ASSERT(dhd && dhd->iflist[ifidx]);
- dev = dhd->iflist[ifidx]->net;
+ dev = drvr_priv->iflist[ifidx]->net;
cnt = netdev_mc_count(dev);
/* Determine initial value of allmulti flag */
@@ -699,8 +298,8 @@ static void _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETH_ALEN);
bufp = buf = kmalloc(buflen, GFP_ATOMIC);
if (!bufp) {
- DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
- dhd_ifname(&dhd->pub, ifidx), cnt));
+ BRCMF_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx), cnt));
return;
}
@@ -720,15 +319,15 @@ static void _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
}
memset(&ioc, 0, sizeof(ioc));
- ioc.cmd = WLC_SET_VAR;
+ ioc.cmd = BRCMF_C_SET_VAR;
ioc.buf = buf;
ioc.len = buflen;
ioc.set = true;
- ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ ret = brcmf_proto_ioctl(&drvr_priv->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
- DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
- dhd_ifname(&dhd->pub, ifidx), cnt));
+ BRCMF_ERROR(("%s: set mcast_list failed, cnt %d\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx), cnt));
allmulti = cnt ? true : allmulti;
}
@@ -742,32 +341,33 @@ static void _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
buflen = sizeof("allmulti") + sizeof(allmulti);
buf = kmalloc(buflen, GFP_ATOMIC);
if (!buf) {
- DHD_ERROR(("%s: out of memory for allmulti\n",
- dhd_ifname(&dhd->pub, ifidx)));
+ BRCMF_ERROR(("%s: out of memory for allmulti\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx)));
return;
}
allmulti = cpu_to_le32(allmulti);
- if (!bcm_mkiovar
+ if (!brcmu_mkiovar
("allmulti", (void *)&allmulti, sizeof(allmulti), buf, buflen)) {
- DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d "
- "buflen %u\n", dhd_ifname(&dhd->pub, ifidx),
- (int)sizeof(allmulti), buflen));
+ BRCMF_ERROR(("%s: mkiovar failed for allmulti, datalen %d "
+ "buflen %u\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx),
+ (int)sizeof(allmulti), buflen));
kfree(buf);
return;
}
memset(&ioc, 0, sizeof(ioc));
- ioc.cmd = WLC_SET_VAR;
+ ioc.cmd = BRCMF_C_SET_VAR;
ioc.buf = buf;
ioc.len = buflen;
ioc.set = true;
- ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ ret = brcmf_proto_ioctl(&drvr_priv->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
- DHD_ERROR(("%s: set allmulti %d failed\n",
- dhd_ifname(&dhd->pub, ifidx),
- le32_to_cpu(allmulti)));
+ BRCMF_ERROR(("%s: set allmulti %d failed\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx),
+ le32_to_cpu(allmulti)));
}
kfree(buf);
@@ -779,45 +379,44 @@ static void _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
allmulti = cpu_to_le32(allmulti);
memset(&ioc, 0, sizeof(ioc));
- ioc.cmd = WLC_SET_PROMISC;
+ ioc.cmd = BRCMF_C_SET_PROMISC;
ioc.buf = &allmulti;
ioc.len = sizeof(allmulti);
ioc.set = true;
- ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ ret = brcmf_proto_ioctl(&drvr_priv->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
- DHD_ERROR(("%s: set promisc %d failed\n",
- dhd_ifname(&dhd->pub, ifidx),
- le32_to_cpu(allmulti)));
+ BRCMF_ERROR(("%s: set promisc %d failed\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx),
+ le32_to_cpu(allmulti)));
}
}
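The buflen computation earlier in this function implies the "mcast_list" iovar payload layout: the iovar name with its terminating NUL, a 32-bit address count, then cnt packed MAC addresses. The copy loop itself falls outside the hunks shown here, so the sketch below only restates that size arithmetic; the helper name is illustrative, and ETH_ALEN/u32 are taken from the usual kernel headers.

/* Layout implied by: buflen = sizeof("mcast_list") + sizeof(cnt) + cnt*ETH_ALEN */
static unsigned int example_mcast_list_size(u32 cnt)
{
	return sizeof("mcast_list")	/* iovar name + terminating NUL */
	     + sizeof(cnt)		/* 32-bit address count         */
	     + cnt * ETH_ALEN;		/* cnt packed MAC addresses     */
}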
-static int
-_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, u8 *addr)
+static int _brcmf_set_mac_address(struct brcmf_info *drvr_priv, int ifidx, u8 *addr)
{
char buf[32];
- wl_ioctl_t ioc;
+ struct brcmf_ioctl ioc;
int ret;
- DHD_TRACE(("%s enter\n", __func__));
- if (!bcm_mkiovar
+ BRCMF_TRACE(("%s enter\n", __func__));
+ if (!brcmu_mkiovar
("cur_etheraddr", (char *)addr, ETH_ALEN, buf, 32)) {
- DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n",
- dhd_ifname(&dhd->pub, ifidx)));
+ BRCMF_ERROR(("%s: mkiovar failed for cur_etheraddr\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx)));
return -1;
}
memset(&ioc, 0, sizeof(ioc));
- ioc.cmd = WLC_SET_VAR;
+ ioc.cmd = BRCMF_C_SET_VAR;
ioc.buf = buf;
ioc.len = 32;
ioc.set = true;
- ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ ret = brcmf_proto_ioctl(&drvr_priv->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
- DHD_ERROR(("%s: set cur_etheraddr failed\n",
- dhd_ifname(&dhd->pub, ifidx)));
+ BRCMF_ERROR(("%s: set cur_etheraddr failed\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx)));
} else {
- memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETH_ALEN);
+ memcpy(drvr_priv->iflist[ifidx]->net->dev_addr, addr, ETH_ALEN);
}
return ret;
@@ -827,45 +426,44 @@ _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, u8 *addr)
extern struct net_device *ap_net_dev;
#endif
-static void dhd_op_if(dhd_if_t *ifp)
+/* Virtual interfaces only (ifp && ifp->info && ifp->idx) */
+static void brcmf_op_if(struct brcmf_if *ifp)
{
- dhd_info_t *dhd;
+ struct brcmf_info *drvr_priv;
int ret = 0, err = 0;
- ASSERT(ifp && ifp->info && ifp->idx); /* Virtual interfaces only */
-
- dhd = ifp->info;
+ drvr_priv = ifp->info;
- DHD_TRACE(("%s: idx %d, state %d\n", __func__, ifp->idx, ifp->state));
+ BRCMF_TRACE(("%s: idx %d, state %d\n", __func__, ifp->idx, ifp->state));
switch (ifp->state) {
- case WLC_E_IF_ADD:
+ case BRCMF_E_IF_ADD:
/*
* Delete the existing interface before overwriting it
- * in case we missed the WLC_E_IF_DEL event.
+ * in case we missed the BRCMF_E_IF_DEL event.
*/
if (ifp->net != NULL) {
- DHD_ERROR(("%s: ERROR: netdev:%s already exists, "
- "try free & unregister\n",
- __func__, ifp->net->name));
+ BRCMF_ERROR(("%s: ERROR: netdev:%s already exists, "
+ "try free & unregister\n",
+ __func__, ifp->net->name));
netif_stop_queue(ifp->net);
unregister_netdev(ifp->net);
free_netdev(ifp->net);
}
/* Allocate etherdev, including space for private structure */
- ifp->net = alloc_etherdev(sizeof(dhd));
+ ifp->net = alloc_etherdev(sizeof(drvr_priv));
if (!ifp->net) {
- DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
+ BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
ret = -ENOMEM;
}
if (ret == 0) {
strcpy(ifp->net->name, ifp->name);
- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
- err = dhd_net_attach(&dhd->pub, ifp->idx);
+ memcpy(netdev_priv(ifp->net), &drvr_priv, sizeof(drvr_priv));
+ err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
if (err != 0) {
- DHD_ERROR(("%s: dhd_net_attach failed, "
- "err %d\n",
- __func__, err));
+ BRCMF_ERROR(("%s: brcmf_net_attach failed, "
+ "err %d\n",
+ __func__, err));
ret = -EOPNOTSUPP;
} else {
#ifdef SOFTAP
@@ -880,26 +478,25 @@ static void dhd_op_if(dhd_if_t *ifp)
wl0.1 is ready */
up(&ap_eth_sema);
#endif
- DHD_TRACE(("\n ==== pid:%x, net_device for "
- "if:%s created ===\n\n",
- current->pid, ifp->net->name));
+ BRCMF_TRACE(("\n ==== pid:%x, net_device for "
+ "if:%s created ===\n\n",
+ current->pid, ifp->net->name));
ifp->state = 0;
}
}
break;
- case WLC_E_IF_DEL:
+ case BRCMF_E_IF_DEL:
if (ifp->net != NULL) {
- DHD_TRACE(("\n%s: got 'WLC_E_IF_DEL' state\n",
- __func__));
+ BRCMF_TRACE(("\n%s: got 'WLC_E_IF_DEL' state\n",
+ __func__));
netif_stop_queue(ifp->net);
unregister_netdev(ifp->net);
- ret = DHD_DEL_IF; /* Make sure the free_netdev()
+ ret = BRCMF_DEL_IF; /* Make sure the free_netdev()
is called */
}
break;
default:
- DHD_ERROR(("%s: bad op %d\n", __func__, ifp->state));
- ASSERT(!ifp->state);
+ BRCMF_ERROR(("%s: bad op %d\n", __func__, ifp->state));
break;
}
@@ -907,7 +504,7 @@ static void dhd_op_if(dhd_if_t *ifp)
if (ifp->net)
free_netdev(ifp->net);
- dhd->iflist[ifp->idx] = NULL;
+ drvr_priv->iflist[ifp->idx] = NULL;
kfree(ifp);
#ifdef SOFTAP
if (ifp->net == ap_net_dev)
@@ -917,9 +514,9 @@ static void dhd_op_if(dhd_if_t *ifp)
}
}
-static int _dhd_sysioc_thread(void *data)
+static int _brcmf_sysioc_thread(void *data)
{
- dhd_info_t *dhd = (dhd_info_t *) data;
+ struct brcmf_info *drvr_priv = (struct brcmf_info *) data;
int i;
#ifdef SOFTAP
bool in_ap = false;
@@ -927,46 +524,51 @@ static int _dhd_sysioc_thread(void *data)
allow_signal(SIGTERM);
- while (down_interruptible(&dhd->sysioc_sem) == 0) {
+ while (down_interruptible(&drvr_priv->sysioc_sem) == 0) {
if (kthread_should_stop())
break;
- for (i = 0; i < DHD_MAX_IFS; i++) {
- if (dhd->iflist[i]) {
+ for (i = 0; i < BRCMF_MAX_IFS; i++) {
+ struct brcmf_if *ifentry = drvr_priv->iflist[i];
+ if (ifentry) {
#ifdef SOFTAP
in_ap = (ap_net_dev != NULL);
#endif /* SOFTAP */
- if (dhd->iflist[i]->state)
- dhd_op_if(dhd->iflist[i]);
+ if (ifentry->state)
+ brcmf_op_if(ifentry);
#ifdef SOFTAP
- if (dhd->iflist[i] == NULL) {
- DHD_TRACE(("\n\n %s: interface %d "
- "removed!\n", __func__, i));
+ if (drvr_priv->iflist[i] == NULL) {
+ BRCMF_TRACE(("\n\n %s: interface %d "
+ "removed!\n", __func__,
+ i));
continue;
}
- if (in_ap && dhd->set_macaddress) {
- DHD_TRACE(("attempt to set MAC for %s "
- "in AP Mode," "blocked. \n",
- dhd->iflist[i]->net->name));
- dhd->set_macaddress = false;
+ if (in_ap && drvr_priv->set_macaddress) {
+ BRCMF_TRACE(("attempt to set MAC for"
+ " %s in AP Mode,"
+ " blocked.\n",
+ ifentry->net->name));
+ drvr_priv->set_macaddress = false;
continue;
}
- if (in_ap && dhd->set_multicast) {
- DHD_TRACE(("attempt to set MULTICAST list for %s" "in AP Mode, blocked. \n",
- dhd->iflist[i]->net->name));
- dhd->set_multicast = false;
+ if (in_ap && drvr_priv->set_multicast) {
+ BRCMF_TRACE(("attempt to set MULTICAST "
+ "list for %s in AP Mode, "
+ "blocked.\n",
+ ifentry->net->name));
+ drvr_priv->set_multicast = false;
continue;
}
#endif /* SOFTAP */
- if (dhd->set_multicast) {
- dhd->set_multicast = false;
- _dhd_set_multicast_list(dhd, i);
+ if (drvr_priv->set_multicast) {
+ drvr_priv->set_multicast = false;
+ _brcmf_set_multicast_list(drvr_priv, i);
}
- if (dhd->set_macaddress) {
- dhd->set_macaddress = false;
- _dhd_set_mac_address(dhd, i,
- dhd->macvalue);
+ if (drvr_priv->set_macaddress) {
+ drvr_priv->set_macaddress = false;
+ _brcmf_set_mac_address(drvr_priv, i,
+ drvr_priv->macvalue);
}
}
}
@@ -974,47 +576,44 @@ static int _dhd_sysioc_thread(void *data)
return 0;
}
-static int dhd_set_mac_address(struct net_device *dev, void *addr)
+static int brcmf_netdev_set_mac_address(struct net_device *dev, void *addr)
{
int ret = 0;
- dhd_info_t *dhd = *(dhd_info_t **) netdev_priv(dev);
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(dev);
struct sockaddr *sa = (struct sockaddr *)addr;
int ifidx;
- ifidx = dhd_net2idx(dhd, dev);
- if (ifidx == DHD_BAD_IF)
+ ifidx = brcmf_net2idx(drvr_priv, dev);
+ if (ifidx == BRCMF_BAD_IF)
return -1;
- ASSERT(dhd->sysioc_tsk);
- memcpy(&dhd->macvalue, sa->sa_data, ETH_ALEN);
- dhd->set_macaddress = true;
- up(&dhd->sysioc_sem);
+ memcpy(&drvr_priv->macvalue, sa->sa_data, ETH_ALEN);
+ drvr_priv->set_macaddress = true;
+ up(&drvr_priv->sysioc_sem);
return ret;
}
-static void dhd_set_multicast_list(struct net_device *dev)
+static void brcmf_netdev_set_multicast_list(struct net_device *dev)
{
- dhd_info_t *dhd = *(dhd_info_t **) netdev_priv(dev);
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(dev);
int ifidx;
- ifidx = dhd_net2idx(dhd, dev);
- if (ifidx == DHD_BAD_IF)
+ ifidx = brcmf_net2idx(drvr_priv, dev);
+ if (ifidx == BRCMF_BAD_IF)
return;
- ASSERT(dhd->sysioc_tsk);
- dhd->set_multicast = true;
- up(&dhd->sysioc_sem);
+ drvr_priv->set_multicast = true;
+ up(&drvr_priv->sysioc_sem);
}
-int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, struct sk_buff *pktbuf)
+int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
{
- int ret;
- dhd_info_t *dhd = (dhd_info_t *) (dhdp->info);
+ struct brcmf_info *drvr_priv = drvr->info;
/* Reject if down */
- if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN))
+ if (!drvr->up || (drvr->busstate == BRCMF_BUS_DOWN))
return -ENODEV;
/* Update multicast statistic */
@@ -1023,143 +622,107 @@ int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, struct sk_buff *pktbuf)
struct ethhdr *eh = (struct ethhdr *)pktdata;
if (is_multicast_ether_addr(eh->h_dest))
- dhdp->tx_multicast++;
+ drvr->tx_multicast++;
if (ntohs(eh->h_proto) == ETH_P_PAE)
- atomic_inc(&dhd->pend_8021x_cnt);
+ atomic_inc(&drvr_priv->pend_8021x_cnt);
}
/* If the protocol uses a data header, apply it */
- dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
+ brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
/* Use bus module to send data frame */
-#ifdef BCMDBUS
- ret = dbus_send_pkt(dhdp->dbus, pktbuf, NULL /* pktinfo */);
-#else
- ret = dhd_bus_txdata(dhdp->bus, pktbuf);
-#endif /* BCMDBUS */
-
- return ret;
-}
-
-static inline void *
-osl_pkt_frmnative(struct sk_buff *skb)
-{
- return (void *)skb;
-}
-#define PKTFRMNATIVE(osh, skb) \
- osl_pkt_frmnative((struct sk_buff *)(skb))
-
-static inline struct sk_buff *
-osl_pkt_tonative(void *pkt)
-{
- return (struct sk_buff *)pkt;
+ return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf);
}
-#define PKTTONATIVE(osh, pkt) \
- osl_pkt_tonative((pkt))
-static int dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
+static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *net)
{
int ret;
- void *pktbuf;
- dhd_info_t *dhd = *(dhd_info_t **) netdev_priv(net);
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(net);
int ifidx;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
/* Reject if down */
- if (!dhd->pub.up || (dhd->pub.busstate == DHD_BUS_DOWN)) {
- DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d\n",
- __func__, dhd->pub.up, dhd->pub.busstate));
+ if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) {
+ BRCMF_ERROR(("%s: xmit rejected pub.up=%d busstate=%d\n",
+ __func__, drvr_priv->pub.up,
+ drvr_priv->pub.busstate));
netif_stop_queue(net);
return -ENODEV;
}
- ifidx = dhd_net2idx(dhd, net);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s: bad ifidx %d\n", __func__, ifidx));
+ ifidx = brcmf_net2idx(drvr_priv, net);
+ if (ifidx == BRCMF_BAD_IF) {
+ BRCMF_ERROR(("%s: bad ifidx %d\n", __func__, ifidx));
netif_stop_queue(net);
return -ENODEV;
}
/* Make sure there's enough room for any header */
- if (skb_headroom(skb) < dhd->pub.hdrlen) {
+ if (skb_headroom(skb) < drvr_priv->pub.hdrlen) {
struct sk_buff *skb2;
- DHD_INFO(("%s: insufficient headroom\n",
- dhd_ifname(&dhd->pub, ifidx)));
- dhd->pub.tx_realloc++;
- skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen);
+ BRCMF_INFO(("%s: insufficient headroom\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx)));
+ drvr_priv->pub.tx_realloc++;
+ skb2 = skb_realloc_headroom(skb, drvr_priv->pub.hdrlen);
dev_kfree_skb(skb);
skb = skb2;
if (skb == NULL) {
- DHD_ERROR(("%s: skb_realloc_headroom failed\n",
- dhd_ifname(&dhd->pub, ifidx)));
+ BRCMF_ERROR(("%s: skb_realloc_headroom failed\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx)));
ret = -ENOMEM;
goto done;
}
}
- /* Convert to packet */
- pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb);
- if (!pktbuf) {
- DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
- dhd_ifname(&dhd->pub, ifidx)));
- dev_kfree_skb_any(skb);
- ret = -ENOMEM;
- goto done;
- }
-
- ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+ ret = brcmf_sendpkt(&drvr_priv->pub, ifidx, skb);
done:
if (ret)
- dhd->pub.dstats.tx_dropped++;
+ drvr_priv->pub.dstats.tx_dropped++;
else
- dhd->pub.tx_packets++;
+ drvr_priv->pub.tx_packets++;
/* Return ok: we always eat the packet */
return 0;
}
-void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
+void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool state)
{
struct net_device *net;
- dhd_info_t *dhd = dhdp->info;
+ struct brcmf_info *drvr_priv = drvr->info;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- dhdp->txoff = state;
- ASSERT(dhd && dhd->iflist[ifidx]);
- net = dhd->iflist[ifidx]->net;
+ drvr->txoff = state;
+ net = drvr_priv->iflist[ifidx]->net;
if (state == ON)
netif_stop_queue(net);
else
netif_wake_queue(net);
}
-void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, struct sk_buff *pktbuf,
+void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
int numpkt)
{
- dhd_info_t *dhd = (dhd_info_t *) dhdp->info;
- struct sk_buff *skb;
+ struct brcmf_info *drvr_priv = drvr->info;
unsigned char *eth;
uint len;
void *data;
struct sk_buff *pnext, *save_pktbuf;
int i;
- dhd_if_t *ifp;
- wl_event_msg_t event;
-
- DHD_TRACE(("%s: Enter\n", __func__));
+ struct brcmf_if *ifp;
+ struct brcmf_event_msg event;
- save_pktbuf = pktbuf;
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
+ save_pktbuf = skb;
- pnext = pktbuf->next;
- pktbuf->next = NULL;
+ for (i = 0; skb && i < numpkt; i++, skb = pnext) {
- skb = PKTTONATIVE(dhdp->osh, pktbuf);
+ pnext = skb->next;
+ skb->next = NULL;
/* Get the protocol, maintain skb around eth_type_trans()
* The main reason for this hack is for the limitation of
@@ -1176,16 +739,15 @@ void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, struct sk_buff *pktbuf,
eth = skb->data;
len = skb->len;
- ifp = dhd->iflist[ifidx];
+ ifp = drvr_priv->iflist[ifidx];
if (ifp == NULL)
- ifp = dhd->iflist[0];
+ ifp = drvr_priv->iflist[0];
- ASSERT(ifp);
skb->dev = ifp->net;
skb->protocol = eth_type_trans(skb, skb->dev);
if (skb->pkt_type == PACKET_MULTICAST)
- dhd->pub.rx_multicast++;
+ drvr_priv->pub.rx_multicast++;
skb->data = eth;
skb->len = len;
@@ -1194,20 +756,20 @@ void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, struct sk_buff *pktbuf,
skb_pull(skb, ETH_HLEN);
/* Process special event packets and then discard them */
- if (ntohs(skb->protocol) == ETH_P_BRCM)
- dhd_wl_host_event(dhd, &ifidx,
+ if (ntohs(skb->protocol) == ETH_P_LINK_CTL)
+ brcmf_host_event(drvr_priv, &ifidx,
skb_mac_header(skb),
&event, &data);
- ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
- if (dhd->iflist[ifidx] && !dhd->iflist[ifidx]->state)
- ifp = dhd->iflist[ifidx];
+ if (drvr_priv->iflist[ifidx] &&
+ !drvr_priv->iflist[ifidx]->state)
+ ifp = drvr_priv->iflist[ifidx];
if (ifp->net)
ifp->net->last_rx = jiffies;
- dhdp->dstats.rx_bytes += skb->len;
- dhdp->rx_packets++; /* Local count */
+ drvr->dstats.rx_bytes += skb->len;
+ drvr->rx_packets++; /* Local count */
if (in_interrupt()) {
netif_rx(skb);
@@ -1223,216 +785,83 @@ void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, struct sk_buff *pktbuf,
}
}
-void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
-{
- /* Linux version has nothing to do */
- return;
-}
-
-void dhd_txcomplete(dhd_pub_t *dhdp, struct sk_buff *txp, bool success)
+void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp, bool success)
{
uint ifidx;
- dhd_info_t *dhd = (dhd_info_t *) (dhdp->info);
+ struct brcmf_info *drvr_priv = drvr->info;
struct ethhdr *eh;
u16 type;
- dhd_prot_hdrpull(dhdp, &ifidx, txp);
+ brcmf_proto_hdrpull(drvr, &ifidx, txp);
eh = (struct ethhdr *)(txp->data);
type = ntohs(eh->h_proto);
if (type == ETH_P_PAE)
- atomic_dec(&dhd->pend_8021x_cnt);
+ atomic_dec(&drvr_priv->pend_8021x_cnt);
}
-static struct net_device_stats *dhd_get_stats(struct net_device *net)
+static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *net)
{
- dhd_info_t *dhd = *(dhd_info_t **) netdev_priv(net);
- dhd_if_t *ifp;
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(net);
+ struct brcmf_if *ifp;
int ifidx;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- ifidx = dhd_net2idx(dhd, net);
- if (ifidx == DHD_BAD_IF)
+ ifidx = brcmf_net2idx(drvr_priv, net);
+ if (ifidx == BRCMF_BAD_IF)
return NULL;
- ifp = dhd->iflist[ifidx];
- ASSERT(dhd && ifp);
+ ifp = drvr_priv->iflist[ifidx];
- if (dhd->pub.up) {
+ if (drvr_priv->pub.up) {
/* Use the protocol to get dongle stats */
- dhd_prot_dstats(&dhd->pub);
+ brcmf_proto_dstats(&drvr_priv->pub);
}
/* Copy dongle stats to net device stats */
- ifp->stats.rx_packets = dhd->pub.dstats.rx_packets;
- ifp->stats.tx_packets = dhd->pub.dstats.tx_packets;
- ifp->stats.rx_bytes = dhd->pub.dstats.rx_bytes;
- ifp->stats.tx_bytes = dhd->pub.dstats.tx_bytes;
- ifp->stats.rx_errors = dhd->pub.dstats.rx_errors;
- ifp->stats.tx_errors = dhd->pub.dstats.tx_errors;
- ifp->stats.rx_dropped = dhd->pub.dstats.rx_dropped;
- ifp->stats.tx_dropped = dhd->pub.dstats.tx_dropped;
- ifp->stats.multicast = dhd->pub.dstats.multicast;
+ ifp->stats.rx_packets = drvr_priv->pub.dstats.rx_packets;
+ ifp->stats.tx_packets = drvr_priv->pub.dstats.tx_packets;
+ ifp->stats.rx_bytes = drvr_priv->pub.dstats.rx_bytes;
+ ifp->stats.tx_bytes = drvr_priv->pub.dstats.tx_bytes;
+ ifp->stats.rx_errors = drvr_priv->pub.dstats.rx_errors;
+ ifp->stats.tx_errors = drvr_priv->pub.dstats.tx_errors;
+ ifp->stats.rx_dropped = drvr_priv->pub.dstats.rx_dropped;
+ ifp->stats.tx_dropped = drvr_priv->pub.dstats.tx_dropped;
+ ifp->stats.multicast = drvr_priv->pub.dstats.multicast;
return &ifp->stats;
}
-static int dhd_watchdog_thread(void *data)
-{
- dhd_info_t *dhd = (dhd_info_t *) data;
-
- /* This thread doesn't need any user-level access,
- * so get rid of all our resources
- */
-#ifdef DHD_SCHED
- if (dhd_watchdog_prio > 0) {
- struct sched_param param;
- param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO) ?
- dhd_watchdog_prio : (MAX_RT_PRIO - 1);
- setScheduler(current, SCHED_FIFO, &param);
- }
-#endif /* DHD_SCHED */
-
- allow_signal(SIGTERM);
- /* Run until signal received */
- while (1) {
- if (kthread_should_stop())
- break;
- if (down_interruptible(&dhd->watchdog_sem) == 0) {
- if (dhd->pub.dongle_reset == false) {
- /* Call the bus module watchdog */
- dhd_bus_watchdog(&dhd->pub);
- }
- /* Count the tick for reference */
- dhd->pub.tickcnt++;
- } else
- break;
- }
- return 0;
-}
-
-static void dhd_watchdog(unsigned long data)
-{
- dhd_info_t *dhd = (dhd_info_t *) data;
-
- if (dhd->watchdog_tsk) {
- up(&dhd->watchdog_sem);
-
- /* Reschedule the watchdog */
- if (dhd->wd_timer_valid) {
- mod_timer(&dhd->timer,
- jiffies + dhd_watchdog_ms * HZ / 1000);
- }
- return;
- }
-
- /* Call the bus module watchdog */
- dhd_bus_watchdog(&dhd->pub);
-
- /* Count the tick for reference */
- dhd->pub.tickcnt++;
-
- /* Reschedule the watchdog */
- if (dhd->wd_timer_valid)
- mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
-}
-
-static int dhd_dpc_thread(void *data)
-{
- dhd_info_t *dhd = (dhd_info_t *) data;
-
- /* This thread doesn't need any user-level access,
- * so get rid of all our resources
- */
-#ifdef DHD_SCHED
- if (dhd_dpc_prio > 0) {
- struct sched_param param;
- param.sched_priority =
- (dhd_dpc_prio <
- MAX_RT_PRIO) ? dhd_dpc_prio : (MAX_RT_PRIO - 1);
- setScheduler(current, SCHED_FIFO, &param);
- }
-#endif /* DHD_SCHED */
-
- allow_signal(SIGTERM);
- /* Run until signal received */
- while (1) {
- if (kthread_should_stop())
- break;
- if (down_interruptible(&dhd->dpc_sem) == 0) {
- /* Call bus dpc unless it indicated down
- (then clean stop) */
- if (dhd->pub.busstate != DHD_BUS_DOWN) {
- if (dhd_bus_dpc(dhd->pub.bus)) {
- up(&dhd->dpc_sem);
- }
- } else {
- dhd_bus_stop(dhd->pub.bus, true);
- }
- } else
- break;
- }
- return 0;
-}
-
-static void dhd_dpc(unsigned long data)
-{
- dhd_info_t *dhd;
-
- dhd = (dhd_info_t *) data;
-
- /* Call bus dpc unless it indicated down (then clean stop) */
- if (dhd->pub.busstate != DHD_BUS_DOWN) {
- if (dhd_bus_dpc(dhd->pub.bus))
- tasklet_schedule(&dhd->tasklet);
- } else {
- dhd_bus_stop(dhd->pub.bus, true);
- }
-}
-
-void dhd_sched_dpc(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = (dhd_info_t *) dhdp->info;
-
- if (dhd->dpc_tsk) {
- up(&dhd->dpc_sem);
- return;
- }
-
- tasklet_schedule(&dhd->tasklet);
-}
-
-#ifdef TOE
/* Retrieve current toe component enables, which are kept
as a bitmap in toe_ol iovar */
-static int dhd_toe_get(dhd_info_t *dhd, int ifidx, u32 *toe_ol)
+static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol)
{
- wl_ioctl_t ioc;
+ struct brcmf_ioctl ioc;
char buf[32];
int ret;
memset(&ioc, 0, sizeof(ioc));
- ioc.cmd = WLC_GET_VAR;
+ ioc.cmd = BRCMF_C_GET_VAR;
ioc.buf = buf;
ioc.len = (uint) sizeof(buf);
ioc.set = false;
strcpy(buf, "toe_ol");
- ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ ret = brcmf_proto_ioctl(&drvr_priv->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
/* Check for older dongle image that doesn't support toe_ol */
if (ret == -EIO) {
- DHD_ERROR(("%s: toe not supported by device\n",
- dhd_ifname(&dhd->pub, ifidx)));
+ BRCMF_ERROR(("%s: toe not supported by device\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx)));
return -EOPNOTSUPP;
}
- DHD_INFO(("%s: could not get toe_ol: ret=%d\n",
- dhd_ifname(&dhd->pub, ifidx), ret));
+ BRCMF_INFO(("%s: could not get toe_ol: ret=%d\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx), ret));
return ret;
}
@@ -1442,15 +871,15 @@ static int dhd_toe_get(dhd_info_t *dhd, int ifidx, u32 *toe_ol)
/* Set current toe component enables in toe_ol iovar,
and set toe global enable iovar */
-static int dhd_toe_set(dhd_info_t *dhd, int ifidx, u32 toe_ol)
+static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
{
- wl_ioctl_t ioc;
+ struct brcmf_ioctl ioc;
char buf[32];
int toe, ret;
memset(&ioc, 0, sizeof(ioc));
- ioc.cmd = WLC_SET_VAR;
+ ioc.cmd = BRCMF_C_SET_VAR;
ioc.buf = buf;
ioc.len = (uint) sizeof(buf);
ioc.set = true;
@@ -1460,10 +889,10 @@ static int dhd_toe_set(dhd_info_t *dhd, int ifidx, u32 toe_ol)
strcpy(buf, "toe_ol");
memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(u32));
- ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ ret = brcmf_proto_ioctl(&drvr_priv->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
- DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
- dhd_ifname(&dhd->pub, ifidx), ret));
+ BRCMF_ERROR(("%s: could not set toe_ol: ret=%d\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx), ret));
return ret;
}
@@ -1474,44 +903,42 @@ static int dhd_toe_set(dhd_info_t *dhd, int ifidx, u32 toe_ol)
strcpy(buf, "toe");
memcpy(&buf[sizeof("toe")], &toe, sizeof(u32));
- ret = dhd_prot_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ ret = brcmf_proto_ioctl(&drvr_priv->pub, ifidx, &ioc, ioc.buf, ioc.len);
if (ret < 0) {
- DHD_ERROR(("%s: could not set toe: ret=%d\n",
- dhd_ifname(&dhd->pub, ifidx), ret));
+ BRCMF_ERROR(("%s: could not set toe: ret=%d\n",
+ brcmf_ifname(&drvr_priv->pub, ifidx), ret));
return ret;
}
return 0;
}
-#endif /* TOE */
-static void dhd_ethtool_get_drvinfo(struct net_device *net,
+static void brcmf_ethtool_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- dhd_info_t *dhd = *(dhd_info_t **) netdev_priv(net);
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(net);
- sprintf(info->driver, DRV_MODULE_NAME);
- sprintf(info->version, "%lu", dhd->pub.drv_version);
- sprintf(info->fw_version, "%s", wl_cfg80211_get_fwname());
- sprintf(info->bus_info, "%s", dev_name(&wl_cfg80211_get_sdio_func()->dev));
+ sprintf(info->driver, KBUILD_MODNAME);
+ sprintf(info->version, "%lu", drvr_priv->pub.drv_version);
+ sprintf(info->fw_version, "%s", BCM4329_FW_NAME);
+ sprintf(info->bus_info, "%s",
+ dev_name(&brcmf_cfg80211_get_sdio_func()->dev));
}
-struct ethtool_ops dhd_ethtool_ops = {
- .get_drvinfo = dhd_ethtool_get_drvinfo
+struct ethtool_ops brcmf_ethtool_ops = {
+ .get_drvinfo = brcmf_ethtool_get_drvinfo
};
-static int dhd_ethtool(dhd_info_t *dhd, void *uaddr)
+static int brcmf_ethtool(struct brcmf_info *drvr_priv, void *uaddr)
{
struct ethtool_drvinfo info;
char drvname[sizeof(info.driver)];
u32 cmd;
-#ifdef TOE
struct ethtool_value edata;
u32 toe_cmpnt, csum_dir;
int ret;
-#endif
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
/* all ethtool calls start with a cmd word */
if (copy_from_user(&cmd, uaddr, sizeof(u32)))
@@ -1529,36 +956,35 @@ static int dhd_ethtool(dhd_info_t *dhd, void *uaddr)
memset(&info, 0, sizeof(info));
info.cmd = cmd;
- /* if dhd requested, identify ourselves */
+ /* if requested, identify ourselves */
if (strcmp(drvname, "?dhd") == 0) {
sprintf(info.driver, "dhd");
- strcpy(info.version, EPI_VERSION_STR);
+ strcpy(info.version, BRCMF_VERSION_STR);
}
/* otherwise, require dongle to be up */
- else if (!dhd->pub.up) {
- DHD_ERROR(("%s: dongle is not up\n", __func__));
+ else if (!drvr_priv->pub.up) {
+ BRCMF_ERROR(("%s: dongle is not up\n", __func__));
return -ENODEV;
}
/* finally, report dongle driver type */
- else if (dhd->pub.iswl)
+ else if (drvr_priv->pub.iswl)
sprintf(info.driver, "wl");
else
sprintf(info.driver, "xx");
- sprintf(info.version, "%lu", dhd->pub.drv_version);
+ sprintf(info.version, "%lu", drvr_priv->pub.drv_version);
if (copy_to_user(uaddr, &info, sizeof(info)))
return -EFAULT;
- DHD_CTL(("%s: given %*s, returning %s\n", __func__,
- (int)sizeof(drvname), drvname, info.driver));
+ BRCMF_CTL(("%s: given %*s, returning %s\n", __func__,
+ (int)sizeof(drvname), drvname, info.driver));
break;
-#ifdef TOE
/* Get toe offload components from dongle */
case ETHTOOL_GRXCSUM:
case ETHTOOL_GTXCSUM:
- ret = dhd_toe_get(dhd, 0, &toe_cmpnt);
+ ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
if (ret < 0)
return ret;
@@ -1579,7 +1005,7 @@ static int dhd_ethtool(dhd_info_t *dhd, void *uaddr)
return -EFAULT;
/* Read the current settings, update and write back */
- ret = dhd_toe_get(dhd, 0, &toe_cmpnt);
+ ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
if (ret < 0)
return ret;
@@ -1591,22 +1017,21 @@ static int dhd_ethtool(dhd_info_t *dhd, void *uaddr)
else
toe_cmpnt &= ~csum_dir;
- ret = dhd_toe_set(dhd, 0, toe_cmpnt);
+ ret = brcmf_toe_set(drvr_priv, 0, toe_cmpnt);
if (ret < 0)
return ret;
/* If setting TX checksum mode, tell Linux the new mode */
if (cmd == ETHTOOL_STXCSUM) {
if (edata.data)
- dhd->iflist[0]->net->features |=
+ drvr_priv->iflist[0]->net->features |=
NETIF_F_IP_CSUM;
else
- dhd->iflist[0]->net->features &=
+ drvr_priv->iflist[0]->net->features &=
~NETIF_F_IP_CSUM;
}
break;
-#endif /* TOE */
default:
return -EOPNOTSUPP;
@@ -1615,10 +1040,11 @@ static int dhd_ethtool(dhd_info_t *dhd, void *uaddr)
return 0;
}
-static int dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
+static int brcmf_netdev_ioctl_entry(struct net_device *net, struct ifreq *ifr,
+ int cmd)
{
- dhd_info_t *dhd = *(dhd_info_t **) netdev_priv(net);
- dhd_ioctl_t ioc;
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(net);
+ struct brcmf_c_ioctl ioc;
int bcmerror = 0;
int buflen = 0;
void *buf = NULL;
@@ -1626,22 +1052,14 @@ static int dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
int ifidx;
bool is_set_key_cmd;
- ifidx = dhd_net2idx(dhd, net);
- DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __func__, ifidx, cmd));
+ ifidx = brcmf_net2idx(drvr_priv, net);
+ BRCMF_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __func__, ifidx, cmd));
- if (ifidx == DHD_BAD_IF)
+ if (ifidx == BRCMF_BAD_IF)
return -1;
-#if defined(CONFIG_WIRELESS_EXT)
- /* linux wireless extensions */
- if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
- /* may recurse, do NOT lock */
- return wl_iw_ioctl(net, ifr, cmd);
- }
-#endif /* defined(CONFIG_WIRELESS_EXT) */
-
if (cmd == SIOCETHTOOL)
- return dhd_ethtool(dhd, (void *)ifr->ifr_data);
+ return brcmf_ethtool(drvr_priv, (void *)ifr->ifr_data);
if (cmd != SIOCDEVPRIVATE)
return -EOPNOTSUPP;
@@ -1649,14 +1067,14 @@ static int dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
memset(&ioc, 0, sizeof(ioc));
/* Copy the ioc control structure part of ioctl request */
- if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+ if (copy_from_user(&ioc, ifr->ifr_data, sizeof(struct brcmf_ioctl))) {
bcmerror = -EINVAL;
goto done;
}
/* Copy out any buffer passed */
if (ioc.buf) {
- buflen = min_t(int, ioc.len, DHD_IOCTL_MAXLEN);
+ buflen = min_t(int, ioc.len, BRCMF_IOCTL_MAXLEN);
/* optimization for direct ioctl calls from kernel */
/*
if (segment_eq(get_fs(), KERNEL_DS)) {
@@ -1676,9 +1094,9 @@ static int dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
}
}
- /* To differentiate between wl and dhd read 4 more byes */
- if ((copy_from_user(&driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
- sizeof(uint)) != 0)) {
+ /* To differentiate, read 4 more bytes */
+ if ((copy_from_user(&driver, (char *)ifr->ifr_data +
+ sizeof(struct brcmf_ioctl), sizeof(uint)) != 0)) {
bcmerror = -EINVAL;
goto done;
}
@@ -1688,39 +1106,41 @@ static int dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
goto done;
}
- /* check for local dhd ioctl and handle it */
- if (driver == DHD_IOCTL_MAGIC) {
- bcmerror = dhd_ioctl((void *)&dhd->pub, &ioc, buf, buflen);
+ /* check for local brcmf ioctl and handle it */
+ if (driver == BRCMF_IOCTL_MAGIC) {
+ bcmerror = brcmf_c_ioctl((void *)&drvr_priv->pub, &ioc, buf, buflen);
if (bcmerror)
- dhd->pub.bcmerror = bcmerror;
+ drvr_priv->pub.bcmerror = bcmerror;
goto done;
}
/* send to dongle (must be up, and wl) */
- if ((dhd->pub.busstate != DHD_BUS_DATA)) {
- DHD_ERROR(("%s DONGLE_DOWN,__func__\n", __func__));
+ if ((drvr_priv->pub.busstate != BRCMF_BUS_DATA)) {
+ BRCMF_ERROR(("%s: DONGLE_DOWN\n", __func__));
bcmerror = -EIO;
goto done;
}
- if (!dhd->pub.iswl) {
+ if (!drvr_priv->pub.iswl) {
bcmerror = -EIO;
goto done;
}
- /* Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
- * prevent M4 encryption.
+ /*
+ * Intercept BRCMF_C_SET_KEY IOCTL - serialize M4 send and
+ * set key IOCTL to prevent M4 encryption.
*/
- is_set_key_cmd = ((ioc.cmd == WLC_SET_KEY) ||
- ((ioc.cmd == WLC_SET_VAR) &&
+ is_set_key_cmd = ((ioc.cmd == BRCMF_C_SET_KEY) ||
+ ((ioc.cmd == BRCMF_C_SET_VAR) &&
!(strncmp("wsec_key", ioc.buf, 9))) ||
- ((ioc.cmd == WLC_SET_VAR) &&
+ ((ioc.cmd == BRCMF_C_SET_VAR) &&
!(strncmp("bsscfg:wsec_key", ioc.buf, 15))));
if (is_set_key_cmd)
- dhd_wait_pend8021x(net);
+ brcmf_netdev_wait_pend8021x(net);
bcmerror =
- dhd_prot_ioctl(&dhd->pub, ifidx, (wl_ioctl_t *)&ioc, buf, buflen);
+ brcmf_proto_ioctl(&drvr_priv->pub, ifidx, (struct brcmf_ioctl *)&ioc,
+ buf, buflen);
done:
if (!bcmerror && buf && ioc.buf) {
@@ -1736,159 +1156,146 @@ done:
return bcmerror;
}
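
[Editor's note] The private-ioctl path above expects ifr->ifr_data to point at a struct brcmf_ioctl immediately followed by a 32-bit discriminator word; BRCMF_IOCTL_MAGIC routes the request to the local brcmf_c_ioctl() handler instead of the dongle. A hedged sketch of the byte layout a caller would build, assuming an already-filled struct brcmf_ioctl named ioc; the req buffer is illustrative, not a defined ABI:

	/* Illustrative only: byte layout brcmf_netdev_ioctl_entry() copies from ifr_data */
	char req[sizeof(struct brcmf_ioctl) + sizeof(uint)];
	uint magic = BRCMF_IOCTL_MAGIC;		/* selects the local brcmf_c_ioctl() path */

	memcpy(req, &ioc, sizeof(struct brcmf_ioctl));			/* ioctl header first */
	memcpy(req + sizeof(struct brcmf_ioctl), &magic, sizeof(uint));	/* 4-byte discriminator */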
-static int dhd_stop(struct net_device *net)
+static int brcmf_netdev_stop(struct net_device *net)
{
#if !defined(IGNORE_ETH0_DOWN)
- dhd_info_t *dhd = *(dhd_info_t **) netdev_priv(net);
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(net);
- DHD_TRACE(("%s: Enter\n", __func__));
- if (IS_CFG80211_FAVORITE()) {
- wl_cfg80211_down();
- }
- if (dhd->pub.up == 0)
+ BRCMF_TRACE(("%s: Enter\n", __func__));
+ brcmf_cfg80211_down();
+ if (drvr_priv->pub.up == 0)
return 0;
/* Set state and stop OS transmissions */
- dhd->pub.up = 0;
+ drvr_priv->pub.up = 0;
netif_stop_queue(net);
#else
- DHD_ERROR(("BYPASS %s:due to BRCM compilation : under investigation\n",
- __func__));
+ BRCMF_ERROR(("BYPASS %s: due to BRCM compilation: under investigation\n",
+ __func__));
#endif /* !defined(IGNORE_ETH0_DOWN) */
return 0;
}
-static int dhd_open(struct net_device *net)
+static int brcmf_netdev_open(struct net_device *net)
{
- dhd_info_t *dhd = *(dhd_info_t **) netdev_priv(net);
-#ifdef TOE
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **) netdev_priv(net);
u32 toe_ol;
-#endif
- int ifidx = dhd_net2idx(dhd, net);
+ int ifidx = brcmf_net2idx(drvr_priv, net);
s32 ret = 0;
- DHD_TRACE(("%s: ifidx %d\n", __func__, ifidx));
+ BRCMF_TRACE(("%s: ifidx %d\n", __func__, ifidx));
if (ifidx == 0) { /* do it only for primary eth0 */
/* try to bring up bus */
- ret = dhd_bus_start(&dhd->pub);
+ ret = brcmf_bus_start(&drvr_priv->pub);
if (ret != 0) {
- DHD_ERROR(("%s: failed with code %d\n", __func__, ret));
+ BRCMF_ERROR(("%s: failed with code %d\n",
+ __func__, ret));
return -1;
}
- atomic_set(&dhd->pend_8021x_cnt, 0);
+ atomic_set(&drvr_priv->pend_8021x_cnt, 0);
- memcpy(net->dev_addr, dhd->pub.mac, ETH_ALEN);
+ memcpy(net->dev_addr, drvr_priv->pub.mac, ETH_ALEN);
-#ifdef TOE
/* Get current TOE mode from dongle */
- if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0
+ if (brcmf_toe_get(drvr_priv, ifidx, &toe_ol) >= 0
&& (toe_ol & TOE_TX_CSUM_OL) != 0)
- dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
+ drvr_priv->iflist[ifidx]->net->features |=
+ NETIF_F_IP_CSUM;
else
- dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
-#endif
+ drvr_priv->iflist[ifidx]->net->features &=
+ ~NETIF_F_IP_CSUM;
}
/* Allow transmit calls */
netif_start_queue(net);
- dhd->pub.up = 1;
- if (IS_CFG80211_FAVORITE()) {
- if (unlikely(wl_cfg80211_up())) {
- DHD_ERROR(("%s: failed to bring up cfg80211\n",
- __func__));
- return -1;
- }
+ drvr_priv->pub.up = 1;
+ if (unlikely(brcmf_cfg80211_up())) {
+ BRCMF_ERROR(("%s: failed to bring up cfg80211\n",
+ __func__));
+ return -1;
}
return ret;
}
int
-dhd_add_if(dhd_info_t *dhd, int ifidx, void *handle, char *name,
+brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, void *handle, char *name,
u8 *mac_addr, u32 flags, u8 bssidx)
{
- dhd_if_t *ifp;
-
- DHD_TRACE(("%s: idx %d, handle->%p\n", __func__, ifidx, handle));
+ struct brcmf_if *ifp;
- ASSERT(dhd && (ifidx < DHD_MAX_IFS));
+ BRCMF_TRACE(("%s: idx %d, handle->%p\n", __func__, ifidx, handle));
- ifp = dhd->iflist[ifidx];
- if (!ifp && !(ifp = kmalloc(sizeof(dhd_if_t), GFP_ATOMIC))) {
- DHD_ERROR(("%s: OOM - dhd_if_t\n", __func__));
- return -ENOMEM;
+ ifp = drvr_priv->iflist[ifidx];
+ if (!ifp) {
+ ifp = kmalloc(sizeof(struct brcmf_if), GFP_ATOMIC);
+ if (!ifp) {
+ BRCMF_ERROR(("%s: OOM - struct brcmf_if\n", __func__));
+ return -ENOMEM;
+ }
}
- memset(ifp, 0, sizeof(dhd_if_t));
- ifp->info = dhd;
- dhd->iflist[ifidx] = ifp;
+ memset(ifp, 0, sizeof(struct brcmf_if));
+ ifp->info = drvr_priv;
+ drvr_priv->iflist[ifidx] = ifp;
strlcpy(ifp->name, name, IFNAMSIZ);
if (mac_addr != NULL)
memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
if (handle == NULL) {
- ifp->state = WLC_E_IF_ADD;
+ ifp->state = BRCMF_E_IF_ADD;
ifp->idx = ifidx;
- ASSERT(dhd->sysioc_tsk);
- up(&dhd->sysioc_sem);
+ up(&drvr_priv->sysioc_sem);
} else
ifp->net = (struct net_device *)handle;
return 0;
}
-void dhd_del_if(dhd_info_t *dhd, int ifidx)
+void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx)
{
- dhd_if_t *ifp;
+ struct brcmf_if *ifp;
- DHD_TRACE(("%s: idx %d\n", __func__, ifidx));
+ BRCMF_TRACE(("%s: idx %d\n", __func__, ifidx));
- ASSERT(dhd && ifidx && (ifidx < DHD_MAX_IFS));
- ifp = dhd->iflist[ifidx];
+ ifp = drvr_priv->iflist[ifidx];
if (!ifp) {
- DHD_ERROR(("%s: Null interface\n", __func__));
+ BRCMF_ERROR(("%s: Null interface\n", __func__));
return;
}
- ifp->state = WLC_E_IF_DEL;
+ ifp->state = BRCMF_E_IF_DEL;
ifp->idx = ifidx;
- ASSERT(dhd->sysioc_tsk);
- up(&dhd->sysioc_sem);
+ up(&drvr_priv->sysioc_sem);
}
-dhd_pub_t *dhd_attach(struct dhd_bus *bus, uint bus_hdrlen)
+struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
{
- dhd_info_t *dhd = NULL;
+ struct brcmf_info *drvr_priv = NULL;
struct net_device *net;
- DHD_TRACE(("%s: Enter\n", __func__));
- /* updates firmware nvram path if it was provided as module
- paramters */
- if ((firmware_path != NULL) && (firmware_path[0] != '\0'))
- strcpy(fw_path, firmware_path);
- if ((nvram_path != NULL) && (nvram_path[0] != '\0'))
- strcpy(nv_path, nvram_path);
+ BRCMF_TRACE(("%s: Enter\n", __func__));
/* Allocate etherdev, including space for private structure */
- net = alloc_etherdev(sizeof(dhd));
+ net = alloc_etherdev(sizeof(drvr_priv));
if (!net) {
- DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
+ BRCMF_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
goto fail;
}
- /* Allocate primary dhd_info */
- dhd = kzalloc(sizeof(dhd_info_t), GFP_ATOMIC);
- if (!dhd) {
- DHD_ERROR(("%s: OOM - alloc dhd_info\n", __func__));
+ /* Allocate primary brcmf_info */
+ drvr_priv = kzalloc(sizeof(struct brcmf_info), GFP_ATOMIC);
+ if (!drvr_priv) {
+ BRCMF_ERROR(("%s: OOM - alloc brcmf_info\n", __func__));
goto fail;
}
/*
- * Save the dhd_info into the priv
+ * Save the brcmf_info into the priv
*/
- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
+ memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
/* Set network interface name if it was provided as module parameter */
if (iface_name[0]) {
@@ -1902,317 +1309,180 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bus, uint bus_hdrlen)
strcat(net->name, "%d");
}
- if (dhd_add_if(dhd, 0, (void *)net, net->name, NULL, 0, 0) ==
- DHD_BAD_IF)
+ if (brcmf_add_if(drvr_priv, 0, (void *)net, net->name, NULL, 0, 0) ==
+ BRCMF_BAD_IF)
goto fail;
net->netdev_ops = NULL;
- sema_init(&dhd->proto_sem, 1);
+ sema_init(&drvr_priv->proto_sem, 1);
/* Initialize other structure content */
- init_waitqueue_head(&dhd->ioctl_resp_wait);
- init_waitqueue_head(&dhd->ctrl_wait);
-
- /* Initialize the spinlocks */
- spin_lock_init(&dhd->sdlock);
- spin_lock_init(&dhd->txqlock);
+ init_waitqueue_head(&drvr_priv->ioctl_resp_wait);
/* Link to info module */
- dhd->pub.info = dhd;
+ drvr_priv->pub.info = drvr_priv;
/* Link to bus module */
- dhd->pub.bus = bus;
- dhd->pub.hdrlen = bus_hdrlen;
+ drvr_priv->pub.bus = bus;
+ drvr_priv->pub.hdrlen = bus_hdrlen;
/* Attach and link in the protocol */
- if (dhd_prot_attach(&dhd->pub) != 0) {
- DHD_ERROR(("dhd_prot_attach failed\n"));
+ if (brcmf_proto_attach(&drvr_priv->pub) != 0) {
+ BRCMF_ERROR(("brcmf_proto_attach failed\n"));
goto fail;
}
-#if defined(CONFIG_WIRELESS_EXT)
- /* Attach and link in the iw */
- if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
- DHD_ERROR(("wl_iw_attach failed\n"));
- goto fail;
- }
-#endif /* defined(CONFIG_WIRELESS_EXT) */
/* Attach and link in the cfg80211 */
- if (IS_CFG80211_FAVORITE()) {
- if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
- DHD_ERROR(("wl_cfg80211_attach failed\n"));
- goto fail;
- }
- if (!NO_FW_REQ()) {
- strcpy(fw_path, wl_cfg80211_get_fwname());
- strcpy(nv_path, wl_cfg80211_get_nvramname());
- }
- }
-
- /* Set up the watchdog timer */
- init_timer(&dhd->timer);
- dhd->timer.data = (unsigned long) dhd;
- dhd->timer.function = dhd_watchdog;
-
- /* Initialize thread based operation and lock */
- sema_init(&dhd->sdsem, 1);
- if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0))
- dhd->threads_only = true;
- else
- dhd->threads_only = false;
-
- if (dhd_dpc_prio >= 0) {
- /* Initialize watchdog thread */
- sema_init(&dhd->watchdog_sem, 0);
- dhd->watchdog_tsk = kthread_run(dhd_watchdog_thread, dhd,
- "dhd_watchdog");
- if (IS_ERR(dhd->watchdog_tsk)) {
- printk(KERN_WARNING
- "dhd_watchdog thread failed to start\n");
- dhd->watchdog_tsk = NULL;
- }
- } else {
- dhd->watchdog_tsk = NULL;
- }
-
- /* Set up the bottom half handler */
- if (dhd_dpc_prio >= 0) {
- /* Initialize DPC thread */
- sema_init(&dhd->dpc_sem, 0);
- dhd->dpc_tsk = kthread_run(dhd_dpc_thread, dhd, "dhd_dpc");
- if (IS_ERR(dhd->dpc_tsk)) {
- printk(KERN_WARNING
- "dhd_dpc thread failed to start\n");
- dhd->dpc_tsk = NULL;
- }
- } else {
- tasklet_init(&dhd->tasklet, dhd_dpc, (unsigned long) dhd);
- dhd->dpc_tsk = NULL;
+ if (unlikely(brcmf_cfg80211_attach(net, &drvr_priv->pub))) {
+ BRCMF_ERROR(("brcmf_cfg80211_attach failed\n"));
+ goto fail;
}
- if (dhd_sysioc) {
- sema_init(&dhd->sysioc_sem, 0);
- dhd->sysioc_tsk = kthread_run(_dhd_sysioc_thread, dhd,
- "_dhd_sysioc");
- if (IS_ERR(dhd->sysioc_tsk)) {
+ if (brcmf_sysioc) {
+ sema_init(&drvr_priv->sysioc_sem, 0);
+ drvr_priv->sysioc_tsk = kthread_run(_brcmf_sysioc_thread, drvr_priv,
+ "_brcmf_sysioc");
+ if (IS_ERR(drvr_priv->sysioc_tsk)) {
printk(KERN_WARNING
- "_dhd_sysioc thread failed to start\n");
- dhd->sysioc_tsk = NULL;
+ "_brcmf_sysioc thread failed to start\n");
+ drvr_priv->sysioc_tsk = NULL;
}
} else
- dhd->sysioc_tsk = NULL;
+ drvr_priv->sysioc_tsk = NULL;
/*
- * Save the dhd_info into the priv
+ * Save the brcmf_info into the priv
*/
- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
+ memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));
-#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
- g_bus = bus;
-#endif
#if defined(CONFIG_PM_SLEEP)
- atomic_set(&dhd_mmc_suspend, false);
- if (!IS_CFG80211_FAVORITE())
- register_pm_notifier(&dhd_sleep_pm_notifier);
+ atomic_set(&brcmf_mmc_suspend, false);
#endif /* defined(CONFIG_PM_SLEEP) */
- /* && defined(DHD_GPL) */
- /* Init lock suspend to prevent kernel going to suspend */
-#ifdef CONFIG_HAS_EARLYSUSPEND
- dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
- dhd->early_suspend.suspend = dhd_early_suspend;
- dhd->early_suspend.resume = dhd_late_resume;
- register_early_suspend(&dhd->early_suspend);
-#endif
-
- return &dhd->pub;
+ return &drvr_priv->pub;
fail:
if (net)
free_netdev(net);
- if (dhd)
- dhd_detach(&dhd->pub);
+ if (drvr_priv)
+ brcmf_detach(&drvr_priv->pub);
return NULL;
}
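
[Editor's note] brcmf_attach() allocates the net_device with room for a single pointer (sizeof(drvr_priv)) and memcpy()s the address of the separately kzalloc()ed brcmf_info into that private area. Every netdev callback then recovers it with the double-pointer cast seen throughout this file. A minimal sketch of the two halves of that pattern, assuming an already-initialised net and drvr_priv:

	/* store: the netdev priv area holds only a pointer to the driver state */
	memcpy(netdev_priv(net), &drvr_priv, sizeof(drvr_priv));

	/* load: dereference the stored pointer back out in any netdev callback */
	struct brcmf_info *priv = *(struct brcmf_info **)netdev_priv(net);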
-int dhd_bus_start(dhd_pub_t *dhdp)
+int brcmf_bus_start(struct brcmf_pub *drvr)
{
int ret = -1;
- dhd_info_t *dhd = (dhd_info_t *) dhdp->info;
-#ifdef EMBEDDED_PLATFORM
- char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" +
- '\0' + bitvec */
-#endif /* EMBEDDED_PLATFORM */
-
- ASSERT(dhd);
-
- DHD_TRACE(("%s:\n", __func__));
-
- /* try to download image and nvram to the dongle */
- if (dhd->pub.busstate == DHD_BUS_DOWN) {
- if (!(dhd_bus_download_firmware(dhd->pub.bus,
- fw_path, nv_path))) {
- DHD_ERROR(("%s: dhdsdio_probe_download failed. "
- "firmware = %s nvram = %s\n",
- __func__, fw_path, nv_path));
- return -1;
- }
- }
+ struct brcmf_info *drvr_priv = drvr->info;
+ /* Room for "event_msgs" + '\0' + bitvec */
+ char iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
- /* Start the watchdog timer */
- dhd->pub.tickcnt = 0;
- dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+ BRCMF_TRACE(("%s:\n", __func__));
/* Bring up the bus */
- ret = dhd_bus_init(&dhd->pub, true);
+ ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub, true);
if (ret != 0) {
- DHD_ERROR(("%s, dhd_bus_init failed %d\n", __func__, ret));
+ BRCMF_ERROR(("%s, brcmf_sdbrcm_bus_init failed %d\n", __func__,
+ ret));
return ret;
}
-#if defined(OOB_INTR_ONLY)
- /* Host registration for OOB interrupt */
- if (bcmsdh_register_oob_intr(dhdp)) {
- del_timer_sync(&dhd->timer);
- dhd->wd_timer_valid = false;
- DHD_ERROR(("%s Host failed to resgister for OOB\n", __func__));
- return -ENODEV;
- }
-
- /* Enable oob at firmware */
- dhd_enable_oob_intr(dhd->pub.bus, true);
-#endif /* defined(OOB_INTR_ONLY) */
/* If bus is not ready, can't come up */
- if (dhd->pub.busstate != DHD_BUS_DATA) {
- del_timer_sync(&dhd->timer);
- dhd->wd_timer_valid = false;
- DHD_ERROR(("%s failed bus is not ready\n", __func__));
+ if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) {
+ BRCMF_ERROR(("%s failed bus is not ready\n", __func__));
return -ENODEV;
}
-#ifdef EMBEDDED_PLATFORM
- bcm_mkiovar("event_msgs", dhdp->eventmask, WL_EVENTING_MASK_LEN, iovbuf,
- sizeof(iovbuf));
- dhdcdc_query_ioctl(dhdp, 0, WLC_GET_VAR, iovbuf, sizeof(iovbuf));
- memcpy(dhdp->eventmask, iovbuf, WL_EVENTING_MASK_LEN);
-
- setbit(dhdp->eventmask, WLC_E_SET_SSID);
- setbit(dhdp->eventmask, WLC_E_PRUNE);
- setbit(dhdp->eventmask, WLC_E_AUTH);
- setbit(dhdp->eventmask, WLC_E_REASSOC);
- setbit(dhdp->eventmask, WLC_E_REASSOC_IND);
- setbit(dhdp->eventmask, WLC_E_DEAUTH_IND);
- setbit(dhdp->eventmask, WLC_E_DISASSOC_IND);
- setbit(dhdp->eventmask, WLC_E_DISASSOC);
- setbit(dhdp->eventmask, WLC_E_JOIN);
- setbit(dhdp->eventmask, WLC_E_ASSOC_IND);
- setbit(dhdp->eventmask, WLC_E_PSK_SUP);
- setbit(dhdp->eventmask, WLC_E_LINK);
- setbit(dhdp->eventmask, WLC_E_NDIS_LINK);
- setbit(dhdp->eventmask, WLC_E_MIC_ERROR);
- setbit(dhdp->eventmask, WLC_E_PMKID_CACHE);
- setbit(dhdp->eventmask, WLC_E_TXFAIL);
- setbit(dhdp->eventmask, WLC_E_JOIN_START);
- setbit(dhdp->eventmask, WLC_E_SCAN_COMPLETE);
-#ifdef PNO_SUPPORT
- setbit(dhdp->eventmask, WLC_E_PFN_NET_FOUND);
-#endif /* PNO_SUPPORT */
+
+ brcmu_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
+ iovbuf, sizeof(iovbuf));
+ brcmf_proto_cdc_query_ioctl(drvr, 0, BRCMF_C_GET_VAR, iovbuf,
+ sizeof(iovbuf));
+ memcpy(drvr->eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN);
+
+ setbit(drvr->eventmask, BRCMF_E_SET_SSID);
+ setbit(drvr->eventmask, BRCMF_E_PRUNE);
+ setbit(drvr->eventmask, BRCMF_E_AUTH);
+ setbit(drvr->eventmask, BRCMF_E_REASSOC);
+ setbit(drvr->eventmask, BRCMF_E_REASSOC_IND);
+ setbit(drvr->eventmask, BRCMF_E_DEAUTH_IND);
+ setbit(drvr->eventmask, BRCMF_E_DISASSOC_IND);
+ setbit(drvr->eventmask, BRCMF_E_DISASSOC);
+ setbit(drvr->eventmask, BRCMF_E_JOIN);
+ setbit(drvr->eventmask, BRCMF_E_ASSOC_IND);
+ setbit(drvr->eventmask, BRCMF_E_PSK_SUP);
+ setbit(drvr->eventmask, BRCMF_E_LINK);
+ setbit(drvr->eventmask, BRCMF_E_NDIS_LINK);
+ setbit(drvr->eventmask, BRCMF_E_MIC_ERROR);
+ setbit(drvr->eventmask, BRCMF_E_PMKID_CACHE);
+ setbit(drvr->eventmask, BRCMF_E_TXFAIL);
+ setbit(drvr->eventmask, BRCMF_E_JOIN_START);
+ setbit(drvr->eventmask, BRCMF_E_SCAN_COMPLETE);
/* enable dongle roaming event */
- dhdp->pktfilter_count = 1;
+ drvr->pktfilter_count = 1;
/* Setup filter to allow only unicast */
- dhdp->pktfilter[0] = "100 0 0 0 0x01 0x00";
-#endif /* EMBEDDED_PLATFORM */
+ drvr->pktfilter[0] = "100 0 0 0 0x01 0x00";
/* Bus is ready, do any protocol initialization */
- ret = dhd_prot_init(&dhd->pub);
+ ret = brcmf_proto_init(&drvr_priv->pub);
if (ret < 0)
return ret;
return 0;
}
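
[Editor's note] The event mask manipulated above is a plain bit vector of BRCMF_EVENTING_MASK_LEN bytes, indexed by the BRCMF_E_* event numbers. The setbit() helper it relies on is assumed to come from the driver's shared utility headers; a common form is shown here purely to make the layout explicit:

	/* Assumed definition, for illustration: mark event number 'i' in a byte array
	 * so the dongle forwards that event class to the host.
	 */
	#define setbit(a, i)	(((u8 *)(a))[(i) / 8] |= 1 << ((i) % 8))

	/* e.g. setbit(drvr->eventmask, BRCMF_E_LINK) sets bit (BRCMF_E_LINK % 8)
	 * of byte (BRCMF_E_LINK / 8) in the mask sent back via "event_msgs".
	 */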
-int
-dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len,
- int set)
-{
- char buf[strlen(name) + 1 + cmd_len];
- int len = sizeof(buf);
- wl_ioctl_t ioc;
- int ret;
-
- len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
-
- memset(&ioc, 0, sizeof(ioc));
-
- ioc.cmd = set ? WLC_SET_VAR : WLC_GET_VAR;
- ioc.buf = buf;
- ioc.len = len;
- ioc.set = set;
-
- ret = dhd_prot_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
- if (!set && ret >= 0)
- memcpy(cmd_buf, buf, cmd_len);
-
- return ret;
-}
-
-static struct net_device_ops dhd_ops_pri = {
- .ndo_open = dhd_open,
- .ndo_stop = dhd_stop,
- .ndo_get_stats = dhd_get_stats,
- .ndo_do_ioctl = dhd_ioctl_entry,
- .ndo_start_xmit = dhd_start_xmit,
- .ndo_set_mac_address = dhd_set_mac_address,
- .ndo_set_multicast_list = dhd_set_multicast_list
+static struct net_device_ops brcmf_netdev_ops_pri = {
+ .ndo_open = brcmf_netdev_open,
+ .ndo_stop = brcmf_netdev_stop,
+ .ndo_get_stats = brcmf_netdev_get_stats,
+ .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
+ .ndo_start_xmit = brcmf_netdev_start_xmit,
+ .ndo_set_mac_address = brcmf_netdev_set_mac_address,
+ .ndo_set_multicast_list = brcmf_netdev_set_multicast_list
};
-int dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
+int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
{
- dhd_info_t *dhd = (dhd_info_t *) dhdp->info;
+ struct brcmf_info *drvr_priv = drvr->info;
struct net_device *net;
u8 temp_addr[ETH_ALEN] = {
0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
- DHD_TRACE(("%s: ifidx %d\n", __func__, ifidx));
-
- ASSERT(dhd && dhd->iflist[ifidx]);
+ BRCMF_TRACE(("%s: ifidx %d\n", __func__, ifidx));
- net = dhd->iflist[ifidx]->net;
- ASSERT(net);
-
- ASSERT(!net->netdev_ops);
- net->netdev_ops = &dhd_ops_pri;
+ net = drvr_priv->iflist[ifidx]->net;
+ net->netdev_ops = &brcmf_netdev_ops_pri;
/*
* We have to use the primary MAC for virtual interfaces
*/
if (ifidx != 0) {
/* for virtual interfaces use the primary MAC */
- memcpy(temp_addr, dhd->pub.mac, ETH_ALEN);
+ memcpy(temp_addr, drvr_priv->pub.mac, ETH_ALEN);
}
if (ifidx == 1) {
- DHD_TRACE(("%s ACCESS POINT MAC: \n", __func__));
+ BRCMF_TRACE(("%s ACCESS POINT MAC:\n", __func__));
/* ACCESSPOINT INTERFACE CASE */
temp_addr[0] |= 0X02; /* set bit 2 ,
- Locally Administered address */
}
- net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
- net->ethtool_ops = &dhd_ethtool_ops;
+ net->hard_header_len = ETH_HLEN + drvr_priv->pub.hdrlen;
+ net->ethtool_ops = &brcmf_ethtool_ops;
- dhd->pub.rxsz = net->mtu + net->hard_header_len + dhd->pub.hdrlen;
+ drvr_priv->pub.rxsz = net->mtu + net->hard_header_len +
+ drvr_priv->pub.hdrlen;
memcpy(net->dev_addr, temp_addr, ETH_ALEN);
if (register_netdev(net) != 0) {
- DHD_ERROR(("%s: couldn't register the net device\n",
- __func__));
+ BRCMF_ERROR(("%s: couldn't register the net device\n",
+ __func__));
goto fail;
}
- DHD_INFO(("%s: Broadcom Dongle Host Driver\n", net->name));
+ BRCMF_INFO(("%s: Broadcom Dongle Host Driver\n", net->name));
return 0;
@@ -2221,221 +1491,139 @@ fail:
return -EBADE;
}
-void dhd_bus_detach(dhd_pub_t *dhdp)
+static void brcmf_bus_detach(struct brcmf_pub *drvr)
{
- dhd_info_t *dhd;
+ struct brcmf_info *drvr_priv;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- if (dhdp) {
- dhd = (dhd_info_t *) dhdp->info;
- if (dhd) {
+ if (drvr) {
+ drvr_priv = drvr->info;
+ if (drvr_priv) {
/* Stop the protocol module */
- dhd_prot_stop(&dhd->pub);
+ brcmf_proto_stop(&drvr_priv->pub);
/* Stop the bus module */
- dhd_bus_stop(dhd->pub.bus, true);
-#if defined(OOB_INTR_ONLY)
- bcmsdh_unregister_oob_intr();
-#endif /* defined(OOB_INTR_ONLY) */
-
- /* Clear the watchdog timer */
- del_timer_sync(&dhd->timer);
- dhd->wd_timer_valid = false;
+ brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus, true);
}
}
}
-void dhd_detach(dhd_pub_t *dhdp)
+void brcmf_detach(struct brcmf_pub *drvr)
{
- dhd_info_t *dhd;
+ struct brcmf_info *drvr_priv;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- if (dhdp) {
- dhd = (dhd_info_t *) dhdp->info;
- if (dhd) {
- dhd_if_t *ifp;
+ if (drvr) {
+ drvr_priv = drvr->info;
+ if (drvr_priv) {
+ struct brcmf_if *ifp;
int i;
-#if defined(CONFIG_HAS_EARLYSUSPEND)
- if (dhd->early_suspend.suspend)
- unregister_early_suspend(&dhd->early_suspend);
-#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
-
- for (i = 1; i < DHD_MAX_IFS; i++)
- if (dhd->iflist[i])
- dhd_del_if(dhd, i);
+ for (i = 1; i < BRCMF_MAX_IFS; i++)
+ if (drvr_priv->iflist[i])
+ brcmf_del_if(drvr_priv, i);
- ifp = dhd->iflist[0];
- ASSERT(ifp);
- if (ifp->net->netdev_ops == &dhd_ops_pri) {
- dhd_stop(ifp->net);
+ ifp = drvr_priv->iflist[0];
+ if (ifp->net->netdev_ops == &brcmf_netdev_ops_pri) {
+ brcmf_netdev_stop(ifp->net);
unregister_netdev(ifp->net);
}
- if (dhd->watchdog_tsk) {
- send_sig(SIGTERM, dhd->watchdog_tsk, 1);
- kthread_stop(dhd->watchdog_tsk);
- dhd->watchdog_tsk = NULL;
+ if (drvr_priv->sysioc_tsk) {
+ send_sig(SIGTERM, drvr_priv->sysioc_tsk, 1);
+ kthread_stop(drvr_priv->sysioc_tsk);
+ drvr_priv->sysioc_tsk = NULL;
}
- if (dhd->dpc_tsk) {
- send_sig(SIGTERM, dhd->dpc_tsk, 1);
- kthread_stop(dhd->dpc_tsk);
- dhd->dpc_tsk = NULL;
- } else
- tasklet_kill(&dhd->tasklet);
-
- if (dhd->sysioc_tsk) {
- send_sig(SIGTERM, dhd->sysioc_tsk, 1);
- kthread_stop(dhd->sysioc_tsk);
- dhd->sysioc_tsk = NULL;
- }
+ brcmf_bus_detach(drvr);
- dhd_bus_detach(dhdp);
+ if (drvr->prot)
+ brcmf_proto_detach(drvr);
- if (dhdp->prot)
- dhd_prot_detach(dhdp);
+ brcmf_cfg80211_detach();
-#if defined(CONFIG_WIRELESS_EXT)
- wl_iw_detach();
-#endif /* (CONFIG_WIRELESS_EXT) */
-
- if (IS_CFG80211_FAVORITE())
- wl_cfg80211_detach();
-
-#if defined(CONFIG_PM_SLEEP)
- if (!IS_CFG80211_FAVORITE())
- unregister_pm_notifier(&dhd_sleep_pm_notifier);
-#endif /* defined(CONFIG_PM_SLEEP) */
- /* && defined(DHD_GPL) */
free_netdev(ifp->net);
kfree(ifp);
- kfree(dhd);
+ kfree(drvr_priv);
}
}
}
-static void __exit dhd_module_cleanup(void)
+static void __exit brcmf_module_cleanup(void)
{
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- dhd_bus_unregister();
-#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
- wifi_del_dev();
-#endif
- /* Call customer gpio to turn off power with WL_REG_ON signal */
- dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+ brcmf_bus_unregister();
}
-static int __init dhd_module_init(void)
+static int __init brcmf_module_init(void)
{
int error;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- /* Sanity check on the module parameters */
- do {
- /* Both watchdog and DPC as tasklets are ok */
- if ((dhd_watchdog_prio < 0) && (dhd_dpc_prio < 0))
- break;
-
- /* If both watchdog and DPC are threads, TX must be deferred */
- if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0)
- && dhd_deferred_tx)
- break;
-
- DHD_ERROR(("Invalid module parameters.\n"));
- return -EINVAL;
- } while (0);
- /* Call customer gpio to turn on power with WL_REG_ON signal */
- dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON);
-
-#if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
- sema_init(&wifi_control_sem, 0);
-
- error = wifi_add_dev();
- if (error) {
- DHD_ERROR(("%s: platform_driver_register failed\n", __func__));
- goto failed;
- }
-
- /* Waiting callback after platform_driver_register is done or
- exit with error */
- if (down_timeout(&wifi_control_sem, msecs_to_jiffies(1000)) != 0) {
- printk(KERN_ERR "%s: platform_driver_register timeout\n",
- __func__);
- /* remove device */
- wifi_del_dev();
- goto failed;
- }
-#endif /* #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC) */
-
- error = dhd_bus_register();
+ error = brcmf_bus_register();
if (error) {
- DHD_ERROR(("%s: sdio_register_driver failed\n", __func__));
+ BRCMF_ERROR(("%s: brcmf_bus_register failed\n", __func__));
goto failed;
}
- return error;
+ return 0;
failed:
- /* turn off power and exit */
- dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
return -EINVAL;
}
-module_init(dhd_module_init);
-module_exit(dhd_module_cleanup);
+module_init(brcmf_module_init);
+module_exit(brcmf_module_cleanup);
-/*
- * OS specific functions required to implement DHD driver in OS independent way
- */
-int dhd_os_proto_block(dhd_pub_t *pub)
+int brcmf_os_proto_block(struct brcmf_pub *drvr)
{
- dhd_info_t *dhd = (dhd_info_t *) (pub->info);
+ struct brcmf_info *drvr_priv = drvr->info;
- if (dhd) {
- down(&dhd->proto_sem);
+ if (drvr_priv) {
+ down(&drvr_priv->proto_sem);
return 1;
}
return 0;
}
-int dhd_os_proto_unblock(dhd_pub_t *pub)
+int brcmf_os_proto_unblock(struct brcmf_pub *drvr)
{
- dhd_info_t *dhd = (dhd_info_t *) (pub->info);
+ struct brcmf_info *drvr_priv = drvr->info;
- if (dhd) {
- up(&dhd->proto_sem);
+ if (drvr_priv) {
+ up(&drvr_priv->proto_sem);
return 1;
}
return 0;
}
-unsigned int dhd_os_get_ioctl_resp_timeout(void)
+unsigned int brcmf_os_get_ioctl_resp_timeout(void)
{
- return (unsigned int)dhd_ioctl_timeout_msec;
+ return (unsigned int)brcmf_ioctl_timeout_msec;
}
-void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
+void brcmf_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
{
- dhd_ioctl_timeout_msec = (int)timeout_msec;
+ brcmf_ioctl_timeout_msec = (int)timeout_msec;
}
-int dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
+int brcmf_os_ioctl_resp_wait(struct brcmf_pub *drvr, uint *condition,
+ bool *pending)
{
- dhd_info_t *dhd = (dhd_info_t *) (pub->info);
+ struct brcmf_info *drvr_priv = drvr->info;
DECLARE_WAITQUEUE(wait, current);
- int timeout = dhd_ioctl_timeout_msec;
+ int timeout = brcmf_ioctl_timeout_msec;
/* Convert timeout in milliseconds to jiffies */
timeout = timeout * HZ / 1000;
/* Wait until control frame is available */
- add_wait_queue(&dhd->ioctl_resp_wait, &wait);
+ add_wait_queue(&drvr_priv->ioctl_resp_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
while (!(*condition) && (!signal_pending(current) && timeout))
@@ -2445,366 +1633,59 @@ int dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
*pending = true;
set_current_state(TASK_RUNNING);
- remove_wait_queue(&dhd->ioctl_resp_wait, &wait);
+ remove_wait_queue(&drvr_priv->ioctl_resp_wait, &wait);
return timeout;
}
-int dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
+int brcmf_os_ioctl_resp_wake(struct brcmf_pub *drvr)
{
- dhd_info_t *dhd = (dhd_info_t *) (pub->info);
+ struct brcmf_info *drvr_priv = drvr->info;
- if (waitqueue_active(&dhd->ioctl_resp_wait))
- wake_up_interruptible(&dhd->ioctl_resp_wait);
+ if (waitqueue_active(&drvr_priv->ioctl_resp_wait))
+ wake_up_interruptible(&drvr_priv->ioctl_resp_wait);
return 0;
}
-void dhd_os_wd_timer(void *bus, uint wdtick)
-{
- dhd_pub_t *pub = bus;
- static uint save_dhd_watchdog_ms;
- dhd_info_t *dhd = (dhd_info_t *) pub->info;
-
- /* don't start the wd until fw is loaded */
- if (pub->busstate == DHD_BUS_DOWN)
- return;
-
- /* Totally stop the timer */
- if (!wdtick && dhd->wd_timer_valid == true) {
- del_timer_sync(&dhd->timer);
- dhd->wd_timer_valid = false;
- save_dhd_watchdog_ms = wdtick;
- return;
- }
-
- if (wdtick) {
- dhd_watchdog_ms = (uint) wdtick;
-
- if (save_dhd_watchdog_ms != dhd_watchdog_ms) {
-
- if (dhd->wd_timer_valid == true)
- /* Stop timer and restart at new value */
- del_timer_sync(&dhd->timer);
-
- /* Create timer again when watchdog period is
- dynamically changed or in the first instance
- */
- dhd->timer.expires =
- jiffies + dhd_watchdog_ms * HZ / 1000;
- add_timer(&dhd->timer);
-
- } else {
- /* Re arm the timer, at last watchdog period */
- mod_timer(&dhd->timer,
- jiffies + dhd_watchdog_ms * HZ / 1000);
- }
-
- dhd->wd_timer_valid = true;
- save_dhd_watchdog_ms = wdtick;
- }
-}
-
-void *dhd_os_open_image(char *filename)
-{
- struct file *fp;
-
- if (IS_CFG80211_FAVORITE() && !NO_FW_REQ())
- return wl_cfg80211_request_fw(filename);
-
- fp = filp_open(filename, O_RDONLY, 0);
- /*
- * 2.6.11 (FC4) supports filp_open() but later revs don't?
- * Alternative:
- * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
- * ???
- */
- if (IS_ERR(fp))
- fp = NULL;
-
- return fp;
-}
-
-int dhd_os_get_image_block(char *buf, int len, void *image)
-{
- struct file *fp = (struct file *)image;
- int rdlen;
-
- if (IS_CFG80211_FAVORITE() && !NO_FW_REQ())
- return wl_cfg80211_read_fw(buf, len);
-
- if (!image)
- return 0;
-
- rdlen = kernel_read(fp, fp->f_pos, buf, len);
- if (rdlen > 0)
- fp->f_pos += rdlen;
-
- return rdlen;
-}
-
-void dhd_os_close_image(void *image)
-{
- if (IS_CFG80211_FAVORITE() && !NO_FW_REQ())
- return wl_cfg80211_release_fw();
- if (image)
- filp_close((struct file *)image, NULL);
-}
-
-void dhd_os_sdlock(dhd_pub_t *pub)
-{
- dhd_info_t *dhd;
-
- dhd = (dhd_info_t *) (pub->info);
-
- if (dhd->threads_only)
- down(&dhd->sdsem);
- else
- spin_lock_bh(&dhd->sdlock);
-}
-
-void dhd_os_sdunlock(dhd_pub_t *pub)
-{
- dhd_info_t *dhd;
-
- dhd = (dhd_info_t *) (pub->info);
-
- if (dhd->threads_only)
- up(&dhd->sdsem);
- else
- spin_unlock_bh(&dhd->sdlock);
-}
-
-void dhd_os_sdlock_txq(dhd_pub_t *pub)
-{
- dhd_info_t *dhd;
-
- dhd = (dhd_info_t *) (pub->info);
- spin_lock_bh(&dhd->txqlock);
-}
-
-void dhd_os_sdunlock_txq(dhd_pub_t *pub)
-{
- dhd_info_t *dhd;
-
- dhd = (dhd_info_t *) (pub->info);
- spin_unlock_bh(&dhd->txqlock);
-}
-
-void dhd_os_sdlock_rxq(dhd_pub_t *pub)
-{
-}
-
-void dhd_os_sdunlock_rxq(dhd_pub_t *pub)
-{
-}
-
-void dhd_os_sdtxlock(dhd_pub_t *pub)
-{
- dhd_os_sdlock(pub);
-}
-
-void dhd_os_sdtxunlock(dhd_pub_t *pub)
-{
- dhd_os_sdunlock(pub);
-}
-
-static int
-dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
- wl_event_msg_t *event, void **data)
+static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
+ struct brcmf_event_msg *event, void **data)
{
int bcmerror = 0;
- ASSERT(dhd != NULL);
-
- bcmerror = wl_host_event(dhd, ifidx, pktdata, event, data);
+ bcmerror = brcmf_c_host_event(drvr_priv, ifidx, pktdata, event, data);
if (bcmerror != 0)
return bcmerror;
-#if defined(CONFIG_WIRELESS_EXT)
- if (!IS_CFG80211_FAVORITE()) {
- if ((dhd->iflist[*ifidx] == NULL)
- || (dhd->iflist[*ifidx]->net == NULL)) {
- DHD_ERROR(("%s Exit null pointer\n", __func__));
- return bcmerror;
- }
-
- if (dhd->iflist[*ifidx]->net)
- wl_iw_event(dhd->iflist[*ifidx]->net, event, *data);
- }
-#endif /* defined(CONFIG_WIRELESS_EXT) */
-
- if (IS_CFG80211_FAVORITE()) {
- ASSERT(dhd->iflist[*ifidx] != NULL);
- ASSERT(dhd->iflist[*ifidx]->net != NULL);
- if (dhd->iflist[*ifidx]->net)
- wl_cfg80211_event(dhd->iflist[*ifidx]->net, event,
- *data);
- }
+ if (drvr_priv->iflist[*ifidx]->net)
+ brcmf_cfg80211_event(drvr_priv->iflist[*ifidx]->net,
+ event, *data);
return bcmerror;
}
-/* send up locally generated event */
-void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
-{
- switch (be32_to_cpu(event->event_type)) {
- default:
- break;
- }
-}
-
-void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
+int brcmf_netdev_reset(struct net_device *dev, u8 flag)
{
- struct dhd_info *dhdinfo = dhd->info;
- dhd_os_sdunlock(dhd);
- wait_event_interruptible_timeout(dhdinfo->ctrl_wait,
- (*lockvar == false), HZ * 2);
- dhd_os_sdlock(dhd);
- return;
-}
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)netdev_priv(dev);
-void dhd_wait_event_wakeup(dhd_pub_t *dhd)
-{
- struct dhd_info *dhdinfo = dhd->info;
- if (waitqueue_active(&dhdinfo->ctrl_wait))
- wake_up_interruptible(&dhdinfo->ctrl_wait);
- return;
-}
-
-int dhd_dev_reset(struct net_device *dev, u8 flag)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-
- /* Turning off watchdog */
- if (flag)
- dhd_os_wd_timer(&dhd->pub, 0);
-
- dhd_bus_devreset(&dhd->pub, flag);
-
- /* Turning on watchdog back */
- if (!flag)
- dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
- DHD_ERROR(("%s: WLAN OFF DONE\n", __func__));
+ brcmf_bus_devreset(&drvr_priv->pub, flag);
return 1;
}
-int net_os_set_suspend_disable(struct net_device *dev, int val)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- int ret = 0;
-
- if (dhd) {
- ret = dhd->pub.suspend_disable_flag;
- dhd->pub.suspend_disable_flag = val;
- }
- return ret;
-}
-
-int net_os_set_suspend(struct net_device *dev, int val)
+static int brcmf_get_pend_8021x_cnt(struct brcmf_info *drvr_priv)
{
- int ret = 0;
-#if defined(CONFIG_HAS_EARLYSUSPEND)
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-
- if (dhd) {
- dhd_os_proto_block(&dhd->pub);
- ret = dhd_set_suspend(val, &dhd->pub);
- dhd_os_proto_unblock(&dhd->pub);
- }
-#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */
- return ret;
-}
-
-int net_os_set_dtim_skip(struct net_device *dev, int val)
-{
- dhd_info_t *dhd = *(dhd_info_t **) netdev_priv(dev);
-
- if (dhd)
- dhd->pub.dtim_skip = val;
-
- return 0;
-}
-
-int net_os_set_packet_filter(struct net_device *dev, int val)
-{
- dhd_info_t *dhd = *(dhd_info_t **) netdev_priv(dev);
- int ret = 0;
-
- /* Packet filtering is set only if we still in early-suspend and
- * we need either to turn it ON or turn it OFF
- * We can always turn it OFF in case of early-suspend, but we turn it
- * back ON only if suspend_disable_flag was not set
- */
- if (dhd && dhd->pub.up) {
- dhd_os_proto_block(&dhd->pub);
- if (dhd->pub.in_suspend) {
- if (!val || (val && !dhd->pub.suspend_disable_flag))
- dhd_set_packet_filter(val, &dhd->pub);
- }
- dhd_os_proto_unblock(&dhd->pub);
- }
- return ret;
-}
-
-void dhd_dev_init_ioctl(struct net_device *dev)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-
- dhd_preinit_ioctls(&dhd->pub);
-}
-
-#ifdef PNO_SUPPORT
-/* Linux wrapper to call common dhd_pno_clean */
-int dhd_dev_pno_reset(struct net_device *dev)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-
- return dhd_pno_clean(&dhd->pub);
-}
-
-/* Linux wrapper to call common dhd_pno_enable */
-int dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-
- return dhd_pno_enable(&dhd->pub, pfn_enabled);
-}
-
-/* Linux wrapper to call common dhd_pno_set */
-int
-dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t *ssids_local, int nssid,
- unsigned char scan_fr)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-
- return dhd_pno_set(&dhd->pub, ssids_local, nssid, scan_fr);
-}
-
-/* Linux wrapper to get pno status */
-int dhd_dev_get_pno_status(struct net_device *dev)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-
- return dhd_pno_get_status(&dhd->pub);
-}
-
-#endif /* PNO_SUPPORT */
-
-static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
-{
- return atomic_read(&dhd->pend_8021x_cnt);
+ return atomic_read(&drvr_priv->pend_8021x_cnt);
}
#define MAX_WAIT_FOR_8021X_TX 10
-int dhd_wait_pend8021x(struct net_device *dev)
+int brcmf_netdev_wait_pend8021x(struct net_device *dev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ struct brcmf_info *drvr_priv = *(struct brcmf_info **)netdev_priv(dev);
int timeout = 10 * HZ / 1000;
int ntimes = MAX_WAIT_FOR_8021X_TX;
- int pend = dhd_get_pend_8021x_cnt(dhd);
+ int pend = brcmf_get_pend_8021x_cnt(drvr_priv);
while (ntimes && pend) {
if (pend) {
@@ -2813,20 +1694,13 @@ int dhd_wait_pend8021x(struct net_device *dev)
set_current_state(TASK_RUNNING);
ntimes--;
}
- pend = dhd_get_pend_8021x_cnt(dhd);
+ pend = brcmf_get_pend_8021x_cnt(drvr_priv);
}
return pend;
}
-void wl_os_wd_timer(struct net_device *ndev, uint wdtick)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
-
- dhd_os_wd_timer(&dhd->pub, wdtick);
-}
-
-#ifdef DHD_DEBUG
-int write_to_file(dhd_pub_t *dhd, u8 *buf, int size)
+#ifdef BCMDBG
+int brcmf_write_to_file(struct brcmf_pub *drvr, u8 *buf, int size)
{
int ret = 0;
struct file *fp;
@@ -2840,7 +1714,7 @@ int write_to_file(dhd_pub_t *dhd, u8 *buf, int size)
/* open file to write */
fp = filp_open("/tmp/mem_dump", O_WRONLY | O_CREAT, 0640);
if (!fp) {
- DHD_ERROR(("%s: open file error\n", __func__));
+ BRCMF_ERROR(("%s: open file error\n", __func__));
ret = -1;
goto exit;
}
@@ -2859,4 +1733,4 @@ exit:
return ret;
}
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux_sched.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux_sched.c
deleted file mode 100644
index c66f1c2941e..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux_sched.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-
-int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
-{
- int rc = 0;
- rc = sched_setscheduler(p, policy, param);
- return rc;
-}
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_proto.h b/drivers/staging/brcm80211/brcmfmac/dhd_proto.h
index 030d5ffb0e8..ff788b37afd 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_proto.h
@@ -14,11 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _dhd_proto_h_
-#define _dhd_proto_h_
-
-#include <dhdioctl.h>
-#include <wlioctl.h>
+#ifndef _BRCMF_PROTO_H_
+#define _BRCMF_PROTO_H_
#ifndef IOCTL_RESP_TIMEOUT
#define IOCTL_RESP_TIMEOUT 2000 /* In milliseconds */
@@ -29,62 +26,50 @@
#endif
/*
- * Exported from the dhd protocol module (dhd_cdc, dhd_rndis)
+ * Exported from the brcmf protocol module (brcmf_cdc)
*/
/* Linkage, sets prot link and updates hdrlen in pub */
-extern int dhd_prot_attach(dhd_pub_t *dhdp);
+extern int brcmf_proto_attach(struct brcmf_pub *drvr);
-/* Unlink, frees allocated protocol memory (including dhd_prot) */
-extern void dhd_prot_detach(dhd_pub_t *dhdp);
+/* Unlink, frees allocated protocol memory (including brcmf_proto) */
+extern void brcmf_proto_detach(struct brcmf_pub *drvr);
/* Initialize protocol: sync w/dongle state.
* Sets dongle media info (iswl, drv_version, mac address).
*/
-extern int dhd_prot_init(dhd_pub_t *dhdp);
+extern int brcmf_proto_init(struct brcmf_pub *drvr);
/* Stop protocol: sync w/dongle state. */
-extern void dhd_prot_stop(dhd_pub_t *dhdp);
+extern void brcmf_proto_stop(struct brcmf_pub *drvr);
/* Add any protocol-specific data header.
* Caller must reserve prot_hdrlen prepend space.
*/
-extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, struct sk_buff *txp);
+extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx,
+ struct sk_buff *txp);
/* Remove any protocol-specific data header. */
-extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, struct sk_buff *rxp);
+extern int brcmf_proto_hdrpull(struct brcmf_pub *, int *ifidx,
+ struct sk_buff *rxp);
/* Use protocol to issue ioctl to dongle */
-extern int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc,
- void *buf, int len);
-
-/* Check for and handle local prot-specific iovar commands */
-extern int dhd_prot_iovar_op(dhd_pub_t *dhdp, const char *name,
- void *params, int plen, void *arg, int len,
- bool set);
+extern int brcmf_proto_ioctl(struct brcmf_pub *drvr, int ifidx,
+ struct brcmf_ioctl *ioc, void *buf, int len);
/* Add prot dump output to a buffer */
-extern void dhd_prot_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+extern void brcmf_proto_dump(struct brcmf_pub *drvr,
+ struct brcmu_strbuf *strbuf);
/* Update local copy of dongle statistics */
-extern void dhd_prot_dstats(dhd_pub_t *dhdp);
+extern void brcmf_proto_dstats(struct brcmf_pub *drvr);
-extern int dhd_ioctl(dhd_pub_t *dhd_pub, dhd_ioctl_t *ioc, void *buf,
- uint buflen);
+extern int brcmf_c_ioctl(struct brcmf_pub *drvr, struct brcmf_c_ioctl *ioc,
+ void *buf, uint buflen);
-extern int dhd_preinit_ioctls(dhd_pub_t *dhd);
+extern int brcmf_c_preinit_ioctls(struct brcmf_pub *drvr);
-/********************************
- * For version-string expansion *
- */
-#if defined(BDC)
-#define DHD_PROTOCOL "bdc"
-#elif defined(CDC)
-#define DHD_PROTOCOL "cdc"
-#elif defined(RNDIS)
-#define DHD_PROTOCOL "rndis"
-#else
-#define DHD_PROTOCOL "unknown"
-#endif /* proto */
-
-#endif /* _dhd_proto_h_ */
+extern int brcmf_proto_cdc_set_ioctl(struct brcmf_pub *drvr, int ifidx,
+ uint cmd, void *buf, uint len);
+
+#endif /* _BRCMF_PROTO_H_ */
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c b/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c
index a71c6f8ee8a..7fa95b6213c 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_sdio.c
@@ -16,47 +16,145 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/pci_ids.h>
#include <linux/netdevice.h>
-#include <bcmdefs.h>
-#include <bcmsdh.h>
-
-#ifdef BCMEMBEDIMAGE
-#include BCMEMBEDIMAGE
-#endif /* BCMEMBEDIMAGE */
-
-#include <bcmdefs.h>
-#include <bcmutils.h>
-#include <bcmdevs.h>
-
-#include <hndsoc.h>
-#ifdef DHD_DEBUG
-#include <hndrte_armtrap.h>
-#include <hndrte_cons.h>
-#endif /* DHD_DEBUG */
-#include <sbchipc.h>
-#include <sbhnddma.h>
-
-#include <sdio.h>
-#include <sbsdio.h>
-#include <sbsdpcmdev.h>
-#include <bcmsdpcm.h>
-
-#include <proto/802.11.h>
-
-#include <dngl_stats.h>
-#include <dhd.h>
-#include <dhd_bus.h>
-#include <dhd_proto.h>
-#include <dhd_dbg.h>
-#include <dhdioctl.h>
-#include <sdiovar.h>
-#include <bcmchip.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/semaphore.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+#include <defs.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <brcm_hw_ids.h>
+#include <soc.h>
+#include "sdio_host.h"
+
+/* register access macros */
+#ifndef __BIG_ENDIAN
+#ifndef __mips__
+#define R_REG(r, typ) \
+ brcmf_sdcard_reg_read(NULL, (r), sizeof(typ))
+#else /* __mips__ */
+#define R_REG(r, typ) \
+ ({ \
+ __typeof(*(r)) __osl_v; \
+ __asm__ __volatile__("sync"); \
+ __osl_v = brcmf_sdcard_reg_read(NULL, (r),\
+ sizeof(typ)); \
+ __asm__ __volatile__("sync"); \
+ __osl_v; \
+ })
+#endif /* __mips__ */
+
+#else /* __BIG_ENDIAN */
+#define R_REG(r, typ) \
+ brcmf_sdcard_reg_read(NULL, (r), sizeof(typ))
+#endif /* __BIG_ENDIAN */
+
+#define OR_REG(r, v, typ) \
+ brcmf_sdcard_reg_write(NULL, (r), sizeof(typ), R_REG(r, typ) | (v))
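
[Editor's note] The R_REG/OR_REG macros above wrap brcmf_sdcard_reg_read()/brcmf_sdcard_reg_write() so backplane registers can be accessed by address and C type, with extra sync barriers on MIPS hosts. A usage sketch; the regs pointer and its field names are assumptions standing in for the real core register layout:

	/* Illustrative use of the accessors above (register layout assumed) */
	u32 val = R_REG(&regs->intstatus, u32);		/* typed read over the SDIO bus */

	OR_REG(&regs->corecontrol, 1 << 2, u32);	/* read-modify-write: set one bit */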
+
+#ifdef BCMDBG
+
+/* ARM trap handling */
+
+/* Trap types defined by ARM (see arminc.h) */
+
+#if defined(__ARM_ARCH_4T__)
+#define MAX_TRAP_TYPE (TR_FIQ + 1)
+#elif defined(__ARM_ARCH_7M__)
+#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS)
+#endif /* __ARM_ARCH_7M__ */
+
+/* The trap structure is defined here as offsets for assembly */
+#define TR_TYPE 0x00
+#define TR_EPC 0x04
+#define TR_CPSR 0x08
+#define TR_SPSR 0x0c
+#define TR_REGS 0x10
+#define TR_REG(n) (TR_REGS + (n) * 4)
+#define TR_SP TR_REG(13)
+#define TR_LR TR_REG(14)
+#define TR_PC TR_REG(15)
+
+#define TRAP_T_SIZE 80
+
+struct brcmf_trap {
+ u32 type;
+ u32 epc;
+ u32 cpsr;
+ u32 spsr;
+ u32 r0;
+ u32 r1;
+ u32 r2;
+ u32 r3;
+ u32 r4;
+ u32 r5;
+ u32 r6;
+ u32 r7;
+ u32 r8;
+ u32 r9;
+ u32 r10;
+ u32 r11;
+ u32 r12;
+ u32 r13;
+ u32 r14;
+ u32 pc;
+};
-#ifndef DHDSDIO_MEM_DUMP_FNAME
-#define DHDSDIO_MEM_DUMP_FNAME "mem_dump"
-#endif
+#define CBUF_LEN (128)
+
+struct rte_log {
+ u32 buf; /* Can't be pointer on (64-bit) hosts */
+ uint buf_size;
+ uint idx;
+ char *_buf_compat; /* Redundant pointer for backward compat. */
+};
+
+struct rte_console {
+ /* Virtual UART
+ * When there is no UART (e.g. Quickturn),
+ * the host should write a complete
+ * input line directly into cbuf and then write
+ * the length into vcons_in.
+ * This may also be used when there is a real UART
+ * (at risk of conflicting with
+ * the real UART). vcons_out is currently unused.
+ */
+ volatile uint vcons_in;
+ volatile uint vcons_out;
+
+ /* Output (logging) buffer
+ * Console output is written to a ring buffer log_buf at index log_idx.
+ * The host may read the output when it sees log_idx advance.
+ * Output will be lost if the output wraps around faster than the host
+ * polls.
+ */
+ struct rte_log log;
+
+ /* Console input line buffer
+ * Characters are read one at a time into cbuf
+ * until <CR> is received, then
+ * the buffer is processed as a command line.
+ * Also used for virtual UART.
+ */
+ uint cbuf_idx;
+ char cbuf[CBUF_LEN];
+};
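
[Editor's note] The comments above define the host-visible console contract: output accumulates in a ring buffer of log.buf_size bytes and the host tracks its own read position against log.idx. A sketch of the index arithmetic only, assuming the rte_log fields have already been copied to host memory; the SDIO read path itself is elsewhere:

	/* Bytes of new console output between the host's last read position and log.idx,
	 * accounting for wrap-around of the ring buffer (illustrative arithmetic only).
	 */
	static uint console_pending(uint last_idx, uint log_idx, uint buf_size)
	{
		if (log_idx >= last_idx)
			return log_idx - last_idx;
		return buf_size - last_idx + log_idx;	/* writer wrapped past the end */
	}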
+
+#endif /* BCMDBG */
+#include <chipcommon.h>
+
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_proto.h"
+#include "dhd_dbg.h"
+#include <bcmchip.h>
#define TXQLEN 2048 /* bulk tx queue length */
#define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
@@ -65,47 +163,239 @@
#define TXRETRIES 2 /* # of retries for tx frames */
-#if defined(CONFIG_MACH_SANDGATE2G)
-#define DHD_RXBOUND 250 /* Default for max rx frames in
+#define BRCMF_RXBOUND 50 /* Default for max rx frames in
one scheduling */
-#else
-#define DHD_RXBOUND 50 /* Default for max rx frames in
- one scheduling */
-#endif /* defined(CONFIG_MACH_SANDGATE2G) */
-#define DHD_TXBOUND 20 /* Default for max tx frames in
+#define BRCMF_TXBOUND 20 /* Default for max tx frames in
one scheduling */
-#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */
+#define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
#define MEMBLOCK 2048 /* Block size used for downloading
of dongle image */
#define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold
biggest possible glom */
-/* Packet alignment for most efficient SDIO (can change based on platform) */
-#ifndef DHD_SDALIGN
-#define DHD_SDALIGN 32
-#endif
-#if !ISPOWEROF2(DHD_SDALIGN)
-#error DHD_SDALIGN is not a power of 2!
+#ifndef BRCMF_FIRSTREAD
+#define BRCMF_FIRSTREAD 32
#endif
-#ifndef DHD_FIRSTREAD
-#define DHD_FIRSTREAD 32
-#endif
-#if !ISPOWEROF2(DHD_FIRSTREAD)
-#error DHD_FIRSTREAD is not a power of 2!
+#if !ISPOWEROF2(BRCMF_FIRSTREAD)
+#error BRCMF_FIRSTREAD is not a power of 2!
#endif
+/* SBSDIO_DEVICE_CTL */
+#define SBSDIO_DEVCTL_SETBUSY 0x01 /* 1: device will assert busy signal when
+ * receiving CMD53
+ */
+#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 /* 1: assertion of sdio interrupt is
+ * synchronous to the sdio clock
+ */
+#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 /* 1: mask all interrupts to host
+ * except the chipActive (rev 8)
+ */
+#define SBSDIO_DEVCTL_PADS_ISO 0x08 /* 1: isolate internal sdio signals, put
+ * external pads in tri-state; requires
+ * sdio bus power cycle to clear (rev 9)
+ */
+#define SBSDIO_DEVCTL_SB_RST_CTL 0x30 /* Force SD->SB reset mapping (rev 11) */
+#define SBSDIO_DEVCTL_RST_CORECTL 0x00 /* Determined by CoreControl bit */
+#define SBSDIO_DEVCTL_RST_BPRESET 0x10 /* Force backplane reset */
+#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20 /* Force no backplane reset */
+
+/* SBSDIO_FUNC1_CHIPCLKCSR */
+#define SBSDIO_FORCE_ALP 0x01 /* Force ALP request to backplane */
+#define SBSDIO_FORCE_HT 0x02 /* Force HT request to backplane */
+#define SBSDIO_FORCE_ILP 0x04 /* Force ILP request to backplane */
+#define SBSDIO_ALP_AVAIL_REQ 0x08 /* Make ALP ready (power up xtal) */
+#define SBSDIO_HT_AVAIL_REQ 0x10 /* Make HT ready (power up PLL) */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 /* Squelch clock requests from HW */
+#define SBSDIO_ALP_AVAIL 0x40 /* Status: ALP is ready */
+#define SBSDIO_HT_AVAIL 0x80 /* Status: HT is ready */
+
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) (SBSDIO_ALPAV(regval) && \
+ (alponly ? 1 : SBSDIO_HTAV(regval)))
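
[Editor's note] These helpers classify the SBSDIO_FUNC1_CHIPCLKCSR status bits read back from the dongle. A small worked example of how they evaluate; the csr value is hypothetical:

	/* Example: csr read back with only ALP available (hypothetical value) */
	u8 csr = SBSDIO_ALP_AVAIL;			/* 0x40 */

	bool alp_only = SBSDIO_ALPONLY(csr);		/* true: ALP set, HT clear */
	bool ok_for_alp = SBSDIO_CLKAV(csr, true);	/* true: ALP suffices when alponly */
	bool ok_for_ht = SBSDIO_CLKAV(csr, false);	/* false: HT not yet available */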
+/* direct(mapped) cis space */
+#define SBSDIO_CIS_BASE_COMMON 0x1000 /* MAPPED common CIS address */
+#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */
+#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF /* cis offset addr is < 17 bits */
+
+#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 /* manfid tuple length, include tuple,
+ * link bytes
+ */
+
+/* intstatus */
+#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
+#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
+#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
+#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
+#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
+#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
+#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
+#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
+#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
+#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
+#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
+#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
+#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
+#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
+#define I_PC (1 << 10) /* descriptor error */
+#define I_PD (1 << 11) /* data error */
+#define I_DE (1 << 12) /* Descriptor protocol Error */
+#define I_RU (1 << 13) /* Receive descriptor Underflow */
+#define I_RO (1 << 14) /* Receive fifo Overflow */
+#define I_XU (1 << 15) /* Transmit fifo Underflow */
+#define I_RI (1 << 16) /* Receive Interrupt */
+#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
+#define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
+#define I_XI (1 << 24) /* Transmit Interrupt */
+#define I_RF_TERM (1 << 25) /* Read Frame Terminate */
+#define I_WF_TERM (1 << 26) /* Write Frame Terminate */
+#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
+#define I_SBINT (1 << 28) /* sbintstatus Interrupt */
+#define I_CHIPACTIVE (1 << 29) /* chip from doze to active state */
+#define I_SRESET (1 << 30) /* CCCR RES interrupt */
+#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
+#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
+#define I_DMA (I_RI | I_XI | I_ERRORS)
+
+/* corecontrol */
+#define CC_CISRDY (1 << 0) /* CIS Ready */
+#define CC_BPRESEN (1 << 1) /* CCCR RES signal */
+#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
+#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation */
+#define CC_XMTDATAAVAIL_MODE (1 << 4)
+#define CC_XMTDATAAVAIL_CTRL (1 << 5)
+
+/* SDA_FRAMECTRL */
+#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
+#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
+#define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */
+#define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */
+
+/* HW frame tag */
+#define SDPCM_FRAMETAG_LEN 4 /* 2 bytes len, 2 bytes check val */
+
/* Total length of frame header for dongle protocol */
#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
#ifdef SDTEST
-#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
+#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + BRCMF_SDALIGN)
#else
-#define SDPCM_RESERVE (SDPCM_HDRLEN + DHD_SDALIGN)
+#define SDPCM_RESERVE (SDPCM_HDRLEN + BRCMF_SDALIGN)
#endif
+/*
+ * Software allocation of To SB Mailbox resources
+ */
+
+/* tosbmailbox bits corresponding to intstatus bits */
+#define SMB_NAK (1 << 0) /* Frame NAK */
+#define SMB_INT_ACK (1 << 1) /* Host Interrupt ACK */
+#define SMB_USE_OOB (1 << 2) /* Use OOB Wakeup */
+#define SMB_DEV_INT (1 << 3) /* Miscellaneous Interrupt */
+
+/* tosbmailboxdata */
+#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version */
+
+/*
+ * Software allocation of To Host Mailbox resources
+ */
+
+/* intstatus bits */
+#define I_HMB_FC_STATE I_HMB_SW0 /* Flow Control State */
+#define I_HMB_FC_CHANGE I_HMB_SW1 /* Flow Control State Changed */
+#define I_HMB_FRAME_IND I_HMB_SW2 /* Frame Indication */
+#define I_HMB_HOST_INT I_HMB_SW3 /* Miscellaneous Interrupt */
+
+/* tohostmailboxdata */
+#define HMB_DATA_NAKHANDLED 1 /* retransmit NAK'd frame */
+#define HMB_DATA_DEVREADY 2 /* talk to host after enable */
+#define HMB_DATA_FC 4 /* per prio flowcontrol update flag */
+#define HMB_DATA_FWREADY 8 /* fw ready for protocol activity */
+
+#define HMB_DATA_FCDATA_MASK 0xff000000
+#define HMB_DATA_FCDATA_SHIFT 24
+
+#define HMB_DATA_VERSION_MASK 0x00ff0000
+#define HMB_DATA_VERSION_SHIFT 16
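
/*
 * Minimal decode sketch for a tohostmailboxdata word, using only the
 * masks defined above; the helper name is illustrative, not part of
 * this patch.
 */
static void example_decode_hmb_data(u32 hmb_data)
{
	if (hmb_data & HMB_DATA_FC)
		printk(KERN_DEBUG "per-prio flow control bitmap 0x%02x\n",
		       (hmb_data & HMB_DATA_FCDATA_MASK) >>
		       HMB_DATA_FCDATA_SHIFT);
	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY))
		printk(KERN_DEBUG "dongle ready, protocol version %u\n",
		       (hmb_data & HMB_DATA_VERSION_MASK) >>
		       HMB_DATA_VERSION_SHIFT);
	if (hmb_data & HMB_DATA_NAKHANDLED)
		printk(KERN_DEBUG "dongle retransmitted NAK'd frame\n");
}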
+
+/*
+ * Software-defined protocol header
+ */
+
+/* Current protocol version */
+#define SDPCM_PROT_VERSION 4
+
+/* SW frame header */
+#define SDPCM_PACKET_SEQUENCE(p) (((u8 *)p)[0] & 0xff)
+
+#define SDPCM_CHANNEL_MASK 0x00000f00
+#define SDPCM_CHANNEL_SHIFT 8
+#define SDPCM_PACKET_CHANNEL(p) (((u8 *)p)[1] & 0x0f)
+
+#define SDPCM_NEXTLEN_OFFSET 2
+
+/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
+#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
+#define SDPCM_DOFFSET_VALUE(p) (((u8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_MASK 0xff000000
+#define SDPCM_DOFFSET_SHIFT 24
+#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
+#define SDPCM_FCMASK_VALUE(p) (((u8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
+#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
+#define SDPCM_WINDOW_VALUE(p) (((u8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
+
+#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
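
/*
 * Sketch (helper name illustrative, not from this patch): pulling the
 * fields out of the 8-byte software header with the accessor macros
 * above; swhdr points at the start of the SW header, i.e. just past
 * the 4-byte HW frame tag.
 */
static void example_parse_swheader(u8 *swhdr, u8 *seq, u8 *chan, u8 *doff)
{
	*seq = SDPCM_PACKET_SEQUENCE(swhdr);	/* 8-bit frame sequence */
	*chan = SDPCM_PACKET_CHANNEL(swhdr);	/* logical channel, see below */
	*doff = SDPCM_DOFFSET_VALUE(swhdr);	/* data offset from SOF */
}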
+
+/* logical channel numbers */
+#define SDPCM_CONTROL_CHANNEL 0 /* Control channel Id */
+#define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication Channel Id */

+#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
+#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets */
+#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
+
+#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for 8bit frame seq */
+
+#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
+
+/* For TEST_CHANNEL packets, define another 4-byte header */
+#define SDPCM_TEST_HDRLEN 4 /*
+ * Generally: Cmd(1), Ext(1), Len(2);
+ * Semantics of Ext byte depend on
+ * command. Len is current or requested
+ * frame length, not including test
+ * header; sent little-endian.
+ */
+#define SDPCM_TEST_DISCARD 0x01 /* Receiver discards. Ext:pattern id. */
+#define SDPCM_TEST_ECHOREQ 0x02 /* Echo request. Ext:pattern id. */
+#define SDPCM_TEST_ECHORSP 0x03 /* Echo response. Ext:pattern id. */
+#define SDPCM_TEST_BURST 0x04 /*
+ * Receiver to send a burst.
+ * Ext is a frame count
+ */
+#define SDPCM_TEST_SEND 0x05 /*
+ * Receiver sets send mode.
+ * Ext is boolean on/off
+ */
+
+/* Handy macro for filling in datagen packets with a pattern */
+#define SDPCM_TEST_FILL(byteno, id) ((u8)(id + byteno))
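
/*
 * Sketch of filling in the 4-byte test header described above: one
 * command byte, one command-specific Ext byte, then the 16-bit length
 * in little-endian order (helper name is illustrative only).
 */
static void example_fill_test_hdr(u8 *hdr, u8 cmd, u8 ext, u16 len)
{
	hdr[0] = cmd;			/* e.g. SDPCM_TEST_ECHOREQ */
	hdr[1] = ext;			/* pattern id / frame count / on-off */
	hdr[2] = len & 0xff;		/* length, LSB first */
	hdr[3] = (len >> 8) & 0xff;
}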
+
+/*
+ * Shared structure between dongle and the host.
+ * The structure contains pointers to trap or assert information.
+ */
+#define SDPCM_SHARED_VERSION 0x0002
+#define SDPCM_SHARED_VERSION_MASK 0x00FF
+#define SDPCM_SHARED_ASSERT_BUILT 0x0100
+#define SDPCM_SHARED_ASSERT 0x0200
+#define SDPCM_SHARED_TRAP 0x0400
+
+
/* Space for header read, limit for data packets */
#ifndef MAX_HDR_READ
#define MAX_HDR_READ 32
@@ -117,7 +407,7 @@
#define MAX_RX_DATASZ 2048
/* Maximum milliseconds to wait for F2 to come up */
-#define DHD_WAIT_F2RDY 3000
+#define BRCMF_WAIT_F2RDY 3000
/* Bump up limit on waiting for HT to account for first startup;
* if the image is doing a CRC calculation before programming the PMU
@@ -130,13 +420,48 @@
#endif
/* Value for ChipClockCSR during initial setup */
-#define DHD_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | \
+#define BRCMF_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | \
SBSDIO_ALP_AVAIL_REQ)
-#define DHD_INIT_CLKCTL2 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
+/* sbimstate */
+#define SBIM_IBE 0x20000 /* inbanderror */
+#define SBIM_TO 0x40000 /* timeout */
+#define SBIM_BY 0x01800000 /* busy (sonics >= 2.3) */
+#define SBIM_RJ 0x02000000 /* reject (sonics >= 2.3) */
+
+/* sbtmstatelow */
+#define SBTML_RESET 0x0001 /* reset */
+#define SBTML_REJ_MASK 0x0006 /* reject field */
+#define SBTML_REJ 0x0002 /* reject */
+#define SBTML_TMPREJ 0x0004 /* temporary reject, for error recovery */
+
+#define SBTML_SICF_SHIFT 16 /* Shift to locate the SI control flags in sbtml */
+
+/* sbtmstatehigh */
+#define SBTMH_SERR 0x0001 /* serror */
+#define SBTMH_INT 0x0002 /* interrupt */
+#define SBTMH_BUSY 0x0004 /* busy */
+#define SBTMH_TO 0x0020 /* timeout (sonics >= 2.3) */
+
+#define SBTMH_SISF_SHIFT 16 /* Shift to locate the SI status flags in sbtmh */
+
+/* sbidlow */
+#define SBIDL_INIT 0x80 /* initiator */
+
+/* sbidhigh */
+#define SBIDH_RC_MASK 0x000f /* revision code */
+#define SBIDH_RCE_MASK 0x7000 /* revision code extension field */
+#define SBIDH_RCE_SHIFT 8
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
+#define SBIDH_CC_MASK 0x8ff0 /* core code */
+#define SBIDH_CC_SHIFT 4
+#define SBIDH_VC_MASK 0xffff0000 /* vendor code */
+#define SBIDH_VC_SHIFT 16
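
/*
 * Worked example for SBCOREREV(): for a core with revision-code
 * extension 2 and base revision code 7 the revision fields of sbidhigh
 * read 0x2007, so
 *   SBCOREREV(0x2007) == ((0x2007 & 0x7000) >> 8) | (0x2007 & 0xf)
 *                     == 0x20 | 0x7 == 0x27, i.e. core rev 39.
 */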
+
/*
* Conversion of 802.1D priority to precedence level
*/
@@ -144,21 +469,130 @@
(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? \
((prio^2)) : (prio))
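
/*
 * Worked example for PRIO2PREC(), assuming the usual 802.1D priority
 * constants (PRIO_8021D_BE == 0, PRIO_8021D_BK == 1,
 * PRIO_8021D_NONE == 2): the XOR with 2 swaps BE and NONE, so
 *   PRIO2PREC(0) == 2  (best effort ranks above background),
 *   PRIO2PREC(1) == 1  (background unchanged),
 *   PRIO2PREC(2) == 0  (unclassified drops to the lowest precedence),
 * and every other priority maps straight through.
 */
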
-DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
-extern int dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf,
- uint len);
+BRCMF_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
+
+/*
+ * Core reg address translation.
+ * These macros return a 32-bit byte address on the backplane bus.
+ */
+#define CORE_CC_REG(base, field) (base + offsetof(chipcregs_t, field))
+#define CORE_BUS_REG(base, field) \
+ (base + offsetof(struct sdpcmd_regs, field))
+#define CORE_SB(base, field) \
+ (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+
+/* core registers */
+struct sdpcmd_regs {
+ u32 corecontrol; /* 0x00, rev8 */
+ u32 corestatus; /* rev8 */
+ u32 PAD[1];
+ u32 biststatus; /* rev8 */
+
+ /* PCMCIA access */
+ u16 pcmciamesportaladdr; /* 0x010, rev8 */
+ u16 PAD[1];
+ u16 pcmciamesportalmask; /* rev8 */
+ u16 PAD[1];
+ u16 pcmciawrframebc; /* rev8 */
+ u16 PAD[1];
+ u16 pcmciaunderflowtimer; /* rev8 */
+ u16 PAD[1];
+
+ /* interrupt */
+ u32 intstatus; /* 0x020, rev8 */
+ u32 hostintmask; /* rev8 */
+ u32 intmask; /* rev8 */
+ u32 sbintstatus; /* rev8 */
+ u32 sbintmask; /* rev8 */
+ u32 funcintmask; /* rev4 */
+ u32 PAD[2];
+ u32 tosbmailbox; /* 0x040, rev8 */
+ u32 tohostmailbox; /* rev8 */
+ u32 tosbmailboxdata; /* rev8 */
+ u32 tohostmailboxdata; /* rev8 */
+
+ /* synchronized access to registers in SDIO clock domain */
+ u32 sdioaccess; /* 0x050, rev8 */
+ u32 PAD[3];
+
+ /* PCMCIA frame control */
+ u8 pcmciaframectrl; /* 0x060, rev8 */
+ u8 PAD[3];
+ u8 pcmciawatermark; /* rev8 */
+ u8 PAD[155];
+
+ /* interrupt batching control */
+ u32 intrcvlazy; /* 0x100, rev8 */
+ u32 PAD[3];
+
+ /* counters */
+ u32 cmd52rd; /* 0x110, rev8 */
+ u32 cmd52wr; /* rev8 */
+ u32 cmd53rd; /* rev8 */
+ u32 cmd53wr; /* rev8 */
+ u32 abort; /* rev8 */
+ u32 datacrcerror; /* rev8 */
+ u32 rdoutofsync; /* rev8 */
+ u32 wroutofsync; /* rev8 */
+ u32 writebusy; /* rev8 */
+ u32 readwait; /* rev8 */
+ u32 readterm; /* rev8 */
+ u32 writeterm; /* rev8 */
+ u32 PAD[40];
+ u32 clockctlstatus; /* rev8 */
+ u32 PAD[7];
+
+ u32 PAD[128]; /* DMA engines */
+
+ /* SDIO/PCMCIA CIS region */
+ char cis[512]; /* 0x400-0x5ff, rev6 */
+
+ /* PCMCIA function control registers */
+ char pcmciafcr[256]; /* 0x600-6ff, rev6 */
+ u16 PAD[55];
+
+ /* PCMCIA backplane access */
+ u16 backplanecsr; /* 0x76E, rev6 */
+ u16 backplaneaddr0; /* rev6 */
+ u16 backplaneaddr1; /* rev6 */
+ u16 backplaneaddr2; /* rev6 */
+ u16 backplaneaddr3; /* rev6 */
+ u16 backplanedata0; /* rev6 */
+ u16 backplanedata1; /* rev6 */
+ u16 backplanedata2; /* rev6 */
+ u16 backplanedata3; /* rev6 */
+ u16 PAD[31];
+
+ /* sprom "size" & "blank" info */
+ u16 spromstatus; /* 0x7BE, rev2 */
+ u32 PAD[464];
+
+ u16 PAD[0x80];
+};
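
/*
 * Usage sketch for the translation macros above (function name is
 * illustrative): the address handed to the 32-bit register accessors
 * is simply the core's backplane base plus the struct offset.
 */
static u32 example_intstatus_addr(u32 buscorebase)
{
	/* e.g. buscorebase == bus->ci->buscorebase for the SDIO core */
	return CORE_BUS_REG(buscorebase, intstatus);
}
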
-#ifdef DHD_DEBUG
+#ifdef BCMDBG
/* Device console log buffer state */
-typedef struct dhd_console {
+struct brcmf_console {
uint count; /* Poll interval msec counter */
uint log_addr; /* Log struct address (fixed) */
- hndrte_log_t log; /* Log struct (host copy) */
+ struct rte_log log; /* Log struct (host copy) */
uint bufsize; /* Size of log buffer */
u8 *buf; /* Log buffer (host copy) */
uint last; /* Last buffer read index */
-} dhd_console_t;
-#endif /* DHD_DEBUG */
+};
+#endif /* BCMDBG */
+
+struct sdpcm_shared {
+ u32 flags;
+ u32 trap_addr;
+ u32 assert_exp_addr;
+ u32 assert_file_addr;
+ u32 assert_line;
+ u32 console_addr; /* Address of struct rte_console */
+ u32 msgtrace_addr;
+ u8 tag[32];
+};
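
/*
 * Sketch (helper name illustrative): validating a struct sdpcm_shared
 * image read back from the dongle against the constants defined above.
 */
static bool example_shared_valid(const struct sdpcm_shared *sh)
{
	return (sh->flags & SDPCM_SHARED_VERSION_MASK) == SDPCM_SHARED_VERSION;
}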
+
/* misc chip info needed by some of the routines */
struct chip_info {
@@ -167,7 +601,7 @@ struct chip_info {
u32 cccorebase;
u32 ccrev;
u32 cccaps;
- u32 buscorebase;
+ u32 buscorebase; /* 32-bit backplane bus address */
u32 buscorerev;
u32 buscoretype;
u32 ramcorebase;
@@ -177,19 +611,14 @@ struct chip_info {
};
/* Private data for SDIO bus interaction */
-typedef struct dhd_bus {
- dhd_pub_t *dhd;
+struct brcmf_bus {
+ struct brcmf_pub *drvr;
- bcmsdh_info_t *sdh; /* Handle for BCMSDH calls */
+ struct brcmf_sdio_card *card; /* Handle for sdio card calls */
struct chip_info *ci; /* Chip info struct */
char *vars; /* Variables (from CIS and/or other) */
uint varsz; /* Size of variables buffer */
- u32 sbaddr; /* Current SB window pointer (-1, invalid) */
- sdpcmd_regs_t *regs; /* Registers for SDIO core */
- uint sdpcmrev; /* SDIO core revision */
- uint armrev; /* CPU core revision */
- uint ramrev; /* SOCRAM core revision */
u32 ramsize; /* Size of RAM in SOCRAM (bytes) */
u32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */
@@ -199,10 +628,7 @@ typedef struct dhd_bus {
bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */
bool fcstate; /* State of dongle flow-control */
- u16 cl_devid; /* cached devid for dhdsdio_probe_attach() */
- char *fw_path; /* module_param: path to firmware image */
- char *nv_path; /* module_param: path to nvram vars file */
- const char *nvram_params; /* user specified nvram params. */
+ u16 cl_devid; /* cached devid for brcmf_sdio_probe_attach() */
uint blocksize; /* Block size of SDIO transfers */
uint roundup; /* Max roundup limit */
@@ -212,7 +638,7 @@ typedef struct dhd_bus {
u8 tx_seq; /* Transmit sequence number (next) */
u8 tx_max; /* Maximum transmit sequence allowed */
- u8 hdrbuf[MAX_HDR_READ + DHD_SDALIGN];
+ u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
u16 nextlen; /* Next Read Len from last header */
u8 rx_seq; /* Receive sequence number (expected) */
@@ -242,27 +668,23 @@ typedef struct dhd_bus {
uint polltick; /* Tick counter */
uint pollcnt; /* Count of active polls */
-#ifdef DHD_DEBUG
- dhd_console_t console; /* Console output polling support */
+#ifdef BCMDBG
+ struct brcmf_console console; /* Console output polling support */
uint console_addr; /* Console address from shared struct */
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
- uint regfails; /* Count of R_REG/W_REG failures */
+ uint regfails; /* Count of register access failures */
uint clkstate; /* State of sd and backplane clock(s) */
bool activity; /* Activity flag for clock down */
s32 idletime; /* Control for activity timeout */
s32 idlecount; /* Activity timeout counter */
s32 idleclock; /* How to set bus driver when idle */
- s32 sd_divisor; /* Speed control to bus driver */
- s32 sd_mode; /* Mode control to bus driver */
- s32 sd_rxchain; /* If bcmsdh api accepts PKT chains */
- bool use_rxchain; /* If dhd should use PKT chains */
+ s32 sd_rxchain;
+ bool use_rxchain; /* If brcmf should use PKT chains */
bool sleeping; /* Is SDIO bus sleeping? */
bool rxflow_mode; /* Rx flow control mode */
bool rxflow; /* Is rx flow control on */
- uint prev_rxlim_hit; /* Is prev rx limit exceeded
- (per dpc schedule) */
bool alp_only; /* Don't use HT clock (ALP only) */
/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
bool usebufpool;
@@ -314,7 +736,68 @@ typedef struct dhd_bus {
u8 *ctrl_frame_buf;
u32 ctrl_frame_len;
bool ctrl_frame_stat;
-} dhd_bus_t;
+
+ spinlock_t txqlock;
+ wait_queue_head_t ctrl_wait;
+
+ struct timer_list timer;
+ struct completion watchdog_wait;
+ struct task_struct *watchdog_tsk;
+ bool wd_timer_valid;
+
+ struct tasklet_struct tasklet;
+ struct task_struct *dpc_tsk;
+ struct completion dpc_wait;
+
+ bool threads_only;
+ struct semaphore sdsem;
+ spinlock_t sdlock;
+
+ const char *fw_name;
+ const struct firmware *firmware;
+ const char *nv_name;
+ u32 fw_ptr;
+};
+
+struct sbconfig {
+ u32 PAD[2];
+ u32 sbipsflag; /* initiator port ocp slave flag */
+ u32 PAD[3];
+ u32 sbtpsflag; /* target port ocp slave flag */
+ u32 PAD[11];
+ u32 sbtmerrloga; /* (sonics >= 2.3) */
+ u32 PAD;
+ u32 sbtmerrlog; /* (sonics >= 2.3) */
+ u32 PAD[3];
+ u32 sbadmatch3; /* address match3 */
+ u32 PAD;
+ u32 sbadmatch2; /* address match2 */
+ u32 PAD;
+ u32 sbadmatch1; /* address match1 */
+ u32 PAD[7];
+ u32 sbimstate; /* initiator agent state */
+ u32 sbintvec; /* interrupt mask */
+ u32 sbtmstatelow; /* target state */
+ u32 sbtmstatehigh; /* target state */
+ u32 sbbwa0; /* bandwidth allocation table0 */
+ u32 PAD;
+ u32 sbimconfiglow; /* initiator configuration */
+ u32 sbimconfighigh; /* initiator configuration */
+ u32 sbadmatch0; /* address match0 */
+ u32 PAD;
+ u32 sbtmconfiglow; /* target configuration */
+ u32 sbtmconfighigh; /* target configuration */
+ u32 sbbconfig; /* broadcast configuration */
+ u32 PAD;
+ u32 sbbstate; /* broadcast state */
+ u32 PAD[3];
+ u32 sbactcnfg; /* activate configuration */
+ u32 PAD[3];
+ u32 sbflagst; /* current sbflags */
+ u32 PAD[3];
+ u32 sbidlow; /* identification */
+ u32 sbidhigh; /* identification */
+};
/* clkstate */
#define CLK_NONE 0
@@ -322,29 +805,45 @@ typedef struct dhd_bus {
#define CLK_PENDING 2 /* Not used yet */
#define CLK_AVAIL 3
-#define DHD_NOPMU(dhd) (false)
+#define BRCMF_NOPMU(brcmf) (false)
-#ifdef DHD_DEBUG
+#ifdef BCMDBG
static int qcount[NUMPRIO];
static int tx_packets[NUMPRIO];
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
/* Deferred transmit */
-const uint dhd_deferred_tx = 1;
+uint brcmf_deferred_tx = 1;
+module_param(brcmf_deferred_tx, uint, 0);
+
+/* Watchdog thread priority, -1 to use kernel timer */
+int brcmf_watchdog_prio = 97;
+module_param(brcmf_watchdog_prio, int, 0);
-extern uint dhd_watchdog_ms;
-extern void dhd_os_wd_timer(void *bus, uint wdtick);
+/* Watchdog interval */
+uint brcmf_watchdog_ms = 10;
+module_param(brcmf_watchdog_ms, uint, 0);
+
+/* DPC thread priority, -1 to use tasklet */
+int brcmf_dpc_prio = 98;
+module_param(brcmf_dpc_prio, int, 0);
+
+#ifdef BCMDBG
+/* Console poll interval */
+uint brcmf_console_ms;
+module_param(brcmf_console_ms, uint, 0);
+#endif /* BCMDBG */
/* Tx/Rx bounds */
-uint dhd_txbound;
-uint dhd_rxbound;
-uint dhd_txminmax;
+uint brcmf_txbound;
+uint brcmf_rxbound;
+uint brcmf_txminmax;
/* override the RAM size if possible */
#define DONGLE_MIN_MEMSIZE (128 * 1024)
-int dhd_dongle_memsize;
+int brcmf_dongle_memsize;
-static bool dhd_alignctl;
+static bool brcmf_alignctl;
static bool sd1idle;
@@ -352,9 +851,7 @@ static bool retrydata;
#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
static const uint watermark = 8;
-static const uint firstread = DHD_FIRSTREAD;
-
-#define HDATLEN (firstread - (SDPCM_HDRLEN))
+static const uint firstread = BRCMF_FIRSTREAD;
/* Retry count for register access failures */
static const uint retry_limit = 2;
@@ -364,20 +861,11 @@ static bool forcealign;
#define ALIGNMENT 4
-#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
-extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
-#endif
-
-#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
-#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
-#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
#define PKTALIGN(_p, _len, _align) \
do { \
uint datalign; \
datalign = (unsigned long)((_p)->data); \
datalign = roundup(datalign, (_align)) - datalign; \
- ASSERT(datalign < (_align)); \
- ASSERT((_p)->len >= ((_len) + datalign)); \
if (datalign) \
skb_pull((_p), datalign); \
__skb_trim((_p), (_len)); \
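
/*
 * Worked example for PKTALIGN(): with pkt->data at address 0x...1006
 * and _align == BRCMF_SDALIGN (assumed to be 32 here), datalign becomes
 * roundup(0x1006, 32) - 0x1006 == 26, so 26 bytes are pulled from the
 * head before the buffer is trimmed down to _len.
 */
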
@@ -387,146 +875,166 @@ extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
static const uint max_roundup = 512;
/* Try doing readahead */
-static bool dhd_readahead;
+static bool brcmf_readahead;
/* To check if there's window offered */
#define DATAOK(bus) \
(((u8)(bus->tx_max - bus->tx_seq) != 0) && \
(((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0))
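
/*
 * Worked example for DATAOK(): with bus->tx_seq == 250 and
 * bus->tx_max == 4, (u8)(4 - 250) == 10, so ten more frames fit in the
 * 8-bit credit window; once tx_seq catches up to tx_max the difference
 * is 0 and DATAOK() stays false until the dongle advances tx_max. The
 * high-bit test rejects windows that would appear larger than 128,
 * which indicates a stale tx_max.
 */
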
-/* Macros to get register read/write status */
-/* NOTE: these assume a local dhdsdio_bus_t *bus! */
-#define R_SDREG(regvar, regaddr, retryvar) \
-do { \
- retryvar = 0; \
- do { \
- regvar = R_REG(regaddr); \
- } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
- if (retryvar) { \
- bus->regfails += (retryvar-1); \
- if (retryvar > retry_limit) { \
- DHD_ERROR(("%s: FAILED" #regvar "READ, LINE %d\n", \
- __func__, __LINE__)); \
- regvar = 0; \
- } \
- } \
-} while (0)
-
-#define W_SDREG(regval, regaddr, retryvar) \
-do { \
- retryvar = 0; \
- do { \
- W_REG(regaddr, regval); \
- } while (bcmsdh_regfail(bus->sdh) && (++retryvar <= retry_limit)); \
- if (retryvar) { \
- bus->regfails += (retryvar-1); \
- if (retryvar > retry_limit) \
- DHD_ERROR(("%s: FAILED REGISTER WRITE, LINE %d\n", \
- __func__, __LINE__)); \
- } \
-} while (0)
-
-#define DHD_BUS SDIO_BUS
+/*
+ * Reads a register in the SDIO hardware block. This block occupies a series of
+ * addresses on the 32-bit backplane bus.
+ */
+static void
+r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
+{
+ *retryvar = 0;
+ do {
+ *regvar = R_REG(bus->ci->buscorebase + reg_offset, u32);
+ } while (brcmf_sdcard_regfail(bus->card) &&
+ (++(*retryvar) <= retry_limit));
+ if (*retryvar) {
+ bus->regfails += (*retryvar-1);
+ if (*retryvar > retry_limit) {
+ BRCMF_ERROR(("FAILED READ %Xh\n", reg_offset));
+ *regvar = 0;
+ }
+ }
+}
+
+static void
+w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
+{
+ *retryvar = 0;
+ do {
+ brcmf_sdcard_reg_write(NULL, bus->ci->buscorebase + reg_offset,
+ sizeof(u32), regval);
+ } while (brcmf_sdcard_regfail(bus->card) &&
+ (++(*retryvar) <= retry_limit));
+ if (*retryvar) {
+ bus->regfails += (*retryvar-1);
+ if (*retryvar > retry_limit)
+ BRCMF_ERROR(("FAILED REGISTER WRITE"
+ " %Xh\n", reg_offset));
+ }
+}
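
/*
 * Usage sketch (not from this patch; the helper name is made up): both
 * accessors take the register's byte offset within struct sdpcmd_regs
 * rather than a mapped pointer, in the same way the later hunks handle
 * intstatus and tosbmailbox.
 */
static void example_ack_frame_ind(struct brcmf_bus *bus)
{
	u32 intstatus, retries;

	r_sdreg32(bus, &intstatus,
		  offsetof(struct sdpcmd_regs, intstatus), &retries);
	if (intstatus & I_HMB_FRAME_IND)	/* ack the frame indication */
		w_sdreg32(bus, I_HMB_FRAME_IND,
			  offsetof(struct sdpcmd_regs, intstatus), &retries);
}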
+
+#define BRCMF_BUS SDIO_BUS
#define PKT_AVAILABLE() (intstatus & I_HMB_FRAME_IND)
#define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
#ifdef SDTEST
-static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
-static void dhdsdio_sdtest_set(dhd_bus_t *bus, bool start);
+static void brcmf_sdbrcm_checkdied(struct brcmf_bus *bus, void *pkt, uint seq);
+static void brcmf_sdbrcm_sdtest_set(struct brcmf_bus *bus, bool start);
#endif
-#ifdef DHD_DEBUG
-static int dhdsdio_checkdied(dhd_bus_t *bus, u8 *data, uint size);
-static int dhdsdio_mem_dump(dhd_bus_t *bus);
-#endif /* DHD_DEBUG */
-static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
-
-static void dhdsdio_release(dhd_bus_t *bus);
-static void dhdsdio_release_malloc(dhd_bus_t *bus);
-static void dhdsdio_disconnect(void *ptr);
-static bool dhdsdio_chipmatch(u16 chipid);
-static bool dhdsdio_probe_attach(dhd_bus_t *bus, void *sdh,
- void *regsva, u16 devid);
-static bool dhdsdio_probe_malloc(dhd_bus_t *bus, void *sdh);
-static bool dhdsdio_probe_init(dhd_bus_t *bus, void *sdh);
-static void dhdsdio_release_dongle(dhd_bus_t *bus);
-
-static uint process_nvram_vars(char *varbuf, uint len);
-
-static void dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size);
-static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, u32 addr, uint fn,
+#ifdef BCMDBG
+static int brcmf_sdbrcm_bus_console_in(struct brcmf_pub *drvr,
+ unsigned char *msg, uint msglen);
+static int brcmf_sdbrcm_checkdied(struct brcmf_bus *bus, u8 *data, uint size);
+static int brcmf_sdbrcm_mem_dump(struct brcmf_bus *bus);
+#endif /* BCMDBG */
+static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter);
+
+static void brcmf_sdbrcm_release(struct brcmf_bus *bus);
+static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus);
+static void brcmf_sdbrcm_disconnect(void *ptr);
+static bool brcmf_sdbrcm_chipmatch(u16 chipid);
+static bool brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, void *card,
+ u32 regsva, u16 devid);
+static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus, void *card);
+static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus, void *card);
+static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus);
+
+static uint brcmf_process_nvram_vars(char *varbuf, uint len);
+
+static void brcmf_sdbrcm_setmemsize(struct brcmf_bus *bus, int mem_size);
+static int brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn,
uint flags, u8 *buf, uint nbytes,
- struct sk_buff *pkt, bcmsdh_cmplt_fn_t complete,
+ struct sk_buff *pkt,
+ void (*complete)(void *handle, int status,
+ bool sync_waiting),
void *handle);
-static bool dhdsdio_download_firmware(struct dhd_bus *bus, void *sdh);
-static int _dhdsdio_download_firmware(struct dhd_bus *bus);
+static bool brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus, void *card);
+static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus);
-static int dhdsdio_download_code_file(struct dhd_bus *bus, char *image_path);
-static int dhdsdio_download_nvram(struct dhd_bus *bus);
-#ifdef BCMEMBEDIMAGE
-static int dhdsdio_download_code_array(struct dhd_bus *bus);
-#endif
-static void dhdsdio_chip_disablecore(bcmsdh_info_t *sdh, u32 corebase);
-static int dhdsdio_chip_attach(struct dhd_bus *bus, void *regs);
-static void dhdsdio_chip_resetcore(bcmsdh_info_t *sdh, u32 corebase);
-static void dhdsdio_sdiod_drive_strength_init(struct dhd_bus *bus,
+static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus);
+static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus);
+
+static void
+brcmf_sdbrcm_chip_disablecore(struct brcmf_sdio_card *card, u32 corebase);
+
+static int brcmf_sdbrcm_chip_attach(struct brcmf_bus *bus, u32 regs);
+
+static void
+brcmf_sdbrcm_chip_resetcore(struct brcmf_sdio_card *card, u32 corebase);
+
+static void brcmf_sdbrcm_sdiod_drive_strength_init(struct brcmf_bus *bus,
u32 drivestrength);
-static void dhdsdio_chip_detach(struct dhd_bus *bus);
+static void brcmf_sdbrcm_chip_detach(struct brcmf_bus *bus);
+static void brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar);
+static void brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus);
+static void brcmf_sdbrcm_watchdog(unsigned long data);
+static int brcmf_sdbrcm_watchdog_thread(void *data);
+static int brcmf_sdbrcm_dpc_thread(void *data);
+static void brcmf_sdbrcm_dpc_tasklet(unsigned long data);
+static void brcmf_sdbrcm_sched_dpc(struct brcmf_bus *bus);
+static void brcmf_sdbrcm_sdlock(struct brcmf_bus *bus);
+static void brcmf_sdbrcm_sdunlock(struct brcmf_bus *bus);
+static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus);
/* Packet free applicable unconditionally for sdio and sdspi.
* Conditional if bufpool was present for gspi bus.
*/
-static void dhdsdio_pktfree2(dhd_bus_t *bus, struct sk_buff *pkt)
+static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt)
{
- dhd_os_sdlock_rxq(bus->dhd);
if ((bus->bus != SPI_BUS) || bus->usebufpool)
- bcm_pkt_buf_free_skb(pkt);
- dhd_os_sdunlock_rxq(bus->dhd);
+ brcmu_pkt_buf_free_skb(pkt);
}
-static void dhd_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
+static void brcmf_sdbrcm_setmemsize(struct brcmf_bus *bus, int mem_size)
{
s32 min_size = DONGLE_MIN_MEMSIZE;
/* Restrict the memsize to user specified limit */
- DHD_ERROR(("user: Restrict the dongle ram size to %d, min %d\n",
- dhd_dongle_memsize, min_size));
- if ((dhd_dongle_memsize > min_size) &&
- (dhd_dongle_memsize < (s32) bus->orig_ramsize))
- bus->ramsize = dhd_dongle_memsize;
+ BRCMF_ERROR(("user: Restrict the dongle ram size to %d, min %d\n",
+ brcmf_dongle_memsize, min_size));
+ if ((brcmf_dongle_memsize > min_size) &&
+ (brcmf_dongle_memsize < (s32) bus->orig_ramsize))
+ bus->ramsize = brcmf_dongle_memsize;
}
-static int dhdsdio_set_siaddr_window(dhd_bus_t *bus, u32 address)
+static int brcmf_sdbrcm_set_siaddr_window(struct brcmf_bus *bus, u32 address)
{
int err = 0;
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRLOW,
(address >> 8) & SBSDIO_SBADDRLOW_MASK, &err);
if (!err)
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRMID,
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_SBADDRMID,
(address >> 16) & SBSDIO_SBADDRMID_MASK, &err);
if (!err)
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SBADDRHIGH,
- (address >> 24) & SBSDIO_SBADDRHIGH_MASK,
- &err);
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_SBADDRHIGH,
+ (address >> 24) & SBSDIO_SBADDRHIGH_MASK,
+ &err);
return err;
}
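
/*
 * Worked example (hypothetical address; the SBSDIO_SBADDR*_MASK values
 * come from the SDIO host header): moving the window to 0x18000000
 * issues three function-1 writes,
 *   SBADDRLOW  <- (0x18000000 >> 8)  & SBSDIO_SBADDRLOW_MASK
 *   SBADDRMID  <- (0x18000000 >> 16) & SBSDIO_SBADDRMID_MASK  (0x00)
 *   SBADDRHIGH <- (0x18000000 >> 24) & SBSDIO_SBADDRHIGH_MASK (0x18)
 * and the remaining low address bits are carried in the CMD53 address
 * of the subsequent accesses.
 */
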
/* Turn backplane clock on or off */
-static int dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
+static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
{
int err;
u8 clkctl, clkreq, devctl;
- bcmsdh_info_t *sdh;
+ struct brcmf_sdio_card *card;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
-#if defined(OOB_INTR_ONLY)
- pendok = false;
-#endif
clkctl = 0;
- sdh = bus->sdh;
+ card = bus->card;
if (on) {
/* Request HT Avail */
@@ -537,99 +1045,93 @@ static int dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
&& (bus->ci->chiprev == 0))
clkreq |= SBSDIO_FORCE_ALP;
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- clkreq, &err);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
if (err) {
- DHD_ERROR(("%s: HT Avail request error: %d\n",
- __func__, err));
+ BRCMF_ERROR(("%s: HT Avail request error: %d\n",
+ __func__, err));
return -EBADE;
}
if (pendok && ((bus->ci->buscoretype == PCMCIA_CORE_ID)
&& (bus->ci->buscorerev == 9))) {
u32 dummy, retries;
- R_SDREG(dummy, &bus->regs->clockctlstatus, retries);
+ r_sdreg32(bus, &dummy,
+ offsetof(struct sdpcmd_regs, clockctlstatus),
+ &retries);
}
/* Check current status */
- clkctl =
- bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- &err);
+ clkctl = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
- DHD_ERROR(("%s: HT Avail read error: %d\n",
- __func__, err));
+ BRCMF_ERROR(("%s: HT Avail read error: %d\n",
+ __func__, err));
return -EBADE;
}
/* Go to pending and await interrupt if appropriate */
if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
/* Allow only clock-available interrupt */
- devctl =
- bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
- &err);
+ devctl = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
if (err) {
- DHD_ERROR(("%s: Devctl error setting CA: %d\n",
- __func__, err));
+ BRCMF_ERROR(("%s: Devctl error setting CA:"
+ " %d\n", __func__, err));
return -EBADE;
}
devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
- devctl, &err);
- DHD_INFO(("CLKCTL: set PENDING\n"));
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, devctl, &err);
+ BRCMF_INFO(("CLKCTL: set PENDING\n"));
bus->clkstate = CLK_PENDING;
return 0;
} else if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
devctl =
- bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
- &err);
+ brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, devctl, &err);
}
/* Otherwise, wait here (polling) for HT Avail */
if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
- SPINWAIT_SLEEP(sdioh_spinwait_sleep,
- ((clkctl =
- bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- &err)),
- !SBSDIO_CLKAV(clkctl, bus->alp_only)),
- PMU_MAX_TRANSITION_DLY);
+ BRCMF_SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+ ((clkctl =
+ brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ &err)),
+ !SBSDIO_CLKAV(clkctl, bus->alp_only)),
+ PMU_MAX_TRANSITION_DLY);
}
if (err) {
- DHD_ERROR(("%s: HT Avail request error: %d\n",
- __func__, err));
+ BRCMF_ERROR(("%s: HT Avail request error: %d\n",
+ __func__, err));
return -EBADE;
}
if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
- DHD_ERROR(("%s: HT Avail timeout (%d): clkctl 0x%02x\n",
- __func__, PMU_MAX_TRANSITION_DLY, clkctl));
+ BRCMF_ERROR(("%s: HT Avail timeout (%d): "
+ "clkctl 0x%02x\n", __func__,
+ PMU_MAX_TRANSITION_DLY, clkctl));
return -EBADE;
}
/* Mark clock available */
bus->clkstate = CLK_AVAIL;
- DHD_INFO(("CLKCTL: turned ON\n"));
-
-#if defined(DHD_DEBUG)
- if (bus->alp_only == true) {
-#if !defined(BCMLXSDMMC)
- if (!SBSDIO_ALPONLY(clkctl)) {
- DHD_ERROR(("%s: HT Clock, when ALP Only\n",
- __func__));
- }
-#endif /* !defined(BCMLXSDMMC) */
- } else {
+ BRCMF_INFO(("CLKCTL: turned ON\n"));
+
+#if defined(BCMDBG)
+ if (bus->alp_only != true) {
if (SBSDIO_ALPONLY(clkctl)) {
- DHD_ERROR(("%s: HT Clock should be on.\n",
- __func__));
+ BRCMF_ERROR(("%s: HT Clock should be on.\n",
+ __func__));
}
}
-#endif /* defined (DHD_DEBUG) */
+#endif /* defined (BCMDBG) */
bus->activity = true;
} else {
@@ -637,21 +1139,20 @@ static int dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
- devctl =
- bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
- &err);
+ devctl = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, devctl, &err);
}
bus->clkstate = CLK_SDONLY;
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- clkreq, &err);
- DHD_INFO(("CLKCTL: turned OFF\n"));
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
+ BRCMF_INFO(("CLKCTL: turned OFF\n"));
if (err) {
- DHD_ERROR(("%s: Failed access turning clock off: %d\n",
- __func__, err));
+ BRCMF_ERROR(("%s: Failed access turning clock off:"
+ " %d\n", __func__, err));
return -EBADE;
}
}
@@ -659,105 +1160,31 @@ static int dhdsdio_htclk(dhd_bus_t *bus, bool on, bool pendok)
}
/* Change idle/active SD state */
-static int dhdsdio_sdclk(dhd_bus_t *bus, bool on)
+static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
{
- int err;
- s32 iovalue;
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- DHD_TRACE(("%s: Enter\n", __func__));
-
- if (on) {
- if (bus->idleclock == DHD_IDLE_STOP) {
- /* Turn on clock and restore mode */
- iovalue = 1;
- err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
- &iovalue, sizeof(iovalue), true);
- if (err) {
- DHD_ERROR(("%s: error enabling sd_clock: %d\n",
- __func__, err));
- return -EBADE;
- }
-
- iovalue = bus->sd_mode;
- err = bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
- &iovalue, sizeof(iovalue), true);
- if (err) {
- DHD_ERROR(("%s: error changing sd_mode: %d\n",
- __func__, err));
- return -EBADE;
- }
- } else if (bus->idleclock != DHD_IDLE_ACTIVE) {
- /* Restore clock speed */
- iovalue = bus->sd_divisor;
- err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
- &iovalue, sizeof(iovalue), true);
- if (err) {
- DHD_ERROR(("%s: error restoring sd_divisor: %d\n",
- __func__, err));
- return -EBADE;
- }
- }
+ if (on)
bus->clkstate = CLK_SDONLY;
- } else {
- /* Stop or slow the SD clock itself */
- if ((bus->sd_divisor == -1) || (bus->sd_mode == -1)) {
- DHD_TRACE(("%s: can't idle clock, divisor %d mode %d\n",
- __func__, bus->sd_divisor, bus->sd_mode));
- return -EBADE;
- }
- if (bus->idleclock == DHD_IDLE_STOP) {
- if (sd1idle) {
- /* Change to SD1 mode and turn off clock */
- iovalue = 1;
- err =
- bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL,
- 0, &iovalue,
- sizeof(iovalue), true);
- if (err) {
- DHD_ERROR(("%s: error changing sd_clock: %d\n",
- __func__, err));
- return -EBADE;
- }
- }
-
- iovalue = 0;
- err = bcmsdh_iovar_op(bus->sdh, "sd_clock", NULL, 0,
- &iovalue, sizeof(iovalue), true);
- if (err) {
- DHD_ERROR(("%s: error disabling sd_clock: %d\n",
- __func__, err));
- return -EBADE;
- }
- } else if (bus->idleclock != DHD_IDLE_ACTIVE) {
- /* Set divisor to idle value */
- iovalue = bus->idleclock;
- err = bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
- &iovalue, sizeof(iovalue), true);
- if (err) {
- DHD_ERROR(("%s: error changing sd_divisor: %d\n",
- __func__, err));
- return -EBADE;
- }
- }
+ else
bus->clkstate = CLK_NONE;
- }
return 0;
}
/* Transition SD and backplane clock readiness */
-static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok)
+static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
{
-#ifdef DHD_DEBUG
+#ifdef BCMDBG
uint oldstate = bus->clkstate;
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
/* Early exit if we're already there */
if (bus->clkstate == target) {
if (target == CLK_AVAIL) {
- dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ brcmf_sdbrcm_wd_timer(bus, brcmf_watchdog_ms);
bus->activity = true;
}
return 0;
@@ -767,50 +1194,50 @@ static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok)
case CLK_AVAIL:
/* Make sure SD clock is available */
if (bus->clkstate == CLK_NONE)
- dhdsdio_sdclk(bus, true);
+ brcmf_sdbrcm_sdclk(bus, true);
/* Now request HT Avail on the backplane */
- dhdsdio_htclk(bus, true, pendok);
- dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ brcmf_sdbrcm_htclk(bus, true, pendok);
+ brcmf_sdbrcm_wd_timer(bus, brcmf_watchdog_ms);
bus->activity = true;
break;
case CLK_SDONLY:
/* Remove HT request, or bring up SD clock */
if (bus->clkstate == CLK_NONE)
- dhdsdio_sdclk(bus, true);
+ brcmf_sdbrcm_sdclk(bus, true);
else if (bus->clkstate == CLK_AVAIL)
- dhdsdio_htclk(bus, false, false);
+ brcmf_sdbrcm_htclk(bus, false, false);
else
- DHD_ERROR(("dhdsdio_clkctl: request for %d -> %d\n",
- bus->clkstate, target));
- dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ BRCMF_ERROR(("brcmf_sdbrcm_clkctl: request for %d -> %d"
+ "\n", bus->clkstate, target));
+ brcmf_sdbrcm_wd_timer(bus, brcmf_watchdog_ms);
break;
case CLK_NONE:
/* Make sure to remove HT request */
if (bus->clkstate == CLK_AVAIL)
- dhdsdio_htclk(bus, false, false);
+ brcmf_sdbrcm_htclk(bus, false, false);
/* Now remove the SD clock */
- dhdsdio_sdclk(bus, false);
- dhd_os_wd_timer(bus->dhd, 0);
+ brcmf_sdbrcm_sdclk(bus, false);
+ brcmf_sdbrcm_wd_timer(bus, 0);
break;
}
-#ifdef DHD_DEBUG
- DHD_INFO(("dhdsdio_clkctl: %d -> %d\n", oldstate, bus->clkstate));
-#endif /* DHD_DEBUG */
+#ifdef BCMDBG
+ BRCMF_INFO(("brcmf_sdbrcm_clkctl: %d -> %d\n",
+ oldstate, bus->clkstate));
+#endif /* BCMDBG */
return 0;
}
-int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
+int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
{
- bcmsdh_info_t *sdh = bus->sdh;
- sdpcmd_regs_t *regs = bus->regs;
+ struct brcmf_sdio_card *card = bus->card;
uint retries = 0;
- DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
- (sleep ? "SLEEP" : "WAKE"),
- (bus->sleeping ? "SLEEP" : "WAKE")));
+ BRCMF_INFO(("brcmf_sdbrcm_bussleep: request %s (currently %s)\n",
+ (sleep ? "SLEEP" : "WAKE"),
+ (bus->sleeping ? "SLEEP" : "WAKE")));
/* Done if we're already in the requested state */
if (sleep == bus->sleeping)
@@ -823,27 +1250,31 @@ int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
return -EBUSY;
/* Disable SDIO interrupts (no longer interested) */
- bcmsdh_intr_disable(bus->sdh);
+ brcmf_sdcard_intr_disable(bus->card);
/* Make sure the controller has the bus up */
- dhdsdio_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
/* Tell device to start using OOB wakeup */
- W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
+ w_sdreg32(bus, SMB_USE_OOB,
+ offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
if (retries > retry_limit)
- DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
+ BRCMF_ERROR(("CANNOT SIGNAL CHIP, "
+ "WILL NOT WAKE UP!!\n"));
/* Turn off our contribution to the HT clock request */
- dhdsdio_clkctl(bus, CLK_SDONLY, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
/* Isolate the bus */
if (bus->ci->chip != BCM4329_CHIP_ID
&& bus->ci->chip != BCM4319_CHIP_ID) {
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
- SBSDIO_DEVCTL_PADS_ISO, NULL);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL,
+ SBSDIO_DEVCTL_PADS_ISO, NULL);
}
/* Change state */
@@ -852,82 +1283,55 @@ int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep)
} else {
/* Waking up: bus power up is ok, set local state */
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- 0, NULL);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
/* Force pad isolation off if possible
(in case power never toggled) */
if ((bus->ci->buscoretype == PCMCIA_CORE_ID)
&& (bus->ci->buscorerev >= 10))
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0,
- NULL);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, 0, NULL);
/* Make sure the controller has the bus up */
- dhdsdio_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
/* Send misc interrupt to indicate OOB not needed */
- W_SDREG(0, &regs->tosbmailboxdata, retries);
+ w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, tosbmailboxdata),
+ &retries);
if (retries <= retry_limit)
- W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
+ w_sdreg32(bus, SMB_DEV_INT,
+ offsetof(struct sdpcmd_regs, tosbmailbox),
+ &retries);
if (retries > retry_limit)
- DHD_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
+ BRCMF_ERROR(("CANNOT SIGNAL CHIP TO CLEAR OOB!!\n"));
/* Make sure we have SD bus access */
- dhdsdio_clkctl(bus, CLK_SDONLY, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
/* Change state */
bus->sleeping = false;
/* Enable interrupts again */
- if (bus->intr && (bus->dhd->busstate == DHD_BUS_DATA)) {
+ if (bus->intr && (bus->drvr->busstate == BRCMF_BUS_DATA)) {
bus->intdis = false;
- bcmsdh_intr_enable(bus->sdh);
+ brcmf_sdcard_intr_enable(bus->card);
}
}
return 0;
}
-#if defined(OOB_INTR_ONLY)
-void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
-{
-#if defined(HW_OOB)
- bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
-#else
- sdpcmd_regs_t *regs = bus->regs;
- uint retries = 0;
-
- dhdsdio_clkctl(bus, CLK_AVAIL, false);
- if (enable == true) {
-
- /* Tell device to start using OOB wakeup */
- W_SDREG(SMB_USE_OOB, &regs->tosbmailbox, retries);
- if (retries > retry_limit)
- DHD_ERROR(("CANNOT SIGNAL CHIP, WILL NOT WAKE UP!!\n"));
-
- } else {
- /* Send misc interrupt to indicate OOB not needed */
- W_SDREG(0, &regs->tosbmailboxdata, retries);
- if (retries <= retry_limit)
- W_SDREG(SMB_DEV_INT, &regs->tosbmailbox, retries);
- }
-
- /* Turn off our contribution to the HT clock request */
- dhdsdio_clkctl(bus, CLK_SDONLY, false);
-#endif /* !defined(HW_OOB) */
-}
-#endif /* defined(OOB_INTR_ONLY) */
-
#define BUS_WAKE(bus) \
do { \
if ((bus)->sleeping) \
- dhdsdio_bussleep((bus), false); \
+ brcmf_sdbrcm_bussleep((bus), false); \
} while (0);
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
-static int dhdsdio_txpkt(dhd_bus_t *bus, struct sk_buff *pkt, uint chan,
+static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt, uint chan,
bool free_pkt)
{
int ret;
@@ -935,15 +1339,15 @@ static int dhdsdio_txpkt(dhd_bus_t *bus, struct sk_buff *pkt, uint chan,
u16 len, pad = 0;
u32 swheader;
uint retries = 0;
- bcmsdh_info_t *sdh;
+ struct brcmf_sdio_card *card;
struct sk_buff *new;
int i;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- sdh = bus->sdh;
+ card = bus->card;
- if (bus->dhd->dongle_reset) {
+ if (bus->drvr->dongle_reset) {
ret = -EPERM;
goto done;
}
@@ -951,40 +1355,39 @@ static int dhdsdio_txpkt(dhd_bus_t *bus, struct sk_buff *pkt, uint chan,
frame = (u8 *) (pkt->data);
/* Add alignment padding, allocate new packet if needed */
- pad = ((unsigned long)frame % DHD_SDALIGN);
+ pad = ((unsigned long)frame % BRCMF_SDALIGN);
if (pad) {
if (skb_headroom(pkt) < pad) {
- DHD_INFO(("%s: insufficient headroom %d for %d pad\n",
- __func__, skb_headroom(pkt), pad));
- bus->dhd->tx_realloc++;
- new = bcm_pkt_buf_get_skb(pkt->len + DHD_SDALIGN);
+ BRCMF_INFO(("%s: insufficient headroom %d for %d pad\n",
+ __func__, skb_headroom(pkt), pad));
+ bus->drvr->tx_realloc++;
+ new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN);
if (!new) {
- DHD_ERROR(("%s: couldn't allocate new %d-byte "
- "packet\n",
- __func__, pkt->len + DHD_SDALIGN));
+ BRCMF_ERROR(("%s: couldn't allocate new "
+ "%d-byte packet\n", __func__,
+ pkt->len + BRCMF_SDALIGN));
ret = -ENOMEM;
goto done;
}
- PKTALIGN(new, pkt->len, DHD_SDALIGN);
+ PKTALIGN(new, pkt->len, BRCMF_SDALIGN);
memcpy(new->data, pkt->data, pkt->len);
if (free_pkt)
- bcm_pkt_buf_free_skb(pkt);
+ brcmu_pkt_buf_free_skb(pkt);
/* free the pkt if canned one is not used */
free_pkt = true;
pkt = new;
frame = (u8 *) (pkt->data);
- ASSERT(((unsigned long)frame % DHD_SDALIGN) == 0);
+ /* precondition: (frame % BRCMF_SDALIGN) == 0) */
pad = 0;
} else {
skb_push(pkt, pad);
frame = (u8 *) (pkt->data);
-
- ASSERT((pad + SDPCM_HDRLEN) <= (int)(pkt->len));
+ /* precondition: pad + SDPCM_HDRLEN <= pkt->len */
memset(frame, 0, pad + SDPCM_HDRLEN);
}
}
- ASSERT(pad < DHD_SDALIGN);
+ /* precondition: pad < BRCMF_SDALIGN */
/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
len = (u16) (pkt->len);
@@ -1000,14 +1403,14 @@ static int dhdsdio_txpkt(dhd_bus_t *bus, struct sk_buff *pkt, uint chan,
put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
-#ifdef DHD_DEBUG
+#ifdef BCMDBG
tx_packets[pkt->priority]++;
- if (DHD_BYTES_ON() &&
- (((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
- (DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
+ if (BRCMF_BYTES_ON() &&
+ (((BRCMF_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
+ (BRCMF_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
printk(KERN_DEBUG "Tx Frame:\n");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, frame, len);
- } else if (DHD_HDRS_ON()) {
+ } else if (BRCMF_HDRS_ON()) {
printk(KERN_DEBUG "TxHdr:\n");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
frame, min_t(u16, len, 16));
@@ -1018,53 +1421,40 @@ static int dhdsdio_txpkt(dhd_bus_t *bus, struct sk_buff *pkt, uint chan,
if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
u16 pad = bus->blocksize - (len % bus->blocksize);
if ((pad <= bus->roundup) && (pad < bus->blocksize))
-#ifdef NOTUSED
- if (pad <= skb_tailroom(pkt))
-#endif /* NOTUSED */
len += pad;
- } else if (len % DHD_SDALIGN) {
- len += DHD_SDALIGN - (len % DHD_SDALIGN);
+ } else if (len % BRCMF_SDALIGN) {
+ len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
}
/* Some controllers have trouble with odd bytes -- round to even */
if (forcealign && (len & (ALIGNMENT - 1))) {
-#ifdef NOTUSED
- if (skb_tailroom(pkt))
-#endif
len = roundup(len, ALIGNMENT);
-#ifdef NOTUSED
- else
- DHD_ERROR(("%s: sending unrounded %d-byte packet\n",
- __func__, len));
-#endif
}
do {
- ret =
- dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2,
- F2SYNC, frame, len, pkt, NULL, NULL);
+ ret = brcmf_sdbrcm_send_buf(bus, brcmf_sdcard_cur_sbwad(card),
+ SDIO_FUNC_2, F2SYNC, frame, len, pkt, NULL, NULL);
bus->f2txdata++;
- ASSERT(ret != -BCME_PENDING);
if (ret < 0) {
/* On failure, abort the command
and terminate the frame */
- DHD_INFO(("%s: sdio error %d, abort command and "
- "terminate frame.\n", __func__, ret));
+ BRCMF_INFO(("%s: sdio error %d, abort command and "
+ "terminate frame.\n", __func__, ret));
bus->tx_sderrs++;
- bcmsdh_abort(sdh, SDIO_FUNC_2);
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1,
+ brcmf_sdcard_abort(card, SDIO_FUNC_2);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM,
NULL);
bus->f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ hi = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
SBSDIO_FUNC1_WFRAMEBCHI,
NULL);
- lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ lo = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
SBSDIO_FUNC1_WFRAMEBCLO,
NULL);
bus->f1regdata += 2;
@@ -1081,22 +1471,22 @@ static int dhdsdio_txpkt(dhd_bus_t *bus, struct sk_buff *pkt, uint chan,
done:
/* restore pkt buffer pointer before calling tx complete routine */
skb_pull(pkt, SDPCM_HDRLEN + pad);
- dhd_os_sdunlock(bus->dhd);
- dhd_txcomplete(bus->dhd, pkt, ret != 0);
- dhd_os_sdlock(bus->dhd);
+ brcmf_sdbrcm_sdunlock(bus);
+ brcmf_txcomplete(bus->drvr, pkt, ret != 0);
+ brcmf_sdbrcm_sdlock(bus);
if (free_pkt)
- bcm_pkt_buf_free_skb(pkt);
+ brcmu_pkt_buf_free_skb(pkt);
return ret;
}
-int dhd_bus_txdata(struct dhd_bus *bus, struct sk_buff *pkt)
+int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
{
int ret = -EBADE;
uint datalen, prec;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
datalen = pkt->len;
@@ -1116,79 +1506,80 @@ int dhd_bus_txdata(struct dhd_bus *bus, struct sk_buff *pkt)
/* Add space for the header */
skb_push(pkt, SDPCM_HDRLEN);
- ASSERT(IS_ALIGNED((unsigned long)(pkt->data), 2));
+ /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
prec = PRIO2PREC((pkt->priority & PRIOMASK));
/* Check for existing queue, current flow-control,
pending event, or pending clock */
- if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq)
+ if (brcmf_deferred_tx || bus->fcstate || pktq_len(&bus->txq)
|| bus->dpc_sched || (!DATAOK(bus))
|| (bus->flowcontrol & NBITVAL(prec))
|| (bus->clkstate != CLK_AVAIL)) {
- DHD_TRACE(("%s: deferring pktq len %d\n", __func__,
- pktq_len(&bus->txq)));
+ BRCMF_TRACE(("%s: deferring pktq len %d\n", __func__,
+ pktq_len(&bus->txq)));
bus->fcqueued++;
/* Priority based enq */
- dhd_os_sdlock_txq(bus->dhd);
- if (dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec) == false) {
+ spin_lock_bh(&bus->txqlock);
+ if (brcmf_c_prec_enq(bus->drvr, &bus->txq, pkt, prec) == false) {
skb_pull(pkt, SDPCM_HDRLEN);
- dhd_txcomplete(bus->dhd, pkt, false);
- bcm_pkt_buf_free_skb(pkt);
- DHD_ERROR(("%s: out of bus->txq !!!\n", __func__));
+ brcmf_txcomplete(bus->drvr, pkt, false);
+ brcmu_pkt_buf_free_skb(pkt);
+ BRCMF_ERROR(("%s: out of bus->txq !!!\n", __func__));
ret = -ENOSR;
} else {
ret = 0;
}
- dhd_os_sdunlock_txq(bus->dhd);
+ spin_unlock_bh(&bus->txqlock);
if (pktq_len(&bus->txq) >= TXHI)
- dhd_txflowcontrol(bus->dhd, 0, ON);
+ brcmf_txflowcontrol(bus->drvr, 0, ON);
-#ifdef DHD_DEBUG
+#ifdef BCMDBG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
qcount[prec] = pktq_plen(&bus->txq, prec);
#endif
/* Schedule DPC if needed to send queued packet(s) */
- if (dhd_deferred_tx && !bus->dpc_sched) {
+ if (brcmf_deferred_tx && !bus->dpc_sched) {
bus->dpc_sched = true;
- dhd_sched_dpc(bus->dhd);
+ brcmf_sdbrcm_sched_dpc(bus);
}
} else {
/* Lock: we're about to use shared data/code (and SDIO) */
- dhd_os_sdlock(bus->dhd);
+ brcmf_sdbrcm_sdlock(bus);
/* Otherwise, send it now */
BUS_WAKE(bus);
/* Make sure back plane ht clk is on, no pending allowed */
- dhdsdio_clkctl(bus, CLK_AVAIL, true);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true);
#ifndef SDTEST
- DHD_TRACE(("%s: calling txpkt\n", __func__));
- ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
+ BRCMF_TRACE(("%s: calling txpkt\n", __func__));
+ ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
#else
- ret = dhdsdio_txpkt(bus, pkt,
+ ret = brcmf_sdbrcm_txpkt(bus, pkt,
(bus->ext_loop ? SDPCM_TEST_CHANNEL :
SDPCM_DATA_CHANNEL), true);
#endif
if (ret)
- bus->dhd->tx_errors++;
+ bus->drvr->tx_errors++;
else
- bus->dhd->dstats.tx_bytes += datalen;
+ bus->drvr->dstats.tx_bytes += datalen;
- if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ if (bus->idletime == BRCMF_IDLE_IMMEDIATE &&
+ !bus->dpc_sched) {
bus->activity = false;
- dhdsdio_clkctl(bus, CLK_NONE, true);
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
}
- dhd_os_sdunlock(bus->dhd);
+ brcmf_sdbrcm_sdunlock(bus);
}
return ret;
}
-static uint dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
+static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
{
struct sk_buff *pkt;
u32 intstatus = 0;
@@ -1198,42 +1589,43 @@ static uint dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
uint datalen;
u8 tx_prec_map;
- dhd_pub_t *dhd = bus->dhd;
- sdpcmd_regs_t *regs = bus->regs;
+ struct brcmf_pub *drvr = bus->drvr;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
tx_prec_map = ~bus->flowcontrol;
/* Send frames until the limit or some other event */
for (cnt = 0; (cnt < maxframes) && DATAOK(bus); cnt++) {
- dhd_os_sdlock_txq(bus->dhd);
- pkt = bcm_pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
+ spin_lock_bh(&bus->txqlock);
+ pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
if (pkt == NULL) {
- dhd_os_sdunlock_txq(bus->dhd);
+ spin_unlock_bh(&bus->txqlock);
break;
}
- dhd_os_sdunlock_txq(bus->dhd);
+ spin_unlock_bh(&bus->txqlock);
datalen = pkt->len - SDPCM_HDRLEN;
#ifndef SDTEST
- ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
+ ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
#else
- ret = dhdsdio_txpkt(bus, pkt,
+ ret = brcmf_sdbrcm_txpkt(bus, pkt,
(bus->ext_loop ? SDPCM_TEST_CHANNEL :
SDPCM_DATA_CHANNEL), true);
#endif
if (ret)
- bus->dhd->tx_errors++;
+ bus->drvr->tx_errors++;
else
- bus->dhd->dstats.tx_bytes += datalen;
+ bus->drvr->dstats.tx_bytes += datalen;
/* In poll mode, need to check for other events */
if (!bus->intr && cnt) {
/* Check device status, signal pending interrupt */
- R_SDREG(intstatus, &regs->intstatus, retries);
+ r_sdreg32(bus, &intstatus,
+ offsetof(struct sdpcmd_regs, intstatus),
+ &retries);
bus->f2txdata++;
- if (bcmsdh_regfail(bus->sdh))
+ if (brcmf_sdcard_regfail(bus->card))
break;
if (intstatus & bus->hostintmask)
bus->ipend = true;
@@ -1241,27 +1633,28 @@ static uint dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
}
/* Deflow-control stack if needed */
- if (dhd->up && (dhd->busstate == DHD_BUS_DATA) &&
- dhd->txoff && (pktq_len(&bus->txq) < TXLOW))
- dhd_txflowcontrol(dhd, 0, OFF);
+ if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) &&
+ drvr->txoff && (pktq_len(&bus->txq) < TXLOW))
+ brcmf_txflowcontrol(drvr, 0, OFF);
return cnt;
}
-int dhd_bus_txctl(struct dhd_bus *bus, unsigned char *msg, uint msglen)
+int
+brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
{
u8 *frame;
u16 len;
u32 swheader;
uint retries = 0;
- bcmsdh_info_t *sdh = bus->sdh;
+ struct brcmf_sdio_card *card = bus->card;
u8 doff = 0;
int ret = -1;
int i;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- if (bus->dhd->dongle_reset)
+ if (bus->drvr->dongle_reset)
return -EIO;
/* Back the pointer to make a room for bus header */
@@ -1269,15 +1662,15 @@ int dhd_bus_txctl(struct dhd_bus *bus, unsigned char *msg, uint msglen)
len = (msglen += SDPCM_HDRLEN);
/* Add alignment padding (optional for ctl frames) */
- if (dhd_alignctl) {
- doff = ((unsigned long)frame % DHD_SDALIGN);
+ if (brcmf_alignctl) {
+ doff = ((unsigned long)frame % BRCMF_SDALIGN);
if (doff) {
frame -= doff;
len += doff;
msglen += doff;
memset(frame, 0, doff + SDPCM_HDRLEN);
}
- ASSERT(doff < DHD_SDALIGN);
+ /* precondition: doff < BRCMF_SDALIGN */
}
doff += SDPCM_HDRLEN;
@@ -1286,23 +1679,23 @@ int dhd_bus_txctl(struct dhd_bus *bus, unsigned char *msg, uint msglen)
u16 pad = bus->blocksize - (len % bus->blocksize);
if ((pad <= bus->roundup) && (pad < bus->blocksize))
len += pad;
- } else if (len % DHD_SDALIGN) {
- len += DHD_SDALIGN - (len % DHD_SDALIGN);
+ } else if (len % BRCMF_SDALIGN) {
+ len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
}
/* Satisfy length-alignment requirements */
if (forcealign && (len & (ALIGNMENT - 1)))
len = roundup(len, ALIGNMENT);
- ASSERT(IS_ALIGNED((unsigned long)frame, 2));
+ /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
/* Need to lock here to protect txseq and SDIO tx calls */
- dhd_os_sdlock(bus->dhd);
+ brcmf_sdbrcm_sdlock(bus);
BUS_WAKE(bus);
/* Make sure backplane clock is on */
- dhdsdio_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
*(u16 *) frame = cpu_to_le16((u16) msglen);
@@ -1318,31 +1711,33 @@ int dhd_bus_txctl(struct dhd_bus *bus, unsigned char *msg, uint msglen)
put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
if (!DATAOK(bus)) {
- DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
- __func__, bus->tx_max, bus->tx_seq));
+ BRCMF_INFO(("%s: No bus credit bus->tx_max %d,"
+ " bus->tx_seq %d\n", __func__,
+ bus->tx_max, bus->tx_seq));
bus->ctrl_frame_stat = true;
/* Send from dpc */
bus->ctrl_frame_buf = frame;
bus->ctrl_frame_len = len;
- dhd_wait_for_event(bus->dhd, &bus->ctrl_frame_stat);
+ brcmf_sdbrcm_wait_for_event(bus, &bus->ctrl_frame_stat);
if (bus->ctrl_frame_stat == false) {
- DHD_INFO(("%s: ctrl_frame_stat == false\n", __func__));
+ BRCMF_INFO(("%s: ctrl_frame_stat == false\n",
+ __func__));
ret = 0;
} else {
- DHD_INFO(("%s: ctrl_frame_stat == true\n", __func__));
+ BRCMF_INFO(("%s: ctrl_frame_stat == true\n", __func__));
ret = -1;
}
}
if (ret == -1) {
-#ifdef DHD_DEBUG
- if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_CTL_ON()) {
printk(KERN_DEBUG "Tx Frame:\n");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
frame, len);
- } else if (DHD_HDRS_ON()) {
+ } else if (BRCMF_HDRS_ON()) {
printk(KERN_DEBUG "TxHdr:\n");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
frame, min_t(u16, len, 16));
@@ -1351,35 +1746,35 @@ int dhd_bus_txctl(struct dhd_bus *bus, unsigned char *msg, uint msglen)
do {
bus->ctrl_frame_stat = false;
- ret =
- dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh),
- SDIO_FUNC_2, F2SYNC, frame, len,
- NULL, NULL, NULL);
-
- ASSERT(ret != -BCME_PENDING);
+ ret = brcmf_sdbrcm_send_buf(bus,
+ brcmf_sdcard_cur_sbwad(card), SDIO_FUNC_2,
+ F2SYNC, frame, len, NULL, NULL, NULL);
if (ret < 0) {
/* On failure, abort the command and
terminate the frame */
- DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
- __func__, ret));
+ BRCMF_INFO(("%s: sdio error %d, abort command "
+ "and terminate frame.\n",
+ __func__, ret));
bus->tx_sderrs++;
- bcmsdh_abort(sdh, SDIO_FUNC_2);
+ brcmf_sdcard_abort(card, SDIO_FUNC_2);
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1,
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
SBSDIO_FUNC1_FRAMECTRL,
SFC_WF_TERM, NULL);
bus->f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ hi = brcmf_sdcard_cfg_read(card,
+ SDIO_FUNC_1,
SBSDIO_FUNC1_WFRAMEBCHI,
NULL);
- lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ lo = brcmf_sdcard_cfg_read(card,
+ SDIO_FUNC_1,
SBSDIO_FUNC1_WFRAMEBCLO,
- NULL);
+ NULL);
bus->f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
@@ -1393,67 +1788,67 @@ int dhd_bus_txctl(struct dhd_bus *bus, unsigned char *msg, uint msglen)
} while ((ret < 0) && retries++ < TXRETRIES);
}
- if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && !bus->dpc_sched) {
bus->activity = false;
- dhdsdio_clkctl(bus, CLK_NONE, true);
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
}
- dhd_os_sdunlock(bus->dhd);
+ brcmf_sdbrcm_sdunlock(bus);
if (ret)
- bus->dhd->tx_ctlerrs++;
+ bus->drvr->tx_ctlerrs++;
else
- bus->dhd->tx_ctlpkts++;
+ bus->drvr->tx_ctlpkts++;
return ret ? -EIO : 0;
}
-int dhd_bus_rxctl(struct dhd_bus *bus, unsigned char *msg, uint msglen)
+int brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
{
int timeleft;
uint rxlen = 0;
bool pending;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- if (bus->dhd->dongle_reset)
+ if (bus->drvr->dongle_reset)
return -EIO;
/* Wait until control frame is available */
- timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
+ timeleft = brcmf_os_ioctl_resp_wait(bus->drvr, &bus->rxlen, &pending);
- dhd_os_sdlock(bus->dhd);
+ brcmf_sdbrcm_sdlock(bus);
rxlen = bus->rxlen;
memcpy(msg, bus->rxctl, min(msglen, rxlen));
bus->rxlen = 0;
- dhd_os_sdunlock(bus->dhd);
+ brcmf_sdbrcm_sdunlock(bus);
if (rxlen) {
- DHD_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
- __func__, rxlen, msglen));
+ BRCMF_CTL(("%s: resumed on rxctl frame, got %d expected %d\n",
+ __func__, rxlen, msglen));
} else if (timeleft == 0) {
- DHD_ERROR(("%s: resumed on timeout\n", __func__));
-#ifdef DHD_DEBUG
- dhd_os_sdlock(bus->dhd);
- dhdsdio_checkdied(bus, NULL, 0);
- dhd_os_sdunlock(bus->dhd);
-#endif /* DHD_DEBUG */
+ BRCMF_ERROR(("%s: resumed on timeout\n", __func__));
+#ifdef BCMDBG
+ brcmf_sdbrcm_sdlock(bus);
+ brcmf_sdbrcm_checkdied(bus, NULL, 0);
+ brcmf_sdbrcm_sdunlock(bus);
+#endif /* BCMDBG */
} else if (pending == true) {
- DHD_CTL(("%s: cancelled\n", __func__));
+ BRCMF_CTL(("%s: cancelled\n", __func__));
return -ERESTARTSYS;
} else {
- DHD_CTL(("%s: resumed for unknown reason?\n", __func__));
-#ifdef DHD_DEBUG
- dhd_os_sdlock(bus->dhd);
- dhdsdio_checkdied(bus, NULL, 0);
- dhd_os_sdunlock(bus->dhd);
-#endif /* DHD_DEBUG */
+ BRCMF_CTL(("%s: resumed for unknown reason?\n", __func__));
+#ifdef BCMDBG
+ brcmf_sdbrcm_sdlock(bus);
+ brcmf_sdbrcm_checkdied(bus, NULL, 0);
+ brcmf_sdbrcm_sdunlock(bus);
+#endif /* BCMDBG */
}
if (rxlen)
- bus->dhd->rx_ctlpkts++;
+ bus->drvr->rx_ctlpkts++;
else
- bus->dhd->rx_ctlerrs++;
+ bus->drvr->rx_ctlerrs++;
return rxlen ? (int)rxlen : -ETIMEDOUT;
}
@@ -1467,8 +1862,10 @@ enum {
IOV_SDCIS,
IOV_MEMBYTES,
IOV_MEMSIZE,
-#ifdef DHD_DEBUG
+#ifdef BCMDBG
IOV_CHECKDIED,
+ IOV_CONS,
+ IOV_DCONSOLE_POLL,
#endif
IOV_DOWNLOAD,
IOV_FORCEEVEN,
@@ -1491,10 +1888,11 @@ enum {
IOV_IDLECLOCK,
IOV_SD1IDLE,
IOV_SLEEP,
+ IOV_WDTICK,
IOV_VARS
};
-const bcm_iovar_t dhdsdio_iovars[] = {
+const struct brcmu_iovar brcmf_sdio_iovars[] = {
{"intr", IOV_INTR, 0, IOVT_BOOL, 0},
{"sleep", IOV_SLEEP, 0, IOVT_BOOL, 0},
{"pollrate", IOV_POLLRATE, 0, IOVT_UINT32, 0},
@@ -1511,12 +1909,17 @@ const bcm_iovar_t dhdsdio_iovars[] = {
{"alignctl", IOV_ALIGNCTL, 0, IOVT_BOOL, 0},
{"sdalign", IOV_SDALIGN, 0, IOVT_BOOL, 0},
{"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0},
-#ifdef DHD_DEBUG
- {"sdreg", IOV_SDREG, 0, IOVT_BUFFER, sizeof(sdreg_t)}
+ {"wdtick", IOV_WDTICK, 0, IOVT_UINT32, 0},
+#ifdef BCMDBG
+ {"cons", IOV_CONS, 0, IOVT_BUFFER, 0}
+ ,
+ {"dconpoll", IOV_DCONSOLE_POLL, 0, IOVT_UINT32, 0}
+ ,
+ {"sdreg", IOV_SDREG, 0, IOVT_BUFFER, sizeof(struct brcmf_sdreg)}
,
- {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t)}
+ {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(struct brcmf_sdreg)}
,
- {"sd_cis", IOV_SDCIS, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN}
+ {"sd_cis", IOV_SDCIS, 0, IOVT_BUFFER, BRCMF_IOCTL_MAXLEN}
,
{"forcealign", IOV_FORCEEVEN, 0, IOVT_BOOL, 0}
,
@@ -1528,15 +1931,13 @@ const bcm_iovar_t dhdsdio_iovars[] = {
,
{"cpu", IOV_CPU, 0, IOVT_BOOL, 0}
,
-#ifdef DHD_DEBUG
{"checkdied", IOV_CHECKDIED, 0, IOVT_BUFFER, 0}
,
-#endif /* DHD_DEBUG */
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
#ifdef SDTEST
{"extloop", IOV_EXTLOOP, 0, IOVT_BOOL, 0}
,
- {"pktgen", IOV_PKTGEN, 0, IOVT_BUFFER, sizeof(dhd_pktgen_t)}
+ {"pktgen", IOV_PKTGEN, 0, IOVT_BUFFER, sizeof(struct brcmf_pktgen)}
,
#endif /* SDTEST */
@@ -1544,122 +1945,122 @@ const bcm_iovar_t dhdsdio_iovars[] = {
};
static void
-dhd_dump_pct(struct bcmstrbuf *strbuf, char *desc, uint num, uint div)
+brcmf_dump_pct(struct brcmu_strbuf *strbuf, char *desc, uint num, uint div)
{
uint q1, q2;
if (!div) {
- bcm_bprintf(strbuf, "%s N/A", desc);
+ brcmu_bprintf(strbuf, "%s N/A", desc);
} else {
q1 = num / div;
q2 = (100 * (num - (q1 * div))) / div;
- bcm_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
+ brcmu_bprintf(strbuf, "%s %d.%02d", desc, q1, q2);
}
}
-void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+void brcmf_sdbrcm_bus_dump(struct brcmf_pub *drvr, struct brcmu_strbuf *strbuf)
{
- dhd_bus_t *bus = dhdp->bus;
+ struct brcmf_bus *bus = drvr->bus;
- bcm_bprintf(strbuf, "Bus SDIO structure:\n");
- bcm_bprintf(strbuf,
+ brcmu_bprintf(strbuf, "Bus SDIO structure:\n");
+ brcmu_bprintf(strbuf,
"hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
- bcm_bprintf(strbuf,
+ brcmu_bprintf(strbuf,
"fcstate %d qlen %d tx_seq %d, max %d, rxskip %d rxlen %d rx_seq %d\n",
bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max,
bus->rxskip, bus->rxlen, bus->rx_seq);
- bcm_bprintf(strbuf, "intr %d intrcount %d lastintrs %d spurious %d\n",
+ brcmu_bprintf(strbuf, "intr %d intrcount %d lastintrs %d spurious %d\n",
bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
- bcm_bprintf(strbuf, "pollrate %d pollcnt %d regfails %d\n",
+ brcmu_bprintf(strbuf, "pollrate %d pollcnt %d regfails %d\n",
bus->pollrate, bus->pollcnt, bus->regfails);
- bcm_bprintf(strbuf, "\nAdditional counters:\n");
- bcm_bprintf(strbuf,
+ brcmu_bprintf(strbuf, "\nAdditional counters:\n");
+ brcmu_bprintf(strbuf,
"tx_sderrs %d fcqueued %d rxrtx %d rx_toolong %d rxc_errors %d\n",
bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
bus->rxc_errors);
- bcm_bprintf(strbuf, "rx_hdrfail %d badhdr %d badseq %d\n",
+ brcmu_bprintf(strbuf, "rx_hdrfail %d badhdr %d badseq %d\n",
bus->rx_hdrfail, bus->rx_badhdr, bus->rx_badseq);
- bcm_bprintf(strbuf, "fc_rcvd %d, fc_xoff %d, fc_xon %d\n", bus->fc_rcvd,
- bus->fc_xoff, bus->fc_xon);
- bcm_bprintf(strbuf, "rxglomfail %d, rxglomframes %d, rxglompkts %d\n",
+ brcmu_bprintf(strbuf, "fc_rcvd %d, fc_xoff %d, fc_xon %d\n",
+ bus->fc_rcvd, bus->fc_xoff, bus->fc_xon);
+ brcmu_bprintf(strbuf, "rxglomfail %d, rxglomframes %d, rxglompkts %d\n",
bus->rxglomfail, bus->rxglomframes, bus->rxglompkts);
- bcm_bprintf(strbuf, "f2rx (hdrs/data) %d (%d/%d), f2tx %d f1regs %d\n",
- (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs,
- bus->f2rxdata, bus->f2txdata, bus->f1regdata);
+ brcmu_bprintf(strbuf, "f2rx (hdrs/data) %d (%d/%d), f2tx %d f1regs"
+ " %d\n",
+ (bus->f2rxhdrs + bus->f2rxdata), bus->f2rxhdrs,
+ bus->f2rxdata, bus->f2txdata, bus->f1regdata);
{
- dhd_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->dhd->rx_packets,
+ brcmf_dump_pct(strbuf, "\nRx: pkts/f2rd", bus->drvr->rx_packets,
(bus->f2rxhdrs + bus->f2rxdata));
- dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->rx_packets,
+ brcmf_dump_pct(strbuf, ", pkts/f1sd", bus->drvr->rx_packets,
bus->f1regdata);
- dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->rx_packets,
+ brcmf_dump_pct(strbuf, ", pkts/sd", bus->drvr->rx_packets,
(bus->f2rxhdrs + bus->f2rxdata + bus->f1regdata));
- dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->rx_packets,
+ brcmf_dump_pct(strbuf, ", pkts/int", bus->drvr->rx_packets,
bus->intrcount);
- bcm_bprintf(strbuf, "\n");
+ brcmu_bprintf(strbuf, "\n");
- dhd_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
- bus->dhd->rx_packets);
- dhd_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts,
+ brcmf_dump_pct(strbuf, "Rx: glom pct", (100 * bus->rxglompkts),
+ bus->drvr->rx_packets);
+ brcmf_dump_pct(strbuf, ", pkts/glom", bus->rxglompkts,
bus->rxglomframes);
- bcm_bprintf(strbuf, "\n");
+ brcmu_bprintf(strbuf, "\n");
- dhd_dump_pct(strbuf, "Tx: pkts/f2wr", bus->dhd->tx_packets,
+ brcmf_dump_pct(strbuf, "Tx: pkts/f2wr", bus->drvr->tx_packets,
bus->f2txdata);
- dhd_dump_pct(strbuf, ", pkts/f1sd", bus->dhd->tx_packets,
+ brcmf_dump_pct(strbuf, ", pkts/f1sd", bus->drvr->tx_packets,
bus->f1regdata);
- dhd_dump_pct(strbuf, ", pkts/sd", bus->dhd->tx_packets,
+ brcmf_dump_pct(strbuf, ", pkts/sd", bus->drvr->tx_packets,
(bus->f2txdata + bus->f1regdata));
- dhd_dump_pct(strbuf, ", pkts/int", bus->dhd->tx_packets,
+ brcmf_dump_pct(strbuf, ", pkts/int", bus->drvr->tx_packets,
bus->intrcount);
- bcm_bprintf(strbuf, "\n");
+ brcmu_bprintf(strbuf, "\n");
- dhd_dump_pct(strbuf, "Total: pkts/f2rw",
- (bus->dhd->tx_packets + bus->dhd->rx_packets),
+ brcmf_dump_pct(strbuf, "Total: pkts/f2rw",
+ (bus->drvr->tx_packets + bus->drvr->rx_packets),
(bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata));
- dhd_dump_pct(strbuf, ", pkts/f1sd",
- (bus->dhd->tx_packets + bus->dhd->rx_packets),
+ brcmf_dump_pct(strbuf, ", pkts/f1sd",
+ (bus->drvr->tx_packets + bus->drvr->rx_packets),
bus->f1regdata);
- dhd_dump_pct(strbuf, ", pkts/sd",
- (bus->dhd->tx_packets + bus->dhd->rx_packets),
+ brcmf_dump_pct(strbuf, ", pkts/sd",
+ (bus->drvr->tx_packets + bus->drvr->rx_packets),
(bus->f2txdata + bus->f2rxhdrs + bus->f2rxdata +
bus->f1regdata));
- dhd_dump_pct(strbuf, ", pkts/int",
- (bus->dhd->tx_packets + bus->dhd->rx_packets),
+ brcmf_dump_pct(strbuf, ", pkts/int",
+ (bus->drvr->tx_packets + bus->drvr->rx_packets),
bus->intrcount);
- bcm_bprintf(strbuf, "\n\n");
+ brcmu_bprintf(strbuf, "\n\n");
}
#ifdef SDTEST
if (bus->pktgen_count) {
- bcm_bprintf(strbuf, "pktgen config and count:\n");
- bcm_bprintf(strbuf,
+ brcmu_bprintf(strbuf, "pktgen config and count:\n");
+ brcmu_bprintf(strbuf,
"freq %d count %d print %d total %d min %d len %d\n",
bus->pktgen_freq, bus->pktgen_count,
bus->pktgen_print, bus->pktgen_total,
bus->pktgen_minlen, bus->pktgen_maxlen);
- bcm_bprintf(strbuf, "send attempts %d rcvd %d fail %d\n",
+ brcmu_bprintf(strbuf, "send attempts %d rcvd %d fail %d\n",
bus->pktgen_sent, bus->pktgen_rcvd,
bus->pktgen_fail);
}
#endif /* SDTEST */
-#ifdef DHD_DEBUG
- bcm_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
- bus->dpc_sched,
- (bcmsdh_intr_pending(bus->sdh) ? " " : " not "));
- bcm_bprintf(strbuf, "blocksize %d roundup %d\n", bus->blocksize,
+#ifdef BCMDBG
+ brcmu_bprintf(strbuf, "dpc_sched %d host interrupt%spending\n",
+ bus->dpc_sched, " not ");
+ brcmu_bprintf(strbuf, "blocksize %d roundup %d\n", bus->blocksize,
bus->roundup);
-#endif /* DHD_DEBUG */
- bcm_bprintf(strbuf,
+#endif /* BCMDBG */
+ brcmu_bprintf(strbuf,
"clkstate %d activity %d idletime %d idlecount %d sleeping %d\n",
bus->clkstate, bus->activity, bus->idletime, bus->idlecount,
bus->sleeping);
}
-void dhd_bus_clearcounts(dhd_pub_t *dhdp)
+void brcmf_bus_clearcounts(struct brcmf_pub *drvr)
{
- dhd_bus_t *bus = (dhd_bus_t *) dhdp->bus;
+ struct brcmf_bus *bus = (struct brcmf_bus *) drvr->bus;
bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
@@ -1670,11 +2071,11 @@ void dhd_bus_clearcounts(dhd_pub_t *dhdp)
}
#ifdef SDTEST
-static int dhdsdio_pktgen_get(dhd_bus_t *bus, u8 *arg)
+static int brcmf_sdbrcm_pktgen_get(struct brcmf_bus *bus, u8 *arg)
{
- dhd_pktgen_t pktgen;
+ struct brcmf_pktgen pktgen;
- pktgen.version = DHD_PKTGEN_VERSION;
+ pktgen.version = BRCMF_PKTGEN_VERSION;
pktgen.freq = bus->pktgen_freq;
pktgen.count = bus->pktgen_count;
pktgen.print = bus->pktgen_print;
@@ -1692,13 +2093,13 @@ static int dhdsdio_pktgen_get(dhd_bus_t *bus, u8 *arg)
return 0;
}
-static int dhdsdio_pktgen_set(dhd_bus_t *bus, u8 *arg)
+static int brcmf_sdbrcm_pktgen_set(struct brcmf_bus *bus, u8 *arg)
{
- dhd_pktgen_t pktgen;
+ struct brcmf_pktgen pktgen;
uint oldcnt, oldmode;
memcpy(&pktgen, arg, sizeof(pktgen));
- if (pktgen.version != DHD_PKTGEN_VERSION)
+ if (pktgen.version != BRCMF_PKTGEN_VERSION)
return -EINVAL;
oldcnt = bus->pktgen_count;
@@ -1726,7 +2127,7 @@ static int dhdsdio_pktgen_set(dhd_bus_t *bus, u8 *arg)
#endif /* SDTEST */
static int
-dhdsdio_membytes(dhd_bus_t *bus, bool write, u32 address, u8 *data,
+brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data,
uint size)
{
int bcmerror = 0;
@@ -1741,21 +2142,22 @@ dhdsdio_membytes(dhd_bus_t *bus, bool write, u32 address, u8 *data,
dsize = size;
/* Set the backplane window to include the start address */
- bcmerror = dhdsdio_set_siaddr_window(bus, address);
+ bcmerror = brcmf_sdbrcm_set_siaddr_window(bus, address);
if (bcmerror) {
- DHD_ERROR(("%s: window change failed\n", __func__));
+ BRCMF_ERROR(("%s: window change failed\n", __func__));
goto xfer_done;
}
/* Do the transfer(s) */
while (size) {
- DHD_INFO(("%s: %s %d bytes at offset 0x%08x in window 0x%08x\n",
- __func__, (write ? "write" : "read"), dsize,
- sdaddr, (address & SBSDIO_SBWINDOW_MASK)));
+ BRCMF_INFO(("%s: %s %d bytes at offset 0x%08x in window"
+ " 0x%08x\n", __func__, (write ? "write" : "read"),
+ dsize, sdaddr, (address & SBSDIO_SBWINDOW_MASK)));
bcmerror =
- bcmsdh_rwdata(bus->sdh, write, sdaddr, data, dsize);
+ brcmf_sdcard_rwdata(bus->card, write, sdaddr, data, dsize);
if (bcmerror) {
- DHD_ERROR(("%s: membytes transfer failed\n", __func__));
+ BRCMF_ERROR(("%s: membytes transfer failed\n",
+ __func__));
break;
}
@@ -1764,10 +2166,10 @@ dhdsdio_membytes(dhd_bus_t *bus, bool write, u32 address, u8 *data,
if (size) {
data += dsize;
address += dsize;
- bcmerror = dhdsdio_set_siaddr_window(bus, address);
+ bcmerror = brcmf_sdbrcm_set_siaddr_window(bus, address);
if (bcmerror) {
- DHD_ERROR(("%s: window change failed\n",
- __func__));
+ BRCMF_ERROR(("%s: window change failed\n",
+ __func__));
break;
}
sdaddr = 0;
@@ -1777,43 +2179,45 @@ dhdsdio_membytes(dhd_bus_t *bus, bool write, u32 address, u8 *data,
xfer_done:
/* Return the window to backplane enumeration space for core access */
- if (dhdsdio_set_siaddr_window(bus, bcmsdh_cur_sbwad(bus->sdh))) {
- DHD_ERROR(("%s: FAILED to set window back to 0x%x\n",
- __func__, bcmsdh_cur_sbwad(bus->sdh)));
+ if (brcmf_sdbrcm_set_siaddr_window(bus,
+ brcmf_sdcard_cur_sbwad(bus->card))) {
+ BRCMF_ERROR(("%s: FAILED to set window back to 0x%x\n",
+ __func__, brcmf_sdcard_cur_sbwad(bus->card)));
}
return bcmerror;
}
-#ifdef DHD_DEBUG
-static int dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
+#ifdef BCMDBG
+static int brcmf_sdbrcm_readshared(struct brcmf_bus *bus, struct sdpcm_shared *sh)
{
u32 addr;
int rv;
/* Read last word in memory to determine address of
sdpcm_shared structure */
- rv = dhdsdio_membytes(bus, false, bus->ramsize - 4, (u8 *)&addr, 4);
+ rv = brcmf_sdbrcm_membytes(bus, false, bus->ramsize - 4, (u8 *)&addr,
+ 4);
if (rv < 0)
return rv;
addr = le32_to_cpu(addr);
- DHD_INFO(("sdpcm_shared address 0x%08X\n", addr));
+ BRCMF_INFO(("sdpcm_shared address 0x%08X\n", addr));
/*
* Check if addr is valid.
* NVRAM length at the end of memory should have been overwritten.
*/
if (addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff)) {
- DHD_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n",
- __func__, addr));
+ BRCMF_ERROR(("%s: address (0x%08x) of sdpcm_shared invalid\n",
+ __func__, addr));
return -EBADE;
}
- /* Read hndrte_shared structure */
- rv = dhdsdio_membytes(bus, false, addr, (u8 *) sh,
- sizeof(sdpcm_shared_t));
+ /* Read rte_shared structure */
+ rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *) sh,
+ sizeof(struct sdpcm_shared));
if (rv < 0)
return rv;
@@ -1827,28 +2231,28 @@ static int dhdsdio_readshared(dhd_bus_t *bus, sdpcm_shared_t *sh)
sh->msgtrace_addr = le32_to_cpu(sh->msgtrace_addr);
if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
- DHD_ERROR(("%s: sdpcm_shared version %d in dhd "
- "is different than sdpcm_shared version %d in dongle\n",
- __func__, SDPCM_SHARED_VERSION,
- sh->flags & SDPCM_SHARED_VERSION_MASK));
+ BRCMF_ERROR(("%s: sdpcm_shared version %d in brcmf "
+ "is different than sdpcm_shared version %d in dongle\n",
+ __func__, SDPCM_SHARED_VERSION,
+ sh->flags & SDPCM_SHARED_VERSION_MASK));
return -EBADE;
}
return 0;
}
-static int dhdsdio_checkdied(dhd_bus_t *bus, u8 *data, uint size)
+static int brcmf_sdbrcm_checkdied(struct brcmf_bus *bus, u8 *data, uint size)
{
int bcmerror = 0;
uint msize = 512;
char *mbuffer = NULL;
uint maxstrlen = 256;
char *str = NULL;
- trap_t tr;
- sdpcm_shared_t sdpcm_shared;
- struct bcmstrbuf strbuf;
+ struct brcmf_trap tr;
+ struct sdpcm_shared sdpcm_shared;
+ struct brcmu_strbuf strbuf;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
if (data == NULL) {
/*
@@ -1858,8 +2262,8 @@ static int dhdsdio_checkdied(dhd_bus_t *bus, u8 *data, uint size)
size = msize;
mbuffer = data = kmalloc(msize, GFP_ATOMIC);
if (mbuffer == NULL) {
- DHD_ERROR(("%s: kmalloc(%d) failed\n", __func__,
- msize));
+ BRCMF_ERROR(("%s: kmalloc(%d) failed\n", __func__,
+ msize));
bcmerror = -ENOMEM;
goto done;
}
@@ -1867,18 +2271,18 @@ static int dhdsdio_checkdied(dhd_bus_t *bus, u8 *data, uint size)
str = kmalloc(maxstrlen, GFP_ATOMIC);
if (str == NULL) {
- DHD_ERROR(("%s: kmalloc(%d) failed\n", __func__, maxstrlen));
+ BRCMF_ERROR(("%s: kmalloc(%d) failed\n", __func__, maxstrlen));
bcmerror = -ENOMEM;
goto done;
}
- bcmerror = dhdsdio_readshared(bus, &sdpcm_shared);
+ bcmerror = brcmf_sdbrcm_readshared(bus, &sdpcm_shared);
if (bcmerror < 0)
goto done;
- bcm_binit(&strbuf, data, size);
+ brcmu_binit(&strbuf, data, size);
- bcm_bprintf(&strbuf,
+ brcmu_bprintf(&strbuf,
"msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
sdpcm_shared.msgtrace_addr, sdpcm_shared.console_addr);
@@ -1887,7 +2291,7 @@ static int dhdsdio_checkdied(dhd_bus_t *bus, u8 *data, uint size)
* (Avoids conflict with real asserts for programmatic
* parsing of output.)
*/
- bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+ brcmu_bprintf(&strbuf, "Assrt not built in dongle\n");
}
if ((sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP)) ==
@@ -1896,49 +2300,49 @@ static int dhdsdio_checkdied(dhd_bus_t *bus, u8 *data, uint size)
* (Avoids conflict with real asserts for programmatic
* parsing of output.)
*/
- bcm_bprintf(&strbuf, "No trap%s in dongle",
+ brcmu_bprintf(&strbuf, "No trap%s in dongle",
(sdpcm_shared.flags & SDPCM_SHARED_ASSERT_BUILT)
? "/assrt" : "");
} else {
if (sdpcm_shared.flags & SDPCM_SHARED_ASSERT) {
/* Download assert */
- bcm_bprintf(&strbuf, "Dongle assert");
+ brcmu_bprintf(&strbuf, "Dongle assert");
if (sdpcm_shared.assert_exp_addr != 0) {
str[0] = '\0';
- bcmerror = dhdsdio_membytes(bus, false,
+ bcmerror = brcmf_sdbrcm_membytes(bus, false,
sdpcm_shared.assert_exp_addr,
(u8 *) str, maxstrlen);
if (bcmerror < 0)
goto done;
str[maxstrlen - 1] = '\0';
- bcm_bprintf(&strbuf, " expr \"%s\"", str);
+ brcmu_bprintf(&strbuf, " expr \"%s\"", str);
}
if (sdpcm_shared.assert_file_addr != 0) {
str[0] = '\0';
- bcmerror = dhdsdio_membytes(bus, false,
+ bcmerror = brcmf_sdbrcm_membytes(bus, false,
sdpcm_shared.assert_file_addr,
(u8 *) str, maxstrlen);
if (bcmerror < 0)
goto done;
str[maxstrlen - 1] = '\0';
- bcm_bprintf(&strbuf, " file \"%s\"", str);
+ brcmu_bprintf(&strbuf, " file \"%s\"", str);
}
- bcm_bprintf(&strbuf, " line %d ",
+ brcmu_bprintf(&strbuf, " line %d ",
sdpcm_shared.assert_line);
}
if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
- bcmerror = dhdsdio_membytes(bus, false,
+ bcmerror = brcmf_sdbrcm_membytes(bus, false,
sdpcm_shared.trap_addr, (u8 *)&tr,
- sizeof(trap_t));
+ sizeof(struct brcmf_trap));
if (bcmerror < 0)
goto done;
- bcm_bprintf(&strbuf,
+ brcmu_bprintf(&strbuf,
"Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
"lp 0x%x, rpc 0x%x Trap offset 0x%x, "
"r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n",
@@ -1950,14 +2354,14 @@ static int dhdsdio_checkdied(dhd_bus_t *bus, u8 *data, uint size)
}
if (sdpcm_shared.flags & (SDPCM_SHARED_ASSERT | SDPCM_SHARED_TRAP))
- DHD_ERROR(("%s: %s\n", __func__, strbuf.origbuf));
+ BRCMF_ERROR(("%s: %s\n", __func__, strbuf.origbuf));
-#ifdef DHD_DEBUG
+#ifdef BCMDBG
if (sdpcm_shared.flags & SDPCM_SHARED_TRAP) {
/* Mem dump to a file on device */
- dhdsdio_mem_dump(bus);
+ brcmf_sdbrcm_mem_dump(bus);
}
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
done:
kfree(mbuffer);
@@ -1966,7 +2370,7 @@ done:
return bcmerror;
}
-static int dhdsdio_mem_dump(dhd_bus_t *bus)
+static int brcmf_sdbrcm_mem_dump(struct brcmf_bus *bus)
{
int ret = 0;
int size; /* Full mem size */
@@ -1978,7 +2382,7 @@ static int dhdsdio_mem_dump(dhd_bus_t *bus)
size = bus->ramsize;
buf = kmalloc(size, GFP_ATOMIC);
if (!buf) {
- DHD_ERROR(("%s: Out of memory (%d bytes)\n", __func__, size));
+ BRCMF_ERROR(("%s: Out of memory (%d bytes)\n", __func__, size));
return -1;
}
@@ -1987,9 +2391,10 @@ static int dhdsdio_mem_dump(dhd_bus_t *bus)
databuf = buf;
while (size) {
read_size = min(MEMBLOCK, size);
- ret = dhdsdio_membytes(bus, false, start, databuf, read_size);
+ ret = brcmf_sdbrcm_membytes(bus, false, start, databuf,
+ read_size);
if (ret) {
- DHD_ERROR(("%s: Error membytes %d\n", __func__, ret));
+ BRCMF_ERROR(("%s: Error membytes %d\n", __func__, ret));
kfree(buf);
return -1;
}
@@ -2003,20 +2408,20 @@ static int dhdsdio_mem_dump(dhd_bus_t *bus)
printk(KERN_DEBUG "Done\n");
/* free buf before return !!! */
- if (write_to_file(bus->dhd, buf, bus->ramsize)) {
- DHD_ERROR(("%s: Error writing to files\n", __func__));
+ if (brcmf_write_to_file(bus->drvr, buf, bus->ramsize)) {
+ BRCMF_ERROR(("%s: Error writing to files\n", __func__));
return -1;
}
- /* buf free handled in write_to_file, not here */
+ /* buf free handled in brcmf_write_to_file, not here */
return 0;
}
#define CONSOLE_LINE_MAX 192
-static int dhdsdio_readconsole(dhd_bus_t *bus)
+static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus)
{
- dhd_console_t *c = &bus->console;
+ struct brcmf_console *c = &bus->console;
u8 line[CONSOLE_LINE_MAX], ch;
u32 n, idx, addr;
int rv;
@@ -2026,8 +2431,8 @@ static int dhdsdio_readconsole(dhd_bus_t *bus)
return 0;
/* Read console log struct */
- addr = bus->console_addr + offsetof(hndrte_cons_t, log);
- rv = dhdsdio_membytes(bus, false, addr, (u8 *)&c->log,
+ addr = bus->console_addr + offsetof(struct rte_console, log);
+ rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&c->log,
sizeof(c->log));
if (rv < 0)
return rv;
@@ -2053,7 +2458,7 @@ static int dhdsdio_readconsole(dhd_bus_t *bus)
/* Read the console buffer */
addr = le32_to_cpu(c->log.buf);
- rv = dhdsdio_membytes(bus, false, addr, c->buf, c->bufsize);
+ rv = brcmf_sdbrcm_membytes(bus, false, addr, c->buf, c->bufsize);
if (rv < 0)
return rv;
@@ -2089,16 +2494,16 @@ break2:
return 0;
}
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
-int dhdsdio_downloadvars(dhd_bus_t *bus, void *arg, int len)
+int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len)
{
int bcmerror = 0;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
/* Basic sanity checks */
- if (bus->dhd->up) {
+ if (bus->drvr->up) {
bcmerror = -EISCONN;
goto err;
}
@@ -2125,7 +2530,7 @@ err:
}
static int
-dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
+brcmf_sdbrcm_doiovar(struct brcmf_bus *bus, const struct brcmu_iovar *vi, u32 actionid,
const char *name, void *params, int plen, void *arg, int len,
int val_size)
{
@@ -2133,11 +2538,11 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
s32 int_val = 0;
bool bool_val = 0;
- DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p "
- "len %d val_size %d\n",
- __func__, actionid, name, params, plen, arg, len, val_size));
+ BRCMF_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p "
+ "len %d val_size %d\n", __func__, actionid, name, params,
+ plen, arg, len, val_size));
- bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid));
+ bcmerror = brcmu_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid));
if (bcmerror != 0)
goto exit;
@@ -2147,10 +2552,10 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
bool_val = (int_val != 0) ? true : false;
/* Some ioctls use the bus */
- dhd_os_sdlock(bus->dhd);
+ brcmf_sdbrcm_sdlock(bus);
/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
- if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+ if (bus->drvr->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
actionid == IOV_GVAL(IOV_DEVRESET))) {
bcmerror = -EPERM;
goto exit;
@@ -2159,7 +2564,7 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
/* Handle sleep stuff before any clock mucking */
if (vi->varid == IOV_SLEEP) {
if (IOV_ISSET(actionid)) {
- bcmerror = dhdsdio_bussleep(bus, bool_val);
+ bcmerror = brcmf_sdbrcm_bussleep(bus, bool_val);
} else {
int_val = (s32) bus->sleeping;
memcpy(arg, &int_val, val_size);
@@ -2168,9 +2573,9 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
}
/* Request clock to allow SDIO accesses */
- if (!bus->dhd->dongle_reset) {
+ if (!bus->drvr->dongle_reset) {
BUS_WAKE(bus);
- dhdsdio_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
}
switch (actionid) {
@@ -2182,15 +2587,13 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
case IOV_SVAL(IOV_INTR):
bus->intr = bool_val;
bus->intdis = false;
- if (bus->dhd->up) {
+ if (bus->drvr->up) {
+ BRCMF_INTR(("%s: %s SDIO interrupts\n", __func__,
+ bus->intr ? "enable" : "disable"));
if (bus->intr) {
- DHD_INTR(("%s: enable SDIO device interrupts\n",
- __func__));
- bcmsdh_intr_enable(bus->sdh);
+ brcmf_sdcard_intr_enable(bus->card);
} else {
- DHD_INTR(("%s: disable SDIO interrupts\n",
- __func__));
- bcmsdh_intr_disable(bus->sdh);
+ brcmf_sdcard_intr_disable(bus->card);
}
}
break;
@@ -2211,7 +2614,7 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
break;
case IOV_SVAL(IOV_IDLETIME):
- if ((int_val < 0) && (int_val != DHD_IDLE_IMMEDIATE))
+ if ((int_val < 0) && (int_val != BRCMF_IDLE_IMMEDIATE))
bcmerror = -EINVAL;
else
bus->idletime = int_val;
@@ -2244,8 +2647,6 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
- ASSERT(plen >= 2 * sizeof(int));
-
address = (u32) int_val;
memcpy(&int_val, (char *)params + sizeof(int_val),
sizeof(int_val));
@@ -2254,25 +2655,25 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
/* Do some validation */
dsize = set ? plen - (2 * sizeof(int)) : len;
if (dsize < size) {
- DHD_ERROR(("%s: error on %s membytes, addr "
- "0x%08x size %d dsize %d\n",
- __func__, (set ? "set" : "get"),
- address, size, dsize));
+ BRCMF_ERROR(("%s: error on %s membytes, addr "
+ "0x%08x size %d dsize %d\n",
+ __func__, (set ? "set" : "get"),
+ address, size, dsize));
bcmerror = -EINVAL;
break;
}
- DHD_INFO(("%s: Request to %s %d bytes at address "
- "0x%08x\n",
- __func__, (set ? "write" : "read"), size, address));
+ BRCMF_INFO(("%s: Request to %s %d bytes at address "
+ "0x%08x\n", __func__,
+ (set ? "write" : "read"), size, address));
/* If we know about SOCRAM, check for a fit */
if ((bus->orig_ramsize) &&
((address > bus->orig_ramsize)
|| (address + size > bus->orig_ramsize))) {
- DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d "
- "bytes at 0x%08x\n",
- __func__, bus->orig_ramsize, size, address));
+ BRCMF_ERROR(("%s: ramsize 0x%08x doesn't have"
+ " %d bytes at 0x%08x\n", __func__,
+ bus->orig_ramsize, size, address));
bcmerror = -EINVAL;
break;
}
@@ -2283,8 +2684,8 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
2 * sizeof(int) : (u8 *) arg;
/* Call to do the transfer */
- bcmerror =
- dhdsdio_membytes(bus, set, address, data, size);
+ bcmerror = brcmf_sdbrcm_membytes(bus, set, address,
+ data, size);
break;
}
@@ -2295,33 +2696,33 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
break;
case IOV_GVAL(IOV_SDIOD_DRIVE):
- int_val = (s32) dhd_sdiod_drive_strength;
+ int_val = (s32) brcmf_sdiod_drive_strength;
memcpy(arg, &int_val, val_size);
break;
case IOV_SVAL(IOV_SDIOD_DRIVE):
- dhd_sdiod_drive_strength = int_val;
- dhdsdio_sdiod_drive_strength_init(bus,
- dhd_sdiod_drive_strength);
+ brcmf_sdiod_drive_strength = int_val;
+ brcmf_sdbrcm_sdiod_drive_strength_init(bus,
+ brcmf_sdiod_drive_strength);
break;
case IOV_SVAL(IOV_DOWNLOAD):
- bcmerror = dhdsdio_download_state(bus, bool_val);
+ bcmerror = brcmf_sdbrcm_download_state(bus, bool_val);
break;
case IOV_SVAL(IOV_VARS):
- bcmerror = dhdsdio_downloadvars(bus, arg, len);
+ bcmerror = brcmf_sdbrcm_downloadvars(bus, arg, len);
break;
case IOV_GVAL(IOV_READAHEAD):
- int_val = (s32) dhd_readahead;
+ int_val = (s32) brcmf_readahead;
memcpy(arg, &int_val, val_size);
break;
case IOV_SVAL(IOV_READAHEAD):
- if (bool_val && !dhd_readahead)
+ if (bool_val && !brcmf_readahead)
bus->nextlen = 0;
- dhd_readahead = bool_val;
+ brcmf_readahead = bool_val;
break;
case IOV_GVAL(IOV_SDRXCHAIN):
@@ -2336,40 +2737,56 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
bus->use_rxchain = bool_val;
break;
case IOV_GVAL(IOV_ALIGNCTL):
- int_val = (s32) dhd_alignctl;
+ int_val = (s32) brcmf_alignctl;
memcpy(arg, &int_val, val_size);
break;
case IOV_SVAL(IOV_ALIGNCTL):
- dhd_alignctl = bool_val;
+ brcmf_alignctl = bool_val;
break;
case IOV_GVAL(IOV_SDALIGN):
- int_val = DHD_SDALIGN;
+ int_val = BRCMF_SDALIGN;
memcpy(arg, &int_val, val_size);
break;
-#ifdef DHD_DEBUG
+#ifdef BCMDBG
case IOV_GVAL(IOV_VARS):
if (bus->varsz < (uint) len)
memcpy(arg, bus->vars, bus->varsz);
else
bcmerror = -EOVERFLOW;
break;
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
+
+#ifdef BCMDBG
+ case IOV_GVAL(IOV_DCONSOLE_POLL):
+ int_val = (s32) brcmf_console_ms;
+ memcpy(arg, &int_val, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DCONSOLE_POLL):
+ brcmf_console_ms = (uint) int_val;
+ break;
+
+ case IOV_SVAL(IOV_CONS):
+ if (len > 0)
+ bcmerror = brcmf_sdbrcm_bus_console_in(bus->drvr,
+ arg, len - 1);
+ break;
-#ifdef DHD_DEBUG
case IOV_GVAL(IOV_SDREG):
{
- sdreg_t *sd_ptr;
+ struct brcmf_sdreg *sd_ptr;
u32 addr, size;
- sd_ptr = (sdreg_t *) params;
+ sd_ptr = (struct brcmf_sdreg *) params;
- addr = (unsigned long)bus->regs + sd_ptr->offset;
+ addr = bus->ci->buscorebase + sd_ptr->offset;
size = sd_ptr->func;
- int_val = (s32) bcmsdh_reg_read(bus->sdh, addr, size);
- if (bcmsdh_regfail(bus->sdh))
+ int_val = (s32) brcmf_sdcard_reg_read(bus->card, addr,
+ size);
+ if (brcmf_sdcard_regfail(bus->card))
bcmerror = -EIO;
memcpy(arg, &int_val, sizeof(s32));
break;
@@ -2377,15 +2794,16 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
case IOV_SVAL(IOV_SDREG):
{
- sdreg_t *sd_ptr;
+ struct brcmf_sdreg *sd_ptr;
u32 addr, size;
- sd_ptr = (sdreg_t *) params;
+ sd_ptr = (struct brcmf_sdreg *) params;
- addr = (unsigned long)bus->regs + sd_ptr->offset;
+ addr = bus->ci->buscorebase + sd_ptr->offset;
size = sd_ptr->func;
- bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value);
- if (bcmsdh_regfail(bus->sdh))
+ brcmf_sdcard_reg_write(bus->card, addr, size,
+ sd_ptr->value);
+ if (brcmf_sdcard_regfail(bus->card))
bcmerror = -EIO;
break;
}
@@ -2394,15 +2812,16 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
(not SDIO core) */
case IOV_GVAL(IOV_SBREG):
{
- sdreg_t sdreg;
+ struct brcmf_sdreg sdreg;
u32 addr, size;
memcpy(&sdreg, params, sizeof(sdreg));
addr = SI_ENUM_BASE + sdreg.offset;
size = sdreg.func;
- int_val = (s32) bcmsdh_reg_read(bus->sdh, addr, size);
- if (bcmsdh_regfail(bus->sdh))
+ int_val = (s32) brcmf_sdcard_reg_read(bus->card, addr,
+ size);
+ if (brcmf_sdcard_regfail(bus->card))
bcmerror = -EIO;
memcpy(arg, &int_val, sizeof(s32));
break;
@@ -2410,15 +2829,16 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
case IOV_SVAL(IOV_SBREG):
{
- sdreg_t sdreg;
+ struct brcmf_sdreg sdreg;
u32 addr, size;
memcpy(&sdreg, params, sizeof(sdreg));
addr = SI_ENUM_BASE + sdreg.offset;
size = sdreg.func;
- bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value);
- if (bcmsdh_regfail(bus->sdh))
+ brcmf_sdcard_reg_write(bus->card, addr, size,
+ sdreg.value);
+ if (brcmf_sdcard_regfail(bus->card))
bcmerror = -EIO;
break;
}
@@ -2428,15 +2848,15 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
*(char *)arg = 0;
strcat(arg, "\nFunc 0\n");
- bcmsdh_cis_read(bus->sdh, 0x10,
+ brcmf_sdcard_cis_read(bus->card, 0x10,
(u8 *) arg + strlen(arg),
SBSDIO_CIS_SIZE_LIMIT);
strcat(arg, "\nFunc 1\n");
- bcmsdh_cis_read(bus->sdh, 0x11,
+ brcmf_sdcard_cis_read(bus->card, 0x11,
(u8 *) arg + strlen(arg),
SBSDIO_CIS_SIZE_LIMIT);
strcat(arg, "\nFunc 2\n");
- bcmsdh_cis_read(bus->sdh, 0x12,
+ brcmf_sdcard_cis_read(bus->card, 0x12,
(u8 *) arg + strlen(arg),
SBSDIO_CIS_SIZE_LIMIT);
break;
@@ -2452,32 +2872,32 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
break;
case IOV_GVAL(IOV_TXBOUND):
- int_val = (s32) dhd_txbound;
+ int_val = (s32) brcmf_txbound;
memcpy(arg, &int_val, val_size);
break;
case IOV_SVAL(IOV_TXBOUND):
- dhd_txbound = (uint) int_val;
+ brcmf_txbound = (uint) int_val;
break;
case IOV_GVAL(IOV_RXBOUND):
- int_val = (s32) dhd_rxbound;
+ int_val = (s32) brcmf_rxbound;
memcpy(arg, &int_val, val_size);
break;
case IOV_SVAL(IOV_RXBOUND):
- dhd_rxbound = (uint) int_val;
+ brcmf_rxbound = (uint) int_val;
break;
case IOV_GVAL(IOV_TXMINMAX):
- int_val = (s32) dhd_txminmax;
+ int_val = (s32) brcmf_txminmax;
memcpy(arg, &int_val, val_size);
break;
case IOV_SVAL(IOV_TXMINMAX):
- dhd_txminmax = (uint) int_val;
+ brcmf_txminmax = (uint) int_val;
break;
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
#ifdef SDTEST
case IOV_GVAL(IOV_EXTLOOP):
@@ -2490,31 +2910,44 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
break;
case IOV_GVAL(IOV_PKTGEN):
- bcmerror = dhdsdio_pktgen_get(bus, arg);
+ bcmerror = brcmf_sdbrcm_pktgen_get(bus, arg);
break;
case IOV_SVAL(IOV_PKTGEN):
- bcmerror = dhdsdio_pktgen_set(bus, arg);
+ bcmerror = brcmf_sdbrcm_pktgen_set(bus, arg);
break;
#endif /* SDTEST */
case IOV_SVAL(IOV_DEVRESET):
- DHD_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d "
- "busstate=%d\n",
- __func__, bool_val, bus->dhd->dongle_reset,
- bus->dhd->busstate));
+ BRCMF_TRACE(("%s: Called set IOV_DEVRESET=%d dongle_reset=%d "
+ "busstate=%d\n",
+ __func__, bool_val, bus->drvr->dongle_reset,
+ bus->drvr->busstate));
- dhd_bus_devreset(bus->dhd, (u8) bool_val);
+ brcmf_bus_devreset(bus->drvr, (u8) bool_val);
break;
case IOV_GVAL(IOV_DEVRESET):
- DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __func__));
+ BRCMF_TRACE(("%s: Called get IOV_DEVRESET\n", __func__));
/* Get its status */
- int_val = (bool) bus->dhd->dongle_reset;
+ int_val = (bool) bus->drvr->dongle_reset;
+ memcpy(arg, &int_val, val_size);
+
+ break;
+
+ case IOV_GVAL(IOV_WDTICK):
+ int_val = (s32) brcmf_watchdog_ms;
memcpy(arg, &int_val, val_size);
+ break;
+ case IOV_SVAL(IOV_WDTICK):
+ if (!bus->drvr->up) {
+ bcmerror = -ENOLINK;
+ break;
+ }
+ brcmf_sdbrcm_wd_timer(bus, (uint) int_val);
break;
default:
@@ -2523,29 +2956,29 @@ dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, u32 actionid,
}
exit:
- if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && !bus->dpc_sched) {
bus->activity = false;
- dhdsdio_clkctl(bus, CLK_NONE, true);
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
}
- dhd_os_sdunlock(bus->dhd);
+ brcmf_sdbrcm_sdunlock(bus);
if (actionid == IOV_SVAL(IOV_DEVRESET) && bool_val == false)
- dhd_preinit_ioctls((dhd_pub_t *) bus->dhd);
+ brcmf_c_preinit_ioctls(bus->drvr);
return bcmerror;
}
-static int dhdsdio_write_vars(dhd_bus_t *bus)
+static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
{
int bcmerror = 0;
u32 varsize;
u32 varaddr;
u8 *vbuffer;
u32 varsizew;
-#ifdef DHD_DEBUG
+#ifdef BCMDBG
char *nvram_ularray;
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
/* Even if there are no vars are to be written, we still
need to set the ramsize. */
@@ -2561,10 +2994,10 @@ static int dhdsdio_write_vars(dhd_bus_t *bus)
/* Write the vars list */
bcmerror =
- dhdsdio_membytes(bus, true, varaddr, vbuffer, varsize);
-#ifdef DHD_DEBUG
+ brcmf_sdbrcm_membytes(bus, true, varaddr, vbuffer, varsize);
+#ifdef BCMDBG
/* Verify NVRAM bytes */
- DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+ BRCMF_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
nvram_ularray = kmalloc(varsize, GFP_ATOMIC);
if (!nvram_ularray)
return -ENOMEM;
@@ -2574,30 +3007,31 @@ static int dhdsdio_write_vars(dhd_bus_t *bus)
/* Read the vars list to temp buffer for comparison */
bcmerror =
- dhdsdio_membytes(bus, false, varaddr, nvram_ularray,
+ brcmf_sdbrcm_membytes(bus, false, varaddr, nvram_ularray,
varsize);
if (bcmerror) {
- DHD_ERROR(("%s: error %d on reading %d nvram bytes at "
- "0x%08x\n", __func__, bcmerror, varsize, varaddr));
+ BRCMF_ERROR(("%s: error %d on reading %d nvram bytes"
+ " at 0x%08x\n", __func__, bcmerror,
+ varsize, varaddr));
}
/* Compare the org NVRAM with the one read from RAM */
if (memcmp(vbuffer, nvram_ularray, varsize)) {
- DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n",
- __func__));
+ BRCMF_ERROR(("%s: Downloaded NVRAM image is "
+ "corrupted.\n", __func__));
} else
- DHD_ERROR(("%s: Download/Upload/Compare of NVRAM ok.\n",
- __func__));
+ BRCMF_ERROR(("%s: Download/Upload/Compare of"
+ " NVRAM ok.\n", __func__));
kfree(nvram_ularray);
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
kfree(vbuffer);
}
/* adjust to the user specified RAM */
- DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
- bus->orig_ramsize, bus->ramsize));
- DHD_INFO(("Vars are at %d, orig varsize is %d\n", varaddr, varsize));
+ BRCMF_INFO(("Physical memory size: %d, usable memory size: %d\n",
+ bus->orig_ramsize, bus->ramsize));
+ BRCMF_INFO(("Vars are at %d, orig varsize is %d\n", varaddr, varsize));
varsize = ((bus->orig_ramsize - 4) - varaddr);
/*
@@ -2613,17 +3047,17 @@ static int dhdsdio_write_vars(dhd_bus_t *bus)
varsizew = cpu_to_le32(varsizew);
}
- DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize,
- varsizew));
+ BRCMF_INFO(("New varsize is %d, length token=0x%08x\n", varsize,
+ varsizew));
/* Write the length token to the last word */
- bcmerror = dhdsdio_membytes(bus, true, (bus->orig_ramsize - 4),
+ bcmerror = brcmf_sdbrcm_membytes(bus, true, (bus->orig_ramsize - 4),
(u8 *)&varsizew, 4);
return bcmerror;
}
-static int dhdsdio_download_state(dhd_bus_t *bus, bool enter)
+static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
{
uint retries;
u32 regdata;
@@ -2635,139 +3069,114 @@ static int dhdsdio_download_state(dhd_bus_t *bus, bool enter)
if (enter) {
bus->alp_only = true;
- dhdsdio_chip_disablecore(bus->sdh, bus->ci->armcorebase);
+ brcmf_sdbrcm_chip_disablecore(bus->card, bus->ci->armcorebase);
- dhdsdio_chip_resetcore(bus->sdh, bus->ci->ramcorebase);
+ brcmf_sdbrcm_chip_resetcore(bus->card, bus->ci->ramcorebase);
/* Clear the top bit of memory */
if (bus->ramsize) {
u32 zeros = 0;
- dhdsdio_membytes(bus, true, bus->ramsize - 4,
+ brcmf_sdbrcm_membytes(bus, true, bus->ramsize - 4,
(u8 *)&zeros, 4);
}
} else {
- regdata = bcmsdh_reg_read(bus->sdh,
+ regdata = brcmf_sdcard_reg_read(bus->card,
CORE_SB(bus->ci->ramcorebase, sbtmstatelow), 4);
regdata &= (SBTML_RESET | SBTML_REJ_MASK |
(SICF_CLOCK_EN << SBTML_SICF_SHIFT));
if ((SICF_CLOCK_EN << SBTML_SICF_SHIFT) != regdata) {
- DHD_ERROR(("%s: SOCRAM core is down after reset?\n",
- __func__));
+ BRCMF_ERROR(("%s: SOCRAM core is down after reset?\n",
+ __func__));
bcmerror = -EBADE;
goto fail;
}
- bcmerror = dhdsdio_write_vars(bus);
+ bcmerror = brcmf_sdbrcm_write_vars(bus);
if (bcmerror) {
- DHD_ERROR(("%s: no vars written to RAM\n", __func__));
+ BRCMF_ERROR(("%s: no vars written to RAM\n", __func__));
bcmerror = 0;
}
- W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+ w_sdreg32(bus, 0xFFFFFFFF,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
- dhdsdio_chip_resetcore(bus->sdh, bus->ci->armcorebase);
+ brcmf_sdbrcm_chip_resetcore(bus->card, bus->ci->armcorebase);
/* Allow HT Clock now that the ARM is running. */
bus->alp_only = false;
- bus->dhd->busstate = DHD_BUS_LOAD;
+ bus->drvr->busstate = BRCMF_BUS_LOAD;
}
fail:
return bcmerror;
}
int
-dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
- void *params, int plen, void *arg, int len, bool set)
+brcmf_sdbrcm_bus_iovar_op(struct brcmf_pub *drvr, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
{
- dhd_bus_t *bus = dhdp->bus;
- const bcm_iovar_t *vi = NULL;
+ struct brcmf_bus *bus = drvr->bus;
+ const struct brcmu_iovar *vi = NULL;
int bcmerror = 0;
int val_size;
u32 actionid;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- ASSERT(name);
- ASSERT(len >= 0);
+ if (name == NULL || len <= 0)
+ return -EINVAL;
- /* Get MUST have return space */
- ASSERT(set || (arg && len));
+ /* Set does not take qualifiers */
+ if (set && (params || plen))
+ return -EINVAL;
- /* Set does NOT take qualifiers */
- ASSERT(!set || (!params && !plen));
+ /* Get must have return space */
+ if (!set && !(arg && len))
+ return -EINVAL;
/* Look up var locally; if not found pass to host driver */
- vi = bcm_iovar_lookup(dhdsdio_iovars, name);
+ vi = brcmu_iovar_lookup(brcmf_sdio_iovars, name);
if (vi == NULL) {
- dhd_os_sdlock(bus->dhd);
+ brcmf_sdbrcm_sdlock(bus);
BUS_WAKE(bus);
/* Turn on clock in case SD command needs backplane */
- dhdsdio_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ bcmerror = brcmf_sdcard_iovar_op(bus->card, name, params, plen,
+ arg, len, set);
- bcmerror =
- bcmsdh_iovar_op(bus->sdh, name, params, plen, arg, len,
- set);
-
- /* Check for bus configuration changes of interest */
-
- /* If it was divisor change, read the new one */
- if (set && strcmp(name, "sd_divisor") == 0) {
- if (bcmsdh_iovar_op(bus->sdh, "sd_divisor", NULL, 0,
- &bus->sd_divisor, sizeof(s32),
- false) != 0) {
- bus->sd_divisor = -1;
- DHD_ERROR(("%s: fail on %s get\n", __func__,
- name));
- } else {
- DHD_INFO(("%s: noted %s update, value now %d\n",
- __func__, name, bus->sd_divisor));
- }
- }
- /* If it was a mode change, read the new one */
- if (set && strcmp(name, "sd_mode") == 0) {
- if (bcmsdh_iovar_op(bus->sdh, "sd_mode", NULL, 0,
- &bus->sd_mode, sizeof(s32),
- false) != 0) {
- bus->sd_mode = -1;
- DHD_ERROR(("%s: fail on %s get\n", __func__,
- name));
- } else {
- DHD_INFO(("%s: noted %s update, value now %d\n",
- __func__, name, bus->sd_mode));
- }
- }
/* Similar check for blocksize change */
if (set && strcmp(name, "sd_blocksize") == 0) {
s32 fnum = 2;
- if (bcmsdh_iovar_op
- (bus->sdh, "sd_blocksize", &fnum, sizeof(s32),
+ if (brcmf_sdcard_iovar_op
+ (bus->card, "sd_blocksize", &fnum, sizeof(s32),
&bus->blocksize, sizeof(s32),
false) != 0) {
bus->blocksize = 0;
- DHD_ERROR(("%s: fail on %s get\n", __func__,
- "sd_blocksize"));
+ BRCMF_ERROR(("%s: fail on %s get\n", __func__,
+ "sd_blocksize"));
} else {
- DHD_INFO(("%s: noted %s update, value now %d\n",
- __func__, "sd_blocksize",
- bus->blocksize));
+ BRCMF_INFO(("%s: noted sd_blocksize update,"
+ " value now %d\n", __func__,
+ bus->blocksize));
}
}
bus->roundup = min(max_roundup, bus->blocksize);
- if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ if (bus->idletime == BRCMF_IDLE_IMMEDIATE &&
+ !bus->dpc_sched) {
bus->activity = false;
- dhdsdio_clkctl(bus, CLK_NONE, true);
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
}
- dhd_os_sdunlock(bus->dhd);
+ brcmf_sdbrcm_sdunlock(bus);
goto exit;
}
- DHD_CTL(("%s: %s %s, len %d plen %d\n", __func__,
- name, (set ? "set" : "get"), len, plen));
+ BRCMF_CTL(("%s: %s %s, len %d plen %d\n", __func__,
+ name, (set ? "set" : "get"), len, plen));
/* set up 'params' pointer in case this is a set command so that
* the convenience int and bool code can be common to set and get
@@ -2786,168 +3195,188 @@ dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
val_size = sizeof(int);
actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
- bcmerror =
- dhdsdio_doiovar(bus, vi, actionid, name, params, plen, arg, len,
- val_size);
+ bcmerror = brcmf_sdbrcm_doiovar(bus, vi, actionid, name, params, plen,
+ arg, len, val_size);
exit:
return bcmerror;
}
-void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus, bool enforce_mutex)
{
u32 local_hostintmask;
u8 saveclk;
uint retries;
int err;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
if (enforce_mutex)
- dhd_os_sdlock(bus->dhd);
+ brcmf_sdbrcm_sdlock(bus);
BUS_WAKE(bus);
/* Enable clock for device interrupts */
- dhdsdio_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ if (bus->watchdog_tsk) {
+ send_sig(SIGTERM, bus->watchdog_tsk, 1);
+ kthread_stop(bus->watchdog_tsk);
+ bus->watchdog_tsk = NULL;
+ }
+
+ if (bus->dpc_tsk) {
+ send_sig(SIGTERM, bus->dpc_tsk, 1);
+ kthread_stop(bus->dpc_tsk);
+ bus->dpc_tsk = NULL;
+ } else
+ tasklet_kill(&bus->tasklet);
/* Disable and clear interrupts at the chip level also */
- W_SDREG(0, &bus->regs->hostintmask, retries);
+ w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries);
local_hostintmask = bus->hostintmask;
bus->hostintmask = 0;
/* Change our idea of bus state */
- bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
/* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk =
- bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- &err);
+ saveclk = brcmf_sdcard_cfg_read(bus->card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (!err) {
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
}
if (err) {
- DHD_ERROR(("%s: Failed to force clock for F2: err %d\n",
- __func__, err));
+ BRCMF_ERROR(("%s: Failed to force clock for F2: err %d\n",
+ __func__, err));
}
/* Turn off the bus (F2), free any pending packets */
- DHD_INTR(("%s: disable SDIO interrupts\n", __func__));
- bcmsdh_intr_disable(bus->sdh);
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN,
+ BRCMF_INTR(("%s: disable SDIO interrupts\n", __func__));
+ brcmf_sdcard_intr_disable(bus->card);
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_0, SDIO_CCCR_IOEx,
SDIO_FUNC_ENABLE_1, NULL);
/* Clear any pending interrupts now that F2 is disabled */
- W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
+ w_sdreg32(bus, local_hostintmask,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
/* Turn off the backplane clock (only) */
- dhdsdio_clkctl(bus, CLK_SDONLY, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
/* Clear the data packet queues */
- bcm_pktq_flush(&bus->txq, true, NULL, NULL);
+ brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
/* Clear any held glomming stuff */
if (bus->glomd)
- bcm_pkt_buf_free_skb(bus->glomd);
+ brcmu_pkt_buf_free_skb(bus->glomd);
if (bus->glom)
- bcm_pkt_buf_free_skb(bus->glom);
+ brcmu_pkt_buf_free_skb(bus->glom);
bus->glom = bus->glomd = NULL;
/* Clear rx control and wake any waiters */
bus->rxlen = 0;
- dhd_os_ioctl_resp_wake(bus->dhd);
+ brcmf_os_ioctl_resp_wake(bus->drvr);
/* Reset some F2 state stuff */
bus->rxskip = false;
bus->tx_seq = bus->rx_seq = 0;
if (enforce_mutex)
- dhd_os_sdunlock(bus->dhd);
+ brcmf_sdbrcm_sdunlock(bus);
}
-int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr, bool enforce_mutex)
{
- dhd_bus_t *bus = dhdp->bus;
- dhd_timeout_t tmo;
+ struct brcmf_bus *bus = drvr->bus;
+ struct brcmf_timeout tmo;
uint retries = 0;
u8 ready, enable;
int err, ret = 0;
u8 saveclk;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
+
+ /* try to download image and nvram to the dongle */
+ if (drvr->busstate == BRCMF_BUS_DOWN) {
+ if (!(brcmf_sdbrcm_download_firmware(bus, bus->card)))
+ return -1;
+ }
- ASSERT(bus->dhd);
- if (!bus->dhd)
+ if (!bus->drvr)
return 0;
+ /* Start the watchdog timer */
+ bus->drvr->tickcnt = 0;
+ brcmf_sdbrcm_wd_timer(bus, brcmf_watchdog_ms);
+
if (enforce_mutex)
- dhd_os_sdlock(bus->dhd);
+ brcmf_sdbrcm_sdlock(bus);
/* Make sure backplane clock is on, needed to generate F2 interrupt */
- dhdsdio_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
if (bus->clkstate != CLK_AVAIL)
goto exit;
/* Force clocks on backplane to be sure F2 interrupt propagates */
saveclk =
- bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- &err);
+ brcmf_sdcard_cfg_read(bus->card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (!err) {
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
}
if (err) {
- DHD_ERROR(("%s: Failed to force clock for F2: err %d\n",
- __func__, err));
+ BRCMF_ERROR(("%s: Failed to force clock for F2: err %d\n",
+ __func__, err));
goto exit;
}
/* Enable function 2 (frame transfers) */
- W_SDREG((SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT),
- &bus->regs->tosbmailboxdata, retries);
+ w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
+ offsetof(struct sdpcmd_regs, tosbmailboxdata), &retries);
enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable, NULL);
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_0, SDIO_CCCR_IOEx, enable,
+ NULL);
/* Give the dongle some time to do its thing and set IOR2 */
- dhd_timeout_start(&tmo, DHD_WAIT_F2RDY * 1000);
+ brcmf_timeout_start(&tmo, BRCMF_WAIT_F2RDY * 1000);
ready = 0;
- while (ready != enable && !dhd_timeout_expired(&tmo))
- ready =
- bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY,
- NULL);
+ while (ready != enable && !brcmf_timeout_expired(&tmo))
+ ready = brcmf_sdcard_cfg_read(bus->card, SDIO_FUNC_0,
+ SDIO_CCCR_IORx, NULL);
- DHD_INFO(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
- __func__, enable, ready, tmo.elapsed));
+ BRCMF_INFO(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
+ __func__, enable, ready, tmo.elapsed));
/* If F2 successfully enabled, set core and enable interrupts */
if (ready == enable) {
/* Set up the interrupt mask and enable interrupts */
bus->hostintmask = HOSTINTMASK;
- W_SDREG(bus->hostintmask,
- (unsigned int *)CORE_BUS_REG(bus->ci->buscorebase,
- hostintmask), retries);
+ w_sdreg32(bus, bus->hostintmask,
+ offsetof(struct sdpcmd_regs, hostintmask), &retries);
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK,
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1, SBSDIO_WATERMARK,
(u8) watermark, &err);
/* Set bus state according to enable result */
- dhdp->busstate = DHD_BUS_DATA;
-
- /* bcmsdh_intr_unmask(bus->sdh); */
+ drvr->busstate = BRCMF_BUS_DATA;
bus->intdis = false;
if (bus->intr) {
- DHD_INTR(("%s: enable SDIO device interrupts\n",
- __func__));
- bcmsdh_intr_enable(bus->sdh);
+ BRCMF_INTR(("%s: enable SDIO device interrupts\n",
+ __func__));
+ brcmf_sdcard_intr_enable(bus->card);
} else {
- DHD_INTR(("%s: disable SDIO interrupts\n", __func__));
- bcmsdh_intr_disable(bus->sdh);
+ BRCMF_INTR(("%s: disable SDIO interrupts\n", __func__));
+ brcmf_sdcard_intr_disable(bus->card);
}
}
@@ -2955,75 +3384,90 @@ int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
else {
/* Disable F2 again */
enable = SDIO_FUNC_ENABLE_1;
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, enable,
- NULL);
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_0, SDIO_CCCR_IOEx,
+ enable, NULL);
}
/* Restore previous clock setting */
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
saveclk, &err);
+#if defined(OOB_INTR_ONLY)
+ /* Host registration for OOB interrupt */
+ if (brcmf_sdio_register_oob_intr(bus->drvr)) {
+ brcmf_sdbrcm_wd_timer(bus, 0);
+ BRCMF_ERROR(("%s Host failed to resgister for OOB\n",
+ __func__));
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ /* Enable oob at firmware */
+ brcmf_sdbrcm_enable_oob_intr(bus, true);
+#endif /* defined(OOB_INTR_ONLY) */
+
/* If we didn't come up, turn off backplane clock */
- if (dhdp->busstate != DHD_BUS_DATA)
- dhdsdio_clkctl(bus, CLK_NONE, false);
+ if (drvr->busstate != BRCMF_BUS_DATA)
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
exit:
if (enforce_mutex)
- dhd_os_sdunlock(bus->dhd);
+ brcmf_sdbrcm_sdunlock(bus);
return ret;
}
-static void dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx)
+static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
{
- bcmsdh_info_t *sdh = bus->sdh;
- sdpcmd_regs_t *regs = bus->regs;
+ struct brcmf_sdio_card *card = bus->card;
uint retries = 0;
u16 lastrbc;
u8 hi, lo;
int err;
- DHD_ERROR(("%s: %sterminate frame%s\n", __func__,
- (abort ? "abort command, " : ""),
- (rtx ? ", send NAK" : "")));
+ BRCMF_ERROR(("%s: %sterminate frame%s\n", __func__,
+ (abort ? "abort command, " : ""),
+ (rtx ? ", send NAK" : "")));
if (abort)
- bcmsdh_abort(sdh, SDIO_FUNC_2);
+ brcmf_sdcard_abort(card, SDIO_FUNC_2);
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM,
- &err);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_RF_TERM, &err);
bus->f1regdata++;
/* Wait until the packet has been flushed (device/FIFO stable) */
for (lastrbc = retries = 0xffff; retries > 0; retries--) {
- hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCHI,
- NULL);
- lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_RFRAMEBCLO,
- NULL);
+ hi = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_RFRAMEBCHI, NULL);
+ lo = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_RFRAMEBCLO, NULL);
bus->f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
- DHD_ERROR(("%s: count growing: last 0x%04x now "
- "0x%04x\n",
- __func__, lastrbc, ((hi << 8) + lo)));
+ BRCMF_ERROR(("%s: count growing: last 0x%04x now "
+ "0x%04x\n",
+ __func__, lastrbc, ((hi << 8) + lo)));
}
lastrbc = (hi << 8) + lo;
}
if (!retries) {
- DHD_ERROR(("%s: count never zeroed: last 0x%04x\n",
- __func__, lastrbc));
+ BRCMF_ERROR(("%s: count never zeroed: last 0x%04x\n",
+ __func__, lastrbc));
} else {
- DHD_INFO(("%s: flush took %d iterations\n", __func__,
- (0xffff - retries)));
+ BRCMF_INFO(("%s: flush took %d iterations\n", __func__,
+ (0xffff - retries)));
}
if (rtx) {
bus->rxrtx++;
- W_SDREG(SMB_NAK, &regs->tosbmailbox, retries);
+ w_sdreg32(bus, SMB_NAK,
+ offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
+
bus->f1regdata++;
if (retries <= retry_limit)
bus->rxskip = true;
@@ -3033,35 +3477,33 @@ static void dhdsdio_rxfail(dhd_bus_t *bus, bool abort, bool rtx)
bus->nextlen = 0;
/* If we can't reach the device, signal failure */
- if (err || bcmsdh_regfail(sdh))
- bus->dhd->busstate = DHD_BUS_DOWN;
+ if (err || brcmf_sdcard_regfail(card))
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
}
static void
-dhdsdio_read_control(dhd_bus_t *bus, u8 *hdr, uint len, uint doff)
+brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
{
- bcmsdh_info_t *sdh = bus->sdh;
+ struct brcmf_sdio_card *card = bus->card;
uint rdlen, pad;
int sdret;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
/* Control data already received in aligned rxctl */
if ((bus->bus == SPI_BUS) && (!bus->usebufpool))
goto gotpkt;
- ASSERT(bus->rxbuf);
/* Set rxctl for frame (w/optional alignment) */
bus->rxctl = bus->rxbuf;
- if (dhd_alignctl) {
+ if (brcmf_alignctl) {
bus->rxctl += firstread;
- pad = ((unsigned long)bus->rxctl % DHD_SDALIGN);
+ pad = ((unsigned long)bus->rxctl % BRCMF_SDALIGN);
if (pad)
- bus->rxctl += (DHD_SDALIGN - pad);
+ bus->rxctl += (BRCMF_SDALIGN - pad);
bus->rxctl -= firstread;
}
- ASSERT(bus->rxctl >= bus->rxbuf);
/* Copy the already-read portion over */
memcpy(bus->rxctl, hdr, firstread);
@@ -3079,10 +3521,10 @@ dhdsdio_read_control(dhd_bus_t *bus, u8 *hdr, uint len, uint doff)
if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
pad = bus->blocksize - (rdlen % bus->blocksize);
if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
- ((len + pad) < bus->dhd->maxctl))
+ ((len + pad) < bus->drvr->maxctl))
rdlen += pad;
- } else if (rdlen % DHD_SDALIGN) {
- rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ } else if (rdlen % BRCMF_SDALIGN) {
+ rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
}
/* Satisfy length-alignment requirements */
@@ -3090,44 +3532,44 @@ dhdsdio_read_control(dhd_bus_t *bus, u8 *hdr, uint len, uint doff)
rdlen = roundup(rdlen, ALIGNMENT);
/* Drop if the read is too big or it exceeds our maximum */
- if ((rdlen + firstread) > bus->dhd->maxctl) {
- DHD_ERROR(("%s: %d-byte control read exceeds %d-byte buffer\n",
- __func__, rdlen, bus->dhd->maxctl));
- bus->dhd->rx_errors++;
- dhdsdio_rxfail(bus, false, false);
+ if ((rdlen + firstread) > bus->drvr->maxctl) {
+ BRCMF_ERROR(("%s: %d-byte control read exceeds %d-byte"
+ " buffer\n", __func__, rdlen, bus->drvr->maxctl));
+ bus->drvr->rx_errors++;
+ brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
}
- if ((len - doff) > bus->dhd->maxctl) {
- DHD_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds "
- "%d-byte limit\n",
- __func__, len, (len - doff), bus->dhd->maxctl));
- bus->dhd->rx_errors++;
+ if ((len - doff) > bus->drvr->maxctl) {
+ BRCMF_ERROR(("%s: %d-byte ctl frame (%d-byte ctl data) exceeds "
+ "%d-byte limit\n",
+ __func__, len, (len - doff), bus->drvr->maxctl));
+ bus->drvr->rx_errors++;
bus->rx_toolong++;
- dhdsdio_rxfail(bus, false, false);
+ brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
}
/* Read remainder of frame body into the rxctl buffer */
- sdret = bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2,
+ sdret = brcmf_sdcard_recv_buf(card, brcmf_sdcard_cur_sbwad(card),
+ SDIO_FUNC_2,
F2SYNC, (bus->rxctl + firstread), rdlen,
NULL, NULL, NULL);
bus->f2rxdata++;
- ASSERT(sdret != -BCME_PENDING);
/* Control frame failures need retransmission */
if (sdret < 0) {
- DHD_ERROR(("%s: read %d control bytes failed: %d\n",
- __func__, rdlen, sdret));
- bus->rxc_errors++; /* dhd.rx_ctlerrs is higher level */
- dhdsdio_rxfail(bus, true, true);
+ BRCMF_ERROR(("%s: read %d control bytes failed: %d\n",
+ __func__, rdlen, sdret));
+ bus->rxc_errors++;
+ brcmf_sdbrcm_rxfail(bus, true, true);
goto done;
}
gotpkt:
-#ifdef DHD_DEBUG
- if (DHD_BYTES_ON() && DHD_CTL_ON()) {
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_CTL_ON()) {
printk(KERN_DEBUG "RxCtrl:\n");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, bus->rxctl, len);
}
@@ -3139,10 +3581,10 @@ gotpkt:
done:
/* Awake any waiters */
- dhd_os_ioctl_resp_wake(bus->dhd);
+ brcmf_os_ioctl_resp_wake(bus->drvr);
}
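The control-read path above, like the data paths in brcmf_sdbrcm_readframes(), pads the SDIO read length in two steps: round up to a whole block when the leftover fits within the configured roundup budget, otherwise pad to the BRCMF_SDALIGN bus alignment. A minimal standalone sketch of that rule (plain C, not part of the patch; the maxctl and MAX_RX_DATASZ upper-bound checks are omitted and the alignment value is a stand-in):

#define SDALIGN 32	/* stand-in for BRCMF_SDALIGN; the real value may differ */

/* Pad a read length the way the rx paths above do: prefer rounding up
 * to a whole SDIO block when the padding stays within 'roundup', else
 * align to the bus alignment. The maxctl/MAX_RX_DATASZ upper-bound
 * checks are left out of this sketch. */
static unsigned int pad_read_len(unsigned int rdlen, unsigned int blocksize,
				 unsigned int roundup)
{
	unsigned int pad;

	if (roundup && blocksize && rdlen > blocksize) {
		pad = blocksize - (rdlen % blocksize);
		if (pad <= roundup && pad < blocksize)
			return rdlen + pad;
	} else if (rdlen % SDALIGN) {
		return rdlen + SDALIGN - (rdlen % SDALIGN);
	}
	return rdlen;
}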
-static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
+static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
{
u16 dlen, totlen;
u8 *dptr, num = 0;
@@ -3160,19 +3602,18 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
/* If packets, issue read(s) and send up packet chain */
/* Return sequence numbers consumed? */
- DHD_TRACE(("dhdsdio_rxglom: start: glomd %p glom %p\n", bus->glomd,
- bus->glom));
+ BRCMF_TRACE(("brcmf_sdbrcm_rxglom: start: glomd %p glom %p\n",
+ bus->glomd, bus->glom));
/* If there's a descriptor, generate the packet chain */
if (bus->glomd) {
- dhd_os_sdlock_rxq(bus->dhd);
-
pfirst = plast = pnext = NULL;
dlen = (u16) (bus->glomd->len);
dptr = bus->glomd->data;
if (!dlen || (dlen & 1)) {
- DHD_ERROR(("%s: bad glomd len(%d), ignore descriptor\n",
- __func__, dlen));
+ BRCMF_ERROR(("%s: bad glomd len(%d),"
+ " ignore descriptor\n",
+ __func__, dlen));
dlen = 0;
}
@@ -3183,14 +3624,15 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
dptr += sizeof(u16);
if ((sublen < SDPCM_HDRLEN) ||
((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
- DHD_ERROR(("%s: descriptor len %d bad: %d\n",
- __func__, num, sublen));
+ BRCMF_ERROR(("%s: descriptor len %d bad: %d\n",
+ __func__, num, sublen));
pnext = NULL;
break;
}
- if (sublen % DHD_SDALIGN) {
- DHD_ERROR(("%s: sublen %d not multiple of %d\n",
- __func__, sublen, DHD_SDALIGN));
+ if (sublen % BRCMF_SDALIGN) {
+ BRCMF_ERROR(("%s: sublen %d not multiple of"
+ " %d\n", __func__, sublen,
+ BRCMF_SDALIGN));
usechain = false;
}
totlen += sublen;
@@ -3204,123 +3646,120 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
}
/* Allocate/chain packet for next subframe */
- pnext = bcm_pkt_buf_get_skb(sublen + DHD_SDALIGN);
+ pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN);
if (pnext == NULL) {
- DHD_ERROR(("%s: bcm_pkt_buf_get_skb failed, "
- "num %d len %d\n", __func__,
- num, sublen));
+ BRCMF_ERROR(("%s: bcm_pkt_buf_get_skb failed, "
+ "num %d len %d\n", __func__,
+ num, sublen));
break;
}
- ASSERT(!(pnext->prev));
if (!pfirst) {
- ASSERT(!plast);
pfirst = plast = pnext;
} else {
- ASSERT(plast);
plast->next = pnext;
plast = pnext;
}
/* Adhere to start alignment requirements */
- PKTALIGN(pnext, sublen, DHD_SDALIGN);
+ PKTALIGN(pnext, sublen, BRCMF_SDALIGN);
}
/* If all allocations succeeded, save packet chain
in bus structure */
if (pnext) {
- DHD_GLOM(("%s: allocated %d-byte packet chain for %d "
- "subframes\n", __func__, totlen, num));
- if (DHD_GLOM_ON() && bus->nextlen) {
+ BRCMF_GLOM(("%s: allocated %d-byte packet chain for %d "
+ "subframes\n", __func__, totlen, num));
+ if (BRCMF_GLOM_ON() && bus->nextlen) {
if (totlen != bus->nextlen) {
- DHD_GLOM(("%s: glomdesc mismatch: nextlen %d glomdesc %d " "rxseq %d\n",
- __func__, bus->nextlen,
- totlen, rxseq));
+ BRCMF_GLOM(("%s: glomdesc mismatch: "
+ "nextlen %d glomdesc %d "
+ "rxseq %d\n", __func__,
+ bus->nextlen,
+ totlen, rxseq));
}
}
bus->glom = pfirst;
pfirst = pnext = NULL;
} else {
if (pfirst)
- bcm_pkt_buf_free_skb(pfirst);
+ brcmu_pkt_buf_free_skb(pfirst);
bus->glom = NULL;
num = 0;
}
/* Done with descriptor packet */
- bcm_pkt_buf_free_skb(bus->glomd);
+ brcmu_pkt_buf_free_skb(bus->glomd);
bus->glomd = NULL;
bus->nextlen = 0;
-
- dhd_os_sdunlock_rxq(bus->dhd);
}
/* Ok -- either we just generated a packet chain,
or had one from before */
if (bus->glom) {
- if (DHD_GLOM_ON()) {
- DHD_GLOM(("%s: try superframe read, packet chain:\n",
- __func__));
+ if (BRCMF_GLOM_ON()) {
+ BRCMF_GLOM(("%s: try superframe read, packet chain:\n",
+ __func__));
for (pnext = bus->glom; pnext; pnext = pnext->next) {
- DHD_GLOM((" %p: %p len 0x%04x (%d)\n",
- pnext, (u8 *) (pnext->data),
- pnext->len, pnext->len));
+ BRCMF_GLOM((" %p: %p len 0x%04x (%d)\n",
+ pnext, (u8 *) (pnext->data),
+ pnext->len, pnext->len));
}
}
pfirst = bus->glom;
- dlen = (u16) bcm_pkttotlen(pfirst);
+ dlen = (u16) brcmu_pkttotlen(pfirst);
/* Do an SDIO read for the superframe. Configurable iovar to
* read directly into the chained packet, or allocate a large
* packet and copy into the chain.
*/
if (usechain) {
- errcode = bcmsdh_recv_buf(bus,
- bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+ errcode = brcmf_sdcard_recv_buf(bus->card,
+ brcmf_sdcard_cur_sbwad(bus->card),
+ SDIO_FUNC_2,
F2SYNC, (u8 *) pfirst->data, dlen,
pfirst, NULL, NULL);
} else if (bus->dataptr) {
- errcode = bcmsdh_recv_buf(bus,
- bcmsdh_cur_sbwad(bus->sdh), SDIO_FUNC_2,
+ errcode = brcmf_sdcard_recv_buf(bus->card,
+ brcmf_sdcard_cur_sbwad(bus->card),
+ SDIO_FUNC_2,
F2SYNC, bus->dataptr, dlen,
NULL, NULL, NULL);
- sublen = (u16) bcm_pktfrombuf(pfirst, 0, dlen,
+ sublen = (u16) brcmu_pktfrombuf(pfirst, 0, dlen,
bus->dataptr);
if (sublen != dlen) {
- DHD_ERROR(("%s: FAILED TO COPY, dlen %d sublen %d\n",
- __func__, dlen, sublen));
+ BRCMF_ERROR(("%s: FAILED TO COPY, dlen %d "
+ "sublen %d\n",
+ __func__, dlen, sublen));
errcode = -1;
}
pnext = NULL;
} else {
- DHD_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n",
- dlen));
+ BRCMF_ERROR(("COULDN'T ALLOC %d-BYTE GLOM, "
+ "FORCE FAILURE\n", dlen));
errcode = -1;
}
bus->f2rxdata++;
- ASSERT(errcode != -BCME_PENDING);
/* On failure, kill the superframe, allow a couple retries */
if (errcode < 0) {
- DHD_ERROR(("%s: glom read of %d bytes failed: %d\n",
- __func__, dlen, errcode));
- bus->dhd->rx_errors++;
+ BRCMF_ERROR(("%s: glom read of %d bytes failed: %d\n",
+ __func__, dlen, errcode));
+ bus->drvr->rx_errors++;
if (bus->glomerr++ < 3) {
- dhdsdio_rxfail(bus, true, true);
+ brcmf_sdbrcm_rxfail(bus, true, true);
} else {
bus->glomerr = 0;
- dhdsdio_rxfail(bus, true, false);
- dhd_os_sdlock_rxq(bus->dhd);
- bcm_pkt_buf_free_skb(bus->glom);
- dhd_os_sdunlock_rxq(bus->dhd);
+ brcmf_sdbrcm_rxfail(bus, true, false);
+ brcmu_pkt_buf_free_skb(bus->glom);
bus->rxglomfail++;
bus->glom = NULL;
}
return 0;
}
-#ifdef DHD_DEBUG
- if (DHD_GLOM_ON()) {
+#ifdef BCMDBG
+ if (BRCMF_GLOM_ON()) {
printk(KERN_DEBUG "SUPERFRAME:\n");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
pfirst->data, min_t(int, pfirst->len, 48));
@@ -3336,8 +3775,8 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
bus->nextlen = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
- DHD_INFO(("%s: nextlen too large (%d) seq %d\n",
- __func__, bus->nextlen, seq));
+ BRCMF_INFO(("%s: nextlen too large (%d) seq %d\n",
+ __func__, bus->nextlen, seq));
bus->nextlen = 0;
}
doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
@@ -3345,47 +3784,48 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
errcode = 0;
if ((u16)~(sublen ^ check)) {
- DHD_ERROR(("%s (superframe): HW hdr error: len/check "
- "0x%04x/0x%04x\n", __func__, sublen, check));
+ BRCMF_ERROR(("%s (superframe): HW hdr error: len/check "
+ "0x%04x/0x%04x\n", __func__, sublen,
+ check));
errcode = -1;
} else if (roundup(sublen, bus->blocksize) != dlen) {
- DHD_ERROR(("%s (superframe): len 0x%04x, rounded "
- "0x%04x, expect 0x%04x\n",
- __func__, sublen,
- roundup(sublen, bus->blocksize), dlen));
+ BRCMF_ERROR(("%s (superframe): len 0x%04x, rounded "
+ "0x%04x, expect 0x%04x\n",
+ __func__, sublen,
+ roundup(sublen, bus->blocksize), dlen));
errcode = -1;
} else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) !=
SDPCM_GLOM_CHANNEL) {
- DHD_ERROR(("%s (superframe): bad channel %d\n",
+ BRCMF_ERROR(("%s (superframe): bad channel %d\n",
__func__,
SDPCM_PACKET_CHANNEL(&dptr
[SDPCM_FRAMETAG_LEN])));
errcode = -1;
} else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
- DHD_ERROR(("%s (superframe): got second descriptor?\n",
- __func__));
+ BRCMF_ERROR(("%s (superframe): got 2nd descriptor?\n",
+ __func__));
errcode = -1;
} else if ((doff < SDPCM_HDRLEN) ||
(doff > (pfirst->len - SDPCM_HDRLEN))) {
- DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d "
- "pkt %d min %d\n",
- __func__, doff, sublen,
- pfirst->len, SDPCM_HDRLEN));
+ BRCMF_ERROR(("%s (superframe): Bad data offset %d: "
+ "HW %d pkt %d min %d\n",
+ __func__, doff, sublen,
+ pfirst->len, SDPCM_HDRLEN));
errcode = -1;
}
/* Check sequence number of superframe SW header */
if (rxseq != seq) {
- DHD_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
- __func__, seq, rxseq));
+ BRCMF_INFO(("%s: (superframe) rx_seq %d, expected %d\n",
+ __func__, seq, rxseq));
bus->rx_badseq++;
rxseq = seq;
}
/* Check window for sanity */
if ((u8) (txmax - bus->tx_seq) > 0x40) {
- DHD_ERROR(("%s: unlikely tx max %d with tx_seq %d\n",
- __func__, txmax, bus->tx_seq));
+ BRCMF_ERROR(("%s: unlikely tx max %d with tx_seq %d\n",
+ __func__, txmax, bus->tx_seq));
txmax = bus->tx_seq + 2;
}
bus->tx_max = txmax;
@@ -3403,8 +3843,8 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
check = get_unaligned_le16(dptr + sizeof(u16));
chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
-#ifdef DHD_DEBUG
- if (DHD_GLOM_ON()) {
+#ifdef BCMDBG
+ if (BRCMF_GLOM_ON()) {
printk(KERN_DEBUG "subframe:\n");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
dptr, 32);
@@ -3412,24 +3852,25 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
#endif
if ((u16)~(sublen ^ check)) {
- DHD_ERROR(("%s (subframe %d): HW hdr error: "
- "len/check 0x%04x/0x%04x\n",
- __func__, num, sublen, check));
+ BRCMF_ERROR(("%s (subframe %d): HW hdr error: "
+ "len/check 0x%04x/0x%04x\n",
+ __func__, num, sublen, check));
errcode = -1;
} else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
- DHD_ERROR(("%s (subframe %d): length mismatch: "
- "len 0x%04x, expect 0x%04x\n",
- __func__, num, sublen, dlen));
+ BRCMF_ERROR(("%s (subframe %d): length mismatch"
+ ": len 0x%04x, expect 0x%04x\n",
+ __func__, num, sublen, dlen));
errcode = -1;
} else if ((chan != SDPCM_DATA_CHANNEL) &&
(chan != SDPCM_EVENT_CHANNEL)) {
- DHD_ERROR(("%s (subframe %d): bad channel %d\n",
- __func__, num, chan));
+ BRCMF_ERROR(("%s (subframe %d): bad channel"
+ " %d\n", __func__, num, chan));
errcode = -1;
} else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
- DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n",
- __func__, num, doff, sublen,
- SDPCM_HDRLEN));
+ BRCMF_ERROR(("%s (subframe %d): Bad data offset"
+ " %d: HW %d min %d\n",
+ __func__, num, doff, sublen,
+ SDPCM_HDRLEN));
errcode = -1;
}
}
@@ -3440,13 +3881,11 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
if (bus->glomerr++ < 3) {
/* Restore superframe header space */
skb_push(pfirst, sfdoff);
- dhdsdio_rxfail(bus, true, true);
+ brcmf_sdbrcm_rxfail(bus, true, true);
} else {
bus->glomerr = 0;
- dhdsdio_rxfail(bus, true, false);
- dhd_os_sdlock_rxq(bus->dhd);
- bcm_pkt_buf_free_skb(bus->glom);
- dhd_os_sdunlock_rxq(bus->dhd);
+ brcmf_sdbrcm_rxfail(bus, true, false);
+ brcmu_pkt_buf_free_skb(bus->glom);
bus->rxglomfail++;
bus->glom = NULL;
}
@@ -3459,7 +3898,6 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
bus->glom = NULL;
plast = NULL;
- dhd_os_sdlock_rxq(bus->dhd);
for (num = 0; pfirst; rxseq++, pfirst = pnext) {
pnext = pfirst->next;
pfirst->next = NULL;
@@ -3470,22 +3908,22 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
- DHD_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d "
- "chan %d seq %d\n",
- __func__, num, pfirst, pfirst->data,
- pfirst->len, sublen, chan, seq));
+ BRCMF_GLOM(("%s: Get subframe %d, %p(%p/%d), sublen %d "
+ "chan %d seq %d\n",
+ __func__, num, pfirst, pfirst->data,
+ pfirst->len, sublen, chan, seq));
- ASSERT((chan == SDPCM_DATA_CHANNEL)
- || (chan == SDPCM_EVENT_CHANNEL));
+ /* precondition: chan == SDPCM_DATA_CHANNEL ||
+ chan == SDPCM_EVENT_CHANNEL */
if (rxseq != seq) {
- DHD_GLOM(("%s: rx_seq %d, expected %d\n",
- __func__, seq, rxseq));
+ BRCMF_GLOM(("%s: rx_seq %d, expected %d\n",
+ __func__, seq, rxseq));
bus->rx_badseq++;
rxseq = seq;
}
-#ifdef DHD_DEBUG
- if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
printk(KERN_DEBUG "Rx Subframe Data:\n");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
dptr, dlen);
@@ -3496,24 +3934,22 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
skb_pull(pfirst, doff);
if (pfirst->len == 0) {
- bcm_pkt_buf_free_skb(pfirst);
+ brcmu_pkt_buf_free_skb(pfirst);
if (plast) {
plast->next = pnext;
} else {
- ASSERT(save_pfirst == pfirst);
save_pfirst = pnext;
}
continue;
- } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pfirst) !=
- 0) {
- DHD_ERROR(("%s: rx protocol error\n",
- __func__));
- bus->dhd->rx_errors++;
- bcm_pkt_buf_free_skb(pfirst);
+ } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, pfirst)
+ != 0) {
+ BRCMF_ERROR(("%s: rx protocol error\n",
+ __func__));
+ bus->drvr->rx_errors++;
+ brcmu_pkt_buf_free_skb(pfirst);
if (plast) {
plast->next = pnext;
} else {
- ASSERT(save_pfirst == pfirst);
save_pfirst = pnext;
}
continue;
@@ -3525,24 +3961,23 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
plast = pfirst;
num++;
-#ifdef DHD_DEBUG
- if (DHD_GLOM_ON()) {
- DHD_GLOM(("%s subframe %d to stack, %p(%p/%d) "
- "nxt/lnk %p/%p\n",
- __func__, num, pfirst, pfirst->data,
- pfirst->len, pfirst->next,
- pfirst->prev));
+#ifdef BCMDBG
+ if (BRCMF_GLOM_ON()) {
+ BRCMF_GLOM(("%s subframe %d to stack, %p"
+ "(%p/%d) nxt/lnk %p/%p\n",
+ __func__, num, pfirst, pfirst->data,
+ pfirst->len, pfirst->next,
+ pfirst->prev));
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
pfirst->data,
min_t(int, pfirst->len, 32));
}
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
}
- dhd_os_sdunlock_rxq(bus->dhd);
if (num) {
- dhd_os_sdunlock(bus->dhd);
- dhd_rx_frame(bus->dhd, ifidx, save_pfirst, num);
- dhd_os_sdlock(bus->dhd);
+ brcmf_sdbrcm_sdunlock(bus);
+ brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num);
+ brcmf_sdbrcm_sdlock(bus);
}
bus->rxglomframes++;
@@ -3552,9 +3987,10 @@ static u8 dhdsdio_rxglom(dhd_bus_t *bus, u8 rxseq)
}
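Several checks in this function and in brcmf_sdbrcm_readframes() rely on the SDPCM hardware header convention that the second 16-bit word is the bitwise complement of the length word, so (u16)~(len ^ check) is non-zero exactly when the pair is inconsistent. A standalone sketch of that test (plain C, not part of the patch):

#include <stdint.h>

/* SDPCM hardware header check: 'check' must be the one's complement of
 * 'len'. Returns 0 for a consistent header and non-zero otherwise,
 * matching the (u16)~(len ^ check) tests above. Example:
 * hw_hdr_bad(0x0040, 0xffbf) == 0, hw_hdr_bad(0x0040, 0x0000) != 0. */
static uint16_t hw_hdr_bad(uint16_t len, uint16_t check)
{
	return (uint16_t)~(len ^ check);
}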
/* Return true if there may be more frames to read */
-static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
+static uint
+brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
{
- bcmsdh_info_t *sdh = bus->sdh;
+ struct brcmf_sdio_card *card = bus->card;
u16 len, check; /* Extracted hardware header fields */
u8 chan, seq, doff; /* Extracted software header fields */
@@ -3565,7 +4001,7 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
u16 rdlen; /* Total number of bytes to read */
u8 rxseq; /* Next sequence number to expect */
uint rxleft = 0; /* Remaining number of frames allowed */
- int sdret; /* Return code from bcmsdh calls */
+ int sdret; /* Return code from calls */
u8 txmax; /* Maximum tx sequence offered */
bool len_consistent; /* Result of comparing readahead len and
len from hw-hdr */
@@ -3573,17 +4009,15 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
int ifidx = 0;
uint rxcount = 0; /* Total frames read */
-#if defined(DHD_DEBUG) || defined(SDTEST)
+#if defined(BCMDBG) || defined(SDTEST)
bool sdtest = false; /* To limit message spew from test mode */
#endif
- DHD_TRACE(("%s: Enter\n", __func__));
-
- ASSERT(maxframes);
+ BRCMF_TRACE(("%s: Enter\n", __func__));
#ifdef SDTEST
/* Allow pktgen to override maxframes */
- if (bus->pktgen_count && (bus->pktgen_mode == DHD_PKTGEN_RECV)) {
+ if (bus->pktgen_count && (bus->pktgen_mode == BRCMF_PKTGEN_RECV)) {
maxframes = bus->pktgen_count;
sdtest = true;
}
@@ -3593,23 +4027,23 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
*finished = false;
for (rxseq = bus->rx_seq, rxleft = maxframes;
- !bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN;
+ !bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN;
rxseq++, rxleft--) {
/* Handle glomming separately */
if (bus->glom || bus->glomd) {
u8 cnt;
- DHD_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
- __func__, bus->glomd, bus->glom));
- cnt = dhdsdio_rxglom(bus, rxseq);
- DHD_GLOM(("%s: rxglom returned %d\n", __func__, cnt));
+ BRCMF_GLOM(("%s: calling rxglom: glomd %p, glom %p\n",
+ __func__, bus->glomd, bus->glom));
+ cnt = brcmf_sdbrcm_rxglom(bus, rxseq);
+ BRCMF_GLOM(("%s: rxglom returned %d\n", __func__, cnt));
rxseq += cnt - 1;
rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
continue;
}
/* Try doing single read if we can */
- if (dhd_readahead && bus->nextlen) {
+ if (brcmf_readahead && bus->nextlen) {
u16 nextlen = bus->nextlen;
bus->nextlen = 0;
@@ -3629,9 +4063,9 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
&& ((rdlen + pad + firstread) <
MAX_RX_DATASZ))
rdlen += pad;
- } else if (rdlen % DHD_SDALIGN) {
+ } else if (rdlen % BRCMF_SDALIGN) {
rdlen +=
- DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
}
}
@@ -3646,42 +4080,40 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
* or non-data frame.
*/
/* Allocate a packet buffer */
- dhd_os_sdlock_rxq(bus->dhd);
- pkt = bcm_pkt_buf_get_skb(rdlen + DHD_SDALIGN);
+ pkt = brcmu_pkt_buf_get_skb(rdlen + BRCMF_SDALIGN);
if (!pkt) {
if (bus->bus == SPI_BUS) {
bus->usebufpool = false;
bus->rxctl = bus->rxbuf;
- if (dhd_alignctl) {
+ if (brcmf_alignctl) {
bus->rxctl += firstread;
pad = ((unsigned long)bus->rxctl %
- DHD_SDALIGN);
+ BRCMF_SDALIGN);
if (pad)
bus->rxctl +=
- (DHD_SDALIGN - pad);
+ (BRCMF_SDALIGN - pad);
bus->rxctl -= firstread;
}
- ASSERT(bus->rxctl >= bus->rxbuf);
rxbuf = bus->rxctl;
/* Read the entire frame */
- sdret = bcmsdh_recv_buf(bus,
- bcmsdh_cur_sbwad(sdh),
- SDIO_FUNC_2, F2SYNC,
- rxbuf, rdlen,
- NULL, NULL, NULL);
+ sdret = brcmf_sdcard_recv_buf(card,
+ brcmf_sdcard_cur_sbwad(card),
+ SDIO_FUNC_2, F2SYNC,
+ rxbuf, rdlen,
+ NULL, NULL, NULL);
bus->f2rxdata++;
- ASSERT(sdret != -BCME_PENDING);
/* Control frame failures need
retransmission */
if (sdret < 0) {
- DHD_ERROR(("%s: read %d control bytes failed: %d\n",
- __func__,
- rdlen, sdret));
+ BRCMF_ERROR(("%s: read %d "
+ "control bytes "
+ "failed: %d\n",
+ __func__,
+ rdlen, sdret));
/* dhd.rx_ctlerrs is higher */
bus->rxc_errors++;
- dhd_os_sdunlock_rxq(bus->dhd);
- dhdsdio_rxfail(bus, true,
+ brcmf_sdbrcm_rxfail(bus, true,
(bus->bus ==
SPI_BUS) ? false
: true);
@@ -3690,50 +4122,45 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
} else {
/* Give up on data,
request rtx of events */
- DHD_ERROR(("%s (nextlen): "
- "bcm_pkt_buf_get_skb failed:"
- " len %d rdlen %d expected"
- " rxseq %d\n", __func__,
- len, rdlen, rxseq));
- /* Just go try again w/normal
- header read */
- dhd_os_sdunlock_rxq(bus->dhd);
+ BRCMF_ERROR(("%s (nextlen): "
+ "brcmu_pkt_buf_get_skb "
+ "failed:"
+ " len %d rdlen %d expected"
+ " rxseq %d\n", __func__,
+ len, rdlen, rxseq));
continue;
}
} else {
if (bus->bus == SPI_BUS)
bus->usebufpool = true;
- ASSERT(!(pkt->prev));
- PKTALIGN(pkt, rdlen, DHD_SDALIGN);
+ PKTALIGN(pkt, rdlen, BRCMF_SDALIGN);
rxbuf = (u8 *) (pkt->data);
/* Read the entire frame */
- sdret = bcmsdh_recv_buf(bus,
- bcmsdh_cur_sbwad(sdh),
+ sdret = brcmf_sdcard_recv_buf(card,
+ brcmf_sdcard_cur_sbwad(card),
SDIO_FUNC_2, F2SYNC,
rxbuf, rdlen,
pkt, NULL, NULL);
bus->f2rxdata++;
- ASSERT(sdret != -BCME_PENDING);
if (sdret < 0) {
- DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
- __func__, rdlen, sdret));
- bcm_pkt_buf_free_skb(pkt);
- bus->dhd->rx_errors++;
- dhd_os_sdunlock_rxq(bus->dhd);
+ BRCMF_ERROR(("%s (nextlen): read %d"
+ " bytes failed: %d\n",
+ __func__, rdlen, sdret));
+ brcmu_pkt_buf_free_skb(pkt);
+ bus->drvr->rx_errors++;
/* Force retry w/normal header read.
* Don't attempt NAK for
* gSPI
*/
- dhdsdio_rxfail(bus, true,
+ brcmf_sdbrcm_rxfail(bus, true,
(bus->bus ==
SPI_BUS) ? false :
true);
continue;
}
}
- dhd_os_sdunlock_rxq(bus->dhd);
/* Now check the header */
memcpy(bus->rxhdr, rxbuf, SDPCM_HDRLEN);
@@ -3744,29 +4171,29 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
/* All zeros means readahead info was bad */
if (!(len | check)) {
- DHD_INFO(("%s (nextlen): read zeros in HW "
- "header???\n", __func__));
- dhdsdio_pktfree2(bus, pkt);
+ BRCMF_INFO(("%s (nextlen): read zeros in HW "
+ "header???\n", __func__));
+ brcmf_sdbrcm_pktfree2(bus, pkt);
continue;
}
/* Validate check bytes */
if ((u16)~(len ^ check)) {
- DHD_ERROR(("%s (nextlen): HW hdr error:"
- " nextlen/len/check"
- " 0x%04x/0x%04x/0x%04x\n",
- __func__, nextlen, len, check));
+ BRCMF_ERROR(("%s (nextlen): HW hdr error:"
+ " nextlen/len/check"
+ " 0x%04x/0x%04x/0x%04x\n",
+ __func__, nextlen, len, check));
bus->rx_badhdr++;
- dhdsdio_rxfail(bus, false, false);
- dhdsdio_pktfree2(bus, pkt);
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdbrcm_pktfree2(bus, pkt);
continue;
}
/* Validate frame length */
if (len < SDPCM_HDRLEN) {
- DHD_ERROR(("%s (nextlen): HW hdr length "
- "invalid: %d\n", __func__, len));
- dhdsdio_pktfree2(bus, pkt);
+ BRCMF_ERROR(("%s (nextlen): HW hdr length "
+ "invalid: %d\n", __func__, len));
+ brcmf_sdbrcm_pktfree2(bus, pkt);
continue;
}
@@ -3775,13 +4202,14 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
if (len_consistent) {
/* Mismatch, force retry w/normal
header (may be >4K) */
- DHD_ERROR(("%s (nextlen): mismatch, "
- "nextlen %d len %d rnd %d; "
- "expected rxseq %d\n",
- __func__, nextlen,
- len, roundup(len, 16), rxseq));
- dhdsdio_rxfail(bus, true, (bus->bus != SPI_BUS));
- dhdsdio_pktfree2(bus, pkt);
+ BRCMF_ERROR(("%s (nextlen): mismatch, "
+ "nextlen %d len %d rnd %d; "
+ "expected rxseq %d\n",
+ __func__, nextlen,
+ len, roundup(len, 16), rxseq));
+ brcmf_sdbrcm_rxfail(bus, true,
+ bus->bus != SPI_BUS);
+ brcmf_sdbrcm_pktfree2(bus, pkt);
continue;
}
@@ -3799,12 +4227,13 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
bus->rxhdr[SDPCM_FRAMETAG_LEN +
SDPCM_NEXTLEN_OFFSET];
if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
- DHD_INFO(("%s (nextlen): got frame w/nextlen too large" " (%d), seq %d\n",
- __func__, bus->nextlen, seq));
+ BRCMF_INFO(("%s (nextlen): got frame w/nextlen"
+ " too large (%d), seq %d\n",
+ __func__, bus->nextlen, seq));
bus->nextlen = 0;
}
- bus->dhd->rx_readahead_cnt++;
+ bus->drvr->rx_readahead_cnt++;
/* Handle Flow Control */
fcbits = SDPCM_FCMASK_VALUE(
@@ -3823,27 +4252,27 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
/* Check and update sequence number */
if (rxseq != seq) {
- DHD_INFO(("%s (nextlen): rx_seq %d, expected "
- "%d\n", __func__, seq, rxseq));
+ BRCMF_INFO(("%s (nextlen): rx_seq %d, expected "
+ "%d\n", __func__, seq, rxseq));
bus->rx_badseq++;
rxseq = seq;
}
/* Check window for sanity */
if ((u8) (txmax - bus->tx_seq) > 0x40) {
- DHD_ERROR(("%s: got unlikely tx max %d with "
- "tx_seq %d\n",
- __func__, txmax, bus->tx_seq));
+ BRCMF_ERROR(("%s: got unlikely tx max %d with "
+ "tx_seq %d\n",
+ __func__, txmax, bus->tx_seq));
txmax = bus->tx_seq + 2;
}
bus->tx_max = txmax;
-#ifdef DHD_DEBUG
- if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
printk(KERN_DEBUG "Rx Data:\n");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
rxbuf, len);
- } else if (DHD_HDRS_ON()) {
+ } else if (BRCMF_HDRS_ON()) {
printk(KERN_DEBUG "RxHdr:\n");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
bus->rxhdr, SDPCM_HDRLEN);
@@ -3852,31 +4281,35 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
if (chan == SDPCM_CONTROL_CHANNEL) {
if (bus->bus == SPI_BUS) {
- dhdsdio_read_control(bus, rxbuf, len,
- doff);
+ brcmf_sdbrcm_read_control(bus, rxbuf,
+ len, doff);
} else {
- DHD_ERROR(("%s (nextlen): readahead on control" " packet %d?\n",
- __func__, seq));
+ BRCMF_ERROR(("%s (nextlen): readahead"
+ " on control packet %d?\n",
+ __func__, seq));
/* Force retry w/normal header read */
bus->nextlen = 0;
- dhdsdio_rxfail(bus, false, true);
+ brcmf_sdbrcm_rxfail(bus, false, true);
}
- dhdsdio_pktfree2(bus, pkt);
+ brcmf_sdbrcm_pktfree2(bus, pkt);
continue;
}
if ((bus->bus == SPI_BUS) && !bus->usebufpool) {
- DHD_ERROR(("Received %d bytes on %d channel. Running out of " "rx pktbuf's or not yet malloced.\n",
- len, chan));
+ BRCMF_ERROR(("Received %d bytes on %d channel."
+ " Running out of " "rx pktbuf's or"
+ " not yet malloced.\n",
+ len, chan));
continue;
}
/* Validate data offset */
if ((doff < SDPCM_HDRLEN) || (doff > len)) {
- DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n",
- __func__, doff, len, SDPCM_HDRLEN));
- dhdsdio_rxfail(bus, false, false);
- dhdsdio_pktfree2(bus, pkt);
+ BRCMF_ERROR(("%s (nextlen): bad data offset %d:"
+ " HW len %d min %d\n", __func__,
+ doff, len, SDPCM_HDRLEN));
+ brcmf_sdbrcm_rxfail(bus, false, false);
+ brcmf_sdbrcm_pktfree2(bus, pkt);
continue;
}
@@ -3888,21 +4321,21 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
break;
/* Read frame header (hardware and software) */
- sdret = bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh),
+ sdret = brcmf_sdcard_recv_buf(card,
+ brcmf_sdcard_cur_sbwad(card),
SDIO_FUNC_2, F2SYNC, bus->rxhdr, firstread,
NULL, NULL, NULL);
bus->f2rxhdrs++;
- ASSERT(sdret != -BCME_PENDING);
if (sdret < 0) {
- DHD_ERROR(("%s: RXHEADER FAILED: %d\n", __func__,
- sdret));
+ BRCMF_ERROR(("%s: RXHEADER FAILED: %d\n", __func__,
+ sdret));
bus->rx_hdrfail++;
- dhdsdio_rxfail(bus, true, true);
+ brcmf_sdbrcm_rxfail(bus, true, true);
continue;
}
-#ifdef DHD_DEBUG
- if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() || BRCMF_HDRS_ON()) {
printk(KERN_DEBUG "RxHdr:\n");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
bus->rxhdr, SDPCM_HDRLEN);
@@ -3921,17 +4354,17 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
/* Validate check bytes */
if ((u16) ~(len ^ check)) {
- DHD_ERROR(("%s: HW hdr err: len/check 0x%04x/0x%04x\n",
- __func__, len, check));
+ BRCMF_ERROR(("%s: HW hdr err: len/check "
+ "0x%04x/0x%04x\n", __func__, len, check));
bus->rx_badhdr++;
- dhdsdio_rxfail(bus, false, false);
+ brcmf_sdbrcm_rxfail(bus, false, false);
continue;
}
/* Validate frame length */
if (len < SDPCM_HDRLEN) {
- DHD_ERROR(("%s: HW hdr length invalid: %d\n",
- __func__, len));
+ BRCMF_ERROR(("%s: HW hdr length invalid: %d\n",
+ __func__, len));
continue;
}
@@ -3943,12 +4376,11 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
/* Validate data offset */
if ((doff < SDPCM_HDRLEN) || (doff > len)) {
- DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d "
- "seq %d\n",
- __func__, doff, len, SDPCM_HDRLEN, seq));
+ BRCMF_ERROR(("%s: Bad data offset %d: HW len %d,"
+ " min %d seq %d\n", __func__, doff,
+ len, SDPCM_HDRLEN, seq));
bus->rx_badhdr++;
- ASSERT(0);
- dhdsdio_rxfail(bus, false, false);
+ brcmf_sdbrcm_rxfail(bus, false, false);
continue;
}
@@ -3956,9 +4388,9 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
bus->nextlen =
bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
- DHD_INFO(("%s (nextlen): got frame w/nextlen too large "
- "(%d), seq %d\n",
- __func__, bus->nextlen, seq));
+ BRCMF_INFO(("%s (nextlen): got frame w/nextlen too"
+ " large (%d), seq %d\n",
+ __func__, bus->nextlen, seq));
bus->nextlen = 0;
}
@@ -3978,30 +4410,29 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
/* Check and update sequence number */
if (rxseq != seq) {
- DHD_INFO(("%s: rx_seq %d, expected %d\n", __func__,
- seq, rxseq));
+ BRCMF_INFO(("%s: rx_seq %d, expected %d\n", __func__,
+ seq, rxseq));
bus->rx_badseq++;
rxseq = seq;
}
/* Check window for sanity */
if ((u8) (txmax - bus->tx_seq) > 0x40) {
- DHD_ERROR(("%s: unlikely tx max %d with tx_seq %d\n",
- __func__, txmax, bus->tx_seq));
+ BRCMF_ERROR(("%s: unlikely tx max %d with tx_seq %d\n",
+ __func__, txmax, bus->tx_seq));
txmax = bus->tx_seq + 2;
}
bus->tx_max = txmax;
/* Call a separate function for control frames */
if (chan == SDPCM_CONTROL_CHANNEL) {
- dhdsdio_read_control(bus, bus->rxhdr, len, doff);
+ brcmf_sdbrcm_read_control(bus, bus->rxhdr, len, doff);
continue;
}
- ASSERT((chan == SDPCM_DATA_CHANNEL)
- || (chan == SDPCM_EVENT_CHANNEL)
- || (chan == SDPCM_TEST_CHANNEL)
- || (chan == SDPCM_GLOM_CHANNEL));
+ /* precondition: chan is either SDPCM_DATA_CHANNEL,
+ SDPCM_EVENT_CHANNEL, SDPCM_TEST_CHANNEL or
+ SDPCM_GLOM_CHANNEL */
/* Length to read */
rdlen = (len > firstread) ? (len - firstread) : 0;
@@ -4013,8 +4444,8 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
((rdlen + pad + firstread) < MAX_RX_DATASZ))
rdlen += pad;
- } else if (rdlen % DHD_SDALIGN) {
- rdlen += DHD_SDALIGN - (rdlen % DHD_SDALIGN);
+ } else if (rdlen % BRCMF_SDALIGN) {
+ rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
}
/* Satisfy length-alignment requirements */
@@ -4023,54 +4454,45 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
if ((rdlen + firstread) > MAX_RX_DATASZ) {
/* Too long -- skip this frame */
- DHD_ERROR(("%s: too long: len %d rdlen %d\n",
- __func__, len, rdlen));
- bus->dhd->rx_errors++;
+ BRCMF_ERROR(("%s: too long: len %d rdlen %d\n",
+ __func__, len, rdlen));
+ bus->drvr->rx_errors++;
bus->rx_toolong++;
- dhdsdio_rxfail(bus, false, false);
+ brcmf_sdbrcm_rxfail(bus, false, false);
continue;
}
- dhd_os_sdlock_rxq(bus->dhd);
- pkt = bcm_pkt_buf_get_skb(rdlen + firstread + DHD_SDALIGN);
+ pkt = brcmu_pkt_buf_get_skb(rdlen + firstread + BRCMF_SDALIGN);
if (!pkt) {
/* Give up on data, request rtx of events */
- DHD_ERROR(("%s: bcm_pkt_buf_get_skb failed: rdlen %d "
- "chan %d\n", __func__, rdlen, chan));
- bus->dhd->rx_dropped++;
- dhd_os_sdunlock_rxq(bus->dhd);
- dhdsdio_rxfail(bus, false, RETRYCHAN(chan));
+ BRCMF_ERROR(("%s: brcmu_pkt_buf_get_skb failed:"
+ " rdlen %d chan %d\n", __func__, rdlen,
+ chan));
+ bus->drvr->rx_dropped++;
+ brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan));
continue;
}
- dhd_os_sdunlock_rxq(bus->dhd);
-
- ASSERT(!(pkt->prev));
/* Leave room for what we already read, and align remainder */
- ASSERT(firstread < pkt->len);
skb_pull(pkt, firstread);
- PKTALIGN(pkt, rdlen, DHD_SDALIGN);
+ PKTALIGN(pkt, rdlen, BRCMF_SDALIGN);
/* Read the remaining frame data */
- sdret = bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2,
- F2SYNC, ((u8 *) (pkt->data)), rdlen,
- pkt, NULL, NULL);
+ sdret = brcmf_sdcard_recv_buf(card,
+ brcmf_sdcard_cur_sbwad(card),
+ SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)),
+ rdlen, pkt, NULL, NULL);
bus->f2rxdata++;
- ASSERT(sdret != -BCME_PENDING);
if (sdret < 0) {
- DHD_ERROR(("%s: read %d %s bytes failed: %d\n",
- __func__, rdlen,
- ((chan ==
- SDPCM_EVENT_CHANNEL) ? "event" : ((chan ==
- SDPCM_DATA_CHANNEL)
- ? "data" : "test")),
- sdret));
- dhd_os_sdlock_rxq(bus->dhd);
- bcm_pkt_buf_free_skb(pkt);
- dhd_os_sdunlock_rxq(bus->dhd);
- bus->dhd->rx_errors++;
- dhdsdio_rxfail(bus, true, RETRYCHAN(chan));
+ BRCMF_ERROR(("%s: read %d %s bytes failed: %d\n",
+ __func__, rdlen,
+ ((chan == SDPCM_EVENT_CHANNEL) ? "event"
+ : ((chan == SDPCM_DATA_CHANNEL) ? "data"
+ : "test")), sdret));
+ brcmu_pkt_buf_free_skb(pkt);
+ bus->drvr->rx_errors++;
+ brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan));
continue;
}
@@ -4078,8 +4500,8 @@ static uint dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
skb_push(pkt, firstread);
memcpy(pkt->data, bus->rxhdr, firstread);
-#ifdef DHD_DEBUG
- if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
printk(KERN_DEBUG "Rx Data:\n");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
pkt->data, len);
@@ -4090,10 +4512,10 @@ deliver:
/* Save superframe descriptor and allocate packet frame */
if (chan == SDPCM_GLOM_CHANNEL) {
if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
- DHD_GLOM(("%s: glom descriptor, %d bytes:\n",
- __func__, len));
-#ifdef DHD_DEBUG
- if (DHD_GLOM_ON()) {
+ BRCMF_GLOM(("%s: glom descriptor, %d bytes:\n",
+ __func__, len));
+#ifdef BCMDBG
+ if (BRCMF_GLOM_ON()) {
printk(KERN_DEBUG "Glom Data:\n");
print_hex_dump_bytes("",
DUMP_PREFIX_OFFSET,
@@ -4101,13 +4523,12 @@ deliver:
}
#endif
__skb_trim(pkt, len);
- ASSERT(doff == SDPCM_HDRLEN);
skb_pull(pkt, SDPCM_HDRLEN);
bus->glomd = pkt;
} else {
- DHD_ERROR(("%s: glom superframe w/o "
- "descriptor!\n", __func__));
- dhdsdio_rxfail(bus, false, false);
+ BRCMF_ERROR(("%s: glom superframe w/o "
+ "descriptor!\n", __func__));
+ brcmf_sdbrcm_rxfail(bus, false, false);
}
continue;
}
@@ -4119,39 +4540,35 @@ deliver:
#ifdef SDTEST
/* Test channel packets are processed separately */
if (chan == SDPCM_TEST_CHANNEL) {
- dhdsdio_testrcv(bus, pkt, seq);
+ brcmf_sdbrcm_checkdied(bus, pkt, seq);
continue;
}
#endif /* SDTEST */
if (pkt->len == 0) {
- dhd_os_sdlock_rxq(bus->dhd);
- bcm_pkt_buf_free_skb(pkt);
- dhd_os_sdunlock_rxq(bus->dhd);
+ brcmu_pkt_buf_free_skb(pkt);
continue;
- } else if (dhd_prot_hdrpull(bus->dhd, &ifidx, pkt) != 0) {
- DHD_ERROR(("%s: rx protocol error\n", __func__));
- dhd_os_sdlock_rxq(bus->dhd);
- bcm_pkt_buf_free_skb(pkt);
- dhd_os_sdunlock_rxq(bus->dhd);
- bus->dhd->rx_errors++;
+ } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, pkt) != 0) {
+ BRCMF_ERROR(("%s: rx protocol error\n", __func__));
+ brcmu_pkt_buf_free_skb(pkt);
+ bus->drvr->rx_errors++;
continue;
}
/* Unlock during rx call */
- dhd_os_sdunlock(bus->dhd);
- dhd_rx_frame(bus->dhd, ifidx, pkt, 1);
- dhd_os_sdlock(bus->dhd);
+ brcmf_sdbrcm_sdunlock(bus);
+ brcmf_rx_frame(bus->drvr, ifidx, pkt, 1);
+ brcmf_sdbrcm_sdlock(bus);
}
rxcount = maxframes - rxleft;
-#ifdef DHD_DEBUG
+#ifdef BCMDBG
/* Message if we hit the limit */
if (!rxleft && !sdtest)
- DHD_DATA(("%s: hit rx limit of %d frames\n", __func__,
- maxframes));
+ BRCMF_DATA(("%s: hit rx limit of %d frames\n", __func__,
+ maxframes));
else
-#endif /* DHD_DEBUG */
- DHD_DATA(("%s: processed %d frames\n", __func__, rxcount));
+#endif /* BCMDBG */
+ BRCMF_DATA(("%s: processed %d frames\n", __func__, rxcount));
/* Back off rxseq if awaiting rtx, update rx_seq */
if (bus->rxskip)
rxseq--;
@@ -4160,28 +4577,30 @@ deliver:
return rxcount;
}
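The window check repeated above treats sequence numbers as modulo-256 values: the u8 subtraction gives how many transmit credits the dongle is offering beyond the host's tx_seq, and anything over 0x40 is treated as implausible and clamped to a minimal window. A standalone illustration (plain C, not part of the patch; the u8 arithmetic assumes SDPCM_SEQUENCE_WRAP is 256):

#include <stdint.h>

/* Clamp an implausible transmit window. txmax and tx_seq wrap modulo
 * 256, so the u8 difference is the number of credits the dongle claims
 * to offer; mirror the clamp used in the rx paths above. */
static uint8_t sane_txmax(uint8_t txmax, uint8_t tx_seq)
{
	if ((uint8_t)(txmax - tx_seq) > 0x40)
		return (uint8_t)(tx_seq + 2);	/* keep a small window */
	return txmax;
}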
-static u32 dhdsdio_hostmail(dhd_bus_t *bus)
+static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
{
- sdpcmd_regs_t *regs = bus->regs;
u32 intstatus = 0;
u32 hmb_data;
u8 fcbits;
uint retries = 0;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
/* Read mailbox data and ack that we did so */
- R_SDREG(hmb_data, &regs->tohostmailboxdata, retries);
+ r_sdreg32(bus, &hmb_data,
+ offsetof(struct sdpcmd_regs, tohostmailboxdata), &retries);
+
if (retries <= retry_limit)
- W_SDREG(SMB_INT_ACK, &regs->tosbmailbox, retries);
+ w_sdreg32(bus, SMB_INT_ACK,
+ offsetof(struct sdpcmd_regs, tosbmailbox), &retries);
bus->f1regdata += 2;
/* Dongle recomposed rx frames, accept them again */
if (hmb_data & HMB_DATA_NAKHANDLED) {
- DHD_INFO(("Dongle reports NAK handled, expect rtx of %d\n",
- bus->rx_seq));
+ BRCMF_INFO(("Dongle reports NAK handled, expect rtx of %d\n",
+ bus->rx_seq));
if (!bus->rxskip)
- DHD_ERROR(("%s: unexpected NAKHANDLED!\n", __func__));
+ BRCMF_ERROR(("%s: unexpected NAKHANDLED!\n", __func__));
bus->rxskip = false;
intstatus |= I_HMB_FRAME_IND;
@@ -4195,12 +4614,12 @@ static u32 dhdsdio_hostmail(dhd_bus_t *bus)
(hmb_data & HMB_DATA_VERSION_MASK) >>
HMB_DATA_VERSION_SHIFT;
if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
- DHD_ERROR(("Version mismatch, dongle reports %d, "
- "expecting %d\n",
- bus->sdpcm_ver, SDPCM_PROT_VERSION));
+ BRCMF_ERROR(("Version mismatch, dongle reports %d, "
+ "expecting %d\n",
+ bus->sdpcm_ver, SDPCM_PROT_VERSION));
else
- DHD_INFO(("Dongle ready, protocol version %d\n",
- bus->sdpcm_ver));
+ BRCMF_INFO(("Dongle ready, protocol version %d\n",
+ bus->sdpcm_ver));
}
/*
@@ -4228,78 +4647,74 @@ static u32 dhdsdio_hostmail(dhd_bus_t *bus)
HMB_DATA_FC |
HMB_DATA_FWREADY |
HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK)) {
- DHD_ERROR(("Unknown mailbox data content: 0x%02x\n", hmb_data));
+ BRCMF_ERROR(("Unknown mailbox data content: 0x%02x\n",
+ hmb_data));
}
return intstatus;
}
-bool dhdsdio_dpc(dhd_bus_t *bus)
+static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
{
- bcmsdh_info_t *sdh = bus->sdh;
- sdpcmd_regs_t *regs = bus->regs;
+ struct brcmf_sdio_card *card = bus->card;
u32 intstatus, newstatus = 0;
uint retries = 0;
- uint rxlimit = dhd_rxbound; /* Rx frames to read before resched */
- uint txlimit = dhd_txbound; /* Tx frames to send before resched */
+ uint rxlimit = brcmf_rxbound; /* Rx frames to read before resched */
+ uint txlimit = brcmf_txbound; /* Tx frames to send before resched */
uint framecnt = 0; /* Temporary counter of tx/rx frames */
bool rxdone = true; /* Flag for no more read data */
bool resched = false; /* Flag indicating resched wanted */
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
/* Start with leftover status bits */
intstatus = bus->intstatus;
- dhd_os_sdlock(bus->dhd);
+ brcmf_sdbrcm_sdlock(bus);
/* If waiting for HTAVAIL, check status */
if (bus->clkstate == CLK_PENDING) {
int err;
u8 clkctl, devctl = 0;
-#ifdef DHD_DEBUG
+#ifdef BCMDBG
/* Check for inconsistent device control */
- devctl =
- bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ devctl = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
if (err) {
- DHD_ERROR(("%s: error reading DEVCTL: %d\n",
- __func__, err));
- bus->dhd->busstate = DHD_BUS_DOWN;
- } else {
- ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
+ BRCMF_ERROR(("%s: error reading DEVCTL: %d\n",
+ __func__, err));
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
}
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
/* Read CSR, if clock on switch to AVAIL, else ignore */
- clkctl =
- bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- &err);
+ clkctl = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
- DHD_ERROR(("%s: error reading CSR: %d\n", __func__,
- err));
- bus->dhd->busstate = DHD_BUS_DOWN;
+ BRCMF_ERROR(("%s: error reading CSR: %d\n", __func__,
+ err));
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
}
- DHD_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", devctl,
- clkctl));
+ BRCMF_INFO(("DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
+ devctl, clkctl));
if (SBSDIO_HTAV(clkctl)) {
- devctl =
- bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
- &err);
+ devctl = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, &err);
if (err) {
- DHD_ERROR(("%s: error reading DEVCTL: %d\n",
- __func__, err));
- bus->dhd->busstate = DHD_BUS_DOWN;
+ BRCMF_ERROR(("%s: error reading DEVCTL: %d\n",
+ __func__, err));
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
}
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
- devctl, &err);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL, devctl, &err);
if (err) {
- DHD_ERROR(("%s: error writing DEVCTL: %d\n",
- __func__, err));
- bus->dhd->busstate = DHD_BUS_DOWN;
+ BRCMF_ERROR(("%s: error writing DEVCTL: %d\n",
+ __func__, err));
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
}
bus->clkstate = CLK_AVAIL;
} else {
@@ -4310,21 +4725,24 @@ bool dhdsdio_dpc(dhd_bus_t *bus)
BUS_WAKE(bus);
/* Make sure backplane clock is on */
- dhdsdio_clkctl(bus, CLK_AVAIL, true);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, true);
if (bus->clkstate == CLK_PENDING)
goto clkwait;
/* Pending interrupt indicates new device status */
if (bus->ipend) {
bus->ipend = false;
- R_SDREG(newstatus, &regs->intstatus, retries);
+ r_sdreg32(bus, &newstatus,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
bus->f1regdata++;
- if (bcmsdh_regfail(bus->sdh))
+ if (brcmf_sdcard_regfail(bus->card))
newstatus = 0;
newstatus &= bus->hostintmask;
bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
if (newstatus) {
- W_SDREG(newstatus, &regs->intstatus, retries);
+ w_sdreg32(bus, newstatus,
+ offsetof(struct sdpcmd_regs, intstatus),
+ &retries);
bus->f1regdata++;
}
}
@@ -4339,8 +4757,11 @@ bool dhdsdio_dpc(dhd_bus_t *bus)
*/
if (intstatus & I_HMB_FC_CHANGE) {
intstatus &= ~I_HMB_FC_CHANGE;
- W_SDREG(I_HMB_FC_CHANGE, &regs->intstatus, retries);
- R_SDREG(newstatus, &regs->intstatus, retries);
+ w_sdreg32(bus, I_HMB_FC_CHANGE,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
+
+ r_sdreg32(bus, &newstatus,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
bus->f1regdata += 2;
bus->fcstate =
!!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
@@ -4350,28 +4771,28 @@ bool dhdsdio_dpc(dhd_bus_t *bus)
/* Handle host mailbox indication */
if (intstatus & I_HMB_HOST_INT) {
intstatus &= ~I_HMB_HOST_INT;
- intstatus |= dhdsdio_hostmail(bus);
+ intstatus |= brcmf_sdbrcm_hostmail(bus);
}
/* Generally don't ask for these, can get CRC errors... */
if (intstatus & I_WR_OOSYNC) {
- DHD_ERROR(("Dongle reports WR_OOSYNC\n"));
+ BRCMF_ERROR(("Dongle reports WR_OOSYNC\n"));
intstatus &= ~I_WR_OOSYNC;
}
if (intstatus & I_RD_OOSYNC) {
- DHD_ERROR(("Dongle reports RD_OOSYNC\n"));
+ BRCMF_ERROR(("Dongle reports RD_OOSYNC\n"));
intstatus &= ~I_RD_OOSYNC;
}
if (intstatus & I_SBINT) {
- DHD_ERROR(("Dongle reports SBINT\n"));
+ BRCMF_ERROR(("Dongle reports SBINT\n"));
intstatus &= ~I_SBINT;
}
/* Would be active due to wake-wlan in gSPI */
if (intstatus & I_CHIPACTIVE) {
- DHD_INFO(("Dongle reports CHIPACTIVE\n"));
+ BRCMF_INFO(("Dongle reports CHIPACTIVE\n"));
intstatus &= ~I_CHIPACTIVE;
}
@@ -4381,7 +4802,7 @@ bool dhdsdio_dpc(dhd_bus_t *bus)
/* On frame indication, read available frames */
if (PKT_AVAILABLE()) {
- framecnt = dhdsdio_readframes(bus, rxlimit, &rxdone);
+ framecnt = brcmf_sdbrcm_readframes(bus, rxlimit, &rxdone);
if (rxdone || bus->rxskip)
intstatus &= ~I_HMB_FRAME_IND;
rxlimit -= min(framecnt, rxlimit);
@@ -4391,51 +4812,45 @@ bool dhdsdio_dpc(dhd_bus_t *bus)
bus->intstatus = intstatus;
clkwait:
-#if defined(OOB_INTR_ONLY)
- bcmsdh_oob_intr_set(1);
-#endif /* (OOB_INTR_ONLY) */
/* Re-enable interrupts to detect new device events (mailbox, rx frame)
* or clock availability. (Allows tx loop to check ipend if desired.)
* (Unless register access seems hosed, as we may not be able to ACK...)
*/
- if (bus->intr && bus->intdis && !bcmsdh_regfail(sdh)) {
- DHD_INTR(("%s: enable SDIO interrupts, rxdone %d framecnt %d\n",
- __func__, rxdone, framecnt));
+ if (bus->intr && bus->intdis && !brcmf_sdcard_regfail(card)) {
+ BRCMF_INTR(("%s: enable SDIO interrupts, rxdone %d"
+ " framecnt %d\n", __func__, rxdone, framecnt));
bus->intdis = false;
- bcmsdh_intr_enable(sdh);
+ brcmf_sdcard_intr_enable(card);
}
if (DATAOK(bus) && bus->ctrl_frame_stat &&
(bus->clkstate == CLK_AVAIL)) {
int ret, i;
- ret =
- dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2,
- F2SYNC, (u8 *) bus->ctrl_frame_buf,
- (u32) bus->ctrl_frame_len, NULL,
- NULL, NULL);
- ASSERT(ret != -BCME_PENDING);
+ ret = brcmf_sdbrcm_send_buf(bus, brcmf_sdcard_cur_sbwad(card),
+ SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
+ (u32) bus->ctrl_frame_len, NULL, NULL, NULL);
if (ret < 0) {
/* On failure, abort the command and
terminate the frame */
- DHD_INFO(("%s: sdio error %d, abort command and "
- "terminate frame.\n", __func__, ret));
+ BRCMF_INFO(("%s: sdio error %d, abort command and "
+ "terminate frame.\n", __func__, ret));
bus->tx_sderrs++;
- bcmsdh_abort(sdh, SDIO_FUNC_2);
+ brcmf_sdcard_abort(card, SDIO_FUNC_2);
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1,
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1,
SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM,
NULL);
bus->f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
- hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ hi = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
SBSDIO_FUNC1_WFRAMEBCHI,
NULL);
- lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
+ lo = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
SBSDIO_FUNC1_WFRAMEBCLO,
NULL);
bus->f1regdata += 2;
@@ -4447,16 +4862,16 @@ clkwait:
if (ret == 0)
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
- DHD_INFO(("Return_dpc value is : %d\n", ret));
+ BRCMF_INFO(("Return_dpc value is : %d\n", ret));
bus->ctrl_frame_stat = false;
- dhd_wait_event_wakeup(bus->dhd);
+ brcmf_sdbrcm_wait_event_wakeup(bus);
}
/* Send queued frames (limit 1 if rx may still be pending) */
else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
- bcm_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
+ brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
&& DATAOK(bus)) {
- framecnt = rxdone ? txlimit : min(txlimit, dhd_txminmax);
- framecnt = dhdsdio_sendfromq(bus, framecnt);
+ framecnt = rxdone ? txlimit : min(txlimit, brcmf_txminmax);
+ framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
txlimit -= framecnt;
}
@@ -4464,18 +4879,20 @@ clkwait:
else await next interrupt */
/* On failed register access, all bets are off:
no resched or interrupts */
- if ((bus->dhd->busstate == DHD_BUS_DOWN) || bcmsdh_regfail(sdh)) {
- DHD_ERROR(("%s: failed backplane access over SDIO, halting "
- "operation %d\n", __func__, bcmsdh_regfail(sdh)));
- bus->dhd->busstate = DHD_BUS_DOWN;
+ if ((bus->drvr->busstate == BRCMF_BUS_DOWN) ||
+ brcmf_sdcard_regfail(card)) {
+ BRCMF_ERROR(("%s: failed backplane access over SDIO, halting "
+ "operation %d\n", __func__,
+ brcmf_sdcard_regfail(card)));
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
bus->intstatus = 0;
} else if (bus->clkstate == CLK_PENDING) {
- DHD_INFO(("%s: rescheduled due to CLK_PENDING awaiting "
- "I_CHIPACTIVE interrupt\n", __func__));
+ BRCMF_INFO(("%s: rescheduled due to CLK_PENDING awaiting "
+ "I_CHIPACTIVE interrupt\n", __func__));
resched = true;
} else if (bus->intstatus || bus->ipend ||
- (!bus->fcstate && bcm_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
- DATAOK(bus)) || PKT_AVAILABLE()) {
+ (!bus->fcstate && brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol)
+ && DATAOK(bus)) || PKT_AVAILABLE()) {
resched = true;
}
@@ -4483,42 +4900,31 @@ clkwait:
/* If we're done for now, turn off clock request. */
if ((bus->clkstate != CLK_PENDING)
- && bus->idletime == DHD_IDLE_IMMEDIATE) {
+ && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
bus->activity = false;
- dhdsdio_clkctl(bus, CLK_NONE, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
}
- dhd_os_sdunlock(bus->dhd);
-
- return resched;
-}
-
-bool dhd_bus_dpc(struct dhd_bus *bus)
-{
- bool resched;
-
- /* Call the DPC directly. */
- DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __func__));
- resched = dhdsdio_dpc(bus);
+ brcmf_sdbrcm_sdunlock(bus);
return resched;
}
-void dhdsdio_isr(void *arg)
+void brcmf_sdbrcm_isr(void *arg)
{
- dhd_bus_t *bus = (dhd_bus_t *) arg;
- bcmsdh_info_t *sdh;
+ struct brcmf_bus *bus = (struct brcmf_bus *) arg;
+ struct brcmf_sdio_card *card;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
if (!bus) {
- DHD_ERROR(("%s : bus is null pointer , exit\n", __func__));
+ BRCMF_ERROR(("%s : bus is null pointer , exit\n", __func__));
return;
}
- sdh = bus->sdh;
+ card = bus->card;
- if (bus->dhd->busstate == DHD_BUS_DOWN) {
- DHD_ERROR(("%s : bus is down. we have nothing to do\n",
+ if (bus->drvr->busstate == BRCMF_BUS_DOWN) {
+ BRCMF_ERROR(("%s : bus is down. we have nothing to do\n",
__func__));
return;
}
@@ -4528,54 +4934,55 @@ void dhdsdio_isr(void *arg)
/* Shouldn't get this interrupt if we're sleeping? */
if (bus->sleeping) {
- DHD_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
+ BRCMF_ERROR(("INTERRUPT WHILE SLEEPING??\n"));
return;
}
/* Disable additional interrupts (is this needed now)? */
if (bus->intr)
- DHD_INTR(("%s: disable SDIO interrupts\n", __func__));
+ BRCMF_INTR(("%s: disable SDIO interrupts\n", __func__));
else
- DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
+ BRCMF_ERROR(("brcmf_sdbrcm_isr() w/o interrupt configured!\n"));
- bcmsdh_intr_disable(sdh);
+ brcmf_sdcard_intr_disable(card);
bus->intdis = true;
#if defined(SDIO_ISR_THREAD)
- DHD_TRACE(("Calling dhdsdio_dpc() from %s\n", __func__));
- while (dhdsdio_dpc(bus))
+ BRCMF_TRACE(("Calling brcmf_sdbrcm_dpc() from %s\n", __func__));
+ while (brcmf_sdbrcm_dpc(bus))
;
#else
bus->dpc_sched = true;
- dhd_sched_dpc(bus->dhd);
+ brcmf_sdbrcm_sched_dpc(bus);
#endif
}
#ifdef SDTEST
-static void dhdsdio_pktgen_init(dhd_bus_t *bus)
+static void brcmf_sdbrcm_pktgen_init(struct brcmf_bus *bus)
{
/* Default to specified length, or full range */
- if (dhd_pktgen_len) {
- bus->pktgen_maxlen = min(dhd_pktgen_len, MAX_PKTGEN_LEN);
+ if (brcmf_pktgen_len) {
+ bus->pktgen_maxlen = min(brcmf_pktgen_len,
+ BRCMF_MAX_PKTGEN_LEN);
bus->pktgen_minlen = bus->pktgen_maxlen;
} else {
- bus->pktgen_maxlen = MAX_PKTGEN_LEN;
+ bus->pktgen_maxlen = BRCMF_MAX_PKTGEN_LEN;
bus->pktgen_minlen = 0;
}
bus->pktgen_len = (u16) bus->pktgen_minlen;
/* Default to per-watchdog burst with 10s print time */
bus->pktgen_freq = 1;
- bus->pktgen_print = 10000 / dhd_watchdog_ms;
- bus->pktgen_count = (dhd_pktgen * dhd_watchdog_ms + 999) / 1000;
+ bus->pktgen_print = 10000 / brcmf_watchdog_ms;
+ bus->pktgen_count = (brcmf_pktgen * brcmf_watchdog_ms + 999) / 1000;
/* Default to echo mode */
- bus->pktgen_mode = DHD_PKTGEN_ECHO;
+ bus->pktgen_mode = BRCMF_PKTGEN_ECHO;
bus->pktgen_stop = 1;
}
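The burst count above converts the pktgen rate into packets per watchdog tick with a ceiling division by 1000 ms, so any non-zero rate sends at least one packet per tick. The same arithmetic as a standalone sketch (plain C, not part of the patch; reading the rate as packets per second is an assumption):

/* Packets to send per watchdog tick, given a rate (assumed packets per
 * second) and the tick period in milliseconds. The +999 rounds up, so
 * any non-zero rate yields at least one packet per tick. */
static unsigned int pkts_per_tick(unsigned int rate_per_sec,
				  unsigned int watchdog_ms)
{
	return (rate_per_sec * watchdog_ms + 999) / 1000;
}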
-static void dhdsdio_pktgen(dhd_bus_t *bus)
+static void brcmf_sdbrcm_pktgen(struct brcmf_bus *bus)
{
struct sk_buff *pkt;
u8 *data;
@@ -4591,9 +4998,9 @@ static void dhdsdio_pktgen(dhd_bus_t *bus)
}
/* For recv mode, just make sure dongle has started sending */
- if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+ if (bus->pktgen_mode == BRCMF_PKTGEN_RECV) {
if (!bus->pktgen_rcvd)
- dhdsdio_sdtest_set(bus, true);
+ brcmf_sdbrcm_sdtest_set(bus, true);
return;
}
@@ -4608,39 +5015,39 @@ static void dhdsdio_pktgen(dhd_bus_t *bus)
/* Allocate an appropriate-sized packet */
len = bus->pktgen_len;
- pkt = bcm_pkt_buf_get_skb(
- (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN),
+ pkt = brcmu_pkt_buf_get_skb(
+ len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + BRCMF_SDALIGN,
true);
if (!pkt) {
- DHD_ERROR(("%s: bcm_pkt_buf_get_skb failed!\n",
- __func__));
+ BRCMF_ERROR(("%s: brcmu_pkt_buf_get_skb failed!\n",
+ __func__));
break;
}
PKTALIGN(pkt, (len + SDPCM_HDRLEN + SDPCM_TEST_HDRLEN),
- DHD_SDALIGN);
+ BRCMF_SDALIGN);
data = (u8 *) (pkt->data) + SDPCM_HDRLEN;
/* Write test header cmd and extra based on mode */
switch (bus->pktgen_mode) {
- case DHD_PKTGEN_ECHO:
+ case BRCMF_PKTGEN_ECHO:
*data++ = SDPCM_TEST_ECHOREQ;
*data++ = (u8) bus->pktgen_sent;
break;
- case DHD_PKTGEN_SEND:
+ case BRCMF_PKTGEN_SEND:
*data++ = SDPCM_TEST_DISCARD;
*data++ = (u8) bus->pktgen_sent;
break;
- case DHD_PKTGEN_RXBURST:
+ case BRCMF_PKTGEN_RXBURST:
*data++ = SDPCM_TEST_BURST;
*data++ = (u8) bus->pktgen_count;
break;
default:
- DHD_ERROR(("Unrecognized pktgen mode %d\n",
- bus->pktgen_mode));
- bcm_pkt_buf_free_skb(pkt, true);
+ BRCMF_ERROR(("Unrecognized pktgen mode %d\n",
+ bus->pktgen_mode));
+ brcmu_pkt_buf_free_skb(pkt, true);
bus->pktgen_count = 0;
return;
}
@@ -4655,17 +5062,17 @@ static void dhdsdio_pktgen(dhd_bus_t *bus)
*data++ =
SDPCM_TEST_FILL(fillbyte, (u8) bus->pktgen_sent);
-#ifdef DHD_DEBUG
- if (DHD_BYTES_ON() && DHD_DATA_ON()) {
+#ifdef BCMDBG
+ if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
data = (u8 *) (pkt->data) + SDPCM_HDRLEN;
- printk(KERN_DEBUG "dhdsdio_pktgen: Tx Data:\n");
+ printk(KERN_DEBUG "brcmf_sdbrcm_pktgen: Tx Data:\n");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, data,
pkt->len - SDPCM_HDRLEN);
}
#endif
/* Send it */
- if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, true)) {
+ if (brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, true)) {
bus->pktgen_fail++;
if (bus->pktgen_stop
&& bus->pktgen_stop == bus->pktgen_fail)
@@ -4678,24 +5085,24 @@ static void dhdsdio_pktgen(dhd_bus_t *bus)
bus->pktgen_len = (u16) bus->pktgen_minlen;
/* Special case for burst mode: just send one request! */
- if (bus->pktgen_mode == DHD_PKTGEN_RXBURST)
+ if (bus->pktgen_mode == BRCMF_PKTGEN_RXBURST)
break;
}
}
-static void dhdsdio_sdtest_set(dhd_bus_t *bus, bool start)
+static void brcmf_sdbrcm_sdtest_set(struct brcmf_bus *bus, bool start)
{
struct sk_buff *pkt;
u8 *data;
/* Allocate the packet */
- pkt = bcm_pkt_buf_get_skb(SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
- DHD_SDALIGN, true);
+ pkt = brcmu_pkt_buf_get_skb(SDPCM_HDRLEN + SDPCM_TEST_HDRLEN +
+ BRCMF_SDALIGN, true);
if (!pkt) {
- DHD_ERROR(("%s: bcm_pkt_buf_get_skb failed!\n", __func__));
+ BRCMF_ERROR(("%s: brcmu_pkt_buf_get_skb failed!\n", __func__));
return;
}
- PKTALIGN(pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), DHD_SDALIGN);
+ PKTALIGN(pkt, (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN), BRCMF_SDALIGN);
data = (u8 *) (pkt->data) + SDPCM_HDRLEN;
/* Fill in the test header */
@@ -4705,11 +5112,12 @@ static void dhdsdio_sdtest_set(dhd_bus_t *bus, bool start)
*data++ = (bus->pktgen_maxlen >> 8);
/* Send it */
- if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, true))
+ if (brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, true))
bus->pktgen_fail++;
}
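The fill and parse code around this hunk implies a small fixed test-channel header of SDPCM_TEST_HDRLEN bytes: a command byte, an extra byte, and a 16-bit little-endian length. The field order below is inferred from those reads and writes rather than quoted from the driver headers (plain C, not part of the patch):

#include <stdint.h>

/* Inferred SDPCM test-channel header layout: command, extra, then a
 * little-endian 16-bit length. Reconstructed from the surrounding
 * code, not from the driver's own definitions. */
static void test_hdr_pack(uint8_t *data, uint8_t cmd, uint8_t extra,
			  uint16_t len)
{
	data[0] = cmd;
	data[1] = extra;
	data[2] = (uint8_t)(len & 0xff);
	data[3] = (uint8_t)(len >> 8);
}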
-static void dhdsdio_testrcv(dhd_bus_t *bus, struct sk_buff *pkt, uint seq)
+static void
+brcmf_sdbrcm_checkdied(struct brcmf_bus *bus, struct sk_buff *pkt, uint seq)
{
u8 *data;
uint pktlen;
@@ -4722,9 +5130,9 @@ static void dhdsdio_testrcv(dhd_bus_t *bus, struct sk_buff *pkt, uint seq)
/* Check for min length */
pktlen = pkt->len;
if (pktlen < SDPCM_TEST_HDRLEN) {
- DHD_ERROR(("dhdsdio_restrcv: toss runt frame, pktlen %d\n",
- pktlen));
- bcm_pkt_buf_free_skb(pkt, false);
+ BRCMF_ERROR(("brcmf_sdbrcm_checkdied: toss runt frame, pktlen "
+ "%d\n", pktlen));
+ brcmu_pkt_buf_free_skb(pkt, false);
return;
}
@@ -4739,10 +5147,11 @@ static void dhdsdio_testrcv(dhd_bus_t *bus, struct sk_buff *pkt, uint seq)
if (cmd == SDPCM_TEST_DISCARD || cmd == SDPCM_TEST_ECHOREQ
|| cmd == SDPCM_TEST_ECHORSP) {
if (pktlen != len + SDPCM_TEST_HDRLEN) {
- DHD_ERROR(("dhdsdio_testrcv: frame length mismatch, "
- "pktlen %d seq %d" " cmd %d extra %d len %d\n",
- pktlen, seq, cmd, extra, len));
- bcm_pkt_buf_free_skb(pkt, false);
+ BRCMF_ERROR(("brcmf_sdbrcm_checkdied: frame length "
+ "mismatch, pktlen %d seq %d"
+ " cmd %d extra %d len %d\n",
+ pktlen, seq, cmd, extra, len));
+ brcmu_pkt_buf_free_skb(pkt, false);
return;
}
}
@@ -4753,76 +5162,80 @@ static void dhdsdio_testrcv(dhd_bus_t *bus, struct sk_buff *pkt, uint seq)
/* Rx->Tx turnaround ok (even on NDIS w/current
implementation) */
*(u8 *) (pkt->data) = SDPCM_TEST_ECHORSP;
- if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, true) == 0) {
+ if (brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, true) == 0)
bus->pktgen_sent++;
- } else {
+ else {
bus->pktgen_fail++;
- bcm_pkt_buf_free_skb(pkt, false);
+ brcmu_pkt_buf_free_skb(pkt, false);
}
bus->pktgen_rcvd++;
break;
case SDPCM_TEST_ECHORSP:
if (bus->ext_loop) {
- bcm_pkt_buf_free_skb(pkt, false);
+ brcmu_pkt_buf_free_skb(pkt, false);
bus->pktgen_rcvd++;
break;
}
for (offset = 0; offset < len; offset++, data++) {
if (*data != SDPCM_TEST_FILL(offset, extra)) {
- DHD_ERROR(("dhdsdio_testrcv: echo data mismatch: " "offset %d (len %d) expect 0x%02x rcvd 0x%02x\n",
- offset, len,
- SDPCM_TEST_FILL(offset, extra), *data));
+ BRCMF_ERROR(("brcmf_sdbrcm_checkdied: echo"
+ " data mismatch: "
+ "offset %d (len %d) "
+ "expect 0x%02x rcvd 0x%02x\n",
+ offset, len,
+ SDPCM_TEST_FILL(offset, extra),
+ *data));
break;
}
}
- bcm_pkt_buf_free_skb(pkt, false);
+ brcmu_pkt_buf_free_skb(pkt, false);
bus->pktgen_rcvd++;
break;
case SDPCM_TEST_DISCARD:
- bcm_pkt_buf_free_skb(pkt, false);
+ brcmu_pkt_buf_free_skb(pkt, false);
bus->pktgen_rcvd++;
break;
case SDPCM_TEST_BURST:
case SDPCM_TEST_SEND:
default:
- DHD_INFO(("dhdsdio_testrcv: unsupported or unknown command, "
- "pktlen %d seq %d" " cmd %d extra %d len %d\n",
- pktlen, seq, cmd, extra, len));
- bcm_pkt_buf_free_skb(pkt, false);
+ BRCMF_INFO(("brcmf_sdbrcm_checkdied: unsupported or unknown "
+ "command, pktlen %d seq %d" " cmd %d extra %d"
+ " len %d\n", pktlen, seq, cmd, extra, len));
+ brcmu_pkt_buf_free_skb(pkt, false);
break;
}
/* For recv mode, stop at limit (and tell dongle to stop sending) */
- if (bus->pktgen_mode == DHD_PKTGEN_RECV) {
+ if (bus->pktgen_mode == BRCMF_PKTGEN_RECV) {
if (bus->pktgen_total
&& (bus->pktgen_rcvd >= bus->pktgen_total)) {
bus->pktgen_count = 0;
- dhdsdio_sdtest_set(bus, false);
+ brcmf_sdbrcm_sdtest_set(bus, false);
}
}
}
#endif /* SDTEST */
-extern bool dhd_bus_watchdog(dhd_pub_t *dhdp)
+extern bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
{
- dhd_bus_t *bus;
+ struct brcmf_bus *bus;
- DHD_TIMER(("%s: Enter\n", __func__));
+ BRCMF_TIMER(("%s: Enter\n", __func__));
- bus = dhdp->bus;
+ bus = drvr->bus;
- if (bus->dhd->dongle_reset)
+ if (bus->drvr->dongle_reset)
return false;
/* Ignore the timer if simulating bus down */
if (bus->sleeping)
return false;
- dhd_os_sdlock(bus->dhd);
+ brcmf_sdbrcm_sdlock(bus);
/* Poll period: check device if appropriate. */
if (bus->poll && (++bus->polltick >= bus->pollrate)) {
@@ -4836,9 +5249,9 @@ extern bool dhd_bus_watchdog(dhd_pub_t *dhdp)
if (!bus->dpc_sched) {
u8 devpend;
- devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0,
- SDIOD_CCCR_INTPEND,
- NULL);
+ devpend = brcmf_sdcard_cfg_read(bus->card,
+ SDIO_FUNC_0, SDIO_CCCR_INTx,
+ NULL);
intstatus =
devpend & (INTR_STATUS_FUNC1 |
INTR_STATUS_FUNC2);
@@ -4850,10 +5263,10 @@ extern bool dhd_bus_watchdog(dhd_pub_t *dhdp)
bus->pollcnt++;
bus->ipend = true;
if (bus->intr)
- bcmsdh_intr_disable(bus->sdh);
+ brcmf_sdcard_intr_disable(bus->card);
bus->dpc_sched = true;
- dhd_sched_dpc(bus->dhd);
+ brcmf_sdbrcm_sched_dpc(bus);
}
}
@@ -4861,28 +5274,28 @@ extern bool dhd_bus_watchdog(dhd_pub_t *dhdp)
/* Update interrupt tracking */
bus->lastintrs = bus->intrcount;
}
-#ifdef DHD_DEBUG
+#ifdef BCMDBG
/* Poll for console output periodically */
- if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
- bus->console.count += dhd_watchdog_ms;
- if (bus->console.count >= dhd_console_ms) {
- bus->console.count -= dhd_console_ms;
+ if (drvr->busstate == BRCMF_BUS_DATA && brcmf_console_ms != 0) {
+ bus->console.count += brcmf_watchdog_ms;
+ if (bus->console.count >= brcmf_console_ms) {
+ bus->console.count -= brcmf_console_ms;
/* Make sure backplane clock is on */
- dhdsdio_clkctl(bus, CLK_AVAIL, false);
- if (dhdsdio_readconsole(bus) < 0)
- dhd_console_ms = 0; /* On error,
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+ if (brcmf_sdbrcm_readconsole(bus) < 0)
+ brcmf_console_ms = 0; /* On error,
stop trying */
}
}
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
#ifdef SDTEST
/* Generate packets if configured */
if (bus->pktgen_count && (++bus->pktgen_tick >= bus->pktgen_freq)) {
/* Make sure backplane clock is on */
- dhdsdio_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
bus->pktgen_tick = 0;
- dhdsdio_pktgen(bus);
+ brcmf_sdbrcm_pktgen(bus);
}
#endif
@@ -4892,22 +5305,23 @@ extern bool dhd_bus_watchdog(dhd_pub_t *dhdp)
bus->idlecount = 0;
if (bus->activity) {
bus->activity = false;
- dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
+ brcmf_sdbrcm_wd_timer(bus, brcmf_watchdog_ms);
} else {
- dhdsdio_clkctl(bus, CLK_NONE, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
}
}
}
- dhd_os_sdunlock(bus->dhd);
+ brcmf_sdbrcm_sdunlock(bus);
return bus->ipend;
}
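The tail of the watchdog (previous hunk) handles idle clock management: after a full idle period with no traffic the backplane clock is dropped, otherwise the watchdog is simply re-armed. A minimal sketch of that bookkeeping, assuming the enclosing idle-tick check matches the pre-rename driver:

	/* Sketch only: idle-clock handling at the end of the watchdog;
	 * the surrounding idletime/idlecount test is assumed. */
	if (bus->idletime > 0 && bus->clkstate == CLK_AVAIL) {
		if (++bus->idlecount >= bus->idletime) {
			bus->idlecount = 0;
			if (bus->activity) {
				/* traffic seen: stay up, re-arm watchdog */
				bus->activity = false;
				brcmf_sdbrcm_wd_timer(bus, brcmf_watchdog_ms);
			} else {
				/* quiet: let the backplane clock drop */
				brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
			}
		}
	}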
-#ifdef DHD_DEBUG
-extern int dhd_bus_console_in(dhd_pub_t *dhdp, unsigned char *msg, uint msglen)
+#ifdef BCMDBG
+static int brcmf_sdbrcm_bus_console_in(struct brcmf_pub *drvr,
+ unsigned char *msg, uint msglen)
{
- dhd_bus_t *bus = dhdp->bus;
+ struct brcmf_bus *bus = drvr->bus;
u32 addr, val;
int rv;
struct sk_buff *pkt;
@@ -4917,88 +5331,59 @@ extern int dhd_bus_console_in(dhd_pub_t *dhdp, unsigned char *msg, uint msglen)
return -ENOTSUPP;
/* Exclusive bus access */
- dhd_os_sdlock(bus->dhd);
+ brcmf_sdbrcm_sdlock(bus);
/* Don't allow input if dongle is in reset */
- if (bus->dhd->dongle_reset) {
- dhd_os_sdunlock(bus->dhd);
+ if (bus->drvr->dongle_reset) {
+ brcmf_sdbrcm_sdunlock(bus);
return -EPERM;
}
/* Request clock to allow SDIO accesses */
BUS_WAKE(bus);
/* No pend allowed since txpkt is called later, ht clk has to be on */
- dhdsdio_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
/* Zero cbuf_index */
- addr = bus->console_addr + offsetof(hndrte_cons_t, cbuf_idx);
+ addr = bus->console_addr + offsetof(struct rte_console, cbuf_idx);
val = cpu_to_le32(0);
- rv = dhdsdio_membytes(bus, true, addr, (u8 *)&val, sizeof(val));
+ rv = brcmf_sdbrcm_membytes(bus, true, addr, (u8 *)&val, sizeof(val));
if (rv < 0)
goto done;
/* Write message into cbuf */
- addr = bus->console_addr + offsetof(hndrte_cons_t, cbuf);
- rv = dhdsdio_membytes(bus, true, addr, (u8 *)msg, msglen);
+ addr = bus->console_addr + offsetof(struct rte_console, cbuf);
+ rv = brcmf_sdbrcm_membytes(bus, true, addr, (u8 *)msg, msglen);
if (rv < 0)
goto done;
/* Write length into vcons_in */
- addr = bus->console_addr + offsetof(hndrte_cons_t, vcons_in);
+ addr = bus->console_addr + offsetof(struct rte_console, vcons_in);
val = cpu_to_le32(msglen);
- rv = dhdsdio_membytes(bus, true, addr, (u8 *)&val, sizeof(val));
+ rv = brcmf_sdbrcm_membytes(bus, true, addr, (u8 *)&val, sizeof(val));
if (rv < 0)
goto done;
/* Bump dongle by sending an empty event pkt.
* sdpcm_sendup (RX) checks for virtual console input.
*/
- pkt = bcm_pkt_buf_get_skb(4 + SDPCM_RESERVE);
+ pkt = brcmu_pkt_buf_get_skb(4 + SDPCM_RESERVE);
if ((pkt != NULL) && bus->clkstate == CLK_AVAIL)
- dhdsdio_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, true);
+ brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, true);
done:
- if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) && !bus->dpc_sched) {
bus->activity = false;
- dhdsdio_clkctl(bus, CLK_NONE, true);
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
}
- dhd_os_sdunlock(bus->dhd);
+ brcmf_sdbrcm_sdunlock(bus);
return rv;
}
-#endif /* DHD_DEBUG */
-
-#ifdef DHD_DEBUG
-static void dhd_dump_cis(uint fn, u8 *cis)
-{
- uint byte, tag, tdata;
- DHD_INFO(("Function %d CIS:\n", fn));
-
- for (tdata = byte = 0; byte < SBSDIO_CIS_SIZE_LIMIT; byte++) {
- if ((byte % 16) == 0)
- DHD_INFO((" "));
- DHD_INFO(("%02x ", cis[byte]));
- if ((byte % 16) == 15)
- DHD_INFO(("\n"));
- if (!tdata--) {
- tag = cis[byte];
- if (tag == 0xff)
- break;
- else if (!tag)
- tdata = 0;
- else if ((byte + 1) < SBSDIO_CIS_SIZE_LIMIT)
- tdata = cis[byte + 1] + 1;
- else
- DHD_INFO(("]"));
- }
- }
- if ((byte % 16) != 15)
- DHD_INFO(("\n"));
-}
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
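The console-input path above boils down to three ordered writes into the dongle's rte_console structure, followed by an empty event packet that prompts the firmware to look at the virtual console. A minimal sketch of that sequence, reusing the patch's own brcmf_sdbrcm_membytes() behind a hypothetical helper name:

	/* Sketch: push one command line into the dongle console. */
	static int console_push(struct brcmf_bus *bus, u8 *msg, uint msglen)
	{
		u32 base = bus->console_addr;
		u32 val;
		int rv;

		/* 1) reset the consumer index */
		val = cpu_to_le32(0);
		rv = brcmf_sdbrcm_membytes(bus, true,
			base + offsetof(struct rte_console, cbuf_idx),
			(u8 *)&val, sizeof(val));
		if (rv < 0)
			return rv;

		/* 2) copy the command text into the console buffer */
		rv = brcmf_sdbrcm_membytes(bus, true,
			base + offsetof(struct rte_console, cbuf),
			msg, msglen);
		if (rv < 0)
			return rv;

		/* 3) publish the length; firmware polls vcons_in */
		val = cpu_to_le32(msglen);
		return brcmf_sdbrcm_membytes(bus, true,
			base + offsetof(struct rte_console, vcons_in),
			(u8 *)&val, sizeof(val));
	}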
-static bool dhdsdio_chipmatch(u16 chipid)
+static bool brcmf_sdbrcm_chipmatch(u16 chipid)
{
if (chipid == BCM4325_CHIP_ID)
return true;
@@ -5009,12 +5394,12 @@ static bool dhdsdio_chipmatch(u16 chipid)
return false;
}
-static void *dhdsdio_probe(u16 venid, u16 devid, u16 bus_no,
- u16 slot, u16 func, uint bustype, void *regsva,
- void *sdh)
+static void *brcmf_sdbrcm_probe(u16 venid, u16 devid, u16 bus_no,
+ u16 slot, u16 func, uint bustype, u32 regsva,
+ void *card)
{
int ret;
- dhd_bus_t *bus;
+ struct brcmf_bus *bus;
/* Init global variables at run-time, not as part of the declaration.
* This is required to support init/de-init of the driver.
@@ -5024,26 +5409,26 @@ static void *dhdsdio_probe(u16 venid, u16 devid, u16 bus_no,
* first time that the driver is initialized vs subsequent
* initializations.
*/
- dhd_txbound = DHD_TXBOUND;
- dhd_rxbound = DHD_RXBOUND;
- dhd_alignctl = true;
+ brcmf_txbound = BRCMF_TXBOUND;
+ brcmf_rxbound = BRCMF_RXBOUND;
+ brcmf_alignctl = true;
sd1idle = true;
- dhd_readahead = true;
+ brcmf_readahead = true;
retrydata = false;
- dhd_dongle_memsize = 0;
- dhd_txminmax = DHD_TXMINMAX;
+ brcmf_dongle_memsize = 0;
+ brcmf_txminmax = BRCMF_TXMINMAX;
forcealign = true;
- dhd_common_init();
+ brcmf_c_init();
- DHD_TRACE(("%s: Enter\n", __func__));
- DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __func__, venid, devid));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
+ BRCMF_INFO(("%s: venid 0x%04x devid 0x%04x\n", __func__, venid, devid));
- /* We make assumptions about address window mappings */
- ASSERT((unsigned long)regsva == SI_ENUM_BASE);
+ /* We make an assumption about address window mappings:
+ * regsva == SI_ENUM_BASE */
- /* BCMSDH passes venid and devid based on CIS parsing -- but
+ /* SDIO card passes venid and devid based on CIS parsing -- but
* low-power start
* means early parse could fail, so here we should get either an ID
* we recognize OR (-1) indicating we must request power first.
@@ -5054,7 +5439,7 @@ static void *dhdsdio_probe(u16 venid, u16 devid, u16 bus_no,
case PCI_VENDOR_ID_BROADCOM:
break;
default:
- DHD_ERROR(("%s: unknown vendor: 0x%04x\n", __func__, venid));
+ BRCMF_ERROR(("%s: unknown vendor: 0x%04x\n", __func__, venid));
return NULL;
}
@@ -5063,104 +5448,156 @@ static void *dhdsdio_probe(u16 venid, u16 devid, u16 bus_no,
case BCM4325_D11DUAL_ID: /* 4325 802.11a/g id */
case BCM4325_D11G_ID: /* 4325 802.11g 2.4Ghz band id */
case BCM4325_D11A_ID: /* 4325 802.11a 5Ghz band id */
- DHD_INFO(("%s: found 4325 Dongle\n", __func__));
+ BRCMF_INFO(("%s: found 4325 Dongle\n", __func__));
break;
case BCM4329_D11NDUAL_ID: /* 4329 802.11n dualband device */
case BCM4329_D11N2G_ID: /* 4329 802.11n 2.4G device */
case BCM4329_D11N5G_ID: /* 4329 802.11n 5G device */
case 0x4329:
- DHD_INFO(("%s: found 4329 Dongle\n", __func__));
+ BRCMF_INFO(("%s: found 4329 Dongle\n", __func__));
break;
case BCM4319_D11N_ID: /* 4319 802.11n id */
case BCM4319_D11N2G_ID: /* 4319 802.11n2g id */
case BCM4319_D11N5G_ID: /* 4319 802.11n5g id */
- DHD_INFO(("%s: found 4319 Dongle\n", __func__));
+ BRCMF_INFO(("%s: found 4319 Dongle\n", __func__));
break;
case 0:
- DHD_INFO(("%s: allow device id 0, will check chip internals\n",
- __func__));
+ BRCMF_INFO(("%s: allow device id 0, will check chip"
+ " internals\n", __func__));
break;
default:
- DHD_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
- __func__, venid, devid));
+ BRCMF_ERROR(("%s: skipping 0x%04x/0x%04x, not a dongle\n",
+ __func__, venid, devid));
return NULL;
}
/* Allocate private bus interface state */
- bus = kzalloc(sizeof(dhd_bus_t), GFP_ATOMIC);
+ bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
if (!bus) {
- DHD_ERROR(("%s: kmalloc of dhd_bus_t failed\n", __func__));
+ BRCMF_ERROR(("%s: kmalloc of struct dhd_bus failed\n",
+ __func__));
goto fail;
}
- bus->sdh = sdh;
+ bus->card = card;
bus->cl_devid = (u16) devid;
- bus->bus = DHD_BUS;
+ bus->bus = BRCMF_BUS;
bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
bus->usebufpool = false; /* Use bufpool if allocated,
else use locally malloced rxbuf */
/* attempt to attach to the dongle */
- if (!(dhdsdio_probe_attach(bus, sdh, regsva, devid))) {
- DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __func__));
+ if (!(brcmf_sdbrcm_probe_attach(bus, card, regsva, devid))) {
+ BRCMF_ERROR(("%s: brcmf_sdbrcm_probe_attach failed\n",
+ __func__));
goto fail;
}
- /* Attach to the dhd/OS/network interface */
- bus->dhd = dhd_attach(bus, SDPCM_RESERVE);
- if (!bus->dhd) {
- DHD_ERROR(("%s: dhd_attach failed\n", __func__));
+ spin_lock_init(&bus->txqlock);
+ init_waitqueue_head(&bus->ctrl_wait);
+
+ /* Set up the watchdog timer */
+ init_timer(&bus->timer);
+ bus->timer.data = (unsigned long)bus;
+ bus->timer.function = brcmf_sdbrcm_watchdog;
+
+ /* Initialize thread based operation and lock */
+ if ((brcmf_watchdog_prio >= 0) && (brcmf_dpc_prio >= 0)) {
+ bus->threads_only = true;
+ sema_init(&bus->sdsem, 1);
+ } else {
+ bus->threads_only = false;
+ spin_lock_init(&bus->sdlock);
+ }
+
+ if (brcmf_dpc_prio >= 0) {
+ /* Initialize watchdog thread */
+ init_completion(&bus->watchdog_wait);
+ bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
+ bus, "brcmf_watchdog");
+ if (IS_ERR(bus->watchdog_tsk)) {
+ printk(KERN_WARNING
+ "brcmf_watchdog thread failed to start\n");
+ bus->watchdog_tsk = NULL;
+ }
+ } else
+ bus->watchdog_tsk = NULL;
+
+ /* Set up the bottom half handler */
+ if (brcmf_dpc_prio >= 0) {
+ /* Initialize DPC thread */
+ init_completion(&bus->dpc_wait);
+ bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread,
+ bus, "brcmf_dpc");
+ if (IS_ERR(bus->dpc_tsk)) {
+ printk(KERN_WARNING
+ "brcmf_dpc thread failed to start\n");
+ bus->dpc_tsk = NULL;
+ }
+ } else {
+ tasklet_init(&bus->tasklet, brcmf_sdbrcm_dpc_tasklet,
+ (unsigned long)bus);
+ bus->dpc_tsk = NULL;
+ }
+
+ /* Attach to the brcmf/OS/network interface */
+ bus->drvr = brcmf_attach(bus, SDPCM_RESERVE);
+ if (!bus->drvr) {
+ BRCMF_ERROR(("%s: brcmf_attach failed\n", __func__));
goto fail;
}
/* Allocate buffers */
- if (!(dhdsdio_probe_malloc(bus, sdh))) {
- DHD_ERROR(("%s: dhdsdio_probe_malloc failed\n", __func__));
+ if (!(brcmf_sdbrcm_probe_malloc(bus, card))) {
+ BRCMF_ERROR(("%s: brcmf_sdbrcm_probe_malloc failed\n",
+ __func__));
goto fail;
}
- if (!(dhdsdio_probe_init(bus, sdh))) {
- DHD_ERROR(("%s: dhdsdio_probe_init failed\n", __func__));
+ if (!(brcmf_sdbrcm_probe_init(bus, card))) {
+ BRCMF_ERROR(("%s: brcmf_sdbrcm_probe_init failed\n", __func__));
goto fail;
}
/* Register interrupt callback, but mask it (not operational yet). */
- DHD_INTR(("%s: disable SDIO interrupts (not interested yet)\n",
- __func__));
- bcmsdh_intr_disable(sdh);
- ret = bcmsdh_intr_reg(sdh, dhdsdio_isr, bus);
+ BRCMF_INTR(("%s: disable SDIO interrupts (not interested yet)\n",
+ __func__));
+ brcmf_sdcard_intr_disable(card);
+ ret = brcmf_sdcard_intr_reg(card, brcmf_sdbrcm_isr, bus);
if (ret != 0) {
- DHD_ERROR(("%s: FAILED: bcmsdh_intr_reg returned %d\n",
- __func__, ret));
+ BRCMF_ERROR(("%s: FAILED: sdcard_intr_reg returned %d\n",
+ __func__, ret));
goto fail;
}
- DHD_INTR(("%s: registered SDIO interrupt function ok\n", __func__));
+ BRCMF_INTR(("%s: registered SDIO interrupt function ok\n", __func__));
- DHD_INFO(("%s: completed!!\n", __func__));
+ BRCMF_INFO(("%s: completed!!\n", __func__));
/* if firmware path present try to download and bring up bus */
- ret = dhd_bus_start(bus->dhd);
+ ret = brcmf_bus_start(bus->drvr);
if (ret != 0) {
if (ret == -ENOLINK) {
- DHD_ERROR(("%s: dongle is not responding\n", __func__));
+ BRCMF_ERROR(("%s: dongle is not responding\n",
+ __func__));
goto fail;
}
}
/* Ok, have the per-port tell the stack we're open for business */
- if (dhd_net_attach(bus->dhd, 0) != 0) {
- DHD_ERROR(("%s: Net attach failed!!\n", __func__));
+ if (brcmf_net_attach(bus->drvr, 0) != 0) {
+ BRCMF_ERROR(("%s: Net attach failed!!\n", __func__));
goto fail;
}
return bus;
fail:
- dhdsdio_release(bus);
+ brcmf_sdbrcm_release(bus);
return NULL;
}
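In the probe path above, the watchdog is split between a timer and (optionally) a kthread: the timer callback only completes watchdog_wait, and the brcmf_watchdog thread created here does the real work. A condensed sketch of that split with simplified, hypothetical names (the patch's actual handlers appear near the end of the file):

	/* Timer callback: do no work in timer context, just signal. */
	static void wd_timer_fn(unsigned long data)
	{
		struct brcmf_bus *bus = (struct brcmf_bus *)data;

		if (bus->watchdog_tsk)
			complete(&bus->watchdog_wait);
	}

	/* Kthread: sleep on the completion, then run the bus watchdog. */
	static int wd_thread(void *data)
	{
		struct brcmf_bus *bus = data;

		while (!kthread_should_stop()) {
			if (wait_for_completion_interruptible(&bus->watchdog_wait))
				break;
			brcmf_sdbrcm_bus_watchdog(bus->drvr);
		}
		return 0;
	}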
static bool
-dhdsdio_probe_attach(struct dhd_bus *bus, void *sdh, void *regsva, u16 devid)
+brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, void *card, u32 regsva,
+ u16 devid)
{
u8 clkctl = 0;
int err = 0;
@@ -5168,130 +5605,81 @@ dhdsdio_probe_attach(struct dhd_bus *bus, void *sdh, void *regsva, u16 devid)
bus->alp_only = true;
/* Return the window to backplane enumeration space for core access */
- if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE))
- DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __func__));
+ if (brcmf_sdbrcm_set_siaddr_window(bus, SI_ENUM_BASE))
+ BRCMF_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n",
+ __func__));
-#ifdef DHD_DEBUG
+#ifdef BCMDBG
printk(KERN_DEBUG "F1 signature read @0x18000000=0x%4x\n",
- bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4));
+ brcmf_sdcard_reg_read(bus->card, SI_ENUM_BASE, 4));
-#endif /* DHD_DEBUG */
+#endif /* BCMDBG */
/*
- * Force PLL off until dhdsdio_chip_attach()
+ * Force PLL off until brcmf_sdbrcm_chip_attach()
* programs PLL control regs
*/
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- DHD_INIT_CLKCTL1, &err);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ BRCMF_INIT_CLKCTL1, &err);
if (!err)
clkctl =
- bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- &err);
+ brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (err || ((clkctl & ~SBSDIO_AVBITS) != DHD_INIT_CLKCTL1)) {
- DHD_ERROR(("dhdsdio_probe: ChipClkCSR access: err %d wrote "
- "0x%02x read 0x%02x\n",
- err, DHD_INIT_CLKCTL1, clkctl));
+ if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
+ BRCMF_ERROR(("brcmf_sdbrcm_probe: ChipClkCSR access: err %d"
+ " wrote 0x%02x read 0x%02x\n",
+ err, BRCMF_INIT_CLKCTL1, clkctl));
goto fail;
}
-#ifdef DHD_DEBUG
- if (DHD_INFO_ON()) {
- uint fn, numfn;
- u8 *cis[SDIOD_MAX_IOFUNCS];
- int err = 0;
-
- numfn = bcmsdh_query_iofnum(sdh);
- ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
- /* Make sure ALP is available before trying to read CIS */
- SPINWAIT(((clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- NULL)),
- !SBSDIO_ALPAV(clkctl)), PMU_MAX_TRANSITION_DLY);
-
- /* Now request ALP be put on the bus */
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- DHD_INIT_CLKCTL2, &err);
- udelay(65);
-
- for (fn = 0; fn <= numfn; fn++) {
- cis[fn] = kzalloc(SBSDIO_CIS_SIZE_LIMIT, GFP_ATOMIC);
- if (!cis[fn]) {
- DHD_INFO(("dhdsdio_probe: fn %d cis malloc "
- "failed\n", fn));
- break;
- }
-
- err = bcmsdh_cis_read(sdh, fn, cis[fn],
- SBSDIO_CIS_SIZE_LIMIT);
- if (err) {
- DHD_INFO(("dhdsdio_probe: fn %d cis read "
- "err %d\n", fn, err));
- kfree(cis[fn]);
- break;
- }
- dhd_dump_cis(fn, cis[fn]);
- }
-
- while (fn-- > 0) {
- ASSERT(cis[fn]);
- kfree(cis[fn]);
- }
-
- if (err) {
- DHD_ERROR(("dhdsdio_probe: error read/parsing CIS\n"));
- goto fail;
- }
- }
-#endif /* DHD_DEBUG */
-
- if (dhdsdio_chip_attach(bus, regsva)) {
- DHD_ERROR(("%s: dhdsdio_chip_attach failed!\n", __func__));
+ if (brcmf_sdbrcm_chip_attach(bus, regsva)) {
+ BRCMF_ERROR(("%s: brcmf_sdbrcm_chip_attach failed!\n",
+ __func__));
goto fail;
}
- bcmsdh_chipinfo(sdh, bus->ci->chip, bus->ci->chiprev);
-
- if (!dhdsdio_chipmatch((u16) bus->ci->chip)) {
- DHD_ERROR(("%s: unsupported chip: 0x%04x\n",
- __func__, bus->ci->chip));
+ if (!brcmf_sdbrcm_chipmatch((u16) bus->ci->chip)) {
+ BRCMF_ERROR(("%s: unsupported chip: 0x%04x\n",
+ __func__, bus->ci->chip));
goto fail;
}
- dhdsdio_sdiod_drive_strength_init(bus, dhd_sdiod_drive_strength);
+ brcmf_sdbrcm_sdiod_drive_strength_init(bus, brcmf_sdiod_drive_strength);
/* Get info on the ARM and SOCRAM cores... */
- if (!DHD_NOPMU(bus)) {
- bus->armrev = SBCOREREV(bcmsdh_reg_read(bus->sdh,
- CORE_SB(bus->ci->armcorebase, sbidhigh), 4));
+ if (!BRCMF_NOPMU(bus)) {
+ brcmf_sdcard_reg_read(bus->card,
+ CORE_SB(bus->ci->armcorebase, sbidhigh), 4);
bus->orig_ramsize = bus->ci->ramsize;
if (!(bus->orig_ramsize)) {
- DHD_ERROR(("%s: failed to find SOCRAM memory!\n",
- __func__));
+ BRCMF_ERROR(("%s: failed to find SOCRAM memory!\n",
+ __func__));
goto fail;
}
bus->ramsize = bus->orig_ramsize;
- if (dhd_dongle_memsize)
- dhd_dongle_setmemsize(bus, dhd_dongle_memsize);
+ if (brcmf_dongle_memsize)
+ brcmf_sdbrcm_setmemsize(bus, brcmf_dongle_memsize);
- DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d)\n",
- bus->ramsize, bus->orig_ramsize));
+ BRCMF_ERROR(("DHD: dongle ram size is set to %d(orig %d)\n",
+ bus->ramsize, bus->orig_ramsize));
}
- bus->regs = (void *)bus->ci->buscorebase;
-
/* Set core control so an SDIO reset does a backplane reset */
- OR_REG(&bus->regs->corecontrol, CC_BPRESEN);
+ OR_REG(bus->ci->buscorebase + offsetof(struct sdpcmd_regs,
+ corecontrol),
+ CC_BPRESEN, u32);
- bcm_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
+ brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
/* Locate an appropriately-aligned portion of hdrbuf */
- bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0], DHD_SDALIGN);
+ bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
+ BRCMF_SDALIGN);
/* Set the poll and/or interrupt flags */
- bus->intr = (bool) dhd_intr;
- bus->poll = (bool) dhd_poll;
+ bus->intr = (bool) brcmf_intr;
+ bus->poll = (bool) brcmf_poll;
if (bus->poll)
bus->pollrate = 1;
@@ -5301,18 +5689,18 @@ fail:
return false;
}
-static bool dhdsdio_probe_malloc(dhd_bus_t *bus, void *sdh)
+static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus, void *card)
{
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- if (bus->dhd->maxctl) {
+ if (bus->drvr->maxctl) {
bus->rxblen =
- roundup((bus->dhd->maxctl + SDPCM_HDRLEN),
- ALIGNMENT) + DHD_SDALIGN;
+ roundup((bus->drvr->maxctl + SDPCM_HDRLEN),
+ ALIGNMENT) + BRCMF_SDALIGN;
bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
if (!(bus->rxbuf)) {
- DHD_ERROR(("%s: kmalloc of %d-byte rxbuf failed\n",
- __func__, bus->rxblen));
+ BRCMF_ERROR(("%s: kmalloc of %d-byte rxbuf failed\n",
+ __func__, bus->rxblen));
goto fail;
}
}
@@ -5320,8 +5708,8 @@ static bool dhdsdio_probe_malloc(dhd_bus_t *bus, void *sdh)
/* Allocate buffer to receive glomed packet */
bus->databuf = kmalloc(MAX_DATA_BUF, GFP_ATOMIC);
if (!(bus->databuf)) {
- DHD_ERROR(("%s: kmalloc of %d-byte databuf failed\n",
- __func__, MAX_DATA_BUF));
+ BRCMF_ERROR(("%s: kmalloc of %d-byte databuf failed\n",
+ __func__, MAX_DATA_BUF));
/* release rxbuf which was already located as above */
if (!bus->rxblen)
kfree(bus->rxbuf);
@@ -5329,10 +5717,9 @@ static bool dhdsdio_probe_malloc(dhd_bus_t *bus, void *sdh)
}
/* Align the buffer */
- if ((unsigned long)bus->databuf % DHD_SDALIGN)
- bus->dataptr =
- bus->databuf + (DHD_SDALIGN -
- ((unsigned long)bus->databuf % DHD_SDALIGN));
+ if ((unsigned long)bus->databuf % BRCMF_SDALIGN)
+ bus->dataptr = bus->databuf + (BRCMF_SDALIGN -
+ ((unsigned long)bus->databuf % BRCMF_SDALIGN));
else
bus->dataptr = bus->databuf;
@@ -5342,181 +5729,145 @@ fail:
return false;
}
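The dataptr computation above aligns a kmalloc'ed buffer to BRCMF_SDALIGN by hand, which is why the allocations reserve BRCMF_SDALIGN extra bytes. The same arithmetic as a small helper (sketch, hypothetical name; works for any alignment value, not just powers of two):

	/* Sketch: round a pointer up to the next BRCMF_SDALIGN boundary. */
	static u8 *sdalign_up(u8 *p)
	{
		unsigned long rem = (unsigned long)p % BRCMF_SDALIGN;

		return rem ? p + (BRCMF_SDALIGN - rem) : p;
	}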
-static bool dhdsdio_probe_init(dhd_bus_t *bus, void *sdh)
+static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus, void *card)
{
s32 fnum;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
#ifdef SDTEST
- dhdsdio_pktgen_init(bus);
+ brcmf_sdbrcm_pktgen_init(bus);
#endif /* SDTEST */
/* Disable F2 to clear any intermediate frame state on the dongle */
- bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1,
- NULL);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_0, SDIO_CCCR_IOEx,
+ SDIO_FUNC_ENABLE_1, NULL);
- bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->drvr->busstate = BRCMF_BUS_DOWN;
bus->sleeping = false;
bus->rxflow = false;
- bus->prev_rxlim_hit = 0;
/* Done with backplane-dependent accesses, can drop clock... */
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+ brcmf_sdcard_cfg_write(card, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0,
+ NULL);
/* ...and initialize clock/power states */
bus->clkstate = CLK_SDONLY;
- bus->idletime = (s32) dhd_idletime;
- bus->idleclock = DHD_IDLE_ACTIVE;
-
- /* Query the SD clock speed */
- if (bcmsdh_iovar_op(sdh, "sd_divisor", NULL, 0,
- &bus->sd_divisor, sizeof(s32),
- false) != 0) {
- DHD_ERROR(("%s: fail on %s get\n", __func__, "sd_divisor"));
- bus->sd_divisor = -1;
- } else {
- DHD_INFO(("%s: Initial value for %s is %d\n",
- __func__, "sd_divisor", bus->sd_divisor));
- }
-
- /* Query the SD bus mode */
- if (bcmsdh_iovar_op(sdh, "sd_mode", NULL, 0,
- &bus->sd_mode, sizeof(s32), false) != 0) {
- DHD_ERROR(("%s: fail on %s get\n", __func__, "sd_mode"));
- bus->sd_mode = -1;
- } else {
- DHD_INFO(("%s: Initial value for %s is %d\n",
- __func__, "sd_mode", bus->sd_mode));
- }
+ bus->idletime = (s32) brcmf_idletime;
+ bus->idleclock = BRCMF_IDLE_ACTIVE;
/* Query the F2 block size, set roundup accordingly */
fnum = 2;
- if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(s32),
+ if (brcmf_sdcard_iovar_op(card, "sd_blocksize", &fnum, sizeof(s32),
&bus->blocksize, sizeof(s32), false) != 0) {
bus->blocksize = 0;
- DHD_ERROR(("%s: fail on %s get\n", __func__, "sd_blocksize"));
+ BRCMF_ERROR(("%s: fail on %s get\n", __func__, "sd_blocksize"));
} else {
- DHD_INFO(("%s: Initial value for %s is %d\n",
- __func__, "sd_blocksize", bus->blocksize));
+ BRCMF_INFO(("%s: Initial value for %s is %d\n",
+ __func__, "sd_blocksize", bus->blocksize));
}
bus->roundup = min(max_roundup, bus->blocksize);
/* Query if bus module supports packet chaining,
default to use if supported */
- if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0,
+ if (brcmf_sdcard_iovar_op(card, "sd_rxchain", NULL, 0,
&bus->sd_rxchain, sizeof(s32),
false) != 0) {
bus->sd_rxchain = false;
} else {
- DHD_INFO(("%s: bus module (through bcmsdh API) %s chaining\n",
- __func__,
- (bus->sd_rxchain ? "supports" : "does not support")));
+ BRCMF_INFO(("%s: bus module (through sdiocard API) %s"
+ " chaining\n", __func__, bus->sd_rxchain
+ ? "supports" : "does not support"));
}
bus->use_rxchain = (bool) bus->sd_rxchain;
return true;
}
-bool
-dhd_bus_download_firmware(struct dhd_bus *bus, char *fw_path, char *nv_path)
-{
- bool ret;
- bus->fw_path = fw_path;
- bus->nv_path = nv_path;
-
- ret = dhdsdio_download_firmware(bus, bus->sdh);
-
- return ret;
-}
-
static bool
-dhdsdio_download_firmware(struct dhd_bus *bus, void *sdh)
+brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus, void *card)
{
bool ret;
/* Download the firmware */
- dhdsdio_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
- ret = _dhdsdio_download_firmware(bus) == 0;
+ ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
- dhdsdio_clkctl(bus, CLK_SDONLY, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
return ret;
}
/* Detach and free everything */
-static void dhdsdio_release(dhd_bus_t *bus)
+static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
{
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
if (bus) {
/* De-register interrupt handler */
- bcmsdh_intr_disable(bus->sdh);
- bcmsdh_intr_dereg(bus->sdh);
+ brcmf_sdcard_intr_disable(bus->card);
+ brcmf_sdcard_intr_dereg(bus->card);
- if (bus->dhd) {
- dhd_detach(bus->dhd);
- dhdsdio_release_dongle(bus);
- bus->dhd = NULL;
+ if (bus->drvr) {
+ brcmf_detach(bus->drvr);
+ brcmf_sdbrcm_release_dongle(bus);
+ bus->drvr = NULL;
}
- dhdsdio_release_malloc(bus);
+ brcmf_sdbrcm_release_malloc(bus);
kfree(bus);
}
- DHD_TRACE(("%s: Disconnected\n", __func__));
+ BRCMF_TRACE(("%s: Disconnected\n", __func__));
}
-static void dhdsdio_release_malloc(dhd_bus_t *bus)
+static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
{
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- if (bus->dhd && bus->dhd->dongle_reset)
+ if (bus->drvr && bus->drvr->dongle_reset)
return;
- if (bus->rxbuf) {
- kfree(bus->rxbuf);
- bus->rxctl = bus->rxbuf = NULL;
- bus->rxlen = 0;
- }
+ kfree(bus->rxbuf);
+ bus->rxctl = bus->rxbuf = NULL;
+ bus->rxlen = 0;
kfree(bus->databuf);
bus->databuf = NULL;
}
-static void dhdsdio_release_dongle(dhd_bus_t *bus)
+static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
{
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- if (bus->dhd && bus->dhd->dongle_reset)
+ if (bus->drvr && bus->drvr->dongle_reset)
return;
if (bus->ci) {
- dhdsdio_clkctl(bus, CLK_AVAIL, false);
- dhdsdio_clkctl(bus, CLK_NONE, false);
- dhdsdio_chip_detach(bus);
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+ brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
+ brcmf_sdbrcm_chip_detach(bus);
if (bus->vars && bus->varsz)
kfree(bus->vars);
bus->vars = NULL;
}
- DHD_TRACE(("%s: Disconnected\n", __func__));
+ BRCMF_TRACE(("%s: Disconnected\n", __func__));
}
-static void dhdsdio_disconnect(void *ptr)
+static void brcmf_sdbrcm_disconnect(void *ptr)
{
- dhd_bus_t *bus = (dhd_bus_t *)ptr;
+ struct brcmf_bus *bus = (struct brcmf_bus *)ptr;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
if (bus) {
- ASSERT(bus->dhd);
- dhdsdio_release(bus);
+ brcmf_sdbrcm_release(bus);
}
- DHD_TRACE(("%s: Disconnected\n", __func__));
+ BRCMF_TRACE(("%s: Disconnected\n", __func__));
}
/* Register/Unregister functions are called by the main DHD entry
@@ -5524,147 +5875,78 @@ static void dhdsdio_disconnect(void *ptr)
* order to look for or await the device.
*/
-static bcmsdh_driver_t dhd_sdio = {
- dhdsdio_probe,
- dhdsdio_disconnect
+static struct brcmf_sdioh_driver brcmf_sdio = {
+ brcmf_sdbrcm_probe,
+ brcmf_sdbrcm_disconnect
};
-int dhd_bus_register(void)
+int brcmf_bus_register(void)
{
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- return bcmsdh_register(&dhd_sdio);
-}
+ /* Sanity check on the module parameters */
+ do {
+ /* Both watchdog and DPC as tasklets are ok */
+ if ((brcmf_watchdog_prio < 0) && (brcmf_dpc_prio < 0))
+ break;
-void dhd_bus_unregister(void)
-{
- DHD_TRACE(("%s: Enter\n", __func__));
+ /* If both watchdog and DPC are threads, TX must be deferred */
+ if ((brcmf_watchdog_prio >= 0) && (brcmf_dpc_prio >= 0)
+ && brcmf_deferred_tx)
+ break;
+
+ BRCMF_ERROR(("Invalid module parameters.\n"));
+ return -EINVAL;
+ } while (0);
- bcmsdh_unregister();
+ return brcmf_sdio_register(&brcmf_sdio);
}
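The do/while(0) block above accepts exactly two module-parameter combinations: both handlers as tasklets, or both as threads with TX deferred. The same rule expressed as a predicate (sketch, hypothetical helper name):

	/* Sketch: valid (watchdog_prio, dpc_prio, deferred_tx) combinations. */
	static bool brcmf_params_valid(int wd_prio, int dpc_prio, bool deferred_tx)
	{
		if (wd_prio < 0 && dpc_prio < 0)
			return true;	/* both as tasklets */
		if (wd_prio >= 0 && dpc_prio >= 0 && deferred_tx)
			return true;	/* both as threads, TX deferred */
		return false;		/* mixed, or threads without deferral */
	}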
-#ifdef BCMEMBEDIMAGE
-static int dhdsdio_download_code_array(struct dhd_bus *bus)
+void brcmf_bus_unregister(void)
{
- int bcmerror = -1;
- int offset = 0;
-
- DHD_INFO(("%s: download embedded firmware...\n", __func__));
-
- /* Download image */
- while ((offset + MEMBLOCK) < sizeof(dlarray)) {
- bcmerror =
- dhdsdio_membytes(bus, true, offset, dlarray + offset,
- MEMBLOCK);
- if (bcmerror) {
- DHD_ERROR(("%s: error %d on writing %d membytes at "
- "0x%08x\n",
- __func__, bcmerror, MEMBLOCK, offset));
- goto err;
- }
-
- offset += MEMBLOCK;
- }
+ BRCMF_TRACE(("%s: Enter\n", __func__));
- if (offset < sizeof(dlarray)) {
- bcmerror = dhdsdio_membytes(bus, true, offset,
- dlarray + offset,
- sizeof(dlarray) - offset);
- if (bcmerror) {
- DHD_ERROR(("%s: error %d on writing %d membytes at "
- "0x%08x\n", __func__, bcmerror,
- sizeof(dlarray) - offset, offset));
- goto err;
- }
- }
-#ifdef DHD_DEBUG
- /* Upload and compare the downloaded code */
- {
- unsigned char *ularray;
-
- ularray = kmalloc(bus->ramsize, GFP_ATOMIC);
- if (!ularray) {
- bcmerror = -ENOMEM;
- goto err;
- }
- /* Upload image to verify downloaded contents. */
- offset = 0;
- memset(ularray, 0xaa, bus->ramsize);
- while ((offset + MEMBLOCK) < sizeof(dlarray)) {
- bcmerror =
- dhdsdio_membytes(bus, false, offset,
- ularray + offset, MEMBLOCK);
- if (bcmerror) {
- DHD_ERROR(("%s: error %d on reading %d membytes"
- " at 0x%08x\n",
- __func__, bcmerror, MEMBLOCK, offset));
- goto free;
- }
-
- offset += MEMBLOCK;
- }
-
- if (offset < sizeof(dlarray)) {
- bcmerror = dhdsdio_membytes(bus, false, offset,
- ularray + offset,
- sizeof(dlarray) - offset);
- if (bcmerror) {
- DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
- __func__, bcmerror,
- sizeof(dlarray) - offset, offset));
- goto free;
- }
- }
-
- if (memcmp(dlarray, ularray, sizeof(dlarray))) {
- DHD_ERROR(("%s: Downloaded image is corrupted.\n",
- __func__));
- ASSERT(0);
- goto free;
- } else
- DHD_ERROR(("%s: Download/Upload/Compare succeeded.\n",
- __func__));
-free:
- kfree(ularray);
- }
-#endif /* DHD_DEBUG */
-
-err:
- return bcmerror;
+ brcmf_sdio_unregister();
}
-#endif /* BCMEMBEDIMAGE */
-static int dhdsdio_download_code_file(struct dhd_bus *bus, char *fw_path)
+static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
{
- int bcmerror = -1;
int offset = 0;
uint len;
- void *image = NULL;
u8 *memblock = NULL, *memptr;
+ int ret;
- DHD_INFO(("%s: download firmware %s\n", __func__, fw_path));
+ BRCMF_INFO(("%s: Enter\n", __func__));
- image = dhd_os_open_image(fw_path);
- if (image == NULL)
- goto err;
+ bus->fw_name = BCM4329_FW_NAME;
+ ret = request_firmware(&bus->firmware, bus->fw_name,
+ &gInstance->func[2]->dev);
+ if (ret) {
+ BRCMF_ERROR(("%s: Fail to request firmware %d\n",
+ __func__, ret));
+ return ret;
+ }
+ bus->fw_ptr = 0;
- memptr = memblock = kmalloc(MEMBLOCK + DHD_SDALIGN, GFP_ATOMIC);
+ memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
if (memblock == NULL) {
- DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
- __func__, MEMBLOCK));
+ BRCMF_ERROR(("%s: Failed to allocate memory %d bytes\n",
+ __func__, MEMBLOCK));
+ ret = -ENOMEM;
goto err;
}
- if ((u32)(unsigned long)memblock % DHD_SDALIGN)
- memptr +=
- (DHD_SDALIGN - ((u32)(unsigned long)memblock % DHD_SDALIGN));
+ if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
+ memptr += (BRCMF_SDALIGN -
+ ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
/* Download image */
while ((len =
- dhd_os_get_image_block((char *)memptr, MEMBLOCK, image))) {
- bcmerror = dhdsdio_membytes(bus, true, offset, memptr, len);
- if (bcmerror) {
- DHD_ERROR(("%s: error %d on writing %d membytes at "
- "0x%08x\n", __func__, bcmerror, MEMBLOCK, offset));
+ brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus))) {
+ ret = brcmf_sdbrcm_membytes(bus, true, offset, memptr, len);
+ if (ret) {
+ BRCMF_ERROR(("%s: error %d on writing %d membytes at "
+ "0x%08x\n", __func__, ret, MEMBLOCK,
+ offset));
goto err;
}
@@ -5674,10 +5956,10 @@ static int dhdsdio_download_code_file(struct dhd_bus *bus, char *fw_path)
err:
kfree(memblock);
- if (image)
- dhd_os_close_image(image);
+ release_firmware(bus->firmware);
+ bus->fw_ptr = 0;
- return bcmerror;
+ return ret;
}
/*
@@ -5689,7 +5971,7 @@ err:
* by two NULs.
*/
-static uint process_nvram_vars(char *varbuf, uint len)
+static uint brcmf_process_nvram_vars(char *varbuf, uint len)
{
char *dp;
bool findNewline;
@@ -5731,162 +6013,86 @@ static uint process_nvram_vars(char *varbuf, uint len)
return buf_len;
}
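For reference, what brcmf_process_nvram_vars() produces: assuming the conventional newline-separated name=value input, the buffer read from the nvram file is repacked in place so each pair is NUL-terminated, and brcmf_sdbrcm_download_nvram() then appends one more NUL so the block handed to the dongle ends in two NULs. Illustrative data only:

	/* Illustration only (hypothetical contents):
	 *   input  : "ssid=test\nmacaddr=00:11:22:33:44:55\n"
	 *   output : "ssid=test\0macaddr=00:11:22:33:44:55\0"
	 * download_nvram() appends the final '\0' before calling
	 * brcmf_sdbrcm_downloadvars().
	 */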
-/*
- EXAMPLE: nvram_array
- nvram_arry format:
- name=value
- Use carriage return at the end of each assignment,
- and an empty string with
- carriage return at the end of array.
-
- For example:
- unsigned char nvram_array[] = {"name1=value1\n",
- "name2=value2\n", "\n"};
- Hex values start with 0x, and mac addr format: xx:xx:xx:xx:xx:xx.
-
- Search "EXAMPLE: nvram_array" to see how the array is activated.
-*/
-
-void dhd_bus_set_nvram_params(struct dhd_bus *bus, const char *nvram_params)
-{
- bus->nvram_params = nvram_params;
-}
-
-static int dhdsdio_download_nvram(struct dhd_bus *bus)
+static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus)
{
- int bcmerror = -1;
uint len;
- void *image = NULL;
char *memblock = NULL;
char *bufp;
- char *nv_path;
- bool nvram_file_exists;
-
- nv_path = bus->nv_path;
-
- nvram_file_exists = ((nv_path != NULL) && (nv_path[0] != '\0'));
- if (!nvram_file_exists && (bus->nvram_params == NULL))
- return 0;
+ int ret;
- if (nvram_file_exists) {
- image = dhd_os_open_image(nv_path);
- if (image == NULL)
- goto err;
+ bus->nv_name = BCM4329_NV_NAME;
+ ret = request_firmware(&bus->firmware, bus->nv_name,
+ &gInstance->func[2]->dev);
+ if (ret) {
+ BRCMF_ERROR(("%s: Fail to request nvram %d\n", __func__, ret));
+ return ret;
}
+ bus->fw_ptr = 0;
memblock = kmalloc(MEMBLOCK, GFP_ATOMIC);
if (memblock == NULL) {
- DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
- __func__, MEMBLOCK));
+ BRCMF_ERROR(("%s: Failed to allocate memory %d bytes\n",
+ __func__, MEMBLOCK));
+ ret = -ENOMEM;
goto err;
}
- /* Download variables */
- if (nvram_file_exists) {
- len = dhd_os_get_image_block(memblock, MEMBLOCK, image);
- } else {
- len = strlen(bus->nvram_params);
- ASSERT(len <= MEMBLOCK);
- if (len > MEMBLOCK)
- len = MEMBLOCK;
- memcpy(memblock, bus->nvram_params, len);
- }
+ len = brcmf_sdbrcm_get_image(memblock, MEMBLOCK, bus);
if (len > 0 && len < MEMBLOCK) {
bufp = (char *)memblock;
bufp[len] = 0;
- len = process_nvram_vars(bufp, len);
+ len = brcmf_process_nvram_vars(bufp, len);
bufp += len;
*bufp++ = 0;
if (len)
- bcmerror = dhdsdio_downloadvars(bus, memblock, len + 1);
- if (bcmerror) {
- DHD_ERROR(("%s: error downloading vars: %d\n",
- __func__, bcmerror));
- }
+ ret = brcmf_sdbrcm_downloadvars(bus, memblock, len + 1);
+ if (ret)
+ BRCMF_ERROR(("%s: error downloading vars: %d\n",
+ __func__, ret));
} else {
- DHD_ERROR(("%s: error reading nvram file: %d\n",
- __func__, len));
- bcmerror = -EIO;
+ BRCMF_ERROR(("%s: error reading nvram file: %d\n",
+ __func__, len));
+ ret = -EIO;
}
err:
kfree(memblock);
- if (image)
- dhd_os_close_image(image);
+ release_firmware(bus->firmware);
+ bus->fw_ptr = 0;
- return bcmerror;
+ return ret;
}
-static int _dhdsdio_download_firmware(struct dhd_bus *bus)
+static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
{
int bcmerror = -1;
- bool embed = false; /* download embedded firmware */
- bool dlok = false; /* download firmware succeeded */
-
- /* Out immediately if no image to download */
- if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
-#ifdef BCMEMBEDIMAGE
- embed = true;
-#else
- return bcmerror;
-#endif
- }
-
/* Keep arm in reset */
- if (dhdsdio_download_state(bus, true)) {
- DHD_ERROR(("%s: error placing ARM core in reset\n", __func__));
+ if (brcmf_sdbrcm_download_state(bus, true)) {
+ BRCMF_ERROR(("%s: error placing ARM core in reset\n",
+ __func__));
goto err;
}
/* External image takes precedence if specified */
- if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
- if (dhdsdio_download_code_file(bus, bus->fw_path)) {
- DHD_ERROR(("%s: dongle image file download failed\n",
- __func__));
-#ifdef BCMEMBEDIMAGE
- embed = true;
-#else
- goto err;
-#endif
- } else {
- embed = false;
- dlok = true;
- }
- }
-#ifdef BCMEMBEDIMAGE
- if (embed) {
- if (dhdsdio_download_code_array(bus)) {
- DHD_ERROR(("%s: dongle image array download failed\n",
- __func__));
- goto err;
- } else {
- dlok = true;
- }
- }
-#endif
- if (!dlok) {
- DHD_ERROR(("%s: dongle image download failed\n", __func__));
+ if (brcmf_sdbrcm_download_code_file(bus)) {
+ BRCMF_ERROR(("%s: dongle image file download failed\n",
+ __func__));
goto err;
}
- /* EXAMPLE: nvram_array */
- /* If a valid nvram_arry is specified as above, it can be passed
- down to dongle */
- /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
-
/* External nvram takes precedence if specified */
- if (dhdsdio_download_nvram(bus)) {
- DHD_ERROR(("%s: dongle nvram file download failed\n",
- __func__));
+ if (brcmf_sdbrcm_download_nvram(bus)) {
+ BRCMF_ERROR(("%s: dongle nvram file download failed\n",
+ __func__));
}
/* Take arm out of reset */
- if (dhdsdio_download_state(bus, false)) {
- DHD_ERROR(("%s: error getting out of ARM core reset\n",
- __func__));
+ if (brcmf_sdbrcm_download_state(bus, false)) {
+ BRCMF_ERROR(("%s: error getting out of ARM core reset\n",
+ __func__));
goto err;
}
@@ -5898,110 +6104,83 @@ err:
static int
-dhd_bcmsdh_send_buf(dhd_bus_t *bus, u32 addr, uint fn, uint flags,
+brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags,
u8 *buf, uint nbytes, struct sk_buff *pkt,
- bcmsdh_cmplt_fn_t complete, void *handle)
+ void (*complete)(void *handle, int status,
+ bool sync_waiting),
+ void *handle)
{
- return bcmsdh_send_buf
- (bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete,
+ return brcmf_sdcard_send_buf
+ (bus->card, addr, fn, flags, buf, nbytes, pkt, complete,
handle);
}
-uint dhd_bus_chip(struct dhd_bus *bus)
-{
- ASSERT(bus->ci != NULL);
- return bus->ci->chip;
-}
-
-void *dhd_bus_pub(struct dhd_bus *bus)
-{
- return bus->dhd;
-}
-
-void *dhd_bus_txq(struct dhd_bus *bus)
-{
- return &bus->txq;
-}
-
-uint dhd_bus_hdrlen(struct dhd_bus *bus)
-{
- return SDPCM_HDRLEN;
-}
-
-int dhd_bus_devreset(dhd_pub_t *dhdp, u8 flag)
+int brcmf_bus_devreset(struct brcmf_pub *drvr, u8 flag)
{
int bcmerror = 0;
- dhd_bus_t *bus;
+ struct brcmf_bus *bus;
- bus = dhdp->bus;
+ bus = drvr->bus;
if (flag == true) {
- if (!bus->dhd->dongle_reset) {
+ brcmf_sdbrcm_wd_timer(bus, 0);
+ if (!bus->drvr->dongle_reset) {
/* Expect app to have torn down any
connection before calling */
/* Stop the bus, disable F2 */
- dhd_bus_stop(bus, false);
+ brcmf_sdbrcm_bus_stop(bus, false);
/* Clean tx/rx buffer pointers,
detach from the dongle */
- dhdsdio_release_dongle(bus);
+ brcmf_sdbrcm_release_dongle(bus);
- bus->dhd->dongle_reset = true;
- bus->dhd->up = false;
+ bus->drvr->dongle_reset = true;
+ bus->drvr->up = false;
- DHD_TRACE(("%s: WLAN OFF DONE\n", __func__));
+ BRCMF_TRACE(("%s: WLAN OFF DONE\n", __func__));
/* App can now remove power from device */
} else
bcmerror = -EIO;
} else {
/* App must have restored power to device before calling */
- DHD_TRACE(("\n\n%s: == WLAN ON ==\n", __func__));
+ BRCMF_TRACE(("\n\n%s: == WLAN ON ==\n", __func__));
- if (bus->dhd->dongle_reset) {
+ if (bus->drvr->dongle_reset) {
/* Turn on WLAN */
- /* Reset SD client */
- bcmsdh_reset(bus->sdh);
/* Attempt to re-attach & download */
- if (dhdsdio_probe_attach(bus, bus->sdh,
- (u32 *) SI_ENUM_BASE,
- bus->cl_devid)) {
+ if (brcmf_sdbrcm_probe_attach(bus, bus->card,
+ SI_ENUM_BASE,
+ bus->cl_devid)) {
/* Attempt to download binary to the dongle */
- if (dhdsdio_probe_init
- (bus, bus->sdh)
- && dhdsdio_download_firmware(bus,
- bus->sdh)) {
-
+ if (brcmf_sdbrcm_probe_init(bus, bus->card)) {
/* Re-init bus, enable F2 transfer */
- dhd_bus_init((dhd_pub_t *) bus->dhd,
- false);
-
-#if defined(OOB_INTR_ONLY)
- dhd_enable_oob_intr(bus, true);
-#endif /* defined(OOB_INTR_ONLY) */
+ brcmf_sdbrcm_bus_init(bus->drvr, false);
- bus->dhd->dongle_reset = false;
- bus->dhd->up = true;
+ bus->drvr->dongle_reset = false;
+ bus->drvr->up = true;
- DHD_TRACE(("%s: WLAN ON DONE\n",
- __func__));
+ BRCMF_TRACE(("%s: WLAN ON DONE\n",
+ __func__));
} else
bcmerror = -EIO;
} else
bcmerror = -EIO;
} else {
bcmerror = -EISCONN;
- DHD_ERROR(("%s: Set DEVRESET=false invoked when device "
- "is on\n", __func__));
+ BRCMF_ERROR(("%s: Set DEVRESET=false invoked when"
+ " device is on\n", __func__));
bcmerror = -EIO;
}
+ brcmf_sdbrcm_wd_timer(bus, brcmf_watchdog_ms);
}
return bcmerror;
}
static int
-dhdsdio_chip_recognition(bcmsdh_info_t *sdh, struct chip_info *ci, void *regs)
+brcmf_sdbrcm_chip_recognition(struct brcmf_sdio_card *card,
+ struct chip_info *ci, u32 regs)
{
u32 regdata;
@@ -6011,13 +6190,14 @@ dhdsdio_chip_recognition(bcmsdh_info_t *sdh, struct chip_info *ci, void *regs)
* For different chiptypes or old sdio hosts w/o chipcommon,
* other ways of recognition should be added here.
*/
- ci->cccorebase = (u32)regs;
- regdata = bcmsdh_reg_read(sdh, CORE_CC_REG(ci->cccorebase, chipid), 4);
+ ci->cccorebase = regs;
+ regdata = brcmf_sdcard_reg_read(card,
+ CORE_CC_REG(ci->cccorebase, chipid), 4);
ci->chip = regdata & CID_ID_MASK;
ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
- DHD_INFO(("%s: chipid=0x%x chiprev=%d\n",
- __func__, ci->chip, ci->chiprev));
+ BRCMF_INFO(("%s: chipid=0x%x chiprev=%d\n",
+ __func__, ci->chip, ci->chiprev));
/* Address of cores for new chips should be added here */
switch (ci->chip) {
@@ -6028,126 +6208,127 @@ dhdsdio_chip_recognition(bcmsdh_info_t *sdh, struct chip_info *ci, void *regs)
ci->ramsize = BCM4329_RAMSIZE;
break;
default:
- DHD_ERROR(("%s: chipid 0x%x is not supported\n",
- __func__, ci->chip));
+ BRCMF_ERROR(("%s: chipid 0x%x is not supported\n",
+ __func__, ci->chip));
return -ENODEV;
}
- regdata = bcmsdh_reg_read(sdh,
+ regdata = brcmf_sdcard_reg_read(card,
CORE_SB(ci->cccorebase, sbidhigh), 4);
ci->ccrev = SBCOREREV(regdata);
- regdata = bcmsdh_reg_read(sdh,
+ regdata = brcmf_sdcard_reg_read(card,
CORE_CC_REG(ci->cccorebase, pmucapabilities), 4);
ci->pmurev = regdata & PCAP_REV_MASK;
- regdata = bcmsdh_reg_read(sdh, CORE_SB(ci->buscorebase, sbidhigh), 4);
+ regdata = brcmf_sdcard_reg_read(card,
+ CORE_SB(ci->buscorebase, sbidhigh), 4);
ci->buscorerev = SBCOREREV(regdata);
ci->buscoretype = (regdata & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT;
- DHD_INFO(("%s: ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
- __func__, ci->ccrev, ci->pmurev,
- ci->buscorerev, ci->buscoretype));
+ BRCMF_INFO(("%s: ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
+ __func__, ci->ccrev, ci->pmurev,
+ ci->buscorerev, ci->buscoretype));
/* get chipcommon capabilites */
- ci->cccaps = bcmsdh_reg_read(sdh,
+ ci->cccaps = brcmf_sdcard_reg_read(card,
CORE_CC_REG(ci->cccorebase, capabilities), 4);
return 0;
}
static void
-dhdsdio_chip_disablecore(bcmsdh_info_t *sdh, u32 corebase)
+brcmf_sdbrcm_chip_disablecore(struct brcmf_sdio_card *card, u32 corebase)
{
u32 regdata;
- regdata = bcmsdh_reg_read(sdh,
+ regdata = brcmf_sdcard_reg_read(card,
CORE_SB(corebase, sbtmstatelow), 4);
if (regdata & SBTML_RESET)
return;
- regdata = bcmsdh_reg_read(sdh,
+ regdata = brcmf_sdcard_reg_read(card,
CORE_SB(corebase, sbtmstatelow), 4);
if ((regdata & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) != 0) {
/*
* set target reject and spin until busy is clear
* (preserve core-specific bits)
*/
- regdata = bcmsdh_reg_read(sdh,
+ regdata = brcmf_sdcard_reg_read(card,
CORE_SB(corebase, sbtmstatelow), 4);
- bcmsdh_reg_write(sdh, CORE_SB(corebase, sbtmstatelow), 4,
+ brcmf_sdcard_reg_write(card, CORE_SB(corebase, sbtmstatelow), 4,
regdata | SBTML_REJ);
- regdata = bcmsdh_reg_read(sdh,
+ regdata = brcmf_sdcard_reg_read(card,
CORE_SB(corebase, sbtmstatelow), 4);
udelay(1);
- SPINWAIT((bcmsdh_reg_read(sdh,
+ SPINWAIT((brcmf_sdcard_reg_read(card,
CORE_SB(corebase, sbtmstatehigh), 4) &
SBTMH_BUSY), 100000);
- regdata = bcmsdh_reg_read(sdh,
+ regdata = brcmf_sdcard_reg_read(card,
CORE_SB(corebase, sbtmstatehigh), 4);
if (regdata & SBTMH_BUSY)
- DHD_ERROR(("%s: ARM core still busy\n", __func__));
+ BRCMF_ERROR(("%s: ARM core still busy\n", __func__));
- regdata = bcmsdh_reg_read(sdh,
+ regdata = brcmf_sdcard_reg_read(card,
CORE_SB(corebase, sbidlow), 4);
if (regdata & SBIDL_INIT) {
- regdata = bcmsdh_reg_read(sdh,
+ regdata = brcmf_sdcard_reg_read(card,
CORE_SB(corebase, sbimstate), 4) |
SBIM_RJ;
- bcmsdh_reg_write(sdh,
+ brcmf_sdcard_reg_write(card,
CORE_SB(corebase, sbimstate), 4,
regdata);
- regdata = bcmsdh_reg_read(sdh,
+ regdata = brcmf_sdcard_reg_read(card,
CORE_SB(corebase, sbimstate), 4);
udelay(1);
- SPINWAIT((bcmsdh_reg_read(sdh,
+ SPINWAIT((brcmf_sdcard_reg_read(card,
CORE_SB(corebase, sbimstate), 4) &
SBIM_BY), 100000);
}
/* set reset and reject while enabling the clocks */
- bcmsdh_reg_write(sdh,
+ brcmf_sdcard_reg_write(card,
CORE_SB(corebase, sbtmstatelow), 4,
(((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
SBTML_REJ | SBTML_RESET));
- regdata = bcmsdh_reg_read(sdh,
+ regdata = brcmf_sdcard_reg_read(card,
CORE_SB(corebase, sbtmstatelow), 4);
udelay(10);
/* clear the initiator reject bit */
- regdata = bcmsdh_reg_read(sdh,
+ regdata = brcmf_sdcard_reg_read(card,
CORE_SB(corebase, sbidlow), 4);
if (regdata & SBIDL_INIT) {
- regdata = bcmsdh_reg_read(sdh,
+ regdata = brcmf_sdcard_reg_read(card,
CORE_SB(corebase, sbimstate), 4) &
~SBIM_RJ;
- bcmsdh_reg_write(sdh,
+ brcmf_sdcard_reg_write(card,
CORE_SB(corebase, sbimstate), 4,
regdata);
}
}
/* leave reset and reject asserted */
- bcmsdh_reg_write(sdh, CORE_SB(corebase, sbtmstatelow), 4,
+ brcmf_sdcard_reg_write(card, CORE_SB(corebase, sbtmstatelow), 4,
(SBTML_REJ | SBTML_RESET));
udelay(1);
}
static int
-dhdsdio_chip_attach(struct dhd_bus *bus, void *regs)
+brcmf_sdbrcm_chip_attach(struct brcmf_bus *bus, u32 regs)
{
struct chip_info *ci;
int err;
u8 clkval, clkset;
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
/* alloc chip_info_t */
ci = kmalloc(sizeof(struct chip_info), GFP_ATOMIC);
if (NULL == ci) {
- DHD_ERROR(("%s: malloc failed!\n", __func__));
+ BRCMF_ERROR(("%s: malloc failed!\n", __func__));
return -ENOMEM;
}
@@ -6156,48 +6337,48 @@ dhdsdio_chip_attach(struct dhd_bus *bus, void *regs)
/* bus/core/clk setup for register access */
/* Try forcing SDIO core to do ALPAvail request only */
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
clkset, &err);
if (err) {
- DHD_ERROR(("%s: error writing for HT off\n", __func__));
+ BRCMF_ERROR(("%s: error writing for HT off\n", __func__));
goto fail;
}
/* If register supported, wait for ALPAvail and then force ALP */
/* This may take up to 15 milliseconds */
- clkval = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ clkval = brcmf_sdcard_cfg_read(bus->card, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, NULL);
if ((clkval & ~SBSDIO_AVBITS) == clkset) {
SPINWAIT(((clkval =
- bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ brcmf_sdcard_cfg_read(bus->card, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR,
NULL)),
!SBSDIO_ALPAV(clkval)),
PMU_MAX_TRANSITION_DLY);
if (!SBSDIO_ALPAV(clkval)) {
- DHD_ERROR(("%s: timeout on ALPAV wait, clkval 0x%02x\n",
- __func__, clkval));
+ BRCMF_ERROR(("%s: timeout on ALPAV wait,"
+ " clkval 0x%02x\n", __func__, clkval));
err = -EBUSY;
goto fail;
}
clkset = SBSDIO_FORCE_HW_CLKREQ_OFF |
SBSDIO_FORCE_ALP;
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR,
clkset, &err);
udelay(65);
} else {
- DHD_ERROR(("%s: ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
- __func__, clkset, clkval));
+ BRCMF_ERROR(("%s: ChipClkCSR access: wrote 0x%02x"
+ " read 0x%02x\n", __func__, clkset, clkval));
err = -EACCES;
goto fail;
}
/* Also, disable the extra SDIO pull-ups */
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0,
- NULL);
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP,
+ 0, NULL);
- err = dhdsdio_chip_recognition(bus->sdh, ci, regs);
+ err = brcmf_sdbrcm_chip_recognition(bus->card, ci, regs);
if (err)
goto fail;
@@ -6205,24 +6386,24 @@ dhdsdio_chip_attach(struct dhd_bus *bus, void *regs)
* Make sure any on-chip ARM is off (in case strapping is wrong),
* or downloaded code was already running.
*/
- dhdsdio_chip_disablecore(bus->sdh, ci->armcorebase);
+ brcmf_sdbrcm_chip_disablecore(bus->card, ci->armcorebase);
- bcmsdh_reg_write(bus->sdh,
+ brcmf_sdcard_reg_write(bus->card,
CORE_CC_REG(ci->cccorebase, gpiopullup), 4, 0);
- bcmsdh_reg_write(bus->sdh,
+ brcmf_sdcard_reg_write(bus->card,
CORE_CC_REG(ci->cccorebase, gpiopulldown), 4, 0);
/* Disable F2 to clear any intermediate frame state on the dongle */
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN,
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_0, SDIO_CCCR_IOEx,
SDIO_FUNC_ENABLE_1, NULL);
/* WAR: cmd52 backplane read so core HW will drop ALPReq */
- clkval = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1,
+ clkval = brcmf_sdcard_cfg_read(bus->card, SDIO_FUNC_1,
0, NULL);
/* Done with backplane-dependent accesses, can drop clock... */
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0,
- NULL);
+ brcmf_sdcard_cfg_write(bus->card, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ 0, NULL);
bus->ci = ci;
return 0;
@@ -6233,7 +6414,7 @@ fail:
}
static void
-dhdsdio_chip_resetcore(bcmsdh_info_t *sdh, u32 corebase)
+brcmf_sdbrcm_chip_resetcore(struct brcmf_sdio_card *card, u32 corebase)
{
u32 regdata;
@@ -6241,35 +6422,37 @@ dhdsdio_chip_resetcore(bcmsdh_info_t *sdh, u32 corebase)
* Must do the disable sequence first to work for
* arbitrary current core state.
*/
- dhdsdio_chip_disablecore(sdh, corebase);
+ brcmf_sdbrcm_chip_disablecore(card, corebase);
/*
* Now do the initialization sequence.
* set reset while enabling the clock and
* forcing them on throughout the core
*/
- bcmsdh_reg_write(sdh, CORE_SB(corebase, sbtmstatelow), 4,
+ brcmf_sdcard_reg_write(card, CORE_SB(corebase, sbtmstatelow), 4,
((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
SBTML_RESET);
udelay(1);
- regdata = bcmsdh_reg_read(sdh, CORE_SB(corebase, sbtmstatehigh), 4);
+ regdata = brcmf_sdcard_reg_read(card, CORE_SB(corebase, sbtmstatehigh),
+ 4);
if (regdata & SBTMH_SERR)
- bcmsdh_reg_write(sdh, CORE_SB(corebase, sbtmstatehigh), 4, 0);
+ brcmf_sdcard_reg_write(card, CORE_SB(corebase, sbtmstatehigh),
+ 4, 0);
- regdata = bcmsdh_reg_read(sdh, CORE_SB(corebase, sbimstate), 4);
+ regdata = brcmf_sdcard_reg_read(card, CORE_SB(corebase, sbimstate), 4);
if (regdata & (SBIM_IBE | SBIM_TO))
- bcmsdh_reg_write(sdh, CORE_SB(corebase, sbimstate), 4,
+ brcmf_sdcard_reg_write(card, CORE_SB(corebase, sbimstate), 4,
regdata & ~(SBIM_IBE | SBIM_TO));
/* clear reset and allow it to propagate throughout the core */
- bcmsdh_reg_write(sdh, CORE_SB(corebase, sbtmstatelow), 4,
+ brcmf_sdcard_reg_write(card, CORE_SB(corebase, sbtmstatelow), 4,
(SICF_FGC << SBTML_SICF_SHIFT) |
(SICF_CLOCK_EN << SBTML_SICF_SHIFT));
udelay(1);
/* leave clock enabled */
- bcmsdh_reg_write(sdh, CORE_SB(corebase, sbtmstatelow), 4,
+ brcmf_sdcard_reg_write(card, CORE_SB(corebase, sbtmstatelow), 4,
(SICF_CLOCK_EN << SBTML_SICF_SHIFT));
udelay(1);
}
@@ -6317,7 +6500,7 @@ static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = {
#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
static void
-dhdsdio_sdiod_drive_strength_init(struct dhd_bus *bus, u32 drivestrength) {
+brcmf_sdbrcm_sdiod_drive_strength_init(struct brcmf_bus *bus, u32 drivestrength) {
struct sdiod_drive_str *str_tab = NULL;
u32 str_mask = 0;
u32 str_shift = 0;
@@ -6344,10 +6527,10 @@ dhdsdio_sdiod_drive_strength_init(struct dhd_bus *bus, u32 drivestrength) {
str_shift = 11;
break;
default:
- DHD_ERROR(("No SDIO Drive strength init"
- "done for chip %s rev %d pmurev %d\n",
- bcm_chipname(bus->ci->chip, chn, 8),
- bus->ci->chiprev, bus->ci->pmurev));
+ BRCMF_ERROR(("No SDIO Drive strength init"
+ "done for chip %s rev %d pmurev %d\n",
+ brcmu_chipname(bus->ci->chip, chn, 8),
+ bus->ci->chiprev, bus->ci->pmurev));
break;
}
@@ -6363,28 +6546,227 @@ dhdsdio_sdiod_drive_strength_init(struct dhd_bus *bus, u32 drivestrength) {
}
}
- bcmsdh_reg_write(bus->sdh,
+ brcmf_sdcard_reg_write(bus->card,
CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
4, 1);
- cc_data_temp = bcmsdh_reg_read(bus->sdh,
+ cc_data_temp = brcmf_sdcard_reg_read(bus->card,
CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), 4);
cc_data_temp &= ~str_mask;
drivestrength_sel <<= str_shift;
cc_data_temp |= drivestrength_sel;
- bcmsdh_reg_write(bus->sdh,
+ brcmf_sdcard_reg_write(bus->card,
CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
4, cc_data_temp);
- DHD_INFO(("SDIO: %dmA drive strength selected, set to 0x%08x\n",
- drivestrength, cc_data_temp));
+ BRCMF_INFO(("SDIO: %dmA drive strength selected, "
+ "set to 0x%08x\n", drivestrength, cc_data_temp));
}
}
static void
-dhdsdio_chip_detach(struct dhd_bus *bus)
+brcmf_sdbrcm_chip_detach(struct brcmf_bus *bus)
{
- DHD_TRACE(("%s: Enter\n", __func__));
+ BRCMF_TRACE(("%s: Enter\n", __func__));
kfree(bus->ci);
bus->ci = NULL;
}
+
+static void
+brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
+{
+ brcmf_sdbrcm_sdunlock(bus);
+ wait_event_interruptible_timeout(bus->ctrl_wait,
+ (*lockvar == false), HZ * 2);
+ brcmf_sdbrcm_sdlock(bus);
+ return;
+}
+
+static void
+brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
+{
+ if (waitqueue_active(&bus->ctrl_wait))
+ wake_up_interruptible(&bus->ctrl_wait);
+ return;
+}
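These two helpers implement a flag-plus-waitqueue handshake on ctrl_wait: the waiter drops the SDIO lock, sleeps up to two seconds for *lockvar to clear, then retakes the lock; the other side clears the flag and wakes it. A usage sketch with a hypothetical flag name:

	/* Waiter side (sketch): */
	bus->ctrl_frame_stat = true;		/* hypothetical flag */
	/* ... hand the control frame to the DPC ... */
	brcmf_sdbrcm_wait_for_event(bus, &bus->ctrl_frame_stat);

	/* Producer side (sketch), once the frame has been sent: */
	bus->ctrl_frame_stat = false;
	brcmf_sdbrcm_wait_event_wakeup(bus);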
+
+static int
+brcmf_sdbrcm_watchdog_thread(void *data)
+{
+ struct brcmf_bus *bus = (struct brcmf_bus *)data;
+
+ /* This thread doesn't need any user-level access,
+ * so get rid of all our resources
+ */
+ if (brcmf_watchdog_prio > 0) {
+ struct sched_param param;
+ param.sched_priority = (brcmf_watchdog_prio < MAX_RT_PRIO) ?
+ brcmf_watchdog_prio : (MAX_RT_PRIO - 1);
+ sched_setscheduler(current, SCHED_FIFO, &param);
+ }
+
+ allow_signal(SIGTERM);
+ /* Run until signal received */
+ while (1) {
+ if (kthread_should_stop())
+ break;
+ if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
+ if (bus->drvr->dongle_reset == false)
+ brcmf_sdbrcm_bus_watchdog(bus->drvr);
+ /* Count the tick for reference */
+ bus->drvr->tickcnt++;
+ } else
+ break;
+ }
+ return 0;
+}
+
+static void
+brcmf_sdbrcm_watchdog(unsigned long data)
+{
+ struct brcmf_bus *bus = (struct brcmf_bus *)data;
+
+ if (brcmf_watchdog_prio >= 0) {
+ if (bus->watchdog_tsk)
+ complete(&bus->watchdog_wait);
+ else
+ return;
+ } else {
+ brcmf_sdbrcm_bus_watchdog(bus->drvr);
+
+ /* Count the tick for reference */
+ bus->drvr->tickcnt++;
+ }
+
+ /* Reschedule the watchdog */
+ if (bus->wd_timer_valid)
+ mod_timer(&bus->timer, jiffies + brcmf_watchdog_ms * HZ / 1000);
+}
+
+void
+brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
+{
+ static uint save_ms;
+
+ /* don't start the wd until fw is loaded */
+ if (bus->drvr->busstate == BRCMF_BUS_DOWN)
+ return;
+
+ /* Totally stop the timer */
+ if (!wdtick && bus->wd_timer_valid == true) {
+ del_timer_sync(&bus->timer);
+ bus->wd_timer_valid = false;
+ save_ms = wdtick;
+ return;
+ }
+
+ if (wdtick) {
+ brcmf_watchdog_ms = (uint) wdtick;
+
+ if (save_ms != brcmf_watchdog_ms) {
+ if (bus->wd_timer_valid == true)
+ /* Stop timer and restart at new value */
+ del_timer_sync(&bus->timer);
+
+ /* Create timer again when watchdog period is
+ dynamically changed or in the first instance
+ */
+ bus->timer.expires =
+ jiffies + brcmf_watchdog_ms * HZ / 1000;
+ add_timer(&bus->timer);
+
+ } else {
+ /* Re arm the timer, at last watchdog period */
+ mod_timer(&bus->timer,
+ jiffies + brcmf_watchdog_ms * HZ / 1000);
+ }
+
+ bus->wd_timer_valid = true;
+ save_ms = wdtick;
+ }
+}
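The re-arming arithmetic above converts milliseconds to jiffies by hand (brcmf_watchdog_ms * HZ / 1000); the kernel helper msecs_to_jiffies() expresses the same conversion, give or take rounding. Sketch:

	/* Sketch: equivalent re-arm using msecs_to_jiffies(). */
	mod_timer(&bus->timer, jiffies + msecs_to_jiffies(brcmf_watchdog_ms));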
+
+static int brcmf_sdbrcm_dpc_thread(void *data)
+{
+ struct brcmf_bus *bus = (struct brcmf_bus *) data;
+
+ /* This thread doesn't need any user-level access,
+ * so get rid of all our resources
+ */
+ if (brcmf_dpc_prio > 0) {
+ struct sched_param param;
+ param.sched_priority = (brcmf_dpc_prio < MAX_RT_PRIO) ?
+ brcmf_dpc_prio : (MAX_RT_PRIO - 1);
+ sched_setscheduler(current, SCHED_FIFO, &param);
+ }
+
+ allow_signal(SIGTERM);
+ /* Run until signal received */
+ while (1) {
+ if (kthread_should_stop())
+ break;
+ if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
+ /* Call bus dpc unless it indicated down
+ (then clean stop) */
+ if (bus->drvr->busstate != BRCMF_BUS_DOWN) {
+ if (brcmf_sdbrcm_dpc(bus))
+ complete(&bus->dpc_wait);
+ } else {
+ brcmf_sdbrcm_bus_stop(bus, true);
+ }
+ } else
+ break;
+ }
+ return 0;
+}
+
+static void brcmf_sdbrcm_dpc_tasklet(unsigned long data)
+{
+ struct brcmf_bus *bus = (struct brcmf_bus *) data;
+
+ /* Call bus dpc unless it indicated down (then clean stop) */
+ if (bus->drvr->busstate != BRCMF_BUS_DOWN) {
+ if (brcmf_sdbrcm_dpc(bus))
+ tasklet_schedule(&bus->tasklet);
+ } else
+ brcmf_sdbrcm_bus_stop(bus, true);
+}
+
+static void brcmf_sdbrcm_sched_dpc(struct brcmf_bus *bus)
+{
+ if (bus->dpc_tsk) {
+ complete(&bus->dpc_wait);
+ return;
+ }
+
+ tasklet_schedule(&bus->tasklet);
+}
+
+static void brcmf_sdbrcm_sdlock(struct brcmf_bus *bus)
+{
+ if (bus->threads_only)
+ down(&bus->sdsem);
+ else
+ spin_lock_bh(&bus->sdlock);
+}
+
+static void brcmf_sdbrcm_sdunlock(struct brcmf_bus *bus)
+{
+ if (bus->threads_only)
+ up(&bus->sdsem);
+ else
+ spin_unlock_bh(&bus->sdlock);
+}
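
A sketch (not part of this patch) of the locking convention the helpers above encode: callers bracket any access to bus state with the lock/unlock pair, and the helpers internally choose the semaphore (threads_only) or the BH spinlock. example_touch_bus() is hypothetical.

static void example_touch_bus(struct brcmf_bus *bus)
{
	brcmf_sdbrcm_sdlock(bus);
	/* ... read or update bus state here ... */
	brcmf_sdbrcm_sdunlock(bus);
}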
+
+static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
+{
+ if (bus->firmware->size < bus->fw_ptr + len)
+ len = bus->firmware->size - bus->fw_ptr;
+
+ memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
+ bus->fw_ptr += len;
+ return len;
+}
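
A hedged sketch (not part of this patch) of how the chunked reader above is typically driven: pull fixed-size chunks until it returns 0, i.e. the end of the requested firmware image. EXAMPLE_MEMBLOCK and the commented-out write step are hypothetical placeholders.

#define EXAMPLE_MEMBLOCK 2048	/* hypothetical transfer chunk size */

static void example_download_firmware(struct brcmf_bus *bus, u8 *chunk)
{
	int len;

	while ((len = brcmf_sdbrcm_get_image((char *)chunk,
					     EXAMPLE_MEMBLOCK, bus)) > 0) {
		/* write 'len' bytes from 'chunk' to the dongle here */
	}
}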
+
+MODULE_FIRMWARE(BCM4329_FW_NAME);
+MODULE_FIRMWARE(BCM4329_NV_NAME);
diff --git a/drivers/staging/brcm80211/brcmfmac/dhdioctl.h b/drivers/staging/brcm80211/brcmfmac/dhdioctl.h
deleted file mode 100644
index f0ba53558cc..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/dhdioctl.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _dhdioctl_h_
-#define _dhdioctl_h_
-
-/* Linux network driver ioctl encoding */
-typedef struct dhd_ioctl {
- uint cmd; /* common ioctl definition */
- void *buf; /* pointer to user buffer */
- uint len; /* length of user buffer */
- bool set; /* get or set request (optional) */
- uint used; /* bytes read or written (optional) */
- uint needed; /* bytes needed (optional) */
- uint driver; /* to identify target driver */
-} dhd_ioctl_t;
-
-/* per-driver magic numbers */
-#define DHD_IOCTL_MAGIC 0x00444944
-
-/* bump this number if you change the ioctl interface */
-#define DHD_IOCTL_VERSION 1
-
-#define DHD_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
-#define DHD_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
-
-/* common ioctl definitions */
-#define DHD_GET_MAGIC 0
-#define DHD_GET_VERSION 1
-#define DHD_GET_VAR 2
-#define DHD_SET_VAR 3
-
-/* message levels */
-#define DHD_ERROR_VAL 0x0001
-#define DHD_TRACE_VAL 0x0002
-#define DHD_INFO_VAL 0x0004
-#define DHD_DATA_VAL 0x0008
-#define DHD_CTL_VAL 0x0010
-#define DHD_TIMER_VAL 0x0020
-#define DHD_HDRS_VAL 0x0040
-#define DHD_BYTES_VAL 0x0080
-#define DHD_INTR_VAL 0x0100
-#define DHD_LOG_VAL 0x0200
-#define DHD_GLOM_VAL 0x0400
-#define DHD_EVENT_VAL 0x0800
-#define DHD_BTA_VAL 0x1000
-#define DHD_ISCAN_VAL 0x2000
-
-#ifdef SDTEST
-/* For pktgen iovar */
-typedef struct dhd_pktgen {
- uint version; /* To allow structure change tracking */
- uint freq; /* Max ticks between tx/rx attempts */
- uint count; /* Test packets to send/rcv each attempt */
- uint print; /* Print counts every <print> attempts */
- uint total; /* Total packets (or bursts) */
- uint minlen; /* Minimum length of packets to send */
- uint maxlen; /* Maximum length of packets to send */
- uint numsent; /* Count of test packets sent */
- uint numrcvd; /* Count of test packets received */
- uint numfail; /* Count of test send failures */
- uint mode; /* Test mode (type of test packets) */
- uint stop; /* Stop after this many tx failures */
-} dhd_pktgen_t;
-
-/* Version in case structure changes */
-#define DHD_PKTGEN_VERSION 2
-
-/* Type of test packets to use */
-#define DHD_PKTGEN_ECHO 1 /* Send echo requests */
-#define DHD_PKTGEN_SEND 2 /* Send discard packets */
-#define DHD_PKTGEN_RXBURST 3 /* Request dongle send N packets */
-#define DHD_PKTGEN_RECV 4 /* Continuous rx from continuous
- tx dongle */
-#endif /* SDTEST */
-
-/* Enter idle immediately (no timeout) */
-#define DHD_IDLE_IMMEDIATE (-1)
-
-/* Values for idleclock iovar: other values are the sd_divisor to use
- when idle */
-#define DHD_IDLE_ACTIVE 0 /* Do not request any SD clock change
- when idle */
-#define DHD_IDLE_STOP (-1) /* Request SD clock be stopped
- (and use SD1 mode) */
-
-#endif /* _dhdioctl_h_ */
diff --git a/drivers/staging/brcm80211/brcmfmac/dngl_stats.h b/drivers/staging/brcm80211/brcmfmac/dngl_stats.h
deleted file mode 100644
index 699cbffa9c4..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/dngl_stats.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _dngl_stats_h_
-#define _dngl_stats_h_
-
-typedef struct {
- unsigned long rx_packets; /* total packets received */
- unsigned long tx_packets; /* total packets transmitted */
- unsigned long rx_bytes; /* total bytes received */
- unsigned long tx_bytes; /* total bytes transmitted */
- unsigned long rx_errors; /* bad packets received */
- unsigned long tx_errors; /* packet transmit problems */
- unsigned long rx_dropped; /* packets dropped by dongle */
- unsigned long tx_dropped; /* packets dropped by dongle */
- unsigned long multicast; /* multicast packets received */
-} dngl_stats_t;
-
-#endif /* _dngl_stats_h_ */
diff --git a/drivers/staging/brcm80211/brcmfmac/hndrte_armtrap.h b/drivers/staging/brcm80211/brcmfmac/hndrte_armtrap.h
deleted file mode 100644
index 28f092c9e02..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/hndrte_armtrap.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _hndrte_armtrap_h
-#define _hndrte_armtrap_h
-
-/* ARM trap handling */
-
-/* Trap types defined by ARM (see arminc.h) */
-
-/* Trap locations in lo memory */
-#define TRAP_STRIDE 4
-#define FIRST_TRAP TR_RST
-#define LAST_TRAP (TR_FIQ * TRAP_STRIDE)
-
-#if defined(__ARM_ARCH_4T__)
-#define MAX_TRAP_TYPE (TR_FIQ + 1)
-#elif defined(__ARM_ARCH_7M__)
-#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS)
-#endif /* __ARM_ARCH_7M__ */
-
-/* The trap structure is defined here as offsets for assembly */
-#define TR_TYPE 0x00
-#define TR_EPC 0x04
-#define TR_CPSR 0x08
-#define TR_SPSR 0x0c
-#define TR_REGS 0x10
-#define TR_REG(n) (TR_REGS + (n) * 4)
-#define TR_SP TR_REG(13)
-#define TR_LR TR_REG(14)
-#define TR_PC TR_REG(15)
-
-#define TRAP_T_SIZE 80
-
-#ifndef _LANGUAGE_ASSEMBLY
-
-typedef struct _trap_struct {
- u32 type;
- u32 epc;
- u32 cpsr;
- u32 spsr;
- u32 r0;
- u32 r1;
- u32 r2;
- u32 r3;
- u32 r4;
- u32 r5;
- u32 r6;
- u32 r7;
- u32 r8;
- u32 r9;
- u32 r10;
- u32 r11;
- u32 r12;
- u32 r13;
- u32 r14;
- u32 pc;
-} trap_t;
-
-#endif /* !_LANGUAGE_ASSEMBLY */
-
-#endif /* _hndrte_armtrap_h */
diff --git a/drivers/staging/brcm80211/brcmfmac/hndrte_cons.h b/drivers/staging/brcm80211/brcmfmac/hndrte_cons.h
deleted file mode 100644
index 4df3eecaa83..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/hndrte_cons.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#ifndef _hndrte_cons_h
-#define _hndrte_cons_h
-
-#define CBUF_LEN (128)
-
-#define LOG_BUF_LEN 1024
-
-typedef struct {
- u32 buf; /* Can't be pointer on (64-bit) hosts */
- uint buf_size;
- uint idx;
- char *_buf_compat; /* Redundant pointer for backward compat. */
-} hndrte_log_t;
-
-typedef struct {
- /* Virtual UART
- * When there is no UART (e.g. Quickturn),
- * the host should write a complete
- * input line directly into cbuf and then write
- * the length into vcons_in.
- * This may also be used when there is a real UART
- * (at risk of conflicting with
- * the real UART). vcons_out is currently unused.
- */
- volatile uint vcons_in;
- volatile uint vcons_out;
-
- /* Output (logging) buffer
- * Console output is written to a ring buffer log_buf at index log_idx.
- * The host may read the output when it sees log_idx advance.
- * Output will be lost if the output wraps around faster than the host
- * polls.
- */
- hndrte_log_t log;
-
- /* Console input line buffer
- * Characters are read one at a time into cbuf
- * until <CR> is received, then
- * the buffer is processed as a command line.
- * Also used for virtual UART.
- */
- uint cbuf_idx;
- char cbuf[CBUF_LEN];
-} hndrte_cons_t;
-
-#endif /* _hndrte_cons_h */
-
diff --git a/drivers/staging/brcm80211/brcmfmac/msgtrace.h b/drivers/staging/brcm80211/brcmfmac/msgtrace.h
deleted file mode 100644
index d654671a5a3..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/msgtrace.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _MSGTRACE_H
-#define _MSGTRACE_H
-
-#define MSGTRACE_VERSION 1
-
-/* Message trace header */
-typedef struct msgtrace_hdr {
- u8 version;
- u8 spare;
- u16 len; /* Len of the trace */
- u32 seqnum; /* Sequence number of message. Useful
- * if the messsage has been lost
- * because of DMA error or a bus reset
- * (ex: SDIO Func2)
- */
- u32 discarded_bytes; /* Number of discarded bytes because of
- trace overflow */
- u32 discarded_printf; /* Number of discarded printf
- because of trace overflow */
-} __attribute__((packed)) msgtrace_hdr_t;
-
-#define MSGTRACE_HDRLEN sizeof(msgtrace_hdr_t)
-
-/* The hbus driver generates traces when sending a trace message.
- * This causes endless traces.
- * This flag must be set to true in any hbus traces.
- * The flag is reset in the function msgtrace_put.
- * This prevents endless traces but generates hasardous
- * lost of traces only in bus device code.
- * It is recommendat to set this flag in macro SD_TRACE
- * but not in SD_ERROR for avoiding missing
- * hbus error traces. hbus error trace should not generates endless traces.
- */
-extern bool msgtrace_hbus_trace;
-
-typedef void (*msgtrace_func_send_t) (void *hdl1, void *hdl2, u8 *hdr,
- u16 hdrlen, u8 *buf,
- u16 buflen);
-
-extern void msgtrace_sent(void);
-extern void msgtrace_put(char *buf, int count);
-extern void msgtrace_init(void *hdl1, void *hdl2,
- msgtrace_func_send_t func_send);
-
-#endif /* _MSGTRACE_H */
diff --git a/drivers/staging/brcm80211/brcmfmac/sdio_host.h b/drivers/staging/brcm80211/brcmfmac/sdio_host.h
new file mode 100644
index 00000000000..d3454721506
--- /dev/null
+++ b/drivers/staging/brcm80211/brcmfmac/sdio_host.h
@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_SDH_H_
+#define _BRCM_SDH_H_
+
+#include <linux/skbuff.h>
+extern const uint brcmf_sdio_msglevel;
+
+#define SDIO_FUNC_0 0
+#define SDIO_FUNC_1 1
+#define SDIO_FUNC_2 2
+
+#define SDIOD_FBR_SIZE 0x100
+
+/* io_en */
+#define SDIO_FUNC_ENABLE_1 0x02
+#define SDIO_FUNC_ENABLE_2 0x04
+
+/* io_rdys */
+#define SDIO_FUNC_READY_1 0x02
+#define SDIO_FUNC_READY_2 0x04
+
+/* intr_status */
+#define INTR_STATUS_FUNC1 0x2
+#define INTR_STATUS_FUNC2 0x4
+
+/* Maximum number of I/O funcs */
+#define SDIOD_MAX_IOFUNCS 7
+
+#define SBSDIO_NUM_FUNCTION 3 /* as of sdiod rev 0, supports 3 functions */
+
+/* function 1 miscellaneous registers */
+#define SBSDIO_SPROM_CS 0x10000 /* sprom command and status */
+#define SBSDIO_SPROM_INFO 0x10001 /* sprom info register */
+#define SBSDIO_SPROM_DATA_LOW 0x10002 /* sprom indirect access data byte 0 */
+#define SBSDIO_SPROM_DATA_HIGH 0x10003 /* sprom indirect access data byte 1 */
+#define SBSDIO_SPROM_ADDR_LOW 0x10004 /* sprom indirect access addr byte 0 */
+#define SBSDIO_SPROM_ADDR_HIGH 0x10005 /* sprom indirect access addr byte 1 */
+#define SBSDIO_CHIP_CTRL_DATA 0x10006 /* xtal_pu (gpio) output */
+#define SBSDIO_CHIP_CTRL_EN 0x10007 /* xtal_pu (gpio) enable */
+#define SBSDIO_WATERMARK 0x10008 /* rev < 7, watermark for sdio device */
+#define SBSDIO_DEVICE_CTL 0x10009 /* control busy signal generation */
+
+/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
+#define SBSDIO_FUNC1_SBADDRLOW 0x1000A /* SB Address Window Low (b15) */
+#define SBSDIO_FUNC1_SBADDRMID 0x1000B /* SB Address Window Mid (b23:b16) */
+#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C /* SB Address Window High (b31:b24) */
+#define SBSDIO_FUNC1_FRAMECTRL 0x1000D /* Frame Control (frame term/abort) */
+#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E /* ChipClockCSR (ALP/HT ctl/status) */
+#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F /* SdioPullUp (on cmd, d0-d2) */
+#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 /* Write Frame Byte Count Low */
+#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A /* Write Frame Byte Count High */
+#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B /* Read Frame Byte Count Low */
+#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* Read Frame Byte Count High */
+
+#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */
+#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */
+
+/* function 1 OCP space */
+#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF /* sb offset addr is <= 15 bits, 32k */
+#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000
+#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 /* with b15, maps to 32-bit SB access */
+
+/* some duplication with sbsdpcmdev.h here */
+/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
+#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */
+#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */
+#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */
+#define SBSDIO_SBWINDOW_MASK 0xffff8000 /* Address bits from SBADDR regs */
+
+#define SDIOH_READ 0 /* Read request */
+#define SDIOH_WRITE 1 /* Write request */
+
+#define SDIOH_DATA_FIX 0 /* Fixed addressing */
+#define SDIOH_DATA_INC 1 /* Incremental addressing */
+
+/* internal return code */
+#define SUCCESS 0
+#define ERROR 1
+
+/* forward declarations */
+struct brcmf_sdio_card;
+
+struct brcmf_sdreg {
+ int func;
+ int offset;
+ int value;
+};
+
+struct sdioh_info {
+ struct osl_info *osh; /* osh handler */
+ bool client_intr_enabled; /* interrupt connected flag */
+ bool intr_handler_valid; /* client driver interrupt handler valid */
+ void (*intr_handler)(void *); /* registered interrupt handler */
+ void *intr_handler_arg; /* argument to call interrupt handler */
+ u16 intmask; /* Current active interrupts */
+ void *sdos_info; /* Pointer to per-OS private data */
+
+ uint irq; /* Client irq */
+ int intrcount; /* Client interrupts */
+ bool sd_blockmode; /* sd_blockmode == false => 64 Byte Cmd 53s. */
+ /* Must be on for sd_multiblock to be effective */
+ bool use_client_ints; /* If this is false, make sure to restore */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ u8 num_funcs; /* Supported funcs on client */
+ u32 com_cis_ptr;
+ u32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+ uint max_dma_len;
+ uint max_dma_descriptors; /* DMA Descriptors supported by this controller. */
+ /* SDDMA_DESCRIPTOR SGList[32]; *//* Scatter/Gather DMA List */
+};
+
+struct brcmf_sdmmc_instance {
+ struct sdioh_info *sd;
+ struct sdio_func *func[SDIOD_MAX_IOFUNCS];
+ u32 host_claimed;
+};
+
+/* Attach and build an interface to the underlying SD host driver.
+ * - Allocates resources (structs, arrays, mem, OS handles, etc) needed by
+ * brcmf_sdcard.
+ * - Returns the sdio card handle and virtual address base for register access.
+ * The returned handle should be used in all subsequent calls, but the bcmsh
+ * implementation may maintain a single "default" handle (e.g. the first or
+ * most recent one) to enable single-instance implementations to pass NULL.
+ */
+extern struct brcmf_sdio_card*
+brcmf_sdcard_attach(void *cfghdl, u32 *regsva, uint irq);
+
+/* Detach - freeup resources allocated in attach */
+extern int brcmf_sdcard_detach(struct brcmf_sdio_card *card);
+
+/* Enable/disable SD interrupt */
+extern int brcmf_sdcard_intr_enable(struct brcmf_sdio_card *card);
+extern int brcmf_sdcard_intr_disable(struct brcmf_sdio_card *card);
+
+/* Register/deregister device interrupt handler. */
+extern int
+brcmf_sdcard_intr_reg(struct brcmf_sdio_card *card,
+ void (*fn)(void *), void *argh);
+
+extern int brcmf_sdcard_intr_dereg(struct brcmf_sdio_card *card);
+
+/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
+ * fn: function number
+ * addr: unmodified SDIO-space address
+ * data: data byte to write
+ * err: pointer to error code (or NULL)
+ */
+extern u8 brcmf_sdcard_cfg_read(struct brcmf_sdio_card *card, uint func,
+ u32 addr, int *err);
+extern void brcmf_sdcard_cfg_write(struct brcmf_sdio_card *card, uint func,
+ u32 addr, u8 data, int *err);
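+
A minimal sketch (not part of this patch) of the CMD52 accessors declared above, reading the function-1 clock control register with the optional error pointer. example_read_clkcsr() is a hypothetical caller.

static u8 example_read_clkcsr(struct brcmf_sdio_card *card)
{
	int err = 0;
	u8 clkctl;

	clkctl = brcmf_sdcard_cfg_read(card, SDIO_FUNC_1,
				       SBSDIO_FUNC1_CHIPCLKCSR, &err);
	return err ? 0 : clkctl;	/* caller decides how to treat errors */
}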
+
+/* Read/Write 4bytes from/to cfg space */
+extern u32
+brcmf_sdcard_cfg_read_word(struct brcmf_sdio_card *card, uint fnc_num,
+ u32 addr, int *err);
+
+extern void brcmf_sdcard_cfg_write_word(struct brcmf_sdio_card *card,
+ uint fnc_num, u32 addr,
+ u32 data, int *err);
+
+/* Read CIS content for specified function.
+ * fn: function whose CIS is being requested (0 is common CIS)
+ * cis: pointer to memory location to place results
+ * length: number of bytes to read
+ * Internally, this routine uses the values from the cis base regs (0x9-0xB)
+ * to form an SDIO-space address to read the data from.
+ */
+extern int brcmf_sdcard_cis_read(struct brcmf_sdio_card *card, uint func,
+ u8 *cis, uint length);
+
+/* Synchronous access to device (client) core registers via CMD53 to F1.
+ * addr: backplane address (i.e. >= regsva from attach)
+ * size: register width in bytes (2 or 4)
+ * data: data for register write
+ */
+extern u32
+brcmf_sdcard_reg_read(struct brcmf_sdio_card *card, u32 addr, uint size);
+
+extern u32
+brcmf_sdcard_reg_write(struct brcmf_sdio_card *card, u32 addr, uint size,
+ u32 data);
+
+/* Indicate if last reg read/write failed */
+extern bool brcmf_sdcard_regfail(struct brcmf_sdio_card *card);
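+
A sketch (not part of this patch) combining the synchronous register accessor with brcmf_sdcard_regfail() to detect a failed backplane access; example_read_core_reg() and 'coreaddr' are hypothetical.

static int example_read_core_reg(struct brcmf_sdio_card *card,
				 u32 coreaddr, u32 *val)
{
	*val = brcmf_sdcard_reg_read(card, coreaddr, 4);
	return brcmf_sdcard_regfail(card) ? -EIO : 0;
}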
+
+/* Buffer transfer to/from device (client) core via cmd53.
+ * fn: function number
+ * addr: backplane address (i.e. >= regsva from attach)
+ * flags: backplane width, address increment, sync/async
+ * buf: pointer to memory data buffer
+ * nbytes: number of bytes to transfer to/from buf
+ * pkt: pointer to packet associated with buf (if any)
+ * complete: callback function for command completion (async only)
+ * handle: handle for completion callback (first arg in callback)
+ * Returns 0 or error code.
+ * NOTE: Async operation is not currently supported.
+ */
+extern int
+brcmf_sdcard_send_buf(struct brcmf_sdio_card *card, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes, void *pkt,
+ void (*complete)(void *handle, int status,
+ bool sync_waiting),
+ void *handle);
+extern int
+brcmf_sdcard_recv_buf(struct brcmf_sdio_card *card, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt,
+ void (*complete)(void *handle, int status,
+ bool sync_waiting),
+ void *handle);
+
+/* Flags bits */
+#define SDIO_REQ_4BYTE 0x1 /* Four-byte target (backplane) width (vs. two-byte) */
+#define SDIO_REQ_FIXED 0x2 /* Fixed address (FIFO) (vs. incrementing address) */
+#define SDIO_REQ_ASYNC 0x4 /* Async request (vs. sync request) */
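+
A hedged sketch (not part of this patch) of a synchronous CMD53 receive using the flag bits above (4-byte backplane width, incrementing address); since async transfers are documented as unsupported, the completion callback and handle are NULL. example_recv_block() is hypothetical.

static int example_recv_block(struct brcmf_sdio_card *card, u32 addr,
			      u8 *buf, uint nbytes)
{
	uint flags = SDIO_REQ_4BYTE;	/* sync, incrementing address */

	return brcmf_sdcard_recv_buf(card, addr, SDIO_FUNC_2, flags,
				     buf, nbytes, NULL, NULL, NULL);
}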
+
+/* Pending (non-error) return code */
+#define BCME_PENDING 1
+
+/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
+ * rw: read or write (0/1)
+ * addr: direct SDIO address
+ * buf: pointer to memory data buffer
+ * nbytes: number of bytes to transfer to/from buf
+ * Returns 0 or error code.
+ */
+extern int brcmf_sdcard_rwdata(struct brcmf_sdio_card *card, uint rw, u32 addr,
+ u8 *buf, uint nbytes);
+
+/* Issue an abort to the specified function */
+extern int brcmf_sdcard_abort(struct brcmf_sdio_card *card, uint fn);
+
+/* Returns the "Device ID" of target device on the SDIO bus. */
+extern int brcmf_sdcard_query_device(struct brcmf_sdio_card *card);
+
+/* Miscellaneous knob tweaker. */
+extern int brcmf_sdcard_iovar_op(struct brcmf_sdio_card *card, const char *name,
+ void *params, int plen, void *arg, int len,
+ bool set);
+
+/* helper functions */
+
+/* callback functions */
+struct brcmf_sdioh_driver {
+ /* attach to device */
+ void *(*attach) (u16 vend_id, u16 dev_id, u16 bus, u16 slot,
+ u16 func, uint bustype, u32 regsva, void *param);
+ /* detach from device */
+ void (*detach) (void *ch);
+};
+
+struct sdioh_info;
+
+/* platform specific/high level functions */
+extern int brcmf_sdio_function_init(void);
+extern int brcmf_sdio_register(struct brcmf_sdioh_driver *driver);
+extern void brcmf_sdio_unregister(void);
+extern void brcmf_sdio_function_cleanup(void);
+extern int brcmf_sdio_probe(struct device *dev);
+extern int brcmf_sdio_remove(struct device *dev);
+
+/* Function to return current window addr */
+extern u32 brcmf_sdcard_cur_sbwad(struct brcmf_sdio_card *card);
+
+/* Allocate/init/free per-OS private data */
+extern int brcmf_sdioh_osinit(struct sdioh_info *sd);
+extern void brcmf_sdioh_osfree(struct sdioh_info *sd);
+
+/* Core interrupt enable/disable of device interrupts */
+extern void brcmf_sdioh_dev_intr_on(struct sdioh_info *sd);
+extern void brcmf_sdioh_dev_intr_off(struct sdioh_info *sd);
+
+/* attach, return handle on success, NULL if failed.
+ * The handle shall be provided in all subsequent calls. No local cache.
+ * cfghdl points to the starting address of PCI device mapped memory.
+ */
+extern struct sdioh_info *brcmf_sdioh_attach(void *cfghdl, uint irq);
+extern int brcmf_sdioh_detach(struct sdioh_info *si);
+
+extern int
+brcmf_sdioh_interrupt_register(struct sdioh_info *si,
+ void (*sdioh_cb_fn)(void *), void *argh);
+
+extern int brcmf_sdioh_interrupt_deregister(struct sdioh_info *si);
+
+/* enable or disable SD interrupt */
+extern int
+brcmf_sdioh_interrupt_set(struct sdioh_info *si, bool enable_disable);
+
+/* read or write one byte using cmd52 */
+extern int
+brcmf_sdioh_request_byte(struct sdioh_info *si, uint rw, uint fnc, uint addr,
+ u8 *byte);
+
+/* read or write 2/4 bytes using cmd53 */
+extern int
+brcmf_sdioh_request_word(struct sdioh_info *si, uint cmd_type,
+ uint rw, uint fnc, uint addr,
+ u32 *word, uint nbyte);
+
+/* read or write any buffer using cmd53 */
+extern int
+brcmf_sdioh_request_buffer(struct sdioh_info *si, uint pio_dma,
+ uint fix_inc, uint rw, uint fnc_num,
+ u32 addr, uint regwidth,
+ u32 buflen, u8 *buffer, struct sk_buff *pkt);
+
+/* get cis data */
+extern int
+brcmf_sdioh_cis_read(struct sdioh_info *si, uint fuc, u8 *cis, u32 length);
+
+extern int
+brcmf_sdioh_cfg_read(struct sdioh_info *si, uint fuc, u32 addr, u8 *data);
+extern int
+brcmf_sdioh_cfg_write(struct sdioh_info *si, uint fuc, u32 addr, u8 *data);
+
+/* handle iovars */
+extern int brcmf_sdioh_iovar_op(struct sdioh_info *si, const char *name,
+ void *params, int plen, void *arg, int len, bool set);
+
+/* Issue abort to the specified function and clear controller as needed */
+extern int brcmf_sdioh_abort(struct sdioh_info *si, uint fnc);
+
+/* Watchdog timer interface for pm ops */
+extern void brcmf_sdio_wdtmr_enable(bool enable);
+
+extern uint sd_msglevel; /* Debug message level */
+
+extern struct brcmf_sdmmc_instance *gInstance;
+
+#endif /* _BRCM_SDH_H_ */
diff --git a/drivers/staging/brcm80211/brcmfmac/sdioh.h b/drivers/staging/brcm80211/brcmfmac/sdioh.h
deleted file mode 100644
index f96aaf9cec7..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/sdioh.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _SDIOH_H
-#define _SDIOH_H
-
-#define SD_SysAddr 0x000
-#define SD_BlockSize 0x004
-#define SD_BlockCount 0x006
-#define SD_Arg0 0x008
-#define SD_Arg1 0x00A
-#define SD_TransferMode 0x00C
-#define SD_Command 0x00E
-#define SD_Response0 0x010
-#define SD_Response1 0x012
-#define SD_Response2 0x014
-#define SD_Response3 0x016
-#define SD_Response4 0x018
-#define SD_Response5 0x01A
-#define SD_Response6 0x01C
-#define SD_Response7 0x01E
-#define SD_BufferDataPort0 0x020
-#define SD_BufferDataPort1 0x022
-#define SD_PresentState 0x024
-#define SD_HostCntrl 0x028
-#define SD_PwrCntrl 0x029
-#define SD_BlockGapCntrl 0x02A
-#define SD_WakeupCntrl 0x02B
-#define SD_ClockCntrl 0x02C
-#define SD_TimeoutCntrl 0x02E
-#define SD_SoftwareReset 0x02F
-#define SD_IntrStatus 0x030
-#define SD_ErrorIntrStatus 0x032
-#define SD_IntrStatusEnable 0x034
-#define SD_ErrorIntrStatusEnable 0x036
-#define SD_IntrSignalEnable 0x038
-#define SD_ErrorIntrSignalEnable 0x03A
-#define SD_CMD12ErrorStatus 0x03C
-#define SD_Capabilities 0x040
-#define SD_Capabilities_Reserved 0x044
-#define SD_MaxCurCap 0x048
-#define SD_MaxCurCap_Reserved 0x04C
-#define SD_ADMA_SysAddr 0x58
-#define SD_SlotInterruptStatus 0x0FC
-#define SD_HostControllerVersion 0x0FE
-
-/* SD specific registers in PCI config space */
-#define SD_SlotInfo 0x40
-
-#endif /* _SDIOH_H */
diff --git a/drivers/staging/brcm80211/brcmfmac/sdiovar.h b/drivers/staging/brcm80211/brcmfmac/sdiovar.h
deleted file mode 100644
index d1cfa5f0a98..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/sdiovar.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _sdiovar_h_
-#define _sdiovar_h_
-
-typedef struct sdreg {
- int func;
- int offset;
- int value;
-} sdreg_t;
-
-/* Common msglevel constants */
-#define SDH_ERROR_VAL 0x0001 /* Error */
-#define SDH_TRACE_VAL 0x0002 /* Trace */
-#define SDH_INFO_VAL 0x0004 /* Info */
-#define SDH_DEBUG_VAL 0x0008 /* Debug */
-#define SDH_DATA_VAL 0x0010 /* Data */
-#define SDH_CTRL_VAL 0x0020 /* Control Regs */
-#define SDH_LOG_VAL 0x0040 /* Enable bcmlog */
-#define SDH_DMA_VAL 0x0080 /* DMA */
-
-#define NUM_PREV_TRANSACTIONS 16
-
-#endif /* _sdiovar_h_ */
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
index 1827b0bf920..821206d3e53 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
@@ -16,336 +16,325 @@
#include <linux/kernel.h>
#include <linux/if_arp.h>
-
-#include <bcmutils.h>
-
-#include <asm/uaccess.h>
-
-#include <dngl_stats.h>
-#include <dhd.h>
-#include <dhdioctl.h>
-#include <wlioctl.h>
-
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <linux/ieee80211.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/uaccess.h>
#include <net/cfg80211.h>
-
#include <net/rtnetlink.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/firmware.h>
-#include <wl_cfg80211.h>
-void sdioh_sdio_set_host_pm_flags(int flag);
+#include <brcmu_utils.h>
+#include <defs.h>
+#include <brcmu_wifi.h>
+#include "dhd.h"
+#include "wl_cfg80211.h"
static struct sdio_func *cfg80211_sdio_func;
-static struct wl_dev *wl_cfg80211_dev;
+static struct brcmf_cfg80211_dev *cfg80211_dev;
static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
-u32 wl_dbg_level = WL_DBG_ERR;
-
-#define WL_4329_FW_FILE "brcm/bcm4329-fullmac-4.bin"
-#define WL_4329_NVRAM_FILE "brcm/bcm4329-fullmac-4.txt"
+u32 brcmf_dbg_level = WL_DBG_ERR;
/*
** cfg80211_ops api/callback list
*/
-static s32 wl_cfg80211_change_iface(struct wiphy *wiphy,
- struct net_device *ndev,
- enum nl80211_iftype type, u32 *flags,
- struct vif_params *params);
-static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request,
- struct cfg80211_ssid *this_ssid);
-static s32 wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request);
-static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
-static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_ibss_params *params);
-static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy,
- struct net_device *dev);
-static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
- struct net_device *dev, u8 *mac,
- struct station_info *sinfo);
-static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
- struct net_device *dev, bool enabled,
- s32 timeout);
-static s32 wl_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
- struct net_device *dev,
- const u8 *addr,
- const struct cfg80211_bitrate_mask
- *mask);
-static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_connect_params *sme);
-static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
- u16 reason_code);
-static s32 wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+static s32 brcmf_cfg80211_change_iface(struct wiphy *wiphy,
+ struct net_device *ndev,
+ enum nl80211_iftype type, u32 *flags,
+ struct vif_params *params);
+static s32 __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ struct cfg80211_ssid *this_ssid);
+static s32 brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request);
+static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
+static s32 brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_ibss_params *params);
+static s32 brcmf_cfg80211_leave_ibss(struct wiphy *wiphy,
+ struct net_device *dev);
+static s32 brcmf_cfg80211_get_station(struct wiphy *wiphy,
+ struct net_device *dev, u8 *mac,
+ struct station_info *sinfo);
+static s32 brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy,
+ struct net_device *dev, bool enabled,
+ s32 timeout);
+static s32 brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr,
+ const struct cfg80211_bitrate_mask
+ *mask);
+static int brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_connect_params *sme);
+static s32 brcmf_cfg80211_disconnect(struct wiphy *wiphy,
+ struct net_device *dev,
+ u16 reason_code);
+static s32 brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
enum nl80211_tx_power_setting type,
s32 dbm);
-static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
-static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
+static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm);
+static s32 brcmf_cfg80211_config_default_key(struct wiphy *wiphy,
struct net_device *dev, u8 key_idx,
bool unicast, bool multicast);
-static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+static s32 brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, bool pairwise, const u8 *mac_addr,
struct key_params *params);
-static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+static s32 brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, bool pairwise, const u8 *mac_addr);
-static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+static s32 brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, bool pairwise, const u8 *mac_addr,
void *cookie, void (*callback) (void *cookie,
struct
key_params *
params));
-static s32 wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+static s32 brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
struct net_device *dev,
u8 key_idx);
-static s32 wl_cfg80211_resume(struct wiphy *wiphy);
-static s32 wl_cfg80211_suspend(struct wiphy *wiphy);
-static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+static s32 brcmf_cfg80211_resume(struct wiphy *wiphy);
+static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
+ struct cfg80211_wowlan *wow);
+static s32 brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_pmksa *pmksa);
-static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+static s32 brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_pmksa *pmksa);
-static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
+static s32 brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy,
struct net_device *dev);
/*
** event & event Q handlers for cfg80211 interfaces
*/
-static s32 wl_create_event_handler(struct wl_priv *wl);
-static void wl_destroy_event_handler(struct wl_priv *wl);
-static s32 wl_event_handler(void *data);
-static void wl_init_eq(struct wl_priv *wl);
-static void wl_flush_eq(struct wl_priv *wl);
-static void wl_lock_eq(struct wl_priv *wl);
-static void wl_unlock_eq(struct wl_priv *wl);
-static void wl_init_eq_lock(struct wl_priv *wl);
-static void wl_init_eloop_handler(struct wl_event_loop *el);
-static struct wl_event_q *wl_deq_event(struct wl_priv *wl);
-static s32 wl_enq_event(struct wl_priv *wl, u32 type,
- const wl_event_msg_t *msg, void *data);
-static void wl_put_event(struct wl_event_q *e);
-static void wl_wakeup_event(struct wl_priv *wl);
-static s32 wl_notify_connect_status(struct wl_priv *wl,
- struct net_device *ndev,
- const wl_event_msg_t *e, void *data);
-static s32 wl_notify_roaming_status(struct wl_priv *wl,
- struct net_device *ndev,
- const wl_event_msg_t *e, void *data);
-static s32 wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
- const wl_event_msg_t *e, void *data);
-static s32 wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
- const wl_event_msg_t *e, void *data,
- bool completed);
-static s32 wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
- const wl_event_msg_t *e, void *data);
-static s32 wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev,
- const wl_event_msg_t *e, void *data);
+static s32 brcmf_create_event_handler(struct brcmf_cfg80211_priv *cfg_priv);
+static void brcmf_destroy_event_handler(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_event_handler(void *data);
+static void brcmf_init_eq(struct brcmf_cfg80211_priv *cfg_priv);
+static void brcmf_flush_eq(struct brcmf_cfg80211_priv *cfg_priv);
+static void brcmf_lock_eq(struct brcmf_cfg80211_priv *cfg_priv);
+static void brcmf_unlock_eq(struct brcmf_cfg80211_priv *cfg_priv);
+static void brcmf_init_eq_lock(struct brcmf_cfg80211_priv *cfg_priv);
+static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el);
+static struct brcmf_cfg80211_event_q *
+brcmf_deq_event(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 type,
+ const struct brcmf_event_msg *msg, void *data);
+static void brcmf_put_event(struct brcmf_cfg80211_event_q *e);
+static void brcmf_wakeup_event(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_notify_connect_status(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e,
+ void *data);
+static s32 brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e,
+ void *data);
+static s32 brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e,
+ void *data);
+static s32 brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data,
+ bool completed);
+static s32 brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data);
+static s32 brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data);
/*
** register/deregister sdio function
*/
-struct sdio_func *wl_cfg80211_get_sdio_func(void);
-static void wl_clear_sdio_func(void);
+static void brcmf_clear_sdio_func(void);
/*
** ioctl utilities
*/
-static s32 wl_dev_bufvar_get(struct net_device *dev, s8 *name, s8 *buf,
+static s32 brcmf_dev_bufvar_get(struct net_device *dev, s8 *name, s8 *buf,
s32 buf_len);
-static __used s32 wl_dev_bufvar_set(struct net_device *dev, s8 *name,
+static __used s32 brcmf_dev_bufvar_set(struct net_device *dev, s8 *name,
s8 *buf, s32 len);
-static s32 wl_dev_intvar_set(struct net_device *dev, s8 *name, s32 val);
-static s32 wl_dev_intvar_get(struct net_device *dev, s8 *name,
+static s32 brcmf_dev_intvar_set(struct net_device *dev, s8 *name, s32 val);
+static s32 brcmf_dev_intvar_get(struct net_device *dev, s8 *name,
s32 *retval);
-static s32 wl_dev_ioctl(struct net_device *dev, u32 cmd, void *arg,
+static s32 brcmf_dev_ioctl(struct net_device *dev, u32 cmd, void *arg,
u32 len);
/*
** cfg80211 set_wiphy_params utilities
*/
-static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold);
-static s32 wl_set_rts(struct net_device *dev, u32 frag_threshold);
-static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l);
+static s32 brcmf_set_frag(struct net_device *dev, u32 frag_threshold);
+static s32 brcmf_set_rts(struct net_device *dev, u32 frag_threshold);
+static s32 brcmf_set_retry(struct net_device *dev, u32 retry, bool l);
/*
** wl profile utilities
*/
-static s32 wl_update_prof(struct wl_priv *wl, const wl_event_msg_t *e,
- void *data, s32 item);
-static void *wl_read_prof(struct wl_priv *wl, s32 item);
-static void wl_init_prof(struct wl_profile *prof);
+static s32 brcmf_update_prof(struct brcmf_cfg80211_priv *cfg_priv,
+ const struct brcmf_event_msg *e,
+ void *data, s32 item);
+static void *brcmf_read_prof(struct brcmf_cfg80211_priv *cfg_priv, s32 item);
+static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof);
/*
** cfg80211 connect utilities
*/
-static s32 wl_set_wpa_version(struct net_device *dev,
+static s32 brcmf_set_wpa_version(struct net_device *dev,
struct cfg80211_connect_params *sme);
-static s32 wl_set_auth_type(struct net_device *dev,
+static s32 brcmf_set_auth_type(struct net_device *dev,
struct cfg80211_connect_params *sme);
-static s32 wl_set_set_cipher(struct net_device *dev,
+static s32 brcmf_set_set_cipher(struct net_device *dev,
struct cfg80211_connect_params *sme);
-static s32 wl_set_key_mgmt(struct net_device *dev,
+static s32 brcmf_set_key_mgmt(struct net_device *dev,
struct cfg80211_connect_params *sme);
-static s32 wl_set_set_sharedkey(struct net_device *dev,
+static s32 brcmf_set_set_sharedkey(struct net_device *dev,
struct cfg80211_connect_params *sme);
-static s32 wl_get_assoc_ies(struct wl_priv *wl);
-static void wl_clear_assoc_ies(struct wl_priv *wl);
-static void wl_ch_to_chanspec(int ch,
- struct wl_join_params *join_params, size_t *join_params_size);
+static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv);
+static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv);
+static void brcmf_ch_to_chanspec(int ch,
+ struct brcmf_join_params *join_params, size_t *join_params_size);
/*
** information element utilities
*/
-static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v);
-static s32 wl_mode_to_nl80211_iftype(s32 mode);
-static struct wireless_dev *wl_alloc_wdev(s32 sizeof_iface,
+static __used s32 brcmf_add_ie(struct brcmf_cfg80211_priv *cfg_priv,
+ u8 t, u8 l, u8 *v);
+static s32 brcmf_mode_to_nl80211_iftype(s32 mode);
+static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
struct device *dev);
-static void wl_free_wdev(struct wl_priv *wl);
-static s32 wl_inform_bss(struct wl_priv *wl);
-static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi);
-static s32 wl_update_bss_info(struct wl_priv *wl);
-static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
+ struct brcmf_bss_info *bi);
+static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_add_keyext(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, const u8 *mac_addr,
struct key_params *params);
/*
** key endianness swap utilities
*/
-static void swap_key_from_BE(struct wl_wsec_key *key);
-static void swap_key_to_BE(struct wl_wsec_key *key);
+static void swap_key_from_BE(struct brcmf_wsec_key *key);
+static void swap_key_to_BE(struct brcmf_wsec_key *key);
/*
-** wl_priv memory init/deinit utilities
+** brcmf_cfg80211_priv memory init/deinit utilities
*/
-static s32 wl_init_priv_mem(struct wl_priv *wl);
-static void wl_deinit_priv_mem(struct wl_priv *wl);
+static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_priv *cfg_priv);
+static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_priv *cfg_priv);
-static void wl_delay(u32 ms);
+static void brcmf_delay(u32 ms);
/*
** store/restore cfg80211 instance data
*/
-static void wl_set_drvdata(struct wl_dev *dev, void *data);
-static void *wl_get_drvdata(struct wl_dev *dev);
+static void brcmf_set_drvdata(struct brcmf_cfg80211_dev *dev, void *data);
+static void *brcmf_get_drvdata(struct brcmf_cfg80211_dev *dev);
/*
** ibss mode utilities
*/
-static bool wl_is_ibssmode(struct wl_priv *wl);
+static bool brcmf_is_ibssmode(struct brcmf_cfg80211_priv *cfg_priv);
/*
** dongle up/down, default configuration utilities
*/
-static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e);
-static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e);
-static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e);
-static void wl_link_down(struct wl_priv *wl);
-static s32 wl_dongle_mode(struct net_device *ndev, s32 iftype);
-static s32 __wl_cfg80211_up(struct wl_priv *wl);
-static s32 __wl_cfg80211_down(struct wl_priv *wl);
-static s32 wl_dongle_probecap(struct wl_priv *wl);
-static void wl_init_conf(struct wl_conf *conf);
+static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv,
+ const struct brcmf_event_msg *e);
+static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv,
+ const struct brcmf_event_msg *e);
+static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv,
+ const struct brcmf_event_msg *e);
+static void brcmf_link_down(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype);
+static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_priv *cfg_priv);
+static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf);
/*
** dongle configuration utilities
*/
-#ifndef EMBEDDED_PLATFORM
-static s32 wl_dongle_mode(struct net_device *ndev, s32 iftype);
-static s32 wl_dongle_country(struct net_device *ndev, u8 ccode);
-static s32 wl_dongle_up(struct net_device *ndev, u32 up);
-static s32 wl_dongle_power(struct net_device *ndev, u32 power_mode);
-static s32 wl_dongle_glom(struct net_device *ndev, u32 glom,
- u32 dongle_align);
-static s32 wl_dongle_offload(struct net_device *ndev, s32 arpoe,
- s32 arp_ol);
-static s32 wl_pattern_atoh(s8 *src, s8 *dst);
-static s32 wl_dongle_filter(struct net_device *ndev, u32 filter_mode);
-static s32 wl_update_wiphybands(struct wl_priv *wl);
-#endif /* !EMBEDDED_PLATFORM */
-
-static s32 wl_dongle_eventmsg(struct net_device *ndev);
-static s32 wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
- s32 scan_unassoc_time, s32 scan_passive_time);
-static s32 wl_config_dongle(struct wl_priv *wl, bool need_lock);
-static s32 wl_dongle_roam(struct net_device *ndev, u32 roamvar,
+static s32 brcmf_dongle_eventmsg(struct net_device *ndev);
+static s32 brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
+ s32 scan_unassoc_time, s32 scan_passive_time);
+static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv,
+ bool need_lock);
+static s32 brcmf_dongle_roam(struct net_device *ndev, u32 roamvar,
u32 bcn_timeout);
/*
** iscan handler
*/
-static void wl_iscan_timer(unsigned long data);
-static void wl_term_iscan(struct wl_priv *wl);
-static s32 wl_init_iscan(struct wl_priv *wl);
-static s32 wl_iscan_thread(void *data);
-static s32 wl_dev_iovar_setbuf(struct net_device *dev, s8 *iovar,
+static void brcmf_iscan_timer(unsigned long data);
+static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_iscan_thread(void *data);
+static s32 brcmf_dev_iovar_setbuf(struct net_device *dev, s8 *iovar,
void *param, s32 paramlen, void *bufptr,
s32 buflen);
-static s32 wl_dev_iovar_getbuf(struct net_device *dev, s8 *iovar,
+static s32 brcmf_dev_iovar_getbuf(struct net_device *dev, s8 *iovar,
void *param, s32 paramlen, void *bufptr,
s32 buflen);
-static s32 wl_run_iscan(struct wl_iscan_ctrl *iscan, struct wlc_ssid *ssid,
- u16 action);
-static s32 wl_do_iscan(struct wl_priv *wl);
-static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan);
-static s32 wl_invoke_iscan(struct wl_priv *wl);
-static s32 wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status,
- struct wl_scan_results **bss_list);
-static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted);
-static void wl_init_iscan_eloop(struct wl_iscan_eloop *el);
-static s32 wl_iscan_done(struct wl_priv *wl);
-static s32 wl_iscan_pending(struct wl_priv *wl);
-static s32 wl_iscan_inprogress(struct wl_priv *wl);
-static s32 wl_iscan_aborted(struct wl_priv *wl);
-
-/*
-** fw/nvram downloading handler
-*/
-static void wl_init_fw(struct wl_fw_ctrl *fw);
+static s32 brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
+ struct brcmf_ssid *ssid, u16 action);
+static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan);
+static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan,
+ u32 *status,
+ struct brcmf_scan_results **bss_list);
+static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
+ bool aborted);
+static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el);
+static s32 brcmf_iscan_done(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv);
+static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_priv *cfg_priv);
/*
* find most significant bit set
*/
-static __used u32 wl_find_msb(u16 bit16);
+static __used u32 brcmf_find_msb(u16 bit16);
/*
* update pmklist to dongle
*/
-static __used s32 wl_update_pmklist(struct net_device *dev,
- struct wl_pmk_list *pmk_list, s32 err);
+static __used s32 brcmf_update_pmklist(struct net_device *dev,
+ struct brcmf_cfg80211_pmk_list *pmk_list,
+ s32 err);
-static void wl_set_mpc(struct net_device *ndev, int mpc);
+static void brcmf_set_mpc(struct net_device *ndev, int mpc);
/*
* debugfs support
*/
-static int wl_debugfs_add_netdev_params(struct wl_priv *wl);
-static void wl_debugfs_remove_netdev(struct wl_priv *wl);
+static int
+brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_priv *cfg_priv);
+static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_priv *cfg_priv);
-#define WL_PRIV_GET() \
+#define WL_PRIV_GET() \
({ \
- struct wl_iface *ci; \
- if (unlikely(!(wl_cfg80211_dev && \
- (ci = wl_get_drvdata(wl_cfg80211_dev))))) { \
+ struct brcmf_cfg80211_iface *ci = brcmf_get_drvdata(cfg80211_dev); \
+ if (unlikely(!ci)) { \
WL_ERR("wl_cfg80211_dev is unavailable\n"); \
BUG(); \
- } \
- ci_to_wl(ci); \
+ } \
+ ci->cfg_priv; \
})
#define CHECK_SYS_UP() \
do { \
- struct wl_priv *wl = wiphy_to_wl(wiphy); \
- if (unlikely(!test_bit(WL_STATUS_READY, &wl->status))) { \
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); \
+ if (unlikely(!test_bit(WL_STATUS_READY, &cfg_priv->status))) { \
WL_INFO("device is not ready : status (%d)\n", \
- (int)wl->status); \
+ (int)cfg_priv->status); \
return -EIO; \
} \
} while (0)
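
A sketch (not part of this patch) of how the two macros above are meant to open a cfg80211 handler: WL_PRIV_GET() fetches the driver private data (and BUG()s if the cfg80211 device is gone), and CHECK_SYS_UP() returns -EIO from the enclosing function when the dongle is not ready; it relies on a local 'wiphy' being in scope. example_cfg80211_op() is hypothetical.

static s32 example_cfg80211_op(struct wiphy *wiphy, struct net_device *ndev)
{
	struct brcmf_cfg80211_priv *cfg_priv = WL_PRIV_GET();

	CHECK_SYS_UP();
	/* ... issue ioctls against ndev / update cfg_priv state ... */
	return 0;
}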
-extern int dhd_wait_pend8021x(struct net_device *dev);
#define CHAN2G(_channel, _freq, _flags) { \
.band = IEEE80211_BAND_2GHZ, \
.center_freq = (_freq), \
@@ -373,18 +362,18 @@ extern int dhd_wait_pend8021x(struct net_device *dev);
}
static struct ieee80211_rate __wl_rates[] = {
- RATETAB_ENT(WLC_RATE_1M, 0),
- RATETAB_ENT(WLC_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE),
- RATETAB_ENT(WLC_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE),
- RATETAB_ENT(WLC_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE),
- RATETAB_ENT(WLC_RATE_6M, 0),
- RATETAB_ENT(WLC_RATE_9M, 0),
- RATETAB_ENT(WLC_RATE_12M, 0),
- RATETAB_ENT(WLC_RATE_18M, 0),
- RATETAB_ENT(WLC_RATE_24M, 0),
- RATETAB_ENT(WLC_RATE_36M, 0),
- RATETAB_ENT(WLC_RATE_48M, 0),
- RATETAB_ENT(WLC_RATE_54M, 0),
+ RATETAB_ENT(BRCM_RATE_1M, 0),
+ RATETAB_ENT(BRCM_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(BRCM_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(BRCM_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATETAB_ENT(BRCM_RATE_6M, 0),
+ RATETAB_ENT(BRCM_RATE_9M, 0),
+ RATETAB_ENT(BRCM_RATE_12M, 0),
+ RATETAB_ENT(BRCM_RATE_18M, 0),
+ RATETAB_ENT(BRCM_RATE_24M, 0),
+ RATETAB_ENT(BRCM_RATE_36M, 0),
+ RATETAB_ENT(BRCM_RATE_48M, 0),
+ RATETAB_ENT(BRCM_RATE_54M, 0),
};
#define wl_a_rates (__wl_rates + 4)
@@ -521,7 +510,7 @@ static const u32 __wl_cipher_suites[] = {
WLAN_CIPHER_SUITE_AES_CMAC,
};
-static void swap_key_from_BE(struct wl_wsec_key *key)
+static void swap_key_from_BE(struct brcmf_wsec_key *key)
{
key->index = cpu_to_le32(key->index);
key->len = cpu_to_le32(key->len);
@@ -532,7 +521,7 @@ static void swap_key_from_BE(struct wl_wsec_key *key)
key->iv_initialized = cpu_to_le32(key->iv_initialized);
}
-static void swap_key_to_BE(struct wl_wsec_key *key)
+static void swap_key_to_BE(struct brcmf_wsec_key *key)
{
key->index = le32_to_cpu(key->index);
key->len = le32_to_cpu(key->len);
@@ -544,10 +533,10 @@ static void swap_key_to_BE(struct wl_wsec_key *key)
}
static s32
-wl_dev_ioctl(struct net_device *dev, u32 cmd, void *arg, u32 len)
+brcmf_dev_ioctl(struct net_device *dev, u32 cmd, void *arg, u32 len)
{
struct ifreq ifr;
- struct wl_ioctl ioc;
+ struct brcmf_ioctl ioc;
mm_segment_t fs;
s32 err = 0;
@@ -567,11 +556,11 @@ wl_dev_ioctl(struct net_device *dev, u32 cmd, void *arg, u32 len)
}
static s32
-wl_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
+brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
- struct wl_priv *wl = wiphy_to_wl(wiphy);
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
struct wireless_dev *wdev;
s32 infra = 0;
s32 err = 0;
@@ -586,11 +575,11 @@ wl_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
type);
return -EOPNOTSUPP;
case NL80211_IFTYPE_ADHOC:
- wl->conf->mode = WL_MODE_IBSS;
+ cfg_priv->conf->mode = WL_MODE_IBSS;
infra = 0;
break;
case NL80211_IFTYPE_STATION:
- wl->conf->mode = WL_MODE_BSS;
+ cfg_priv->conf->mode = WL_MODE_BSS;
infra = 1;
break;
default:
@@ -599,7 +588,7 @@ wl_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
}
infra = cpu_to_le32(infra);
- err = wl_dev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra));
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_INFRA, &infra, sizeof(infra));
if (unlikely(err)) {
WL_ERR("WLC_SET_INFRA error (%d)\n", err);
err = -EAGAIN;
@@ -609,7 +598,7 @@ wl_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
}
WL_INFO("IF Type = %s\n",
- (wl->conf->mode == WL_MODE_IBSS) ? "Adhoc" : "Infra");
+ (cfg_priv->conf->mode == WL_MODE_IBSS) ? "Adhoc" : "Infra");
done:
WL_TRACE("Exit\n");
@@ -617,7 +606,8 @@ done:
return err;
}
-static void wl_iscan_prep(struct wl_scan_params *params, struct wlc_ssid *ssid)
+static void wl_iscan_prep(struct brcmf_scan_params *params,
+ struct brcmf_ssid *ssid)
{
memcpy(params->bssid, ether_bcast, ETH_ALEN);
params->bss_type = DOT11_BSSTYPE_ANY;
@@ -633,58 +623,59 @@ static void wl_iscan_prep(struct wl_scan_params *params, struct wlc_ssid *ssid)
params->passive_time = cpu_to_le32(params->passive_time);
params->home_time = cpu_to_le32(params->home_time);
if (ssid && ssid->SSID_len)
- memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
+ memcpy(&params->ssid, ssid, sizeof(struct brcmf_ssid));
}
static s32
-wl_dev_iovar_setbuf(struct net_device *dev, s8 * iovar, void *param,
+brcmf_dev_iovar_setbuf(struct net_device *dev, s8 * iovar, void *param,
s32 paramlen, void *bufptr, s32 buflen)
{
s32 iolen;
- iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+ iolen = brcmu_mkiovar(iovar, param, paramlen, bufptr, buflen);
BUG_ON(!iolen);
- return wl_dev_ioctl(dev, WLC_SET_VAR, bufptr, iolen);
+ return brcmf_dev_ioctl(dev, BRCMF_C_SET_VAR, bufptr, iolen);
}
static s32
-wl_dev_iovar_getbuf(struct net_device *dev, s8 * iovar, void *param,
+brcmf_dev_iovar_getbuf(struct net_device *dev, s8 * iovar, void *param,
s32 paramlen, void *bufptr, s32 buflen)
{
s32 iolen;
- iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
+ iolen = brcmu_mkiovar(iovar, param, paramlen, bufptr, buflen);
BUG_ON(!iolen);
- return wl_dev_ioctl(dev, WLC_GET_VAR, bufptr, buflen);
+ return brcmf_dev_ioctl(dev, BRCMF_C_GET_VAR, bufptr, buflen);
}
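Both iovar helpers above rely on brcmu_mkiovar() to pack a named firmware variable ("iovar") and its parameter bytes into one buffer before the generic BRCMF_C_SET_VAR/BRCMF_C_GET_VAR ioctl is issued. The packing itself is not visible in this hunk; the following standalone sketch (a hypothetical pack_iovar(), not the driver's helper) shows the usual layout, name plus NUL terminator followed by the raw parameter:

#include <stdio.h>
#include <string.h>

/* Pack "name\0" followed by param bytes into buf; return the total length,
 * or 0 if the buffer is too small. */
static unsigned int pack_iovar(const char *name, const void *param,
                               unsigned int plen, void *buf, unsigned int blen)
{
        unsigned int nlen = strlen(name) + 1;   /* include the NUL */

        if (nlen + plen > blen)
                return 0;
        memcpy(buf, name, nlen);
        memcpy((char *)buf + nlen, param, plen);
        return nlen + plen;
}

int main(void)
{
        unsigned char buf[64];
        int one = 1;
        unsigned int len = pack_iovar("mpc", &one, sizeof(one),
                                      buf, sizeof(buf));

        printf("packed %u bytes\n", len);       /* "mpc\0" + 4 bytes -> 8 */
        return 0;
}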
static s32
-wl_run_iscan(struct wl_iscan_ctrl *iscan, struct wlc_ssid *ssid, u16 action)
+brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
+ struct brcmf_ssid *ssid, u16 action)
{
- s32 params_size =
- (WL_SCAN_PARAMS_FIXED_SIZE + offsetof(wl_iscan_params_t, params));
- struct wl_iscan_params *params;
+ s32 params_size = (BRCMF_SCAN_PARAMS_FIXED_SIZE +
+ offsetof(struct brcmf_iscan_params, params));
+ struct brcmf_iscan_params *params;
s32 err = 0;
if (ssid && ssid->SSID_len)
- params_size += sizeof(struct wlc_ssid);
+ params_size += sizeof(struct brcmf_ssid);
params = kzalloc(params_size, GFP_KERNEL);
if (unlikely(!params))
return -ENOMEM;
- BUG_ON(params_size >= WLC_IOCTL_SMLEN);
+ BUG_ON(params_size >= BRCMF_C_IOCTL_SMLEN);
wl_iscan_prep(&params->params, ssid);
- params->version = cpu_to_le32(ISCAN_REQ_VERSION);
+ params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION);
params->action = cpu_to_le16(action);
params->scan_duration = cpu_to_le16(0);
- /* params_size += offsetof(wl_iscan_params_t, params); */
- err = wl_dev_iovar_setbuf(iscan->dev, "iscan", params, params_size,
- iscan->ioctl_buf, WLC_IOCTL_SMLEN);
+ /* params_size += offsetof(struct brcmf_iscan_params, params); */
+ err = brcmf_dev_iovar_setbuf(iscan->dev, "iscan", params, params_size,
+ iscan->ioctl_buf, BRCMF_C_IOCTL_SMLEN);
if (unlikely(err)) {
if (err == -EBUSY) {
WL_INFO("system busy : iscan canceled\n");
@@ -696,11 +687,11 @@ wl_run_iscan(struct wl_iscan_ctrl *iscan, struct wlc_ssid *ssid, u16 action)
return err;
}
-static s32 wl_do_iscan(struct wl_priv *wl)
+static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
- struct net_device *ndev = wl_to_ndev(wl);
- struct wlc_ssid ssid;
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
+ struct net_device *ndev = cfg_to_ndev(cfg_priv);
+ struct brcmf_ssid ssid;
s32 passive_scan;
s32 err = 0;
@@ -709,16 +700,16 @@ static s32 wl_do_iscan(struct wl_priv *wl)
iscan->state = WL_ISCAN_STATE_SCANING;
- passive_scan = wl->active_scan ? 0 : 1;
- err = wl_dev_ioctl(wl_to_ndev(wl), WLC_SET_PASSIVE_SCAN,
+ passive_scan = cfg_priv->active_scan ? 0 : 1;
+ err = brcmf_dev_ioctl(cfg_to_ndev(cfg_priv), BRCMF_C_SET_PASSIVE_SCAN,
&passive_scan, sizeof(passive_scan));
if (unlikely(err)) {
WL_ERR("error (%d)\n", err);
return err;
}
- wl_set_mpc(ndev, 0);
- wl->iscan_kickstart = true;
- wl_run_iscan(iscan, &ssid, WL_SCAN_ACTION_START);
+ brcmf_set_mpc(ndev, 0);
+ cfg_priv->iscan_kickstart = true;
+ brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START);
mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
iscan->timer_on = 1;
@@ -726,30 +717,30 @@ static s32 wl_do_iscan(struct wl_priv *wl)
}
static s32
-__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+__brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request,
struct cfg80211_ssid *this_ssid)
{
- struct wl_priv *wl = ndev_to_wl(ndev);
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
struct cfg80211_ssid *ssids;
- struct wl_scan_req *sr = wl_to_sr(wl);
+ struct brcmf_cfg80211_scan_req *sr = cfg_priv->scan_req_int;
s32 passive_scan;
bool iscan_req;
bool spec_scan;
s32 err = 0;
- if (unlikely(test_bit(WL_STATUS_SCANNING, &wl->status))) {
- WL_ERR("Scanning already : status (%d)\n", (int)wl->status);
+ if (unlikely(test_bit(WL_STATUS_SCANNING, &cfg_priv->status))) {
+ WL_ERR("Scanning already : status (%lu)\n", cfg_priv->status);
return -EAGAIN;
}
- if (unlikely(test_bit(WL_STATUS_SCAN_ABORTING, &wl->status))) {
- WL_ERR("Scanning being aborted : status (%d)\n",
- (int)wl->status);
+ if (unlikely(test_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status))) {
+ WL_ERR("Scanning being aborted : status (%lu)\n",
+ cfg_priv->status);
return -EAGAIN;
}
- if (test_bit(WL_STATUS_CONNECTING, &wl->status)) {
- WL_ERR("Connecting : status (%d)\n",
- (int)wl->status);
+ if (test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) {
+ WL_ERR("Connecting : status (%lu)\n",
+ cfg_priv->status);
return -EAGAIN;
}
@@ -758,7 +749,7 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
if (request) {
/* scan bss */
ssids = request->ssids;
- if (wl->iscan_on && (!ssids || !ssids->ssid_len))
+ if (cfg_priv->iscan_on && (!ssids || !ssids->ssid_len))
iscan_req = true;
} else {
/* scan in ibss */
@@ -766,10 +757,10 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
ssids = this_ssid;
}
- wl->scan_request = request;
- set_bit(WL_STATUS_SCANNING, &wl->status);
+ cfg_priv->scan_request = request;
+ set_bit(WL_STATUS_SCANNING, &cfg_priv->status);
if (iscan_req) {
- err = wl_do_iscan(wl);
+ err = brcmf_do_iscan(cfg_priv);
if (likely(!err))
return err;
else
@@ -788,15 +779,15 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
WL_SCAN("Broadcast scan\n");
}
- passive_scan = wl->active_scan ? 0 : 1;
- err = wl_dev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
+ passive_scan = cfg_priv->active_scan ? 0 : 1;
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_PASSIVE_SCAN,
&passive_scan, sizeof(passive_scan));
if (unlikely(err)) {
WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
goto scan_out;
}
- wl_set_mpc(ndev, 0);
- err = wl_dev_ioctl(ndev, WLC_SCAN, &sr->ssid,
+ brcmf_set_mpc(ndev, 0);
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_SCAN, &sr->ssid,
sizeof(sr->ssid));
if (err) {
if (err == -EBUSY) {
@@ -805,7 +796,7 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
} else {
WL_ERR("WLC_SCAN error (%d)\n", err);
}
- wl_set_mpc(ndev, 1);
+ brcmf_set_mpc(ndev, 1);
goto scan_out;
}
}
@@ -813,13 +804,13 @@ __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
return 0;
scan_out:
- clear_bit(WL_STATUS_SCANNING, &wl->status);
- wl->scan_request = NULL;
+ clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
+ cfg_priv->scan_request = NULL;
return err;
}
static s32
-wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request)
{
s32 err = 0;
@@ -828,7 +819,7 @@ wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
CHECK_SYS_UP();
- err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
+ err = __brcmf_cfg80211_scan(wiphy, ndev, request, NULL);
if (unlikely(err))
WL_ERR("scan error (%d)\n", err);
@@ -836,17 +827,18 @@ wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
return err;
}
-static s32 wl_dev_intvar_set(struct net_device *dev, s8 *name, s32 val)
+static s32 brcmf_dev_intvar_set(struct net_device *dev, s8 *name, s32 val)
{
- s8 buf[WLC_IOCTL_SMLEN];
+ s8 buf[BRCMF_C_IOCTL_SMLEN];
u32 len;
s32 err = 0;
val = cpu_to_le32(val);
- len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
+ len = brcmu_mkiovar(name, (char *)(&val), sizeof(val), buf,
+ sizeof(buf));
BUG_ON(!len);
- err = wl_dev_ioctl(dev, WLC_SET_VAR, buf, len);
+ err = brcmf_dev_ioctl(dev, BRCMF_C_SET_VAR, buf, len);
if (unlikely(err))
WL_ERR("error (%d)\n", err);
@@ -854,10 +846,10 @@ static s32 wl_dev_intvar_set(struct net_device *dev, s8 *name, s32 val)
}
static s32
-wl_dev_intvar_get(struct net_device *dev, s8 *name, s32 *retval)
+brcmf_dev_intvar_get(struct net_device *dev, s8 *name, s32 *retval)
{
union {
- s8 buf[WLC_IOCTL_SMLEN];
+ s8 buf[BRCMF_C_IOCTL_SMLEN];
s32 val;
} var;
u32 len;
@@ -865,10 +857,10 @@ wl_dev_intvar_get(struct net_device *dev, s8 *name, s32 *retval)
s32 err = 0;
len =
- bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var),
+ brcmu_mkiovar(name, (char *)(&data_null), 0, (char *)(&var),
sizeof(var.buf));
BUG_ON(!len);
- err = wl_dev_ioctl(dev, WLC_GET_VAR, &var, len);
+ err = brcmf_dev_ioctl(dev, BRCMF_C_GET_VAR, &var, len);
if (unlikely(err))
WL_ERR("error (%d)\n", err);
@@ -877,35 +869,35 @@ wl_dev_intvar_get(struct net_device *dev, s8 *name, s32 *retval)
return err;
}
-static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
+static s32 brcmf_set_rts(struct net_device *dev, u32 rts_threshold)
{
s32 err = 0;
- err = wl_dev_intvar_set(dev, "rtsthresh", rts_threshold);
+ err = brcmf_dev_intvar_set(dev, "rtsthresh", rts_threshold);
if (unlikely(err))
WL_ERR("Error (%d)\n", err);
return err;
}
-static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
+static s32 brcmf_set_frag(struct net_device *dev, u32 frag_threshold)
{
s32 err = 0;
- err = wl_dev_intvar_set(dev, "fragthresh", frag_threshold);
+ err = brcmf_dev_intvar_set(dev, "fragthresh", frag_threshold);
if (unlikely(err))
WL_ERR("Error (%d)\n", err);
return err;
}
-static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
+static s32 brcmf_set_retry(struct net_device *dev, u32 retry, bool l)
{
s32 err = 0;
- u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
+ u32 cmd = (l ? BRCM_SET_LRL : BRCM_SET_SRL);
retry = cpu_to_le32(retry);
- err = wl_dev_ioctl(dev, cmd, &retry, sizeof(retry));
+ err = brcmf_dev_ioctl(dev, cmd, &retry, sizeof(retry));
if (unlikely(err)) {
WL_ERR("cmd (%d) , error (%d)\n", cmd, err);
return err;
@@ -913,40 +905,40 @@ static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
return err;
}
-static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
- struct wl_priv *wl = wiphy_to_wl(wiphy);
- struct net_device *ndev = wl_to_ndev(wl);
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = cfg_to_ndev(cfg_priv);
s32 err = 0;
WL_TRACE("Enter\n");
CHECK_SYS_UP();
if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
- (wl->conf->rts_threshold != wiphy->rts_threshold)) {
- wl->conf->rts_threshold = wiphy->rts_threshold;
- err = wl_set_rts(ndev, wl->conf->rts_threshold);
+ (cfg_priv->conf->rts_threshold != wiphy->rts_threshold)) {
+ cfg_priv->conf->rts_threshold = wiphy->rts_threshold;
+ err = brcmf_set_rts(ndev, cfg_priv->conf->rts_threshold);
if (!err)
goto done;
}
if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
- (wl->conf->frag_threshold != wiphy->frag_threshold)) {
- wl->conf->frag_threshold = wiphy->frag_threshold;
- err = wl_set_frag(ndev, wl->conf->frag_threshold);
+ (cfg_priv->conf->frag_threshold != wiphy->frag_threshold)) {
+ cfg_priv->conf->frag_threshold = wiphy->frag_threshold;
+ err = brcmf_set_frag(ndev, cfg_priv->conf->frag_threshold);
if (!err)
goto done;
}
if (changed & WIPHY_PARAM_RETRY_LONG
- && (wl->conf->retry_long != wiphy->retry_long)) {
- wl->conf->retry_long = wiphy->retry_long;
- err = wl_set_retry(ndev, wl->conf->retry_long, true);
+ && (cfg_priv->conf->retry_long != wiphy->retry_long)) {
+ cfg_priv->conf->retry_long = wiphy->retry_long;
+ err = brcmf_set_retry(ndev, cfg_priv->conf->retry_long, true);
if (!err)
goto done;
}
if (changed & WIPHY_PARAM_RETRY_SHORT
- && (wl->conf->retry_short != wiphy->retry_short)) {
- wl->conf->retry_short = wiphy->retry_short;
- err = wl_set_retry(ndev, wl->conf->retry_short, false);
+ && (cfg_priv->conf->retry_short != wiphy->retry_short)) {
+ cfg_priv->conf->retry_short = wiphy->retry_short;
+ err = brcmf_set_retry(ndev, cfg_priv->conf->retry_short, false);
if (!err)
goto done;
}
@@ -957,11 +949,11 @@ done:
}
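brcmf_cfg80211_set_wiphy_params() above only touches firmware state for parameters that are both flagged in changed and different from the values cached in cfg_priv->conf. A small standalone sketch of that filtering pattern (hypothetical names, no real ioctls):

#include <stdio.h>
#include <stdint.h>

#define PARAM_RTS       (1u << 0)
#define PARAM_FRAG      (1u << 1)

struct demo_conf { uint32_t rts; uint32_t frag; };

/* Push only parameters that are flagged as changed AND differ from the
 * cached copy -- the same filter the wiphy-params handler applies. */
static int demo_apply(struct demo_conf *cached, const struct demo_conf *new,
                      uint32_t changed)
{
        int pushed = 0;

        if ((changed & PARAM_RTS) && cached->rts != new->rts) {
                cached->rts = new->rts;
                pushed++;               /* would issue an ioctl here */
        }
        if ((changed & PARAM_FRAG) && cached->frag != new->frag) {
                cached->frag = new->frag;
                pushed++;
        }
        return pushed;
}

int main(void)
{
        struct demo_conf cached = { 2347, 2346 };
        struct demo_conf new = { 1500, 2346 };

        printf("pushed=%d\n",
               demo_apply(&cached, &new, PARAM_RTS | PARAM_FRAG));
        /* prints: pushed=1 -- only the RTS threshold actually changed */
        return 0;
}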
static s32
-wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
+brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ibss_params *params)
{
- struct wl_priv *wl = wiphy_to_wl(wiphy);
- struct wl_join_params join_params;
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_join_params join_params;
size_t join_params_size = 0;
s32 err = 0;
s32 wsec = 0;
@@ -977,6 +969,8 @@ wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
return -EOPNOTSUPP;
}
+ set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+
if (params->bssid)
WL_CONN("BSSID: %02X %02X %02X %02X %02X %02X\n",
params->bssid[0], params->bssid[1], params->bssid[2],
@@ -1018,7 +1012,7 @@ wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
if (params->privacy)
wsec |= WEP_ENABLED;
- err = wl_dev_intvar_set(dev, "wsec", wsec);
+ err = brcmf_dev_intvar_set(dev, "wsec", wsec);
if (unlikely(err)) {
WL_ERR("wsec failed (%d)\n", err);
goto done;
@@ -1030,14 +1024,14 @@ wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
else
bcnprd = cpu_to_le32(100);
- err = wl_dev_ioctl(dev, WLC_SET_BCNPRD, &bcnprd, sizeof(bcnprd));
+ err = brcmf_dev_ioctl(dev, BRCM_SET_BCNPRD, &bcnprd, sizeof(bcnprd));
if (unlikely(err)) {
WL_ERR("WLC_SET_BCNPRD failed (%d)\n", err);
goto done;
}
/* Configure required join parameter */
- memset(&join_params, 0, sizeof(wl_join_params_t));
+ memset(&join_params, 0, sizeof(struct brcmf_join_params));
/* SSID */
join_params.ssid.SSID_len =
@@ -1045,67 +1039,69 @@ wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
memcpy(join_params.ssid.SSID, params->ssid, join_params.ssid.SSID_len);
join_params.ssid.SSID_len = cpu_to_le32(join_params.ssid.SSID_len);
join_params_size = sizeof(join_params.ssid);
- wl_update_prof(wl, NULL, &join_params.ssid, WL_PROF_SSID);
+ brcmf_update_prof(cfg_priv, NULL, &join_params.ssid, WL_PROF_SSID);
/* BSSID */
if (params->bssid) {
memcpy(join_params.params.bssid, params->bssid, ETH_ALEN);
- join_params_size =
- sizeof(join_params.ssid) + WL_ASSOC_PARAMS_FIXED_SIZE;
+ join_params_size = sizeof(join_params.ssid) +
+ BRCMF_ASSOC_PARAMS_FIXED_SIZE;
} else {
memcpy(join_params.params.bssid, ether_bcast, ETH_ALEN);
}
- wl_update_prof(wl, NULL, &join_params.params.bssid, WL_PROF_BSSID);
+ brcmf_update_prof(cfg_priv, NULL,
+ &join_params.params.bssid, WL_PROF_BSSID);
/* Channel */
if (params->channel) {
u32 target_channel;
- wl->channel =
+ cfg_priv->channel =
ieee80211_frequency_to_channel(
params->channel->center_freq);
if (params->channel_fixed) {
/* adding chanspec */
- wl_ch_to_chanspec(wl->channel,
+ brcmf_ch_to_chanspec(cfg_priv->channel,
&join_params, &join_params_size);
}
/* set channel for starter */
- target_channel = cpu_to_le32(wl->channel);
- err = wl_dev_ioctl(dev, WLC_SET_CHANNEL,
+ target_channel = cpu_to_le32(cfg_priv->channel);
+ err = brcmf_dev_ioctl(dev, BRCM_SET_CHANNEL,
&target_channel, sizeof(target_channel));
if (unlikely(err)) {
WL_ERR("WLC_SET_CHANNEL failed (%d)\n", err);
goto done;
}
} else
- wl->channel = 0;
+ cfg_priv->channel = 0;
- wl->ibss_starter = false;
+ cfg_priv->ibss_starter = false;
- err = wl_dev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size);
+ err = brcmf_dev_ioctl(dev, BRCMF_C_SET_SSID,
+ &join_params, join_params_size);
if (unlikely(err)) {
WL_ERR("WLC_SET_SSID failed (%d)\n", err);
goto done;
}
- set_bit(WL_STATUS_CONNECTING, &wl->status);
-
done:
+ if (err)
+ clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
WL_TRACE("Exit\n");
return err;
}
-static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+static s32 brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
{
- struct wl_priv *wl = wiphy_to_wl(wiphy);
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
s32 err = 0;
WL_TRACE("Enter\n");
CHECK_SYS_UP();
- wl_link_down(wl);
+ brcmf_link_down(cfg_priv);
WL_TRACE("Exit\n");
@@ -1113,10 +1109,10 @@ static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
}
static s32
-wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
+brcmf_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
{
- struct wl_priv *wl = ndev_to_wl(dev);
- struct wl_security *sec;
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(dev);
+ struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
@@ -1127,21 +1123,21 @@ wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
else
val = WPA_AUTH_DISABLED;
WL_CONN("setting wpa_auth to 0x%0x\n", val);
- err = wl_dev_intvar_set(dev, "wpa_auth", val);
+ err = brcmf_dev_intvar_set(dev, "wpa_auth", val);
if (unlikely(err)) {
WL_ERR("set wpa_auth failed (%d)\n", err);
return err;
}
- sec = wl_read_prof(wl, WL_PROF_SEC);
+ sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
sec->wpa_versions = sme->crypto.wpa_versions;
return err;
}
static s32
-wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
+brcmf_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
{
- struct wl_priv *wl = ndev_to_wl(dev);
- struct wl_security *sec;
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(dev);
+ struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
@@ -1166,21 +1162,21 @@ wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
break;
}
- err = wl_dev_intvar_set(dev, "auth", val);
+ err = brcmf_dev_intvar_set(dev, "auth", val);
if (unlikely(err)) {
WL_ERR("set auth failed (%d)\n", err);
return err;
}
- sec = wl_read_prof(wl, WL_PROF_SEC);
+ sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
sec->auth_type = sme->auth_type;
return err;
}
static s32
-wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
+brcmf_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
{
- struct wl_priv *wl = ndev_to_wl(dev);
- struct wl_security *sec;
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(dev);
+ struct brcmf_cfg80211_security *sec;
s32 pval = 0;
s32 gval = 0;
s32 err = 0;
@@ -1229,13 +1225,13 @@ wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
}
WL_CONN("pval (%d) gval (%d)\n", pval, gval);
- err = wl_dev_intvar_set(dev, "wsec", pval | gval);
+ err = brcmf_dev_intvar_set(dev, "wsec", pval | gval);
if (unlikely(err)) {
WL_ERR("error (%d)\n", err);
return err;
}
- sec = wl_read_prof(wl, WL_PROF_SEC);
+ sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
sec->cipher_group = sme->crypto.cipher_group;
@@ -1243,15 +1239,15 @@ wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
}
static s32
-wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
+brcmf_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
{
- struct wl_priv *wl = ndev_to_wl(dev);
- struct wl_security *sec;
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(dev);
+ struct brcmf_cfg80211_security *sec;
s32 val = 0;
s32 err = 0;
if (sme->crypto.n_akm_suites) {
- err = wl_dev_intvar_get(dev, "wpa_auth", &val);
+ err = brcmf_dev_intvar_get(dev, "wpa_auth", &val);
if (unlikely(err)) {
WL_ERR("could not get wpa_auth (%d)\n", err);
return err;
@@ -1285,31 +1281,31 @@ wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
}
WL_CONN("setting wpa_auth to %d\n", val);
- err = wl_dev_intvar_set(dev, "wpa_auth", val);
+ err = brcmf_dev_intvar_set(dev, "wpa_auth", val);
if (unlikely(err)) {
WL_ERR("could not set wpa_auth (%d)\n", err);
return err;
}
}
- sec = wl_read_prof(wl, WL_PROF_SEC);
+ sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
sec->wpa_auth = sme->crypto.akm_suites[0];
return err;
}
static s32
-wl_set_set_sharedkey(struct net_device *dev,
+brcmf_set_set_sharedkey(struct net_device *dev,
struct cfg80211_connect_params *sme)
{
- struct wl_priv *wl = ndev_to_wl(dev);
- struct wl_security *sec;
- struct wl_wsec_key key;
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(dev);
+ struct brcmf_cfg80211_security *sec;
+ struct brcmf_wsec_key key;
s32 val;
s32 err = 0;
WL_CONN("key len (%d)\n", sme->key_len);
if (sme->key_len) {
- sec = wl_read_prof(wl, WL_PROF_SEC);
+ sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
WL_CONN("wpa_versions 0x%x cipher_pairwise 0x%x\n",
sec->wpa_versions, sec->cipher_pairwise);
if (!
@@ -1325,7 +1321,7 @@ wl_set_set_sharedkey(struct net_device *dev,
return -EINVAL;
}
memcpy(key.data, sme->key, key.len);
- key.flags = WL_PRIMARY_KEY;
+ key.flags = BRCMF_PRIMARY_KEY;
switch (sec->cipher_pairwise) {
case WLAN_CIPHER_SUITE_WEP40:
key.algo = CRYPTO_ALGO_WEP1;
@@ -1343,7 +1339,7 @@ wl_set_set_sharedkey(struct net_device *dev,
key.len, key.index, key.algo);
WL_CONN("key \"%s\"\n", key.data);
swap_key_from_BE(&key);
- err = wl_dev_ioctl(dev, WLC_SET_KEY, &key,
+ err = brcmf_dev_ioctl(dev, BRCMF_C_SET_KEY, &key,
sizeof(key));
if (unlikely(err)) {
WL_ERR("WLC_SET_KEY error (%d)\n", err);
@@ -1352,7 +1348,7 @@ wl_set_set_sharedkey(struct net_device *dev,
if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
WL_CONN("set auth_type to shared key\n");
val = 1; /* shared key */
- err = wl_dev_intvar_set(dev, "auth", val);
+ err = brcmf_dev_intvar_set(dev, "auth", val);
if (unlikely(err)) {
WL_ERR("set auth failed (%d)\n", err);
return err;
@@ -1364,12 +1360,12 @@ wl_set_set_sharedkey(struct net_device *dev,
}
static s32
-wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
- struct wl_priv *wl = wiphy_to_wl(wiphy);
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
struct ieee80211_channel *chan = sme->channel;
- struct wl_join_params join_params;
+ struct brcmf_join_params join_params;
size_t join_params_size;
s32 err = 0;
@@ -1382,37 +1378,49 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
return -EOPNOTSUPP;
}
+ set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+
if (chan) {
- wl->channel =
+ cfg_priv->channel =
ieee80211_frequency_to_channel(chan->center_freq);
WL_CONN("channel (%d), center_req (%d)\n",
- wl->channel, chan->center_freq);
+ cfg_priv->channel, chan->center_freq);
} else
- wl->channel = 0;
+ cfg_priv->channel = 0;
WL_INFO("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
- err = wl_set_wpa_version(dev, sme);
- if (unlikely(err))
- return err;
+ err = brcmf_set_wpa_version(dev, sme);
+ if (err) {
+ WL_ERR("wl_set_wpa_version failed (%d)\n", err);
+ goto done;
+ }
- err = wl_set_auth_type(dev, sme);
- if (unlikely(err))
- return err;
+ err = brcmf_set_auth_type(dev, sme);
+ if (err) {
+ WL_ERR("wl_set_auth_type failed (%d)\n", err);
+ goto done;
+ }
- err = wl_set_set_cipher(dev, sme);
- if (unlikely(err))
- return err;
+ err = brcmf_set_set_cipher(dev, sme);
+ if (err) {
+ WL_ERR("wl_set_set_cipher failed (%d)\n", err);
+ goto done;
+ }
- err = wl_set_key_mgmt(dev, sme);
- if (unlikely(err))
- return err;
+ err = brcmf_set_key_mgmt(dev, sme);
+ if (err) {
+ WL_ERR("wl_set_key_mgmt failed (%d)\n", err);
+ goto done;
+ }
- err = wl_set_set_sharedkey(dev, sme);
- if (unlikely(err))
- return err;
+ err = brcmf_set_set_sharedkey(dev, sme);
+ if (err) {
+ WL_ERR("wl_set_set_sharedkey failed (%d)\n", err);
+ goto done;
+ }
- wl_update_prof(wl, NULL, sme->bssid, WL_PROF_BSSID);
+ brcmf_update_prof(cfg_priv, NULL, sme->bssid, WL_PROF_BSSID);
/*
** Join with specific BSSID and cached SSID
** If SSID is zero join based on BSSID only
@@ -1423,7 +1431,7 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
join_params.ssid.SSID_len = cpu_to_le32(join_params.ssid.SSID_len);
- wl_update_prof(wl, NULL, &join_params.ssid, WL_PROF_SSID);
+ brcmf_update_prof(cfg_priv, NULL, &join_params.ssid, WL_PROF_SSID);
if (sme->bssid)
memcpy(join_params.params.bssid, sme->bssid, ETH_ALEN);
@@ -1435,52 +1443,54 @@ wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
join_params.ssid.SSID, join_params.ssid.SSID_len);
}
- wl_ch_to_chanspec(wl->channel, &join_params, &join_params_size);
- err = wl_dev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size);
- if (unlikely(err)) {
- WL_ERR("error (%d)\n", err);
- return err;
- }
- set_bit(WL_STATUS_CONNECTING, &wl->status);
+ brcmf_ch_to_chanspec(cfg_priv->channel,
+ &join_params, &join_params_size);
+ err = brcmf_dev_ioctl(dev, BRCMF_C_SET_SSID,
+ &join_params, join_params_size);
+ if (err)
+ WL_ERR("WLC_SET_SSID failed (%d)\n", err);
+done:
+ if (err)
+ clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
WL_TRACE("Exit\n");
return err;
}
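Both the IBSS-join and connect paths in this patch move the WL_STATUS_CONNECTING bookkeeping: the bit is now set before the first step that can fail and cleared in the done: label on error, instead of only being set after a successful SET_SSID. A standalone sketch of that set-early/clear-on-failure pattern, with plain bit operations standing in for the kernel's set_bit()/clear_bit()/test_bit():

#include <stdio.h>

#define ST_CONNECTING  0

static void flag_set(int nr, unsigned long *a)   { *a |=  (1UL << nr); }
static void flag_clear(int nr, unsigned long *a) { *a &= ~(1UL << nr); }
static int  flag_test(int nr, unsigned long *a)  { return !!(*a & (1UL << nr)); }

/* "Connect": mark CONNECTING before any step that can fail, undo on error. */
static int demo_connect(unsigned long *status, int failing_step)
{
        int err = 0;

        flag_set(ST_CONNECTING, status);

        if (failing_step == 1)          /* e.g. security setup failed */
                err = -1;
        else if (failing_step == 2)     /* e.g. the SET_SSID call failed */
                err = -1;

        if (err)
                flag_clear(ST_CONNECTING, status);      /* the done: path */
        return err;
}

int main(void)
{
        unsigned long status = 0;

        demo_connect(&status, 0);
        printf("connecting=%d\n", flag_test(ST_CONNECTING, &status)); /* 1 */
        status = 0;
        demo_connect(&status, 2);
        printf("connecting=%d\n", flag_test(ST_CONNECTING, &status)); /* 0 */
        return 0;
}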
static s32
-wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
+brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
u16 reason_code)
{
- struct wl_priv *wl = wiphy_to_wl(wiphy);
- scb_val_t scbval;
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_scb_val scbval;
s32 err = 0;
WL_TRACE("Enter. Reason code = %d\n", reason_code);
CHECK_SYS_UP();
- clear_bit(WL_STATUS_CONNECTED, &wl->status);
+ clear_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
scbval.val = reason_code;
- memcpy(&scbval.ea, wl_read_prof(wl, WL_PROF_BSSID), ETH_ALEN);
+ memcpy(&scbval.ea, brcmf_read_prof(cfg_priv, WL_PROF_BSSID), ETH_ALEN);
scbval.val = cpu_to_le32(scbval.val);
- err = wl_dev_ioctl(dev, WLC_DISASSOC, &scbval,
- sizeof(scb_val_t));
+ err = brcmf_dev_ioctl(dev, BRCMF_C_DISASSOC, &scbval,
+ sizeof(struct brcmf_scb_val));
if (unlikely(err))
WL_ERR("error (%d)\n", err);
- wl->link_up = false;
+ cfg_priv->link_up = false;
WL_TRACE("Exit\n");
return err;
}
static s32
-wl_cfg80211_set_tx_power(struct wiphy *wiphy,
+brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
enum nl80211_tx_power_setting type, s32 dbm)
{
- struct wl_priv *wl = wiphy_to_wl(wiphy);
- struct net_device *ndev = wl_to_ndev(wl);
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = cfg_to_ndev(cfg_priv);
u16 txpwrmw;
s32 err = 0;
s32 disable = 0;
@@ -1509,7 +1519,7 @@ wl_cfg80211_set_tx_power(struct wiphy *wiphy,
/* Make sure radio is off or on as far as software is concerned */
disable = WL_RADIO_SW_DISABLE << 16;
disable = cpu_to_le32(disable);
- err = wl_dev_ioctl(ndev, WLC_SET_RADIO, &disable, sizeof(disable));
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_RADIO, &disable, sizeof(disable));
if (unlikely(err))
WL_ERR("WLC_SET_RADIO error (%d)\n", err);
@@ -1517,21 +1527,21 @@ wl_cfg80211_set_tx_power(struct wiphy *wiphy,
txpwrmw = 0xffff;
else
txpwrmw = (u16) dbm;
- err = wl_dev_intvar_set(ndev, "qtxpower",
- (s32) (bcm_mw_to_qdbm(txpwrmw)));
+ err = brcmf_dev_intvar_set(ndev, "qtxpower",
+ (s32) (brcmu_mw_to_qdbm(txpwrmw)));
if (unlikely(err))
WL_ERR("qtxpower error (%d)\n", err);
- wl->conf->tx_power = dbm;
+ cfg_priv->conf->tx_power = dbm;
done:
WL_TRACE("Exit\n");
return err;
}
-static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
+static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
{
- struct wl_priv *wl = wiphy_to_wl(wiphy);
- struct net_device *ndev = wl_to_ndev(wl);
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = cfg_to_ndev(cfg_priv);
s32 txpwrdbm;
u8 result;
s32 err = 0;
@@ -1539,14 +1549,14 @@ static s32 wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
WL_TRACE("Enter\n");
CHECK_SYS_UP();
- err = wl_dev_intvar_get(ndev, "qtxpower", &txpwrdbm);
+ err = brcmf_dev_intvar_get(ndev, "qtxpower", &txpwrdbm);
if (unlikely(err)) {
WL_ERR("error (%d)\n", err);
goto done;
}
result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
- *dbm = (s32) bcm_qdbm_to_mw(result);
+ *dbm = (s32) brcmu_qdbm_to_mw(result);
done:
WL_TRACE("Exit\n");
@@ -1554,7 +1564,7 @@ done:
}
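The tx-power handlers above convert between milliwatts and quarter-dBm for the "qtxpower" variable via brcmu_mw_to_qdbm()/brcmu_qdbm_to_mw(); the table-based helpers themselves are not shown in this hunk. The underlying relationship is mW = 10^(qdBm / 40), sketched below with a hypothetical helper that ignores the real functions' rounding details:

#include <math.h>
#include <stdio.h>

/* Quarter-dBm to milliwatts: qdbm/4 is dBm, and mW = 10^(dBm/10). */
static double qdbm_to_mw(int qdbm)
{
        return pow(10.0, (qdbm / 4.0) / 10.0);
}

int main(void)
{
        printf("%.1f mW\n", qdbm_to_mw(0));     /* 0 dBm  -> 1.0 mW   */
        printf("%.1f mW\n", qdbm_to_mw(80));    /* 20 dBm -> 100.0 mW */
        return 0;
}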
static s32
-wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
+brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, bool unicast, bool multicast)
{
u32 index;
@@ -1565,7 +1575,7 @@ wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
WL_CONN("key index (%d)\n", key_idx);
CHECK_SYS_UP();
- err = wl_dev_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec));
+ err = brcmf_dev_ioctl(dev, BRCMF_C_GET_WSEC, &wsec, sizeof(wsec));
if (unlikely(err)) {
WL_ERR("WLC_GET_WSEC error (%d)\n", err);
goto done;
@@ -1576,7 +1586,7 @@ wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
/* Just select a new current key */
index = (u32) key_idx;
index = cpu_to_le32(index);
- err = wl_dev_ioctl(dev, WLC_SET_KEY_PRIMARY, &index,
+ err = brcmf_dev_ioctl(dev, BRCMF_C_SET_KEY_PRIMARY, &index,
sizeof(index));
if (unlikely(err))
WL_ERR("error (%d)\n", err);
@@ -1587,10 +1597,10 @@ done:
}
static s32
-wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
+brcmf_add_keyext(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, const u8 *mac_addr, struct key_params *params)
{
- struct wl_wsec_key key;
+ struct brcmf_wsec_key key;
s32 err = 0;
memset(&key, 0, sizeof(key));
@@ -1604,7 +1614,7 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
if (key.len == 0) {
/* key delete */
swap_key_from_BE(&key);
- err = wl_dev_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ err = brcmf_dev_ioctl(dev, BRCMF_C_SET_KEY, &key, sizeof(key));
if (unlikely(err)) {
WL_ERR("key delete error (%d)\n", err);
return err;
@@ -1663,8 +1673,8 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
}
swap_key_from_BE(&key);
- dhd_wait_pend8021x(dev);
- err = wl_dev_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ brcmf_netdev_wait_pend8021x(dev);
+ err = brcmf_dev_ioctl(dev, BRCMF_C_SET_KEY, &key, sizeof(key));
if (unlikely(err)) {
WL_ERR("WLC_SET_KEY error (%d)\n", err);
return err;
@@ -1674,11 +1684,11 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
}
static s32
-wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
+brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, bool pairwise, const u8 *mac_addr,
struct key_params *params)
{
- struct wl_wsec_key key;
+ struct brcmf_wsec_key key;
s32 val;
s32 wsec;
s32 err = 0;
@@ -1690,7 +1700,7 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
if (mac_addr) {
WL_TRACE("Exit");
- return wl_add_keyext(wiphy, dev, key_idx, mac_addr, params);
+ return brcmf_add_keyext(wiphy, dev, key_idx, mac_addr, params);
}
memset(&key, 0, sizeof(key));
@@ -1704,7 +1714,7 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
}
memcpy(key.data, params->key, key.len);
- key.flags = WL_PRIMARY_KEY;
+ key.flags = BRCMF_PRIMARY_KEY;
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
key.algo = CRYPTO_ALGO_WEP1;
@@ -1737,21 +1747,21 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
/* Set the new key/index */
swap_key_from_BE(&key);
- err = wl_dev_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ err = brcmf_dev_ioctl(dev, BRCMF_C_SET_KEY, &key, sizeof(key));
if (unlikely(err)) {
WL_ERR("WLC_SET_KEY error (%d)\n", err);
goto done;
}
val = WEP_ENABLED;
- err = wl_dev_intvar_get(dev, "wsec", &wsec);
+ err = brcmf_dev_intvar_get(dev, "wsec", &wsec);
if (unlikely(err)) {
WL_ERR("get wsec error (%d)\n", err);
goto done;
}
wsec &= ~(WEP_ENABLED);
wsec |= val;
- err = wl_dev_intvar_set(dev, "wsec", wsec);
+ err = brcmf_dev_intvar_set(dev, "wsec", wsec);
if (unlikely(err)) {
WL_ERR("set wsec error (%d)\n", err);
goto done;
@@ -1759,7 +1769,7 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
val = 1; /* assume shared key. otherwise 0 */
val = cpu_to_le32(val);
- err = wl_dev_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val));
+ err = brcmf_dev_ioctl(dev, BRCMF_C_SET_AUTH, &val, sizeof(val));
if (unlikely(err))
WL_ERR("WLC_SET_AUTH error (%d)\n", err);
done:
@@ -1768,10 +1778,10 @@ done:
}
static s32
-wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
+brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, bool pairwise, const u8 *mac_addr)
{
- struct wl_wsec_key key;
+ struct brcmf_wsec_key key;
s32 err = 0;
s32 val;
s32 wsec;
@@ -1781,13 +1791,13 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
memset(&key, 0, sizeof(key));
key.index = (u32) key_idx;
- key.flags = WL_PRIMARY_KEY;
+ key.flags = BRCMF_PRIMARY_KEY;
key.algo = CRYPTO_ALGO_OFF;
WL_CONN("key index (%d)\n", key_idx);
/* Set the new key/index */
swap_key_from_BE(&key);
- err = wl_dev_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
+ err = brcmf_dev_ioctl(dev, BRCMF_C_SET_KEY, &key, sizeof(key));
if (unlikely(err)) {
if (err == -EINVAL) {
if (key.index >= DOT11_MAX_DEFAULT_KEYS)
@@ -1802,7 +1812,7 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
}
val = 0;
- err = wl_dev_intvar_get(dev, "wsec", &wsec);
+ err = brcmf_dev_intvar_get(dev, "wsec", &wsec);
if (unlikely(err)) {
WL_ERR("get wsec error (%d)\n", err);
/* Ignore this error, may happen during DISASSOC */
@@ -1811,7 +1821,7 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
}
wsec &= ~(WEP_ENABLED);
wsec |= val;
- err = wl_dev_intvar_set(dev, "wsec", wsec);
+ err = brcmf_dev_intvar_set(dev, "wsec", wsec);
if (unlikely(err)) {
WL_ERR("set wsec error (%d)\n", err);
/* Ignore this error, may happen during DISASSOC */
@@ -1821,7 +1831,7 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
val = 0; /* assume open key. otherwise 1 */
val = cpu_to_le32(val);
- err = wl_dev_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val));
+ err = brcmf_dev_ioctl(dev, BRCMF_C_SET_AUTH, &val, sizeof(val));
if (unlikely(err)) {
WL_ERR("WLC_SET_AUTH error (%d)\n", err);
/* Ignore this error, may happen during DISASSOC */
@@ -1833,14 +1843,14 @@ done:
}
static s32
-wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
+brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
void (*callback) (void *cookie, struct key_params * params))
{
struct key_params params;
- struct wl_wsec_key key;
- struct wl_priv *wl = wiphy_to_wl(wiphy);
- struct wl_security *sec;
+ struct brcmf_wsec_key key;
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_cfg80211_security *sec;
s32 wsec;
s32 err = 0;
@@ -1855,7 +1865,7 @@ wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
params.key_len = (u8) min_t(u8, WLAN_MAX_KEY_LEN, key.len);
memcpy(params.key, key.data, params.key_len);
- err = wl_dev_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec));
+ err = brcmf_dev_ioctl(dev, BRCMF_C_GET_WSEC, &wsec, sizeof(wsec));
if (unlikely(err)) {
WL_ERR("WLC_GET_WSEC error (%d)\n", err);
/* Ignore this error, may happen during DISASSOC */
@@ -1865,7 +1875,7 @@ wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
wsec = le32_to_cpu(wsec);
switch (wsec) {
case WEP_ENABLED:
- sec = wl_read_prof(wl, WL_PROF_SEC);
+ sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC);
if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
params.cipher = WLAN_CIPHER_SUITE_WEP40;
WL_CONN("WLAN_CIPHER_SUITE_WEP40\n");
@@ -1895,7 +1905,7 @@ done:
}
static s32
-wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
+brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
struct net_device *dev, u8 key_idx)
{
WL_INFO("Not supported\n");
@@ -1905,15 +1915,15 @@ wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
}
static s32
-wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_info *sinfo)
{
- struct wl_priv *wl = wiphy_to_wl(wiphy);
- scb_val_t scb_val;
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct brcmf_scb_val scb_val;
int rssi;
s32 rate;
s32 err = 0;
- u8 *bssid = wl_read_prof(wl, WL_PROF_BSSID);
+ u8 *bssid = brcmf_read_prof(cfg_priv, WL_PROF_BSSID);
WL_TRACE("Enter\n");
CHECK_SYS_UP();
@@ -1930,7 +1940,7 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
}
/* Report the current tx rate */
- err = wl_dev_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate));
+ err = brcmf_dev_ioctl(dev, BRCMF_C_GET_RATE, &rate, sizeof(rate));
if (err) {
WL_ERR("Could not get rate (%d)\n", err);
} else {
@@ -1940,10 +1950,10 @@ wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
WL_CONN("Rate %d Mbps\n", rate / 2);
}
- if (test_bit(WL_STATUS_CONNECTED, &wl->status)) {
+ if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) {
scb_val.val = 0;
- err = wl_dev_ioctl(dev, WLC_GET_RSSI, &scb_val,
- sizeof(scb_val_t));
+ err = brcmf_dev_ioctl(dev, BRCMF_C_GET_RSSI, &scb_val,
+ sizeof(struct brcmf_scb_val));
if (unlikely(err)) {
WL_ERR("Could not get rssi (%d)\n", err);
}
@@ -1959,7 +1969,7 @@ done:
}
static s32
-wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
bool enabled, s32 timeout)
{
s32 pm;
@@ -1972,7 +1982,7 @@ wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
pm = cpu_to_le32(pm);
WL_INFO("power save %s\n", (pm ? "enabled" : "disabled"));
- err = wl_dev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
+ err = brcmf_dev_ioctl(dev, BRCMF_C_SET_PM, &pm, sizeof(pm));
if (unlikely(err)) {
if (err == -ENODEV)
WL_ERR("net_device is not ready yet\n");
@@ -1983,7 +1993,7 @@ wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
return err;
}
-static __used u32 wl_find_msb(u16 bit16)
+static __used u32 brcmf_find_msb(u16 bit16)
{
u32 ret = 0;
@@ -2011,7 +2021,7 @@ static __used u32 wl_find_msb(u16 bit16)
}
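The hunk above only renames wl_find_msb(); its body, which returns the 1-based position of the most significant set bit in a 16-bit rate mask, is elided by the diff context. A standalone sketch of an equivalent helper, assuming that behaviour (the caller below indexes wl_g_rates[legacy - 1], which is consistent with a 1-based result):

#include <stdio.h>
#include <stdint.h>

/* Return the 1-based index of the highest set bit in bit16, or 0 if none. */
static uint32_t find_msb(uint16_t bit16)
{
        uint32_t ret = 0;

        while (bit16) {
                ret++;
                bit16 >>= 1;
        }
        return ret;
}

int main(void)
{
        printf("%u %u %u\n", find_msb(0), find_msb(0x1), find_msb(0x0800));
        /* prints: 0 1 12 */
        return 0;
}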
static s32
-wl_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev,
+brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev,
const u8 *addr,
const struct cfg80211_bitrate_mask *mask)
{
@@ -2028,7 +2038,7 @@ wl_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev,
/* addr param is always NULL. ignore it */
/* Get current rateset */
- err = wl_dev_ioctl(dev, WLC_GET_CURR_RATESET, &rateset,
+ err = brcmf_dev_ioctl(dev, BRCM_GET_CURR_RATESET, &rateset,
sizeof(rateset));
if (unlikely(err)) {
WL_ERR("could not get current rateset (%d)\n", err);
@@ -2037,9 +2047,9 @@ wl_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev,
rateset.count = le32_to_cpu(rateset.count);
- legacy = wl_find_msb(mask->control[IEEE80211_BAND_2GHZ].legacy);
+ legacy = brcmf_find_msb(mask->control[IEEE80211_BAND_2GHZ].legacy);
if (!legacy)
- legacy = wl_find_msb(mask->control[IEEE80211_BAND_5GHZ].legacy);
+ legacy = brcmf_find_msb(mask->control[IEEE80211_BAND_5GHZ].legacy);
val = wl_g_rates[legacy - 1].bitrate * 100000;
@@ -2057,8 +2067,8 @@ wl_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev,
* Set rate override,
* Since this is a/b/g-blind, both a/bg_rate are enforced.
*/
- err_bg = wl_dev_intvar_set(dev, "bg_rate", rate);
- err_a = wl_dev_intvar_set(dev, "a_rate", rate);
+ err_bg = brcmf_dev_intvar_set(dev, "bg_rate", rate);
+ err_a = brcmf_dev_intvar_set(dev, "a_rate", rate);
if (unlikely(err_bg && err_a)) {
WL_ERR("could not set fixed rate (%d) (%d)\n", err_bg, err_a);
err = err_bg | err_a;
@@ -2069,10 +2079,9 @@ done:
return err;
}
-static s32 wl_cfg80211_resume(struct wiphy *wiphy)
+static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
{
- struct wl_priv *wl = wiphy_to_wl(wiphy);
- struct net_device *ndev = wl_to_ndev(wl);
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
/*
* Check for WL_STATUS_READY before any function call which
@@ -2082,23 +2091,21 @@ static s32 wl_cfg80211_resume(struct wiphy *wiphy)
WL_TRACE("Enter\n");
#if defined(CONFIG_PM_SLEEP)
- atomic_set(&dhd_mmc_suspend, false);
+ atomic_set(&brcmf_mmc_suspend, false);
#endif /* defined(CONFIG_PM_SLEEP) */
- if (test_bit(WL_STATUS_READY, &wl->status)) {
- /* Turn on Watchdog timer */
- wl_os_wd_timer(ndev, dhd_watchdog_ms);
- wl_invoke_iscan(wiphy_to_wl(wiphy));
- }
+ if (test_bit(WL_STATUS_READY, &cfg_priv->status))
+ brcmf_invoke_iscan(wiphy_to_cfg(wiphy));
WL_TRACE("Exit\n");
return 0;
}
-static s32 wl_cfg80211_suspend(struct wiphy *wiphy)
+static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
+ struct cfg80211_wowlan *wow)
{
- struct wl_priv *wl = wiphy_to_wl(wiphy);
- struct net_device *ndev = wl_to_ndev(wl);
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct net_device *ndev = cfg_to_ndev(cfg_priv);
WL_TRACE("Enter\n");
@@ -2112,11 +2119,12 @@ static s32 wl_cfg80211_suspend(struct wiphy *wiphy)
* While going to suspend if associated with AP disassociate
* from AP to save power while system is in suspended state
*/
- if (test_bit(WL_STATUS_CONNECTED, &wl->status) &&
- test_bit(WL_STATUS_READY, &wl->status)) {
+ if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) ||
+ test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) &&
+ test_bit(WL_STATUS_READY, &cfg_priv->status)) {
WL_INFO("Disassociating from AP"
" while entering suspend state\n");
- wl_link_down(wl);
+ brcmf_link_down(cfg_priv);
/*
* Make sure WPA_Supplicant receives all the event
@@ -2124,37 +2132,31 @@ static s32 wl_cfg80211_suspend(struct wiphy *wiphy)
* the state fw and WPA_Supplicant state consistent
*/
rtnl_unlock();
- wl_delay(500);
+ brcmf_delay(500);
rtnl_lock();
}
- set_bit(WL_STATUS_SCAN_ABORTING, &wl->status);
- if (test_bit(WL_STATUS_READY, &wl->status))
- wl_term_iscan(wl);
+ set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
+ if (test_bit(WL_STATUS_READY, &cfg_priv->status))
+ brcmf_term_iscan(cfg_priv);
- if (wl->scan_request) {
+ if (cfg_priv->scan_request) {
/* Indicate scan abort to cfg80211 layer */
WL_INFO("Terminating scan in progress\n");
- cfg80211_scan_done(wl->scan_request, true);
- wl->scan_request = NULL;
+ cfg80211_scan_done(cfg_priv->scan_request, true);
+ cfg_priv->scan_request = NULL;
}
- clear_bit(WL_STATUS_SCANNING, &wl->status);
- clear_bit(WL_STATUS_SCAN_ABORTING, &wl->status);
- clear_bit(WL_STATUS_CONNECTING, &wl->status);
- clear_bit(WL_STATUS_CONNECTED, &wl->status);
-
- /* Inform SDIO stack not to switch off power to the chip */
- sdioh_sdio_set_host_pm_flags(MMC_PM_KEEP_POWER);
+ clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
+ clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
/* Turn off watchdog timer */
- if (test_bit(WL_STATUS_READY, &wl->status)) {
- WL_INFO("Terminate watchdog timer and enable MPC\n");
- wl_set_mpc(ndev, 1);
- wl_os_wd_timer(ndev, 0);
+ if (test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+ WL_INFO("Enable MPC\n");
+ brcmf_set_mpc(ndev, 1);
}
#if defined(CONFIG_PM_SLEEP)
- atomic_set(&dhd_mmc_suspend, true);
+ atomic_set(&brcmf_mmc_suspend, true);
#endif /* defined(CONFIG_PM_SLEEP) */
WL_TRACE("Exit\n");
@@ -2163,8 +2165,8 @@ static s32 wl_cfg80211_suspend(struct wiphy *wiphy)
}
static __used s32
-wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list,
- s32 err)
+brcmf_update_pmklist(struct net_device *dev,
+ struct brcmf_cfg80211_pmk_list *pmk_list, s32 err)
{
int i, j;
@@ -2177,55 +2179,51 @@ wl_update_pmklist(struct net_device *dev, struct wl_pmk_list *pmk_list,
}
if (likely(!err))
- wl_dev_bufvar_set(dev, "pmkid_info", (char *)pmk_list,
+ brcmf_dev_bufvar_set(dev, "pmkid_info", (char *)pmk_list,
sizeof(*pmk_list));
return err;
}
static s32
-wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_pmksa *pmksa)
+brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa)
{
- struct wl_priv *wl = wiphy_to_wl(wiphy);
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
+ struct _pmkid_list *pmkids = &cfg_priv->pmk_list->pmkids;
s32 err = 0;
int i;
WL_TRACE("Enter\n");
CHECK_SYS_UP();
- for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
- if (!memcmp(pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
- ETH_ALEN))
+ for (i = 0; i < pmkids->npmkid; i++)
+ if (!memcmp(pmksa->bssid, pmkids->pmkid[i].BSSID, ETH_ALEN))
break;
if (i < WL_NUM_PMKIDS_MAX) {
- memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
- ETH_ALEN);
- memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
- WLAN_PMKID_LEN);
- if (i == wl->pmk_list->pmkids.npmkid)
- wl->pmk_list->pmkids.npmkid++;
+ memcpy(pmkids->pmkid[i].BSSID, pmksa->bssid, ETH_ALEN);
+ memcpy(pmkids->pmkid[i].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
+ if (i == pmkids->npmkid)
+ pmkids->npmkid++;
} else
err = -EINVAL;
WL_CONN("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
- &wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid].BSSID);
+ pmkids->pmkid[pmkids->npmkid].BSSID);
for (i = 0; i < WLAN_PMKID_LEN; i++)
- WL_CONN("%02x\n",
- wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid].
- PMKID[i]);
+ WL_CONN("%02x\n", pmkids->pmkid[pmkids->npmkid].PMKID[i]);
- err = wl_update_pmklist(dev, wl->pmk_list, err);
+ err = brcmf_update_pmklist(dev, cfg_priv->pmk_list, err);
WL_TRACE("Exit\n");
return err;
}
static s32
-wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_pmksa *pmksa)
{
- struct wl_priv *wl = wiphy_to_wl(wiphy);
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
struct _pmkid_list pmkid;
s32 err = 0;
int i;
@@ -2240,28 +2238,29 @@ wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
for (i = 0; i < WLAN_PMKID_LEN; i++)
WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]);
- for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
+ for (i = 0; i < cfg_priv->pmk_list->pmkids.npmkid; i++)
if (!memcmp
- (pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
+ (pmksa->bssid, &cfg_priv->pmk_list->pmkids.pmkid[i].BSSID,
ETH_ALEN))
break;
- if ((wl->pmk_list->pmkids.npmkid > 0)
- && (i < wl->pmk_list->pmkids.npmkid)) {
- memset(&wl->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t));
- for (; i < (wl->pmk_list->pmkids.npmkid - 1); i++) {
- memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID,
- &wl->pmk_list->pmkids.pmkid[i + 1].BSSID,
+ if ((cfg_priv->pmk_list->pmkids.npmkid > 0)
+ && (i < cfg_priv->pmk_list->pmkids.npmkid)) {
+ memset(&cfg_priv->pmk_list->pmkids.pmkid[i], 0,
+ sizeof(pmkid_t));
+ for (; i < (cfg_priv->pmk_list->pmkids.npmkid - 1); i++) {
+ memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].BSSID,
+ &cfg_priv->pmk_list->pmkids.pmkid[i + 1].BSSID,
ETH_ALEN);
- memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID,
- &wl->pmk_list->pmkids.pmkid[i + 1].PMKID,
+ memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].PMKID,
+ &cfg_priv->pmk_list->pmkids.pmkid[i + 1].PMKID,
WLAN_PMKID_LEN);
}
- wl->pmk_list->pmkids.npmkid--;
+ cfg_priv->pmk_list->pmkids.npmkid--;
} else
err = -EINVAL;
- err = wl_update_pmklist(dev, wl->pmk_list, err);
+ err = brcmf_update_pmklist(dev, cfg_priv->pmk_list, err);
WL_TRACE("Exit\n");
return err;
@@ -2269,16 +2268,16 @@ wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
}
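The PMKSA handlers above manage a fixed-size PMKID cache: set_pmksa() linearly searches for the BSSID and either refreshes the matching entry or appends a new one, while del_pmksa() copies the remaining entries down over the deleted slot before the list is pushed to the firmware as "pmkid_info". A standalone sketch of that add/delete bookkeeping (hypothetical types, no firmware update):

#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES     16
#define ADDR_LEN        6

struct demo_pmkid { unsigned char bssid[ADDR_LEN]; };

struct demo_list {
        int n;
        struct demo_pmkid entry[MAX_ENTRIES];
};

/* Add or refresh an entry keyed by BSSID. */
static int demo_add(struct demo_list *l, const unsigned char *bssid)
{
        int i;

        for (i = 0; i < l->n; i++)
                if (!memcmp(bssid, l->entry[i].bssid, ADDR_LEN))
                        break;
        if (i == MAX_ENTRIES)
                return -1;                      /* cache full */
        memcpy(l->entry[i].bssid, bssid, ADDR_LEN);
        if (i == l->n)
                l->n++;                         /* appended a new entry */
        return 0;
}

/* Delete an entry and close the gap, like the del_pmksa copy-down loop. */
static int demo_del(struct demo_list *l, const unsigned char *bssid)
{
        int i;

        for (i = 0; i < l->n; i++)
                if (!memcmp(bssid, l->entry[i].bssid, ADDR_LEN))
                        break;
        if (i == l->n)
                return -1;                      /* not found */
        for (; i < l->n - 1; i++)
                l->entry[i] = l->entry[i + 1];
        l->n--;
        return 0;
}

int main(void)
{
        struct demo_list l = { 0 };
        unsigned char a[ADDR_LEN] = { 1, 2, 3, 4, 5, 6 };

        demo_add(&l, a);
        printf("after add: n=%d\n", l.n);       /* n=1 */
        demo_del(&l, a);
        printf("after del: n=%d\n", l.n);       /* n=0 */
        return 0;
}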
static s32
-wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
+brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
{
- struct wl_priv *wl = wiphy_to_wl(wiphy);
+ struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
s32 err = 0;
WL_TRACE("Enter\n");
CHECK_SYS_UP();
- memset(wl->pmk_list, 0, sizeof(*wl->pmk_list));
- err = wl_update_pmklist(dev, wl->pmk_list, err);
+ memset(cfg_priv->pmk_list, 0, sizeof(*cfg_priv->pmk_list));
+ err = brcmf_update_pmklist(dev, cfg_priv->pmk_list, err);
WL_TRACE("Exit\n");
return err;
@@ -2286,31 +2285,31 @@ wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
}
static struct cfg80211_ops wl_cfg80211_ops = {
- .change_virtual_intf = wl_cfg80211_change_iface,
- .scan = wl_cfg80211_scan,
- .set_wiphy_params = wl_cfg80211_set_wiphy_params,
- .join_ibss = wl_cfg80211_join_ibss,
- .leave_ibss = wl_cfg80211_leave_ibss,
- .get_station = wl_cfg80211_get_station,
- .set_tx_power = wl_cfg80211_set_tx_power,
- .get_tx_power = wl_cfg80211_get_tx_power,
- .add_key = wl_cfg80211_add_key,
- .del_key = wl_cfg80211_del_key,
- .get_key = wl_cfg80211_get_key,
- .set_default_key = wl_cfg80211_config_default_key,
- .set_default_mgmt_key = wl_cfg80211_config_default_mgmt_key,
- .set_power_mgmt = wl_cfg80211_set_power_mgmt,
- .set_bitrate_mask = wl_cfg80211_set_bitrate_mask,
- .connect = wl_cfg80211_connect,
- .disconnect = wl_cfg80211_disconnect,
- .suspend = wl_cfg80211_suspend,
- .resume = wl_cfg80211_resume,
- .set_pmksa = wl_cfg80211_set_pmksa,
- .del_pmksa = wl_cfg80211_del_pmksa,
- .flush_pmksa = wl_cfg80211_flush_pmksa
+ .change_virtual_intf = brcmf_cfg80211_change_iface,
+ .scan = brcmf_cfg80211_scan,
+ .set_wiphy_params = brcmf_cfg80211_set_wiphy_params,
+ .join_ibss = brcmf_cfg80211_join_ibss,
+ .leave_ibss = brcmf_cfg80211_leave_ibss,
+ .get_station = brcmf_cfg80211_get_station,
+ .set_tx_power = brcmf_cfg80211_set_tx_power,
+ .get_tx_power = brcmf_cfg80211_get_tx_power,
+ .add_key = brcmf_cfg80211_add_key,
+ .del_key = brcmf_cfg80211_del_key,
+ .get_key = brcmf_cfg80211_get_key,
+ .set_default_key = brcmf_cfg80211_config_default_key,
+ .set_default_mgmt_key = brcmf_cfg80211_config_default_mgmt_key,
+ .set_power_mgmt = brcmf_cfg80211_set_power_mgmt,
+ .set_bitrate_mask = brcmf_cfg80211_set_bitrate_mask,
+ .connect = brcmf_cfg80211_connect,
+ .disconnect = brcmf_cfg80211_disconnect,
+ .suspend = brcmf_cfg80211_suspend,
+ .resume = brcmf_cfg80211_resume,
+ .set_pmksa = brcmf_cfg80211_set_pmksa,
+ .del_pmksa = brcmf_cfg80211_del_pmksa,
+ .flush_pmksa = brcmf_cfg80211_flush_pmksa
};
-static s32 wl_mode_to_nl80211_iftype(s32 mode)
+static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
{
s32 err = 0;
@@ -2326,7 +2325,7 @@ static s32 wl_mode_to_nl80211_iftype(s32 mode)
return err;
}
-static struct wireless_dev *wl_alloc_wdev(s32 sizeof_iface,
+static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface,
struct device *dev)
{
struct wireless_dev *wdev;
@@ -2338,7 +2337,8 @@ static struct wireless_dev *wl_alloc_wdev(s32 sizeof_iface,
return ERR_PTR(-ENOMEM);
}
wdev->wiphy =
- wiphy_new(&wl_cfg80211_ops, sizeof(struct wl_priv) + sizeof_iface);
+ wiphy_new(&wl_cfg80211_ops,
+ sizeof(struct brcmf_cfg80211_priv) + sizeof_iface);
if (unlikely(!wdev->wiphy)) {
WL_ERR("Couldn not allocate wiphy device\n");
err = -ENOMEM;
@@ -2384,9 +2384,9 @@ wiphy_new_out:
return ERR_PTR(err);
}
-static void wl_free_wdev(struct wl_priv *wl)
+static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wireless_dev *wdev = wl_to_wdev(wl);
+ struct wireless_dev *wdev = cfg_to_wdev(cfg_priv);
if (unlikely(!wdev)) {
WL_ERR("wdev is invalid\n");
@@ -2395,18 +2395,18 @@ static void wl_free_wdev(struct wl_priv *wl)
wiphy_unregister(wdev->wiphy);
wiphy_free(wdev->wiphy);
kfree(wdev);
- wl_to_wdev(wl) = NULL;
+ cfg_to_wdev(cfg_priv) = NULL;
}
-static s32 wl_inform_bss(struct wl_priv *wl)
+static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wl_scan_results *bss_list;
- struct wl_bss_info *bi = NULL; /* must be initialized */
+ struct brcmf_scan_results *bss_list;
+ struct brcmf_bss_info *bi = NULL; /* must be initialized */
s32 err = 0;
int i;
- bss_list = wl->bss_list;
- if (unlikely(bss_list->version != WL_BSS_INFO_VERSION)) {
+ bss_list = cfg_priv->bss_list;
+ if (unlikely(bss_list->version != BRCMF_BSS_INFO_VERSION)) {
WL_ERR("Version %d != WL_BSS_INFO_VERSION\n",
bss_list->version);
return -EOPNOTSUPP;
@@ -2414,7 +2414,7 @@ static s32 wl_inform_bss(struct wl_priv *wl)
WL_SCAN("scanned AP count (%d)\n", bss_list->count);
bi = next_bss(bss_list, bi);
for_each_bss(bss_list, bi, i) {
- err = wl_inform_single_bss(wl, bi);
+ err = brcmf_inform_single_bss(cfg_priv, bi);
if (unlikely(err))
break;
}
@@ -2422,9 +2422,10 @@ static s32 wl_inform_bss(struct wl_priv *wl)
}
-static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
+static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
+ struct brcmf_bss_info *bi)
{
- struct wiphy *wiphy = wl_to_wiphy(wl);
+ struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
struct ieee80211_channel *notify_channel;
struct cfg80211_bss *bss;
struct ieee80211_supported_band *band;
@@ -2482,12 +2483,12 @@ static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
return err;
}
-static s32
-wl_inform_ibss(struct wl_priv *wl, struct net_device *dev, const u8 *bssid)
+static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *dev, const u8 *bssid)
{
- struct wiphy *wiphy = wl_to_wiphy(wl);
+ struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
struct ieee80211_channel *notify_channel;
- struct wl_bss_info *bi = NULL;
+ struct brcmf_bss_info *bi = NULL;
struct ieee80211_supported_band *band;
u8 *buf = NULL;
s32 err = 0;
@@ -2511,13 +2512,13 @@ wl_inform_ibss(struct wl_priv *wl, struct net_device *dev, const u8 *bssid)
*(u32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
- err = wl_dev_ioctl(dev, WLC_GET_BSS_INFO, buf, WL_BSS_INFO_MAX);
+ err = brcmf_dev_ioctl(dev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX);
if (unlikely(err)) {
WL_ERR("WLC_GET_BSS_INFO failed: %d\n", err);
goto CleanUp;
}
- bi = (wl_bss_info_t *)(buf + 4);
+ bi = (struct brcmf_bss_info *)(buf + 4);
channel = bi->ctl_ch ? bi->ctl_ch :
CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
@@ -2556,45 +2557,48 @@ CleanUp:
return err;
}
-static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e)
+static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv,
+ const struct brcmf_event_msg *e)
{
u32 event = be32_to_cpu(e->event_type);
u32 status = be32_to_cpu(e->status);
- if (event == WLC_E_SET_SSID && status == WLC_E_STATUS_SUCCESS) {
+ if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
WL_CONN("Processing set ssid\n");
- wl->link_up = true;
+ cfg_priv->link_up = true;
return true;
}
return false;
}
-static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e)
+static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv,
+ const struct brcmf_event_msg *e)
{
u32 event = be32_to_cpu(e->event_type);
u16 flags = be16_to_cpu(e->flags);
- if (event == WLC_E_LINK && (!(flags & WLC_EVENT_MSG_LINK))) {
+ if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) {
WL_CONN("Processing link down\n");
return true;
}
return false;
}
-static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e)
+static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv,
+ const struct brcmf_event_msg *e)
{
u32 event = be32_to_cpu(e->event_type);
u32 status = be32_to_cpu(e->status);
- u16 flags = be16_to_cpu(e->flags);
- if (event == WLC_E_LINK && status == WLC_E_STATUS_NO_NETWORKS) {
+ if (event == BRCMF_E_LINK && status == BRCMF_E_STATUS_NO_NETWORKS) {
WL_CONN("Processing Link %s & no network found\n",
- flags & WLC_EVENT_MSG_LINK ? "up" : "down");
+ be16_to_cpu(e->flags) & BRCMF_EVENT_MSG_LINK ?
+ "up" : "down");
return true;
}
- if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS) {
+ if (event == BRCMF_E_SET_SSID && status != BRCMF_E_STATUS_SUCCESS) {
WL_CONN("Processing connecting & no network found\n");
return true;
}
@@ -2603,142 +2607,153 @@ static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e)
}
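brcmf_is_linkup(), brcmf_is_linkdown() and brcmf_is_nonetwork() classify firmware events by decoding the big-endian event_type/status/flags fields with be32_to_cpu()/be16_to_cpu() before comparing them against the BRCMF_E_* constants. A standalone sketch of decoding such a wire-format header (hypothetical struct and constant values, using glibc's be32toh()/be16toh() from <endian.h>):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical wire-format event header: all fields big-endian, as the
 * be32_to_cpu()/be16_to_cpu() calls above imply for firmware events. */
struct demo_event {
        uint32_t event_type;
        uint32_t status;
        uint16_t flags;
} __attribute__((packed));

#define DEMO_E_LINK     16      /* illustrative value, not BRCMF_E_LINK */
#define DEMO_MSG_LINK   0x01

static int demo_is_linkdown(const struct demo_event *e)
{
        return be32toh(e->event_type) == DEMO_E_LINK &&
               !(be16toh(e->flags) & DEMO_MSG_LINK);
}

int main(void)
{
        struct demo_event e = {
                .event_type = htobe32(DEMO_E_LINK),
                .status = 0,
                .flags = htobe16(0),            /* link bit clear -> down */
        };

        printf("linkdown=%d\n", demo_is_linkdown(&e));  /* 1 */
        return 0;
}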
static s32
-wl_notify_connect_status(struct wl_priv *wl, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
+brcmf_notify_connect_status(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data)
{
s32 err = 0;
- if (wl_is_linkup(wl, e)) {
+ if (brcmf_is_linkup(cfg_priv, e)) {
WL_CONN("Linkup\n");
- if (wl_is_ibssmode(wl)) {
- wl_update_prof(wl, NULL, (void *)e->addr,
+ if (brcmf_is_ibssmode(cfg_priv)) {
+ brcmf_update_prof(cfg_priv, NULL, (void *)e->addr,
WL_PROF_BSSID);
- wl_inform_ibss(wl, ndev, e->addr);
+ wl_inform_ibss(cfg_priv, ndev, e->addr);
cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
- clear_bit(WL_STATUS_CONNECTING, &wl->status);
- set_bit(WL_STATUS_CONNECTED, &wl->status);
+ clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
+ set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
} else
- wl_bss_connect_done(wl, ndev, e, data, true);
- } else if (wl_is_linkdown(wl, e)) {
+ brcmf_bss_connect_done(cfg_priv, ndev, e, data, true);
+ } else if (brcmf_is_linkdown(cfg_priv, e)) {
WL_CONN("Linkdown\n");
- if (wl_is_ibssmode(wl)) {
+ if (brcmf_is_ibssmode(cfg_priv)) {
+ clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
if (test_and_clear_bit(WL_STATUS_CONNECTED,
- &wl->status))
- wl_link_down(wl);
+ &cfg_priv->status))
+ brcmf_link_down(cfg_priv);
} else {
+ brcmf_bss_connect_done(cfg_priv, ndev, e, data, false);
if (test_and_clear_bit(WL_STATUS_CONNECTED,
- &wl->status)) {
+ &cfg_priv->status)) {
cfg80211_disconnected(ndev, 0, NULL, 0,
GFP_KERNEL);
- wl_link_down(wl);
+ brcmf_link_down(cfg_priv);
}
}
- wl_init_prof(wl->profile);
- } else if (wl_is_nonetwork(wl, e)) {
- if (wl_is_ibssmode(wl))
- clear_bit(WL_STATUS_CONNECTING, &wl->status);
+ brcmf_init_prof(cfg_priv->profile);
+ } else if (brcmf_is_nonetwork(cfg_priv, e)) {
+ if (brcmf_is_ibssmode(cfg_priv))
+ clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
else
- wl_bss_connect_done(wl, ndev, e, data, false);
+ brcmf_bss_connect_done(cfg_priv, ndev, e, data, false);
}
return err;
}
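The three predicates above normalize the firmware's big-endian event fields before comparing them; a minimal sketch of that decode-then-classify step, assuming only the brcmf_event_msg fields already dereferenced in this hunk (event_type, status, flags) — the enum and function name are illustrative, not driver API:
/* Sketch only: classify a firmware link event the way the helpers above do. */
enum example_link_change { EX_LINK_NONE, EX_LINK_UP, EX_LINK_DOWN };
static enum example_link_change
example_classify(const struct brcmf_event_msg *e)
{
	u32 event = be32_to_cpu(e->event_type);	/* firmware fields are BE */
	u32 status = be32_to_cpu(e->status);
	u16 flags = be16_to_cpu(e->flags);
	if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS)
		return EX_LINK_UP;
	if (event == BRCMF_E_LINK && !(flags & BRCMF_EVENT_MSG_LINK))
		return EX_LINK_DOWN;
	return EX_LINK_NONE;
}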
static s32
-wl_notify_roaming_status(struct wl_priv *wl, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
+brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data)
{
s32 err = 0;
u32 event = be32_to_cpu(e->event_type);
u32 status = be32_to_cpu(e->status);
- if (event == WLC_E_ROAM && status == WLC_E_STATUS_SUCCESS) {
- if (test_bit(WL_STATUS_CONNECTED, &wl->status))
- wl_bss_roaming_done(wl, ndev, e, data);
+ if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) {
+ if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status))
+ brcmf_bss_roaming_done(cfg_priv, ndev, e, data);
else
- wl_bss_connect_done(wl, ndev, e, data, true);
+ brcmf_bss_connect_done(cfg_priv, ndev, e, data, true);
}
return err;
}
static __used s32
-wl_dev_bufvar_set(struct net_device *dev, s8 *name, s8 *buf, s32 len)
+brcmf_dev_bufvar_set(struct net_device *dev, s8 *name, s8 *buf, s32 len)
{
- struct wl_priv *wl = ndev_to_wl(dev);
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(dev);
u32 buflen;
- buflen = bcm_mkiovar(name, buf, len, wl->ioctl_buf, WL_IOCTL_LEN_MAX);
+ buflen = brcmu_mkiovar(name, buf, len, cfg_priv->ioctl_buf,
+ WL_IOCTL_LEN_MAX);
BUG_ON(!buflen);
- return wl_dev_ioctl(dev, WLC_SET_VAR, wl->ioctl_buf, buflen);
+ return brcmf_dev_ioctl(dev, BRCMF_C_SET_VAR, cfg_priv->ioctl_buf,
+ buflen);
}
static s32
-wl_dev_bufvar_get(struct net_device *dev, s8 *name, s8 *buf,
+brcmf_dev_bufvar_get(struct net_device *dev, s8 *name, s8 *buf,
s32 buf_len)
{
- struct wl_priv *wl = ndev_to_wl(dev);
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(dev);
u32 len;
s32 err = 0;
- len = bcm_mkiovar(name, NULL, 0, wl->ioctl_buf, WL_IOCTL_LEN_MAX);
+ len = brcmu_mkiovar(name, NULL, 0, cfg_priv->ioctl_buf,
+ WL_IOCTL_LEN_MAX);
BUG_ON(!len);
- err = wl_dev_ioctl(dev, WLC_GET_VAR, (void *)wl->ioctl_buf,
+ err = brcmf_dev_ioctl(dev, BRCMF_C_GET_VAR, (void *)cfg_priv->ioctl_buf,
WL_IOCTL_LEN_MAX);
if (unlikely(err)) {
WL_ERR("error (%d)\n", err);
return err;
}
- memcpy(buf, wl->ioctl_buf, buf_len);
+ memcpy(buf, cfg_priv->ioctl_buf, buf_len);
return err;
}
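brcmf_dev_bufvar_set/get above wrap the firmware "iovar" interface: brcmu_mkiovar() packs a NUL-terminated variable name plus payload into one buffer, which is then written with BRCMF_C_SET_VAR or read back with BRCMF_C_GET_VAR. A hedged caller-side sketch for a 32-bit variable; the "mpc" name and the buffer size are illustrative assumptions:
/* Sketch only: set one s32 firmware variable through the iovar path. */
static s32 example_set_int_iovar(struct net_device *ndev, s32 val)
{
	s8 iovbuf[32];
	u32 len;
	val = cpu_to_le32(val);		/* dongle expects little-endian */
	len = brcmu_mkiovar("mpc", (char *)&val, sizeof(val),
			    iovbuf, sizeof(iovbuf));
	if (!len)
		return -EINVAL;		/* name + value did not fit */
	return brcmf_dev_ioctl(ndev, BRCMF_C_SET_VAR, iovbuf, len);
}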
-static s32 wl_get_assoc_ies(struct wl_priv *wl)
+static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct net_device *ndev = wl_to_ndev(wl);
- struct wl_assoc_ielen *assoc_info;
- struct wl_connect_info *conn_info = wl_to_conn(wl);
+ struct net_device *ndev = cfg_to_ndev(cfg_priv);
+ struct brcmf_cfg80211_assoc_ielen *assoc_info;
+ struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
u32 req_len;
u32 resp_len;
s32 err = 0;
- wl_clear_assoc_ies(wl);
+ brcmf_clear_assoc_ies(cfg_priv);
- err = wl_dev_bufvar_get(ndev, "assoc_info", wl->extra_buf,
+ err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg_priv->extra_buf,
WL_ASSOC_INFO_MAX);
if (unlikely(err)) {
WL_ERR("could not get assoc info (%d)\n", err);
return err;
}
- assoc_info = (struct wl_assoc_ielen *)wl->extra_buf;
+ assoc_info = (struct brcmf_cfg80211_assoc_ielen *)cfg_priv->extra_buf;
req_len = assoc_info->req_len;
resp_len = assoc_info->resp_len;
if (req_len) {
- err = wl_dev_bufvar_get(ndev, "assoc_req_ies", wl->extra_buf,
- WL_ASSOC_INFO_MAX);
+ err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies",
+ cfg_priv->extra_buf,
+ WL_ASSOC_INFO_MAX);
if (unlikely(err)) {
WL_ERR("could not get assoc req (%d)\n", err);
return err;
}
conn_info->req_ie_len = req_len;
conn_info->req_ie =
- kmemdup(wl->extra_buf, conn_info->req_ie_len, GFP_KERNEL);
+ kmemdup(cfg_priv->extra_buf, conn_info->req_ie_len,
+ GFP_KERNEL);
} else {
conn_info->req_ie_len = 0;
conn_info->req_ie = NULL;
}
if (resp_len) {
- err = wl_dev_bufvar_get(ndev, "assoc_resp_ies", wl->extra_buf,
- WL_ASSOC_INFO_MAX);
+ err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies",
+ cfg_priv->extra_buf,
+ WL_ASSOC_INFO_MAX);
if (unlikely(err)) {
WL_ERR("could not get assoc resp (%d)\n", err);
return err;
}
conn_info->resp_ie_len = resp_len;
conn_info->resp_ie =
- kmemdup(wl->extra_buf, conn_info->resp_ie_len, GFP_KERNEL);
+ kmemdup(cfg_priv->extra_buf, conn_info->resp_ie_len,
+ GFP_KERNEL);
} else {
conn_info->resp_ie_len = 0;
conn_info->resp_ie = NULL;
@@ -2749,9 +2764,9 @@ static s32 wl_get_assoc_ies(struct wl_priv *wl)
return err;
}
-static void wl_clear_assoc_ies(struct wl_priv *wl)
+static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wl_connect_info *conn_info = wl_to_conn(wl);
+ struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
kfree(conn_info->req_ie);
conn_info->req_ie = NULL;
@@ -2762,7 +2777,7 @@ static void wl_clear_assoc_ies(struct wl_priv *wl)
}
-static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params,
+static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
size_t *join_params_size)
{
chanspec_t chanspec = 0;
@@ -2779,7 +2794,7 @@ static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params,
chanspec |= WL_CHANSPEC_BW_20;
chanspec |= WL_CHANSPEC_CTL_SB_NONE;
- *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+ *join_params_size += BRCMF_ASSOC_PARAMS_FIXED_SIZE +
join_params->params.chanspec_num * sizeof(chanspec_t);
join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
@@ -2796,11 +2811,11 @@ static void wl_ch_to_chanspec(int ch, struct wl_join_params *join_params,
}
}
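A chanspec is a packed 16-bit value: the channel number sits in the bits covered by WL_CHANSPEC_CHAN_MASK and the remaining bits carry bandwidth/sideband flags, as the masking above shows. A hedged sketch of packing a plain 20 MHz chanspec; the band flag is deliberately left out because it is not visible in this hunk:
/* Sketch, not the driver's helper: pack a 20 MHz chanspec using only the
 * macros referenced above. Real code also ORs in a 2G/5G band flag.
 */
static chanspec_t example_chanspec_20(u32 ch)
{
	chanspec_t cs = ch & WL_CHANSPEC_CHAN_MASK;	/* channel number */
	cs |= WL_CHANSPEC_BW_20;	/* 20 MHz bandwidth    */
	cs |= WL_CHANSPEC_CTL_SB_NONE;	/* no control sideband */
	return cs;
}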
-static s32 wl_update_bss_info(struct wl_priv *wl)
+static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wl_bss_info *bi;
- struct wlc_ssid *ssid;
- struct bcm_tlv *tim;
+ struct brcmf_bss_info *bi;
+ struct brcmf_ssid *ssid;
+ struct brcmu_tlv *tim;
u16 beacon_interval;
u8 dtim_period;
size_t ie_len;
@@ -2808,21 +2823,21 @@ static s32 wl_update_bss_info(struct wl_priv *wl)
s32 err = 0;
WL_TRACE("Enter\n");
- if (wl_is_ibssmode(wl))
+ if (brcmf_is_ibssmode(cfg_priv))
return err;
- ssid = (struct wlc_ssid *)wl_read_prof(wl, WL_PROF_SSID);
+ ssid = (struct brcmf_ssid *)brcmf_read_prof(cfg_priv, WL_PROF_SSID);
- *(u32 *)wl->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
- err = wl_dev_ioctl(wl_to_ndev(wl), WLC_GET_BSS_INFO,
- wl->extra_buf, WL_EXTRA_BUF_MAX);
+ *(u32 *)cfg_priv->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
+ err = brcmf_dev_ioctl(cfg_to_ndev(cfg_priv), BRCMF_C_GET_BSS_INFO,
+ cfg_priv->extra_buf, WL_EXTRA_BUF_MAX);
if (unlikely(err)) {
WL_ERR("Could not get bss info %d\n", err);
goto update_bss_info_out;
}
- bi = (struct wl_bss_info *)(wl->extra_buf + 4);
- err = wl_inform_single_bss(wl, bi);
+ bi = (struct brcmf_bss_info *)(cfg_priv->extra_buf + 4);
+ err = brcmf_inform_single_bss(cfg_priv, bi);
if (unlikely(err))
goto update_bss_info_out;
@@ -2830,7 +2845,7 @@ static s32 wl_update_bss_info(struct wl_priv *wl)
ie_len = bi->ie_length;
beacon_interval = cpu_to_le16(bi->beacon_period);
- tim = bcm_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
+ tim = brcmu_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
if (tim)
dtim_period = tim->data[1];
else {
@@ -2840,7 +2855,8 @@ static s32 wl_update_bss_info(struct wl_priv *wl)
* so we specifically query dtim information from the dongle.
*/
u32 var;
- err = wl_dev_intvar_get(wl_to_ndev(wl), "dtim_assoc", &var);
+ err = brcmf_dev_intvar_get(cfg_to_ndev(cfg_priv),
+ "dtim_assoc", &var);
if (unlikely(err)) {
WL_ERR("wl dtim_assoc failed (%d)\n", err);
goto update_bss_info_out;
@@ -2848,8 +2864,8 @@ static s32 wl_update_bss_info(struct wl_priv *wl)
dtim_period = (u8)var;
}
- wl_update_prof(wl, NULL, &beacon_interval, WL_PROF_BEACONINT);
- wl_update_prof(wl, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+ brcmf_update_prof(cfg_priv, NULL, &beacon_interval, WL_PROF_BEACONINT);
+ brcmf_update_prof(cfg_priv, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
update_bss_info_out:
WL_TRACE("Exit");
@@ -2857,54 +2873,59 @@ update_bss_info_out:
}
static s32
-wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
+brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data)
{
- struct wl_connect_info *conn_info = wl_to_conn(wl);
+ struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
s32 err = 0;
WL_TRACE("Enter\n");
- wl_get_assoc_ies(wl);
- wl_update_prof(wl, NULL, &e->addr, WL_PROF_BSSID);
- wl_update_bss_info(wl);
+ brcmf_get_assoc_ies(cfg_priv);
+ brcmf_update_prof(cfg_priv, NULL, &e->addr, WL_PROF_BSSID);
+ brcmf_update_bss_info(cfg_priv);
cfg80211_roamed(ndev, NULL,
- (u8 *)wl_read_prof(wl, WL_PROF_BSSID),
+ (u8 *)brcmf_read_prof(cfg_priv, WL_PROF_BSSID),
conn_info->req_ie, conn_info->req_ie_len,
conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
WL_CONN("Report roaming result\n");
- set_bit(WL_STATUS_CONNECTED, &wl->status);
+ set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
WL_TRACE("Exit\n");
return err;
}
static s32
-wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
- const wl_event_msg_t *e, void *data, bool completed)
+brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev, const struct brcmf_event_msg *e,
+ void *data, bool completed)
{
- struct wl_connect_info *conn_info = wl_to_conn(wl);
+ struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv);
s32 err = 0;
WL_TRACE("Enter\n");
- if (test_and_clear_bit(WL_STATUS_CONNECTING, &wl->status)) {
+ if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) {
if (completed) {
- wl_get_assoc_ies(wl);
- wl_update_prof(wl, NULL, &e->addr, WL_PROF_BSSID);
- wl_update_bss_info(wl);
+ brcmf_get_assoc_ies(cfg_priv);
+ brcmf_update_prof(cfg_priv, NULL, &e->addr,
+ WL_PROF_BSSID);
+ brcmf_update_bss_info(cfg_priv);
}
cfg80211_connect_result(ndev,
- (u8 *)wl_read_prof(wl, WL_PROF_BSSID),
+ (u8 *)brcmf_read_prof(cfg_priv,
+ WL_PROF_BSSID),
conn_info->req_ie,
conn_info->req_ie_len,
conn_info->resp_ie,
conn_info->resp_ie_len,
- completed ? WLAN_STATUS_SUCCESS : WLAN_STATUS_AUTH_TIMEOUT,
+ completed ? WLAN_STATUS_SUCCESS :
+ WLAN_STATUS_AUTH_TIMEOUT,
GFP_KERNEL);
if (completed)
- set_bit(WL_STATUS_CONNECTED, &wl->status);
+ set_bit(WL_STATUS_CONNECTED, &cfg_priv->status);
WL_CONN("Report connect result - connection %s\n",
completed ? "succeeded" : "failed");
}
@@ -2913,14 +2934,15 @@ wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
}
static s32
-wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
+brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data)
{
u16 flags = be16_to_cpu(e->flags);
enum nl80211_key_type key_type;
rtnl_lock();
- if (flags & WLC_EVENT_MSG_GROUP)
+ if (flags & BRCMF_EVENT_MSG_GROUP)
key_type = NL80211_KEYTYPE_GROUP;
else
key_type = NL80211_KEYTYPE_PAIRWISE;
@@ -2933,30 +2955,32 @@ wl_notify_mic_status(struct wl_priv *wl, struct net_device *ndev,
}
static s32
-wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
+brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv,
+ struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data)
{
- struct channel_info channel_inform;
- struct wl_scan_results *bss_list;
+ struct brcmf_channel_info channel_inform;
+ struct brcmf_scan_results *bss_list;
u32 len = WL_SCAN_BUF_MAX;
s32 err = 0;
bool scan_abort = false;
WL_TRACE("Enter\n");
- if (wl->iscan_on && wl->iscan_kickstart) {
+ if (cfg_priv->iscan_on && cfg_priv->iscan_kickstart) {
WL_TRACE("Exit\n");
- return wl_wakeup_iscan(wl_to_iscan(wl));
+ return brcmf_wakeup_iscan(cfg_to_iscan(cfg_priv));
}
- if (unlikely(!test_and_clear_bit(WL_STATUS_SCANNING, &wl->status))) {
+ if (unlikely(!test_and_clear_bit(WL_STATUS_SCANNING,
+ &cfg_priv->status))) {
WL_ERR("Scan complete while device not scanning\n");
scan_abort = true;
err = -EINVAL;
goto scan_done_out;
}
- err = wl_dev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform,
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_GET_CHANNEL, &channel_inform,
sizeof(channel_inform));
if (unlikely(err)) {
WL_ERR("scan busy (%d)\n", err);
@@ -2969,12 +2993,12 @@ wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
WL_CONN("channel_inform.scan_channel (%d)\n",
channel_inform.scan_channel);
}
- wl->bss_list = wl->scan_results;
- bss_list = wl->bss_list;
+ cfg_priv->bss_list = cfg_priv->scan_results;
+ bss_list = cfg_priv->bss_list;
memset(bss_list, 0, len);
bss_list->buflen = cpu_to_le32(len);
- err = wl_dev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len);
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_SCAN_RESULTS, bss_list, len);
if (unlikely(err)) {
WL_ERR("%s Scan_results error (%d)\n", ndev->name, err);
err = -EINVAL;
@@ -2985,18 +3009,18 @@ wl_notify_scan_status(struct wl_priv *wl, struct net_device *ndev,
bss_list->version = le32_to_cpu(bss_list->version);
bss_list->count = le32_to_cpu(bss_list->count);
- err = wl_inform_bss(wl);
+ err = brcmf_inform_bss(cfg_priv);
if (err) {
scan_abort = true;
goto scan_done_out;
}
scan_done_out:
- if (wl->scan_request) {
+ if (cfg_priv->scan_request) {
WL_SCAN("calling cfg80211_scan_done\n");
- cfg80211_scan_done(wl->scan_request, scan_abort);
- wl_set_mpc(ndev, 1);
- wl->scan_request = NULL;
+ cfg80211_scan_done(cfg_priv->scan_request, scan_abort);
+ brcmf_set_mpc(ndev, 1);
+ cfg_priv->scan_request = NULL;
}
WL_TRACE("Exit\n");
@@ -3004,7 +3028,7 @@ scan_done_out:
return err;
}
-static void wl_init_conf(struct wl_conf *conf)
+static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf)
{
conf->mode = (u32)-1;
conf->frag_threshold = (u32)-1;
@@ -3014,70 +3038,66 @@ static void wl_init_conf(struct wl_conf *conf)
conf->tx_power = -1;
}
-static void wl_init_prof(struct wl_profile *prof)
+static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof)
{
memset(prof, 0, sizeof(*prof));
}
-static void wl_init_eloop_handler(struct wl_event_loop *el)
+static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el)
{
memset(el, 0, sizeof(*el));
- el->handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
- el->handler[WLC_E_LINK] = wl_notify_connect_status;
- el->handler[WLC_E_ROAM] = wl_notify_roaming_status;
- el->handler[WLC_E_MIC_ERROR] = wl_notify_mic_status;
- el->handler[WLC_E_SET_SSID] = wl_notify_connect_status;
+ el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status;
+ el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status;
+ el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status;
+ el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status;
+ el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status;
}
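The event loop is a plain table of handlers indexed by firmware event code; brcmf_event_handler() below dequeues events and calls el.handler[e->etype]. A hedged sketch of that dispatch step (the bounds check is an added assumption; the table must be sized past the highest BRCMF_E_* value registered here):
/* Sketch of the table-driven dispatch performed by the event thread. */
static void example_dispatch(struct brcmf_cfg80211_priv *cfg_priv,
			     struct brcmf_cfg80211_event_q *e)
{
	struct brcmf_cfg80211_event_loop *el = &cfg_priv->el;
	if (e->etype < ARRAY_SIZE(el->handler) && el->handler[e->etype])
		el->handler[e->etype](cfg_priv, cfg_to_ndev(cfg_priv),
				      &e->emsg, e->edata);
}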
-static s32 wl_init_priv_mem(struct wl_priv *wl)
+static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_priv *cfg_priv)
{
- wl->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
- if (unlikely(!wl->scan_results)) {
+ cfg_priv->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+ if (unlikely(!cfg_priv->scan_results)) {
WL_ERR("Scan results alloc failed\n");
goto init_priv_mem_out;
}
- wl->conf = kzalloc(sizeof(*wl->conf), GFP_KERNEL);
- if (unlikely(!wl->conf)) {
+ cfg_priv->conf = kzalloc(sizeof(*cfg_priv->conf), GFP_KERNEL);
+ if (unlikely(!cfg_priv->conf)) {
WL_ERR("wl_conf alloc failed\n");
goto init_priv_mem_out;
}
- wl->profile = kzalloc(sizeof(*wl->profile), GFP_KERNEL);
- if (unlikely(!wl->profile)) {
+ cfg_priv->profile = kzalloc(sizeof(*cfg_priv->profile), GFP_KERNEL);
+ if (unlikely(!cfg_priv->profile)) {
WL_ERR("wl_profile alloc failed\n");
goto init_priv_mem_out;
}
- wl->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
- if (unlikely(!wl->bss_info)) {
+ cfg_priv->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
+ if (unlikely(!cfg_priv->bss_info)) {
WL_ERR("Bss information alloc failed\n");
goto init_priv_mem_out;
}
- wl->scan_req_int = kzalloc(sizeof(*wl->scan_req_int), GFP_KERNEL);
- if (unlikely(!wl->scan_req_int)) {
+ cfg_priv->scan_req_int = kzalloc(sizeof(*cfg_priv->scan_req_int),
+ GFP_KERNEL);
+ if (unlikely(!cfg_priv->scan_req_int)) {
WL_ERR("Scan req alloc failed\n");
goto init_priv_mem_out;
}
- wl->ioctl_buf = kzalloc(WL_IOCTL_LEN_MAX, GFP_KERNEL);
- if (unlikely(!wl->ioctl_buf)) {
+ cfg_priv->ioctl_buf = kzalloc(WL_IOCTL_LEN_MAX, GFP_KERNEL);
+ if (unlikely(!cfg_priv->ioctl_buf)) {
WL_ERR("Ioctl buf alloc failed\n");
goto init_priv_mem_out;
}
- wl->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
- if (unlikely(!wl->extra_buf)) {
+ cfg_priv->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+ if (unlikely(!cfg_priv->extra_buf)) {
WL_ERR("Extra buf alloc failed\n");
goto init_priv_mem_out;
}
- wl->iscan = kzalloc(sizeof(*wl->iscan), GFP_KERNEL);
- if (unlikely(!wl->iscan)) {
+ cfg_priv->iscan = kzalloc(sizeof(*cfg_priv->iscan), GFP_KERNEL);
+ if (unlikely(!cfg_priv->iscan)) {
WL_ERR("Iscan buf alloc failed\n");
goto init_priv_mem_out;
}
- wl->fw = kzalloc(sizeof(*wl->fw), GFP_KERNEL);
- if (unlikely(!wl->fw)) {
- WL_ERR("fw object alloc failed\n");
- goto init_priv_mem_out;
- }
- wl->pmk_list = kzalloc(sizeof(*wl->pmk_list), GFP_KERNEL);
- if (unlikely(!wl->pmk_list)) {
+ cfg_priv->pmk_list = kzalloc(sizeof(*cfg_priv->pmk_list), GFP_KERNEL);
+ if (unlikely(!cfg_priv->pmk_list)) {
WL_ERR("pmk list alloc failed\n");
goto init_priv_mem_out;
}
@@ -3085,61 +3105,60 @@ static s32 wl_init_priv_mem(struct wl_priv *wl)
return 0;
init_priv_mem_out:
- wl_deinit_priv_mem(wl);
+ brcmf_deinit_priv_mem(cfg_priv);
return -ENOMEM;
}
-static void wl_deinit_priv_mem(struct wl_priv *wl)
-{
- kfree(wl->scan_results);
- wl->scan_results = NULL;
- kfree(wl->bss_info);
- wl->bss_info = NULL;
- kfree(wl->conf);
- wl->conf = NULL;
- kfree(wl->profile);
- wl->profile = NULL;
- kfree(wl->scan_req_int);
- wl->scan_req_int = NULL;
- kfree(wl->ioctl_buf);
- wl->ioctl_buf = NULL;
- kfree(wl->extra_buf);
- wl->extra_buf = NULL;
- kfree(wl->iscan);
- wl->iscan = NULL;
- kfree(wl->fw);
- wl->fw = NULL;
- kfree(wl->pmk_list);
- wl->pmk_list = NULL;
-}
-
-static s32 wl_create_event_handler(struct wl_priv *wl)
-{
- sema_init(&wl->event_sync, 0);
- wl->event_tsk = kthread_run(wl_event_handler, wl, "wl_event_handler");
- if (IS_ERR(wl->event_tsk)) {
- wl->event_tsk = NULL;
+static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ kfree(cfg_priv->scan_results);
+ cfg_priv->scan_results = NULL;
+ kfree(cfg_priv->bss_info);
+ cfg_priv->bss_info = NULL;
+ kfree(cfg_priv->conf);
+ cfg_priv->conf = NULL;
+ kfree(cfg_priv->profile);
+ cfg_priv->profile = NULL;
+ kfree(cfg_priv->scan_req_int);
+ cfg_priv->scan_req_int = NULL;
+ kfree(cfg_priv->ioctl_buf);
+ cfg_priv->ioctl_buf = NULL;
+ kfree(cfg_priv->extra_buf);
+ cfg_priv->extra_buf = NULL;
+ kfree(cfg_priv->iscan);
+ cfg_priv->iscan = NULL;
+ kfree(cfg_priv->pmk_list);
+ cfg_priv->pmk_list = NULL;
+}
+
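Every allocation failure in brcmf_init_priv_mem() funnels to one label that calls brcmf_deinit_priv_mem(), which also works for partial failures because the private structure starts zeroed and kfree(NULL) is a no-op. A minimal sketch of the same pattern; struct example_priv, SZ_A and SZ_B are hypothetical names:
/* Sketch of the allocate-or-bail pattern above: one cleanup routine
 * covers every partial-failure case, relying on kfree(NULL) being safe.
 */
static s32 example_alloc(struct example_priv *p)
{
	p->a = kzalloc(SZ_A, GFP_KERNEL);
	if (!p->a)
		goto fail;
	p->b = kzalloc(SZ_B, GFP_KERNEL);
	if (!p->b)
		goto fail;
	return 0;
fail:
	kfree(p->b);		/* either pointer may still be NULL */
	p->b = NULL;
	kfree(p->a);
	p->a = NULL;
	return -ENOMEM;
}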
+static s32 brcmf_create_event_handler(struct brcmf_cfg80211_priv *cfg_priv)
+{
+ sema_init(&cfg_priv->event_sync, 0);
+ cfg_priv->event_tsk = kthread_run(brcmf_event_handler, cfg_priv,
+ "wl_event_handler");
+ if (IS_ERR(cfg_priv->event_tsk)) {
+ cfg_priv->event_tsk = NULL;
WL_ERR("failed to create event thread\n");
return -ENOMEM;
}
return 0;
}
-static void wl_destroy_event_handler(struct wl_priv *wl)
+static void brcmf_destroy_event_handler(struct brcmf_cfg80211_priv *cfg_priv)
{
- if (wl->event_tsk) {
- send_sig(SIGTERM, wl->event_tsk, 1);
- kthread_stop(wl->event_tsk);
- wl->event_tsk = NULL;
+ if (cfg_priv->event_tsk) {
+ send_sig(SIGTERM, cfg_priv->event_tsk, 1);
+ kthread_stop(cfg_priv->event_tsk);
+ cfg_priv->event_tsk = NULL;
}
}
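The event thread sleeps on a semaphore; producers up() it, and teardown first sends SIGTERM so a pending down_interruptible() returns, then calls kthread_stop(). A hedged sketch of that lifecycle, detached from the driver's structures (names are illustrative):
/* Sketch of the kthread + semaphore lifecycle used above. */
static int example_thread(void *data)
{
	struct semaphore *sync = data;
	allow_signal(SIGTERM);
	while (!down_interruptible(sync)) {	/* returns non-zero on signal */
		if (kthread_should_stop())
			break;
		/* handle one queued item here */
	}
	return 0;
}
/* creation: tsk = kthread_run(example_thread, &sync, "example");	*/
/* teardown: send_sig(SIGTERM, tsk, 1); kthread_stop(tsk);		*/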
-static void wl_term_iscan(struct wl_priv *wl)
+static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
- if (wl->iscan_on && iscan->tsk) {
+ if (cfg_priv->iscan_on && iscan->tsk) {
iscan->state = WL_ISCAN_STATE_IDLE;
send_sig(SIGTERM, iscan->tsk, 1);
kthread_stop(iscan->tsk);
@@ -3147,26 +3166,28 @@ static void wl_term_iscan(struct wl_priv *wl)
}
}
-static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted)
+static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
+ bool aborted)
{
- struct wl_priv *wl = iscan_to_wl(iscan);
- struct net_device *ndev = wl_to_ndev(wl);
+ struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan);
+ struct net_device *ndev = cfg_to_ndev(cfg_priv);
- if (unlikely(!test_and_clear_bit(WL_STATUS_SCANNING, &wl->status))) {
+ if (unlikely(!test_and_clear_bit(WL_STATUS_SCANNING,
+ &cfg_priv->status))) {
WL_ERR("Scan complete while device not scanning\n");
return;
}
- if (likely(wl->scan_request)) {
+ if (likely(cfg_priv->scan_request)) {
WL_SCAN("ISCAN Completed scan: %s\n",
aborted ? "Aborted" : "Done");
- cfg80211_scan_done(wl->scan_request, aborted);
- wl_set_mpc(ndev, 1);
- wl->scan_request = NULL;
+ cfg80211_scan_done(cfg_priv->scan_request, aborted);
+ brcmf_set_mpc(ndev, 1);
+ cfg_priv->scan_request = NULL;
}
- wl->iscan_kickstart = false;
+ cfg_priv->iscan_kickstart = false;
}
-static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan)
+static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan)
{
if (likely(iscan->state != WL_ISCAN_STATE_IDLE)) {
WL_SCAN("wake up iscan\n");
@@ -3178,25 +3199,25 @@ static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan)
}
static s32
-wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status,
- struct wl_scan_results **bss_list)
+brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status,
+ struct brcmf_scan_results **bss_list)
{
- struct wl_iscan_results list;
- struct wl_scan_results *results;
- struct wl_iscan_results *list_buf;
+ struct brcmf_iscan_results list;
+ struct brcmf_scan_results *results;
+ struct brcmf_iscan_results *list_buf;
s32 err = 0;
memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX);
- list_buf = (struct wl_iscan_results *)iscan->scan_buf;
+ list_buf = (struct brcmf_iscan_results *)iscan->scan_buf;
results = &list_buf->results;
- results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
+ results->buflen = BRCMF_ISCAN_RESULTS_FIXED_SIZE;
results->version = 0;
results->count = 0;
memset(&list, 0, sizeof(list));
list.results.buflen = cpu_to_le32(WL_ISCAN_BUF_MAX);
- err = wl_dev_iovar_getbuf(iscan->dev, "iscanresults", &list,
- WL_ISCAN_RESULTS_FIXED_SIZE, iscan->scan_buf,
+ err = brcmf_dev_iovar_getbuf(iscan->dev, "iscanresults", &list,
+ BRCMF_ISCAN_RESULTS_FIXED_SIZE, iscan->scan_buf,
WL_ISCAN_BUF_MAX);
if (unlikely(err)) {
WL_ERR("error (%d)\n", err);
@@ -3213,23 +3234,23 @@ wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status,
return err;
}
-static s32 wl_iscan_done(struct wl_priv *wl)
+static s32 brcmf_iscan_done(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wl_iscan_ctrl *iscan = wl->iscan;
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
s32 err = 0;
iscan->state = WL_ISCAN_STATE_IDLE;
rtnl_lock();
- wl_inform_bss(wl);
- wl_notify_iscan_complete(iscan, false);
+ brcmf_inform_bss(cfg_priv);
+ brcmf_notify_iscan_complete(iscan, false);
rtnl_unlock();
return err;
}
-static s32 wl_iscan_pending(struct wl_priv *wl)
+static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wl_iscan_ctrl *iscan = wl->iscan;
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
s32 err = 0;
/* Reschedule the timer */
@@ -3239,14 +3260,14 @@ static s32 wl_iscan_pending(struct wl_priv *wl)
return err;
}
-static s32 wl_iscan_inprogress(struct wl_priv *wl)
+static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wl_iscan_ctrl *iscan = wl->iscan;
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
s32 err = 0;
rtnl_lock();
- wl_inform_bss(wl);
- wl_run_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
+ brcmf_inform_bss(cfg_priv);
+ brcmf_run_iscan(iscan, NULL, BRCMF_SCAN_ACTION_CONTINUE);
rtnl_unlock();
/* Reschedule the timer */
mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
@@ -3255,31 +3276,32 @@ static s32 wl_iscan_inprogress(struct wl_priv *wl)
return err;
}
-static s32 wl_iscan_aborted(struct wl_priv *wl)
+static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wl_iscan_ctrl *iscan = wl->iscan;
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan;
s32 err = 0;
iscan->state = WL_ISCAN_STATE_IDLE;
rtnl_lock();
- wl_notify_iscan_complete(iscan, true);
+ brcmf_notify_iscan_complete(iscan, true);
rtnl_unlock();
return err;
}
-static s32 wl_iscan_thread(void *data)
+static s32 brcmf_iscan_thread(void *data)
{
struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
- struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data;
- struct wl_priv *wl = iscan_to_wl(iscan);
- struct wl_iscan_eloop *el = &iscan->el;
+ struct brcmf_cfg80211_iscan_ctrl *iscan =
+ (struct brcmf_cfg80211_iscan_ctrl *)data;
+ struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan);
+ struct brcmf_cfg80211_iscan_eloop *el = &iscan->el;
u32 status;
int err = 0;
sched_setscheduler(current, SCHED_FIFO, &param);
allow_signal(SIGTERM);
- status = WL_SCAN_RESULTS_PARTIAL;
+ status = BRCMF_SCAN_RESULTS_PARTIAL;
while (likely(!down_interruptible(&iscan->sync))) {
if (kthread_should_stop())
break;
@@ -3288,13 +3310,14 @@ static s32 wl_iscan_thread(void *data)
iscan->timer_on = 0;
}
rtnl_lock();
- err = wl_get_iscan_results(iscan, &status, &wl->bss_list);
+ err = brcmf_get_iscan_results(iscan, &status,
+ &cfg_priv->bss_list);
if (unlikely(err)) {
- status = WL_SCAN_RESULTS_ABORTED;
+ status = BRCMF_SCAN_RESULTS_ABORTED;
WL_ERR("Abort iscan\n");
}
rtnl_unlock();
- el->handler[status] (wl);
+ el->handler[status](cfg_priv);
}
if (iscan->timer_on) {
del_timer_sync(&iscan->timer);
@@ -3305,26 +3328,27 @@ static s32 wl_iscan_thread(void *data)
return 0;
}
-static void wl_iscan_timer(unsigned long data)
+static void brcmf_iscan_timer(unsigned long data)
{
- struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data;
+ struct brcmf_cfg80211_iscan_ctrl *iscan =
+ (struct brcmf_cfg80211_iscan_ctrl *)data;
if (iscan) {
iscan->timer_on = 0;
WL_SCAN("timer expired\n");
- wl_wakeup_iscan(iscan);
+ brcmf_wakeup_iscan(iscan);
}
}
-static s32 wl_invoke_iscan(struct wl_priv *wl)
+static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
int err = 0;
- if (wl->iscan_on && !iscan->tsk) {
+ if (cfg_priv->iscan_on && !iscan->tsk) {
iscan->state = WL_ISCAN_STATE_IDLE;
sema_init(&iscan->sync, 0);
- iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
+ iscan->tsk = kthread_run(brcmf_iscan_thread, iscan, "wl_iscan");
if (IS_ERR(iscan->tsk)) {
WL_ERR("Could not create iscan thread\n");
iscan->tsk = NULL;
@@ -3335,228 +3359,228 @@ static s32 wl_invoke_iscan(struct wl_priv *wl)
return err;
}
-static void wl_init_iscan_eloop(struct wl_iscan_eloop *el)
+static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el)
{
memset(el, 0, sizeof(*el));
- el->handler[WL_SCAN_RESULTS_SUCCESS] = wl_iscan_done;
- el->handler[WL_SCAN_RESULTS_PARTIAL] = wl_iscan_inprogress;
- el->handler[WL_SCAN_RESULTS_PENDING] = wl_iscan_pending;
- el->handler[WL_SCAN_RESULTS_ABORTED] = wl_iscan_aborted;
- el->handler[WL_SCAN_RESULTS_NO_MEM] = wl_iscan_aborted;
+ el->handler[BRCMF_SCAN_RESULTS_SUCCESS] = brcmf_iscan_done;
+ el->handler[BRCMF_SCAN_RESULTS_PARTIAL] = brcmf_iscan_inprogress;
+ el->handler[BRCMF_SCAN_RESULTS_PENDING] = brcmf_iscan_pending;
+ el->handler[BRCMF_SCAN_RESULTS_ABORTED] = brcmf_iscan_aborted;
+ el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted;
}
-static s32 wl_init_iscan(struct wl_priv *wl)
+static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
+ struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv);
int err = 0;
- if (wl->iscan_on) {
- iscan->dev = wl_to_ndev(wl);
+ if (cfg_priv->iscan_on) {
+ iscan->dev = cfg_to_ndev(cfg_priv);
iscan->state = WL_ISCAN_STATE_IDLE;
- wl_init_iscan_eloop(&iscan->el);
+ brcmf_init_iscan_eloop(&iscan->el);
iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
init_timer(&iscan->timer);
iscan->timer.data = (unsigned long) iscan;
- iscan->timer.function = wl_iscan_timer;
+ iscan->timer.function = brcmf_iscan_timer;
sema_init(&iscan->sync, 0);
- iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
+ iscan->tsk = kthread_run(brcmf_iscan_thread, iscan, "wl_iscan");
if (IS_ERR(iscan->tsk)) {
WL_ERR("Could not create iscan thread\n");
iscan->tsk = NULL;
return -ENOMEM;
}
- iscan->data = wl;
+ iscan->data = cfg_priv;
}
return err;
}
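The iscan timer period is stored in milliseconds and converted to jiffies by hand (timer_ms * HZ / 1000) whenever the timer is rearmed. A hedged one-liner showing the rearm step; msecs_to_jiffies() is the conventional helper for the same conversion:
/* Sketch of rearming the iscan timer as done in the pending/inprogress
 * handlers above; msecs_to_jiffies(ms) is roughly ms * HZ / 1000.
 */
static void example_rearm(struct timer_list *t, u32 timer_ms)
{
	mod_timer(t, jiffies + msecs_to_jiffies(timer_ms));
}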
-static void wl_init_fw(struct wl_fw_ctrl *fw)
-{
- fw->status = 0; /* init fw loading status.
- 0 means nothing was loaded yet */
-}
-
-static s32 wl_init_priv(struct wl_priv *wl)
+static s32 wl_init_priv(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wiphy *wiphy = wl_to_wiphy(wl);
+ struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
s32 err = 0;
- wl->scan_request = NULL;
- wl->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
- wl->iscan_on = true; /* iscan on & off switch.
+ cfg_priv->scan_request = NULL;
+ cfg_priv->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
+ cfg_priv->iscan_on = true; /* iscan on & off switch.
we enable iscan per default */
- wl->roam_on = false; /* roam on & off switch.
+ cfg_priv->roam_on = false; /* roam on & off switch.
we enable roam per default */
- wl->iscan_kickstart = false;
- wl->active_scan = true; /* we do active scan for
+ cfg_priv->iscan_kickstart = false;
+ cfg_priv->active_scan = true; /* we do active scan for
specific scan per default */
- wl->dongle_up = false; /* dongle is not up yet */
- wl_init_eq(wl);
- err = wl_init_priv_mem(wl);
+ cfg_priv->dongle_up = false; /* dongle is not up yet */
+ brcmf_init_eq(cfg_priv);
+ err = brcmf_init_priv_mem(cfg_priv);
if (unlikely(err))
return err;
- if (unlikely(wl_create_event_handler(wl)))
+ if (unlikely(brcmf_create_event_handler(cfg_priv)))
return -ENOMEM;
- wl_init_eloop_handler(&wl->el);
- mutex_init(&wl->usr_sync);
- err = wl_init_iscan(wl);
+ brcmf_init_eloop_handler(&cfg_priv->el);
+ mutex_init(&cfg_priv->usr_sync);
+ err = brcmf_init_iscan(cfg_priv);
if (unlikely(err))
return err;
- wl_init_fw(wl->fw);
- wl_init_conf(wl->conf);
- wl_init_prof(wl->profile);
- wl_link_down(wl);
+ brcmf_init_conf(cfg_priv->conf);
+ brcmf_init_prof(cfg_priv->profile);
+ brcmf_link_down(cfg_priv);
return err;
}
-static void wl_deinit_priv(struct wl_priv *wl)
+static void wl_deinit_priv(struct brcmf_cfg80211_priv *cfg_priv)
{
- wl_destroy_event_handler(wl);
- wl->dongle_up = false; /* dongle down */
- wl_flush_eq(wl);
- wl_link_down(wl);
- wl_term_iscan(wl);
- wl_deinit_priv_mem(wl);
+ brcmf_destroy_event_handler(cfg_priv);
+ cfg_priv->dongle_up = false; /* dongle down */
+ brcmf_flush_eq(cfg_priv);
+ brcmf_link_down(cfg_priv);
+ brcmf_term_iscan(cfg_priv);
+ brcmf_deinit_priv_mem(cfg_priv);
}
-s32 wl_cfg80211_attach(struct net_device *ndev, void *data)
+s32 brcmf_cfg80211_attach(struct net_device *ndev, void *data)
{
struct wireless_dev *wdev;
- struct wl_priv *wl;
- struct wl_iface *ci;
+ struct brcmf_cfg80211_priv *cfg_priv;
+ struct brcmf_cfg80211_iface *ci;
s32 err = 0;
if (unlikely(!ndev)) {
WL_ERR("ndev is invalid\n");
return -ENODEV;
}
- wl_cfg80211_dev = kzalloc(sizeof(struct wl_dev), GFP_KERNEL);
- if (unlikely(!wl_cfg80211_dev)) {
+ cfg80211_dev = kzalloc(sizeof(struct brcmf_cfg80211_dev), GFP_KERNEL);
+ if (unlikely(!cfg80211_dev)) {
WL_ERR("wl_cfg80211_dev is invalid\n");
return -ENOMEM;
}
- WL_INFO("func %p\n", wl_cfg80211_get_sdio_func());
- wdev = wl_alloc_wdev(sizeof(struct wl_iface), &wl_cfg80211_get_sdio_func()->dev);
+ WL_INFO("func %p\n", brcmf_cfg80211_get_sdio_func());
+ wdev = brcmf_alloc_wdev(sizeof(struct brcmf_cfg80211_iface),
+ &brcmf_cfg80211_get_sdio_func()->dev);
if (IS_ERR(wdev))
return -ENOMEM;
- wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
- wl = wdev_to_wl(wdev);
- wl->wdev = wdev;
- wl->pub = data;
- ci = (struct wl_iface *)wl_to_ci(wl);
- ci->wl = wl;
+ wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS);
+ cfg_priv = wdev_to_cfg(wdev);
+ cfg_priv->wdev = wdev;
+ cfg_priv->pub = data;
+ ci = (struct brcmf_cfg80211_iface *)&cfg_priv->ci;
+ ci->cfg_priv = cfg_priv;
ndev->ieee80211_ptr = wdev;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
- err = wl_init_priv(wl);
+ err = wl_init_priv(cfg_priv);
if (unlikely(err)) {
WL_ERR("Failed to init iwm_priv (%d)\n", err);
goto cfg80211_attach_out;
}
- wl_set_drvdata(wl_cfg80211_dev, ci);
+ brcmf_set_drvdata(cfg80211_dev, ci);
return err;
cfg80211_attach_out:
- wl_free_wdev(wl);
+ brcmf_free_wdev(cfg_priv);
return err;
}
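Attach ties three objects together: the cfg80211 private data, the wireless_dev and the net_device. A hedged sketch of just that linkage, using only calls visible above (error paths and iface bookkeeping omitted):
/* Sketch of the wdev/ndev/private-data wiring done in attach. */
static void example_link(struct brcmf_cfg80211_priv *cfg_priv,
			 struct wireless_dev *wdev, struct net_device *ndev)
{
	cfg_priv->wdev = wdev;				/* priv -> wdev  */
	ndev->ieee80211_ptr = wdev;			/* ndev -> wdev  */
	wdev->netdev = ndev;				/* wdev -> ndev  */
	SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));	/* sysfs parent  */
}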
-void wl_cfg80211_detach(void)
+void brcmf_cfg80211_detach(void)
{
- struct wl_priv *wl;
+ struct brcmf_cfg80211_priv *cfg_priv;
- wl = WL_PRIV_GET();
+ cfg_priv = WL_PRIV_GET();
- wl_deinit_priv(wl);
- wl_free_wdev(wl);
- wl_set_drvdata(wl_cfg80211_dev, NULL);
- kfree(wl_cfg80211_dev);
- wl_cfg80211_dev = NULL;
- wl_clear_sdio_func();
+ wl_deinit_priv(cfg_priv);
+ brcmf_free_wdev(cfg_priv);
+ brcmf_set_drvdata(cfg80211_dev, NULL);
+ kfree(cfg80211_dev);
+ cfg80211_dev = NULL;
+ brcmf_clear_sdio_func();
}
-static void wl_wakeup_event(struct wl_priv *wl)
+static void brcmf_wakeup_event(struct brcmf_cfg80211_priv *cfg_priv)
{
- up(&wl->event_sync);
+ up(&cfg_priv->event_sync);
}
-static s32 wl_event_handler(void *data)
+static s32 brcmf_event_handler(void *data)
{
- struct wl_priv *wl = (struct wl_priv *)data;
+ struct brcmf_cfg80211_priv *cfg_priv =
+ (struct brcmf_cfg80211_priv *)data;
struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
- struct wl_event_q *e;
+ struct brcmf_cfg80211_event_q *e;
sched_setscheduler(current, SCHED_FIFO, &param);
allow_signal(SIGTERM);
- while (likely(!down_interruptible(&wl->event_sync))) {
+ while (likely(!down_interruptible(&cfg_priv->event_sync))) {
if (kthread_should_stop())
break;
- e = wl_deq_event(wl);
+ e = brcmf_deq_event(cfg_priv);
if (unlikely(!e)) {
WL_ERR("event queue empty...\n");
BUG();
}
WL_INFO("event type (%d)\n", e->etype);
- if (wl->el.handler[e->etype]) {
- wl->el.handler[e->etype] (wl, wl_to_ndev(wl), &e->emsg,
- e->edata);
+ if (cfg_priv->el.handler[e->etype]) {
+ cfg_priv->el.handler[e->etype](cfg_priv,
+ cfg_to_ndev(cfg_priv),
+ &e->emsg, e->edata);
} else {
WL_INFO("Unknown Event (%d): ignoring\n", e->etype);
}
- wl_put_event(e);
+ brcmf_put_event(e);
}
WL_INFO("was terminated\n");
return 0;
}
void
-wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
+brcmf_cfg80211_event(struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data)
{
u32 event_type = be32_to_cpu(e->event_type);
- struct wl_priv *wl = ndev_to_wl(ndev);
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
- if (likely(!wl_enq_event(wl, event_type, e, data)))
- wl_wakeup_event(wl);
+ if (likely(!brcmf_enq_event(cfg_priv, event_type, e, data)))
+ brcmf_wakeup_event(cfg_priv);
}
-static void wl_init_eq(struct wl_priv *wl)
+static void brcmf_init_eq(struct brcmf_cfg80211_priv *cfg_priv)
{
- wl_init_eq_lock(wl);
- INIT_LIST_HEAD(&wl->eq_list);
+ brcmf_init_eq_lock(cfg_priv);
+ INIT_LIST_HEAD(&cfg_priv->eq_list);
}
-static void wl_flush_eq(struct wl_priv *wl)
+static void brcmf_flush_eq(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wl_event_q *e;
+ struct brcmf_cfg80211_event_q *e;
- wl_lock_eq(wl);
- while (!list_empty(&wl->eq_list)) {
- e = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list);
+ brcmf_lock_eq(cfg_priv);
+ while (!list_empty(&cfg_priv->eq_list)) {
+ e = list_first_entry(&cfg_priv->eq_list,
+ struct brcmf_cfg80211_event_q, eq_list);
list_del(&e->eq_list);
kfree(e);
}
- wl_unlock_eq(wl);
+ brcmf_unlock_eq(cfg_priv);
}
/*
* retrieve first queued event from head
*/
-static struct wl_event_q *wl_deq_event(struct wl_priv *wl)
+static struct brcmf_cfg80211_event_q *brcmf_deq_event(
+ struct brcmf_cfg80211_priv *cfg_priv)
{
- struct wl_event_q *e = NULL;
+ struct brcmf_cfg80211_event_q *e = NULL;
- wl_lock_eq(wl);
- if (likely(!list_empty(&wl->eq_list))) {
- e = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list);
+ brcmf_lock_eq(cfg_priv);
+ if (likely(!list_empty(&cfg_priv->eq_list))) {
+ e = list_first_entry(&cfg_priv->eq_list,
+ struct brcmf_cfg80211_event_q, eq_list);
list_del(&e->eq_list);
}
- wl_unlock_eq(wl);
+ brcmf_unlock_eq(cfg_priv);
return e;
}
@@ -3566,50 +3590,49 @@ static struct wl_event_q *wl_deq_event(struct wl_priv *wl)
*/
static s32
-wl_enq_event(struct wl_priv *wl, u32 event, const wl_event_msg_t *msg,
- void *data)
+brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 event,
+ const struct brcmf_event_msg *msg, void *data)
{
- struct wl_event_q *e;
+ struct brcmf_cfg80211_event_q *e;
s32 err = 0;
- e = kzalloc(sizeof(struct wl_event_q), GFP_KERNEL);
+ e = kzalloc(sizeof(struct brcmf_cfg80211_event_q), GFP_KERNEL);
if (unlikely(!e)) {
WL_ERR("event alloc failed\n");
return -ENOMEM;
}
e->etype = event;
- memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
- if (data) {
- }
- wl_lock_eq(wl);
- list_add_tail(&e->eq_list, &wl->eq_list);
- wl_unlock_eq(wl);
+ memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg));
+
+ brcmf_lock_eq(cfg_priv);
+ list_add_tail(&e->eq_list, &cfg_priv->eq_list);
+ brcmf_unlock_eq(cfg_priv);
return err;
}
-static void wl_put_event(struct wl_event_q *e)
+static void brcmf_put_event(struct brcmf_cfg80211_event_q *e)
{
kfree(e);
}
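Events flow through a list_head queue guarded by a spinlock, with a semaphore to wake the handler thread: brcmf_cfg80211_event() enqueues and up()s, brcmf_event_handler() down()s and dequeues. A hedged sketch of the producer side using the same fields:
/* Sketch of the enqueue + wakeup pattern used above. */
static void example_produce(struct brcmf_cfg80211_priv *cfg_priv,
			    struct brcmf_cfg80211_event_q *e)
{
	spin_lock_irq(&cfg_priv->eq_lock);
	list_add_tail(&e->eq_list, &cfg_priv->eq_list);
	spin_unlock_irq(&cfg_priv->eq_lock);
	up(&cfg_priv->event_sync);	/* wake brcmf_event_handler() */
}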
-void wl_cfg80211_sdio_func(void *func)
+void brcmf_cfg80211_sdio_func(void *func)
{
cfg80211_sdio_func = (struct sdio_func *)func;
}
-static void wl_clear_sdio_func(void)
+static void brcmf_clear_sdio_func(void)
{
cfg80211_sdio_func = NULL;
}
-struct sdio_func *wl_cfg80211_get_sdio_func(void)
+struct sdio_func *brcmf_cfg80211_get_sdio_func(void)
{
return cfg80211_sdio_func;
}
-static s32 wl_dongle_mode(struct net_device *ndev, s32 iftype)
+static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype)
{
s32 infra = 0;
s32 err = 0;
@@ -3633,7 +3656,7 @@ static s32 wl_dongle_mode(struct net_device *ndev, s32 iftype)
return err;
}
infra = cpu_to_le32(infra);
- err = wl_dev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(infra));
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_INFRA, &infra, sizeof(infra));
if (unlikely(err)) {
WL_ERR("WLC_SET_INFRA error (%d)\n", err);
return err;
@@ -3642,253 +3665,48 @@ static s32 wl_dongle_mode(struct net_device *ndev, s32 iftype)
return 0;
}
-#ifndef EMBEDDED_PLATFORM
-static s32 wl_dongle_country(struct net_device *ndev, u8 ccode)
+static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
{
-
- s32 err = 0;
-
- return err;
-}
-
-static s32 wl_dongle_up(struct net_device *ndev, u32 up)
-{
- s32 err = 0;
-
- err = wl_dev_ioctl(ndev, WLC_UP, &up, sizeof(up));
- if (unlikely(err)) {
- WL_ERR("WLC_UP error (%d)\n", err);
- }
- return err;
-}
-
-static s32 wl_dongle_power(struct net_device *ndev, u32 power_mode)
-{
- s32 err = 0;
-
- err = wl_dev_ioctl(ndev, WLC_SET_PM, &power_mode, sizeof(power_mode));
- if (unlikely(err)) {
- WL_ERR("WLC_SET_PM error (%d)\n", err);
- }
- return err;
-}
-
-static s32
-wl_dongle_glom(struct net_device *ndev, u32 glom, u32 dongle_align)
-{
- s8 iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" +
- '\0' + bitvec */
- s32 err = 0;
-
- /* Match Host and Dongle rx alignment */
- bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf,
- sizeof(iovbuf));
- err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
- if (unlikely(err)) {
- WL_ERR("txglomalign error (%d)\n", err);
- goto dongle_glom_out;
- }
- /* disable glom option per default */
- bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
- err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
- if (unlikely(err)) {
- WL_ERR("txglom error (%d)\n", err);
- goto dongle_glom_out;
- }
-dongle_glom_out:
- return err;
-}
-
-static s32
-wl_dongle_offload(struct net_device *ndev, s32 arpoe, s32 arp_ol)
-{
- s8 iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" +
- '\0' + bitvec */
- s32 err = 0;
-
- /* Set ARP offload */
- bcm_mkiovar("arpoe", (char *)&arpoe, 4, iovbuf, sizeof(iovbuf));
- err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
- if (err) {
- if (err == -EOPNOTSUPP)
- WL_INFO("arpoe is not supported\n");
- else
- WL_ERR("arpoe error (%d)\n", err);
-
- goto dongle_offload_out;
- }
- bcm_mkiovar("arp_ol", (char *)&arp_ol, 4, iovbuf, sizeof(iovbuf));
- err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
- if (err) {
- if (err == -EOPNOTSUPP)
- WL_INFO("arp_ol is not supported\n");
- else
- WL_ERR("arp_ol error (%d)\n", err);
-
- goto dongle_offload_out;
- }
-
-dongle_offload_out:
- return err;
-}
-
-static s32 wl_pattern_atoh(s8 *src, s8 *dst)
-{
- int i;
- if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) {
- WL_ERR("Mask invalid format. Needs to start with 0x\n");
- return -1;
- }
- src = src + 2; /* Skip past 0x */
- if (strlen(src) % 2 != 0) {
- WL_ERR("Mask invalid format. Needs to be of even length\n");
- return -1;
- }
- for (i = 0; *src != '\0'; i++) {
- char num[3];
- strncpy(num, src, 2);
- num[2] = '\0';
- dst[i] = (u8) simple_strtoul(num, NULL, 16);
- src += 2;
- }
- return i;
-}
-
-static s32 wl_dongle_filter(struct net_device *ndev, u32 filter_mode)
-{
- s8 iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" +
- '\0' + bitvec */
- const s8 *str;
- struct wl_pkt_filter pkt_filter;
- struct wl_pkt_filter *pkt_filterp;
- s32 buf_len;
- s32 str_len;
- u32 mask_size;
- u32 pattern_size;
- s8 buf[256];
- s32 err = 0;
-
-/* add a default packet filter pattern */
- str = "pkt_filter_add";
- str_len = strlen(str);
- strncpy(buf, str, str_len);
- buf[str_len] = '\0';
- buf_len = str_len + 1;
-
- pkt_filterp = (struct wl_pkt_filter *)(buf + str_len + 1);
-
- /* Parse packet filter id. */
- pkt_filter.id = cpu_to_le32(100);
-
- /* Parse filter polarity. */
- pkt_filter.negate_match = cpu_to_le32(0);
-
- /* Parse filter type. */
- pkt_filter.type = cpu_to_le32(0);
-
- /* Parse pattern filter offset. */
- pkt_filter.u.pattern.offset = cpu_to_le32(0);
-
- /* Parse pattern filter mask. */
- mask_size = cpu_to_le32(wl_pattern_atoh("0xff",
- (char *)pkt_filterp->u.pattern.
- mask_and_pattern));
-
- /* Parse pattern filter pattern. */
- pattern_size = cpu_to_le32(wl_pattern_atoh("0x00",
- (char *)&pkt_filterp->u.
- pattern.
- mask_and_pattern
- [mask_size]));
-
- if (mask_size != pattern_size) {
- WL_ERR("Mask and pattern not the same size\n");
- err = -EINVAL;
- goto dongle_filter_out;
- }
-
- pkt_filter.u.pattern.size_bytes = mask_size;
- buf_len += WL_PKT_FILTER_FIXED_LEN;
- buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
-
- /* Keep-alive attributes are set in local
- * variable (keep_alive_pkt), and
- * then memcpy'ed into buffer (keep_alive_pktp) since there is no
- * guarantee that the buffer is properly aligned.
- */
- memcpy((char *)pkt_filterp, &pkt_filter,
- WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
-
- err = wl_dev_ioctl(ndev, WLC_SET_VAR, buf, buf_len);
- if (err) {
- if (err == -EOPNOTSUPP) {
- WL_INFO("filter not supported\n");
- } else {
- WL_ERR("filter (%d)\n", err);
- }
- goto dongle_filter_out;
- }
-
- /* set mode to allow pattern */
- bcm_mkiovar("pkt_filter_mode", (char *)&filter_mode, 4, iovbuf,
- sizeof(iovbuf));
- err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
- if (err) {
- if (err == -EOPNOTSUPP) {
- WL_INFO("filter_mode not supported\n");
- } else {
- WL_ERR("filter_mode (%d)\n", err);
- }
- goto dongle_filter_out;
- }
-
-dongle_filter_out:
- return err;
-}
-#endif /* !EMBEDDED_PLATFORM */
-
-static s32 wl_dongle_eventmsg(struct net_device *ndev)
-{
- s8 iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" +
- '\0' + bitvec */
- s8 eventmask[WL_EVENTING_MASK_LEN];
+ /* Room for "event_msgs" + '\0' + bitvec */
+ s8 iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
+ s8 eventmask[BRCMF_EVENTING_MASK_LEN];
s32 err = 0;
WL_TRACE("Enter\n");
/* Setup event_msgs */
- bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+ brcmu_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN, iovbuf,
sizeof(iovbuf));
- err = wl_dev_ioctl(ndev, WLC_GET_VAR, iovbuf, sizeof(iovbuf));
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_GET_VAR, iovbuf, sizeof(iovbuf));
if (unlikely(err)) {
WL_ERR("Get event_msgs error (%d)\n", err);
goto dongle_eventmsg_out;
}
- memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
-
- setbit(eventmask, WLC_E_SET_SSID);
- setbit(eventmask, WLC_E_ROAM);
- setbit(eventmask, WLC_E_PRUNE);
- setbit(eventmask, WLC_E_AUTH);
- setbit(eventmask, WLC_E_REASSOC);
- setbit(eventmask, WLC_E_REASSOC_IND);
- setbit(eventmask, WLC_E_DEAUTH_IND);
- setbit(eventmask, WLC_E_DISASSOC_IND);
- setbit(eventmask, WLC_E_DISASSOC);
- setbit(eventmask, WLC_E_JOIN);
- setbit(eventmask, WLC_E_ASSOC_IND);
- setbit(eventmask, WLC_E_PSK_SUP);
- setbit(eventmask, WLC_E_LINK);
- setbit(eventmask, WLC_E_NDIS_LINK);
- setbit(eventmask, WLC_E_MIC_ERROR);
- setbit(eventmask, WLC_E_PMKID_CACHE);
- setbit(eventmask, WLC_E_TXFAIL);
- setbit(eventmask, WLC_E_JOIN_START);
- setbit(eventmask, WLC_E_SCAN_COMPLETE);
-
- bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+ memcpy(eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN);
+
+ setbit(eventmask, BRCMF_E_SET_SSID);
+ setbit(eventmask, BRCMF_E_ROAM);
+ setbit(eventmask, BRCMF_E_PRUNE);
+ setbit(eventmask, BRCMF_E_AUTH);
+ setbit(eventmask, BRCMF_E_REASSOC);
+ setbit(eventmask, BRCMF_E_REASSOC_IND);
+ setbit(eventmask, BRCMF_E_DEAUTH_IND);
+ setbit(eventmask, BRCMF_E_DISASSOC_IND);
+ setbit(eventmask, BRCMF_E_DISASSOC);
+ setbit(eventmask, BRCMF_E_JOIN);
+ setbit(eventmask, BRCMF_E_ASSOC_IND);
+ setbit(eventmask, BRCMF_E_PSK_SUP);
+ setbit(eventmask, BRCMF_E_LINK);
+ setbit(eventmask, BRCMF_E_NDIS_LINK);
+ setbit(eventmask, BRCMF_E_MIC_ERROR);
+ setbit(eventmask, BRCMF_E_PMKID_CACHE);
+ setbit(eventmask, BRCMF_E_TXFAIL);
+ setbit(eventmask, BRCMF_E_JOIN_START);
+ setbit(eventmask, BRCMF_E_SCAN_COMPLETE);
+
+ brcmu_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN, iovbuf,
sizeof(iovbuf));
- err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
if (unlikely(err)) {
WL_ERR("Set event_msgs error (%d)\n", err);
goto dongle_eventmsg_out;
@@ -3900,7 +3718,7 @@ dongle_eventmsg_out:
}
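The event mask is a BRCMF_EVENTING_MASK_LEN-byte bit vector: it is read from the dongle, individual BRCMF_E_* bits are turned on with setbit(), and the result is written back. setbit() itself is not part of this diff; it is commonly defined along these lines (an assumption, not the driver's actual macro):
/* Assumed shape of the setbit()/isset() helpers used above. */
#define example_setbit(a, i)	(((u8 *)(a))[(i) / 8] |= 1 << ((i) % 8))
#define example_isset(a, i)	(((u8 *)(a))[(i) / 8] & (1 << ((i) % 8)))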
static s32
-wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
+brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
{
s8 iovbuf[32];
s32 roamtrigger[2];
@@ -3912,9 +3730,10 @@ wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
* off to report link down
*/
if (roamvar) {
- bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout,
+ brcmu_mkiovar("bcn_timeout", (char *)&bcn_timeout,
sizeof(bcn_timeout), iovbuf, sizeof(iovbuf));
- err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_VAR,
+ iovbuf, sizeof(iovbuf));
if (unlikely(err)) {
WL_ERR("bcn_timeout error (%d)\n", err);
goto dongle_rom_out;
@@ -3926,17 +3745,17 @@ wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
* to take care of roaming
*/
WL_INFO("Internal Roaming = %s\n", roamvar ? "Off" : "On");
- bcm_mkiovar("roam_off", (char *)&roamvar,
+ brcmu_mkiovar("roam_off", (char *)&roamvar,
sizeof(roamvar), iovbuf, sizeof(iovbuf));
- err = wl_dev_ioctl(ndev, WLC_SET_VAR, iovbuf, sizeof(iovbuf));
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
if (unlikely(err)) {
WL_ERR("roam_off error (%d)\n", err);
goto dongle_rom_out;
}
roamtrigger[0] = WL_ROAM_TRIGGER_LEVEL;
- roamtrigger[1] = WLC_BAND_ALL;
- err = wl_dev_ioctl(ndev, WLC_SET_ROAM_TRIGGER,
+ roamtrigger[1] = BRCM_BAND_ALL;
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_ROAM_TRIGGER,
(void *)roamtrigger, sizeof(roamtrigger));
if (unlikely(err)) {
WL_ERR("WLC_SET_ROAM_TRIGGER error (%d)\n", err);
@@ -3944,8 +3763,8 @@ wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
}
roam_delta[0] = WL_ROAM_DELTA;
- roam_delta[1] = WLC_BAND_ALL;
- err = wl_dev_ioctl(ndev, WLC_SET_ROAM_DELTA,
+ roam_delta[1] = BRCM_BAND_ALL;
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_ROAM_DELTA,
(void *)roam_delta, sizeof(roam_delta));
if (unlikely(err)) {
WL_ERR("WLC_SET_ROAM_DELTA error (%d)\n", err);
@@ -3957,13 +3776,13 @@ dongle_rom_out:
}
static s32
-wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
+brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
s32 scan_unassoc_time, s32 scan_passive_time)
{
s32 err = 0;
- err = wl_dev_ioctl(ndev, WLC_SET_SCAN_CHANNEL_TIME, &scan_assoc_time,
- sizeof(scan_assoc_time));
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_SCAN_CHANNEL_TIME,
+ &scan_assoc_time, sizeof(scan_assoc_time));
if (err) {
if (err == -EOPNOTSUPP)
WL_INFO("Scan assoc time is not supported\n");
@@ -3971,8 +3790,8 @@ wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
WL_ERR("Scan assoc time error (%d)\n", err);
goto dongle_scantime_out;
}
- err = wl_dev_ioctl(ndev, WLC_SET_SCAN_UNASSOC_TIME, &scan_unassoc_time,
- sizeof(scan_unassoc_time));
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_SCAN_UNASSOC_TIME,
+ &scan_unassoc_time, sizeof(scan_unassoc_time));
if (err) {
if (err == -EOPNOTSUPP)
WL_INFO("Scan unassoc time is not supported\n");
@@ -3981,8 +3800,8 @@ wl_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
goto dongle_scantime_out;
}
- err = wl_dev_ioctl(ndev, WLC_SET_SCAN_PASSIVE_TIME, &scan_passive_time,
- sizeof(scan_passive_time));
+ err = brcmf_dev_ioctl(ndev, BRCMF_C_SET_SCAN_PASSIVE_TIME,
+ &scan_passive_time, sizeof(scan_passive_time));
if (err) {
if (err == -EOPNOTSUPP)
WL_INFO("Scan passive time is not supported\n");
@@ -3995,54 +3814,34 @@ dongle_scantime_out:
return err;
}
-s32 wl_config_dongle(struct wl_priv *wl, bool need_lock)
+s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv, bool need_lock)
{
-#ifndef DHD_SDALIGN
-#define DHD_SDALIGN 32
-#endif
struct net_device *ndev;
struct wireless_dev *wdev;
s32 err = 0;
- if (wl->dongle_up)
+ if (cfg_priv->dongle_up)
return err;
- ndev = wl_to_ndev(wl);
+ ndev = cfg_to_ndev(cfg_priv);
wdev = ndev->ieee80211_ptr;
if (need_lock)
rtnl_lock();
-#ifndef EMBEDDED_PLATFORM
- err = wl_dongle_up(ndev, 0);
- if (unlikely(err))
- goto default_conf_out;
- err = wl_dongle_country(ndev, 0);
- if (unlikely(err))
- goto default_conf_out;
- err = wl_dongle_power(ndev, PM_FAST);
- if (unlikely(err))
- goto default_conf_out;
- err = wl_dongle_glom(ndev, 0, DHD_SDALIGN);
- if (unlikely(err))
- goto default_conf_out;
-
- wl_dongle_offload(ndev, 1, 0xf);
- wl_dongle_filter(ndev, 1);
-#endif /* !EMBEDDED_PLATFORM */
-
- wl_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME,
+ brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME,
WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME);
- err = wl_dongle_eventmsg(ndev);
+ err = brcmf_dongle_eventmsg(ndev);
if (unlikely(err))
goto default_conf_out;
- err = wl_dongle_roam(ndev, (wl->roam_on ? 0 : 1), WL_BEACON_TIMEOUT);
+ err = brcmf_dongle_roam(ndev, (cfg_priv->roam_on ? 0 : 1),
+ WL_BEACON_TIMEOUT);
if (unlikely(err))
goto default_conf_out;
- err = wl_dongle_mode(ndev, wdev->iftype);
+ err = brcmf_dongle_mode(ndev, wdev->iftype);
if (unlikely(err && err != -EINPROGRESS))
goto default_conf_out;
- err = wl_dongle_probecap(wl);
+ err = brcmf_dongle_probecap(cfg_priv);
if (unlikely(err))
goto default_conf_out;
@@ -4052,21 +3851,21 @@ default_conf_out:
if (need_lock)
rtnl_unlock();
- wl->dongle_up = true;
+ cfg_priv->dongle_up = true;
return err;
}
-static s32 wl_update_wiphybands(struct wl_priv *wl)
+static s32 wl_update_wiphybands(struct brcmf_cfg80211_priv *cfg_priv)
{
struct wiphy *wiphy;
s32 phy_list;
s8 phy;
s32 err = 0;
- err = wl_dev_ioctl(wl_to_ndev(wl), WLC_GET_PHYLIST, &phy_list,
- sizeof(phy_list));
+ err = brcmf_dev_ioctl(cfg_to_ndev(cfg_priv), BRCM_GET_PHYLIST,
+ &phy_list, sizeof(phy_list));
if (unlikely(err)) {
WL_ERR("error (%d)\n", err);
return err;
@@ -4075,131 +3874,144 @@ static s32 wl_update_wiphybands(struct wl_priv *wl)
phy = ((char *)&phy_list)[1];
WL_INFO("%c phy\n", phy);
if (phy == 'n' || phy == 'a') {
- wiphy = wl_to_wiphy(wl);
+ wiphy = cfg_to_wiphy(cfg_priv);
wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n;
}
return err;
}
-static s32 __wl_cfg80211_up(struct wl_priv *wl)
+static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_priv *cfg_priv)
{
s32 err = 0;
- set_bit(WL_STATUS_READY, &wl->status);
+ set_bit(WL_STATUS_READY, &cfg_priv->status);
- wl_debugfs_add_netdev_params(wl);
+ brcmf_debugfs_add_netdev_params(cfg_priv);
- err = wl_config_dongle(wl, false);
+ err = brcmf_config_dongle(cfg_priv, false);
if (unlikely(err))
return err;
- wl_invoke_iscan(wl);
+ brcmf_invoke_iscan(cfg_priv);
return err;
}
-static s32 __wl_cfg80211_down(struct wl_priv *wl)
+static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv)
{
- set_bit(WL_STATUS_SCAN_ABORTING, &wl->status);
- wl_term_iscan(wl);
- if (wl->scan_request) {
- cfg80211_scan_done(wl->scan_request, true);
+ /*
+ * While going down, if associated with AP disassociate
+ * from AP to save power
+ */
+ if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) ||
+ test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) &&
+ test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+ WL_INFO("Disassociating from AP");
+ brcmf_link_down(cfg_priv);
+
+ /* Make sure WPA_Supplicant receives all the events
+ generated due to the DISASSOC call to the fw, to keep
+ the fw and WPA_Supplicant states consistent
+ */
+ rtnl_unlock();
+ brcmf_delay(500);
+ rtnl_lock();
+ }
+
+ set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
+ brcmf_term_iscan(cfg_priv);
+ if (cfg_priv->scan_request) {
+ cfg80211_scan_done(cfg_priv->scan_request, true);
/* May need to perform this to cover rmmod */
- /* wl_set_mpc(wl_to_ndev(wl), 1); */
- wl->scan_request = NULL;
+ /* wl_set_mpc(cfg_to_ndev(wl), 1); */
+ cfg_priv->scan_request = NULL;
}
- clear_bit(WL_STATUS_READY, &wl->status);
- clear_bit(WL_STATUS_SCANNING, &wl->status);
- clear_bit(WL_STATUS_SCAN_ABORTING, &wl->status);
- clear_bit(WL_STATUS_CONNECTING, &wl->status);
- clear_bit(WL_STATUS_CONNECTED, &wl->status);
+ clear_bit(WL_STATUS_READY, &cfg_priv->status);
+ clear_bit(WL_STATUS_SCANNING, &cfg_priv->status);
+ clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status);
- wl_debugfs_remove_netdev(wl);
+ brcmf_debugfs_remove_netdev(cfg_priv);
return 0;
}
-s32 wl_cfg80211_up(void)
+s32 brcmf_cfg80211_up(void)
{
- struct wl_priv *wl;
+ struct brcmf_cfg80211_priv *cfg_priv;
s32 err = 0;
- wl = WL_PRIV_GET();
- mutex_lock(&wl->usr_sync);
- err = __wl_cfg80211_up(wl);
- mutex_unlock(&wl->usr_sync);
+ cfg_priv = WL_PRIV_GET();
+ mutex_lock(&cfg_priv->usr_sync);
+ err = __brcmf_cfg80211_up(cfg_priv);
+ mutex_unlock(&cfg_priv->usr_sync);
return err;
}
-s32 wl_cfg80211_down(void)
+s32 brcmf_cfg80211_down(void)
{
- struct wl_priv *wl;
+ struct brcmf_cfg80211_priv *cfg_priv;
s32 err = 0;
- wl = WL_PRIV_GET();
- mutex_lock(&wl->usr_sync);
- err = __wl_cfg80211_down(wl);
- mutex_unlock(&wl->usr_sync);
+ cfg_priv = WL_PRIV_GET();
+ mutex_lock(&cfg_priv->usr_sync);
+ err = __brcmf_cfg80211_down(cfg_priv);
+ mutex_unlock(&cfg_priv->usr_sync);
return err;
}
-static s32 wl_dongle_probecap(struct wl_priv *wl)
+static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_priv *cfg_priv)
{
- s32 err = 0;
-
- err = wl_update_wiphybands(wl);
- if (unlikely(err))
- return err;
-
- return err;
+ return wl_update_wiphybands(cfg_priv);
}
-static void *wl_read_prof(struct wl_priv *wl, s32 item)
+static void *brcmf_read_prof(struct brcmf_cfg80211_priv *cfg_priv, s32 item)
{
switch (item) {
case WL_PROF_SEC:
- return &wl->profile->sec;
+ return &cfg_priv->profile->sec;
case WL_PROF_BSSID:
- return &wl->profile->bssid;
+ return &cfg_priv->profile->bssid;
case WL_PROF_SSID:
- return &wl->profile->ssid;
+ return &cfg_priv->profile->ssid;
}
WL_ERR("invalid item (%d)\n", item);
return NULL;
}
static s32
-wl_update_prof(struct wl_priv *wl, const wl_event_msg_t *e, void *data,
- s32 item)
+brcmf_update_prof(struct brcmf_cfg80211_priv *cfg_priv,
+ const struct brcmf_event_msg *e, void *data, s32 item)
{
s32 err = 0;
- struct wlc_ssid *ssid;
+ struct brcmf_ssid *ssid;
switch (item) {
case WL_PROF_SSID:
- ssid = (wlc_ssid_t *) data;
- memset(wl->profile->ssid.SSID, 0,
- sizeof(wl->profile->ssid.SSID));
- memcpy(wl->profile->ssid.SSID, ssid->SSID, ssid->SSID_len);
- wl->profile->ssid.SSID_len = ssid->SSID_len;
+ ssid = (struct brcmf_ssid *) data;
+ memset(cfg_priv->profile->ssid.SSID, 0,
+ sizeof(cfg_priv->profile->ssid.SSID));
+ memcpy(cfg_priv->profile->ssid.SSID,
+ ssid->SSID, ssid->SSID_len);
+ cfg_priv->profile->ssid.SSID_len = ssid->SSID_len;
break;
case WL_PROF_BSSID:
if (data)
- memcpy(wl->profile->bssid, data, ETH_ALEN);
+ memcpy(cfg_priv->profile->bssid, data, ETH_ALEN);
else
- memset(wl->profile->bssid, 0, ETH_ALEN);
+ memset(cfg_priv->profile->bssid, 0, ETH_ALEN);
break;
case WL_PROF_SEC:
- memcpy(&wl->profile->sec, data, sizeof(wl->profile->sec));
+ memcpy(&cfg_priv->profile->sec, data,
+ sizeof(cfg_priv->profile->sec));
break;
case WL_PROF_BEACONINT:
- wl->profile->beacon_interval = *(u16 *)data;
+ cfg_priv->profile->beacon_interval = *(u16 *)data;
break;
case WL_PROF_DTIMPERIOD:
- wl->profile->dtim_period = *(u8 *)data;
+ cfg_priv->profile->dtim_period = *(u8 *)data;
break;
default:
WL_ERR("unsupported item (%d)\n", item);
@@ -4210,14 +4022,15 @@ wl_update_prof(struct wl_priv *wl, const wl_event_msg_t *e, void *data,
return err;
}
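A brief usage sketch for the profile accessors above; the helper name is hypothetical, while brcmf_update_prof(), brcmf_read_prof() and the WL_PROF_* items are taken from this file:

static void example_remember_bssid(struct brcmf_cfg80211_priv *cfg_priv,
				   const u8 *bssid)
{
	u8 *cur;

	/* cache the BSSID; passing NULL data would zero it (see above) */
	brcmf_update_prof(cfg_priv, NULL, (void *)bssid, WL_PROF_BSSID);

	/* read it back through the keyed accessor */
	cur = brcmf_read_prof(cfg_priv, WL_PROF_BSSID);
	if (cur)
		WL_INFO("cached BSSID %pM\n", cur);
}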
-static bool wl_is_ibssmode(struct wl_priv *wl)
+static bool brcmf_is_ibssmode(struct brcmf_cfg80211_priv *cfg_priv)
{
- return wl->conf->mode == WL_MODE_IBSS;
+ return cfg_priv->conf->mode == WL_MODE_IBSS;
}
-static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v)
+static __used s32 brcmf_add_ie(struct brcmf_cfg80211_priv *cfg_priv,
+ u8 t, u8 l, u8 *v)
{
- struct wl_ie *ie = wl_to_ie(wl);
+ struct brcmf_cfg80211_ie *ie = &cfg_priv->ie;
s32 err = 0;
if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
@@ -4232,42 +4045,40 @@ static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v)
return err;
}
-
-static void wl_link_down(struct wl_priv *wl)
+static void brcmf_link_down(struct brcmf_cfg80211_priv *cfg_priv)
{
struct net_device *dev = NULL;
s32 err = 0;
WL_TRACE("Enter\n");
- clear_bit(WL_STATUS_CONNECTED, &wl->status);
- if (wl->link_up) {
- dev = wl_to_ndev(wl);
+ if (cfg_priv->link_up) {
+ dev = cfg_to_ndev(cfg_priv);
WL_INFO("Call WLC_DISASSOC to stop excess roaming\n ");
- err = wl_dev_ioctl(dev, WLC_DISASSOC, NULL, 0);
+ err = brcmf_dev_ioctl(dev, BRCMF_C_DISASSOC, NULL, 0);
if (unlikely(err))
WL_ERR("WLC_DISASSOC failed (%d)\n", err);
- wl->link_up = false;
+ cfg_priv->link_up = false;
}
WL_TRACE("Exit\n");
}
-static void wl_lock_eq(struct wl_priv *wl)
+static void brcmf_lock_eq(struct brcmf_cfg80211_priv *cfg_priv)
{
- spin_lock_irq(&wl->eq_lock);
+ spin_lock_irq(&cfg_priv->eq_lock);
}
-static void wl_unlock_eq(struct wl_priv *wl)
+static void brcmf_unlock_eq(struct brcmf_cfg80211_priv *cfg_priv)
{
- spin_unlock_irq(&wl->eq_lock);
+ spin_unlock_irq(&cfg_priv->eq_lock);
}
-static void wl_init_eq_lock(struct wl_priv *wl)
+static void brcmf_init_eq_lock(struct brcmf_cfg80211_priv *cfg_priv)
{
- spin_lock_init(&wl->eq_lock);
+ spin_lock_init(&cfg_priv->eq_lock);
}
-static void wl_delay(u32 ms)
+static void brcmf_delay(u32 ms)
{
if (ms < 1000 / HZ) {
cond_resched();
@@ -4277,115 +4088,27 @@ static void wl_delay(u32 ms)
}
}
-static void wl_set_drvdata(struct wl_dev *dev, void *data)
+static void brcmf_set_drvdata(struct brcmf_cfg80211_dev *dev, void *data)
{
dev->driver_data = data;
}
-static void *wl_get_drvdata(struct wl_dev *dev)
-{
- return dev->driver_data;
-}
-
-s32 wl_cfg80211_read_fw(s8 *buf, u32 size)
-{
- const struct firmware *fw_entry;
- struct wl_priv *wl;
-
- wl = WL_PRIV_GET();
-
- fw_entry = wl->fw->fw_entry;
-
- if (fw_entry->size < wl->fw->ptr + size)
- size = fw_entry->size - wl->fw->ptr;
-
- memcpy(buf, &fw_entry->data[wl->fw->ptr], size);
- wl->fw->ptr += size;
- return size;
-}
-
-void wl_cfg80211_release_fw(void)
-{
- struct wl_priv *wl;
-
- wl = WL_PRIV_GET();
- release_firmware(wl->fw->fw_entry);
- wl->fw->ptr = 0;
-}
-
-void *wl_cfg80211_request_fw(s8 *file_name)
-{
- struct wl_priv *wl;
- const struct firmware *fw_entry = NULL;
- s32 err = 0;
-
- WL_INFO("file name : \"%s\"\n", file_name);
- wl = WL_PRIV_GET();
-
- if (!test_bit(WL_FW_LOADING_DONE, &wl->fw->status)) {
- err = request_firmware(&wl->fw->fw_entry, file_name,
- &wl_cfg80211_get_sdio_func()->dev);
- if (unlikely(err)) {
- WL_ERR("Could not download fw (%d)\n", err);
- goto req_fw_out;
- }
- set_bit(WL_FW_LOADING_DONE, &wl->fw->status);
- fw_entry = wl->fw->fw_entry;
- if (fw_entry) {
- WL_INFO("fw size (%zd), data (%p)\n",
- fw_entry->size, fw_entry->data);
- }
- } else if (!test_bit(WL_NVRAM_LOADING_DONE, &wl->fw->status)) {
- err = request_firmware(&wl->fw->fw_entry, file_name,
- &wl_cfg80211_get_sdio_func()->dev);
- if (unlikely(err)) {
- WL_ERR("Could not download nvram (%d)\n", err);
- goto req_fw_out;
- }
- set_bit(WL_NVRAM_LOADING_DONE, &wl->fw->status);
- fw_entry = wl->fw->fw_entry;
- if (fw_entry) {
- WL_INFO("nvram size (%zd), data (%p)\n",
- fw_entry->size, fw_entry->data);
- }
- } else {
- WL_INFO("Downloading already done. Nothing to do more\n");
- err = -EPERM;
- }
-
-req_fw_out:
- if (unlikely(err)) {
- return NULL;
- }
- wl->fw->ptr = 0;
- return (void *)fw_entry->data;
-}
-
-s8 *wl_cfg80211_get_fwname(void)
-{
- struct wl_priv *wl;
-
- wl = WL_PRIV_GET();
- strcpy(wl->fw->fw_name, WL_4329_FW_FILE);
- return wl->fw->fw_name;
-}
-
-s8 *wl_cfg80211_get_nvramname(void)
+static void *brcmf_get_drvdata(struct brcmf_cfg80211_dev *dev)
{
- struct wl_priv *wl;
+ void *data = NULL;
- wl = WL_PRIV_GET();
- strcpy(wl->fw->nvram_name, WL_4329_NVRAM_FILE);
- return wl->fw->nvram_name;
+ if (dev)
+ data = dev->driver_data;
+ return data;
}
-static void wl_set_mpc(struct net_device *ndev, int mpc)
+static void brcmf_set_mpc(struct net_device *ndev, int mpc)
{
s32 err = 0;
- struct wl_priv *wl = ndev_to_wl(ndev);
+ struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
- if (test_bit(WL_STATUS_READY, &wl->status)) {
- err = wl_dev_intvar_set(ndev, "mpc", mpc);
+ if (test_bit(WL_STATUS_READY, &cfg_priv->status)) {
+ err = brcmf_dev_intvar_set(ndev, "mpc", mpc);
if (unlikely(err)) {
WL_ERR("fail to set mpc\n");
return;
@@ -4394,24 +4117,25 @@ static void wl_set_mpc(struct net_device *ndev, int mpc)
}
}
-static int wl_debugfs_add_netdev_params(struct wl_priv *wl)
+static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_priv *cfg_priv)
{
char buf[10+IFNAMSIZ];
struct dentry *fd;
s32 err = 0;
- sprintf(buf, "netdev:%s", wl_to_ndev(wl)->name);
- wl->debugfsdir = debugfs_create_dir(buf, wl_to_wiphy(wl)->debugfsdir);
+ sprintf(buf, "netdev:%s", cfg_to_ndev(cfg_priv)->name);
+ cfg_priv->debugfsdir = debugfs_create_dir(buf,
+ cfg_to_wiphy(cfg_priv)->debugfsdir);
- fd = debugfs_create_u16("beacon_int", S_IRUGO, wl->debugfsdir,
- (u16 *)&wl->profile->beacon_interval);
+ fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg_priv->debugfsdir,
+ (u16 *)&cfg_priv->profile->beacon_interval);
if (!fd) {
err = -ENOMEM;
goto err_out;
}
- fd = debugfs_create_u8("dtim_period", S_IRUGO, wl->debugfsdir,
- (u8 *)&wl->profile->dtim_period);
+ fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg_priv->debugfsdir,
+ (u8 *)&cfg_priv->profile->dtim_period);
if (!fd) {
err = -ENOMEM;
goto err_out;
@@ -4421,8 +4145,8 @@ err_out:
return err;
}
-static void wl_debugfs_remove_netdev(struct wl_priv *wl)
+static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_priv *cfg_priv)
{
- debugfs_remove_recursive(wl->debugfsdir);
- wl->debugfsdir = NULL;
+ debugfs_remove_recursive(cfg_priv->debugfsdir);
+ cfg_priv->debugfsdir = NULL;
}
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h
index 996033cf9b0..f26d08793ca 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.h
@@ -17,16 +17,11 @@
#ifndef _wl_cfg80211_h_
#define _wl_cfg80211_h_
-#include <linux/wireless.h>
-#include <linux/wireless.h>
-#include <net/cfg80211.h>
-#include <wlioctl.h>
-
-struct wl_conf;
-struct wl_iface;
-struct wl_priv;
-struct wl_security;
-struct wl_ibss;
+struct brcmf_cfg80211_conf;
+struct brcmf_cfg80211_iface;
+struct brcmf_cfg80211_priv;
+struct brcmf_cfg80211_security;
+struct brcmf_cfg80211_ibss;
#define WL_DBG_NONE 0
#define WL_DBG_CONN (1 << 5)
@@ -39,7 +34,7 @@ struct wl_ibss;
#define WL_ERR(fmt, args...) \
do { \
- if (wl_dbg_level & WL_DBG_ERR) { \
+ if (brcmf_dbg_level & WL_DBG_ERR) { \
if (net_ratelimit()) { \
printk(KERN_ERR "ERROR @%s : " fmt, \
__func__, ##args); \
@@ -50,7 +45,7 @@ do { \
#if (defined BCMDBG)
#define WL_INFO(fmt, args...) \
do { \
- if (wl_dbg_level & WL_DBG_INFO) { \
+ if (brcmf_dbg_level & WL_DBG_INFO) { \
if (net_ratelimit()) { \
printk(KERN_ERR "INFO @%s : " fmt, \
__func__, ##args); \
@@ -60,7 +55,7 @@ do { \
#define WL_TRACE(fmt, args...) \
do { \
- if (wl_dbg_level & WL_DBG_TRACE) { \
+ if (brcmf_dbg_level & WL_DBG_TRACE) { \
if (net_ratelimit()) { \
printk(KERN_ERR "TRACE @%s : " fmt, \
__func__, ##args); \
@@ -70,7 +65,7 @@ do { \
#define WL_SCAN(fmt, args...) \
do { \
- if (wl_dbg_level & WL_DBG_SCAN) { \
+ if (brcmf_dbg_level & WL_DBG_SCAN) { \
if (net_ratelimit()) { \
printk(KERN_ERR "SCAN @%s : " fmt, \
__func__, ##args); \
@@ -80,7 +75,7 @@ do { \
#define WL_CONN(fmt, args...) \
do { \
- if (wl_dbg_level & WL_DBG_CONN) { \
+ if (brcmf_dbg_level & WL_DBG_CONN) { \
if (net_ratelimit()) { \
printk(KERN_ERR "CONN @%s : " fmt, \
__func__, ##args); \
@@ -95,15 +90,13 @@ do { \
#define WL_CONN(fmt, args...)
#endif /* (defined BCMDBG) */
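The WL_* logging macros above all gate on bits in brcmf_dbg_level. A hedged sketch of enabling a category at runtime; it assumes brcmf_dbg_level is the module-wide u32 mask consulted by those macros and declared elsewhere in the driver:

extern u32 brcmf_dbg_level;	/* assumption: the mask tested by the macros above */

static inline void example_enable_conn_tracing(void)
{
	brcmf_dbg_level |= WL_DBG_CONN | WL_DBG_INFO;
	WL_INFO("connection tracing enabled\n");
}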
-
-#define WL_SCAN_RETRY_MAX 3 /* used for ibss scan */
#define WL_NUM_SCAN_MAX 1
#define WL_NUM_PMKIDS_MAX MAXPMKID /* will be used
* for 2.6.33 kernel
* or later
*/
-#define WL_SCAN_BUF_MAX (1024 * 8)
-#define WL_TLV_INFO_MAX 1024
+#define WL_SCAN_BUF_MAX (1024 * 8)
+#define WL_TLV_INFO_MAX 1024
#define WL_BSS_INFO_MAX 2048
#define WL_ASSOC_INFO_MAX 512 /*
* needs to grab assoc info from dongle to
@@ -113,15 +106,14 @@ do { \
#define WL_IOCTL_LEN_MAX 1024
#define WL_EXTRA_BUF_MAX 2048
#define WL_ISCAN_BUF_MAX 2048 /*
- * the buf lengh can be WLC_IOCTL_MAXLEN (8K)
+ * the buf length can be BRCMF_C_IOCTL_MAXLEN
* to reduce iteration
*/
#define WL_ISCAN_TIMER_INTERVAL_MS 3000
-#define WL_SCAN_ERSULTS_LAST (WL_SCAN_RESULTS_NO_MEM+1)
+#define WL_SCAN_ERSULTS_LAST (BRCMF_SCAN_RESULTS_NO_MEM+1)
#define WL_AP_MAX 256 /* virtually unlimitted as long
* as kernel memory allows
*/
-#define WL_FILE_NAME_MAX 256
#define WL_ROAM_TRIGGER_LEVEL -75
#define WL_ROAM_DELTA 20
@@ -166,22 +158,8 @@ enum wl_iscan_state {
WL_ISCAN_STATE_SCANING
};
-/* fw downloading status */
-enum wl_fw_status {
- WL_FW_LOADING_DONE,
- WL_NVRAM_LOADING_DONE
-};
-
-/* beacon / probe_response */
-struct beacon_proberesp {
- __le64 timestamp;
- __le16 beacon_int;
- __le16 capab_info;
- u8 variable[0];
-} __attribute__ ((packed));
-
/* dongle configuration */
-struct wl_conf {
+struct brcmf_cfg80211_conf {
u32 mode; /* adhoc , infrastructure or ap */
u32 frag_threshold;
u32 rts_threshold;
@@ -192,51 +170,43 @@ struct wl_conf {
};
/* cfg80211 main event loop */
-struct wl_event_loop {
- s32(*handler[WLC_E_LAST]) (struct wl_priv *wl,
+struct brcmf_cfg80211_event_loop {
+ s32(*handler[BRCMF_E_LAST]) (struct brcmf_cfg80211_priv *cfg_priv,
struct net_device *ndev,
- const wl_event_msg_t *e, void *data);
+ const struct brcmf_event_msg *e,
+ void *data);
};
/* representing interface of cfg80211 plane */
-struct wl_iface {
- struct wl_priv *wl;
+struct brcmf_cfg80211_iface {
+ struct brcmf_cfg80211_priv *cfg_priv;
};
-struct wl_dev {
+struct brcmf_cfg80211_dev {
void *driver_data; /* to store cfg80211 object information */
};
-/* bss inform structure for cfg80211 interface */
-struct wl_cfg80211_bss_info {
- u16 band;
- u16 channel;
- s16 rssi;
- u16 frame_len;
- u8 frame_buf[1];
-};
-
/* basic structure of scan request */
-struct wl_scan_req {
- struct wlc_ssid ssid;
+struct brcmf_cfg80211_scan_req {
+ struct brcmf_ssid ssid;
};
/* basic structure of information element */
-struct wl_ie {
+struct brcmf_cfg80211_ie {
u16 offset;
u8 buf[WL_TLV_INFO_MAX];
};
/* event queue for cfg80211 main event */
-struct wl_event_q {
+struct brcmf_cfg80211_event_q {
struct list_head eq_list;
u32 etype;
- wl_event_msg_t emsg;
+ struct brcmf_event_msg emsg;
s8 edata[1];
};
/* security information with currently associated ap */
-struct wl_security {
+struct brcmf_cfg80211_security {
u32 wpa_versions;
u32 auth_type;
u32 cipher_pairwise;
@@ -245,7 +215,7 @@ struct wl_security {
};
/* ibss information for currently joined ibss network */
-struct wl_ibss {
+struct brcmf_cfg80211_ibss {
u8 beacon_interval; /* in millisecond */
u8 atim; /* in millisecond */
s8 join_only;
@@ -254,24 +224,25 @@ struct wl_ibss {
};
/* dongle profile */
-struct wl_profile {
+struct brcmf_cfg80211_profile {
u32 mode;
- struct wlc_ssid ssid;
+ struct brcmf_ssid ssid;
u8 bssid[ETH_ALEN];
u16 beacon_interval;
u8 dtim_period;
- struct wl_security sec;
- struct wl_ibss ibss;
+ struct brcmf_cfg80211_security sec;
+ struct brcmf_cfg80211_ibss ibss;
s32 band;
};
/* dongle iscan event loop */
-struct wl_iscan_eloop {
- s32(*handler[WL_SCAN_ERSULTS_LAST]) (struct wl_priv *wl);
+struct brcmf_cfg80211_iscan_eloop {
+ s32 (*handler[WL_SCAN_ERSULTS_LAST])
+ (struct brcmf_cfg80211_priv *cfg_priv);
};
/* dongle iscan controller */
-struct wl_iscan_ctrl {
+struct brcmf_cfg80211_iscan_ctrl {
struct net_device *dev;
struct timer_list timer;
u32 timer_ms;
@@ -279,69 +250,57 @@ struct wl_iscan_ctrl {
s32 state;
struct task_struct *tsk;
struct semaphore sync;
- struct wl_iscan_eloop el;
+ struct brcmf_cfg80211_iscan_eloop el;
void *data;
- s8 ioctl_buf[WLC_IOCTL_SMLEN];
+ s8 ioctl_buf[BRCMF_C_IOCTL_SMLEN];
s8 scan_buf[WL_ISCAN_BUF_MAX];
};
/* association inform */
-struct wl_connect_info {
+struct brcmf_cfg80211_connect_info {
u8 *req_ie;
s32 req_ie_len;
u8 *resp_ie;
s32 resp_ie_len;
};
-/* firmware /nvram downloading controller */
-struct wl_fw_ctrl {
- const struct firmware *fw_entry;
- unsigned long status;
- u32 ptr;
- s8 fw_name[WL_FILE_NAME_MAX];
- s8 nvram_name[WL_FILE_NAME_MAX];
-};
-
/* assoc ie length */
-struct wl_assoc_ielen {
+struct brcmf_cfg80211_assoc_ielen {
u32 req_len;
u32 resp_len;
};
/* wpa2 pmk list */
-struct wl_pmk_list {
+struct brcmf_cfg80211_pmk_list {
pmkid_list_t pmkids;
pmkid_t foo[MAXPMKID - 1];
};
/* dongle private data of cfg80211 interface */
-struct wl_priv {
+struct brcmf_cfg80211_priv {
struct wireless_dev *wdev; /* representing wl cfg80211 device */
- struct wl_conf *conf; /* dongle configuration */
+ struct brcmf_cfg80211_conf *conf; /* dongle configuration */
struct cfg80211_scan_request *scan_request; /* scan request
object */
- struct wl_event_loop el; /* main event loop */
+ struct brcmf_cfg80211_event_loop el; /* main event loop */
struct list_head eq_list; /* used for event queue */
spinlock_t eq_lock; /* for event queue synchronization */
struct mutex usr_sync; /* maily for dongle up/down synchronization */
- struct wl_scan_results *bss_list; /* bss_list holding scanned
+ struct brcmf_scan_results *bss_list; /* bss_list holding scanned
ap information */
- struct wl_scan_results *scan_results;
- struct wl_scan_req *scan_req_int; /* scan request object for
- internal purpose */
+ struct brcmf_scan_results *scan_results;
+ struct brcmf_cfg80211_scan_req *scan_req_int; /* scan request object
+ for internal purpose */
struct wl_cfg80211_bss_info *bss_info; /* bss information for
cfg80211 layer */
- struct wl_ie ie; /* information element object for
+ struct brcmf_cfg80211_ie ie; /* information element object for
internal purpose */
struct semaphore event_sync; /* for synchronization of main event
thread */
- struct wl_profile *profile; /* holding dongle profile */
- struct wl_iscan_ctrl *iscan; /* iscan controller */
- struct wl_connect_info conn_info; /* association information
- container */
- struct wl_fw_ctrl *fw; /* control firwmare / nvram paramter
- downloading */
- struct wl_pmk_list *pmk_list; /* wpa2 pmk list */
+ struct brcmf_cfg80211_profile *profile; /* holding dongle profile */
+ struct brcmf_cfg80211_iscan_ctrl *iscan; /* iscan controller */
+ struct brcmf_cfg80211_connect_info conn_info; /* association info */
+ struct brcmf_cfg80211_pmk_list *pmk_list; /* wpa2 pmk list */
struct task_struct *event_tsk; /* task of main event handler thread */
unsigned long status; /* current dongle status */
void *pub;
@@ -362,26 +321,21 @@ struct wl_priv {
u8 ci[0] __attribute__ ((__aligned__(NETDEV_ALIGN)));
};
-#define wl_to_dev(w) (wiphy_dev(wl->wdev->wiphy))
-#define wl_to_wiphy(w) (w->wdev->wiphy)
-#define wiphy_to_wl(w) ((struct wl_priv *)(wiphy_priv(w)))
-#define wl_to_wdev(w) (w->wdev)
-#define wdev_to_wl(w) ((struct wl_priv *)(wdev_priv(w)))
-#define wl_to_ndev(w) (w->wdev->netdev)
-#define ndev_to_wl(n) (wdev_to_wl(n->ieee80211_ptr))
-#define ci_to_wl(c) (ci->wl)
-#define wl_to_ci(w) (&w->ci)
-#define wl_to_sr(w) (w->scan_req_int)
-#define wl_to_ie(w) (&w->ie)
-#define iscan_to_wl(i) ((struct wl_priv *)(i->data))
-#define wl_to_iscan(w) (w->iscan)
-#define wl_to_conn(w) (&w->conn_info)
-
-static inline struct wl_bss_info *next_bss(struct wl_scan_results *list,
- struct wl_bss_info *bss)
+#define cfg_to_wiphy(w) (w->wdev->wiphy)
+#define wiphy_to_cfg(w) ((struct brcmf_cfg80211_priv *)(wiphy_priv(w)))
+#define cfg_to_wdev(w) (w->wdev)
+#define wdev_to_cfg(w) ((struct brcmf_cfg80211_priv *)(wdev_priv(w)))
+#define cfg_to_ndev(w) (w->wdev->netdev)
+#define ndev_to_cfg(n) (wdev_to_cfg(n->ieee80211_ptr))
+#define iscan_to_cfg(i) ((struct brcmf_cfg80211_priv *)(i->data))
+#define cfg_to_iscan(w) (w->iscan)
+#define cfg_to_conn(w) (&w->conn_info)
+
+static inline struct brcmf_bss_info *next_bss(struct brcmf_scan_results *list,
+ struct brcmf_bss_info *bss)
{
return bss = bss ?
- (struct wl_bss_info *)((unsigned long)bss +
+ (struct brcmf_bss_info *)((unsigned long)bss +
le32_to_cpu(bss->length)) :
list->bss_info;
}
@@ -389,26 +343,14 @@ static inline struct wl_bss_info *next_bss(struct wl_scan_results *list,
#define for_each_bss(list, bss, __i) \
for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss))
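The scan-result list held in cfg_priv->bss_list is a packed array in which every brcmf_bss_info entry carries its own length, so next_bss() advances byte-wise and for_each_bss() bounds the walk by list->count (and WL_AP_MAX). A usage sketch; the function name is hypothetical and only fields visible above are touched:

static void example_walk_scan_results(struct net_device *ndev)
{
	struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev);
	struct brcmf_scan_results *list = cfg_priv->bss_list;
	struct brcmf_bss_info *bss = NULL;
	int i;

	bss = next_bss(list, bss);	/* NULL maps to the first entry, list->bss_info */
	for_each_bss(list, bss, i)
		WL_INFO("entry %d: %u bytes\n", i, le32_to_cpu(bss->length));
}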
-extern s32 wl_cfg80211_attach(struct net_device *ndev, void *data);
-extern void wl_cfg80211_detach(void);
+extern s32 brcmf_cfg80211_attach(struct net_device *ndev, void *data);
+extern void brcmf_cfg80211_detach(void);
/* event handler from dongle */
-extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e,
- void *data);
-extern void wl_cfg80211_sdio_func(void *func); /* set sdio function info */
-extern struct sdio_func *wl_cfg80211_get_sdio_func(void); /* set sdio function info */
-extern s32 wl_cfg80211_up(void); /* dongle up */
-extern s32 wl_cfg80211_down(void); /* dongle down */
-extern void wl_cfg80211_dbg_level(u32 level); /* set dongle
- debugging level */
-extern void *wl_cfg80211_request_fw(s8 *file_name); /* request fw /nvram
- downloading */
-extern s32 wl_cfg80211_read_fw(s8 *buf, u32 size); /* read fw
- image */
-extern void wl_cfg80211_release_fw(void); /* release fw */
-extern s8 *wl_cfg80211_get_fwname(void); /* get firmware name for
- the dongle */
-extern s8 *wl_cfg80211_get_nvramname(void); /* get nvram name for
- the dongle */
-extern void wl_os_wd_timer(struct net_device *ndev, uint wdtick);
+extern void brcmf_cfg80211_event(struct net_device *ndev,
+ const struct brcmf_event_msg *e, void *data);
+extern void brcmf_cfg80211_sdio_func(void *func); /* set sdio function info */
+extern struct sdio_func *brcmf_cfg80211_get_sdio_func(void);
+extern s32 brcmf_cfg80211_up(void); /* dongle up */
+extern s32 brcmf_cfg80211_down(void); /* dongle down */
#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_iw.c b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
deleted file mode 100644
index 15e1b05ca92..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/wl_iw.c
+++ /dev/null
@@ -1,3693 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/kthread.h>
-#include <linux/semaphore.h>
-#include <bcmdefs.h>
-#include <linux/netdevice.h>
-#include <wlioctl.h>
-
-#include <bcmutils.h>
-
-#include <linux/if_arp.h>
-#include <asm/uaccess.h>
-
-#include <dngl_stats.h>
-#include <dhd.h>
-#include <dhdioctl.h>
-#include <linux/ieee80211.h>
-typedef const struct si_pub si_t;
-#include <wlioctl.h>
-
-#include <dngl_stats.h>
-#include <dhd.h>
-
-#define WL_ERROR(fmt, args...) printk(fmt, ##args)
-#define WL_TRACE(fmt, args...) no_printk(fmt, ##args)
-#define WL_INFORM(fmt, args...) no_printk(fmt, ##args)
-#define WL_WSEC(fmt, args...) no_printk(fmt, ##args)
-#define WL_SCAN(fmt, args...) no_printk(fmt, ##args)
-
-#include <wl_iw.h>
-
-#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | \
- TKIP_ENABLED | AES_ENABLED))
-
-#include <linux/rtnetlink.h>
-
-#define WL_IW_USE_ISCAN 1
-#define ENABLE_ACTIVE_PASSIVE_SCAN_SUPPRESS 1
-
-bool g_set_essid_before_scan = true;
-
-#define WL_IW_IOCTL_CALL(func_call) \
- do { \
- func_call; \
- } while (0)
-
-static int g_onoff = G_WLAN_SET_ON;
-wl_iw_extra_params_t g_wl_iw_params;
-
-extern bool wl_iw_conn_status_str(u32 event_type, u32 status,
- u32 reason, char *stringBuf, uint buflen);
-
-#define MAX_WLIW_IOCTL_LEN 1024
-
-#ifdef CONFIG_WIRELESS_EXT
-extern int dhd_wait_pend8021x(struct net_device *dev);
-#endif
-
-#if WIRELESS_EXT < 19
-#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST)
-#define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST)
-#endif
-
-static void *g_scan;
-static volatile uint g_scan_specified_ssid;
-static wlc_ssid_t g_specific_ssid;
-
-static wlc_ssid_t g_ssid;
-
-#if defined(WL_IW_USE_ISCAN)
-#define ISCAN_STATE_IDLE 0
-#define ISCAN_STATE_SCANING 1
-
-#define WLC_IW_ISCAN_MAXLEN 2048
-typedef struct iscan_buf {
- struct iscan_buf *next;
- char iscan_buf[WLC_IW_ISCAN_MAXLEN];
-} iscan_buf_t;
-
-typedef struct iscan_info {
- struct net_device *dev;
- struct timer_list timer;
- u32 timer_ms;
- u32 timer_on;
- int iscan_state;
- iscan_buf_t *list_hdr;
- iscan_buf_t *list_cur;
-
- struct task_struct *sysioc_tsk;
- struct semaphore sysioc_sem;
-
-#if defined CSCAN
- char ioctlbuf[WLC_IOCTL_MEDLEN];
-#else
- char ioctlbuf[WLC_IOCTL_SMLEN];
-#endif
- wl_iscan_params_t *iscan_ex_params_p;
- int iscan_ex_param_size;
-} iscan_info_t;
-iscan_info_t *g_iscan;
-
-static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
-
-/* Global ASSERT type flag */
-u32 g_assert_type;
-
-static void wl_iw_timerfunc(unsigned long data);
-static void wl_iw_set_event_mask(struct net_device *dev);
-static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, u16 action);
-#endif /* defined(WL_IW_USE_ISCAN) */
-
-static int
-wl_iw_set_scan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra);
-
-static int
-wl_iw_get_scan(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra);
-
-static uint
-wl_iw_get_scan_prep(wl_scan_results_t *list,
- struct iw_request_info *info, char *extra, short max_size);
-
-static void swap_key_from_BE(wl_wsec_key_t *key)
-{
- key->index = cpu_to_le32(key->index);
- key->len = cpu_to_le32(key->len);
- key->algo = cpu_to_le32(key->algo);
- key->flags = cpu_to_le32(key->flags);
- key->rxiv.hi = cpu_to_le32(key->rxiv.hi);
- key->rxiv.lo = cpu_to_le16(key->rxiv.lo);
- key->iv_initialized = cpu_to_le32(key->iv_initialized);
-}
-
-static void swap_key_to_BE(wl_wsec_key_t *key)
-{
- key->index = le32_to_cpu(key->index);
- key->len = le32_to_cpu(key->len);
- key->algo = le32_to_cpu(key->algo);
- key->flags = le32_to_cpu(key->flags);
- key->rxiv.hi = le32_to_cpu(key->rxiv.hi);
- key->rxiv.lo = le16_to_cpu(key->rxiv.lo);
- key->iv_initialized = le32_to_cpu(key->iv_initialized);
-}
-
-static int dev_wlc_ioctl(struct net_device *dev, int cmd, void *arg, int len)
-{
- struct ifreq ifr;
- wl_ioctl_t ioc;
- mm_segment_t fs;
- int ret = -EINVAL;
-
- if (!dev) {
- WL_ERROR("%s: dev is null\n", __func__);
- return ret;
- }
-
- WL_INFORM("\n%s, PID:%x: send Local IOCTL -> dhd: cmd:0x%x, buf:%p, len:%d\n",
- __func__, current->pid, cmd, arg, len);
-
- if (g_onoff == G_WLAN_SET_ON) {
- memset(&ioc, 0, sizeof(ioc));
- ioc.cmd = cmd;
- ioc.buf = arg;
- ioc.len = len;
-
- strcpy(ifr.ifr_name, dev->name);
- ifr.ifr_data = (caddr_t)&ioc;
-
- ret = dev_open(dev);
- if (ret) {
- WL_ERROR("%s: Error dev_open: %d\n", __func__, ret);
- return ret;
- }
-
- fs = get_fs();
- set_fs(get_ds());
- ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, SIOCDEVPRIVATE);
- set_fs(fs);
- } else {
- WL_TRACE("%s: call after driver stop : ignored\n", __func__);
- }
- return ret;
-}
-
-static int dev_wlc_intvar_set(struct net_device *dev, char *name, int val)
-{
- char buf[WLC_IOCTL_SMLEN];
- uint len;
-
- val = cpu_to_le32(val);
- len = bcm_mkiovar(name, (char *)(&val), sizeof(val), buf, sizeof(buf));
- ASSERT(len);
-
- return dev_wlc_ioctl(dev, WLC_SET_VAR, buf, len);
-}
-
-#if defined(WL_IW_USE_ISCAN)
-static int
-dev_iw_iovar_setbuf(struct net_device *dev,
- char *iovar,
- void *param, int paramlen, void *bufptr, int buflen)
-{
- int iolen;
-
- iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
- ASSERT(iolen);
-
- if (iolen == 0)
- return 0;
-
- return dev_wlc_ioctl(dev, WLC_SET_VAR, bufptr, iolen);
-}
-
-static int
-dev_iw_iovar_getbuf(struct net_device *dev,
- char *iovar,
- void *param, int paramlen, void *bufptr, int buflen)
-{
- int iolen;
-
- iolen = bcm_mkiovar(iovar, param, paramlen, bufptr, buflen);
- ASSERT(iolen);
-
- return dev_wlc_ioctl(dev, WLC_GET_VAR, bufptr, buflen);
-}
-#endif /* defined(WL_IW_USE_ISCAN) */
-
-#if WIRELESS_EXT > 17
-static int
-dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len)
-{
- static char ioctlbuf[MAX_WLIW_IOCTL_LEN];
- uint buflen;
-
- buflen = bcm_mkiovar(name, buf, len, ioctlbuf, sizeof(ioctlbuf));
- ASSERT(buflen);
-
- return dev_wlc_ioctl(dev, WLC_SET_VAR, ioctlbuf, buflen);
-}
-#endif /* WIRELESS_EXT > 17 */
-
-static int
-dev_wlc_bufvar_get(struct net_device *dev, char *name, char *buf, int buflen)
-{
- static char ioctlbuf[MAX_WLIW_IOCTL_LEN];
- int error;
- uint len;
-
- len = bcm_mkiovar(name, NULL, 0, ioctlbuf, sizeof(ioctlbuf));
- ASSERT(len);
- error =
- dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)ioctlbuf,
- MAX_WLIW_IOCTL_LEN);
- if (!error)
- memcpy(buf, ioctlbuf, buflen);
-
- return error;
-}
-
-static int dev_wlc_intvar_get(struct net_device *dev, char *name, int *retval)
-{
- union {
- char buf[WLC_IOCTL_SMLEN];
- int val;
- } var;
- int error;
-
- uint len;
- uint data_null;
-
- len =
- bcm_mkiovar(name, (char *)(&data_null), 0, (char *)(&var),
- sizeof(var.buf));
- ASSERT(len);
- error = dev_wlc_ioctl(dev, WLC_GET_VAR, (void *)&var, len);
-
- *retval = le32_to_cpu(var.val);
-
- return error;
-}
-
-#if WIRELESS_EXT < 13
-struct iw_request_info {
- __u16 cmd;
- __u16 flags;
-};
-
-typedef int (*iw_handler) (struct net_device *dev,
- struct iw_request_info *info,
- void *wrqu, char *extra);
-#endif
-
-static int
-wl_iw_config_commit(struct net_device *dev,
- struct iw_request_info *info, void *zwrq, char *extra)
-{
- wlc_ssid_t ssid;
- int error;
- struct sockaddr bssid;
-
- WL_TRACE("%s: SIOCSIWCOMMIT\n", dev->name);
-
- error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid));
- if (error)
- return error;
-
- ssid.SSID_len = le32_to_cpu(ssid.SSID_len);
-
- if (!ssid.SSID_len)
- return 0;
-
- memset(&bssid, 0, sizeof(struct sockaddr));
- error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETH_ALEN);
- if (error) {
- WL_ERROR("%s: WLC_REASSOC to %s failed\n",
- __func__, ssid.SSID);
- return error;
- }
-
- return 0;
-}
-
-static int
-wl_iw_get_name(struct net_device *dev,
- struct iw_request_info *info, char *cwrq, char *extra)
-{
- WL_TRACE("%s: SIOCGIWNAME\n", dev->name);
-
- strcpy(cwrq, "IEEE 802.11-DS");
-
- return 0;
-}
-
-static int
-wl_iw_set_freq(struct net_device *dev,
- struct iw_request_info *info, struct iw_freq *fwrq, char *extra)
-{
- int error, chan;
- uint sf = 0;
-
- WL_TRACE("\n %s %s: SIOCSIWFREQ\n", __func__, dev->name);
-
- if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
- chan = fwrq->m;
- } else {
- if (fwrq->e >= 6) {
- fwrq->e -= 6;
- while (fwrq->e--)
- fwrq->m *= 10;
- } else if (fwrq->e < 6) {
- while (fwrq->e++ < 6)
- fwrq->m /= 10;
- }
- if (fwrq->m > 4000 && fwrq->m < 5000)
- sf = WF_CHAN_FACTOR_4_G;
-
- chan = bcm_mhz2channel(fwrq->m, sf);
- }
- chan = cpu_to_le32(chan);
-
- error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan));
- if (error)
- return error;
-
- g_wl_iw_params.target_channel = chan;
- return -EINPROGRESS;
-}
-
-static int
-wl_iw_get_freq(struct net_device *dev,
- struct iw_request_info *info, struct iw_freq *fwrq, char *extra)
-{
- channel_info_t ci;
- int error;
-
- WL_TRACE("%s: SIOCGIWFREQ\n", dev->name);
-
- error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci));
- if (error)
- return error;
-
- fwrq->m = le32_to_cpu(ci.hw_channel);
- fwrq->e = le32_to_cpu(0);
- return 0;
-}
-
-static int
-wl_iw_set_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq, char *extra)
-{
- int infra = 0, ap = 0, error = 0;
-
- WL_TRACE("%s: SIOCSIWMODE\n", dev->name);
-
- switch (*uwrq) {
- case IW_MODE_MASTER:
- infra = ap = 1;
- break;
- case IW_MODE_ADHOC:
- case IW_MODE_AUTO:
- break;
- case IW_MODE_INFRA:
- infra = 1;
- break;
- default:
- return -EINVAL;
- }
- infra = cpu_to_le32(infra);
- ap = cpu_to_le32(ap);
-
- error = dev_wlc_ioctl(dev, WLC_SET_INFRA, &infra, sizeof(infra));
- if (error)
- return error;
-
- error = dev_wlc_ioctl(dev, WLC_SET_AP, &ap, sizeof(ap));
- if (error)
- return error;
-
- return -EINPROGRESS;
-}
-
-static int
-wl_iw_get_mode(struct net_device *dev,
- struct iw_request_info *info, __u32 *uwrq, char *extra)
-{
- int error, infra = 0, ap = 0;
-
- WL_TRACE("%s: SIOCGIWMODE\n", dev->name);
-
- error = dev_wlc_ioctl(dev, WLC_GET_INFRA, &infra, sizeof(infra));
- if (error)
- return error;
-
- error = dev_wlc_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap));
- if (error)
- return error;
-
- infra = le32_to_cpu(infra);
- ap = le32_to_cpu(ap);
- *uwrq = infra ? ap ? IW_MODE_MASTER : IW_MODE_INFRA : IW_MODE_ADHOC;
-
- return 0;
-}
-
-static int
-wl_iw_get_range(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- struct iw_range *range = (struct iw_range *)extra;
- wl_u32_list_t *list;
- wl_rateset_t rateset;
- s8 *channels;
- int error, i, k;
- uint ch;
-
- int phytype;
- int bw_cap = 0, sgi_tx = 0, nmode = 0;
- channel_info_t ci;
- u8 nrate_list2copy = 0;
- u16 nrate_list[4][8] = { {13, 26, 39, 52, 78, 104, 117, 130},
- {14, 29, 43, 58, 87, 116, 130, 144},
- {27, 54, 81, 108, 162, 216, 243, 270},
- {30, 60, 90, 120, 180, 240, 270, 300}
- };
-
- WL_TRACE("%s: SIOCGIWRANGE\n", dev->name);
-
- if (!extra)
- return -EINVAL;
-
- channels = kmalloc((MAXCHANNEL + 1) * 4, GFP_KERNEL);
- if (!channels) {
- WL_ERROR("Could not alloc channels\n");
- return -ENOMEM;
- }
- list = (wl_u32_list_t *) channels;
-
- dwrq->length = sizeof(struct iw_range);
- memset(range, 0, sizeof(*range));
-
- list->count = cpu_to_le32(MAXCHANNEL);
- error = dev_wlc_ioctl(dev, WLC_GET_VALID_CHANNELS, channels,
- (MAXCHANNEL + 1) * 4);
- if (error) {
- kfree(channels);
- return error;
- }
- for (i = 0; i < le32_to_cpu(list->count) && i < IW_MAX_FREQUENCIES;
- i++) {
- range->freq[i].i = le32_to_cpu(list->element[i]);
-
- ch = le32_to_cpu(list->element[i]);
- if (ch <= CH_MAX_2G_CHANNEL) {
- range->freq[i].m = ieee80211_dsss_chan_to_freq(ch);
- } else {
- range->freq[i].m = ieee80211_ofdm_chan_to_freq(
- WF_CHAN_FACTOR_5_G/2, ch);
- }
- range->freq[i].e = 6;
- }
- range->num_frequency = range->num_channels = i;
-
- range->max_qual.qual = 5;
- range->max_qual.level = 0x100 - 200;
- range->max_qual.noise = 0x100 - 200;
- range->sensitivity = 65535;
-
-#if WIRELESS_EXT > 11
- range->avg_qual.qual = 3;
- range->avg_qual.level = 0x100 + WL_IW_RSSI_GOOD;
- range->avg_qual.noise = 0x100 - 75;
-#endif
-
- error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset,
- sizeof(rateset));
- if (error) {
- kfree(channels);
- return error;
- }
- rateset.count = le32_to_cpu(rateset.count);
- range->num_bitrates = rateset.count;
- for (i = 0; i < rateset.count && i < IW_MAX_BITRATES; i++)
- range->bitrate[i] = (rateset.rates[i] & 0x7f) * 500000;
- dev_wlc_intvar_get(dev, "nmode", &nmode);
- dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &phytype, sizeof(phytype));
-
- if (nmode == 1 && phytype == WLC_PHY_TYPE_SSN) {
- dev_wlc_intvar_get(dev, "mimo_bw_cap", &bw_cap);
- dev_wlc_intvar_get(dev, "sgi_tx", &sgi_tx);
- dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci,
- sizeof(channel_info_t));
- ci.hw_channel = le32_to_cpu(ci.hw_channel);
-
- if (bw_cap == 0 || (bw_cap == 2 && ci.hw_channel <= 14)) {
- if (sgi_tx == 0)
- nrate_list2copy = 0;
- else
- nrate_list2copy = 1;
- }
- if (bw_cap == 1 || (bw_cap == 2 && ci.hw_channel >= 36)) {
- if (sgi_tx == 0)
- nrate_list2copy = 2;
- else
- nrate_list2copy = 3;
- }
- range->num_bitrates += 8;
- for (k = 0; i < range->num_bitrates; k++, i++) {
- range->bitrate[i] =
- (nrate_list[nrate_list2copy][k]) * 500000;
- }
- }
-
- error = dev_wlc_ioctl(dev, WLC_GET_PHYTYPE, &i, sizeof(i));
- if (error) {
- kfree(channels);
- return error;
- }
- i = le32_to_cpu(i);
- if (i == WLC_PHY_TYPE_A)
- range->throughput = 24000000;
- else
- range->throughput = 1500000;
-
- range->min_rts = 0;
- range->max_rts = 2347;
- range->min_frag = 256;
- range->max_frag = 2346;
-
- range->max_encoding_tokens = DOT11_MAX_DEFAULT_KEYS;
- range->num_encoding_sizes = 4;
- range->encoding_size[0] = WLAN_KEY_LEN_WEP40;
- range->encoding_size[1] = WLAN_KEY_LEN_WEP104;
-#if WIRELESS_EXT > 17
- range->encoding_size[2] = WLAN_KEY_LEN_TKIP;
-#else
- range->encoding_size[2] = 0;
-#endif
- range->encoding_size[3] = WLAN_KEY_LEN_AES_CMAC;
-
- range->min_pmp = 0;
- range->max_pmp = 0;
- range->min_pmt = 0;
- range->max_pmt = 0;
- range->pmp_flags = 0;
- range->pm_capa = 0;
-
- range->num_txpower = 2;
- range->txpower[0] = 1;
- range->txpower[1] = 255;
- range->txpower_capa = IW_TXPOW_MWATT;
-
-#if WIRELESS_EXT > 10
- range->we_version_compiled = WIRELESS_EXT;
- range->we_version_source = 19;
-
- range->retry_capa = IW_RETRY_LIMIT;
- range->retry_flags = IW_RETRY_LIMIT;
- range->r_time_flags = 0;
- range->min_retry = 1;
- range->max_retry = 255;
- range->min_r_time = 0;
- range->max_r_time = 0;
-#endif
-
-#if WIRELESS_EXT > 17
- range->enc_capa = IW_ENC_CAPA_WPA;
- range->enc_capa |= IW_ENC_CAPA_CIPHER_TKIP;
- range->enc_capa |= IW_ENC_CAPA_CIPHER_CCMP;
- range->enc_capa |= IW_ENC_CAPA_WPA2;
-
- IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
- IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
- IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
- IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
- IW_EVENT_CAPA_SET(range->event_capa, IWEVMICHAELMICFAILURE);
- IW_EVENT_CAPA_SET(range->event_capa, IWEVPMKIDCAND);
-#endif /* WIRELESS_EXT > 17 */
-
- kfree(channels);
-
- return 0;
-}
-
-static int rssi_to_qual(int rssi)
-{
- if (rssi <= WL_IW_RSSI_NO_SIGNAL)
- return 0;
- else if (rssi <= WL_IW_RSSI_VERY_LOW)
- return 1;
- else if (rssi <= WL_IW_RSSI_LOW)
- return 2;
- else if (rssi <= WL_IW_RSSI_GOOD)
- return 3;
- else if (rssi <= WL_IW_RSSI_VERY_GOOD)
- return 4;
- else
- return 5;
-}
-
-static int
-wl_iw_set_spy(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *dwrq, char *extra)
-{
- wl_iw_t *iw = *(wl_iw_t **) netdev_priv(dev);
- struct sockaddr *addr = (struct sockaddr *)extra;
- int i;
-
- WL_TRACE("%s: SIOCSIWSPY\n", dev->name);
-
- if (!extra)
- return -EINVAL;
-
- iw->spy_num = min_t(int, ARRAY_SIZE(iw->spy_addr), dwrq->length);
- for (i = 0; i < iw->spy_num; i++)
- memcpy(iw->spy_addr[i], addr[i].sa_data, ETH_ALEN);
- memset(iw->spy_qual, 0, sizeof(iw->spy_qual));
-
- return 0;
-}
-
-static int
-wl_iw_get_spy(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *dwrq, char *extra)
-{
- wl_iw_t *iw = *(wl_iw_t **) netdev_priv(dev);
- struct sockaddr *addr = (struct sockaddr *)extra;
- struct iw_quality *qual = (struct iw_quality *)&addr[iw->spy_num];
- int i;
-
- WL_TRACE("%s: SIOCGIWSPY\n", dev->name);
-
- if (!extra)
- return -EINVAL;
-
- dwrq->length = iw->spy_num;
- for (i = 0; i < iw->spy_num; i++) {
- memcpy(addr[i].sa_data, iw->spy_addr[i], ETH_ALEN);
- addr[i].sa_family = AF_UNIX;
- memcpy(&qual[i], &iw->spy_qual[i], sizeof(struct iw_quality));
- iw->spy_qual[i].updated = 0;
- }
-
- return 0;
-}
-
-static int
-wl_iw_ch_to_chanspec(int ch, wl_join_params_t *join_params,
- int *join_params_size)
-{
- chanspec_t chanspec = 0;
-
- if (ch != 0) {
- join_params->params.chanspec_num = 1;
- join_params->params.chanspec_list[0] = ch;
-
- if (join_params->params.chanspec_list[0])
- chanspec |= WL_CHANSPEC_BAND_2G;
- else
- chanspec |= WL_CHANSPEC_BAND_5G;
-
- chanspec |= WL_CHANSPEC_BW_20;
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
-
- *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
- join_params->params.chanspec_num * sizeof(chanspec_t);
-
- join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
- join_params->params.chanspec_list[0] |= chanspec;
- join_params->params.chanspec_list[0] =
- cpu_to_le16(join_params->params.chanspec_list[0]);
-
- join_params->params.chanspec_num =
- cpu_to_le32(join_params->params.chanspec_num);
-
- WL_TRACE("%s join_params->params.chanspec_list[0]= %X\n",
- __func__, join_params->params.chanspec_list[0]);
- }
- return 1;
-}
-
-static int
-wl_iw_set_wap(struct net_device *dev,
- struct iw_request_info *info, struct sockaddr *awrq, char *extra)
-{
- int error = -EINVAL;
- wl_join_params_t join_params;
- int join_params_size;
-
- WL_TRACE("%s: SIOCSIWAP\n", dev->name);
-
- if (awrq->sa_family != ARPHRD_ETHER) {
- WL_ERROR("Invalid Header...sa_family\n");
- return -EINVAL;
- }
-
- if (is_broadcast_ether_addr(awrq->sa_data) ||
- is_zero_ether_addr(awrq->sa_data)) {
- scb_val_t scbval;
- memset(&scbval, 0, sizeof(scb_val_t));
- (void)dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval,
- sizeof(scb_val_t));
- return 0;
- }
-
- memset(&join_params, 0, sizeof(join_params));
- join_params_size = sizeof(join_params.ssid);
-
- memcpy(join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
- join_params.ssid.SSID_len = cpu_to_le32(g_ssid.SSID_len);
- memcpy(&join_params.params.bssid, awrq->sa_data, ETH_ALEN);
-
- WL_TRACE("%s target_channel=%d\n",
- __func__, g_wl_iw_params.target_channel);
- wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params,
- &join_params_size);
-
- error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params,
- join_params_size);
- if (error) {
- WL_ERROR("%s Invalid ioctl data=%d\n", __func__, error);
- }
-
- if (g_ssid.SSID_len) {
- WL_TRACE("%s: join SSID=%s BSSID=%pM ch=%d\n",
- __func__, g_ssid.SSID, awrq->sa_data,
- g_wl_iw_params.target_channel);
- }
-
- memset(&g_ssid, 0, sizeof(g_ssid));
- return 0;
-}
-
-static int
-wl_iw_get_wap(struct net_device *dev,
- struct iw_request_info *info, struct sockaddr *awrq, char *extra)
-{
- WL_TRACE("%s: SIOCGIWAP\n", dev->name);
-
- awrq->sa_family = ARPHRD_ETHER;
- memset(awrq->sa_data, 0, ETH_ALEN);
-
- (void)dev_wlc_ioctl(dev, WLC_GET_BSSID, awrq->sa_data, ETH_ALEN);
-
- return 0;
-}
-
-#if WIRELESS_EXT > 17
-static int
-wl_iw_mlme(struct net_device *dev,
- struct iw_request_info *info, struct sockaddr *awrq, char *extra)
-{
- struct iw_mlme *mlme;
- scb_val_t scbval;
- int error = -EINVAL;
-
- WL_TRACE("%s: SIOCSIWMLME DISASSOC/DEAUTH\n", dev->name);
-
- mlme = (struct iw_mlme *)extra;
- if (mlme == NULL) {
- WL_ERROR("Invalid ioctl data\n");
- return error;
- }
-
- scbval.val = mlme->reason_code;
- memcpy(&scbval.ea, &mlme->addr.sa_data, ETH_ALEN);
-
- if (mlme->cmd == IW_MLME_DISASSOC) {
- scbval.val = cpu_to_le32(scbval.val);
- error =
- dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval,
- sizeof(scb_val_t));
- } else if (mlme->cmd == IW_MLME_DEAUTH) {
- scbval.val = cpu_to_le32(scbval.val);
- error =
- dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON,
- &scbval, sizeof(scb_val_t));
- } else {
- WL_ERROR("Invalid ioctl data\n");
- return error;
- }
-
- return error;
-}
-#endif /* WIRELESS_EXT > 17 */
-
-#ifndef WL_IW_USE_ISCAN
-static int
-wl_iw_get_aplist(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- wl_scan_results_t *list;
- struct sockaddr *addr = (struct sockaddr *)extra;
- struct iw_quality qual[IW_MAX_AP];
- wl_bss_info_t *bi = NULL;
- int error, i;
- uint buflen = dwrq->length;
-
- WL_TRACE("%s: SIOCGIWAPLIST\n", dev->name);
-
- if (!extra)
- return -EINVAL;
-
- list = kzalloc(buflen, GFP_KERNEL);
- if (!list)
- return -ENOMEM;
- list->buflen = cpu_to_le32(buflen);
- error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, buflen);
- if (error) {
- WL_ERROR("%d: Scan results error %d\n", __LINE__, error);
- kfree(list);
- return error;
- }
- list->buflen = le32_to_cpu(list->buflen);
- list->version = le32_to_cpu(list->version);
- list->count = le32_to_cpu(list->count);
- if (list->version != WL_BSS_INFO_VERSION) {
- WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
- __func__, list->version);
- kfree(list);
- return -EINVAL;
- }
-
- for (i = 0, dwrq->length = 0;
- i < list->count && dwrq->length < IW_MAX_AP; i++) {
- bi = bi ? (wl_bss_info_t *) ((unsigned long)bi +
- le32_to_cpu(bi->length)) : list->
- bss_info;
- ASSERT(((unsigned long)bi + le32_to_cpu(bi->length)) <=
- ((unsigned long)list + buflen));
-
- if (!(le16_to_cpu(bi->capability) & WLAN_CAPABILITY_ESS))
- continue;
-
- memcpy(addr[dwrq->length].sa_data, &bi->BSSID, ETH_ALEN);
- addr[dwrq->length].sa_family = ARPHRD_ETHER;
- qual[dwrq->length].qual = rssi_to_qual(le16_to_cpu(bi->RSSI));
- qual[dwrq->length].level = 0x100 + le16_to_cpu(bi->RSSI);
- qual[dwrq->length].noise = 0x100 + bi->phy_noise;
-
-#if WIRELESS_EXT > 18
- qual[dwrq->length].updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
-#else
- qual[dwrq->length].updated = 7;
-#endif
- dwrq->length++;
- }
-
- kfree(list);
-
- if (dwrq->length) {
- memcpy(&addr[dwrq->length], qual,
- sizeof(struct iw_quality) * dwrq->length);
- dwrq->flags = 1;
- }
-
- return 0;
-}
-#endif /* WL_IW_USE_ISCAN */
-
-#ifdef WL_IW_USE_ISCAN
-static int
-wl_iw_iscan_get_aplist(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- wl_scan_results_t *list;
- iscan_buf_t *buf;
- iscan_info_t *iscan = g_iscan;
-
- struct sockaddr *addr = (struct sockaddr *)extra;
- struct iw_quality qual[IW_MAX_AP];
- wl_bss_info_t *bi = NULL;
- int i;
-
- WL_TRACE("%s: SIOCGIWAPLIST\n", dev->name);
-
- if (!extra)
- return -EINVAL;
-
- if ((!iscan) || (!iscan->sysioc_tsk)) {
- WL_ERROR("%s error\n", __func__);
- return 0;
- }
-
- buf = iscan->list_hdr;
- while (buf) {
- list = &((wl_iscan_results_t *) buf->iscan_buf)->results;
- if (list->version != WL_BSS_INFO_VERSION) {
- WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
- __func__, list->version);
- return -EINVAL;
- }
-
- bi = NULL;
- for (i = 0, dwrq->length = 0;
- i < list->count && dwrq->length < IW_MAX_AP; i++) {
- bi = bi ? (wl_bss_info_t *) ((unsigned long)bi +
- le32_to_cpu(bi->length)) :
- list->bss_info;
- ASSERT(((unsigned long)bi + le32_to_cpu(bi->length)) <=
- ((unsigned long)list + WLC_IW_ISCAN_MAXLEN));
-
- if (!(le16_to_cpu(bi->capability) &
- WLAN_CAPABILITY_ESS))
- continue;
-
- memcpy(addr[dwrq->length].sa_data, &bi->BSSID,
- ETH_ALEN);
- addr[dwrq->length].sa_family = ARPHRD_ETHER;
- qual[dwrq->length].qual =
- rssi_to_qual(le16_to_cpu(bi->RSSI));
- qual[dwrq->length].level = 0x100 +
- le16_to_cpu(bi->RSSI);
- qual[dwrq->length].noise = 0x100 + bi->phy_noise;
-
-#if WIRELESS_EXT > 18
- qual[dwrq->length].updated =
- IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
-#else
- qual[dwrq->length].updated = 7;
-#endif
-
- dwrq->length++;
- }
- buf = buf->next;
- }
- if (dwrq->length) {
- memcpy(&addr[dwrq->length], qual,
- sizeof(struct iw_quality) * dwrq->length);
- dwrq->flags = 1;
- }
-
- return 0;
-}
-
-static int wl_iw_iscan_prep(wl_scan_params_t *params, wlc_ssid_t *ssid)
-{
- int err = 0;
-
- memcpy(params->bssid, ether_bcast, ETH_ALEN);
- params->bss_type = DOT11_BSSTYPE_ANY;
- params->scan_type = 0;
- params->nprobes = -1;
- params->active_time = -1;
- params->passive_time = -1;
- params->home_time = -1;
- params->channel_num = 0;
-
- params->nprobes = cpu_to_le32(params->nprobes);
- params->active_time = cpu_to_le32(params->active_time);
- params->passive_time = cpu_to_le32(params->passive_time);
- params->home_time = cpu_to_le32(params->home_time);
- if (ssid && ssid->SSID_len)
- memcpy(&params->ssid, ssid, sizeof(wlc_ssid_t));
-
- return err;
-}
-
-static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, u16 action)
-{
- int err = 0;
-
- iscan->iscan_ex_params_p->version = cpu_to_le32(ISCAN_REQ_VERSION);
- iscan->iscan_ex_params_p->action = cpu_to_le16(action);
- iscan->iscan_ex_params_p->scan_duration = cpu_to_le16(0);
-
- WL_SCAN("%s : nprobes=%d\n",
- __func__, iscan->iscan_ex_params_p->params.nprobes);
- WL_SCAN("active_time=%d\n",
- iscan->iscan_ex_params_p->params.active_time);
- WL_SCAN("passive_time=%d\n",
- iscan->iscan_ex_params_p->params.passive_time);
- WL_SCAN("home_time=%d\n", iscan->iscan_ex_params_p->params.home_time);
- WL_SCAN("scan_type=%d\n", iscan->iscan_ex_params_p->params.scan_type);
- WL_SCAN("bss_type=%d\n", iscan->iscan_ex_params_p->params.bss_type);
-
- (void)dev_iw_iovar_setbuf(iscan->dev, "iscan", iscan->iscan_ex_params_p,
- iscan->iscan_ex_param_size, iscan->ioctlbuf,
- sizeof(iscan->ioctlbuf));
-
- return err;
-}
-
-static void wl_iw_timerfunc(unsigned long data)
-{
- iscan_info_t *iscan = (iscan_info_t *) data;
- if (iscan) {
- iscan->timer_on = 0;
- if (iscan->iscan_state != ISCAN_STATE_IDLE) {
- WL_TRACE("timer trigger\n");
- up(&iscan->sysioc_sem);
- }
- }
-}
-
-static void wl_iw_set_event_mask(struct net_device *dev)
-{
- char eventmask[WL_EVENTING_MASK_LEN];
- char iovbuf[WL_EVENTING_MASK_LEN + 12];
-
- dev_iw_iovar_getbuf(dev, "event_msgs", "", 0, iovbuf, sizeof(iovbuf));
- memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
- setbit(eventmask, WLC_E_SCAN_COMPLETE);
- dev_iw_iovar_setbuf(dev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN,
- iovbuf, sizeof(iovbuf));
-}
-
-static u32 wl_iw_iscan_get(iscan_info_t *iscan)
-{
- iscan_buf_t *buf;
- iscan_buf_t *ptr;
- wl_iscan_results_t *list_buf;
- wl_iscan_results_t list;
- wl_scan_results_t *results;
- u32 status;
- int res = 0;
-
- MUTEX_LOCK_WL_SCAN_SET();
- if (iscan->list_cur) {
- buf = iscan->list_cur;
- iscan->list_cur = buf->next;
- } else {
- buf = kmalloc(sizeof(iscan_buf_t), GFP_KERNEL);
- if (!buf) {
- WL_ERROR("%s can't alloc iscan_buf_t : going to abort current iscan\n",
- __func__);
- MUTEX_UNLOCK_WL_SCAN_SET();
- return WL_SCAN_RESULTS_NO_MEM;
- }
- buf->next = NULL;
- if (!iscan->list_hdr)
- iscan->list_hdr = buf;
- else {
- ptr = iscan->list_hdr;
- while (ptr->next) {
- ptr = ptr->next;
- }
- ptr->next = buf;
- }
- }
- memset(buf->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
- list_buf = (wl_iscan_results_t *) buf->iscan_buf;
- results = &list_buf->results;
- results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
- results->version = 0;
- results->count = 0;
-
- memset(&list, 0, sizeof(list));
- list.results.buflen = cpu_to_le32(WLC_IW_ISCAN_MAXLEN);
- res = dev_iw_iovar_getbuf(iscan->dev,
- "iscanresults",
- &list,
- WL_ISCAN_RESULTS_FIXED_SIZE,
- buf->iscan_buf, WLC_IW_ISCAN_MAXLEN);
- if (res == 0) {
- results->buflen = le32_to_cpu(results->buflen);
- results->version = le32_to_cpu(results->version);
- results->count = le32_to_cpu(results->count);
- WL_TRACE("results->count = %d\n", results->count);
- WL_TRACE("results->buflen = %d\n", results->buflen);
- status = le32_to_cpu(list_buf->status);
- } else {
- WL_ERROR("%s returns error %d\n", __func__, res);
- status = WL_SCAN_RESULTS_NO_MEM;
- }
- MUTEX_UNLOCK_WL_SCAN_SET();
- return status;
-}
-
-static void wl_iw_force_specific_scan(iscan_info_t *iscan)
-{
- WL_TRACE("%s force Specific SCAN for %s\n",
- __func__, g_specific_ssid.SSID);
- rtnl_lock();
-
- (void)dev_wlc_ioctl(iscan->dev, WLC_SCAN, &g_specific_ssid,
- sizeof(g_specific_ssid));
-
- rtnl_unlock();
-}
-
-static void wl_iw_send_scan_complete(iscan_info_t *iscan)
-{
-#ifndef SANDGATE2G
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(wrqu));
-
- wireless_send_event(iscan->dev, SIOCGIWSCAN, &wrqu, NULL);
- WL_TRACE("Send Event ISCAN complete\n");
-#endif
-}
-
-static int _iscan_sysioc_thread(void *data)
-{
- u32 status;
- iscan_info_t *iscan = (iscan_info_t *) data;
- static bool iscan_pass_abort = false;
-
- allow_signal(SIGTERM);
- status = WL_SCAN_RESULTS_PARTIAL;
- while (down_interruptible(&iscan->sysioc_sem) == 0) {
- if (kthread_should_stop())
- break;
-
- if (iscan->timer_on) {
- del_timer_sync(&iscan->timer);
- iscan->timer_on = 0;
- }
- rtnl_lock();
- status = wl_iw_iscan_get(iscan);
- rtnl_unlock();
- if (g_scan_specified_ssid && (iscan_pass_abort == true)) {
- WL_TRACE("%s Get results from specific scan status = %d\n",
- __func__, status);
- wl_iw_send_scan_complete(iscan);
- iscan_pass_abort = false;
- status = -1;
- }
-
- switch (status) {
- case WL_SCAN_RESULTS_PARTIAL:
- WL_TRACE("iscanresults incomplete\n");
- rtnl_lock();
- wl_iw_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
- rtnl_unlock();
- mod_timer(&iscan->timer,
- jiffies + iscan->timer_ms * HZ / 1000);
- iscan->timer_on = 1;
- break;
- case WL_SCAN_RESULTS_SUCCESS:
- WL_TRACE("iscanresults complete\n");
- iscan->iscan_state = ISCAN_STATE_IDLE;
- wl_iw_send_scan_complete(iscan);
- break;
- case WL_SCAN_RESULTS_PENDING:
- WL_TRACE("iscanresults pending\n");
- mod_timer(&iscan->timer,
- jiffies + iscan->timer_ms * HZ / 1000);
- iscan->timer_on = 1;
- break;
- case WL_SCAN_RESULTS_ABORTED:
- WL_TRACE("iscanresults aborted\n");
- iscan->iscan_state = ISCAN_STATE_IDLE;
- if (g_scan_specified_ssid == 0)
- wl_iw_send_scan_complete(iscan);
- else {
- iscan_pass_abort = true;
- wl_iw_force_specific_scan(iscan);
- }
- break;
- case WL_SCAN_RESULTS_NO_MEM:
- WL_TRACE("iscanresults can't alloc memory: skip\n");
- iscan->iscan_state = ISCAN_STATE_IDLE;
- break;
- default:
- WL_TRACE("iscanresults returned unknown status %d\n",
- status);
- break;
- }
- }
-
- if (iscan->timer_on) {
- del_timer_sync(&iscan->timer);
- iscan->timer_on = 0;
- }
- return 0;
-}
-#endif /* WL_IW_USE_ISCAN */
-
-static int
-wl_iw_set_scan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int error;
- WL_TRACE("\n:%s dev:%s: SIOCSIWSCAN : SCAN\n", __func__, dev->name);
-
- g_set_essid_before_scan = false;
-#if defined(CSCAN)
- WL_ERROR("%s: Scan from SIOCGIWSCAN not supported\n", __func__);
- return -EINVAL;
-#endif
-
- if (g_onoff == G_WLAN_SET_OFF)
- return 0;
-
- memset(&g_specific_ssid, 0, sizeof(g_specific_ssid));
-#ifndef WL_IW_USE_ISCAN
- g_scan_specified_ssid = 0;
-#endif
-
-#if WIRELESS_EXT > 17
- if (wrqu->data.length == sizeof(struct iw_scan_req)) {
- if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
- struct iw_scan_req *req = (struct iw_scan_req *)extra;
- if (g_scan_specified_ssid) {
- WL_TRACE("%s Specific SCAN is not done ignore scan for = %s\n",
- __func__, req->essid);
- return -EBUSY;
- } else {
- g_specific_ssid.SSID_len = min_t(size_t,
- sizeof(g_specific_ssid.SSID),
- req->essid_len);
- memcpy(g_specific_ssid.SSID, req->essid,
- g_specific_ssid.SSID_len);
- g_specific_ssid.SSID_len =
- cpu_to_le32(g_specific_ssid.SSID_len);
- g_scan_specified_ssid = 1;
- WL_TRACE("### Specific scan ssid=%s len=%d\n",
- g_specific_ssid.SSID,
- g_specific_ssid.SSID_len);
- }
- }
- }
-#endif /* WIRELESS_EXT > 17 */
- error = dev_wlc_ioctl(dev, WLC_SCAN, &g_specific_ssid,
- sizeof(g_specific_ssid));
- if (error) {
- WL_TRACE("#### Set SCAN for %s failed with %d\n",
- g_specific_ssid.SSID, error);
- g_scan_specified_ssid = 0;
- return -EBUSY;
- }
-
- return 0;
-}
-
-#ifdef WL_IW_USE_ISCAN
-int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag)
-{
- wlc_ssid_t ssid;
- iscan_info_t *iscan = g_iscan;
-
- if (flag)
- rtnl_lock();
-
- wl_iw_set_event_mask(dev);
-
- WL_TRACE("+++: Set Broadcast ISCAN\n");
- memset(&ssid, 0, sizeof(ssid));
-
- iscan->list_cur = iscan->list_hdr;
- iscan->iscan_state = ISCAN_STATE_SCANING;
-
- memset(&iscan->iscan_ex_params_p->params, 0,
- iscan->iscan_ex_param_size);
- wl_iw_iscan_prep(&iscan->iscan_ex_params_p->params, &ssid);
- wl_iw_iscan(iscan, &ssid, WL_SCAN_ACTION_START);
-
- if (flag)
- rtnl_unlock();
-
- mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
-
- iscan->timer_on = 1;
-
- return 0;
-}
-
-static int
-wl_iw_iscan_set_scan(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- wlc_ssid_t ssid;
- iscan_info_t *iscan = g_iscan;
-
- WL_TRACE("%s: SIOCSIWSCAN : ISCAN\n", dev->name);
-
-#if defined(CSCAN)
- WL_ERROR("%s: Scan from SIOCGIWSCAN not supported\n", __func__);
- return -EINVAL;
-#endif
-
- if (g_onoff == G_WLAN_SET_OFF) {
- WL_TRACE("%s: driver is not up yet after START\n", __func__);
- return 0;
- }
-#ifdef PNO_SUPPORT
- if (dhd_dev_get_pno_status(dev)) {
- WL_ERROR("%s: Scan called when PNO is active\n", __func__);
- }
-#endif
-
- if ((!iscan) || (!iscan->sysioc_tsk))
- return wl_iw_set_scan(dev, info, wrqu, extra);
-
- if (g_scan_specified_ssid) {
- WL_TRACE("%s Specific SCAN already running ignoring BC scan\n",
- __func__);
- return -EBUSY;
- }
-
- memset(&ssid, 0, sizeof(ssid));
-
-#if WIRELESS_EXT > 17
- if (wrqu->data.length == sizeof(struct iw_scan_req)) {
- if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
- struct iw_scan_req *req = (struct iw_scan_req *)extra;
- ssid.SSID_len = min_t(size_t, sizeof(ssid.SSID),
- req->essid_len);
- memcpy(ssid.SSID, req->essid, ssid.SSID_len);
- ssid.SSID_len = cpu_to_le32(ssid.SSID_len);
- } else {
- g_scan_specified_ssid = 0;
-
- if (iscan->iscan_state == ISCAN_STATE_SCANING) {
- WL_TRACE("%s ISCAN already in progress\n",
- __func__);
- return 0;
- }
- }
- }
-#endif /* WIRELESS_EXT > 17 */
- wl_iw_iscan_set_scan_broadcast_prep(dev, 0);
-
- return 0;
-}
-#endif /* WL_IW_USE_ISCAN */
-
-#if WIRELESS_EXT > 17
-static bool ie_is_wpa_ie(u8 **wpaie, u8 **tlvs, int *tlvs_len)
-{
-
- u8 *ie = *wpaie;
-
- if ((ie[1] >= 6) &&
- !memcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x01"), 4)) {
- return true;
- }
-
- ie += ie[1] + 2;
- *tlvs_len -= (int)(ie - *tlvs);
- *tlvs = ie;
- return false;
-}
-
-static bool ie_is_wps_ie(u8 **wpsie, u8 **tlvs, int *tlvs_len)
-{
-
- u8 *ie = *wpsie;
-
- if ((ie[1] >= 4) &&
- !memcmp((const void *)&ie[2], (const void *)(WPA_OUI "\x04"), 4)) {
- return true;
- }
-
- ie += ie[1] + 2;
- *tlvs_len -= (int)(ie - *tlvs);
- *tlvs = ie;
- return false;
-}
-#endif /* WIRELESS_EXT > 17 */
-
-static int
-wl_iw_handle_scanresults_ies(char **event_p, char *end,
- struct iw_request_info *info, wl_bss_info_t *bi)
-{
-#if WIRELESS_EXT > 17
- struct iw_event iwe;
- char *event;
-
- event = *event_p;
- if (bi->ie_length) {
- bcm_tlv_t *ie;
- u8 *ptr = ((u8 *) bi) + sizeof(wl_bss_info_t);
- int ptr_len = bi->ie_length;
-
- ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID);
- if (ie) {
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = ie->len + 2;
- event =
- IWE_STREAM_ADD_POINT(info, event, end, &iwe,
- (char *)ie);
- }
- ptr = ((u8 *) bi) + sizeof(wl_bss_info_t);
-
- while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
- if (ie_is_wps_ie(((u8 **)&ie), &ptr, &ptr_len)) {
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = ie->len + 2;
- event =
- IWE_STREAM_ADD_POINT(info, event, end, &iwe,
- (char *)ie);
- break;
- }
- }
-
- ptr = ((u8 *) bi) + sizeof(wl_bss_info_t);
- ptr_len = bi->ie_length;
- while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WPA_ID))) {
- if (ie_is_wpa_ie(((u8 **)&ie), &ptr, &ptr_len)) {
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = ie->len + 2;
- event =
- IWE_STREAM_ADD_POINT(info, event, end, &iwe,
- (char *)ie);
- break;
- }
- }
-
- *event_p = event;
- }
-#endif /* WIRELESS_EXT > 17 */
- return 0;
-}
-
-static uint
-wl_iw_get_scan_prep(wl_scan_results_t *list,
- struct iw_request_info *info, char *extra, short max_size)
-{
- int i, j;
- struct iw_event iwe;
- wl_bss_info_t *bi = NULL;
- char *event = extra, *end = extra + max_size - WE_ADD_EVENT_FIX, *value;
- int ret = 0;
-
- ASSERT(list);
-
- for (i = 0; i < list->count && i < IW_MAX_AP; i++) {
- if (list->version != WL_BSS_INFO_VERSION) {
- WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
- __func__, list->version);
- return ret;
- }
-
- bi = bi ? (wl_bss_info_t *)((unsigned long)bi +
- le32_to_cpu(bi->length)) : list->
- bss_info;
-
- WL_TRACE("%s : %s\n", __func__, bi->SSID);
-
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETH_ALEN);
- event =
- IWE_STREAM_ADD_EVENT(info, event, end, &iwe,
- IW_EV_ADDR_LEN);
- iwe.u.data.length = le32_to_cpu(bi->SSID_len);
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.flags = 1;
- event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
-
- if (le16_to_cpu(bi->capability) & (WLAN_CAPABILITY_ESS |
- WLAN_CAPABILITY_IBSS)) {
- iwe.cmd = SIOCGIWMODE;
- if (le16_to_cpu(bi->capability) & WLAN_CAPABILITY_ESS)
- iwe.u.mode = IW_MODE_INFRA;
- else
- iwe.u.mode = IW_MODE_ADHOC;
- event =
- IWE_STREAM_ADD_EVENT(info, event, end, &iwe,
- IW_EV_UINT_LEN);
- }
-
- iwe.cmd = SIOCGIWFREQ;
-
- if (CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL)
- iwe.u.freq.m = ieee80211_dsss_chan_to_freq(
- CHSPEC_CHANNEL(bi->chanspec));
- else
- iwe.u.freq.m = ieee80211_ofdm_chan_to_freq(
- WF_CHAN_FACTOR_5_G/2,
- CHSPEC_CHANNEL(bi->chanspec));
-
- iwe.u.freq.e = 6;
- event =
- IWE_STREAM_ADD_EVENT(info, event, end, &iwe,
- IW_EV_FREQ_LEN);
-
- iwe.cmd = IWEVQUAL;
- iwe.u.qual.qual = rssi_to_qual(le16_to_cpu(bi->RSSI));
- iwe.u.qual.level = 0x100 + le16_to_cpu(bi->RSSI);
- iwe.u.qual.noise = 0x100 + bi->phy_noise;
- event =
- IWE_STREAM_ADD_EVENT(info, event, end, &iwe,
- IW_EV_QUAL_LEN);
-
- wl_iw_handle_scanresults_ies(&event, end, info, bi);
-
- iwe.cmd = SIOCGIWENCODE;
- if (le16_to_cpu(bi->capability) & WLAN_CAPABILITY_PRIVACY)
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- iwe.u.data.length = 0;
- event =
- IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
-
- if (bi->rateset.count) {
- if (((event - extra) +
- IW_EV_LCP_LEN) <= (unsigned long)end) {
- value = event + IW_EV_LCP_LEN;
- iwe.cmd = SIOCGIWRATE;
- iwe.u.bitrate.fixed = iwe.u.bitrate.disabled =
- 0;
- for (j = 0;
- j < bi->rateset.count
- && j < IW_MAX_BITRATES; j++) {
- iwe.u.bitrate.value =
- (bi->rateset.rates[j] & 0x7f) *
- 500000;
- value =
- IWE_STREAM_ADD_VALUE(info, event,
- value, end, &iwe,
- IW_EV_PARAM_LEN);
- }
- event = value;
- }
- }
- }
-
- ret = event - extra;
- if (ret < 0) {
- WL_ERROR("==> Wrong size\n");
- ret = 0;
- }
- WL_TRACE("%s: size=%d bytes prepared\n",
- __func__, (unsigned int)(event - extra));
- return (uint)ret;
-}
-
-static int
-wl_iw_get_scan(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *dwrq, char *extra)
-{
- channel_info_t ci;
- wl_scan_results_t *list_merge;
- wl_scan_results_t *list = (wl_scan_results_t *) g_scan;
- int error;
- uint buflen_from_user = dwrq->length;
- uint len = G_SCAN_RESULTS;
- __u16 len_ret = 0;
-#if defined(WL_IW_USE_ISCAN)
- iscan_info_t *iscan = g_iscan;
- iscan_buf_t *p_buf;
-#endif
-
- WL_TRACE("%s: buflen_from_user %d:\n", dev->name, buflen_from_user);
-
- if (!extra) {
- WL_TRACE("%s: wl_iw_get_scan return -EINVAL\n", dev->name);
- return -EINVAL;
- }
-
- error = dev_wlc_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(ci));
- if (error)
- return error;
- ci.scan_channel = le32_to_cpu(ci.scan_channel);
- if (ci.scan_channel)
- return -EAGAIN;
-
- if (g_scan_specified_ssid) {
- list = kmalloc(len, GFP_KERNEL);
- if (!list) {
- WL_TRACE("%s: wl_iw_get_scan return -ENOMEM\n",
- dev->name);
- g_scan_specified_ssid = 0;
- return -ENOMEM;
- }
- }
-
- memset(list, 0, len);
- list->buflen = cpu_to_le32(len);
- error = dev_wlc_ioctl(dev, WLC_SCAN_RESULTS, list, len);
- if (error) {
- WL_ERROR("%s: %s : Scan_results ERROR %d\n",
- dev->name, __func__, error);
- dwrq->length = len;
- if (g_scan_specified_ssid) {
- g_scan_specified_ssid = 0;
- kfree(list);
- }
- return 0;
- }
- list->buflen = le32_to_cpu(list->buflen);
- list->version = le32_to_cpu(list->version);
- list->count = le32_to_cpu(list->count);
-
- if (list->version != WL_BSS_INFO_VERSION) {
- WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
- __func__, list->version);
- if (g_scan_specified_ssid) {
- g_scan_specified_ssid = 0;
- kfree(list);
- }
- return -EINVAL;
- }
-
- if (g_scan_specified_ssid) {
- WL_TRACE("%s: Specified scan APs in the list =%d\n",
- __func__, list->count);
- len_ret =
- (__u16) wl_iw_get_scan_prep(list, info, extra,
- buflen_from_user);
- kfree(list);
-
-#if defined(WL_IW_USE_ISCAN)
- p_buf = iscan->list_hdr;
- while (p_buf != iscan->list_cur) {
- list_merge =
- &((wl_iscan_results_t *) p_buf->iscan_buf)->results;
- WL_TRACE("%s: Bcast APs list=%d\n",
- __func__, list_merge->count);
- if (list_merge->count > 0)
- len_ret +=
- (__u16) wl_iw_get_scan_prep(list_merge,
- info, extra + len_ret,
- buflen_from_user - len_ret);
- p_buf = p_buf->next;
- }
-#else
- list_merge = (wl_scan_results_t *) g_scan;
- WL_TRACE("%s: Bcast APs list=%d\n",
- __func__, list_merge->count);
- if (list_merge->count > 0)
- len_ret +=
- (__u16) wl_iw_get_scan_prep(list_merge, info,
- extra + len_ret,
- buflen_from_user -
- len_ret);
-#endif /* defined(WL_IW_USE_ISCAN) */
- } else {
- list = (wl_scan_results_t *) g_scan;
- len_ret =
- (__u16) wl_iw_get_scan_prep(list, info, extra,
- buflen_from_user);
- }
-
-#if defined(WL_IW_USE_ISCAN)
- g_scan_specified_ssid = 0;
-#endif
- if ((len_ret + WE_ADD_EVENT_FIX) < buflen_from_user)
- len = len_ret;
-
- dwrq->length = len;
- dwrq->flags = 0;
-
- WL_TRACE("%s return to WE %d bytes APs=%d\n",
- __func__, dwrq->length, list->count);
- return 0;
-}
-
-#if defined(WL_IW_USE_ISCAN)
-static int
-wl_iw_iscan_get_scan(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- wl_scan_results_t *list;
- struct iw_event iwe;
- wl_bss_info_t *bi = NULL;
- int ii, j;
- int apcnt;
- char *event = extra, *end = extra + dwrq->length, *value;
- iscan_info_t *iscan = g_iscan;
- iscan_buf_t *p_buf;
- u32 counter = 0;
- u8 channel;
-
- WL_TRACE("%s %s buflen_from_user %d:\n",
- dev->name, __func__, dwrq->length);
-
- if (!extra) {
- WL_TRACE("%s: INVALID SIOCGIWSCAN GET bad parameter\n",
- dev->name);
- return -EINVAL;
- }
-
- if ((!iscan) || (!iscan->sysioc_tsk)) {
-		WL_ERROR("%s: no iscan or sysioc_tsk, using wl_iw_get_scan\n", __func__);
- return wl_iw_get_scan(dev, info, dwrq, extra);
- }
-
- if (iscan->iscan_state == ISCAN_STATE_SCANING) {
- WL_TRACE("%s: SIOCGIWSCAN GET still scanning\n", dev->name);
- return -EAGAIN;
- }
-
- WL_TRACE("%s: SIOCGIWSCAN GET broadcast results\n", dev->name);
- apcnt = 0;
- p_buf = iscan->list_hdr;
- while (p_buf != iscan->list_cur) {
- list = &((wl_iscan_results_t *) p_buf->iscan_buf)->results;
-
- counter += list->count;
-
- if (list->version != WL_BSS_INFO_VERSION) {
- WL_ERROR("%s : list->version %d != WL_BSS_INFO_VERSION\n",
- __func__, list->version);
- return -EINVAL;
- }
-
- bi = NULL;
- for (ii = 0; ii < list->count && apcnt < IW_MAX_AP;
- apcnt++, ii++) {
- bi = bi ? (wl_bss_info_t *)((unsigned long)bi +
- le32_to_cpu(bi->length)) :
- list->bss_info;
- ASSERT(((unsigned long)bi + le32_to_cpu(bi->length)) <=
- ((unsigned long)list + WLC_IW_ISCAN_MAXLEN));
-
- if (event + ETH_ALEN + bi->SSID_len +
- IW_EV_UINT_LEN + IW_EV_FREQ_LEN + IW_EV_QUAL_LEN >=
- end)
- return -E2BIG;
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID,
- ETH_ALEN);
- event =
- IWE_STREAM_ADD_EVENT(info, event, end, &iwe,
- IW_EV_ADDR_LEN);
-
- iwe.u.data.length = le32_to_cpu(bi->SSID_len);
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.flags = 1;
- event =
- IWE_STREAM_ADD_POINT(info, event, end, &iwe,
- bi->SSID);
-
- if (le16_to_cpu(bi->capability) &
- (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
- iwe.cmd = SIOCGIWMODE;
- if (le16_to_cpu(bi->capability) &
- WLAN_CAPABILITY_ESS)
- iwe.u.mode = IW_MODE_INFRA;
- else
- iwe.u.mode = IW_MODE_ADHOC;
- event =
- IWE_STREAM_ADD_EVENT(info, event, end, &iwe,
- IW_EV_UINT_LEN);
- }
-
- iwe.cmd = SIOCGIWFREQ;
- channel =
- (bi->ctl_ch ==
- 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
-
- if (channel <= CH_MAX_2G_CHANNEL)
- iwe.u.freq.m =
- ieee80211_dsss_chan_to_freq(channel);
- else
- iwe.u.freq.m = ieee80211_ofdm_chan_to_freq(
- WF_CHAN_FACTOR_5_G/2,
- channel);
-
- iwe.u.freq.e = 6;
- event =
- IWE_STREAM_ADD_EVENT(info, event, end, &iwe,
- IW_EV_FREQ_LEN);
-
- iwe.cmd = IWEVQUAL;
- iwe.u.qual.qual = rssi_to_qual(le16_to_cpu(bi->RSSI));
- iwe.u.qual.level = 0x100 + le16_to_cpu(bi->RSSI);
- iwe.u.qual.noise = 0x100 + bi->phy_noise;
- event =
- IWE_STREAM_ADD_EVENT(info, event, end, &iwe,
- IW_EV_QUAL_LEN);
-
- wl_iw_handle_scanresults_ies(&event, end, info, bi);
-
- iwe.cmd = SIOCGIWENCODE;
- if (le16_to_cpu(bi->capability) &
- WLAN_CAPABILITY_PRIVACY)
- iwe.u.data.flags =
- IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- iwe.u.data.length = 0;
- event =
- IWE_STREAM_ADD_POINT(info, event, end, &iwe,
- (char *)event);
-
- if (bi->rateset.count) {
- if (event + IW_MAX_BITRATES * IW_EV_PARAM_LEN >=
- end)
- return -E2BIG;
-
- value = event + IW_EV_LCP_LEN;
- iwe.cmd = SIOCGIWRATE;
- iwe.u.bitrate.fixed = iwe.u.bitrate.disabled =
- 0;
- for (j = 0;
- j < bi->rateset.count
- && j < IW_MAX_BITRATES; j++) {
- iwe.u.bitrate.value =
- (bi->rateset.rates[j] & 0x7f) *
- 500000;
- value =
- IWE_STREAM_ADD_VALUE(info, event,
- value, end,
- &iwe,
- IW_EV_PARAM_LEN);
- }
- event = value;
- }
- }
- p_buf = p_buf->next;
- }
-
- dwrq->length = event - extra;
- dwrq->flags = 0;
-
- WL_TRACE("%s return to WE %d bytes APs=%d\n",
- __func__, dwrq->length, counter);
-
- if (!dwrq->length)
- return -EAGAIN;
-
- return 0;
-}
-#endif /* defined(WL_IW_USE_ISCAN) */
-
-static int
-wl_iw_set_essid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- int error;
- wl_join_params_t join_params;
- int join_params_size;
-
- WL_TRACE("%s: SIOCSIWESSID\n", dev->name);
-
- if (g_set_essid_before_scan)
- return -EAGAIN;
-
- memset(&g_ssid, 0, sizeof(g_ssid));
-
- CHECK_EXTRA_FOR_NULL(extra);
-
- if (dwrq->length && extra) {
-#if WIRELESS_EXT > 20
- g_ssid.SSID_len = min_t(size_t, sizeof(g_ssid.SSID),
- dwrq->length);
-#else
- g_ssid.SSID_len = min_t(size_t, sizeof(g_ssid.SSID),
- dwrq->length - 1);
-#endif
- memcpy(g_ssid.SSID, extra, g_ssid.SSID_len);
- } else {
- g_ssid.SSID_len = 0;
- }
- g_ssid.SSID_len = cpu_to_le32(g_ssid.SSID_len);
-
- memset(&join_params, 0, sizeof(join_params));
- join_params_size = sizeof(join_params.ssid);
-
- memcpy(&join_params.ssid.SSID, g_ssid.SSID, g_ssid.SSID_len);
- join_params.ssid.SSID_len = cpu_to_le32(g_ssid.SSID_len);
- memcpy(join_params.params.bssid, ether_bcast, ETH_ALEN);
-
- wl_iw_ch_to_chanspec(g_wl_iw_params.target_channel, &join_params,
- &join_params_size);
-
- error = dev_wlc_ioctl(dev, WLC_SET_SSID, &join_params,
- join_params_size);
- if (error)
- WL_ERROR("Invalid ioctl data=%d\n", error);
-
- if (g_ssid.SSID_len) {
- WL_TRACE("%s: join SSID=%s ch=%d\n",
- __func__, g_ssid.SSID, g_wl_iw_params.target_channel);
- }
- return 0;
-}
-
-static int
-wl_iw_get_essid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- wlc_ssid_t ssid;
- int error;
-
- WL_TRACE("%s: SIOCGIWESSID\n", dev->name);
-
- if (!extra)
- return -EINVAL;
-
- error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid));
- if (error) {
- WL_ERROR("Error getting the SSID\n");
- return error;
- }
-
- ssid.SSID_len = le32_to_cpu(ssid.SSID_len);
-
- memcpy(extra, ssid.SSID, ssid.SSID_len);
-
- dwrq->length = ssid.SSID_len;
-
- dwrq->flags = 1;
-
- return 0;
-}
-
-static int
-wl_iw_set_nick(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *dwrq, char *extra)
-{
- wl_iw_t *iw = *(wl_iw_t **) netdev_priv(dev);
-
- WL_TRACE("%s: SIOCSIWNICKN\n", dev->name);
-
- if (!extra)
- return -EINVAL;
-
- if (dwrq->length > sizeof(iw->nickname))
- return -E2BIG;
-
- memcpy(iw->nickname, extra, dwrq->length);
- iw->nickname[dwrq->length - 1] = '\0';
-
- return 0;
-}
-
-static int
-wl_iw_get_nick(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *dwrq, char *extra)
-{
- wl_iw_t *iw = *(wl_iw_t **) netdev_priv(dev);
-
- WL_TRACE("%s: SIOCGIWNICKN\n", dev->name);
-
- if (!extra)
- return -EINVAL;
-
- strcpy(extra, iw->nickname);
- dwrq->length = strlen(extra) + 1;
-
- return 0;
-}
-
-static int
-wl_iw_set_rate(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *vwrq, char *extra)
-{
- wl_rateset_t rateset;
- int error, rate, i, error_bg, error_a;
-
- WL_TRACE("%s: SIOCSIWRATE\n", dev->name);
-
- error = dev_wlc_ioctl(dev, WLC_GET_CURR_RATESET, &rateset,
- sizeof(rateset));
- if (error)
- return error;
-
- rateset.count = le32_to_cpu(rateset.count);
-
- if (vwrq->value < 0)
- rate = rateset.rates[rateset.count - 1] & 0x7f;
- else if (vwrq->value < rateset.count)
- rate = rateset.rates[vwrq->value] & 0x7f;
- else
- rate = vwrq->value / 500000;
-
- if (vwrq->fixed) {
- error_bg = dev_wlc_intvar_set(dev, "bg_rate", rate);
- error_a = dev_wlc_intvar_set(dev, "a_rate", rate);
-
- if (error_bg && error_a)
- return error_bg | error_a;
- } else {
- error_bg = dev_wlc_intvar_set(dev, "bg_rate", 0);
- error_a = dev_wlc_intvar_set(dev, "a_rate", 0);
-
- if (error_bg && error_a)
- return error_bg | error_a;
-
- for (i = 0; i < rateset.count; i++)
- if ((rateset.rates[i] & 0x7f) > rate)
- break;
- rateset.count = cpu_to_le32(i);
-
- error = dev_wlc_ioctl(dev, WLC_SET_RATESET, &rateset,
- sizeof(rateset));
- if (error)
- return error;
- }
-
- return 0;
-}
-
-static int
-wl_iw_get_rate(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *vwrq, char *extra)
-{
- int error, rate;
-
- WL_TRACE("%s: SIOCGIWRATE\n", dev->name);
-
- error = dev_wlc_ioctl(dev, WLC_GET_RATE, &rate, sizeof(rate));
- if (error)
- return error;
- rate = le32_to_cpu(rate);
- vwrq->value = rate * 500000;
-
- return 0;
-}
-
-static int
-wl_iw_set_rts(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *vwrq, char *extra)
-{
- int error, rts;
-
- WL_TRACE("%s: SIOCSIWRTS\n", dev->name);
-
- if (vwrq->disabled)
- rts = DOT11_DEFAULT_RTS_LEN;
- else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_RTS_LEN)
- return -EINVAL;
- else
- rts = vwrq->value;
-
- error = dev_wlc_intvar_set(dev, "rtsthresh", rts);
- if (error)
- return error;
-
- return 0;
-}
-
-static int
-wl_iw_get_rts(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *vwrq, char *extra)
-{
- int error, rts;
-
- WL_TRACE("%s: SIOCGIWRTS\n", dev->name);
-
- error = dev_wlc_intvar_get(dev, "rtsthresh", &rts);
- if (error)
- return error;
-
- vwrq->value = rts;
- vwrq->disabled = (rts >= DOT11_DEFAULT_RTS_LEN);
- vwrq->fixed = 1;
-
- return 0;
-}
-
-static int
-wl_iw_set_frag(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *vwrq, char *extra)
-{
- int error, frag;
-
- WL_TRACE("%s: SIOCSIWFRAG\n", dev->name);
-
- if (vwrq->disabled)
- frag = DOT11_DEFAULT_FRAG_LEN;
- else if (vwrq->value < 0 || vwrq->value > DOT11_DEFAULT_FRAG_LEN)
- return -EINVAL;
- else
- frag = vwrq->value;
-
- error = dev_wlc_intvar_set(dev, "fragthresh", frag);
- if (error)
- return error;
-
- return 0;
-}
-
-static int
-wl_iw_get_frag(struct net_device *dev,
- struct iw_request_info *info, struct iw_param *vwrq, char *extra)
-{
- int error, fragthreshold;
-
- WL_TRACE("%s: SIOCGIWFRAG\n", dev->name);
-
- error = dev_wlc_intvar_get(dev, "fragthresh", &fragthreshold);
- if (error)
- return error;
-
- vwrq->value = fragthreshold;
- vwrq->disabled = (fragthreshold >= DOT11_DEFAULT_FRAG_LEN);
- vwrq->fixed = 1;
-
- return 0;
-}
-
-static int
-wl_iw_set_txpow(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- int error, disable;
- u16 txpwrmw;
- WL_TRACE("%s: SIOCSIWTXPOW\n", dev->name);
-
- disable = vwrq->disabled ? WL_RADIO_SW_DISABLE : 0;
- disable += WL_RADIO_SW_DISABLE << 16;
-
- disable = cpu_to_le32(disable);
- error = dev_wlc_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable));
- if (error)
- return error;
-
- if (disable & WL_RADIO_SW_DISABLE)
- return 0;
-
- if (!(vwrq->flags & IW_TXPOW_MWATT))
- return -EINVAL;
-
- if (vwrq->value < 0)
- return 0;
-
- if (vwrq->value > 0xffff)
- txpwrmw = 0xffff;
- else
- txpwrmw = (u16) vwrq->value;
-
- error =
- dev_wlc_intvar_set(dev, "qtxpower", (int)(bcm_mw_to_qdbm(txpwrmw)));
- return error;
-}
-
-static int
-wl_iw_get_txpow(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- int error, disable, txpwrdbm;
- u8 result;
-
- WL_TRACE("%s: SIOCGIWTXPOW\n", dev->name);
-
- error = dev_wlc_ioctl(dev, WLC_GET_RADIO, &disable, sizeof(disable));
- if (error)
- return error;
-
- error = dev_wlc_intvar_get(dev, "qtxpower", &txpwrdbm);
- if (error)
- return error;
-
- disable = le32_to_cpu(disable);
- result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
- vwrq->value = (s32) bcm_qdbm_to_mw(result);
- vwrq->fixed = 0;
- vwrq->disabled =
- (disable & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE)) ? 1 : 0;
- vwrq->flags = IW_TXPOW_MWATT;
-
- return 0;
-}
-
-#if WIRELESS_EXT > 10
-static int
-wl_iw_set_retry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- int error, lrl, srl;
-
- WL_TRACE("%s: SIOCSIWRETRY\n", dev->name);
-
- if (vwrq->disabled || (vwrq->flags & IW_RETRY_LIFETIME))
- return -EINVAL;
-
- if (vwrq->flags & IW_RETRY_LIMIT) {
-
-#if WIRELESS_EXT > 20
- if ((vwrq->flags & IW_RETRY_LONG)
- || (vwrq->flags & IW_RETRY_MAX)
- || !((vwrq->flags & IW_RETRY_SHORT)
- || (vwrq->flags & IW_RETRY_MIN))) {
-#else
- if ((vwrq->flags & IW_RETRY_MAX)
- || !(vwrq->flags & IW_RETRY_MIN)) {
-#endif
- lrl = cpu_to_le32(vwrq->value);
- error = dev_wlc_ioctl(dev, WLC_SET_LRL, &lrl,
- sizeof(lrl));
- if (error)
- return error;
- }
-#if WIRELESS_EXT > 20
- if ((vwrq->flags & IW_RETRY_SHORT)
- || (vwrq->flags & IW_RETRY_MIN)
- || !((vwrq->flags & IW_RETRY_LONG)
- || (vwrq->flags & IW_RETRY_MAX))) {
-#else
- if ((vwrq->flags & IW_RETRY_MIN)
- || !(vwrq->flags & IW_RETRY_MAX)) {
-#endif
- srl = cpu_to_le32(vwrq->value);
- error = dev_wlc_ioctl(dev, WLC_SET_SRL, &srl,
- sizeof(srl));
- if (error)
- return error;
- }
- }
- return 0;
-}
-
-static int
-wl_iw_get_retry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- int error, lrl, srl;
-
- WL_TRACE("%s: SIOCGIWRETRY\n", dev->name);
-
- vwrq->disabled = 0;
-
- if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
- return -EINVAL;
-
- error = dev_wlc_ioctl(dev, WLC_GET_LRL, &lrl, sizeof(lrl));
- if (error)
- return error;
-
- error = dev_wlc_ioctl(dev, WLC_GET_SRL, &srl, sizeof(srl));
- if (error)
- return error;
-
- lrl = le32_to_cpu(lrl);
- srl = le32_to_cpu(srl);
-
- if (vwrq->flags & IW_RETRY_MAX) {
- vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
- vwrq->value = lrl;
- } else {
- vwrq->flags = IW_RETRY_LIMIT;
- vwrq->value = srl;
- if (srl != lrl)
- vwrq->flags |= IW_RETRY_MIN;
- }
-
- return 0;
-}
-#endif /* WIRELESS_EXT > 10 */
-
-static int
-wl_iw_set_encode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- wl_wsec_key_t key;
- int error, val, wsec;
-
- WL_TRACE("%s: SIOCSIWENCODE\n", dev->name);
-
- memset(&key, 0, sizeof(key));
-
- if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
- for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS;
- key.index++) {
- val = cpu_to_le32(key.index);
- error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val,
- sizeof(val));
- if (error)
- return error;
- val = le32_to_cpu(val);
- if (val)
- break;
- }
- if (key.index == DOT11_MAX_DEFAULT_KEYS)
- key.index = 0;
- } else {
- key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
- if (key.index >= DOT11_MAX_DEFAULT_KEYS)
- return -EINVAL;
- }
-
- if (!extra || !dwrq->length || (dwrq->flags & IW_ENCODE_NOKEY)) {
- val = cpu_to_le32(key.index);
- error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY, &val,
- sizeof(val));
- if (error)
- return error;
- } else {
- key.len = dwrq->length;
-
- if (dwrq->length > sizeof(key.data))
- return -EINVAL;
-
- memcpy(key.data, extra, dwrq->length);
-
- key.flags = WL_PRIMARY_KEY;
- switch (key.len) {
- case WLAN_KEY_LEN_WEP40:
- key.algo = CRYPTO_ALGO_WEP1;
- break;
- case WLAN_KEY_LEN_WEP104:
- key.algo = CRYPTO_ALGO_WEP128;
- break;
- case WLAN_KEY_LEN_TKIP:
- key.algo = CRYPTO_ALGO_TKIP;
- break;
- case WLAN_KEY_LEN_AES_CMAC:
- key.algo = CRYPTO_ALGO_AES_CCM;
- break;
- default:
- return -EINVAL;
- }
-
- swap_key_from_BE(&key);
- error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
- if (error)
- return error;
- }
-
- val = (dwrq->flags & IW_ENCODE_DISABLED) ? 0 : WEP_ENABLED;
-
- error = dev_wlc_intvar_get(dev, "wsec", &wsec);
- if (error)
- return error;
-
- wsec &= ~(WEP_ENABLED);
- wsec |= val;
-
- error = dev_wlc_intvar_set(dev, "wsec", wsec);
- if (error)
- return error;
-
- val = (dwrq->flags & IW_ENCODE_RESTRICTED) ? 1 : 0;
- val = cpu_to_le32(val);
- error = dev_wlc_ioctl(dev, WLC_SET_AUTH, &val, sizeof(val));
- if (error)
- return error;
-
- return 0;
-}
-
-static int
-wl_iw_get_encode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- wl_wsec_key_t key;
- int error, val, wsec, auth;
-
- WL_TRACE("%s: SIOCGIWENCODE\n", dev->name);
-
- memset(&key, 0, sizeof(wl_wsec_key_t));
-
- if ((dwrq->flags & IW_ENCODE_INDEX) == 0) {
- for (key.index = 0; key.index < DOT11_MAX_DEFAULT_KEYS;
- key.index++) {
- val = key.index;
- error = dev_wlc_ioctl(dev, WLC_GET_KEY_PRIMARY, &val,
- sizeof(val));
- if (error)
- return error;
- val = le32_to_cpu(val);
- if (val)
- break;
- }
- } else
- key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
-
- if (key.index >= DOT11_MAX_DEFAULT_KEYS)
- key.index = 0;
-
- error = dev_wlc_ioctl(dev, WLC_GET_WSEC, &wsec, sizeof(wsec));
- if (error)
- return error;
-
- error = dev_wlc_ioctl(dev, WLC_GET_AUTH, &auth, sizeof(auth));
- if (error)
- return error;
-
- swap_key_to_BE(&key);
-
- wsec = le32_to_cpu(wsec);
- auth = le32_to_cpu(auth);
- dwrq->length = min_t(u16, WLAN_MAX_KEY_LEN, key.len);
-
- dwrq->flags = key.index + 1;
- if (!(wsec & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED)))
- dwrq->flags |= IW_ENCODE_DISABLED;
-
- if (auth)
- dwrq->flags |= IW_ENCODE_RESTRICTED;
-
- if (dwrq->length && extra)
- memcpy(extra, key.data, dwrq->length);
-
- return 0;
-}
-
-static int
-wl_iw_set_power(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- int error, pm;
-
- WL_TRACE("%s: SIOCSIWPOWER\n", dev->name);
-
- pm = vwrq->disabled ? PM_OFF : PM_MAX;
-
- pm = cpu_to_le32(pm);
- error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
- if (error)
- return error;
-
- return 0;
-}
-
-static int
-wl_iw_get_power(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- int error, pm;
-
- WL_TRACE("%s: SIOCGIWPOWER\n", dev->name);
-
- error = dev_wlc_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm));
- if (error)
- return error;
-
- pm = le32_to_cpu(pm);
- vwrq->disabled = pm ? 0 : 1;
- vwrq->flags = IW_POWER_ALL_R;
-
- return 0;
-}
-
-#if WIRELESS_EXT > 17
-static int
-wl_iw_set_wpaie(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *iwp, char *extra)
-{
-
- WL_TRACE("%s: SIOCSIWGENIE\n", dev->name);
-
- CHECK_EXTRA_FOR_NULL(extra);
-
- dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
-
- return 0;
-}
-
-static int
-wl_iw_get_wpaie(struct net_device *dev,
- struct iw_request_info *info, struct iw_point *iwp, char *extra)
-{
- WL_TRACE("%s: SIOCGIWGENIE\n", dev->name);
- iwp->length = 64;
- dev_wlc_bufvar_get(dev, "wpaie", extra, iwp->length);
- return 0;
-}
-
-static int
-wl_iw_set_encodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
-{
- wl_wsec_key_t key;
- int error;
- struct iw_encode_ext *iwe;
-
- WL_TRACE("%s: SIOCSIWENCODEEXT\n", dev->name);
-
- CHECK_EXTRA_FOR_NULL(extra);
-
- memset(&key, 0, sizeof(key));
- iwe = (struct iw_encode_ext *)extra;
-
- if (dwrq->flags & IW_ENCODE_DISABLED) {
-
- }
-
- key.index = 0;
- if (dwrq->flags & IW_ENCODE_INDEX)
- key.index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
-
- key.len = iwe->key_len;
-
- if (!is_multicast_ether_addr(iwe->addr.sa_data))
- memcpy(&key.ea, &iwe->addr.sa_data, ETH_ALEN);
-
- if (key.len == 0) {
- if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
-			WL_WSEC("Changing the primary Key to %d\n",
- key.index);
- key.index = cpu_to_le32(key.index);
- error = dev_wlc_ioctl(dev, WLC_SET_KEY_PRIMARY,
- &key.index, sizeof(key.index));
- if (error)
- return error;
- } else {
- swap_key_from_BE(&key);
- dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
- }
- } else {
- if (iwe->key_len > sizeof(key.data))
- return -EINVAL;
-
- WL_WSEC("Setting the key index %d\n", key.index);
- if (iwe->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- WL_WSEC("key is a Primary Key\n");
- key.flags = WL_PRIMARY_KEY;
- }
-
- memcpy(key.data, iwe->key, iwe->key_len);
-
- if (iwe->alg == IW_ENCODE_ALG_TKIP) {
- u8 keybuf[8];
- memcpy(keybuf, &key.data[24], sizeof(keybuf));
- memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
- memcpy(&key.data[16], keybuf, sizeof(keybuf));
- }
-
- if (iwe->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
- unsigned char *ivptr;
- ivptr = (unsigned char *) iwe->rx_seq;
- key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
- (ivptr[3] << 8) | ivptr[2];
- key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
- key.iv_initialized = true;
- }
-
- switch (iwe->alg) {
- case IW_ENCODE_ALG_NONE:
- key.algo = CRYPTO_ALGO_OFF;
- break;
- case IW_ENCODE_ALG_WEP:
- if (iwe->key_len == WLAN_KEY_LEN_WEP40)
- key.algo = CRYPTO_ALGO_WEP1;
- else
- key.algo = CRYPTO_ALGO_WEP128;
- break;
- case IW_ENCODE_ALG_TKIP:
- key.algo = CRYPTO_ALGO_TKIP;
- break;
- case IW_ENCODE_ALG_CCMP:
- key.algo = CRYPTO_ALGO_AES_CCM;
- break;
- default:
- break;
- }
- swap_key_from_BE(&key);
-
- dhd_wait_pend8021x(dev);
-
- error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
- if (error)
- return error;
- }
- return 0;
-}
-
-#if WIRELESS_EXT > 17
-struct {
- pmkid_list_t pmkids;
- pmkid_t foo[MAXPMKID - 1];
-} pmkid_list;
-
-static int
-wl_iw_set_pmksa(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- struct iw_pmksa *iwpmksa;
- uint i;
- int ret = 0;
-
- WL_WSEC("%s: SIOCSIWPMKSA\n", dev->name);
-
- CHECK_EXTRA_FOR_NULL(extra);
-
- iwpmksa = (struct iw_pmksa *)extra;
-
- if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
- WL_WSEC("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n");
- memset((char *)&pmkid_list, 0, sizeof(pmkid_list));
- }
-
- else if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
- {
- pmkid_list_t pmkid, *pmkidptr;
- uint j;
- pmkidptr = &pmkid;
-
- memcpy(&pmkidptr->pmkid[0].BSSID,
- &iwpmksa->bssid.sa_data[0],
- ETH_ALEN);
- memcpy(&pmkidptr->pmkid[0].PMKID,
- &iwpmksa->pmkid[0],
- WLAN_PMKID_LEN);
-
- WL_WSEC("wl_iw_set_pmksa:IW_PMKSA_REMOVE:PMKID: "
- "%pM = ", &pmkidptr->pmkid[0].BSSID);
- for (j = 0; j < WLAN_PMKID_LEN; j++)
- WL_WSEC("%02x ", pmkidptr->pmkid[0].PMKID[j]);
- WL_WSEC("\n");
- }
-
- for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
- if (!memcmp
- (&iwpmksa->bssid.sa_data[0],
- &pmkid_list.pmkids.pmkid[i].BSSID, ETH_ALEN))
- break;
-
- if ((pmkid_list.pmkids.npmkid > 0)
- && (i < pmkid_list.pmkids.npmkid)) {
- memset(&pmkid_list.pmkids.pmkid[i], 0, sizeof(pmkid_t));
- for (; i < (pmkid_list.pmkids.npmkid - 1); i++) {
- memcpy(&pmkid_list.pmkids.pmkid[i].BSSID,
- &pmkid_list.pmkids.pmkid[i + 1].BSSID,
- ETH_ALEN);
- memcpy(&pmkid_list.pmkids.pmkid[i].PMKID,
- &pmkid_list.pmkids.pmkid[i + 1].PMKID,
- WLAN_PMKID_LEN);
- }
- pmkid_list.pmkids.npmkid--;
- } else
- ret = -EINVAL;
- }
-
- else if (iwpmksa->cmd == IW_PMKSA_ADD) {
- for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
- if (!memcmp
- (&iwpmksa->bssid.sa_data[0],
- &pmkid_list.pmkids.pmkid[i].BSSID, ETH_ALEN))
- break;
- if (i < MAXPMKID) {
- memcpy(&pmkid_list.pmkids.pmkid[i].BSSID,
- &iwpmksa->bssid.sa_data[0],
- ETH_ALEN);
- memcpy(&pmkid_list.pmkids.pmkid[i].PMKID,
- &iwpmksa->pmkid[0],
- WLAN_PMKID_LEN);
- if (i == pmkid_list.pmkids.npmkid)
- pmkid_list.pmkids.npmkid++;
- } else
- ret = -EINVAL;
- {
- uint j;
- uint k;
- k = pmkid_list.pmkids.npmkid;
- WL_WSEC("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %pM = ",
- &pmkid_list.pmkids.pmkid[k].BSSID);
- for (j = 0; j < WLAN_PMKID_LEN; j++)
- WL_WSEC("%02x ",
- pmkid_list.pmkids.pmkid[k].PMKID[j]);
- WL_WSEC("\n");
- }
- }
- WL_WSEC("PRINTING pmkid LIST - No of elements %d\n",
- pmkid_list.pmkids.npmkid);
- for (i = 0; i < pmkid_list.pmkids.npmkid; i++) {
- uint j;
- WL_WSEC("PMKID[%d]: %pM = ",
- i, &pmkid_list.pmkids.pmkid[i].BSSID);
- for (j = 0; j < WLAN_PMKID_LEN; j++)
- WL_WSEC("%02x ", pmkid_list.pmkids.pmkid[i].PMKID[j]);
- WL_WSEC("\n");
- }
- WL_WSEC("\n");
-
- if (!ret)
- ret = dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list,
- sizeof(pmkid_list));
- return ret;
-}
-#endif /* WIRELESS_EXT > 17 */
-
-static int
-wl_iw_get_encodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- WL_TRACE("%s: SIOCGIWENCODEEXT\n", dev->name);
- return 0;
-}
-
-static int
-wl_iw_set_wpaauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- int error = 0;
- int paramid;
- int paramval;
- int val = 0;
- wl_iw_t *iw = *(wl_iw_t **) netdev_priv(dev);
-
- WL_TRACE("%s: SIOCSIWAUTH\n", dev->name);
-
- paramid = vwrq->flags & IW_AUTH_INDEX;
- paramval = vwrq->value;
-
- WL_TRACE("%s: SIOCSIWAUTH, paramid = 0x%0x, paramval = 0x%0x\n",
- dev->name, paramid, paramval);
-
- switch (paramid) {
- case IW_AUTH_WPA_VERSION:
- if (paramval & IW_AUTH_WPA_VERSION_DISABLED)
- val = WPA_AUTH_DISABLED;
- else if (paramval & (IW_AUTH_WPA_VERSION_WPA))
- val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
- else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
- val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
- WL_INFORM("%s: %d: setting wpa_auth to 0x%0x\n",
- __func__, __LINE__, val);
- error = dev_wlc_intvar_set(dev, "wpa_auth", val);
- if (error)
- return error;
- break;
- case IW_AUTH_CIPHER_PAIRWISE:
- case IW_AUTH_CIPHER_GROUP:
- if (paramval & (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
- val = WEP_ENABLED;
- if (paramval & IW_AUTH_CIPHER_TKIP)
- val = TKIP_ENABLED;
- if (paramval & IW_AUTH_CIPHER_CCMP)
- val = AES_ENABLED;
-
- if (paramid == IW_AUTH_CIPHER_PAIRWISE) {
- iw->pwsec = val;
- val |= iw->gwsec;
- } else {
- iw->gwsec = val;
- val |= iw->pwsec;
- }
-
- if (iw->privacy_invoked && !val) {
- WL_WSEC("%s: %s: 'Privacy invoked' true but clearing wsec, assuming we're a WPS enrollee\n",
- dev->name, __func__);
- error = dev_wlc_intvar_set(dev, "is_WPS_enrollee",
- true);
- if (error) {
- WL_WSEC("Failed to set is_WPS_enrollee\n");
- return error;
- }
- } else if (val) {
- error = dev_wlc_intvar_set(dev, "is_WPS_enrollee",
- false);
- if (error) {
- WL_WSEC("Failed to clear is_WPS_enrollee\n");
- return error;
- }
- }
-
- error = dev_wlc_intvar_set(dev, "wsec", val);
- if (error)
- return error;
-
- break;
-
- case IW_AUTH_KEY_MGMT:
- error = dev_wlc_intvar_get(dev, "wpa_auth", &val);
- if (error)
- return error;
-
- if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
- if (paramval & IW_AUTH_KEY_MGMT_PSK)
- val = WPA_AUTH_PSK;
- else
- val = WPA_AUTH_UNSPECIFIED;
- } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
- if (paramval & IW_AUTH_KEY_MGMT_PSK)
- val = WPA2_AUTH_PSK;
- else
- val = WPA2_AUTH_UNSPECIFIED;
- }
- WL_INFORM("%s: %d: setting wpa_auth to %d\n",
- __func__, __LINE__, val);
- error = dev_wlc_intvar_set(dev, "wpa_auth", val);
- if (error)
- return error;
-
- break;
- case IW_AUTH_TKIP_COUNTERMEASURES:
- dev_wlc_bufvar_set(dev, "tkip_countermeasures",
- (char *)&paramval, 1);
- break;
-
- case IW_AUTH_80211_AUTH_ALG:
- WL_INFORM("Setting the D11auth %d\n", paramval);
- if (paramval == IW_AUTH_ALG_OPEN_SYSTEM)
- val = 0;
- else if (paramval == IW_AUTH_ALG_SHARED_KEY)
- val = 1;
- else if (paramval ==
- (IW_AUTH_ALG_OPEN_SYSTEM | IW_AUTH_ALG_SHARED_KEY))
- val = 2;
- else
- error = 1;
- if (!error) {
- error = dev_wlc_intvar_set(dev, "auth", val);
- if (error)
- return error;
- }
- break;
-
- case IW_AUTH_WPA_ENABLED:
- if (paramval == 0) {
- iw->pwsec = 0;
- iw->gwsec = 0;
- error = dev_wlc_intvar_get(dev, "wsec", &val);
- if (error)
- return error;
- if (val & (TKIP_ENABLED | AES_ENABLED)) {
- val &= ~(TKIP_ENABLED | AES_ENABLED);
- dev_wlc_intvar_set(dev, "wsec", val);
- }
- val = 0;
- WL_INFORM("%s: %d: setting wpa_auth to %d\n",
- __func__, __LINE__, val);
- dev_wlc_intvar_set(dev, "wpa_auth", 0);
- return error;
- }
- break;
-
- case IW_AUTH_DROP_UNENCRYPTED:
- dev_wlc_bufvar_set(dev, "wsec_restrict", (char *)&paramval, 1);
- break;
-
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- dev_wlc_bufvar_set(dev, "rx_unencrypted_eapol",
- (char *)&paramval, 1);
- break;
-
-#if WIRELESS_EXT > 17
- case IW_AUTH_ROAMING_CONTROL:
- WL_INFORM("%s: IW_AUTH_ROAMING_CONTROL\n", __func__);
- break;
- case IW_AUTH_PRIVACY_INVOKED:
- {
- int wsec;
-
- if (paramval == 0) {
- iw->privacy_invoked = false;
- error = dev_wlc_intvar_set(dev,
- "is_WPS_enrollee", false);
- if (error) {
- WL_WSEC("Failed to clear iovar is_WPS_enrollee\n");
- return error;
- }
- } else {
- iw->privacy_invoked = true;
- error = dev_wlc_intvar_get(dev, "wsec", &wsec);
- if (error)
- return error;
-
- if (!(IW_WSEC_ENABLED(wsec))) {
- error = dev_wlc_intvar_set(dev,
- "is_WPS_enrollee",
- true);
- if (error) {
- WL_WSEC("Failed to set iovar is_WPS_enrollee\n");
- return error;
- }
- } else {
- error = dev_wlc_intvar_set(dev,
- "is_WPS_enrollee",
- false);
- if (error) {
- WL_WSEC("Failed to clear is_WPS_enrollee\n");
- return error;
- }
- }
- }
- break;
- }
-#endif /* WIRELESS_EXT > 17 */
- default:
- break;
- }
- return 0;
-}
-
-#define VAL_PSK(_val) (((_val) & WPA_AUTH_PSK) || ((_val) & WPA2_AUTH_PSK))
-
-static int
-wl_iw_get_wpaauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq, char *extra)
-{
- int error;
- int paramid;
- int paramval = 0;
- int val;
- wl_iw_t *iw = *(wl_iw_t **) netdev_priv(dev);
-
- WL_TRACE("%s: SIOCGIWAUTH\n", dev->name);
-
- paramid = vwrq->flags & IW_AUTH_INDEX;
-
- switch (paramid) {
- case IW_AUTH_WPA_VERSION:
- error = dev_wlc_intvar_get(dev, "wpa_auth", &val);
- if (error)
- return error;
- if (val & (WPA_AUTH_NONE | WPA_AUTH_DISABLED))
- paramval = IW_AUTH_WPA_VERSION_DISABLED;
- else if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED))
- paramval = IW_AUTH_WPA_VERSION_WPA;
- else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED))
- paramval = IW_AUTH_WPA_VERSION_WPA2;
- break;
- case IW_AUTH_CIPHER_PAIRWISE:
- case IW_AUTH_CIPHER_GROUP:
- if (paramid == IW_AUTH_CIPHER_PAIRWISE)
- val = iw->pwsec;
- else
- val = iw->gwsec;
-
- paramval = 0;
- if (val) {
- if (val & WEP_ENABLED)
- paramval |=
- (IW_AUTH_CIPHER_WEP40 |
- IW_AUTH_CIPHER_WEP104);
- if (val & TKIP_ENABLED)
- paramval |= (IW_AUTH_CIPHER_TKIP);
- if (val & AES_ENABLED)
- paramval |= (IW_AUTH_CIPHER_CCMP);
- } else
- paramval = IW_AUTH_CIPHER_NONE;
- break;
- case IW_AUTH_KEY_MGMT:
- error = dev_wlc_intvar_get(dev, "wpa_auth", &val);
- if (error)
- return error;
- if (VAL_PSK(val))
- paramval = IW_AUTH_KEY_MGMT_PSK;
- else
- paramval = IW_AUTH_KEY_MGMT_802_1X;
-
- break;
- case IW_AUTH_TKIP_COUNTERMEASURES:
- dev_wlc_bufvar_get(dev, "tkip_countermeasures",
- (char *)&paramval, 1);
- break;
-
- case IW_AUTH_DROP_UNENCRYPTED:
- dev_wlc_bufvar_get(dev, "wsec_restrict", (char *)&paramval, 1);
- break;
-
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- dev_wlc_bufvar_get(dev, "rx_unencrypted_eapol",
- (char *)&paramval, 1);
- break;
-
- case IW_AUTH_80211_AUTH_ALG:
- error = dev_wlc_intvar_get(dev, "auth", &val);
- if (error)
- return error;
- if (!val)
- paramval = IW_AUTH_ALG_OPEN_SYSTEM;
- else
- paramval = IW_AUTH_ALG_SHARED_KEY;
- break;
- case IW_AUTH_WPA_ENABLED:
- error = dev_wlc_intvar_get(dev, "wpa_auth", &val);
- if (error)
- return error;
- if (val)
- paramval = true;
- else
- paramval = false;
- break;
-#if WIRELESS_EXT > 17
- case IW_AUTH_ROAMING_CONTROL:
- WL_ERROR("%s: IW_AUTH_ROAMING_CONTROL\n", __func__);
- break;
- case IW_AUTH_PRIVACY_INVOKED:
- paramval = iw->privacy_invoked;
- break;
-
-#endif
- }
- vwrq->value = paramval;
- return 0;
-}
-#endif /* WIRELESS_EXT > 17 */
-
-static const iw_handler wl_iw_handler[] = {
- (iw_handler) wl_iw_config_commit,
- (iw_handler) wl_iw_get_name,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) wl_iw_set_freq,
- (iw_handler) wl_iw_get_freq,
- (iw_handler) wl_iw_set_mode,
- (iw_handler) wl_iw_get_mode,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) wl_iw_get_range,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) wl_iw_set_spy,
- (iw_handler) wl_iw_get_spy,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) wl_iw_set_wap,
- (iw_handler) wl_iw_get_wap,
-#if WIRELESS_EXT > 17
- (iw_handler) wl_iw_mlme,
-#else
- (iw_handler) NULL,
-#endif
-#if defined(WL_IW_USE_ISCAN)
- (iw_handler) wl_iw_iscan_get_aplist,
-#else
- (iw_handler) wl_iw_get_aplist,
-#endif
-#if WIRELESS_EXT > 13
-#if defined(WL_IW_USE_ISCAN)
- (iw_handler) wl_iw_iscan_set_scan,
- (iw_handler) wl_iw_iscan_get_scan,
-#else
- (iw_handler) wl_iw_set_scan,
- (iw_handler) wl_iw_get_scan,
-#endif
-#else
- (iw_handler) NULL,
- (iw_handler) NULL,
-#endif /* WIRELESS_EXT > 13 */
- (iw_handler) wl_iw_set_essid,
- (iw_handler) wl_iw_get_essid,
- (iw_handler) wl_iw_set_nick,
- (iw_handler) wl_iw_get_nick,
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) wl_iw_set_rate,
- (iw_handler) wl_iw_get_rate,
- (iw_handler) wl_iw_set_rts,
- (iw_handler) wl_iw_get_rts,
- (iw_handler) wl_iw_set_frag,
- (iw_handler) wl_iw_get_frag,
- (iw_handler) wl_iw_set_txpow,
- (iw_handler) wl_iw_get_txpow,
-#if WIRELESS_EXT > 10
- (iw_handler) wl_iw_set_retry,
- (iw_handler) wl_iw_get_retry,
-#endif
- (iw_handler) wl_iw_set_encode,
- (iw_handler) wl_iw_get_encode,
- (iw_handler) wl_iw_set_power,
- (iw_handler) wl_iw_get_power,
-#if WIRELESS_EXT > 17
- (iw_handler) NULL,
- (iw_handler) NULL,
- (iw_handler) wl_iw_set_wpaie,
- (iw_handler) wl_iw_get_wpaie,
- (iw_handler) wl_iw_set_wpaauth,
- (iw_handler) wl_iw_get_wpaauth,
- (iw_handler) wl_iw_set_encodeext,
- (iw_handler) wl_iw_get_encodeext,
- (iw_handler) wl_iw_set_pmksa,
-#endif /* WIRELESS_EXT > 17 */
-};
-
-#if WIRELESS_EXT > 12
-
-const struct iw_handler_def wl_iw_handler_def = {
- .num_standard = ARRAY_SIZE(wl_iw_handler),
- .standard = (iw_handler *) wl_iw_handler,
- .num_private = 0,
- .num_private_args = 0,
- .private = 0,
- .private_args = 0,
-
-#if WIRELESS_EXT >= 19
- .get_wireless_stats = NULL,
-#endif
-};
-#endif /* WIRELESS_EXT > 12 */
-
-int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct iwreq *wrq = (struct iwreq *)rq;
- struct iw_request_info info;
- iw_handler handler;
- char *extra = NULL;
- int token_size = 1, max_tokens = 0, ret = 0;
-
-	WL_TRACE("\n%s, cmd:%x called via dhd->do_ioctl() entry point\n",
- __func__, cmd);
- if (cmd < SIOCIWFIRST ||
- IW_IOCTL_IDX(cmd) >= ARRAY_SIZE(wl_iw_handler)) {
- WL_ERROR("%s: error in cmd=%x : out of range\n",
- __func__, cmd);
- return -EOPNOTSUPP;
- }
-
- handler = wl_iw_handler[IW_IOCTL_IDX(cmd)];
- if (!handler) {
- WL_ERROR("%s: error in cmd=%x : not supported\n",
- __func__, cmd);
- return -EOPNOTSUPP;
- }
-
- switch (cmd) {
-
- case SIOCSIWESSID:
- case SIOCGIWESSID:
- case SIOCSIWNICKN:
- case SIOCGIWNICKN:
- max_tokens = IW_ESSID_MAX_SIZE + 1;
- break;
-
- case SIOCSIWENCODE:
- case SIOCGIWENCODE:
-#if WIRELESS_EXT > 17
- case SIOCSIWENCODEEXT:
- case SIOCGIWENCODEEXT:
-#endif
- max_tokens = wrq->u.data.length;
- break;
-
- case SIOCGIWRANGE:
- max_tokens = sizeof(struct iw_range) + 500;
- break;
-
- case SIOCGIWAPLIST:
- token_size =
- sizeof(struct sockaddr) + sizeof(struct iw_quality);
- max_tokens = IW_MAX_AP;
- break;
-
-#if WIRELESS_EXT > 13
- case SIOCGIWSCAN:
-#if defined(WL_IW_USE_ISCAN)
- if (g_iscan)
- max_tokens = wrq->u.data.length;
- else
-#endif
- max_tokens = IW_SCAN_MAX_DATA;
- break;
-#endif /* WIRELESS_EXT > 13 */
-
- case SIOCSIWSPY:
- token_size = sizeof(struct sockaddr);
- max_tokens = IW_MAX_SPY;
- break;
-
- case SIOCGIWSPY:
- token_size =
- sizeof(struct sockaddr) + sizeof(struct iw_quality);
- max_tokens = IW_MAX_SPY;
- break;
-
-#if WIRELESS_EXT > 17
- case SIOCSIWPMKSA:
- case SIOCSIWGENIE:
-#endif
- case SIOCSIWPRIV:
- max_tokens = wrq->u.data.length;
- break;
- }
-
- if (max_tokens && wrq->u.data.pointer) {
- if (wrq->u.data.length > max_tokens) {
- WL_ERROR("%s: error in cmd=%x wrq->u.data.length=%d > max_tokens=%d\n",
- __func__, cmd, wrq->u.data.length, max_tokens);
- return -E2BIG;
- }
- extra = kmalloc(max_tokens * token_size, GFP_KERNEL);
- if (!extra)
- return -ENOMEM;
-
- if (copy_from_user
- (extra, wrq->u.data.pointer,
- wrq->u.data.length * token_size)) {
- kfree(extra);
- return -EFAULT;
- }
- }
-
- info.cmd = cmd;
- info.flags = 0;
-
- ret = handler(dev, &info, &wrq->u, extra);
-
- if (extra) {
- if (copy_to_user
- (wrq->u.data.pointer, extra,
- wrq->u.data.length * token_size)) {
- kfree(extra);
- return -EFAULT;
- }
-
- kfree(extra);
- }
-
- return ret;
-}
-
-bool
-wl_iw_conn_status_str(u32 event_type, u32 status, u32 reason,
- char *stringBuf, uint buflen)
-{
- typedef struct conn_fail_event_map_t {
- u32 inEvent;
- u32 inStatus;
- u32 inReason;
- const char *outName;
- const char *outCause;
- } conn_fail_event_map_t;
-
-#define WL_IW_DONT_CARE 9999
- const conn_fail_event_map_t event_map[] = {
- {WLC_E_SET_SSID, WLC_E_STATUS_SUCCESS, WL_IW_DONT_CARE,
- "Conn", "Success"},
- {WLC_E_SET_SSID, WLC_E_STATUS_NO_NETWORKS, WL_IW_DONT_CARE,
- "Conn", "NoNetworks"},
- {WLC_E_SET_SSID, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
- "Conn", "ConfigMismatch"},
- {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_PRUNE_ENCR_MISMATCH,
- "Conn", "EncrypMismatch"},
- {WLC_E_PRUNE, WL_IW_DONT_CARE, WLC_E_RSN_MISMATCH,
- "Conn", "RsnMismatch"},
- {WLC_E_AUTH, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE,
- "Conn", "AuthTimeout"},
- {WLC_E_AUTH, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
- "Conn", "AuthFail"},
- {WLC_E_AUTH, WLC_E_STATUS_NO_ACK, WL_IW_DONT_CARE,
- "Conn", "AuthNoAck"},
- {WLC_E_REASSOC, WLC_E_STATUS_FAIL, WL_IW_DONT_CARE,
- "Conn", "ReassocFail"},
- {WLC_E_REASSOC, WLC_E_STATUS_TIMEOUT, WL_IW_DONT_CARE,
- "Conn", "ReassocTimeout"},
- {WLC_E_REASSOC, WLC_E_STATUS_ABORT, WL_IW_DONT_CARE,
- "Conn", "ReassocAbort"},
- {WLC_E_PSK_SUP, WLC_SUP_KEYED, WL_IW_DONT_CARE,
- "Sup", "ConnSuccess"},
- {WLC_E_PSK_SUP, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
- "Sup", "WpaHandshakeFail"},
- {WLC_E_DEAUTH_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
- "Conn", "Deauth"},
- {WLC_E_DISASSOC_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
- "Conn", "DisassocInd"},
- {WLC_E_DISASSOC, WL_IW_DONT_CARE, WL_IW_DONT_CARE,
- "Conn", "Disassoc"}
- };
-
- const char *name = "";
- const char *cause = NULL;
- int i;
-
- for (i = 0; i < sizeof(event_map) / sizeof(event_map[0]); i++) {
- const conn_fail_event_map_t *row = &event_map[i];
- if (row->inEvent == event_type &&
- (row->inStatus == status
- || row->inStatus == WL_IW_DONT_CARE)
- && (row->inReason == reason
- || row->inReason == WL_IW_DONT_CARE)) {
- name = row->outName;
- cause = row->outCause;
- break;
- }
- }
-
- if (cause) {
- memset(stringBuf, 0, buflen);
- snprintf(stringBuf, buflen, "%s %s %02d %02d",
- name, cause, status, reason);
- WL_INFORM("Connection status: %s\n", stringBuf);
- return true;
- } else {
- return false;
- }
-}
-
-#if WIRELESS_EXT > 14
-
-static bool
-wl_iw_check_conn_fail(wl_event_msg_t *e, char *stringBuf, uint buflen)
-{
- u32 event = be32_to_cpu(e->event_type);
- u32 status = be32_to_cpu(e->status);
- u32 reason = be32_to_cpu(e->reason);
-
- if (wl_iw_conn_status_str(event, status, reason, stringBuf, buflen)) {
- return true;
- } else
- return false;
-}
-#endif
-
-#ifndef IW_CUSTOM_MAX
-#define IW_CUSTOM_MAX 256
-#endif
-
-void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void *data)
-{
-#if WIRELESS_EXT > 13
- union iwreq_data wrqu;
- char extra[IW_CUSTOM_MAX + 1];
- int cmd = 0;
- u32 event_type = be32_to_cpu(e->event_type);
- u16 flags = be16_to_cpu(e->flags);
- u32 datalen = be32_to_cpu(e->datalen);
- u32 status = be32_to_cpu(e->status);
- wl_iw_t *iw;
- u32 toto;
- memset(&wrqu, 0, sizeof(wrqu));
- memset(extra, 0, sizeof(extra));
- iw = 0;
-
- if (!dev) {
- WL_ERROR("%s: dev is null\n", __func__);
- return;
- }
-
- iw = *(wl_iw_t **) netdev_priv(dev);
-
- WL_TRACE("%s: dev=%s event=%d\n", __func__, dev->name, event_type);
-
- switch (event_type) {
- case WLC_E_TXFAIL:
- cmd = IWEVTXDROP;
- memcpy(wrqu.addr.sa_data, &e->addr, ETH_ALEN);
- wrqu.addr.sa_family = ARPHRD_ETHER;
- break;
-#if WIRELESS_EXT > 14
- case WLC_E_JOIN:
- case WLC_E_ASSOC_IND:
- case WLC_E_REASSOC_IND:
- memcpy(wrqu.addr.sa_data, &e->addr, ETH_ALEN);
- wrqu.addr.sa_family = ARPHRD_ETHER;
- cmd = IWEVREGISTERED;
- break;
- case WLC_E_DEAUTH_IND:
- case WLC_E_DISASSOC_IND:
- cmd = SIOCGIWAP;
- memset(wrqu.addr.sa_data, 0, ETH_ALEN);
- wrqu.addr.sa_family = ARPHRD_ETHER;
- memset(&extra, 0, ETH_ALEN);
- break;
- case WLC_E_LINK:
- case WLC_E_NDIS_LINK:
- cmd = SIOCGIWAP;
- if (!(flags & WLC_EVENT_MSG_LINK)) {
- memset(wrqu.addr.sa_data, 0, ETH_ALEN);
- memset(&extra, 0, ETH_ALEN);
- } else {
- memcpy(wrqu.addr.sa_data, &e->addr, ETH_ALEN);
- WL_TRACE("Link UP\n");
-
- }
- wrqu.addr.sa_family = ARPHRD_ETHER;
- break;
- case WLC_E_ACTION_FRAME:
- cmd = IWEVCUSTOM;
- if (datalen + 1 <= sizeof(extra)) {
- wrqu.data.length = datalen + 1;
- extra[0] = WLC_E_ACTION_FRAME;
- memcpy(&extra[1], data, datalen);
- WL_TRACE("WLC_E_ACTION_FRAME len %d\n",
- wrqu.data.length);
- }
- break;
-
- case WLC_E_ACTION_FRAME_COMPLETE:
- cmd = IWEVCUSTOM;
- memcpy(&toto, data, 4);
- if (sizeof(status) + 1 <= sizeof(extra)) {
- wrqu.data.length = sizeof(status) + 1;
- extra[0] = WLC_E_ACTION_FRAME_COMPLETE;
- memcpy(&extra[1], &status, sizeof(status));
- WL_TRACE("wl_iw_event status %d PacketId %d\n", status,
- toto);
- WL_TRACE("WLC_E_ACTION_FRAME_COMPLETE len %d\n",
- wrqu.data.length);
- }
- break;
-#endif /* WIRELESS_EXT > 14 */
-#if WIRELESS_EXT > 17
- case WLC_E_MIC_ERROR:
- {
- struct iw_michaelmicfailure *micerrevt =
- (struct iw_michaelmicfailure *)&extra;
- cmd = IWEVMICHAELMICFAILURE;
- wrqu.data.length = sizeof(struct iw_michaelmicfailure);
- if (flags & WLC_EVENT_MSG_GROUP)
- micerrevt->flags |= IW_MICFAILURE_GROUP;
- else
- micerrevt->flags |= IW_MICFAILURE_PAIRWISE;
- memcpy(micerrevt->src_addr.sa_data, &e->addr,
- ETH_ALEN);
- micerrevt->src_addr.sa_family = ARPHRD_ETHER;
-
- break;
- }
- case WLC_E_PMKID_CACHE:
- {
- if (data) {
- struct iw_pmkid_cand *iwpmkidcand =
- (struct iw_pmkid_cand *)&extra;
- pmkid_cand_list_t *pmkcandlist;
- pmkid_cand_t *pmkidcand;
- int count;
-
- cmd = IWEVPMKIDCAND;
- pmkcandlist = data;
- count = get_unaligned_be32(&pmkcandlist->
- npmkid_cand);
- ASSERT(count >= 0);
- wrqu.data.length = sizeof(struct iw_pmkid_cand);
- pmkidcand = pmkcandlist->pmkid_cand;
- while (count) {
- memset(iwpmkidcand, 0,
- sizeof(struct iw_pmkid_cand));
- if (pmkidcand->preauth)
- iwpmkidcand->flags |=
- IW_PMKID_CAND_PREAUTH;
- memcpy(&iwpmkidcand->bssid.sa_data,
- &pmkidcand->BSSID,
- ETH_ALEN);
-#ifndef SANDGATE2G
- wireless_send_event(dev, cmd, &wrqu,
- extra);
-#endif
- pmkidcand++;
- count--;
- }
- }
- return;
- }
-#endif /* WIRELESS_EXT > 17 */
-
- case WLC_E_SCAN_COMPLETE:
-#if defined(WL_IW_USE_ISCAN)
- if ((g_iscan) && (g_iscan->sysioc_tsk) &&
- (g_iscan->iscan_state != ISCAN_STATE_IDLE)) {
- up(&g_iscan->sysioc_sem);
- } else {
- cmd = SIOCGIWSCAN;
- wrqu.data.length = strlen(extra);
- WL_TRACE("Event WLC_E_SCAN_COMPLETE from specific scan %d\n",
- g_iscan->iscan_state);
- }
-#else
- cmd = SIOCGIWSCAN;
- wrqu.data.length = strlen(extra);
- WL_TRACE("Event WLC_E_SCAN_COMPLETE\n");
-#endif
- break;
-
- case WLC_E_PFN_NET_FOUND:
- {
- wlc_ssid_t *ssid;
- ssid = (wlc_ssid_t *) data;
-			WL_ERROR("%s Event WLC_E_PFN_NET_FOUND, sending %s up : found %s len=%d\n",
- __func__, PNO_EVENT_UP,
- ssid->SSID, ssid->SSID_len);
- cmd = IWEVCUSTOM;
- memset(&wrqu, 0, sizeof(wrqu));
- strcpy(extra, PNO_EVENT_UP);
- wrqu.data.length = strlen(extra);
- }
- break;
-
- default:
- WL_TRACE("Unknown Event %d: ignoring\n", event_type);
- break;
- }
-#ifndef SANDGATE2G
- if (cmd) {
- if (cmd == SIOCGIWSCAN)
- wireless_send_event(dev, cmd, &wrqu, NULL);
- else
- wireless_send_event(dev, cmd, &wrqu, extra);
- }
-#endif
-
-#if WIRELESS_EXT > 14
- memset(extra, 0, sizeof(extra));
- if (wl_iw_check_conn_fail(e, extra, sizeof(extra))) {
- cmd = IWEVCUSTOM;
- wrqu.data.length = strlen(extra);
-#ifndef SANDGATE2G
- wireless_send_event(dev, cmd, &wrqu, extra);
-#endif
- }
-#endif /* WIRELESS_EXT > 14 */
-#endif /* WIRELESS_EXT > 13 */
-}
-
-int wl_iw_attach(struct net_device *dev, void *dhdp)
-{
- int params_size;
- wl_iw_t *iw;
-#if defined(WL_IW_USE_ISCAN)
- iscan_info_t *iscan = NULL;
-
- if (!dev)
- return 0;
-
- memset(&g_wl_iw_params, 0, sizeof(wl_iw_extra_params_t));
-
-#ifdef CSCAN
- params_size =
- (WL_SCAN_PARAMS_FIXED_SIZE + offsetof(wl_iscan_params_t, params)) +
- (WL_NUMCHANNELS * sizeof(u16)) +
- WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t);
-#else
- params_size =
- (WL_SCAN_PARAMS_FIXED_SIZE + offsetof(wl_iscan_params_t, params));
-#endif
- iscan = kzalloc(sizeof(iscan_info_t), GFP_KERNEL);
-
- if (!iscan)
- return -ENOMEM;
-
- iscan->iscan_ex_params_p = kmalloc(params_size, GFP_KERNEL);
- if (!iscan->iscan_ex_params_p) {
- kfree(iscan);
- return -ENOMEM;
- }
- iscan->iscan_ex_param_size = params_size;
- iscan->sysioc_tsk = NULL;
-
- g_iscan = iscan;
- iscan->dev = dev;
- iscan->iscan_state = ISCAN_STATE_IDLE;
-
- iscan->timer_ms = 3000;
- init_timer(&iscan->timer);
- iscan->timer.data = (unsigned long) iscan;
- iscan->timer.function = wl_iw_timerfunc;
-
- sema_init(&iscan->sysioc_sem, 0);
- iscan->sysioc_tsk = kthread_run(_iscan_sysioc_thread, iscan,
- "_iscan_sysioc");
- if (IS_ERR(iscan->sysioc_tsk)) {
- iscan->sysioc_tsk = NULL;
- return -ENOMEM;
- }
-#endif /* defined(WL_IW_USE_ISCAN) */
-
- iw = *(wl_iw_t **) netdev_priv(dev);
- iw->pub = (dhd_pub_t *) dhdp;
- MUTEX_LOCK_INIT(iw->pub);
- MUTEX_LOCK_WL_SCAN_SET_INIT();
-#ifdef SOFTAP
- priv_dev = dev;
- MUTEX_LOCK_SOFTAP_SET_INIT(iw->pub);
-#endif
- g_scan = kzalloc(G_SCAN_RESULTS, GFP_KERNEL);
- if (!g_scan)
- return -ENOMEM;
-
- g_scan_specified_ssid = 0;
-
- return 0;
-}
-
-void wl_iw_detach(void)
-{
-#if defined(WL_IW_USE_ISCAN)
- iscan_buf_t *buf;
- iscan_info_t *iscan = g_iscan;
-
- if (!iscan)
- return;
- if (iscan->sysioc_tsk) {
- send_sig(SIGTERM, iscan->sysioc_tsk, 1);
- kthread_stop(iscan->sysioc_tsk);
- iscan->sysioc_tsk = NULL;
- }
-
- MUTEX_LOCK_WL_SCAN_SET();
- while (iscan->list_hdr) {
- buf = iscan->list_hdr->next;
- kfree(iscan->list_hdr);
- iscan->list_hdr = buf;
- }
- MUTEX_UNLOCK_WL_SCAN_SET();
- kfree(iscan->iscan_ex_params_p);
- kfree(iscan);
- g_iscan = NULL;
-#endif /* WL_IW_USE_ISCAN */
-
- kfree(g_scan);
-
- g_scan = NULL;
-}
-
-#if defined(BCMDBG)
-void osl_assert(char *exp, char *file, int line)
-{
- char tempbuf[256];
- char *basename;
-
- basename = strrchr(file, '/');
- /* skip the '/' */
- if (basename)
- basename++;
-
- if (!basename)
- basename = file;
-
- snprintf(tempbuf, 256,
- "assertion \"%s\" failed: file \"%s\", line %d\n", exp,
- basename, line);
-
- /*
- * Print assert message and give it time to
- * be written to /var/log/messages
- */
- if (!in_interrupt()) {
- const int delay = 3;
- printk(KERN_ERR "%s", tempbuf);
- printk(KERN_ERR "panic in %d seconds\n", delay);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(delay * HZ);
- }
-
- switch (g_assert_type) {
- case 0:
- panic(KERN_ERR "%s", tempbuf);
- break;
- case 1:
- printk(KERN_ERR "%s", tempbuf);
- BUG();
- break;
- case 2:
- printk(KERN_ERR "%s", tempbuf);
- break;
- default:
- break;
- }
-}
-#endif /* defined(BCMDBG) */
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_iw.h b/drivers/staging/brcm80211/brcmfmac/wl_iw.h
deleted file mode 100644
index fe06174cee7..00000000000
--- a/drivers/staging/brcm80211/brcmfmac/wl_iw.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _wl_iw_h_
-#define _wl_iw_h_
-
-#include <linux/wireless.h>
-
-#include <wlioctl.h>
-
-#define WL_SCAN_PARAMS_SSID_MAX 10
-#define GET_SSID "SSID="
-#define GET_CHANNEL "CH="
-#define GET_NPROBE "NPROBE="
-#define GET_ACTIVE_ASSOC_DWELL "ACTIVE="
-#define GET_PASSIVE_ASSOC_DWELL "PASSIVE="
-#define GET_HOME_DWELL "HOME="
-#define GET_SCAN_TYPE "TYPE="
-
-#define BAND_GET_CMD "BANDGET"
-#define BAND_SET_CMD "BANDSET"
-#define DTIM_SKIP_GET_CMD "DTIMSKIPGET"
-#define DTIM_SKIP_SET_CMD "DTIMSKIPSET"
-#define SETSUSPEND_CMD "SETSUSPENDOPT"
-#define PNOSSIDCLR_SET_CMD "PNOSSIDCLR"
-#define PNOSETUP_SET_CMD "PNOSETUP"
-#define PNOENABLE_SET_CMD "PNOFORCE"
-#define PNODEBUG_SET_CMD "PNODEBUG"
-
-typedef struct wl_iw_extra_params {
- int target_channel;
-} wl_iw_extra_params_t;
-
-#define WL_IW_RSSI_MINVAL -200
-#define WL_IW_RSSI_NO_SIGNAL -91
-#define WL_IW_RSSI_VERY_LOW -80
-#define WL_IW_RSSI_LOW -70
-#define WL_IW_RSSI_GOOD -68
-#define WL_IW_RSSI_VERY_GOOD -58
-#define WL_IW_RSSI_EXCELLENT -57
-#define WL_IW_RSSI_INVALID 0
-#define MAX_WX_STRING 80
-#define WL_IW_SET_ACTIVE_SCAN (SIOCIWFIRSTPRIV+1)
-#define WL_IW_GET_RSSI (SIOCIWFIRSTPRIV+3)
-#define WL_IW_SET_PASSIVE_SCAN (SIOCIWFIRSTPRIV+5)
-#define WL_IW_GET_LINK_SPEED (SIOCIWFIRSTPRIV+7)
-#define WL_IW_GET_CURR_MACADDR (SIOCIWFIRSTPRIV+9)
-#define WL_IW_SET_STOP (SIOCIWFIRSTPRIV+11)
-#define WL_IW_SET_START (SIOCIWFIRSTPRIV+13)
-
-#define WL_SET_AP_CFG (SIOCIWFIRSTPRIV+15)
-#define WL_AP_STA_LIST (SIOCIWFIRSTPRIV+17)
-#define WL_AP_MAC_FLTR (SIOCIWFIRSTPRIV+19)
-#define WL_AP_BSS_START (SIOCIWFIRSTPRIV+21)
-#define AP_LPB_CMD (SIOCIWFIRSTPRIV+23)
-#define WL_AP_STOP (SIOCIWFIRSTPRIV+25)
-#define WL_FW_RELOAD (SIOCIWFIRSTPRIV+27)
-#define WL_COMBO_SCAN (SIOCIWFIRSTPRIV+29)
-#define WL_AP_SPARE3 (SIOCIWFIRSTPRIV+31)
-#define G_SCAN_RESULTS (8*1024)
-#define WE_ADD_EVENT_FIX 0x80
-#define G_WLAN_SET_ON 0
-#define G_WLAN_SET_OFF 1
-
-#define CHECK_EXTRA_FOR_NULL(extra) \
-if (!extra) { \
- WL_ERROR("%s: error : extra is null pointer\n", __func__); \
- return -EINVAL; \
-}
-
-typedef struct wl_iw {
- char nickname[IW_ESSID_MAX_SIZE];
-
- struct iw_statistics wstats;
-
- int spy_num;
- u32 pwsec;
- u32 gwsec;
- bool privacy_invoked;
-
- u8 spy_addr[IW_MAX_SPY][ETH_ALEN];
- struct iw_quality spy_qual[IW_MAX_SPY];
- void *wlinfo;
- dhd_pub_t *pub;
-} wl_iw_t;
-
-#if WIRELESS_EXT > 12
-#include <net/iw_handler.h>
-extern const struct iw_handler_def wl_iw_handler_def;
-#endif
-
-extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void *data);
-extern int wl_iw_get_wireless_stats(struct net_device *dev,
- struct iw_statistics *wstats);
-int wl_iw_attach(struct net_device *dev, void *dhdp);
-void wl_iw_detach(void);
-extern int net_os_set_suspend_disable(struct net_device *dev, int val);
-extern int net_os_set_suspend(struct net_device *dev, int val);
-extern int net_os_set_dtim_skip(struct net_device *dev, int val);
-extern int net_os_set_packet_filter(struct net_device *dev, int val);
-
-#define IWE_STREAM_ADD_EVENT(info, stream, ends, iwe, extra) \
- iwe_stream_add_event(info, stream, ends, iwe, extra)
-#define IWE_STREAM_ADD_VALUE(info, event, value, ends, iwe, event_len) \
- iwe_stream_add_value(info, event, value, ends, iwe, event_len)
-#define IWE_STREAM_ADD_POINT(info, stream, ends, iwe, extra) \
- iwe_stream_add_point(info, stream, ends, iwe, extra)
-
-extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled);
-extern int dhd_pno_clean(dhd_pub_t *dhd);
-extern int dhd_pno_set(dhd_pub_t *dhd, wlc_ssid_t *ssids_local, int nssid,
- unsigned char scan_fr);
-extern int dhd_pno_get_status(dhd_pub_t *dhd);
-extern int dhd_dev_pno_reset(struct net_device *dev);
-extern int dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t *ssids_local,
- int nssid, unsigned char scan_fr);
-extern int dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled);
-extern int dhd_dev_get_pno_status(struct net_device *dev);
-
-#define PNO_TLV_PREFIX 'S'
-#define PNO_TLV_VERSION 1
-#define PNO_TLV_SUBVERSION 0
-#define PNO_TLV_RESERVED 0
-#define PNO_TLV_TYPE_SSID_IE 'S'
-#define PNO_TLV_TYPE_TIME 'T'
-#define PNO_EVENT_UP "PNO_EVENT"
-
-#endif /* _wl_iw_h_ */
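The wl_iw.h header deleted above carried, among other things, a ladder of RSSI signal-level defines in dBm (WL_IW_RSSI_NO_SIGNAL through WL_IW_RSSI_EXCELLENT). As a reference for how those thresholds band together, here is a minimal classification helper; it is illustrative only, the banding choice is an assumption, and nothing like it ships in the driver.

/* Illustrative only: band an RSSI reading (dBm) using the thresholds
 * from the removed header. Not part of this patch.
 */
static const char *wl_iw_rssi_label(int rssi)
{
	if (rssi == WL_IW_RSSI_INVALID)
		return "invalid";
	if (rssi <= WL_IW_RSSI_NO_SIGNAL)
		return "no signal";
	if (rssi <= WL_IW_RSSI_VERY_LOW)
		return "very low";
	if (rssi <= WL_IW_RSSI_LOW)
		return "low";
	if (rssi <= WL_IW_RSSI_GOOD)
		return "good";
	if (rssi <= WL_IW_RSSI_VERY_GOOD)
		return "very good";
	return "excellent";
}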
diff --git a/drivers/staging/brcm80211/brcmsmac/Makefile b/drivers/staging/brcm80211/brcmsmac/Makefile
index 8d75fe19ca9..1ea3e0c48f3 100644
--- a/drivers/staging/brcm80211/brcmsmac/Makefile
+++ b/drivers/staging/brcm80211/brcmsmac/Makefile
@@ -28,30 +28,29 @@ ccflags-y := \
-Idrivers/staging/brcm80211/include
BRCMSMAC_OFILES := \
- wl_mac80211.o \
- wl_ucode_loader.o \
- wlc_alloc.o \
- wlc_ampdu.o \
- wlc_antsel.o \
- wlc_bmac.o \
- wlc_channel.o \
- wlc_main.o \
- wlc_phy_shim.o \
- wlc_pmu.o \
- wlc_rate.o \
- wlc_stf.o \
+ mac80211_if.o \
+ ucode_loader.o \
+ alloc.o \
+ ampdu.o \
+ antsel.o \
+ bmac.o \
+ channel.o \
+ main.o \
+ phy_shim.o \
+ pmu.o \
+ rate.o \
+ stf.o \
aiutils.o \
- phy/wlc_phy_cmn.o \
- phy/wlc_phy_lcn.o \
- phy/wlc_phy_n.o \
- phy/wlc_phytbl_lcn.o \
- phy/wlc_phytbl_n.o \
- phy/wlc_phy_qmath.o \
- bcmotp.o \
- bcmsrom.o \
- hnddma.o \
- nicpci.o \
- nvram.o
+ phy/phy_cmn.o \
+ phy/phy_lcn.o \
+ phy/phy_n.o \
+ phy/phytbl_lcn.o \
+ phy/phytbl_n.o \
+ phy/phy_qmath.o \
+ otp.o \
+ srom.o \
+ dma.o \
+ nicpci.o
MODULEPFX := brcmsmac
diff --git a/drivers/staging/brcm80211/brcmsmac/aiutils.c b/drivers/staging/brcm80211/brcmsmac/aiutils.c
index a61185f70a7..a25901e9981 100644
--- a/drivers/staging/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/staging/brcm80211/brcmsmac/aiutils.c
@@ -13,36 +13,354 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-
#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <bcmdefs.h>
-#include <linux/module.h>
#include <linux/pci.h>
-#include <bcmutils.h>
-#include <aiutils.h>
-#include <hndsoc.h>
-#include <sbchipc.h>
-#include <pcicfg.h>
-#include <bcmdevs.h>
-
-/* ********** from siutils.c *********** */
-#include <pci_core.h>
-#include <pcie_core.h>
-#include <nicpci.h>
-#include <bcmnvram.h>
-#include <bcmsrom.h>
-#include <wlc_pmu.h>
+
+#include <defs.h>
+#include <chipcommon.h>
+#include <brcmu_utils.h>
+#include <brcm_hw_ids.h>
+#include "types.h"
+#include "pub.h"
+#include "pmu.h"
+#include "srom.h"
+#include "nicpci.h"
+#include "aiutils.h"
+
+/* slow_clk_ctl */
+#define SCC_SS_MASK 0x00000007 /* slow clock source mask */
+#define SCC_SS_LPO 0x00000000 /* source of slow clock is LPO */
+#define SCC_SS_XTAL 0x00000001 /* source of slow clock is crystal */
+#define SCC_SS_PCI 0x00000002 /* source of slow clock is PCI */
+#define SCC_LF 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */
+#define SCC_LP 0x00000400 /* LPOPowerDown, 1: LPO is disabled,
+ * 0: LPO is enabled
+ */
+#define SCC_FS 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock,
+ * 0: power logic control
+ */
+#define SCC_IP 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors
+ * PLL clock disable requests from core
+ */
+#define SCC_XC 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't
+ * disable crystal when appropriate
+ */
+#define SCC_XP 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */
+#define SCC_CD_MASK 0xffff0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */
+#define SCC_CD_SHIFT 16
+
+/* system_clk_ctl */
+#define SYCC_IE 0x00000001 /* ILPen: Enable Idle Low Power */
+#define SYCC_AE 0x00000002 /* ALPen: Enable Active Low Power */
+#define SYCC_FP 0x00000004 /* ForcePLLOn */
+#define SYCC_AR 0x00000008 /* Force ALP (or HT if ALPen is not set */
+#define SYCC_HR 0x00000010 /* Force HT */
+#define SYCC_CD_MASK 0xffff0000 /* ClkDiv (ILP = 1/(4 * (divisor + 1)) */
+#define SYCC_CD_SHIFT 16
+
+#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4329_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
+#define CST4329_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
+#define CST4329_OTP_SEL 2 /* OTP is powered up, no SPROM */
+#define CST4329_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */
+#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
+#define CST4329_SPI_SDIO_MODE_SHIFT 2
+
+/* 43224 chip-specific ChipControl register bits */
+#define CCTRL43224_GPIO_TOGGLE 0x8000
+#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0 /* 12 mA drive strength */
+#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0 /* 12 mA drive strength for later 43224s */
+
+/* 43236 Chip specific ChipStatus register bits */
+#define CST43236_SFLASH_MASK 0x00000040
+#define CST43236_OTP_MASK 0x00000080
+#define CST43236_HSIC_MASK 0x00000100 /* USB/HSIC */
+#define CST43236_BP_CLK 0x00000200 /* 120/96Mbps */
+#define CST43236_BOOT_MASK 0x00001800
+#define CST43236_BOOT_SHIFT 11
+#define CST43236_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */
+#define CST43236_BOOT_FROM_ROM 1 /* boot from ROM */
+#define CST43236_BOOT_FROM_FLASH 2 /* boot from FLASH */
+#define CST43236_BOOT_FROM_INVALID 3
+
+/* 4331 chip-specific ChipControl register bits */
+#define CCTRL4331_BT_COEXIST (1<<0) /* 0 disable */
+#define CCTRL4331_SECI (1<<1) /* 0 SECI is disabled (JTAG functional) */
+#define CCTRL4331_EXT_LNA (1<<2) /* 0 disable */
+#define CCTRL4331_SPROM_GPIO13_15 (1<<3) /* sprom/gpio13-15 mux */
+#define CCTRL4331_EXTPA_EN (1<<4) /* 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5) /* set drive out GPIO_CLK on sprom_cs pin */
+#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6) /* use sprom_cs pin as PCIE mdio interface */
+#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7) /* aband extpa will be at gpio2/5 and sprom_dout */
+#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8) /* override core control on pipe_AuxClkEnable */
+#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9) /* override core control on pipe_AuxPowerDown */
+#define CCTRL4331_PCIE_AUXCLKEN (1<<10) /* pcie_auxclkenable */
+#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11) /* pcie_pipe_pllpowerdown */
+#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16) /* enable bt_shd0 at gpio4 */
+#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17) /* enable bt_shd1 at gpio5 */
+
+/* 4331 Chip specific ChipStatus register bits */
+#define CST4331_XTAL_FREQ 0x00000001 /* crystal frequency 20/40Mhz */
+#define CST4331_SPROM_PRESENT 0x00000002
+#define CST4331_OTP_PRESENT 0x00000004
+#define CST4331_LDO_RF 0x00000008
+#define CST4331_LDO_PAR 0x00000010
+
+/* 4319 chip-specific ChipStatus register bits */
+#define CST4319_SPI_CPULESSUSB 0x00000001
+#define CST4319_SPI_CLK_POL 0x00000002
+#define CST4319_SPI_CLK_PH 0x00000008
+#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0 /* gpio [7:6], SDIO CIS selection */
+#define CST4319_SPROM_OTP_SEL_SHIFT 6
+#define CST4319_DEFCIS_SEL 0x00000000 /* use default CIS, OTP is powered up */
+#define CST4319_SPROM_SEL 0x00000040 /* use SPROM, OTP is powered up */
+#define CST4319_OTP_SEL 0x00000080 /* use OTP, OTP is powered up */
+#define CST4319_OTP_PWRDN 0x000000c0 /* use SPROM, OTP is powered down */
+#define CST4319_SDIO_USB_MODE 0x00000100 /* gpio [8], sdio/usb mode */
+#define CST4319_REMAP_SEL_MASK 0x00000600
+#define CST4319_ILPDIV_EN 0x00000800
+#define CST4319_XTAL_PD_POL 0x00001000
+#define CST4319_LPO_SEL 0x00002000
+#define CST4319_RES_INIT_MODE 0x0000c000
+#define CST4319_PALDO_EXTPNP 0x00010000 /* PALDO is configured with external PNP */
+#define CST4319_CBUCK_MODE_MASK 0x00060000
+#define CST4319_CBUCK_MODE_BURST 0x00020000
+#define CST4319_CBUCK_MODE_LPBURST 0x00060000
+#define CST4319_RCAL_VALID 0x01000000
+#define CST4319_RCAL_VALUE_MASK 0x3e000000
+#define CST4319_RCAL_VALUE_SHIFT 25
+
+/* 4336 chip-specific ChipStatus register bits */
+#define CST4336_SPI_MODE_MASK 0x00000001
+#define CST4336_SPROM_PRESENT 0x00000002
+#define CST4336_OTP_PRESENT 0x00000004
+#define CST4336_ARMREMAP_0 0x00000008
+#define CST4336_ILPDIV_EN_MASK 0x00000010
+#define CST4336_ILPDIV_EN_SHIFT 4
+#define CST4336_XTAL_PD_POL_MASK 0x00000020
+#define CST4336_XTAL_PD_POL_SHIFT 5
+#define CST4336_LPO_SEL_MASK 0x00000040
+#define CST4336_LPO_SEL_SHIFT 6
+#define CST4336_RES_INIT_MODE_MASK 0x00000180
+#define CST4336_RES_INIT_MODE_SHIFT 7
+#define CST4336_CBUCK_MODE_MASK 0x00000600
+#define CST4336_CBUCK_MODE_SHIFT 9
+
+/* 4313 chip-specific ChipStatus register bits */
+#define CST4313_SPROM_PRESENT 1
+#define CST4313_OTP_PRESENT 2
+#define CST4313_SPROM_OTP_SEL_MASK 0x00000002
+#define CST4313_SPROM_OTP_SEL_SHIFT 0
+
+/* 4313 Chip specific ChipControl register bits */
+#define CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strength for later 4313 */
#define BCM47162_DMP() ((sih->chip == BCM47162_CHIP_ID) && \
(sih->chiprev == 0) && \
(sii->coreid[sii->curidx] == MIPS74K_CORE_ID))
+/* Manufacturer Ids */
+#define MFGID_ARM 0x43b
+#define MFGID_BRCM 0x4bf
+#define MFGID_MIPS 0x4a7
+
+/* Enumeration ROM registers */
+#define ER_EROMENTRY 0x000
+#define ER_REMAPCONTROL 0xe00
+#define ER_REMAPSELECT 0xe04
+#define ER_MASTERSELECT 0xe10
+#define ER_ITCR 0xf00
+#define ER_ITIP 0xf04
+
+/* Erom entries */
+#define ER_TAG 0xe
+#define ER_TAG1 0x6
+#define ER_VALID 1
+#define ER_CI 0
+#define ER_MP 2
+#define ER_ADD 4
+#define ER_END 0xe
+#define ER_BAD 0xffffffff
+
+/* EROM CompIdentA */
+#define CIA_MFG_MASK 0xfff00000
+#define CIA_MFG_SHIFT 20
+#define CIA_CID_MASK 0x000fff00
+#define CIA_CID_SHIFT 8
+#define CIA_CCL_MASK 0x000000f0
+#define CIA_CCL_SHIFT 4
+
+/* EROM CompIdentB */
+#define CIB_REV_MASK 0xff000000
+#define CIB_REV_SHIFT 24
+#define CIB_NSW_MASK 0x00f80000
+#define CIB_NSW_SHIFT 19
+#define CIB_NMW_MASK 0x0007c000
+#define CIB_NMW_SHIFT 14
+#define CIB_NSP_MASK 0x00003e00
+#define CIB_NSP_SHIFT 9
+#define CIB_NMP_MASK 0x000001f0
+#define CIB_NMP_SHIFT 4
+
+/* EROM AddrDesc */
+#define AD_ADDR_MASK 0xfffff000
+#define AD_SP_MASK 0x00000f00
+#define AD_SP_SHIFT 8
+#define AD_ST_MASK 0x000000c0
+#define AD_ST_SHIFT 6
+#define AD_ST_SLAVE 0x00000000
+#define AD_ST_BRIDGE 0x00000040
+#define AD_ST_SWRAP 0x00000080
+#define AD_ST_MWRAP 0x000000c0
+#define AD_SZ_MASK 0x00000030
+#define AD_SZ_SHIFT 4
+#define AD_SZ_4K 0x00000000
+#define AD_SZ_8K 0x00000010
+#define AD_SZ_16K 0x00000020
+#define AD_SZ_SZD 0x00000030
+#define AD_AG32 0x00000008
+#define AD_ADDR_ALIGN 0x00000fff
+#define AD_SZ_BASE 0x00001000 /* 4KB */
+
+/* EROM SizeDesc */
+#define SD_SZ_MASK 0xfffff000
+#define SD_SG32 0x00000008
+#define SD_SZ_ALIGN 0x00000fff
+
+#define PCI_CFG_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */
+#define PCI_CFG_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal power-up */
+#define PCI_CFG_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL power-down */
+
+/* power control defines */
+#define PLL_DELAY 150 /* us pll on delay */
+#define FREF_DELAY 200 /* us fref change delay */
+#define XTAL_ON_DELAY 1000 /* us crystal power-on delay */
+
+/* resetctrl */
+#define AIRC_RESET 1
+
+struct aidmp {
+ u32 oobselina30; /* 0x000 */
+ u32 oobselina74; /* 0x004 */
+ u32 PAD[6];
+ u32 oobselinb30; /* 0x020 */
+ u32 oobselinb74; /* 0x024 */
+ u32 PAD[6];
+ u32 oobselinc30; /* 0x040 */
+ u32 oobselinc74; /* 0x044 */
+ u32 PAD[6];
+ u32 oobselind30; /* 0x060 */
+ u32 oobselind74; /* 0x064 */
+ u32 PAD[38];
+ u32 oobselouta30; /* 0x100 */
+ u32 oobselouta74; /* 0x104 */
+ u32 PAD[6];
+ u32 oobseloutb30; /* 0x120 */
+ u32 oobseloutb74; /* 0x124 */
+ u32 PAD[6];
+ u32 oobseloutc30; /* 0x140 */
+ u32 oobseloutc74; /* 0x144 */
+ u32 PAD[6];
+ u32 oobseloutd30; /* 0x160 */
+ u32 oobseloutd74; /* 0x164 */
+ u32 PAD[38];
+ u32 oobsynca; /* 0x200 */
+ u32 oobseloutaen; /* 0x204 */
+ u32 PAD[6];
+ u32 oobsyncb; /* 0x220 */
+ u32 oobseloutben; /* 0x224 */
+ u32 PAD[6];
+ u32 oobsyncc; /* 0x240 */
+ u32 oobseloutcen; /* 0x244 */
+ u32 PAD[6];
+ u32 oobsyncd; /* 0x260 */
+ u32 oobseloutden; /* 0x264 */
+ u32 PAD[38];
+ u32 oobaextwidth; /* 0x300 */
+ u32 oobainwidth; /* 0x304 */
+ u32 oobaoutwidth; /* 0x308 */
+ u32 PAD[5];
+ u32 oobbextwidth; /* 0x320 */
+ u32 oobbinwidth; /* 0x324 */
+ u32 oobboutwidth; /* 0x328 */
+ u32 PAD[5];
+ u32 oobcextwidth; /* 0x340 */
+ u32 oobcinwidth; /* 0x344 */
+ u32 oobcoutwidth; /* 0x348 */
+ u32 PAD[5];
+ u32 oobdextwidth; /* 0x360 */
+ u32 oobdinwidth; /* 0x364 */
+ u32 oobdoutwidth; /* 0x368 */
+ u32 PAD[37];
+ u32 ioctrlset; /* 0x400 */
+ u32 ioctrlclear; /* 0x404 */
+ u32 ioctrl; /* 0x408 */
+ u32 PAD[61];
+ u32 iostatus; /* 0x500 */
+ u32 PAD[127];
+ u32 ioctrlwidth; /* 0x700 */
+ u32 iostatuswidth; /* 0x704 */
+ u32 PAD[62];
+ u32 resetctrl; /* 0x800 */
+ u32 resetstatus; /* 0x804 */
+ u32 resetreadid; /* 0x808 */
+ u32 resetwriteid; /* 0x80c */
+ u32 PAD[60];
+ u32 errlogctrl; /* 0x900 */
+ u32 errlogdone; /* 0x904 */
+ u32 errlogstatus; /* 0x908 */
+ u32 errlogaddrlo; /* 0x90c */
+ u32 errlogaddrhi; /* 0x910 */
+ u32 errlogid; /* 0x914 */
+ u32 errloguser; /* 0x918 */
+ u32 errlogflags; /* 0x91c */
+ u32 PAD[56];
+ u32 intstatus; /* 0xa00 */
+ u32 PAD[127];
+ u32 config; /* 0xe00 */
+ u32 PAD[63];
+ u32 itcr; /* 0xf00 */
+ u32 PAD[3];
+ u32 itipooba; /* 0xf10 */
+ u32 itipoobb; /* 0xf14 */
+ u32 itipoobc; /* 0xf18 */
+ u32 itipoobd; /* 0xf1c */
+ u32 PAD[4];
+ u32 itipoobaout; /* 0xf30 */
+ u32 itipoobbout; /* 0xf34 */
+ u32 itipoobcout; /* 0xf38 */
+ u32 itipoobdout; /* 0xf3c */
+ u32 PAD[4];
+ u32 itopooba; /* 0xf50 */
+ u32 itopoobb; /* 0xf54 */
+ u32 itopoobc; /* 0xf58 */
+ u32 itopoobd; /* 0xf5c */
+ u32 PAD[4];
+ u32 itopoobain; /* 0xf70 */
+ u32 itopoobbin; /* 0xf74 */
+ u32 itopoobcin; /* 0xf78 */
+ u32 itopoobdin; /* 0xf7c */
+ u32 PAD[4];
+ u32 itopreset; /* 0xf90 */
+ u32 PAD[15];
+ u32 peripherialid4; /* 0xfd0 */
+ u32 peripherialid5; /* 0xfd4 */
+ u32 peripherialid6; /* 0xfd8 */
+ u32 peripherialid7; /* 0xfdc */
+ u32 peripherialid0; /* 0xfe0 */
+ u32 peripherialid1; /* 0xfe4 */
+ u32 peripherialid2; /* 0xfe8 */
+ u32 peripherialid3; /* 0xfec */
+ u32 componentid0; /* 0xff0 */
+ u32 componentid1; /* 0xff4 */
+ u32 componentid2; /* 0xff8 */
+ u32 componentid3; /* 0xffc */
+};
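The CIA_ and CIB_ masks defined above describe the two component-identifier words parsed out of the enumeration ROM; ai_corevendor() and ai_corerev() below each return one field from them. A compact decode sketch of that bit layout (assumed helper names, not part of the patch):

/* Illustrative decode of the EROM component identifiers using the
 * CIA_ and CIB_ masks above; matches the shifts used by
 * ai_corevendor() and ai_corerev().
 */
struct erom_core_id {
	uint mfg;	/* manufacturer, e.g. MFGID_BRCM */
	uint coreid;	/* core identifier */
	uint corerev;	/* core revision */
};

static void erom_decode_core_id(u32 cia, u32 cib, struct erom_core_id *id)
{
	id->mfg     = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
	id->coreid  = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
	id->corerev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
}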
+
/* EROM parsing */
static u32
-get_erom_ent(si_t *sih, u32 **eromptr, u32 mask, u32 match)
+get_erom_ent(struct si_pub *sih, u32 **eromptr, u32 mask, u32 match)
{
u32 ent;
uint inv = 0, nom = 0;
@@ -77,7 +395,7 @@ get_erom_ent(si_t *sih, u32 **eromptr, u32 mask, u32 match)
}
static u32
-get_asd(si_t *sih, u32 **eromptr, uint sp, uint ad, uint st,
+get_asd(struct si_pub *sih, u32 **eromptr, uint sp, uint ad, uint st,
u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
{
u32 asd, sz, szd;
@@ -111,14 +429,14 @@ get_asd(si_t *sih, u32 **eromptr, uint sp, uint ad, uint st,
return asd;
}
-static void ai_hwfixup(si_info_t *sii)
+static void ai_hwfixup(struct si_info *sii)
{
}
/* parse the enumeration rom to identify all cores */
-void ai_scan(si_t *sih, void *regs, uint devid)
+void ai_scan(struct si_pub *sih, void *regs)
{
- si_info_t *sii = SI_INFO(sih);
+ struct si_info *sii = SI_INFO(sih);
chipcregs_t *cc = (chipcregs_t *) regs;
u32 erombase, *eromptr, *eromlim;
@@ -319,9 +637,9 @@ void ai_scan(si_t *sih, void *regs, uint devid)
/* This function changes the logical "focus" to the indicated core.
* Return the current core's virtual address.
*/
-void *ai_setcoreidx(si_t *sih, uint coreidx)
+void *ai_setcoreidx(struct si_pub *sih, uint coreidx)
{
- si_info_t *sii = SI_INFO(sih);
+ struct si_info *sii = SI_INFO(sih);
u32 addr = sii->coresba[coreidx];
u32 wrap = sii->wrapba[coreidx];
void *regs;
@@ -368,15 +686,15 @@ void *ai_setcoreidx(si_t *sih, uint coreidx)
}
/* Return the number of address spaces in current core */
-int ai_numaddrspaces(si_t *sih)
+int ai_numaddrspaces(struct si_pub *sih)
{
return 2;
}
/* Return the address of the nth address space in the current core */
-u32 ai_addrspace(si_t *sih, uint asidx)
+u32 ai_addrspace(struct si_pub *sih, uint asidx)
{
- si_info_t *sii;
+ struct si_info *sii;
uint cidx;
sii = SI_INFO(sih);
@@ -393,9 +711,9 @@ u32 ai_addrspace(si_t *sih, uint asidx)
}
/* Return the size of the nth address space in the current core */
-u32 ai_addrspacesize(si_t *sih, uint asidx)
+u32 ai_addrspacesize(struct si_pub *sih, uint asidx)
{
- si_info_t *sii;
+ struct si_info *sii;
uint cidx;
sii = SI_INFO(sih);
@@ -411,10 +729,10 @@ u32 ai_addrspacesize(si_t *sih, uint asidx)
}
}
-uint ai_flag(si_t *sih)
+uint ai_flag(struct si_pub *sih)
{
- si_info_t *sii;
- aidmp_t *ai;
+ struct si_info *sii;
+ struct aidmp *ai;
sii = SI_INFO(sih);
if (BCM47162_DMP()) {
@@ -426,13 +744,13 @@ uint ai_flag(si_t *sih)
return R_REG(&ai->oobselouta30) & 0x1f;
}
-void ai_setint(si_t *sih, int siflag)
+void ai_setint(struct si_pub *sih, int siflag)
{
}
-uint ai_corevendor(si_t *sih)
+uint ai_corevendor(struct si_pub *sih)
{
- si_info_t *sii;
+ struct si_info *sii;
u32 cia;
sii = SI_INFO(sih);
@@ -440,9 +758,9 @@ uint ai_corevendor(si_t *sih)
return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
}
-uint ai_corerev(si_t *sih)
+uint ai_corerev(struct si_pub *sih)
{
- si_info_t *sii;
+ struct si_info *sii;
u32 cib;
sii = SI_INFO(sih);
@@ -450,10 +768,10 @@ uint ai_corerev(si_t *sih)
return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
}
-bool ai_iscoreup(si_t *sih)
+bool ai_iscoreup(struct si_pub *sih)
{
- si_info_t *sii;
- aidmp_t *ai;
+ struct si_info *sii;
+ struct aidmp *ai;
sii = SI_INFO(sih);
ai = sii->curwrap;
@@ -463,10 +781,10 @@ bool ai_iscoreup(si_t *sih)
&& ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
}
-void ai_core_cflags_wo(si_t *sih, u32 mask, u32 val)
+void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val)
{
- si_info_t *sii;
- aidmp_t *ai;
+ struct si_info *sii;
+ struct aidmp *ai;
u32 w;
sii = SI_INFO(sih);
@@ -485,10 +803,10 @@ void ai_core_cflags_wo(si_t *sih, u32 mask, u32 val)
}
}
-u32 ai_core_cflags(si_t *sih, u32 mask, u32 val)
+u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val)
{
- si_info_t *sii;
- aidmp_t *ai;
+ struct si_info *sii;
+ struct aidmp *ai;
u32 w;
sii = SI_INFO(sih);
@@ -508,10 +826,10 @@ u32 ai_core_cflags(si_t *sih, u32 mask, u32 val)
return R_REG(&ai->ioctrl);
}
-u32 ai_core_sflags(si_t *sih, u32 mask, u32 val)
+u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val)
{
- si_info_t *sii;
- aidmp_t *ai;
+ struct si_info *sii;
+ struct aidmp *ai;
u32 w;
sii = SI_INFO(sih);
@@ -532,19 +850,19 @@ u32 ai_core_sflags(si_t *sih, u32 mask, u32 val)
/* *************** from siutils.c ************** */
/* local prototypes */
-static si_info_t *ai_doattach(si_info_t *sii, uint devid, void *regs,
+static struct si_info *ai_doattach(struct si_info *sii, void *regs,
uint bustype, void *sdh, char **vars,
uint *varsz);
-static bool ai_buscore_prep(si_info_t *sii, uint bustype, uint devid,
- void *sdh);
-static bool ai_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype,
+static bool ai_buscore_prep(struct si_info *sii, uint bustype);
+static bool ai_buscore_setup(struct si_info *sii, chipcregs_t *cc, uint bustype,
u32 savewin, uint *origidx, void *regs);
-static void ai_nvram_process(si_info_t *sii, char *pvars);
+static void ai_nvram_process(struct si_info *sii, char *pvars);
/* dev path concatenation util */
-static char *ai_devpathvar(si_t *sih, char *var, int len, const char *name);
-static bool _ai_clkctl_cc(si_info_t *sii, uint mode);
-static bool ai_ispcie(si_info_t *sii);
+static char *ai_devpathvar(struct si_pub *sih, char *var, int len,
+ const char *name);
+static bool _ai_clkctl_cc(struct si_info *sii, uint mode);
+static bool ai_ispcie(struct si_info *sii);
/* global variable to indicate reservation/release of gpio's */
static u32 ai_gpioreservation;
@@ -558,19 +876,19 @@ static u32 ai_gpioreservation;
* vars - pointer to a pointer area for "environment" variables
* varsz - pointer to int to return the size of the vars
*/
-si_t *ai_attach(uint devid, void *regs, uint bustype,
+struct si_pub *ai_attach(void *regs, uint bustype,
void *sdh, char **vars, uint *varsz)
{
- si_info_t *sii;
+ struct si_info *sii;
- /* alloc si_info_t */
- sii = kmalloc(sizeof(si_info_t), GFP_ATOMIC);
+ /* alloc struct si_info */
+ sii = kmalloc(sizeof(struct si_info), GFP_ATOMIC);
if (sii == NULL) {
SI_ERROR(("si_attach: malloc failed!\n"));
return NULL;
}
- if (ai_doattach(sii, devid, regs, bustype, sdh, vars, varsz) ==
+ if (ai_doattach(sii, regs, bustype, sdh, vars, varsz) ==
NULL) {
kfree(sii);
return NULL;
@@ -578,14 +896,13 @@ si_t *ai_attach(uint devid, void *regs, uint bustype,
sii->vars = vars ? *vars : NULL;
sii->varsz = varsz ? *varsz : 0;
- return (si_t *) sii;
+ return (struct si_pub *) sii;
}
/* global kernel resource */
-static si_info_t ksii;
+static struct si_info ksii;
-static bool ai_buscore_prep(si_info_t *sii, uint bustype, uint devid,
- void *sdh)
+static bool ai_buscore_prep(struct si_info *sii, uint bustype)
{
/* kludge to enable the clock on the 4306 which lacks a slowclock */
if (bustype == PCI_BUS && !ai_ispcie(sii))
@@ -593,7 +910,7 @@ static bool ai_buscore_prep(si_info_t *sii, uint bustype, uint devid,
return true;
}
-static bool ai_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype,
+static bool ai_buscore_setup(struct si_info *sii, chipcregs_t *cc, uint bustype,
u32 savewin, uint *origidx, void *regs)
{
bool pci, pcie;
@@ -702,7 +1019,7 @@ static bool ai_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype,
return true;
}
-static __used void ai_nvram_process(si_info_t *sii, char *pvars)
+static __used void ai_nvram_process(struct si_info *sii, char *pvars)
{
uint w = 0;
@@ -751,7 +1068,7 @@ static __used void ai_nvram_process(si_info_t *sii, char *pvars)
sii->pub.boardflags = getintvar(pvars, "boardflags");
}
-static si_info_t *ai_doattach(si_info_t *sii, uint devid,
+static struct si_info *ai_doattach(struct si_info *sii,
void *regs, uint bustype, void *pbus,
char **vars, uint *varsz)
{
@@ -762,7 +1079,7 @@ static si_info_t *ai_doattach(si_info_t *sii, uint devid,
uint socitype;
uint origidx;
- memset((unsigned char *) sii, 0, sizeof(si_info_t));
+ memset((unsigned char *) sii, 0, sizeof(struct si_info));
savewin = 0;
@@ -797,7 +1114,7 @@ static si_info_t *ai_doattach(si_info_t *sii, uint devid,
sih->bustype = bustype;
/* bus/core/clk setup for register access */
- if (!ai_buscore_prep(sii, bustype, devid, pbus)) {
+ if (!ai_buscore_prep(sii, bustype)) {
SI_ERROR(("si_doattach: si_core_clk_prep failed %d\n",
bustype));
return NULL;
@@ -823,7 +1140,7 @@ static si_info_t *ai_doattach(si_info_t *sii, uint devid,
if (socitype == SOCI_AI) {
SI_MSG(("Found chip type AI (0x%08x)\n", w));
/* pass chipc address instead of original core base */
- ai_scan(&sii->pub, (void *)cc, devid);
+ ai_scan(&sii->pub, (void *)cc);
} else {
SI_ERROR(("Found chip of unknown type (0x%08x)\n", w));
return NULL;
@@ -840,28 +1157,6 @@ static si_info_t *ai_doattach(si_info_t *sii, uint devid,
goto exit;
}
- /* assume current core is CC */
- if ((sii->pub.ccrev == 0x25)
- &&
- ((sih->chip == BCM43236_CHIP_ID
- || sih->chip == BCM43235_CHIP_ID
- || sih->chip == BCM43238_CHIP_ID)
- && (sii->pub.chiprev <= 2))) {
-
- if ((cc->chipstatus & CST43236_BP_CLK) != 0) {
- uint clkdiv;
- clkdiv = R_REG(&cc->clkdiv);
- /* otp_clk_div is even number, 120/14 < 9mhz */
- clkdiv = (clkdiv & ~CLKD_OTP) | (14 << CLKD_OTP_SHIFT);
- W_REG(&cc->clkdiv, clkdiv);
- SI_ERROR(("%s: set clkdiv to %x\n", __func__, clkdiv));
- }
- udelay(10);
- }
-
- /* Init nvram from flash if it exists */
- nvram_init();
-
/* Init nvram from sprom/otp if they exist */
if (srom_var_init
(&sii->pub, bustype, regs, vars, varsz)) {
@@ -901,8 +1196,7 @@ static si_info_t *ai_doattach(si_info_t *sii, uint devid,
pcicore_attach(sii->pch, pvars, SI_DOATTACH);
}
- if ((sih->chip == BCM43224_CHIP_ID) ||
- (sih->chip == BCM43421_CHIP_ID)) {
+ if (sih->chip == BCM43224_CHIP_ID) {
/*
 * enable 12 mA drive strength for 43224 and
* set chipControl register bit 15
@@ -933,11 +1227,6 @@ static si_info_t *ai_doattach(si_info_t *sii, uint devid,
CCTRL_4313_12MA_LED_DRIVE);
}
- if (sih->chip == BCM4331_CHIP_ID) {
- /* Enable Ext PA lines depending on chip package option */
- ai_chipcontrl_epa4331(sih, true);
- }
-
return sii;
exit:
if (sih->bustype == PCI_BUS) {
@@ -950,13 +1239,13 @@ static si_info_t *ai_doattach(si_info_t *sii, uint devid,
}
/* may be called with core in reset */
-void ai_detach(si_t *sih)
+void ai_detach(struct si_pub *sih)
{
- si_info_t *sii;
+ struct si_info *sii;
uint idx;
struct si_pub *si_local = NULL;
- bcopy(&sih, &si_local, sizeof(si_t **));
+ memcpy(&si_local, &sih, sizeof(struct si_pub **));
sii = SI_INFO(sih);
@@ -970,8 +1259,6 @@ void ai_detach(si_t *sih)
sii->regs[idx] = NULL;
}
- nvram_exit(); /* free up nvram buffers */
-
if (sih->bustype == PCI_BUS) {
if (sii->pch)
pcicore_deinit(sii->pch);
@@ -984,10 +1271,11 @@ void ai_detach(si_t *sih)
/* register driver interrupt disabling and restoring callback functions */
void
-ai_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
+ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
+ void *intrsrestore_fn,
void *intrsenabled_fn, void *intr_arg)
{
- si_info_t *sii;
+ struct si_info *sii;
sii = SI_INFO(sih);
sii->intr_arg = intr_arg;
@@ -1000,39 +1288,39 @@ ai_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
sii->dev_coreid = sii->coreid[sii->curidx];
}
-void ai_deregister_intr_callback(si_t *sih)
+void ai_deregister_intr_callback(struct si_pub *sih)
{
- si_info_t *sii;
+ struct si_info *sii;
sii = SI_INFO(sih);
sii->intrsoff_fn = NULL;
}
-uint ai_coreid(si_t *sih)
+uint ai_coreid(struct si_pub *sih)
{
- si_info_t *sii;
+ struct si_info *sii;
sii = SI_INFO(sih);
return sii->coreid[sii->curidx];
}
-uint ai_coreidx(si_t *sih)
+uint ai_coreidx(struct si_pub *sih)
{
- si_info_t *sii;
+ struct si_info *sii;
sii = SI_INFO(sih);
return sii->curidx;
}
-bool ai_backplane64(si_t *sih)
+bool ai_backplane64(struct si_pub *sih)
{
return (sih->cccaps & CC_CAP_BKPLN64) != 0;
}
/* return index of coreid or BADIDX if not found */
-uint ai_findcoreidx(si_t *sih, uint coreid, uint coreunit)
+uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit)
{
- si_info_t *sii;
+ struct si_info *sii;
uint found;
uint i;
@@ -1056,7 +1344,7 @@ uint ai_findcoreidx(si_t *sih, uint coreid, uint coreunit)
* Moreover, callers should keep interrupts off during switching
* out of and back to d11 core.
*/
-void *ai_setcore(si_t *sih, uint coreid, uint coreunit)
+void *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit)
{
uint idx;
@@ -1068,10 +1356,11 @@ void *ai_setcore(si_t *sih, uint coreid, uint coreunit)
}
/* Turn off interrupt as required by ai_setcore, before switch core */
-void *ai_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
+void *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx,
+ uint *intr_val)
{
void *cc;
- si_info_t *sii;
+ struct si_info *sii;
sii = SI_INFO(sih);
@@ -1093,9 +1382,9 @@ void *ai_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
}
/* restore coreidx and restore interrupt */
-void ai_restore_core(si_t *sih, uint coreid, uint intr_val)
+void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val)
{
- si_info_t *sii;
+ struct si_info *sii;
sii = SI_INFO(sih);
if (SI_FAST(sii)
@@ -1106,9 +1395,9 @@ void ai_restore_core(si_t *sih, uint coreid, uint intr_val)
INTR_RESTORE(sii, intr_val);
}
-void ai_write_wrapperreg(si_t *sih, u32 offset, u32 val)
+void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val)
{
- si_info_t *sii = SI_INFO(sih);
+ struct si_info *sii = SI_INFO(sih);
u32 *w = (u32 *) sii->curwrap;
W_REG(w + (offset / 4), val);
return;
@@ -1124,14 +1413,15 @@ void ai_write_wrapperreg(si_t *sih, u32 offset, u32 val)
* Also, when using pci/pcie, we can optimize away the core switching for pci
* registers and (on newer pci cores) chipcommon registers.
*/
-uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
+uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
+ uint val)
{
uint origidx = 0;
u32 *r = NULL;
uint w;
uint intr_val = 0;
bool fast = false;
- si_info_t *sii;
+ struct si_info *sii;
sii = SI_INFO(sih);
@@ -1208,11 +1498,11 @@ uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
return w;
}
-void ai_core_disable(si_t *sih, u32 bits)
+void ai_core_disable(struct si_pub *sih, u32 bits)
{
- si_info_t *sii;
+ struct si_info *sii;
u32 dummy;
- aidmp_t *ai;
+ struct aidmp *ai;
sii = SI_INFO(sih);
@@ -1235,10 +1525,10 @@ void ai_core_disable(si_t *sih, u32 bits)
* bits - core specific bits that are set during and after reset sequence
* resetbits - core specific bits that are set only during reset sequence
*/
-void ai_core_reset(si_t *sih, u32 bits, u32 resetbits)
+void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits)
{
- si_info_t *sii;
- aidmp_t *ai;
+ struct si_info *sii;
+ struct aidmp *ai;
u32 dummy;
sii = SI_INFO(sih);
@@ -1264,7 +1554,7 @@ void ai_core_reset(si_t *sih, u32 bits, u32 resetbits)
}
/* return the slow clock source - LPO, XTAL, or PCI */
-static uint ai_slowclk_src(si_info_t *sii)
+static uint ai_slowclk_src(struct si_info *sii)
{
chipcregs_t *cc;
u32 val;
@@ -1288,7 +1578,7 @@ static uint ai_slowclk_src(si_info_t *sii)
* return the ILP (slowclock) min or max frequency
* precondition: we've established the chip has dynamic clk control
*/
-static uint ai_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
+static uint ai_slowclk_freq(struct si_info *sii, bool max_freq, chipcregs_t *cc)
{
u32 slowclk;
uint div;
@@ -1322,7 +1612,7 @@ static uint ai_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
return 0;
}
-static void ai_clkctl_setdelay(si_info_t *sii, void *chipcregs)
+static void ai_clkctl_setdelay(struct si_info *sii, void *chipcregs)
{
chipcregs_t *cc = (chipcregs_t *) chipcregs;
uint slowmaxfreq, pll_delay, slowclk;
@@ -1352,9 +1642,9 @@ static void ai_clkctl_setdelay(si_info_t *sii, void *chipcregs)
}
/* initialize power control delay registers */
-void ai_clkctl_init(si_t *sih)
+void ai_clkctl_init(struct si_pub *sih)
{
- si_info_t *sii;
+ struct si_info *sii;
uint origidx = 0;
chipcregs_t *cc;
bool fast;
@@ -1390,9 +1680,9 @@ void ai_clkctl_init(si_t *sih)
* return the value suitable for writing to the
* dot11 core FAST_PWRUP_DELAY register
*/
-u16 ai_clkctl_fast_pwrup_delay(si_t *sih)
+u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
{
- si_info_t *sii;
+ struct si_info *sii;
uint origidx = 0;
chipcregs_t *cc;
uint slowminfreq;
@@ -1438,9 +1728,9 @@ u16 ai_clkctl_fast_pwrup_delay(si_t *sih)
}
/* turn primary xtal and/or pll off/on */
-int ai_clkctl_xtal(si_t *sih, uint what, bool on)
+int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
{
- si_info_t *sii;
+ struct si_info *sii;
u32 in, out, outen;
sii = SI_INFO(sih);
@@ -1515,9 +1805,9 @@ int ai_clkctl_xtal(si_t *sih, uint what, bool on)
* this is a wrapper over the next internal function
* to allow flexible policy settings for outside caller
*/
-bool ai_clkctl_cc(si_t *sih, uint mode)
+bool ai_clkctl_cc(struct si_pub *sih, uint mode)
{
- si_info_t *sii;
+ struct si_info *sii;
sii = SI_INFO(sih);
@@ -1532,7 +1822,7 @@ bool ai_clkctl_cc(si_t *sih, uint mode)
}
/* clk control mechanism through chipcommon, no policy checking */
-static bool _ai_clkctl_cc(si_info_t *sii, uint mode)
+static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
{
uint origidx = 0;
chipcregs_t *cc;
@@ -1624,7 +1914,7 @@ static bool _ai_clkctl_cc(si_info_t *sii, uint mode)
}
/* Build device path. Support SI, PCI, and JTAG for now. */
-int ai_devpath(si_t *sih, char *path, int size)
+int ai_devpath(struct si_pub *sih, char *path, int size)
{
int slen;
@@ -1657,7 +1947,7 @@ int ai_devpath(si_t *sih, char *path, int size)
}
/* Get a variable, but only if it has a devpath prefix */
-char *ai_getdevpathvar(si_t *sih, const char *name)
+char *ai_getdevpathvar(struct si_pub *sih, const char *name)
{
char varname[SI_DEVPATH_BUFSZ + 32];
@@ -1667,7 +1957,7 @@ char *ai_getdevpathvar(si_t *sih, const char *name)
}
/* Get a variable, but only if it has a devpath prefix */
-int ai_getdevpathintvar(si_t *sih, const char *name)
+int ai_getdevpathintvar(struct si_pub *sih, const char *name)
{
#if defined(BCMBUSTYPE) && (BCMBUSTYPE == SI_BUS)
return getintvar(NULL, name);
@@ -1680,7 +1970,7 @@ int ai_getdevpathintvar(si_t *sih, const char *name)
#endif
}
-char *ai_getnvramflvar(si_t *sih, const char *name)
+char *ai_getnvramflvar(struct si_pub *sih, const char *name)
{
return getvar(NULL, name);
}
@@ -1690,7 +1980,8 @@ char *ai_getnvramflvar(si_t *sih, const char *name)
* len == 0 or var is NULL, var is still returned. On overflow, the
* first char will be set to '\0'.
*/
-static char *ai_devpathvar(si_t *sih, char *var, int len, const char *name)
+static char *ai_devpathvar(struct si_pub *sih, char *var, int len,
+ const char *name)
{
uint path_len;
@@ -1710,7 +2001,7 @@ static char *ai_devpathvar(si_t *sih, char *var, int len, const char *name)
}
/* return true if PCIE capability exists in the pci config space */
-static __used bool ai_ispcie(si_info_t *sii)
+static bool ai_ispcie(struct si_info *sii)
{
u8 cap_ptr;
@@ -1726,18 +2017,18 @@ static __used bool ai_ispcie(si_info_t *sii)
return true;
}
-bool ai_pci_war16165(si_t *sih)
+bool ai_pci_war16165(struct si_pub *sih)
{
- si_info_t *sii;
+ struct si_info *sii;
sii = SI_INFO(sih);
return PCI(sii) && (sih->buscorerev <= 10);
}
-void ai_pci_up(si_t *sih)
+void ai_pci_up(struct si_pub *sih)
{
- si_info_t *sii;
+ struct si_info *sii;
sii = SI_INFO(sih);
@@ -1754,9 +2045,9 @@ void ai_pci_up(si_t *sih)
}
/* Unconfigure and/or apply various WARs when system is going to sleep mode */
-void ai_pci_sleep(si_t *sih)
+void ai_pci_sleep(struct si_pub *sih)
{
- si_info_t *sii;
+ struct si_info *sii;
sii = SI_INFO(sih);
@@ -1764,9 +2055,9 @@ void ai_pci_sleep(si_t *sih)
}
/* Unconfigure and/or apply various WARs when going down */
-void ai_pci_down(si_t *sih)
+void ai_pci_down(struct si_pub *sih)
{
- si_info_t *sii;
+ struct si_info *sii;
sii = SI_INFO(sih);
@@ -1785,10 +2076,10 @@ void ai_pci_down(si_t *sih)
* Configure the pci core for pci client (NIC) action
* coremask is the bitvec of cores by index to be enabled.
*/
-void ai_pci_setup(si_t *sih, uint coremask)
+void ai_pci_setup(struct si_pub *sih, uint coremask)
{
- si_info_t *sii;
- struct sbpciregs *pciregs = NULL;
+ struct si_info *sii;
+ void *regs = NULL;
u32 siflag = 0, w;
uint idx = 0;
@@ -1805,7 +2096,7 @@ void ai_pci_setup(si_t *sih, uint coremask)
siflag = ai_flag(sih);
/* switch over to pci core */
- pciregs = ai_setcoreidx(sih, sii->pub.buscoreidx);
+ regs = ai_setcoreidx(sih, sii->pub.buscoreidx);
}
/*
@@ -1823,16 +2114,7 @@ void ai_pci_setup(si_t *sih, uint coremask)
}
if (PCI(sii)) {
- OR_REG(&pciregs->sbtopci2,
- (SBTOPCI_PREF | SBTOPCI_BURST));
- if (sii->pub.buscorerev >= 11) {
- OR_REG(&pciregs->sbtopci2,
- SBTOPCI_RC_READMULTI);
- w = R_REG(&pciregs->clkrun);
- W_REG(&pciregs->clkrun,
- (w | PCI_CLKRUN_DSBL));
- w = R_REG(&pciregs->clkrun);
- }
+ pcicore_pci_setup(sii->pch, regs);
/* switch back to previous core */
ai_setcoreidx(sih, idx);
@@ -1843,38 +2125,20 @@ void ai_pci_setup(si_t *sih, uint coremask)
* Fixup SROMless PCI device's configuration.
* The current core may be changed upon return.
*/
-int ai_pci_fixcfg(si_t *sih)
+int ai_pci_fixcfg(struct si_pub *sih)
{
- uint origidx, pciidx;
- struct sbpciregs *pciregs = NULL;
- sbpcieregs_t *pcieregs = NULL;
+ uint origidx;
void *regs = NULL;
- u16 val16, *reg16 = NULL;
- si_info_t *sii = SI_INFO(sih);
+ struct si_info *sii = SI_INFO(sih);
/* Fixup PI in SROM shadow area to enable the correct PCI core access */
/* save the current index */
origidx = ai_coreidx(&sii->pub);
/* check 'pi' is correct and fix it if not */
- if (sii->pub.buscoretype == PCIE_CORE_ID) {
- pcieregs = ai_setcore(&sii->pub, PCIE_CORE_ID, 0);
- regs = pcieregs;
- reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
- } else if (sii->pub.buscoretype == PCI_CORE_ID) {
- pciregs = ai_setcore(&sii->pub, PCI_CORE_ID, 0);
- regs = pciregs;
- reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
- }
- pciidx = ai_coreidx(&sii->pub);
- val16 = R_REG(reg16);
- if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16) pciidx) {
- val16 =
- (u16) (pciidx << SRSH_PI_SHIFT) | (val16 &
- ~SRSH_PI_MASK);
- W_REG(reg16, val16);
- }
+ regs = ai_setcore(&sii->pub, sii->pub.buscoretype, 0);
+ pcicore_fixcfg(sii->pch, regs);
/* restore the original index */
ai_setcoreidx(&sii->pub, origidx);
@@ -1884,7 +2148,7 @@ int ai_pci_fixcfg(si_t *sih)
}
/* mask&set gpiocontrol bits */
-u32 ai_gpiocontrol(si_t *sih, u32 mask, u32 val, u8 priority)
+u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority)
{
uint regoff;
@@ -1904,9 +2168,9 @@ u32 ai_gpiocontrol(si_t *sih, u32 mask, u32 val, u8 priority)
return ai_corereg(sih, SI_CC_IDX, regoff, mask, val);
}
-void ai_chipcontrl_epa4331(si_t *sih, bool on)
+void ai_chipcontrl_epa4331(struct si_pub *sih, bool on)
{
- si_info_t *sii;
+ struct si_info *sii;
chipcregs_t *cc;
uint origidx;
u32 val;
@@ -1938,9 +2202,9 @@ void ai_chipcontrl_epa4331(si_t *sih, bool on)
}
/* Enable BT-COEX & Ex-PA for 4313 */
-void ai_epa_4313war(si_t *sih)
+void ai_epa_4313war(struct si_pub *sih)
{
- si_info_t *sii;
+ struct si_info *sii;
chipcregs_t *cc;
uint origidx;
@@ -1957,10 +2221,10 @@ void ai_epa_4313war(si_t *sih)
}
/* check if the device is removed */
-bool ai_deviceremoved(si_t *sih)
+bool ai_deviceremoved(struct si_pub *sih)
{
u32 w;
- si_info_t *sii;
+ struct si_info *sii;
sii = SI_INFO(sih);
@@ -1974,10 +2238,10 @@ bool ai_deviceremoved(si_t *sih)
return false;
}
-bool ai_is_sprom_available(si_t *sih)
+bool ai_is_sprom_available(struct si_pub *sih)
{
if (sih->ccrev >= 31) {
- si_info_t *sii;
+ struct si_info *sii;
uint origidx;
chipcregs_t *cc;
u32 sromctrl;
@@ -1994,61 +2258,22 @@ bool ai_is_sprom_available(si_t *sih)
}
switch (sih->chip) {
- case BCM4329_CHIP_ID:
- return (sih->chipst & CST4329_SPROM_SEL) != 0;
- case BCM4319_CHIP_ID:
- return (sih->chipst & CST4319_SPROM_SEL) != 0;
- case BCM4336_CHIP_ID:
- return (sih->chipst & CST4336_SPROM_PRESENT) != 0;
- case BCM4330_CHIP_ID:
- return (sih->chipst & CST4330_SPROM_PRESENT) != 0;
case BCM4313_CHIP_ID:
return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
- case BCM4331_CHIP_ID:
- return (sih->chipst & CST4331_SPROM_PRESENT) != 0;
default:
return true;
}
}
-bool ai_is_otp_disabled(si_t *sih)
+bool ai_is_otp_disabled(struct si_pub *sih)
{
switch (sih->chip) {
- case BCM4329_CHIP_ID:
- return (sih->chipst & CST4329_SPROM_OTP_SEL_MASK) ==
- CST4329_OTP_PWRDN;
- case BCM4319_CHIP_ID:
- return (sih->chipst & CST4319_SPROM_OTP_SEL_MASK) ==
- CST4319_OTP_PWRDN;
- case BCM4336_CHIP_ID:
- return (sih->chipst & CST4336_OTP_PRESENT) == 0;
- case BCM4330_CHIP_ID:
- return (sih->chipst & CST4330_OTP_PRESENT) == 0;
case BCM4313_CHIP_ID:
return (sih->chipst & CST4313_OTP_PRESENT) == 0;
/* These chips always have their OTP on */
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
- case BCM43421_CHIP_ID:
- case BCM43235_CHIP_ID:
- case BCM43236_CHIP_ID:
- case BCM43238_CHIP_ID:
- case BCM4331_CHIP_ID:
default:
return false;
}
}
-
-bool ai_is_otp_powered(si_t *sih)
-{
- if (PMUCTL_ENAB(sih))
- return si_pmu_is_otp_powered(sih);
- return true;
-}
-
-void ai_otp_power(si_t *sih, bool on)
-{
- if (PMUCTL_ENAB(sih))
- si_pmu_otp_power(sih, on);
- udelay(1000);
-}
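With the hunks above applied, ai_is_sprom_available() and ai_is_otp_disabled() special-case only the BCM4313; the chip IDs handled by the deleted branches are no longer claimed by brcmsmac. Ignoring the ccrev >= 31 sromcontrol path, the remaining decision reduces to a single chip-status test, sketched below with an assumed helper name:

/* Condensed, illustrative view of the 4313-only check the patch keeps;
 * the driver itself retains the switch form.
 */
static bool example_4313_sprom_present(struct si_pub *sih)
{
	if (sih->chip != BCM4313_CHIP_ID)
		return true;	/* remaining supported chips fall through to "present" */
	return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
}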
diff --git a/drivers/staging/brcm80211/brcmsmac/aiutils.h b/drivers/staging/brcm80211/brcmsmac/aiutils.h
index b98099eaa62..e245c278beb 100644
--- a/drivers/staging/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/staging/brcm80211/brcmsmac/aiutils.h
@@ -14,18 +14,10 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _aiutils_h_
-#define _aiutils_h_
-
-/* cpp contortions to concatenate w/arg prescan */
-#ifndef PAD
-#define _PADLINE(line) pad ## line
-#define _XSTR(line) _PADLINE(line)
-#define PAD _XSTR(__LINE__)
-#endif
+#ifndef _BRCM_AIUTILS_H_
+#define _BRCM_AIUTILS_H_
-/* Include the soci specific files */
-#include <aidmp.h>
+#include "types.h"
/*
* SOC Interconnect Address Map.
@@ -158,9 +150,7 @@
* maps all unused address ranges
*/
-/* There are TWO constants on all HND chips: SI_ENUM_BASE above,
- * and chipcommon being the first core:
- */
+/* chipcommon being the first core: */
#define SI_CC_IDX 0
/* SOC Interconnect types (aka chip types) */
@@ -225,7 +215,70 @@
#define BISZ_BSSEND_IDX 6 /* 6: bss end */
#define BISZ_SIZE 7 /* descriptor size in 32-bit integers */
-#define SI_INFO(sih) (si_info_t *)sih
+#define CC_SROM_OTP 0x800 /* SROM/OTP address space */
+
+/* gpiotimerval */
+#define GPIO_ONTIME_SHIFT 16
+
+/* Fields in clkdiv */
+#define CLKD_OTP 0x000f0000
+#define CLKD_OTP_SHIFT 16
+
+/* When Srom support present, fields in sromcontrol */
+#define SRC_START 0x80000000
+#define SRC_BUSY 0x80000000
+#define SRC_OPCODE 0x60000000
+#define SRC_OP_READ 0x00000000
+#define SRC_OP_WRITE 0x20000000
+#define SRC_OP_WRDIS 0x40000000
+#define SRC_OP_WREN 0x60000000
+#define SRC_OTPSEL 0x00000010
+#define SRC_LOCK 0x00000008
+#define SRC_SIZE_MASK 0x00000006
+#define SRC_SIZE_1K 0x00000000
+#define SRC_SIZE_4K 0x00000002
+#define SRC_SIZE_16K 0x00000004
+#define SRC_SIZE_SHIFT 1
+#define SRC_PRESENT 0x00000001
+
+/* 4330 chip-specific ChipStatus register bits */
+#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6) /* SDIO || gSPI */
+#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6) /* USB || USBDA */
+#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0) /* SDIO */
+#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4) /* gSPI */
+#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6) /* USB packet-oriented */
+#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7) /* USB Direct Access */
+#define CST4330_OTP_PRESENT 0x00000010
+#define CST4330_LPO_AUTODET_EN 0x00000020
+#define CST4330_ARMREMAP_0 0x00000040
+#define CST4330_SPROM_PRESENT 0x00000080 /* takes priority over OTP if both set */
+#define CST4330_ILPDIV_EN 0x00000100
+#define CST4330_LPO_SEL 0x00000200
+#define CST4330_RES_INIT_MODE_SHIFT 10
+#define CST4330_RES_INIT_MODE_MASK 0x00000c00
+#define CST4330_CBUCK_MODE_SHIFT 12
+#define CST4330_CBUCK_MODE_MASK 0x00003000
+#define CST4330_CBUCK_POWER_OK 0x00004000
+#define CST4330_BB_PLL_LOCKED 0x00008000
+
+/* Package IDs */
+#define BCM4329_289PIN_PKG_ID 0 /* 4329 289-pin package id */
+#define BCM4329_182PIN_PKG_ID 1 /* 4329N 182-pin package id */
+#define BCM4717_PKG_ID 9 /* 4717 package id */
+#define BCM4718_PKG_ID 10 /* 4718 package id */
+#define HDLSIM_PKG_ID 14 /* HDL simulator package id */
+#define HWSIM_PKG_ID 15 /* Hardware simulator package id */
+#define BCM43224_FAB_SMIC 0xa /* the chip is manufactured by SMIC */
+
+/* these are router chips */
+#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */
+#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */
+#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */
+#define BCM5356_CHIP_ID 0x5356 /* 5356 chipcommon chipid */
+#define BCM5357_CHIP_ID 0x5357 /* 5357 chipcommon chipid */
+
+
+#define SI_INFO(sih) ((struct si_info *)sih)
#define GOODCOREADDR(x, b) \
(((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
@@ -315,13 +368,6 @@ struct si_pub {
};
/*
- * for HIGH_ONLY driver, the si_t must be writable to allow states sync from
- * BMAC to HIGH driver for monolithic driver, it is readonly to prevent accident
- * change
- */
-typedef const struct si_pub si_t;
-
-/*
* Many of the routines below take an 'sih' handle as their first arg.
* Allocate this by calling si_attach(). Free it by calling si_detach().
* At any one time, the sih is logically focused on one particular si core
@@ -363,8 +409,6 @@ typedef const struct si_pub si_t;
#define SI_PCIDOWN 2
#define SI_PCIUP 3
-#define ISSIM_ENAB(sih) 0
-
/* PMU clock/power control */
#if defined(BCMPMUCTL)
#define PMUCTL_ENAB(sih) (BCMPMUCTL)
@@ -404,16 +448,16 @@ typedef u32(*si_intrsoff_t) (void *intr_arg);
typedef void (*si_intrsrestore_t) (void *intr_arg, u32 arg);
typedef bool(*si_intrsenabled_t) (void *intr_arg);
-typedef struct gpioh_item {
+struct gpioh_item {
void *arg;
bool level;
gpio_handler_t handler;
u32 event;
struct gpioh_item *next;
-} gpioh_item_t;
+};
/* misc si info needed by some of the routines */
-typedef struct si_info {
+struct si_info {
struct si_pub pub; /* back plane public state (must be first) */
void *pbus; /* handle to bus (pci/sdio/..) */
uint dev_coreid; /* the core provides driver functions */
@@ -424,10 +468,6 @@ typedef struct si_info {
void *pch; /* PCI/E core handle */
- gpioh_item_t *gpioh_head; /* GPIO event handlers list */
-
- bool memseg; /* flag to toggle MEM_SEG register */
-
char *vars;
uint varsz;
@@ -450,97 +490,95 @@ typedef struct si_info {
u32 cia[SI_MAXCORES]; /* erom cia entry for each core */
u32 cib[SI_MAXCORES]; /* erom cia entry for each core */
u32 oob_router; /* oob router registers for axi */
-} si_info_t;
+};
/* AMBA Interconnect exported externs */
-extern void ai_scan(si_t *sih, void *regs, uint devid);
-
-extern uint ai_flag(si_t *sih);
-extern void ai_setint(si_t *sih, int siflag);
-extern uint ai_coreidx(si_t *sih);
-extern uint ai_corevendor(si_t *sih);
-extern uint ai_corerev(si_t *sih);
-extern bool ai_iscoreup(si_t *sih);
-extern void *ai_setcoreidx(si_t *sih, uint coreidx);
-extern u32 ai_core_cflags(si_t *sih, u32 mask, u32 val);
-extern void ai_core_cflags_wo(si_t *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(si_t *sih, u32 mask, u32 val);
-extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask,
+extern void ai_scan(struct si_pub *sih, void *regs);
+
+extern uint ai_flag(struct si_pub *sih);
+extern void ai_setint(struct si_pub *sih, int siflag);
+extern uint ai_coreidx(struct si_pub *sih);
+extern uint ai_corevendor(struct si_pub *sih);
+extern uint ai_corerev(struct si_pub *sih);
+extern bool ai_iscoreup(struct si_pub *sih);
+extern void *ai_setcoreidx(struct si_pub *sih, uint coreidx);
+extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
+extern void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val);
+extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
+extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
uint val);
-extern void ai_core_reset(si_t *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(si_t *sih, u32 bits);
-extern int ai_numaddrspaces(si_t *sih);
-extern u32 ai_addrspace(si_t *sih, uint asidx);
-extern u32 ai_addrspacesize(si_t *sih, uint asidx);
-extern void ai_write_wrap_reg(si_t *sih, u32 offset, u32 val);
+extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
+extern void ai_core_disable(struct si_pub *sih, u32 bits);
+extern int ai_numaddrspaces(struct si_pub *sih);
+extern u32 ai_addrspace(struct si_pub *sih, uint asidx);
+extern u32 ai_addrspacesize(struct si_pub *sih, uint asidx);
+extern void ai_write_wrap_reg(struct si_pub *sih, u32 offset, u32 val);
/* === exported functions === */
-extern si_t *ai_attach(uint pcidev, void *regs, uint bustype,
+extern struct si_pub *ai_attach(void *regs, uint bustype,
void *sdh, char **vars, uint *varsz);
-extern void ai_detach(si_t *sih);
-extern bool ai_pci_war16165(si_t *sih);
+extern void ai_detach(struct si_pub *sih);
+extern bool ai_pci_war16165(struct si_pub *sih);
-extern uint ai_coreid(si_t *sih);
-extern uint ai_corerev(si_t *sih);
-extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask,
+extern uint ai_coreid(struct si_pub *sih);
+extern uint ai_corerev(struct si_pub *sih);
+extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
uint val);
-extern void ai_write_wrapperreg(si_t *sih, u32 offset, u32 val);
-extern u32 ai_core_cflags(si_t *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(si_t *sih, u32 mask, u32 val);
-extern bool ai_iscoreup(si_t *sih);
-extern uint ai_findcoreidx(si_t *sih, uint coreid, uint coreunit);
-extern void *ai_setcoreidx(si_t *sih, uint coreidx);
-extern void *ai_setcore(si_t *sih, uint coreid, uint coreunit);
-extern void *ai_switch_core(si_t *sih, uint coreid, uint *origidx,
+extern void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val);
+extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
+extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
+extern bool ai_iscoreup(struct si_pub *sih);
+extern uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit);
+extern void *ai_setcoreidx(struct si_pub *sih, uint coreidx);
+extern void *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit);
+extern void *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx,
uint *intr_val);
-extern void ai_restore_core(si_t *sih, uint coreid, uint intr_val);
-extern void ai_core_reset(si_t *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(si_t *sih, u32 bits);
-extern u32 ai_alp_clock(si_t *sih);
-extern u32 ai_ilp_clock(si_t *sih);
-extern void ai_pci_setup(si_t *sih, uint coremask);
-extern void ai_setint(si_t *sih, int siflag);
-extern bool ai_backplane64(si_t *sih);
-extern void ai_register_intr_callback(si_t *sih, void *intrsoff_fn,
+extern void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val);
+extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
+extern void ai_core_disable(struct si_pub *sih, u32 bits);
+extern u32 ai_alp_clock(struct si_pub *sih);
+extern u32 ai_ilp_clock(struct si_pub *sih);
+extern void ai_pci_setup(struct si_pub *sih, uint coremask);
+extern void ai_setint(struct si_pub *sih, int siflag);
+extern bool ai_backplane64(struct si_pub *sih);
+extern void ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
void *intrsrestore_fn,
void *intrsenabled_fn, void *intr_arg);
-extern void ai_deregister_intr_callback(si_t *sih);
-extern void ai_clkctl_init(si_t *sih);
-extern u16 ai_clkctl_fast_pwrup_delay(si_t *sih);
-extern bool ai_clkctl_cc(si_t *sih, uint mode);
-extern int ai_clkctl_xtal(si_t *sih, uint what, bool on);
-extern bool ai_deviceremoved(si_t *sih);
-extern u32 ai_gpiocontrol(si_t *sih, u32 mask, u32 val,
+extern void ai_deregister_intr_callback(struct si_pub *sih);
+extern void ai_clkctl_init(struct si_pub *sih);
+extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
+extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
+extern int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on);
+extern bool ai_deviceremoved(struct si_pub *sih);
+extern u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val,
u8 priority);
/* OTP status */
-extern bool ai_is_otp_disabled(si_t *sih);
-extern bool ai_is_otp_powered(si_t *sih);
-extern void ai_otp_power(si_t *sih, bool on);
+extern bool ai_is_otp_disabled(struct si_pub *sih);
/* SPROM availability */
-extern bool ai_is_sprom_available(si_t *sih);
+extern bool ai_is_sprom_available(struct si_pub *sih);
/*
* Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
* The returned path is NULL terminated and has trailing '/'.
* Return 0 on success, nonzero otherwise.
*/
-extern int ai_devpath(si_t *sih, char *path, int size);
+extern int ai_devpath(struct si_pub *sih, char *path, int size);
/* Read variable with prepending the devpath to the name */
-extern char *ai_getdevpathvar(si_t *sih, const char *name);
-extern int ai_getdevpathintvar(si_t *sih, const char *name);
+extern char *ai_getdevpathvar(struct si_pub *sih, const char *name);
+extern int ai_getdevpathintvar(struct si_pub *sih, const char *name);
-extern void ai_pci_sleep(si_t *sih);
-extern void ai_pci_down(si_t *sih);
-extern void ai_pci_up(si_t *sih);
-extern int ai_pci_fixcfg(si_t *sih);
+extern void ai_pci_sleep(struct si_pub *sih);
+extern void ai_pci_down(struct si_pub *sih);
+extern void ai_pci_up(struct si_pub *sih);
+extern int ai_pci_fixcfg(struct si_pub *sih);
-extern void ai_chipcontrl_epa4331(si_t *sih, bool on);
+extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on);
/* Enable Ex-PA for 4313 */
-extern void ai_epa_4313war(si_t *sih);
+extern void ai_epa_4313war(struct si_pub *sih);
-char *ai_getnvramflvar(si_t *sih, const char *name);
+char *ai_getnvramflvar(struct si_pub *sih, const char *name);
-#endif /* _aiutils_h_ */
+#endif /* _BRCM_AIUTILS_H_ */
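With the si_t typedef gone, callers now hold a struct si_pub pointer directly and use the slimmed-down ai_attach() signature declared above. A minimal bring-up/teardown sketch against those prototypes (hypothetical caller; the real ones live elsewhere in brcmsmac and supply their own register mapping, bus handle and vars):

/* Hypothetical caller of the reworked aiutils API; argument plumbing
 * and error reporting are elided.
 */
static struct si_pub *example_si_bringup(void *regs, void *pbus,
					 char **vars, uint *varsz)
{
	struct si_pub *sih;

	sih = ai_attach(regs, PCI_BUS, pbus, vars, varsz);
	if (sih == NULL)
		return NULL;		/* EROM scan or buscore setup failed */

	ai_clkctl_init(sih);		/* program power-control delays */
	return sih;
}

static void example_si_teardown(struct si_pub *sih)
{
	ai_detach(sih);
}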
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_alloc.c b/drivers/staging/brcm80211/brcmsmac/alloc.c
index 82c64cd4486..7f8dd7b396b 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_alloc.c
+++ b/drivers/staging/brcm80211/brcmsmac/alloc.c
@@ -13,37 +13,21 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include <bcmdefs.h>
-#include <bcmutils.h>
-#include <aiutils.h>
-#include <wlioctl.h>
-#include <sbhnddma.h>
-
-#include "d11.h"
-#include "wlc_types.h"
-#include "wlc_cfg.h"
-#include "wlc_scb.h"
-#include "wlc_pub.h"
-#include "wlc_key.h"
-#include "wlc_alloc.h"
-#include "wl_dbg.h"
-#include "wlc_rate.h"
-#include "wlc_bsscfg.h"
-#include "phy/wlc_phy_hal.h"
-#include "wlc_channel.h"
-#include "wlc_main.h"
-
-static struct wlc_bsscfg *wlc_bsscfg_malloc(uint unit);
-static void wlc_bsscfg_mfree(struct wlc_bsscfg *cfg);
-static struct wlc_pub *wlc_pub_malloc(uint unit,
+
+#include <brcmu_utils.h>
+#include "types.h"
+#include "pub.h"
+#include "main.h"
+#include "alloc.h"
+
+static struct brcms_bss_cfg *brcms_c_bsscfg_malloc(uint unit);
+static void brcms_c_bsscfg_mfree(struct brcms_bss_cfg *cfg);
+static struct brcms_pub *brcms_c_pub_malloc(uint unit,
uint *err, uint devid);
-static void wlc_pub_mfree(struct wlc_pub *pub);
-static void wlc_tunables_init(wlc_tunables_t *tunables, uint devid);
+static void brcms_c_pub_mfree(struct brcms_pub *pub);
+static void brcms_c_tunables_init(struct brcms_tunables *tunables, uint devid);
-static void wlc_tunables_init(wlc_tunables_t *tunables, uint devid)
+static void brcms_c_tunables_init(struct brcms_tunables *tunables, uint devid)
{
tunables->ntxd = NTXD;
tunables->nrxd = NRXD;
@@ -52,33 +36,33 @@ static void wlc_tunables_init(wlc_tunables_t *tunables, uint devid)
tunables->maxscb = MAXSCB;
tunables->ampdunummpdu = AMPDU_NUM_MPDU;
tunables->maxpktcb = MAXPKTCB;
- tunables->maxucodebss = WLC_MAX_UCODE_BSS;
- tunables->maxucodebss4 = WLC_MAX_UCODE_BSS4;
+ tunables->maxucodebss = BRCMS_MAX_UCODE_BSS;
+ tunables->maxucodebss4 = BRCMS_MAX_UCODE_BSS4;
tunables->maxbss = MAXBSS;
- tunables->datahiwat = WLC_DATAHIWAT;
- tunables->ampdudatahiwat = WLC_AMPDUDATAHIWAT;
+ tunables->datahiwat = BRCMS_DATAHIWAT;
+ tunables->ampdudatahiwat = BRCMS_AMPDUDATAHIWAT;
tunables->rxbnd = RXBND;
tunables->txsbnd = TXSBND;
}
-static struct wlc_pub *wlc_pub_malloc(uint unit, uint *err, uint devid)
+static struct brcms_pub *brcms_c_pub_malloc(uint unit, uint *err, uint devid)
{
- struct wlc_pub *pub;
+ struct brcms_pub *pub;
- pub = kzalloc(sizeof(struct wlc_pub), GFP_ATOMIC);
+ pub = kzalloc(sizeof(struct brcms_pub), GFP_ATOMIC);
if (pub == NULL) {
*err = 1001;
goto fail;
}
- pub->tunables = kzalloc(sizeof(wlc_tunables_t), GFP_ATOMIC);
+ pub->tunables = kzalloc(sizeof(struct brcms_tunables), GFP_ATOMIC);
if (pub->tunables == NULL) {
*err = 1028;
goto fail;
}
/* need to init the tunables now */
- wlc_tunables_init(pub->tunables, devid);
+ brcms_c_tunables_init(pub->tunables, devid);
pub->multicast = kzalloc(ETH_ALEN * MAXMULTILIST, GFP_ATOMIC);
if (pub->multicast == NULL) {
@@ -89,11 +73,11 @@ static struct wlc_pub *wlc_pub_malloc(uint unit, uint *err, uint devid)
return pub;
fail:
- wlc_pub_mfree(pub);
+ brcms_c_pub_mfree(pub);
return NULL;
}
-static void wlc_pub_mfree(struct wlc_pub *pub)
+static void brcms_c_pub_mfree(struct brcms_pub *pub)
{
if (pub == NULL)
return;
@@ -103,26 +87,26 @@ static void wlc_pub_mfree(struct wlc_pub *pub)
kfree(pub);
}
-static struct wlc_bsscfg *wlc_bsscfg_malloc(uint unit)
+static struct brcms_bss_cfg *brcms_c_bsscfg_malloc(uint unit)
{
- struct wlc_bsscfg *cfg;
+ struct brcms_bss_cfg *cfg;
- cfg = kzalloc(sizeof(struct wlc_bsscfg), GFP_ATOMIC);
+ cfg = kzalloc(sizeof(struct brcms_bss_cfg), GFP_ATOMIC);
if (cfg == NULL)
goto fail;
- cfg->current_bss = kzalloc(sizeof(wlc_bss_info_t), GFP_ATOMIC);
+ cfg->current_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC);
if (cfg->current_bss == NULL)
goto fail;
return cfg;
fail:
- wlc_bsscfg_mfree(cfg);
+ brcms_c_bsscfg_mfree(cfg);
return NULL;
}
-static void wlc_bsscfg_mfree(struct wlc_bsscfg *cfg)
+static void brcms_c_bsscfg_mfree(struct brcms_bss_cfg *cfg)
{
if (cfg == NULL)
return;
@@ -132,8 +116,8 @@ static void wlc_bsscfg_mfree(struct wlc_bsscfg *cfg)
kfree(cfg);
}
-static void wlc_bsscfg_ID_assign(struct wlc_info *wlc,
- struct wlc_bsscfg *bsscfg)
+static void brcms_c_bsscfg_ID_assign(struct brcms_c_info *wlc,
+ struct brcms_bss_cfg *bsscfg)
{
bsscfg->ID = wlc->next_bsscfg_ID;
wlc->next_bsscfg_ID++;
@@ -142,29 +126,27 @@ static void wlc_bsscfg_ID_assign(struct wlc_info *wlc,
/*
* The common driver entry routine. Error codes should be unique
*/
-struct wlc_info *wlc_attach_malloc(uint unit, uint *err, uint devid)
+struct brcms_c_info *brcms_c_attach_malloc(uint unit, uint *err, uint devid)
{
- struct wlc_info *wlc;
+ struct brcms_c_info *wlc;
- wlc = kzalloc(sizeof(struct wlc_info), GFP_ATOMIC);
+ wlc = kzalloc(sizeof(struct brcms_c_info), GFP_ATOMIC);
if (wlc == NULL) {
*err = 1002;
goto fail;
}
- wlc->hwrxoff = WL_HWRXOFF;
-
- /* allocate struct wlc_pub state structure */
- wlc->pub = wlc_pub_malloc(unit, err, devid);
+ /* allocate struct brcms_c_pub state structure */
+ wlc->pub = brcms_c_pub_malloc(unit, err, devid);
if (wlc->pub == NULL) {
*err = 1003;
goto fail;
}
wlc->pub->wlc = wlc;
- /* allocate struct wlc_hw_info state structure */
+ /* allocate struct brcms_hardware state structure */
- wlc->hw = kzalloc(sizeof(struct wlc_hw_info), GFP_ATOMIC);
+ wlc->hw = kzalloc(sizeof(struct brcms_hardware), GFP_ATOMIC);
if (wlc->hw == NULL) {
*err = 1005;
goto fail;
@@ -172,7 +154,7 @@ struct wlc_info *wlc_attach_malloc(uint unit, uint *err, uint devid)
wlc->hw->wlc = wlc;
wlc->hw->bandstate[0] =
- kzalloc(sizeof(struct wlc_hwband) * MAXBANDS, GFP_ATOMIC);
+ kzalloc(sizeof(struct brcms_hw_band) * MAXBANDS, GFP_ATOMIC);
if (wlc->hw->bandstate[0] == NULL) {
*err = 1006;
goto fail;
@@ -180,68 +162,62 @@ struct wlc_info *wlc_attach_malloc(uint unit, uint *err, uint devid)
int i;
for (i = 1; i < MAXBANDS; i++) {
- wlc->hw->bandstate[i] = (struct wlc_hwband *)
+ wlc->hw->bandstate[i] = (struct brcms_hw_band *)
((unsigned long)wlc->hw->bandstate[0] +
- (sizeof(struct wlc_hwband) * i));
+ (sizeof(struct brcms_hw_band) * i));
}
}
wlc->modulecb =
- kzalloc(sizeof(struct modulecb) * WLC_MAXMODULES, GFP_ATOMIC);
+ kzalloc(sizeof(struct modulecb) * BRCMS_MAXMODULES, GFP_ATOMIC);
if (wlc->modulecb == NULL) {
*err = 1009;
goto fail;
}
- wlc->default_bss = kzalloc(sizeof(wlc_bss_info_t), GFP_ATOMIC);
+ wlc->default_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC);
if (wlc->default_bss == NULL) {
*err = 1010;
goto fail;
}
- wlc->cfg = wlc_bsscfg_malloc(unit);
+ wlc->cfg = brcms_c_bsscfg_malloc(unit);
if (wlc->cfg == NULL) {
*err = 1011;
goto fail;
}
- wlc_bsscfg_ID_assign(wlc, wlc->cfg);
-
- wlc->pkt_callback = kzalloc(sizeof(struct pkt_cb) *
- (wlc->pub->tunables->maxpktcb + 1),
- GFP_ATOMIC);
- if (wlc->pkt_callback == NULL) {
- *err = 1013;
- goto fail;
- }
+ brcms_c_bsscfg_ID_assign(wlc, wlc->cfg);
wlc->wsec_def_keys[0] =
- kzalloc(sizeof(wsec_key_t) * WLC_DEFAULT_KEYS, GFP_ATOMIC);
+ kzalloc(sizeof(struct wsec_key) * BRCMS_DEFAULT_KEYS,
+ GFP_ATOMIC);
if (wlc->wsec_def_keys[0] == NULL) {
*err = 1015;
goto fail;
} else {
int i;
- for (i = 1; i < WLC_DEFAULT_KEYS; i++) {
- wlc->wsec_def_keys[i] = (wsec_key_t *)
+ for (i = 1; i < BRCMS_DEFAULT_KEYS; i++) {
+ wlc->wsec_def_keys[i] = (struct wsec_key *)
((unsigned long)wlc->wsec_def_keys[0] +
- (sizeof(wsec_key_t) * i));
+ (sizeof(struct wsec_key) * i));
}
}
- wlc->protection = kzalloc(sizeof(struct wlc_protection), GFP_ATOMIC);
+ wlc->protection = kzalloc(sizeof(struct brcms_protection),
+ GFP_ATOMIC);
if (wlc->protection == NULL) {
*err = 1016;
goto fail;
}
- wlc->stf = kzalloc(sizeof(struct wlc_stf), GFP_ATOMIC);
+ wlc->stf = kzalloc(sizeof(struct brcms_stf), GFP_ATOMIC);
if (wlc->stf == NULL) {
*err = 1017;
goto fail;
}
wlc->bandstate[0] =
- kzalloc(sizeof(struct wlcband)*MAXBANDS, GFP_ATOMIC);
+ kzalloc(sizeof(struct brcms_band)*MAXBANDS, GFP_ATOMIC);
if (wlc->bandstate[0] == NULL) {
*err = 1025;
goto fail;
@@ -249,20 +225,20 @@ struct wlc_info *wlc_attach_malloc(uint unit, uint *err, uint devid)
int i;
for (i = 1; i < MAXBANDS; i++) {
- wlc->bandstate[i] =
- (struct wlcband *) ((unsigned long)wlc->bandstate[0]
- + (sizeof(struct wlcband)*i));
+ wlc->bandstate[i] = (struct brcms_band *)
+ ((unsigned long)wlc->bandstate[0]
+ + (sizeof(struct brcms_band)*i));
}
}
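
Both bandstate arrays in this function are carved out of a single kzalloc() and then split up with cast-and-offset arithmetic. A minimal sketch of the equivalent allocation with plain array indexing, shown only to clarify the pattern (this is not what the patch does; the fail label is assumed from the surrounding error path):

	struct brcms_band *bands;
	int i;

	bands = kcalloc(MAXBANDS, sizeof(*bands), GFP_ATOMIC);
	if (bands == NULL)
		goto fail;			/* assumed: the function's fail: path */
	for (i = 0; i < MAXBANDS; i++)
		wlc->bandstate[i] = &bands[i];	/* same layout, no pointer casts */
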
- wlc->corestate = kzalloc(sizeof(struct wlccore), GFP_ATOMIC);
+ wlc->corestate = kzalloc(sizeof(struct brcms_core), GFP_ATOMIC);
if (wlc->corestate == NULL) {
*err = 1026;
goto fail;
}
wlc->corestate->macstat_snapshot =
- kzalloc(sizeof(macstat_t), GFP_ATOMIC);
+ kzalloc(sizeof(struct macstat), GFP_ATOMIC);
if (wlc->corestate->macstat_snapshot == NULL) {
*err = 1027;
goto fail;
@@ -271,20 +247,19 @@ struct wlc_info *wlc_attach_malloc(uint unit, uint *err, uint devid)
return wlc;
fail:
- wlc_detach_mfree(wlc);
+ brcms_c_detach_mfree(wlc);
return NULL;
}
-void wlc_detach_mfree(struct wlc_info *wlc)
+void brcms_c_detach_mfree(struct brcms_c_info *wlc)
{
if (wlc == NULL)
return;
- wlc_bsscfg_mfree(wlc->cfg);
- wlc_pub_mfree(wlc->pub);
+ brcms_c_bsscfg_mfree(wlc->cfg);
+ brcms_c_pub_mfree(wlc->pub);
kfree(wlc->modulecb);
kfree(wlc->default_bss);
- kfree(wlc->pkt_callback);
kfree(wlc->wsec_def_keys[0]);
kfree(wlc->protection);
kfree(wlc->stf);
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_alloc.h b/drivers/staging/brcm80211/brcmsmac/alloc.h
index 95f951eb2b2..f465d304303 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_alloc.h
+++ b/drivers/staging/brcm80211/brcmsmac/alloc.h
@@ -14,5 +14,6 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-extern struct wlc_info *wlc_attach_malloc(uint unit, uint *err, uint devid);
-extern void wlc_detach_mfree(struct wlc_info *wlc);
+extern struct brcms_c_info *brcms_c_attach_malloc(uint unit, uint *err,
+ uint devid);
+extern void brcms_c_detach_mfree(struct brcms_c_info *wlc);
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_ampdu.c b/drivers/staging/brcm80211/brcmsmac/ampdu.c
index 85ad7009605..fcaf61e3b13 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_ampdu.c
+++ b/drivers/staging/brcm80211/brcmsmac/ampdu.c
@@ -13,30 +13,14 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
#include <net/mac80211.h>
-#include <bcmdefs.h>
-#include <bcmutils.h>
-#include <aiutils.h>
-#include <wlioctl.h>
-#include <sbhnddma.h>
-#include <hnddma.h>
-#include <d11.h>
-
-#include "wlc_types.h"
-#include "wlc_cfg.h"
-#include "wlc_rate.h"
-#include "wlc_scb.h"
-#include "wlc_pub.h"
-#include "wlc_key.h"
-#include "phy/wlc_phy_hal.h"
-#include "wlc_antsel.h"
-#include "wl_export.h"
-#include "wl_dbg.h"
-#include "wlc_channel.h"
-#include "wlc_main.h"
-#include "wlc_ampdu.h"
+#include "rate.h"
+#include "scb.h"
+#include "phy/phy_hal.h"
+#include "antsel.h"
+#include "main.h"
+#include "ampdu.h"
#define AMPDU_MAX_MPDU 32 /* max number of mpdus in an ampdu */
#define AMPDU_NUM_MPDU_LEGACY 16 /* max number of mpdus in an ampdu to a legacy */
@@ -76,7 +60,7 @@
 * This allows a specific state to be maintained independently of
 * how often and/or when the wlc counters are updated.
*/
-typedef struct wlc_fifo_info {
+struct brcms_fifo_info {
u16 ampdu_pld_size; /* number of bytes to be pre-loaded */
u8 mcs2ampdu_table[FFPLD_MAX_MCS + 1]; /* per-mcs max # of mpdus in an ampdu */
u16 prev_txfunfl; /* num of underflows last read from the HW macstats counter */
@@ -84,11 +68,11 @@ typedef struct wlc_fifo_info {
u32 accum_txampdu; /* num of tx ampdu since we modified pld params */
u32 prev_txampdu; /* previous reading of tx ampdu */
u32 dmaxferrate; /* estimated dma avg xfer rate in kbits/sec */
-} wlc_fifo_info_t;
+};
/* AMPDU module specific state */
struct ampdu_info {
- struct wlc_info *wlc; /* pointer to main wlc structure */
+ struct brcms_c_info *wlc; /* pointer to main wlc structure */
int scb_handle; /* scb cubby handle to retrieve data from scb */
u8 ini_enable[AMPDU_MAX_SCB_TID]; /* per-tid initiator enable/disable of ampdu */
u8 ba_tx_wsize; /* Tx ba window size (in pdu) */
@@ -110,7 +94,8 @@ struct ampdu_info {
	u32 tx_max_funl;	/* underflows should be kept such that
				 * (tx_max_funl * underflows) < tx frames
				 */
- wlc_fifo_info_t fifo_tb[NUM_FFPLD_FIFO]; /* table of fifo infos */
+ /* table of fifo infos */
+ struct brcms_fifo_info fifo_tb[NUM_FFPLD_FIFO];
};
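
The fifo_tb[] member holds the pre-load bookkeeping described above struct brcms_fifo_info, one entry per hardware tx FIFO. A minimal sketch of how an entry is reached (the helper name is hypothetical; the indexing mirrors brcms_c_ffpld_check_txfunfl() and the release clamp in brcms_c_scb_ampdu_update_config()):

	static u16 example_fifo_pld_size(struct ampdu_info *ampdu, int fid)
	{
		/* one brcms_fifo_info per tx FIFO, indexed by FIFO number */
		const struct brcms_fifo_info *fifo = &ampdu->fifo_tb[fid];

		return fifo->ampdu_pld_size;	/* bytes pre-loaded for this FIFO */
	}
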
@@ -126,35 +111,36 @@ struct cb_del_ampdu_pars {
#define SCB_AMPDU_CUBBY(ampdu, scb) (&(scb->scb_ampdu))
#define SCB_AMPDU_INI(scb_ampdu, tid) (&(scb_ampdu->ini[tid]))
-static void wlc_ffpld_init(struct ampdu_info *ampdu);
-static int wlc_ffpld_check_txfunfl(struct wlc_info *wlc, int f);
-static void wlc_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f);
+static void brcms_c_ffpld_init(struct ampdu_info *ampdu);
+static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int f);
+static void brcms_c_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f);
+
+static void brcms_c_scb_ampdu_update_max_txlen(struct ampdu_info *ampdu,
+ u8 dur);
+static void brcms_c_scb_ampdu_update_config(struct ampdu_info *ampdu,
+ struct scb *scb);
+static void brcms_c_scb_ampdu_update_config_all(struct ampdu_info *ampdu);
-static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(struct ampdu_info *ampdu,
- scb_ampdu_t *scb_ampdu,
- u8 tid, bool override);
-static void ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur);
-static void scb_ampdu_update_config(struct ampdu_info *ampdu, struct scb *scb);
-static void scb_ampdu_update_config_all(struct ampdu_info *ampdu);
+#define brcms_c_ampdu_txflowcontrol(a, b, c) do {} while (0)
-#define wlc_ampdu_txflowcontrol(a, b, c) do {} while (0)
+static void
+brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu,
+ struct scb *scb,
+ struct sk_buff *p, struct tx_status *txs,
+ u32 frmtxstatus, u32 frmtxstatus2);
-static void wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu,
- struct scb *scb,
- struct sk_buff *p, tx_status_t *txs,
- u32 frmtxstatus, u32 frmtxstatus2);
-static bool wlc_ampdu_cap(struct ampdu_info *ampdu);
-static int wlc_ampdu_set(struct ampdu_info *ampdu, bool on);
+static bool brcms_c_ampdu_cap(struct ampdu_info *ampdu);
+static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on);
-struct ampdu_info *wlc_ampdu_attach(struct wlc_info *wlc)
+struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
{
struct ampdu_info *ampdu;
int i;
ampdu = kzalloc(sizeof(struct ampdu_info), GFP_ATOMIC);
if (!ampdu) {
- wiphy_err(wlc->wiphy, "wl%d: wlc_ampdu_attach: out of mem\n",
- wlc->pub->unit);
+		wiphy_err(wlc->wiphy, "wl%d: brcms_c_ampdu_attach: out of mem\n",
+			  wlc->pub->unit);
return NULL;
}
ampdu->wlc = wlc;
@@ -178,7 +164,7 @@ struct ampdu_info *wlc_ampdu_attach(struct wlc_info *wlc)
ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD;
/* bump max ampdu rcv size to 64k for all 11n devices except 4321A0 and 4321A1 */
- if (WLCISNPHY(wlc->band) && NREV_LT(wlc->band->phyrev, 2))
+ if (BRCMS_ISNPHY(wlc->band) && NREV_LT(wlc->band->phyrev, 2))
ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_32K;
else
ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_64K;
@@ -190,18 +176,18 @@ struct ampdu_info *wlc_ampdu_attach(struct wlc_info *wlc)
ampdu->rr_retry_limit_tid[i] = ampdu->rr_retry_limit;
}
- ampdu_update_max_txlen(ampdu, ampdu->dur);
+ brcms_c_scb_ampdu_update_max_txlen(ampdu, ampdu->dur);
ampdu->mfbr = false;
/* try to set ampdu to the default value */
- wlc_ampdu_set(ampdu, wlc->pub->_ampdu);
+ brcms_c_ampdu_set(ampdu, wlc->pub->_ampdu);
ampdu->tx_max_funl = FFPLD_TX_MAX_UNFL;
- wlc_ffpld_init(ampdu);
+ brcms_c_ffpld_init(ampdu);
return ampdu;
}
-void wlc_ampdu_detach(struct ampdu_info *ampdu)
+void brcms_c_ampdu_detach(struct ampdu_info *ampdu)
{
int i;
@@ -213,13 +199,14 @@ void wlc_ampdu_detach(struct ampdu_info *ampdu)
kfree(ampdu->ini_free[i]);
}
- wlc_module_unregister(ampdu->wlc->pub, "ampdu", ampdu);
+ brcms_c_module_unregister(ampdu->wlc->pub, "ampdu", ampdu);
kfree(ampdu);
}
-static void scb_ampdu_update_config(struct ampdu_info *ampdu, struct scb *scb)
+static void brcms_c_scb_ampdu_update_config(struct ampdu_info *ampdu,
+ struct scb *scb)
{
- scb_ampdu_t *scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
+ struct scb_ampdu *scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
int i;
scb_ampdu->max_pdu = (u8) ampdu->wlc->pub->tunables->ampdunummpdu;
@@ -236,24 +223,24 @@ static void scb_ampdu_update_config(struct ampdu_info *ampdu, struct scb *scb)
scb_ampdu->release = min_t(u8, scb_ampdu->max_pdu, AMPDU_SCB_MAX_RELEASE);
- if (scb_ampdu->max_rxlen)
- scb_ampdu->release =
- min_t(u8, scb_ampdu->release, scb_ampdu->max_rxlen / 1600);
+ if (scb_ampdu->max_rx_ampdu_bytes)
+ scb_ampdu->release = min_t(u8, scb_ampdu->release,
+ scb_ampdu->max_rx_ampdu_bytes / 1600);
scb_ampdu->release = min(scb_ampdu->release,
ampdu->fifo_tb[TX_AC_BE_FIFO].
mcs2ampdu_table[FFPLD_MAX_MCS]);
}
-static void scb_ampdu_update_config_all(struct ampdu_info *ampdu)
+static void brcms_c_scb_ampdu_update_config_all(struct ampdu_info *ampdu)
{
- scb_ampdu_update_config(ampdu, ampdu->wlc->pub->global_scb);
+ brcms_c_scb_ampdu_update_config(ampdu, ampdu->wlc->pub->global_scb);
}
-static void wlc_ffpld_init(struct ampdu_info *ampdu)
+static void brcms_c_ffpld_init(struct ampdu_info *ampdu)
{
int i, j;
- wlc_fifo_info_t *fifo;
+ struct brcms_fifo_info *fifo;
for (j = 0; j < NUM_FFPLD_FIFO; j++) {
fifo = (ampdu->fifo_tb + j);
@@ -274,7 +261,7 @@ static void wlc_ffpld_init(struct ampdu_info *ampdu)
* Return 1 if pre-loading not active, -1 if not an underflow event,
* 0 if pre-loading module took care of the event.
*/
-static int wlc_ffpld_check_txfunfl(struct wlc_info *wlc, int fid)
+static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
{
struct ampdu_info *ampdu = wlc->ampdu;
u32 phy_rate = MCS_RATE(FFPLD_MAX_MCS, true, false);
@@ -283,14 +270,14 @@ static int wlc_ffpld_check_txfunfl(struct wlc_info *wlc, int fid)
u32 current_ampdu_cnt = 0;
u16 max_pld_size;
u32 new_txunfl;
- wlc_fifo_info_t *fifo = (ampdu->fifo_tb + fid);
+ struct brcms_fifo_info *fifo = (ampdu->fifo_tb + fid);
uint xmtfifo_sz;
u16 cur_txunfl;
/* return if we got here for a different reason than underflows */
- cur_txunfl =
- wlc_read_shm(wlc,
- M_UCODE_MACSTAT + offsetof(macstat_t, txfunfl[fid]));
+ cur_txunfl = brcms_c_read_shm(wlc,
+ M_UCODE_MACSTAT +
+ offsetof(struct macstat, txfunfl[fid]));
new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl);
if (new_txunfl == 0) {
BCMMSG(wlc->wiphy, "TX status FRAG set but no tx underflows\n");
@@ -302,9 +289,8 @@ static int wlc_ffpld_check_txfunfl(struct wlc_info *wlc, int fid)
return 1;
/* check if fifo is big enough */
- if (wlc_xmtfifo_sz_get(wlc, fid, &xmtfifo_sz)) {
+ if (brcms_c_xmtfifo_sz_get(wlc, fid, &xmtfifo_sz))
return -1;
- }
if ((TXFIFO_SIZE_UNIT * (u32) xmtfifo_sz) <= ampdu->ffpld_rsvd)
return 1;
@@ -356,7 +342,7 @@ static int wlc_ffpld_check_txfunfl(struct wlc_info *wlc, int fid)
fifo->ampdu_pld_size = max_pld_size;
/* update scb release size */
- scb_ampdu_update_config_all(ampdu);
+ brcms_c_scb_ampdu_update_config_all(ampdu);
/*
compute a new dma xfer rate for max_mpdu @ max mcs.
@@ -383,22 +369,22 @@ static int wlc_ffpld_check_txfunfl(struct wlc_info *wlc, int fid)
fifo->mcs2ampdu_table[FFPLD_MAX_MCS] -= 1;
/* recompute the table */
- wlc_ffpld_calc_mcs2ampdu_table(ampdu, fid);
+ brcms_c_ffpld_calc_mcs2ampdu_table(ampdu, fid);
/* update scb release size */
- scb_ampdu_update_config_all(ampdu);
+ brcms_c_scb_ampdu_update_config_all(ampdu);
}
}
fifo->accum_txfunfl = 0;
return 0;
}
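
Given the return convention documented above brcms_c_ffpld_check_txfunfl() (1: pre-loading not active, -1: not an underflow event, 0: the pre-loading module absorbed the event), a caller only has to react to the positive case. This sketch restates the check already made in brcms_c_ampdu_dotxstatus_complete() further down; the wrapper name is hypothetical:

	static void example_handle_txfunfl(struct brcms_c_info *wlc, int fid,
					   bool *tx_error)
	{
		int ret = brcms_c_ffpld_check_txfunfl(wlc, fid);

		if (ret > 0)			/* pre-loading not active */
			*tx_error = true;	/* let rate adaptation see the underflow */
		/* ret == 0: pre-loading handled it; ret < 0: not an underflow */
	}
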
-static void wlc_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f)
+static void brcms_c_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f)
{
int i;
u32 phy_rate, dma_rate, tmp;
u8 max_mpdu;
- wlc_fifo_info_t *fifo = (ampdu->fifo_tb + f);
+ struct brcms_fifo_info *fifo = (ampdu->fifo_tb + f);
/* recompute the dma rate */
/* note : we divide/multiply by 100 to avoid integer overflows */
@@ -425,47 +411,53 @@ static void wlc_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f)
}
}
-static void
-wlc_ampdu_agg(struct ampdu_info *ampdu, struct scb *scb, struct sk_buff *p,
- uint prec)
+void
+brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
+ u8 ba_wsize, /* negotiated ba window size (in pdu) */
+ uint max_rx_ampdu_bytes) /* from ht_cap in beacon */
{
- scb_ampdu_t *scb_ampdu;
- scb_ampdu_tid_ini_t *ini;
- u8 tid = (u8) (p->priority);
-
+ struct scb_ampdu *scb_ampdu;
+ struct scb_ampdu_tid_ini *ini;
+ struct ampdu_info *ampdu = wlc->ampdu;
+ struct scb *scb = wlc->pub->global_scb;
scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
- /* initialize initiator on first packet; sends addba req */
- ini = SCB_AMPDU_INI(scb_ampdu, tid);
- if (ini->magic != INI_MAGIC) {
- ini = wlc_ampdu_init_tid_ini(ampdu, scb_ampdu, tid, false);
+ if (!ampdu->ini_enable[tid]) {
+ wiphy_err(ampdu->wlc->wiphy, "%s: Rejecting tid %d\n",
+ __func__, tid);
+ return;
}
- return;
+
+ ini = SCB_AMPDU_INI(scb_ampdu, tid);
+ ini->tid = tid;
+ ini->scb = scb_ampdu->scb;
+ ini->ba_wsize = ba_wsize;
+ scb_ampdu->max_rx_ampdu_bytes = max_rx_ampdu_bytes;
}
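
brcms_c_ampdu_tx_operational() replaces the old per-packet initiator setup: the mac80211 glue is expected to call it once a block-ack session becomes TX-operational, passing the negotiated window size and the peer's maximum A-MPDU length. A hedged sketch of such a caller follows; the function name and call site are assumptions, not part of this patch:

	static void example_ampdu_tx_operational(struct brcms_c_info *wlc,
						 struct ieee80211_sta *sta,
						 u16 tid, u8 buf_size)
	{
		/* 802.11n HT caps: max A-MPDU length is 2^(13 + factor) - 1 octets */
		uint max_rx_ampdu_bytes =
			(1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
			       sta->ht_cap.ampdu_factor)) - 1;

		brcms_c_ampdu_tx_operational(wlc, (u8)tid, buf_size,
					     max_rx_ampdu_bytes);
	}
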
int
-wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
+brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
struct sk_buff **pdu, int prec)
{
- struct wlc_info *wlc;
+ struct brcms_c_info *wlc;
struct sk_buff *p, *pkt[AMPDU_MAX_MPDU];
u8 tid, ndelim;
int err = 0;
- u8 preamble_type = WLC_GF_PREAMBLE;
- u8 fbr_preamble_type = WLC_GF_PREAMBLE;
- u8 rts_preamble_type = WLC_LONG_PREAMBLE;
- u8 rts_fbr_preamble_type = WLC_LONG_PREAMBLE;
+ u8 preamble_type = BRCMS_GF_PREAMBLE;
+ u8 fbr_preamble_type = BRCMS_GF_PREAMBLE;
+ u8 rts_preamble_type = BRCMS_LONG_PREAMBLE;
+ u8 rts_fbr_preamble_type = BRCMS_LONG_PREAMBLE;
bool rr = true, fbr = false;
uint i, count = 0, fifo, seg_cnt = 0;
u16 plen, len, seq = 0, mcl, mch, index, frameid, dma_len = 0;
- u32 ampdu_len, maxlen = 0;
- d11txh_t *txh = NULL;
+ u32 ampdu_len, max_ampdu_bytes = 0;
+ struct d11txh *txh = NULL;
u8 *plcp;
struct ieee80211_hdr *h;
struct scb *scb;
- scb_ampdu_t *scb_ampdu;
- scb_ampdu_tid_ini_t *ini;
+ struct scb_ampdu *scb_ampdu;
+ struct scb_ampdu_tid_ini *ini;
u8 mcs = 0;
bool use_rts = false, use_cts = false;
ratespec_t rspec = 0, rspec_fallback = 0;
@@ -473,7 +465,7 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
struct ieee80211_rts *rts;
u8 rr_retry_limit;
- wlc_fifo_info_t *f;
+ struct brcms_fifo_info *f;
bool fbr_iscck;
struct ieee80211_tx_info *tx_info;
u16 qlen;
@@ -493,16 +485,13 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
/* Let pressure continue to build ... */
qlen = pktq_plen(&qi->q, prec);
- if (ini->tx_in_transit > 0 && qlen < scb_ampdu->max_pdu) {
+ if (ini->tx_in_transit > 0 &&
+ qlen < min(scb_ampdu->max_pdu, ini->ba_wsize)) {
+ /* Collect multiple MPDU's to be sent in the next AMPDU */
return -EBUSY;
}
- wlc_ampdu_agg(ampdu, scb, p, tid);
-
- if (wlc->block_datafifo) {
- wiphy_err(wiphy, "%s: Fifo blocked\n", __func__);
- return -EBUSY;
- }
+ /* at this point we intend to transmit an AMPDU */
rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
ampdu_len = 0;
dma_len = 0;
@@ -513,7 +502,7 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
txrate = tx_info->status.rates;
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
- err = wlc_prep_pdu(wlc, p, &fifo);
+ err = brcms_c_prep_pdu(wlc, p, &fifo);
} else {
wiphy_err(wiphy, "%s: AMPDU flag is off!\n", __func__);
*pdu = NULL;
@@ -523,7 +512,7 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
if (err) {
if (err == -EBUSY) {
- wiphy_err(wiphy, "wl%d: wlc_sendampdu: "
+ wiphy_err(wiphy, "wl%d: sendampdu: "
"prep_xdu retry; seq 0x%x\n",
wlc->pub->unit, seq);
*pdu = p;
@@ -531,14 +520,14 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
}
/* error in the packet; reject it */
- wiphy_err(wiphy, "wl%d: wlc_sendampdu: prep_xdu "
+ wiphy_err(wiphy, "wl%d: sendampdu: prep_xdu "
"rejected; seq 0x%x\n", wlc->pub->unit, seq);
*pdu = NULL;
break;
}
/* pkt is good to be aggregated */
- txh = (d11txh_t *) p->data;
+ txh = (struct d11txh *) p->data;
plcp = (u8 *) (txh + 1);
h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;
@@ -562,8 +551,8 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
}
/* extract the length info */
- len = fbr_iscck ? WLC_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
- : WLC_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
+ len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
+ : BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
/* retrieve null delimiter count */
ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
@@ -598,7 +587,7 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
len = roundup(len, 4);
ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN);
- dma_len += (u16) bcm_pkttotlen(p);
+ dma_len += (u16) brcmu_pkttotlen(p);
BCMMSG(wlc->wiphy, "wl%d: ampdu_len %d"
" seg_cnt %d null delim %d\n",
@@ -627,19 +616,14 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
sgi = PLCP3_ISSGI(plcp3) ? 1 : 0;
mcs = plcp0 & ~MIMO_PLCP_40MHZ;
- maxlen =
- min(scb_ampdu->max_rxlen,
+ max_ampdu_bytes =
+ min(scb_ampdu->max_rx_ampdu_bytes,
ampdu->max_txlen[mcs][is40][sgi]);
- /* XXX Fix me to honor real max_rxlen */
- /* can fix this as soon as ampdu_action() in mac80211.h
- * gets extra u8buf_size par */
- maxlen = 64 * 1024;
-
if (is40)
mimo_ctlchbw =
- CHSPEC_SB_UPPER(WLC_BAND_PI_RADIO_CHANSPEC)
- ? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ;
+ CHSPEC_SB_UPPER(BRCMS_BAND_PI_RADIO_CHANSPEC)
+ ? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ;
/* rebuild the rspec and rspec_fallback */
rspec = RSPEC_MIMORATE;
@@ -663,11 +647,11 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
if (use_rts || use_cts) {
rts_rspec =
- wlc_rspec_to_rts_rspec(wlc, rspec, false,
- mimo_ctlchbw);
+ brcms_c_rspec_to_rts_rspec(wlc,
+ rspec, false, mimo_ctlchbw);
rts_rspec_fallback =
- wlc_rspec_to_rts_rspec(wlc, rspec_fallback,
- false, mimo_ctlchbw);
+ brcms_c_rspec_to_rts_rspec(wlc,
+ rspec_fallback, false, mimo_ctlchbw);
}
}
@@ -693,14 +677,12 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
((u8) (p->priority) == tid)) {
- plen =
- bcm_pkttotlen(p) + AMPDU_MAX_MPDU_OVERHEAD;
+ plen = brcmu_pkttotlen(p) +
+ AMPDU_MAX_MPDU_OVERHEAD;
plen = max(scb_ampdu->min_len, plen);
- if ((plen + ampdu_len) > maxlen) {
+ if ((plen + ampdu_len) > max_ampdu_bytes) {
p = NULL;
- wiphy_err(wiphy, "%s: Bogus plen #1\n",
- __func__);
continue;
}
@@ -711,7 +693,7 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
p = NULL;
continue;
}
- p = bcm_pktq_pdeq(&qi->q, prec);
+ p = brcmu_pktq_pdeq(&qi->q, prec);
} else {
p = NULL;
}
@@ -722,7 +704,7 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
if (count) {
/* patch up the last txh */
- txh = (d11txh_t *) pkt[count - 1]->data;
+ txh = (struct d11txh *) pkt[count - 1]->data;
mcl = le16_to_cpu(txh->MacTxControlLow);
mcl &= ~TXC_AMPDU_MASK;
mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
@@ -735,30 +717,31 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
/* remove the pad len from last mpdu */
fbr_iscck = ((le16_to_cpu(txh->XtraFrameTypes) & 0x3) == 0);
- len = fbr_iscck ? WLC_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
- : WLC_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
+ len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
+ : BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
ampdu_len -= roundup(len, 4) - len;
/* patch up the first txh & plcp */
- txh = (d11txh_t *) pkt[0]->data;
+ txh = (struct d11txh *) pkt[0]->data;
plcp = (u8 *) (txh + 1);
- WLC_SET_MIMO_PLCP_LEN(plcp, ampdu_len);
+ BRCMS_SET_MIMO_PLCP_LEN(plcp, ampdu_len);
/* mark plcp to indicate ampdu */
- WLC_SET_MIMO_PLCP_AMPDU(plcp);
+ BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
/* reset the mixed mode header durations */
if (txh->MModeLen) {
u16 mmodelen =
- wlc_calc_lsig_len(wlc, rspec, ampdu_len);
+ brcms_c_calc_lsig_len(wlc, rspec, ampdu_len);
txh->MModeLen = cpu_to_le16(mmodelen);
- preamble_type = WLC_MM_PREAMBLE;
+ preamble_type = BRCMS_MM_PREAMBLE;
}
if (txh->MModeFbrLen) {
u16 mmfbrlen =
- wlc_calc_lsig_len(wlc, rspec_fallback, ampdu_len);
+ brcms_c_calc_lsig_len(wlc, rspec_fallback,
+ ampdu_len);
txh->MModeFbrLen = cpu_to_le16(mmfbrlen);
- fbr_preamble_type = WLC_MM_PREAMBLE;
+ fbr_preamble_type = BRCMS_MM_PREAMBLE;
}
/* set the preload length */
@@ -776,19 +759,19 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
rts = (struct ieee80211_rts *)&txh->rts_frame;
if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) ==
TXC_PREAMBLE_RTS_MAIN_SHORT)
- rts_preamble_type = WLC_SHORT_PREAMBLE;
+ rts_preamble_type = BRCMS_SHORT_PREAMBLE;
if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) ==
TXC_PREAMBLE_RTS_FB_SHORT)
- rts_fbr_preamble_type = WLC_SHORT_PREAMBLE;
+ rts_fbr_preamble_type = BRCMS_SHORT_PREAMBLE;
durid =
- wlc_compute_rtscts_dur(wlc, use_cts, rts_rspec,
+ brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec,
rspec, rts_preamble_type,
preamble_type, ampdu_len,
true);
rts->duration = cpu_to_le16(durid);
- durid = wlc_compute_rtscts_dur(wlc, use_cts,
+ durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
rts_rspec_fallback,
rspec_fallback,
rts_fbr_preamble_type,
@@ -805,8 +788,8 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
if (fbr) {
mch |= TXC_AMPDU_FBR;
txh->MacTxControlHigh = cpu_to_le16(mch);
- WLC_SET_MIMO_PLCP_AMPDU(plcp);
- WLC_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
+ BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
+ BRCMS_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
}
BCMMSG(wlc->wiphy, "wl%d: count %d ampdu_len %d\n",
@@ -819,7 +802,7 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
"TXFID_RATE_PROBE_MASK!?\n", __func__);
}
for (i = 0; i < count; i++)
- wlc_txfifo(wlc, fifo, pkt[i], i == (count - 1),
+ brcms_c_txfifo(wlc, fifo, pkt[i], i == (count - 1),
ampdu->txpkt_weight);
}
@@ -828,12 +811,12 @@ wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
}
void
-wlc_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
- struct sk_buff *p, tx_status_t *txs)
+brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
+ struct sk_buff *p, struct tx_status *txs)
{
- scb_ampdu_t *scb_ampdu;
- struct wlc_info *wlc = ampdu->wlc;
- scb_ampdu_tid_ini_t *ini;
+ struct scb_ampdu *scb_ampdu;
+ struct brcms_c_info *wlc = ampdu->wlc;
+ struct scb_ampdu_tid_ini *ini;
u32 s1 = 0, s2 = 0;
struct ieee80211_tx_info *tx_info;
@@ -861,31 +844,32 @@ wlc_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
if (likely(scb)) {
scb_ampdu = SCB_AMPDU_CUBBY(ampdu, scb);
ini = SCB_AMPDU_INI(scb_ampdu, p->priority);
- wlc_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
+ brcms_c_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
} else {
/* loop through all pkts and free */
u8 queue = txs->frameid & TXFID_QUEUE_MASK;
- d11txh_t *txh;
+ struct d11txh *txh;
u16 mcl;
while (p) {
tx_info = IEEE80211_SKB_CB(p);
- txh = (d11txh_t *) p->data;
+ txh = (struct d11txh *) p->data;
mcl = le16_to_cpu(txh->MacTxControlLow);
- bcm_pkt_buf_free_skb(p);
+ brcmu_pkt_buf_free_skb(p);
/* break out if last packet of ampdu */
if (((mcl & TXC_AMPDU_MASK) >> TXC_AMPDU_SHIFT) ==
TXC_AMPDU_LAST)
break;
p = GETNEXTTXP(wlc, queue);
}
- wlc_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
+ brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
}
- wlc_ampdu_txflowcontrol(wlc, scb_ampdu, ini);
+ brcms_c_ampdu_txflowcontrol(wlc, scb_ampdu, ini);
}
static void
-rate_status(struct wlc_info *wlc, struct ieee80211_tx_info *tx_info,
- tx_status_t *txs, u8 mcs)
+brcms_c_ampdu_rate_status(struct brcms_c_info *wlc,
+ struct ieee80211_tx_info *tx_info,
+ struct tx_status *txs, u8 mcs)
{
struct ieee80211_tx_rate *txrate = tx_info->status.rates;
int i;
@@ -900,15 +884,15 @@ rate_status(struct wlc_info *wlc, struct ieee80211_tx_info *tx_info,
#define SHORTNAME "AMPDU status"
static void
-wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
- struct sk_buff *p, tx_status_t *txs,
+brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
+ struct sk_buff *p, struct tx_status *txs,
u32 s1, u32 s2)
{
- scb_ampdu_t *scb_ampdu;
- struct wlc_info *wlc = ampdu->wlc;
- scb_ampdu_tid_ini_t *ini;
+ struct scb_ampdu *scb_ampdu;
+ struct brcms_c_info *wlc = ampdu->wlc;
+ struct scb_ampdu_tid_ini *ini;
u8 bitmap[8], queue, tid;
- d11txh_t *txh;
+ struct d11txh *txh;
u8 *plcp;
struct ieee80211_hdr *h;
u16 seq, start_seq = 0, bindex, index, mcl;
@@ -974,9 +958,9 @@ wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
wlc->default_bss->chanspec));
} else {
if (supr_status != TX_STATUS_SUPR_FRAG)
- wiphy_err(wiphy, "%s: wlc_ampdu_dotx"
- "status:supr_status 0x%x\n",
- __func__, supr_status);
+					wiphy_err(wiphy,
+						  "%s: supr_status 0x%x\n",
+						  __func__, supr_status);
}
/* no need to retry for badch; will fail again */
if (supr_status == TX_STATUS_SUPR_BADCH ||
@@ -988,29 +972,29 @@ wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
/* if there were underflows, but pre-loading is not active,
notify rate adaptation.
*/
- if (wlc_ffpld_check_txfunfl(wlc, prio2fifo[tid])
- > 0) {
+ if (brcms_c_ffpld_check_txfunfl(wlc,
+ prio2fifo[tid]) > 0) {
tx_error = true;
}
}
} else if (txs->phyerr) {
update_rate = false;
- wiphy_err(wiphy, "wl%d: wlc_ampdu_dotxstatus: tx phy "
+ wiphy_err(wiphy, "wl%d: ampdu tx phy "
"error (0x%x)\n", wlc->pub->unit,
txs->phyerr);
if (WL_ERROR_ON()) {
- bcm_prpkt("txpkt (AMPDU)", p);
- wlc_print_txdesc((d11txh_t *) p->data);
+ brcmu_prpkt("txpkt (AMPDU)", p);
+ brcms_c_print_txdesc((struct d11txh *) p->data);
}
- wlc_print_txstatus(txs);
+ brcms_c_print_txstatus(txs);
}
}
/* loop through all pkts and retry if not acked */
while (p) {
tx_info = IEEE80211_SKB_CB(p);
- txh = (d11txh_t *) p->data;
+ txh = (struct d11txh *) p->data;
mcl = le16_to_cpu(txh->MacTxControlLow);
plcp = (u8 *) (txh + 1);
h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
@@ -1037,7 +1021,8 @@ wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
/* ampdu_ack_len: number of acked aggregated frames */
/* ampdu_len: number of aggregated frames */
- rate_status(wlc, tx_info, txs, mcs);
+ brcms_c_ampdu_rate_status(wlc, tx_info, txs,
+ mcs);
tx_info->flags |= IEEE80211_TX_STAT_ACK;
tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
tx_info->status.ampdu_ack_len =
@@ -1060,9 +1045,10 @@ wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
ini->txretry[index]++;
ini->tx_in_transit--;
			/* Use high precedence for retransmit to give some punch */
- /* wlc_txq_enq(wlc, scb, p, WLC_PRIO_TO_PREC(tid)); */
- wlc_txq_enq(wlc, scb, p,
- WLC_PRIO_TO_HI_PREC(tid));
+ /* brcms_c_txq_enq(wlc, scb, p,
+ * BRCMS_PRIO_TO_PREC(tid)); */
+ brcms_c_txq_enq(wlc, scb, p,
+ BRCMS_PRIO_TO_HI_PREC(tid));
} else {
/* Retry timeout */
ini->tx_in_transit--;
@@ -1089,38 +1075,17 @@ wlc_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
p = GETNEXTTXP(wlc, queue);
}
- wlc_send_q(wlc);
+ brcms_c_send_q(wlc);
/* update rate state */
- antselid = wlc_antsel_antsel2id(wlc->asi, mimoantsel);
+ antselid = brcms_c_antsel_antsel2id(wlc->asi, mimoantsel);
- wlc_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
-}
-
-/* initialize the initiator code for tid */
-static scb_ampdu_tid_ini_t *wlc_ampdu_init_tid_ini(struct ampdu_info *ampdu,
- scb_ampdu_t *scb_ampdu,
- u8 tid, bool override)
-{
- scb_ampdu_tid_ini_t *ini;
-
- /* check for per-tid control of ampdu */
- if (!ampdu->ini_enable[tid]) {
- wiphy_err(ampdu->wlc->wiphy, "%s: Rejecting tid %d\n",
- __func__, tid);
- return NULL;
- }
-
- ini = SCB_AMPDU_INI(scb_ampdu, tid);
- ini->tid = tid;
- ini->scb = scb_ampdu->scb;
- ini->magic = INI_MAGIC;
- return ini;
+ brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
}
-static int wlc_ampdu_set(struct ampdu_info *ampdu, bool on)
+static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on)
{
- struct wlc_info *wlc = ampdu->wlc;
+ struct brcms_c_info *wlc = ampdu->wlc;
wlc->pub->_ampdu = false;
@@ -1130,7 +1095,7 @@ static int wlc_ampdu_set(struct ampdu_info *ampdu, bool on)
"nmode enabled\n", wlc->pub->unit);
return -ENOTSUPP;
}
- if (!wlc_ampdu_cap(ampdu)) {
+ if (!brcms_c_ampdu_cap(ampdu)) {
wiphy_err(ampdu->wlc->wiphy, "wl%d: device not "
"ampdu capable\n", wlc->pub->unit);
return -ENOTSUPP;
@@ -1141,15 +1106,15 @@ static int wlc_ampdu_set(struct ampdu_info *ampdu, bool on)
return 0;
}
-static bool wlc_ampdu_cap(struct ampdu_info *ampdu)
+static bool brcms_c_ampdu_cap(struct ampdu_info *ampdu)
{
- if (WLC_PHY_11N_CAP(ampdu->wlc->band))
+ if (BRCMS_PHY_11N_CAP(ampdu->wlc->band))
return true;
else
return false;
}
-static void ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
+static void brcms_c_scb_ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
{
u32 rate, mcs;
@@ -1170,34 +1135,35 @@ static void ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
}
}
-void wlc_ampdu_macaddr_upd(struct wlc_info *wlc)
+void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc)
{
char template[T_RAM_ACCESS_SZ * 2];
/* driver needs to write the ta in the template; ta is at offset 16 */
memset(template, 0, sizeof(template));
memcpy(template, wlc->pub->cur_etheraddr, ETH_ALEN);
- wlc_write_template_ram(wlc, (T_BA_TPL_BASE + 16), (T_RAM_ACCESS_SZ * 2),
- template);
+ brcms_c_write_template_ram(wlc, (T_BA_TPL_BASE + 16),
+ (T_RAM_ACCESS_SZ * 2),
+ template);
}
-bool wlc_aggregatable(struct wlc_info *wlc, u8 tid)
+bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid)
{
return wlc->ampdu->ini_enable[tid];
}
-void wlc_ampdu_shm_upd(struct ampdu_info *ampdu)
+void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu)
{
- struct wlc_info *wlc = ampdu->wlc;
+ struct brcms_c_info *wlc = ampdu->wlc;
/* Extend ucode internal watchdog timer to match larger received frames */
if ((ampdu->rx_factor & IEEE80211_HT_AMPDU_PARM_FACTOR) ==
IEEE80211_HT_MAX_AMPDU_64K) {
- wlc_write_shm(wlc, M_MIMO_MAXSYM, MIMO_MAXSYM_MAX);
- wlc_write_shm(wlc, M_WATCHDOG_8TU, WATCHDOG_8TU_MAX);
+ brcms_c_write_shm(wlc, M_MIMO_MAXSYM, MIMO_MAXSYM_MAX);
+ brcms_c_write_shm(wlc, M_WATCHDOG_8TU, WATCHDOG_8TU_MAX);
} else {
- wlc_write_shm(wlc, M_MIMO_MAXSYM, MIMO_MAXSYM_DEF);
- wlc_write_shm(wlc, M_WATCHDOG_8TU, WATCHDOG_8TU_DEF);
+ brcms_c_write_shm(wlc, M_MIMO_MAXSYM, MIMO_MAXSYM_DEF);
+ brcms_c_write_shm(wlc, M_WATCHDOG_8TU, WATCHDOG_8TU_DEF);
}
}
@@ -1235,10 +1201,10 @@ static void dma_cb_fn_ampdu(void *txi, void *arg_a)
* When a remote party is no longer available for ampdu communication, any
* pending tx ampdu packets in the driver have to be flushed.
*/
-void wlc_ampdu_flush(struct wlc_info *wlc,
+void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
struct ieee80211_sta *sta, u16 tid)
{
- struct wlc_txq_info *qi = wlc->pkt_queue;
+ struct brcms_txq_info *qi = wlc->pkt_queue;
struct pktq *pq = &qi->q;
int prec;
struct cb_del_ampdu_pars ampdu_pars;
@@ -1246,8 +1212,8 @@ void wlc_ampdu_flush(struct wlc_info *wlc,
ampdu_pars.sta = sta;
ampdu_pars.tid = tid;
for (prec = 0; prec < pq->num_prec; prec++) {
- bcm_pktq_pflush(pq, prec, true, cb_del_ampdu_pkt,
+ brcmu_pktq_pflush(pq, prec, true, cb_del_ampdu_pkt,
(void *)&ampdu_pars);
}
- wlc_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu);
+ brcms_c_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu);
}
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_types.h b/drivers/staging/brcm80211/brcmsmac/ampdu.h
index df6e04c6ac5..421f4ba7c63 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_types.h
+++ b/drivers/staging/brcm80211/brcmsmac/ampdu.h
@@ -14,24 +14,17 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _wlc_types_h_
-#define _wlc_types_h_
+#ifndef _BRCM_AMPDU_H_
+#define _BRCM_AMPDU_H_
-/* forward declarations */
+extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
+extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
+extern int brcms_c_sendampdu(struct ampdu_info *ampdu,
+ struct brcms_txq_info *qi,
+ struct sk_buff **aggp, int prec);
+extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
+ struct sk_buff *p, struct tx_status *txs);
+extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
+extern void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
-struct wlc_info;
-struct wlc_hw_info;
-struct wlc_if;
-struct wl_if;
-struct ampdu_info;
-struct antsel_info;
-struct bmac_pmq;
-
-struct d11init;
-
-#ifndef _hnddma_pub_
-#define _hnddma_pub_
-struct hnddma_pub;
-#endif /* _hnddma_pub_ */
-
-#endif /* _wlc_types_h_ */
+#endif /* _BRCM_AMPDU_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_antsel.c b/drivers/staging/brcm80211/brcmsmac/antsel.c
index 111ef32b7ac..c4e76c093ae 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_antsel.c
+++ b/drivers/staging/brcm80211/brcmsmac/antsel.c
@@ -14,40 +14,29 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <wlc_cfg.h>
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <bcmdefs.h>
-#include <bcmutils.h>
-#include <bcmnvram.h>
-#include <aiutils.h>
-#include <bcmdevs.h>
-#include <sbhnddma.h>
-#include <wlioctl.h>
-
-#include "d11.h"
-#include "wlc_rate.h"
-#include "wlc_key.h"
-#include "wlc_scb.h"
-#include "wlc_pub.h"
-#include "wl_dbg.h"
-#include "phy/wlc_phy_hal.h"
-#include "wlc_bmac.h"
-#include "wlc_channel.h"
-#include "wlc_main.h"
-#include "wl_export.h"
-#include "wlc_phy_shim.h"
-#include "wlc_antsel.h"
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "types.h"
+#include "bmac.h"
+#include "main.h"
+#include "phy_shim.h"
+#include "antsel.h"
+
+#define ANT_SELCFG_AUTO 0x80 /* bit indicates antenna sel AUTO */
+#define ANT_SELCFG_MASK 0x33 /* antenna configuration mask */
+#define ANT_SELCFG_TX_UNICAST 0 /* unicast tx antenna configuration */
+#define ANT_SELCFG_RX_UNICAST 1 /* unicast rx antenna configuration */
+#define ANT_SELCFG_TX_DEF 2 /* default tx antenna configuration */
+#define ANT_SELCFG_RX_DEF 3 /* default rx antenna configuration */
/* useful macros */
-#define WLC_ANTSEL_11N_0(ant) ((((ant) & ANT_SELCFG_MASK) >> 4) & 0xf)
-#define WLC_ANTSEL_11N_1(ant) (((ant) & ANT_SELCFG_MASK) & 0xf)
-#define WLC_ANTIDX_11N(ant) (((WLC_ANTSEL_11N_0(ant)) << 2) + (WLC_ANTSEL_11N_1(ant)))
-#define WLC_ANT_ISAUTO_11N(ant) (((ant) & ANT_SELCFG_AUTO) == ANT_SELCFG_AUTO)
-#define WLC_ANTSEL_11N(ant) ((ant) & ANT_SELCFG_MASK)
+#define BRCMS_ANTSEL_11N_0(ant) ((((ant) & ANT_SELCFG_MASK) >> 4) & 0xf)
+#define BRCMS_ANTSEL_11N_1(ant) (((ant) & ANT_SELCFG_MASK) & 0xf)
+#define BRCMS_ANTIDX_11N(ant) (((BRCMS_ANTSEL_11N_0(ant)) << 2) +\
+ (BRCMS_ANTSEL_11N_1(ant)))
+#define BRCMS_ANT_ISAUTO_11N(ant) (((ant) & ANT_SELCFG_AUTO) == ANT_SELCFG_AUTO)
+#define BRCMS_ANTSEL_11N(ant) ((ant) & ANT_SELCFG_MASK)
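
A worked example of the bit layout these macros assume (illustrative only): one antenna field sits in bits 5:4 of the configuration byte, the other in bits 1:0, and BRCMS_ANTIDX_11N() folds them into a 4-bit index for the lookup tables used below.

	u8 ant_cfg = 0x12;
	u8 a0  = BRCMS_ANTSEL_11N_0(ant_cfg);	/* ((0x12 & 0x33) >> 4) & 0xf = 1 */
	u8 a1  = BRCMS_ANTSEL_11N_1(ant_cfg);	/* (0x12 & 0x33) & 0xf = 2 */
	u8 idx = BRCMS_ANTIDX_11N(ant_cfg);	/* (1 << 2) + 2 = 6 */
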
/* antenna switch */
/* defines for no boardlevel antenna diversity */
@@ -62,11 +51,12 @@
#define ANT_SELCFG_DEF_2x4 0x02 /* default antenna configuration */
/* static functions */
-static int wlc_antsel_cfgupd(struct antsel_info *asi, wlc_antselcfg_t *antsel);
-static u8 wlc_antsel_id2antcfg(struct antsel_info *asi, u8 id);
-static u16 wlc_antsel_antcfg2antsel(struct antsel_info *asi, u8 ant_cfg);
-static void wlc_antsel_init_cfg(struct antsel_info *asi,
- wlc_antselcfg_t *antsel,
+static int brcms_c_antsel_cfgupd(struct antsel_info *asi,
+ struct brcms_antselcfg *antsel);
+static u8 brcms_c_antsel_id2antcfg(struct antsel_info *asi, u8 id);
+static u16 brcms_c_antsel_antcfg2antsel(struct antsel_info *asi, u8 ant_cfg);
+static void brcms_c_antsel_init_cfg(struct antsel_info *asi,
+ struct brcms_antselcfg *antsel,
bool auto_sel);
const u16 mimo_2x4_div_antselpat_tbl[] = {
@@ -93,14 +83,14 @@ const u8 mimo_2x3_div_antselid_tbl[16] = {
0, 0, 0, 0, 0, 0, 0, 0 /* pat to antselid */
};
-struct antsel_info *wlc_antsel_attach(struct wlc_info *wlc)
+struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
{
struct antsel_info *asi;
asi = kzalloc(sizeof(struct antsel_info), GFP_ATOMIC);
if (!asi) {
- wiphy_err(wlc->wiphy, "wl%d: wlc_antsel_attach: out of mem\n",
- wlc->pub->unit);
+		wiphy_err(wlc->wiphy, "wl%d: brcms_c_antsel_attach: out of mem\n",
+			  wlc->pub->unit);
return NULL;
}
@@ -129,7 +119,7 @@ struct antsel_info *wlc_antsel_attach(struct wlc_info *wlc)
asi->antsel_avail = false;
} else {
asi->antsel_avail = false;
- wiphy_err(wlc->wiphy, "wlc_antsel_attach: 2o3 "
+ wiphy_err(wlc->wiphy, "antsel_attach: 2o3 "
"board cfg invalid\n");
}
break;
@@ -148,30 +138,30 @@ struct antsel_info *wlc_antsel_attach(struct wlc_info *wlc)
}
/* Set the antenna selection type for the low driver */
- wlc_bmac_antsel_type_set(wlc->hw, asi->antsel_type);
+ brcms_b_antsel_type_set(wlc->hw, asi->antsel_type);
/* Init (auto/manual) antenna selection */
- wlc_antsel_init_cfg(asi, &asi->antcfg_11n, true);
- wlc_antsel_init_cfg(asi, &asi->antcfg_cur, true);
+ brcms_c_antsel_init_cfg(asi, &asi->antcfg_11n, true);
+ brcms_c_antsel_init_cfg(asi, &asi->antcfg_cur, true);
return asi;
}
-void wlc_antsel_detach(struct antsel_info *asi)
+void brcms_c_antsel_detach(struct antsel_info *asi)
{
kfree(asi);
}
-void wlc_antsel_init(struct antsel_info *asi)
+void brcms_c_antsel_init(struct antsel_info *asi)
{
if ((asi->antsel_type == ANTSEL_2x3) ||
(asi->antsel_type == ANTSEL_2x4))
- wlc_antsel_cfgupd(asi, &asi->antcfg_11n);
+ brcms_c_antsel_cfgupd(asi, &asi->antcfg_11n);
}
/* boardlevel antenna selection: init antenna selection structure */
static void
-wlc_antsel_init_cfg(struct antsel_info *asi, wlc_antselcfg_t *antsel,
+brcms_c_antsel_init_cfg(struct antsel_info *asi, struct brcms_antselcfg *antsel,
bool auto_sel)
{
if (asi->antsel_type == ANTSEL_2x3) {
@@ -202,7 +192,7 @@ wlc_antsel_init_cfg(struct antsel_info *asi, wlc_antselcfg_t *antsel,
}
void
-wlc_antsel_antcfg_get(struct antsel_info *asi, bool usedef, bool sel,
+brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef, bool sel,
u8 antselid, u8 fbantselid, u8 *antcfg,
u8 *fbantcfg)
{
@@ -222,8 +212,8 @@ wlc_antsel_antcfg_get(struct antsel_info *asi, bool usedef, bool sel,
} else {
ant = asi->antcfg_11n.ant_config[ANT_SELCFG_TX_UNICAST];
if ((ant & ANT_SELCFG_AUTO) == ANT_SELCFG_AUTO) {
- *antcfg = wlc_antsel_id2antcfg(asi, antselid);
- *fbantcfg = wlc_antsel_id2antcfg(asi, fbantselid);
+ *antcfg = brcms_c_antsel_id2antcfg(asi, antselid);
+ *fbantcfg = brcms_c_antsel_id2antcfg(asi, fbantselid);
} else {
*antcfg =
asi->antcfg_11n.ant_config[ANT_SELCFG_TX_UNICAST];
@@ -234,7 +224,7 @@ wlc_antsel_antcfg_get(struct antsel_info *asi, bool usedef, bool sel,
}
/* boardlevel antenna selection: convert mimo_antsel (ucode interface) to id */
-u8 wlc_antsel_antsel2id(struct antsel_info *asi, u16 antsel)
+u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel)
{
u8 antselid = 0;
@@ -253,7 +243,7 @@ u8 wlc_antsel_antsel2id(struct antsel_info *asi, u16 antsel)
}
/* boardlevel antenna selection: convert id to ant_cfg */
-static u8 wlc_antsel_id2antcfg(struct antsel_info *asi, u8 id)
+static u8 brcms_c_antsel_id2antcfg(struct antsel_info *asi, u8 id)
{
u8 antcfg = ANT_SELCFG_DEF_2x2;
@@ -272,9 +262,9 @@ static u8 wlc_antsel_id2antcfg(struct antsel_info *asi, u8 id)
}
/* boardlevel antenna selection: convert ant_cfg to mimo_antsel (ucode interface) */
-static u16 wlc_antsel_antcfg2antsel(struct antsel_info *asi, u8 ant_cfg)
+static u16 brcms_c_antsel_antcfg2antsel(struct antsel_info *asi, u8 ant_cfg)
{
- u8 idx = WLC_ANTIDX_11N(WLC_ANTSEL_11N(ant_cfg));
+ u8 idx = BRCMS_ANTIDX_11N(BRCMS_ANTSEL_11N(ant_cfg));
u16 mimo_antsel = 0;
if (asi->antsel_type == ANTSEL_2x4) {
@@ -292,9 +282,10 @@ static u16 wlc_antsel_antcfg2antsel(struct antsel_info *asi, u8 ant_cfg)
}
/* boardlevel antenna selection: ucode interface control */
-static int wlc_antsel_cfgupd(struct antsel_info *asi, wlc_antselcfg_t *antsel)
+static int brcms_c_antsel_cfgupd(struct antsel_info *asi,
+ struct brcms_antselcfg *antsel)
{
- struct wlc_info *wlc = asi->wlc;
+ struct brcms_c_info *wlc = asi->wlc;
u8 ant_cfg;
u16 mimo_antsel;
@@ -302,8 +293,8 @@ static int wlc_antsel_cfgupd(struct antsel_info *asi, wlc_antselcfg_t *antsel)
* (aka default TX)
*/
ant_cfg = antsel->ant_config[ANT_SELCFG_TX_DEF];
- mimo_antsel = wlc_antsel_antcfg2antsel(asi, ant_cfg);
- wlc_write_shm(wlc, M_MIMO_ANTSEL_TXDFLT, mimo_antsel);
+ mimo_antsel = brcms_c_antsel_antcfg2antsel(asi, ant_cfg);
+ brcms_c_write_shm(wlc, M_MIMO_ANTSEL_TXDFLT, mimo_antsel);
/* Update driver stats for currently selected default tx/rx antenna config */
asi->antcfg_cur.ant_config[ANT_SELCFG_TX_DEF] = ant_cfg;
@@ -311,8 +302,8 @@ static int wlc_antsel_cfgupd(struct antsel_info *asi, wlc_antselcfg_t *antsel)
* (aka default RX)
*/
ant_cfg = antsel->ant_config[ANT_SELCFG_RX_DEF];
- mimo_antsel = wlc_antsel_antcfg2antsel(asi, ant_cfg);
- wlc_write_shm(wlc, M_MIMO_ANTSEL_RXDFLT, mimo_antsel);
+ mimo_antsel = brcms_c_antsel_antcfg2antsel(asi, ant_cfg);
+ brcms_c_write_shm(wlc, M_MIMO_ANTSEL_RXDFLT, mimo_antsel);
/* Update driver stats for currently selected default tx/rx antenna config */
asi->antcfg_cur.ant_config[ANT_SELCFG_RX_DEF] = ant_cfg;
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_antsel.h b/drivers/staging/brcm80211/brcmsmac/antsel.h
index 2470c73fc4e..97ea3881a8e 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_antsel.h
+++ b/drivers/staging/brcm80211/brcmsmac/antsel.h
@@ -14,16 +14,16 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _wlc_antsel_h_
-#define _wlc_antsel_h_
+#ifndef _BRCM_ANTSEL_H_
+#define _BRCM_ANTSEL_H_
-extern struct antsel_info *wlc_antsel_attach(struct wlc_info *wlc);
-extern void wlc_antsel_detach(struct antsel_info *asi);
-extern void wlc_antsel_init(struct antsel_info *asi);
-extern void wlc_antsel_antcfg_get(struct antsel_info *asi, bool usedef,
+extern struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
+extern void brcms_c_antsel_detach(struct antsel_info *asi);
+extern void brcms_c_antsel_init(struct antsel_info *asi);
+extern void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef,
bool sel,
u8 id, u8 fbid, u8 *antcfg,
u8 *fbantcfg);
-extern u8 wlc_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
+extern u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
-#endif /* _wlc_antsel_h_ */
+#endif /* _BRCM_ANTSEL_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/bcmotp.c b/drivers/staging/brcm80211/brcmsmac/bcmotp.c
deleted file mode 100644
index d09628b5a88..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/bcmotp.c
+++ /dev/null
@@ -1,936 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/crc-ccitt.h>
-
-#include <bcmdefs.h>
-#include <bcmdevs.h>
-#include <bcmutils.h>
-#include <aiutils.h>
-#include <hndsoc.h>
-#include <sbchipc.h>
-#include <bcmotp.h>
-
-/*
- * There are two different OTP controllers so far:
- * 1. new IPX OTP controller: chipc 21, >=23
- * 2. older HND OTP controller: chipc 12, 17, 22
- *
- * Define BCMHNDOTP to include support for the HND OTP controller.
- * Define BCMIPXOTP to include support for the IPX OTP controller.
- *
- * NOTE 1: More than one may be defined
- * NOTE 2: If none are defined, the default is to include them all.
- */
-
-#if !defined(BCMHNDOTP) && !defined(BCMIPXOTP)
-#define BCMHNDOTP 1
-#define BCMIPXOTP 1
-#endif
-
-#define OTPTYPE_HND(ccrev) ((ccrev) < 21 || (ccrev) == 22)
-#define OTPTYPE_IPX(ccrev) ((ccrev) == 21 || (ccrev) >= 23)
-
-#define OTPP_TRIES 10000000 /* # of tries for OTPP */
-
-#ifdef BCMIPXOTP
-#define MAXNUMRDES 9 /* Maximum OTP redundancy entries */
-#endif
-
-/* OTP common function type */
-typedef int (*otp_status_t) (void *oh);
-typedef int (*otp_size_t) (void *oh);
-typedef void *(*otp_init_t) (si_t *sih);
-typedef u16(*otp_read_bit_t) (void *oh, chipcregs_t *cc, uint off);
-typedef int (*otp_read_region_t) (si_t *sih, int region, u16 *data,
- uint *wlen);
-typedef int (*otp_nvread_t) (void *oh, char *data, uint *len);
-
-/* OTP function struct */
-typedef struct otp_fn_s {
- otp_size_t size;
- otp_read_bit_t read_bit;
- otp_init_t init;
- otp_read_region_t read_region;
- otp_nvread_t nvread;
- otp_status_t status;
-} otp_fn_t;
-
-typedef struct {
- uint ccrev; /* chipc revision */
- otp_fn_t *fn; /* OTP functions */
- si_t *sih; /* Saved sb handle */
-
-#ifdef BCMIPXOTP
- /* IPX OTP section */
- u16 wsize; /* Size of otp in words */
- u16 rows; /* Geometry */
- u16 cols; /* Geometry */
- u32 status; /* Flag bits (lock/prog/rv).
- * (Reflected only when OTP is power cycled)
- */
- u16 hwbase; /* hardware subregion offset */
- u16 hwlim; /* hardware subregion boundary */
- u16 swbase; /* software subregion offset */
- u16 swlim; /* software subregion boundary */
- u16 fbase; /* fuse subregion offset */
- u16 flim; /* fuse subregion boundary */
- int otpgu_base; /* offset to General Use Region */
-#endif /* BCMIPXOTP */
-
-#ifdef BCMHNDOTP
- /* HND OTP section */
- uint size; /* Size of otp in bytes */
- uint hwprot; /* Hardware protection bits */
- uint signvalid; /* Signature valid bits */
- int boundary; /* hw/sw boundary */
-#endif /* BCMHNDOTP */
-} otpinfo_t;
-
-static otpinfo_t otpinfo;
-
-/*
- * IPX OTP Code
- *
- * Exported functions:
- * ipxotp_status()
- * ipxotp_size()
- * ipxotp_init()
- * ipxotp_read_bit()
- * ipxotp_read_region()
- * ipxotp_nvread()
- *
- */
-
-#ifdef BCMIPXOTP
-
-#define HWSW_RGN(rgn) (((rgn) == OTP_HW_RGN) ? "h/w" : "s/w")
-
-/* OTP layout */
-/* CC revs 21, 24 and 27 OTP General Use Region word offset */
-#define REVA4_OTPGU_BASE 12
-
-/* CC revs 23, 25, 26, 28 and above OTP General Use Region word offset */
-#define REVB8_OTPGU_BASE 20
-
-/* CC rev 36 OTP General Use Region word offset */
-#define REV36_OTPGU_BASE 12
-
-/* Subregion word offsets in General Use region */
-#define OTPGU_HSB_OFF 0
-#define OTPGU_SFB_OFF 1
-#define OTPGU_CI_OFF 2
-#define OTPGU_P_OFF 3
-#define OTPGU_SROM_OFF 4
-
-/* Flag bit offsets in General Use region */
-#define OTPGU_HWP_OFF 60
-#define OTPGU_SWP_OFF 61
-#define OTPGU_CIP_OFF 62
-#define OTPGU_FUSEP_OFF 63
-#define OTPGU_CIP_MSK 0x4000
-#define OTPGU_P_MSK 0xf000
-#define OTPGU_P_SHIFT (OTPGU_HWP_OFF % 16)
-
-/* OTP Size */
-#define OTP_SZ_FU_324 ((roundup(324, 8))/8) /* 324 bits */
-#define OTP_SZ_FU_288 (288/8) /* 288 bits */
-#define OTP_SZ_FU_216 (216/8) /* 216 bits */
-#define OTP_SZ_FU_72 (72/8) /* 72 bits */
-#define OTP_SZ_CHECKSUM (16/8) /* 16 bits */
-#define OTP4315_SWREG_SZ 178 /* 178 bytes */
-#define OTP_SZ_FU_144 (144/8) /* 144 bits */
-
-static int ipxotp_status(void *oh)
-{
- otpinfo_t *oi = (otpinfo_t *) oh;
- return (int)(oi->status);
-}
-
-/* Return size in bytes */
-static int ipxotp_size(void *oh)
-{
- otpinfo_t *oi = (otpinfo_t *) oh;
- return (int)oi->wsize * 2;
-}
-
-static u16 ipxotp_otpr(void *oh, chipcregs_t *cc, uint wn)
-{
- otpinfo_t *oi;
-
- oi = (otpinfo_t *) oh;
-
- return R_REG(&cc->sromotp[wn]);
-}
-
-static u16 ipxotp_read_bit(void *oh, chipcregs_t *cc, uint off)
-{
- otpinfo_t *oi = (otpinfo_t *) oh;
- uint k, row, col;
- u32 otpp, st;
-
- row = off / oi->cols;
- col = off % oi->cols;
-
- otpp = OTPP_START_BUSY |
- ((OTPPOC_READ << OTPP_OC_SHIFT) & OTPP_OC_MASK) |
- ((row << OTPP_ROW_SHIFT) & OTPP_ROW_MASK) |
- ((col << OTPP_COL_SHIFT) & OTPP_COL_MASK);
- W_REG(&cc->otpprog, otpp);
-
- for (k = 0;
- ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY)
- && (k < OTPP_TRIES); k++)
- ;
- if (k >= OTPP_TRIES) {
- return 0xffff;
- }
- if (st & OTPP_READERR) {
- return 0xffff;
- }
- st = (st & OTPP_VALUE_MASK) >> OTPP_VALUE_SHIFT;
-
- return (int)st;
-}
-
-/* Calculate max HW/SW region byte size by subtracting fuse region and checksum size,
- * osizew is oi->wsize (OTP size - GU size) in words
- */
-static int ipxotp_max_rgnsz(si_t *sih, int osizew)
-{
- int ret = 0;
-
- switch (sih->chip) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
- break;
- case BCM4313_CHIP_ID:
- ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
- break;
- default:
- break; /* Don't know about this chip */
- }
-
- return ret;
-}
-
-static void _ipxotp_init(otpinfo_t *oi, chipcregs_t *cc)
-{
- uint k;
- u32 otpp, st;
-
- /* record word offset of General Use Region for various chipcommon revs */
- if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24
- || oi->sih->ccrev == 27) {
- oi->otpgu_base = REVA4_OTPGU_BASE;
- } else if (oi->sih->ccrev == 36) {
- /* OTP size greater than equal to 2KB (128 words), otpgu_base is similar to rev23 */
- if (oi->wsize >= 128)
- oi->otpgu_base = REVB8_OTPGU_BASE;
- else
- oi->otpgu_base = REV36_OTPGU_BASE;
- } else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) {
- oi->otpgu_base = REVB8_OTPGU_BASE;
- }
-
- /* First issue an init command so the status is up to date */
- otpp =
- OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);
-
- W_REG(&cc->otpprog, otpp);
- for (k = 0;
- ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY)
- && (k < OTPP_TRIES); k++)
- ;
- if (k >= OTPP_TRIES) {
- return;
- }
-
- /* Read OTP lock bits and subregion programmed indication bits */
- oi->status = R_REG(&cc->otpstatus);
-
- if ((oi->sih->chip == BCM43224_CHIP_ID)
- || (oi->sih->chip == BCM43225_CHIP_ID)) {
- u32 p_bits;
- p_bits =
- (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) &
- OTPGU_P_MSK)
- >> OTPGU_P_SHIFT;
- oi->status |= (p_bits << OTPS_GUP_SHIFT);
- }
-
- /*
- * h/w region base and fuse region limit are fixed to the top and
- * the bottom of the general use region. Everything else can be flexible.
- */
- oi->hwbase = oi->otpgu_base + OTPGU_SROM_OFF;
- oi->hwlim = oi->wsize;
- if (oi->status & OTPS_GUP_HW) {
- oi->hwlim =
- ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
- oi->swbase = oi->hwlim;
- } else
- oi->swbase = oi->hwbase;
-
- /* subtract fuse and checksum from beginning */
- oi->swlim = ipxotp_max_rgnsz(oi->sih, oi->wsize) / 2;
-
- if (oi->status & OTPS_GUP_SW) {
- oi->swlim =
- ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
- oi->fbase = oi->swlim;
- } else
- oi->fbase = oi->swbase;
-
- oi->flim = oi->wsize;
-}
-
-static void *ipxotp_init(si_t *sih)
-{
- uint idx;
- chipcregs_t *cc;
- otpinfo_t *oi;
-
- /* Make sure we're running IPX OTP */
- if (!OTPTYPE_IPX(sih->ccrev))
- return NULL;
-
- /* Make sure OTP is not disabled */
- if (ai_is_otp_disabled(sih))
- return NULL;
-
- /* Make sure OTP is powered up */
- if (!ai_is_otp_powered(sih))
- return NULL;
-
- oi = &otpinfo;
-
- /* Check for otp size */
- switch ((sih->cccaps & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
- case 0:
- /* Nothing there */
- return NULL;
- case 1: /* 32x64 */
- oi->rows = 32;
- oi->cols = 64;
- oi->wsize = 128;
- break;
- case 2: /* 64x64 */
- oi->rows = 64;
- oi->cols = 64;
- oi->wsize = 256;
- break;
- case 5: /* 96x64 */
- oi->rows = 96;
- oi->cols = 64;
- oi->wsize = 384;
- break;
- case 7: /* 16x64 *//* 1024 bits */
- oi->rows = 16;
- oi->cols = 64;
- oi->wsize = 64;
- break;
- default:
- /* Don't know the geometry */
- return NULL;
- }
-
- /* Retrieve OTP region info */
- idx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- _ipxotp_init(oi, cc);
-
- ai_setcoreidx(sih, idx);
-
- return (void *)oi;
-}
-
-static int ipxotp_read_region(void *oh, int region, u16 *data, uint *wlen)
-{
- otpinfo_t *oi = (otpinfo_t *) oh;
- uint idx;
- chipcregs_t *cc;
- uint base, i, sz;
-
- /* Validate region selection */
- switch (region) {
- case OTP_HW_RGN:
- sz = (uint) oi->hwlim - oi->hwbase;
- if (!(oi->status & OTPS_GUP_HW)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->hwbase;
- break;
- case OTP_SW_RGN:
- sz = ((uint) oi->swlim - oi->swbase);
- if (!(oi->status & OTPS_GUP_SW)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->swbase;
- break;
- case OTP_CI_RGN:
- sz = OTPGU_CI_SZ;
- if (!(oi->status & OTPS_GUP_CI)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->otpgu_base + OTPGU_CI_OFF;
- break;
- case OTP_FUSE_RGN:
- sz = (uint) oi->flim - oi->fbase;
- if (!(oi->status & OTPS_GUP_FUSE)) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->fbase;
- break;
- case OTP_ALL_RGN:
- sz = ((uint) oi->flim - oi->hwbase);
- if (!(oi->status & (OTPS_GUP_HW | OTPS_GUP_SW))) {
- *wlen = sz;
- return -ENODATA;
- }
- if (*wlen < sz) {
- *wlen = sz;
- return -EOVERFLOW;
- }
- base = oi->hwbase;
- break;
- default:
- return -EINVAL;
- }
-
- idx = ai_coreidx(oi->sih);
- cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
-
- /* Read the data */
- for (i = 0; i < sz; i++)
- data[i] = ipxotp_otpr(oh, cc, base + i);
-
- ai_setcoreidx(oi->sih, idx);
- *wlen = sz;
- return 0;
-}
-
-static int ipxotp_nvread(void *oh, char *data, uint *len)
-{
- return -ENOTSUPP;
-}
-
-static otp_fn_t ipxotp_fn = {
- (otp_size_t) ipxotp_size,
- (otp_read_bit_t) ipxotp_read_bit,
-
- (otp_init_t) ipxotp_init,
- (otp_read_region_t) ipxotp_read_region,
- (otp_nvread_t) ipxotp_nvread,
-
- (otp_status_t) ipxotp_status
-};
-
-#endif /* BCMIPXOTP */
-
-/*
- * HND OTP Code
- *
- * Exported functions:
- * hndotp_status()
- * hndotp_size()
- * hndotp_init()
- * hndotp_read_bit()
- * hndotp_read_region()
- * hndotp_nvread()
- *
- */
-
-#ifdef BCMHNDOTP
-
-/* Fields in otpstatus */
-#define OTPS_PROGFAIL 0x80000000
-#define OTPS_PROTECT 0x00000007
-#define OTPS_HW_PROTECT 0x00000001
-#define OTPS_SW_PROTECT 0x00000002
-#define OTPS_CID_PROTECT 0x00000004
-#define OTPS_RCEV_MSK 0x00003f00
-#define OTPS_RCEV_SHIFT 8
-
-/* Fields in the otpcontrol register */
-#define OTPC_RECWAIT 0xff000000
-#define OTPC_PROGWAIT 0x00ffff00
-#define OTPC_PRW_SHIFT 8
-#define OTPC_MAXFAIL 0x00000038
-#define OTPC_VSEL 0x00000006
-#define OTPC_SELVL 0x00000001
-
-/* OTP regions (Word offsets from otp size) */
-#define OTP_SWLIM_OFF (-4)
-#define OTP_CIDBASE_OFF 0
-#define OTP_CIDLIM_OFF 4
-
-/* Predefined OTP words (Word offset from otp size) */
-#define OTP_BOUNDARY_OFF (-4)
-#define OTP_HWSIGN_OFF (-3)
-#define OTP_SWSIGN_OFF (-2)
-#define OTP_CIDSIGN_OFF (-1)
-#define OTP_CID_OFF 0
-#define OTP_PKG_OFF 1
-#define OTP_FID_OFF 2
-#define OTP_RSV_OFF 3
-#define OTP_LIM_OFF 4
-#define OTP_RD_OFF 4 /* Redundancy row starts here */
-#define OTP_RC0_OFF 28 /* Redundancy control word 1 */
-#define OTP_RC1_OFF 32 /* Redundancy control word 2 */
-#define OTP_RC_LIM_OFF 36 /* Redundancy control word end */
-
-#define OTP_HW_REGION OTPS_HW_PROTECT
-#define OTP_SW_REGION OTPS_SW_PROTECT
-#define OTP_CID_REGION OTPS_CID_PROTECT
-
-#if OTP_HW_REGION != OTP_HW_RGN
-#error "incompatible OTP_HW_RGN"
-#endif
-#if OTP_SW_REGION != OTP_SW_RGN
-#error "incompatible OTP_SW_RGN"
-#endif
-#if OTP_CID_REGION != OTP_CI_RGN
-#error "incompatible OTP_CI_RGN"
-#endif
-
-/* Redundancy entry definitions */
-#define OTP_RCE_ROW_SZ 6
-#define OTP_RCE_SIGN_MASK 0x7fff
-#define OTP_RCE_ROW_MASK 0x3f
-#define OTP_RCE_BITS 21
-#define OTP_RCE_SIGN_SZ 15
-#define OTP_RCE_BIT0 1
-
-#define OTP_WPR 4
-#define OTP_SIGNATURE 0x578a
-#define OTP_MAGIC 0x4e56
-
-static int hndotp_status(void *oh)
-{
- otpinfo_t *oi = (otpinfo_t *) oh;
- return (int)(oi->hwprot | oi->signvalid);
-}
-
-static int hndotp_size(void *oh)
-{
- otpinfo_t *oi = (otpinfo_t *) oh;
- return (int)(oi->size);
-}
-
-static u16 hndotp_otpr(void *oh, chipcregs_t *cc, uint wn)
-{
- volatile u16 *ptr;
-
- ptr = (volatile u16 *)((volatile char *)cc + CC_SROM_OTP);
- return R_REG(&ptr[wn]);
-}
-
-static u16 hndotp_otproff(void *oh, chipcregs_t *cc, int woff)
-{
- otpinfo_t *oi = (otpinfo_t *) oh;
- volatile u16 *ptr;
-
- ptr = (volatile u16 *)((volatile char *)cc + CC_SROM_OTP);
-
- return R_REG(&ptr[(oi->size / 2) + woff]);
-}
-
-static u16 hndotp_read_bit(void *oh, chipcregs_t *cc, uint idx)
-{
- uint k, row, col;
- u32 otpp, st;
-
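- /* each OTP row holds 65 bits: convert the linear bit index to (row, col) */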
- row = idx / 65;
- col = idx % 65;
-
- otpp = OTPP_START_BUSY | OTPP_READ |
- ((row << OTPP_ROW_SHIFT) & OTPP_ROW_MASK) | (col & OTPP_COL_MASK);
-
- W_REG(&cc->otpprog, otpp);
- st = R_REG(&cc->otpprog);
- for (k = 0;
- ((st & OTPP_START_BUSY) == OTPP_START_BUSY) && (k < OTPP_TRIES);
- k++)
- st = R_REG(&cc->otpprog);
-
- if (k >= OTPP_TRIES) {
- return 0xffff;
- }
- if (st & OTPP_READERR) {
- return 0xffff;
- }
- st = (st & OTPP_VALUE_MASK) >> OTPP_VALUE_SHIFT;
- return (u16) st;
-}
-
-static void *hndotp_init(si_t *sih)
-{
- uint idx;
- chipcregs_t *cc;
- otpinfo_t *oi;
- u32 cap = 0, clkdiv, otpdiv = 0;
- void *ret = NULL;
-
- oi = &otpinfo;
-
- idx = ai_coreidx(sih);
-
- /* Check for otp */
- cc = ai_setcoreidx(sih, SI_CC_IDX);
- if (cc != NULL) {
- cap = R_REG(&cc->capabilities);
- if ((cap & CC_CAP_OTPSIZE) == 0) {
- /* Nothing there */
- goto out;
- }
-
- if (!((oi->ccrev == 12) || (oi->ccrev == 17)
- || (oi->ccrev == 22)))
- return NULL;
-
- /* Read the OTP byte size. chipcommon rev >= 18 has RCE so the size is
- * 8 rows (64 bytes) smaller
- */
- oi->size =
- 1 << (((cap & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT)
- + CC_CAP_OTPSIZE_BASE);
- if (oi->ccrev >= 18)
- oi->size -= ((OTP_RC0_OFF - OTP_BOUNDARY_OFF) * 2);
-
- oi->hwprot = (int)(R_REG(&cc->otpstatus) & OTPS_PROTECT);
- oi->boundary = -1;
-
- /* Check the region signature */
- if (hndotp_otproff(oi, cc, OTP_HWSIGN_OFF) == OTP_SIGNATURE) {
- oi->signvalid |= OTP_HW_REGION;
- oi->boundary = hndotp_otproff(oi, cc, OTP_BOUNDARY_OFF);
- }
-
- if (hndotp_otproff(oi, cc, OTP_SWSIGN_OFF) == OTP_SIGNATURE)
- oi->signvalid |= OTP_SW_REGION;
-
- if (hndotp_otproff(oi, cc, OTP_CIDSIGN_OFF) == OTP_SIGNATURE)
- oi->signvalid |= OTP_CID_REGION;
-
- /* Set OTP clkdiv for stability */
- if (oi->ccrev == 22)
- otpdiv = 12;
-
- if (otpdiv) {
- clkdiv = R_REG(&cc->clkdiv);
- clkdiv =
- (clkdiv & ~CLKD_OTP) | (otpdiv << CLKD_OTP_SHIFT);
- W_REG(&cc->clkdiv, clkdiv);
- }
- udelay(10);
-
- ret = (void *)oi;
- }
-
- out: /* All done */
- ai_setcoreidx(sih, idx);
-
- return ret;
-}
-
-static int hndotp_read_region(void *oh, int region, u16 *data, uint *wlen)
-{
- otpinfo_t *oi = (otpinfo_t *) oh;
- u32 idx, st;
- chipcregs_t *cc;
- int i;
-
-
- if (region != OTP_HW_REGION) {
- /*
- * Only support HW region
- * (no active chips use HND OTP SW region)
- */
- return -ENOTSUPP;
- }
-
- /* Region empty? */
- st = oi->hwprot | oi->signvalid;
- if ((st & region) == 0)
- return -ENODATA;
-
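- /* clamp the requested length so the read stays within the programmed boundary */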
- *wlen =
- ((int)*wlen < oi->boundary / 2) ? *wlen : (uint) oi->boundary / 2;
-
- idx = ai_coreidx(oi->sih);
- cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
-
- for (i = 0; i < (int)*wlen; i++)
- data[i] = hndotp_otpr(oh, cc, i);
-
- ai_setcoreidx(oi->sih, idx);
-
- return 0;
-}
-
-static int hndotp_nvread(void *oh, char *data, uint *len)
-{
- int rc = 0;
- otpinfo_t *oi = (otpinfo_t *) oh;
- u32 base, bound, lim = 0, st;
- int i, chunk, gchunks, tsz = 0;
- u32 idx;
- chipcregs_t *cc;
- uint offset;
- u16 *rawotp = NULL;
-
- /* save the orig core */
- idx = ai_coreidx(oi->sih);
- cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
-
- st = hndotp_status(oh);
- if (!(st & (OTP_HW_REGION | OTP_SW_REGION))) {
- rc = -1;
- goto out;
- }
-
- /* Read the whole otp so we can easily manipulate it */
- lim = hndotp_size(oh);
- rawotp = kmalloc(lim, GFP_ATOMIC);
- if (rawotp == NULL) {
- rc = -2;
- goto out;
- }
- for (i = 0; i < (int)(lim / 2); i++)
- rawotp[i] = hndotp_otpr(oh, cc, i);
-
- if ((st & OTP_HW_REGION) == 0) {
- /* This could be a programming failure in the first
- * chunk followed by one or more good chunks
- */
- for (i = 0; i < (int)(lim / 2); i++)
- if (rawotp[i] == OTP_MAGIC)
- break;
-
- if (i < (int)(lim / 2)) {
- base = i;
- bound = (i * 2) + rawotp[i + 1];
- } else {
- rc = -3;
- goto out;
- }
- } else {
- bound = rawotp[(lim / 2) + OTP_BOUNDARY_OFF];
-
- /* There are two cases: 1) The whole otp is used as nvram
- * and 2) There is a hardware header followed by nvram.
- */
- if (rawotp[0] == OTP_MAGIC) {
- base = 0;
- } else
- base = bound;
- }
-
- /* Find and copy the data */
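- /* Each chunk starts with OTP_MAGIC and a length word; the payload begins
- * two words in, and 6 bytes per chunk are framing (magic, length, CRC).
- */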
-
- chunk = 0;
- gchunks = 0;
- i = base / 2;
- offset = 0;
- while ((i < (int)(lim / 2)) && (rawotp[i] == OTP_MAGIC)) {
- int dsz, rsz = rawotp[i + 1];
-
- if (((i * 2) + rsz) >= (int)lim) {
- /* Bad length, try to find another chunk anyway */
- rsz = 6;
- }
- if (crc_ccitt(CRC16_INIT_VALUE, (u8 *) &rawotp[i], rsz) ==
- CRC16_GOOD_VALUE) {
- /* Good crc, copy the vars */
- gchunks++;
- dsz = rsz - 6;
- tsz += dsz;
- if (offset + dsz >= *len) {
- goto out;
- }
- memcpy(&data[offset], &rawotp[i + 2], dsz);
- offset += dsz;
- /* Remove extra null characters at the end */
- while (offset > 1 &&
- data[offset - 1] == 0 && data[offset - 2] == 0)
- offset--;
- i += rsz / 2;
- } else {
- /* bad length or CRC mismatch; try to find the next chunk */
- if (rawotp[i + (rsz / 2)] == OTP_MAGIC) {
- /* Assume length is good */
- i += rsz / 2;
- } else {
- while (++i < (int)(lim / 2))
- if (rawotp[i] == OTP_MAGIC)
- break;
- }
- }
- chunk++;
- }
-
- *len = offset;
-
- out:
- kfree(rawotp);
- ai_setcoreidx(oi->sih, idx);
-
- return rc;
-}
-
-static otp_fn_t hndotp_fn = {
- (otp_size_t) hndotp_size,
- (otp_read_bit_t) hndotp_read_bit,
-
- (otp_init_t) hndotp_init,
- (otp_read_region_t) hndotp_read_region,
- (otp_nvread_t) hndotp_nvread,
-
- (otp_status_t) hndotp_status
-};
-
-#endif /* BCMHNDOTP */
-
-/*
- * Common Code: Compiled for IPX / HND / AUTO
- * otp_status()
- * otp_size()
- * otp_read_bit()
- * otp_init()
- * otp_read_region()
- * otp_nvread()
- */
-
-int otp_status(void *oh)
-{
- otpinfo_t *oi = (otpinfo_t *) oh;
-
- return oi->fn->status(oh);
-}
-
-int otp_size(void *oh)
-{
- otpinfo_t *oi = (otpinfo_t *) oh;
-
- return oi->fn->size(oh);
-}
-
-u16 otp_read_bit(void *oh, uint offset)
-{
- otpinfo_t *oi = (otpinfo_t *) oh;
- uint idx = ai_coreidx(oi->sih);
- chipcregs_t *cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
- u16 readBit = (u16) oi->fn->read_bit(oh, cc, offset);
- ai_setcoreidx(oi->sih, idx);
- return readBit;
-}
-
-void *otp_init(si_t *sih)
-{
- otpinfo_t *oi;
- void *ret = NULL;
-
- oi = &otpinfo;
- memset(oi, 0, sizeof(otpinfo_t));
-
- oi->ccrev = sih->ccrev;
-
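- /* select the OTP implementation (IPX or HND) that matches this chipcommon rev */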
-#ifdef BCMIPXOTP
- if (OTPTYPE_IPX(oi->ccrev))
- oi->fn = &ipxotp_fn;
-#endif
-
-#ifdef BCMHNDOTP
- if (OTPTYPE_HND(oi->ccrev))
- oi->fn = &hndotp_fn;
-#endif
-
- if (oi->fn == NULL) {
- return NULL;
- }
-
- oi->sih = sih;
-
- ret = (oi->fn->init) (sih);
-
- return ret;
-}
-
-int
-otp_read_region(si_t *sih, int region, u16 *data,
- uint *wlen) {
- bool wasup = false;
- void *oh;
- int err = 0;
-
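- /* power up OTP for the duration of the read if it was not already powered */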
- wasup = ai_is_otp_powered(sih);
- if (!wasup)
- ai_otp_power(sih, true);
-
- if (!ai_is_otp_powered(sih) || ai_is_otp_disabled(sih)) {
- err = -EPERM;
- goto out;
- }
-
- oh = otp_init(sih);
- if (oh == NULL) {
- err = -EBADE;
- goto out;
- }
-
- err = (((otpinfo_t *) oh)->fn->read_region) (oh, region, data, wlen);
-
- out:
- if (!wasup)
- ai_otp_power(sih, false);
-
- return err;
-}
-
-int otp_nvread(void *oh, char *data, uint *len)
-{
- otpinfo_t *oi = (otpinfo_t *) oh;
-
- return oi->fn->nvread(oh, data, len);
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/bcmsrom.c b/drivers/staging/brcm80211/brcmsmac/bcmsrom.c
deleted file mode 100644
index bbfc6420436..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/bcmsrom.c
+++ /dev/null
@@ -1,714 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/etherdevice.h>
-#include <bcmdefs.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <stdarg.h>
-#include <bcmutils.h>
-#include <hndsoc.h>
-#include <sbchipc.h>
-#include <bcmdevs.h>
-#include <pcicfg.h>
-#include <aiutils.h>
-#include <bcmsrom.h>
-#include <bcmsrom_tbl.h>
-
-#include <bcmnvram.h>
-#include <bcmotp.h>
-
-#define SROM_OFFSET(sih) ((sih->ccrev > 31) ? \
- (((sih->cccaps & CC_CAP_SROM) == 0) ? NULL : \
- ((u8 *)curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP)) : \
- ((u8 *)curmap + PCI_BAR0_SPROM_OFFSET))
-
-#if defined(BCMDBG)
-#define WRITE_ENABLE_DELAY 500 /* 500 ms after write enable/disable toggle */
-#define WRITE_WORD_DELAY 20 /* 20 ms between each word write */
-#endif
-
-typedef struct varbuf {
- char *base; /* pointer to buffer base */
- char *buf; /* pointer to current position */
- unsigned int size; /* current (residual) size in bytes */
-} varbuf_t;
-extern char *_vars;
-extern uint _varsz;
-
-static int initvars_srom_si(si_t *sih, void *curmap, char **vars, uint *count);
-static void _initvars_srom_pci(u8 sromrev, u16 *srom, uint off, varbuf_t *b);
-static int initvars_srom_pci(si_t *sih, void *curmap, char **vars, uint *count);
-static int initvars_flash_si(si_t *sih, char **vars, uint *count);
-static int sprom_read_pci(si_t *sih, u16 *sprom,
- uint wordoff, u16 *buf, uint nwords, bool check_crc);
-#if defined(BCMNVRAMR)
-static int otp_read_pci(si_t *sih, u16 *buf, uint bufsz);
-#endif
-static u16 srom_cc_cmd(si_t *sih, void *ccregs, u32 cmd,
- uint wordoff, u16 data);
-
-static int initvars_table(char *start, char *end,
- char **vars, uint *count);
-static int initvars_flash(si_t *sih, char **vp,
- uint len);
-
-/* Initialization of varbuf structure */
-static void varbuf_init(varbuf_t *b, char *buf, uint size)
-{
- b->size = size;
- b->base = b->buf = buf;
-}
-
-/* append a null terminated var=value string */
-static int varbuf_append(varbuf_t *b, const char *fmt, ...)
-{
- va_list ap;
- int r;
- size_t len;
- char *s;
-
- if (b->size < 2)
- return 0;
-
- va_start(ap, fmt);
- r = vsnprintf(b->buf, b->size, fmt, ap);
- va_end(ap);
-
- /* C99 snprintf behavior returns r >= size on overflow,
- * others return -1 on overflow.
- * All return -1 on format error.
- * We need to leave room for 2 null terminations: one for the current var
- * string and one for the final null of the var table. So check that the
- * length written, r, leaves room for 2 chars.
- */
- if ((r == -1) || (r > (int)(b->size - 2))) {
- b->size = 0;
- return 0;
- }
-
- /* Remove any earlier occurrence of the same variable */
- s = strchr(b->buf, '=');
- if (s != NULL) {
- len = (size_t) (s - b->buf);
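- /* len is the length of the variable name (the part before '=') just appended */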
- for (s = b->base; s < b->buf;) {
- if ((memcmp(s, b->buf, len) == 0) && s[len] == '=') {
- len = strlen(s) + 1;
- memmove(s, (s + len),
- ((b->buf + r + 1) - (s + len)));
- b->buf -= len;
- b->size += (unsigned int)len;
- break;
- }
-
- while (*s++)
- ;
- }
- }
-
- /* skip over this string's null termination */
- r++;
- b->size -= r;
- b->buf += r;
-
- return r;
-}
-
-/*
- * Initialize local vars from the right source for this platform.
- * Return 0 on success, nonzero on error.
- */
-int srom_var_init(si_t *sih, uint bustype, void *curmap,
- char **vars, uint *count)
-{
- uint len;
-
- len = 0;
-
- if (vars == NULL || count == NULL)
- return 0;
-
- *vars = NULL;
- *count = 0;
-
- switch (bustype) {
- case SI_BUS:
- case JTAG_BUS:
- return initvars_srom_si(sih, curmap, vars, count);
-
- case PCI_BUS:
- if (curmap == NULL)
- return -1;
-
- return initvars_srom_pci(sih, curmap, vars, count);
-
- default:
- break;
- }
- return -1;
-}
-
-/* In chips with chipcommon rev 32 and later, the srom is in chipcommon,
- * not in the bus cores.
- */
-static u16
-srom_cc_cmd(si_t *sih, void *ccregs, u32 cmd,
- uint wordoff, u16 data)
-{
- chipcregs_t *cc = (chipcregs_t *) ccregs;
- uint wait_cnt = 1000;
-
- if ((cmd == SRC_OP_READ) || (cmd == SRC_OP_WRITE)) {
- W_REG(&cc->sromaddress, wordoff * 2);
- if (cmd == SRC_OP_WRITE)
- W_REG(&cc->sromdata, data);
- }
-
- W_REG(&cc->sromcontrol, SRC_START | cmd);
-
- while (wait_cnt--) {
- if ((R_REG(&cc->sromcontrol) & SRC_BUSY) == 0)
- break;
- }
-
- if (!wait_cnt) {
- return 0xffff;
- }
- if (cmd == SRC_OP_READ)
- return (u16) R_REG(&cc->sromdata);
- else
- return 0xffff;
-}
-
-static inline void ltoh16_buf(u16 *buf, unsigned int size)
-{
- for (size /= 2; size; size--)
- *(buf + size) = le16_to_cpu(*(buf + size));
-}
-
-static inline void htol16_buf(u16 *buf, unsigned int size)
-{
- for (size /= 2; size; size--)
- *(buf + size) = cpu_to_le16(*(buf + size));
-}
-
-/*
- * Read in and validate sprom.
- * Return 0 on success, nonzero on error.
- */
-static int
-sprom_read_pci(si_t *sih, u16 *sprom, uint wordoff,
- u16 *buf, uint nwords, bool check_crc)
-{
- int err = 0;
- uint i;
- void *ccregs = NULL;
-
- /* read the sprom */
- for (i = 0; i < nwords; i++) {
-
- if (sih->ccrev > 31 && ISSIM_ENAB(sih)) {
- /* use indirect since direct is too slow on QT */
- if ((sih->cccaps & CC_CAP_SROM) == 0)
- return 1;
-
- ccregs = (void *)((u8 *) sprom - CC_SROM_OTP);
- buf[i] =
- srom_cc_cmd(sih, ccregs, SRC_OP_READ,
- wordoff + i, 0);
-
- } else {
- if (ISSIM_ENAB(sih))
- buf[i] = R_REG(&sprom[wordoff + i]);
-
- buf[i] = R_REG(&sprom[wordoff + i]);
- }
-
- }
-
- /* bypass crc checking for simulation to allow srom hack */
- if (ISSIM_ENAB(sih))
- return err;
-
- if (check_crc) {
-
- if (buf[0] == 0xffff) {
- /* The hardware thinks that an srom that starts with 0xffff
- * is blank, regardless of the rest of the content, so declare
- * it bad.
- */
- return 1;
- }
-
- /* fixup the endianness so crc8 will pass */
- htol16_buf(buf, nwords * 2);
- if (bcm_crc8((u8 *) buf, nwords * 2, CRC8_INIT_VALUE) !=
- CRC8_GOOD_VALUE) {
- /* DBG only: PCI always reads srom4 first, then srom8/9 */
- err = 1;
- }
- /* now correct the endianness of the byte array */
- ltoh16_buf(buf, nwords * 2);
- }
- return err;
-}
-
-#if defined(BCMNVRAMR)
-static int otp_read_pci(si_t *sih, u16 *buf, uint bufsz)
-{
- u8 *otp;
- uint sz = OTP_SZ_MAX / 2; /* size in words */
- int err = 0;
-
- otp = kzalloc(OTP_SZ_MAX, GFP_ATOMIC);
- if (otp == NULL) {
- return -EBADE;
- }
-
- err = otp_read_region(sih, OTP_HW_RGN, (u16 *) otp, &sz);
-
- memcpy(buf, otp, bufsz);
-
- kfree(otp);
-
- /* Check CRC */
- if (buf[0] == 0xffff) {
- /* The hardware thinks that an srom that starts with 0xffff
- * is blank, regardless of the rest of the content, so declare
- * it bad.
- */
- return 1;
- }
-
- /* fixup the endianness so crc8 will pass */
- htol16_buf(buf, bufsz);
- if (bcm_crc8((u8 *) buf, SROM4_WORDS * 2, CRC8_INIT_VALUE) !=
- CRC8_GOOD_VALUE) {
- err = 1;
- }
- /* now correct the endianness of the byte array */
- ltoh16_buf(buf, bufsz);
-
- return err;
-}
-#endif /* defined(BCMNVRAMR) */
-/*
- * Create variable table from memory.
- * Return 0 on success, nonzero on error.
- */
-static int initvars_table(char *start, char *end,
- char **vars, uint *count)
-{
- int c = (int)(end - start);
-
- /* do it only when there is more than just the null string */
- if (c > 1) {
- char *vp = kmalloc(c, GFP_ATOMIC);
- if (!vp)
- return -ENOMEM;
- memcpy(vp, start, c);
- *vars = vp;
- *count = c;
- } else {
- *vars = NULL;
- *count = 0;
- }
-
- return 0;
-}
-
-/*
- * Find variables with <devpath> from flash. 'base' points to the beginning
- * of the table upon enter and to the end of the table upon exit when success.
- * Return 0 on success, nonzero on error.
- */
-static int initvars_flash(si_t *sih, char **base, uint len)
-{
- char *vp = *base;
- char *flash;
- int err;
- char *s;
- uint l, dl, copy_len;
- char devpath[SI_DEVPATH_BUFSZ];
-
- /* allocate memory and read in flash */
- flash = kmalloc(NVRAM_SPACE, GFP_ATOMIC);
- if (!flash)
- return -ENOMEM;
- err = nvram_getall(flash, NVRAM_SPACE);
- if (err)
- goto exit;
-
- ai_devpath(sih, devpath, sizeof(devpath));
-
- /* grab vars with the <devpath> prefix in name */
- dl = strlen(devpath);
- for (s = flash; s && *s; s += l + 1) {
- l = strlen(s);
-
- /* skip non-matching variable */
- if (strncmp(s, devpath, dl))
- continue;
-
- /* is there enough room to copy? */
- copy_len = l - dl + 1;
- if (len < copy_len) {
- err = -EOVERFLOW;
- goto exit;
- }
-
- /* no prefix, just the name=value */
- strncpy(vp, &s[dl], copy_len);
- vp += copy_len;
- len -= copy_len;
- }
-
- /* add null string as terminator */
- if (len < 1) {
- err = -EOVERFLOW;
- goto exit;
- }
- *vp++ = '\0';
-
- *base = vp;
-
- exit: kfree(flash);
- return err;
-}
-
-/*
- * Initialize nonvolatile variable table from flash.
- * Return 0 on success, nonzero on error.
- */
-static int initvars_flash_si(si_t *sih, char **vars, uint *count)
-{
- char *vp, *base;
- int err;
-
- base = vp = kmalloc(MAXSZ_NVRAM_VARS, GFP_ATOMIC);
- if (!vp)
- return -ENOMEM;
-
- err = initvars_flash(sih, &vp, MAXSZ_NVRAM_VARS);
- if (err == 0)
- err = initvars_table(base, vp, vars, count);
-
- kfree(base);
-
- return err;
-}
-
-/* Parse SROM and create name=value pairs. 'srom' points to
- * the SROM word array. 'off' specifies the offset of the
- * first word 'srom' points to, which should be either 0 or
- * SROM3_SWRG_OFF (full SROM or software region).
- */
-
-static uint mask_shift(u16 mask)
-{
- uint i;
- for (i = 0; i < (sizeof(mask) << 3); i++) {
- if (mask & (1 << i))
- return i;
- }
- return 0;
-}
-
-static uint mask_width(u16 mask)
-{
- int i;
- for (i = (sizeof(mask) << 3) - 1; i >= 0; i--) {
- if (mask & (1 << i))
- return (uint) (i - mask_shift(mask) + 1);
- }
- return 0;
-}
-
-static void _initvars_srom_pci(u8 sromrev, u16 *srom, uint off, varbuf_t *b)
-{
- u16 w;
- u32 val;
- const sromvar_t *srv;
- uint width;
- uint flags;
- u32 sr = (1 << sromrev);
-
- varbuf_append(b, "sromrev=%d", sromrev);
-
- for (srv = pci_sromvars; srv->name != NULL; srv++) {
- const char *name;
-
- if ((srv->revmask & sr) == 0)
- continue;
-
- if (srv->off < off)
- continue;
-
- flags = srv->flags;
- name = srv->name;
-
- /* This entry is for mfgc only. Don't generate a param for it. */
- if (flags & SRFL_NOVAR)
- continue;
-
- if (flags & SRFL_ETHADDR) {
- u8 ea[ETH_ALEN];
-
- ea[0] = (srom[srv->off - off] >> 8) & 0xff;
- ea[1] = srom[srv->off - off] & 0xff;
- ea[2] = (srom[srv->off + 1 - off] >> 8) & 0xff;
- ea[3] = srom[srv->off + 1 - off] & 0xff;
- ea[4] = (srom[srv->off + 2 - off] >> 8) & 0xff;
- ea[5] = srom[srv->off + 2 - off] & 0xff;
-
- varbuf_append(b, "%s=%pM", name, ea);
- } else {
- w = srom[srv->off - off];
- val = (w & srv->mask) >> mask_shift(srv->mask);
- width = mask_width(srv->mask);
-
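- /* SRFL_MORE: the value continues in the next table entry; accumulate
- * the extra bits above the bits already collected.
- */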
- while (srv->flags & SRFL_MORE) {
- srv++;
- if (srv->off == 0 || srv->off < off)
- continue;
-
- w = srom[srv->off - off];
- val +=
- ((w & srv->mask) >> mask_shift(srv->
- mask)) <<
- width;
- width += mask_width(srv->mask);
- }
-
- if ((flags & SRFL_NOFFS)
- && ((int)val == (1 << width) - 1))
- continue;
-
- if (flags & SRFL_CCODE) {
- if (val == 0)
- varbuf_append(b, "ccode=");
- else
- varbuf_append(b, "ccode=%c%c",
- (val >> 8), (val & 0xff));
- }
- /* LED Powersave duty cycle has to be scaled:
- *(oncount >> 24) (offcount >> 8)
- */
- else if (flags & SRFL_LEDDC) {
- u32 w32 = (((val >> 8) & 0xff) << 24) | /* oncount */
- (((val & 0xff)) << 8); /* offcount */
- varbuf_append(b, "leddc=%d", w32);
- } else if (flags & SRFL_PRHEX)
- varbuf_append(b, "%s=0x%x", name, val);
- else if ((flags & SRFL_PRSIGN)
- && (val & (1 << (width - 1))))
- varbuf_append(b, "%s=%d", name,
- (int)(val | (~0 << width)));
- else
- varbuf_append(b, "%s=%u", name, val);
- }
- }
-
- if (sromrev >= 4) {
- /* Do per-path variables */
- uint p, pb, psz;
-
- if (sromrev >= 8) {
- pb = SROM8_PATH0;
- psz = SROM8_PATH1 - SROM8_PATH0;
- } else {
- pb = SROM4_PATH0;
- psz = SROM4_PATH1 - SROM4_PATH0;
- }
-
- for (p = 0; p < MAX_PATH_SROM; p++) {
- for (srv = perpath_pci_sromvars; srv->name != NULL;
- srv++) {
- if ((srv->revmask & sr) == 0)
- continue;
-
- if (pb + srv->off < off)
- continue;
-
- /* This entry is for mfgc only. Don't generate a param for it. */
- if (srv->flags & SRFL_NOVAR)
- continue;
-
- w = srom[pb + srv->off - off];
- val = (w & srv->mask) >> mask_shift(srv->mask);
- width = mask_width(srv->mask);
-
- /* Cheating: no per-path var is more than 1 word */
-
- if ((srv->flags & SRFL_NOFFS)
- && ((int)val == (1 << width) - 1))
- continue;
-
- if (srv->flags & SRFL_PRHEX)
- varbuf_append(b, "%s%d=0x%x", srv->name,
- p, val);
- else
- varbuf_append(b, "%s%d=%d", srv->name,
- p, val);
- }
- pb += psz;
- }
- }
-}
-
-/*
- * Initialize nonvolatile variable table from sprom.
- * Return 0 on success, nonzero on error.
- */
-static int initvars_srom_pci(si_t *sih, void *curmap, char **vars, uint *count)
-{
- u16 *srom, *sromwindow;
- u8 sromrev = 0;
- u32 sr;
- varbuf_t b;
- char *vp, *base = NULL;
- bool flash = false;
- int err = 0;
-
- /*
- * Apply CRC over SROM content regardless of whether SROM is present,
- * and use variable <devpath>sromrev's existence in flash to decide
- * if we should return an error when CRC fails or read SROM variables
- * from flash.
- */
- srom = kmalloc(SROM_MAX, GFP_ATOMIC);
- if (!srom)
- return -2;
-
- sromwindow = (u16 *) SROM_OFFSET(sih);
- if (ai_is_sprom_available(sih)) {
- err =
- sprom_read_pci(sih, sromwindow, 0, srom, SROM_WORDS,
- true);
-
- if ((srom[SROM4_SIGN] == SROM4_SIGNATURE) ||
- (((sih->buscoretype == PCIE_CORE_ID)
- && (sih->buscorerev >= 6))
- || ((sih->buscoretype == PCI_CORE_ID)
- && (sih->buscorerev >= 0xe)))) {
- /* sromrev >= 4, read more */
- err =
- sprom_read_pci(sih, sromwindow, 0, srom,
- SROM4_WORDS, true);
- sromrev = srom[SROM4_CRCREV] & 0xff;
- } else if (err == 0) {
- /* srom is good and is rev < 4 */
- /* top word of sprom contains version and crc8 */
- sromrev = srom[SROM_CRCREV] & 0xff;
- /* bcm4401 sroms misprogrammed */
- if (sromrev == 0x10)
- sromrev = 1;
- }
- }
-#if defined(BCMNVRAMR)
- /* Use OTP if SPROM not available */
- else {
- err = otp_read_pci(sih, srom, SROM_MAX);
- if (err == 0)
- /* OTP only contain SROM rev8/rev9 for now */
- sromrev = srom[SROM4_CRCREV] & 0xff;
- else
- err = 1;
- }
-#else
- else
- err = 1;
-#endif
-
- /*
- * We want internal/wltest driver to come up with default
- * sromvars so we can program a blank SPROM/OTP.
- */
- if (err) {
- char *value;
- u32 val;
- val = 0;
-
- value = ai_getdevpathvar(sih, "sromrev");
- if (value) {
- sromrev = (u8) simple_strtoul(value, NULL, 0);
- flash = true;
- goto varscont;
- }
-
- value = ai_getnvramflvar(sih, "sromrev");
- if (value) {
- err = 0;
- goto errout;
- }
-
- {
- err = -1;
- goto errout;
- }
- }
-
- varscont:
- /* Bitmask for the sromrev */
- sr = 1 << sromrev;
-
- /* srom version check: Current valid versions: 1, 2, 3, 4, 5, 8, 9 */
- if ((sr & 0x33e) == 0) {
- err = -2;
- goto errout;
- }
-
- base = vp = kmalloc(MAXSZ_NVRAM_VARS, GFP_ATOMIC);
- if (!vp) {
- err = -2;
- goto errout;
- }
-
- /* read variables from flash */
- if (flash) {
- err = initvars_flash(sih, &vp, MAXSZ_NVRAM_VARS);
- if (err)
- goto errout;
- goto varsdone;
- }
-
- varbuf_init(&b, base, MAXSZ_NVRAM_VARS);
-
- /* parse SROM into name=value pairs. */
- _initvars_srom_pci(sromrev, srom, 0, &b);
-
- /* final nullbyte terminator */
- vp = b.buf;
- *vp++ = '\0';
-
- varsdone:
- err = initvars_table(base, vp, vars, count);
-
- errout:
- if (base)
- kfree(base);
-
- kfree(srom);
- return err;
-}
-
-
-static int initvars_srom_si(si_t *sih, void *curmap, char **vars, uint *varsz)
-{
- /* Search flash nvram section for srom variables */
- return initvars_flash_si(sih, vars, varsz);
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_bmac.c b/drivers/staging/brcm80211/brcmsmac/bmac.c
index 45349261061..b25c5170556 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_bmac.c
+++ b/drivers/staging/brcm80211/brcmsmac/bmac.c
@@ -13,49 +13,20 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
+#include <net/mac80211.h>
-#include <proto/802.11.h>
-#include <bcmdefs.h>
-#include <bcmdevs.h>
-#include <bcmwifi.h>
+#include <brcm_hw_ids.h>
#include <aiutils.h>
-#include <bcmsrom.h>
-#include <bcmotp.h>
-#include <bcmutils.h>
-#include <bcmnvram.h>
-#include <wlioctl.h>
-#include <sbconfig.h>
-#include <sbchipc.h>
-#include <pcicfg.h>
-#include <sbhnddma.h>
-#include <hnddma.h>
-
-#include "wlc_types.h"
-#include "wlc_pmu.h"
-#include "d11.h"
-#include "wlc_cfg.h"
-#include "wlc_rate.h"
-#include "wlc_scb.h"
-#include "wlc_pub.h"
-#include "wlc_key.h"
-#include "wlc_phy_shim.h"
-#include "phy/wlc_phy_hal.h"
-#include "wlc_channel.h"
-#include "wlc_main.h"
-#include "wl_export.h"
-#include "wl_ucode.h"
-#include "wlc_antsel.h"
-#include "pcie_core.h"
-#include "wlc_alloc.h"
-#include "wl_dbg.h"
-#include "wlc_bmac.h"
+#include <chipcommon.h>
+#include "types.h"
+#include "rate.h"
+#include "phy/phy_hal.h"
+#include "channel.h"
+#include "main.h"
+#include "ucode_loader.h"
+#include "mac80211_if.h"
+#include "bmac.h"
#define TIMER_INTERVAL_WATCHDOG_BMAC 1000 /* watchdog timer, in units of ms */
@@ -67,7 +38,6 @@
#define SYNTHPU_DLY_PHY_US_QT 100 /* QT synthpu_dly time in us */
#ifndef BMAC_DUP_TO_REMOVE
-#define WLC_RM_WAIT_TX_SUSPEND 4 /* Wait Tx Suspend */
#define ANTCNT 10 /* vanilla M_MAX_ANTCNT value */
@@ -78,6 +48,9 @@
(void *)&(wlc_hw->regs->fifo64regs[fifonum].dmaxmt) : \
(void *)&(wlc_hw->regs->fifo64regs[fifonum].dmarcv))
+#define APHY_SLOT_TIME 9
+#define BPHY_SLOT_TIME 20
+
/*
* The following table lists the buffer memory allocated to xmt fifos in HW.
* the size is in units of 256 bytes (one block); total size is HW dependent
@@ -97,82 +70,86 @@ static u16 xmtfifo_sz[][NFIFO] = {
{9, 58, 22, 14, 14, 5}, /* corerev 24: 2304, 14848, 5632, 3584, 3584, 1280 */
};
-static void wlc_clkctl_clk(struct wlc_hw_info *wlc, uint mode);
-static void wlc_coreinit(struct wlc_info *wlc);
+static void brcms_b_clkctl_clk(struct brcms_hardware *wlc, uint mode);
+static void brcms_b_coreinit(struct brcms_c_info *wlc);
/* used by wlc_wakeucode_init() */
-static void wlc_write_inits(struct wlc_hw_info *wlc_hw,
+static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
const struct d11init *inits);
-static void wlc_ucode_write(struct wlc_hw_info *wlc_hw, const u32 ucode[],
+static void brcms_ucode_write(struct brcms_hardware *wlc_hw, const u32 ucode[],
const uint nbytes);
-static void wlc_ucode_download(struct wlc_hw_info *wlc);
-static void wlc_ucode_txant_set(struct wlc_hw_info *wlc_hw);
-
-/* used by wlc_dpc() */
-static bool wlc_bmac_dotxstatus(struct wlc_hw_info *wlc, tx_status_t *txs,
- u32 s2);
-static bool wlc_bmac_txstatus(struct wlc_hw_info *wlc, bool bound, bool *fatal);
-static bool wlc_bmac_recv(struct wlc_hw_info *wlc_hw, uint fifo, bool bound);
-
-/* used by wlc_down() */
-static void wlc_flushqueues(struct wlc_info *wlc);
-
-static void wlc_write_mhf(struct wlc_hw_info *wlc_hw, u16 *mhfs);
-static void wlc_mctrl_reset(struct wlc_hw_info *wlc_hw);
-static void wlc_corerev_fifofixup(struct wlc_hw_info *wlc_hw);
-static bool wlc_bmac_tx_fifo_suspended(struct wlc_hw_info *wlc_hw,
+static void brcms_ucode_download(struct brcms_hardware *wlc);
+static void brcms_c_ucode_txant_set(struct brcms_hardware *wlc_hw);
+
+/* used by brcms_c_dpc() */
+static bool brcms_b_dotxstatus(struct brcms_hardware *wlc,
+ struct tx_status *txs, u32 s2);
+static bool brcms_b_txstatus(struct brcms_hardware *wlc, bool bound,
+ bool *fatal);
+static bool brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound);
+
+/* used by brcms_c_down() */
+static void brcms_c_flushqueues(struct brcms_c_info *wlc);
+
+static void brcms_c_write_mhf(struct brcms_hardware *wlc_hw, u16 *mhfs);
+static void brcms_c_mctrl_reset(struct brcms_hardware *wlc_hw);
+static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw);
+static bool brcms_b_tx_fifo_suspended(struct brcms_hardware *wlc_hw,
uint tx_fifo);
-static void wlc_bmac_tx_fifo_suspend(struct wlc_hw_info *wlc_hw, uint tx_fifo);
-static void wlc_bmac_tx_fifo_resume(struct wlc_hw_info *wlc_hw, uint tx_fifo);
+static void brcms_b_tx_fifo_suspend(struct brcms_hardware *wlc_hw,
+ uint tx_fifo);
+static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw,
+ uint tx_fifo);
/* Low Level Prototypes */
-static int wlc_bmac_bandtype(struct wlc_hw_info *wlc_hw);
-static void wlc_bmac_info_init(struct wlc_hw_info *wlc_hw);
-static void wlc_bmac_xtal(struct wlc_hw_info *wlc_hw, bool want);
-static u16 wlc_bmac_read_objmem(struct wlc_hw_info *wlc_hw, uint offset,
+static int brcms_b_bandtype(struct brcms_hardware *wlc_hw);
+static void brcms_b_info_init(struct brcms_hardware *wlc_hw);
+static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want);
+static u16 brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset,
u32 sel);
-static void wlc_bmac_write_objmem(struct wlc_hw_info *wlc_hw, uint offset,
+static void brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset,
u16 v, u32 sel);
-static void wlc_bmac_core_phy_clk(struct wlc_hw_info *wlc_hw, bool clk);
-static bool wlc_bmac_attach_dmapio(struct wlc_info *wlc, uint j, bool wme);
-static void wlc_bmac_detach_dmapio(struct wlc_hw_info *wlc_hw);
-static void wlc_ucode_bsinit(struct wlc_hw_info *wlc_hw);
-static bool wlc_validboardtype(struct wlc_hw_info *wlc);
-static bool wlc_isgoodchip(struct wlc_hw_info *wlc_hw);
-static bool wlc_bmac_validate_chip_access(struct wlc_hw_info *wlc_hw);
-static char *wlc_get_macaddr(struct wlc_hw_info *wlc_hw);
-static void wlc_mhfdef(struct wlc_info *wlc, u16 *mhfs, u16 mhf2_init);
-static void wlc_mctrl_write(struct wlc_hw_info *wlc_hw);
-static void wlc_bmac_mute(struct wlc_hw_info *wlc_hw, bool want, mbool flags);
-static void wlc_ucode_mute_override_set(struct wlc_hw_info *wlc_hw);
-static void wlc_ucode_mute_override_clear(struct wlc_hw_info *wlc_hw);
-static u32 wlc_wlintrsoff(struct wlc_info *wlc);
-static void wlc_wlintrsrestore(struct wlc_info *wlc, u32 macintmask);
-static void wlc_gpio_init(struct wlc_info *wlc);
-static void wlc_write_hw_bcntemplate0(struct wlc_hw_info *wlc_hw, void *bcn,
- int len);
-static void wlc_write_hw_bcntemplate1(struct wlc_hw_info *wlc_hw, void *bcn,
- int len);
-static void wlc_bmac_bsinit(struct wlc_info *wlc, chanspec_t chanspec);
-static u32 wlc_setband_inact(struct wlc_info *wlc, uint bandunit);
-static void wlc_bmac_setband(struct wlc_hw_info *wlc_hw, uint bandunit,
+static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk);
+static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme);
+static void brcms_b_detach_dmapio(struct brcms_hardware *wlc_hw);
+static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw);
+static bool brcms_c_validboardtype(struct brcms_hardware *wlc);
+static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw);
+static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw);
+static char *brcms_c_get_macaddr(struct brcms_hardware *wlc_hw);
+static void brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init);
+static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw);
+static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool want,
+ mbool flags);
+static void brcms_c_ucode_mute_override_set(struct brcms_hardware *wlc_hw);
+static void brcms_c_ucode_mute_override_clear(struct brcms_hardware *wlc_hw);
+static u32 brcms_c_wlintrsoff(struct brcms_c_info *wlc);
+static void brcms_c_wlintrsrestore(struct brcms_c_info *wlc, u32 macintmask);
+static void brcms_c_gpio_init(struct brcms_c_info *wlc);
+static void brcms_c_write_hw_bcntemplate0(struct brcms_hardware *wlc_hw,
+ void *bcn, int len);
+static void brcms_c_write_hw_bcntemplate1(struct brcms_hardware *wlc_hw,
+ void *bcn, int len);
+static void brcms_b_bsinit(struct brcms_c_info *wlc, chanspec_t chanspec);
+static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit);
+static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
chanspec_t chanspec);
-static void wlc_bmac_update_slot_timing(struct wlc_hw_info *wlc_hw,
+static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
bool shortslot);
-static void wlc_upd_ofdm_pctl1_table(struct wlc_hw_info *wlc_hw);
-static u16 wlc_bmac_ofdm_ratetable_offset(struct wlc_hw_info *wlc_hw,
+static void brcms_upd_ofdm_pctl1_table(struct brcms_hardware *wlc_hw);
+static u16 brcms_b_ofdm_ratetable_offset(struct brcms_hardware *wlc_hw,
u8 rate);
/* === Low Level functions === */
-void wlc_bmac_set_shortslot(struct wlc_hw_info *wlc_hw, bool shortslot)
+void brcms_b_set_shortslot(struct brcms_hardware *wlc_hw, bool shortslot)
{
wlc_hw->shortslot = shortslot;
- if (BAND_2G(wlc_bmac_bandtype(wlc_hw)) && wlc_hw->up) {
- wlc_suspend_mac_and_wait(wlc_hw->wlc);
- wlc_bmac_update_slot_timing(wlc_hw, shortslot);
- wlc_enable_mac(wlc_hw->wlc);
+ if (BAND_2G(brcms_b_bandtype(wlc_hw)) && wlc_hw->up) {
+ brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
+ brcms_b_update_slot_timing(wlc_hw, shortslot);
+ brcms_c_enable_mac(wlc_hw->wlc);
}
}
@@ -181,7 +158,7 @@ void wlc_bmac_set_shortslot(struct wlc_hw_info *wlc_hw, bool shortslot)
* or shortslot 11g (9us slots)
* The PSM needs to be suspended for this call.
*/
-static void wlc_bmac_update_slot_timing(struct wlc_hw_info *wlc_hw,
+static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
bool shortslot)
{
d11regs_t *regs;
@@ -191,25 +168,25 @@ static void wlc_bmac_update_slot_timing(struct wlc_hw_info *wlc_hw,
if (shortslot) {
/* 11g short slot: 11a timing */
W_REG(&regs->ifs_slot, 0x0207); /* APHY_SLOT_TIME */
- wlc_bmac_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME);
+ brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME);
} else {
/* 11g long slot: 11b timing */
W_REG(&regs->ifs_slot, 0x0212); /* BPHY_SLOT_TIME */
- wlc_bmac_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME);
+ brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME);
}
}
-static void WLBANDINITFN(wlc_ucode_bsinit) (struct wlc_hw_info *wlc_hw)
+static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
{
struct wiphy *wiphy = wlc_hw->wlc->wiphy;
/* init microcode host flags */
- wlc_write_mhf(wlc_hw, wlc_hw->band->mhfs);
+ brcms_c_write_mhf(wlc_hw, wlc_hw->band->mhfs);
/* do band-specific ucode IHR, SHM, and SCR inits */
if (D11REV_IS(wlc_hw->corerev, 23)) {
- if (WLCISNPHY(wlc_hw->band)) {
- wlc_write_inits(wlc_hw, d11n0bsinitvals16);
+ if (BRCMS_ISNPHY(wlc_hw->band)) {
+ brcms_c_write_inits(wlc_hw, d11n0bsinitvals16);
} else {
wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
" %d\n", __func__, wlc_hw->unit,
@@ -217,8 +194,9 @@ static void WLBANDINITFN(wlc_ucode_bsinit) (struct wlc_hw_info *wlc_hw)
}
} else {
if (D11REV_IS(wlc_hw->corerev, 24)) {
- if (WLCISLCNPHY(wlc_hw->band)) {
- wlc_write_inits(wlc_hw, d11lcn0bsinitvals24);
+ if (BRCMS_ISLCNPHY(wlc_hw->band)) {
+ brcms_c_write_inits(wlc_hw,
+ d11lcn0bsinitvals24);
} else
wiphy_err(wiphy, "%s: wl%d: unsupported phy in"
" core rev %d\n", __func__,
@@ -231,9 +209,10 @@ static void WLBANDINITFN(wlc_ucode_bsinit) (struct wlc_hw_info *wlc_hw)
}
/* switch to new band but leave it inactive */
-static u32 WLBANDINITFN(wlc_setband_inact) (struct wlc_info *wlc, uint bandunit)
+static u32 brcms_c_setband_inact(struct brcms_c_info *wlc,
+ uint bandunit)
{
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
u32 macintmask;
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
@@ -241,14 +220,14 @@ static u32 WLBANDINITFN(wlc_setband_inact) (struct wlc_info *wlc, uint bandunit)
WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
/* disable interrupts */
- macintmask = wl_intrsoff(wlc->wl);
+ macintmask = brcms_intrsoff(wlc->wl);
/* radio off */
wlc_phy_switch_radio(wlc_hw->band->pi, OFF);
- wlc_bmac_core_phy_clk(wlc_hw, OFF);
+ brcms_b_core_phy_clk(wlc_hw, OFF);
- wlc_setxband(wlc_hw, bandunit);
+ brcms_c_setxband(wlc_hw, bandunit);
return macintmask;
}
@@ -259,14 +238,14 @@ static u32 WLBANDINITFN(wlc_setband_inact) (struct wlc_info *wlc, uint bandunit)
* Param 'bound' indicates the max. number of frames to process before breaking out.
*/
static bool
-wlc_bmac_recv(struct wlc_hw_info *wlc_hw, uint fifo, bool bound)
+brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
{
struct sk_buff *p;
struct sk_buff *head = NULL;
struct sk_buff *tail = NULL;
uint n = 0;
uint bound_limit = bound ? wlc_hw->wlc->pub->tunables->rxbnd : -1;
- wlc_d11rxhdr_t *wlc_rxhdr = NULL;
+ struct brcms_d11rxhdr *wlc_rxhdr = NULL;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
/* gather received frames */
@@ -292,12 +271,12 @@ wlc_bmac_recv(struct wlc_hw_info *wlc_hw, uint fifo, bool bound)
head = head->prev;
p->prev = NULL;
- wlc_rxhdr = (wlc_d11rxhdr_t *) p->data;
+ wlc_rxhdr = (struct brcms_d11rxhdr *) p->data;
/* compute the RSSI from d11rxhdr and record it in wlc_rxd11hr */
wlc_phy_rssi_compute(wlc_hw->band->pi, wlc_rxhdr);
- wlc_recv(wlc_hw->wlc, p);
+ brcms_c_recv(wlc_hw->wlc, p);
}
return n >= bound_limit;
@@ -307,10 +286,10 @@ wlc_bmac_recv(struct wlc_hw_info *wlc_hw, uint fifo, bool bound)
* Return true if another dpc needs to be re-scheduled. false otherwise.
* Param 'bounded' indicates if applicable loops should be bounded.
*/
-bool wlc_dpc(struct wlc_info *wlc, bool bounded)
+bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
{
u32 macintstatus;
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
d11regs_t *regs = wlc_hw->regs;
bool fatal = false;
struct wiphy *wiphy = wlc->wiphy;
@@ -318,7 +297,7 @@ bool wlc_dpc(struct wlc_info *wlc, bool bounded)
if (DEVICEREMOVED(wlc)) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
- wl_down(wlc->wl);
+ brcms_down(wlc->wl);
return false;
}
@@ -333,18 +312,14 @@ bool wlc_dpc(struct wlc_info *wlc, bool bounded)
/* BCN template is available */
/* ZZZ: Use AP_ACTIVE ? */
- if (AP_ENAB(wlc->pub) && (!APSTA_ENAB(wlc->pub) || wlc->aps_associated)
+ if (AP_ENAB(wlc->pub) && (!APSTA_ENAB(wlc->pub))
&& (macintstatus & MI_BCNTPL)) {
- wlc_update_beacon(wlc);
- }
-
- /* PMQ entry addition */
- if (macintstatus & MI_PMQ) {
+ brcms_c_update_beacon(wlc);
}
/* tx status */
if (macintstatus & MI_TFS) {
- if (wlc_bmac_txstatus(wlc->hw, bounded, &fatal))
+ if (brcms_b_txstatus(wlc->hw, bounded, &fatal))
wlc->macintstatus |= MI_TFS;
if (fatal) {
wiphy_err(wiphy, "MI_TFS: fatal\n");
@@ -353,7 +328,7 @@ bool wlc_dpc(struct wlc_info *wlc, bool bounded)
}
if (macintstatus & (MI_TBTT | MI_DTIM_TBTT))
- wlc_tbtt(wlc, regs);
+ brcms_c_tbtt(wlc);
/* ATIM window end */
if (macintstatus & MI_ATIMWINEND) {
@@ -363,18 +338,13 @@ bool wlc_dpc(struct wlc_info *wlc, bool bounded)
}
/* received data or control frame; MI_DMAINT indicates an RX_FIFO interrupt */
- if (macintstatus & MI_DMAINT) {
- if (wlc_bmac_recv(wlc_hw, RX_FIFO, bounded)) {
+ if (macintstatus & MI_DMAINT)
+ if (brcms_b_recv(wlc_hw, RX_FIFO, bounded))
wlc->macintstatus |= MI_DMAINT;
- }
- }
/* TX FIFO suspend/flush completion */
- if (macintstatus & MI_TXSTOP) {
- if (wlc_bmac_tx_fifo_suspended(wlc_hw, TX_DATA_FIFO)) {
- /* wiphy_err(wiphy, "dpc: fifo_suspend_comlete\n"); */
- }
- }
+ if (macintstatus & MI_TXSTOP)
+ brcms_b_tx_fifo_suspended(wlc_hw, TX_DATA_FIFO);
/* noise sample collected */
if (macintstatus & MI_BG_NOISE) {
@@ -389,7 +359,7 @@ bool wlc_dpc(struct wlc_info *wlc, bool bounded)
__func__, wlc_hw->sih->chip,
wlc_hw->sih->chiprev);
/* big hammer */
- wl_init(wlc->wl);
+ brcms_init(wlc->wl);
}
/* gptimer timeout */
@@ -400,26 +370,26 @@ bool wlc_dpc(struct wlc_info *wlc, bool bounded)
if (macintstatus & MI_RFDISABLE) {
BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the"
" RF Disable Input\n", wlc_hw->unit);
- wl_rfkill_set_hw_state(wlc->wl);
+ brcms_rfkill_set_hw_state(wlc->wl);
}
/* send any enq'd tx packets. Just makes sure to jump start tx */
if (!pktq_empty(&wlc->pkt_queue->q))
- wlc_send_q(wlc);
+ brcms_c_send_q(wlc);
/* it isn't done and needs to be resched if macintstatus is non-zero */
return wlc->macintstatus != 0;
fatal:
- wl_init(wlc->wl);
+ brcms_init(wlc->wl);
return wlc->macintstatus != 0;
}
/* common low-level watchdog code */
-void wlc_bmac_watchdog(void *arg)
+void brcms_b_watchdog(void *arg)
{
- struct wlc_info *wlc = (struct wlc_info *) arg;
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
+ struct brcms_hardware *wlc_hw = wlc->hw;
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
@@ -430,7 +400,7 @@ void wlc_bmac_watchdog(void *arg)
wlc_hw->now++;
/* Check for FIFO error interrupts */
- wlc_bmac_fifoerrors(wlc_hw);
+ brcms_b_fifoerrors(wlc_hw);
/* make sure RX dma has buffers */
dma_rxfill(wlc->hw->di[RX_FIFO]);
@@ -439,7 +409,7 @@ void wlc_bmac_watchdog(void *arg)
}
void
-wlc_bmac_set_chanspec(struct wlc_hw_info *wlc_hw, chanspec_t chanspec,
+brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, chanspec_t chanspec,
bool mute, struct txpwr_limits *txpwr)
{
uint bandunit;
@@ -450,18 +420,18 @@ wlc_bmac_set_chanspec(struct wlc_hw_info *wlc_hw, chanspec_t chanspec,
/* Switch bands if necessary */
if (NBANDS_HW(wlc_hw) > 1) {
- bandunit = CHSPEC_WLCBANDUNIT(chanspec);
+ bandunit = CHSPEC_BANDUNIT(chanspec);
if (wlc_hw->band->bandunit != bandunit) {
- /* wlc_bmac_setband disables other bandunit,
+ /* brcms_b_setband disables other bandunit,
* use light band switch if not up yet
*/
if (wlc_hw->up) {
wlc_phy_chanspec_radio_set(wlc_hw->
bandstate[bandunit]->
pi, chanspec);
- wlc_bmac_setband(wlc_hw, bandunit, chanspec);
+ brcms_b_setband(wlc_hw, bandunit, chanspec);
} else {
- wlc_setxband(wlc_hw, bandunit);
+ brcms_c_setxband(wlc_hw, bandunit);
}
}
}
@@ -478,26 +448,27 @@ wlc_bmac_set_chanspec(struct wlc_hw_info *wlc_hw, chanspec_t chanspec,
wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr, chanspec);
/* Update muting of the channel */
- wlc_bmac_mute(wlc_hw, mute, 0);
+ brcms_b_mute(wlc_hw, mute, 0);
}
}
-int wlc_bmac_state_get(struct wlc_hw_info *wlc_hw, wlc_bmac_state_t *state)
+int brcms_b_state_get(struct brcms_hardware *wlc_hw,
+ struct brcms_b_state *state)
{
state->machwcap = wlc_hw->machwcap;
return 0;
}
-static bool wlc_bmac_attach_dmapio(struct wlc_info *wlc, uint j, bool wme)
+static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
{
uint i;
char name[8];
/* ucode host flag 2 needed for pio mode, independent of band and fifo */
u16 pio_mhf2 = 0;
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
uint unit = wlc_hw->unit;
- wlc_tunables_t *tune = wlc->pub->tunables;
+ struct brcms_tunables *tune = wlc->pub->tunables;
struct wiphy *wiphy = wlc->wiphy;
/* name and offsets for dma_attach */
@@ -529,7 +500,7 @@ static bool wlc_bmac_attach_dmapio(struct wlc_info *wlc, uint j, bool wme)
NULL), DMAREG(wlc_hw, DMA_RX, 0),
(wme ? tune->ntxd : 0), tune->nrxd,
tune->rxbufsz, -1, tune->nrxbufpost,
- WL_HWRXOFF, &wl_msg_level);
+ BRCMS_HWRXOFF, &brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[0]);
/*
@@ -541,7 +512,7 @@ static bool wlc_bmac_attach_dmapio(struct wlc_info *wlc, uint j, bool wme)
wlc_hw->di[1] = dma_attach(name, wlc_hw->sih,
DMAREG(wlc_hw, DMA_TX, 1), NULL,
tune->ntxd, 0, 0, -1, 0, 0,
- &wl_msg_level);
+ &brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[1]);
/*
@@ -552,7 +523,7 @@ static bool wlc_bmac_attach_dmapio(struct wlc_info *wlc, uint j, bool wme)
wlc_hw->di[2] = dma_attach(name, wlc_hw->sih,
DMAREG(wlc_hw, DMA_TX, 2), NULL,
tune->ntxd, 0, 0, -1, 0, 0,
- &wl_msg_level);
+ &brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[2]);
/*
* FIFO 3
@@ -562,7 +533,7 @@ static bool wlc_bmac_attach_dmapio(struct wlc_info *wlc, uint j, bool wme)
wlc_hw->di[3] = dma_attach(name, wlc_hw->sih,
DMAREG(wlc_hw, DMA_TX, 3),
NULL, tune->ntxd, 0, 0, -1,
- 0, 0, &wl_msg_level);
+ 0, 0, &brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[3]);
/* Cleaner to leave this as if with AP defined */
@@ -581,12 +552,12 @@ static bool wlc_bmac_attach_dmapio(struct wlc_info *wlc, uint j, bool wme)
}
/* initial ucode host flags */
- wlc_mhfdef(wlc, wlc_hw->band->mhfs, pio_mhf2);
+ brcms_c_mhfdef(wlc, wlc_hw->band->mhfs, pio_mhf2);
return true;
}
-static void wlc_bmac_detach_dmapio(struct wlc_hw_info *wlc_hw)
+static void brcms_b_detach_dmapio(struct brcms_hardware *wlc_hw)
{
uint j;
@@ -604,17 +575,17 @@ static void wlc_bmac_detach_dmapio(struct wlc_hw_info *wlc_hw)
* initialize software state for each core and band
* put the whole chip in reset(driver down state), no clock
*/
-int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
+int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device, uint unit,
bool piomode, void *regsva, uint bustype, void *btparam)
{
- struct wlc_hw_info *wlc_hw;
+ struct brcms_hardware *wlc_hw;
d11regs_t *regs;
char *macaddr = NULL;
char *vars;
uint err = 0;
uint j;
bool wme = false;
- shared_phy_params_t sha_params;
+ struct shared_phy_params sha_params;
struct wiphy *wiphy = wlc->wiphy;
BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, vendor,
@@ -628,18 +599,18 @@ int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
wlc_hw->band = wlc_hw->bandstate[0];
wlc_hw->_piomode = piomode;
- /* populate struct wlc_hw_info with default values */
- wlc_bmac_info_init(wlc_hw);
+ /* populate struct brcms_hardware with default values */
+ brcms_b_info_init(wlc_hw);
/*
* Do the hardware portion of the attach.
* Also initialize software state that depends on the particular hardware
* we are running.
*/
- wlc_hw->sih = ai_attach((uint) device, regsva, bustype, btparam,
+ wlc_hw->sih = ai_attach(regsva, bustype, btparam,
&wlc_hw->vars, &wlc_hw->vars_size);
if (wlc_hw->sih == NULL) {
- wiphy_err(wiphy, "wl%d: wlc_bmac_attach: si_attach failed\n",
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n",
unit);
err = 11;
goto fail;
@@ -674,8 +645,8 @@ int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
}
/* verify again the device is supported */
- if (!wlc_chipmatch(vendor, device)) {
- wiphy_err(wiphy, "wl%d: wlc_bmac_attach: Unsupported "
+ if (!brcms_c_chipmatch(vendor, device)) {
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
"vendor/device (0x%x/0x%x)\n",
unit, vendor, device);
err = 12;
@@ -695,7 +666,7 @@ int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
wlc->regs = wlc_hw->regs;
/* validate chip, chiprev and corerev */
- if (!wlc_isgoodchip(wlc_hw)) {
+ if (!brcms_c_isgoodchip(wlc_hw)) {
err = 13;
goto fail;
}
@@ -708,11 +679,11 @@ int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
* For PMU chips, the first wlc_clkctl_clk is a no-op since core-clk is still false,
* but it will be called again inside wlc_corereset, after d11 is out of reset.
*/
- wlc_clkctl_clk(wlc_hw, CLK_FAST);
- wlc_bmac_corereset(wlc_hw, WLC_USE_COREFLAGS);
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
- if (!wlc_bmac_validate_chip_access(wlc_hw)) {
- wiphy_err(wiphy, "wl%d: wlc_bmac_attach: validate_chip_access "
+ if (!brcms_b_validate_chip_access(wlc_hw)) {
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: validate_chip_access "
"failed\n", unit);
err = 14;
goto fail;
@@ -724,8 +695,8 @@ int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
if (j == BOARDREV_PROMOTABLE)
j = BOARDREV_PROMOTED;
wlc_hw->boardrev = (u16) j;
- if (!wlc_validboardtype(wlc_hw)) {
- wiphy_err(wiphy, "wl%d: wlc_bmac_attach: Unsupported Broadcom "
+ if (!brcms_c_validboardtype(wlc_hw)) {
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom "
"board type (0x%x)" " or revision level (0x%x)\n",
unit, wlc_hw->sih->boardtype, wlc_hw->boardrev);
err = 15;
@@ -736,14 +707,15 @@ int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
wlc_hw->boardflags2 = (u32) getintvar(vars, "boardflags2");
if (wlc_hw->boardflags & BFL_NOPLLDOWN)
- wlc_bmac_pllreq(wlc_hw, true, WLC_PLLREQ_SHARED);
+ brcms_b_pllreq(wlc_hw, true, BRCMS_PLLREQ_SHARED);
if ((wlc_hw->sih->bustype == PCI_BUS)
&& (ai_pci_war16165(wlc_hw->sih)))
wlc->war16165 = true;
/* check device id(srom, nvram etc.) to set bands */
- if (wlc_hw->deviceid == BCM43224_D11N_ID) {
+ if (wlc_hw->deviceid == BCM43224_D11N_ID ||
+ wlc_hw->deviceid == BCM43224_D11N_ID_VEN1) {
/* Dualband boards */
wlc_hw->_nbands = 2;
} else
@@ -752,8 +724,8 @@ int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
if ((wlc_hw->sih->chip == BCM43225_CHIP_ID))
wlc_hw->_nbands = 1;
- /* BMAC_NOTE: remove init of pub values when wlc_attach() unconditionally does the
- * init of these values
+ /* BMAC_NOTE: remove init of pub values when brcms_c_attach()
+ * unconditionally does the init of these values
*/
wlc->vendorid = wlc_hw->vendorid;
wlc->deviceid = wlc_hw->deviceid;
@@ -768,7 +740,7 @@ int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
wlc_hw->physhim = wlc_phy_shim_attach(wlc_hw, wlc->wl, wlc);
if (wlc_hw->physhim == NULL) {
- wiphy_err(wiphy, "wl%d: wlc_bmac_attach: wlc_phy_shim_attach "
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: wlc_phy_shim_attach "
"failed\n", unit);
err = 25;
goto fail;
@@ -812,12 +784,12 @@ int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
if (IS_SINGLEBAND_5G(wlc_hw->deviceid))
j = BAND_5G_INDEX;
- wlc_setxband(wlc_hw, j);
+ brcms_c_setxband(wlc_hw, j);
wlc_hw->band->bandunit = j;
- wlc_hw->band->bandtype = j ? WLC_BAND_5G : WLC_BAND_2G;
+ wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
wlc->band->bandunit = j;
- wlc->band->bandtype = j ? WLC_BAND_5G : WLC_BAND_2G;
+ wlc->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
wlc->core->coreidx = ai_coreidx(wlc_hw->sih);
wlc_hw->machwcap = R_REG(&regs->machwcap);
@@ -829,10 +801,10 @@ int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
/* Get a phy for this band */
wlc_hw->band->pi = wlc_phy_attach(wlc_hw->phy_sh,
- (void *)regs, wlc_bmac_bandtype(wlc_hw), vars,
+ (void *)regs, brcms_b_bandtype(wlc_hw), vars,
wlc->wiphy);
if (wlc_hw->band->pi == NULL) {
- wiphy_err(wiphy, "wl%d: wlc_bmac_attach: wlc_phy_"
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: wlc_phy_"
"attach failed\n", unit);
err = 17;
goto fail;
@@ -851,19 +823,19 @@ int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
wlc_phy_get_coreflags(wlc_hw->band->pi);
/* verify good phy_type & supported phy revision */
- if (WLCISNPHY(wlc_hw->band)) {
+ if (BRCMS_ISNPHY(wlc_hw->band)) {
if (NCONF_HAS(wlc_hw->band->phyrev))
goto good_phy;
else
goto bad_phy;
- } else if (WLCISLCNPHY(wlc_hw->band)) {
+ } else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
if (LCNCONF_HAS(wlc_hw->band->phyrev))
goto good_phy;
else
goto bad_phy;
} else {
bad_phy:
- wiphy_err(wiphy, "wl%d: wlc_bmac_attach: unsupported "
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: unsupported "
"phy type/rev (%d/%d)\n", unit,
wlc_hw->band->phytype, wlc_hw->band->phyrev);
err = 18;
@@ -887,25 +859,25 @@ int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
wlc_hw->band->CWmin = APHY_CWMIN;
wlc_hw->band->CWmax = PHY_CWMAX;
- if (!wlc_bmac_attach_dmapio(wlc, j, wme)) {
+ if (!brcms_b_attach_dmapio(wlc, j, wme)) {
err = 19;
goto fail;
}
}
/* disable core to match driver "down" state */
- wlc_coredisable(wlc_hw);
+ brcms_c_coredisable(wlc_hw);
/* Match driver "down" state */
if (wlc_hw->sih->bustype == PCI_BUS)
ai_pci_down(wlc_hw->sih);
/* register sb interrupt callback functions */
- ai_register_intr_callback(wlc_hw->sih, (void *)wlc_wlintrsoff,
- (void *)wlc_wlintrsrestore, NULL, wlc);
+ ai_register_intr_callback(wlc_hw->sih, (void *)brcms_c_wlintrsoff,
+ (void *)brcms_c_wlintrsrestore, NULL, wlc);
/* turn off pll and xtal to match driver "down" state */
- wlc_bmac_xtal(wlc_hw, OFF);
+ brcms_b_xtal(wlc_hw, OFF);
/* *********************************************************************
* The hardware is in the DOWN state at this point. D11 core
@@ -918,17 +890,17 @@ int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
*/
/* init etheraddr state variables */
- macaddr = wlc_get_macaddr(wlc_hw);
+ macaddr = brcms_c_get_macaddr(wlc_hw);
if (macaddr == NULL) {
- wiphy_err(wiphy, "wl%d: wlc_bmac_attach: macaddr not found\n",
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: macaddr not found\n",
unit);
err = 21;
goto fail;
}
- bcm_ether_atoe(macaddr, wlc_hw->etheraddr);
+ brcmu_ether_atoe(macaddr, wlc_hw->etheraddr);
if (is_broadcast_ether_addr(wlc_hw->etheraddr) ||
is_zero_ether_addr(wlc_hw->etheraddr)) {
- wiphy_err(wiphy, "wl%d: wlc_bmac_attach: bad macaddr %s\n",
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: bad macaddr %s\n",
unit, macaddr);
err = 22;
goto fail;
@@ -942,19 +914,19 @@ int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device, uint unit,
return err;
fail:
- wiphy_err(wiphy, "wl%d: wlc_bmac_attach: failed with err %d\n", unit,
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: failed with err %d\n", unit,
err);
return err;
}
/*
- * Initialize wlc_info default values ...
+ * Initialize brcms_c_info default values ...
* may get overrides later in this function
* BMAC_NOTES, move low out and resolve the dangling ones
*/
-static void wlc_bmac_info_init(struct wlc_hw_info *wlc_hw)
+static void brcms_b_info_init(struct brcms_hardware *wlc_hw)
{
- struct wlc_info *wlc = wlc_hw->wlc;
+ struct brcms_c_info *wlc = wlc_hw->wlc;
/* set default sw macintmask value */
wlc->defmacintmask = DEF_MACINTMASK;
@@ -974,11 +946,11 @@ static void wlc_bmac_info_init(struct wlc_hw_info *wlc_hw)
/*
* low level detach
*/
-int wlc_bmac_detach(struct wlc_info *wlc)
+int brcms_b_detach(struct brcms_c_info *wlc)
{
uint i;
- struct wlc_hwband *band;
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hw_band *band;
+ struct brcms_hardware *wlc_hw = wlc->hw;
int callbacks;
callbacks = 0;
@@ -993,7 +965,7 @@ int wlc_bmac_detach(struct wlc_info *wlc)
ai_pci_sleep(wlc_hw->sih);
}
- wlc_bmac_detach_dmapio(wlc_hw);
+ brcms_b_detach_dmapio(wlc_hw);
band = wlc_hw->band;
for (i = 0; i < NBANDS_HW(wlc_hw); i++) {
@@ -1006,7 +978,7 @@ int wlc_bmac_detach(struct wlc_info *wlc)
}
/* Free shared phy state */
- wlc_phy_shared_detach(wlc_hw->phy_sh);
+ kfree(wlc_hw->phy_sh);
wlc_phy_shim_detach(wlc_hw->physhim);
@@ -1023,61 +995,61 @@ int wlc_bmac_detach(struct wlc_info *wlc)
}
-void wlc_bmac_reset(struct wlc_hw_info *wlc_hw)
+void brcms_b_reset(struct brcms_hardware *wlc_hw)
{
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
/* reset the core */
if (!DEVICEREMOVED(wlc_hw->wlc))
- wlc_bmac_corereset(wlc_hw, WLC_USE_COREFLAGS);
+ brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
/* purge the dma rings */
- wlc_flushqueues(wlc_hw->wlc);
+ brcms_c_flushqueues(wlc_hw->wlc);
- wlc_reset_bmac_done(wlc_hw->wlc);
+ brcms_c_reset_bmac_done(wlc_hw->wlc);
}
void
-wlc_bmac_init(struct wlc_hw_info *wlc_hw, chanspec_t chanspec,
+brcms_b_init(struct brcms_hardware *wlc_hw, chanspec_t chanspec,
bool mute) {
u32 macintmask;
bool fastclk;
- struct wlc_info *wlc = wlc_hw->wlc;
+ struct brcms_c_info *wlc = wlc_hw->wlc;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
- wlc_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
/* disable interrupts */
- macintmask = wl_intrsoff(wlc->wl);
+ macintmask = brcms_intrsoff(wlc->wl);
/* set up the specified band and chanspec */
- wlc_setxband(wlc_hw, CHSPEC_WLCBANDUNIT(chanspec));
+ brcms_c_setxband(wlc_hw, CHSPEC_BANDUNIT(chanspec));
wlc_phy_chanspec_radio_set(wlc_hw->band->pi, chanspec);
/* do one-time phy inits and calibration */
wlc_phy_cal_init(wlc_hw->band->pi);
/* core-specific initialization */
- wlc_coreinit(wlc);
+ brcms_b_coreinit(wlc);
/* suspend the tx fifos and mute the phy for preism cac time */
if (mute)
- wlc_bmac_mute(wlc_hw, ON, PHY_MUTE_FOR_PREISM);
+ brcms_b_mute(wlc_hw, ON, PHY_MUTE_FOR_PREISM);
/* band-specific inits */
- wlc_bmac_bsinit(wlc, chanspec);
+ brcms_b_bsinit(wlc, chanspec);
/* restore macintmask */
- wl_intrsrestore(wlc->wl, macintmask);
+ brcms_intrsrestore(wlc->wl, macintmask);
- /* seed wake_override with WLC_WAKE_OVERRIDE_MACSUSPEND since the mac is suspended
- * and wlc_enable_mac() will clear this override bit.
+ /* seed wake_override with BRCMS_WAKE_OVERRIDE_MACSUSPEND since the mac
+ * is suspended and brcms_c_enable_mac() will clear this override bit.
*/
- mboolset(wlc_hw->wake_override, WLC_WAKE_OVERRIDE_MACSUSPEND);
+ mboolset(wlc_hw->wake_override, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
/*
* initialize mac_suspend_depth to 1 to match ucode initial suspended state
@@ -1086,10 +1058,10 @@ wlc_bmac_init(struct wlc_hw_info *wlc_hw, chanspec_t chanspec,
/* restore the clk */
if (!fastclk)
- wlc_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+ brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
}
-int wlc_bmac_up_prep(struct wlc_hw_info *wlc_hw)
+int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
{
uint coremask;
@@ -1097,14 +1069,14 @@ int wlc_bmac_up_prep(struct wlc_hw_info *wlc_hw)
/*
* Enable pll and xtal, initialize the power control registers,
- * and force fastclock for the remainder of wlc_up().
+ * and force fastclock for the remainder of brcms_c_up().
*/
- wlc_bmac_xtal(wlc_hw, ON);
+ brcms_b_xtal(wlc_hw, ON);
ai_clkctl_init(wlc_hw->sih);
- wlc_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
/*
- * Configure pci/pcmcia here instead of in wlc_attach()
+ * Configure pci/pcmcia here instead of in brcms_c_attach()
* to allow mfg hotswap: down, hotswap (chip power cycle), up.
*/
coremask = (1 << wlc_hw->wlc->core->coreidx);
@@ -1116,11 +1088,11 @@ int wlc_bmac_up_prep(struct wlc_hw_info *wlc_hw)
* Need to read the hwradio status here to cover the case where the system
* is loaded with the hw radio disabled. We do not want to bring the driver up in this case.
*/
- if (wlc_bmac_radio_read_hwdisabled(wlc_hw)) {
+ if (brcms_b_radio_read_hwdisabled(wlc_hw)) {
/* put SB PCI in down state again */
if (wlc_hw->sih->bustype == PCI_BUS)
ai_pci_down(wlc_hw->sih);
- wlc_bmac_xtal(wlc_hw, OFF);
+ brcms_b_xtal(wlc_hw, OFF);
return -ENOMEDIUM;
}
@@ -1128,12 +1100,12 @@ int wlc_bmac_up_prep(struct wlc_hw_info *wlc_hw)
ai_pci_up(wlc_hw->sih);
/* reset the d11 core */
- wlc_bmac_corereset(wlc_hw, WLC_USE_COREFLAGS);
+ brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
return 0;
}
-int wlc_bmac_up_finish(struct wlc_hw_info *wlc_hw)
+int brcms_b_up_finish(struct brcms_hardware *wlc_hw)
{
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
@@ -1141,12 +1113,12 @@ int wlc_bmac_up_finish(struct wlc_hw_info *wlc_hw)
wlc_phy_hw_state_upd(wlc_hw->band->pi, true);
/* FULLY enable dynamic power control and d11 core interrupt */
- wlc_clkctl_clk(wlc_hw, CLK_DYNAMIC);
- wl_intrson(wlc_hw->wlc->wl);
+ brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+ brcms_intrson(wlc_hw->wlc->wl);
return 0;
}
-int wlc_bmac_down_prep(struct wlc_hw_info *wlc_hw)
+int brcms_b_bmac_down_prep(struct brcms_hardware *wlc_hw)
{
bool dev_gone;
uint callbacks = 0;
@@ -1163,10 +1135,10 @@ int wlc_bmac_down_prep(struct wlc_hw_info *wlc_hw)
wlc_hw->wlc->macintmask = 0;
else {
/* now disable interrupts */
- wl_intrsoff(wlc_hw->wlc->wl);
+ brcms_intrsoff(wlc_hw->wlc->wl);
/* ensure we're running on the pll clock again */
- wlc_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
}
/* down phy at the last of this stage */
callbacks += wlc_phy_down(wlc_hw->band->pi);
@@ -1174,7 +1146,7 @@ int wlc_bmac_down_prep(struct wlc_hw_info *wlc_hw)
return callbacks;
}
-int wlc_bmac_down_finish(struct wlc_hw_info *wlc_hw)
+int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
{
uint callbacks = 0;
bool dev_gone;
@@ -1195,51 +1167,51 @@ int wlc_bmac_down_finish(struct wlc_hw_info *wlc_hw)
wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
/* reclaim any posted packets */
- wlc_flushqueues(wlc_hw->wlc);
+ brcms_c_flushqueues(wlc_hw->wlc);
} else {
/* Reset and disable the core */
if (ai_iscoreup(wlc_hw->sih)) {
if (R_REG(&wlc_hw->regs->maccontrol) &
MCTL_EN_MAC)
- wlc_suspend_mac_and_wait(wlc_hw->wlc);
- callbacks += wl_reset(wlc_hw->wlc->wl);
- wlc_coredisable(wlc_hw);
+ brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
+ callbacks += brcms_reset(wlc_hw->wlc->wl);
+ brcms_c_coredisable(wlc_hw);
}
/* turn off primary xtal and pll */
if (!wlc_hw->noreset) {
if (wlc_hw->sih->bustype == PCI_BUS)
ai_pci_down(wlc_hw->sih);
- wlc_bmac_xtal(wlc_hw, OFF);
+ brcms_b_xtal(wlc_hw, OFF);
}
}
return callbacks;
}
-void wlc_bmac_wait_for_wake(struct wlc_hw_info *wlc_hw)
+void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw)
{
/* delay before first read of ucode state */
udelay(40);
/* wait until ucode is no longer asleep */
- SPINWAIT((wlc_bmac_read_shm(wlc_hw, M_UCODE_DBGST) ==
+ SPINWAIT((brcms_b_read_shm(wlc_hw, M_UCODE_DBGST) ==
DBGST_ASLEEP), wlc_hw->wlc->fastpwrup_dly);
}
-void wlc_bmac_hw_etheraddr(struct wlc_hw_info *wlc_hw, u8 *ea)
+void brcms_b_hw_etheraddr(struct brcms_hardware *wlc_hw, u8 *ea)
{
memcpy(ea, wlc_hw->etheraddr, ETH_ALEN);
}
-static int wlc_bmac_bandtype(struct wlc_hw_info *wlc_hw)
+static int brcms_b_bandtype(struct brcms_hardware *wlc_hw)
{
return wlc_hw->band->bandtype;
}
/* control chip clock to save power, enable dynamic clock or force fast clock */
-static void wlc_clkctl_clk(struct wlc_hw_info *wlc_hw, uint mode)
+static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
{
if (PMUCTL_ENAB(wlc_hw->sih)) {
/* new chips with PMU, CCS_FORCEHT will distribute the HT clock on backplane,
@@ -1301,18 +1273,18 @@ static void wlc_clkctl_clk(struct wlc_hw_info *wlc_hw, uint mode)
*/
if (wlc_hw->forcefastclk)
mboolset(wlc_hw->wake_override,
- WLC_WAKE_OVERRIDE_FORCEFAST);
+ BRCMS_WAKE_OVERRIDE_FORCEFAST);
else
mboolclr(wlc_hw->wake_override,
- WLC_WAKE_OVERRIDE_FORCEFAST);
+ BRCMS_WAKE_OVERRIDE_FORCEFAST);
}
}
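/*
 * Illustrative sketch only (hypothetical helper name): callers of
 * brcms_b_clkctl_clk() bracket work that needs the HT clock with a
 * CLK_FAST request and restore CLK_DYNAMIC afterwards, as
 * brcms_b_init() and brcms_b_bw_set() do elsewhere in this file.
 */
static void example_with_fast_clock(struct brcms_hardware *wlc_hw)
{
	bool fastclk = wlc_hw->forcefastclk;	/* remember current mode */

	if (!fastclk)
		brcms_b_clkctl_clk(wlc_hw, CLK_FAST);

	/* ... register or phy work that requires the fast clock ... */

	if (!fastclk)
		brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
}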
/* set initial host flags value */
static void
-wlc_mhfdef(struct wlc_info *wlc, u16 *mhfs, u16 mhf2_init)
+brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init)
{
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
memset(mhfs, 0, MHFMAX * sizeof(u16));
@@ -1322,7 +1294,7 @@ wlc_mhfdef(struct wlc_info *wlc, u16 *mhfs, u16 mhf2_init)
if (wlc_hw->boardflags & BFL_NOPLLDOWN)
mhfs[MHF1] |= MHF1_FORCEFASTCLK;
- if (WLCISNPHY(wlc_hw->band) && NREV_LT(wlc_hw->band->phyrev, 2)) {
+ if (BRCMS_ISNPHY(wlc_hw->band) && NREV_LT(wlc_hw->band->phyrev, 2)) {
mhfs[MHF2] |= MHF2_NPHY40MHZ_WAR;
mhfs[MHF1] |= MHF1_IQSWAP_WAR;
}
@@ -1334,13 +1306,13 @@ wlc_mhfdef(struct wlc_info *wlc, u16 *mhfs, u16 mhf2_init)
* pre-CLK changes should use wlc_write_mhf to get around the optimization
*
*
- * bands values are: WLC_BAND_AUTO <--- Current band only
- * WLC_BAND_5G <--- 5G band only
- * WLC_BAND_2G <--- 2G band only
- * WLC_BAND_ALL <--- All bands
+ * bands values are: BRCM_BAND_AUTO <--- Current band only
+ * BRCM_BAND_5G <--- 5G band only
+ * BRCM_BAND_2G <--- 2G band only
+ * BRCM_BAND_ALL <--- All bands
*/
void
-wlc_bmac_mhf(struct wlc_hw_info *wlc_hw, u8 idx, u16 mask, u16 val,
+brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask, u16 val,
int bands)
{
u16 save;
@@ -1348,7 +1320,7 @@ wlc_bmac_mhf(struct wlc_hw_info *wlc_hw, u8 idx, u16 mask, u16 val,
M_HOST_FLAGS1, M_HOST_FLAGS2, M_HOST_FLAGS3, M_HOST_FLAGS4,
M_HOST_FLAGS5
};
- struct wlc_hwband *band;
+ struct brcms_hw_band *band;
if ((val & ~mask) || idx >= MHFMAX)
return; /* error condition */
@@ -1357,14 +1329,14 @@ wlc_bmac_mhf(struct wlc_hw_info *wlc_hw, u8 idx, u16 mask, u16 val,
/* Current band only or all bands,
* then set the band to current band
*/
- case WLC_BAND_AUTO:
- case WLC_BAND_ALL:
+ case BRCM_BAND_AUTO:
+ case BRCM_BAND_ALL:
band = wlc_hw->band;
break;
- case WLC_BAND_5G:
+ case BRCM_BAND_5G:
band = wlc_hw->bandstate[BAND_5G_INDEX];
break;
- case WLC_BAND_2G:
+ case BRCM_BAND_2G:
band = wlc_hw->bandstate[BAND_2G_INDEX];
break;
default:
@@ -1380,11 +1352,11 @@ wlc_bmac_mhf(struct wlc_hw_info *wlc_hw, u8 idx, u16 mask, u16 val,
*/
if (wlc_hw->clk && (band->mhfs[idx] != save)
&& (band == wlc_hw->band))
- wlc_bmac_write_shm(wlc_hw, addr[idx],
+ brcms_b_write_shm(wlc_hw, addr[idx],
(u16) band->mhfs[idx]);
}
- if (bands == WLC_BAND_ALL) {
+ if (bands == BRCM_BAND_ALL) {
wlc_hw->bandstate[0]->mhfs[idx] =
(wlc_hw->bandstate[0]->mhfs[idx] & ~mask) | val;
wlc_hw->bandstate[1]->mhfs[idx] =
@@ -1392,20 +1364,20 @@ wlc_bmac_mhf(struct wlc_hw_info *wlc_hw, u8 idx, u16 mask, u16 val,
}
}
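/*
 * Sketch of the bands argument in use (hypothetical wrapper; the real
 * call sites appear in brcms_c_gpio_init() below): BRCM_BAND_ALL
 * updates the cached mhfs[] of both bands and, when the core has
 * clock, also pushes the current band's value to shared memory.
 */
static void example_enable_antsel(struct brcms_hardware *wlc_hw)
{
	brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN, MHF3_ANTSEL_EN,
		    BRCM_BAND_ALL);
}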
-u16 wlc_bmac_mhf_get(struct wlc_hw_info *wlc_hw, u8 idx, int bands)
+u16 brcms_b_mhf_get(struct brcms_hardware *wlc_hw, u8 idx, int bands)
{
- struct wlc_hwband *band;
+ struct brcms_hw_band *band;
if (idx >= MHFMAX)
return 0; /* error condition */
switch (bands) {
- case WLC_BAND_AUTO:
+ case BRCM_BAND_AUTO:
band = wlc_hw->band;
break;
- case WLC_BAND_5G:
+ case BRCM_BAND_5G:
band = wlc_hw->bandstate[BAND_5G_INDEX];
break;
- case WLC_BAND_2G:
+ case BRCM_BAND_2G:
band = wlc_hw->bandstate[BAND_2G_INDEX];
break;
default:
@@ -1418,7 +1390,7 @@ u16 wlc_bmac_mhf_get(struct wlc_hw_info *wlc_hw, u8 idx, int bands)
return band->mhfs[idx];
}
-static void wlc_write_mhf(struct wlc_hw_info *wlc_hw, u16 *mhfs)
+static void brcms_c_write_mhf(struct brcms_hardware *wlc_hw, u16 *mhfs)
{
u8 idx;
u16 addr[] = {
@@ -1427,25 +1399,25 @@ static void wlc_write_mhf(struct wlc_hw_info *wlc_hw, u16 *mhfs)
};
for (idx = 0; idx < MHFMAX; idx++) {
- wlc_bmac_write_shm(wlc_hw, addr[idx], mhfs[idx]);
+ brcms_b_write_shm(wlc_hw, addr[idx], mhfs[idx]);
}
}
/* set the maccontrol register to desired reset state and
* initialize the sw cache of the register
*/
-static void wlc_mctrl_reset(struct wlc_hw_info *wlc_hw)
+static void brcms_c_mctrl_reset(struct brcms_hardware *wlc_hw)
{
/* IHR accesses are always enabled, PSM disabled, HPS off and WAKE on */
wlc_hw->maccontrol = 0;
wlc_hw->suspended_fifos = 0;
wlc_hw->wake_override = 0;
wlc_hw->mute_override = 0;
- wlc_bmac_mctrl(wlc_hw, ~0, MCTL_IHR_EN | MCTL_WAKE);
+ brcms_b_mctrl(wlc_hw, ~0, MCTL_IHR_EN | MCTL_WAKE);
}
/* set or clear maccontrol bits */
-void wlc_bmac_mctrl(struct wlc_hw_info *wlc_hw, u32 mask, u32 val)
+void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val)
{
u32 maccontrol;
u32 new_maccontrol;
@@ -1463,11 +1435,11 @@ void wlc_bmac_mctrl(struct wlc_hw_info *wlc_hw, u32 mask, u32 val)
wlc_hw->maccontrol = new_maccontrol;
/* write the new values with overrides applied */
- wlc_mctrl_write(wlc_hw);
+ brcms_c_mctrl_write(wlc_hw);
}
/* write the software state of maccontrol and overrides to the maccontrol register */
-static void wlc_mctrl_write(struct wlc_hw_info *wlc_hw)
+static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw)
{
u32 maccontrol = wlc_hw->maccontrol;
@@ -1484,7 +1456,8 @@ static void wlc_mctrl_write(struct wlc_hw_info *wlc_hw)
W_REG(&wlc_hw->regs->maccontrol, maccontrol);
}
-void wlc_ucode_wake_override_set(struct wlc_hw_info *wlc_hw, u32 override_bit)
+void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
+ u32 override_bit)
{
if (wlc_hw->wake_override || (wlc_hw->maccontrol & MCTL_WAKE)) {
mboolset(wlc_hw->wake_override, override_bit);
@@ -1493,20 +1466,21 @@ void wlc_ucode_wake_override_set(struct wlc_hw_info *wlc_hw, u32 override_bit)
mboolset(wlc_hw->wake_override, override_bit);
- wlc_mctrl_write(wlc_hw);
- wlc_bmac_wait_for_wake(wlc_hw);
+ brcms_c_mctrl_write(wlc_hw);
+ brcms_b_wait_for_wake(wlc_hw);
return;
}
-void wlc_ucode_wake_override_clear(struct wlc_hw_info *wlc_hw, u32 override_bit)
+void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
+ u32 override_bit)
{
mboolclr(wlc_hw->wake_override, override_bit);
if (wlc_hw->wake_override || (wlc_hw->maccontrol & MCTL_WAKE))
return;
- wlc_mctrl_write(wlc_hw);
+ brcms_c_mctrl_write(wlc_hw);
return;
}
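/*
 * Sketch of the intended set/clear pairing (hypothetical helper): an
 * override bit keeps MCTL_WAKE asserted while work runs that must not
 * see the core asleep; the tx fifo suspend path below uses exactly
 * this with BRCMS_WAKE_OVERRIDE_TXFIFO.
 */
static void example_do_work_awake(struct brcms_hardware *wlc_hw)
{
	brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_TXFIFO);

	/* ... shm/register accesses that need the core awake ... */

	brcms_c_ucode_wake_override_clear(wlc_hw,
					  BRCMS_WAKE_OVERRIDE_TXFIFO);
}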
@@ -1518,7 +1492,7 @@ void wlc_ucode_wake_override_clear(struct wlc_hw_info *wlc_hw, u32 override_bit)
* STA 0 1 <--- This will ensure no beacons
* IBSS 0 0
*/
-static void wlc_ucode_mute_override_set(struct wlc_hw_info *wlc_hw)
+static void brcms_c_ucode_mute_override_set(struct brcms_hardware *wlc_hw)
{
wlc_hw->mute_override = 1;
@@ -1528,13 +1502,13 @@ static void wlc_ucode_mute_override_set(struct wlc_hw_info *wlc_hw)
if ((wlc_hw->maccontrol & (MCTL_AP | MCTL_INFRA)) == MCTL_INFRA)
return;
- wlc_mctrl_write(wlc_hw);
+ brcms_c_mctrl_write(wlc_hw);
return;
}
/* Clear the override on AP and INFRA bits */
-static void wlc_ucode_mute_override_clear(struct wlc_hw_info *wlc_hw)
+static void brcms_c_ucode_mute_override_clear(struct brcms_hardware *wlc_hw)
{
if (wlc_hw->mute_override == 0)
return;
@@ -1547,14 +1521,14 @@ static void wlc_ucode_mute_override_clear(struct wlc_hw_info *wlc_hw)
if ((wlc_hw->maccontrol & (MCTL_AP | MCTL_INFRA)) == MCTL_INFRA)
return;
- wlc_mctrl_write(wlc_hw);
+ brcms_c_mctrl_write(wlc_hw);
}
/*
* Write a MAC address to the given match reg offset in the RXE match engine.
*/
void
-wlc_bmac_set_addrmatch(struct wlc_hw_info *wlc_hw, int match_reg_offset,
+brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
const u8 *addr)
{
d11regs_t *regs;
@@ -1562,7 +1536,7 @@ wlc_bmac_set_addrmatch(struct wlc_hw_info *wlc_hw, int match_reg_offset,
u16 mac_m;
u16 mac_h;
- BCMMSG(wlc_hw->wlc->wiphy, "wl%d: wlc_bmac_set_addrmatch\n",
+ BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n",
wlc_hw->unit);
regs = wlc_hw->regs;
@@ -1579,7 +1553,7 @@ wlc_bmac_set_addrmatch(struct wlc_hw_info *wlc_hw, int match_reg_offset,
}
void
-wlc_bmac_write_template_ram(struct wlc_hw_info *wlc_hw, int offset, int len,
+brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
void *buf)
{
d11regs_t *regs;
@@ -1611,7 +1585,7 @@ wlc_bmac_write_template_ram(struct wlc_hw_info *wlc_hw, int offset, int len,
}
}
-void wlc_bmac_set_cwmin(struct wlc_hw_info *wlc_hw, u16 newmin)
+void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin)
{
wlc_hw->band->CWmin = newmin;
@@ -1620,7 +1594,7 @@ void wlc_bmac_set_cwmin(struct wlc_hw_info *wlc_hw, u16 newmin)
W_REG(&wlc_hw->regs->objdata, newmin);
}
-void wlc_bmac_set_cwmax(struct wlc_hw_info *wlc_hw, u16 newmax)
+void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax)
{
wlc_hw->band->CWmax = newmax;
@@ -1629,124 +1603,126 @@ void wlc_bmac_set_cwmax(struct wlc_hw_info *wlc_hw, u16 newmax)
W_REG(&wlc_hw->regs->objdata, newmax);
}
-void wlc_bmac_bw_set(struct wlc_hw_info *wlc_hw, u16 bw)
+void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
{
bool fastclk;
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
- wlc_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
wlc_phy_bw_state_set(wlc_hw->band->pi, bw);
- wlc_bmac_phy_reset(wlc_hw);
+ brcms_b_phy_reset(wlc_hw);
wlc_phy_init(wlc_hw->band->pi, wlc_phy_chanspec_get(wlc_hw->band->pi));
/* restore the clk */
if (!fastclk)
- wlc_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+ brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
}
static void
-wlc_write_hw_bcntemplate0(struct wlc_hw_info *wlc_hw, void *bcn, int len)
+brcms_c_write_hw_bcntemplate0(struct brcms_hardware *wlc_hw, void *bcn,
+ int len)
{
d11regs_t *regs = wlc_hw->regs;
- wlc_bmac_write_template_ram(wlc_hw, T_BCN0_TPL_BASE, (len + 3) & ~3,
+ brcms_b_write_template_ram(wlc_hw, T_BCN0_TPL_BASE, (len + 3) & ~3,
bcn);
/* write beacon length to SCR */
- wlc_bmac_write_shm(wlc_hw, M_BCN0_FRM_BYTESZ, (u16) len);
+ brcms_b_write_shm(wlc_hw, M_BCN0_FRM_BYTESZ, (u16) len);
/* mark beacon0 valid */
OR_REG(&regs->maccommand, MCMD_BCN0VLD);
}
static void
-wlc_write_hw_bcntemplate1(struct wlc_hw_info *wlc_hw, void *bcn, int len)
+brcms_c_write_hw_bcntemplate1(struct brcms_hardware *wlc_hw, void *bcn,
+ int len)
{
d11regs_t *regs = wlc_hw->regs;
- wlc_bmac_write_template_ram(wlc_hw, T_BCN1_TPL_BASE, (len + 3) & ~3,
+ brcms_b_write_template_ram(wlc_hw, T_BCN1_TPL_BASE, (len + 3) & ~3,
bcn);
/* write beacon length to SCR */
- wlc_bmac_write_shm(wlc_hw, M_BCN1_FRM_BYTESZ, (u16) len);
+ brcms_b_write_shm(wlc_hw, M_BCN1_FRM_BYTESZ, (u16) len);
/* mark beacon1 valid */
OR_REG(&regs->maccommand, MCMD_BCN1VLD);
}
/* mac is assumed to be suspended at this point */
void
-wlc_bmac_write_hw_bcntemplates(struct wlc_hw_info *wlc_hw, void *bcn, int len,
- bool both)
+brcms_b_write_hw_bcntemplates(struct brcms_hardware *wlc_hw, void *bcn,
+ int len, bool both)
{
d11regs_t *regs = wlc_hw->regs;
if (both) {
- wlc_write_hw_bcntemplate0(wlc_hw, bcn, len);
- wlc_write_hw_bcntemplate1(wlc_hw, bcn, len);
+ brcms_c_write_hw_bcntemplate0(wlc_hw, bcn, len);
+ brcms_c_write_hw_bcntemplate1(wlc_hw, bcn, len);
} else {
/* bcn 0 */
if (!(R_REG(&regs->maccommand) & MCMD_BCN0VLD))
- wlc_write_hw_bcntemplate0(wlc_hw, bcn, len);
+ brcms_c_write_hw_bcntemplate0(wlc_hw, bcn, len);
/* bcn 1 */
else if (!
(R_REG(&regs->maccommand) & MCMD_BCN1VLD))
- wlc_write_hw_bcntemplate1(wlc_hw, bcn, len);
+ brcms_c_write_hw_bcntemplate1(wlc_hw, bcn, len);
}
}
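/*
 * Sketch of the caller contract stated above (hypothetical wrapper;
 * illustrative only): the MAC is expected to be suspended around the
 * template write, so a caller would pair it with suspend/enable
 * roughly like this.
 */
static void example_update_beacon(struct brcms_c_info *wlc, void *bcn,
				  int len)
{
	brcms_c_suspend_mac_and_wait(wlc);
	brcms_b_write_hw_bcntemplates(wlc->hw, bcn, len, true);
	brcms_c_enable_mac(wlc);
}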
-static void WLBANDINITFN(wlc_bmac_upd_synthpu) (struct wlc_hw_info *wlc_hw)
+static void brcms_b_upd_synthpu(struct brcms_hardware *wlc_hw)
{
u16 v;
- struct wlc_info *wlc = wlc_hw->wlc;
+ struct brcms_c_info *wlc = wlc_hw->wlc;
/* update SYNTHPU_DLY */
- if (WLCISLCNPHY(wlc->band)) {
+ if (BRCMS_ISLCNPHY(wlc->band)) {
v = SYNTHPU_DLY_LPPHY_US;
- } else if (WLCISNPHY(wlc->band) && (NREV_GE(wlc->band->phyrev, 3))) {
+ } else if (BRCMS_ISNPHY(wlc->band) && (NREV_GE(wlc->band->phyrev, 3))) {
v = SYNTHPU_DLY_NPHY_US;
} else {
v = SYNTHPU_DLY_BPHY_US;
}
- wlc_bmac_write_shm(wlc_hw, M_SYNTHPU_DLY, v);
+ brcms_b_write_shm(wlc_hw, M_SYNTHPU_DLY, v);
}
/* band-specific init */
static void
-WLBANDINITFN(wlc_bmac_bsinit) (struct wlc_info *wlc, chanspec_t chanspec)
+brcms_b_bsinit(struct brcms_c_info *wlc, chanspec_t chanspec)
{
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
wlc_hw->band->bandunit);
- wlc_ucode_bsinit(wlc_hw);
+ brcms_c_ucode_bsinit(wlc_hw);
wlc_phy_init(wlc_hw->band->pi, chanspec);
- wlc_ucode_txant_set(wlc_hw);
+ brcms_c_ucode_txant_set(wlc_hw);
/* cwmin is band-specific, update hardware with value for current band */
- wlc_bmac_set_cwmin(wlc_hw, wlc_hw->band->CWmin);
- wlc_bmac_set_cwmax(wlc_hw, wlc_hw->band->CWmax);
+ brcms_b_set_cwmin(wlc_hw, wlc_hw->band->CWmin);
+ brcms_b_set_cwmax(wlc_hw, wlc_hw->band->CWmax);
- wlc_bmac_update_slot_timing(wlc_hw,
+ brcms_b_update_slot_timing(wlc_hw,
BAND_5G(wlc_hw->band->
bandtype) ? true : wlc_hw->
shortslot);
/* write phytype and phyvers */
- wlc_bmac_write_shm(wlc_hw, M_PHYTYPE, (u16) wlc_hw->band->phytype);
- wlc_bmac_write_shm(wlc_hw, M_PHYVER, (u16) wlc_hw->band->phyrev);
+ brcms_b_write_shm(wlc_hw, M_PHYTYPE, (u16) wlc_hw->band->phytype);
+ brcms_b_write_shm(wlc_hw, M_PHYVER, (u16) wlc_hw->band->phyrev);
/* initialize the txphyctl1 rate table since shmem is shared between bands */
- wlc_upd_ofdm_pctl1_table(wlc_hw);
+ brcms_upd_ofdm_pctl1_table(wlc_hw);
- wlc_bmac_upd_synthpu(wlc_hw);
+ brcms_b_upd_synthpu(wlc_hw);
}
-static void wlc_bmac_core_phy_clk(struct wlc_hw_info *wlc_hw, bool clk)
+static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
{
BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk);
@@ -1771,7 +1747,7 @@ static void wlc_bmac_core_phy_clk(struct wlc_hw_info *wlc_hw, bool clk)
}
/* Perform a soft reset of the PHY PLL */
-void wlc_bmac_core_phypll_reset(struct wlc_hw_info *wlc_hw)
+void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw)
{
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
@@ -1790,12 +1766,12 @@ void wlc_bmac_core_phypll_reset(struct wlc_hw_info *wlc_hw)
}
/* light way to turn on phy clock without reset for NPHY only
- * refer to wlc_bmac_core_phy_clk for full version
+ * refer to brcms_b_core_phy_clk for full version
*/
-void wlc_bmac_phyclk_fgc(struct wlc_hw_info *wlc_hw, bool clk)
+void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk)
{
/* support(necessary for NPHY and HYPHY) only */
- if (!WLCISNPHY(wlc_hw->band))
+ if (!BRCMS_ISNPHY(wlc_hw->band))
return;
if (ON == clk)
@@ -1805,7 +1781,7 @@ void wlc_bmac_phyclk_fgc(struct wlc_hw_info *wlc_hw, bool clk)
}
-void wlc_bmac_macphyclk_set(struct wlc_hw_info *wlc_hw, bool clk)
+void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk)
{
if (ON == clk)
ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, SICF_MPCLKE);
@@ -1813,9 +1789,9 @@ void wlc_bmac_macphyclk_set(struct wlc_hw_info *wlc_hw, bool clk)
ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, 0);
}
-void wlc_bmac_phy_reset(struct wlc_hw_info *wlc_hw)
+void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
{
- wlc_phy_t *pih = wlc_hw->band->pi;
+ struct brcms_phy_pub *pih = wlc_hw->band->pi;
u32 phy_bw_clkbits;
bool phy_in_reset = false;
@@ -1827,7 +1803,7 @@ void wlc_bmac_phy_reset(struct wlc_hw_info *wlc_hw)
phy_bw_clkbits = wlc_phy_clk_bwbits(wlc_hw->band->pi);
/* Specific reset sequence required for NPHY rev 3 and 4 */
- if (WLCISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) &&
+ if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) &&
NREV_LE(wlc_hw->band->phyrev, 4)) {
/* Set the PHY bandwidth */
ai_core_cflags(wlc_hw->sih, SICF_BWMASK, phy_bw_clkbits);
@@ -1835,7 +1811,7 @@ void wlc_bmac_phy_reset(struct wlc_hw_info *wlc_hw)
udelay(1);
/* Perform a soft reset of the PHY PLL */
- wlc_bmac_core_phypll_reset(wlc_hw);
+ brcms_b_core_phypll_reset(wlc_hw);
/* reset the PHY */
ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_PCLKE),
@@ -1849,7 +1825,7 @@ void wlc_bmac_phy_reset(struct wlc_hw_info *wlc_hw)
}
udelay(2);
- wlc_bmac_core_phy_clk(wlc_hw, ON);
+ brcms_b_core_phy_clk(wlc_hw, ON);
if (pih)
wlc_phy_anacore(pih, ON);
@@ -1857,44 +1833,45 @@ void wlc_bmac_phy_reset(struct wlc_hw_info *wlc_hw)
/* switch to and initialize new band */
static void
-WLBANDINITFN(wlc_bmac_setband) (struct wlc_hw_info *wlc_hw, uint bandunit,
+brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
chanspec_t chanspec) {
- struct wlc_info *wlc = wlc_hw->wlc;
+ struct brcms_c_info *wlc = wlc_hw->wlc;
u32 macintmask;
/* Enable the d11 core before accessing it */
if (!ai_iscoreup(wlc_hw->sih)) {
ai_core_reset(wlc_hw->sih, 0, 0);
- wlc_mctrl_reset(wlc_hw);
+ brcms_c_mctrl_reset(wlc_hw);
}
- macintmask = wlc_setband_inact(wlc, bandunit);
+ macintmask = brcms_c_setband_inact(wlc, bandunit);
if (!wlc_hw->up)
return;
- wlc_bmac_core_phy_clk(wlc_hw, ON);
+ brcms_b_core_phy_clk(wlc_hw, ON);
/* band-specific initializations */
- wlc_bmac_bsinit(wlc, chanspec);
+ brcms_b_bsinit(wlc, chanspec);
/*
* If there are any pending software interrupt bits,
* then replace these with a harmless nonzero value
- * so wlc_dpc() will re-enable interrupts when done.
+ * so brcms_c_dpc() will re-enable interrupts when done.
*/
if (wlc->macintstatus)
wlc->macintstatus = MI_DMAINT;
/* restore macintmask */
- wl_intrsrestore(wlc->wl, macintmask);
+ brcms_intrsrestore(wlc->wl, macintmask);
/* ucode should still be suspended.. */
WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
}
/* low-level band switch utility routine */
-void WLBANDINITFN(wlc_setxband) (struct wlc_hw_info *wlc_hw, uint bandunit)
+void brcms_c_setxband(struct brcms_hardware *wlc_hw,
+ uint bandunit)
{
BCMMSG(wlc_hw->wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
bandunit);
@@ -1911,7 +1888,7 @@ void WLBANDINITFN(wlc_setxband) (struct wlc_hw_info *wlc_hw, uint bandunit)
}
}
-static bool wlc_isgoodchip(struct wlc_hw_info *wlc_hw)
+static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw)
{
/* reject unsupported corerev */
@@ -1924,31 +1901,36 @@ static bool wlc_isgoodchip(struct wlc_hw_info *wlc_hw)
return true;
}
-static bool wlc_validboardtype(struct wlc_hw_info *wlc_hw)
+/* Validate some board info parameters */
+static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw)
{
- bool goodboard = true;
uint boardrev = wlc_hw->boardrev;
+ /* 4 bits each for board type, major, minor, and tiny version */
+ uint brt = (boardrev & 0xf000) >> 12;
+ uint b0 = (boardrev & 0xf00) >> 8;
+ uint b1 = (boardrev & 0xf0) >> 4;
+ uint b2 = boardrev & 0xf;
+
+ /* boards from other vendors are always considered valid */
+ if (wlc_hw->sih->boardvendor != PCI_VENDOR_ID_BROADCOM)
+ return true;
+
+ /* do some boardrev sanity checks when boardvendor is Broadcom */
if (boardrev == 0)
- goodboard = false;
- else if (boardrev > 0xff) {
- uint brt = (boardrev & 0xf000) >> 12;
- uint b0 = (boardrev & 0xf00) >> 8;
- uint b1 = (boardrev & 0xf0) >> 4;
- uint b2 = boardrev & 0xf;
+ return false;
- if ((brt > 2) || (brt == 0) || (b0 > 9) || (b0 == 0) || (b1 > 9)
- || (b2 > 9))
- goodboard = false;
- }
+ if (boardrev <= 0xff)
+ return true;
- if (wlc_hw->sih->boardvendor != PCI_VENDOR_ID_BROADCOM)
- return goodboard;
+ if ((brt > 2) || (brt == 0) || (b0 > 9) || (b0 == 0) || (b1 > 9)
+ || (b2 > 9))
+ return false;
- return goodboard;
+ return true;
}
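/*
 * Condensed restatement of the rules above as a standalone predicate
 * (hypothetical helper, for illustration): a Broadcom boardrev is
 * valid if it is a non-zero legacy value <= 0xff, or an encoded value
 * with type 1-2, major 1-9, minor 0-9, tiny 0-9. E.g. 0x1234 decodes
 * to type 1, major 2, minor 3, tiny 4 and is accepted, while 0x3234
 * (type 3) is rejected.
 */
static bool example_boardrev_ok(uint boardrev)
{
	uint brt = (boardrev & 0xf000) >> 12;	/* board type */
	uint b0 = (boardrev & 0xf00) >> 8;	/* major */
	uint b1 = (boardrev & 0xf0) >> 4;	/* minor */
	uint b2 = boardrev & 0xf;		/* tiny */

	if (boardrev == 0)
		return false;
	if (boardrev <= 0xff)
		return true;

	return brt >= 1 && brt <= 2 && b0 >= 1 && b0 <= 9 &&
	       b1 <= 9 && b2 <= 9;
}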
-static char *wlc_get_macaddr(struct wlc_hw_info *wlc_hw)
+static char *brcms_c_get_macaddr(struct brcms_hardware *wlc_hw)
{
const char *varname = "macaddr";
char *macaddr;
@@ -1978,14 +1960,14 @@ static char *wlc_get_macaddr(struct wlc_hw_info *wlc_hw)
* this function could be called when driver is down and w/o clock
* it operates on different registers depending on corerev and boardflag.
*/
-bool wlc_bmac_radio_read_hwdisabled(struct wlc_hw_info *wlc_hw)
+bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
{
bool v, clk, xtal;
u32 resetbits = 0, flags = 0;
xtal = wlc_hw->sbclk;
if (!xtal)
- wlc_bmac_xtal(wlc_hw, ON);
+ brcms_b_xtal(wlc_hw, ON);
/* may need to take core out of reset first */
clk = wlc_hw->clk;
@@ -1999,13 +1981,12 @@ bool wlc_bmac_radio_read_hwdisabled(struct wlc_hw_info *wlc_hw)
/* AI chip doesn't restore bar0win2 on hibernation/resume, need sw fixup */
if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43421_CHIP_ID))
+ (wlc_hw->sih->chip == BCM43225_CHIP_ID))
wlc_hw->regs =
(d11regs_t *) ai_setcore(wlc_hw->sih, D11_CORE_ID,
0);
ai_core_reset(wlc_hw->sih, flags, resetbits);
- wlc_mctrl_reset(wlc_hw);
+ brcms_c_mctrl_reset(wlc_hw);
}
v = ((R_REG(&wlc_hw->regs->phydebug) & PDBG_RFD) != 0);
@@ -2015,13 +1996,13 @@ bool wlc_bmac_radio_read_hwdisabled(struct wlc_hw_info *wlc_hw)
ai_core_disable(wlc_hw->sih, 0);
if (!xtal)
- wlc_bmac_xtal(wlc_hw, OFF);
+ brcms_b_xtal(wlc_hw, OFF);
return v;
}
/* Initialize just the hardware when coming out of POR or S3/S5 system states */
-void wlc_bmac_hw_up(struct wlc_hw_info *wlc_hw)
+void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
{
if (wlc_hw->wlc->pub->hw_up)
return;
@@ -2030,19 +2011,18 @@ void wlc_bmac_hw_up(struct wlc_hw_info *wlc_hw)
/*
* Enable pll and xtal, initialize the power control registers,
- * and force fastclock for the remainder of wlc_up().
+ * and force fastclock for the remainder of brcms_c_up().
*/
- wlc_bmac_xtal(wlc_hw, ON);
+ brcms_b_xtal(wlc_hw, ON);
ai_clkctl_init(wlc_hw->sih);
- wlc_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
if (wlc_hw->sih->bustype == PCI_BUS) {
ai_pci_fixcfg(wlc_hw->sih);
/* AI chip doesn't restore bar0win2 on hibernation/resume, need sw fixup */
if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43421_CHIP_ID))
+ (wlc_hw->sih->chip == BCM43225_CHIP_ID))
wlc_hw->regs =
(d11regs_t *) ai_setcore(wlc_hw->sih, D11_CORE_ID,
0);
@@ -2063,9 +2043,9 @@ void wlc_bmac_hw_up(struct wlc_hw_info *wlc_hw)
}
}
-static bool wlc_dma_rxreset(struct wlc_hw_info *wlc_hw, uint fifo)
+static bool wlc_dma_rxreset(struct brcms_hardware *wlc_hw, uint fifo)
{
- struct hnddma_pub *di = wlc_hw->di[fifo];
+ struct dma_pub *di = wlc_hw->di[fifo];
return dma_rxreset(di);
}
@@ -2077,14 +2057,14 @@ static bool wlc_dma_rxreset(struct wlc_hw_info *wlc_hw, uint fifo)
* clear software macintstatus for fresh new start
* one testing hack wlc_hw->noreset will bypass the d11/phy reset
*/
-void wlc_bmac_corereset(struct wlc_hw_info *wlc_hw, u32 flags)
+void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
{
d11regs_t *regs;
uint i;
bool fastclk;
u32 resetbits = 0;
- if (flags == WLC_USE_COREFLAGS)
+ if (flags == BRCMS_USE_COREFLAGS)
flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
@@ -2094,7 +2074,7 @@ void wlc_bmac_corereset(struct wlc_hw_info *wlc_hw, u32 flags)
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
- wlc_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
/* reset the dma engines except first time thru */
if (ai_iscoreup(wlc_hw->sih)) {
@@ -2115,7 +2095,7 @@ void wlc_bmac_corereset(struct wlc_hw_info *wlc_hw, u32 flags)
/* if noreset, just stop the psm and return */
if (wlc_hw->noreset) {
wlc_hw->wlc->macintstatus = 0; /* skip wl_dpc after down */
- wlc_bmac_mctrl(wlc_hw, MCTL_PSM_RUN | MCTL_EN_MAC, 0);
+ brcms_b_mctrl(wlc_hw, MCTL_PSM_RUN | MCTL_EN_MAC, 0);
return;
}
@@ -2139,28 +2119,28 @@ void wlc_bmac_corereset(struct wlc_hw_info *wlc_hw, u32 flags)
if (wlc_hw->band && wlc_hw->band->pi)
wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true);
- wlc_mctrl_reset(wlc_hw);
+ brcms_c_mctrl_reset(wlc_hw);
if (PMUCTL_ENAB(wlc_hw->sih))
- wlc_clkctl_clk(wlc_hw, CLK_FAST);
+ brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
- wlc_bmac_phy_reset(wlc_hw);
+ brcms_b_phy_reset(wlc_hw);
/* turn on PHY_PLL */
- wlc_bmac_core_phypll_ctl(wlc_hw, true);
+ brcms_b_core_phypll_ctl(wlc_hw, true);
/* clear sw intstatus */
wlc_hw->wlc->macintstatus = 0;
/* restore the clk setting */
if (!fastclk)
- wlc_clkctl_clk(wlc_hw, CLK_DYNAMIC);
+ brcms_b_clkctl_clk(wlc_hw, CLK_DYNAMIC);
}
/* txfifo sizes needs to be modified(increased) since the newer cores
* have more memory.
*/
-static void wlc_corerev_fifofixup(struct wlc_hw_info *wlc_hw)
+static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
{
d11regs_t *regs = wlc_hw->regs;
u16 fifo_nu;
@@ -2195,14 +2175,14 @@ static void wlc_corerev_fifofixup(struct wlc_hw_info *wlc_hw)
* need to propagate to shm location to be in sync since ucode/hw won't
* do this
*/
- wlc_bmac_write_shm(wlc_hw, M_FIFOSIZE0,
+ brcms_b_write_shm(wlc_hw, M_FIFOSIZE0,
wlc_hw->xmtfifo_sz[TX_AC_BE_FIFO]);
- wlc_bmac_write_shm(wlc_hw, M_FIFOSIZE1,
+ brcms_b_write_shm(wlc_hw, M_FIFOSIZE1,
wlc_hw->xmtfifo_sz[TX_AC_VI_FIFO]);
- wlc_bmac_write_shm(wlc_hw, M_FIFOSIZE2,
+ brcms_b_write_shm(wlc_hw, M_FIFOSIZE2,
((wlc_hw->xmtfifo_sz[TX_AC_VO_FIFO] << 8) | wlc_hw->
xmtfifo_sz[TX_AC_BK_FIFO]));
- wlc_bmac_write_shm(wlc_hw, M_FIFOSIZE3,
+ brcms_b_write_shm(wlc_hw, M_FIFOSIZE3,
((wlc_hw->xmtfifo_sz[TX_ATIM_FIFO] << 8) | wlc_hw->
xmtfifo_sz[TX_BCMC_FIFO]));
}
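/*
 * Sketch of the shm packing used above and checked again in
 * brcms_b_coreinit() (hypothetical helper names): two 8-bit tx fifo
 * sizes share one 16-bit shm word, high byte first.
 */
static u16 example_pack_fifosz(u8 hi_sz, u8 lo_sz)
{
	return (hi_sz << 8) | lo_sz;
}

static void example_unpack_fifosz(u16 word, u8 *hi_sz, u8 *lo_sz)
{
	*hi_sz = (word >> 8) & 0xff;
	*lo_sz = word & 0xff;
}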
@@ -2215,9 +2195,9 @@ static void wlc_corerev_fifofixup(struct wlc_hw_info *wlc_hw)
* config other core registers
* init dma
*/
-static void wlc_coreinit(struct wlc_info *wlc)
+static void brcms_b_coreinit(struct brcms_c_info *wlc)
{
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
d11regs_t *regs;
u32 sflags;
uint bcnint_us;
@@ -2232,9 +2212,9 @@ static void wlc_coreinit(struct wlc_info *wlc)
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
/* reset PSM */
- wlc_bmac_mctrl(wlc_hw, ~0, (MCTL_IHR_EN | MCTL_PSM_JMP_0 | MCTL_WAKE));
+ brcms_b_mctrl(wlc_hw, ~0, (MCTL_IHR_EN | MCTL_PSM_JMP_0 | MCTL_WAKE));
- wlc_ucode_download(wlc_hw);
+ brcms_ucode_download(wlc_hw);
/*
* FIFOSZ fixup. driver wants to control the fifo allocation.
*/
@@ -2242,7 +2222,7 @@ static void wlc_coreinit(struct wlc_info *wlc)
/* let the PSM run to the suspended state, set mode to BSS STA */
W_REG(&regs->macintstatus, -1);
- wlc_bmac_mctrl(wlc_hw, ~0,
+ brcms_b_mctrl(wlc_hw, ~0,
(MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE));
/* wait for ucode to self-suspend after auto-init */
@@ -2252,20 +2232,20 @@ static void wlc_coreinit(struct wlc_info *wlc)
wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-"
"suspend!\n", wlc_hw->unit);
- wlc_gpio_init(wlc);
+ brcms_c_gpio_init(wlc);
sflags = ai_core_sflags(wlc_hw->sih, 0, 0);
if (D11REV_IS(wlc_hw->corerev, 23)) {
- if (WLCISNPHY(wlc_hw->band))
- wlc_write_inits(wlc_hw, d11n0initvals16);
+ if (BRCMS_ISNPHY(wlc_hw->band))
+ brcms_c_write_inits(wlc_hw, d11n0initvals16);
else
wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
" %d\n", __func__, wlc_hw->unit,
wlc_hw->corerev);
} else if (D11REV_IS(wlc_hw->corerev, 24)) {
- if (WLCISLCNPHY(wlc_hw->band)) {
- wlc_write_inits(wlc_hw, d11lcn0initvals24);
+ if (BRCMS_ISLCNPHY(wlc_hw->band)) {
+ brcms_c_write_inits(wlc_hw, d11lcn0initvals24);
} else {
wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev"
" %d\n", __func__, wlc_hw->unit,
@@ -2278,21 +2258,21 @@ static void wlc_coreinit(struct wlc_info *wlc)
/* For old ucode, txfifo sizes needs to be modified(increased) */
if (fifosz_fixup == true) {
- wlc_corerev_fifofixup(wlc_hw);
+ brcms_b_corerev_fifofixup(wlc_hw);
}
/* check txfifo allocations match between ucode and driver */
- buf[TX_AC_BE_FIFO] = wlc_bmac_read_shm(wlc_hw, M_FIFOSIZE0);
+ buf[TX_AC_BE_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE0);
if (buf[TX_AC_BE_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_BE_FIFO]) {
i = TX_AC_BE_FIFO;
err = -1;
}
- buf[TX_AC_VI_FIFO] = wlc_bmac_read_shm(wlc_hw, M_FIFOSIZE1);
+ buf[TX_AC_VI_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE1);
if (buf[TX_AC_VI_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_VI_FIFO]) {
i = TX_AC_VI_FIFO;
err = -1;
}
- buf[TX_AC_BK_FIFO] = wlc_bmac_read_shm(wlc_hw, M_FIFOSIZE2);
+ buf[TX_AC_BK_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE2);
buf[TX_AC_VO_FIFO] = (buf[TX_AC_BK_FIFO] >> 8) & 0xff;
buf[TX_AC_BK_FIFO] &= 0xff;
if (buf[TX_AC_BK_FIFO] != wlc_hw->xmtfifo_sz[TX_AC_BK_FIFO]) {
@@ -2303,7 +2283,7 @@ static void wlc_coreinit(struct wlc_info *wlc)
i = TX_AC_VO_FIFO;
err = -1;
}
- buf[TX_BCMC_FIFO] = wlc_bmac_read_shm(wlc_hw, M_FIFOSIZE3);
+ buf[TX_BCMC_FIFO] = brcms_b_read_shm(wlc_hw, M_FIFOSIZE3);
buf[TX_ATIM_FIFO] = (buf[TX_BCMC_FIFO] >> 8) & 0xff;
buf[TX_BCMC_FIFO] &= 0xff;
if (buf[TX_BCMC_FIFO] != wlc_hw->xmtfifo_sz[TX_BCMC_FIFO]) {
@@ -2326,14 +2306,14 @@ static void wlc_coreinit(struct wlc_info *wlc)
/* band-specific inits done by wlc_bsinit() */
/* Set up frame burst size and antenna swap threshold init values */
- wlc_bmac_write_shm(wlc_hw, M_MBURST_SIZE, MAXTXFRAMEBURST);
- wlc_bmac_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT);
+ brcms_b_write_shm(wlc_hw, M_MBURST_SIZE, MAXTXFRAMEBURST);
+ brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT);
/* enable one rx interrupt per received frame */
W_REG(&regs->intrcvlazy[0], (1 << IRL_FC_SHIFT));
/* set the station mode (BSS STA) */
- wlc_bmac_mctrl(wlc_hw,
+ brcms_b_mctrl(wlc_hw,
(MCTL_INFRA | MCTL_DISCARD_PMQ | MCTL_AP),
(MCTL_INFRA | MCTL_DISCARD_PMQ));
@@ -2347,19 +2327,19 @@ static void wlc_coreinit(struct wlc_info *wlc)
W_REG(&regs->intctrlregs[RX_FIFO].intmask, DEF_RXINTMASK);
/* allow the MAC to control the PHY clock (dynamic on/off) */
- wlc_bmac_macphyclk_set(wlc_hw, ON);
+ brcms_b_macphyclk_set(wlc_hw, ON);
/* program dynamic clock control fast powerup delay register */
wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih);
W_REG(&regs->scc_fastpwrup_dly, wlc->fastpwrup_dly);
/* tell the ucode the corerev */
- wlc_bmac_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev);
+ brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev);
/* tell the ucode MAC capabilities */
- wlc_bmac_write_shm(wlc_hw, M_MACHW_CAP_L,
+ brcms_b_write_shm(wlc_hw, M_MACHW_CAP_L,
(u16) (wlc_hw->machwcap & 0xffff));
- wlc_bmac_write_shm(wlc_hw, M_MACHW_CAP_H,
+ brcms_b_write_shm(wlc_hw, M_MACHW_CAP_H,
(u16) ((wlc_hw->
machwcap >> 16) & 0xffff));
@@ -2372,8 +2352,8 @@ static void wlc_coreinit(struct wlc_info *wlc)
W_REG(&regs->objdata, wlc_hw->LRL);
/* write rate fallback retry limits */
- wlc_bmac_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL);
- wlc_bmac_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL);
+ brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL);
+ brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL);
AND_REG(&regs->ifs_ctl, 0x0FFF);
W_REG(&regs->ifs_aifsn, EDCF_AIFSN_MIN);
@@ -2405,7 +2385,7 @@ static void wlc_coreinit(struct wlc_info *wlc)
* - 559241 = 0x88889 => tsf_clk_frac_h = 0x8, tsf_clk_frac_l = 0x8889
*/
-void wlc_bmac_switch_macfreq(struct wlc_hw_info *wlc_hw, u8 spurmode)
+void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
{
d11regs_t *regs;
regs = wlc_hw->regs;
@@ -2422,7 +2402,7 @@ void wlc_bmac_switch_macfreq(struct wlc_hw_info *wlc_hw, u8 spurmode)
W_REG(&regs->tsf_clk_frac_l, 0x8889);
W_REG(&regs->tsf_clk_frac_h, 0x8);
}
- } else if (WLCISLCNPHY(wlc_hw->band)) {
+ } else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
if (spurmode == WL_SPURAVOID_ON1) { /* 82Mhz */
W_REG(&regs->tsf_clk_frac_l, 0x7CE0);
W_REG(&regs->tsf_clk_frac_h, 0xC);
@@ -2434,16 +2414,16 @@ void wlc_bmac_switch_macfreq(struct wlc_hw_info *wlc_hw, u8 spurmode)
}
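/*
 * The tsf_clk_frac registers hold one fixed-point value split across
 * two 16-bit writes: the example from the comment above, 559241 =
 * 0x88889, lands as tsf_clk_frac_h = 0x8 (bits above 15) and
 * tsf_clk_frac_l = 0x8889 (low 16 bits). A sketch of that split
 * (hypothetical helper):
 */
static void example_write_tsf_frac(d11regs_t *regs, u32 frac)
{
	W_REG(&regs->tsf_clk_frac_l, (u16) (frac & 0xffff));
	W_REG(&regs->tsf_clk_frac_h, (u16) (frac >> 16));
}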
/* Initialize GPIOs that are controlled by D11 core */
-static void wlc_gpio_init(struct wlc_info *wlc)
+static void brcms_c_gpio_init(struct brcms_c_info *wlc)
{
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
d11regs_t *regs;
u32 gc, gm;
regs = wlc_hw->regs;
/* use GPIO select 0 to get all gpio signals from the gpio out reg */
- wlc_bmac_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0);
+ brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0);
/*
* Common GPIO setup:
@@ -2458,10 +2438,10 @@ static void wlc_gpio_init(struct wlc_info *wlc)
/* Allocate GPIOs for mimo antenna diversity feature */
if (wlc_hw->antsel_type == ANTSEL_2x3) {
/* Enable antenna diversity, use 2x3 mode */
- wlc_bmac_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
- MHF3_ANTSEL_EN, WLC_BAND_ALL);
- wlc_bmac_mhf(wlc_hw, MHF3, MHF3_ANTSEL_MODE,
- MHF3_ANTSEL_MODE, WLC_BAND_ALL);
+ brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
+ MHF3_ANTSEL_EN, BRCM_BAND_ALL);
+ brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_MODE,
+ MHF3_ANTSEL_MODE, BRCM_BAND_ALL);
/* init superswitch control */
wlc_phy_antsel_init(wlc_hw->band->pi, false);
@@ -2478,13 +2458,13 @@ static void wlc_gpio_init(struct wlc_info *wlc)
(BOARD_GPIO_12 | BOARD_GPIO_13));
/* Enable antenna diversity, use 2x4 mode */
- wlc_bmac_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
- MHF3_ANTSEL_EN, WLC_BAND_ALL);
- wlc_bmac_mhf(wlc_hw, MHF3, MHF3_ANTSEL_MODE, 0,
- WLC_BAND_ALL);
+ brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
+ MHF3_ANTSEL_EN, BRCM_BAND_ALL);
+ brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_MODE, 0,
+ BRCM_BAND_ALL);
/* Configure the desired clock to be 4Mhz */
- wlc_bmac_write_shm(wlc_hw, M_ANTSEL_CLKDIV,
+ brcms_b_write_shm(wlc_hw, M_ANTSEL_CLKDIV,
ANTSEL_CLKDIV_4MHZ);
}
@@ -2496,17 +2476,17 @@ static void wlc_gpio_init(struct wlc_info *wlc)
ai_gpiocontrol(wlc_hw->sih, gm, gc, GPIO_DRV_PRIORITY);
}
-static void wlc_ucode_download(struct wlc_hw_info *wlc_hw)
+static void brcms_ucode_download(struct brcms_hardware *wlc_hw)
{
- struct wlc_info *wlc;
+ struct brcms_c_info *wlc;
wlc = wlc_hw->wlc;
if (wlc_hw->ucode_loaded)
return;
if (D11REV_IS(wlc_hw->corerev, 23)) {
- if (WLCISNPHY(wlc_hw->band)) {
- wlc_ucode_write(wlc_hw, bcm43xx_16_mimo,
+ if (BRCMS_ISNPHY(wlc_hw->band)) {
+ brcms_ucode_write(wlc_hw, bcm43xx_16_mimo,
bcm43xx_16_mimosz);
wlc_hw->ucode_loaded = true;
} else
@@ -2514,8 +2494,8 @@ static void wlc_ucode_download(struct wlc_hw_info *wlc_hw)
"corerev %d\n",
__func__, wlc_hw->unit, wlc_hw->corerev);
} else if (D11REV_IS(wlc_hw->corerev, 24)) {
- if (WLCISLCNPHY(wlc_hw->band)) {
- wlc_ucode_write(wlc_hw, bcm43xx_24_lcn,
+ if (BRCMS_ISLCNPHY(wlc_hw->band)) {
+ brcms_ucode_write(wlc_hw, bcm43xx_24_lcn,
bcm43xx_24_lcnsz);
wlc_hw->ucode_loaded = true;
} else {
@@ -2526,7 +2506,7 @@ static void wlc_ucode_download(struct wlc_hw_info *wlc_hw)
}
}
-static void wlc_ucode_write(struct wlc_hw_info *wlc_hw, const u32 ucode[],
+static void brcms_ucode_write(struct brcms_hardware *wlc_hw, const u32 ucode[],
const uint nbytes) {
d11regs_t *regs = wlc_hw->regs;
uint i;
@@ -2542,7 +2522,7 @@ static void wlc_ucode_write(struct wlc_hw_info *wlc_hw, const u32 ucode[],
W_REG(&regs->objdata, ucode[i]);
}
-static void wlc_write_inits(struct wlc_hw_info *wlc_hw,
+static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
const struct d11init *inits)
{
int i;
@@ -2562,24 +2542,24 @@ static void wlc_write_inits(struct wlc_hw_info *wlc_hw,
}
}
-static void wlc_ucode_txant_set(struct wlc_hw_info *wlc_hw)
+static void brcms_c_ucode_txant_set(struct brcms_hardware *wlc_hw)
{
u16 phyctl;
u16 phytxant = wlc_hw->bmac_phytxant;
u16 mask = PHY_TXC_ANT_MASK;
/* set the Probe Response frame phy control word */
- phyctl = wlc_bmac_read_shm(wlc_hw, M_CTXPRS_BLK + C_CTX_PCTLWD_POS);
+ phyctl = brcms_b_read_shm(wlc_hw, M_CTXPRS_BLK + C_CTX_PCTLWD_POS);
phyctl = (phyctl & ~mask) | phytxant;
- wlc_bmac_write_shm(wlc_hw, M_CTXPRS_BLK + C_CTX_PCTLWD_POS, phyctl);
+ brcms_b_write_shm(wlc_hw, M_CTXPRS_BLK + C_CTX_PCTLWD_POS, phyctl);
/* set the Response (ACK/CTS) frame phy control word */
- phyctl = wlc_bmac_read_shm(wlc_hw, M_RSP_PCTLWD);
+ phyctl = brcms_b_read_shm(wlc_hw, M_RSP_PCTLWD);
phyctl = (phyctl & ~mask) | phytxant;
- wlc_bmac_write_shm(wlc_hw, M_RSP_PCTLWD, phyctl);
+ brcms_b_write_shm(wlc_hw, M_RSP_PCTLWD, phyctl);
}
-void wlc_bmac_txant_set(struct wlc_hw_info *wlc_hw, u16 phytxant)
+void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant)
{
/* update sw state */
wlc_hw->bmac_phytxant = phytxant;
@@ -2587,16 +2567,16 @@ void wlc_bmac_txant_set(struct wlc_hw_info *wlc_hw, u16 phytxant)
/* push to ucode if up */
if (!wlc_hw->up)
return;
- wlc_ucode_txant_set(wlc_hw);
+ brcms_c_ucode_txant_set(wlc_hw);
}
-u16 wlc_bmac_get_txant(struct wlc_hw_info *wlc_hw)
+u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw)
{
return (u16) wlc_hw->wlc->stf->txant;
}
-void wlc_bmac_antsel_type_set(struct wlc_hw_info *wlc_hw, u8 antsel_type)
+void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type)
{
wlc_hw->antsel_type = antsel_type;
@@ -2604,7 +2584,7 @@ void wlc_bmac_antsel_type_set(struct wlc_hw_info *wlc_hw, u8 antsel_type)
wlc_phy_antsel_type_set(wlc_hw->band->pi, antsel_type);
}
-void wlc_bmac_fifoerrors(struct wlc_hw_info *wlc_hw)
+void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
{
bool fatal = false;
uint unit;
@@ -2660,7 +2640,7 @@ void wlc_bmac_fifoerrors(struct wlc_hw_info *wlc_hw)
}
if (fatal) {
- wlc_fatal_error(wlc_hw->wlc); /* big hammer */
+ brcms_c_fatal_error(wlc_hw->wlc); /* big hammer */
break;
} else
W_REG(&regs->intctrlregs[idx].intstatus,
@@ -2668,9 +2648,9 @@ void wlc_bmac_fifoerrors(struct wlc_hw_info *wlc_hw)
}
}
-void wlc_intrson(struct wlc_info *wlc)
+void brcms_c_intrson(struct brcms_c_info *wlc)
{
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
wlc->macintmask = wlc->defmacintmask;
W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
}
@@ -2680,25 +2660,25 @@ void wlc_intrson(struct wlc_info *wlc)
* but also because per-port code may require sync with valid interrupt.
*/
-static u32 wlc_wlintrsoff(struct wlc_info *wlc)
+static u32 brcms_c_wlintrsoff(struct brcms_c_info *wlc)
{
if (!wlc->hw->up)
return 0;
- return wl_intrsoff(wlc->wl);
+ return brcms_intrsoff(wlc->wl);
}
-static void wlc_wlintrsrestore(struct wlc_info *wlc, u32 macintmask)
+static void brcms_c_wlintrsrestore(struct brcms_c_info *wlc, u32 macintmask)
{
if (!wlc->hw->up)
return;
- wl_intrsrestore(wlc->wl, macintmask);
+ brcms_intrsrestore(wlc->wl, macintmask);
}
-u32 wlc_intrsoff(struct wlc_info *wlc)
+u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
{
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
u32 macintmask;
if (!wlc_hw->clk)
@@ -2715,9 +2695,9 @@ u32 wlc_intrsoff(struct wlc_info *wlc)
return wlc->macintstatus ? 0 : macintmask;
}
-void wlc_intrsrestore(struct wlc_info *wlc, u32 macintmask)
+void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask)
{
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
if (!wlc_hw->clk)
return;
@@ -2725,43 +2705,44 @@ void wlc_intrsrestore(struct wlc_info *wlc, u32 macintmask)
W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
}
-static void wlc_bmac_mute(struct wlc_hw_info *wlc_hw, bool on, mbool flags)
+static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, mbool flags)
{
u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
if (on) {
/* suspend tx fifos */
- wlc_bmac_tx_fifo_suspend(wlc_hw, TX_DATA_FIFO);
- wlc_bmac_tx_fifo_suspend(wlc_hw, TX_CTL_FIFO);
- wlc_bmac_tx_fifo_suspend(wlc_hw, TX_AC_BK_FIFO);
- wlc_bmac_tx_fifo_suspend(wlc_hw, TX_AC_VI_FIFO);
+ brcms_b_tx_fifo_suspend(wlc_hw, TX_DATA_FIFO);
+ brcms_b_tx_fifo_suspend(wlc_hw, TX_CTL_FIFO);
+ brcms_b_tx_fifo_suspend(wlc_hw, TX_AC_BK_FIFO);
+ brcms_b_tx_fifo_suspend(wlc_hw, TX_AC_VI_FIFO);
/* zero the address match register so we do not send ACKs */
- wlc_bmac_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
+ brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
null_ether_addr);
} else {
/* resume tx fifos */
if (!wlc_hw->wlc->tx_suspended) {
- wlc_bmac_tx_fifo_resume(wlc_hw, TX_DATA_FIFO);
+ brcms_b_tx_fifo_resume(wlc_hw, TX_DATA_FIFO);
}
- wlc_bmac_tx_fifo_resume(wlc_hw, TX_CTL_FIFO);
- wlc_bmac_tx_fifo_resume(wlc_hw, TX_AC_BK_FIFO);
- wlc_bmac_tx_fifo_resume(wlc_hw, TX_AC_VI_FIFO);
+ brcms_b_tx_fifo_resume(wlc_hw, TX_CTL_FIFO);
+ brcms_b_tx_fifo_resume(wlc_hw, TX_AC_BK_FIFO);
+ brcms_b_tx_fifo_resume(wlc_hw, TX_AC_VI_FIFO);
/* Restore address */
- wlc_bmac_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
+ brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
wlc_hw->etheraddr);
}
wlc_phy_mute_upd(wlc_hw->band->pi, on, flags);
if (on)
- wlc_ucode_mute_override_set(wlc_hw);
+ brcms_c_ucode_mute_override_set(wlc_hw);
else
- wlc_ucode_mute_override_clear(wlc_hw);
+ brcms_c_ucode_mute_override_clear(wlc_hw);
}
-int wlc_bmac_xmtfifo_sz_get(struct wlc_hw_info *wlc_hw, uint fifo, uint *blocks)
+int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
+ uint *blocks)
{
if (fifo >= NFIFO)
return -EINVAL;
@@ -2771,7 +2752,7 @@ int wlc_bmac_xmtfifo_sz_get(struct wlc_hw_info *wlc_hw, uint fifo, uint *blocks)
return 0;
}
-/* wlc_bmac_tx_fifo_suspended:
+/* brcms_b_tx_fifo_suspended:
* Check the MAC's tx suspend status for a tx fifo.
*
* When the MAC acknowledges a tx suspend, it indicates that no more
@@ -2780,7 +2761,8 @@ int wlc_bmac_xmtfifo_sz_get(struct wlc_hw_info *wlc_hw, uint fifo, uint *blocks)
* be pulling data into a tx fifo, by the time the MAC acks the suspend
* request.
*/
-static bool wlc_bmac_tx_fifo_suspended(struct wlc_hw_info *wlc_hw, uint tx_fifo)
+static bool brcms_b_tx_fifo_suspended(struct brcms_hardware *wlc_hw,
+ uint tx_fifo)
{
/* check that a suspend has been requested and is no longer pending */
@@ -2799,7 +2781,8 @@ static bool wlc_bmac_tx_fifo_suspended(struct wlc_hw_info *wlc_hw, uint tx_fifo)
return false;
}
-static void wlc_bmac_tx_fifo_suspend(struct wlc_hw_info *wlc_hw, uint tx_fifo)
+static void brcms_b_tx_fifo_suspend(struct brcms_hardware *wlc_hw,
+ uint tx_fifo)
{
u8 fifo = 1 << tx_fifo;
@@ -2811,7 +2794,8 @@ static void wlc_bmac_tx_fifo_suspend(struct wlc_hw_info *wlc_hw, uint tx_fifo)
/* force the core awake only if not already */
if (wlc_hw->suspended_fifos == 0)
- wlc_ucode_wake_override_set(wlc_hw, WLC_WAKE_OVERRIDE_TXFIFO);
+ brcms_c_ucode_wake_override_set(wlc_hw,
+ BRCMS_WAKE_OVERRIDE_TXFIFO);
wlc_hw->suspended_fifos |= fifo;
@@ -2820,20 +2804,22 @@ static void wlc_bmac_tx_fifo_suspend(struct wlc_hw_info *wlc_hw, uint tx_fifo)
* which may result in mismatch between ucode and driver
* so suspend the mac before suspending the FIFO
*/
- if (WLC_PHY_11N_CAP(wlc_hw->band))
- wlc_suspend_mac_and_wait(wlc_hw->wlc);
+ if (BRCMS_PHY_11N_CAP(wlc_hw->band))
+ brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
dma_txsuspend(wlc_hw->di[tx_fifo]);
- if (WLC_PHY_11N_CAP(wlc_hw->band))
- wlc_enable_mac(wlc_hw->wlc);
+ if (BRCMS_PHY_11N_CAP(wlc_hw->band))
+ brcms_c_enable_mac(wlc_hw->wlc);
}
}
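/*
 * Sketch of how the suspend/suspended pair above is meant to be used
 * (hypothetical caller, illustrative timeout value): request the
 * suspend, then poll for the ack, remembering that the ack only means
 * no new frames will be fetched; DMA may still be draining data it
 * had already started pulling.
 */
static void example_wait_tx_fifo_suspended(struct brcms_hardware *wlc_hw,
					   uint tx_fifo)
{
	brcms_b_tx_fifo_suspend(wlc_hw, tx_fifo);

	SPINWAIT(!brcms_b_tx_fifo_suspended(wlc_hw, tx_fifo),
		 BRCMS_MAX_MAC_SUSPEND);
}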
-static void wlc_bmac_tx_fifo_resume(struct wlc_hw_info *wlc_hw, uint tx_fifo)
+static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw,
+ uint tx_fifo)
{
- /* BMAC_NOTE: WLC_TX_FIFO_ENAB is done in wlc_dpc() for DMA case but need to be done
- * here for PIO otherwise the watchdog will catch the inconsistency and fire
+ /* BMAC_NOTE: BRCMS_TX_FIFO_ENAB is done in brcms_c_dpc() for the DMA
+ * case but needs to be done here for PIO; otherwise the watchdog will
+ * catch the inconsistency and fire
*/
/* Two clients of this code, 11h Quiet period and scanning. */
if (wlc_hw->di[tx_fifo])
@@ -2845,8 +2831,8 @@ static void wlc_bmac_tx_fifo_resume(struct wlc_hw_info *wlc_hw, uint tx_fifo)
else {
wlc_hw->suspended_fifos &= ~(1 << tx_fifo);
if (wlc_hw->suspended_fifos == 0)
- wlc_ucode_wake_override_clear(wlc_hw,
- WLC_WAKE_OVERRIDE_TXFIFO);
+ brcms_c_ucode_wake_override_clear(wlc_hw,
+ BRCMS_WAKE_OVERRIDE_TXFIFO);
}
}
@@ -2858,9 +2844,9 @@ static void wlc_bmac_tx_fifo_resume(struct wlc_hw_info *wlc_hw, uint tx_fifo)
* 0 if the interrupt is not for us, or we are in some special cases;
* device interrupt status bits otherwise.
*/
-static inline u32 wlc_intstatus(struct wlc_info *wlc, bool in_isr)
+static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
{
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
d11regs_t *regs = wlc_hw->regs;
u32 macintstatus;
@@ -2915,7 +2901,7 @@ static inline u32 wlc_intstatus(struct wlc_info *wlc, bool in_isr)
/* Update wlc->macintstatus and wlc->intstatus[]. */
/* Return true if they are updated successfully. false otherwise */
-bool wlc_intrsupd(struct wlc_info *wlc)
+bool brcms_c_intrsupd(struct brcms_c_info *wlc)
{
u32 macintstatus;
@@ -2935,12 +2921,12 @@ bool wlc_intrsupd(struct wlc_info *wlc)
/*
* First-level interrupt processing.
* Return true if this was our interrupt, false otherwise.
- * *wantdpc will be set to true if further wlc_dpc() processing is required,
+ * *wantdpc will be set to true if further brcms_c_dpc() processing is required,
* false otherwise.
*/
-bool wlc_isr(struct wlc_info *wlc, bool *wantdpc)
+bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc)
{
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
u32 macintstatus;
*wantdpc = false;
@@ -2969,7 +2955,8 @@ bool wlc_isr(struct wlc_info *wlc, bool *wantdpc)
}
static bool
-wlc_bmac_dotxstatus(struct wlc_hw_info *wlc_hw, tx_status_t *txs, u32 s2)
+brcms_b_dotxstatus(struct brcms_hardware *wlc_hw, struct tx_status *txs,
+ u32 s2)
{
/* discard intermediate indications for ucode with one legitimate case:
* e.g. if "useRTS" is set. ucode did a successful rts/cts exchange, but the subsequent
@@ -2981,19 +2968,19 @@ wlc_bmac_dotxstatus(struct wlc_hw_info *wlc_hw, tx_status_t *txs, u32 s2)
return false;
}
- return wlc_dotxstatus(wlc_hw->wlc, txs, s2);
+ return brcms_c_dotxstatus(wlc_hw->wlc, txs, s2);
}
/* process tx completion events in BMAC
* Return true if more tx status need to be processed. false otherwise.
*/
static bool
-wlc_bmac_txstatus(struct wlc_hw_info *wlc_hw, bool bound, bool *fatal)
+brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
{
bool morepending = false;
- struct wlc_info *wlc = wlc_hw->wlc;
+ struct brcms_c_info *wlc = wlc_hw->wlc;
d11regs_t *regs;
- tx_status_t txstatus, *txs;
+ struct tx_status txstatus, *txs;
u32 s1, s2;
uint n = 0;
/*
@@ -3023,7 +3010,7 @@ wlc_bmac_txstatus(struct wlc_hw_info *wlc_hw, bool bound, bool *fatal)
txs->phyerr = (s2 & TXS_PTX_MASK) >> TXS_PTX_SHIFT;
txs->lasttxtime = 0;
- *fatal = wlc_bmac_dotxstatus(wlc_hw, txs, s2);
+ *fatal = brcms_b_dotxstatus(wlc_hw, txs, s2);
/* !give others some time to run! */
if (++n >= max_tx_num)
@@ -3037,14 +3024,14 @@ wlc_bmac_txstatus(struct wlc_hw_info *wlc_hw, bool bound, bool *fatal)
morepending = true;
if (!pktq_empty(&wlc->pkt_queue->q))
- wlc_send_q(wlc);
+ brcms_c_send_q(wlc);
return morepending;
}
-void wlc_suspend_mac_and_wait(struct wlc_info *wlc)
+void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
{
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
d11regs_t *regs = wlc_hw->regs;
u32 mc, mi;
struct wiphy *wiphy = wlc->wiphy;
@@ -3060,14 +3047,14 @@ void wlc_suspend_mac_and_wait(struct wlc_info *wlc)
return;
/* force the core awake */
- wlc_ucode_wake_override_set(wlc_hw, WLC_WAKE_OVERRIDE_MACSUSPEND);
+ brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
mc = R_REG(&regs->maccontrol);
if (mc == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
- wl_down(wlc->wl);
+ brcms_down(wlc->wl);
return;
}
WARN_ON(mc & MCTL_PSM_JMP_0);
@@ -3078,20 +3065,20 @@ void wlc_suspend_mac_and_wait(struct wlc_info *wlc)
if (mi == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
- wl_down(wlc->wl);
+ brcms_down(wlc->wl);
return;
}
WARN_ON(mi & MI_MACSSPNDD);
- wlc_bmac_mctrl(wlc_hw, MCTL_EN_MAC, 0);
+ brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0);
SPINWAIT(!(R_REG(&regs->macintstatus) & MI_MACSSPNDD),
- WLC_MAX_MAC_SUSPEND);
+ BRCMS_MAX_MAC_SUSPEND);
if (!(R_REG(&regs->macintstatus) & MI_MACSSPNDD)) {
wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
" and MI_MACSSPNDD is still not on.\n",
- wlc_hw->unit, WLC_MAX_MAC_SUSPEND);
+ wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
"psm_brc 0x%04x\n", wlc_hw->unit,
R_REG(&regs->psmdebug),
@@ -3103,7 +3090,7 @@ void wlc_suspend_mac_and_wait(struct wlc_info *wlc)
if (mc == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
- wl_down(wlc->wl);
+ brcms_down(wlc->wl);
return;
}
WARN_ON(mc & MCTL_PSM_JMP_0);
@@ -3111,9 +3098,9 @@ void wlc_suspend_mac_and_wait(struct wlc_info *wlc)
WARN_ON(mc & MCTL_EN_MAC);
}
-void wlc_enable_mac(struct wlc_info *wlc)
+void brcms_c_enable_mac(struct brcms_c_info *wlc)
{
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
d11regs_t *regs = wlc_hw->regs;
u32 mc, mi;
@@ -3132,7 +3119,7 @@ void wlc_enable_mac(struct wlc_info *wlc)
WARN_ON(mc & MCTL_EN_MAC);
WARN_ON(!(mc & MCTL_PSM_RUN));
- wlc_bmac_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC);
+ brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC);
W_REG(&regs->macintstatus, MI_MACSSPNDD);
mc = R_REG(&regs->maccontrol);
@@ -3143,44 +3130,46 @@ void wlc_enable_mac(struct wlc_info *wlc)
mi = R_REG(&regs->macintstatus);
WARN_ON(mi & MI_MACSSPNDD);
- wlc_ucode_wake_override_clear(wlc_hw, WLC_WAKE_OVERRIDE_MACSUSPEND);
+ brcms_c_ucode_wake_override_clear(wlc_hw,
+ BRCMS_WAKE_OVERRIDE_MACSUSPEND);
}
-static void wlc_upd_ofdm_pctl1_table(struct wlc_hw_info *wlc_hw)
+static void brcms_upd_ofdm_pctl1_table(struct brcms_hardware *wlc_hw)
{
u8 rate;
u8 rates[8] = {
- WLC_RATE_6M, WLC_RATE_9M, WLC_RATE_12M, WLC_RATE_18M,
- WLC_RATE_24M, WLC_RATE_36M, WLC_RATE_48M, WLC_RATE_54M
+ BRCM_RATE_6M, BRCM_RATE_9M, BRCM_RATE_12M, BRCM_RATE_18M,
+ BRCM_RATE_24M, BRCM_RATE_36M, BRCM_RATE_48M, BRCM_RATE_54M
};
u16 entry_ptr;
u16 pctl1;
uint i;
- if (!WLC_PHY_11N_CAP(wlc_hw->band))
+ if (!BRCMS_PHY_11N_CAP(wlc_hw->band))
return;
/* walk the phy rate table and update the entries */
for (i = 0; i < ARRAY_SIZE(rates); i++) {
rate = rates[i];
- entry_ptr = wlc_bmac_ofdm_ratetable_offset(wlc_hw, rate);
+ entry_ptr = brcms_b_ofdm_ratetable_offset(wlc_hw, rate);
/* read the SHM Rate Table entry OFDM PCTL1 values */
pctl1 =
- wlc_bmac_read_shm(wlc_hw, entry_ptr + M_RT_OFDM_PCTL1_POS);
+ brcms_b_read_shm(wlc_hw, entry_ptr + M_RT_OFDM_PCTL1_POS);
/* modify the value */
pctl1 &= ~PHY_TXC1_MODE_MASK;
pctl1 |= (wlc_hw->hw_stf_ss_opmode << PHY_TXC1_MODE_SHIFT);
/* Update the SHM Rate Table entry OFDM PCTL1 values */
- wlc_bmac_write_shm(wlc_hw, entry_ptr + M_RT_OFDM_PCTL1_POS,
+ brcms_b_write_shm(wlc_hw, entry_ptr + M_RT_OFDM_PCTL1_POS,
pctl1);
}
}
-static u16 wlc_bmac_ofdm_ratetable_offset(struct wlc_hw_info *wlc_hw, u8 rate)
+static u16 brcms_b_ofdm_ratetable_offset(struct brcms_hardware *wlc_hw,
+ u8 rate)
{
uint i;
u8 plcp_rate = 0;
@@ -3190,14 +3179,14 @@ static u16 wlc_bmac_ofdm_ratetable_offset(struct wlc_hw_info *wlc_hw, u8 rate)
};
/* OFDM RATE sub-field of PLCP SIGNAL field, per 802.11 sec 17.3.4.1 */
const struct plcp_signal_rate_lookup rate_lookup[] = {
- {WLC_RATE_6M, 0xB},
- {WLC_RATE_9M, 0xF},
- {WLC_RATE_12M, 0xA},
- {WLC_RATE_18M, 0xE},
- {WLC_RATE_24M, 0x9},
- {WLC_RATE_36M, 0xD},
- {WLC_RATE_48M, 0x8},
- {WLC_RATE_54M, 0xC}
+ {BRCM_RATE_6M, 0xB},
+ {BRCM_RATE_9M, 0xF},
+ {BRCM_RATE_12M, 0xA},
+ {BRCM_RATE_18M, 0xE},
+ {BRCM_RATE_24M, 0x9},
+ {BRCM_RATE_36M, 0xD},
+ {BRCM_RATE_48M, 0x8},
+ {BRCM_RATE_54M, 0xC}
};
for (i = 0; i < ARRAY_SIZE(rate_lookup); i++) {
@@ -3210,19 +3199,19 @@ static u16 wlc_bmac_ofdm_ratetable_offset(struct wlc_hw_info *wlc_hw, u8 rate)
/* Find the SHM pointer to the rate table entry by looking in the
* Direct-map Table
*/
- return 2 * wlc_bmac_read_shm(wlc_hw, M_RT_DIRMAP_A + (plcp_rate * 2));
+ return 2 * brcms_b_read_shm(wlc_hw, M_RT_DIRMAP_A + (plcp_rate * 2));
}
-void wlc_bmac_band_stf_ss_set(struct wlc_hw_info *wlc_hw, u8 stf_mode)
+void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode)
{
wlc_hw->hw_stf_ss_opmode = stf_mode;
if (wlc_hw->clk)
- wlc_upd_ofdm_pctl1_table(wlc_hw);
+ brcms_upd_ofdm_pctl1_table(wlc_hw);
}
void
-wlc_bmac_read_tsf(struct wlc_hw_info *wlc_hw, u32 *tsf_l_ptr,
+brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr,
u32 *tsf_h_ptr)
{
d11regs_t *regs = wlc_hw->regs;
@@ -3234,7 +3223,7 @@ wlc_bmac_read_tsf(struct wlc_hw_info *wlc_hw, u32 *tsf_l_ptr,
return;
}
-static bool wlc_bmac_validate_chip_access(struct wlc_hw_info *wlc_hw)
+static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
{
d11regs_t *regs;
u32 w, val;
@@ -3299,7 +3288,7 @@ static bool wlc_bmac_validate_chip_access(struct wlc_hw_info *wlc_hw)
#define PHYPLL_WAIT_US 100000
-void wlc_bmac_core_phypll_ctl(struct wlc_hw_info *wlc_hw, bool on)
+void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
{
d11regs_t *regs;
u32 tmp;
@@ -3351,7 +3340,7 @@ void wlc_bmac_core_phypll_ctl(struct wlc_hw_info *wlc_hw, bool on)
}
}
-void wlc_coredisable(struct wlc_hw_info *wlc_hw)
+void brcms_c_coredisable(struct brcms_hardware *wlc_hw)
{
bool dev_gone;
@@ -3372,7 +3361,7 @@ void wlc_coredisable(struct wlc_hw_info *wlc_hw)
wlc_phy_anacore(wlc_hw->band->pi, OFF);
/* turn off PHYPLL to save power */
- wlc_bmac_core_phypll_ctl(wlc_hw, false);
+ brcms_b_core_phypll_ctl(wlc_hw, false);
/* No need to set wlc->pub->radio_active = OFF
* because this function needs down capability and
@@ -3389,7 +3378,7 @@ void wlc_coredisable(struct wlc_hw_info *wlc_hw)
}
/* power both the pll and external oscillator on/off */
-static void wlc_bmac_xtal(struct wlc_hw_info *wlc_hw, bool want)
+static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
{
BCMMSG(wlc_hw->wlc->wiphy, "wl%d: want %d\n", wlc_hw->unit, want);
@@ -3408,9 +3397,9 @@ static void wlc_bmac_xtal(struct wlc_hw_info *wlc_hw, bool want)
}
}
-static void wlc_flushqueues(struct wlc_info *wlc)
+static void brcms_c_flushqueues(struct brcms_c_info *wlc)
{
- struct wlc_hw_info *wlc_hw = wlc->hw;
+ struct brcms_hardware *wlc_hw = wlc->hw;
uint i;
wlc->txpend16165war = 0;
@@ -3418,7 +3407,7 @@ static void wlc_flushqueues(struct wlc_info *wlc)
/* free any posted tx packets */
for (i = 0; i < NFIFO; i++)
if (wlc_hw->di[i]) {
- dma_txreclaim(wlc_hw->di[i], HNDDMA_RANGE_ALL);
+ dma_txreclaim(wlc_hw->di[i], DMA_RANGE_ALL);
TXPKTPENDCLR(wlc, i);
BCMMSG(wlc->wiphy, "pktpend fifo %d clrd\n", i);
}
@@ -3427,18 +3416,18 @@ static void wlc_flushqueues(struct wlc_info *wlc)
dma_rxreclaim(wlc_hw->di[RX_FIFO]);
}
-u16 wlc_bmac_read_shm(struct wlc_hw_info *wlc_hw, uint offset)
+u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset)
{
- return wlc_bmac_read_objmem(wlc_hw, offset, OBJADDR_SHM_SEL);
+ return brcms_b_read_objmem(wlc_hw, offset, OBJADDR_SHM_SEL);
}
-void wlc_bmac_write_shm(struct wlc_hw_info *wlc_hw, uint offset, u16 v)
+void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset, u16 v)
{
- wlc_bmac_write_objmem(wlc_hw, offset, v, OBJADDR_SHM_SEL);
+ brcms_b_write_objmem(wlc_hw, offset, v, OBJADDR_SHM_SEL);
}
static u16
-wlc_bmac_read_objmem(struct wlc_hw_info *wlc_hw, uint offset, u32 sel)
+brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel)
{
d11regs_t *regs = wlc_hw->regs;
volatile u16 *objdata_lo = (volatile u16 *)&regs->objdata;
@@ -3457,7 +3446,8 @@ wlc_bmac_read_objmem(struct wlc_hw_info *wlc_hw, uint offset, u32 sel)
}
static void
-wlc_bmac_write_objmem(struct wlc_hw_info *wlc_hw, uint offset, u16 v, u32 sel)
+brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v,
+ u32 sel)
{
d11regs_t *regs = wlc_hw->regs;
volatile u16 *objdata_lo = (volatile u16 *)&regs->objdata;
@@ -3478,8 +3468,8 @@ wlc_bmac_write_objmem(struct wlc_hw_info *wlc_hw, uint offset, u16 v, u32 sel)
* 'sel' selects the type of memory
*/
void
-wlc_bmac_copyto_objmem(struct wlc_hw_info *wlc_hw, uint offset, const void *buf,
- int len, u32 sel)
+brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw, uint offset,
+ const void *buf, int len, u32 sel)
{
u16 v;
const u8 *p = (const u8 *)buf;
@@ -3490,7 +3480,7 @@ wlc_bmac_copyto_objmem(struct wlc_hw_info *wlc_hw, uint offset, const void *buf,
for (i = 0; i < len; i += 2) {
v = p[i] | (p[i + 1] << 8);
- wlc_bmac_write_objmem(wlc_hw, offset + i, v, sel);
+ brcms_b_write_objmem(wlc_hw, offset + i, v, sel);
}
}
@@ -3500,7 +3490,7 @@ wlc_bmac_copyto_objmem(struct wlc_hw_info *wlc_hw, uint offset, const void *buf,
* 'sel' selects the type of memory
*/
void
-wlc_bmac_copyfrom_objmem(struct wlc_hw_info *wlc_hw, uint offset, void *buf,
+brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset, void *buf,
int len, u32 sel)
{
u16 v;
@@ -3511,13 +3501,14 @@ wlc_bmac_copyfrom_objmem(struct wlc_hw_info *wlc_hw, uint offset, void *buf,
return;
for (i = 0; i < len; i += 2) {
- v = wlc_bmac_read_objmem(wlc_hw, offset + i, sel);
+ v = brcms_b_read_objmem(wlc_hw, offset + i, sel);
p[i] = v & 0xFF;
p[i + 1] = (v >> 8) & 0xFF;
}
}
-void wlc_bmac_copyfrom_vars(struct wlc_hw_info *wlc_hw, char **buf, uint *len)
+void brcms_b_copyfrom_vars(struct brcms_hardware *wlc_hw, char **buf,
+ uint *len)
{
BCMMSG(wlc_hw->wlc->wiphy, "nvram vars totlen=%d\n",
wlc_hw->vars_size);
@@ -3526,7 +3517,7 @@ void wlc_bmac_copyfrom_vars(struct wlc_hw_info *wlc_hw, char **buf, uint *len)
*len = wlc_hw->vars_size;
}
-void wlc_bmac_retrylimit_upd(struct wlc_hw_info *wlc_hw, u16 SRL, u16 LRL)
+void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw, u16 SRL, u16 LRL)
{
wlc_hw->SRL = SRL;
wlc_hw->LRL = LRL;
@@ -3544,7 +3535,7 @@ void wlc_bmac_retrylimit_upd(struct wlc_hw_info *wlc_hw, u16 SRL, u16 LRL)
}
}
-void wlc_bmac_pllreq(struct wlc_hw_info *wlc_hw, bool set, mbool req_bit)
+void brcms_b_pllreq(struct brcms_hardware *wlc_hw, bool set, mbool req_bit)
{
if (set) {
if (mboolisset(wlc_hw->pllreq, req_bit))
@@ -3552,9 +3543,9 @@ void wlc_bmac_pllreq(struct wlc_hw_info *wlc_hw, bool set, mbool req_bit)
mboolset(wlc_hw->pllreq, req_bit);
- if (mboolisset(wlc_hw->pllreq, WLC_PLLREQ_FLIP)) {
+ if (mboolisset(wlc_hw->pllreq, BRCMS_PLLREQ_FLIP)) {
if (!wlc_hw->sbclk) {
- wlc_bmac_xtal(wlc_hw, ON);
+ brcms_b_xtal(wlc_hw, ON);
}
}
} else {
@@ -3563,9 +3554,9 @@ void wlc_bmac_pllreq(struct wlc_hw_info *wlc_hw, bool set, mbool req_bit)
mboolclr(wlc_hw->pllreq, req_bit);
- if (mboolisset(wlc_hw->pllreq, WLC_PLLREQ_FLIP)) {
+ if (mboolisset(wlc_hw->pllreq, BRCMS_PLLREQ_FLIP)) {
if (wlc_hw->sbclk) {
- wlc_bmac_xtal(wlc_hw, OFF);
+ brcms_b_xtal(wlc_hw, OFF);
}
}
}
@@ -3573,13 +3564,12 @@ void wlc_bmac_pllreq(struct wlc_hw_info *wlc_hw, bool set, mbool req_bit)
return;
}
-u16 wlc_bmac_rate_shm_offset(struct wlc_hw_info *wlc_hw, u8 rate)
+u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate)
{
u16 table_ptr;
u8 phy_rate, index;
/* get the phy specific rate encoding for the PLCP SIGNAL field */
- /* XXX4321 fixup needed ? */
if (IS_OFDM(rate))
table_ptr = M_RT_DIRMAP_A;
else
@@ -3588,16 +3578,16 @@ u16 wlc_bmac_rate_shm_offset(struct wlc_hw_info *wlc_hw, u8 rate)
/* for a given rate, the LS-nibble of the PLCP SIGNAL field is
* the index into the rate table.
*/
- phy_rate = rate_info[rate] & WLC_RATE_MASK;
+ phy_rate = rate_info[rate] & BRCMS_RATE_MASK;
index = phy_rate & 0xf;
/* Find the SHM pointer to the rate table entry by looking in the
* Direct-map Table
*/
- return 2 * wlc_bmac_read_shm(wlc_hw, table_ptr + (index * 2));
+ return 2 * brcms_b_read_shm(wlc_hw, table_ptr + (index * 2));
}
-void wlc_bmac_antsel_set(struct wlc_hw_info *wlc_hw, u32 antsel_avail)
+void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
{
wlc_hw->antsel_avail = antsel_avail;
}
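The suspend/enable pair renamed above, brcms_c_suspend_mac_and_wait() and brcms_c_enable_mac(), is normally used as a bracket around updates that must not race the running ucode. A minimal caller-side sketch, not part of the patch: the function name and the SHM offset are hypothetical placeholders, only the brcms_c_*/brcms_b_* calls keep the signatures shown in this diff, and the usual brcmsmac headers (main.h, bmac.h) are assumed to be in scope.

static void example_update_while_suspended(struct brcms_c_info *wlc)
{
	/* quiesce the PSM before touching ucode-owned state */
	brcms_c_suspend_mac_and_wait(wlc);

	/* hypothetical shared-memory update done while the MAC is idle */
	brcms_b_write_shm(wlc->hw, 0x80 /* hypothetical offset */, 0x1);

	/* let the ucode run again */
	brcms_c_enable_mac(wlc);
}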
diff --git a/drivers/staging/brcm80211/brcmsmac/bmac.h b/drivers/staging/brcm80211/brcmsmac/bmac.h
new file mode 100644
index 00000000000..3c9ad4f3bd2
--- /dev/null
+++ b/drivers/staging/brcm80211/brcmsmac/bmac.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _BRCM_BOTTOM_MAC_H_
+#define _BRCM_BOTTOM_MAC_H_
+
+#include <brcmu_wifi.h>
+#include "types.h"
+
+/* dup state between BMAC(struct brcms_hardware) and HIGH(struct brcms_c_info)
+ driver */
+struct brcms_b_state {
+ u32 machwcap; /* mac hw capability */
+ u32 preamble_ovr; /* preamble override */
+};
+
+enum {
+ IOV_BMAC_DIAG,
+ IOV_BMAC_SBGPIOTIMERVAL,
+ IOV_BMAC_SBGPIOOUT,
+ IOV_BMAC_CCGPIOCTRL, /* CC GPIOCTRL REG */
+ IOV_BMAC_CCGPIOOUT, /* CC GPIOOUT REG */
+ IOV_BMAC_CCGPIOOUTEN, /* CC GPIOOUTEN REG */
+ IOV_BMAC_CCGPIOIN, /* CC GPIOIN REG */
+ IOV_BMAC_WPSGPIO, /* WPS push button GPIO pin */
+ IOV_BMAC_OTPDUMP,
+ IOV_BMAC_OTPSTAT,
+ IOV_BMAC_PCIEASPM, /* obfuscation clkreq/aspm control */
+ IOV_BMAC_PCIEADVCORRMASK, /* advanced correctable error mask */
+ IOV_BMAC_PCIECLKREQ, /* PCIE 1.1 clockreq enab support */
+ IOV_BMAC_PCIELCREG, /* PCIE LCREG */
+ IOV_BMAC_SBGPIOTIMERMASK,
+ IOV_BMAC_RFDISABLEDLY,
+ IOV_BMAC_PCIEREG, /* PCIE REG */
+ IOV_BMAC_PCICFGREG, /* PCI Config register */
+ IOV_BMAC_PCIESERDESREG, /* PCIE SERDES REG (dev, 0}offset) */
+ IOV_BMAC_PCIEGPIOOUT, /* PCIEOUT REG */
+ IOV_BMAC_PCIEGPIOOUTEN, /* PCIEOUTEN REG */
+ IOV_BMAC_PCIECLKREQENCTRL, /* clkreqenctrl REG (PCIE REV > 6.0) */
+ IOV_BMAC_DMALPBK,
+ IOV_BMAC_CCREG,
+ IOV_BMAC_COREREG,
+ IOV_BMAC_SDCIS,
+ IOV_BMAC_SDIO_DRIVE,
+ IOV_BMAC_OTPW,
+ IOV_BMAC_NVOTPW,
+ IOV_BMAC_SROM,
+ IOV_BMAC_SRCRC,
+ IOV_BMAC_CIS_SOURCE,
+ IOV_BMAC_CISVAR,
+ IOV_BMAC_OTPLOCK,
+ IOV_BMAC_OTP_CHIPID,
+ IOV_BMAC_CUSTOMVAR1,
+ IOV_BMAC_BOARDFLAGS,
+ IOV_BMAC_BOARDFLAGS2,
+ IOV_BMAC_WPSLED,
+ IOV_BMAC_NVRAM_SOURCE,
+ IOV_BMAC_OTP_RAW_READ,
+ IOV_BMAC_LAST
+};
+
+extern int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
+ uint unit, bool piomode, void *regsva, uint bustype,
+ void *btparam);
+extern int brcms_b_detach(struct brcms_c_info *wlc);
+extern void brcms_b_watchdog(void *arg);
+
+/* up/down, reset, clk */
+extern void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw,
+ uint offset, const void *buf, int len,
+ u32 sel);
+extern void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
+ void *buf, int len, u32 sel);
+#define brcms_b_copyfrom_shm(wlc_hw, offset, buf, len) \
+ brcms_b_copyfrom_objmem(wlc_hw, offset, buf, len, OBJADDR_SHM_SEL)
+#define brcms_b_copyto_shm(wlc_hw, offset, buf, len) \
+ brcms_b_copyto_objmem(wlc_hw, offset, buf, len, OBJADDR_SHM_SEL)
+
+extern void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
+extern void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
+extern void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
+extern void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
+extern void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
+extern void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
+extern void brcms_b_reset(struct brcms_hardware *wlc_hw);
+extern void brcms_b_init(struct brcms_hardware *wlc_hw, chanspec_t chanspec,
+ bool mute);
+extern int brcms_b_up_prep(struct brcms_hardware *wlc_hw);
+extern int brcms_b_up_finish(struct brcms_hardware *wlc_hw);
+extern int brcms_b_bmac_down_prep(struct brcms_hardware *wlc_hw);
+extern int brcms_b_down_finish(struct brcms_hardware *wlc_hw);
+extern void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
+
+/* chanspec, ucode interface */
+extern void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw,
+ chanspec_t chanspec,
+ bool mute, struct txpwr_limits *txpwr);
+
+extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
+ uint *blocks);
+extern void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask,
+ u16 val, int bands);
+extern void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
+extern u16 brcms_b_mhf_get(struct brcms_hardware *wlc_hw, u8 idx, int bands);
+extern void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
+extern u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
+extern void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw,
+ u8 antsel_type);
+extern int brcms_b_state_get(struct brcms_hardware *wlc_hw,
+ struct brcms_b_state *state);
+extern void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset,
+ u16 v);
+extern u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
+extern void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw,
+ int offset, int len, void *buf);
+extern void brcms_b_copyfrom_vars(struct brcms_hardware *wlc_hw, char **buf,
+ uint *len);
+
+extern void brcms_b_hw_etheraddr(struct brcms_hardware *wlc_hw,
+ u8 *ea);
+
+extern bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw);
+extern void brcms_b_set_shortslot(struct brcms_hardware *wlc_hw,
+ bool shortslot);
+extern void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw,
+ u8 stf_mode);
+
+extern void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw);
+
+extern void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
+ u32 override_bit);
+extern void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
+ u32 override_bit);
+
+extern void brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw,
+ int match_reg_offset,
+ const u8 *addr);
+extern void brcms_b_write_hw_bcntemplates(struct brcms_hardware *wlc_hw,
+ void *bcn, int len, bool both);
+
+extern void brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr,
+ u32 *tsf_h_ptr);
+extern void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin);
+extern void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax);
+
+extern void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw, u16 SRL,
+ u16 LRL);
+
+extern void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw);
+
+
+/* API for BMAC driver (e.g. wlc_phy.c etc) */
+
+extern void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
+extern void brcms_b_pllreq(struct brcms_hardware *wlc_hw, bool set,
+ mbool req_bit);
+extern void brcms_b_hw_up(struct brcms_hardware *wlc_hw);
+extern u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
+extern void brcms_b_antsel_set(struct brcms_hardware *wlc_hw,
+ u32 antsel_avail);
+
+#endif /* _BRCM_BOTTOM_MAC_H_ */
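The header added above exposes the shared-memory accessors as thin objmem wrappers: brcms_b_copyto_shm()/brcms_b_copyfrom_shm() just pin sel to OBJADDR_SHM_SEL, and the copy helpers pack the buffer as little-endian 16-bit words (see brcms_b_read_objmem()/brcms_b_write_objmem() in the bmac.c hunks above). A minimal usage sketch, not part of the patch: the function name and offsets are hypothetical, only the brcms_b_* calls keep the signatures declared in this header.

#include "bmac.h"

#define HYP_SHM_WORD	0x80	/* hypothetical SHM offset */

static void example_shm_access(struct brcms_hardware *wlc_hw,
			       const u8 *buf, int len)
{
	u16 flags;

	/* 16-bit read-modify-write of a single SHM word */
	flags = brcms_b_read_shm(wlc_hw, HYP_SHM_WORD);
	brcms_b_write_shm(wlc_hw, HYP_SHM_WORD, flags | 0x1);

	/* bulk copy; 'len' should be even, bytes go low/high into each word */
	brcms_b_copyto_shm(wlc_hw, HYP_SHM_WORD + 2, buf, len);
}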
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_channel.c b/drivers/staging/brcm80211/brcmsmac/channel.c
index a3a2bf9b4f1..f59693e1d8a 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_channel.c
+++ b/drivers/staging/brcm80211/brcmsmac/channel.c
@@ -14,109 +14,99 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <bcmdefs.h>
-#include <bcmutils.h>
-#include <bcmnvram.h>
-#include <aiutils.h>
-#include <sbhnddma.h>
-#include <wlioctl.h>
-
-#include "wlc_types.h"
-#include "d11.h"
-#include "wlc_cfg.h"
-#include "wlc_scb.h"
-#include "wlc_pub.h"
-#include "wlc_key.h"
-#include "phy/wlc_phy_hal.h"
-#include "wlc_bmac.h"
-#include "wlc_rate.h"
-#include "wlc_channel.h"
-#include "wlc_main.h"
-#include "wlc_stf.h"
-#include "wl_dbg.h"
-
-#define VALID_CHANNEL20_DB(wlc, val) wlc_valid_channel20_db((wlc)->cmi, val)
+#include <net/mac80211.h>
+
+#include <defs.h>
+#include "pub.h"
+#include "phy/phy_hal.h"
+#include "bmac.h"
+#include "main.h"
+#include "stf.h"
+#include "channel.h"
+
+#define VALID_CHANNEL20_DB(wlc, val) brcms_c_valid_channel20_db((wlc)->cmi, val)
#define VALID_CHANNEL20_IN_BAND(wlc, bandunit, val) \
- wlc_valid_channel20_in_band((wlc)->cmi, bandunit, val)
-#define VALID_CHANNEL20(wlc, val) wlc_valid_channel20((wlc)->cmi, val)
+ brcms_c_valid_channel20_in_band((wlc)->cmi, bandunit, val)
+#define VALID_CHANNEL20(wlc, val) brcms_c_valid_channel20((wlc)->cmi, val)
-typedef struct wlc_cm_band {
- u8 locale_flags; /* locale_info_t flags */
+struct brcms_cm_band {
+ u8 locale_flags; /* struct locale_info flags */
chanvec_t valid_channels; /* List of valid channels in the country */
const chanvec_t *restricted_channels; /* List of restricted use channels */
const chanvec_t *radar_channels; /* List of radar sensitive channels */
u8 PAD[8];
-} wlc_cm_band_t;
+};
-struct wlc_cm_info {
- struct wlc_pub *pub;
- struct wlc_info *wlc;
- char srom_ccode[WLC_CNTRY_BUF_SZ]; /* Country Code in SROM */
+struct brcms_cm_info {
+ struct brcms_pub *pub;
+ struct brcms_c_info *wlc;
+ char srom_ccode[BRCM_CNTRY_BUF_SZ]; /* Country Code in SROM */
uint srom_regrev; /* Regulatory Rev for the SROM ccode */
- const country_info_t *country; /* current country def */
- char ccode[WLC_CNTRY_BUF_SZ]; /* current internal Country Code */
+ const struct country_info *country; /* current country def */
+ char ccode[BRCM_CNTRY_BUF_SZ]; /* current internal Country Code */
uint regrev; /* current Regulatory Revision */
- char country_abbrev[WLC_CNTRY_BUF_SZ]; /* current advertised ccode */
- wlc_cm_band_t bandstate[MAXBANDS]; /* per-band state (one per phy/radio) */
+ char country_abbrev[BRCM_CNTRY_BUF_SZ]; /* current advertised ccode */
+ /* per-band state (one per phy/radio) */
+ struct brcms_cm_band bandstate[MAXBANDS];
/* quiet channels currently for radar sensitivity or 11h support */
chanvec_t quiet_channels; /* channels on which we cannot transmit */
};
-static int wlc_channels_init(wlc_cm_info_t *wlc_cm,
- const country_info_t *country);
-static void wlc_set_country_common(wlc_cm_info_t *wlc_cm,
+static int brcms_c_channels_init(struct brcms_cm_info *wlc_cm,
+ const struct country_info *country);
+static void brcms_c_set_country_common(struct brcms_cm_info *wlc_cm,
const char *country_abbrev,
const char *ccode, uint regrev,
- const country_info_t *country);
-static int wlc_set_countrycode(wlc_cm_info_t *wlc_cm, const char *ccode);
-static int wlc_set_countrycode_rev(wlc_cm_info_t *wlc_cm,
+ const struct country_info *country);
+static int brcms_c_set_countrycode(struct brcms_cm_info *wlc_cm,
+ const char *ccode);
+static int brcms_c_set_countrycode_rev(struct brcms_cm_info *wlc_cm,
const char *country_abbrev,
const char *ccode, int regrev);
-static int wlc_country_aggregate_map(wlc_cm_info_t *wlc_cm, const char *ccode,
- char *mapped_ccode, uint *mapped_regrev);
-static const country_info_t *wlc_country_lookup_direct(const char *ccode,
- uint regrev);
-static const country_info_t *wlc_countrycode_map(wlc_cm_info_t *wlc_cm,
- const char *ccode,
- char *mapped_ccode,
- uint *mapped_regrev);
-static void wlc_channels_commit(wlc_cm_info_t *wlc_cm);
-static void wlc_quiet_channels_reset(wlc_cm_info_t *wlc_cm);
-static bool wlc_quiet_chanspec(wlc_cm_info_t *wlc_cm, chanspec_t chspec);
-static bool wlc_valid_channel20_db(wlc_cm_info_t *wlc_cm, uint val);
-static bool wlc_valid_channel20_in_band(wlc_cm_info_t *wlc_cm, uint bandunit,
- uint val);
-static bool wlc_valid_channel20(wlc_cm_info_t *wlc_cm, uint val);
-static const country_info_t *wlc_country_lookup(struct wlc_info *wlc,
- const char *ccode);
-static void wlc_locale_get_channels(const locale_info_t *locale,
+static int brcms_c_country_aggregate_map(struct brcms_cm_info *wlc_cm,
+ const char *ccode,
+ char *mapped_ccode, uint *mapped_regrev);
+
+static const struct country_info *
+brcms_c_country_lookup_direct(const char *ccode, uint regrev);
+
+static const struct country_info *
+brcms_c_countrycode_map(struct brcms_cm_info *wlc_cm,
+ const char *ccode, char *mapped_ccode,
+ uint *mapped_regrev);
+
+static void brcms_c_channels_commit(struct brcms_cm_info *wlc_cm);
+static void brcms_c_quiet_channels_reset(struct brcms_cm_info *wlc_cm);
+static bool brcms_c_quiet_chanspec(struct brcms_cm_info *wlc_cm,
+ chanspec_t chspec);
+static bool brcms_c_valid_channel20_db(struct brcms_cm_info *wlc_cm, uint val);
+static bool brcms_c_valid_channel20_in_band(struct brcms_cm_info *wlc_cm,
+ uint bandunit, uint val);
+static bool brcms_c_valid_channel20(struct brcms_cm_info *wlc_cm, uint val);
+
+static const struct country_info *
+brcms_c_country_lookup(struct brcms_c_info *wlc, const char *ccode);
+
+static void brcms_c_locale_get_channels(const struct locale_info *locale,
chanvec_t *valid_channels);
-static const locale_info_t *wlc_get_locale_2g(u8 locale_idx);
-static const locale_info_t *wlc_get_locale_5g(u8 locale_idx);
-static bool wlc_japan(struct wlc_info *wlc);
-static bool wlc_japan_ccode(const char *ccode);
-static void wlc_channel_min_txpower_limits_with_local_constraint(wlc_cm_info_t *
- wlc_cm,
- struct
- txpwr_limits
- *txpwr,
- u8
- local_constraint_qdbm);
-static void wlc_locale_add_channels(chanvec_t *target,
+static const struct locale_info *brcms_c_get_locale_2g(u8 locale_idx);
+static const struct locale_info *brcms_c_get_locale_5g(u8 locale_idx);
+static bool brcms_c_japan(struct brcms_c_info *wlc);
+static bool brcms_c_japan_ccode(const char *ccode);
+static void brcms_c_channel_min_txpower_limits_with_local_constraint(
+ struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr,
+ u8 local_constraint_qdbm);
+static void brcms_c_locale_add_channels(chanvec_t *target,
const chanvec_t *channels);
-static const locale_mimo_info_t *wlc_get_mimo_2g(u8 locale_idx);
-static const locale_mimo_info_t *wlc_get_mimo_5g(u8 locale_idx);
+static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx);
+static const struct locale_mimo_info *brcms_c_get_mimo_5g(u8 locale_idx);
/* QDB() macro takes a dB value and converts to a quarter dB value */
#ifdef QDB
#undef QDB
#endif
-#define QDB(n) ((n) * WLC_TXPWR_DB_FACTOR)
+#define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR)
/* Regulatory Matrix Spreadsheet (CLM) MIMO v3.7.9 */
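QDB() scales whole dB into the quarter-dB units used by the locale tables that follow; the raw entry 50 annotated "12.5 dBm" in locale_11n below implies BRCMS_TXPWR_DB_FACTOR is 4. A worked example, illustrative only and assuming that factor:

/*
 * QDB(19) == 19 * BRCMS_TXPWR_DB_FACTOR == 76, i.e. a 19 dBm limit stored
 * as 76 quarter-dBm; 76 / 4 recovers 19 dB exactly, and 50 / 4 == 12.5 dBm
 * matches the locale_11n comment below.
 */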
@@ -398,7 +388,7 @@ static const chanvec_t *g_table_locale_base[] = {
&locale_5g_HIGH4
};
-static void wlc_locale_add_channels(chanvec_t *target,
+static void brcms_c_locale_add_channels(chanvec_t *target,
const chanvec_t *channels)
{
u8 i;
@@ -407,7 +397,7 @@ static void wlc_locale_add_channels(chanvec_t *target,
}
}
-static void wlc_locale_get_channels(const locale_info_t *locale,
+static void brcms_c_locale_get_channels(const struct locale_info *locale,
chanvec_t *channels)
{
u8 i;
@@ -416,7 +406,7 @@ static void wlc_locale_get_channels(const locale_info_t *locale,
for (i = 0; i < ARRAY_SIZE(g_table_locale_base); i++) {
if (locale->valid_channels & (1 << i)) {
- wlc_locale_add_channels(channels,
+ brcms_c_locale_add_channels(channels,
g_table_locale_base[i]);
}
}
@@ -425,43 +415,43 @@ static void wlc_locale_get_channels(const locale_info_t *locale,
/*
* Locale Definitions - 2.4 GHz
*/
-static const locale_info_t locale_i = { /* locale i. channel 1 - 13 */
+static const struct locale_info locale_i = { /* locale i. channel 1 - 13 */
LOCALE_CHAN_01_11 | LOCALE_CHAN_12_13,
LOCALE_RADAR_SET_NONE,
LOCALE_RESTRICTED_SET_2G_SHORT,
{QDB(19), QDB(19), QDB(19),
QDB(19), QDB(19), QDB(19)},
{20, 20, 20, 0},
- WLC_EIRP
+ BRCMS_EIRP
};
/*
* Locale Definitions - 5 GHz
*/
-static const locale_info_t locale_11 = {
+static const struct locale_info locale_11 = {
/* locale 11. channel 36 - 48, 52 - 64, 100 - 140, 149 - 165 */
LOCALE_CHAN_36_64 | LOCALE_CHAN_100_140 | LOCALE_CHAN_149_165,
LOCALE_RADAR_SET_1,
LOCALE_RESTRICTED_NONE,
{QDB(21), QDB(21), QDB(21), QDB(21), QDB(21)},
{23, 23, 23, 30, 30},
- WLC_EIRP | WLC_DFS_EU
+ BRCMS_EIRP | BRCMS_DFS_EU
};
#define LOCALE_2G_IDX_i 0
-static const locale_info_t *g_locale_2g_table[] = {
+static const struct locale_info *g_locale_2g_table[] = {
&locale_i
};
#define LOCALE_5G_IDX_11 0
-static const locale_info_t *g_locale_5g_table[] = {
+static const struct locale_info *g_locale_5g_table[] = {
&locale_11
};
/*
* MIMO Locale Definitions - 2.4 GHz
*/
-static const locale_mimo_info_t locale_bn = {
+static const struct locale_mimo_info locale_bn = {
{QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
QDB(13), QDB(13), QDB(13)},
@@ -474,21 +464,21 @@ static const locale_mimo_info_t locale_bn = {
/* locale mimo 2g indexes */
#define LOCALE_MIMO_IDX_bn 0
-static const locale_mimo_info_t *g_mimo_2g_table[] = {
+static const struct locale_mimo_info *g_mimo_2g_table[] = {
&locale_bn
};
/*
* MIMO Locale Definitions - 5 GHz
*/
-static const locale_mimo_info_t locale_11n = {
+static const struct locale_mimo_info locale_11n = {
{ /* 12.5 dBm */ 50, 50, 50, QDB(15), QDB(15)},
{QDB(14), QDB(15), QDB(15), QDB(15), QDB(15)},
0
};
#define LOCALE_MIMO_IDX_11n 0
-static const locale_mimo_info_t *g_mimo_5g_table[] = {
+static const struct locale_mimo_info *g_mimo_5g_table[] = {
&locale_11n
};
@@ -510,8 +500,8 @@ static const locale_mimo_info_t *g_mimo_5g_table[] = {
#define LOCALES(band2, band5, mimo2, mimo5) {LC_2G(band2), LC_5G(band5), LC(mimo2), LC(mimo5)}
static const struct {
- char abbrev[WLC_CNTRY_BUF_SZ]; /* country abbreviation */
- country_info_t country;
+ char abbrev[BRCM_CNTRY_BUF_SZ]; /* country abbreviation */
+ struct country_info country;
} cntry_locales[] = {
{
"X2", LOCALES(i, 11, bn, 11n)}, /* Worldwide RoW 2 */
@@ -594,7 +584,7 @@ struct chan20_info chan20_info[] = {
};
#endif /* SUPPORT_40MHZ */
-static const locale_info_t *wlc_get_locale_2g(u8 locale_idx)
+static const struct locale_info *brcms_c_get_locale_2g(u8 locale_idx)
{
if (locale_idx >= ARRAY_SIZE(g_locale_2g_table)) {
return NULL; /* error condition */
@@ -602,7 +592,7 @@ static const locale_info_t *wlc_get_locale_2g(u8 locale_idx)
return g_locale_2g_table[locale_idx];
}
-static const locale_info_t *wlc_get_locale_5g(u8 locale_idx)
+static const struct locale_info *brcms_c_get_locale_5g(u8 locale_idx)
{
if (locale_idx >= ARRAY_SIZE(g_locale_5g_table)) {
return NULL; /* error condition */
@@ -610,7 +600,7 @@ static const locale_info_t *wlc_get_locale_5g(u8 locale_idx)
return g_locale_5g_table[locale_idx];
}
-static const locale_mimo_info_t *wlc_get_mimo_2g(u8 locale_idx)
+static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx)
{
if (locale_idx >= ARRAY_SIZE(g_mimo_2g_table)) {
return NULL;
@@ -618,7 +608,7 @@ static const locale_mimo_info_t *wlc_get_mimo_2g(u8 locale_idx)
return g_mimo_2g_table[locale_idx];
}
-static const locale_mimo_info_t *wlc_get_mimo_5g(u8 locale_idx)
+static const struct locale_mimo_info *brcms_c_get_mimo_5g(u8 locale_idx)
{
if (locale_idx >= ARRAY_SIZE(g_mimo_5g_table)) {
return NULL;
@@ -626,17 +616,17 @@ static const locale_mimo_info_t *wlc_get_mimo_5g(u8 locale_idx)
return g_mimo_5g_table[locale_idx];
}
-wlc_cm_info_t *wlc_channel_mgr_attach(struct wlc_info *wlc)
+struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
{
- wlc_cm_info_t *wlc_cm;
- char country_abbrev[WLC_CNTRY_BUF_SZ];
- const country_info_t *country;
- struct wlc_pub *pub = wlc->pub;
+ struct brcms_cm_info *wlc_cm;
+ char country_abbrev[BRCM_CNTRY_BUF_SZ];
+ const struct country_info *country;
+ struct brcms_pub *pub = wlc->pub;
char *ccode;
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
- wlc_cm = kzalloc(sizeof(wlc_cm_info_t), GFP_ATOMIC);
+ wlc_cm = kzalloc(sizeof(struct brcms_cm_info), GFP_ATOMIC);
if (wlc_cm == NULL) {
wiphy_err(wlc->wiphy, "wl%d: %s: out of memory", pub->unit,
__func__);
@@ -649,31 +639,33 @@ wlc_cm_info_t *wlc_channel_mgr_attach(struct wlc_info *wlc)
/* store the country code for passing up as a regulatory hint */
ccode = getvar(wlc->pub->vars, "ccode");
if (ccode) {
- strncpy(wlc->pub->srom_ccode, ccode, WLC_CNTRY_BUF_SZ - 1);
+ strncpy(wlc->pub->srom_ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
}
/* internal country information which must match regulatory constraints in firmware */
- memset(country_abbrev, 0, WLC_CNTRY_BUF_SZ);
+ memset(country_abbrev, 0, BRCM_CNTRY_BUF_SZ);
strncpy(country_abbrev, "X2", sizeof(country_abbrev) - 1);
- country = wlc_country_lookup(wlc, country_abbrev);
+ country = brcms_c_country_lookup(wlc, country_abbrev);
/* save default country for exiting 11d regulatory mode */
- strncpy(wlc->country_default, country_abbrev, WLC_CNTRY_BUF_SZ - 1);
+ strncpy(wlc->country_default, country_abbrev, BRCM_CNTRY_BUF_SZ - 1);
/* initialize autocountry_default to driver default */
- strncpy(wlc->autocountry_default, "X2", WLC_CNTRY_BUF_SZ - 1);
+ strncpy(wlc->autocountry_default, "X2", BRCM_CNTRY_BUF_SZ - 1);
- wlc_set_countrycode(wlc_cm, country_abbrev);
+ brcms_c_set_countrycode(wlc_cm, country_abbrev);
return wlc_cm;
}
-void wlc_channel_mgr_detach(wlc_cm_info_t *wlc_cm)
+void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm)
{
kfree(wlc_cm);
}
-u8 wlc_channel_locale_flags_in_band(wlc_cm_info_t *wlc_cm, uint bandunit)
+u8
+brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
+ uint bandunit)
{
return wlc_cm->bandstate[bandunit].locale_flags;
}
@@ -681,20 +673,21 @@ u8 wlc_channel_locale_flags_in_band(wlc_cm_info_t *wlc_cm, uint bandunit)
/* set the driver's current country and regulatory information using a country code
* as the source. Lookup built in country information found with the country code.
*/
-static int wlc_set_countrycode(wlc_cm_info_t *wlc_cm, const char *ccode)
+static int
+brcms_c_set_countrycode(struct brcms_cm_info *wlc_cm, const char *ccode)
{
- char country_abbrev[WLC_CNTRY_BUF_SZ];
- strncpy(country_abbrev, ccode, WLC_CNTRY_BUF_SZ);
- return wlc_set_countrycode_rev(wlc_cm, country_abbrev, ccode, -1);
+ char country_abbrev[BRCM_CNTRY_BUF_SZ];
+ strncpy(country_abbrev, ccode, BRCM_CNTRY_BUF_SZ);
+ return brcms_c_set_countrycode_rev(wlc_cm, country_abbrev, ccode, -1);
}
static int
-wlc_set_countrycode_rev(wlc_cm_info_t *wlc_cm,
+brcms_c_set_countrycode_rev(struct brcms_cm_info *wlc_cm,
const char *country_abbrev,
const char *ccode, int regrev)
{
- const country_info_t *country;
- char mapped_ccode[WLC_CNTRY_BUF_SZ];
+ const struct country_info *country;
+ char mapped_ccode[BRCM_CNTRY_BUF_SZ];
uint mapped_regrev;
/* if regrev is -1, lookup the mapped country code,
@@ -703,12 +696,12 @@ wlc_set_countrycode_rev(wlc_cm_info_t *wlc_cm,
if (regrev == -1) {
/* map the country code to a built-in country code, regrev, and country_info */
country =
- wlc_countrycode_map(wlc_cm, ccode, mapped_ccode,
+ brcms_c_countrycode_map(wlc_cm, ccode, mapped_ccode,
&mapped_regrev);
} else {
/* find the matching built-in country definition */
- country = wlc_country_lookup_direct(ccode, regrev);
- strncpy(mapped_ccode, ccode, WLC_CNTRY_BUF_SZ);
+ country = brcms_c_country_lookup_direct(ccode, regrev);
+ strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ);
mapped_regrev = regrev;
}
@@ -716,7 +709,7 @@ wlc_set_countrycode_rev(wlc_cm_info_t *wlc_cm,
return -EINVAL;
/* set the driver state for the country */
- wlc_set_country_common(wlc_cm, country_abbrev, mapped_ccode,
+ brcms_c_set_country_common(wlc_cm, country_abbrev, mapped_ccode,
mapped_regrev, country);
return 0;
@@ -726,49 +719,49 @@ wlc_set_countrycode_rev(wlc_cm_info_t *wlc_cm,
* as the source. Look up built in country information found with the country code.
*/
static void
-wlc_set_country_common(wlc_cm_info_t *wlc_cm,
+brcms_c_set_country_common(struct brcms_cm_info *wlc_cm,
const char *country_abbrev,
const char *ccode, uint regrev,
- const country_info_t *country)
+ const struct country_info *country)
{
- const locale_mimo_info_t *li_mimo;
- const locale_info_t *locale;
- struct wlc_info *wlc = wlc_cm->wlc;
- char prev_country_abbrev[WLC_CNTRY_BUF_SZ];
+ const struct locale_mimo_info *li_mimo;
+ const struct locale_info *locale;
+ struct brcms_c_info *wlc = wlc_cm->wlc;
+ char prev_country_abbrev[BRCM_CNTRY_BUF_SZ];
/* save current country state */
wlc_cm->country = country;
- memset(&prev_country_abbrev, 0, WLC_CNTRY_BUF_SZ);
+ memset(&prev_country_abbrev, 0, BRCM_CNTRY_BUF_SZ);
strncpy(prev_country_abbrev, wlc_cm->country_abbrev,
- WLC_CNTRY_BUF_SZ - 1);
+ BRCM_CNTRY_BUF_SZ - 1);
- strncpy(wlc_cm->country_abbrev, country_abbrev, WLC_CNTRY_BUF_SZ - 1);
- strncpy(wlc_cm->ccode, ccode, WLC_CNTRY_BUF_SZ - 1);
+ strncpy(wlc_cm->country_abbrev, country_abbrev, BRCM_CNTRY_BUF_SZ - 1);
+ strncpy(wlc_cm->ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
wlc_cm->regrev = regrev;
/* disable/restore nmode based on country regulations */
- li_mimo = wlc_get_mimo_2g(country->locale_mimo_2G);
- if (li_mimo && (li_mimo->flags & WLC_NO_MIMO)) {
- wlc_set_nmode(wlc, OFF);
+ li_mimo = brcms_c_get_mimo_2g(country->locale_mimo_2G);
+ if (li_mimo && (li_mimo->flags & BRCMS_NO_MIMO)) {
+ brcms_c_set_nmode(wlc, OFF);
wlc->stf->no_cddstbc = true;
} else {
wlc->stf->no_cddstbc = false;
if (N_ENAB(wlc->pub) != wlc->protection->nmode_user)
- wlc_set_nmode(wlc, wlc->protection->nmode_user);
+ brcms_c_set_nmode(wlc, wlc->protection->nmode_user);
}
- wlc_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
- wlc_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
+ brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
+ brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
/* set or restore gmode as required by regulatory */
- locale = wlc_get_locale_2g(country->locale_2G);
- if (locale && (locale->flags & WLC_NO_OFDM)) {
- wlc_set_gmode(wlc, GMODE_LEGACY_B, false);
+ locale = brcms_c_get_locale_2g(country->locale_2G);
+ if (locale && (locale->flags & BRCMS_NO_OFDM)) {
+ brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
} else {
- wlc_set_gmode(wlc, wlc->protection->gmode_user, false);
+ brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
}
- wlc_channels_init(wlc_cm, country);
+ brcms_c_channels_init(wlc_cm, country);
return;
}
@@ -776,40 +769,39 @@ wlc_set_country_common(wlc_cm_info_t *wlc_cm,
/* Lookup a country info structure from a null terminated country code
* The lookup is case sensitive.
*/
-static const country_info_t *wlc_country_lookup(struct wlc_info *wlc,
- const char *ccode)
+static const struct country_info *
+brcms_c_country_lookup(struct brcms_c_info *wlc, const char *ccode)
{
- const country_info_t *country;
- char mapped_ccode[WLC_CNTRY_BUF_SZ];
+ const struct country_info *country;
+ char mapped_ccode[BRCM_CNTRY_BUF_SZ];
uint mapped_regrev;
/* map the country code to a built-in country code, regrev, and country_info struct */
- country =
- wlc_countrycode_map(wlc->cmi, ccode, mapped_ccode, &mapped_regrev);
+ country = brcms_c_countrycode_map(wlc->cmi, ccode, mapped_ccode,
+ &mapped_regrev);
return country;
}
-static const country_info_t *wlc_countrycode_map(wlc_cm_info_t *wlc_cm,
- const char *ccode,
- char *mapped_ccode,
- uint *mapped_regrev)
+static const struct country_info *
+brcms_c_countrycode_map(struct brcms_cm_info *wlc_cm, const char *ccode,
+ char *mapped_ccode, uint *mapped_regrev)
{
- struct wlc_info *wlc = wlc_cm->wlc;
- const country_info_t *country;
+ struct brcms_c_info *wlc = wlc_cm->wlc;
+ const struct country_info *country;
uint srom_regrev = wlc_cm->srom_regrev;
const char *srom_ccode = wlc_cm->srom_ccode;
int mapped;
/* check for currently supported ccode size */
- if (strlen(ccode) > (WLC_CNTRY_BUF_SZ - 1)) {
+ if (strlen(ccode) > (BRCM_CNTRY_BUF_SZ - 1)) {
wiphy_err(wlc->wiphy, "wl%d: %s: ccode \"%s\" too long for "
"match\n", wlc->pub->unit, __func__, ccode);
return NULL;
}
/* default mapping is the given ccode and regrev 0 */
- strncpy(mapped_ccode, ccode, WLC_CNTRY_BUF_SZ);
+ strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ);
*mapped_regrev = 0;
/* If the desired country code matches the srom country code,
@@ -822,25 +814,25 @@ static const country_info_t *wlc_countrycode_map(wlc_cm_info_t *wlc_cm,
wiphy_err(wlc->wiphy, "srom_code == ccode %s\n", __func__);
} else {
mapped =
- wlc_country_aggregate_map(wlc_cm, ccode, mapped_ccode,
+ brcms_c_country_aggregate_map(wlc_cm, ccode, mapped_ccode,
mapped_regrev);
}
/* find the matching built-in country definition */
- country = wlc_country_lookup_direct(mapped_ccode, *mapped_regrev);
+ country = brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev);
/* if there is not an exact rev match, default to rev zero */
if (country == NULL && *mapped_regrev != 0) {
*mapped_regrev = 0;
country =
- wlc_country_lookup_direct(mapped_ccode, *mapped_regrev);
+ brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev);
}
return country;
}
static int
-wlc_country_aggregate_map(wlc_cm_info_t *wlc_cm, const char *ccode,
+brcms_c_country_aggregate_map(struct brcms_cm_info *wlc_cm, const char *ccode,
char *mapped_ccode, uint *mapped_regrev)
{
return false;
@@ -849,8 +841,8 @@ wlc_country_aggregate_map(wlc_cm_info_t *wlc_cm, const char *ccode,
/* Lookup a country info structure from a null terminated country
* abbreviation and regrev directly with no translation.
*/
-static const country_info_t *wlc_country_lookup_direct(const char *ccode,
- uint regrev)
+static const struct country_info *
+brcms_c_country_lookup_direct(const char *ccode, uint regrev)
{
uint size, i;
@@ -872,26 +864,27 @@ static const country_info_t *wlc_country_lookup_direct(const char *ccode,
}
static int
-wlc_channels_init(wlc_cm_info_t *wlc_cm, const country_info_t *country)
+brcms_c_channels_init(struct brcms_cm_info *wlc_cm,
+ const struct country_info *country)
{
- struct wlc_info *wlc = wlc_cm->wlc;
+ struct brcms_c_info *wlc = wlc_cm->wlc;
uint i, j;
- struct wlcband *band;
- const locale_info_t *li;
+ struct brcms_band *band;
+ const struct locale_info *li;
chanvec_t sup_chan;
- const locale_mimo_info_t *li_mimo;
+ const struct locale_mimo_info *li_mimo;
band = wlc->band;
for (i = 0; i < NBANDS(wlc);
i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) {
li = BAND_5G(band->bandtype) ?
- wlc_get_locale_5g(country->locale_5G) :
- wlc_get_locale_2g(country->locale_2G);
+ brcms_c_get_locale_5g(country->locale_5G) :
+ brcms_c_get_locale_2g(country->locale_2G);
wlc_cm->bandstate[band->bandunit].locale_flags = li->flags;
li_mimo = BAND_5G(band->bandtype) ?
- wlc_get_mimo_5g(country->locale_mimo_5G) :
- wlc_get_mimo_2g(country->locale_mimo_2G);
+ brcms_c_get_mimo_5g(country->locale_mimo_5G) :
+ brcms_c_get_mimo_2g(country->locale_mimo_2G);
/* merge the mimo non-mimo locale flags */
wlc_cm->bandstate[band->bandunit].locale_flags |=
@@ -907,7 +900,7 @@ wlc_channels_init(wlc_cm_info_t *wlc_cm, const country_info_t *country)
*/
wlc_phy_chanspec_band_validch(band->pi, band->bandtype,
&sup_chan);
- wlc_locale_get_channels(li,
+ brcms_c_locale_get_channels(li,
&wlc_cm->bandstate[band->bandunit].
valid_channels);
for (j = 0; j < sizeof(chanvec_t); j++)
@@ -915,8 +908,8 @@ wlc_channels_init(wlc_cm_info_t *wlc_cm, const country_info_t *country)
vec[j] &= sup_chan.vec[j];
}
- wlc_quiet_channels_reset(wlc_cm);
- wlc_channels_commit(wlc_cm);
+ brcms_c_quiet_channels_reset(wlc_cm);
+ brcms_c_channels_commit(wlc_cm);
return 0;
}
@@ -924,9 +917,9 @@ wlc_channels_init(wlc_cm_info_t *wlc_cm, const country_info_t *country)
/* Update the radio state (enable/disable) and tx power targets
* based on a new set of channel/regulatory information
*/
-static void wlc_channels_commit(wlc_cm_info_t *wlc_cm)
+static void brcms_c_channels_commit(struct brcms_cm_info *wlc_cm)
{
- struct wlc_info *wlc = wlc_cm->wlc;
+ struct brcms_c_info *wlc = wlc_cm->wlc;
uint chan;
struct txpwr_limits txpwr;
@@ -959,25 +952,24 @@ static void wlc_channels_commit(wlc_cm_info_t *wlc_cm)
*/
if (NBANDS(wlc) > 1 || BAND_2G(wlc->band->bandtype)) {
wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
- wlc_japan(wlc) ? true :
+ brcms_c_japan(wlc) ? true :
false);
}
if (wlc->pub->up && chan != INVCHANNEL) {
- wlc_channel_reg_limits(wlc_cm, wlc->chanspec, &txpwr);
- wlc_channel_min_txpower_limits_with_local_constraint(wlc_cm,
- &txpwr,
- WLC_TXPWR_MAX);
+ brcms_c_channel_reg_limits(wlc_cm, wlc->chanspec, &txpwr);
+ brcms_c_channel_min_txpower_limits_with_local_constraint(wlc_cm,
+ &txpwr, BRCMS_TXPWR_MAX);
wlc_phy_txpower_limit_set(wlc->band->pi, &txpwr, wlc->chanspec);
}
}
/* reset the quiet channels vector to the union of the restricted and radar channel sets */
-static void wlc_quiet_channels_reset(wlc_cm_info_t *wlc_cm)
+static void brcms_c_quiet_channels_reset(struct brcms_cm_info *wlc_cm)
{
- struct wlc_info *wlc = wlc_cm->wlc;
+ struct brcms_c_info *wlc = wlc_cm->wlc;
uint i, j;
- struct wlcband *band;
+ struct brcms_band *band;
const chanvec_t *chanvec;
memset(&wlc_cm->quiet_channels, 0, sizeof(chanvec_t));
@@ -994,7 +986,8 @@ static void wlc_quiet_channels_reset(wlc_cm_info_t *wlc_cm)
}
}
-static bool wlc_quiet_chanspec(wlc_cm_info_t *wlc_cm, chanspec_t chspec)
+static bool
+brcms_c_quiet_chanspec(struct brcms_cm_info *wlc_cm, chanspec_t chspec)
{
return N_ENAB(wlc_cm->wlc->pub) && CHSPEC_IS40(chspec) ?
(isset
@@ -1011,9 +1004,9 @@ static bool wlc_quiet_chanspec(wlc_cm_info_t *wlc_cm, chanspec_t chspec)
/* Is the channel valid for the current locale? (but don't consider channels not
* available due to bandlocking)
*/
-static bool wlc_valid_channel20_db(wlc_cm_info_t *wlc_cm, uint val)
+static bool brcms_c_valid_channel20_db(struct brcms_cm_info *wlc_cm, uint val)
{
- struct wlc_info *wlc = wlc_cm->wlc;
+ struct brcms_c_info *wlc = wlc_cm->wlc;
return VALID_CHANNEL20(wlc, val) ||
(!wlc->bandlocked
@@ -1021,17 +1014,17 @@ static bool wlc_valid_channel20_db(wlc_cm_info_t *wlc_cm, uint val)
}
/* Is the channel valid for the current locale and specified band? */
-static bool
-wlc_valid_channel20_in_band(wlc_cm_info_t *wlc_cm, uint bandunit, uint val)
+static bool brcms_c_valid_channel20_in_band(struct brcms_cm_info *wlc_cm,
+ uint bandunit, uint val)
{
return ((val < MAXCHANNEL)
&& isset(wlc_cm->bandstate[bandunit].valid_channels.vec, val));
}
/* Is the channel valid for the current locale and current band? */
-static bool wlc_valid_channel20(wlc_cm_info_t *wlc_cm, uint val)
+static bool brcms_c_valid_channel20(struct brcms_cm_info *wlc_cm, uint val)
{
- struct wlc_info *wlc = wlc_cm->wlc;
+ struct brcms_c_info *wlc = wlc_cm->wlc;
return ((val < MAXCHANNEL) &&
isset(wlc_cm->bandstate[wlc->band->bandunit].valid_channels.vec,
@@ -1039,10 +1032,9 @@ static bool wlc_valid_channel20(wlc_cm_info_t *wlc_cm, uint val)
}
static void
-wlc_channel_min_txpower_limits_with_local_constraint(wlc_cm_info_t *wlc_cm,
- struct txpwr_limits *txpwr,
- u8
- local_constraint_qdbm)
+brcms_c_channel_min_txpower_limits_with_local_constraint(
+ struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr,
+ u8 local_constraint_qdbm)
{
int j;
@@ -1057,66 +1049,66 @@ wlc_channel_min_txpower_limits_with_local_constraint(wlc_cm_info_t *wlc_cm,
}
/* 20 MHz Legacy OFDM CDD */
- for (j = 0; j < WLC_NUM_RATES_OFDM; j++) {
+ for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++) {
txpwr->ofdm_cdd[j] =
min(txpwr->ofdm_cdd[j], local_constraint_qdbm);
}
/* 40 MHz Legacy OFDM SISO */
- for (j = 0; j < WLC_NUM_RATES_OFDM; j++) {
+ for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++) {
txpwr->ofdm_40_siso[j] =
min(txpwr->ofdm_40_siso[j], local_constraint_qdbm);
}
/* 40 MHz Legacy OFDM CDD */
- for (j = 0; j < WLC_NUM_RATES_OFDM; j++) {
+ for (j = 0; j < BRCMS_NUM_RATES_OFDM; j++) {
txpwr->ofdm_40_cdd[j] =
min(txpwr->ofdm_40_cdd[j], local_constraint_qdbm);
}
/* 20MHz MCS 0-7 SISO */
- for (j = 0; j < WLC_NUM_RATES_MCS_1_STREAM; j++) {
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) {
txpwr->mcs_20_siso[j] =
min(txpwr->mcs_20_siso[j], local_constraint_qdbm);
}
/* 20MHz MCS 0-7 CDD */
- for (j = 0; j < WLC_NUM_RATES_MCS_1_STREAM; j++) {
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) {
txpwr->mcs_20_cdd[j] =
min(txpwr->mcs_20_cdd[j], local_constraint_qdbm);
}
/* 20MHz MCS 0-7 STBC */
- for (j = 0; j < WLC_NUM_RATES_MCS_1_STREAM; j++) {
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) {
txpwr->mcs_20_stbc[j] =
min(txpwr->mcs_20_stbc[j], local_constraint_qdbm);
}
/* 20MHz MCS 8-15 MIMO */
- for (j = 0; j < WLC_NUM_RATES_MCS_2_STREAM; j++)
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_2_STREAM; j++)
txpwr->mcs_20_mimo[j] =
min(txpwr->mcs_20_mimo[j], local_constraint_qdbm);
/* 40MHz MCS 0-7 SISO */
- for (j = 0; j < WLC_NUM_RATES_MCS_1_STREAM; j++) {
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) {
txpwr->mcs_40_siso[j] =
min(txpwr->mcs_40_siso[j], local_constraint_qdbm);
}
/* 40MHz MCS 0-7 CDD */
- for (j = 0; j < WLC_NUM_RATES_MCS_1_STREAM; j++) {
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) {
txpwr->mcs_40_cdd[j] =
min(txpwr->mcs_40_cdd[j], local_constraint_qdbm);
}
/* 40MHz MCS 0-7 STBC */
- for (j = 0; j < WLC_NUM_RATES_MCS_1_STREAM; j++) {
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_1_STREAM; j++) {
txpwr->mcs_40_stbc[j] =
min(txpwr->mcs_40_stbc[j], local_constraint_qdbm);
}
/* 40MHz MCS 8-15 MIMO */
- for (j = 0; j < WLC_NUM_RATES_MCS_2_STREAM; j++)
+ for (j = 0; j < BRCMS_NUM_RATES_MCS_2_STREAM; j++)
txpwr->mcs_40_mimo[j] =
min(txpwr->mcs_40_mimo[j], local_constraint_qdbm);
@@ -1126,162 +1118,172 @@ wlc_channel_min_txpower_limits_with_local_constraint(wlc_cm_info_t *wlc_cm,
}
void
-wlc_channel_set_chanspec(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
+brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, chanspec_t chanspec,
u8 local_constraint_qdbm)
{
- struct wlc_info *wlc = wlc_cm->wlc;
+ struct brcms_c_info *wlc = wlc_cm->wlc;
struct txpwr_limits txpwr;
- wlc_channel_reg_limits(wlc_cm, chanspec, &txpwr);
+ brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
- wlc_channel_min_txpower_limits_with_local_constraint(wlc_cm, &txpwr,
+ brcms_c_channel_min_txpower_limits_with_local_constraint(wlc_cm, &txpwr,
local_constraint_qdbm);
- wlc_bmac_set_chanspec(wlc->hw, chanspec,
- (wlc_quiet_chanspec(wlc_cm, chanspec) != 0),
+ brcms_b_set_chanspec(wlc->hw, chanspec,
+ (brcms_c_quiet_chanspec(wlc_cm, chanspec) != 0),
&txpwr);
}
#ifdef POWER_DBG
-static void wlc_phy_txpower_limits_dump(txpwr_limits_t *txpwr)
+static void wlc_phy_txpower_limits_dump(struct txpwr_limits *txpwr)
{
int i;
char buf[80];
char fraction[4][4] = { " ", ".25", ".5 ", ".75" };
sprintf(buf, "CCK ");
- for (i = 0; i < WLC_NUM_RATES_CCK; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_CCK; i++) {
sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->cck[i] / WLC_TXPWR_DB_FACTOR,
- fraction[txpwr->cck[i] % WLC_TXPWR_DB_FACTOR]);
+ txpwr->cck[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->cck[i] % BRCMS_TXPWR_DB_FACTOR]);
}
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "20 MHz OFDM SISO ");
- for (i = 0; i < WLC_NUM_RATES_OFDM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm[i] / WLC_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm[i] % WLC_TXPWR_DB_FACTOR]);
+ txpwr->ofdm[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->ofdm[i] % BRCMS_TXPWR_DB_FACTOR]);
}
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "20 MHz OFDM CDD ");
- for (i = 0; i < WLC_NUM_RATES_OFDM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_cdd[i] / WLC_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_cdd[i] % WLC_TXPWR_DB_FACTOR]);
+ txpwr->ofdm_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->ofdm_cdd[i] % BRCMS_TXPWR_DB_FACTOR]);
}
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "40 MHz OFDM SISO ");
- for (i = 0; i < WLC_NUM_RATES_OFDM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_40_siso[i] / WLC_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_40_siso[i] % WLC_TXPWR_DB_FACTOR]);
+ txpwr->ofdm_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->ofdm_40_siso[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
}
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "40 MHz OFDM CDD ");
- for (i = 0; i < WLC_NUM_RATES_OFDM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_40_cdd[i] / WLC_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_40_cdd[i] % WLC_TXPWR_DB_FACTOR]);
+ txpwr->ofdm_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->ofdm_40_cdd[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
}
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "20 MHz MCS0-7 SISO ");
- for (i = 0; i < WLC_NUM_RATES_MCS_1_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_siso[i] / WLC_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_siso[i] % WLC_TXPWR_DB_FACTOR]);
+ txpwr->mcs_20_siso[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_20_siso[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
}
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "20 MHz MCS0-7 CDD ");
- for (i = 0; i < WLC_NUM_RATES_MCS_1_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_cdd[i] / WLC_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_cdd[i] % WLC_TXPWR_DB_FACTOR]);
+ txpwr->mcs_20_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_20_cdd[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
}
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "20 MHz MCS0-7 STBC ");
- for (i = 0; i < WLC_NUM_RATES_MCS_1_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_stbc[i] / WLC_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_stbc[i] % WLC_TXPWR_DB_FACTOR]);
+ txpwr->mcs_20_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_20_stbc[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
}
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "20 MHz MCS8-15 SDM ");
- for (i = 0; i < WLC_NUM_RATES_MCS_2_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) {
sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_mimo[i] / WLC_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_mimo[i] % WLC_TXPWR_DB_FACTOR]);
+ txpwr->mcs_20_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_20_mimo[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
}
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "40 MHz MCS0-7 SISO ");
- for (i = 0; i < WLC_NUM_RATES_MCS_1_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_siso[i] / WLC_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_siso[i] % WLC_TXPWR_DB_FACTOR]);
+ txpwr->mcs_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_40_siso[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
}
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "40 MHz MCS0-7 CDD ");
- for (i = 0; i < WLC_NUM_RATES_MCS_1_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_cdd[i] / WLC_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_cdd[i] % WLC_TXPWR_DB_FACTOR]);
+ txpwr->mcs_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_40_cdd[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
}
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "40 MHz MCS0-7 STBC ");
- for (i = 0; i < WLC_NUM_RATES_MCS_1_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_stbc[i] / WLC_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_stbc[i] % WLC_TXPWR_DB_FACTOR]);
+ txpwr->mcs_40_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_40_stbc[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
}
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "40 MHz MCS8-15 SDM ");
- for (i = 0; i < WLC_NUM_RATES_MCS_2_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) {
sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_mimo[i] / WLC_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_mimo[i] % WLC_TXPWR_DB_FACTOR]);
+ txpwr->mcs_40_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs_40_mimo[i] %
+ BRCMS_TXPWR_DB_FACTOR]);
}
printk(KERN_DEBUG "%s\n", buf);
printk(KERN_DEBUG "MCS32 %2d%s\n",
- txpwr->mcs32 / WLC_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs32 % WLC_TXPWR_DB_FACTOR]);
+ txpwr->mcs32 / BRCMS_TXPWR_DB_FACTOR,
+ fraction[txpwr->mcs32 % BRCMS_TXPWR_DB_FACTOR]);
}
#endif /* POWER_DBG */
void
-wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
- txpwr_limits_t *txpwr)
+brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, chanspec_t chanspec,
+ struct txpwr_limits *txpwr)
{
- struct wlc_info *wlc = wlc_cm->wlc;
+ struct brcms_c_info *wlc = wlc_cm->wlc;
uint i;
uint chan;
int maxpwr;
int delta;
- const country_info_t *country;
- struct wlcband *band;
- const locale_info_t *li;
+ const struct country_info *country;
+ struct brcms_band *band;
+ const struct locale_info *li;
int conducted_max;
int conducted_ofdm_max;
- const locale_mimo_info_t *li_mimo;
+ const struct locale_mimo_info *li_mimo;
int maxpwr20, maxpwr40;
int maxpwr_idx;
uint j;
- memset(txpwr, 0, sizeof(txpwr_limits_t));
+ memset(txpwr, 0, sizeof(struct txpwr_limits));
- if (!wlc_valid_chanspec_db(wlc_cm, chanspec)) {
- country = wlc_country_lookup(wlc, wlc->autocountry_default);
+ if (!brcms_c_valid_chanspec_db(wlc_cm, chanspec)) {
+ country = brcms_c_country_lookup(wlc, wlc->autocountry_default);
if (country == NULL)
return;
} else {
@@ -1289,16 +1291,16 @@ wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
}
chan = CHSPEC_CHANNEL(chanspec);
- band = wlc->bandstate[CHSPEC_WLCBANDUNIT(chanspec)];
+ band = wlc->bandstate[CHSPEC_BANDUNIT(chanspec)];
li = BAND_5G(band->bandtype) ?
- wlc_get_locale_5g(country->locale_5G) :
- wlc_get_locale_2g(country->locale_2G);
+ brcms_c_get_locale_5g(country->locale_5G) :
+ brcms_c_get_locale_2g(country->locale_2G);
li_mimo = BAND_5G(band->bandtype) ?
- wlc_get_mimo_5g(country->locale_mimo_5G) :
- wlc_get_mimo_2g(country->locale_mimo_2G);
+ brcms_c_get_mimo_5g(country->locale_mimo_5G) :
+ brcms_c_get_mimo_2g(country->locale_mimo_2G);
- if (li->flags & WLC_EIRP) {
+ if (li->flags & BRCMS_EIRP) {
delta = band->antgain;
} else {
delta = 0;
@@ -1319,7 +1321,7 @@ wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
maxpwr = max(maxpwr, 0);
maxpwr = min(maxpwr, conducted_max);
- for (i = 0; i < WLC_NUM_RATES_CCK; i++)
+ for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
txpwr->cck[i] = (u8) maxpwr;
}
@@ -1339,11 +1341,10 @@ wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
if (BAND_2G(band->bandtype))
maxpwr = min_t(int, maxpwr, txpwr->cck[0]);
- for (i = 0; i < WLC_NUM_RATES_OFDM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
txpwr->ofdm[i] = (u8) maxpwr;
- }
- for (i = 0; i < WLC_NUM_RATES_OFDM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
/* OFDM 40 MHz SISO has the same power as the corresponding MCS0-7 rate unless
* overridden by the locale specific code. We set this value to 0 as a
* flag (presumably 0 dBm isn't a possibility) and then copy the MCS0-7 value
@@ -1357,7 +1358,7 @@ wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
}
/* MIMO/HT specific limits */
- if (li_mimo->flags & WLC_EIRP) {
+ if (li_mimo->flags & BRCMS_EIRP) {
delta = band->antgain;
} else {
delta = 0;
@@ -1379,7 +1380,7 @@ wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
maxpwr40 = max(maxpwr40, 0);
/* Fill in the MCS 0-7 (SISO) rates */
- for (i = 0; i < WLC_NUM_RATES_MCS_1_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
/* 20 MHz has the same power as the corresponding OFDM rate unless
* overridden by the locale specific code.
@@ -1389,7 +1390,7 @@ wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
}
/* Fill in the MCS 0-7 CDD rates */
- for (i = 0; i < WLC_NUM_RATES_MCS_1_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
txpwr->mcs_20_cdd[i] = (u8) maxpwr20;
txpwr->mcs_40_cdd[i] = (u8) maxpwr40;
}
@@ -1405,20 +1406,20 @@ wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
}
}
- for (i = 0; i < WLC_NUM_RATES_MCS_1_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
txpwr->mcs_20_siso[i] = (u8) maxpwr20;
txpwr->mcs_40_siso[i] = (u8) maxpwr40;
}
}
/* Fill in the MCS 0-7 STBC rates */
- for (i = 0; i < WLC_NUM_RATES_MCS_1_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
txpwr->mcs_20_stbc[i] = 0;
txpwr->mcs_40_stbc[i] = 0;
}
/* Fill in the MCS 8-15 SDM rates */
- for (i = 0; i < WLC_NUM_RATES_MCS_2_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) {
txpwr->mcs_20_mimo[i] = (u8) maxpwr20;
txpwr->mcs_40_mimo[i] = (u8) maxpwr40;
}
@@ -1426,7 +1427,7 @@ wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
/* Fill in MCS32 */
txpwr->mcs32 = (u8) maxpwr40;
- for (i = 0, j = 0; i < WLC_NUM_RATES_OFDM; i++, j++) {
+ for (i = 0, j = 0; i < BRCMS_NUM_RATES_OFDM; i++, j++) {
if (txpwr->ofdm_40_cdd[i] == 0)
txpwr->ofdm_40_cdd[i] = txpwr->mcs_40_cdd[j];
if (i == 0) {
@@ -1440,12 +1441,12 @@ wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
* provided explicitly.
*/
- for (i = 0; i < WLC_NUM_RATES_MCS_1_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
if (txpwr->mcs_40_siso[i] == 0)
txpwr->mcs_40_siso[i] = txpwr->mcs_40_cdd[i];
}
- for (i = 0, j = 0; i < WLC_NUM_RATES_OFDM; i++, j++) {
+ for (i = 0, j = 0; i < BRCMS_NUM_RATES_OFDM; i++, j++) {
if (txpwr->ofdm_40_siso[i] == 0)
txpwr->ofdm_40_siso[i] = txpwr->mcs_40_siso[j];
if (i == 0) {
@@ -1458,7 +1459,7 @@ wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
/* Copy the 20 and 40 MHz MCS0-7 CDD values to the corresponding STBC values if they weren't
* provided explicitly.
*/
- for (i = 0; i < WLC_NUM_RATES_MCS_1_STREAM; i++) {
+ for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
if (txpwr->mcs_20_stbc[i] == 0)
txpwr->mcs_20_stbc[i] = txpwr->mcs_20_cdd[i];
@@ -1473,13 +1474,13 @@ wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm, chanspec_t chanspec,
}
/* Returns true if currently set country is Japan or variant */
-static bool wlc_japan(struct wlc_info *wlc)
+static bool brcms_c_japan(struct brcms_c_info *wlc)
{
- return wlc_japan_ccode(wlc->cmi->country_abbrev);
+ return brcms_c_japan_ccode(wlc->cmi->country_abbrev);
}
/* JP, J1 - J10 are Japan ccodes */
-static bool wlc_japan_ccode(const char *ccode)
+static bool brcms_c_japan_ccode(const char *ccode)
{
return (ccode[0] == 'J' &&
(ccode[1] == 'P' || (ccode[1] >= '1' && ccode[1] <= '9')));
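
A small usage sketch of the country-code predicate above (not part of the patch): "JP" and "J1".."J9" match, and "J10" also matches because only the second character is tested.

#include <stdbool.h>
#include <stdio.h>

static bool japan_ccode(const char *ccode)
{
    return ccode[0] == 'J' &&
           (ccode[1] == 'P' || (ccode[1] >= '1' && ccode[1] <= '9'));
}

int main(void)
{
    printf("%d %d %d\n",
           japan_ccode("JP"), japan_ccode("J3"), japan_ccode("US"));
    return 0;   /* prints: 1 1 0 */
}
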
@@ -1490,20 +1491,21 @@ static bool wlc_japan_ccode(const char *ccode)
* are valid 20MHZ channels in this locale and they are also a legal HT combination
*/
static bool
-wlc_valid_chanspec_ext(wlc_cm_info_t *wlc_cm, chanspec_t chspec, bool dualband)
+brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, chanspec_t chspec,
+ bool dualband)
{
- struct wlc_info *wlc = wlc_cm->wlc;
+ struct brcms_c_info *wlc = wlc_cm->wlc;
u8 channel = CHSPEC_CHANNEL(chspec);
/* check the chanspec */
- if (bcm_chspec_malformed(chspec)) {
+ if (brcmu_chspec_malformed(chspec)) {
wiphy_err(wlc->wiphy, "wl%d: malformed chanspec 0x%x\n",
wlc->pub->unit, chspec);
return false;
}
if (CHANNEL_BANDUNIT(wlc_cm->wlc, channel) !=
- CHSPEC_WLCBANDUNIT(chspec))
+ CHSPEC_BANDUNIT(chspec))
return false;
/* Check a 20Mhz channel */
@@ -1517,12 +1519,12 @@ wlc_valid_chanspec_ext(wlc_cm_info_t *wlc_cm, chanspec_t chspec, bool dualband)
/* We know we are now checking a 40MHZ channel, so we should only be here
* for NPHYS
*/
- if (WLCISNPHY(wlc->band) || WLCISSSLPNPHY(wlc->band)) {
+ if (BRCMS_ISNPHY(wlc->band) || BRCMS_ISSSLPNPHY(wlc->band)) {
u8 upper_sideband = 0, idx;
u8 num_ch20_entries =
sizeof(chan20_info) / sizeof(struct chan20_info);
- if (!VALID_40CHANSPEC_IN_BAND(wlc, CHSPEC_WLCBANDUNIT(chspec)))
+ if (!VALID_40CHANSPEC_IN_BAND(wlc, CHSPEC_BANDUNIT(chspec)))
return false;
if (dualband) {
@@ -1551,7 +1553,7 @@ wlc_valid_chanspec_ext(wlc_cm_info_t *wlc_cm, chanspec_t chspec, bool dualband)
return false;
}
-bool wlc_valid_chanspec_db(wlc_cm_info_t *wlc_cm, chanspec_t chspec)
+bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, chanspec_t chspec)
{
- return wlc_valid_chanspec_ext(wlc_cm, chspec, true);
+ return brcms_c_valid_chanspec_ext(wlc_cm, chspec, true);
}
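
Several loops in brcms_c_channel_reg_limits() above rely on a "0 means not set" convention: the 40 MHz SISO/STBC entries are left at zero and later back-filled from the CDD limits. A compact standalone sketch of that fallback pattern, with hypothetical array names standing in for the txpwr_limits fields:

#include <stdio.h>

#define NRATES 8

int main(void)
{
    unsigned char cdd[NRATES]  = { 60, 60, 60, 60, 56, 56, 52, 52 };
    unsigned char siso[NRATES] = { 0, 0, 64, 0, 0, 0, 0, 0 };  /* 0 == not provided */

    for (int i = 0; i < NRATES; i++)
        if (siso[i] == 0)           /* inherit from CDD when unset */
            siso[i] = cdd[i];

    for (int i = 0; i < NRATES; i++)
        printf("%u ", siso[i]);
    printf("\n");
    return 0;
}
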
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_channel.h b/drivers/staging/brcm80211/brcmsmac/channel.h
index b8dec5b39d8..d22f2f5f592 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_channel.h
+++ b/drivers/staging/brcm80211/brcmsmac/channel.h
@@ -14,12 +14,12 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _WLC_CHANNEL_H_
-#define _WLC_CHANNEL_H_
+#ifndef _BRCM_CHANNEL_H_
+#define _BRCM_CHANNEL_H_
-#define WLC_TXPWR_DB_FACTOR 4 /* conversion for phy txpwr cacluations that use .25 dB units */
+/* conversion for phy txpwr calculations that use .25 dB units */
+#define BRCMS_TXPWR_DB_FACTOR 4
-struct wlc_info;
/* maxpwr mapping to 5GHz band channels:
* maxpwr[0] - channels [34-48]
@@ -47,43 +47,56 @@ struct wlc_info;
#define CHANNEL_POWER_IDX_5G(c) \
(((c) < 52) ? 0 : (((c) < 62) ? 1 : (((c) < 100) ? 2 : (((c) < 149) ? 3 : 4))))
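
A quick standalone check of the sub-band mapping performed by CHANNEL_POWER_IDX_5G() above (not part of the patch; the macro body is copied verbatim from this header):

#include <stdio.h>

#define CHANNEL_POWER_IDX_5G(c) \
    (((c) < 52) ? 0 : (((c) < 62) ? 1 : (((c) < 100) ? 2 : (((c) < 149) ? 3 : 4))))

int main(void)
{
    int chans[] = { 36, 48, 52, 60, 64, 100, 140, 149, 165 };

    for (unsigned i = 0; i < sizeof(chans) / sizeof(chans[0]); i++)
        printf("channel %3d -> sub-band %d\n", chans[i],
               CHANNEL_POWER_IDX_5G(chans[i]));
    return 0;
}
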
-#define WLC_MAXPWR_TBL_SIZE 6 /* max of BAND_5G_PWR_LVLS and 6 for 2.4 GHz */
-#define WLC_MAXPWR_MIMO_TBL_SIZE 14 /* max of BAND_5G_PWR_LVLS and 14 for 2.4 GHz */
+/* max of BAND_5G_PWR_LVLS and 6 for 2.4 GHz */
+#define BRCMS_MAXPWR_TBL_SIZE 6
+/* max of BAND_5G_PWR_LVLS and 14 for 2.4 GHz */
+#define BRCMS_MAXPWR_MIMO_TBL_SIZE 14
+
+#define NBANDS(wlc) ((wlc)->pub->_nbands)
+#define NBANDS_PUB(pub) ((pub)->_nbands)
+#define NBANDS_HW(hw) ((hw)->_nbands)
+
+#define IS_SINGLEBAND_5G(device) 0
/* locale channel and power info. */
-typedef struct {
+struct locale_info {
u32 valid_channels;
- u8 radar_channels; /* List of radar sensitive channels */
- u8 restricted_channels; /* List of channels used only if APs are detected */
- s8 maxpwr[WLC_MAXPWR_TBL_SIZE]; /* Max tx pwr in qdBm for each sub-band */
+ /* List of radar sensitive channels */
+ u8 radar_channels;
+ /* List of channels used only if APs are detected */
+ u8 restricted_channels;
+ /* Max tx pwr in qdBm for each sub-band */
+ s8 maxpwr[BRCMS_MAXPWR_TBL_SIZE];
s8 pub_maxpwr[BAND_5G_PWR_LVLS]; /* Country IE advertised max tx pwr in dBm
* per sub-band
*/
u8 flags;
-} locale_info_t;
+};
/* bits for locale_info flags */
-#define WLC_PEAK_CONDUCTED 0x00 /* Peak for locals */
-#define WLC_EIRP 0x01 /* Flag for EIRP */
-#define WLC_DFS_TPC 0x02 /* Flag for DFS TPC */
-#define WLC_NO_OFDM 0x04 /* Flag for No OFDM */
-#define WLC_NO_40MHZ 0x08 /* Flag for No MIMO 40MHz */
-#define WLC_NO_MIMO 0x10 /* Flag for No MIMO, 20 or 40 MHz */
-#define WLC_RADAR_TYPE_EU 0x20 /* Flag for EU */
-#define WLC_DFS_FCC WLC_DFS_TPC /* Flag for DFS FCC */
-#define WLC_DFS_EU (WLC_DFS_TPC | WLC_RADAR_TYPE_EU) /* Flag for DFS EU */
-
-#define ISDFS_EU(fl) (((fl) & WLC_DFS_EU) == WLC_DFS_EU)
+#define BRCMS_PEAK_CONDUCTED 0x00 /* Peak for locals */
+#define BRCMS_EIRP 0x01 /* Flag for EIRP */
+#define BRCMS_DFS_TPC 0x02 /* Flag for DFS TPC */
+#define BRCMS_NO_OFDM 0x04 /* Flag for No OFDM */
+#define BRCMS_NO_40MHZ 0x08 /* Flag for No MIMO 40MHz */
+#define BRCMS_NO_MIMO 0x10 /* Flag for No MIMO, 20 or 40 MHz */
+#define BRCMS_RADAR_TYPE_EU 0x20 /* Flag for EU */
+#define BRCMS_DFS_FCC BRCMS_DFS_TPC /* Flag for DFS FCC */
+#define BRCMS_DFS_EU (BRCMS_DFS_TPC | BRCMS_RADAR_TYPE_EU) /* Flag for DFS EU */
+
+#define ISDFS_EU(fl) (((fl) & BRCMS_DFS_EU) == BRCMS_DFS_EU)
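
BRCMS_EIRP marks locales whose limits are radiated (EIRP) rather than conducted, which is why brcms_c_channel_reg_limits() above subtracts the band's antenna gain only when the flag is set. A hedged sketch of that delta; the numbers and the assumption that antgain is kept in quarter-dB units are illustrative only, and the surrounding conducted_max arithmetic is outside the quoted hunks:

#include <stdio.h>

#define BRCMS_EIRP 0x01

int main(void)
{
    unsigned flags = BRCMS_EIRP;
    int locale_maxpwr = 72;          /* 18 dBm in quarter-dB units */
    int antgain = 8;                 /* assumed 2 dB antenna gain, quarter-dB units */

    int delta = (flags & BRCMS_EIRP) ? antgain : 0;
    int maxpwr = locale_maxpwr - delta;
    if (maxpwr < 0)
        maxpwr = 0;

    printf("conducted cap = %d qdBm (%d.%02d dBm)\n",
           maxpwr, maxpwr / 4, (maxpwr % 4) * 25);
    return 0;
}
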
/* locale per-channel tx power limits for MIMO frames
* maxpwr arrays are index by channel for 2.4 GHz limits, and
* by sub-band for 5 GHz limits using CHANNEL_POWER_IDX_5G(channel)
*/
-typedef struct {
- s8 maxpwr20[WLC_MAXPWR_MIMO_TBL_SIZE]; /* tx 20 MHz power limits, qdBm units */
- s8 maxpwr40[WLC_MAXPWR_MIMO_TBL_SIZE]; /* tx 40 MHz power limits, qdBm units */
+struct locale_mimo_info {
+ /* tx 20 MHz power limits, qdBm units */
+ s8 maxpwr20[BRCMS_MAXPWR_MIMO_TBL_SIZE];
+ /* tx 40 MHz power limits, qdBm units */
+ s8 maxpwr40[BRCMS_MAXPWR_MIMO_TBL_SIZE];
u8 flags;
-} locale_mimo_info_t;
+};
extern const chanvec_t chanvec_all_2G;
extern const chanvec_t chanvec_all_5G;
@@ -98,22 +111,21 @@ struct country_info {
const u8 locale_mimo_5G; /* 5G mimo info */
};
-typedef struct country_info country_info_t;
-
-typedef struct wlc_cm_info wlc_cm_info_t;
+extern struct brcms_cm_info *
+brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
-extern wlc_cm_info_t *wlc_channel_mgr_attach(struct wlc_info *wlc);
-extern void wlc_channel_mgr_detach(wlc_cm_info_t *wlc_cm);
+extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
-extern u8 wlc_channel_locale_flags_in_band(wlc_cm_info_t *wlc_cm,
+extern u8 brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
uint bandunit);
-extern bool wlc_valid_chanspec_db(wlc_cm_info_t *wlc_cm, chanspec_t chspec);
+extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm,
+ chanspec_t chspec);
-extern void wlc_channel_reg_limits(wlc_cm_info_t *wlc_cm,
+extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm,
chanspec_t chanspec,
struct txpwr_limits *txpwr);
-extern void wlc_channel_set_chanspec(wlc_cm_info_t *wlc_cm,
+extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm,
chanspec_t chanspec,
u8 local_constraint_qdbm);
diff --git a/drivers/staging/brcm80211/brcmsmac/d11.h b/drivers/staging/brcm80211/brcmsmac/d11.h
index d91e4189a3e..e7ff0e6f28e 100644
--- a/drivers/staging/brcm80211/brcmsmac/d11.h
+++ b/drivers/staging/brcm80211/brcmsmac/d11.h
@@ -14,23 +14,14 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _D11_H
-#define _D11_H
+#ifndef _BRCM_D11_H_
+#define _BRCM_D11_H_
-#include <sbconfig.h>
+#include <linux/ieee80211.h>
-#ifndef WL_RSSI_ANT_MAX
-#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */
-#elif WL_RSSI_ANT_MAX != 4
-#error "WL_RSSI_ANT_MAX does not match"
-#endif
-
-/* cpp contortions to concatenate w/arg prescan */
-#ifndef PAD
-#define _PADLINE(line) pad ## line
-#define _XSTR(line) _PADLINE(line)
-#define PAD _XSTR(__LINE__)
-#endif
+#include <defs.h>
+#include "pub.h"
+#include "dma.h"
#define BCN_TMPL_LEN 512 /* length of the BCN template area */
@@ -56,10 +47,16 @@
#define TX_DATA_FIFO TX_AC_BE_FIFO
#define TX_CTL_FIFO TX_AC_VO_FIFO
-typedef volatile struct {
+#ifndef WL_RSSI_ANT_MAX
+#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */
+#elif WL_RSSI_ANT_MAX != 4
+#error "WL_RSSI_ANT_MAX does not match"
+#endif
+
+struct intctrlregs {
u32 intstatus;
u32 intmask;
-} intctrlregs_t;
+};
/* PIO structure,
* support two PIO format: 2 bytes access and 4 bytes access
@@ -67,55 +64,53 @@ typedef volatile struct {
* a pair of channels is defined for convenience
*/
/* 2byte-wide pio register set per channel(xmt or rcv) */
-typedef volatile struct {
+struct pio2regs {
u16 fifocontrol;
u16 fifodata;
u16 fifofree; /* only valid in xmt channel, not in rcv channel */
u16 PAD;
-} pio2regs_t;
+};
/* a pair of pio channels(tx and rx) */
-typedef volatile struct {
+struct pio2regp {
pio2regs_t tx;
pio2regs_t rx;
-} pio2regp_t;
+};
/* 4byte-wide pio register set per channel(xmt or rcv) */
-typedef volatile struct {
+struct pio4regs {
u32 fifocontrol;
u32 fifodata;
-} pio4regs_t;
+};
/* a pair of pio channels(tx and rx) */
-typedef volatile struct {
+struct pio4regp {
pio4regs_t tx;
pio4regs_t rx;
-} pio4regp_t;
+};
/* read: 32-bit register that can be read as 32-bit or as 2 16-bit
* write: only low 16b-it half can be written
*/
-typedef volatile union {
+union pmqreg {
u32 pmqhostdata; /* read only! */
struct {
u16 pmqctrlstatus; /* read/write */
u16 PAD;
} w;
-} pmqreg_t;
+};
-typedef volatile struct {
+struct fifo64 {
dma64regs_t dmaxmt; /* dma tx */
pio4regs_t piotx; /* pio tx */
dma64regs_t dmarcv; /* dma rx */
pio4regs_t piorx; /* pio rx */
-} fifo64_t;
+};
/*
* Host Interface Registers
- * - primed from hnd_cores/dot11mac/systemC/registers/ihr.h
- * - but definitely not complete
*/
-typedef volatile struct _d11regs {
+struct d11regs {
/* Device Control ("semi-standard host registers") */
u32 PAD[3]; /* 0x0 - 0x8 */
u32 biststatus; /* 0xC */
@@ -439,10 +434,7 @@ typedef volatile struct _d11regs {
/* SHM *//* 0x800 - 0xEFE */
u16 PAD[0x380]; /* 0x800 - 0xEFE */
-
- /* SB configuration registers: 0xF00 */
- sbconfig_t sbconfig; /* sb config regs occupy top 256 bytes */
-} d11regs_t;
+};
#define PIHR_BASE 0x0400 /* byte address of packed IHR region */
@@ -629,12 +621,11 @@ typedef volatile struct _d11regs {
#define ANA_11N_013 5
/* 802.11a PLCP header def */
-typedef struct ofdm_phy_hdr ofdm_phy_hdr_t;
struct ofdm_phy_hdr {
u8 rlpt[3]; /* rate, length, parity, tail */
u16 service;
u8 pad;
-} __attribute__((packed));
+} __packed;
#define D11A_PHY_HDR_GRATE(phdr) ((phdr)->rlpt[0] & 0x0f)
#define D11A_PHY_HDR_GRES(phdr) (((phdr)->rlpt[0] >> 4) & 0x01)
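
The D11A_PHY_HDR_* accessors above pull the rate code and the reserved bit out of the packed OFDM PLCP header. A userspace sketch of that parsing (not part of the patch; __packed is approximated with the GCC attribute and the sample bytes are made up):

#include <stdio.h>
#include <stdint.h>

/* mirrors struct ofdm_phy_hdr from d11.h */
struct ofdm_phy_hdr {
    uint8_t  rlpt[3];   /* rate, length, parity, tail */
    uint16_t service;
    uint8_t  pad;
} __attribute__((packed));

#define D11A_PHY_HDR_GRATE(phdr) ((phdr)->rlpt[0] & 0x0f)
#define D11A_PHY_HDR_GRES(phdr)  (((phdr)->rlpt[0] >> 4) & 0x01)

int main(void)
{
    struct ofdm_phy_hdr h = { .rlpt = { 0x1b, 0x40, 0x00 } };

    printf("rate code 0x%x, reserved bit %d, sizeof hdr %zu\n",
           D11A_PHY_HDR_GRATE(&h), D11A_PHY_HDR_GRES(&h), sizeof(h));
    return 0;   /* packed => sizeof(h) == 6 */
}
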
@@ -664,13 +655,12 @@ struct ofdm_phy_hdr {
#define D11A_PHY_PREHDR_TIME (D11A_PHY_PRE_TIME + D11A_PHY_HDR_TIME)
/* 802.11b PLCP header def */
-typedef struct cck_phy_hdr cck_phy_hdr_t;
struct cck_phy_hdr {
u8 signal;
u8 service;
u16 length;
u16 crc;
-} __attribute__((packed));
+} __packed;
#define D11B_PHY_HDR_LEN 6
@@ -691,17 +681,17 @@ struct cck_phy_hdr {
#define MIMO_PLCP_40MHZ 0x80 /* 40 Hz frame */
#define MIMO_PLCP_AMPDU 0x08 /* ampdu */
-#define WLC_GET_CCK_PLCP_LEN(plcp) (plcp[4] + (plcp[5] << 8))
-#define WLC_GET_MIMO_PLCP_LEN(plcp) (plcp[1] + (plcp[2] << 8))
-#define WLC_SET_MIMO_PLCP_LEN(plcp, len) \
+#define BRCMS_GET_CCK_PLCP_LEN(plcp) (plcp[4] + (plcp[5] << 8))
+#define BRCMS_GET_MIMO_PLCP_LEN(plcp) (plcp[1] + (plcp[2] << 8))
+#define BRCMS_SET_MIMO_PLCP_LEN(plcp, len) \
do { \
plcp[1] = len & 0xff; \
plcp[2] = ((len >> 8) & 0xff); \
} while (0);
-#define WLC_SET_MIMO_PLCP_AMPDU(plcp) (plcp[3] |= MIMO_PLCP_AMPDU)
-#define WLC_CLR_MIMO_PLCP_AMPDU(plcp) (plcp[3] &= ~MIMO_PLCP_AMPDU)
-#define WLC_IS_MIMO_PLCP_AMPDU(plcp) (plcp[3] & MIMO_PLCP_AMPDU)
+#define BRCMS_SET_MIMO_PLCP_AMPDU(plcp) (plcp[3] |= MIMO_PLCP_AMPDU)
+#define BRCMS_CLR_MIMO_PLCP_AMPDU(plcp) (plcp[3] &= ~MIMO_PLCP_AMPDU)
+#define BRCMS_IS_MIMO_PLCP_AMPDU(plcp) (plcp[3] & MIMO_PLCP_AMPDU)
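
The renamed PLCP helpers pack a 16-bit HT length into bytes 1 and 2 of the PLCP header and keep the A-MPDU flag in byte 3. A standalone round-trip check of that layout (not part of the patch; macro bodies adapted from this header):

#include <stdio.h>

#define MIMO_PLCP_AMPDU 0x08

#define BRCMS_GET_MIMO_PLCP_LEN(plcp) (plcp[1] + (plcp[2] << 8))
#define BRCMS_SET_MIMO_PLCP_LEN(plcp, len) \
    do { \
        plcp[1] = len & 0xff; \
        plcp[2] = ((len >> 8) & 0xff); \
    } while (0)

int main(void)
{
    unsigned char plcp[6] = { 0 };

    BRCMS_SET_MIMO_PLCP_LEN(plcp, 1536);
    plcp[3] |= MIMO_PLCP_AMPDU;                 /* BRCMS_SET_MIMO_PLCP_AMPDU */

    printf("len=%d ampdu=%d\n",
           BRCMS_GET_MIMO_PLCP_LEN(plcp), !!(plcp[3] & MIMO_PLCP_AMPDU));
    return 0;   /* prints: len=1536 ampdu=1 */
}
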
/* The dot11a PLCP header is 5 bytes. To simplify the software (so that we
* don't need e.g. different tx DMA headers for 11a and 11b), the PLCP header has
@@ -710,7 +700,6 @@ struct cck_phy_hdr {
#define D11_PHY_HDR_LEN 6
/* TX DMA buffer header */
-typedef struct d11txh d11txh_t;
struct d11txh {
u16 MacTxControlLow; /* 0x0 */
u16 MacTxControlHigh; /* 0x1 */
@@ -746,7 +735,7 @@ struct d11txh {
u8 RTSPhyHeader[D11_PHY_HDR_LEN]; /* 0x2c - 0x2e */
struct ieee80211_rts rts_frame; /* 0x2f - 0x36 */
u16 PAD; /* 0x37 */
-} __attribute__((packed));
+} __packed;
#define D11_TXH_LEN 112 /* bytes */
@@ -854,7 +843,6 @@ struct d11txh {
#define ABI_MAS_MRT_ANT_PTN_MASK 0x000f
/* tx status packet */
-typedef struct tx_status tx_status_t;
struct tx_status {
u16 framelen;
u16 PAD;
@@ -864,7 +852,7 @@ struct tx_status {
u16 sequence;
u16 phyerr;
u16 ackphyrxsh;
-} __attribute__((packed));
+} __packed;
#define TXSTATUS_LEN 16
@@ -1160,25 +1148,25 @@ struct tx_status {
#define M_TX_IDLE_BUSY_RATIO_X_16_OFDM (0x5A * 2)
/* CW RSSI for LCNPHY */
-#define M_LCN_RSSI_0 0x1332
-#define M_LCN_RSSI_1 0x1338
-#define M_LCN_RSSI_2 0x133e
-#define M_LCN_RSSI_3 0x1344
+#define M_LCN_RSSI_0 0x1332
+#define M_LCN_RSSI_1 0x1338
+#define M_LCN_RSSI_2 0x133e
+#define M_LCN_RSSI_3 0x1344
/* SNR for LCNPHY */
-#define M_LCN_SNR_A_0 0x1334
-#define M_LCN_SNR_B_0 0x1336
+#define M_LCN_SNR_A_0 0x1334
+#define M_LCN_SNR_B_0 0x1336
-#define M_LCN_SNR_A_1 0x133a
-#define M_LCN_SNR_B_1 0x133c
+#define M_LCN_SNR_A_1 0x133a
+#define M_LCN_SNR_B_1 0x133c
-#define M_LCN_SNR_A_2 0x1340
-#define M_LCN_SNR_B_2 0x1342
+#define M_LCN_SNR_A_2 0x1340
+#define M_LCN_SNR_B_2 0x1342
-#define M_LCN_SNR_A_3 0x1346
-#define M_LCN_SNR_B_3 0x1348
+#define M_LCN_SNR_A_3 0x1346
+#define M_LCN_SNR_B_3 0x1348
-#define M_LCN_LAST_RESET (81*2)
+#define M_LCN_LAST_RESET (81*2)
#define M_LCN_LAST_LOC (63*2)
#define M_LCNPHY_RESET_STATUS (4902)
#define M_LCNPHY_DSC_TIME (0x98d*2)
@@ -1247,7 +1235,6 @@ struct tx_status {
#define MIMO_ANTSEL_WAIT 50 /* 50us wait */
#define MIMO_ANTSEL_OVERRIDE 0x8000 /* flag */
-typedef struct shm_acparams shm_acparams_t;
struct shm_acparams {
u16 txop;
u16 cwmin;
@@ -1258,7 +1245,7 @@ struct shm_acparams {
u16 reggap;
u16 status;
u16 rsvd[8];
-} __attribute__((packed));
+} __packed;
#define M_EDCF_QLEN (16 * 2)
#define WME_STATUS_NEWAC (1 << 8)
@@ -1292,7 +1279,7 @@ struct shm_acparams {
/* Flags in M_HOST_FLAGS4 */
#define MHF4_BPHY_TXCORE0 0x0080 /* force bphy Tx on core 0 (board level WAR) */
-#define MHF4_EXTPA_ENABLE 0x4000 /* for 4313A0 FEM boards */
+#define MHF4_EXTPA_ENABLE 0x4000 /* for 4313A0 FEM boards */
/* Flags in M_HOST_FLAGS5 */
#define MHF5_4313_GPIOCTRL 0x0001
@@ -1306,7 +1293,6 @@ struct shm_acparams {
#define PHY_NOISE_MASK 0x00ff
/* Receive Frame Data Header for 802.11b DCF-only frames */
-typedef struct d11rxhdr d11rxhdr_t;
struct d11rxhdr {
u16 RxFrameSize; /* Actual byte length of the frame data received */
u16 PAD;
@@ -1320,21 +1306,20 @@ struct d11rxhdr {
u16 RxStatus2; /* extended MAC Rx status */
u16 RxTSFTime; /* RxTSFTime time of first MAC symbol + M_PHY_PLCPRX_DLY */
u16 RxChan; /* gain code, channel radio code, and phy type */
-} __attribute__((packed));
+} __packed;
-#define RXHDR_LEN 24 /* sizeof d11rxhdr_t */
+#define RXHDR_LEN 24 /* sizeof struct d11rxhdr */
#define FRAMELEN(h) ((h)->RxFrameSize)
-typedef struct wlc_d11rxhdr wlc_d11rxhdr_t;
-struct wlc_d11rxhdr {
- d11rxhdr_t rxhdr;
+struct brcms_d11rxhdr {
+ struct d11rxhdr rxhdr;
u32 tsf_l; /* TSF_L reading */
s8 rssi; /* computed instantaneous rssi in BMAC */
s8 rxpwr0; /* obsoleted, place holder for legacy ROM code. use rxpwr[] */
s8 rxpwr1; /* obsoleted, place holder for legacy ROM code. use rxpwr[] */
s8 do_rssi_ma; /* do per-pkt sampling for per-antenna ma in HIGH */
s8 rxpwr[WL_RSSI_ANT_MAX]; /* rssi for supported antennas */
-} __attribute__((packed));
+} __packed;
/* PhyRxStatus_0: */
#define PRXS0_FT_MASK 0x0003 /* NPHY only: CCK, OFDM, preN, N */
@@ -1473,7 +1458,7 @@ struct wlc_d11rxhdr {
#define DBGST_ASLEEP 4 /* asleep (PS mode) */
/* Scratch Reg defs */
-typedef enum {
+enum _ePsmScratchPadRegDefinitions {
S_RSV0 = 0,
S_RSV1,
S_RSV2,
@@ -1551,7 +1536,7 @@ typedef enum {
S_MFGTEST_TMP0, /* Temp register used for RX test calculations 0x3D */
S_RXESN, /* Received end sequence number for A-MPDU BA 0x3E */
S_STREG6, /* 0x3F */
-} ePsmScratchPadRegDefinitions;
+};
#define S_BEACON_INDX S_OLD_BREM
#define S_PRS_INDX S_OLD_CWWIN
@@ -1563,7 +1548,7 @@ typedef enum {
#define SLOW_CTRL_FD (1 << 8)
/* ucode mac statistic counters in shared memory */
-typedef struct macstat {
+struct macstat {
u16 txallfrm; /* 0x80 */
u16 txrtsfrm; /* 0x82 */
u16 txctsfrm; /* 0x84 */
@@ -1621,7 +1606,7 @@ typedef struct macstat {
u16 phywatchdog; /* 0xfa # of phy watchdog events */
u16 PAD;
u16 bphy_badplcp; /* bphy bad plcp */
-} macstat_t;
+};
/* dot11 core-specific control flags */
#define SICF_PCLKE 0x0004 /* PHY clock enable */
@@ -1688,7 +1673,7 @@ typedef struct macstat {
#define BPHY_PEAK_ENERGY_HI 0x34
#define BPHY_SYNC_CTL 0x35
#define BPHY_TX_PWR_CTRL 0x36
-#define BPHY_TX_EST_PWR 0x37
+#define BPHY_TX_EST_PWR 0x37
#define BPHY_STEP 0x38
#define BPHY_WARMUP 0x39
#define BPHY_LMS_CFF_READ 0x3a
@@ -1770,4 +1755,21 @@ typedef struct macstat {
#define SHM_BYT_CNT 0x2 /* IHR location */
#define MAX_BYT_CNT 0x600 /* Maximum frame len */
-#endif /* _D11_H */
+struct d11cnt {
+ u32 txfrag;
+ u32 txmulti;
+ u32 txfail;
+ u32 txretry;
+ u32 txretrie;
+ u32 rxdup;
+ u32 txrts;
+ u32 txnocts;
+ u32 txnoack;
+ u32 rxfrag;
+ u32 rxmulti;
+ u32 rxcrc;
+ u32 txfrmsnt;
+ u32 rxundec;
+};
+
+#endif /* _BRCM_D11_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/hnddma.c b/drivers/staging/brcm80211/brcmsmac/dma.c
index f607315f814..ea17671efb6 100644
--- a/drivers/staging/brcm80211/brcmsmac/hnddma.c
+++ b/drivers/staging/brcm80211/brcmsmac/dma.c
@@ -13,27 +13,149 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
#include <linux/pci.h>
-#include <bcmdefs.h>
-#include <bcmdevs.h>
-#include <hndsoc.h>
-#include <bcmutils.h>
-#include <aiutils.h>
-
-#include <sbhnddma.h>
-#include <hnddma.h>
#if defined(__mips__)
#include <asm/addrspace.h>
#endif
-#ifdef BRCM_FULLMAC
-#error "hnddma.c shouldn't be needed for FULLMAC"
-#endif
+#include <brcmu_utils.h>
+#include <aiutils.h>
+#include "types.h"
+#include "dma.h"
+
+/*
+ * Each descriptor ring must be 8kB aligned and fit within a contiguous 8kB physical address range.
+ */
+#define D64RINGALIGN_BITS 13
+#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
+#define D64RINGALIGN (1 << D64RINGALIGN_BITS)
+
+#define D64MAXDD (D64MAXRINGSZ / sizeof(struct dma64desc))
+
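
The constants above encode the hardware constraint that a descriptor ring is 8 kB aligned and therefore never straddles an 8 kB window. A standalone sketch of the arithmetic (not part of the patch; the base address is an arbitrary aligned example):

#include <stdio.h>
#include <stdint.h>

#define D64RINGALIGN_BITS 13
#define D64MAXRINGSZ      (1 << D64RINGALIGN_BITS)     /* 8192 bytes */
#define D64RINGALIGN      (1 << D64RINGALIGN_BITS)

struct dma64desc { uint32_t ctrl1, ctrl2, addrlow, addrhigh; };  /* 16 bytes */

int main(void)
{
    unsigned maxdd = D64MAXRINGSZ / sizeof(struct dma64desc);

    /* an 8 kB-aligned ring of at most maxdd descriptors stays in one 8 kB window */
    uintptr_t base = 0x12340000;                       /* already D64RINGALIGN-aligned */
    uintptr_t last = base + maxdd * sizeof(struct dma64desc) - 1;

    printf("max descriptors per ring: %u\n", maxdd);   /* 512 */
    printf("ring spans one 8 kB window: %s\n",
           (base >> D64RINGALIGN_BITS) == (last >> D64RINGALIGN_BITS) ? "yes" : "no");
    return 0;
}
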
+/* transmit channel control */
+#define D64_XC_XE 0x00000001 /* transmit enable */
+#define D64_XC_SE 0x00000002 /* transmit suspend request */
+#define D64_XC_LE 0x00000004 /* loopback enable */
+#define D64_XC_FL 0x00000010 /* flush request */
+#define D64_XC_PD 0x00000800 /* parity check disable */
+#define D64_XC_AE 0x00030000 /* address extension bits */
+#define D64_XC_AE_SHIFT 16
+
+/* transmit descriptor table pointer */
+#define D64_XP_LD_MASK 0x00000fff /* last valid descriptor */
+
+/* transmit channel status */
+#define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */
+#define D64_XS0_XS_MASK 0xf0000000 /* transmit state */
+#define D64_XS0_XS_SHIFT 28
+#define D64_XS0_XS_DISABLED 0x00000000 /* disabled */
+#define D64_XS0_XS_ACTIVE 0x10000000 /* active */
+#define D64_XS0_XS_IDLE 0x20000000 /* idle wait */
+#define D64_XS0_XS_STOPPED 0x30000000 /* stopped */
+#define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */
+
+#define D64_XS1_AD_MASK 0x00001fff /* active descriptor */
+#define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */
+#define D64_XS1_XE_SHIFT 28
+#define D64_XS1_XE_NOERR 0x00000000 /* no error */
+#define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */
+#define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */
+#define D64_XS1_XE_DTE 0x30000000 /* data transfer error */
+#define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */
+#define D64_XS1_XE_COREE 0x50000000 /* core error */
+
+/* receive channel control */
+#define D64_RC_RE 0x00000001 /* receive enable */
+#define D64_RC_RO_MASK 0x000000fe /* receive frame offset */
+#define D64_RC_RO_SHIFT 1
+#define D64_RC_FM 0x00000100 /* direct fifo receive (pio) mode */
+#define D64_RC_SH 0x00000200 /* separate rx header descriptor enable */
+#define D64_RC_OC 0x00000400 /* overflow continue */
+#define D64_RC_PD 0x00000800 /* parity check disable */
+#define D64_RC_AE 0x00030000 /* address extension bits */
+#define D64_RC_AE_SHIFT 16
+
+/* flags for dma controller */
+#define DMA_CTRL_PEN (1 << 0) /* parity enable */
+#define DMA_CTRL_ROC (1 << 1) /* rx overflow continue */
+#define DMA_CTRL_RXMULTI (1 << 2) /* allow rx scatter to multiple descriptors */
+#define DMA_CTRL_UNFRAMED (1 << 3) /* Unframed Rx/Tx data */
+
+/* receive descriptor table pointer */
+#define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */
+
+/* receive channel status */
+#define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */
+#define D64_RS0_RS_MASK 0xf0000000 /* receive state */
+#define D64_RS0_RS_SHIFT 28
+#define D64_RS0_RS_DISABLED 0x00000000 /* disabled */
+#define D64_RS0_RS_ACTIVE 0x10000000 /* active */
+#define D64_RS0_RS_IDLE 0x20000000 /* idle wait */
+#define D64_RS0_RS_STOPPED 0x30000000 /* stopped */
+#define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */
+
+#define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */
+#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */
+#define D64_RS1_RE_SHIFT 28
+#define D64_RS1_RE_NOERR 0x00000000 /* no error */
+#define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */
+#define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */
+#define D64_RS1_RE_DTE 0x30000000 /* data transfer error */
+#define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */
+#define D64_RS1_RE_COREE 0x50000000 /* core error */
+
+/* fifoaddr */
+#define D64_FA_OFF_MASK 0xffff /* offset */
+#define D64_FA_SEL_MASK 0xf0000 /* select */
+#define D64_FA_SEL_SHIFT 16
+#define D64_FA_SEL_XDD 0x00000 /* transmit dma data */
+#define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */
+#define D64_FA_SEL_RDD 0x40000 /* receive dma data */
+#define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */
+#define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */
+#define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */
+#define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */
+#define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */
+#define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */
+#define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */
+
+/* descriptor control flags 1 */
+#define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */
+#define D64_CTRL1_EOT ((u32)1 << 28) /* end of descriptor table */
+#define D64_CTRL1_IOC ((u32)1 << 29) /* interrupt on completion */
+#define D64_CTRL1_EOF ((u32)1 << 30) /* end of frame */
+#define D64_CTRL1_SOF ((u32)1 << 31) /* start of frame */
+
+/* descriptor control flags 2 */
+#define D64_CTRL2_BC_MASK 0x00007fff /* buffer byte count; real data len must be <= 16KB */
+#define D64_CTRL2_AE 0x00030000 /* address extension bits */
+#define D64_CTRL2_AE_SHIFT 16
+#define D64_CTRL2_PARITY 0x00040000 /* parity bit */
+
+/* control flags in the range [27:20] are core-specific and not defined here */
+#define D64_CTRL_CORE_MASK 0x0ff00000
+
+#define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */
+#define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */
+#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1 */
+#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */
+
+#define DMADDRWIDTH_30 30 /* 30-bit addressing capability */
+#define DMADDRWIDTH_32 32 /* 32-bit addressing capability */
+#define DMADDRWIDTH_63 63 /* 64-bit addressing capability */
+#define DMADDRWIDTH_64 64 /* 64-bit addressing capability */
+
+/* packet headroom necessary to accommodate the largest header in the system (i.e. TXOFF).
+ * By doing so, we avoid the need to allocate an extra buffer for the header when bridging to WL.
+ * There is a compile time check in wlc.c which ensures that this value is at least as big
+ * as TXOFF. This value is used in dma_rxfill (dma.c).
+ */
+
+#define BCMEXTRAHDROOM 172
/* debug/trace */
#ifdef BCMDBG
@@ -58,6 +180,15 @@
#define DMA_NONE(args)
+typedef unsigned long dmaaddr_t;
+#define PHYSADDRHI(_pa) (0)
+#define PHYSADDRHISET(_pa, _val)
+#define PHYSADDRLO(_pa) ((_pa))
+#define PHYSADDRLOSET(_pa, _val) \
+ do { \
+ (_pa) = (_val); \
+ } while (0)
+
#define d64txregs dregs.d64_u.txregs_64
#define d64rxregs dregs.d64_u.rxregs_64
#define txd64 dregs.d64_u.txd_64
@@ -73,9 +204,33 @@ static uint dma_msg_level;
#define R_SM(r) (*(r))
#define W_SM(r, v) (*(r) = (v))
+/* One physical DMA segment */
+struct dma_seg {
+ dmaaddr_t addr;
+ u32 length;
+};
+
+struct dma_seg_map {
+ void *oshdmah; /* Opaque handle for OSL to store its information */
+ uint origsize; /* Size of the virtual packet */
+ uint nsegs;
+ struct dma_seg segs[MAX_DMA_SEGS];
+};
+
+/*
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+struct dma64desc {
+ u32 ctrl1; /* misc control bits & bufcount */
+ u32 ctrl2; /* buffer count and address extension */
+ u32 addrlow; /* memory address of the data buffer, bits 31:0 */
+ u32 addrhigh; /* memory address of the data buffer, bits 63:32 */
+};
+
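
The code below repeatedly converts between byte offsets into a descriptor ring and descriptor indices via B2I()/I2B(), whose definitions are outside the quoted hunks. A sketch using the conventional definitions (an assumption, not a quote from the patch):

#include <stdio.h>
#include <stdint.h>

struct dma64desc { uint32_t ctrl1, ctrl2, addrlow, addrhigh; };

/* assumed equivalents of the B2I()/I2B() helpers used by the driver:
 * convert between a byte offset into the ring and a descriptor index
 */
#define B2I(bytes, type) ((bytes) / sizeof(type))
#define I2B(index, type) ((index) * sizeof(type))

int main(void)
{
    unsigned txout = 37;
    unsigned byte_off = I2B(txout, struct dma64desc);

    printf("index %u -> byte offset %u -> index %lu\n",
           txout, byte_off, (unsigned long)B2I(byte_off, struct dma64desc));
    return 0;   /* 37 -> 592 -> 37 */
}
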
/* dma engine software state */
-typedef struct dma_info {
- struct hnddma_pub hnddma; /* exported structure */
+struct dma_info {
+ struct dma_pub dma; /* exported structure */
uint *msg_level; /* message level pointer */
char name[MAXNAMEL]; /* callers name for diag msgs */
@@ -88,8 +243,10 @@ typedef struct dma_info {
struct {
dma64regs_t *txregs_64; /* 64-bit dma tx engine registers */
dma64regs_t *rxregs_64; /* 64-bit dma rx engine registers */
- dma64dd_t *txd_64; /* pointer to dma64 tx descriptor ring */
- dma64dd_t *rxd_64; /* pointer to dma64 rx descriptor ring */
+ /* pointer to dma64 tx descriptor ring */
+ struct dma64desc *txd_64;
+ /* pointer to dma64 rx descriptor ring */
+ struct dma64desc *rxd_64;
} d64_u;
} dregs;
@@ -99,7 +256,7 @@ typedef struct dma_info {
u16 txin; /* index of next descriptor to reclaim */
u16 txout; /* index of next descriptor to post */
void **txp; /* pointer to parallel array of pointers to packets */
- hnddma_seg_map_t *txp_dmah; /* DMA MAP meta-data handle */
+ struct dma_seg_map *txp_dmah; /* DMA MAP meta-data handle */
dmaaddr_t txdpa; /* Aligned physical address of descriptor ring */
dmaaddr_t txdpaorig; /* Original physical address of descriptor ring */
u16 txdalign; /* #bytes added to alloc'd mem to align txd */
@@ -113,7 +270,7 @@ typedef struct dma_info {
u16 rxin; /* index of next descriptor to reclaim */
u16 rxout; /* index of next descriptor to post */
void **rxp; /* pointer to parallel array of pointers to packets */
- hnddma_seg_map_t *rxp_dmah; /* DMA MAP meta-data handle */
+ struct dma_seg_map *rxp_dmah; /* DMA MAP meta-data handle */
dmaaddr_t rxdpa; /* Aligned physical address of descriptor ring */
dmaaddr_t rxdpaorig; /* Original physical address of descriptor ring */
u16 rxdalign; /* #bytes added to alloc'd mem to align rxd */
@@ -137,7 +294,7 @@ typedef struct dma_info {
uint dataoffsetlow; /* add to get dma address of data buffer, low 32 bits */
uint dataoffsethigh; /* high 32 bits */
bool aligndesc_4k; /* descriptor base need to be aligned or not */
-} dma_info_t;
+};
/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
#ifdef BCMDMASGLISTOSL
@@ -169,65 +326,67 @@ typedef struct dma_info {
#define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */
/* Common prototypes */
-static bool _dma_isaddrext(dma_info_t *di);
-static bool _dma_descriptor_align(dma_info_t *di);
-static bool _dma_alloc(dma_info_t *di, uint direction);
-static void _dma_detach(dma_info_t *di);
-static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
-static void _dma_rxinit(dma_info_t *di);
-static void *_dma_rx(dma_info_t *di);
-static bool _dma_rxfill(dma_info_t *di);
-static void _dma_rxreclaim(dma_info_t *di);
-static void _dma_rxenable(dma_info_t *di);
-static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
-static void _dma_rx_param_get(dma_info_t *di, u16 *rxoffset,
+static bool _dma_isaddrext(struct dma_info *di);
+static bool _dma_descriptor_align(struct dma_info *di);
+static bool _dma_alloc(struct dma_info *di, uint direction);
+static void _dma_detach(struct dma_info *di);
+static void _dma_ddtable_init(struct dma_info *di, uint direction,
+ dmaaddr_t pa);
+static void _dma_rxinit(struct dma_info *di);
+static void *_dma_rx(struct dma_info *di);
+static bool _dma_rxfill(struct dma_info *di);
+static void _dma_rxreclaim(struct dma_info *di);
+static void _dma_rxenable(struct dma_info *di);
+static void *_dma_getnextrxp(struct dma_info *di, bool forceall);
+static void _dma_rx_param_get(struct dma_info *di, u16 *rxoffset,
u16 *rxbufsize);
-static void _dma_txblock(dma_info_t *di);
-static void _dma_txunblock(dma_info_t *di);
-static uint _dma_txactive(dma_info_t *di);
-static uint _dma_rxactive(dma_info_t *di);
-static uint _dma_txpending(dma_info_t *di);
-static uint _dma_txcommitted(dma_info_t *di);
-
-static void *_dma_peeknexttxp(dma_info_t *di);
-static void *_dma_peeknextrxp(dma_info_t *di);
-static unsigned long _dma_getvar(dma_info_t *di, const char *name);
-static void _dma_counterreset(dma_info_t *di);
-static void _dma_fifoloopbackenable(dma_info_t *di);
-static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
+static void _dma_txblock(struct dma_info *di);
+static void _dma_txunblock(struct dma_info *di);
+static uint _dma_txactive(struct dma_info *di);
+static uint _dma_rxactive(struct dma_info *di);
+static uint _dma_txpending(struct dma_info *di);
+static uint _dma_txcommitted(struct dma_info *di);
+
+static void *_dma_peeknexttxp(struct dma_info *di);
+static void *_dma_peeknextrxp(struct dma_info *di);
+static unsigned long _dma_getvar(struct dma_info *di, const char *name);
+static void _dma_counterreset(struct dma_info *di);
+static void _dma_fifoloopbackenable(struct dma_info *di);
+static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags);
static u8 dma_align_sizetobits(uint size);
-static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
+static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
u16 *alignbits, uint *alloced,
dmaaddr_t *descpa);
/* Prototypes for 64-bit routines */
-static bool dma64_alloc(dma_info_t *di, uint direction);
-static bool dma64_txreset(dma_info_t *di);
-static bool dma64_rxreset(dma_info_t *di);
-static bool dma64_txsuspendedidle(dma_info_t *di);
-static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
-static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
-static void *dma64_getpos(dma_info_t *di, bool direction);
-static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
-static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
-static void dma64_txrotate(dma_info_t *di);
-
-static bool dma64_rxidle(dma_info_t *di);
-static void dma64_txinit(dma_info_t *di);
-static bool dma64_txenabled(dma_info_t *di);
-static void dma64_txsuspend(dma_info_t *di);
-static void dma64_txresume(dma_info_t *di);
-static bool dma64_txsuspended(dma_info_t *di);
-static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
-static bool dma64_txstopped(dma_info_t *di);
-static bool dma64_rxstopped(dma_info_t *di);
-static bool dma64_rxenabled(dma_info_t *di);
+static bool dma64_alloc(struct dma_info *di, uint direction);
+static bool dma64_txreset(struct dma_info *di);
+static bool dma64_rxreset(struct dma_info *di);
+static bool dma64_txsuspendedidle(struct dma_info *di);
+static int dma64_txfast(struct dma_info *di, struct sk_buff *p0, bool commit);
+static int dma64_txunframed(struct dma_info *di, void *p0, uint len,
+ bool commit);
+static void *dma64_getpos(struct dma_info *di, bool direction);
+static void *dma64_getnexttxp(struct dma_info *di, enum txd_range range);
+static void *dma64_getnextrxp(struct dma_info *di, bool forceall);
+static void dma64_txrotate(struct dma_info *di);
+
+static bool dma64_rxidle(struct dma_info *di);
+static void dma64_txinit(struct dma_info *di);
+static bool dma64_txenabled(struct dma_info *di);
+static void dma64_txsuspend(struct dma_info *di);
+static void dma64_txresume(struct dma_info *di);
+static bool dma64_txsuspended(struct dma_info *di);
+static void dma64_txreclaim(struct dma_info *di, enum txd_range range);
+static bool dma64_txstopped(struct dma_info *di);
+static bool dma64_rxstopped(struct dma_info *di);
+static bool dma64_rxenabled(struct dma_info *di);
static bool _dma64_addrext(dma64regs_t *dma64regs);
static inline u32 parity32(u32 data);
-const di_fcn_t dma64proc = {
+const struct di_fcn_s dma64proc = {
(di_detach_t) _dma_detach,
(di_txinit_t) dma64_txinit,
(di_txreset_t) dma64_txreset,
@@ -274,16 +433,16 @@ const di_fcn_t dma64proc = {
39
};
-struct hnddma_pub *dma_attach(char *name, si_t *sih,
+struct dma_pub *dma_attach(char *name, struct si_pub *sih,
void *dmaregstx, void *dmaregsrx, uint ntxd,
uint nrxd, uint rxbufsize, int rxextheadroom,
uint nrxpost, uint rxoffset, uint *msg_level)
{
- dma_info_t *di;
+ struct dma_info *di;
uint size;
/* allocate private info structure */
- di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
+ di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
if (di == NULL) {
#ifdef BCMDBG
printk(KERN_ERR "dma_attach: out of memory\n");
@@ -299,20 +458,20 @@ struct hnddma_pub *dma_attach(char *name, si_t *sih,
/* init dma reg pointer */
di->d64txregs = (dma64regs_t *) dmaregstx;
di->d64rxregs = (dma64regs_t *) dmaregsrx;
- di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
+ di->dma.di_fn = (const struct di_fcn_s *)&dma64proc;
/* Default flags (which can be changed by the driver calling dma_ctrlflags
* before enable): For backwards compatibility both Rx Overflow Continue
* and Parity are DISABLED.
* supports it.
*/
- di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
- 0);
+ di->dma.di_fn->ctrlflags(&di->dma, DMA_CTRL_ROC | DMA_CTRL_PEN,
+ 0);
DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
"rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
"dmaregstx %p dmaregsrx %p\n", name, "DMA64",
- di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize,
+ di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
/* make a private copy of our callers name */
@@ -427,21 +586,21 @@ struct hnddma_pub *dma_attach(char *name, si_t *sih,
/* allocate DMA mapping vectors */
if (DMASGLIST_ENAB) {
if (ntxd) {
- size = ntxd * sizeof(hnddma_seg_map_t);
+ size = ntxd * sizeof(struct dma_seg_map);
di->txp_dmah = kzalloc(size, GFP_ATOMIC);
if (di->txp_dmah == NULL)
goto fail;
}
if (nrxd) {
- size = nrxd * sizeof(hnddma_seg_map_t);
+ size = nrxd * sizeof(struct dma_seg_map);
di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
if (di->rxp_dmah == NULL)
goto fail;
}
}
- return (struct hnddma_pub *) di;
+ return (struct dma_pub *) di;
fail:
_dma_detach(di);
@@ -463,8 +622,8 @@ static inline u32 parity32(u32 data)
#define DMA64_DD_PARITY(dd) parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
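
parity32() is declared above but its body is not in the quoted hunks; DMA64_DD_PARITY() feeds it the XOR of the four descriptor words so that the D64_CTRL2_PARITY bit can be set when needed. A sketch of the conventional XOR-fold parity (an assumption about the exact implementation):

#include <stdio.h>
#include <stdint.h>

/* XOR-fold parity of a 32-bit word: returns 1 if an odd number of bits are set */
static inline uint32_t parity32(uint32_t data)
{
    data ^= data >> 16;
    data ^= data >> 8;
    data ^= data >> 4;
    data ^= data >> 2;
    data ^= data >> 1;
    return data & 1;
}

int main(void)
{
    printf("%u %u %u\n", parity32(0x1), parity32(0x3), parity32(0xF0F01234));
    return 0;   /* prints: 1 0 1 */
}
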
static inline void
-dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
- u32 *flags, u32 bufcount)
+dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
+ dmaaddr_t pa, uint outidx, u32 *flags, u32 bufcount)
{
u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
@@ -497,7 +656,7 @@ dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
}
- if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
+ if (di->dma.dmactrlflags & DMA_CTRL_PEN) {
if (DMA64_DD_PARITY(&ddring[outidx])) {
W_SM(&ddring[outidx].ctrl2,
BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
@@ -505,7 +664,7 @@ dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
}
}
-static bool _dma_alloc(dma_info_t *di, uint direction)
+static bool _dma_alloc(struct dma_info *di, uint direction)
{
return dma64_alloc(di, direction);
}
@@ -523,7 +682,7 @@ void *dma_alloc_consistent(struct pci_dev *pdev, uint size, u16 align_bits,
}
/* !! may be called with core in reset */
-static void _dma_detach(dma_info_t *di)
+static void _dma_detach(struct dma_info *di)
{
DMA_TRACE(("%s: dma_detach\n", di->name));
@@ -553,7 +712,7 @@ static void _dma_detach(dma_info_t *di)
}
-static bool _dma_descriptor_align(dma_info_t *di)
+static bool _dma_descriptor_align(struct dma_info *di)
{
u32 addrl;
@@ -573,7 +732,7 @@ static bool _dma_descriptor_align(dma_info_t *di)
}
/* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
-static bool _dma_isaddrext(dma_info_t *di)
+static bool _dma_isaddrext(struct dma_info *di)
{
/* DMA64 supports full 32- or 64-bit operation. AE is always valid */
@@ -595,7 +754,7 @@ static bool _dma_isaddrext(dma_info_t *di)
}
/* initialize descriptor table base address */
-static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
+static void _dma_ddtable_init(struct dma_info *di, uint direction, dmaaddr_t pa)
{
if (!di->aligndesc_4k) {
if (direction == DMA_TX)
@@ -644,14 +803,14 @@ static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
}
}
-static void _dma_fifoloopbackenable(dma_info_t *di)
+static void _dma_fifoloopbackenable(struct dma_info *di)
{
DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
OR_REG(&di->d64txregs->control, D64_XC_LE);
}
-static void _dma_rxinit(dma_info_t *di)
+static void _dma_rxinit(struct dma_info *di)
{
DMA_TRACE(("%s: dma_rxinit\n", di->name));
@@ -662,7 +821,7 @@ static void _dma_rxinit(dma_info_t *di)
/* clear rx descriptor ring */
memset((void *)di->rxd64, '\0',
- (di->nrxd * sizeof(dma64dd_t)));
+ (di->nrxd * sizeof(struct dma64desc)));
/* DMA engine without alignment requirement requires table to be inited
* before enabling the engine
@@ -676,9 +835,9 @@ static void _dma_rxinit(dma_info_t *di)
_dma_ddtable_init(di, DMA_RX, di->rxdpa);
}
-static void _dma_rxenable(dma_info_t *di)
+static void _dma_rxenable(struct dma_info *di)
{
- uint dmactrlflags = di->hnddma.dmactrlflags;
+ uint dmactrlflags = di->dma.dmactrlflags;
u32 control;
DMA_TRACE(("%s: dma_rxenable\n", di->name));
@@ -698,7 +857,7 @@ static void _dma_rxenable(dma_info_t *di)
}
static void
-_dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
+_dma_rx_param_get(struct dma_info *di, u16 *rxoffset, u16 *rxbufsize)
{
/* the normal values fit into 16 bits */
*rxoffset = (u16) di->rxoffset;
@@ -714,7 +873,7 @@ _dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
* After it reaches the max size of buffer, the data continues in next DMA descriptor
* buffer WITHOUT DMA header
*/
-static void *_dma_rx(dma_info_t *di)
+static void *_dma_rx(struct dma_info *di)
{
struct sk_buff *p, *head, *tail;
uint len;
@@ -754,17 +913,17 @@ static void *_dma_rx(dma_info_t *di)
B2I(((R_REG(&di->d64rxregs->status0) &
D64_RS0_CD_MASK) -
di->rcvptrbase) & D64_RS0_CD_MASK,
- dma64dd_t);
+ struct dma64desc);
DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
di->rxin, di->rxout, cur));
}
#endif /* BCMDBG */
- if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
+ if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
di->name, len));
- bcm_pkt_buf_free_skb(head);
- di->hnddma.rxgiants++;
+ brcmu_pkt_buf_free_skb(head);
+ di->dma.rxgiants++;
goto next_frame;
}
}
@@ -777,7 +936,7 @@ static void *_dma_rx(dma_info_t *di)
* this will stall the rx dma and user might want to call rxfill again asap
* This is unlikely on a memory-rich NIC, but common on a memory-constrained dongle
*/
-static bool _dma_rxfill(dma_info_t *di)
+static bool _dma_rxfill(struct dma_info *di)
{
struct sk_buff *p;
u16 rxin, rxout;
@@ -811,7 +970,7 @@ static bool _dma_rxfill(dma_info_t *di)
size to be allocated
*/
- p = bcm_pkt_buf_get_skb(di->rxbufsize + extra_offset);
+ p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
if (p == NULL) {
DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
@@ -821,7 +980,7 @@ static bool _dma_rxfill(dma_info_t *di)
di->name));
ring_empty = true;
}
- di->hnddma.rxnobuf++;
+ di->dma.rxnobuf++;
break;
}
/* reserve an extra headroom, if applicable */
@@ -835,7 +994,7 @@ static bool _dma_rxfill(dma_info_t *di)
if (DMASGLIST_ENAB)
memset(&di->rxp_dmah[rxout], 0,
- sizeof(hnddma_seg_map_t));
+ sizeof(struct dma_seg_map));
pa = pci_map_single(di->pbus, p->data,
di->rxbufsize, PCI_DMA_FROMDEVICE);
@@ -857,13 +1016,13 @@ static bool _dma_rxfill(dma_info_t *di)
/* update the chip lastdscr pointer */
W_REG(&di->d64rxregs->ptr,
- di->rcvptrbase + I2B(rxout, dma64dd_t));
+ di->rcvptrbase + I2B(rxout, struct dma64desc));
return ring_empty;
}
/* like getnexttxp but no reclaim */
-static void *_dma_peeknexttxp(dma_info_t *di)
+static void *_dma_peeknexttxp(struct dma_info *di)
{
uint end, i;
@@ -873,7 +1032,7 @@ static void *_dma_peeknexttxp(dma_info_t *di)
end =
B2I(((R_REG(&di->d64txregs->status0) &
D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
- dma64dd_t);
+ struct dma64desc);
for (i = di->txin; i != end; i = NEXTTXD(i))
if (di->txp[i])
@@ -883,7 +1042,7 @@ static void *_dma_peeknexttxp(dma_info_t *di)
}
/* like getnextrxp but not take off the ring */
-static void *_dma_peeknextrxp(dma_info_t *di)
+static void *_dma_peeknextrxp(struct dma_info *di)
{
uint end, i;
@@ -893,7 +1052,7 @@ static void *_dma_peeknextrxp(dma_info_t *di)
end =
B2I(((R_REG(&di->d64rxregs->status0) &
D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
- dma64dd_t);
+ struct dma64desc);
for (i = di->rxin; i != end; i = NEXTRXD(i))
if (di->rxp[i])
@@ -902,17 +1061,17 @@ static void *_dma_peeknextrxp(dma_info_t *di)
return NULL;
}
-static void _dma_rxreclaim(dma_info_t *di)
+static void _dma_rxreclaim(struct dma_info *di)
{
void *p;
DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
while ((p = _dma_getnextrxp(di, true)))
- bcm_pkt_buf_free_skb(p);
+ brcmu_pkt_buf_free_skb(p);
}
-static void *_dma_getnextrxp(dma_info_t *di, bool forceall)
+static void *_dma_getnextrxp(struct dma_info *di, bool forceall)
{
if (di->nrxd == 0)
return NULL;
@@ -920,34 +1079,34 @@ static void *_dma_getnextrxp(dma_info_t *di, bool forceall)
return dma64_getnextrxp(di, forceall);
}
-static void _dma_txblock(dma_info_t *di)
+static void _dma_txblock(struct dma_info *di)
{
- di->hnddma.txavail = 0;
+ di->dma.txavail = 0;
}
-static void _dma_txunblock(dma_info_t *di)
+static void _dma_txunblock(struct dma_info *di)
{
- di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+ di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
}
-static uint _dma_txactive(dma_info_t *di)
+static uint _dma_txactive(struct dma_info *di)
{
return NTXDACTIVE(di->txin, di->txout);
}
-static uint _dma_txpending(dma_info_t *di)
+static uint _dma_txpending(struct dma_info *di)
{
uint curr;
curr =
B2I(((R_REG(&di->d64txregs->status0) &
D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
- dma64dd_t);
+ struct dma64desc);
return NTXDACTIVE(curr, di->txout);
}
-static uint _dma_txcommitted(dma_info_t *di)
+static uint _dma_txcommitted(struct dma_info *di)
{
uint ptr;
uint txin = di->txin;
@@ -955,27 +1114,27 @@ static uint _dma_txcommitted(dma_info_t *di)
if (txin == di->txout)
return 0;
- ptr = B2I(R_REG(&di->d64txregs->ptr), dma64dd_t);
+ ptr = B2I(R_REG(&di->d64txregs->ptr), struct dma64desc);
return NTXDACTIVE(di->txin, ptr);
}
-static uint _dma_rxactive(dma_info_t *di)
+static uint _dma_rxactive(struct dma_info *di)
{
return NRXDACTIVE(di->rxin, di->rxout);
}
-static void _dma_counterreset(dma_info_t *di)
+static void _dma_counterreset(struct dma_info *di)
{
/* reset all software counter */
- di->hnddma.rxgiants = 0;
- di->hnddma.rxnobuf = 0;
- di->hnddma.txnobuf = 0;
+ di->dma.rxgiants = 0;
+ di->dma.rxnobuf = 0;
+ di->dma.txnobuf = 0;
}
-static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
+static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
{
- uint dmactrlflags = di->hnddma.dmactrlflags;
+ uint dmactrlflags = di->dma.dmactrlflags;
if (di == NULL) {
DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name));
@@ -1004,16 +1163,16 @@ static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
}
}
- di->hnddma.dmactrlflags = dmactrlflags;
+ di->dma.dmactrlflags = dmactrlflags;
return dmactrlflags;
}
/* get the address of the var in order to change later */
-static unsigned long _dma_getvar(dma_info_t *di, const char *name)
+static unsigned long _dma_getvar(struct dma_info *di, const char *name)
{
if (!strcmp(name, "&txavail"))
- return (unsigned long)&(di->hnddma.txavail);
+ return (unsigned long)&(di->dma.txavail);
return 0;
}
@@ -1033,7 +1192,7 @@ u8 dma_align_sizetobits(uint size)
* descriptor ring size aligned location. This will ensure that the ring will
* not cross page boundary
*/
-static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
+static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
u16 *alignbits, uint *alloced,
dmaaddr_t *descpa)
{
@@ -1059,7 +1218,7 @@ static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
/* 64-bit DMA functions */
-static void dma64_txinit(dma_info_t *di)
+static void dma64_txinit(struct dma_info *di)
{
u32 control = D64_XC_XE;
@@ -1069,10 +1228,10 @@ static void dma64_txinit(dma_info_t *di)
return;
di->txin = di->txout = 0;
- di->hnddma.txavail = di->ntxd - 1;
+ di->dma.txavail = di->ntxd - 1;
/* clear tx descriptor ring */
- memset((void *)di->txd64, '\0', (di->ntxd * sizeof(dma64dd_t)));
+ memset((void *)di->txd64, '\0', (di->ntxd * sizeof(struct dma64desc)));
/* DMA engine without alignment requirement requires table to be inited
* before enabling the engine
@@ -1080,7 +1239,7 @@ static void dma64_txinit(dma_info_t *di)
if (!di->aligndesc_4k)
_dma_ddtable_init(di, DMA_TX, di->txdpa);
- if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
+ if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
control |= D64_XC_PD;
OR_REG(&di->d64txregs->control, control);
@@ -1091,7 +1250,7 @@ static void dma64_txinit(dma_info_t *di)
_dma_ddtable_init(di, DMA_TX, di->txdpa);
}
-static bool dma64_txenabled(dma_info_t *di)
+static bool dma64_txenabled(struct dma_info *di)
{
u32 xc;
@@ -1100,7 +1259,7 @@ static bool dma64_txenabled(dma_info_t *di)
return (xc != 0xffffffff) && (xc & D64_XC_XE);
}
-static void dma64_txsuspend(dma_info_t *di)
+static void dma64_txsuspend(struct dma_info *di)
{
DMA_TRACE(("%s: dma_txsuspend\n", di->name));
@@ -1110,7 +1269,7 @@ static void dma64_txsuspend(dma_info_t *di)
OR_REG(&di->d64txregs->control, D64_XC_SE);
}
-static void dma64_txresume(dma_info_t *di)
+static void dma64_txresume(struct dma_info *di)
{
DMA_TRACE(("%s: dma_txresume\n", di->name));
@@ -1120,21 +1279,21 @@ static void dma64_txresume(dma_info_t *di)
AND_REG(&di->d64txregs->control, ~D64_XC_SE);
}
-static bool dma64_txsuspended(dma_info_t *di)
+static bool dma64_txsuspended(struct dma_info *di)
{
return (di->ntxd == 0) ||
((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
D64_XC_SE);
}
-static void dma64_txreclaim(dma_info_t *di, txd_range_t range)
+static void dma64_txreclaim(struct dma_info *di, enum txd_range range)
{
void *p;
DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
- (range == HNDDMA_RANGE_ALL) ? "all" :
+ (range == DMA_RANGE_ALL) ? "all" :
((range ==
- HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
+ DMA_RANGE_TRANSMITTED) ? "transmitted" :
"transferred")));
if (di->txin == di->txout)
@@ -1142,24 +1301,24 @@ static void dma64_txreclaim(dma_info_t *di, txd_range_t range)
while ((p = dma64_getnexttxp(di, range))) {
/* For unframed data, we don't have any packets to free */
- if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
- bcm_pkt_buf_free_skb(p);
+ if (!(di->dma.dmactrlflags & DMA_CTRL_UNFRAMED))
+ brcmu_pkt_buf_free_skb(p);
}
}
-static bool dma64_txstopped(dma_info_t *di)
+static bool dma64_txstopped(struct dma_info *di)
{
return ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
D64_XS0_XS_STOPPED);
}
-static bool dma64_rxstopped(dma_info_t *di)
+static bool dma64_rxstopped(struct dma_info *di)
{
return ((R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
D64_RS0_RS_STOPPED);
}
-static bool dma64_alloc(dma_info_t *di, uint direction)
+static bool dma64_alloc(struct dma_info *di, uint direction)
{
u16 size;
uint ddlen;
@@ -1168,7 +1327,7 @@ static bool dma64_alloc(dma_info_t *di, uint direction)
u16 align;
u16 align_bits;
- ddlen = sizeof(dma64dd_t);
+ ddlen = sizeof(struct dma64desc);
size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
align_bits = di->dmadesc_align;
@@ -1182,7 +1341,8 @@ static bool dma64_alloc(dma_info_t *di, uint direction)
return false;
}
align = (1 << align_bits);
- di->txd64 = (dma64dd_t *) roundup((unsigned long)va, align);
+ di->txd64 = (struct dma64desc *)
+ roundup((unsigned long)va, align);
di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
PHYSADDRLOSET(di->txdpa,
PHYSADDRLO(di->txdpaorig) + di->txdalign);
@@ -1196,7 +1356,8 @@ static bool dma64_alloc(dma_info_t *di, uint direction)
return false;
}
align = (1 << align_bits);
- di->rxd64 = (dma64dd_t *) roundup((unsigned long)va, align);
+ di->rxd64 = (struct dma64desc *)
+ roundup((unsigned long)va, align);
di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
PHYSADDRLOSET(di->rxdpa,
PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
@@ -1207,7 +1368,7 @@ static bool dma64_alloc(dma_info_t *di, uint direction)
return true;
}
-static bool dma64_txreset(dma_info_t *di)
+static bool dma64_txreset(struct dma_info *di)
{
u32 status;
@@ -1232,7 +1393,7 @@ static bool dma64_txreset(dma_info_t *di)
return status == D64_XS0_XS_DISABLED;
}
-static bool dma64_rxidle(dma_info_t *di)
+static bool dma64_rxidle(struct dma_info *di)
{
DMA_TRACE(("%s: dma_rxidle\n", di->name));
@@ -1243,7 +1404,7 @@ static bool dma64_rxidle(dma_info_t *di)
(R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
}
-static bool dma64_rxreset(dma_info_t *di)
+static bool dma64_rxreset(struct dma_info *di)
{
u32 status;
@@ -1258,7 +1419,7 @@ static bool dma64_rxreset(dma_info_t *di)
return status == D64_RS0_RS_DISABLED;
}
-static bool dma64_rxenabled(dma_info_t *di)
+static bool dma64_rxenabled(struct dma_info *di)
{
u32 rc;
@@ -1266,7 +1427,7 @@ static bool dma64_rxenabled(dma_info_t *di)
return (rc != 0xffffffff) && (rc & D64_RC_RE);
}
-static bool dma64_txsuspendedidle(dma_info_t *di)
+static bool dma64_txsuspendedidle(struct dma_info *di)
{
if (di->ntxd == 0)
@@ -1286,7 +1447,7 @@ static bool dma64_txsuspendedidle(dma_info_t *di)
* We return a pointer to the beginning of the DATA buffer of the current descriptor.
* If DMA is idle, we return NULL.
*/
-static void *dma64_getpos(dma_info_t *di, bool direction)
+static void *dma64_getpos(struct dma_info *di, bool direction)
{
void *va;
bool idle;
@@ -1296,12 +1457,12 @@ static void *dma64_getpos(dma_info_t *di, bool direction)
cd_offset =
R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK;
idle = !NTXDACTIVE(di->txin, di->txout);
- va = di->txp[B2I(cd_offset, dma64dd_t)];
+ va = di->txp[B2I(cd_offset, struct dma64desc)];
} else {
cd_offset =
R_REG(&di->d64rxregs->status0) & D64_XS0_CD_MASK;
idle = !NRXDACTIVE(di->rxin, di->rxout);
- va = di->rxp[B2I(cd_offset, dma64dd_t)];
+ va = di->rxp[B2I(cd_offset, struct dma64desc)];
}
/* If DMA is IDLE, return NULL */
@@ -1316,12 +1477,13 @@ static void *dma64_getpos(dma_info_t *di, bool direction)
/* TX of unframed data
*
* Adds a DMA ring descriptor for the data pointed to by "buf".
- * This is for DMA of a buffer of data and is unlike other hnddma TX functions
+ * This is for DMA of a buffer of data and is unlike other dma TX functions
* that take a pointer to a "packet"
* Each call to this is results in a single descriptor being added for "len" bytes of
* data starting at "buf", it doesn't handle chained buffers.
*/
-static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
+static int
+dma64_txunframed(struct dma_info *di, void *buf, uint len, bool commit)
{
u16 txout;
u32 flags = 0;
@@ -1355,18 +1517,18 @@ static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
/* kick the chip */
if (commit) {
W_REG(&di->d64txregs->ptr,
- di->xmtptrbase + I2B(txout, dma64dd_t));
+ di->xmtptrbase + I2B(txout, struct dma64desc));
}
/* tx flow control */
- di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+ di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
return 0;
outoftxd:
DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
- di->hnddma.txavail = 0;
- di->hnddma.txnobuf++;
+ di->dma.txavail = 0;
+ di->dma.txnobuf++;
return -1;
}
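
The tx flow-control updates above recompute the free count as ntxd minus the active descriptors minus one: one slot is deliberately kept unused so that a full ring can be distinguished from an empty one (txin == txout means empty). A minimal sketch of that ring arithmetic, assuming the usual power-of-two ring size and a masked-distance NTXDACTIVE(); the helper names below are illustrative, not the driver's:

/* Illustrative ring-occupancy math; assumes ntxd is a power of two. */
static unsigned int ring_active(unsigned int txin, unsigned int txout,
				unsigned int ntxd)
{
	return (txout - txin) & (ntxd - 1);	/* posted but not yet reclaimed */
}

static unsigned int ring_avail(unsigned int txin, unsigned int txout,
			       unsigned int ntxd)
{
	/* one descriptor is sacrificed so "full" never looks like "empty" */
	return ntxd - ring_active(txin, txout, ntxd) - 1;
}
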
@@ -1374,7 +1536,7 @@ static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
 * WARNING: the caller must check the return value for errors.
 * an error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems
*/
-static int dma64_txfast(dma_info_t *di, struct sk_buff *p0,
+static int dma64_txfast(struct dma_info *di, struct sk_buff *p0,
bool commit)
{
struct sk_buff *p, *next;
@@ -1394,7 +1556,7 @@ static int dma64_txfast(dma_info_t *di, struct sk_buff *p0,
*/
for (p = p0; p; p = next) {
uint nsegs, j;
- hnddma_seg_map_t *map;
+ struct dma_seg_map *map;
data = p->data;
len = p->len;
@@ -1410,7 +1572,7 @@ static int dma64_txfast(dma_info_t *di, struct sk_buff *p0,
/* get physical address of buffer start */
if (DMASGLIST_ENAB)
memset(&di->txp_dmah[txout], 0,
- sizeof(hnddma_seg_map_t));
+ sizeof(struct dma_seg_map));
pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
@@ -1471,41 +1633,41 @@ static int dma64_txfast(dma_info_t *di, struct sk_buff *p0,
/* kick the chip */
if (commit)
W_REG(&di->d64txregs->ptr,
- di->xmtptrbase + I2B(txout, dma64dd_t));
+ di->xmtptrbase + I2B(txout, struct dma64desc));
/* tx flow control */
- di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+ di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
return 0;
outoftxd:
DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
- bcm_pkt_buf_free_skb(p0);
- di->hnddma.txavail = 0;
- di->hnddma.txnobuf++;
+ brcmu_pkt_buf_free_skb(p0);
+ di->dma.txavail = 0;
+ di->dma.txnobuf++;
return -1;
}
/*
* Reclaim next completed txd (txds if using chained buffers) in the range
* specified and return associated packet.
- * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have be
+ * If range is DMA_RANGE_TRANSMITTED, reclaim descriptors that have been
* transmitted as noted by the hardware "CurrDescr" pointer.
- * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have be
+ * If range is DMA_RANGE_TRANSFERED, reclaim descriptors that have been
* transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
- * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
+ * If range is DMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
* return associated packet regardless of the value of hardware pointers.
*/
-static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range)
+static void *dma64_getnexttxp(struct dma_info *di, enum txd_range range)
{
u16 start, end, i;
u16 active_desc;
void *txp;
DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
- (range == HNDDMA_RANGE_ALL) ? "all" :
+ (range == DMA_RANGE_ALL) ? "all" :
((range ==
- HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
+ DMA_RANGE_TRANSMITTED) ? "transmitted" :
"transferred")));
if (di->ntxd == 0)
@@ -1514,24 +1676,23 @@ static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range)
txp = NULL;
start = di->txin;
- if (range == HNDDMA_RANGE_ALL)
+ if (range == DMA_RANGE_ALL)
end = di->txout;
else {
dma64regs_t *dregs = di->d64txregs;
- end =
- (u16) (B2I
- (((R_REG(&dregs->status0) &
+ end = (u16) (B2I(((R_REG(&dregs->status0) &
D64_XS0_CD_MASK) -
- di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));
+ di->xmtptrbase) & D64_XS0_CD_MASK,
+ struct dma64desc));
- if (range == HNDDMA_RANGE_TRANSFERED) {
+ if (range == DMA_RANGE_TRANSFERED) {
active_desc =
(u16) (R_REG(&dregs->status1) &
D64_XS1_AD_MASK);
active_desc =
(active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
- active_desc = B2I(active_desc, dma64dd_t);
+ active_desc = B2I(active_desc, struct dma64desc);
if (end != active_desc)
end = PREVTXD(active_desc);
}
@@ -1542,7 +1703,7 @@ static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range)
for (i = start; i != end && !txp; i = NEXTTXD(i)) {
dmaaddr_t pa;
- hnddma_seg_map_t *map = NULL;
+ struct dma_seg_map *map = NULL;
uint size, j, nsegs;
PHYSADDRLOSET(pa,
@@ -1579,7 +1740,7 @@ static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range)
di->txin = i;
/* tx flow control */
- di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+ di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
return txp;
@@ -1588,7 +1749,7 @@ static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range)
return NULL;
}
-static void *dma64_getnextrxp(dma_info_t *di, bool forceall)
+static void *dma64_getnextrxp(struct dma_info *di, bool forceall)
{
uint i, curr;
void *rxp;
@@ -1602,7 +1763,7 @@ static void *dma64_getnextrxp(dma_info_t *di, bool forceall)
curr =
B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
- di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
+ di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
/* ignore curr if forceall */
if (!forceall && (i == curr))
@@ -1642,7 +1803,7 @@ static bool _dma64_addrext(dma64regs_t *dma64regs)
/*
* Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
*/
-static void dma64_txrotate(dma_info_t *di)
+static void dma64_txrotate(struct dma_info *di)
{
u16 ad;
uint nactive;
@@ -1652,10 +1813,9 @@ static void dma64_txrotate(dma_info_t *di)
u16 first, last;
nactive = _dma_txactive(di);
- ad = (u16) (B2I
- ((((R_REG(&di->d64txregs->status1) &
- D64_XS1_AD_MASK)
- - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
+ ad = (u16) (B2I((((R_REG(&di->d64txregs->status1) &
+ D64_XS1_AD_MASK) - di->xmtptrbase) &
+ D64_XS1_AD_MASK), struct dma64desc));
rot = TXD(ad - di->txin);
/* full-ring case is a lot harder - don't worry about this */
@@ -1696,8 +1856,9 @@ static void dma64_txrotate(dma_info_t *di)
/* Move the map */
if (DMASGLIST_ENAB) {
memcpy(&di->txp_dmah[new], &di->txp_dmah[old],
- sizeof(hnddma_seg_map_t));
- memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t));
+ sizeof(struct dma_seg_map));
+ memset(&di->txp_dmah[old], 0,
+ sizeof(struct dma_seg_map));
}
di->txp[old] = NULL;
@@ -1706,14 +1867,14 @@ static void dma64_txrotate(dma_info_t *di)
/* update txin and txout */
di->txin = ad;
di->txout = TXD(di->txout + rot);
- di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+ di->dma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
/* kick the chip */
W_REG(&di->d64txregs->ptr,
- di->xmtptrbase + I2B(di->txout, dma64dd_t));
+ di->xmtptrbase + I2B(di->txout, struct dma64desc));
}
-uint dma_addrwidth(si_t *sih, void *dmaregs)
+uint dma_addrwidth(struct si_pub *sih, void *dmaregs)
{
 /* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
/* DMA engine is 64-bit capable */
@@ -1736,10 +1897,10 @@ uint dma_addrwidth(si_t *sih, void *dmaregs)
* engine. This function calls a caller-supplied function for each packet in
* the caller specified dma chain.
*/
-void dma_walk_packets(struct hnddma_pub *dmah, void (*callback_fnc)
+void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
(void *pkt, void *arg_a), void *arg_a)
{
- dma_info_t *di = (dma_info_t *) dmah;
+ struct dma_info *di = (struct dma_info *) dmah;
uint i = di->txin;
uint end = di->txout;
struct sk_buff *skb;
diff --git a/drivers/staging/brcm80211/include/hnddma.h b/drivers/staging/brcm80211/brcmsmac/dma.h
index fbbcb9b5ae6..9c8b9a6a557 100644
--- a/drivers/staging/brcm80211/include/hnddma.h
+++ b/drivers/staging/brcm80211/brcmsmac/dma.h
@@ -14,13 +14,37 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _hnddma_h_
-#define _hnddma_h_
+#ifndef _BRCM_DMA_H_
+#define _BRCM_DMA_H_
-#ifndef _hnddma_pub_
-#define _hnddma_pub_
-struct hnddma_pub;
-#endif /* _hnddma_pub_ */
+#include "types.h" /* forward structure declarations */
+
+/* DMA structure:
+ * supports two DMA engines: 32-bit or 64-bit addressing
+ * the basic DMA register set is per channel (transmit or receive)
+ * a pair of channels is defined for convenience
+ */
+
+/* 32 bits addressing */
+
+struct dma32diag { /* diag access */
+ u32 fifoaddr; /* diag address */
+ u32 fifodatalow; /* low 32bits of data */
+ u32 fifodatahigh; /* high 32bits of data */
+ u32 pad; /* reserved */
+};
+
+/* 64 bits addressing */
+
+/* dma registers per channel(xmt or rcv) */
+struct dma64regs {
+ u32 control; /* enable, et al */
+ u32 ptr; /* last descriptor posted to chip */
+ u32 addrlow; /* descriptor ring base address low 32-bits (8K aligned) */
+ u32 addrhigh; /* descriptor ring base address bits 63:32 (8K aligned) */
+ u32 status0; /* current descriptor, xmt state */
+ u32 status1; /* active descriptor, xmt error */
+};
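
Each 64-bit channel is driven through this six-register block: the descriptor ring base goes into addrlow/addrhigh, work is posted by advancing ptr (as the W_REG() calls on xmtptrbase in dma.c do), and status0/status1 expose the current and active descriptor offsets that the reclaim code masks with D64_XS0_CD_MASK / D64_XS1_AD_MASK. A hedged bring-up sketch, assuming an 8K-aligned ring and the W_REG() accessor used elsewhere in this driver; the enable bits in control are chip-specific and are deliberately left out:

/* Sketch only: programs one channel's ring base; control-register bit
 * values are an assumption and omitted. */
static void dma64_ring_setup(struct dma64regs *regs, u64 ring_pa)
{
	W_REG(&regs->addrlow, (u32)ring_pa);		/* 8K-aligned base, bits 31:0 */
	W_REG(&regs->addrhigh, (u32)(ring_pa >> 32));	/* bits 63:32 */
	W_REG(&regs->ptr, 0);				/* nothing posted yet */
}
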
/* map/unmap direction */
#define DMA_TX 1 /* TX direction for DMA */
@@ -28,64 +52,64 @@ struct hnddma_pub;
#define BUS_SWAP32(v) (v)
/* range param for dma_getnexttxp() and dma_txreclaim */
-typedef enum txd_range {
- HNDDMA_RANGE_ALL = 1,
- HNDDMA_RANGE_TRANSMITTED,
- HNDDMA_RANGE_TRANSFERED
-} txd_range_t;
+enum txd_range {
+ DMA_RANGE_ALL = 1,
+ DMA_RANGE_TRANSMITTED,
+ DMA_RANGE_TRANSFERED
+};
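
The three values select how far the reclaim paths may walk the tx ring: DMA_RANGE_TRANSMITTED stops at the hardware "CurrDescr" pointer, DMA_RANGE_TRANSFERED at "ActiveDescr", and DMA_RANGE_ALL takes everything posted regardless of hardware state (used on teardown). A caller-side sketch, assuming the dma_getnexttxp() wrapper this header defines over dma64proc alongside dma_detach()/dma_txreset(); free_txframe() is a hypothetical completion handler:

/* Sketch: reclaim every frame the hardware reports as transmitted. */
static void reclaim_completed(struct dma_pub *dmah)
{
	void *p;

	while ((p = dma_getnexttxp(dmah, DMA_RANGE_TRANSMITTED)) != NULL)
		free_txframe(p);	/* return the sk_buff, report tx status */
}
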
/* dma function type */
-typedef void (*di_detach_t) (struct hnddma_pub *dmah);
-typedef bool(*di_txreset_t) (struct hnddma_pub *dmah);
-typedef bool(*di_rxreset_t) (struct hnddma_pub *dmah);
-typedef bool(*di_rxidle_t) (struct hnddma_pub *dmah);
-typedef void (*di_txinit_t) (struct hnddma_pub *dmah);
-typedef bool(*di_txenabled_t) (struct hnddma_pub *dmah);
-typedef void (*di_rxinit_t) (struct hnddma_pub *dmah);
-typedef void (*di_txsuspend_t) (struct hnddma_pub *dmah);
-typedef void (*di_txresume_t) (struct hnddma_pub *dmah);
-typedef bool(*di_txsuspended_t) (struct hnddma_pub *dmah);
-typedef bool(*di_txsuspendedidle_t) (struct hnddma_pub *dmah);
-typedef int (*di_txfast_t) (struct hnddma_pub *dmah, struct sk_buff *p,
+typedef void (*di_detach_t) (struct dma_pub *dmah);
+typedef bool(*di_txreset_t) (struct dma_pub *dmah);
+typedef bool(*di_rxreset_t) (struct dma_pub *dmah);
+typedef bool(*di_rxidle_t) (struct dma_pub *dmah);
+typedef void (*di_txinit_t) (struct dma_pub *dmah);
+typedef bool(*di_txenabled_t) (struct dma_pub *dmah);
+typedef void (*di_rxinit_t) (struct dma_pub *dmah);
+typedef void (*di_txsuspend_t) (struct dma_pub *dmah);
+typedef void (*di_txresume_t) (struct dma_pub *dmah);
+typedef bool(*di_txsuspended_t) (struct dma_pub *dmah);
+typedef bool(*di_txsuspendedidle_t) (struct dma_pub *dmah);
+typedef int (*di_txfast_t) (struct dma_pub *dmah, struct sk_buff *p,
bool commit);
-typedef int (*di_txunframed_t) (struct hnddma_pub *dmah, void *p, uint len,
+typedef int (*di_txunframed_t) (struct dma_pub *dmah, void *p, uint len,
bool commit);
-typedef void *(*di_getpos_t) (struct hnddma_pub *di, bool direction);
-typedef void (*di_fifoloopbackenable_t) (struct hnddma_pub *dmah);
-typedef bool(*di_txstopped_t) (struct hnddma_pub *dmah);
-typedef bool(*di_rxstopped_t) (struct hnddma_pub *dmah);
-typedef bool(*di_rxenable_t) (struct hnddma_pub *dmah);
-typedef bool(*di_rxenabled_t) (struct hnddma_pub *dmah);
-typedef void *(*di_rx_t) (struct hnddma_pub *dmah);
-typedef bool(*di_rxfill_t) (struct hnddma_pub *dmah);
-typedef void (*di_txreclaim_t) (struct hnddma_pub *dmah, txd_range_t range);
-typedef void (*di_rxreclaim_t) (struct hnddma_pub *dmah);
-typedef unsigned long (*di_getvar_t) (struct hnddma_pub *dmah,
+typedef void *(*di_getpos_t) (struct dma_pub *di, bool direction);
+typedef void (*di_fifoloopbackenable_t) (struct dma_pub *dmah);
+typedef bool(*di_txstopped_t) (struct dma_pub *dmah);
+typedef bool(*di_rxstopped_t) (struct dma_pub *dmah);
+typedef bool(*di_rxenable_t) (struct dma_pub *dmah);
+typedef bool(*di_rxenabled_t) (struct dma_pub *dmah);
+typedef void *(*di_rx_t) (struct dma_pub *dmah);
+typedef bool(*di_rxfill_t) (struct dma_pub *dmah);
+typedef void (*di_txreclaim_t) (struct dma_pub *dmah, enum txd_range range);
+typedef void (*di_rxreclaim_t) (struct dma_pub *dmah);
+typedef unsigned long (*di_getvar_t) (struct dma_pub *dmah,
const char *name);
-typedef void *(*di_getnexttxp_t) (struct hnddma_pub *dmah, txd_range_t range);
-typedef void *(*di_getnextrxp_t) (struct hnddma_pub *dmah, bool forceall);
-typedef void *(*di_peeknexttxp_t) (struct hnddma_pub *dmah);
-typedef void *(*di_peeknextrxp_t) (struct hnddma_pub *dmah);
-typedef void (*di_rxparam_get_t) (struct hnddma_pub *dmah, u16 *rxoffset,
+typedef void *(*di_getnexttxp_t) (struct dma_pub *dmah, enum txd_range range);
+typedef void *(*di_getnextrxp_t) (struct dma_pub *dmah, bool forceall);
+typedef void *(*di_peeknexttxp_t) (struct dma_pub *dmah);
+typedef void *(*di_peeknextrxp_t) (struct dma_pub *dmah);
+typedef void (*di_rxparam_get_t) (struct dma_pub *dmah, u16 *rxoffset,
u16 *rxbufsize);
-typedef void (*di_txblock_t) (struct hnddma_pub *dmah);
-typedef void (*di_txunblock_t) (struct hnddma_pub *dmah);
-typedef uint(*di_txactive_t) (struct hnddma_pub *dmah);
-typedef void (*di_txrotate_t) (struct hnddma_pub *dmah);
-typedef void (*di_counterreset_t) (struct hnddma_pub *dmah);
-typedef uint(*di_ctrlflags_t) (struct hnddma_pub *dmah, uint mask, uint flags);
-typedef char *(*di_dump_t) (struct hnddma_pub *dmah, struct bcmstrbuf *b,
+typedef void (*di_txblock_t) (struct dma_pub *dmah);
+typedef void (*di_txunblock_t) (struct dma_pub *dmah);
+typedef uint(*di_txactive_t) (struct dma_pub *dmah);
+typedef void (*di_txrotate_t) (struct dma_pub *dmah);
+typedef void (*di_counterreset_t) (struct dma_pub *dmah);
+typedef uint(*di_ctrlflags_t) (struct dma_pub *dmah, uint mask, uint flags);
+typedef char *(*di_dump_t) (struct dma_pub *dmah, struct brcmu_strbuf *b,
bool dumpring);
-typedef char *(*di_dumptx_t) (struct hnddma_pub *dmah, struct bcmstrbuf *b,
+typedef char *(*di_dumptx_t) (struct dma_pub *dmah, struct brcmu_strbuf *b,
bool dumpring);
-typedef char *(*di_dumprx_t) (struct hnddma_pub *dmah, struct bcmstrbuf *b,
+typedef char *(*di_dumprx_t) (struct dma_pub *dmah, struct brcmu_strbuf *b,
bool dumpring);
-typedef uint(*di_rxactive_t) (struct hnddma_pub *dmah);
-typedef uint(*di_txpending_t) (struct hnddma_pub *dmah);
-typedef uint(*di_txcommitted_t) (struct hnddma_pub *dmah);
+typedef uint(*di_rxactive_t) (struct dma_pub *dmah);
+typedef uint(*di_txpending_t) (struct dma_pub *dmah);
+typedef uint(*di_txcommitted_t) (struct dma_pub *dmah);
/* dma opsvec */
-typedef struct di_fcn_s {
+struct di_fcn_s {
di_detach_t detach;
di_txinit_t txinit;
di_txreset_t txreset;
@@ -130,14 +154,14 @@ typedef struct di_fcn_s {
di_txpending_t txpending;
di_txcommitted_t txcommitted;
uint endnum;
-} di_fcn_t;
+};
/*
* Exported data structure (read-only)
*/
/* export structure */
-struct hnddma_pub {
- const di_fcn_t *di_fn; /* DMA function pointers */
+struct dma_pub {
+ const struct di_fcn_s *di_fn; /* DMA function pointers */
uint txavail; /* # free tx descriptors */
uint dmactrlflags; /* dma control flags */
@@ -148,12 +172,12 @@ struct hnddma_pub {
uint txnobuf; /* tx out of dma descriptors */
};
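
This read-only export is the flow-control interface seen outside dma.c: txavail is refreshed by the post and reclaim paths shown earlier, and txnobuf counts the times a transmit was refused for lack of descriptors. A minimal consumer-side check; the surrounding queueing logic is hypothetical:

/* Sketch: gate transmissions on the descriptor budget kept in dma_pub. */
static bool can_post_frame(const struct dma_pub *dmah)
{
	return dmah->txavail > 0;	/* otherwise hold the frame in software */
}
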
-extern struct hnddma_pub *dma_attach(char *name, si_t *sih,
+extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
void *dmaregstx, void *dmaregsrx, uint ntxd,
uint nrxd, uint rxbufsize, int rxextheadroom,
uint nrxpost, uint rxoffset, uint *msg_level);
-extern const di_fcn_t dma64proc;
+extern const struct di_fcn_s dma64proc;
#define dma_detach(di) (dma64proc.detach(di))
#define dma_txreset(di) (dma64proc.txreset(di))
@@ -201,8 +225,8 @@ extern const di_fcn_t dma64proc;
* SB attach provides ability to probe backplane and dma core capabilities
* This info is needed by DMA_ALLOC_CONSISTENT in dma attach
*/
-extern uint dma_addrwidth(si_t *sih, void *dmaregs);
-void dma_walk_packets(struct hnddma_pub *dmah, void (*callback_fnc)
+extern uint dma_addrwidth(struct si_pub *sih, void *dmaregs);
+void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
(void *pkt, void *arg_a), void *arg_a);
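
dma_walk_packets() visits every packet still posted between txin and txout and hands each one, plus the opaque arg_a, to the supplied callback, so callers can inspect or tag in-flight frames without touching the ring themselves. A usage sketch matching the declared signature:

/* Sketch: count the frames currently queued in the tx ring. */
static void count_pkt(void *pkt, void *arg_a)
{
	(*(unsigned int *)arg_a)++;	/* pkt is the queued sk_buff */
}

static unsigned int pending_tx_frames(struct dma_pub *dmah)
{
	unsigned int n = 0;

	dma_walk_packets(dmah, count_pkt, &n);
	return n;
}
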
/*
@@ -223,4 +247,4 @@ static inline void dma_spin_for_len(uint len, struct sk_buff *head)
#endif /* defined(__mips__) */
}
-#endif /* _hnddma_h_ */
+#endif /* _BRCM_DMA_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wl_mac80211.c b/drivers/staging/brcm80211/brcmsmac/mac80211_if.c
index 6c6236c969b..d6de44e430d 100644
--- a/drivers/staging/brcm80211/brcmsmac/wl_mac80211.c
+++ b/drivers/staging/brcm80211/brcmsmac/mac80211_if.c
@@ -16,43 +16,46 @@
#define __UNDEF_NO_VERSION__
-#include <linux/kernel.h>
#include <linux/etherdevice.h>
-#include <linux/types.h>
-#include <linux/pci_ids.h>
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/firmware.h>
+#include <linux/interrupt.h>
#include <net/mac80211.h>
-
-#include <proto/802.11.h>
-#include <bcmdefs.h>
-#include <bcmwifi.h>
-#include <bcmutils.h>
-#include <bcmnvram.h>
-#include <pcicfg.h>
-#include <wlioctl.h>
-#include <sbhnddma.h>
-
-#include "phy/wlc_phy_int.h"
+#include <defs.h>
+#include "nicpci.h"
+#include "phy/phy_int.h"
#include "d11.h"
-#include "wlc_types.h"
-#include "wlc_cfg.h"
-#include "phy/phy_version.h"
-#include "wlc_key.h"
-#include "wlc_channel.h"
-#include "wlc_scb.h"
-#include "wlc_pub.h"
-#include "wl_dbg.h"
-#include "wl_export.h"
-#include "wl_ucode.h"
-#include "wl_mac80211.h"
+#include "channel.h"
+#include "scb.h"
+#include "pub.h"
+#include "ucode_loader.h"
+#include "mac80211_if.h"
#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
-static void wl_timer(unsigned long data);
-static void _wl_timer(struct wl_timer *t);
+#define LOCK(wl) spin_lock_bh(&(wl)->lock)
+#define UNLOCK(wl) spin_unlock_bh(&(wl)->lock)
+
+/* locking from inside brcms_isr */
+#define ISR_LOCK(wl, flags)\
+ do {\
+ spin_lock(&(wl)->isr_lock);\
+ (void)(flags); } \
+ while (0)
+
+#define ISR_UNLOCK(wl, flags)\
+ do {\
+ spin_unlock(&(wl)->isr_lock);\
+ (void)(flags); } \
+ while (0)
+
+/* locking under LOCK() to synchronize with brcms_isr */
+#define INT_LOCK(wl, flags) spin_lock_irqsave(&(wl)->isr_lock, flags)
+#define INT_UNLOCK(wl, flags) spin_unlock_irqrestore(&(wl)->isr_lock, flags)
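
These three macro pairs encode the locking scheme: LOCK/UNLOCK is the bottom-half perimeter lock around wlc state, ISR_LOCK/ISR_UNLOCK is what brcms_isr() itself takes, and INT_LOCK/INT_UNLOCK grabs the same isr_lock with interrupts saved for code that already holds LOCK() and must synchronize with the ISR (see brcms_intrson()/brcms_intrsoff() below). A sketch of the intended nesting, with the wlc calls left as placeholders:

/* Sketch: INT_LOCK is taken while LOCK() is already held, never the
 * other way around (per the comments above). */
static void example_locked_path(struct brcms_info *wl)
{
	unsigned long flags;

	LOCK(wl);		/* serialize with mac80211 callbacks */
	/* ... update software state via brcms_c_*() ... */
	INT_LOCK(wl, flags);	/* synchronize with brcms_isr() */
	/* ... touch interrupt mask state ... */
	INT_UNLOCK(wl, flags);
	UNLOCK(wl);
}
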
+
+static void brcms_timer(unsigned long data);
+static void _brcms_timer(struct brcms_timer *t);
static int ieee_hw_init(struct ieee80211_hw *hw);
@@ -69,22 +72,20 @@ static int wl_linux_watchdog(void *ctx);
FIF_OTHER_BSS | \
FIF_BCN_PRBRESP_PROMISC)
-static int wl_found;
+static int n_adapters_found;
-#define WL_DEV_IF(dev) ((struct wl_if *)netdev_priv(dev))
-#define WL_INFO(dev) ((struct wl_info *)(WL_DEV_IF(dev)->wl))
-static int wl_request_fw(struct wl_info *wl, struct pci_dev *pdev);
-static void wl_release_fw(struct wl_info *wl);
+static int brcms_request_fw(struct brcms_info *wl, struct pci_dev *pdev);
+static void brcms_release_fw(struct brcms_info *wl);
/* local prototypes */
-static void wl_dpc(unsigned long data);
-static irqreturn_t wl_isr(int irq, void *dev_id);
+static void brcms_dpc(unsigned long data);
+static irqreturn_t brcms_isr(int irq, void *dev_id);
-static int __devinit wl_pci_probe(struct pci_dev *pdev,
+static int __devinit brcms_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent);
-static void wl_remove(struct pci_dev *pdev);
-static void wl_free(struct wl_info *wl);
-static void wl_set_basic_rate(struct wl_rateset *rs, u16 rate, bool is_br);
+static void brcms_remove(struct pci_dev *pdev);
+static void brcms_free(struct brcms_info *wl);
+static void brcms_set_basic_rate(struct wl_rateset *rs, u16 rate, bool is_br);
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
@@ -92,14 +93,16 @@ MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
/* recognized PCI IDs */
-static struct pci_device_id wl_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(brcms_pci_id_table) = {
{PCI_VENDOR_ID_BROADCOM, 0x4357, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* 43225 2G */
{PCI_VENDOR_ID_BROADCOM, 0x4353, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* 43224 DUAL */
{PCI_VENDOR_ID_BROADCOM, 0x4727, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* 4313 DUAL */
+ /* 43224 Ven */
+ {PCI_VENDOR_ID_BROADCOM, 0x0576, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{0}
};
-MODULE_DEVICE_TABLE(pci, wl_id_table);
+MODULE_DEVICE_TABLE(pci, brcms_pci_id_table);
#ifdef BCMDBG
static int msglevel = 0xdeadbeef;
@@ -112,88 +115,89 @@ module_param(phymsglevel, int, 0);
#define WL_TO_HW(wl) (wl->pub->ieee_hw)
/* MAC80211 callback functions */
-static int wl_ops_start(struct ieee80211_hw *hw);
-static void wl_ops_stop(struct ieee80211_hw *hw);
-static int wl_ops_add_interface(struct ieee80211_hw *hw,
+static int brcms_ops_start(struct ieee80211_hw *hw);
+static void brcms_ops_stop(struct ieee80211_hw *hw);
+static int brcms_ops_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
-static void wl_ops_remove_interface(struct ieee80211_hw *hw,
+static void brcms_ops_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
-static int wl_ops_config(struct ieee80211_hw *hw, u32 changed);
-static void wl_ops_bss_info_changed(struct ieee80211_hw *hw,
+static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed);
+static void brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
u32 changed);
-static void wl_ops_configure_filter(struct ieee80211_hw *hw,
+static void brcms_ops_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast);
-static int wl_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+static int brcms_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
bool set);
-static void wl_ops_sw_scan_start(struct ieee80211_hw *hw);
-static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw);
-static void wl_ops_set_tsf(struct ieee80211_hw *hw, u64 tsf);
-static int wl_ops_get_stats(struct ieee80211_hw *hw,
+static void brcms_ops_sw_scan_start(struct ieee80211_hw *hw);
+static void brcms_ops_sw_scan_complete(struct ieee80211_hw *hw);
+static void brcms_ops_set_tsf(struct ieee80211_hw *hw, u64 tsf);
+static int brcms_ops_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
-static void wl_ops_sta_notify(struct ieee80211_hw *hw,
+static void brcms_ops_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta);
-static int wl_ops_conf_tx(struct ieee80211_hw *hw, u16 queue,
+static int brcms_ops_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params);
-static u64 wl_ops_get_tsf(struct ieee80211_hw *hw);
-static int wl_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+static u64 brcms_ops_get_tsf(struct ieee80211_hw *hw);
+static int brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
-static int wl_ops_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-static int wl_ops_ampdu_action(struct ieee80211_hw *hw,
+static int brcms_ops_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+static int brcms_ops_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn,
u8 buf_size);
-static void wl_ops_rfkill_poll(struct ieee80211_hw *hw);
-static void wl_ops_flush(struct ieee80211_hw *hw, bool drop);
+static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw);
+static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop);
-static void wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
- struct wl_info *wl = hw->priv;
+ struct brcms_info *wl = hw->priv;
- WL_LOCK(wl);
+ LOCK(wl);
if (!wl->pub->up) {
wiphy_err(wl->wiphy, "ops->tx called while down\n");
kfree_skb(skb);
goto done;
}
- wlc_sendpkt_mac80211(wl->wlc, skb, hw);
+ brcms_c_sendpkt_mac80211(wl->wlc, skb, hw);
done:
- WL_UNLOCK(wl);
+ UNLOCK(wl);
}
-static int wl_ops_start(struct ieee80211_hw *hw)
+static int brcms_ops_start(struct ieee80211_hw *hw)
{
- struct wl_info *wl = hw->priv;
+ struct brcms_info *wl = hw->priv;
bool blocked;
/*
struct ieee80211_channel *curchan = hw->conf.channel;
*/
ieee80211_wake_queues(hw);
- WL_LOCK(wl);
- blocked = wl_rfkill_set_hw_state(wl);
- WL_UNLOCK(wl);
+ LOCK(wl);
+ blocked = brcms_rfkill_set_hw_state(wl);
+ UNLOCK(wl);
if (!blocked)
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
return 0;
}
-static void wl_ops_stop(struct ieee80211_hw *hw)
+static void brcms_ops_stop(struct ieee80211_hw *hw)
{
ieee80211_stop_queues(hw);
}
static int
-wl_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct wl_info *wl;
+ struct brcms_info *wl;
int err;
/* Just STA for now */
@@ -208,28 +212,28 @@ wl_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
wl = HW_TO_WL(hw);
- WL_LOCK(wl);
- err = wl_up(wl);
- WL_UNLOCK(wl);
+ LOCK(wl);
+ err = brcms_up(wl);
+ UNLOCK(wl);
if (err != 0) {
- wiphy_err(hw->wiphy, "%s: wl_up() returned %d\n", __func__,
+ wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__,
err);
}
return err;
}
static void
-wl_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct wl_info *wl;
+ struct brcms_info *wl;
wl = HW_TO_WL(hw);
/* put driver in down state */
- WL_LOCK(wl);
- wl_down(wl);
- WL_UNLOCK(wl);
+ LOCK(wl);
+ brcms_down(wl);
+ UNLOCK(wl);
}
/*
@@ -239,13 +243,13 @@ static int
ieee_set_channel(struct ieee80211_hw *hw, struct ieee80211_channel *chan,
enum nl80211_channel_type type)
{
- struct wl_info *wl = HW_TO_WL(hw);
+ struct brcms_info *wl = HW_TO_WL(hw);
int err = 0;
switch (type) {
case NL80211_CHAN_HT20:
case NL80211_CHAN_NO_HT:
- err = wlc_set(wl->wlc, WLC_SET_CHANNEL, chan->hw_value);
+ err = brcms_c_set(wl->wlc, BRCM_SET_CHANNEL, chan->hw_value);
break;
case NL80211_CHAN_HT40MINUS:
case NL80211_CHAN_HT40PLUS:
@@ -260,24 +264,24 @@ ieee_set_channel(struct ieee80211_hw *hw, struct ieee80211_channel *chan,
return err;
}
-static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
+static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
{
struct ieee80211_conf *conf = &hw->conf;
- struct wl_info *wl = HW_TO_WL(hw);
+ struct brcms_info *wl = HW_TO_WL(hw);
int err = 0;
int new_int;
struct wiphy *wiphy = hw->wiphy;
- WL_LOCK(wl);
+ LOCK(wl);
if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
- if (wlc_iovar_setint
- (wl->wlc, "bcn_li_bcn", conf->listen_interval)) {
+ if (brcms_c_set_par(wl->wlc, IOV_BCN_LI_BCN,
+ conf->listen_interval) < 0) {
wiphy_err(wiphy, "%s: Error setting listen_interval\n",
__func__);
err = -EIO;
goto config_out;
}
- wlc_iovar_getint(wl->wlc, "bcn_li_bcn", &new_int);
+ brcms_c_get_par(wl->wlc, IOV_BCN_LI_BCN, &new_int);
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR)
wiphy_err(wiphy, "%s: change monitor mode: %s (implement)\n",
@@ -289,14 +293,14 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
"true" : "false");
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- if (wlc_iovar_setint
- (wl->wlc, "qtxpower", conf->power_level * 4)) {
+ if (brcms_c_set_par(wl->wlc, IOV_QTXPOWER,
+ conf->power_level * 4) < 0) {
wiphy_err(wiphy, "%s: Error setting power_level\n",
__func__);
err = -EIO;
goto config_out;
}
- wlc_iovar_getint(wl->wlc, "qtxpower", &new_int);
+ brcms_c_get_par(wl->wlc, IOV_QTXPOWER, &new_int);
if (new_int != (conf->power_level * 4))
wiphy_err(wiphy, "%s: Power level req != actual, %d %d"
"\n", __func__, conf->power_level * 4,
@@ -306,15 +310,15 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
err = ieee_set_channel(hw, conf->channel, conf->channel_type);
}
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- if (wlc_set
- (wl->wlc, WLC_SET_SRL,
+ if (brcms_c_set
+ (wl->wlc, BRCM_SET_SRL,
conf->short_frame_max_tx_count) < 0) {
wiphy_err(wiphy, "%s: Error setting srl\n", __func__);
err = -EIO;
goto config_out;
}
- if (wlc_set(wl->wlc, WLC_SET_LRL, conf->long_frame_max_tx_count)
- < 0) {
+ if (brcms_c_set(wl->wlc, BRCM_SET_LRL,
+ conf->long_frame_max_tx_count) < 0) {
wiphy_err(wiphy, "%s: Error setting lrl\n", __func__);
err = -EIO;
goto config_out;
@@ -322,16 +326,16 @@ static int wl_ops_config(struct ieee80211_hw *hw, u32 changed)
}
config_out:
- WL_UNLOCK(wl);
+ UNLOCK(wl);
return err;
}
static void
-wl_ops_bss_info_changed(struct ieee80211_hw *hw,
+brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
- struct wl_info *wl = HW_TO_WL(hw);
+ struct brcms_info *wl = HW_TO_WL(hw);
struct wiphy *wiphy = hw->wiphy;
int val;
@@ -341,9 +345,9 @@ wl_ops_bss_info_changed(struct ieee80211_hw *hw,
*/
wiphy_err(wiphy, "%s: %s: %sassociated\n", KBUILD_MODNAME,
__func__, info->assoc ? "" : "dis");
- WL_LOCK(wl);
- wlc_associate_upd(wl->wlc, info->assoc);
- WL_UNLOCK(wl);
+ LOCK(wl);
+ brcms_c_associate_upd(wl->wlc, info->assoc);
+ UNLOCK(wl);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
/* slot timing changed */
@@ -351,23 +355,23 @@ wl_ops_bss_info_changed(struct ieee80211_hw *hw,
val = 1;
else
val = 0;
- WL_LOCK(wl);
- wlc_set(wl->wlc, WLC_SET_SHORTSLOT_OVERRIDE, val);
- WL_UNLOCK(wl);
+ LOCK(wl);
+ brcms_c_set(wl->wlc, BRCMS_SET_SHORTSLOT_OVERRIDE, val);
+ UNLOCK(wl);
}
if (changed & BSS_CHANGED_HT) {
/* 802.11n parameters changed */
u16 mode = info->ht_operation_mode;
- WL_LOCK(wl);
- wlc_protection_upd(wl->wlc, WLC_PROT_N_CFG,
+ LOCK(wl);
+ brcms_c_protection_upd(wl->wlc, BRCMS_PROT_N_CFG,
mode & IEEE80211_HT_OP_MODE_PROTECTION);
- wlc_protection_upd(wl->wlc, WLC_PROT_N_NONGF,
+ brcms_c_protection_upd(wl->wlc, BRCMS_PROT_N_NONGF,
mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
- wlc_protection_upd(wl->wlc, WLC_PROT_N_OBSS,
+ brcms_c_protection_upd(wl->wlc, BRCMS_PROT_N_OBSS,
mode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT);
- WL_UNLOCK(wl);
+ UNLOCK(wl);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
struct ieee80211_supported_band *bi;
@@ -377,43 +381,43 @@ wl_ops_bss_info_changed(struct ieee80211_hw *hw,
int error;
/* retrieve the current rates */
- WL_LOCK(wl);
- error = wlc_ioctl(wl->wlc, WLC_GET_CURR_RATESET,
+ LOCK(wl);
+ error = brcms_c_ioctl(wl->wlc, BRCM_GET_CURR_RATESET,
&rs, sizeof(rs), NULL);
- WL_UNLOCK(wl);
+ UNLOCK(wl);
if (error) {
wiphy_err(wiphy, "%s: retrieve rateset failed: %d\n",
__func__, error);
return;
}
br_mask = info->basic_rates;
- bi = hw->wiphy->bands[wlc_get_curband(wl->wlc)];
+ bi = hw->wiphy->bands[brcms_c_get_curband(wl->wlc)];
for (i = 0; i < bi->n_bitrates; i++) {
/* convert to internal rate value */
rate = (bi->bitrates[i].bitrate << 1) / 10;
/* set/clear basic rate flag */
- wl_set_basic_rate(&rs, rate, br_mask & 1);
+ brcms_set_basic_rate(&rs, rate, br_mask & 1);
br_mask >>= 1;
}
/* update the rate set */
- WL_LOCK(wl);
- wlc_ioctl(wl->wlc, WLC_SET_RATESET, &rs, sizeof(rs), NULL);
- WL_UNLOCK(wl);
+ LOCK(wl);
+ brcms_c_ioctl(wl->wlc, BRCM_SET_RATESET, &rs, sizeof(rs), NULL);
+ UNLOCK(wl);
}
if (changed & BSS_CHANGED_BEACON_INT) {
/* Beacon interval changed */
- WL_LOCK(wl);
- wlc_set(wl->wlc, WLC_SET_BCNPRD, info->beacon_int);
- WL_UNLOCK(wl);
+ LOCK(wl);
+ brcms_c_set(wl->wlc, BRCM_SET_BCNPRD, info->beacon_int);
+ UNLOCK(wl);
}
if (changed & BSS_CHANGED_BSSID) {
/* BSSID changed, for whatever reason (IBSS and managed mode) */
- WL_LOCK(wl);
- wlc_set_addrmatch(wl->wlc, RCM_BSSID_OFFSET,
+ LOCK(wl);
+ brcms_c_set_addrmatch(wl->wlc, RCM_BSSID_OFFSET,
info->bssid);
- WL_UNLOCK(wl);
+ UNLOCK(wl);
}
if (changed & BSS_CHANGED_BEACON) {
/* Beacon data changed, retrieve new beacon (beaconing modes) */
@@ -449,20 +453,15 @@ wl_ops_bss_info_changed(struct ieee80211_hw *hw,
wiphy_err(wiphy, "%s: qos enabled: %s (implement)\n", __func__,
info->qos ? "true" : "false");
}
- if (changed & BSS_CHANGED_IDLE) {
- /* Idle changed for this BSS/interface */
- wiphy_err(wiphy, "%s: BSS idle: %s (implement)\n", __func__,
- info->idle ? "true" : "false");
- }
return;
}
static void
-wl_ops_configure_filter(struct ieee80211_hw *hw,
+brcms_ops_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast)
{
- struct wl_info *wl = hw->priv;
+ struct brcms_info *wl = hw->priv;
struct wiphy *wiphy = hw->wiphy;
changed_flags &= MAC_FILTERS;
@@ -480,68 +479,68 @@ wl_ops_configure_filter(struct ieee80211_hw *hw,
if (changed_flags & FIF_OTHER_BSS)
wiphy_err(wiphy, "FIF_OTHER_BSS\n");
if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
- WL_LOCK(wl);
+ LOCK(wl);
if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS;
- wlc_mac_bcn_promisc_change(wl->wlc, 1);
+ brcms_c_mac_bcn_promisc_change(wl->wlc, 1);
} else {
- wlc_mac_bcn_promisc_change(wl->wlc, 0);
+ brcms_c_mac_bcn_promisc_change(wl->wlc, 0);
wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS;
}
- WL_UNLOCK(wl);
+ UNLOCK(wl);
}
return;
}
static int
-wl_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
+brcms_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
return 0;
}
-static void wl_ops_sw_scan_start(struct ieee80211_hw *hw)
+static void brcms_ops_sw_scan_start(struct ieee80211_hw *hw)
{
- struct wl_info *wl = hw->priv;
- WL_LOCK(wl);
- wlc_scan_start(wl->wlc);
- WL_UNLOCK(wl);
+ struct brcms_info *wl = hw->priv;
+ LOCK(wl);
+ brcms_c_scan_start(wl->wlc);
+ UNLOCK(wl);
return;
}
-static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw)
+static void brcms_ops_sw_scan_complete(struct ieee80211_hw *hw)
{
- struct wl_info *wl = hw->priv;
- WL_LOCK(wl);
- wlc_scan_stop(wl->wlc);
- WL_UNLOCK(wl);
+ struct brcms_info *wl = hw->priv;
+ LOCK(wl);
+ brcms_c_scan_stop(wl->wlc);
+ UNLOCK(wl);
return;
}
-static void wl_ops_set_tsf(struct ieee80211_hw *hw, u64 tsf)
+static void brcms_ops_set_tsf(struct ieee80211_hw *hw, u64 tsf)
{
wiphy_err(hw->wiphy, "%s: Enter\n", __func__);
return;
}
static int
-wl_ops_get_stats(struct ieee80211_hw *hw,
+brcms_ops_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
- struct wl_info *wl = hw->priv;
+ struct brcms_info *wl = hw->priv;
struct wl_cnt *cnt;
- WL_LOCK(wl);
+ LOCK(wl);
cnt = wl->pub->_cnt;
stats->dot11ACKFailureCount = 0;
stats->dot11RTSFailureCount = 0;
stats->dot11FCSErrorCount = 0;
stats->dot11RTSSuccessCount = 0;
- WL_UNLOCK(wl);
+ UNLOCK(wl);
return 0;
}
static void
-wl_ops_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+brcms_ops_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
{
switch (cmd) {
@@ -554,32 +553,32 @@ wl_ops_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static int
-wl_ops_conf_tx(struct ieee80211_hw *hw, u16 queue,
+brcms_ops_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct wl_info *wl = hw->priv;
+ struct brcms_info *wl = hw->priv;
- WL_LOCK(wl);
- wlc_wme_setparams(wl->wlc, queue, params, true);
- WL_UNLOCK(wl);
+ LOCK(wl);
+ brcms_c_wme_setparams(wl->wlc, queue, params, true);
+ UNLOCK(wl);
return 0;
}
-static u64 wl_ops_get_tsf(struct ieee80211_hw *hw)
+static u64 brcms_ops_get_tsf(struct ieee80211_hw *hw)
{
wiphy_err(hw->wiphy, "%s: Enter\n", __func__);
return 0;
}
static int
-wl_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct scb *scb;
int i;
- struct wl_info *wl = hw->priv;
+ struct brcms_info *wl = hw->priv;
/* Init the scb */
scb = (struct scb *)sta->drv_priv;
@@ -593,7 +592,7 @@ wl_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
wl->pub->global_ampdu = &(scb->scb_ampdu);
wl->pub->global_ampdu->scb = scb;
wl->pub->global_ampdu->max_pdu = 16;
- bcm_pktq_init(&scb->scb_ampdu.txq, AMPDU_MAX_SCB_TID,
+ brcmu_pktq_init(&scb->scb_ampdu.txq, AMPDU_MAX_SCB_TID,
AMPDU_MAX_SCB_TID * PKTQ_LEN_DEFAULT);
sta->ht_cap.ht_supported = true;
@@ -608,21 +607,21 @@ wl_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static int
-wl_ops_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+brcms_ops_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
return 0;
}
static int
-wl_ops_ampdu_action(struct ieee80211_hw *hw,
+brcms_ops_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn,
u8 buf_size)
{
struct scb *scb = (struct scb *)sta->drv_priv;
- struct wl_info *wl = hw->priv;
+ struct brcms_info *wl = hw->priv;
int status;
if (WARN_ON(scb->magic != SCB_MAGIC))
@@ -633,27 +632,37 @@ wl_ops_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
break;
case IEEE80211_AMPDU_TX_START:
- WL_LOCK(wl);
- status = wlc_aggregatable(wl->wlc, tid);
- WL_UNLOCK(wl);
+ LOCK(wl);
+ status = brcms_c_aggregatable(wl->wlc, tid);
+ UNLOCK(wl);
if (!status) {
wiphy_err(wl->wiphy, "START: tid %d is not agg\'able\n",
tid);
return -EINVAL;
}
- /* XXX: Use the starting sequence number provided ... */
+ /* Future improvement: Use the starting sequence number provided ... */
*ssn = 0;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP:
- WL_LOCK(wl);
- wlc_ampdu_flush(wl->wlc, sta, tid);
- WL_UNLOCK(wl);
+ LOCK(wl);
+ brcms_c_ampdu_flush(wl->wlc, sta, tid);
+ UNLOCK(wl);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
- /* Not sure what to do here */
+ /*
+ * BA window size from ADDBA response ('buf_size') defines how
+ * many outstanding MPDUs are allowed for the BA stream by
+ * recipient and traffic class. 'ampdu_factor' gives maximum
+ * AMPDU size.
+ */
+ LOCK(wl);
+ brcms_c_ampdu_tx_operational(wl->wlc, tid, buf_size,
+ (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ sta->ht_cap.ampdu_factor)) - 1);
+ UNLOCK(wl);
/* Power save wakeup */
break;
default:
@@ -664,58 +673,58 @@ wl_ops_ampdu_action(struct ieee80211_hw *hw,
return 0;
}
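
The TX_OPERATIONAL branch above forwards two limits to the AMPDU code: buf_size, the reorder-window size granted by the recipient in its ADDBA response, and a maximum A-MPDU length computed from the peer's ampdu_factor. Assuming IEEE80211_HT_MAX_AMPDU_FACTOR is 13, as in mainline ieee80211.h, the length expression evaluates as follows:

/* Worked example of the length expression passed above:
 *   (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + ampdu_factor)) - 1
 * ampdu_factor 0..3  ->  8191, 16383, 32767, 65535 bytes */
static u32 max_ampdu_len(u8 ampdu_factor)
{
	return (1 << (13 + ampdu_factor)) - 1;
}
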
-static void wl_ops_rfkill_poll(struct ieee80211_hw *hw)
+static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
{
- struct wl_info *wl = HW_TO_WL(hw);
+ struct brcms_info *wl = HW_TO_WL(hw);
bool blocked;
- WL_LOCK(wl);
- blocked = wlc_check_radio_disabled(wl->wlc);
- WL_UNLOCK(wl);
+ LOCK(wl);
+ blocked = brcms_c_check_radio_disabled(wl->wlc);
+ UNLOCK(wl);
wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
}
-static void wl_ops_flush(struct ieee80211_hw *hw, bool drop)
+static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
{
- struct wl_info *wl = HW_TO_WL(hw);
+ struct brcms_info *wl = HW_TO_WL(hw);
no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
/* wait for packet queue and dma fifos to run empty */
- WL_LOCK(wl);
- wlc_wait_for_tx_completion(wl->wlc, drop);
- WL_UNLOCK(wl);
+ LOCK(wl);
+ brcms_c_wait_for_tx_completion(wl->wlc, drop);
+ UNLOCK(wl);
}
-static const struct ieee80211_ops wl_ops = {
- .tx = wl_ops_tx,
- .start = wl_ops_start,
- .stop = wl_ops_stop,
- .add_interface = wl_ops_add_interface,
- .remove_interface = wl_ops_remove_interface,
- .config = wl_ops_config,
- .bss_info_changed = wl_ops_bss_info_changed,
- .configure_filter = wl_ops_configure_filter,
- .set_tim = wl_ops_set_tim,
- .sw_scan_start = wl_ops_sw_scan_start,
- .sw_scan_complete = wl_ops_sw_scan_complete,
- .set_tsf = wl_ops_set_tsf,
- .get_stats = wl_ops_get_stats,
- .sta_notify = wl_ops_sta_notify,
- .conf_tx = wl_ops_conf_tx,
- .get_tsf = wl_ops_get_tsf,
- .sta_add = wl_ops_sta_add,
- .sta_remove = wl_ops_sta_remove,
- .ampdu_action = wl_ops_ampdu_action,
- .rfkill_poll = wl_ops_rfkill_poll,
- .flush = wl_ops_flush,
+static const struct ieee80211_ops brcms_ops = {
+ .tx = brcms_ops_tx,
+ .start = brcms_ops_start,
+ .stop = brcms_ops_stop,
+ .add_interface = brcms_ops_add_interface,
+ .remove_interface = brcms_ops_remove_interface,
+ .config = brcms_ops_config,
+ .bss_info_changed = brcms_ops_bss_info_changed,
+ .configure_filter = brcms_ops_configure_filter,
+ .set_tim = brcms_ops_set_tim,
+ .sw_scan_start = brcms_ops_sw_scan_start,
+ .sw_scan_complete = brcms_ops_sw_scan_complete,
+ .set_tsf = brcms_ops_set_tsf,
+ .get_stats = brcms_ops_get_stats,
+ .sta_notify = brcms_ops_sta_notify,
+ .conf_tx = brcms_ops_conf_tx,
+ .get_tsf = brcms_ops_get_tsf,
+ .sta_add = brcms_ops_sta_add,
+ .sta_remove = brcms_ops_sta_remove,
+ .ampdu_action = brcms_ops_ampdu_action,
+ .rfkill_poll = brcms_ops_rfkill_poll,
+ .flush = brcms_ops_flush,
};
/*
- * is called in wl_pci_probe() context, therefore no locking required.
+ * is called in brcms_pci_probe() context, therefore no locking required.
*/
-static int wl_set_hint(struct wl_info *wl, char *abbrev)
+static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
{
return regulatory_hint(wl->pub->ieee_hw->wiphy, abbrev);
}
@@ -726,25 +735,25 @@ static int wl_set_hint(struct wl_info *wl, char *abbrev)
* Attach to the WL device identified by vendor and device parameters.
* regs is a host accessible memory address pointing to WL device registers.
*
- * wl_attach is not defined as static because in the case where no bus
+ * brcms_attach is not defined as static because in the case where no bus
* is defined, wl_attach will never be called, and thus, gcc will issue
* a warning that this function is defined but not used if we declare
* it as static.
*
*
- * is called in wl_pci_probe() context, therefore no locking required.
+ * is called in brcms_pci_probe() context, therefore no locking required.
*/
-static struct wl_info *wl_attach(u16 vendor, u16 device, unsigned long regs,
+static struct brcms_info *brcms_attach(u16 vendor, u16 device,
+ unsigned long regs,
uint bustype, void *btparam, uint irq)
{
- struct wl_info *wl = NULL;
+ struct brcms_info *wl = NULL;
int unit, err;
-
unsigned long base_addr;
struct ieee80211_hw *hw;
u8 perm[ETH_ALEN];
- unit = wl_found;
+ unit = n_adapters_found;
err = 0;
if (unit < 0) {
@@ -762,15 +771,13 @@ static struct wl_info *wl_attach(u16 vendor, u16 device, unsigned long regs,
atomic_set(&wl->callbacks, 0);
/* setup the bottom half handler */
- tasklet_init(&wl->tasklet, wl_dpc, (unsigned long) wl);
+ tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
base_addr = regs;
- if (bustype == PCI_BUS) {
- wl->piomode = false;
- } else if (bustype == RPC_BUS) {
+ if (bustype == PCI_BUS || bustype == RPC_BUS) {
/* Do nothing */
} else {
bustype = PCI_BUS;
@@ -787,42 +794,41 @@ static struct wl_info *wl_attach(u16 vendor, u16 device, unsigned long regs,
spin_lock_init(&wl->isr_lock);
/* prepare ucode */
- if (wl_request_fw(wl, (struct pci_dev *)btparam) < 0) {
+ if (brcms_request_fw(wl, (struct pci_dev *)btparam) < 0) {
wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
"%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
- wl_release_fw(wl);
- wl_remove((struct pci_dev *)btparam);
+ brcms_release_fw(wl);
+ brcms_remove((struct pci_dev *)btparam);
return NULL;
}
/* common load-time initialization */
- wl->wlc = wlc_attach((void *)wl, vendor, device, unit, wl->piomode,
+ wl->wlc = brcms_c_attach((void *)wl, vendor, device, unit, false,
wl->regsva, wl->bcm_bustype, btparam, &err);
- wl_release_fw(wl);
+ brcms_release_fw(wl);
if (!wl->wlc) {
- wiphy_err(wl->wiphy, "%s: wlc_attach() failed with code %d\n",
+ wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
KBUILD_MODNAME, err);
goto fail;
}
- wl->pub = wlc_pub(wl->wlc);
+ wl->pub = brcms_c_pub(wl->wlc);
wl->pub->ieee_hw = hw;
- if (wlc_iovar_setint(wl->wlc, "mpc", 0)) {
+ if (brcms_c_set_par(wl->wlc, IOV_MPC, 0) < 0) {
wiphy_err(wl->wiphy, "wl%d: Error setting MPC variable to 0\n",
unit);
}
/* register our interrupt handler */
- if (request_irq(irq, wl_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) {
+ if (request_irq(irq, brcms_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) {
wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit);
goto fail;
}
wl->irq = irq;
/* register module */
- wlc_module_register(wl->pub, NULL, "linux", wl, NULL, wl_linux_watchdog,
- NULL);
+ brcms_c_module_register(wl->pub, "linux", wl, wl_linux_watchdog, NULL);
if (ieee_hw_init(hw)) {
wiphy_err(wl->wiphy, "wl%d: %s: ieee_hw_init failed!\n", unit,
@@ -842,19 +848,19 @@ static struct wl_info *wl_attach(u16 vendor, u16 device, unsigned long regs,
}
if (wl->pub->srom_ccode[0])
- err = wl_set_hint(wl, wl->pub->srom_ccode);
+ err = brcms_set_hint(wl, wl->pub->srom_ccode);
else
- err = wl_set_hint(wl, "US");
+ err = brcms_set_hint(wl, "US");
if (err) {
wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n",
__func__, err);
}
- wl_found++;
+ n_adapters_found++;
return wl;
fail:
- wl_free(wl);
+ brcms_free(wl);
return NULL;
}
@@ -869,7 +875,7 @@ fail:
.max_power = 19, \
}
-static struct ieee80211_channel wl_2ghz_chantable[] = {
+static struct ieee80211_channel brcms_2ghz_chantable[] = {
CHAN2GHZ(1, 2412, IEEE80211_CHAN_NO_HT40MINUS),
CHAN2GHZ(2, 2417, IEEE80211_CHAN_NO_HT40MINUS),
CHAN2GHZ(3, 2422, IEEE80211_CHAN_NO_HT40MINUS),
@@ -901,7 +907,7 @@ static struct ieee80211_channel wl_2ghz_chantable[] = {
.max_power = 21, \
}
-static struct ieee80211_channel wl_5ghz_nphy_chantable[] = {
+static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = {
/* UNII-1 */
CHAN5GHZ(36, IEEE80211_CHAN_NO_HT40MINUS),
CHAN5GHZ(40, IEEE80211_CHAN_NO_HT40PLUS),
@@ -969,7 +975,7 @@ static struct ieee80211_channel wl_5ghz_nphy_chantable[] = {
.hw_value = (rate100m / 5), \
}
-static struct ieee80211_rate wl_legacy_ratetable[] = {
+static struct ieee80211_rate legacy_ratetable[] = {
RATE(10, 0),
RATE(20, IEEE80211_RATE_SHORT_PREAMBLE),
RATE(55, IEEE80211_RATE_SHORT_PREAMBLE),
@@ -984,12 +990,12 @@ static struct ieee80211_rate wl_legacy_ratetable[] = {
RATE(540, 0),
};
-static struct ieee80211_supported_band wl_band_2GHz_nphy = {
+static struct ieee80211_supported_band brcms_band_2GHz_nphy = {
.band = IEEE80211_BAND_2GHZ,
- .channels = wl_2ghz_chantable,
- .n_channels = ARRAY_SIZE(wl_2ghz_chantable),
- .bitrates = wl_legacy_ratetable,
- .n_bitrates = ARRAY_SIZE(wl_legacy_ratetable),
+ .channels = brcms_2ghz_chantable,
+ .n_channels = ARRAY_SIZE(brcms_2ghz_chantable),
+ .bitrates = legacy_ratetable,
+ .n_bitrates = ARRAY_SIZE(legacy_ratetable),
.ht_cap = {
/* from include/linux/ieee80211.h */
.cap = IEEE80211_HT_CAP_GRN_FLD |
@@ -1006,12 +1012,12 @@ static struct ieee80211_supported_band wl_band_2GHz_nphy = {
}
};
-static struct ieee80211_supported_band wl_band_5GHz_nphy = {
+static struct ieee80211_supported_band brcms_band_5GHz_nphy = {
.band = IEEE80211_BAND_5GHZ,
- .channels = wl_5ghz_nphy_chantable,
- .n_channels = ARRAY_SIZE(wl_5ghz_nphy_chantable),
- .bitrates = wl_legacy_ratetable + 4,
- .n_bitrates = ARRAY_SIZE(wl_legacy_ratetable) - 4,
+ .channels = brcms_5ghz_nphy_chantable,
+ .n_channels = ARRAY_SIZE(brcms_5ghz_nphy_chantable),
+ .bitrates = legacy_ratetable + 4,
+ .n_bitrates = ARRAY_SIZE(legacy_ratetable) - 4,
.ht_cap = {
/* use IEEE80211_HT_CAP_* from include/linux/ieee80211.h */
.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT, /* No 40 mhz yet */
@@ -1027,11 +1033,11 @@ static struct ieee80211_supported_band wl_band_5GHz_nphy = {
};
/*
- * is called in wl_pci_probe() context, therefore no locking required.
+ * is called in brcms_pci_probe() context, therefore no locking required.
*/
static int ieee_hw_rate_init(struct ieee80211_hw *hw)
{
- struct wl_info *wl = HW_TO_WL(hw);
+ struct brcms_info *wl = HW_TO_WL(hw);
int has_5g;
char phy_list[4];
@@ -1040,17 +1046,16 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
- if (wlc_get(wl->wlc, WLC_GET_PHYLIST, (int *)&phy_list) < 0) {
+ if (brcms_c_get(wl->wlc, BRCM_GET_PHYLIST, (int *)&phy_list) < 0)
wiphy_err(hw->wiphy, "Phy list failed\n");
- }
if (phy_list[0] == 'n' || phy_list[0] == 'c') {
if (phy_list[0] == 'c') {
/* Single stream */
- wl_band_2GHz_nphy.ht_cap.mcs.rx_mask[1] = 0;
- wl_band_2GHz_nphy.ht_cap.mcs.rx_highest = 72;
+ brcms_band_2GHz_nphy.ht_cap.mcs.rx_mask[1] = 0;
+ brcms_band_2GHz_nphy.ht_cap.mcs.rx_highest = 72;
}
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl_band_2GHz_nphy;
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &brcms_band_2GHz_nphy;
} else {
return -EPERM;
}
@@ -1060,7 +1065,7 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
has_5g++;
if (phy_list[0] == 'n' || phy_list[0] == 'c') {
hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &wl_band_5GHz_nphy;
+ &brcms_band_5GHz_nphy;
} else {
return -EPERM;
}
@@ -1069,7 +1074,7 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
}
/*
- * is called in wl_pci_probe() context, therefore no locking required.
+ * is called in brcms_pci_probe() context, therefore no locking required.
*/
static int ieee_hw_init(struct ieee80211_hw *hw)
{
@@ -1078,13 +1083,8 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
| IEEE80211_HW_REPORTS_TX_ACK_STATUS
| IEEE80211_HW_AMPDU_AGGREGATION;
- hw->extra_tx_headroom = wlc_get_header_len();
+ hw->extra_tx_headroom = brcms_c_get_header_len();
hw->queues = N_TX_QUEUES;
- /* FIXME: this doesn't seem to be used properly in minstrel_ht.
- * mac80211/status.c:ieee80211_tx_status() checks this value,
- * but mac80211/rc80211_minstrel_ht.c:minstrel_ht_get_rate()
- * appears to always set 3 rates
- */
hw->max_rates = 2; /* Primary rate and 1 fallback rate */
hw->channel_change_time = 7 * 1000; /* channel change time is dependent on chip and band */
@@ -1100,15 +1100,15 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
* determines if a device is a WL device, and if so, attaches it.
*
* This function determines if a device pointed to by pdev is a WL device,
- * and if so, performs a wl_attach() on it.
+ * and if so, performs a brcms_attach() on it.
*
* Perimeter lock is initialized in the course of this function.
*/
static int __devinit
-wl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
- struct wl_info *wl;
+ struct brcms_info *wl;
struct ieee80211_hw *hw;
u32 val;
@@ -1117,7 +1117,8 @@ wl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
PCI_FUNC(pdev->devfn), pdev->irq);
if ((pdev->vendor != PCI_VENDOR_ID_BROADCOM) ||
- (((pdev->device & 0xff00) != 0x4300) &&
+ ((pdev->device != 0x0576) &&
+ ((pdev->device & 0xff00) != 0x4300) &&
((pdev->device & 0xff00) != 0x4700) &&
((pdev->device < 43000) || (pdev->device > 43999))))
return -ENODEV;
@@ -1135,7 +1136,7 @@ wl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- hw = ieee80211_alloc_hw(sizeof(struct wl_info), &wl_ops);
+ hw = ieee80211_alloc_hw(sizeof(struct brcms_info), &brcms_ops);
if (!hw) {
pr_err("%s: ieee80211_alloc_hw failed\n", __func__);
return -ENOMEM;
@@ -1147,43 +1148,44 @@ wl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
memset(hw->priv, 0, sizeof(*wl));
- wl = wl_attach(pdev->vendor, pdev->device, pci_resource_start(pdev, 0),
- PCI_BUS, pdev, pdev->irq);
+ wl = brcms_attach(pdev->vendor, pdev->device,
+ pci_resource_start(pdev, 0), PCI_BUS, pdev,
+ pdev->irq);
if (!wl) {
- pr_err("%s: %s: wl_attach failed!\n", KBUILD_MODNAME,
+ pr_err("%s: %s: brcms_attach failed!\n", KBUILD_MODNAME,
__func__);
return -ENODEV;
}
return 0;
}
-static int wl_suspend(struct pci_dev *pdev, pm_message_t state)
+static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
{
- struct wl_info *wl;
+ struct brcms_info *wl;
struct ieee80211_hw *hw;
hw = pci_get_drvdata(pdev);
wl = HW_TO_WL(hw);
if (!wl) {
wiphy_err(wl->wiphy,
- "wl_suspend: pci_get_drvdata failed\n");
+ "brcms_suspend: pci_get_drvdata failed\n");
return -ENODEV;
}
/* only need to flag hw is down for proper resume */
- WL_LOCK(wl);
+ LOCK(wl);
wl->pub->hw_up = false;
- WL_UNLOCK(wl);
+ UNLOCK(wl);
pci_save_state(pdev);
pci_disable_device(pdev);
return pci_set_power_state(pdev, PCI_D3hot);
}
-static int wl_resume(struct pci_dev *pdev)
+static int brcms_resume(struct pci_dev *pdev)
{
- struct wl_info *wl;
+ struct brcms_info *wl;
struct ieee80211_hw *hw;
int err = 0;
u32 val;
@@ -1192,7 +1194,7 @@ static int wl_resume(struct pci_dev *pdev)
wl = HW_TO_WL(hw);
if (!wl) {
wiphy_err(wl->wiphy,
- "wl: wl_resume: pci_get_drvdata failed\n");
+ "wl: brcms_resume: pci_get_drvdata failed\n");
return -ENODEV;
}
@@ -1214,97 +1216,80 @@ static int wl_resume(struct pci_dev *pdev)
/*
* done. driver will be put in up state
- * in wl_ops_add_interface() call.
+ * in brcms_ops_add_interface() call.
*/
return err;
}
/*
-* called from both kernel as from wl_*()
+* called both from the kernel and from within this kernel module.
* precondition: perimeter lock is not acquired.
*/
-static void wl_remove(struct pci_dev *pdev)
+static void brcms_remove(struct pci_dev *pdev)
{
- struct wl_info *wl;
+ struct brcms_info *wl;
struct ieee80211_hw *hw;
int status;
hw = pci_get_drvdata(pdev);
wl = HW_TO_WL(hw);
if (!wl) {
- pr_err("wl: wl_remove: pci_get_drvdata failed\n");
+ pr_err("wl: brcms_remove: pci_get_drvdata failed\n");
return;
}
- WL_LOCK(wl);
- status = wlc_chipmatch(pdev->vendor, pdev->device);
- WL_UNLOCK(wl);
+ LOCK(wl);
+ status = brcms_c_chipmatch(pdev->vendor, pdev->device);
+ UNLOCK(wl);
if (!status) {
- wiphy_err(wl->wiphy, "wl: wl_remove: wlc_chipmatch failed\n");
+ wiphy_err(wl->wiphy, "wl: brcms_remove: chipmatch "
+ "failed\n");
return;
}
if (wl->wlc) {
wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
ieee80211_unregister_hw(hw);
- WL_LOCK(wl);
- wl_down(wl);
- WL_UNLOCK(wl);
+ LOCK(wl);
+ brcms_down(wl);
+ UNLOCK(wl);
}
pci_disable_device(pdev);
- wl_free(wl);
+ brcms_free(wl);
pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
}
-static struct pci_driver wl_pci_driver = {
+static struct pci_driver brcms_pci_driver = {
.name = KBUILD_MODNAME,
- .probe = wl_pci_probe,
- .suspend = wl_suspend,
- .resume = wl_resume,
- .remove = __devexit_p(wl_remove),
- .id_table = wl_id_table,
+ .probe = brcms_pci_probe,
+ .suspend = brcms_suspend,
+ .resume = brcms_resume,
+ .remove = __devexit_p(brcms_remove),
+ .id_table = brcms_pci_id_table,
};
/**
* This is the main entry point for the WL driver.
*
* This function determines if a device pointed to by pdev is a WL device,
- * and if so, performs a wl_attach() on it.
+ * and if so, performs a brcms_attach() on it.
*
*/
-static int __init wl_module_init(void)
+static int __init brcms_module_init(void)
{
int error = -ENODEV;
#ifdef BCMDBG
if (msglevel != 0xdeadbeef)
- wl_msg_level = msglevel;
- else {
- char *var = getvar(NULL, "wl_msglevel");
- if (var) {
- unsigned long value;
-
- (void)strict_strtoul(var, 0, &value);
- wl_msg_level = value;
- }
- }
+ brcm_msg_level = msglevel;
if (phymsglevel != 0xdeadbeef)
phyhal_msg_level = phymsglevel;
- else {
- char *var = getvar(NULL, "phy_msglevel");
- if (var) {
- unsigned long value;
-
- (void)strict_strtoul(var, 0, &value);
- phyhal_msg_level = value;
- }
- }
#endif /* BCMDBG */
- error = pci_register_driver(&wl_pci_driver);
+ error = pci_register_driver(&brcms_pci_driver);
if (!error)
return 0;
@@ -1320,14 +1305,14 @@ static int __init wl_module_init(void)
* system.
*
*/
-static void __exit wl_module_exit(void)
+static void __exit brcms_module_exit(void)
{
- pci_unregister_driver(&wl_pci_driver);
+ pci_unregister_driver(&brcms_pci_driver);
}
-module_init(wl_module_init);
-module_exit(wl_module_exit);
+module_init(brcms_module_init);
+module_exit(brcms_module_exit);
/**
* This function frees the WL per-device resources.
@@ -1338,13 +1323,13 @@ module_exit(wl_module_exit);
* precondition: can both be called locked and unlocked
*
*/
-static void wl_free(struct wl_info *wl)
+static void brcms_free(struct brcms_info *wl)
{
- struct wl_timer *t, *next;
+ struct brcms_timer *t, *next;
/* free ucode data */
if (wl->fw.fw_cnt)
- wl_ucode_data_free();
+ brcms_ucode_data_free();
if (wl->irq)
free_irq(wl->irq, wl);
@@ -1352,12 +1337,12 @@ static void wl_free(struct wl_info *wl)
tasklet_kill(&wl->tasklet);
if (wl->pub) {
- wlc_module_unregister(wl->pub, "linux", wl);
+ brcms_c_module_unregister(wl->pub, "linux", wl);
}
/* free common resources */
if (wl->wlc) {
- wlc_detach(wl->wlc);
+ brcms_c_detach(wl->wlc);
wl->wlc = NULL;
wl->pub = NULL;
}
@@ -1389,7 +1374,7 @@ static void wl_free(struct wl_info *wl)
}
/* flags the given rate in rateset as requested */
-static void wl_set_basic_rate(struct wl_rateset *rs, u16 rate, bool is_br)
+static void brcms_set_basic_rate(struct wl_rateset *rs, u16 rate, bool is_br)
{
u32 i;
@@ -1398,9 +1383,9 @@ static void wl_set_basic_rate(struct wl_rateset *rs, u16 rate, bool is_br)
continue;
if (is_br)
- rs->rates[i] |= WLC_RATE_FLAG;
+ rs->rates[i] |= BRCMS_RATE_FLAG;
else
- rs->rates[i] &= WLC_RATE_MASK;
+ rs->rates[i] &= BRCMS_RATE_MASK;
return;
}
}
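For reference, a minimal stand-alone sketch (not part of the patch) of what the renamed flag/mask pair is doing, assuming the usual 802.11 supported-rates encoding in which rates are stored in 500 kb/s units and the top bit marks a basic rate; the concrete values of BRCMS_RATE_FLAG and BRCMS_RATE_MASK are assumptions here, the real ones live in the renamed rate header:

#include <stdio.h>

/* assumed values for illustration only */
#define BRCMS_RATE_FLAG 0x80	/* basic-rate marker bit */
#define BRCMS_RATE_MASK 0x7f	/* rate value in 500 kb/s units */

int main(void)
{
	unsigned char rate = 0x16;	/* 11 Mb/s (22 * 500 kb/s) */

	rate |= BRCMS_RATE_FLAG;	/* flag it as a basic rate */
	printf("basic? %d, rate %d kb/s\n",
	       !!(rate & BRCMS_RATE_FLAG), (rate & BRCMS_RATE_MASK) * 500);
	return 0;
}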
@@ -1408,8 +1393,8 @@ static void wl_set_basic_rate(struct wl_rateset *rs, u16 rate, bool is_br)
/*
* precondition: perimeter lock has been acquired
*/
-void wl_txflowcontrol(struct wl_info *wl, struct wl_if *wlif, bool state,
- int prio)
+void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
+ bool state, int prio)
{
wiphy_err(wl->wiphy, "Shouldn't be here %s\n", __func__);
}
@@ -1417,21 +1402,21 @@ void wl_txflowcontrol(struct wl_info *wl, struct wl_if *wlif, bool state,
/*
* precondition: perimeter lock has been acquired
*/
-void wl_init(struct wl_info *wl)
+void brcms_init(struct brcms_info *wl)
{
BCMMSG(WL_TO_HW(wl)->wiphy, "wl%d\n", wl->pub->unit);
- wl_reset(wl);
+ brcms_reset(wl);
- wlc_init(wl->wlc);
+ brcms_c_init(wl->wlc);
}
/*
* precondition: perimeter lock has been acquired
*/
-uint wl_reset(struct wl_info *wl)
+uint brcms_reset(struct brcms_info *wl)
{
BCMMSG(WL_TO_HW(wl)->wiphy, "wl%d\n", wl->pub->unit);
- wlc_reset(wl->wlc);
+ brcms_c_reset(wl->wlc);
/* dpc will not be rescheduled */
wl->resched = 0;
@@ -1443,54 +1428,54 @@ uint wl_reset(struct wl_info *wl)
* These are interrupt on/off entry points. Disable interrupts
* during interrupt state transition.
*/
-void wl_intrson(struct wl_info *wl)
+void brcms_intrson(struct brcms_info *wl)
{
unsigned long flags;
INT_LOCK(wl, flags);
- wlc_intrson(wl->wlc);
+ brcms_c_intrson(wl->wlc);
INT_UNLOCK(wl, flags);
}
/*
* precondition: perimeter lock has been acquired
*/
-bool wl_alloc_dma_resources(struct wl_info *wl, uint addrwidth)
+bool wl_alloc_dma_resources(struct brcms_info *wl, uint addrwidth)
{
return true;
}
-u32 wl_intrsoff(struct wl_info *wl)
+u32 brcms_intrsoff(struct brcms_info *wl)
{
unsigned long flags;
u32 status;
INT_LOCK(wl, flags);
- status = wlc_intrsoff(wl->wlc);
+ status = brcms_c_intrsoff(wl->wlc);
INT_UNLOCK(wl, flags);
return status;
}
-void wl_intrsrestore(struct wl_info *wl, u32 macintmask)
+void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask)
{
unsigned long flags;
INT_LOCK(wl, flags);
- wlc_intrsrestore(wl->wlc, macintmask);
+ brcms_c_intrsrestore(wl->wlc, macintmask);
INT_UNLOCK(wl, flags);
}
/*
* precondition: perimeter lock has been acquired
*/
-int wl_up(struct wl_info *wl)
+int brcms_up(struct brcms_info *wl)
{
int error = 0;
if (wl->pub->up)
return 0;
- error = wlc_up(wl->wlc);
+ error = brcms_c_up(wl->wlc);
return error;
}
@@ -1498,37 +1483,37 @@ int wl_up(struct wl_info *wl)
/*
* precondition: perimeter lock has been acquired
*/
-void wl_down(struct wl_info *wl)
+void brcms_down(struct brcms_info *wl)
{
uint callbacks, ret_val = 0;
/* call common down function */
- ret_val = wlc_down(wl->wlc);
+ ret_val = brcms_c_down(wl->wlc);
callbacks = atomic_read(&wl->callbacks) - ret_val;
/* wait for down callbacks to complete */
- WL_UNLOCK(wl);
+ UNLOCK(wl);
/* For HIGH_only driver, it's important to actually schedule other work,
* not just spin wait since everything runs at schedule level
*/
SPINWAIT((atomic_read(&wl->callbacks) > callbacks), 100 * 1000);
- WL_LOCK(wl);
+ LOCK(wl);
}
-static irqreturn_t wl_isr(int irq, void *dev_id)
+static irqreturn_t brcms_isr(int irq, void *dev_id)
{
- struct wl_info *wl;
+ struct brcms_info *wl;
bool ours, wantdpc;
unsigned long flags;
- wl = (struct wl_info *) dev_id;
+ wl = (struct brcms_info *) dev_id;
- WL_ISRLOCK(wl, flags);
+ ISR_LOCK(wl, flags);
/* call common first level interrupt handler */
- ours = wlc_isr(wl->wlc, &wantdpc);
+ ours = brcms_c_isr(wl->wlc, &wantdpc);
if (ours) {
/* if more to do... */
if (wantdpc) {
@@ -1539,18 +1524,18 @@ static irqreturn_t wl_isr(int irq, void *dev_id)
}
}
- WL_ISRUNLOCK(wl, flags);
+ ISR_UNLOCK(wl, flags);
return IRQ_RETVAL(ours);
}
-static void wl_dpc(unsigned long data)
+static void brcms_dpc(unsigned long data)
{
- struct wl_info *wl;
+ struct brcms_info *wl;
- wl = (struct wl_info *) data;
+ wl = (struct brcms_info *) data;
- WL_LOCK(wl);
+ LOCK(wl);
/* call the common second level interrupt handler */
if (wl->pub->up) {
@@ -1558,14 +1543,14 @@ static void wl_dpc(unsigned long data)
unsigned long flags;
INT_LOCK(wl, flags);
- wlc_intrsupd(wl->wlc);
+ brcms_c_intrsupd(wl->wlc);
INT_UNLOCK(wl, flags);
}
- wl->resched = wlc_dpc(wl->wlc, true);
+ wl->resched = brcms_c_dpc(wl->wlc, true);
}
- /* wlc_dpc() may bring the driver down */
+ /* brcms_c_dpc() may bring the driver down */
if (!wl->pub->up)
goto done;
@@ -1574,27 +1559,27 @@ static void wl_dpc(unsigned long data)
tasklet_schedule(&wl->tasklet);
else {
/* re-enable interrupts */
- wl_intrson(wl);
+ brcms_intrson(wl);
}
done:
- WL_UNLOCK(wl);
+ UNLOCK(wl);
}
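The brcms_isr()/brcms_dpc() pair above is the usual top-half/bottom-half split: the ISR only asks the common code whether the interrupt is ours and, when more work is pending, schedules the tasklet; the DPC then runs under the perimeter lock and either reschedules itself or re-enables device interrupts. A heavily simplified sketch of that pattern follows; it is illustrative only, and the demo_* names stand in for the driver's own helpers:

/* illustrative only; mirrors the brcms_isr()/brcms_dpc() split, not the driver itself */
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t lock;		/* "perimeter" lock, bottom-half context */
	spinlock_t isr_lock;		/* taken in hard-IRQ context */
	struct tasklet_struct tasklet;
	bool resched;
};

/* hypothetical stand-ins for brcms_c_isr(), brcms_c_dpc() and brcms_intrson() */
extern bool demo_hw_isr(struct demo_dev *dev, bool *wantdpc);
extern bool demo_hw_dpc(struct demo_dev *dev);
extern void demo_intrson(struct demo_dev *dev);

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct demo_dev *dev = dev_id;
	bool ours, wantdpc;

	spin_lock(&dev->isr_lock);
	ours = demo_hw_isr(dev, &wantdpc);	/* claim and ack the interrupt */
	if (ours && wantdpc)
		tasklet_schedule(&dev->tasklet);	/* defer the real work */
	spin_unlock(&dev->isr_lock);

	return IRQ_RETVAL(ours);
}

static void demo_dpc(unsigned long data)
{
	struct demo_dev *dev = (struct demo_dev *)data;

	spin_lock_bh(&dev->lock);			/* perimeter lock */
	dev->resched = demo_hw_dpc(dev);		/* second-level handling */
	if (dev->resched)
		tasklet_schedule(&dev->tasklet);	/* still more to do */
	else
		demo_intrson(dev);			/* done, re-enable interrupts */
	spin_unlock_bh(&dev->lock);
}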
/*
* is called by the kernel from software irq context
*/
-static void wl_timer(unsigned long data)
+static void brcms_timer(unsigned long data)
{
- _wl_timer((struct wl_timer *) data);
+ _brcms_timer((struct brcms_timer *) data);
}
/*
* precondition: perimeter lock is not acquired
*/
-static void _wl_timer(struct wl_timer *t)
+static void _brcms_timer(struct brcms_timer *t)
{
- WL_LOCK(t->wl);
+ LOCK(t->wl);
if (t->set) {
if (t->periodic) {
@@ -1610,7 +1595,7 @@ static void _wl_timer(struct wl_timer *t)
atomic_dec(&t->wl->callbacks);
- WL_UNLOCK(t->wl);
+ UNLOCK(t->wl);
}
/*
@@ -1619,21 +1604,22 @@ static void _wl_timer(struct wl_timer *t)
*
* precondition: perimeter lock has been acquired
*/
-struct wl_timer *wl_init_timer(struct wl_info *wl, void (*fn) (void *arg),
- void *arg, const char *name)
+struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
+ void (*fn) (void *arg),
+ void *arg, const char *name)
{
- struct wl_timer *t;
+ struct brcms_timer *t;
- t = kzalloc(sizeof(struct wl_timer), GFP_ATOMIC);
+ t = kzalloc(sizeof(struct brcms_timer), GFP_ATOMIC);
if (!t) {
- wiphy_err(wl->wiphy, "wl%d: wl_init_timer: out of memory\n",
+ wiphy_err(wl->wiphy, "wl%d: brcms_init_timer: out of memory\n",
wl->pub->unit);
return 0;
}
init_timer(&t->timer);
t->timer.data = (unsigned long) t;
- t->timer.function = wl_timer;
+ t->timer.function = brcms_timer;
t->wl = wl;
t->fn = fn;
t->arg = arg;
@@ -1654,7 +1640,8 @@ struct wl_timer *wl_init_timer(struct wl_info *wl, void (*fn) (void *arg),
*
* precondition: perimeter lock has been acquired
*/
-void wl_add_timer(struct wl_info *wl, struct wl_timer *t, uint ms, int periodic)
+void brcms_add_timer(struct brcms_info *wl, struct brcms_timer *t, uint ms,
+ int periodic)
{
#ifdef BCMDBG
if (t->set) {
@@ -1676,7 +1663,7 @@ void wl_add_timer(struct wl_info *wl, struct wl_timer *t, uint ms, int periodic)
*
* precondition: perimeter lock has been acquired
*/
-bool wl_del_timer(struct wl_info *wl, struct wl_timer *t)
+bool brcms_del_timer(struct brcms_info *wl, struct brcms_timer *t)
{
if (t->set) {
t->set = false;
@@ -1692,12 +1679,12 @@ bool wl_del_timer(struct wl_info *wl, struct wl_timer *t)
/*
* precondition: perimeter lock has been acquired
*/
-void wl_free_timer(struct wl_info *wl, struct wl_timer *t)
+void brcms_free_timer(struct brcms_info *wl, struct brcms_timer *t)
{
- struct wl_timer *tmp;
+ struct brcms_timer *tmp;
/* delete the timer in case it is active */
- wl_del_timer(wl, t);
+ brcms_del_timer(wl, t);
if (wl->timers == t) {
wl->timers = wl->timers->next;
@@ -1734,13 +1721,13 @@ static int wl_linux_watchdog(void *ctx)
return 0;
}
-struct wl_fw_hdr {
+struct firmware_hdr {
u32 offset;
u32 len;
u32 idx;
};
-char *wl_firmwares[WL_MAX_FW] = {
+char *brcms_firmwares[MAX_FW_IMAGES] = {
"brcm/bcm43xx",
NULL
};
@@ -1748,13 +1735,13 @@ char *wl_firmwares[WL_MAX_FW] = {
/*
* precondition: perimeter lock has been acquired
*/
-int wl_ucode_init_buf(struct wl_info *wl, void **pbuf, u32 idx)
+int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
{
int i, entry;
const u8 *pdata;
- struct wl_fw_hdr *hdr;
+ struct firmware_hdr *hdr;
for (i = 0; i < wl->fw.fw_cnt; i++) {
- hdr = (struct wl_fw_hdr *)wl->fw.fw_hdr[i]->data;
+ hdr = (struct firmware_hdr *)wl->fw.fw_hdr[i]->data;
for (entry = 0; entry < wl->fw.hdr_num_entries[i];
entry++, hdr++) {
if (hdr->idx == idx) {
@@ -1778,16 +1765,16 @@ fail:
}
/*
- * Precondition: Since this function is called in wl_pci_probe() context,
+ * Precondition: Since this function is called in brcms_pci_probe() context,
* no locking is required.
*/
-int wl_ucode_init_uint(struct wl_info *wl, u32 *data, u32 idx)
+int brcms_ucode_init_uint(struct brcms_info *wl, u32 *data, u32 idx)
{
int i, entry;
const u8 *pdata;
- struct wl_fw_hdr *hdr;
+ struct firmware_hdr *hdr;
for (i = 0; i < wl->fw.fw_cnt; i++) {
- hdr = (struct wl_fw_hdr *)wl->fw.fw_hdr[i]->data;
+ hdr = (struct firmware_hdr *)wl->fw.fw_hdr[i]->data;
for (entry = 0; entry < wl->fw.hdr_num_entries[i];
entry++, hdr++) {
if (hdr->idx == idx) {
@@ -1807,21 +1794,21 @@ int wl_ucode_init_uint(struct wl_info *wl, u32 *data, u32 idx)
}
/*
- * Precondition: Since this function is called in wl_pci_probe() context,
+ * Precondition: Since this function is called in brcms_pci_probe() context,
* no locking is required.
*/
-static int wl_request_fw(struct wl_info *wl, struct pci_dev *pdev)
+static int brcms_request_fw(struct brcms_info *wl, struct pci_dev *pdev)
{
int status;
struct device *device = &pdev->dev;
char fw_name[100];
int i;
- memset((void *)&wl->fw, 0, sizeof(struct wl_firmware));
- for (i = 0; i < WL_MAX_FW; i++) {
- if (wl_firmwares[i] == NULL)
+ memset((void *)&wl->fw, 0, sizeof(struct brcms_firmware));
+ for (i = 0; i < MAX_FW_IMAGES; i++) {
+ if (brcms_firmwares[i] == NULL)
break;
- sprintf(fw_name, "%s-%d.fw", wl_firmwares[i],
+ sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
UCODE_LOADER_API_VER);
status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
if (status) {
@@ -1829,7 +1816,7 @@ static int wl_request_fw(struct wl_info *wl, struct pci_dev *pdev)
KBUILD_MODNAME, fw_name);
return status;
}
- sprintf(fw_name, "%s_hdr-%d.fw", wl_firmwares[i],
+ sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
UCODE_LOADER_API_VER);
status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
if (status) {
@@ -1838,28 +1825,28 @@ static int wl_request_fw(struct wl_info *wl, struct pci_dev *pdev)
return status;
}
wl->fw.hdr_num_entries[i] =
- wl->fw.fw_hdr[i]->size / (sizeof(struct wl_fw_hdr));
+ wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
}
wl->fw.fw_cnt = i;
- return wl_ucode_data_init(wl);
+ return brcms_ucode_data_init(wl);
}
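brcms_request_fw() builds the image names from the brcms_firmwares[] table plus the loader API version, requests one code image and one header image per entry, and sizes the header table by dividing the blob length by sizeof(struct firmware_hdr). A small user-space sketch of just that name and entry-count arithmetic; the UCODE_LOADER_API_VER value is an assumption for illustration:

#include <stdio.h>

struct firmware_hdr {		/* same three-field layout as above, u32 each */
	unsigned int offset;
	unsigned int len;
	unsigned int idx;
};

#define UCODE_LOADER_API_VER 0	/* assumed value, for illustration only */

int main(void)
{
	char fw_name[100], hdr_name[100];
	size_t hdr_blob_size = 10 * sizeof(struct firmware_hdr); /* pretend header image */

	snprintf(fw_name, sizeof(fw_name), "%s-%d.fw", "brcm/bcm43xx",
		 UCODE_LOADER_API_VER);
	snprintf(hdr_name, sizeof(hdr_name), "%s_hdr-%d.fw", "brcm/bcm43xx",
		 UCODE_LOADER_API_VER);

	printf("%s + %s: %zu header entries\n", fw_name, hdr_name,
	       hdr_blob_size / sizeof(struct firmware_hdr));
	return 0;
}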
/*
* precondition: can both be called locked and unlocked
*/
-void wl_ucode_free_buf(void *p)
+void brcms_ucode_free_buf(void *p)
{
kfree(p);
}
/*
- * Precondition: Since this function is called in wl_pci_probe() context,
+ * Precondition: Since this function is called in brcms_pci_probe() context,
* no locking is required.
*/
-static void wl_release_fw(struct wl_info *wl)
+static void brcms_release_fw(struct brcms_info *wl)
{
int i;
- for (i = 0; i < WL_MAX_FW; i++) {
+ for (i = 0; i < MAX_FW_IMAGES; i++) {
release_firmware(wl->fw.fw_bin[i]);
release_firmware(wl->fw.fw_hdr[i]);
}
@@ -1869,18 +1856,18 @@ static void wl_release_fw(struct wl_info *wl)
/*
* checks validity of all firmware images loaded from user space
*
- * Precondition: Since this function is called in wl_pci_probe() context,
+ * Precondition: Since this function is called in brcms_pci_probe() context,
* no locking is required.
*/
-int wl_check_firmwares(struct wl_info *wl)
+int brcms_check_firmwares(struct brcms_info *wl)
{
int i;
int entry;
int rc = 0;
const struct firmware *fw;
const struct firmware *fw_hdr;
- struct wl_fw_hdr *ucode_hdr;
- for (i = 0; i < WL_MAX_FW && rc == 0; i++) {
+ struct firmware_hdr *ucode_hdr;
+ for (i = 0; i < MAX_FW_IMAGES && rc == 0; i++) {
fw = wl->fw.fw_bin[i];
fw_hdr = wl->fw.fw_hdr[i];
if (fw == NULL && fw_hdr == NULL) {
@@ -1889,10 +1876,10 @@ int wl_check_firmwares(struct wl_info *wl)
wiphy_err(wl->wiphy, "%s: invalid bin/hdr fw\n",
__func__);
rc = -EBADF;
- } else if (fw_hdr->size % sizeof(struct wl_fw_hdr)) {
+ } else if (fw_hdr->size % sizeof(struct firmware_hdr)) {
wiphy_err(wl->wiphy, "%s: non integral fw hdr file "
"size %zu/%zu\n", __func__, fw_hdr->size,
- sizeof(struct wl_fw_hdr));
+ sizeof(struct firmware_hdr));
rc = -EBADF;
} else if (fw->size < MIN_FW_SIZE || fw->size > MAX_FW_SIZE) {
wiphy_err(wl->wiphy, "%s: out of bounds fw file size "
@@ -1900,7 +1887,7 @@ int wl_check_firmwares(struct wl_info *wl)
rc = -EBADF;
} else {
/* check if ucode section overruns firmware image */
- ucode_hdr = (struct wl_fw_hdr *)fw_hdr->data;
+ ucode_hdr = (struct firmware_hdr *)fw_hdr->data;
for (entry = 0; entry < wl->fw.hdr_num_entries[i] &&
!rc; entry++, ucode_hdr++) {
if (ucode_hdr->offset + ucode_hdr->len >
@@ -1924,24 +1911,24 @@ int wl_check_firmwares(struct wl_info *wl)
/*
* precondition: perimeter lock has been acquired
*/
-bool wl_rfkill_set_hw_state(struct wl_info *wl)
+bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
{
- bool blocked = wlc_check_radio_disabled(wl->wlc);
+ bool blocked = brcms_c_check_radio_disabled(wl->wlc);
- WL_UNLOCK(wl);
+ UNLOCK(wl);
wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
if (blocked)
wiphy_rfkill_start_polling(wl->pub->ieee_hw->wiphy);
- WL_LOCK(wl);
+ LOCK(wl);
return blocked;
}
/*
* precondition: perimeter lock has been acquired
*/
-void wl_msleep(struct wl_info *wl, uint ms)
+void brcms_msleep(struct brcms_info *wl, uint ms)
{
- WL_UNLOCK(wl);
+ UNLOCK(wl);
msleep(ms);
- WL_LOCK(wl);
+ LOCK(wl);
}
diff --git a/drivers/staging/brcm80211/brcmsmac/wl_mac80211.h b/drivers/staging/brcm80211/brcmsmac/mac80211_if.h
index e703d8bb94d..40e3d375ea9 100644
--- a/drivers/staging/brcm80211/brcmsmac/wl_mac80211.h
+++ b/drivers/staging/brcm80211/brcmsmac/mac80211_if.h
@@ -14,42 +14,49 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _wl_mac80211_h_
-#define _wl_mac80211_h_
+#ifndef _BRCM_MAC80211_IF_H_
+#define _BRCM_MAC80211_IF_H_
+
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+
+/* softmac ioctl definitions */
+#define BRCMS_SET_SHORTSLOT_OVERRIDE 146
+
/* BMAC Note: the high-only driver no longer works in softirq context as it needs to block and
 * sleep, so the perimeter lock has to be a semaphore instead of a spinlock. This requires timers
 * to be submitted to a workqueue instead of running on a kernel timer
*/
-struct wl_timer {
+struct brcms_timer {
struct timer_list timer;
- struct wl_info *wl;
+ struct brcms_info *wl;
void (*fn) (void *);
void *arg; /* argument to fn */
uint ms;
bool periodic;
bool set;
- struct wl_timer *next;
+ struct brcms_timer *next;
#ifdef BCMDBG
char *name; /* Description of the timer */
#endif
};
-struct wl_if {
+struct brcms_if {
uint subunit; /* WDS/BSS unit */
struct pci_dev *pci_dev;
};
-#define WL_MAX_FW 4
-struct wl_firmware {
+#define MAX_FW_IMAGES 4
+struct brcms_firmware {
u32 fw_cnt;
- const struct firmware *fw_bin[WL_MAX_FW];
- const struct firmware *fw_hdr[WL_MAX_FW];
- u32 hdr_num_entries[WL_MAX_FW];
+ const struct firmware *fw_bin[MAX_FW_IMAGES];
+ const struct firmware *fw_hdr[MAX_FW_IMAGES];
+ u32 hdr_num_entries[MAX_FW_IMAGES];
};
-struct wl_info {
- struct wlc_pub *pub; /* pointer to public wlc state */
+struct brcms_info {
+ struct brcms_pub *pub; /* pointer to public wlc state */
void *wlc; /* pointer to private common os-independent data */
u32 magic;
@@ -57,29 +64,45 @@ struct wl_info {
spinlock_t lock; /* per-device perimeter lock */
spinlock_t isr_lock; /* per-device ISR synchronization lock */
+
+ /* bus type and regsva for unmap in brcms_free() */
uint bcm_bustype; /* bus type */
- bool piomode; /* set from insmod argument */
void *regsva; /* opaque chip registers virtual address */
+
+ /* timer related fields */
atomic_t callbacks; /* # outstanding callback functions */
- struct wl_timer *timers; /* timer cleanup queue */
+ struct brcms_timer *timers; /* timer cleanup queue */
+
struct tasklet_struct tasklet; /* dpc tasklet */
bool resched; /* dpc needs to be and is rescheduled */
#ifdef LINUXSTA_PS
u32 pci_psstate[16]; /* pci ps-state save/restore */
#endif
- struct wl_firmware fw;
+ struct brcms_firmware fw;
struct wiphy *wiphy;
};
-#define WL_LOCK(wl) spin_lock_bh(&(wl)->lock)
-#define WL_UNLOCK(wl) spin_unlock_bh(&(wl)->lock)
-
-/* locking from inside wl_isr */
-#define WL_ISRLOCK(wl, flags) do {spin_lock(&(wl)->isr_lock); (void)(flags); } while (0)
-#define WL_ISRUNLOCK(wl, flags) do {spin_unlock(&(wl)->isr_lock); (void)(flags); } while (0)
+/* misc callbacks */
+extern void brcms_init(struct brcms_info *wl);
+extern uint brcms_reset(struct brcms_info *wl);
+extern void brcms_intrson(struct brcms_info *wl);
+extern u32 brcms_intrsoff(struct brcms_info *wl);
+extern void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
+extern int brcms_up(struct brcms_info *wl);
+extern void brcms_down(struct brcms_info *wl);
+extern void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
+ bool state, int prio);
+extern bool wl_alloc_dma_resources(struct brcms_info *wl, uint dmaddrwidth);
+extern bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
-/* locking under WL_LOCK() to synchronize with wl_isr */
-#define INT_LOCK(wl, flags) spin_lock_irqsave(&(wl)->isr_lock, flags)
-#define INT_UNLOCK(wl, flags) spin_unlock_irqrestore(&(wl)->isr_lock, flags)
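The rename drops the WL_ prefix from the lock wrappers used throughout mac80211_if.c: a bottom-half perimeter spinlock, a plain spinlock taken from the hard interrupt handler, and an irqsave variant of the ISR lock for code that must synchronize with the ISR. Their new definitions are not part of this hunk, so the sketch below only mirrors the removed macros under demo names; it is a guess at the shape, not the patch's actual code:

#include <linux/spinlock.h>

struct demo_wl {
	spinlock_t lock;	/* per-device perimeter lock */
	spinlock_t isr_lock;	/* per-device ISR synchronization lock */
};

/* perimeter lock: serializes tasklet/process context, disables bottom halves */
#define DEMO_LOCK(wl)		spin_lock_bh(&(wl)->lock)
#define DEMO_UNLOCK(wl)		spin_unlock_bh(&(wl)->lock)

/* taken from inside the hard interrupt handler itself */
#define DEMO_ISR_LOCK(wl, flags) \
	do { spin_lock(&(wl)->isr_lock); (void)(flags); } while (0)
#define DEMO_ISR_UNLOCK(wl, flags) \
	do { spin_unlock(&(wl)->isr_lock); (void)(flags); } while (0)

/* taken under the perimeter lock to synchronize with the ISR */
#define DEMO_INT_LOCK(wl, flags) \
	spin_lock_irqsave(&(wl)->isr_lock, flags)
#define DEMO_INT_UNLOCK(wl, flags) \
	spin_unlock_irqrestore(&(wl)->isr_lock, flags)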
+/* timer functions */
+extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
+ void (*fn) (void *arg), void *arg,
+ const char *name);
+extern void brcms_free_timer(struct brcms_info *wl, struct brcms_timer *timer);
+extern void brcms_add_timer(struct brcms_info *wl, struct brcms_timer *timer,
+ uint ms, int periodic);
+extern bool brcms_del_timer(struct brcms_info *wl, struct brcms_timer *timer);
+extern void brcms_msleep(struct brcms_info *wl, uint ms);
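A short usage sketch of the timer API declared above, roughly as a call site elsewhere in the driver would use it; the callback name, interval and error handling are made up:

/* assumes the usual kernel includes plus this mac80211_if.h */
static void demo_watchdog(void *arg)
{
	struct brcms_info *wl = arg;

	(void)wl;	/* periodic housekeeping would run here, under the perimeter lock */
}

static int demo_timer_setup(struct brcms_info *wl)
{
	struct brcms_timer *t;

	t = brcms_init_timer(wl, demo_watchdog, wl, "demo_wd");
	if (!t)
		return -ENOMEM;

	brcms_add_timer(wl, t, 1000, true);	/* fire every 1000 ms until deleted */

	/* ... and on teardown: */
	brcms_del_timer(wl, t);
	brcms_free_timer(wl, t);
	return 0;
}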
-#endif /* _wl_mac80211_h_ */
+#endif /* _BRCM_MAC80211_IF_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_main.c b/drivers/staging/brcm80211/brcmsmac/main.c
index 4b4a31eff90..1763c4535cd 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_main.c
+++ b/drivers/staging/brcm80211/brcmsmac/main.c
@@ -13,47 +13,23 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
-#include <linux/ctype.h>
-#include <linux/etherdevice.h>
+
#include <linux/pci_ids.h>
#include <net/mac80211.h>
-#include <bcmdefs.h>
-#include <bcmdevs.h>
-#include <bcmutils.h>
-#include <bcmwifi.h>
-#include <bcmnvram.h>
+#include <brcm_hw_ids.h>
#include <aiutils.h>
-#include <pcicfg.h>
-#include <bcmsrom.h>
-#include <wlioctl.h>
-#include <sbhnddma.h>
-#include <hnddma.h>
-
-#include "wlc_pmu.h"
-#include "d11.h"
-#include "wlc_types.h"
-#include "wlc_cfg.h"
-#include "wlc_rate.h"
-#include "wlc_scb.h"
-#include "wlc_pub.h"
-#include "wlc_key.h"
-#include "wlc_bsscfg.h"
-#include "phy/wlc_phy_hal.h"
-#include "wlc_channel.h"
-#include "wlc_main.h"
-#include "wlc_bmac.h"
-#include "wlc_phy_hal.h"
-#include "wlc_phy_shim.h"
-#include "wlc_antsel.h"
-#include "wlc_stf.h"
-#include "wlc_ampdu.h"
-#include "wl_export.h"
-#include "wlc_alloc.h"
-#include "wl_dbg.h"
-
-#include "wl_mac80211.h"
+#include "rate.h"
+#include "scb.h"
+#include "phy/phy_hal.h"
+#include "channel.h"
+#include "bmac.h"
+#include "antsel.h"
+#include "stf.h"
+#include "ampdu.h"
+#include "alloc.h"
+#include "mac80211_if.h"
+#include "main.h"
/*
* WPA(2) definitions
@@ -71,7 +47,6 @@
#define ALLPRIO -1
/*
- * buffer length needed for wlc_format_ssid
* 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
*/
#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1)
@@ -79,11 +54,14 @@
#define TIMER_INTERVAL_WATCHDOG 1000 /* watchdog timer, in unit of ms */
#define TIMER_INTERVAL_RADIOCHK 800 /* radio monitor timer, in unit of ms */
-#ifndef WLC_MPC_MAX_DELAYCNT
-#define WLC_MPC_MAX_DELAYCNT 10 /* Max MPC timeout, in unit of watchdog */
+/* Max MPC timeout, in unit of watchdog */
+#ifndef BRCMS_MPC_MAX_DELAYCNT
+#define BRCMS_MPC_MAX_DELAYCNT 10
#endif
-#define WLC_MPC_MIN_DELAYCNT 1 /* Min MPC timeout, in unit of watchdog */
-#define WLC_MPC_THRESHOLD 3 /* MPC count threshold level */
+
+/* Min MPC timeout, in unit of watchdog */
+#define BRCMS_MPC_MIN_DELAYCNT 1
+#define BRCMS_MPC_THRESHOLD 3 /* MPC count threshold level */
#define BEACON_INTERVAL_DEFAULT 100 /* beacon interval, in unit of 1024TU */
#define DTIM_INTERVAL_DEFAULT 3 /* DTIM interval, in unit of beacon interval */
@@ -94,6 +72,94 @@
#define TBTT_ALIGN_LEEWAY_US 100 /* min leeway before first TBTT in us */
+/* Software feature flag defines used by wlfeatureflag */
+#define WL_SWFL_NOHWRADIO 0x0004
+#define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */
+#define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */
+
+/* n-mode support capability */
+/* 2x2 includes both 1x1 & 2x2 devices
+ * reserved #define 2 for future when we want to separate 1x1 & 2x2 and
+ * control it independently
+ */
+#define WL_11N_2x2 1
+#define WL_11N_3x3 3
+#define WL_11N_4x4 4
+
+/* define 11n feature disable flags */
+#define WLFEATURE_DISABLE_11N 0x00000001
+#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002
+#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004
+#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008
+#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010
+#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
+#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
+#define WLFEATURE_DISABLE_11N_GF 0x00000080
+
+#define EDCF_ACI_MASK 0x60
+#define EDCF_ACI_SHIFT 5
+#define EDCF_ECWMIN_MASK 0x0f
+#define EDCF_ECWMAX_SHIFT 4
+#define EDCF_AIFSN_MASK 0x0f
+#define EDCF_AIFSN_MAX 15
+#define EDCF_ECWMAX_MASK 0xf0
+
+#define EDCF_AC_BE_TXOP_STA 0x0000
+#define EDCF_AC_BK_TXOP_STA 0x0000
+#define EDCF_AC_VO_ACI_STA 0x62
+#define EDCF_AC_VO_ECW_STA 0x32
+#define EDCF_AC_VI_ACI_STA 0x42
+#define EDCF_AC_VI_ECW_STA 0x43
+#define EDCF_AC_BK_ECW_STA 0xA4
+#define EDCF_AC_VI_TXOP_STA 0x005e
+#define EDCF_AC_VO_TXOP_STA 0x002f
+#define EDCF_AC_BE_ACI_STA 0x03
+#define EDCF_AC_BE_ECW_STA 0xA4
+#define EDCF_AC_BK_ACI_STA 0x27
+#define EDCF_AC_VO_TXOP_AP 0x002f
+
+#define EDCF_TXOP2USEC(txop) ((txop) << 5)
+#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
+
+#define APHY_SYMBOL_TIME 4
+#define APHY_PREAMBLE_TIME 16
+#define APHY_SIGNAL_TIME 4
+#define APHY_SIFS_TIME 16
+#define APHY_SERVICE_NBITS 16
+#define APHY_TAIL_NBITS 6
+#define BPHY_SIFS_TIME 10
+#define BPHY_PLCP_SHORT_TIME 96
+
+#define PREN_PREAMBLE 24
+#define PREN_MM_EXT 12
+#define PREN_PREAMBLE_EXT 4
+
+#define DOT11_MAC_HDR_LEN 24
+#define DOT11_ACK_LEN 10
+#define DOT11_BA_LEN 4
+#define DOT11_OFDM_SIGNAL_EXTENSION 6
+#define DOT11_MIN_FRAG_LEN 256
+#define DOT11_RTS_LEN 16
+#define DOT11_CTS_LEN 10
+#define DOT11_BA_BITMAP_LEN 128
+#define DOT11_MIN_BEACON_PERIOD 1
+#define DOT11_MAX_BEACON_PERIOD 0xFFFF
+#define DOT11_MAXNUMFRAGS 16
+#define DOT11_MAX_FRAG_LEN 2346
+
+#define BPHY_PLCP_TIME 192
+#define RIFS_11N_TIME 2
+
+#define WME_VER 1
+#define WME_SUBTYPE_PARAM_IE 1
+#define WME_TYPE 2
+#define WME_OUI "\x00\x50\xf2"
+
+#define AC_BE 0
+#define AC_BK 1
+#define AC_VI 2
+#define AC_VO 3
+
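The EDCF_* values above pack the 802.11e EDCA parameters the way the ucode expects them. A quick stand-alone decode of the AC_VI station defaults recovers the familiar numbers (AIFSN 2, CWmin 7, CWmax 15, TXOP 3008 us); only macros copied from the block above are used:

#include <stdio.h>

#define EDCF_ACI_MASK		0x60
#define EDCF_ACI_SHIFT		5
#define EDCF_ECWMIN_MASK	0x0f
#define EDCF_ECWMAX_MASK	0xf0
#define EDCF_ECWMAX_SHIFT	4
#define EDCF_AIFSN_MASK		0x0f

#define EDCF_TXOP2USEC(txop)	((txop) << 5)
#define EDCF_ECW2CW(exp)	((1 << (exp)) - 1)

int main(void)
{
	unsigned char aci = 0x42;	/* EDCF_AC_VI_ACI_STA */
	unsigned char ecw = 0x43;	/* EDCF_AC_VI_ECW_STA */
	unsigned short txop = 0x005e;	/* EDCF_AC_VI_TXOP_STA */

	printf("AC %d: AIFSN %d, CWmin %d, CWmax %d, TXOP %d us\n",
	       (aci & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT,
	       aci & EDCF_AIFSN_MASK,
	       EDCF_ECW2CW(ecw & EDCF_ECWMIN_MASK),
	       EDCF_ECW2CW((ecw & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT),
	       EDCF_TXOP2USEC(txop));
	return 0;
}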
/*
* driver maintains internal 'tick'(wlc->pub->now) which increments in 1s OS timer(soft
* watchdog) it is not a wall clock and won't increment when driver is in "down" state
@@ -101,26 +167,22 @@
* calibration and scb update
*/
-/* watchdog trigger mode: OSL timer or TBTT */
-#define WLC_WATCHDOG_TBTT(wlc) \
- (wlc->stas_associated > 0 && wlc->PM != PM_OFF && wlc->pub->align_wd_tbtt)
-
/* To inform the ucode of the last mcast frame posted so that it can clear moredata bit */
-#define BCMCFID(wlc, fid) wlc_bmac_write_shm((wlc)->hw, M_BCMC_FID, (fid))
+#define BCMCFID(wlc, fid) brcms_b_write_shm((wlc)->hw, M_BCMC_FID, (fid))
-#define WLC_WAR16165(wlc) (wlc->pub->sih->bustype == PCI_BUS && \
+#define BRCMS_WAR16165(wlc) (wlc->pub->sih->bustype == PCI_BUS && \
(!AP_ENAB(wlc->pub)) && (wlc->war16165))
/* debug/trace */
-uint wl_msg_level =
+uint brcm_msg_level =
#if defined(BCMDBG)
- WL_ERROR_VAL;
+ LOG_ERROR_VAL;
#else
- 0;
+ 0;
#endif /* BCMDBG */
/* Find basic rate for a given rate */
-#define WLC_BASIC_RATE(wlc, rspec) (IS_MCS(rspec) ? \
+#define BRCMS_BASIC_RATE(wlc, rspec) (IS_MCS(rspec) ? \
(wlc)->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK].leg_ofdm] : \
(wlc)->band->basic_rate[rspec & RSPEC_RATE_MASK])
@@ -128,7 +190,7 @@ uint wl_msg_level =
#define RFDISABLE_DEFAULT 10000000 /* rfdisable delay timer 500 ms, runs of ALP clock */
-#define WLC_TEMPSENSE_PERIOD 10 /* 10 second timeout */
+#define BRCMS_TEMPSENSE_PERIOD 10 /* 10 second timeout */
#define SCAN_IN_PROGRESS(x) 0
@@ -136,31 +198,9 @@ uint wl_msg_level =
#ifdef BCMDBG
/* pointer to most recently allocated wl/wlc */
-static struct wlc_info *wlc_info_dbg = (struct wlc_info *) (NULL);
+static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL);
#endif
-/* IOVar table */
-
-/* Parameter IDs, for use only internally to wlc -- in the wlc_iovars
- * table and by the wlc_doiovar() function. No ordering is imposed:
- * the table is keyed by name, and the function uses a switch.
- */
-enum {
- IOV_MPC = 1,
- IOV_RTSTHRESH,
- IOV_QTXPOWER,
- IOV_BCN_LI_BCN, /* Beacon listen interval in # of beacons */
- IOV_LAST /* In case of a need to check max ID number */
-};
-
-const bcm_iovar_t wlc_iovars[] = {
- {"mpc", IOV_MPC, (0), IOVT_BOOL, 0},
- {"rtsthresh", IOV_RTSTHRESH, (IOVF_WHL), IOVT_UINT16, 0},
- {"qtxpower", IOV_QTXPOWER, (IOVF_WHL), IOVT_UINT32, 0},
- {"bcn_li_bcn", IOV_BCN_LI_BCN, (0), IOVT_UINT8, 0},
- {NULL, 0, 0, 0, 0}
-};
-
const u8 prio2fifo[NUMPRIO] = {
TX_AC_BE_FIFO, /* 0 BE AC_BE Best Effort */
TX_AC_BK_FIFO, /* 1 BK AC_BK Background */
@@ -177,35 +217,48 @@ const u8 prio2fifo[NUMPRIO] = {
* Odd numbers are used for HI priority traffic at same precedence levels
* These constants are used ONLY by wlc_prio2prec_map. Do not use them elsewhere.
*/
-#define _WLC_PREC_NONE 0 /* None = - */
-#define _WLC_PREC_BK 2 /* BK - Background */
-#define _WLC_PREC_BE 4 /* BE - Best-effort */
-#define _WLC_PREC_EE 6 /* EE - Excellent-effort */
-#define _WLC_PREC_CL 8 /* CL - Controlled Load */
-#define _WLC_PREC_VI 10 /* Vi - Video */
-#define _WLC_PREC_VO 12 /* Vo - Voice */
-#define _WLC_PREC_NC 14 /* NC - Network Control */
+#define _BRCMS_PREC_NONE 0 /* None = - */
+#define _BRCMS_PREC_BK 2 /* BK - Background */
+#define _BRCMS_PREC_BE 4 /* BE - Best-effort */
+#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */
+#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */
+#define _BRCMS_PREC_VI 10 /* Vi - Video */
+#define _BRCMS_PREC_VO 12 /* Vo - Voice */
+#define _BRCMS_PREC_NC 14 /* NC - Network Control */
+
+#define MAXMACLIST 64 /* max # source MAC matches */
+#define BCN_TEMPLATE_COUNT 2
+
+/* The BSS is generating beacons in HW */
+#define BRCMS_BSSCFG_HW_BCN 0x20
+
+#define HWBCN_ENAB(cfg) (((cfg)->flags & BRCMS_BSSCFG_HW_BCN) != 0)
+
+#define MBSS_BCN_ENAB(cfg) 0
+#define MBSS_PRB_ENAB(cfg) 0
+#define SOFTBCN_ENAB(pub) (0)
/* 802.1D Priority to precedence queue mapping */
const u8 wlc_prio2prec_map[] = {
- _WLC_PREC_BE, /* 0 BE - Best-effort */
- _WLC_PREC_BK, /* 1 BK - Background */
- _WLC_PREC_NONE, /* 2 None = - */
- _WLC_PREC_EE, /* 3 EE - Excellent-effort */
- _WLC_PREC_CL, /* 4 CL - Controlled Load */
- _WLC_PREC_VI, /* 5 Vi - Video */
- _WLC_PREC_VO, /* 6 Vo - Voice */
- _WLC_PREC_NC, /* 7 NC - Network Control */
+ _BRCMS_PREC_BE, /* 0 BE - Best-effort */
+ _BRCMS_PREC_BK, /* 1 BK - Background */
+ _BRCMS_PREC_NONE, /* 2 None = - */
+ _BRCMS_PREC_EE, /* 3 EE - Excellent-effort */
+ _BRCMS_PREC_CL, /* 4 CL - Controlled Load */
+ _BRCMS_PREC_VI, /* 5 Vi - Video */
+ _BRCMS_PREC_VO, /* 6 Vo - Voice */
+ _BRCMS_PREC_NC, /* 7 NC - Network Control */
};
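As the comment above the table says, each 802.1D priority maps to an even 'normal' precedence, and the next odd value is reserved for high-priority traffic at the same level. A stand-alone illustration of the lookup, with the table copied from above:

#include <stdio.h>

#define _BRCMS_PREC_NONE	0
#define _BRCMS_PREC_BK		2
#define _BRCMS_PREC_BE		4
#define _BRCMS_PREC_EE		6
#define _BRCMS_PREC_CL		8
#define _BRCMS_PREC_VI		10
#define _BRCMS_PREC_VO		12
#define _BRCMS_PREC_NC		14

static const unsigned char prio2prec[8] = {
	_BRCMS_PREC_BE, _BRCMS_PREC_BK, _BRCMS_PREC_NONE, _BRCMS_PREC_EE,
	_BRCMS_PREC_CL, _BRCMS_PREC_VI, _BRCMS_PREC_VO, _BRCMS_PREC_NC,
};

int main(void)
{
	int prio = 5;	/* 802.1D "Video" */

	printf("prio %d -> precedence %d (hi variant %d)\n",
	       prio, prio2prec[prio], prio2prec[prio] + 1);
	return 0;
}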
-/* Sanity check for tx_prec_map and fifo synchup
- * Either there are some packets pending for the fifo, else if fifo is empty then
- * all the corresponding precmap bits should be set
- */
-#define WLC_TX_FIFO_CHECK(wlc, fifo) (TXPKTPENDGET((wlc), (fifo)) || \
- (TXPKTPENDGET((wlc), (fifo)) == 0 && \
- ((wlc)->tx_prec_map & (wlc)->fifo2prec_map[(fifo)]) == \
- (wlc)->fifo2prec_map[(fifo)]))
+/* Check if a particular BSS config is AP or STA */
+#define BSSCFG_AP(cfg) (0)
+#define BSSCFG_STA(cfg) (1)
+#define BSSCFG_IBSS(cfg) (!(cfg)->BSS)
+
+/* As above for all non-NULL BSS configs */
+#define FOREACH_BSS(wlc, idx, cfg) \
+ for (idx = 0; (int) idx < BRCMS_MAXBSSCFG; idx++) \
+ if ((cfg = (wlc)->bsscfg[idx]))
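FOREACH_BSS() is a dangling-if iterator: it walks the bsscfg array and executes the statement that follows only for non-NULL entries, which is how brcms_c_init() below programs the MAC and BSSID per BSS. A minimal illustration, taking BRCMS_MAXBSSCFG and the struct layout on faith from the headers:

/* assumes "main.h" provides struct brcms_c_info/brcms_bss_cfg and BRCMS_MAXBSSCFG */
static void demo_walk_bss(struct brcms_c_info *wlc)
{
	struct brcms_bss_cfg *cfg;
	int idx;

	FOREACH_BSS(wlc, idx, cfg) {
		/* body runs once per configured (non-NULL) BSS */
		(void)cfg;
	}
}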
/* TX FIFO number to WME/802.1E Access Category */
const u8 wme_fifo2ac[] = { AC_BK, AC_BE, AC_VI, AC_VO, AC_BE, AC_BE };
@@ -213,13 +266,13 @@ const u8 wme_fifo2ac[] = { AC_BK, AC_BE, AC_VI, AC_VO, AC_BE, AC_BE };
/* WME/802.1E Access Category to TX FIFO number */
static const u8 wme_ac2fifo[] = { 1, 0, 2, 3 };
-static bool in_send_q = false;
+static bool in_send_q;
/* Shared memory location index for various AC params */
#define wme_shmemacindex(ac) wme_ac2fifo[ac]
#ifdef BCMDBG
-static const char *fifo_names[] = {
+static const char * const fifo_names[] = {
"AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" };
#else
static const char fifo_names[6][0];
@@ -233,148 +286,147 @@ static const u8 acbitmap2maxprio[] = {
};
/* currently the best mechanism for determining SIFS is the band in use */
-#define SIFS(band) ((band)->bandtype == WLC_BAND_5G ? APHY_SIFS_TIME : BPHY_SIFS_TIME);
-
-/* value for # replay counters currently supported */
-#define WLC_REPLAY_CNTRS_VALUE WPA_CAP_16_REPLAY_CNTRS
+#define SIFS(band) ((band)->bandtype == BRCM_BAND_5G ? APHY_SIFS_TIME : \
+ BPHY_SIFS_TIME);
/* local prototypes */
-static u16 wlc_d11hdrs_mac80211(struct wlc_info *wlc,
+static u16 brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc,
struct ieee80211_hw *hw,
struct sk_buff *p,
struct scb *scb, uint frag,
uint nfrags, uint queue,
uint next_frag_len,
- wsec_key_t *key,
+ struct wsec_key *key,
ratespec_t rspec_override);
-static void wlc_bss_default_init(struct wlc_info *wlc);
-static void wlc_ucode_mac_upd(struct wlc_info *wlc);
-static ratespec_t mac80211_wlc_set_nrate(struct wlc_info *wlc,
- struct wlcband *cur_band, u32 int_val);
-static void wlc_tx_prec_map_init(struct wlc_info *wlc);
-static void wlc_watchdog(void *arg);
-static void wlc_watchdog_by_timer(void *arg);
-static u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate);
-static int wlc_set_rateset(struct wlc_info *wlc, wlc_rateset_t *rs_arg);
-static int wlc_iovar_rangecheck(struct wlc_info *wlc, u32 val,
- const bcm_iovar_t *vi);
-static u8 wlc_local_constraint_qdbm(struct wlc_info *wlc);
+static void brcms_c_bss_default_init(struct brcms_c_info *wlc);
+static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc);
+static ratespec_t mac80211_wlc_set_nrate(struct brcms_c_info *wlc,
+ struct brcms_band *cur_band,
+ u32 int_val);
+static void brcms_c_tx_prec_map_init(struct brcms_c_info *wlc);
+static void brcms_c_watchdog(void *arg);
+static void brcms_c_watchdog_by_timer(void *arg);
+static u16 brcms_c_rate_shm_offset(struct brcms_c_info *wlc, u8 rate);
+static int brcms_c_set_rateset(struct brcms_c_info *wlc, wlc_rateset_t *rs_arg);
+static u8 brcms_c_local_constraint_qdbm(struct brcms_c_info *wlc);
/* send and receive */
-static struct wlc_txq_info *wlc_txq_alloc(struct wlc_info *wlc);
-static void wlc_txq_free(struct wlc_info *wlc,
- struct wlc_txq_info *qi);
-static void wlc_txflowcontrol_signal(struct wlc_info *wlc,
- struct wlc_txq_info *qi,
+static struct brcms_txq_info *brcms_c_txq_alloc(struct brcms_c_info *wlc);
+static void brcms_c_txq_free(struct brcms_c_info *wlc,
+ struct brcms_txq_info *qi);
+static void brcms_c_txflowcontrol_signal(struct brcms_c_info *wlc,
+ struct brcms_txq_info *qi,
bool on, int prio);
-static void wlc_txflowcontrol_reset(struct wlc_info *wlc);
-static void wlc_compute_cck_plcp(struct wlc_info *wlc, ratespec_t rate,
+static void brcms_c_txflowcontrol_reset(struct brcms_c_info *wlc);
+static void brcms_c_compute_cck_plcp(struct brcms_c_info *wlc, ratespec_t rate,
uint length, u8 *plcp);
-static void wlc_compute_ofdm_plcp(ratespec_t rate, uint length, u8 *plcp);
-static void wlc_compute_mimo_plcp(ratespec_t rate, uint length, u8 *plcp);
-static u16 wlc_compute_frame_dur(struct wlc_info *wlc, ratespec_t rate,
+static void brcms_c_compute_ofdm_plcp(ratespec_t rate, uint length, u8 *plcp);
+static void brcms_c_compute_mimo_plcp(ratespec_t rate, uint length, u8 *plcp);
+static u16 brcms_c_compute_frame_dur(struct brcms_c_info *wlc, ratespec_t rate,
u8 preamble_type, uint next_frag_len);
-static u64 wlc_recover_tsf64(struct wlc_info *wlc, struct wlc_d11rxhdr *rxh);
-static void wlc_recvctl(struct wlc_info *wlc,
- d11rxhdr_t *rxh, struct sk_buff *p);
-static uint wlc_calc_frame_len(struct wlc_info *wlc, ratespec_t rate,
+static u64 brcms_c_recover_tsf64(struct brcms_c_info *wlc,
+ struct brcms_d11rxhdr *rxh);
+static void brcms_c_recvctl(struct brcms_c_info *wlc,
+ struct d11rxhdr *rxh, struct sk_buff *p);
+static uint brcms_c_calc_frame_len(struct brcms_c_info *wlc, ratespec_t rate,
u8 preamble_type, uint dur);
-static uint wlc_calc_ack_time(struct wlc_info *wlc, ratespec_t rate,
+static uint brcms_c_calc_ack_time(struct brcms_c_info *wlc, ratespec_t rate,
u8 preamble_type);
-static uint wlc_calc_cts_time(struct wlc_info *wlc, ratespec_t rate,
+static uint brcms_c_calc_cts_time(struct brcms_c_info *wlc, ratespec_t rate,
u8 preamble_type);
/* interrupt, up/down, band */
-static void wlc_setband(struct wlc_info *wlc, uint bandunit);
-static chanspec_t wlc_init_chanspec(struct wlc_info *wlc);
-static void wlc_bandinit_ordered(struct wlc_info *wlc, chanspec_t chanspec);
-static void wlc_bsinit(struct wlc_info *wlc);
-static int wlc_duty_cycle_set(struct wlc_info *wlc, int duty_cycle, bool isOFDM,
- bool writeToShm);
-static void wlc_radio_hwdisable_upd(struct wlc_info *wlc);
-static bool wlc_radio_monitor_start(struct wlc_info *wlc);
-static void wlc_radio_timer(void *arg);
-static void wlc_radio_enable(struct wlc_info *wlc);
-static void wlc_radio_upd(struct wlc_info *wlc);
+static void brcms_c_setband(struct brcms_c_info *wlc, uint bandunit);
+static chanspec_t brcms_c_init_chanspec(struct brcms_c_info *wlc);
+static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
+ chanspec_t chanspec);
+static void brcms_c_bsinit(struct brcms_c_info *wlc);
+static int brcms_c_duty_cycle_set(struct brcms_c_info *wlc, int duty_cycle,
+ bool isOFDM, bool writeToShm);
+static void brcms_c_radio_hwdisable_upd(struct brcms_c_info *wlc);
+static bool brcms_c_radio_monitor_start(struct brcms_c_info *wlc);
+static void brcms_c_radio_timer(void *arg);
+static void brcms_c_radio_enable(struct brcms_c_info *wlc);
+static void brcms_c_radio_upd(struct brcms_c_info *wlc);
/* scan, association, BSS */
-static uint wlc_calc_ba_time(struct wlc_info *wlc, ratespec_t rate,
+static uint brcms_c_calc_ba_time(struct brcms_c_info *wlc, ratespec_t rate,
u8 preamble_type);
-static void wlc_update_mimo_band_bwcap(struct wlc_info *wlc, u8 bwcap);
-static void wlc_ht_update_sgi_rx(struct wlc_info *wlc, int val);
-static void wlc_ht_update_ldpc(struct wlc_info *wlc, s8 val);
-static void wlc_war16165(struct wlc_info *wlc, bool tx);
-
-static void wlc_wme_retries_write(struct wlc_info *wlc);
-static bool wlc_attach_stf_ant_init(struct wlc_info *wlc);
-static uint wlc_attach_module(struct wlc_info *wlc);
-static void wlc_detach_module(struct wlc_info *wlc);
-static void wlc_timers_deinit(struct wlc_info *wlc);
-static void wlc_down_led_upd(struct wlc_info *wlc);
-static uint wlc_down_del_timer(struct wlc_info *wlc);
-static void wlc_ofdm_rateset_war(struct wlc_info *wlc);
-static int _wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
- struct wlc_if *wlcif);
+static void brcms_c_update_mimo_band_bwcap(struct brcms_c_info *wlc, u8 bwcap);
+static void brcms_c_ht_update_sgi_rx(struct brcms_c_info *wlc, int val);
+static void brcms_c_ht_update_ldpc(struct brcms_c_info *wlc, s8 val);
+static void brcms_c_war16165(struct brcms_c_info *wlc, bool tx);
+
+static void brcms_c_wme_retries_write(struct brcms_c_info *wlc);
+static bool brcms_c_attach_stf_ant_init(struct brcms_c_info *wlc);
+static uint brcms_c_attach_module(struct brcms_c_info *wlc);
+static void brcms_c_detach_module(struct brcms_c_info *wlc);
+static void brcms_c_timers_deinit(struct brcms_c_info *wlc);
+static void brcms_c_down_led_upd(struct brcms_c_info *wlc);
+static uint brcms_c_down_del_timer(struct brcms_c_info *wlc);
+static void brcms_c_ofdm_rateset_war(struct brcms_c_info *wlc);
+static int _brcms_c_ioctl(struct brcms_c_info *wlc, int cmd, void *arg, int len,
+ struct brcms_c_if *wlcif);
/* conditions under which the PM bit should be set in outgoing frames and STAY_AWAKE is meaningful
*/
-bool wlc_ps_allowed(struct wlc_info *wlc)
+bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
{
int idx;
- struct wlc_bsscfg *cfg;
+ struct brcms_bss_cfg *cfg;
/* disallow PS when one of the following global conditions meets */
- if (!wlc->pub->associated || !wlc->PMenabled || wlc->PM_override)
+ if (!wlc->pub->associated)
return false;
/* disallow PS when one of these meets when not scanning */
- if (!wlc->PMblocked) {
- if (AP_ACTIVE(wlc) || wlc->monitor)
- return false;
- }
+ if (AP_ACTIVE(wlc) || wlc->monitor)
+ return false;
- FOREACH_AS_STA(wlc, idx, cfg) {
- /* disallow PS when one of the following bsscfg specific conditions meets */
- if (!cfg->BSS || !WLC_PORTOPEN(cfg))
- return false;
+ for (idx = 0; idx < BRCMS_MAXBSSCFG; idx++) {
+ cfg = wlc->bsscfg[idx];
+ if (cfg && BSSCFG_STA(cfg) && cfg->associated) {
+ /*
+ * disallow PS when one of the following
+ * bsscfg-specific conditions is met
+ */
+ if (!cfg->BSS || !BRCMS_PORTOPEN(cfg))
+ return false;
- if (!cfg->dtim_programmed)
- return false;
+ if (!cfg->dtim_programmed)
+ return false;
+ }
}
return true;
}
-void wlc_reset(struct wlc_info *wlc)
+void brcms_c_reset(struct brcms_c_info *wlc)
{
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
- wlc->check_for_unaligned_tbtt = false;
-
/* slurp up hw mac counters before core reset */
- wlc_statsupd(wlc);
+ brcms_c_statsupd(wlc);
/* reset our snapshot of macstat counters */
memset((char *)wlc->core->macstat_snapshot, 0,
- sizeof(macstat_t));
-
- wlc_bmac_reset(wlc->hw);
- wlc->txretried = 0;
+ sizeof(struct macstat));
+ brcms_b_reset(wlc->hw);
}
-void wlc_fatal_error(struct wlc_info *wlc)
+void brcms_c_fatal_error(struct brcms_c_info *wlc)
{
wiphy_err(wlc->wiphy, "wl%d: fatal error, reinitializing\n",
wlc->pub->unit);
- wl_init(wlc->wl);
+ brcms_init(wlc->wl);
}
-/* Return the channel the driver should initialize during wlc_init.
+/* Return the channel the driver should initialize during brcms_c_init.
* the channel may have to be changed from the currently configured channel
* if other configurations are in conflict (bandlocked, 11n mode disabled,
* invalid channel for current country, etc.)
*/
-static chanspec_t wlc_init_chanspec(struct wlc_info *wlc)
+static chanspec_t brcms_c_init_chanspec(struct brcms_c_info *wlc)
{
chanspec_t chanspec =
1 | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE |
@@ -385,7 +437,7 @@ static chanspec_t wlc_init_chanspec(struct wlc_info *wlc)
struct scb global_scb;
-static void wlc_init_scb(struct wlc_info *wlc, struct scb *scb)
+static void brcms_c_init_scb(struct brcms_c_info *wlc, struct scb *scb)
{
int i;
scb->flags = SCB_WMECAP | SCB_HTCAP;
@@ -393,12 +445,12 @@ static void wlc_init_scb(struct wlc_info *wlc, struct scb *scb)
scb->seqnum[i] = 0;
}
-void wlc_init(struct wlc_info *wlc)
+void brcms_c_init(struct brcms_c_info *wlc)
{
d11regs_t *regs;
chanspec_t chanspec;
int i;
- struct wlc_bsscfg *bsscfg;
+ struct brcms_bss_cfg *bsscfg;
bool mute = false;
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
@@ -411,27 +463,20 @@ void wlc_init(struct wlc_info *wlc)
if (wlc->pub->associated)
chanspec = wlc->home_chanspec;
else
- chanspec = wlc_init_chanspec(wlc);
-
- wlc_bmac_init(wlc->hw, chanspec, mute);
+ chanspec = brcms_c_init_chanspec(wlc);
- wlc->seckeys = wlc_bmac_read_shm(wlc->hw, M_SECRXKEYS_PTR) * 2;
- if (wlc->machwcap & MCAP_TKIPMIC)
- wlc->tkmickeys =
- wlc_bmac_read_shm(wlc->hw, M_TKMICKEYS_PTR) * 2;
+ brcms_b_init(wlc->hw, chanspec, mute);
/* update beacon listen interval */
- wlc_bcn_li_upd(wlc);
- wlc->bcn_wait_prd =
- (u8) (wlc_bmac_read_shm(wlc->hw, M_NOSLPZNATDTIM) >> 10);
+ brcms_c_bcn_li_upd(wlc);
/* the world is new again, so is our reported rate */
- wlc_reprate_init(wlc);
+ brcms_c_reprate_init(wlc);
/* write ethernet address to core */
FOREACH_BSS(wlc, i, bsscfg) {
- wlc_set_mac(bsscfg);
- wlc_set_bssid(bsscfg);
+ brcms_c_set_mac(bsscfg);
+ brcms_c_set_bssid(bsscfg);
}
/* Update tsf_cfprep if associated and up */
@@ -450,58 +495,56 @@ void wlc_init(struct wlc_info *wlc)
(bi << CFPREP_CBI_SHIFT));
/* Update maccontrol PM related bits */
- wlc_set_ps_ctrl(wlc);
+ brcms_c_set_ps_ctrl(wlc);
break;
}
}
}
- wlc_key_hw_init_all(wlc);
+ brcms_c_bandinit_ordered(wlc, chanspec);
- wlc_bandinit_ordered(wlc, chanspec);
-
- wlc_init_scb(wlc, &global_scb);
+ brcms_c_init_scb(wlc, &global_scb);
/* init probe response timeout */
- wlc_write_shm(wlc, M_PRS_MAXTIME, wlc->prb_resp_timeout);
+ brcms_c_write_shm(wlc, M_PRS_MAXTIME, wlc->prb_resp_timeout);
/* init max burst txop (framebursting) */
- wlc_write_shm(wlc, M_MBURST_TXOP,
+ brcms_c_write_shm(wlc, M_MBURST_TXOP,
(wlc->
_rifs ? (EDCF_AC_VO_TXOP_AP << 5) : MAXFRAMEBURST_TXOP));
/* initialize maximum allowed duty cycle */
- wlc_duty_cycle_set(wlc, wlc->tx_duty_cycle_ofdm, true, true);
- wlc_duty_cycle_set(wlc, wlc->tx_duty_cycle_cck, false, true);
+ brcms_c_duty_cycle_set(wlc, wlc->tx_duty_cycle_ofdm, true, true);
+ brcms_c_duty_cycle_set(wlc, wlc->tx_duty_cycle_cck, false, true);
/* Update some shared memory locations related to max AMPDU size allowed to received */
- wlc_ampdu_shm_upd(wlc->ampdu);
+ brcms_c_ampdu_shm_upd(wlc->ampdu);
/* band-specific inits */
- wlc_bsinit(wlc);
+ brcms_c_bsinit(wlc);
/* Enable EDCF mode (while the MAC is suspended) */
if (EDCF_ENAB(wlc->pub)) {
OR_REG(&regs->ifs_ctl, IFS_USEEDCF);
- wlc_edcf_setparams(wlc, false);
+ brcms_c_edcf_setparams(wlc, false);
}
/* Init precedence maps for empty FIFOs */
- wlc_tx_prec_map_init(wlc);
+ brcms_c_tx_prec_map_init(wlc);
/* read the ucode version if we have not yet done so */
if (wlc->ucode_rev == 0) {
wlc->ucode_rev =
- wlc_read_shm(wlc, M_BOM_REV_MAJOR) << NBITS(u16);
- wlc->ucode_rev |= wlc_read_shm(wlc, M_BOM_REV_MINOR);
+ brcms_c_read_shm(wlc, M_BOM_REV_MAJOR) << NBITS(u16);
+ wlc->ucode_rev |= brcms_c_read_shm(wlc, M_BOM_REV_MINOR);
}
/* ..now really unleash hell (allow the MAC out of suspend) */
- wlc_enable_mac(wlc);
+ brcms_c_enable_mac(wlc);
/* clear tx flow control */
- wlc_txflowcontrol_reset(wlc);
+ brcms_c_txflowcontrol_reset(wlc);
/* clear tx data fifo suspends */
wlc->tx_suspended = false;
@@ -510,39 +553,40 @@ void wlc_init(struct wlc_info *wlc)
W_REG(&wlc->regs->rfdisabledly, RFDISABLE_DEFAULT);
/* initialize mpc delay */
- wlc->mpc_delay_off = wlc->mpc_dlycnt = WLC_MPC_MIN_DELAYCNT;
+ wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
/*
* Initialize WME parameters; if they haven't been set by some other
* mechanism (IOVar, etc) then read them from the hardware.
*/
- if (WLC_WME_RETRY_SHORT_GET(wlc, 0) == 0) { /* Uninitialized; read from HW */
+ if (BRCMS_WME_RETRY_SHORT_GET(wlc, 0) == 0) {
+ /* Uninitialized; read from HW */
int ac;
for (ac = 0; ac < AC_COUNT; ac++) {
wlc->wme_retries[ac] =
- wlc_read_shm(wlc, M_AC_TXLMT_ADDR(ac));
+ brcms_c_read_shm(wlc, M_AC_TXLMT_ADDR(ac));
}
}
}
-void wlc_mac_bcn_promisc_change(struct wlc_info *wlc, bool promisc)
+void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc)
{
wlc->bcnmisc_monitor = promisc;
- wlc_mac_bcn_promisc(wlc);
+ brcms_c_mac_bcn_promisc(wlc);
}
-void wlc_mac_bcn_promisc(struct wlc_info *wlc)
+void brcms_c_mac_bcn_promisc(struct brcms_c_info *wlc)
{
if ((AP_ENAB(wlc->pub) && (N_ENAB(wlc->pub) || wlc->band->gmode)) ||
wlc->bcnmisc_ibss || wlc->bcnmisc_scan || wlc->bcnmisc_monitor)
- wlc_mctrl(wlc, MCTL_BCNS_PROMISC, MCTL_BCNS_PROMISC);
+ brcms_c_mctrl(wlc, MCTL_BCNS_PROMISC, MCTL_BCNS_PROMISC);
else
- wlc_mctrl(wlc, MCTL_BCNS_PROMISC, 0);
+ brcms_c_mctrl(wlc, MCTL_BCNS_PROMISC, 0);
}
/* set or clear maccontrol bits MCTL_PROMISC and MCTL_KEEPCONTROL */
-void wlc_mac_promisc(struct wlc_info *wlc)
+void brcms_c_mac_promisc(struct brcms_c_info *wlc)
{
u32 promisc_bits = 0;
@@ -550,21 +594,21 @@ void wlc_mac_promisc(struct wlc_info *wlc)
* Note: APs get all BSS traffic without the need to set the MCTL_PROMISC bit
* since all BSS data traffic is directed at the AP
*/
- if (PROMISC_ENAB(wlc->pub) && !AP_ENAB(wlc->pub) && !wlc->wet)
+ if (PROMISC_ENAB(wlc->pub) && !AP_ENAB(wlc->pub))
promisc_bits |= MCTL_PROMISC;
/* monitor mode needs both MCTL_PROMISC and MCTL_KEEPCONTROL
* Note: monitor mode also needs MCTL_BCNS_PROMISC, but that is
- * handled in wlc_mac_bcn_promisc()
+ * handled in brcms_c_mac_bcn_promisc()
*/
if (MONITOR_ENAB(wlc))
promisc_bits |= MCTL_PROMISC | MCTL_KEEPCONTROL;
- wlc_mctrl(wlc, MCTL_PROMISC | MCTL_KEEPCONTROL, promisc_bits);
+ brcms_c_mctrl(wlc, MCTL_PROMISC | MCTL_KEEPCONTROL, promisc_bits);
}
/* push sw hps and wake state through hardware */
-void wlc_set_ps_ctrl(struct wlc_info *wlc)
+void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
{
u32 v1, v2;
bool hps;
@@ -579,12 +623,12 @@ void wlc_set_ps_ctrl(struct wlc_info *wlc)
if (hps)
v2 |= MCTL_HPS;
- wlc_mctrl(wlc, MCTL_WAKE | MCTL_HPS, v2);
+ brcms_c_mctrl(wlc, MCTL_WAKE | MCTL_HPS, v2);
awake_before = ((v1 & MCTL_WAKE) || ((v1 & MCTL_HPS) == 0));
if (!awake_before)
- wlc_bmac_wait_for_wake(wlc->hw);
+ brcms_b_wait_for_wake(wlc->hw);
}
@@ -592,17 +636,17 @@ void wlc_set_ps_ctrl(struct wlc_info *wlc)
* Write this BSS config's MAC address to core.
* Updates RXE match engine.
*/
-int wlc_set_mac(struct wlc_bsscfg *cfg)
+int brcms_c_set_mac(struct brcms_bss_cfg *cfg)
{
int err = 0;
- struct wlc_info *wlc = cfg->wlc;
+ struct brcms_c_info *wlc = cfg->wlc;
if (cfg == wlc->cfg) {
/* enter the MAC addr into the RXE match registers */
- wlc_set_addrmatch(wlc, RCM_MAC_OFFSET, cfg->cur_etheraddr);
+ brcms_c_set_addrmatch(wlc, RCM_MAC_OFFSET, cfg->cur_etheraddr);
}
- wlc_ampdu_macaddr_upd(wlc);
+ brcms_c_ampdu_macaddr_upd(wlc);
return err;
}
@@ -610,17 +654,17 @@ int wlc_set_mac(struct wlc_bsscfg *cfg)
/* Write the BSS config's BSSID address to core (set_bssid in d11procs.tcl).
* Updates RXE match engine.
*/
-void wlc_set_bssid(struct wlc_bsscfg *cfg)
+void brcms_c_set_bssid(struct brcms_bss_cfg *cfg)
{
- struct wlc_info *wlc = cfg->wlc;
+ struct brcms_c_info *wlc = cfg->wlc;
/* if primary config, we need to update BSSID in RXE match registers */
if (cfg == wlc->cfg) {
- wlc_set_addrmatch(wlc, RCM_BSSID_OFFSET, cfg->BSSID);
+ brcms_c_set_addrmatch(wlc, RCM_BSSID_OFFSET, cfg->BSSID);
}
#ifdef SUPPORT_HWKEYS
else if (BSSCFG_STA(cfg) && cfg->BSS) {
- wlc_rcmta_add_bssid(wlc, cfg);
+ brcms_c_rcmta_add_bssid(wlc, cfg);
}
#endif
}
@@ -629,14 +673,14 @@ void wlc_set_bssid(struct wlc_bsscfg *cfg)
 * Suspend the MAC and update the slot timing
* for standard 11b/g (20us slots) or shortslot 11g (9us slots).
*/
-void wlc_switch_shortslot(struct wlc_info *wlc, bool shortslot)
+void brcms_c_switch_shortslot(struct brcms_c_info *wlc, bool shortslot)
{
int idx;
- struct wlc_bsscfg *cfg;
+ struct brcms_bss_cfg *cfg;
/* use the override if it is set */
- if (wlc->shortslot_override != WLC_SHORTSLOT_AUTO)
- shortslot = (wlc->shortslot_override == WLC_SHORTSLOT_ON);
+ if (wlc->shortslot_override != BRCMS_SHORTSLOT_AUTO)
+ shortslot = (wlc->shortslot_override == BRCMS_SHORTSLOT_ON);
if (wlc->shortslot == shortslot)
return;
@@ -654,27 +698,28 @@ void wlc_switch_shortslot(struct wlc_info *wlc, bool shortslot)
WLAN_CAPABILITY_SHORT_SLOT_TIME;
}
- wlc_bmac_set_shortslot(wlc->hw, shortslot);
+ brcms_b_set_shortslot(wlc->hw, shortslot);
}
-static u8 wlc_local_constraint_qdbm(struct wlc_info *wlc)
+static u8 brcms_c_local_constraint_qdbm(struct brcms_c_info *wlc)
{
u8 local;
s16 local_max;
- local = WLC_TXPWR_MAX;
+ local = BRCMS_TXPWR_MAX;
if (wlc->pub->associated &&
- (bcm_chspec_ctlchan(wlc->chanspec) ==
- bcm_chspec_ctlchan(wlc->home_chanspec))) {
+ (brcmu_chspec_ctlchan(wlc->chanspec) ==
+ brcmu_chspec_ctlchan(wlc->home_chanspec))) {
/* get the local power constraint if we are on the AP's
* channel [802.11h, 7.3.2.13]
*/
- /* Clamp the value between 0 and WLC_TXPWR_MAX w/o overflowing the target */
+ /* Clamp the value between 0 and BRCMS_TXPWR_MAX w/o
+ * overflowing the target */
local_max =
(wlc->txpwr_local_max -
- wlc->txpwr_local_constraint) * WLC_TXPWR_DB_FACTOR;
- if (local_max > 0 && local_max < WLC_TXPWR_MAX)
+ wlc->txpwr_local_constraint) * BRCMS_TXPWR_DB_FACTOR;
+ if (local_max > 0 && local_max < BRCMS_TXPWR_MAX)
return (u8) local_max;
if (local_max < 0)
return 0;
@@ -684,11 +729,11 @@ static u8 wlc_local_constraint_qdbm(struct wlc_info *wlc)
}
/* propagate home chanspec to all bsscfgs in case bsscfg->current_bss->chanspec is referenced */
-void wlc_set_home_chanspec(struct wlc_info *wlc, chanspec_t chanspec)
+void brcms_c_set_home_chanspec(struct brcms_c_info *wlc, chanspec_t chanspec)
{
if (wlc->home_chanspec != chanspec) {
int idx;
- struct wlc_bsscfg *cfg;
+ struct brcms_bss_cfg *cfg;
wlc->home_chanspec = chanspec;
@@ -702,7 +747,8 @@ void wlc_set_home_chanspec(struct wlc_info *wlc, chanspec_t chanspec)
}
}
-static void wlc_set_phy_chanspec(struct wlc_info *wlc, chanspec_t chanspec)
+static void brcms_c_set_phy_chanspec(struct brcms_c_info *wlc,
+ chanspec_t chanspec)
{
/* Save our copy of the chanspec */
wlc->chanspec = chanspec;
@@ -710,24 +756,24 @@ static void wlc_set_phy_chanspec(struct wlc_info *wlc, chanspec_t chanspec)
/* Set the chanspec and power limits for this locale after computing
* any 11h local tx power constraints.
*/
- wlc_channel_set_chanspec(wlc->cmi, chanspec,
- wlc_local_constraint_qdbm(wlc));
+ brcms_c_channel_set_chanspec(wlc->cmi, chanspec,
+ brcms_c_local_constraint_qdbm(wlc));
if (wlc->stf->ss_algosel_auto)
- wlc_stf_ss_algo_channel_get(wlc, &wlc->stf->ss_algo_channel,
+ brcms_c_stf_ss_algo_channel_get(wlc, &wlc->stf->ss_algo_channel,
chanspec);
- wlc_stf_ss_update(wlc, wlc->band);
+ brcms_c_stf_ss_update(wlc, wlc->band);
}
-void wlc_set_chanspec(struct wlc_info *wlc, chanspec_t chanspec)
+void brcms_c_set_chanspec(struct brcms_c_info *wlc, chanspec_t chanspec)
{
uint bandunit;
bool switchband = false;
chanspec_t old_chanspec = wlc->chanspec;
- if (!wlc_valid_chanspec_db(wlc->cmi, chanspec)) {
+ if (!brcms_c_valid_chanspec_db(wlc->cmi, chanspec)) {
wiphy_err(wlc->wiphy, "wl%d: %s: Bad channel %d\n",
wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec));
return;
@@ -735,7 +781,7 @@ void wlc_set_chanspec(struct wlc_info *wlc, chanspec_t chanspec)
/* Switch bands if necessary */
if (NBANDS(wlc) > 1) {
- bandunit = CHSPEC_WLCBANDUNIT(chanspec);
+ bandunit = CHSPEC_BANDUNIT(chanspec);
if (wlc->band->bandunit != bandunit || wlc->bandinit_pending) {
switchband = true;
if (wlc->bandlocked) {
@@ -745,263 +791,49 @@ void wlc_set_chanspec(struct wlc_info *wlc, chanspec_t chanspec)
CHSPEC_CHANNEL(chanspec));
return;
}
- /* BMAC_NOTE: should the setband call come after the wlc_bmac_chanspec() ?
- * if the setband updates (wlc_bsinit) use low level calls to inspect and
- * set state, the state inspected may be from the wrong band, or the
- * following wlc_bmac_set_chanspec() may undo the work.
+ /*
+ * should the setband call come after the
+ * brcms_b_chanspec() ? if the setband updates
+ * (brcms_c_bsinit) use low level calls to inspect and
+ * set state, the state inspected may be from the wrong
+ * band, or the following brcms_b_set_chanspec() may
+ * undo the work.
*/
- wlc_setband(wlc, bandunit);
+ brcms_c_setband(wlc, bandunit);
}
}
/* sync up phy/radio chanspec */
- wlc_set_phy_chanspec(wlc, chanspec);
+ brcms_c_set_phy_chanspec(wlc, chanspec);
/* init antenna selection */
if (CHSPEC_WLC_BW(old_chanspec) != CHSPEC_WLC_BW(chanspec)) {
- wlc_antsel_init(wlc->asi);
+ brcms_c_antsel_init(wlc->asi);
/* Fix the hardware rateset based on bw.
* Mainly add MCS32 for 40Mhz, remove MCS 32 for 20Mhz
*/
- wlc_rateset_bw_mcs_filter(&wlc->band->hw_rateset,
+ brcms_c_rateset_bw_mcs_filter(&wlc->band->hw_rateset,
wlc->band->
mimo_cap_40 ? CHSPEC_WLC_BW(chanspec)
: 0);
}
/* update some mac configuration since chanspec changed */
- wlc_ucode_mac_upd(wlc);
-}
-
-#if defined(BCMDBG)
-static int wlc_get_current_txpwr(struct wlc_info *wlc, void *pwr, uint len)
-{
- txpwr_limits_t txpwr;
- tx_power_t power;
- tx_power_legacy_t *old_power = NULL;
- int r, c;
- uint qdbm;
- bool override;
-
- if (len == sizeof(tx_power_legacy_t))
- old_power = (tx_power_legacy_t *) pwr;
- else if (len < sizeof(tx_power_t))
- return -EOVERFLOW;
-
- memset(&power, 0, sizeof(tx_power_t));
-
- power.chanspec = WLC_BAND_PI_RADIO_CHANSPEC;
- if (wlc->pub->associated)
- power.local_chanspec = wlc->home_chanspec;
-
- /* Return the user target tx power limits for the various rates. Note wlc_phy.c's
- * public interface only implements getting and setting a single value for all of
- * rates, so we need to fill the array ourselves.
- */
- wlc_phy_txpower_get(wlc->band->pi, &qdbm, &override);
- for (r = 0; r < WL_TX_POWER_RATES; r++) {
- power.user_limit[r] = (u8) qdbm;
- }
-
- power.local_max = wlc->txpwr_local_max * WLC_TXPWR_DB_FACTOR;
- power.local_constraint =
- wlc->txpwr_local_constraint * WLC_TXPWR_DB_FACTOR;
-
- power.antgain[0] = wlc->bandstate[BAND_2G_INDEX]->antgain;
- power.antgain[1] = wlc->bandstate[BAND_5G_INDEX]->antgain;
-
- wlc_channel_reg_limits(wlc->cmi, power.chanspec, &txpwr);
-
-#if WL_TX_POWER_CCK_NUM != WLC_NUM_RATES_CCK
-#error "WL_TX_POWER_CCK_NUM != WLC_NUM_RATES_CCK"
-#endif
-
- /* CCK tx power limits */
- for (c = 0, r = WL_TX_POWER_CCK_FIRST; c < WL_TX_POWER_CCK_NUM;
- c++, r++)
- power.reg_limit[r] = txpwr.cck[c];
-
-#if WL_TX_POWER_OFDM_NUM != WLC_NUM_RATES_OFDM
-#error "WL_TX_POWER_OFDM_NUM != WLC_NUM_RATES_OFDM"
-#endif
-
- /* 20 MHz OFDM SISO tx power limits */
- for (c = 0, r = WL_TX_POWER_OFDM_FIRST; c < WL_TX_POWER_OFDM_NUM;
- c++, r++)
- power.reg_limit[r] = txpwr.ofdm[c];
-
- if (WLC_PHY_11N_CAP(wlc->band)) {
-
- /* 20 MHz OFDM CDD tx power limits */
- for (c = 0, r = WL_TX_POWER_OFDM20_CDD_FIRST;
- c < WL_TX_POWER_OFDM_NUM; c++, r++)
- power.reg_limit[r] = txpwr.ofdm_cdd[c];
-
- /* 40 MHz OFDM SISO tx power limits */
- for (c = 0, r = WL_TX_POWER_OFDM40_SISO_FIRST;
- c < WL_TX_POWER_OFDM_NUM; c++, r++)
- power.reg_limit[r] = txpwr.ofdm_40_siso[c];
-
- /* 40 MHz OFDM CDD tx power limits */
- for (c = 0, r = WL_TX_POWER_OFDM40_CDD_FIRST;
- c < WL_TX_POWER_OFDM_NUM; c++, r++)
- power.reg_limit[r] = txpwr.ofdm_40_cdd[c];
-
-#if WL_TX_POWER_MCS_1_STREAM_NUM != WLC_NUM_RATES_MCS_1_STREAM
-#error "WL_TX_POWER_MCS_1_STREAM_NUM != WLC_NUM_RATES_MCS_1_STREAM"
-#endif
-
- /* 20MHz MCS0-7 SISO tx power limits */
- for (c = 0, r = WL_TX_POWER_MCS20_SISO_FIRST;
- c < WLC_NUM_RATES_MCS_1_STREAM; c++, r++)
- power.reg_limit[r] = txpwr.mcs_20_siso[c];
-
- /* 20MHz MCS0-7 CDD tx power limits */
- for (c = 0, r = WL_TX_POWER_MCS20_CDD_FIRST;
- c < WLC_NUM_RATES_MCS_1_STREAM; c++, r++)
- power.reg_limit[r] = txpwr.mcs_20_cdd[c];
-
- /* 20MHz MCS0-7 STBC tx power limits */
- for (c = 0, r = WL_TX_POWER_MCS20_STBC_FIRST;
- c < WLC_NUM_RATES_MCS_1_STREAM; c++, r++)
- power.reg_limit[r] = txpwr.mcs_20_stbc[c];
-
- /* 40MHz MCS0-7 SISO tx power limits */
- for (c = 0, r = WL_TX_POWER_MCS40_SISO_FIRST;
- c < WLC_NUM_RATES_MCS_1_STREAM; c++, r++)
- power.reg_limit[r] = txpwr.mcs_40_siso[c];
-
- /* 40MHz MCS0-7 CDD tx power limits */
- for (c = 0, r = WL_TX_POWER_MCS40_CDD_FIRST;
- c < WLC_NUM_RATES_MCS_1_STREAM; c++, r++)
- power.reg_limit[r] = txpwr.mcs_40_cdd[c];
-
- /* 40MHz MCS0-7 STBC tx power limits */
- for (c = 0, r = WL_TX_POWER_MCS40_STBC_FIRST;
- c < WLC_NUM_RATES_MCS_1_STREAM; c++, r++)
- power.reg_limit[r] = txpwr.mcs_40_stbc[c];
-
-#if WL_TX_POWER_MCS_2_STREAM_NUM != WLC_NUM_RATES_MCS_2_STREAM
-#error "WL_TX_POWER_MCS_2_STREAM_NUM != WLC_NUM_RATES_MCS_2_STREAM"
-#endif
-
- /* 20MHz MCS8-15 SDM tx power limits */
- for (c = 0, r = WL_TX_POWER_MCS20_SDM_FIRST;
- c < WLC_NUM_RATES_MCS_2_STREAM; c++, r++)
- power.reg_limit[r] = txpwr.mcs_20_mimo[c];
-
- /* 40MHz MCS8-15 SDM tx power limits */
- for (c = 0, r = WL_TX_POWER_MCS40_SDM_FIRST;
- c < WLC_NUM_RATES_MCS_2_STREAM; c++, r++)
- power.reg_limit[r] = txpwr.mcs_40_mimo[c];
-
- /* MCS 32 */
- power.reg_limit[WL_TX_POWER_MCS_32] = txpwr.mcs32;
- }
-
- wlc_phy_txpower_get_current(wlc->band->pi, &power,
- CHSPEC_CHANNEL(power.chanspec));
-
- /* copy the tx_power_t struct to the return buffer,
- * or convert to a tx_power_legacy_t struct
- */
- if (!old_power) {
- memcpy(pwr, &power, sizeof(tx_power_t));
- } else {
- int band_idx = CHSPEC_IS2G(power.chanspec) ? 0 : 1;
-
- memset(old_power, 0, sizeof(tx_power_legacy_t));
-
- old_power->txpwr_local_max = power.local_max;
- old_power->txpwr_local_constraint = power.local_constraint;
- if (CHSPEC_IS2G(power.chanspec)) {
- old_power->txpwr_chan_reg_max = txpwr.cck[0];
- old_power->txpwr_est_Pout[band_idx] =
- power.est_Pout_cck;
- old_power->txpwr_est_Pout_gofdm = power.est_Pout[0];
- } else {
- old_power->txpwr_chan_reg_max = txpwr.ofdm[0];
- old_power->txpwr_est_Pout[band_idx] = power.est_Pout[0];
- }
- old_power->txpwr_antgain[0] = power.antgain[0];
- old_power->txpwr_antgain[1] = power.antgain[1];
-
- for (r = 0; r < NUM_PWRCTRL_RATES; r++) {
- old_power->txpwr_band_max[r] = power.user_limit[r];
- old_power->txpwr_limit[r] = power.reg_limit[r];
- old_power->txpwr_target[band_idx][r] = power.target[r];
- if (CHSPEC_IS2G(power.chanspec))
- old_power->txpwr_bphy_cck_max[r] =
- power.board_limit[r];
- else
- old_power->txpwr_aphy_max[r] =
- power.board_limit[r];
- }
- }
-
- return 0;
-}
-#endif /* defined(BCMDBG) */
-
-static u32 wlc_watchdog_backup_bi(struct wlc_info *wlc)
-{
- u32 bi;
- bi = 2 * wlc->cfg->current_bss->dtim_period *
- wlc->cfg->current_bss->beacon_period;
- if (wlc->bcn_li_dtim)
- bi *= wlc->bcn_li_dtim;
- else if (wlc->bcn_li_bcn)
- /* recalculate bi based on bcn_li_bcn */
- bi = 2 * wlc->bcn_li_bcn * wlc->cfg->current_bss->beacon_period;
-
- if (bi < 2 * TIMER_INTERVAL_WATCHDOG)
- bi = 2 * TIMER_INTERVAL_WATCHDOG;
- return bi;
-}
-
-/* Change to run the watchdog either from a periodic timer or from tbtt handler.
- * Call watchdog from tbtt handler if tbtt is true, watchdog timer otherwise.
- */
-void wlc_watchdog_upd(struct wlc_info *wlc, bool tbtt)
-{
- /* make sure changing watchdog driver is allowed */
- if (!wlc->pub->up || !wlc->pub->align_wd_tbtt)
- return;
- if (!tbtt && wlc->WDarmed) {
- wl_del_timer(wlc->wl, wlc->wdtimer);
- wlc->WDarmed = false;
- }
-
- /* stop watchdog timer and use tbtt interrupt to drive watchdog */
- if (tbtt && wlc->WDarmed) {
- wl_del_timer(wlc->wl, wlc->wdtimer);
- wlc->WDarmed = false;
- wlc->WDlast = OSL_SYSUPTIME();
- }
- /* arm watchdog timer and drive the watchdog there */
- else if (!tbtt && !wlc->WDarmed) {
- wl_add_timer(wlc->wl, wlc->wdtimer, TIMER_INTERVAL_WATCHDOG,
- true);
- wlc->WDarmed = true;
- }
- if (tbtt && !wlc->WDarmed) {
- wl_add_timer(wlc->wl, wlc->wdtimer, wlc_watchdog_backup_bi(wlc),
- true);
- wlc->WDarmed = true;
- }
+ brcms_c_ucode_mac_upd(wlc);
}
-ratespec_t wlc_lowest_basic_rspec(struct wlc_info *wlc, wlc_rateset_t *rs)
+ratespec_t brcms_c_lowest_basic_rspec(struct brcms_c_info *wlc,
+ wlc_rateset_t *rs)
{
ratespec_t lowest_basic_rspec;
uint i;
/* Use the lowest basic rate */
- lowest_basic_rspec = rs->rates[0] & WLC_RATE_MASK;
+ lowest_basic_rspec = rs->rates[0] & BRCMS_RATE_MASK;
for (i = 0; i < rs->count; i++) {
- if (rs->rates[i] & WLC_RATE_FLAG) {
- lowest_basic_rspec = rs->rates[i] & WLC_RATE_MASK;
+ if (rs->rates[i] & BRCMS_RATE_FLAG) {
+ lowest_basic_rspec = rs->rates[i] & BRCMS_RATE_MASK;
break;
}
}
@@ -1018,63 +850,63 @@ ratespec_t wlc_lowest_basic_rspec(struct wlc_info *wlc, wlc_rateset_t *rs)
/* This function changes the phytxctl for beacon based on current beacon ratespec AND txant
* setting as per this table:
* ratespec CCK ant = wlc->stf->txant
- * OFDM ant = 3
+ * OFDM ant = 3
*/
-void wlc_beacon_phytxctl_txant_upd(struct wlc_info *wlc, ratespec_t bcn_rspec)
+void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
+ ratespec_t bcn_rspec)
{
u16 phyctl;
u16 phytxant = wlc->stf->phytxant;
u16 mask = PHY_TXC_ANT_MASK;
/* for non-siso rates or default setting, use the available chains */
- if (WLC_PHY_11N_CAP(wlc->band)) {
- phytxant = wlc_stf_phytxchain_sel(wlc, bcn_rspec);
- }
+ if (BRCMS_PHY_11N_CAP(wlc->band))
+ phytxant = brcms_c_stf_phytxchain_sel(wlc, bcn_rspec);
- phyctl = wlc_read_shm(wlc, M_BCN_PCTLWD);
+ phyctl = brcms_c_read_shm(wlc, M_BCN_PCTLWD);
phyctl = (phyctl & ~mask) | phytxant;
- wlc_write_shm(wlc, M_BCN_PCTLWD, phyctl);
+ brcms_c_write_shm(wlc, M_BCN_PCTLWD, phyctl);
}
/* centralized protection config change function to simplify debugging, no consistency checking
* this should be called only on changes to avoid overhead in periodic function
*/
-void wlc_protection_upd(struct wlc_info *wlc, uint idx, int val)
+void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val)
{
BCMMSG(wlc->wiphy, "idx %d, val %d\n", idx, val);
switch (idx) {
- case WLC_PROT_G_SPEC:
+ case BRCMS_PROT_G_SPEC:
wlc->protection->_g = (bool) val;
break;
- case WLC_PROT_G_OVR:
+ case BRCMS_PROT_G_OVR:
wlc->protection->g_override = (s8) val;
break;
- case WLC_PROT_G_USER:
+ case BRCMS_PROT_G_USER:
wlc->protection->gmode_user = (u8) val;
break;
- case WLC_PROT_OVERLAP:
+ case BRCMS_PROT_OVERLAP:
wlc->protection->overlap = (s8) val;
break;
- case WLC_PROT_N_USER:
+ case BRCMS_PROT_N_USER:
wlc->protection->nmode_user = (s8) val;
break;
- case WLC_PROT_N_CFG:
+ case BRCMS_PROT_N_CFG:
wlc->protection->n_cfg = (s8) val;
break;
- case WLC_PROT_N_CFG_OVR:
+ case BRCMS_PROT_N_CFG_OVR:
wlc->protection->n_cfg_override = (s8) val;
break;
- case WLC_PROT_N_NONGF:
+ case BRCMS_PROT_N_NONGF:
wlc->protection->nongf = (bool) val;
break;
- case WLC_PROT_N_NONGF_OVR:
+ case BRCMS_PROT_N_NONGF_OVR:
wlc->protection->nongf_override = (s8) val;
break;
- case WLC_PROT_N_PAM_OVR:
+ case BRCMS_PROT_N_PAM_OVR:
wlc->protection->n_pam_override = (s8) val;
break;
- case WLC_PROT_N_OBSS:
+ case BRCMS_PROT_N_OBSS:
wlc->protection->n_obss = (bool) val;
break;
@@ -1084,22 +916,22 @@ void wlc_protection_upd(struct wlc_info *wlc, uint idx, int val)
}
-static void wlc_ht_update_sgi_rx(struct wlc_info *wlc, int val)
+static void brcms_c_ht_update_sgi_rx(struct brcms_c_info *wlc, int val)
{
wlc->ht_cap.cap_info &= ~(IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40);
- wlc->ht_cap.cap_info |= (val & WLC_N_SGI_20) ?
+ wlc->ht_cap.cap_info |= (val & BRCMS_N_SGI_20) ?
IEEE80211_HT_CAP_SGI_20 : 0;
- wlc->ht_cap.cap_info |= (val & WLC_N_SGI_40) ?
+ wlc->ht_cap.cap_info |= (val & BRCMS_N_SGI_40) ?
IEEE80211_HT_CAP_SGI_40 : 0;
if (wlc->pub->up) {
- wlc_update_beacon(wlc);
- wlc_update_probe_resp(wlc, true);
+ brcms_c_update_beacon(wlc);
+ brcms_c_update_probe_resp(wlc, true);
}
}
-static void wlc_ht_update_ldpc(struct wlc_info *wlc, s8 val)
+static void brcms_c_ht_update_ldpc(struct brcms_c_info *wlc, s8 val)
{
wlc->stf->ldpc = val;
@@ -1108,8 +940,8 @@ static void wlc_ht_update_ldpc(struct wlc_info *wlc, s8 val)
wlc->ht_cap.cap_info |= IEEE80211_HT_CAP_LDPC_CODING;
if (wlc->pub->up) {
- wlc_update_beacon(wlc);
- wlc_update_probe_resp(wlc, true);
+ brcms_c_update_beacon(wlc);
+ brcms_c_update_probe_resp(wlc, true);
wlc_phy_ldpc_override_set(wlc->band->pi, (val ? true : false));
}
}
@@ -1118,12 +950,12 @@ static void wlc_ht_update_ldpc(struct wlc_info *wlc, s8 val)
* ucode, hwmac update
* Channel dependent updates for ucode and hw
*/
-static void wlc_ucode_mac_upd(struct wlc_info *wlc)
+static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc)
{
/* enable or disable any active IBSSs depending on whether or not
* we are on the home channel
*/
- if (wlc->home_chanspec == WLC_BAND_PI_RADIO_CHANSPEC) {
+ if (wlc->home_chanspec == BRCMS_BAND_PI_RADIO_CHANSPEC) {
if (wlc->pub->associated) {
/* BMAC_NOTE: This is something that should be fixed in ucode inits.
* I think that the ucode inits set up the bcn templates and shm values
@@ -1131,8 +963,8 @@ static void wlc_ucode_mac_upd(struct wlc_info *wlc)
* to set up a beacon for testing, the test routines should write it down,
* not expect the inits to populate a bogus beacon.
*/
- if (WLC_PHY_11N_CAP(wlc->band)) {
- wlc_write_shm(wlc, M_BCN_TXTSF_OFFSET,
+ if (BRCMS_PHY_11N_CAP(wlc->band)) {
+ brcms_c_write_shm(wlc, M_BCN_TXTSF_OFFSET,
wlc->band->bcntsfoff);
}
}
@@ -1141,11 +973,12 @@ static void wlc_ucode_mac_upd(struct wlc_info *wlc)
}
/* update the various promisc bits */
- wlc_mac_bcn_promisc(wlc);
- wlc_mac_promisc(wlc);
+ brcms_c_mac_bcn_promisc(wlc);
+ brcms_c_mac_promisc(wlc);
}
-static void wlc_bandinit_ordered(struct wlc_info *wlc, chanspec_t chanspec)
+static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
+ chanspec_t chanspec)
{
wlc_rateset_t default_rateset;
uint parkband;
@@ -1157,11 +990,12 @@ static void wlc_bandinit_ordered(struct wlc_info *wlc, chanspec_t chanspec)
* figure out the right band to park on
*/
if (wlc->bandlocked || NBANDS(wlc) == 1) {
- parkband = wlc->band->bandunit; /* updated in wlc_bandlock() */
+ /* updated in brcms_c_bandlock() */
+ parkband = wlc->band->bandunit;
band_order[0] = band_order[1] = parkband;
} else {
/* park on the band of the specified chanspec */
- parkband = CHSPEC_WLCBANDUNIT(chanspec);
+ parkband = CHSPEC_BANDUNIT(chanspec);
/* order so that parkband initialize last */
band_order[0] = parkband ^ 1;
@@ -1174,43 +1008,44 @@ static void wlc_bandinit_ordered(struct wlc_info *wlc, chanspec_t chanspec)
wlc->band = wlc->bandstate[j];
- wlc_default_rateset(wlc, &default_rateset);
+ brcms_default_rateset(wlc, &default_rateset);
/* fill in hw_rate */
- wlc_rateset_filter(&default_rateset, &wlc->band->hw_rateset,
- false, WLC_RATES_CCK_OFDM, WLC_RATE_MASK,
+ brcms_c_rateset_filter(&default_rateset, &wlc->band->hw_rateset,
+ false, BRCMS_RATES_CCK_OFDM, BRCMS_RATE_MASK,
(bool) N_ENAB(wlc->pub));
/* init basic rate lookup */
- wlc_rate_lookup_init(wlc, &default_rateset);
+ brcms_c_rate_lookup_init(wlc, &default_rateset);
}
/* sync up phy/radio chanspec */
- wlc_set_phy_chanspec(wlc, chanspec);
+ brcms_c_set_phy_chanspec(wlc, chanspec);
}
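
[Editorial aside, not part of the patch] A minimal standalone sketch of the band-ordering trick used in brcms_c_bandinit_ordered() above: with only two band units, parkband ^ 1 selects the other band, so the park band is initialized last (the matching band_order[1] = parkband assignment sits outside this hunk).

#include <stdio.h>

int main(void)
{
	unsigned int parkband = 1;	/* e.g. the 5 GHz band unit */
	unsigned int band_order[2];

	band_order[0] = parkband ^ 1;	/* the other band is brought up first */
	band_order[1] = parkband;	/* park band is initialized last */

	printf("init order: band %u, then band %u\n",
	       band_order[0], band_order[1]);
	return 0;
}
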
/* band-specific init */
-static void WLBANDINITFN(wlc_bsinit) (struct wlc_info *wlc)
+static void brcms_c_bsinit(struct brcms_c_info *wlc)
{
BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n",
wlc->pub->unit, wlc->band->bandunit);
/* write ucode ACK/CTS rate table */
- wlc_set_ratetable(wlc);
+ brcms_c_set_ratetable(wlc);
/* update some band specific mac configuration */
- wlc_ucode_mac_upd(wlc);
+ brcms_c_ucode_mac_upd(wlc);
/* init antenna selection */
- wlc_antsel_init(wlc->asi);
+ brcms_c_antsel_init(wlc->asi);
}
/* switch to and initialize new band */
-static void WLBANDINITFN(wlc_setband) (struct wlc_info *wlc, uint bandunit)
+static void brcms_c_setband(struct brcms_c_info *wlc,
+ uint bandunit)
{
int idx;
- struct wlc_bsscfg *cfg;
+ struct brcms_bss_cfg *cfg;
wlc->band = wlc->bandstate[bandunit];
@@ -1218,19 +1053,22 @@ static void WLBANDINITFN(wlc_setband) (struct wlc_info *wlc, uint bandunit)
return;
/* wait for at least one beacon before entering sleeping state */
- wlc->PMawakebcn = true;
- FOREACH_AS_STA(wlc, idx, cfg)
- cfg->PMawakebcn = true;
- wlc_set_ps_ctrl(wlc);
+ for (idx = 0; idx < BRCMS_MAXBSSCFG; idx++) {
+ cfg = wlc->bsscfg[idx];
+ if (cfg && BSSCFG_STA(cfg) && cfg->associated)
+ cfg->PMawakebcn = true;
+ }
+ brcms_c_set_ps_ctrl(wlc);
/* band-specific initializations */
- wlc_bsinit(wlc);
+ brcms_c_bsinit(wlc);
}
/* Initialize a WME Parameter Info Element with default STA parameters from WMM Spec, Table 12 */
-void wlc_wme_initparams_sta(struct wlc_info *wlc, wme_param_ie_t *pe)
+void
+brcms_c_wme_initparams_sta(struct brcms_c_info *wlc, struct wme_param_ie *pe)
{
- static const wme_param_ie_t stadef = {
+ static const struct wme_param_ie stadef = {
WME_OUI,
WME_TYPE,
WME_SUBTYPE_PARAM_IE,
@@ -1251,12 +1089,12 @@ void wlc_wme_initparams_sta(struct wlc_info *wlc, wme_param_ie_t *pe)
memcpy(pe, &stadef, sizeof(*pe));
}
-void wlc_wme_setparams(struct wlc_info *wlc, u16 aci,
+void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
const struct ieee80211_tx_queue_params *params,
bool suspend)
{
int i;
- shm_acparams_t acp_shm;
+ struct shm_acparams acp_shm;
u16 *shm_entry;
/* Only apply params if the core is out of reset and has clocks */
@@ -1266,10 +1104,8 @@ void wlc_wme_setparams(struct wlc_info *wlc, u16 aci,
return;
}
- wlc->wme_admctl = 0;
-
do {
- memset((char *)&acp_shm, 0, sizeof(shm_acparams_t));
+ memset((char *)&acp_shm, 0, sizeof(struct shm_acparams));
/* fill in shm ac params struct */
acp_shm.txop = le16_to_cpu(params->txop);
/* convert from units of 32us to us for ucode */
@@ -1283,7 +1119,7 @@ void wlc_wme_setparams(struct wlc_info *wlc, u16 aci,
if (acp_shm.aifs < EDCF_AIFSN_MIN
|| acp_shm.aifs > EDCF_AIFSN_MAX) {
- wiphy_err(wlc->wiphy, "wl%d: wlc_edcf_setparams: bad "
+ wiphy_err(wlc->wiphy, "wl%d: edcf_setparams: bad "
"aifs %d\n", wlc->pub->unit, acp_shm.aifs);
continue;
}
@@ -1295,7 +1131,7 @@ void wlc_wme_setparams(struct wlc_info *wlc, u16 aci,
R_REG(&wlc->regs->tsf_random) & acp_shm.cwcur;
acp_shm.reggap = acp_shm.bslots + acp_shm.aifs;
/* Indicate the new params to the ucode */
- acp_shm.status = wlc_read_shm(wlc, (M_EDCF_QINFO +
+ acp_shm.status = brcms_c_read_shm(wlc, (M_EDCF_QINFO +
wme_shmemacindex(aci) *
M_EDCF_QLEN +
M_EDCF_STATUS_OFF));
@@ -1303,8 +1139,8 @@ void wlc_wme_setparams(struct wlc_info *wlc, u16 aci,
/* Fill in shm acparam table */
shm_entry = (u16 *) &acp_shm;
- for (i = 0; i < (int)sizeof(shm_acparams_t); i += 2)
- wlc_write_shm(wlc,
+ for (i = 0; i < (int)sizeof(struct shm_acparams); i += 2)
+ brcms_c_write_shm(wlc,
M_EDCF_QINFO +
wme_shmemacindex(aci) * M_EDCF_QLEN + i,
*shm_entry++);
@@ -1312,18 +1148,18 @@ void wlc_wme_setparams(struct wlc_info *wlc, u16 aci,
} while (0);
if (suspend)
- wlc_suspend_mac_and_wait(wlc);
+ brcms_c_suspend_mac_and_wait(wlc);
if (suspend)
- wlc_enable_mac(wlc);
+ brcms_c_enable_mac(wlc);
}
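
[Editorial aside, not part of the patch] The comment inside brcms_c_wme_setparams() above notes that the TXOP field arrives in units of 32 microseconds and is converted to plain microseconds for the ucode; the driver's own conversion macro lies outside this hunk, so the factor of 32 below is taken from that comment only.

#include <stdio.h>

#define TXOP_UNIT_US 32	/* one EDCA TXOP unit is 32 microseconds */

static unsigned int txop_units_to_us(unsigned int txop_units)
{
	return txop_units * TXOP_UNIT_US;
}

int main(void)
{
	/* e.g. a TXOP of 94 units corresponds to 3008 us */
	printf("94 units = %u us\n", txop_units_to_us(94));
	return 0;
}
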
-void wlc_edcf_setparams(struct wlc_info *wlc, bool suspend)
+void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
{
u16 aci;
int i_ac;
- edcf_acparam_t *edcf_acp;
+ struct edcf_acparam *edcf_acp;
struct ieee80211_tx_queue_params txq_pars;
struct ieee80211_tx_queue_params *params = &txq_pars;
@@ -1334,15 +1170,11 @@ void wlc_edcf_setparams(struct wlc_info *wlc, bool suspend)
* STA uses AC params from wme_param_ie.
*/
- edcf_acp = (edcf_acparam_t *) &wlc->wme_param_ie.acparam[0];
+ edcf_acp = (struct edcf_acparam *) &wlc->wme_param_ie.acparam[0];
for (i_ac = 0; i_ac < AC_COUNT; i_ac++, edcf_acp++) {
/* find out which ac this set of params applies to */
aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
- /* set the admission control policy for this AC */
- if (edcf_acp->ACI & EDCF_ACM_MASK) {
- wlc->wme_admctl |= 1 << aci;
- }
/* fill in shm ac params struct */
params->txop = edcf_acp->TXOP;
@@ -1353,25 +1185,25 @@ void wlc_edcf_setparams(struct wlc_info *wlc, bool suspend)
/* CWmax = 2^(ECWmax) - 1 */
params->cw_max = EDCF_ECW2CW((edcf_acp->ECW & EDCF_ECWMAX_MASK)
>> EDCF_ECWMAX_SHIFT);
- wlc_wme_setparams(wlc, aci, params, suspend);
+ brcms_c_wme_setparams(wlc, aci, params, suspend);
}
if (suspend)
- wlc_suspend_mac_and_wait(wlc);
+ brcms_c_suspend_mac_and_wait(wlc);
if (AP_ENAB(wlc->pub) && WME_ENAB(wlc->pub)) {
- wlc_update_beacon(wlc);
- wlc_update_probe_resp(wlc, false);
+ brcms_c_update_beacon(wlc);
+ brcms_c_update_probe_resp(wlc, false);
}
if (suspend)
- wlc_enable_mac(wlc);
+ brcms_c_enable_mac(wlc);
}
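
[Editorial aside, not part of the patch] brcms_c_edcf_setparams() above derives the contention windows with the standard EDCF encoding CW = 2^ECW - 1, with ECWmin and ECWmax packed into one byte. A standalone sketch of that conversion; the local mask and shift names below are illustrative stand-ins, not the driver's EDCF_* definitions.

#include <stdio.h>

/* illustrative field layout: ECWmin in the low nibble, ECWmax in the high */
#define ECWMIN_MASK	0x0f
#define ECWMAX_MASK	0xf0
#define ECWMAX_SHIFT	4
#define ECW2CW(exp)	((1U << (exp)) - 1)

int main(void)
{
	unsigned char ecw = 0xa4;	/* ECWmin = 4, ECWmax = 10 */
	unsigned int cw_min = ECW2CW(ecw & ECWMIN_MASK);
	unsigned int cw_max = ECW2CW((ecw & ECWMAX_MASK) >> ECWMAX_SHIFT);

	/* prints CWmin=15 CWmax=1023 */
	printf("CWmin=%u CWmax=%u\n", cw_min, cw_max);
	return 0;
}
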
-bool wlc_timers_init(struct wlc_info *wlc, int unit)
+bool brcms_c_timers_init(struct brcms_c_info *wlc, int unit)
{
- wlc->wdtimer = wl_init_timer(wlc->wl, wlc_watchdog_by_timer,
+ wlc->wdtimer = brcms_init_timer(wlc->wl, brcms_c_watchdog_by_timer,
wlc, "watchdog");
if (!wlc->wdtimer) {
wiphy_err(wlc->wiphy, "wl%d: wl_init_timer for wdtimer "
@@ -1379,7 +1211,7 @@ bool wlc_timers_init(struct wlc_info *wlc, int unit)
goto fail;
}
- wlc->radio_timer = wl_init_timer(wlc->wl, wlc_radio_timer,
+ wlc->radio_timer = brcms_init_timer(wlc->wl, brcms_c_radio_timer,
wlc, "radio");
if (!wlc->radio_timer) {
wiphy_err(wlc->wiphy, "wl%d: wl_init_timer for radio_timer "
@@ -1394,44 +1226,35 @@ bool wlc_timers_init(struct wlc_info *wlc, int unit)
}
/*
- * Initialize wlc_info default values ...
+ * Initialize brcms_c_info default values ...
* may get overrides later in this function
*/
-void wlc_info_init(struct wlc_info *wlc, int unit)
+void brcms_c_info_init(struct brcms_c_info *wlc, int unit)
{
int i;
/* Assume the device is there until proven otherwise */
wlc->device_present = true;
- /* set default power output percentage to 100 percent */
- wlc->txpwr_percent = 100;
-
/* Save our copy of the chanspec */
wlc->chanspec = CH20MHZ_CHSPEC(1);
- /* initialize CCK preamble mode to unassociated state */
- wlc->shortpreamble = false;
-
- wlc->legacy_probe = true;
-
/* various 802.11g modes */
wlc->shortslot = false;
- wlc->shortslot_override = WLC_SHORTSLOT_AUTO;
+ wlc->shortslot_override = BRCMS_SHORTSLOT_AUTO;
- wlc->barker_overlap_control = true;
- wlc->barker_preamble = WLC_BARKER_SHORT_ALLOWED;
- wlc->txburst_limit_override = AUTO;
+ brcms_c_protection_upd(wlc, BRCMS_PROT_G_OVR, BRCMS_PROTECTION_AUTO);
+ brcms_c_protection_upd(wlc, BRCMS_PROT_G_SPEC, false);
- wlc_protection_upd(wlc, WLC_PROT_G_OVR, WLC_PROTECTION_AUTO);
- wlc_protection_upd(wlc, WLC_PROT_G_SPEC, false);
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_CFG_OVR,
+ BRCMS_PROTECTION_AUTO);
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_CFG, BRCMS_N_PROTECTION_OFF);
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_NONGF_OVR,
+ BRCMS_PROTECTION_AUTO);
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_NONGF, false);
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_PAM_OVR, AUTO);
- wlc_protection_upd(wlc, WLC_PROT_N_CFG_OVR, WLC_PROTECTION_AUTO);
- wlc_protection_upd(wlc, WLC_PROT_N_CFG, WLC_N_PROTECTION_OFF);
- wlc_protection_upd(wlc, WLC_PROT_N_NONGF_OVR, WLC_PROTECTION_AUTO);
- wlc_protection_upd(wlc, WLC_PROT_N_NONGF, false);
- wlc_protection_upd(wlc, WLC_PROT_N_PAM_OVR, AUTO);
-
- wlc_protection_upd(wlc, WLC_PROT_OVERLAP, WLC_PROTECTION_CTL_OVERLAP);
+ brcms_c_protection_upd(wlc, BRCMS_PROT_OVERLAP,
+ BRCMS_PROTECTION_CTL_OVERLAP);
/* 802.11g draft 4.0 NonERP elt advertisement */
wlc->include_legacy_erp = true;
@@ -1439,7 +1262,7 @@ void wlc_info_init(struct wlc_info *wlc, int unit)
wlc->stf->ant_rx_ovr = ANT_RX_DIV_DEF;
wlc->stf->txant = ANT_TX_DEF;
- wlc->prb_resp_timeout = WLC_PRB_RESP_TIMEOUT;
+ wlc->prb_resp_timeout = BRCMS_PRB_RESP_TIMEOUT;
wlc->usr_fragthresh = DOT11_DEFAULT_FRAG_LEN;
for (i = 0; i < NFIFO; i++)
@@ -1454,30 +1277,6 @@ void wlc_info_init(struct wlc_info *wlc, int unit)
wlc->SRL = RETRY_SHORT_DEF;
wlc->LRL = RETRY_LONG_DEF;
- /* init PM state */
- wlc->PM = PM_OFF; /* User's setting of PM mode through IOCTL */
- wlc->PM_override = false; /* Prevents from going to PM if our AP is 'ill' */
- wlc->PMenabled = false; /* Current PM state */
- wlc->PMpending = false; /* Tracks whether STA indicated PM in the last attempt */
- wlc->PMblocked = false; /* To allow blocking going into PM during RM and scans */
-
- /* In WMM Auto mode, PM is allowed if association is a UAPSD association */
- wlc->WME_PM_blocked = false;
-
- /* Init wme queuing method */
- wlc->wme_prec_queuing = false;
-
- /* Overrides for the core to stay awake under zillion conditions Look for STAY_AWAKE */
- wlc->wake = false;
- /* Are we waiting for a response to PS-Poll that we sent */
- wlc->PSpoll = false;
-
- /* APSD defaults */
- wlc->wme_apsd = true;
- wlc->apsd_sta_usp = false;
- wlc->apsd_trigger_timeout = 0; /* disable the trigger timer */
- wlc->apsd_trigger_ac = AC_BITMAP_ALL;
-
/* Set flag to indicate that hw keys should be used when available. */
wlc->wsec_swkeys = false;
@@ -1487,8 +1286,6 @@ void wlc_info_init(struct wlc_info *wlc, int unit)
wlc->wsec_keys[i]->idx = (u8) i;
}
- wlc->_regulatory_domain = false; /* 802.11d */
-
/* WME QoS mode is Auto by default */
wlc->pub->_wme = AUTO;
@@ -1498,54 +1295,50 @@ void wlc_info_init(struct wlc_info *wlc, int unit)
wlc->pub->_ampdu = AMPDU_AGG_HOST;
wlc->pub->bcmerror = 0;
- wlc->ibss_allowed = true;
- wlc->ibss_coalesce_allowed = true;
wlc->pub->_coex = ON;
/* initialize mpc delay */
- wlc->mpc_delay_off = wlc->mpc_dlycnt = WLC_MPC_MIN_DELAYCNT;
-
- wlc->pr80838_war = true;
+ wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
}
-static bool wlc_state_bmac_sync(struct wlc_info *wlc)
+static bool brcms_c_state_bmac_sync(struct brcms_c_info *wlc)
{
- wlc_bmac_state_t state_bmac;
+ struct brcms_b_state state_bmac;
- if (wlc_bmac_state_get(wlc->hw, &state_bmac) != 0)
+ if (brcms_b_state_get(wlc->hw, &state_bmac) != 0)
return false;
wlc->machwcap = state_bmac.machwcap;
- wlc_protection_upd(wlc, WLC_PROT_N_PAM_OVR,
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_PAM_OVR,
(s8) state_bmac.preamble_ovr);
return true;
}
-static uint wlc_attach_module(struct wlc_info *wlc)
+static uint brcms_c_attach_module(struct brcms_c_info *wlc)
{
uint err = 0;
uint unit;
unit = wlc->pub->unit;
- wlc->asi = wlc_antsel_attach(wlc);
+ wlc->asi = brcms_c_antsel_attach(wlc);
if (wlc->asi == NULL) {
- wiphy_err(wlc->wiphy, "wl%d: wlc_attach: wlc_antsel_attach "
+ wiphy_err(wlc->wiphy, "wl%d: attach: antsel_attach "
"failed\n", unit);
err = 44;
goto fail;
}
- wlc->ampdu = wlc_ampdu_attach(wlc);
+ wlc->ampdu = brcms_c_ampdu_attach(wlc);
if (wlc->ampdu == NULL) {
- wiphy_err(wlc->wiphy, "wl%d: wlc_attach: wlc_ampdu_attach "
+ wiphy_err(wlc->wiphy, "wl%d: attach: ampdu_attach "
"failed\n", unit);
err = 50;
goto fail;
}
- if ((wlc_stf_attach(wlc) != 0)) {
- wiphy_err(wlc->wiphy, "wl%d: wlc_attach: wlc_stf_attach "
+ if ((brcms_c_stf_attach(wlc) != 0)) {
+ wiphy_err(wlc->wiphy, "wl%d: attach: stf_attach "
"failed\n", unit);
err = 68;
goto fail;
@@ -1554,28 +1347,28 @@ static uint wlc_attach_module(struct wlc_info *wlc)
return err;
}
-struct wlc_pub *wlc_pub(void *wlc)
+struct brcms_pub *brcms_c_pub(void *wlc)
{
- return ((struct wlc_info *) wlc)->pub;
+ return ((struct brcms_c_info *) wlc)->pub;
}
-#define CHIP_SUPPORTS_11N(wlc) 1
+#define CHIP_SUPPORTS_11N(wlc) 1
/*
* The common driver entry routine. Error codes should be unique
*/
-void *wlc_attach(struct wl_info *wl, u16 vendor, u16 device, uint unit,
+void *brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
bool piomode, void *regsva, uint bustype, void *btparam,
uint *perr)
{
- struct wlc_info *wlc;
+ struct brcms_c_info *wlc;
uint err = 0;
uint j;
- struct wlc_pub *pub;
+ struct brcms_pub *pub;
uint n_disabled;
- /* allocate struct wlc_info state and its substructures */
- wlc = (struct wlc_info *) wlc_attach_malloc(unit, &err, device);
+ /* allocate struct brcms_c_info state and its substructures */
+ wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, device);
if (wlc == NULL)
goto fail;
wlc->wiphy = wl->wiphy;
@@ -1589,30 +1382,23 @@ void *wlc_attach(struct wl_info *wl, u16 vendor, u16 device, uint unit,
wlc->core = wlc->corestate;
wlc->wl = wl;
pub->unit = unit;
- wlc->btparam = btparam;
pub->_piomode = piomode;
wlc->bandinit_pending = false;
- /* By default restrict TKIP associations from 11n STA's */
- wlc->ht_wsec_restriction = WLC_HT_TKIP_RESTRICT;
- /* populate struct wlc_info with default values */
- wlc_info_init(wlc, unit);
+ /* populate struct brcms_c_info with default values */
+ brcms_c_info_init(wlc, unit);
/* update sta/ap related parameters */
- wlc_ap_upd(wlc);
+ brcms_c_ap_upd(wlc);
/* 11n_disable nvram */
n_disabled = getintvar(pub->vars, "11n_disable");
- /* register a module (to handle iovars) */
- wlc_module_register(wlc->pub, wlc_iovars, "wlc_iovars", wlc,
- wlc_doiovar, NULL, NULL);
-
/*
* low level attach steps(all hw accesses go
* inside, no more in rest of the attach)
*/
- err = wlc_bmac_attach(wlc, vendor, device, unit, piomode, regsva,
+ err = brcms_b_attach(wlc, vendor, device, unit, piomode, regsva,
bustype, btparam);
if (err)
goto fail;
@@ -1620,15 +1406,15 @@ void *wlc_attach(struct wl_info *wl, u16 vendor, u16 device, uint unit,
/* for some states, due to different info pointer(e.g. wlc, wlc_hw) or master/slave split,
* HIGH driver(both monolithic and HIGH_ONLY) needs to sync states FROM BMAC portion driver
*/
- if (!wlc_state_bmac_sync(wlc)) {
+ if (!brcms_c_state_bmac_sync(wlc)) {
err = 20;
goto fail;
}
- pub->phy_11ncapable = WLC_PHY_11N_CAP(wlc->band);
+ pub->phy_11ncapable = BRCMS_PHY_11N_CAP(wlc->band);
/* propagate *vars* from BMAC driver to high driver */
- wlc_bmac_copyfrom_vars(wlc->hw, &pub->vars, &wlc->vars_size);
+ brcms_b_copyfrom_vars(wlc->hw, &pub->vars, &wlc->vars_size);
/* set maximum allowed duty cycle */
@@ -1637,10 +1423,10 @@ void *wlc_attach(struct wl_info *wl, u16 vendor, u16 device, uint unit,
wlc->tx_duty_cycle_cck =
(u16) getintvar(pub->vars, "tx_duty_cycle_cck");
- wlc_stf_phy_chain_calc(wlc);
+ brcms_c_stf_phy_chain_calc(wlc);
/* txchain 1: txant 0, txchain 2: txant 1 */
- if (WLCISNPHY(wlc->band) && (wlc->stf->txstreams == 1))
+ if (BRCMS_ISNPHY(wlc->band) && (wlc->stf->txstreams == 1))
wlc->stf->txant = wlc->stf->hw_txchain - 1;
/* push to BMAC driver */
@@ -1654,7 +1440,7 @@ void *wlc_attach(struct wl_info *wl, u16 vendor, u16 device, uint unit,
wlc->core->txavail[i] = wlc->hw->txavail[i];
}
- wlc_bmac_hw_etheraddr(wlc->hw, wlc->perm_etheraddr);
+ brcms_b_hw_etheraddr(wlc->hw, wlc->perm_etheraddr);
memcpy(&pub->cur_etheraddr, &wlc->perm_etheraddr, ETH_ALEN);
@@ -1665,7 +1451,7 @@ void *wlc_attach(struct wl_info *wl, u16 vendor, u16 device, uint unit,
wlc->band = wlc->bandstate[j];
- if (!wlc_attach_stf_ant_init(wlc)) {
+ if (!brcms_c_attach_stf_ant_init(wlc)) {
err = 24;
goto fail;
}
@@ -1677,18 +1463,19 @@ void *wlc_attach(struct wl_info *wl, u16 vendor, u16 device, uint unit,
/* init gmode value */
if (BAND_2G(wlc->band->bandtype)) {
wlc->band->gmode = GMODE_AUTO;
- wlc_protection_upd(wlc, WLC_PROT_G_USER,
+ brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER,
wlc->band->gmode);
}
/* init _n_enab supported mode */
- if (WLC_PHY_11N_CAP(wlc->band) && CHIP_SUPPORTS_11N(wlc)) {
+ if (BRCMS_PHY_11N_CAP(wlc->band) && CHIP_SUPPORTS_11N(wlc)) {
if (n_disabled & WLFEATURE_DISABLE_11N) {
pub->_n_enab = OFF;
- wlc_protection_upd(wlc, WLC_PROT_N_USER, OFF);
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_USER,
+ OFF);
} else {
pub->_n_enab = SUPPORT_11N;
- wlc_protection_upd(wlc, WLC_PROT_N_USER,
+ brcms_c_protection_upd(wlc, BRCMS_PROT_N_USER,
((pub->_n_enab ==
SUPPORT_11N) ? WL_11N_2x2 :
WL_11N_3x3));
@@ -1696,48 +1483,48 @@ void *wlc_attach(struct wl_info *wl, u16 vendor, u16 device, uint unit,
}
/* init per-band default rateset, depend on band->gmode */
- wlc_default_rateset(wlc, &wlc->band->defrateset);
+ brcms_default_rateset(wlc, &wlc->band->defrateset);
- /* fill in hw_rateset (used early by WLC_SET_RATESET) */
- wlc_rateset_filter(&wlc->band->defrateset,
+ /* fill in hw_rateset (used early by BRCM_SET_RATESET) */
+ brcms_c_rateset_filter(&wlc->band->defrateset,
&wlc->band->hw_rateset, false,
- WLC_RATES_CCK_OFDM, WLC_RATE_MASK,
+ BRCMS_RATES_CCK_OFDM, BRCMS_RATE_MASK,
(bool) N_ENAB(wlc->pub));
}
/* update antenna config due to wlc->stf->txant/txchain/ant_rx_ovr change */
- wlc_stf_phy_txant_upd(wlc);
+ brcms_c_stf_phy_txant_upd(wlc);
/* attach each modules */
- err = wlc_attach_module(wlc);
+ err = brcms_c_attach_module(wlc);
if (err != 0)
goto fail;
- if (!wlc_timers_init(wlc, unit)) {
- wiphy_err(wl->wiphy, "wl%d: %s: wlc_init_timer failed\n", unit,
+ if (!brcms_c_timers_init(wlc, unit)) {
+ wiphy_err(wl->wiphy, "wl%d: %s: init_timer failed\n", unit,
__func__);
err = 32;
goto fail;
}
/* depend on rateset, gmode */
- wlc->cmi = wlc_channel_mgr_attach(wlc);
+ wlc->cmi = brcms_c_channel_mgr_attach(wlc);
if (!wlc->cmi) {
- wiphy_err(wl->wiphy, "wl%d: %s: wlc_channel_mgr_attach failed"
+ wiphy_err(wl->wiphy, "wl%d: %s: channel_mgr_attach failed"
"\n", unit, __func__);
err = 33;
goto fail;
}
/* init default when all parameters are ready, i.e. ->rateset */
- wlc_bss_default_init(wlc);
+ brcms_c_bss_default_init(wlc);
/*
* Complete the wlc default state initializations..
*/
/* allocate our initial queue */
- wlc->pkt_queue = wlc_txq_alloc(wlc);
+ wlc->pkt_queue = brcms_c_txq_alloc(wlc);
if (wlc->pkt_queue == NULL) {
wiphy_err(wl->wiphy, "wl%d: %s: failed to malloc tx queue\n",
unit, __func__);
@@ -1750,7 +1537,7 @@ void *wlc_attach(struct wl_info *wl, u16 vendor, u16 device, uint unit,
wlc->cfg->wlc = wlc;
pub->txmaxpkts = MAXTXPKTS;
- wlc_wme_initparams_sta(wlc, &wlc->wme_param_ie);
+ brcms_c_wme_initparams_sta(wlc, &wlc->wme_param_ie);
wlc->mimoft = FT_HT;
wlc->ht_cap.cap_info = HT_CAP;
@@ -1760,20 +1547,19 @@ void *wlc_attach(struct wl_info *wl, u16 vendor, u16 device, uint unit,
wlc->mimo_40txbw = AUTO;
wlc->ofdm_40txbw = AUTO;
wlc->cck_40txbw = AUTO;
- wlc_update_mimo_band_bwcap(wlc, WLC_N_BW_20IN2G_40IN5G);
-
- /* Enable setting the RIFS Mode bit by default in HT Info IE */
- wlc->rifs_advert = AUTO;
+ brcms_c_update_mimo_band_bwcap(wlc, BRCMS_N_BW_20IN2G_40IN5G);
/* Set default values of SGI */
- if (WLC_SGI_CAP_PHY(wlc)) {
- wlc_ht_update_sgi_rx(wlc, (WLC_N_SGI_20 | WLC_N_SGI_40));
+ if (BRCMS_SGI_CAP_PHY(wlc)) {
+ brcms_c_ht_update_sgi_rx(wlc, (BRCMS_N_SGI_20 |
+ BRCMS_N_SGI_40));
wlc->sgi_tx = AUTO;
- } else if (WLCISSSLPNPHY(wlc->band)) {
- wlc_ht_update_sgi_rx(wlc, (WLC_N_SGI_20 | WLC_N_SGI_40));
+ } else if (BRCMS_ISSSLPNPHY(wlc->band)) {
+ brcms_c_ht_update_sgi_rx(wlc, (BRCMS_N_SGI_20 |
+ BRCMS_N_SGI_40));
wlc->sgi_tx = AUTO;
} else {
- wlc_ht_update_sgi_rx(wlc, 0);
+ brcms_c_ht_update_sgi_rx(wlc, 0);
wlc->sgi_tx = OFF;
}
@@ -1784,7 +1570,7 @@ void *wlc_attach(struct wl_info *wl, u16 vendor, u16 device, uint unit,
wlc->sgi_tx = OFF;
if (n_disabled & WLFEATURE_DISABLE_11N_SGI_RX)
- wlc_ht_update_sgi_rx(wlc, 0);
+ brcms_c_ht_update_sgi_rx(wlc, 0);
/* apply the stbc override from nvram conf */
if (n_disabled & WLFEATURE_DISABLE_11N_STBC_TX) {
@@ -1793,23 +1579,15 @@ void *wlc_attach(struct wl_info *wl, u16 vendor, u16 device, uint unit,
wlc->ht_cap.cap_info &= ~IEEE80211_HT_CAP_TX_STBC;
}
if (n_disabled & WLFEATURE_DISABLE_11N_STBC_RX)
- wlc_stf_stbc_rx_set(wlc, HT_CAP_RX_STBC_NO);
+ brcms_c_stf_stbc_rx_set(wlc, HT_CAP_RX_STBC_NO);
/* apply the GF override from nvram conf */
if (n_disabled & WLFEATURE_DISABLE_11N_GF)
wlc->ht_cap.cap_info &= ~IEEE80211_HT_CAP_GRN_FLD;
/* initialize radio_mpc_disable according to wlc->mpc */
- wlc_radio_mpc_upd(wlc);
-
- if ((wlc->pub->sih->chip) == BCM43235_CHIP_ID) {
- if ((getintvar(wlc->pub->vars, "aa2g") == 7) ||
- (getintvar(wlc->pub->vars, "aa5g") == 7)) {
- wlc_bmac_antsel_set(wlc->hw, 1);
- }
- } else {
- wlc_bmac_antsel_set(wlc->hw, wlc->asi->antsel_avail);
- }
+ brcms_c_radio_mpc_upd(wlc);
+ brcms_b_antsel_set(wlc->hw, wlc->asi->antsel_avail);
if (perr)
*perr = 0;
@@ -1820,14 +1598,14 @@ void *wlc_attach(struct wl_info *wl, u16 vendor, u16 device, uint unit,
wiphy_err(wl->wiphy, "wl%d: %s: failed with err %d\n",
unit, __func__, err);
if (wlc)
- wlc_detach(wlc);
+ brcms_c_detach(wlc);
if (perr)
*perr = err;
return NULL;
}
-static void wlc_attach_antgain_init(struct wlc_info *wlc)
+static void brcms_c_attach_antgain_init(struct brcms_c_info *wlc)
{
uint unit;
unit = wlc->pub->unit;
@@ -1859,7 +1637,7 @@ static void wlc_attach_antgain_init(struct wlc_info *wlc)
}
}
-static bool wlc_attach_stf_ant_init(struct wlc_info *wlc)
+static bool brcms_c_attach_stf_ant_init(struct brcms_c_info *wlc)
{
int aa;
uint unit;
@@ -1894,49 +1672,51 @@ static bool wlc_attach_stf_ant_init(struct wlc_info *wlc)
/* Compute Antenna Gain */
wlc->band->antgain =
(s8) getintvar(vars, (BAND_5G(bandtype) ? "ag1" : "ag0"));
- wlc_attach_antgain_init(wlc);
+ brcms_c_attach_antgain_init(wlc);
return true;
}
-static void wlc_timers_deinit(struct wlc_info *wlc)
+static void brcms_c_timers_deinit(struct brcms_c_info *wlc)
{
/* free timer state */
if (wlc->wdtimer) {
- wl_free_timer(wlc->wl, wlc->wdtimer);
+ brcms_free_timer(wlc->wl, wlc->wdtimer);
wlc->wdtimer = NULL;
}
if (wlc->radio_timer) {
- wl_free_timer(wlc->wl, wlc->radio_timer);
+ brcms_free_timer(wlc->wl, wlc->radio_timer);
wlc->radio_timer = NULL;
}
}
-static void wlc_detach_module(struct wlc_info *wlc)
+static void brcms_c_detach_module(struct brcms_c_info *wlc)
{
if (wlc->asi) {
- wlc_antsel_detach(wlc->asi);
+ brcms_c_antsel_detach(wlc->asi);
wlc->asi = NULL;
}
if (wlc->ampdu) {
- wlc_ampdu_detach(wlc->ampdu);
+ brcms_c_ampdu_detach(wlc->ampdu);
wlc->ampdu = NULL;
}
- wlc_stf_detach(wlc);
+ brcms_c_stf_detach(wlc);
}
/*
* Return a count of the number of driver callbacks still pending.
*
- * General policy is that wlc_detach can only dealloc/free software states. It can NOT
- * touch hardware registers since the d11core may be in reset and clock may not be available.
- * One exception is sb register access, which is possible if crystal is turned on
- * After "down" state, driver should avoid software timer with the exception of radio_monitor.
+ * General policy is that brcms_c_detach can only dealloc/free software states.
+ * It can NOT touch hardware registers since the d11core may be in reset and
+ * clock may not be available.
+ * One exception is sb register access, which is possible if crystal is turned
+ * on. After "down" state, driver should avoid software timer with the
+ * exception of radio_monitor.
*/
-uint wlc_detach(struct wlc_info *wlc)
+uint brcms_c_detach(struct brcms_c_info *wlc)
{
uint callbacks = 0;
@@ -1945,70 +1725,47 @@ uint wlc_detach(struct wlc_info *wlc)
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
- callbacks += wlc_bmac_detach(wlc);
+ callbacks += brcms_b_detach(wlc);
/* delete software timers */
- if (!wlc_radio_monitor_stop(wlc))
+ if (!brcms_c_radio_monitor_stop(wlc))
callbacks++;
- wlc_channel_mgr_detach(wlc->cmi);
-
- wlc_timers_deinit(wlc);
-
- wlc_detach_module(wlc);
-
- /* free other state */
-
+ brcms_c_channel_mgr_detach(wlc->cmi);
-#ifdef BCMDBG
- kfree(wlc->country_ie_override);
- wlc->country_ie_override = NULL;
-#endif /* BCMDBG */
+ brcms_c_timers_deinit(wlc);
- {
- /* free dumpcb list */
- struct dumpcb_s *prev, *ptr;
- prev = ptr = wlc->dumpcb_head;
- while (ptr) {
- ptr = prev->next;
- kfree(prev);
- prev = ptr;
- }
- wlc->dumpcb_head = NULL;
- }
+ brcms_c_detach_module(wlc);
- /* Detach from iovar manager */
- wlc_module_unregister(wlc->pub, "wlc_iovars", wlc);
while (wlc->tx_queues != NULL)
- wlc_txq_free(wlc, wlc->tx_queues);
+ brcms_c_txq_free(wlc, wlc->tx_queues);
- wlc_detach_mfree(wlc);
+ brcms_c_detach_mfree(wlc);
return callbacks;
}
/* update state that depends on the current value of "ap" */
-void wlc_ap_upd(struct wlc_info *wlc)
+void brcms_c_ap_upd(struct brcms_c_info *wlc)
{
if (AP_ENAB(wlc->pub))
- wlc->PLCPHdr_override = WLC_PLCP_AUTO; /* AP: short not allowed, but not enforced */
+ /* AP: short not allowed, but not enforced */
+ wlc->PLCPHdr_override = BRCMS_PLCP_AUTO;
else
- wlc->PLCPHdr_override = WLC_PLCP_SHORT; /* STA-BSS; short capable */
-
- /* disable vlan_mode on AP since some legacy STAs cannot rx tagged pkts */
- wlc->vlan_mode = AP_ENAB(wlc->pub) ? OFF : AUTO;
+ /* STA-BSS; short capable */
+ wlc->PLCPHdr_override = BRCMS_PLCP_SHORT;
/* fixup mpc */
wlc->mpc = true;
}
/* read hwdisable state and propagate to wlc flag */
-static void wlc_radio_hwdisable_upd(struct wlc_info *wlc)
+static void brcms_c_radio_hwdisable_upd(struct brcms_c_info *wlc)
{
if (wlc->pub->wlfeatureflag & WL_SWFL_NOHWRADIO || wlc->pub->hw_off)
return;
- if (wlc_bmac_radio_read_hwdisabled(wlc->hw)) {
+ if (brcms_b_radio_read_hwdisabled(wlc->hw)) {
mboolset(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
} else {
mboolclr(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
@@ -2016,17 +1773,17 @@ static void wlc_radio_hwdisable_upd(struct wlc_info *wlc)
}
/* return true if Minimum Power Consumption should be entered, false otherwise */
-bool wlc_is_non_delay_mpc(struct wlc_info *wlc)
+bool brcms_c_is_non_delay_mpc(struct brcms_c_info *wlc)
{
return false;
}
-bool wlc_ismpc(struct wlc_info *wlc)
+bool brcms_c_ismpc(struct brcms_c_info *wlc)
{
- return (wlc->mpc_delay_off == 0) && (wlc_is_non_delay_mpc(wlc));
+ return (wlc->mpc_delay_off == 0) && (brcms_c_is_non_delay_mpc(wlc));
}
-void wlc_radio_mpc_upd(struct wlc_info *wlc)
+void brcms_c_radio_mpc_upd(struct brcms_c_info *wlc)
{
bool mpc_radio, radio_state;
@@ -2040,9 +1797,9 @@ void wlc_radio_mpc_upd(struct wlc_info *wlc)
if (!wlc->pub->radio_disabled)
return;
mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
- wlc_radio_upd(wlc);
+ brcms_c_radio_upd(wlc);
if (!wlc->pub->radio_disabled)
- wlc_radio_monitor_stop(wlc);
+ brcms_c_radio_monitor_stop(wlc);
return;
}
@@ -2054,17 +1811,17 @@ void wlc_radio_mpc_upd(struct wlc_info *wlc)
radio_state =
(mboolisset(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE) ? OFF :
ON);
- mpc_radio = (wlc_ismpc(wlc) == true) ? OFF : ON;
+ mpc_radio = (brcms_c_ismpc(wlc) == true) ? OFF : ON;
if (radio_state == ON && mpc_radio == OFF)
wlc->mpc_delay_off = wlc->mpc_dlycnt;
else if (radio_state == OFF && mpc_radio == ON) {
mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
- wlc_radio_upd(wlc);
- if (wlc->mpc_offcnt < WLC_MPC_THRESHOLD) {
- wlc->mpc_dlycnt = WLC_MPC_MAX_DELAYCNT;
- } else
- wlc->mpc_dlycnt = WLC_MPC_MIN_DELAYCNT;
+ brcms_c_radio_upd(wlc);
+ if (wlc->mpc_offcnt < BRCMS_MPC_THRESHOLD)
+ wlc->mpc_dlycnt = BRCMS_MPC_MAX_DELAYCNT;
+ else
+ wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
wlc->mpc_dur += OSL_SYSUPTIME() - wlc->mpc_laston_ts;
}
/* Below logic is meant to capture the transition from mpc off to mpc on for reasons
@@ -2072,57 +1829,57 @@ void wlc_radio_mpc_upd(struct wlc_info *wlc)
* wlc->mpc_delay_off to wlc->mpc_dlycnt, so that we restart the countdown of mpc_delay_off
*/
if ((wlc->prev_non_delay_mpc == false) &&
- (wlc_is_non_delay_mpc(wlc) == true) && wlc->mpc_delay_off) {
+ (brcms_c_is_non_delay_mpc(wlc) == true) && wlc->mpc_delay_off) {
wlc->mpc_delay_off = wlc->mpc_dlycnt;
}
- wlc->prev_non_delay_mpc = wlc_is_non_delay_mpc(wlc);
+ wlc->prev_non_delay_mpc = brcms_c_is_non_delay_mpc(wlc);
}
/*
* centralized radio disable/enable function,
* invoke radio enable/disable after updating hwradio status
*/
-static void wlc_radio_upd(struct wlc_info *wlc)
+static void brcms_c_radio_upd(struct brcms_c_info *wlc)
{
if (wlc->pub->radio_disabled) {
- wlc_radio_disable(wlc);
+ brcms_c_radio_disable(wlc);
} else {
- wlc_radio_enable(wlc);
+ brcms_c_radio_enable(wlc);
}
}
/* maintain LED behavior in down state */
-static void wlc_down_led_upd(struct wlc_info *wlc)
+static void brcms_c_down_led_upd(struct brcms_c_info *wlc)
{
/* maintain LEDs while in down state, turn on sbclk if not available yet */
/* turn on sbclk if necessary */
if (!AP_ENAB(wlc->pub)) {
- wlc_pllreq(wlc, true, WLC_PLLREQ_FLIP);
+ brcms_c_pllreq(wlc, true, BRCMS_PLLREQ_FLIP);
- wlc_pllreq(wlc, false, WLC_PLLREQ_FLIP);
+ brcms_c_pllreq(wlc, false, BRCMS_PLLREQ_FLIP);
}
}
/* update hwradio status and return it */
-bool wlc_check_radio_disabled(struct wlc_info *wlc)
+bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc)
{
- wlc_radio_hwdisable_upd(wlc);
+ brcms_c_radio_hwdisable_upd(wlc);
return mboolisset(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE) ? true : false;
}
-void wlc_radio_disable(struct wlc_info *wlc)
+void brcms_c_radio_disable(struct brcms_c_info *wlc)
{
if (!wlc->pub->up) {
- wlc_down_led_upd(wlc);
+ brcms_c_down_led_upd(wlc);
return;
}
- wlc_radio_monitor_start(wlc);
- wl_down(wlc->wl);
+ brcms_c_radio_monitor_start(wlc);
+ brcms_down(wlc->wl);
}
-static void wlc_radio_enable(struct wlc_info *wlc)
+static void brcms_c_radio_enable(struct brcms_c_info *wlc)
{
if (wlc->pub->up)
return;
@@ -2130,71 +1887,63 @@ static void wlc_radio_enable(struct wlc_info *wlc)
if (DEVICEREMOVED(wlc))
return;
- if (!wlc->down_override) { /* imposed by wl down/out ioctl */
- wl_up(wlc->wl);
- }
+ brcms_up(wlc->wl);
}
/* periodical query hw radio button while driver is "down" */
-static void wlc_radio_timer(void *arg)
+static void brcms_c_radio_timer(void *arg)
{
- struct wlc_info *wlc = (struct wlc_info *) arg;
+ struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
if (DEVICEREMOVED(wlc)) {
wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit,
__func__);
- wl_down(wlc->wl);
+ brcms_down(wlc->wl);
return;
}
/* cap mpc off count */
- if (wlc->mpc_offcnt < WLC_MPC_MAX_DELAYCNT)
+ if (wlc->mpc_offcnt < BRCMS_MPC_MAX_DELAYCNT)
wlc->mpc_offcnt++;
- wlc_radio_hwdisable_upd(wlc);
- wlc_radio_upd(wlc);
+ brcms_c_radio_hwdisable_upd(wlc);
+ brcms_c_radio_upd(wlc);
}
-static bool wlc_radio_monitor_start(struct wlc_info *wlc)
+static bool brcms_c_radio_monitor_start(struct brcms_c_info *wlc)
{
/* Don't start the timer if HWRADIO feature is disabled */
if (wlc->radio_monitor || (wlc->pub->wlfeatureflag & WL_SWFL_NOHWRADIO))
return true;
wlc->radio_monitor = true;
- wlc_pllreq(wlc, true, WLC_PLLREQ_RADIO_MON);
- wl_add_timer(wlc->wl, wlc->radio_timer, TIMER_INTERVAL_RADIOCHK, true);
+ brcms_c_pllreq(wlc, true, BRCMS_PLLREQ_RADIO_MON);
+ brcms_add_timer(wlc->wl, wlc->radio_timer, TIMER_INTERVAL_RADIOCHK,
+ true);
return true;
}
-bool wlc_radio_monitor_stop(struct wlc_info *wlc)
+bool brcms_c_radio_monitor_stop(struct brcms_c_info *wlc)
{
if (!wlc->radio_monitor)
return true;
wlc->radio_monitor = false;
- wlc_pllreq(wlc, false, WLC_PLLREQ_RADIO_MON);
- return wl_del_timer(wlc->wl, wlc->radio_timer);
+ brcms_c_pllreq(wlc, false, BRCMS_PLLREQ_RADIO_MON);
+ return brcms_del_timer(wlc->wl, wlc->radio_timer);
}
-static void wlc_watchdog_by_timer(void *arg)
+static void brcms_c_watchdog_by_timer(void *arg)
{
- struct wlc_info *wlc = (struct wlc_info *) arg;
- wlc_watchdog(arg);
- if (WLC_WATCHDOG_TBTT(wlc)) {
- /* set to normal osl watchdog period */
- wl_del_timer(wlc->wl, wlc->wdtimer);
- wl_add_timer(wlc->wl, wlc->wdtimer, TIMER_INTERVAL_WATCHDOG,
- true);
- }
+ brcms_c_watchdog(arg);
}
/* common watchdog code */
-static void wlc_watchdog(void *arg)
+static void brcms_c_watchdog(void *arg)
{
- struct wlc_info *wlc = (struct wlc_info *) arg;
+ struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
int i;
- struct wlc_bsscfg *cfg;
+ struct brcms_bss_cfg *cfg;
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
@@ -2204,7 +1953,7 @@ static void wlc_watchdog(void *arg)
if (DEVICEREMOVED(wlc)) {
wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit,
__func__);
- wl_down(wlc->wl);
+ brcms_down(wlc->wl);
return;
}
@@ -2216,26 +1965,26 @@ static void wlc_watchdog(void *arg)
if (--wlc->mpc_delay_off == 0) {
mboolset(wlc->pub->radio_disabled,
WL_RADIO_MPC_DISABLE);
- if (wlc->mpc && wlc_ismpc(wlc))
+ if (wlc->mpc && brcms_c_ismpc(wlc))
wlc->mpc_offcnt = 0;
wlc->mpc_laston_ts = OSL_SYSUPTIME();
}
}
/* mpc sync */
- wlc_radio_mpc_upd(wlc);
+ brcms_c_radio_mpc_upd(wlc);
/* radio sync: sw/hw/mpc --> radio_disable/radio_enable */
- wlc_radio_hwdisable_upd(wlc);
- wlc_radio_upd(wlc);
+ brcms_c_radio_hwdisable_upd(wlc);
+ brcms_c_radio_upd(wlc);
/* if radio is disable, driver may be down, quit here */
if (wlc->pub->radio_disabled)
return;
- wlc_bmac_watchdog(wlc);
+ brcms_b_watchdog(wlc);
/* occasionally sample mac stat counters to detect 16-bit counter wrap */
if ((wlc->pub->now % SW_TIMER_MAC_STAT_UPD) == 0)
- wlc_statsupd(wlc);
+ brcms_c_statsupd(wlc);
/* Manage TKIP countermeasures timers */
FOREACH_BSS(wlc, i, cfg) {
@@ -2248,21 +1997,21 @@ static void wlc_watchdog(void *arg)
}
/* Call any registered watchdog handlers */
- for (i = 0; i < WLC_MAXMODULES; i++) {
+ for (i = 0; i < BRCMS_MAXMODULES; i++) {
if (wlc->modulecb[i].watchdog_fn)
wlc->modulecb[i].watchdog_fn(wlc->modulecb[i].hdl);
}
- if (WLCISNPHY(wlc->band) && !wlc->pub->tempsense_disable &&
+ if (BRCMS_ISNPHY(wlc->band) && !wlc->pub->tempsense_disable &&
((wlc->pub->now - wlc->tempsense_lasttime) >=
- WLC_TEMPSENSE_PERIOD)) {
+ BRCMS_TEMPSENSE_PERIOD)) {
wlc->tempsense_lasttime = wlc->pub->now;
- wlc_tempsense_upd(wlc);
+ brcms_c_tempsense_upd(wlc);
}
}
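
[Editorial aside, not part of the patch] The watchdog above decrements mpc_delay_off once per tick and only flags the radio as MPC-disabled when the countdown reaches zero. A standalone sketch of that delayed-off idea; the struct and field names below are illustrative, not the driver's.

#include <stdbool.h>
#include <stdio.h>

struct mpc_state {
	unsigned int delay_off;	/* ticks left before turning the radio off */
	bool radio_off;
};

static void mpc_watchdog_tick(struct mpc_state *s)
{
	if (s->delay_off && --s->delay_off == 0)
		s->radio_off = true;	/* corresponds to WL_RADIO_MPC_DISABLE */
}

int main(void)
{
	struct mpc_state s = { .delay_off = 3, .radio_off = false };
	int tick;

	for (tick = 1; tick <= 4; tick++) {
		mpc_watchdog_tick(&s);
		printf("tick %d: radio_off=%d\n", tick, s.radio_off);
	}
	return 0;
}
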
/* make interface operational */
-int wlc_up(struct wlc_info *wlc)
+int brcms_c_up(struct brcms_c_info *wlc)
{
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
@@ -2271,7 +2020,7 @@ int wlc_up(struct wlc_info *wlc)
return -ENOMEDIUM;
if (!wlc->pub->hw_up) {
- wlc_bmac_hw_up(wlc->hw);
+ brcms_b_hw_up(wlc->hw);
wlc->pub->hw_up = true;
}
@@ -2279,11 +2028,11 @@ int wlc_up(struct wlc_info *wlc)
&& (wlc->pub->sih->chip == BCM4313_CHIP_ID)) {
if (wlc->pub->boardrev >= 0x1250
&& (wlc->pub->boardflags & BFL_FEM_BT)) {
- wlc_mhf(wlc, MHF5, MHF5_4313_GPIOCTRL,
- MHF5_4313_GPIOCTRL, WLC_BAND_ALL);
+ brcms_c_mhf(wlc, MHF5, MHF5_4313_GPIOCTRL,
+ MHF5_4313_GPIOCTRL, BRCM_BAND_ALL);
} else {
- wlc_mhf(wlc, MHF4, MHF4_EXTPA_ENABLE, MHF4_EXTPA_ENABLE,
- WLC_BAND_ALL);
+ brcms_c_mhf(wlc, MHF4, MHF4_EXTPA_ENABLE,
+ MHF4_EXTPA_ENABLE, BRCM_BAND_ALL);
}
}
@@ -2291,17 +2040,17 @@ int wlc_up(struct wlc_info *wlc)
* Need to read the hwradio status here to cover the case where the system
* is loaded with the hw radio disabled. We do not want to bring the driver up in this case.
* if radio is disabled, abort up, lower power, start radio timer and return 0(for NDIS)
- * don't call radio_update to avoid looping wlc_up.
+ * don't call radio_update to avoid looping brcms_c_up.
*
- * wlc_bmac_up_prep() returns either 0 or -BCME_RADIOOFF only
+ * brcms_b_up_prep() returns either 0 or -BCME_RADIOOFF only
*/
if (!wlc->pub->radio_disabled) {
- int status = wlc_bmac_up_prep(wlc->hw);
+ int status = brcms_b_up_prep(wlc->hw);
if (status == -ENOMEDIUM) {
if (!mboolisset
(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE)) {
int idx;
- struct wlc_bsscfg *bsscfg;
+ struct brcms_bss_cfg *bsscfg;
mboolset(wlc->pub->radio_disabled,
WL_RADIO_HW_DISABLE);
@@ -2309,9 +2058,9 @@ int wlc_up(struct wlc_info *wlc)
if (!BSSCFG_STA(bsscfg)
|| !bsscfg->enable || !bsscfg->BSS)
continue;
- wiphy_err(wlc->wiphy, "wl%d.%d: wlc_up"
+ wiphy_err(wlc->wiphy, "wl%d.%d: up"
": rfdisable -> "
- "wlc_bsscfg_disable()\n",
+ "bsscfg_disable()\n",
wlc->pub->unit, idx);
}
}
@@ -2319,78 +2068,78 @@ int wlc_up(struct wlc_info *wlc)
}
if (wlc->pub->radio_disabled) {
- wlc_radio_monitor_start(wlc);
+ brcms_c_radio_monitor_start(wlc);
return 0;
}
- /* wlc_bmac_up_prep has done wlc_corereset(). so clk is on, set it */
+ /* brcms_b_up_prep has done brcms_c_corereset(). so clk is on, set it */
wlc->clk = true;
- wlc_radio_monitor_stop(wlc);
+ brcms_c_radio_monitor_stop(wlc);
/* Set EDCF hostflags */
if (EDCF_ENAB(wlc->pub)) {
- wlc_mhf(wlc, MHF1, MHF1_EDCF, MHF1_EDCF, WLC_BAND_ALL);
+ brcms_c_mhf(wlc, MHF1, MHF1_EDCF, MHF1_EDCF, BRCM_BAND_ALL);
} else {
- wlc_mhf(wlc, MHF1, MHF1_EDCF, 0, WLC_BAND_ALL);
+ brcms_c_mhf(wlc, MHF1, MHF1_EDCF, 0, BRCM_BAND_ALL);
}
- if (WLC_WAR16165(wlc))
- wlc_mhf(wlc, MHF2, MHF2_PCISLOWCLKWAR, MHF2_PCISLOWCLKWAR,
- WLC_BAND_ALL);
+ if (BRCMS_WAR16165(wlc))
+ brcms_c_mhf(wlc, MHF2, MHF2_PCISLOWCLKWAR, MHF2_PCISLOWCLKWAR,
+ BRCM_BAND_ALL);
- wl_init(wlc->wl);
+ brcms_init(wlc->wl);
wlc->pub->up = true;
if (wlc->bandinit_pending) {
- wlc_suspend_mac_and_wait(wlc);
- wlc_set_chanspec(wlc, wlc->default_bss->chanspec);
+ brcms_c_suspend_mac_and_wait(wlc);
+ brcms_c_set_chanspec(wlc, wlc->default_bss->chanspec);
wlc->bandinit_pending = false;
- wlc_enable_mac(wlc);
+ brcms_c_enable_mac(wlc);
}
- wlc_bmac_up_finish(wlc->hw);
+ brcms_b_up_finish(wlc->hw);
/* other software states up after ISR is running */
/* start APs that were to be brought up but are not up yet */
- /* if (AP_ENAB(wlc->pub)) wlc_restart_ap(wlc->ap); */
+ /* if (AP_ENAB(wlc->pub)) brcms_c_restart_ap(wlc->ap); */
/* Program the TX wme params with the current settings */
- wlc_wme_retries_write(wlc);
+ brcms_c_wme_retries_write(wlc);
/* start one second watchdog timer */
- wl_add_timer(wlc->wl, wlc->wdtimer, TIMER_INTERVAL_WATCHDOG, true);
+ brcms_add_timer(wlc->wl, wlc->wdtimer, TIMER_INTERVAL_WATCHDOG, true);
wlc->WDarmed = true;
/* ensure antenna config is up to date */
- wlc_stf_phy_txant_upd(wlc);
+ brcms_c_stf_phy_txant_upd(wlc);
/* ensure LDPC config is in sync */
- wlc_ht_update_ldpc(wlc, wlc->stf->ldpc);
+ brcms_c_ht_update_ldpc(wlc, wlc->stf->ldpc);
return 0;
}
/* Initialize the base precedence map for dequeueing from txq based on WME settings */
-static void wlc_tx_prec_map_init(struct wlc_info *wlc)
+static void brcms_c_tx_prec_map_init(struct brcms_c_info *wlc)
{
- wlc->tx_prec_map = WLC_PREC_BMP_ALL;
+ wlc->tx_prec_map = BRCMS_PREC_BMP_ALL;
memset(wlc->fifo2prec_map, 0, NFIFO * sizeof(u16));
/* For non-WME, both fifos have overlapping MAXPRIO. So just disable all precedences
* if either is full.
*/
if (!EDCF_ENAB(wlc->pub)) {
- wlc->fifo2prec_map[TX_DATA_FIFO] = WLC_PREC_BMP_ALL;
- wlc->fifo2prec_map[TX_CTL_FIFO] = WLC_PREC_BMP_ALL;
+ wlc->fifo2prec_map[TX_DATA_FIFO] = BRCMS_PREC_BMP_ALL;
+ wlc->fifo2prec_map[TX_CTL_FIFO] = BRCMS_PREC_BMP_ALL;
} else {
- wlc->fifo2prec_map[TX_AC_BK_FIFO] = WLC_PREC_BMP_AC_BK;
- wlc->fifo2prec_map[TX_AC_BE_FIFO] = WLC_PREC_BMP_AC_BE;
- wlc->fifo2prec_map[TX_AC_VI_FIFO] = WLC_PREC_BMP_AC_VI;
- wlc->fifo2prec_map[TX_AC_VO_FIFO] = WLC_PREC_BMP_AC_VO;
+ wlc->fifo2prec_map[TX_AC_BK_FIFO] = BRCMS_PREC_BMP_AC_BK;
+ wlc->fifo2prec_map[TX_AC_BE_FIFO] = BRCMS_PREC_BMP_AC_BE;
+ wlc->fifo2prec_map[TX_AC_VI_FIFO] = BRCMS_PREC_BMP_AC_VI;
+ wlc->fifo2prec_map[TX_AC_VO_FIFO] = BRCMS_PREC_BMP_AC_VO;
}
}
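
[Editorial aside, not part of the patch] brcms_c_tx_prec_map_init() above gives each TX FIFO a bitmap of packet precedences, and its comment notes that precedences are disabled when a FIFO is full. A standalone sketch of that masking; the bit values below are made up for the example and are not the driver's BRCMS_PREC_BMP_* constants.

#include <stdio.h>

#define NFIFO 4

int main(void)
{
	unsigned int fifo2prec[NFIFO] = {
		0x0003,	/* AC_BK: precedences 0-1 */
		0x000c,	/* AC_BE: precedences 2-3 */
		0x0030,	/* AC_VI: precedences 4-5 */
		0x00c0,	/* AC_VO: precedences 6-7 */
	};
	unsigned int tx_prec_map = 0x00ff;	/* all precedences allowed */

	/* pretend the AC_VI FIFO filled up: stop dequeueing its precedences */
	tx_prec_map &= ~fifo2prec[2];
	printf("tx_prec_map = 0x%02x\n", tx_prec_map);	/* prints 0xcf */
	return 0;
}
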
-static uint wlc_down_del_timer(struct wlc_info *wlc)
+static uint brcms_c_down_del_timer(struct brcms_c_info *wlc)
{
uint callbacks = 0;
@@ -2402,13 +2151,13 @@ static uint wlc_down_del_timer(struct wlc_info *wlc)
* disable the hardware, free any transient buffer state.
* Return a count of the number of driver callbacks still pending.
*/
-uint wlc_down(struct wlc_info *wlc)
+uint brcms_c_down(struct brcms_c_info *wlc)
{
uint callbacks = 0;
int i;
bool dev_gone = false;
- struct wlc_txq_info *qi;
+ struct brcms_txq_info *qi;
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
@@ -2424,12 +2173,12 @@ uint wlc_down(struct wlc_info *wlc)
/* in between, mpc could try to bring down again.. */
wlc->going_down = true;
- callbacks += wlc_bmac_down_prep(wlc->hw);
+ callbacks += brcms_b_bmac_down_prep(wlc->hw);
dev_gone = DEVICEREMOVED(wlc);
/* Call any registered down handlers */
- for (i = 0; i < WLC_MAXMODULES; i++) {
+ for (i = 0; i < BRCMS_MAXMODULES; i++) {
if (wlc->modulecb[i].down_fn)
callbacks +=
wlc->modulecb[i].down_fn(wlc->modulecb[i].hdl);
@@ -2437,28 +2186,28 @@ uint wlc_down(struct wlc_info *wlc)
/* cancel the watchdog timer */
if (wlc->WDarmed) {
- if (!wl_del_timer(wlc->wl, wlc->wdtimer))
+ if (!brcms_del_timer(wlc->wl, wlc->wdtimer))
callbacks++;
wlc->WDarmed = false;
}
/* cancel all other timers */
- callbacks += wlc_down_del_timer(wlc);
+ callbacks += brcms_c_down_del_timer(wlc);
wlc->pub->up = false;
wlc_phy_mute_upd(wlc->band->pi, false, PHY_MUTE_ALL);
/* clear txq flow control */
- wlc_txflowcontrol_reset(wlc);
+ brcms_c_txflowcontrol_reset(wlc);
/* flush tx queues */
for (qi = wlc->tx_queues; qi != NULL; qi = qi->next) {
- bcm_pktq_flush(&qi->q, true, NULL, NULL);
+ brcmu_pktq_flush(&qi->q, true, NULL, NULL);
}
- callbacks += wlc_bmac_down_finish(wlc->hw);
+ callbacks += brcms_b_down_finish(wlc->hw);
- /* wlc_bmac_down_finish has done wlc_coredisable(). so clk is off */
+ /* brcms_b_down_finish has done brcms_c_coredisable(). so clk is off */
wlc->clk = false;
wlc->going_down = false;
@@ -2466,22 +2215,23 @@ uint wlc_down(struct wlc_info *wlc)
}
/* Set the current gmode configuration */
-int wlc_set_gmode(struct wlc_info *wlc, u8 gmode, bool config)
+int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
{
int ret = 0;
uint i;
wlc_rateset_t rs;
/* Default to 54g Auto */
- s8 shortslot = WLC_SHORTSLOT_AUTO; /* Advertise and use shortslot (-1/0/1 Auto/Off/On) */
+ /* Advertise and use shortslot (-1/0/1 Auto/Off/On) */
+ s8 shortslot = BRCMS_SHORTSLOT_AUTO;
bool shortslot_restrict = false; /* Restrict association to stations that support shortslot
*/
- bool ignore_bcns = true; /* Ignore legacy beacons on the same channel */
bool ofdm_basic = false; /* Make 6, 12, and 24 basic rates */
- int preamble = WLC_PLCP_LONG; /* Advertise and use short preambles (-1/0/1 Auto/Off/On) */
+ /* Advertise and use short preambles (-1/0/1 Auto/Off/On) */
+ int preamble = BRCMS_PLCP_LONG;
bool preamble_restrict = false; /* Restrict association to stations that support short
* preambles
*/
- struct wlcband *band;
+ struct brcms_band *band;
/* if N-support is enabled, allow Gmode set as long as requested
* Gmode is not GMODE_LEGACY_B
@@ -2490,22 +2240,22 @@ int wlc_set_gmode(struct wlc_info *wlc, u8 gmode, bool config)
return -ENOTSUPP;
/* verify that we are dealing with 2G band and grab the band pointer */
- if (wlc->band->bandtype == WLC_BAND_2G)
+ if (wlc->band->bandtype == BRCM_BAND_2G)
band = wlc->band;
else if ((NBANDS(wlc) > 1) &&
- (wlc->bandstate[OTHERBANDUNIT(wlc)]->bandtype == WLC_BAND_2G))
+ (wlc->bandstate[OTHERBANDUNIT(wlc)]->bandtype == BRCM_BAND_2G))
band = wlc->bandstate[OTHERBANDUNIT(wlc)];
else
return -EINVAL;
/* Legacy or bust when no OFDM is supported by regulatory */
- if ((wlc_channel_locale_flags_in_band(wlc->cmi, band->bandunit) &
- WLC_NO_OFDM) && (gmode != GMODE_LEGACY_B))
+ if ((brcms_c_channel_locale_flags_in_band(wlc->cmi, band->bandunit) &
+ BRCMS_NO_OFDM) && (gmode != GMODE_LEGACY_B))
return -EINVAL;
/* update configuration value */
if (config == true)
- wlc_protection_upd(wlc, WLC_PROT_G_USER, gmode);
+ brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode);
/* Clear supported rates filter */
memset(&wlc->sup_rates_override, 0, sizeof(wlc_rateset_t));
@@ -2515,14 +2265,15 @@ int wlc_set_gmode(struct wlc_info *wlc, u8 gmode, bool config)
switch (gmode) {
case GMODE_LEGACY_B:
- shortslot = WLC_SHORTSLOT_OFF;
- wlc_rateset_copy(&gphy_legacy_rates, &rs);
+ shortslot = BRCMS_SHORTSLOT_OFF;
+ brcms_c_rateset_copy(&gphy_legacy_rates, &rs);
break;
case GMODE_LRS:
if (AP_ENAB(wlc->pub))
- wlc_rateset_copy(&cck_rates, &wlc->sup_rates_override);
+ brcms_c_rateset_copy(&cck_rates,
+ &wlc->sup_rates_override);
break;
case GMODE_AUTO:
@@ -2531,19 +2282,19 @@ int wlc_set_gmode(struct wlc_info *wlc, u8 gmode, bool config)
case GMODE_ONLY:
ofdm_basic = true;
- preamble = WLC_PLCP_SHORT;
+ preamble = BRCMS_PLCP_SHORT;
preamble_restrict = true;
break;
case GMODE_PERFORMANCE:
if (AP_ENAB(wlc->pub)) /* Put all rates into the Supported Rates element */
- wlc_rateset_copy(&cck_ofdm_rates,
+ brcms_c_rateset_copy(&cck_ofdm_rates,
&wlc->sup_rates_override);
- shortslot = WLC_SHORTSLOT_ON;
+ shortslot = BRCMS_SHORTSLOT_ON;
shortslot_restrict = true;
ofdm_basic = true;
- preamble = WLC_PLCP_SHORT;
+ preamble = BRCMS_PLCP_SHORT;
preamble_restrict = true;
break;
@@ -2562,7 +2313,7 @@ int wlc_set_gmode(struct wlc_info *wlc, u8 gmode, bool config)
band->gmode = gmode;
if (band->rspec_override && !IS_CCK(band->rspec_override)) {
band->rspec_override = 0;
- wlc_reprate_init(wlc);
+ brcms_c_reprate_init(wlc);
}
if (band->mrspec_override && !IS_CCK(band->mrspec_override)) {
band->mrspec_override = 0;
@@ -2571,26 +2322,24 @@ int wlc_set_gmode(struct wlc_info *wlc, u8 gmode, bool config)
band->gmode = gmode;
- wlc->ignore_bcns = ignore_bcns;
-
wlc->shortslot_override = shortslot;
if (AP_ENAB(wlc->pub)) {
/* wlc->ap->shortslot_restrict = shortslot_restrict; */
wlc->PLCPHdr_override =
(preamble !=
- WLC_PLCP_LONG) ? WLC_PLCP_SHORT : WLC_PLCP_AUTO;
+ BRCMS_PLCP_LONG) ? BRCMS_PLCP_SHORT : BRCMS_PLCP_AUTO;
}
- if ((AP_ENAB(wlc->pub) && preamble != WLC_PLCP_LONG)
- || preamble == WLC_PLCP_SHORT)
+ if ((AP_ENAB(wlc->pub) && preamble != BRCMS_PLCP_LONG)
+ || preamble == BRCMS_PLCP_SHORT)
wlc->default_bss->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
else
wlc->default_bss->capability &= ~WLAN_CAPABILITY_SHORT_PREAMBLE;
/* Update shortslot capability bit for AP and IBSS */
- if ((AP_ENAB(wlc->pub) && shortslot == WLC_SHORTSLOT_AUTO) ||
- shortslot == WLC_SHORTSLOT_ON)
+ if ((AP_ENAB(wlc->pub) && shortslot == BRCMS_SHORTSLOT_AUTO) ||
+ shortslot == BRCMS_SHORTSLOT_ON)
wlc->default_bss->capability |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
else
wlc->default_bss->capability &=
@@ -2598,26 +2347,26 @@ int wlc_set_gmode(struct wlc_info *wlc, u8 gmode, bool config)
/* Use the default 11g rateset */
if (!rs.count)
- wlc_rateset_copy(&cck_ofdm_rates, &rs);
+ brcms_c_rateset_copy(&cck_ofdm_rates, &rs);
if (ofdm_basic) {
for (i = 0; i < rs.count; i++) {
- if (rs.rates[i] == WLC_RATE_6M
- || rs.rates[i] == WLC_RATE_12M
- || rs.rates[i] == WLC_RATE_24M)
- rs.rates[i] |= WLC_RATE_FLAG;
+ if (rs.rates[i] == BRCM_RATE_6M
+ || rs.rates[i] == BRCM_RATE_12M
+ || rs.rates[i] == BRCM_RATE_24M)
+ rs.rates[i] |= BRCMS_RATE_FLAG;
}
}
/* Set default bss rateset */
wlc->default_bss->rateset.count = rs.count;
- memcpy(wlc->default_bss->rateset.rates, rs.rates,
+ memcpy(wlc->default_bss->rateset.rates, rs.rates,
sizeof(wlc->default_bss->rateset.rates));
return ret;
}
-static int wlc_nmode_validate(struct wlc_info *wlc, s32 nmode)
+static int brcms_c_nmode_validate(struct brcms_c_info *wlc, s32 nmode)
{
int err = 0;
@@ -2629,7 +2378,7 @@ static int wlc_nmode_validate(struct wlc_info *wlc, s32 nmode)
case AUTO:
case WL_11N_2x2:
case WL_11N_3x3:
- if (!(WLC_PHY_11N_CAP(wlc->band)))
+ if (!(BRCMS_PHY_11N_CAP(wlc->band)))
err = -EINVAL;
break;
@@ -2641,27 +2390,27 @@ static int wlc_nmode_validate(struct wlc_info *wlc, s32 nmode)
return err;
}
-int wlc_set_nmode(struct wlc_info *wlc, s32 nmode)
+int brcms_c_set_nmode(struct brcms_c_info *wlc, s32 nmode)
{
uint i;
int err;
- err = wlc_nmode_validate(wlc, nmode);
+ err = brcms_c_nmode_validate(wlc, nmode);
if (err)
return err;
switch (nmode) {
case OFF:
wlc->pub->_n_enab = OFF;
- wlc->default_bss->flags &= ~WLC_BSS_HT;
+ wlc->default_bss->flags &= ~BRCMS_BSS_HT;
/* delete the mcs rates from the default and hw ratesets */
- wlc_rateset_mcs_clear(&wlc->default_bss->rateset);
+ brcms_c_rateset_mcs_clear(&wlc->default_bss->rateset);
for (i = 0; i < NBANDS(wlc); i++) {
memset(wlc->bandstate[i]->hw_rateset.mcs, 0,
MCSSET_LEN);
if (IS_MCS(wlc->band->rspec_override)) {
wlc->bandstate[i]->rspec_override = 0;
- wlc_reprate_init(wlc);
+ brcms_c_reprate_init(wlc);
}
if (IS_MCS(wlc->band->mrspec_override))
wlc->bandstate[i]->mrspec_override = 0;
@@ -2676,14 +2425,14 @@ int wlc_set_nmode(struct wlc_info *wlc, s32 nmode)
case WL_11N_2x2:
case WL_11N_3x3:
/* force GMODE_AUTO if NMODE is ON */
- wlc_set_gmode(wlc, GMODE_AUTO, true);
+ brcms_c_set_gmode(wlc, GMODE_AUTO, true);
if (nmode == WL_11N_3x3)
wlc->pub->_n_enab = SUPPORT_HT;
else
wlc->pub->_n_enab = SUPPORT_11N;
- wlc->default_bss->flags |= WLC_BSS_HT;
+ wlc->default_bss->flags |= BRCMS_BSS_HT;
/* add the mcs rates to the default and hw ratesets */
- wlc_rateset_mcs_build(&wlc->default_bss->rateset,
+ brcms_c_rateset_mcs_build(&wlc->default_bss->rateset,
wlc->stf->txstreams);
for (i = 0; i < NBANDS(wlc); i++)
memcpy(wlc->bandstate[i]->hw_rateset.mcs,
@@ -2697,7 +2446,7 @@ int wlc_set_nmode(struct wlc_info *wlc, s32 nmode)
return err;
}
-static int wlc_set_rateset(struct wlc_info *wlc, wlc_rateset_t *rs_arg)
+static int brcms_c_set_rateset(struct brcms_c_info *wlc, wlc_rateset_t *rs_arg)
{
wlc_rateset_t rs, new;
uint bandunit;
@@ -2705,13 +2454,13 @@ static int wlc_set_rateset(struct wlc_info *wlc, wlc_rateset_t *rs_arg)
memcpy(&rs, rs_arg, sizeof(wlc_rateset_t));
/* check for bad count value */
- if ((rs.count == 0) || (rs.count > WLC_NUMRATES))
+ if ((rs.count == 0) || (rs.count > BRCMS_NUMRATES))
return -EINVAL;
/* try the current band */
bandunit = wlc->band->bandunit;
memcpy(&new, &rs, sizeof(wlc_rateset_t));
- if (wlc_rate_hwrs_filter_sort_validate
+ if (brcms_c_rate_hwrs_filter_sort_validate
(&new, &wlc->bandstate[bandunit]->hw_rateset, true,
wlc->stf->txstreams))
goto good;
@@ -2720,7 +2469,7 @@ static int wlc_set_rateset(struct wlc_info *wlc, wlc_rateset_t *rs_arg)
if (IS_MBAND_UNLOCKED(wlc)) {
bandunit = OTHERBANDUNIT(wlc);
memcpy(&new, &rs, sizeof(wlc_rateset_t));
- if (wlc_rate_hwrs_filter_sort_validate(&new,
+ if (brcms_c_rate_hwrs_filter_sort_validate(&new,
&wlc->
bandstate[bandunit]->
hw_rateset, true,
@@ -2739,18 +2488,18 @@ static int wlc_set_rateset(struct wlc_info *wlc, wlc_rateset_t *rs_arg)
}
/* simplified integer set interface for common ioctl handler */
-int wlc_set(struct wlc_info *wlc, int cmd, int arg)
+int brcms_c_set(struct brcms_c_info *wlc, int cmd, int arg)
{
- return wlc_ioctl(wlc, cmd, (void *)&arg, sizeof(arg), NULL);
+ return brcms_c_ioctl(wlc, cmd, (void *)&arg, sizeof(arg), NULL);
}
/* simplified integer get interface for common ioctl handler */
-int wlc_get(struct wlc_info *wlc, int cmd, int *arg)
+int brcms_c_get(struct brcms_c_info *wlc, int cmd, int *arg)
{
- return wlc_ioctl(wlc, cmd, arg, sizeof(int), NULL);
+ return brcms_c_ioctl(wlc, cmd, arg, sizeof(int), NULL);
}
-static void wlc_ofdm_rateset_war(struct wlc_info *wlc)
+static void brcms_c_ofdm_rateset_war(struct brcms_c_info *wlc)
{
u8 r;
bool war = false;
@@ -2766,28 +2515,25 @@ static void wlc_ofdm_rateset_war(struct wlc_info *wlc)
}
int
-wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
- struct wlc_if *wlcif)
+brcms_c_ioctl(struct brcms_c_info *wlc, int cmd, void *arg, int len,
+ struct brcms_c_if *wlcif)
{
- return _wlc_ioctl(wlc, cmd, arg, len, wlcif);
+ return _brcms_c_ioctl(wlc, cmd, arg, len, wlcif);
}
/* common ioctl handler. return: 0=ok, -1=error, positive=particular error */
static int
-_wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
- struct wlc_if *wlcif)
+_brcms_c_ioctl(struct brcms_c_info *wlc, int cmd, void *arg, int len,
+ struct brcms_c_if *wlcif)
{
int val, *pval;
bool bool_val;
int bcmerror;
- d11regs_t *regs;
- uint i;
struct scb *nextscb;
bool ta_ok;
uint band;
- rw_reg_t *r;
- struct wlc_bsscfg *bsscfg;
- wlc_bss_info_t *current_bss;
+ struct brcms_bss_cfg *bsscfg;
+ struct brcms_bss_info *current_bss;
/* update bsscfg pointer */
bsscfg = wlc->cfg;
@@ -2797,18 +2543,17 @@ _wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
nextscb = NULL;
ta_ok = false;
band = 0;
- r = NULL;
/* If the device is turned off, then it's not "removed" */
if (!wlc->pub->hw_off && DEVICEREMOVED(wlc)) {
wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit,
__func__);
- wl_down(wlc->wl);
+ brcms_down(wlc->wl);
return -EBADE;
}
/* default argument is generic integer */
- pval = arg ? (int *)arg:NULL;
+ pval = arg ? (int *)arg : NULL;
/* This will prevent the misaligned access */
if (pval && (u32) len >= sizeof(val))
@@ -2819,64 +2564,17 @@ _wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
/* bool conversion to avoid duplication below */
bool_val = val != 0;
bcmerror = 0;
- regs = wlc->regs;
-
- /* A few commands don't need any arguments; all the others do. */
- switch (cmd) {
- case WLC_UP:
- case WLC_OUT:
- case WLC_DOWN:
- case WLC_DISASSOC:
- case WLC_RESTART:
- case WLC_REBOOT:
- case WLC_START_CHANNEL_QA:
- case WLC_INIT:
- break;
- default:
- if ((arg == NULL) || (len <= 0)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: Command %d needs "
- "arguments\n",
- wlc->pub->unit, __func__, cmd);
- bcmerror = -EINVAL;
- goto done;
- }
+ if ((arg == NULL) || (len <= 0)) {
+ wiphy_err(wlc->wiphy, "wl%d: %s: Command %d needs arguments\n",
+ wlc->pub->unit, __func__, cmd);
+ bcmerror = -EINVAL;
+ goto done;
}
switch (cmd) {
-#if defined(BCMDBG)
- case WLC_GET_MSGLEVEL:
- *pval = wl_msg_level;
- break;
-
- case WLC_SET_MSGLEVEL:
- wl_msg_level = val;
- break;
-#endif
-
- case WLC_GET_INSTANCE:
- *pval = wlc->pub->unit;
- break;
-
- case WLC_GET_CHANNEL:{
- channel_info_t *ci = (channel_info_t *) arg;
-
- if (len <= (int)sizeof(ci)) {
- bcmerror = EOVERFLOW;
- goto done;
- }
-
- ci->hw_channel =
- CHSPEC_CHANNEL(WLC_BAND_PI_RADIO_CHANSPEC);
- ci->target_channel =
- CHSPEC_CHANNEL(wlc->default_bss->chanspec);
- ci->scan_channel = 0;
-
- break;
- }
-
- case WLC_SET_CHANNEL:{
+ case BRCM_SET_CHANNEL:{
chanspec_t chspec = CH20MHZ_CHSPEC(val);
if (val < 0 || val > MAXCHANNEL) {
@@ -2884,486 +2582,63 @@ _wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
break;
}
- if (!wlc_valid_chanspec_db(wlc->cmi, chspec)) {
+ if (!brcms_c_valid_chanspec_db(wlc->cmi, chspec)) {
bcmerror = -EINVAL;
break;
}
if (!wlc->pub->up && IS_MBAND_UNLOCKED(wlc)) {
if (wlc->band->bandunit !=
- CHSPEC_WLCBANDUNIT(chspec))
+ CHSPEC_BANDUNIT(chspec))
wlc->bandinit_pending = true;
else
wlc->bandinit_pending = false;
}
wlc->default_bss->chanspec = chspec;
- /* wlc_BSSinit() will sanitize the rateset before using it.. */
+ /* brcms_c_BSSinit() will sanitize the rateset before
+ * using it. */
if (wlc->pub->up &&
- (WLC_BAND_PI_RADIO_CHANSPEC != chspec)) {
- wlc_set_home_chanspec(wlc, chspec);
- wlc_suspend_mac_and_wait(wlc);
- wlc_set_chanspec(wlc, chspec);
- wlc_enable_mac(wlc);
- }
- break;
- }
-
-#if defined(BCMDBG)
- case WLC_GET_UCFLAGS:
- if (!wlc->pub->up) {
- bcmerror = -ENOLINK;
- break;
- }
-
- /* optional band is stored in the second integer of incoming buffer */
- band =
- (len <
- (int)(2 * sizeof(int))) ? WLC_BAND_AUTO : ((int *)arg)[1];
-
- /* bcmerror checking */
- bcmerror = wlc_iocregchk(wlc, band);
- if (bcmerror)
- break;
-
- if (val >= MHFMAX) {
- bcmerror = -EINVAL;
- break;
- }
-
- *pval = wlc_bmac_mhf_get(wlc->hw, (u8) val, WLC_BAND_AUTO);
- break;
-
- case WLC_SET_UCFLAGS:
- if (!wlc->pub->up) {
- bcmerror = -ENOLINK;
- break;
- }
-
- /* optional band is stored in the second integer of incoming buffer */
- band =
- (len <
- (int)(2 * sizeof(int))) ? WLC_BAND_AUTO : ((int *)arg)[1];
-
- /* bcmerror checking */
- bcmerror = wlc_iocregchk(wlc, band);
- if (bcmerror)
- break;
-
- i = (u16) val;
- if (i >= MHFMAX) {
- bcmerror = -EINVAL;
- break;
- }
-
- wlc_mhf(wlc, (u8) i, 0xffff, (u16) (val >> NBITS(u16)),
- WLC_BAND_AUTO);
- break;
-
- case WLC_GET_SHMEM:
- ta_ok = true;
-
- /* optional band is stored in the second integer of incoming buffer */
- band =
- (len <
- (int)(2 * sizeof(int))) ? WLC_BAND_AUTO : ((int *)arg)[1];
-
- /* bcmerror checking */
- bcmerror = wlc_iocregchk(wlc, band);
- if (bcmerror)
- break;
-
- if (val & 1) {
- bcmerror = -EINVAL;
- break;
- }
-
- *pval = wlc_read_shm(wlc, (u16) val);
- break;
-
- case WLC_SET_SHMEM:
- ta_ok = true;
-
- /* optional band is stored in the second integer of incoming buffer */
- band =
- (len <
- (int)(2 * sizeof(int))) ? WLC_BAND_AUTO : ((int *)arg)[1];
-
- /* bcmerror checking */
- bcmerror = wlc_iocregchk(wlc, band);
- if (bcmerror)
- break;
-
- if (val & 1) {
- bcmerror = -EINVAL;
- break;
- }
-
- wlc_write_shm(wlc, (u16) val,
- (u16) (val >> NBITS(u16)));
- break;
-
- case WLC_R_REG: /* MAC registers */
- ta_ok = true;
- r = (rw_reg_t *) arg;
- band = WLC_BAND_AUTO;
-
- if (len < (int)(sizeof(rw_reg_t) - sizeof(uint))) {
- bcmerror = -EOVERFLOW;
- break;
- }
-
- if (len >= (int)sizeof(rw_reg_t))
- band = r->band;
-
- /* bcmerror checking */
- bcmerror = wlc_iocregchk(wlc, band);
- if (bcmerror)
- break;
-
- if ((r->byteoff + r->size) > sizeof(d11regs_t)) {
- bcmerror = -EINVAL;
- break;
- }
- if (r->size == sizeof(u32))
- r->val =
- R_REG((u32 *)((unsigned char *)(unsigned long)regs +
- r->byteoff));
- else if (r->size == sizeof(u16))
- r->val =
- R_REG((u16 *)((unsigned char *)(unsigned long)regs +
- r->byteoff));
- else
- bcmerror = -EINVAL;
- break;
-
- case WLC_W_REG:
- ta_ok = true;
- r = (rw_reg_t *) arg;
- band = WLC_BAND_AUTO;
-
- if (len < (int)(sizeof(rw_reg_t) - sizeof(uint))) {
- bcmerror = -EOVERFLOW;
- break;
- }
-
- if (len >= (int)sizeof(rw_reg_t))
- band = r->band;
-
- /* bcmerror checking */
- bcmerror = wlc_iocregchk(wlc, band);
- if (bcmerror)
- break;
-
- if (r->byteoff + r->size > sizeof(d11regs_t)) {
- bcmerror = -EINVAL;
- break;
- }
- if (r->size == sizeof(u32))
- W_REG((u32 *)((unsigned char *)(unsigned long) regs +
- r->byteoff), r->val);
- else if (r->size == sizeof(u16))
- W_REG((u16 *)((unsigned char *)(unsigned long) regs +
- r->byteoff), r->val);
- else
- bcmerror = -EINVAL;
- break;
-#endif /* BCMDBG */
-
- case WLC_GET_TXANT:
- *pval = wlc->stf->txant;
- break;
-
- case WLC_SET_TXANT:
- bcmerror = wlc_stf_ant_txant_validate(wlc, (s8) val);
- if (bcmerror < 0)
- break;
-
- wlc->stf->txant = (s8) val;
-
- /* if down, we are done */
- if (!wlc->pub->up)
- break;
-
- wlc_suspend_mac_and_wait(wlc);
-
- wlc_stf_phy_txant_upd(wlc);
- wlc_beacon_phytxctl_txant_upd(wlc, wlc->bcn_rspec);
-
- wlc_enable_mac(wlc);
-
- break;
-
- case WLC_GET_ANTDIV:{
- u8 phy_antdiv;
-
- /* return configured value if core is down */
- if (!wlc->pub->up) {
- *pval = wlc->stf->ant_rx_ovr;
-
- } else {
- if (wlc_phy_ant_rxdiv_get
- (wlc->band->pi, &phy_antdiv))
- *pval = (int)phy_antdiv;
- else
- *pval = (int)wlc->stf->ant_rx_ovr;
- }
-
- break;
- }
- case WLC_SET_ANTDIV:
- /* values are -1=driver default, 0=force0, 1=force1, 2=start1, 3=start0 */
- if ((val < -1) || (val > 3)) {
- bcmerror = -EINVAL;
- break;
- }
-
- if (val == -1)
- val = ANT_RX_DIV_DEF;
-
- wlc->stf->ant_rx_ovr = (u8) val;
- wlc_phy_ant_rxdiv_set(wlc->band->pi, (u8) val);
- break;
-
- case WLC_GET_RX_ANT:{ /* get latest used rx antenna */
- u16 rxstatus;
-
- if (!wlc->pub->up) {
- bcmerror = -ENOLINK;
- break;
- }
-
- rxstatus = R_REG(&wlc->regs->phyrxstatus0);
- if (rxstatus == 0xdead || rxstatus == (u16) -1) {
- bcmerror = -EBADE;
- break;
- }
- *pval = (rxstatus & PRXS0_RXANT_UPSUBBAND) ? 1 : 0;
- break;
- }
-
-#if defined(BCMDBG)
- case WLC_GET_UCANTDIV:
- if (!wlc->clk) {
- bcmerror = -EIO;
- break;
- }
-
- *pval =
- (wlc_bmac_mhf_get(wlc->hw, MHF1, WLC_BAND_AUTO) &
- MHF1_ANTDIV);
- break;
-
- case WLC_SET_UCANTDIV:{
- if (!wlc->pub->up) {
- bcmerror = -ENOLINK;
- break;
- }
-
- /* if multiband, band must be locked */
- if (IS_MBAND_UNLOCKED(wlc)) {
- bcmerror = -ENOMEDIUM;
- break;
+ (BRCMS_BAND_PI_RADIO_CHANSPEC != chspec)) {
+ brcms_c_set_home_chanspec(wlc, chspec);
+ brcms_c_suspend_mac_and_wait(wlc);
+ brcms_c_set_chanspec(wlc, chspec);
+ brcms_c_enable_mac(wlc);
}
-
- wlc_mhf(wlc, MHF1, MHF1_ANTDIV,
- (val ? MHF1_ANTDIV : 0), WLC_BAND_AUTO);
break;
}
-#endif /* defined(BCMDBG) */
- case WLC_GET_SRL:
- *pval = wlc->SRL;
- break;
-
- case WLC_SET_SRL:
+ case BRCM_SET_SRL:
if (val >= 1 && val <= RETRY_SHORT_MAX) {
int ac;
wlc->SRL = (u16) val;
- wlc_bmac_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL);
+ brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL);
for (ac = 0; ac < AC_COUNT; ac++) {
- WLC_WME_RETRY_SHORT_SET(wlc, ac, wlc->SRL);
+ BRCMS_WME_RETRY_SHORT_SET(wlc, ac, wlc->SRL);
}
- wlc_wme_retries_write(wlc);
+ brcms_c_wme_retries_write(wlc);
} else
bcmerror = -EINVAL;
break;
- case WLC_GET_LRL:
- *pval = wlc->LRL;
- break;
-
- case WLC_SET_LRL:
+ case BRCM_SET_LRL:
if (val >= 1 && val <= 255) {
int ac;
wlc->LRL = (u16) val;
- wlc_bmac_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL);
+ brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL);
for (ac = 0; ac < AC_COUNT; ac++) {
- WLC_WME_RETRY_LONG_SET(wlc, ac, wlc->LRL);
- }
- wlc_wme_retries_write(wlc);
- } else
- bcmerror = -EINVAL;
- break;
-
- case WLC_GET_CWMIN:
- *pval = wlc->band->CWmin;
- break;
-
- case WLC_SET_CWMIN:
- if (!wlc->clk) {
- bcmerror = -EIO;
- break;
- }
-
- if (val >= 1 && val <= 255) {
- wlc_set_cwmin(wlc, (u16) val);
- } else
- bcmerror = -EINVAL;
- break;
-
- case WLC_GET_CWMAX:
- *pval = wlc->band->CWmax;
- break;
-
- case WLC_SET_CWMAX:
- if (!wlc->clk) {
- bcmerror = -EIO;
- break;
- }
-
- if (val >= 255 && val <= 2047) {
- wlc_set_cwmax(wlc, (u16) val);
- } else
- bcmerror = -EINVAL;
- break;
-
- case WLC_GET_RADIO: /* use mask if don't want to expose some internal bits */
- *pval = wlc->pub->radio_disabled;
- break;
-
- case WLC_SET_RADIO:{ /* 32 bits input, higher 16 bits are mask, lower 16 bits are value to
- * set
- */
- u16 radiomask, radioval;
- uint validbits =
- WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE;
- mbool new = 0;
-
- radiomask = (val & 0xffff0000) >> 16;
- radioval = val & 0x0000ffff;
-
- if ((radiomask == 0) || (radiomask & ~validbits)
- || (radioval & ~validbits)
- || ((radioval & ~radiomask) != 0)) {
- wiphy_err(wlc->wiphy, "SET_RADIO with wrong "
- "bits 0x%x\n", val);
- bcmerror = -EINVAL;
- break;
- }
-
- new =
- (wlc->pub->radio_disabled & ~radiomask) | radioval;
- wlc->pub->radio_disabled = new;
-
- wlc_radio_hwdisable_upd(wlc);
- wlc_radio_upd(wlc);
- break;
- }
-
- case WLC_GET_PHYTYPE:
- *pval = WLC_PHYTYPE(wlc->band->phytype);
- break;
-
-#if defined(BCMDBG)
- case WLC_GET_KEY:
- if ((val >= 0) && (val < WLC_MAX_WSEC_KEYS(wlc))) {
- wl_wsec_key_t key;
-
- wsec_key_t *src_key = wlc->wsec_keys[val];
-
- if (len < (int)sizeof(key)) {
- bcmerror = -EOVERFLOW;
- break;
- }
-
- memset((char *)&key, 0, sizeof(key));
- if (src_key) {
- key.index = src_key->id;
- key.len = src_key->len;
- memcpy(key.data, src_key->data, key.len);
- key.algo = src_key->algo;
- if (WSEC_SOFTKEY(wlc, src_key, bsscfg))
- key.flags |= WL_SOFT_KEY;
- if (src_key->flags & WSEC_PRIMARY_KEY)
- key.flags |= WL_PRIMARY_KEY;
-
- memcpy(key.ea, src_key->ea, ETH_ALEN);
+ BRCMS_WME_RETRY_LONG_SET(wlc, ac, wlc->LRL);
}
-
- memcpy(arg, &key, sizeof(key));
+ brcms_c_wme_retries_write(wlc);
} else
bcmerror = -EINVAL;
break;
-#endif /* defined(BCMDBG) */
-
- case WLC_SET_KEY:
- bcmerror =
- wlc_iovar_op(wlc, "wsec_key", NULL, 0, arg, len, IOV_SET,
- wlcif);
- break;
-
- case WLC_GET_KEY_SEQ:{
- wsec_key_t *key;
-
- if (len < DOT11_WPA_KEY_RSC_LEN) {
- bcmerror = -EOVERFLOW;
- break;
- }
-
- /* Return the key's tx iv as an EAPOL sequence counter.
- * This will be used to supply the RSC value to a supplicant.
- * The format is 8 bytes, with least significant in seq[0].
- */
-
- key = WSEC_KEY(wlc, val);
- if ((val >= 0) && (val < WLC_MAX_WSEC_KEYS(wlc)) &&
- (key != NULL)) {
- u8 seq[DOT11_WPA_KEY_RSC_LEN];
- u16 lo;
- u32 hi;
- /* group keys in WPA-NONE (IBSS only, AES and TKIP) use a global TXIV */
- if ((bsscfg->WPA_auth & WPA_AUTH_NONE) &&
- is_zero_ether_addr(key->ea)) {
- lo = bsscfg->wpa_none_txiv.lo;
- hi = bsscfg->wpa_none_txiv.hi;
- } else {
- lo = key->txiv.lo;
- hi = key->txiv.hi;
- }
-
- /* format the buffer, low to high */
- seq[0] = lo & 0xff;
- seq[1] = (lo >> 8) & 0xff;
- seq[2] = hi & 0xff;
- seq[3] = (hi >> 8) & 0xff;
- seq[4] = (hi >> 16) & 0xff;
- seq[5] = (hi >> 24) & 0xff;
- seq[6] = 0;
- seq[7] = 0;
-
- memcpy(arg, seq, sizeof(seq));
- } else {
- bcmerror = -EINVAL;
- }
- break;
- }
- case WLC_GET_CURR_RATESET:{
+ case BRCM_GET_CURR_RATESET:{
wl_rateset_t *ret_rs = (wl_rateset_t *) arg;
wlc_rateset_t *rs;
@@ -3383,25 +2658,7 @@ _wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
break;
}
- case WLC_GET_RATESET:{
- wlc_rateset_t rs;
- wl_rateset_t *ret_rs = (wl_rateset_t *) arg;
-
- memset(&rs, 0, sizeof(wlc_rateset_t));
- wlc_default_rateset(wlc, (wlc_rateset_t *) &rs);
-
- if (len < (int)(rs.count + sizeof(rs.count))) {
- bcmerror = -EOVERFLOW;
- break;
- }
-
- /* Copy only legacy rateset section */
- ret_rs->count = rs.count;
- memcpy(&ret_rs->rates, &rs.rates, rs.count);
- break;
- }
-
- case WLC_SET_RATESET:{
+ case BRCM_SET_RATESET:{
wlc_rateset_t rs;
wl_rateset_t *in_rs = (wl_rateset_t *) arg;
@@ -3410,7 +2667,7 @@ _wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
break;
}
- if (in_rs->count > WLC_NUMRATES) {
+ if (in_rs->count > BRCMS_NUMRATES) {
bcmerror = -ENOBUFS;
break;
}
@@ -3433,149 +2690,24 @@ _wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
MCSSET_LEN);
}
- bcmerror = wlc_set_rateset(wlc, &rs);
+ bcmerror = brcms_c_set_rateset(wlc, &rs);
if (!bcmerror)
- wlc_ofdm_rateset_war(wlc);
+ brcms_c_ofdm_rateset_war(wlc);
break;
}
- case WLC_GET_BCNPRD:
- if (BSSCFG_STA(bsscfg) && bsscfg->BSS && bsscfg->associated)
- *pval = current_bss->beacon_period;
- else
- *pval = wlc->default_bss->beacon_period;
- break;
-
- case WLC_SET_BCNPRD:
+ case BRCM_SET_BCNPRD:
/* range [1, 0xffff] */
if (val >= DOT11_MIN_BEACON_PERIOD
- && val <= DOT11_MAX_BEACON_PERIOD) {
+ && val <= DOT11_MAX_BEACON_PERIOD)
wlc->default_bss->beacon_period = (u16) val;
- } else
- bcmerror = -EINVAL;
- break;
-
- case WLC_GET_DTIMPRD:
- if (BSSCFG_STA(bsscfg) && bsscfg->BSS && bsscfg->associated)
- *pval = current_bss->dtim_period;
else
- *pval = wlc->default_bss->dtim_period;
- break;
-
- case WLC_SET_DTIMPRD:
- /* range [1, 0xff] */
- if (val >= DOT11_MIN_DTIM_PERIOD
- && val <= DOT11_MAX_DTIM_PERIOD) {
- wlc->default_bss->dtim_period = (u8) val;
- } else
bcmerror = -EINVAL;
break;
-#ifdef SUPPORT_PS
- case WLC_GET_PM:
- *pval = wlc->PM;
- break;
-
- case WLC_SET_PM:
- if ((val >= PM_OFF) && (val <= PM_MAX)) {
- wlc->PM = (u8) val;
- if (wlc->pub->up) {
- }
- /* Change watchdog driver to align watchdog with tbtt if possible */
- wlc_watchdog_upd(wlc, PS_ALLOWED(wlc));
- } else
- bcmerror = -EBADE;
- break;
-#endif /* SUPPORT_PS */
-
-#ifdef SUPPORT_PS
-#ifdef BCMDBG
- case WLC_GET_WAKE:
- if (AP_ENAB(wlc->pub)) {
- bcmerror = -BCME_NOTSTA;
- break;
- }
- *pval = wlc->wake;
- break;
-
- case WLC_SET_WAKE:
- if (AP_ENAB(wlc->pub)) {
- bcmerror = -BCME_NOTSTA;
- break;
- }
-
- wlc->wake = val ? true : false;
-
- /* if down, we're done */
- if (!wlc->pub->up)
- break;
-
- /* apply to the mac */
- wlc_set_ps_ctrl(wlc);
- break;
-#endif /* BCMDBG */
-#endif /* SUPPORT_PS */
-
- case WLC_GET_REVINFO:
- bcmerror = wlc_get_revision_info(wlc, arg, (uint) len);
- break;
-
- case WLC_GET_AP:
- *pval = (int)AP_ENAB(wlc->pub);
- break;
-
- case WLC_GET_ATIM:
- if (bsscfg->associated)
- *pval = (int)current_bss->atim_window;
- else
- *pval = (int)wlc->default_bss->atim_window;
- break;
-
- case WLC_SET_ATIM:
- wlc->default_bss->atim_window = (u32) val;
- break;
-
-#ifdef SUPPORT_HWKEY
- case WLC_GET_WSEC:
- bcmerror =
- wlc_iovar_op(wlc, "wsec", NULL, 0, arg, len, IOV_GET,
- wlcif);
- break;
-
- case WLC_SET_WSEC:
- bcmerror =
- wlc_iovar_op(wlc, "wsec", NULL, 0, arg, len, IOV_SET,
- wlcif);
- break;
-
- case WLC_GET_WPA_AUTH:
- *pval = (int)bsscfg->WPA_auth;
- break;
-
- case WLC_SET_WPA_AUTH:
- /* change of WPA_Auth modifies the PS_ALLOWED state */
- if (BSSCFG_STA(bsscfg)) {
- bsscfg->WPA_auth = (u16) val;
- } else
- bsscfg->WPA_auth = (u16) val;
- break;
-#endif /* SUPPORT_HWKEY */
-
- case WLC_GET_BANDLIST:
- /* count of number of bands, followed by each band type */
- *pval++ = NBANDS(wlc);
- *pval++ = wlc->band->bandtype;
- if (NBANDS(wlc) > 1)
- *pval++ = wlc->bandstate[OTHERBANDUNIT(wlc)]->bandtype;
- break;
-
- case WLC_GET_BAND:
- *pval = wlc->bandlocked ? wlc->band->bandtype : WLC_BAND_AUTO;
- break;
-
- case WLC_GET_PHYLIST:
+ case BRCM_GET_PHYLIST:
{
unsigned char *cp = arg;
if (len < 3) {
@@ -3583,28 +2715,19 @@ _wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
break;
}
- if (WLCISNPHY(wlc->band)) {
+ if (BRCMS_ISNPHY(wlc->band))
*cp++ = 'n';
- } else if (WLCISLCNPHY(wlc->band)) {
+ else if (BRCMS_ISLCNPHY(wlc->band))
*cp++ = 'c';
- } else if (WLCISSSLPNPHY(wlc->band)) {
+ else if (BRCMS_ISSSLPNPHY(wlc->band))
*cp++ = 's';
- }
*cp = '\0';
break;
}
- case WLC_GET_SHORTSLOT:
- *pval = wlc->shortslot;
- break;
-
- case WLC_GET_SHORTSLOT_OVERRIDE:
- *pval = wlc->shortslot_override;
- break;
-
- case WLC_SET_SHORTSLOT_OVERRIDE:
- if ((val != WLC_SHORTSLOT_AUTO) &&
- (val != WLC_SHORTSLOT_OFF) && (val != WLC_SHORTSLOT_ON)) {
+ case BRCMS_SET_SHORTSLOT_OVERRIDE:
+ if (val != BRCMS_SHORTSLOT_AUTO && val != BRCMS_SHORTSLOT_OFF &&
+ val != BRCMS_SHORTSLOT_ON) {
bcmerror = -EINVAL;
break;
}
@@ -3621,270 +2744,21 @@ _wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
/* let watchdog or beacon processing update shortslot */
} else if (wlc->pub->up) {
/* unassociated shortslot is off */
- wlc_switch_shortslot(wlc, false);
+ brcms_c_switch_shortslot(wlc, false);
} else {
- /* driver is down, so just update the wlc_info value */
- if (wlc->shortslot_override == WLC_SHORTSLOT_AUTO) {
+ /* driver is down, so just update the brcms_c_info
+ * value */
+ if (wlc->shortslot_override == BRCMS_SHORTSLOT_AUTO) {
wlc->shortslot = false;
} else {
wlc->shortslot =
(wlc->shortslot_override ==
- WLC_SHORTSLOT_ON);
- }
- }
-
- break;
-
- case WLC_GET_LEGACY_ERP:
- *pval = wlc->include_legacy_erp;
- break;
-
- case WLC_SET_LEGACY_ERP:
- if (wlc->include_legacy_erp == bool_val)
- break;
-
- wlc->include_legacy_erp = bool_val;
-
- if (AP_ENAB(wlc->pub) && wlc->clk) {
- wlc_update_beacon(wlc);
- wlc_update_probe_resp(wlc, true);
- }
- break;
-
- case WLC_GET_GMODE:
- if (wlc->band->bandtype == WLC_BAND_2G)
- *pval = wlc->band->gmode;
- else if (NBANDS(wlc) > 1)
- *pval = wlc->bandstate[OTHERBANDUNIT(wlc)]->gmode;
- break;
-
- case WLC_SET_GMODE:
- if (!wlc->pub->associated)
- bcmerror = wlc_set_gmode(wlc, (u8) val, true);
- else {
- bcmerror = -EISCONN;
- break;
- }
- break;
-
- case WLC_GET_GMODE_PROTECTION:
- *pval = wlc->protection->_g;
- break;
-
- case WLC_GET_PROTECTION_CONTROL:
- *pval = wlc->protection->overlap;
- break;
-
- case WLC_SET_PROTECTION_CONTROL:
- if ((val != WLC_PROTECTION_CTL_OFF) &&
- (val != WLC_PROTECTION_CTL_LOCAL) &&
- (val != WLC_PROTECTION_CTL_OVERLAP)) {
- bcmerror = -EINVAL;
- break;
- }
-
- wlc_protection_upd(wlc, WLC_PROT_OVERLAP, (s8) val);
-
- /* Current g_protection will sync up to the specified control alg in watchdog
- * if the driver is up and associated.
- * If the driver is down or not associated, the control setting has no effect.
- */
- break;
-
- case WLC_GET_GMODE_PROTECTION_OVERRIDE:
- *pval = wlc->protection->g_override;
- break;
-
- case WLC_SET_GMODE_PROTECTION_OVERRIDE:
- if ((val != WLC_PROTECTION_AUTO) &&
- (val != WLC_PROTECTION_OFF) && (val != WLC_PROTECTION_ON)) {
- bcmerror = -EINVAL;
- break;
- }
-
- wlc_protection_upd(wlc, WLC_PROT_G_OVR, (s8) val);
-
- break;
-
- case WLC_SET_SUP_RATESET_OVERRIDE:{
- wlc_rateset_t rs, new;
-
- /* copyin */
- if (len < (int)sizeof(wlc_rateset_t)) {
- bcmerror = -EOVERFLOW;
- break;
- }
- memcpy(&rs, arg, sizeof(wlc_rateset_t));
-
- /* check for bad count value */
- if (rs.count > WLC_NUMRATES) {
- bcmerror = -EINVAL;
- break;
- }
-
- /* this command is only appropriate for gmode operation */
- if (!(wlc->band->gmode ||
- ((NBANDS(wlc) > 1)
- && wlc->bandstate[OTHERBANDUNIT(wlc)]->gmode))) {
- /* gmode only command when not in gmode */
- bcmerror = -EINVAL;
- break;
- }
-
- /* check for an empty rateset to clear the override */
- if (rs.count == 0) {
- memset(&wlc->sup_rates_override, 0,
- sizeof(wlc_rateset_t));
- break;
- }
-
- /*
- * validate rateset by comparing pre and
- * post sorted against 11g hw rates
- */
- wlc_rateset_filter(&rs, &new, false,
- WLC_RATES_CCK_OFDM, WLC_RATE_MASK,
- BSS_N_ENAB(wlc, bsscfg));
- wlc_rate_hwrs_filter_sort_validate(&new,
- &cck_ofdm_rates,
- false,
- wlc->stf->txstreams);
- if (rs.count != new.count) {
- bcmerror = -EINVAL;
- break;
- }
-
- /* apply new rateset to the override */
- memcpy(&wlc->sup_rates_override, &new,
- sizeof(wlc_rateset_t));
-
- /* update bcn and probe resp if needed */
- if (wlc->pub->up && AP_ENAB(wlc->pub)
- && wlc->pub->associated) {
- wlc_update_beacon(wlc);
- wlc_update_probe_resp(wlc, true);
- }
- break;
- }
-
- case WLC_GET_SUP_RATESET_OVERRIDE:
- /* this command is only appropriate for gmode operation */
- if (!(wlc->band->gmode ||
- ((NBANDS(wlc) > 1)
- && wlc->bandstate[OTHERBANDUNIT(wlc)]->gmode))) {
- /* gmode only command when not in gmode */
- bcmerror = -EINVAL;
- break;
- }
- if (len < (int)sizeof(wlc_rateset_t)) {
- bcmerror = -EOVERFLOW;
- break;
- }
- memcpy(arg, &wlc->sup_rates_override, sizeof(wlc_rateset_t));
-
- break;
-
- case WLC_GET_PRB_RESP_TIMEOUT:
- *pval = wlc->prb_resp_timeout;
- break;
-
- case WLC_SET_PRB_RESP_TIMEOUT:
- if (wlc->pub->up) {
- bcmerror = -EISCONN;
- break;
- }
- if (val < 0 || val >= 0xFFFF) {
- bcmerror = -EINVAL; /* bad value */
- break;
- }
- wlc->prb_resp_timeout = (u16) val;
- break;
-
- case WLC_GET_KEY_PRIMARY:{
- wsec_key_t *key;
-
- /* treat the 'val' parm as the key id */
- key = WSEC_BSS_DEFAULT_KEY(bsscfg);
- if (key != NULL) {
- *pval = key->id == val ? true : false;
- } else {
- bcmerror = -EINVAL;
- }
- break;
- }
-
- case WLC_SET_KEY_PRIMARY:{
- wsec_key_t *key, *old_key;
-
- bcmerror = -EINVAL;
-
- /* treat the 'val' parm as the key id */
- for (i = 0; i < WSEC_MAX_DEFAULT_KEYS; i++) {
- key = bsscfg->bss_def_keys[i];
- if (key != NULL && key->id == val) {
- old_key = WSEC_BSS_DEFAULT_KEY(bsscfg);
- if (old_key != NULL)
- old_key->flags &=
- ~WSEC_PRIMARY_KEY;
- key->flags |= WSEC_PRIMARY_KEY;
- bsscfg->wsec_index = i;
- bcmerror = 0;
- }
+ BRCMS_SHORTSLOT_ON);
}
- break;
- }
-
-#ifdef BCMDBG
- case WLC_INIT:
- wl_init(wlc->wl);
- break;
-#endif
-
- case WLC_SET_VAR:
- case WLC_GET_VAR:{
- char *name;
- /* validate the name value */
- name = (char *)arg;
- for (i = 0; i < (uint) len && *name != '\0';
- i++, name++)
- ;
-
- if (i == (uint) len) {
- bcmerror = -EOVERFLOW;
- break;
- }
- i++; /* include the null in the string length */
-
- if (cmd == WLC_GET_VAR) {
- bcmerror =
- wlc_iovar_op(wlc, arg,
- (void *)((s8 *) arg + i),
- len - i, arg, len, IOV_GET,
- wlcif);
- } else
- bcmerror =
- wlc_iovar_op(wlc, arg, NULL, 0,
- (void *)((s8 *) arg + i),
- len - i, IOV_SET, wlcif);
-
- break;
}
- case WLC_SET_WSEC_PMK:
- bcmerror = -ENOTSUPP;
- break;
-
-#if defined(BCMDBG)
- case WLC_CURRENT_PWR:
- if (!wlc->pub->up)
- bcmerror = -ENOLINK;
- else
- bcmerror = wlc_get_current_txpwr(wlc, arg, len);
break;
-#endif
- case WLC_LAST:
- wiphy_err(wlc->wiphy, "%s: WLC_LAST\n", __func__);
}
done:
@@ -3894,83 +2768,22 @@ _wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
return bcmerror;
}
-#if defined(BCMDBG)
-/* consolidated register access ioctl error checking */
-int wlc_iocregchk(struct wlc_info *wlc, uint band)
-{
- /* if band is specified, it must be the current band */
- if ((band != WLC_BAND_AUTO) && (band != (uint) wlc->band->bandtype))
- return -EINVAL;
-
- /* if multiband and band is not specified, band must be locked */
- if ((band == WLC_BAND_AUTO) && IS_MBAND_UNLOCKED(wlc))
- return -ENOMEDIUM;
-
- /* must have core clocks */
- if (!wlc->clk)
- return -EIO;
-
- return 0;
-}
-#endif /* defined(BCMDBG) */
-
-/* Look up the given var name in the given table */
-static const bcm_iovar_t *wlc_iovar_lookup(const bcm_iovar_t *table,
- const char *name)
-{
- const bcm_iovar_t *vi;
- const char *lookup_name;
-
- /* skip any ':' delimited option prefixes */
- lookup_name = strrchr(name, ':');
- if (lookup_name != NULL)
- lookup_name++;
- else
- lookup_name = name;
-
- for (vi = table; vi->name; vi++) {
- if (!strcmp(vi->name, lookup_name))
- return vi;
- }
- /* ran to end of table */
-
- return NULL; /* var name not found */
-}
-
-/* simplified integer get interface for common WLC_GET_VAR ioctl handler */
-int wlc_iovar_getint(struct wlc_info *wlc, const char *name, int *arg)
-{
- return wlc_iovar_op(wlc, name, NULL, 0, arg, sizeof(s32), IOV_GET,
- NULL);
-}
-
-/* simplified integer set interface for common WLC_SET_VAR ioctl handler */
-int wlc_iovar_setint(struct wlc_info *wlc, const char *name, int arg)
-{
- return wlc_iovar_op(wlc, name, NULL, 0, (void *)&arg, sizeof(arg),
- IOV_SET, NULL);
-}
-
/*
- * register iovar table, watchdog and down handlers.
- * calling function must keep 'iovars' until wlc_module_unregister is called.
- * 'iovar' must have the last entry's name field being NULL as terminator.
+ * register watchdog and down handlers.
*/
-int wlc_module_register(struct wlc_pub *pub, const bcm_iovar_t *iovars,
- const char *name, void *hdl, iovar_fn_t i_fn,
+int brcms_c_module_register(struct brcms_pub *pub,
+ const char *name, void *hdl,
watchdog_fn_t w_fn, down_fn_t d_fn)
{
- struct wlc_info *wlc = (struct wlc_info *) pub->wlc;
+ struct brcms_c_info *wlc = (struct brcms_c_info *) pub->wlc;
int i;
/* find an empty entry and just add, no duplication check! */
- for (i = 0; i < WLC_MAXMODULES; i++) {
+ for (i = 0; i < BRCMS_MAXMODULES; i++) {
if (wlc->modulecb[i].name[0] == '\0') {
strncpy(wlc->modulecb[i].name, name,
sizeof(wlc->modulecb[i].name) - 1);
- wlc->modulecb[i].iovars = iovars;
wlc->modulecb[i].hdl = hdl;
- wlc->modulecb[i].iovar_fn = i_fn;
wlc->modulecb[i].watchdog_fn = w_fn;
wlc->modulecb[i].down_fn = d_fn;
return 0;
@@ -3981,15 +2794,16 @@ int wlc_module_register(struct wlc_pub *pub, const bcm_iovar_t *iovars,
}
/* unregister module callbacks */
-int wlc_module_unregister(struct wlc_pub *pub, const char *name, void *hdl)
+int
+brcms_c_module_unregister(struct brcms_pub *pub, const char *name, void *hdl)
{
- struct wlc_info *wlc = (struct wlc_info *) pub->wlc;
+ struct brcms_c_info *wlc = (struct brcms_c_info *) pub->wlc;
int i;
if (wlc == NULL)
return -ENODATA;
- for (i = 0; i < WLC_MAXMODULES; i++) {
+ for (i = 0; i < BRCMS_MAXMODULES; i++) {
if (!strcmp(wlc->modulecb[i].name, name) &&
(wlc->modulecb[i].hdl == hdl)) {
memset(&wlc->modulecb[i], 0, sizeof(struct modulecb));
@@ -4002,7 +2816,7 @@ int wlc_module_unregister(struct wlc_pub *pub, const char *name, void *hdl)
}
/* Write WME tunable parameters for retransmit/max rate from wlc struct to ucode */
-static void wlc_wme_retries_write(struct wlc_info *wlc)
+static void brcms_c_wme_retries_write(struct brcms_c_info *wlc)
{
int ac;
@@ -4011,307 +2825,19 @@ static void wlc_wme_retries_write(struct wlc_info *wlc)
return;
for (ac = 0; ac < AC_COUNT; ac++) {
- wlc_write_shm(wlc, M_AC_TXLMT_ADDR(ac), wlc->wme_retries[ac]);
- }
-}
-
-/* Get or set an iovar. The params/p_len pair specifies any additional
- * qualifying parameters (e.g. an "element index") for a get, while the
- * arg/len pair is the buffer for the value to be set or retrieved.
- * Operation (get/set) is specified by the last argument.
- * interface context provided by wlcif
- *
- * All pointers may point into the same buffer.
- */
-int
-wlc_iovar_op(struct wlc_info *wlc, const char *name,
- void *params, int p_len, void *arg, int len,
- bool set, struct wlc_if *wlcif)
-{
- int err = 0;
- int val_size;
- const bcm_iovar_t *vi = NULL;
- u32 actionid;
- int i;
-
- if (!set && (len == sizeof(int)) &&
- !(IS_ALIGNED((unsigned long)(arg), (uint) sizeof(int)))) {
- wiphy_err(wlc->wiphy, "wl%d: %s unaligned get ptr for %s\n",
- wlc->pub->unit, __func__, name);
- return -ENOTSUPP;
- }
-
- /* find the given iovar name */
- for (i = 0; i < WLC_MAXMODULES; i++) {
- if (!wlc->modulecb[i].iovars)
- continue;
- vi = wlc_iovar_lookup(wlc->modulecb[i].iovars, name);
- if (vi)
- break;
- }
- /* iovar name not found */
- if (i >= WLC_MAXMODULES) {
- return -ENOTSUPP;
- }
-
- /* set up 'params' pointer in case this is a set command so that
- * the convenience int and bool code can be common to set and get
- */
- if (params == NULL) {
- params = arg;
- p_len = len;
- }
-
- if (vi->type == IOVT_VOID)
- val_size = 0;
- else if (vi->type == IOVT_BUFFER)
- val_size = len;
- else
- /* all other types are integer sized */
- val_size = sizeof(int);
-
- actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
-
- /* Do the actual parameter implementation */
- err = wlc->modulecb[i].iovar_fn(wlc->modulecb[i].hdl, vi, actionid,
- name, params, p_len, arg, len, val_size,
- wlcif);
- return err;
-}
-
-int
-wlc_iovar_check(struct wlc_pub *pub, const bcm_iovar_t *vi, void *arg, int len,
- bool set)
-{
- struct wlc_info *wlc = (struct wlc_info *) pub->wlc;
- int err = 0;
- s32 int_val = 0;
-
- /* check generic condition flags */
- if (set) {
- if (((vi->flags & IOVF_SET_DOWN) && wlc->pub->up) ||
- ((vi->flags & IOVF_SET_UP) && !wlc->pub->up)) {
- err = (wlc->pub->up ? -EISCONN : -ENOLINK);
- } else if ((vi->flags & IOVF_SET_BAND)
- && IS_MBAND_UNLOCKED(wlc)) {
- err = -ENOMEDIUM;
- } else if ((vi->flags & IOVF_SET_CLK) && !wlc->clk) {
- err = -EIO;
- }
- } else {
- if (((vi->flags & IOVF_GET_DOWN) && wlc->pub->up) ||
- ((vi->flags & IOVF_GET_UP) && !wlc->pub->up)) {
- err = (wlc->pub->up ? -EISCONN : -ENOLINK);
- } else if ((vi->flags & IOVF_GET_BAND)
- && IS_MBAND_UNLOCKED(wlc)) {
- err = -ENOMEDIUM;
- } else if ((vi->flags & IOVF_GET_CLK) && !wlc->clk) {
- err = -EIO;
- }
- }
-
- if (err)
- goto exit;
-
- /* length check on io buf */
- err = bcm_iovar_lencheck(vi, arg, len, set);
- if (err)
- goto exit;
-
- /* On set, check value ranges for integer types */
- if (set) {
- switch (vi->type) {
- case IOVT_BOOL:
- case IOVT_INT8:
- case IOVT_INT16:
- case IOVT_INT32:
- case IOVT_UINT8:
- case IOVT_UINT16:
- case IOVT_UINT32:
- memcpy(&int_val, arg, sizeof(int));
- err = wlc_iovar_rangecheck(wlc, int_val, vi);
- break;
- }
- }
- exit:
- return err;
-}
-
-/* handler for iovar table wlc_iovars */
-/*
- * IMPLEMENTATION NOTE: In order to avoid checking for get/set in each
- * iovar case, the switch statement maps the iovar id into separate get
- * and set values. If you add a new iovar to the switch you MUST use
- * IOV_GVAL and/or IOV_SVAL in the case labels to avoid conflict with
- * another case.
- * Please use params for additional qualifying parameters.
- */
-int
-wlc_doiovar(void *hdl, const bcm_iovar_t *vi, u32 actionid,
- const char *name, void *params, uint p_len, void *arg, int len,
- int val_size, struct wlc_if *wlcif)
-{
- struct wlc_info *wlc = hdl;
- struct wlc_bsscfg *bsscfg;
- int err = 0;
- s32 int_val = 0;
- s32 int_val2 = 0;
- s32 *ret_int_ptr;
- bool bool_val;
- bool bool_val2;
- wlc_bss_info_t *current_bss;
-
- BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
-
- bsscfg = NULL;
- current_bss = NULL;
-
- err = wlc_iovar_check(wlc->pub, vi, arg, len, IOV_ISSET(actionid));
- if (err != 0)
- return err;
-
- /* convenience int and bool vals for first 8 bytes of buffer */
- if (p_len >= (int)sizeof(int_val))
- memcpy(&int_val, params, sizeof(int_val));
-
- if (p_len >= (int)sizeof(int_val) * 2)
- memcpy(&int_val2,
- (void *)((unsigned long)params + sizeof(int_val)),
- sizeof(int_val));
-
- /* convenience int ptr for 4-byte gets (requires int aligned arg) */
- ret_int_ptr = (s32 *) arg;
-
- bool_val = (int_val != 0) ? true : false;
- bool_val2 = (int_val2 != 0) ? true : false;
-
- BCMMSG(wlc->wiphy, "wl%d: id %d\n", wlc->pub->unit, IOV_ID(actionid));
- /* Do the actual parameter implementation */
- switch (actionid) {
- case IOV_SVAL(IOV_RTSTHRESH):
- wlc->RTSThresh = int_val;
- break;
-
- case IOV_GVAL(IOV_QTXPOWER):{
- uint qdbm;
- bool override;
-
- err = wlc_phy_txpower_get(wlc->band->pi, &qdbm,
- &override);
- if (err != 0)
- return err;
-
- /* Return qdbm units */
- *ret_int_ptr =
- qdbm | (override ? WL_TXPWR_OVERRIDE : 0);
- break;
- }
-
- /* As long as override is false, this only sets the *user* targets.
- User can twiddle this all he wants with no harm.
- wlc_phy_txpower_set() explicitly sets override to false if
- not internal or test.
- */
- case IOV_SVAL(IOV_QTXPOWER):{
- u8 qdbm;
- bool override;
-
- /* Remove override bit and clip to max qdbm value */
- qdbm = (u8)min_t(u32, (int_val & ~WL_TXPWR_OVERRIDE), 0xff);
- /* Extract override setting */
- override = (int_val & WL_TXPWR_OVERRIDE) ? true : false;
- err =
- wlc_phy_txpower_set(wlc->band->pi, qdbm, override);
- break;
- }
-
- case IOV_GVAL(IOV_MPC):
- *ret_int_ptr = (s32) wlc->mpc;
- break;
-
- case IOV_SVAL(IOV_MPC):
- wlc->mpc = bool_val;
- wlc_radio_mpc_upd(wlc);
-
- break;
-
- case IOV_GVAL(IOV_BCN_LI_BCN):
- *ret_int_ptr = wlc->bcn_li_bcn;
- break;
-
- case IOV_SVAL(IOV_BCN_LI_BCN):
- wlc->bcn_li_bcn = (u8) int_val;
- if (wlc->pub->up)
- wlc_bcn_li_upd(wlc);
- break;
-
- default:
- wiphy_err(wlc->wiphy, "wl%d: %s: unsupported\n",
- wlc->pub->unit, __func__);
- err = -ENOTSUPP;
- break;
- }
-
- goto exit; /* avoid unused label warning */
-
- exit:
- return err;
-}
-
-static int
-wlc_iovar_rangecheck(struct wlc_info *wlc, u32 val, const bcm_iovar_t *vi)
-{
- int err = 0;
- u32 min_val = 0;
- u32 max_val = 0;
-
- /* Only ranged integers are checked */
- switch (vi->type) {
- case IOVT_INT32:
- max_val |= 0x7fffffff;
- /* fall through */
- case IOVT_INT16:
- max_val |= 0x00007fff;
- /* fall through */
- case IOVT_INT8:
- max_val |= 0x0000007f;
- min_val = ~max_val;
- if (vi->flags & IOVF_NTRL)
- min_val = 1;
- else if (vi->flags & IOVF_WHL)
- min_val = 0;
- /* Signed values are checked against max_val and min_val */
- if ((s32) val < (s32) min_val
- || (s32) val > (s32) max_val)
- err = -EINVAL;
- break;
-
- case IOVT_UINT32:
- max_val |= 0xffffffff;
- /* fall through */
- case IOVT_UINT16:
- max_val |= 0x0000ffff;
- /* fall through */
- case IOVT_UINT8:
- max_val |= 0x000000ff;
- if (vi->flags & IOVF_NTRL)
- min_val = 1;
- if ((val < min_val) || (val > max_val))
- err = -EINVAL;
- break;
+ brcms_c_write_shm(wlc, M_AC_TXLMT_ADDR(ac),
+ wlc->wme_retries[ac]);
}
-
- return err;
}
#ifdef BCMDBG
-static const char *supr_reason[] = {
+static const char * const supr_reason[] = {
"None", "PMQ Entry", "Flush request",
"Previous frag failure", "Channel mismatch",
"Lifetime Expiry", "Underflow"
};
-static void wlc_print_txs_status(u16 s)
+static void brcms_c_print_txs_status(u16 s)
{
printk(KERN_DEBUG "[15:12] %d frame attempts\n",
(s & TX_STATUS_FRM_RTX_MASK) >> TX_STATUS_FRM_RTX_SHIFT);
@@ -4331,7 +2857,7 @@ static void wlc_print_txs_status(u16 s)
}
#endif /* BCMDBG */
-void wlc_print_txstatus(tx_status_t *txs)
+void brcms_c_print_txstatus(struct tx_status *txs)
{
#if defined(BCMDBG)
u16 s = txs->status;
@@ -4343,7 +2869,7 @@ void wlc_print_txstatus(tx_status_t *txs)
printk(KERN_DEBUG "TxStatus: %04x", s);
printk(KERN_DEBUG "\n");
- wlc_print_txs_status(s);
+ brcms_c_print_txs_status(s);
printk(KERN_DEBUG "LastTxTime: %04x ", txs->lasttxtime);
printk(KERN_DEBUG "Seq: %04x ", txs->sequence);
@@ -4356,10 +2882,10 @@ void wlc_print_txstatus(tx_status_t *txs)
#endif /* defined(BCMDBG) */
}
-void wlc_statsupd(struct wlc_info *wlc)
+void brcms_c_statsupd(struct brcms_c_info *wlc)
{
int i;
- macstat_t macstats;
+ struct macstat macstats;
#ifdef BCMDBG
u16 delta;
u16 rxf0ovfl;
@@ -4380,8 +2906,8 @@ void wlc_statsupd(struct wlc_info *wlc)
#endif /* BCMDBG */
/* Read mac stats from contiguous shared memory */
- wlc_bmac_copyfrom_shm(wlc->hw, M_UCODE_MACSTAT,
- &macstats, sizeof(macstat_t));
+ brcms_b_copyfrom_shm(wlc->hw, M_UCODE_MACSTAT,
+ &macstats, sizeof(struct macstat));
#ifdef BCMDBG
/* check for rx fifo 0 overflow */
@@ -4409,27 +2935,28 @@ void wlc_statsupd(struct wlc_info *wlc)
}
}
-bool wlc_chipmatch(u16 vendor, u16 device)
+bool brcms_c_chipmatch(u16 vendor, u16 device)
{
if (vendor != PCI_VENDOR_ID_BROADCOM) {
- pr_err("wlc_chipmatch: unknown vendor id %04x\n", vendor);
+ pr_err("chipmatch: unknown vendor id %04x\n", vendor);
return false;
}
+ if (device == BCM43224_D11N_ID_VEN1)
+ return true;
if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID))
return true;
-
if (device == BCM4313_D11N2G_ID)
return true;
if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID))
return true;
- pr_err("wlc_chipmatch: unknown device id %04x\n", device);
+ pr_err("chipmatch: unknown device id %04x\n", device);
return false;
}
#if defined(BCMDBG)
-void wlc_print_txdesc(d11txh_t *txh)
+void brcms_c_print_txdesc(struct d11txh *txh)
{
u16 mtcl = le16_to_cpu(txh->MacTxControlLow);
u16 mtch = le16_to_cpu(txh->MacTxControlHigh);
@@ -4465,7 +2992,7 @@ void wlc_print_txdesc(d11txh_t *txh)
/* add plcp header along with txh descriptor */
printk(KERN_DEBUG "Raw TxDesc + plcp header:\n");
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- txh, sizeof(d11txh_t) + 48);
+ txh, sizeof(struct d11txh) + 48);
printk(KERN_DEBUG "TxCtlLow: %04x ", mtcl);
printk(KERN_DEBUG "TxCtlHigh: %04x ", mtch);
@@ -4481,16 +3008,16 @@ void wlc_print_txdesc(d11txh_t *txh)
printk(KERN_DEBUG "XtraFrameTypes: %04x ", xtraft);
printk(KERN_DEBUG "\n");
- bcm_format_hex(hexbuf, iv, sizeof(txh->IV));
+ brcmu_format_hex(hexbuf, iv, sizeof(txh->IV));
printk(KERN_DEBUG "SecIV: %s\n", hexbuf);
- bcm_format_hex(hexbuf, ra, sizeof(txh->TxFrameRA));
+ brcmu_format_hex(hexbuf, ra, sizeof(txh->TxFrameRA));
printk(KERN_DEBUG "RA: %s\n", hexbuf);
printk(KERN_DEBUG "Fb FES Time: %04x ", tfestfb);
- bcm_format_hex(hexbuf, rtspfb, sizeof(txh->RTSPLCPFallback));
+ brcmu_format_hex(hexbuf, rtspfb, sizeof(txh->RTSPLCPFallback));
printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
printk(KERN_DEBUG "RTS DUR: %04x ", rtsdfb);
- bcm_format_hex(hexbuf, fragpfb, sizeof(txh->FragPLCPFallback));
+ brcmu_format_hex(hexbuf, fragpfb, sizeof(txh->FragPLCPFallback));
printk(KERN_DEBUG "PLCP: %s ", hexbuf);
printk(KERN_DEBUG "DUR: %04x", fragdfb);
printk(KERN_DEBUG "\n");
@@ -4506,16 +3033,16 @@ void wlc_print_txdesc(d11txh_t *txh)
printk(KERN_DEBUG "MaxAggbyte_fb: %04x\n", mabyte_f);
printk(KERN_DEBUG "MinByte: %04x\n", mmbyte);
- bcm_format_hex(hexbuf, rtsph, sizeof(txh->RTSPhyHeader));
+ brcmu_format_hex(hexbuf, rtsph, sizeof(txh->RTSPhyHeader));
printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
- bcm_format_hex(hexbuf, (u8 *) &rts, sizeof(txh->rts_frame));
+ brcmu_format_hex(hexbuf, (u8 *) &rts, sizeof(txh->rts_frame));
printk(KERN_DEBUG "RTS Frame: %s", hexbuf);
printk(KERN_DEBUG "\n");
}
#endif /* defined(BCMDBG) */
#if defined(BCMDBG)
-void wlc_print_rxh(d11rxhdr_t *rxh)
+void brcms_c_print_rxh(struct d11rxhdr *rxh)
{
u16 len = rxh->RxFrameSize;
u16 phystatus_0 = rxh->PhyRxStatus_0;
@@ -4526,7 +3053,7 @@ void wlc_print_rxh(d11rxhdr_t *rxh)
u16 macstatus2 = rxh->RxStatus2;
char flagstr[64];
char lenbuf[20];
- static const bcm_bit_desc_t macstat_flags[] = {
+ static const struct brcmu_bit_desc macstat_flags[] = {
{RXS_FCSERR, "FCSErr"},
{RXS_RESPFRAMETX, "Reply"},
{RXS_PBPRES, "PADDING"},
@@ -4537,9 +3064,10 @@ void wlc_print_rxh(d11rxhdr_t *rxh)
};
printk(KERN_DEBUG "Raw RxDesc:\n");
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, rxh, sizeof(d11rxhdr_t));
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, rxh,
+ sizeof(struct d11rxhdr));
- bcm_format_flags(macstat_flags, macstatus1, flagstr, 64);
+ brcmu_format_flags(macstat_flags, macstatus1, flagstr, 64);
snprintf(lenbuf, sizeof(lenbuf), "0x%x", len);
@@ -4554,35 +3082,9 @@ void wlc_print_rxh(d11rxhdr_t *rxh)
}
#endif /* defined(BCMDBG) */
-#if defined(BCMDBG)
-int wlc_format_ssid(char *buf, const unsigned char ssid[], uint ssid_len)
-{
- uint i, c;
- char *p = buf;
- char *endp = buf + SSID_FMT_BUF_LEN;
-
- if (ssid_len > IEEE80211_MAX_SSID_LEN)
- ssid_len = IEEE80211_MAX_SSID_LEN;
-
- for (i = 0; i < ssid_len; i++) {
- c = (uint) ssid[i];
- if (c == '\\') {
- *p++ = '\\';
- *p++ = '\\';
- } else if (isprint((unsigned char) c)) {
- *p++ = (char)c;
- } else {
- p += snprintf(p, (endp - p), "\\x%02X", c);
- }
- }
- *p = '\0';
- return (int)(p - buf);
-}
-#endif /* defined(BCMDBG) */
-
-static u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate)
+static u16 brcms_c_rate_shm_offset(struct brcms_c_info *wlc, u8 rate)
{
- return wlc_bmac_rate_shm_offset(wlc->hw, rate);
+ return brcms_b_rate_shm_offset(wlc->hw, rate);
}
/* Callback for device removed */
@@ -4597,14 +3099,14 @@ static u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate)
* Returns true if packet consumed (queued), false if not.
*/
bool
-wlc_prec_enq(struct wlc_info *wlc, struct pktq *q, void *pkt, int prec)
+brcms_c_prec_enq(struct brcms_c_info *wlc, struct pktq *q, void *pkt, int prec)
{
- return wlc_prec_enq_head(wlc, q, pkt, prec, false);
+ return brcms_c_prec_enq_head(wlc, q, pkt, prec, false);
}
bool
-wlc_prec_enq_head(struct wlc_info *wlc, struct pktq *q, struct sk_buff *pkt,
- int prec, bool head)
+brcms_c_prec_enq_head(struct brcms_c_info *wlc, struct pktq *q,
+ struct sk_buff *pkt, int prec, bool head)
{
struct sk_buff *p;
int eprec = -1; /* precedence to evict from */
@@ -4613,7 +3115,7 @@ wlc_prec_enq_head(struct wlc_info *wlc, struct pktq *q, struct sk_buff *pkt,
if (pktq_pfull(q, prec))
eprec = prec;
else if (pktq_full(q)) {
- p = bcm_pktq_peek_tail(q, &eprec);
+ p = brcmu_pktq_peek_tail(q, &eprec);
if (eprec > prec) {
wiphy_err(wlc->wiphy, "%s: Failing: eprec %d > prec %d"
"\n", __func__, eprec, prec);
@@ -4635,41 +3137,41 @@ wlc_prec_enq_head(struct wlc_info *wlc, struct pktq *q, struct sk_buff *pkt,
}
/* Evict packet according to discard policy */
- p = discard_oldest ? bcm_pktq_pdeq(q, eprec) :
- bcm_pktq_pdeq_tail(q, eprec);
- bcm_pkt_buf_free_skb(p);
+ p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) :
+ brcmu_pktq_pdeq_tail(q, eprec);
+ brcmu_pkt_buf_free_skb(p);
}
/* Enqueue */
if (head)
- p = bcm_pktq_penq_head(q, prec, pkt);
+ p = brcmu_pktq_penq_head(q, prec, pkt);
else
- p = bcm_pktq_penq(q, prec, pkt);
+ p = brcmu_pktq_penq(q, prec, pkt);
return true;
}
-void wlc_txq_enq(void *ctx, struct scb *scb, struct sk_buff *sdu,
+void brcms_c_txq_enq(void *ctx, struct scb *scb, struct sk_buff *sdu,
uint prec)
{
- struct wlc_info *wlc = (struct wlc_info *) ctx;
- struct wlc_txq_info *qi = wlc->pkt_queue; /* Check me */
+ struct brcms_c_info *wlc = (struct brcms_c_info *) ctx;
+ struct brcms_txq_info *qi = wlc->pkt_queue; /* Check me */
struct pktq *q = &qi->q;
int prio;
prio = sdu->priority;
- if (!wlc_prec_enq(wlc, q, sdu, prec)) {
+ if (!brcms_c_prec_enq(wlc, q, sdu, prec)) {
if (!EDCF_ENAB(wlc->pub)
|| (wlc->pub->wlfeatureflag & WL_SWFL_FLOWCONTROL))
- wiphy_err(wlc->wiphy, "wl%d: wlc_txq_enq: txq overflow"
+ wiphy_err(wlc->wiphy, "wl%d: txq_enq: txq overflow"
"\n", wlc->pub->unit);
/*
- * XXX we might hit this condtion in case
+ * we might hit this condition in case of
* packet flooding from mac80211 stack
*/
- bcm_pkt_buf_free_skb(sdu);
+ brcmu_pkt_buf_free_skb(sdu);
}
/* Check if flow control needs to be turned on after enqueuing the packet
@@ -4679,18 +3181,18 @@ void wlc_txq_enq(void *ctx, struct scb *scb, struct sk_buff *sdu,
if (!EDCF_ENAB(wlc->pub)
|| (wlc->pub->wlfeatureflag & WL_SWFL_FLOWCONTROL)) {
if (pktq_len(q) >= wlc->pub->tunables->datahiwat) {
- wlc_txflowcontrol(wlc, qi, ON, ALLPRIO);
+ brcms_c_txflowcontrol(wlc, qi, ON, ALLPRIO);
}
} else if (wlc->pub->_priofc) {
if (pktq_plen(q, wlc_prio2prec_map[prio]) >=
wlc->pub->tunables->datahiwat) {
- wlc_txflowcontrol(wlc, qi, ON, prio);
+ brcms_c_txflowcontrol(wlc, qi, ON, prio);
}
}
}
bool
-wlc_sendpkt_mac80211(struct wlc_info *wlc, struct sk_buff *sdu,
+brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
struct ieee80211_hw *hw)
{
u8 prio;
@@ -4705,21 +3207,22 @@ wlc_sendpkt_mac80211(struct wlc_info *wlc, struct sk_buff *sdu,
fifo = prio2fifo[prio];
pkt = sdu;
if (unlikely
- (wlc_d11hdrs_mac80211(wlc, hw, pkt, scb, 0, 1, fifo, 0, NULL, 0)))
+ (brcms_c_d11hdrs_mac80211(
+ wlc, hw, pkt, scb, 0, 1, fifo, 0, NULL, 0)))
return -EINVAL;
- wlc_txq_enq(wlc, scb, pkt, WLC_PRIO_TO_PREC(prio));
- wlc_send_q(wlc);
+ brcms_c_txq_enq(wlc, scb, pkt, BRCMS_PRIO_TO_PREC(prio));
+ brcms_c_send_q(wlc);
return 0;
}
-void wlc_send_q(struct wlc_info *wlc)
+void brcms_c_send_q(struct brcms_c_info *wlc)
{
struct sk_buff *pkt[DOT11_MAXNUMFRAGS];
int prec;
u16 prec_map;
int err = 0, i, count;
uint fifo;
- struct wlc_txq_info *qi = wlc->pkt_queue;
+ struct brcms_txq_info *qi = wlc->pkt_queue;
struct pktq *q = &qi->q;
struct ieee80211_tx_info *tx_info;
@@ -4733,22 +3236,23 @@ void wlc_send_q(struct wlc_info *wlc)
/* Send all the enq'd pkts that we can.
* Dequeue packets with precedence with empty HW fifo only
*/
- while (prec_map && (pkt[0] = bcm_pktq_mdeq(q, prec_map, &prec))) {
+ while (prec_map && (pkt[0] = brcmu_pktq_mdeq(q, prec_map, &prec))) {
tx_info = IEEE80211_SKB_CB(pkt[0]);
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
- err = wlc_sendampdu(wlc->ampdu, qi, pkt, prec);
+ err = brcms_c_sendampdu(wlc->ampdu, qi, pkt, prec);
} else {
count = 1;
- err = wlc_prep_pdu(wlc, pkt[0], &fifo);
+ err = brcms_c_prep_pdu(wlc, pkt[0], &fifo);
if (!err) {
for (i = 0; i < count; i++) {
- wlc_txfifo(wlc, fifo, pkt[i], true, 1);
+ brcms_c_txfifo(wlc, fifo, pkt[i], true,
+ 1);
}
}
}
if (err == -EBUSY) {
- bcm_pktq_penq_head(q, prec, pkt[0]);
+ brcmu_pktq_penq_head(q, prec, pkt[0]);
/* If send failed due to any other reason than a change in
* HW FIFO condition, quit. Otherwise, read the new prec_map!
*/
@@ -4761,17 +3265,17 @@ void wlc_send_q(struct wlc_info *wlc)
/* Check if flow control needs to be turned off after sending the packet */
if (!EDCF_ENAB(wlc->pub)
|| (wlc->pub->wlfeatureflag & WL_SWFL_FLOWCONTROL)) {
- if (wlc_txflowcontrol_prio_isset(wlc, qi, ALLPRIO)
+ if (brcms_c_txflowcontrol_prio_isset(wlc, qi, ALLPRIO)
&& (pktq_len(q) < wlc->pub->tunables->datahiwat / 2)) {
- wlc_txflowcontrol(wlc, qi, OFF, ALLPRIO);
+ brcms_c_txflowcontrol(wlc, qi, OFF, ALLPRIO);
}
} else if (wlc->pub->_priofc) {
int prio;
for (prio = MAXPRIO; prio >= 0; prio--) {
- if (wlc_txflowcontrol_prio_isset(wlc, qi, prio) &&
+ if (brcms_c_txflowcontrol_prio_isset(wlc, qi, prio) &&
(pktq_plen(q, wlc_prio2prec_map[prio]) <
wlc->pub->tunables->datahiwat / 2)) {
- wlc_txflowcontrol(wlc, qi, OFF, prio);
+ brcms_c_txflowcontrol(wlc, qi, OFF, prio);
}
}
}
@@ -4784,8 +3288,8 @@ void wlc_send_q(struct wlc_info *wlc)
* for MC frames so is used as part of the sequence number.
*/
static inline u16
-bcmc_fid_generate(struct wlc_info *wlc, struct wlc_bsscfg *bsscfg,
- d11txh_t *txh)
+bcmc_fid_generate(struct brcms_c_info *wlc, struct brcms_bss_cfg *bsscfg,
+ struct d11txh *txh)
{
u16 frameid;
@@ -4800,13 +3304,13 @@ bcmc_fid_generate(struct wlc_info *wlc, struct wlc_bsscfg *bsscfg,
}
void
-wlc_txfifo(struct wlc_info *wlc, uint fifo, struct sk_buff *p, bool commit,
- s8 txpktpend)
+brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p,
+ bool commit, s8 txpktpend)
{
u16 frameid = INVALIDFID;
- d11txh_t *txh;
+ struct d11txh *txh;
- txh = (d11txh_t *) (p->data);
+ txh = (struct d11txh *) (p->data);
/* When a BC/MC frame is being committed to the BCMC fifo via DMA (NOT PIO), update
* ucode or BSS info as appropriate.
@@ -4816,12 +3320,12 @@ wlc_txfifo(struct wlc_info *wlc, uint fifo, struct sk_buff *p, bool commit,
}
- if (WLC_WAR16165(wlc))
- wlc_war16165(wlc, true);
+ if (BRCMS_WAR16165(wlc))
+ brcms_c_war16165(wlc, true);
/* Bump up pending count for if not using rpc. If rpc is used, this will be handled
- * in wlc_bmac_txfifo()
+ * in brcms_b_txfifo()
*/
if (commit) {
TXPKTPENDINC(wlc, fifo, txpktpend);
@@ -4834,31 +3338,32 @@ wlc_txfifo(struct wlc_info *wlc, uint fifo, struct sk_buff *p, bool commit,
BCMCFID(wlc, frameid);
if (dma_txfast(wlc->hw->di[fifo], p, commit) < 0) {
- wiphy_err(wlc->wiphy, "wlc_txfifo: fatal, toss frames !!!\n");
+ wiphy_err(wlc->wiphy, "txfifo: fatal, toss frames !!!\n");
}
}
void
-wlc_compute_plcp(struct wlc_info *wlc, ratespec_t rspec, uint length, u8 *plcp)
+brcms_c_compute_plcp(struct brcms_c_info *wlc, ratespec_t rspec,
+ uint length, u8 *plcp)
{
if (IS_MCS(rspec)) {
- wlc_compute_mimo_plcp(rspec, length, plcp);
+ brcms_c_compute_mimo_plcp(rspec, length, plcp);
} else if (IS_OFDM(rspec)) {
- wlc_compute_ofdm_plcp(rspec, length, plcp);
+ brcms_c_compute_ofdm_plcp(rspec, length, plcp);
} else {
- wlc_compute_cck_plcp(wlc, rspec, length, plcp);
+ brcms_c_compute_cck_plcp(wlc, rspec, length, plcp);
}
return;
}
/* Rate: 802.11 rate code, length: PSDU length in octets */
-static void wlc_compute_mimo_plcp(ratespec_t rspec, uint length, u8 *plcp)
+static void brcms_c_compute_mimo_plcp(ratespec_t rspec, uint length, u8 *plcp)
{
u8 mcs = (u8) (rspec & RSPEC_RATE_MASK);
plcp[0] = mcs;
if (RSPEC_IS40MHZ(rspec) || (mcs == 32))
plcp[0] |= MIMO_PLCP_40MHZ;
- WLC_SET_MIMO_PLCP_LEN(plcp, length);
+ BRCMS_SET_MIMO_PLCP_LEN(plcp, length);
plcp[3] = RSPEC_MIMOPLCP3(rspec); /* rspec already holds this byte */
plcp[3] |= 0x7; /* set smoothing, not sounding ppdu & reserved */
plcp[4] = 0; /* number of extension spatial streams bit 0 & 1 */
@@ -4867,16 +3372,16 @@ static void wlc_compute_mimo_plcp(ratespec_t rspec, uint length, u8 *plcp)
/* Rate: 802.11 rate code, length: PSDU length in octets */
static void
-wlc_compute_ofdm_plcp(ratespec_t rspec, u32 length, u8 *plcp)
+brcms_c_compute_ofdm_plcp(ratespec_t rspec, u32 length, u8 *plcp)
{
u8 rate_signal;
u32 tmp = 0;
int rate = RSPEC2RATE(rspec);
/* encode rate per 802.11a-1999 sec 17.3.4.1, with lsb transmitted first */
- rate_signal = rate_info[rate] & WLC_RATE_MASK;
+ rate_signal = rate_info[rate] & BRCMS_RATE_MASK;
memset(plcp, 0, D11_PHY_HDR_LEN);
- D11A_PHY_HDR_SRATE((ofdm_phy_hdr_t *) plcp, rate_signal);
+ D11A_PHY_HDR_SRATE((struct ofdm_phy_hdr *) plcp, rate_signal);
tmp = (length & 0xfff) << 5;
plcp[2] |= (tmp >> 16) & 0xff;
@@ -4893,25 +3398,25 @@ wlc_compute_ofdm_plcp(ratespec_t rspec, u32 length, u8 *plcp)
* Broken out for PRQ.
*/
-static void wlc_cck_plcp_set(struct wlc_info *wlc, int rate_500, uint length,
- u8 *plcp)
+static void brcms_c_cck_plcp_set(struct brcms_c_info *wlc, int rate_500,
+ uint length, u8 *plcp)
{
u16 usec = 0;
u8 le = 0;
switch (rate_500) {
- case WLC_RATE_1M:
+ case BRCM_RATE_1M:
usec = length << 3;
break;
- case WLC_RATE_2M:
+ case BRCM_RATE_2M:
usec = length << 2;
break;
- case WLC_RATE_5M5:
+ case BRCM_RATE_5M5:
usec = (length << 4) / 11;
if ((length << 4) - (usec * 11) > 0)
usec++;
break;
- case WLC_RATE_11M:
+ case BRCM_RATE_11M:
usec = (length << 3) / 11;
if ((length << 3) - (usec * 11) > 0) {
usec++;
@@ -4921,9 +3426,9 @@ static void wlc_cck_plcp_set(struct wlc_info *wlc, int rate_500, uint length,
break;
default:
- wiphy_err(wlc->wiphy, "wlc_cck_plcp_set: unsupported rate %d"
+ wiphy_err(wlc->wiphy, "brcms_c_cck_plcp_set: unsupported rate %d"
"\n", rate_500);
- rate_500 = WLC_RATE_1M;
+ rate_500 = BRCM_RATE_1M;
usec = length << 3;
break;
}
@@ -4940,15 +3445,15 @@ static void wlc_cck_plcp_set(struct wlc_info *wlc, int rate_500, uint length,
}
/* Rate: 802.11 rate code, length: PSDU length in octets */
-static void wlc_compute_cck_plcp(struct wlc_info *wlc, ratespec_t rspec,
+static void brcms_c_compute_cck_plcp(struct brcms_c_info *wlc, ratespec_t rspec,
uint length, u8 *plcp)
{
int rate = RSPEC2RATE(rspec);
- wlc_cck_plcp_set(wlc, rate, length, plcp);
+ brcms_c_cck_plcp_set(wlc, rate, length, plcp);
}
-/* wlc_compute_frame_dur()
+/* brcms_c_compute_frame_dur()
*
* Calculate the 802.11 MAC header DUR field for MPDU
* DUR for a single frame = 1 SIFS + 1 ACK
@@ -4959,15 +3464,15 @@ static void wlc_compute_cck_plcp(struct wlc_info *wlc, ratespec_t rspec,
* preamble_type use short/GF or long/MM PLCP header
*/
static u16
-wlc_compute_frame_dur(struct wlc_info *wlc, ratespec_t rate, u8 preamble_type,
- uint next_frag_len)
+brcms_c_compute_frame_dur(struct brcms_c_info *wlc, ratespec_t rate,
+ u8 preamble_type, uint next_frag_len)
{
u16 dur, sifs;
sifs = SIFS(wlc->band);
dur = sifs;
- dur += (u16) wlc_calc_ack_time(wlc, rate, preamble_type);
+ dur += (u16) brcms_c_calc_ack_time(wlc, rate, preamble_type);
if (next_frag_len) {
/* Double the current DUR to get 2 SIFS + 2 ACKs */
@@ -4975,13 +3480,13 @@ wlc_compute_frame_dur(struct wlc_info *wlc, ratespec_t rate, u8 preamble_type,
/* add another SIFS and the frag time */
dur += sifs;
dur +=
- (u16) wlc_calc_frame_time(wlc, rate, preamble_type,
+ (u16) brcms_c_calc_frame_time(wlc, rate, preamble_type,
next_frag_len);
}
return dur;
}
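/*
 * A minimal standalone sketch of the DUR rule described above, with the
 * SIFS, ACK and next-fragment airtimes taken as precomputed microsecond
 * inputs rather than driver calls.
 */
#include <stdint.h>

static uint16_t sketch_frame_dur_us(uint16_t sifs_us, uint16_t ack_us,
				    uint16_t next_frag_us)
{
	uint16_t dur = sifs_us + ack_us;	/* single frame: 1 SIFS + 1 ACK */

	if (next_frag_us) {
		dur *= 2;			/* 2 SIFS + 2 ACKs */
		dur += sifs_us + next_frag_us;	/* + SIFS + fragment airtime */
	}
	return dur;
}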
-/* wlc_compute_rtscts_dur()
+/* brcms_c_compute_rtscts_dur()
*
* Calculate the 802.11 MAC header DUR field for an RTS or CTS frame
* DUR for normal RTS/CTS w/ frame = 3 SIFS + 1 CTS + next frame time + 1 ACK
@@ -4993,9 +3498,10 @@ wlc_compute_frame_dur(struct wlc_info *wlc, ratespec_t rate, u8 preamble_type,
* frame_len next MPDU frame length in bytes
*/
u16
-wlc_compute_rtscts_dur(struct wlc_info *wlc, bool cts_only, ratespec_t rts_rate,
- ratespec_t frame_rate, u8 rts_preamble_type,
- u8 frame_preamble_type, uint frame_len, bool ba)
+brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
+ ratespec_t rts_rate,
+ ratespec_t frame_rate, u8 rts_preamble_type,
+ u8 frame_preamble_type, uint frame_len, bool ba)
{
u16 dur, sifs;
@@ -5004,38 +3510,38 @@ wlc_compute_rtscts_dur(struct wlc_info *wlc, bool cts_only, ratespec_t rts_rate,
if (!cts_only) { /* RTS/CTS */
dur = 3 * sifs;
dur +=
- (u16) wlc_calc_cts_time(wlc, rts_rate,
+ (u16) brcms_c_calc_cts_time(wlc, rts_rate,
rts_preamble_type);
} else { /* CTS-TO-SELF */
dur = 2 * sifs;
}
dur +=
- (u16) wlc_calc_frame_time(wlc, frame_rate, frame_preamble_type,
+ (u16) brcms_c_calc_frame_time(wlc, frame_rate, frame_preamble_type,
frame_len);
if (ba)
dur +=
- (u16) wlc_calc_ba_time(wlc, frame_rate,
- WLC_SHORT_PREAMBLE);
+ (u16) brcms_c_calc_ba_time(wlc, frame_rate,
+ BRCMS_SHORT_PREAMBLE);
else
dur +=
- (u16) wlc_calc_ack_time(wlc, frame_rate,
+ (u16) brcms_c_calc_ack_time(wlc, frame_rate,
frame_preamble_type);
return dur;
}
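/*
 * A minimal standalone sketch of the two NAV cases spelled out above, again
 * with all airtimes as precomputed microsecond inputs. The last term is a
 * BlockAck airtime when the 'ba' path is taken, an ACK airtime otherwise.
 */
#include <stdbool.h>
#include <stdint.h>

static uint16_t sketch_rtscts_dur_us(bool cts_only, uint16_t sifs_us,
				     uint16_t cts_us, uint16_t frame_us,
				     uint16_t ack_or_ba_us)
{
	uint16_t dur;

	if (!cts_only)
		dur = 3 * sifs_us + cts_us;	/* RTS/CTS exchange */
	else
		dur = 2 * sifs_us;		/* CTS-to-self */

	return dur + frame_us + ack_or_ba_us;
}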
-u16 wlc_phytxctl1_calc(struct wlc_info *wlc, ratespec_t rspec)
+u16 brcms_c_phytxctl1_calc(struct brcms_c_info *wlc, ratespec_t rspec)
{
u16 phyctl1 = 0;
u16 bw;
- if (WLCISLCNPHY(wlc->band)) {
+ if (BRCMS_ISLCNPHY(wlc->band)) {
bw = PHY_TXC1_BW_20MHZ;
} else {
bw = RSPEC_GET_BW(rspec);
/* 10Mhz is not supported yet */
if (bw < PHY_TXC1_BW_20MHZ) {
- wiphy_err(wlc->wiphy, "wlc_phytxctl1_calc: bw %d is "
+ wiphy_err(wlc->wiphy, "phytxctl1_calc: bw %d is "
"not supported yet, set to 20L\n", bw);
bw = PHY_TXC1_BW_20MHZ;
}
@@ -5048,8 +3554,8 @@ u16 wlc_phytxctl1_calc(struct wlc_info *wlc, ratespec_t rspec)
phyctl1 = RSPEC_PHYTXBYTE2(rspec);
/* set the upper byte of phyctl1 */
phyctl1 |= (mcs_table[mcs].tx_phy_ctl3 << 8);
- } else if (IS_CCK(rspec) && !WLCISLCNPHY(wlc->band)
- && !WLCISSSLPNPHY(wlc->band)) {
+ } else if (IS_CCK(rspec) && !BRCMS_ISLCNPHY(wlc->band)
+ && !BRCMS_ISSSLPNPHY(wlc->band)) {
/* In CCK mode LPPHY overloads OFDM Modulation bits with CCK Data Rate */
/* Eventually MIMOPHY would also be converted to this format */
/* 0 = 1Mbps; 1 = 2Mbps; 2 = 5.5Mbps; 3 = 11Mbps */
@@ -5057,9 +3563,9 @@ u16 wlc_phytxctl1_calc(struct wlc_info *wlc, ratespec_t rspec)
} else { /* legacy OFDM/CCK */
s16 phycfg;
/* get the phyctl byte from rate phycfg table */
- phycfg = wlc_rate_legacy_phyctl(RSPEC2RATE(rspec));
+ phycfg = brcms_c_rate_legacy_phyctl(RSPEC2RATE(rspec));
if (phycfg == -1) {
- wiphy_err(wlc->wiphy, "wlc_phytxctl1_calc: wrong "
+ wiphy_err(wlc->wiphy, "phytxctl1_calc: wrong "
"legacy OFDM/CCK rate\n");
phycfg = 0;
}
@@ -5072,8 +3578,8 @@ u16 wlc_phytxctl1_calc(struct wlc_info *wlc, ratespec_t rspec)
}
ratespec_t
-wlc_rspec_to_rts_rspec(struct wlc_info *wlc, ratespec_t rspec, bool use_rspec,
- u16 mimo_ctlchbw)
+brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, ratespec_t rspec,
+ bool use_rspec, u16 mimo_ctlchbw)
{
ratespec_t rts_rspec = 0;
@@ -5083,21 +3589,21 @@ wlc_rspec_to_rts_rspec(struct wlc_info *wlc, ratespec_t rspec, bool use_rspec,
} else if (wlc->band->gmode && wlc->protection->_g && !IS_CCK(rspec)) {
/* Use 11Mbps as the g protection RTS target rate and fallback.
- * Use the WLC_BASIC_RATE() lookup to find the best basic rate under the
- * target in case 11 Mbps is not Basic.
+ * Use the BRCMS_BASIC_RATE() lookup to find the best basic rate
+ * under the target in case 11 Mbps is not Basic.
* 6 and 9 Mbps are not usually selected by rate selection, but even
* if the OFDM rate we are protecting is 6 or 9 Mbps, 11 is more robust.
*/
- rts_rspec = WLC_BASIC_RATE(wlc, WLC_RATE_11M);
+ rts_rspec = BRCMS_BASIC_RATE(wlc, BRCM_RATE_11M);
} else {
/* calculate RTS rate and fallback rate based on the frame rate
* RTS must be sent at a basic rate since it is a
* control frame, sec 9.6 of 802.11 spec
*/
- rts_rspec = WLC_BASIC_RATE(wlc, rspec);
+ rts_rspec = BRCMS_BASIC_RATE(wlc, rspec);
}
- if (WLC_PHY_11N_CAP(wlc->band)) {
+ if (BRCMS_PHY_11N_CAP(wlc->band)) {
/* set rts txbw to correct side band */
rts_rspec &= ~RSPEC_BW_MASK;
@@ -5119,7 +3625,7 @@ wlc_rspec_to_rts_rspec(struct wlc_info *wlc, ratespec_t rspec, bool use_rspec,
}
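/*
 * A minimal standalone sketch of the RTS rate policy above.
 * sketch_basic_rate() stands in for the BRCMS_BASIC_RATE() lookup (highest
 * basic rate at or below the argument); rates are in 500 kbps units, so 22
 * is the 11 Mbps g-protection target. The 11n sideband fix-up that follows
 * in the driver is not reproduced.
 */
static unsigned sketch_basic_rate(const unsigned char *basic_tbl, unsigned rate)
{
	return basic_tbl[rate];
}

static unsigned sketch_pick_rts_rate(const unsigned char *basic_tbl,
				     unsigned frame_rate, int use_frame_rate,
				     int g_protection, int frame_is_cck)
{
	if (use_frame_rate)
		return frame_rate;			 /* caller forced the data rate */
	if (g_protection && !frame_is_cck)
		return sketch_basic_rate(basic_tbl, 22); /* robust 11 Mbps target */
	return sketch_basic_rate(basic_tbl, frame_rate); /* control frames use basic rates */
}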
/*
- * Add d11txh_t, cck_phy_hdr_t.
+ * Add struct d11txh, struct cck_phy_hdr.
*
* 'p' data must start with 802.11 MAC header
* 'p' must allow enough bytes of local headers to be "pushed" onto the packet
@@ -5128,25 +3634,25 @@ wlc_rspec_to_rts_rspec(struct wlc_info *wlc, ratespec_t rspec, bool use_rspec,
*
*/
static u16
-wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
+brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
struct sk_buff *p, struct scb *scb, uint frag,
uint nfrags, uint queue, uint next_frag_len,
- wsec_key_t *key, ratespec_t rspec_override)
+ struct wsec_key *key, ratespec_t rspec_override)
{
struct ieee80211_hdr *h;
- d11txh_t *txh;
+ struct d11txh *txh;
u8 *plcp, plcp_fallback[D11_PHY_HDR_LEN];
int len, phylen, rts_phylen;
u16 mch, phyctl, xfts, mainrates;
u16 seq = 0, mcl = 0, status = 0, frameid = 0;
- ratespec_t rspec[2] = { WLC_RATE_1M, WLC_RATE_1M }, rts_rspec[2] = {
- WLC_RATE_1M, WLC_RATE_1M};
+ ratespec_t rspec[2] = { BRCM_RATE_1M, BRCM_RATE_1M }, rts_rspec[2] = {
+ BRCM_RATE_1M, BRCM_RATE_1M};
bool use_rts = false;
bool use_cts = false;
bool use_rifs = false;
bool short_preamble[2] = { false, false };
- u8 preamble_type[2] = { WLC_LONG_PREAMBLE, WLC_LONG_PREAMBLE };
- u8 rts_preamble_type[2] = { WLC_LONG_PREAMBLE, WLC_LONG_PREAMBLE };
+ u8 preamble_type[2] = { BRCMS_LONG_PREAMBLE, BRCMS_LONG_PREAMBLE };
+ u8 rts_preamble_type[2] = { BRCMS_LONG_PREAMBLE, BRCMS_LONG_PREAMBLE };
u8 *rts_plcp, rts_plcp_fallback[D11_PHY_HDR_LEN];
struct ieee80211_rts *rts = NULL;
bool qos;
@@ -5171,7 +3677,7 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
qos = ieee80211_is_data_qos(h->frame_control);
/* compute length of frame in bytes for use in PLCP computations */
- len = bcm_pkttotlen(p);
+ len = brcmu_pkttotlen(p);
phylen = len + FCS_LEN;
/* If WEP enabled, add room in phylen for the additional bytes of
@@ -5190,7 +3696,7 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
plcp = skb_push(p, D11_PHY_HDR_LEN);
/* add Broadcom tx descriptor header */
- txh = (d11txh_t *) skb_push(p, D11_TXH_LEN);
+ txh = (struct d11txh *) skb_push(p, D11_TXH_LEN);
memset(txh, 0, D11_TXH_LEN);
/* setup frameid */
@@ -5198,7 +3704,7 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
/* non-AP STA should never use BCMC queue */
if (queue == TX_BCMC_FIFO) {
wiphy_err(wlc->wiphy, "wl%d: %s: ASSERT queue == "
- "TX_BCMC!\n", WLCWLUNIT(wlc), __func__);
+ "TX_BCMC!\n", BRCMS_UNIT(wlc), __func__);
frameid = bcmc_fid_generate(wlc, NULL, txh);
} else {
/* Increment the counter for first fragment */
@@ -5244,7 +3750,7 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ?
true : false;
} else {
- rate_val[k] = WLC_RATE_1M;
+ rate_val[k] = BRCM_RATE_1M;
}
} else {
rate_val[k] = txrate[k]->idx;
@@ -5266,12 +3772,12 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
/* (1) RATE: determine and validate primary rate and fallback rates */
if (!RSPEC_ACTIVE(rspec[k])) {
- rspec[k] = WLC_RATE_1M;
+ rspec[k] = BRCM_RATE_1M;
} else {
if (!is_multicast_ether_addr(h->addr1)) {
/* set tx antenna config */
- wlc_antsel_antcfg_get(wlc->asi, false, false, 0,
- 0, &antcfg, &fbantcfg);
+ brcms_c_antsel_antcfg_get(wlc->asi, false,
+ false, 0, 0, &antcfg, &fbantcfg);
}
}
}
@@ -5290,7 +3796,7 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
/* For SISO MCS use STBC if possible */
if (IS_MCS(rspec[k])
- && WLC_STF_SS_STBC_TX(wlc, scb)) {
+ && BRCMS_STF_SS_STBC_TX(wlc, scb)) {
u8 stc;
stc = 1; /* Nss for single stream is always 1 */
@@ -5304,11 +3810,11 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
}
/* Is the phy configured to use 40MHZ frames? If so then pick the desired txbw */
- if (CHSPEC_WLC_BW(wlc->chanspec) == WLC_40_MHZ) {
+ if (CHSPEC_WLC_BW(wlc->chanspec) == BRCMS_40_MHZ) {
/* default txbw is 20in40 SB */
mimo_ctlchbw = mimo_txbw =
- CHSPEC_SB_UPPER(WLC_BAND_PI_RADIO_CHANSPEC)
- ? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ;
+ CHSPEC_SB_UPPER(BRCMS_BAND_PI_RADIO_CHANSPEC)
+ ? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ;
if (IS_MCS(rspec[k])) {
/* mcs 32 must be 40b/w DUP */
@@ -5357,15 +3863,15 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
rspec[k] &= ~RSPEC_SHORT_GI;
#endif
- mimo_preamble_type = WLC_MM_PREAMBLE;
+ mimo_preamble_type = BRCMS_MM_PREAMBLE;
if (txrate[k]->flags & IEEE80211_TX_RC_GREEN_FIELD)
- mimo_preamble_type = WLC_GF_PREAMBLE;
+ mimo_preamble_type = BRCMS_GF_PREAMBLE;
if ((txrate[k]->flags & IEEE80211_TX_RC_MCS)
&& (!IS_MCS(rspec[k]))) {
wiphy_err(wlc->wiphy, "wl%d: %s: IEEE80211_TX_"
"RC_MCS != IS_MCS(rspec)\n",
- WLCWLUNIT(wlc), __func__);
+ BRCMS_UNIT(wlc), __func__);
}
if (IS_MCS(rspec[k])) {
@@ -5375,7 +3881,7 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
if ((rspec[k] & RSPEC_SHORT_GI)
&& IS_SINGLE_STREAM(rspec[k] &
RSPEC_RATE_MASK)) {
- preamble_type[k] = WLC_MM_PREAMBLE;
+ preamble_type[k] = BRCMS_MM_PREAMBLE;
}
}
@@ -5383,7 +3889,7 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
if (!IS_MCS(rspec[0])
&& (tx_info->control.rates[0].
flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE))
- preamble_type[k] = WLC_SHORT_PREAMBLE;
+ preamble_type[k] = BRCMS_SHORT_PREAMBLE;
}
} else {
for (k = 0; k < hw->max_rates; k++) {
@@ -5392,7 +3898,7 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
rspec[k] |= (PHY_TXC1_BW_20MHZ << RSPEC_BW_SHIFT);
/* for nphy, stf of ofdm frames must follow policies */
- if (WLCISNPHY(wlc->band) && IS_OFDM(rspec[k])) {
+ if (BRCMS_ISNPHY(wlc->band) && IS_OFDM(rspec[k])) {
rspec[k] &= ~RSPEC_STF_MASK;
rspec[k] |= phyctl1_stf << RSPEC_STF_SHIFT;
}
@@ -5409,9 +3915,10 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
(phylen > wlc->RTSThresh) && !is_multicast_ether_addr(h->addr1))
use_rts = true;
- /* (3) PLCP: determine PLCP header and MAC duration, fill d11txh_t */
- wlc_compute_plcp(wlc, rspec[0], phylen, plcp);
- wlc_compute_plcp(wlc, rspec[1], phylen, plcp_fallback);
+ /* (3) PLCP: determine PLCP header and MAC duration,
+ * fill struct d11txh */
+ brcms_c_compute_plcp(wlc, rspec[0], phylen, plcp);
+ brcms_c_compute_plcp(wlc, rspec[1], phylen, plcp_fallback);
memcpy(&txh->FragPLCPFallback,
plcp_fallback, sizeof(txh->FragPLCPFallback));
@@ -5422,21 +3929,21 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
}
/* MIMO-RATE: need validation ?? */
- mainrates =
- IS_OFDM(rspec[0]) ? D11A_PHY_HDR_GRATE((ofdm_phy_hdr_t *) plcp) :
- plcp[0];
+ mainrates = IS_OFDM(rspec[0]) ?
+ D11A_PHY_HDR_GRATE((struct ofdm_phy_hdr *) plcp) :
+ plcp[0];
/* DUR field for main rate */
if (!ieee80211_is_pspoll(h->frame_control) &&
!is_multicast_ether_addr(h->addr1) && !use_rifs) {
durid =
- wlc_compute_frame_dur(wlc, rspec[0], preamble_type[0],
+ brcms_c_compute_frame_dur(wlc, rspec[0], preamble_type[0],
next_frag_len);
h->duration_id = cpu_to_le16(durid);
} else if (use_rifs) {
/* NAV protect to end of next max packet size */
durid =
- (u16) wlc_calc_frame_time(wlc, rspec[0],
+ (u16) brcms_c_calc_frame_time(wlc, rspec[0],
preamble_type[0],
DOT11_MAX_FRAG_LEN);
durid += RIFS_11N_TIME;
@@ -5449,7 +3956,7 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
else if (is_multicast_ether_addr(h->addr1) || use_rifs)
txh->FragDurFallback = 0;
else {
- durid = wlc_compute_frame_dur(wlc, rspec[1],
+ durid = brcms_c_compute_frame_dur(wlc, rspec[1],
preamble_type[1], next_frag_len);
txh->FragDurFallback = cpu_to_le16(durid);
}
@@ -5464,7 +3971,7 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
if (BAND_5G(wlc->band->bandtype))
mcl |= TXC_FREQBAND_5G;
- if (CHSPEC_IS40(WLC_BAND_PI_RADIO_CHANSPEC))
+ if (CHSPEC_IS40(BRCMS_BAND_PI_RADIO_CHANSPEC))
mcl |= TXC_BW_40;
/* set AMIC bit if using hardware TKIP MIC */
@@ -5477,9 +3984,9 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
mch = 0;
/* Set fallback rate preamble type */
- if ((preamble_type[1] == WLC_SHORT_PREAMBLE) ||
- (preamble_type[1] == WLC_GF_PREAMBLE)) {
- if (RSPEC2RATE(rspec[1]) != WLC_RATE_1M)
+ if ((preamble_type[1] == BRCMS_SHORT_PREAMBLE) ||
+ (preamble_type[1] == BRCMS_GF_PREAMBLE)) {
+ if (RSPEC2RATE(rspec[1]) != BRCM_RATE_1M)
mch |= TXC_PREAMBLE_DATA_FB_SHORT;
}
@@ -5508,29 +4015,30 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
txh->MaxABytes_FBR = cpu_to_le16(0);
txh->MinMBytes = cpu_to_le16(0);
- /* (5) RTS/CTS: determine RTS/CTS PLCP header and MAC duration, furnish d11txh_t */
+ /* (5) RTS/CTS: determine RTS/CTS PLCP header and MAC duration,
+ * furnish struct d11txh */
/* RTS PLCP header and RTS frame */
if (use_rts || use_cts) {
if (use_rts && use_cts)
use_cts = false;
for (k = 0; k < 2; k++) {
- rts_rspec[k] = wlc_rspec_to_rts_rspec(wlc, rspec[k],
+ rts_rspec[k] = brcms_c_rspec_to_rts_rspec(wlc, rspec[k],
false,
mimo_ctlchbw);
}
if (!IS_OFDM(rts_rspec[0]) &&
- !((RSPEC2RATE(rts_rspec[0]) == WLC_RATE_1M) ||
- (wlc->PLCPHdr_override == WLC_PLCP_LONG))) {
- rts_preamble_type[0] = WLC_SHORT_PREAMBLE;
+ !((RSPEC2RATE(rts_rspec[0]) == BRCM_RATE_1M) ||
+ (wlc->PLCPHdr_override == BRCMS_PLCP_LONG))) {
+ rts_preamble_type[0] = BRCMS_SHORT_PREAMBLE;
mch |= TXC_PREAMBLE_RTS_MAIN_SHORT;
}
if (!IS_OFDM(rts_rspec[1]) &&
- !((RSPEC2RATE(rts_rspec[1]) == WLC_RATE_1M) ||
- (wlc->PLCPHdr_override == WLC_PLCP_LONG))) {
- rts_preamble_type[1] = WLC_SHORT_PREAMBLE;
+ !((RSPEC2RATE(rts_rspec[1]) == BRCM_RATE_1M) ||
+ (wlc->PLCPHdr_override == BRCMS_PLCP_LONG))) {
+ rts_preamble_type[1] = BRCMS_SHORT_PREAMBLE;
mch |= TXC_PREAMBLE_RTS_FB_SHORT;
}
@@ -5549,10 +4057,10 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
else
rts_phylen = DOT11_RTS_LEN + FCS_LEN;
- wlc_compute_plcp(wlc, rts_rspec[0], rts_phylen, rts_plcp);
+ brcms_c_compute_plcp(wlc, rts_rspec[0], rts_phylen, rts_plcp);
/* fallback rate version of RTS PLCP header */
- wlc_compute_plcp(wlc, rts_rspec[1], rts_phylen,
+ brcms_c_compute_plcp(wlc, rts_rspec[1], rts_phylen,
rts_plcp_fallback);
memcpy(&txh->RTSPLCPFallback, rts_plcp_fallback,
sizeof(txh->RTSPLCPFallback));
@@ -5560,12 +4068,12 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
/* RTS frame fields... */
rts = (struct ieee80211_rts *)&txh->rts_frame;
- durid = wlc_compute_rtscts_dur(wlc, use_cts, rts_rspec[0],
+ durid = brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec[0],
rspec[0], rts_preamble_type[0],
preamble_type[0], phylen, false);
rts->duration = cpu_to_le16(durid);
/* fallback rate version of RTS DUR field */
- durid = wlc_compute_rtscts_dur(wlc, use_cts,
+ durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
rts_rspec[1], rspec[1],
rts_preamble_type[1],
preamble_type[1], phylen, false);
@@ -5588,8 +4096,9 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
* high 8 bits: rts/cts rate/mcs
*/
mainrates |= (IS_OFDM(rts_rspec[0]) ?
- D11A_PHY_HDR_GRATE((ofdm_phy_hdr_t *) rts_plcp) :
- rts_plcp[0]) << 8;
+ D11A_PHY_HDR_GRATE(
+ (struct ofdm_phy_hdr *) rts_plcp) :
+ rts_plcp[0]) << 8;
} else {
memset((char *)txh->RTSPhyHeader, 0, D11_PHY_HDR_LEN);
memset((char *)&txh->rts_frame, 0,
@@ -5603,7 +4112,7 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
/* add null delimiter count */
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && IS_MCS(rspec)) {
txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] =
- wlc_ampdu_null_delim_cnt(wlc->ampdu, scb, rspec, phylen);
+ brcms_c_ampdu_null_delim_cnt(wlc->ampdu, scb, rspec, phylen);
}
#endif
@@ -5618,34 +4127,34 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
xfts |= (FRAMETYPE(rts_rspec[0], wlc->mimoft) << XFTS_RTS_FT_SHIFT);
xfts |= (FRAMETYPE(rts_rspec[1], wlc->mimoft) << XFTS_FBRRTS_FT_SHIFT);
xfts |=
- CHSPEC_CHANNEL(WLC_BAND_PI_RADIO_CHANSPEC) << XFTS_CHANNEL_SHIFT;
+ CHSPEC_CHANNEL(BRCMS_BAND_PI_RADIO_CHANSPEC) << XFTS_CHANNEL_SHIFT;
txh->XtraFrameTypes = cpu_to_le16(xfts);
/* PhyTxControlWord */
phyctl = FRAMETYPE(rspec[0], wlc->mimoft);
- if ((preamble_type[0] == WLC_SHORT_PREAMBLE) ||
- (preamble_type[0] == WLC_GF_PREAMBLE)) {
- if (RSPEC2RATE(rspec[0]) != WLC_RATE_1M)
+ if ((preamble_type[0] == BRCMS_SHORT_PREAMBLE) ||
+ (preamble_type[0] == BRCMS_GF_PREAMBLE)) {
+ if (RSPEC2RATE(rspec[0]) != BRCM_RATE_1M)
phyctl |= PHY_TXC_SHORT_HDR;
}
/* phytxant is properly bit shifted */
- phyctl |= wlc_stf_d11hdrs_phyctl_txant(wlc, rspec[0]);
+ phyctl |= brcms_c_stf_d11hdrs_phyctl_txant(wlc, rspec[0]);
txh->PhyTxControlWord = cpu_to_le16(phyctl);
/* PhyTxControlWord_1 */
- if (WLC_PHY_11N_CAP(wlc->band)) {
+ if (BRCMS_PHY_11N_CAP(wlc->band)) {
u16 phyctl1 = 0;
- phyctl1 = wlc_phytxctl1_calc(wlc, rspec[0]);
+ phyctl1 = brcms_c_phytxctl1_calc(wlc, rspec[0]);
txh->PhyTxControlWord_1 = cpu_to_le16(phyctl1);
- phyctl1 = wlc_phytxctl1_calc(wlc, rspec[1]);
+ phyctl1 = brcms_c_phytxctl1_calc(wlc, rspec[1]);
txh->PhyTxControlWord_1_Fbr = cpu_to_le16(phyctl1);
if (use_rts || use_cts) {
- phyctl1 = wlc_phytxctl1_calc(wlc, rts_rspec[0]);
+ phyctl1 = brcms_c_phytxctl1_calc(wlc, rts_rspec[0]);
txh->PhyTxControlWord_1_Rts = cpu_to_le16(phyctl1);
- phyctl1 = wlc_phytxctl1_calc(wlc, rts_rspec[1]);
+ phyctl1 = brcms_c_phytxctl1_calc(wlc, rts_rspec[1]);
txh->PhyTxControlWord_1_FbrRts = cpu_to_le16(phyctl1);
}
@@ -5654,15 +4163,17 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
* fill in non-zero MModeLen and/or MModeFbrLen
* it will be unnecessary if they are separated
*/
- if (IS_MCS(rspec[0]) && (preamble_type[0] == WLC_MM_PREAMBLE)) {
+ if (IS_MCS(rspec[0]) &&
+ (preamble_type[0] == BRCMS_MM_PREAMBLE)) {
u16 mmodelen =
- wlc_calc_lsig_len(wlc, rspec[0], phylen);
+ brcms_c_calc_lsig_len(wlc, rspec[0], phylen);
txh->MModeLen = cpu_to_le16(mmodelen);
}
- if (IS_MCS(rspec[1]) && (preamble_type[1] == WLC_MM_PREAMBLE)) {
+ if (IS_MCS(rspec[1]) &&
+ (preamble_type[1] == BRCMS_MM_PREAMBLE)) {
u16 mmodefbrlen =
- wlc_calc_lsig_len(wlc, rspec[1], phylen);
+ brcms_c_calc_lsig_len(wlc, rspec[1], phylen);
txh->MModeFbrLen = cpu_to_le16(mmodefbrlen);
}
}
@@ -5674,16 +4185,16 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
/* WME: Update TXOP threshold */
if ((!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) && (frag == 0)) {
frag_dur =
- wlc_calc_frame_time(wlc, rspec[0], preamble_type[0],
- phylen);
+ brcms_c_calc_frame_time(wlc, rspec[0],
+ preamble_type[0], phylen);
if (rts) {
/* 1 RTS or CTS-to-self frame */
dur =
- wlc_calc_cts_time(wlc, rts_rspec[0],
+ brcms_c_calc_cts_time(wlc, rts_rspec[0],
rts_preamble_type[0]);
dur_fallback =
- wlc_calc_cts_time(wlc, rts_rspec[1],
+ brcms_c_calc_cts_time(wlc, rts_rspec[1],
rts_preamble_type[1]);
/* (SIFS + CTS) + SIFS + frame + SIFS + ACK */
dur += le16_to_cpu(rts->duration);
@@ -5696,15 +4207,15 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
/* frame + SIFS + ACK */
dur = frag_dur;
dur +=
- wlc_compute_frame_dur(wlc, rspec[0],
+ brcms_c_compute_frame_dur(wlc, rspec[0],
preamble_type[0], 0);
dur_fallback =
- wlc_calc_frame_time(wlc, rspec[1],
+ brcms_c_calc_frame_time(wlc, rspec[1],
preamble_type[1],
phylen);
dur_fallback +=
- wlc_compute_frame_dur(wlc, rspec[1],
+ brcms_c_compute_frame_dur(wlc, rspec[1],
preamble_type[1], 0);
}
/* NEED to set TxFesTimeNormal (hard) */
@@ -5719,12 +4230,10 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
uint newfragthresh;
newfragthresh =
- wlc_calc_frame_len(wlc, rspec[0],
- preamble_type[0],
- (wlc->
- edcf_txop[ac] -
- (dur -
- frag_dur)));
+ brcms_c_calc_frame_len(wlc,
+ rspec[0], preamble_type[0],
+ (wlc->edcf_txop[ac] -
+ (dur - frag_dur)));
/* range bound the fragthreshold */
if (newfragthresh < DOT11_MIN_FRAG_LEN)
newfragthresh =
@@ -5759,32 +4268,9 @@ wlc_d11hdrs_mac80211(struct wlc_info *wlc, struct ieee80211_hw *hw,
return 0;
}
-void wlc_tbtt(struct wlc_info *wlc, d11regs_t *regs)
+void brcms_c_tbtt(struct brcms_c_info *wlc)
{
- struct wlc_bsscfg *cfg = wlc->cfg;
-
- if (BSSCFG_STA(cfg)) {
- /* run watchdog here if the watchdog timer is not armed */
- if (WLC_WATCHDOG_TBTT(wlc)) {
- u32 cur, delta;
- if (wlc->WDarmed) {
- wl_del_timer(wlc->wl, wlc->wdtimer);
- wlc->WDarmed = false;
- }
-
- cur = OSL_SYSUPTIME();
- delta = cur > wlc->WDlast ? cur - wlc->WDlast :
- (u32) ~0 - wlc->WDlast + cur + 1;
- if (delta >= TIMER_INTERVAL_WATCHDOG) {
- wlc_watchdog((void *)wlc);
- wlc->WDlast = cur;
- }
-
- wl_add_timer(wlc->wl, wlc->wdtimer,
- wlc_watchdog_backup_bi(wlc), true);
- wlc->WDarmed = true;
- }
- }
+ struct brcms_bss_cfg *cfg = wlc->cfg;
if (!cfg->BSS) {
/* DirFrmQ is now valid...defer setting until end of ATIM window */
@@ -5792,27 +4278,26 @@ void wlc_tbtt(struct wlc_info *wlc, d11regs_t *regs)
}
}
-static void wlc_war16165(struct wlc_info *wlc, bool tx)
+static void brcms_c_war16165(struct brcms_c_info *wlc, bool tx)
{
if (tx) {
/* the post-increment is used in STAY_AWAKE macro */
if (wlc->txpend16165war++ == 0)
- wlc_set_ps_ctrl(wlc);
+ brcms_c_set_ps_ctrl(wlc);
} else {
wlc->txpend16165war--;
if (wlc->txpend16165war == 0)
- wlc_set_ps_ctrl(wlc);
+ brcms_c_set_ps_ctrl(wlc);
}
}
-/* process an individual tx_status_t */
-/* WLC_HIGH_API */
+/* process an individual struct tx_status */
bool
-wlc_dotxstatus(struct wlc_info *wlc, tx_status_t *txs, u32 frm_tx2)
+brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs, u32 frm_tx2)
{
struct sk_buff *p;
uint queue;
- d11txh_t *txh;
+ struct d11txh *txh;
struct scb *scb = NULL;
bool free_pdu;
int tx_rts, tx_frame_count, tx_rts_count;
@@ -5845,21 +4330,21 @@ wlc_dotxstatus(struct wlc_info *wlc, tx_status_t *txs, u32 frm_tx2)
}
p = GETNEXTTXP(wlc, queue);
- if (WLC_WAR16165(wlc))
- wlc_war16165(wlc, false);
+ if (BRCMS_WAR16165(wlc))
+ brcms_c_war16165(wlc, false);
if (p == NULL)
goto fatal;
- txh = (d11txh_t *) (p->data);
+ txh = (struct d11txh *) (p->data);
mcl = le16_to_cpu(txh->MacTxControlLow);
if (txs->phyerr) {
if (WL_ERROR_ON()) {
wiphy_err(wlc->wiphy, "phyerr 0x%x, rate 0x%x\n",
txs->phyerr, txh->MainRates);
- wlc_print_txdesc(txh);
+ brcms_c_print_txdesc(txh);
}
- wlc_print_txstatus(txs);
+ brcms_c_print_txstatus(txs);
}
if (txs->frameid != cpu_to_le16(txh->TxFrameID))
@@ -5871,7 +4356,7 @@ wlc_dotxstatus(struct wlc_info *wlc, tx_status_t *txs, u32 frm_tx2)
scb = (struct scb *)tx_info->control.sta->drv_priv;
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
- wlc_ampdu_dotxstatus(wlc->ampdu, scb, p, txs);
+ brcms_c_ampdu_dotxstatus(wlc->ampdu, scb, p, txs);
return false;
}
@@ -5892,25 +4377,41 @@ wlc_dotxstatus(struct wlc_info *wlc, tx_status_t *txs, u32 frm_tx2)
if (!lastframe) {
wiphy_err(wlc->wiphy, "Not last frame!\n");
} else {
- u16 sfbl, lfbl;
- ieee80211_tx_info_clear_status(tx_info);
+ /*
+ * Set information to be consumed by Minstrel ht.
+ *
+ * The "fallback limit" is the number of tx attempts a given
+ * MPDU is sent at the "primary" rate. Tx attempts beyond that
+ * limit are sent at the "secondary" rate.
+ * A 'short frame' does not exceed the RTS threshold.
+ */
+ u16 sfbl, /* Short Frame Rate Fallback Limit */
+ lfbl, /* Long Frame Rate Fallback Limit */
+ fbl;
+
if (queue < AC_COUNT) {
- sfbl = WLC_WME_RETRY_SFB_GET(wlc, wme_fifo2ac[queue]);
- lfbl = WLC_WME_RETRY_LFB_GET(wlc, wme_fifo2ac[queue]);
+ sfbl = BRCMS_WME_RETRY_SFB_GET(wlc, wme_fifo2ac[queue]);
+ lfbl = BRCMS_WME_RETRY_LFB_GET(wlc, wme_fifo2ac[queue]);
} else {
sfbl = wlc->SFBL;
lfbl = wlc->LFBL;
}
txrate = tx_info->status.rates;
- /* FIXME: this should use a combination of sfbl, lfbl depending on frame length and RTS setting */
- if ((tx_frame_count > sfbl) && (txrate[1].idx >= 0)) {
+ if (txrate[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ fbl = lfbl;
+ else
+ fbl = sfbl;
+
+ ieee80211_tx_info_clear_status(tx_info);
+
+ if ((tx_frame_count > fbl) && (txrate[1].idx >= 0)) {
/* rate selection requested a fallback rate and we used it */
- txrate->count = lfbl;
- txrate[1].count = tx_frame_count - lfbl;
+ txrate[0].count = fbl;
+ txrate[1].count = tx_frame_count - fbl;
} else {
/* rate selection did not request fallback rate, or we didn't need it */
- txrate->count = tx_frame_count;
+ txrate[0].count = tx_frame_count;
/* rc80211_minstrel.c:minstrel_tx_status() expects unused rates to be marked with idx = -1 */
txrate[1].idx = -1;
txrate[1].count = 0;
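/*
 * A minimal standalone sketch of how the attempt counts handed to
 * minstrel_ht are split against the fallback limit chosen above (lfbl when
 * RTS/CTS was used, sfbl otherwise).
 */
#include <stdio.h>

struct sketch_rate { int idx; int count; };

static void sketch_split_attempts(struct sketch_rate r[2], int tx_frame_count,
				  int fbl)
{
	if (tx_frame_count > fbl && r[1].idx >= 0) {
		r[0].count = fbl;			/* attempts at the primary rate */
		r[1].count = tx_frame_count - fbl;	/* remainder at the fallback rate */
	} else {
		r[0].count = tx_frame_count;
		r[1].idx = -1;				/* minstrel expects unused slots at -1 */
		r[1].count = 0;
	}
}

int main(void)
{
	struct sketch_rate r[2] = { { 4, 0 }, { 2, 0 } };

	sketch_split_attempts(r, 7, 3);	/* 7 tx attempts, limit 3 */
	printf("primary %d, fallback %d\n", r[0].count, r[1].count);	/* 3, 4 */
	return 0;
}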
@@ -5926,15 +4427,14 @@ wlc_dotxstatus(struct wlc_info *wlc, tx_status_t *txs, u32 frm_tx2)
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
- totlen = bcm_pkttotlen(p);
+ totlen = brcmu_pkttotlen(p);
free_pdu = true;
- wlc_txfifo_complete(wlc, queue, 1);
+ brcms_c_txfifo_complete(wlc, queue, 1);
if (lastframe) {
p->next = NULL;
p->prev = NULL;
- wlc->txretried = 0;
/* remove PLCP & Broadcom tx descriptor header */
skb_pull(p, D11_PHY_HDR_LEN);
skb_pull(p, D11_TXH_LEN);
@@ -5948,48 +4448,42 @@ wlc_dotxstatus(struct wlc_info *wlc, tx_status_t *txs, u32 frm_tx2)
fatal:
if (p)
- bcm_pkt_buf_free_skb(p);
+ brcmu_pkt_buf_free_skb(p);
return true;
}
void
-wlc_txfifo_complete(struct wlc_info *wlc, uint fifo, s8 txpktpend)
+brcms_c_txfifo_complete(struct brcms_c_info *wlc, uint fifo, s8 txpktpend)
{
TXPKTPENDDEC(wlc, fifo, txpktpend);
BCMMSG(wlc->wiphy, "pktpend dec %d to %d\n", txpktpend,
TXPKTPENDGET(wlc, fifo));
/* There is more room; mark precedences related to this FIFO sendable */
- WLC_TX_FIFO_ENAB(wlc, fifo);
-
- if (!TXPKTPENDTOT(wlc)) {
- if (wlc->block_datafifo & DATA_BLOCK_TX_SUPR)
- wlc_bsscfg_tx_check(wlc);
- }
+ BRCMS_TX_FIFO_ENAB(wlc, fifo);
/* Clear MHF2_TXBCMC_NOW flag if BCMC fifo has drained */
if (AP_ENAB(wlc->pub) &&
- wlc->bcmcfifo_drain && !TXPKTPENDGET(wlc, TX_BCMC_FIFO)) {
- wlc->bcmcfifo_drain = false;
- wlc_mhf(wlc, MHF2, MHF2_TXBCMC_NOW, 0, WLC_BAND_AUTO);
+ !TXPKTPENDGET(wlc, TX_BCMC_FIFO)) {
+ brcms_c_mhf(wlc, MHF2, MHF2_TXBCMC_NOW, 0, BRCM_BAND_AUTO);
}
/* figure out which bsscfg is being worked on... */
}
/* Update beacon listen interval in shared memory */
-void wlc_bcn_li_upd(struct wlc_info *wlc)
+void brcms_c_bcn_li_upd(struct brcms_c_info *wlc)
{
if (AP_ENAB(wlc->pub))
return;
/* wake up every DTIM is the default */
if (wlc->bcn_li_dtim == 1)
- wlc_write_shm(wlc, M_BCN_LI, 0);
+ brcms_c_write_shm(wlc, M_BCN_LI, 0);
else
- wlc_write_shm(wlc, M_BCN_LI,
+ brcms_c_write_shm(wlc, M_BCN_LI,
(wlc->bcn_li_dtim << 8) | wlc->bcn_li_bcn);
}
@@ -6003,16 +4497,17 @@ void wlc_bcn_li_upd(struct wlc_info *wlc)
* |<---------- tsf_h ----------->||<--- tsf_l -->||<-RxTSFTime ->|
*
* The RxTSFTime are the lowest 16 bits and provided by the ucode. The
- * tsf_l is filled in by wlc_bmac_recv, which is done earlier in the
+ * tsf_l is filled in by brcms_b_recv, which is done earlier in the
* receive call sequence after rx interrupt. Only the higher 16 bits
* are used. Finally, the tsf_h is read from the tsf register.
*/
-static u64 wlc_recover_tsf64(struct wlc_info *wlc, struct wlc_d11rxhdr *rxh)
+static u64 brcms_c_recover_tsf64(struct brcms_c_info *wlc,
+ struct brcms_d11rxhdr *rxh)
{
u32 tsf_h, tsf_l;
u16 rx_tsf_0_15, rx_tsf_16_31;
- wlc_bmac_read_tsf(wlc->hw, &tsf_l, &tsf_h);
+ brcms_b_read_tsf(wlc->hw, &tsf_l, &tsf_h);
rx_tsf_16_31 = (u16)(tsf_l >> 16);
rx_tsf_0_15 = rxh->rxhdr.RxTSFTime;
@@ -6031,20 +4526,21 @@ static u64 wlc_recover_tsf64(struct wlc_info *wlc, struct wlc_d11rxhdr *rxh)
}
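/*
 * A minimal standalone sketch of the timestamp reconstruction described
 * above: bits 63..32 from the TSF high register, bits 31..16 from the high
 * half of the TSF low register, bits 15..0 from the ucode RxTSFTime. The
 * wrap handling is a sketch of the idea only (the hunk elides the driver's
 * exact adjustment): if the low 16 bits of tsf_l have already advanced past
 * RxTSFTime, the middle word is decremented, with a borrow into tsf_h.
 */
#include <stdint.h>

static uint64_t sketch_recover_tsf64(uint32_t tsf_h, uint32_t tsf_l,
				     uint16_t rx_tsf_time)
{
	uint16_t mid = (uint16_t)(tsf_l >> 16);

	if ((uint16_t)tsf_l < rx_tsf_time) {	/* tsf_l wrapped a 16-bit step */
		if (mid == 0)
			tsf_h--;		/* borrow into the high word */
		mid--;
	}
	return ((uint64_t)tsf_h << 32) | ((uint32_t)mid << 16) | rx_tsf_time;
}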
static void
-prep_mac80211_status(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p,
+prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
+ struct sk_buff *p,
struct ieee80211_rx_status *rx_status)
{
- wlc_d11rxhdr_t *wlc_rxh = (wlc_d11rxhdr_t *) rxh;
+ struct brcms_d11rxhdr *wlc_rxh = (struct brcms_d11rxhdr *) rxh;
int preamble;
int channel;
ratespec_t rspec;
unsigned char *plcp;
/* fill in TSF and flag its presence */
- rx_status->mactime = wlc_recover_tsf64(wlc, wlc_rxh);
+ rx_status->mactime = brcms_c_recover_tsf64(wlc, wlc_rxh);
rx_status->flag |= RX_FLAG_MACTIME_MPDU;
- channel = WLC_CHAN_CHANNEL(rxh->RxChan);
+ channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
if (channel > 14) {
rx_status->band = IEEE80211_BAND_5GHZ;
@@ -6064,7 +4560,7 @@ prep_mac80211_status(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p,
plcp = p->data;
- rspec = wlc_compute_rspec(rxh, plcp);
+ rspec = brcms_c_compute_rspec(rxh, plcp);
if (IS_MCS(rspec)) {
rx_status->rate_idx = rspec & RSPEC_RATE_MASK;
rx_status->flag |= RX_FLAG_HT;
@@ -6072,40 +4568,40 @@ prep_mac80211_status(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p,
rx_status->flag |= RX_FLAG_40MHZ;
} else {
switch (RSPEC2RATE(rspec)) {
- case WLC_RATE_1M:
+ case BRCM_RATE_1M:
rx_status->rate_idx = 0;
break;
- case WLC_RATE_2M:
+ case BRCM_RATE_2M:
rx_status->rate_idx = 1;
break;
- case WLC_RATE_5M5:
+ case BRCM_RATE_5M5:
rx_status->rate_idx = 2;
break;
- case WLC_RATE_11M:
+ case BRCM_RATE_11M:
rx_status->rate_idx = 3;
break;
- case WLC_RATE_6M:
+ case BRCM_RATE_6M:
rx_status->rate_idx = 4;
break;
- case WLC_RATE_9M:
+ case BRCM_RATE_9M:
rx_status->rate_idx = 5;
break;
- case WLC_RATE_12M:
+ case BRCM_RATE_12M:
rx_status->rate_idx = 6;
break;
- case WLC_RATE_18M:
+ case BRCM_RATE_18M:
rx_status->rate_idx = 7;
break;
- case WLC_RATE_24M:
+ case BRCM_RATE_24M:
rx_status->rate_idx = 8;
break;
- case WLC_RATE_36M:
+ case BRCM_RATE_36M:
rx_status->rate_idx = 9;
break;
- case WLC_RATE_48M:
+ case BRCM_RATE_48M:
rx_status->rate_idx = 10;
break;
- case WLC_RATE_54M:
+ case BRCM_RATE_54M:
rx_status->rate_idx = 11;
break;
default:
@@ -6141,7 +4637,8 @@ prep_mac80211_status(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p,
}
static void
-wlc_recvctl(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p)
+brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
+ struct sk_buff *p)
{
int len_mpdu;
struct ieee80211_rx_status rx_status;
@@ -6164,10 +4661,9 @@ wlc_recvctl(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p)
* Return true if more frames need to be processed. false otherwise.
* Param 'bound' indicates max. # frames to process before break out.
*/
-/* WLC_HIGH_API */
-void wlc_recv(struct wlc_info *wlc, struct sk_buff *p)
+void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
{
- d11rxhdr_t *rxh;
+ struct d11rxhdr *rxh;
struct ieee80211_hdr *h;
uint len;
bool is_amsdu;
@@ -6175,10 +4671,10 @@ void wlc_recv(struct wlc_info *wlc, struct sk_buff *p)
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
/* frame starts with rxhdr */
- rxh = (d11rxhdr_t *) (p->data);
+ rxh = (struct d11rxhdr *) (p->data);
/* strip off rxhdr */
- skb_pull(p, wlc->hwrxoff);
+ skb_pull(p, BRCMS_HWRXOFF);
/* fixup rx header endianness */
rxh->RxFrameSize = le16_to_cpu(rxh->RxFrameSize);
@@ -6196,7 +4692,7 @@ void wlc_recv(struct wlc_info *wlc, struct sk_buff *p)
/* MAC inserts 2 pad bytes for a4 headers or QoS or A-MSDU subframes */
if (rxh->RxStatus1 & RXS_PBPRES) {
if (p->len < 2) {
- wiphy_err(wlc->wiphy, "wl%d: wlc_recv: rcvd runt of "
+ wiphy_err(wlc->wiphy, "wl%d: recv: rcvd runt of "
"len %d\n", wlc->pub->unit, p->len);
goto toss;
}
@@ -6248,11 +4744,11 @@ void wlc_recv(struct wlc_info *wlc, struct sk_buff *p)
if (is_amsdu)
goto toss;
- wlc_recvctl(wlc, rxh, p);
+ brcms_c_recvctl(wlc, rxh, p);
return;
toss:
- bcm_pkt_buf_free_skb(p);
+ brcmu_pkt_buf_free_skb(p);
}
/* calculate frame duration for Mixed-mode L-SIG spoofing, return
@@ -6262,7 +4758,8 @@ void wlc_recv(struct wlc_info *wlc, struct sk_buff *p)
* len = 3(nsyms + nstream + 3) - 3
*/
u16
-wlc_calc_lsig_len(struct wlc_info *wlc, ratespec_t ratespec, uint mac_len)
+brcms_c_calc_lsig_len(struct brcms_c_info *wlc, ratespec_t ratespec,
+ uint mac_len)
{
uint nsyms, len = 0, kNdps;
@@ -6302,8 +4799,8 @@ wlc_calc_lsig_len(struct wlc_info *wlc, ratespec_t ratespec, uint mac_len)
/* calculate frame duration of a given rate and length, return time in usec unit */
uint
-wlc_calc_frame_time(struct wlc_info *wlc, ratespec_t ratespec, u8 preamble_type,
- uint mac_len)
+brcms_c_calc_frame_time(struct brcms_c_info *wlc, ratespec_t ratespec,
+ u8 preamble_type, uint mac_len)
{
uint nsyms, dur = 0, Ndps, kNdps;
uint rate = RSPEC2RATE(ratespec);
@@ -6311,7 +4808,7 @@ wlc_calc_frame_time(struct wlc_info *wlc, ratespec_t ratespec, u8 preamble_type,
if (rate == 0) {
wiphy_err(wlc->wiphy, "wl%d: WAR: using rate of 1 mbps\n",
wlc->pub->unit);
- rate = WLC_RATE_1M;
+ rate = BRCM_RATE_1M;
}
BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d, len%d\n",
@@ -6322,7 +4819,7 @@ wlc_calc_frame_time(struct wlc_info *wlc, ratespec_t ratespec, u8 preamble_type,
int tot_streams = MCS_TXS(mcs) + RSPEC_STC(ratespec);
dur = PREN_PREAMBLE + (tot_streams * PREN_PREAMBLE_EXT);
- if (preamble_type == WLC_MM_PREAMBLE)
+ if (preamble_type == BRCMS_MM_PREAMBLE)
dur += PREN_MM_EXT;
/* 1000Ndbps = kbps * 4 */
kNdps =
@@ -6361,7 +4858,7 @@ wlc_calc_frame_time(struct wlc_info *wlc, ratespec_t ratespec, u8 preamble_type,
mac_len = mac_len * 8 * 2;
/* calc ceiling of bits/rate = microseconds of air time */
dur = (mac_len + rate - 1) / rate;
- if (preamble_type & WLC_SHORT_PREAMBLE)
+ if (preamble_type & BRCMS_SHORT_PREAMBLE)
dur += BPHY_PLCP_SHORT_TIME;
else
dur += BPHY_PLCP_TIME;
@@ -6369,10 +4866,10 @@ wlc_calc_frame_time(struct wlc_info *wlc, ratespec_t ratespec, u8 preamble_type,
return dur;
}
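/*
 * A minimal standalone sketch of the CCK/DSSS branch above: total airtime is
 * the preamble+PLCP header time plus the PSDU bits divided by the rate,
 * rounded up to a whole microsecond. 192 us and 96 us are the standard
 * 802.11b long and short preamble times, assumed here to match the
 * BPHY_PLCP_* constants; the MCS and OFDM branches are not reproduced.
 */
#include <stdbool.h>

static unsigned sketch_cck_frame_time_us(unsigned rate_500kbps,
					 bool short_preamble,
					 unsigned mac_len_bytes)
{
	unsigned bits_x2 = mac_len_bytes * 8 * 2;
	unsigned psdu_us = (bits_x2 + rate_500kbps - 1) / rate_500kbps;

	return psdu_us + (short_preamble ? 96 : 192);
}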
-/* The opposite of wlc_calc_frame_time */
+/* The opposite of brcms_c_calc_frame_time */
static uint
-wlc_calc_frame_len(struct wlc_info *wlc, ratespec_t ratespec, u8 preamble_type,
- uint dur)
+brcms_c_calc_frame_len(struct brcms_c_info *wlc, ratespec_t ratespec,
+ u8 preamble_type, uint dur)
{
uint nsyms, mac_len, Ndps, kNdps;
uint rate = RSPEC2RATE(ratespec);
@@ -6405,7 +4902,7 @@ wlc_calc_frame_len(struct wlc_info *wlc, ratespec_t ratespec, u8 preamble_type,
((nsyms * Ndps) -
(APHY_SERVICE_NBITS + APHY_TAIL_NBITS)) / 8;
} else {
- if (preamble_type & WLC_SHORT_PREAMBLE)
+ if (preamble_type & BRCMS_SHORT_PREAMBLE)
dur -= BPHY_PLCP_SHORT_TIME;
else
dur -= BPHY_PLCP_TIME;
@@ -6417,22 +4914,24 @@ wlc_calc_frame_len(struct wlc_info *wlc, ratespec_t ratespec, u8 preamble_type,
}
static uint
-wlc_calc_ba_time(struct wlc_info *wlc, ratespec_t rspec, u8 preamble_type)
+brcms_c_calc_ba_time(struct brcms_c_info *wlc, ratespec_t rspec,
+ u8 preamble_type)
{
BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, "
"preamble_type %d\n", wlc->pub->unit, rspec, preamble_type);
/* Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that is less than
* or equal to the rate of the immediately previous frame in the FES
*/
- rspec = WLC_BASIC_RATE(wlc, rspec);
+ rspec = BRCMS_BASIC_RATE(wlc, rspec);
/* BA len == 32 == 16(ctl hdr) + 4(ba len) + 8(bitmap) + 4(fcs) */
- return wlc_calc_frame_time(wlc, rspec, preamble_type,
+ return brcms_c_calc_frame_time(wlc, rspec, preamble_type,
(DOT11_BA_LEN + DOT11_BA_BITMAP_LEN +
FCS_LEN));
}
static uint
-wlc_calc_ack_time(struct wlc_info *wlc, ratespec_t rspec, u8 preamble_type)
+brcms_c_calc_ack_time(struct brcms_c_info *wlc, ratespec_t rspec,
+ u8 preamble_type)
{
uint dur = 0;
@@ -6441,24 +4940,25 @@ wlc_calc_ack_time(struct wlc_info *wlc, ratespec_t rspec, u8 preamble_type)
/* Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that is less than
* or equal to the rate of the immediately previous frame in the FES
*/
- rspec = WLC_BASIC_RATE(wlc, rspec);
+ rspec = BRCMS_BASIC_RATE(wlc, rspec);
/* ACK frame len == 14 == 2(fc) + 2(dur) + 6(ra) + 4(fcs) */
dur =
- wlc_calc_frame_time(wlc, rspec, preamble_type,
+ brcms_c_calc_frame_time(wlc, rspec, preamble_type,
(DOT11_ACK_LEN + FCS_LEN));
return dur;
}
static uint
-wlc_calc_cts_time(struct wlc_info *wlc, ratespec_t rspec, u8 preamble_type)
+brcms_c_calc_cts_time(struct brcms_c_info *wlc, ratespec_t rspec,
+ u8 preamble_type)
{
BCMMSG(wlc->wiphy, "wl%d: ratespec 0x%x, preamble_type %d\n",
wlc->pub->unit, rspec, preamble_type);
- return wlc_calc_ack_time(wlc, rspec, preamble_type);
+ return brcms_c_calc_ack_time(wlc, rspec, preamble_type);
}
/* derive wlc->band->basic_rate[] table from 'rateset' */
-void wlc_rate_lookup_init(struct wlc_info *wlc, wlc_rateset_t *rateset)
+void brcms_c_rate_lookup_init(struct brcms_c_info *wlc, wlc_rateset_t *rateset)
{
u8 rate;
u8 mandatory;
@@ -6468,22 +4968,22 @@ void wlc_rate_lookup_init(struct wlc_info *wlc, wlc_rateset_t *rateset)
uint i;
/* incoming rates are in 500kbps units as in 802.11 Supported Rates */
- memset(br, 0, WLC_MAXRATE + 1);
+ memset(br, 0, BRCM_MAXRATE + 1);
/* For each basic rate in the rates list, make an entry in the
* best basic lookup.
*/
for (i = 0; i < rateset->count; i++) {
/* only make an entry for a basic rate */
- if (!(rateset->rates[i] & WLC_RATE_FLAG))
+ if (!(rateset->rates[i] & BRCMS_RATE_FLAG))
continue;
/* mask off basic bit */
- rate = (rateset->rates[i] & WLC_RATE_MASK);
+ rate = (rateset->rates[i] & BRCMS_RATE_MASK);
- if (rate > WLC_MAXRATE) {
- wiphy_err(wlc->wiphy, "wlc_rate_lookup_init: invalid "
- "rate 0x%X in rate set\n",
+ if (rate > BRCM_MAXRATE) {
+ wiphy_err(wlc->wiphy, "brcms_c_rate_lookup_init: "
+ "invalid rate 0x%X in rate set\n",
rateset->rates[i]);
continue;
}
@@ -6533,12 +5033,12 @@ void wlc_rate_lookup_init(struct wlc_info *wlc, wlc_rateset_t *rateset)
if (IS_OFDM(rate)) {
/* In 11g and 11a, the OFDM mandatory rates are 6, 12, and 24 Mbps */
- if (rate >= WLC_RATE_24M)
- mandatory = WLC_RATE_24M;
- else if (rate >= WLC_RATE_12M)
- mandatory = WLC_RATE_12M;
+ if (rate >= BRCM_RATE_24M)
+ mandatory = BRCM_RATE_24M;
+ else if (rate >= BRCM_RATE_12M)
+ mandatory = BRCM_RATE_12M;
else
- mandatory = WLC_RATE_6M;
+ mandatory = BRCM_RATE_6M;
} else {
/* In 11b, all the CCK rates are mandatory 1 - 11 Mbps */
mandatory = rate;
@@ -6548,7 +5048,8 @@ void wlc_rate_lookup_init(struct wlc_info *wlc, wlc_rateset_t *rateset)
}
}
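/*
 * A minimal standalone sketch of the mandatory-rate fallback chosen above
 * when a rate has no basic rate at or below it. Rates are in 500 kbps units
 * (12 = 6 Mbps, 24 = 12 Mbps, 48 = 24 Mbps).
 */
static unsigned sketch_mandatory_fallback(unsigned rate_500kbps, int is_ofdm)
{
	if (!is_ofdm)
		return rate_500kbps;	/* every 11b CCK rate is itself mandatory */
	if (rate_500kbps >= 48)
		return 48;		/* 24 Mbps */
	if (rate_500kbps >= 24)
		return 24;		/* 12 Mbps */
	return 12;			/*  6 Mbps */
}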
-static void wlc_write_rate_shm(struct wlc_info *wlc, u8 rate, u8 basic_rate)
+static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate,
+ u8 basic_rate)
{
u8 phy_rate, index;
u8 basic_phy_rate, basic_index;
@@ -6565,27 +5066,27 @@ static void wlc_write_rate_shm(struct wlc_info *wlc, u8 rate, u8 basic_rate)
* for a given rate, the LS-nibble of the PLCP SIGNAL field is
* the index into the rate table.
*/
- phy_rate = rate_info[rate] & WLC_RATE_MASK;
- basic_phy_rate = rate_info[basic_rate] & WLC_RATE_MASK;
+ phy_rate = rate_info[rate] & BRCMS_RATE_MASK;
+ basic_phy_rate = rate_info[basic_rate] & BRCMS_RATE_MASK;
index = phy_rate & 0xf;
basic_index = basic_phy_rate & 0xf;
/* Find the SHM pointer to the ACK rate entry by looking in the
* Direct-map Table
*/
- basic_ptr = wlc_read_shm(wlc, (dir_table + basic_index * 2));
+ basic_ptr = brcms_c_read_shm(wlc, (dir_table + basic_index * 2));
/* Update the SHM BSS-basic-rate-set mapping table with the pointer
* to the correct basic rate for the given incoming rate
*/
- wlc_write_shm(wlc, (basic_table + index * 2), basic_ptr);
+ brcms_c_write_shm(wlc, (basic_table + index * 2), basic_ptr);
}
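/*
 * A minimal standalone sketch of the shared-memory update above, modelled
 * with plain arrays instead of SHM reads/writes. The low nibble of a rate's
 * PLCP SIGNAL code indexes both tables; the basic-rate table entry for an
 * incoming rate is pointed at the direct-map entry of the rate to respond
 * (ACK/CTS) with.
 */
#include <stdint.h>

static void sketch_map_basic_rate(uint16_t basic_tbl[16],
				  const uint16_t dir_tbl[16],
				  uint8_t phy_rate_code,
				  uint8_t basic_phy_rate_code)
{
	uint8_t index = phy_rate_code & 0xf;		 /* incoming rate slot */
	uint8_t basic_index = basic_phy_rate_code & 0xf; /* response rate slot */

	basic_tbl[index] = dir_tbl[basic_index];	 /* redirect the ACK rate */
}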
-static const wlc_rateset_t *wlc_rateset_get_hwrs(struct wlc_info *wlc)
+static const wlc_rateset_t *brcms_c_rateset_get_hwrs(struct brcms_c_info *wlc)
{
const wlc_rateset_t *rs_dflt;
- if (WLC_PHY_11N_CAP(wlc->band)) {
+ if (BRCMS_PHY_11N_CAP(wlc->band)) {
if (BAND_5G(wlc->band->bandtype))
rs_dflt = &ofdm_mimo_rates;
else
@@ -6598,48 +5099,48 @@ static const wlc_rateset_t *wlc_rateset_get_hwrs(struct wlc_info *wlc)
return rs_dflt;
}
-void wlc_set_ratetable(struct wlc_info *wlc)
+void brcms_c_set_ratetable(struct brcms_c_info *wlc)
{
const wlc_rateset_t *rs_dflt;
wlc_rateset_t rs;
u8 rate, basic_rate;
uint i;
- rs_dflt = wlc_rateset_get_hwrs(wlc);
+ rs_dflt = brcms_c_rateset_get_hwrs(wlc);
- wlc_rateset_copy(rs_dflt, &rs);
- wlc_rateset_mcs_upd(&rs, wlc->stf->txstreams);
+ brcms_c_rateset_copy(rs_dflt, &rs);
+ brcms_c_rateset_mcs_upd(&rs, wlc->stf->txstreams);
/* walk the phy rate table and update SHM basic rate lookup table */
for (i = 0; i < rs.count; i++) {
- rate = rs.rates[i] & WLC_RATE_MASK;
+ rate = rs.rates[i] & BRCMS_RATE_MASK;
- /* for a given rate WLC_BASIC_RATE returns the rate at
+ /* for a given rate BRCMS_BASIC_RATE returns the rate at
* which a response ACK/CTS should be sent.
*/
- basic_rate = WLC_BASIC_RATE(wlc, rate);
+ basic_rate = BRCMS_BASIC_RATE(wlc, rate);
if (basic_rate == 0) {
/* This should only happen if we are using a
* restricted rateset.
*/
- basic_rate = rs.rates[0] & WLC_RATE_MASK;
+ basic_rate = rs.rates[0] & BRCMS_RATE_MASK;
}
- wlc_write_rate_shm(wlc, rate, basic_rate);
+ brcms_c_write_rate_shm(wlc, rate, basic_rate);
}
}
/*
* Return true if the specified rate is supported by the specified band.
- * WLC_BAND_AUTO indicates the current band.
+ * BRCM_BAND_AUTO indicates the current band.
*/
-bool wlc_valid_rate(struct wlc_info *wlc, ratespec_t rspec, int band,
+bool brcms_c_valid_rate(struct brcms_c_info *wlc, ratespec_t rspec, int band,
bool verbose)
{
wlc_rateset_t *hw_rateset;
uint i;
- if ((band == WLC_BAND_AUTO) || (band == wlc->band->bandtype)) {
+ if ((band == BRCM_BAND_AUTO) || (band == wlc->band->bandtype)) {
hw_rateset = &wlc->band->hw_rateset;
} else if (NBANDS(wlc) > 1) {
hw_rateset = &wlc->bandstate[OTHERBANDUNIT(wlc)]->hw_rateset;
@@ -6661,40 +5162,38 @@ bool wlc_valid_rate(struct wlc_info *wlc, ratespec_t rspec, int band,
return true;
error:
if (verbose) {
- wiphy_err(wlc->wiphy, "wl%d: wlc_valid_rate: rate spec 0x%x "
+ wiphy_err(wlc->wiphy, "wl%d: valid_rate: rate spec 0x%x "
"not in hw_rateset\n", wlc->pub->unit, rspec);
}
return false;
}
-static void wlc_update_mimo_band_bwcap(struct wlc_info *wlc, u8 bwcap)
+static void brcms_c_update_mimo_band_bwcap(struct brcms_c_info *wlc, u8 bwcap)
{
uint i;
- struct wlcband *band;
+ struct brcms_band *band;
for (i = 0; i < NBANDS(wlc); i++) {
if (IS_SINGLEBAND_5G(wlc->deviceid))
i = BAND_5G_INDEX;
band = wlc->bandstate[i];
- if (band->bandtype == WLC_BAND_5G) {
- if ((bwcap == WLC_N_BW_40ALL)
- || (bwcap == WLC_N_BW_20IN2G_40IN5G))
+ if (band->bandtype == BRCM_BAND_5G) {
+ if ((bwcap == BRCMS_N_BW_40ALL)
+ || (bwcap == BRCMS_N_BW_20IN2G_40IN5G))
band->mimo_cap_40 = true;
else
band->mimo_cap_40 = false;
} else {
- if (bwcap == WLC_N_BW_40ALL)
+ if (bwcap == BRCMS_N_BW_40ALL)
band->mimo_cap_40 = true;
else
band->mimo_cap_40 = false;
}
}
-
- wlc->mimo_band_bwcap = bwcap;
}
-void wlc_mod_prb_rsp_rate_table(struct wlc_info *wlc, uint frame_len)
+void brcms_c_mod_prb_rsp_rate_table(struct brcms_c_info *wlc, uint frame_len)
{
const wlc_rateset_t *rs_dflt;
wlc_rateset_t rs;
@@ -6706,32 +5205,31 @@ void wlc_mod_prb_rsp_rate_table(struct wlc_info *wlc, uint frame_len)
sifs = SIFS(wlc->band);
- rs_dflt = wlc_rateset_get_hwrs(wlc);
+ rs_dflt = brcms_c_rateset_get_hwrs(wlc);
- wlc_rateset_copy(rs_dflt, &rs);
- wlc_rateset_mcs_upd(&rs, wlc->stf->txstreams);
+ brcms_c_rateset_copy(rs_dflt, &rs);
+ brcms_c_rateset_mcs_upd(&rs, wlc->stf->txstreams);
/* walk the phy rate table and update MAC core SHM basic rate table entries */
for (i = 0; i < rs.count; i++) {
- rate = rs.rates[i] & WLC_RATE_MASK;
+ rate = rs.rates[i] & BRCMS_RATE_MASK;
- entry_ptr = wlc_rate_shm_offset(wlc, rate);
+ entry_ptr = brcms_c_rate_shm_offset(wlc, rate);
/* Calculate the Probe Response PLCP for the given rate */
- wlc_compute_plcp(wlc, rate, frame_len, plcp);
+ brcms_c_compute_plcp(wlc, rate, frame_len, plcp);
/* Calculate the duration of the Probe Response frame plus SIFS for the MAC */
- dur =
- (u16) wlc_calc_frame_time(wlc, rate, WLC_LONG_PREAMBLE,
- frame_len);
+ dur = (u16) brcms_c_calc_frame_time(wlc, rate,
+ BRCMS_LONG_PREAMBLE, frame_len);
dur += sifs;
/* Update the SHM Rate Table entry Probe Response values */
- wlc_write_shm(wlc, entry_ptr + M_RT_PRS_PLCP_POS,
+ brcms_c_write_shm(wlc, entry_ptr + M_RT_PRS_PLCP_POS,
(u16) (plcp[0] + (plcp[1] << 8)));
- wlc_write_shm(wlc, entry_ptr + M_RT_PRS_PLCP_POS + 2,
+ brcms_c_write_shm(wlc, entry_ptr + M_RT_PRS_PLCP_POS + 2,
(u16) (plcp[2] + (plcp[3] << 8)));
- wlc_write_shm(wlc, entry_ptr + M_RT_PRS_DUR_POS, dur);
+ brcms_c_write_shm(wlc, entry_ptr + M_RT_PRS_DUR_POS, dur);
}
}
@@ -6748,11 +5246,12 @@ void wlc_mod_prb_rsp_rate_table(struct wlc_info *wlc, uint frame_len)
* and included up to, but not including, the 4 byte FCS.
*/
static void
-wlc_bcn_prb_template(struct wlc_info *wlc, u16 type, ratespec_t bcn_rspec,
- struct wlc_bsscfg *cfg, u16 *buf, int *len)
+brcms_c_bcn_prb_template(struct brcms_c_info *wlc, u16 type,
+ ratespec_t bcn_rspec,
+ struct brcms_bss_cfg *cfg, u16 *buf, int *len)
{
static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255};
- cck_phy_hdr_t *plcp;
+ struct cck_phy_hdr *plcp;
struct ieee80211_mgmt *h;
int hdr_len, body_len;
@@ -6767,12 +5266,12 @@ wlc_bcn_prb_template(struct wlc_info *wlc, u16 type, ratespec_t bcn_rspec,
/* format PHY and MAC headers */
memset((char *)buf, 0, hdr_len);
- plcp = (cck_phy_hdr_t *) buf;
+ plcp = (struct cck_phy_hdr *) buf;
/* PLCP for Probe Response frames are filled in from core's rate table */
if (type == IEEE80211_STYPE_BEACON && !MBSS_BCN_ENAB(cfg)) {
/* fill in PLCP */
- wlc_compute_plcp(wlc, bcn_rspec,
+ brcms_c_compute_plcp(wlc, bcn_rspec,
(DOT11_MAC_HDR_LEN + body_len + FCS_LEN),
(u8 *) plcp);
@@ -6780,7 +5279,7 @@ wlc_bcn_prb_template(struct wlc_info *wlc, u16 type, ratespec_t bcn_rspec,
/* "Regular" and 16 MBSS but not for 4 MBSS */
/* Update the phytxctl for the beacon based on the rspec */
if (!SOFTBCN_ENAB(cfg))
- wlc_beacon_phytxctl_txant_upd(wlc, bcn_rspec);
+ brcms_c_beacon_phytxctl_txant_upd(wlc, bcn_rspec);
if (MBSS_BCN_ENAB(cfg) && type == IEEE80211_STYPE_BEACON)
h = (struct ieee80211_mgmt *)&plcp[0];
@@ -6802,7 +5301,7 @@ wlc_bcn_prb_template(struct wlc_info *wlc, u16 type, ratespec_t bcn_rspec,
return;
}
-int wlc_get_header_len()
+int brcms_c_get_header_len()
{
return TXOFF;
}
@@ -6812,7 +5311,8 @@ int wlc_get_header_len()
* template updated.
* Otherwise, it updates the hardware template.
*/
-void wlc_bss_update_beacon(struct wlc_info *wlc, struct wlc_bsscfg *cfg)
+void brcms_c_bss_update_beacon(struct brcms_c_info *wlc,
+ struct brcms_bss_cfg *cfg)
{
int len = BCN_TMPL_LEN;
@@ -6846,31 +5346,31 @@ void wlc_bss_update_beacon(struct wlc_info *wlc, struct wlc_bsscfg *cfg)
}
wlc->bcn_rspec =
- wlc_lowest_basic_rspec(wlc, &cfg->current_bss->rateset);
+ brcms_c_lowest_basic_rspec(wlc, &cfg->current_bss->rateset);
/* update the template and ucode shm */
- wlc_bcn_prb_template(wlc, IEEE80211_STYPE_BEACON,
+ brcms_c_bcn_prb_template(wlc, IEEE80211_STYPE_BEACON,
wlc->bcn_rspec, cfg, bcn, &len);
- wlc_write_hw_bcntemplates(wlc, bcn, len, false);
+ brcms_c_write_hw_bcntemplates(wlc, bcn, len, false);
}
}
/*
* Update all beacons for the system.
*/
-void wlc_update_beacon(struct wlc_info *wlc)
+void brcms_c_update_beacon(struct brcms_c_info *wlc)
{
int idx;
- struct wlc_bsscfg *bsscfg;
+ struct brcms_bss_cfg *bsscfg;
/* update AP or IBSS beacons */
FOREACH_BSS(wlc, idx, bsscfg) {
if (bsscfg->up && (BSSCFG_AP(bsscfg) || !bsscfg->BSS))
- wlc_bss_update_beacon(wlc, bsscfg);
+ brcms_c_bss_update_beacon(wlc, bsscfg);
}
}
/* Write ssid into shared memory */
-void wlc_shm_ssid_upd(struct wlc_info *wlc, struct wlc_bsscfg *cfg)
+void brcms_c_shm_ssid_upd(struct brcms_c_info *wlc, struct brcms_bss_cfg *cfg)
{
u8 *ssidptr = cfg->SSID;
u16 base = M_SSID;
@@ -6880,27 +5380,28 @@ void wlc_shm_ssid_upd(struct wlc_info *wlc, struct wlc_bsscfg *cfg)
memset(ssidbuf, 0, IEEE80211_MAX_SSID_LEN);
memcpy(ssidbuf, ssidptr, cfg->SSID_len);
- wlc_copyto_shm(wlc, base, ssidbuf, IEEE80211_MAX_SSID_LEN);
+ brcms_c_copyto_shm(wlc, base, ssidbuf, IEEE80211_MAX_SSID_LEN);
if (!MBSS_BCN_ENAB(cfg))
- wlc_write_shm(wlc, M_SSIDLEN, (u16) cfg->SSID_len);
+ brcms_c_write_shm(wlc, M_SSIDLEN, (u16) cfg->SSID_len);
}
-void wlc_update_probe_resp(struct wlc_info *wlc, bool suspend)
+void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend)
{
int idx;
- struct wlc_bsscfg *bsscfg;
+ struct brcms_bss_cfg *bsscfg;
/* update AP or IBSS probe responses */
FOREACH_BSS(wlc, idx, bsscfg) {
if (bsscfg->up && (BSSCFG_AP(bsscfg) || !bsscfg->BSS))
- wlc_bss_update_probe_resp(wlc, bsscfg, suspend);
+ brcms_c_bss_update_probe_resp(wlc, bsscfg, suspend);
}
}
void
-wlc_bss_update_probe_resp(struct wlc_info *wlc, struct wlc_bsscfg *cfg,
- bool suspend)
+brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
+ struct brcms_bss_cfg *cfg,
+ bool suspend)
{
u16 prb_resp[BCN_TMPL_LEN / 2];
int len = BCN_TMPL_LEN;
@@ -6909,49 +5410,51 @@ wlc_bss_update_probe_resp(struct wlc_info *wlc, struct wlc_bsscfg *cfg,
if (!MBSS_PRB_ENAB(cfg)) {
/* create the probe response template */
- wlc_bcn_prb_template(wlc, IEEE80211_STYPE_PROBE_RESP, 0, cfg,
- prb_resp, &len);
+ brcms_c_bcn_prb_template(wlc, IEEE80211_STYPE_PROBE_RESP, 0,
+ cfg, prb_resp, &len);
if (suspend)
- wlc_suspend_mac_and_wait(wlc);
+ brcms_c_suspend_mac_and_wait(wlc);
/* write the probe response into the template region */
- wlc_bmac_write_template_ram(wlc->hw, T_PRS_TPL_BASE,
+ brcms_b_write_template_ram(wlc->hw, T_PRS_TPL_BASE,
(len + 3) & ~3, prb_resp);
/* write the length of the probe response frame (+PLCP/-FCS) */
- wlc_write_shm(wlc, M_PRB_RESP_FRM_LEN, (u16) len);
+ brcms_c_write_shm(wlc, M_PRB_RESP_FRM_LEN, (u16) len);
/* write the SSID and SSID length */
- wlc_shm_ssid_upd(wlc, cfg);
+ brcms_c_shm_ssid_upd(wlc, cfg);
/*
* Write PLCP headers and durations for probe response frames at all rates.
* Use the actual frame length covered by the PLCP header for the call to
- * wlc_mod_prb_rsp_rate_table() by subtracting the PLCP len and adding the FCS.
+ * brcms_c_mod_prb_rsp_rate_table() by subtracting the PLCP len
+ * and adding the FCS.
*/
len += (-D11_PHY_HDR_LEN + FCS_LEN);
- wlc_mod_prb_rsp_rate_table(wlc, (u16) len);
+ brcms_c_mod_prb_rsp_rate_table(wlc, (u16) len);
if (suspend)
- wlc_enable_mac(wlc);
+ brcms_c_enable_mac(wlc);
} else { /* Generating probe resp in sw; update local template */
/* error: No software probe response support without MBSS */
}
}
/* prepares pdu for transmission. returns BCM error codes */
-int wlc_prep_pdu(struct wlc_info *wlc, struct sk_buff *pdu, uint *fifop)
+int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu, uint *fifop)
{
uint fifo;
- d11txh_t *txh;
+ struct d11txh *txh;
struct ieee80211_hdr *h;
struct scb *scb;
- txh = (d11txh_t *) (pdu->data);
+ txh = (struct d11txh *) (pdu->data);
h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
- /* get the pkt queue info. This was put at wlc_sendctl or wlc_send for PDU */
+ /* get the pkt queue info. This was put at brcms_c_sendctl or
+ * brcms_c_send for PDU */
fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
scb = NULL;
@@ -6961,87 +5464,49 @@ int wlc_prep_pdu(struct wlc_info *wlc, struct sk_buff *pdu, uint *fifop)
/* return if insufficient dma resources */
if (TXAVAIL(wlc, fifo) < MAX_DMA_SEGS) {
/* Mark precedences related to this FIFO, unsendable */
- WLC_TX_FIFO_CLEAR(wlc, fifo);
+ BRCMS_TX_FIFO_CLEAR(wlc, fifo);
return -EBUSY;
}
return 0;
}
/* init tx reported rate mechanism */
-void wlc_reprate_init(struct wlc_info *wlc)
+void brcms_c_reprate_init(struct brcms_c_info *wlc)
{
int i;
- struct wlc_bsscfg *bsscfg;
+ struct brcms_bss_cfg *bsscfg;
FOREACH_BSS(wlc, i, bsscfg) {
- wlc_bsscfg_reprate_init(bsscfg);
+ brcms_c_bsscfg_reprate_init(bsscfg);
}
}
/* per bsscfg init tx reported rate mechanism */
-void wlc_bsscfg_reprate_init(struct wlc_bsscfg *bsscfg)
+void brcms_c_bsscfg_reprate_init(struct brcms_bss_cfg *bsscfg)
{
bsscfg->txrspecidx = 0;
memset((char *)bsscfg->txrspec, 0, sizeof(bsscfg->txrspec));
}
-/* Retrieve a consolidated set of revision information,
- * typically for the WLC_GET_REVINFO ioctl
- */
-int wlc_get_revision_info(struct wlc_info *wlc, void *buf, uint len)
-{
- wlc_rev_info_t *rinfo = (wlc_rev_info_t *) buf;
-
- if (len < WL_REV_INFO_LEGACY_LENGTH)
- return -EOVERFLOW;
-
- rinfo->vendorid = wlc->vendorid;
- rinfo->deviceid = wlc->deviceid;
- rinfo->radiorev = (wlc->band->radiorev << IDCODE_REV_SHIFT) |
- (wlc->band->radioid << IDCODE_ID_SHIFT);
- rinfo->chiprev = wlc->pub->sih->chiprev;
- rinfo->corerev = wlc->pub->corerev;
- rinfo->boardid = wlc->pub->sih->boardtype;
- rinfo->boardvendor = wlc->pub->sih->boardvendor;
- rinfo->boardrev = wlc->pub->boardrev;
- rinfo->ucoderev = wlc->ucode_rev;
- rinfo->driverrev = EPI_VERSION_NUM;
- rinfo->bus = wlc->pub->sih->bustype;
- rinfo->chipnum = wlc->pub->sih->chip;
-
- if (len >= (offsetof(wlc_rev_info_t, chippkg))) {
- rinfo->phytype = wlc->band->phytype;
- rinfo->phyrev = wlc->band->phyrev;
- rinfo->anarev = 0; /* obsolete stuff, suppress */
- }
-
- if (len >= sizeof(*rinfo)) {
- rinfo->chippkg = wlc->pub->sih->chippkg;
- }
-
- return 0;
-}
-
-void wlc_default_rateset(struct wlc_info *wlc, wlc_rateset_t *rs)
+void brcms_default_rateset(struct brcms_c_info *wlc, wlc_rateset_t *rs)
{
- wlc_rateset_default(rs, NULL, wlc->band->phytype, wlc->band->bandtype,
- false, WLC_RATE_MASK_FULL, (bool) N_ENAB(wlc->pub),
- CHSPEC_WLC_BW(wlc->default_bss->chanspec),
- wlc->stf->txstreams);
+ brcms_c_rateset_default(rs, NULL, wlc->band->phytype,
+ wlc->band->bandtype, false, BRCMS_RATE_MASK_FULL,
+ (bool) N_ENAB(wlc->pub),
+ CHSPEC_WLC_BW(wlc->default_bss->chanspec),
+ wlc->stf->txstreams);
}
-static void wlc_bss_default_init(struct wlc_info *wlc)
+static void brcms_c_bss_default_init(struct brcms_c_info *wlc)
{
chanspec_t chanspec;
- struct wlcband *band;
- wlc_bss_info_t *bi = wlc->default_bss;
+ struct brcms_band *band;
+ struct brcms_bss_info *bi = wlc->default_bss;
/* init default and target BSS with some sane initial values */
- memset((char *)(bi), 0, sizeof(wlc_bss_info_t));
- bi->beacon_period = ISSIM_ENAB(wlc->pub->sih) ? BEACON_INTERVAL_DEF_QT :
- BEACON_INTERVAL_DEFAULT;
- bi->dtim_period = ISSIM_ENAB(wlc->pub->sih) ? DTIM_INTERVAL_DEF_QT :
- DTIM_INTERVAL_DEFAULT;
+ memset((char *)(bi), 0, sizeof(struct brcms_bss_info));
+ bi->beacon_period = BEACON_INTERVAL_DEFAULT;
+ bi->dtim_period = DTIM_INTERVAL_DEFAULT;
/* fill the default channel as the first valid channel
* starting from the 2G channels
@@ -7051,20 +5516,21 @@ static void wlc_bss_default_init(struct wlc_info *wlc)
/* find the band of our default channel */
band = wlc->band;
- if (NBANDS(wlc) > 1 && band->bandunit != CHSPEC_WLCBANDUNIT(chanspec))
+ if (NBANDS(wlc) > 1 && band->bandunit != CHSPEC_BANDUNIT(chanspec))
band = wlc->bandstate[OTHERBANDUNIT(wlc)];
/* init bss rates to the band specific default rate set */
- wlc_rateset_default(&bi->rateset, NULL, band->phytype, band->bandtype,
- false, WLC_RATE_MASK_FULL, (bool) N_ENAB(wlc->pub),
- CHSPEC_WLC_BW(chanspec), wlc->stf->txstreams);
+ brcms_c_rateset_default(&bi->rateset, NULL, band->phytype,
+ band->bandtype, false, BRCMS_RATE_MASK_FULL,
+ (bool) N_ENAB(wlc->pub), CHSPEC_WLC_BW(chanspec),
+ wlc->stf->txstreams);
if (N_ENAB(wlc->pub))
- bi->flags |= WLC_BSS_HT;
+ bi->flags |= BRCMS_BSS_HT;
}
static ratespec_t
-mac80211_wlc_set_nrate(struct wlc_info *wlc, struct wlcband *cur_band,
+mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
u32 int_val)
{
u8 stf = (int_val & NRATE_STF_MASK) >> NRATE_STF_SHIFT;
@@ -7085,7 +5551,7 @@ mac80211_wlc_set_nrate(struct wlc_info *wlc, struct wlcband *cur_band,
/* mcs only allowed when nmode */
if (stf > PHY_TXC1_MODE_SDM) {
wiphy_err(wlc->wiphy, "wl%d: %s: Invalid stf\n",
- WLCWLUNIT(wlc), __func__);
+ BRCMS_UNIT(wlc), __func__);
bcmerror = -EINVAL;
goto done;
}
@@ -7096,7 +5562,7 @@ mac80211_wlc_set_nrate(struct wlc_info *wlc, struct wlcband *cur_band,
((stf != PHY_TXC1_MODE_SISO)
&& (stf != PHY_TXC1_MODE_CDD))) {
wiphy_err(wlc->wiphy, "wl%d: %s: Invalid mcs "
- "32\n", WLCWLUNIT(wlc), __func__);
+ "32\n", BRCMS_UNIT(wlc), __func__);
bcmerror = -EINVAL;
goto done;
}
@@ -7106,16 +5572,16 @@ mac80211_wlc_set_nrate(struct wlc_info *wlc, struct wlcband *cur_band,
if (stf != PHY_TXC1_MODE_SDM) {
BCMMSG(wlc->wiphy, "wl%d: enabling "
"SDM mode for mcs %d\n",
- WLCWLUNIT(wlc), rate);
+ BRCMS_UNIT(wlc), rate);
stf = PHY_TXC1_MODE_SDM;
}
} else {
/* MCS 0-7 may use SISO, CDD, and for phy_rev >= 3 STBC */
if ((stf > PHY_TXC1_MODE_STBC) ||
- (!WLC_STBC_CAP_PHY(wlc)
+ (!BRCMS_STBC_CAP_PHY(wlc)
&& (stf == PHY_TXC1_MODE_STBC))) {
wiphy_err(wlc->wiphy, "wl%d: %s: Invalid STBC"
- "\n", WLCWLUNIT(wlc), __func__);
+ "\n", BRCMS_UNIT(wlc), __func__);
bcmerror = -EINVAL;
goto done;
}
@@ -7123,28 +5589,28 @@ mac80211_wlc_set_nrate(struct wlc_info *wlc, struct wlcband *cur_band,
} else if (IS_OFDM(rate)) {
if ((stf != PHY_TXC1_MODE_CDD) && (stf != PHY_TXC1_MODE_SISO)) {
wiphy_err(wlc->wiphy, "wl%d: %s: Invalid OFDM\n",
- WLCWLUNIT(wlc), __func__);
+ BRCMS_UNIT(wlc), __func__);
bcmerror = -EINVAL;
goto done;
}
} else if (IS_CCK(rate)) {
- if ((cur_band->bandtype != WLC_BAND_2G)
+ if ((cur_band->bandtype != BRCM_BAND_2G)
|| (stf != PHY_TXC1_MODE_SISO)) {
wiphy_err(wlc->wiphy, "wl%d: %s: Invalid CCK\n",
- WLCWLUNIT(wlc), __func__);
+ BRCMS_UNIT(wlc), __func__);
bcmerror = -EINVAL;
goto done;
}
} else {
wiphy_err(wlc->wiphy, "wl%d: %s: Unknown rate type\n",
- WLCWLUNIT(wlc), __func__);
+ BRCMS_UNIT(wlc), __func__);
bcmerror = -EINVAL;
goto done;
}
/* make sure multiple antennae are available for non-siso rates */
if ((stf != PHY_TXC1_MODE_SISO) && (wlc->stf->txstreams == 1)) {
wiphy_err(wlc->wiphy, "wl%d: %s: SISO antenna but !SISO "
- "request\n", WLCWLUNIT(wlc), __func__);
+ "request\n", BRCMS_UNIT(wlc), __func__);
bcmerror = -EINVAL;
goto done;
}
@@ -7169,7 +5635,7 @@ mac80211_wlc_set_nrate(struct wlc_info *wlc, struct wlcband *cur_band,
rspec |= RSPEC_SHORT_GI;
if ((rate != 0)
- && !wlc_valid_rate(wlc, rspec, cur_band->bandtype, true)) {
+ && !brcms_c_valid_rate(wlc, rspec, cur_band->bandtype, true)) {
return rate;
}
@@ -7180,7 +5646,7 @@ done:
/* formula: IDLE_BUSY_RATIO_X_16 = (100-duty_cycle)/duty_cycle*16 */
static int
-wlc_duty_cycle_set(struct wlc_info *wlc, int duty_cycle, bool isOFDM,
+brcms_c_duty_cycle_set(struct brcms_c_info *wlc, int duty_cycle, bool isOFDM,
bool writeToShm)
{
int idle_busy_ratio_x_16 = 0;
@@ -7196,7 +5662,7 @@ wlc_duty_cycle_set(struct wlc_info *wlc, int duty_cycle, bool isOFDM,
idle_busy_ratio_x_16 = (100 - duty_cycle) * 16 / duty_cycle;
/* Only write to shared memory when wl is up */
if (writeToShm)
- wlc_write_shm(wlc, offset, (u16) idle_busy_ratio_x_16);
+ brcms_c_write_shm(wlc, offset, (u16) idle_busy_ratio_x_16);
if (isOFDM)
wlc->tx_duty_cycle_ofdm = (u16) duty_cycle;
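A quick worked instance of the IDLE_BUSY_RATIO_X_16 formula above (illustrative arithmetic only, not driver code):

	/* idle_busy_ratio_x_16 = (100 - duty_cycle) * 16 / duty_cycle */
	int duty_cycle = 20;                              /* 20% on-air time     */
	int ratio = (100 - duty_cycle) * 16 / duty_cycle; /* = 80 * 16 / 20 = 64 */
	/* duty_cycle == 100 gives 0, i.e. no enforced idle time */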
@@ -7209,97 +5675,85 @@ wlc_duty_cycle_set(struct wlc_info *wlc, int duty_cycle, bool isOFDM,
/* Read a single u16 from shared memory.
* SHM 'offset' needs to be an even address
*/
-u16 wlc_read_shm(struct wlc_info *wlc, uint offset)
+u16 brcms_c_read_shm(struct brcms_c_info *wlc, uint offset)
{
- return wlc_bmac_read_shm(wlc->hw, offset);
+ return brcms_b_read_shm(wlc->hw, offset);
}
/* Write a single u16 to shared memory.
* SHM 'offset' needs to be an even address
*/
-void wlc_write_shm(struct wlc_info *wlc, uint offset, u16 v)
+void brcms_c_write_shm(struct brcms_c_info *wlc, uint offset, u16 v)
{
- wlc_bmac_write_shm(wlc->hw, offset, v);
+ brcms_b_write_shm(wlc->hw, offset, v);
}
/* Copy a buffer to shared memory.
* SHM 'offset' needs to be an even address and
* Buffer length 'len' must be an even number of bytes
*/
-void wlc_copyto_shm(struct wlc_info *wlc, uint offset, const void *buf, int len)
+void brcms_c_copyto_shm(struct brcms_c_info *wlc, uint offset, const void *buf,
+ int len)
{
/* offset and len need to be even */
if (len <= 0 || (offset & 1) || (len & 1))
return;
- wlc_bmac_copyto_objmem(wlc->hw, offset, buf, len, OBJADDR_SHM_SEL);
+ brcms_b_copyto_objmem(wlc->hw, offset, buf, len, OBJADDR_SHM_SEL);
}
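A minimal usage sketch for the shared-memory helpers above; M_EXAMPLE_OFFSET is a made-up even offset (not a real ucode SHM address) and wlc is assumed to come from the surrounding driver code:

	#define M_EXAMPLE_OFFSET 0x40	/* hypothetical, must be even */

	static void example_shm_access(struct brcms_c_info *wlc)
	{
		u16 buf[2] = { 0x1234, 0x5678 };
		u16 val = brcms_c_read_shm(wlc, M_EXAMPLE_OFFSET);

		brcms_c_write_shm(wlc, M_EXAMPLE_OFFSET, val | 0x1);
		/* offset and length are both even, so the copy is accepted */
		brcms_c_copyto_shm(wlc, M_EXAMPLE_OFFSET, buf, sizeof(buf));
	}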
/* wrapper BMAC functions for HIGH driver access */
-void wlc_mctrl(struct wlc_info *wlc, u32 mask, u32 val)
+void brcms_c_mctrl(struct brcms_c_info *wlc, u32 mask, u32 val)
{
- wlc_bmac_mctrl(wlc->hw, mask, val);
+ brcms_b_mctrl(wlc->hw, mask, val);
}
-void wlc_mhf(struct wlc_info *wlc, u8 idx, u16 mask, u16 val, int bands)
+void brcms_c_mhf(struct brcms_c_info *wlc, u8 idx, u16 mask, u16 val, int bands)
{
- wlc_bmac_mhf(wlc->hw, idx, mask, val, bands);
+ brcms_b_mhf(wlc->hw, idx, mask, val, bands);
}
-int wlc_xmtfifo_sz_get(struct wlc_info *wlc, uint fifo, uint *blocks)
+int brcms_c_xmtfifo_sz_get(struct brcms_c_info *wlc, uint fifo, uint *blocks)
{
- return wlc_bmac_xmtfifo_sz_get(wlc->hw, fifo, blocks);
+ return brcms_b_xmtfifo_sz_get(wlc->hw, fifo, blocks);
}
-void wlc_write_template_ram(struct wlc_info *wlc, int offset, int len,
+void brcms_c_write_template_ram(struct brcms_c_info *wlc, int offset, int len,
void *buf)
{
- wlc_bmac_write_template_ram(wlc->hw, offset, len, buf);
+ brcms_b_write_template_ram(wlc->hw, offset, len, buf);
}
-void wlc_write_hw_bcntemplates(struct wlc_info *wlc, void *bcn, int len,
+void brcms_c_write_hw_bcntemplates(struct brcms_c_info *wlc, void *bcn, int len,
bool both)
{
- wlc_bmac_write_hw_bcntemplates(wlc->hw, bcn, len, both);
+ brcms_b_write_hw_bcntemplates(wlc->hw, bcn, len, both);
}
void
-wlc_set_addrmatch(struct wlc_info *wlc, int match_reg_offset,
+brcms_c_set_addrmatch(struct brcms_c_info *wlc, int match_reg_offset,
const u8 *addr)
{
- wlc_bmac_set_addrmatch(wlc->hw, match_reg_offset, addr);
+ brcms_b_set_addrmatch(wlc->hw, match_reg_offset, addr);
if (match_reg_offset == RCM_BSSID_OFFSET)
memcpy(wlc->cfg->BSSID, addr, ETH_ALEN);
}
-void wlc_set_cwmin(struct wlc_info *wlc, u16 newmin)
+void brcms_c_pllreq(struct brcms_c_info *wlc, bool set, mbool req_bit)
{
- wlc->band->CWmin = newmin;
- wlc_bmac_set_cwmin(wlc->hw, newmin);
+ brcms_b_pllreq(wlc->hw, set, req_bit);
}
-void wlc_set_cwmax(struct wlc_info *wlc, u16 newmax)
-{
- wlc->band->CWmax = newmax;
- wlc_bmac_set_cwmax(wlc->hw, newmax);
-}
-
-/* Search mem rw utilities */
-
-void wlc_pllreq(struct wlc_info *wlc, bool set, mbool req_bit)
-{
- wlc_bmac_pllreq(wlc->hw, set, req_bit);
-}
-
-void wlc_reset_bmac_done(struct wlc_info *wlc)
+void brcms_c_reset_bmac_done(struct brcms_c_info *wlc)
{
}
/* check for the particular priority flow control bit being set */
bool
-wlc_txflowcontrol_prio_isset(struct wlc_info *wlc, struct wlc_txq_info *q,
- int prio)
+brcms_c_txflowcontrol_prio_isset(struct brcms_c_info *wlc,
+ struct brcms_txq_info *q,
+ int prio)
{
uint prio_mask;
@@ -7313,8 +5767,9 @@ wlc_txflowcontrol_prio_isset(struct wlc_info *wlc, struct wlc_txq_info *q,
}
/* propagate the flow control to all interfaces using the given tx queue */
-void wlc_txflowcontrol(struct wlc_info *wlc, struct wlc_txq_info *qi,
- bool on, int prio)
+void brcms_c_txflowcontrol(struct brcms_c_info *wlc,
+ struct brcms_txq_info *qi,
+ bool on, int prio)
{
uint prio_bits;
uint cur_bits;
@@ -7351,12 +5806,13 @@ void wlc_txflowcontrol(struct wlc_info *wlc, struct wlc_txq_info *qi,
return;
}
- wlc_txflowcontrol_signal(wlc, qi, on, prio);
+ brcms_c_txflowcontrol_signal(wlc, qi, on, prio);
}
void
-wlc_txflowcontrol_override(struct wlc_info *wlc, struct wlc_txq_info *qi,
- bool on, uint override)
+brcms_c_txflowcontrol_override(struct brcms_c_info *wlc,
+ struct brcms_txq_info *qi,
+ bool on, uint override)
{
uint prev_override;
@@ -7374,7 +5830,7 @@ wlc_txflowcontrol_override(struct wlc_info *wlc, struct wlc_txq_info *qi,
return;
}
- wlc_txflowcontrol_signal(wlc, qi, ON, ALLPRIO);
+ brcms_c_txflowcontrol_signal(wlc, qi, ON, ALLPRIO);
} else {
mboolclr(qi->stopped, override);
/* clearing an override bit will only make a difference for
@@ -7386,48 +5842,51 @@ wlc_txflowcontrol_override(struct wlc_info *wlc, struct wlc_txq_info *qi,
}
if (qi->stopped == 0) {
- wlc_txflowcontrol_signal(wlc, qi, OFF, ALLPRIO);
+ brcms_c_txflowcontrol_signal(wlc, qi, OFF, ALLPRIO);
} else {
int prio;
for (prio = MAXPRIO; prio >= 0; prio--) {
if (!mboolisset(qi->stopped, NBITVAL(prio)))
- wlc_txflowcontrol_signal(wlc, qi, OFF,
- prio);
+ brcms_c_txflowcontrol_signal(
+ wlc, qi, OFF, prio);
}
}
}
}
-static void wlc_txflowcontrol_reset(struct wlc_info *wlc)
+static void brcms_c_txflowcontrol_reset(struct brcms_c_info *wlc)
{
- struct wlc_txq_info *qi;
+ struct brcms_txq_info *qi;
for (qi = wlc->tx_queues; qi != NULL; qi = qi->next) {
if (qi->stopped) {
- wlc_txflowcontrol_signal(wlc, qi, OFF, ALLPRIO);
+ brcms_c_txflowcontrol_signal(wlc, qi, OFF, ALLPRIO);
qi->stopped = 0;
}
}
}
static void
-wlc_txflowcontrol_signal(struct wlc_info *wlc, struct wlc_txq_info *qi, bool on,
- int prio)
+brcms_c_txflowcontrol_signal(struct brcms_c_info *wlc,
+ struct brcms_txq_info *qi, bool on, int prio)
{
- struct wlc_if *wlcif;
+#ifdef NON_FUNCTIONAL
+ /* wlcif_list is never filled so this function is not functional */
+ struct brcms_c_if *wlcif;
for (wlcif = wlc->wlcif_list; wlcif != NULL; wlcif = wlcif->next) {
- if (wlcif->qi == qi && wlcif->flags & WLC_IF_LINKED)
- wl_txflowcontrol(wlc->wl, wlcif->wlif, on, prio);
+ if (wlcif->qi == qi && wlcif->flags & BRCMS_IF_LINKED)
+ brcms_txflowcontrol(wlc->wl, wlcif->wlif, on, prio);
}
+#endif
}
-static struct wlc_txq_info *wlc_txq_alloc(struct wlc_info *wlc)
+static struct brcms_txq_info *brcms_c_txq_alloc(struct brcms_c_info *wlc)
{
- struct wlc_txq_info *qi, *p;
+ struct brcms_txq_info *qi, *p;
- qi = kzalloc(sizeof(struct wlc_txq_info), GFP_ATOMIC);
+ qi = kzalloc(sizeof(struct brcms_txq_info), GFP_ATOMIC);
if (qi != NULL) {
/*
* Have enough room for control packets along with HI watermark
@@ -7435,7 +5894,7 @@ static struct wlc_txq_info *wlc_txq_alloc(struct wlc_info *wlc)
* leave PS mode. The watermark for flowcontrol to OS packets
* will remain the same
*/
- bcm_pktq_init(&qi->q, WLC_PREC_COUNT,
+ brcmu_pktq_init(&qi->q, BRCMS_PREC_COUNT,
(2 * wlc->pub->tunables->datahiwat) + PKTQ_LEN_DEFAULT
+ wlc->pub->psq_pkts_total);
@@ -7452,9 +5911,10 @@ static struct wlc_txq_info *wlc_txq_alloc(struct wlc_info *wlc)
return qi;
}
-static void wlc_txq_free(struct wlc_info *wlc, struct wlc_txq_info *qi)
+static void brcms_c_txq_free(struct brcms_c_info *wlc,
+ struct brcms_txq_info *qi)
{
- struct wlc_txq_info *p;
+ struct brcms_txq_info *p;
if (qi == NULL)
return;
@@ -7476,17 +5936,17 @@ static void wlc_txq_free(struct wlc_info *wlc, struct wlc_txq_info *qi)
/*
* Flag 'scan in progress' to withhold dynamic phy calibration
*/
-void wlc_scan_start(struct wlc_info *wlc)
+void brcms_c_scan_start(struct brcms_c_info *wlc)
{
wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true);
}
-void wlc_scan_stop(struct wlc_info *wlc)
+void brcms_c_scan_stop(struct brcms_c_info *wlc)
{
wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false);
}
-void wlc_associate_upd(struct wlc_info *wlc, bool state)
+void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state)
{
wlc->pub->associated = state;
wlc->cfg->associated = state;
@@ -7497,11 +5957,11 @@ void wlc_associate_upd(struct wlc_info *wlc, bool state)
* AMPDU traffic, packets pending in hardware have to be invalidated so that
* when later on hardware releases them, they can be handled appropriately.
*/
-void wlc_inval_dma_pkts(struct wlc_hw_info *hw,
+void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
struct ieee80211_sta *sta,
void (*dma_callback_fn))
{
- struct hnddma_pub *dmah;
+ struct dma_pub *dmah;
int i;
for (i = 0; i < NFIFO; i++) {
dmah = hw->di[i];
@@ -7510,20 +5970,133 @@ void wlc_inval_dma_pkts(struct wlc_hw_info *hw,
}
}
-int wlc_get_curband(struct wlc_info *wlc)
+int brcms_c_get_curband(struct brcms_c_info *wlc)
{
return wlc->band->bandunit;
}
-void wlc_wait_for_tx_completion(struct wlc_info *wlc, bool drop)
+void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
{
/* flush packet queue when requested */
if (drop)
- bcm_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL);
+ brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL);
/* wait for queue and DMA fifos to run dry */
while (!pktq_empty(&wlc->pkt_queue->q) ||
TXPKTPENDTOT(wlc) > 0) {
- wl_msleep(wlc->wl, 1);
+ brcms_msleep(wlc->wl, 1);
+ }
+}
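Hypothetical call sites for the helper above, only to show what the drop flag means:

	brcms_c_wait_for_tx_completion(wlc, false);	/* let queued frames drain */
	brcms_c_wait_for_tx_completion(wlc, true);	/* flush the queue, then wait for DMA */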
+
+int brcms_c_set_par(struct brcms_c_info *wlc, enum wlc_par_id par_id,
+ int int_val)
+{
+ int err = 0;
+
+ switch (par_id) {
+ case IOV_BCN_LI_BCN:
+ wlc->bcn_li_bcn = (u8) int_val;
+ if (wlc->pub->up)
+ brcms_c_bcn_li_upd(wlc);
+ break;
+ /* As long as override is false, this only sets the *user*
+ targets; the user can adjust these freely with no harm.
+ wlc_phy_txpower_set() explicitly sets override to false if
+ not internal or test.
+ */
+ case IOV_QTXPOWER:{
+ u8 qdbm;
+ bool override;
+
+ /* Remove override bit and clip to max qdbm value */
+ qdbm = (u8)min_t(u32, (int_val & ~WL_TXPWR_OVERRIDE), 0xff);
+ /* Extract override setting */
+ override = (int_val & WL_TXPWR_OVERRIDE) ? true : false;
+ err =
+ wlc_phy_txpower_set(wlc->band->pi, qdbm, override);
+ break;
+ }
+ case IOV_MPC:
+ wlc->mpc = (bool)int_val;
+ brcms_c_radio_mpc_upd(wlc);
+ break;
+ default:
+ err = -ENOTSUPP;
+ }
+ return err;
+}
+
+int brcms_c_get_par(struct brcms_c_info *wlc, enum wlc_par_id par_id,
+ int *ret_int_ptr)
+{
+ int err = 0;
+
+ switch (par_id) {
+ case IOV_BCN_LI_BCN:
+ *ret_int_ptr = wlc->bcn_li_bcn;
+ break;
+ case IOV_QTXPOWER: {
+ uint qdbm;
+ bool override;
+
+ err = wlc_phy_txpower_get(wlc->band->pi, &qdbm,
+ &override);
+ if (err != 0)
+ return err;
+
+ /* Return qdbm units */
+ *ret_int_ptr =
+ qdbm | (override ? WL_TXPWR_OVERRIDE : 0);
+ break;
+ }
+ case IOV_MPC:
+ *ret_int_ptr = (s32) wlc->mpc;
+ break;
+ default:
+ err = -ENOTSUPP;
+ }
+ return err;
+}
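A hedged sketch of how a caller might use the new accessors; the surrounding mac80211 glue is assumed to supply wlc, and the function name is invented for illustration:

	static void example_par_usage(struct brcms_c_info *wlc)
	{
		int qtxpower;

		/* wake for every 3rd beacon */
		brcms_c_set_par(wlc, IOV_BCN_LI_BCN, 3);

		/* read back the tx power target; strip the override flag to get qdBm */
		if (brcms_c_get_par(wlc, IOV_QTXPOWER, &qtxpower) == 0)
			qtxpower &= ~WL_TXPWR_OVERRIDE;
	}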
+
+/*
+ * Search the name=value vars for a specific one and return its value.
+ * Returns NULL if not found.
+ */
+char *getvar(char *vars, const char *name)
+{
+ char *s;
+ int len;
+
+ if (!name)
+ return NULL;
+
+ len = strlen(name);
+ if (len == 0)
+ return NULL;
+
+ /* first look in vars[] */
+ for (s = vars; s && *s;) {
+ if ((memcmp(s, name, len) == 0) && (s[len] == '='))
+ return &s[len + 1];
+
+ while (*s++)
+ ;
}
+ /* nothing found */
+ return NULL;
+}
+
+/*
+ * Search the vars for a specific one and return its value as
+ * an integer. Returns 0 if not found.
+ */
+int getintvar(char *vars, const char *name)
+{
+ char *val;
+
+ val = getvar(vars, name);
+ if (val == NULL)
+ return 0;
+
+ return simple_strtoul(val, NULL, 0);
}
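Usage sketch for getvar()/getintvar(): the vars blob is a run of NUL-terminated "name=value" strings ending with an empty string; the variable names below are invented for illustration:

	static void example_getvar_usage(void)
	{
		char vars[] = "boardtype=0x04b9\0antgain=2\0"; /* trailing '\0' added by the compiler */
		char *board = getvar(vars, "boardtype");	/* -> "0x04b9" */
		int gain = getintvar(vars, "antgain");		/* -> 2 */
		int missing = getintvar(vars, "nosuchvar");	/* -> 0, not found */

		(void)board; (void)gain; (void)missing;
	}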
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_main.h b/drivers/staging/brcm80211/brcmsmac/main.h
index fb48dfcb97d..f204b1f4747 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_main.h
+++ b/drivers/staging/brcm80211/brcmsmac/main.h
@@ -14,33 +14,67 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _wlc_h_
-#define _wlc_h_
+#ifndef _BRCM_MAIN_H_
+#define _BRCM_MAIN_H_
+
+#include <linux/etherdevice.h>
+
+#include <brcmu_utils.h>
+#include "types.h"
+#include "d11.h"
#define MA_WINDOW_SZ 8 /* moving average window size */
-#define WL_HWRXOFF 38 /* chip rx buffer offset */
+#define BRCMS_HWRXOFF 38 /* chip rx buffer offset */
#define INVCHANNEL 255 /* invalid channel */
-#define MAXCOREREV 28 /* max # supported core revisions (0 .. MAXCOREREV - 1) */
-#define WLC_MAXMODULES 22 /* max # wlc_module_register() calls */
+/* max # supported core revisions (0 .. MAXCOREREV - 1) */
+#define MAXCOREREV 28
+/* max # brcms_c_module_register() calls */
+#define BRCMS_MAXMODULES 22
+
+#define SEQNUM_SHIFT 4
+#define AMPDU_DELIMITER_LEN 4
+#define SEQNUM_MAX 0x1000
+
+#define APHY_CWMIN 15
+#define PHY_CWMAX 1023
+
+#define EDCF_AIFSN_MIN 1
+#define FRAGNUM_MASK 0xF
-#define WLC_BITSCNT(x) bcm_bitcount((u8 *)&(x), sizeof(u8))
+#define NTXRATE 64 /* # tx MPDUs rate is reported for */
+
+#define BRCMS_BITSCNT(x) brcmu_bitcount((u8 *)&(x), sizeof(u8))
/* Maximum wait time for a MAC suspend */
-#define WLC_MAX_MAC_SUSPEND 83000 /* uS: 83mS is max packet time (64KB ampdu @ 6Mbps) */
+/* uS: 83mS is max packet time (64KB ampdu @ 6Mbps) */
+#define BRCMS_MAX_MAC_SUSPEND 83000
/* Probe Response timeout - responses for probe requests older than this are tossed, zero to disable
*/
-#define WLC_PRB_RESP_TIMEOUT 0 /* Disable probe response timeout */
+#define BRCMS_PRB_RESP_TIMEOUT 0 /* Disable probe response timeout */
/* transmit buffer max headroom for protocol headers */
#define TXOFF (D11_TXH_LEN + D11_PHY_HDR_LEN)
-/* For managing scan result lists */
-struct wlc_bss_list {
- uint count;
- bool beacon; /* set for beacon, cleared for probe response */
- wlc_bss_info_t *ptrs[MAXBSS];
-};
+#define AC_COUNT 4
+
+/* Macros for doing definition and get/set of bitfields
+ * Usage example, e.g. a three-bit field (bits 4-6):
+ * #define <NAME>_M BITFIELD_MASK(3)
+ * #define <NAME>_S 4
+ * ...
+ * regval = R_REG(osh, &regs->regfoo);
+ * field = GFIELD(regval, <NAME>);
+ * regval = SFIELD(regval, <NAME>, 1);
+ * W_REG(osh, &regs->regfoo, regval);
+ */
+#define BITFIELD_MASK(width) \
+ (((unsigned)1 << (width)) - 1)
+#define GFIELD(val, field) \
+ (((val) >> field ## _S) & field ## _M)
+#define SFIELD(val, field, bits) \
+ (((val) & (~(field ## _M << field ## _S))) | \
+ ((unsigned)(bits) << field ## _S))
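A concrete instance of the bitfield helpers above, reusing the three-bit example from the comment (FOO is a hypothetical field name):

	#define FOO_M BITFIELD_MASK(3)	/* three-bit field */
	#define FOO_S 4			/* occupying bits 4..6 */

	/* With regval = 0x7f:
	 *   GFIELD(regval, FOO)    == 0x7  (bits 4-6 extracted)
	 *   SFIELD(regval, FOO, 1) == 0x1f (bits 4-6 replaced by 001)
	 */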
#define SW_TIMER_MAC_STAT_UPD 30 /* periodic MAC stats update */
@@ -52,26 +86,28 @@ struct wlc_bss_list {
#define VALID_COREREV(corerev) CONF_HAS(D11CONF, corerev)
/* values for shortslot_override */
-#define WLC_SHORTSLOT_AUTO -1 /* Driver will manage Shortslot setting */
-#define WLC_SHORTSLOT_OFF 0 /* Turn off short slot */
-#define WLC_SHORTSLOT_ON 1 /* Turn on short slot */
+#define BRCMS_SHORTSLOT_AUTO -1 /* Driver will manage Shortslot setting */
+#define BRCMS_SHORTSLOT_OFF 0 /* Turn off short slot */
+#define BRCMS_SHORTSLOT_ON 1 /* Turn on short slot */
/* value for short/long and mixmode/greenfield preamble */
-
-#define WLC_LONG_PREAMBLE (0)
-#define WLC_SHORT_PREAMBLE (1 << 0)
-#define WLC_GF_PREAMBLE (1 << 1)
-#define WLC_MM_PREAMBLE (1 << 2)
-#define WLC_IS_MIMO_PREAMBLE(_pre) (((_pre) == WLC_GF_PREAMBLE) || ((_pre) == WLC_MM_PREAMBLE))
+#define BRCMS_LONG_PREAMBLE (0)
+#define BRCMS_SHORT_PREAMBLE (1 << 0)
+#define BRCMS_GF_PREAMBLE (1 << 1)
+#define BRCMS_MM_PREAMBLE (1 << 2)
+#define BRCMS_IS_MIMO_PREAMBLE(_pre) (((_pre) == BRCMS_GF_PREAMBLE) || \
+ ((_pre) == BRCMS_MM_PREAMBLE))
/* values for barker_preamble */
-#define WLC_BARKER_SHORT_ALLOWED 0 /* Short pre-amble allowed */
+#define BRCMS_BARKER_SHORT_ALLOWED 0 /* Short pre-amble allowed */
/* A fifo is full. Clear precedences related to that FIFO */
-#define WLC_TX_FIFO_CLEAR(wlc, fifo) ((wlc)->tx_prec_map &= ~(wlc)->fifo2prec_map[fifo])
+#define BRCMS_TX_FIFO_CLEAR(wlc, fifo) \
+ ((wlc)->tx_prec_map &= ~(wlc)->fifo2prec_map[fifo])
/* Fifo is NOT full. Enable precedences for that FIFO */
-#define WLC_TX_FIFO_ENAB(wlc, fifo) ((wlc)->tx_prec_map |= (wlc)->fifo2prec_map[fifo])
+#define BRCMS_TX_FIFO_ENAB(wlc, fifo) \
+ ((wlc)->tx_prec_map |= (wlc)->fifo2prec_map[fifo])
/* TxFrameID */
/* seq and frag bits: SEQNUM_SHIFT, FRAGNUM_MASK (802.11.h) */
@@ -90,14 +126,14 @@ struct wlc_bss_list {
/* if wpa is in use then portopen is true when the group key is plumbed otherwise it is always true
*/
#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
-#define WLC_SW_KEYS(wlc, bsscfg) ((((wlc)->wsec_swkeys) || \
+#define BRCMS_SW_KEYS(wlc, bsscfg) ((((wlc)->wsec_swkeys) || \
((bsscfg)->wsec & WSEC_SWFLAG)))
-#define WLC_PORTOPEN(cfg) \
+#define BRCMS_PORTOPEN(cfg) \
(((cfg)->WPA_auth != WPA_AUTH_DISABLED && WSEC_ENABLED((cfg)->wsec)) ? \
(cfg)->wsec_portopen : true)
-#define PS_ALLOWED(wlc) wlc_ps_allowed(wlc)
+#define PS_ALLOWED(wlc) brcms_c_ps_allowed(wlc)
#define DATA_BLOCK_TX_SUPR (1 << 4)
@@ -105,11 +141,11 @@ struct wlc_bss_list {
extern const u8 prio2fifo[];
/* Ucode MCTL_WAKE override bits */
-#define WLC_WAKE_OVERRIDE_CLKCTL 0x01
-#define WLC_WAKE_OVERRIDE_PHYREG 0x02
-#define WLC_WAKE_OVERRIDE_MACSUSPEND 0x04
-#define WLC_WAKE_OVERRIDE_TXFIFO 0x08
-#define WLC_WAKE_OVERRIDE_FORCEFAST 0x10
+#define BRCMS_WAKE_OVERRIDE_CLKCTL 0x01
+#define BRCMS_WAKE_OVERRIDE_PHYREG 0x02
+#define BRCMS_WAKE_OVERRIDE_MACSUSPEND 0x04
+#define BRCMS_WAKE_OVERRIDE_TXFIFO 0x08
+#define BRCMS_WAKE_OVERRIDE_FORCEFAST 0x10
/* stuff pulled in from wlc.c */
@@ -136,7 +172,7 @@ extern const u8 prio2fifo[];
#define MAXTXFRAMEBURST 8 /* vanilla xpress mode: max frames/burst */
#define MAXFRAMEBURST_TXOP 10000 /* Frameburst TXOP in usec */
-/* Per-AC retry limit register definitions; uses bcmdefs.h bitfield macros */
+/* Per-AC retry limit register definitions; uses defs.h bitfield macros */
#define EDCF_SHORT_S 0
#define EDCF_SFB_S 4
#define EDCF_LONG_S 8
@@ -146,24 +182,34 @@ extern const u8 prio2fifo[];
#define EDCF_LONG_M BITFIELD_MASK(4)
#define EDCF_LFB_M BITFIELD_MASK(4)
-#define WLC_WME_RETRY_SHORT_GET(wlc, ac) GFIELD(wlc->wme_retries[ac], EDCF_SHORT)
-#define WLC_WME_RETRY_SFB_GET(wlc, ac) GFIELD(wlc->wme_retries[ac], EDCF_SFB)
-#define WLC_WME_RETRY_LONG_GET(wlc, ac) GFIELD(wlc->wme_retries[ac], EDCF_LONG)
-#define WLC_WME_RETRY_LFB_GET(wlc, ac) GFIELD(wlc->wme_retries[ac], EDCF_LFB)
+#define NFIFO 6 /* # tx/rx fifopairs */
-#define WLC_WME_RETRY_SHORT_SET(wlc, ac, val) \
+#define BRCMS_WME_RETRY_SHORT_GET(wlc, ac) \
+ GFIELD(wlc->wme_retries[ac], EDCF_SHORT)
+#define BRCMS_WME_RETRY_SFB_GET(wlc, ac) \
+ GFIELD(wlc->wme_retries[ac], EDCF_SFB)
+#define BRCMS_WME_RETRY_LONG_GET(wlc, ac) \
+ GFIELD(wlc->wme_retries[ac], EDCF_LONG)
+#define BRCMS_WME_RETRY_LFB_GET(wlc, ac) \
+ GFIELD(wlc->wme_retries[ac], EDCF_LFB)
+
+#define BRCMS_WME_RETRY_SHORT_SET(wlc, ac, val) \
(wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac], EDCF_SHORT, val))
-#define WLC_WME_RETRY_SFB_SET(wlc, ac, val) \
+#define BRCMS_WME_RETRY_SFB_SET(wlc, ac, val) \
(wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac], EDCF_SFB, val))
-#define WLC_WME_RETRY_LONG_SET(wlc, ac, val) \
+#define BRCMS_WME_RETRY_LONG_SET(wlc, ac, val) \
(wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac], EDCF_LONG, val))
-#define WLC_WME_RETRY_LFB_SET(wlc, ac, val) \
+#define BRCMS_WME_RETRY_LFB_SET(wlc, ac, val) \
(wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac], EDCF_LFB, val))
/* PLL requests */
-#define WLC_PLLREQ_SHARED 0x1 /* pll is shared on old chips */
-#define WLC_PLLREQ_RADIO_MON 0x2 /* hold pll for radio monitor register checking */
-#define WLC_PLLREQ_FLIP 0x4 /* hold/release pll for some short operation */
+
+/* pll is shared on old chips */
+#define BRCMS_PLLREQ_SHARED 0x1
+/* hold pll for radio monitor register checking */
+#define BRCMS_PLLREQ_RADIO_MON 0x2
+/* hold/release pll for some short operation */
+#define BRCMS_PLLREQ_FLIP 0x4
/*
* Macros to check if AP or STA is active.
@@ -194,9 +240,9 @@ extern const u8 prio2fifo[];
(MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN) : \
(ai_deviceremoved(wlc->hw->sih)))
-#define WLCWLUNIT(wlc) ((wlc)->pub->unit)
+#define BRCMS_UNIT(wlc) ((wlc)->pub->unit)
-struct wlc_protection {
+struct brcms_protection {
bool _g; /* use g spec protection, driver internal */
s8 g_override; /* override for use of g spec protection */
u8 gmode_user; /* user config gmode, operating band->gmode is different */
@@ -208,23 +254,10 @@ struct wlc_protection {
s8 nongf_override; /* override for use of GF protection */
s8 n_pam_override; /* override for preamble: MM or GF */
bool n_obss; /* indicated OBSS Non-HT STA present */
-
- uint longpre_detect_timeout; /* #sec until long preamble bcns gone */
- uint barker_detect_timeout; /* #sec until bcns signaling Barker long preamble */
- /* only is gone */
- uint ofdm_ibss_timeout; /* #sec until ofdm IBSS beacons gone */
- uint ofdm_ovlp_timeout; /* #sec until ofdm overlapping BSS bcns gone */
- uint nonerp_ibss_timeout; /* #sec until nonerp IBSS beacons gone */
- uint nonerp_ovlp_timeout; /* #sec until nonerp overlapping BSS bcns gone */
- uint g_ibss_timeout; /* #sec until bcns signaling Use_Protection gone */
- uint n_ibss_timeout; /* #sec until bcns signaling Use_OFDM_Protection gone */
- uint ht20in40_ovlp_timeout; /* #sec until 20MHz overlapping OPMODE gone */
- uint ht20in40_ibss_timeout; /* #sec until 20MHz-only HT station bcns gone */
- uint non_gf_ibss_timeout; /* #sec until non-GF bcns gone */
};
/* anything affects the single/dual streams/antenna operation */
-struct wlc_stf {
+struct brcms_stf {
u8 hw_txchain; /* HW txchain bitmap cfg */
u8 txchain; /* txchain bitmap being used */
u8 txstreams; /* number of txchains being used */
@@ -250,60 +283,106 @@ struct wlc_stf {
s8 spatial_policy;
};
-#define WLC_STF_SS_STBC_TX(wlc, scb) \
+#define BRCMS_STF_SS_STBC_TX(wlc, scb) \
(((wlc)->stf->txstreams > 1) && (((wlc)->band->band_stf_stbc_tx == ON) || \
(SCB_STBC_CAP((scb)) && \
(wlc)->band->band_stf_stbc_tx == AUTO && \
isset(&((wlc)->stf->ss_algo_channel), PHY_TXC1_MODE_STBC))))
-#define WLC_STBC_CAP_PHY(wlc) (WLCISNPHY(wlc->band) && NREV_GE(wlc->band->phyrev, 3))
+#define BRCMS_STBC_CAP_PHY(wlc) (BRCMS_ISNPHY(wlc->band) && \
+ NREV_GE(wlc->band->phyrev, 3))
-#define WLC_SGI_CAP_PHY(wlc) ((WLCISNPHY(wlc->band) && NREV_GE(wlc->band->phyrev, 3)) || \
- WLCISLCNPHY(wlc->band))
+#define BRCMS_SGI_CAP_PHY(wlc) ((BRCMS_ISNPHY(wlc->band) && \
+ NREV_GE(wlc->band->phyrev, 3)) || \
+ BRCMS_ISLCNPHY(wlc->band))
-#define WLC_CHAN_PHYTYPE(x) (((x) & RXS_CHAN_PHYTYPE_MASK) >> RXS_CHAN_PHYTYPE_SHIFT)
-#define WLC_CHAN_CHANNEL(x) (((x) & RXS_CHAN_ID_MASK) >> RXS_CHAN_ID_SHIFT)
-#define WLC_RX_CHANNEL(rxh) (WLC_CHAN_CHANNEL((rxh)->RxChan))
+#define BRCMS_CHAN_PHYTYPE(x) (((x) & RXS_CHAN_PHYTYPE_MASK) \
+ >> RXS_CHAN_PHYTYPE_SHIFT)
+#define BRCMS_CHAN_CHANNEL(x) (((x) & RXS_CHAN_ID_MASK) \
+ >> RXS_CHAN_ID_SHIFT)
+#define BRCMS_RX_CHANNEL(rxh) (BRCMS_CHAN_CHANNEL((rxh)->RxChan))
-/* wlc_bss_info flag bit values */
-#define WLC_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */
+/* brcms_bss_info flag bit values */
+#define BRCMS_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */
-/* Flags used in wlc_txq_info.stopped */
+/* Flags used in brcms_c_txq_info.stopped */
#define TXQ_STOP_FOR_PRIOFC_MASK 0x000000FF /* per prio flow control bits */
#define TXQ_STOP_FOR_PKT_DRAIN 0x00000100 /* stop txq enqueue for packet drain */
#define TXQ_STOP_FOR_AMPDU_FLOW_CNTRL 0x00000200 /* stop txq enqueue for ampdu flow control */
-#define WLC_HT_WEP_RESTRICT 0x01 /* restrict HT with WEP */
-#define WLC_HT_TKIP_RESTRICT 0x02 /* restrict HT with TKIP */
+#define BRCMS_HT_WEP_RESTRICT 0x01 /* restrict HT with WEP */
+#define BRCMS_HT_TKIP_RESTRICT 0x02 /* restrict HT with TKIP */
+
+/* Maximum # of keys that wl driver supports in S/W.
+ * The number of keys supported in H/W is less than or equal to WSEC_MAX_KEYS.
+ */
+#define WSEC_MAX_KEYS 54 /* Max # of keys (50 + 4 default keys) */
+#define BRCMS_DEFAULT_KEYS 4 /* Default # of keys */
+
+/*
+* Max # of keys currently supported:
+*
+* s/w keys if WSEC_SW(wlc->wsec).
+* h/w keys otherwise.
+*/
+#define BRCMS_MAX_WSEC_KEYS(wlc) WSEC_MAX_KEYS
+
+/* number of 802.11 default (non-paired, group keys) */
+#define WSEC_MAX_DEFAULT_KEYS 4 /* # of default keys */
+
+struct wsec_iv {
+ u32 hi; /* upper 32 bits of IV */
+ u16 lo; /* lower 16 bits of IV */
+};
+
+#define BRCMS_NUMRXIVS 16 /* # rx IVs (one per 802.11e TID) */
+
+struct wsec_key {
+ u8 ea[ETH_ALEN]; /* per station */
+ u8 idx; /* key index in wsec_keys array */
+ u8 id; /* key ID [0-3] */
+ u8 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
+ u8 rcmta; /* rcmta entry index, same as idx by default */
+ u16 flags; /* misc flags */
+ u8 algo_hw; /* cache for hw register */
+ u8 aes_mode; /* cache for hw register */
+ s8 iv_len; /* IV length */
+ s8 icv_len; /* ICV length */
+ u32 len; /* key length..don't move this var */
+ /* data is 4byte aligned */
+ u8 data[WLAN_MAX_KEY_LEN]; /* key data */
+ struct wsec_iv rxiv[BRCMS_NUMRXIVS]; /* Rx IV (one per TID) */
+ struct wsec_iv txiv; /* Tx IV */
+};
/*
* core state (mac)
*/
-struct wlccore {
+struct brcms_core {
uint coreidx; /* # sb enumerated core */
/* fifo */
uint *txavail[NFIFO]; /* # tx descriptors available */
s16 txpktpend[NFIFO]; /* tx admission control */
- macstat_t *macstat_snapshot; /* mac hw prev read values */
+ struct macstat *macstat_snapshot; /* mac hw prev read values */
};
/*
* band state (phy+ana+radio)
*/
-struct wlcband {
- int bandtype; /* WLC_BAND_2G, WLC_BAND_5G */
+struct brcms_band {
+ int bandtype; /* BRCM_BAND_2G, BRCM_BAND_5G */
uint bandunit; /* bandstate[] index */
u16 phytype; /* phytype */
u16 phyrev;
u16 radioid;
u16 radiorev;
- wlc_phy_t *pi; /* pointer to phy specific information */
+ struct brcms_phy_pub *pi; /* pointer to phy specific information */
bool abgphy_encore;
- u8 gmode; /* currently active gmode (see wlioctl.h) */
+ u8 gmode; /* currently active gmode */
struct scb *hwrs_scb; /* permanent scb for hw rateset */
@@ -314,7 +393,7 @@ struct wlcband {
u8 band_stf_ss_mode; /* Configured STF type, 0:siso; 1:cdd */
s8 band_stf_stbc_tx; /* STBC TX 0:off; 1:force on; -1:auto */
wlc_rateset_t hw_rateset; /* rates supported by chip (phy-specific) */
- u8 basic_rate[WLC_MAXRATE + 1]; /* basic rates indexed by rate */
+ u8 basic_rate[BRCM_MAXRATE + 1]; /* basic rates indexed by rate */
bool mimo_cap_40; /* 40 MHz cap enabled on this band */
s8 antgain; /* antenna gain from srom */
@@ -324,7 +403,7 @@ struct wlcband {
};
/* tx completion callback takes 3 args */
-typedef void (*pkcb_fn_t) (struct wlc_info *wlc, uint txstatus, void *arg);
+typedef void (*pkcb_fn_t) (struct brcms_c_info *wlc, uint txstatus, void *arg);
struct pkt_cb {
pkcb_fn_t fn; /* function to call when tx frame completes */
@@ -336,7 +415,7 @@ struct pkt_cb {
/* module control blocks */
struct modulecb {
char name[32]; /* module name : NULL indicates empty array member */
- const bcm_iovar_t *iovars; /* iovar table */
+ const struct brcmu_iovar *iovars; /* iovar table */
void *hdl; /* handle passed when handler 'doiovar' is called */
watchdog_fn_t watchdog_fn; /* watchdog handler */
iovar_fn_t iovar_fn; /* iovar handler */
@@ -355,28 +434,46 @@ struct dumpcb_s {
struct dumpcb_s *next;
};
+struct edcf_acparam {
+ u8 ACI;
+ u8 ECW;
+ u16 TXOP;
+} __packed;
+
+struct wme_param_ie {
+ u8 oui[3];
+ u8 type;
+ u8 subtype;
+ u8 version;
+ u8 qosinfo;
+ u8 rsvd;
+ struct edcf_acparam acparam[AC_COUNT];
+} __packed;
+
/* virtual interface */
-struct wlc_if {
- struct wlc_if *next;
- u8 type; /* WLC_IFTYPE_BSS or WLC_IFTYPE_WDS */
+struct brcms_c_if {
+ struct brcms_c_if *next;
+ u8 type; /* BSS or WDS */
u8 index; /* assigned in wl_add_if(), index of the wlif if any,
* not necessarily corresponding to bsscfg._idx or
* AID2PVBMAP(scb).
*/
u8 flags; /* flags for the interface */
- struct wl_if *wlif; /* pointer to wlif */
- struct wlc_txq_info *qi; /* pointer to associated tx queue */
+ struct brcms_if *wlif; /* pointer to wlif */
+ struct brcms_txq_info *qi; /* pointer to associated tx queue */
union {
- struct scb *scb; /* pointer to scb if WLC_IFTYPE_WDS */
- struct wlc_bsscfg *bsscfg; /* pointer to bsscfg if WLC_IFTYPE_BSS */
+ /* pointer to scb if WDS */
+ struct scb *scb;
+ /* pointer to bsscfg if BSS */
+ struct brcms_bss_cfg *bsscfg;
} u;
};
-/* flags for the interface */
-#define WLC_IF_LINKED 0x02 /* this interface is linked to a wl_if */
+/* flags for the interface, this interface is linked to a brcms_if */
+#define BRCMS_IF_LINKED 0x02
-struct wlc_hwband {
- int bandtype; /* WLC_BAND_2G, WLC_BAND_5G */
+struct brcms_hw_band {
+ int bandtype; /* BRCM_BAND_2G, BRCM_BAND_5G */
uint bandunit; /* bandstate[] index */
u16 mhfs[MHFMAX]; /* MHF array shadow */
u8 bandhw_stf_ss_mode; /* HW configured STF type, 0:siso; 1:cdd */
@@ -388,16 +485,16 @@ struct wlc_hwband {
u16 phyrev;
u16 radioid;
u16 radiorev;
- wlc_phy_t *pi; /* pointer to phy specific information */
+ struct brcms_phy_pub *pi; /* pointer to phy specific information */
bool abgphy_encore;
};
-struct wlc_hw_info {
+struct brcms_hardware {
bool _piomode; /* true if pio mode */
- struct wlc_info *wlc;
+ struct brcms_c_info *wlc;
/* fifo */
- struct hnddma_pub *di[NFIFO]; /* hnddma handles, per fifo */
+ struct dma_pub *di[NFIFO]; /* dma handles, per fifo */
uint unit; /* device instance number */
@@ -413,14 +510,15 @@ struct wlc_hw_info {
u32 machwcap_backup; /* backup of machwcap */
u16 ucode_dbgsel; /* dbgsel for ucode debug(config gpio) */
- si_t *sih; /* SB handle (cookie for siutils calls) */
+ struct si_pub *sih; /* SI handle (cookie for siutils calls) */
char *vars; /* "environment" name=value */
uint vars_size; /* size of vars, free vars on detach */
d11regs_t *regs; /* pointer to device registers */
void *physhim; /* phy shim layer handler */
void *phy_sh; /* pointer to shared phy state */
- struct wlc_hwband *band;/* pointer to active per-band state */
- struct wlc_hwband *bandstate[MAXBANDS];/* band state per phy/radio */
+ struct brcms_hw_band *band;/* pointer to active per-band state */
+ /* band state per phy/radio */
+ struct brcms_hw_band *bandstate[MAXBANDS];
u16 bmac_phytxant; /* cache of high phytxant state */
bool shortslot; /* currently using 11g ShortSlot timing */
u16 SRL; /* 802.11 dot11ShortRetryLimit */
@@ -473,8 +571,8 @@ struct wlc_hw_info {
* if they belong to the same flow of traffic from the device. For multi-channel
* operation there are independent TX Queues for each channel.
*/
-struct wlc_txq_info {
- struct wlc_txq_info *next;
+struct brcms_txq_info {
+ struct brcms_txq_info *next;
struct pktq q;
uint stopped; /* tx flow control bits */
};
@@ -482,12 +580,13 @@ struct wlc_txq_info {
/*
* Principal common (os-independent) software data structure.
*/
-struct wlc_info {
- struct wlc_pub *pub; /* pointer to wlc public state */
- struct wl_info *wl; /* pointer to os-specific private state */
+struct brcms_c_info {
+ struct brcms_pub *pub; /* pointer to wlc public state */
+ struct brcms_info *wl; /* pointer to os-specific private state */
d11regs_t *regs; /* pointer to device registers */
- struct wlc_hw_info *hw; /* HW related state used primarily by BMAC */
+ /* HW related state used primarily by BMAC */
+ struct brcms_hardware *hw;
/* clock */
int clkreq_override; /* setting for clkreq for PCIE : Auto, 0, 1 */
@@ -504,11 +603,11 @@ struct wlc_info {
bool clk; /* core is out of reset and has clock */
/* multiband */
- struct wlccore *core; /* pointer to active io core */
- struct wlcband *band; /* pointer to active per-band state */
- struct wlccore *corestate; /* per-core state (one per hw core) */
+ struct brcms_core *core; /* pointer to active io core */
+ struct brcms_band *band; /* pointer to active per-band state */
+ struct brcms_core *corestate; /* per-core state (one per hw core) */
/* per-band state (one per phy/radio): */
- struct wlcband *bandstate[MAXBANDS];
+ struct brcms_band *bandstate[MAXBANDS];
bool war16165; /* PCI slow clock 16165 war flag */
@@ -526,9 +625,7 @@ struct wlc_info {
struct ampdu_info *ampdu; /* ampdu module handler */
struct antsel_info *asi; /* antsel module handler */
- wlc_cm_info_t *cmi; /* channel manager module handler */
-
- void *btparam; /* bus type specific cookie */
+ struct brcms_cm_info *cmi; /* channel manager module handler */
uint vars_size; /* size of vars, free vars on detach */
@@ -544,25 +641,18 @@ struct wlc_info {
bool bandinit_pending; /* track band init in auto band */
bool radio_monitor; /* radio timer is running */
- bool down_override; /* true=down */
bool going_down; /* down path intermediate variable */
bool mpc; /* enable minimum power consumption */
u8 mpc_dlycnt; /* # of watchdog cnt before turn disable radio */
u8 mpc_offcnt; /* # of watchdog cnt that radio is disabled */
u8 mpc_delay_off; /* delay radio disable by # of watchdog cnt */
- u8 prev_non_delay_mpc; /* prev state wlc_is_non_delay_mpc */
+ u8 prev_non_delay_mpc; /* prev state brcms_c_is_non_delay_mpc */
- /* timer */
- struct wl_timer *wdtimer; /* timer for watchdog routine */
- uint fast_timer; /* Periodic timeout for 'fast' timer */
- uint slow_timer; /* Periodic timeout for 'slow' timer */
- uint glacial_timer; /* Periodic timeout for 'glacial' timer */
- uint phycal_mlo; /* last time measurelow calibration was done */
- uint phycal_txpower; /* last time txpower calibration was done */
-
- struct wl_timer *radio_timer; /* timer for hw radio button monitor routine */
- struct wl_timer *pspoll_timer; /* periodic pspoll timer */
+ /* timer for watchdog routine */
+ struct brcms_timer *wdtimer;
+ /* timer for hw radio button monitor routine */
+ struct brcms_timer *radio_timer;
/* promiscuous */
bool monitor; /* monitor (MPDU sniffing) mode */
@@ -570,30 +660,11 @@ struct wlc_info {
bool bcnmisc_scan; /* bcns promisc mode override for scan */
bool bcnmisc_monitor; /* bcns promisc mode override for monitor */
- u8 bcn_wait_prd; /* max waiting period (for beacon) in 1024TU */
-
/* driver feature */
bool _rifs; /* enable per-packet rifs */
- s32 rifs_advert; /* RIFS mode advertisement */
s8 sgi_tx; /* sgi tx */
- bool wet; /* true if wireless ethernet bridging mode */
/* AP-STA synchronization, power save */
- bool check_for_unaligned_tbtt; /* check unaligned tbtt flag */
- bool PM_override; /* no power-save flag, override PM(user input) */
- bool PMenabled; /* current power-management state (CAM or PS) */
- bool PMpending; /* waiting for tx status with PM indicated set */
- bool PMblocked; /* block any PSPolling in PS mode, used to buffer
- * AP traffic, also used to indicate in progress
- * of scan, rm, etc. off home channel activity.
- */
- bool PSpoll; /* whether there is an outstanding PS-Poll frame */
- u8 PM; /* power-management mode (CAM, PS or FASTPS) */
- bool PMawakebcn; /* bcn recvd during current waking state */
-
- bool WME_PM_blocked; /* Can STA go to PM when in WME Auto mode */
- bool wake; /* host-specified PS-mode sleep state */
- u8 pspoll_prd; /* pspoll interval in milliseconds */
u8 bcn_li_bcn; /* beacon listen interval in # beacons */
u8 bcn_li_dtim; /* beacon listen interval in # dtims */
@@ -602,18 +673,16 @@ struct wlc_info {
/* WME */
ac_bitmap_t wme_dp; /* Discard (oldest first) policy per AC */
- bool wme_apsd; /* enable Advanced Power Save Delivery */
- ac_bitmap_t wme_admctl; /* bit i set if AC i under admission control */
u16 edcf_txop[AC_COUNT]; /* current txop for each ac */
- wme_param_ie_t wme_param_ie; /* WME parameter info element, which on STA
- * contains parameters in use locally, and on
- * AP contains parameters advertised to STA
- * in beacons and assoc responses.
- */
- bool wme_prec_queuing; /* enable/disable non-wme STA prec queuing */
+
+ /*
+ * WME parameter info element, which on STA contains parameters in use
+ * locally, and on AP contains parameters advertised to STA in beacons
+ * and assoc responses.
+ */
+ struct wme_param_ie wme_param_ie;
u16 wme_retries[AC_COUNT]; /* per-AC retry limits */
- int vlan_mode; /* OK to use 802.1Q Tags (ON, OFF, AUTO) */
u16 tx_prec_map; /* Precedence map based on HW FIFO space */
u16 fifo2prec_map[NFIFO]; /* pointer to fifo2_prec map based on WME */
@@ -621,60 +690,36 @@ struct wlc_info {
* BSS Configurations set of BSS configurations, idx 0 is default and
* always valid
*/
- struct wlc_bsscfg *bsscfg[WLC_MAXBSSCFG];
- struct wlc_bsscfg *cfg; /* the primary bsscfg (can be AP or STA) */
- u8 stas_associated; /* count of ASSOCIATED STA bsscfgs */
- u8 aps_associated; /* count of UP AP bsscfgs */
- u8 block_datafifo; /* prohibit posting frames to data fifos */
- bool bcmcfifo_drain; /* TX_BCMC_FIFO is set to drain */
+ struct brcms_bss_cfg *bsscfg[BRCMS_MAXBSSCFG];
+ struct brcms_bss_cfg *cfg; /* the primary bsscfg (can be AP or STA) */
/* tx queue */
- struct wlc_txq_info *tx_queues; /* common TX Queue list */
+ struct brcms_txq_info *tx_queues; /* common TX Queue list */
/* security */
- wsec_key_t *wsec_keys[WSEC_MAX_KEYS]; /* dynamic key storage */
- wsec_key_t *wsec_def_keys[WLC_DEFAULT_KEYS]; /* default key storage */
+ struct wsec_key *wsec_keys[WSEC_MAX_KEYS]; /* dynamic key storage */
+ /* default key storage */
+ struct wsec_key *wsec_def_keys[BRCMS_DEFAULT_KEYS];
bool wsec_swkeys; /* indicates that all keys should be
* treated as sw keys (used for debugging)
*/
struct modulecb *modulecb;
- struct dumpcb_s *dumpcb_head;
u8 mimoft; /* SIGN or 11N */
- u8 mimo_band_bwcap; /* bw cap per band type */
- s8 txburst_limit_override; /* tx burst limit override */
- u16 txburst_limit; /* tx burst limit value */
s8 cck_40txbw; /* 11N, cck tx b/w override when in 40MHZ mode */
s8 ofdm_40txbw; /* 11N, ofdm tx b/w override when in 40MHZ mode */
s8 mimo_40txbw; /* 11N, mimo tx b/w override when in 40MHZ mode */
/* HT CAP IE being advertised by this node: */
struct ieee80211_ht_cap ht_cap;
- uint seckeys; /* 54 key table shm address */
- uint tkmickeys; /* 12 TKIP MIC key table shm address */
-
- wlc_bss_info_t *default_bss; /* configured BSS parameters */
+ struct brcms_bss_info *default_bss; /* configured BSS parameters */
- u16 AID; /* association ID */
- u16 counter; /* per-sdu monotonically increasing counter */
u16 mc_fid_counter; /* BC/MC FIFO frame ID counter */
- bool ibss_allowed; /* false, all IBSS will be ignored during a scan
- * and the driver will not allow the creation of
- * an IBSS network
- */
- bool ibss_coalesce_allowed;
-
- char country_default[WLC_CNTRY_BUF_SZ]; /* saved country for leaving 802.11d
- * auto-country mode
- */
- char autocountry_default[WLC_CNTRY_BUF_SZ]; /* initial country for 802.11d
- * auto-country mode
- */
-#ifdef BCMDBG
- bcm_tlv_t *country_ie_override; /* debug override of announced Country IE */
-#endif
-
+ /* saved country for leaving 802.11d auto-country mode */
+ char country_default[BRCM_CNTRY_BUF_SZ];
+ /* initial country for 802.11d auto-country mode */
+ char autocountry_default[BRCM_CNTRY_BUF_SZ];
u16 prb_resp_timeout; /* do not send prb resp if request older than this,
* 0 = disable
*/
@@ -696,44 +741,17 @@ struct wlc_info {
u16 LFBL; /* Long Frame Rate Fallback Limit */
/* network config */
- bool shortpreamble; /* currently operating with CCK ShortPreambles */
bool shortslot; /* currently using 11g ShortSlot timing */
- s8 barker_preamble; /* current Barker Preamble Mode */
s8 shortslot_override; /* 11g ShortSlot override */
bool include_legacy_erp; /* include Legacy ERP info elt ID 47 as well as g ID 42 */
- bool barker_overlap_control; /* true: be aware of overlapping BSSs for barker */
- bool ignore_bcns; /* override: ignore non shortslot bcns in a 11g network */
- bool legacy_probe; /* restricts probe requests to CCK rates */
- struct wlc_protection *protection;
+ struct brcms_protection *protection;
s8 PLCPHdr_override; /* 802.11b Preamble Type override */
- struct wlc_stf *stf;
-
- struct pkt_cb *pkt_callback; /* tx completion callback handlers */
-
- u32 txretried; /* tx retried number in one msdu */
+ struct brcms_stf *stf;
ratespec_t bcn_rspec; /* save bcn ratespec purpose */
- bool apsd_sta_usp; /* Unscheduled Service Period in progress on STA */
- struct wl_timer *apsd_trigger_timer; /* timer for wme apsd trigger frames */
- u32 apsd_trigger_timeout; /* timeout value for apsd_trigger_timer (in ms)
- * 0 == disable
- */
- ac_bitmap_t apsd_trigger_ac; /* Permissible Access Category in which APSD Null
- * Trigger frames can be send
- */
- u8 htphy_membership; /* HT PHY membership */
-
- bool _regulatory_domain; /* 802.11d enabled? */
-
- u8 mimops_PM;
-
- u8 txpwr_percent; /* power output percentage */
-
- u8 ht_wsec_restriction; /* the restriction of HT with TKIP or WEP */
-
uint tempsense_lasttime;
u16 tx_duty_cycle_ofdm; /* maximum allowed duty cycle for OFDM */
@@ -741,30 +759,107 @@ struct wlc_info {
u16 next_bsscfg_ID;
- struct wlc_if *wlcif_list; /* linked list of wlc_if structs */
- struct wlc_txq_info *pkt_queue; /* txq for transmit packets */
+ struct brcms_txq_info *pkt_queue; /* txq for transmit packets */
u32 mpc_dur; /* total time (ms) in mpc mode except for the
* portion since radio is turned off last time
*/
u32 mpc_laston_ts; /* timestamp (ms) when radio is turned off last
* time
*/
- bool pr80838_war;
- uint hwrxoff;
struct wiphy *wiphy;
};
/* antsel module specific state */
struct antsel_info {
- struct wlc_info *wlc; /* pointer to main wlc structure */
- struct wlc_pub *pub; /* pointer to public fn */
+ struct brcms_c_info *wlc; /* pointer to main wlc structure */
+ struct brcms_pub *pub; /* pointer to public fn */
u8 antsel_type; /* Type of boardlevel mimo antenna switch-logic
* 0 = N/A, 1 = 2x4 board, 2 = 2x3 CB2 board
*/
u8 antsel_antswitch; /* board level antenna switch type */
bool antsel_avail; /* Ant selection availability (SROM based) */
- wlc_antselcfg_t antcfg_11n; /* antenna configuration */
- wlc_antselcfg_t antcfg_cur; /* current antenna config (auto) */
+ struct brcms_antselcfg antcfg_11n; /* antenna configuration */
+ struct brcms_antselcfg antcfg_cur; /* current antenna config (auto) */
+};
+
+/* BSS configuration state */
+struct brcms_bss_cfg {
+ struct brcms_c_info *wlc; /* wlc to which this bsscfg belongs */
+ bool up; /* is this configuration up and operational */
+ bool enable; /* is this configuration enabled */
+ bool associated; /* is BSS in ASSOCIATED state */
+ bool BSS; /* infrastructure or ad hoc */
+ bool dtim_programmed;
+
+ u8 SSID_len; /* the length of SSID */
+ u8 SSID[IEEE80211_MAX_SSID_LEN]; /* SSID string */
+ struct scb *bcmc_scb[MAXBANDS]; /* one bcmc_scb per band */
+ s8 _idx; /* the index of this bsscfg,
+ * assigned at wlc_bsscfg_alloc()
+ */
+ /* MAC filter */
+ uint nmac; /* # of entries on maclist array */
+ int macmode; /* allow/deny stations on maclist array */
+ struct ether_addr *maclist; /* list of source MAC addrs to match */
+
+ /* security */
+ u32 wsec; /* wireless security bitvec */
+ s16 auth; /* 802.11 authentication: Open, Shared Key, WPA */
+ s16 openshared; /* try Open auth first, then Shared Key */
+ bool wsec_restrict; /* drop unencrypted packets if wsec is enabled */
+ bool eap_restrict; /* restrict data until 802.1X auth succeeds */
+ u16 WPA_auth; /* WPA: authenticated key management */
+ bool wpa2_preauth; /* default is true, wpa_cap sets value */
+ bool wsec_portopen; /* indicates keys are plumbed */
+ /* global txiv for WPA_NONE, tkip and aes */
+ struct wsec_iv wpa_none_txiv;
+ int wsec_index; /* 0-3: default tx key, -1: not set */
+ /* default key storage: */
+ struct wsec_key *bss_def_keys[BRCMS_DEFAULT_KEYS];
+
+ /* TKIP countermeasures */
+ bool tkip_countermeasures; /* flags TKIP no-assoc period */
+ u32 tk_cm_dt; /* detect timer */
+ u32 tk_cm_bt; /* blocking timer */
+ u32 tk_cm_bt_tmstmp; /* Timestamp when TKIP BT is activated */
+ bool tk_cm_activate; /* activate countermeasures after EAPOL-Key sent */
+
+ u8 BSSID[ETH_ALEN]; /* BSSID (associated) */
+ u8 cur_etheraddr[ETH_ALEN]; /* h/w address */
+ u16 bcmc_fid; /* the last BCMC FID queued to TX_BCMC_FIFO */
+ u16 bcmc_fid_shm; /* the last BCMC FID written to shared mem */
+
+ u32 flags; /* BSSCFG flags; see below */
+
+ u8 *bcn; /* AP beacon */
+ uint bcn_len; /* AP beacon length */
+ bool ar_disassoc; /* disassociated in associated recreation */
+
+ int auth_atmptd; /* auth type (open/shared) attempted */
+
+ pmkid_cand_t pmkid_cand[MAXPMKID]; /* PMKID candidate list */
+ uint npmkid_cand; /* num PMKID candidates */
+ pmkid_t pmkid[MAXPMKID]; /* PMKID cache */
+ uint npmkid; /* num cached PMKIDs */
+
+ struct brcms_bss_info *current_bss; /* BSS parms in ASSOCIATED state */
+
+ /* PM states */
+ bool PMawakebcn; /* bcn recvd during current waking state */
+ bool PMpending; /* waiting for tx status with PM indicated set */
+ bool priorPMstate; /* Detecting PM state transitions */
+ bool PSpoll; /* whether there is an outstanding PS-Poll frame */
+
+ /* BSSID entry in RCMTA, use the wsec key management infrastructure to
+ * manage the RCMTA entries.
+ */
+ struct wsec_key *rcmta;
+
+ /* 'unique' ID of this bsscfg, assigned at bsscfg allocation */
+ u16 ID;
+
+ uint txrspecidx; /* index into tx rate circular buffer */
+ ratespec_t txrspec[NTXRATE][2]; /* circular buffer of prev MPDUs tx rates */
};
#define CHANNEL_BANDUNIT(wlc, ch) (((ch) <= CH_MAX_2G_CHANNEL) ? BAND_2G_INDEX : BAND_5G_INDEX)
@@ -773,7 +868,7 @@ struct antsel_info {
#define IS_MBAND_UNLOCKED(wlc) \
((NBANDS(wlc) > 1) && !(wlc)->bandlocked)
-#define WLC_BAND_PI_RADIO_CHANSPEC wlc_phy_chanspec_get(wlc->band->pi)
+#define BRCMS_BAND_PI_RADIO_CHANSPEC wlc_phy_chanspec_get(wlc->band->pi)
/* sum the individual fifo tx pending packet counts */
#define TXPKTPENDTOT(wlc) ((wlc)->core->txpktpend[0] + (wlc)->core->txpktpend[1] + \
@@ -784,156 +879,147 @@ struct antsel_info {
#define TXPKTPENDCLR(wlc, fifo) ((wlc)->core->txpktpend[(fifo)] = 0)
#define TXAVAIL(wlc, fifo) (*(wlc)->core->txavail[(fifo)])
#define GETNEXTTXP(wlc, _queue) \
- dma_getnexttxp((wlc)->hw->di[(_queue)], HNDDMA_RANGE_TRANSMITTED)
+ dma_getnexttxp((wlc)->hw->di[(_queue)], DMA_RANGE_TRANSMITTED)
-#define WLC_IS_MATCH_SSID(wlc, ssid1, ssid2, len1, len2) \
+#define BRCMS_IS_MATCH_SSID(wlc, ssid1, ssid2, len1, len2) \
((len1 == len2) && !memcmp(ssid1, ssid2, len1))
-extern void wlc_fatal_error(struct wlc_info *wlc);
-extern void wlc_bmac_rpc_watchdog(struct wlc_info *wlc);
-extern void wlc_recv(struct wlc_info *wlc, struct sk_buff *p);
-extern bool wlc_dotxstatus(struct wlc_info *wlc, tx_status_t *txs, u32 frm_tx2);
-extern void wlc_txfifo(struct wlc_info *wlc, uint fifo, struct sk_buff *p,
- bool commit, s8 txpktpend);
-extern void wlc_txfifo_complete(struct wlc_info *wlc, uint fifo, s8 txpktpend);
-extern void wlc_txq_enq(void *ctx, struct scb *scb, struct sk_buff *sdu,
- uint prec);
-extern void wlc_info_init(struct wlc_info *wlc, int unit);
-extern void wlc_print_txstatus(tx_status_t *txs);
-extern int wlc_xmtfifo_sz_get(struct wlc_info *wlc, uint fifo, uint *blocks);
-extern void wlc_write_template_ram(struct wlc_info *wlc, int offset, int len,
- void *buf);
-extern void wlc_write_hw_bcntemplates(struct wlc_info *wlc, void *bcn, int len,
- bool both);
-extern void wlc_set_cwmin(struct wlc_info *wlc, u16 newmin);
-extern void wlc_set_cwmax(struct wlc_info *wlc, u16 newmax);
-extern void wlc_pllreq(struct wlc_info *wlc, bool set, mbool req_bit);
-extern void wlc_reset_bmac_done(struct wlc_info *wlc);
+extern void brcms_c_fatal_error(struct brcms_c_info *wlc);
+extern void brcms_b_rpc_watchdog(struct brcms_c_info *wlc);
+extern void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p);
+extern bool brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs,
+ u32 frm_tx2);
+extern void brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
+ struct sk_buff *p,
+ bool commit, s8 txpktpend);
+extern void brcms_c_txfifo_complete(struct brcms_c_info *wlc, uint fifo,
+ s8 txpktpend);
+extern void brcms_c_txq_enq(void *ctx, struct scb *scb, struct sk_buff *sdu,
+ uint prec);
+extern void brcms_c_info_init(struct brcms_c_info *wlc, int unit);
+extern void brcms_c_print_txstatus(struct tx_status *txs);
+extern int brcms_c_xmtfifo_sz_get(struct brcms_c_info *wlc, uint fifo,
+ uint *blocks);
+extern void brcms_c_write_template_ram(struct brcms_c_info *wlc, int offset,
+ int len, void *buf);
+extern void brcms_c_write_hw_bcntemplates(struct brcms_c_info *wlc, void *bcn,
+ int len, bool both);
+extern void brcms_c_pllreq(struct brcms_c_info *wlc, bool set, mbool req_bit);
+extern void brcms_c_reset_bmac_done(struct brcms_c_info *wlc);
#if defined(BCMDBG)
-extern void wlc_print_rxh(d11rxhdr_t *rxh);
-extern void wlc_print_hdrs(struct wlc_info *wlc, const char *prefix, u8 *frame,
- d11txh_t *txh, d11rxhdr_t *rxh, uint len);
-extern void wlc_print_txdesc(d11txh_t *txh);
+extern void brcms_c_print_rxh(struct d11rxhdr *rxh);
+extern void brcms_c_print_txdesc(struct d11txh *txh);
#else
-#define wlc_print_txdesc(a)
-#endif
-#if defined(BCMDBG)
-extern void wlc_print_dot11_mac_hdr(u8 *buf, int len);
+#define brcms_c_print_txdesc(a)
#endif
-extern void wlc_setxband(struct wlc_hw_info *wlc_hw, uint bandunit);
-extern void wlc_coredisable(struct wlc_hw_info *wlc_hw);
+extern void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit);
+extern void brcms_c_coredisable(struct brcms_hardware *wlc_hw);
-extern bool wlc_valid_rate(struct wlc_info *wlc, ratespec_t rate, int band,
- bool verbose);
-extern void wlc_ap_upd(struct wlc_info *wlc);
+extern bool brcms_c_valid_rate(struct brcms_c_info *wlc, ratespec_t rate,
+ int band, bool verbose);
+extern void brcms_c_ap_upd(struct brcms_c_info *wlc);
/* helper functions */
-extern void wlc_shm_ssid_upd(struct wlc_info *wlc, struct wlc_bsscfg *cfg);
-extern int wlc_set_gmode(struct wlc_info *wlc, u8 gmode, bool config);
-
-extern void wlc_mac_bcn_promisc_change(struct wlc_info *wlc, bool promisc);
-extern void wlc_mac_bcn_promisc(struct wlc_info *wlc);
-extern void wlc_mac_promisc(struct wlc_info *wlc);
-extern void wlc_txflowcontrol(struct wlc_info *wlc, struct wlc_txq_info *qi,
- bool on, int prio);
-extern void wlc_txflowcontrol_override(struct wlc_info *wlc,
- struct wlc_txq_info *qi,
+extern void brcms_c_shm_ssid_upd(struct brcms_c_info *wlc,
+ struct brcms_bss_cfg *cfg);
+extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
+
+extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc,
+ bool promisc);
+extern void brcms_c_mac_bcn_promisc(struct brcms_c_info *wlc);
+extern void brcms_c_mac_promisc(struct brcms_c_info *wlc);
+extern void brcms_c_txflowcontrol(struct brcms_c_info *wlc,
+ struct brcms_txq_info *qi,
+ bool on, int prio);
+extern void brcms_c_txflowcontrol_override(struct brcms_c_info *wlc,
+ struct brcms_txq_info *qi,
bool on, uint override);
-extern bool wlc_txflowcontrol_prio_isset(struct wlc_info *wlc,
- struct wlc_txq_info *qi, int prio);
-extern void wlc_send_q(struct wlc_info *wlc);
-extern int wlc_prep_pdu(struct wlc_info *wlc, struct sk_buff *pdu, uint *fifo);
-
-extern u16 wlc_calc_lsig_len(struct wlc_info *wlc, ratespec_t ratespec,
+extern bool brcms_c_txflowcontrol_prio_isset(struct brcms_c_info *wlc,
+ struct brcms_txq_info *qi,
+ int prio);
+extern void brcms_c_send_q(struct brcms_c_info *wlc);
+extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
+ uint *fifo);
+
+extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, ratespec_t ratespec,
uint mac_len);
-extern ratespec_t wlc_rspec_to_rts_rspec(struct wlc_info *wlc, ratespec_t rspec,
- bool use_rspec, u16 mimo_ctlchbw);
-extern u16 wlc_compute_rtscts_dur(struct wlc_info *wlc, bool cts_only,
- ratespec_t rts_rate, ratespec_t frame_rate,
- u8 rts_preamble_type,
- u8 frame_preamble_type, uint frame_len,
- bool ba);
-
-extern void wlc_tbtt(struct wlc_info *wlc, d11regs_t *regs);
-extern void wlc_inval_dma_pkts(struct wlc_hw_info *hw,
+extern ratespec_t brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc,
+ ratespec_t rspec,
+ bool use_rspec, u16 mimo_ctlchbw);
+extern u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
+ ratespec_t rts_rate,
+ ratespec_t frame_rate,
+ u8 rts_preamble_type,
+ u8 frame_preamble_type, uint frame_len,
+ bool ba);
+
+extern void brcms_c_tbtt(struct brcms_c_info *wlc);
+extern void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
struct ieee80211_sta *sta,
void (*dma_callback_fn));
-#if defined(BCMDBG)
-extern void wlc_dump_ie(struct wlc_info *wlc, bcm_tlv_t *ie,
- struct bcmstrbuf *b);
-#endif
-
-extern void wlc_reprate_init(struct wlc_info *wlc);
-extern void wlc_bsscfg_reprate_init(struct wlc_bsscfg *bsscfg);
+extern void brcms_c_reprate_init(struct brcms_c_info *wlc);
+extern void brcms_c_bsscfg_reprate_init(struct brcms_bss_cfg *bsscfg);
/* Shared memory access */
-extern void wlc_write_shm(struct wlc_info *wlc, uint offset, u16 v);
-extern u16 wlc_read_shm(struct wlc_info *wlc, uint offset);
-extern void wlc_copyto_shm(struct wlc_info *wlc, uint offset, const void *buf,
- int len);
-
-extern void wlc_update_beacon(struct wlc_info *wlc);
-extern void wlc_bss_update_beacon(struct wlc_info *wlc,
- struct wlc_bsscfg *bsscfg);
-
-extern void wlc_update_probe_resp(struct wlc_info *wlc, bool suspend);
-extern void wlc_bss_update_probe_resp(struct wlc_info *wlc,
- struct wlc_bsscfg *cfg, bool suspend);
-
-extern bool wlc_ismpc(struct wlc_info *wlc);
-extern bool wlc_is_non_delay_mpc(struct wlc_info *wlc);
-extern void wlc_radio_mpc_upd(struct wlc_info *wlc);
-extern bool wlc_prec_enq(struct wlc_info *wlc, struct pktq *q, void *pkt,
- int prec);
-extern bool wlc_prec_enq_head(struct wlc_info *wlc, struct pktq *q,
+extern void brcms_c_write_shm(struct brcms_c_info *wlc, uint offset, u16 v);
+extern u16 brcms_c_read_shm(struct brcms_c_info *wlc, uint offset);
+extern void brcms_c_copyto_shm(struct brcms_c_info *wlc, uint offset,
+ const void *buf, int len);
+
+extern void brcms_c_update_beacon(struct brcms_c_info *wlc);
+extern void brcms_c_bss_update_beacon(struct brcms_c_info *wlc,
+ struct brcms_bss_cfg *bsscfg);
+
+extern void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
+extern void brcms_c_bss_update_probe_resp(struct brcms_c_info *wlc,
+ struct brcms_bss_cfg *cfg,
+ bool suspend);
+extern bool brcms_c_ismpc(struct brcms_c_info *wlc);
+extern bool brcms_c_is_non_delay_mpc(struct brcms_c_info *wlc);
+extern void brcms_c_radio_mpc_upd(struct brcms_c_info *wlc);
+extern bool brcms_c_prec_enq(struct brcms_c_info *wlc, struct pktq *q,
+ void *pkt, int prec);
+extern bool brcms_c_prec_enq_head(struct brcms_c_info *wlc, struct pktq *q,
struct sk_buff *pkt, int prec, bool head);
-extern u16 wlc_phytxctl1_calc(struct wlc_info *wlc, ratespec_t rspec);
-extern void wlc_compute_plcp(struct wlc_info *wlc, ratespec_t rate, uint length,
- u8 *plcp);
-extern uint wlc_calc_frame_time(struct wlc_info *wlc, ratespec_t ratespec,
- u8 preamble_type, uint mac_len);
-
-extern void wlc_set_chanspec(struct wlc_info *wlc, chanspec_t chanspec);
+extern u16 brcms_c_phytxctl1_calc(struct brcms_c_info *wlc, ratespec_t rspec);
+extern void brcms_c_compute_plcp(struct brcms_c_info *wlc, ratespec_t rate,
+ uint length, u8 *plcp);
+extern uint brcms_c_calc_frame_time(struct brcms_c_info *wlc,
+ ratespec_t ratespec,
+ u8 preamble_type, uint mac_len);
-extern bool wlc_timers_init(struct wlc_info *wlc, int unit);
+extern void brcms_c_set_chanspec(struct brcms_c_info *wlc,
+ chanspec_t chanspec);
-extern const bcm_iovar_t wlc_iovars[];
-
-extern int wlc_doiovar(void *hdl, const bcm_iovar_t *vi, u32 actionid,
- const char *name, void *params, uint p_len, void *arg,
- int len, int val_size, struct wlc_if *wlcif);
-
-#if defined(BCMDBG)
-extern void wlc_print_ies(struct wlc_info *wlc, u8 *ies, uint ies_len);
-#endif
+extern bool brcms_c_timers_init(struct brcms_c_info *wlc, int unit);
-extern int wlc_set_nmode(struct wlc_info *wlc, s32 nmode);
-extern void wlc_mimops_action_ht_send(struct wlc_info *wlc,
- struct wlc_bsscfg *bsscfg,
+extern int brcms_c_set_nmode(struct brcms_c_info *wlc, s32 nmode);
+extern void brcms_c_mimops_action_ht_send(struct brcms_c_info *wlc,
+ struct brcms_bss_cfg *bsscfg,
u8 mimops_mode);
-extern void wlc_switch_shortslot(struct wlc_info *wlc, bool shortslot);
-extern void wlc_set_bssid(struct wlc_bsscfg *cfg);
-extern void wlc_edcf_setparams(struct wlc_info *wlc, bool suspend);
+extern void brcms_c_switch_shortslot(struct brcms_c_info *wlc, bool shortslot);
+extern void brcms_c_set_bssid(struct brcms_bss_cfg *cfg);
+extern void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend);
-extern void wlc_set_ratetable(struct wlc_info *wlc);
-extern int wlc_set_mac(struct wlc_bsscfg *cfg);
-extern void wlc_beacon_phytxctl_txant_upd(struct wlc_info *wlc,
+extern void brcms_c_set_ratetable(struct brcms_c_info *wlc);
+extern int brcms_c_set_mac(struct brcms_bss_cfg *cfg);
+extern void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
ratespec_t bcn_rate);
-extern void wlc_mod_prb_rsp_rate_table(struct wlc_info *wlc, uint frame_len);
-extern ratespec_t wlc_lowest_basic_rspec(struct wlc_info *wlc,
- wlc_rateset_t *rs);
-extern void wlc_radio_disable(struct wlc_info *wlc);
-extern void wlc_bcn_li_upd(struct wlc_info *wlc);
-
-extern int wlc_get_revision_info(struct wlc_info *wlc, void *buf, uint len);
-extern void wlc_set_home_chanspec(struct wlc_info *wlc, chanspec_t chanspec);
-extern void wlc_watchdog_upd(struct wlc_info *wlc, bool tbtt);
-extern bool wlc_ps_allowed(struct wlc_info *wlc);
-extern bool wlc_stay_awake(struct wlc_info *wlc);
-extern void wlc_wme_initparams_sta(struct wlc_info *wlc, wme_param_ie_t *pe);
-
-#endif /* _wlc_h_ */
+extern void brcms_c_mod_prb_rsp_rate_table(struct brcms_c_info *wlc,
+ uint frame_len);
+extern ratespec_t brcms_c_lowest_basic_rspec(struct brcms_c_info *wlc,
+ wlc_rateset_t *rs);
+extern void brcms_c_radio_disable(struct brcms_c_info *wlc);
+extern void brcms_c_bcn_li_upd(struct brcms_c_info *wlc);
+extern void brcms_c_set_home_chanspec(struct brcms_c_info *wlc,
+ chanspec_t chanspec);
+extern bool brcms_c_ps_allowed(struct brcms_c_info *wlc);
+extern bool brcms_c_stay_awake(struct brcms_c_info *wlc);
+extern void brcms_c_wme_initparams_sta(struct brcms_c_info *wlc,
+ struct wme_param_ie *pe);
+
+#endif /* _BRCM_MAIN_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/nicpci.c b/drivers/staging/brcm80211/brcmsmac/nicpci.c
index 18b844a8d2f..3d71c590fce 100644
--- a/drivers/staging/brcm80211/brcmsmac/nicpci.c
+++ b/drivers/staging/brcm80211/brcmsmac/nicpci.c
@@ -14,78 +14,245 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/slab.h>
#include <linux/delay.h>
-#include <linux/string.h>
#include <linux/pci.h>
-#include <bcmdefs.h>
-#include <bcmutils.h>
-#include <bcmnvram.h>
-#include <aiutils.h>
-#include <hndsoc.h>
-#include <bcmdevs.h>
-#include <sbchipc.h>
-#include <pci_core.h>
-#include <pcie_core.h>
-#include <nicpci.h>
-#include <pcicfg.h>
-
-typedef struct {
+
+#include <defs.h>
+#include <soc.h>
+#include <chipcommon.h>
+#include "aiutils.h"
+#include "pub.h"
+#include "nicpci.h"
+
+/* SPROM offsets */
+#define SRSH_ASPM_OFFSET 4 /* word 4 */
+#define SRSH_ASPM_ENB 0x18 /* bit 3, 4 */
+#define SRSH_ASPM_L1_ENB 0x10 /* bit 4 */
+#define SRSH_ASPM_L0s_ENB 0x8 /* bit 3 */
+
+#define SRSH_PCIE_MISC_CONFIG 5 /* word 5 */
+#define SRSH_L23READY_EXIT_NOPERST 0x8000 /* bit 15 */
+#define SRSH_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */
+#define SRSH_CLKREQ_ENB 0x0800 /* bit 11 */
+#define SRSH_BD_OFFSET 6 /* word 6 */
+
+/* chipcontrol */
+#define CHIPCTRL_4321_PLL_DOWN 0x800000 /* serdes PLL down override */
+
+/* MDIO control */
+#define MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */
+#define MDIOCTL_DIVISOR_VAL 0x2
+#define MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequence */
+#define MDIOCTL_ACCESS_DONE 0x100 /* Transaction complete */
+
+/* MDIO Data */
+#define MDIODATA_MASK 0x0000ffff /* data 2 bytes */
+#define MDIODATA_TA 0x00020000 /* Turnaround */
+
+#define MDIODATA_REGADDR_SHF 18 /* Regaddr shift */
+#define MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */
+#define MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */
+#define MDIODATA_DEVADDR_MASK 0x0f800000
+ /* Physmedia devaddr Mask */
+
+/* MDIO Data for older revisions < 10 */
+#define MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift */
+#define MDIODATA_REGADDR_MASK_OLD 0x003c0000
+ /* Regaddr Mask */
+#define MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift */
+#define MDIODATA_DEVADDR_MASK_OLD 0x0fc00000
+ /* Physmedia devaddr Mask */
+
+/* Transactions flags */
+#define MDIODATA_WRITE 0x10000000
+#define MDIODATA_READ 0x20000000
+#define MDIODATA_START 0x40000000
+
+#define MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */
+#define MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */
+
+/* serdes regs (rev < 10) */
+#define MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */
+#define MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
+#define MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
+
+/* SERDES RX registers */
+#define SERDES_RX_CTRL 1 /* Rx cntrl */
+#define SERDES_RX_TIMER1 2 /* Rx Timer1 */
+#define SERDES_RX_CDR 6 /* CDR */
+#define SERDES_RX_CDRBW 7 /* CDR BW */
+/* SERDES RX control register */
+#define SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */
+#define SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */
+
+/* SERDES PLL registers */
+#define SERDES_PLL_CTRL 1 /* PLL control reg */
+#define PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */
+
+/* Linkcontrol reg offset in PCIE Cap */
+#define PCIE_CAP_LINKCTRL_OFFSET 16 /* offset in pcie cap */
+#define PCIE_CAP_LCREG_ASPML0s 0x01 /* ASPM L0s in linkctrl */
+#define PCIE_CAP_LCREG_ASPML1 0x02 /* ASPM L1 in linkctrl */
+#define PCIE_CLKREQ_ENAB 0x100 /* CLKREQ Enab in linkctrl */
+
+#define PCIE_ASPM_ENAB 3 /* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_L1_ENAB 2 /* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_L0s_ENAB 1 /* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_DISAB 0 /* ASPM L0s & L1 in linkctrl */
+
+/* Power management threshold */
+#define PCIE_L1THRESHOLDTIME_MASK 0xFF00 /* bits 8 - 15 */
+#define PCIE_L1THRESHOLDTIME_SHIFT 8 /* PCIE_L1THRESHOLDTIME_SHIFT */
+#define PCIE_L1THRESHOLD_WARVAL 0x72 /* WAR value */
+#define PCIE_ASPMTIMER_EXTEND 0x01000000
+ /* > rev7:
+ * enable extended ASPM timer
+ */
+
+/* different register spaces to access thru pcie indirect access */
+#define PCIE_CONFIGREGS 1 /* Access to config space */
+#define PCIE_PCIEREGS 2 /* Access to pcie registers */
+
+/* PCIE protocol PHY diagnostic registers */
+#define PCIE_PLP_STATUSREG 0x204 /* Status */
+
+/* Status reg PCIE_PLP_STATUSREG */
+#define PCIE_PLP_POLARITYINV_STAT 0x10
+
+/* PCIE protocol DLLP diagnostic registers */
+#define PCIE_DLLP_LCREG 0x100 /* Link Control */
+#define PCIE_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */
+
+/* PCIE protocol TLP diagnostic registers */
+#define PCIE_TLP_WORKAROUNDSREG 0x004 /* TLP Workarounds */
+
+/* Sonics side: PCI core and host control registers */
+struct sbpciregs {
+ u32 control; /* PCI control */
+ u32 PAD[3];
+ u32 arbcontrol; /* PCI arbiter control */
+ u32 clkrun; /* Clkrun Control (>=rev11) */
+ u32 PAD[2];
+ u32 intstatus; /* Interrupt status */
+ u32 intmask; /* Interrupt mask */
+ u32 sbtopcimailbox; /* Sonics to PCI mailbox */
+ u32 PAD[9];
+ u32 bcastaddr; /* Sonics broadcast address */
+ u32 bcastdata; /* Sonics broadcast data */
+ u32 PAD[2];
+ u32 gpioin; /* ro: gpio input (>=rev2) */
+ u32 gpioout; /* rw: gpio output (>=rev2) */
+ u32 gpioouten; /* rw: gpio output enable (>= rev2) */
+ u32 gpiocontrol; /* rw: gpio control (>= rev2) */
+ u32 PAD[36];
+ u32 sbtopci0; /* Sonics to PCI translation 0 */
+ u32 sbtopci1; /* Sonics to PCI translation 1 */
+ u32 sbtopci2; /* Sonics to PCI translation 2 */
+ u32 PAD[189];
+ u32 pcicfg[4][64]; /* 0x400 - 0x7FF, PCI Cfg Space (>=rev8) */
+ u16 sprom[36]; /* SPROM shadow Area */
+ u32 PAD[46];
+};
+
+/* SB side: PCIE core and host control registers */
+struct sbpcieregs {
+ u32 control; /* host mode only */
+ u32 PAD[2];
+ u32 biststatus; /* bist Status: 0x00C */
+ u32 gpiosel; /* PCIE gpio sel: 0x010 */
+ u32 gpioouten; /* PCIE gpio outen: 0x14 */
+ u32 PAD[2];
+ u32 intstatus; /* Interrupt status: 0x20 */
+ u32 intmask; /* Interrupt mask: 0x24 */
+ u32 sbtopcimailbox; /* sb to pcie mailbox: 0x028 */
+ u32 PAD[53];
+ u32 sbtopcie0; /* sb to pcie translation 0: 0x100 */
+ u32 sbtopcie1; /* sb to pcie translation 1: 0x104 */
+ u32 sbtopcie2; /* sb to pcie translation 2: 0x108 */
+ u32 PAD[5];
+
+ /* pcie core supports indirect access to config space */
+ u32 configaddr; /* pcie config space access: Address field: 0x120 */
+ u32 configdata; /* pcie config space access: Data field: 0x124 */
+
+ /* mdio access to serdes */
+ u32 mdiocontrol; /* controls the mdio access: 0x128 */
+ u32 mdiodata; /* Data to the mdio access: 0x12c */
+
+ /* pcie protocol phy/dllp/tlp register indirect access mechanism */
+ u32 pcieindaddr; /* indirect access to
+ * the internal register: 0x130
+ */
+ u32 pcieinddata; /* Data to/from the internal register: 0x134 */
+
+ u32 clkreqenctrl; /* >= rev 6, Clkreq rdma control : 0x138 */
+ u32 PAD[177];
+ u32 pciecfg[4][64]; /* 0x400 - 0x7FF, PCIE Cfg Space */
+ u16 sprom[64]; /* SPROM shadow Area */
+};
+
+struct pcicore_info {
union {
- sbpcieregs_t *pcieregs;
+ struct sbpcieregs *pcieregs;
struct sbpciregs *pciregs;
} regs; /* Memory mapped register to the core */
- si_t *sih; /* System interconnect handle */
+ struct si_pub *sih; /* System interconnect handle */
struct pci_dev *dev;
- u8 pciecap_lcreg_offset; /* PCIE capability LCreg offset in the config space */
+ u8 pciecap_lcreg_offset;/* PCIE capability LCreg offset
+ * in the config space
+ */
bool pcie_pr42767;
u8 pcie_polarity;
u8 pcie_war_aspm_ovr; /* Override ASPM/Clkreq settings */
u8 pmecap_offset; /* PM Capability offset in the config space */
bool pmecap; /* Capable of generating PME */
-} pcicore_info_t;
+};
/* debug/trace */
#define PCI_ERROR(args)
-#define PCIE_PUB(sih) \
- (((sih)->bustype == PCI_BUS) && ((sih)->buscoretype == PCIE_CORE_ID))
+#define PCIE_PUB(sih) \
+ (((sih)->bustype == PCI_BUS) && \
+ ((sih)->buscoretype == PCIE_CORE_ID))
/* routines to access mdio slave device registers */
-static bool pcie_mdiosetblock(pcicore_info_t *pi, uint blk);
-static int pcie_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr,
+static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk);
+static int pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr,
bool write, uint *val);
-static int pcie_mdiowrite(pcicore_info_t *pi, uint physmedia, uint readdr,
+static int pcie_mdiowrite(struct pcicore_info *pi, uint physmedia, uint readdr,
uint val);
-static int pcie_mdioread(pcicore_info_t *pi, uint physmedia, uint readdr,
+static int pcie_mdioread(struct pcicore_info *pi, uint physmedia, uint readdr,
uint *ret_val);
-static void pcie_extendL1timer(pcicore_info_t *pi, bool extend);
-static void pcie_clkreq_upd(pcicore_info_t *pi, uint state);
-
-static void pcie_war_aspm_clkreq(pcicore_info_t *pi);
-static void pcie_war_serdes(pcicore_info_t *pi);
-static void pcie_war_noplldown(pcicore_info_t *pi);
-static void pcie_war_polarity(pcicore_info_t *pi);
-static void pcie_war_pci_setup(pcicore_info_t *pi);
+static void pcie_extendL1timer(struct pcicore_info *pi, bool extend);
+static void pcie_clkreq_upd(struct pcicore_info *pi, uint state);
-static bool pcicore_pmecap(pcicore_info_t *pi);
+static void pcie_war_aspm_clkreq(struct pcicore_info *pi);
+static void pcie_war_serdes(struct pcicore_info *pi);
+static void pcie_war_noplldown(struct pcicore_info *pi);
+static void pcie_war_polarity(struct pcicore_info *pi);
+static void pcie_war_pci_setup(struct pcicore_info *pi);
-#define PCIE_ASPM(sih) ((PCIE_PUB(sih)) && (((sih)->buscorerev >= 3) && ((sih)->buscorerev <= 5)))
+#define PCIE_ASPM(sih) \
+ ((PCIE_PUB(sih)) && \
+ (((sih)->buscorerev >= 3) && \
+ ((sih)->buscorerev <= 5)))
/* delay needed between the mdio control/ mdiodata register data access */
#define PR28829_DELAY() udelay(10)
-/* Initialize the PCI core. It's caller's responsibility to make sure that this is done
- * only once
+/* Initialize the PCI core.
+ * It is the caller's responsibility to make sure that this is done only once
*/
-void *pcicore_init(si_t *sih, void *pdev, void *regs)
+void *pcicore_init(struct si_pub *sih, void *pdev, void *regs)
{
- pcicore_info_t *pi;
+ struct pcicore_info *pi;
- /* alloc pcicore_info_t */
- pi = kzalloc(sizeof(pcicore_info_t), GFP_ATOMIC);
+ /* alloc struct pcicore_info */
+ pi = kzalloc(sizeof(struct pcicore_info), GFP_ATOMIC);
if (pi == NULL) {
PCI_ERROR(("pci_attach: malloc failed!\n"));
return NULL;
@@ -96,23 +263,19 @@ void *pcicore_init(si_t *sih, void *pdev, void *regs)
if (sih->buscoretype == PCIE_CORE_ID) {
u8 cap_ptr;
- pi->regs.pcieregs = (sbpcieregs_t *) regs;
+ pi->regs.pcieregs = regs;
cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP,
NULL, NULL);
pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
} else
- pi->regs.pciregs = (struct sbpciregs *) regs;
+ pi->regs.pciregs = regs;
return pi;
}
void pcicore_deinit(void *pch)
{
- pcicore_info_t *pi = (pcicore_info_t *) pch;
-
- if (pi == NULL)
- return;
- kfree(pi);
+ kfree(pch);
}
/* return cap_offset if requested capability exists in the PCI config space */
@@ -141,7 +304,9 @@ pcicore_find_pci_capability(void *dev, u8 req_cap_id,
if (cap_ptr == 0x00)
goto end;
- /* loop thr'u the capability list and see if the pcie capabilty exists */
+ /* loop thru the capability list
+ * and see if the pcie capability exists
+ */
pci_read_config_byte(dev, cap_ptr, &cap_id);
@@ -151,18 +316,18 @@ pcicore_find_pci_capability(void *dev, u8 req_cap_id,
break;
pci_read_config_byte(dev, cap_ptr, &cap_id);
}
- if (cap_id != req_cap_id) {
+ if (cap_id != req_cap_id)
goto end;
- }
+
/* found the caller requested capability */
- if ((buf != NULL) && (buflen != NULL)) {
+ if (buf != NULL && buflen != NULL) {
u8 cap_data;
bufsize = *buflen;
if (!bufsize)
goto end;
*buflen = 0;
- /* copy the cpability data excluding cap ID and next ptr */
+ /* copy the capability data excluding cap ID and next ptr */
cap_data = cap_ptr + 2;
if ((bufsize + cap_data) > PCI_SZPCR)
bufsize = PCI_SZPCR - cap_data;
@@ -173,38 +338,34 @@ pcicore_find_pci_capability(void *dev, u8 req_cap_id,
buf++;
}
}
- end:
+end:
return cap_ptr;
}
/* ***** Register Access API */
-uint
-pcie_readreg(sbpcieregs_t *pcieregs, uint addrtype,
- uint offset)
+static uint
+pcie_readreg(struct sbpcieregs *pcieregs, uint addrtype, uint offset)
{
uint retval = 0xFFFFFFFF;
switch (addrtype) {
case PCIE_CONFIGREGS:
- W_REG((&pcieregs->configaddr), offset);
+ W_REG(&pcieregs->configaddr, offset);
(void)R_REG((&pcieregs->configaddr));
- retval = R_REG(&(pcieregs->configdata));
+ retval = R_REG(&pcieregs->configdata);
break;
case PCIE_PCIEREGS:
- W_REG(&(pcieregs->pcieindaddr), offset);
- (void)R_REG((&pcieregs->pcieindaddr));
- retval = R_REG(&(pcieregs->pcieinddata));
- break;
- default:
+ W_REG(&pcieregs->pcieindaddr, offset);
+ (void)R_REG(&pcieregs->pcieindaddr);
+ retval = R_REG(&pcieregs->pcieinddata);
break;
}
return retval;
}
-uint
-pcie_writereg(sbpcieregs_t *pcieregs, uint addrtype,
- uint offset, uint val)
+static uint
+pcie_writereg(struct sbpcieregs *pcieregs, uint addrtype, uint offset, uint val)
{
switch (addrtype) {
case PCIE_CONFIGREGS:
@@ -221,26 +382,23 @@ pcie_writereg(sbpcieregs_t *pcieregs, uint addrtype,
return 0;
}
-static bool pcie_mdiosetblock(pcicore_info_t *pi, uint blk)
+static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
{
- sbpcieregs_t *pcieregs = pi->regs.pcieregs;
+ struct sbpcieregs *pcieregs = pi->regs.pcieregs;
uint mdiodata, i = 0;
uint pcie_serdes_spinwait = 200;
- mdiodata =
- MDIODATA_START | MDIODATA_WRITE | (MDIODATA_DEV_ADDR <<
- MDIODATA_DEVADDR_SHF) |
- (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) | MDIODATA_TA | (blk <<
- 4);
+ mdiodata = (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
+ (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
+ (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
+ (blk << 4));
W_REG(&pcieregs->mdiodata, mdiodata);
PR28829_DELAY();
/* retry till the transaction is complete */
while (i < pcie_serdes_spinwait) {
- if (R_REG(&(pcieregs->mdiocontrol)) &
- MDIOCTL_ACCESS_DONE) {
+ if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE)
break;
- }
udelay(1000);
i++;
}
@@ -254,35 +412,36 @@ static bool pcie_mdiosetblock(pcicore_info_t *pi, uint blk)
}
static int
-pcie_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write,
+pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
uint *val)
{
- sbpcieregs_t *pcieregs = pi->regs.pcieregs;
+ struct sbpcieregs *pcieregs = pi->regs.pcieregs;
uint mdiodata;
uint i = 0;
uint pcie_serdes_spinwait = 10;
/* enable mdio access to SERDES */
- W_REG((&pcieregs->mdiocontrol),
- MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
+ W_REG(&pcieregs->mdiocontrol, MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
if (pi->sih->buscorerev >= 10) {
- /* new serdes is slower in rw, using two layers of reg address mapping */
+ /* new serdes is slower in rw,
+ * using two layers of reg address mapping
+ */
if (!pcie_mdiosetblock(pi, physmedia))
return 1;
- mdiodata = (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
- (regaddr << MDIODATA_REGADDR_SHF);
+ mdiodata = ((MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
+ (regaddr << MDIODATA_REGADDR_SHF));
pcie_serdes_spinwait *= 20;
} else {
- mdiodata = (physmedia << MDIODATA_DEVADDR_SHF_OLD) |
- (regaddr << MDIODATA_REGADDR_SHF_OLD);
+ mdiodata = ((physmedia << MDIODATA_DEVADDR_SHF_OLD) |
+ (regaddr << MDIODATA_REGADDR_SHF_OLD));
}
if (!write)
mdiodata |= (MDIODATA_START | MDIODATA_READ | MDIODATA_TA);
else
- mdiodata |=
- (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA | *val);
+ mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
+ *val);
W_REG(&pcieregs->mdiodata, mdiodata);
@@ -290,16 +449,14 @@ pcie_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write,
/* retry till the transaction is complete */
while (i < pcie_serdes_spinwait) {
- if (R_REG(&(pcieregs->mdiocontrol)) &
- MDIOCTL_ACCESS_DONE) {
+ if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) {
if (!write) {
PR28829_DELAY();
- *val =
- (R_REG(&(pcieregs->mdiodata)) &
- MDIODATA_MASK);
+ *val = (R_REG(&pcieregs->mdiodata) &
+ MDIODATA_MASK);
}
/* Disable mdio access to SERDES */
- W_REG((&pcieregs->mdiocontrol), 0);
+ W_REG(&pcieregs->mdiocontrol, 0);
return 0;
}
udelay(1000);
@@ -308,28 +465,29 @@ pcie_mdioop(pcicore_info_t *pi, uint physmedia, uint regaddr, bool write,
PCI_ERROR(("pcie_mdioop: timed out op: %d\n", write));
/* Disable mdio access to SERDES */
- W_REG((&pcieregs->mdiocontrol), 0);
+ W_REG(&pcieregs->mdiocontrol, 0);
return 1;
}
/* use the mdio interface to read from mdio slaves */
static int
-pcie_mdioread(pcicore_info_t *pi, uint physmedia, uint regaddr, uint *regval)
+pcie_mdioread(struct pcicore_info *pi, uint physmedia, uint regaddr,
+ uint *regval)
{
return pcie_mdioop(pi, physmedia, regaddr, false, regval);
}
/* use the mdio interface to write to mdio slaves */
static int
-pcie_mdiowrite(pcicore_info_t *pi, uint physmedia, uint regaddr, uint val)
+pcie_mdiowrite(struct pcicore_info *pi, uint physmedia, uint regaddr, uint val)
{
return pcie_mdioop(pi, physmedia, regaddr, true, &val);
}
/* ***** Support functions ***** */
-u8 pcie_clkreq(void *pch, u32 mask, u32 val)
+static u8 pcie_clkreq(void *pch, u32 mask, u32 val)
{
- pcicore_info_t *pi = (pcicore_info_t *) pch;
+ struct pcicore_info *pi = pch;
u32 reg_val;
u8 offset;
@@ -353,11 +511,11 @@ u8 pcie_clkreq(void *pch, u32 mask, u32 val)
return 0;
}
-static void pcie_extendL1timer(pcicore_info_t *pi, bool extend)
+static void pcie_extendL1timer(struct pcicore_info *pi, bool extend)
{
u32 w;
- si_t *sih = pi->sih;
- sbpcieregs_t *pcieregs = pi->regs.pcieregs;
+ struct si_pub *sih = pi->sih;
+ struct sbpcieregs *pcieregs = pi->regs.pcieregs;
if (!PCIE_PUB(sih) || sih->buscorerev < 7)
return;
@@ -372,9 +530,9 @@ static void pcie_extendL1timer(pcicore_info_t *pi, bool extend)
}
/* centralized clkreq control policy */
-static void pcie_clkreq_upd(pcicore_info_t *pi, uint state)
+static void pcie_clkreq_upd(struct pcicore_info *pi, uint state)
{
- si_t *sih = pi->sih;
+ struct si_pub *sih = pi->sih;
switch (state) {
case SI_DOATTACH:
@@ -384,8 +542,8 @@ static void pcie_clkreq_upd(pcicore_info_t *pi, uint state)
case SI_PCIDOWN:
if (sih->buscorerev == 6) { /* turn on serdes PLL down */
ai_corereg(sih, SI_CC_IDX,
- offsetof(chipcregs_t, chipcontrol_addr), ~0,
- 0);
+ offsetof(chipcregs_t, chipcontrol_addr),
+ ~0, 0);
ai_corereg(sih, SI_CC_IDX,
offsetof(chipcregs_t, chipcontrol_data),
~0x40, 0);
@@ -396,8 +554,8 @@ static void pcie_clkreq_upd(pcicore_info_t *pi, uint state)
case SI_PCIUP:
if (sih->buscorerev == 6) { /* turn off serdes PLL down */
ai_corereg(sih, SI_CC_IDX,
- offsetof(chipcregs_t, chipcontrol_addr), ~0,
- 0);
+ offsetof(chipcregs_t, chipcontrol_addr),
+ ~0, 0);
ai_corereg(sih, SI_CC_IDX,
offsetof(chipcregs_t, chipcontrol_data),
~0x40, 0x40);
@@ -405,31 +563,28 @@ static void pcie_clkreq_upd(pcicore_info_t *pi, uint state)
pcie_clkreq((void *)pi, 1, 0);
}
break;
- default:
- break;
}
}
/* ***** PCI core WARs ***** */
/* Done only once at attach time */
-static void pcie_war_polarity(pcicore_info_t *pi)
+static void pcie_war_polarity(struct pcicore_info *pi)
{
u32 w;
if (pi->pcie_polarity != 0)
return;
- w = pcie_readreg(pi->regs.pcieregs, PCIE_PCIEREGS,
- PCIE_PLP_STATUSREG);
+ w = pcie_readreg(pi->regs.pcieregs, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
/* Detect the current polarity at attach and force that polarity and
* disable changing the polarity
*/
if ((w & PCIE_PLP_POLARITYINV_STAT) == 0)
- pi->pcie_polarity = (SERDES_RX_CTRL_FORCE);
+ pi->pcie_polarity = SERDES_RX_CTRL_FORCE;
else
- pi->pcie_polarity =
- (SERDES_RX_CTRL_FORCE | SERDES_RX_CTRL_POLARITY);
+ pi->pcie_polarity = (SERDES_RX_CTRL_FORCE |
+ SERDES_RX_CTRL_POLARITY);
}
/* enable ASPM and CLKREQ if srom doesn't have it */
@@ -437,10 +592,10 @@ static void pcie_war_polarity(pcicore_info_t *pi)
* : Coming out of 'standby'/'hibernate'
* : If pcie_war_aspm_ovr state changed
*/
-static void pcie_war_aspm_clkreq(pcicore_info_t *pi)
+static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
{
- sbpcieregs_t *pcieregs = pi->regs.pcieregs;
- si_t *sih = pi->sih;
+ struct sbpcieregs *pcieregs = pi->regs.pcieregs;
+ struct si_pub *sih = pi->sih;
u16 val16, *reg16;
u32 w;
@@ -448,28 +603,23 @@ static void pcie_war_aspm_clkreq(pcicore_info_t *pi)
return;
/* bypass this on QT or VSIM */
- if (!ISSIM_ENAB(sih)) {
-
- reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
- val16 = R_REG(reg16);
+ reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
+ val16 = R_REG(reg16);
- val16 &= ~SRSH_ASPM_ENB;
- if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
- val16 |= SRSH_ASPM_ENB;
- else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L1_ENAB)
- val16 |= SRSH_ASPM_L1_ENB;
- else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
- val16 |= SRSH_ASPM_L0s_ENB;
+ val16 &= ~SRSH_ASPM_ENB;
+ if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
+ val16 |= SRSH_ASPM_ENB;
+ else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L1_ENAB)
+ val16 |= SRSH_ASPM_L1_ENB;
+ else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
+ val16 |= SRSH_ASPM_L0s_ENB;
- W_REG(reg16, val16);
+ W_REG(reg16, val16);
- pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset,
- &w);
- w &= ~PCIE_ASPM_ENAB;
- w |= pi->pcie_war_aspm_ovr;
- pci_write_config_dword(pi->dev,
- pi->pciecap_lcreg_offset, w);
- }
+ pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
+ w &= ~PCIE_ASPM_ENAB;
+ w |= pi->pcie_war_aspm_ovr;
+ pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);
reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
val16 = R_REG(reg16);
@@ -485,7 +635,7 @@ static void pcie_war_aspm_clkreq(pcicore_info_t *pi)
/* Apply the polarity determined at the start */
/* Needs to happen when coming out of 'standby'/'hibernate' */
-static void pcie_war_serdes(pcicore_info_t *pi)
+static void pcie_war_serdes(struct pcicore_info *pi)
{
u32 w = 0;
@@ -502,9 +652,9 @@ static void pcie_war_serdes(pcicore_info_t *pi)
/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
/* Needs to happen when coming out of 'standby'/'hibernate' */
-static void pcie_misc_config_fixup(pcicore_info_t *pi)
+static void pcie_misc_config_fixup(struct pcicore_info *pi)
{
- sbpcieregs_t *pcieregs = pi->regs.pcieregs;
+ struct sbpcieregs *pcieregs = pi->regs.pcieregs;
u16 val16, *reg16;
reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG];
@@ -518,28 +668,28 @@ static void pcie_misc_config_fixup(pcicore_info_t *pi)
/* quick hack for testing */
/* Needs to happen when coming out of 'standby'/'hibernate' */
-static void pcie_war_noplldown(pcicore_info_t *pi)
+static void pcie_war_noplldown(struct pcicore_info *pi)
{
- sbpcieregs_t *pcieregs = pi->regs.pcieregs;
+ struct sbpcieregs *pcieregs = pi->regs.pcieregs;
u16 *reg16;
/* turn off serdes PLL down */
ai_corereg(pi->sih, SI_CC_IDX, offsetof(chipcregs_t, chipcontrol),
CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
- /* clear srom shadow backdoor */
+ /* clear srom shadow backdoor */
reg16 = &pcieregs->sprom[SRSH_BD_OFFSET];
W_REG(reg16, 0);
}
/* Needs to happen when coming out of 'standby'/'hibernate' */
-static void pcie_war_pci_setup(pcicore_info_t *pi)
+static void pcie_war_pci_setup(struct pcicore_info *pi)
{
- si_t *sih = pi->sih;
- sbpcieregs_t *pcieregs = pi->regs.pcieregs;
+ struct si_pub *sih = pi->sih;
+ struct sbpcieregs *pcieregs = pi->regs.pcieregs;
u32 w;
- if ((sih->buscorerev == 0) || (sih->buscorerev == 1)) {
+ if (sih->buscorerev == 0 || sih->buscorerev == 1) {
w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
PCIE_TLP_WORKAROUNDSREG);
w |= 0x8;
@@ -549,7 +699,7 @@ static void pcie_war_pci_setup(pcicore_info_t *pi)
if (sih->buscorerev == 1) {
w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
- w |= (0x40);
+ w |= 0x40;
pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
}
@@ -561,8 +711,8 @@ static void pcie_war_pci_setup(pcicore_info_t *pi)
/* Change the L1 threshold for better performance */
w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
PCIE_DLLP_PMTHRESHREG);
- w &= ~(PCIE_L1THRESHOLDTIME_MASK);
- w |= (PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT);
+ w &= ~PCIE_L1THRESHOLDTIME_MASK;
+ w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT;
pcie_writereg(pcieregs, PCIE_PCIEREGS,
PCIE_DLLP_PMTHRESHREG, w);
@@ -572,41 +722,25 @@ static void pcie_war_pci_setup(pcicore_info_t *pi)
} else if (pi->sih->buscorerev == 7)
pcie_war_noplldown(pi);
- /* Note that the fix is actually in the SROM, that's why this is open-ended */
+ /* Note that the fix is actually in the SROM,
+ * that's why this is open-ended
+ */
if (pi->sih->buscorerev >= 6)
pcie_misc_config_fixup(pi);
}
-void pcie_war_ovr_aspm_update(void *pch, u8 aspm)
-{
- pcicore_info_t *pi = (pcicore_info_t *) pch;
-
- if (!PCIE_ASPM(pi->sih))
- return;
-
- /* Validate */
- if (aspm > PCIE_ASPM_ENAB)
- return;
-
- pi->pcie_war_aspm_ovr = aspm;
-
- /* Update the current state */
- pcie_war_aspm_clkreq(pi);
-}
-
/* ***** Functions called during driver state changes ***** */
void pcicore_attach(void *pch, char *pvars, int state)
{
- pcicore_info_t *pi = (pcicore_info_t *) pch;
- si_t *sih = pi->sih;
+ struct pcicore_info *pi = pch;
+ struct si_pub *sih = pi->sih;
/* Determine if this board needs override */
if (PCIE_ASPM(sih)) {
- if ((u32) getintvar(pvars, "boardflags2") & BFL2_PCIEWAR_OVR) {
+ if ((u32)getintvar(pvars, "boardflags2") & BFL2_PCIEWAR_OVR)
pi->pcie_war_aspm_ovr = PCIE_ASPM_DISAB;
- } else {
+ else
pi->pcie_war_aspm_ovr = PCIE_ASPM_ENAB;
- }
}
/* These need to happen in this order only */
@@ -622,7 +756,7 @@ void pcicore_attach(void *pch, char *pvars, int state)
void pcicore_hwup(void *pch)
{
- pcicore_info_t *pi = (pcicore_info_t *) pch;
+ struct pcicore_info *pi = pch;
if (!pi || !PCIE_PUB(pi->sih))
return;
@@ -632,7 +766,7 @@ void pcicore_hwup(void *pch)
void pcicore_up(void *pch, int state)
{
- pcicore_info_t *pi = (pcicore_info_t *) pch;
+ struct pcicore_info *pi = pch;
if (!pi || !PCIE_PUB(pi->sih))
return;
@@ -643,10 +777,12 @@ void pcicore_up(void *pch, int state)
pcie_clkreq_upd(pi, state);
}
-/* When the device is going to enter D3 state (or the system is going to enter S3/S4 states */
+/* When the device is going to enter D3 state
+ * (or the system is going to enter S3/S4 states)
+ */
void pcicore_sleep(void *pch)
{
- pcicore_info_t *pi = (pcicore_info_t *) pch;
+ struct pcicore_info *pi = pch;
u32 w;
if (!pi || !PCIE_ASPM(pi->sih))
@@ -661,7 +797,7 @@ void pcicore_sleep(void *pch)
void pcicore_down(void *pch, int state)
{
- pcicore_info_t *pi = (pcicore_info_t *) pch;
+ struct pcicore_info *pi = pch;
if (!pi || !PCIE_PUB(pi->sih))
return;
@@ -672,165 +808,43 @@ void pcicore_down(void *pch, int state)
pcie_extendL1timer(pi, false);
}
-/* ***** Wake-on-wireless-LAN (WOWL) support functions ***** */
-/* Just uses PCI config accesses to find out, when needed before sb_attach is done */
-bool pcicore_pmecap_fast(void *pch)
+/* precondition: current core is sii->buscoretype */
+void pcicore_fixcfg(void *pch, void *regs)
{
- pcicore_info_t *pi = (pcicore_info_t *) pch;
- u8 cap_ptr;
- u32 pmecap;
-
- cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_PM, NULL,
- NULL);
-
- if (!cap_ptr)
- return false;
-
- pci_read_config_dword(pi->dev, cap_ptr, &pmecap);
+ struct pcicore_info *pi = pch;
+ struct si_info *sii = SI_INFO(pi->sih);
+ struct sbpciregs *pciregs = regs;
+ struct sbpcieregs *pcieregs = regs;
+ u16 val16, *reg16 = NULL;
+ uint pciidx;
- return (pmecap & (PCI_PM_CAP_PME_MASK << 16)) != 0;
-}
-
-/* return true if PM capability exists in the pci config space
- * Uses and caches the information using core handle
- */
-static bool pcicore_pmecap(pcicore_info_t *pi)
-{
- u8 cap_ptr;
- u32 pmecap;
-
- if (!pi->pmecap_offset) {
- cap_ptr = pcicore_find_pci_capability(pi->dev,
- PCI_CAP_ID_PM,
- NULL, NULL);
- if (!cap_ptr)
- return false;
-
- pi->pmecap_offset = cap_ptr;
-
- pci_read_config_dword(pi->dev, pi->pmecap_offset,
- &pmecap);
-
- /* At least one state can generate PME */
- pi->pmecap = (pmecap & (PCI_PM_CAP_PME_MASK << 16)) != 0;
+ /* check 'pi' is correct and fix it if not */
+ if (sii->pub.buscoretype == PCIE_CORE_ID)
+ reg16 = &pcieregs->sprom[SRSH_PI_OFFSET];
+ else if (sii->pub.buscoretype == PCI_CORE_ID)
+ reg16 = &pciregs->sprom[SRSH_PI_OFFSET];
+ pciidx = ai_coreidx(&sii->pub);
+ val16 = R_REG(reg16);
+ if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16)pciidx) {
+ val16 = (u16)(pciidx << SRSH_PI_SHIFT) |
+ (val16 & ~SRSH_PI_MASK);
+ W_REG(reg16, val16);
}
-
- return pi->pmecap;
}
-/* Enable PME generation */
-void pcicore_pmeen(void *pch)
+/* precondition: current core is pci core */
+void pcicore_pci_setup(void *pch, void *regs)
{
- pcicore_info_t *pi = (pcicore_info_t *) pch;
+ struct pcicore_info *pi = pch;
+ struct sbpciregs *pciregs = regs;
u32 w;
- /* if not pmecapable return */
- if (!pcicore_pmecap(pi))
- return;
-
- pci_read_config_dword(pi->dev, pi->pmecap_offset + PCI_PM_CTRL,
- &w);
- w |= (PCI_PM_CTRL_PME_ENABLE);
- pci_write_config_dword(pi->dev,
- pi->pmecap_offset + PCI_PM_CTRL, w);
-}
-
-/*
- * Return true if PME status set
- */
-bool pcicore_pmestat(void *pch)
-{
- pcicore_info_t *pi = (pcicore_info_t *) pch;
- u32 w;
-
- if (!pcicore_pmecap(pi))
- return false;
-
- pci_read_config_dword(pi->dev, pi->pmecap_offset + PCI_PM_CTRL,
- &w);
-
- return (w & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
-}
+ OR_REG(&pciregs->sbtopci2, SBTOPCI_PREF | SBTOPCI_BURST);
-/* Disable PME generation, clear the PME status bit if set
- */
-void pcicore_pmeclr(void *pch)
-{
- pcicore_info_t *pi = (pcicore_info_t *) pch;
- u32 w;
-
- if (!pcicore_pmecap(pi))
- return;
-
- pci_read_config_dword(pi->dev, pi->pmecap_offset + PCI_PM_CTRL,
- &w);
-
- PCI_ERROR(("pcicore_pci_pmeclr PMECSR : 0x%x\n", w));
-
- /* PMESTAT is cleared by writing 1 to it */
- w &= ~(PCI_PM_CTRL_PME_ENABLE);
-
- pci_write_config_dword(pi->dev,
- pi->pmecap_offset + PCI_PM_CTRL, w);
-}
-
-u32 pcie_lcreg(void *pch, u32 mask, u32 val)
-{
- pcicore_info_t *pi = (pcicore_info_t *) pch;
- u8 offset;
- u32 tmpval;
-
- offset = pi->pciecap_lcreg_offset;
- if (!offset)
- return 0;
-
- /* set operation */
- if (mask)
- pci_write_config_dword(pi->dev, offset, val);
-
- pci_read_config_dword(pi->dev, offset, &tmpval);
- return tmpval;
-}
-
-u32
-pcicore_pciereg(void *pch, u32 offset, u32 mask, u32 val, uint type)
-{
- u32 reg_val = 0;
- pcicore_info_t *pi = (pcicore_info_t *) pch;
- sbpcieregs_t *pcieregs = pi->regs.pcieregs;
-
- if (mask) {
- PCI_ERROR(("PCIEREG: 0x%x writeval 0x%x\n", offset, val));
- pcie_writereg(pcieregs, type, offset, val);
- }
-
- /* Should not read register 0x154 */
- if (pi->sih->buscorerev <= 5 && offset == PCIE_DLLP_PCIE11
- && type == PCIE_PCIEREGS)
- return reg_val;
-
- reg_val = pcie_readreg(pcieregs, type, offset);
- PCI_ERROR(("PCIEREG: 0x%x readval is 0x%x\n", offset, reg_val));
-
- return reg_val;
-}
-
-u32
-pcicore_pcieserdesreg(void *pch, u32 mdioslave, u32 offset, u32 mask,
- u32 val)
-{
- u32 reg_val = 0;
- pcicore_info_t *pi = (pcicore_info_t *) pch;
-
- if (mask) {
- PCI_ERROR(("PCIEMDIOREG: 0x%x writeval 0x%x\n", offset, val));
- pcie_mdiowrite(pi, mdioslave, offset, val);
+ if (SI_INFO(pi->sih)->pub.buscorerev >= 11) {
+ OR_REG(&pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
+ w = R_REG(&pciregs->clkrun);
+ W_REG(&pciregs->clkrun, w | PCI_CLKRUN_DSBL);
+ w = R_REG(&pciregs->clkrun);
}
-
- if (pcie_mdioread(pi, mdioslave, offset, &reg_val))
- reg_val = 0xFFFFFFFF;
- PCI_ERROR(("PCIEMDIOREG: dev 0x%x offset 0x%x read 0x%x\n", mdioslave,
- offset, reg_val));
-
- return reg_val;
}
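
For reference, the mdiodata words that pcie_mdiosetblock() and pcie_mdioop() build above can be reproduced in a stand-alone sketch; the constants are copied from the defines in this file, while blk, regaddr and val are made-up illustration values (the program only prints the composed words, it touches no hardware).

/* Minimal sketch of the serdes MDIO word packing used in nicpci.c. */
#include <stdio.h>
#include <inttypes.h>

#define MDIODATA_TA		0x00020000
#define MDIODATA_REGADDR_SHF	18
#define MDIODATA_DEVADDR_SHF	23
#define MDIODATA_WRITE		0x10000000
#define MDIODATA_START		0x40000000
#define MDIODATA_DEV_ADDR	0x0
#define MDIODATA_BLK_ADDR	0x1F

int main(void)
{
	uint32_t blk = 0x800;		/* made-up serdes block */
	uint32_t regaddr = 0x02;	/* made-up register in that block */
	uint32_t val = 0x1234;		/* made-up value to write */
	uint32_t setblk, wr;

	/* block select word, as built in pcie_mdiosetblock() */
	setblk = MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
		 (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
		 (MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
		 (blk << 4);

	/* register write word, as built in pcie_mdioop() for rev >= 10 */
	wr = (MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
	     (regaddr << MDIODATA_REGADDR_SHF) |
	     MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA | val;

	printf("setblock word: 0x%08" PRIx32 "\n", setblk);
	printf("write word:    0x%08" PRIx32 "\n", wr);
	return 0;
}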
diff --git a/drivers/staging/brcm80211/brcmsmac/nicpci.h b/drivers/staging/brcm80211/brcmsmac/nicpci.h
new file mode 100644
index 00000000000..f71f842a215
--- /dev/null
+++ b/drivers/staging/brcm80211/brcmsmac/nicpci.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_NICPCI_H_
+#define _BRCM_NICPCI_H_
+
+#include "types.h"
+
+/* PCI configuration address space size */
+#define PCI_SZPCR 256
+
+/* Brcm PCI configuration registers */
+/* backplane address space accessed by BAR0 */
+#define PCI_BAR0_WIN 0x80
+/* sprom property control */
+#define PCI_SPROM_CONTROL 0x88
+/* mask of PCI and other cores interrupts */
+#define PCI_INT_MASK 0x94
+/* backplane core interrupt mask bits offset */
+#define PCI_SBIM_SHIFT 8
+/* backplane address space accessed by second 4KB of BAR0 */
+#define PCI_BAR0_WIN2 0xac
+/* pci config space gpio input (>=rev3) */
+#define PCI_GPIO_IN 0xb0
+/* pci config space gpio output (>=rev3) */
+#define PCI_GPIO_OUT 0xb4
+/* pci config space gpio output enable (>=rev3) */
+#define PCI_GPIO_OUTEN 0xb8
+
+/* bar0 + 4K accesses external sprom */
+#define PCI_BAR0_SPROM_OFFSET (4 * 1024)
+/* bar0 + 6K accesses pci core registers */
+#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024)
+/*
+ * pci core SB registers are at the end of the
+ * 8KB window, so their address is the "regular"
+ * address plus 4K
+ */
+#define PCI_BAR0_PCISBR_OFFSET (4 * 1024)
+/* bar0 window size Match with corerev 13 */
+#define PCI_BAR0_WINSZ (16 * 1024)
+/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */
+/* bar0 + 8K accesses pci/pcie core registers */
+#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024)
+/* bar0 + 12K accesses chipc core registers */
+#define PCI_16KB0_CCREGS_OFFSET (12 * 1024)
+
+#define PCI_CLKRUN_DSBL 0x8000 /* Bit 15 forceClkrun */
+
+/* Sonics to PCI translation types */
+#define SBTOPCI_PREF 0x4 /* prefetch enable */
+#define SBTOPCI_BURST 0x8 /* burst enable */
+#define SBTOPCI_RC_READMULTI 0x20 /* memory read multiple */
+
+/* PCI core index in SROM shadow area */
+#define SRSH_PI_OFFSET 0 /* first word */
+#define SRSH_PI_MASK 0xf000 /* bit 15:12 */
+#define SRSH_PI_SHIFT 12 /* bit 15:12 */
+
+extern void *pcicore_init(struct si_pub *sih, void *pdev, void *regs);
+extern void pcicore_deinit(void *pch);
+extern void pcicore_attach(void *pch, char *pvars, int state);
+extern void pcicore_hwup(void *pch);
+extern void pcicore_up(void *pch, int state);
+extern void pcicore_sleep(void *pch);
+extern void pcicore_down(void *pch, int state);
+extern u8 pcicore_find_pci_capability(void *dev, u8 req_cap_id,
+ unsigned char *buf, u32 *buflen);
+extern void pcicore_fixcfg(void *pch, void *regs);
+extern void pcicore_pci_setup(void *pch, void *regs);
+
+#endif /* _BRCM_NICPCI_H_ */
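
The pcicore_find_pci_capability() declared above walks the standard PCI capability linked list through config-space reads; a minimal stand-alone sketch of the same walk against a fake in-memory copy of config space looks like this (the capability layout in the sample buffer is invented for illustration).

/* Sketch of the capability-list walk, run on a fake config-space dump. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define PCI_CAPABILITY_LIST	0x34	/* offset of first capability pointer */
#define PCI_CAP_ID_EXP		0x10	/* PCI Express capability ID */

static uint8_t find_cap(const uint8_t *cfg, uint8_t want_id)
{
	uint8_t ptr = cfg[PCI_CAPABILITY_LIST];

	while (ptr != 0x00) {
		uint8_t id = cfg[ptr];		/* capability ID */
		uint8_t next = cfg[ptr + 1];	/* next capability pointer */

		if (id == want_id)
			return ptr;
		ptr = next;
	}
	return 0;	/* not found */
}

int main(void)
{
	uint8_t cfg[256];

	memset(cfg, 0, sizeof(cfg));
	cfg[PCI_CAPABILITY_LIST] = 0x40;		/* first cap at 0x40 */
	cfg[0x40] = 0x01; cfg[0x41] = 0x58;		/* PM cap -> next at 0x58 */
	cfg[0x58] = PCI_CAP_ID_EXP; cfg[0x59] = 0x00;	/* PCIe cap, end of list */

	printf("PCIe capability at 0x%02x\n", find_cap(cfg, PCI_CAP_ID_EXP));
	return 0;
}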
diff --git a/drivers/staging/brcm80211/brcmsmac/nvram.c b/drivers/staging/brcm80211/brcmsmac/nvram.c
deleted file mode 100644
index 085ec0b9224..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/nvram.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <bcmdefs.h>
-#include <bcmutils.h>
-#include <bcmnvram.h>
-#include <sbchipc.h>
-#include <bcmdevs.h>
-#include <hndsoc.h>
-
-#define NVR_MSG(x)
-
-typedef struct _vars {
- struct _vars *next;
- int bufsz; /* allocated size */
- int size; /* actual vars size */
- char *vars;
-} vars_t;
-
-#define VARS_T_OH sizeof(vars_t)
-
-static vars_t *vars;
-
-#define NVRAM_FILE 1
-
-static char *findvar(char *vars, char *lim, const char *name);
-
-int nvram_init(void)
-{
-
- /* Make sure we read nvram in flash just once before freeing the memory */
- if (vars != NULL) {
- NVR_MSG(("nvram_init: called again without calling nvram_exit()\n"));
- return 0;
- }
- return 0;
-}
-
-int nvram_append(char *varlst, uint varsz)
-{
- uint bufsz = VARS_T_OH;
- vars_t *new;
-
- new = kmalloc(bufsz, GFP_ATOMIC);
- if (new == NULL)
- return -ENOMEM;
-
- new->vars = varlst;
- new->bufsz = bufsz;
- new->size = varsz;
- new->next = vars;
- vars = new;
-
- return 0;
-}
-
-void nvram_exit(void)
-{
- vars_t *this, *next;
-
- this = vars;
- if (this)
- kfree(this->vars);
-
- while (this) {
- next = this->next;
- kfree(this);
- this = next;
- }
- vars = NULL;
-}
-
-static char *findvar(char *vars, char *lim, const char *name)
-{
- char *s;
- int len;
-
- len = strlen(name);
-
- for (s = vars; (s < lim) && *s;) {
- if ((memcmp(s, name, len) == 0) && (s[len] == '='))
- return &s[len + 1];
-
- while (*s++)
- ;
- }
-
- return NULL;
-}
-
-/*
- * Search the name=value vars for a specific one and return its value.
- * Returns NULL if not found.
- */
-char *getvar(char *vars, const char *name)
-{
- char *s;
- int len;
-
- if (!name)
- return NULL;
-
- len = strlen(name);
- if (len == 0)
- return NULL;
-
- /* first look in vars[] */
- for (s = vars; s && *s;) {
- if ((memcmp(s, name, len) == 0) && (s[len] == '='))
- return &s[len + 1];
-
- while (*s++)
- ;
- }
- /* then query nvram */
- return nvram_get(name);
-}
-
-/*
- * Search the vars for a specific one and return its value as
- * an integer. Returns 0 if not found.
- */
-int getintvar(char *vars, const char *name)
-{
- char *val;
-
- val = getvar(vars, name);
- if (val == NULL)
- return 0;
-
- return simple_strtoul(val, NULL, 0);
-}
-
-char *nvram_get(const char *name)
-{
- char *v = NULL;
- vars_t *cur;
-
- for (cur = vars; cur; cur = cur->next) {
- v = findvar(cur->vars, cur->vars + cur->size, name);
- if (v)
- break;
- }
-
- return v;
-}
-
-int nvram_set(const char *name, const char *value)
-{
- return 0;
-}
-
-int nvram_unset(const char *name)
-{
- return 0;
-}
-
-int nvram_reset(void)
-{
- return 0;
-}
-
-int nvram_commit(void)
-{
- return 0;
-}
-
-int nvram_getall(char *buf, int count)
-{
- int len, resid = count;
- vars_t *this;
-
- this = vars;
- while (this) {
- char *from, *lim, *to;
- int acc;
-
- from = this->vars;
- lim = (char *)(this->vars + this->size);
- to = buf;
- acc = 0;
- while ((from < lim) && (*from)) {
- len = strlen(from) + 1;
- if (resid < (acc + len))
- return -EOVERFLOW;
- memcpy(to, from, len);
- acc += len;
- from += len;
- to += len;
- }
-
- resid -= acc;
- buf += acc;
- this = this->next;
- }
- if (resid < 1)
- return -EOVERFLOW;
- *buf = '\0';
- return 0;
-}
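
The removed getvar()/findvar() above (still reached through getintvar() callers such as pcicore_attach() in nicpci.c) parsed a blob of NUL-terminated "name=value" strings ending with an empty string; a minimal stand-alone sketch of that walk, with an invented vars blob, is shown below.

/* Sketch of the "name=value\0...\0\0" lookup the removed nvram code did. */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

static const char *findvar(const char *vars, const char *lim, const char *name)
{
	size_t len = strlen(name);
	const char *s;

	for (s = vars; s < lim && *s; s += strlen(s) + 1)
		if (strncmp(s, name, len) == 0 && s[len] == '=')
			return &s[len + 1];

	return NULL;
}

int main(void)
{
	/* two pairs, then the empty string that terminates the blob */
	static const char vars[] =
		"boardflags2=0x0800\0macaddr=00:90:4c:aa:bb:cc\0";
	const char *v = findvar(vars, vars + sizeof(vars), "boardflags2");

	if (v)
		printf("boardflags2 = %ld\n", strtol(v, NULL, 0));
	return 0;
}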
diff --git a/drivers/staging/brcm80211/brcmsmac/otp.c b/drivers/staging/brcm80211/brcmsmac/otp.c
new file mode 100644
index 00000000000..34253cf3781
--- /dev/null
+++ b/drivers/staging/brcm80211/brcmsmac/otp.c
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+
+#include <brcm_hw_ids.h>
+#include <chipcommon.h>
+#include "aiutils.h"
+#include "otp.h"
+
+#define OTPS_GUP_MASK 0x00000f00
+#define OTPS_GUP_SHIFT 8
+#define OTPS_GUP_HW 0x00000100 /* h/w subregion is programmed */
+#define OTPS_GUP_SW 0x00000200 /* s/w subregion is programmed */
+#define OTPS_GUP_CI 0x00000400 /* chipid/pkgopt subregion is programmed */
+#define OTPS_GUP_FUSE 0x00000800 /* fuse subregion is programmed */
+
+/* Fields in otpprog in rev >= 21 */
+#define OTPP_COL_MASK 0x000000ff
+#define OTPP_COL_SHIFT 0
+#define OTPP_ROW_MASK 0x0000ff00
+#define OTPP_ROW_SHIFT 8
+#define OTPP_OC_MASK 0x0f000000
+#define OTPP_OC_SHIFT 24
+#define OTPP_READERR 0x10000000
+#define OTPP_VALUE_MASK 0x20000000
+#define OTPP_VALUE_SHIFT 29
+#define OTPP_START_BUSY 0x80000000
+#define OTPP_READ 0x40000000
+
+/* Opcodes for OTPP_OC field */
+#define OTPPOC_READ 0
+#define OTPPOC_BIT_PROG 1
+#define OTPPOC_VERIFY 3
+#define OTPPOC_INIT 4
+#define OTPPOC_SET 5
+#define OTPPOC_RESET 6
+#define OTPPOC_OCST 7
+#define OTPPOC_ROW_LOCK 8
+#define OTPPOC_PRESCN_TEST 9
+
+#define OTPTYPE_IPX(ccrev) ((ccrev) == 21 || (ccrev) >= 23)
+
+#define OTPP_TRIES 10000000 /* # of tries for OTPP */
+
+#define MAXNUMRDES 9 /* Maximum OTP redundancy entries */
+
+/* OTP common function type */
+typedef int (*otp_status_t) (void *oh);
+typedef int (*otp_size_t) (void *oh);
+typedef void *(*otp_init_t) (struct si_pub *sih);
+typedef u16(*otp_read_bit_t) (void *oh, chipcregs_t *cc, uint off);
+typedef int (*otp_read_region_t) (struct si_pub *sih, int region, u16 *data,
+ uint *wlen);
+typedef int (*otp_nvread_t) (void *oh, char *data, uint *len);
+
+/* OTP function struct */
+struct otp_fn_s {
+ otp_size_t size;
+ otp_read_bit_t read_bit;
+ otp_init_t init;
+ otp_read_region_t read_region;
+ otp_nvread_t nvread;
+ otp_status_t status;
+};
+
+struct otpinfo {
+ uint ccrev; /* chipc revision */
+ struct otp_fn_s *fn; /* OTP functions */
+ struct si_pub *sih; /* Saved sb handle */
+
+ /* IPX OTP section */
+ u16 wsize; /* Size of otp in words */
+ u16 rows; /* Geometry */
+ u16 cols; /* Geometry */
+ u32 status; /* Flag bits (lock/prog/rv).
+ * (Reflected only when OTP is power cycled)
+ */
+ u16 hwbase; /* hardware subregion offset */
+ u16 hwlim; /* hardware subregion boundary */
+ u16 swbase; /* software subregion offset */
+ u16 swlim; /* software subregion boundary */
+ u16 fbase; /* fuse subregion offset */
+ u16 flim; /* fuse subregion boundary */
+ int otpgu_base; /* offset to General Use Region */
+};
+
+static struct otpinfo otpinfo;
+
+/*
+ * IPX OTP Code
+ *
+ * Exported functions:
+ * ipxotp_status()
+ * ipxotp_size()
+ * ipxotp_init()
+ * ipxotp_read_bit()
+ * ipxotp_read_region()
+ * ipxotp_nvread()
+ *
+ */
+
+#define HWSW_RGN(rgn) (((rgn) == OTP_HW_RGN) ? "h/w" : "s/w")
+
+/* OTP layout */
+/* CC revs 21, 24 and 27 OTP General Use Region word offset */
+#define REVA4_OTPGU_BASE 12
+
+/* CC revs 23, 25, 26, 28 and above OTP General Use Region word offset */
+#define REVB8_OTPGU_BASE 20
+
+/* CC rev 36 OTP General Use Region word offset */
+#define REV36_OTPGU_BASE 12
+
+/* Subregion word offsets in General Use region */
+#define OTPGU_HSB_OFF 0
+#define OTPGU_SFB_OFF 1
+#define OTPGU_CI_OFF 2
+#define OTPGU_P_OFF 3
+#define OTPGU_SROM_OFF 4
+
+/* Flag bit offsets in General Use region */
+#define OTPGU_HWP_OFF 60
+#define OTPGU_SWP_OFF 61
+#define OTPGU_CIP_OFF 62
+#define OTPGU_FUSEP_OFF 63
+#define OTPGU_CIP_MSK 0x4000
+#define OTPGU_P_MSK 0xf000
+#define OTPGU_P_SHIFT (OTPGU_HWP_OFF % 16)
+
+/* OTP Size */
+#define OTP_SZ_FU_324 ((roundup(324, 8))/8) /* 324 bits */
+#define OTP_SZ_FU_288 (288/8) /* 288 bits */
+#define OTP_SZ_FU_216 (216/8) /* 216 bits */
+#define OTP_SZ_FU_72 (72/8) /* 72 bits */
+#define OTP_SZ_CHECKSUM (16/8) /* 16 bits */
+#define OTP4315_SWREG_SZ 178 /* 178 bytes */
+#define OTP_SZ_FU_144 (144/8) /* 144 bits */
+
+static int ipxotp_status(void *oh)
+{
+ struct otpinfo *oi = (struct otpinfo *) oh;
+ return (int)(oi->status);
+}
+
+/* Return size in bytes */
+static int ipxotp_size(void *oh)
+{
+ struct otpinfo *oi = (struct otpinfo *) oh;
+ return (int)oi->wsize * 2;
+}
+
+static u16 ipxotp_otpr(void *oh, chipcregs_t *cc, uint wn)
+{
+ struct otpinfo *oi;
+
+ oi = (struct otpinfo *) oh;
+
+ return R_REG(&cc->sromotp[wn]);
+}
+
+static u16 ipxotp_read_bit(void *oh, chipcregs_t *cc, uint off)
+{
+ struct otpinfo *oi = (struct otpinfo *) oh;
+ uint k, row, col;
+ u32 otpp, st;
+
+ row = off / oi->cols;
+ col = off % oi->cols;
+
+ otpp = OTPP_START_BUSY |
+ ((OTPPOC_READ << OTPP_OC_SHIFT) & OTPP_OC_MASK) |
+ ((row << OTPP_ROW_SHIFT) & OTPP_ROW_MASK) |
+ ((col << OTPP_COL_SHIFT) & OTPP_COL_MASK);
+ W_REG(&cc->otpprog, otpp);
+
+ for (k = 0;
+ ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY)
+ && (k < OTPP_TRIES); k++)
+ ;
+ if (k >= OTPP_TRIES) {
+ return 0xffff;
+ }
+ if (st & OTPP_READERR) {
+ return 0xffff;
+ }
+ st = (st & OTPP_VALUE_MASK) >> OTPP_VALUE_SHIFT;
+
+ return (int)st;
+}
+
+/* Calculate max HW/SW region byte size by subtracting fuse region and
+ * checksum size, osizew is oi->wsize (OTP size - GU size) in words
+ */
+static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
+{
+ int ret = 0;
+
+ switch (sih->chip) {
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
+ break;
+ case BCM4313_CHIP_ID:
+ ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
+ break;
+ default:
+ break; /* Don't know about this chip */
+ }
+
+ return ret;
+}
+
+static void _ipxotp_init(struct otpinfo *oi, chipcregs_t *cc)
+{
+ uint k;
+ u32 otpp, st;
+
+ /* record word offset of General Use Region for various chipcommon revs */
+ if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24
+ || oi->sih->ccrev == 27) {
+ oi->otpgu_base = REVA4_OTPGU_BASE;
+ } else if (oi->sih->ccrev == 36) {
+ /* OTP size greater than or equal to 2KB (128 words), otpgu_base is similar to rev23 */
+ if (oi->wsize >= 128)
+ oi->otpgu_base = REVB8_OTPGU_BASE;
+ else
+ oi->otpgu_base = REV36_OTPGU_BASE;
+ } else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) {
+ oi->otpgu_base = REVB8_OTPGU_BASE;
+ }
+
+ /* First issue an init command so the status is up to date */
+ otpp =
+ OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);
+
+ W_REG(&cc->otpprog, otpp);
+ for (k = 0;
+ ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY)
+ && (k < OTPP_TRIES); k++)
+ ;
+ if (k >= OTPP_TRIES) {
+ return;
+ }
+
+ /* Read OTP lock bits and subregion programmed indication bits */
+ oi->status = R_REG(&cc->otpstatus);
+
+ if ((oi->sih->chip == BCM43224_CHIP_ID)
+ || (oi->sih->chip == BCM43225_CHIP_ID)) {
+ u32 p_bits;
+ p_bits =
+ (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) &
+ OTPGU_P_MSK)
+ >> OTPGU_P_SHIFT;
+ oi->status |= (p_bits << OTPS_GUP_SHIFT);
+ }
+
+ /*
+ * h/w region base and fuse region limit are fixed to the top and
+ * the bottom of the general use region. Everything else can be flexible.
+ */
+ oi->hwbase = oi->otpgu_base + OTPGU_SROM_OFF;
+ oi->hwlim = oi->wsize;
+ if (oi->status & OTPS_GUP_HW) {
+ oi->hwlim =
+ ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
+ oi->swbase = oi->hwlim;
+ } else
+ oi->swbase = oi->hwbase;
+
+ /* subtract fuse and checksum from beginning */
+ oi->swlim = ipxotp_max_rgnsz(oi->sih, oi->wsize) / 2;
+
+ if (oi->status & OTPS_GUP_SW) {
+ oi->swlim =
+ ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
+ oi->fbase = oi->swlim;
+ } else
+ oi->fbase = oi->swbase;
+
+ oi->flim = oi->wsize;
+}
+
+static void *ipxotp_init(struct si_pub *sih)
+{
+ uint idx;
+ chipcregs_t *cc;
+ struct otpinfo *oi;
+
+ /* Make sure we're running IPX OTP */
+ if (!OTPTYPE_IPX(sih->ccrev))
+ return NULL;
+
+ /* Make sure OTP is not disabled */
+ if (ai_is_otp_disabled(sih))
+ return NULL;
+
+ /* OTP is always powered */
+ oi = &otpinfo;
+
+ /* Check for otp size */
+ switch ((sih->cccaps & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
+ case 0:
+ /* Nothing there */
+ return NULL;
+ case 1: /* 32x64 */
+ oi->rows = 32;
+ oi->cols = 64;
+ oi->wsize = 128;
+ break;
+ case 2: /* 64x64 */
+ oi->rows = 64;
+ oi->cols = 64;
+ oi->wsize = 256;
+ break;
+ case 5: /* 96x64 */
+ oi->rows = 96;
+ oi->cols = 64;
+ oi->wsize = 384;
+ break;
+ case 7: /* 16x64 *//* 1024 bits */
+ oi->rows = 16;
+ oi->cols = 64;
+ oi->wsize = 64;
+ break;
+ default:
+ /* Don't know the geometry */
+ return NULL;
+ }
+
+ /* Retrieve OTP region info */
+ idx = ai_coreidx(sih);
+ cc = ai_setcoreidx(sih, SI_CC_IDX);
+
+ _ipxotp_init(oi, cc);
+
+ ai_setcoreidx(sih, idx);
+
+ return (void *)oi;
+}
+
+static int ipxotp_read_region(void *oh, int region, u16 *data, uint *wlen)
+{
+ struct otpinfo *oi = (struct otpinfo *) oh;
+ uint idx;
+ chipcregs_t *cc;
+ uint base, i, sz;
+
+ /* Validate region selection */
+ switch (region) {
+ case OTP_HW_RGN:
+ sz = (uint) oi->hwlim - oi->hwbase;
+ if (!(oi->status & OTPS_GUP_HW)) {
+ *wlen = sz;
+ return -ENODATA;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return -EOVERFLOW;
+ }
+ base = oi->hwbase;
+ break;
+ case OTP_SW_RGN:
+ sz = ((uint) oi->swlim - oi->swbase);
+ if (!(oi->status & OTPS_GUP_SW)) {
+ *wlen = sz;
+ return -ENODATA;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return -EOVERFLOW;
+ }
+ base = oi->swbase;
+ break;
+ case OTP_CI_RGN:
+ sz = OTPGU_CI_SZ;
+ if (!(oi->status & OTPS_GUP_CI)) {
+ *wlen = sz;
+ return -ENODATA;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return -EOVERFLOW;
+ }
+ base = oi->otpgu_base + OTPGU_CI_OFF;
+ break;
+ case OTP_FUSE_RGN:
+ sz = (uint) oi->flim - oi->fbase;
+ if (!(oi->status & OTPS_GUP_FUSE)) {
+ *wlen = sz;
+ return -ENODATA;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return -EOVERFLOW;
+ }
+ base = oi->fbase;
+ break;
+ case OTP_ALL_RGN:
+ sz = ((uint) oi->flim - oi->hwbase);
+ if (!(oi->status & (OTPS_GUP_HW | OTPS_GUP_SW))) {
+ *wlen = sz;
+ return -ENODATA;
+ }
+ if (*wlen < sz) {
+ *wlen = sz;
+ return -EOVERFLOW;
+ }
+ base = oi->hwbase;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ idx = ai_coreidx(oi->sih);
+ cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
+
+ /* Read the data */
+ for (i = 0; i < sz; i++)
+ data[i] = ipxotp_otpr(oh, cc, base + i);
+
+ ai_setcoreidx(oi->sih, idx);
+ *wlen = sz;
+ return 0;
+}
+
+static int ipxotp_nvread(void *oh, char *data, uint *len)
+{
+ return -ENOTSUPP;
+}
+
+static struct otp_fn_s ipxotp_fn = {
+ (otp_size_t) ipxotp_size,
+ (otp_read_bit_t) ipxotp_read_bit,
+
+ (otp_init_t) ipxotp_init,
+ (otp_read_region_t) ipxotp_read_region,
+ (otp_nvread_t) ipxotp_nvread,
+
+ (otp_status_t) ipxotp_status
+};
+
+/*
+ * otp_status()
+ * otp_size()
+ * otp_read_bit()
+ * otp_init()
+ * otp_read_region()
+ * otp_nvread()
+ */
+
+int otp_status(void *oh)
+{
+ struct otpinfo *oi = (struct otpinfo *) oh;
+
+ return oi->fn->status(oh);
+}
+
+int otp_size(void *oh)
+{
+ struct otpinfo *oi = (struct otpinfo *) oh;
+
+ return oi->fn->size(oh);
+}
+
+u16 otp_read_bit(void *oh, uint offset)
+{
+ struct otpinfo *oi = (struct otpinfo *) oh;
+ uint idx = ai_coreidx(oi->sih);
+ chipcregs_t *cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
+ u16 readBit = (u16) oi->fn->read_bit(oh, cc, offset);
+ ai_setcoreidx(oi->sih, idx);
+ return readBit;
+}
+
+void *otp_init(struct si_pub *sih)
+{
+ struct otpinfo *oi;
+ void *ret = NULL;
+
+ oi = &otpinfo;
+ memset(oi, 0, sizeof(struct otpinfo));
+
+ oi->ccrev = sih->ccrev;
+
+ if (OTPTYPE_IPX(oi->ccrev))
+ oi->fn = &ipxotp_fn;
+
+ if (oi->fn == NULL) {
+ return NULL;
+ }
+
+ oi->sih = sih;
+
+ ret = (oi->fn->init) (sih);
+
+ return ret;
+}
+
+int
+otp_read_region(struct si_pub *sih, int region, u16 *data,
+ uint *wlen) {
+ void *oh;
+ int err = 0;
+
+ if (ai_is_otp_disabled(sih)) {
+ err = -EPERM;
+ goto out;
+ }
+
+ oh = otp_init(sih);
+ if (oh == NULL) {
+ err = -EBADE;
+ goto out;
+ }
+
+ err = (((struct otpinfo *) oh)->fn->read_region)
+ (oh, region, data, wlen);
+
+ out:
+ return err;
+}
+
+int otp_nvread(void *oh, char *data, uint *len)
+{
+ struct otpinfo *oi = (struct otpinfo *) oh;
+
+ return oi->fn->nvread(oh, data, len);
+}
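
The offset-to-row/column conversion and the otpprog packing done by ipxotp_read_bit() above can be checked stand-alone; the constants are copied from this file, the 64-column geometry matches the 64x64 case in ipxotp_init(), and the bit offset is a made-up example.

/* Sketch of the otpprog word packing for a single-bit OTP read. */
#include <stdio.h>
#include <inttypes.h>

#define OTPP_COL_MASK	0x000000ff
#define OTPP_COL_SHIFT	0
#define OTPP_ROW_MASK	0x0000ff00
#define OTPP_ROW_SHIFT	8
#define OTPP_OC_MASK	0x0f000000
#define OTPP_OC_SHIFT	24
#define OTPP_START_BUSY	0x80000000
#define OTPPOC_READ	0

int main(void)
{
	uint32_t cols = 64;		/* 64x64 geometry from ipxotp_init() */
	uint32_t off = 200;		/* made-up bit offset */
	uint32_t row = off / cols;	/* 3 */
	uint32_t col = off % cols;	/* 8 */
	uint32_t otpp;

	/* same packing as ipxotp_read_bit() before writing cc->otpprog */
	otpp = OTPP_START_BUSY |
	       ((OTPPOC_READ << OTPP_OC_SHIFT) & OTPP_OC_MASK) |
	       ((row << OTPP_ROW_SHIFT) & OTPP_ROW_MASK) |
	       ((col << OTPP_COL_SHIFT) & OTPP_COL_MASK);

	printf("row=%" PRIu32 " col=%" PRIu32 " otpprog=0x%08" PRIx32 "\n",
	       row, col, otpp);
	return 0;
}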
diff --git a/drivers/staging/brcm80211/include/bcmotp.h b/drivers/staging/brcm80211/brcmsmac/otp.h
index 5803accaa47..f6d3a56acf1 100644
--- a/drivers/staging/brcm80211/include/bcmotp.h
+++ b/drivers/staging/brcm80211/brcmsmac/otp.h
@@ -14,8 +14,10 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _bcmotp_h_
-#define _bcmotp_h_
+#ifndef _BRCM_OTP_H_
+#define _BRCM_OTP_H_
+
+#include "types.h"
/* OTP regions */
#define OTP_HW_RGN 1
@@ -37,8 +39,9 @@
extern int otp_status(void *oh);
extern int otp_size(void *oh);
extern u16 otp_read_bit(void *oh, uint offset);
-extern void *otp_init(si_t *sih);
-extern int otp_read_region(si_t *sih, int region, u16 *data, uint *wlen);
+extern void *otp_init(struct si_pub *sih);
+extern int otp_read_region(struct si_pub *sih, int region, u16 *data,
+ uint *wlen);
extern int otp_nvread(void *oh, char *data, uint *len);
-#endif /* _bcmotp_h_ */
+#endif /* _BRCM_OTP_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_cmn.c b/drivers/staging/brcm80211/brcmsmac/phy/phy_cmn.c
index 6cba4dfbc3d..17012fbe9c9 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_cmn.c
+++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -14,35 +14,27 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <wlc_cfg.h>
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/bitops.h>
#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <bcmdefs.h>
-#include <bcmnvram.h>
-#include <sbchipc.h>
-#include <bcmdevs.h>
-#include <sbhnddma.h>
-#include <wlc_phy_int.h>
-#include <wlc_phyreg_n.h>
-#include <wlc_phy_radio.h>
-#include <wlc_phy_lcn.h>
+#include <brcm_hw_ids.h>
+#include <chipcommon.h>
+#include <aiutils.h>
+#include <d11.h>
+#include <phy_shim.h>
+#include "phy_hal.h"
+#include "phy_int.h"
+#include "phy_radio.h"
+#include "phy_lcn.h"
+#include "phyreg_n.h"
u32 phyhal_msg_level = PHYHAL_ERROR;
-typedef struct _chan_info_basic {
+struct chan_info_basic {
u16 chan;
u16 freq;
-} chan_info_basic_t;
-
-static chan_info_basic_t chan_info_all[] = {
+};
+static struct chan_info_basic chan_info_all[] = {
{1, 2412},
{2, 2417},
{3, 2422},
@@ -117,41 +109,43 @@ u16 ltrn_list[PHY_LTRN_LIST_LEN] = {
const u8 ofdm_rate_lookup[] = {
- WLC_RATE_48M,
- WLC_RATE_24M,
- WLC_RATE_12M,
- WLC_RATE_6M,
- WLC_RATE_54M,
- WLC_RATE_36M,
- WLC_RATE_18M,
- WLC_RATE_9M
+ BRCM_RATE_48M,
+ BRCM_RATE_24M,
+ BRCM_RATE_12M,
+ BRCM_RATE_6M,
+ BRCM_RATE_54M,
+ BRCM_RATE_36M,
+ BRCM_RATE_18M,
+ BRCM_RATE_9M
};
#define PHY_WREG_LIMIT 24
-static void wlc_set_phy_uninitted(phy_info_t *pi);
-static u32 wlc_phy_get_radio_ver(phy_info_t *pi);
+static void wlc_set_phy_uninitted(struct brcms_phy *pi);
+static u32 wlc_phy_get_radio_ver(struct brcms_phy *pi);
static void wlc_phy_timercb_phycal(void *arg);
-static bool wlc_phy_noise_calc_phy(phy_info_t *pi, u32 *cmplx_pwr,
+static bool wlc_phy_noise_calc_phy(struct brcms_phy *pi, u32 *cmplx_pwr,
s8 *pwr_ant);
-static void wlc_phy_cal_perical_mphase_schedule(phy_info_t *pi, uint delay);
-static void wlc_phy_noise_cb(phy_info_t *pi, u8 channel, s8 noise_dbm);
-static void wlc_phy_noise_sample_request(wlc_phy_t *pih, u8 reason,
+static void wlc_phy_cal_perical_mphase_schedule(struct brcms_phy *pi,
+ uint delay);
+
+static void wlc_phy_noise_cb(struct brcms_phy *pi, u8 channel, s8 noise_dbm);
+static void wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason,
u8 ch);
-static void wlc_phy_txpower_reg_limit_calc(phy_info_t *pi,
+static void wlc_phy_txpower_reg_limit_calc(struct brcms_phy *pi,
struct txpwr_limits *tp, chanspec_t);
-static bool wlc_phy_cal_txpower_recalc_sw(phy_info_t *pi);
+static bool wlc_phy_cal_txpower_recalc_sw(struct brcms_phy *pi);
-static s8 wlc_user_txpwr_antport_to_rfport(phy_info_t *pi, uint chan,
+static s8 wlc_user_txpwr_antport_to_rfport(struct brcms_phy *pi, uint chan,
u32 band, u8 rate);
-static void wlc_phy_upd_env_txpwr_rate_limits(phy_info_t *pi, u32 band);
-static s8 wlc_phy_env_measure_vbat(phy_info_t *pi);
-static s8 wlc_phy_env_measure_temperature(phy_info_t *pi);
+static void wlc_phy_upd_env_txpwr_rate_limits(struct brcms_phy *pi, u32 band);
+static s8 wlc_phy_env_measure_vbat(struct brcms_phy *pi);
+static s8 wlc_phy_env_measure_temperature(struct brcms_phy *pi);
-char *phy_getvar(phy_info_t *pi, const char *name)
+char *phy_getvar(struct brcms_phy *pi, const char *name)
{
char *vars = pi->vars;
char *s;
@@ -172,10 +166,10 @@ char *phy_getvar(phy_info_t *pi, const char *name)
;
}
- return nvram_get(name);
+ return NULL;
}
-int phy_getintvar(phy_info_t *pi, const char *name)
+int phy_getintvar(struct brcms_phy *pi, const char *name)
{
char *val;
@@ -186,29 +180,29 @@ int phy_getintvar(phy_info_t *pi, const char *name)
return simple_strtoul(val, NULL, 0);
}
-void wlc_phyreg_enter(wlc_phy_t *pih)
+void wlc_phyreg_enter(struct brcms_phy_pub *pih)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
wlapi_bmac_ucode_wake_override_phyreg_set(pi->sh->physhim);
}
-void wlc_phyreg_exit(wlc_phy_t *pih)
+void wlc_phyreg_exit(struct brcms_phy_pub *pih)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
wlapi_bmac_ucode_wake_override_phyreg_clear(pi->sh->physhim);
}
-void wlc_radioreg_enter(wlc_phy_t *pih)
+void wlc_radioreg_enter(struct brcms_phy_pub *pih)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, MCTL_LOCK_RADIO);
udelay(10);
}
-void wlc_radioreg_exit(wlc_phy_t *pih)
+void wlc_radioreg_exit(struct brcms_phy_pub *pih)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
volatile u16 dummy;
dummy = R_REG(&pi->regs->phyversion);
@@ -216,7 +210,7 @@ void wlc_radioreg_exit(wlc_phy_t *pih)
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0);
}
-u16 read_radio_reg(phy_info_t *pi, u16 addr)
+u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
{
u16 data;
@@ -247,16 +241,10 @@ u16 read_radio_reg(phy_info_t *pi, u16 addr)
if ((D11REV_GE(pi->sh->corerev, 24)) ||
(D11REV_IS(pi->sh->corerev, 22)
&& (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
- W_REG(&pi->regs->radioregaddr, addr);
-#ifdef __mips__
- (void)R_REG(&pi->regs->radioregaddr);
-#endif
+ W_REG_FLUSH(&pi->regs->radioregaddr, addr);
data = R_REG(&pi->regs->radioregdata);
} else {
- W_REG(&pi->regs->phy4waddr, addr);
-#ifdef __mips__
- (void)R_REG(&pi->regs->phy4waddr);
-#endif
+ W_REG_FLUSH(&pi->regs->phy4waddr, addr);
#ifdef __ARM_ARCH_4T__
__asm__(" .align 4 ");
@@ -272,7 +260,7 @@ u16 read_radio_reg(phy_info_t *pi, u16 addr)
return data;
}
-void write_radio_reg(phy_info_t *pi, u16 addr, u16 val)
+void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
if (NORADIO_ENAB(pi->pubpi))
return;
@@ -281,16 +269,10 @@ void write_radio_reg(phy_info_t *pi, u16 addr, u16 val)
(D11REV_IS(pi->sh->corerev, 22)
&& (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
- W_REG(&pi->regs->radioregaddr, addr);
-#ifdef __mips__
- (void)R_REG(&pi->regs->radioregaddr);
-#endif
+ W_REG_FLUSH(&pi->regs->radioregaddr, addr);
W_REG(&pi->regs->radioregdata, val);
} else {
- W_REG(&pi->regs->phy4waddr, addr);
-#ifdef __mips__
- (void)R_REG(&pi->regs->phy4waddr);
-#endif
+ W_REG_FLUSH(&pi->regs->phy4waddr, addr);
W_REG(&pi->regs->phy4wdatalo, val);
}
@@ -302,7 +284,7 @@ void write_radio_reg(phy_info_t *pi, u16 addr, u16 val)
}
}
-static u32 read_radio_id(phy_info_t *pi)
+static u32 read_radio_id(struct brcms_phy *pi)
{
u32 id;
@@ -312,29 +294,17 @@ static u32 read_radio_id(phy_info_t *pi)
if (D11REV_GE(pi->sh->corerev, 24)) {
u32 b0, b1, b2;
- W_REG(&pi->regs->radioregaddr, 0);
-#ifdef __mips__
- (void)R_REG(&pi->regs->radioregaddr);
-#endif
+ W_REG_FLUSH(&pi->regs->radioregaddr, 0);
b0 = (u32) R_REG(&pi->regs->radioregdata);
- W_REG(&pi->regs->radioregaddr, 1);
-#ifdef __mips__
- (void)R_REG(&pi->regs->radioregaddr);
-#endif
+ W_REG_FLUSH(&pi->regs->radioregaddr, 1);
b1 = (u32) R_REG(&pi->regs->radioregdata);
- W_REG(&pi->regs->radioregaddr, 2);
-#ifdef __mips__
- (void)R_REG(&pi->regs->radioregaddr);
-#endif
+ W_REG_FLUSH(&pi->regs->radioregaddr, 2);
b2 = (u32) R_REG(&pi->regs->radioregdata);
id = ((b0 & 0xf) << 28) | (((b2 << 8) | b1) << 12) | ((b0 >> 4)
& 0xf);
} else {
- W_REG(&pi->regs->phy4waddr, RADIO_IDCODE);
-#ifdef __mips__
- (void)R_REG(&pi->regs->phy4waddr);
-#endif
+ W_REG_FLUSH(&pi->regs->phy4waddr, RADIO_IDCODE);
id = (u32) R_REG(&pi->regs->phy4wdatalo);
id |= (u32) R_REG(&pi->regs->phy4wdatahi) << 16;
}
@@ -342,7 +312,7 @@ static u32 read_radio_id(phy_info_t *pi)
return id;
}
-void and_radio_reg(phy_info_t *pi, u16 addr, u16 val)
+void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
u16 rval;
@@ -353,7 +323,7 @@ void and_radio_reg(phy_info_t *pi, u16 addr, u16 val)
write_radio_reg(pi, addr, (rval & val));
}
-void or_radio_reg(phy_info_t *pi, u16 addr, u16 val)
+void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
u16 rval;
@@ -364,7 +334,7 @@ void or_radio_reg(phy_info_t *pi, u16 addr, u16 val)
write_radio_reg(pi, addr, (rval | val));
}
-void xor_radio_reg(phy_info_t *pi, u16 addr, u16 mask)
+void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask)
{
u16 rval;
@@ -375,7 +345,7 @@ void xor_radio_reg(phy_info_t *pi, u16 addr, u16 mask)
write_radio_reg(pi, addr, (rval ^ mask));
}
-void mod_radio_reg(phy_info_t *pi, u16 addr, u16 mask, u16 val)
+void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
{
u16 rval;
@@ -386,35 +356,31 @@ void mod_radio_reg(phy_info_t *pi, u16 addr, u16 mask, u16 val)
write_radio_reg(pi, addr, (rval & ~mask) | (val & mask));
}
-void write_phy_channel_reg(phy_info_t *pi, uint val)
+void write_phy_channel_reg(struct brcms_phy *pi, uint val)
{
W_REG(&pi->regs->phychannel, val);
}
-u16 read_phy_reg(phy_info_t *pi, u16 addr)
+u16 read_phy_reg(struct brcms_phy *pi, u16 addr)
{
d11regs_t *regs;
regs = pi->regs;
- W_REG(&regs->phyregaddr, addr);
-#ifdef __mips__
- (void)R_REG(&regs->phyregaddr);
-#endif
+ W_REG_FLUSH(&regs->phyregaddr, addr);
pi->phy_wreg = 0;
return R_REG(&regs->phyregdata);
}
-void write_phy_reg(phy_info_t *pi, u16 addr, u16 val)
+void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
d11regs_t *regs;
regs = pi->regs;
#ifdef __mips__
- W_REG(&regs->phyregaddr, addr);
- (void)R_REG(&regs->phyregaddr);
+ W_REG_FLUSH(&regs->phyregaddr, addr);
W_REG(&regs->phyregdata, val);
if (addr == 0x72)
(void)R_REG(&regs->phyregdata);
@@ -430,53 +396,44 @@ void write_phy_reg(phy_info_t *pi, u16 addr, u16 val)
#endif
}
-void and_phy_reg(phy_info_t *pi, u16 addr, u16 val)
+void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
d11regs_t *regs;
regs = pi->regs;
- W_REG(&regs->phyregaddr, addr);
-#ifdef __mips__
- (void)R_REG(&regs->phyregaddr);
-#endif
+ W_REG_FLUSH(&regs->phyregaddr, addr);
W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) & val));
pi->phy_wreg = 0;
}
-void or_phy_reg(phy_info_t *pi, u16 addr, u16 val)
+void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
d11regs_t *regs;
regs = pi->regs;
- W_REG(&regs->phyregaddr, addr);
-#ifdef __mips__
- (void)R_REG(&regs->phyregaddr);
-#endif
+ W_REG_FLUSH(&regs->phyregaddr, addr);
W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) | val));
pi->phy_wreg = 0;
}
-void mod_phy_reg(phy_info_t *pi, u16 addr, u16 mask, u16 val)
+void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
{
d11regs_t *regs;
regs = pi->regs;
- W_REG(&regs->phyregaddr, addr);
-#ifdef __mips__
- (void)R_REG(&regs->phyregaddr);
-#endif
+ W_REG_FLUSH(&regs->phyregaddr, addr);
W_REG(&regs->phyregdata,
((R_REG(&regs->phyregdata) & ~mask) | (val & mask)));
pi->phy_wreg = 0;
}
-static void WLBANDINITFN(wlc_set_phy_uninitted) (phy_info_t *pi)
+static void wlc_set_phy_uninitted(struct brcms_phy *pi)
{
int i, j;
@@ -513,11 +470,11 @@ static void WLBANDINITFN(wlc_set_phy_uninitted) (phy_info_t *pi)
}
}
-shared_phy_t *wlc_phy_shared_attach(shared_phy_params_t *shp)
+struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
{
- shared_phy_t *sh;
+ struct shared_phy *sh;
- sh = kzalloc(sizeof(shared_phy_t), GFP_ATOMIC);
+ sh = kzalloc(sizeof(struct shared_phy), GFP_ATOMIC);
if (sh == NULL) {
return NULL;
}
@@ -550,19 +507,14 @@ shared_phy_t *wlc_phy_shared_attach(shared_phy_params_t *shp)
return sh;
}
-void wlc_phy_shared_detach(shared_phy_t *phy_sh)
+struct brcms_phy_pub *
+wlc_phy_attach(struct shared_phy *sh, void *regs, int bandtype,
+ char *vars, struct wiphy *wiphy)
{
- if (phy_sh) {
- kfree(phy_sh);
- }
-}
-
-wlc_phy_t *wlc_phy_attach(shared_phy_t *sh, void *regs, int bandtype,
- char *vars, struct wiphy *wiphy)
-{
- phy_info_t *pi;
+ struct brcms_phy *pi;
u32 sflags = 0;
uint phyversion;
+ u32 idcode;
int i;
if (D11REV_IS(sh->corerev, 4))
@@ -584,7 +536,7 @@ wlc_phy_t *wlc_phy_attach(shared_phy_t *sh, void *regs, int bandtype,
return &pi->pubpi_ro;
}
- pi = kzalloc(sizeof(phy_info_t), GFP_ATOMIC);
+ pi = kzalloc(sizeof(struct brcms_phy), GFP_ATOMIC);
if (pi == NULL) {
return NULL;
}
@@ -633,27 +585,19 @@ wlc_phy_t *wlc_phy_attach(shared_phy_t *sh, void *regs, int bandtype,
}
}
- if (ISSIM_ENAB(pi->sh->sih)) {
- pi->pubpi.radioid = NORADIO_ID;
- pi->pubpi.radiorev = 5;
- } else {
- u32 idcode;
-
- wlc_phy_anacore((wlc_phy_t *) pi, ON);
-
- idcode = wlc_phy_get_radio_ver(pi);
- pi->pubpi.radioid =
- (idcode & IDCODE_ID_MASK) >> IDCODE_ID_SHIFT;
- pi->pubpi.radiorev =
- (idcode & IDCODE_REV_MASK) >> IDCODE_REV_SHIFT;
- pi->pubpi.radiover =
- (idcode & IDCODE_VER_MASK) >> IDCODE_VER_SHIFT;
- if (!VALID_RADIO(pi, pi->pubpi.radioid)) {
- goto err;
- }
+ wlc_phy_anacore((struct brcms_phy_pub *) pi, ON);
- wlc_phy_switch_radio((wlc_phy_t *) pi, OFF);
- }
+ idcode = wlc_phy_get_radio_ver(pi);
+ pi->pubpi.radioid =
+ (idcode & IDCODE_ID_MASK) >> IDCODE_ID_SHIFT;
+ pi->pubpi.radiorev =
+ (idcode & IDCODE_REV_MASK) >> IDCODE_REV_SHIFT;
+ pi->pubpi.radiover =
+ (idcode & IDCODE_VER_MASK) >> IDCODE_VER_SHIFT;
+ if (!VALID_RADIO(pi, pi->pubpi.radioid))
+ goto err;
+
+ wlc_phy_switch_radio((struct brcms_phy_pub *) pi, OFF);
wlc_set_phy_uninitted(pi);
@@ -692,9 +636,9 @@ wlc_phy_t *wlc_phy_attach(shared_phy_t *sh, void *regs, int bandtype,
pi->phynoise_polling = false;
for (i = 0; i < TXP_NUM_RATES; i++) {
- pi->txpwr_limit[i] = WLC_TXPWR_MAX;
- pi->txpwr_env_limit[i] = WLC_TXPWR_MAX;
- pi->tx_user_target[i] = WLC_TXPWR_MAX;
+ pi->txpwr_limit[i] = BRCMS_TXPWR_MAX;
+ pi->txpwr_env_limit[i] = BRCMS_TXPWR_MAX;
+ pi->tx_user_target[i] = BRCMS_TXPWR_MAX;
}
pi->radiopwr_override = RADIOPWR_OVERRIDE_DEF;
@@ -727,7 +671,7 @@ wlc_phy_t *wlc_phy_attach(shared_phy_t *sh, void *regs, int bandtype,
pi->vars = (char *)&pi->vars;
- memcpy(&pi->pubpi_ro, &pi->pubpi, sizeof(wlc_phy_t));
+ memcpy(&pi->pubpi_ro, &pi->pubpi, sizeof(struct brcms_phy_pub));
return &pi->pubpi_ro;
@@ -736,9 +680,9 @@ wlc_phy_t *wlc_phy_attach(shared_phy_t *sh, void *regs, int bandtype,
return NULL;
}
-void wlc_phy_detach(wlc_phy_t *pih)
+void wlc_phy_detach(struct brcms_phy_pub *pih)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
if (pih) {
if (--pi->refcnt) {
@@ -763,10 +707,10 @@ void wlc_phy_detach(wlc_phy_t *pih)
}
bool
-wlc_phy_get_phyversion(wlc_phy_t *pih, u16 *phytype, u16 *phyrev,
+wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype, u16 *phyrev,
u16 *radioid, u16 *radiover)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
*phytype = (u16) pi->pubpi.phy_type;
*phyrev = (u16) pi->pubpi.phy_rev;
*radioid = pi->pubpi.radioid;
@@ -775,21 +719,21 @@ wlc_phy_get_phyversion(wlc_phy_t *pih, u16 *phytype, u16 *phyrev,
return true;
}
-bool wlc_phy_get_encore(wlc_phy_t *pih)
+bool wlc_phy_get_encore(struct brcms_phy_pub *pih)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
return pi->pubpi.abgphy_encore;
}
-u32 wlc_phy_get_coreflags(wlc_phy_t *pih)
+u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
return pi->pubpi.coreflags;
}
static void wlc_phy_timercb_phycal(void *arg)
{
- phy_info_t *pi = (phy_info_t *) arg;
+ struct brcms_phy *pi = (struct brcms_phy *) arg;
uint delay = 5;
if (PHY_PERICAL_MPHASE_PENDING(pi)) {
@@ -810,9 +754,9 @@ static void wlc_phy_timercb_phycal(void *arg)
}
-void wlc_phy_anacore(wlc_phy_t *pih, bool on)
+void wlc_phy_anacore(struct brcms_phy_pub *pih, bool on)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
if (ISNPHY(pi)) {
if (on) {
@@ -847,9 +791,9 @@ void wlc_phy_anacore(wlc_phy_t *pih, bool on)
}
}
-u32 wlc_phy_clk_bwbits(wlc_phy_t *pih)
+u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
u32 phy_bw_clkbits = 0;
@@ -872,16 +816,16 @@ u32 wlc_phy_clk_bwbits(wlc_phy_t *pih)
return phy_bw_clkbits;
}
-void WLBANDINITFN(wlc_phy_por_inform) (wlc_phy_t *ppi)
+void wlc_phy_por_inform(struct brcms_phy_pub *ppi)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
pi->phy_init_por = true;
}
-void wlc_phy_edcrs_lock(wlc_phy_t *pih, bool lock)
+void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
pi->edcrs_threshold_lock = lock;
@@ -891,16 +835,16 @@ void wlc_phy_edcrs_lock(wlc_phy_t *pih, bool lock)
write_phy_reg(pi, 0x22f, 0x3c0);
}
-void wlc_phy_initcal_enable(wlc_phy_t *pih, bool initcal)
+void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
pi->do_initcal = initcal;
}
-void wlc_phy_hw_clk_state_upd(wlc_phy_t *pih, bool newstate)
+void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *pih, bool newstate)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
if (!pi || !pi->sh)
return;
@@ -908,9 +852,9 @@ void wlc_phy_hw_clk_state_upd(wlc_phy_t *pih, bool newstate)
pi->sh->clk = newstate;
}
-void wlc_phy_hw_state_upd(wlc_phy_t *pih, bool newstate)
+void wlc_phy_hw_state_upd(struct brcms_phy_pub *pih, bool newstate)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
if (!pi || !pi->sh)
return;
@@ -918,11 +862,11 @@ void wlc_phy_hw_state_upd(wlc_phy_t *pih, bool newstate)
pi->sh->up = newstate;
}
-void WLBANDINITFN(wlc_phy_init) (wlc_phy_t *pih, chanspec_t chanspec)
+void wlc_phy_init(struct brcms_phy_pub *pih, chanspec_t chanspec)
{
u32 mc;
initfn_t phy_init = NULL;
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
if (pi->init_in_progress)
return;
@@ -957,7 +901,7 @@ void WLBANDINITFN(wlc_phy_init) (wlc_phy_t *pih, chanspec_t chanspec)
pi->nphy_gain_boost = true;
- wlc_phy_switch_radio((wlc_phy_t *) pi, ON);
+ wlc_phy_switch_radio((struct brcms_phy_pub *) pi, ON);
(*phy_init) (pi);
@@ -969,14 +913,14 @@ void WLBANDINITFN(wlc_phy_init) (wlc_phy_t *pih, chanspec_t chanspec)
if (!(ISNPHY(pi)))
wlc_phy_txpower_update_shm(pi);
- wlc_phy_ant_rxdiv_set((wlc_phy_t *) pi, pi->sh->rx_antdiv);
+ wlc_phy_ant_rxdiv_set((struct brcms_phy_pub *) pi, pi->sh->rx_antdiv);
pi->init_in_progress = false;
}
-void wlc_phy_cal_init(wlc_phy_t *pih)
+void wlc_phy_cal_init(struct brcms_phy_pub *pih)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
initfn_t cal_init = NULL;
if (WARN((R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) != 0,
@@ -992,9 +936,9 @@ void wlc_phy_cal_init(wlc_phy_t *pih)
}
}
-int wlc_phy_down(wlc_phy_t *pih)
+int wlc_phy_down(struct brcms_phy_pub *pih)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
int callbacks = 0;
if (pi->phycal_timer
@@ -1007,7 +951,7 @@ int wlc_phy_down(wlc_phy_t *pih)
return callbacks;
}
-static u32 wlc_phy_get_radio_ver(phy_info_t *pi)
+static u32 wlc_phy_get_radio_ver(struct brcms_phy *pi)
{
u32 ver;
@@ -1017,7 +961,7 @@ static u32 wlc_phy_get_radio_ver(phy_info_t *pi)
}
void
-wlc_phy_table_addr(phy_info_t *pi, uint tbl_id, uint tbl_offset,
+wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
u16 tblAddr, u16 tblDataHi, u16 tblDataLo)
{
write_phy_reg(pi, tblAddr, (tbl_id << 10) | tbl_offset);
@@ -1025,19 +969,17 @@ wlc_phy_table_addr(phy_info_t *pi, uint tbl_id, uint tbl_offset,
pi->tbl_data_hi = tblDataHi;
pi->tbl_data_lo = tblDataLo;
- if ((pi->sh->chip == BCM43224_CHIP_ID ||
- pi->sh->chip == BCM43421_CHIP_ID) &&
- (pi->sh->chiprev == 1)) {
+ if (pi->sh->chip == BCM43224_CHIP_ID &&
+ pi->sh->chiprev == 1) {
pi->tbl_addr = tblAddr;
pi->tbl_save_id = tbl_id;
pi->tbl_save_offset = tbl_offset;
}
}
-void wlc_phy_table_data_write(phy_info_t *pi, uint width, u32 val)
+void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val)
{
- if ((pi->sh->chip == BCM43224_CHIP_ID ||
- pi->sh->chip == BCM43421_CHIP_ID) &&
+ if ((pi->sh->chip == BCM43224_CHIP_ID) &&
(pi->sh->chiprev == 1) &&
(pi->tbl_save_id == NPHY_TBL_ID_ANTSWCTRLLUT)) {
read_phy_reg(pi, pi->tbl_data_lo);
@@ -1058,7 +1000,7 @@ void wlc_phy_table_data_write(phy_info_t *pi, uint width, u32 val)
}
void
-wlc_phy_write_table(phy_info_t *pi, const phytbl_info_t *ptbl_info,
+wlc_phy_write_table(struct brcms_phy *pi, const struct phytbl_info *ptbl_info,
u16 tblAddr, u16 tblDataHi, u16 tblDataLo)
{
uint idx;
@@ -1073,8 +1015,7 @@ wlc_phy_write_table(phy_info_t *pi, const phytbl_info_t *ptbl_info,
for (idx = 0; idx < ptbl_info->tbl_len; idx++) {
- if ((pi->sh->chip == BCM43224_CHIP_ID ||
- pi->sh->chip == BCM43421_CHIP_ID) &&
+ if ((pi->sh->chip == BCM43224_CHIP_ID) &&
(pi->sh->chiprev == 1) &&
(tbl_id == NPHY_TBL_ID_ANTSWCTRLLUT)) {
read_phy_reg(pi, tblDataLo);
@@ -1099,7 +1040,7 @@ wlc_phy_write_table(phy_info_t *pi, const phytbl_info_t *ptbl_info,
}
void
-wlc_phy_read_table(phy_info_t *pi, const phytbl_info_t *ptbl_info,
+wlc_phy_read_table(struct brcms_phy *pi, const struct phytbl_info *ptbl_info,
u16 tblAddr, u16 tblDataHi, u16 tblDataLo)
{
uint idx;
@@ -1114,8 +1055,7 @@ wlc_phy_read_table(phy_info_t *pi, const phytbl_info_t *ptbl_info,
for (idx = 0; idx < ptbl_info->tbl_len; idx++) {
- if ((pi->sh->chip == BCM43224_CHIP_ID ||
- pi->sh->chip == BCM43421_CHIP_ID) &&
+ if ((pi->sh->chip == BCM43224_CHIP_ID) &&
(pi->sh->chiprev == 1)) {
(void)read_phy_reg(pi, tblDataLo);
@@ -1138,7 +1078,8 @@ wlc_phy_read_table(phy_info_t *pi, const phytbl_info_t *ptbl_info,
}
uint
-wlc_phy_init_radio_regs_allbands(phy_info_t *pi, radio_20xx_regs_t *radioregs)
+wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
+ struct radio_20xx_regs *radioregs)
{
uint i = 0;
@@ -1155,7 +1096,7 @@ wlc_phy_init_radio_regs_allbands(phy_info_t *pi, radio_20xx_regs_t *radioregs)
}
uint
-wlc_phy_init_radio_regs(phy_info_t *pi, radio_regs_t *radioregs,
+wlc_phy_init_radio_regs(struct brcms_phy *pi, struct radio_regs *radioregs,
u16 core_offset)
{
uint i = 0;
@@ -1169,7 +1110,7 @@ wlc_phy_init_radio_regs(phy_info_t *pi, radio_regs_t *radioregs,
address | core_offset,
(u16) radioregs[i].init_a);
if (ISNPHY(pi) && (++count % 4 == 0))
- WLC_PHY_WAR_PR51571(pi);
+ BRCMS_PHY_WAR_PR51571(pi);
}
} else {
if (radioregs[i].do_init_g) {
@@ -1178,7 +1119,7 @@ wlc_phy_init_radio_regs(phy_info_t *pi, radio_regs_t *radioregs,
address | core_offset,
(u16) radioregs[i].init_g);
if (ISNPHY(pi) && (++count % 4 == 0))
- WLC_PHY_WAR_PR51571(pi);
+ BRCMS_PHY_WAR_PR51571(pi);
}
}
@@ -1188,7 +1129,7 @@ wlc_phy_init_radio_regs(phy_info_t *pi, radio_regs_t *radioregs,
return i;
}
-void wlc_phy_do_dummy_tx(phy_info_t *pi, bool ofdm, bool pa_on)
+void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
{
#define DUMMY_PKT_LEN 20
d11regs_t *regs = pi->regs;
@@ -1243,11 +1184,6 @@ void wlc_phy_do_dummy_tx(phy_info_t *pi, bool ofdm, bool pa_on)
i = 0;
count = ofdm ? 30 : 250;
-
- if (ISSIM_ENAB(pi->sh->sih)) {
- count *= 100;
- }
-
while ((i++ < count)
&& (R_REG(&regs->txe_status) & (1 << 7))) {
udelay(10);
@@ -1271,9 +1207,9 @@ void wlc_phy_do_dummy_tx(phy_info_t *pi, bool ofdm, bool pa_on)
}
}
-void wlc_phy_hold_upd(wlc_phy_t *pih, mbool id, bool set)
+void wlc_phy_hold_upd(struct brcms_phy_pub *pih, mbool id, bool set)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
if (set) {
mboolset(pi->measure_hold, id);
@@ -1284,9 +1220,9 @@ void wlc_phy_hold_upd(wlc_phy_t *pih, mbool id, bool set)
return;
}
-void wlc_phy_mute_upd(wlc_phy_t *pih, bool mute, mbool flags)
+void wlc_phy_mute_upd(struct brcms_phy_pub *pih, bool mute, mbool flags)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
if (mute) {
mboolset(pi->measure_hold, PHY_HOLD_FOR_MUTE);
@@ -1299,9 +1235,9 @@ void wlc_phy_mute_upd(wlc_phy_t *pih, bool mute, mbool flags)
return;
}
-void wlc_phy_clear_tssi(wlc_phy_t *pih)
+void wlc_phy_clear_tssi(struct brcms_phy_pub *pih)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
if (ISNPHY(pi)) {
return;
@@ -1313,14 +1249,14 @@ void wlc_phy_clear_tssi(wlc_phy_t *pih)
}
}
-static bool wlc_phy_cal_txpower_recalc_sw(phy_info_t *pi)
+static bool wlc_phy_cal_txpower_recalc_sw(struct brcms_phy *pi)
{
return false;
}
-void wlc_phy_switch_radio(wlc_phy_t *pih, bool on)
+void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
if (NORADIO_ENAB(pi->pubpi))
return;
@@ -1361,37 +1297,37 @@ void wlc_phy_switch_radio(wlc_phy_t *pih, bool on)
}
}
-u16 wlc_phy_bw_state_get(wlc_phy_t *ppi)
+u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
return pi->bw;
}
-void wlc_phy_bw_state_set(wlc_phy_t *ppi, u16 bw)
+void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
pi->bw = bw;
}
-void wlc_phy_chanspec_radio_set(wlc_phy_t *ppi, chanspec_t newch)
+void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi, chanspec_t newch)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
pi->radio_chanspec = newch;
}
-chanspec_t wlc_phy_chanspec_get(wlc_phy_t *ppi)
+chanspec_t wlc_phy_chanspec_get(struct brcms_phy_pub *ppi)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
return pi->radio_chanspec;
}
-void wlc_phy_chanspec_set(wlc_phy_t *ppi, chanspec_t chanspec)
+void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, chanspec_t chanspec)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
u16 m_cur_channel;
chansetfn_t chanspec_set = NULL;
@@ -1424,7 +1360,7 @@ int wlc_phy_chanspec_freq2bandrange_lpssn(uint freq)
return range;
}
-int wlc_phy_chanspec_bandrange_get(phy_info_t *pi, chanspec_t chanspec)
+int wlc_phy_chanspec_bandrange_get(struct brcms_phy *pi, chanspec_t chanspec)
{
int range = -1;
uint channel = CHSPEC_CHANNEL(chanspec);
@@ -1439,9 +1375,10 @@ int wlc_phy_chanspec_bandrange_get(phy_info_t *pi, chanspec_t chanspec)
return range;
}
-void wlc_phy_chanspec_ch14_widefilter_set(wlc_phy_t *ppi, bool wide_filter)
+void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
+ bool wide_filter)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
pi->channel_14_wide_filter = wide_filter;
@@ -1458,9 +1395,10 @@ int wlc_phy_channel2freq(uint channel)
}
void
-wlc_phy_chanspec_band_validch(wlc_phy_t *ppi, uint band, chanvec_t *channels)
+wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
+ chanvec_t *channels)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
uint i;
uint channel;
@@ -1473,15 +1411,15 @@ wlc_phy_chanspec_band_validch(wlc_phy_t *ppi, uint band, chanvec_t *channels)
&& (channel <= LAST_REF5_CHANNUM))
continue;
- if (((band == WLC_BAND_2G) && (channel <= CH_MAX_2G_CHANNEL)) ||
- ((band == WLC_BAND_5G) && (channel > CH_MAX_2G_CHANNEL)))
+ if ((band == BRCM_BAND_2G && channel <= CH_MAX_2G_CHANNEL) ||
+ (band == BRCM_BAND_5G && channel > CH_MAX_2G_CHANNEL))
setbit(channels->vec, channel);
}
}
-chanspec_t wlc_phy_chanspec_band_firstch(wlc_phy_t *ppi, uint band)
+chanspec_t wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
uint i;
uint channel;
chanspec_t chspec;
@@ -1505,7 +1443,7 @@ chanspec_t wlc_phy_chanspec_band_firstch(wlc_phy_t *ppi, uint band)
chspec =
channel | WL_CHANSPEC_BW_40 |
WL_CHANSPEC_CTL_SB_LOWER;
- if (band == WLC_BAND_2G)
+ if (band == BRCM_BAND_2G)
chspec |= WL_CHANSPEC_BAND_2G;
else
chspec |= WL_CHANSPEC_BAND_5G;
@@ -1516,17 +1454,17 @@ chanspec_t wlc_phy_chanspec_band_firstch(wlc_phy_t *ppi, uint band)
&& (channel <= LAST_REF5_CHANNUM))
continue;
- if (((band == WLC_BAND_2G) && (channel <= CH_MAX_2G_CHANNEL)) ||
- ((band == WLC_BAND_5G) && (channel > CH_MAX_2G_CHANNEL)))
+ if ((band == BRCM_BAND_2G && channel <= CH_MAX_2G_CHANNEL) ||
+ (band == BRCM_BAND_5G && channel > CH_MAX_2G_CHANNEL))
return chspec;
}
return (chanspec_t) INVCHANSPEC;
}
-int wlc_phy_txpower_get(wlc_phy_t *ppi, uint *qdbm, bool *override)
+int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, bool *override)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
*qdbm = pi->tx_user_target[0];
if (override != NULL)
@@ -1534,41 +1472,42 @@ int wlc_phy_txpower_get(wlc_phy_t *ppi, uint *qdbm, bool *override)
return 0;
}
-void wlc_phy_txpower_target_set(wlc_phy_t *ppi, struct txpwr_limits *txpwr)
+void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
+ struct txpwr_limits *txpwr)
{
bool mac_enabled = false;
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
memcpy(&pi->tx_user_target[TXP_FIRST_CCK],
- &txpwr->cck[0], WLC_NUM_RATES_CCK);
+ &txpwr->cck[0], BRCMS_NUM_RATES_CCK);
memcpy(&pi->tx_user_target[TXP_FIRST_OFDM],
- &txpwr->ofdm[0], WLC_NUM_RATES_OFDM);
+ &txpwr->ofdm[0], BRCMS_NUM_RATES_OFDM);
memcpy(&pi->tx_user_target[TXP_FIRST_OFDM_20_CDD],
- &txpwr->ofdm_cdd[0], WLC_NUM_RATES_OFDM);
+ &txpwr->ofdm_cdd[0], BRCMS_NUM_RATES_OFDM);
memcpy(&pi->tx_user_target[TXP_FIRST_OFDM_40_SISO],
- &txpwr->ofdm_40_siso[0], WLC_NUM_RATES_OFDM);
+ &txpwr->ofdm_40_siso[0], BRCMS_NUM_RATES_OFDM);
memcpy(&pi->tx_user_target[TXP_FIRST_OFDM_40_CDD],
- &txpwr->ofdm_40_cdd[0], WLC_NUM_RATES_OFDM);
+ &txpwr->ofdm_40_cdd[0], BRCMS_NUM_RATES_OFDM);
memcpy(&pi->tx_user_target[TXP_FIRST_MCS_20_SISO],
- &txpwr->mcs_20_siso[0], WLC_NUM_RATES_MCS_1_STREAM);
+ &txpwr->mcs_20_siso[0], BRCMS_NUM_RATES_MCS_1_STREAM);
memcpy(&pi->tx_user_target[TXP_FIRST_MCS_20_CDD],
- &txpwr->mcs_20_cdd[0], WLC_NUM_RATES_MCS_1_STREAM);
+ &txpwr->mcs_20_cdd[0], BRCMS_NUM_RATES_MCS_1_STREAM);
memcpy(&pi->tx_user_target[TXP_FIRST_MCS_20_STBC],
- &txpwr->mcs_20_stbc[0], WLC_NUM_RATES_MCS_1_STREAM);
+ &txpwr->mcs_20_stbc[0], BRCMS_NUM_RATES_MCS_1_STREAM);
memcpy(&pi->tx_user_target[TXP_FIRST_MCS_20_SDM],
- &txpwr->mcs_20_mimo[0], WLC_NUM_RATES_MCS_2_STREAM);
+ &txpwr->mcs_20_mimo[0], BRCMS_NUM_RATES_MCS_2_STREAM);
memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_SISO],
- &txpwr->mcs_40_siso[0], WLC_NUM_RATES_MCS_1_STREAM);
+ &txpwr->mcs_40_siso[0], BRCMS_NUM_RATES_MCS_1_STREAM);
memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_CDD],
- &txpwr->mcs_40_cdd[0], WLC_NUM_RATES_MCS_1_STREAM);
+ &txpwr->mcs_40_cdd[0], BRCMS_NUM_RATES_MCS_1_STREAM);
memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_STBC],
- &txpwr->mcs_40_stbc[0], WLC_NUM_RATES_MCS_1_STREAM);
+ &txpwr->mcs_40_stbc[0], BRCMS_NUM_RATES_MCS_1_STREAM);
memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_SDM],
- &txpwr->mcs_40_mimo[0], WLC_NUM_RATES_MCS_2_STREAM);
+ &txpwr->mcs_40_mimo[0], BRCMS_NUM_RATES_MCS_2_STREAM);
if (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)
mac_enabled = true;
@@ -1583,9 +1522,9 @@ void wlc_phy_txpower_target_set(wlc_phy_t *ppi, struct txpwr_limits *txpwr)
wlapi_enable_mac(pi->sh->physhim);
}
-int wlc_phy_txpower_set(wlc_phy_t *ppi, uint qdbm, bool override)
+int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
int i;
if (qdbm > 127)
@@ -1619,13 +1558,13 @@ int wlc_phy_txpower_set(wlc_phy_t *ppi, uint qdbm, bool override)
}
void
-wlc_phy_txpower_sromlimit(wlc_phy_t *ppi, uint channel, u8 *min_pwr,
+wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint channel, u8 *min_pwr,
u8 *max_pwr, int txp_rate_idx)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
uint i;
- *min_pwr = pi->min_txpower * WLC_TXPWR_DB_FACTOR;
+ *min_pwr = pi->min_txpower * BRCMS_TXPWR_DB_FACTOR;
if (ISNPHY(pi)) {
if (txp_rate_idx < 0)
@@ -1639,7 +1578,7 @@ wlc_phy_txpower_sromlimit(wlc_phy_t *ppi, uint channel, u8 *min_pwr,
*max_pwr = pi->tx_srom_max_rate_2g[txp_rate_idx];
} else {
- *max_pwr = WLC_TXPWR_MAX;
+ *max_pwr = BRCMS_TXPWR_MAX;
if (txp_rate_idx < 0)
txp_rate_idx = TXP_FIRST_OFDM;
@@ -1669,10 +1608,10 @@ wlc_phy_txpower_sromlimit(wlc_phy_t *ppi, uint channel, u8 *min_pwr,
}
void
-wlc_phy_txpower_sromlimit_max_get(wlc_phy_t *ppi, uint chan, u8 *max_txpwr,
- u8 *min_txpwr)
+wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi, uint chan,
+ u8 *max_txpwr, u8 *min_txpwr)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
u8 tx_pwr_max = 0;
u8 tx_pwr_min = 255;
u8 max_num_rate;
@@ -1700,27 +1639,27 @@ wlc_phy_txpower_sromlimit_max_get(wlc_phy_t *ppi, uint chan, u8 *max_txpwr,
}
void
-wlc_phy_txpower_boardlimit_band(wlc_phy_t *ppi, uint bandunit, s32 *max_pwr,
- s32 *min_pwr, u32 *step_pwr)
+wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi, uint bandunit,
+ s32 *max_pwr, s32 *min_pwr, u32 *step_pwr)
{
return;
}
-u8 wlc_phy_txpower_get_target_min(wlc_phy_t *ppi)
+u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
return pi->tx_power_min;
}
-u8 wlc_phy_txpower_get_target_max(wlc_phy_t *ppi)
+u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
return pi->tx_power_max;
}
-void wlc_phy_txpower_recalc_target(phy_info_t *pi)
+void wlc_phy_txpower_recalc_target(struct brcms_phy *pi)
{
u8 maxtxpwr, mintxpwr, rate, pactrl;
uint target_chan;
@@ -1731,7 +1670,7 @@ void wlc_phy_txpower_recalc_target(phy_info_t *pi)
u8 max_num_rate;
u8 start_rate = 0;
chanspec_t chspec;
- u32 band = CHSPEC2WLC_BAND(pi->radio_chanspec);
+ u32 band = CHSPEC2BAND(pi->radio_chanspec);
initfn_t txpwr_recalc_fn = NULL;
chspec = pi->radio_chanspec;
@@ -1788,7 +1727,8 @@ void wlc_phy_txpower_recalc_target(phy_info_t *pi)
{
- wlc_phy_txpower_sromlimit((wlc_phy_t *) pi, target_chan,
+ wlc_phy_txpower_sromlimit((struct brcms_phy_pub *) pi,
+ target_chan,
&mintxpwr, &maxtxpwr, rate);
maxtxpwr = min(maxtxpwr, pi->txpwr_limit[rate]);
@@ -1839,10 +1779,10 @@ void wlc_phy_txpower_recalc_target(phy_info_t *pi)
}
void
-wlc_phy_txpower_reg_limit_calc(phy_info_t *pi, struct txpwr_limits *txpwr,
+wlc_phy_txpower_reg_limit_calc(struct brcms_phy *pi, struct txpwr_limits *txpwr,
chanspec_t chanspec)
{
- u8 tmp_txpwr_limit[2 * WLC_NUM_RATES_OFDM];
+ u8 tmp_txpwr_limit[2 * BRCMS_NUM_RATES_OFDM];
u8 *txpwr_ptr1 = NULL, *txpwr_ptr2 = NULL;
int rate_start_index = 0, rate1, rate2, k;
@@ -1885,16 +1825,15 @@ wlc_phy_txpower_reg_limit_calc(phy_info_t *pi, struct txpwr_limits *txpwr,
break;
}
- for (rate2 = 0; rate2 < WLC_NUM_RATES_OFDM; rate2++) {
+ for (rate2 = 0; rate2 < BRCMS_NUM_RATES_OFDM; rate2++) {
tmp_txpwr_limit[rate2] = 0;
- tmp_txpwr_limit[WLC_NUM_RATES_OFDM + rate2] =
+ tmp_txpwr_limit[BRCMS_NUM_RATES_OFDM + rate2] =
txpwr_ptr1[rate2];
}
wlc_phy_mcs_to_ofdm_powers_nphy(tmp_txpwr_limit, 0,
- WLC_NUM_RATES_OFDM - 1,
- WLC_NUM_RATES_OFDM);
+ BRCMS_NUM_RATES_OFDM - 1, BRCMS_NUM_RATES_OFDM);
for (rate1 = rate_start_index, rate2 = 0;
- rate2 < WLC_NUM_RATES_OFDM; rate1++, rate2++)
+ rate2 < BRCMS_NUM_RATES_OFDM; rate1++, rate2++)
pi->txpwr_limit[rate1] =
min(txpwr_ptr2[rate2],
tmp_txpwr_limit[rate2]);
@@ -1927,16 +1866,15 @@ wlc_phy_txpower_reg_limit_calc(phy_info_t *pi, struct txpwr_limits *txpwr,
rate_start_index = WL_TX_POWER_MCS40_CDD_FIRST;
break;
}
- for (rate2 = 0; rate2 < WLC_NUM_RATES_OFDM; rate2++) {
+ for (rate2 = 0; rate2 < BRCMS_NUM_RATES_OFDM; rate2++) {
tmp_txpwr_limit[rate2] = 0;
- tmp_txpwr_limit[WLC_NUM_RATES_OFDM + rate2] =
+ tmp_txpwr_limit[BRCMS_NUM_RATES_OFDM + rate2] =
txpwr_ptr1[rate2];
}
wlc_phy_ofdm_to_mcs_powers_nphy(tmp_txpwr_limit, 0,
- WLC_NUM_RATES_OFDM - 1,
- WLC_NUM_RATES_OFDM);
+ BRCMS_NUM_RATES_OFDM - 1, BRCMS_NUM_RATES_OFDM);
for (rate1 = rate_start_index, rate2 = 0;
- rate2 < WLC_NUM_RATES_MCS_1_STREAM;
+ rate2 < BRCMS_NUM_RATES_MCS_1_STREAM;
rate1++, rate2++)
pi->txpwr_limit[rate1] =
min(txpwr_ptr2[rate2],
@@ -1957,7 +1895,7 @@ wlc_phy_txpower_reg_limit_calc(phy_info_t *pi, struct txpwr_limits *txpwr,
break;
}
for (rate1 = rate_start_index, rate2 = 0;
- rate2 < WLC_NUM_RATES_MCS_1_STREAM;
+ rate2 < BRCMS_NUM_RATES_MCS_1_STREAM;
rate1++, rate2++)
pi->txpwr_limit[rate1] = txpwr_ptr1[rate2];
}
@@ -1976,7 +1914,7 @@ wlc_phy_txpower_reg_limit_calc(phy_info_t *pi, struct txpwr_limits *txpwr,
break;
}
for (rate1 = rate_start_index, rate2 = 0;
- rate2 < WLC_NUM_RATES_MCS_2_STREAM;
+ rate2 < BRCMS_NUM_RATES_MCS_2_STREAM;
rate1++, rate2++)
pi->txpwr_limit[rate1] = txpwr_ptr1[rate2];
}
@@ -1991,23 +1929,23 @@ wlc_phy_txpower_reg_limit_calc(phy_info_t *pi, struct txpwr_limits *txpwr,
}
}
-void wlc_phy_txpwr_percent_set(wlc_phy_t *ppi, u8 txpwr_percent)
+void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi, u8 txpwr_percent)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
pi->txpwr_percent = txpwr_percent;
}
-void wlc_phy_machwcap_set(wlc_phy_t *ppi, u32 machwcap)
+void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
pi->sh->machwcap = machwcap;
}
-void wlc_phy_runbist_config(wlc_phy_t *ppi, bool start_end)
+void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
u16 rxc;
rxc = 0;
@@ -2036,17 +1974,17 @@ void wlc_phy_runbist_config(wlc_phy_t *ppi, bool start_end)
}
void
-wlc_phy_txpower_limit_set(wlc_phy_t *ppi, struct txpwr_limits *txpwr,
+wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, struct txpwr_limits *txpwr,
chanspec_t chanspec)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
wlc_phy_txpower_reg_limit_calc(pi, txpwr, chanspec);
if (ISLCNPHY(pi)) {
int i, j;
for (i = TXP_FIRST_OFDM_20_CDD, j = 0;
- j < WLC_NUM_RATES_MCS_1_STREAM; i++, j++) {
+ j < BRCMS_NUM_RATES_MCS_1_STREAM; i++, j++) {
if (txpwr->mcs_20_siso[j])
pi->txpwr_limit[i] = txpwr->mcs_20_siso[j];
else
@@ -2061,21 +1999,21 @@ wlc_phy_txpower_limit_set(wlc_phy_t *ppi, struct txpwr_limits *txpwr,
wlapi_enable_mac(pi->sh->physhim);
}
-void wlc_phy_ofdm_rateset_war(wlc_phy_t *pih, bool war)
+void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
pi->ofdm_rateset_war = war;
}
-void wlc_phy_bf_preempt_enable(wlc_phy_t *pih, bool bf_preempt)
+void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih, bool bf_preempt)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
pi->bf_preempt_4306 = bf_preempt;
}
-void wlc_phy_txpower_update_shm(phy_info_t *pi)
+void wlc_phy_txpower_update_shm(struct brcms_phy *pi)
{
int j;
if (ISNPHY(pi)) {
@@ -2112,7 +2050,7 @@ void wlc_phy_txpower_update_shm(phy_info_t *pi)
}
wlapi_bmac_mhf(pi->sh->physhim, MHF2, MHF2_HWPWRCTL,
- MHF2_HWPWRCTL, WLC_BAND_ALL);
+ MHF2_HWPWRCTL, BRCM_BAND_ALL);
} else {
int i;
@@ -2126,9 +2064,9 @@ void wlc_phy_txpower_update_shm(phy_info_t *pi)
}
}
-bool wlc_phy_txpower_hw_ctrl_get(wlc_phy_t *ppi)
+bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
if (ISNPHY(pi)) {
return pi->nphy_txpwrctrl;
@@ -2137,9 +2075,9 @@ bool wlc_phy_txpower_hw_ctrl_get(wlc_phy_t *ppi)
}
}
-void wlc_phy_txpower_hw_ctrl_set(wlc_phy_t *ppi, bool hwpwrctrl)
+void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
bool cur_hwpwrctrl = pi->hwpwrctrl;
bool suspend;
@@ -2175,7 +2113,7 @@ void wlc_phy_txpower_hw_ctrl_set(wlc_phy_t *ppi, bool hwpwrctrl)
}
}
-void wlc_phy_txpower_ipa_upd(phy_info_t *pi)
+void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi)
{
if (NREV_GE(pi->pubpi.phy_rev, 3)) {
@@ -2187,9 +2125,9 @@ void wlc_phy_txpower_ipa_upd(phy_info_t *pi)
}
}
-static u32 wlc_phy_txpower_est_power_nphy(phy_info_t *pi);
+static u32 wlc_phy_txpower_est_power_nphy(struct brcms_phy *pi);
-static u32 wlc_phy_txpower_est_power_nphy(phy_info_t *pi)
+static u32 wlc_phy_txpower_est_power_nphy(struct brcms_phy *pi)
{
s16 tx0_status, tx1_status;
u16 estPower1, estPower2;
@@ -2239,14 +2177,15 @@ static u32 wlc_phy_txpower_est_power_nphy(phy_info_t *pi)
}
void
-wlc_phy_txpower_get_current(wlc_phy_t *ppi, tx_power_t *power, uint channel)
+wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi, struct tx_power *power,
+ uint channel)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
uint rate, num_rates;
u8 min_pwr, max_pwr;
#if WL_TX_POWER_RATES != TXP_NUM_RATES
-#error "tx_power_t struct out of sync with this fn"
+#error "struct tx_power out of sync with this fn"
#endif
if (ISNPHY(pi)) {
@@ -2280,9 +2219,9 @@ wlc_phy_txpower_get_current(wlc_phy_t *ppi, tx_power_t *power, uint channel)
u32 est_pout;
wlapi_suspend_mac_and_wait(pi->sh->physhim);
- wlc_phyreg_enter((wlc_phy_t *) pi);
+ wlc_phyreg_enter((struct brcms_phy_pub *) pi);
est_pout = wlc_phy_txpower_est_power_nphy(pi);
- wlc_phyreg_exit((wlc_phy_t *) pi);
+ wlc_phyreg_exit((struct brcms_phy_pub *) pi);
wlapi_enable_mac(pi->sh->physhim);
power->est_Pout[0] = (est_pout >> 8) & 0xff;
@@ -2308,8 +2247,7 @@ wlc_phy_txpower_get_current(wlc_phy_t *ppi, tx_power_t *power, uint channel)
power->tx_power_max_rate_ind[0] = pi->tx_power_max_rate_ind;
power->tx_power_max_rate_ind[1] = pi->tx_power_max_rate_ind;
- } else if (!pi->hwpwrctrl) {
- } else if (pi->sh->up) {
+ } else if (pi->hwpwrctrl && pi->sh->up) {
wlc_phyreg_enter(ppi);
if (ISLCNPHY(pi)) {
@@ -2336,44 +2274,23 @@ wlc_phy_txpower_get_current(wlc_phy_t *ppi, tx_power_t *power, uint channel)
}
}
-void wlc_phy_antsel_type_set(wlc_phy_t *ppi, u8 antsel_type)
+void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
pi->antsel_type = antsel_type;
}
-bool wlc_phy_test_ison(wlc_phy_t *ppi)
+bool wlc_phy_test_ison(struct brcms_phy_pub *ppi)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
return pi->phytest_on;
}
-bool wlc_phy_ant_rxdiv_get(wlc_phy_t *ppi, u8 *pval)
+void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val)
{
- phy_info_t *pi = (phy_info_t *) ppi;
- bool ret = true;
-
- wlc_phyreg_enter(ppi);
-
- if (ISNPHY(pi)) {
-
- ret = false;
- } else if (ISLCNPHY(pi)) {
- u16 crsctrl = read_phy_reg(pi, 0x410);
- u16 div = crsctrl & (0x1 << 1);
- *pval = (div | ((crsctrl & (0x1 << 0)) ^ (div >> 1)));
- }
-
- wlc_phyreg_exit(ppi);
-
- return ret;
-}
-
-void wlc_phy_ant_rxdiv_set(wlc_phy_t *ppi, u8 val)
-{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
bool suspend;
pi->sh->rx_antdiv = val;
@@ -2381,10 +2298,10 @@ void wlc_phy_ant_rxdiv_set(wlc_phy_t *ppi, u8 val)
if (!(ISNPHY(pi) && D11REV_IS(pi->sh->corerev, 16))) {
if (val > ANT_RX_DIV_FORCE_1)
wlapi_bmac_mhf(pi->sh->physhim, MHF1, MHF1_ANTDIV,
- MHF1_ANTDIV, WLC_BAND_ALL);
+ MHF1_ANTDIV, BRCM_BAND_ALL);
else
wlapi_bmac_mhf(pi->sh->physhim, MHF1, MHF1_ANTDIV, 0,
- WLC_BAND_ALL);
+ BRCM_BAND_ALL);
}
if (ISNPHY(pi)) {
@@ -2419,7 +2336,7 @@ void wlc_phy_ant_rxdiv_set(wlc_phy_t *ppi, u8 val)
}
static bool
-wlc_phy_noise_calc_phy(phy_info_t *pi, u32 *cmplx_pwr, s8 *pwr_ant)
+wlc_phy_noise_calc_phy(struct brcms_phy *pi, u32 *cmplx_pwr, s8 *pwr_ant)
{
s8 cmplx_pwr_dbm[PHY_CORE_MAX];
u8 i;
@@ -2445,9 +2362,9 @@ wlc_phy_noise_calc_phy(phy_info_t *pi, u32 *cmplx_pwr, s8 *pwr_ant)
}
static void
-wlc_phy_noise_sample_request(wlc_phy_t *pih, u8 reason, u8 ch)
+wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
s8 noise_dbm = PHY_NOISE_FIXED_VAL_NPHY;
bool sampling_in_progress = (pi->phynoise_state != 0);
bool wait_for_intr = true;
@@ -2528,7 +2445,7 @@ wlc_phy_noise_sample_request(wlc_phy_t *pih, u8 reason, u8 ch)
OR_REG(&pi->regs->maccommand,
MCMD_BG_NOISE);
} else {
- phy_iq_est_t est[PHY_CORE_MAX];
+ struct phy_iq_est est[PHY_CORE_MAX];
u32 cmplx_pwr[PHY_CORE_MAX];
s8 noise_dbm_ant[PHY_CORE_MAX];
u16 log_num_samps, num_samps, classif_state = 0;
@@ -2579,7 +2496,7 @@ wlc_phy_noise_sample_request(wlc_phy_t *pih, u8 reason, u8 ch)
}
-void wlc_phy_noise_sample_request_external(wlc_phy_t *pih)
+void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *pih)
{
u8 channel;
@@ -2588,7 +2505,7 @@ void wlc_phy_noise_sample_request_external(wlc_phy_t *pih)
wlc_phy_noise_sample_request(pih, PHY_NOISE_SAMPLE_EXTERNAL, channel);
}
-static void wlc_phy_noise_cb(phy_info_t *pi, u8 channel, s8 noise_dbm)
+static void wlc_phy_noise_cb(struct brcms_phy *pi, u8 channel, s8 noise_dbm)
{
if (!pi->phynoise_state)
return;
@@ -2609,7 +2526,7 @@ static void wlc_phy_noise_cb(phy_info_t *pi, u8 channel, s8 noise_dbm)
}
-static s8 wlc_phy_noise_read_shmem(phy_info_t *pi)
+static s8 wlc_phy_noise_read_shmem(struct brcms_phy *pi)
{
u32 cmplx_pwr[PHY_CORE_MAX];
s8 noise_dbm_ant[PHY_CORE_MAX];
@@ -2650,9 +2567,9 @@ static s8 wlc_phy_noise_read_shmem(phy_info_t *pi)
}
-void wlc_phy_noise_sample_intr(wlc_phy_t *pih)
+void wlc_phy_noise_sample_intr(struct brcms_phy_pub *pih)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
u16 jssi_aux;
u8 channel = 0;
s8 noise_dbm = PHY_NOISE_FIXED_VAL_NPHY;
@@ -2761,28 +2678,28 @@ void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_cmplx_pwr_dB, u8 core)
}
}
-void wlc_phy_rssi_compute(wlc_phy_t *pih, void *ctx)
+void wlc_phy_rssi_compute(struct brcms_phy_pub *pih, void *ctx)
{
- wlc_d11rxhdr_t *wlc_rxhdr = (wlc_d11rxhdr_t *) ctx;
- d11rxhdr_t *rxh = &wlc_rxhdr->rxhdr;
+ struct brcms_d11rxhdr *wlc_rxhdr = (struct brcms_d11rxhdr *) ctx;
+ struct d11rxhdr *rxh = &wlc_rxhdr->rxhdr;
int rssi = le16_to_cpu(rxh->PhyRxStatus_1) & PRXS1_JSSI_MASK;
uint radioid = pih->radioid;
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
if (NORADIO_ENAB(pi->pubpi)) {
- rssi = WLC_RSSI_INVALID;
+ rssi = BRCMS_RSSI_INVALID;
goto end;
}
if ((pi->sh->corerev >= 11)
&& !(le16_to_cpu(rxh->RxStatus2) & RXS_PHYRXST_VALID)) {
- rssi = WLC_RSSI_INVALID;
+ rssi = BRCMS_RSSI_INVALID;
goto end;
}
if (ISLCNPHY(pi)) {
u8 gidx = (le16_to_cpu(rxh->PhyRxStatus_2) & 0xFC00) >> 10;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (rssi > 127)
rssi -= 256;
@@ -2810,20 +2727,20 @@ void wlc_phy_rssi_compute(wlc_phy_t *pih, void *ctx)
wlc_rxhdr->rssi = (s8) rssi;
}
-void wlc_phy_freqtrack_start(wlc_phy_t *pih)
+void wlc_phy_freqtrack_start(struct brcms_phy_pub *pih)
{
return;
}
-void wlc_phy_freqtrack_end(wlc_phy_t *pih)
+void wlc_phy_freqtrack_end(struct brcms_phy_pub *pih)
{
return;
}
-void wlc_phy_set_deaf(wlc_phy_t *ppi, bool user_flag)
+void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag)
{
- phy_info_t *pi;
- pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi;
+ pi = (struct brcms_phy *) ppi;
if (ISLCNPHY(pi))
wlc_lcnphy_deaf_mode(pi, true);
@@ -2831,9 +2748,9 @@ void wlc_phy_set_deaf(wlc_phy_t *ppi, bool user_flag)
wlc_nphy_deaf_mode(pi, true);
}
-void wlc_phy_watchdog(wlc_phy_t *pih)
+void wlc_phy_watchdog(struct brcms_phy_pub *pih)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
bool delay_phy_cal = false;
pi->sh->now++;
@@ -2841,7 +2758,7 @@ void wlc_phy_watchdog(wlc_phy_t *pih)
return;
if (!(SCAN_RM_IN_PROGRESS(pi) || PLT_INPROG_PHY(pi))) {
- wlc_phy_noise_sample_request((wlc_phy_t *) pi,
+ wlc_phy_noise_sample_request((struct brcms_phy_pub *) pi,
PHY_NOISE_SAMPLE_MON,
CHSPEC_CHANNEL(pi->
radio_chanspec));
@@ -2872,7 +2789,7 @@ void wlc_phy_watchdog(wlc_phy_t *pih)
(pi->nphy_perical != PHY_PERICAL_MANUAL) &&
((pi->sh->now - pi->nphy_perical_last) >=
pi->sh->glacial_timer))
- wlc_phy_cal_perical((wlc_phy_t *) pi,
+ wlc_phy_cal_perical((struct brcms_phy_pub *) pi,
PHY_PERICAL_WATCHDOG);
wlc_phy_txpwr_papd_cal_nphy(pi);
@@ -2896,9 +2813,9 @@ void wlc_phy_watchdog(wlc_phy_t *pih)
}
}
-void wlc_phy_BSSinit(wlc_phy_t *pih, bool bonlyap, int rssi)
+void wlc_phy_BSSinit(struct brcms_phy_pub *pih, bool bonlyap, int rssi)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
uint i;
uint k;
@@ -2994,7 +2911,7 @@ void wlc_phy_cordic(fixed theta, cs32 *val)
val[0].q = val[0].q * signx;
}
-void wlc_phy_cal_perical_mphase_reset(phy_info_t *pi)
+void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi)
{
wlapi_del_timer(pi->sh->physhim, pi->phycal_timer);
@@ -3003,7 +2920,8 @@ void wlc_phy_cal_perical_mphase_reset(phy_info_t *pi)
pi->mphase_txcal_cmdidx = 0;
}
-static void wlc_phy_cal_perical_mphase_schedule(phy_info_t *pi, uint delay)
+static void
+wlc_phy_cal_perical_mphase_schedule(struct brcms_phy *pi, uint delay)
{
if ((pi->nphy_perical != PHY_PERICAL_MPHASE) &&
@@ -3016,12 +2934,12 @@ static void wlc_phy_cal_perical_mphase_schedule(phy_info_t *pi, uint delay)
wlapi_add_timer(pi->sh->physhim, pi->phycal_timer, delay, 0);
}
-void wlc_phy_cal_perical(wlc_phy_t *pih, u8 reason)
+void wlc_phy_cal_perical(struct brcms_phy_pub *pih, u8 reason)
{
s16 nphy_currtemp = 0;
s16 delta_temp = 0;
bool do_periodic_cal = true;
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
if (!ISNPHY(pi))
return;
@@ -3096,7 +3014,7 @@ void wlc_phy_cal_perical(wlc_phy_t *pih, u8 reason)
}
}
-void wlc_phy_cal_perical_mphase_restart(phy_info_t *pi)
+void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi)
{
pi->mphase_cal_phase_id = MPHASE_CAL_STATE_INIT;
pi->mphase_txcal_cmdidx = 0;
@@ -3114,9 +3032,9 @@ u8 wlc_phy_nbits(s32 value)
return nbits;
}
-void wlc_phy_stf_chain_init(wlc_phy_t *pih, u8 txchain, u8 rxchain)
+void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
pi->sh->hw_phytxchain = txchain;
pi->sh->hw_phyrxchain = rxchain;
@@ -3125,9 +3043,9 @@ void wlc_phy_stf_chain_init(wlc_phy_t *pih, u8 txchain, u8 rxchain)
pi->pubpi.phy_corenum = (u8) PHY_BITSCNT(pi->sh->phyrxchain);
}
-void wlc_phy_stf_chain_set(wlc_phy_t *pih, u8 txchain, u8 rxchain)
+void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
pi->sh->phytxchain = txchain;
@@ -3137,19 +3055,19 @@ void wlc_phy_stf_chain_set(wlc_phy_t *pih, u8 txchain, u8 rxchain)
pi->pubpi.phy_corenum = (u8) PHY_BITSCNT(pi->sh->phyrxchain);
}
-void wlc_phy_stf_chain_get(wlc_phy_t *pih, u8 *txchain, u8 *rxchain)
+void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain, u8 *rxchain)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
*txchain = pi->sh->phytxchain;
*rxchain = pi->sh->phyrxchain;
}
-u8 wlc_phy_stf_chain_active_get(wlc_phy_t *pih)
+u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih)
{
s16 nphy_currtemp;
u8 active_bitmap;
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
active_bitmap = (pi->phy_txcore_heatedup) ? 0x31 : 0x33;
@@ -3177,9 +3095,9 @@ u8 wlc_phy_stf_chain_active_get(wlc_phy_t *pih)
return active_bitmap;
}
-s8 wlc_phy_stf_ssmode_get(wlc_phy_t *pih, chanspec_t chanspec)
+s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, chanspec_t chanspec)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
u8 siso_mcs_id, cdd_mcs_id;
siso_mcs_id =
@@ -3201,7 +3119,7 @@ const u8 *wlc_phy_get_ofdm_rate_lookup(void)
return ofdm_rate_lookup;
}
-void wlc_lcnphy_epa_switch(phy_info_t *pi, bool mode)
+void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
{
if ((pi->sh->chip == BCM4313_CHIP_ID) &&
(pi->sh->boardflags & BFL_FEM)) {
@@ -3239,7 +3157,7 @@ void wlc_lcnphy_epa_switch(phy_info_t *pi, bool mode)
}
static s8
-wlc_user_txpwr_antport_to_rfport(phy_info_t *pi, uint chan, u32 band,
+wlc_user_txpwr_antport_to_rfport(struct brcms_phy *pi, uint chan, u32 band,
u8 rate)
{
s8 offset = 0;
@@ -3249,7 +3167,7 @@ wlc_user_txpwr_antport_to_rfport(phy_info_t *pi, uint chan, u32 band,
return offset;
}
-static s8 wlc_phy_env_measure_vbat(phy_info_t *pi)
+static s8 wlc_phy_env_measure_vbat(struct brcms_phy *pi)
{
if (ISLCNPHY(pi))
return wlc_lcnphy_vbatsense(pi, 0);
@@ -3257,7 +3175,7 @@ static s8 wlc_phy_env_measure_vbat(phy_info_t *pi)
return 0;
}
-static s8 wlc_phy_env_measure_temperature(phy_info_t *pi)
+static s8 wlc_phy_env_measure_temperature(struct brcms_phy *pi)
{
if (ISLCNPHY(pi))
return wlc_lcnphy_tempsense_degree(pi, 0);
@@ -3265,40 +3183,40 @@ static s8 wlc_phy_env_measure_temperature(phy_info_t *pi)
return 0;
}
-static void wlc_phy_upd_env_txpwr_rate_limits(phy_info_t *pi, u32 band)
+static void wlc_phy_upd_env_txpwr_rate_limits(struct brcms_phy *pi, u32 band)
{
u8 i;
s8 temp, vbat;
for (i = 0; i < TXP_NUM_RATES; i++)
- pi->txpwr_env_limit[i] = WLC_TXPWR_MAX;
+ pi->txpwr_env_limit[i] = BRCMS_TXPWR_MAX;
vbat = wlc_phy_env_measure_vbat(pi);
temp = wlc_phy_env_measure_temperature(pi);
}
-void wlc_phy_ldpc_override_set(wlc_phy_t *ppi, bool ldpc)
+void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool ldpc)
{
return;
}
void
-wlc_phy_get_pwrdet_offsets(phy_info_t *pi, s8 *cckoffset, s8 *ofdmoffset)
+wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset, s8 *ofdmoffset)
{
*cckoffset = 0;
*ofdmoffset = 0;
}
-s8 wlc_phy_upd_rssi_offset(phy_info_t *pi, s8 rssi, chanspec_t chanspec)
+s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, chanspec_t chanspec)
{
return rssi;
}
-bool wlc_phy_txpower_ipa_ison(wlc_phy_t *ppi)
+bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *ppi)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
if (ISNPHY(pi))
return wlc_phy_n_txpower_ipa_ison(pi);
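
Editor's note, not part of the patch: the open-coded pattern removed throughout this file -- a register write followed by a dummy read under #ifdef __mips__ so the posted write is flushed before the data register is touched -- is what the new W_REG_FLUSH() calls stand for. The macro's definition is not in these hunks; a sketch of what it presumably expands to (an assumption, not taken from this patch):

	/* assumed shape of the helper; the real macro lives elsewhere in the series */
	#ifdef __mips__
	#define W_REG_FLUSH(r, v)	do { W_REG((r), (v)); (void)R_REG(r); } while (0)
	#else
	#define W_REG_FLUSH(r, v)	W_REG((r), (v))
	#endif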
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/staging/brcm80211/brcmsmac/phy/phy_hal.h
new file mode 100644
index 00000000000..e27d9e95a2d
--- /dev/null
+++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_hal.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * phy_hal.h: functionality exported from the phy to higher layers
+ */
+
+#ifndef _BRCM_PHY_HAL_H_
+#define _BRCM_PHY_HAL_H_
+
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include <phy_shim.h>
+
+#define IDCODE_VER_MASK 0x0000000f
+#define IDCODE_VER_SHIFT 0
+#define IDCODE_MFG_MASK 0x00000fff
+#define IDCODE_MFG_SHIFT 0
+#define IDCODE_ID_MASK 0x0ffff000
+#define IDCODE_ID_SHIFT 12
+#define IDCODE_REV_MASK 0xf0000000
+#define IDCODE_REV_SHIFT 28
+
+#define NORADIO_ID 0xe4f5
+#define NORADIO_IDCODE 0x4e4f5246
+
+#define BCM2055_ID 0x2055
+#define BCM2055_IDCODE 0x02055000
+#define BCM2055A0_IDCODE 0x1205517f
+
+#define BCM2056_ID 0x2056
+#define BCM2056_IDCODE 0x02056000
+#define BCM2056A0_IDCODE 0x1205617f
+
+#define BCM2057_ID 0x2057
+#define BCM2057_IDCODE 0x02057000
+#define BCM2057A0_IDCODE 0x1205717f
+
+#define BCM2064_ID 0x2064
+#define BCM2064_IDCODE 0x02064000
+#define BCM2064A0_IDCODE 0x0206417f
+
+#define PHY_TPC_HW_OFF false
+#define PHY_TPC_HW_ON true
+
+#define PHY_PERICAL_DRIVERUP 1
+#define PHY_PERICAL_WATCHDOG 2
+#define PHY_PERICAL_PHYINIT 3
+#define PHY_PERICAL_JOIN_BSS 4
+#define PHY_PERICAL_START_IBSS 5
+#define PHY_PERICAL_UP_BSS 6
+#define PHY_PERICAL_CHAN 7
+#define PHY_FULLCAL 8
+
+#define PHY_PERICAL_DISABLE 0
+#define PHY_PERICAL_SPHASE 1
+#define PHY_PERICAL_MPHASE 2
+#define PHY_PERICAL_MANUAL 3
+
+#define PHY_HOLD_FOR_ASSOC 1
+#define PHY_HOLD_FOR_SCAN 2
+#define PHY_HOLD_FOR_RM 4
+#define PHY_HOLD_FOR_PLT 8
+#define PHY_HOLD_FOR_MUTE 16
+#define PHY_HOLD_FOR_NOT_ASSOC 0x20
+
+#define PHY_MUTE_FOR_PREISM 1
+#define PHY_MUTE_ALL 0xffffffff
+
+#define PHY_NOISE_FIXED_VAL (-95)
+#define PHY_NOISE_FIXED_VAL_NPHY (-92)
+#define PHY_NOISE_FIXED_VAL_LCNPHY (-92)
+
+#define PHY_MODE_CAL 0x0002
+#define PHY_MODE_NOISEM 0x0004
+
+#define BRCMS_TXPWR_DB_FACTOR 4
+
+/* a large TX Power as an init value to factor out of min() calculations,
+ * keep low enough to fit in an s8, units are .25 dBm
+ */
+#define BRCMS_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */
+
+#define BRCMS_NUM_RATES_CCK 4
+#define BRCMS_NUM_RATES_OFDM 8
+#define BRCMS_NUM_RATES_MCS_1_STREAM 8
+#define BRCMS_NUM_RATES_MCS_2_STREAM 8
+#define BRCMS_NUM_RATES_MCS_3_STREAM 8
+#define BRCMS_NUM_RATES_MCS_4_STREAM 8
+
+#define BRCMS_RSSI_INVALID 0 /* invalid RSSI value */
+
+struct txpwr_limits {
+ u8 cck[BRCMS_NUM_RATES_CCK];
+ u8 ofdm[BRCMS_NUM_RATES_OFDM];
+
+ u8 ofdm_cdd[BRCMS_NUM_RATES_OFDM];
+
+ u8 ofdm_40_siso[BRCMS_NUM_RATES_OFDM];
+ u8 ofdm_40_cdd[BRCMS_NUM_RATES_OFDM];
+
+ u8 mcs_20_siso[BRCMS_NUM_RATES_MCS_1_STREAM];
+ u8 mcs_20_cdd[BRCMS_NUM_RATES_MCS_1_STREAM];
+ u8 mcs_20_stbc[BRCMS_NUM_RATES_MCS_1_STREAM];
+ u8 mcs_20_mimo[BRCMS_NUM_RATES_MCS_2_STREAM];
+
+ u8 mcs_40_siso[BRCMS_NUM_RATES_MCS_1_STREAM];
+ u8 mcs_40_cdd[BRCMS_NUM_RATES_MCS_1_STREAM];
+ u8 mcs_40_stbc[BRCMS_NUM_RATES_MCS_1_STREAM];
+ u8 mcs_40_mimo[BRCMS_NUM_RATES_MCS_2_STREAM];
+ u8 mcs32;
+};
+
+struct tx_power {
+ u32 flags;
+ chanspec_t chanspec; /* txpwr report for this channel */
+ chanspec_t local_chanspec; /* channel on which we are associated */
+ u8 local_max; /* local max according to the AP */
+ u8 local_constraint; /* local constraint according to the AP */
+ s8 antgain[2]; /* Ant gain for each band - from SROM */
+ u8 rf_cores; /* count of RF Cores being reported */
+ u8 est_Pout[4]; /* Latest tx power out estimate per RF chain */
+ u8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain
+ * without adjustment
+ */
+ u8 est_Pout_cck; /* Latest CCK tx power out estimate */
+ u8 tx_power_max[4]; /* Maximum target power among all rates */
+ u8 tx_power_max_rate_ind[4]; /* Index of the rate with the max target power */
+ u8 user_limit[WL_TX_POWER_RATES]; /* User limit */
+ u8 reg_limit[WL_TX_POWER_RATES]; /* Regulatory power limit */
+ u8 board_limit[WL_TX_POWER_RATES]; /* Max power board can support (SROM) */
+ u8 target[WL_TX_POWER_RATES]; /* Latest target power */
+};
+
+struct tx_inst_power {
+ u8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 GHz */
+ u8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */
+};
+
+struct chanvec {
+ u8 vec[MAXCHANNEL / NBBY];
+};
+
+struct shared_phy_params {
+ struct si_pub *sih;
+ void *physhim;
+ uint unit;
+ uint corerev;
+ uint bustype;
+ uint buscorerev;
+ char *vars;
+ u16 vid;
+ u16 did;
+ uint chip;
+ uint chiprev;
+ uint chippkg;
+ uint sromrev;
+ uint boardtype;
+ uint boardrev;
+ uint boardvendor;
+ u32 boardflags;
+ u32 boardflags2;
+};
+
+
+extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
+extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh, void *regs,
+ int bandtype, char *vars, struct wiphy *wiphy);
+extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
+
+extern bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
+ u16 *phyrev, u16 *radioid,
+ u16 *radiover);
+extern bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
+extern u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
+
+extern void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+extern void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+extern void wlc_phy_init(struct brcms_phy_pub *ppi, chanspec_t chanspec);
+extern void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
+extern int wlc_phy_down(struct brcms_phy_pub *ppi);
+extern u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
+extern void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
+extern void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
+
+extern void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi,
+ chanspec_t chanspec);
+extern chanspec_t wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
+extern void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi,
+ chanspec_t newch);
+extern u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
+extern void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
+
+extern void wlc_phy_rssi_compute(struct brcms_phy_pub *pih, void *ctx);
+extern void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
+extern void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
+extern bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
+
+extern void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
+
+extern void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
+extern void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
+
+
+extern void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
+
+extern void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
+ bool wide_filter);
+extern void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
+ chanvec_t *channels);
+extern chanspec_t wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi,
+ uint band);
+
+extern void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan,
+ u8 *_min_, u8 *_max_, int rate);
+extern void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi,
+ uint chan, u8 *_max_, u8 *_min_);
+extern void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi,
+ uint band, s32 *, s32 *, u32 *);
+extern void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi,
+ struct txpwr_limits *,
+ chanspec_t chanspec);
+extern int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm,
+ bool *override);
+extern int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm,
+ bool override);
+extern void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
+ struct txpwr_limits *);
+extern bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
+extern void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi,
+ bool hwpwrctrl);
+extern u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
+extern u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
+extern bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
+
+extern void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain,
+ u8 rxchain);
+extern void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain,
+ u8 rxchain);
+extern void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain,
+ u8 *rxchain);
+extern u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
+extern s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih,
+ chanspec_t chanspec);
+extern void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
+
+extern void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
+extern void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
+extern void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
+extern void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
+
+extern void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
+extern void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
+extern void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, mbool id, bool val);
+extern void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, mbool flags);
+
+extern void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
+
+extern void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
+ struct tx_power *power, uint channel);
+
+extern void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
+extern bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
+extern void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi,
+ u8 txpwr_percent);
+extern void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
+extern void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih,
+ bool bf_preempt);
+extern void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
+
+extern void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
+
+extern void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
+extern void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
+
+extern const u8 *wlc_phy_get_ofdm_rate_lookup(void);
+
+extern s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
+ u8 mcs_offset);
+extern s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
+#endif /* _BRCM_PHY_HAL_H_ */
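The header above keeps every transmit-power value in quarter-dBm steps: BRCMS_TXPWR_DB_FACTOR is 4 steps per dB, and BRCMS_TXPWR_MAX is 127, the largest value that still fits in an s8 (127 * 0.25 dBm = 31.75 dBm, hence the "~32 dBm" note). A minimal, self-contained sketch of that unit convention follows; the helper name qdbm_to_mdbm is hypothetical and not part of this patch.

#include <stdio.h>

#define BRCMS_TXPWR_DB_FACTOR	4	/* quarter-dB steps per dB, as in phy_hal.h */
#define BRCMS_TXPWR_MAX		127	/* largest qdBm value that fits in an s8 */

/* Convert a quarter-dBm (qdBm) value to milli-dBm for display. */
static int qdbm_to_mdbm(int qdbm)
{
	return qdbm * 1000 / BRCMS_TXPWR_DB_FACTOR;
}

int main(void)
{
	int mdbm = qdbm_to_mdbm(BRCMS_TXPWR_MAX);

	/* Prints 31.750 dBm: 127 qdBm == 31.75 dBm, i.e. "~32 dBm". */
	printf("BRCMS_TXPWR_MAX = %d.%03d dBm\n", mdbm / 1000, mdbm % 1000);
	return 0;
}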
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h b/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
index 10cbf520474..a01b01ccd9f 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_int.h
+++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_int.h
@@ -14,15 +14,14 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _wlc_phy_int_h_
-#define _wlc_phy_int_h_
+#ifndef _BRCM_PHY_INT_H_
+#define _BRCM_PHY_INT_H_
-#include <linux/kernel.h>
-#include <bcmdefs.h>
-#include <bcmutils.h>
+#include <types.h>
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
-#include <bcmsrom_fmt.h>
-#include <wlc_phy_hal.h>
+#define PHY_VERSION { 1, 82, 8, 0 }
#define PHYHAL_ERROR 0x0001
#define PHYHAL_TRACE 0x0002
@@ -42,23 +41,29 @@ extern u32 phyhal_msg_level;
#define LCNXN_BASEREV 16
-struct wlc_hw_info;
-typedef struct phy_info phy_info_t;
-typedef void (*initfn_t) (phy_info_t *);
-typedef void (*chansetfn_t) (phy_info_t *, chanspec_t);
-typedef int (*longtrnfn_t) (phy_info_t *, int);
-typedef void (*txiqccgetfn_t) (phy_info_t *, u16 *, u16 *);
-typedef void (*txiqccsetfn_t) (phy_info_t *, u16, u16);
-typedef u16(*txloccgetfn_t) (phy_info_t *);
-typedef void (*radioloftgetfn_t) (phy_info_t *, u8 *, u8 *, u8 *,
+struct brcms_phy_srom_fem {
+ u8 tssipos; /* TSSI positive slope, 1: positive, 0: negative */
+ u8 extpagain; /* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */
+ u8 pdetrange; /* support 32 combinations of different Pdet dynamic ranges */
+ u8 triso; /* TR switch isolation */
+ u8 antswctrllut; /* antswctrl lookup table configuration: 32 possible choices */
+};
+
+typedef void (*initfn_t) (struct brcms_phy *);
+typedef void (*chansetfn_t) (struct brcms_phy *, chanspec_t);
+typedef int (*longtrnfn_t) (struct brcms_phy *, int);
+typedef void (*txiqccgetfn_t) (struct brcms_phy *, u16 *, u16 *);
+typedef void (*txiqccsetfn_t) (struct brcms_phy *, u16, u16);
+typedef u16(*txloccgetfn_t) (struct brcms_phy *);
+typedef void (*radioloftgetfn_t) (struct brcms_phy *, u8 *, u8 *, u8 *,
u8 *);
-typedef s32(*rxsigpwrfn_t) (phy_info_t *, s32);
-typedef void (*detachfn_t) (phy_info_t *);
+typedef s32(*rxsigpwrfn_t) (struct brcms_phy *, s32);
+typedef void (*detachfn_t) (struct brcms_phy *);
#undef ISNPHY
#undef ISLCNPHY
#define ISNPHY(pi) PHYTYPE_IS((pi)->pubpi.phy_type, PHY_TYPE_N)
-#define ISLCNPHY(pi) PHYTYPE_IS((pi)->pubpi.phy_type, PHY_TYPE_LCN)
+#define ISLCNPHY(pi) PHYTYPE_IS((pi)->pubpi.phy_type, PHY_TYPE_LCN)
#define ISPHY_11N_CAP(pi) (ISNPHY(pi) || ISLCNPHY(pi))
@@ -215,7 +220,7 @@ enum {
MPHASE_CAL_STATE_IDLETSSI
};
-typedef enum {
+enum phy_cal_mode {
CAL_FULL,
CAL_RECAL,
CAL_CURRECAL,
@@ -223,7 +228,7 @@ typedef enum {
CAL_GCTRL,
CAL_SOFT,
CAL_DIGLO
-} phy_cal_mode_t;
+};
#define RDR_NTIERS 1
#define RDR_TIER_SIZE 64
@@ -248,7 +253,7 @@ typedef enum {
#define PHY_CHAIN_TX_DISABLE_TEMP 115
#define PHY_HYSTERESIS_DELTATEMP 5
-#define PHY_BITSCNT(x) bcm_bitcount((u8 *)&(x), sizeof(u8))
+#define PHY_BITSCNT(x) brcmu_bitcount((u8 *)&(x), sizeof(u8))
#define MOD_PHY_REG(pi, phy_type, reg_name, field, value) \
mod_phy_reg(pi, phy_type##_##reg_name, phy_type##_##reg_name##_##field##_MASK, \
@@ -285,21 +290,21 @@ typedef enum {
#define PHY_LTRN_LIST_LEN 64
extern u16 ltrn_list[PHY_LTRN_LIST_LEN];
-typedef struct _phy_table_info {
+struct phy_table_info {
uint table;
int q;
uint max;
-} phy_table_info_t;
+};
-typedef struct phytbl_info {
+struct phytbl_info {
const void *tbl_ptr;
u32 tbl_len;
u32 tbl_id;
u32 tbl_offset;
u32 tbl_width;
-} phytbl_info_t;
+};
-typedef struct {
+struct interference_info {
u8 curr_home_channel;
u16 crsminpwrthld_40_stored;
u16 crsminpwrthld_20L_stored;
@@ -369,10 +374,9 @@ typedef struct {
u16 radio_2057_core2_rssi_wb2_gc_stored;
u16 radio_2057_core1_rssi_nb_gc_stored;
u16 radio_2057_core2_rssi_nb_gc_stored;
+};
-} interference_info_t;
-
-typedef struct {
+struct aci_save_gphy {
u16 rc_cal_ovr;
u16 phycrsth1;
u16 phycrsth2;
@@ -406,21 +410,21 @@ typedef struct {
u16 div_srch_gn_back;
u16 ant_dwell;
u16 ant_wr_settle;
-} aci_save_gphy_t;
+};
-typedef struct _lo_complex_t {
+struct lo_complex_abgphy_info {
s8 i;
s8 q;
-} lo_complex_abgphy_info_t;
+};
-typedef struct _nphy_iq_comp {
+struct nphy_iq_comp {
s16 a0;
s16 b0;
s16 a1;
s16 b1;
-} nphy_iq_comp_t;
+};
-typedef struct _nphy_txpwrindex {
+struct nphy_txpwrindex {
s8 index;
s8 index_internal;
s8 index_internal_save;
@@ -431,20 +435,20 @@ typedef struct _nphy_txpwrindex {
u16 iqcomp_a;
u16 iqcomp_b;
u16 locomp;
-} phy_txpwrindex_t;
+};
-typedef struct {
+struct txiqcal_cache {
u16 txcal_coeffs_2G[8];
u16 txcal_radio_regs_2G[8];
- nphy_iq_comp_t rxcal_coeffs_2G;
+ struct nphy_iq_comp rxcal_coeffs_2G;
u16 txcal_coeffs_5G[8];
u16 txcal_radio_regs_5G[8];
- nphy_iq_comp_t rxcal_coeffs_5G;
-} txiqcal_cache_t;
+ struct nphy_iq_comp rxcal_coeffs_5G;
+};
-typedef struct _nphy_pwrctrl {
+struct nphy_pwrctrl {
s8 max_pwr_2g;
s8 idle_targ_2g;
s16 pwrdet_2g_a1;
@@ -471,34 +475,34 @@ typedef struct _nphy_pwrctrl {
s16 a1;
s16 b0;
s16 b1;
-} phy_pwrctrl_t;
+};
-typedef struct _nphy_txgains {
+struct nphy_txgains {
u16 txlpf[2];
u16 txgm[2];
u16 pga[2];
u16 pad[2];
u16 ipa[2];
-} nphy_txgains_t;
+};
#define PHY_NOISEVAR_BUFSIZE 10
-typedef struct _nphy_noisevar_buf {
+struct nphy_noisevar_buf {
int bufcount;
int tone_id[PHY_NOISEVAR_BUFSIZE];
u32 noise_vars[PHY_NOISEVAR_BUFSIZE];
u32 min_noise_vars[PHY_NOISEVAR_BUFSIZE];
-} phy_noisevar_buf_t;
+};
-typedef struct {
+struct rssical_cache {
u16 rssical_radio_regs_2G[2];
u16 rssical_phyregs_2G[12];
u16 rssical_radio_regs_5G[2];
u16 rssical_phyregs_5G[12];
-} rssical_cache_t;
+};
-typedef struct {
+struct lcnphy_cal_results {
u16 txiqlocal_a;
u16 txiqlocal_b;
@@ -522,12 +526,12 @@ typedef struct {
u16 rxiqcal_coeff_a0;
u16 rxiqcal_coeff_b0;
-} lcnphy_cal_results_t;
+};
struct shared_phy {
- struct phy_info *phy_head;
+ struct brcms_phy *phy_head;
uint unit;
- si_t *sih;
+ struct si_pub *sih;
void *physhim;
uint corerev;
u32 machwcap;
@@ -561,7 +565,7 @@ struct shared_phy {
bool _rifs_phy;
};
-struct phy_pub {
+struct brcms_phy_pub {
uint phy_type;
uint phy_rev;
u8 phy_corenum;
@@ -574,12 +578,6 @@ struct phy_pub {
bool abgphy_encore;
};
-struct phy_info_nphy;
-typedef struct phy_info_nphy phy_info_nphy_t;
-
-struct phy_info_lcnphy;
-typedef struct phy_info_lcnphy phy_info_lcnphy_t;
-
struct phy_func_ptr {
initfn_t init;
initfn_t calinit;
@@ -594,23 +592,22 @@ struct phy_func_ptr {
rxsigpwrfn_t rxsigpwr;
detachfn_t detach;
};
-typedef struct phy_func_ptr phy_func_ptr_t;
-struct phy_info {
- wlc_phy_t pubpi_ro;
- shared_phy_t *sh;
- phy_func_ptr_t pi_fptr;
+struct brcms_phy {
+ struct brcms_phy_pub pubpi_ro;
+ struct shared_phy *sh;
+ struct phy_func_ptr pi_fptr;
void *pi_ptr;
union {
- phy_info_lcnphy_t *pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcnphy;
} u;
bool user_txpwr_at_rfport;
d11regs_t *regs;
- struct phy_info *next;
+ struct brcms_phy *next;
char *vars;
- wlc_phy_t pubpi;
+ struct brcms_phy_pub pubpi;
bool do_initcal;
bool phytest_on;
@@ -653,8 +650,8 @@ struct phy_info {
s8 tx_power_offset[TXP_NUM_RATES];
u8 tx_power_target[TXP_NUM_RATES];
- srom_fem_t srom_fem2g;
- srom_fem_t srom_fem5g;
+ struct brcms_phy_srom_fem srom_fem2g;
+ struct brcms_phy_srom_fem srom_fem5g;
u8 tx_power_max;
u8 tx_power_max_rate_ind;
@@ -725,7 +722,8 @@ struct phy_info {
u16 mintxbias;
u16 mintxmag;
- lo_complex_abgphy_info_t gphy_locomp_iq[STATIC_NUM_RF][STATIC_NUM_BB];
+ struct lo_complex_abgphy_info gphy_locomp_iq
+ [STATIC_NUM_RF][STATIC_NUM_BB];
s8 stats_11b_txpower[STATIC_NUM_RF][STATIC_NUM_BB];
u16 gain_table[TX_GAIN_TABLE_LENGTH];
bool loopback_gain;
@@ -783,8 +781,8 @@ struct phy_info {
u32 nphy_bb_mult_save;
u16 nphy_txiqlocal_bestc[11];
bool nphy_txiqlocal_coeffsvalid;
- phy_txpwrindex_t nphy_txpwrindex[PHY_CORE_NUM_2];
- phy_pwrctrl_t nphy_pwrctrl_info[PHY_CORE_NUM_2];
+ struct nphy_txpwrindex nphy_txpwrindex[PHY_CORE_NUM_2];
+ struct nphy_pwrctrl nphy_pwrctrl_info[PHY_CORE_NUM_2];
u16 cck2gpo;
u32 ofdm2gpo;
u32 ofdm5gpo;
@@ -852,8 +850,8 @@ struct phy_info {
bool internal_tx_iqlo_cal_tapoff_intpa_nphy;
s16 nphy_lastcal_temp;
- txiqcal_cache_t calibration_cache;
- rssical_cache_t rssical_cache;
+ struct txiqcal_cache calibration_cache;
+ struct rssical_cache rssical_cache;
u8 nphy_txpwr_idx[2];
u8 nphy_papd_cal_type;
@@ -884,7 +882,7 @@ struct phy_info {
u8 nphy_txcal_pwr_idx[2];
u8 nphy_rxcal_pwr_idx[2];
u16 nphy_cal_orig_tx_gain[2];
- nphy_txgains_t nphy_cal_target_gain;
+ struct nphy_txgains nphy_cal_target_gain;
u16 nphy_txcal_bbmult;
u16 nphy_gmval;
@@ -895,7 +893,7 @@ struct phy_info {
bool nphy_aband_spurwar_en;
u16 nphy_rccal_value;
u16 nphy_crsminpwr[3];
- phy_noisevar_buf_t nphy_saved_noisevars;
+ struct nphy_noisevar_buf nphy_saved_noisevars;
bool nphy_anarxlpf_adjusted;
bool nphy_crsminpwr_adjusted;
bool nphy_noisevars_adjusted;
@@ -939,141 +937,145 @@ struct phy_info {
struct wiphy *wiphy;
};
-typedef s32 fixed;
-
-typedef struct _cs32 {
+struct _cs32 {
fixed q;
fixed i;
-} cs32;
+};
-typedef struct radio_regs {
+struct radio_regs {
u16 address;
u32 init_a;
u32 init_g;
u8 do_init_a;
u8 do_init_g;
-} radio_regs_t;
+};
-typedef struct radio_20xx_regs {
+struct radio_20xx_regs {
u16 address;
u8 init;
u8 do_init;
-} radio_20xx_regs_t;
+};
-typedef struct lcnphy_radio_regs {
+struct lcnphy_radio_regs {
u16 address;
u8 init_a;
u8 init_g;
u8 do_init_a;
u8 do_init_g;
-} lcnphy_radio_regs_t;
-
-extern lcnphy_radio_regs_t lcnphy_radio_regs_2064[];
-extern lcnphy_radio_regs_t lcnphy_radio_regs_2066[];
-extern radio_regs_t regs_2055[], regs_SYN_2056[], regs_TX_2056[],
- regs_RX_2056[];
-extern radio_regs_t regs_SYN_2056_A1[], regs_TX_2056_A1[], regs_RX_2056_A1[];
-extern radio_regs_t regs_SYN_2056_rev5[], regs_TX_2056_rev5[],
- regs_RX_2056_rev5[];
-extern radio_regs_t regs_SYN_2056_rev6[], regs_TX_2056_rev6[],
- regs_RX_2056_rev6[];
-extern radio_regs_t regs_SYN_2056_rev7[], regs_TX_2056_rev7[],
- regs_RX_2056_rev7[];
-extern radio_regs_t regs_SYN_2056_rev8[], regs_TX_2056_rev8[],
- regs_RX_2056_rev8[];
-extern radio_20xx_regs_t regs_2057_rev4[], regs_2057_rev5[], regs_2057_rev5v1[];
-extern radio_20xx_regs_t regs_2057_rev7[], regs_2057_rev8[];
-
-extern char *phy_getvar(phy_info_t *pi, const char *name);
-extern int phy_getintvar(phy_info_t *pi, const char *name);
+};
+
+extern struct lcnphy_radio_regs lcnphy_radio_regs_2064[];
+extern struct lcnphy_radio_regs lcnphy_radio_regs_2066[];
+extern struct radio_regs regs_2055[], regs_SYN_2056[], regs_TX_2056[],
+ regs_RX_2056[];
+extern struct radio_regs regs_SYN_2056_A1[], regs_TX_2056_A1[],
+ regs_RX_2056_A1[];
+extern struct radio_regs regs_SYN_2056_rev5[], regs_TX_2056_rev5[],
+ regs_RX_2056_rev5[];
+extern struct radio_regs regs_SYN_2056_rev6[], regs_TX_2056_rev6[],
+ regs_RX_2056_rev6[];
+extern struct radio_regs regs_SYN_2056_rev7[], regs_TX_2056_rev7[],
+ regs_RX_2056_rev7[];
+extern struct radio_regs regs_SYN_2056_rev8[], regs_TX_2056_rev8[],
+ regs_RX_2056_rev8[];
+extern struct radio_20xx_regs regs_2057_rev4[], regs_2057_rev5[],
+ regs_2057_rev5v1[];
+extern struct radio_20xx_regs regs_2057_rev7[], regs_2057_rev8[];
+
+extern char *phy_getvar(struct brcms_phy *pi, const char *name);
+extern int phy_getintvar(struct brcms_phy *pi, const char *name);
#define PHY_GETVAR(pi, name) phy_getvar(pi, name)
#define PHY_GETINTVAR(pi, name) phy_getintvar(pi, name)
-extern u16 read_phy_reg(phy_info_t *pi, u16 addr);
-extern void write_phy_reg(phy_info_t *pi, u16 addr, u16 val);
-extern void and_phy_reg(phy_info_t *pi, u16 addr, u16 val);
-extern void or_phy_reg(phy_info_t *pi, u16 addr, u16 val);
-extern void mod_phy_reg(phy_info_t *pi, u16 addr, u16 mask, u16 val);
+extern u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
+extern void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+extern void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+extern void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+extern void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
-extern u16 read_radio_reg(phy_info_t *pi, u16 addr);
-extern void or_radio_reg(phy_info_t *pi, u16 addr, u16 val);
-extern void and_radio_reg(phy_info_t *pi, u16 addr, u16 val);
-extern void mod_radio_reg(phy_info_t *pi, u16 addr, u16 mask,
+extern u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
+extern void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+extern void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+extern void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask,
u16 val);
-extern void xor_radio_reg(phy_info_t *pi, u16 addr, u16 mask);
+extern void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
-extern void write_radio_reg(phy_info_t *pi, u16 addr, u16 val);
+extern void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void wlc_phyreg_enter(wlc_phy_t *pih);
-extern void wlc_phyreg_exit(wlc_phy_t *pih);
-extern void wlc_radioreg_enter(wlc_phy_t *pih);
-extern void wlc_radioreg_exit(wlc_phy_t *pih);
+extern void wlc_phyreg_enter(struct brcms_phy_pub *pih);
+extern void wlc_phyreg_exit(struct brcms_phy_pub *pih);
+extern void wlc_radioreg_enter(struct brcms_phy_pub *pih);
+extern void wlc_radioreg_exit(struct brcms_phy_pub *pih);
-extern void wlc_phy_read_table(phy_info_t *pi, const phytbl_info_t *ptbl_info,
+extern void wlc_phy_read_table(struct brcms_phy *pi,
+ const struct phytbl_info *ptbl_info,
u16 tblAddr, u16 tblDataHi,
u16 tblDatalo);
-extern void wlc_phy_write_table(phy_info_t *pi,
- const phytbl_info_t *ptbl_info, u16 tblAddr,
- u16 tblDataHi, u16 tblDatalo);
-extern void wlc_phy_table_addr(phy_info_t *pi, uint tbl_id, uint tbl_offset,
- u16 tblAddr, u16 tblDataHi,
+extern void wlc_phy_write_table(struct brcms_phy *pi,
+ const struct phytbl_info *ptbl_info,
+ u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+extern void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id,
+ uint tbl_offset, u16 tblAddr, u16 tblDataHi,
u16 tblDataLo);
-extern void wlc_phy_table_data_write(phy_info_t *pi, uint width, u32 val);
+extern void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
-extern void write_phy_channel_reg(phy_info_t *pi, uint val);
-extern void wlc_phy_txpower_update_shm(phy_info_t *pi);
+extern void write_phy_channel_reg(struct brcms_phy *pi, uint val);
+extern void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
extern void wlc_phy_cordic(fixed theta, cs32 *val);
extern u8 wlc_phy_nbits(s32 value);
extern void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
-extern uint wlc_phy_init_radio_regs_allbands(phy_info_t *pi,
- radio_20xx_regs_t *radioregs);
-extern uint wlc_phy_init_radio_regs(phy_info_t *pi, radio_regs_t *radioregs,
+extern uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
+ struct radio_20xx_regs *radioregs);
+extern uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
+ struct radio_regs *radioregs,
u16 core_offset);
-extern void wlc_phy_txpower_ipa_upd(phy_info_t *pi);
+extern void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
-extern void wlc_phy_do_dummy_tx(phy_info_t *pi, bool ofdm, bool pa_on);
+extern void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
extern void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real,
s32 *eps_imag);
-extern void wlc_phy_cal_perical_mphase_reset(phy_info_t *pi);
-extern void wlc_phy_cal_perical_mphase_restart(phy_info_t *pi);
+extern void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
+extern void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
-extern bool wlc_phy_attach_nphy(phy_info_t *pi);
-extern bool wlc_phy_attach_lcnphy(phy_info_t *pi);
+extern bool wlc_phy_attach_nphy(struct brcms_phy *pi);
+extern bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
-extern void wlc_phy_detach_lcnphy(phy_info_t *pi);
+extern void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
-extern void wlc_phy_init_nphy(phy_info_t *pi);
-extern void wlc_phy_init_lcnphy(phy_info_t *pi);
+extern void wlc_phy_init_nphy(struct brcms_phy *pi);
+extern void wlc_phy_init_lcnphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_init_nphy(phy_info_t *pi);
-extern void wlc_phy_cal_init_lcnphy(phy_info_t *pi);
+extern void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
+extern void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
-extern void wlc_phy_chanspec_set_nphy(phy_info_t *pi, chanspec_t chanspec);
-extern void wlc_phy_chanspec_set_lcnphy(phy_info_t *pi, chanspec_t chanspec);
-extern void wlc_phy_chanspec_set_fixup_lcnphy(phy_info_t *pi,
+extern void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi,
+ chanspec_t chanspec);
+extern void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi,
+ chanspec_t chanspec);
+extern void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi,
chanspec_t chanspec);
extern int wlc_phy_channel2freq(uint channel);
extern int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
-extern int wlc_phy_chanspec_bandrange_get(phy_info_t *, chanspec_t);
+extern int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, chanspec_t);
-extern void wlc_lcnphy_set_tx_pwr_ctrl(phy_info_t *pi, u16 mode);
-extern s8 wlc_lcnphy_get_current_tx_pwr_idx(phy_info_t *pi);
+extern void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
+extern s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
-extern void wlc_phy_txpower_recalc_target_nphy(phy_info_t *pi);
-extern void wlc_lcnphy_txpower_recalc_target(phy_info_t *pi);
-extern void wlc_phy_txpower_recalc_target_lcnphy(phy_info_t *pi);
+extern void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
+extern void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
+extern void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_set_tx_pwr_by_index(phy_info_t *pi, int index);
-extern void wlc_lcnphy_tx_pu(phy_info_t *pi, bool bEnable);
-extern void wlc_lcnphy_stop_tx_tone(phy_info_t *pi);
-extern void wlc_lcnphy_start_tx_tone(phy_info_t *pi, s32 f_kHz,
+extern void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
+extern void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
+extern void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
+extern void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz,
u16 max_val, bool iqcalmode);
-extern void wlc_phy_txpower_sromlimit_get_nphy(phy_info_t *pi, uint chan,
+extern void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
u8 *max_pwr, u8 rate_id);
extern void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
u8 rate_mcs_end,
@@ -1083,21 +1085,21 @@ extern void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power,
u8 rate_ofdm_end,
u8 rate_mcs_start);
-extern u16 wlc_lcnphy_tempsense(phy_info_t *pi, bool mode);
-extern s16 wlc_lcnphy_tempsense_new(phy_info_t *pi, bool mode);
-extern s8 wlc_lcnphy_tempsense_degree(phy_info_t *pi, bool mode);
-extern s8 wlc_lcnphy_vbatsense(phy_info_t *pi, bool mode);
-extern void wlc_phy_carrier_suppress_lcnphy(phy_info_t *pi);
-extern void wlc_lcnphy_crsuprs(phy_info_t *pi, int channel);
-extern void wlc_lcnphy_epa_switch(phy_info_t *pi, bool mode);
-extern void wlc_2064_vco_cal(phy_info_t *pi);
+extern u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
+extern s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
+extern s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
+extern s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
+extern void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
+extern void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
+extern void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
+extern void wlc_2064_vco_cal(struct brcms_phy *pi);
-extern void wlc_phy_txpower_recalc_target(phy_info_t *pi);
+extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
#define LCNPHY_TBL_ID_PAPDCOMPDELTATBL 0x18
#define LCNPHY_TX_POWER_TABLE_SIZE 128
#define LCNPHY_MAX_TX_POWER_INDEX (LCNPHY_TX_POWER_TABLE_SIZE - 1)
-#define LCNPHY_TBL_ID_TXPWRCTL 0x07
+#define LCNPHY_TBL_ID_TXPWRCTL 0x07
#define LCNPHY_TX_PWR_CTRL_OFF 0
#define LCNPHY_TX_PWR_CTRL_SW (0x1 << 15)
#define LCNPHY_TX_PWR_CTRL_HW ((0x1 << 15) | \
@@ -1106,36 +1108,39 @@ extern void wlc_phy_txpower_recalc_target(phy_info_t *pi);
#define LCNPHY_TX_PWR_CTRL_TEMPBASED 0xE001
-extern void wlc_lcnphy_write_table(phy_info_t *pi, const phytbl_info_t *pti);
-extern void wlc_lcnphy_read_table(phy_info_t *pi, phytbl_info_t *pti);
-extern void wlc_lcnphy_set_tx_iqcc(phy_info_t *pi, u16 a, u16 b);
-extern void wlc_lcnphy_set_tx_locc(phy_info_t *pi, u16 didq);
-extern void wlc_lcnphy_get_tx_iqcc(phy_info_t *pi, u16 *a, u16 *b);
-extern u16 wlc_lcnphy_get_tx_locc(phy_info_t *pi);
-extern void wlc_lcnphy_get_radio_loft(phy_info_t *pi, u8 *ei0,
+extern void wlc_lcnphy_write_table(struct brcms_phy *pi,
+ const struct phytbl_info *pti);
+extern void wlc_lcnphy_read_table(struct brcms_phy *pi,
+ struct phytbl_info *pti);
+extern void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
+extern void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
+extern void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
+extern u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
+extern void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0,
u8 *eq0, u8 *fi0, u8 *fq0);
-extern void wlc_lcnphy_calib_modes(phy_info_t *pi, uint mode);
-extern void wlc_lcnphy_deaf_mode(phy_info_t *pi, bool mode);
-extern bool wlc_phy_tpc_isenabled_lcnphy(phy_info_t *pi);
-extern void wlc_lcnphy_tx_pwr_update_npt(phy_info_t *pi);
+extern void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
+extern void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
+extern bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
+extern void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
extern s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
-extern void wlc_lcnphy_get_tssi(phy_info_t *pi, s8 *ofdm_pwr,
+extern void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr,
s8 *cck_pwr);
-extern void wlc_lcnphy_tx_power_adjustment(wlc_phy_t *ppi);
+extern void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
-extern s32 wlc_lcnphy_rx_signal_power(phy_info_t *pi, s32 gain_index);
+extern s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
#define NPHY_MAX_HPVGA1_INDEX 10
#define NPHY_DEF_HPVGA1_INDEXLIMIT 7
-typedef struct _phy_iq_est {
+struct phy_iq_est {
s32 iq_prod;
u32 i_pwr;
u32 q_pwr;
-} phy_iq_est_t;
+};
-extern void wlc_phy_stay_in_carriersearch_nphy(phy_info_t *pi, bool enable);
-extern void wlc_nphy_deaf_mode(phy_info_t *pi, bool mode);
+extern void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi,
+ bool enable);
+extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
#define wlc_phy_write_table_nphy(pi, pti) wlc_phy_write_table(pi, pti, 0x72, \
0x74, 0x73)
@@ -1145,82 +1150,86 @@ extern void wlc_nphy_deaf_mode(phy_info_t *pi, bool mode);
0x72, 0x74, 0x73)
#define wlc_nphy_table_data_write(pi, w, v) wlc_phy_table_data_write((pi), (w), (v))
-extern void wlc_phy_table_read_nphy(phy_info_t *pi, u32, u32 l, u32 o,
+extern void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o,
u32 w, void *d);
-extern void wlc_phy_table_write_nphy(phy_info_t *pi, u32, u32, u32,
+extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
u32, const void *);
#define PHY_IPA(pi) \
((pi->ipa2g_on && CHSPEC_IS2G(pi->radio_chanspec)) || \
(pi->ipa5g_on && CHSPEC_IS5G(pi->radio_chanspec)))
-#define WLC_PHY_WAR_PR51571(pi) \
+#define BRCMS_PHY_WAR_PR51571(pi) \
if (((pi)->sh->bustype == PCI_BUS) && NREV_LT((pi)->pubpi.phy_rev, 3)) \
(void)R_REG(&(pi)->regs->maccontrol)
-extern void wlc_phy_cal_perical_nphy_run(phy_info_t *pi, u8 caltype);
-extern void wlc_phy_aci_reset_nphy(phy_info_t *pi);
-extern void wlc_phy_pa_override_nphy(phy_info_t *pi, bool en);
+extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
+extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
+extern void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
-extern u8 wlc_phy_get_chan_freq_range_nphy(phy_info_t *pi, uint chan);
-extern void wlc_phy_switch_radio_nphy(phy_info_t *pi, bool on);
+extern u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
+extern void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
-extern void wlc_phy_stf_chain_upd_nphy(phy_info_t *pi);
+extern void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
-extern void wlc_phy_force_rfseq_nphy(phy_info_t *pi, u8 cmd);
-extern s16 wlc_phy_tempsense_nphy(phy_info_t *pi);
+extern void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
+extern s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
-extern u16 wlc_phy_classifier_nphy(phy_info_t *pi, u16 mask, u16 val);
+extern u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
-extern void wlc_phy_rx_iq_est_nphy(phy_info_t *pi, phy_iq_est_t *est,
+extern void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
u16 num_samps, u8 wait_time,
u8 wait_for_crs);
-extern void wlc_phy_rx_iq_coeffs_nphy(phy_info_t *pi, u8 write,
- nphy_iq_comp_t *comp);
-extern void wlc_phy_aci_and_noise_reduction_nphy(phy_info_t *pi);
+extern void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
+ struct nphy_iq_comp *comp);
+extern void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
-extern void wlc_phy_rxcore_setstate_nphy(wlc_phy_t *pih, u8 rxcore_bitmask);
-extern u8 wlc_phy_rxcore_getstate_nphy(wlc_phy_t *pih);
+extern void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih,
+ u8 rxcore_bitmask);
+extern u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
-extern void wlc_phy_txpwrctrl_enable_nphy(phy_info_t *pi, u8 ctrl_type);
-extern void wlc_phy_txpwr_fixpower_nphy(phy_info_t *pi);
-extern void wlc_phy_txpwr_apply_nphy(phy_info_t *pi);
-extern void wlc_phy_txpwr_papd_cal_nphy(phy_info_t *pi);
-extern u16 wlc_phy_txpwr_idx_get_nphy(phy_info_t *pi);
+extern void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
+extern void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
+extern void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
+extern void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
+extern u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
-extern nphy_txgains_t wlc_phy_get_tx_gain_nphy(phy_info_t *pi);
-extern int wlc_phy_cal_txiqlo_nphy(phy_info_t *pi, nphy_txgains_t target_gain,
+extern struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
+extern int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
+ struct nphy_txgains target_gain,
bool full, bool m);
-extern int wlc_phy_cal_rxiq_nphy(phy_info_t *pi, nphy_txgains_t target_gain,
+extern int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi,
+ struct nphy_txgains target_gain,
u8 type, bool d);
-extern void wlc_phy_txpwr_index_nphy(phy_info_t *pi, u8 core_mask,
+extern void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
s8 txpwrindex, bool res);
-extern void wlc_phy_rssisel_nphy(phy_info_t *pi, u8 core, u8 rssi_type);
-extern int wlc_phy_poll_rssi_nphy(phy_info_t *pi, u8 rssi_type,
+extern void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
+extern int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
s32 *rssi_buf, u8 nsamps);
-extern void wlc_phy_rssi_cal_nphy(phy_info_t *pi);
-extern int wlc_phy_aci_scan_nphy(phy_info_t *pi);
-extern void wlc_phy_cal_txgainctrl_nphy(phy_info_t *pi, s32 dBm_targetpower,
- bool debug);
-extern int wlc_phy_tx_tone_nphy(phy_info_t *pi, u32 f_kHz, u16 max_val,
+extern void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
+extern int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
+extern void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi,
+ s32 dBm_targetpower, bool debug);
+extern int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
u8 mode, u8, bool);
-extern void wlc_phy_stopplayback_nphy(phy_info_t *pi);
-extern void wlc_phy_est_tonepwr_nphy(phy_info_t *pi, s32 *qdBm_pwrbuf,
+extern void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
+extern void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
u8 num_samps);
-extern void wlc_phy_radio205x_vcocal_nphy(phy_info_t *pi);
+extern void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
-extern int wlc_phy_rssi_compute_nphy(phy_info_t *pi, wlc_d11rxhdr_t *wlc_rxh);
+extern int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi,
+ struct brcms_d11rxhdr *wlc_rxh);
#define NPHY_TESTPATTERN_BPHY_EVM 0
#define NPHY_TESTPATTERN_BPHY_RFCS 1
-extern void wlc_phy_nphy_tkip_rifs_war(phy_info_t *pi, u8 rifs);
+extern void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
-void wlc_phy_get_pwrdet_offsets(phy_info_t *pi, s8 *cckoffset,
+void wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset,
s8 *ofdmoffset);
-extern s8 wlc_phy_upd_rssi_offset(phy_info_t *pi, s8 rssi,
+extern s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi,
chanspec_t chanspec);
-extern bool wlc_phy_n_txpower_ipa_ison(phy_info_t *pih);
-#endif /* _wlc_phy_int_h_ */
+extern bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
+#endif /* _BRCM_PHY_INT_H_ */
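The callback typedefs converted above (initfn_t, chansetfn_t, rxsigpwrfn_t, detachfn_t, ...) now all take struct brcms_phy and are collected in struct phy_func_ptr, which struct brcms_phy embeds as pi_fptr; common code dispatches through that table instead of calling PHY-specific functions directly. A minimal sketch of how the LCN variant could be wired up is shown below, assuming the phy_int.h declarations are in scope; the helper name example_register_lcnphy_ops is hypothetical, only the four fields visible in this hunk are set, and in the driver the per-PHY attach routines fill the table.

/* Sketch only: register the LCNPHY callbacks in the dispatch table. */
static bool example_register_lcnphy_ops(struct brcms_phy *pi)
{
	pi->pi_fptr.init = wlc_phy_init_lcnphy;
	pi->pi_fptr.calinit = wlc_phy_cal_init_lcnphy;
	pi->pi_fptr.rxsigpwr = wlc_lcnphy_rx_signal_power;
	pi->pi_fptr.detach = wlc_phy_detach_lcnphy;
	return true;
}

/* Callers then stay PHY-agnostic, e.g. pi->pi_fptr.init(pi); */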
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_lcn.c b/drivers/staging/brcm80211/brcmsmac/phy/phy_lcn.c
index b8864c5b7a1..6a3fbe67302 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_lcn.c
+++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -14,28 +14,20 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/bitops.h>
#include <linux/delay.h>
-#include <wlc_cfg.h>
-#include <linux/pci.h>
-#include <aiutils.h>
-#include <wlc_pmu.h>
-#include <bcmnvram.h>
-#include <bcmdevs.h>
-#include <sbhnddma.h>
-
-#include "wlc_phy_radio.h"
-#include "wlc_phy_int.h"
-#include "wlc_phy_qmath.h"
-#include "wlc_phy_lcn.h"
-#include "wlc_phytbl_lcn.h"
+#include <pmu.h>
+#include <d11.h>
+#include <phy_shim.h>
+#include "phy_qmath.h"
+#include "phy_hal.h"
+#include "phy_radio.h"
+#include "phytbl_lcn.h"
+#include "phy_lcn.h"
#define PLL_2064_NDIV 90
-#define PLL_2064_LOW_END_VCO 3000
-#define PLL_2064_LOW_END_KVCO 27
+#define PLL_2064_LOW_END_VCO 3000
+#define PLL_2064_LOW_END_KVCO 27
#define PLL_2064_HIGH_END_VCO 4200
#define PLL_2064_HIGH_END_KVCO 68
#define PLL_2064_LOOP_BW_DOUBLER 200
@@ -46,7 +38,7 @@
#define PLL_2064_MHZ 1000000
#define PLL_2064_OPEN_LOOP_DELAY 5
-#define TEMPSENSE 1
+#define TEMPSENSE 1
#define VBATSENSE 2
#define NOISE_IF_UPD_CHK_INTERVAL 1
@@ -58,10 +50,10 @@
#define NOISE_IF_CHK 1
#define NOISE_IF_ON 2
-#define PAPD_BLANKING_PROFILE 3
+#define PAPD_BLANKING_PROFILE 3
#define PAPD2LUT 0
-#define PAPD_CORR_NORM 0
-#define PAPD_BLANKING_THRESHOLD 0
+#define PAPD_CORR_NORM 0
+#define PAPD_BLANKING_THRESHOLD 0
#define PAPD_STOP_AFTER_LAST_UPDATE 0
#define LCN_TARGET_PWR 60
@@ -116,9 +108,9 @@
#define LCNPHY_TBL_ID_SAMPLEPLAY 0x15
#define LCNPHY_TBL_ID_SAMPLEPLAY1 0x16
-#define LCNPHY_TX_PWR_CTRL_RATE_OFFSET 832
-#define LCNPHY_TX_PWR_CTRL_MAC_OFFSET 128
-#define LCNPHY_TX_PWR_CTRL_GAIN_OFFSET 192
+#define LCNPHY_TX_PWR_CTRL_RATE_OFFSET 832
+#define LCNPHY_TX_PWR_CTRL_MAC_OFFSET 128
+#define LCNPHY_TX_PWR_CTRL_GAIN_OFFSET 192
#define LCNPHY_TX_PWR_CTRL_IQ_OFFSET 320
#define LCNPHY_TX_PWR_CTRL_LO_OFFSET 448
#define LCNPHY_TX_PWR_CTRL_PWR_OFFSET 576
@@ -144,60 +136,55 @@
(0 != (read_phy_reg((pi), 0x43b) & (0x1 << 6)))
#define wlc_lcnphy_total_tx_frames(pi) \
- wlapi_bmac_read_shm((pi)->sh->physhim, M_UCODE_MACSTAT + offsetof(macstat_t, txallfrm))
+ wlapi_bmac_read_shm((pi)->sh->physhim, \
+ M_UCODE_MACSTAT + offsetof(struct macstat, txallfrm))
-typedef struct {
+struct lcnphy_txgains {
u16 gm_gain;
u16 pga_gain;
u16 pad_gain;
u16 dac_gain;
-} lcnphy_txgains_t;
+};
-typedef enum {
+enum lcnphy_cal_mode {
LCNPHY_CAL_FULL,
LCNPHY_CAL_RECAL,
LCNPHY_CAL_CURRECAL,
LCNPHY_CAL_DIGCAL,
LCNPHY_CAL_GCTRL
-} lcnphy_cal_mode_t;
-
-typedef struct {
- lcnphy_txgains_t gains;
- bool useindex;
- u8 index;
-} lcnphy_txcalgains_t;
+};
-typedef struct {
+struct lcnphy_rx_iqcomp {
u8 chan;
s16 a;
s16 b;
-} lcnphy_rx_iqcomp_t;
+};
-typedef struct {
+struct lcnphy_spb_tone {
s16 re;
s16 im;
-} lcnphy_spb_tone_t;
+};
-typedef struct {
+struct lcnphy_unsign16_struct {
u16 re;
u16 im;
-} lcnphy_unsign16_struct;
+};
-typedef struct {
+struct lcnphy_iq_est {
u32 iq_prod;
u32 i_pwr;
u32 q_pwr;
-} lcnphy_iq_est_t;
+};
-typedef struct {
+struct lcnphy_sfo_cfg {
u16 ptcentreTs20;
u16 ptcentreFactor;
-} lcnphy_sfo_cfg_t;
+};
-typedef enum {
+enum lcnphy_papd_cal_type {
LCNPHY_PAPD_CAL_CW,
LCNPHY_PAPD_CAL_OFDM
-} lcnphy_papd_cal_type_t;
+};
typedef u16 iqcal_gain_params_lcnphy[9];
@@ -214,7 +201,7 @@ static const u16 iqcal_gainparams_numgains_lcnphy[1] = {
sizeof(*tbl_iqcal_gainparams_lcnphy_2G),
};
-static const lcnphy_sfo_cfg_t lcnphy_sfo_cfg[] = {
+static const struct lcnphy_sfo_cfg lcnphy_sfo_cfg[] = {
{965, 1087},
{967, 1085},
{969, 1082},
@@ -280,7 +267,7 @@ u16 lcnphy_iqcal_ir_gainladder[] = {
};
static const
-lcnphy_spb_tone_t lcnphy_spb_tone_3750[] = {
+struct lcnphy_spb_tone lcnphy_spb_tone_3750[] = {
{88, 0},
{73, 49},
{34, 81},
@@ -373,7 +360,7 @@ u16 rxiq_cal_rf_reg[11] = {
};
static const
-lcnphy_rx_iqcomp_t lcnphy_rx_iqcomp_table_rev0[] = {
+struct lcnphy_rx_iqcomp lcnphy_rx_iqcomp_table_rev0[] = {
{1, 0, 0},
{2, 0, 0},
{3, 0, 0},
@@ -548,13 +535,7 @@ static const s8 lcnphy_gain_index_offset_for_rssi[] = {
-2
};
-extern const u8 spur_tbl_rev0[];
-extern const u32 dot11lcnphytbl_rx_gain_info_sz_rev1;
-extern const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_rev1[];
-extern const dot11lcnphytbl_info_t dot11lcn_sw_ctrl_tbl_info_4313_bt_epa;
-extern const dot11lcnphytbl_info_t dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250;
-
-typedef struct _chan_info_2064_lcnphy {
+struct chan_info_2064_lcnphy {
uint chan;
uint freq;
u8 logen_buftune;
@@ -565,9 +546,9 @@ typedef struct _chan_info_2064_lcnphy {
u8 pa_rxrf_lna1_freq_tune;
u8 pa_rxrf_lna2_freq_tune;
u8 rxrf_rxrf_spare1;
-} chan_info_2064_lcnphy_t;
+};
-static chan_info_2064_lcnphy_t chan_info_2064_lcnphy[] = {
+static struct chan_info_2064_lcnphy chan_info_2064_lcnphy[] = {
{1, 2412, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{2, 2417, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{3, 2422, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
@@ -584,7 +565,7 @@ static chan_info_2064_lcnphy_t chan_info_2064_lcnphy[] = {
{14, 2484, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
};
-lcnphy_radio_regs_t lcnphy_radio_regs_2064[] = {
+struct lcnphy_radio_regs lcnphy_radio_regs_2064[] = {
{0x00, 0, 0, 0, 0},
{0x01, 0x64, 0x64, 0, 0},
{0x02, 0x20, 0x20, 0, 0},
@@ -896,9 +877,8 @@ lcnphy_radio_regs_t lcnphy_radio_regs_2064[] = {
#define LCNPHY_NUM_DIG_FILT_COEFFS 16
#define LCNPHY_NUM_TX_DIG_FILTERS_CCK 13
-u16
- LCNPHY_txdigfiltcoeffs_cck[LCNPHY_NUM_TX_DIG_FILTERS_CCK]
- [LCNPHY_NUM_DIG_FILT_COEFFS + 1] = {
+u16 LCNPHY_txdigfiltcoeffs_cck[LCNPHY_NUM_TX_DIG_FILTERS_CCK]
+ [LCNPHY_NUM_DIG_FILT_COEFFS + 1] = {
{0, 1, 415, 1874, 64, 128, 64, 792, 1656, 64, 128, 64, 778, 1582, 64,
128, 64,},
{1, 1, 402, 1847, 259, 59, 259, 671, 1794, 68, 54, 68, 608, 1863, 93,
@@ -928,9 +908,8 @@ u16
};
#define LCNPHY_NUM_TX_DIG_FILTERS_OFDM 3
-u16
- LCNPHY_txdigfiltcoeffs_ofdm[LCNPHY_NUM_TX_DIG_FILTERS_OFDM]
- [LCNPHY_NUM_DIG_FILT_COEFFS + 1] = {
+u16 LCNPHY_txdigfiltcoeffs_ofdm[LCNPHY_NUM_TX_DIG_FILTERS_OFDM]
+ [LCNPHY_NUM_DIG_FILT_COEFFS + 1] = {
{0, 0, 0xa2, 0x0, 0x100, 0x100, 0x0, 0x0, 0x0, 0x100, 0x0, 0x0,
0x278, 0xfea0, 0x80, 0x100, 0x80,},
{1, 0, 374, 0xFF79, 16, 32, 16, 799, 0xFE74, 50, 32, 50,
@@ -982,78 +961,83 @@ u16
static u32 wlc_lcnphy_qdiv_roundup(u32 divident, u32 divisor,
u8 precision);
-static void wlc_lcnphy_set_rx_gain_by_distribution(phy_info_t *pi,
+static void wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
u16 ext_lna, u16 trsw,
u16 biq2, u16 biq1,
u16 tia, u16 lna2,
u16 lna1);
-static void wlc_lcnphy_clear_tx_power_offsets(phy_info_t *pi);
-static void wlc_lcnphy_set_pa_gain(phy_info_t *pi, u16 gain);
-static void wlc_lcnphy_set_trsw_override(phy_info_t *pi, bool tx, bool rx);
-static void wlc_lcnphy_set_bbmult(phy_info_t *pi, u8 m0);
-static u8 wlc_lcnphy_get_bbmult(phy_info_t *pi);
-static void wlc_lcnphy_get_tx_gain(phy_info_t *pi, lcnphy_txgains_t *gains);
-static void wlc_lcnphy_set_tx_gain_override(phy_info_t *pi, bool bEnable);
-static void wlc_lcnphy_toggle_afe_pwdn(phy_info_t *pi);
-static void wlc_lcnphy_rx_gain_override_enable(phy_info_t *pi, bool enable);
-static void wlc_lcnphy_set_tx_gain(phy_info_t *pi,
- lcnphy_txgains_t *target_gains);
-static bool wlc_lcnphy_rx_iq_est(phy_info_t *pi, u16 num_samps,
- u8 wait_time, lcnphy_iq_est_t *iq_est);
-static bool wlc_lcnphy_calc_rx_iq_comp(phy_info_t *pi, u16 num_samps);
-static u16 wlc_lcnphy_get_pa_gain(phy_info_t *pi);
-static void wlc_lcnphy_afe_clk_init(phy_info_t *pi, u8 mode);
-extern void wlc_lcnphy_tx_pwr_ctrl_init(wlc_phy_t *ppi);
-static void wlc_lcnphy_radio_2064_channel_tune_4313(phy_info_t *pi,
+static void wlc_lcnphy_clear_tx_power_offsets(struct brcms_phy *pi);
+static void wlc_lcnphy_set_pa_gain(struct brcms_phy *pi, u16 gain);
+static void wlc_lcnphy_set_trsw_override(struct brcms_phy *pi, bool tx,
+ bool rx);
+static void wlc_lcnphy_set_bbmult(struct brcms_phy *pi, u8 m0);
+static u8 wlc_lcnphy_get_bbmult(struct brcms_phy *pi);
+static void wlc_lcnphy_get_tx_gain(struct brcms_phy *pi,
+ struct lcnphy_txgains *gains);
+static void wlc_lcnphy_set_tx_gain_override(struct brcms_phy *pi, bool bEnable);
+static void wlc_lcnphy_toggle_afe_pwdn(struct brcms_phy *pi);
+static void wlc_lcnphy_rx_gain_override_enable(struct brcms_phy *pi,
+ bool enable);
+static void wlc_lcnphy_set_tx_gain(struct brcms_phy *pi,
+ struct lcnphy_txgains *target_gains);
+static bool wlc_lcnphy_rx_iq_est(struct brcms_phy *pi, u16 num_samps,
+ u8 wait_time, struct lcnphy_iq_est *iq_est);
+static bool wlc_lcnphy_calc_rx_iq_comp(struct brcms_phy *pi, u16 num_samps);
+static u16 wlc_lcnphy_get_pa_gain(struct brcms_phy *pi);
+static void wlc_lcnphy_afe_clk_init(struct brcms_phy *pi, u8 mode);
+static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi);
+static void wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi,
u8 channel);
-static void wlc_lcnphy_load_tx_gain_table(phy_info_t *pi,
- const lcnphy_tx_gain_tbl_entry *g);
+static void wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
+ const struct lcnphy_tx_gain_tbl_entry *g);
-static void wlc_lcnphy_samp_cap(phy_info_t *pi, int clip_detect_algo,
+static void wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo,
u16 thresh, s16 *ptr, int mode);
static int wlc_lcnphy_calc_floor(s16 coeff, int type);
-static void wlc_lcnphy_tx_iqlo_loopback(phy_info_t *pi,
+static void wlc_lcnphy_tx_iqlo_loopback(struct brcms_phy *pi,
u16 *values_to_save);
-static void wlc_lcnphy_tx_iqlo_loopback_cleanup(phy_info_t *pi,
+static void wlc_lcnphy_tx_iqlo_loopback_cleanup(struct brcms_phy *pi,
u16 *values_to_save);
-static void wlc_lcnphy_set_cc(phy_info_t *pi, int cal_type, s16 coeff_x,
+static void wlc_lcnphy_set_cc(struct brcms_phy *pi, int cal_type, s16 coeff_x,
s16 coeff_y);
-static lcnphy_unsign16_struct wlc_lcnphy_get_cc(phy_info_t *pi, int cal_type);
-static void wlc_lcnphy_a1(phy_info_t *pi, int cal_type,
+static struct lcnphy_unsign16_struct wlc_lcnphy_get_cc(struct brcms_phy *pi,
+ int cal_type);
+static void wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type,
int num_levels, int step_size_lg2);
-static void wlc_lcnphy_tx_iqlo_soft_cal_full(phy_info_t *pi);
+static void wlc_lcnphy_tx_iqlo_soft_cal_full(struct brcms_phy *pi);
-static void wlc_lcnphy_set_chanspec_tweaks(phy_info_t *pi,
+static void wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi,
chanspec_t chanspec);
-static void wlc_lcnphy_agc_temp_init(phy_info_t *pi);
-static void wlc_lcnphy_temp_adj(phy_info_t *pi);
-static void wlc_lcnphy_clear_papd_comptable(phy_info_t *pi);
-static void wlc_lcnphy_baseband_init(phy_info_t *pi);
-static void wlc_lcnphy_radio_init(phy_info_t *pi);
-static void wlc_lcnphy_rc_cal(phy_info_t *pi);
-static void wlc_lcnphy_rcal(phy_info_t *pi);
-static void wlc_lcnphy_txrx_spur_avoidance_mode(phy_info_t *pi, bool enable);
-static int wlc_lcnphy_load_tx_iir_filter(phy_info_t *pi, bool is_ofdm,
+static void wlc_lcnphy_agc_temp_init(struct brcms_phy *pi);
+static void wlc_lcnphy_temp_adj(struct brcms_phy *pi);
+static void wlc_lcnphy_clear_papd_comptable(struct brcms_phy *pi);
+static void wlc_lcnphy_baseband_init(struct brcms_phy *pi);
+static void wlc_lcnphy_radio_init(struct brcms_phy *pi);
+static void wlc_lcnphy_rc_cal(struct brcms_phy *pi);
+static void wlc_lcnphy_rcal(struct brcms_phy *pi);
+static void wlc_lcnphy_txrx_spur_avoidance_mode(struct brcms_phy *pi,
+ bool enable);
+static int wlc_lcnphy_load_tx_iir_filter(struct brcms_phy *pi, bool is_ofdm,
s16 filt_type);
-static void wlc_lcnphy_set_rx_iq_comp(phy_info_t *pi, u16 a, u16 b);
+static void wlc_lcnphy_set_rx_iq_comp(struct brcms_phy *pi, u16 a, u16 b);
-void wlc_lcnphy_write_table(phy_info_t *pi, const phytbl_info_t *pti)
+void wlc_lcnphy_write_table(struct brcms_phy *pi, const struct phytbl_info *pti)
{
wlc_phy_write_table(pi, pti, 0x455, 0x457, 0x456);
}
-void wlc_lcnphy_read_table(phy_info_t *pi, phytbl_info_t *pti)
+void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti)
{
wlc_phy_read_table(pi, pti, 0x455, 0x457, 0x456);
}
static void
-wlc_lcnphy_common_read_table(phy_info_t *pi, u32 tbl_id,
+wlc_lcnphy_common_read_table(struct brcms_phy *pi, u32 tbl_id,
const void *tbl_ptr, u32 tbl_len,
u32 tbl_width, u32 tbl_offset)
{
- phytbl_info_t tab;
+ struct phytbl_info tab;
tab.tbl_id = tbl_id;
tab.tbl_ptr = tbl_ptr;
tab.tbl_len = tbl_len;
@@ -1063,12 +1047,12 @@ wlc_lcnphy_common_read_table(phy_info_t *pi, u32 tbl_id,
}
static void
-wlc_lcnphy_common_write_table(phy_info_t *pi, u32 tbl_id,
+wlc_lcnphy_common_write_table(struct brcms_phy *pi, u32 tbl_id,
const void *tbl_ptr, u32 tbl_len,
u32 tbl_width, u32 tbl_offset)
{
- phytbl_info_t tab;
+ struct phytbl_info tab;
tab.tbl_id = tbl_id;
tab.tbl_ptr = tbl_ptr;
tab.tbl_len = tbl_len;
@@ -1123,10 +1107,10 @@ static int wlc_lcnphy_calc_floor(s16 coeff_x, int type)
return k;
}
-s8 wlc_lcnphy_get_current_tx_pwr_idx(phy_info_t *pi)
+s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi)
{
s8 index;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (txpwrctrl_off(pi))
index = pi_lcn->lcnphy_current_index;
@@ -1139,16 +1123,16 @@ s8 wlc_lcnphy_get_current_tx_pwr_idx(phy_info_t *pi)
return index;
}
-static u32 wlc_lcnphy_measure_digital_power(phy_info_t *pi, u16 nsamples)
+static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
{
- lcnphy_iq_est_t iq_est = { 0, 0, 0 };
+ struct lcnphy_iq_est iq_est = { 0, 0, 0 };
if (!wlc_lcnphy_rx_iq_est(pi, nsamples, 32, &iq_est))
return 0;
return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
}
-void wlc_lcnphy_crsuprs(phy_info_t *pi, int channel)
+void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel)
{
u16 afectrlovr, afectrlovrval;
afectrlovr = read_phy_reg(pi, 0x43b);
@@ -1179,7 +1163,7 @@ void wlc_lcnphy_crsuprs(phy_info_t *pi, int channel)
}
}
-static void wlc_lcnphy_toggle_afe_pwdn(phy_info_t *pi)
+static void wlc_lcnphy_toggle_afe_pwdn(struct brcms_phy *pi)
{
u16 save_AfeCtrlOvrVal, save_AfeCtrlOvr;
@@ -1196,7 +1180,8 @@ static void wlc_lcnphy_toggle_afe_pwdn(phy_info_t *pi)
write_phy_reg(pi, 0x43b, save_AfeCtrlOvr);
}
-static void wlc_lcnphy_txrx_spur_avoidance_mode(phy_info_t *pi, bool enable)
+static void
+wlc_lcnphy_txrx_spur_avoidance_mode(struct brcms_phy *pi, bool enable)
{
if (enable) {
write_phy_reg(pi, 0x942, 0x7);
@@ -1215,11 +1200,11 @@ static void wlc_lcnphy_txrx_spur_avoidance_mode(phy_info_t *pi, bool enable)
wlapi_switch_macfreq(pi->sh->physhim, enable);
}
-void wlc_phy_chanspec_set_lcnphy(phy_info_t *pi, chanspec_t chanspec)
+void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, chanspec_t chanspec)
{
u8 channel = CHSPEC_CHANNEL(chanspec);
- wlc_phy_chanspec_radio_set((wlc_phy_t *) pi, chanspec);
+ wlc_phy_chanspec_radio_set((struct brcms_phy_pub *) pi, chanspec);
wlc_lcnphy_set_chanspec_tweaks(pi, pi->radio_chanspec);
@@ -1252,7 +1237,7 @@ void wlc_phy_chanspec_set_lcnphy(phy_info_t *pi, chanspec_t chanspec)
}
-static void wlc_lcnphy_set_dac_gain(phy_info_t *pi, u16 dac_gain)
+static void wlc_lcnphy_set_dac_gain(struct brcms_phy *pi, u16 dac_gain)
{
u16 dac_ctrl;
@@ -1263,7 +1248,7 @@ static void wlc_lcnphy_set_dac_gain(phy_info_t *pi, u16 dac_gain)
}
-static void wlc_lcnphy_set_tx_gain_override(phy_info_t *pi, bool bEnable)
+static void wlc_lcnphy_set_tx_gain_override(struct brcms_phy *pi, bool bEnable)
{
u16 bit = bEnable ? 1 : 0;
@@ -1274,7 +1259,7 @@ static void wlc_lcnphy_set_tx_gain_override(phy_info_t *pi, bool bEnable)
mod_phy_reg(pi, 0x43b, (0x1 << 6), bit << 6);
}
-static u16 wlc_lcnphy_get_pa_gain(phy_info_t *pi)
+static u16 wlc_lcnphy_get_pa_gain(struct brcms_phy *pi)
{
u16 pa_gain;
@@ -1285,8 +1270,8 @@ static u16 wlc_lcnphy_get_pa_gain(phy_info_t *pi)
return pa_gain;
}
-static void
-wlc_lcnphy_set_tx_gain(phy_info_t *pi, lcnphy_txgains_t *target_gains)
+static void wlc_lcnphy_set_tx_gain(struct brcms_phy *pi,
+ struct lcnphy_txgains *target_gains)
{
u16 pa_gain = wlc_lcnphy_get_pa_gain(pi);
@@ -1311,10 +1296,10 @@ wlc_lcnphy_set_tx_gain(phy_info_t *pi, lcnphy_txgains_t *target_gains)
wlc_lcnphy_enable_tx_gain_override(pi);
}
-static void wlc_lcnphy_set_bbmult(phy_info_t *pi, u8 m0)
+static void wlc_lcnphy_set_bbmult(struct brcms_phy *pi, u8 m0)
{
u16 m0m1 = (u16) m0 << 8;
- phytbl_info_t tab;
+ struct phytbl_info tab;
tab.tbl_ptr = &m0m1;
tab.tbl_len = 1;
@@ -1324,10 +1309,10 @@ static void wlc_lcnphy_set_bbmult(phy_info_t *pi, u8 m0)
wlc_lcnphy_write_table(pi, &tab);
}
-static void wlc_lcnphy_clear_tx_power_offsets(phy_info_t *pi)
+static void wlc_lcnphy_clear_tx_power_offsets(struct brcms_phy *pi)
{
u32 data_buf[64];
- phytbl_info_t tab;
+ struct phytbl_info tab;
memset(data_buf, 0, sizeof(data_buf));
@@ -1347,13 +1332,14 @@ static void wlc_lcnphy_clear_tx_power_offsets(phy_info_t *pi)
wlc_lcnphy_write_table(pi, &tab);
}
-typedef enum {
+enum lcnphy_tssi_mode {
LCNPHY_TSSI_PRE_PA,
LCNPHY_TSSI_POST_PA,
LCNPHY_TSSI_EXT
-} lcnphy_tssi_mode_t;
+};
-static void wlc_lcnphy_set_tssi_mux(phy_info_t *pi, lcnphy_tssi_mode_t pos)
+static void
+wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
{
mod_phy_reg(pi, 0x4d7, (0x1 << 0), (0x1) << 0);
@@ -1392,7 +1378,7 @@ static void wlc_lcnphy_set_tssi_mux(phy_info_t *pi, lcnphy_tssi_mode_t pos)
}
}
-static u16 wlc_lcnphy_rfseq_tbl_adc_pwrup(phy_info_t *pi)
+static u16 wlc_lcnphy_rfseq_tbl_adc_pwrup(struct brcms_phy *pi)
{
u16 N1, N2, N3, N4, N5, N6, N;
N1 = ((read_phy_reg(pi, 0x4a5) & (0xff << 0))
@@ -1413,10 +1399,10 @@ static u16 wlc_lcnphy_rfseq_tbl_adc_pwrup(phy_info_t *pi)
return N;
}
-static void wlc_lcnphy_pwrctrl_rssiparams(phy_info_t *pi)
+static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
{
u16 auxpga_vmid, auxpga_vmid_temp, auxpga_gain_temp;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
auxpga_vmid =
(2 << 8) | (pi_lcn->lcnphy_rssi_vc << 4) | pi_lcn->lcnphy_rssi_vf;
@@ -1457,9 +1443,9 @@ static void wlc_lcnphy_pwrctrl_rssiparams(phy_info_t *pi)
mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
}
-static void wlc_lcnphy_tssi_setup(phy_info_t *pi)
+static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
{
- phytbl_info_t tab;
+ struct phytbl_info tab;
u32 rfseq, ind;
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
@@ -1571,10 +1557,10 @@ static void wlc_lcnphy_tssi_setup(phy_info_t *pi)
wlc_lcnphy_pwrctrl_rssiparams(pi);
}
-void wlc_lcnphy_tx_pwr_update_npt(phy_info_t *pi)
+void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi)
{
u16 tx_cnt, tx_total, npt;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
tx_total = wlc_lcnphy_total_tx_frames(pi);
tx_cnt = tx_total - pi_lcn->lcnphy_tssi_tx_cnt;
@@ -1601,9 +1587,9 @@ s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1)
return p;
}
-static void wlc_lcnphy_txpower_reset_npt(phy_info_t *pi)
+static void wlc_lcnphy_txpower_reset_npt(struct brcms_phy *pi)
{
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
return;
@@ -1611,18 +1597,18 @@ static void wlc_lcnphy_txpower_reset_npt(phy_info_t *pi)
pi_lcn->lcnphy_tssi_npt = LCNPHY_TX_PWR_CTRL_START_NPT;
}
-void wlc_lcnphy_txpower_recalc_target(phy_info_t *pi)
+void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi)
{
- phytbl_info_t tab;
- u32 rate_table[WLC_NUM_RATES_CCK + WLC_NUM_RATES_OFDM +
- WLC_NUM_RATES_MCS_1_STREAM];
+ struct phytbl_info tab;
+ u32 rate_table[BRCMS_NUM_RATES_CCK + BRCMS_NUM_RATES_OFDM +
+ BRCMS_NUM_RATES_MCS_1_STREAM];
uint i, j;
if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
return;
for (i = 0, j = 0; i < ARRAY_SIZE(rate_table); i++, j++) {
- if (i == WLC_NUM_RATES_CCK + WLC_NUM_RATES_OFDM)
+ if (i == BRCMS_NUM_RATES_CCK + BRCMS_NUM_RATES_OFDM)
j = TXP_FIRST_MCS_20_SISO;
rate_table[i] = (u32) ((s32) (-pi->tx_power_offset[j]));
@@ -1642,13 +1628,13 @@ void wlc_lcnphy_txpower_recalc_target(phy_info_t *pi)
}
}
-static void wlc_lcnphy_set_tx_pwr_soft_ctrl(phy_info_t *pi, s8 index)
+static void wlc_lcnphy_set_tx_pwr_soft_ctrl(struct brcms_phy *pi, s8 index)
{
u32 cck_offset[4] = { 22, 22, 22, 22 };
u32 ofdm_offset, reg_offset_cck;
int i;
u16 index2;
- phytbl_info_t tab;
+ struct phytbl_info tab;
if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi))
return;
@@ -1695,13 +1681,13 @@ static void wlc_lcnphy_set_tx_pwr_soft_ctrl(phy_info_t *pi, s8 index)
}
-static s8 wlc_lcnphy_tempcompensated_txpwrctrl(phy_info_t *pi)
+static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
{
s8 index, delta_brd, delta_temp, new_index, tempcorrx;
s16 manp, meas_temp, temp_diff;
bool neg = 0;
u16 temp;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi))
return pi_lcn->lcnphy_current_index;
@@ -1760,7 +1746,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(phy_info_t *pi)
return new_index;
}
-static u16 wlc_lcnphy_set_tx_pwr_ctrl_mode(phy_info_t *pi, u16 mode)
+static u16 wlc_lcnphy_set_tx_pwr_ctrl_mode(struct brcms_phy *pi, u16 mode)
{
u16 current_mode = mode;
@@ -1773,11 +1759,11 @@ static u16 wlc_lcnphy_set_tx_pwr_ctrl_mode(phy_info_t *pi, u16 mode)
return current_mode;
}
-void wlc_lcnphy_set_tx_pwr_ctrl(phy_info_t *pi, u16 mode)
+void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode)
{
u16 old_mode = wlc_lcnphy_get_tx_pwr_ctrl(pi);
s8 index;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
mode = wlc_lcnphy_set_tx_pwr_ctrl_mode(pi, mode);
old_mode = wlc_lcnphy_set_tx_pwr_ctrl_mode(pi, old_mode);
@@ -1824,7 +1810,7 @@ void wlc_lcnphy_set_tx_pwr_ctrl(phy_info_t *pi, u16 mode)
}
}
-static bool wlc_lcnphy_iqcal_wait(phy_info_t *pi)
+static bool wlc_lcnphy_iqcal_wait(struct brcms_phy *pi)
{
uint delay_count = 0;
@@ -1840,12 +1826,12 @@ static bool wlc_lcnphy_iqcal_wait(phy_info_t *pi)
}
static void
-wlc_lcnphy_tx_iqlo_cal(phy_info_t *pi,
- lcnphy_txgains_t *target_gains,
- lcnphy_cal_mode_t cal_mode, bool keep_tone)
+wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
+ struct lcnphy_txgains *target_gains,
+ enum lcnphy_cal_mode cal_mode, bool keep_tone)
{
- lcnphy_txgains_t cal_gains, temp_gains;
+ struct lcnphy_txgains cal_gains, temp_gains;
u16 hash;
u8 band_idx;
int j;
@@ -1871,10 +1857,10 @@ wlc_lcnphy_tx_iqlo_cal(phy_info_t *pi,
u16 tx_pwr_ctrl_old, save_txpwrctrlrfctrl2;
u16 save_sslpnCalibClkEnCtrl, save_sslpnRxFeClkEnCtrl;
bool tx_gain_override_old;
- lcnphy_txgains_t old_gains;
+ struct lcnphy_txgains old_gains;
uint i, n_cal_cmds = 0, n_cal_start = 0;
u16 *values_to_save;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (NORADIO_ENAB(pi->pubpi))
return;
@@ -2075,11 +2061,11 @@ wlc_lcnphy_tx_iqlo_cal(phy_info_t *pi,
}
-static void wlc_lcnphy_idle_tssi_est(wlc_phy_t *ppi)
+static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
{
bool suspend, tx_gain_override_old;
- lcnphy_txgains_t old_gains;
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct lcnphy_txgains old_gains;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
u16 idleTssi, idleTssi0_2C, idleTssi0_OB, idleTssi0_regvalue_OB,
idleTssi0_regvalue_2C;
u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
@@ -2091,7 +2077,7 @@ static void wlc_lcnphy_idle_tssi_est(wlc_phy_t *ppi)
idleTssi = read_phy_reg(pi, 0x4ab);
suspend =
(0 ==
- (R_REG(&((phy_info_t *) pi)->regs->maccontrol) &
+ (R_REG(&((struct brcms_phy *) pi)->regs->maccontrol) &
MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -2141,20 +2127,20 @@ static void wlc_lcnphy_idle_tssi_est(wlc_phy_t *ppi)
wlapi_enable_mac(pi->sh->physhim);
}
-static void wlc_lcnphy_vbat_temp_sense_setup(phy_info_t *pi, u8 mode)
+static void wlc_lcnphy_vbat_temp_sense_setup(struct brcms_phy *pi, u8 mode)
{
bool suspend;
u16 save_txpwrCtrlEn;
u8 auxpga_vmidcourse, auxpga_vmidfine, auxpga_gain;
u16 auxpga_vmid;
- phytbl_info_t tab;
+ struct phytbl_info tab;
u32 val;
u8 save_reg007, save_reg0FF, save_reg11F, save_reg005, save_reg025,
save_reg112;
u16 values_to_save[14];
s8 index;
int i;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
udelay(999);
save_reg007 = (u8) read_radio_reg(pi, RADIO_2064_REG007);
@@ -2283,15 +2269,15 @@ static void wlc_lcnphy_vbat_temp_sense_setup(phy_info_t *pi, u8 mode)
udelay(999);
}
-void WLBANDINITFN(wlc_lcnphy_tx_pwr_ctrl_init) (wlc_phy_t *ppi)
+static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
{
- lcnphy_txgains_t tx_gains;
+ struct lcnphy_txgains tx_gains;
u8 bbmult;
- phytbl_info_t tab;
+ struct phytbl_info tab;
s32 a1, b0, b1;
s32 tssi, pwr, maxtargetpwr, mintargetpwr;
bool suspend;
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
suspend =
(0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
@@ -2361,10 +2347,10 @@ void WLBANDINITFN(wlc_lcnphy_tx_pwr_ctrl_init) (wlc_phy_t *ppi)
wlapi_enable_mac(pi->sh->physhim);
}
-static u8 wlc_lcnphy_get_bbmult(phy_info_t *pi)
+static u8 wlc_lcnphy_get_bbmult(struct brcms_phy *pi)
{
u16 m0m1;
- phytbl_info_t tab;
+ struct phytbl_info tab;
tab.tbl_ptr = &m0m1;
tab.tbl_len = 1;
@@ -2376,7 +2362,7 @@ static u8 wlc_lcnphy_get_bbmult(phy_info_t *pi)
return (u8) ((m0m1 & 0xff00) >> 8);
}
-static void wlc_lcnphy_set_pa_gain(phy_info_t *pi, u16 gain)
+static void wlc_lcnphy_set_pa_gain(struct brcms_phy *pi, u16 gain)
{
mod_phy_reg(pi, 0x4fb,
LCNPHY_txgainctrlovrval1_pagain_ovr_val1_MASK,
@@ -2387,7 +2373,7 @@ static void wlc_lcnphy_set_pa_gain(phy_info_t *pi, u16 gain)
}
void
-wlc_lcnphy_get_radio_loft(phy_info_t *pi,
+wlc_lcnphy_get_radio_loft(struct brcms_phy *pi,
u8 *ei0, u8 *eq0, u8 *fi0, u8 *fq0)
{
*ei0 = LCNPHY_IQLOCC_READ(read_radio_reg(pi, RADIO_2064_REG089));
@@ -2396,7 +2382,8 @@ wlc_lcnphy_get_radio_loft(phy_info_t *pi,
*fq0 = LCNPHY_IQLOCC_READ(read_radio_reg(pi, RADIO_2064_REG08C));
}
-static void wlc_lcnphy_get_tx_gain(phy_info_t *pi, lcnphy_txgains_t *gains)
+static void
+wlc_lcnphy_get_tx_gain(struct brcms_phy *pi, struct lcnphy_txgains *gains)
{
u16 dac_gain;
@@ -2415,9 +2402,9 @@ static void wlc_lcnphy_get_tx_gain(phy_info_t *pi, lcnphy_txgains_t *gains)
}
}
-void wlc_lcnphy_set_tx_iqcc(phy_info_t *pi, u16 a, u16 b)
+void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b)
{
- phytbl_info_t tab;
+ struct phytbl_info tab;
u16 iqcc[2];
iqcc[0] = a;
@@ -2431,9 +2418,9 @@ void wlc_lcnphy_set_tx_iqcc(phy_info_t *pi, u16 a, u16 b)
wlc_lcnphy_write_table(pi, &tab);
}
-void wlc_lcnphy_set_tx_locc(phy_info_t *pi, u16 didq)
+void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq)
{
- phytbl_info_t tab;
+ struct phytbl_info tab;
tab.tbl_id = LCNPHY_TBL_ID_IQLOCAL;
tab.tbl_width = 16;
@@ -2443,14 +2430,14 @@ void wlc_lcnphy_set_tx_locc(phy_info_t *pi, u16 didq)
wlc_lcnphy_write_table(pi, &tab);
}
-void wlc_lcnphy_set_tx_pwr_by_index(phy_info_t *pi, int index)
+void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index)
{
- phytbl_info_t tab;
+ struct phytbl_info tab;
u16 a, b;
u8 bb_mult;
u32 bbmultiqcomp, txgain, locoeffs, rfpower;
- lcnphy_txgains_t gains;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct lcnphy_txgains gains;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
pi_lcn->lcnphy_tx_power_idx_override = (s8) index;
pi_lcn->lcnphy_current_index = (u8) index;
@@ -2502,7 +2489,7 @@ void wlc_lcnphy_set_tx_pwr_by_index(phy_info_t *pi, int index)
}
}
-static void wlc_lcnphy_set_trsw_override(phy_info_t *pi, bool tx, bool rx)
+static void wlc_lcnphy_set_trsw_override(struct brcms_phy *pi, bool tx, bool rx)
{
mod_phy_reg(pi, 0x44d,
@@ -2512,10 +2499,10 @@ static void wlc_lcnphy_set_trsw_override(phy_info_t *pi, bool tx, bool rx)
or_phy_reg(pi, 0x44c, (0x1 << 1) | (0x1 << 0));
}
-static void wlc_lcnphy_clear_papd_comptable(phy_info_t *pi)
+static void wlc_lcnphy_clear_papd_comptable(struct brcms_phy *pi)
{
u32 j;
- phytbl_info_t tab;
+ struct phytbl_info tab;
u32 temp_offset[128];
tab.tbl_ptr = temp_offset;
tab.tbl_len = 128;
@@ -2532,7 +2519,7 @@ static void wlc_lcnphy_clear_papd_comptable(phy_info_t *pi)
}
static void
-wlc_lcnphy_set_rx_gain_by_distribution(phy_info_t *pi,
+wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
u16 trsw,
u16 ext_lna,
u16 biq2,
@@ -2566,7 +2553,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(phy_info_t *pi,
}
-static void wlc_lcnphy_rx_gain_override_enable(phy_info_t *pi, bool enable)
+static void
+wlc_lcnphy_rx_gain_override_enable(struct brcms_phy *pi, bool enable)
{
u16 ebit = enable ? 1 : 0;
@@ -2591,7 +2579,7 @@ static void wlc_lcnphy_rx_gain_override_enable(phy_info_t *pi, bool enable)
}
}
-void wlc_lcnphy_tx_pu(phy_info_t *pi, bool bEnable)
+void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable)
{
if (!bEnable) {
@@ -2669,7 +2657,7 @@ void wlc_lcnphy_tx_pu(phy_info_t *pi, bool bEnable)
}
static void
-wlc_lcnphy_run_samples(phy_info_t *pi,
+wlc_lcnphy_run_samples(struct brcms_phy *pi,
u16 num_samps,
u16 num_loops, u16 wait, bool iqcalmode)
{
@@ -2695,7 +2683,7 @@ wlc_lcnphy_run_samples(phy_info_t *pi,
or_radio_reg(pi, RADIO_2064_REG112, 0x6);
}
-void wlc_lcnphy_deaf_mode(phy_info_t *pi, bool mode)
+void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode)
{
u8 phybw40;
@@ -2720,7 +2708,7 @@ void wlc_lcnphy_deaf_mode(phy_info_t *pi, bool mode)
}
void
-wlc_lcnphy_start_tx_tone(phy_info_t *pi, s32 f_kHz, u16 max_val,
+wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val,
bool iqcalmode)
{
u8 phy_bw;
@@ -2730,8 +2718,8 @@ wlc_lcnphy_start_tx_tone(phy_info_t *pi, s32 f_kHz, u16 max_val,
cs32 tone_samp;
u32 data_buf[64];
u16 i_samp, q_samp;
- phytbl_info_t tab;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct phytbl_info tab;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
pi->phy_tx_tone_freq = f_kHz;
@@ -2783,10 +2771,10 @@ wlc_lcnphy_start_tx_tone(phy_info_t *pi, s32 f_kHz, u16 max_val,
wlc_lcnphy_run_samples(pi, num_samps, 0xffff, 0, iqcalmode);
}
-void wlc_lcnphy_stop_tx_tone(phy_info_t *pi)
+void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi)
{
s16 playback_status;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
pi->phy_tx_tone_freq = 0;
if (pi_lcn->lcnphy_spurmod) {
@@ -2814,16 +2802,16 @@ void wlc_lcnphy_stop_tx_tone(phy_info_t *pi)
wlc_lcnphy_deaf_mode(pi, false);
}
-static void wlc_lcnphy_clear_trsw_override(phy_info_t *pi)
+static void wlc_lcnphy_clear_trsw_override(struct brcms_phy *pi)
{
and_phy_reg(pi, 0x44c, (u16) ~((0x1 << 1) | (0x1 << 0)));
}
-void wlc_lcnphy_get_tx_iqcc(phy_info_t *pi, u16 *a, u16 *b)
+void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b)
{
u16 iqcc[2];
- phytbl_info_t tab;
+ struct phytbl_info tab;
tab.tbl_ptr = iqcc;
tab.tbl_len = 2;
@@ -2836,9 +2824,9 @@ void wlc_lcnphy_get_tx_iqcc(phy_info_t *pi, u16 *a, u16 *b)
*b = iqcc[1];
}
-u16 wlc_lcnphy_get_tx_locc(phy_info_t *pi)
+u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi)
{
- phytbl_info_t tab;
+ struct phytbl_info tab;
u16 didq;
tab.tbl_id = 0;
@@ -2851,18 +2839,18 @@ u16 wlc_lcnphy_get_tx_locc(phy_info_t *pi)
return didq;
}
-static void wlc_lcnphy_txpwrtbl_iqlo_cal(phy_info_t *pi)
+static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
{
- lcnphy_txgains_t target_gains, old_gains;
+ struct lcnphy_txgains target_gains, old_gains;
u8 save_bb_mult;
u16 a, b, didq, save_pa_gain = 0;
uint idx, SAVE_txpwrindex = 0xFF;
u32 val;
u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
- phytbl_info_t tab;
+ struct phytbl_info tab;
u8 ei0, eq0, fi0, fq0;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
wlc_lcnphy_get_tx_gain(pi, &old_gains);
save_pa_gain = wlc_lcnphy_get_pa_gain(pi);
@@ -2965,7 +2953,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(phy_info_t *pi)
wlc_lcnphy_set_tx_pwr_by_index(pi, SAVE_txpwrindex);
}
-s16 wlc_lcnphy_tempsense_new(phy_info_t *pi, bool mode)
+s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode)
{
u16 tempsenseval1, tempsenseval2;
s16 avg = 0;
@@ -3010,13 +2998,13 @@ s16 wlc_lcnphy_tempsense_new(phy_info_t *pi, bool mode)
return avg;
}
-u16 wlc_lcnphy_tempsense(phy_info_t *pi, bool mode)
+u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode)
{
u16 tempsenseval1, tempsenseval2;
s32 avg = 0;
bool suspend = 0;
u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (NORADIO_ENAB(pi->pubpi))
return -1;
@@ -3071,7 +3059,7 @@ u16 wlc_lcnphy_tempsense(phy_info_t *pi, bool mode)
return (u16) avg;
}
-s8 wlc_lcnphy_tempsense_degree(phy_info_t *pi, bool mode)
+s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode)
{
s32 degree = wlc_lcnphy_tempsense_new(pi, mode);
degree =
@@ -3080,7 +3068,7 @@ s8 wlc_lcnphy_tempsense_degree(phy_info_t *pi, bool mode)
return (s8) degree;
}
-s8 wlc_lcnphy_vbatsense(phy_info_t *pi, bool mode)
+s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode)
{
u16 vbatsenseval;
s32 avg = 0;
@@ -3116,7 +3104,7 @@ s8 wlc_lcnphy_vbatsense(phy_info_t *pi, bool mode)
return (s8) avg;
}
-static void wlc_lcnphy_afe_clk_init(phy_info_t *pi, u8 mode)
+static void wlc_lcnphy_afe_clk_init(struct brcms_phy *pi, u8 mode)
{
u8 phybw40;
phybw40 = CHSPEC_IS40(pi->radio_chanspec);
@@ -3131,9 +3119,9 @@ static void wlc_lcnphy_afe_clk_init(phy_info_t *pi, u8 mode)
}
static bool
-wlc_lcnphy_rx_iq_est(phy_info_t *pi,
+wlc_lcnphy_rx_iq_est(struct brcms_phy *pi,
u16 num_samps,
- u8 wait_time, lcnphy_iq_est_t *iq_est)
+ u8 wait_time, struct lcnphy_iq_est *iq_est)
{
int wait_count = 0;
bool result = true;
@@ -3177,17 +3165,17 @@ wlc_lcnphy_rx_iq_est(phy_info_t *pi,
return result;
}
-static bool wlc_lcnphy_calc_rx_iq_comp(phy_info_t *pi, u16 num_samps)
+static bool wlc_lcnphy_calc_rx_iq_comp(struct brcms_phy *pi, u16 num_samps)
{
#define LCNPHY_MIN_RXIQ_PWR 2
bool result;
u16 a0_new, b0_new;
- lcnphy_iq_est_t iq_est = { 0, 0, 0 };
+ struct lcnphy_iq_est iq_est = { 0, 0, 0 };
s32 a, b, temp;
s16 iq_nbits, qq_nbits, arsh, brsh;
s32 iq;
u32 ii, qq;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
a0_new = ((read_phy_reg(pi, 0x645) & (0x3ff << 0)) >> 0);
b0_new = ((read_phy_reg(pi, 0x646) & (0x3ff << 0)) >> 0);
@@ -3263,11 +3251,12 @@ static bool wlc_lcnphy_calc_rx_iq_comp(phy_info_t *pi, u16 num_samps)
}
static bool
-wlc_lcnphy_rx_iq_cal(phy_info_t *pi, const lcnphy_rx_iqcomp_t *iqcomp,
+wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
+ const struct lcnphy_rx_iqcomp *iqcomp,
int iqcomp_sz, bool tx_switch, bool rx_switch, int module,
int tx_gain_idx)
{
- lcnphy_txgains_t old_gains;
+ struct lcnphy_txgains old_gains;
u16 tx_pwr_ctrl;
u8 tx_gain_index_old = 0;
bool result = false, tx_gain_override_old = false;
@@ -3280,7 +3269,7 @@ wlc_lcnphy_rx_iq_cal(phy_info_t *pi, const lcnphy_rx_iqcomp_t *iqcomp,
u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
u16 values_to_save[11];
s16 *ptr;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
ptr = kmalloc(sizeof(s16) * 131, GFP_ATOMIC);
if (NULL == ptr) {
@@ -3431,18 +3420,18 @@ wlc_lcnphy_rx_iq_cal(phy_info_t *pi, const lcnphy_rx_iqcomp_t *iqcomp,
return result;
}
-static void wlc_lcnphy_temp_adj(phy_info_t *pi)
+static void wlc_lcnphy_temp_adj(struct brcms_phy *pi)
{
if (NORADIO_ENAB(pi->pubpi))
return;
}
-static void wlc_lcnphy_glacial_timer_based_cal(phy_info_t *pi)
+static void wlc_lcnphy_glacial_timer_based_cal(struct brcms_phy *pi)
{
bool suspend;
s8 index;
u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
suspend =
(0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
if (!suspend)
@@ -3462,17 +3451,17 @@ static void wlc_lcnphy_glacial_timer_based_cal(phy_info_t *pi)
}
-static void wlc_lcnphy_periodic_cal(phy_info_t *pi)
+static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
{
bool suspend, full_cal;
- const lcnphy_rx_iqcomp_t *rx_iqcomp;
+ const struct lcnphy_rx_iqcomp *rx_iqcomp;
int rx_iqcomp_sz;
u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
s8 index;
- phytbl_info_t tab;
+ struct phytbl_info tab;
s32 a1, b0, b1;
s32 tssi, pwr, maxtargetpwr, mintargetpwr;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (NORADIO_ENAB(pi->pubpi))
return;
@@ -3506,7 +3495,7 @@ static void wlc_lcnphy_periodic_cal(phy_info_t *pi)
if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi)) {
- wlc_lcnphy_idle_tssi_est((wlc_phy_t *) pi);
+ wlc_lcnphy_idle_tssi_est((struct brcms_phy_pub *) pi);
b0 = pi->txpa_2g[0];
b1 = pi->txpa_2g[1];
@@ -3534,11 +3523,11 @@ static void wlc_lcnphy_periodic_cal(phy_info_t *pi)
wlapi_enable_mac(pi->sh->physhim);
}
-void wlc_lcnphy_calib_modes(phy_info_t *pi, uint mode)
+void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode)
{
u16 temp_new;
int temp1, temp2, temp_diff;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
switch (mode) {
case PHY_PERICAL_CHAN:
@@ -3568,12 +3557,13 @@ void wlc_lcnphy_calib_modes(phy_info_t *pi, uint mode)
break;
case LCNPHY_PERICAL_TEMPBASED_TXPWRCTRL:
if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
- wlc_lcnphy_tx_power_adjustment((wlc_phy_t *) pi);
+ wlc_lcnphy_tx_power_adjustment(
+ (struct brcms_phy_pub *) pi);
break;
}
}
-void wlc_lcnphy_get_tssi(phy_info_t *pi, s8 *ofdm_pwr, s8 *cck_pwr)
+void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr, s8 *cck_pwr)
{
s8 cck_offset;
u16 status;
@@ -3595,16 +3585,17 @@ void wlc_lcnphy_get_tssi(phy_info_t *pi, s8 *ofdm_pwr, s8 *cck_pwr)
}
}
-void WLBANDINITFN(wlc_phy_cal_init_lcnphy) (phy_info_t *pi)
+void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi)
{
return;
}
-static void wlc_lcnphy_set_chanspec_tweaks(phy_info_t *pi, chanspec_t chanspec)
+static void
+wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, chanspec_t chanspec)
{
u8 channel = CHSPEC_CHANNEL(chanspec);
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (NORADIO_ENAB(pi->pubpi))
return;
@@ -3653,12 +3644,12 @@ static void wlc_lcnphy_set_chanspec_tweaks(phy_info_t *pi, chanspec_t chanspec)
write_phy_reg(pi, 0x44a, 0x80);
}
-void wlc_lcnphy_tx_power_adjustment(wlc_phy_t *ppi)
+void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi)
{
s8 index;
u16 index2;
- phy_info_t *pi = (phy_info_t *) ppi;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi) && SAVE_txpwrctrl) {
index = wlc_lcnphy_tempcompensated_txpwrctrl(pi);
@@ -3670,7 +3661,7 @@ void wlc_lcnphy_tx_power_adjustment(wlc_phy_t *ppi)
}
}
-static void wlc_lcnphy_set_rx_iq_comp(phy_info_t *pi, u16 a, u16 b)
+static void wlc_lcnphy_set_rx_iq_comp(struct brcms_phy *pi, u16 a, u16 b)
{
mod_phy_reg(pi, 0x645, (0x3ff << 0), (a) << 0);
@@ -3686,10 +3677,10 @@ static void wlc_lcnphy_set_rx_iq_comp(phy_info_t *pi, u16 a, u16 b)
}
-void WLBANDINITFN(wlc_phy_init_lcnphy) (phy_info_t *pi)
+void wlc_phy_init_lcnphy(struct brcms_phy *pi)
{
u8 phybw40;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
phybw40 = CHSPEC_IS40(pi->radio_chanspec);
pi_lcn->lcnphy_cal_counter = 0;
@@ -3709,9 +3700,9 @@ void WLBANDINITFN(wlc_phy_init_lcnphy) (phy_info_t *pi)
wlc_lcnphy_radio_init(pi);
if (CHSPEC_IS2G(pi->radio_chanspec))
- wlc_lcnphy_tx_pwr_ctrl_init((wlc_phy_t *) pi);
+ wlc_lcnphy_tx_pwr_ctrl_init((struct brcms_phy_pub *) pi);
- wlc_phy_chanspec_set((wlc_phy_t *) pi, pi->radio_chanspec);
+ wlc_phy_chanspec_set((struct brcms_phy_pub *) pi, pi->radio_chanspec);
si_pmu_regcontrol(pi->sh->sih, 0, 0xf, 0x9);
@@ -3736,7 +3727,7 @@ void WLBANDINITFN(wlc_phy_init_lcnphy) (phy_info_t *pi)
}
static void
-wlc_lcnphy_tx_iqlo_loopback(phy_info_t *pi, u16 *values_to_save)
+wlc_lcnphy_tx_iqlo_loopback(struct brcms_phy *pi, u16 *values_to_save)
{
u16 vmid;
int i;
@@ -3829,14 +3820,14 @@ wlc_lcnphy_tx_iqlo_loopback(phy_info_t *pi, u16 *values_to_save)
}
static void
-wlc_lcnphy_samp_cap(phy_info_t *pi, int clip_detect_algo, u16 thresh,
+wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
s16 *ptr, int mode)
{
u32 curval1, curval2, stpptr, curptr, strptr, val;
u16 sslpnCalibClkEnCtrl, timer;
u16 old_sslpnCalibClkEnCtrl;
s16 imag, real;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
timer = 0;
old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
@@ -3905,9 +3896,9 @@ wlc_lcnphy_samp_cap(phy_info_t *pi, int clip_detect_algo, u16 thresh,
W_REG(&pi->regs->psm_corectlsts, curval1);
}
-static void wlc_lcnphy_tx_iqlo_soft_cal_full(phy_info_t *pi)
+static void wlc_lcnphy_tx_iqlo_soft_cal_full(struct brcms_phy *pi)
{
- lcnphy_unsign16_struct iqcc0, locc2, locc3, locc4;
+ struct lcnphy_unsign16_struct iqcc0, locc2, locc3, locc4;
wlc_lcnphy_set_cc(pi, 0, 0, 0);
wlc_lcnphy_set_cc(pi, 2, 0, 0);
@@ -3928,7 +3919,7 @@ static void wlc_lcnphy_tx_iqlo_soft_cal_full(phy_info_t *pi)
}
static void
-wlc_lcnphy_set_cc(phy_info_t *pi, int cal_type, s16 coeff_x, s16 coeff_y)
+wlc_lcnphy_set_cc(struct brcms_phy *pi, int cal_type, s16 coeff_x, s16 coeff_y)
{
u16 di0dq0;
u16 x, y, data_rf;
@@ -3972,11 +3963,12 @@ wlc_lcnphy_set_cc(phy_info_t *pi, int cal_type, s16 coeff_x, s16 coeff_y)
}
}
-static lcnphy_unsign16_struct wlc_lcnphy_get_cc(phy_info_t *pi, int cal_type)
+static struct lcnphy_unsign16_struct
+wlc_lcnphy_get_cc(struct brcms_phy *pi, int cal_type)
{
u16 a, b, didq;
u8 di0, dq0, ei, eq, fi, fq;
- lcnphy_unsign16_struct cc;
+ struct lcnphy_unsign16_struct cc;
cc.re = 0;
cc.im = 0;
switch (cal_type) {
@@ -4007,11 +3999,12 @@ static lcnphy_unsign16_struct wlc_lcnphy_get_cc(phy_info_t *pi, int cal_type)
}
static void
-wlc_lcnphy_a1(phy_info_t *pi, int cal_type, int num_levels, int step_size_lg2)
+wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
+ int step_size_lg2)
{
- const lcnphy_spb_tone_t *phy_c1;
- lcnphy_spb_tone_t phy_c2;
- lcnphy_unsign16_struct phy_c3;
+ const struct lcnphy_spb_tone *phy_c1;
+ struct lcnphy_spb_tone phy_c2;
+ struct lcnphy_unsign16_struct phy_c3;
int phy_c4, phy_c5, k, l, j, phy_c6;
u16 phy_c7, phy_c8, phy_c9;
s16 phy_c10, phy_c11, phy_c12, phy_c13, phy_c14, phy_c15, phy_c16;
@@ -4205,7 +4198,7 @@ wlc_lcnphy_a1(phy_info_t *pi, int cal_type, int num_levels, int step_size_lg2)
}
static void
-wlc_lcnphy_tx_iqlo_loopback_cleanup(phy_info_t *pi, u16 *values_to_save)
+wlc_lcnphy_tx_iqlo_loopback_cleanup(struct brcms_phy *pi, u16 *values_to_save)
{
int i;
@@ -4220,11 +4213,10 @@ wlc_lcnphy_tx_iqlo_loopback_cleanup(phy_info_t *pi, u16 *values_to_save)
}
static void
-WLBANDINITFN(wlc_lcnphy_load_tx_gain_table) (phy_info_t *pi,
- const lcnphy_tx_gain_tbl_entry *
- gain_table) {
+wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
+ const struct lcnphy_tx_gain_tbl_entry *gain_table) {
u32 j;
- phytbl_info_t tab;
+ struct phytbl_info tab;
u32 val;
u16 pa_gain;
u16 gm_gain;
@@ -4256,9 +4248,9 @@ WLBANDINITFN(wlc_lcnphy_load_tx_gain_table) (phy_info_t *pi,
}
}
-static void wlc_lcnphy_load_rfpower(phy_info_t *pi)
+static void wlc_lcnphy_load_rfpower(struct brcms_phy *pi)
{
- phytbl_info_t tab;
+ struct phytbl_info tab;
u32 val, bbmult, rfgain;
u8 index;
u8 scale_factor = 1;
@@ -4305,11 +4297,11 @@ static void wlc_lcnphy_load_rfpower(phy_info_t *pi)
}
}
-static void WLBANDINITFN(wlc_lcnphy_tbl_init) (phy_info_t *pi)
+static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
{
uint idx;
u8 phybw40;
- phytbl_info_t tab;
+ struct phytbl_info tab;
u32 val;
phybw40 = CHSPEC_IS40(pi->radio_chanspec);
@@ -4400,10 +4392,10 @@ static void WLBANDINITFN(wlc_lcnphy_tbl_init) (phy_info_t *pi)
wlc_lcnphy_clear_papd_comptable(pi);
}
-static void WLBANDINITFN(wlc_lcnphy_rev0_baseband_init) (phy_info_t *pi)
+static void wlc_lcnphy_rev0_baseband_init(struct brcms_phy *pi)
{
u16 afectrl1;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
write_radio_reg(pi, RADIO_2064_REG11C, 0x0);
@@ -4447,7 +4439,7 @@ static void WLBANDINITFN(wlc_lcnphy_rev0_baseband_init) (phy_info_t *pi)
}
-static void WLBANDINITFN(wlc_lcnphy_rev2_baseband_init) (phy_info_t *pi)
+static void wlc_lcnphy_rev2_baseband_init(struct brcms_phy *pi)
{
if (CHSPEC_IS5G(pi->radio_chanspec)) {
mod_phy_reg(pi, 0x416, (0xff << 0), 80 << 0);
@@ -4456,12 +4448,12 @@ static void WLBANDINITFN(wlc_lcnphy_rev2_baseband_init) (phy_info_t *pi)
}
}
-static void wlc_lcnphy_agc_temp_init(phy_info_t *pi)
+static void wlc_lcnphy_agc_temp_init(struct brcms_phy *pi)
{
s16 temp;
- phytbl_info_t tab;
+ struct phytbl_info tab;
u32 tableBuffer[2];
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (NORADIO_ENAB(pi->pubpi))
return;
@@ -4517,7 +4509,7 @@ static void wlc_lcnphy_agc_temp_init(phy_info_t *pi)
}
-static void WLBANDINITFN(wlc_lcnphy_bu_tweaks) (phy_info_t *pi)
+static void wlc_lcnphy_bu_tweaks(struct brcms_phy *pi)
{
if (NORADIO_ENAB(pi->pubpi))
return;
@@ -4572,7 +4564,7 @@ static void WLBANDINITFN(wlc_lcnphy_bu_tweaks) (phy_info_t *pi)
}
}
-static void WLBANDINITFN(wlc_lcnphy_baseband_init) (phy_info_t *pi)
+static void wlc_lcnphy_baseband_init(struct brcms_phy *pi)
{
wlc_lcnphy_tbl_init(pi);
@@ -4582,10 +4574,10 @@ static void WLBANDINITFN(wlc_lcnphy_baseband_init) (phy_info_t *pi)
wlc_lcnphy_bu_tweaks(pi);
}
-static void WLBANDINITFN(wlc_radio_2064_init) (phy_info_t *pi)
+static void wlc_radio_2064_init(struct brcms_phy *pi)
{
u32 i;
- lcnphy_radio_regs_t *lcnphyregs = NULL;
+ struct lcnphy_radio_regs *lcnphyregs = NULL;
lcnphyregs = lcnphy_radio_regs_2064;
@@ -4643,7 +4635,7 @@ static void WLBANDINITFN(wlc_radio_2064_init) (phy_info_t *pi)
wlc_lcnphy_rc_cal(pi);
}
-static void WLBANDINITFN(wlc_lcnphy_radio_init) (phy_info_t *pi)
+static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
{
if (NORADIO_ENAB(pi->pubpi))
return;
@@ -4651,7 +4643,7 @@ static void WLBANDINITFN(wlc_lcnphy_radio_init) (phy_info_t *pi)
wlc_radio_2064_init(pi);
}
-static void wlc_lcnphy_rcal(phy_info_t *pi)
+static void wlc_lcnphy_rcal(struct brcms_phy *pi)
{
u8 rcal_value;
@@ -4682,7 +4674,7 @@ static void wlc_lcnphy_rcal(phy_info_t *pi)
and_radio_reg(pi, RADIO_2064_REG057, 0xFE);
}
-static void wlc_lcnphy_rc_cal(phy_info_t *pi)
+static void wlc_lcnphy_rc_cal(struct brcms_phy *pi)
{
u8 dflt_rc_cal_val;
u16 flt_val;
@@ -4705,11 +4697,11 @@ static void wlc_lcnphy_rc_cal(phy_info_t *pi)
return;
}
-static bool wlc_phy_txpwr_srom_read_lcnphy(phy_info_t *pi)
+static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
{
s8 txpwr = 0;
int i;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (CHSPEC_IS2G(pi->radio_chanspec)) {
u16 cckpo = 0;
@@ -4811,7 +4803,7 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(phy_info_t *pi)
pi_lcn->lcnphy_freqoffset_corr =
(u8) PHY_GETINTVAR(pi, "freqoffset_corr");
if ((u8) getintvar(pi->vars, "aa2g") > 1)
- wlc_phy_ant_rxdiv_set((wlc_phy_t *) pi,
+ wlc_phy_ant_rxdiv_set((struct brcms_phy_pub *) pi,
(u8) getintvar(pi->vars,
"aa2g"));
}
@@ -4827,7 +4819,7 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(phy_info_t *pi)
return true;
}
-void wlc_2064_vco_cal(phy_info_t *pi)
+void wlc_2064_vco_cal(struct brcms_phy *pi)
{
u8 calnrst;
@@ -4843,10 +4835,10 @@ void wlc_2064_vco_cal(phy_info_t *pi)
}
static void
-wlc_lcnphy_radio_2064_channel_tune_4313(phy_info_t *pi, u8 channel)
+wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
{
uint i;
- const chan_info_2064_lcnphy_t *ci;
+ const struct chan_info_2064_lcnphy *ci;
u8 rfpll_doubler = 0;
u8 pll_pwrup, pll_pwrup_ovr;
fixed qFxtal, qFref, qFvco, qFcal;
@@ -5008,7 +5000,7 @@ wlc_lcnphy_radio_2064_channel_tune_4313(phy_info_t *pi, u8 channel)
}
}
-bool wlc_phy_tpc_isenabled_lcnphy(phy_info_t *pi)
+bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi)
{
if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
return 0;
@@ -5017,7 +5009,7 @@ bool wlc_phy_tpc_isenabled_lcnphy(phy_info_t *pi)
wlc_lcnphy_get_tx_pwr_ctrl((pi)));
}
-void wlc_phy_txpower_recalc_target_lcnphy(phy_info_t *pi)
+void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi)
{
u16 pwr_ctrl;
if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) {
@@ -5033,16 +5025,16 @@ void wlc_phy_txpower_recalc_target_lcnphy(phy_info_t *pi)
return;
}
-void wlc_phy_detach_lcnphy(phy_info_t *pi)
+void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
{
kfree(pi->u.pi_lcnphy);
}
-bool wlc_phy_attach_lcnphy(phy_info_t *pi)
+bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
{
- phy_info_lcnphy_t *pi_lcn;
+ struct brcms_phy_lcnphy *pi_lcn;
- pi->u.pi_lcnphy = kzalloc(sizeof(phy_info_lcnphy_t), GFP_ATOMIC);
+ pi->u.pi_lcnphy = kzalloc(sizeof(struct brcms_phy_lcnphy), GFP_ATOMIC);
if (pi->u.pi_lcnphy == NULL) {
return false;
}
@@ -5085,7 +5077,7 @@ bool wlc_phy_attach_lcnphy(phy_info_t *pi)
return true;
}
-static void wlc_lcnphy_set_rx_gain(phy_info_t *pi, u32 gain)
+static void wlc_lcnphy_set_rx_gain(struct brcms_phy *pi, u32 gain)
{
u16 trsw, ext_lna, lna1, lna2, tia, biq0, biq1, gain0_15, gain16_19;
@@ -5115,12 +5107,12 @@ static void wlc_lcnphy_set_rx_gain(phy_info_t *pi, u32 gain)
wlc_lcnphy_rx_gain_override_enable(pi, true);
}
-static u32 wlc_lcnphy_get_receive_power(phy_info_t *pi, s32 *gain_index)
+static u32 wlc_lcnphy_get_receive_power(struct brcms_phy *pi, s32 *gain_index)
{
u32 received_power = 0;
s32 max_index = 0;
u32 gain_code = 0;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
max_index = 36;
if (*gain_index >= 0)
@@ -5151,7 +5143,7 @@ static u32 wlc_lcnphy_get_receive_power(phy_info_t *pi, s32 *gain_index)
return received_power;
}
-s32 wlc_lcnphy_rx_signal_power(phy_info_t *pi, s32 gain_index)
+s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index)
{
s32 gain = 0;
s32 nominal_power_db;
@@ -5159,7 +5151,7 @@ s32 wlc_lcnphy_rx_signal_power(phy_info_t *pi, s32 gain_index)
input_power_db;
s32 received_power, temperature;
uint freq;
- phy_info_lcnphy_t *pi_lcn = pi->u.pi_lcnphy;
+ struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
received_power = wlc_lcnphy_get_receive_power(pi, &gain_index);
@@ -5223,7 +5215,7 @@ s32 wlc_lcnphy_rx_signal_power(phy_info_t *pi, s32 gain_index)
}
static int
-wlc_lcnphy_load_tx_iir_filter(phy_info_t *pi, bool is_ofdm, s16 filt_type)
+wlc_lcnphy_load_tx_iir_filter(struct brcms_phy *pi, bool is_ofdm, s16 filt_type)
{
s16 filt_index = -1;
int j;
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_lcn.h b/drivers/staging/brcm80211/brcmsmac/phy/phy_lcn.h
index b7bfc7230df..f4a8ab09da4 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_lcn.h
+++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_lcn.h
@@ -14,10 +14,12 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _wlc_phy_lcn_h_
-#define _wlc_phy_lcn_h_
+#ifndef _BRCM_PHY_LCN_H_
+#define _BRCM_PHY_LCN_H_
-struct phy_info_lcnphy {
+#include <types.h>
+
+struct brcms_phy_lcnphy {
int lcnphy_txrf_sp_9_override;
u8 lcnphy_full_cal_channel;
u8 lcnphy_cal_counter;
@@ -98,7 +100,7 @@ struct phy_info_lcnphy {
u16 lcnphy_extstxctrl1;
s16 lcnphy_cck_dig_filt_type;
s16 lcnphy_ofdm_dig_filt_type;
- lcnphy_cal_results_t lcnphy_cal_results;
+ struct lcnphy_cal_results lcnphy_cal_results;
u8 lcnphy_psat_pwr;
u8 lcnphy_psat_indx;
@@ -116,4 +118,4 @@ struct phy_info_lcnphy {
uint lcnphy_aci_start_time;
s8 lcnphy_tx_power_offset[TXP_NUM_RATES];
};
-#endif /* _wlc_phy_lcn_h_ */
+#endif /* _BRCM_PHY_LCN_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_n.c b/drivers/staging/brcm80211/brcmsmac/phy/phy_n.c
index 71275094e81..f8e41923942 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_n.c
+++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_n.c
@@ -14,23 +14,19 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <bcmdefs.h>
-#include <wlc_cfg.h>
#include <linux/delay.h>
-#include <linux/pci.h>
-#include <aiutils.h>
-#include <sbchipc.h>
-#include <wlc_pmu.h>
-
-#include <bcmdevs.h>
-#include <sbhnddma.h>
-#include <wlc_phy_radio.h>
-#include <wlc_phy_int.h>
-#include <wlc_phyreg_n.h>
-#include <wlc_phytbl_n.h>
+#include <brcm_hw_ids.h>
+#include <aiutils.h>
+#include <chipcommon.h>
+#include <pmu.h>
+#include <d11.h>
+#include <phy_shim.h>
+#include "phy_int.h"
+#include "phy_hal.h"
+#include "phy_radio.h"
+#include "phyreg_n.h"
+#include "phytbl_n.h"
#define READ_RADIO_REG2(pi, radio_type, jspace, core, reg_name) \
read_radio_reg(pi, radio_type##_##jspace##_##reg_name | \
@@ -141,7 +137,11 @@
#define NPHY_ADJUSTED_MINCRSPOWER 0x1e
-typedef struct _nphy_iqcal_params {
+/* 5357 Chip specific ChipControl register bits */
+#define CCTRL5357_EXTPA (1<<14) /* extPA in ChipControl 1, bit 14 */
+#define CCTRL5357_ANT_MUX_2o3 (1<<15) /* 2o3 in ChipControl 1, bit 15 */
+
+struct nphy_iqcal_params {
u16 txlpf;
u16 txgm;
u16 pga;
@@ -149,20 +149,20 @@ typedef struct _nphy_iqcal_params {
u16 ipa;
u16 cal_gain;
u16 ncorr[5];
-} nphy_iqcal_params_t;
+};
-typedef struct _nphy_txiqcal_ladder {
+struct nphy_txiqcal_ladder {
u8 percent;
u8 g_env;
-} nphy_txiqcal_ladder_t;
+};
-typedef struct {
- nphy_txgains_t gains;
+struct nphy_ipa_txcalgains {
+ struct nphy_txgains gains;
bool useindex;
u8 index;
-} nphy_ipa_txcalgains_t;
+};
-typedef struct nphy_papd_restore_state_t {
+struct nphy_papd_restore_state {
u16 fbmix[2];
u16 vga_master[2];
u16 intpa_master[2];
@@ -171,20 +171,20 @@ typedef struct nphy_papd_restore_state_t {
u16 pwrup[2];
u16 atten[2];
u16 mm;
-} nphy_papd_restore_state;
+};
-typedef struct _nphy_ipa_txrxgain {
+struct nphy_ipa_txrxgain {
u16 hpvga;
u16 lpf_biq1;
u16 lpf_biq0;
u16 lna2;
u16 lna1;
s8 txpwrindex;
-} nphy_ipa_txrxgain_t;
+};
#define NPHY_IPA_RXCAL_MAXGAININDEX (6 - 1)
-nphy_ipa_txrxgain_t nphy_ipa_rxcal_gaintbl_5GHz[] = { {0, 0, 0, 0, 0, 100},
+struct nphy_ipa_txrxgain nphy_ipa_rxcal_gaintbl_5GHz[] = { {0, 0, 0, 0, 0, 100},
{0, 0, 0, 0, 0, 50},
{0, 0, 0, 0, 0, -1},
{0, 0, 0, 3, 0, -1},
@@ -192,7 +192,7 @@ nphy_ipa_txrxgain_t nphy_ipa_rxcal_gaintbl_5GHz[] = { {0, 0, 0, 0, 0, 100},
{0, 2, 3, 3, 0, -1}
};
-nphy_ipa_txrxgain_t nphy_ipa_rxcal_gaintbl_2GHz[] = { {0, 0, 0, 0, 0, 128},
+struct nphy_ipa_txrxgain nphy_ipa_rxcal_gaintbl_2GHz[] = { {0, 0, 0, 0, 0, 128},
{0, 0, 0, 0, 0, 70},
{0, 0, 0, 0, 0, 20},
{0, 0, 0, 3, 0, 20},
@@ -200,7 +200,8 @@ nphy_ipa_txrxgain_t nphy_ipa_rxcal_gaintbl_2GHz[] = { {0, 0, 0, 0, 0, 128},
{0, 2, 3, 3, 0, 20}
};
-nphy_ipa_txrxgain_t nphy_ipa_rxcal_gaintbl_5GHz_rev7[] = { {0, 0, 0, 0, 0, 100},
+struct nphy_ipa_txrxgain nphy_ipa_rxcal_gaintbl_5GHz_rev7[] = {
+{0, 0, 0, 0, 0, 100},
{0, 0, 0, 0, 0, 50},
{0, 0, 0, 0, 0, -1},
{0, 0, 0, 3, 0, -1},
@@ -208,7 +209,8 @@ nphy_ipa_txrxgain_t nphy_ipa_rxcal_gaintbl_5GHz_rev7[] = { {0, 0, 0, 0, 0, 100},
{0, 0, 5, 3, 0, -1}
};
-nphy_ipa_txrxgain_t nphy_ipa_rxcal_gaintbl_2GHz_rev7[] = { {0, 0, 0, 0, 0, 10},
+struct nphy_ipa_txrxgain nphy_ipa_rxcal_gaintbl_2GHz_rev7[] = {
+{0, 0, 0, 0, 0, 10},
{0, 0, 0, 1, 0, 10},
{0, 0, 1, 2, 0, 10},
{0, 0, 1, 3, 0, 10},
@@ -255,7 +257,7 @@ u16 NPHY_IPA_REV4_txdigi_filtcoeffs[][NPHY_NUM_DIG_FILT_COEFFS] = {
0x97, 0x12d, 0x97, 0x25a, 0xd10, 0x25a}
};
-typedef struct _chan_info_nphy_2055 {
+struct chan_info_nphy_2055 {
u16 chan;
u16 freq;
uint unknown;
@@ -287,9 +289,9 @@ typedef struct _chan_info_nphy_2055 {
u16 PHY_BW4;
u16 PHY_BW5;
u16 PHY_BW6;
-} chan_info_nphy_2055_t;
+};
-typedef struct _chan_info_nphy_radio205x {
+struct chan_info_nphy_radio205x {
u16 chan;
u16 freq;
u8 RF_SYN_pll_vcocal1;
@@ -335,9 +337,9 @@ typedef struct _chan_info_nphy_radio205x {
u16 PHY_BW4;
u16 PHY_BW5;
u16 PHY_BW6;
-} chan_info_nphy_radio205x_t;
+};
-typedef struct _chan_info_nphy_radio2057 {
+struct chan_info_nphy_radio2057 {
u16 chan;
u16 freq;
u8 RF_vcocal_countval0;
@@ -374,9 +376,9 @@ typedef struct _chan_info_nphy_radio2057 {
u16 PHY_BW4;
u16 PHY_BW5;
u16 PHY_BW6;
-} chan_info_nphy_radio2057_t;
+};
-typedef struct _chan_info_nphy_radio2057_rev5 {
+struct chan_info_nphy_radio2057_rev5 {
u16 chan;
u16 freq;
u8 RF_vcocal_countval0;
@@ -403,18 +405,18 @@ typedef struct _chan_info_nphy_radio2057_rev5 {
u16 PHY_BW4;
u16 PHY_BW5;
u16 PHY_BW6;
-} chan_info_nphy_radio2057_rev5_t;
+};
-typedef struct nphy_sfo_cfg {
+struct nphy_sfo_cfg {
u16 PHY_BW1a;
u16 PHY_BW2;
u16 PHY_BW3;
u16 PHY_BW4;
u16 PHY_BW5;
u16 PHY_BW6;
-} nphy_sfo_cfg_t;
+};
-static chan_info_nphy_2055_t chan_info_nphy_2055[] = {
+static struct chan_info_nphy_2055 chan_info_nphy_2055[] = {
{
184, 4920, 3280, 0x71, 0x01, 0xEC, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F, 0x8F, 0xFF, 0x00, 0x0F,
@@ -913,7 +915,7 @@ static chan_info_nphy_2055_t chan_info_nphy_2055[] = {
0x01, 0x80, 0x3E6, 0x3E2, 0x3DE, 0x41B, 0x41F, 0x424}
};
-static chan_info_nphy_radio205x_t chan_info_nphyrev3_2056[] = {
+static struct chan_info_nphy_radio205x chan_info_nphyrev3_2056[] = {
{
184, 4920, 0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 0x0c, 0x01,
0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f,
@@ -1536,7 +1538,7 @@ static chan_info_nphy_radio205x_t chan_info_nphyrev3_2056[] = {
0x0f, 0x00, 0x0d, 0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424}
};
-static chan_info_nphy_radio205x_t chan_info_nphyrev4_2056_A1[] = {
+static struct chan_info_nphy_radio205x chan_info_nphyrev4_2056_A1[] = {
{
184, 4920, 0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 0x0c, 0x01,
0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f,
@@ -2159,7 +2161,7 @@ static chan_info_nphy_radio205x_t chan_info_nphyrev4_2056_A1[] = {
0x0f, 0x00, 0x0e, 0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424}
};
-static chan_info_nphy_radio205x_t chan_info_nphyrev5_2056v5[] = {
+static struct chan_info_nphy_radio205x chan_info_nphyrev5_2056v5[] = {
{
184, 4920, 0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 0x0c, 0x01,
0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70,
@@ -2782,7 +2784,7 @@ static chan_info_nphy_radio205x_t chan_info_nphyrev5_2056v5[] = {
0x0d, 0x00, 0x08, 0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424}
};
-static chan_info_nphy_radio205x_t chan_info_nphyrev6_2056v6[] = {
+static struct chan_info_nphy_radio205x chan_info_nphyrev6_2056v6[] = {
{
184, 4920, 0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 0x0c, 0x01,
0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77,
@@ -3405,7 +3407,7 @@ static chan_info_nphy_radio205x_t chan_info_nphyrev6_2056v6[] = {
0x09, 0x00, 0x09, 0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424}
};
-static chan_info_nphy_radio205x_t chan_info_nphyrev5n6_2056v7[] = {
+static struct chan_info_nphy_radio205x chan_info_nphyrev5n6_2056v7[] = {
{
184, 4920, 0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 0x0c, 0x01,
0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70,
@@ -4028,7 +4030,7 @@ static chan_info_nphy_radio205x_t chan_info_nphyrev5n6_2056v7[] = {
0x0d, 0x00, 0x08, 0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424}
};
-static chan_info_nphy_radio205x_t chan_info_nphyrev6_2056v8[] = {
+static struct chan_info_nphy_radio205x chan_info_nphyrev6_2056v8[] = {
{
184, 4920, 0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, 0x0c, 0x01,
0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77,
@@ -4651,7 +4653,7 @@ static chan_info_nphy_radio205x_t chan_info_nphyrev6_2056v8[] = {
0x09, 0x00, 0x09, 0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424}
};
-static chan_info_nphy_radio205x_t chan_info_nphyrev6_2056v11[] = {
+static struct chan_info_nphy_radio205x chan_info_nphyrev6_2056v11[] = {
{
184, 4920, 0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x02, 0x0c, 0x01,
0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00, 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77,
@@ -5274,7 +5276,7 @@ static chan_info_nphy_radio205x_t chan_info_nphyrev6_2056v11[] = {
0x09, 0x00, 0x09, 0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424}
};
-static chan_info_nphy_radio2057_t chan_info_nphyrev7_2057_rev4[] = {
+static struct chan_info_nphy_radio2057 chan_info_nphyrev7_2057_rev4[] = {
{
184, 4920, 0x68, 0x16, 0x10, 0x0c, 0x0c, 0x0c, 0x30, 0xec, 0x01, 0x0f,
0x00, 0x0f, 0x00, 0xff, 0x00, 0x00, 0x0f, 0x0f, 0xf3, 0x00, 0xef, 0x00,
@@ -6137,7 +6139,7 @@ static chan_info_nphy_radio2057_t chan_info_nphyrev7_2057_rev4[] = {
0x0424}
};
-static chan_info_nphy_radio2057_rev5_t chan_info_nphyrev8_2057_rev5[] = {
+static struct chan_info_nphy_radio2057_rev5 chan_info_nphyrev8_2057_rev5[] = {
{
1, 2412, 0x48, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x6c, 0x09, 0x0d,
0x08, 0x0e, 0x61, 0x03, 0xff, 0x61, 0x03, 0xff, 0x03c9, 0x03c5, 0x03c1,
@@ -6196,7 +6198,7 @@ static chan_info_nphy_radio2057_rev5_t chan_info_nphyrev8_2057_rev5[] = {
0x041b, 0x041f, 0x0424}
};
-static chan_info_nphy_radio2057_rev5_t chan_info_nphyrev9_2057_rev5v1[] = {
+static struct chan_info_nphy_radio2057_rev5 chan_info_nphyrev9_2057_rev5v1[] = {
{
1, 2412, 0x48, 0x16, 0x30, 0x1b, 0x0a, 0x0a, 0x30, 0x6c, 0x09, 0x0d,
0x08, 0x0e, 0x61, 0x03, 0xff, 0x61, 0x03, 0xff, 0x03c9, 0x03c5, 0x03c1,
@@ -6255,7 +6257,7 @@ static chan_info_nphy_radio2057_rev5_t chan_info_nphyrev9_2057_rev5v1[] = {
0x041b, 0x041f, 0x0424}
};
-static chan_info_nphy_radio2057_t chan_info_nphyrev8_2057_rev7[] = {
+static struct chan_info_nphy_radio2057 chan_info_nphyrev8_2057_rev7[] = {
{
184, 4920, 0x68, 0x16, 0x10, 0x0c, 0x0c, 0x0c, 0x30, 0xec, 0x01, 0x0f,
0x00, 0x0f, 0x00, 0xff, 0x00, 0xd3, 0x0f, 0x0f, 0xd3, 0x00, 0xff, 0x00,
@@ -6996,7 +6998,7 @@ static chan_info_nphy_radio2057_t chan_info_nphyrev8_2057_rev7[] = {
0x0424}
};
-static chan_info_nphy_radio2057_t chan_info_nphyrev8_2057_rev8[] = {
+static struct chan_info_nphy_radio2057 chan_info_nphyrev8_2057_rev8[] = {
{
186, 4930, 0x6b, 0x16, 0x10, 0x0c, 0x0c, 0x0c, 0x30, 0xed, 0x01, 0x0f,
0x00, 0x0f, 0x00, 0xff, 0x00, 0xd3, 0x0f, 0x0f, 0xd3, 0x00, 0xff, 0x00,
@@ -7731,7 +7733,7 @@ static chan_info_nphy_radio2057_t chan_info_nphyrev8_2057_rev8[] = {
0x0424}
};
-radio_regs_t regs_2055[] = {
+struct radio_regs regs_2055[] = {
{0x02, 0x80, 0x80, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0x27, 0x27, 0, 0},
@@ -7960,7 +7962,7 @@ radio_regs_t regs_2055[] = {
{0xFFFF, 0, 0, 0, 0},
};
-radio_regs_t regs_SYN_2056[] = {
+struct radio_regs regs_SYN_2056[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -8145,7 +8147,7 @@ radio_regs_t regs_SYN_2056[] = {
{0xFFFF, 0, 0, 0, 0}
};
-radio_regs_t regs_TX_2056[] = {
+struct radio_regs regs_TX_2056[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -8294,7 +8296,7 @@ radio_regs_t regs_TX_2056[] = {
{0xFFFF, 0, 0, 0, 0}
};
-radio_regs_t regs_RX_2056[] = {
+struct radio_regs regs_RX_2056[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -8445,7 +8447,7 @@ radio_regs_t regs_RX_2056[] = {
{0xFFFF, 0, 0, 0, 0}
};
-radio_regs_t regs_SYN_2056_A1[] = {
+struct radio_regs regs_SYN_2056_A1[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -8630,7 +8632,7 @@ radio_regs_t regs_SYN_2056_A1[] = {
{0xFFFF, 0, 0, 0, 0}
};
-radio_regs_t regs_TX_2056_A1[] = {
+struct radio_regs regs_TX_2056_A1[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -8779,7 +8781,7 @@ radio_regs_t regs_TX_2056_A1[] = {
{0xFFFF, 0, 0, 0, 0}
};
-radio_regs_t regs_RX_2056_A1[] = {
+struct radio_regs regs_RX_2056_A1[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -8930,7 +8932,7 @@ radio_regs_t regs_RX_2056_A1[] = {
{0xFFFF, 0, 0, 0, 0}
};
-radio_regs_t regs_SYN_2056_rev5[] = {
+struct radio_regs regs_SYN_2056_rev5[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -9115,7 +9117,7 @@ radio_regs_t regs_SYN_2056_rev5[] = {
{0xFFFF, 0, 0, 0, 0}
};
-radio_regs_t regs_TX_2056_rev5[] = {
+struct radio_regs regs_TX_2056_rev5[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -9272,7 +9274,7 @@ radio_regs_t regs_TX_2056_rev5[] = {
{0xFFFF, 0, 0, 0, 0}
};
-radio_regs_t regs_RX_2056_rev5[] = {
+struct radio_regs regs_RX_2056_rev5[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -9423,7 +9425,7 @@ radio_regs_t regs_RX_2056_rev5[] = {
{0xFFFF, 0, 0, 0, 0}
};
-radio_regs_t regs_SYN_2056_rev6[] = {
+struct radio_regs regs_SYN_2056_rev6[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -9608,7 +9610,7 @@ radio_regs_t regs_SYN_2056_rev6[] = {
{0xFFFF, 0, 0, 0, 0}
};
-radio_regs_t regs_TX_2056_rev6[] = {
+struct radio_regs regs_TX_2056_rev6[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -9765,7 +9767,7 @@ radio_regs_t regs_TX_2056_rev6[] = {
{0xFFFF, 0, 0, 0, 0}
};
-radio_regs_t regs_RX_2056_rev6[] = {
+struct radio_regs regs_RX_2056_rev6[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -9916,7 +9918,7 @@ radio_regs_t regs_RX_2056_rev6[] = {
{0xFFFF, 0, 0, 0, 0}
};
-radio_regs_t regs_SYN_2056_rev7[] = {
+struct radio_regs regs_SYN_2056_rev7[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -10101,7 +10103,7 @@ radio_regs_t regs_SYN_2056_rev7[] = {
{0xFFFF, 0, 0, 0, 0},
};
-radio_regs_t regs_TX_2056_rev7[] = {
+struct radio_regs regs_TX_2056_rev7[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -10258,7 +10260,7 @@ radio_regs_t regs_TX_2056_rev7[] = {
{0xFFFF, 0, 0, 0, 0},
};
-radio_regs_t regs_RX_2056_rev7[] = {
+struct radio_regs regs_RX_2056_rev7[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -10409,7 +10411,7 @@ radio_regs_t regs_RX_2056_rev7[] = {
{0xFFFF, 0, 0, 0, 0},
};
-radio_regs_t regs_SYN_2056_rev8[] = {
+struct radio_regs regs_SYN_2056_rev8[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -10594,7 +10596,7 @@ radio_regs_t regs_SYN_2056_rev8[] = {
{0xFFFF, 0, 0, 0, 0},
};
-radio_regs_t regs_TX_2056_rev8[] = {
+struct radio_regs regs_TX_2056_rev8[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -10751,7 +10753,7 @@ radio_regs_t regs_TX_2056_rev8[] = {
{0xFFFF, 0, 0, 0, 0},
};
-radio_regs_t regs_RX_2056_rev8[] = {
+struct radio_regs regs_RX_2056_rev8[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -10902,7 +10904,7 @@ radio_regs_t regs_RX_2056_rev8[] = {
{0xFFFF, 0, 0, 0, 0},
};
-radio_regs_t regs_SYN_2056_rev11[] = {
+struct radio_regs regs_SYN_2056_rev11[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -11087,7 +11089,7 @@ radio_regs_t regs_SYN_2056_rev11[] = {
{0xFFFF, 0, 0, 0, 0},
};
-radio_regs_t regs_TX_2056_rev11[] = {
+struct radio_regs regs_TX_2056_rev11[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -11244,7 +11246,7 @@ radio_regs_t regs_TX_2056_rev11[] = {
{0xFFFF, 0, 0, 0, 0},
};
-radio_regs_t regs_RX_2056_rev11[] = {
+struct radio_regs regs_RX_2056_rev11[] = {
{0x02, 0, 0, 0, 0},
{0x03, 0, 0, 0, 0},
{0x04, 0, 0, 0, 0},
@@ -11395,7 +11397,7 @@ radio_regs_t regs_RX_2056_rev11[] = {
{0xFFFF, 0, 0, 0, 0},
};
-radio_20xx_regs_t regs_2057_rev4[] = {
+struct radio_20xx_regs regs_2057_rev4[] = {
{0x00, 0x84, 0},
{0x01, 0, 0},
{0x02, 0x60, 0},
@@ -11785,7 +11787,7 @@ radio_20xx_regs_t regs_2057_rev4[] = {
{0xFFFF, 0, 0},
};
-radio_20xx_regs_t regs_2057_rev5[] = {
+struct radio_20xx_regs regs_2057_rev5[] = {
{0x00, 0, 1},
{0x01, 0x57, 1},
{0x02, 0x20, 1},
@@ -12117,7 +12119,7 @@ radio_20xx_regs_t regs_2057_rev5[] = {
{0xFFFF, 0, 0}
};
-radio_20xx_regs_t regs_2057_rev5v1[] = {
+struct radio_20xx_regs regs_2057_rev5v1[] = {
{0x00, 0x15, 1},
{0x01, 0x57, 1},
{0x02, 0x20, 1},
@@ -12449,7 +12451,7 @@ radio_20xx_regs_t regs_2057_rev5v1[] = {
{0xFFFF, 0, 0}
};
-radio_20xx_regs_t regs_2057_rev7[] = {
+struct radio_20xx_regs regs_2057_rev7[] = {
{0x00, 0, 1},
{0x01, 0x57, 1},
{0x02, 0x20, 1},
@@ -12865,7 +12867,7 @@ radio_20xx_regs_t regs_2057_rev7[] = {
{0xFFFF, 0, 0}
};
-radio_20xx_regs_t regs_2057_rev8[] = {
+struct radio_20xx_regs regs_2057_rev8[] = {
{0x00, 0x8, 1},
{0x01, 0x57, 1},
{0x02, 0x20, 1},
@@ -14083,118 +14085,118 @@ static u8 ant_sw_ctrl_tbl_rev8_2057v7_core0[] = {
static u8 ant_sw_ctrl_tbl_rev8_2057v7_core1[] = {
0x09, 0x0a, 0x09, 0x0a, 0x15, 0x16 };
-static bool wlc_phy_chan2freq_nphy(phy_info_t *pi, uint channel, int *f,
- chan_info_nphy_radio2057_t **t0,
- chan_info_nphy_radio205x_t **t1,
- chan_info_nphy_radio2057_rev5_t **t2,
- chan_info_nphy_2055_t **t3);
-static void wlc_phy_chanspec_nphy_setup(phy_info_t *pi, chanspec_t chans,
- const nphy_sfo_cfg_t *c);
+static bool wlc_phy_chan2freq_nphy(struct brcms_phy *pi, uint channel, int *f,
+ struct chan_info_nphy_radio2057 **t0,
+ struct chan_info_nphy_radio205x **t1,
+ struct chan_info_nphy_radio2057_rev5 **t2,
+ struct chan_info_nphy_2055 **t3);
+static void wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, chanspec_t chans,
+ const struct nphy_sfo_cfg *c);
-static void wlc_phy_adjust_rx_analpfbw_nphy(phy_info_t *pi,
+static void wlc_phy_adjust_rx_analpfbw_nphy(struct brcms_phy *pi,
u16 reduction_factr);
-static void wlc_phy_adjust_min_noisevar_nphy(phy_info_t *pi, int ntones, int *,
- u32 *buf);
-static void wlc_phy_adjust_crsminpwr_nphy(phy_info_t *pi, u8 minpwr);
-static void wlc_phy_txlpfbw_nphy(phy_info_t *pi);
-static void wlc_phy_spurwar_nphy(phy_info_t *pi);
-
-static void wlc_phy_radio_preinit_2055(phy_info_t *pi);
-static void wlc_phy_radio_init_2055(phy_info_t *pi);
-static void wlc_phy_radio_postinit_2055(phy_info_t *pi);
-static void wlc_phy_radio_preinit_205x(phy_info_t *pi);
-static void wlc_phy_radio_init_2056(phy_info_t *pi);
-static void wlc_phy_radio_postinit_2056(phy_info_t *pi);
-static void wlc_phy_radio_init_2057(phy_info_t *pi);
-static void wlc_phy_radio_postinit_2057(phy_info_t *pi);
-static void wlc_phy_workarounds_nphy(phy_info_t *pi);
-static void wlc_phy_workarounds_nphy_gainctrl(phy_info_t *pi);
-static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(phy_info_t *pi);
-static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(phy_info_t *pi);
-static void wlc_phy_adjust_lnagaintbl_nphy(phy_info_t *pi);
-
-static void wlc_phy_restore_rssical_nphy(phy_info_t *pi);
-static void wlc_phy_reapply_txcal_coeffs_nphy(phy_info_t *pi);
-static void wlc_phy_tx_iq_war_nphy(phy_info_t *pi);
-static int wlc_phy_cal_rxiq_nphy_rev3(phy_info_t *pi, nphy_txgains_t tg,
- u8 type, bool d);
-static void wlc_phy_rxcal_gainctrl_nphy_rev5(phy_info_t *pi, u8 rxcore,
+static void wlc_phy_adjust_min_noisevar_nphy(struct brcms_phy *pi,
+ int ntones, int *, u32 *buf);
+static void wlc_phy_adjust_crsminpwr_nphy(struct brcms_phy *pi, u8 minpwr);
+static void wlc_phy_txlpfbw_nphy(struct brcms_phy *pi);
+static void wlc_phy_spurwar_nphy(struct brcms_phy *pi);
+
+static void wlc_phy_radio_preinit_2055(struct brcms_phy *pi);
+static void wlc_phy_radio_init_2055(struct brcms_phy *pi);
+static void wlc_phy_radio_postinit_2055(struct brcms_phy *pi);
+static void wlc_phy_radio_preinit_205x(struct brcms_phy *pi);
+static void wlc_phy_radio_init_2056(struct brcms_phy *pi);
+static void wlc_phy_radio_postinit_2056(struct brcms_phy *pi);
+static void wlc_phy_radio_init_2057(struct brcms_phy *pi);
+static void wlc_phy_radio_postinit_2057(struct brcms_phy *pi);
+static void wlc_phy_workarounds_nphy(struct brcms_phy *pi);
+static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi);
+static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi);
+static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi);
+static void wlc_phy_adjust_lnagaintbl_nphy(struct brcms_phy *pi);
+
+static void wlc_phy_restore_rssical_nphy(struct brcms_phy *pi);
+static void wlc_phy_reapply_txcal_coeffs_nphy(struct brcms_phy *pi);
+static void wlc_phy_tx_iq_war_nphy(struct brcms_phy *pi);
+static int wlc_phy_cal_rxiq_nphy_rev3(struct brcms_phy *pi,
+ struct nphy_txgains tg, u8 type, bool d);
+static void wlc_phy_rxcal_gainctrl_nphy_rev5(struct brcms_phy *pi, u8 rxcore,
u16 *rg, u8 type);
-static void wlc_phy_update_mimoconfig_nphy(phy_info_t *pi, s32 preamble);
-static void wlc_phy_savecal_nphy(phy_info_t *pi);
-static void wlc_phy_restorecal_nphy(phy_info_t *pi);
-static void wlc_phy_resetcca_nphy(phy_info_t *pi);
-
-static void wlc_phy_txpwrctrl_config_nphy(phy_info_t *pi);
-static void wlc_phy_internal_cal_txgain_nphy(phy_info_t *pi);
-static void wlc_phy_precal_txgain_nphy(phy_info_t *pi);
-static void wlc_phy_update_txcal_ladder_nphy(phy_info_t *pi, u16 core);
-
-static void wlc_phy_extpa_set_tx_digi_filts_nphy(phy_info_t *pi);
-static void wlc_phy_ipa_set_tx_digi_filts_nphy(phy_info_t *pi);
-static void wlc_phy_ipa_restore_tx_digi_filts_nphy(phy_info_t *pi);
-static u16 wlc_phy_ipa_get_bbmult_nphy(phy_info_t *pi);
-static void wlc_phy_ipa_set_bbmult_nphy(phy_info_t *pi, u8 m0, u8 m1);
-static u32 *wlc_phy_get_ipa_gaintbl_nphy(phy_info_t *pi);
-
-static void wlc_phy_a1_nphy(phy_info_t *pi, u8 core, u32 winsz, u32,
+static void wlc_phy_update_mimoconfig_nphy(struct brcms_phy *pi, s32 preamble);
+static void wlc_phy_savecal_nphy(struct brcms_phy *pi);
+static void wlc_phy_restorecal_nphy(struct brcms_phy *pi);
+static void wlc_phy_resetcca_nphy(struct brcms_phy *pi);
+
+static void wlc_phy_txpwrctrl_config_nphy(struct brcms_phy *pi);
+static void wlc_phy_internal_cal_txgain_nphy(struct brcms_phy *pi);
+static void wlc_phy_precal_txgain_nphy(struct brcms_phy *pi);
+static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core);
+
+static void wlc_phy_extpa_set_tx_digi_filts_nphy(struct brcms_phy *pi);
+static void wlc_phy_ipa_set_tx_digi_filts_nphy(struct brcms_phy *pi);
+static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi);
+static u16 wlc_phy_ipa_get_bbmult_nphy(struct brcms_phy *pi);
+static void wlc_phy_ipa_set_bbmult_nphy(struct brcms_phy *pi, u8 m0, u8 m1);
+static u32 *wlc_phy_get_ipa_gaintbl_nphy(struct brcms_phy *pi);
+
+static void wlc_phy_a1_nphy(struct brcms_phy *pi, u8 core, u32 winsz, u32,
u32 e);
-static u8 wlc_phy_a3_nphy(phy_info_t *pi, u8 start_gain, u8 core);
-static void wlc_phy_a2_nphy(phy_info_t *pi, nphy_ipa_txcalgains_t *,
- phy_cal_mode_t, u8);
-static void wlc_phy_papd_cal_cleanup_nphy(phy_info_t *pi,
- nphy_papd_restore_state *state);
-static void wlc_phy_papd_cal_setup_nphy(phy_info_t *pi,
- nphy_papd_restore_state *state, u8);
+static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core);
+static void wlc_phy_a2_nphy(struct brcms_phy *pi, struct nphy_ipa_txcalgains *,
+ enum phy_cal_mode, u8);
+static void wlc_phy_papd_cal_cleanup_nphy(struct brcms_phy *pi,
+ struct nphy_papd_restore_state *state);
+static void wlc_phy_papd_cal_setup_nphy(struct brcms_phy *pi,
+ struct nphy_papd_restore_state *state, u8);
-static void wlc_phy_clip_det_nphy(phy_info_t *pi, u8 write, u16 *vals);
+static void wlc_phy_clip_det_nphy(struct brcms_phy *pi, u8 write, u16 *vals);
-static void wlc_phy_set_rfseq_nphy(phy_info_t *pi, u8 cmd, u8 *evts,
+static void wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *evts,
u8 *dlys, u8 len);
-static u16 wlc_phy_read_lpf_bw_ctl_nphy(phy_info_t *pi, u16 offset);
+static u16 wlc_phy_read_lpf_bw_ctl_nphy(struct brcms_phy *pi, u16 offset);
static void
-wlc_phy_rfctrl_override_nphy_rev7(phy_info_t *pi, u16 field, u16 value,
+wlc_phy_rfctrl_override_nphy_rev7(struct brcms_phy *pi, u16 field, u16 value,
u8 core_mask, u8 off,
u8 override_id);
-static void wlc_phy_rssi_cal_nphy_rev2(phy_info_t *pi, u8 rssi_type);
-static void wlc_phy_rssi_cal_nphy_rev3(phy_info_t *pi);
+static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type);
+static void wlc_phy_rssi_cal_nphy_rev3(struct brcms_phy *pi);
-static bool wlc_phy_txpwr_srom_read_nphy(phy_info_t *pi);
+static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi);
static void wlc_phy_txpwr_nphy_srom_convert(u8 *srom_max,
u16 *pwr_offset,
u8 tmp_max_pwr, u8 rate_start,
u8 rate_end);
-static void wlc_phy_txpwr_limit_to_tbl_nphy(phy_info_t *pi);
-static void wlc_phy_txpwrctrl_coeff_setup_nphy(phy_info_t *pi);
-static void wlc_phy_txpwrctrl_idle_tssi_nphy(phy_info_t *pi);
-static void wlc_phy_txpwrctrl_pwr_setup_nphy(phy_info_t *pi);
+static void wlc_phy_txpwr_limit_to_tbl_nphy(struct brcms_phy *pi);
+static void wlc_phy_txpwrctrl_coeff_setup_nphy(struct brcms_phy *pi);
+static void wlc_phy_txpwrctrl_idle_tssi_nphy(struct brcms_phy *pi);
+static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi);

-static bool wlc_phy_txpwr_ison_nphy(phy_info_t *pi);
-static u8 wlc_phy_txpwr_idx_cur_get_nphy(phy_info_t *pi, u8 core);
-static void wlc_phy_txpwr_idx_cur_set_nphy(phy_info_t *pi, u8 idx0,
+static bool wlc_phy_txpwr_ison_nphy(struct brcms_phy *pi);
+static u8 wlc_phy_txpwr_idx_cur_get_nphy(struct brcms_phy *pi, u8 core);
+static void wlc_phy_txpwr_idx_cur_set_nphy(struct brcms_phy *pi, u8 idx0,
u8 idx1);
-static void wlc_phy_a4(phy_info_t *pi, bool full_cal);
+static void wlc_phy_a4(struct brcms_phy *pi, bool full_cal);

-static u16 wlc_phy_radio205x_rcal(phy_info_t *pi);
+static u16 wlc_phy_radio205x_rcal(struct brcms_phy *pi);

-static u16 wlc_phy_radio2057_rccal(phy_info_t *pi);
+static u16 wlc_phy_radio2057_rccal(struct brcms_phy *pi);

-static u16 wlc_phy_gen_load_samples_nphy(phy_info_t *pi, u32 f_kHz,
+static u16 wlc_phy_gen_load_samples_nphy(struct brcms_phy *pi, u32 f_kHz,
u16 max_val,
u8 dac_test_mode);
-static void wlc_phy_loadsampletable_nphy(phy_info_t *pi, cs32 *tone_buf,
+static void wlc_phy_loadsampletable_nphy(struct brcms_phy *pi, cs32 *tone_buf,
u16 num_samps);
-static void wlc_phy_runsamples_nphy(phy_info_t *pi, u16 n, u16 lps,
+static void wlc_phy_runsamples_nphy(struct brcms_phy *pi, u16 n, u16 lps,
u16 wait, u8 iq, u8 dac_test_mode,
bool modify_bbmult);
-bool wlc_phy_bist_check_phy(wlc_phy_t *pih)
+bool wlc_phy_bist_check_phy(struct brcms_phy_pub *pih)
{
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
u32 phybist0, phybist1, phybist2, phybist3, phybist4;
if (NREV_GE(pi->pubpi.phy_rev, 16))
@@ -14214,7 +14216,7 @@ bool wlc_phy_bist_check_phy(wlc_phy_t *pih)
return false;
}
-static void WLBANDINITFN(wlc_phy_bphy_init_nphy) (phy_info_t *pi)
+static void wlc_phy_bphy_init_nphy(struct brcms_phy *pi)
{
u16 addr, val;
@@ -14243,10 +14245,10 @@ static void WLBANDINITFN(wlc_phy_bphy_init_nphy) (phy_info_t *pi)
}
void
-wlc_phy_table_write_nphy(phy_info_t *pi, u32 id, u32 len, u32 offset,
+wlc_phy_table_write_nphy(struct brcms_phy *pi, u32 id, u32 len, u32 offset,
u32 width, const void *data)
{
- mimophytbl_info_t tbl;
+ struct phytbl_info tbl;
tbl.tbl_id = id;
tbl.tbl_len = len;
@@ -14257,10 +14259,10 @@ wlc_phy_table_write_nphy(phy_info_t *pi, u32 id, u32 len, u32 offset,
}
void
-wlc_phy_table_read_nphy(phy_info_t *pi, u32 id, u32 len, u32 offset,
+wlc_phy_table_read_nphy(struct brcms_phy *pi, u32 id, u32 len, u32 offset,
u32 width, void *data)
{
- mimophytbl_info_t tbl;
+ struct phytbl_info tbl;
tbl.tbl_id = id;
tbl.tbl_len = len;
@@ -14270,7 +14272,8 @@ wlc_phy_table_read_nphy(phy_info_t *pi, u32 id, u32 len, u32 offset,
wlc_phy_read_table_nphy(pi, &tbl);
}
-static void WLBANDINITFN(wlc_phy_static_table_download_nphy) (phy_info_t *pi)
+static void
+wlc_phy_static_table_download_nphy(struct brcms_phy *pi)
{
uint idx;
@@ -14293,7 +14296,7 @@ static void WLBANDINITFN(wlc_phy_static_table_download_nphy) (phy_info_t *pi)
}
}
-static void WLBANDINITFN(wlc_phy_tbl_init_nphy) (phy_info_t *pi)
+static void wlc_phy_tbl_init_nphy(struct brcms_phy *pi)
{
uint idx = 0;
u8 antswctrllut;
@@ -14415,13 +14418,13 @@ static void WLBANDINITFN(wlc_phy_tbl_init_nphy) (phy_info_t *pi)
}
static void
-wlc_phy_write_txmacreg_nphy(phy_info_t *pi, u16 holdoff, u16 delay)
+wlc_phy_write_txmacreg_nphy(struct brcms_phy *pi, u16 holdoff, u16 delay)
{
write_phy_reg(pi, 0x77, holdoff);
write_phy_reg(pi, 0xb4, delay);
}
-void wlc_phy_nphy_tkip_rifs_war(phy_info_t *pi, u8 rifs)
+void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs)
{
u16 holdoff, delay;
@@ -14442,7 +14445,7 @@ void wlc_phy_nphy_tkip_rifs_war(phy_info_t *pi, u8 rifs)
}
}
-bool wlc_phy_attach_nphy(phy_info_t *pi)
+bool wlc_phy_attach_nphy(struct brcms_phy *pi)
{
uint i;
@@ -14467,7 +14470,7 @@ bool wlc_phy_attach_nphy(phy_info_t *pi)
pi->n_preamble_override = AUTO;
if (NREV_IS(pi->pubpi.phy_rev, 3) || NREV_IS(pi->pubpi.phy_rev, 4))
- pi->n_preamble_override = WLC_N_PREAMBLE_MIXEDMODE;
+ pi->n_preamble_override = BRCMS_N_PREAMBLE_MIXEDMODE;
pi->nphy_txrx_chain = AUTO;
pi->phy_scraminit = AUTO;
@@ -14501,7 +14504,7 @@ bool wlc_phy_attach_nphy(phy_info_t *pi)
return true;
}
-static void wlc_phy_txpwrctrl_config_nphy(phy_info_t *pi)
+static void wlc_phy_txpwrctrl_config_nphy(struct brcms_phy *pi)
{
if (NREV_GE(pi->pubpi.phy_rev, 3)) {
@@ -14521,11 +14524,11 @@ static void wlc_phy_txpwrctrl_config_nphy(phy_info_t *pi)
pi->phy_5g_pwrgain = true;
}
-void WLBANDINITFN(wlc_phy_init_nphy) (phy_info_t *pi)
+void wlc_phy_init_nphy(struct brcms_phy *pi)
{
u16 val;
u16 clip1_ths[2];
- nphy_txgains_t target_gain;
+ struct nphy_txgains target_gain;
u8 tx_pwr_ctrl_state;
bool do_nphy_cal = false;
uint core;
@@ -14550,11 +14553,6 @@ void WLBANDINITFN(wlc_phy_init_nphy) (phy_info_t *pi)
}
}
- if ((!PHY_IPA(pi)) && (pi->sh->chip == BCM5357_CHIP_ID)) {
- si_pmu_chipcontrol(pi->sh->sih, 1, CCTRL5357_EXTPA,
- CCTRL5357_EXTPA);
- }
-
if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
CHSPEC_IS40(pi->radio_chanspec)) {
@@ -14707,12 +14705,10 @@ void WLBANDINITFN(wlc_phy_init_nphy) (phy_info_t *pi)
tx_pwrctrl_tbl = wlc_phy_get_ipa_gaintbl_nphy(pi);
} else {
if (CHSPEC_IS5G(pi->radio_chanspec)) {
- if NREV_IS
- (pi->pubpi.phy_rev, 3) {
+ if (NREV_IS(pi->pubpi.phy_rev, 3)) {
tx_pwrctrl_tbl =
nphy_tpc_5GHz_txgain_rev3;
- } else if NREV_IS
- (pi->pubpi.phy_rev, 4) {
+ } else if (NREV_IS(pi->pubpi.phy_rev, 4)) {
tx_pwrctrl_tbl =
(pi->srom_fem5g.extpagain == 3) ?
nphy_tpc_5GHz_txgain_HiPwrEPA :
@@ -14834,7 +14830,7 @@ void WLBANDINITFN(wlc_phy_init_nphy) (phy_info_t *pi)
}
if (pi->sh->phyrxchain != 0x3) {
- wlc_phy_rxcore_setstate_nphy((wlc_phy_t *) pi,
+ wlc_phy_rxcore_setstate_nphy((struct brcms_phy_pub *) pi,
pi->sh->phyrxchain);
}
@@ -14873,7 +14869,8 @@ void WLBANDINITFN(wlc_phy_init_nphy) (phy_info_t *pi)
target_gain = wlc_phy_get_tx_gain_nphy(pi);
if (pi->antsel_type == ANTSEL_2x3)
- wlc_phy_antsel_init((wlc_phy_t *) pi, true);
+ wlc_phy_antsel_init((struct brcms_phy_pub *) pi,
+ true);
if (pi->nphy_perical != PHY_PERICAL_MPHASE) {
wlc_phy_rssi_cal_nphy(pi);
@@ -14903,7 +14900,7 @@ void WLBANDINITFN(wlc_phy_init_nphy) (phy_info_t *pi)
} else if (pi->mphase_cal_phase_id ==
MPHASE_CAL_STATE_IDLE) {
- wlc_phy_cal_perical((wlc_phy_t *) pi,
+ wlc_phy_cal_perical((struct brcms_phy_pub *) pi,
PHY_PERICAL_PHYINIT);
}
} else {
@@ -14927,14 +14924,13 @@ void WLBANDINITFN(wlc_phy_init_nphy) (phy_info_t *pi)
}
-static void wlc_phy_update_mimoconfig_nphy(phy_info_t *pi, s32 preamble)
+static void wlc_phy_update_mimoconfig_nphy(struct brcms_phy *pi, s32 preamble)
{
bool gf_preamble = false;
u16 val;
- if (preamble == WLC_N_PREAMBLE_GF) {
+ if (preamble == BRCMS_N_PREAMBLE_GF)
gf_preamble = true;
- }
val = read_phy_reg(pi, 0xed);
@@ -14946,7 +14942,7 @@ static void wlc_phy_update_mimoconfig_nphy(phy_info_t *pi, s32 preamble)
write_phy_reg(pi, 0xed, val);
}
-static void wlc_phy_resetcca_nphy(phy_info_t *pi)
+static void wlc_phy_resetcca_nphy(struct brcms_phy *pi)
{
u16 val;
@@ -14962,7 +14958,7 @@ static void wlc_phy_resetcca_nphy(phy_info_t *pi)
wlc_phy_force_rfseq_nphy(pi, NPHY_RFSEQ_RESET2RX);
}
-void wlc_phy_pa_override_nphy(phy_info_t *pi, bool en)
+void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en)
{
u16 rfctrlintc_override_val;
@@ -14991,21 +14987,21 @@ void wlc_phy_pa_override_nphy(phy_info_t *pi, bool en)
}
-void wlc_phy_stf_chain_upd_nphy(phy_info_t *pi)
+void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi)
{
u16 txrx_chain =
(NPHY_RfseqCoreActv_TxRxChain0 | NPHY_RfseqCoreActv_TxRxChain1);
bool CoreActv_override = false;
- if (pi->nphy_txrx_chain == WLC_N_TXRX_CHAIN0) {
+ if (pi->nphy_txrx_chain == BRCMS_N_TXRX_CHAIN0) {
txrx_chain = NPHY_RfseqCoreActv_TxRxChain0;
CoreActv_override = true;
if (NREV_LE(pi->pubpi.phy_rev, 2)) {
and_phy_reg(pi, 0xa0, ~0x20);
}
- } else if (pi->nphy_txrx_chain == WLC_N_TXRX_CHAIN1) {
+ } else if (pi->nphy_txrx_chain == BRCMS_N_TXRX_CHAIN1) {
txrx_chain = NPHY_RfseqCoreActv_TxRxChain1;
CoreActv_override = true;
@@ -15026,12 +15022,12 @@ void wlc_phy_stf_chain_upd_nphy(phy_info_t *pi)
}
}
-void wlc_phy_rxcore_setstate_nphy(wlc_phy_t *pih, u8 rxcore_bitmask)
+void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask)
{
u16 regval;
u16 tbl_buf[16];
uint i;
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
u16 tbl_opcode;
bool suspend;
@@ -15107,10 +15103,10 @@ void wlc_phy_rxcore_setstate_nphy(wlc_phy_t *pih, u8 rxcore_bitmask)
wlapi_enable_mac(pi->sh->physhim);
}
-u8 wlc_phy_rxcore_getstate_nphy(wlc_phy_t *pih)
+u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih)
{
u16 regval, rxen_bits;
- phy_info_t *pi = (phy_info_t *) pih;
+ struct brcms_phy *pi = (struct brcms_phy *) pih;
regval = read_phy_reg(pi, 0xa2);
rxen_bits = (regval >> 4) & 0xf;
@@ -15118,12 +15114,12 @@ u8 wlc_phy_rxcore_getstate_nphy(wlc_phy_t *pih)
return (u8) rxen_bits;
}
-bool wlc_phy_n_txpower_ipa_ison(phy_info_t *pi)
+bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pi)
{
return PHY_IPA(pi);
}
-static void wlc_phy_txpwr_limit_to_tbl_nphy(phy_info_t *pi)
+static void wlc_phy_txpwr_limit_to_tbl_nphy(struct brcms_phy *pi)
{
u8 idx, idx2, i, delta_ind;
@@ -15217,11 +15213,12 @@ static void wlc_phy_txpwr_limit_to_tbl_nphy(phy_info_t *pi)
}
}
-void wlc_phy_cal_init_nphy(phy_info_t *pi)
+void wlc_phy_cal_init_nphy(struct brcms_phy *pi)
{
}
-static void wlc_phy_war_force_trsw_to_R_cliplo_nphy(phy_info_t *pi, u8 core)
+static void
+wlc_phy_war_force_trsw_to_R_cliplo_nphy(struct brcms_phy *pi, u8 core)
{
if (core == PHY_CORE_0) {
write_phy_reg(pi, 0x38, 0x4);
@@ -15240,7 +15237,7 @@ static void wlc_phy_war_force_trsw_to_R_cliplo_nphy(phy_info_t *pi, u8 core)
}
}
-static void wlc_phy_war_txchain_upd_nphy(phy_info_t *pi, u8 txchain)
+static void wlc_phy_war_txchain_upd_nphy(struct brcms_phy *pi, u8 txchain)
{
u8 txchain0, txchain1;
@@ -15255,7 +15252,7 @@ static void wlc_phy_war_txchain_upd_nphy(phy_info_t *pi, u8 txchain)
}
}
-static void wlc_phy_workarounds_nphy(phy_info_t *pi)
+static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
{
u8 rfseq_rx2tx_events[] = {
NPHY_RFSEQ_CMD_NOP,
@@ -15374,9 +15371,7 @@ static void wlc_phy_workarounds_nphy(phy_info_t *pi)
if (pi->phyhang_avoid)
wlc_phy_stay_in_carriersearch_nphy(pi, true);
- if (!ISSIM_ENAB(pi->sh->sih)) {
- or_phy_reg(pi, 0xb1, NPHY_IQFlip_ADC1 | NPHY_IQFlip_ADC2);
- }
+ or_phy_reg(pi, 0xb1, NPHY_IQFlip_ADC1 | NPHY_IQFlip_ADC2);
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
@@ -16219,8 +16214,8 @@ static void wlc_phy_workarounds_nphy(phy_info_t *pi)
if (pi->sh->boardflags2 & BFL2_SINGLEANT_CCK) {
wlapi_bmac_mhf(pi->sh->physhim, MHF4,
- MHF4_BPHY_TXCORE0,
- MHF4_BPHY_TXCORE0, WLC_BAND_ALL);
+ MHF4_BPHY_TXCORE0,
+ MHF4_BPHY_TXCORE0, BRCM_BAND_ALL);
}
}
} else {
@@ -16288,7 +16283,7 @@ static void wlc_phy_workarounds_nphy(phy_info_t *pi)
wlapi_bmac_mhf(pi->sh->physhim, MHF3,
MHF3_NPHY_MLADV_WAR,
MHF3_NPHY_MLADV_WAR,
- WLC_BAND_ALL);
+ BRCM_BAND_ALL);
} else if (NREV_IS(pi->pubpi.phy_rev, 2)) {
write_phy_reg(pi, 0x1e3, 0x0);
@@ -16330,7 +16325,7 @@ static void wlc_phy_workarounds_nphy(phy_info_t *pi)
wlc_phy_stay_in_carriersearch_nphy(pi, false);
}
-static void wlc_phy_workarounds_nphy_gainctrl(phy_info_t *pi)
+static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
{
u16 w1th, hpf_code, currband;
int ctr;
@@ -16950,7 +16945,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(phy_info_t *pi)
}
}
-static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(phy_info_t *pi)
+static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
{
s8 lna1_gain_db[] = { 8, 13, 17, 22 };
s8 lna2_gain_db[] = { -2, 7, 11, 15 };
@@ -17005,7 +17000,7 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(phy_info_t *pi)
}
}
-static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(phy_info_t *pi)
+static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
{
u16 currband;
s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
@@ -17211,7 +17206,7 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(phy_info_t *pi)
}
-static void wlc_phy_adjust_lnagaintbl_nphy(phy_info_t *pi)
+static void wlc_phy_adjust_lnagaintbl_nphy(struct brcms_phy *pi)
{
uint core;
int ctr;
@@ -17274,7 +17269,7 @@ static void wlc_phy_adjust_lnagaintbl_nphy(phy_info_t *pi)
wlc_phy_stay_in_carriersearch_nphy(pi, false);
}
-void wlc_phy_switch_radio_nphy(phy_info_t *pi, bool on)
+void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on)
{
if (on) {
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
@@ -17284,14 +17279,14 @@ void wlc_phy_switch_radio_nphy(phy_info_t *pi, bool on)
wlc_phy_radio_postinit_2057(pi);
}
- wlc_phy_chanspec_set((wlc_phy_t *) pi,
+ wlc_phy_chanspec_set((struct brcms_phy_pub *) pi,
pi->radio_chanspec);
} else if (NREV_GE(pi->pubpi.phy_rev, 3)) {
wlc_phy_radio_preinit_205x(pi);
wlc_phy_radio_init_2056(pi);
wlc_phy_radio_postinit_2056(pi);
- wlc_phy_chanspec_set((wlc_phy_t *) pi,
+ wlc_phy_chanspec_set((struct brcms_phy_pub *) pi,
pi->radio_chanspec);
} else {
wlc_phy_radio_preinit_2055(pi);
@@ -17357,7 +17352,7 @@ void wlc_phy_switch_radio_nphy(phy_info_t *pi, bool on)
}
}
-static void wlc_phy_radio_preinit_2055(phy_info_t *pi)
+static void wlc_phy_radio_preinit_2055(struct brcms_phy *pi)
{
and_phy_reg(pi, 0x78, ~RFCC_POR_FORCE);
@@ -17366,12 +17361,12 @@ static void wlc_phy_radio_preinit_2055(phy_info_t *pi)
or_phy_reg(pi, 0x78, RFCC_POR_FORCE);
}
-static void wlc_phy_radio_init_2055(phy_info_t *pi)
+static void wlc_phy_radio_init_2055(struct brcms_phy *pi)
{
wlc_phy_init_radio_regs(pi, regs_2055, RADIO_DEFAULT_CORE);
}
-static void wlc_phy_radio_postinit_2055(phy_info_t *pi)
+static void wlc_phy_radio_postinit_2055(struct brcms_phy *pi)
{
and_radio_reg(pi, RADIO_2055_MASTER_CNTRL1,
@@ -17409,7 +17404,7 @@ static void wlc_phy_radio_postinit_2055(phy_info_t *pi)
and_radio_reg(pi, RADIO_2055_CAL_LPO_CNTRL,
~(RADIO_2055_CAL_LPO_ENABLE));
- wlc_phy_chanspec_set((wlc_phy_t *) pi, pi->radio_chanspec);
+ wlc_phy_chanspec_set((struct brcms_phy_pub *) pi, pi->radio_chanspec);
write_radio_reg(pi, RADIO_2055_CORE1_RXBB_LPF, 9);
write_radio_reg(pi, RADIO_2055_CORE2_RXBB_LPF, 9);
@@ -17436,7 +17431,7 @@ static void wlc_phy_radio_postinit_2055(phy_info_t *pi)
udelay(2);
}
-static void wlc_phy_radio_preinit_205x(phy_info_t *pi)
+static void wlc_phy_radio_preinit_205x(struct brcms_phy *pi)
{
and_phy_reg(pi, 0x78, ~RFCC_CHIP0_PU);
@@ -17447,11 +17442,11 @@ static void wlc_phy_radio_preinit_205x(phy_info_t *pi)
}
-static void wlc_phy_radio_init_2056(phy_info_t *pi)
+static void wlc_phy_radio_init_2056(struct brcms_phy *pi)
{
- radio_regs_t *regs_SYN_2056_ptr = NULL;
- radio_regs_t *regs_TX_2056_ptr = NULL;
- radio_regs_t *regs_RX_2056_ptr = NULL;
+ struct radio_regs *regs_SYN_2056_ptr = NULL;
+ struct radio_regs *regs_TX_2056_ptr = NULL;
+ struct radio_regs *regs_RX_2056_ptr = NULL;
if (NREV_IS(pi->pubpi.phy_rev, 3)) {
regs_SYN_2056_ptr = regs_SYN_2056;
@@ -17510,7 +17505,7 @@ static void wlc_phy_radio_init_2056(phy_info_t *pi)
wlc_phy_init_radio_regs(pi, regs_RX_2056_ptr, (u16) RADIO_2056_RX1);
}
-static void wlc_phy_radio_postinit_2056(phy_info_t *pi)
+static void wlc_phy_radio_postinit_2056(struct brcms_phy *pi)
{
mod_radio_reg(pi, RADIO_2056_SYN_COM_CTRL, 0xb, 0xb);
@@ -17535,9 +17530,9 @@ static void wlc_phy_radio_postinit_2056(phy_info_t *pi)
}
}
-static void wlc_phy_radio_init_2057(phy_info_t *pi)
+static void wlc_phy_radio_init_2057(struct brcms_phy *pi)
{
- radio_20xx_regs_t *regs_2057_ptr = NULL;
+ struct radio_20xx_regs *regs_2057_ptr = NULL;
if (NREV_IS(pi->pubpi.phy_rev, 7)) {
@@ -17576,16 +17571,11 @@ static void wlc_phy_radio_init_2057(phy_info_t *pi)
wlc_phy_init_radio_regs_allbands(pi, regs_2057_ptr);
}
-static void wlc_phy_radio_postinit_2057(phy_info_t *pi)
+static void wlc_phy_radio_postinit_2057(struct brcms_phy *pi)
{
mod_radio_reg(pi, RADIO_2057_XTALPUOVR_PINCTRL, 0x1, 0x1);
- if (pi->sh->chip == !BCM6362_CHIP_ID) {
-
- mod_radio_reg(pi, RADIO_2057_XTALPUOVR_PINCTRL, 0x2, 0x2);
- }
-
mod_radio_reg(pi, RADIO_2057_RFPLL_MISC_CAL_RESETN, 0x78, 0x78);
mod_radio_reg(pi, RADIO_2057_XTAL_CONFIG2, 0x80, 0x80);
mdelay(2);
@@ -17601,16 +17591,16 @@ static void wlc_phy_radio_postinit_2057(phy_info_t *pi)
}
static bool
-wlc_phy_chan2freq_nphy(phy_info_t *pi, uint channel, int *f,
- chan_info_nphy_radio2057_t **t0,
- chan_info_nphy_radio205x_t **t1,
- chan_info_nphy_radio2057_rev5_t **t2,
- chan_info_nphy_2055_t **t3)
+wlc_phy_chan2freq_nphy(struct brcms_phy *pi, uint channel, int *f,
+ struct chan_info_nphy_radio2057 **t0,
+ struct chan_info_nphy_radio205x **t1,
+ struct chan_info_nphy_radio2057_rev5 **t2,
+ struct chan_info_nphy_2055 **t3)
{
uint i;
- chan_info_nphy_radio2057_t *chan_info_tbl_p_0 = NULL;
- chan_info_nphy_radio205x_t *chan_info_tbl_p_1 = NULL;
- chan_info_nphy_radio2057_rev5_t *chan_info_tbl_p_2 = NULL;
+ struct chan_info_nphy_radio2057 *chan_info_tbl_p_0 = NULL;
+ struct chan_info_nphy_radio205x *chan_info_tbl_p_1 = NULL;
+ struct chan_info_nphy_radio2057_rev5 *chan_info_tbl_p_2 = NULL;
u32 tbl_len = 0;
int freq = 0;
@@ -17769,13 +17759,13 @@ wlc_phy_chan2freq_nphy(phy_info_t *pi, uint channel, int *f,
return false;
}
-u8 wlc_phy_get_chan_freq_range_nphy(phy_info_t *pi, uint channel)
+u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint channel)
{
int freq;
- chan_info_nphy_radio2057_t *t0 = NULL;
- chan_info_nphy_radio205x_t *t1 = NULL;
- chan_info_nphy_radio2057_rev5_t *t2 = NULL;
- chan_info_nphy_2055_t *t3 = NULL;
+ struct chan_info_nphy_radio2057 *t0 = NULL;
+ struct chan_info_nphy_radio205x *t1 = NULL;
+ struct chan_info_nphy_radio2057_rev5 *t2 = NULL;
+ struct chan_info_nphy_2055 *t3 = NULL;
if (NORADIO_ENAB(pi->pubpi))
return WL_CHAN_FREQ_RANGE_2G;
@@ -17798,7 +17788,8 @@ u8 wlc_phy_get_chan_freq_range_nphy(phy_info_t *pi, uint channel)
}
static void
-wlc_phy_chanspec_radio2055_setup(phy_info_t *pi, chan_info_nphy_2055_t *ci)
+wlc_phy_chanspec_radio2055_setup(struct brcms_phy *pi,
+ struct chan_info_nphy_2055 *ci)
{
write_radio_reg(pi, RADIO_2055_PLL_REF, ci->RF_pll_ref);
@@ -17806,21 +17797,21 @@ wlc_phy_chanspec_radio2055_setup(phy_info_t *pi, chan_info_nphy_2055_t *ci)
write_radio_reg(pi, RADIO_2055_RF_PLL_MOD1, ci->RF_rf_pll_mod1);
write_radio_reg(pi, RADIO_2055_VCO_CAP_TAIL, ci->RF_vco_cap_tail);
- WLC_PHY_WAR_PR51571(pi);
+ BRCMS_PHY_WAR_PR51571(pi);
write_radio_reg(pi, RADIO_2055_VCO_CAL1, ci->RF_vco_cal1);
write_radio_reg(pi, RADIO_2055_VCO_CAL2, ci->RF_vco_cal2);
write_radio_reg(pi, RADIO_2055_PLL_LF_C1, ci->RF_pll_lf_c1);
write_radio_reg(pi, RADIO_2055_PLL_LF_R1, ci->RF_pll_lf_r1);
- WLC_PHY_WAR_PR51571(pi);
+ BRCMS_PHY_WAR_PR51571(pi);
write_radio_reg(pi, RADIO_2055_PLL_LF_C2, ci->RF_pll_lf_c2);
write_radio_reg(pi, RADIO_2055_LGBUF_CEN_BUF, ci->RF_lgbuf_cen_buf);
write_radio_reg(pi, RADIO_2055_LGEN_TUNE1, ci->RF_lgen_tune1);
write_radio_reg(pi, RADIO_2055_LGEN_TUNE2, ci->RF_lgen_tune2);
- WLC_PHY_WAR_PR51571(pi);
+ BRCMS_PHY_WAR_PR51571(pi);
write_radio_reg(pi, RADIO_2055_CORE1_LGBUF_A_TUNE,
ci->RF_core1_lgbuf_a_tune);
@@ -17830,7 +17821,7 @@ wlc_phy_chanspec_radio2055_setup(phy_info_t *pi, chan_info_nphy_2055_t *ci)
write_radio_reg(pi, RADIO_2055_CORE1_TX_PGA_PAD_TN,
ci->RF_core1_tx_pga_pad_tn);
- WLC_PHY_WAR_PR51571(pi);
+ BRCMS_PHY_WAR_PR51571(pi);
write_radio_reg(pi, RADIO_2055_CORE1_TX_MX_BGTRIM,
ci->RF_core1_tx_mx_bgtrim);
@@ -17840,7 +17831,7 @@ wlc_phy_chanspec_radio2055_setup(phy_info_t *pi, chan_info_nphy_2055_t *ci)
ci->RF_core2_lgbuf_g_tune);
write_radio_reg(pi, RADIO_2055_CORE2_RXRF_REG1, ci->RF_core2_rxrf_reg1);
- WLC_PHY_WAR_PR51571(pi);
+ BRCMS_PHY_WAR_PR51571(pi);
write_radio_reg(pi, RADIO_2055_CORE2_TX_PGA_PAD_TN,
ci->RF_core2_tx_pga_pad_tn);
@@ -17852,7 +17843,7 @@ wlc_phy_chanspec_radio2055_setup(phy_info_t *pi, chan_info_nphy_2055_t *ci)
write_radio_reg(pi, RADIO_2055_VCO_CAL10, 0x05);
write_radio_reg(pi, RADIO_2055_VCO_CAL10, 0x45);
- WLC_PHY_WAR_PR51571(pi);
+ BRCMS_PHY_WAR_PR51571(pi);
write_radio_reg(pi, RADIO_2055_VCO_CAL10, 0x65);
@@ -17860,10 +17851,10 @@ wlc_phy_chanspec_radio2055_setup(phy_info_t *pi, chan_info_nphy_2055_t *ci)
}
static void
-wlc_phy_chanspec_radio2056_setup(phy_info_t *pi,
- const chan_info_nphy_radio205x_t *ci)
+wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
+ const struct chan_info_nphy_radio205x *ci)
{
- radio_regs_t *regs_SYN_2056_ptr = NULL;
+ struct radio_regs *regs_SYN_2056_ptr = NULL;
write_radio_reg(pi,
RADIO_2056_SYN_PLL_VCOCAL1 | RADIO_2056_SYN,
@@ -17986,23 +17977,12 @@ wlc_phy_chanspec_radio2056_setup(phy_info_t *pi,
write_radio_reg(pi, RADIO_2056_SYN_PLL_LOOPFILTER2 |
RADIO_2056_SYN, 0x1f);
- if ((pi->sh->chip == BCM4716_CHIP_ID) ||
- (pi->sh->chip == BCM47162_CHIP_ID)) {
-
- write_radio_reg(pi,
- RADIO_2056_SYN_PLL_LOOPFILTER4 |
- RADIO_2056_SYN, 0x14);
- write_radio_reg(pi,
- RADIO_2056_SYN_PLL_CP2 |
- RADIO_2056_SYN, 0x00);
- } else {
- write_radio_reg(pi,
- RADIO_2056_SYN_PLL_LOOPFILTER4 |
- RADIO_2056_SYN, 0xb);
- write_radio_reg(pi,
- RADIO_2056_SYN_PLL_CP2 |
- RADIO_2056_SYN, 0x14);
- }
+ write_radio_reg(pi,
+ RADIO_2056_SYN_PLL_LOOPFILTER4 |
+ RADIO_2056_SYN, 0xb);
+ write_radio_reg(pi,
+ RADIO_2056_SYN_PLL_CP2 |
+ RADIO_2056_SYN, 0x14);
}
}
@@ -18049,38 +18029,25 @@ wlc_phy_chanspec_radio2056_setup(phy_info_t *pi,
WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
PADG_IDAC, 0xcc);
- if ((pi->sh->chip == BCM4716_CHIP_ID) ||
- (pi->sh->chip ==
- BCM47162_CHIP_ID)) {
- bias = 0x40;
- cascbias = 0x45;
- pag_boost_tune = 0x5;
- pgag_boost_tune = 0x33;
- padg_boost_tune = 0x77;
- mixg_boost_tune = 0x55;
- } else {
- bias = 0x25;
- cascbias = 0x20;
-
- if ((pi->sh->chip ==
- BCM43224_CHIP_ID)
- || (pi->sh->chip ==
- BCM43225_CHIP_ID)
- || (pi->sh->chip ==
- BCM43421_CHIP_ID)) {
- if (pi->sh->chippkg ==
- BCM43224_FAB_SMIC) {
- bias = 0x2a;
- cascbias = 0x38;
- }
+ bias = 0x25;
+ cascbias = 0x20;
+
+ if ((pi->sh->chip ==
+ BCM43224_CHIP_ID)
+ || (pi->sh->chip ==
+ BCM43225_CHIP_ID)) {
+ if (pi->sh->chippkg ==
+ BCM43224_FAB_SMIC) {
+ bias = 0x2a;
+ cascbias = 0x38;
}
-
- pag_boost_tune = 0x4;
- pgag_boost_tune = 0x03;
- padg_boost_tune = 0x77;
- mixg_boost_tune = 0x65;
}
+ pag_boost_tune = 0x4;
+ pgag_boost_tune = 0x03;
+ padg_boost_tune = 0x77;
+ mixg_boost_tune = 0x65;
+
WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
INTPAG_IMAIN_STAT, bias);
WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
@@ -18178,8 +18145,7 @@ wlc_phy_chanspec_radio2056_setup(phy_info_t *pi,
cascbias = 0x30;
if ((pi->sh->chip == BCM43224_CHIP_ID) ||
- (pi->sh->chip == BCM43225_CHIP_ID) ||
- (pi->sh->chip == BCM43421_CHIP_ID)) {
+ (pi->sh->chip == BCM43225_CHIP_ID)) {
if (pi->sh->chippkg == BCM43224_FAB_SMIC) {
cascbias = 0x35;
}
@@ -18201,7 +18167,7 @@ wlc_phy_chanspec_radio2056_setup(phy_info_t *pi,
wlc_phy_radio205x_vcocal_nphy(pi);
}
-void wlc_phy_radio205x_vcocal_nphy(phy_info_t *pi)
+void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi)
{
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
mod_radio_reg(pi, RADIO_2057_RFPLL_MISC_EN, 0x01, 0x0);
@@ -18222,7 +18188,7 @@ void wlc_phy_radio205x_vcocal_nphy(phy_info_t *pi)
#define MAX_205x_RCAL_WAITLOOPS 10000
-static u16 wlc_phy_radio205x_rcal(phy_info_t *pi)
+static u16 wlc_phy_radio205x_rcal(struct brcms_phy *pi)
{
u16 rcal_reg = 0;
int i;
@@ -18328,9 +18294,9 @@ static u16 wlc_phy_radio205x_rcal(phy_info_t *pi)
}
static void
-wlc_phy_chanspec_radio2057_setup(phy_info_t *pi,
- const chan_info_nphy_radio2057_t *ci,
- const chan_info_nphy_radio2057_rev5_t *ci2)
+wlc_phy_chanspec_radio2057_setup(struct brcms_phy *pi,
+ const struct chan_info_nphy_radio2057 *ci,
+ const struct chan_info_nphy_radio2057_rev5 *ci2)
{
int coreNum;
u16 txmix2g_tune_boost_pu = 0;
@@ -18523,7 +18489,7 @@ wlc_phy_chanspec_radio2057_setup(phy_info_t *pi,
wlc_phy_radio205x_vcocal_nphy(pi);
}
-static u16 wlc_phy_radio2057_rccal(phy_info_t *pi)
+static u16 wlc_phy_radio2057_rccal(struct brcms_phy *pi)
{
u16 rccal_valid;
int i;
@@ -18607,7 +18573,7 @@ static u16 wlc_phy_radio2057_rccal(phy_info_t *pi)
}
static void
-wlc_phy_adjust_rx_analpfbw_nphy(phy_info_t *pi, u16 reduction_factr)
+wlc_phy_adjust_rx_analpfbw_nphy(struct brcms_phy *pi, u16 reduction_factr)
{
if (NREV_GE(pi->pubpi.phy_rev, 3) && NREV_LT(pi->pubpi.phy_rev, 7)) {
if ((CHSPEC_CHANNEL(pi->radio_chanspec) == 11) &&
@@ -18635,8 +18601,8 @@ wlc_phy_adjust_rx_analpfbw_nphy(phy_info_t *pi, u16 reduction_factr)
}
static void
-wlc_phy_adjust_min_noisevar_nphy(phy_info_t *pi, int ntones, int *tone_id_buf,
- u32 *noise_var_buf)
+wlc_phy_adjust_min_noisevar_nphy(struct brcms_phy *pi, int ntones,
+ int *tone_id_buf, u32 *noise_var_buf)
{
int i;
u32 offset;
@@ -18684,7 +18650,7 @@ wlc_phy_adjust_min_noisevar_nphy(phy_info_t *pi, int ntones, int *tone_id_buf,
}
}
-static void wlc_phy_adjust_crsminpwr_nphy(phy_info_t *pi, u8 minpwr)
+static void wlc_phy_adjust_crsminpwr_nphy(struct brcms_phy *pi, u8 minpwr)
{
u16 regval;
@@ -18735,7 +18701,7 @@ static void wlc_phy_adjust_crsminpwr_nphy(phy_info_t *pi, u8 minpwr)
}
}
-static void wlc_phy_txlpfbw_nphy(phy_info_t *pi)
+static void wlc_phy_txlpfbw_nphy(struct brcms_phy *pi)
{
u8 tx_lpf_bw = 0;
@@ -18774,7 +18740,7 @@ static void wlc_phy_txlpfbw_nphy(phy_info_t *pi)
}
}
-static void wlc_phy_spurwar_nphy(phy_info_t *pi)
+static void wlc_phy_spurwar_nphy(struct brcms_phy *pi)
{
u16 cur_channel = 0;
int nphy_adj_tone_id_buf[] = { 57, 58 };
@@ -18907,14 +18873,8 @@ static void wlc_phy_spurwar_nphy(phy_info_t *pi)
case 38:
case 102:
case 118:
- if ((pi->sh->chip == BCM4716_CHIP_ID) &&
- (pi->sh->chippkg == BCM4717_PKG_ID)) {
- nphy_adj_tone_id_buf[0] = 32;
- nphy_adj_noise_var_buf[0] = 0x21f;
- } else {
- nphy_adj_tone_id_buf[0] = 0;
- nphy_adj_noise_var_buf[0] = 0x0;
- }
+ nphy_adj_tone_id_buf[0] = 0;
+ nphy_adj_noise_var_buf[0] = 0x0;
break;
case 134:
nphy_adj_tone_id_buf[0] = 32;
@@ -18952,8 +18912,8 @@ static void wlc_phy_spurwar_nphy(phy_info_t *pi)
}
static void
-wlc_phy_chanspec_nphy_setup(phy_info_t *pi, chanspec_t chanspec,
- const nphy_sfo_cfg_t *ci)
+wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, chanspec_t chanspec,
+ const struct nphy_sfo_cfg *ci)
{
u16 val;
@@ -19041,34 +19001,20 @@ wlc_phy_chanspec_nphy_setup(phy_info_t *pi, chanspec_t chanspec,
if (pi->nphy_aband_spurwar_en &&
((val == 38) || (val == 102)
- || (val == 118))) {
- if ((pi->sh->chip ==
- BCM4716_CHIP_ID)
- && (pi->sh->chippkg ==
- BCM4717_PKG_ID)) {
- spuravoid = 0;
- } else {
- spuravoid = 1;
- }
- }
+ || (val == 118)))
+ spuravoid = 1;
}
}
if (pi->phy_spuravoid == SPURAVOID_FORCEON)
spuravoid = 1;
- if ((pi->sh->chip == BCM4716_CHIP_ID) ||
- (pi->sh->chip == BCM47162_CHIP_ID)) {
- si_pmu_spuravoid(pi->sh->sih, spuravoid);
- } else {
- wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
- si_pmu_spuravoid(pi->sh->sih, spuravoid);
- wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
- }
+ wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
+ si_pmu_spuravoid(pi->sh->sih, spuravoid);
+ wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
if ((pi->sh->chip == BCM43224_CHIP_ID) ||
- (pi->sh->chip == BCM43225_CHIP_ID) ||
- (pi->sh->chip == BCM43421_CHIP_ID)) {
+ (pi->sh->chip == BCM43225_CHIP_ID)) {
if (spuravoid == 1) {
@@ -19085,10 +19031,7 @@ wlc_phy_chanspec_nphy_setup(phy_info_t *pi, chanspec_t chanspec,
}
}
- if (!((pi->sh->chip == BCM4716_CHIP_ID) ||
- (pi->sh->chip == BCM47162_CHIP_ID))) {
- wlapi_bmac_core_phypll_reset(pi->sh->physhim);
- }
+ wlapi_bmac_core_phypll_reset(pi->sh->physhim);
mod_phy_reg(pi, 0x01, (0x1 << 15),
((spuravoid > 0) ? (0x1 << 15) : 0));
@@ -19104,13 +19047,13 @@ wlc_phy_chanspec_nphy_setup(phy_info_t *pi, chanspec_t chanspec,
wlc_phy_spurwar_nphy(pi);
}
-void wlc_phy_chanspec_set_nphy(phy_info_t *pi, chanspec_t chanspec)
+void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, chanspec_t chanspec)
{
int freq;
- chan_info_nphy_radio2057_t *t0 = NULL;
- chan_info_nphy_radio205x_t *t1 = NULL;
- chan_info_nphy_radio2057_rev5_t *t2 = NULL;
- chan_info_nphy_2055_t *t3 = NULL;
+ struct chan_info_nphy_radio2057 *t0 = NULL;
+ struct chan_info_nphy_radio205x *t1 = NULL;
+ struct chan_info_nphy_radio2057_rev5 *t2 = NULL;
+ struct chan_info_nphy_2055 *t3 = NULL;
if (NORADIO_ENAB(pi->pubpi)) {
return;
@@ -19120,7 +19063,7 @@ void wlc_phy_chanspec_set_nphy(phy_info_t *pi, chanspec_t chanspec)
(pi, CHSPEC_CHANNEL(chanspec), &freq, &t0, &t1, &t2, &t3))
return;
- wlc_phy_chanspec_radio_set((wlc_phy_t *) pi, chanspec);
+ wlc_phy_chanspec_radio_set((struct brcms_phy_pub *) pi, chanspec);
if (CHSPEC_BW(chanspec) != pi->bw)
wlapi_bmac_bw_set(pi->sh->physhim, CHSPEC_BW(chanspec));
@@ -19157,12 +19100,9 @@ void wlc_phy_chanspec_set_nphy(phy_info_t *pi, chanspec_t chanspec)
wlc_phy_chanspec_radio2057_setup(pi, t0, t2);
wlc_phy_chanspec_nphy_setup(pi, chanspec,
- (pi->pubpi.radiorev ==
- 5) ? (const nphy_sfo_cfg_t
- *)&(t2->
- PHY_BW1a)
- : (const nphy_sfo_cfg_t *)
- &(t0->PHY_BW1a));
+ (pi->pubpi.radiorev == 5) ?
+ (const struct nphy_sfo_cfg *)&(t2->PHY_BW1a) :
+ (const struct nphy_sfo_cfg *)&(t0->PHY_BW1a));
} else {
@@ -19173,8 +19113,7 @@ void wlc_phy_chanspec_set_nphy(phy_info_t *pi, chanspec_t chanspec)
wlc_phy_chanspec_radio2056_setup(pi, t1);
wlc_phy_chanspec_nphy_setup(pi, chanspec,
- (const nphy_sfo_cfg_t *)
- &(t1->PHY_BW1a));
+ (const struct nphy_sfo_cfg *) &(t1->PHY_BW1a));
}
} else {
@@ -19185,13 +19124,13 @@ void wlc_phy_chanspec_set_nphy(phy_info_t *pi, chanspec_t chanspec)
wlc_phy_chanspec_radio2055_setup(pi, t3);
wlc_phy_chanspec_nphy_setup(pi, chanspec,
- (const nphy_sfo_cfg_t *)&(t3->
+ (const struct nphy_sfo_cfg *)&(t3->
PHY_BW1a));
}
}
-static void wlc_phy_savecal_nphy(phy_info_t *pi)
+static void wlc_phy_savecal_nphy(struct brcms_phy *pi)
{
void *tbl_ptr;
int coreNum;
@@ -19340,7 +19279,7 @@ static void wlc_phy_savecal_nphy(phy_info_t *pi)
wlc_phy_stay_in_carriersearch_nphy(pi, false);
}
-static void wlc_phy_restorecal_nphy(phy_info_t *pi)
+static void wlc_phy_restorecal_nphy(struct brcms_phy *pi)
{
u16 *loft_comp;
u16 txcal_coeffs_bphy[4];
@@ -19539,9 +19478,9 @@ static void wlc_phy_restorecal_nphy(phy_info_t *pi)
}
}
-void wlc_phy_antsel_init(wlc_phy_t *ppi, bool lut_init)
+void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init)
{
- phy_info_t *pi = (phy_info_t *) ppi;
+ struct brcms_phy *pi = (struct brcms_phy *) ppi;
u16 mask = 0xfc00;
u32 mc = 0;
@@ -19599,7 +19538,7 @@ void wlc_phy_antsel_init(wlc_phy_t *ppi, bool lut_init)
}
}
-u16 wlc_phy_classifier_nphy(phy_info_t *pi, u16 mask, u16 val)
+u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val)
{
u16 curr_ctl, new_ctl;
bool suspended = false;
@@ -19624,7 +19563,7 @@ u16 wlc_phy_classifier_nphy(phy_info_t *pi, u16 mask, u16 val)
return new_ctl;
}
-static void wlc_phy_clip_det_nphy(phy_info_t *pi, u8 write, u16 *vals)
+static void wlc_phy_clip_det_nphy(struct brcms_phy *pi, u8 write, u16 *vals)
{
if (write == 0) {
@@ -19636,7 +19575,7 @@ static void wlc_phy_clip_det_nphy(phy_info_t *pi, u8 write, u16 *vals)
}
}
-void wlc_phy_force_rfseq_nphy(phy_info_t *pi, u8 cmd)
+void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd)
{
u16 trigger_mask, status_mask;
u16 orig_RfseqCoreActv;
@@ -19681,7 +19620,7 @@ void wlc_phy_force_rfseq_nphy(phy_info_t *pi, u8 cmd)
}
static void
-wlc_phy_set_rfseq_nphy(phy_info_t *pi, u8 cmd, u8 *events, u8 *dlys,
+wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys,
u8 len)
{
u32 t1_offset, t2_offset;
@@ -19712,7 +19651,7 @@ wlc_phy_set_rfseq_nphy(phy_info_t *pi, u8 cmd, u8 *events, u8 *dlys,
wlc_phy_stay_in_carriersearch_nphy(pi, false);
}
-static u16 wlc_phy_read_lpf_bw_ctl_nphy(phy_info_t *pi, u16 offset)
+static u16 wlc_phy_read_lpf_bw_ctl_nphy(struct brcms_phy *pi, u16 offset)
{
u16 lpf_bw_ctl_val = 0;
u16 rx2tx_lpf_rc_lut_offset = 0;
@@ -19736,7 +19675,7 @@ static u16 wlc_phy_read_lpf_bw_ctl_nphy(phy_info_t *pi, u16 offset)
}
static void
-wlc_phy_rfctrl_override_nphy_rev7(phy_info_t *pi, u16 field, u16 value,
+wlc_phy_rfctrl_override_nphy_rev7(struct brcms_phy *pi, u16 field, u16 value,
u8 core_mask, u8 off, u8 override_id)
{
u8 core_num;
@@ -20006,7 +19945,7 @@ wlc_phy_rfctrl_override_nphy_rev7(phy_info_t *pi, u16 field, u16 value,
}
static void
-wlc_phy_rfctrl_override_nphy(phy_info_t *pi, u16 field, u16 value,
+wlc_phy_rfctrl_override_nphy(struct brcms_phy *pi, u16 field, u16 value,
u8 core_mask, u8 off)
{
u8 core_num;
@@ -20252,7 +20191,7 @@ wlc_phy_rfctrl_override_nphy(phy_info_t *pi, u16 field, u16 value,
}
static void
-wlc_phy_rfctrl_override_1tomany_nphy(phy_info_t *pi, u16 cmd, u16 value,
+wlc_phy_rfctrl_override_1tomany_nphy(struct brcms_phy *pi, u16 cmd, u16 value,
u8 core_mask, u8 off)
{
u16 rfmxgain = 0, lpfgain = 0;
@@ -20338,7 +20277,7 @@ wlc_phy_rfctrl_override_1tomany_nphy(phy_info_t *pi, u16 cmd, u16 value,
}
static void
-wlc_phy_scale_offset_rssi_nphy(phy_info_t *pi, u16 scale, s8 offset,
+wlc_phy_scale_offset_rssi_nphy(struct brcms_phy *pi, u16 scale, s8 offset,
u8 coresel, u8 rail, u8 rssi_type)
{
u16 valuetostuff;
@@ -20478,7 +20417,7 @@ wlc_phy_scale_offset_rssi_nphy(phy_info_t *pi, u16 scale, s8 offset,
}
}
-void wlc_phy_rssisel_nphy(phy_info_t *pi, u8 core_code, u8 rssi_type)
+void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core_code, u8 rssi_type)
{
u16 mask, val;
u16 afectrlovr_rssi_val, rfctrlcmd_rxen_val, rfctrlcmd_coresel_val,
@@ -20782,7 +20721,7 @@ void wlc_phy_rssisel_nphy(phy_info_t *pi, u8 core_code, u8 rssi_type)
}
int
-wlc_phy_poll_rssi_nphy(phy_info_t *pi, u8 rssi_type, s32 *rssi_buf,
+wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type, s32 *rssi_buf,
u8 nsamps)
{
s16 rssi0, rssi1;
@@ -20881,7 +20820,7 @@ wlc_phy_poll_rssi_nphy(phy_info_t *pi, u8 rssi_type, s32 *rssi_buf,
return rssi_out_val;
}
-s16 wlc_phy_tempsense_nphy(phy_info_t *pi)
+s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi)
{
u16 core1_txrf_iqcal1_save, core1_txrf_iqcal2_save;
u16 core2_txrf_iqcal1_save, core2_txrf_iqcal2_save;
@@ -21035,19 +20974,9 @@ s16 wlc_phy_tempsense_nphy(phy_info_t *pi)
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x03, 16,
&auxADC_rssi_ctrlH_save);
- if (pi->sh->chip == BCM5357_CHIP_ID) {
- radio_temp[0] = (193 * (radio_temp[1] + radio_temp2[1])
- + 88 * (auxADC_Vl) - 27111 +
- 128) / 256;
- } else if (pi->sh->chip == BCM43236_CHIP_ID) {
- radio_temp[0] = (198 * (radio_temp[1] + radio_temp2[1])
- + 91 * (auxADC_Vl) - 27243 +
- 128) / 256;
- } else {
- radio_temp[0] = (179 * (radio_temp[1] + radio_temp2[1])
- + 82 * (auxADC_Vl) - 28861 +
- 128) / 256;
- }
+ radio_temp[0] = (179 * (radio_temp[1] + radio_temp2[1])
+ + 82 * (auxADC_Vl) - 28861 +
+ 128) / 256;
offset = (s16) pi->phy_tempsense_offset;
@@ -21064,10 +20993,8 @@ s16 wlc_phy_tempsense_nphy(phy_info_t *pi)
write_radio_reg(pi, RADIO_2056_SYN_TEMPPROCSENSE, 0x01);
wlc_phy_poll_rssi_nphy(pi, NPHY_RSSI_SEL_IQ, radio_temp, 1);
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
- } else {
+ if (NREV_LT(pi->pubpi.phy_rev, 7))
write_radio_reg(pi, RADIO_2056_SYN_TEMPPROCSENSE, 0x05);
- }
wlc_phy_poll_rssi_nphy(pi, NPHY_RSSI_SEL_IQ, radio_temp2, 1);
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
@@ -21165,7 +21092,7 @@ s16 wlc_phy_tempsense_nphy(phy_info_t *pi)
}
static void
-wlc_phy_set_rssi_2055_vcm(phy_info_t *pi, u8 rssi_type, u8 *vcm_buf)
+wlc_phy_set_rssi_2055_vcm(struct brcms_phy *pi, u8 rssi_type, u8 *vcm_buf)
{
u8 core;
@@ -21219,7 +21146,7 @@ wlc_phy_set_rssi_2055_vcm(phy_info_t *pi, u8 rssi_type, u8 *vcm_buf)
}
}
-void wlc_phy_rssi_cal_nphy(phy_info_t *pi)
+void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi)
{
if (NREV_GE(pi->pubpi.phy_rev, 3)) {
@@ -21231,7 +21158,7 @@ void wlc_phy_rssi_cal_nphy(phy_info_t *pi)
}
}
-static void wlc_phy_rssi_cal_nphy_rev2(phy_info_t *pi, u8 rssi_type)
+static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type)
{
s32 target_code;
u16 classif_state;
@@ -21439,9 +21366,9 @@ static void wlc_phy_rssi_cal_nphy_rev2(phy_info_t *pi, u8 rssi_type)
}
int
-wlc_phy_rssi_compute_nphy(phy_info_t *pi, wlc_d11rxhdr_t *wlc_rxh)
+wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct brcms_d11rxhdr *wlc_rxh)
{
- d11rxhdr_t *rxh = &wlc_rxh->rxhdr;
+ struct d11rxhdr *rxh = &wlc_rxh->rxhdr;
s16 rxpwr, rxpwr0, rxpwr1;
s16 phyRx0_l, phyRx2_l;
@@ -21479,7 +21406,7 @@ wlc_phy_rssi_compute_nphy(phy_info_t *pi, wlc_d11rxhdr_t *wlc_rxh)
}
static void
-wlc_phy_rfctrlintc_override_nphy(phy_info_t *pi, u8 field, u16 value,
+wlc_phy_rfctrlintc_override_nphy(struct brcms_phy *pi, u8 field, u16 value,
u8 core_code)
{
u16 mask;
@@ -21714,7 +21641,7 @@ wlc_phy_rfctrlintc_override_nphy(phy_info_t *pi, u8 field, u16 value,
}
}
-static void wlc_phy_rssi_cal_nphy_rev3(phy_info_t *pi)
+static void wlc_phy_rssi_cal_nphy_rev3(struct brcms_phy *pi)
{
u16 classif_state;
u16 clip_state[2];
@@ -21853,7 +21780,8 @@ static void wlc_phy_rssi_cal_nphy_rev3(phy_info_t *pi)
}
}
- rxcore_state = wlc_phy_rxcore_getstate_nphy((wlc_phy_t *) pi);
+ rxcore_state = wlc_phy_rxcore_getstate_nphy(
+ (struct brcms_phy_pub *) pi);
vcm_level_max = 8;
@@ -22201,7 +22129,7 @@ static void wlc_phy_rssi_cal_nphy_rev3(phy_info_t *pi)
wlc_phy_clip_det_nphy(pi, 1, clip_state);
}
-static void wlc_phy_restore_rssical_nphy(phy_info_t *pi)
+static void wlc_phy_restore_rssical_nphy(struct brcms_phy *pi)
{
if (CHSPEC_IS2G(pi->radio_chanspec)) {
if (pi->nphy_rssical_chanspec_2G == 0)
@@ -22308,7 +22236,7 @@ static void wlc_phy_restore_rssical_nphy(phy_info_t *pi)
}
static u16
-wlc_phy_gen_load_samples_nphy(phy_info_t *pi, u32 f_kHz, u16 max_val,
+wlc_phy_gen_load_samples_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
u8 dac_test_mode)
{
u8 phy_bw, is_phybw40;
@@ -22357,7 +22285,7 @@ wlc_phy_gen_load_samples_nphy(phy_info_t *pi, u32 f_kHz, u16 max_val,
}
int
-wlc_phy_tx_tone_nphy(phy_info_t *pi, u32 f_kHz, u16 max_val,
+wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
u8 iqmode, u8 dac_test_mode, bool modify_bbmult)
{
u16 num_samps;
@@ -22377,7 +22305,7 @@ wlc_phy_tx_tone_nphy(phy_info_t *pi, u32 f_kHz, u16 max_val,
}
static void
-wlc_phy_loadsampletable_nphy(phy_info_t *pi, cs32 *tone_buf,
+wlc_phy_loadsampletable_nphy(struct brcms_phy *pi, cs32 *tone_buf,
u16 num_samps)
{
u16 t;
@@ -22405,7 +22333,7 @@ wlc_phy_loadsampletable_nphy(phy_info_t *pi, cs32 *tone_buf,
}
static void
-wlc_phy_runsamples_nphy(phy_info_t *pi, u16 num_samps, u16 loops,
+wlc_phy_runsamples_nphy(struct brcms_phy *pi, u16 num_samps, u16 loops,
u16 wait, u8 iqmode, u8 dac_test_mode,
bool modify_bbmult)
{
@@ -22492,7 +22420,7 @@ wlc_phy_runsamples_nphy(phy_info_t *pi, u16 num_samps, u16 loops,
write_phy_reg(pi, 0xa1, orig_RfseqCoreActv);
}
-void wlc_phy_stopplayback_nphy(phy_info_t *pi)
+void wlc_phy_stopplayback_nphy(struct brcms_phy *pi)
{
u16 playback_status;
u16 bb_mult;
@@ -22534,11 +22462,11 @@ void wlc_phy_stopplayback_nphy(phy_info_t *pi)
wlc_phy_stay_in_carriersearch_nphy(pi, false);
}
-nphy_txgains_t wlc_phy_get_tx_gain_nphy(phy_info_t *pi)
+struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi)
{
u16 base_idx[2], curr_gain[2];
u8 core_no;
- nphy_txgains_t target_gain;
+ struct nphy_txgains target_gain;
u32 *tx_pwrctrl_tbl = NULL;
if (pi->nphy_txpwrctrl == PHY_TPC_HW_OFF) {
@@ -22584,21 +22512,21 @@ nphy_txgains_t wlc_phy_get_tx_gain_nphy(phy_info_t *pi)
}
}
} else {
+ uint phyrev = pi->pubpi.phy_rev;
+
base_idx[0] = (read_phy_reg(pi, 0x1ed) >> 8) & 0x7f;
base_idx[1] = (read_phy_reg(pi, 0x1ee) >> 8) & 0x7f;
for (core_no = 0; core_no < 2; core_no++) {
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ if (NREV_GE(phyrev, 3)) {
if (PHY_IPA(pi)) {
tx_pwrctrl_tbl =
wlc_phy_get_ipa_gaintbl_nphy(pi);
} else {
if (CHSPEC_IS5G(pi->radio_chanspec)) {
- if NREV_IS
- (pi->pubpi.phy_rev, 3) {
+ if (NREV_IS(phyrev, 3)) {
tx_pwrctrl_tbl =
nphy_tpc_5GHz_txgain_rev3;
- } else if NREV_IS
- (pi->pubpi.phy_rev, 4) {
+ } else if (NREV_IS(phyrev, 4)) {
tx_pwrctrl_tbl =
(pi->srom_fem5g.
extpagain ==
@@ -22611,8 +22539,7 @@ nphy_txgains_t wlc_phy_get_tx_gain_nphy(phy_info_t *pi)
nphy_tpc_5GHz_txgain_rev5;
}
} else {
- if (NREV_GE
- (pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(phyrev, 7)) {
if (pi->pubpi.
radiorev == 3) {
tx_pwrctrl_tbl =
@@ -22625,9 +22552,7 @@ nphy_txgains_t wlc_phy_get_tx_gain_nphy(phy_info_t *pi)
}
} else {
- if (NREV_GE
- (pi->pubpi.phy_rev,
- 5)
+ if (NREV_GE(phyrev, 5)
&& (pi->srom_fem2g.
extpagain ==
3)) {
@@ -22640,8 +22565,7 @@ nphy_txgains_t wlc_phy_get_tx_gain_nphy(phy_info_t *pi)
}
}
}
- if NREV_GE
- (pi->pubpi.phy_rev, 7) {
+ if (NREV_GE(phyrev, 7)) {
target_gain.ipa[core_no] =
(tx_pwrctrl_tbl[base_idx[core_no]]
>> 16) & 0x7;
@@ -22692,9 +22616,9 @@ nphy_txgains_t wlc_phy_get_tx_gain_nphy(phy_info_t *pi)
}
static void
-wlc_phy_iqcal_gainparams_nphy(phy_info_t *pi, u16 core_no,
- nphy_txgains_t target_gain,
- nphy_iqcal_params_t *params)
+wlc_phy_iqcal_gainparams_nphy(struct brcms_phy *pi, u16 core_no,
+ struct nphy_txgains target_gain,
+ struct nphy_iqcal_params *params)
{
u8 k;
int idx;
@@ -22755,7 +22679,7 @@ wlc_phy_iqcal_gainparams_nphy(phy_info_t *pi, u16 core_no,
}
}
-static void wlc_phy_txcal_radio_setup_nphy(phy_info_t *pi)
+static void wlc_phy_txcal_radio_setup_nphy(struct brcms_phy *pi)
{
u16 jtag_core, core;
@@ -23065,7 +22989,7 @@ static void wlc_phy_txcal_radio_setup_nphy(phy_info_t *pi)
}
}
-static void wlc_phy_txcal_radio_cleanup_nphy(phy_info_t *pi)
+static void wlc_phy_txcal_radio_cleanup_nphy(struct brcms_phy *pi)
{
u16 jtag_core, core;
@@ -23203,7 +23127,7 @@ static void wlc_phy_txcal_radio_cleanup_nphy(phy_info_t *pi)
}
}
-static void wlc_phy_txcal_physetup_nphy(phy_info_t *pi)
+static void wlc_phy_txcal_physetup_nphy(struct brcms_phy *pi)
{
u16 val, mask;
@@ -23355,7 +23279,7 @@ static void wlc_phy_txcal_physetup_nphy(phy_info_t *pi)
}
}
-static void wlc_phy_txcal_phycleanup_nphy(phy_info_t *pi)
+static void wlc_phy_txcal_phycleanup_nphy(struct brcms_phy *pi)
{
u16 mask;
@@ -23437,7 +23361,7 @@ static void wlc_phy_txcal_phycleanup_nphy(phy_info_t *pi)
#define NPHY_TEST_TONE_FREQ_20MHz 2500
void
-wlc_phy_est_tonepwr_nphy(phy_info_t *pi, s32 *qdBm_pwrbuf, u8 num_samps)
+wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf, u8 num_samps)
{
u16 tssi_reg;
s32 temp, pwrindex[2];
@@ -23456,7 +23380,7 @@ wlc_phy_est_tonepwr_nphy(phy_info_t *pi, s32 *qdBm_pwrbuf, u8 num_samps)
tssi_type =
CHSPEC_IS5G(pi->radio_chanspec) ?
- (u8)NPHY_RSSI_SEL_TSSI_5G:(u8)NPHY_RSSI_SEL_TSSI_2G;
+ (u8)NPHY_RSSI_SEL_TSSI_5G : (u8)NPHY_RSSI_SEL_TSSI_2G;
wlc_phy_poll_rssi_nphy(pi, tssi_type, rssi_buf, num_samps);
@@ -23484,7 +23408,7 @@ wlc_phy_est_tonepwr_nphy(phy_info_t *pi, s32 *qdBm_pwrbuf, u8 num_samps)
(u32) pwrindex[1], 32, &qdBm_pwrbuf[1]);
}
-static void wlc_phy_internal_cal_txgain_nphy(phy_info_t *pi)
+static void wlc_phy_internal_cal_txgain_nphy(struct brcms_phy *pi)
{
u16 txcal_gain[2];
@@ -23508,7 +23432,7 @@ static void wlc_phy_internal_cal_txgain_nphy(phy_info_t *pi)
txcal_gain);
}
-static void wlc_phy_precal_txgain_nphy(phy_info_t *pi)
+static void wlc_phy_precal_txgain_nphy(struct brcms_phy *pi)
{
bool save_bbmult = false;
u8 txcal_index_2057_rev5n7 = 0;
@@ -23596,7 +23520,8 @@ static void wlc_phy_precal_txgain_nphy(phy_info_t *pi)
}
void
-wlc_phy_cal_txgainctrl_nphy(phy_info_t *pi, s32 dBm_targetpower, bool debug)
+wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi, s32 dBm_targetpower,
+ bool debug)
{
int gainctrl_loopidx;
uint core;
@@ -23763,20 +23688,20 @@ wlc_phy_cal_txgainctrl_nphy(phy_info_t *pi, s32 dBm_targetpower, bool debug)
wlc_phy_stay_in_carriersearch_nphy(pi, false);
}
-static void wlc_phy_update_txcal_ladder_nphy(phy_info_t *pi, u16 core)
+static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
{
int index;
u32 bbmult_scale;
u16 bbmult;
u16 tblentry;
- nphy_txiqcal_ladder_t ladder_lo[] = {
+ struct nphy_txiqcal_ladder ladder_lo[] = {
{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
{25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
{25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
};
- nphy_txiqcal_ladder_t ladder_iq[] = {
+ struct nphy_txiqcal_ladder ladder_iq[] = {
{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
{25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
{100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
@@ -23805,9 +23730,9 @@ static void wlc_phy_update_txcal_ladder_nphy(phy_info_t *pi, u16 core)
}
}
-void wlc_phy_cal_perical_nphy_run(phy_info_t *pi, u8 caltype)
+void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype)
{
- nphy_txgains_t target_gain;
+ struct nphy_txgains target_gain;
u8 tx_pwr_ctrl_state;
bool fullcal = true;
bool restore_tx_gain = false;
@@ -23842,7 +23767,7 @@ void wlc_phy_cal_perical_nphy_run(phy_info_t *pi, u8 caltype)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
- wlc_phyreg_enter((wlc_phy_t *) pi);
+ wlc_phyreg_enter((struct brcms_phy_pub *) pi);
if ((pi->mphase_cal_phase_id == MPHASE_CAL_STATE_IDLE) ||
(pi->mphase_cal_phase_id == MPHASE_CAL_STATE_INIT)) {
@@ -23865,7 +23790,7 @@ void wlc_phy_cal_perical_nphy_run(phy_info_t *pi, u8 caltype)
wlc_phy_txpwrctrl_enable_nphy(pi, PHY_TPC_HW_OFF);
if (pi->antsel_type == ANTSEL_2x3)
- wlc_phy_antsel_init((wlc_phy_t *) pi, true);
+ wlc_phy_antsel_init((struct brcms_phy_pub *) pi, true);
mphase = (pi->mphase_cal_phase_id != MPHASE_CAL_STATE_IDLE);
if (!mphase) {
@@ -23882,12 +23807,12 @@ void wlc_phy_cal_perical_nphy_run(phy_info_t *pi, u8 caltype)
if (PHY_IPA(pi))
wlc_phy_a4(pi, true);
- wlc_phyreg_exit((wlc_phy_t *) pi);
+ wlc_phyreg_exit((struct brcms_phy_pub *) pi);
wlapi_enable_mac(pi->sh->physhim);
wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION,
10000);
wlapi_suspend_mac_and_wait(pi->sh->physhim);
- wlc_phyreg_enter((wlc_phy_t *) pi);
+ wlc_phyreg_enter((struct brcms_phy_pub *) pi);
if (0 == wlc_phy_cal_rxiq_nphy(pi, target_gain,
(pi->
@@ -24052,12 +23977,12 @@ void wlc_phy_cal_perical_nphy_run(phy_info_t *pi, u8 caltype)
}
wlc_phy_txpwrctrl_enable_nphy(pi, tx_pwr_ctrl_state);
- wlc_phyreg_exit((wlc_phy_t *) pi);
+ wlc_phyreg_exit((struct brcms_phy_pub *) pi);
wlapi_enable_mac(pi->sh->physhim);
}
int
-wlc_phy_cal_txiqlo_nphy(phy_info_t *pi, nphy_txgains_t target_gain,
+wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
bool fullcal, bool mphase)
{
u16 val;
@@ -24072,7 +23997,7 @@ wlc_phy_cal_txiqlo_nphy(phy_info_t *pi, nphy_txgains_t target_gain,
u16 tone_freq;
u16 gain_save[2];
u16 cal_gain[2];
- nphy_iqcal_params_t cal_params[2];
+ struct nphy_iqcal_params cal_params[2];
u32 tbl_len;
void *tbl_ptr;
bool ladder_updated[2];
@@ -24427,7 +24352,7 @@ wlc_phy_cal_txiqlo_nphy(phy_info_t *pi, nphy_txgains_t target_gain,
return bcmerror;
}
-static void wlc_phy_reapply_txcal_coeffs_nphy(phy_info_t *pi)
+static void wlc_phy_reapply_txcal_coeffs_nphy(struct brcms_phy *pi)
{
u16 tbl_buf[7];
@@ -24462,9 +24387,9 @@ static void wlc_phy_reapply_txcal_coeffs_nphy(phy_info_t *pi)
}
}
-static void wlc_phy_tx_iq_war_nphy(phy_info_t *pi)
+static void wlc_phy_tx_iq_war_nphy(struct brcms_phy *pi)
{
- nphy_iq_comp_t tx_comp;
+ struct nphy_iq_comp tx_comp;
wlc_phy_table_read_nphy(pi, 15, 4, 0x50, 16, (void *)&tx_comp);
@@ -24475,7 +24400,8 @@ static void wlc_phy_tx_iq_war_nphy(phy_info_t *pi)
}
void
-wlc_phy_rx_iq_coeffs_nphy(phy_info_t *pi, u8 write, nphy_iq_comp_t *pcomp)
+wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
+ struct nphy_iq_comp *pcomp)
{
if (write) {
write_phy_reg(pi, 0x9a, pcomp->a0);
@@ -24491,8 +24417,8 @@ wlc_phy_rx_iq_coeffs_nphy(phy_info_t *pi, u8 write, nphy_iq_comp_t *pcomp)
}
void
-wlc_phy_rx_iq_est_nphy(phy_info_t *pi, phy_iq_est_t *est, u16 num_samps,
- u8 wait_time, u8 wait_for_crs)
+wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
+ u16 num_samps, u8 wait_time, u8 wait_for_crs)
{
u8 core;
@@ -24525,11 +24451,11 @@ wlc_phy_rx_iq_est_nphy(phy_info_t *pi, phy_iq_est_t *est, u16 num_samps,
}
#define CAL_RETRY_CNT 2
-static void wlc_phy_calc_rx_iq_comp_nphy(phy_info_t *pi, u8 core_mask)
+static void wlc_phy_calc_rx_iq_comp_nphy(struct brcms_phy *pi, u8 core_mask)
{
u8 curr_core;
- phy_iq_est_t est[PHY_CORE_MAX];
- nphy_iq_comp_t old_comp, new_comp;
+ struct phy_iq_est est[PHY_CORE_MAX];
+ struct nphy_iq_comp old_comp, new_comp;
s32 iq = 0;
u32 ii = 0, qq = 0;
s16 iq_nbits, qq_nbits, brsh, arsh;
@@ -24634,7 +24560,8 @@ static void wlc_phy_calc_rx_iq_comp_nphy(phy_info_t *pi, u8 core_mask)
}
if (bcmerror != 0) {
- printk("%s: Failed, cnt = %d\n", __func__, cal_retry);
+ printk(KERN_DEBUG "%s: Failed, cnt = %d\n", __func__,
+ cal_retry);
if (cal_retry < CAL_RETRY_CNT) {
cal_retry++;
@@ -24642,13 +24569,12 @@ static void wlc_phy_calc_rx_iq_comp_nphy(phy_info_t *pi, u8 core_mask)
}
new_comp = old_comp;
- } else if (cal_retry > 0) {
}
wlc_phy_rx_iq_coeffs_nphy(pi, 1, &new_comp);
}
-static void wlc_phy_rxcal_radio_setup_nphy(phy_info_t *pi, u8 rx_core)
+static void wlc_phy_rxcal_radio_setup_nphy(struct brcms_phy *pi, u8 rx_core)
{
u16 offtune_val;
u16 bias_g = 0;
@@ -24945,7 +24871,7 @@ static void wlc_phy_rxcal_radio_setup_nphy(phy_info_t *pi, u8 rx_core)
}
}
-static void wlc_phy_rxcal_radio_cleanup_nphy(phy_info_t *pi, u8 rx_core)
+static void wlc_phy_rxcal_radio_cleanup_nphy(struct brcms_phy *pi, u8 rx_core)
{
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
if (rx_core == PHY_CORE_0) {
@@ -25115,7 +25041,7 @@ static void wlc_phy_rxcal_radio_cleanup_nphy(phy_info_t *pi, u8 rx_core)
}
}
-static void wlc_phy_rxcal_physetup_nphy(phy_info_t *pi, u8 rx_core)
+static void wlc_phy_rxcal_physetup_nphy(struct brcms_phy *pi, u8 rx_core)
{
u8 tx_core;
u16 rx_antval, tx_antval;
@@ -25242,7 +25168,7 @@ static void wlc_phy_rxcal_physetup_nphy(phy_info_t *pi, u8 rx_core)
}
}
-static void wlc_phy_rxcal_phycleanup_nphy(phy_info_t *pi, u8 rx_core)
+static void wlc_phy_rxcal_phycleanup_nphy(struct brcms_phy *pi, u8 rx_core)
{
write_phy_reg(pi, 0xa2, pi->tx_rx_cal_phy_saveregs[0]);
@@ -25269,14 +25195,14 @@ static void wlc_phy_rxcal_phycleanup_nphy(phy_info_t *pi, u8 rx_core)
}
static void
-wlc_phy_rxcal_gainctrl_nphy_rev5(phy_info_t *pi, u8 rx_core,
+wlc_phy_rxcal_gainctrl_nphy_rev5(struct brcms_phy *pi, u8 rx_core,
u16 *rxgain, u8 cal_type)
{
u16 num_samps;
- phy_iq_est_t est[PHY_CORE_MAX];
+ struct phy_iq_est est[PHY_CORE_MAX];
u8 tx_core;
- nphy_iq_comp_t save_comp, zero_comp;
+ struct nphy_iq_comp save_comp, zero_comp;
u32 i_pwr, q_pwr, curr_pwr, optim_pwr = 0, prev_pwr = 0, thresh_pwr =
10000;
s16 desired_log2_pwr, actual_log2_pwr, delta_pwr;
@@ -25285,7 +25211,7 @@ wlc_phy_rxcal_gainctrl_nphy_rev5(phy_info_t *pi, u8 rx_core,
s8 optim_gaintbl_index = 0, prev_gaintbl_index = 0;
s8 curr_gaintbl_index = 3;
u8 gainctrl_dirn = NPHY_RXCAL_GAIN_INIT;
- nphy_ipa_txrxgain_t *nphy_rxcal_gaintbl;
+ struct nphy_ipa_txrxgain *nphy_rxcal_gaintbl;
u16 hpvga, lpf_biq1, lpf_biq0, lna2, lna1;
int fine_gain_idx;
s8 txpwrindex;
@@ -25477,14 +25403,14 @@ wlc_phy_rxcal_gainctrl_nphy_rev5(phy_info_t *pi, u8 rx_core,
}
static void
-wlc_phy_rxcal_gainctrl_nphy(phy_info_t *pi, u8 rx_core, u16 *rxgain,
+wlc_phy_rxcal_gainctrl_nphy(struct brcms_phy *pi, u8 rx_core, u16 *rxgain,
u8 cal_type)
{
wlc_phy_rxcal_gainctrl_nphy_rev5(pi, rx_core, rxgain, cal_type);
}
static u8
-wlc_phy_rc_sweep_nphy(phy_info_t *pi, u8 core_idx, u8 loopback_type)
+wlc_phy_rc_sweep_nphy(struct brcms_phy *pi, u8 core_idx, u8 loopback_type)
{
u32 target_bws[2] = { 9500, 21000 };
u32 ref_tones[2] = { 3000, 6000 };
@@ -25516,7 +25442,7 @@ wlc_phy_rc_sweep_nphy(phy_info_t *pi, u8 core_idx, u8 loopback_type)
u16 rccal_val, last_rccal_val = 0, best_rccal_val = 0;
u32 ref_iq_vals = 0, target_iq_vals = 0;
u16 num_samps, log_num_samps = 10;
- phy_iq_est_t est[PHY_CORE_MAX];
+ struct phy_iq_est est[PHY_CORE_MAX];
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
return 0;
@@ -25724,16 +25650,16 @@ wlc_phy_rc_sweep_nphy(phy_info_t *pi, u8 core_idx, u8 loopback_type)
}
#define WAIT_FOR_SCOPE 4000
-static int
-wlc_phy_cal_rxiq_nphy_rev3(phy_info_t *pi, nphy_txgains_t target_gain,
- u8 cal_type, bool debug)
+static int wlc_phy_cal_rxiq_nphy_rev3(struct brcms_phy *pi,
+ struct nphy_txgains target_gain,
+ u8 cal_type, bool debug)
{
u16 orig_BBConfig;
u8 core_no, rx_core;
u8 best_rccal[2];
u16 gain_save[2];
u16 cal_gain[2];
- nphy_iqcal_params_t cal_params[2];
+ struct nphy_iqcal_params cal_params[2];
u8 rxcore_state;
s8 rxlpf_rccal_hpc, txlpf_rccal_lpc;
s8 txlpf_idac;
@@ -25760,7 +25686,8 @@ wlc_phy_cal_rxiq_nphy_rev3(phy_info_t *pi, nphy_txgains_t target_gain,
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_RFSEQ, 2, 0x110, 16, cal_gain);
- rxcore_state = wlc_phy_rxcore_getstate_nphy((wlc_phy_t *) pi);
+ rxcore_state = wlc_phy_rxcore_getstate_nphy(
+ (struct brcms_phy_pub *) pi);
for (rx_core = 0; rx_core < pi->pubpi.phy_corenum; rx_core++) {
@@ -25795,8 +25722,8 @@ wlc_phy_cal_rxiq_nphy_rev3(phy_info_t *pi, nphy_txgains_t target_gain,
if (rx_core == PHY_CORE_1) {
if (rxcore_state == 1) {
- wlc_phy_rxcore_setstate_nphy((wlc_phy_t
- *) pi, 3);
+ wlc_phy_rxcore_setstate_nphy(
+ (struct brcms_phy_pub *) pi, 3);
}
wlc_phy_rxcal_gainctrl_nphy(pi, rx_core, NULL,
@@ -25807,9 +25734,9 @@ wlc_phy_cal_rxiq_nphy_rev3(phy_info_t *pi, nphy_txgains_t target_gain,
pi->nphy_rccal_value = best_rccal[rx_core];
if (rxcore_state == 1) {
- wlc_phy_rxcore_setstate_nphy((wlc_phy_t
- *) pi,
- rxcore_state);
+ wlc_phy_rxcore_setstate_nphy(
+ (struct brcms_phy_pub *) pi,
+ rxcore_state);
}
}
}
@@ -25882,10 +25809,10 @@ wlc_phy_cal_rxiq_nphy_rev3(phy_info_t *pi, nphy_txgains_t target_gain,
}
static int
-wlc_phy_cal_rxiq_nphy_rev2(phy_info_t *pi, nphy_txgains_t target_gain,
- bool debug)
+wlc_phy_cal_rxiq_nphy_rev2(struct brcms_phy *pi,
+ struct nphy_txgains target_gain, bool debug)
{
- phy_iq_est_t est[PHY_CORE_MAX];
+ struct phy_iq_est est[PHY_CORE_MAX];
u8 core_num, rx_core, tx_core;
u16 lna_vals[] = { 0x3, 0x3, 0x1 };
u16 hpf1_vals[] = { 0x7, 0x2, 0x0 };
@@ -25901,7 +25828,7 @@ wlc_phy_cal_rxiq_nphy_rev2(phy_info_t *pi, nphy_txgains_t target_gain,
u16 core_no;
u16 gain_save[2];
u16 cal_gain[2];
- nphy_iqcal_params_t cal_params[2];
+ struct nphy_iqcal_params cal_params[2];
u8 phy_bw;
int bcmerror = 0;
bool first_playtone = true;
@@ -26108,7 +26035,7 @@ wlc_phy_cal_rxiq_nphy_rev2(phy_info_t *pi, nphy_txgains_t target_gain,
}
int
-wlc_phy_cal_rxiq_nphy(phy_info_t *pi, nphy_txgains_t target_gain,
+wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
u8 cal_type, bool debug)
{
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
@@ -26123,7 +26050,7 @@ wlc_phy_cal_rxiq_nphy(phy_info_t *pi, nphy_txgains_t target_gain,
}
}
-static void wlc_phy_extpa_set_tx_digi_filts_nphy(phy_info_t *pi)
+static void wlc_phy_extpa_set_tx_digi_filts_nphy(struct brcms_phy *pi)
{
int j, type = 2;
u16 addr_offset = 0x2c5;
@@ -26134,7 +26061,7 @@ static void wlc_phy_extpa_set_tx_digi_filts_nphy(phy_info_t *pi)
}
}
-static void wlc_phy_ipa_set_tx_digi_filts_nphy(phy_info_t *pi)
+static void wlc_phy_ipa_set_tx_digi_filts_nphy(struct brcms_phy *pi)
{
int j, type;
u16 addr_offset[] = { 0x186, 0x195,
@@ -26172,7 +26099,7 @@ static void wlc_phy_ipa_set_tx_digi_filts_nphy(phy_info_t *pi)
}
}
-static void wlc_phy_ipa_restore_tx_digi_filts_nphy(phy_info_t *pi)
+static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
{
int j;
@@ -26189,7 +26116,7 @@ static void wlc_phy_ipa_restore_tx_digi_filts_nphy(phy_info_t *pi)
}
}
-static u16 wlc_phy_ipa_get_bbmult_nphy(phy_info_t *pi)
+static u16 wlc_phy_ipa_get_bbmult_nphy(struct brcms_phy *pi)
{
u16 m0m1;
@@ -26198,7 +26125,7 @@ static u16 wlc_phy_ipa_get_bbmult_nphy(phy_info_t *pi)
return m0m1;
}
-static void wlc_phy_ipa_set_bbmult_nphy(phy_info_t *pi, u8 m0, u8 m1)
+static void wlc_phy_ipa_set_bbmult_nphy(struct brcms_phy *pi, u8 m0, u8 m1)
{
u16 m0m1 = (u16) ((m0 << 8) | m1);
@@ -26206,7 +26133,7 @@ static void wlc_phy_ipa_set_bbmult_nphy(phy_info_t *pi, u8 m0, u8 m1)
wlc_phy_table_write_nphy(pi, 15, 1, 95, 16, &m0m1);
}
-static u32 *wlc_phy_get_ipa_gaintbl_nphy(phy_info_t *pi)
+static u32 *wlc_phy_get_ipa_gaintbl_nphy(struct brcms_phy *pi)
{
u32 *tx_pwrctrl_tbl = NULL;
@@ -26237,11 +26164,6 @@ static u32 *wlc_phy_get_ipa_gaintbl_nphy(phy_info_t *pi)
} else if (NREV_IS(pi->pubpi.phy_rev, 6)) {
tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev6;
- if (pi->sh->chip == BCM47162_CHIP_ID) {
-
- tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5;
- }
-
} else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5;
@@ -26274,8 +26196,8 @@ static u32 *wlc_phy_get_ipa_gaintbl_nphy(phy_info_t *pi)
}
static void
-wlc_phy_papd_cal_setup_nphy(phy_info_t *pi, nphy_papd_restore_state *state,
- u8 core)
+wlc_phy_papd_cal_setup_nphy(struct brcms_phy *pi,
+ struct nphy_papd_restore_state *state, u8 core)
{
s32 tone_freq;
u8 off_core;
@@ -26544,7 +26466,8 @@ wlc_phy_papd_cal_setup_nphy(phy_info_t *pi, nphy_papd_restore_state *state,
}
static void
-wlc_phy_papd_cal_cleanup_nphy(phy_info_t *pi, nphy_papd_restore_state *state)
+wlc_phy_papd_cal_cleanup_nphy(struct brcms_phy *pi,
+ struct nphy_papd_restore_state *state)
{
u8 core;
@@ -26670,7 +26593,7 @@ wlc_phy_papd_cal_cleanup_nphy(phy_info_t *pi, nphy_papd_restore_state *state)
}
static void
-wlc_phy_a1_nphy(phy_info_t *pi, u8 core, u32 winsz, u32 start,
+wlc_phy_a1_nphy(struct brcms_phy *pi, u8 core, u32 winsz, u32 start,
u32 end)
{
u32 *buf, *src, *dst, sz;
@@ -26722,15 +26645,15 @@ wlc_phy_a1_nphy(phy_info_t *pi, u8 core, u32 winsz, u32 start,
}
static void
-wlc_phy_a2_nphy(phy_info_t *pi, nphy_ipa_txcalgains_t *txgains,
- phy_cal_mode_t cal_mode, u8 core)
+wlc_phy_a2_nphy(struct brcms_phy *pi, struct nphy_ipa_txcalgains *txgains,
+ enum phy_cal_mode cal_mode, u8 core)
{
u16 phy_a1, phy_a2, phy_a3;
u16 phy_a4, phy_a5;
bool phy_a6;
u8 phy_a7, m[2];
u32 phy_a8 = 0;
- nphy_txgains_t phy_a9;
+ struct nphy_txgains phy_a9;
if (NREV_LT(pi->pubpi.phy_rev, 3))
return;
@@ -26781,11 +26704,8 @@ wlc_phy_a2_nphy(phy_info_t *pi, nphy_ipa_txcalgains_t *txgains,
phy_a2 = 63;
if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if (pi->sh->chip == BCM6362_CHIP_ID) {
- phy_a1 = 35;
- phy_a3 = 35;
- } else if ((pi->pubpi.radiorev == 4)
- || (pi->pubpi.radiorev == 6)) {
+ if ((pi->pubpi.radiorev == 4)
+ || (pi->pubpi.radiorev == 6)) {
phy_a1 = 30;
phy_a3 = 30;
} else {
@@ -26891,16 +26811,10 @@ wlc_phy_a2_nphy(phy_info_t *pi, nphy_ipa_txcalgains_t *txgains,
if (txgains->useindex) {
phy_a4 = 15 - ((txgains->index) >> 3);
if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if (NREV_GE(pi->pubpi.phy_rev, 6)) {
+ if (NREV_GE(pi->pubpi.phy_rev, 6))
phy_a5 = 0x00f7 | (phy_a4 << 8);
- if (pi->sh->chip ==
- BCM47162_CHIP_ID) {
- phy_a5 =
- 0x10f7 | (phy_a4 <<
- 8);
- }
- } else
+ else
if (NREV_IS(pi->pubpi.phy_rev, 5))
phy_a5 = 0x10f7 | (phy_a4 << 8);
else
@@ -27020,12 +26934,12 @@ wlc_phy_a2_nphy(phy_info_t *pi, nphy_ipa_txcalgains_t *txgains,
}
}
-static u8 wlc_phy_a3_nphy(phy_info_t *pi, u8 start_gain, u8 core)
+static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
{
int phy_a1;
int phy_a2;
bool phy_a3;
- nphy_ipa_txcalgains_t phy_a4;
+ struct nphy_ipa_txcalgains phy_a4;
bool phy_a5 = false;
bool phy_a6 = true;
s32 phy_a7, phy_a8;
@@ -27181,10 +27095,10 @@ static u8 wlc_phy_a3_nphy(phy_info_t *pi, u8 start_gain, u8 core)
}
-static void wlc_phy_a4(phy_info_t *pi, bool full_cal)
+static void wlc_phy_a4(struct brcms_phy *pi, bool full_cal)
{
- nphy_ipa_txcalgains_t phy_b1[2];
- nphy_papd_restore_state phy_b2;
+ struct nphy_ipa_txcalgains phy_b1[2];
+ struct nphy_papd_restore_state phy_b2;
bool phy_b3;
u8 phy_b4;
u8 phy_b5;
@@ -27470,7 +27384,7 @@ static void wlc_phy_a4(phy_info_t *pi, bool full_cal)
}
}
-void wlc_phy_txpwr_fixpower_nphy(phy_info_t *pi)
+void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi)
{
uint core;
u32 txgain;
@@ -27528,20 +27442,20 @@ void wlc_phy_txpwr_fixpower_nphy(phy_info_t *pi)
pi->nphy_txpwrindex[PHY_CORE_1].index_internal_save = txpi[1];
for (core = 0; core < pi->pubpi.phy_corenum; core++) {
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ uint phyrev = pi->pubpi.phy_rev;
+
+ if (NREV_GE(phyrev, 3)) {
if (PHY_IPA(pi)) {
u32 *tx_gaintbl =
wlc_phy_get_ipa_gaintbl_nphy(pi);
txgain = tx_gaintbl[txpi[core]];
} else {
if (CHSPEC_IS5G(pi->radio_chanspec)) {
- if NREV_IS
- (pi->pubpi.phy_rev, 3) {
+ if (NREV_IS(phyrev, 3)) {
txgain =
nphy_tpc_5GHz_txgain_rev3
[txpi[core]];
- } else if NREV_IS
- (pi->pubpi.phy_rev, 4) {
+ } else if (NREV_IS(phyrev, 4)) {
txgain =
(pi->srom_fem5g.extpagain ==
3) ?
@@ -27555,7 +27469,7 @@ void wlc_phy_txpwr_fixpower_nphy(phy_info_t *pi)
[txpi[core]];
}
} else {
- if (NREV_GE(pi->pubpi.phy_rev, 5) &&
+ if (NREV_GE(phyrev, 5) &&
(pi->srom_fem2g.extpagain == 3)) {
txgain =
nphy_tpc_txgain_HiPwrEPA
@@ -27571,20 +27485,19 @@ void wlc_phy_txpwr_fixpower_nphy(phy_info_t *pi)
txgain = nphy_tpc_txgain[txpi[core]];
}
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ if (NREV_GE(phyrev, 3))
rad_gain = (txgain >> 16) & ((1 << (32 - 16 + 1)) - 1);
- } else {
+ else
rad_gain = (txgain >> 16) & ((1 << (28 - 16 + 1)) - 1);
- }
- if (NREV_GE(pi->pubpi.phy_rev, 7)) {
+ if (NREV_GE(phyrev, 7))
dac_gain = (txgain >> 8) & ((1 << (10 - 8 + 1)) - 1);
- } else {
+ else
dac_gain = (txgain >> 8) & ((1 << (13 - 8 + 1)) - 1);
- }
+
bbmult = (txgain >> 0) & ((1 << (7 - 0 + 1)) - 1);
- if (NREV_GE(pi->pubpi.phy_rev, 3)) {
+ if (NREV_GE(phyrev, 3)) {
mod_phy_reg(pi, ((core == PHY_CORE_0) ? 0x8f :
0xa5), (0x1 << 8), (0x1 << 8));
} else {
@@ -27682,7 +27595,7 @@ wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power, u8 rate_ofdm_start,
}
}
-void wlc_phy_txpwr_apply_nphy(phy_info_t *pi)
+void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi)
{
uint rate1, rate2, band_num;
u8 tmp_bw40po = 0, tmp_cddpo = 0, tmp_stbcpo = 0;
@@ -27880,7 +27793,7 @@ void wlc_phy_txpwr_apply_nphy(phy_info_t *pi)
return;
}
-static void wlc_phy_txpwr_srom_read_ppr_nphy(phy_info_t *pi)
+static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
{
u16 bw40po, cddpo, stbcpo, bwduppo;
uint band_num;
@@ -28088,7 +28001,7 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(phy_info_t *pi)
wlc_phy_txpwr_apply_nphy(pi);
}
-static bool wlc_phy_txpwr_srom_read_nphy(phy_info_t *pi)
+static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
{
pi->antswitch = (u8) PHY_GETINTVAR(pi, "antswitch");
@@ -28148,7 +28061,7 @@ static bool wlc_phy_txpwr_srom_read_nphy(phy_info_t *pi)
return true;
}
-void wlc_phy_txpower_recalc_target_nphy(phy_info_t *pi)
+void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi)
{
u8 tx_pwr_ctrl_state;
wlc_phy_txpwr_limit_to_tbl_nphy(pi);
@@ -28168,7 +28081,7 @@ void wlc_phy_txpower_recalc_target_nphy(phy_info_t *pi)
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, 0);
}
-static void wlc_phy_txpwrctrl_coeff_setup_nphy(phy_info_t *pi)
+static void wlc_phy_txpwrctrl_coeff_setup_nphy(struct brcms_phy *pi)
{
u32 idx;
u16 iqloCalbuf[7];
@@ -28239,7 +28152,7 @@ static void wlc_phy_txpwrctrl_coeff_setup_nphy(phy_info_t *pi)
wlc_phy_stay_in_carriersearch_nphy(pi, false);
}
-static void wlc_phy_ipa_internal_tssi_setup_nphy(phy_info_t *pi)
+static void wlc_phy_ipa_internal_tssi_setup_nphy(struct brcms_phy *pi)
{
u8 core;
@@ -28349,7 +28262,7 @@ static void wlc_phy_ipa_internal_tssi_setup_nphy(phy_info_t *pi)
}
}
-static void wlc_phy_txpwrctrl_idle_tssi_nphy(phy_info_t *pi)
+static void wlc_phy_txpwrctrl_idle_tssi_nphy(struct brcms_phy *pi)
{
s32 rssi_buf[4];
s32 int_val;
@@ -28415,7 +28328,7 @@ static void wlc_phy_txpwrctrl_idle_tssi_nphy(phy_info_t *pi)
}
-static void wlc_phy_txpwrctrl_pwr_setup_nphy(phy_info_t *pi)
+static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
{
u32 idx;
s16 a1[2], b0[2], b1[2];
@@ -28652,13 +28565,13 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(phy_info_t *pi)
wlc_phy_stay_in_carriersearch_nphy(pi, false);
}
-static bool wlc_phy_txpwr_ison_nphy(phy_info_t *pi)
+static bool wlc_phy_txpwr_ison_nphy(struct brcms_phy *pi)
{
return read_phy_reg((pi), 0x1e7) & ((0x1 << 15) |
(0x1 << 14) | (0x1 << 13));
}
-static u8 wlc_phy_txpwr_idx_cur_get_nphy(phy_info_t *pi, u8 core)
+static u8 wlc_phy_txpwr_idx_cur_get_nphy(struct brcms_phy *pi, u8 core)
{
u16 tmp;
tmp = read_phy_reg(pi, ((core == PHY_CORE_0) ? 0x1ed : 0x1ee));
@@ -28668,7 +28581,7 @@ static u8 wlc_phy_txpwr_idx_cur_get_nphy(phy_info_t *pi, u8 core)
}
static void
-wlc_phy_txpwr_idx_cur_set_nphy(phy_info_t *pi, u8 idx0, u8 idx1)
+wlc_phy_txpwr_idx_cur_set_nphy(struct brcms_phy *pi, u8 idx0, u8 idx1)
{
mod_phy_reg(pi, 0x1e7, (0x7f << 0), idx0);
@@ -28676,7 +28589,7 @@ wlc_phy_txpwr_idx_cur_set_nphy(phy_info_t *pi, u8 idx0, u8 idx1)
mod_phy_reg(pi, 0x222, (0xff << 0), idx1);
}
-u16 wlc_phy_txpwr_idx_get_nphy(phy_info_t *pi)
+u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi)
{
u16 tmp;
u16 pwr_idx[2];
@@ -28698,7 +28611,7 @@ u16 wlc_phy_txpwr_idx_get_nphy(phy_info_t *pi)
return tmp;
}
-void wlc_phy_txpwr_papd_cal_nphy(phy_info_t *pi)
+void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi)
{
if (PHY_IPA(pi)
&& (pi->nphy_force_papd_cal
@@ -28714,7 +28627,7 @@ void wlc_phy_txpwr_papd_cal_nphy(phy_info_t *pi)
}
}
-void wlc_phy_txpwrctrl_enable_nphy(phy_info_t *pi, u8 ctrl_type)
+void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type)
{
u16 mask = 0, val = 0, ishw = 0;
u8 ctr;
@@ -28783,7 +28696,7 @@ void wlc_phy_txpwrctrl_enable_nphy(phy_info_t *pi, u8 ctrl_type)
if (NREV_LT(pi->pubpi.phy_rev, 2) && IS40MHZ(pi))
wlapi_bmac_mhf(pi->sh->physhim, MHF1, MHF1_IQSWAP_WAR,
- MHF1_IQSWAP_WAR, WLC_BAND_ALL);
+ MHF1_IQSWAP_WAR, BRCM_BAND_ALL);
} else {
@@ -28842,7 +28755,7 @@ void wlc_phy_txpwrctrl_enable_nphy(phy_info_t *pi, u8 ctrl_type)
if (NREV_LT(pi->pubpi.phy_rev, 2) && IS40MHZ(pi))
wlapi_bmac_mhf(pi->sh->physhim, MHF1, MHF1_IQSWAP_WAR,
- 0x0, WLC_BAND_ALL);
+ 0x0, BRCM_BAND_ALL);
if (PHY_IPA(pi)) {
mod_phy_reg(pi, (0 == PHY_CORE_0) ? 0x297 :
@@ -28860,7 +28773,7 @@ void wlc_phy_txpwrctrl_enable_nphy(phy_info_t *pi, u8 ctrl_type)
}
void
-wlc_phy_txpwr_index_nphy(phy_info_t *pi, u8 core_mask, s8 txpwrindex,
+wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask, s8 txpwrindex,
bool restore_cals)
{
u8 core, txpwrctl_tbl;
@@ -29099,7 +29012,7 @@ wlc_phy_txpwr_index_nphy(phy_info_t *pi, u8 core_mask, s8 txpwrindex,
}
void
-wlc_phy_txpower_sromlimit_get_nphy(phy_info_t *pi, uint chan, u8 *max_pwr,
+wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan, u8 *max_pwr,
u8 txp_rate_idx)
{
u8 chan_freq_range;
@@ -29126,7 +29039,7 @@ wlc_phy_txpower_sromlimit_get_nphy(phy_info_t *pi, uint chan, u8 *max_pwr,
return;
}
-void wlc_phy_stay_in_carriersearch_nphy(phy_info_t *pi, bool enable)
+void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi, bool enable)
{
u16 clip_off[] = { 0xffff, 0xffff };
@@ -29154,7 +29067,7 @@ void wlc_phy_stay_in_carriersearch_nphy(phy_info_t *pi, bool enable)
}
}
-void wlc_nphy_deaf_mode(phy_info_t *pi, bool mode)
+void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode)
{
wlapi_suspend_mac_and_wait(pi->sh->physhim);
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_qmath.c b/drivers/staging/brcm80211/brcmsmac/phy/phy_qmath.c
index c98176fd0aa..01ff0c8eb4b 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_qmath.c
+++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_qmath.c
@@ -14,9 +14,7 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/types.h>
-
-#include "wlc_phy_qmath.h"
+#include "phy_qmath.h"
/*
Description: This function make 16 bit unsigned multiplication. To fit the output into
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_qmath.h b/drivers/staging/brcm80211/brcmsmac/phy/phy_qmath.h
index 3dcee1c4aa6..20e3783f921 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_qmath.h
+++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_qmath.h
@@ -14,8 +14,10 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef __QMATH_H__
-#define __QMATH_H__
+#ifndef _BRCM_QMATH_H_
+#define _BRCM_QMATH_H_
+
+#include <types.h>
u16 qm_mulu16(u16 op1, u16 op2);
@@ -37,4 +39,4 @@ s16 qm_norm32(s32 op);
void qm_log10(s32 N, s16 qN, s16 *log10N, s16 *qLog10N);
-#endif /* #ifndef __QMATH_H__ */
+#endif /* #ifndef _BRCM_QMATH_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_radio.h b/drivers/staging/brcm80211/brcmsmac/phy/phy_radio.h
index 72176ae2882..c3a675455ff 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_radio.h
+++ b/drivers/staging/brcm80211/brcmsmac/phy/phy_radio.h
@@ -14,8 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _BCM20XX_H
-#define _BCM20XX_H
+#ifndef _BRCM_PHY_RADIO_H_
+#define _BRCM_PHY_RADIO_H_
#define RADIO_IDCODE 0x01
@@ -1530,4 +1530,4 @@
#define RADIO_2057_VCM_MASK 0x7
-#endif /* _BCM20XX_H */
+#endif /* _BRCM_PHY_RADIO_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/phy_version.h b/drivers/staging/brcm80211/brcmsmac/phy/phy_version.h
deleted file mode 100644
index 51a223880bc..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/phy/phy_version.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef phy_version_h_
-#define phy_version_h_
-
-#define PHY_MAJOR_VERSION 1
-
-#define PHY_MINOR_VERSION 82
-
-#define PHY_RC_NUMBER 8
-
-#define PHY_INCREMENTAL_NUMBER 0
-
-#define PHY_BUILD_NUMBER 0
-
-#define PHY_VERSION { 1, 82, 8, 0 }
-
-#define PHY_VERSION_NUM 0x01520800
-
-#define PHY_VERSION_STR "1.82.8.0"
-
-#endif /* phy_version_h_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phyreg_n.h b/drivers/staging/brcm80211/brcmsmac/phy/phyreg_n.h
index 211bc3a842a..a97c3a79947 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phyreg_n.h
+++ b/drivers/staging/brcm80211/brcmsmac/phy/phyreg_n.h
@@ -123,13 +123,13 @@
#define NPHY_REV3_RFSEQ_CMD_CLR_RXRX_BIAS 0xf
#define NPHY_REV3_RFSEQ_CMD_END 0x1f
-#define NPHY_RSSI_SEL_W1 0x0
-#define NPHY_RSSI_SEL_W2 0x1
-#define NPHY_RSSI_SEL_NB 0x2
-#define NPHY_RSSI_SEL_IQ 0x3
-#define NPHY_RSSI_SEL_TSSI_2G 0x4
-#define NPHY_RSSI_SEL_TSSI_5G 0x5
-#define NPHY_RSSI_SEL_TBD 0x6
+#define NPHY_RSSI_SEL_W1 0x0
+#define NPHY_RSSI_SEL_W2 0x1
+#define NPHY_RSSI_SEL_NB 0x2
+#define NPHY_RSSI_SEL_IQ 0x3
+#define NPHY_RSSI_SEL_TSSI_2G 0x4
+#define NPHY_RSSI_SEL_TSSI_5G 0x5
+#define NPHY_RSSI_SEL_TBD 0x6
#define NPHY_RAIL_I 0x0
#define NPHY_RAIL_Q 0x1
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phytbl_lcn.c b/drivers/staging/brcm80211/brcmsmac/phy/phytbl_lcn.c
index 81c59b05482..023d05aa97a 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phytbl_lcn.c
+++ b/drivers/staging/brcm80211/brcmsmac/phy/phytbl_lcn.c
@@ -14,10 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/types.h>
-#include <sbhnddma.h>
-#include <wlc_phy_int.h>
-#include <wlc_phytbl_lcn.h>
+#include <types.h>
+#include "phytbl_lcn.h"
const u32 dot11lcn_gain_tbl_rev0[] = {
0x00000000,
@@ -1507,7 +1505,7 @@ const u32 dot11lcn_gain_tbl_5G[] = {
0x00000000
};
-const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_rev0[] = {
+const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[] = {
{&dot11lcn_gain_tbl_rev0,
sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18,
0, 32}
@@ -1522,7 +1520,7 @@ const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_rev0[] = {
,
};
-const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_rev1[] = {
+const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev1[] = {
{&dot11lcn_gain_tbl_rev1,
sizeof(dot11lcn_gain_tbl_rev1) / sizeof(dot11lcn_gain_tbl_rev1[0]), 18,
0, 32}
@@ -1537,7 +1535,7 @@ const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_rev1[] = {
,
};
-const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_2G_rev2[] = {
+const struct phytbl_info dot11lcnphytbl_rx_gain_info_2G_rev2[] = {
{&dot11lcn_gain_tbl_2G,
sizeof(dot11lcn_gain_tbl_2G) / sizeof(dot11lcn_gain_tbl_2G[0]), 18, 0,
32}
@@ -1555,7 +1553,7 @@ const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_2G_rev2[] = {
17, 0, 8}
};
-const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_5G_rev2[] = {
+const struct phytbl_info dot11lcnphytbl_rx_gain_info_5G_rev2[] = {
{&dot11lcn_gain_tbl_5G,
sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0,
32}
@@ -1573,7 +1571,7 @@ const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_5G_rev2[] = {
17, 0, 8}
};
-const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_extlna_2G_rev2[] = {
+const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_2G_rev2[] = {
{&dot11lcn_gain_tbl_extlna_2G,
sizeof(dot11lcn_gain_tbl_extlna_2G) /
sizeof(dot11lcn_gain_tbl_extlna_2G[0]), 18, 0, 32}
@@ -1591,7 +1589,7 @@ const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_extlna_2G_rev2[] = {
sizeof(dot11lcn_gain_val_tbl_extlna_2G[0]), 17, 0, 8}
};
-const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_extlna_5G_rev2[] = {
+const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_5G_rev2[] = {
{&dot11lcn_gain_tbl_5G,
sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0,
32}
@@ -1610,20 +1608,20 @@ const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_extlna_5G_rev2[] = {
};
const u32 dot11lcnphytbl_rx_gain_info_sz_rev0 =
- sizeof(dot11lcnphytbl_rx_gain_info_rev0) /
- sizeof(dot11lcnphytbl_rx_gain_info_rev0[0]);
+ sizeof(dot11lcnphytbl_rx_gain_info_rev0) /
+ sizeof(dot11lcnphytbl_rx_gain_info_rev0[0]);
const u32 dot11lcnphytbl_rx_gain_info_sz_rev1 =
- sizeof(dot11lcnphytbl_rx_gain_info_rev1) /
- sizeof(dot11lcnphytbl_rx_gain_info_rev1[0]);
+ sizeof(dot11lcnphytbl_rx_gain_info_rev1) /
+ sizeof(dot11lcnphytbl_rx_gain_info_rev1[0]);
const u32 dot11lcnphytbl_rx_gain_info_2G_rev2_sz =
- sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2) /
- sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2[0]);
+ sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2) /
+ sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2[0]);
const u32 dot11lcnphytbl_rx_gain_info_5G_rev2_sz =
- sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2) /
- sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2[0]);
+ sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2) /
+ sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2[0]);
const u16 dot11lcn_min_sig_sq_tbl_rev0[] = {
0x014d,
@@ -2775,7 +2773,7 @@ const u32 dot11lcn_papd_compdelta_tbl_rev0[] = {
0x00080000,
};
-const dot11lcnphytbl_info_t dot11lcnphytbl_info_rev0[] = {
+const struct phytbl_info dot11lcnphytbl_info_rev0[] = {
{&dot11lcn_min_sig_sq_tbl_rev0,
sizeof(dot11lcn_min_sig_sq_tbl_rev0) /
sizeof(dot11lcn_min_sig_sq_tbl_rev0[0]), 2, 0, 16}
@@ -2834,34 +2832,35 @@ const dot11lcnphytbl_info_t dot11lcnphytbl_info_rev0[] = {
,
};
-const dot11lcnphytbl_info_t dot11lcn_sw_ctrl_tbl_info_4313 = {
+const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313 = {
&dot11lcn_sw_ctrl_tbl_4313_rev0,
sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0) /
sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0[0]), 15, 0, 16
};
-const dot11lcnphytbl_info_t dot11lcn_sw_ctrl_tbl_info_4313_epa = {
+const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa = {
&dot11lcn_sw_ctrl_tbl_4313_epa_rev0,
sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0) /
sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0[0]), 15, 0, 16
};
-const dot11lcnphytbl_info_t dot11lcn_sw_ctrl_tbl_info_4313_bt_epa = {
+const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa = {
&dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo,
sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo) /
sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo[0]), 15, 0, 16
};
-const dot11lcnphytbl_info_t dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250 = {
+const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250 = {
&dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0,
sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0) /
sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0[0]), 15, 0, 16
};
const u32 dot11lcnphytbl_info_sz_rev0 =
- sizeof(dot11lcnphytbl_info_rev0) / sizeof(dot11lcnphytbl_info_rev0[0]);
+ sizeof(dot11lcnphytbl_info_rev0) / sizeof(dot11lcnphytbl_info_rev0[0]);
-const lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = {
+const struct lcnphy_tx_gain_tbl_entry
+dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = {
{3, 0, 31, 0, 72,}
,
{3, 0, 31, 0, 70,}
@@ -3120,7 +3119,7 @@ const lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = {
,
};
-const lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_gaintable_rev0[128] = {
+const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_gaintable_rev0[128] = {
{7, 0, 31, 0, 72,}
,
{7, 0, 31, 0, 70,}
@@ -3379,7 +3378,7 @@ const lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_gaintable_rev0[128] = {
,
};
-const lcnphy_tx_gain_tbl_entry dot11lcnphy_5GHz_gaintable_rev0[128] = {
+const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_5GHz_gaintable_rev0[128] = {
{255, 255, 0xf0, 0, 152,}
,
{255, 255, 0xf0, 0, 147,}
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phytbl_lcn.h b/drivers/staging/brcm80211/brcmsmac/phy/phytbl_lcn.h
index 5a64a988d10..5f75e16bf5a 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phytbl_lcn.h
+++ b/drivers/staging/brcm80211/brcmsmac/phy/phytbl_lcn.h
@@ -14,36 +14,41 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-typedef phytbl_info_t dot11lcnphytbl_info_t;
+#include <types.h>
+#include "phy_int.h"
-extern const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_rev0[];
+extern const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[];
extern const u32 dot11lcnphytbl_rx_gain_info_sz_rev0;
-extern const dot11lcnphytbl_info_t dot11lcn_sw_ctrl_tbl_info_4313;
-extern const dot11lcnphytbl_info_t dot11lcn_sw_ctrl_tbl_info_4313_epa;
-extern const dot11lcnphytbl_info_t dot11lcn_sw_ctrl_tbl_info_4313_epa_combo;
+extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313;
+extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa;
+extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa_combo;
+extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa;
+extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250;
-extern const dot11lcnphytbl_info_t dot11lcnphytbl_info_rev0[];
+extern const struct phytbl_info dot11lcnphytbl_info_rev0[];
extern const u32 dot11lcnphytbl_info_sz_rev0;
-extern const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_2G_rev2[];
+extern const struct phytbl_info dot11lcnphytbl_rx_gain_info_2G_rev2[];
extern const u32 dot11lcnphytbl_rx_gain_info_2G_rev2_sz;
-extern const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_5G_rev2[];
+extern const struct phytbl_info dot11lcnphytbl_rx_gain_info_5G_rev2[];
extern const u32 dot11lcnphytbl_rx_gain_info_5G_rev2_sz;
-extern const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_extlna_2G_rev2[];
+extern const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_2G_rev2[];
-extern const dot11lcnphytbl_info_t dot11lcnphytbl_rx_gain_info_extlna_5G_rev2[];
+extern const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_5G_rev2[];
-typedef struct {
+struct lcnphy_tx_gain_tbl_entry {
unsigned char gm;
unsigned char pga;
unsigned char pad;
unsigned char dac;
unsigned char bb_mult;
-} lcnphy_tx_gain_tbl_entry;
+};
-extern const lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_gaintable_rev0[];
-extern const lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_extPA_gaintable_rev0[];
+extern const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_gaintable_rev0[];
-extern const lcnphy_tx_gain_tbl_entry dot11lcnphy_5GHz_gaintable_rev0[];
+extern const struct
+lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_extPA_gaintable_rev0[];
+
+extern const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_5GHz_gaintable_rev0[];
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phytbl_n.c b/drivers/staging/brcm80211/brcmsmac/phy/phytbl_n.c
index 742df997a3b..7f741f4868a 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phytbl_n.c
+++ b/drivers/staging/brcm80211/brcmsmac/phy/phytbl_n.c
@@ -14,11 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
-
-#include <sbhnddma.h>
-#include <wlc_phy_int.h>
-#include <wlc_phytbl_n.h>
+#include <types.h>
+#include "phytbl_n.h"
const u32 frame_struct_rev0[] = {
0x08004a04,
@@ -4439,7 +4436,7 @@ const u16 loft_lut_core1_rev0[] = {
0x0103,
};
-const mimophytbl_info_t mimophytbl_info_rev0_volatile[] = {
+const struct phytbl_info mimophytbl_info_rev0_volatile[] = {
{&bdi_tbl_rev0, sizeof(bdi_tbl_rev0) / sizeof(bdi_tbl_rev0[0]), 21, 0,
16}
,
@@ -4487,7 +4484,7 @@ const mimophytbl_info_t mimophytbl_info_rev0_volatile[] = {
,
};
-const mimophytbl_info_t mimophytbl_info_rev0[] = {
+const struct phytbl_info mimophytbl_info_rev0[] = {
{&frame_struct_rev0,
sizeof(frame_struct_rev0) / sizeof(frame_struct_rev0[0]), 10, 0, 32}
,
@@ -4538,10 +4535,10 @@ const mimophytbl_info_t mimophytbl_info_rev0[] = {
};
const u32 mimophytbl_info_sz_rev0 =
- sizeof(mimophytbl_info_rev0) / sizeof(mimophytbl_info_rev0[0]);
+ sizeof(mimophytbl_info_rev0) / sizeof(mimophytbl_info_rev0[0]);
const u32 mimophytbl_info_sz_rev0_volatile =
- sizeof(mimophytbl_info_rev0_volatile) /
- sizeof(mimophytbl_info_rev0_volatile[0]);
+ sizeof(mimophytbl_info_rev0_volatile) /
+ sizeof(mimophytbl_info_rev0_volatile[0]);
const u16 ant_swctrl_tbl_rev3[] = {
0x0082,
@@ -9364,34 +9361,34 @@ const u32 papd_cal_scalars_tbl_core1_rev3[] = {
0x002606a4,
};
-const mimophytbl_info_t mimophytbl_info_rev3_volatile[] = {
+const struct phytbl_info mimophytbl_info_rev3_volatile[] = {
{&ant_swctrl_tbl_rev3,
sizeof(ant_swctrl_tbl_rev3) / sizeof(ant_swctrl_tbl_rev3[0]), 9, 0, 16}
,
};
-const mimophytbl_info_t mimophytbl_info_rev3_volatile1[] = {
+const struct phytbl_info mimophytbl_info_rev3_volatile1[] = {
{&ant_swctrl_tbl_rev3_1,
sizeof(ant_swctrl_tbl_rev3_1) / sizeof(ant_swctrl_tbl_rev3_1[0]), 9, 0,
16}
,
};
-const mimophytbl_info_t mimophytbl_info_rev3_volatile2[] = {
+const struct phytbl_info mimophytbl_info_rev3_volatile2[] = {
{&ant_swctrl_tbl_rev3_2,
sizeof(ant_swctrl_tbl_rev3_2) / sizeof(ant_swctrl_tbl_rev3_2[0]), 9, 0,
16}
,
};
-const mimophytbl_info_t mimophytbl_info_rev3_volatile3[] = {
+const struct phytbl_info mimophytbl_info_rev3_volatile3[] = {
{&ant_swctrl_tbl_rev3_3,
sizeof(ant_swctrl_tbl_rev3_3) / sizeof(ant_swctrl_tbl_rev3_3[0]), 9, 0,
16}
,
};
-const mimophytbl_info_t mimophytbl_info_rev3[] = {
+const struct phytbl_info mimophytbl_info_rev3[] = {
{&frame_struct_rev3,
sizeof(frame_struct_rev3) / sizeof(frame_struct_rev3[0]), 10, 0, 32}
,
@@ -9478,19 +9475,19 @@ const mimophytbl_info_t mimophytbl_info_rev3[] = {
};
const u32 mimophytbl_info_sz_rev3 =
- sizeof(mimophytbl_info_rev3) / sizeof(mimophytbl_info_rev3[0]);
+ sizeof(mimophytbl_info_rev3) / sizeof(mimophytbl_info_rev3[0]);
const u32 mimophytbl_info_sz_rev3_volatile =
- sizeof(mimophytbl_info_rev3_volatile) /
- sizeof(mimophytbl_info_rev3_volatile[0]);
+ sizeof(mimophytbl_info_rev3_volatile) /
+ sizeof(mimophytbl_info_rev3_volatile[0]);
const u32 mimophytbl_info_sz_rev3_volatile1 =
- sizeof(mimophytbl_info_rev3_volatile1) /
- sizeof(mimophytbl_info_rev3_volatile1[0]);
+ sizeof(mimophytbl_info_rev3_volatile1) /
+ sizeof(mimophytbl_info_rev3_volatile1[0]);
const u32 mimophytbl_info_sz_rev3_volatile2 =
- sizeof(mimophytbl_info_rev3_volatile2) /
- sizeof(mimophytbl_info_rev3_volatile2[0]);
+ sizeof(mimophytbl_info_rev3_volatile2) /
+ sizeof(mimophytbl_info_rev3_volatile2[0]);
const u32 mimophytbl_info_sz_rev3_volatile3 =
- sizeof(mimophytbl_info_rev3_volatile3) /
- sizeof(mimophytbl_info_rev3_volatile3[0]);
+ sizeof(mimophytbl_info_rev3_volatile3) /
+ sizeof(mimophytbl_info_rev3_volatile3[0]);
const u32 tmap_tbl_rev7[] = {
0x8a88aa80,
@@ -10470,7 +10467,7 @@ const u32 papd_cal_scalars_tbl_core1_rev7[] = {
0x004e068c,
};
-const mimophytbl_info_t mimophytbl_info_rev7[] = {
+const struct phytbl_info mimophytbl_info_rev7[] = {
{&frame_struct_rev3,
sizeof(frame_struct_rev3) / sizeof(frame_struct_rev3[0]), 10, 0, 32}
,
@@ -10582,9 +10579,9 @@ const mimophytbl_info_t mimophytbl_info_rev7[] = {
};
const u32 mimophytbl_info_sz_rev7 =
- sizeof(mimophytbl_info_rev7) / sizeof(mimophytbl_info_rev7[0]);
+ sizeof(mimophytbl_info_rev7) / sizeof(mimophytbl_info_rev7[0]);
-const mimophytbl_info_t mimophytbl_info_rev16[] = {
+const struct phytbl_info mimophytbl_info_rev16[] = {
{&noise_var_tbl_rev7,
sizeof(noise_var_tbl_rev7) / sizeof(noise_var_tbl_rev7[0]), 16, 0, 32}
,
@@ -10629,4 +10626,4 @@ const mimophytbl_info_t mimophytbl_info_rev16[] = {
};
const u32 mimophytbl_info_sz_rev16 =
- sizeof(mimophytbl_info_rev16) / sizeof(mimophytbl_info_rev16[0]);
+ sizeof(mimophytbl_info_rev16) / sizeof(mimophytbl_info_rev16[0]);
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phytbl_n.h b/drivers/staging/brcm80211/brcmsmac/phy/phytbl_n.h
index 396122f5e50..c5266cf2372 100644
--- a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phytbl_n.h
+++ b/drivers/staging/brcm80211/brcmsmac/phy/phytbl_n.h
@@ -16,24 +16,25 @@
#define ANT_SWCTRL_TBL_REV3_IDX (0)
-typedef phytbl_info_t mimophytbl_info_t;
+#include <types.h>
+#include "phy_int.h"
-extern const mimophytbl_info_t mimophytbl_info_rev0[],
- mimophytbl_info_rev0_volatile[];
+extern const struct phytbl_info mimophytbl_info_rev0[],
+ mimophytbl_info_rev0_volatile[];
extern const u32 mimophytbl_info_sz_rev0, mimophytbl_info_sz_rev0_volatile;
-extern const mimophytbl_info_t mimophytbl_info_rev3[],
- mimophytbl_info_rev3_volatile[], mimophytbl_info_rev3_volatile1[],
- mimophytbl_info_rev3_volatile2[], mimophytbl_info_rev3_volatile3[];
+extern const struct phytbl_info mimophytbl_info_rev3[],
+ mimophytbl_info_rev3_volatile[], mimophytbl_info_rev3_volatile1[],
+ mimophytbl_info_rev3_volatile2[], mimophytbl_info_rev3_volatile3[];
extern const u32 mimophytbl_info_sz_rev3, mimophytbl_info_sz_rev3_volatile,
- mimophytbl_info_sz_rev3_volatile1, mimophytbl_info_sz_rev3_volatile2,
- mimophytbl_info_sz_rev3_volatile3;
+ mimophytbl_info_sz_rev3_volatile1, mimophytbl_info_sz_rev3_volatile2,
+ mimophytbl_info_sz_rev3_volatile3;
extern const u32 noise_var_tbl_rev3[];
-extern const mimophytbl_info_t mimophytbl_info_rev7[];
+extern const struct phytbl_info mimophytbl_info_rev7[];
extern const u32 mimophytbl_info_sz_rev7;
extern const u32 noise_var_tbl_rev7[];
-extern const mimophytbl_info_t mimophytbl_info_rev16[];
+extern const struct phytbl_info mimophytbl_info_rev16[];
extern const u32 mimophytbl_info_sz_rev16;
diff --git a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_hal.h b/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_hal.h
deleted file mode 100644
index 8939153efa5..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/phy/wlc_phy_hal.h
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _wlc_phy_h_
-#define _wlc_phy_h_
-
-#include <wlioctl.h>
-#include <aiutils.h>
-#include <d11.h>
-#include <wlc_phy_shim.h>
-#include <net/mac80211.h> /* struct wiphy */
-
-#define IDCODE_VER_MASK 0x0000000f
-#define IDCODE_VER_SHIFT 0
-#define IDCODE_MFG_MASK 0x00000fff
-#define IDCODE_MFG_SHIFT 0
-#define IDCODE_ID_MASK 0x0ffff000
-#define IDCODE_ID_SHIFT 12
-#define IDCODE_REV_MASK 0xf0000000
-#define IDCODE_REV_SHIFT 28
-
-#define NORADIO_ID 0xe4f5
-#define NORADIO_IDCODE 0x4e4f5246
-
-#define BCM2055_ID 0x2055
-#define BCM2055_IDCODE 0x02055000
-#define BCM2055A0_IDCODE 0x1205517f
-
-#define BCM2056_ID 0x2056
-#define BCM2056_IDCODE 0x02056000
-#define BCM2056A0_IDCODE 0x1205617f
-
-#define BCM2057_ID 0x2057
-#define BCM2057_IDCODE 0x02057000
-#define BCM2057A0_IDCODE 0x1205717f
-
-#define BCM2064_ID 0x2064
-#define BCM2064_IDCODE 0x02064000
-#define BCM2064A0_IDCODE 0x0206417f
-
-#define PHY_TPC_HW_OFF false
-#define PHY_TPC_HW_ON true
-
-#define PHY_PERICAL_DRIVERUP 1
-#define PHY_PERICAL_WATCHDOG 2
-#define PHY_PERICAL_PHYINIT 3
-#define PHY_PERICAL_JOIN_BSS 4
-#define PHY_PERICAL_START_IBSS 5
-#define PHY_PERICAL_UP_BSS 6
-#define PHY_PERICAL_CHAN 7
-#define PHY_FULLCAL 8
-
-#define PHY_PERICAL_DISABLE 0
-#define PHY_PERICAL_SPHASE 1
-#define PHY_PERICAL_MPHASE 2
-#define PHY_PERICAL_MANUAL 3
-
-#define PHY_HOLD_FOR_ASSOC 1
-#define PHY_HOLD_FOR_SCAN 2
-#define PHY_HOLD_FOR_RM 4
-#define PHY_HOLD_FOR_PLT 8
-#define PHY_HOLD_FOR_MUTE 16
-#define PHY_HOLD_FOR_NOT_ASSOC 0x20
-
-#define PHY_MUTE_FOR_PREISM 1
-#define PHY_MUTE_ALL 0xffffffff
-
-#define PHY_NOISE_FIXED_VAL (-95)
-#define PHY_NOISE_FIXED_VAL_NPHY (-92)
-#define PHY_NOISE_FIXED_VAL_LCNPHY (-92)
-
-#define PHY_MODE_CAL 0x0002
-#define PHY_MODE_NOISEM 0x0004
-
-#define WLC_TXPWR_DB_FACTOR 4
-
-#define WLC_NUM_RATES_CCK 4
-#define WLC_NUM_RATES_OFDM 8
-#define WLC_NUM_RATES_MCS_1_STREAM 8
-#define WLC_NUM_RATES_MCS_2_STREAM 8
-#define WLC_NUM_RATES_MCS_3_STREAM 8
-#define WLC_NUM_RATES_MCS_4_STREAM 8
-typedef struct txpwr_limits {
- u8 cck[WLC_NUM_RATES_CCK];
- u8 ofdm[WLC_NUM_RATES_OFDM];
-
- u8 ofdm_cdd[WLC_NUM_RATES_OFDM];
-
- u8 ofdm_40_siso[WLC_NUM_RATES_OFDM];
- u8 ofdm_40_cdd[WLC_NUM_RATES_OFDM];
-
- u8 mcs_20_siso[WLC_NUM_RATES_MCS_1_STREAM];
- u8 mcs_20_cdd[WLC_NUM_RATES_MCS_1_STREAM];
- u8 mcs_20_stbc[WLC_NUM_RATES_MCS_1_STREAM];
- u8 mcs_20_mimo[WLC_NUM_RATES_MCS_2_STREAM];
-
- u8 mcs_40_siso[WLC_NUM_RATES_MCS_1_STREAM];
- u8 mcs_40_cdd[WLC_NUM_RATES_MCS_1_STREAM];
- u8 mcs_40_stbc[WLC_NUM_RATES_MCS_1_STREAM];
- u8 mcs_40_mimo[WLC_NUM_RATES_MCS_2_STREAM];
- u8 mcs32;
-} txpwr_limits_t;
-
-typedef struct {
- u8 vec[MAXCHANNEL / NBBY];
-} chanvec_t;
-
-struct rpc_info;
-typedef struct shared_phy shared_phy_t;
-
-struct phy_pub;
-
-typedef struct phy_pub wlc_phy_t;
-
-typedef struct shared_phy_params {
- si_t *sih;
- void *physhim;
- uint unit;
- uint corerev;
- uint bustype;
- uint buscorerev;
- char *vars;
- u16 vid;
- u16 did;
- uint chip;
- uint chiprev;
- uint chippkg;
- uint sromrev;
- uint boardtype;
- uint boardrev;
- uint boardvendor;
- u32 boardflags;
- u32 boardflags2;
-} shared_phy_params_t;
-
-
-extern shared_phy_t *wlc_phy_shared_attach(shared_phy_params_t *shp);
-extern void wlc_phy_shared_detach(shared_phy_t *phy_sh);
-extern wlc_phy_t *wlc_phy_attach(shared_phy_t *sh, void *regs, int bandtype,
- char *vars, struct wiphy *wiphy);
-extern void wlc_phy_detach(wlc_phy_t *ppi);
-
-extern bool wlc_phy_get_phyversion(wlc_phy_t *pih, u16 *phytype,
- u16 *phyrev, u16 *radioid,
- u16 *radiover);
-extern bool wlc_phy_get_encore(wlc_phy_t *pih);
-extern u32 wlc_phy_get_coreflags(wlc_phy_t *pih);
-
-extern void wlc_phy_hw_clk_state_upd(wlc_phy_t *ppi, bool newstate);
-extern void wlc_phy_hw_state_upd(wlc_phy_t *ppi, bool newstate);
-extern void wlc_phy_init(wlc_phy_t *ppi, chanspec_t chanspec);
-extern void wlc_phy_watchdog(wlc_phy_t *ppi);
-extern int wlc_phy_down(wlc_phy_t *ppi);
-extern u32 wlc_phy_clk_bwbits(wlc_phy_t *pih);
-extern void wlc_phy_cal_init(wlc_phy_t *ppi);
-extern void wlc_phy_antsel_init(wlc_phy_t *ppi, bool lut_init);
-
-extern void wlc_phy_chanspec_set(wlc_phy_t *ppi, chanspec_t chanspec);
-extern chanspec_t wlc_phy_chanspec_get(wlc_phy_t *ppi);
-extern void wlc_phy_chanspec_radio_set(wlc_phy_t *ppi, chanspec_t newch);
-extern u16 wlc_phy_bw_state_get(wlc_phy_t *ppi);
-extern void wlc_phy_bw_state_set(wlc_phy_t *ppi, u16 bw);
-
-extern void wlc_phy_rssi_compute(wlc_phy_t *pih, void *ctx);
-extern void wlc_phy_por_inform(wlc_phy_t *ppi);
-extern void wlc_phy_noise_sample_intr(wlc_phy_t *ppi);
-extern bool wlc_phy_bist_check_phy(wlc_phy_t *ppi);
-
-extern void wlc_phy_set_deaf(wlc_phy_t *ppi, bool user_flag);
-
-extern void wlc_phy_switch_radio(wlc_phy_t *ppi, bool on);
-extern void wlc_phy_anacore(wlc_phy_t *ppi, bool on);
-
-
-extern void wlc_phy_BSSinit(wlc_phy_t *ppi, bool bonlyap, int rssi);
-
-extern void wlc_phy_chanspec_ch14_widefilter_set(wlc_phy_t *ppi,
- bool wide_filter);
-extern void wlc_phy_chanspec_band_validch(wlc_phy_t *ppi, uint band,
- chanvec_t *channels);
-extern chanspec_t wlc_phy_chanspec_band_firstch(wlc_phy_t *ppi, uint band);
-
-extern void wlc_phy_txpower_sromlimit(wlc_phy_t *ppi, uint chan,
- u8 *_min_, u8 *_max_, int rate);
-extern void wlc_phy_txpower_sromlimit_max_get(wlc_phy_t *ppi, uint chan,
- u8 *_max_, u8 *_min_);
-extern void wlc_phy_txpower_boardlimit_band(wlc_phy_t *ppi, uint band, s32 *,
- s32 *, u32 *);
-extern void wlc_phy_txpower_limit_set(wlc_phy_t *ppi, struct txpwr_limits *,
- chanspec_t chanspec);
-extern int wlc_phy_txpower_get(wlc_phy_t *ppi, uint *qdbm, bool *override);
-extern int wlc_phy_txpower_set(wlc_phy_t *ppi, uint qdbm, bool override);
-extern void wlc_phy_txpower_target_set(wlc_phy_t *ppi, struct txpwr_limits *);
-extern bool wlc_phy_txpower_hw_ctrl_get(wlc_phy_t *ppi);
-extern void wlc_phy_txpower_hw_ctrl_set(wlc_phy_t *ppi, bool hwpwrctrl);
-extern u8 wlc_phy_txpower_get_target_min(wlc_phy_t *ppi);
-extern u8 wlc_phy_txpower_get_target_max(wlc_phy_t *ppi);
-extern bool wlc_phy_txpower_ipa_ison(wlc_phy_t *pih);
-
-extern void wlc_phy_stf_chain_init(wlc_phy_t *pih, u8 txchain,
- u8 rxchain);
-extern void wlc_phy_stf_chain_set(wlc_phy_t *pih, u8 txchain,
- u8 rxchain);
-extern void wlc_phy_stf_chain_get(wlc_phy_t *pih, u8 *txchain,
- u8 *rxchain);
-extern u8 wlc_phy_stf_chain_active_get(wlc_phy_t *pih);
-extern s8 wlc_phy_stf_ssmode_get(wlc_phy_t *pih, chanspec_t chanspec);
-extern void wlc_phy_ldpc_override_set(wlc_phy_t *ppi, bool val);
-
-extern void wlc_phy_cal_perical(wlc_phy_t *ppi, u8 reason);
-extern void wlc_phy_noise_sample_request_external(wlc_phy_t *ppi);
-extern void wlc_phy_edcrs_lock(wlc_phy_t *pih, bool lock);
-extern void wlc_phy_cal_papd_recal(wlc_phy_t *ppi);
-
-extern void wlc_phy_ant_rxdiv_set(wlc_phy_t *ppi, u8 val);
-extern bool wlc_phy_ant_rxdiv_get(wlc_phy_t *ppi, u8 *pval);
-extern void wlc_phy_clear_tssi(wlc_phy_t *ppi);
-extern void wlc_phy_hold_upd(wlc_phy_t *ppi, mbool id, bool val);
-extern void wlc_phy_mute_upd(wlc_phy_t *ppi, bool val, mbool flags);
-
-extern void wlc_phy_antsel_type_set(wlc_phy_t *ppi, u8 antsel_type);
-
-extern void wlc_phy_txpower_get_current(wlc_phy_t *ppi, tx_power_t *power,
- uint channel);
-
-extern void wlc_phy_initcal_enable(wlc_phy_t *pih, bool initcal);
-extern bool wlc_phy_test_ison(wlc_phy_t *ppi);
-extern void wlc_phy_txpwr_percent_set(wlc_phy_t *ppi, u8 txpwr_percent);
-extern void wlc_phy_ofdm_rateset_war(wlc_phy_t *pih, bool war);
-extern void wlc_phy_bf_preempt_enable(wlc_phy_t *pih, bool bf_preempt);
-extern void wlc_phy_machwcap_set(wlc_phy_t *ppi, u32 machwcap);
-
-extern void wlc_phy_runbist_config(wlc_phy_t *ppi, bool start_end);
-
-extern void wlc_phy_freqtrack_start(wlc_phy_t *ppi);
-extern void wlc_phy_freqtrack_end(wlc_phy_t *ppi);
-
-extern const u8 *wlc_phy_get_ofdm_rate_lookup(void);
-
-extern s8 wlc_phy_get_tx_power_offset_by_mcs(wlc_phy_t *ppi,
- u8 mcs_offset);
-extern s8 wlc_phy_get_tx_power_offset(wlc_phy_t *ppi, u8 tbl_offset);
-#endif /* _wlc_phy_h_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/phy_shim.c b/drivers/staging/brcm80211/brcmsmac/phy_shim.c
new file mode 100644
index 00000000000..82ecdcda271
--- /dev/null
+++ b/drivers/staging/brcm80211/brcmsmac/phy_shim.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This is "two-way" interface, acting as the SHIM layer between WL and PHY layer.
+ * WL driver can optinally call this translation layer to do some preprocessing, then reach PHY.
+ * On the PHY->WL driver direction, all calls go through this layer since PHY doesn't have the
+ * access to wlc_hw pointer.
+ */
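+
+/*
+ * Typical call flow (illustrative sketch only; the actual call sites live in
+ * the MAC-layer code, not in this file): the MAC layer attaches the shim once
+ * and hands the returned handle to the PHY, which then uses the wlapi_*()
+ * helpers defined below.
+ *
+ *   shim = wlc_phy_shim_attach(wlc_hw, wl, wlc);
+ *   if (shim) {
+ *           wlapi_suspend_mac_and_wait(shim);
+ *           ... program PHY registers ...
+ *           wlapi_enable_mac(shim);
+ *           wlc_phy_shim_detach(shim);
+ *   }
+ */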
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "bmac.h"
+#include "main.h"
+#include "mac80211_if.h"
+#include "phy_shim.h"
+
+/* PHY SHIM module specific state */
+struct phy_shim_info {
+ struct brcms_hardware *wlc_hw; /* pointer to main wlc_hw structure */
+ void *wlc; /* pointer to main wlc structure */
+ void *wl; /* pointer to os-specific private state */
+};
+
+struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
+ void *wl, void *wlc)
+{
+ struct phy_shim_info *physhim = NULL;
+
+ physhim = kzalloc(sizeof(struct phy_shim_info), GFP_ATOMIC);
+ if (!physhim) {
+ wiphy_err(wlc_hw->wlc->wiphy,
+ "wl%d: wlc_phy_shim_attach: out of mem\n",
+ wlc_hw->unit);
+ return NULL;
+ }
+ physhim->wlc_hw = wlc_hw;
+ physhim->wlc = wlc;
+ physhim->wl = wl;
+
+ return physhim;
+}
+
+void wlc_phy_shim_detach(struct phy_shim_info *physhim)
+{
+ kfree(physhim);
+}
+
+struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+ void (*fn) (void *arg), void *arg,
+ const char *name)
+{
+ return (struct wlapi_timer *)
+ brcms_init_timer(physhim->wl, fn, arg, name);
+}
+
+void wlapi_free_timer(struct phy_shim_info *physhim, struct wlapi_timer *t)
+{
+ brcms_free_timer(physhim->wl, (struct brcms_timer *)t);
+}
+
+void
+wlapi_add_timer(struct phy_shim_info *physhim, struct wlapi_timer *t, uint ms,
+ int periodic)
+{
+ brcms_add_timer(physhim->wl, (struct brcms_timer *)t, ms, periodic);
+}
+
+bool wlapi_del_timer(struct phy_shim_info *physhim, struct wlapi_timer *t)
+{
+ return brcms_del_timer(physhim->wl, (struct brcms_timer *)t);
+}
+
+void wlapi_intrson(struct phy_shim_info *physhim)
+{
+ brcms_intrson(physhim->wl);
+}
+
+u32 wlapi_intrsoff(struct phy_shim_info *physhim)
+{
+ return brcms_intrsoff(physhim->wl);
+}
+
+void wlapi_intrsrestore(struct phy_shim_info *physhim, u32 macintmask)
+{
+ brcms_intrsrestore(physhim->wl, macintmask);
+}
+
+void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset, u16 v)
+{
+ brcms_b_write_shm(physhim->wlc_hw, offset, v);
+}
+
+u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset)
+{
+ return brcms_b_read_shm(physhim->wlc_hw, offset);
+}
+
+void
+wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx, u16 mask,
+ u16 val, int bands)
+{
+ brcms_b_mhf(physhim->wlc_hw, idx, mask, val, bands);
+}
+
+void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags)
+{
+ brcms_b_corereset(physhim->wlc_hw, flags);
+}
+
+void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim)
+{
+ brcms_c_suspend_mac_and_wait(physhim->wlc);
+}
+
+void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode)
+{
+ brcms_b_switch_macfreq(physhim->wlc_hw, spurmode);
+}
+
+void wlapi_enable_mac(struct phy_shim_info *physhim)
+{
+ brcms_c_enable_mac(physhim->wlc);
+}
+
+void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask, u32 val)
+{
+ brcms_b_mctrl(physhim->wlc_hw, mask, val);
+}
+
+void wlapi_bmac_phy_reset(struct phy_shim_info *physhim)
+{
+ brcms_b_phy_reset(physhim->wlc_hw);
+}
+
+void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw)
+{
+ brcms_b_bw_set(physhim->wlc_hw, bw);
+}
+
+u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim)
+{
+ return brcms_b_get_txant(physhim->wlc_hw);
+}
+
+void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk)
+{
+ brcms_b_phyclk_fgc(physhim->wlc_hw, clk);
+}
+
+void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk)
+{
+ brcms_b_macphyclk_set(physhim->wlc_hw, clk);
+}
+
+void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on)
+{
+ brcms_b_core_phypll_ctl(physhim->wlc_hw, on);
+}
+
+void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim)
+{
+ brcms_b_core_phypll_reset(physhim->wlc_hw);
+}
+
+void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *physhim)
+{
+ brcms_c_ucode_wake_override_set(physhim->wlc_hw,
+ BRCMS_WAKE_OVERRIDE_PHYREG);
+}
+
+void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *physhim)
+{
+ brcms_c_ucode_wake_override_clear(physhim->wlc_hw,
+ BRCMS_WAKE_OVERRIDE_PHYREG);
+}
+
+void
+wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int offset,
+ int len, void *buf)
+{
+ brcms_b_write_template_ram(physhim->wlc_hw, offset, len, buf);
+}
+
+u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim, u8 rate)
+{
+ return brcms_b_rate_shm_offset(physhim->wlc_hw, rate);
+}
+
+void wlapi_ucode_sample_init(struct phy_shim_info *physhim)
+{
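+ /* empty hook: this shim performs no ucode sample initialization */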
+}
+
+void
+wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint offset, void *buf,
+ int len, u32 sel)
+{
+ brcms_b_copyfrom_objmem(physhim->wlc_hw, offset, buf, len, sel);
+}
+
+void
+wlapi_copyto_objmem(struct phy_shim_info *physhim, uint offset, const void *buf,
+ int l, u32 sel)
+{
+ brcms_b_copyto_objmem(physhim->wlc_hw, offset, buf, l, sel);
+}
diff --git a/drivers/staging/brcm80211/brcmsmac/phy_shim.h b/drivers/staging/brcm80211/brcmsmac/phy_shim.h
new file mode 100644
index 00000000000..2d12bb4400f
--- /dev/null
+++ b/drivers/staging/brcm80211/brcmsmac/phy_shim.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * phy_shim.h: stuff defined in phy_shim.c and included only by the phy
+ */
+
+#ifndef _BRCM_PHY_SHIM_H_
+#define _BRCM_PHY_SHIM_H_
+
+#include "types.h"
+
+#define RADAR_TYPE_NONE 0 /* Radar type None */
+#define RADAR_TYPE_ETSI_1 1 /* ETSI 1 Radar type */
+#define RADAR_TYPE_ETSI_2 2 /* ETSI 2 Radar type */
+#define RADAR_TYPE_ETSI_3 3 /* ETSI 3 Radar type */
+#define RADAR_TYPE_ITU_E 4 /* ITU E Radar type */
+#define RADAR_TYPE_ITU_K 5 /* ITU K Radar type */
+#define RADAR_TYPE_UNCLASSIFIED 6 /* Unclassified Radar type */
+#define RADAR_TYPE_BIN5 7 /* long pulse radar type */
+#define RADAR_TYPE_STG2 8 /* staggered-2 radar */
+#define RADAR_TYPE_STG3 9 /* staggered-3 radar */
+#define RADAR_TYPE_FRA 10 /* French radar */
+
+/* French radar pulse widths */
+#define FRA_T1_20MHZ 52770
+#define FRA_T2_20MHZ 61538
+#define FRA_T3_20MHZ 66002
+#define FRA_T1_40MHZ 105541
+#define FRA_T2_40MHZ 123077
+#define FRA_T3_40MHZ 132004
+#define FRA_ERR_20MHZ 60
+#define FRA_ERR_40MHZ 120
+
+#define ANTSEL_NA 0 /* No boardlevel selection available */
+#define ANTSEL_2x4 1 /* 2x4 boardlevel selection available */
+#define ANTSEL_2x3 2 /* 2x3 CB2 boardlevel selection available */
+
+/* Rx Antenna diversity control values */
+#define ANT_RX_DIV_FORCE_0 0 /* Use antenna 0 */
+#define ANT_RX_DIV_FORCE_1 1 /* Use antenna 1 */
+#define ANT_RX_DIV_START_1 2 /* Choose starting with 1 */
+#define ANT_RX_DIV_START_0 3 /* Choose starting with 0 */
+#define ANT_RX_DIV_ENABLE 3 /* APHY bbConfig Enable RX Diversity */
+#define ANT_RX_DIV_DEF ANT_RX_DIV_START_0 /* default antdiv setting */
+
+#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */
+#define WL_ANT_HT_RX_MAX 3 /* max 3 receive antennas/cores */
+#define WL_ANT_IDX_1 0 /* antenna index 1 */
+#define WL_ANT_IDX_2 1 /* antenna index 2 */
+
+/* values for n_preamble_type */
+#define BRCMS_N_PREAMBLE_MIXEDMODE 0
+#define BRCMS_N_PREAMBLE_GF 1
+#define BRCMS_N_PREAMBLE_GF_BRCM 2
+
+#define WL_TX_POWER_RATES_LEGACY 45
+#define WL_TX_POWER_MCS20_FIRST 12
+#define WL_TX_POWER_MCS20_NUM 16
+#define WL_TX_POWER_MCS40_FIRST 28
+#define WL_TX_POWER_MCS40_NUM 17
+
+
+#define WL_TX_POWER_RATES 101
+#define WL_TX_POWER_CCK_FIRST 0
+#define WL_TX_POWER_CCK_NUM 4
+#define WL_TX_POWER_OFDM_FIRST 4 /* Index for first 20MHz OFDM SISO rate */
+#define WL_TX_POWER_OFDM20_CDD_FIRST 12 /* Index for first 20MHz OFDM CDD rate */
+#define WL_TX_POWER_OFDM40_SISO_FIRST 52 /* Index for first 40MHz OFDM SISO rate */
+#define WL_TX_POWER_OFDM40_CDD_FIRST 60 /* Index for first 40MHz OFDM CDD rate */
+#define WL_TX_POWER_OFDM_NUM 8
+#define WL_TX_POWER_MCS20_SISO_FIRST 20 /* Index for first 20MHz MCS SISO rate */
+#define WL_TX_POWER_MCS20_CDD_FIRST 28 /* Index for first 20MHz MCS CDD rate */
+#define WL_TX_POWER_MCS20_STBC_FIRST 36 /* Index for first 20MHz MCS STBC rate */
+#define WL_TX_POWER_MCS20_SDM_FIRST 44 /* Index for first 20MHz MCS SDM rate */
+#define WL_TX_POWER_MCS40_SISO_FIRST 68 /* Index for first 40MHz MCS SISO rate */
+#define WL_TX_POWER_MCS40_CDD_FIRST 76 /* Index for first 40MHz MCS CDD rate */
+#define WL_TX_POWER_MCS40_STBC_FIRST 84 /* Index for first 40MHz MCS STBC rate */
+#define WL_TX_POWER_MCS40_SDM_FIRST 92 /* Index for first 40MHz MCS SDM rate */
+#define WL_TX_POWER_MCS_1_STREAM_NUM 8
+#define WL_TX_POWER_MCS_2_STREAM_NUM 8
+#define WL_TX_POWER_MCS_32 100 /* Index for 40MHz rate MCS 32 */
+#define WL_TX_POWER_MCS_32_NUM 1
+
+/* sslpnphy specifics */
+#define WL_TX_POWER_MCS20_SISO_FIRST_SSN 12 /* Index for first 20MHz MCS SISO rate */
+
+/* struct tx_power::flags bits */
+#define WL_TX_POWER_F_ENABLED 1
+#define WL_TX_POWER_F_HW 2
+#define WL_TX_POWER_F_MIMO 4
+#define WL_TX_POWER_F_SISO 8
+
+/* values to force tx/rx chain */
+#define BRCMS_N_TXRX_CHAIN0 0
+#define BRCMS_N_TXRX_CHAIN1 1
+
+extern struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
+ void *wl, void *wlc);
+extern void wlc_phy_shim_detach(struct phy_shim_info *physhim);
+
+/* PHY to WL utility functions */
+extern struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+ void (*fn) (void *arg), void *arg,
+ const char *name);
+extern void wlapi_free_timer(struct phy_shim_info *physhim,
+ struct wlapi_timer *t);
+extern void wlapi_add_timer(struct phy_shim_info *physhim,
+ struct wlapi_timer *t, uint ms, int periodic);
+extern bool wlapi_del_timer(struct phy_shim_info *physhim,
+ struct wlapi_timer *t);
+extern void wlapi_intrson(struct phy_shim_info *physhim);
+extern u32 wlapi_intrsoff(struct phy_shim_info *physhim);
+extern void wlapi_intrsrestore(struct phy_shim_info *physhim,
+ u32 macintmask);
+
+extern void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset,
+ u16 v);
+extern u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
+extern void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx,
+ u16 mask, u16 val, int bands);
+extern void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
+extern void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
+extern void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
+extern void wlapi_enable_mac(struct phy_shim_info *physhim);
+extern void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask,
+ u32 val);
+extern void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
+extern void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
+extern void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
+extern void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
+extern void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
+extern void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
+extern void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *
+ physhim);
+extern void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *
+ physhim);
+extern void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
+ int len, void *buf);
+extern u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim,
+ u8 rate);
+extern void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
+extern void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint,
+ void *buf, int, u32 sel);
+extern void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint,
+ const void *buf, int, u32);
+
+extern void wlapi_high_update_phy_mode(struct phy_shim_info *physhim,
+ u32 phy_mode);
+extern u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
+#endif /* _BRCM_PHY_SHIM_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/pmu.c b/drivers/staging/brcm80211/brcmsmac/pmu.c
new file mode 100644
index 00000000000..e8b2b81d2d0
--- /dev/null
+++ b/drivers/staging/brcm80211/brcmsmac/pmu.c
@@ -0,0 +1,474 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <brcm_hw_ids.h>
+#include <chipcommon.h>
+#include <brcmu_utils.h>
+#include "pub.h"
+#include "aiutils.h"
+#include "pmu.h"
+
+/*
+ * external LPO crystal frequency
+ */
+#define EXT_ILP_HZ 32768
+
+/*
+ * Duration for ILP clock frequency measurement in milliseconds
+ *
+ * remark: 1000 must be an integer multiple of this duration
+ */
+#define ILP_CALC_DUR 10
+
+/* Fields in pmucontrol */
+#define PCTL_ILP_DIV_MASK 0xffff0000
+#define PCTL_ILP_DIV_SHIFT 16
+#define PCTL_PLL_PLLCTL_UPD 0x00000400 /* rev 2 */
+#define PCTL_NOILP_ON_WAIT 0x00000200 /* rev 1 */
+#define PCTL_HT_REQ_EN 0x00000100
+#define PCTL_ALP_REQ_EN 0x00000080
+#define PCTL_XTALFREQ_MASK 0x0000007c
+#define PCTL_XTALFREQ_SHIFT 2
+#define PCTL_ILP_DIV_EN 0x00000002
+#define PCTL_LPO_SEL 0x00000001
+
+/* ILP clock */
+#define ILP_CLOCK 32000
+
+/* ALP clock on pre-PMU chips */
+#define ALP_CLOCK 20000000
+
+/* pmustatus */
+#define PST_EXTLPOAVAIL 0x0100
+#define PST_WDRESET 0x0080
+#define PST_INTPEND 0x0040
+#define PST_SBCLKST 0x0030
+#define PST_SBCLKST_ILP 0x0010
+#define PST_SBCLKST_ALP 0x0020
+#define PST_SBCLKST_HT 0x0030
+#define PST_ALPAVAIL 0x0008
+#define PST_HTAVAIL 0x0004
+#define PST_RESINIT 0x0003
+
+/* PMU resource bit position */
+#define PMURES_BIT(bit) (1 << (bit))
+
+/* PMU corerev and chip specific PLL controls.
+ * PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary number
+ * to differentiate different PLLs controlled by the same PMU rev.
+ */
+/* pllcontrol registers */
+/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
+#define PMU1_PLL0_PLLCTL0 0
+#define PMU1_PLL0_PLLCTL1 1
+#define PMU1_PLL0_PLLCTL2 2
+#define PMU1_PLL0_PLLCTL3 3
+#define PMU1_PLL0_PLLCTL4 4
+#define PMU1_PLL0_PLLCTL5 5
+
+/* pmu XtalFreqRatio */
+#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF
+#define PMU_XTALFREQ_REG_MEASURE_MASK 0x80000000
+#define PMU_XTALFREQ_REG_MEASURE_SHIFT 31
+
+/* 4313 resources */
+#define RES4313_BB_PU_RSRC 0
+#define RES4313_ILP_REQ_RSRC 1
+#define RES4313_XTAL_PU_RSRC 2
+#define RES4313_ALP_AVAIL_RSRC 3
+#define RES4313_RADIO_PU_RSRC 4
+#define RES4313_BG_PU_RSRC 5
+#define RES4313_VREG1P4_PU_RSRC 6
+#define RES4313_AFE_PWRSW_RSRC 7
+#define RES4313_RX_PWRSW_RSRC 8
+#define RES4313_TX_PWRSW_RSRC 9
+#define RES4313_BB_PWRSW_RSRC 10
+#define RES4313_SYNTH_PWRSW_RSRC 11
+#define RES4313_MISC_PWRSW_RSRC 12
+#define RES4313_BB_PLL_PWRSW_RSRC 13
+#define RES4313_HT_AVAIL_RSRC 14
+#define RES4313_MACPHY_CLK_AVAIL_RSRC 15
+
+/* Determine min/max rsrc masks. Value 0 leaves hardware at default. */
+static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
+{
+ u32 min_mask = 0, max_mask = 0;
+ uint rsrcs;
+
+ /* # resources */
+ rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+
+ /* determine min/max rsrc masks */
+ switch (sih->chip) {
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ /* ??? */
+ break;
+
+ case BCM4313_CHIP_ID:
+ min_mask = PMURES_BIT(RES4313_BB_PU_RSRC) |
+ PMURES_BIT(RES4313_XTAL_PU_RSRC) |
+ PMURES_BIT(RES4313_ALP_AVAIL_RSRC) |
+ PMURES_BIT(RES4313_BB_PLL_PWRSW_RSRC);
+ max_mask = 0xffff;
+ break;
+ default:
+ break;
+ }
+
+ *pmin = min_mask;
+ *pmax = max_mask;
+}
+
+static void
+si_pmu_spuravoid_pllupdate(struct si_pub *sih, chipcregs_t *cc, u8 spuravoid)
+{
+ u32 tmp = 0;
+
+ switch (sih->chip) {
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ if (spuravoid == 1) {
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ W_REG(&cc->pllcontrol_data, 0x11500010);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
+ W_REG(&cc->pllcontrol_data, 0x000C0C06);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ W_REG(&cc->pllcontrol_data, 0x0F600a08);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
+ W_REG(&cc->pllcontrol_data, 0x00000000);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
+ W_REG(&cc->pllcontrol_data, 0x2001E920);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
+ W_REG(&cc->pllcontrol_data, 0x88888815);
+ } else {
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ W_REG(&cc->pllcontrol_data, 0x11100010);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
+ W_REG(&cc->pllcontrol_data, 0x000c0c06);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ W_REG(&cc->pllcontrol_data, 0x03000a08);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
+ W_REG(&cc->pllcontrol_data, 0x00000000);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
+ W_REG(&cc->pllcontrol_data, 0x200005c0);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
+ W_REG(&cc->pllcontrol_data, 0x88888815);
+ }
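+ /* 1 << 10 is PCTL_PLL_PLLCTL_UPD: request a pllcontrol update below */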
+ tmp = 1 << 10;
+ break;
+
+ case BCM4313_CHIP_ID:
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
+ W_REG(&cc->pllcontrol_data, 0x11100008);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
+ W_REG(&cc->pllcontrol_data, 0x0c000c06);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
+ W_REG(&cc->pllcontrol_data, 0x03000a08);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
+ W_REG(&cc->pllcontrol_data, 0x00000000);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
+ W_REG(&cc->pllcontrol_data, 0x200005c0);
+ W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
+ W_REG(&cc->pllcontrol_data, 0x88888855);
+
+ tmp = 1 << 10;
+ break;
+
+ default:
+ /* bail out */
+ return;
+ }
+
+ tmp |= R_REG(&cc->pmucontrol);
+ W_REG(&cc->pmucontrol, tmp);
+}
+
+u32 si_pmu_ilp_clock(struct si_pub *sih)
+{
+ static u32 ilpcycles_per_sec;
+
+ if (!PMUCTL_ENAB(sih))
+ return ILP_CLOCK;
+
+ if (ilpcycles_per_sec == 0) {
+ u32 start, end, delta;
+ u32 origidx = ai_coreidx(sih);
+ chipcregs_t *cc = ai_setcoreidx(sih, SI_CC_IDX);
+ start = R_REG(&cc->pmutimer);
+ mdelay(ILP_CALC_DUR);
+ end = R_REG(&cc->pmutimer);
+ delta = end - start;
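+ /*
+ * Scale the tick count measured over ILP_CALC_DUR ms up to one
+ * second: e.g. a delta of 320 ticks in 10 ms gives
+ * 320 * (1000 / 10) = 32000 cycles/sec, the nominal ILP_CLOCK.
+ */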
+ ilpcycles_per_sec = delta * (1000 / ILP_CALC_DUR);
+ ai_setcoreidx(sih, origidx);
+ }
+
+ return ilpcycles_per_sec;
+}
+
+u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
+{
+ uint delay = PMU_MAX_TRANSITION_DLY;
+
+ switch (sih->chip) {
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ case BCM4313_CHIP_ID:
+ delay = 3700;
+ break;
+ default:
+ break;
+ }
+
+ return (u16) delay;
+}
+
+void si_pmu_sprom_enable(struct si_pub *sih, bool enable)
+{
+ chipcregs_t *cc;
+ uint origidx;
+
+ /* Remember original core before switch to chipc */
+ origidx = ai_coreidx(sih);
+ cc = ai_setcoreidx(sih, SI_CC_IDX);
+
+ /* Return to original core */
+ ai_setcoreidx(sih, origidx);
+}
+
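+/*
+ * The chipcontrol, regcontrol and pllcontrol registers below are accessed
+ * indirectly: the register index is first written to the corresponding
+ * *_addr register, then the value is read-modify-written through the
+ * matching *_data register via ai_corereg() with the caller's mask/value.
+ */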
+/* Read/write a chipcontrol reg */
+u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
+{
+ ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, chipcontrol_addr), ~0,
+ reg);
+ return ai_corereg(sih, SI_CC_IDX,
+ offsetof(chipcregs_t, chipcontrol_data), mask, val);
+}
+
+/* Read/write a regcontrol reg */
+u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
+{
+ ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, regcontrol_addr), ~0,
+ reg);
+ return ai_corereg(sih, SI_CC_IDX,
+ offsetof(chipcregs_t, regcontrol_data), mask, val);
+}
+
+/* Read/write a pllcontrol reg */
+u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
+{
+ ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, pllcontrol_addr), ~0,
+ reg);
+ return ai_corereg(sih, SI_CC_IDX,
+ offsetof(chipcregs_t, pllcontrol_data), mask, val);
+}
+
+/* PMU PLL update */
+void si_pmu_pllupd(struct si_pub *sih)
+{
+ ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, pmucontrol),
+ PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
+}
+
+/* query alp/xtal clock frequency */
+u32 si_pmu_alp_clock(struct si_pub *sih)
+{
+ u32 clock = ALP_CLOCK;
+
+ /* bail out with default */
+ if (!PMUCTL_ENAB(sih))
+ return clock;
+
+ switch (sih->chip) {
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ case BCM4313_CHIP_ID:
+ /* always 20Mhz */
+ clock = 20000 * 1000;
+ break;
+ default:
+ break;
+ }
+
+ return clock;
+}
+
+void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid)
+{
+ chipcregs_t *cc;
+ uint origidx, intr_val;
+
+ /* Remember original core before switch to chipc */
+ cc = (chipcregs_t *) ai_switch_core(sih, CC_CORE_ID, &origidx,
+ &intr_val);
+
+ /* update the pll changes */
+ si_pmu_spuravoid_pllupdate(sih, cc, spuravoid);
+
+ /* Return to original core */
+ ai_restore_core(sih, origidx, intr_val);
+}
+
+/* initialize PMU */
+void si_pmu_init(struct si_pub *sih)
+{
+ chipcregs_t *cc;
+ uint origidx;
+
+ /* Remember original core before switch to chipc */
+ origidx = ai_coreidx(sih);
+ cc = ai_setcoreidx(sih, SI_CC_IDX);
+
+ if (sih->pmurev == 1)
+ AND_REG(&cc->pmucontrol, ~PCTL_NOILP_ON_WAIT);
+ else if (sih->pmurev >= 2)
+ OR_REG(&cc->pmucontrol, PCTL_NOILP_ON_WAIT);
+
+ /* Return to original core */
+ ai_setcoreidx(sih, origidx);
+}
+
+/* initialize PMU chip controls and other chip level stuff */
+void si_pmu_chip_init(struct si_pub *sih)
+{
+ uint origidx;
+
+ /* Gate off SPROM clock and chip select signals */
+ si_pmu_sprom_enable(sih, false);
+
+ /* Remember original core */
+ origidx = ai_coreidx(sih);
+
+ /* Return to original core */
+ ai_setcoreidx(sih, origidx);
+}
+
+/* initialize PMU switch/regulators */
+void si_pmu_swreg_init(struct si_pub *sih)
+{
+}
+
+/* initialize PLL */
+void si_pmu_pll_init(struct si_pub *sih, uint xtalfreq)
+{
+ chipcregs_t *cc;
+ uint origidx;
+
+ /* Remember original core before switch to chipc */
+ origidx = ai_coreidx(sih);
+ cc = ai_setcoreidx(sih, SI_CC_IDX);
+
+ switch (sih->chip) {
+ case BCM4313_CHIP_ID:
+ case BCM43224_CHIP_ID:
+ case BCM43225_CHIP_ID:
+ /* ??? */
+ break;
+ default:
+ break;
+ }
+
+ /* Return to original core */
+ ai_setcoreidx(sih, origidx);
+}
+
+/* initialize PMU resources */
+void si_pmu_res_init(struct si_pub *sih)
+{
+ chipcregs_t *cc;
+ uint origidx;
+ u32 min_mask = 0, max_mask = 0;
+
+ /* Remember original core before switch to chipc */
+ origidx = ai_coreidx(sih);
+ cc = ai_setcoreidx(sih, SI_CC_IDX);
+
+ /* Determine min/max rsrc masks */
+ si_pmu_res_masks(sih, &min_mask, &max_mask);
+
+ /* It is required to program max_mask first and then min_mask */
+
+ /* Program max resource mask */
+
+ if (max_mask)
+ W_REG(&cc->max_res_mask, max_mask);
+
+ /* Program min resource mask */
+
+ if (min_mask)
+ W_REG(&cc->min_res_mask, min_mask);
+
+ /* Add some delay; allow resources to come up and settle. */
+ mdelay(2);
+
+ /* Return to original core */
+ ai_setcoreidx(sih, origidx);
+}
+
+u32 si_pmu_measure_alpclk(struct si_pub *sih)
+{
+ chipcregs_t *cc;
+ uint origidx;
+ u32 alp_khz;
+
+ if (sih->pmurev < 10)
+ return 0;
+
+ /* Remember original core before switch to chipc */
+ origidx = ai_coreidx(sih);
+ cc = ai_setcoreidx(sih, SI_CC_IDX);
+
+ if (R_REG(&cc->pmustatus) & PST_EXTLPOAVAIL) {
+ u32 ilp_ctr, alp_hz;
+
+ /*
+ * Enable the reg to measure the freq,
+ * in case it was disabled before
+ */
+ W_REG(&cc->pmu_xtalfreq,
+ 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
+
+ /* Delay for well over 4 ILP clocks */
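+ /* (4 cycles at 32768 Hz is about 122 us, so 1000 us is ample) */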
+ udelay(1000);
+
+ /* Read the latched number of ALP ticks per 4 ILP ticks */
+ ilp_ctr =
+ R_REG(&cc->pmu_xtalfreq) & PMU_XTALFREQ_REG_ILPCTR_MASK;
+
+ /*
+ * Clear the measure bit (PMU_XTALFREQ_REG_MEASURE_MASK)
+ * again to save power
+ */
+ W_REG(&cc->pmu_xtalfreq, 0);
+
+ /* Calculate ALP frequency */
+ alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4;
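+ /* e.g. ilp_ctr = 2441 yields alp_hz = 2441 * 32768 / 4 = 19996672 */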
+
+ /*
+ * Round to nearest 100KHz, and at
+ * the same time convert to KHz
+ */
+ alp_khz = (alp_hz + 50000) / 100000 * 100;
+ } else
+ alp_khz = 0;
+
+ /* Return to original core */
+ ai_setcoreidx(sih, origidx);
+
+ return alp_khz;
+}
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_pmu.h b/drivers/staging/brcm80211/brcmsmac/pmu.h
index bd5b809b2e3..0c7e48c4bcd 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_pmu.h
+++ b/drivers/staging/brcm80211/brcmsmac/pmu.h
@@ -15,13 +15,10 @@
*/
-#ifndef WLC_PMU_H_
-#define WLC_PMU_H_
-
-#include <linux/types.h>
-
-#include <aiutils.h>
+#ifndef _BRCM_PMU_H_
+#define _BRCM_PMU_H_
+#include "types.h"
/*
* LDO selections used in si_pmu_set_ldo_voltage
*/
@@ -36,23 +33,20 @@
#define SET_LDO_VOLTAGE_LNLDO1 9
#define SET_LDO_VOLTAGE_LNLDO2_SEL 10
-extern void si_pmu_set_ldo_voltage(si_t *sih, u8 ldo, u8 voltage);
-extern u16 si_pmu_fast_pwrup_delay(si_t *sih);
-extern void si_pmu_sprom_enable(si_t *sih, bool enable);
-extern u32 si_pmu_chipcontrol(si_t *sih, uint reg, u32 mask, u32 val);
-extern u32 si_pmu_regcontrol(si_t *sih, uint reg, u32 mask, u32 val);
-extern u32 si_pmu_ilp_clock(si_t *sih);
-extern u32 si_pmu_alp_clock(si_t *sih);
-extern void si_pmu_pllupd(si_t *sih);
-extern void si_pmu_spuravoid(si_t *sih, u8 spuravoid);
-extern u32 si_pmu_pllcontrol(si_t *sih, uint reg, u32 mask, u32 val);
-extern void si_pmu_init(si_t *sih);
-extern void si_pmu_chip_init(si_t *sih);
-extern void si_pmu_pll_init(si_t *sih, u32 xtalfreq);
-extern void si_pmu_res_init(si_t *sih);
-extern void si_pmu_swreg_init(si_t *sih);
-extern u32 si_pmu_measure_alpclk(si_t *sih);
-extern bool si_pmu_is_otp_powered(si_t *sih);
-extern void si_pmu_otp_power(si_t *sih, bool on);
+extern u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
+extern void si_pmu_sprom_enable(struct si_pub *sih, bool enable);
+extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
+extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
+extern u32 si_pmu_ilp_clock(struct si_pub *sih);
+extern u32 si_pmu_alp_clock(struct si_pub *sih);
+extern void si_pmu_pllupd(struct si_pub *sih);
+extern void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid);
+extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
+extern void si_pmu_init(struct si_pub *sih);
+extern void si_pmu_chip_init(struct si_pub *sih);
+extern void si_pmu_pll_init(struct si_pub *sih, u32 xtalfreq);
+extern void si_pmu_res_init(struct si_pub *sih);
+extern void si_pmu_swreg_init(struct si_pub *sih);
+extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
-#endif /* WLC_PMU_H_ */
+#endif /* _BRCM_PMU_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_pub.h b/drivers/staging/brcm80211/brcmsmac/pub.h
index 9334deacda1..01d74609560 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_pub.h
+++ b/drivers/staging/brcm80211/brcmsmac/pub.h
@@ -14,10 +14,14 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _wlc_pub_h_
-#define _wlc_pub_h_
+#ifndef _BRCM_PUB_H_
+#define _BRCM_PUB_H_
-#define WLC_NUMRATES 16 /* max # of rates in a rateset */
+#include <brcmu_wifi.h>
+#include "types.h"
+#include "defs.h"
+
+#define BRCMS_NUMRATES 16 /* max # of rates in a rateset */
#define MAXMULTILIST 32 /* max # multicast addresses */
#define D11_PHY_HDR_LEN 6 /* Phy header length - 6 bytes */
@@ -32,36 +36,37 @@
#define PHY_TYPE_HT 7 /* Phy type 3-Stream N */
/* bw */
-#define WLC_10_MHZ 10 /* 10Mhz nphy channel bandwidth */
-#define WLC_20_MHZ 20 /* 20Mhz nphy channel bandwidth */
-#define WLC_40_MHZ 40 /* 40Mhz nphy channel bandwidth */
+#define BRCMS_10_MHZ 10 /* 10Mhz nphy channel bandwidth */
+#define BRCMS_20_MHZ 20 /* 20Mhz nphy channel bandwidth */
+#define BRCMS_40_MHZ 40 /* 40Mhz nphy channel bandwidth */
-#define CHSPEC_WLC_BW(chanspec) (CHSPEC_IS40(chanspec) ? WLC_40_MHZ : \
- CHSPEC_IS20(chanspec) ? WLC_20_MHZ : \
- WLC_10_MHZ)
+#define CHSPEC_WLC_BW(chanspec) (CHSPEC_IS40(chanspec) ? BRCMS_40_MHZ : \
+ CHSPEC_IS20(chanspec) ? BRCMS_20_MHZ : \
+ BRCMS_10_MHZ)
-#define WLC_RSSI_MINVAL -200 /* Low value, e.g. for forcing roam */
-#define WLC_RSSI_NO_SIGNAL -91 /* NDIS RSSI link quality cutoffs */
-#define WLC_RSSI_VERY_LOW -80 /* Very low quality cutoffs */
-#define WLC_RSSI_LOW -70 /* Low quality cutoffs */
-#define WLC_RSSI_GOOD -68 /* Good quality cutoffs */
-#define WLC_RSSI_VERY_GOOD -58 /* Very good quality cutoffs */
-#define WLC_RSSI_EXCELLENT -57 /* Excellent quality cutoffs */
+#define BRCMS_RSSI_MINVAL -200 /* Low value, e.g. for forcing roam */
+#define BRCMS_RSSI_NO_SIGNAL -91 /* NDIS RSSI link quality cutoffs */
+#define BRCMS_RSSI_VERY_LOW -80 /* Very low quality cutoffs */
+#define BRCMS_RSSI_LOW -70 /* Low quality cutoffs */
+#define BRCMS_RSSI_GOOD -68 /* Good quality cutoffs */
+#define BRCMS_RSSI_VERY_GOOD -58 /* Very good quality cutoffs */
+#define BRCMS_RSSI_EXCELLENT -57 /* Excellent quality cutoffs */
-#define WLC_PHYTYPE(_x) (_x) /* macro to perform WLC PHY -> D11 PHY TYPE, currently 1:1 */
+/* macro to perform PHY -> D11 PHY TYPE, currently 1:1 */
+#define BRCMS_PHYTYPE(_x) (_x)
#define MA_WINDOW_SZ 8 /* moving average window size */
-#define WLC_SNR_INVALID 0 /* invalid SNR value */
+#define BRCMS_SNR_INVALID 0 /* invalid SNR value */
/* a large TX Power as an init value to factor out of min() calculations,
* keep low enough to fit in an s8, units are .25 dBm
*/
-#define WLC_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */
+#define BRCMS_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */
/* rate related definitions */
-#define WLC_RATE_FLAG 0x80 /* Flag to indicate it is a basic rate */
-#define WLC_RATE_MASK 0x7f /* Rate value mask w/o basic rate flag */
+#define BRCMS_RATE_FLAG 0x80 /* Flag to indicate it is a basic rate */
+#define BRCMS_RATE_MASK 0x7f /* Rate value mask w/o basic rate flag */
/* legacy rx Antenna diversity for SISO rates */
#define ANT_RX_DIV_FORCE_0 0 /* Use antenna 0 */
@@ -96,9 +101,13 @@
#define AIDMAPSZ (roundup(MAXSCB, NBBY)/NBBY) /* aid bitmap size in bytes */
#endif /* AIDMAPSZ */
-struct ieee80211_tx_queue_params;
+#define MAX_STREAMS_SUPPORTED 4 /* max number of streams supported */
+
+#define WL_SPURAVOID_OFF 0
+#define WL_SPURAVOID_ON1 1
+#define WL_SPURAVOID_ON2 2
-typedef struct wlc_tunables {
+struct brcms_tunables {
int ntxd; /* size of tx descriptor table */
int nrxd; /* size of rx descriptor table */
int rxbufsz; /* size of rx buffers to post */
@@ -114,14 +123,15 @@ typedef struct wlc_tunables {
int rxbnd; /* max # of rx bufs to process before deferring to dpc */
int txsbnd; /* max # tx status to process in wlc_txstatus() */
int memreserved; /* memory reserved for BMAC's USB dma rx */
-} wlc_tunables_t;
+};
-typedef struct wlc_rateset {
+struct brcms_rateset {
uint count; /* number of rates in rates[] */
- u8 rates[WLC_NUMRATES]; /* rates in 500kbps units w/hi bit set if basic */
+ /* rates in 500kbps units w/hi bit set if basic */
+ u8 rates[BRCMS_NUMRATES];
u8 htphy_membership; /* HT PHY Membership */
u8 mcs[MCSSET_LEN]; /* supported mcs index bit map */
-} wlc_rateset_t;
+};
struct rsn_parms {
u8 flags; /* misc booleans (e.g., supported) */
@@ -134,7 +144,6 @@ struct rsn_parms {
};
/*
- * buffer length needed for wlc_format_ssid
* 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
*/
#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1)
@@ -152,8 +161,8 @@ struct rsn_parms {
IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_GRN_FLD |\
IEEE80211_HT_CAP_MAX_AMSDU | IEEE80211_HT_CAP_DSSSCCK40)
-/* wlc internal bss_info, wl external one is in wlioctl.h */
-typedef struct wlc_bss_info {
+/* wlc internal bss_info */
+struct brcms_bss_info {
u8 BSSID[ETH_ALEN]; /* network BSSID */
u16 flags; /* flags for internal attributes */
u8 SSID_len; /* the length of SSID */
@@ -168,7 +177,7 @@ typedef struct wlc_bss_info {
u8 dtim_period; /* DTIM period */
s8 phy_noise; /* noise right after tx (in dBm) */
u16 capability; /* Capability information */
- u8 wme_qosinfo; /* QoS Info from WME IE; valid if WLC_BSS_WME flag set */
+ u8 wme_qosinfo; /* QoS Info from WME IE; valid if BSS_WME flag set */
struct rsn_parms wpa;
struct rsn_parms wpa2;
u16 qbss_load_aac; /* qbss load available admission capacity */
@@ -176,21 +185,7 @@ typedef struct wlc_bss_info {
u8 qbss_load_chan_free; /* indicates how free the channel is */
u8 mcipher; /* multicast cipher */
u8 wpacfg; /* wpa config index */
-} wlc_bss_info_t;
-
-/* forward declarations */
-struct wlc_if;
-
-/* wlc_ioctl error codes */
-#define WLC_ENOIOCTL 1 /* No such Ioctl */
-#define WLC_EINVAL 2 /* Invalid value */
-#define WLC_ETOOSMALL 3 /* Value too small */
-#define WLC_ETOOBIG 4 /* Value too big */
-#define WLC_ERANGE 5 /* Out of range */
-#define WLC_EDOWN 6 /* Down */
-#define WLC_EUP 7 /* Up */
-#define WLC_ENOMEM 8 /* No Memory */
-#define WLC_EBUSY 9 /* Busy */
+};
/* IOVar flags for common error checks */
#define IOVF_MFG (1<<3) /* flag for mfgtest iovars */
@@ -211,7 +206,7 @@ struct wlc_if;
/* watchdog down and dump callback function proto's */
typedef int (*watchdog_fn_t) (void *handle);
typedef int (*down_fn_t) (void *handle);
-typedef int (*dump_fn_t) (void *handle, struct bcmstrbuf *b);
+typedef int (*dump_fn_t) (void *handle, struct brcmu_strbuf *b);
/* IOVar handler
*
@@ -222,14 +217,14 @@ typedef int (*dump_fn_t) (void *handle, struct bcmstrbuf *b);
* params/plen - parameters and length for a get, input only.
* arg/len - buffer and length for value to be set or retrieved, input or output.
* vsize - value size, valid for integer type only.
- * wlcif - interface context (wlc_if pointer)
+ * wlcif - interface context (brcms_c_if pointer)
*
* All pointers may point into the same buffer.
*/
-typedef int (*iovar_fn_t) (void *handle, const bcm_iovar_t *vi,
+typedef int (*iovar_fn_t) (void *handle, const struct brcmu_iovar *vi,
u32 actionid, const char *name, void *params,
uint plen, void *arg, int alen, int vsize,
- struct wlc_if *wlcif);
+ struct brcms_c_if *wlcif);
#define MAC80211_PROMISC_BCNS (1 << 0)
#define MAC80211_SCAN (1 << 1)
@@ -238,20 +233,21 @@ typedef int (*iovar_fn_t) (void *handle, const bcm_iovar_t *vi,
* Public portion of "common" os-independent state structure.
* The wlc handle points at this.
*/
-struct wlc_pub {
+struct brcms_pub {
void *wlc;
struct ieee80211_hw *ieee_hw;
struct scb *global_scb;
- scb_ampdu_t *global_ampdu;
+ struct scb_ampdu *global_ampdu;
uint mac80211_state;
uint unit; /* device instance number */
uint corerev; /* core revision */
- si_t *sih; /* SB handle (cookie for siutils calls) */
+ struct si_pub *sih; /* SI handle (cookie for siutils calls) */
char *vars; /* "environment" name=value */
bool up; /* interface up and running */
bool hw_off; /* HW is off */
- wlc_tunables_t *tunables; /* tunables: ntxd, nrxd, maxscb, etc. */
+ /* tunables: ntxd, nrxd, maxscb, etc. */
+ struct brcms_tunables *tunables;
bool hw_up; /* one time hw up/down(from boot or hibernation) */
bool _piomode; /* true if pio mode *//* BMAC_NOTE: NEED In both */
uint _nbands; /* # bands supported */
@@ -307,13 +303,10 @@ struct wlc_pub {
u16 boardrev; /* version # of particular board */
u8 sromrev; /* version # of the srom */
- char srom_ccode[WLC_CNTRY_BUF_SZ]; /* Country Code in SROM */
+ char srom_ccode[BRCM_CNTRY_BUF_SZ]; /* Country Code in SROM */
u32 boardflags; /* Board specific flags from srom */
u32 boardflags2; /* More board flags if sromrev >= 4 */
bool tempsense_disable; /* disable periodic tempsense check */
-
- bool _lmac; /* lmac module included and enabled */
- bool _lmacproto; /* lmac protocol module included and enabled */
bool phy_11ncapable; /* the PHY/HW is capable of 802.11N */
bool _ampdumac; /* mac assist ampdu enabled or not */
@@ -321,7 +314,7 @@ struct wlc_pub {
};
/* wl_monitor rx status per packet */
-typedef struct wl_rxsts {
+struct wl_rxsts {
uint pkterror; /* error flags per pkt */
uint phytype; /* 802.11 A/B/G ... */
uint channel; /* channel */
@@ -335,8 +328,8 @@ typedef struct wl_rxsts {
uint preamble; /* Unknown, short, long */
uint encoding; /* Unknown, CCK, PBCC, OFDM */
uint nfrmtype; /* special 802.11n frames(AMPDU, AMSDU) */
- struct wl_if *wlif; /* wl interface */
-} wl_rxsts_t;
+ struct brcms_if *wlif; /* wl interface */
+};
/* status per error RX pkt */
#define WL_RXS_CRC_ERROR 0x00000001 /* CRC Error in packet */
@@ -371,13 +364,12 @@ typedef struct wl_rxsts {
#define WL_RXS_NFRM_AMSDU_FIRST 0x00000004 /* first MSDU in A-MSDU */
#define WL_RXS_NFRM_AMSDU_SUB 0x00000008 /* subsequent MSDU(s) in A-MSDU */
-/* forward declare and use the struct notation so we don't have to
- * have it defined if not necessary.
- */
-struct wlc_info;
-struct wlc_hw_info;
-struct wlc_bsscfg;
-struct wlc_if;
+enum wlc_par_id {
+ IOV_MPC = 1,
+ IOV_RTSTHRESH,
+ IOV_QTXPOWER,
+ IOV_BCN_LI_BCN /* Beacon listen interval in # of beacons */
+};
/***********************************************
* Feature-related macros to optimize out code *
@@ -426,149 +418,238 @@ struct wlc_if;
#define PROMISC_ENAB(wlc) ((wlc)->promisc)
-#define WLC_PREC_COUNT 16 /* Max precedence level implemented */
+#define BRCMS_PREC_COUNT 16 /* Max precedence level implemented */
/* pri is priority encoded in the packet. This maps the Packet priority to
* enqueue precedence as defined in wlc_prec_map
*/
extern const u8 wlc_prio2prec_map[];
-#define WLC_PRIO_TO_PREC(pri) wlc_prio2prec_map[(pri) & 7]
+#define BRCMS_PRIO_TO_PREC(pri) wlc_prio2prec_map[(pri) & 7]
/* This maps priority to one precedence higher - Used by PS-Poll response packets to
* simulate enqueue-at-head operation, but still maintain the order on the queue
*/
-#define WLC_PRIO_TO_HI_PREC(pri) min(WLC_PRIO_TO_PREC(pri) + 1, WLC_PREC_COUNT - 1)
+#define BRCMS_PRIO_TO_HI_PREC(pri) min(BRCMS_PRIO_TO_PREC(pri) + 1,\
+ BRCMS_PREC_COUNT - 1)
extern const u8 wme_fifo2ac[];
#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
/* Mask to describe all precedence levels */
-#define WLC_PREC_BMP_ALL MAXBITVAL(WLC_PREC_COUNT)
+#define BRCMS_PREC_BMP_ALL MAXBITVAL(BRCMS_PREC_COUNT)
/* Define a bitmap of precedences comprised by each AC */
-#define WLC_PREC_BMP_AC_BE (NBITVAL(WLC_PRIO_TO_PREC(PRIO_8021D_BE)) | \
- NBITVAL(WLC_PRIO_TO_HI_PREC(PRIO_8021D_BE)) | \
- NBITVAL(WLC_PRIO_TO_PREC(PRIO_8021D_EE)) | \
- NBITVAL(WLC_PRIO_TO_HI_PREC(PRIO_8021D_EE)))
-#define WLC_PREC_BMP_AC_BK (NBITVAL(WLC_PRIO_TO_PREC(PRIO_8021D_BK)) | \
- NBITVAL(WLC_PRIO_TO_HI_PREC(PRIO_8021D_BK)) | \
- NBITVAL(WLC_PRIO_TO_PREC(PRIO_8021D_NONE)) | \
- NBITVAL(WLC_PRIO_TO_HI_PREC(PRIO_8021D_NONE)))
-#define WLC_PREC_BMP_AC_VI (NBITVAL(WLC_PRIO_TO_PREC(PRIO_8021D_CL)) | \
- NBITVAL(WLC_PRIO_TO_HI_PREC(PRIO_8021D_CL)) | \
- NBITVAL(WLC_PRIO_TO_PREC(PRIO_8021D_VI)) | \
- NBITVAL(WLC_PRIO_TO_HI_PREC(PRIO_8021D_VI)))
-#define WLC_PREC_BMP_AC_VO (NBITVAL(WLC_PRIO_TO_PREC(PRIO_8021D_VO)) | \
- NBITVAL(WLC_PRIO_TO_HI_PREC(PRIO_8021D_VO)) | \
- NBITVAL(WLC_PRIO_TO_PREC(PRIO_8021D_NC)) | \
- NBITVAL(WLC_PRIO_TO_HI_PREC(PRIO_8021D_NC)))
+#define BRCMS_PREC_BMP_AC_BE (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_BE)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_BE)) | \
+ NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_EE)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_EE)))
+#define BRCMS_PREC_BMP_AC_BK (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_BK)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_BK)) | \
+ NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_NONE)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_NONE)))
+#define BRCMS_PREC_BMP_AC_VI (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_CL)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_CL)) | \
+ NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_VI)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_VI)))
+#define BRCMS_PREC_BMP_AC_VO (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_VO)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_VO)) | \
+ NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_NC)) | \
+ NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_NC)))
/* WME Support */
#define WME_ENAB(pub) ((pub)->_wme != OFF)
#define WME_AUTO(wlc) ((wlc)->pub->_wme == AUTO)
-#define WLC_USE_COREFLAGS 0xffffffff /* invalid core flags, use the saved coreflags */
+/* invalid core flags, use the saved coreflags */
+#define BRCMS_USE_COREFLAGS 0xffffffff
/* network protection config */
-#define WLC_PROT_G_SPEC 1 /* SPEC g protection */
-#define WLC_PROT_G_OVR 2 /* SPEC g prot override */
-#define WLC_PROT_G_USER 3 /* gmode specified by user */
-#define WLC_PROT_OVERLAP 4 /* overlap */
-#define WLC_PROT_N_USER 10 /* nmode specified by user */
-#define WLC_PROT_N_CFG 11 /* n protection */
-#define WLC_PROT_N_CFG_OVR 12 /* n protection override */
-#define WLC_PROT_N_NONGF 13 /* non-GF protection */
-#define WLC_PROT_N_NONGF_OVR 14 /* non-GF protection override */
-#define WLC_PROT_N_PAM_OVR 15 /* n preamble override */
-#define WLC_PROT_N_OBSS 16 /* non-HT OBSS present */
+#define BRCMS_PROT_G_SPEC 1 /* SPEC g protection */
+#define BRCMS_PROT_G_OVR 2 /* SPEC g prot override */
+#define BRCMS_PROT_G_USER 3 /* gmode specified by user */
+#define BRCMS_PROT_OVERLAP 4 /* overlap */
+#define BRCMS_PROT_N_USER 10 /* nmode specified by user */
+#define BRCMS_PROT_N_CFG 11 /* n protection */
+#define BRCMS_PROT_N_CFG_OVR 12 /* n protection override */
+#define BRCMS_PROT_N_NONGF 13 /* non-GF protection */
+#define BRCMS_PROT_N_NONGF_OVR 14 /* non-GF protection override */
+#define BRCMS_PROT_N_PAM_OVR 15 /* n preamble override */
+#define BRCMS_PROT_N_OBSS 16 /* non-HT OBSS present */
+
+/*
+ * 54g modes (basic bits may still be overridden)
+ *
+ * GMODE_LEGACY_B Rateset: 1b, 2b, 5.5, 11
+ * Preamble: Long
+ * Shortslot: Off
+ * GMODE_AUTO Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ * Extended Rateset: 6, 9, 12, 48
+ * Preamble: Long
+ * Shortslot: Auto
+ * GMODE_ONLY Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54
+ * Extended Rateset: 6b, 9, 12b, 48
+ * Preamble: Short required
+ * Shortslot: Auto
+ * GMODE_B_DEFERRED Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ * Extended Rateset: 6, 9, 12, 48
+ * Preamble: Long
+ * Shortslot: On
+ * GMODE_PERFORMANCE Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54
+ * Preamble: Short required
+ * Shortslot: On and required
+ * GMODE_LRS Rateset: 1b, 2b, 5.5b, 11b
+ * Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54
+ * Preamble: Long
+ * Shortslot: Auto
+ */
+#define GMODE_LEGACY_B 0
+#define GMODE_AUTO 1
+#define GMODE_ONLY 2
+#define GMODE_B_DEFERRED 3
+#define GMODE_PERFORMANCE 4
+#define GMODE_LRS 5
+#define GMODE_MAX 6
+
+/* values for PLCPHdr_override */
+#define BRCMS_PLCP_AUTO -1
+#define BRCMS_PLCP_SHORT 0
+#define BRCMS_PLCP_LONG 1
+
+/* values for g_protection_override and n_protection_override */
+#define BRCMS_PROTECTION_AUTO -1
+#define BRCMS_PROTECTION_OFF 0
+#define BRCMS_PROTECTION_ON 1
+#define BRCMS_PROTECTION_MMHDR_ONLY 2
+#define BRCMS_PROTECTION_CTS_ONLY 3
+
+/* values for g_protection_control and n_protection_control */
+#define BRCMS_PROTECTION_CTL_OFF 0
+#define BRCMS_PROTECTION_CTL_LOCAL 1
+#define BRCMS_PROTECTION_CTL_OVERLAP 2
+
+/* values for n_protection */
+#define BRCMS_N_PROTECTION_OFF 0
+#define BRCMS_N_PROTECTION_OPTIONAL 1
+#define BRCMS_N_PROTECTION_20IN40 2
+#define BRCMS_N_PROTECTION_MIXEDMODE 3
+
+/* values for band specific 40MHz capabilities */
+#define BRCMS_N_BW_20ALL 0
+#define BRCMS_N_BW_40ALL 1
+#define BRCMS_N_BW_20IN2G_40IN5G 2
+
+/* bitflags for SGI support (sgi_rx iovar) */
+#define BRCMS_N_SGI_20 0x01
+#define BRCMS_N_SGI_40 0x02
+
+/* defines used by the nrate iovar */
+#define NRATE_MCS_INUSE 0x00000080 /* MCS in use, indicates b0-6 holds an mcs */
+#define NRATE_RATE_MASK 0x0000007f /* rate/mcs value */
+#define NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
+#define NRATE_STF_SHIFT 8 /* stf mode shift */
+#define NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */
+#define NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicates override of mcs only */
+#define NRATE_SGI_MASK 0x00800000 /* sgi mode */
+#define NRATE_SGI_SHIFT 23 /* sgi mode */
+#define NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
+#define NRATE_LDPC_SHIFT 22 /* ldpc shift */
+
+#define NRATE_STF_SISO 0 /* stf mode SISO */
+#define NRATE_STF_CDD 1 /* stf mode CDD */
+#define NRATE_STF_STBC 2 /* stf mode STBC */
+#define NRATE_STF_SDM 3 /* stf mode SDM */
+
+#define ANT_SELCFG_MAX 4 /* max number of antenna configurations */
+
+#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */
+
+struct brcms_antselcfg {
+ u8 ant_config[ANT_SELCFG_MAX]; /* antenna configuration */
+ u8 num_antcfg; /* number of available antenna configurations */
+};
/* common functions for every port */
-extern void *wlc_attach(struct wl_info *wl, u16 vendor, u16 device, uint unit,
- bool piomode, void *regsva, uint bustype, void *btparam,
- uint *perr);
-extern uint wlc_detach(struct wlc_info *wlc);
-extern int wlc_up(struct wlc_info *wlc);
-extern uint wlc_down(struct wlc_info *wlc);
-
-extern int wlc_set(struct wlc_info *wlc, int cmd, int arg);
-extern int wlc_get(struct wlc_info *wlc, int cmd, int *arg);
-extern int wlc_iovar_getint(struct wlc_info *wlc, const char *name, int *arg);
-extern int wlc_iovar_setint(struct wlc_info *wlc, const char *name, int arg);
-extern bool wlc_chipmatch(u16 vendor, u16 device);
-extern void wlc_init(struct wlc_info *wlc);
-extern void wlc_reset(struct wlc_info *wlc);
-
-extern void wlc_intrson(struct wlc_info *wlc);
-extern u32 wlc_intrsoff(struct wlc_info *wlc);
-extern void wlc_intrsrestore(struct wlc_info *wlc, u32 macintmask);
-extern bool wlc_intrsupd(struct wlc_info *wlc);
-extern bool wlc_isr(struct wlc_info *wlc, bool *wantdpc);
-extern bool wlc_dpc(struct wlc_info *wlc, bool bounded);
-extern bool wlc_sendpkt_mac80211(struct wlc_info *wlc, struct sk_buff *sdu,
- struct ieee80211_hw *hw);
-extern int wlc_iovar_op(struct wlc_info *wlc, const char *name, void *params,
- int p_len, void *arg, int len, bool set,
- struct wlc_if *wlcif);
-extern int wlc_ioctl(struct wlc_info *wlc, int cmd, void *arg, int len,
- struct wlc_if *wlcif);
-extern bool wlc_aggregatable(struct wlc_info *wlc, u8 tid);
+extern void *brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device,
+ uint unit, bool piomode, void *regsva, uint bustype,
+ void *btparam, uint *perr);
+extern uint brcms_c_detach(struct brcms_c_info *wlc);
+extern int brcms_c_up(struct brcms_c_info *wlc);
+extern uint brcms_c_down(struct brcms_c_info *wlc);
+
+extern int brcms_c_set(struct brcms_c_info *wlc, int cmd, int arg);
+extern int brcms_c_get(struct brcms_c_info *wlc, int cmd, int *arg);
+extern bool brcms_c_chipmatch(u16 vendor, u16 device);
+extern void brcms_c_init(struct brcms_c_info *wlc);
+extern void brcms_c_reset(struct brcms_c_info *wlc);
+
+extern void brcms_c_intrson(struct brcms_c_info *wlc);
+extern u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
+extern void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
+extern bool brcms_c_intrsupd(struct brcms_c_info *wlc);
+extern bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc);
+extern bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
+extern bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc,
+ struct sk_buff *sdu,
+ struct ieee80211_hw *hw);
+extern int brcms_c_ioctl(struct brcms_c_info *wlc, int cmd, void *arg, int len,
+ struct brcms_c_if *wlcif);
+extern bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
/* helper functions */
-extern void wlc_statsupd(struct wlc_info *wlc);
-extern void wlc_protection_upd(struct wlc_info *wlc, uint idx, int val);
-extern int wlc_get_header_len(void);
-extern void wlc_mac_bcn_promisc_change(struct wlc_info *wlc, bool promisc);
-extern void wlc_set_addrmatch(struct wlc_info *wlc, int match_reg_offset,
- const u8 *addr);
-extern void wlc_wme_setparams(struct wlc_info *wlc, u16 aci,
+extern void brcms_c_statsupd(struct brcms_c_info *wlc);
+extern void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx,
+ int val);
+extern int brcms_c_get_header_len(void);
+extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc,
+ bool promisc);
+extern void brcms_c_set_addrmatch(struct brcms_c_info *wlc,
+ int match_reg_offset,
+ const u8 *addr);
+extern void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
const struct ieee80211_tx_queue_params *arg,
bool suspend);
-extern struct wlc_pub *wlc_pub(void *wlc);
+extern struct brcms_pub *brcms_c_pub(void *wlc);
/* common functions for every port */
-extern void wlc_mhf(struct wlc_info *wlc, u8 idx, u16 mask, u16 val,
+extern void brcms_c_mhf(struct brcms_c_info *wlc, u8 idx, u16 mask, u16 val,
int bands);
-extern void wlc_rate_lookup_init(struct wlc_info *wlc, wlc_rateset_t *rateset);
-extern void wlc_default_rateset(struct wlc_info *wlc, wlc_rateset_t *rs);
-
-struct ieee80211_sta;
-extern void wlc_ampdu_flush(struct wlc_info *wlc, struct ieee80211_sta *sta,
- u16 tid);
+extern void brcms_c_rate_lookup_init(struct brcms_c_info *wlc,
+ wlc_rateset_t *rateset);
+extern void brcms_default_rateset(struct brcms_c_info *wlc, wlc_rateset_t *rs);
+
+extern void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
+ struct ieee80211_sta *sta, u16 tid);
+extern void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
+ u8 ba_wsize, uint max_rx_ampdu_bytes);
+extern int brcms_c_set_par(struct brcms_c_info *wlc, enum wlc_par_id par_id,
+ int val);
+extern int brcms_c_get_par(struct brcms_c_info *wlc, enum wlc_par_id par_id,
+ int *ret_int_ptr);
+extern char *getvar(char *vars, const char *name);
+extern int getintvar(char *vars, const char *name);
/* wlc_phy.c helper functions */
-extern void wlc_set_ps_ctrl(struct wlc_info *wlc);
-extern void wlc_mctrl(struct wlc_info *wlc, u32 mask, u32 val);
+extern void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc);
+extern void brcms_c_mctrl(struct brcms_c_info *wlc, u32 mask, u32 val);
-/* ioctl */
-extern int wlc_iovar_check(struct wlc_pub *pub, const bcm_iovar_t *vi,
- void *arg,
- int len, bool set);
-
-extern int wlc_module_register(struct wlc_pub *pub, const bcm_iovar_t *iovars,
- const char *name, void *hdl, iovar_fn_t iovar_fn,
+extern int brcms_c_module_register(struct brcms_pub *pub,
+ const char *name, void *hdl,
watchdog_fn_t watchdog_fn, down_fn_t down_fn);
-extern int wlc_module_unregister(struct wlc_pub *pub, const char *name,
+extern int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
void *hdl);
-extern void wlc_suspend_mac_and_wait(struct wlc_info *wlc);
-extern void wlc_enable_mac(struct wlc_info *wlc);
-extern void wlc_associate_upd(struct wlc_info *wlc, bool state);
-extern void wlc_scan_start(struct wlc_info *wlc);
-extern void wlc_scan_stop(struct wlc_info *wlc);
-extern int wlc_get_curband(struct wlc_info *wlc);
-extern void wlc_wait_for_tx_completion(struct wlc_info *wlc, bool drop);
-
-#if defined(BCMDBG)
-extern int wlc_iocregchk(struct wlc_info *wlc, uint band);
-#endif
+extern void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
+extern void brcms_c_enable_mac(struct brcms_c_info *wlc);
+extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
+extern void brcms_c_scan_start(struct brcms_c_info *wlc);
+extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
+extern int brcms_c_get_curband(struct brcms_c_info *wlc);
+extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
+ bool drop);
/* helper functions */
-extern bool wlc_check_radio_disabled(struct wlc_info *wlc);
-extern bool wlc_radio_monitor_stop(struct wlc_info *wlc);
-
-#if defined(BCMDBG)
-extern int wlc_format_ssid(char *buf, const unsigned char ssid[], uint ssid_len);
-#endif
+extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
+extern bool brcms_c_radio_monitor_stop(struct brcms_c_info *wlc);
#define MAXBANDS 2 /* Maximum #of bands */
/* bandstate array indices */
@@ -579,6 +660,6 @@ extern int wlc_format_ssid(char *buf, const unsigned char ssid[], uint ssid_len)
#define BAND_5G_NAME "5G"
/* BMAC RPC: 7 u32 params: pkttotlen, fifo, commit, fid, txpktpend, pktflag, rpc_id */
-#define WLC_RPCTX_PARAMS 32
+#define BRCMS_RPCTX_PARAMS 32
-#endif /* _wlc_pub_h_ */
+#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_rate.c b/drivers/staging/brcm80211/brcmsmac/rate.c
index 87b252d6a7f..f0e4b99c256 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_rate.c
+++ b/drivers/staging/brcm80211/brcmsmac/rate.c
@@ -13,26 +13,16 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <proto/802.11.h>
-#include <bcmdefs.h>
-#include <bcmutils.h>
-#include <aiutils.h>
-#include <wlioctl.h>
-#include <sbhnddma.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
-#include "wlc_types.h"
#include "d11.h"
-#include "wl_dbg.h"
-#include "wlc_cfg.h"
-#include "wlc_scb.h"
-#include "wlc_pub.h"
-#include "wlc_rate.h"
+#include "pub.h"
+#include "rate.h"
/* Rate info per rate: It tells whether a rate is ofdm or not and its phy_rate value */
-const u8 rate_info[WLC_MAXRATE + 1] = {
+const u8 rate_info[BRCM_MAXRATE + 1] = {
/* 0 1 2 3 4 5 6 7 8 9 */
/* 0 */ 0x00, 0x00, 0x0a, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00,
/* 10 */ 0x00, 0x37, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x00,
@@ -48,134 +38,143 @@ const u8 rate_info[WLC_MAXRATE + 1] = {
};
/* rates are in units of Kbps */
-const mcs_info_t mcs_table[MCS_TABLE_SIZE] = {
+const struct brcms_mcs_info mcs_table[MCS_TABLE_SIZE] = {
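+/*
+ * Each entry lists the 20 MHz and 40 MHz rates followed by their
+ * short-guard-interval variants, i.e. rate * 10/9, hence CEIL(rate * 10, 9).
+ */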
/* MCS 0: SS 1, MOD: BPSK, CR 1/2 */
{6500, 13500, CEIL(6500 * 10, 9), CEIL(13500 * 10, 9), 0x00,
- WLC_RATE_6M},
+ BRCM_RATE_6M},
/* MCS 1: SS 1, MOD: QPSK, CR 1/2 */
{13000, 27000, CEIL(13000 * 10, 9), CEIL(27000 * 10, 9), 0x08,
- WLC_RATE_12M},
+ BRCM_RATE_12M},
/* MCS 2: SS 1, MOD: QPSK, CR 3/4 */
{19500, 40500, CEIL(19500 * 10, 9), CEIL(40500 * 10, 9), 0x0A,
- WLC_RATE_18M},
+ BRCM_RATE_18M},
/* MCS 3: SS 1, MOD: 16QAM, CR 1/2 */
{26000, 54000, CEIL(26000 * 10, 9), CEIL(54000 * 10, 9), 0x10,
- WLC_RATE_24M},
+ BRCM_RATE_24M},
/* MCS 4: SS 1, MOD: 16QAM, CR 3/4 */
{39000, 81000, CEIL(39000 * 10, 9), CEIL(81000 * 10, 9), 0x12,
- WLC_RATE_36M},
+ BRCM_RATE_36M},
/* MCS 5: SS 1, MOD: 64QAM, CR 2/3 */
{52000, 108000, CEIL(52000 * 10, 9), CEIL(108000 * 10, 9), 0x19,
- WLC_RATE_48M},
+ BRCM_RATE_48M},
/* MCS 6: SS 1, MOD: 64QAM, CR 3/4 */
{58500, 121500, CEIL(58500 * 10, 9), CEIL(121500 * 10, 9), 0x1A,
- WLC_RATE_54M},
+ BRCM_RATE_54M},
/* MCS 7: SS 1, MOD: 64QAM, CR 5/6 */
{65000, 135000, CEIL(65000 * 10, 9), CEIL(135000 * 10, 9), 0x1C,
- WLC_RATE_54M},
+ BRCM_RATE_54M},
/* MCS 8: SS 2, MOD: BPSK, CR 1/2 */
{13000, 27000, CEIL(13000 * 10, 9), CEIL(27000 * 10, 9), 0x40,
- WLC_RATE_6M},
+ BRCM_RATE_6M},
/* MCS 9: SS 2, MOD: QPSK, CR 1/2 */
{26000, 54000, CEIL(26000 * 10, 9), CEIL(54000 * 10, 9), 0x48,
- WLC_RATE_12M},
+ BRCM_RATE_12M},
/* MCS 10: SS 2, MOD: QPSK, CR 3/4 */
{39000, 81000, CEIL(39000 * 10, 9), CEIL(81000 * 10, 9), 0x4A,
- WLC_RATE_18M},
+ BRCM_RATE_18M},
/* MCS 11: SS 2, MOD: 16QAM, CR 1/2 */
{52000, 108000, CEIL(52000 * 10, 9), CEIL(108000 * 10, 9), 0x50,
- WLC_RATE_24M},
+ BRCM_RATE_24M},
/* MCS 12: SS 2, MOD: 16QAM, CR 3/4 */
{78000, 162000, CEIL(78000 * 10, 9), CEIL(162000 * 10, 9), 0x52,
- WLC_RATE_36M},
+ BRCM_RATE_36M},
/* MCS 13: SS 2, MOD: 64QAM, CR 2/3 */
{104000, 216000, CEIL(104000 * 10, 9), CEIL(216000 * 10, 9), 0x59,
- WLC_RATE_48M},
+ BRCM_RATE_48M},
/* MCS 14: SS 2, MOD: 64QAM, CR 3/4 */
{117000, 243000, CEIL(117000 * 10, 9), CEIL(243000 * 10, 9), 0x5A,
- WLC_RATE_54M},
+ BRCM_RATE_54M},
/* MCS 15: SS 2, MOD: 64QAM, CR 5/6 */
{130000, 270000, CEIL(130000 * 10, 9), CEIL(270000 * 10, 9), 0x5C,
- WLC_RATE_54M},
+ BRCM_RATE_54M},
/* MCS 16: SS 3, MOD: BPSK, CR 1/2 */
{19500, 40500, CEIL(19500 * 10, 9), CEIL(40500 * 10, 9), 0x80,
- WLC_RATE_6M},
+ BRCM_RATE_6M},
/* MCS 17: SS 3, MOD: QPSK, CR 1/2 */
{39000, 81000, CEIL(39000 * 10, 9), CEIL(81000 * 10, 9), 0x88,
- WLC_RATE_12M},
+ BRCM_RATE_12M},
/* MCS 18: SS 3, MOD: QPSK, CR 3/4 */
{58500, 121500, CEIL(58500 * 10, 9), CEIL(121500 * 10, 9), 0x8A,
- WLC_RATE_18M},
+ BRCM_RATE_18M},
/* MCS 19: SS 3, MOD: 16QAM, CR 1/2 */
{78000, 162000, CEIL(78000 * 10, 9), CEIL(162000 * 10, 9), 0x90,
- WLC_RATE_24M},
+ BRCM_RATE_24M},
/* MCS 20: SS 3, MOD: 16QAM, CR 3/4 */
{117000, 243000, CEIL(117000 * 10, 9), CEIL(243000 * 10, 9), 0x92,
- WLC_RATE_36M},
+ BRCM_RATE_36M},
/* MCS 21: SS 3, MOD: 64QAM, CR 2/3 */
{156000, 324000, CEIL(156000 * 10, 9), CEIL(324000 * 10, 9), 0x99,
- WLC_RATE_48M},
+ BRCM_RATE_48M},
/* MCS 22: SS 3, MOD: 64QAM, CR 3/4 */
{175500, 364500, CEIL(175500 * 10, 9), CEIL(364500 * 10, 9), 0x9A,
- WLC_RATE_54M},
+ BRCM_RATE_54M},
/* MCS 23: SS 3, MOD: 64QAM, CR 5/6 */
{195000, 405000, CEIL(195000 * 10, 9), CEIL(405000 * 10, 9), 0x9B,
- WLC_RATE_54M},
+ BRCM_RATE_54M},
/* MCS 24: SS 4, MOD: BPSK, CR 1/2 */
{26000, 54000, CEIL(26000 * 10, 9), CEIL(54000 * 10, 9), 0xC0,
- WLC_RATE_6M},
+ BRCM_RATE_6M},
/* MCS 25: SS 4, MOD: QPSK, CR 1/2 */
{52000, 108000, CEIL(52000 * 10, 9), CEIL(108000 * 10, 9), 0xC8,
- WLC_RATE_12M},
+ BRCM_RATE_12M},
/* MCS 26: SS 4, MOD: QPSK, CR 3/4 */
{78000, 162000, CEIL(78000 * 10, 9), CEIL(162000 * 10, 9), 0xCA,
- WLC_RATE_18M},
+ BRCM_RATE_18M},
/* MCS 27: SS 4, MOD: 16QAM, CR 1/2 */
{104000, 216000, CEIL(104000 * 10, 9), CEIL(216000 * 10, 9), 0xD0,
- WLC_RATE_24M},
+ BRCM_RATE_24M},
/* MCS 28: SS 4, MOD: 16QAM, CR 3/4 */
{156000, 324000, CEIL(156000 * 10, 9), CEIL(324000 * 10, 9), 0xD2,
- WLC_RATE_36M},
+ BRCM_RATE_36M},
/* MCS 29: SS 4, MOD: 64QAM, CR 2/3 */
{208000, 432000, CEIL(208000 * 10, 9), CEIL(432000 * 10, 9), 0xD9,
- WLC_RATE_48M},
+ BRCM_RATE_48M},
/* MCS 30: SS 4, MOD: 64QAM, CR 3/4 */
{234000, 486000, CEIL(234000 * 10, 9), CEIL(486000 * 10, 9), 0xDA,
- WLC_RATE_54M},
+ BRCM_RATE_54M},
/* MCS 31: SS 4, MOD: 64QAM, CR 5/6 */
{260000, 540000, CEIL(260000 * 10, 9), CEIL(540000 * 10, 9), 0xDB,
- WLC_RATE_54M},
+ BRCM_RATE_54M},
/* MCS 32: SS 1, MOD: BPSK, CR 1/2 */
- {0, 6000, 0, CEIL(6000 * 10, 9), 0x00, WLC_RATE_6M},
+ {0, 6000, 0, CEIL(6000 * 10, 9), 0x00, BRCM_RATE_6M},
};
/* phycfg for legacy OFDM frames: code rate, modulation scheme, spatial streams
* Number of spatial streams: always 1
* other fields: refer to table 78 of section 17.3.2.2 of the original .11a standard
*/
-typedef struct legacy_phycfg {
+struct legacy_phycfg {
u32 rate_ofdm; /* ofdm mac rate */
u8 tx_phy_ctl3; /* phy ctl byte 3, code rate, modulation type, # of streams */
-} legacy_phycfg_t;
+};
#define LEGACY_PHYCFG_TABLE_SIZE 12 /* Number of legacy_rate_cfg entries in the table */
/* In CCK mode LPPHY overloads OFDM Modulation bits with CCK Data Rate */
/* Eventually MIMOPHY would also be converted to this format */
/* 0 = 1Mbps; 1 = 2Mbps; 2 = 5.5Mbps; 3 = 11Mbps */
-static const legacy_phycfg_t legacy_phycfg_table[LEGACY_PHYCFG_TABLE_SIZE] = {
- {WLC_RATE_1M, 0x00}, /* CCK 1Mbps, data rate 0 */
- {WLC_RATE_2M, 0x08}, /* CCK 2Mbps, data rate 1 */
- {WLC_RATE_5M5, 0x10}, /* CCK 5.5Mbps, data rate 2 */
- {WLC_RATE_11M, 0x18}, /* CCK 11Mbps, data rate 3 */
- {WLC_RATE_6M, 0x00}, /* OFDM 6Mbps, code rate 1/2, BPSK, 1 spatial stream */
- {WLC_RATE_9M, 0x02}, /* OFDM 9Mbps, code rate 3/4, BPSK, 1 spatial stream */
- {WLC_RATE_12M, 0x08}, /* OFDM 12Mbps, code rate 1/2, QPSK, 1 spatial stream */
- {WLC_RATE_18M, 0x0A}, /* OFDM 18Mbps, code rate 3/4, QPSK, 1 spatial stream */
- {WLC_RATE_24M, 0x10}, /* OFDM 24Mbps, code rate 1/2, 16-QAM, 1 spatial stream */
- {WLC_RATE_36M, 0x12}, /* OFDM 36Mbps, code rate 3/4, 16-QAM, 1 spatial stream */
- {WLC_RATE_48M, 0x19}, /* OFDM 48Mbps, code rate 2/3, 64-QAM, 1 spatial stream */
- {WLC_RATE_54M, 0x1A}, /* OFDM 54Mbps, code rate 3/4, 64-QAM, 1 spatial stream */
+static const struct
+legacy_phycfg legacy_phycfg_table[LEGACY_PHYCFG_TABLE_SIZE] = {
+ {BRCM_RATE_1M, 0x00}, /* CCK 1Mbps, data rate 0 */
+ {BRCM_RATE_2M, 0x08}, /* CCK 2Mbps, data rate 1 */
+ {BRCM_RATE_5M5, 0x10}, /* CCK 5.5Mbps, data rate 2 */
+ {BRCM_RATE_11M, 0x18}, /* CCK 11Mbps, data rate 3 */
+ /* OFDM 6Mbps, code rate 1/2, BPSK, 1 spatial stream */
+ {BRCM_RATE_6M, 0x00},
+ /* OFDM 9Mbps, code rate 3/4, BPSK, 1 spatial stream */
+ {BRCM_RATE_9M, 0x02},
+ /* OFDM 12Mbps, code rate 1/2, QPSK, 1 spatial stream */
+ {BRCM_RATE_12M, 0x08},
+ /* OFDM 18Mbps, code rate 3/4, QPSK, 1 spatial stream */
+ {BRCM_RATE_18M, 0x0A},
+ /* OFDM 24Mbps, code rate 1/2, 16-QAM, 1 spatial stream */
+ {BRCM_RATE_24M, 0x10},
+ /* OFDM 36Mbps, code rate 3/4, 16-QAM, 1 spatial stream */
+ {BRCM_RATE_36M, 0x12},
+ /* OFDM 48Mbps, code rate 2/3, 64-QAM, 1 spatial stream */
+ {BRCM_RATE_48M, 0x19},
+ /* OFDM 54Mbps, code rate 3/4, 64-QAM, 1 spatial stream */
+ {BRCM_RATE_54M, 0x1A},
};
/* Hardware rates (also encodes default basic rates) */
@@ -256,12 +255,10 @@ const wlc_rateset_t cck_rates = {
0x00, 0x00, 0x00, 0x00}
};
-static bool wlc_rateset_valid(wlc_rateset_t *rs, bool check_brate);
-
/* check if rateset is valid.
* if check_brate is true, rateset without a basic rate is considered NOT valid.
*/
-static bool wlc_rateset_valid(wlc_rateset_t *rs, bool check_brate)
+static bool brcms_c_rateset_valid(wlc_rateset_t *rs, bool check_brate)
{
uint idx;
@@ -273,13 +270,13 @@ static bool wlc_rateset_valid(wlc_rateset_t *rs, bool check_brate)
/* error if no basic rates */
for (idx = 0; idx < rs->count; idx++) {
- if (rs->rates[idx] & WLC_RATE_FLAG)
+ if (rs->rates[idx] & BRCMS_RATE_FLAG)
return true;
}
return false;
}
-void wlc_rateset_mcs_upd(wlc_rateset_t *rs, u8 txstreams)
+void brcms_c_rateset_mcs_upd(wlc_rateset_t *rs, u8 txstreams)
{
int i;
for (i = txstreams; i < MAX_STREAMS_SUPPORTED; i++)
@@ -290,11 +287,11 @@ void wlc_rateset_mcs_upd(wlc_rateset_t *rs, u8 txstreams)
* and check if resulting rateset is valid.
*/
bool
-wlc_rate_hwrs_filter_sort_validate(wlc_rateset_t *rs,
+brcms_c_rate_hwrs_filter_sort_validate(wlc_rateset_t *rs,
const wlc_rateset_t *hw_rs,
bool check_brate, u8 txstreams)
{
- u8 rateset[WLC_MAXRATE + 1];
+ u8 rateset[BRCM_MAXRATE + 1];
u8 r;
uint count;
uint i;
@@ -303,18 +300,17 @@ wlc_rate_hwrs_filter_sort_validate(wlc_rateset_t *rs,
count = rs->count;
for (i = 0; i < count; i++) {
- /* mask off "basic rate" bit, WLC_RATE_FLAG */
- r = (int)rs->rates[i] & WLC_RATE_MASK;
- if ((r > WLC_MAXRATE) || (rate_info[r] == 0)) {
+ /* mask off "basic rate" bit, BRCMS_RATE_FLAG */
+ r = (int)rs->rates[i] & BRCMS_RATE_MASK;
+ if ((r > BRCM_MAXRATE) || (rate_info[r] == 0))
continue;
- }
rateset[r] = rs->rates[i]; /* preserve basic bit! */
}
/* fill out the rates in order, looking at only supported rates */
count = 0;
for (i = 0; i < hw_rs->count; i++) {
- r = hw_rs->rates[i] & WLC_RATE_MASK;
+ r = hw_rs->rates[i] & BRCMS_RATE_MASK;
if (rateset[r])
rs->rates[count++] = rateset[r];
}
@@ -325,14 +321,14 @@ wlc_rate_hwrs_filter_sort_validate(wlc_rateset_t *rs,
for (i = 0; i < MCSSET_LEN; i++)
rs->mcs[i] = (rs->mcs[i] & hw_rs->mcs[i]);
- if (wlc_rateset_valid(rs, check_brate))
+ if (brcms_c_rateset_valid(rs, check_brate))
return true;
else
return false;
}
/* calculate the rate of a rx'd frame and return it as a ratespec */
-ratespec_t wlc_compute_rspec(d11rxhdr_t *rxh, u8 *plcp)
+ratespec_t brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp)
{
int phy_type;
ratespec_t rspec = PHY_TXC1_BW_20MHZ << RSPEC_BW_SHIFT;
@@ -345,12 +341,13 @@ ratespec_t wlc_compute_rspec(d11rxhdr_t *rxh, u8 *plcp)
switch (rxh->PhyRxStatus_0 & PRXS0_FT_MASK) {
case PRXS0_CCK:
rspec =
- CCK_PHY2MAC_RATE(((cck_phy_hdr_t *) plcp)->signal);
+ CCK_PHY2MAC_RATE(
+ ((struct cck_phy_hdr *) plcp)->signal);
break;
case PRXS0_OFDM:
rspec =
- OFDM_PHY2MAC_RATE(((ofdm_phy_hdr_t *) plcp)->
- rlpt[0]);
+ OFDM_PHY2MAC_RATE(
+ ((struct ofdm_phy_hdr *) plcp)->rlpt[0]);
break;
case PRXS0_PREN:
rspec = (plcp[0] & MIMO_PLCP_MCS_MASK) | RSPEC_MIMORATE;
@@ -370,15 +367,17 @@ ratespec_t wlc_compute_rspec(d11rxhdr_t *rxh, u8 *plcp)
rspec |= RSPEC_SHORT_GI;
} else
if ((phy_type == PHY_TYPE_A) || (rxh->PhyRxStatus_0 & PRXS0_OFDM))
- rspec = OFDM_PHY2MAC_RATE(((ofdm_phy_hdr_t *) plcp)->rlpt[0]);
+ rspec = OFDM_PHY2MAC_RATE(
+ ((struct ofdm_phy_hdr *) plcp)->rlpt[0]);
else
- rspec = CCK_PHY2MAC_RATE(((cck_phy_hdr_t *) plcp)->signal);
+ rspec = CCK_PHY2MAC_RATE(
+ ((struct cck_phy_hdr *) plcp)->signal);
return rspec;
}
/* copy rateset src to dst as-is (no masking or sorting) */
-void wlc_rateset_copy(const wlc_rateset_t *src, wlc_rateset_t *dst)
+void brcms_c_rateset_copy(const wlc_rateset_t *src, wlc_rateset_t *dst)
{
memcpy(dst, src, sizeof(wlc_rateset_t));
}
@@ -393,7 +392,7 @@ void wlc_rateset_copy(const wlc_rateset_t *src, wlc_rateset_t *dst)
* 'xmask' is the copy mask (typically 0x7f or 0xff).
*/
void
-wlc_rateset_filter(wlc_rateset_t *src, wlc_rateset_t *dst, bool basic_only,
+brcms_c_rateset_filter(wlc_rateset_t *src, wlc_rateset_t *dst, bool basic_only,
u8 rates, uint xmask, bool mcsallow)
{
uint i;
@@ -403,28 +402,28 @@ wlc_rateset_filter(wlc_rateset_t *src, wlc_rateset_t *dst, bool basic_only,
count = 0;
for (i = 0; i < src->count; i++) {
r = src->rates[i];
- if (basic_only && !(r & WLC_RATE_FLAG))
+ if (basic_only && !(r & BRCMS_RATE_FLAG))
continue;
- if ((rates == WLC_RATES_CCK) && IS_OFDM((r & WLC_RATE_MASK)))
+ if (rates == BRCMS_RATES_CCK && IS_OFDM((r & BRCMS_RATE_MASK)))
continue;
- if ((rates == WLC_RATES_OFDM) && IS_CCK((r & WLC_RATE_MASK)))
+ if (rates == BRCMS_RATES_OFDM && IS_CCK((r & BRCMS_RATE_MASK)))
continue;
dst->rates[count++] = r & xmask;
}
dst->count = count;
dst->htphy_membership = src->htphy_membership;
- if (mcsallow && rates != WLC_RATES_CCK)
+ if (mcsallow && rates != BRCMS_RATES_CCK)
memcpy(&dst->mcs[0], &src->mcs[0], MCSSET_LEN);
else
- wlc_rateset_mcs_clear(dst);
+ brcms_c_rateset_mcs_clear(dst);
}
/* select rateset for a given phy_type and bandtype and filter it, sort it
* and fill rs_tgt with result
*/
void
-wlc_rateset_default(wlc_rateset_t *rs_tgt, const wlc_rateset_t *rs_hw,
+brcms_c_rateset_default(wlc_rateset_t *rs_tgt, const wlc_rateset_t *rs_hw,
uint phy_type, int bandtype, bool cck_only, uint rate_mask,
bool mcsallow, u8 bw, u8 txstreams)
{
@@ -435,10 +434,10 @@ wlc_rateset_default(wlc_rateset_t *rs_tgt, const wlc_rateset_t *rs_hw,
(PHYTYPE_IS(phy_type, PHY_TYPE_LCN)) ||
(PHYTYPE_IS(phy_type, PHY_TYPE_SSN))) {
if (BAND_5G(bandtype)) {
- rs_dflt = (bw == WLC_20_MHZ ?
+ rs_dflt = (bw == BRCMS_20_MHZ ?
&ofdm_mimo_rates : &ofdm_40bw_mimo_rates);
} else {
- rs_dflt = (bw == WLC_20_MHZ ?
+ rs_dflt = (bw == BRCMS_20_MHZ ?
&cck_ofdm_mimo_rates :
&cck_ofdm_40bw_mimo_rates);
}
@@ -457,16 +456,16 @@ wlc_rateset_default(wlc_rateset_t *rs_tgt, const wlc_rateset_t *rs_hw,
if (!rs_hw)
rs_hw = rs_dflt;
- wlc_rateset_copy(rs_dflt, &rs_sel);
- wlc_rateset_mcs_upd(&rs_sel, txstreams);
- wlc_rateset_filter(&rs_sel, rs_tgt, false,
- cck_only ? WLC_RATES_CCK : WLC_RATES_CCK_OFDM,
+ brcms_c_rateset_copy(rs_dflt, &rs_sel);
+ brcms_c_rateset_mcs_upd(&rs_sel, txstreams);
+ brcms_c_rateset_filter(&rs_sel, rs_tgt, false,
+ cck_only ? BRCMS_RATES_CCK : BRCMS_RATES_CCK_OFDM,
rate_mask, mcsallow);
- wlc_rate_hwrs_filter_sort_validate(rs_tgt, rs_hw, false,
+ brcms_c_rate_hwrs_filter_sort_validate(rs_tgt, rs_hw, false,
mcsallow ? txstreams : 1);
}
-s16 wlc_rate_legacy_phyctl(uint rate)
+s16 brcms_c_rate_legacy_phyctl(uint rate)
{
uint i;
for (i = 0; i < LEGACY_PHYCFG_TABLE_SIZE; i++)
@@ -476,23 +475,23 @@ s16 wlc_rate_legacy_phyctl(uint rate)
return -1;
}
-void wlc_rateset_mcs_clear(wlc_rateset_t *rateset)
+void brcms_c_rateset_mcs_clear(wlc_rateset_t *rateset)
{
uint i;
for (i = 0; i < MCSSET_LEN; i++)
rateset->mcs[i] = 0;
}
-void wlc_rateset_mcs_build(wlc_rateset_t *rateset, u8 txstreams)
+void brcms_c_rateset_mcs_build(wlc_rateset_t *rateset, u8 txstreams)
{
memcpy(&rateset->mcs[0], &cck_ofdm_mimo_rates.mcs[0], MCSSET_LEN);
- wlc_rateset_mcs_upd(rateset, txstreams);
+ brcms_c_rateset_mcs_upd(rateset, txstreams);
}
/* Based on bandwidth passed, allow/disallow MCS 32 in the rateset */
-void wlc_rateset_bw_mcs_filter(wlc_rateset_t *rateset, u8 bw)
+void brcms_c_rateset_bw_mcs_filter(wlc_rateset_t *rateset, u8 bw)
{
- if (bw == WLC_40_MHZ)
+ if (bw == BRCMS_40_MHZ)
setbit(rateset->mcs, 32);
else
clrbit(rateset->mcs, 32);
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_rate.h b/drivers/staging/brcm80211/brcmsmac/rate.h
index 5575e83bdc6..dbfd3e5816d 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_rate.h
+++ b/drivers/staging/brcm80211/brcmsmac/rate.h
@@ -14,31 +14,33 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _WLC_RATE_H_
-#define _WLC_RATE_H_
+#ifndef _BRCM_RATE_H_
+#define _BRCM_RATE_H_
+
+#include "types.h"
extern const u8 rate_info[];
-extern const struct wlc_rateset cck_ofdm_mimo_rates;
-extern const struct wlc_rateset ofdm_mimo_rates;
-extern const struct wlc_rateset cck_ofdm_rates;
-extern const struct wlc_rateset ofdm_rates;
-extern const struct wlc_rateset cck_rates;
-extern const struct wlc_rateset gphy_legacy_rates;
-extern const struct wlc_rateset wlc_lrs_rates;
-extern const struct wlc_rateset rate_limit_1_2;
-
-typedef struct mcs_info {
+extern const struct brcms_rateset cck_ofdm_mimo_rates;
+extern const struct brcms_rateset ofdm_mimo_rates;
+extern const struct brcms_rateset cck_ofdm_rates;
+extern const struct brcms_rateset ofdm_rates;
+extern const struct brcms_rateset cck_rates;
+extern const struct brcms_rateset gphy_legacy_rates;
+extern const struct brcms_rateset wlc_lrs_rates;
+extern const struct brcms_rateset rate_limit_1_2;
+
+struct brcms_mcs_info {
u32 phy_rate_20; /* phy rate in kbps [20Mhz] */
u32 phy_rate_40; /* phy rate in kbps [40Mhz] */
u32 phy_rate_20_sgi; /* phy rate in kbps [20Mhz] with SGI */
u32 phy_rate_40_sgi; /* phy rate in kbps [40Mhz] with SGI */
u8 tx_phy_ctl3; /* phy ctl byte 3, code rate, modulation type, # of streams */
u8 leg_ofdm; /* matching legacy ofdm rate in 500kbps */
-} mcs_info_t;
+};
-#define WLC_MAXMCS 32 /* max valid mcs index */
+#define BRCMS_MAXMCS 32 /* max valid mcs index */
#define MCS_TABLE_SIZE 33 /* Number of mcs entries in the table */
-extern const mcs_info_t mcs_table[];
+extern const struct brcms_mcs_info mcs_table[];
#define MCS_INVALID 0xFF
#define MCS_CR_MASK 0x07 /* Code Rate bit mask */
@@ -55,14 +57,14 @@ extern const mcs_info_t mcs_table[];
#define VALID_MCS(_mcs) ((_mcs < MCS_TABLE_SIZE))
/* Macro to use the rate_info table */
-#define WLC_RATE_MASK_FULL 0xff /* Rate value mask with basic rate flag */
+#define BRCMS_RATE_MASK_FULL 0xff /* Rate value mask with basic rate flag */
-#define WLC_RATE_500K_TO_BPS(rate) ((rate) * 500000) /* convert 500kbps to bps */
+/* convert 500kbps to bps */
+#define BRCMS_RATE_500K_TO_BPS(rate) ((rate) * 500000)
/* rate spec : holds rate and mode specific information required to generate a tx frame. */
/* Legacy CCK and OFDM information is held in the same manner as was done in the past */
/* (in the lower byte) the upper 3 bytes primarily hold MIMO specific information */
-typedef u32 ratespec_t;
/* rate spec bit fields */
#define RSPEC_RATE_MASK 0x0000007F /* Either 500Kbps units or MIMO MCS idx */
@@ -80,10 +82,10 @@ typedef u32 ratespec_t;
#define RSPEC_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */
#define RSPEC_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicates override rate only */
-#define WLC_HTPHY 127 /* HT PHY Membership */
+#define BRCMS_HTPHY 127 /* HT PHY Membership */
#define RSPEC_ACTIVE(rspec) (rspec & (RSPEC_RATE_MASK | RSPEC_MIMORATE))
-#define RSPEC2RATE(rspec) ((rspec & RSPEC_MIMORATE) ? \
+#define RSPEC2RATE(rspec) ((rspec & RSPEC_MIMORATE) ? \
MCS_RATE((rspec & RSPEC_RATE_MASK), RSPEC_IS40MHZ(rspec), RSPEC_ISSGI(rspec)) : \
(rspec & RSPEC_RATE_MASK))
/* return rate in unit of 500Kbps -- for internal use in wlc_rate_sel.c */
@@ -110,13 +112,14 @@ typedef u32 ratespec_t;
#define PLCP3_STC_SHIFT 4
/* Rate info table; takes a legacy rate or ratespec_t */
-#define IS_MCS(r) (r & RSPEC_MIMORATE)
-#define IS_OFDM(r) (!IS_MCS(r) && (rate_info[(r) & RSPEC_RATE_MASK] & WLC_RATE_FLAG))
+#define IS_MCS(r) (r & RSPEC_MIMORATE)
+#define IS_OFDM(r) (!IS_MCS(r) && (rate_info[(r) & RSPEC_RATE_MASK] & \
+ BRCMS_RATE_FLAG))
#define IS_CCK(r) (!IS_MCS(r) && ( \
- ((r) & WLC_RATE_MASK) == WLC_RATE_1M || \
- ((r) & WLC_RATE_MASK) == WLC_RATE_2M || \
- ((r) & WLC_RATE_MASK) == WLC_RATE_5M5 || \
- ((r) & WLC_RATE_MASK) == WLC_RATE_11M))
+ ((r) & BRCMS_RATE_MASK) == BRCM_RATE_1M || \
+ ((r) & BRCMS_RATE_MASK) == BRCM_RATE_2M || \
+ ((r) & BRCMS_RATE_MASK) == BRCM_RATE_5M5 || \
+ ((r) & BRCMS_RATE_MASK) == BRCM_RATE_11M))
#define IS_SINGLE_STREAM(mcs) (((mcs) <= HIGHEST_SINGLE_STREAM_MCS) || ((mcs) == 32))
#define CCK_RSPEC(cck) ((cck) & RSPEC_RATE_MASK)
#define OFDM_RSPEC(ofdm) (((ofdm) & RSPEC_RATE_MASK) |\
@@ -132,38 +135,39 @@ extern const u8 ofdm_rate_lookup[];
#define OFDM_PHY2MAC_RATE(rlpt) (ofdm_rate_lookup[rlpt & 0x7])
#define CCK_PHY2MAC_RATE(signal) (signal/5)
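For orientation only, not part of the patch: a minimal sketch of how the IS_MCS()/IS_CCK() classification macros above are typically consumed; the rspec argument is a hypothetical ratespec value.

static bool rspec_is_legacy_cck(u32 rspec)
{
	/* HT/MIMO rates carry an MCS index in the low bits of the ratespec */
	if (IS_MCS(rspec))
		return false;
	/* legacy CCK rates are 1, 2, 5.5 and 11 Mbps (500 kbps units) */
	return IS_CCK(rspec);
}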
-/* Rates specified in wlc_rateset_filter() */
-#define WLC_RATES_CCK_OFDM 0
-#define WLC_RATES_CCK 1
-#define WLC_RATES_OFDM 2
-
-/* use the stuct form instead of typedef to fix dependency problems */
-struct wlc_rateset;
+/* Rates specified in brcms_c_rateset_filter() */
+#define BRCMS_RATES_CCK_OFDM 0
+#define BRCMS_RATES_CCK 1
+#define BRCMS_RATES_OFDM 2
/* sanitize and sort a rateset with the basic bit(s) preserved, validate rateset */
-extern bool wlc_rate_hwrs_filter_sort_validate(struct wlc_rateset *rs,
- const struct wlc_rateset *hw_rs,
- bool check_brate,
- u8 txstreams);
+extern bool
+brcms_c_rate_hwrs_filter_sort_validate(struct brcms_rateset *rs,
+ const struct brcms_rateset *hw_rs,
+ bool check_brate, u8 txstreams);
/* copy rateset src to dst as-is (no masking or sorting) */
-extern void wlc_rateset_copy(const struct wlc_rateset *src,
- struct wlc_rateset *dst);
+extern void brcms_c_rateset_copy(const struct brcms_rateset *src,
+ struct brcms_rateset *dst);
/* would be nice to have these documented ... */
-extern ratespec_t wlc_compute_rspec(d11rxhdr_t *rxh, u8 *plcp);
-
-extern void wlc_rateset_filter(struct wlc_rateset *src, struct wlc_rateset *dst,
- bool basic_only, u8 rates, uint xmask,
- bool mcsallow);
-extern void wlc_rateset_default(struct wlc_rateset *rs_tgt,
- const struct wlc_rateset *rs_hw, uint phy_type,
- int bandtype, bool cck_only, uint rate_mask,
- bool mcsallow, u8 bw, u8 txstreams);
-extern s16 wlc_rate_legacy_phyctl(uint rate);
-
-extern void wlc_rateset_mcs_upd(struct wlc_rateset *rs, u8 txstreams);
-extern void wlc_rateset_mcs_clear(struct wlc_rateset *rateset);
-extern void wlc_rateset_mcs_build(struct wlc_rateset *rateset, u8 txstreams);
-extern void wlc_rateset_bw_mcs_filter(struct wlc_rateset *rateset, u8 bw);
-
-#endif /* _WLC_RATE_H_ */
+extern ratespec_t brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
+
+extern void brcms_c_rateset_filter(struct brcms_rateset *src,
+ struct brcms_rateset *dst, bool basic_only, u8 rates, uint xmask,
+ bool mcsallow);
+
+extern void
+brcms_c_rateset_default(struct brcms_rateset *rs_tgt,
+ const struct brcms_rateset *rs_hw, uint phy_type,
+ int bandtype, bool cck_only, uint rate_mask,
+ bool mcsallow, u8 bw, u8 txstreams);
+
+extern s16 brcms_c_rate_legacy_phyctl(uint rate);
+
+extern void brcms_c_rateset_mcs_upd(struct brcms_rateset *rs, u8 txstreams);
+extern void brcms_c_rateset_mcs_clear(struct brcms_rateset *rateset);
+extern void brcms_c_rateset_mcs_build(struct brcms_rateset *rateset,
+ u8 txstreams);
+extern void brcms_c_rateset_bw_mcs_filter(struct brcms_rateset *rateset, u8 bw);
+
+#endif /* _BRCM_RATE_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_scb.h b/drivers/staging/brcm80211/brcmsmac/scb.h
index f07a891d5d2..d6c8328554d 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_scb.h
+++ b/drivers/staging/brcm80211/brcmsmac/scb.h
@@ -14,39 +14,44 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _wlc_scb_h_
-#define _wlc_scb_h_
+#ifndef _BRCM_SCB_H_
+#define _BRCM_SCB_H_
+
+#include <linux/if_ether.h>
+#include <brcmu_utils.h>
+#include <defs.h>
+#include "types.h"
#define AMPDU_TX_BA_MAX_WSIZE 64 /* max Tx ba window size (in pdu) */
/* structure to store per-tid state for the ampdu initiator */
-typedef struct scb_ampdu_tid_ini {
- u32 magic;
+struct scb_ampdu_tid_ini {
u8 tx_in_transit; /* number of pending mpdus in transit in driver */
u8 tid; /* initiator tid for easy lookup */
u8 txretry[AMPDU_TX_BA_MAX_WSIZE]; /* tx retry count; indexed by seq modulo */
struct scb *scb; /* backptr for easy lookup */
-} scb_ampdu_tid_ini_t;
+ u8 ba_wsize; /* negotiated ba window size (in pdu) */
+};
#define AMPDU_MAX_SCB_TID NUMPRIO
-typedef struct scb_ampdu {
+struct scb_ampdu {
struct scb *scb; /* back pointer for easy reference */
u8 mpdu_density; /* mpdu density */
u8 max_pdu; /* max pdus allowed in ampdu */
u8 release; /* # of mpdus released at a time */
u16 min_len; /* min mpdu len to support the density */
- u32 max_rxlen; /* max ampdu rcv length; 8k, 16k, 32k, 64k */
+ u32 max_rx_ampdu_bytes; /* max ampdu rcv length; 8k, 16k, 32k, 64k */
struct pktq txq; /* sdu transmit queue pending aggregation */
/* This could easily be a ini[] pointer and we keep this info in wl itself instead
* of having mac80211 hold it for us. Also could be made dynamic per tid instead of
* static.
*/
- scb_ampdu_tid_ini_t ini[AMPDU_MAX_SCB_TID]; /* initiator info - per tid (NUMPRIO) */
-} scb_ampdu_t;
+ /* initiator info - per tid (NUMPRIO): */
+ struct scb_ampdu_tid_ini ini[AMPDU_MAX_SCB_TID];
+};
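A reading aid, not from the patch itself: with the typedefs dropped, per-tid initiator state is reached through the embedded array; the tid argument is assumed to be a valid priority index below AMPDU_MAX_SCB_TID.

static struct scb_ampdu_tid_ini *scb_ampdu_tid(struct scb_ampdu *ampdu, u8 tid)
{
	/* one initiator block per traffic priority (NUMPRIO entries) */
	return &ampdu->ini[tid];
}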
-#define SCB_MAGIC 0xbeefcafe
-#define INI_MAGIC 0xabcd1234
+#define SCB_MAGIC 0xbeefcafe
/* station control block - one per remote MAC address */
struct scb {
@@ -64,7 +69,7 @@ struct scb {
*/
u16 seqnum[NUMPRIO]; /* WME: driver maintained sw seqnum per priority */
- scb_ampdu_t scb_ampdu; /* AMPDU state including per tid info */
+ struct scb_ampdu scb_ampdu; /* AMPDU state including per tid info */
};
/* scb flags */
@@ -77,4 +82,4 @@ struct scb {
#define SCB_PS(a) NULL
#define SCB_STBC_CAP(a) ((a)->flags & SCB_STBCCAP)
#define SCB_AMPDU(a) true
-#endif /* _wlc_scb_h_ */
+#endif /* _BRCM_SCB_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/bcmsrom_tbl.h b/drivers/staging/brcm80211/brcmsmac/srom.c
index f4b3e61dc37..f39442ed4ce 100644
--- a/drivers/staging/brcm80211/brcmsmac/bcmsrom_tbl.h
+++ b/drivers/staging/brcm80211/brcmsmac/srom.c
@@ -14,19 +14,341 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _bcmsrom_tbl_h_
-#define _bcmsrom_tbl_h_
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/etherdevice.h>
+#include <stdarg.h>
-#include "wlioctl.h"
+#include <chipcommon.h>
+#include <brcmu_utils.h>
+#include "nicpci.h"
+#include "aiutils.h"
+#include "otp.h"
+#include "srom.h"
-typedef struct {
- const char *name;
- u32 revmask;
- u32 flags;
- u16 off;
- u16 mask;
-} sromvar_t;
+#define SROM_OFFSET(sih) ((sih->ccrev > 31) ? \
+ (((sih->cccaps & CC_CAP_SROM) == 0) ? NULL : \
+ ((u8 *)curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP)) : \
+ ((u8 *)curmap + PCI_BAR0_SPROM_OFFSET))
+
+#if defined(BCMDBG)
+#define WRITE_ENABLE_DELAY 500 /* 500 ms after write enable/disable toggle */
+#define WRITE_WORD_DELAY 20 /* 20 ms between each word write */
+#endif
+
+/* Maximum srom: 6 Kilobits == 768 bytes */
+#define SROM_MAX 768
+
+/* PCI fields */
+#define PCI_F0DEVID 48
+
+#define SROM_WORDS 64
+
+#define SROM_SSID 2
+
+#define SROM_WL1LHMAXP 29
+
+#define SROM_WL1LPAB0 30
+#define SROM_WL1LPAB1 31
+#define SROM_WL1LPAB2 32
+
+#define SROM_WL1HPAB0 33
+#define SROM_WL1HPAB1 34
+#define SROM_WL1HPAB2 35
+
+#define SROM_MACHI_IL0 36
+#define SROM_MACMID_IL0 37
+#define SROM_MACLO_IL0 38
+#define SROM_MACHI_ET1 42
+#define SROM_MACMID_ET1 43
+#define SROM_MACLO_ET1 44
+#define SROM3_MACHI 37
+#define SROM3_MACMID 38
+#define SROM3_MACLO 39
+
+#define SROM_BXARSSI2G 40
+#define SROM_BXARSSI5G 41
+
+#define SROM_TRI52G 42
+#define SROM_TRI5GHL 43
+
+#define SROM_RXPO52G 45
+
+#define SROM_AABREV 46
+/* Fields in AABREV */
+#define SROM_BR_MASK 0x00ff
+#define SROM_CC_MASK 0x0f00
+#define SROM_CC_SHIFT 8
+#define SROM_AA0_MASK 0x3000
+#define SROM_AA0_SHIFT 12
+#define SROM_AA1_MASK 0xc000
+#define SROM_AA1_SHIFT 14
+
+#define SROM_WL0PAB0 47
+#define SROM_WL0PAB1 48
+#define SROM_WL0PAB2 49
+
+#define SROM_LEDBH10 50
+#define SROM_LEDBH32 51
+
+#define SROM_WL10MAXP 52
+
+#define SROM_WL1PAB0 53
+#define SROM_WL1PAB1 54
+#define SROM_WL1PAB2 55
+
+#define SROM_ITT 56
+
+#define SROM_BFL 57
+#define SROM_BFL2 28
+#define SROM3_BFL2 61
+
+#define SROM_AG10 58
+
+#define SROM_CCODE 59
+
+#define SROM_OPO 60
+
+#define SROM3_LEDDC 62
+
+#define SROM_CRCREV 63
+
+/* SROM Rev 4: Reallocate the software part of the srom to accommodate
+ * MIMO features. It assumes up to two PCIE functions and 440 bytes
+ * of usable srom, i.e. the usable storage in chips with OTP that
+ * implements hardware redundancy.
+ */
+
+#define SROM4_WORDS 220
+
+#define SROM4_SIGN 32
+#define SROM4_SIGNATURE 0x5372
+
+#define SROM4_BREV 33
+
+#define SROM4_BFL0 34
+#define SROM4_BFL1 35
+#define SROM4_BFL2 36
+#define SROM4_BFL3 37
+#define SROM5_BFL0 37
+#define SROM5_BFL1 38
+#define SROM5_BFL2 39
+#define SROM5_BFL3 40
+
+#define SROM4_MACHI 38
+#define SROM4_MACMID 39
+#define SROM4_MACLO 40
+#define SROM5_MACHI 41
+#define SROM5_MACMID 42
+#define SROM5_MACLO 43
+
+#define SROM4_CCODE 41
+#define SROM4_REGREV 42
+#define SROM5_CCODE 34
+#define SROM5_REGREV 35
+
+#define SROM4_LEDBH10 43
+#define SROM4_LEDBH32 44
+#define SROM5_LEDBH10 59
+#define SROM5_LEDBH32 60
+
+#define SROM4_LEDDC 45
+#define SROM5_LEDDC 45
+
+#define SROM4_AA 46
+
+#define SROM4_AG10 47
+#define SROM4_AG32 48
+#define SROM4_TXPID2G 49
+#define SROM4_TXPID5G 51
+#define SROM4_TXPID5GL 53
+#define SROM4_TXPID5GH 55
+
+#define SROM4_TXRXC 61
+#define SROM4_TXCHAIN_MASK 0x000f
+#define SROM4_TXCHAIN_SHIFT 0
+#define SROM4_RXCHAIN_MASK 0x00f0
+#define SROM4_RXCHAIN_SHIFT 4
+#define SROM4_SWITCH_MASK 0xff00
+#define SROM4_SWITCH_SHIFT 8
+
+/* Per-path fields */
+#define MAX_PATH_SROM 4
+#define SROM4_PATH0 64
+#define SROM4_PATH1 87
+#define SROM4_PATH2 110
+#define SROM4_PATH3 133
+
+#define SROM4_2G_ITT_MAXP 0
+#define SROM4_2G_PA 1
+#define SROM4_5G_ITT_MAXP 5
+#define SROM4_5GLH_MAXP 6
+#define SROM4_5G_PA 7
+#define SROM4_5GL_PA 11
+#define SROM4_5GH_PA 15
+
+/* All the myriad power offsets */
+#define SROM4_2G_CCKPO 156
+#define SROM4_2G_OFDMPO 157
+#define SROM4_5G_OFDMPO 159
+#define SROM4_5GL_OFDMPO 161
+#define SROM4_5GH_OFDMPO 163
+#define SROM4_2G_MCSPO 165
+#define SROM4_5G_MCSPO 173
+#define SROM4_5GL_MCSPO 181
+#define SROM4_5GH_MCSPO 189
+#define SROM4_CDDPO 197
+#define SROM4_STBCPO 198
+#define SROM4_BW40PO 199
+#define SROM4_BWDUPPO 200
+
+#define SROM4_CRCREV 219
+
+/* SROM Rev 8: Make space for a 48word hardware header for PCIe rev >= 6.
+ * This is a combined srom for both MIMO and SISO boards, usable in
+ * the .130 4Kilobit OTP with hardware redundancy.
+ */
+#define SROM8_BREV 65
+
+#define SROM8_BFL0 66
+#define SROM8_BFL1 67
+#define SROM8_BFL2 68
+#define SROM8_BFL3 69
+
+#define SROM8_MACHI 70
+#define SROM8_MACMID 71
+#define SROM8_MACLO 72
+
+#define SROM8_CCODE 73
+#define SROM8_REGREV 74
+
+#define SROM8_LEDBH10 75
+#define SROM8_LEDBH32 76
+
+#define SROM8_LEDDC 77
+
+#define SROM8_AA 78
+
+#define SROM8_AG10 79
+#define SROM8_AG32 80
+
+#define SROM8_TXRXC 81
+
+#define SROM8_BXARSSI2G 82
+#define SROM8_BXARSSI5G 83
+#define SROM8_TRI52G 84
+#define SROM8_TRI5GHL 85
+#define SROM8_RXPO52G 86
+
+#define SROM8_FEM2G 87
+#define SROM8_FEM5G 88
+#define SROM8_FEM_ANTSWLUT_MASK 0xf800
+#define SROM8_FEM_ANTSWLUT_SHIFT 11
+#define SROM8_FEM_TR_ISO_MASK 0x0700
+#define SROM8_FEM_TR_ISO_SHIFT 8
+#define SROM8_FEM_PDET_RANGE_MASK 0x00f8
+#define SROM8_FEM_PDET_RANGE_SHIFT 3
+#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006
+#define SROM8_FEM_EXTPA_GAIN_SHIFT 1
+#define SROM8_FEM_TSSIPOS_MASK 0x0001
+#define SROM8_FEM_TSSIPOS_SHIFT 0
+
+#define SROM8_THERMAL 89
+
+/* Temp sense related entries */
+#define SROM8_MPWR_RAWTS 90
+#define SROM8_TS_SLP_OPT_CORRX 91
+/* FOC: frequency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */
+#define SROM8_FOC_HWIQ_IQSWP 92
+
+/* Temperature delta for PHY calibration */
+#define SROM8_PHYCAL_TEMPDELTA 93
+
+/* Per-path offsets & fields */
+#define SROM8_PATH0 96
+#define SROM8_PATH1 112
+#define SROM8_PATH2 128
+#define SROM8_PATH3 144
+
+#define SROM8_2G_ITT_MAXP 0
+#define SROM8_2G_PA 1
+#define SROM8_5G_ITT_MAXP 4
+#define SROM8_5GLH_MAXP 5
+#define SROM8_5G_PA 6
+#define SROM8_5GL_PA 9
+#define SROM8_5GH_PA 12
+
+/* All the myriad power offsets */
+#define SROM8_2G_CCKPO 160
+
+#define SROM8_2G_OFDMPO 161
+#define SROM8_5G_OFDMPO 163
+#define SROM8_5GL_OFDMPO 165
+#define SROM8_5GH_OFDMPO 167
+
+#define SROM8_2G_MCSPO 169
+#define SROM8_5G_MCSPO 177
+#define SROM8_5GL_MCSPO 185
+#define SROM8_5GH_MCSPO 193
+
+#define SROM8_CDDPO 201
+#define SROM8_STBCPO 202
+#define SROM8_BW40PO 203
+#define SROM8_BWDUPPO 204
+
+/* SISO PA parameters are in the path0 spaces */
+#define SROM8_SISO 96
+
+/* Legacy names for SISO PA parameters */
+#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP)
+#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA)
+#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1)
+#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2)
+#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP)
+#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP)
+#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA)
+#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1)
+#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2)
+#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA)
+#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1)
+#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2)
+#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA)
+#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1)
+#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2)
+
+/* SROM REV 9 */
+#define SROM9_2GPO_CCKBW20 160
+#define SROM9_2GPO_CCKBW20UL 161
+#define SROM9_2GPO_LOFDMBW20 162
+#define SROM9_2GPO_LOFDMBW20UL 164
+
+#define SROM9_5GLPO_LOFDMBW20 166
+#define SROM9_5GLPO_LOFDMBW20UL 168
+#define SROM9_5GMPO_LOFDMBW20 170
+#define SROM9_5GMPO_LOFDMBW20UL 172
+#define SROM9_5GHPO_LOFDMBW20 174
+#define SROM9_5GHPO_LOFDMBW20UL 176
+
+#define SROM9_2GPO_MCSBW20 178
+#define SROM9_2GPO_MCSBW20UL 180
+#define SROM9_2GPO_MCSBW40 182
+
+#define SROM9_5GLPO_MCSBW20 184
+#define SROM9_5GLPO_MCSBW20UL 186
+#define SROM9_5GLPO_MCSBW40 188
+#define SROM9_5GMPO_MCSBW20 190
+#define SROM9_5GMPO_MCSBW20UL 192
+#define SROM9_5GMPO_MCSBW40 194
+#define SROM9_5GHPO_MCSBW20 196
+#define SROM9_5GHPO_MCSBW20UL 198
+#define SROM9_5GHPO_MCSBW40 200
+
+#define SROM9_PO_MCS32 202
+#define SROM9_PO_LOFDM40DUP 203
+
+/* SROM flags (see struct brcms_sromvar) */
#define SRFL_MORE 1 /* value continues as described by the next entry */
#define SRFL_NOFFS 2 /* value bits can't be all one's */
#define SRFL_PRHEX 4 /* value is in hexadecimal format */
@@ -36,6 +358,23 @@ typedef struct {
#define SRFL_LEDDC 0x40 /* value is an LED duty cycle */
#define SRFL_NOVAR 0x80 /* do not generate a nvram param, entry is for mfgc */
+/* Max. nvram variable table size */
+#define MAXSZ_NVRAM_VARS 4096
+
+struct brcms_sromvar {
+ const char *name;
+ u32 revmask;
+ u32 flags;
+ u16 off;
+ u16 mask;
+};
+
+struct brcms_varbuf {
+ char *base; /* pointer to buffer base */
+ char *buf; /* pointer to current position */
+ unsigned int size; /* current (residual) size in bytes */
+};
+
/* Assumptions:
 * - Ethernet address spans across 3 consecutive words
*
@@ -48,8 +387,7 @@ typedef struct {
* - The last entry's name field must be NULL to indicate the end of the table. Other
* entries must have non-NULL name.
*/
-
-static const sromvar_t pci_sromvars[] = {
+static const struct brcms_sromvar pci_sromvars[] = {
{"devid", 0xffffff00, SRFL_PRHEX | SRFL_NOVAR, PCI_F0DEVID, 0xffff},
{"boardrev", 0x0000000e, SRFL_PRHEX, SROM_AABREV, SROM_BR_MASK},
{"boardrev", 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff},
@@ -393,7 +731,7 @@ static const sromvar_t pci_sromvars[] = {
{NULL, 0, 0, 0, 0}
};
-static const sromvar_t perpath_pci_sromvars[] = {
+static const struct brcms_sromvar perpath_pci_sromvars[] = {
{"maxp2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff},
{"itt2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00},
{"itt5ga", 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00},
@@ -437,77 +775,463 @@ static const sromvar_t perpath_pci_sromvars[] = {
{NULL, 0, 0, 0, 0}
};
-#if !(defined(PHY_TYPE_N) && defined(PHY_TYPE_LP))
-#define PHY_TYPE_N 4 /* N-Phy value */
-#define PHY_TYPE_LP 5 /* LP-Phy value */
-#endif /* !(defined(PHY_TYPE_N) && defined(PHY_TYPE_LP)) */
-#if !defined(PHY_TYPE_NULL)
-#define PHY_TYPE_NULL 0xf /* Invalid Phy value */
-#endif /* !defined(PHY_TYPE_NULL) */
-
-typedef struct {
- u16 phy_type;
- u16 bandrange;
- u16 chain;
- const char *vars;
-} pavars_t;
-
-static const pavars_t pavars[] = {
- /* NPHY */
- {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"},
- {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"},
- {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GL, 0,
- "pa5glw0a0 pa5glw1a0 pa5glw2a0"},
- {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GL, 1,
- "pa5glw0a1 pa5glw1a1 pa5glw2a1"},
- {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GM, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"},
- {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GM, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"},
- {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GH, 0,
- "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"},
- {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GH, 1,
- "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"},
- /* LPPHY */
- {PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_2G, 0, "pa0b0 pa0b1 pa0b2"},
- {PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_5GL, 0, "pa1lob0 pa1lob1 pa1lob2"},
- {PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_5GM, 0, "pa1b0 pa1b1 pa1b2"},
- {PHY_TYPE_LP, WL_CHAN_FREQ_RANGE_5GH, 0, "pa1hib0 pa1hib1 pa1hib2"},
- {PHY_TYPE_NULL, 0, 0, ""}
-};
+static void _initvars_srom_pci(u8 sromrev, u16 *srom, uint off,
+ struct brcms_varbuf *b);
+static int initvars_srom_pci(struct si_pub *sih, void *curmap, char **vars,
+ uint *count);
+static int sprom_read_pci(struct si_pub *sih, u16 *sprom,
+ uint wordoff, u16 *buf, uint nwords, bool check_crc);
+#if defined(BCMNVRAMR)
+static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz);
+#endif
-typedef struct {
- u16 phy_type;
- u16 bandrange;
- const char *vars;
-} povars_t;
-
-static const povars_t povars[] = {
- /* NPHY */
- {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G,
- "mcs2gpo0 mcs2gpo1 mcs2gpo2 mcs2gpo3 "
- "mcs2gpo4 mcs2gpo5 mcs2gpo6 mcs2gpo7"},
- {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GL,
- "mcs5glpo0 mcs5glpo1 mcs5glpo2 mcs5glpo3 "
- "mcs5glpo4 mcs5glpo5 mcs5glpo6 mcs5glpo7"},
- {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GM,
- "mcs5gpo0 mcs5gpo1 mcs5gpo2 mcs5gpo3 "
- "mcs5gpo4 mcs5gpo5 mcs5gpo6 mcs5gpo7"},
- {PHY_TYPE_N, WL_CHAN_FREQ_RANGE_5GH,
- "mcs5ghpo0 mcs5ghpo1 mcs5ghpo2 mcs5ghpo3 "
- "mcs5ghpo4 mcs5ghpo5 mcs5ghpo6 mcs5ghpo7"},
- {PHY_TYPE_NULL, 0, ""}
-};
+static int initvars_table(char *start, char *end,
+ char **vars, uint *count);
+
+/* Initialization of varbuf structure */
+static void varbuf_init(struct brcms_varbuf *b, char *buf, uint size)
+{
+ b->size = size;
+ b->base = b->buf = buf;
+}
+
+/* append a null terminated var=value string */
+static int varbuf_append(struct brcms_varbuf *b, const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+ size_t len;
+ char *s;
+
+ if (b->size < 2)
+ return 0;
+
+ va_start(ap, fmt);
+ r = vsnprintf(b->buf, b->size, fmt, ap);
+ va_end(ap);
+
+ /* C99 snprintf behavior returns r >= size on overflow,
+ * others return -1 on overflow.
+ * All return -1 on format error.
+ * We need to leave room for 2 null terminations, one for the current var
+ * string, and one for final null of the var table. So check that the
+ * strlen written, r, leaves room for 2 chars.
+ */
+ if ((r == -1) || (r > (int)(b->size - 2))) {
+ b->size = 0;
+ return 0;
+ }
+
+ /* Remove any earlier occurrence of the same variable */
+ s = strchr(b->buf, '=');
+ if (s != NULL) {
+ len = (size_t) (s - b->buf);
+ for (s = b->base; s < b->buf;) {
+ if ((memcmp(s, b->buf, len) == 0) && s[len] == '=') {
+ len = strlen(s) + 1;
+ memmove(s, (s + len),
+ ((b->buf + r + 1) - (s + len)));
+ b->buf -= len;
+ b->size += (unsigned int)len;
+ break;
+ }
+
+ while (*s++)
+ ;
+ }
+ }
+
+ /* skip over this string's null termination */
+ r++;
+ b->size -= r;
+ b->buf += r;
+
+ return r;
+}
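For illustration only, not part of the patch: a minimal sketch of the varbuf pattern used throughout this file, assuming a caller-supplied buffer; the variable name and values are hypothetical. varbuf_append() both formats the "name=value" string and drops any earlier definition of the same variable.

static void varbuf_example(char *buf, uint size)
{
	struct brcms_varbuf b;

	varbuf_init(&b, buf, size);
	varbuf_append(&b, "boardrev=0x%x", 0x1307);	/* hypothetical value */
	varbuf_append(&b, "boardrev=0x%x", 0x1403);	/* replaces the entry above */
	/* b.buf now points just past the last NUL; the caller appends the
	 * final table terminator, as initvars_srom_pci() does below.
	 */
}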
+
+/*
+ * Initialize local vars from the right source for this platform.
+ * Return 0 on success, nonzero on error.
+ */
+int srom_var_init(struct si_pub *sih, uint bustype, void *curmap,
+ char **vars, uint *count)
+{
+ uint len;
+
+ len = 0;
+
+ if (vars == NULL || count == NULL)
+ return 0;
+
+ *vars = NULL;
+ *count = 0;
+
+ if (curmap != NULL && bustype == PCI_BUS)
+ return initvars_srom_pci(sih, curmap, vars, count);
+
+ return -EINVAL;
+}
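A hedged usage sketch, not in the patch: on success, the table handed back through 'vars' is a run of NUL-terminated "name=value" strings closed by an extra NUL, so a caller could walk it like this.

static void walk_srom_vars(char *vars)
{
	const char *s;

	/* the table ends with an empty string (double NUL) */
	for (s = vars; s && *s; s += strlen(s) + 1)
		pr_debug("srom var: %s\n", s);
}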
+
+static inline void ltoh16_buf(u16 *buf, unsigned int size)
+{
+	for (size /= 2; size; size--)
+		*(buf + size - 1) = le16_to_cpu(*(buf + size - 1));
+}
+
+static inline void htol16_buf(u16 *buf, unsigned int size)
+{
+	for (size /= 2; size; size--)
+		*(buf + size - 1) = cpu_to_le16(*(buf + size - 1));
+}
+
+/*
+ * Read in and validate sprom.
+ * Return 0 on success, nonzero on error.
+ */
+static int
+sprom_read_pci(struct si_pub *sih, u16 *sprom, uint wordoff,
+ u16 *buf, uint nwords, bool check_crc)
+{
+ int err = 0;
+ uint i;
+
+ /* read the sprom */
+ for (i = 0; i < nwords; i++)
+ buf[i] = R_REG(&sprom[wordoff + i]);
+
+ if (check_crc) {
+
+ if (buf[0] == 0xffff) {
+ /* The hardware thinks that an srom that starts with 0xffff
+ * is blank, regardless of the rest of the content, so declare
+ * it bad.
+ */
+ return -ENODATA;
+ }
+
+ /* fixup the endianness so crc8 will pass */
+ htol16_buf(buf, nwords * 2);
+ if (brcmu_crc8((u8 *) buf, nwords * 2, CRC8_INIT_VALUE) !=
+ CRC8_GOOD_VALUE) {
+			/* DBG only: pci always reads srom4 first, then srom8/9 */
+ err = -EIO;
+ }
+ /* now correct the endianness of the byte array */
+ ltoh16_buf(buf, nwords * 2);
+ }
+ return err;
+}
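A reading aid, not part of the patch: the crc8 runs over the raw little-endian byte image, including the CRC byte stored in the top word, which is why a good image hashes to the fixed CRC8_GOOD_VALUE. A standalone check of an in-memory word buffer would look roughly like this.

static bool srom_words_crc_ok(u16 *words, uint nwords)
{
	bool ok;

	/* crc8 must see the bytes exactly as they sit in the (LE) SROM */
	htol16_buf(words, nwords * 2);
	ok = brcmu_crc8((u8 *)words, nwords * 2, CRC8_INIT_VALUE) ==
	     CRC8_GOOD_VALUE;
	/* restore host byte order */
	ltoh16_buf(words, nwords * 2);
	return ok;
}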
+
+#if defined(BCMNVRAMR)
+static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
+{
+ u8 *otp;
+ uint sz = OTP_SZ_MAX / 2; /* size in words */
+ int err = 0;
+
+ otp = kzalloc(OTP_SZ_MAX, GFP_ATOMIC);
+ if (otp == NULL) {
+ return -ENOMEM;
+ }
+
+ err = otp_read_region(sih, OTP_HW_RGN, (u16 *) otp, &sz);
+
+ memcpy(buf, otp, bufsz);
+
+ kfree(otp);
+
+ /* Check CRC */
+ if (buf[0] == 0xffff) {
+ /* The hardware thinks that an srom that starts with 0xffff
+ * is blank, regardless of the rest of the content, so declare
+ * it bad.
+ */
+ return -ENODATA;
+ }
+
+ /* fixup the endianness so crc8 will pass */
+ htol16_buf(buf, bufsz);
+ if (brcmu_crc8((u8 *) buf, SROM4_WORDS * 2, CRC8_INIT_VALUE) !=
+ CRC8_GOOD_VALUE) {
+ err = -EIO;
+ }
+ /* now correct the endianness of the byte array */
+ ltoh16_buf(buf, bufsz);
+
+ return err;
+}
+#endif /* defined(BCMNVRAMR) */
+/*
+ * Create variable table from memory.
+ * Return 0 on success, nonzero on error.
+ */
+static int initvars_table(char *start, char *end,
+ char **vars, uint *count)
+{
+ int c = (int)(end - start);
+
+ /* do it only when there is more than just the null string */
+ if (c > 1) {
+ char *vp = kmalloc(c, GFP_ATOMIC);
+ if (!vp)
+ return -ENOMEM;
+ memcpy(vp, start, c);
+ *vars = vp;
+ *count = c;
+ } else {
+ *vars = NULL;
+ *count = 0;
+ }
+
+ return 0;
+}
+
+/* Parse SROM and create name=value pairs. 'srom' points to
+ * the SROM word array. 'off' specifies the offset of the
+ * first word 'srom' points to, which should be either 0 or
+ * SROM3_SWRG_OFF (full SROM or software region).
+ */
+
+static uint mask_shift(u16 mask)
+{
+ uint i;
+ for (i = 0; i < (sizeof(mask) << 3); i++) {
+ if (mask & (1 << i))
+ return i;
+ }
+ return 0;
+}
+
+static uint mask_width(u16 mask)
+{
+ int i;
+ for (i = (sizeof(mask) << 3) - 1; i >= 0; i--) {
+ if (mask & (1 << i))
+ return (uint) (i - mask_shift(mask) + 1);
+ }
+ return 0;
+}
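For illustration only, not in the patch: how the two helpers combine to pull a bit field out of an SROM word; the mask is an arbitrary example value.

static u16 extract_field_example(u16 word)
{
	const u16 mask = 0x0f00;	/* example field mask */

	/* mask_shift(0x0f00) == 8, mask_width(0x0f00) == 4 */
	return (word & mask) >> mask_shift(mask);
}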
+
+static void
+_initvars_srom_pci(u8 sromrev, u16 *srom, uint off, struct brcms_varbuf *b)
+{
+ u16 w;
+ u32 val;
+ const struct brcms_sromvar *srv;
+ uint width;
+ uint flags;
+ u32 sr = (1 << sromrev);
+
+ varbuf_append(b, "sromrev=%d", sromrev);
+
+ for (srv = pci_sromvars; srv->name != NULL; srv++) {
+ const char *name;
+
+ if ((srv->revmask & sr) == 0)
+ continue;
+
+ if (srv->off < off)
+ continue;
+
+ flags = srv->flags;
+ name = srv->name;
+
+		/* This entry is for mfgc only. Don't generate a param for it. */
+ if (flags & SRFL_NOVAR)
+ continue;
+
+ if (flags & SRFL_ETHADDR) {
+ u8 ea[ETH_ALEN];
+
+ ea[0] = (srom[srv->off - off] >> 8) & 0xff;
+ ea[1] = srom[srv->off - off] & 0xff;
+ ea[2] = (srom[srv->off + 1 - off] >> 8) & 0xff;
+ ea[3] = srom[srv->off + 1 - off] & 0xff;
+ ea[4] = (srom[srv->off + 2 - off] >> 8) & 0xff;
+ ea[5] = srom[srv->off + 2 - off] & 0xff;
+
+ varbuf_append(b, "%s=%pM", name, ea);
+ } else {
+ w = srom[srv->off - off];
+ val = (w & srv->mask) >> mask_shift(srv->mask);
+ width = mask_width(srv->mask);
+
+ while (srv->flags & SRFL_MORE) {
+ srv++;
+ if (srv->off == 0 || srv->off < off)
+ continue;
+
+ w = srom[srv->off - off];
+ val +=
+ ((w & srv->mask) >> mask_shift(srv->
+ mask)) <<
+ width;
+ width += mask_width(srv->mask);
+ }
+
+ if ((flags & SRFL_NOFFS)
+ && ((int)val == (1 << width) - 1))
+ continue;
+
+ if (flags & SRFL_CCODE) {
+ if (val == 0)
+ varbuf_append(b, "ccode=");
+ else
+ varbuf_append(b, "ccode=%c%c",
+ (val >> 8), (val & 0xff));
+ }
+ /* LED Powersave duty cycle has to be scaled:
+ *(oncount >> 24) (offcount >> 8)
+ */
+ else if (flags & SRFL_LEDDC) {
+ u32 w32 = (((val >> 8) & 0xff) << 24) | /* oncount */
+ (((val & 0xff)) << 8); /* offcount */
+ varbuf_append(b, "leddc=%d", w32);
+ } else if (flags & SRFL_PRHEX)
+ varbuf_append(b, "%s=0x%x", name, val);
+ else if ((flags & SRFL_PRSIGN)
+ && (val & (1 << (width - 1))))
+ varbuf_append(b, "%s=%d", name,
+ (int)(val | (~0 << width)));
+ else
+ varbuf_append(b, "%s=%u", name, val);
+ }
+ }
+
+ if (sromrev >= 4) {
+ /* Do per-path variables */
+ uint p, pb, psz;
+
+ if (sromrev >= 8) {
+ pb = SROM8_PATH0;
+ psz = SROM8_PATH1 - SROM8_PATH0;
+ } else {
+ pb = SROM4_PATH0;
+ psz = SROM4_PATH1 - SROM4_PATH0;
+ }
+
+ for (p = 0; p < MAX_PATH_SROM; p++) {
+ for (srv = perpath_pci_sromvars; srv->name != NULL;
+ srv++) {
+ if ((srv->revmask & sr) == 0)
+ continue;
+
+ if (pb + srv->off < off)
+ continue;
+
+				/* This entry is for mfgc only. Don't generate a param for it. */
+ if (srv->flags & SRFL_NOVAR)
+ continue;
+
+ w = srom[pb + srv->off - off];
+ val = (w & srv->mask) >> mask_shift(srv->mask);
+ width = mask_width(srv->mask);
+
+ /* Cheating: no per-path var is more than 1 word */
+
+ if ((srv->flags & SRFL_NOFFS)
+ && ((int)val == (1 << width) - 1))
+ continue;
+
+ if (srv->flags & SRFL_PRHEX)
+ varbuf_append(b, "%s%d=0x%x", srv->name,
+ p, val);
+ else
+ varbuf_append(b, "%s%d=%d", srv->name,
+ p, val);
+ }
+ pb += psz;
+ }
+ }
+}
+
+/*
+ * Initialize nonvolatile variable table from sprom.
+ * Return 0 on success, nonzero on error.
+ */
+static int initvars_srom_pci(struct si_pub *sih, void *curmap, char **vars,
+ uint *count)
+{
+ u16 *srom, *sromwindow;
+ u8 sromrev = 0;
+ u32 sr;
+ struct brcms_varbuf b;
+ char *vp, *base = NULL;
+ int err = 0;
+
+ /*
+	 * Apply CRC over SROM content regardless of whether the SROM is present or not.
+ */
+ srom = kmalloc(SROM_MAX, GFP_ATOMIC);
+ if (!srom)
+ return -ENOMEM;
+
+ sromwindow = (u16 *) SROM_OFFSET(sih);
+ if (ai_is_sprom_available(sih)) {
+ err = sprom_read_pci(sih, sromwindow, 0, srom, SROM_WORDS,
+ true);
+
+ if ((srom[SROM4_SIGN] == SROM4_SIGNATURE) ||
+ (((sih->buscoretype == PCIE_CORE_ID)
+ && (sih->buscorerev >= 6))
+ || ((sih->buscoretype == PCI_CORE_ID)
+ && (sih->buscorerev >= 0xe)))) {
+ /* sromrev >= 4, read more */
+ err = sprom_read_pci(sih, sromwindow, 0, srom,
+ SROM4_WORDS, true);
+ sromrev = srom[SROM4_CRCREV] & 0xff;
+ } else if (err == 0) {
+ /* srom is good and is rev < 4 */
+ /* top word of sprom contains version and crc8 */
+ sromrev = srom[SROM_CRCREV] & 0xff;
+ /* bcm4401 sroms misprogrammed */
+ if (sromrev == 0x10)
+ sromrev = 1;
+ }
+ }
+#if defined(BCMNVRAMR)
+ /* Use OTP if SPROM not available */
+ else {
+ err = otp_read_pci(sih, srom, SROM_MAX);
+ if (err == 0)
+ /* OTP only contain SROM rev8/rev9 for now */
+ sromrev = srom[SROM4_CRCREV] & 0xff;
+ }
+#else
+ else
+ err = -ENODEV;
+#endif
+
+ if (!err) {
+ /* Bitmask for the sromrev */
+ sr = 1 << sromrev;
+
+ /* srom version check: Current valid versions: 1, 2, 3, 4, 5, 8, 9 */
+ if ((sr & 0x33e) == 0) {
+ err = -EINVAL;
+ goto errout;
+ }
+
+ base = kmalloc(MAXSZ_NVRAM_VARS, GFP_ATOMIC);
+ if (!base) {
+ err = -ENOMEM;
+ goto errout;
+ }
+
+ varbuf_init(&b, base, MAXSZ_NVRAM_VARS);
+
+ /* parse SROM into name=value pairs. */
+ _initvars_srom_pci(sromrev, srom, 0, &b);
-typedef struct {
- u8 tag; /* Broadcom subtag name */
- u8 len; /* Length field of the tuple, note that it includes the
- * subtag name (1 byte): 1 + tuple content length
- */
- const char *params;
-} cis_tuple_t;
+ /* final nullbyte terminator */
+ vp = b.buf;
+ *vp++ = '\0';
-#define OTP_RAW (0xff - 1) /* Reserved tuple number for wrvar Raw input */
-#define OTP_VERS_1 (0xff - 2) /* CISTPL_VERS_1 */
-#define OTP_MANFID (0xff - 3) /* CISTPL_MANFID */
-#define OTP_RAW1 (0xff - 4) /* Like RAW, but comes first */
+ err = initvars_table(base, vp, vars, count);
+ kfree(base);
+ }
-#endif /* _bcmsrom_tbl_h_ */
+errout:
+ kfree(srom);
+ return err;
+}
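A reading aid, not part of the patch: the 0x33e test in initvars_srom_pci() is simply the OR of the accepted revision bits, (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5) | (1 << 8) | (1 << 9) == 0x033e, so any sromrev outside {1, 2, 3, 4, 5, 8, 9} drops through to -EINVAL.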
diff --git a/drivers/staging/brcm80211/include/bcmsrom.h b/drivers/staging/brcm80211/brcmsmac/srom.h
index b2dc8951c5d..efc4d1edd86 100644
--- a/drivers/staging/brcm80211/include/bcmsrom.h
+++ b/drivers/staging/brcm80211/brcmsmac/srom.h
@@ -14,16 +14,16 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _bcmsrom_h_
-#define _bcmsrom_h_
+#ifndef _BRCM_SROM_H_
+#define _BRCM_SROM_H_
-#include <bcmsrom_fmt.h>
+#include "types.h"
/* Prototypes */
-extern int srom_var_init(si_t *sih, uint bus, void *curmap,
+extern int srom_var_init(struct si_pub *sih, uint bus, void *curmap,
char **vars, uint *count);
-extern int srom_read(si_t *sih, uint bus, void *curmap,
+extern int srom_read(struct si_pub *sih, uint bus, void *curmap,
uint byteoff, uint nbytes, u16 *buf, bool check_crc);
/* parse standard PCMCIA cis, normally used by SB/PCMCIA/SDIO/SPI/OTP
@@ -31,4 +31,4 @@ extern int srom_read(si_t *sih, uint bus, void *curmap,
*/
extern int srom_parsecis(u8 **pcis, uint ciscnt,
char **vars, uint *count);
-#endif /* _bcmsrom_h_ */
+#endif /* _BRCM_SROM_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_stf.c b/drivers/staging/brcm80211/brcmsmac/stf.c
index c4f58172182..a55ff010178 100644
--- a/drivers/staging/brcm80211/brcmsmac/wlc_stf.c
+++ b/drivers/staging/brcm80211/brcmsmac/stf.c
@@ -14,47 +14,31 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
+#include <net/mac80211.h>
-#include <proto/802.11.h>
-
-#include <bcmdefs.h>
-#include <bcmutils.h>
-#include <aiutils.h>
-#include <wlioctl.h>
-#include <bcmwifi.h>
-#include <bcmnvram.h>
-#include <sbhnddma.h>
-
-#include "wlc_types.h"
+#include "types.h"
#include "d11.h"
-#include "wl_dbg.h"
-#include "wlc_cfg.h"
-#include "wlc_rate.h"
-#include "wlc_scb.h"
-#include "wlc_pub.h"
-#include "wlc_key.h"
-#include "phy/wlc_phy_hal.h"
-#include "wlc_channel.h"
-#include "wlc_main.h"
-#include "wl_export.h"
-#include "wlc_bmac.h"
-#include "wlc_stf.h"
+#include "rate.h"
+#include "phy/phy_hal.h"
+#include "channel.h"
+#include "main.h"
+#include "bmac.h"
+#include "stf.h"
#define MIN_SPATIAL_EXPANSION 0
#define MAX_SPATIAL_EXPANSION 1
-#define WLC_STF_SS_STBC_RX(wlc) (WLCISNPHY(wlc->band) && \
+#define BRCMS_STF_SS_STBC_RX(wlc) (BRCMS_ISNPHY(wlc->band) && \
NREV_GT(wlc->band->phyrev, 3) && NREV_LE(wlc->band->phyrev, 6))
-static bool wlc_stf_stbc_tx_set(struct wlc_info *wlc, s32 int_val);
-static int wlc_stf_txcore_set(struct wlc_info *wlc, u8 Nsts, u8 val);
-static int wlc_stf_spatial_policy_set(struct wlc_info *wlc, int val);
-static void wlc_stf_stbc_rx_ht_update(struct wlc_info *wlc, int val);
+static bool brcms_c_stf_stbc_tx_set(struct brcms_c_info *wlc, s32 int_val);
+static int brcms_c_stf_txcore_set(struct brcms_c_info *wlc, u8 Nsts, u8 val);
+static int brcms_c_stf_spatial_policy_set(struct brcms_c_info *wlc, int val);
+static void brcms_c_stf_stbc_rx_ht_update(struct brcms_c_info *wlc, int val);
-static void _wlc_stf_phy_txant_upd(struct wlc_info *wlc);
-static u16 _wlc_stf_phytxchain_sel(struct wlc_info *wlc, ratespec_t rspec);
+static void _brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+static u16 _brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
+ ratespec_t rspec);
#define NSTS_1 1
#define NSTS_2 2
@@ -68,10 +52,10 @@ const u8 txcore_default[5] = {
(0x0f) /* For Nsts = 4, enable all cores */
};
-static void wlc_stf_stbc_rx_ht_update(struct wlc_info *wlc, int val)
+static void brcms_c_stf_stbc_rx_ht_update(struct brcms_c_info *wlc, int val)
{
/* MIMOPHYs rev3-6 cannot receive STBC with only one rx core active */
- if (WLC_STF_SS_STBC_RX(wlc)) {
+ if (BRCMS_STF_SS_STBC_RX(wlc)) {
if ((wlc->stf->rxstreams == 1) && (val != HT_CAP_RX_STBC_NO))
return;
}
@@ -80,15 +64,15 @@ static void wlc_stf_stbc_rx_ht_update(struct wlc_info *wlc, int val)
wlc->ht_cap.cap_info |= (val << IEEE80211_HT_CAP_RX_STBC_SHIFT);
if (wlc->pub->up) {
- wlc_update_beacon(wlc);
- wlc_update_probe_resp(wlc, true);
+ brcms_c_update_beacon(wlc);
+ brcms_c_update_probe_resp(wlc, true);
}
}
/* every WLC_TEMPSENSE_PERIOD seconds temperature check to decide whether to turn on/off txchain */
-void wlc_tempsense_upd(struct wlc_info *wlc)
+void brcms_c_tempsense_upd(struct brcms_c_info *wlc)
{
- wlc_phy_t *pi = wlc->band->pi;
+ struct brcms_phy_pub *pi = wlc->band->pi;
uint active_chains, txchain;
/* Check if the chip is too hot. Disable one Tx chain, if it is */
@@ -99,21 +83,21 @@ void wlc_tempsense_upd(struct wlc_info *wlc)
if (wlc->stf->txchain == wlc->stf->hw_txchain) {
if (txchain && (txchain < wlc->stf->hw_txchain)) {
/* turn off 1 tx chain */
- wlc_stf_txchain_set(wlc, txchain, true);
+ brcms_c_stf_txchain_set(wlc, txchain, true);
}
} else if (wlc->stf->txchain < wlc->stf->hw_txchain) {
if (txchain == wlc->stf->hw_txchain) {
/* turn back on txchain */
- wlc_stf_txchain_set(wlc, txchain, true);
+ brcms_c_stf_txchain_set(wlc, txchain, true);
}
}
}
void
-wlc_stf_ss_algo_channel_get(struct wlc_info *wlc, u16 *ss_algo_channel,
+brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc, u16 *ss_algo_channel,
chanspec_t chanspec)
{
- tx_power_t power;
+ struct tx_power power;
u8 siso_mcs_id, cdd_mcs_id, stbc_mcs_id;
/* Clear previous settings */
@@ -151,7 +135,7 @@ wlc_stf_ss_algo_channel_get(struct wlc_info *wlc, u16 *ss_algo_channel,
setbit(ss_algo_channel, PHY_TXC1_MODE_STBC);
}
-static bool wlc_stf_stbc_tx_set(struct wlc_info *wlc, s32 int_val)
+static bool brcms_c_stf_stbc_tx_set(struct brcms_c_info *wlc, s32 int_val)
{
if ((int_val != AUTO) && (int_val != OFF) && (int_val != ON)) {
return false;
@@ -161,7 +145,7 @@ static bool wlc_stf_stbc_tx_set(struct wlc_info *wlc, s32 int_val)
return false;
if ((int_val == OFF) || (wlc->stf->txstreams == 1)
- || !WLC_STBC_CAP_PHY(wlc))
+ || !BRCMS_STBC_CAP_PHY(wlc))
wlc->ht_cap.cap_info &= ~IEEE80211_HT_CAP_TX_STBC;
else
wlc->ht_cap.cap_info |= IEEE80211_HT_CAP_TX_STBC;
@@ -172,33 +156,34 @@ static bool wlc_stf_stbc_tx_set(struct wlc_info *wlc, s32 int_val)
return true;
}
-bool wlc_stf_stbc_rx_set(struct wlc_info *wlc, s32 int_val)
+bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val)
{
if ((int_val != HT_CAP_RX_STBC_NO)
&& (int_val != HT_CAP_RX_STBC_ONE_STREAM)) {
return false;
}
- if (WLC_STF_SS_STBC_RX(wlc)) {
+ if (BRCMS_STF_SS_STBC_RX(wlc)) {
if ((int_val != HT_CAP_RX_STBC_NO)
&& (wlc->stf->rxstreams == 1))
return false;
}
- wlc_stf_stbc_rx_ht_update(wlc, int_val);
+ brcms_c_stf_stbc_rx_ht_update(wlc, int_val);
return true;
}
-static int wlc_stf_txcore_set(struct wlc_info *wlc, u8 Nsts, u8 core_mask)
+static int brcms_c_stf_txcore_set(struct brcms_c_info *wlc, u8 Nsts,
+ u8 core_mask)
{
BCMMSG(wlc->wiphy, "wl%d: Nsts %d core_mask %x\n",
wlc->pub->unit, Nsts, core_mask);
- if (WLC_BITSCNT(core_mask) > wlc->stf->txstreams) {
+ if (BRCMS_BITSCNT(core_mask) > wlc->stf->txstreams) {
core_mask = 0;
}
- if ((WLC_BITSCNT(core_mask) == wlc->stf->txstreams) &&
+ if ((BRCMS_BITSCNT(core_mask) == wlc->stf->txstreams) &&
((core_mask & ~wlc->stf->txchain)
|| !(core_mask & wlc->stf->txchain))) {
core_mask = wlc->stf->txchain;
@@ -211,18 +196,18 @@ static int wlc_stf_txcore_set(struct wlc_info *wlc, u8 Nsts, u8 core_mask)
* frames when 1 stream core map changed
*/
wlc->stf->phytxant = core_mask << PHY_TXC_ANT_SHIFT;
- wlc_bmac_txant_set(wlc->hw, wlc->stf->phytxant);
+ brcms_b_txant_set(wlc->hw, wlc->stf->phytxant);
if (wlc->clk) {
- wlc_suspend_mac_and_wait(wlc);
- wlc_beacon_phytxctl_txant_upd(wlc, wlc->bcn_rspec);
- wlc_enable_mac(wlc);
+ brcms_c_suspend_mac_and_wait(wlc);
+ brcms_c_beacon_phytxctl_txant_upd(wlc, wlc->bcn_rspec);
+ brcms_c_enable_mac(wlc);
}
}
return 0;
}
-static int wlc_stf_spatial_policy_set(struct wlc_info *wlc, int val)
+static int brcms_c_stf_spatial_policy_set(struct brcms_c_info *wlc, int val)
{
int i;
u8 core_mask = 0;
@@ -233,12 +218,12 @@ static int wlc_stf_spatial_policy_set(struct wlc_info *wlc, int val)
for (i = 1; i <= MAX_STREAMS_SUPPORTED; i++) {
core_mask = (val == MAX_SPATIAL_EXPANSION) ?
wlc->stf->txchain : txcore_default[i];
- wlc_stf_txcore_set(wlc, (u8) i, core_mask);
+ brcms_c_stf_txcore_set(wlc, (u8) i, core_mask);
}
return 0;
}
-int wlc_stf_txchain_set(struct wlc_info *wlc, s32 int_val, bool force)
+int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force)
{
u8 txchain = (u8) int_val;
u8 txstreams;
@@ -252,7 +237,7 @@ int wlc_stf_txchain_set(struct wlc_info *wlc, s32 int_val, bool force)
return -EINVAL;
/* if nrate override is configured to be non-SISO STF mode, reject reducing txchain to 1 */
- txstreams = (u8) WLC_BITSCNT(txchain);
+ txstreams = (u8) BRCMS_BITSCNT(txchain);
if (txstreams > MAX_STREAMS_SUPPORTED)
return -EINVAL;
@@ -288,24 +273,24 @@ int wlc_stf_txchain_set(struct wlc_info *wlc, s32 int_val, bool force)
wlc->stf->txchain = txchain;
wlc->stf->txstreams = txstreams;
- wlc_stf_stbc_tx_set(wlc, wlc->band->band_stf_stbc_tx);
- wlc_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
- wlc_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
+ brcms_c_stf_stbc_tx_set(wlc, wlc->band->band_stf_stbc_tx);
+ brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
+ brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
wlc->stf->txant =
(wlc->stf->txstreams == 1) ? ANT_TX_FORCE_0 : ANT_TX_DEF;
- _wlc_stf_phy_txant_upd(wlc);
+ _brcms_c_stf_phy_txant_upd(wlc);
wlc_phy_stf_chain_set(wlc->band->pi, wlc->stf->txchain,
wlc->stf->rxchain);
for (i = 1; i <= MAX_STREAMS_SUPPORTED; i++)
- wlc_stf_txcore_set(wlc, (u8) i, txcore_default[i]);
+ brcms_c_stf_txcore_set(wlc, (u8) i, txcore_default[i]);
return 0;
}
/* update wlc->stf->ss_opmode which represents the operational stf_ss mode we're using */
-int wlc_stf_ss_update(struct wlc_info *wlc, struct wlcband *band)
+int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band)
{
int ret_code = 0;
u8 prev_stf_ss;
@@ -314,7 +299,7 @@ int wlc_stf_ss_update(struct wlc_info *wlc, struct wlcband *band)
prev_stf_ss = wlc->stf->ss_opmode;
/* NOTE: opmode can only be SISO or CDD as STBC is decided on a per-packet basis */
- if (WLC_STBC_CAP_PHY(wlc) &&
+ if (BRCMS_STBC_CAP_PHY(wlc) &&
wlc->stf->ss_algosel_auto
&& (wlc->stf->ss_algo_channel != (u16) -1)) {
upd_stf_ss = (wlc->stf->no_cddstbc || (wlc->stf->txstreams == 1)
@@ -331,70 +316,37 @@ int wlc_stf_ss_update(struct wlc_info *wlc, struct wlcband *band)
}
if (prev_stf_ss != upd_stf_ss) {
wlc->stf->ss_opmode = upd_stf_ss;
- wlc_bmac_band_stf_ss_set(wlc->hw, upd_stf_ss);
+ brcms_b_band_stf_ss_set(wlc->hw, upd_stf_ss);
}
return ret_code;
}
-int wlc_stf_attach(struct wlc_info *wlc)
+int brcms_c_stf_attach(struct brcms_c_info *wlc)
{
wlc->bandstate[BAND_2G_INDEX]->band_stf_ss_mode = PHY_TXC1_MODE_SISO;
wlc->bandstate[BAND_5G_INDEX]->band_stf_ss_mode = PHY_TXC1_MODE_CDD;
- if (WLCISNPHY(wlc->band) &&
+ if (BRCMS_ISNPHY(wlc->band) &&
(wlc_phy_txpower_hw_ctrl_get(wlc->band->pi) != PHY_TPC_HW_ON))
wlc->bandstate[BAND_2G_INDEX]->band_stf_ss_mode =
PHY_TXC1_MODE_CDD;
- wlc_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
- wlc_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
+ brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
+ brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
- wlc_stf_stbc_rx_ht_update(wlc, HT_CAP_RX_STBC_NO);
+ brcms_c_stf_stbc_rx_ht_update(wlc, HT_CAP_RX_STBC_NO);
wlc->bandstate[BAND_2G_INDEX]->band_stf_stbc_tx = OFF;
wlc->bandstate[BAND_5G_INDEX]->band_stf_stbc_tx = OFF;
- if (WLC_STBC_CAP_PHY(wlc)) {
+ if (BRCMS_STBC_CAP_PHY(wlc)) {
wlc->stf->ss_algosel_auto = true;
wlc->stf->ss_algo_channel = (u16) -1; /* Init the default value */
}
return 0;
}
-void wlc_stf_detach(struct wlc_info *wlc)
-{
-}
-
-int wlc_stf_ant_txant_validate(struct wlc_info *wlc, s8 val)
+void brcms_c_stf_detach(struct brcms_c_info *wlc)
{
- int bcmerror = 0;
-
- /* when there is only 1 tx_streams, don't allow to change the txant */
- if (WLCISNPHY(wlc->band) && (wlc->stf->txstreams == 1))
- return ((val == wlc->stf->txant) ? bcmerror : -EINVAL);
-
- switch (val) {
- case -1:
- val = ANT_TX_DEF;
- break;
- case 0:
- val = ANT_TX_FORCE_0;
- break;
- case 1:
- val = ANT_TX_FORCE_1;
- break;
- case 3:
- val = ANT_TX_LAST_RX;
- break;
- default:
- bcmerror = -EINVAL;
- break;
- }
-
- if (bcmerror == 0)
- wlc->stf->txant = (s8) val;
-
- return bcmerror;
-
}
/*
@@ -411,24 +363,25 @@ int wlc_stf_ant_txant_validate(struct wlc_info *wlc, s8 val)
* do tx-antenna selection for SISO transmissions
 * for NREV>=7, bit 6 and bit 7 mean antenna 0 and 1 respectively, bit6+bit7 means both cores active
*/
-static void _wlc_stf_phy_txant_upd(struct wlc_info *wlc)
+static void _brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc)
{
s8 txant;
txant = (s8) wlc->stf->txant;
- if (WLC_PHY_11N_CAP(wlc->band)) {
+ if (BRCMS_PHY_11N_CAP(wlc->band)) {
if (txant == ANT_TX_FORCE_0) {
wlc->stf->phytxant = PHY_TXC_ANT_0;
} else if (txant == ANT_TX_FORCE_1) {
wlc->stf->phytxant = PHY_TXC_ANT_1;
- if (WLCISNPHY(wlc->band) &&
+ if (BRCMS_ISNPHY(wlc->band) &&
NREV_GE(wlc->band->phyrev, 3)
&& NREV_LT(wlc->band->phyrev, 7)) {
wlc->stf->phytxant = PHY_TXC_ANT_2;
}
} else {
- if (WLCISLCNPHY(wlc->band) || WLCISSSLPNPHY(wlc->band))
+ if (BRCMS_ISLCNPHY(wlc->band) ||
+ BRCMS_ISSSLPNPHY(wlc->band))
wlc->stf->phytxant = PHY_TXC_LCNPHY_ANT_LAST;
else {
/* catch out of sync wlc->stf->txcore */
@@ -446,15 +399,15 @@ static void _wlc_stf_phy_txant_upd(struct wlc_info *wlc)
wlc->stf->phytxant = PHY_TXC_OLD_ANT_LAST;
}
- wlc_bmac_txant_set(wlc->hw, wlc->stf->phytxant);
+ brcms_b_txant_set(wlc->hw, wlc->stf->phytxant);
}
-void wlc_stf_phy_txant_upd(struct wlc_info *wlc)
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc)
{
- _wlc_stf_phy_txant_upd(wlc);
+ _brcms_c_stf_phy_txant_upd(wlc);
}
-void wlc_stf_phy_chain_calc(struct wlc_info *wlc)
+void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc)
{
/* get available rx/tx chains */
wlc->stf->hw_txchain = (u8) getintvar(wlc->pub->vars, "txchain");
@@ -462,7 +415,7 @@ void wlc_stf_phy_chain_calc(struct wlc_info *wlc)
/* these parameter are intended to be used for all PHY types */
if (wlc->stf->hw_txchain == 0 || wlc->stf->hw_txchain == 0xf) {
- if (WLCISNPHY(wlc->band)) {
+ if (BRCMS_ISNPHY(wlc->band)) {
wlc->stf->hw_txchain = TXCHAIN_DEF_NPHY;
} else {
wlc->stf->hw_txchain = TXCHAIN_DEF;
@@ -470,10 +423,10 @@ void wlc_stf_phy_chain_calc(struct wlc_info *wlc)
}
wlc->stf->txchain = wlc->stf->hw_txchain;
- wlc->stf->txstreams = (u8) WLC_BITSCNT(wlc->stf->hw_txchain);
+ wlc->stf->txstreams = (u8) BRCMS_BITSCNT(wlc->stf->hw_txchain);
if (wlc->stf->hw_rxchain == 0 || wlc->stf->hw_rxchain == 0xf) {
- if (WLCISNPHY(wlc->band)) {
+ if (BRCMS_ISNPHY(wlc->band)) {
wlc->stf->hw_rxchain = RXCHAIN_DEF_NPHY;
} else {
wlc->stf->hw_rxchain = RXCHAIN_DEF;
@@ -481,17 +434,18 @@ void wlc_stf_phy_chain_calc(struct wlc_info *wlc)
}
wlc->stf->rxchain = wlc->stf->hw_rxchain;
- wlc->stf->rxstreams = (u8) WLC_BITSCNT(wlc->stf->hw_rxchain);
+ wlc->stf->rxstreams = (u8) BRCMS_BITSCNT(wlc->stf->hw_rxchain);
/* initialize the txcore table */
memcpy(wlc->stf->txcore, txcore_default, sizeof(wlc->stf->txcore));
/* default spatial_policy */
wlc->stf->spatial_policy = MIN_SPATIAL_EXPANSION;
- wlc_stf_spatial_policy_set(wlc, MIN_SPATIAL_EXPANSION);
+ brcms_c_stf_spatial_policy_set(wlc, MIN_SPATIAL_EXPANSION);
}
-static u16 _wlc_stf_phytxchain_sel(struct wlc_info *wlc, ratespec_t rspec)
+static u16 _brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
+ ratespec_t rspec)
{
u16 phytxant = wlc->stf->phytxant;
@@ -503,19 +457,19 @@ static u16 _wlc_stf_phytxchain_sel(struct wlc_info *wlc, ratespec_t rspec)
return phytxant;
}
-u16 wlc_stf_phytxchain_sel(struct wlc_info *wlc, ratespec_t rspec)
+u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc, ratespec_t rspec)
{
- return _wlc_stf_phytxchain_sel(wlc, rspec);
+ return _brcms_c_stf_phytxchain_sel(wlc, rspec);
}
-u16 wlc_stf_d11hdrs_phyctl_txant(struct wlc_info *wlc, ratespec_t rspec)
+u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc, ratespec_t rspec)
{
u16 phytxant = wlc->stf->phytxant;
u16 mask = PHY_TXC_ANT_MASK;
/* for non-siso rates or default setting, use the available chains */
- if (WLCISNPHY(wlc->band)) {
- phytxant = _wlc_stf_phytxchain_sel(wlc, rspec);
+ if (BRCMS_ISNPHY(wlc->band)) {
+ phytxant = _brcms_c_stf_phytxchain_sel(wlc, rspec);
mask = PHY_TXC_HTANT_MASK;
}
phytxant |= phytxant & mask;
diff --git a/drivers/staging/brcm80211/brcmsmac/stf.h b/drivers/staging/brcm80211/brcmsmac/stf.h
new file mode 100644
index 00000000000..06c2a399649
--- /dev/null
+++ b/drivers/staging/brcm80211/brcmsmac/stf.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_STF_H_
+#define _BRCM_STF_H_
+
+#include "types.h"
+
+extern int brcms_c_stf_attach(struct brcms_c_info *wlc);
+extern void brcms_c_stf_detach(struct brcms_c_info *wlc);
+
+extern void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
+extern void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
+ u16 *ss_algo_channel,
+ chanspec_t chanspec);
+extern int brcms_c_stf_ss_update(struct brcms_c_info *wlc,
+ struct brcms_band *band);
+extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+extern int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val,
+ bool force);
+extern bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
+extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+extern void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
+extern u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
+ ratespec_t rspec);
+extern u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc,
+ ratespec_t rspec);
+
+#endif /* _BRCM_STF_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/types.h b/drivers/staging/brcm80211/brcmsmac/types.h
new file mode 100644
index 00000000000..bbf21897ae0
--- /dev/null
+++ b/drivers/staging/brcm80211/brcmsmac/types.h
@@ -0,0 +1,398 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_TYPES_H_
+#define _BRCM_TYPES_H_
+
+#include <linux/types.h>
+
+/* Bus types */
+#define SI_BUS 0 /* SOC Interconnect */
+#define PCI_BUS 1 /* PCI target */
+#define SDIO_BUS 3 /* SDIO target */
+#define JTAG_BUS 4 /* JTAG */
+#define USB_BUS 5 /* USB (does not support R/W REG) */
+#define SPI_BUS 6 /* gSPI target */
+#define RPC_BUS 7 /* RPC target */
+
+#define WL_CHAN_FREQ_RANGE_2G 0
+#define WL_CHAN_FREQ_RANGE_5GL 1
+#define WL_CHAN_FREQ_RANGE_5GM 2
+#define WL_CHAN_FREQ_RANGE_5GH 3
+
+#define MAX_DMA_SEGS 4
+
+/* boardflags */
+#define BFL_PACTRL 0x00000002 /* Board has gpio 9 controlling the PA */
+#define BFL_NOPLLDOWN 0x00000020 /* Not ok to power down the chip pll and oscillator */
+#define BFL_FEM 0x00000800 /* Board supports the Front End Module */
+#define BFL_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */
+#define BFL_NOPA 0x00010000 /* Board has no PA */
+#define BFL_BUCKBOOST 0x00200000 /* Power topology uses BUCKBOOST */
+#define BFL_FEM_BT 0x00400000 /* Board has FEM and switch to share antenna w/ BT */
+#define BFL_NOCBUCK 0x00800000 /* Power topology doesn't use CBUCK */
+#define BFL_PALDO 0x02000000 /* Power topology uses PALDO */
+#define BFL_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */
+
+/* boardflags2 */
+#define BFL2_RXBB_INT_REG_DIS 0x00000001 /* Board has an external rxbb regulator */
+#define BFL2_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */
+#define BFL2_TXPWRCTRL_EN 0x00000004 /* Board permits enabling TX Power Control */
+#define BFL2_2X4_DIV 0x00000008 /* Board supports the 2X4 diversity switch */
+#define BFL2_5G_PWRGAIN 0x00000010 /* Board supports 5G band power gain */
+#define BFL2_PCIEWAR_OVR 0x00000020 /* Board overrides ASPM and Clkreq settings */
+#define BFL2_LEGACY 0x00000080
+#define BFL2_SKWRKFEM_BRD 0x00000100 /* 4321mcm93 board uses Skyworks FEM */
+#define BFL2_SPUR_WAR 0x00000200 /* Board has a WAR for clock-harmonic spurs */
+#define BFL2_GPLL_WAR 0x00000400 /* Flag to narrow G-band PLL loop b/w */
+#define BFL2_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */
+#define BFL2_2G_SPUR_WAR 0x00002000 /* WAR to reduce and avoid clock-harmonic spurs in 2G */
+#define BFL2_GPLL_WAR2 0x00010000 /* Flag to widen G-band PLL loop b/w */
+#define BFL2_IPALVLSHIFT_3P3 0x00020000
+#define BFL2_INTERNDET_TXIQCAL 0x00040000 /* Use internal envelope detector for TX IQCAL */
+#define BFL2_XTALBUFOUTEN 0x00080000 /* Keep the buffered Xtal output from radio "ON".
+ * Most drivers will turn it off without this flag
+ * to save power.
+ */
+
+/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */
+#define BOARD_GPIO_PACTRL 0x200 /* bit 9 controls the PA on new 4306 boards */
+#define BOARD_GPIO_12 0x1000 /* gpio 12 */
+#define BOARD_GPIO_13 0x2000 /* gpio 13 */
+
+/* **** Core type/rev defaults **** */
+#define D11CONF 0x0fffffb0 /* Supported D11 revs: 4, 5, 7-27
+ * also need to update wlc.h MAXCOREREV
+ */
+
+#define NCONF 0x000001ff /* Supported nphy revs:
+ * 0 4321a0
+ * 1 4321a1
+ * 2 4321b0/b1/c0/c1
+ * 3 4322a0
+ * 4 4322a1
+ * 5 4716a0
+ * 6 43222a0, 43224a0
+ * 7 43226a0
+ * 8 5357a0, 43236a0
+ */
+
+#define LCNCONF 0x00000007 /* Supported lcnphy revs:
+ * 0 4313a0, 4336a0, 4330a0
+ * 1
+ * 2 4330a0
+ */
+
+#define SSLPNCONF 0x0000000f /* Supported sslpnphy revs:
+ * 0 4329a0/k0
+ * 1 4329b0/4329C0
+ * 2 4319a0
+ * 3 5356a0
+ */
+
+/********************************************************************
+ * Phy/Core Configuration. Defines macros to check core phy/rev *
+ * compile-time configuration. Defines default core support. *
+ * ******************************************************************
+ */
+
+/* Basic macros to check a configuration bitmask */
+
+#define CONF_HAS(config, val) ((config) & (1 << (val)))
+#define CONF_MSK(config, mask) ((config) & (mask))
+#define MSK_RANGE(low, hi) ((1 << ((hi)+1)) - (1 << (low)))
+#define CONF_RANGE(config, low, hi) (CONF_MSK(config, MSK_RANGE(low, hi)))
+
+#define CONF_IS(config, val) ((config) == (1 << (val)))
+#define CONF_GE(config, val) ((config) & (0-(1 << (val))))
+#define CONF_GT(config, val) ((config) & (0-2*(1 << (val))))
+#define CONF_LT(config, val) ((config) & ((1 << (val))-1))
+#define CONF_LE(config, val) ((config) & (2*(1 << (val))-1))
+
+/* Wrappers for some of the above, specific to config constants */
+
+#define NCONF_HAS(val) CONF_HAS(NCONF, val)
+#define NCONF_MSK(mask) CONF_MSK(NCONF, mask)
+#define NCONF_IS(val) CONF_IS(NCONF, val)
+#define NCONF_GE(val) CONF_GE(NCONF, val)
+#define NCONF_GT(val) CONF_GT(NCONF, val)
+#define NCONF_LT(val) CONF_LT(NCONF, val)
+#define NCONF_LE(val) CONF_LE(NCONF, val)
+
+#define LCNCONF_HAS(val) CONF_HAS(LCNCONF, val)
+#define LCNCONF_MSK(mask) CONF_MSK(LCNCONF, mask)
+#define LCNCONF_IS(val) CONF_IS(LCNCONF, val)
+#define LCNCONF_GE(val) CONF_GE(LCNCONF, val)
+#define LCNCONF_GT(val) CONF_GT(LCNCONF, val)
+#define LCNCONF_LT(val) CONF_LT(LCNCONF, val)
+#define LCNCONF_LE(val) CONF_LE(LCNCONF, val)
+
+#define D11CONF_HAS(val) CONF_HAS(D11CONF, val)
+#define D11CONF_MSK(mask) CONF_MSK(D11CONF, mask)
+#define D11CONF_IS(val) CONF_IS(D11CONF, val)
+#define D11CONF_GE(val) CONF_GE(D11CONF, val)
+#define D11CONF_GT(val) CONF_GT(D11CONF, val)
+#define D11CONF_LT(val) CONF_LT(D11CONF, val)
+#define D11CONF_LE(val) CONF_LE(D11CONF, val)
+
+#define PHYCONF_HAS(val) CONF_HAS(PHYTYPE, val)
+#define PHYCONF_IS(val) CONF_IS(PHYTYPE, val)
+
+#define NREV_IS(var, val) (NCONF_HAS(val) && (NCONF_IS(val) || ((var) == (val))))
+#define NREV_GE(var, val) (NCONF_GE(val) && (!NCONF_LT(val) || ((var) >= (val))))
+#define NREV_GT(var, val) (NCONF_GT(val) && (!NCONF_LE(val) || ((var) > (val))))
+#define NREV_LT(var, val) (NCONF_LT(val) && (!NCONF_GE(val) || ((var) < (val))))
+#define NREV_LE(var, val) (NCONF_LE(val) && (!NCONF_GT(val) || ((var) <= (val))))
+
+#define LCNREV_IS(var, val) (LCNCONF_HAS(val) && (LCNCONF_IS(val) || ((var) == (val))))
+#define LCNREV_GE(var, val) (LCNCONF_GE(val) && (!LCNCONF_LT(val) || ((var) >= (val))))
+#define LCNREV_GT(var, val) (LCNCONF_GT(val) && (!LCNCONF_LE(val) || ((var) > (val))))
+#define LCNREV_LT(var, val) (LCNCONF_LT(val) && (!LCNCONF_GE(val) || ((var) < (val))))
+#define LCNREV_LE(var, val) (LCNCONF_LE(val) && (!LCNCONF_GT(val) || ((var) <= (val))))
+
+#define D11REV_IS(var, val) (D11CONF_HAS(val) && (D11CONF_IS(val) || ((var) == (val))))
+#define D11REV_GE(var, val) (D11CONF_GE(val) && (!D11CONF_LT(val) || ((var) >= (val))))
+#define D11REV_GT(var, val) (D11CONF_GT(val) && (!D11CONF_LE(val) || ((var) > (val))))
+#define D11REV_LT(var, val) (D11CONF_LT(val) && (!D11CONF_GE(val) || ((var) < (val))))
+#define D11REV_LE(var, val) (D11CONF_LE(val) && (!D11CONF_GT(val) || ((var) <= (val))))
+
+#define PHYTYPE_IS(var, val) (PHYCONF_HAS(val) && (PHYCONF_IS(val) || ((var) == (val))))
+
+/* Finally, early-exit from switch case if anyone wants it... */
+
+#define CASECHECK(config, val) if (!(CONF_HAS(config, val))) break
+#define CASEMSK(config, mask) if (!(CONF_MSK(config, mask))) break
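For orientation only, not part of the patch: the *REV_* macros fold to compile-time constants whenever the corresponding *CONF mask already pins the answer; the phyrev parameter below is a hypothetical runtime value.

static bool nphy_rev_in_stbc_rx_range(uint phyrev)
{
	/* true for N-PHY revs 4..6, provided NCONF admits such revs at all */
	return NREV_GT(phyrev, 3) && NREV_LE(phyrev, 6);
}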
+
+/* Set up PHYTYPE automatically: (depends on PHY_TYPE_X, from d11.h) */
+
+#define _PHYCONF_N (1 << PHY_TYPE_N)
+#define _PHYCONF_LCN (1 << PHY_TYPE_LCN)
+#define _PHYCONF_SSLPN (1 << PHY_TYPE_SSN)
+
+#define PHYTYPE (_PHYCONF_N | _PHYCONF_LCN | _PHYCONF_SSLPN)
+
+/* Utility macro to identify 802.11n (HT) capable PHYs */
+#define PHYTYPE_11N_CAP(phytype) \
+ (PHYTYPE_IS(phytype, PHY_TYPE_N) || \
+ PHYTYPE_IS(phytype, PHY_TYPE_LCN) || \
+ PHYTYPE_IS(phytype, PHY_TYPE_SSN))
+
+/* Last but not least: shorter wlc-specific var checks */
+#define BRCMS_ISNPHY(band) PHYTYPE_IS((band)->phytype, PHY_TYPE_N)
+#define BRCMS_ISLCNPHY(band) PHYTYPE_IS((band)->phytype, PHY_TYPE_LCN)
+#define BRCMS_ISSSLPNPHY(band) PHYTYPE_IS((band)->phytype, PHY_TYPE_SSN)
+
+#define BRCMS_PHY_11N_CAP(band) PHYTYPE_11N_CAP((band)->phytype)
+
+/**********************************************************************
+ * ------------- End of Core phy/rev configuration. ----------------- *
+ * ********************************************************************
+ */
+
+/*************************************************
+ * Defaults for tunables (e.g. sizing constants)
+ *
+ * For each new tunable, add a member to the end
+ * of struct brcms_tunables in brcms_c_pub.h to enable
+ * runtime checks of tunable values. (Directly
+ * using the macros in code invalidates ROM code)
+ *
+ * ***********************************************
+ */
+#define NTXD 256 /* Max # of entries in Tx FIFO based on 4kb page size */
+#define NRXD 256 /* Max # of entries in Rx FIFO based on 4kb page size */
+#define NRXBUFPOST 32 /* try to keep this # rbufs posted to the chip */
+#define MAXSCB 32 /* Maximum SCBs in cache for STA */
+#define AMPDU_NUM_MPDU 16 /* max allowed number of mpdus in an ampdu (2 streams) */
+
+/* Count of packet callback structures. Either of the following:
+ * 1. Set to the number of SCBs, since a STA
+ * can queue up a rate callback for each IBSS STA it knows about, and an AP can
+ * queue up an "are you there?" Null Data callback for each associated STA
+ * 2. Controlled by the tunable config file
+ */
+#define MAXPKTCB MAXSCB /* Max number of packet callbacks */
+
+/* NetBSD also needs to keep track of this */
+
+/* Number of BSS handled in ucode bcn/prb */
+#define BRCMS_MAX_UCODE_BSS (16)
+/* Number of BSS handled in sw bcn/prb */
+#define BRCMS_MAX_UCODE_BSS4 (4)
+/* max # BSS configs */
+#define BRCMS_MAXBSSCFG (1)
+/* max # available networks */
+#define MAXBSS 64
+/* data msg txq hiwat mark */
+#define BRCMS_DATAHIWAT 50
+#define BRCMS_AMPDUDATAHIWAT 255
+
+/* bounded rx loops */
+#define RXBND 8 /* max # frames to process in brcms_c_recv() */
+#define TXSBND 8 /* max # tx status to process in wlc_txstatus() */
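
The defaults above are meant to be mirrored by members of struct brcms_tunables so the driver can validate them at run time rather than baking the macros into ROM code. A minimal sketch of that pattern follows; the member names are assumptions derived from the macro names, not taken from brcms_c_pub.h in this patch.

/* Sketch only: member names assumed from the tunable macros above */
struct example_tunables {
	int ntxd;	/* # tx descriptors */
	int nrxd;	/* # rx descriptors */
	int rxbnd;	/* max rx frames processed per pass */
};

static void example_tunables_init(struct example_tunables *tune)
{
	/* copy the compile-time defaults into run-time state once, so code
	 * reading the struct keeps working if the macros later change
	 */
	tune->ntxd = NTXD;
	tune->nrxd = NRXD;
	tune->rxbnd = RXBND;
}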
+
+#define BAND_5G(bt) ((bt) == BRCM_BAND_5G)
+#define BAND_2G(bt) ((bt) == BRCM_BAND_2G)
+
+#define BCMMSG(dev, fmt, args...) \
+do { \
+ if (brcm_msg_level & LOG_TRACE_VAL) \
+ wiphy_err(dev, "%s: " fmt, __func__, ##args); \
+} while (0)
+
+#define WL_ERROR_ON() (brcm_msg_level & LOG_ERROR_VAL)
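
A minimal usage sketch of the two logging helpers above. It assumes defs.h provides LOG_TRACE_VAL/LOG_ERROR_VAL, that the usual cfg80211 headers are in scope for wiphy_err(), and that a valid struct wiphy pointer is at hand; the message text is made up.

static void example_log(struct wiphy *wiphy)
{
	/* emitted only when the trace bit is set in brcm_msg_level */
	BCMMSG(wiphy, "interrupt status 0x%08x\n", 0u);

	/* error paths can be gated the same way */
	if (WL_ERROR_ON())
		wiphy_err(wiphy, "example: dma stall detected\n");
}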
+
+/* register access macros */
+#ifndef __BIG_ENDIAN
+#ifndef __mips__
+#define R_REG(r) \
+ ({\
+ sizeof(*(r)) == sizeof(u8) ? \
+ readb((u8 *)(r)) : \
+ sizeof(*(r)) == sizeof(u16) ? readw((u16 *)(r)) : \
+ readl((u32 *)(r)); \
+ })
+#else /* __mips__ */
+#define R_REG(r) \
+ ({ \
+ __typeof(*(r)) __osl_v; \
+ __asm__ __volatile__("sync"); \
+ switch (sizeof(*(r))) { \
+ case sizeof(u8): \
+ __osl_v = readb((u8 *)(r)); \
+ break; \
+ case sizeof(u16): \
+ __osl_v = readw((u16 *)(r)); \
+ break; \
+ case sizeof(u32): \
+ __osl_v = \
+ readl((u32 *)(r)); \
+ break; \
+ } \
+ __asm__ __volatile__("sync"); \
+ __osl_v; \
+ })
+#endif /* __mips__ */
+
+#define W_REG(r, v) do { \
+ switch (sizeof(*(r))) { \
+ case sizeof(u8): \
+ writeb((u8)(v), (u8 *)(r)); break; \
+ case sizeof(u16): \
+ writew((u16)(v), (u16 *)(r)); break; \
+ case sizeof(u32): \
+ writel((u32)(v), (u32 *)(r)); break; \
+ }; \
+ } while (0)
+#else /* __BIG_ENDIAN */
+#define R_REG(r) \
+ ({ \
+ __typeof(*(r)) __osl_v; \
+ switch (sizeof(*(r))) { \
+ case sizeof(u8): \
+ __osl_v = \
+ readb((u8 *)((r)^3)); \
+ break; \
+ case sizeof(u16): \
+ __osl_v = \
+ readw((u16 *)((r)^2)); \
+ break; \
+ case sizeof(u32): \
+ __osl_v = readl((u32 *)(r)); \
+ break; \
+ } \
+ __osl_v; \
+ })
+
+#define W_REG(r, v) do { \
+ switch (sizeof(*(r))) { \
+ case sizeof(u8): \
+ writeb((u8)(v), \
+ (u8 *)((r)^3)); break; \
+ case sizeof(u16): \
+ writew((u16)(v), \
+ (u16 *)((r)^2)); break; \
+ case sizeof(u32): \
+ writel((u32)(v), \
+ (u32 *)(r)); break; \
+ } \
+ } while (0)
+#endif /* __BIG_ENDIAN */
+
+#ifdef __mips__
+/*
+ * bcm4716 (which includes 4717 & 4718), plus 4706 on PCIe, can reorder
+ * transactions. As a fix, a read after write is performed in certain places
+ * in the code. Older chips and the newer 5357 family don't require this fix.
+ */
+#define W_REG_FLUSH(r, v) ({ W_REG((r), (v)); (void)R_REG(r); })
+#else
+#define W_REG_FLUSH(r, v) W_REG((r), (v))
+#endif /* __mips__ */
+
+#define AND_REG(r, v) W_REG((r), R_REG(r) & (v))
+#define OR_REG(r, v) W_REG((r), R_REG(r) | (v))
+
+#define SET_REG(r, mask, val) \
+ W_REG((r), ((R_REG(r) & ~(mask)) | (val)))
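
As a usage sketch of the accessors above: SET_REG is simply a read-modify-write built from R_REG and W_REG, and the sizeof() dispatch selects readl/writel for a u32 field. The sketch assumes struct d11regs exposes a u32 maccontrol register (as declared in d11.h); the mask/value arguments are whatever the caller needs. Where the MIPS reordering workaround above applies, W_REG_FLUSH() would stand in for the plain W_REG().

static void example_set_field(d11regs_t *regs, u32 mask, u32 val)
{
	/* clears 'mask' in maccontrol, then ORs in 'val' */
	SET_REG(&regs->maccontrol, mask, val);

	/* equivalent open-coded form, showing the R_REG/W_REG pair */
	W_REG(&regs->maccontrol,
	      (R_REG(&regs->maccontrol) & ~mask) | val);
}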
+
+/* multi-bool data type: set of bools, mbool is true if any is set */
+typedef u32 mbool;
+#define mboolset(mb, bit) ((mb) |= (bit)) /* set one bool */
+#define mboolclr(mb, bit) ((mb) &= ~(bit)) /* clear one bool */
+#define mboolisset(mb, bit) (((mb) & (bit)) != 0) /* true if one bool is set */
+#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val)))
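
A short sketch of the mbool idiom: several independent on/off reasons share one word, and the aggregate stays true while any of them remains set. The EXAMPLE_* bits are local to the sketch, not driver constants.

#define EXAMPLE_OFF_USER	0x1	/* user asked for radio off */
#define EXAMPLE_OFF_HW		0x2	/* rfkill switch asserted */

static bool example_blocked(void)
{
	mbool reasons = 0;

	mboolset(reasons, EXAMPLE_OFF_USER);
	mboolset(reasons, EXAMPLE_OFF_HW);
	mboolclr(reasons, EXAMPLE_OFF_USER);

	/* still blocked: the hardware reason remains set */
	return reasons != 0;
}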
+
+/* forward declarations */
+struct wiphy;
+struct ieee80211_sta;
+struct ieee80211_tx_queue_params;
+struct brcms_info;
+struct brcms_c_info;
+struct brcms_hardware;
+struct brcms_c_if;
+struct brcmu_iovar;
+struct brcmu_strbuf;
+struct brcms_txq_info;
+struct brcms_band;
+struct dma_pub;
+struct si_pub;
+struct tx_status;
+struct d11rxhdr;
+struct brcms_d11rxhdr;
+struct txpwr_limits;
+struct brcms_phy;
+
+typedef volatile struct intctrlregs intctrlregs_t;
+typedef volatile struct pio2regs pio2regs_t;
+typedef volatile struct pio2regp pio2regp_t;
+typedef volatile struct pio4regs pio4regs_t;
+typedef volatile struct pio4regp pio4regp_t;
+typedef volatile struct fifo64 fifo64_t;
+typedef volatile struct d11regs d11regs_t;
+typedef volatile struct dma32diag dma32diag_t;
+typedef volatile struct dma64regs dma64regs_t;
+typedef struct brcms_rateset wlc_rateset_t;
+typedef u32 ratespec_t;
+typedef struct chanvec chanvec_t;
+typedef s32 fixed;
+typedef struct _cs32 cs32;
+typedef volatile union pmqreg pmqreg_t;
+
+/* brcm_msg_level is a bit vector with defs in defs.h */
+extern u32 brcm_msg_level;
+
+#endif /* _BRCM_TYPES_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wl_ucode_loader.c b/drivers/staging/brcm80211/brcmsmac/ucode_loader.c
index cc00dd19746..bf733fb18ce 100644
--- a/drivers/staging/brcm80211/brcmsmac/wl_ucode_loader.c
+++ b/drivers/staging/brcm80211/brcmsmac/ucode_loader.c
@@ -14,9 +14,9 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/types.h>
-#include <bcmdefs.h>
-#include <wl_ucode.h>
+#include <defs.h>
+#include "types.h"
+#include <ucode_loader.h>
enum {
D11UCODE_NAMETAG_START = 0,
@@ -53,59 +53,63 @@ u32 bcm43xx_24_lcnsz;
u32 *bcm43xx_bommajor;
u32 *bcm43xx_bomminor;
-int wl_ucode_data_init(struct wl_info *wl)
+int brcms_ucode_data_init(struct brcms_info *wl)
{
int rc;
- rc = wl_check_firmwares(wl);
+ rc = brcms_check_firmwares(wl);
- rc = rc < 0 ? rc : wl_ucode_init_buf(wl, (void **)&d11lcn0bsinitvals24,
- D11LCN0BSINITVALS24);
- rc = rc < 0 ? rc : wl_ucode_init_buf(wl, (void **)&d11lcn0initvals24,
+ rc = rc < 0 ? rc :
+ brcms_ucode_init_buf(wl, (void **)&d11lcn0bsinitvals24,
+ D11LCN0BSINITVALS24);
+ rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&d11lcn0initvals24,
D11LCN0INITVALS24);
- rc = rc < 0 ? rc : wl_ucode_init_buf(wl, (void **)&d11lcn1bsinitvals24,
- D11LCN1BSINITVALS24);
- rc = rc < 0 ? rc : wl_ucode_init_buf(wl, (void **)&d11lcn1initvals24,
+ rc = rc < 0 ? rc :
+ brcms_ucode_init_buf(wl, (void **)&d11lcn1bsinitvals24,
+ D11LCN1BSINITVALS24);
+ rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&d11lcn1initvals24,
D11LCN1INITVALS24);
- rc = rc < 0 ? rc : wl_ucode_init_buf(wl, (void **)&d11lcn2bsinitvals24,
- D11LCN2BSINITVALS24);
- rc = rc < 0 ? rc : wl_ucode_init_buf(wl, (void **)&d11lcn2initvals24,
+ rc = rc < 0 ? rc :
+ brcms_ucode_init_buf(wl, (void **)&d11lcn2bsinitvals24,
+ D11LCN2BSINITVALS24);
+ rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&d11lcn2initvals24,
D11LCN2INITVALS24);
- rc = rc < 0 ? rc : wl_ucode_init_buf(wl, (void **)&d11n0absinitvals16,
- D11N0ABSINITVALS16);
- rc = rc < 0 ? rc : wl_ucode_init_buf(wl, (void **)&d11n0bsinitvals16,
+ rc = rc < 0 ? rc :
+ brcms_ucode_init_buf(wl, (void **)&d11n0absinitvals16,
+ D11N0ABSINITVALS16);
+ rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&d11n0bsinitvals16,
D11N0BSINITVALS16);
- rc = rc < 0 ? rc : wl_ucode_init_buf(wl, (void **)&d11n0initvals16,
+ rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&d11n0initvals16,
D11N0INITVALS16);
- rc = rc < 0 ? rc : wl_ucode_init_buf(wl, (void **)&bcm43xx_16_mimo,
+ rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&bcm43xx_16_mimo,
D11UCODE_OVERSIGHT16_MIMO);
- rc = rc < 0 ? rc : wl_ucode_init_uint(wl, &bcm43xx_16_mimosz,
+ rc = rc < 0 ? rc : brcms_ucode_init_uint(wl, &bcm43xx_16_mimosz,
D11UCODE_OVERSIGHT16_MIMOSZ);
- rc = rc < 0 ? rc : wl_ucode_init_buf(wl, (void **)&bcm43xx_24_lcn,
+ rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&bcm43xx_24_lcn,
D11UCODE_OVERSIGHT24_LCN);
- rc = rc < 0 ? rc : wl_ucode_init_uint(wl, &bcm43xx_24_lcnsz,
+ rc = rc < 0 ? rc : brcms_ucode_init_uint(wl, &bcm43xx_24_lcnsz,
D11UCODE_OVERSIGHT24_LCNSZ);
- rc = rc < 0 ? rc : wl_ucode_init_buf(wl, (void **)&bcm43xx_bommajor,
+ rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&bcm43xx_bommajor,
D11UCODE_OVERSIGHT_BOMMAJOR);
- rc = rc < 0 ? rc : wl_ucode_init_buf(wl, (void **)&bcm43xx_bomminor,
+ rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&bcm43xx_bomminor,
D11UCODE_OVERSIGHT_BOMMINOR);
return rc;
}
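
The repeated `rc = rc < 0 ? rc : ...` lines above rely on the conditional operator evaluating only the selected operand: once one firmware load fails, every later brcms_ucode_init_buf()/brcms_ucode_init_uint() call is skipped and the first error code is what brcms_ucode_data_init() returns. A trimmed sketch of the same shape, using only names declared in this file:

static int example_load_chain(struct brcms_info *wl)
{
	int rc;

	rc = brcms_check_firmwares(wl);
	/* right-hand calls only run while rc is still >= 0 */
	rc = rc < 0 ? rc : brcms_ucode_init_buf(wl, (void **)&d11n0initvals16,
						D11N0INITVALS16);
	rc = rc < 0 ? rc : brcms_ucode_init_uint(wl, &bcm43xx_16_mimosz,
						 D11UCODE_OVERSIGHT16_MIMOSZ);
	return rc;
}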
-void wl_ucode_data_free(void)
+void brcms_ucode_data_free(void)
{
- wl_ucode_free_buf((void *)d11lcn0bsinitvals24);
- wl_ucode_free_buf((void *)d11lcn0initvals24);
- wl_ucode_free_buf((void *)d11lcn1bsinitvals24);
- wl_ucode_free_buf((void *)d11lcn1initvals24);
- wl_ucode_free_buf((void *)d11lcn2bsinitvals24);
- wl_ucode_free_buf((void *)d11lcn2initvals24);
- wl_ucode_free_buf((void *)d11n0absinitvals16);
- wl_ucode_free_buf((void *)d11n0bsinitvals16);
- wl_ucode_free_buf((void *)d11n0initvals16);
- wl_ucode_free_buf((void *)bcm43xx_16_mimo);
- wl_ucode_free_buf((void *)bcm43xx_24_lcn);
- wl_ucode_free_buf((void *)bcm43xx_bommajor);
- wl_ucode_free_buf((void *)bcm43xx_bomminor);
+ brcms_ucode_free_buf((void *)d11lcn0bsinitvals24);
+ brcms_ucode_free_buf((void *)d11lcn0initvals24);
+ brcms_ucode_free_buf((void *)d11lcn1bsinitvals24);
+ brcms_ucode_free_buf((void *)d11lcn1initvals24);
+ brcms_ucode_free_buf((void *)d11lcn2bsinitvals24);
+ brcms_ucode_free_buf((void *)d11lcn2initvals24);
+ brcms_ucode_free_buf((void *)d11n0absinitvals16);
+ brcms_ucode_free_buf((void *)d11n0bsinitvals16);
+ brcms_ucode_free_buf((void *)d11n0initvals16);
+ brcms_ucode_free_buf((void *)bcm43xx_16_mimo);
+ brcms_ucode_free_buf((void *)bcm43xx_24_lcn);
+ brcms_ucode_free_buf((void *)bcm43xx_bommajor);
+ brcms_ucode_free_buf((void *)bcm43xx_bomminor);
return;
}
diff --git a/drivers/staging/brcm80211/brcmsmac/wl_ucode.h b/drivers/staging/brcm80211/brcmsmac/ucode_loader.h
index 6933fda0e6a..ca53deced7b 100644
--- a/drivers/staging/brcm80211/brcmsmac/wl_ucode.h
+++ b/drivers/staging/brcm80211/brcmsmac/ucode_loader.h
@@ -14,6 +14,8 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include "types.h" /* forward structure declarations */
+
#define MIN_FW_SIZE 40000 /* minimum firmware file size in bytes */
#define MAX_FW_SIZE 150000
@@ -39,11 +41,12 @@ extern u32 bcm43xx_16_mimosz;
extern u32 *bcm43xx_24_lcn;
extern u32 bcm43xx_24_lcnsz;
-extern int wl_ucode_data_init(struct wl_info *wl);
-extern void wl_ucode_data_free(void);
+extern int brcms_ucode_data_init(struct brcms_info *wl);
+extern void brcms_ucode_data_free(void);
-extern int wl_ucode_init_buf(struct wl_info *wl, void **pbuf, unsigned int idx);
-extern int wl_ucode_init_uint(struct wl_info *wl, unsigned *data,
+extern int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf,
+ unsigned int idx);
+extern int brcms_ucode_init_uint(struct brcms_info *wl, unsigned *data,
unsigned int idx);
-extern void wl_ucode_free_buf(void *);
-extern int wl_check_firmwares(struct wl_info *wl);
+extern void brcms_ucode_free_buf(void *);
+extern int brcms_check_firmwares(struct brcms_info *wl);
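
The renamed API keeps the old pairing: brcms_ucode_data_init() pulls all firmware sections in, brcms_ucode_data_free() releases them again. A hedged sketch of a caller follows; the attach/detach context is assumed and not shown in this patch, and no claim is made about cleanup on a partial failure.

static int example_firmware_cycle(struct brcms_info *wl)
{
	int err;

	err = brcms_ucode_data_init(wl);
	if (err < 0)
		return err;

	/* ... download the buffers to the device here ... */

	brcms_ucode_data_free();
	return 0;
}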
diff --git a/drivers/staging/brcm80211/brcmsmac/wl_dbg.h b/drivers/staging/brcm80211/brcmsmac/wl_dbg.h
deleted file mode 100644
index 5582de3ee72..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/wl_dbg.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _wl_dbg_h_
-#define _wl_dbg_h_
-
-#include <linux/device.h> /* dev_err() */
-
-/* wl_msg_level is a bit vector with defs in wlioctl.h */
-extern u32 wl_msg_level;
-
-#define BCMMSG(dev, fmt, args...) \
-do { \
- if (wl_msg_level & WL_TRACE_VAL) \
- wiphy_err(dev, "%s: " fmt, __func__, ##args); \
-} while (0)
-
-#ifdef BCMDBG
-
-
-/* Extra message control for AMPDU debugging */
-#define WL_AMPDU_UPDN_VAL 0x00000001 /* Config up/down related */
-#define WL_AMPDU_ERR_VAL 0x00000002 /* Calls to beaocn update */
-#define WL_AMPDU_TX_VAL 0x00000004 /* Transmit data path */
-#define WL_AMPDU_RX_VAL 0x00000008 /* Receive data path */
-#define WL_AMPDU_CTL_VAL 0x00000010 /* TSF-related items */
-#define WL_AMPDU_HW_VAL 0x00000020 /* AMPDU_HW */
-#define WL_AMPDU_HWTXS_VAL 0x00000040 /* AMPDU_HWTXS */
-#define WL_AMPDU_HWDBG_VAL 0x00000080 /* AMPDU_DBG */
-
-extern u32 wl_ampdu_dbg;
-
-#define WL_AMPDU_PRINT(level, fmt, args...) \
-do { \
- if (wl_ampdu_dbg & level) { \
- WL_AMPDU(fmt, ##args); \
- } \
-} while (0)
-
-#define WL_AMPDU_UPDN(fmt, args...) \
- WL_AMPDU_PRINT(WL_AMPDU_UPDN_VAL, fmt, ##args)
-#define WL_AMPDU_RX(fmt, args...) \
- WL_AMPDU_PRINT(WL_AMPDU_RX_VAL, fmt, ##args)
-#define WL_AMPDU_ERR(fmt, args...) \
- WL_AMPDU_PRINT(WL_AMPDU_ERR_VAL, fmt, ##args)
-#define WL_AMPDU_TX(fmt, args...) \
- WL_AMPDU_PRINT(WL_AMPDU_TX_VAL, fmt, ##args)
-#define WL_AMPDU_CTL(fmt, args...) \
- WL_AMPDU_PRINT(WL_AMPDU_CTL_VAL, fmt, ##args)
-#define WL_AMPDU_HW(fmt, args...) \
- WL_AMPDU_PRINT(WL_AMPDU_HW_VAL, fmt, ##args)
-#define WL_AMPDU_HWTXS(fmt, args...) \
- WL_AMPDU_PRINT(WL_AMPDU_HWTXS_VAL, fmt, ##args)
-#define WL_AMPDU_HWDBG(fmt, args...) \
- WL_AMPDU_PRINT(WL_AMPDU_HWDBG_VAL, fmt, ##args)
-#define WL_AMPDU_ERR_ON() (wl_ampdu_dbg & WL_AMPDU_ERR_VAL)
-#define WL_AMPDU_HW_ON() (wl_ampdu_dbg & WL_AMPDU_HW_VAL)
-#define WL_AMPDU_HWTXS_ON() (wl_ampdu_dbg & WL_AMPDU_HWTXS_VAL)
-
-#else /* BCMDBG */
-
-
-#define WL_AMPDU_UPDN(fmt, args...) no_printk(fmt, ##args)
-#define WL_AMPDU_RX(fmt, args...) no_printk(fmt, ##args)
-#define WL_AMPDU_ERR(fmt, args...) no_printk(fmt, ##args)
-#define WL_AMPDU_TX(fmt, args...) no_printk(fmt, ##args)
-#define WL_AMPDU_CTL(fmt, args...) no_printk(fmt, ##args)
-#define WL_AMPDU_HW(fmt, args...) no_printk(fmt, ##args)
-#define WL_AMPDU_HWTXS(fmt, args...) no_printk(fmt, ##args)
-#define WL_AMPDU_HWDBG(fmt, args...) no_printk(fmt, ##args)
-#define WL_AMPDU_ERR_ON() 0
-#define WL_AMPDU_HW_ON() 0
-#define WL_AMPDU_HWTXS_ON() 0
-
-#endif /* BCMDBG */
-
-#define WL_ERROR_ON() (wl_msg_level & WL_ERROR_VAL)
-
-#endif /* _wl_dbg_h_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wl_export.h b/drivers/staging/brcm80211/brcmsmac/wl_export.h
deleted file mode 100644
index 0fe0b24b586..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/wl_export.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _wl_export_h_
-#define _wl_export_h_
-
-/* misc callbacks */
-struct wl_info;
-struct wl_if;
-struct wlc_if;
-extern void wl_init(struct wl_info *wl);
-extern uint wl_reset(struct wl_info *wl);
-extern void wl_intrson(struct wl_info *wl);
-extern u32 wl_intrsoff(struct wl_info *wl);
-extern void wl_intrsrestore(struct wl_info *wl, u32 macintmask);
-extern int wl_up(struct wl_info *wl);
-extern void wl_down(struct wl_info *wl);
-extern void wl_txflowcontrol(struct wl_info *wl, struct wl_if *wlif, bool state,
- int prio);
-extern bool wl_alloc_dma_resources(struct wl_info *wl, uint dmaddrwidth);
-extern bool wl_rfkill_set_hw_state(struct wl_info *wl);
-
-/* timer functions */
-struct wl_timer;
-extern struct wl_timer *wl_init_timer(struct wl_info *wl,
- void (*fn) (void *arg), void *arg,
- const char *name);
-extern void wl_free_timer(struct wl_info *wl, struct wl_timer *timer);
-extern void wl_add_timer(struct wl_info *wl, struct wl_timer *timer, uint ms,
- int periodic);
-extern bool wl_del_timer(struct wl_info *wl, struct wl_timer *timer);
-extern void wl_msleep(struct wl_info *wl, uint ms);
-
-#endif /* _wl_export_h_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_ampdu.h b/drivers/staging/brcm80211/brcmsmac/wlc_ampdu.h
deleted file mode 100644
index 63d403b036f..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/wlc_ampdu.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _wlc_ampdu_h_
-#define _wlc_ampdu_h_
-
-extern struct ampdu_info *wlc_ampdu_attach(struct wlc_info *wlc);
-extern void wlc_ampdu_detach(struct ampdu_info *ampdu);
-extern int wlc_sendampdu(struct ampdu_info *ampdu, struct wlc_txq_info *qi,
- struct sk_buff **aggp, int prec);
-extern void wlc_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
- struct sk_buff *p, tx_status_t *txs);
-extern void wlc_ampdu_macaddr_upd(struct wlc_info *wlc);
-extern void wlc_ampdu_shm_upd(struct ampdu_info *ampdu);
-
-#endif /* _wlc_ampdu_h_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_bmac.h b/drivers/staging/brcm80211/brcmsmac/wlc_bmac.h
deleted file mode 100644
index a5dccc273ac..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/wlc_bmac.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#ifndef _wlc_bmac_h_
-#define _wlc_bmac_h_
-
-/* XXXXX this interface is under wlc.c by design
- * http://hwnbu-twiki.broadcom.com/bin/view/Mwgroup/WlBmacDesign
- *
- * high driver files(e.g. wlc_ampdu.c etc)
- * wlc.h/wlc.c
- * wlc_bmac.h/wlc_bmac.c
- *
- * So don't include this in files other than wlc.c, wlc_bmac* wl_rte.c(dongle port) and wl_phy.c
- * create wrappers in wlc.c if needed
- */
-
-/* dup state between BMAC(struct wlc_hw_info) and HIGH(struct wlc_info)
- driver */
-typedef struct wlc_bmac_state {
- u32 machwcap; /* mac hw capibility */
- u32 preamble_ovr; /* preamble override */
-} wlc_bmac_state_t;
-
-enum {
- IOV_BMAC_DIAG,
- IOV_BMAC_SBGPIOTIMERVAL,
- IOV_BMAC_SBGPIOOUT,
- IOV_BMAC_CCGPIOCTRL, /* CC GPIOCTRL REG */
- IOV_BMAC_CCGPIOOUT, /* CC GPIOOUT REG */
- IOV_BMAC_CCGPIOOUTEN, /* CC GPIOOUTEN REG */
- IOV_BMAC_CCGPIOIN, /* CC GPIOIN REG */
- IOV_BMAC_WPSGPIO, /* WPS push button GPIO pin */
- IOV_BMAC_OTPDUMP,
- IOV_BMAC_OTPSTAT,
- IOV_BMAC_PCIEASPM, /* obfuscation clkreq/aspm control */
- IOV_BMAC_PCIEADVCORRMASK, /* advanced correctable error mask */
- IOV_BMAC_PCIECLKREQ, /* PCIE 1.1 clockreq enab support */
- IOV_BMAC_PCIELCREG, /* PCIE LCREG */
- IOV_BMAC_SBGPIOTIMERMASK,
- IOV_BMAC_RFDISABLEDLY,
- IOV_BMAC_PCIEREG, /* PCIE REG */
- IOV_BMAC_PCICFGREG, /* PCI Config register */
- IOV_BMAC_PCIESERDESREG, /* PCIE SERDES REG (dev, 0}offset) */
- IOV_BMAC_PCIEGPIOOUT, /* PCIEOUT REG */
- IOV_BMAC_PCIEGPIOOUTEN, /* PCIEOUTEN REG */
- IOV_BMAC_PCIECLKREQENCTRL, /* clkreqenctrl REG (PCIE REV > 6.0 */
- IOV_BMAC_DMALPBK,
- IOV_BMAC_CCREG,
- IOV_BMAC_COREREG,
- IOV_BMAC_SDCIS,
- IOV_BMAC_SDIO_DRIVE,
- IOV_BMAC_OTPW,
- IOV_BMAC_NVOTPW,
- IOV_BMAC_SROM,
- IOV_BMAC_SRCRC,
- IOV_BMAC_CIS_SOURCE,
- IOV_BMAC_CISVAR,
- IOV_BMAC_OTPLOCK,
- IOV_BMAC_OTP_CHIPID,
- IOV_BMAC_CUSTOMVAR1,
- IOV_BMAC_BOARDFLAGS,
- IOV_BMAC_BOARDFLAGS2,
- IOV_BMAC_WPSLED,
- IOV_BMAC_NVRAM_SOURCE,
- IOV_BMAC_OTP_RAW_READ,
- IOV_BMAC_LAST
-};
-
-extern int wlc_bmac_attach(struct wlc_info *wlc, u16 vendor, u16 device,
- uint unit, bool piomode, void *regsva, uint bustype,
- void *btparam);
-extern int wlc_bmac_detach(struct wlc_info *wlc);
-extern void wlc_bmac_watchdog(void *arg);
-
-/* up/down, reset, clk */
-extern void wlc_bmac_copyto_objmem(struct wlc_hw_info *wlc_hw,
- uint offset, const void *buf, int len,
- u32 sel);
-extern void wlc_bmac_copyfrom_objmem(struct wlc_hw_info *wlc_hw, uint offset,
- void *buf, int len, u32 sel);
-#define wlc_bmac_copyfrom_shm(wlc_hw, offset, buf, len) \
- wlc_bmac_copyfrom_objmem(wlc_hw, offset, buf, len, OBJADDR_SHM_SEL)
-#define wlc_bmac_copyto_shm(wlc_hw, offset, buf, len) \
- wlc_bmac_copyto_objmem(wlc_hw, offset, buf, len, OBJADDR_SHM_SEL)
-
-extern void wlc_bmac_core_phypll_reset(struct wlc_hw_info *wlc_hw);
-extern void wlc_bmac_core_phypll_ctl(struct wlc_hw_info *wlc_hw, bool on);
-extern void wlc_bmac_phyclk_fgc(struct wlc_hw_info *wlc_hw, bool clk);
-extern void wlc_bmac_macphyclk_set(struct wlc_hw_info *wlc_hw, bool clk);
-extern void wlc_bmac_phy_reset(struct wlc_hw_info *wlc_hw);
-extern void wlc_bmac_corereset(struct wlc_hw_info *wlc_hw, u32 flags);
-extern void wlc_bmac_reset(struct wlc_hw_info *wlc_hw);
-extern void wlc_bmac_init(struct wlc_hw_info *wlc_hw, chanspec_t chanspec,
- bool mute);
-extern int wlc_bmac_up_prep(struct wlc_hw_info *wlc_hw);
-extern int wlc_bmac_up_finish(struct wlc_hw_info *wlc_hw);
-extern int wlc_bmac_down_prep(struct wlc_hw_info *wlc_hw);
-extern int wlc_bmac_down_finish(struct wlc_hw_info *wlc_hw);
-extern void wlc_bmac_switch_macfreq(struct wlc_hw_info *wlc_hw, u8 spurmode);
-
-/* chanspec, ucode interface */
-extern void wlc_bmac_set_chanspec(struct wlc_hw_info *wlc_hw,
- chanspec_t chanspec,
- bool mute, struct txpwr_limits *txpwr);
-
-extern int wlc_bmac_xmtfifo_sz_get(struct wlc_hw_info *wlc_hw, uint fifo,
- uint *blocks);
-extern void wlc_bmac_mhf(struct wlc_hw_info *wlc_hw, u8 idx, u16 mask,
- u16 val, int bands);
-extern void wlc_bmac_mctrl(struct wlc_hw_info *wlc_hw, u32 mask, u32 val);
-extern u16 wlc_bmac_mhf_get(struct wlc_hw_info *wlc_hw, u8 idx, int bands);
-extern void wlc_bmac_txant_set(struct wlc_hw_info *wlc_hw, u16 phytxant);
-extern u16 wlc_bmac_get_txant(struct wlc_hw_info *wlc_hw);
-extern void wlc_bmac_antsel_type_set(struct wlc_hw_info *wlc_hw,
- u8 antsel_type);
-extern int wlc_bmac_state_get(struct wlc_hw_info *wlc_hw,
- wlc_bmac_state_t *state);
-extern void wlc_bmac_write_shm(struct wlc_hw_info *wlc_hw, uint offset, u16 v);
-extern u16 wlc_bmac_read_shm(struct wlc_hw_info *wlc_hw, uint offset);
-extern void wlc_bmac_write_template_ram(struct wlc_hw_info *wlc_hw, int offset,
- int len, void *buf);
-extern void wlc_bmac_copyfrom_vars(struct wlc_hw_info *wlc_hw, char **buf,
- uint *len);
-
-extern void wlc_bmac_hw_etheraddr(struct wlc_hw_info *wlc_hw,
- u8 *ea);
-
-extern bool wlc_bmac_radio_read_hwdisabled(struct wlc_hw_info *wlc_hw);
-extern void wlc_bmac_set_shortslot(struct wlc_hw_info *wlc_hw, bool shortslot);
-extern void wlc_bmac_band_stf_ss_set(struct wlc_hw_info *wlc_hw, u8 stf_mode);
-
-extern void wlc_bmac_wait_for_wake(struct wlc_hw_info *wlc_hw);
-
-extern void wlc_ucode_wake_override_set(struct wlc_hw_info *wlc_hw,
- u32 override_bit);
-extern void wlc_ucode_wake_override_clear(struct wlc_hw_info *wlc_hw,
- u32 override_bit);
-
-extern void wlc_bmac_set_addrmatch(struct wlc_hw_info *wlc_hw,
- int match_reg_offset,
- const u8 *addr);
-extern void wlc_bmac_write_hw_bcntemplates(struct wlc_hw_info *wlc_hw,
- void *bcn, int len, bool both);
-
-extern void wlc_bmac_read_tsf(struct wlc_hw_info *wlc_hw, u32 *tsf_l_ptr,
- u32 *tsf_h_ptr);
-extern void wlc_bmac_set_cwmin(struct wlc_hw_info *wlc_hw, u16 newmin);
-extern void wlc_bmac_set_cwmax(struct wlc_hw_info *wlc_hw, u16 newmax);
-
-extern void wlc_bmac_retrylimit_upd(struct wlc_hw_info *wlc_hw, u16 SRL,
- u16 LRL);
-
-extern void wlc_bmac_fifoerrors(struct wlc_hw_info *wlc_hw);
-
-
-/* API for BMAC driver (e.g. wlc_phy.c etc) */
-
-extern void wlc_bmac_bw_set(struct wlc_hw_info *wlc_hw, u16 bw);
-extern void wlc_bmac_pllreq(struct wlc_hw_info *wlc_hw, bool set,
- mbool req_bit);
-extern void wlc_bmac_hw_up(struct wlc_hw_info *wlc_hw);
-extern u16 wlc_bmac_rate_shm_offset(struct wlc_hw_info *wlc_hw, u8 rate);
-extern void wlc_bmac_antsel_set(struct wlc_hw_info *wlc_hw, u32 antsel_avail);
-
-#endif /* _wlc_bmac_h_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_bsscfg.h b/drivers/staging/brcm80211/brcmsmac/wlc_bsscfg.h
deleted file mode 100644
index 2572541bde9..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/wlc_bsscfg.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _WLC_BSSCFG_H_
-#define _WLC_BSSCFG_H_
-
-/* Check if a particular BSS config is AP or STA */
-#define BSSCFG_AP(cfg) (0)
-#define BSSCFG_STA(cfg) (1)
-
-#define BSSCFG_IBSS(cfg) (!(cfg)->BSS)
-
-#define NTXRATE 64 /* # tx MPDUs rate is reported for */
-#define MAXMACLIST 64 /* max # source MAC matches */
-#define BCN_TEMPLATE_COUNT 2
-
-/* Iterator for "associated" STA bss configs:
- (struct wlc_info *wlc, int idx, struct wlc_bsscfg *cfg) */
-#define FOREACH_AS_STA(wlc, idx, cfg) \
- for (idx = 0; (int) idx < WLC_MAXBSSCFG; idx++) \
- if ((cfg = (wlc)->bsscfg[idx]) && BSSCFG_STA(cfg) && cfg->associated)
-
-/* As above for all non-NULL BSS configs */
-#define FOREACH_BSS(wlc, idx, cfg) \
- for (idx = 0; (int) idx < WLC_MAXBSSCFG; idx++) \
- if ((cfg = (wlc)->bsscfg[idx]))
-
-/* BSS configuration state */
-struct wlc_bsscfg {
- struct wlc_info *wlc; /* wlc to which this bsscfg belongs to. */
- bool up; /* is this configuration up operational */
- bool enable; /* is this configuration enabled */
- bool associated; /* is BSS in ASSOCIATED state */
- bool BSS; /* infraustructure or adhac */
- bool dtim_programmed;
-
- u8 SSID_len; /* the length of SSID */
- u8 SSID[IEEE80211_MAX_SSID_LEN]; /* SSID string */
- struct scb *bcmc_scb[MAXBANDS]; /* one bcmc_scb per band */
- s8 _idx; /* the index of this bsscfg,
- * assigned at wlc_bsscfg_alloc()
- */
- /* MAC filter */
- uint nmac; /* # of entries on maclist array */
- int macmode; /* allow/deny stations on maclist array */
- struct ether_addr *maclist; /* list of source MAC addrs to match */
-
- /* security */
- u32 wsec; /* wireless security bitvec */
- s16 auth; /* 802.11 authentication: Open, Shared Key, WPA */
- s16 openshared; /* try Open auth first, then Shared Key */
- bool wsec_restrict; /* drop unencrypted packets if wsec is enabled */
- bool eap_restrict; /* restrict data until 802.1X auth succeeds */
- u16 WPA_auth; /* WPA: authenticated key management */
- bool wpa2_preauth; /* default is true, wpa_cap sets value */
- bool wsec_portopen; /* indicates keys are plumbed */
- wsec_iv_t wpa_none_txiv; /* global txiv for WPA_NONE, tkip and aes */
- int wsec_index; /* 0-3: default tx key, -1: not set */
- wsec_key_t *bss_def_keys[WLC_DEFAULT_KEYS]; /* default key storage */
-
- /* TKIP countermeasures */
- bool tkip_countermeasures; /* flags TKIP no-assoc period */
- u32 tk_cm_dt; /* detect timer */
- u32 tk_cm_bt; /* blocking timer */
- u32 tk_cm_bt_tmstmp; /* Timestamp when TKIP BT is activated */
- bool tk_cm_activate; /* activate countermeasures after EAPOL-Key sent */
-
- u8 BSSID[ETH_ALEN]; /* BSSID (associated) */
- u8 cur_etheraddr[ETH_ALEN]; /* h/w address */
- u16 bcmc_fid; /* the last BCMC FID queued to TX_BCMC_FIFO */
- u16 bcmc_fid_shm; /* the last BCMC FID written to shared mem */
-
- u32 flags; /* WLC_BSSCFG flags; see below */
-
- u8 *bcn; /* AP beacon */
- uint bcn_len; /* AP beacon length */
- bool ar_disassoc; /* disassociated in associated recreation */
-
- int auth_atmptd; /* auth type (open/shared) attempted */
-
- pmkid_cand_t pmkid_cand[MAXPMKID]; /* PMKID candidate list */
- uint npmkid_cand; /* num PMKID candidates */
- pmkid_t pmkid[MAXPMKID]; /* PMKID cache */
- uint npmkid; /* num cached PMKIDs */
-
- wlc_bss_info_t *current_bss; /* BSS parms in ASSOCIATED state */
-
- /* PM states */
- bool PMawakebcn; /* bcn recvd during current waking state */
- bool PMpending; /* waiting for tx status with PM indicated set */
- bool priorPMstate; /* Detecting PM state transitions */
- bool PSpoll; /* whether there is an outstanding PS-Poll frame */
-
- /* BSSID entry in RCMTA, use the wsec key management infrastructure to
- * manage the RCMTA entries.
- */
- wsec_key_t *rcmta;
-
- /* 'unique' ID of this bsscfg, assigned at bsscfg allocation */
- u16 ID;
-
- uint txrspecidx; /* index into tx rate circular buffer */
- ratespec_t txrspec[NTXRATE][2]; /* circular buffer of prev MPDUs tx rates */
-};
-
-#define WLC_BSSCFG_11N_DISABLE 0x1000 /* Do not advertise .11n IEs for this BSS */
-#define WLC_BSSCFG_HW_BCN 0x20 /* The BSS is generating beacons in HW */
-
-#define HWBCN_ENAB(cfg) (((cfg)->flags & WLC_BSSCFG_HW_BCN) != 0)
-#define HWPRB_ENAB(cfg) (((cfg)->flags & WLC_BSSCFG_HW_PRB) != 0)
-
-/* Extend N_ENAB to per-BSS */
-#define BSS_N_ENAB(wlc, cfg) \
- (N_ENAB((wlc)->pub) && !((cfg)->flags & WLC_BSSCFG_11N_DISABLE))
-
-#define MBSS_BCN_ENAB(cfg) 0
-#define MBSS_PRB_ENAB(cfg) 0
-#define SOFTBCN_ENAB(pub) (0)
-#define SOFTPRB_ENAB(pub) (0)
-#define wlc_bsscfg_tx_check(a) do { } while (0);
-
-#endif /* _WLC_BSSCFG_H_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_cfg.h b/drivers/staging/brcm80211/brcmsmac/wlc_cfg.h
deleted file mode 100644
index 85fbd063531..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/wlc_cfg.h
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _wlc_cfg_h_
-#define _wlc_cfg_h_
-
-#define NBANDS(wlc) ((wlc)->pub->_nbands)
-#define NBANDS_PUB(pub) ((pub)->_nbands)
-#define NBANDS_HW(hw) ((hw)->_nbands)
-
-#define IS_SINGLEBAND_5G(device) 0
-
-/* **** Core type/rev defaults **** */
-#define D11_DEFAULT 0x0fffffb0 /* Supported D11 revs: 4, 5, 7-27
- * also need to update wlc.h MAXCOREREV
- */
-
-#define NPHY_DEFAULT 0x000001ff /* Supported nphy revs:
- * 0 4321a0
- * 1 4321a1
- * 2 4321b0/b1/c0/c1
- * 3 4322a0
- * 4 4322a1
- * 5 4716a0
- * 6 43222a0, 43224a0
- * 7 43226a0
- * 8 5357a0, 43236a0
- */
-
-#define LCNPHY_DEFAULT 0x00000007 /* Supported lcnphy revs:
- * 0 4313a0, 4336a0, 4330a0
- * 1
- * 2 4330a0
- */
-
-#define SSLPNPHY_DEFAULT 0x0000000f /* Supported sslpnphy revs:
- * 0 4329a0/k0
- * 1 4329b0/4329C0
- * 2 4319a0
- * 3 5356a0
- */
-
-
-/* For undefined values, use defaults */
-#ifndef D11CONF
-#define D11CONF D11_DEFAULT
-#endif
-#ifndef NCONF
-#define NCONF NPHY_DEFAULT
-#endif
-#ifndef LCNCONF
-#define LCNCONF LCNPHY_DEFAULT
-#endif
-
-#ifndef SSLPNCONF
-#define SSLPNCONF SSLPNPHY_DEFAULT
-#endif
-
-/********************************************************************
- * Phy/Core Configuration. Defines macros to to check core phy/rev *
- * compile-time configuration. Defines default core support. *
- * ******************************************************************
- */
-
-/* Basic macros to check a configuration bitmask */
-
-#define CONF_HAS(config, val) ((config) & (1 << (val)))
-#define CONF_MSK(config, mask) ((config) & (mask))
-#define MSK_RANGE(low, hi) ((1 << ((hi)+1)) - (1 << (low)))
-#define CONF_RANGE(config, low, hi) (CONF_MSK(config, MSK_RANGE(low, high)))
-
-#define CONF_IS(config, val) ((config) == (1 << (val)))
-#define CONF_GE(config, val) ((config) & (0-(1 << (val))))
-#define CONF_GT(config, val) ((config) & (0-2*(1 << (val))))
-#define CONF_LT(config, val) ((config) & ((1 << (val))-1))
-#define CONF_LE(config, val) ((config) & (2*(1 << (val))-1))
-
-/* Wrappers for some of the above, specific to config constants */
-
-#define NCONF_HAS(val) CONF_HAS(NCONF, val)
-#define NCONF_MSK(mask) CONF_MSK(NCONF, mask)
-#define NCONF_IS(val) CONF_IS(NCONF, val)
-#define NCONF_GE(val) CONF_GE(NCONF, val)
-#define NCONF_GT(val) CONF_GT(NCONF, val)
-#define NCONF_LT(val) CONF_LT(NCONF, val)
-#define NCONF_LE(val) CONF_LE(NCONF, val)
-
-#define LCNCONF_HAS(val) CONF_HAS(LCNCONF, val)
-#define LCNCONF_MSK(mask) CONF_MSK(LCNCONF, mask)
-#define LCNCONF_IS(val) CONF_IS(LCNCONF, val)
-#define LCNCONF_GE(val) CONF_GE(LCNCONF, val)
-#define LCNCONF_GT(val) CONF_GT(LCNCONF, val)
-#define LCNCONF_LT(val) CONF_LT(LCNCONF, val)
-#define LCNCONF_LE(val) CONF_LE(LCNCONF, val)
-
-#define D11CONF_HAS(val) CONF_HAS(D11CONF, val)
-#define D11CONF_MSK(mask) CONF_MSK(D11CONF, mask)
-#define D11CONF_IS(val) CONF_IS(D11CONF, val)
-#define D11CONF_GE(val) CONF_GE(D11CONF, val)
-#define D11CONF_GT(val) CONF_GT(D11CONF, val)
-#define D11CONF_LT(val) CONF_LT(D11CONF, val)
-#define D11CONF_LE(val) CONF_LE(D11CONF, val)
-
-#define PHYCONF_HAS(val) CONF_HAS(PHYTYPE, val)
-#define PHYCONF_IS(val) CONF_IS(PHYTYPE, val)
-
-#define NREV_IS(var, val) (NCONF_HAS(val) && (NCONF_IS(val) || ((var) == (val))))
-#define NREV_GE(var, val) (NCONF_GE(val) && (!NCONF_LT(val) || ((var) >= (val))))
-#define NREV_GT(var, val) (NCONF_GT(val) && (!NCONF_LE(val) || ((var) > (val))))
-#define NREV_LT(var, val) (NCONF_LT(val) && (!NCONF_GE(val) || ((var) < (val))))
-#define NREV_LE(var, val) (NCONF_LE(val) && (!NCONF_GT(val) || ((var) <= (val))))
-
-#define LCNREV_IS(var, val) (LCNCONF_HAS(val) && (LCNCONF_IS(val) || ((var) == (val))))
-#define LCNREV_GE(var, val) (LCNCONF_GE(val) && (!LCNCONF_LT(val) || ((var) >= (val))))
-#define LCNREV_GT(var, val) (LCNCONF_GT(val) && (!LCNCONF_LE(val) || ((var) > (val))))
-#define LCNREV_LT(var, val) (LCNCONF_LT(val) && (!LCNCONF_GE(val) || ((var) < (val))))
-#define LCNREV_LE(var, val) (LCNCONF_LE(val) && (!LCNCONF_GT(val) || ((var) <= (val))))
-
-#define D11REV_IS(var, val) (D11CONF_HAS(val) && (D11CONF_IS(val) || ((var) == (val))))
-#define D11REV_GE(var, val) (D11CONF_GE(val) && (!D11CONF_LT(val) || ((var) >= (val))))
-#define D11REV_GT(var, val) (D11CONF_GT(val) && (!D11CONF_LE(val) || ((var) > (val))))
-#define D11REV_LT(var, val) (D11CONF_LT(val) && (!D11CONF_GE(val) || ((var) < (val))))
-#define D11REV_LE(var, val) (D11CONF_LE(val) && (!D11CONF_GT(val) || ((var) <= (val))))
-
-#define PHYTYPE_IS(var, val) (PHYCONF_HAS(val) && (PHYCONF_IS(val) || ((var) == (val))))
-
-/* Finally, early-exit from switch case if anyone wants it... */
-
-#define CASECHECK(config, val) if (!(CONF_HAS(config, val))) break
-#define CASEMSK(config, mask) if (!(CONF_MSK(config, mask))) break
-
-#if (D11CONF ^ (D11CONF & D11_DEFAULT))
-#error "Unsupported MAC revision configured"
-#endif
-#if (NCONF ^ (NCONF & NPHY_DEFAULT))
-#error "Unsupported NPHY revision configured"
-#endif
-#if (LCNCONF ^ (LCNCONF & LCNPHY_DEFAULT))
-#error "Unsupported LPPHY revision configured"
-#endif
-
-/* *** Consistency checks *** */
-#if !D11CONF
-#error "No MAC revisions configured!"
-#endif
-
-#if !NCONF && !LCNCONF && !SSLPNCONF
-#error "No PHY configured!"
-#endif
-
-/* Set up PHYTYPE automatically: (depends on PHY_TYPE_X, from d11.h) */
-
-#define _PHYCONF_N (1 << PHY_TYPE_N)
-
-#if LCNCONF
-#define _PHYCONF_LCN (1 << PHY_TYPE_LCN)
-#else
-#define _PHYCONF_LCN 0
-#endif /* LCNCONF */
-
-#if SSLPNCONF
-#define _PHYCONF_SSLPN (1 << PHY_TYPE_SSN)
-#else
-#define _PHYCONF_SSLPN 0
-#endif /* SSLPNCONF */
-
-#define PHYTYPE (_PHYCONF_N | _PHYCONF_LCN | _PHYCONF_SSLPN)
-
-/* Utility macro to identify 802.11n (HT) capable PHYs */
-#define PHYTYPE_11N_CAP(phytype) \
- (PHYTYPE_IS(phytype, PHY_TYPE_N) || \
- PHYTYPE_IS(phytype, PHY_TYPE_LCN) || \
- PHYTYPE_IS(phytype, PHY_TYPE_SSN))
-
-/* Last but not least: shorter wlc-specific var checks */
-#define WLCISNPHY(band) PHYTYPE_IS((band)->phytype, PHY_TYPE_N)
-#define WLCISLCNPHY(band) PHYTYPE_IS((band)->phytype, PHY_TYPE_LCN)
-#define WLCISSSLPNPHY(band) PHYTYPE_IS((band)->phytype, PHY_TYPE_SSN)
-
-#define WLC_PHY_11N_CAP(band) PHYTYPE_11N_CAP((band)->phytype)
-
-/**********************************************************************
- * ------------- End of Core phy/rev configuration. ----------------- *
- * ********************************************************************
- */
-
-/*************************************************
- * Defaults for tunables (e.g. sizing constants)
- *
- * For each new tunable, add a member to the end
- * of wlc_tunables_t in wlc_pub.h to enable
- * runtime checks of tunable values. (Directly
- * using the macros in code invalidates ROM code)
- *
- * ***********************************************
- */
-#ifndef NTXD
-#define NTXD 256 /* Max # of entries in Tx FIFO based on 4kb page size */
-#endif /* NTXD */
-#ifndef NRXD
-#define NRXD 256 /* Max # of entries in Rx FIFO based on 4kb page size */
-#endif /* NRXD */
-
-#ifndef NRXBUFPOST
-#define NRXBUFPOST 32 /* try to keep this # rbufs posted to the chip */
-#endif /* NRXBUFPOST */
-
-#ifndef MAXSCB /* station control blocks in cache */
-#define MAXSCB 32 /* Maximum SCBs in cache for STA */
-#endif /* MAXSCB */
-
-#ifndef AMPDU_NUM_MPDU
-#define AMPDU_NUM_MPDU 16 /* max allowed number of mpdus in an ampdu (2 streams) */
-#endif /* AMPDU_NUM_MPDU */
-
-#ifndef AMPDU_NUM_MPDU_3STREAMS
-#define AMPDU_NUM_MPDU_3STREAMS 32 /* max allowed number of mpdus in an ampdu for 3+ streams */
-#endif /* AMPDU_NUM_MPDU_3STREAMS */
-
-/* Count of packet callback structures. either of following
- * 1. Set to the number of SCBs since a STA
- * can queue up a rate callback for each IBSS STA it knows about, and an AP can
- * queue up an "are you there?" Null Data callback for each associated STA
- * 2. controlled by tunable config file
- */
-#ifndef MAXPKTCB
-#define MAXPKTCB MAXSCB /* Max number of packet callbacks */
-#endif /* MAXPKTCB */
-
-#ifndef CTFPOOLSZ
-#define CTFPOOLSZ 128
-#endif /* CTFPOOLSZ */
-
-/* NetBSD also needs to keep track of this */
-#define WLC_MAX_UCODE_BSS (16) /* Number of BSS handled in ucode bcn/prb */
-#define WLC_MAX_UCODE_BSS4 (4) /* Number of BSS handled in sw bcn/prb */
-#ifndef WLC_MAXBSSCFG
-#define WLC_MAXBSSCFG (1) /* max # BSS configs */
-#endif /* WLC_MAXBSSCFG */
-
-#ifndef MAXBSS
-#define MAXBSS 64 /* max # available networks */
-#endif /* MAXBSS */
-
-#ifndef WLC_DATAHIWAT
-#define WLC_DATAHIWAT 50 /* data msg txq hiwat mark */
-#endif /* WLC_DATAHIWAT */
-
-#ifndef WLC_AMPDUDATAHIWAT
-#define WLC_AMPDUDATAHIWAT 255
-#endif /* WLC_AMPDUDATAHIWAT */
-
-/* bounded rx loops */
-#ifndef RXBND
-#define RXBND 8 /* max # frames to process in wlc_recv() */
-#endif /* RXBND */
-#ifndef TXSBND
-#define TXSBND 8 /* max # tx status to process in wlc_txstatus() */
-#endif /* TXSBND */
-
-#define BAND_5G(bt) ((bt) == WLC_BAND_5G)
-#define BAND_2G(bt) ((bt) == WLC_BAND_2G)
-
-#define WLBANDINITDATA(_data) _data
-#define WLBANDINITFN(_fn) _fn
-
-#endif /* _wlc_cfg_h_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_key.h b/drivers/staging/brcm80211/brcmsmac/wlc_key.h
deleted file mode 100644
index cab10c73793..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/wlc_key.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _wlc_key_h_
-#define _wlc_key_h_
-
-struct scb;
-struct wlc_info;
-struct wlc_bsscfg;
-/* Maximum # of keys that wl driver supports in S/W.
- * Keys supported in H/W is less than or equal to WSEC_MAX_KEYS.
- */
-#define WSEC_MAX_KEYS 54 /* Max # of keys (50 + 4 default keys) */
-#define WLC_DEFAULT_KEYS 4 /* Default # of keys */
-
-#define WSEC_MAX_WOWL_KEYS 5 /* Max keys in WOWL mode (1 + 4 default keys) */
-
-#define WPA2_GTK_MAX 3
-
-/*
-* Max # of keys currently supported:
-*
-* s/w keys if WSEC_SW(wlc->wsec).
-* h/w keys otherwise.
-*/
-#define WLC_MAX_WSEC_KEYS(wlc) WSEC_MAX_KEYS
-
-/* number of 802.11 default (non-paired, group keys) */
-#define WSEC_MAX_DEFAULT_KEYS 4 /* # of default keys */
-
-/* Max # of hardware keys supported */
-#define WLC_MAX_WSEC_HW_KEYS(wlc) WSEC_MAX_RCMTA_KEYS
-
-/* Max # of hardware TKIP MIC keys supported */
-#define WLC_MAX_TKMIC_HW_KEYS(wlc) (WSEC_MAX_TKMIC_ENGINE_KEYS)
-
-#define WSEC_HW_TKMIC_KEY(wlc, key, bsscfg) \
- ((((wlc)->machwcap & MCAP_TKIPMIC)) && \
- (key) && ((key)->algo == CRYPTO_ALGO_TKIP) && \
- !WSEC_SOFTKEY(wlc, key, bsscfg) && \
- WSEC_KEY_INDEX(wlc, key) >= WLC_DEFAULT_KEYS && \
- (WSEC_KEY_INDEX(wlc, key) < WSEC_MAX_TKMIC_ENGINE_KEYS))
-
-/* index of key in key table */
-#define WSEC_KEY_INDEX(wlc, key) ((key)->idx)
-
-#define WSEC_SOFTKEY(wlc, key, bsscfg) (WLC_SW_KEYS(wlc, bsscfg) || \
- WSEC_KEY_INDEX(wlc, key) >= WLC_MAX_WSEC_HW_KEYS(wlc))
-
-/* get a key, non-NULL only if key allocated and not clear */
-#define WSEC_KEY(wlc, i) (((wlc)->wsec_keys[i] && (wlc)->wsec_keys[i]->len) ? \
- (wlc)->wsec_keys[i] : NULL)
-
-#define WSEC_SCB_KEY_VALID(scb) (((scb)->key && (scb)->key->len) ? true : false)
-
-/* default key */
-#define WSEC_BSS_DEFAULT_KEY(bsscfg) (((bsscfg)->wsec_index == -1) ? \
- (struct wsec_key *)NULL:(bsscfg)->bss_def_keys[(bsscfg)->wsec_index])
-
-/* Macros for key management in IBSS mode */
-#define WSEC_IBSS_MAX_PEERS 16 /* Max # of IBSS Peers */
-#define WSEC_IBSS_RCMTA_INDEX(idx) \
- (((idx - WSEC_MAX_DEFAULT_KEYS) % WSEC_IBSS_MAX_PEERS) + WSEC_MAX_DEFAULT_KEYS)
-
-/* contiguous # key slots for infrastructure mode STA */
-#define WSEC_BSS_STA_KEY_GROUP_SIZE 5
-
-typedef struct wsec_iv {
- u32 hi; /* upper 32 bits of IV */
- u16 lo; /* lower 16 bits of IV */
-} wsec_iv_t;
-
-#define WLC_NUMRXIVS 16 /* # rx IVs (one per 802.11e TID) */
-
-typedef struct wsec_key {
- u8 ea[ETH_ALEN]; /* per station */
- u8 idx; /* key index in wsec_keys array */
- u8 id; /* key ID [0-3] */
- u8 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
- u8 rcmta; /* rcmta entry index, same as idx by default */
- u16 flags; /* misc flags */
- u8 algo_hw; /* cache for hw register */
- u8 aes_mode; /* cache for hw register */
- s8 iv_len; /* IV length */
- s8 icv_len; /* ICV length */
- u32 len; /* key length..don't move this var */
- /* data is 4byte aligned */
- u8 data[WLAN_MAX_KEY_LEN]; /* key data */
- wsec_iv_t rxiv[WLC_NUMRXIVS]; /* Rx IV (one per TID) */
- wsec_iv_t txiv; /* Tx IV */
-
-} wsec_key_t;
-
-#define broken_roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
-
-/* For use with wsec_key_t.flags */
-
-#define WSEC_BS_UPDATE (1 << 0) /* Indicates hw needs key update on BS switch */
-#define WSEC_PRIMARY_KEY (1 << 1) /* Indicates this key is the primary (ie tx) key */
-#define WSEC_TKIP_ERROR (1 << 2) /* Provoke deliberate MIC error */
-#define WSEC_REPLAY_ERROR (1 << 3) /* Provoke deliberate replay */
-#define WSEC_IBSS_PEER_GROUP_KEY (1 << 7) /* Flag: group key for a IBSS PEER */
-#define WSEC_ICV_ERROR (1 << 8) /* Provoke deliberate ICV error */
-
-#define wlc_key_insert(a, b, c, d, e, f, g, h, i, j) (-EBADE)
-#define wlc_key_update(a, b, c) do {} while (0)
-#define wlc_key_remove(a, b, c) do {} while (0)
-#define wlc_key_remove_all(a, b) do {} while (0)
-#define wlc_key_delete(a, b, c) do {} while (0)
-#define wlc_scb_key_delete(a, b) do {} while (0)
-#define wlc_key_lookup(a, b, c, d, e) (NULL)
-#define wlc_key_hw_init_all(a) do {} while (0)
-#define wlc_key_hw_init(a, b, c) do {} while (0)
-#define wlc_key_hw_wowl_init(a, b, c, d) do {} while (0)
-#define wlc_key_sw_wowl_update(a, b, c, d, e) do {} while (0)
-#define wlc_key_sw_wowl_create(a, b, c) (-EBADE)
-#define wlc_key_iv_update(a, b, c, d, e) do {(void)e; } while (0)
-#define wlc_key_iv_init(a, b, c) do {} while (0)
-#define wlc_key_set_error(a, b, c) (-EBADE)
-#define wlc_key_dump_hw(a, b) (-EBADE)
-#define wlc_key_dump_sw(a, b) (-EBADE)
-#define wlc_key_defkeyflag(a) (0)
-#define wlc_rcmta_add_bssid(a, b) do {} while (0)
-#define wlc_rcmta_del_bssid(a, b) do {} while (0)
-#define wlc_key_scb_delete(a, b) do {} while (0)
-
-#endif /* _wlc_key_h_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_phy_shim.c b/drivers/staging/brcm80211/brcmsmac/wlc_phy_shim.c
deleted file mode 100644
index 16fea021f4a..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/wlc_phy_shim.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This is "two-way" interface, acting as the SHIM layer between WL and PHY layer.
- * WL driver can optinally call this translation layer to do some preprocessing, then reach PHY.
- * On the PHY->WL driver direction, all calls go through this layer since PHY doesn't have the
- * access to wlc_hw pointer.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <proto/802.11.h>
-#include <bcmdefs.h>
-#include <bcmutils.h>
-#include <bcmwifi.h>
-#include <aiutils.h>
-#include <wlioctl.h>
-#include <sbconfig.h>
-#include <sbchipc.h>
-#include <pcicfg.h>
-#include <sbhnddma.h>
-#include <hnddma.h>
-#include <wlc_pmu.h>
-
-#include "wlc_types.h"
-#include "wl_dbg.h"
-#include "wlc_cfg.h"
-#include "d11.h"
-#include "wlc_rate.h"
-#include "wlc_scb.h"
-#include "wlc_pub.h"
-#include "phy/wlc_phy_hal.h"
-#include "wlc_channel.h"
-#include "bcmsrom.h"
-#include "wlc_key.h"
-#include "wlc_bmac.h"
-#include "wlc_phy_hal.h"
-#include "wl_export.h"
-#include "wlc_main.h"
-#include "wlc_phy_shim.h"
-
-/* PHY SHIM module specific state */
-struct wlc_phy_shim_info {
- struct wlc_hw_info *wlc_hw; /* pointer to main wlc_hw structure */
- void *wlc; /* pointer to main wlc structure */
- void *wl; /* pointer to os-specific private state */
-};
-
-wlc_phy_shim_info_t *wlc_phy_shim_attach(struct wlc_hw_info *wlc_hw,
- void *wl, void *wlc) {
- wlc_phy_shim_info_t *physhim = NULL;
-
- physhim = kzalloc(sizeof(wlc_phy_shim_info_t), GFP_ATOMIC);
- if (!physhim) {
- wiphy_err(wlc_hw->wlc->wiphy,
- "wl%d: wlc_phy_shim_attach: out of mem\n",
- wlc_hw->unit);
- return NULL;
- }
- physhim->wlc_hw = wlc_hw;
- physhim->wlc = wlc;
- physhim->wl = wl;
-
- return physhim;
-}
-
-void wlc_phy_shim_detach(wlc_phy_shim_info_t *physhim)
-{
- kfree(physhim);
-}
-
-struct wlapi_timer *wlapi_init_timer(wlc_phy_shim_info_t *physhim,
- void (*fn) (void *arg), void *arg,
- const char *name)
-{
- return (struct wlapi_timer *)wl_init_timer(physhim->wl, fn, arg, name);
-}
-
-void wlapi_free_timer(wlc_phy_shim_info_t *physhim, struct wlapi_timer *t)
-{
- wl_free_timer(physhim->wl, (struct wl_timer *)t);
-}
-
-void
-wlapi_add_timer(wlc_phy_shim_info_t *physhim, struct wlapi_timer *t, uint ms,
- int periodic)
-{
- wl_add_timer(physhim->wl, (struct wl_timer *)t, ms, periodic);
-}
-
-bool wlapi_del_timer(wlc_phy_shim_info_t *physhim, struct wlapi_timer *t)
-{
- return wl_del_timer(physhim->wl, (struct wl_timer *)t);
-}
-
-void wlapi_intrson(wlc_phy_shim_info_t *physhim)
-{
- wl_intrson(physhim->wl);
-}
-
-u32 wlapi_intrsoff(wlc_phy_shim_info_t *physhim)
-{
- return wl_intrsoff(physhim->wl);
-}
-
-void wlapi_intrsrestore(wlc_phy_shim_info_t *physhim, u32 macintmask)
-{
- wl_intrsrestore(physhim->wl, macintmask);
-}
-
-void wlapi_bmac_write_shm(wlc_phy_shim_info_t *physhim, uint offset, u16 v)
-{
- wlc_bmac_write_shm(physhim->wlc_hw, offset, v);
-}
-
-u16 wlapi_bmac_read_shm(wlc_phy_shim_info_t *physhim, uint offset)
-{
- return wlc_bmac_read_shm(physhim->wlc_hw, offset);
-}
-
-void
-wlapi_bmac_mhf(wlc_phy_shim_info_t *physhim, u8 idx, u16 mask,
- u16 val, int bands)
-{
- wlc_bmac_mhf(physhim->wlc_hw, idx, mask, val, bands);
-}
-
-void wlapi_bmac_corereset(wlc_phy_shim_info_t *physhim, u32 flags)
-{
- wlc_bmac_corereset(physhim->wlc_hw, flags);
-}
-
-void wlapi_suspend_mac_and_wait(wlc_phy_shim_info_t *physhim)
-{
- wlc_suspend_mac_and_wait(physhim->wlc);
-}
-
-void wlapi_switch_macfreq(wlc_phy_shim_info_t *physhim, u8 spurmode)
-{
- wlc_bmac_switch_macfreq(physhim->wlc_hw, spurmode);
-}
-
-void wlapi_enable_mac(wlc_phy_shim_info_t *physhim)
-{
- wlc_enable_mac(physhim->wlc);
-}
-
-void wlapi_bmac_mctrl(wlc_phy_shim_info_t *physhim, u32 mask, u32 val)
-{
- wlc_bmac_mctrl(physhim->wlc_hw, mask, val);
-}
-
-void wlapi_bmac_phy_reset(wlc_phy_shim_info_t *physhim)
-{
- wlc_bmac_phy_reset(physhim->wlc_hw);
-}
-
-void wlapi_bmac_bw_set(wlc_phy_shim_info_t *physhim, u16 bw)
-{
- wlc_bmac_bw_set(physhim->wlc_hw, bw);
-}
-
-u16 wlapi_bmac_get_txant(wlc_phy_shim_info_t *physhim)
-{
- return wlc_bmac_get_txant(physhim->wlc_hw);
-}
-
-void wlapi_bmac_phyclk_fgc(wlc_phy_shim_info_t *physhim, bool clk)
-{
- wlc_bmac_phyclk_fgc(physhim->wlc_hw, clk);
-}
-
-void wlapi_bmac_macphyclk_set(wlc_phy_shim_info_t *physhim, bool clk)
-{
- wlc_bmac_macphyclk_set(physhim->wlc_hw, clk);
-}
-
-void wlapi_bmac_core_phypll_ctl(wlc_phy_shim_info_t *physhim, bool on)
-{
- wlc_bmac_core_phypll_ctl(physhim->wlc_hw, on);
-}
-
-void wlapi_bmac_core_phypll_reset(wlc_phy_shim_info_t *physhim)
-{
- wlc_bmac_core_phypll_reset(physhim->wlc_hw);
-}
-
-void wlapi_bmac_ucode_wake_override_phyreg_set(wlc_phy_shim_info_t *physhim)
-{
- wlc_ucode_wake_override_set(physhim->wlc_hw, WLC_WAKE_OVERRIDE_PHYREG);
-}
-
-void wlapi_bmac_ucode_wake_override_phyreg_clear(wlc_phy_shim_info_t *physhim)
-{
- wlc_ucode_wake_override_clear(physhim->wlc_hw,
- WLC_WAKE_OVERRIDE_PHYREG);
-}
-
-void
-wlapi_bmac_write_template_ram(wlc_phy_shim_info_t *physhim, int offset,
- int len, void *buf)
-{
- wlc_bmac_write_template_ram(physhim->wlc_hw, offset, len, buf);
-}
-
-u16 wlapi_bmac_rate_shm_offset(wlc_phy_shim_info_t *physhim, u8 rate)
-{
- return wlc_bmac_rate_shm_offset(physhim->wlc_hw, rate);
-}
-
-void wlapi_ucode_sample_init(wlc_phy_shim_info_t *physhim)
-{
-}
-
-void
-wlapi_copyfrom_objmem(wlc_phy_shim_info_t *physhim, uint offset, void *buf,
- int len, u32 sel)
-{
- wlc_bmac_copyfrom_objmem(physhim->wlc_hw, offset, buf, len, sel);
-}
-
-void
-wlapi_copyto_objmem(wlc_phy_shim_info_t *physhim, uint offset, const void *buf,
- int l, u32 sel)
-{
- wlc_bmac_copyto_objmem(physhim->wlc_hw, offset, buf, l, sel);
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_phy_shim.h b/drivers/staging/brcm80211/brcmsmac/wlc_phy_shim.h
deleted file mode 100644
index c151a5d8c69..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/wlc_phy_shim.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _wlc_phy_shim_h_
-#define _wlc_phy_shim_h_
-
-#define RADAR_TYPE_NONE 0 /* Radar type None */
-#define RADAR_TYPE_ETSI_1 1 /* ETSI 1 Radar type */
-#define RADAR_TYPE_ETSI_2 2 /* ETSI 2 Radar type */
-#define RADAR_TYPE_ETSI_3 3 /* ETSI 3 Radar type */
-#define RADAR_TYPE_ITU_E 4 /* ITU E Radar type */
-#define RADAR_TYPE_ITU_K 5 /* ITU K Radar type */
-#define RADAR_TYPE_UNCLASSIFIED 6 /* Unclassified Radar type */
-#define RADAR_TYPE_BIN5 7 /* long pulse radar type */
-#define RADAR_TYPE_STG2 8 /* staggered-2 radar */
-#define RADAR_TYPE_STG3 9 /* staggered-3 radar */
-#define RADAR_TYPE_FRA 10 /* French radar */
-
-/* French radar pulse widths */
-#define FRA_T1_20MHZ 52770
-#define FRA_T2_20MHZ 61538
-#define FRA_T3_20MHZ 66002
-#define FRA_T1_40MHZ 105541
-#define FRA_T2_40MHZ 123077
-#define FRA_T3_40MHZ 132004
-#define FRA_ERR_20MHZ 60
-#define FRA_ERR_40MHZ 120
-
-#define ANTSEL_NA 0 /* No boardlevel selection available */
-#define ANTSEL_2x4 1 /* 2x4 boardlevel selection available */
-#define ANTSEL_2x3 2 /* 2x3 CB2 boardlevel selection available */
-
-/* Rx Antenna diversity control values */
-#define ANT_RX_DIV_FORCE_0 0 /* Use antenna 0 */
-#define ANT_RX_DIV_FORCE_1 1 /* Use antenna 1 */
-#define ANT_RX_DIV_START_1 2 /* Choose starting with 1 */
-#define ANT_RX_DIV_START_0 3 /* Choose starting with 0 */
-#define ANT_RX_DIV_ENABLE 3 /* APHY bbConfig Enable RX Diversity */
-#define ANT_RX_DIV_DEF ANT_RX_DIV_START_0 /* default antdiv setting */
-
-/* Forward declarations */
-struct wlc_hw_info;
-typedef struct wlc_phy_shim_info wlc_phy_shim_info_t;
-
-extern wlc_phy_shim_info_t *wlc_phy_shim_attach(struct wlc_hw_info *wlc_hw,
- void *wl, void *wlc);
-extern void wlc_phy_shim_detach(wlc_phy_shim_info_t *physhim);
-
-/* PHY to WL utility functions */
-struct wlapi_timer;
-extern struct wlapi_timer *wlapi_init_timer(wlc_phy_shim_info_t *physhim,
- void (*fn) (void *arg), void *arg,
- const char *name);
-extern void wlapi_free_timer(wlc_phy_shim_info_t *physhim,
- struct wlapi_timer *t);
-extern void wlapi_add_timer(wlc_phy_shim_info_t *physhim,
- struct wlapi_timer *t, uint ms, int periodic);
-extern bool wlapi_del_timer(wlc_phy_shim_info_t *physhim,
- struct wlapi_timer *t);
-extern void wlapi_intrson(wlc_phy_shim_info_t *physhim);
-extern u32 wlapi_intrsoff(wlc_phy_shim_info_t *physhim);
-extern void wlapi_intrsrestore(wlc_phy_shim_info_t *physhim,
- u32 macintmask);
-
-extern void wlapi_bmac_write_shm(wlc_phy_shim_info_t *physhim, uint offset,
- u16 v);
-extern u16 wlapi_bmac_read_shm(wlc_phy_shim_info_t *physhim, uint offset);
-extern void wlapi_bmac_mhf(wlc_phy_shim_info_t *physhim, u8 idx,
- u16 mask, u16 val, int bands);
-extern void wlapi_bmac_corereset(wlc_phy_shim_info_t *physhim, u32 flags);
-extern void wlapi_suspend_mac_and_wait(wlc_phy_shim_info_t *physhim);
-extern void wlapi_switch_macfreq(wlc_phy_shim_info_t *physhim, u8 spurmode);
-extern void wlapi_enable_mac(wlc_phy_shim_info_t *physhim);
-extern void wlapi_bmac_mctrl(wlc_phy_shim_info_t *physhim, u32 mask,
- u32 val);
-extern void wlapi_bmac_phy_reset(wlc_phy_shim_info_t *physhim);
-extern void wlapi_bmac_bw_set(wlc_phy_shim_info_t *physhim, u16 bw);
-extern void wlapi_bmac_phyclk_fgc(wlc_phy_shim_info_t *physhim, bool clk);
-extern void wlapi_bmac_macphyclk_set(wlc_phy_shim_info_t *physhim, bool clk);
-extern void wlapi_bmac_core_phypll_ctl(wlc_phy_shim_info_t *physhim, bool on);
-extern void wlapi_bmac_core_phypll_reset(wlc_phy_shim_info_t *physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_set(wlc_phy_shim_info_t *
- physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_clear(wlc_phy_shim_info_t *
- physhim);
-extern void wlapi_bmac_write_template_ram(wlc_phy_shim_info_t *physhim, int o,
- int len, void *buf);
-extern u16 wlapi_bmac_rate_shm_offset(wlc_phy_shim_info_t *physhim,
- u8 rate);
-extern void wlapi_ucode_sample_init(wlc_phy_shim_info_t *physhim);
-extern void wlapi_copyfrom_objmem(wlc_phy_shim_info_t *physhim, uint,
- void *buf, int, u32 sel);
-extern void wlapi_copyto_objmem(wlc_phy_shim_info_t *physhim, uint,
- const void *buf, int, u32);
-
-extern void wlapi_high_update_phy_mode(wlc_phy_shim_info_t *physhim,
- u32 phy_mode);
-extern u16 wlapi_bmac_get_txant(wlc_phy_shim_info_t *physhim);
-#endif /* _wlc_phy_shim_h_ */
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_pmu.c b/drivers/staging/brcm80211/brcmsmac/wlc_pmu.c
deleted file mode 100644
index 82986bd1ccf..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/wlc_pmu.c
+++ /dev/null
@@ -1,1929 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-
-#include <bcmdevs.h>
-#include <sbchipc.h>
-#include <bcmutils.h>
-#include <bcmnvram.h>
-#include "wlc_pmu.h"
-
-/*
- * d11 slow to fast clock transition time in slow clock cycles
- */
-#define D11SCC_SLOW2FAST_TRANSITION 2
-
-/*
- * external LPO crystal frequency
- */
-#define EXT_ILP_HZ 32768
-
-/*
- * Duration for ILP clock frequency measurement in milliseconds
- *
- * remark: 1000 must be an integer multiple of this duration
- */
-#define ILP_CALC_DUR 10
-
-/*
- * FVCO frequency
- */
-#define FVCO_880 880000 /* 880MHz */
-#define FVCO_1760 1760000 /* 1760MHz */
-#define FVCO_1440 1440000 /* 1440MHz */
-#define FVCO_960 960000 /* 960MHz */
-
-/*
- * PMU crystal table indices for 1440MHz fvco
- */
-#define PMU1_XTALTAB0_1440_12000K 0
-#define PMU1_XTALTAB0_1440_13000K 1
-#define PMU1_XTALTAB0_1440_14400K 2
-#define PMU1_XTALTAB0_1440_15360K 3
-#define PMU1_XTALTAB0_1440_16200K 4
-#define PMU1_XTALTAB0_1440_16800K 5
-#define PMU1_XTALTAB0_1440_19200K 6
-#define PMU1_XTALTAB0_1440_19800K 7
-#define PMU1_XTALTAB0_1440_20000K 8
-#define PMU1_XTALTAB0_1440_25000K 9
-#define PMU1_XTALTAB0_1440_26000K 10
-#define PMU1_XTALTAB0_1440_30000K 11
-#define PMU1_XTALTAB0_1440_37400K 12
-#define PMU1_XTALTAB0_1440_38400K 13
-#define PMU1_XTALTAB0_1440_40000K 14
-#define PMU1_XTALTAB0_1440_48000K 15
-
-/*
- * PMU crystal table indices for 960MHz fvco
- */
-#define PMU1_XTALTAB0_960_12000K 0
-#define PMU1_XTALTAB0_960_13000K 1
-#define PMU1_XTALTAB0_960_14400K 2
-#define PMU1_XTALTAB0_960_15360K 3
-#define PMU1_XTALTAB0_960_16200K 4
-#define PMU1_XTALTAB0_960_16800K 5
-#define PMU1_XTALTAB0_960_19200K 6
-#define PMU1_XTALTAB0_960_19800K 7
-#define PMU1_XTALTAB0_960_20000K 8
-#define PMU1_XTALTAB0_960_25000K 9
-#define PMU1_XTALTAB0_960_26000K 10
-#define PMU1_XTALTAB0_960_30000K 11
-#define PMU1_XTALTAB0_960_37400K 12
-#define PMU1_XTALTAB0_960_38400K 13
-#define PMU1_XTALTAB0_960_40000K 14
-#define PMU1_XTALTAB0_960_48000K 15
-
-/*
- * PMU crystal table indices for 880MHz fvco
- */
-#define PMU1_XTALTAB0_880_12000K 0
-#define PMU1_XTALTAB0_880_13000K 1
-#define PMU1_XTALTAB0_880_14400K 2
-#define PMU1_XTALTAB0_880_15360K 3
-#define PMU1_XTALTAB0_880_16200K 4
-#define PMU1_XTALTAB0_880_16800K 5
-#define PMU1_XTALTAB0_880_19200K 6
-#define PMU1_XTALTAB0_880_19800K 7
-#define PMU1_XTALTAB0_880_20000K 8
-#define PMU1_XTALTAB0_880_24000K 9
-#define PMU1_XTALTAB0_880_25000K 10
-#define PMU1_XTALTAB0_880_26000K 11
-#define PMU1_XTALTAB0_880_30000K 12
-#define PMU1_XTALTAB0_880_37400K 13
-#define PMU1_XTALTAB0_880_38400K 14
-#define PMU1_XTALTAB0_880_40000K 15
-
-/*
- * crystal frequency values
- */
-#define XTAL_FREQ_24000MHZ 24000
-#define XTAL_FREQ_30000MHZ 30000
-#define XTAL_FREQ_37400MHZ 37400
-#define XTAL_FREQ_48000MHZ 48000
-
-/*
- * Resource dependencies mask change action
- *
- * @RES_DEPEND_SET: Override the dependencies mask
- * @RES_DEPEND_ADD: Add to the dependencies mask
- * @RES_DEPEND_REMOVE: Remove from the dependencies mask
- */
-#define RES_DEPEND_SET 0
-#define RES_DEPEND_ADD 1
-#define RES_DEPEND_REMOVE -1
-
-/* d11 slow to fast clock transition time in slow clock cycles */
-#define D11SCC_SLOW2FAST_TRANSITION 2
-
-/* Setup resource up/down timers */
-typedef struct {
- u8 resnum;
- u16 updown;
-} pmu_res_updown_t;
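
The updown value packs both timers into one 16-bit register image. A minimal sketch of the assumed layout (hypothetical helpers, not part of the driver; the high byte is the up time as read back by si_pmu_res_uptime() further below, and the low byte is assumed by symmetry to be the down time, both in ILP clock cycles):

	static inline u8 updown_uptime(u16 updown)
	{
		return (updown >> 8) & 0xff;	/* high byte: up time */
	}

	static inline u8 updown_downtime(u16 updown)
	{
		return updown & 0xff;		/* low byte: down time */
	}

For example, the bcm4329 entry {RES4329_XTAL_PU, 0x1501} would then mean an up time of 0x15 and a down time of 0x01 ILP cycles.
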
-
-/* Change resource dependency masks */
-typedef struct {
- u32 res_mask; /* resources (chip specific) */
- s8 action; /* action */
- u32 depend_mask; /* changes to the dependencies mask */
- bool(*filter) (si_t *sih); /* action is taken when filter is NULL or returns true */
-} pmu_res_depend_t;
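
The action field selects one of the three RES_DEPEND_* operations that si_pmu_res_init() applies to a resource's dependency mask. The mask arithmetic, written as a pure helper (a sketch only, not a function in the driver):

	static u32 apply_depend_action(u32 cur_mask, s8 action, u32 depend_mask)
	{
		switch (action) {
		case RES_DEPEND_SET:		/* override the mask */
			return depend_mask;
		case RES_DEPEND_ADD:		/* OR the bits in */
			return cur_mask | depend_mask;
		case RES_DEPEND_REMOVE:		/* clear the bits */
			return cur_mask & ~depend_mask;
		default:
			return cur_mask;
		}
	}
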
-
-/* setup pll and query clock speed */
-typedef struct {
- u16 fref;
- u8 xf;
- u8 p1div;
- u8 p2div;
- u8 ndiv_int;
- u32 ndiv_frac;
-} pmu1_xtaltab0_t;
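
Each entry parameterizes the PLL for one crystal frequency. The table values below are consistent with fvco_khz ~= (fref / p1div) * p2div * (ndiv_int + ndiv_frac / 2^24); that relation is inferred from the tables themselves rather than taken from hardware documentation, so treat the helper as a sanity-check sketch only (it reuses the driver's pmu1_xtaltab0_t type):

	static u32 xtaltab0_fvco_khz(const pmu1_xtaltab0_t *xt)
	{
		/* ndiv as a Q24 fixed-point value */
		u64 ndiv_q24 = ((u64)xt->ndiv_int << 24) + xt->ndiv_frac;

		return (u32)(((u64)xt->fref * xt->p2div * ndiv_q24) /
			     ((u64)xt->p1div << 24));
	}

For instance, the 4329 entry {12000, 1, 3, 22, 0x9, 0xFFFFEF} works out to (12000 / 3) * 22 * roughly 10, about 880000 kHz, matching FVCO_880.
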
-
-/*
- * prototypes used in resource tables
- */
-static bool si_pmu_res_depfltr_bb(si_t *sih);
-static bool si_pmu_res_depfltr_ncb(si_t *sih);
-static bool si_pmu_res_depfltr_paldo(si_t *sih);
-static bool si_pmu_res_depfltr_npaldo(si_t *sih);
-
-static const pmu_res_updown_t bcm4328a0_res_updown[] = {
- {
- RES4328_EXT_SWITCHER_PWM, 0x0101}, {
- RES4328_BB_SWITCHER_PWM, 0x1f01}, {
- RES4328_BB_SWITCHER_BURST, 0x010f}, {
- RES4328_BB_EXT_SWITCHER_BURST, 0x0101}, {
- RES4328_ILP_REQUEST, 0x0202}, {
- RES4328_RADIO_SWITCHER_PWM, 0x0f01}, {
- RES4328_RADIO_SWITCHER_BURST, 0x0f01}, {
- RES4328_ROM_SWITCH, 0x0101}, {
- RES4328_PA_REF_LDO, 0x0f01}, {
- RES4328_RADIO_LDO, 0x0f01}, {
- RES4328_AFE_LDO, 0x0f01}, {
- RES4328_PLL_LDO, 0x0f01}, {
- RES4328_BG_FILTBYP, 0x0101}, {
- RES4328_TX_FILTBYP, 0x0101}, {
- RES4328_RX_FILTBYP, 0x0101}, {
- RES4328_XTAL_PU, 0x0101}, {
- RES4328_XTAL_EN, 0xa001}, {
- RES4328_BB_PLL_FILTBYP, 0x0101}, {
- RES4328_RF_PLL_FILTBYP, 0x0101}, {
- RES4328_BB_PLL_PU, 0x0701}
-};
-
-static const pmu_res_depend_t bcm4328a0_res_depend[] = {
- /* Adjust ILP request resource not to force ext/BB switchers into burst mode */
- {
- PMURES_BIT(RES4328_ILP_REQUEST),
- RES_DEPEND_SET,
- PMURES_BIT(RES4328_EXT_SWITCHER_PWM) |
- PMURES_BIT(RES4328_BB_SWITCHER_PWM), NULL}
-};
-
-static const pmu_res_updown_t bcm4325a0_res_updown_qt[] = {
- {
- RES4325_HT_AVAIL, 0x0300}, {
- RES4325_BBPLL_PWRSW_PU, 0x0101}, {
- RES4325_RFPLL_PWRSW_PU, 0x0101}, {
- RES4325_ALP_AVAIL, 0x0100}, {
- RES4325_XTAL_PU, 0x1000}, {
- RES4325_LNLDO1_PU, 0x0800}, {
- RES4325_CLDO_CBUCK_PWM, 0x0101}, {
- RES4325_CBUCK_PWM, 0x0803}
-};
-
-static const pmu_res_updown_t bcm4325a0_res_updown[] = {
- {
- RES4325_XTAL_PU, 0x1501}
-};
-
-static const pmu_res_depend_t bcm4325a0_res_depend[] = {
- /* Adjust OTP PU resource dependencies - remove BB BURST */
- {
- PMURES_BIT(RES4325_OTP_PU),
- RES_DEPEND_REMOVE,
- PMURES_BIT(RES4325_BUCK_BOOST_BURST), NULL},
- /* Adjust ALP/HT Avail resource dependencies - bring up BB along if it is used. */
- {
- PMURES_BIT(RES4325_ALP_AVAIL) | PMURES_BIT(RES4325_HT_AVAIL),
- RES_DEPEND_ADD,
- PMURES_BIT(RES4325_BUCK_BOOST_BURST) |
- PMURES_BIT(RES4325_BUCK_BOOST_PWM), si_pmu_res_depfltr_bb},
- /* Adjust HT Avail resource dependencies - bring up RF switches along with HT. */
- {
- PMURES_BIT(RES4325_HT_AVAIL),
- RES_DEPEND_ADD,
- PMURES_BIT(RES4325_RX_PWRSW_PU) |
- PMURES_BIT(RES4325_TX_PWRSW_PU) |
- PMURES_BIT(RES4325_LOGEN_PWRSW_PU) |
- PMURES_BIT(RES4325_AFE_PWRSW_PU), NULL},
- /* Adjust ALL resource dependencies - remove CBUCK dependencies if it is not used. */
- {
- PMURES_BIT(RES4325_ILP_REQUEST) |
- PMURES_BIT(RES4325_ABUCK_BURST) |
- PMURES_BIT(RES4325_ABUCK_PWM) |
- PMURES_BIT(RES4325_LNLDO1_PU) |
- PMURES_BIT(RES4325C1_LNLDO2_PU) |
- PMURES_BIT(RES4325_XTAL_PU) |
- PMURES_BIT(RES4325_ALP_AVAIL) |
- PMURES_BIT(RES4325_RX_PWRSW_PU) |
- PMURES_BIT(RES4325_TX_PWRSW_PU) |
- PMURES_BIT(RES4325_RFPLL_PWRSW_PU) |
- PMURES_BIT(RES4325_LOGEN_PWRSW_PU) |
- PMURES_BIT(RES4325_AFE_PWRSW_PU) |
- PMURES_BIT(RES4325_BBPLL_PWRSW_PU) |
- PMURES_BIT(RES4325_HT_AVAIL), RES_DEPEND_REMOVE,
- PMURES_BIT(RES4325B0_CBUCK_LPOM) |
- PMURES_BIT(RES4325B0_CBUCK_BURST) |
- PMURES_BIT(RES4325B0_CBUCK_PWM), si_pmu_res_depfltr_ncb}
-};
-
-static const pmu_res_updown_t bcm4315a0_res_updown_qt[] = {
- {
- RES4315_HT_AVAIL, 0x0101}, {
- RES4315_XTAL_PU, 0x0100}, {
- RES4315_LNLDO1_PU, 0x0100}, {
- RES4315_PALDO_PU, 0x0100}, {
- RES4315_CLDO_PU, 0x0100}, {
- RES4315_CBUCK_PWM, 0x0100}, {
- RES4315_CBUCK_BURST, 0x0100}, {
- RES4315_CBUCK_LPOM, 0x0100}
-};
-
-static const pmu_res_updown_t bcm4315a0_res_updown[] = {
- {
- RES4315_XTAL_PU, 0x2501}
-};
-
-static const pmu_res_depend_t bcm4315a0_res_depend[] = {
- /* Adjust OTP PU resource dependencies - no need for PALDO unless writing */
- {
- PMURES_BIT(RES4315_OTP_PU),
- RES_DEPEND_REMOVE,
- PMURES_BIT(RES4315_PALDO_PU), si_pmu_res_depfltr_npaldo},
- /* Adjust ALP/HT Avail resource dependencies - bring up PALDO along if it is used. */
- {
- PMURES_BIT(RES4315_ALP_AVAIL) | PMURES_BIT(RES4315_HT_AVAIL),
- RES_DEPEND_ADD,
- PMURES_BIT(RES4315_PALDO_PU), si_pmu_res_depfltr_paldo},
- /* Adjust HT Avail resource dependencies - bring up RF switches along with HT. */
- {
- PMURES_BIT(RES4315_HT_AVAIL),
- RES_DEPEND_ADD,
- PMURES_BIT(RES4315_RX_PWRSW_PU) |
- PMURES_BIT(RES4315_TX_PWRSW_PU) |
- PMURES_BIT(RES4315_LOGEN_PWRSW_PU) |
- PMURES_BIT(RES4315_AFE_PWRSW_PU), NULL},
- /* Adjust ALL resource dependencies - remove CBUCK dependencies if it is not used. */
- {
- PMURES_BIT(RES4315_CLDO_PU) | PMURES_BIT(RES4315_ILP_REQUEST) |
- PMURES_BIT(RES4315_LNLDO1_PU) |
- PMURES_BIT(RES4315_OTP_PU) |
- PMURES_BIT(RES4315_LNLDO2_PU) |
- PMURES_BIT(RES4315_XTAL_PU) |
- PMURES_BIT(RES4315_ALP_AVAIL) |
- PMURES_BIT(RES4315_RX_PWRSW_PU) |
- PMURES_BIT(RES4315_TX_PWRSW_PU) |
- PMURES_BIT(RES4315_RFPLL_PWRSW_PU) |
- PMURES_BIT(RES4315_LOGEN_PWRSW_PU) |
- PMURES_BIT(RES4315_AFE_PWRSW_PU) |
- PMURES_BIT(RES4315_BBPLL_PWRSW_PU) |
- PMURES_BIT(RES4315_HT_AVAIL), RES_DEPEND_REMOVE,
- PMURES_BIT(RES4315_CBUCK_LPOM) |
- PMURES_BIT(RES4315_CBUCK_BURST) |
- PMURES_BIT(RES4315_CBUCK_PWM), si_pmu_res_depfltr_ncb}
-};
-
- /* 4329 specific; revisit this issue later */
-static const pmu_res_updown_t bcm4329_res_updown[] = {
- {
- RES4329_XTAL_PU, 0x1501}
-};
-
-static const pmu_res_depend_t bcm4329_res_depend[] = {
- /* Adjust HT Avail resource dependencies */
- {
- PMURES_BIT(RES4329_HT_AVAIL),
- RES_DEPEND_ADD,
- PMURES_BIT(RES4329_CBUCK_LPOM) |
- PMURES_BIT(RES4329_CBUCK_BURST) |
- PMURES_BIT(RES4329_CBUCK_PWM) |
- PMURES_BIT(RES4329_CLDO_PU) |
- PMURES_BIT(RES4329_PALDO_PU) |
- PMURES_BIT(RES4329_LNLDO1_PU) |
- PMURES_BIT(RES4329_XTAL_PU) |
- PMURES_BIT(RES4329_ALP_AVAIL) |
- PMURES_BIT(RES4329_RX_PWRSW_PU) |
- PMURES_BIT(RES4329_TX_PWRSW_PU) |
- PMURES_BIT(RES4329_RFPLL_PWRSW_PU) |
- PMURES_BIT(RES4329_LOGEN_PWRSW_PU) |
- PMURES_BIT(RES4329_AFE_PWRSW_PU) |
- PMURES_BIT(RES4329_BBPLL_PWRSW_PU), NULL}
-};
-
-static const pmu_res_updown_t bcm4319a0_res_updown_qt[] = {
- {
- RES4319_HT_AVAIL, 0x0101}, {
- RES4319_XTAL_PU, 0x0100}, {
- RES4319_LNLDO1_PU, 0x0100}, {
- RES4319_PALDO_PU, 0x0100}, {
- RES4319_CLDO_PU, 0x0100}, {
- RES4319_CBUCK_PWM, 0x0100}, {
- RES4319_CBUCK_BURST, 0x0100}, {
- RES4319_CBUCK_LPOM, 0x0100}
-};
-
-static const pmu_res_updown_t bcm4319a0_res_updown[] = {
- {
- RES4319_XTAL_PU, 0x3f01}
-};
-
-static const pmu_res_depend_t bcm4319a0_res_depend[] = {
- /* Adjust OTP PU resource dependencies - no need for PALDO unless writing */
- {
- PMURES_BIT(RES4319_OTP_PU),
- RES_DEPEND_REMOVE,
- PMURES_BIT(RES4319_PALDO_PU), si_pmu_res_depfltr_npaldo},
- /* Adjust HT Avail resource dependencies - bring up PALDO along if it is used. */
- {
- PMURES_BIT(RES4319_HT_AVAIL),
- RES_DEPEND_ADD,
- PMURES_BIT(RES4319_PALDO_PU), si_pmu_res_depfltr_paldo},
- /* Adjust HT Avail resource dependencies - bring up RF switches along with HT. */
- {
- PMURES_BIT(RES4319_HT_AVAIL),
- RES_DEPEND_ADD,
- PMURES_BIT(RES4319_RX_PWRSW_PU) |
- PMURES_BIT(RES4319_TX_PWRSW_PU) |
- PMURES_BIT(RES4319_RFPLL_PWRSW_PU) |
- PMURES_BIT(RES4319_LOGEN_PWRSW_PU) |
- PMURES_BIT(RES4319_AFE_PWRSW_PU), NULL}
-};
-
-static const pmu_res_updown_t bcm4336a0_res_updown_qt[] = {
- {
- RES4336_HT_AVAIL, 0x0101}, {
- RES4336_XTAL_PU, 0x0100}, {
- RES4336_CLDO_PU, 0x0100}, {
- RES4336_CBUCK_PWM, 0x0100}, {
- RES4336_CBUCK_BURST, 0x0100}, {
- RES4336_CBUCK_LPOM, 0x0100}
-};
-
-static const pmu_res_updown_t bcm4336a0_res_updown[] = {
- {
- RES4336_HT_AVAIL, 0x0D01}
-};
-
-static const pmu_res_depend_t bcm4336a0_res_depend[] = {
- /* Just a dummy entry for now */
- {
- PMURES_BIT(RES4336_RSVD), RES_DEPEND_ADD, 0, NULL}
-};
-
-static const pmu_res_updown_t bcm4330a0_res_updown_qt[] = {
- {
- RES4330_HT_AVAIL, 0x0101}, {
- RES4330_XTAL_PU, 0x0100}, {
- RES4330_CLDO_PU, 0x0100}, {
- RES4330_CBUCK_PWM, 0x0100}, {
- RES4330_CBUCK_BURST, 0x0100}, {
- RES4330_CBUCK_LPOM, 0x0100}
-};
-
-static const pmu_res_updown_t bcm4330a0_res_updown[] = {
- {
- RES4330_HT_AVAIL, 0x0e02}
-};
-
-static const pmu_res_depend_t bcm4330a0_res_depend[] = {
- /* Just a dummy entry for now */
- {
- PMURES_BIT(RES4330_HT_AVAIL), RES_DEPEND_ADD, 0, NULL}
-};
-
-/* the following table is based on a 1440 MHz fvco */
-static const pmu1_xtaltab0_t pmu1_xtaltab0_1440[] = {
- {
- 12000, 1, 1, 1, 0x78, 0x0}, {
- 13000, 2, 1, 1, 0x6E, 0xC4EC4E}, {
- 14400, 3, 1, 1, 0x64, 0x0}, {
- 15360, 4, 1, 1, 0x5D, 0xC00000}, {
- 16200, 5, 1, 1, 0x58, 0xE38E38}, {
- 16800, 6, 1, 1, 0x55, 0xB6DB6D}, {
- 19200, 7, 1, 1, 0x4B, 0}, {
- 19800, 8, 1, 1, 0x48, 0xBA2E8B}, {
- 20000, 9, 1, 1, 0x48, 0x0}, {
- 25000, 10, 1, 1, 0x39, 0x999999}, {
- 26000, 11, 1, 1, 0x37, 0x627627}, {
- 30000, 12, 1, 1, 0x30, 0x0}, {
- 37400, 13, 2, 1, 0x4D, 0x15E76}, {
- 38400, 13, 2, 1, 0x4B, 0x0}, {
- 40000, 14, 2, 1, 0x48, 0x0}, {
- 48000, 15, 2, 1, 0x3c, 0x0}, {
- 0, 0, 0, 0, 0, 0}
-};
-
-static const pmu1_xtaltab0_t pmu1_xtaltab0_960[] = {
- {
- 12000, 1, 1, 1, 0x50, 0x0}, {
- 13000, 2, 1, 1, 0x49, 0xD89D89}, {
- 14400, 3, 1, 1, 0x42, 0xAAAAAA}, {
- 15360, 4, 1, 1, 0x3E, 0x800000}, {
- 16200, 5, 1, 1, 0x39, 0x425ED0}, {
- 16800, 6, 1, 1, 0x39, 0x249249}, {
- 19200, 7, 1, 1, 0x32, 0x0}, {
- 19800, 8, 1, 1, 0x30, 0x7C1F07}, {
- 20000, 9, 1, 1, 0x30, 0x0}, {
- 25000, 10, 1, 1, 0x26, 0x666666}, {
- 26000, 11, 1, 1, 0x24, 0xEC4EC4}, {
- 30000, 12, 1, 1, 0x20, 0x0}, {
- 37400, 13, 2, 1, 0x33, 0x563EF9}, {
- 38400, 14, 2, 1, 0x32, 0x0}, {
- 40000, 15, 2, 1, 0x30, 0x0}, {
- 48000, 16, 2, 1, 0x28, 0x0}, {
- 0, 0, 0, 0, 0, 0}
-};
-
-static const pmu1_xtaltab0_t pmu1_xtaltab0_880_4329[] = {
- {
- 12000, 1, 3, 22, 0x9, 0xFFFFEF}, {
- 13000, 2, 1, 6, 0xb, 0x483483}, {
- 14400, 3, 1, 10, 0xa, 0x1C71C7}, {
- 15360, 4, 1, 5, 0xb, 0x755555}, {
- 16200, 5, 1, 10, 0x5, 0x6E9E06}, {
- 16800, 6, 1, 10, 0x5, 0x3Cf3Cf}, {
- 19200, 7, 1, 4, 0xb, 0x755555}, {
- 19800, 8, 1, 11, 0x4, 0xA57EB}, {
- 20000, 9, 1, 11, 0x4, 0x0}, {
- 24000, 10, 3, 11, 0xa, 0x0}, {
- 25000, 11, 5, 16, 0xb, 0x0}, {
- 26000, 12, 1, 1, 0x21, 0xD89D89}, {
- 30000, 13, 3, 8, 0xb, 0x0}, {
- 37400, 14, 3, 1, 0x46, 0x969696}, {
- 38400, 15, 1, 1, 0x16, 0xEAAAAA}, {
- 40000, 16, 1, 2, 0xb, 0}, {
- 0, 0, 0, 0, 0, 0}
-};
-
-/* the following table is based on an 880 MHz fvco */
-static const pmu1_xtaltab0_t pmu1_xtaltab0_880[] = {
- {
- 12000, 1, 3, 22, 0x9, 0xFFFFEF}, {
- 13000, 2, 1, 6, 0xb, 0x483483}, {
- 14400, 3, 1, 10, 0xa, 0x1C71C7}, {
- 15360, 4, 1, 5, 0xb, 0x755555}, {
- 16200, 5, 1, 10, 0x5, 0x6E9E06}, {
- 16800, 6, 1, 10, 0x5, 0x3Cf3Cf}, {
- 19200, 7, 1, 4, 0xb, 0x755555}, {
- 19800, 8, 1, 11, 0x4, 0xA57EB}, {
- 20000, 9, 1, 11, 0x4, 0x0}, {
- 24000, 10, 3, 11, 0xa, 0x0}, {
- 25000, 11, 5, 16, 0xb, 0x0}, {
- 26000, 12, 1, 2, 0x10, 0xEC4EC4}, {
- 30000, 13, 3, 8, 0xb, 0x0}, {
- 33600, 14, 1, 2, 0xd, 0x186186}, {
- 38400, 15, 1, 2, 0xb, 0x755555}, {
- 40000, 16, 1, 2, 0xb, 0}, {
- 0, 0, 0, 0, 0, 0}
-};
-
-/* true if the power topology uses the buck boost to provide 3.3V to VDDIO_RF and WLAN PA */
-static bool si_pmu_res_depfltr_bb(si_t *sih)
-{
- return (sih->boardflags & BFL_BUCKBOOST) != 0;
-}
-
-/* true if the power topology doesn't use the cbuck. Key on chiprev also if the chip is BCM4325. */
-static bool si_pmu_res_depfltr_ncb(si_t *sih)
-{
-
- return (sih->boardflags & BFL_NOCBUCK) != 0;
-}
-
-/* true if the power topology uses the PALDO */
-static bool si_pmu_res_depfltr_paldo(si_t *sih)
-{
- return (sih->boardflags & BFL_PALDO) != 0;
-}
-
-/* true if the power topology doesn't use the PALDO */
-static bool si_pmu_res_depfltr_npaldo(si_t *sih)
-{
- return (sih->boardflags & BFL_PALDO) == 0;
-}
-
-/* Return dependencies (direct or all/indirect) for the given resources */
-static u32
-si_pmu_res_deps(si_t *sih, chipcregs_t *cc, u32 rsrcs,
- bool all)
-{
- u32 deps = 0;
- u32 i;
-
- for (i = 0; i <= PMURES_MAX_RESNUM; i++) {
- if (!(rsrcs & PMURES_BIT(i)))
- continue;
- W_REG(&cc->res_table_sel, i);
- deps |= R_REG(&cc->res_dep_mask);
- }
-
- return !all ? deps : (deps
- ? (deps |
- si_pmu_res_deps(sih, cc, deps,
- true)) : 0);
-}
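
With all set, the function above expands the mask transitively by recursing on each newly discovered dependency set. The same closure can be computed iteratively; a pure-software sketch in which the hypothetical direct_dep[] array stands in for the res_dep_mask register reads (PMURES_BIT and PMURES_MAX_RESNUM are the driver's own macros):

	static u32 deps_closure(const u32 *direct_dep, u32 rsrcs)
	{
		u32 deps = 0, prev;
		uint i;

		do {
			prev = deps;
			for (i = 0; i <= PMURES_MAX_RESNUM; i++)
				if ((rsrcs | deps) & PMURES_BIT(i))
					deps |= direct_dep[i];
		} while (deps != prev);	/* stop once the mask stops growing */

		return deps;
	}
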
-
-/* Determine min/max rsrc masks. Value 0 leaves hardware at default. */
-static void si_pmu_res_masks(si_t *sih, u32 * pmin, u32 * pmax)
-{
- u32 min_mask = 0, max_mask = 0;
- uint rsrcs;
- char *val;
-
- /* # resources */
- rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
-
- /* determine min/max rsrc masks */
- switch (sih->chip) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- case BCM43421_CHIP_ID:
- case BCM43235_CHIP_ID:
- case BCM43236_CHIP_ID:
- case BCM43238_CHIP_ID:
- case BCM4331_CHIP_ID:
- case BCM6362_CHIP_ID:
- /* ??? */
- break;
-
- case BCM4329_CHIP_ID:
- /* 4329 specific issue; revisit this later */
- /* Down to save the power. */
- min_mask =
- PMURES_BIT(RES4329_CBUCK_LPOM) |
- PMURES_BIT(RES4329_CLDO_PU);
- /* Allow (but don't require) PLL to turn on */
- max_mask = 0x3ff63e;
- break;
- case BCM4319_CHIP_ID:
- /* We only need a few resources to be kept on all the time */
- min_mask = PMURES_BIT(RES4319_CBUCK_LPOM) |
- PMURES_BIT(RES4319_CLDO_PU);
-
- /* Allow everything else to be turned on upon requests */
- max_mask = ~(~0 << rsrcs);
- break;
- case BCM4336_CHIP_ID:
- /* Down to save the power. */
- min_mask =
- PMURES_BIT(RES4336_CBUCK_LPOM) | PMURES_BIT(RES4336_CLDO_PU)
- | PMURES_BIT(RES4336_LDO3P3_PU) | PMURES_BIT(RES4336_OTP_PU)
- | PMURES_BIT(RES4336_DIS_INT_RESET_PD);
- /* Allow (but don't require) PLL to turn on */
- max_mask = 0x1ffffff;
- break;
-
- case BCM4330_CHIP_ID:
- /* Down to save the power. */
- min_mask =
- PMURES_BIT(RES4330_CBUCK_LPOM) | PMURES_BIT(RES4330_CLDO_PU)
- | PMURES_BIT(RES4330_DIS_INT_RESET_PD) |
- PMURES_BIT(RES4330_LDO3P3_PU) | PMURES_BIT(RES4330_OTP_PU);
- /* Allow (but don't require) PLL to turn on */
- max_mask = 0xfffffff;
- break;
-
- case BCM4313_CHIP_ID:
- min_mask = PMURES_BIT(RES4313_BB_PU_RSRC) |
- PMURES_BIT(RES4313_XTAL_PU_RSRC) |
- PMURES_BIT(RES4313_ALP_AVAIL_RSRC) |
- PMURES_BIT(RES4313_BB_PLL_PWRSW_RSRC);
- max_mask = 0xffff;
- break;
- default:
- break;
- }
-
- /* Apply nvram override to min mask */
- val = getvar(NULL, "rmin");
- if (val != NULL) {
- min_mask = (u32) simple_strtoul(val, NULL, 0);
- }
- /* Apply nvram override to max mask */
- val = getvar(NULL, "rmax");
- if (val != NULL) {
- max_mask = (u32) simple_strtoul(val, NULL, 0);
- }
-
- *pmin = min_mask;
- *pmax = max_mask;
-}
-
-/* Return up time in ILP cycles for the given resource. */
-static uint
-si_pmu_res_uptime(si_t *sih, chipcregs_t *cc, u8 rsrc) {
- u32 deps;
- uint up, i, dup, dmax;
- u32 min_mask = 0, max_mask = 0;
-
- /* uptime of resource 'rsrc' */
- W_REG(&cc->res_table_sel, rsrc);
- up = (R_REG(&cc->res_updn_timer) >> 8) & 0xff;
-
- /* direct dependencies of resource 'rsrc' */
- deps = si_pmu_res_deps(sih, cc, PMURES_BIT(rsrc), false);
- for (i = 0; i <= PMURES_MAX_RESNUM; i++) {
- if (!(deps & PMURES_BIT(i)))
- continue;
- deps &= ~si_pmu_res_deps(sih, cc, PMURES_BIT(i), true);
- }
- si_pmu_res_masks(sih, &min_mask, &max_mask);
- deps &= ~min_mask;
-
- /* max uptime of direct dependencies */
- dmax = 0;
- for (i = 0; i <= PMURES_MAX_RESNUM; i++) {
- if (!(deps & PMURES_BIT(i)))
- continue;
- dup = si_pmu_res_uptime(sih, cc, (u8) i);
- if (dmax < dup)
- dmax = dup;
- }
-
- return up + dmax + PMURES_UP_TRANSITION;
-}
-
-static void
-si_pmu_spuravoid_pllupdate(si_t *sih, chipcregs_t *cc, u8 spuravoid)
-{
- u32 tmp = 0;
- u8 phypll_offset = 0;
- u8 bcm5357_bcm43236_p1div[] = { 0x1, 0x5, 0x5 };
- u8 bcm5357_bcm43236_ndiv[] = { 0x30, 0xf6, 0xfc };
-
- switch (sih->chip) {
- case BCM5357_CHIP_ID:
- case BCM43235_CHIP_ID:
- case BCM43236_CHIP_ID:
- case BCM43238_CHIP_ID:
-
- /*
- * BCM5357 needs to touch PLL1_PLLCTL[02],
- * so offset PLL0_PLLCTL[02] by 6
- */
- phypll_offset = (sih->chip == BCM5357_CHIP_ID) ? 6 : 0;
-
- /* RMW only the P1 divider */
- W_REG(&cc->pllcontrol_addr,
- PMU1_PLL0_PLLCTL0 + phypll_offset);
- tmp = R_REG(&cc->pllcontrol_data);
- tmp &= (~(PMU1_PLL0_PC0_P1DIV_MASK));
- tmp |=
- (bcm5357_bcm43236_p1div[spuravoid] <<
- PMU1_PLL0_PC0_P1DIV_SHIFT);
- W_REG(&cc->pllcontrol_data, tmp);
-
- /* RMW only the int feedback divider */
- W_REG(&cc->pllcontrol_addr,
- PMU1_PLL0_PLLCTL2 + phypll_offset);
- tmp = R_REG(&cc->pllcontrol_data);
- tmp &= ~(PMU1_PLL0_PC2_NDIV_INT_MASK);
- tmp |=
- (bcm5357_bcm43236_ndiv[spuravoid]) <<
- PMU1_PLL0_PC2_NDIV_INT_SHIFT;
- W_REG(&cc->pllcontrol_data, tmp);
-
- tmp = 1 << 10;
- break;
-
- case BCM4331_CHIP_ID:
- if (spuravoid == 2) {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11500014);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x0FC00a08);
- } else if (spuravoid == 1) {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11500014);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x0F600a08);
- } else {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11100014);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x03000a08);
- }
- tmp = 1 << 10;
- break;
-
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- case BCM43421_CHIP_ID:
- case BCM6362_CHIP_ID:
- if (spuravoid == 1) {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11500010);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x000C0C06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x0F600a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x2001E920);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888815);
- } else {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11100010);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x000c0c06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x03000a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x200005c0);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888815);
- }
- tmp = 1 << 10;
- break;
-
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11100008);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x0c000c06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x03000a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x200005c0);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888855);
-
- tmp = 1 << 10;
- break;
-
- case BCM4716_CHIP_ID:
- case BCM4748_CHIP_ID:
- case BCM47162_CHIP_ID:
- if (spuravoid == 1) {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11500060);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x080C0C06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x0F600000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x2001E924);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888815);
- } else {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11100060);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x080c0c06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x03000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x200005c0);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888815);
- }
-
- tmp = 3 << 9;
- break;
-
- case BCM4319_CHIP_ID:
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11100070);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x1014140a);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888854);
-
- if (spuravoid == 1) {
- /* spur_avoid ON, so enable 41/82/164Mhz clock mode */
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x05201828);
- } else {
- /* enable 40/80/160Mhz clock mode */
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x05001828);
- }
- break;
- case BCM4336_CHIP_ID:
- /* Looks like these are only for default xtal freq 26MHz */
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x02100020);
-
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x0C0C0C0C);
-
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x01240C0C);
-
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x202C2820);
-
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888825);
-
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- if (spuravoid == 1)
- W_REG(&cc->pllcontrol_data, 0x00EC4EC4);
- else
- W_REG(&cc->pllcontrol_data, 0x00762762);
-
- tmp = PCTL_PLL_PLLCTL_UPD;
- break;
-
- default:
- /* bail out */
- return;
- }
-
- tmp |= R_REG(&cc->pmucontrol);
- W_REG(&cc->pmucontrol, tmp);
-}
-
-/* select default xtal frequency for each chip */
-static const pmu1_xtaltab0_t *si_pmu1_xtaldef0(si_t *sih)
-{
- switch (sih->chip) {
- case BCM4329_CHIP_ID:
- /* Default to 38400Khz */
- return &pmu1_xtaltab0_880_4329[PMU1_XTALTAB0_880_38400K];
- case BCM4319_CHIP_ID:
- /* Default to 30000Khz */
- return &pmu1_xtaltab0_1440[PMU1_XTALTAB0_1440_30000K];
- case BCM4336_CHIP_ID:
- /* Default to 26000Khz */
- return &pmu1_xtaltab0_960[PMU1_XTALTAB0_960_26000K];
- case BCM4330_CHIP_ID:
- /* Default to 37400Khz */
- if (CST4330_CHIPMODE_SDIOD(sih->chipst))
- return &pmu1_xtaltab0_960[PMU1_XTALTAB0_960_37400K];
- else
- return &pmu1_xtaltab0_1440[PMU1_XTALTAB0_1440_37400K];
- default:
- break;
- }
- return NULL;
-}
-
-/* select xtal table for each chip */
-static const pmu1_xtaltab0_t *si_pmu1_xtaltab0(si_t *sih)
-{
- switch (sih->chip) {
- case BCM4329_CHIP_ID:
- return pmu1_xtaltab0_880_4329;
- case BCM4319_CHIP_ID:
- return pmu1_xtaltab0_1440;
- case BCM4336_CHIP_ID:
- return pmu1_xtaltab0_960;
- case BCM4330_CHIP_ID:
- if (CST4330_CHIPMODE_SDIOD(sih->chipst))
- return pmu1_xtaltab0_960;
- else
- return pmu1_xtaltab0_1440;
- default:
- break;
- }
- return NULL;
-}
-
-/* query alp/xtal clock frequency */
-static u32
-si_pmu1_alpclk0(si_t *sih, chipcregs_t *cc)
-{
- const pmu1_xtaltab0_t *xt;
- u32 xf;
-
- /* Find the frequency in the table */
- xf = (R_REG(&cc->pmucontrol) & PCTL_XTALFREQ_MASK) >>
- PCTL_XTALFREQ_SHIFT;
- for (xt = si_pmu1_xtaltab0(sih); xt != NULL && xt->fref != 0; xt++)
- if (xt->xf == xf)
- break;
- /* Could not find it so assign a default value */
- if (xt == NULL || xt->fref == 0)
- xt = si_pmu1_xtaldef0(sih);
- return xt->fref * 1000;
-}
-
-/* select default pll fvco for each chip */
-static u32 si_pmu1_pllfvco0(si_t *sih)
-{
- switch (sih->chip) {
- case BCM4329_CHIP_ID:
- return FVCO_880;
- case BCM4319_CHIP_ID:
- return FVCO_1440;
- case BCM4336_CHIP_ID:
- return FVCO_960;
- case BCM4330_CHIP_ID:
- if (CST4330_CHIPMODE_SDIOD(sih->chipst))
- return FVCO_960;
- else
- return FVCO_1440;
- default:
- break;
- }
- return 0;
-}
-
-static void si_pmu_set_4330_plldivs(si_t *sih)
-{
- u32 FVCO = si_pmu1_pllfvco0(sih) / 1000;
- u32 m1div, m2div, m3div, m4div, m5div, m6div;
- u32 pllc1, pllc2;
-
- m2div = m3div = m4div = m6div = FVCO / 80;
- m5div = FVCO / 160;
-
- if (CST4330_CHIPMODE_SDIOD(sih->chipst))
- m1div = FVCO / 80;
- else
- m1div = FVCO / 90;
- pllc1 =
- (m1div << PMU1_PLL0_PC1_M1DIV_SHIFT) | (m2div <<
- PMU1_PLL0_PC1_M2DIV_SHIFT) |
- (m3div << PMU1_PLL0_PC1_M3DIV_SHIFT) | (m4div <<
- PMU1_PLL0_PC1_M4DIV_SHIFT);
- si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL1, ~0, pllc1);
-
- pllc2 = si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL1, 0, 0);
- pllc2 &= ~(PMU1_PLL0_PC2_M5DIV_MASK | PMU1_PLL0_PC2_M6DIV_MASK);
- pllc2 |=
- ((m5div << PMU1_PLL0_PC2_M5DIV_SHIFT) |
- (m6div << PMU1_PLL0_PC2_M6DIV_SHIFT));
- si_pmu_pllcontrol(sih, PMU1_PLL0_PLLCTL2, ~0, pllc2);
-}
-
-/* Set up PLL registers in the PMU as per the crystal speed.
- * XtalFreq field in pmucontrol register being 0 indicates the PLL
- * is not programmed and the h/w default is assumed to work, in which
- * case the xtal frequency is unknown to the s/w so we need to call
- * si_pmu1_xtaldef0() wherever it is needed to return a default value.
- */
-static void si_pmu1_pllinit0(si_t *sih, chipcregs_t *cc, u32 xtal)
-{
- const pmu1_xtaltab0_t *xt;
- u32 tmp;
- u32 buf_strength = 0;
- u8 ndiv_mode = 1;
-
- /* Use h/w default PLL config */
- if (xtal == 0) {
- return;
- }
-
- /* Find the frequency in the table */
- for (xt = si_pmu1_xtaltab0(sih); xt != NULL && xt->fref != 0; xt++)
- if (xt->fref == xtal)
- break;
-
- /* Check current PLL state, bail out if it has been programmed or
- * we don't know how to program it.
- */
- if (xt == NULL || xt->fref == 0) {
- return;
- }
- /* for the 4319 the bootloader already programs the PLL but does not
- * program PLL4 and PLL5, so skip this check for the 4319
- */
- if ((((R_REG(&cc->pmucontrol) & PCTL_XTALFREQ_MASK) >>
- PCTL_XTALFREQ_SHIFT) == xt->xf) &&
- !((sih->chip == BCM4319_CHIP_ID)
- || (sih->chip == BCM4330_CHIP_ID)))
- return;
-
- switch (sih->chip) {
- case BCM4329_CHIP_ID:
- /* Change the BBPLL drive strength to 8 for all channels */
- buf_strength = 0x888888;
- AND_REG(&cc->min_res_mask,
- ~(PMURES_BIT(RES4329_BBPLL_PWRSW_PU) |
- PMURES_BIT(RES4329_HT_AVAIL)));
- AND_REG(&cc->max_res_mask,
- ~(PMURES_BIT(RES4329_BBPLL_PWRSW_PU) |
- PMURES_BIT(RES4329_HT_AVAIL)));
- SPINWAIT(R_REG(&cc->clk_ctl_st) & CCS_HTAVAIL,
- PMU_MAX_TRANSITION_DLY);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- if (xt->fref == 38400)
- tmp = 0x200024C0;
- else if (xt->fref == 37400)
- tmp = 0x20004500;
- else if (xt->fref == 26000)
- tmp = 0x200024C0;
- else
- tmp = 0x200005C0; /* Chip Dflt Settings */
- W_REG(&cc->pllcontrol_data, tmp);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- tmp =
- R_REG(&cc->pllcontrol_data) & PMU1_PLL0_PC5_CLK_DRV_MASK;
- if ((xt->fref == 38400) || (xt->fref == 37400)
- || (xt->fref == 26000))
- tmp |= 0x15;
- else
- tmp |= 0x25; /* Chip Dflt Settings */
- W_REG(&cc->pllcontrol_data, tmp);
- break;
-
- case BCM4319_CHIP_ID:
- /* Change the BBPLL drive strength to 2 for all channels */
- buf_strength = 0x222222;
-
- /* Make sure the PLL is off */
- /* WAR65104: Disable the HT_AVAIL resource first and then
- * after a delay (more than downtime for HT_AVAIL) remove the
- * BBPLL resource; backplane clock moves to ALP from HT.
- */
- AND_REG(&cc->min_res_mask,
- ~(PMURES_BIT(RES4319_HT_AVAIL)));
- AND_REG(&cc->max_res_mask,
- ~(PMURES_BIT(RES4319_HT_AVAIL)));
-
- udelay(100);
- AND_REG(&cc->min_res_mask,
- ~(PMURES_BIT(RES4319_BBPLL_PWRSW_PU)));
- AND_REG(&cc->max_res_mask,
- ~(PMURES_BIT(RES4319_BBPLL_PWRSW_PU)));
-
- udelay(100);
- SPINWAIT(R_REG(&cc->clk_ctl_st) & CCS_HTAVAIL,
- PMU_MAX_TRANSITION_DLY);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- tmp = 0x200005c0;
- W_REG(&cc->pllcontrol_data, tmp);
- break;
-
- case BCM4336_CHIP_ID:
- AND_REG(&cc->min_res_mask,
- ~(PMURES_BIT(RES4336_HT_AVAIL) |
- PMURES_BIT(RES4336_MACPHY_CLKAVAIL)));
- AND_REG(&cc->max_res_mask,
- ~(PMURES_BIT(RES4336_HT_AVAIL) |
- PMURES_BIT(RES4336_MACPHY_CLKAVAIL)));
- udelay(100);
- SPINWAIT(R_REG(&cc->clk_ctl_st) & CCS_HTAVAIL,
- PMU_MAX_TRANSITION_DLY);
- break;
-
- case BCM4330_CHIP_ID:
- AND_REG(&cc->min_res_mask,
- ~(PMURES_BIT(RES4330_HT_AVAIL) |
- PMURES_BIT(RES4330_MACPHY_CLKAVAIL)));
- AND_REG(&cc->max_res_mask,
- ~(PMURES_BIT(RES4330_HT_AVAIL) |
- PMURES_BIT(RES4330_MACPHY_CLKAVAIL)));
- udelay(100);
- SPINWAIT(R_REG(&cc->clk_ctl_st) & CCS_HTAVAIL,
- PMU_MAX_TRANSITION_DLY);
- break;
-
- default:
- break;
- }
-
- /* Write p1div and p2div to pllcontrol[0] */
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- tmp = R_REG(&cc->pllcontrol_data) &
- ~(PMU1_PLL0_PC0_P1DIV_MASK | PMU1_PLL0_PC0_P2DIV_MASK);
- tmp |=
- ((xt->
- p1div << PMU1_PLL0_PC0_P1DIV_SHIFT) & PMU1_PLL0_PC0_P1DIV_MASK) |
- ((xt->
- p2div << PMU1_PLL0_PC0_P2DIV_SHIFT) & PMU1_PLL0_PC0_P2DIV_MASK);
- W_REG(&cc->pllcontrol_data, tmp);
-
- if ((sih->chip == BCM4330_CHIP_ID))
- si_pmu_set_4330_plldivs(sih);
-
- if ((sih->chip == BCM4329_CHIP_ID)
- && (sih->chiprev == 0)) {
-
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- tmp = R_REG(&cc->pllcontrol_data);
- tmp = tmp & (~DOT11MAC_880MHZ_CLK_DIVISOR_MASK);
- tmp = tmp | DOT11MAC_880MHZ_CLK_DIVISOR_VAL;
- W_REG(&cc->pllcontrol_data, tmp);
- }
- if ((sih->chip == BCM4319_CHIP_ID) ||
- (sih->chip == BCM4336_CHIP_ID) ||
- (sih->chip == BCM4330_CHIP_ID))
- ndiv_mode = PMU1_PLL0_PC2_NDIV_MODE_MFB;
- else
- ndiv_mode = PMU1_PLL0_PC2_NDIV_MODE_MASH;
-
- /* Write ndiv_int and ndiv_mode to pllcontrol[2] */
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- tmp = R_REG(&cc->pllcontrol_data) &
- ~(PMU1_PLL0_PC2_NDIV_INT_MASK | PMU1_PLL0_PC2_NDIV_MODE_MASK);
- tmp |=
- ((xt->
- ndiv_int << PMU1_PLL0_PC2_NDIV_INT_SHIFT) &
- PMU1_PLL0_PC2_NDIV_INT_MASK) | ((ndiv_mode <<
- PMU1_PLL0_PC2_NDIV_MODE_SHIFT) &
- PMU1_PLL0_PC2_NDIV_MODE_MASK);
- W_REG(&cc->pllcontrol_data, tmp);
-
- /* Write ndiv_frac to pllcontrol[3] */
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- tmp = R_REG(&cc->pllcontrol_data) & ~PMU1_PLL0_PC3_NDIV_FRAC_MASK;
- tmp |= ((xt->ndiv_frac << PMU1_PLL0_PC3_NDIV_FRAC_SHIFT) &
- PMU1_PLL0_PC3_NDIV_FRAC_MASK);
- W_REG(&cc->pllcontrol_data, tmp);
-
- /* Write clock driving strength to pllcontrol[5] */
- if (buf_strength) {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- tmp =
- R_REG(&cc->pllcontrol_data) & ~PMU1_PLL0_PC5_CLK_DRV_MASK;
- tmp |= (buf_strength << PMU1_PLL0_PC5_CLK_DRV_SHIFT);
- W_REG(&cc->pllcontrol_data, tmp);
- }
-
- /* to operate the 4319 usb in 24MHz/48MHz; chipcontrol[2][84:83] needs
- * to be updated.
- */
- if ((sih->chip == BCM4319_CHIP_ID)
- && (xt->fref != XTAL_FREQ_30000MHZ)) {
- W_REG(&cc->chipcontrol_addr, PMU1_PLL0_CHIPCTL2);
- tmp =
- R_REG(&cc->chipcontrol_data) & ~CCTL_4319USB_XTAL_SEL_MASK;
- if (xt->fref == XTAL_FREQ_24000MHZ) {
- tmp |=
- (CCTL_4319USB_24MHZ_PLL_SEL <<
- CCTL_4319USB_XTAL_SEL_SHIFT);
- } else if (xt->fref == XTAL_FREQ_48000MHZ) {
- tmp |=
- (CCTL_4319USB_48MHZ_PLL_SEL <<
- CCTL_4319USB_XTAL_SEL_SHIFT);
- }
- W_REG(&cc->chipcontrol_data, tmp);
- }
-
- /* Flush deferred pll control registers writes */
- if (sih->pmurev >= 2)
- OR_REG(&cc->pmucontrol, PCTL_PLL_PLLCTL_UPD);
-
- /* Write XtalFreq. Set the divisor also. */
- tmp = R_REG(&cc->pmucontrol) &
- ~(PCTL_ILP_DIV_MASK | PCTL_XTALFREQ_MASK);
- tmp |= (((((xt->fref + 127) / 128) - 1) << PCTL_ILP_DIV_SHIFT) &
- PCTL_ILP_DIV_MASK) |
- ((xt->xf << PCTL_XTALFREQ_SHIFT) & PCTL_XTALFREQ_MASK);
-
- if ((sih->chip == BCM4329_CHIP_ID)
- && sih->chiprev == 0) {
- /* clear the htstretch before clearing HTReqEn */
- AND_REG(&cc->clkstretch, ~CSTRETCH_HT);
- tmp &= ~PCTL_HT_REQ_EN;
- }
-
- W_REG(&cc->pmucontrol, tmp);
-}
-
-u32 si_pmu_ilp_clock(si_t *sih)
-{
- static u32 ilpcycles_per_sec;
-
- if (ISSIM_ENAB(sih) || !PMUCTL_ENAB(sih))
- return ILP_CLOCK;
-
- if (ilpcycles_per_sec == 0) {
- u32 start, end, delta;
- u32 origidx = ai_coreidx(sih);
- chipcregs_t *cc = ai_setcoreidx(sih, SI_CC_IDX);
- start = R_REG(&cc->pmutimer);
- mdelay(ILP_CALC_DUR);
- end = R_REG(&cc->pmutimer);
- delta = end - start;
- ilpcycles_per_sec = delta * (1000 / ILP_CALC_DUR);
- ai_setcoreidx(sih, origidx);
- }
-
- return ilpcycles_per_sec;
-}
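
The calibration above just counts pmutimer ticks over an ILP_CALC_DUR millisecond window and scales the count to one second, which is why 1000 must be an integer multiple of the window. The arithmetic as a stand-alone sketch (hypothetical helper, not part of the driver):

	static inline u32 ilp_ticks_to_hz(u32 start, u32 end, u32 window_ms)
	{
		u32 delta = end - start;	/* u32 subtraction handles timer wrap */

		return delta * (1000 / window_ms);
	}

With the 10 ms window used here, a delta of 320 ticks reports 32000 Hz.
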
-
-void si_pmu_set_ldo_voltage(si_t *sih, u8 ldo, u8 voltage)
-{
- u8 sr_cntl_shift = 0, rc_shift = 0, shift = 0, mask = 0;
- u8 addr = 0;
-
- switch (sih->chip) {
- case BCM4336_CHIP_ID:
- switch (ldo) {
- case SET_LDO_VOLTAGE_CLDO_PWM:
- addr = 4;
- rc_shift = 1;
- mask = 0xf;
- break;
- case SET_LDO_VOLTAGE_CLDO_BURST:
- addr = 4;
- rc_shift = 5;
- mask = 0xf;
- break;
- case SET_LDO_VOLTAGE_LNLDO1:
- addr = 4;
- rc_shift = 17;
- mask = 0xf;
- break;
- default:
- return;
- }
- break;
- case BCM4330_CHIP_ID:
- switch (ldo) {
- case SET_LDO_VOLTAGE_CBUCK_PWM:
- addr = 3;
- rc_shift = 0;
- mask = 0x1f;
- break;
- default:
- return;
- }
- break;
- default:
- return;
- }
-
- shift = sr_cntl_shift + rc_shift;
-
- ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, regcontrol_addr),
- ~0, addr);
- ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, regcontrol_data),
- mask << shift, (voltage & mask) << shift);
-}
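
The write above is a masked read-modify-write of one regulator-control word: the voltage code is masked to its field width and shifted to sr_cntl_shift + rc_shift, and only those bits change, assuming ai_corereg()'s usual masked-write semantics. The field update as a pure sketch (hypothetical helper):

	static u32 ldo_field_update(u32 regval, u32 mask, u32 shift, u8 voltage)
	{
		return (regval & ~(mask << shift)) |
		       (((u32)voltage & mask) << shift);
	}
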
-
-u16 si_pmu_fast_pwrup_delay(si_t *sih)
-{
- uint delay = PMU_MAX_TRANSITION_DLY;
- chipcregs_t *cc;
- uint origidx;
-#ifdef BCMDBG
- char chn[8];
- chn[0] = 0; /* to suppress compile error */
-#endif
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- switch (sih->chip) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- case BCM43421_CHIP_ID:
- case BCM43235_CHIP_ID:
- case BCM43236_CHIP_ID:
- case BCM43238_CHIP_ID:
- case BCM4331_CHIP_ID:
- case BCM6362_CHIP_ID:
- case BCM4313_CHIP_ID:
- delay = ISSIM_ENAB(sih) ? 70 : 3700;
- break;
- case BCM4329_CHIP_ID:
- if (ISSIM_ENAB(sih))
- delay = 70;
- else {
- u32 ilp = si_pmu_ilp_clock(sih);
- delay =
- (si_pmu_res_uptime(sih, cc, RES4329_HT_AVAIL) +
- D11SCC_SLOW2FAST_TRANSITION) * ((1000000 + ilp -
- 1) / ilp);
- delay = (11 * delay) / 10;
- }
- break;
- case BCM4319_CHIP_ID:
- delay = ISSIM_ENAB(sih) ? 70 : 3700;
- break;
- case BCM4336_CHIP_ID:
- if (ISSIM_ENAB(sih))
- delay = 70;
- else {
- u32 ilp = si_pmu_ilp_clock(sih);
- delay =
- (si_pmu_res_uptime(sih, cc, RES4336_HT_AVAIL) +
- D11SCC_SLOW2FAST_TRANSITION) * ((1000000 + ilp -
- 1) / ilp);
- delay = (11 * delay) / 10;
- }
- break;
- case BCM4330_CHIP_ID:
- if (ISSIM_ENAB(sih))
- delay = 70;
- else {
- u32 ilp = si_pmu_ilp_clock(sih);
- delay =
- (si_pmu_res_uptime(sih, cc, RES4330_HT_AVAIL) +
- D11SCC_SLOW2FAST_TRANSITION) * ((1000000 + ilp -
- 1) / ilp);
- delay = (11 * delay) / 10;
- }
- break;
- default:
- break;
- }
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-
- return (u16) delay;
-}
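
For the 4329/4336/4330 cases the delay is the HT_AVAIL up time in ILP cycles (plus the slow-to-fast transition allowance) converted to microseconds with a rounded-up per-cycle period, then padded by 10%. A sketch of that formula (hypothetical helper; the names are assumptions, D11SCC_SLOW2FAST_TRANSITION is the driver's constant):

	static u16 ht_avail_delay_us(u32 uptime_ilp_cycles, u32 ilp_hz)
	{
		/* microseconds per ILP cycle, rounded up */
		u32 us_per_cycle = (1000000 + ilp_hz - 1) / ilp_hz;
		u32 delay = (uptime_ilp_cycles + D11SCC_SLOW2FAST_TRANSITION) *
			    us_per_cycle;

		return (u16)((11 * delay) / 10);	/* add a 10% margin */
	}
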
-
-void si_pmu_sprom_enable(si_t *sih, bool enable)
-{
- chipcregs_t *cc;
- uint origidx;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
-/* Read/write a chipcontrol reg */
-u32 si_pmu_chipcontrol(si_t *sih, uint reg, u32 mask, u32 val)
-{
- ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, chipcontrol_addr), ~0,
- reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(chipcregs_t, chipcontrol_data), mask, val);
-}
-
-/* Read/write a regcontrol reg */
-u32 si_pmu_regcontrol(si_t *sih, uint reg, u32 mask, u32 val)
-{
- ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, regcontrol_addr), ~0,
- reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(chipcregs_t, regcontrol_data), mask, val);
-}
-
-/* Read/write a pllcontrol reg */
-u32 si_pmu_pllcontrol(si_t *sih, uint reg, u32 mask, u32 val)
-{
- ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, pllcontrol_addr), ~0,
- reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(chipcregs_t, pllcontrol_data), mask, val);
-}
-
-/* PMU PLL update */
-void si_pmu_pllupd(si_t *sih)
-{
- ai_corereg(sih, SI_CC_IDX, offsetof(chipcregs_t, pmucontrol),
- PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
-}
-
-/* query alp/xtal clock frequency */
-u32 si_pmu_alp_clock(si_t *sih)
-{
- chipcregs_t *cc;
- uint origidx;
- u32 clock = ALP_CLOCK;
-
- /* bail out with default */
- if (!PMUCTL_ENAB(sih))
- return clock;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- switch (sih->chip) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- case BCM43421_CHIP_ID:
- case BCM43235_CHIP_ID:
- case BCM43236_CHIP_ID:
- case BCM43238_CHIP_ID:
- case BCM4331_CHIP_ID:
- case BCM6362_CHIP_ID:
- case BCM4716_CHIP_ID:
- case BCM4748_CHIP_ID:
- case BCM47162_CHIP_ID:
- case BCM4313_CHIP_ID:
- case BCM5357_CHIP_ID:
- /* always 20Mhz */
- clock = 20000 * 1000;
- break;
- case BCM4329_CHIP_ID:
- case BCM4319_CHIP_ID:
- case BCM4336_CHIP_ID:
- case BCM4330_CHIP_ID:
-
- clock = si_pmu1_alpclk0(sih, cc);
- break;
- case BCM5356_CHIP_ID:
- /* always 25Mhz */
- clock = 25000 * 1000;
- break;
- default:
- break;
- }
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
- return clock;
-}
-
-void si_pmu_spuravoid(si_t *sih, u8 spuravoid)
-{
- chipcregs_t *cc;
- uint origidx, intr_val;
- u32 tmp = 0;
-
- /* Remember original core before switch to chipc */
- cc = (chipcregs_t *) ai_switch_core(sih, CC_CORE_ID, &origidx,
- &intr_val);
-
- /* force the HT off */
- if (sih->chip == BCM4336_CHIP_ID) {
- tmp = R_REG(&cc->max_res_mask);
- tmp &= ~RES4336_HT_AVAIL;
- W_REG(&cc->max_res_mask, tmp);
- /* wait for the ht to really go away */
- SPINWAIT(((R_REG(&cc->clk_ctl_st) & CCS_HTAVAIL) == 0),
- 10000);
- }
-
- /* update the pll changes */
- si_pmu_spuravoid_pllupdate(sih, cc, spuravoid);
-
- /* enable HT back on */
- if (sih->chip == BCM4336_CHIP_ID) {
- tmp = R_REG(&cc->max_res_mask);
- tmp |= RES4336_HT_AVAIL;
- W_REG(&cc->max_res_mask, tmp);
- }
-
- /* Return to original core */
- ai_restore_core(sih, origidx, intr_val);
-}
-
-/* initialize PMU */
-void si_pmu_init(si_t *sih)
-{
- chipcregs_t *cc;
- uint origidx;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- if (sih->pmurev == 1)
- AND_REG(&cc->pmucontrol, ~PCTL_NOILP_ON_WAIT);
- else if (sih->pmurev >= 2)
- OR_REG(&cc->pmucontrol, PCTL_NOILP_ON_WAIT);
-
- if ((sih->chip == BCM4329_CHIP_ID) && (sih->chiprev == 2)) {
- /* Fix for 4329b0 bad LPOM state. */
- W_REG(&cc->regcontrol_addr, 2);
- OR_REG(&cc->regcontrol_data, 0x100);
-
- W_REG(&cc->regcontrol_addr, 3);
- OR_REG(&cc->regcontrol_data, 0x4);
- }
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU chip controls and other chip level stuff */
-void si_pmu_chip_init(si_t *sih)
-{
- uint origidx;
-
- /* Gate off SPROM clock and chip select signals */
- si_pmu_sprom_enable(sih, false);
-
- /* Remember original core */
- origidx = ai_coreidx(sih);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU switch/regulators */
-void si_pmu_swreg_init(si_t *sih)
-{
- switch (sih->chip) {
- case BCM4336_CHIP_ID:
- /* Reduce CLDO PWM output voltage to 1.2V */
- si_pmu_set_ldo_voltage(sih, SET_LDO_VOLTAGE_CLDO_PWM, 0xe);
- /* Reduce CLDO BURST output voltage to 1.2V */
- si_pmu_set_ldo_voltage(sih, SET_LDO_VOLTAGE_CLDO_BURST,
- 0xe);
- /* Reduce LNLDO1 output voltage to 1.2V */
- si_pmu_set_ldo_voltage(sih, SET_LDO_VOLTAGE_LNLDO1, 0xe);
- if (sih->chiprev == 0)
- si_pmu_regcontrol(sih, 2, 0x400000, 0x400000);
- break;
-
- case BCM4330_CHIP_ID:
- /* CBUCK voltage defaults to 1.8 V; set it to 1.5 V */
- si_pmu_set_ldo_voltage(sih, SET_LDO_VOLTAGE_CBUCK_PWM, 0);
- break;
- default:
- break;
- }
-}
-
-/* initialize PLL */
-void si_pmu_pll_init(si_t *sih, uint xtalfreq)
-{
- chipcregs_t *cc;
- uint origidx;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- switch (sih->chip) {
- case BCM4329_CHIP_ID:
- if (xtalfreq == 0)
- xtalfreq = 38400;
- si_pmu1_pllinit0(sih, cc, xtalfreq);
- break;
- case BCM4313_CHIP_ID:
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- case BCM43421_CHIP_ID:
- case BCM43235_CHIP_ID:
- case BCM43236_CHIP_ID:
- case BCM43238_CHIP_ID:
- case BCM4331_CHIP_ID:
- case BCM6362_CHIP_ID:
- /* ??? */
- break;
- case BCM4319_CHIP_ID:
- case BCM4336_CHIP_ID:
- case BCM4330_CHIP_ID:
- si_pmu1_pllinit0(sih, cc, xtalfreq);
- break;
- default:
- break;
- }
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU resources */
-void si_pmu_res_init(si_t *sih)
-{
- chipcregs_t *cc;
- uint origidx;
- const pmu_res_updown_t *pmu_res_updown_table = NULL;
- uint pmu_res_updown_table_sz = 0;
- const pmu_res_depend_t *pmu_res_depend_table = NULL;
- uint pmu_res_depend_table_sz = 0;
- u32 min_mask = 0, max_mask = 0;
- char name[8], *val;
- uint i, rsrcs;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- switch (sih->chip) {
- case BCM4329_CHIP_ID:
- /* Optimize resources up/down timers */
- if (ISSIM_ENAB(sih)) {
- pmu_res_updown_table = NULL;
- pmu_res_updown_table_sz = 0;
- } else {
- pmu_res_updown_table = bcm4329_res_updown;
- pmu_res_updown_table_sz =
- ARRAY_SIZE(bcm4329_res_updown);
- }
- /* Optimize resources dependencies */
- pmu_res_depend_table = bcm4329_res_depend;
- pmu_res_depend_table_sz = ARRAY_SIZE(bcm4329_res_depend);
- break;
-
- case BCM4319_CHIP_ID:
- /* Optimize resources up/down timers */
- if (ISSIM_ENAB(sih)) {
- pmu_res_updown_table = bcm4319a0_res_updown_qt;
- pmu_res_updown_table_sz =
- ARRAY_SIZE(bcm4319a0_res_updown_qt);
- } else {
- pmu_res_updown_table = bcm4319a0_res_updown;
- pmu_res_updown_table_sz =
- ARRAY_SIZE(bcm4319a0_res_updown);
- }
- /* Optimize resource dependency masks */
- pmu_res_depend_table = bcm4319a0_res_depend;
- pmu_res_depend_table_sz = ARRAY_SIZE(bcm4319a0_res_depend);
- break;
-
- case BCM4336_CHIP_ID:
- /* Optimize resources up/down timers */
- if (ISSIM_ENAB(sih)) {
- pmu_res_updown_table = bcm4336a0_res_updown_qt;
- pmu_res_updown_table_sz =
- ARRAY_SIZE(bcm4336a0_res_updown_qt);
- } else {
- pmu_res_updown_table = bcm4336a0_res_updown;
- pmu_res_updown_table_sz =
- ARRAY_SIZE(bcm4336a0_res_updown);
- }
- /* Optimize resource dependency masks */
- pmu_res_depend_table = bcm4336a0_res_depend;
- pmu_res_depend_table_sz = ARRAY_SIZE(bcm4336a0_res_depend);
- break;
-
- case BCM4330_CHIP_ID:
- /* Optimize resources up/down timers */
- if (ISSIM_ENAB(sih)) {
- pmu_res_updown_table = bcm4330a0_res_updown_qt;
- pmu_res_updown_table_sz =
- ARRAY_SIZE(bcm4330a0_res_updown_qt);
- } else {
- pmu_res_updown_table = bcm4330a0_res_updown;
- pmu_res_updown_table_sz =
- ARRAY_SIZE(bcm4330a0_res_updown);
- }
- /* Optimize resource dependency masks */
- pmu_res_depend_table = bcm4330a0_res_depend;
- pmu_res_depend_table_sz = ARRAY_SIZE(bcm4330a0_res_depend);
- break;
-
- default:
- break;
- }
-
- /* # resources */
- rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
-
- /* Program up/down timers */
- while (pmu_res_updown_table_sz--) {
- W_REG(&cc->res_table_sel,
- pmu_res_updown_table[pmu_res_updown_table_sz].resnum);
- W_REG(&cc->res_updn_timer,
- pmu_res_updown_table[pmu_res_updown_table_sz].updown);
- }
- /* Apply nvram overrides to up/down timers */
- for (i = 0; i < rsrcs; i++) {
- snprintf(name, sizeof(name), "r%dt", i);
- val = getvar(NULL, name);
- if (val == NULL)
- continue;
- W_REG(&cc->res_table_sel, (u32) i);
- W_REG(&cc->res_updn_timer,
- (u32) simple_strtoul(val, NULL, 0));
- }
-
- /* Program resource dependencies table */
- while (pmu_res_depend_table_sz--) {
- if (pmu_res_depend_table[pmu_res_depend_table_sz].filter != NULL
- && !(pmu_res_depend_table[pmu_res_depend_table_sz].
- filter) (sih))
- continue;
- for (i = 0; i < rsrcs; i++) {
- if ((pmu_res_depend_table[pmu_res_depend_table_sz].
- res_mask & PMURES_BIT(i)) == 0)
- continue;
- W_REG(&cc->res_table_sel, i);
- switch (pmu_res_depend_table[pmu_res_depend_table_sz].
- action) {
- case RES_DEPEND_SET:
- W_REG(&cc->res_dep_mask,
- pmu_res_depend_table
- [pmu_res_depend_table_sz].depend_mask);
- break;
- case RES_DEPEND_ADD:
- OR_REG(&cc->res_dep_mask,
- pmu_res_depend_table
- [pmu_res_depend_table_sz].depend_mask);
- break;
- case RES_DEPEND_REMOVE:
- AND_REG(&cc->res_dep_mask,
- ~pmu_res_depend_table
- [pmu_res_depend_table_sz].depend_mask);
- break;
- default:
- break;
- }
- }
- }
- /* Apply nvram overrides to dependency masks */
- for (i = 0; i < rsrcs; i++) {
- snprintf(name, sizeof(name), "r%dd", i);
- val = getvar(NULL, name);
- if (val == NULL)
- continue;
- W_REG(&cc->res_table_sel, (u32) i);
- W_REG(&cc->res_dep_mask,
- (u32) simple_strtoul(val, NULL, 0));
- }
-
- /* Determine min/max rsrc masks */
- si_pmu_res_masks(sih, &min_mask, &max_mask);
-
- /* It is required to program max_mask first and then min_mask */
-
- /* Program max resource mask */
-
- if (max_mask)
- W_REG(&cc->max_res_mask, max_mask);
-
- /* Program min resource mask */
-
- if (min_mask)
- W_REG(&cc->min_res_mask, min_mask);
-
- /* Add some delay; allow resources to come up and settle. */
- mdelay(2);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
-u32 si_pmu_measure_alpclk(si_t *sih)
-{
- chipcregs_t *cc;
- uint origidx;
- u32 alp_khz;
-
- if (sih->pmurev < 10)
- return 0;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- if (R_REG(&cc->pmustatus) & PST_EXTLPOAVAIL) {
- u32 ilp_ctr, alp_hz;
-
- /*
- * Enable the reg to measure the freq,
- * in case it was disabled before
- */
- W_REG(&cc->pmu_xtalfreq,
- 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
-
- /* Delay for well over 4 ILP clocks */
- udelay(1000);
-
- /* Read the latched number of ALP ticks per 4 ILP ticks */
- ilp_ctr =
- R_REG(&cc->pmu_xtalfreq) & PMU_XTALFREQ_REG_ILPCTR_MASK;
-
- /*
- * Turn off the PMU_XTALFREQ_REG_MEASURE_SHIFT
- * bit to save power
- */
- W_REG(&cc->pmu_xtalfreq, 0);
-
- /* Calculate ALP frequency */
- alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4;
-
- /*
- * Round to nearest 100KHz, and at
- * the same time convert to KHz
- */
- alp_khz = (alp_hz + 50000) / 100000 * 100;
- } else
- alp_khz = 0;
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-
- return alp_khz;
-}
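
The hardware latches the number of ALP ticks seen in 4 ILP ticks of the 32768 Hz external LPO, so the frequency conversion and the round-to-nearest-100-kHz step reduce to the arithmetic below (hypothetical helper mirroring the code above; EXT_ILP_HZ is the driver's constant):

	static u32 alp_counter_to_khz(u32 ilp_ctr)
	{
		u32 alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4;

		/* round to the nearest 100 kHz, returned in kHz */
		return (alp_hz + 50000) / 100000 * 100;
	}

A latched count of 2441, for example, gives 2441 * 8192, roughly 19.997 MHz, which rounds to 20000 kHz.
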
-
-bool si_pmu_is_otp_powered(si_t *sih)
-{
- uint idx;
- chipcregs_t *cc;
- bool st;
-
- /* Remember original core before switch to chipc */
- idx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- switch (sih->chip) {
- case BCM4329_CHIP_ID:
- st = (R_REG(&cc->res_state) & PMURES_BIT(RES4329_OTP_PU))
- != 0;
- break;
- case BCM4319_CHIP_ID:
- st = (R_REG(&cc->res_state) & PMURES_BIT(RES4319_OTP_PU))
- != 0;
- break;
- case BCM4336_CHIP_ID:
- st = (R_REG(&cc->res_state) & PMURES_BIT(RES4336_OTP_PU))
- != 0;
- break;
- case BCM4330_CHIP_ID:
- st = (R_REG(&cc->res_state) & PMURES_BIT(RES4330_OTP_PU))
- != 0;
- break;
-
- /* These chips don't use a PMU bit to power the OTP up/down; OTP is
- * always on. Use the OTP_INIT command to reset/refresh its state.
- */
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- case BCM43421_CHIP_ID:
- case BCM43236_CHIP_ID:
- case BCM43235_CHIP_ID:
- case BCM43238_CHIP_ID:
- st = true;
- break;
- default:
- st = true;
- break;
- }
-
- /* Return to original core */
- ai_setcoreidx(sih, idx);
- return st;
-}
-
-/* power up/down OTP through PMU resources */
-void si_pmu_otp_power(si_t *sih, bool on)
-{
- chipcregs_t *cc;
- uint origidx;
- u32 rsrcs = 0; /* rsrcs to turn on/off OTP power */
-
- /* Don't do anything if OTP is disabled */
- if (ai_is_otp_disabled(sih))
- return;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- switch (sih->chip) {
- case BCM4329_CHIP_ID:
- rsrcs = PMURES_BIT(RES4329_OTP_PU);
- break;
- case BCM4319_CHIP_ID:
- rsrcs = PMURES_BIT(RES4319_OTP_PU);
- break;
- case BCM4336_CHIP_ID:
- rsrcs = PMURES_BIT(RES4336_OTP_PU);
- break;
- case BCM4330_CHIP_ID:
- rsrcs = PMURES_BIT(RES4330_OTP_PU);
- break;
- default:
- break;
- }
-
- if (rsrcs != 0) {
- u32 otps;
-
- /* Figure out the dependencies (exclude min_res_mask) */
- u32 deps = si_pmu_res_deps(sih, cc, rsrcs, true);
- u32 min_mask = 0, max_mask = 0;
- si_pmu_res_masks(sih, &min_mask, &max_mask);
- deps &= ~min_mask;
- /* Turn on/off the power */
- if (on) {
- OR_REG(&cc->min_res_mask, (rsrcs | deps));
- SPINWAIT(!(R_REG(&cc->res_state) & rsrcs),
- PMU_MAX_TRANSITION_DLY);
- } else {
- AND_REG(&cc->min_res_mask, ~(rsrcs | deps));
- }
-
- SPINWAIT((((otps = R_REG(&cc->otpstatus)) & OTPS_READY) !=
- (on ? OTPS_READY : 0)), 100);
- }
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_stf.h b/drivers/staging/brcm80211/brcmsmac/wlc_stf.h
deleted file mode 100644
index 2b1180b128a..00000000000
--- a/drivers/staging/brcm80211/brcmsmac/wlc_stf.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _wlc_stf_h_
-#define _wlc_stf_h_
-
-extern int wlc_stf_attach(struct wlc_info *wlc);
-extern void wlc_stf_detach(struct wlc_info *wlc);
-
-extern void wlc_tempsense_upd(struct wlc_info *wlc);
-extern void wlc_stf_ss_algo_channel_get(struct wlc_info *wlc,
- u16 *ss_algo_channel,
- chanspec_t chanspec);
-extern int wlc_stf_ss_update(struct wlc_info *wlc, struct wlcband *band);
-extern void wlc_stf_phy_txant_upd(struct wlc_info *wlc);
-extern int wlc_stf_txchain_set(struct wlc_info *wlc, s32 int_val, bool force);
-extern bool wlc_stf_stbc_rx_set(struct wlc_info *wlc, s32 int_val);
-
-extern int wlc_stf_ant_txant_validate(struct wlc_info *wlc, s8 val);
-extern void wlc_stf_phy_txant_upd(struct wlc_info *wlc);
-extern void wlc_stf_phy_chain_calc(struct wlc_info *wlc);
-extern u16 wlc_stf_phytxchain_sel(struct wlc_info *wlc, ratespec_t rspec);
-extern u16 wlc_stf_d11hdrs_phyctl_txant(struct wlc_info *wlc, ratespec_t rspec);
-
-#endif /* _wlc_stf_h_ */
diff --git a/drivers/staging/brcm80211/util/Makefile b/drivers/staging/brcm80211/brcmutil/Makefile
index f9b36cafdc8..6403423c021 100644
--- a/drivers/staging/brcm80211/util/Makefile
+++ b/drivers/staging/brcm80211/brcmutil/Makefile
@@ -16,12 +16,12 @@
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ccflags-y := \
- -Idrivers/staging/brcm80211/util \
+ -Idrivers/staging/brcm80211/brcmutil \
-Idrivers/staging/brcm80211/include
BRCMUTIL_OFILES := \
- bcmutils.o \
- bcmwifi.o
+ utils.o \
+ wifi.o
MODULEPFX := brcmutil
diff --git a/drivers/staging/brcm80211/util/bcmutils.c b/drivers/staging/brcm80211/brcmutil/utils.c
index 43e5bb3aec0..37b6b779779 100644
--- a/drivers/staging/brcm80211/util/bcmutils.c
+++ b/drivers/staging/brcm80211/brcmutil/utils.c
@@ -14,27 +14,15 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/ctype.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <linux/pci.h>
#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/printk.h>
-#include <bcmdefs.h>
-#include <stdarg.h>
-#include <bcmutils.h>
-#include <bcmnvram.h>
-#include <bcmdevs.h>
-#include <proto/802.11.h>
+#include <brcmu_utils.h>
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver utilities.");
MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
-struct sk_buff *bcm_pkt_buf_get_skb(uint len)
+struct sk_buff *brcmu_pkt_buf_get_skb(uint len)
{
struct sk_buff *skb;
@@ -46,10 +34,10 @@ struct sk_buff *bcm_pkt_buf_get_skb(uint len)
return skb;
}
-EXPORT_SYMBOL(bcm_pkt_buf_get_skb);
+EXPORT_SYMBOL(brcmu_pkt_buf_get_skb);
/* Free the driver packet. Free the tag if present */
-void bcm_pkt_buf_free_skb(struct sk_buff *skb)
+void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
{
struct sk_buff *nskb;
int nest = 0;
@@ -74,11 +62,11 @@ void bcm_pkt_buf_free_skb(struct sk_buff *skb)
skb = nskb;
}
}
-EXPORT_SYMBOL(bcm_pkt_buf_free_skb);
+EXPORT_SYMBOL(brcmu_pkt_buf_free_skb);
/* copy a buffer into a pkt buffer chain */
-uint bcm_pktfrombuf(struct sk_buff *p, uint offset, int len,
+uint brcmu_pktfrombuf(struct sk_buff *p, uint offset, int len,
unsigned char *buf)
{
uint n, ret = 0;
@@ -105,10 +93,10 @@ uint bcm_pktfrombuf(struct sk_buff *p, uint offset, int len,
return ret;
}
-EXPORT_SYMBOL(bcm_pktfrombuf);
+EXPORT_SYMBOL(brcmu_pktfrombuf);
/* return total length of buffer chain */
-uint bcm_pkttotlen(struct sk_buff *p)
+uint brcmu_pkttotlen(struct sk_buff *p)
{
uint total;
@@ -117,13 +105,13 @@ uint bcm_pkttotlen(struct sk_buff *p)
total += p->len;
return total;
}
-EXPORT_SYMBOL(bcm_pkttotlen);
+EXPORT_SYMBOL(brcmu_pkttotlen);
/*
* osl multiple-precedence packet queue
* hi_prec is always >= the number of the highest non-empty precedence
*/
-struct sk_buff *bcm_pktq_penq(struct pktq *pq, int prec,
+struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
struct sk_buff *p)
{
struct pktq_prec *q;
@@ -148,9 +136,9 @@ struct sk_buff *bcm_pktq_penq(struct pktq *pq, int prec,
return p;
}
-EXPORT_SYMBOL(bcm_pktq_penq);
+EXPORT_SYMBOL(brcmu_pktq_penq);
-struct sk_buff *bcm_pktq_penq_head(struct pktq *pq, int prec,
+struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
struct sk_buff *p)
{
struct pktq_prec *q;
@@ -174,9 +162,9 @@ struct sk_buff *bcm_pktq_penq_head(struct pktq *pq, int prec,
return p;
}
-EXPORT_SYMBOL(bcm_pktq_penq_head);
+EXPORT_SYMBOL(brcmu_pktq_penq_head);
-struct sk_buff *bcm_pktq_pdeq(struct pktq *pq, int prec)
+struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec)
{
struct pktq_prec *q;
struct sk_buff *p;
@@ -199,9 +187,9 @@ struct sk_buff *bcm_pktq_pdeq(struct pktq *pq, int prec)
return p;
}
-EXPORT_SYMBOL(bcm_pktq_pdeq);
+EXPORT_SYMBOL(brcmu_pktq_pdeq);
-struct sk_buff *bcm_pktq_pdeq_tail(struct pktq *pq, int prec)
+struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
{
struct pktq_prec *q;
struct sk_buff *p, *prev;
@@ -227,10 +215,10 @@ struct sk_buff *bcm_pktq_pdeq_tail(struct pktq *pq, int prec)
return p;
}
-EXPORT_SYMBOL(bcm_pktq_pdeq_tail);
+EXPORT_SYMBOL(brcmu_pktq_pdeq_tail);
void
-bcm_pktq_pflush(struct pktq *pq, int prec, bool dir,
+brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
ifpkt_cb_t fn, void *arg)
{
struct pktq_prec *q;
@@ -246,7 +234,7 @@ bcm_pktq_pflush(struct pktq *pq, int prec, bool dir,
else
prev->prev = p->prev;
p->prev = NULL;
- bcm_pkt_buf_free_skb(p);
+ brcmu_pkt_buf_free_skb(p);
q->len--;
pq->len--;
p = (head ? q->head : prev->prev);
@@ -260,18 +248,18 @@ bcm_pktq_pflush(struct pktq *pq, int prec, bool dir,
q->tail = NULL;
}
}
-EXPORT_SYMBOL(bcm_pktq_pflush);
+EXPORT_SYMBOL(brcmu_pktq_pflush);
-void bcm_pktq_flush(struct pktq *pq, bool dir,
+void brcmu_pktq_flush(struct pktq *pq, bool dir,
ifpkt_cb_t fn, void *arg)
{
int prec;
for (prec = 0; prec < pq->num_prec; prec++)
- bcm_pktq_pflush(pq, prec, dir, fn, arg);
+ brcmu_pktq_pflush(pq, prec, dir, fn, arg);
}
-EXPORT_SYMBOL(bcm_pktq_flush);
+EXPORT_SYMBOL(brcmu_pktq_flush);
-void bcm_pktq_init(struct pktq *pq, int num_prec, int max_len)
+void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len)
{
int prec;
@@ -286,9 +274,9 @@ void bcm_pktq_init(struct pktq *pq, int num_prec, int max_len)
for (prec = 0; prec < num_prec; prec++)
pq->q[prec].max = pq->max;
}
-EXPORT_SYMBOL(bcm_pktq_init);
+EXPORT_SYMBOL(brcmu_pktq_init);
-struct sk_buff *bcm_pktq_peek_tail(struct pktq *pq, int *prec_out)
+struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out)
{
int prec;
@@ -304,10 +292,10 @@ struct sk_buff *bcm_pktq_peek_tail(struct pktq *pq, int *prec_out)
return pq->q[prec].tail;
}
-EXPORT_SYMBOL(bcm_pktq_peek_tail);
+EXPORT_SYMBOL(brcmu_pktq_peek_tail);
/* Return sum of lengths of a specific set of precedences */
-int bcm_pktq_mlen(struct pktq *pq, uint prec_bmp)
+int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp)
{
int prec, len;
@@ -319,10 +307,10 @@ int bcm_pktq_mlen(struct pktq *pq, uint prec_bmp)
return len;
}
-EXPORT_SYMBOL(bcm_pktq_mlen);
+EXPORT_SYMBOL(brcmu_pktq_mlen);
/* Priority dequeue from a specific set of precedences */
-struct sk_buff *bcm_pktq_mdeq(struct pktq *pq, uint prec_bmp,
+struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
int *prec_out)
{
struct pktq_prec *q;
@@ -360,10 +348,10 @@ struct sk_buff *bcm_pktq_mdeq(struct pktq *pq, uint prec_bmp,
return p;
}
-EXPORT_SYMBOL(bcm_pktq_mdeq);
+EXPORT_SYMBOL(brcmu_pktq_mdeq);
/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
-int bcm_ether_atoe(char *p, u8 *ea)
+int brcmu_ether_atoe(char *p, u8 *ea)
{
int i = 0;
@@ -375,11 +363,11 @@ int bcm_ether_atoe(char *p, u8 *ea)
return i == 6;
}
-EXPORT_SYMBOL(bcm_ether_atoe);
+EXPORT_SYMBOL(brcmu_ether_atoe);
#if defined(BCMDBG)
/* pretty hex print a pkt buffer chain */
-void bcm_prpkt(const char *msg, struct sk_buff *p0)
+void brcmu_prpkt(const char *msg, struct sk_buff *p0)
{
struct sk_buff *p;
@@ -389,13 +377,14 @@ void bcm_prpkt(const char *msg, struct sk_buff *p0)
for (p = p0; p; p = p->next)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, p->data, p->len);
}
-EXPORT_SYMBOL(bcm_prpkt);
+EXPORT_SYMBOL(brcmu_prpkt);
#endif /* defined(BCMDBG) */
/* iovar table lookup */
-const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
+const struct brcmu_iovar *brcmu_iovar_lookup(const struct brcmu_iovar *table,
+ const char *name)
{
- const bcm_iovar_t *vi;
+ const struct brcmu_iovar *vi;
const char *lookup_name;
/* skip any ':' delimited option prefixes */
@@ -413,9 +402,10 @@ const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
return NULL; /* var name not found */
}
-EXPORT_SYMBOL(bcm_iovar_lookup);
+EXPORT_SYMBOL(brcmu_iovar_lookup);
-int bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
+int brcmu_iovar_lencheck(const struct brcmu_iovar *vi, void *arg, int len,
+ bool set)
{
int bcmerror = 0;
@@ -458,7 +448,7 @@ int bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
return bcmerror;
}
-EXPORT_SYMBOL(bcm_iovar_lencheck);
+EXPORT_SYMBOL(brcmu_iovar_lencheck);
/*******************************************************************************
* crc8
@@ -517,29 +507,29 @@ static const u8 crc8_table[256] = {
0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
};
-u8 bcm_crc8(u8 *pdata, /* pointer to array of data to process */
+u8 brcmu_crc8(u8 *pdata, /* pointer to array of data to process */
uint nbytes, /* number of input data bytes to process */
u8 crc /* either CRC8_INIT_VALUE or previous return value */
- ) {
+ ) {
/* loop over the buffer data */
while (nbytes-- > 0)
crc = crc8_table[(crc ^ *pdata++) & 0xff];
return crc;
}
-EXPORT_SYMBOL(bcm_crc8);
+EXPORT_SYMBOL(brcmu_crc8);
/*
* Traverse a string of 1-byte tag/1-byte length/variable-length value
* triples, returning a pointer to the substring whose first element
* matches tag
*/
-bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key)
+struct brcmu_tlv *brcmu_parse_tlvs(void *buf, int buflen, uint key)
{
- bcm_tlv_t *elt;
+ struct brcmu_tlv *elt;
int totlen;
- elt = (bcm_tlv_t *) buf;
+ elt = (struct brcmu_tlv *) buf;
totlen = buflen;
/* find tagged parameter */
@@ -550,18 +540,19 @@ bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key)
if ((elt->id == key) && (totlen >= (len + 2)))
return elt;
- elt = (bcm_tlv_t *) ((u8 *) elt + (len + 2));
+ elt = (struct brcmu_tlv *) ((u8 *) elt + (len + 2));
totlen -= (len + 2);
}
return NULL;
}
-EXPORT_SYMBOL(bcm_parse_tlvs);
+EXPORT_SYMBOL(brcmu_parse_tlvs);
#if defined(BCMDBG)
int
-bcm_format_flags(const bcm_bit_desc_t *bd, u32 flags, char *buf, int len)
+brcmu_format_flags(const struct brcmu_bit_desc *bd, u32 flags, char *buf,
+ int len)
{
int i;
char *p = buf;
@@ -612,10 +603,10 @@ bcm_format_flags(const bcm_bit_desc_t *bd, u32 flags, char *buf, int len)
return (int)(p - buf);
}
-EXPORT_SYMBOL(bcm_format_flags);
+EXPORT_SYMBOL(brcmu_format_flags);
/* print bytes formatted as hex to a string. return the resulting string length */
-int bcm_format_hex(char *str, const void *bytes, int len)
+int brcmu_format_hex(char *str, const void *bytes, int len)
{
int i;
char *p = str;
@@ -627,10 +618,10 @@ int bcm_format_hex(char *str, const void *bytes, int len)
}
return (int)(p - str);
}
-EXPORT_SYMBOL(bcm_format_hex);
+EXPORT_SYMBOL(brcmu_format_hex);
#endif /* defined(BCMDBG) */
-char *bcm_chipname(uint chipid, char *buf, uint len)
+char *brcmu_chipname(uint chipid, char *buf, uint len)
{
const char *fmt;
@@ -638,9 +629,9 @@ char *bcm_chipname(uint chipid, char *buf, uint len)
snprintf(buf, len, fmt, chipid);
return buf;
}
-EXPORT_SYMBOL(bcm_chipname);
+EXPORT_SYMBOL(brcmu_chipname);
-uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
+uint brcmu_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
{
uint len;
@@ -657,7 +648,7 @@ uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
return len;
}
-EXPORT_SYMBOL(bcm_mkiovar);
+EXPORT_SYMBOL(brcmu_mkiovar);
/* Quarter dBm units to mW
* Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
@@ -681,7 +672,7 @@ EXPORT_SYMBOL(bcm_mkiovar);
#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
static const u16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
-/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */
+/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */
/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000,
/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
@@ -689,7 +680,7 @@ static const u16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
};
-u16 bcm_qdbm_to_mw(u8 qdbm)
+u16 brcmu_qdbm_to_mw(u8 qdbm)
{
uint factor = 1;
int idx = qdbm - QDBM_OFFSET;
@@ -712,9 +703,9 @@ u16 bcm_qdbm_to_mw(u8 qdbm)
*/
return (nqdBm_to_mW_map[idx] + factor / 2) / factor;
}
-EXPORT_SYMBOL(bcm_qdbm_to_mw);
+EXPORT_SYMBOL(brcmu_qdbm_to_mw);
-u8 bcm_mw_to_qdbm(u16 mw)
+u8 brcmu_mw_to_qdbm(u16 mw)
{
u8 qdbm;
int offset;
@@ -744,9 +735,9 @@ u8 bcm_mw_to_qdbm(u16 mw)
return qdbm;
}
-EXPORT_SYMBOL(bcm_mw_to_qdbm);
+EXPORT_SYMBOL(brcmu_mw_to_qdbm);
-uint bcm_bitcount(u8 *bitmap, uint length)
+uint brcmu_bitcount(u8 *bitmap, uint length)
{
uint bitcount = 0, i;
u8 tmp;
@@ -759,18 +750,18 @@ uint bcm_bitcount(u8 *bitmap, uint length)
}
return bitcount;
}
-EXPORT_SYMBOL(bcm_bitcount);
+EXPORT_SYMBOL(brcmu_bitcount);
-/* Initialization of bcmstrbuf structure */
-void bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
+/* Initialization of brcmu_strbuf structure */
+void brcmu_binit(struct brcmu_strbuf *b, char *buf, uint size)
{
b->origsize = b->size = size;
b->origbuf = b->buf = buf;
}
-EXPORT_SYMBOL(bcm_binit);
+EXPORT_SYMBOL(brcmu_binit);
/* Buffer sprintf wrapper to guard against buffer overflow */
-int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+int brcmu_bprintf(struct brcmu_strbuf *b, const char *fmt, ...)
{
va_list ap;
int r;
@@ -780,7 +771,7 @@ int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
/* Non Ansi C99 compliant returns -1,
* Ansi compliant return r >= b->size,
- * bcmstdlib returns 0, handle all
+ * stdlib returns 0, handle all
*/
if ((r == -1) || (r >= (int)b->size) || (r == 0)) {
b->size = 0;
@@ -793,4 +784,4 @@ int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
return r;
}
-EXPORT_SYMBOL(bcm_bprintf);
+EXPORT_SYMBOL(brcmu_bprintf);
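For orientation, a minimal caller-side sketch (not from the tree) of the renamed packet-queue helpers, using only the signatures visible in this file and the same includes as utils.c; the queue sizes and precedence value are arbitrary example numbers.

#include <linux/netdevice.h>
#include <brcmu_utils.h>

/* Illustrative only: allocate a packet, enqueue and dequeue it with the
 * renamed brcmu_ helpers, then free it.
 */
static void example_pktq_roundtrip(void)
{
	struct pktq q;
	struct sk_buff *skb;

	brcmu_pktq_init(&q, 4, 256);		/* 4 precedences, 256 pkts max */

	skb = brcmu_pkt_buf_get_skb(128);	/* allocate a 128-byte packet */
	if (skb)
		brcmu_pktq_penq(&q, 0, skb);	/* enqueue at precedence 0 */

	skb = brcmu_pktq_pdeq(&q, 0);		/* dequeue it again */
	if (skb)
		brcmu_pkt_buf_free_skb(skb);
}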
diff --git a/drivers/staging/brcm80211/util/bcmwifi.c b/drivers/staging/brcm80211/brcmutil/wifi.c
index 955a3ab1a82..b9ffe8682a2 100644
--- a/drivers/staging/brcm80211/util/bcmwifi.c
+++ b/drivers/staging/brcm80211/brcmutil/wifi.c
@@ -13,12 +13,7 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/ctype.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <bcmdefs.h>
-#include <bcmutils.h>
-#include <bcmwifi.h>
+#include <brcmu_wifi.h>
/*
* Verify the chanspec is using a legal set of parameters, i.e. that the
@@ -26,7 +21,7 @@
* combination could be legal given any set of circumstances.
* RETURNS: true if the chanspec is malformed, false if it looks good.
*/
-bool bcm_chspec_malformed(chanspec_t chanspec)
+bool brcmu_chspec_malformed(chanspec_t chanspec)
{
/* must be 2G or 5G band */
if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec))
@@ -46,14 +41,14 @@ bool bcm_chspec_malformed(chanspec_t chanspec)
return false;
}
-EXPORT_SYMBOL(bcm_chspec_malformed);
+EXPORT_SYMBOL(brcmu_chspec_malformed);
/*
* This function returns the channel number that control traffic is being sent
* on. For legacy channels this is just the channel number; for 40 MHz channels
* it is the upper or lower 20 MHz sideband, depending on the chanspec selected.
*/
-u8 bcm_chspec_ctlchan(chanspec_t chspec)
+u8 brcmu_chspec_ctlchan(chanspec_t chspec)
{
u8 ctl_chan;
@@ -76,7 +71,7 @@ u8 bcm_chspec_ctlchan(chanspec_t chspec)
return ctl_chan;
}
-EXPORT_SYMBOL(bcm_chspec_ctlchan);
+EXPORT_SYMBOL(brcmu_chspec_ctlchan);
/*
* Return the channel number for a given frequency and base frequency.
@@ -97,7 +92,7 @@ EXPORT_SYMBOL(bcm_chspec_ctlchan);
*
* Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
*/
-int bcm_mhz2channel(uint freq, uint start_factor)
+int brcmu_mhz2channel(uint freq, uint start_factor)
{
int ch = -1;
uint base;
@@ -133,5 +128,4 @@ int bcm_mhz2channel(uint freq, uint start_factor)
return ch;
}
-EXPORT_SYMBOL(bcm_mhz2channel);
-
+EXPORT_SYMBOL(brcmu_mhz2channel);
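As a usage note (illustrative, not from the tree): in the 2.4 GHz band, 2412 MHz corresponds to channel 1 and 2484 MHz to channel 14, so a caller of the renamed helper would look roughly like the sketch below. The start-factor argument is left as a parameter because the driver defines its base-frequency constants elsewhere.

#include <brcmu_wifi.h>

/* Illustrative only: map two well-known 2.4 GHz centre frequencies to
 * channel numbers; start_factor_2g is whatever 2.4 GHz base-frequency
 * factor the driver uses.
 */
static void example_channel_lookup(uint start_factor_2g)
{
	int ch1  = brcmu_mhz2channel(2412, start_factor_2g);	/* expect 1 */
	int ch14 = brcmu_mhz2channel(2484, start_factor_2g);	/* expect 14 */

	(void)ch1;
	(void)ch14;
}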
diff --git a/drivers/staging/brcm80211/include/aidmp.h b/drivers/staging/brcm80211/include/aidmp.h
deleted file mode 100644
index 7e0ce8f2434..00000000000
--- a/drivers/staging/brcm80211/include/aidmp.h
+++ /dev/null
@@ -1,374 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _AIDMP_H
-#define _AIDMP_H
-
-/* Manufacturer Ids */
-#define MFGID_ARM 0x43b
-#define MFGID_BRCM 0x4bf
-#define MFGID_MIPS 0x4a7
-
-/* Component Classes */
-#define CC_SIM 0
-#define CC_EROM 1
-#define CC_CORESIGHT 9
-#define CC_VERIF 0xb
-#define CC_OPTIMO 0xd
-#define CC_GEN 0xe
-#define CC_PRIMECELL 0xf
-
-/* Enumeration ROM registers */
-#define ER_EROMENTRY 0x000
-#define ER_REMAPCONTROL 0xe00
-#define ER_REMAPSELECT 0xe04
-#define ER_MASTERSELECT 0xe10
-#define ER_ITCR 0xf00
-#define ER_ITIP 0xf04
-
-/* Erom entries */
-#define ER_TAG 0xe
-#define ER_TAG1 0x6
-#define ER_VALID 1
-#define ER_CI 0
-#define ER_MP 2
-#define ER_ADD 4
-#define ER_END 0xe
-#define ER_BAD 0xffffffff
-
-/* EROM CompIdentA */
-#define CIA_MFG_MASK 0xfff00000
-#define CIA_MFG_SHIFT 20
-#define CIA_CID_MASK 0x000fff00
-#define CIA_CID_SHIFT 8
-#define CIA_CCL_MASK 0x000000f0
-#define CIA_CCL_SHIFT 4
-
-/* EROM CompIdentB */
-#define CIB_REV_MASK 0xff000000
-#define CIB_REV_SHIFT 24
-#define CIB_NSW_MASK 0x00f80000
-#define CIB_NSW_SHIFT 19
-#define CIB_NMW_MASK 0x0007c000
-#define CIB_NMW_SHIFT 14
-#define CIB_NSP_MASK 0x00003e00
-#define CIB_NSP_SHIFT 9
-#define CIB_NMP_MASK 0x000001f0
-#define CIB_NMP_SHIFT 4
-
-/* EROM MasterPortDesc */
-#define MPD_MUI_MASK 0x0000ff00
-#define MPD_MUI_SHIFT 8
-#define MPD_MP_MASK 0x000000f0
-#define MPD_MP_SHIFT 4
-
-/* EROM AddrDesc */
-#define AD_ADDR_MASK 0xfffff000
-#define AD_SP_MASK 0x00000f00
-#define AD_SP_SHIFT 8
-#define AD_ST_MASK 0x000000c0
-#define AD_ST_SHIFT 6
-#define AD_ST_SLAVE 0x00000000
-#define AD_ST_BRIDGE 0x00000040
-#define AD_ST_SWRAP 0x00000080
-#define AD_ST_MWRAP 0x000000c0
-#define AD_SZ_MASK 0x00000030
-#define AD_SZ_SHIFT 4
-#define AD_SZ_4K 0x00000000
-#define AD_SZ_8K 0x00000010
-#define AD_SZ_16K 0x00000020
-#define AD_SZ_SZD 0x00000030
-#define AD_AG32 0x00000008
-#define AD_ADDR_ALIGN 0x00000fff
-#define AD_SZ_BASE 0x00001000 /* 4KB */
-
-/* EROM SizeDesc */
-#define SD_SZ_MASK 0xfffff000
-#define SD_SG32 0x00000008
-#define SD_SZ_ALIGN 0x00000fff
-
-#ifndef _LANGUAGE_ASSEMBLY
-
-typedef volatile struct _aidmp {
- u32 oobselina30; /* 0x000 */
- u32 oobselina74; /* 0x004 */
- u32 PAD[6];
- u32 oobselinb30; /* 0x020 */
- u32 oobselinb74; /* 0x024 */
- u32 PAD[6];
- u32 oobselinc30; /* 0x040 */
- u32 oobselinc74; /* 0x044 */
- u32 PAD[6];
- u32 oobselind30; /* 0x060 */
- u32 oobselind74; /* 0x064 */
- u32 PAD[38];
- u32 oobselouta30; /* 0x100 */
- u32 oobselouta74; /* 0x104 */
- u32 PAD[6];
- u32 oobseloutb30; /* 0x120 */
- u32 oobseloutb74; /* 0x124 */
- u32 PAD[6];
- u32 oobseloutc30; /* 0x140 */
- u32 oobseloutc74; /* 0x144 */
- u32 PAD[6];
- u32 oobseloutd30; /* 0x160 */
- u32 oobseloutd74; /* 0x164 */
- u32 PAD[38];
- u32 oobsynca; /* 0x200 */
- u32 oobseloutaen; /* 0x204 */
- u32 PAD[6];
- u32 oobsyncb; /* 0x220 */
- u32 oobseloutben; /* 0x224 */
- u32 PAD[6];
- u32 oobsyncc; /* 0x240 */
- u32 oobseloutcen; /* 0x244 */
- u32 PAD[6];
- u32 oobsyncd; /* 0x260 */
- u32 oobseloutden; /* 0x264 */
- u32 PAD[38];
- u32 oobaextwidth; /* 0x300 */
- u32 oobainwidth; /* 0x304 */
- u32 oobaoutwidth; /* 0x308 */
- u32 PAD[5];
- u32 oobbextwidth; /* 0x320 */
- u32 oobbinwidth; /* 0x324 */
- u32 oobboutwidth; /* 0x328 */
- u32 PAD[5];
- u32 oobcextwidth; /* 0x340 */
- u32 oobcinwidth; /* 0x344 */
- u32 oobcoutwidth; /* 0x348 */
- u32 PAD[5];
- u32 oobdextwidth; /* 0x360 */
- u32 oobdinwidth; /* 0x364 */
- u32 oobdoutwidth; /* 0x368 */
- u32 PAD[37];
- u32 ioctrlset; /* 0x400 */
- u32 ioctrlclear; /* 0x404 */
- u32 ioctrl; /* 0x408 */
- u32 PAD[61];
- u32 iostatus; /* 0x500 */
- u32 PAD[127];
- u32 ioctrlwidth; /* 0x700 */
- u32 iostatuswidth; /* 0x704 */
- u32 PAD[62];
- u32 resetctrl; /* 0x800 */
- u32 resetstatus; /* 0x804 */
- u32 resetreadid; /* 0x808 */
- u32 resetwriteid; /* 0x80c */
- u32 PAD[60];
- u32 errlogctrl; /* 0x900 */
- u32 errlogdone; /* 0x904 */
- u32 errlogstatus; /* 0x908 */
- u32 errlogaddrlo; /* 0x90c */
- u32 errlogaddrhi; /* 0x910 */
- u32 errlogid; /* 0x914 */
- u32 errloguser; /* 0x918 */
- u32 errlogflags; /* 0x91c */
- u32 PAD[56];
- u32 intstatus; /* 0xa00 */
- u32 PAD[127];
- u32 config; /* 0xe00 */
- u32 PAD[63];
- u32 itcr; /* 0xf00 */
- u32 PAD[3];
- u32 itipooba; /* 0xf10 */
- u32 itipoobb; /* 0xf14 */
- u32 itipoobc; /* 0xf18 */
- u32 itipoobd; /* 0xf1c */
- u32 PAD[4];
- u32 itipoobaout; /* 0xf30 */
- u32 itipoobbout; /* 0xf34 */
- u32 itipoobcout; /* 0xf38 */
- u32 itipoobdout; /* 0xf3c */
- u32 PAD[4];
- u32 itopooba; /* 0xf50 */
- u32 itopoobb; /* 0xf54 */
- u32 itopoobc; /* 0xf58 */
- u32 itopoobd; /* 0xf5c */
- u32 PAD[4];
- u32 itopoobain; /* 0xf70 */
- u32 itopoobbin; /* 0xf74 */
- u32 itopoobcin; /* 0xf78 */
- u32 itopoobdin; /* 0xf7c */
- u32 PAD[4];
- u32 itopreset; /* 0xf90 */
- u32 PAD[15];
- u32 peripherialid4; /* 0xfd0 */
- u32 peripherialid5; /* 0xfd4 */
- u32 peripherialid6; /* 0xfd8 */
- u32 peripherialid7; /* 0xfdc */
- u32 peripherialid0; /* 0xfe0 */
- u32 peripherialid1; /* 0xfe4 */
- u32 peripherialid2; /* 0xfe8 */
- u32 peripherialid3; /* 0xfec */
- u32 componentid0; /* 0xff0 */
- u32 componentid1; /* 0xff4 */
- u32 componentid2; /* 0xff8 */
- u32 componentid3; /* 0xffc */
-} aidmp_t;
-
-#endif /* _LANGUAGE_ASSEMBLY */
-
-/* Out-of-band Router registers */
-#define OOB_BUSCONFIG 0x020
-#define OOB_STATUSA 0x100
-#define OOB_STATUSB 0x104
-#define OOB_STATUSC 0x108
-#define OOB_STATUSD 0x10c
-#define OOB_ENABLEA0 0x200
-#define OOB_ENABLEA1 0x204
-#define OOB_ENABLEA2 0x208
-#define OOB_ENABLEA3 0x20c
-#define OOB_ENABLEB0 0x280
-#define OOB_ENABLEB1 0x284
-#define OOB_ENABLEB2 0x288
-#define OOB_ENABLEB3 0x28c
-#define OOB_ENABLEC0 0x300
-#define OOB_ENABLEC1 0x304
-#define OOB_ENABLEC2 0x308
-#define OOB_ENABLEC3 0x30c
-#define OOB_ENABLED0 0x380
-#define OOB_ENABLED1 0x384
-#define OOB_ENABLED2 0x388
-#define OOB_ENABLED3 0x38c
-#define OOB_ITCR 0xf00
-#define OOB_ITIPOOBA 0xf10
-#define OOB_ITIPOOBB 0xf14
-#define OOB_ITIPOOBC 0xf18
-#define OOB_ITIPOOBD 0xf1c
-#define OOB_ITOPOOBA 0xf30
-#define OOB_ITOPOOBB 0xf34
-#define OOB_ITOPOOBC 0xf38
-#define OOB_ITOPOOBD 0xf3c
-
-/* DMP wrapper registers */
-#define AI_OOBSELINA30 0x000
-#define AI_OOBSELINA74 0x004
-#define AI_OOBSELINB30 0x020
-#define AI_OOBSELINB74 0x024
-#define AI_OOBSELINC30 0x040
-#define AI_OOBSELINC74 0x044
-#define AI_OOBSELIND30 0x060
-#define AI_OOBSELIND74 0x064
-#define AI_OOBSELOUTA30 0x100
-#define AI_OOBSELOUTA74 0x104
-#define AI_OOBSELOUTB30 0x120
-#define AI_OOBSELOUTB74 0x124
-#define AI_OOBSELOUTC30 0x140
-#define AI_OOBSELOUTC74 0x144
-#define AI_OOBSELOUTD30 0x160
-#define AI_OOBSELOUTD74 0x164
-#define AI_OOBSYNCA 0x200
-#define AI_OOBSELOUTAEN 0x204
-#define AI_OOBSYNCB 0x220
-#define AI_OOBSELOUTBEN 0x224
-#define AI_OOBSYNCC 0x240
-#define AI_OOBSELOUTCEN 0x244
-#define AI_OOBSYNCD 0x260
-#define AI_OOBSELOUTDEN 0x264
-#define AI_OOBAEXTWIDTH 0x300
-#define AI_OOBAINWIDTH 0x304
-#define AI_OOBAOUTWIDTH 0x308
-#define AI_OOBBEXTWIDTH 0x320
-#define AI_OOBBINWIDTH 0x324
-#define AI_OOBBOUTWIDTH 0x328
-#define AI_OOBCEXTWIDTH 0x340
-#define AI_OOBCINWIDTH 0x344
-#define AI_OOBCOUTWIDTH 0x348
-#define AI_OOBDEXTWIDTH 0x360
-#define AI_OOBDINWIDTH 0x364
-#define AI_OOBDOUTWIDTH 0x368
-
-#if defined(__BIG_ENDIAN) && defined(BCMHND74K)
-/* Selective swapped defines for those registers we need in
- * big-endian code.
- */
-#define AI_IOCTRLSET 0x404
-#define AI_IOCTRLCLEAR 0x400
-#define AI_IOCTRL 0x40c
-#define AI_IOSTATUS 0x504
-#define AI_RESETCTRL 0x804
-#define AI_RESETSTATUS 0x800
-
-#else /* !__BIG_ENDIAN || !BCMHND74K */
-
-#define AI_IOCTRLSET 0x400
-#define AI_IOCTRLCLEAR 0x404
-#define AI_IOCTRL 0x408
-#define AI_IOSTATUS 0x500
-#define AI_RESETCTRL 0x800
-#define AI_RESETSTATUS 0x804
-
-#endif /* __BIG_ENDIAN && BCMHND74K */
-
-#define AI_IOCTRLWIDTH 0x700
-#define AI_IOSTATUSWIDTH 0x704
-
-#define AI_RESETREADID 0x808
-#define AI_RESETWRITEID 0x80c
-#define AI_ERRLOGCTRL 0xa00
-#define AI_ERRLOGDONE 0xa04
-#define AI_ERRLOGSTATUS 0xa08
-#define AI_ERRLOGADDRLO 0xa0c
-#define AI_ERRLOGADDRHI 0xa10
-#define AI_ERRLOGID 0xa14
-#define AI_ERRLOGUSER 0xa18
-#define AI_ERRLOGFLAGS 0xa1c
-#define AI_INTSTATUS 0xa00
-#define AI_CONFIG 0xe00
-#define AI_ITCR 0xf00
-#define AI_ITIPOOBA 0xf10
-#define AI_ITIPOOBB 0xf14
-#define AI_ITIPOOBC 0xf18
-#define AI_ITIPOOBD 0xf1c
-#define AI_ITIPOOBAOUT 0xf30
-#define AI_ITIPOOBBOUT 0xf34
-#define AI_ITIPOOBCOUT 0xf38
-#define AI_ITIPOOBDOUT 0xf3c
-#define AI_ITOPOOBA 0xf50
-#define AI_ITOPOOBB 0xf54
-#define AI_ITOPOOBC 0xf58
-#define AI_ITOPOOBD 0xf5c
-#define AI_ITOPOOBAIN 0xf70
-#define AI_ITOPOOBBIN 0xf74
-#define AI_ITOPOOBCIN 0xf78
-#define AI_ITOPOOBDIN 0xf7c
-#define AI_ITOPRESET 0xf90
-#define AI_PERIPHERIALID4 0xfd0
-#define AI_PERIPHERIALID5 0xfd4
-#define AI_PERIPHERIALID6 0xfd8
-#define AI_PERIPHERIALID7 0xfdc
-#define AI_PERIPHERIALID0 0xfe0
-#define AI_PERIPHERIALID1 0xfe4
-#define AI_PERIPHERIALID2 0xfe8
-#define AI_PERIPHERIALID3 0xfec
-#define AI_COMPONENTID0 0xff0
-#define AI_COMPONENTID1 0xff4
-#define AI_COMPONENTID2 0xff8
-#define AI_COMPONENTID3 0xffc
-
-/* resetctrl */
-#define AIRC_RESET 1
-
-/* config */
-#define AICFG_OOB 0x00000020
-#define AICFG_IOS 0x00000010
-#define AICFG_IOC 0x00000008
-#define AICFG_TO 0x00000004
-#define AICFG_ERRL 0x00000002
-#define AICFG_RST 0x00000001
-
-#endif /* _AIDMP_H */
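A small illustrative decode (not from the tree) of an EROM CompIdentA word using the CIA_* masks and shifts defined in the header above; the value of cia would normally be read from the enumeration ROM.

/* Illustrative only: split a CompIdentA word into its fields. */
static void erom_decode_cia(u32 cia)
{
	u32 mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;	/* e.g. MFGID_BRCM */
	u32 cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;	/* core id */
	u32 ccl = (cia & CIA_CCL_MASK) >> CIA_CCL_SHIFT;	/* component class */

	(void)mfg;
	(void)cid;
	(void)ccl;
}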
diff --git a/drivers/staging/brcm80211/include/bcmdefs.h b/drivers/staging/brcm80211/include/bcmdefs.h
deleted file mode 100644
index 55631f36743..00000000000
--- a/drivers/staging/brcm80211/include/bcmdefs.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcmdefs_h_
-#define _bcmdefs_h_
-
-#define SI_BUS 0
-#define PCI_BUS 1
-#define PCMCIA_BUS 2
-#define SDIO_BUS 3
-#define JTAG_BUS 4
-#define USB_BUS 5
-#define SPI_BUS 6
-
-
-#ifndef OFF
-#define OFF 0
-#endif
-
-#ifndef ON
-#define ON 1 /* ON = 1 */
-#endif
-
-#define AUTO (-1) /* Auto = -1 */
-
-/* Bus types */
-#define SI_BUS 0 /* SOC Interconnect */
-#define PCI_BUS 1 /* PCI target */
-#define SDIO_BUS 3 /* SDIO target */
-#define JTAG_BUS 4 /* JTAG */
-#define USB_BUS 5 /* USB (does not support R/W REG) */
-#define SPI_BUS 6 /* gSPI target */
-#define RPC_BUS 7 /* RPC target */
-
-
-/* Defines for DMA Address Width - Shared between OSL and HNDDMA */
-#define DMADDR_MASK_32 0x0 /* Address mask for 32-bits */
-#define DMADDR_MASK_30 0xc0000000 /* Address mask for 30-bits */
-#define DMADDR_MASK_0 0xffffffff /* Address mask for 0-bits (hi-part) */
-
-#define DMADDRWIDTH_30 30 /* 30-bit addressing capability */
-#define DMADDRWIDTH_32 32 /* 32-bit addressing capability */
-#define DMADDRWIDTH_63 63 /* 64-bit addressing capability */
-#define DMADDRWIDTH_64 64 /* 64-bit addressing capability */
-
-#ifdef BCMDMA64OSL
-typedef struct {
- u32 loaddr;
- u32 hiaddr;
-} dma64addr_t;
-
-typedef dma64addr_t dmaaddr_t;
-#define PHYSADDRHI(_pa) ((_pa).hiaddr)
-#define PHYSADDRHISET(_pa, _val) \
- do { \
- (_pa).hiaddr = (_val); \
- } while (0)
-#define PHYSADDRLO(_pa) ((_pa).loaddr)
-#define PHYSADDRLOSET(_pa, _val) \
- do { \
- (_pa).loaddr = (_val); \
- } while (0)
-
-#else
-typedef unsigned long dmaaddr_t;
-#define PHYSADDRHI(_pa) (0)
-#define PHYSADDRHISET(_pa, _val)
-#define PHYSADDRLO(_pa) ((_pa))
-#define PHYSADDRLOSET(_pa, _val) \
- do { \
- (_pa) = (_val); \
- } while (0)
-#endif /* BCMDMA64OSL */
-
-/* One physical DMA segment */
-typedef struct {
- dmaaddr_t addr;
- u32 length;
-} hnddma_seg_t;
-
-#define MAX_DMA_SEGS 4
-
-typedef struct {
- void *oshdmah; /* Opaque handle for OSL to store its information */
- uint origsize; /* Size of the virtual packet */
- uint nsegs;
- hnddma_seg_t segs[MAX_DMA_SEGS];
-} hnddma_seg_map_t;
-
-/* Packet headroom necessary to accommodate the largest header in the system (i.e. TXOFF).
- * By doing so, we avoid the need to allocate an extra buffer for the header when bridging to WL.
- * There is a compile-time check in wlc.c which ensures that this value is at least as big
- * as TXOFF. This value is used in dma_rxfill (hnddma.c).
- */
-
-#define BCMEXTRAHDROOM 172
-
-/* Macros for doing definition and get/set of bitfields
- * Usage example, e.g. a three-bit field (bits 4-6):
- * #define <NAME>_M BITFIELD_MASK(3)
- * #define <NAME>_S 4
- * ...
- * regval = R_REG(osh, &regs->regfoo);
- * field = GFIELD(regval, <NAME>);
- * regval = SFIELD(regval, <NAME>, 1);
- * W_REG(osh, &regs->regfoo, regval);
- */
-#define BITFIELD_MASK(width) \
- (((unsigned)1 << (width)) - 1)
-#define GFIELD(val, field) \
- (((val) >> field ## _S) & field ## _M)
-#define SFIELD(val, field, bits) \
- (((val) & (~(field ## _M << field ## _S))) | \
- ((unsigned)(bits) << field ## _S))
-
-/*
- * Priority definitions according 802.1D
- */
-#define PRIO_8021D_NONE 2
-#define PRIO_8021D_BK 1
-#define PRIO_8021D_BE 0
-#define PRIO_8021D_EE 3
-#define PRIO_8021D_CL 4
-#define PRIO_8021D_VI 5
-#define PRIO_8021D_VO 6
-#define PRIO_8021D_NC 7
-#define MAXPRIO 7
-#define NUMPRIO (MAXPRIO + 1)
-
-/* Max. nvram variable table size */
-#define MAXSZ_NVRAM_VARS 4096
-
-/* handle forward declaration */
-struct wl_info;
-struct wlc_bsscfg;
-
-#endif /* _bcmdefs_h_ */
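A concrete instance (illustrative only) of the bitfield macro pattern described in the usage comment above, for a hypothetical three-bit field REGFOO at bits 4..6; REGFOO_* and example_set_field() are made-up names for the example.

#define REGFOO_M	BITFIELD_MASK(3)
#define REGFOO_S	4

/* Illustrative only: read the field, then return the register value
 * with the field set to 1.
 */
static u32 example_set_field(u32 regval)
{
	u32 field = GFIELD(regval, REGFOO);	/* extract bits 4..6 */

	(void)field;
	return SFIELD(regval, REGFOO, 1);	/* write 1 into bits 4..6 */
}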
diff --git a/drivers/staging/brcm80211/include/bcmdevs.h b/drivers/staging/brcm80211/include/bcmdevs.h
deleted file mode 100644
index 26947efa83e..00000000000
--- a/drivers/staging/brcm80211/include/bcmdevs.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BCMDEVS_H
-#define _BCMDEVS_H
-
-#define BCM4325_D11DUAL_ID 0x431b
-#define BCM4325_D11G_ID 0x431c
-#define BCM4325_D11A_ID 0x431d
-
-#define BCM4329_D11N2G_ID 0x432f /* 4329 802.11n 2.4G device */
-#define BCM4329_D11N5G_ID 0x4330 /* 4329 802.11n 5G device */
-#define BCM4329_D11NDUAL_ID 0x432e
-
-#define BCM4319_D11N_ID 0x4337 /* 4319 802.11n dualband device */
-#define BCM4319_D11N2G_ID 0x4338 /* 4319 802.11n 2.4G device */
-#define BCM4319_D11N5G_ID 0x4339 /* 4319 802.11n 5G device */
-
-#define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */
-
-#define BCM43225_D11N2G_ID 0x4357 /* 43225 802.11n 2.4GHz device */
-
-#define BCM43236_D11N_ID 0x4346 /* 43236 802.11n dualband device */
-#define BCM43236_D11N2G_ID 0x4347 /* 43236 802.11n 2.4GHz device */
-
-#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
-
-/* Chip IDs */
-#define BCM4313_CHIP_ID 0x4313 /* 4313 chip id */
-#define BCM4319_CHIP_ID 0x4319 /* 4319 chip id */
-
-#define BCM43224_CHIP_ID 43224 /* 43224 chipcommon chipid */
-#define BCM43225_CHIP_ID 43225 /* 43225 chipcommon chipid */
-#define BCM43421_CHIP_ID 43421 /* 43421 chipcommon chipid */
-#define BCM43235_CHIP_ID 43235 /* 43235 chipcommon chipid */
-#define BCM43236_CHIP_ID 43236 /* 43236 chipcommon chipid */
-#define BCM43238_CHIP_ID 43238 /* 43238 chipcommon chipid */
-#define BCM4329_CHIP_ID 0x4329 /* 4329 chipcommon chipid */
-#define BCM4325_CHIP_ID 0x4325 /* 4325 chipcommon chipid */
-#define BCM4331_CHIP_ID 0x4331 /* 4331 chipcommon chipid */
-#define BCM4336_CHIP_ID 0x4336 /* 4336 chipcommon chipid */
-#define BCM4330_CHIP_ID 0x4330 /* 4330 chipcommon chipid */
-#define BCM6362_CHIP_ID 0x6362 /* 6362 chipcommon chipid */
-
-/* these are router chips */
-#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */
-#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */
-#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */
-#define BCM5356_CHIP_ID 0x5356 /* 5356 chipcommon chipid */
-#define BCM5357_CHIP_ID 0x5357 /* 5357 chipcommon chipid */
-
-/* Package IDs */
-#define BCM4329_289PIN_PKG_ID 0 /* 4329 289-pin package id */
-#define BCM4329_182PIN_PKG_ID 1 /* 4329N 182-pin package id */
-#define BCM4717_PKG_ID 9 /* 4717 package id */
-#define BCM4718_PKG_ID 10 /* 4718 package id */
-#define HDLSIM_PKG_ID 14 /* HDL simulator package id */
-#define HWSIM_PKG_ID 15 /* Hardware simulator package id */
-#define BCM43224_FAB_SMIC 0xa /* the chip is manufactured by SMIC */
-
-/* boardflags */
-#define BFL_PACTRL 0x00000002 /* Board has gpio 9 controlling the PA */
-#define BFL_NOPLLDOWN 0x00000020 /* Not ok to power down the chip pll and oscillator */
-#define BFL_FEM 0x00000800 /* Board supports the Front End Module */
-#define BFL_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */
-#define BFL_NOPA 0x00010000 /* Board has no PA */
-#define BFL_BUCKBOOST 0x00200000 /* Power topology uses BUCKBOOST */
-#define BFL_FEM_BT 0x00400000 /* Board has FEM and switch to share antenna w/ BT */
-#define BFL_NOCBUCK 0x00800000 /* Power topology doesn't use CBUCK */
-#define BFL_PALDO 0x02000000 /* Power topology uses PALDO */
-#define BFL_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */
-
-/* boardflags2 */
-#define BFL2_RXBB_INT_REG_DIS 0x00000001 /* Board has an external rxbb regulator */
-#define BFL2_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */
-#define BFL2_TXPWRCTRL_EN 0x00000004 /* Board permits enabling TX Power Control */
-#define BFL2_2X4_DIV 0x00000008 /* Board supports the 2X4 diversity switch */
-#define BFL2_5G_PWRGAIN 0x00000010 /* Board supports 5G band power gain */
-#define BFL2_PCIEWAR_OVR 0x00000020 /* Board overrides ASPM and Clkreq settings */
-#define BFL2_LEGACY 0x00000080
-#define BFL2_SKWRKFEM_BRD 0x00000100 /* 4321mcm93 board uses Skyworks FEM */
-#define BFL2_SPUR_WAR 0x00000200 /* Board has a WAR for clock-harmonic spurs */
-#define BFL2_GPLL_WAR 0x00000400 /* Flag to narrow G-band PLL loop b/w */
-#define BFL2_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */
-#define BFL2_2G_SPUR_WAR 0x00002000 /* WAR to reduce and avoid clock-harmonic spurs in 2G */
-#define BFL2_GPLL_WAR2 0x00010000 /* Flag to widen G-band PLL loop b/w */
-#define BFL2_IPALVLSHIFT_3P3 0x00020000
-#define BFL2_INTERNDET_TXIQCAL 0x00040000 /* Use internal envelope detector for TX IQCAL */
-#define BFL2_XTALBUFOUTEN 0x00080000 /* Keep the buffered Xtal output from radio "ON"
- * Most drivers will turn it off without this flag
- * to save power.
- */
-
-/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */
-#define BOARD_GPIO_PACTRL 0x200 /* bit 9 controls the PA on new 4306 boards */
-#define BOARD_GPIO_12 0x1000 /* gpio 12 */
-#define BOARD_GPIO_13 0x2000 /* gpio 13 */
-
-#define PCI_CFG_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */
-#define PCI_CFG_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal power-up */
-#define PCI_CFG_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL power-down */
-
-/* power control defines */
-#define PLL_DELAY 150 /* us pll on delay */
-#define FREF_DELAY 200 /* us fref change delay */
-#define XTAL_ON_DELAY 1000 /* us crystal power-on delay */
-
-/* Reference board types */
-#define SPI_BOARD 0x0402
-
-#endif /* _BCMDEVS_H */
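For illustration only, a trivial helper showing how a driver would test one of the boardflags bits defined above against a board's flags word:

/* Illustrative only: true if the board reports an external 2.4 GHz LNA. */
static bool board_has_external_lna(u32 boardflags)
{
	return (boardflags & BFL_EXTLNA) != 0;
}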
diff --git a/drivers/staging/brcm80211/include/bcmnvram.h b/drivers/staging/brcm80211/include/bcmnvram.h
deleted file mode 100644
index 12645ddf000..00000000000
--- a/drivers/staging/brcm80211/include/bcmnvram.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcmnvram_h_
-#define _bcmnvram_h_
-
-#ifndef _LANGUAGE_ASSEMBLY
-
-#include <bcmdefs.h>
-
-struct nvram_header {
- u32 magic;
- u32 len;
- u32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
- u32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */
- u32 config_ncdl; /* ncdl values for memc */
-};
-
-/*
- * Initialize NVRAM access. May be unnecessary or undefined on certain
- * platforms.
- */
-extern int nvram_init(void);
-
-/*
- * Append a chunk of nvram variables to the global list
- */
-extern int nvram_append(char *vars, uint varsz);
-
-/*
- * Check for reset button press for restoring factory defaults.
- */
-extern int nvram_reset(void);
-
-/*
- * Disable NVRAM access. May be unnecessary or undefined on certain
- * platforms.
- */
-extern void nvram_exit(void);
-
-/*
- * Get the value of an NVRAM variable. The pointer returned may be
- * invalid after a set.
- * @param name name of variable to get
- * @return value of variable or NULL if undefined
- */
-extern char *nvram_get(const char *name);
-
-/*
- * Get the value of an NVRAM variable.
- * @param name name of variable to get
- * @return value of variable or an empty string ("") if undefined
- */
-#define nvram_safe_get(name) (nvram_get(name) ? : "")
-
-/*
- * Match an NVRAM variable.
- * @param name name of variable to match
- * @param match value to compare against value of variable
- * @return true if variable is defined and its value is string equal
- * to match or false otherwise
- */
-static inline int nvram_match(char *name, char *match)
-{
- const char *value = nvram_get(name);
- return value && !strcmp(value, match);
-}
-
-/*
- * Inversely match an NVRAM variable.
- * @param name name of variable to match
- * @param match value to compare against value of variable
- * @return true if variable is defined and its value is not string
- * equal to invmatch or false otherwise
- */
-static inline int nvram_invmatch(char *name, char *invmatch)
-{
- const char *value = nvram_get(name);
- return value && strcmp(value, invmatch);
-}
-
-/*
- * Set the value of an NVRAM variable. The name and value strings are
- * copied into private storage. Pointers to previously set values
- * may become invalid. The new value may be immediately
- * retrieved but will not be permanently stored until a commit.
- * @param name name of variable to set
- * @param value value of variable
- * @return 0 on success and errno on failure
- */
-extern int nvram_set(const char *name, const char *value);
-
-/*
- * Unset an NVRAM variable. Pointers to previously set values
- * remain valid until a set.
- * @param name name of variable to unset
- * @return 0 on success and errno on failure
- * NOTE: use nvram_commit to commit this change to flash.
- */
-extern int nvram_unset(const char *name);
-
-/*
- * Commit NVRAM variables to permanent storage. All pointers to values
- * may be invalid after a commit.
- * NVRAM values are undefined after a commit.
- * @return 0 on success and errno on failure
- */
-extern int nvram_commit(void);
-
-/*
- * Get all NVRAM variables (format name=value\0 ... \0\0).
- * @param buf buffer to store variables
- * @param count size of buffer in bytes
- * @return 0 on success and errno on failure
- */
-extern int nvram_getall(char *nvram_buf, int count);
-
-#endif /* _LANGUAGE_ASSEMBLY */
-
-/* variable access */
-extern char *getvar(char *vars, const char *name);
-extern int getintvar(char *vars, const char *name);
-
-/* The NVRAM version number stored as an NVRAM variable */
-#define NVRAM_SOFTWARE_VERSION "1"
-
-#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */
-#define NVRAM_CLEAR_MAGIC 0x0
-#define NVRAM_INVALID_MAGIC 0xFFFFFFFF
-#define NVRAM_VERSION 1
-#define NVRAM_HEADER_SIZE 20
-#define NVRAM_SPACE 0x8000
-
-#define NVRAM_MAX_VALUE_LEN 255
-#define NVRAM_MAX_PARAM_LEN 64
-
-#define NVRAM_CRC_START_POSITION 9 /* magic, len, crc8 to be skipped */
-#define NVRAM_CRC_VER_MASK 0xffffff00 /* for crc_ver_init */
-
-#endif /* _bcmnvram_h_ */
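A minimal usage sketch (illustrative; the variable name and value are made up) of the get/set/commit sequence declared above:

#include <linux/string.h>

/* Illustrative only: update an NVRAM variable and commit it to flash. */
static int example_nvram_update(void)
{
	const char *cur = nvram_get("boardtype");	/* NULL if undefined */
	int err;

	if (cur && !strcmp(cur, "0x0402"))
		return 0;				/* already up to date */

	err = nvram_set("boardtype", "0x0402");
	if (!err)
		err = nvram_commit();			/* persist to flash */
	return err;
}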
diff --git a/drivers/staging/brcm80211/include/bcmsdh.h b/drivers/staging/brcm80211/include/bcmsdh.h
deleted file mode 100644
index 3b57dc13b1d..00000000000
--- a/drivers/staging/brcm80211/include/bcmsdh.h
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcmsdh_h_
-#define _bcmsdh_h_
-
-#include <linux/skbuff.h>
-#define BCMSDH_ERROR_VAL 0x0001 /* Error */
-#define BCMSDH_INFO_VAL 0x0002 /* Info */
-extern const uint bcmsdh_msglevel;
-
-#ifdef BCMDBG
-#define BCMSDH_ERROR(x) \
- do { \
- if ((bcmsdh_msglevel & BCMSDH_ERROR_VAL) && net_ratelimit()) \
- printk x; \
- } while (0)
-#define BCMSDH_INFO(x) \
- do { \
- if ((bcmsdh_msglevel & BCMSDH_INFO_VAL) && net_ratelimit()) \
- printk x; \
- } while (0)
-#else /* BCMDBG */
-#define BCMSDH_ERROR(x)
-#define BCMSDH_INFO(x)
-#endif /* BCMDBG */
-
-/* forward declarations */
-typedef struct bcmsdh_info bcmsdh_info_t;
-typedef void (*bcmsdh_cb_fn_t) (void *);
-
-/* Attach and build an interface to the underlying SD host driver.
- * - Allocates resources (structs, arrays, mem, OS handles, etc) needed by bcmsdh.
- * - Returns the bcmsdh handle and virtual address base for register access.
- * The returned handle should be used in all subsequent calls, but the bcmsdh
- * implementation may maintain a single "default" handle (e.g. the first or
- * most recent one) to enable single-instance implementations to pass NULL.
- */
-extern bcmsdh_info_t *bcmsdh_attach(void *cfghdl, void **regsva, uint irq);
-
-/* Detach - freeup resources allocated in attach */
-extern int bcmsdh_detach(void *sdh);
-
-/* Query if SD device interrupts are enabled */
-extern bool bcmsdh_intr_query(void *sdh);
-
-/* Enable/disable SD interrupt */
-extern int bcmsdh_intr_enable(void *sdh);
-extern int bcmsdh_intr_disable(void *sdh);
-
-/* Register/deregister device interrupt handler. */
-extern int bcmsdh_intr_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
-extern int bcmsdh_intr_dereg(void *sdh);
-
-#if defined(DHD_DEBUG)
-/* Query pending interrupt status from the host controller */
-extern bool bcmsdh_intr_pending(void *sdh);
-#endif
-extern int bcmsdh_claim_host_and_lock(void *sdh);
-extern int bcmsdh_release_host_and_unlock(void *sdh);
-
-/* Register a callback to be called if and when bcmsdh detects
- * device removal. No-op in the case of non-removable/hardwired devices.
- */
-extern int bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh);
-
-/* Access SDIO address space (e.g. CCCR) using CMD52 (single-byte interface).
- * fn: function number
- * addr: unmodified SDIO-space address
- * data: data byte to write
- * err: pointer to error code (or NULL)
- */
-extern u8 bcmsdh_cfg_read(void *sdh, uint func, u32 addr, int *err);
-extern void bcmsdh_cfg_write(void *sdh, uint func, u32 addr, u8 data,
- int *err);
-
-/* Read/Write 4bytes from/to cfg space */
-extern u32 bcmsdh_cfg_read_word(void *sdh, uint fnc_num, u32 addr,
- int *err);
-extern void bcmsdh_cfg_write_word(void *sdh, uint fnc_num, u32 addr,
- u32 data, int *err);
-
-/* Read CIS content for specified function.
- * fn: function whose CIS is being requested (0 is common CIS)
- * cis: pointer to memory location to place results
- * length: number of bytes to read
- * Internally, this routine uses the values from the cis base regs (0x9-0xB)
- * to form an SDIO-space address to read the data from.
- */
-extern int bcmsdh_cis_read(void *sdh, uint func, u8 *cis, uint length);
-
-/* Synchronous access to device (client) core registers via CMD53 to F1.
- * addr: backplane address (i.e. >= regsva from attach)
- * size: register width in bytes (2 or 4)
- * data: data for register write
- */
-extern u32 bcmsdh_reg_read(void *sdh, u32 addr, uint size);
-extern u32 bcmsdh_reg_write(void *sdh, u32 addr, uint size, u32 data);
-
-/* Indicate if last reg read/write failed */
-extern bool bcmsdh_regfail(void *sdh);
-
-/* Buffer transfer to/from device (client) core via cmd53.
- * fn: function number
- * addr: backplane address (i.e. >= regsva from attach)
- * flags: backplane width, address increment, sync/async
- * buf: pointer to memory data buffer
- * nbytes: number of bytes to transfer to/from buf
- * pkt: pointer to packet associated with buf (if any)
- * complete: callback function for command completion (async only)
- * handle: handle for completion callback (first arg in callback)
- * Returns 0 or error code.
- * NOTE: Async operation is not currently supported.
- */
-typedef void (*bcmsdh_cmplt_fn_t) (void *handle, int status, bool sync_waiting);
-extern int bcmsdh_send_buf(void *sdh, u32 addr, uint fn, uint flags,
- u8 *buf, uint nbytes, void *pkt,
- bcmsdh_cmplt_fn_t complete, void *handle);
-extern int bcmsdh_recv_buf(void *sdh, u32 addr, uint fn, uint flags,
- u8 *buf, uint nbytes, struct sk_buff *pkt,
- bcmsdh_cmplt_fn_t complete, void *handle);
-
-/* Flags bits */
-#define SDIO_REQ_4BYTE 0x1 /* Four-byte target (backplane) width (vs. two-byte) */
-#define SDIO_REQ_FIXED 0x2 /* Fixed address (FIFO) (vs. incrementing address) */
-#define SDIO_REQ_ASYNC 0x4 /* Async request (vs. sync request) */
-
-/* Pending (non-error) return code */
-#define BCME_PENDING 1
-
-/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only).
- * rw: read or write (0/1)
- * addr: direct SDIO address
- * buf: pointer to memory data buffer
- * nbytes: number of bytes to transfer to/from buf
- * Returns 0 or error code.
- */
-extern int bcmsdh_rwdata(void *sdh, uint rw, u32 addr, u8 *buf,
- uint nbytes);
-
-/* Issue an abort to the specified function */
-extern int bcmsdh_abort(void *sdh, uint fn);
-
-/* Start SDIO Host Controller communication */
-extern int bcmsdh_start(void *sdh, int stage);
-
-/* Stop SDIO Host Controller communication */
-extern int bcmsdh_stop(void *sdh);
-
-/* Returns the "Device ID" of target device on the SDIO bus. */
-extern int bcmsdh_query_device(void *sdh);
-
-/* Returns the number of IO functions reported by the device */
-extern uint bcmsdh_query_iofnum(void *sdh);
-
-/* Miscellaneous knob tweaker. */
-extern int bcmsdh_iovar_op(void *sdh, const char *name,
- void *params, int plen, void *arg, int len,
- bool set);
-
-/* Reset and reinitialize the device */
-extern int bcmsdh_reset(bcmsdh_info_t *sdh);
-
-/* helper functions */
-
-extern void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
-
-/* callback functions */
-typedef struct {
- /* attach to device */
- void *(*attach) (u16 vend_id, u16 dev_id, u16 bus, u16 slot,
- u16 func, uint bustype, void *regsva, void *param);
- /* detach from device */
- void (*detach) (void *ch);
-} bcmsdh_driver_t;
-
-/* platform specific/high level functions */
-extern int bcmsdh_register(bcmsdh_driver_t *driver);
-extern void bcmsdh_unregister(void);
-extern bool bcmsdh_chipmatch(u16 vendor, u16 device);
-extern void bcmsdh_device_remove(void *sdh);
-
-/* Function to pass device-status bits to DHD. */
-extern u32 bcmsdh_get_dstatus(void *sdh);
-
-/* Function to return current window addr */
-extern u32 bcmsdh_cur_sbwad(void *sdh);
-
-/* Function to pass chipid and rev to lower layers for controlling pr's */
-extern void bcmsdh_chipinfo(void *sdh, u32 chip, u32 chiprev);
-
-#endif /* _bcmsdh_h_ */
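For orientation, an illustrative read sequence (not from the tree) against the bcmsdh API declared above; sdh is the handle returned by bcmsdh_attach(), and the register address below is a placeholder value.

/* Illustrative only: read one SDIO config-space byte via CMD52 and a
 * 32-bit backplane register via CMD53, then check for failure.
 */
static void example_bcmsdh_reads(void *sdh)
{
	int err = 0;
	u8 cccr;
	u32 regval;

	cccr = bcmsdh_cfg_read(sdh, 0, 0x00, &err);	/* func 0, CCCR byte 0 */
	regval = bcmsdh_reg_read(sdh, 0x18000000, 4);	/* 4-byte core register */

	if (err || bcmsdh_regfail(sdh))
		BCMSDH_ERROR(("example: SDIO read failed\n"));

	(void)cccr;
	(void)regval;
}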
diff --git a/drivers/staging/brcm80211/include/bcmsdpcm.h b/drivers/staging/brcm80211/include/bcmsdpcm.h
deleted file mode 100644
index 5175e67a6d2..00000000000
--- a/drivers/staging/brcm80211/include/bcmsdpcm.h
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcmsdpcm_h_
-#define _bcmsdpcm_h_
-
-/*
- * Software allocation of To SB Mailbox resources
- */
-
-/* intstatus bits */
-#define I_SMB_NAK I_SMB_SW0 /* To SB Mailbox Frame NAK */
-#define I_SMB_INT_ACK I_SMB_SW1 /* To SB Mailbox Host Interrupt ACK */
-#define I_SMB_USE_OOB I_SMB_SW2 /* To SB Mailbox Use OOB Wakeup */
-#define I_SMB_DEV_INT I_SMB_SW3 /* To SB Mailbox Miscellaneous Interrupt */
-
-#define I_TOSBMAIL (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT)
-
-/* tosbmailbox bits corresponding to intstatus bits */
-#define SMB_NAK (1 << 0) /* To SB Mailbox Frame NAK */
-#define SMB_INT_ACK (1 << 1) /* To SB Mailbox Host Interrupt ACK */
-#define SMB_USE_OOB (1 << 2) /* To SB Mailbox Use OOB Wakeup */
-#define SMB_DEV_INT (1 << 3) /* To SB Mailbox Miscellaneous Interrupt */
-#define SMB_MASK 0x0000000f /* To SB Mailbox Mask */
-
-/* tosbmailboxdata */
-#define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */
-#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */
-
-/*
- * Software allocation of To Host Mailbox resources
- */
-
-/* intstatus bits */
-#define I_HMB_FC_STATE I_HMB_SW0 /* To Host Mailbox Flow Control State */
-#define I_HMB_FC_CHANGE I_HMB_SW1 /* To Host Mailbox Flow Control State Changed */
-#define I_HMB_FRAME_IND I_HMB_SW2 /* To Host Mailbox Frame Indication */
-#define I_HMB_HOST_INT I_HMB_SW3 /* To Host Mailbox Miscellaneous Interrupt */
-
-#define I_TOHOSTMAIL (I_HMB_FC_CHANGE | I_HMB_FRAME_IND | I_HMB_HOST_INT)
-
-/* tohostmailbox bits corresponding to intstatus bits */
-#define HMB_FC_ON (1 << 0) /* To Host Mailbox Flow Control State */
-#define HMB_FC_CHANGE (1 << 1) /* To Host Mailbox Flow Control State Changed */
-#define HMB_FRAME_IND (1 << 2) /* To Host Mailbox Frame Indication */
-#define HMB_HOST_INT (1 << 3) /* To Host Mailbox Miscellaneous Interrupt */
-#define HMB_MASK 0x0000000f /* To Host Mailbox Mask */
-
-/* tohostmailboxdata */
-#define HMB_DATA_NAKHANDLED 1 /* we're ready to retransmit NAK'd frame to host */
-#define HMB_DATA_DEVREADY 2 /* we're ready to talk to host after enable */
-#define HMB_DATA_FC 4 /* per prio flowcontrol update flag to host */
-#define HMB_DATA_FWREADY 8 /* firmware is ready for protocol activity */
-
-#define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */
-#define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */
-
-#define HMB_DATA_VERSION_MASK 0x00ff0000 /* device protocol version (with devready) */
-#define HMB_DATA_VERSION_SHIFT 16 /* device protocol version (with devready) */
-
-/*
- * Software-defined protocol header
- */
-
-/* Current protocol version */
-#define SDPCM_PROT_VERSION 4
-
-/* SW frame header */
-#define SDPCM_SEQUENCE_MASK 0x000000ff /* Sequence Number Mask */
-#define SDPCM_PACKET_SEQUENCE(p) (((u8 *)p)[0] & 0xff) /* p starts w/SW Header */
-
-#define SDPCM_CHANNEL_MASK 0x00000f00 /* Channel Number Mask */
-#define SDPCM_CHANNEL_SHIFT 8 /* Channel Number Shift */
-#define SDPCM_PACKET_CHANNEL(p) (((u8 *)p)[1] & 0x0f) /* p starts w/SW Header */
-
-#define SDPCM_FLAGS_MASK 0x0000f000 /* Mask of flag bits */
-#define SDPCM_FLAGS_SHIFT 12 /* Flag bits shift */
-#define SDPCM_PACKET_FLAGS(p) ((((u8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */
-
-/* Next Read Len: lookahead length of next frame, in 16-byte units (rounded up) */
-#define SDPCM_NEXTLEN_MASK 0x00ff0000 /* Next Read Len Mask */
-#define SDPCM_NEXTLEN_SHIFT 16 /* Next Read Len Shift */
-#define SDPCM_NEXTLEN_VALUE(p) ((((u8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */
-#define SDPCM_NEXTLEN_OFFSET 2
-
-/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
-#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
-#define SDPCM_DOFFSET_VALUE(p) (((u8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
-#define SDPCM_DOFFSET_MASK 0xff000000
-#define SDPCM_DOFFSET_SHIFT 24
-
-#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
-#define SDPCM_FCMASK_VALUE(p) (((u8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
-#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
-#define SDPCM_WINDOW_VALUE(p) (((u8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
-#define SDPCM_VERSION_OFFSET 6 /* Version # */
-#define SDPCM_VERSION_VALUE(p) (((u8 *)p)[SDPCM_VERSION_OFFSET] & 0xff)
-#define SDPCM_UNUSED_OFFSET 7 /* Spare */
-#define SDPCM_UNUSED_VALUE(p) (((u8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff)
-
-#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
-
-/* logical channel numbers */
-#define SDPCM_CONTROL_CHANNEL 0 /* Control Request/Response Channel Id */
-#define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication Channel Id */
-#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
-#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets (superframes) */
-#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
-#define SDPCM_MAX_CHANNEL 15
-
-#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for eight-bit frame seq number */
-
-#define SDPCM_FLAG_RESVD0 0x01
-#define SDPCM_FLAG_RESVD1 0x02
-#define SDPCM_FLAG_GSPI_TXENAB 0x04
-#define SDPCM_FLAG_GLOMDESC 0x08 /* Superframe descriptor mask */
-
-/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */
-#define SDPCM_GLOMDESC_FLAG (SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT)
-
-#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
-
-/* For TEST_CHANNEL packets, define another 4-byte header */
-#define SDPCM_TEST_HDRLEN 4 /* Generally: Cmd(1), Ext(1), Len(2);
- * Semantics of Ext byte depend on command.
- * Len is current or requested frame length, not
- * including test header; sent little-endian.
- */
-#define SDPCM_TEST_DISCARD 0x01 /* Receiver discards. Ext is a pattern id. */
-#define SDPCM_TEST_ECHOREQ 0x02 /* Echo request. Ext is a pattern id. */
-#define SDPCM_TEST_ECHORSP 0x03 /* Echo response. Ext is a pattern id. */
-#define SDPCM_TEST_BURST 0x04 /* Receiver to send a burst. Ext is a frame count */
-#define SDPCM_TEST_SEND 0x05 /* Receiver sets send mode. Ext is boolean on/off */
-
-/* Handy macro for filling in datagen packets with a pattern */
-#define SDPCM_TEST_FILL(byteno, id) ((u8)(id + byteno))
-
-/*
- * Software counters (first part matches hardware counters)
- */
-
-typedef volatile struct {
- u32 cmd52rd; /* Cmd52RdCount, SDIO: cmd52 reads */
- u32 cmd52wr; /* Cmd52WrCount, SDIO: cmd52 writes */
- u32 cmd53rd; /* Cmd53RdCount, SDIO: cmd53 reads */
- u32 cmd53wr; /* Cmd53WrCount, SDIO: cmd53 writes */
- u32 abort; /* AbortCount, SDIO: aborts */
- u32 datacrcerror; /* DataCrcErrorCount, SDIO: frames w/CRC error */
- u32 rdoutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */
- u32 wroutofsync; /* WrOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */
- u32 writebusy; /* WriteBusyCount, SDIO: device asserted "busy" */
- u32 readwait; /* ReadWaitCount, SDIO: no data ready for a read cmd */
- u32 readterm; /* ReadTermCount, SDIO: read frame termination cmds */
- u32 writeterm; /* WriteTermCount, SDIO: write frames termination cmds */
- u32 rxdescuflo; /* receive descriptor underflows */
- u32 rxfifooflo; /* receive fifo overflows */
- u32 txfifouflo; /* transmit fifo underflows */
- u32 runt; /* runt (too short) frames recv'd from bus */
- u32 badlen; /* frame's rxh len does not match its hw tag len */
- u32 badcksum; /* frame's hw tag chksum doesn't agree with len value */
- u32 seqbreak; /* break in sequence # space from one rx frame to the next */
- u32 rxfcrc; /* frame rx header indicates crc error */
- u32 rxfwoos; /* frame rx header indicates write out of sync */
- u32 rxfwft; /* frame rx header indicates write frame termination */
- u32 rxfabort; /* frame rx header indicates frame aborted */
- u32 woosint; /* write out of sync interrupt */
- u32 roosint; /* read out of sync interrupt */
- u32 rftermint; /* read frame terminate interrupt */
- u32 wftermint; /* write frame terminate interrupt */
-} sdpcmd_cnt_t;
-
-/*
- * Shared structure between dongle and the host.
- * The structure contains pointers to trap or assert information.
- */
-#define SDPCM_SHARED_VERSION 0x0002
-#define SDPCM_SHARED_VERSION_MASK 0x00FF
-#define SDPCM_SHARED_ASSERT_BUILT 0x0100
-#define SDPCM_SHARED_ASSERT 0x0200
-#define SDPCM_SHARED_TRAP 0x0400
-
-typedef struct {
- u32 flags;
- u32 trap_addr;
- u32 assert_exp_addr;
- u32 assert_file_addr;
- u32 assert_line;
- u32 console_addr; /* Address of hndrte_cons_t */
- u32 msgtrace_addr;
- u8 tag[32];
-} sdpcm_shared_t;
-
-extern sdpcm_shared_t sdpcm_shared;
-
-#endif /* _bcmsdpcm_h_ */
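For reference, the SDPCM software-header accessors removed above boil down to byte masks and shifts on the first eight bytes of a frame. Below is a minimal userspace sketch (not driver code): the macro definitions are copied locally so it compiles on its own, and the sample header bytes are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* copied from the (removed) bcmsdpcm.h above */
#define SDPCM_PACKET_SEQUENCE(p)	(((const uint8_t *)(p))[0] & 0xff)
#define SDPCM_PACKET_CHANNEL(p)		(((const uint8_t *)(p))[1] & 0x0f)
#define SDPCM_PACKET_FLAGS(p)		((((const uint8_t *)(p))[1] & 0xf0) >> 4)
#define SDPCM_NEXTLEN_VALUE(p)		((((const uint8_t *)(p))[2] & 0xff) << 4)
#define SDPCM_DOFFSET_VALUE(p)		(((const uint8_t *)(p))[3] & 0xff)

int main(void)
{
	/* example only: seq 0x2a, data channel (2), no flags,
	 * next frame 3 * 16 bytes, data offset 12 bytes */
	const uint8_t hdr[8] = { 0x2a, 0x02, 0x03, 0x0c, 0x00, 0x00, 0x04, 0x00 };

	printf("seq=%d chan=%d flags=%d nextlen=%d doffset=%d\n",
	       SDPCM_PACKET_SEQUENCE(hdr), SDPCM_PACKET_CHANNEL(hdr),
	       SDPCM_PACKET_FLAGS(hdr), SDPCM_NEXTLEN_VALUE(hdr),
	       SDPCM_DOFFSET_VALUE(hdr));
	return 0;
}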
diff --git a/drivers/staging/brcm80211/include/bcmsrom_fmt.h b/drivers/staging/brcm80211/include/bcmsrom_fmt.h
deleted file mode 100644
index 4666afd883a..00000000000
--- a/drivers/staging/brcm80211/include/bcmsrom_fmt.h
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcmsrom_fmt_h_
-#define _bcmsrom_fmt_h_
-
-/* Maximum srom: 6 Kilobits == 768 bytes */
-#define SROM_MAX 768
-#define SROM_MAXW 384
-#define VARS_MAX 4096
-
-/* PCI fields */
-#define PCI_F0DEVID 48
-
-#define SROM_WORDS 64
-
-#define SROM3_SWRGN_OFF 28 /* s/w region offset in words */
-
-#define SROM_SSID 2
-
-#define SROM_WL1LHMAXP 29
-
-#define SROM_WL1LPAB0 30
-#define SROM_WL1LPAB1 31
-#define SROM_WL1LPAB2 32
-
-#define SROM_WL1HPAB0 33
-#define SROM_WL1HPAB1 34
-#define SROM_WL1HPAB2 35
-
-#define SROM_MACHI_IL0 36
-#define SROM_MACMID_IL0 37
-#define SROM_MACLO_IL0 38
-#define SROM_MACHI_ET0 39
-#define SROM_MACMID_ET0 40
-#define SROM_MACLO_ET0 41
-#define SROM_MACHI_ET1 42
-#define SROM_MACMID_ET1 43
-#define SROM_MACLO_ET1 44
-#define SROM3_MACHI 37
-#define SROM3_MACMID 38
-#define SROM3_MACLO 39
-
-#define SROM_BXARSSI2G 40
-#define SROM_BXARSSI5G 41
-
-#define SROM_TRI52G 42
-#define SROM_TRI5GHL 43
-
-#define SROM_RXPO52G 45
-
-#define SROM2_ENETPHY 45
-
-#define SROM_AABREV 46
-/* Fields in AABREV */
-#define SROM_BR_MASK 0x00ff
-#define SROM_CC_MASK 0x0f00
-#define SROM_CC_SHIFT 8
-#define SROM_AA0_MASK 0x3000
-#define SROM_AA0_SHIFT 12
-#define SROM_AA1_MASK 0xc000
-#define SROM_AA1_SHIFT 14
-
-#define SROM_WL0PAB0 47
-#define SROM_WL0PAB1 48
-#define SROM_WL0PAB2 49
-
-#define SROM_LEDBH10 50
-#define SROM_LEDBH32 51
-
-#define SROM_WL10MAXP 52
-
-#define SROM_WL1PAB0 53
-#define SROM_WL1PAB1 54
-#define SROM_WL1PAB2 55
-
-#define SROM_ITT 56
-
-#define SROM_BFL 57
-#define SROM_BFL2 28
-#define SROM3_BFL2 61
-
-#define SROM_AG10 58
-
-#define SROM_CCODE 59
-
-#define SROM_OPO 60
-
-#define SROM3_LEDDC 62
-
-#define SROM_CRCREV 63
-
-/* SROM Rev 4: Reallocate the software part of the srom to accommodate
- * MIMO features. It assumes up to two PCIE functions and 440 bytes
- * of usable srom i.e. the usable storage in chips with OTP that
- * implements hardware redundancy.
- */
-
-#define SROM4_WORDS 220
-
-#define SROM4_SIGN 32
-#define SROM4_SIGNATURE 0x5372
-
-#define SROM4_BREV 33
-
-#define SROM4_BFL0 34
-#define SROM4_BFL1 35
-#define SROM4_BFL2 36
-#define SROM4_BFL3 37
-#define SROM5_BFL0 37
-#define SROM5_BFL1 38
-#define SROM5_BFL2 39
-#define SROM5_BFL3 40
-
-#define SROM4_MACHI 38
-#define SROM4_MACMID 39
-#define SROM4_MACLO 40
-#define SROM5_MACHI 41
-#define SROM5_MACMID 42
-#define SROM5_MACLO 43
-
-#define SROM4_CCODE 41
-#define SROM4_REGREV 42
-#define SROM5_CCODE 34
-#define SROM5_REGREV 35
-
-#define SROM4_LEDBH10 43
-#define SROM4_LEDBH32 44
-#define SROM5_LEDBH10 59
-#define SROM5_LEDBH32 60
-
-#define SROM4_LEDDC 45
-#define SROM5_LEDDC 45
-
-#define SROM4_AA 46
-#define SROM4_AA2G_MASK 0x00ff
-#define SROM4_AA2G_SHIFT 0
-#define SROM4_AA5G_MASK 0xff00
-#define SROM4_AA5G_SHIFT 8
-
-#define SROM4_AG10 47
-#define SROM4_AG32 48
-
-#define SROM4_TXPID2G 49
-#define SROM4_TXPID5G 51
-#define SROM4_TXPID5GL 53
-#define SROM4_TXPID5GH 55
-
-#define SROM4_TXRXC 61
-#define SROM4_TXCHAIN_MASK 0x000f
-#define SROM4_TXCHAIN_SHIFT 0
-#define SROM4_RXCHAIN_MASK 0x00f0
-#define SROM4_RXCHAIN_SHIFT 4
-#define SROM4_SWITCH_MASK 0xff00
-#define SROM4_SWITCH_SHIFT 8
-
-/* Per-path fields */
-#define MAX_PATH_SROM 4
-#define SROM4_PATH0 64
-#define SROM4_PATH1 87
-#define SROM4_PATH2 110
-#define SROM4_PATH3 133
-
-#define SROM4_2G_ITT_MAXP 0
-#define SROM4_2G_PA 1
-#define SROM4_5G_ITT_MAXP 5
-#define SROM4_5GLH_MAXP 6
-#define SROM4_5G_PA 7
-#define SROM4_5GL_PA 11
-#define SROM4_5GH_PA 15
-
-/* Fields in the ITT_MAXP and 5GLH_MAXP words */
-#define B2G_MAXP_MASK 0xff
-#define B2G_ITT_SHIFT 8
-#define B5G_MAXP_MASK 0xff
-#define B5G_ITT_SHIFT 8
-#define B5GH_MAXP_MASK 0xff
-#define B5GL_MAXP_SHIFT 8
-
-/* All the myriad power offsets */
-#define SROM4_2G_CCKPO 156
-#define SROM4_2G_OFDMPO 157
-#define SROM4_5G_OFDMPO 159
-#define SROM4_5GL_OFDMPO 161
-#define SROM4_5GH_OFDMPO 163
-#define SROM4_2G_MCSPO 165
-#define SROM4_5G_MCSPO 173
-#define SROM4_5GL_MCSPO 181
-#define SROM4_5GH_MCSPO 189
-#define SROM4_CDDPO 197
-#define SROM4_STBCPO 198
-#define SROM4_BW40PO 199
-#define SROM4_BWDUPPO 200
-
-#define SROM4_CRCREV 219
-
-/* SROM Rev 8: Make space for a 48-word hardware header for PCIe rev >= 6.
- * This is a combined srom for both MIMO and SISO boards, usable in
- * the .130 4Kilobit OTP with hardware redundancy.
- */
-
-#define SROM8_SIGN 64
-
-#define SROM8_BREV 65
-
-#define SROM8_BFL0 66
-#define SROM8_BFL1 67
-#define SROM8_BFL2 68
-#define SROM8_BFL3 69
-
-#define SROM8_MACHI 70
-#define SROM8_MACMID 71
-#define SROM8_MACLO 72
-
-#define SROM8_CCODE 73
-#define SROM8_REGREV 74
-
-#define SROM8_LEDBH10 75
-#define SROM8_LEDBH32 76
-
-#define SROM8_LEDDC 77
-
-#define SROM8_AA 78
-
-#define SROM8_AG10 79
-#define SROM8_AG32 80
-
-#define SROM8_TXRXC 81
-
-#define SROM8_BXARSSI2G 82
-#define SROM8_BXARSSI5G 83
-#define SROM8_TRI52G 84
-#define SROM8_TRI5GHL 85
-#define SROM8_RXPO52G 86
-
-#define SROM8_FEM2G 87
-#define SROM8_FEM5G 88
-#define SROM8_FEM_ANTSWLUT_MASK 0xf800
-#define SROM8_FEM_ANTSWLUT_SHIFT 11
-#define SROM8_FEM_TR_ISO_MASK 0x0700
-#define SROM8_FEM_TR_ISO_SHIFT 8
-#define SROM8_FEM_PDET_RANGE_MASK 0x00f8
-#define SROM8_FEM_PDET_RANGE_SHIFT 3
-#define SROM8_FEM_EXTPA_GAIN_MASK 0x0006
-#define SROM8_FEM_EXTPA_GAIN_SHIFT 1
-#define SROM8_FEM_TSSIPOS_MASK 0x0001
-#define SROM8_FEM_TSSIPOS_SHIFT 0
-
-#define SROM8_THERMAL 89
-
-/* Temp sense related entries */
-#define SROM8_MPWR_RAWTS 90
-#define SROM8_TS_SLP_OPT_CORRX 91
-/* FOC: frequency offset correction, HWIQ: H/W IOCAL enable, IQSWP: IQ CAL swap disable */
-#define SROM8_FOC_HWIQ_IQSWP 92
-
-/* Temperature delta for PHY calibration */
-#define SROM8_PHYCAL_TEMPDELTA 93
-
-/* Per-path offsets & fields */
-#define SROM8_PATH0 96
-#define SROM8_PATH1 112
-#define SROM8_PATH2 128
-#define SROM8_PATH3 144
-
-#define SROM8_2G_ITT_MAXP 0
-#define SROM8_2G_PA 1
-#define SROM8_5G_ITT_MAXP 4
-#define SROM8_5GLH_MAXP 5
-#define SROM8_5G_PA 6
-#define SROM8_5GL_PA 9
-#define SROM8_5GH_PA 12
-
-/* All the myriad power offsets */
-#define SROM8_2G_CCKPO 160
-
-#define SROM8_2G_OFDMPO 161
-#define SROM8_5G_OFDMPO 163
-#define SROM8_5GL_OFDMPO 165
-#define SROM8_5GH_OFDMPO 167
-
-#define SROM8_2G_MCSPO 169
-#define SROM8_5G_MCSPO 177
-#define SROM8_5GL_MCSPO 185
-#define SROM8_5GH_MCSPO 193
-
-#define SROM8_CDDPO 201
-#define SROM8_STBCPO 202
-#define SROM8_BW40PO 203
-#define SROM8_BWDUPPO 204
-
-/* SISO PA parameters are in the path0 spaces */
-#define SROM8_SISO 96
-
-/* Legacy names for SISO PA parameters */
-#define SROM8_W0_ITTMAXP (SROM8_SISO + SROM8_2G_ITT_MAXP)
-#define SROM8_W0_PAB0 (SROM8_SISO + SROM8_2G_PA)
-#define SROM8_W0_PAB1 (SROM8_SISO + SROM8_2G_PA + 1)
-#define SROM8_W0_PAB2 (SROM8_SISO + SROM8_2G_PA + 2)
-#define SROM8_W1_ITTMAXP (SROM8_SISO + SROM8_5G_ITT_MAXP)
-#define SROM8_W1_MAXP_LCHC (SROM8_SISO + SROM8_5GLH_MAXP)
-#define SROM8_W1_PAB0 (SROM8_SISO + SROM8_5G_PA)
-#define SROM8_W1_PAB1 (SROM8_SISO + SROM8_5G_PA + 1)
-#define SROM8_W1_PAB2 (SROM8_SISO + SROM8_5G_PA + 2)
-#define SROM8_W1_PAB0_LC (SROM8_SISO + SROM8_5GL_PA)
-#define SROM8_W1_PAB1_LC (SROM8_SISO + SROM8_5GL_PA + 1)
-#define SROM8_W1_PAB2_LC (SROM8_SISO + SROM8_5GL_PA + 2)
-#define SROM8_W1_PAB0_HC (SROM8_SISO + SROM8_5GH_PA)
-#define SROM8_W1_PAB1_HC (SROM8_SISO + SROM8_5GH_PA + 1)
-#define SROM8_W1_PAB2_HC (SROM8_SISO + SROM8_5GH_PA + 2)
-
-#define SROM8_CRCREV 219
-
-/* SROM REV 9 */
-#define SROM9_2GPO_CCKBW20 160
-#define SROM9_2GPO_CCKBW20UL 161
-#define SROM9_2GPO_LOFDMBW20 162
-#define SROM9_2GPO_LOFDMBW20UL 164
-
-#define SROM9_5GLPO_LOFDMBW20 166
-#define SROM9_5GLPO_LOFDMBW20UL 168
-#define SROM9_5GMPO_LOFDMBW20 170
-#define SROM9_5GMPO_LOFDMBW20UL 172
-#define SROM9_5GHPO_LOFDMBW20 174
-#define SROM9_5GHPO_LOFDMBW20UL 176
-
-#define SROM9_2GPO_MCSBW20 178
-#define SROM9_2GPO_MCSBW20UL 180
-#define SROM9_2GPO_MCSBW40 182
-
-#define SROM9_5GLPO_MCSBW20 184
-#define SROM9_5GLPO_MCSBW20UL 186
-#define SROM9_5GLPO_MCSBW40 188
-#define SROM9_5GMPO_MCSBW20 190
-#define SROM9_5GMPO_MCSBW20UL 192
-#define SROM9_5GMPO_MCSBW40 194
-#define SROM9_5GHPO_MCSBW20 196
-#define SROM9_5GHPO_MCSBW20UL 198
-#define SROM9_5GHPO_MCSBW40 200
-
-#define SROM9_PO_MCS32 202
-#define SROM9_PO_LOFDM40DUP 203
-
-#define SROM9_REV_CRC 219
-
-typedef struct {
- u8 tssipos; /* TSSI positive slope, 1: positive, 0: negative */
- u8 extpagain; /* Ext PA gain-type: full-gain: 0, pa-lite: 1, no_pa: 2 */
- u8 pdetrange; /* support 32 combinations of different Pdet dynamic ranges */
- u8 triso; /* TR switch isolation */
- u8 antswctrllut; /* antswctrl lookup table configuration: 32 possible choices */
-} srom_fem_t;
-
-#endif /* _bcmsrom_fmt_h_ */
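The SROM layout above is consumed the same way throughout the driver: read a 16-bit word, then mask and shift out each field. A self-contained sketch using the AABREV fields, with the constants copied from the header and a made-up word value:

#include <stdint.h>
#include <stdio.h>

/* mask/shift values copied from the AABREV description above */
#define SROM_BR_MASK	0x00ff
#define SROM_CC_MASK	0x0f00
#define SROM_CC_SHIFT	8
#define SROM_AA0_MASK	0x3000
#define SROM_AA0_SHIFT	12
#define SROM_AA1_MASK	0xc000
#define SROM_AA1_SHIFT	14

int main(void)
{
	uint16_t aabrev = 0x5723;	/* invented SROM word, not real board data */

	printf("boardrev=0x%02x cc=%d aa0=%d aa1=%d\n",
	       (unsigned int)(aabrev & SROM_BR_MASK),
	       (aabrev & SROM_CC_MASK) >> SROM_CC_SHIFT,
	       (aabrev & SROM_AA0_MASK) >> SROM_AA0_SHIFT,
	       (aabrev & SROM_AA1_MASK) >> SROM_AA1_SHIFT);
	return 0;
}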
diff --git a/drivers/staging/brcm80211/include/bcmutils.h b/drivers/staging/brcm80211/include/bcmutils.h
deleted file mode 100644
index 17683f2f785..00000000000
--- a/drivers/staging/brcm80211/include/bcmutils.h
+++ /dev/null
@@ -1,500 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcmutils_h_
-#define _bcmutils_h_
-
-/* Buffer structure for collecting string-formatted data
-* using bcm_bprintf() API.
-* Use bcm_binit() to initialize before use
-*/
-
- struct bcmstrbuf {
- char *buf; /* pointer to current position in origbuf */
- unsigned int size; /* current (residual) size in bytes */
- char *origbuf; /* unmodified pointer to original buffer */
- unsigned int origsize; /* unmodified original buffer size in bytes */
- };
-
-/* ** driver-only section ** */
-
-#define GPIO_PIN_NOTDEFINED 0x20 /* Pin not defined */
-
-/*
- * Spin at most 'us' microseconds while 'exp' is true.
- * Caller should explicitly test 'exp' when this completes
- * and take appropriate error action if 'exp' is still true.
- */
-#define SPINWAIT(exp, us) { \
- uint countdown = (us) + 9; \
- while ((exp) && (countdown >= 10)) {\
- udelay(10); \
- countdown -= 10; \
- } \
-}
-
-/* osl multi-precedence packet queue */
-#ifndef PKTQ_LEN_DEFAULT
-#define PKTQ_LEN_DEFAULT 128 /* Max 128 packets */
-#endif
-#ifndef PKTQ_MAX_PREC
-#define PKTQ_MAX_PREC 16 /* Maximum precedence levels */
-#endif
-
- struct pktq_prec {
- struct sk_buff *head; /* first packet to dequeue */
- struct sk_buff *tail; /* last packet to dequeue */
- u16 len; /* number of queued packets */
- u16 max; /* maximum number of queued packets */
- };
-
-/* multi-priority pkt queue */
- struct pktq {
- u16 num_prec; /* number of precedences in use */
- u16 hi_prec; /* rapid dequeue hint (>= highest non-empty prec) */
- u16 max; /* total max packets */
- u16 len; /* total number of packets */
- /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
- struct pktq_prec q[PKTQ_MAX_PREC];
- };
-
-#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
-
-/* fn(pkt, arg). return true if pkt belongs to if */
-typedef bool(*ifpkt_cb_t) (struct sk_buff *, void *);
-
-/* operations on a specific precedence in packet queue */
-
-#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max))
-#define pktq_plen(pq, prec) ((pq)->q[prec].len)
-#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len)
-#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max)
-#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0)
-
-#define pktq_ppeek(pq, prec) ((pq)->q[prec].head)
-#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail)
-
-extern struct sk_buff *bcm_pktq_penq(struct pktq *pq, int prec,
- struct sk_buff *p);
-extern struct sk_buff *bcm_pktq_penq_head(struct pktq *pq, int prec,
- struct sk_buff *p);
-extern struct sk_buff *bcm_pktq_pdeq(struct pktq *pq, int prec);
-extern struct sk_buff *bcm_pktq_pdeq_tail(struct pktq *pq, int prec);
-
-/* packet primitives */
-extern struct sk_buff *bcm_pkt_buf_get_skb(uint len);
-extern void bcm_pkt_buf_free_skb(struct sk_buff *skb);
-
-/* Empty the queue at particular precedence level */
-extern void bcm_pktq_pflush(struct pktq *pq, int prec,
- bool dir, ifpkt_cb_t fn, void *arg);
-
-/* operations on a set of precedences in packet queue */
-
-extern int bcm_pktq_mlen(struct pktq *pq, uint prec_bmp);
-extern struct sk_buff *bcm_pktq_mdeq(struct pktq *pq, uint prec_bmp,
- int *prec_out);
-
-/* operations on packet queue as a whole */
-
-#define pktq_len(pq) ((int)(pq)->len)
-#define pktq_max(pq) ((int)(pq)->max)
-#define pktq_avail(pq) ((int)((pq)->max - (pq)->len))
-#define pktq_full(pq) ((pq)->len >= (pq)->max)
-#define pktq_empty(pq) ((pq)->len == 0)
-
-/* operations for single precedence queues */
-#define pktenq(pq, p) bcm_pktq_penq(((struct pktq *)pq), 0, (p))
-#define pktenq_head(pq, p) bcm_pktq_penq_head(((struct pktq *)pq), 0, (p))
-#define pktdeq(pq) bcm_pktq_pdeq(((struct pktq *)pq), 0)
-#define pktdeq_tail(pq) bcm_pktq_pdeq_tail(((struct pktq *)pq), 0)
-#define pktqinit(pq, len) bcm_pktq_init(((struct pktq *)pq), 1, len)
-
-extern void bcm_pktq_init(struct pktq *pq, int num_prec, int max_len);
-/* prec_out may be NULL if caller is not interested in return value */
-extern struct sk_buff *bcm_pktq_peek_tail(struct pktq *pq, int *prec_out);
-extern void bcm_pktq_flush(struct pktq *pq, bool dir,
- ifpkt_cb_t fn, void *arg);
-
-/* externs */
-/* packet */
-extern uint bcm_pktfrombuf(struct sk_buff *p,
- uint offset, int len, unsigned char *buf);
-extern uint bcm_pkttotlen(struct sk_buff *p);
-
-/* ethernet address */
-extern int bcm_ether_atoe(char *p, u8 *ea);
-
-/* ip address */
- struct ipv4_addr;
- extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf);
-
-#ifdef BCMDBG
-extern void bcm_prpkt(const char *msg, struct sk_buff *p0);
-#else
-#define bcm_prpkt(a, b)
-#endif /* BCMDBG */
-
-#define bcm_perf_enable()
-#define bcmlog(fmt, a1, a2)
-#define bcmdumplog(buf, size) (*buf = '\0')
-#define bcmdumplogent(buf, idx) -1
-
-#define bcmtslog(tstamp, fmt, a1, a2)
-#define bcmprinttslogs()
-#define bcmprinttstamp(us)
-
-/* Support for sharing code across in-driver iovar implementations.
- * The intent is that a driver use this structure to map iovar names
- * to its (private) iovar identifiers, and the lookup function to
- * find the entry. Macros are provided to map ids and get/set actions
- * into a single number space for a switch statement.
- */
-
-/* iovar structure */
- typedef struct bcm_iovar {
- const char *name; /* name for lookup and display */
- u16 varid; /* id for switch */
- u16 flags; /* driver-specific flag bits */
- u16 type; /* base type of argument */
- u16 minlen; /* min length for buffer vars */
- } bcm_iovar_t;
-
-/* varid definitions are per-driver, may use these get/set bits */
-
-/* IOVar action bits for id mapping */
-#define IOV_GET 0 /* Get an iovar */
-#define IOV_SET 1 /* Set an iovar */
-
-/* Varid to actionid mapping */
-#define IOV_GVAL(id) ((id)*2)
-#define IOV_SVAL(id) (((id)*2)+IOV_SET)
-#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET)
-#define IOV_ID(actionid) (actionid >> 1)
-
-/* flags are per-driver based on driver attributes */
-
- extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table,
- const char *name);
- extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg,
- int len, bool set);
-
-/* Base type definitions */
-#define IOVT_VOID 0 /* no value (implicitly set only) */
-#define IOVT_BOOL 1 /* any value ok (zero/nonzero) */
-#define IOVT_INT8 2 /* integer values are range-checked */
-#define IOVT_UINT8 3 /* unsigned int 8 bits */
-#define IOVT_INT16 4 /* int 16 bits */
-#define IOVT_UINT16 5 /* unsigned int 16 bits */
-#define IOVT_INT32 6 /* int 32 bits */
-#define IOVT_UINT32 7 /* unsigned int 32 bits */
-#define IOVT_BUFFER 8 /* buffer is size-checked as per minlen */
-#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
-
-/* Initializer for IOV type strings */
-#define BCM_IOV_TYPE_INIT { \
- "void", \
- "bool", \
- "s8", \
- "u8", \
- "s16", \
- "u16", \
- "s32", \
- "u32", \
- "buffer", \
- "" }
-
-#define BCM_IOVT_IS_INT(type) (\
- (type == IOVT_BOOL) || \
- (type == IOVT_INT8) || \
- (type == IOVT_UINT8) || \
- (type == IOVT_INT16) || \
- (type == IOVT_UINT16) || \
- (type == IOVT_INT32) || \
- (type == IOVT_UINT32))
-
-/* ** driver/apps-shared section ** */
-
-#define BCME_STRLEN 64 /* Max string length for BCM errors */
-
-#ifndef ABS
-#define ABS(a) (((a) < 0) ? -(a) : (a))
-#endif /* ABS */
-
-#define CEIL(x, y) (((x) + ((y)-1)) / (y))
-#define ISPOWEROF2(x) ((((x)-1)&(x)) == 0)
-
-/* map physical to virtual I/O */
-#if !defined(CONFIG_MMC_MSM7X00A)
-#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), \
- (unsigned long)(size))
-#else
-#define REG_MAP(pa, size) (void *)(0)
-#endif
-
-/* register access macros */
-#if defined(BCMSDIO)
-#ifdef BRCM_FULLMAC
-#include <bcmsdh.h>
-#endif
-#define OSL_WRITE_REG(r, v) \
- (bcmsdh_reg_write(NULL, (unsigned long)(r), sizeof(*(r)), (v)))
-#define OSL_READ_REG(r) \
- (bcmsdh_reg_read(NULL, (unsigned long)(r), sizeof(*(r))))
-#endif
-
-#if defined(BCMSDIO)
-#define SELECT_BUS_WRITE(mmap_op, bus_op) bus_op
-#define SELECT_BUS_READ(mmap_op, bus_op) bus_op
-#else
-#define SELECT_BUS_WRITE(mmap_op, bus_op) mmap_op
-#define SELECT_BUS_READ(mmap_op, bus_op) mmap_op
-#endif
-
-/* the largest reasonable packet buffer the driver uses for Ethernet MTU, in bytes */
-#define PKTBUFSZ 2048
-
-#define OSL_SYSUPTIME() ((u32)jiffies * (1000 / HZ))
-#ifdef BRCM_FULLMAC
-#include <linux/kernel.h> /* for vsn/printf's */
-#include <linux/string.h> /* for mem*, str* */
-#endif
-/* bcopy's: Linux kernel doesn't provide these (anymore) */
-#define bcopy(src, dst, len) memcpy((dst), (src), (len))
-
-/* register access macros */
-#ifndef __BIG_ENDIAN
-#ifndef __mips__
-#define R_REG(r) (\
- SELECT_BUS_READ(sizeof(*(r)) == sizeof(u8) ? \
- readb((volatile u8*)(r)) : \
- sizeof(*(r)) == sizeof(u16) ? readw((volatile u16*)(r)) : \
- readl((volatile u32*)(r)), OSL_READ_REG(r)) \
-)
-#else /* __mips__ */
-#define R_REG(r) (\
- SELECT_BUS_READ( \
- ({ \
- __typeof(*(r)) __osl_v; \
- __asm__ __volatile__("sync"); \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- __osl_v = readb((volatile u8*)(r)); \
- break; \
- case sizeof(u16): \
- __osl_v = readw((volatile u16*)(r)); \
- break; \
- case sizeof(u32): \
- __osl_v = \
- readl((volatile u32*)(r)); \
- break; \
- } \
- __asm__ __volatile__("sync"); \
- __osl_v; \
- }), \
- ({ \
- __typeof(*(r)) __osl_v; \
- __asm__ __volatile__("sync"); \
- __osl_v = OSL_READ_REG(r); \
- __asm__ __volatile__("sync"); \
- __osl_v; \
- })) \
-)
-#endif /* __mips__ */
-
-#define W_REG(r, v) do { \
- SELECT_BUS_WRITE( \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- writeb((u8)(v), (volatile u8*)(r)); break; \
- case sizeof(u16): \
- writew((u16)(v), (volatile u16*)(r)); break; \
- case sizeof(u32): \
- writel((u32)(v), (volatile u32*)(r)); break; \
- }, \
- (OSL_WRITE_REG(r, v))); \
- } while (0)
-#else /* __BIG_ENDIAN */
-#define R_REG(r) (\
- SELECT_BUS_READ( \
- ({ \
- __typeof(*(r)) __osl_v; \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- __osl_v = \
- readb((volatile u8*)((r)^3)); \
- break; \
- case sizeof(u16): \
- __osl_v = \
- readw((volatile u16*)((r)^2)); \
- break; \
- case sizeof(u32): \
- __osl_v = readl((volatile u32*)(r)); \
- break; \
- } \
- __osl_v; \
- }), \
- OSL_READ_REG(r)) \
-)
-#define W_REG(r, v) do { \
- SELECT_BUS_WRITE( \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- writeb((u8)(v), \
- (volatile u8*)((r)^3)); break; \
- case sizeof(u16): \
- writew((u16)(v), \
- (volatile u16*)((r)^2)); break; \
- case sizeof(u32): \
- writel((u32)(v), \
- (volatile u32*)(r)); break; \
- }, \
- (OSL_WRITE_REG(r, v))); \
- } while (0)
-#endif /* __BIG_ENDIAN */
-
-#define AND_REG(r, v) W_REG((r), R_REG(r) & (v))
-#define OR_REG(r, v) W_REG((r), R_REG(r) | (v))
-
-#define SET_REG(r, mask, val) \
- W_REG((r), ((R_REG(r) & ~(mask)) | (val)))
-
-#ifndef setbit
-#ifndef NBBY /* the BSD family defines NBBY */
-#define NBBY 8 /* 8 bits per byte */
-#endif /* #ifndef NBBY */
-#define setbit(a, i) (((u8 *)a)[(i)/NBBY] |= 1<<((i)%NBBY))
-#define clrbit(a, i) (((u8 *)a)[(i)/NBBY] &= ~(1<<((i)%NBBY)))
-#define isset(a, i) (((const u8 *)a)[(i)/NBBY] & (1<<((i)%NBBY)))
-#define isclr(a, i) ((((const u8 *)a)[(i)/NBBY] & (1<<((i)%NBBY))) == 0)
-#endif /* setbit */
-
-#define NBITS(type) (sizeof(type) * 8)
-#define NBITVAL(nbits) (1 << (nbits))
-#define MAXBITVAL(nbits) ((1 << (nbits)) - 1)
-#define NBITMASK(nbits) MAXBITVAL(nbits)
-#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8)
-
-/* basic mux operation - can be optimized on several architectures */
-#define MUX(pred, true, false) ((pred) ? (true) : (false))
-
-/* modulo inc/dec - assumes x E [0, bound - 1] */
-#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
-#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
-
-/* modulo inc/dec, bound = 2^k */
-#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
-#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
-
-/* modulo add/sub - assumes x, y E [0, bound - 1] */
-#define MODADD(x, y, bound) \
- MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
-#define MODSUB(x, y, bound) \
- MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
-
-/* module add/sub, bound = 2^k */
-#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
-#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
-
-/* crc defines */
-#define CRC8_INIT_VALUE 0xff /* Initial CRC8 checksum value */
-#define CRC8_GOOD_VALUE 0x9f /* Good final CRC8 checksum value */
-#define CRC16_INIT_VALUE 0xffff /* Initial CRC16 checksum value */
-#define CRC16_GOOD_VALUE 0xf0b8 /* Good final CRC16 checksum value */
-
-/* bcm_format_flags() bit description structure */
- typedef struct bcm_bit_desc {
- u32 bit;
- const char *name;
- } bcm_bit_desc_t;
-
-/* tag_ID/length/value_buffer tuple */
- typedef struct bcm_tlv {
- u8 id;
- u8 len;
- u8 data[1];
- } bcm_tlv_t;
-
-/* Check that bcm_tlv_t fits into the given buflen */
-#define bcm_valid_tlv(elt, buflen) ((buflen) >= 2 && (int)(buflen) >= (int)(2 + (elt)->len))
-
-#define ETHER_ADDR_STR_LEN 18 /* 18-byte Ethernet address string buffer length */
-
-/* crypto utility function */
-/* 128-bit xor: *dst = *src1 xor *src2. dst, src1 and src2 may have any alignment */
- static inline void
- xor_128bit_block(const u8 *src1, const u8 *src2, u8 *dst) {
- if (
-#ifdef __i386__
- 1 ||
-#endif
- (((unsigned long) src1 | (unsigned long) src2 | (unsigned long) dst) &
- 3) == 0) {
- /* ARM CM3 rel time: 1229 (727 if alignment check could be omitted) */
- /* x86 supports unaligned. This version runs 6x-9x faster on x86. */
- ((u32 *) dst)[0] =
- ((const u32 *)src1)[0] ^ ((const u32 *)
- src2)[0];
- ((u32 *) dst)[1] =
- ((const u32 *)src1)[1] ^ ((const u32 *)
- src2)[1];
- ((u32 *) dst)[2] =
- ((const u32 *)src1)[2] ^ ((const u32 *)
- src2)[2];
- ((u32 *) dst)[3] =
- ((const u32 *)src1)[3] ^ ((const u32 *)
- src2)[3];
- } else {
- /* ARM CM3 rel time: 4668 (4191 if alignment check could be omitted) */
- int k;
- for (k = 0; k < 16; k++)
- dst[k] = src1[k] ^ src2[k];
- }
- }
-
-/* externs */
-/* crc */
-extern u8 bcm_crc8(u8 *p, uint nbytes, u8 crc);
-/* format/print */
-#if defined(BCMDBG)
- extern int bcm_format_flags(const bcm_bit_desc_t *bd, u32 flags,
- char *buf, int len);
- extern int bcm_format_hex(char *str, const void *bytes, int len);
-#endif
- extern char *bcm_chipname(uint chipid, char *buf, uint len);
-
- extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen,
- uint key);
-
-/* multi-bool data type: set of bools, mbool is true if any is set */
- typedef u32 mbool;
-#define mboolset(mb, bit) ((mb) |= (bit)) /* set one bool */
-#define mboolclr(mb, bit) ((mb) &= ~(bit)) /* clear one bool */
-#define mboolisset(mb, bit) (((mb) & (bit)) != 0) /* true if one bool is set */
-#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val)))
-
-/* power conversion */
- extern u16 bcm_qdbm_to_mw(u8 qdbm);
- extern u8 bcm_mw_to_qdbm(u16 mw);
-
- extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size);
- extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
-
- extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf,
- uint len);
- extern uint bcm_bitcount(u8 *bitmap, uint bytelength);
-
-#endif /* _bcmutils_h_ */
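The iovar comment block above describes folding varids and get/set actions into a single number space for a switch statement. A compact standalone illustration, assuming a hypothetical driver-private varid IOV_MYVAR (the macros are copied locally so the snippet stands alone):

#include <stdio.h>

/* copied from the iovar macros above */
#define IOV_GET			0	/* Get an iovar */
#define IOV_SET			1	/* Set an iovar */
#define IOV_GVAL(id)		((id) * 2)
#define IOV_SVAL(id)		(((id) * 2) + IOV_SET)
#define IOV_ISSET(actionid)	(((actionid) & IOV_SET) == IOV_SET)
#define IOV_ID(actionid)	((actionid) >> 1)

enum { IOV_MYVAR = 3 };		/* hypothetical driver-private varid */

static void handle(int actionid)
{
	switch (actionid) {
	case IOV_GVAL(IOV_MYVAR):
		printf("get myvar (id %d)\n", IOV_ID(actionid));
		break;
	case IOV_SVAL(IOV_MYVAR):
		printf("set myvar (id %d, set=%d)\n",
		       IOV_ID(actionid), IOV_ISSET(actionid));
		break;
	default:
		printf("unhandled actionid %d\n", actionid);
	}
}

int main(void)
{
	handle(IOV_GVAL(IOV_MYVAR));
	handle(IOV_SVAL(IOV_MYVAR));
	return 0;
}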
diff --git a/drivers/staging/brcm80211/include/brcm_hw_ids.h b/drivers/staging/brcm80211/include/brcm_hw_ids.h
new file mode 100644
index 00000000000..5fb17d53c9b
--- /dev/null
+++ b/drivers/staging/brcm80211/include/brcm_hw_ids.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_HW_IDS_H_
+#define _BRCM_HW_IDS_H_
+
+#define BCM4325_D11DUAL_ID 0x431b
+#define BCM4325_D11G_ID 0x431c
+#define BCM4325_D11A_ID 0x431d
+
+#define BCM4329_D11N2G_ID 0x432f /* 4329 802.11n 2.4G device */
+#define BCM4329_D11N5G_ID 0x4330 /* 4329 802.11n 5G device */
+#define BCM4329_D11NDUAL_ID 0x432e
+
+#define BCM4319_D11N_ID 0x4337 /* 4319 802.11n dualband device */
+#define BCM4319_D11N2G_ID 0x4338 /* 4319 802.11n 2.4G device */
+#define BCM4319_D11N5G_ID 0x4339 /* 4319 802.11n 5G device */
+
+#define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */
+#define BCM43224_D11N_ID_VEN1 0x0576 /* Vendor specific 43224 802.11n db */
+
+#define BCM43225_D11N2G_ID 0x4357 /* 43225 802.11n 2.4GHz device */
+
+#define BCM43236_D11N_ID 0x4346 /* 43236 802.11n dualband device */
+#define BCM43236_D11N2G_ID 0x4347 /* 43236 802.11n 2.4GHz device */
+
+#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
+
+/* Chip IDs */
+#define BCM4313_CHIP_ID 0x4313 /* 4313 chip id */
+#define BCM4319_CHIP_ID 0x4319 /* 4319 chip id */
+
+#define BCM43224_CHIP_ID 43224 /* 43224 chipcommon chipid */
+#define BCM43225_CHIP_ID 43225 /* 43225 chipcommon chipid */
+#define BCM43421_CHIP_ID 43421 /* 43421 chipcommon chipid */
+#define BCM43235_CHIP_ID 43235 /* 43235 chipcommon chipid */
+#define BCM43236_CHIP_ID 43236 /* 43236 chipcommon chipid */
+#define BCM43238_CHIP_ID 43238 /* 43238 chipcommon chipid */
+#define BCM4329_CHIP_ID 0x4329 /* 4329 chipcommon chipid */
+#define BCM4325_CHIP_ID 0x4325 /* 4325 chipcommon chipid */
+#define BCM4331_CHIP_ID 0x4331 /* 4331 chipcommon chipid */
+#define BCM4336_CHIP_ID 0x4336 /* 4336 chipcommon chipid */
+#define BCM4330_CHIP_ID 0x4330 /* 4330 chipcommon chipid */
+#define BCM6362_CHIP_ID 0x6362 /* 6362 chipcommon chipid */
+
+#endif /* _BRCM_HW_IDS_H_ */
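These are plain PCI device and chipcommon chip numbers; a driver would typically match a probed device ID against them. A small standalone sketch of such a lookup, using a few IDs copied from the header and an example probe value (the table is illustrative, not the driver's actual probe table):

#include <stdint.h>
#include <stdio.h>

/* a few PCI device IDs copied from brcm_hw_ids.h above */
#define BCM4313_D11N2G_ID	0x4727
#define BCM43224_D11N_ID	0x4353
#define BCM43225_D11N2G_ID	0x4357

static const struct {
	uint16_t id;
	const char *name;
} devs[] = {
	{ BCM4313_D11N2G_ID, "BCM4313 802.11n 2.4G" },
	{ BCM43224_D11N_ID, "BCM43224 802.11n dualband" },
	{ BCM43225_D11N2G_ID, "BCM43225 802.11n 2.4G" },
};

int main(void)
{
	uint16_t probe_id = 0x4353;	/* example device id */
	unsigned int i;

	for (i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
		if (devs[i].id == probe_id)
			printf("matched %s\n", devs[i].name);
	return 0;
}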
diff --git a/drivers/staging/brcm80211/include/brcmu_utils.h b/drivers/staging/brcm80211/include/brcmu_utils.h
new file mode 100644
index 00000000000..2d54cc5f4b1
--- /dev/null
+++ b/drivers/staging/brcm80211/include/brcmu_utils.h
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCMU_UTILS_H_
+#define _BRCMU_UTILS_H_
+
+#include <linux/skbuff.h>
+
+/* Buffer structure for collecting string-formatted data
+* using brcmu_bprintf() API.
+* Use brcmu_binit() to initialize before use
+*/
+
+struct brcmu_strbuf {
+ char *buf; /* pointer to current position in origbuf */
+ unsigned int size; /* current (residual) size in bytes */
+ char *origbuf; /* unmodified pointer to original buffer */
+ unsigned int origsize; /* unmodified original buffer size in bytes */
+};
+
+/*
+ * Spin at most 'us' microseconds while 'exp' is true.
+ * Caller should explicitly test 'exp' when this completes
+ * and take appropriate error action if 'exp' is still true.
+ */
+#define SPINWAIT(exp, us) { \
+ uint countdown = (us) + 9; \
+ while ((exp) && (countdown >= 10)) {\
+ udelay(10); \
+ countdown -= 10; \
+ } \
+}
+
+/* osl multi-precedence packet queue */
+#ifndef PKTQ_LEN_DEFAULT
+#define PKTQ_LEN_DEFAULT 128 /* Max 128 packets */
+#endif
+#ifndef PKTQ_MAX_PREC
+#define PKTQ_MAX_PREC 16 /* Maximum precedence levels */
+#endif
+
+struct pktq_prec {
+ struct sk_buff *head; /* first packet to dequeue */
+ struct sk_buff *tail; /* last packet to dequeue */
+ u16 len; /* number of queued packets */
+ u16 max; /* maximum number of queued packets */
+};
+
+/* multi-priority pkt queue */
+struct pktq {
+ u16 num_prec; /* number of precedences in use */
+ u16 hi_prec; /* rapid dequeue hint (>= highest non-empty prec) */
+ u16 max; /* total max packets */
+ u16 len; /* total number of packets */
+ /*
+ * q array must be last since # of elements can be either
+ * PKTQ_MAX_PREC or 1
+ */
+ struct pktq_prec q[PKTQ_MAX_PREC];
+};
+
+/* fn(pkt, arg). return true if pkt belongs to if */
+typedef bool(*ifpkt_cb_t) (struct sk_buff *, void *);
+
+/* operations on a specific precedence in packet queue */
+
+#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max))
+#define pktq_plen(pq, prec) ((pq)->q[prec].len)
+#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max)
+#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0)
+
+#define pktq_ppeek(pq, prec) ((pq)->q[prec].head)
+#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail)
+
+extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
+ struct sk_buff *p);
+extern struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
+ struct sk_buff *p);
+extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
+extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
+
+/* packet primitives */
+extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
+extern void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
+
+/* Empty the queue at particular precedence level */
+extern void brcmu_pktq_pflush(struct pktq *pq, int prec,
+ bool dir, ifpkt_cb_t fn, void *arg);
+
+/* operations on a set of precedences in packet queue */
+
+extern int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
+ int *prec_out);
+
+/* operations on packet queue as a whole */
+
+#define pktq_len(pq) ((int)(pq)->len)
+#define pktq_max(pq) ((int)(pq)->max)
+#define pktq_avail(pq) ((int)((pq)->max - (pq)->len))
+#define pktq_full(pq) ((pq)->len >= (pq)->max)
+#define pktq_empty(pq) ((pq)->len == 0)
+
+/* operations for single precedence queues */
+#define pktenq(pq, p) brcmu_pktq_penq(((struct pktq *)pq), 0, (p))
+#define pktenq_head(pq, p)\
+ brcmu_pktq_penq_head(((struct pktq *)pq), 0, (p))
+#define pktdeq(pq) brcmu_pktq_pdeq(((struct pktq *)pq), 0)
+#define pktdeq_tail(pq) brcmu_pktq_pdeq_tail(((struct pktq *)pq), 0)
+#define pktqinit(pq, len) brcmu_pktq_init(((struct pktq *)pq), 1, len)
+
+extern void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
+/* prec_out may be NULL if caller is not interested in return value */
+extern struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
+extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
+ ifpkt_cb_t fn, void *arg);
+
+/* externs */
+/* packet */
+extern uint brcmu_pktfrombuf(struct sk_buff *p,
+ uint offset, int len, unsigned char *buf);
+extern uint brcmu_pkttotlen(struct sk_buff *p);
+
+/* ethernet address */
+extern int brcmu_ether_atoe(char *p, u8 *ea);
+
+/* ip address */
+struct ipv4_addr;
+
+#ifdef BCMDBG
+extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
+#else
+#define brcmu_prpkt(a, b)
+#endif /* BCMDBG */
+
+/* Support for sharing code across in-driver iovar implementations.
+ * The intent is that a driver use this structure to map iovar names
+ * to its (private) iovar identifiers, and the lookup function to
+ * find the entry. Macros are provided to map ids and get/set actions
+ * into a single number space for a switch statement.
+ */
+
+/* iovar structure */
+struct brcmu_iovar {
+ const char *name; /* name for lookup and display */
+ u16 varid; /* id for switch */
+ u16 flags; /* driver-specific flag bits */
+ u16 type; /* base type of argument */
+ u16 minlen; /* min length for buffer vars */
+};
+
+/* varid definitions are per-driver, may use these get/set bits */
+
+/* IOVar action bits for id mapping */
+#define IOV_GET 0 /* Get an iovar */
+#define IOV_SET 1 /* Set an iovar */
+
+/* Varid to actionid mapping */
+#define IOV_GVAL(id) ((id)*2)
+#define IOV_SVAL(id) (((id)*2)+IOV_SET)
+#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET)
+#define IOV_ID(actionid) (actionid >> 1)
+
+extern const struct
+brcmu_iovar *brcmu_iovar_lookup(const struct brcmu_iovar *table,
+ const char *name);
+extern int brcmu_iovar_lencheck(const struct brcmu_iovar *table, void *arg,
+ int len, bool set);
+
+/* Base type definitions */
+#define IOVT_VOID 0 /* no value (implicitly set only) */
+#define IOVT_BOOL 1 /* any value ok (zero/nonzero) */
+#define IOVT_INT8 2 /* integer values are range-checked */
+#define IOVT_UINT8 3 /* unsigned int 8 bits */
+#define IOVT_INT16 4 /* int 16 bits */
+#define IOVT_UINT16 5 /* unsigned int 16 bits */
+#define IOVT_INT32 6 /* int 32 bits */
+#define IOVT_UINT32 7 /* unsigned int 32 bits */
+#define IOVT_BUFFER 8 /* buffer is size-checked as per minlen */
+#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
+
+/* ** driver/apps-shared section ** */
+
+#define BCME_STRLEN 64 /* Max string length for BCM errors */
+
+#ifndef ABS
+#define ABS(a) (((a) < 0) ? -(a) : (a))
+#endif /* ABS */
+
+#define CEIL(x, y) (((x) + ((y)-1)) / (y))
+#define ISPOWEROF2(x) ((((x)-1)&(x)) == 0)
+
+/* map physical to virtual I/O */
+#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), \
+ (unsigned long)(size))
+
+/* the largest reasonable packet buffer the driver uses for Ethernet MTU, in bytes */
+#define PKTBUFSZ 2048
+
+#define OSL_SYSUPTIME() ((u32)jiffies * (1000 / HZ))
+
+#ifndef setbit
+#ifndef NBBY /* the BSD family defines NBBY */
+#define NBBY 8 /* 8 bits per byte */
+#endif /* #ifndef NBBY */
+#define setbit(a, i) (((u8 *)a)[(i)/NBBY] |= 1<<((i)%NBBY))
+#define clrbit(a, i) (((u8 *)a)[(i)/NBBY] &= ~(1<<((i)%NBBY)))
+#define isset(a, i) (((const u8 *)a)[(i)/NBBY] & (1<<((i)%NBBY)))
+#define isclr(a, i) ((((const u8 *)a)[(i)/NBBY] & (1<<((i)%NBBY))) == 0)
+#endif /* setbit */
+
+#define NBITS(type) (sizeof(type) * 8)
+#define NBITVAL(nbits) (1 << (nbits))
+#define MAXBITVAL(nbits) ((1 << (nbits)) - 1)
+#define NBITMASK(nbits) MAXBITVAL(nbits)
+#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8)
+
+/* basic mux operation - can be optimized on several architectures */
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+
+/* modulo inc/dec - assumes x E [0, bound - 1] */
+#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
+#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
+
+/* modulo inc/dec, bound = 2^k */
+#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
+#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
+
+/* modulo add/sub - assumes x, y E [0, bound - 1] */
+#define MODADD(x, y, bound) \
+ MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
+#define MODSUB(x, y, bound) \
+ MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
+
+/* module add/sub, bound = 2^k */
+#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
+#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
+
+/* crc defines */
+#define CRC8_INIT_VALUE 0xff /* Initial CRC8 checksum value */
+#define CRC8_GOOD_VALUE 0x9f /* Good final CRC8 checksum value */
+#define CRC16_INIT_VALUE 0xffff /* Initial CRC16 checksum value */
+#define CRC16_GOOD_VALUE 0xf0b8 /* Good final CRC16 checksum value */
+
+/* brcmu_format_flags() bit description structure */
+struct brcmu_bit_desc {
+ u32 bit;
+ const char *name;
+};
+
+/* tag_ID/length/value_buffer tuple */
+struct brcmu_tlv {
+ u8 id;
+ u8 len;
+ u8 data[1];
+};
+
+#define ETHER_ADDR_STR_LEN 18 /* 18-byte Ethernet address string buffer length */
+
+/* externs */
+/* crc */
+extern u8 brcmu_crc8(u8 *p, uint nbytes, u8 crc);
+
+/* format/print */
+#if defined(BCMDBG)
+extern int brcmu_format_flags(const struct brcmu_bit_desc *bd, u32 flags,
+ char *buf, int len);
+extern int brcmu_format_hex(char *str, const void *bytes, int len);
+#endif
+
+extern char *brcmu_chipname(uint chipid, char *buf, uint len);
+
+extern struct brcmu_tlv *brcmu_parse_tlvs(void *buf, int buflen,
+ uint key);
+
+/* power conversion */
+extern u16 brcmu_qdbm_to_mw(u8 qdbm);
+extern u8 brcmu_mw_to_qdbm(u16 mw);
+
+extern void brcmu_binit(struct brcmu_strbuf *b, char *buf, uint size);
+extern int brcmu_bprintf(struct brcmu_strbuf *b, const char *fmt, ...);
+
+extern uint brcmu_mkiovar(char *name, char *data, uint datalen,
+ char *buf, uint len);
+extern uint brcmu_bitcount(u8 *bitmap, uint bytelength);
+
+#endif /* _BRCMU_UTILS_H_ */
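The setbit/clrbit/isset helpers above implement a simple byte-array bitmap. A self-contained sketch of their use, with the definitions copied locally and arbitrary example bit indices:

#include <stdint.h>
#include <stdio.h>

/* byte-array bitmap helpers copied from the header above */
#define NBBY 8			/* 8 bits per byte */
#define setbit(a, i)	(((uint8_t *)(a))[(i) / NBBY] |= 1 << ((i) % NBBY))
#define clrbit(a, i)	(((uint8_t *)(a))[(i) / NBBY] &= ~(1 << ((i) % NBBY)))
#define isset(a, i)	(((const uint8_t *)(a))[(i) / NBBY] & (1 << ((i) % NBBY)))

int main(void)
{
	uint8_t map[4] = { 0 };	/* 32-bit bitmap */

	setbit(map, 5);		/* arbitrary example bits */
	setbit(map, 17);
	clrbit(map, 5);
	printf("bit5=%d bit17=%d\n", !!isset(map, 5), !!isset(map, 17));
	return 0;
}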
diff --git a/drivers/staging/brcm80211/include/bcmwifi.h b/drivers/staging/brcm80211/include/brcmu_wifi.h
index a573ebff768..fde592bd917 100644
--- a/drivers/staging/brcm80211/include/bcmwifi.h
+++ b/drivers/staging/brcm80211/include/brcmu_wifi.h
@@ -14,8 +14,11 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef _bcmwifi_h_
-#define _bcmwifi_h_
+#ifndef _BRCMU_WIFI_H_
+#define _BRCMU_WIFI_H_
+
+#include <linux/if_ether.h> /* for ETH_ALEN */
+#include <linux/ieee80211.h> /* for WLAN_PMKID_LEN */
/* A chanspec holds the channel number, band, bandwidth and control sideband */
typedef u16 chanspec_t;
@@ -28,7 +31,7 @@ typedef u16 chanspec_t;
#define CH_10MHZ_APART 2
#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */
#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
-#define WLC_MAX_2G_CHANNEL CH_MAX_2G_CHANNEL /* legacy define */
+#define BRCM_MAX_2G_CHANNEL CH_MAX_2G_CHANNEL /* legacy define */
#define MAXCHANNEL 224 /* max # supported channels. The max channel no is 216,
* this is that + 1 rounded up to a multiple of NBBY (8).
* DO NOT MAKE it > 255: channels are u8's all over
@@ -64,7 +67,8 @@ typedef u16 chanspec_t;
#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? ((channel) - CH_10MHZ_APART) : 0)
#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
((channel) + CH_10MHZ_APART) : 0)
-#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
+#define CHSPEC_BANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : \
+ BAND_2G_INDEX)
#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
WL_CHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
@@ -107,26 +111,30 @@ typedef u16 chanspec_t;
#define CHSPEC_CTL_CHAN(chspec) ((CHSPEC_SB_LOWER(chspec)) ? \
(LOWER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))) : \
(UPPER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))))
-#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
+#define CHSPEC2BAND(chspec) (CHSPEC_IS5G(chspec) ? BRCM_BAND_5G : BRCM_BAND_2G)
#define CHANSPEC_STR_LEN 8
/* defined rate in 500kbps */
-#define WLC_MAXRATE 108 /* in 500kbps units */
-#define WLC_RATE_1M 2 /* in 500kbps units */
-#define WLC_RATE_2M 4 /* in 500kbps units */
-#define WLC_RATE_5M5 11 /* in 500kbps units */
-#define WLC_RATE_11M 22 /* in 500kbps units */
-#define WLC_RATE_6M 12 /* in 500kbps units */
-#define WLC_RATE_9M 18 /* in 500kbps units */
-#define WLC_RATE_12M 24 /* in 500kbps units */
-#define WLC_RATE_18M 36 /* in 500kbps units */
-#define WLC_RATE_24M 48 /* in 500kbps units */
-#define WLC_RATE_36M 72 /* in 500kbps units */
-#define WLC_RATE_48M 96 /* in 500kbps units */
-#define WLC_RATE_54M 108 /* in 500kbps units */
-
-#define WLC_2G_25MHZ_OFFSET 5 /* 2.4GHz band channel offset */
+#define BRCM_MAXRATE 108 /* in 500kbps units */
+#define BRCM_RATE_1M 2 /* in 500kbps units */
+#define BRCM_RATE_2M 4 /* in 500kbps units */
+#define BRCM_RATE_5M5 11 /* in 500kbps units */
+#define BRCM_RATE_11M 22 /* in 500kbps units */
+#define BRCM_RATE_6M 12 /* in 500kbps units */
+#define BRCM_RATE_9M 18 /* in 500kbps units */
+#define BRCM_RATE_12M 24 /* in 500kbps units */
+#define BRCM_RATE_18M 36 /* in 500kbps units */
+#define BRCM_RATE_24M 48 /* in 500kbps units */
+#define BRCM_RATE_36M 72 /* in 500kbps units */
+#define BRCM_RATE_48M 96 /* in 500kbps units */
+#define BRCM_RATE_54M 108 /* in 500kbps units */
+
+#define BRCM_2G_25MHZ_OFFSET 5 /* 2.4GHz band channel offset */
+
+#define MCSSET_LEN 16
+
+#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0)
/*
* Verify the chanspec is using a legal set of parameters, i.e. that the
@@ -134,14 +142,14 @@ typedef u16 chanspec_t;
* combination could be legal given any set of circumstances.
 * RETURNS: true if the chanspec is malformed, false if it looks good.
*/
-extern bool bcm_chspec_malformed(chanspec_t chanspec);
+extern bool brcmu_chspec_malformed(chanspec_t chanspec);
/*
* This function returns the channel number that control traffic is being sent on, for legacy
 * channels this is just the channel number, for 40MHZ channels it is the upper or lower 20MHZ
* sideband depending on the chanspec selected
*/
-extern u8 bcm_chspec_ctlchan(chanspec_t chspec);
+extern u8 brcmu_chspec_ctlchan(chanspec_t chspec);
/*
* Return the channel number for a given frequency and base frequency.
@@ -162,6 +170,74 @@ extern u8 bcm_chspec_ctlchan(chanspec_t chspec);
*
* Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
*/
-extern int bcm_mhz2channel(uint freq, uint start_factor);
-
-#endif /* _bcmwifi_h_ */
+extern int brcmu_mhz2channel(uint freq, uint start_factor);
+
+/* Enumerate crypto algorithms */
+#define CRYPTO_ALGO_OFF 0
+#define CRYPTO_ALGO_WEP1 1
+#define CRYPTO_ALGO_TKIP 2
+#define CRYPTO_ALGO_WEP128 3
+#define CRYPTO_ALGO_AES_CCM 4
+#define CRYPTO_ALGO_AES_RESERVED1 5
+#define CRYPTO_ALGO_AES_RESERVED2 6
+#define CRYPTO_ALGO_NALG 7
+
+/* wireless security bitvec */
+#define WEP_ENABLED 0x0001
+#define TKIP_ENABLED 0x0002
+#define AES_ENABLED 0x0004
+#define WSEC_SWFLAG 0x0008
+#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */
+
+/* WPA authentication mode bitvec */
+#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
+#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */
+#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */
+#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */
+#define WPA_AUTH_RESERVED1 0x0008
+#define WPA_AUTH_RESERVED2 0x0010
+ /* #define WPA_AUTH_8021X 0x0020 *//* 802.1x, reserved */
+#define WPA2_AUTH_RESERVED1 0x0020
+#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */
+#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */
+#define WPA2_AUTH_RESERVED3 0x0200
+#define WPA2_AUTH_RESERVED4 0x0400
+#define WPA2_AUTH_RESERVED5 0x0800
+
+/* pmkid */
+#define MAXPMKID 16
+
+#define DOT11_DEFAULT_RTS_LEN 2347
+#define DOT11_DEFAULT_FRAG_LEN 2346
+
+#define DOT11_ICV_AES_LEN 8
+#define DOT11_QOS_LEN 2
+#define DOT11_IV_MAX_LEN 8
+#define DOT11_A4_HDR_LEN 30
+
+#define HT_CAP_RX_STBC_NO 0x0
+#define HT_CAP_RX_STBC_ONE_STREAM 0x1
+
+typedef struct _pmkid {
+ u8 BSSID[ETH_ALEN];
+ u8 PMKID[WLAN_PMKID_LEN];
+} pmkid_t;
+
+typedef struct _pmkid_list {
+ u32 npmkid;
+ pmkid_t pmkid[1];
+} pmkid_list_t;
+
+typedef struct _pmkid_cand {
+ u8 BSSID[ETH_ALEN];
+ u8 preauth;
+} pmkid_cand_t;
+
+typedef struct _pmkid_cand_list {
+ u32 npmkid_cand;
+ pmkid_cand_t pmkid_cand[1];
+} pmkid_cand_list_t;
+
+typedef u8 ac_bitmap_t;
+
+#endif /* _BRCMU_WIFI_H_ */
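The wireless-security bitvec above is a set of OR-ed flags that are tested individually. A trivial standalone example, with the constants copied from the header and an arbitrary wsec value:

#include <stdio.h>

/* security bitvec flags copied from the header above */
#define WEP_ENABLED	0x0001
#define TKIP_ENABLED	0x0002
#define AES_ENABLED	0x0004

int main(void)
{
	unsigned int wsec = TKIP_ENABLED | AES_ENABLED;	/* arbitrary example */

	printf("wep=%d tkip=%d aes=%d\n",
	       !!(wsec & WEP_ENABLED), !!(wsec & TKIP_ENABLED),
	       !!(wsec & AES_ENABLED));
	return 0;
}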
diff --git a/drivers/staging/brcm80211/include/chipcommon.h b/drivers/staging/brcm80211/include/chipcommon.h
new file mode 100644
index 00000000000..296582aced6
--- /dev/null
+++ b/drivers/staging/brcm80211/include/chipcommon.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SBCHIPC_H
+#define _SBCHIPC_H
+
+#include "defs.h" /* for PAD macro */
+
+typedef volatile struct {
+ u32 chipid; /* 0x0 */
+ u32 capabilities;
+ u32 corecontrol; /* corerev >= 1 */
+ u32 bist;
+
+ /* OTP */
+ u32 otpstatus; /* 0x10, corerev >= 10 */
+ u32 otpcontrol;
+ u32 otpprog;
+ u32 otplayout; /* corerev >= 23 */
+
+ /* Interrupt control */
+ u32 intstatus; /* 0x20 */
+ u32 intmask;
+
+ /* Chip specific regs */
+ u32 chipcontrol; /* 0x28, rev >= 11 */
+ u32 chipstatus; /* 0x2c, rev >= 11 */
+
+ /* Jtag Master */
+ u32 jtagcmd; /* 0x30, rev >= 10 */
+ u32 jtagir;
+ u32 jtagdr;
+ u32 jtagctrl;
+
+ /* serial flash interface registers */
+ u32 flashcontrol; /* 0x40 */
+ u32 flashaddress;
+ u32 flashdata;
+ u32 PAD[1];
+
+ /* Silicon backplane configuration broadcast control */
+ u32 broadcastaddress; /* 0x50 */
+ u32 broadcastdata;
+
+ /* gpio - cleared only by power-on-reset */
+ u32 gpiopullup; /* 0x58, corerev >= 20 */
+ u32 gpiopulldown; /* 0x5c, corerev >= 20 */
+ u32 gpioin; /* 0x60 */
+ u32 gpioout; /* 0x64 */
+ u32 gpioouten; /* 0x68 */
+ u32 gpiocontrol; /* 0x6C */
+ u32 gpiointpolarity; /* 0x70 */
+ u32 gpiointmask; /* 0x74 */
+
+ /* GPIO events corerev >= 11 */
+ u32 gpioevent;
+ u32 gpioeventintmask;
+
+ /* Watchdog timer */
+ u32 watchdog; /* 0x80 */
+
+ /* GPIO events corerev >= 11 */
+ u32 gpioeventintpolarity;
+
+ /* GPIO based LED powersave registers corerev >= 16 */
+ u32 gpiotimerval; /* 0x88 */
+ u32 gpiotimeroutmask;
+
+ /* clock control */
+ u32 clockcontrol_n; /* 0x90 */
+ u32 clockcontrol_sb; /* aka m0 */
+ u32 clockcontrol_pci; /* aka m1 */
+ u32 clockcontrol_m2; /* mii/uart/mipsref */
+ u32 clockcontrol_m3; /* cpu */
+ u32 clkdiv; /* corerev >= 3 */
+ u32 gpiodebugsel; /* corerev >= 28 */
+ u32 capabilities_ext; /* 0xac */
+
+ /* pll delay registers (corerev >= 4) */
+ u32 pll_on_delay; /* 0xb0 */
+ u32 fref_sel_delay;
+ u32 slow_clk_ctl; /* 5 < corerev < 10 */
+ u32 PAD;
+
+ /* Instaclock registers (corerev >= 10) */
+ u32 system_clk_ctl; /* 0xc0 */
+ u32 clkstatestretch;
+ u32 PAD[2];
+
+ /* Indirect backplane access (corerev >= 22) */
+ u32 bp_addrlow; /* 0xd0 */
+ u32 bp_addrhigh;
+ u32 bp_data;
+ u32 PAD;
+ u32 bp_indaccess;
+ u32 PAD[3];
+
+ /* More clock dividers (corerev >= 32) */
+ u32 clkdiv2;
+ u32 PAD[2];
+
+ /* In AI chips, pointer to erom */
+ u32 eromptr; /* 0xfc */
+
+ /* ExtBus control registers (corerev >= 3) */
+ u32 pcmcia_config; /* 0x100 */
+ u32 pcmcia_memwait;
+ u32 pcmcia_attrwait;
+ u32 pcmcia_iowait;
+ u32 ide_config;
+ u32 ide_memwait;
+ u32 ide_attrwait;
+ u32 ide_iowait;
+ u32 prog_config;
+ u32 prog_waitcount;
+ u32 flash_config;
+ u32 flash_waitcount;
+ u32 SECI_config; /* 0x130 SECI configuration */
+ u32 PAD[3];
+
+ /* Enhanced Coexistence Interface (ECI) registers (corerev >= 21) */
+ u32 eci_output; /* 0x140 */
+ u32 eci_control;
+ u32 eci_inputlo;
+ u32 eci_inputmi;
+ u32 eci_inputhi;
+ u32 eci_inputintpolaritylo;
+ u32 eci_inputintpolaritymi;
+ u32 eci_inputintpolarityhi;
+ u32 eci_intmasklo;
+ u32 eci_intmaskmi;
+ u32 eci_intmaskhi;
+ u32 eci_eventlo;
+ u32 eci_eventmi;
+ u32 eci_eventhi;
+ u32 eci_eventmasklo;
+ u32 eci_eventmaskmi;
+ u32 eci_eventmaskhi;
+ u32 PAD[3];
+
+ /* SROM interface (corerev >= 32) */
+ u32 sromcontrol; /* 0x190 */
+ u32 sromaddress;
+ u32 sromdata;
+ u32 PAD[17];
+
+ /* Clock control and hardware workarounds (corerev >= 20) */
+ u32 clk_ctl_st; /* 0x1e0 */
+ u32 hw_war;
+ u32 PAD[70];
+
+ /* UARTs */
+ u8 uart0data; /* 0x300 */
+ u8 uart0imr;
+ u8 uart0fcr;
+ u8 uart0lcr;
+ u8 uart0mcr;
+ u8 uart0lsr;
+ u8 uart0msr;
+ u8 uart0scratch;
+ u8 PAD[248]; /* corerev >= 1 */
+
+ u8 uart1data; /* 0x400 */
+ u8 uart1imr;
+ u8 uart1fcr;
+ u8 uart1lcr;
+ u8 uart1mcr;
+ u8 uart1lsr;
+ u8 uart1msr;
+ u8 uart1scratch;
+ u32 PAD[126];
+
+ /* PMU registers (corerev >= 20) */
+ u32 pmucontrol; /* 0x600 */
+ u32 pmucapabilities;
+ u32 pmustatus;
+ u32 res_state;
+ u32 res_pending;
+ u32 pmutimer;
+ u32 min_res_mask;
+ u32 max_res_mask;
+ u32 res_table_sel;
+ u32 res_dep_mask;
+ u32 res_updn_timer;
+ u32 res_timer;
+ u32 clkstretch;
+ u32 pmuwatchdog;
+ u32 gpiosel; /* 0x638, rev >= 1 */
+ u32 gpioenable; /* 0x63c, rev >= 1 */
+ u32 res_req_timer_sel;
+ u32 res_req_timer;
+ u32 res_req_mask;
+ u32 PAD;
+ u32 chipcontrol_addr; /* 0x650 */
+ u32 chipcontrol_data; /* 0x654 */
+ u32 regcontrol_addr;
+ u32 regcontrol_data;
+ u32 pllcontrol_addr;
+ u32 pllcontrol_data;
+ u32 pmustrapopt; /* 0x668, corerev >= 28 */
+ u32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */
+ u32 PAD[100];
+ u16 sromotp[768];
+} chipcregs_t;
+
+/* chipid */
+#define CID_ID_MASK 0x0000ffff /* Chip Id mask */
+#define CID_REV_MASK 0x000f0000 /* Chip Revision mask */
+#define CID_REV_SHIFT 16 /* Chip Revision shift */
+#define CID_PKG_MASK 0x00f00000 /* Package Option mask */
+#define CID_PKG_SHIFT 20 /* Package Option shift */
+#define CID_CC_MASK 0x0f000000 /* CoreCount (corerev >= 4) */
+#define CID_CC_SHIFT 24
+#define CID_TYPE_MASK 0xf0000000 /* Chip Type */
+#define CID_TYPE_SHIFT 28
+
+/* capabilities */
+#define CC_CAP_UARTS_MASK 0x00000003 /* Number of UARTs */
+#define CC_CAP_MIPSEB 0x00000004 /* MIPS is in big-endian mode */
+#define CC_CAP_UCLKSEL 0x00000018 /* UARTs clock select */
+#define CC_CAP_UINTCLK 0x00000008 /* UARTs are driven by internal divided clock */
+#define CC_CAP_UARTGPIO 0x00000020 /* UARTs own GPIOs 15:12 */
+#define CC_CAP_EXTBUS_MASK 0x000000c0 /* External bus mask */
+#define CC_CAP_EXTBUS_NONE 0x00000000 /* No ExtBus present */
+#define CC_CAP_EXTBUS_FULL 0x00000040 /* ExtBus: PCMCIA, IDE & Prog */
+#define CC_CAP_EXTBUS_PROG 0x00000080 /* ExtBus: ProgIf only */
+#define CC_CAP_FLASH_MASK 0x00000700 /* Type of flash */
+#define CC_CAP_PLL_MASK 0x00038000 /* Type of PLL */
+#define CC_CAP_PWR_CTL 0x00040000 /* Power control */
+#define CC_CAP_OTPSIZE 0x00380000 /* OTP Size (0 = none) */
+#define CC_CAP_OTPSIZE_SHIFT 19 /* OTP Size shift */
+#define CC_CAP_OTPSIZE_BASE 5 /* OTP Size base */
+#define CC_CAP_JTAGP 0x00400000 /* JTAG Master Present */
+#define CC_CAP_ROM 0x00800000 /* Internal boot rom active */
+#define CC_CAP_BKPLN64 0x08000000 /* 64-bit backplane */
+#define CC_CAP_PMU 0x10000000 /* PMU Present, rev >= 20 */
+#define CC_CAP_SROM 0x40000000 /* Srom Present, rev >= 32 */
+#define CC_CAP_NFLASH 0x80000000 /* Nand flash present, rev >= 35 */
+
+#define CC_CAP2_SECI 0x00000001 /* SECI Present, rev >= 36 */
+#define CC_CAP2_GSIO 0x00000002 /* GSIO (spi/i2c) present, rev >= 37 */
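A minimal sketch of decoding the capabilities word with these masks; the helper names are assumptions, not driver API:

static inline unsigned int cc_nr_uarts(u32 cap)
{
	return cap & CC_CAP_UARTS_MASK;		/* number of UARTs, bits 1:0 */
}

static inline bool cc_has_pmu(u32 cap)
{
	return (cap & CC_CAP_PMU) != 0;		/* PMU present (corerev >= 20) */
}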
+
+/* pmucapabilities */
+#define PCAP_REV_MASK 0x000000ff
+#define PCAP_RC_MASK 0x00001f00
+#define PCAP_RC_SHIFT 8
+#define PCAP_TC_MASK 0x0001e000
+#define PCAP_TC_SHIFT 13
+#define PCAP_PC_MASK 0x001e0000
+#define PCAP_PC_SHIFT 17
+#define PCAP_VC_MASK 0x01e00000
+#define PCAP_VC_SHIFT 21
+#define PCAP_CC_MASK 0x1e000000
+#define PCAP_CC_SHIFT 25
+#define PCAP5_PC_MASK 0x003e0000 /* PMU corerev >= 5 */
+#define PCAP5_PC_SHIFT 17
+#define PCAP5_VC_MASK 0x07c00000
+#define PCAP5_VC_SHIFT 22
+#define PCAP5_CC_MASK 0xf8000000
+#define PCAP5_CC_SHIFT 27
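The PCAP5_* variants widen the same fields from PMU corerev 5 on, so a reader of pmucapabilities has to pick the layout by revision first. A hedged sketch (the helper name and the rev-5 cutover, inferred from the comments above, are assumptions):

static inline unsigned int pmu_cap_vc(u32 pmucap)
{
	unsigned int pmurev = pmucap & PCAP_REV_MASK;

	/* the VC field is wider from PMU corerev 5 on (PCAP5_* layout) */
	if (pmurev >= 5)
		return (pmucap & PCAP5_VC_MASK) >> PCAP5_VC_SHIFT;
	return (pmucap & PCAP_VC_MASK) >> PCAP_VC_SHIFT;
}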
+
+/*
+ * Maximum delay for the PMU state transition in us.
+ * This is an upper bound intended for spinwaits etc.
+ */
+#define PMU_MAX_TRANSITION_DLY 15000
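For illustration only, a bounded spinwait of the kind this constant is meant to cap; the register pointer, the polled mask and the 10 us step are assumptions, and readl()/udelay() are the usual Linux accessors:

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/io.h>

static bool pmu_spinwait(void __iomem *reg, u32 mask)
{
	unsigned int waited = 0;

	while (!(readl(reg) & mask)) {
		if (waited >= PMU_MAX_TRANSITION_DLY)
			return false;		/* transition did not complete in time */
		udelay(10);
		waited += 10;			/* microseconds spent so far */
	}
	return true;
}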
+
+#endif /* _SBCHIPC_H */
diff --git a/drivers/staging/brcm80211/include/defs.h b/drivers/staging/brcm80211/include/defs.h
new file mode 100644
index 00000000000..8b3e17dec15
--- /dev/null
+++ b/drivers/staging/brcm80211/include/defs.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_DEFS_H_
+#define _BRCM_DEFS_H_
+
+#include <linux/types.h>
+
+#define SI_BUS 0
+#define PCI_BUS 1
+#define PCMCIA_BUS 2
+#define SDIO_BUS 3
+#define JTAG_BUS 4
+#define USB_BUS 5
+#define SPI_BUS 6
+
+#ifndef OFF
+#define OFF 0
+#endif
+
+#ifndef ON
+#define ON 1 /* ON = 1 */
+#endif
+
+#define AUTO (-1) /* Auto = -1 */
+
+/*
+ * Priority definitions according to 802.1D
+ */
+#define PRIO_8021D_NONE 2
+#define PRIO_8021D_BK 1
+#define PRIO_8021D_BE 0
+#define PRIO_8021D_EE 3
+#define PRIO_8021D_CL 4
+#define PRIO_8021D_VI 5
+#define PRIO_8021D_VO 6
+#define PRIO_8021D_NC 7
+
+#define MAXPRIO 7
+#define NUMPRIO (MAXPRIO + 1)
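NUMPRIO exists so per-priority state can be sized straight from the 802.1D range; a small sketch (struct and function names are illustrative only):

struct prio_stats {
	u32 frames[NUMPRIO];		/* one counter per 802.1D priority */
};

static inline void prio_stats_count(struct prio_stats *ps, unsigned int prio)
{
	if (prio <= MAXPRIO)		/* valid priorities are 0..MAXPRIO */
		ps->frames[prio]++;
}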
+
+#define WL_NUMRATES 16 /* max # of rates in a rateset */
+
+typedef struct wl_rateset {
+ u32 count; /* # rates in this set */
+ u8 rates[WL_NUMRATES]; /* rates in 500kbps units w/hi bit set if basic */
+} wl_rateset_t;
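Per the comment on rates[], each entry carries the rate in 500 kbps units in its low seven bits, with the high bit marking a basic rate. A hedged decode sketch (helper names are not driver API):

static inline unsigned int rate_in_kbps(u8 r)
{
	return (r & 0x7f) * 500;	/* strip the basic-rate flag, scale to kbps */
}

static inline bool rate_is_basic(u8 r)
{
	return (r & 0x80) != 0;		/* high bit set means basic rate */
}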
+
+#define BRCM_CNTRY_BUF_SZ 4 /* Country string is 3 bytes + NUL */
+
+#define BRCM_SET_CHANNEL 30
+#define BRCM_SET_SRL 32
+#define BRCM_SET_LRL 34
+
+#define BRCM_SET_RATESET 72
+#define BRCM_SET_BCNPRD 76
+#define BRCM_GET_CURR_RATESET 114 /* current rateset */
+#define BRCM_GET_PHYLIST 180
+
+/* Bit masks for radio disabled status - returned by WL_GET_RADIO */
+#define WL_RADIO_SW_DISABLE (1<<0)
+#define WL_RADIO_HW_DISABLE (1<<1)
+#define WL_RADIO_MPC_DISABLE (1<<2)
+#define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */
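A sketch of testing the bitmap these masks describe; how the status word is fetched (WL_GET_RADIO) is outside this header, so only the bit tests are shown:

static inline bool radio_is_disabled(u32 radio_status)
{
	/* any of the four disable reasons keeps the radio off */
	return (radio_status & (WL_RADIO_SW_DISABLE | WL_RADIO_HW_DISABLE |
				WL_RADIO_MPC_DISABLE |
				WL_RADIO_COUNTRY_DISABLE)) != 0;
}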
+
+/* Override bit for SET_TXPWR. If set, ignore other level limits */
+#define WL_TXPWR_OVERRIDE (1U<<31)
+
+/* band types */
+#define BRCM_BAND_AUTO 0 /* auto-select */
+#define BRCM_BAND_5G		1	/* 5 GHz */
+#define BRCM_BAND_2G		2	/* 2.4 GHz */
+#define BRCM_BAND_ALL 3 /* all bands */
+
+/* Values for PM */
+#define PM_OFF	0
+#define PM_MAX	1
+#define PM_FAST	2
+
+/* Message levels */
+#define LOG_ERROR_VAL 0x00000001
+#define LOG_TRACE_VAL 0x00000002
+
+/*
+ * Sonics Configuration Space Registers.
+ */
+#define SBCONFIGOFF		0xf00	/* core sbconfig regs are the top 256 bytes of regs */
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
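The token pasting above turns every use of PAD into a member named pad<line-number>, which is what lets the register structs in this patch declare many filler fields without hand-numbering them. A tiny illustration (the expanded names depend on the actual line numbers, so they are shown only as examples):

struct pad_demo {
	u32 reg_a;
	u32 PAD[2];	/* expands to something like: u32 pad57[2]; */
	u32 reg_b;
	u32 PAD;	/* expands to a differently numbered padNN   */
};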
+
+#endif /* _BRCM_DEFS_H_ */
diff --git a/drivers/staging/brcm80211/include/hndsoc.h b/drivers/staging/brcm80211/include/hndsoc.h
deleted file mode 100644
index 6435686b329..00000000000
--- a/drivers/staging/brcm80211/include/hndsoc.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _HNDSOC_H
-#define _HNDSOC_H
-
-/* Include the soci specific files */
-#include <sbconfig.h>
-#include <aidmp.h>
-
-/*
- * SOC Interconnect Address Map.
- * All regions may not exist on all chips.
- */
-#define SI_SDRAM_BASE 0x00000000 /* Physical SDRAM */
-#define SI_PCI_MEM 0x08000000 /* Host Mode sb2pcitranslation0 (64 MB) */
-#define SI_PCI_MEM_SZ (64 * 1024 * 1024)
-#define SI_PCI_CFG 0x0c000000 /* Host Mode sb2pcitranslation1 (64 MB) */
-#define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */
-#define SI_SDRAM_R2 0x80000000 /* Region 2 for sdram (512 MB) */
-
-#ifdef SI_ENUM_BASE_VARIABLE
-#define SI_ENUM_BASE (sii->pub.si_enum_base)
-#else
-#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */
-#endif /* SI_ENUM_BASE_VARIABLE */
-
-#define SI_WRAP_BASE 0x18100000 /* Wrapper space base */
-#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */
-#define SI_MAXCORES 16 /* Max cores (this is arbitrary, for software
- * convenience and could be changed if we
- * make any larger chips
- */
-
-#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */
-#define SI_FASTRAM_SWAPPED 0x19800000
-
-#define SI_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */
-#define SI_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */
-#define SI_ARMCM3_ROM 0x1e000000 /* ARM Cortex-M3 ROM */
-#define SI_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */
-#define SI_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */
-#define SI_ARM7S_ROM 0x20000000 /* ARM7TDMI-S ROM */
-#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */
-#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */
-#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */
-#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */
-
-#define SI_PCI_DMA 0x40000000 /* Client Mode sb2pcitranslation2 (1 GB) */
-#define SI_PCI_DMA2 0x80000000 /* Client Mode sb2pcitranslation2 (1 GB) */
-#define SI_PCI_DMA_SZ 0x40000000 /* Client Mode sb2pcitranslation2 size in bytes */
-#define SI_PCIE_DMA_L32 0x00000000 /* PCIE Client Mode sb2pcitranslation2
- * (2 ZettaBytes), low 32 bits
- */
-#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2
- * (2 ZettaBytes), high 32 bits
- */
-
-/* core codes */
-#define NODEV_CORE_ID 0x700 /* Invalid coreid */
-#define CC_CORE_ID 0x800 /* chipcommon core */
-#define ILINE20_CORE_ID 0x801 /* iline20 core */
-#define SRAM_CORE_ID 0x802 /* sram core */
-#define SDRAM_CORE_ID 0x803 /* sdram core */
-#define PCI_CORE_ID 0x804 /* pci core */
-#define MIPS_CORE_ID 0x805 /* mips core */
-#define ENET_CORE_ID 0x806 /* enet mac core */
-#define CODEC_CORE_ID 0x807 /* v90 codec core */
-#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
-#define ADSL_CORE_ID 0x809 /* ADSL core */
-#define ILINE100_CORE_ID 0x80a /* iline100 core */
-#define IPSEC_CORE_ID 0x80b /* ipsec core */
-#define UTOPIA_CORE_ID 0x80c /* utopia core */
-#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
-#define SOCRAM_CORE_ID 0x80e /* internal memory core */
-#define MEMC_CORE_ID 0x80f /* memc sdram core */
-#define OFDM_CORE_ID 0x810 /* OFDM phy core */
-#define EXTIF_CORE_ID 0x811 /* external interface core */
-#define D11_CORE_ID 0x812 /* 802.11 MAC core */
-#define APHY_CORE_ID 0x813 /* 802.11a phy core */
-#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
-#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
-#define MIPS33_CORE_ID 0x816 /* mips3302 core */
-#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
-#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
-#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
-#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
-#define SDIOH_CORE_ID 0x81b /* sdio host core */
-#define ROBO_CORE_ID 0x81c /* roboswitch core */
-#define ATA100_CORE_ID 0x81d /* parallel ATA core */
-#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
-#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
-#define PCIE_CORE_ID 0x820 /* pci express core */
-#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
-#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
-#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
-#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
-#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
-#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
-#define PMU_CORE_ID 0x827 /* PMU core */
-#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
-#define SDIOD_CORE_ID 0x829 /* SDIO device core */
-#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
-#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
-#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
-#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
-#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
-#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
-#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
-#define SC_CORE_ID 0x831 /* shared common core */
-#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
-#define SPIH_CORE_ID 0x833 /* SPI host core */
-#define I2S_CORE_ID 0x834 /* I2S core */
-#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
-#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
-#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
-#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all
- * unused address ranges
- */
-
-/* There are TWO constants on all HND chips: SI_ENUM_BASE above,
- * and chipcommon being the first core:
- */
-#define SI_CC_IDX 0
-
-/* SOC Interconnect types (aka chip types) */
-#define SOCI_AI 1
-
-/* Common core control flags */
-#define SICF_BIST_EN 0x8000
-#define SICF_PME_EN 0x4000
-#define SICF_CORE_BITS 0x3ffc
-#define SICF_FGC 0x0002
-#define SICF_CLOCK_EN 0x0001
-
-/* Common core status flags */
-#define SISF_BIST_DONE 0x8000
-#define SISF_BIST_ERROR 0x4000
-#define SISF_GATED_CLK 0x2000
-#define SISF_DMA64 0x1000
-#define SISF_CORE_BITS 0x0fff
-
-/* A register that is common to all cores to
- * communicate w/PMU regarding clock control.
- */
-#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */
-
-/* clk_ctl_st register */
-#define CCS_FORCEALP 0x00000001 /* force ALP request */
-#define CCS_FORCEHT 0x00000002 /* force HT request */
-#define CCS_FORCEILP 0x00000004 /* force ILP request */
-#define CCS_ALPAREQ 0x00000008 /* ALP Avail Request */
-#define CCS_HTAREQ 0x00000010 /* HT Avail Request */
-#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */
-#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */
-#define CCS_ERSRC_REQ_SHIFT 8
-#define CCS_ALPAVAIL 0x00010000 /* ALP is available */
-#define CCS_HTAVAIL 0x00020000 /* HT is available */
-#define CCS_BP_ON_APL 0x00040000 /* RO: Backplane is running on ALP clock */
-#define CCS_BP_ON_HT 0x00080000 /* RO: Backplane is running on HT clock */
-#define CCS_ERSRC_STS_MASK 0x07000000 /* external resource status */
-#define CCS_ERSRC_STS_SHIFT 24
-
-#define CCS0_HTAVAIL 0x00010000 /* HT avail in chipc and pcmcia on 4328a0 */
-#define CCS0_ALPAVAIL 0x00020000 /* ALP avail in chipc and pcmcia on 4328a0 */
-
-/* Not really related to SOC Interconnect, but a couple of software
- * conventions for the use the flash space:
- */
-
-/* Minimum amount of flash we support */
-#define FLASH_MIN 0x00020000 /* Minimum flash size */
-
-/* A boot/binary may have an embedded block that describes its size */
-#define BISZ_OFFSET 0x3e0 /* At this offset into the binary */
-#define BISZ_MAGIC 0x4249535a /* Marked with this value: 'BISZ' */
-#define BISZ_MAGIC_IDX 0 /* Word 0: magic */
-#define BISZ_TXTST_IDX 1 /* 1: text start */
-#define BISZ_TXTEND_IDX 2 /* 2: text end */
-#define BISZ_DATAST_IDX 3 /* 3: data start */
-#define BISZ_DATAEND_IDX 4 /* 4: data end */
-#define BISZ_BSSST_IDX 5 /* 5: bss start */
-#define BISZ_BSSEND_IDX 6 /* 6: bss end */
-#define BISZ_SIZE 7 /* descriptor size in 32-bit integers */
-
-#endif /* _HNDSOC_H */
diff --git a/drivers/staging/brcm80211/include/nicpci.h b/drivers/staging/brcm80211/include/nicpci.h
deleted file mode 100644
index 30321eb0477..00000000000
--- a/drivers/staging/brcm80211/include/nicpci.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _NICPCI_H
-#define _NICPCI_H
-
-#if defined(BCMSDIO) || (defined(BCMBUSTYPE) && (BCMBUSTYPE == SI_BUS))
-#define pcicore_find_pci_capability(a, b, c, d) (0)
-#define pcie_readreg(a, b, c, d) (0)
-#define pcie_writereg(a, b, c, d, e) (0)
-
-#define pcie_clkreq(a, b, c) (0)
-#define pcie_lcreg(a, b, c) (0)
-
-#define pcicore_init(a, b, c) (0x0dadbeef)
-#define pcicore_deinit(a) do { } while (0)
-#define pcicore_attach(a, b, c) do { } while (0)
-#define pcicore_hwup(a) do { } while (0)
-#define pcicore_up(a, b) do { } while (0)
-#define pcicore_sleep(a) do { } while (0)
-#define pcicore_down(a, b) do { } while (0)
-
-#define pcie_war_ovr_aspm_update(a, b) do { } while (0)
-
-#define pcicore_pcieserdesreg(a, b, c, d, e) (0)
-#define pcicore_pciereg(a, b, c, d, e) (0)
-
-#define pcicore_pmecap_fast(a) (false)
-#define pcicore_pmeen(a) do { } while (0)
-#define pcicore_pmeclr(a) do { } while (0)
-#define pcicore_pmestat(a) (false)
-#else
-struct sbpcieregs;
-
-extern u8 pcicore_find_pci_capability(void *dev, u8 req_cap_id,
- unsigned char *buf, u32 *buflen);
-extern uint pcie_readreg(struct sbpcieregs *pcieregs,
- uint addrtype, uint offset);
-extern uint pcie_writereg(struct sbpcieregs *pcieregs,
- uint addrtype, uint offset, uint val);
-
-extern u8 pcie_clkreq(void *pch, u32 mask, u32 val);
-extern u32 pcie_lcreg(void *pch, u32 mask, u32 val);
-
-extern void *pcicore_init(si_t *sih, void *pdev, void *regs);
-extern void pcicore_deinit(void *pch);
-extern void pcicore_attach(void *pch, char *pvars, int state);
-extern void pcicore_hwup(void *pch);
-extern void pcicore_up(void *pch, int state);
-extern void pcicore_sleep(void *pch);
-extern void pcicore_down(void *pch, int state);
-
-extern void pcie_war_ovr_aspm_update(void *pch, u8 aspm);
-extern u32 pcicore_pcieserdesreg(void *pch, u32 mdioslave, u32 offset,
- u32 mask, u32 val);
-
-extern u32 pcicore_pciereg(void *pch, u32 offset, u32 mask,
- u32 val, uint type);
-
-extern bool pcicore_pmecap_fast(void *pch);
-extern void pcicore_pmeen(void *pch);
-extern void pcicore_pmeclr(void *pch);
-extern bool pcicore_pmestat(void *pch);
-#endif /* defined(BCMSDIO)||(defined(BCMBUSTYPE) && (BCMBUSTYPE==SI_BUS)) */
-
-#endif /* _NICPCI_H */
diff --git a/drivers/staging/brcm80211/include/pci_core.h b/drivers/staging/brcm80211/include/pci_core.h
deleted file mode 100644
index 9153dcb8160..00000000000
--- a/drivers/staging/brcm80211/include/pci_core.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _PCI_CORE_H_
-#define _PCI_CORE_H_
-
-#ifndef _LANGUAGE_ASSEMBLY
-
-/* cpp contortions to concatenate w/arg prescan */
-#ifndef PAD
-#define _PADLINE(line) pad ## line
-#define _XSTR(line) _PADLINE(line)
-#define PAD _XSTR(__LINE__)
-#endif
-
-/* Sonics side: PCI core and host control registers */
-struct sbpciregs {
- u32 control; /* PCI control */
- u32 PAD[3];
- u32 arbcontrol; /* PCI arbiter control */
- u32 clkrun; /* Clkrun Control (>=rev11) */
- u32 PAD[2];
- u32 intstatus; /* Interrupt status */
- u32 intmask; /* Interrupt mask */
- u32 sbtopcimailbox; /* Sonics to PCI mailbox */
- u32 PAD[9];
- u32 bcastaddr; /* Sonics broadcast address */
- u32 bcastdata; /* Sonics broadcast data */
- u32 PAD[2];
- u32 gpioin; /* ro: gpio input (>=rev2) */
- u32 gpioout; /* rw: gpio output (>=rev2) */
- u32 gpioouten; /* rw: gpio output enable (>= rev2) */
- u32 gpiocontrol; /* rw: gpio control (>= rev2) */
- u32 PAD[36];
- u32 sbtopci0; /* Sonics to PCI translation 0 */
- u32 sbtopci1; /* Sonics to PCI translation 1 */
- u32 sbtopci2; /* Sonics to PCI translation 2 */
- u32 PAD[189];
- u32 pcicfg[4][64]; /* 0x400 - 0x7FF, PCI Cfg Space (>=rev8) */
- u16 sprom[36]; /* SPROM shadow Area */
- u32 PAD[46];
-};
-
-#endif /* _LANGUAGE_ASSEMBLY */
-
-/* PCI control */
-#define PCI_RST_OE 0x01 /* When set, drives PCI_RESET out to pin */
-#define PCI_RST 0x02 /* Value driven out to pin */
-#define PCI_CLK_OE 0x04 /* When set, drives clock as gated by PCI_CLK out to pin */
-#define PCI_CLK 0x08 /* Gate for clock driven out to pin */
-
-/* PCI arbiter control */
-#define PCI_INT_ARB 0x01 /* When set, use an internal arbiter */
-#define PCI_EXT_ARB 0x02 /* When set, use an external arbiter */
-/* ParkID - for PCI corerev >= 8 */
-#define PCI_PARKID_MASK 0x1c /* Selects which agent is parked on an idle bus */
-#define PCI_PARKID_SHIFT 2
-#define PCI_PARKID_EXT0 0 /* External master 0 */
-#define PCI_PARKID_EXT1 1 /* External master 1 */
-#define PCI_PARKID_EXT2 2 /* External master 2 */
-#define PCI_PARKID_EXT3 3 /* External master 3 (rev >= 11) */
-#define PCI_PARKID_INT 3 /* Internal master (rev < 11) */
-#define PCI11_PARKID_INT 4 /* Internal master (rev >= 11) */
-#define PCI_PARKID_LAST 4 /* Last active master (rev < 11) */
-#define PCI11_PARKID_LAST 5 /* Last active master (rev >= 11) */
-
-#define PCI_CLKRUN_DSBL 0x8000 /* Bit 15 forceClkrun */
-
-/* Interrupt status/mask */
-#define PCI_INTA 0x01 /* PCI INTA# is asserted */
-#define PCI_INTB 0x02 /* PCI INTB# is asserted */
-#define PCI_SERR 0x04 /* PCI SERR# has been asserted (write one to clear) */
-#define PCI_PERR 0x08 /* PCI PERR# has been asserted (write one to clear) */
-#define PCI_PME 0x10 /* PCI PME# is asserted */
-
-/* (General) PCI/SB mailbox interrupts, two bits per pci function */
-#define MAILBOX_F0_0 0x100 /* function 0, int 0 */
-#define MAILBOX_F0_1 0x200 /* function 0, int 1 */
-#define MAILBOX_F1_0 0x400 /* function 1, int 0 */
-#define MAILBOX_F1_1 0x800 /* function 1, int 1 */
-#define MAILBOX_F2_0 0x1000 /* function 2, int 0 */
-#define MAILBOX_F2_1 0x2000 /* function 2, int 1 */
-#define MAILBOX_F3_0 0x4000 /* function 3, int 0 */
-#define MAILBOX_F3_1 0x8000 /* function 3, int 1 */
-
-/* Sonics broadcast address */
-#define BCAST_ADDR_MASK 0xff /* Broadcast register address */
-
-/* Sonics to PCI translation types */
-#define SBTOPCI0_MASK 0xfc000000
-#define SBTOPCI1_MASK 0xfc000000
-#define SBTOPCI2_MASK 0xc0000000
-#define SBTOPCI_MEM 0
-#define SBTOPCI_IO 1
-#define SBTOPCI_CFG0 2
-#define SBTOPCI_CFG1 3
-#define SBTOPCI_PREF 0x4 /* prefetch enable */
-#define SBTOPCI_BURST 0x8 /* burst enable */
-#define SBTOPCI_RC_MASK 0x30 /* read command (>= rev11) */
-#define SBTOPCI_RC_READ 0x00 /* memory read */
-#define SBTOPCI_RC_READLINE 0x10 /* memory read line */
-#define SBTOPCI_RC_READMULTI 0x20 /* memory read multiple */
-
-/* PCI core index in SROM shadow area */
-#define SRSH_PI_OFFSET 0 /* first word */
-#define SRSH_PI_MASK 0xf000 /* bit 15:12 */
-#define SRSH_PI_SHIFT 12 /* bit 15:12 */
-
-#endif /* _PCI_CORE_H_ */
diff --git a/drivers/staging/brcm80211/include/pcicfg.h b/drivers/staging/brcm80211/include/pcicfg.h
deleted file mode 100644
index d0c617a63c4..00000000000
--- a/drivers/staging/brcm80211/include/pcicfg.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _h_pcicfg_
-#define _h_pcicfg_
-
-#include <linux/pci_regs.h>
-
-/* PCI configuration address space size */
-#define PCI_SZPCR 256
-
-/* Everything below is BRCM HND proprietary */
-
-/* Brcm PCI configuration registers */
-#define PCI_BAR0_WIN 0x80 /* backplane address space accessed by BAR0 */
-#define PCI_SPROM_CONTROL 0x88 /* sprom property control */
-#define PCI_INT_MASK 0x94 /* mask of PCI and other cores interrupts */
-#define PCI_SBIM_SHIFT 8 /* backplane core interrupt mask bits offset */
-#define PCI_BAR0_WIN2 0xac /* backplane address space accessed by second 4KB of BAR0 */
-#define PCI_GPIO_IN 0xb0 /* pci config space gpio input (>=rev3) */
-#define PCI_GPIO_OUT 0xb4 /* pci config space gpio output (>=rev3) */
-#define PCI_GPIO_OUTEN 0xb8 /* pci config space gpio output enable (>=rev3) */
-
-#define PCI_BAR0_SPROM_OFFSET (4 * 1024) /* bar0 + 4K accesses external sprom */
-#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024) /* bar0 + 6K accesses pci core registers */
-#define PCI_BAR0_PCISBR_OFFSET (4 * 1024) /* pci core SB registers are at the end of the
- * 8KB window, so their address is the "regular"
- * address plus 4K
- */
-#define PCI_BAR0_WINSZ (16 * 1024) /* bar0 window size Match with corerev 13 */
-/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */
-#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024) /* bar0 + 8K accesses pci/pcie core registers */
-#define PCI_16KB0_CCREGS_OFFSET (12 * 1024) /* bar0 + 12K accesses chipc core registers */
-
-#define PCI_SBIM_STATUS_SERR 0x4 /* backplane SBErr interrupt status */
-
-#endif /* _h_pcicfg_ */
diff --git a/drivers/staging/brcm80211/include/pcie_core.h b/drivers/staging/brcm80211/include/pcie_core.h
deleted file mode 100644
index cd54ddcf459..00000000000
--- a/drivers/staging/brcm80211/include/pcie_core.h
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _PCIE_CORE_H
-#define _PCIE_CORE_H
-
-/* cpp contortions to concatenate w/arg prescan */
-#ifndef PAD
-#define _PADLINE(line) pad ## line
-#define _XSTR(line) _PADLINE(line)
-#define PAD _XSTR(__LINE__)
-#endif
-
-/* PCIE Enumeration space offsets */
-#define PCIE_CORE_CONFIG_OFFSET 0x0
-#define PCIE_FUNC0_CONFIG_OFFSET 0x400
-#define PCIE_FUNC1_CONFIG_OFFSET 0x500
-#define PCIE_FUNC2_CONFIG_OFFSET 0x600
-#define PCIE_FUNC3_CONFIG_OFFSET 0x700
-#define PCIE_SPROM_SHADOW_OFFSET 0x800
-#define PCIE_SBCONFIG_OFFSET 0xE00
-
-/* PCIE Bar0 Address Mapping. Each function maps 16KB config space */
-#define PCIE_DEV_BAR0_SIZE 0x4000
-#define PCIE_BAR0_WINMAPCORE_OFFSET 0x0
-#define PCIE_BAR0_EXTSPROM_OFFSET 0x1000
-#define PCIE_BAR0_PCIECORE_OFFSET 0x2000
-#define PCIE_BAR0_CCCOREREG_OFFSET 0x3000
-
-/* different register spaces to access thr'u pcie indirect access */
-#define PCIE_CONFIGREGS 1 /* Access to config space */
-#define PCIE_PCIEREGS 2 /* Access to pcie registers */
-
-/* SB side: PCIE core and host control registers */
-typedef struct sbpcieregs {
- u32 control; /* host mode only */
- u32 PAD[2];
- u32 biststatus; /* bist Status: 0x00C */
- u32 gpiosel; /* PCIE gpio sel: 0x010 */
- u32 gpioouten; /* PCIE gpio outen: 0x14 */
- u32 PAD[2];
- u32 intstatus; /* Interrupt status: 0x20 */
- u32 intmask; /* Interrupt mask: 0x24 */
- u32 sbtopcimailbox; /* sb to pcie mailbox: 0x028 */
- u32 PAD[53];
- u32 sbtopcie0; /* sb to pcie translation 0: 0x100 */
- u32 sbtopcie1; /* sb to pcie translation 1: 0x104 */
- u32 sbtopcie2; /* sb to pcie translation 2: 0x108 */
- u32 PAD[5];
-
- /* pcie core supports in direct access to config space */
- u32 configaddr; /* pcie config space access: Address field: 0x120 */
- u32 configdata; /* pcie config space access: Data field: 0x124 */
-
- /* mdio access to serdes */
- u32 mdiocontrol; /* controls the mdio access: 0x128 */
- u32 mdiodata; /* Data to the mdio access: 0x12c */
-
- /* pcie protocol phy/dllp/tlp register indirect access mechanism */
- u32 pcieindaddr; /* indirect access to the internal register: 0x130 */
- u32 pcieinddata; /* Data to/from the internal regsiter: 0x134 */
-
- u32 clkreqenctrl; /* >= rev 6, Clkreq rdma control : 0x138 */
- u32 PAD[177];
- u32 pciecfg[4][64]; /* 0x400 - 0x7FF, PCIE Cfg Space */
- u16 sprom[64]; /* SPROM shadow Area */
-} sbpcieregs_t;
-
-/* PCI control */
-#define PCIE_RST_OE 0x01 /* When set, drives PCI_RESET out to pin */
-#define PCIE_RST 0x02 /* Value driven out to pin */
-
-#define PCIE_CFGADDR 0x120 /* offsetof(configaddr) */
-#define PCIE_CFGDATA 0x124 /* offsetof(configdata) */
-
-/* Interrupt status/mask */
-#define PCIE_INTA 0x01 /* PCIE INTA message is received */
-#define PCIE_INTB 0x02 /* PCIE INTB message is received */
-#define PCIE_INTFATAL 0x04 /* PCIE INTFATAL message is received */
-#define PCIE_INTNFATAL 0x08 /* PCIE INTNONFATAL message is received */
-#define PCIE_INTCORR 0x10 /* PCIE INTCORR message is received */
-#define PCIE_INTPME 0x20 /* PCIE INTPME message is received */
-
-/* SB to PCIE translation masks */
-#define SBTOPCIE0_MASK 0xfc000000
-#define SBTOPCIE1_MASK 0xfc000000
-#define SBTOPCIE2_MASK 0xc0000000
-
-/* Access type bits (0:1) */
-#define SBTOPCIE_MEM 0
-#define SBTOPCIE_IO 1
-#define SBTOPCIE_CFG0 2
-#define SBTOPCIE_CFG1 3
-
-/* Prefetch enable bit 2 */
-#define SBTOPCIE_PF 4
-
-/* Write Burst enable for memory write bit 3 */
-#define SBTOPCIE_WR_BURST 8
-
-/* config access */
-#define CONFIGADDR_FUNC_MASK 0x7000
-#define CONFIGADDR_FUNC_SHF 12
-#define CONFIGADDR_REG_MASK 0x0FFF
-#define CONFIGADDR_REG_SHF 0
-
-#define PCIE_CONFIG_INDADDR(f, r) \
- ((((f) & CONFIGADDR_FUNC_MASK) << CONFIGADDR_FUNC_SHF) | \
- (((r) & CONFIGADDR_REG_MASK) << CONFIGADDR_REG_SHF))
-
-/* PCIE protocol regs Indirect Address */
-#define PCIEADDR_PROT_MASK 0x300
-#define PCIEADDR_PROT_SHF 8
-#define PCIEADDR_PL_TLP 0
-#define PCIEADDR_PL_DLLP 1
-#define PCIEADDR_PL_PLP 2
-
-/* PCIE protocol PHY diagnostic registers */
-#define PCIE_PLP_MODEREG 0x200 /* Mode */
-#define PCIE_PLP_STATUSREG 0x204 /* Status */
-#define PCIE_PLP_LTSSMCTRLREG 0x208 /* LTSSM control */
-#define PCIE_PLP_LTLINKNUMREG 0x20c /* Link Training Link number */
-#define PCIE_PLP_LTLANENUMREG 0x210 /* Link Training Lane number */
-#define PCIE_PLP_LTNFTSREG 0x214 /* Link Training N_FTS */
-#define PCIE_PLP_ATTNREG 0x218 /* Attention */
-#define PCIE_PLP_ATTNMASKREG 0x21C /* Attention Mask */
-#define PCIE_PLP_RXERRCTR 0x220 /* Rx Error */
-#define PCIE_PLP_RXFRMERRCTR 0x224 /* Rx Framing Error */
-#define PCIE_PLP_RXERRTHRESHREG 0x228 /* Rx Error threshold */
-#define PCIE_PLP_TESTCTRLREG 0x22C /* Test Control reg */
-#define PCIE_PLP_SERDESCTRLOVRDREG 0x230 /* SERDES Control Override */
-#define PCIE_PLP_TIMINGOVRDREG 0x234 /* Timing param override */
-#define PCIE_PLP_RXTXSMDIAGREG 0x238 /* RXTX State Machine Diag */
-#define PCIE_PLP_LTSSMDIAGREG 0x23C /* LTSSM State Machine Diag */
-
-/* PCIE protocol DLLP diagnostic registers */
-#define PCIE_DLLP_LCREG 0x100 /* Link Control */
-#define PCIE_DLLP_LSREG 0x104 /* Link Status */
-#define PCIE_DLLP_LAREG 0x108 /* Link Attention */
-#define PCIE_DLLP_LAMASKREG 0x10C /* Link Attention Mask */
-#define PCIE_DLLP_NEXTTXSEQNUMREG 0x110 /* Next Tx Seq Num */
-#define PCIE_DLLP_ACKEDTXSEQNUMREG 0x114 /* Acked Tx Seq Num */
-#define PCIE_DLLP_PURGEDTXSEQNUMREG 0x118 /* Purged Tx Seq Num */
-#define PCIE_DLLP_RXSEQNUMREG 0x11C /* Rx Sequence Number */
-#define PCIE_DLLP_LRREG 0x120 /* Link Replay */
-#define PCIE_DLLP_LACKTOREG 0x124 /* Link Ack Timeout */
-#define PCIE_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */
-#define PCIE_DLLP_RTRYWPREG 0x12C /* Retry buffer write ptr */
-#define PCIE_DLLP_RTRYRPREG 0x130 /* Retry buffer Read ptr */
-#define PCIE_DLLP_RTRYPPREG 0x134 /* Retry buffer Purged ptr */
-#define PCIE_DLLP_RTRRWREG 0x138 /* Retry buffer Read/Write */
-#define PCIE_DLLP_ECTHRESHREG 0x13C /* Error Count Threshold */
-#define PCIE_DLLP_TLPERRCTRREG 0x140 /* TLP Error Counter */
-#define PCIE_DLLP_ERRCTRREG 0x144 /* Error Counter */
-#define PCIE_DLLP_NAKRXCTRREG 0x148 /* NAK Received Counter */
-#define PCIE_DLLP_TESTREG 0x14C /* Test */
-#define PCIE_DLLP_PKTBIST 0x150 /* Packet BIST */
-#define PCIE_DLLP_PCIE11 0x154 /* DLLP PCIE 1.1 reg */
-
-#define PCIE_DLLP_LSREG_LINKUP (1 << 16)
-
-/* PCIE protocol TLP diagnostic registers */
-#define PCIE_TLP_CONFIGREG 0x000 /* Configuration */
-#define PCIE_TLP_WORKAROUNDSREG 0x004 /* TLP Workarounds */
-#define PCIE_TLP_WRDMAUPPER 0x010 /* Write DMA Upper Address */
-#define PCIE_TLP_WRDMALOWER 0x014 /* Write DMA Lower Address */
-#define PCIE_TLP_WRDMAREQ_LBEREG 0x018 /* Write DMA Len/ByteEn Req */
-#define PCIE_TLP_RDDMAUPPER 0x01C /* Read DMA Upper Address */
-#define PCIE_TLP_RDDMALOWER 0x020 /* Read DMA Lower Address */
-#define PCIE_TLP_RDDMALENREG 0x024 /* Read DMA Len Req */
-#define PCIE_TLP_MSIDMAUPPER 0x028 /* MSI DMA Upper Address */
-#define PCIE_TLP_MSIDMALOWER 0x02C /* MSI DMA Lower Address */
-#define PCIE_TLP_MSIDMALENREG 0x030 /* MSI DMA Len Req */
-#define PCIE_TLP_SLVREQLENREG 0x034 /* Slave Request Len */
-#define PCIE_TLP_FCINPUTSREQ 0x038 /* Flow Control Inputs */
-#define PCIE_TLP_TXSMGRSREQ 0x03C /* Tx StateMachine and Gated Req */
-#define PCIE_TLP_ADRACKCNTARBLEN 0x040 /* Address Ack XferCnt and ARB Len */
-#define PCIE_TLP_DMACPLHDR0 0x044 /* DMA Completion Hdr 0 */
-#define PCIE_TLP_DMACPLHDR1 0x048 /* DMA Completion Hdr 1 */
-#define PCIE_TLP_DMACPLHDR2 0x04C /* DMA Completion Hdr 2 */
-#define PCIE_TLP_DMACPLMISC0 0x050 /* DMA Completion Misc0 */
-#define PCIE_TLP_DMACPLMISC1 0x054 /* DMA Completion Misc1 */
-#define PCIE_TLP_DMACPLMISC2 0x058 /* DMA Completion Misc2 */
-#define PCIE_TLP_SPTCTRLLEN 0x05C /* Split Controller Req len */
-#define PCIE_TLP_SPTCTRLMSIC0 0x060 /* Split Controller Misc 0 */
-#define PCIE_TLP_SPTCTRLMSIC1 0x064 /* Split Controller Misc 1 */
-#define PCIE_TLP_BUSDEVFUNC 0x068 /* Bus/Device/Func */
-#define PCIE_TLP_RESETCTR 0x06C /* Reset Counter */
-#define PCIE_TLP_RTRYBUF 0x070 /* Retry Buffer value */
-#define PCIE_TLP_TGTDEBUG1 0x074 /* Target Debug Reg1 */
-#define PCIE_TLP_TGTDEBUG2 0x078 /* Target Debug Reg2 */
-#define PCIE_TLP_TGTDEBUG3 0x07C /* Target Debug Reg3 */
-#define PCIE_TLP_TGTDEBUG4 0x080 /* Target Debug Reg4 */
-
-/* MDIO control */
-#define MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */
-#define MDIOCTL_DIVISOR_VAL 0x2
-#define MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequnce */
-#define MDIOCTL_ACCESS_DONE 0x100 /* Tranaction complete */
-
-/* MDIO Data */
-#define MDIODATA_MASK 0x0000ffff /* data 2 bytes */
-#define MDIODATA_TA 0x00020000 /* Turnaround */
-#define MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift (rev < 10) */
-#define MDIODATA_REGADDR_MASK_OLD 0x003c0000 /* Regaddr Mask (rev < 10) */
-#define MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift (rev < 10) */
-#define MDIODATA_DEVADDR_MASK_OLD 0x0fc00000 /* Physmedia devaddr Mask (rev < 10) */
-#define MDIODATA_REGADDR_SHF 18 /* Regaddr shift */
-#define MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */
-#define MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */
-#define MDIODATA_DEVADDR_MASK 0x0f800000 /* Physmedia devaddr Mask */
-#define MDIODATA_WRITE 0x10000000 /* write Transaction */
-#define MDIODATA_READ 0x20000000 /* Read Transaction */
-#define MDIODATA_START 0x40000000 /* start of Transaction */
-
-#define MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */
-#define MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */
-
-/* MDIO devices (SERDES modules)
- * unlike old pcie cores (rev < 10), rev10 pcie serde organizes registers into a few blocks.
- * two layers mapping (blockidx, register offset) is required
- */
-#define MDIO_DEV_IEEE0 0x000
-#define MDIO_DEV_IEEE1 0x001
-#define MDIO_DEV_BLK0 0x800
-#define MDIO_DEV_BLK1 0x801
-#define MDIO_DEV_BLK2 0x802
-#define MDIO_DEV_BLK3 0x803
-#define MDIO_DEV_BLK4 0x804
-#define MDIO_DEV_TXPLL 0x808 /* TXPLL register block idx */
-#define MDIO_DEV_TXCTRL0 0x820
-#define MDIO_DEV_SERDESID 0x831
-#define MDIO_DEV_RXCTRL0 0x840
-
-/* serdes regs (rev < 10) */
-#define MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */
-#define MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
-#define MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
- /* SERDES RX registers */
-#define SERDES_RX_CTRL 1 /* Rx cntrl */
-#define SERDES_RX_TIMER1 2 /* Rx Timer1 */
-#define SERDES_RX_CDR 6 /* CDR */
-#define SERDES_RX_CDRBW 7 /* CDR BW */
-
- /* SERDES RX control register */
-#define SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */
-#define SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */
-
- /* SERDES PLL registers */
-#define SERDES_PLL_CTRL 1 /* PLL control reg */
-#define PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */
-
-/* Power management threshold */
-#define PCIE_L0THRESHOLDTIME_MASK 0xFF00 /* bits 0 - 7 */
-#define PCIE_L1THRESHOLDTIME_MASK 0xFF00 /* bits 8 - 15 */
-#define PCIE_L1THRESHOLDTIME_SHIFT 8 /* PCIE_L1THRESHOLDTIME_SHIFT */
-#define PCIE_L1THRESHOLD_WARVAL 0x72 /* WAR value */
-#define PCIE_ASPMTIMER_EXTEND 0x01000000 /* > rev7: enable extend ASPM timer */
-
-/* SPROM offsets */
-#define SRSH_ASPM_OFFSET 4 /* word 4 */
-#define SRSH_ASPM_ENB 0x18 /* bit 3, 4 */
-#define SRSH_ASPM_L1_ENB 0x10 /* bit 4 */
-#define SRSH_ASPM_L0s_ENB 0x8 /* bit 3 */
-#define SRSH_PCIE_MISC_CONFIG 5 /* word 5 */
-#define SRSH_L23READY_EXIT_NOPERST 0x8000 /* bit 15 */
-#define SRSH_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */
-#define SRSH_CLKREQ_OFFSET_REV8 52 /* word 52 for srom rev 8 */
-#define SRSH_CLKREQ_ENB 0x0800 /* bit 11 */
-#define SRSH_BD_OFFSET 6 /* word 6 */
-#define SRSH_AUTOINIT_OFFSET 18 /* auto initialization enable */
-
-/* Linkcontrol reg offset in PCIE Cap */
-#define PCIE_CAP_LINKCTRL_OFFSET 16 /* linkctrl offset in pcie cap */
-#define PCIE_CAP_LCREG_ASPML0s 0x01 /* ASPM L0s in linkctrl */
-#define PCIE_CAP_LCREG_ASPML1 0x02 /* ASPM L1 in linkctrl */
-#define PCIE_CLKREQ_ENAB 0x100 /* CLKREQ Enab in linkctrl */
-
-#define PCIE_ASPM_ENAB 3 /* ASPM L0s & L1 in linkctrl */
-#define PCIE_ASPM_L1_ENAB 2 /* ASPM L0s & L1 in linkctrl */
-#define PCIE_ASPM_L0s_ENAB 1 /* ASPM L0s & L1 in linkctrl */
-#define PCIE_ASPM_DISAB 0 /* ASPM L0s & L1 in linkctrl */
-
-/* Status reg PCIE_PLP_STATUSREG */
-#define PCIE_PLP_POLARITYINV_STAT 0x10
-#endif /* _PCIE_CORE_H */
diff --git a/drivers/staging/brcm80211/include/proto/802.11.h b/drivers/staging/brcm80211/include/proto/802.11.h
deleted file mode 100644
index 374125d770b..00000000000
--- a/drivers/staging/brcm80211/include/proto/802.11.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _802_11_H_
-#define _802_11_H_
-
-#include <linux/if_ether.h>
-
-#define DOT11_A3_HDR_LEN 24
-#define DOT11_A4_HDR_LEN 30
-#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN
-#define DOT11_ICV_AES_LEN 8
-#define DOT11_QOS_LEN 2
-
-#define DOT11_IV_MAX_LEN 8
-
-#define DOT11_DEFAULT_RTS_LEN 2347
-
-#define DOT11_MIN_FRAG_LEN 256
-#define DOT11_MAX_FRAG_LEN 2346
-#define DOT11_DEFAULT_FRAG_LEN 2346
-
-#define DOT11_MIN_BEACON_PERIOD 1
-#define DOT11_MAX_BEACON_PERIOD 0xFFFF
-
-#define DOT11_MIN_DTIM_PERIOD 1
-#define DOT11_MAX_DTIM_PERIOD 0xFF
-
-#define DOT11_OUI_LEN 3
-
-#define DOT11_RTS_LEN 16
-#define DOT11_CTS_LEN 10
-#define DOT11_ACK_LEN 10
-
-#define DOT11_BA_BITMAP_LEN 128
-#define DOT11_BA_LEN 4
-
-#define WME_OUI "\x00\x50\xf2"
-#define WME_VER 1
-#define WME_TYPE 2
-#define WME_SUBTYPE_PARAM_IE 1
-
-#define AC_BE 0
-#define AC_BK 1
-#define AC_VI 2
-#define AC_VO 3
-#define AC_COUNT 4
-
-typedef u8 ac_bitmap_t;
-
-#define AC_BITMAP_ALL 0xf
-#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0)
-
-struct edcf_acparam {
- u8 ACI;
- u8 ECW;
- u16 TXOP;
-} __attribute__((packed));
-typedef struct edcf_acparam edcf_acparam_t;
-
-struct wme_param_ie {
- u8 oui[3];
- u8 type;
- u8 subtype;
- u8 version;
- u8 qosinfo;
- u8 rsvd;
- edcf_acparam_t acparam[AC_COUNT];
-} __attribute__((packed));
-typedef struct wme_param_ie wme_param_ie_t;
-#define WME_PARAM_IE_LEN 24
-
-#define EDCF_AIFSN_MIN 1
-#define EDCF_AIFSN_MAX 15
-#define EDCF_AIFSN_MASK 0x0f
-#define EDCF_ACM_MASK 0x10
-#define EDCF_ACI_MASK 0x60
-#define EDCF_ACI_SHIFT 5
-
-#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
-#define EDCF_ECWMIN_MASK 0x0f
-#define EDCF_ECWMAX_MASK 0xf0
-#define EDCF_ECWMAX_SHIFT 4
-
-#define EDCF_TXOP2USEC(txop) ((txop) << 5)
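For reference, a hedged sketch of how these field macros decode an EDCF AC parameter record: the ECW byte packs ECWmin/ECWmax, and TXOP is in 32 us units per EDCF_TXOP2USEC. Helper names are illustrative only:

static inline u16 edcf_cwmin(u8 ecw)
{
	return EDCF_ECW2CW(ecw & EDCF_ECWMIN_MASK);	/* CWmin = 2^ECWmin - 1 */
}

static inline u16 edcf_cwmax(u8 ecw)
{
	return EDCF_ECW2CW((ecw & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT);
}

static inline u32 edcf_txop_us(u16 txop)
{
	return EDCF_TXOP2USEC(txop);			/* TXOP units are 32 us */
}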
-
-#define EDCF_AC_BE_ACI_STA 0x03
-#define EDCF_AC_BE_ECW_STA 0xA4
-#define EDCF_AC_BE_TXOP_STA 0x0000
-#define EDCF_AC_BK_ACI_STA 0x27
-#define EDCF_AC_BK_ECW_STA 0xA4
-#define EDCF_AC_BK_TXOP_STA 0x0000
-#define EDCF_AC_VI_ACI_STA 0x42
-#define EDCF_AC_VI_ECW_STA 0x43
-#define EDCF_AC_VI_TXOP_STA 0x005e
-#define EDCF_AC_VO_ACI_STA 0x62
-#define EDCF_AC_VO_ECW_STA 0x32
-#define EDCF_AC_VO_TXOP_STA 0x002f
-
-#define EDCF_AC_VO_TXOP_AP 0x002f
-
-#define SEQNUM_SHIFT 4
-#define SEQNUM_MAX 0x1000
-#define FRAGNUM_MASK 0xF
-
-#define DOT11_MNG_RSN_ID 48
-#define DOT11_MNG_WPA_ID 221
-#define DOT11_MNG_VS_ID 221
-
-#define DOT11_BSSTYPE_INFRASTRUCTURE 0
-#define DOT11_BSSTYPE_ANY 2
-#define DOT11_SCANTYPE_ACTIVE 0
-
-#define PREN_PREAMBLE 24
-#define PREN_MM_EXT 12
-#define PREN_PREAMBLE_EXT 4
-
-#define RIFS_11N_TIME 2
-
-#define APHY_SLOT_TIME 9
-#define APHY_SIFS_TIME 16
-#define APHY_PREAMBLE_TIME 16
-#define APHY_SIGNAL_TIME 4
-#define APHY_SYMBOL_TIME 4
-#define APHY_SERVICE_NBITS 16
-#define APHY_TAIL_NBITS 6
-#define APHY_CWMIN 15
-
-#define BPHY_SLOT_TIME 20
-#define BPHY_SIFS_TIME 10
-#define BPHY_PLCP_TIME 192
-#define BPHY_PLCP_SHORT_TIME 96
-
-#define DOT11_OFDM_SIGNAL_EXTENSION 6
-
-#define PHY_CWMAX 1023
-
-#define DOT11_MAXNUMFRAGS 16
-
-typedef struct d11cnt {
- u32 txfrag;
- u32 txmulti;
- u32 txfail;
- u32 txretry;
- u32 txretrie;
- u32 rxdup;
- u32 txrts;
- u32 txnocts;
- u32 txnoack;
- u32 rxfrag;
- u32 rxmulti;
- u32 rxcrc;
- u32 txfrmsnt;
- u32 rxundec;
-} d11cnt_t;
-
-#define MCSSET_LEN 16
-
-#define HT_CAP_IE_LEN 26
-
-#define HT_CAP_RX_STBC_NO 0x0
-#define HT_CAP_RX_STBC_ONE_STREAM 0x1
-
-#define AMPDU_MAX_MPDU_DENSITY IEEE80211_HT_MPDU_DENSITY_16
-
-#define AMPDU_DELIMITER_LEN 4
-
-#define DOT11N_TXBURST 0x0008
-
-#define WPA_VERSION 1
-#define WPA_OUI "\x00\x50\xF2"
-
-#define WFA_OUI "\x00\x50\xF2"
-#define WFA_OUI_LEN 3
-
-#define WFA_OUI_TYPE_WPA 1
-
-#define RSN_AKM_NONE 0
-#define RSN_AKM_UNSPECIFIED 1
-#define RSN_AKM_PSK 2
-
-#define DOT11_MAX_DEFAULT_KEYS 4
-#define DOT11_WPA_KEY_RSC_LEN 8
-
-#define BRCM_OUI "\x00\x10\x18"
-
-#endif /* _802_11_H_ */
diff --git a/drivers/staging/brcm80211/include/proto/bcmeth.h b/drivers/staging/brcm80211/include/proto/bcmeth.h
deleted file mode 100644
index e98ee654458..00000000000
--- a/drivers/staging/brcm80211/include/proto/bcmeth.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BCMETH_H_
-#define _BCMETH_H_
-
-#define BCMILCP_SUBTYPE_RATE 1
-#define BCMILCP_SUBTYPE_LINK 2
-#define BCMILCP_SUBTYPE_CSA 3
-#define BCMILCP_SUBTYPE_LARQ 4
-#define BCMILCP_SUBTYPE_VENDOR 5
-#define BCMILCP_SUBTYPE_FLH 17
-#define BCMILCP_SUBTYPE_VENDOR_LONG 32769
-#define BCMILCP_SUBTYPE_CERT 32770
-#define BCMILCP_SUBTYPE_SES 32771
-#define BCMILCP_BCM_SUBTYPE_RESERVED 0
-#define BCMILCP_BCM_SUBTYPE_EVENT 1
-#define BCMILCP_BCM_SUBTYPE_SES 2
-#define BCMILCP_BCM_SUBTYPE_DPT 4
-#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH 8
-#define BCMILCP_BCM_SUBTYPEHDR_VERSION 0
-
-typedef struct bcmeth_hdr {
- u16 subtype;
- u16 length;
- u8 version;
- u8 oui[3];
- u16 usr_subtype;
-} __attribute__((packed)) bcmeth_hdr_t;
-
-#endif /* _BCMETH_H_ */
diff --git a/drivers/staging/brcm80211/include/proto/bcmevent.h b/drivers/staging/brcm80211/include/proto/bcmevent.h
deleted file mode 100644
index 1b60789aef0..00000000000
--- a/drivers/staging/brcm80211/include/proto/bcmevent.h
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BCMEVENT_H_
-#define _BCMEVENT_H_
-
-#include <linux/if_ether.h>
-
-#define BCM_EVENT_MSG_VERSION 1
-#define BCM_MSG_IFNAME_MAX 16
-
-#define WLC_EVENT_MSG_LINK 0x01
-#define WLC_EVENT_MSG_FLUSHTXQ 0x02
-#define WLC_EVENT_MSG_GROUP 0x04
-
-typedef struct {
- u16 version;
- u16 flags;
- u32 event_type;
- u32 status;
- u32 reason;
- u32 auth_type;
- u32 datalen;
- u8 addr[ETH_ALEN];
- char ifname[BCM_MSG_IFNAME_MAX];
-} __attribute__((packed)) wl_event_msg_t;
-
-#ifdef BRCM_FULLMAC
-typedef struct bcm_event {
- struct ethhdr eth;
- bcmeth_hdr_t bcm_hdr;
- wl_event_msg_t event;
-} __attribute__((packed)) bcm_event_t;
-#endif
-#define BCM_MSG_LEN (sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - \
- sizeof(struct ether_header))
-
-#define WLC_E_SET_SSID 0
-#define WLC_E_JOIN 1
-#define WLC_E_START 2
-#define WLC_E_AUTH 3
-#define WLC_E_AUTH_IND 4
-#define WLC_E_DEAUTH 5
-#define WLC_E_DEAUTH_IND 6
-#define WLC_E_ASSOC 7
-#define WLC_E_ASSOC_IND 8
-#define WLC_E_REASSOC 9
-#define WLC_E_REASSOC_IND 10
-#define WLC_E_DISASSOC 11
-#define WLC_E_DISASSOC_IND 12
-#define WLC_E_QUIET_START 13
-#define WLC_E_QUIET_END 14
-#define WLC_E_BEACON_RX 15
-#define WLC_E_LINK 16
-#define WLC_E_MIC_ERROR 17
-#define WLC_E_NDIS_LINK 18
-#define WLC_E_ROAM 19
-#define WLC_E_TXFAIL 20
-#define WLC_E_PMKID_CACHE 21
-#define WLC_E_RETROGRADE_TSF 22
-#define WLC_E_PRUNE 23
-#define WLC_E_AUTOAUTH 24
-#define WLC_E_EAPOL_MSG 25
-#define WLC_E_SCAN_COMPLETE 26
-#define WLC_E_ADDTS_IND 27
-#define WLC_E_DELTS_IND 28
-#define WLC_E_BCNSENT_IND 29
-#define WLC_E_BCNRX_MSG 30
-#define WLC_E_BCNLOST_MSG 31
-#define WLC_E_ROAM_PREP 32
-#define WLC_E_PFN_NET_FOUND 33
-#define WLC_E_PFN_NET_LOST 34
-#define WLC_E_RESET_COMPLETE 35
-#define WLC_E_JOIN_START 36
-#define WLC_E_ROAM_START 37
-#define WLC_E_ASSOC_START 38
-#define WLC_E_IBSS_ASSOC 39
-#define WLC_E_RADIO 40
-#define WLC_E_PSM_WATCHDOG 41
-#define WLC_E_PROBREQ_MSG 44
-#define WLC_E_SCAN_CONFIRM_IND 45
-#define WLC_E_PSK_SUP 46
-#define WLC_E_COUNTRY_CODE_CHANGED 47
-#define WLC_E_EXCEEDED_MEDIUM_TIME 48
-#define WLC_E_ICV_ERROR 49
-#define WLC_E_UNICAST_DECODE_ERROR 50
-#define WLC_E_MULTICAST_DECODE_ERROR 51
-#define WLC_E_TRACE 52
-#define WLC_E_IF 54
-#define WLC_E_RSSI 56
-#define WLC_E_PFN_SCAN_COMPLETE 57
-#define WLC_E_EXTLOG_MSG 58
-#define WLC_E_ACTION_FRAME 59
-#define WLC_E_ACTION_FRAME_COMPLETE 60
-#define WLC_E_PRE_ASSOC_IND 61
-#define WLC_E_PRE_REASSOC_IND 62
-#define WLC_E_CHANNEL_ADOPTED 63
-#define WLC_E_AP_STARTED 64
-#define WLC_E_DFS_AP_STOP 65
-#define WLC_E_DFS_AP_RESUME 66
-#define WLC_E_RESERVED1 67
-#define WLC_E_RESERVED2 68
-#define WLC_E_ESCAN_RESULT 69
-#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70
-#define WLC_E_DCS_REQUEST 73
-
-#define WLC_E_FIFO_CREDIT_MAP 74
-
-#define WLC_E_LAST 75
-
-typedef struct {
- uint event;
- const char *name;
-} bcmevent_name_t;
-
-extern const bcmevent_name_t bcmevent_names[];
-extern const int bcmevent_names_size;
-
-#define WLC_E_STATUS_SUCCESS 0
-#define WLC_E_STATUS_FAIL 1
-#define WLC_E_STATUS_TIMEOUT 2
-#define WLC_E_STATUS_NO_NETWORKS 3
-#define WLC_E_STATUS_ABORT 4
-#define WLC_E_STATUS_NO_ACK 5
-#define WLC_E_STATUS_UNSOLICITED 6
-#define WLC_E_STATUS_ATTEMPT 7
-#define WLC_E_STATUS_PARTIAL 8
-#define WLC_E_STATUS_NEWSCAN 9
-#define WLC_E_STATUS_NEWASSOC 10
-#define WLC_E_STATUS_11HQUIET 11
-#define WLC_E_STATUS_SUPPRESS 12
-#define WLC_E_STATUS_NOCHANS 13
-#define WLC_E_STATUS_CS_ABORT 15
-#define WLC_E_STATUS_ERROR 16
-
-#define WLC_E_REASON_INITIAL_ASSOC 0
-#define WLC_E_REASON_LOW_RSSI 1
-#define WLC_E_REASON_DEAUTH 2
-#define WLC_E_REASON_DISASSOC 3
-#define WLC_E_REASON_BCNS_LOST 4
-#define WLC_E_REASON_MINTXRATE 9
-#define WLC_E_REASON_TXFAIL 10
-
-#define WLC_E_REASON_FAST_ROAM_FAILED 5
-#define WLC_E_REASON_DIRECTED_ROAM 6
-#define WLC_E_REASON_TSPEC_REJECTED 7
-#define WLC_E_REASON_BETTER_AP 8
-
-#define WLC_E_PRUNE_ENCR_MISMATCH 1
-#define WLC_E_PRUNE_BCAST_BSSID 2
-#define WLC_E_PRUNE_MAC_DENY 3
-#define WLC_E_PRUNE_MAC_NA 4
-#define WLC_E_PRUNE_REG_PASSV 5
-#define WLC_E_PRUNE_SPCT_MGMT 6
-#define WLC_E_PRUNE_RADAR 7
-#define WLC_E_RSN_MISMATCH 8
-#define WLC_E_PRUNE_NO_COMMON_RATES 9
-#define WLC_E_PRUNE_BASIC_RATES 10
-#define WLC_E_PRUNE_CIPHER_NA 12
-#define WLC_E_PRUNE_KNOWN_STA 13
-#define WLC_E_PRUNE_WDS_PEER 15
-#define WLC_E_PRUNE_QBSS_LOAD 16
-#define WLC_E_PRUNE_HOME_AP 17
-
-#define WLC_E_SUP_OTHER 0
-#define WLC_E_SUP_DECRYPT_KEY_DATA 1
-#define WLC_E_SUP_BAD_UCAST_WEP128 2
-#define WLC_E_SUP_BAD_UCAST_WEP40 3
-#define WLC_E_SUP_UNSUP_KEY_LEN 4
-#define WLC_E_SUP_PW_KEY_CIPHER 5
-#define WLC_E_SUP_MSG3_TOO_MANY_IE 6
-#define WLC_E_SUP_MSG3_IE_MISMATCH 7
-#define WLC_E_SUP_NO_INSTALL_FLAG 8
-#define WLC_E_SUP_MSG3_NO_GTK 9
-#define WLC_E_SUP_GRP_KEY_CIPHER 10
-#define WLC_E_SUP_GRP_MSG1_NO_GTK 11
-#define WLC_E_SUP_GTK_DECRYPT_FAIL 12
-#define WLC_E_SUP_SEND_FAIL 13
-#define WLC_E_SUP_DEAUTH 14
-
-#define WLC_E_IF_ADD 1
-#define WLC_E_IF_DEL 2
-#define WLC_E_IF_CHANGE 3
-
-#define WLC_E_IF_ROLE_STA 0
-#define WLC_E_IF_ROLE_AP 1
-#define WLC_E_IF_ROLE_WDS 2
-
-#define WLC_E_LINK_BCN_LOSS 1
-#define WLC_E_LINK_DISASSOC 2
-#define WLC_E_LINK_ASSOC_REC 3
-#define WLC_E_LINK_BSSCFG_DIS 4
-
-#endif /* _BCMEVENT_H_ */
diff --git a/drivers/staging/brcm80211/include/sbchipc.h b/drivers/staging/brcm80211/include/sbchipc.h
deleted file mode 100644
index 8c01c638ab8..00000000000
--- a/drivers/staging/brcm80211/include/sbchipc.h
+++ /dev/null
@@ -1,1588 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _SBCHIPC_H
-#define _SBCHIPC_H
-
-#ifndef _LANGUAGE_ASSEMBLY
-
-/* cpp contortions to concatenate w/arg prescan */
-#ifndef PAD
-#define _PADLINE(line) pad ## line
-#define _XSTR(line) _PADLINE(line)
-#define PAD _XSTR(__LINE__)
-#endif /* PAD */
-
-typedef volatile struct {
- u32 chipid; /* 0x0 */
- u32 capabilities;
- u32 corecontrol; /* corerev >= 1 */
- u32 bist;
-
- /* OTP */
- u32 otpstatus; /* 0x10, corerev >= 10 */
- u32 otpcontrol;
- u32 otpprog;
- u32 otplayout; /* corerev >= 23 */
-
- /* Interrupt control */
- u32 intstatus; /* 0x20 */
- u32 intmask;
-
- /* Chip specific regs */
- u32 chipcontrol; /* 0x28, rev >= 11 */
- u32 chipstatus; /* 0x2c, rev >= 11 */
-
- /* Jtag Master */
- u32 jtagcmd; /* 0x30, rev >= 10 */
- u32 jtagir;
- u32 jtagdr;
- u32 jtagctrl;
-
- /* serial flash interface registers */
- u32 flashcontrol; /* 0x40 */
- u32 flashaddress;
- u32 flashdata;
- u32 PAD[1];
-
- /* Silicon backplane configuration broadcast control */
- u32 broadcastaddress; /* 0x50 */
- u32 broadcastdata;
-
- /* gpio - cleared only by power-on-reset */
- u32 gpiopullup; /* 0x58, corerev >= 20 */
- u32 gpiopulldown; /* 0x5c, corerev >= 20 */
- u32 gpioin; /* 0x60 */
- u32 gpioout; /* 0x64 */
- u32 gpioouten; /* 0x68 */
- u32 gpiocontrol; /* 0x6C */
- u32 gpiointpolarity; /* 0x70 */
- u32 gpiointmask; /* 0x74 */
-
- /* GPIO events corerev >= 11 */
- u32 gpioevent;
- u32 gpioeventintmask;
-
- /* Watchdog timer */
- u32 watchdog; /* 0x80 */
-
- /* GPIO events corerev >= 11 */
- u32 gpioeventintpolarity;
-
- /* GPIO based LED powersave registers corerev >= 16 */
- u32 gpiotimerval; /* 0x88 */
- u32 gpiotimeroutmask;
-
- /* clock control */
- u32 clockcontrol_n; /* 0x90 */
- u32 clockcontrol_sb; /* aka m0 */
- u32 clockcontrol_pci; /* aka m1 */
- u32 clockcontrol_m2; /* mii/uart/mipsref */
- u32 clockcontrol_m3; /* cpu */
- u32 clkdiv; /* corerev >= 3 */
- u32 gpiodebugsel; /* corerev >= 28 */
- u32 capabilities_ext; /* 0xac */
-
- /* pll delay registers (corerev >= 4) */
- u32 pll_on_delay; /* 0xb0 */
- u32 fref_sel_delay;
- u32 slow_clk_ctl; /* 5 < corerev < 10 */
- u32 PAD;
-
- /* Instaclock registers (corerev >= 10) */
- u32 system_clk_ctl; /* 0xc0 */
- u32 clkstatestretch;
- u32 PAD[2];
-
- /* Indirect backplane access (corerev >= 22) */
- u32 bp_addrlow; /* 0xd0 */
- u32 bp_addrhigh;
- u32 bp_data;
- u32 PAD;
- u32 bp_indaccess;
- u32 PAD[3];
-
- /* More clock dividers (corerev >= 32) */
- u32 clkdiv2;
- u32 PAD[2];
-
- /* In AI chips, pointer to erom */
- u32 eromptr; /* 0xfc */
-
- /* ExtBus control registers (corerev >= 3) */
- u32 pcmcia_config; /* 0x100 */
- u32 pcmcia_memwait;
- u32 pcmcia_attrwait;
- u32 pcmcia_iowait;
- u32 ide_config;
- u32 ide_memwait;
- u32 ide_attrwait;
- u32 ide_iowait;
- u32 prog_config;
- u32 prog_waitcount;
- u32 flash_config;
- u32 flash_waitcount;
- u32 SECI_config; /* 0x130 SECI configuration */
- u32 PAD[3];
-
- /* Enhanced Coexistence Interface (ECI) registers (corerev >= 21) */
- u32 eci_output; /* 0x140 */
- u32 eci_control;
- u32 eci_inputlo;
- u32 eci_inputmi;
- u32 eci_inputhi;
- u32 eci_inputintpolaritylo;
- u32 eci_inputintpolaritymi;
- u32 eci_inputintpolarityhi;
- u32 eci_intmasklo;
- u32 eci_intmaskmi;
- u32 eci_intmaskhi;
- u32 eci_eventlo;
- u32 eci_eventmi;
- u32 eci_eventhi;
- u32 eci_eventmasklo;
- u32 eci_eventmaskmi;
- u32 eci_eventmaskhi;
- u32 PAD[3];
-
- /* SROM interface (corerev >= 32) */
- u32 sromcontrol; /* 0x190 */
- u32 sromaddress;
- u32 sromdata;
- u32 PAD[17];
-
- /* Clock control and hardware workarounds (corerev >= 20) */
- u32 clk_ctl_st; /* 0x1e0 */
- u32 hw_war;
- u32 PAD[70];
-
- /* UARTs */
- u8 uart0data; /* 0x300 */
- u8 uart0imr;
- u8 uart0fcr;
- u8 uart0lcr;
- u8 uart0mcr;
- u8 uart0lsr;
- u8 uart0msr;
- u8 uart0scratch;
- u8 PAD[248]; /* corerev >= 1 */
-
- u8 uart1data; /* 0x400 */
- u8 uart1imr;
- u8 uart1fcr;
- u8 uart1lcr;
- u8 uart1mcr;
- u8 uart1lsr;
- u8 uart1msr;
- u8 uart1scratch;
- u32 PAD[126];
-
- /* PMU registers (corerev >= 20) */
- u32 pmucontrol; /* 0x600 */
- u32 pmucapabilities;
- u32 pmustatus;
- u32 res_state;
- u32 res_pending;
- u32 pmutimer;
- u32 min_res_mask;
- u32 max_res_mask;
- u32 res_table_sel;
- u32 res_dep_mask;
- u32 res_updn_timer;
- u32 res_timer;
- u32 clkstretch;
- u32 pmuwatchdog;
- u32 gpiosel; /* 0x638, rev >= 1 */
- u32 gpioenable; /* 0x63c, rev >= 1 */
- u32 res_req_timer_sel;
- u32 res_req_timer;
- u32 res_req_mask;
- u32 PAD;
- u32 chipcontrol_addr; /* 0x650 */
- u32 chipcontrol_data; /* 0x654 */
- u32 regcontrol_addr;
- u32 regcontrol_data;
- u32 pllcontrol_addr;
- u32 pllcontrol_data;
- u32 pmustrapopt; /* 0x668, corerev >= 28 */
- u32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */
- u32 PAD[100];
- u16 sromotp[768];
-} chipcregs_t;
-
-#endif /* _LANGUAGE_ASSEMBLY */
-
-#if defined(__BIG_ENDIAN) && defined(BCMHND74K)
-/* Selective swapped defines for those registers we need in
- * big-endian code.
- */
-#define CC_CHIPID 4
-#define CC_CAPABILITIES 0
-#define CC_CHIPST 0x28
-#define CC_EROMPTR 0xf8
-
-#else /* !__BIG_ENDIAN || !BCMHND74K */
-
-#define CC_CHIPID 0
-#define CC_CAPABILITIES 4
-#define CC_CHIPST 0x2c
-#define CC_EROMPTR 0xfc
-
-#endif /* __BIG_ENDIAN && BCMHND74K */
-
-#define CC_OTPST 0x10
-#define CC_JTAGCMD 0x30
-#define CC_JTAGIR 0x34
-#define CC_JTAGDR 0x38
-#define CC_JTAGCTRL 0x3c
-#define CC_GPIOPU 0x58
-#define CC_GPIOPD 0x5c
-#define CC_GPIOIN 0x60
-#define CC_GPIOOUT 0x64
-#define CC_GPIOOUTEN 0x68
-#define CC_GPIOCTRL 0x6c
-#define CC_GPIOPOL 0x70
-#define CC_GPIOINTM 0x74
-#define CC_WATCHDOG 0x80
-#define CC_CLKC_N 0x90
-#define CC_CLKC_M0 0x94
-#define CC_CLKC_M1 0x98
-#define CC_CLKC_M2 0x9c
-#define CC_CLKC_M3 0xa0
-#define CC_CLKDIV 0xa4
-#define CC_SYS_CLK_CTL 0xc0
-#define CC_CLK_CTL_ST SI_CLK_CTL_ST
-#define PMU_CTL 0x600
-#define PMU_CAP 0x604
-#define PMU_ST 0x608
-#define PMU_RES_STATE 0x60c
-#define PMU_TIMER 0x614
-#define PMU_MIN_RES_MASK 0x618
-#define PMU_MAX_RES_MASK 0x61c
-#define CC_CHIPCTL_ADDR 0x650
-#define CC_CHIPCTL_DATA 0x654
-#define PMU_REG_CONTROL_ADDR 0x658
-#define PMU_REG_CONTROL_DATA 0x65C
-#define PMU_PLL_CONTROL_ADDR 0x660
-#define PMU_PLL_CONTROL_DATA 0x664
-#define CC_SROM_OTP 0x800 /* SROM/OTP address space */
-
-/* chipid */
-#define CID_ID_MASK 0x0000ffff /* Chip Id mask */
-#define CID_REV_MASK 0x000f0000 /* Chip Revision mask */
-#define CID_REV_SHIFT 16 /* Chip Revision shift */
-#define CID_PKG_MASK 0x00f00000 /* Package Option mask */
-#define CID_PKG_SHIFT 20 /* Package Option shift */
-#define CID_CC_MASK 0x0f000000 /* CoreCount (corerev >= 4) */
-#define CID_CC_SHIFT 24
-#define CID_TYPE_MASK 0xf0000000 /* Chip Type */
-#define CID_TYPE_SHIFT 28
-
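The CID_* masks and shifts above fully describe the chipid register layout. As an illustration only (these helpers are not part of the original header), the raw register value could be decoded like this:

static inline void cc_decode_chipid(u32 chipid, u32 *id, u32 *rev, u32 *pkg)
{
	*id = chipid & CID_ID_MASK;				/* chip id */
	*rev = (chipid & CID_REV_MASK) >> CID_REV_SHIFT;	/* chip revision */
	*pkg = (chipid & CID_PKG_MASK) >> CID_PKG_SHIFT;	/* package option */
}
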
-/* capabilities */
-#define CC_CAP_UARTS_MASK 0x00000003 /* Number of UARTs */
-#define CC_CAP_MIPSEB 0x00000004 /* MIPS is in big-endian mode */
-#define CC_CAP_UCLKSEL 0x00000018 /* UARTs clock select */
-#define CC_CAP_UINTCLK 0x00000008 /* UARTs are driven by internal divided clock */
-#define CC_CAP_UARTGPIO 0x00000020 /* UARTs own GPIOs 15:12 */
-#define CC_CAP_EXTBUS_MASK 0x000000c0 /* External bus mask */
-#define CC_CAP_EXTBUS_NONE 0x00000000 /* No ExtBus present */
-#define CC_CAP_EXTBUS_FULL 0x00000040 /* ExtBus: PCMCIA, IDE & Prog */
-#define CC_CAP_EXTBUS_PROG 0x00000080 /* ExtBus: ProgIf only */
-#define CC_CAP_FLASH_MASK 0x00000700 /* Type of flash */
-#define CC_CAP_PLL_MASK 0x00038000 /* Type of PLL */
-#define CC_CAP_PWR_CTL 0x00040000 /* Power control */
-#define CC_CAP_OTPSIZE 0x00380000 /* OTP Size (0 = none) */
-#define CC_CAP_OTPSIZE_SHIFT 19 /* OTP Size shift */
-#define CC_CAP_OTPSIZE_BASE 5 /* OTP Size base */
-#define CC_CAP_JTAGP 0x00400000 /* JTAG Master Present */
-#define CC_CAP_ROM 0x00800000 /* Internal boot rom active */
-#define CC_CAP_BKPLN64 0x08000000 /* 64-bit backplane */
-#define CC_CAP_PMU 0x10000000 /* PMU Present, rev >= 20 */
-#define CC_CAP_SROM 0x40000000 /* Srom Present, rev >= 32 */
-#define CC_CAP_NFLASH 0x80000000 /* Nand flash present, rev >= 35 */
-
-#define CC_CAP2_SECI 0x00000001 /* SECI Present, rev >= 36 */
-#define CC_CAP2_GSIO 0x00000002 /* GSIO (spi/i2c) present, rev >= 37 */
-
-/* PLL type */
-#define PLL_NONE 0x00000000
-#define PLL_TYPE1 0x00010000 /* 48MHz base, 3 dividers */
-#define PLL_TYPE2 0x00020000 /* 48MHz, 4 dividers */
-#define PLL_TYPE3 0x00030000 /* 25MHz, 2 dividers */
-#define PLL_TYPE4 0x00008000 /* 48MHz, 4 dividers */
-#define PLL_TYPE5 0x00018000 /* 25MHz, 4 dividers */
-#define PLL_TYPE6 0x00028000 /* 100/200 or 120/240 only */
-#define PLL_TYPE7 0x00038000 /* 25MHz, 4 dividers */
-
-/* ILP clock */
-#define ILP_CLOCK 32000
-
-/* ALP clock on pre-PMU chips */
-#define ALP_CLOCK 20000000
-
-/* HT clock */
-#define HT_CLOCK 80000000
-
-/* corecontrol */
-#define CC_UARTCLKO 0x00000001 /* Drive UART with internal clock */
-#define CC_SE 0x00000002 /* sync clk out enable (corerev >= 3) */
-#define CC_UARTCLKEN 0x00000008 /* enable UART Clock (corerev >= 21) */
-
-/* chipcontrol */
-#define CHIPCTRL_4321A0_DEFAULT 0x3a4
-#define CHIPCTRL_4321A1_DEFAULT 0x0a4
-#define CHIPCTRL_4321_PLL_DOWN 0x800000 /* serdes PLL down override */
-
-/* Fields in the otpstatus register in rev >= 21 */
-#define OTPS_OL_MASK 0x000000ff
-#define OTPS_OL_MFG 0x00000001 /* manuf row is locked */
-#define OTPS_OL_OR1 0x00000002 /* otp redundancy row 1 is locked */
-#define OTPS_OL_OR2 0x00000004 /* otp redundancy row 2 is locked */
-#define OTPS_OL_GU 0x00000008 /* general use region is locked */
-#define OTPS_GUP_MASK 0x00000f00
-#define OTPS_GUP_SHIFT 8
-#define OTPS_GUP_HW 0x00000100 /* h/w subregion is programmed */
-#define OTPS_GUP_SW 0x00000200 /* s/w subregion is programmed */
-#define OTPS_GUP_CI 0x00000400 /* chipid/pkgopt subregion is programmed */
-#define OTPS_GUP_FUSE 0x00000800 /* fuse subregion is programmed */
-#define OTPS_READY 0x00001000
-#define OTPS_RV(x) (1 << (16 + (x))) /* redundancy entry valid */
-#define OTPS_RV_MASK 0x0fff0000
-
-/* Fields in the otpcontrol register in rev >= 21 */
-#define OTPC_PROGSEL 0x00000001
-#define OTPC_PCOUNT_MASK 0x0000000e
-#define OTPC_PCOUNT_SHIFT 1
-#define OTPC_VSEL_MASK 0x000000f0
-#define OTPC_VSEL_SHIFT 4
-#define OTPC_TMM_MASK 0x00000700
-#define OTPC_TMM_SHIFT 8
-#define OTPC_ODM 0x00000800
-#define OTPC_PROGEN 0x80000000
-
-/* Fields in otpprog in rev >= 21 and HND OTP */
-#define OTPP_COL_MASK 0x000000ff
-#define OTPP_COL_SHIFT 0
-#define OTPP_ROW_MASK 0x0000ff00
-#define OTPP_ROW_SHIFT 8
-#define OTPP_OC_MASK 0x0f000000
-#define OTPP_OC_SHIFT 24
-#define OTPP_READERR 0x10000000
-#define OTPP_VALUE_MASK 0x20000000
-#define OTPP_VALUE_SHIFT 29
-#define OTPP_START_BUSY 0x80000000
-#define OTPP_READ 0x40000000 /* HND OTP */
-
-/* otplayout reg corerev >= 36 */
-#define OTP_CISFORMAT_NEW 0x80000000
-
-/* Opcodes for OTPP_OC field */
-#define OTPPOC_READ 0
-#define OTPPOC_BIT_PROG 1
-#define OTPPOC_VERIFY 3
-#define OTPPOC_INIT 4
-#define OTPPOC_SET 5
-#define OTPPOC_RESET 6
-#define OTPPOC_OCST 7
-#define OTPPOC_ROW_LOCK 8
-#define OTPPOC_PRESCN_TEST 9
-
-/* Jtagm characteristics that appeared at a given corerev */
-#define JTAGM_CREV_OLD 10 /* Old command set, 16bit max IR */
-#define JTAGM_CREV_IRP 22 /* Able to do pause-ir */
-#define JTAGM_CREV_RTI 28 /* Able to do return-to-idle */
-
-/* jtagcmd */
-#define JCMD_START 0x80000000
-#define JCMD_BUSY 0x80000000
-#define JCMD_STATE_MASK 0x60000000
-#define JCMD_STATE_TLR 0x00000000 /* Test-logic-reset */
-#define JCMD_STATE_PIR 0x20000000 /* Pause IR */
-#define JCMD_STATE_PDR 0x40000000 /* Pause DR */
-#define JCMD_STATE_RTI 0x60000000 /* Run-test-idle */
-#define JCMD0_ACC_MASK 0x0000f000
-#define JCMD0_ACC_IRDR 0x00000000
-#define JCMD0_ACC_DR 0x00001000
-#define JCMD0_ACC_IR 0x00002000
-#define JCMD0_ACC_RESET 0x00003000
-#define JCMD0_ACC_IRPDR 0x00004000
-#define JCMD0_ACC_PDR 0x00005000
-#define JCMD0_IRW_MASK 0x00000f00
-#define JCMD_ACC_MASK 0x000f0000 /* Changes for corerev 11 */
-#define JCMD_ACC_IRDR 0x00000000
-#define JCMD_ACC_DR 0x00010000
-#define JCMD_ACC_IR 0x00020000
-#define JCMD_ACC_RESET 0x00030000
-#define JCMD_ACC_IRPDR 0x00040000
-#define JCMD_ACC_PDR 0x00050000
-#define JCMD_ACC_PIR 0x00060000
-#define JCMD_ACC_IRDR_I 0x00070000 /* rev 28: return to run-test-idle */
-#define JCMD_ACC_DR_I 0x00080000 /* rev 28: return to run-test-idle */
-#define JCMD_IRW_MASK 0x00001f00
-#define JCMD_IRW_SHIFT 8
-#define JCMD_DRW_MASK 0x0000003f
-
-/* jtagctrl */
-#define JCTRL_FORCE_CLK 4 /* Force clock */
-#define JCTRL_EXT_EN 2 /* Enable external targets */
-#define JCTRL_EN 1 /* Enable Jtag master */
-
-/* Fields in clkdiv */
-#define CLKD_SFLASH 0x0f000000
-#define CLKD_SFLASH_SHIFT 24
-#define CLKD_OTP 0x000f0000
-#define CLKD_OTP_SHIFT 16
-#define CLKD_JTAG 0x00000f00
-#define CLKD_JTAG_SHIFT 8
-#define CLKD_UART 0x000000ff
-
-#define CLKD2_SROM 0x00000003
-
-/* intstatus/intmask */
-#define CI_GPIO 0x00000001 /* gpio intr */
-#define CI_EI 0x00000002 /* extif intr (corerev >= 3) */
-#define CI_TEMP 0x00000004 /* temp. ctrl intr (corerev >= 15) */
-#define CI_SIRQ 0x00000008 /* serial IRQ intr (corerev >= 15) */
-#define CI_PMU 0x00000020 /* pmu intr (corerev >= 21) */
-#define CI_UART 0x00000040 /* uart intr (corerev >= 21) */
-#define CI_WDRESET 0x80000000 /* watchdog reset occurred */
-
-/* slow_clk_ctl */
-#define SCC_SS_MASK 0x00000007 /* slow clock source mask */
-#define SCC_SS_LPO 0x00000000 /* source of slow clock is LPO */
-#define SCC_SS_XTAL 0x00000001 /* source of slow clock is crystal */
-#define SCC_SS_PCI 0x00000002 /* source of slow clock is PCI */
-#define SCC_LF 0x00000200 /* LPOFreqSel, 1: 160 kHz, 0: 32 kHz */
-#define SCC_LP 0x00000400 /* LPOPowerDown, 1: LPO is disabled,
- * 0: LPO is enabled
- */
-#define SCC_FS 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock,
- * 0: power logic control
- */
-#define SCC_IP 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors
- * PLL clock disable requests from core
- */
-#define SCC_XC 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't
- * disable crystal when appropriate
- */
-#define SCC_XP 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */
-#define SCC_CD_MASK 0xffff0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */
-#define SCC_CD_SHIFT 16
-
-/* system_clk_ctl */
-#define SYCC_IE 0x00000001 /* ILPen: Enable Idle Low Power */
-#define SYCC_AE 0x00000002 /* ALPen: Enable Active Low Power */
-#define SYCC_FP 0x00000004 /* ForcePLLOn */
-#define SYCC_AR 0x00000008 /* Force ALP (or HT if ALPen is not set) */
-#define SYCC_HR 0x00000010 /* Force HT */
-#define SYCC_CD_MASK 0xffff0000 /* ClkDiv (ILP = 1/(4 * (divisor + 1))) */
-#define SYCC_CD_SHIFT 16
-
-/* Indirect backplane access */
-#define BPIA_BYTEEN 0x0000000f
-#define BPIA_SZ1 0x00000001
-#define BPIA_SZ2 0x00000003
-#define BPIA_SZ4 0x00000007
-#define BPIA_SZ8 0x0000000f
-#define BPIA_WRITE 0x00000100
-#define BPIA_START 0x00000200
-#define BPIA_BUSY 0x00000200
-#define BPIA_ERROR 0x00000400
-
-/* pcmcia/prog/flash_config */
-#define CF_EN 0x00000001 /* enable */
-#define CF_EM_MASK 0x0000000e /* mode */
-#define CF_EM_SHIFT 1
-#define CF_EM_FLASH 0 /* flash/asynchronous mode */
-#define CF_EM_SYNC 2 /* synchronous mode */
-#define CF_EM_PCMCIA 4 /* pcmcia mode */
-#define CF_DS 0x00000010 /* destsize: 0=8bit, 1=16bit */
-#define CF_BS 0x00000020 /* byteswap */
-#define CF_CD_MASK 0x000000c0 /* clock divider */
-#define CF_CD_SHIFT 6
-#define CF_CD_DIV2 0x00000000 /* backplane/2 */
-#define CF_CD_DIV3 0x00000040 /* backplane/3 */
-#define CF_CD_DIV4 0x00000080 /* backplane/4 */
-#define CF_CE 0x00000100 /* clock enable */
-#define CF_SB 0x00000200 /* size/bytestrobe (synch only) */
-
-/* pcmcia_memwait */
-#define PM_W0_MASK 0x0000003f /* waitcount0 */
-#define PM_W1_MASK 0x00001f00 /* waitcount1 */
-#define PM_W1_SHIFT 8
-#define PM_W2_MASK 0x001f0000 /* waitcount2 */
-#define PM_W2_SHIFT 16
-#define PM_W3_MASK 0x1f000000 /* waitcount3 */
-#define PM_W3_SHIFT 24
-
-/* pcmcia_attrwait */
-#define PA_W0_MASK 0x0000003f /* waitcount0 */
-#define PA_W1_MASK 0x00001f00 /* waitcount1 */
-#define PA_W1_SHIFT 8
-#define PA_W2_MASK 0x001f0000 /* waitcount2 */
-#define PA_W2_SHIFT 16
-#define PA_W3_MASK 0x1f000000 /* waitcount3 */
-#define PA_W3_SHIFT 24
-
-/* pcmcia_iowait */
-#define PI_W0_MASK 0x0000003f /* waitcount0 */
-#define PI_W1_MASK 0x00001f00 /* waitcount1 */
-#define PI_W1_SHIFT 8
-#define PI_W2_MASK 0x001f0000 /* waitcount2 */
-#define PI_W2_SHIFT 16
-#define PI_W3_MASK 0x1f000000 /* waitcount3 */
-#define PI_W3_SHIFT 24
-
-/* prog_waitcount */
-#define PW_W0_MASK 0x0000001f /* waitcount0 */
-#define PW_W1_MASK 0x00001f00 /* waitcount1 */
-#define PW_W1_SHIFT 8
-#define PW_W2_MASK 0x001f0000 /* waitcount2 */
-#define PW_W2_SHIFT 16
-#define PW_W3_MASK 0x1f000000 /* waitcount3 */
-#define PW_W3_SHIFT 24
-
-#define PW_W0 0x0000000c
-#define PW_W1 0x00000a00
-#define PW_W2 0x00020000
-#define PW_W3 0x01000000
-
-/* flash_waitcount */
-#define FW_W0_MASK 0x0000003f /* waitcount0 */
-#define FW_W1_MASK 0x00001f00 /* waitcount1 */
-#define FW_W1_SHIFT 8
-#define FW_W2_MASK 0x001f0000 /* waitcount2 */
-#define FW_W2_SHIFT 16
-#define FW_W3_MASK 0x1f000000 /* waitcount3 */
-#define FW_W3_SHIFT 24
-
-/* When Srom support present, fields in sromcontrol */
-#define SRC_START 0x80000000
-#define SRC_BUSY 0x80000000
-#define SRC_OPCODE 0x60000000
-#define SRC_OP_READ 0x00000000
-#define SRC_OP_WRITE 0x20000000
-#define SRC_OP_WRDIS 0x40000000
-#define SRC_OP_WREN 0x60000000
-#define SRC_OTPSEL 0x00000010
-#define SRC_LOCK 0x00000008
-#define SRC_SIZE_MASK 0x00000006
-#define SRC_SIZE_1K 0x00000000
-#define SRC_SIZE_4K 0x00000002
-#define SRC_SIZE_16K 0x00000004
-#define SRC_SIZE_SHIFT 1
-#define SRC_PRESENT 0x00000001
-
-/* Fields in pmucontrol */
-#define PCTL_ILP_DIV_MASK 0xffff0000
-#define PCTL_ILP_DIV_SHIFT 16
-#define PCTL_PLL_PLLCTL_UPD 0x00000400 /* rev 2 */
-#define PCTL_NOILP_ON_WAIT 0x00000200 /* rev 1 */
-#define PCTL_HT_REQ_EN 0x00000100
-#define PCTL_ALP_REQ_EN 0x00000080
-#define PCTL_XTALFREQ_MASK 0x0000007c
-#define PCTL_XTALFREQ_SHIFT 2
-#define PCTL_ILP_DIV_EN 0x00000002
-#define PCTL_LPO_SEL 0x00000001
-
-/* Fields in clkstretch */
-#define CSTRETCH_HT 0xffff0000
-#define CSTRETCH_ALP 0x0000ffff
-
-/* gpiotimerval */
-#define GPIO_ONTIME_SHIFT 16
-
-/* clockcontrol_n */
-#define CN_N1_MASK 0x3f /* n1 control */
-#define CN_N2_MASK 0x3f00 /* n2 control */
-#define CN_N2_SHIFT 8
-#define CN_PLLC_MASK 0xf0000 /* pll control */
-#define CN_PLLC_SHIFT 16
-
-/* clockcontrol_sb/pci/uart */
-#define CC_M1_MASK 0x3f /* m1 control */
-#define CC_M2_MASK 0x3f00 /* m2 control */
-#define CC_M2_SHIFT 8
-#define CC_M3_MASK 0x3f0000 /* m3 control */
-#define CC_M3_SHIFT 16
-#define CC_MC_MASK 0x1f000000 /* mux control */
-#define CC_MC_SHIFT 24
-
-/* N3M Clock control magic field values */
-#define CC_F6_2 0x02 /* A factor of 2 in */
-#define CC_F6_3 0x03 /* 6-bit fields like */
-#define CC_F6_4 0x05 /* N1, M1 or M3 */
-#define CC_F6_5 0x09
-#define CC_F6_6 0x11
-#define CC_F6_7 0x21
-
-#define CC_F5_BIAS 5 /* 5-bit fields get this added */
-
-#define CC_MC_BYPASS 0x08
-#define CC_MC_M1 0x04
-#define CC_MC_M1M2 0x02
-#define CC_MC_M1M2M3 0x01
-#define CC_MC_M1M3 0x11
-
-/* Type 2 Clock control magic field values */
-#define CC_T2_BIAS 2 /* n1, n2, m1 & m3 bias */
-#define CC_T2M2_BIAS 3 /* m2 bias */
-
-#define CC_T2MC_M1BYP 1
-#define CC_T2MC_M2BYP 2
-#define CC_T2MC_M3BYP 4
-
-/* Type 6 Clock control magic field values */
-#define CC_T6_MMASK 1 /* bits of interest in m */
-#define CC_T6_M0 120000000 /* sb clock for m = 0 */
-#define CC_T6_M1 100000000 /* sb clock for m = 1 */
-#define SB2MIPS_T6(sb) (2 * (sb))
-
-/* Common clock base */
-#define CC_CLOCK_BASE1 24000000 /* Half the clock freq */
-#define CC_CLOCK_BASE2 12500000 /* Alternate crystal on some PLLs */
-
-/* Clock control values for 200MHz in 5350 */
-#define CLKC_5350_N 0x0311
-#define CLKC_5350_M 0x04020009
-
-/* Flash types in the chipcommon capabilities register */
-#define FLASH_NONE 0x000 /* No flash */
-#define SFLASH_ST 0x100 /* ST serial flash */
-#define SFLASH_AT 0x200 /* Atmel serial flash */
-#define PFLASH 0x700 /* Parallel flash */
-
-/* Bits in the ExtBus config registers */
-#define CC_CFG_EN 0x0001 /* Enable */
-#define CC_CFG_EM_MASK 0x000e /* Extif Mode */
-#define CC_CFG_EM_ASYNC 0x0000 /* Async/Parallel flash */
-#define CC_CFG_EM_SYNC 0x0002 /* Synchronous */
-#define CC_CFG_EM_PCMCIA 0x0004 /* PCMCIA */
-#define CC_CFG_EM_IDE 0x0006 /* IDE */
-#define CC_CFG_DS 0x0010 /* Data size, 0=8bit, 1=16bit */
-#define CC_CFG_CD_MASK 0x00e0 /* Sync: Clock divisor, rev >= 20 */
-#define CC_CFG_CE 0x0100 /* Sync: Clock enable, rev >= 20 */
-#define CC_CFG_SB 0x0200 /* Sync: Size/Bytestrobe, rev >= 20 */
-#define CC_CFG_IS 0x0400 /* Extif Sync Clk Select, rev >= 20 */
-
-/* ExtBus address space */
-#define CC_EB_BASE 0x1a000000 /* Chipc ExtBus base address */
-#define CC_EB_PCMCIA_MEM 0x1a000000 /* PCMCIA 0 memory base address */
-#define CC_EB_PCMCIA_IO 0x1a200000 /* PCMCIA 0 I/O base address */
-#define CC_EB_PCMCIA_CFG 0x1a400000 /* PCMCIA 0 config base address */
-#define CC_EB_IDE 0x1a800000 /* IDE memory base */
-#define CC_EB_PCMCIA1_MEM 0x1a800000 /* PCMCIA 1 memory base address */
-#define CC_EB_PCMCIA1_IO 0x1aa00000 /* PCMCIA 1 I/O base address */
-#define CC_EB_PCMCIA1_CFG 0x1ac00000 /* PCMCIA 1 config base address */
-#define CC_EB_PROGIF 0x1b000000 /* ProgIF Async/Sync base address */
-
-/* Start/busy bit in flashcontrol */
-#define SFLASH_OPCODE 0x000000ff
-#define SFLASH_ACTION 0x00000700
-#define SFLASH_CS_ACTIVE 0x00001000 /* Chip Select Active, rev >= 20 */
-#define SFLASH_START 0x80000000
-#define SFLASH_BUSY SFLASH_START
-
-/* flashcontrol action codes */
-#define SFLASH_ACT_OPONLY 0x0000 /* Issue opcode only */
-#define SFLASH_ACT_OP1D 0x0100 /* opcode + 1 data byte */
-#define SFLASH_ACT_OP3A 0x0200 /* opcode + 3 addr bytes */
-#define SFLASH_ACT_OP3A1D 0x0300 /* opcode + 3 addr & 1 data bytes */
-#define SFLASH_ACT_OP3A4D 0x0400 /* opcode + 3 addr & 4 data bytes */
-#define SFLASH_ACT_OP3A4X4D 0x0500 /* opcode + 3 addr, 4 don't care & 4 data bytes */
-#define SFLASH_ACT_OP3A1X4D 0x0700 /* opcode + 3 addr, 1 don't care & 4 data bytes */
-
-/* flashcontrol action+opcodes for ST flashes */
-#define SFLASH_ST_WREN 0x0006 /* Write Enable */
-#define SFLASH_ST_WRDIS 0x0004 /* Write Disable */
-#define SFLASH_ST_RDSR 0x0105 /* Read Status Register */
-#define SFLASH_ST_WRSR 0x0101 /* Write Status Register */
-#define SFLASH_ST_READ 0x0303 /* Read Data Bytes */
-#define SFLASH_ST_PP 0x0302 /* Page Program */
-#define SFLASH_ST_SE 0x02d8 /* Sector Erase */
-#define SFLASH_ST_BE 0x00c7 /* Bulk Erase */
-#define SFLASH_ST_DP 0x00b9 /* Deep Power-down */
-#define SFLASH_ST_RES 0x03ab /* Read Electronic Signature */
-#define SFLASH_ST_CSA 0x1000 /* Keep chip select asserted */
-#define SFLASH_ST_SSE 0x0220 /* Sub-sector Erase */
-
-/* Status register bits for ST flashes */
-#define SFLASH_ST_WIP 0x01 /* Write In Progress */
-#define SFLASH_ST_WEL 0x02 /* Write Enable Latch */
-#define SFLASH_ST_BP_MASK 0x1c /* Block Protect */
-#define SFLASH_ST_BP_SHIFT 2
-#define SFLASH_ST_SRWD 0x80 /* Status Register Write Disable */
-
-/* flashcontrol action+opcodes for Atmel flashes */
-#define SFLASH_AT_READ 0x07e8
-#define SFLASH_AT_PAGE_READ 0x07d2
-#define SFLASH_AT_BUF1_READ
-#define SFLASH_AT_BUF2_READ
-#define SFLASH_AT_STATUS 0x01d7
-#define SFLASH_AT_BUF1_WRITE 0x0384
-#define SFLASH_AT_BUF2_WRITE 0x0387
-#define SFLASH_AT_BUF1_ERASE_PROGRAM 0x0283
-#define SFLASH_AT_BUF2_ERASE_PROGRAM 0x0286
-#define SFLASH_AT_BUF1_PROGRAM 0x0288
-#define SFLASH_AT_BUF2_PROGRAM 0x0289
-#define SFLASH_AT_PAGE_ERASE 0x0281
-#define SFLASH_AT_BLOCK_ERASE 0x0250
-#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382
-#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385
-#define SFLASH_AT_BUF1_LOAD 0x0253
-#define SFLASH_AT_BUF2_LOAD 0x0255
-#define SFLASH_AT_BUF1_COMPARE 0x0260
-#define SFLASH_AT_BUF2_COMPARE 0x0261
-#define SFLASH_AT_BUF1_REPROGRAM 0x0258
-#define SFLASH_AT_BUF2_REPROGRAM 0x0259
-
-/* Status register bits for Atmel flashes */
-#define SFLASH_AT_READY 0x80
-#define SFLASH_AT_MISMATCH 0x40
-#define SFLASH_AT_ID_MASK 0x38
-#define SFLASH_AT_ID_SHIFT 3
-
-/*
- * These are the UART port assignments, expressed as offsets from the base
- * register. These assignments should hold for any serial port based on
- * an 8250, 16450, or 16550(A).
- */
-
-#define UART_RX 0 /* In: Receive buffer (DLAB=0) */
-#define UART_TX 0 /* Out: Transmit buffer (DLAB=0) */
-#define UART_DLL 0 /* Out: Divisor Latch Low (DLAB=1) */
-#define UART_IER 1 /* In/Out: Interrupt Enable Register (DLAB=0) */
-#define UART_DLM 1 /* Out: Divisor Latch High (DLAB=1) */
-#define UART_IIR 2 /* In: Interrupt Identity Register */
-#define UART_FCR 2 /* Out: FIFO Control Register */
-#define UART_LCR 3 /* Out: Line Control Register */
-#define UART_MCR 4 /* Out: Modem Control Register */
-#define UART_LSR 5 /* In: Line Status Register */
-#define UART_MSR 6 /* In: Modem Status Register */
-#define UART_SCR 7 /* I/O: Scratch Register */
-#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */
-#define UART_LCR_WLEN8 0x03 /* Word length: 8 bits */
-#define UART_MCR_OUT2 0x08 /* MCR GPIO out 2 */
-#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */
-#define UART_LSR_RX_FIFO 0x80 /* Receive FIFO error */
-#define UART_LSR_TDHR 0x40 /* Data-hold-register empty */
-#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */
-#define UART_LSR_BREAK 0x10 /* Break interrupt */
-#define UART_LSR_FRAMING 0x08 /* Framing error */
-#define UART_LSR_PARITY 0x04 /* Parity error */
-#define UART_LSR_OVERRUN 0x02 /* Overrun error */
-#define UART_LSR_RXRDY 0x01 /* Receiver ready */
-#define UART_FCR_FIFO_ENABLE 1 /* FIFO control register bit controlling FIFO enable/disable */
-
-/* Interrupt Identity Register (IIR) bits */
-#define UART_IIR_FIFO_MASK 0xc0 /* IIR FIFO disable/enabled mask */
-#define UART_IIR_INT_MASK 0xf /* IIR interrupt ID source */
-#define UART_IIR_MDM_CHG 0x0 /* Modem status changed */
-#define UART_IIR_NOINT 0x1 /* No interrupt pending */
-#define UART_IIR_THRE 0x2 /* THR empty */
-#define UART_IIR_RCVD_DATA 0x4 /* Received data available */
-#define UART_IIR_RCVR_STATUS 0x6 /* Receiver status */
-#define UART_IIR_CHAR_TIME 0xc /* Character time */
-
-/* Interrupt Enable Register (IER) bits */
-#define UART_IER_EDSSI 8 /* enable modem status interrupt */
-#define UART_IER_ELSI 4 /* enable receiver line status interrupt */
-#define UART_IER_ETBEI 2 /* enable transmitter holding register empty interrupt */
-#define UART_IER_ERBFI 1 /* enable data available interrupt */
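As a hedged sketch (not taken from the original driver), the 8250-style offsets and LSR bits above are enough for a minimal polled transmit; `base` is assumed to point at one of the byte-wide UART register blocks (e.g. uart0data at offset 0x300), accessed as plain memory for simplicity:

static void cc_uart_putc(volatile u8 *base, u8 c)
{
	/* spin until the transmit holding register is empty */
	while (!(base[UART_LSR] & UART_LSR_THRE))
		;
	base[UART_TX] = c;
}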
-
-/* pmustatus */
-#define PST_EXTLPOAVAIL 0x0100
-#define PST_WDRESET 0x0080
-#define PST_INTPEND 0x0040
-#define PST_SBCLKST 0x0030
-#define PST_SBCLKST_ILP 0x0010
-#define PST_SBCLKST_ALP 0x0020
-#define PST_SBCLKST_HT 0x0030
-#define PST_ALPAVAIL 0x0008
-#define PST_HTAVAIL 0x0004
-#define PST_RESINIT 0x0003
-
-/* pmucapabilities */
-#define PCAP_REV_MASK 0x000000ff
-#define PCAP_RC_MASK 0x00001f00
-#define PCAP_RC_SHIFT 8
-#define PCAP_TC_MASK 0x0001e000
-#define PCAP_TC_SHIFT 13
-#define PCAP_PC_MASK 0x001e0000
-#define PCAP_PC_SHIFT 17
-#define PCAP_VC_MASK 0x01e00000
-#define PCAP_VC_SHIFT 21
-#define PCAP_CC_MASK 0x1e000000
-#define PCAP_CC_SHIFT 25
-#define PCAP5_PC_MASK 0x003e0000 /* PMU corerev >= 5 */
-#define PCAP5_PC_SHIFT 17
-#define PCAP5_VC_MASK 0x07c00000
-#define PCAP5_VC_SHIFT 22
-#define PCAP5_CC_MASK 0xf8000000
-#define PCAP5_CC_SHIFT 27
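For illustration (hypothetical helpers, not in the original header), the PMU revision and resource count are extracted from pmucapabilities with the masks above:

static inline u32 pmu_rev(u32 pmucaps)
{
	return pmucaps & PCAP_REV_MASK;			/* PMU core revision */
}

static inline u32 pmu_res_count(u32 pmucaps)
{
	return (pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;	/* number of resources */
}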
-
-/* PMU Resource Request Timer registers */
-/* This is based on PmuRev0 */
-#define PRRT_TIME_MASK 0x03ff
-#define PRRT_INTEN 0x0400
-#define PRRT_REQ_ACTIVE 0x0800
-#define PRRT_ALP_REQ 0x1000
-#define PRRT_HT_REQ 0x2000
-
-/* PMU resource bit position */
-#define PMURES_BIT(bit) (1 << (bit))
-
-/* PMU resource number limit */
-#define PMURES_MAX_RESNUM 30
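As a hedged example of how these are combined (resource numbers are chip-specific; the RES4322_* values used here are defined further down in this header):

	/* request crystal power-up and ALP clock on a 4322-class chip */
	u32 min_mask = PMURES_BIT(RES4322_XTAL_PU) | PMURES_BIT(RES4322_ALP_AVAIL);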
-
-/* PMU chip control0 register */
-#define PMU_CHIPCTL0 0
-
-/* PMU chip control1 register */
-#define PMU_CHIPCTL1 1
-#define PMU_CC1_RXC_DLL_BYPASS 0x00010000
-
-#define PMU_CC1_IF_TYPE_MASK 0x00000030
-#define PMU_CC1_IF_TYPE_RMII 0x00000000
-#define PMU_CC1_IF_TYPE_MII 0x00000010
-#define PMU_CC1_IF_TYPE_RGMII 0x00000020
-
-#define PMU_CC1_SW_TYPE_MASK 0x000000c0
-#define PMU_CC1_SW_TYPE_EPHY 0x00000000
-#define PMU_CC1_SW_TYPE_EPHYMII 0x00000040
-#define PMU_CC1_SW_TYPE_EPHYRMII 0x00000080
-#define PMU_CC1_SW_TYPE_RGMII 0x000000c0
-
-/* PMU corerev and chip specific PLL controls.
- * PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary number
- * to differentiate different PLLs controlled by the same PMU rev.
- */
-/* pllcontrol registers */
-/* PDIV, div_phy, div_arm, div_adc, dith_sel, ioff, kpd_scale, lsb_sel, mash_sel, lf_c & lf_r */
-#define PMU0_PLL0_PLLCTL0 0
-#define PMU0_PLL0_PC0_PDIV_MASK 1
-#define PMU0_PLL0_PC0_PDIV_FREQ 25000
-#define PMU0_PLL0_PC0_DIV_ARM_MASK 0x00000038
-#define PMU0_PLL0_PC0_DIV_ARM_SHIFT 3
-#define PMU0_PLL0_PC0_DIV_ARM_BASE 8
-
-/* PC0_DIV_ARM for PLLOUT_ARM */
-#define PMU0_PLL0_PC0_DIV_ARM_110MHZ 0
-#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 1
-#define PMU0_PLL0_PC0_DIV_ARM_88MHZ 2
-#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3 /* Default */
-#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4
-#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5
-#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6
-#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7
-
-/* Wildcard base, stop_mod, en_lf_tp, en_cal & lf_r2 */
-#define PMU0_PLL0_PLLCTL1 1
-#define PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000
-#define PMU0_PLL0_PC1_WILD_INT_SHIFT 28
-#define PMU0_PLL0_PC1_WILD_FRAC_MASK 0x0fffff00
-#define PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8
-#define PMU0_PLL0_PC1_STOP_MOD 0x00000040
-
-/* Wildcard base, vco_calvar, vco_swc, vco_var_selref, vso_ical & vco_sel_avdd */
-#define PMU0_PLL0_PLLCTL2 2
-#define PMU0_PLL0_PC2_WILD_INT_MASK 0xf
-#define PMU0_PLL0_PC2_WILD_INT_SHIFT 4
-
-/* pllcontrol registers */
-/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
-#define PMU1_PLL0_PLLCTL0 0
-#define PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000
-#define PMU1_PLL0_PC0_P1DIV_SHIFT 20
-#define PMU1_PLL0_PC0_P2DIV_MASK 0x0f000000
-#define PMU1_PLL0_PC0_P2DIV_SHIFT 24
-
-/* m<x>div */
-#define PMU1_PLL0_PLLCTL1 1
-#define PMU1_PLL0_PC1_M1DIV_MASK 0x000000ff
-#define PMU1_PLL0_PC1_M1DIV_SHIFT 0
-#define PMU1_PLL0_PC1_M2DIV_MASK 0x0000ff00
-#define PMU1_PLL0_PC1_M2DIV_SHIFT 8
-#define PMU1_PLL0_PC1_M3DIV_MASK 0x00ff0000
-#define PMU1_PLL0_PC1_M3DIV_SHIFT 16
-#define PMU1_PLL0_PC1_M4DIV_MASK 0xff000000
-#define PMU1_PLL0_PC1_M4DIV_SHIFT 24
-
-#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8
-#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
-#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
-
-/* m<x>div, ndiv_dither_mfb, ndiv_mode, ndiv_int */
-#define PMU1_PLL0_PLLCTL2 2
-#define PMU1_PLL0_PC2_M5DIV_MASK 0x000000ff
-#define PMU1_PLL0_PC2_M5DIV_SHIFT 0
-#define PMU1_PLL0_PC2_M6DIV_MASK 0x0000ff00
-#define PMU1_PLL0_PC2_M6DIV_SHIFT 8
-#define PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000
-#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17
-#define PMU1_PLL0_PC2_NDIV_MODE_MASH 1
-#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2 /* recommended for 4319 */
-#define PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000
-#define PMU1_PLL0_PC2_NDIV_INT_SHIFT 20
-
-/* ndiv_frac */
-#define PMU1_PLL0_PLLCTL3 3
-#define PMU1_PLL0_PC3_NDIV_FRAC_MASK 0x00ffffff
-#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT 0
-
-/* pll_ctrl */
-#define PMU1_PLL0_PLLCTL4 4
-
-/* pll_ctrl, vco_rng, clkdrive_ch<x> */
-#define PMU1_PLL0_PLLCTL5 5
-#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00
-#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8
-
-/* PMU rev 2 control words */
-#define PMU2_PHY_PLL_PLLCTL 4
-#define PMU2_SI_PLL_PLLCTL 10
-
-/* PMU rev 2 */
-/* pllcontrol registers */
-/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
-#define PMU2_PLL_PLLCTL0 0
-#define PMU2_PLL_PC0_P1DIV_MASK 0x00f00000
-#define PMU2_PLL_PC0_P1DIV_SHIFT 20
-#define PMU2_PLL_PC0_P2DIV_MASK 0x0f000000
-#define PMU2_PLL_PC0_P2DIV_SHIFT 24
-
-/* m<x>div */
-#define PMU2_PLL_PLLCTL1 1
-#define PMU2_PLL_PC1_M1DIV_MASK 0x000000ff
-#define PMU2_PLL_PC1_M1DIV_SHIFT 0
-#define PMU2_PLL_PC1_M2DIV_MASK 0x0000ff00
-#define PMU2_PLL_PC1_M2DIV_SHIFT 8
-#define PMU2_PLL_PC1_M3DIV_MASK 0x00ff0000
-#define PMU2_PLL_PC1_M3DIV_SHIFT 16
-#define PMU2_PLL_PC1_M4DIV_MASK 0xff000000
-#define PMU2_PLL_PC1_M4DIV_SHIFT 24
-
-/* m<x>div, ndiv_dither_mfb, ndiv_mode, ndiv_int */
-#define PMU2_PLL_PLLCTL2 2
-#define PMU2_PLL_PC2_M5DIV_MASK 0x000000ff
-#define PMU2_PLL_PC2_M5DIV_SHIFT 0
-#define PMU2_PLL_PC2_M6DIV_MASK 0x0000ff00
-#define PMU2_PLL_PC2_M6DIV_SHIFT 8
-#define PMU2_PLL_PC2_NDIV_MODE_MASK 0x000e0000
-#define PMU2_PLL_PC2_NDIV_MODE_SHIFT 17
-#define PMU2_PLL_PC2_NDIV_INT_MASK 0x1ff00000
-#define PMU2_PLL_PC2_NDIV_INT_SHIFT 20
-
-/* ndiv_frac */
-#define PMU2_PLL_PLLCTL3 3
-#define PMU2_PLL_PC3_NDIV_FRAC_MASK 0x00ffffff
-#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT 0
-
-/* pll_ctrl */
-#define PMU2_PLL_PLLCTL4 4
-
-/* pll_ctrl, vco_rng, clkdrive_ch<x> */
-#define PMU2_PLL_PLLCTL5 5
-#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK 0x00000f00
-#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT 8
-#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK 0x0000f000
-#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT 12
-#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK 0x000f0000
-#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT 16
-#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK 0x00f00000
-#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT 20
-#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK 0x0f000000
-#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT 24
-#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK 0xf0000000
-#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT 28
-
-/* PMU rev 5 (& 6) */
-#define PMU5_PLL_P1P2_OFF 0
-#define PMU5_PLL_P1_MASK 0x0f000000
-#define PMU5_PLL_P1_SHIFT 24
-#define PMU5_PLL_P2_MASK 0x00f00000
-#define PMU5_PLL_P2_SHIFT 20
-#define PMU5_PLL_M14_OFF 1
-#define PMU5_PLL_MDIV_MASK 0x000000ff
-#define PMU5_PLL_MDIV_WIDTH 8
-#define PMU5_PLL_NM5_OFF 2
-#define PMU5_PLL_NDIV_MASK 0xfff00000
-#define PMU5_PLL_NDIV_SHIFT 20
-#define PMU5_PLL_NDIV_MODE_MASK 0x000e0000
-#define PMU5_PLL_NDIV_MODE_SHIFT 17
-#define PMU5_PLL_FMAB_OFF 3
-#define PMU5_PLL_MRAT_MASK 0xf0000000
-#define PMU5_PLL_MRAT_SHIFT 28
-#define PMU5_PLL_ABRAT_MASK 0x08000000
-#define PMU5_PLL_ABRAT_SHIFT 27
-#define PMU5_PLL_FDIV_MASK 0x07ffffff
-#define PMU5_PLL_PLLCTL_OFF 4
-#define PMU5_PLL_PCHI_OFF 5
-#define PMU5_PLL_PCHI_MASK 0x0000003f
-
-/* pmu XtalFreqRatio */
-#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF
-#define PMU_XTALFREQ_REG_MEASURE_MASK 0x80000000
-#define PMU_XTALFREQ_REG_MEASURE_SHIFT 31
-
-/* Divider allocation in 4716/47162/5356/5357 */
-#define PMU5_MAINPLL_CPU 1
-#define PMU5_MAINPLL_MEM 2
-#define PMU5_MAINPLL_SI 3
-
-#define PMU7_PLL_PLLCTL7 7
-#define PMU7_PLL_PLLCTL8 8
-#define PMU7_PLL_PLLCTL11 11
-
-/* PLL usage in 4716/47162 */
-#define PMU4716_MAINPLL_PLL0 12
-
-/* PLL usage in 5356/5357 */
-#define PMU5356_MAINPLL_PLL0 0
-#define PMU5357_MAINPLL_PLL0 0
-
-/* 4716/47162 resources */
-#define RES4716_PROC_PLL_ON 0x00000040
-#define RES4716_PROC_HT_AVAIL 0x00000080
-
-/* 4716/4717/4718 Chip specific ChipControl register bits */
-#define CCTRL471X_I2S_PINS_ENABLE 0x0080 /* I2S pins off by default, shared with pflash */
-
-/* 5354 resources */
-#define RES5354_EXT_SWITCHER_PWM 0 /* 0x00001 */
-#define RES5354_BB_SWITCHER_PWM 1 /* 0x00002 */
-#define RES5354_BB_SWITCHER_BURST 2 /* 0x00004 */
-#define RES5354_BB_EXT_SWITCHER_BURST 3 /* 0x00008 */
-#define RES5354_ILP_REQUEST 4 /* 0x00010 */
-#define RES5354_RADIO_SWITCHER_PWM 5 /* 0x00020 */
-#define RES5354_RADIO_SWITCHER_BURST 6 /* 0x00040 */
-#define RES5354_ROM_SWITCH 7 /* 0x00080 */
-#define RES5354_PA_REF_LDO 8 /* 0x00100 */
-#define RES5354_RADIO_LDO 9 /* 0x00200 */
-#define RES5354_AFE_LDO 10 /* 0x00400 */
-#define RES5354_PLL_LDO 11 /* 0x00800 */
-#define RES5354_BG_FILTBYP 12 /* 0x01000 */
-#define RES5354_TX_FILTBYP 13 /* 0x02000 */
-#define RES5354_RX_FILTBYP 14 /* 0x04000 */
-#define RES5354_XTAL_PU 15 /* 0x08000 */
-#define RES5354_XTAL_EN 16 /* 0x10000 */
-#define RES5354_BB_PLL_FILTBYP 17 /* 0x20000 */
-#define RES5354_RF_PLL_FILTBYP 18 /* 0x40000 */
-#define RES5354_BB_PLL_PU 19 /* 0x80000 */
-
-/* 5357 Chip specific ChipControl register bits */
-#define CCTRL5357_EXTPA (1<<14) /* extPA in ChipControl 1, bit 14 */
-#define CCTRL5357_ANT_MUX_2o3 (1<<15) /* 2o3 in ChipControl 1, bit 15 */
-
-/* 4328 resources */
-#define RES4328_EXT_SWITCHER_PWM 0 /* 0x00001 */
-#define RES4328_BB_SWITCHER_PWM 1 /* 0x00002 */
-#define RES4328_BB_SWITCHER_BURST 2 /* 0x00004 */
-#define RES4328_BB_EXT_SWITCHER_BURST 3 /* 0x00008 */
-#define RES4328_ILP_REQUEST 4 /* 0x00010 */
-#define RES4328_RADIO_SWITCHER_PWM 5 /* 0x00020 */
-#define RES4328_RADIO_SWITCHER_BURST 6 /* 0x00040 */
-#define RES4328_ROM_SWITCH 7 /* 0x00080 */
-#define RES4328_PA_REF_LDO 8 /* 0x00100 */
-#define RES4328_RADIO_LDO 9 /* 0x00200 */
-#define RES4328_AFE_LDO 10 /* 0x00400 */
-#define RES4328_PLL_LDO 11 /* 0x00800 */
-#define RES4328_BG_FILTBYP 12 /* 0x01000 */
-#define RES4328_TX_FILTBYP 13 /* 0x02000 */
-#define RES4328_RX_FILTBYP 14 /* 0x04000 */
-#define RES4328_XTAL_PU 15 /* 0x08000 */
-#define RES4328_XTAL_EN 16 /* 0x10000 */
-#define RES4328_BB_PLL_FILTBYP 17 /* 0x20000 */
-#define RES4328_RF_PLL_FILTBYP 18 /* 0x40000 */
-#define RES4328_BB_PLL_PU 19 /* 0x80000 */
-
-/* 4325 A0/A1 resources */
-#define RES4325_BUCK_BOOST_BURST 0 /* 0x00000001 */
-#define RES4325_CBUCK_BURST 1 /* 0x00000002 */
-#define RES4325_CBUCK_PWM 2 /* 0x00000004 */
-#define RES4325_CLDO_CBUCK_BURST 3 /* 0x00000008 */
-#define RES4325_CLDO_CBUCK_PWM 4 /* 0x00000010 */
-#define RES4325_BUCK_BOOST_PWM 5 /* 0x00000020 */
-#define RES4325_ILP_REQUEST 6 /* 0x00000040 */
-#define RES4325_ABUCK_BURST 7 /* 0x00000080 */
-#define RES4325_ABUCK_PWM 8 /* 0x00000100 */
-#define RES4325_LNLDO1_PU 9 /* 0x00000200 */
-#define RES4325_OTP_PU 10 /* 0x00000400 */
-#define RES4325_LNLDO3_PU 11 /* 0x00000800 */
-#define RES4325_LNLDO4_PU 12 /* 0x00001000 */
-#define RES4325_XTAL_PU 13 /* 0x00002000 */
-#define RES4325_ALP_AVAIL 14 /* 0x00004000 */
-#define RES4325_RX_PWRSW_PU 15 /* 0x00008000 */
-#define RES4325_TX_PWRSW_PU 16 /* 0x00010000 */
-#define RES4325_RFPLL_PWRSW_PU 17 /* 0x00020000 */
-#define RES4325_LOGEN_PWRSW_PU 18 /* 0x00040000 */
-#define RES4325_AFE_PWRSW_PU 19 /* 0x00080000 */
-#define RES4325_BBPLL_PWRSW_PU 20 /* 0x00100000 */
-#define RES4325_HT_AVAIL 21 /* 0x00200000 */
-
-/* 4325 B0/C0 resources */
-#define RES4325B0_CBUCK_LPOM 1 /* 0x00000002 */
-#define RES4325B0_CBUCK_BURST 2 /* 0x00000004 */
-#define RES4325B0_CBUCK_PWM 3 /* 0x00000008 */
-#define RES4325B0_CLDO_PU 4 /* 0x00000010 */
-
-/* 4325 C1 resources */
-#define RES4325C1_LNLDO2_PU 12 /* 0x00001000 */
-
-/* 4325 chip-specific ChipStatus register bits */
-#define CST4325_SPROM_OTP_SEL_MASK 0x00000003
-#define CST4325_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
-#define CST4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
-#define CST4325_OTP_SEL 2 /* OTP is powered up, no SPROM */
-#define CST4325_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */
-#define CST4325_SDIO_USB_MODE_MASK 0x00000004
-#define CST4325_SDIO_USB_MODE_SHIFT 2
-#define CST4325_RCAL_VALID_MASK 0x00000008
-#define CST4325_RCAL_VALID_SHIFT 3
-#define CST4325_RCAL_VALUE_MASK 0x000001f0
-#define CST4325_RCAL_VALUE_SHIFT 4
-#define CST4325_PMUTOP_2B_MASK 0x00000200 /* 1 for 2b, 0 for 2a */
-#define CST4325_PMUTOP_2B_SHIFT 9
-
-#define RES4329_RESERVED0 0 /* 0x00000001 */
-#define RES4329_CBUCK_LPOM 1 /* 0x00000002 */
-#define RES4329_CBUCK_BURST 2 /* 0x00000004 */
-#define RES4329_CBUCK_PWM 3 /* 0x00000008 */
-#define RES4329_CLDO_PU 4 /* 0x00000010 */
-#define RES4329_PALDO_PU 5 /* 0x00000020 */
-#define RES4329_ILP_REQUEST 6 /* 0x00000040 */
-#define RES4329_RESERVED7 7 /* 0x00000080 */
-#define RES4329_RESERVED8 8 /* 0x00000100 */
-#define RES4329_LNLDO1_PU 9 /* 0x00000200 */
-#define RES4329_OTP_PU 10 /* 0x00000400 */
-#define RES4329_RESERVED11 11 /* 0x00000800 */
-#define RES4329_LNLDO2_PU 12 /* 0x00001000 */
-#define RES4329_XTAL_PU 13 /* 0x00002000 */
-#define RES4329_ALP_AVAIL 14 /* 0x00004000 */
-#define RES4329_RX_PWRSW_PU 15 /* 0x00008000 */
-#define RES4329_TX_PWRSW_PU 16 /* 0x00010000 */
-#define RES4329_RFPLL_PWRSW_PU 17 /* 0x00020000 */
-#define RES4329_LOGEN_PWRSW_PU 18 /* 0x00040000 */
-#define RES4329_AFE_PWRSW_PU 19 /* 0x00080000 */
-#define RES4329_BBPLL_PWRSW_PU 20 /* 0x00100000 */
-#define RES4329_HT_AVAIL 21 /* 0x00200000 */
-
-#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
-#define CST4329_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
-#define CST4329_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
-#define CST4329_OTP_SEL 2 /* OTP is powered up, no SPROM */
-#define CST4329_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */
-#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
-#define CST4329_SPI_SDIO_MODE_SHIFT 2
-
-/* 4312 chip-specific ChipStatus register bits */
-#define CST4312_SPROM_OTP_SEL_MASK 0x00000003
-#define CST4312_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
-#define CST4312_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
-#define CST4312_OTP_SEL 2 /* OTP is powered up, no SPROM */
-#define CST4312_OTP_BAD 3 /* OTP is broken, SPROM is present */
-
-/* 4312 resources (all PMU chips with little memory constraint) */
-#define RES4312_SWITCHER_BURST 0 /* 0x00000001 */
-#define RES4312_SWITCHER_PWM 1 /* 0x00000002 */
-#define RES4312_PA_REF_LDO 2 /* 0x00000004 */
-#define RES4312_CORE_LDO_BURST 3 /* 0x00000008 */
-#define RES4312_CORE_LDO_PWM 4 /* 0x00000010 */
-#define RES4312_RADIO_LDO 5 /* 0x00000020 */
-#define RES4312_ILP_REQUEST 6 /* 0x00000040 */
-#define RES4312_BG_FILTBYP 7 /* 0x00000080 */
-#define RES4312_TX_FILTBYP 8 /* 0x00000100 */
-#define RES4312_RX_FILTBYP 9 /* 0x00000200 */
-#define RES4312_XTAL_PU 10 /* 0x00000400 */
-#define RES4312_ALP_AVAIL 11 /* 0x00000800 */
-#define RES4312_BB_PLL_FILTBYP 12 /* 0x00001000 */
-#define RES4312_RF_PLL_FILTBYP 13 /* 0x00002000 */
-#define RES4312_HT_AVAIL 14 /* 0x00004000 */
-
-/* 4322 resources */
-#define RES4322_RF_LDO 0
-#define RES4322_ILP_REQUEST 1
-#define RES4322_XTAL_PU 2
-#define RES4322_ALP_AVAIL 3
-#define RES4322_SI_PLL_ON 4
-#define RES4322_HT_SI_AVAIL 5
-#define RES4322_PHY_PLL_ON 6
-#define RES4322_HT_PHY_AVAIL 7
-#define RES4322_OTP_PU 8
-
-/* 4322 chip-specific ChipStatus register bits */
-#define CST4322_XTAL_FREQ_20_40MHZ 0x00000020
-#define CST4322_SPROM_OTP_SEL_MASK 0x000000c0
-#define CST4322_SPROM_OTP_SEL_SHIFT 6
-#define CST4322_NO_SPROM_OTP 0 /* no OTP, no SPROM */
-#define CST4322_SPROM_PRESENT 1 /* SPROM is present */
-#define CST4322_OTP_PRESENT 2 /* OTP is present */
-#define CST4322_PCI_OR_USB 0x00000100
-#define CST4322_BOOT_MASK 0x00000600
-#define CST4322_BOOT_SHIFT 9
-#define CST4322_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */
-#define CST4322_BOOT_FROM_ROM 1 /* boot from ROM */
-#define CST4322_BOOT_FROM_FLASH 2 /* boot from FLASH */
-#define CST4322_BOOT_FROM_INVALID 3
-#define CST4322_ILP_DIV_EN 0x00000800
-#define CST4322_FLASH_TYPE_MASK 0x00001000
-#define CST4322_FLASH_TYPE_SHIFT 12
-#define CST4322_FLASH_TYPE_SHIFT_ST 0 /* ST serial FLASH */
-#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1 /* ATMEL flash */
-#define CST4322_ARM_TAP_SEL 0x00002000
-#define CST4322_RES_INIT_MODE_MASK 0x0000c000
-#define CST4322_RES_INIT_MODE_SHIFT 14
-#define CST4322_RES_INIT_MODE_ILPAVAIL 0 /* resinitmode: ILP available */
-#define CST4322_RES_INIT_MODE_ILPREQ 1 /* resinitmode: ILP request */
-#define CST4322_RES_INIT_MODE_ALPAVAIL 2 /* resinitmode: ALP available */
-#define CST4322_RES_INIT_MODE_HTAVAIL 3 /* resinitmode: HT available */
-#define CST4322_PCIPLLCLK_GATING 0x00010000
-#define CST4322_CLK_SWITCH_PCI_TO_ALP 0x00020000
-#define CST4322_PCI_CARDBUS_MODE 0x00040000
-
-/* 43224 chip-specific ChipControl register bits */
-#define CCTRL43224_GPIO_TOGGLE 0x8000
-#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0 /* 12 mA drive strength */
-#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0 /* 12 mA drive strength for later 43224s */
-
-/* 43236 resources */
-#define RES43236_REGULATOR 0
-#define RES43236_ILP_REQUEST 1
-#define RES43236_XTAL_PU 2
-#define RES43236_ALP_AVAIL 3
-#define RES43236_SI_PLL_ON 4
-#define RES43236_HT_SI_AVAIL 5
-
-/* 43236 chip-specific ChipControl register bits */
-#define CCTRL43236_BT_COEXIST (1<<0) /* 0 disable */
-#define CCTRL43236_SECI (1<<1) /* 0 SECI is disabled (JTAG functional) */
-#define CCTRL43236_EXT_LNA (1<<2) /* 0 disable */
-#define CCTRL43236_ANT_MUX_2o3 (1<<3) /* 2o3 mux, chipcontrol bit 3 */
-#define CCTRL43236_GSIO (1<<4) /* 0 disable */
-
-/* 43236 Chip specific ChipStatus register bits */
-#define CST43236_SFLASH_MASK 0x00000040
-#define CST43236_OTP_MASK 0x00000080
-#define CST43236_HSIC_MASK 0x00000100 /* USB/HSIC */
-#define CST43236_BP_CLK 0x00000200 /* 120/96MHz */
-#define CST43236_BOOT_MASK 0x00001800
-#define CST43236_BOOT_SHIFT 11
-#define CST43236_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */
-#define CST43236_BOOT_FROM_ROM 1 /* boot from ROM */
-#define CST43236_BOOT_FROM_FLASH 2 /* boot from FLASH */
-#define CST43236_BOOT_FROM_INVALID 3
-
-/* 4331 resources */
-#define RES4331_REGULATOR 0
-#define RES4331_ILP_REQUEST 1
-#define RES4331_XTAL_PU 2
-#define RES4331_ALP_AVAIL 3
-#define RES4331_SI_PLL_ON 4
-#define RES4331_HT_SI_AVAIL 5
-
-/* 4331 chip-specific ChipControl register bits */
-#define CCTRL4331_BT_COEXIST (1<<0) /* 0 disable */
-#define CCTRL4331_SECI (1<<1) /* 0 SECI is disabled (JTAG functional) */
-#define CCTRL4331_EXT_LNA (1<<2) /* 0 disable */
-#define CCTRL4331_SPROM_GPIO13_15 (1<<3) /* sprom/gpio13-15 mux */
-#define CCTRL4331_EXTPA_EN (1<<4) /* 0 ext pa disable, 1 ext pa enabled */
-#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5) /* set drive out GPIO_CLK on sprom_cs pin */
-#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6) /* use sprom_cs pin as PCIE mdio interface */
-#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7) /* aband extpa will be at gpio2/5 and sprom_dout */
-#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8) /* override core control on pipe_AuxClkEnable */
-#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9) /* override core control on pipe_AuxPowerDown */
-#define CCTRL4331_PCIE_AUXCLKEN (1<<10) /* pcie_auxclkenable */
-#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11) /* pcie_pipe_pllpowerdown */
-#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16) /* enable bt_shd0 at gpio4 */
-#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17) /* enable bt_shd1 at gpio5 */
-
-/* 4331 Chip specific ChipStatus register bits */
-#define CST4331_XTAL_FREQ 0x00000001 /* crystal frequency 20/40MHz */
-#define CST4331_SPROM_PRESENT 0x00000002
-#define CST4331_OTP_PRESENT 0x00000004
-#define CST4331_LDO_RF 0x00000008
-#define CST4331_LDO_PAR 0x00000010
-
-/* 4315 resources */
-#define RES4315_CBUCK_LPOM 1 /* 0x00000002 */
-#define RES4315_CBUCK_BURST 2 /* 0x00000004 */
-#define RES4315_CBUCK_PWM 3 /* 0x00000008 */
-#define RES4315_CLDO_PU 4 /* 0x00000010 */
-#define RES4315_PALDO_PU 5 /* 0x00000020 */
-#define RES4315_ILP_REQUEST 6 /* 0x00000040 */
-#define RES4315_LNLDO1_PU 9 /* 0x00000200 */
-#define RES4315_OTP_PU 10 /* 0x00000400 */
-#define RES4315_LNLDO2_PU 12 /* 0x00001000 */
-#define RES4315_XTAL_PU 13 /* 0x00002000 */
-#define RES4315_ALP_AVAIL 14 /* 0x00004000 */
-#define RES4315_RX_PWRSW_PU 15 /* 0x00008000 */
-#define RES4315_TX_PWRSW_PU 16 /* 0x00010000 */
-#define RES4315_RFPLL_PWRSW_PU 17 /* 0x00020000 */
-#define RES4315_LOGEN_PWRSW_PU 18 /* 0x00040000 */
-#define RES4315_AFE_PWRSW_PU 19 /* 0x00080000 */
-#define RES4315_BBPLL_PWRSW_PU 20 /* 0x00100000 */
-#define RES4315_HT_AVAIL 21 /* 0x00200000 */
-
-/* 4315 chip-specific ChipStatus register bits */
-#define CST4315_SPROM_OTP_SEL_MASK 0x00000003 /* gpio [7:6], SDIO CIS selection */
-#define CST4315_DEFCIS_SEL 0x00000000 /* use default CIS, OTP is powered up */
-#define CST4315_SPROM_SEL 0x00000001 /* use SPROM, OTP is powered up */
-#define CST4315_OTP_SEL 0x00000002 /* use OTP, OTP is powered up */
-#define CST4315_OTP_PWRDN 0x00000003 /* use SPROM, OTP is powered down */
-#define CST4315_SDIO_MODE 0x00000004 /* gpio [8], sdio/usb mode */
-#define CST4315_RCAL_VALID 0x00000008
-#define CST4315_RCAL_VALUE_MASK 0x000001f0
-#define CST4315_RCAL_VALUE_SHIFT 4
-#define CST4315_PALDO_EXTPNP 0x00000200 /* PALDO is configured with external PNP */
-#define CST4315_CBUCK_MODE_MASK 0x00000c00
-#define CST4315_CBUCK_MODE_BURST 0x00000400
-#define CST4315_CBUCK_MODE_LPBURST 0x00000c00
-
-/* 4319 resources */
-#define RES4319_CBUCK_LPOM 1 /* 0x00000002 */
-#define RES4319_CBUCK_BURST 2 /* 0x00000004 */
-#define RES4319_CBUCK_PWM 3 /* 0x00000008 */
-#define RES4319_CLDO_PU 4 /* 0x00000010 */
-#define RES4319_PALDO_PU 5 /* 0x00000020 */
-#define RES4319_ILP_REQUEST 6 /* 0x00000040 */
-#define RES4319_LNLDO1_PU 9 /* 0x00000200 */
-#define RES4319_OTP_PU 10 /* 0x00000400 */
-#define RES4319_LNLDO2_PU 12 /* 0x00001000 */
-#define RES4319_XTAL_PU 13 /* 0x00002000 */
-#define RES4319_ALP_AVAIL 14 /* 0x00004000 */
-#define RES4319_RX_PWRSW_PU 15 /* 0x00008000 */
-#define RES4319_TX_PWRSW_PU 16 /* 0x00010000 */
-#define RES4319_RFPLL_PWRSW_PU 17 /* 0x00020000 */
-#define RES4319_LOGEN_PWRSW_PU 18 /* 0x00040000 */
-#define RES4319_AFE_PWRSW_PU 19 /* 0x00080000 */
-#define RES4319_BBPLL_PWRSW_PU 20 /* 0x00100000 */
-#define RES4319_HT_AVAIL 21 /* 0x00200000 */
-
-/* 4319 chip-specific ChipStatus register bits */
-#define CST4319_SPI_CPULESSUSB 0x00000001
-#define CST4319_SPI_CLK_POL 0x00000002
-#define CST4319_SPI_CLK_PH 0x00000008
-#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0 /* gpio [7:6], SDIO CIS selection */
-#define CST4319_SPROM_OTP_SEL_SHIFT 6
-#define CST4319_DEFCIS_SEL 0x00000000 /* use default CIS, OTP is powered up */
-#define CST4319_SPROM_SEL 0x00000040 /* use SPROM, OTP is powered up */
-#define CST4319_OTP_SEL 0x00000080 /* use OTP, OTP is powered up */
-#define CST4319_OTP_PWRDN 0x000000c0 /* use SPROM, OTP is powered down */
-#define CST4319_SDIO_USB_MODE 0x00000100 /* gpio [8], sdio/usb mode */
-#define CST4319_REMAP_SEL_MASK 0x00000600
-#define CST4319_ILPDIV_EN 0x00000800
-#define CST4319_XTAL_PD_POL 0x00001000
-#define CST4319_LPO_SEL 0x00002000
-#define CST4319_RES_INIT_MODE 0x0000c000
-#define CST4319_PALDO_EXTPNP 0x00010000 /* PALDO is configured with external PNP */
-#define CST4319_CBUCK_MODE_MASK 0x00060000
-#define CST4319_CBUCK_MODE_BURST 0x00020000
-#define CST4319_CBUCK_MODE_LPBURST 0x00060000
-#define CST4319_RCAL_VALID 0x01000000
-#define CST4319_RCAL_VALUE_MASK 0x3e000000
-#define CST4319_RCAL_VALUE_SHIFT 25
-
-#define PMU1_PLL0_CHIPCTL0 0
-#define PMU1_PLL0_CHIPCTL1 1
-#define PMU1_PLL0_CHIPCTL2 2
-#define CCTL_4319USB_XTAL_SEL_MASK 0x00180000
-#define CCTL_4319USB_XTAL_SEL_SHIFT 19
-#define CCTL_4319USB_48MHZ_PLL_SEL 1
-#define CCTL_4319USB_24MHZ_PLL_SEL 2
-
-/* PMU resources for 4336 */
-#define RES4336_CBUCK_LPOM 0
-#define RES4336_CBUCK_BURST 1
-#define RES4336_CBUCK_LP_PWM 2
-#define RES4336_CBUCK_PWM 3
-#define RES4336_CLDO_PU 4
-#define RES4336_DIS_INT_RESET_PD 5
-#define RES4336_ILP_REQUEST 6
-#define RES4336_LNLDO_PU 7
-#define RES4336_LDO3P3_PU 8
-#define RES4336_OTP_PU 9
-#define RES4336_XTAL_PU 10
-#define RES4336_ALP_AVAIL 11
-#define RES4336_RADIO_PU 12
-#define RES4336_BG_PU 13
-#define RES4336_VREG1p4_PU_PU 14
-#define RES4336_AFE_PWRSW_PU 15
-#define RES4336_RX_PWRSW_PU 16
-#define RES4336_TX_PWRSW_PU 17
-#define RES4336_BB_PWRSW_PU 18
-#define RES4336_SYNTH_PWRSW_PU 19
-#define RES4336_MISC_PWRSW_PU 20
-#define RES4336_LOGEN_PWRSW_PU 21
-#define RES4336_BBPLL_PWRSW_PU 22
-#define RES4336_MACPHY_CLKAVAIL 23
-#define RES4336_HT_AVAIL 24
-#define RES4336_RSVD 25
-
-/* 4336 chip-specific ChipStatus register bits */
-#define CST4336_SPI_MODE_MASK 0x00000001
-#define CST4336_SPROM_PRESENT 0x00000002
-#define CST4336_OTP_PRESENT 0x00000004
-#define CST4336_ARMREMAP_0 0x00000008
-#define CST4336_ILPDIV_EN_MASK 0x00000010
-#define CST4336_ILPDIV_EN_SHIFT 4
-#define CST4336_XTAL_PD_POL_MASK 0x00000020
-#define CST4336_XTAL_PD_POL_SHIFT 5
-#define CST4336_LPO_SEL_MASK 0x00000040
-#define CST4336_LPO_SEL_SHIFT 6
-#define CST4336_RES_INIT_MODE_MASK 0x00000180
-#define CST4336_RES_INIT_MODE_SHIFT 7
-#define CST4336_CBUCK_MODE_MASK 0x00000600
-#define CST4336_CBUCK_MODE_SHIFT 9
-
-/* 4330 resources */
-#define RES4330_CBUCK_LPOM 0
-#define RES4330_CBUCK_BURST 1
-#define RES4330_CBUCK_LP_PWM 2
-#define RES4330_CBUCK_PWM 3
-#define RES4330_CLDO_PU 4
-#define RES4330_DIS_INT_RESET_PD 5
-#define RES4330_ILP_REQUEST 6
-#define RES4330_LNLDO_PU 7
-#define RES4330_LDO3P3_PU 8
-#define RES4330_OTP_PU 9
-#define RES4330_XTAL_PU 10
-#define RES4330_ALP_AVAIL 11
-#define RES4330_RADIO_PU 12
-#define RES4330_BG_PU 13
-#define RES4330_VREG1p4_PU_PU 14
-#define RES4330_AFE_PWRSW_PU 15
-#define RES4330_RX_PWRSW_PU 16
-#define RES4330_TX_PWRSW_PU 17
-#define RES4330_BB_PWRSW_PU 18
-#define RES4330_SYNTH_PWRSW_PU 19
-#define RES4330_MISC_PWRSW_PU 20
-#define RES4330_LOGEN_PWRSW_PU 21
-#define RES4330_BBPLL_PWRSW_PU 22
-#define RES4330_MACPHY_CLKAVAIL 23
-#define RES4330_HT_AVAIL 24
-#define RES4330_5gRX_PWRSW_PU 25
-#define RES4330_5gTX_PWRSW_PU 26
-#define RES4330_5g_LOGEN_PWRSW_PU 27
-
-/* 4330 chip-specific ChipStatus register bits */
-#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6) /* SDIO || gSPI */
-#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6) /* USB || USBDA */
-#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0) /* SDIO */
-#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4) /* gSPI */
-#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6) /* USB packet-oriented */
-#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7) /* USB Direct Access */
-#define CST4330_OTP_PRESENT 0x00000010
-#define CST4330_LPO_AUTODET_EN 0x00000020
-#define CST4330_ARMREMAP_0 0x00000040
-#define CST4330_SPROM_PRESENT 0x00000080 /* takes priority over OTP if both set */
-#define CST4330_ILPDIV_EN 0x00000100
-#define CST4330_LPO_SEL 0x00000200
-#define CST4330_RES_INIT_MODE_SHIFT 10
-#define CST4330_RES_INIT_MODE_MASK 0x00000c00
-#define CST4330_CBUCK_MODE_SHIFT 12
-#define CST4330_CBUCK_MODE_MASK 0x00003000
-#define CST4330_CBUCK_POWER_OK 0x00004000
-#define CST4330_BB_PLL_LOCKED 0x00008000
-#define SOCDEVRAM_4330_BP_ADDR 0x1E000000
-#define SOCDEVRAM_4330_ARM_ADDR 0x00800000
-
-/* 4313 resources */
-#define RES4313_BB_PU_RSRC 0
-#define RES4313_ILP_REQ_RSRC 1
-#define RES4313_XTAL_PU_RSRC 2
-#define RES4313_ALP_AVAIL_RSRC 3
-#define RES4313_RADIO_PU_RSRC 4
-#define RES4313_BG_PU_RSRC 5
-#define RES4313_VREG1P4_PU_RSRC 6
-#define RES4313_AFE_PWRSW_RSRC 7
-#define RES4313_RX_PWRSW_RSRC 8
-#define RES4313_TX_PWRSW_RSRC 9
-#define RES4313_BB_PWRSW_RSRC 10
-#define RES4313_SYNTH_PWRSW_RSRC 11
-#define RES4313_MISC_PWRSW_RSRC 12
-#define RES4313_BB_PLL_PWRSW_RSRC 13
-#define RES4313_HT_AVAIL_RSRC 14
-#define RES4313_MACPHY_CLK_AVAIL_RSRC 15
-
-/* 4313 chip-specific ChipStatus register bits */
-#define CST4313_SPROM_PRESENT 1
-#define CST4313_OTP_PRESENT 2
-#define CST4313_SPROM_OTP_SEL_MASK 0x00000002
-#define CST4313_SPROM_OTP_SEL_SHIFT 0
-
-/* 4313 Chip specific ChipControl register bits */
-#define CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strength for later 4313 */
-
-/* 43228 resources */
-#define RES43228_NOT_USED 0
-#define RES43228_ILP_REQUEST 1
-#define RES43228_XTAL_PU 2
-#define RES43228_ALP_AVAIL 3
-#define RES43228_PLL_EN 4
-#define RES43228_HT_PHY_AVAIL 5
-
-/* 43228 chipstatus reg bits */
-#define CST43228_ILP_DIV_EN 0x1
-#define CST43228_OTP_PRESENT 0x2
-#define CST43228_SERDES_REFCLK_PADSEL 0x4
-#define CST43228_SDIO_MODE 0x8
-
-#define CST43228_SDIO_OTP_PRESENT 0x10
-#define CST43228_SDIO_RESET 0x20
-
-/*
-* Maximum delay for the PMU state transition in us.
-* This is an upper bound intended for spinwaits etc.
-*/
-#define PMU_MAX_TRANSITION_DLY 15000
-
-/* PMU resource up transition time in ILP cycles */
-#define PMURES_UP_TRANSITION 2
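A minimal sketch (not from the original driver) of the bounded spinwait these constants are intended for, assuming `cc` points at the chipcommon register block and that udelay() from <linux/delay.h> is available:

static int pmu_wait_for_ht(chipcregs_t *cc)
{
	int waited_us;

	for (waited_us = 0; waited_us < PMU_MAX_TRANSITION_DLY; waited_us += 10) {
		if (cc->pmustatus & PST_HTAVAIL)
			return 0;	/* HT clock came up */
		udelay(10);
	}
	return -1;			/* gave up after the upper bound */
}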
-
-/*
-* Register eci_inputlo bitfield values.
-* - BT packet type information bits [7:0]
-*/
-/* [3:0] - Task (link) type */
-#define BT_ACL 0x00
-#define BT_SCO 0x01
-#define BT_eSCO 0x02
-#define BT_A2DP 0x03
-#define BT_SNIFF 0x04
-#define BT_PAGE_SCAN 0x05
-#define BT_INQUIRY_SCAN 0x06
-#define BT_PAGE 0x07
-#define BT_INQUIRY 0x08
-#define BT_MSS 0x09
-#define BT_PARK 0x0a
-#define BT_RSSISCAN 0x0b
-#define BT_MD_ACL 0x0c
-#define BT_MD_eSCO 0x0d
-#define BT_SCAN_WITH_SCO_LINK 0x0e
-#define BT_SCAN_WITHOUT_SCO_LINK 0x0f
-/* [7:4] = packet duration code */
-/* [8] - Master / Slave */
-#define BT_MASTER 0
-#define BT_SLAVE 1
-/* [11:9] - multi-level priority */
-#define BT_LOWEST_PRIO 0x0
-#define BT_HIGHEST_PRIO 0x3
-
-/* WLAN - number of antenna */
-#define WLAN_NUM_ANT1 TXANT_0
-#define WLAN_NUM_ANT2 TXANT_1
-
-#endif /* _SBCHIPC_H */
diff --git a/drivers/staging/brcm80211/include/sbconfig.h b/drivers/staging/brcm80211/include/sbconfig.h
deleted file mode 100644
index 5247f01ec36..00000000000
--- a/drivers/staging/brcm80211/include/sbconfig.h
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _SBCONFIG_H
-#define _SBCONFIG_H
-
-/* cpp contortions to concatenate w/arg prescan */
-#ifndef PAD
-#define _PADLINE(line) pad ## line
-#define _XSTR(line) _PADLINE(line)
-#define PAD _XSTR(__LINE__)
-#endif
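For illustration only: PAD expands __LINE__ into a unique member name, so the repeated padding fields in the register structs below never collide. A hypothetical expansion:

/*
 * written on line 42:	u32 PAD;	becomes		u32 pad42;
 * written on line 43:	u32 PAD[3];	becomes		u32 pad43[3];
 */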
-
-/* enumeration in SB is based on the premise that cores are contiguous in the
- * enumeration space.
- */
-#define SB_BUS_SIZE 0x10000 /* Each bus gets 64Kbytes for cores */
-#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE)
-#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) /* Max cores per bus */
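As an illustration (SI_ENUM_BASE and SI_CORE_SIZE come from other brcm80211 headers; these helper macros are not part of the original file), a core's backplane address and its Sonics config space follow directly from the definitions above and the SBCONFIGOFF offset defined just below:

#define SB_CORE_ADDR(bus, core)		(SB_BUS_BASE(bus) + (core) * SI_CORE_SIZE)
#define SB_CORE_SBCONFIG(bus, core)	(SB_CORE_ADDR(bus, core) + SBCONFIGOFF)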
-
-/*
- * Sonics Configuration Space Registers.
- */
-#define SBCONFIGOFF 0xf00 /* core sbconfig regs are top 256bytes of regs */
-#define SBCONFIGSIZE 256 /* sizeof (sbconfig_t) */
-
-#define SBIPSFLAG 0x08
-#define SBTPSFLAG 0x18
-#define SBTMERRLOGA 0x48 /* sonics >= 2.3 */
-#define SBTMERRLOG 0x50 /* sonics >= 2.3 */
-#define SBADMATCH3 0x60
-#define SBADMATCH2 0x68
-#define SBADMATCH1 0x70
-#define SBIMSTATE 0x90
-#define SBINTVEC 0x94
-#define SBTMSTATELOW 0x98
-#define SBTMSTATEHIGH 0x9c
-#define SBBWA0 0xa0
-#define SBIMCONFIGLOW 0xa8
-#define SBIMCONFIGHIGH 0xac
-#define SBADMATCH0 0xb0
-#define SBTMCONFIGLOW 0xb8
-#define SBTMCONFIGHIGH 0xbc
-#define SBBCONFIG 0xc0
-#define SBBSTATE 0xc8
-#define SBACTCNFG 0xd8
-#define SBFLAGST 0xe8
-#define SBIDLOW 0xf8
-#define SBIDHIGH 0xfc
-
-/* All the previous registers are above SBCONFIGOFF, but with Sonics 2.3, we have
- * a few registers *below* that line. I think it would be very confusing to try
- * and change the value of SBCONFIGOFF, so I'm defining them as absolute offsets here.
- */
-
-#define SBIMERRLOGA 0xea8
-#define SBIMERRLOG 0xeb0
-#define SBTMPORTCONNID0 0xed8
-#define SBTMPORTLOCK0 0xef8
-
-#ifndef _LANGUAGE_ASSEMBLY
-
-typedef volatile struct _sbconfig {
- u32 PAD[2];
- u32 sbipsflag; /* initiator port ocp slave flag */
- u32 PAD[3];
- u32 sbtpsflag; /* target port ocp slave flag */
- u32 PAD[11];
- u32 sbtmerrloga; /* (sonics >= 2.3) */
- u32 PAD;
- u32 sbtmerrlog; /* (sonics >= 2.3) */
- u32 PAD[3];
- u32 sbadmatch3; /* address match3 */
- u32 PAD;
- u32 sbadmatch2; /* address match2 */
- u32 PAD;
- u32 sbadmatch1; /* address match1 */
- u32 PAD[7];
- u32 sbimstate; /* initiator agent state */
- u32 sbintvec; /* interrupt mask */
- u32 sbtmstatelow; /* target state */
- u32 sbtmstatehigh; /* target state */
- u32 sbbwa0; /* bandwidth allocation table0 */
- u32 PAD;
- u32 sbimconfiglow; /* initiator configuration */
- u32 sbimconfighigh; /* initiator configuration */
- u32 sbadmatch0; /* address match0 */
- u32 PAD;
- u32 sbtmconfiglow; /* target configuration */
- u32 sbtmconfighigh; /* target configuration */
- u32 sbbconfig; /* broadcast configuration */
- u32 PAD;
- u32 sbbstate; /* broadcast state */
- u32 PAD[3];
- u32 sbactcnfg; /* activate configuration */
- u32 PAD[3];
- u32 sbflagst; /* current sbflags */
- u32 PAD[3];
- u32 sbidlow; /* identification */
- u32 sbidhigh; /* identification */
-} sbconfig_t;
-
-#endif /* _LANGUAGE_ASSEMBLY */
-
-/* sbipsflag */
-#define SBIPS_INT1_MASK 0x3f /* which sbflags get routed to mips interrupt 1 */
-#define SBIPS_INT1_SHIFT 0
-#define SBIPS_INT2_MASK 0x3f00 /* which sbflags get routed to mips interrupt 2 */
-#define SBIPS_INT2_SHIFT 8
-#define SBIPS_INT3_MASK 0x3f0000 /* which sbflags get routed to mips interrupt 3 */
-#define SBIPS_INT3_SHIFT 16
-#define SBIPS_INT4_MASK 0x3f000000 /* which sbflags get routed to mips interrupt 4 */
-#define SBIPS_INT4_SHIFT 24
-
-/* sbtpsflag */
-#define SBTPS_NUM0_MASK 0x3f /* interrupt sbFlag # generated by this core */
-#define SBTPS_F0EN0 0x40 /* interrupt is always sent on the backplane */
-
-/* sbtmerrlog */
-#define SBTMEL_CM 0x00000007 /* command */
-#define SBTMEL_CI 0x0000ff00 /* connection id */
-#define SBTMEL_EC 0x0f000000 /* error code */
-#define SBTMEL_ME 0x80000000 /* multiple error */
-
-/* sbimstate */
-#define SBIM_PC 0xf /* pipecount */
-#define SBIM_AP_MASK 0x30 /* arbitration policy */
-#define SBIM_AP_BOTH 0x00 /* use both timeslices and token */
-#define SBIM_AP_TS 0x10 /* use timeslices only */
-#define SBIM_AP_TK 0x20 /* use token only */
-#define SBIM_AP_RSV 0x30 /* reserved */
-#define SBIM_IBE 0x20000 /* inbanderror */
-#define SBIM_TO 0x40000 /* timeout */
-#define SBIM_BY 0x01800000 /* busy (sonics >= 2.3) */
-#define SBIM_RJ 0x02000000 /* reject (sonics >= 2.3) */
-
-/* sbtmstatelow */
-#define SBTML_RESET 0x0001 /* reset */
-#define SBTML_REJ_MASK 0x0006 /* reject field */
-#define SBTML_REJ 0x0002 /* reject */
-#define SBTML_TMPREJ 0x0004 /* temporary reject, for error recovery */
-
-#define SBTML_SICF_SHIFT 16 /* Shift to locate the SI control flags in sbtml */
-
-/* sbtmstatehigh */
-#define SBTMH_SERR 0x0001 /* serror */
-#define SBTMH_INT 0x0002 /* interrupt */
-#define SBTMH_BUSY 0x0004 /* busy */
-#define SBTMH_TO 0x0020 /* timeout (sonics >= 2.3) */
-
-#define SBTMH_SISF_SHIFT 16 /* Shift to locate the SI status flags in sbtmh */
-
-/* sbbwa0 */
-#define SBBWA_TAB0_MASK 0xffff /* lookup table 0 */
-#define SBBWA_TAB1_MASK 0xffff /* lookup table 1 */
-#define SBBWA_TAB1_SHIFT 16
-
-/* sbimconfiglow */
-#define SBIMCL_STO_MASK 0x7 /* service timeout */
-#define SBIMCL_RTO_MASK 0x70 /* request timeout */
-#define SBIMCL_RTO_SHIFT 4
-#define SBIMCL_CID_MASK 0xff0000 /* connection id */
-#define SBIMCL_CID_SHIFT 16
-
-/* sbimconfighigh */
-#define SBIMCH_IEM_MASK 0xc /* inband error mode */
-#define SBIMCH_TEM_MASK 0x30 /* timeout error mode */
-#define SBIMCH_TEM_SHIFT 4
-#define SBIMCH_BEM_MASK 0xc0 /* bus error mode */
-#define SBIMCH_BEM_SHIFT 6
-
-/* sbadmatch0 */
-#define SBAM_TYPE_MASK 0x3 /* address type */
-#define SBAM_AD64 0x4 /* reserved */
-#define SBAM_ADINT0_MASK 0xf8 /* type0 size */
-#define SBAM_ADINT0_SHIFT 3
-#define SBAM_ADINT1_MASK 0x1f8 /* type1 size */
-#define SBAM_ADINT1_SHIFT 3
-#define SBAM_ADINT2_MASK 0x1f8 /* type2 size */
-#define SBAM_ADINT2_SHIFT 3
-#define SBAM_ADEN 0x400 /* enable */
-#define SBAM_ADNEG 0x800 /* negative decode */
-#define SBAM_BASE0_MASK 0xffffff00 /* type0 base address */
-#define SBAM_BASE0_SHIFT 8
-#define SBAM_BASE1_MASK 0xfffff000 /* type1 base address for the core */
-#define SBAM_BASE1_SHIFT 12
-#define SBAM_BASE2_MASK 0xffff0000 /* type2 base address for the core */
-#define SBAM_BASE2_SHIFT 16
-
-/* sbtmconfiglow */
-#define SBTMCL_CD_MASK 0xff /* clock divide */
-#define SBTMCL_CO_MASK 0xf800 /* clock offset */
-#define SBTMCL_CO_SHIFT 11
-#define SBTMCL_IF_MASK 0xfc0000 /* interrupt flags */
-#define SBTMCL_IF_SHIFT 18
-#define SBTMCL_IM_MASK 0x3000000 /* interrupt mode */
-#define SBTMCL_IM_SHIFT 24
-
-/* sbtmconfighigh */
-#define SBTMCH_BM_MASK 0x3 /* busy mode */
-#define SBTMCH_RM_MASK 0x3 /* retry mode */
-#define SBTMCH_RM_SHIFT 2
-#define SBTMCH_SM_MASK 0x30 /* stop mode */
-#define SBTMCH_SM_SHIFT 4
-#define SBTMCH_EM_MASK 0x300 /* sb error mode */
-#define SBTMCH_EM_SHIFT 8
-#define SBTMCH_IM_MASK 0xc00 /* int mode */
-#define SBTMCH_IM_SHIFT 10
-
-/* sbbconfig */
-#define SBBC_LAT_MASK 0x3 /* sb latency */
-#define SBBC_MAX0_MASK 0xf0000 /* maxccntr0 */
-#define SBBC_MAX0_SHIFT 16
-#define SBBC_MAX1_MASK 0xf00000 /* maxccntr1 */
-#define SBBC_MAX1_SHIFT 20
-
-/* sbbstate */
-#define SBBS_SRD 0x1 /* st reg disable */
-#define SBBS_HRD 0x2 /* hold reg disable */
-
-/* sbidlow */
-#define SBIDL_CS_MASK 0x3 /* config space */
-#define SBIDL_AR_MASK 0x38 /* # address ranges supported */
-#define SBIDL_AR_SHIFT 3
-#define SBIDL_SYNCH 0x40 /* sync */
-#define SBIDL_INIT 0x80 /* initiator */
-#define SBIDL_MINLAT_MASK 0xf00 /* minimum backplane latency */
-#define SBIDL_MINLAT_SHIFT 8
-#define SBIDL_MAXLAT 0xf000 /* maximum backplane latency */
-#define SBIDL_MAXLAT_SHIFT 12
-#define SBIDL_FIRST 0x10000 /* this initiator is first */
-#define SBIDL_CW_MASK 0xc0000 /* cycle counter width */
-#define SBIDL_CW_SHIFT 18
-#define SBIDL_TP_MASK 0xf00000 /* target ports */
-#define SBIDL_TP_SHIFT 20
-#define SBIDL_IP_MASK 0xf000000 /* initiator ports */
-#define SBIDL_IP_SHIFT 24
-#define SBIDL_RV_MASK 0xf0000000 /* sonics backplane revision code */
-#define SBIDL_RV_SHIFT 28
-#define SBIDL_RV_2_2 0x00000000 /* version 2.2 or earlier */
-#define SBIDL_RV_2_3 0x10000000 /* version 2.3 */
-
-/* sbidhigh */
-#define SBIDH_RC_MASK 0x000f /* revision code */
-#define SBIDH_RCE_MASK 0x7000 /* revision code extension field */
-#define SBIDH_RCE_SHIFT 8
-#define SBCOREREV(sbidh) \
- ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
-#define SBIDH_CC_MASK 0x8ff0 /* core code */
-#define SBIDH_CC_SHIFT 4
-#define SBIDH_VC_MASK 0xffff0000 /* vendor code */
-#define SBIDH_VC_SHIFT 16
-
-#define SB_COMMIT 0xfd8 /* update buffered registers value */
-
-/* vendor codes */
-#define SB_VEND_BCM 0x4243 /* Broadcom's SB vendor code */
-
-#endif /* _SBCONFIG_H */
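For context, the sbidhigh macros removed above decode a core's identity in two pieces: SBCOREREV() merges the revision code with its extension field, while the core and vendor codes sit in separate bitfields. A minimal usage sketch, assuming the definitions above are in scope; the helper name is illustrative only:

        static void sb_dump_core_id(u32 sbidhigh)
        {
                u32 rev  = SBCOREREV(sbidhigh);        /* revision code + extension */
                u32 core = (sbidhigh & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT;
                u32 vend = (sbidhigh & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT;

                if (vend == SB_VEND_BCM)        /* Broadcom backplane core */
                        pr_info("core 0x%x rev %u\n", core, rev);
        }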
diff --git a/drivers/staging/brcm80211/include/sbhnddma.h b/drivers/staging/brcm80211/include/sbhnddma.h
deleted file mode 100644
index 08cb7f6e0d8..00000000000
--- a/drivers/staging/brcm80211/include/sbhnddma.h
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _sbhnddma_h_
-#define _sbhnddma_h_
-
-/* DMA structure:
- * supports two DMA engines: 32-bit or 64-bit addressing
- * basic DMA register set is per channel(transmit or receive)
- * a pair of channels is defined for convenience
- */
-
-/* 32 bits addressing */
-
-/* dma registers per channel(xmt or rcv) */
-typedef volatile struct {
- u32 control; /* enable, et al */
- u32 addr; /* descriptor ring base address (4K aligned) */
- u32 ptr; /* last descriptor posted to chip */
- u32 status; /* current active descriptor, et al */
-} dma32regs_t;
-
-typedef volatile struct {
- dma32regs_t xmt; /* dma tx channel */
- dma32regs_t rcv; /* dma rx channel */
-} dma32regp_t;
-
-typedef volatile struct { /* diag access */
- u32 fifoaddr; /* diag address */
- u32 fifodatalow; /* low 32bits of data */
- u32 fifodatahigh; /* high 32bits of data */
- u32 pad; /* reserved */
-} dma32diag_t;
-
-/*
- * DMA Descriptor
- * Descriptors are only read by the hardware, never written back.
- */
-typedef volatile struct {
- u32 ctrl; /* misc control bits & bufcount */
- u32 addr; /* data buffer address */
-} dma32dd_t;
-
-/*
- * Each descriptor ring must be 4096-byte aligned, and fit within a single 4096-byte page.
- */
-#define D32RINGALIGN_BITS 12
-#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS)
-#define D32RINGALIGN (1 << D32RINGALIGN_BITS)
-
-#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t))
-
-/* transmit channel control */
-#define XC_XE ((u32)1 << 0) /* transmit enable */
-#define XC_SE ((u32)1 << 1) /* transmit suspend request */
-#define XC_LE ((u32)1 << 2) /* loopback enable */
-#define XC_FL ((u32)1 << 4) /* flush request */
-#define XC_PD ((u32)1 << 11) /* parity check disable */
-#define XC_AE ((u32)3 << 16) /* address extension bits */
-#define XC_AE_SHIFT 16
-
-/* transmit descriptor table pointer */
-#define XP_LD_MASK 0xfff /* last valid descriptor */
-
-/* transmit channel status */
-#define XS_CD_MASK 0x0fff /* current descriptor pointer */
-#define XS_XS_MASK 0xf000 /* transmit state */
-#define XS_XS_SHIFT 12
-#define XS_XS_DISABLED 0x0000 /* disabled */
-#define XS_XS_ACTIVE 0x1000 /* active */
-#define XS_XS_IDLE 0x2000 /* idle wait */
-#define XS_XS_STOPPED 0x3000 /* stopped */
-#define XS_XS_SUSP 0x4000 /* suspend pending */
-#define XS_XE_MASK 0xf0000 /* transmit errors */
-#define XS_XE_SHIFT 16
-#define XS_XE_NOERR 0x00000 /* no error */
-#define XS_XE_DPE 0x10000 /* descriptor protocol error */
-#define XS_XE_DFU 0x20000 /* data fifo underrun */
-#define XS_XE_BEBR 0x30000 /* bus error on buffer read */
-#define XS_XE_BEDA 0x40000 /* bus error on descriptor access */
-#define XS_AD_MASK 0xfff00000 /* active descriptor */
-#define XS_AD_SHIFT 20
-
-/* receive channel control */
-#define RC_RE ((u32)1 << 0) /* receive enable */
-#define RC_RO_MASK 0xfe /* receive frame offset */
-#define RC_RO_SHIFT 1
-#define RC_FM ((u32)1 << 8) /* direct fifo receive (pio) mode */
-#define RC_SH ((u32)1 << 9) /* separate rx header descriptor enable */
-#define RC_OC ((u32)1 << 10) /* overflow continue */
-#define RC_PD ((u32)1 << 11) /* parity check disable */
-#define RC_AE ((u32)3 << 16) /* address extension bits */
-#define RC_AE_SHIFT 16
-
-/* receive descriptor table pointer */
-#define RP_LD_MASK 0xfff /* last valid descriptor */
-
-/* receive channel status */
-#define RS_CD_MASK 0x0fff /* current descriptor pointer */
-#define RS_RS_MASK 0xf000 /* receive state */
-#define RS_RS_SHIFT 12
-#define RS_RS_DISABLED 0x0000 /* disabled */
-#define RS_RS_ACTIVE 0x1000 /* active */
-#define RS_RS_IDLE 0x2000 /* idle wait */
-#define RS_RS_STOPPED 0x3000 /* reserved */
-#define RS_RE_MASK 0xf0000 /* receive errors */
-#define RS_RE_SHIFT 16
-#define RS_RE_NOERR 0x00000 /* no error */
-#define RS_RE_DPE 0x10000 /* descriptor protocol error */
-#define RS_RE_DFO 0x20000 /* data fifo overflow */
-#define RS_RE_BEBW 0x30000 /* bus error on buffer write */
-#define RS_RE_BEDA 0x40000 /* bus error on descriptor access */
-#define RS_AD_MASK 0xfff00000 /* active descriptor */
-#define RS_AD_SHIFT 20
-
-/* fifoaddr */
-#define FA_OFF_MASK 0xffff /* offset */
-#define FA_SEL_MASK 0xf0000 /* select */
-#define FA_SEL_SHIFT 16
-#define FA_SEL_XDD 0x00000 /* transmit dma data */
-#define FA_SEL_XDP 0x10000 /* transmit dma pointers */
-#define FA_SEL_RDD 0x40000 /* receive dma data */
-#define FA_SEL_RDP 0x50000 /* receive dma pointers */
-#define FA_SEL_XFD 0x80000 /* transmit fifo data */
-#define FA_SEL_XFP 0x90000 /* transmit fifo pointers */
-#define FA_SEL_RFD 0xc0000 /* receive fifo data */
-#define FA_SEL_RFP 0xd0000 /* receive fifo pointers */
-#define FA_SEL_RSD 0xe0000 /* receive frame status data */
-#define FA_SEL_RSP 0xf0000 /* receive frame status pointers */
-
-/* descriptor control flags */
-#define CTRL_BC_MASK 0x00001fff /* buffer byte count; real data length must be <= 4 KB */
-#define CTRL_AE ((u32)3 << 16) /* address extension bits */
-#define CTRL_AE_SHIFT 16
-#define CTRL_PARITY ((u32)3 << 18) /* parity bit */
-#define CTRL_EOT ((u32)1 << 28) /* end of descriptor table */
-#define CTRL_IOC ((u32)1 << 29) /* interrupt on completion */
-#define CTRL_EOF ((u32)1 << 30) /* end of frame */
-#define CTRL_SOF ((u32)1 << 31) /* start of frame */
-
-/* control flags in the range [27:20] are core-specific and not defined here */
-#define CTRL_CORE_MASK 0x0ff00000
-
-/* 64 bits addressing */
-
-/* dma registers per channel(xmt or rcv) */
-typedef volatile struct {
- u32 control; /* enable, et al */
- u32 ptr; /* last descriptor posted to chip */
- u32 addrlow; /* descriptor ring base address low 32-bits (8K aligned) */
- u32 addrhigh; /* descriptor ring base address bits 63:32 (8K aligned) */
- u32 status0; /* current descriptor, xmt state */
- u32 status1; /* active descriptor, xmt error */
-} dma64regs_t;
-
-typedef volatile struct {
- dma64regs_t tx; /* dma64 tx channel */
- dma64regs_t rx; /* dma64 rx channel */
-} dma64regp_t;
-
-typedef volatile struct { /* diag access */
- u32 fifoaddr; /* diag address */
- u32 fifodatalow; /* low 32bits of data */
- u32 fifodatahigh; /* high 32bits of data */
- u32 pad; /* reserved */
-} dma64diag_t;
-
-/*
- * DMA Descriptor
- * Descriptors are only read by the hardware, never written back.
- */
-typedef volatile struct {
- u32 ctrl1; /* misc control bits & bufcount */
- u32 ctrl2; /* buffer count and address extension */
- u32 addrlow; /* memory address of the data buffer, bits 31:0 */
- u32 addrhigh; /* memory address of the data buffer, bits 63:32 */
-} dma64dd_t;
-
-/*
- * Each descriptor ring must be 8 kB aligned, and fit within a contiguous 8 kB physical address range.
- */
-#define D64RINGALIGN_BITS 13
-#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
-#define D64RINGALIGN (1 << D64RINGALIGN_BITS)
-
-#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t))
-
-/* transmit channel control */
-#define D64_XC_XE 0x00000001 /* transmit enable */
-#define D64_XC_SE 0x00000002 /* transmit suspend request */
-#define D64_XC_LE 0x00000004 /* loopback enable */
-#define D64_XC_FL 0x00000010 /* flush request */
-#define D64_XC_PD 0x00000800 /* parity check disable */
-#define D64_XC_AE 0x00030000 /* address extension bits */
-#define D64_XC_AE_SHIFT 16
-
-/* transmit descriptor table pointer */
-#define D64_XP_LD_MASK 0x00000fff /* last valid descriptor */
-
-/* transmit channel status */
-#define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */
-#define D64_XS0_XS_MASK 0xf0000000 /* transmit state */
-#define D64_XS0_XS_SHIFT 28
-#define D64_XS0_XS_DISABLED 0x00000000 /* disabled */
-#define D64_XS0_XS_ACTIVE 0x10000000 /* active */
-#define D64_XS0_XS_IDLE 0x20000000 /* idle wait */
-#define D64_XS0_XS_STOPPED 0x30000000 /* stopped */
-#define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */
-
-#define D64_XS1_AD_MASK 0x00001fff /* active descriptor */
-#define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */
-#define D64_XS1_XE_SHIFT 28
-#define D64_XS1_XE_NOERR 0x00000000 /* no error */
-#define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */
-#define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */
-#define D64_XS1_XE_DTE 0x30000000 /* data transfer error */
-#define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */
-#define D64_XS1_XE_COREE 0x50000000 /* core error */
-
-/* receive channel control */
-#define D64_RC_RE 0x00000001 /* receive enable */
-#define D64_RC_RO_MASK 0x000000fe /* receive frame offset */
-#define D64_RC_RO_SHIFT 1
-#define D64_RC_FM 0x00000100 /* direct fifo receive (pio) mode */
-#define D64_RC_SH 0x00000200 /* separate rx header descriptor enable */
-#define D64_RC_OC 0x00000400 /* overflow continue */
-#define D64_RC_PD 0x00000800 /* parity check disable */
-#define D64_RC_AE 0x00030000 /* address extension bits */
-#define D64_RC_AE_SHIFT 16
-
-/* flags for dma controller */
-#define DMA_CTRL_PEN (1 << 0) /* parity enable */
-#define DMA_CTRL_ROC (1 << 1) /* rx overflow continue */
-#define DMA_CTRL_RXMULTI (1 << 2) /* allow rx scatter to multiple descriptors */
-#define DMA_CTRL_UNFRAMED (1 << 3) /* Unframed Rx/Tx data */
-
-/* receive descriptor table pointer */
-#define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */
-
-/* receive channel status */
-#define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */
-#define D64_RS0_RS_MASK 0xf0000000 /* receive state */
-#define D64_RS0_RS_SHIFT 28
-#define D64_RS0_RS_DISABLED 0x00000000 /* disabled */
-#define D64_RS0_RS_ACTIVE 0x10000000 /* active */
-#define D64_RS0_RS_IDLE 0x20000000 /* idle wait */
-#define D64_RS0_RS_STOPPED 0x30000000 /* stopped */
-#define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */
-
-#define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */
-#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */
-#define D64_RS1_RE_SHIFT 28
-#define D64_RS1_RE_NOERR 0x00000000 /* no error */
-#define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */
-#define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */
-#define D64_RS1_RE_DTE 0x30000000 /* data transfer error */
-#define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */
-#define D64_RS1_RE_COREE 0x50000000 /* core error */
-
-/* fifoaddr */
-#define D64_FA_OFF_MASK 0xffff /* offset */
-#define D64_FA_SEL_MASK 0xf0000 /* select */
-#define D64_FA_SEL_SHIFT 16
-#define D64_FA_SEL_XDD 0x00000 /* transmit dma data */
-#define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */
-#define D64_FA_SEL_RDD 0x40000 /* receive dma data */
-#define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */
-#define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */
-#define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */
-#define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */
-#define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */
-#define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */
-#define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */
-
-/* descriptor control flags 1 */
-#define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */
-#define D64_CTRL1_EOT ((u32)1 << 28) /* end of descriptor table */
-#define D64_CTRL1_IOC ((u32)1 << 29) /* interrupt on completion */
-#define D64_CTRL1_EOF ((u32)1 << 30) /* end of frame */
-#define D64_CTRL1_SOF ((u32)1 << 31) /* start of frame */
-
-/* descriptor control flags 2 */
-#define D64_CTRL2_BC_MASK 0x00007fff /* buffer byte count; real data length must be <= 16 KB */
-#define D64_CTRL2_AE 0x00030000 /* address extension bits */
-#define D64_CTRL2_AE_SHIFT 16
-#define D64_CTRL2_PARITY 0x00040000 /* parity bit */
-
-/* control flags in the range [27:20] are core-specific and not defined here */
-#define D64_CTRL_CORE_MASK 0x0ff00000
-
-#define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */
-#define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */
-#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1 */
-#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */
-
-/* receive frame status */
-typedef volatile struct {
- u16 len;
- u16 flags;
-} dma_rxh_t;
-
-#endif /* _sbhnddma_h_ */
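For context on the dma64 layout removed above: each descriptor is 16 bytes, so an 8 KB ring (D64MAXRINGSZ) holds at most D64MAXDD = 512 entries, and a single-buffer frame sets SOF, EOF and IOC in ctrl1 with the byte count in ctrl2. A minimal sketch, assuming these definitions are in scope; the function name is illustrative only:

        static void dma64_fill_dd(dma64dd_t *dd, u32 pa_low, u32 pa_high, u32 len)
        {
                dd->addrlow  = pa_low;                        /* buffer bits 31:0 */
                dd->addrhigh = pa_high;                       /* buffer bits 63:32 */
                dd->ctrl2    = len & D64_CTRL2_BC_MASK;       /* byte count, <= 16 KB */
                dd->ctrl1    = D64_CTRL1_SOF | D64_CTRL1_EOF | D64_CTRL1_IOC;
        }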
diff --git a/drivers/staging/brcm80211/include/sbsdio.h b/drivers/staging/brcm80211/include/sbsdio.h
deleted file mode 100644
index c7facd3795a..00000000000
--- a/drivers/staging/brcm80211/include/sbsdio.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _SBSDIO_H
-#define _SBSDIO_H
-
-#define SBSDIO_NUM_FUNCTION 3 /* as of sdiod rev 0, supports 3 functions */
-
-/* function 1 miscellaneous registers */
-#define SBSDIO_SPROM_CS 0x10000 /* sprom command and status */
-#define SBSDIO_SPROM_INFO 0x10001 /* sprom info register */
-#define SBSDIO_SPROM_DATA_LOW 0x10002 /* sprom indirect access data byte 0 */
-#define SBSDIO_SPROM_DATA_HIGH 0x10003 /* sprom indirect access data byte 1 */
-#define SBSDIO_SPROM_ADDR_LOW 0x10004 /* sprom indirect access addr byte 0 */
-#define SBSDIO_SPROM_ADDR_HIGH 0x10005 /* sprom indirect access addr byte 1 */
-#define SBSDIO_CHIP_CTRL_DATA 0x10006 /* xtal_pu (gpio) output */
-#define SBSDIO_CHIP_CTRL_EN 0x10007 /* xtal_pu (gpio) enable */
-#define SBSDIO_WATERMARK 0x10008 /* rev < 7, watermark for sdio device */
-#define SBSDIO_DEVICE_CTL 0x10009 /* control busy signal generation */
-
-/* registers introduced in rev 8, some content (mask/bits) defs in sbsdpcmdev.h */
-#define SBSDIO_FUNC1_SBADDRLOW 0x1000A /* SB Address Window Low (b15) */
-#define SBSDIO_FUNC1_SBADDRMID 0x1000B /* SB Address Window Mid (b23:b16) */
-#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C /* SB Address Window High (b31:b24) */
-#define SBSDIO_FUNC1_FRAMECTRL 0x1000D /* Frame Control (frame term/abort) */
-#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E /* ChipClockCSR (ALP/HT ctl/status) */
-#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F /* SdioPullUp (on cmd, d0-d2) */
-#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 /* Write Frame Byte Count Low */
-#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A /* Write Frame Byte Count High */
-#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B /* Read Frame Byte Count Low */
-#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* Read Frame Byte Count High */
-
-#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */
-#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001C /* f1 misc register end */
-
-/* SBSDIO_SPROM_CS */
-#define SBSDIO_SPROM_IDLE 0
-#define SBSDIO_SPROM_WRITE 1
-#define SBSDIO_SPROM_READ 2
-#define SBSDIO_SPROM_WEN 4
-#define SBSDIO_SPROM_WDS 7
-#define SBSDIO_SPROM_DONE 8
-
-/* SBSDIO_SPROM_INFO */
-#define SROM_SZ_MASK 0x03 /* SROM size, 1: 4k, 2: 16k */
-#define SROM_BLANK 0x04 /* deprecated in corerev 6 */
-#define SROM_OTP 0x80 /* OTP present */
-
-/* SBSDIO_CHIP_CTRL */
-#define SBSDIO_CHIP_CTRL_XTAL 0x01 /* or'd with onchip xtal_pu,
- * 1: power on oscillator
- * (for 4318 only)
- */
-/* SBSDIO_WATERMARK */
-#define SBSDIO_WATERMARK_MASK 0x7f /* number of words - 1 for sd device
- * to wait before sending data to host
- */
-
-/* SBSDIO_DEVICE_CTL */
-#define SBSDIO_DEVCTL_SETBUSY 0x01 /* 1: device will assert busy signal when
- * receiving CMD53
- */
-#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 /* 1: assertion of sdio interrupt is
- * synchronous to the sdio clock
- */
-#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 /* 1: mask all interrupts to host
- * except the chipActive (rev 8)
- */
-#define SBSDIO_DEVCTL_PADS_ISO 0x08 /* 1: isolate internal sdio signals, put
- * external pads in tri-state; requires
- * sdio bus power cycle to clear (rev 9)
- */
-#define SBSDIO_DEVCTL_SB_RST_CTL 0x30 /* Force SD->SB reset mapping (rev 11) */
-#define SBSDIO_DEVCTL_RST_CORECTL 0x00 /* Determined by CoreControl bit */
-#define SBSDIO_DEVCTL_RST_BPRESET 0x10 /* Force backplane reset */
-#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20 /* Force no backplane reset */
-
-/* SBSDIO_FUNC1_CHIPCLKCSR */
-#define SBSDIO_FORCE_ALP 0x01 /* Force ALP request to backplane */
-#define SBSDIO_FORCE_HT 0x02 /* Force HT request to backplane */
-#define SBSDIO_FORCE_ILP 0x04 /* Force ILP request to backplane */
-#define SBSDIO_ALP_AVAIL_REQ 0x08 /* Make ALP ready (power up xtal) */
-#define SBSDIO_HT_AVAIL_REQ 0x10 /* Make HT ready (power up PLL) */
-#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 /* Squelch clock requests from HW */
-#define SBSDIO_ALP_AVAIL 0x40 /* Status: ALP is ready */
-#define SBSDIO_HT_AVAIL 0x80 /* Status: HT is ready */
-/* In rev8, actual avail bits followed original docs */
-#define SBSDIO_Rev8_HT_AVAIL 0x40
-#define SBSDIO_Rev8_ALP_AVAIL 0x80
-
-#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
-#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
-#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
-#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
-#define SBSDIO_CLKAV(regval, alponly) (SBSDIO_ALPAV(regval) && \
- (alponly ? 1 : SBSDIO_HTAV(regval)))
-
-/* SBSDIO_FUNC1_SDIOPULLUP */
-#define SBSDIO_PULLUP_D0 0x01 /* Enable D0/MISO pullup */
-#define SBSDIO_PULLUP_D1 0x02 /* Enable D1/INT# pullup */
-#define SBSDIO_PULLUP_D2 0x04 /* Enable D2 pullup */
-#define SBSDIO_PULLUP_CMD 0x08 /* Enable CMD/MOSI pullup */
-#define SBSDIO_PULLUP_ALL 0x0f /* All valid bits */
-
-/* function 1 OCP space */
-#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF /* sb offset addr is <= 15 bits, 32k */
-#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000
-#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 /* with b15, maps to 32-bit SB access */
-
-/* some duplication with sbsdpcmdev.h here */
-/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
-#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid bits in SBADDRLOW */
-#define SBSDIO_SBADDRMID_MASK 0xff /* Valid bits in SBADDRMID */
-#define SBSDIO_SBADDRHIGH_MASK 0xffU /* Valid bits in SBADDRHIGH */
-#define SBSDIO_SBWINDOW_MASK 0xffff8000 /* Address bits from SBADDR regs */
-
-/* direct(mapped) cis space */
-#define SBSDIO_CIS_BASE_COMMON 0x1000 /* MAPPED common CIS address */
-#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */
-#define SBSDIO_OTP_CIS_SIZE_LIMIT 0x078 /* maximum bytes OTP CIS */
-
-#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF /* cis offset addr is < 17 bits */
-
-#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 /* manfid tuple length, include tuple,
- * link bytes
- */
-
-/* indirect cis access (in sprom) */
-#define SBSDIO_SPROM_CIS_OFFSET 0x8 /* 8 control bytes first, CIS starts from
- * 8th byte
- */
-
-#define SBSDIO_BYTEMODE_DATALEN_MAX 64 /* sdio byte mode: maximum length of one
- * data command
- */
-
-#define SBSDIO_CORE_ADDR_MASK 0x1FFFF /* sdio core function one address mask */
-
-#endif /* _SBSDIO_H */
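For context, the ChipClockCSR bits removed above implement a request/poll handshake for backplane clocks: write a request bit, then poll the availability bits with the SBSDIO_CLKAV() helper. A hedged sketch of the sequence; sdio_f1_read()/sdio_f1_write() are placeholders for the host's function-1 byte accessors and are not part of this header:

        static void sdio_request_ht_clock(void)
        {
                u8 csr;

                /* ask the device to power up the PLL and provide the HT clock */
                sdio_f1_write(SBSDIO_FUNC1_CHIPCLKCSR, SBSDIO_HT_AVAIL_REQ);
                do {
                        csr = sdio_f1_read(SBSDIO_FUNC1_CHIPCLKCSR);
                } while (!SBSDIO_CLKAV(csr, 0));        /* 0: require HT, not just ALP */
        }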
diff --git a/drivers/staging/brcm80211/include/sbsdpcmdev.h b/drivers/staging/brcm80211/include/sbsdpcmdev.h
deleted file mode 100644
index afd35811d4a..00000000000
--- a/drivers/staging/brcm80211/include/sbsdpcmdev.h
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _sbsdpcmdev_h_
-#define _sbsdpcmdev_h_
-
-/* cpp contortions to concatenate w/arg prescan */
-#ifndef PAD
-#define _PADLINE(line) pad ## line
-#define _XSTR(line) _PADLINE(line)
-#define PAD _XSTR(__LINE__)
-#endif /* PAD */
-
-typedef volatile struct {
- dma64regs_t xmt; /* dma tx */
- u32 PAD[2];
- dma64regs_t rcv; /* dma rx */
- u32 PAD[2];
-} dma64p_t;
-
-/* dma64 sdiod corerev >= 1 */
-typedef volatile struct {
- dma64p_t dma64regs[2];
- dma64diag_t dmafifo; /* DMA Diagnostic Regs, 0x280-0x28c */
- u32 PAD[92];
-} sdiodma64_t;
-
-/* dma32 sdiod corerev == 0 */
-typedef volatile struct {
- dma32regp_t dma32regs[2]; /* dma tx & rx, 0x200-0x23c */
- dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x240-0x24c */
- u32 PAD[108];
-} sdiodma32_t;
-
-/* dma32 regs for pcmcia core */
-typedef volatile struct {
- dma32regp_t dmaregs; /* DMA Regs, 0x200-0x21c, rev8 */
- dma32diag_t dmafifo; /* DMA Diagnostic Regs, 0x220-0x22c */
- u32 PAD[116];
-} pcmdma32_t;
-
-/* core registers */
-typedef volatile struct {
- u32 corecontrol; /* CoreControl, 0x000, rev8 */
- u32 corestatus; /* CoreStatus, 0x004, rev8 */
- u32 PAD[1];
- u32 biststatus; /* BistStatus, 0x00c, rev8 */
-
- /* PCMCIA access */
- u16 pcmciamesportaladdr; /* PcmciaMesPortalAddr, 0x010, rev8 */
- u16 PAD[1];
- u16 pcmciamesportalmask; /* PcmciaMesPortalMask, 0x014, rev8 */
- u16 PAD[1];
- u16 pcmciawrframebc; /* PcmciaWrFrameBC, 0x018, rev8 */
- u16 PAD[1];
- u16 pcmciaunderflowtimer; /* PcmciaUnderflowTimer, 0x01c, rev8 */
- u16 PAD[1];
-
- /* interrupt */
- u32 intstatus; /* IntStatus, 0x020, rev8 */
- u32 hostintmask; /* IntHostMask, 0x024, rev8 */
- u32 intmask; /* IntSbMask, 0x028, rev8 */
- u32 sbintstatus; /* SBIntStatus, 0x02c, rev8 */
- u32 sbintmask; /* SBIntMask, 0x030, rev8 */
- u32 funcintmask; /* SDIO Function Interrupt Mask, SDIO rev4 */
- u32 PAD[2];
- u32 tosbmailbox; /* ToSBMailbox, 0x040, rev8 */
- u32 tohostmailbox; /* ToHostMailbox, 0x044, rev8 */
- u32 tosbmailboxdata; /* ToSbMailboxData, 0x048, rev8 */
- u32 tohostmailboxdata; /* ToHostMailboxData, 0x04c, rev8 */
-
- /* synchronized access to registers in SDIO clock domain */
- u32 sdioaccess; /* SdioAccess, 0x050, rev8 */
- u32 PAD[3];
-
- /* PCMCIA frame control */
- u8 pcmciaframectrl; /* pcmciaFrameCtrl, 0x060, rev8 */
- u8 PAD[3];
- u8 pcmciawatermark; /* pcmciaWaterMark, 0x064, rev8 */
- u8 PAD[155];
-
- /* interrupt batching control */
- u32 intrcvlazy; /* IntRcvLazy, 0x100, rev8 */
- u32 PAD[3];
-
- /* counters */
- u32 cmd52rd; /* Cmd52RdCount, 0x110, rev8, SDIO: cmd52 reads */
- u32 cmd52wr; /* Cmd52WrCount, 0x114, rev8, SDIO: cmd52 writes */
- u32 cmd53rd; /* Cmd53RdCount, 0x118, rev8, SDIO: cmd53 reads */
- u32 cmd53wr; /* Cmd53WrCount, 0x11c, rev8, SDIO: cmd53 writes */
- u32 abort; /* AbortCount, 0x120, rev8, SDIO: aborts */
- u32 datacrcerror; /* DataCrcErrorCount, 0x124, rev8, SDIO: frames w/bad CRC */
- u32 rdoutofsync; /* RdOutOfSyncCount, 0x128, rev8, SDIO/PCMCIA: Rd Frm OOS */
- u32 wroutofsync; /* WrOutOfSyncCount, 0x12c, rev8, SDIO/PCMCIA: Wr Frm OOS */
- u32 writebusy; /* WriteBusyCount, 0x130, rev8, SDIO: dev asserted "busy" */
- u32 readwait; /* ReadWaitCount, 0x134, rev8, SDIO: read: no data avail */
- u32 readterm; /* ReadTermCount, 0x138, rev8, SDIO: rd frm terminates */
- u32 writeterm; /* WriteTermCount, 0x13c, rev8, SDIO: wr frm terminates */
- u32 PAD[40];
- u32 clockctlstatus; /* ClockCtlStatus, 0x1e0, rev8 */
- u32 PAD[7];
-
- /* DMA engines */
- volatile union {
- pcmdma32_t pcm32;
- sdiodma32_t sdiod32;
- sdiodma64_t sdiod64;
- } dma;
-
- /* SDIO/PCMCIA CIS region */
- char cis[512]; /* 512 byte CIS, 0x400-0x5ff, rev6 */
-
- /* PCMCIA function control registers */
- char pcmciafcr[256]; /* PCMCIA FCR, 0x600-6ff, rev6 */
- u16 PAD[55];
-
- /* PCMCIA backplane access */
- u16 backplanecsr; /* BackplaneCSR, 0x76E, rev6 */
- u16 backplaneaddr0; /* BackplaneAddr0, 0x770, rev6 */
- u16 backplaneaddr1; /* BackplaneAddr1, 0x772, rev6 */
- u16 backplaneaddr2; /* BackplaneAddr2, 0x774, rev6 */
- u16 backplaneaddr3; /* BackplaneAddr3, 0x776, rev6 */
- u16 backplanedata0; /* BackplaneData0, 0x778, rev6 */
- u16 backplanedata1; /* BackplaneData1, 0x77a, rev6 */
- u16 backplanedata2; /* BackplaneData2, 0x77c, rev6 */
- u16 backplanedata3; /* BackplaneData3, 0x77e, rev6 */
- u16 PAD[31];
-
- /* sprom "size" & "blank" info */
- u16 spromstatus; /* SPROMStatus, 0x7BE, rev2 */
- u32 PAD[464];
-
- /* Sonics SiliconBackplane registers */
- sbconfig_t sbconfig; /* SbConfig Regs, 0xf00-0xfff, rev8 */
-} sdpcmd_regs_t;
-
-/* corecontrol */
-#define CC_CISRDY (1 << 0) /* CIS Ready */
-#define CC_BPRESEN (1 << 1) /* CCCR RES signal causes backplane reset */
-#define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
-#define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation bit (rev 11) */
-#define CC_XMTDATAAVAIL_MODE (1 << 4) /* data avail generates an interrupt */
-#define CC_XMTDATAAVAIL_CTRL (1 << 5) /* data avail interrupt ctrl */
-
-/* corestatus */
-#define CS_PCMCIAMODE (1 << 0) /* Device Mode; 0=SDIO, 1=PCMCIA */
-#define CS_SMARTDEV (1 << 1) /* 1=smartDev enabled */
-#define CS_F2ENABLED (1 << 2) /* 1=host has enabled the device */
-
-#define PCMCIA_MES_PA_MASK 0x7fff /* PCMCIA Message Portal Address Mask */
-#define PCMCIA_MES_PM_MASK 0x7fff /* PCMCIA Message Portal Mask Mask */
-#define PCMCIA_WFBC_MASK 0xffff /* PCMCIA Write Frame Byte Count Mask */
-#define PCMCIA_UT_MASK 0x07ff /* PCMCIA Underflow Timer Mask */
-
-/* intstatus */
-#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
-#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
-#define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
-#define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
-#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
-#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
-#define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
-#define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
-#define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
-#define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
-#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
-#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
-#define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
-#define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
-#define I_PC (1 << 10) /* descriptor error */
-#define I_PD (1 << 11) /* data error */
-#define I_DE (1 << 12) /* Descriptor protocol Error */
-#define I_RU (1 << 13) /* Receive descriptor Underflow */
-#define I_RO (1 << 14) /* Receive fifo Overflow */
-#define I_XU (1 << 15) /* Transmit fifo Underflow */
-#define I_RI (1 << 16) /* Receive Interrupt */
-#define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
-#define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
-#define I_XI (1 << 24) /* Transmit Interrupt */
-#define I_RF_TERM (1 << 25) /* Read Frame Terminate */
-#define I_WF_TERM (1 << 26) /* Write Frame Terminate */
-#define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
-#define I_SBINT (1 << 28) /* sbintstatus Interrupt */
-#define I_CHIPACTIVE (1 << 29) /* chip transitioned from doze to active state */
-#define I_SRESET (1 << 30) /* CCCR RES interrupt */
-#define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
-#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU) /* DMA Errors */
-#define I_DMA (I_RI | I_XI | I_ERRORS)
-
-/* sbintstatus */
-#define I_SB_SERR (1 << 8) /* Backplane SError (write) */
-#define I_SB_RESPERR (1 << 9) /* Backplane Response Error (read) */
-#define I_SB_SPROMERR (1 << 10) /* Error accessing the sprom */
-
-/* sdioaccess */
-#define SDA_DATA_MASK 0x000000ff /* Read/Write Data Mask */
-#define SDA_ADDR_MASK 0x000fff00 /* Read/Write Address Mask */
-#define SDA_ADDR_SHIFT 8 /* Read/Write Address Shift */
-#define SDA_WRITE 0x01000000 /* Write bit */
-#define SDA_READ 0x00000000 /* Write bit cleared for Read */
-#define SDA_BUSY 0x80000000 /* Busy bit */
-
-/* sdioaccess-accessible register address spaces */
-#define SDA_CCCR_SPACE 0x000 /* sdioAccess CCCR register space */
-#define SDA_F1_FBR_SPACE 0x100 /* sdioAccess F1 FBR register space */
-#define SDA_F2_FBR_SPACE 0x200 /* sdioAccess F2 FBR register space */
-#define SDA_F1_REG_SPACE 0x300 /* sdioAccess F1 core-specific register space */
-
-/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */
-#define SDA_CHIPCONTROLDATA 0x006 /* ChipControlData */
-#define SDA_CHIPCONTROLENAB 0x007 /* ChipControlEnable */
-#define SDA_F2WATERMARK 0x008 /* Function 2 Watermark */
-#define SDA_DEVICECONTROL 0x009 /* DeviceControl */
-#define SDA_SBADDRLOW 0x00a /* SbAddrLow */
-#define SDA_SBADDRMID 0x00b /* SbAddrMid */
-#define SDA_SBADDRHIGH 0x00c /* SbAddrHigh */
-#define SDA_FRAMECTRL 0x00d /* FrameCtrl */
-#define SDA_CHIPCLOCKCSR 0x00e /* ChipClockCSR */
-#define SDA_SDIOPULLUP 0x00f /* SdioPullUp */
-#define SDA_SDIOWRFRAMEBCLOW 0x019 /* SdioWrFrameBCLow */
-#define SDA_SDIOWRFRAMEBCHIGH 0x01a /* SdioWrFrameBCHigh */
-#define SDA_SDIORDFRAMEBCLOW 0x01b /* SdioRdFrameBCLow */
-#define SDA_SDIORDFRAMEBCHIGH 0x01c /* SdioRdFrameBCHigh */
-
-/* SDA_F2WATERMARK */
-#define SDA_F2WATERMARK_MASK 0x7f /* F2Watermark Mask */
-
-/* SDA_SBADDRLOW */
-#define SDA_SBADDRLOW_MASK 0x80 /* SbAddrLow Mask */
-
-/* SDA_SBADDRMID */
-#define SDA_SBADDRMID_MASK 0xff /* SbAddrMid Mask */
-
-/* SDA_SBADDRHIGH */
-#define SDA_SBADDRHIGH_MASK 0xff /* SbAddrHigh Mask */
-
-/* SDA_FRAMECTRL */
-#define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
-#define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
-#define SFC_CRC4WOOS (1 << 2) /* HW reports CRC error for write out of sync */
-#define SFC_ABORTALL (1 << 3) /* Abort cancels all in-progress frames */
-
-/* pcmciaframectrl */
-#define PFC_RF_TERM (1 << 0) /* Read Frame Terminate */
-#define PFC_WF_TERM (1 << 1) /* Write Frame Terminate */
-
-/* intrcvlazy */
-#define IRL_TO_MASK 0x00ffffff /* timeout */
-#define IRL_FC_MASK 0xff000000 /* frame count */
-#define IRL_FC_SHIFT 24 /* frame count */
-
-/* rx header */
-typedef volatile struct {
- u16 len;
- u16 flags;
-} sdpcmd_rxh_t;
-
-/* rx header flags */
-#define RXF_CRC 0x0001 /* CRC error detected */
-#define RXF_WOOS 0x0002 /* write frame out of sync */
-#define RXF_WF_TERM 0x0004 /* write frame terminated */
-#define RXF_ABORT 0x0008 /* write frame aborted */
-#define RXF_DISCARD (RXF_CRC | RXF_WOOS | RXF_WF_TERM | RXF_ABORT) /* bad frame */
-
-/* HW frame tag */
-#define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */
-
-#endif /* _sbsdpcmdev_h_ */
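For context, the sdioaccess register removed above provides synchronized indirect access to registers in the SDIO clock domain: pack the address and data, set SDA_WRITE, then poll SDA_BUSY. A minimal sketch, assuming regs points at a mapped sdpcmd_regs_t; the helper name is illustrative only:

        static void sdpcm_sdioaccess_write(sdpcmd_regs_t *regs, u32 addr, u8 data)
        {
                regs->sdioaccess = SDA_WRITE |
                                   ((addr << SDA_ADDR_SHIFT) & SDA_ADDR_MASK) |
                                   (data & SDA_DATA_MASK);
                while (regs->sdioaccess & SDA_BUSY)
                        ;       /* wait for the access to complete */
        }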
diff --git a/drivers/staging/brcm80211/include/sdio.h b/drivers/staging/brcm80211/include/sdio.h
deleted file mode 100644
index 670e379b9aa..00000000000
--- a/drivers/staging/brcm80211/include/sdio.h
+++ /dev/null
@@ -1,552 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _SDIO_H
-#define _SDIO_H
-
-#ifdef BCMSDIO
-
-/* CCCR structure for function 0 */
-typedef volatile struct {
- u8 cccr_sdio_rev; /* RO, cccr and sdio revision */
- u8 sd_rev; /* RO, sd spec revision */
- u8 io_en; /* I/O enable */
- u8 io_rdy; /* I/O ready reg */
- u8 intr_ctl; /* Master and per function interrupt enable control */
- u8 intr_status; /* RO, interrupt pending status */
- u8 io_abort; /* read/write abort or reset all functions */
- u8 bus_inter; /* bus interface control */
- u8 capability; /* RO, card capability */
-
- u8 cis_base_low; /* 0x9 RO, common CIS base address, LSB */
- u8 cis_base_mid;
- u8 cis_base_high; /* 0xB RO, common CIS base address, MSB */
-
- /* suspend/resume registers */
- u8 bus_suspend; /* 0xC */
- u8 func_select; /* 0xD */
- u8 exec_flag; /* 0xE */
- u8 ready_flag; /* 0xF */
-
- u8 fn0_blk_size[2]; /* 0x10(LSB), 0x11(MSB) */
-
- u8 power_control; /* 0x12 (SDIO version 1.10) */
-
- u8 speed_control; /* 0x13 */
-} sdio_regs_t;
-
-/* SDIO Device CCCR offsets */
-#define SDIOD_CCCR_REV 0x00
-#define SDIOD_CCCR_SDREV 0x01
-#define SDIOD_CCCR_IOEN 0x02
-#define SDIOD_CCCR_IORDY 0x03
-#define SDIOD_CCCR_INTEN 0x04
-#define SDIOD_CCCR_INTPEND 0x05
-#define SDIOD_CCCR_IOABORT 0x06
-#define SDIOD_CCCR_BICTRL 0x07
-#define SDIOD_CCCR_CAPABLITIES 0x08
-#define SDIOD_CCCR_CISPTR_0 0x09
-#define SDIOD_CCCR_CISPTR_1 0x0A
-#define SDIOD_CCCR_CISPTR_2 0x0B
-#define SDIOD_CCCR_BUSSUSP 0x0C
-#define SDIOD_CCCR_FUNCSEL 0x0D
-#define SDIOD_CCCR_EXECFLAGS 0x0E
-#define SDIOD_CCCR_RDYFLAGS 0x0F
-#define SDIOD_CCCR_BLKSIZE_0 0x10
-#define SDIOD_CCCR_BLKSIZE_1 0x11
-#define SDIOD_CCCR_POWER_CONTROL 0x12
-#define SDIOD_CCCR_SPEED_CONTROL 0x13
-
-/* Broadcom extensions (corerev >= 1) */
-#define SDIOD_CCCR_BRCM_SEPINT 0xf2
-
-/* cccr_sdio_rev */
-#define SDIO_REV_SDIOID_MASK 0xf0 /* SDIO spec revision number */
-#define SDIO_REV_CCCRID_MASK 0x0f /* CCCR format version number */
-
-/* sd_rev */
-#define SD_REV_PHY_MASK 0x0f /* SD format version number */
-
-/* io_en */
-#define SDIO_FUNC_ENABLE_1 0x02 /* function 1 I/O enable */
-#define SDIO_FUNC_ENABLE_2 0x04 /* function 2 I/O enable */
-
-/* io_rdys */
-#define SDIO_FUNC_READY_1 0x02 /* function 1 I/O ready */
-#define SDIO_FUNC_READY_2 0x04 /* function 2 I/O ready */
-
-/* intr_ctl */
-#define INTR_CTL_MASTER_EN 0x1 /* interrupt enable master */
-#define INTR_CTL_FUNC1_EN 0x2 /* interrupt enable for function 1 */
-#define INTR_CTL_FUNC2_EN 0x4 /* interrupt enable for function 2 */
-
-/* intr_status */
-#define INTR_STATUS_FUNC1 0x2 /* interrupt pending for function 1 */
-#define INTR_STATUS_FUNC2 0x4 /* interrupt pending for function 2 */
-
-/* io_abort */
-#define IO_ABORT_RESET_ALL 0x08 /* I/O card reset */
-#define IO_ABORT_FUNC_MASK 0x07 /* abort selection: function x */
-
-/* bus_inter */
-#define BUS_CARD_DETECT_DIS 0x80 /* Card Detect disable */
-#define BUS_SPI_CONT_INTR_CAP 0x40 /* support continuous SPI interrupt */
-#define BUS_SPI_CONT_INTR_EN 0x20 /* continuous SPI interrupt enable */
-#define BUS_SD_DATA_WIDTH_MASK 0x03 /* bus width mask */
-#define BUS_SD_DATA_WIDTH_4BIT 0x02 /* bus width 4-bit mode */
-#define BUS_SD_DATA_WIDTH_1BIT 0x00 /* bus width 1-bit mode */
-
-/* capability */
-#define SDIO_CAP_4BLS 0x80 /* 4-bit support for low speed card */
-#define SDIO_CAP_LSC 0x40 /* low speed card */
-#define SDIO_CAP_E4MI 0x20 /* enable interrupt between blocks of data in 4-bit mode */
-#define SDIO_CAP_S4MI 0x10 /* support interrupt between blocks of data in 4-bit mode */
-#define SDIO_CAP_SBS 0x08 /* support suspend/resume */
-#define SDIO_CAP_SRW 0x04 /* support read wait */
-#define SDIO_CAP_SMB 0x02 /* support multi-block transfer */
-#define SDIO_CAP_SDC 0x01 /* Support Direct commands during multi-byte transfer */
-
-/* power_control */
-#define SDIO_POWER_SMPC 0x01 /* supports master power control (RO) */
-#define SDIO_POWER_EMPC 0x02 /* enable master power control (allow > 200mA) (RW) */
-
-/* speed_control (control device entry into high-speed clocking mode) */
-#define SDIO_SPEED_SHS 0x01 /* supports high-speed [clocking] mode (RO) */
-#define SDIO_SPEED_EHS 0x02 /* enable high-speed [clocking] mode (RW) */
-
-/* brcm sepint */
-#define SDIO_SEPINT_MASK 0x01 /* route sdpcmdev intr onto separate pad (chip-specific) */
-#define SDIO_SEPINT_OE 0x02 /* 1 asserts output enable for above pad */
-#define SDIO_SEPINT_ACT_HI 0x04 /* use active high interrupt level instead of active low */
-
-/* FBR structure for function 1-7, FBR addresses and register offsets */
-typedef volatile struct {
- u8 devctr; /* device interface, CSA control */
- u8 ext_dev; /* extended standard I/O device type code */
- u8 pwr_sel; /* power selection support */
- u8 PAD[6]; /* reserved */
-
- u8 cis_low; /* CIS LSB */
- u8 cis_mid;
- u8 cis_high; /* CIS MSB */
- u8 csa_low; /* code storage area, LSB */
- u8 csa_mid;
- u8 csa_high; /* code storage area, MSB */
- u8 csa_dat_win; /* data access window to function */
-
- u8 fnx_blk_size[2]; /* block size, little endian */
-} sdio_fbr_t;
-
-/* Maximum number of I/O funcs */
-#define SDIOD_MAX_IOFUNCS 7
-
-/* SDIO Device FBR Start Address */
-#define SDIOD_FBR_STARTADDR 0x100
-
-/* SDIO Device FBR Size */
-#define SDIOD_FBR_SIZE 0x100
-
-/* Macro to calculate FBR register base */
-#define SDIOD_FBR_BASE(n) ((n) * 0x100)
-
-/* Function register offsets */
-#define SDIOD_FBR_DEVCTR 0x00 /* basic info for function */
-#define SDIOD_FBR_EXT_DEV 0x01 /* extended I/O device code */
-#define SDIOD_FBR_PWR_SEL 0x02 /* power selection bits */
-
-/* SDIO Function CIS ptr offset */
-#define SDIOD_FBR_CISPTR_0 0x09
-#define SDIOD_FBR_CISPTR_1 0x0A
-#define SDIOD_FBR_CISPTR_2 0x0B
-
-/* Code Storage Area pointer */
-#define SDIOD_FBR_CSA_ADDR_0 0x0C
-#define SDIOD_FBR_CSA_ADDR_1 0x0D
-#define SDIOD_FBR_CSA_ADDR_2 0x0E
-#define SDIOD_FBR_CSA_DATA 0x0F
-
-/* SDIO Function I/O Block Size */
-#define SDIOD_FBR_BLKSIZE_0 0x10
-#define SDIOD_FBR_BLKSIZE_1 0x11
-
-/* devctr */
-#define SDIOD_FBR_DEVCTR_DIC 0x0f /* device interface code */
-#define SDIOD_FBR_DECVTR_CSA 0x40 /* CSA support flag */
-#define SDIOD_FBR_DEVCTR_CSA_EN 0x80 /* CSA enabled */
-/* interface codes */
-#define SDIOD_DIC_NONE 0 /* SDIO standard interface is not supported */
-#define SDIOD_DIC_UART 1
-#define SDIOD_DIC_BLUETOOTH_A 2
-#define SDIOD_DIC_BLUETOOTH_B 3
-#define SDIOD_DIC_GPS 4
-#define SDIOD_DIC_CAMERA 5
-#define SDIOD_DIC_PHS 6
-#define SDIOD_DIC_WLAN 7
-#define SDIOD_DIC_EXT 0xf /* extended device interface, read ext_dev register */
-
-/* pwr_sel */
-#define SDIOD_PWR_SEL_SPS 0x01 /* supports power selection */
-#define SDIOD_PWR_SEL_EPS 0x02 /* enable power selection (low-current mode) */
-
-/* misc defines */
-#define SDIO_FUNC_0 0
-#define SDIO_FUNC_1 1
-#define SDIO_FUNC_2 2
-#define SDIO_FUNC_3 3
-#define SDIO_FUNC_4 4
-#define SDIO_FUNC_5 5
-#define SDIO_FUNC_6 6
-#define SDIO_FUNC_7 7
-
-#define SD_CARD_TYPE_UNKNOWN 0 /* bad type or unrecognized */
-#define SD_CARD_TYPE_IO 1 /* IO only card */
-#define SD_CARD_TYPE_MEMORY 2 /* memory only card */
-#define SD_CARD_TYPE_COMBO 3 /* IO and memory combo card */
-
-#define SDIO_MAX_BLOCK_SIZE 2048 /* maximum block size for block mode operation */
-#define SDIO_MIN_BLOCK_SIZE 1 /* minimum block size for block mode operation */
-
-/* Card registers: status bit position */
-#define CARDREG_STATUS_BIT_OUTOFRANGE 31
-#define CARDREG_STATUS_BIT_COMCRCERROR 23
-#define CARDREG_STATUS_BIT_ILLEGALCOMMAND 22
-#define CARDREG_STATUS_BIT_ERROR 19
-#define CARDREG_STATUS_BIT_IOCURRENTSTATE3 12
-#define CARDREG_STATUS_BIT_IOCURRENTSTATE2 11
-#define CARDREG_STATUS_BIT_IOCURRENTSTATE1 10
-#define CARDREG_STATUS_BIT_IOCURRENTSTATE0 9
-#define CARDREG_STATUS_BIT_FUN_NUM_ERROR 4
-
-#define SD_CMD_GO_IDLE_STATE 0 /* mandatory for SDIO */
-#define SD_CMD_SEND_OPCOND 1
-#define SD_CMD_MMC_SET_RCA 3
-#define SD_CMD_IO_SEND_OP_COND 5 /* mandatory for SDIO */
-#define SD_CMD_SELECT_DESELECT_CARD 7
-#define SD_CMD_SEND_CSD 9
-#define SD_CMD_SEND_CID 10
-#define SD_CMD_STOP_TRANSMISSION 12
-#define SD_CMD_SEND_STATUS 13
-#define SD_CMD_GO_INACTIVE_STATE 15
-#define SD_CMD_SET_BLOCKLEN 16
-#define SD_CMD_READ_SINGLE_BLOCK 17
-#define SD_CMD_READ_MULTIPLE_BLOCK 18
-#define SD_CMD_WRITE_BLOCK 24
-#define SD_CMD_WRITE_MULTIPLE_BLOCK 25
-#define SD_CMD_PROGRAM_CSD 27
-#define SD_CMD_SET_WRITE_PROT 28
-#define SD_CMD_CLR_WRITE_PROT 29
-#define SD_CMD_SEND_WRITE_PROT 30
-#define SD_CMD_ERASE_WR_BLK_START 32
-#define SD_CMD_ERASE_WR_BLK_END 33
-#define SD_CMD_ERASE 38
-#define SD_CMD_LOCK_UNLOCK 42
-#define SD_CMD_IO_RW_DIRECT 52 /* mandatory for SDIO */
-#define SD_CMD_IO_RW_EXTENDED 53 /* mandatory for SDIO */
-#define SD_CMD_APP_CMD 55
-#define SD_CMD_GEN_CMD 56
-#define SD_CMD_READ_OCR 58
-#define SD_CMD_CRC_ON_OFF 59 /* mandatory for SDIO */
-#define SD_ACMD_SD_STATUS 13
-#define SD_ACMD_SEND_NUM_WR_BLOCKS 22
-#define SD_ACMD_SET_WR_BLOCK_ERASE_CNT 23
-#define SD_ACMD_SD_SEND_OP_COND 41
-#define SD_ACMD_SET_CLR_CARD_DETECT 42
-#define SD_ACMD_SEND_SCR 51
-
-/* argument for SD_CMD_IO_RW_DIRECT and SD_CMD_IO_RW_EXTENDED */
-#define SD_IO_OP_READ 0 /* Read_Write: Read */
-#define SD_IO_OP_WRITE 1 /* Read_Write: Write */
-#define SD_IO_RW_NORMAL 0 /* no RAW */
-#define SD_IO_RW_RAW 1 /* RAW */
-#define SD_IO_BYTE_MODE 0 /* Byte Mode */
-#define SD_IO_BLOCK_MODE 1 /* BlockMode */
-#define SD_IO_FIXED_ADDRESS 0 /* fixed address */
-#define SD_IO_INCREMENT_ADDRESS 1 /* IncrementAddress */
-
-/* build SD_CMD_IO_RW_DIRECT Argument */
-#define SDIO_IO_RW_DIRECT_ARG(rw, raw, func, addr, data) \
- ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((raw) & 1) << 27) | \
- (((addr) & 0x1FFFF) << 9) | ((data) & 0xFF))
-
-/* build SD_CMD_IO_RW_EXTENDED Argument */
-#define SDIO_IO_RW_EXTENDED_ARG(rw, blk, func, addr, inc_addr, count) \
- ((((rw) & 1) << 31) | (((func) & 0x7) << 28) | (((blk) & 1) << 27) | \
- (((inc_addr) & 1) << 26) | (((addr) & 0x1FFFF) << 9) | ((count) & 0x1FF))
-
-/* SDIO response parameters */
-#define SD_RSP_NO_NONE 0
-#define SD_RSP_NO_1 1
-#define SD_RSP_NO_2 2
-#define SD_RSP_NO_3 3
-#define SD_RSP_NO_4 4
-#define SD_RSP_NO_5 5
-#define SD_RSP_NO_6 6
-
- /* Modified R6 response (to CMD3) */
-#define SD_RSP_MR6_COM_CRC_ERROR 0x8000
-#define SD_RSP_MR6_ILLEGAL_COMMAND 0x4000
-#define SD_RSP_MR6_ERROR 0x2000
-
- /* Modified R1 in R4 Response (to CMD5) */
-#define SD_RSP_MR1_SBIT 0x80
-#define SD_RSP_MR1_PARAMETER_ERROR 0x40
-#define SD_RSP_MR1_RFU5 0x20
-#define SD_RSP_MR1_FUNC_NUM_ERROR 0x10
-#define SD_RSP_MR1_COM_CRC_ERROR 0x08
-#define SD_RSP_MR1_ILLEGAL_COMMAND 0x04
-#define SD_RSP_MR1_RFU1 0x02
-#define SD_RSP_MR1_IDLE_STATE 0x01
-
- /* R5 response (to CMD52 and CMD53) */
-#define SD_RSP_R5_COM_CRC_ERROR 0x80
-#define SD_RSP_R5_ILLEGAL_COMMAND 0x40
-#define SD_RSP_R5_IO_CURRENTSTATE1 0x20
-#define SD_RSP_R5_IO_CURRENTSTATE0 0x10
-#define SD_RSP_R5_ERROR 0x08
-#define SD_RSP_R5_RFU 0x04
-#define SD_RSP_R5_FUNC_NUM_ERROR 0x02
-#define SD_RSP_R5_OUT_OF_RANGE 0x01
-
-#define SD_RSP_R5_ERRBITS 0xCB
-
-/* ------------------------------------------------
- * SDIO Commands and responses
- *
- * I/O only commands are:
- * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
- * ------------------------------------------------
- */
-
-/* SDIO Commands */
-#define SDIOH_CMD_0 0
-#define SDIOH_CMD_3 3
-#define SDIOH_CMD_5 5
-#define SDIOH_CMD_7 7
-#define SDIOH_CMD_15 15
-#define SDIOH_CMD_52 52
-#define SDIOH_CMD_53 53
-#define SDIOH_CMD_59 59
-
-/* SDIO Command Responses */
-#define SDIOH_RSP_NONE 0
-#define SDIOH_RSP_R1 1
-#define SDIOH_RSP_R2 2
-#define SDIOH_RSP_R3 3
-#define SDIOH_RSP_R4 4
-#define SDIOH_RSP_R5 5
-#define SDIOH_RSP_R6 6
-
-/*
- * SDIO Response Error flags
- */
-#define SDIOH_RSP5_ERROR_FLAGS 0xCB
-
-/* ------------------------------------------------
- * SDIO Command structures. I/O only commands are:
- *
- * CMD0, CMD3, CMD5, CMD7, CMD15, CMD52, CMD53
- * ------------------------------------------------
- */
-
-#define CMD5_OCR_M BITFIELD_MASK(24)
-#define CMD5_OCR_S 0
-
-#define CMD7_RCA_M BITFIELD_MASK(16)
-#define CMD7_RCA_S 16
-
-#define CMD_15_RCA_M BITFIELD_MASK(16)
-#define CMD_15_RCA_S 16
-
-#define CMD52_DATA_M BITFIELD_MASK(8) /* Bits [7:0] - Write Data/Stuff bits of CMD52
- */
-#define CMD52_DATA_S 0
-#define CMD52_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
-#define CMD52_REG_ADDR_S 9
-#define CMD52_RAW_M BITFIELD_MASK(1) /* Bit 27 - Read after Write flag */
-#define CMD52_RAW_S 27
-#define CMD52_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */
-#define CMD52_FUNCTION_S 28
-#define CMD52_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
-#define CMD52_RW_FLAG_S 31
-
-#define CMD53_BYTE_BLK_CNT_M BITFIELD_MASK(9) /* Bits [8:0] - Byte/Block Count of CMD53 */
-#define CMD53_BYTE_BLK_CNT_S 0
-#define CMD53_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
-#define CMD53_REG_ADDR_S 9
-#define CMD53_OP_CODE_M BITFIELD_MASK(1) /* Bit 26 - R/W Operation Code */
-#define CMD53_OP_CODE_S 26
-#define CMD53_BLK_MODE_M BITFIELD_MASK(1) /* Bit 27 - Block Mode */
-#define CMD53_BLK_MODE_S 27
-#define CMD53_FUNCTION_M BITFIELD_MASK(3) /* Bits [30:28] - Function number */
-#define CMD53_FUNCTION_S 28
-#define CMD53_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
-#define CMD53_RW_FLAG_S 31
-
-/* ------------------------------------------------------
- * SDIO Command Response structures for SD1 and SD4 modes
- * -----------------------------------------------------
- */
-#define RSP4_IO_OCR_M BITFIELD_MASK(24) /* Bits [23:0] - Card's OCR Bits [23:0] */
-#define RSP4_IO_OCR_S 0
-#define RSP4_STUFF_M BITFIELD_MASK(3) /* Bits [26:24] - Stuff bits */
-#define RSP4_STUFF_S 24
-#define RSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 27 - Memory present */
-#define RSP4_MEM_PRESENT_S 27
-#define RSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [30:28] - Number of I/O funcs */
-#define RSP4_NUM_FUNCS_S 28
-#define RSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 31 - SDIO card ready */
-#define RSP4_CARD_READY_S 31
-
-#define RSP6_STATUS_M BITFIELD_MASK(16) /* Bits [15:0] - Card status bits [19,22,23,12:0]
- */
-#define RSP6_STATUS_S 0
-#define RSP6_IO_RCA_M BITFIELD_MASK(16) /* Bits [31:16] - RCA bits[31-16] */
-#define RSP6_IO_RCA_S 16
-
-#define RSP1_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error */
-#define RSP1_AKE_SEQ_ERROR_S 3
-#define RSP1_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */
-#define RSP1_APP_CMD_S 5
-#define RSP1_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data (buff empty) */
-#define RSP1_READY_FOR_DATA_S 8
-#define RSP1_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - State of card
- * when Cmd was received
- */
-#define RSP1_CURR_STATE_S 9
-#define RSP1_EARSE_RESET_M BITFIELD_MASK(1) /* Bit 13 - Erase seq cleared */
-#define RSP1_EARSE_RESET_S 13
-#define RSP1_CARD_ECC_DISABLE_M BITFIELD_MASK(1) /* Bit 14 - Card ECC disabled */
-#define RSP1_CARD_ECC_DISABLE_S 14
-#define RSP1_WP_ERASE_SKIP_M BITFIELD_MASK(1) /* Bit 15 - Partial blocks erased due to W/P */
-#define RSP1_WP_ERASE_SKIP_S 15
-#define RSP1_CID_CSD_OVERW_M BITFIELD_MASK(1) /* Bit 16 - Illegal write to CID or R/O bits
- * of CSD
- */
-#define RSP1_CID_CSD_OVERW_S 16
-#define RSP1_ERROR_M BITFIELD_MASK(1) /* Bit 19 - General/Unknown error */
-#define RSP1_ERROR_S 19
-#define RSP1_CC_ERROR_M BITFIELD_MASK(1) /* Bit 20 - Internal Card Control error */
-#define RSP1_CC_ERROR_S 20
-#define RSP1_CARD_ECC_FAILED_M BITFIELD_MASK(1) /* Bit 21 - Card internal ECC failed
- * to correct data
- */
-#define RSP1_CARD_ECC_FAILED_S 21
-#define RSP1_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 22 - Cmd not legal for the card state */
-#define RSP1_ILLEGAL_CMD_S 22
-#define RSP1_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 23 - CRC check of previous command failed
- */
-#define RSP1_COM_CRC_ERROR_S 23
-#define RSP1_LOCK_UNLOCK_FAIL_M BITFIELD_MASK(1) /* Bit 24 - Card lock-unlock Cmd Seq error */
-#define RSP1_LOCK_UNLOCK_FAIL_S 24
-#define RSP1_CARD_LOCKED_M BITFIELD_MASK(1) /* Bit 25 - Card locked by the host */
-#define RSP1_CARD_LOCKED_S 25
-#define RSP1_WP_VIOLATION_M BITFIELD_MASK(1) /* Bit 26 - Attempt to program
- * write-protected blocks
- */
-#define RSP1_WP_VIOLATION_S 26
-#define RSP1_ERASE_PARAM_M BITFIELD_MASK(1) /* Bit 27 - Invalid erase blocks */
-#define RSP1_ERASE_PARAM_S 27
-#define RSP1_ERASE_SEQ_ERR_M BITFIELD_MASK(1) /* Bit 28 - Erase Cmd seq error */
-#define RSP1_ERASE_SEQ_ERR_S 28
-#define RSP1_BLK_LEN_ERR_M BITFIELD_MASK(1) /* Bit 29 - Block length error */
-#define RSP1_BLK_LEN_ERR_S 29
-#define RSP1_ADDR_ERR_M BITFIELD_MASK(1) /* Bit 30 - Misaligned address */
-#define RSP1_ADDR_ERR_S 30
-#define RSP1_OUT_OF_RANGE_M BITFIELD_MASK(1) /* Bit 31 - Cmd arg was out of range */
-#define RSP1_OUT_OF_RANGE_S 31
-
-#define RSP5_DATA_M BITFIELD_MASK(8) /* Bits [0:7] - data */
-#define RSP5_DATA_S 0
-#define RSP5_FLAGS_M BITFIELD_MASK(8) /* Bit [15:8] - Rsp flags */
-#define RSP5_FLAGS_S 8
-#define RSP5_STUFF_M BITFIELD_MASK(16) /* Bits [31:16] - Stuff bits */
-#define RSP5_STUFF_S 16
-
-/* ----------------------------------------------
- * SDIO Command Response structures for SPI mode
- * ----------------------------------------------
- */
-#define SPIRSP4_IO_OCR_M BITFIELD_MASK(16) /* Bits [15:0] - Card's OCR Bits [23:8] */
-#define SPIRSP4_IO_OCR_S 0
-#define SPIRSP4_STUFF_M BITFIELD_MASK(3) /* Bits [18:16] - Stuff bits */
-#define SPIRSP4_STUFF_S 16
-#define SPIRSP4_MEM_PRESENT_M BITFIELD_MASK(1) /* Bit 19 - Memory present */
-#define SPIRSP4_MEM_PRESENT_S 19
-#define SPIRSP4_NUM_FUNCS_M BITFIELD_MASK(3) /* Bits [22:20] - Number of I/O funcs */
-#define SPIRSP4_NUM_FUNCS_S 20
-#define SPIRSP4_CARD_READY_M BITFIELD_MASK(1) /* Bit 23 - SDIO card ready */
-#define SPIRSP4_CARD_READY_S 23
-#define SPIRSP4_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - idle state */
-#define SPIRSP4_IDLE_STATE_S 24
-#define SPIRSP4_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */
-#define SPIRSP4_ILLEGAL_CMD_S 26
-#define SPIRSP4_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */
-#define SPIRSP4_COM_CRC_ERROR_S 27
-#define SPIRSP4_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error
- */
-#define SPIRSP4_FUNC_NUM_ERROR_S 28
-#define SPIRSP4_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */
-#define SPIRSP4_PARAM_ERROR_S 30
-#define SPIRSP4_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */
-#define SPIRSP4_START_BIT_S 31
-
-#define SPIRSP5_DATA_M BITFIELD_MASK(8) /* Bits [23:16] - R/W Data */
-#define SPIRSP5_DATA_S 16
-#define SPIRSP5_IDLE_STATE_M BITFIELD_MASK(1) /* Bit 24 - Idle state */
-#define SPIRSP5_IDLE_STATE_S 24
-#define SPIRSP5_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 26 - Illegal Cmd error */
-#define SPIRSP5_ILLEGAL_CMD_S 26
-#define SPIRSP5_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 27 - COM CRC error */
-#define SPIRSP5_COM_CRC_ERROR_S 27
-#define SPIRSP5_FUNC_NUM_ERROR_M BITFIELD_MASK(1) /* Bit 28 - Function number error
- */
-#define SPIRSP5_FUNC_NUM_ERROR_S 28
-#define SPIRSP5_PARAM_ERROR_M BITFIELD_MASK(1) /* Bit 30 - Parameter Error Bit */
-#define SPIRSP5_PARAM_ERROR_S 30
-#define SPIRSP5_START_BIT_M BITFIELD_MASK(1) /* Bit 31 - Start Bit */
-#define SPIRSP5_START_BIT_S 31
-
-/* RSP6 card status format; Pg 68 Physical Layer spec v 1.10 */
-#define RSP6STAT_AKE_SEQ_ERROR_M BITFIELD_MASK(1) /* Bit 3 - Authentication seq error
- */
-#define RSP6STAT_AKE_SEQ_ERROR_S 3
-#define RSP6STAT_APP_CMD_M BITFIELD_MASK(1) /* Bit 5 - Card expects ACMD */
-#define RSP6STAT_APP_CMD_S 5
-#define RSP6STAT_READY_FOR_DATA_M BITFIELD_MASK(1) /* Bit 8 - Ready for data
- * (buff empty)
- */
-#define RSP6STAT_READY_FOR_DATA_S 8
-#define RSP6STAT_CURR_STATE_M BITFIELD_MASK(4) /* Bits [12:9] - Card state at
- * Cmd reception
- */
-#define RSP6STAT_CURR_STATE_S 9
-#define RSP6STAT_ERROR_M BITFIELD_MASK(1) /* Bit 13 - General/Unknown error Bit 19
- */
-#define RSP6STAT_ERROR_S 13
-#define RSP6STAT_ILLEGAL_CMD_M BITFIELD_MASK(1) /* Bit 14 - Illegal cmd for
- * card state Bit 22
- */
-#define RSP6STAT_ILLEGAL_CMD_S 14
-#define RSP6STAT_COM_CRC_ERROR_M BITFIELD_MASK(1) /* Bit 15 - CRC previous command
- * failed Bit 23
- */
-#define RSP6STAT_COM_CRC_ERROR_S 15
-
-#define SDIOH_XFER_TYPE_READ SD_IO_OP_READ
-#define SDIOH_XFER_TYPE_WRITE SD_IO_OP_WRITE
-
-#endif /* def BCMSDIO */
-#endif /* _SDIO_H */
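For context, the _M/_S pairs removed above are the mask and shift halves of each SDIO response field. A minimal editorial sketch (not part of the patch; BITFIELD_MASK() and GFIELD() are assumed to carry their usual definitions from earlier in the removed header) of how such a field is read:

#define BITFIELD_MASK(width)	((1 << (width)) - 1)	/* assumed definition */
#define GFIELD(val, field)	(((val) >> field##_S) & field##_M)

/* returns non-zero if the R1/RSP1 word flags an illegal command */
static inline int rsp1_illegal_cmd(u32 rsp1)
{
	return GFIELD(rsp1, RSP1_ILLEGAL_CMD);
}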
diff --git a/drivers/staging/brcm80211/include/soc.h b/drivers/staging/brcm80211/include/soc.h
new file mode 100644
index 00000000000..6e5a705c493
--- /dev/null
+++ b/drivers/staging/brcm80211/include/soc.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_SOC_H
+#define _BRCM_SOC_H
+
+#ifdef SI_ENUM_BASE_VARIABLE
+#define SI_ENUM_BASE (sii->pub.si_enum_base)
+#else
+#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */
+#endif /* SI_ENUM_BASE_VARIABLE */
+
+/* core codes */
+#define NODEV_CORE_ID 0x700 /* Invalid coreid */
+#define CC_CORE_ID 0x800 /* chipcommon core */
+#define ILINE20_CORE_ID 0x801 /* iline20 core */
+#define SRAM_CORE_ID 0x802 /* sram core */
+#define SDRAM_CORE_ID 0x803 /* sdram core */
+#define PCI_CORE_ID 0x804 /* pci core */
+#define MIPS_CORE_ID 0x805 /* mips core */
+#define ENET_CORE_ID 0x806 /* enet mac core */
+#define CODEC_CORE_ID 0x807 /* v90 codec core */
+#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
+#define ADSL_CORE_ID 0x809 /* ADSL core */
+#define ILINE100_CORE_ID 0x80a /* iline100 core */
+#define IPSEC_CORE_ID 0x80b /* ipsec core */
+#define UTOPIA_CORE_ID 0x80c /* utopia core */
+#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
+#define SOCRAM_CORE_ID 0x80e /* internal memory core */
+#define MEMC_CORE_ID 0x80f /* memc sdram core */
+#define OFDM_CORE_ID 0x810 /* OFDM phy core */
+#define EXTIF_CORE_ID 0x811 /* external interface core */
+#define D11_CORE_ID 0x812 /* 802.11 MAC core */
+#define APHY_CORE_ID 0x813 /* 802.11a phy core */
+#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
+#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
+#define MIPS33_CORE_ID 0x816 /* mips3302 core */
+#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
+#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
+#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
+#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
+#define SDIOH_CORE_ID 0x81b /* sdio host core */
+#define ROBO_CORE_ID 0x81c /* roboswitch core */
+#define ATA100_CORE_ID 0x81d /* parallel ATA core */
+#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
+#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
+#define PCIE_CORE_ID 0x820 /* pci express core */
+#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
+#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
+#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
+#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
+#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
+#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
+#define PMU_CORE_ID 0x827 /* PMU core */
+#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
+#define SDIOD_CORE_ID 0x829 /* SDIO device core */
+#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
+#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
+#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
+#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
+#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
+#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
+#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
+#define SC_CORE_ID 0x831 /* shared common core */
+#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
+#define SPIH_CORE_ID 0x833 /* SPI host core */
+#define I2S_CORE_ID 0x834 /* I2S core */
+#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
+#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
+#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
+#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all
+ * unused address ranges
+ */
+
+/* Common core control flags */
+#define SICF_BIST_EN 0x8000
+#define SICF_PME_EN 0x4000
+#define SICF_CORE_BITS 0x3ffc
+#define SICF_FGC 0x0002
+#define SICF_CLOCK_EN 0x0001
+
+#endif /* _BRCM_SOC_H */
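A typical consumer of the new core-ID constants walks the chip's core list and matches each discovered ID against this table. Illustrative sketch only; the function below is hypothetical, not an API exported by soc.h:

#include "soc.h"

static const char *brcm_core_name(u16 coreid)
{
	switch (coreid) {
	case CC_CORE_ID:	return "chipcommon";
	case D11_CORE_ID:	return "802.11 MAC";
	case SDIOD_CORE_ID:	return "sdio device";
	case ARMCM3_CORE_ID:	return "arm cortex-m3";
	case PCIE_CORE_ID:	return "pcie";
	default:		return "unknown";
	}
}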
diff --git a/drivers/staging/brcm80211/include/wlioctl.h b/drivers/staging/brcm80211/include/wlioctl.h
deleted file mode 100644
index 2876bd9eff8..00000000000
--- a/drivers/staging/brcm80211/include/wlioctl.h
+++ /dev/null
@@ -1,1365 +0,0 @@
-/*
- * Copyright (c) 2010 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _wlioctl_h_
-#define _wlioctl_h_
-
-#include <linux/ieee80211.h>
-#ifdef BRCM_FULLMAC
-#include <proto/bcmeth.h>
-#endif
-#include <proto/bcmevent.h>
-#include <proto/802.11.h>
-#include <bcmwifi.h>
-
-#ifndef INTF_NAME_SIZ
-#define INTF_NAME_SIZ 16
-#endif
-
-#ifdef BRCM_FULLMAC
-
-#define WL_BSS_INFO_VERSION 108 /* current ver of wl_bss_info struct */
-
-/* BSS info structure
- * Applications MUST CHECK ie_offset field and length field to access IEs and
- * next bss_info structure in a vector (in wl_scan_results_t)
- */
-typedef struct wl_bss_info {
- u32 version; /* version field */
- u32 length; /* byte length of data in this record,
- * starting at version and including IEs
- */
- u8 BSSID[ETH_ALEN];
- u16 beacon_period; /* units are Kusec */
- u16 capability; /* Capability information */
- u8 SSID_len;
- u8 SSID[32];
- struct {
- uint count; /* # rates in this set */
- u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */
- } rateset; /* supported rates */
- chanspec_t chanspec; /* chanspec for bss */
- u16 atim_window; /* units are Kusec */
- u8 dtim_period; /* DTIM period */
- s16 RSSI; /* receive signal strength (in dBm) */
- s8 phy_noise; /* noise (in dBm) */
-
- u8 n_cap; /* BSS is 802.11N Capable */
- u32 nbss_cap; /* 802.11N BSS Capabilities (based on HT_CAP_*) */
- u8 ctl_ch; /* 802.11N BSS control channel number */
- u32 reserved32[1]; /* Reserved for expansion of BSS properties */
- u8 flags; /* flags */
- u8 reserved[3]; /* Reserved for expansion of BSS properties */
- u8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
-
- u16 ie_offset; /* offset at which IEs start, from beginning */
- u32 ie_length; /* byte length of Information Elements */
- s16 SNR; /* average SNR during frame reception */
- /* Add new fields here */
- /* variable length Information Elements */
-} wl_bss_info_t;
-#endif /* BRCM_FULLMAC */
-
-typedef struct wlc_ssid {
- u32 SSID_len;
- unsigned char SSID[32];
-} wlc_ssid_t;
-
-#ifdef BRCM_FULLMAC
-typedef struct chan_scandata {
- u8 txpower;
- u8 pad;
- chanspec_t channel; /* Channel num, bw, ctrl_sb and band */
- u32 channel_mintime;
- u32 channel_maxtime;
-} chan_scandata_t;
-
-typedef enum wl_scan_type {
- EXTDSCAN_FOREGROUND_SCAN,
- EXTDSCAN_BACKGROUND_SCAN,
- EXTDSCAN_FORCEDBACKGROUND_SCAN
-} wl_scan_type_t;
-
-#define WLC_EXTDSCAN_MAX_SSID 5
-
-#define WL_BSS_FLAGS_FROM_BEACON 0x01 /* bss_info derived from beacon */
-#define WL_BSS_FLAGS_FROM_CACHE 0x02 /* bss_info collected from cache */
-#define WL_BSS_FLAGS_RSSI_ONCHANNEL 0x04 /* rssi info was received on channel (vs offchannel) */
-
-typedef struct wl_extdscan_params {
- s8 nprobes; /* 0 = passive, otherwise active */
- s8 split_scan; /* split scan */
- s8 band; /* band */
- s8 pad;
- wlc_ssid_t ssid[WLC_EXTDSCAN_MAX_SSID]; /* ssid list */
- u32 tx_rate; /* in 500ksec units */
- wl_scan_type_t scan_type; /* enum */
- s32 channel_num;
- chan_scandata_t channel_list[1]; /* list of chandata structs */
-} wl_extdscan_params_t;
-
-#define WL_EXTDSCAN_PARAMS_FIXED_SIZE (sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t))
-
-#define WL_BSSTYPE_INFRA 1
-#define WL_BSSTYPE_INDEP 0
-#define WL_BSSTYPE_ANY 2
-
-/* Bitmask for scan_type */
-#define WL_SCANFLAGS_PASSIVE 0x01 /* force passive scan */
-#define WL_SCANFLAGS_RESERVED 0x02 /* Reserved */
-#define WL_SCANFLAGS_PROHIBITED 0x04 /* allow scanning prohibited channels */
-
-typedef struct wl_scan_params {
- wlc_ssid_t ssid; /* default: {0, ""} */
- u8 bssid[ETH_ALEN]; /* default: bcast */
- s8 bss_type; /* default: any,
- * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
- */
- u8 scan_type; /* flags, 0 use default */
- s32 nprobes; /* -1 use default, number of probes per channel */
- s32 active_time; /* -1 use default, dwell time per channel for
- * active scanning
- */
- s32 passive_time; /* -1 use default, dwell time per channel
- * for passive scanning
- */
- s32 home_time; /* -1 use default, dwell time for the home channel
- * between channel scans
- */
- s32 channel_num; /* count of channels and ssids that follow
- *
- * low half is count of channels in channel_list, 0
- * means default (use all available channels)
- *
- * high half is entries in wlc_ssid_t array that
- * follows channel_list, aligned for s32 (4 bytes)
- * meaning an odd channel count implies a 2-byte pad
- * between end of channel_list and first ssid
- *
- * if ssid count is zero, single ssid in the fixed
- * parameter portion is assumed, otherwise ssid in
- * the fixed portion is ignored
- */
- u16 channel_list[1]; /* list of chanspecs */
-} wl_scan_params_t;
-
-/* size of wl_scan_params not including variable length array */
-#define WL_SCAN_PARAMS_FIXED_SIZE 64
-
-/* masks for channel and ssid count */
-#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff
-#define WL_SCAN_PARAMS_NSSID_SHIFT 16
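/* Editorial sketch, not part of the patch: packing the channel and ssid
 * counts into wl_scan_params_t.channel_num exactly as the struct comment
 * above describes -- channel count in the low half, ssid count shifted
 * into the high half using the two masks just defined.
 */
static inline s32 wl_scan_pack_counts(u32 nchannels, u32 nssids)
{
	return (s32)((nchannels & WL_SCAN_PARAMS_COUNT_MASK) |
		     ((nssids & WL_SCAN_PARAMS_COUNT_MASK) <<
		      WL_SCAN_PARAMS_NSSID_SHIFT));
}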
-
-#define WL_SCAN_ACTION_START 1
-#define WL_SCAN_ACTION_CONTINUE 2
-#define WL_SCAN_ACTION_ABORT 3
-
-#define ISCAN_REQ_VERSION 1
-
-/* incremental scan struct */
-typedef struct wl_iscan_params {
- u32 version;
- u16 action;
- u16 scan_duration;
- wl_scan_params_t params;
-} wl_iscan_params_t;
-
-/* 3 fields + size of wl_scan_params, not including variable length array */
-#define WL_ISCAN_PARAMS_FIXED_SIZE (offsetof(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
-
-typedef struct wl_scan_results {
- u32 buflen;
- u32 version;
- u32 count;
- wl_bss_info_t bss_info[1];
-} wl_scan_results_t;
-
-/* size of wl_scan_results not including variable length array */
-#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t))
-
-/* wl_iscan_results status values */
-#define WL_SCAN_RESULTS_SUCCESS 0
-#define WL_SCAN_RESULTS_PARTIAL 1
-#define WL_SCAN_RESULTS_PENDING 2
-#define WL_SCAN_RESULTS_ABORTED 3
-#define WL_SCAN_RESULTS_NO_MEM 4
-
-#define ESCAN_REQ_VERSION 1
-
-typedef struct wl_escan_params {
- u32 version;
- u16 action;
- u16 sync_id;
- wl_scan_params_t params;
-} wl_escan_params_t;
-
-#define WL_ESCAN_PARAMS_FIXED_SIZE (offsetof(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
-
-typedef struct wl_escan_result {
- u32 buflen;
- u32 version;
- u16 sync_id;
- u16 bss_count;
- wl_bss_info_t bss_info[1];
-} wl_escan_result_t;
-
-#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t))
-
-/* incremental scan results struct */
-typedef struct wl_iscan_results {
- u32 status;
- wl_scan_results_t results;
-} wl_iscan_results_t;
-
-/* size of wl_iscan_results not including variable length array */
-#define WL_ISCAN_RESULTS_FIXED_SIZE \
- (WL_SCAN_RESULTS_FIXED_SIZE + offsetof(wl_iscan_results_t, results))
-
-typedef struct wl_probe_params {
- wlc_ssid_t ssid;
- u8 bssid[ETH_ALEN];
- u8 mac[ETH_ALEN];
-} wl_probe_params_t;
-#endif /* BRCM_FULLMAC */
-
-#define WL_NUMRATES 16 /* max # of rates in a rateset */
-typedef struct wl_rateset {
- u32 count; /* # rates in this set */
- u8 rates[WL_NUMRATES]; /* rates in 500kbps units w/hi bit set if basic */
-} wl_rateset_t;
-
-#ifdef BRCM_FULLMAC
-typedef struct wl_rateset_args {
- u32 count; /* # rates in this set */
- u8 rates[WL_NUMRATES]; /* rates in 500kbps units w/hi bit set if basic */
- u8 mcs[MCSSET_LEN]; /* supported mcs index bit map */
-} wl_rateset_args_t;
-
-/* u32 list */
-typedef struct wl_u32_list {
- /* in - # of elements, out - # of entries */
- u32 count;
- /* variable length u32 list */
- u32 element[1];
-} wl_u32_list_t;
-
-/* used for association with a specific BSSID and chanspec list */
-typedef struct wl_assoc_params {
- u8 bssid[ETH_ALEN]; /* 00:00:00:00:00:00: broadcast scan */
- u16 bssid_cnt;
- s32 chanspec_num; /* 0: all available channels,
- * otherwise count of chanspecs in chanspec_list
- */
- chanspec_t chanspec_list[1]; /* list of chanspecs */
-} wl_assoc_params_t;
-#define WL_ASSOC_PARAMS_FIXED_SIZE (sizeof(wl_assoc_params_t) - sizeof(chanspec_t))
-
-/* used for reassociation/roam to a specific BSSID and channel */
-typedef wl_assoc_params_t wl_reassoc_params_t;
-#define WL_REASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE
-
-/* used for join with or without a specific bssid and channel list */
-typedef struct wl_join_params {
- wlc_ssid_t ssid;
- wl_assoc_params_t params; /* optional field, but it must include the fixed portion
- * of the wl_assoc_params_t struct when it is present.
- */
-} wl_join_params_t;
-#define WL_JOIN_PARAMS_FIXED_SIZE (sizeof(wl_join_params_t) - sizeof(chanspec_t))
-
-#endif /* BRCM_FULLMAC */
-
-/* defines used by the nrate iovar */
-#define NRATE_MCS_INUSE 0x00000080 /* MCS in use, indicates b0-6 holds an mcs */
-#define NRATE_RATE_MASK 0x0000007f /* rate/mcs value */
-#define NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
-#define NRATE_STF_SHIFT 8 /* stf mode shift */
-#define NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */
-#define NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicates override of mcs only */
-#define NRATE_SGI_MASK 0x00800000 /* sgi mode */
-#define NRATE_SGI_SHIFT 23 /* sgi mode */
-#define NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
-#define NRATE_LDPC_SHIFT 22 /* ldpc shift */
-
-#define NRATE_STF_SISO 0 /* stf mode SISO */
-#define NRATE_STF_CDD 1 /* stf mode CDD */
-#define NRATE_STF_STBC 2 /* stf mode STBC */
-#define NRATE_STF_SDM 3 /* stf mode SDM */
-
-#define ANTENNA_NUM_1 1 /* total number of antennas to be used */
-#define ANTENNA_NUM_2 2
-#define ANTENNA_NUM_3 3
-#define ANTENNA_NUM_4 4
-
-#define ANT_SELCFG_AUTO 0x80 /* bit indicates antenna sel AUTO */
-#define ANT_SELCFG_MASK 0x33 /* antenna configuration mask */
-#define ANT_SELCFG_MAX 4 /* max number of antenna configurations */
-#define ANT_SELCFG_TX_UNICAST 0 /* unicast tx antenna configuration */
-#define ANT_SELCFG_RX_UNICAST 1 /* unicast rx antenna configuration */
-#define ANT_SELCFG_TX_DEF 2 /* default tx antenna configuration */
-#define ANT_SELCFG_RX_DEF 3 /* default rx antenna configuration */
-
-#define MAX_STREAMS_SUPPORTED 4 /* max number of streams supported */
-
-typedef struct {
- u8 ant_config[ANT_SELCFG_MAX]; /* antenna configuration */
- u8 num_antcfg; /* number of available antenna configurations */
-} wlc_antselcfg_t;
-
-#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */
-
-#ifdef BRCM_FULLMAC
-#define MAX_CCA_CHANNELS 38 /* Max number of 20 Mhz wide channels */
-#define MAX_CCA_SECS 60 /* CCA keeps this many seconds history */
-
-#define IBSS_MED 15 /* Medium in-bss congestion percentage */
-#define IBSS_HI 25 /* Hi in-bss congestion percentage */
-#define OBSS_MED 12
-#define OBSS_HI 25
-#define INTERFER_MED 5
-#define INTERFER_HI 10
-
-#define CCA_FLAG_2G_ONLY 0x01 /* Return a channel from 2.4 Ghz band */
-#define CCA_FLAG_5G_ONLY 0x02 /* Return a channel from 5 Ghz band */
-#define CCA_FLAG_IGNORE_DURATION 0x04 /* Ignore dwell time for each channel */
-#define CCA_FLAGS_PREFER_1_6_11 0x10
-#define CCA_FLAG_IGNORE_INTERFER 0x20 /* do not exclude channel based on interference level */
-
-#define CCA_ERRNO_BAND 1 /* After filtering for band pref, no choices left */
-#define CCA_ERRNO_DURATION 2 /* After filtering for duration, no choices left */
-#define CCA_ERRNO_PREF_CHAN 3 /* After filtering for chan pref, no choices left */
-#define CCA_ERRNO_INTERFER 4 /* After filtering for interference, no choices left */
-#define CCA_ERRNO_TOO_FEW 5 /* Only 1 channel was input */
-
-typedef struct {
- u32 duration; /* millisecs spent sampling this channel */
- u32 congest_ibss; /* millisecs in our bss (presumably this traffic will */
- /* move if cur bss moves channels) */
- u32 congest_obss; /* traffic not in our bss */
- u32 interference; /* millisecs detecting a non 802.11 interferer. */
- u32 timestamp; /* second timestamp */
-} cca_congest_t;
-
-typedef struct {
- chanspec_t chanspec; /* Which channel? */
- u8 num_secs; /* How many secs worth of data */
- cca_congest_t secs[1]; /* Data */
-} cca_congest_channel_req_t;
-
-#endif /* BRCM_FULLMAC */
-
-#define WLC_CNTRY_BUF_SZ 4 /* Country string is 3 bytes + NUL */
-
-#ifdef BRCM_FULLMAC
-typedef struct wl_country {
- char country_abbrev[WLC_CNTRY_BUF_SZ]; /* nul-terminated country code used in
- * the Country IE
- */
- s32 rev; /* revision specifier for ccode
- * on set, -1 indicates unspecified.
- * on get, rev >= 0
- */
- char ccode[WLC_CNTRY_BUF_SZ]; /* nul-terminated built-in country code.
- * variable length, but fixed size in
- * struct allows simple allocation for
- * expected country strings <= 3 chars.
- */
-} wl_country_t;
-
-typedef struct wl_channels_in_country {
- u32 buflen;
- u32 band;
- char country_abbrev[WLC_CNTRY_BUF_SZ];
- u32 count;
- u32 channel[1];
-} wl_channels_in_country_t;
-
-typedef struct wl_country_list {
- u32 buflen;
- u32 band_set;
- u32 band;
- u32 count;
- char country_abbrev[1];
-} wl_country_list_t;
-
-#define WL_NUM_RPI_BINS 8
-#define WL_RM_TYPE_BASIC 1
-#define WL_RM_TYPE_CCA 2
-#define WL_RM_TYPE_RPI 3
-
-#define WL_RM_FLAG_PARALLEL (1<<0)
-
-#define WL_RM_FLAG_LATE (1<<1)
-#define WL_RM_FLAG_INCAPABLE (1<<2)
-#define WL_RM_FLAG_REFUSED (1<<3)
-
-typedef struct wl_rm_req_elt {
- s8 type;
- s8 flags;
- chanspec_t chanspec;
- u32 token; /* token for this measurement */
- u32 tsf_h; /* TSF high 32-bits of Measurement start time */
- u32 tsf_l; /* TSF low 32-bits */
- u32 dur; /* TUs */
-} wl_rm_req_elt_t;
-
-typedef struct wl_rm_req {
- u32 token; /* overall measurement set token */
- u32 count; /* number of measurement requests */
- void *cb; /* completion callback function: may be NULL */
- void *cb_arg; /* arg to completion callback function */
- wl_rm_req_elt_t req[1]; /* variable length block of requests */
-} wl_rm_req_t;
-#define WL_RM_REQ_FIXED_LEN offsetof(wl_rm_req_t, req)
-
-typedef struct wl_rm_rep_elt {
- s8 type;
- s8 flags;
- chanspec_t chanspec;
- u32 token; /* token for this measurement */
- u32 tsf_h; /* TSF high 32-bits of Measurement start time */
- u32 tsf_l; /* TSF low 32-bits */
- u32 dur; /* TUs */
- u32 len; /* byte length of data block */
- u8 data[1]; /* variable length data block */
-} wl_rm_rep_elt_t;
-#define WL_RM_REP_ELT_FIXED_LEN 24 /* length excluding data block */
-
-#define WL_RPI_REP_BIN_NUM 8
-typedef struct wl_rm_rpi_rep {
- u8 rpi[WL_RPI_REP_BIN_NUM];
- s8 rpi_max[WL_RPI_REP_BIN_NUM];
-} wl_rm_rpi_rep_t;
-
-typedef struct wl_rm_rep {
- u32 token; /* overall measurement set token */
- u32 len; /* length of measurement report block */
- wl_rm_rep_elt_t rep[1]; /* variable length block of reports */
-} wl_rm_rep_t;
-#define WL_RM_REP_FIXED_LEN 8
-#endif /* BRCM_FULLMAC */
-
-/* Enumerate crypto algorithms */
-#define CRYPTO_ALGO_OFF 0
-#define CRYPTO_ALGO_WEP1 1
-#define CRYPTO_ALGO_TKIP 2
-#define CRYPTO_ALGO_WEP128 3
-#define CRYPTO_ALGO_AES_CCM 4
-#define CRYPTO_ALGO_AES_RESERVED1 5
-#define CRYPTO_ALGO_AES_RESERVED2 6
-#define CRYPTO_ALGO_NALG 7
-
-#define WSEC_GEN_MIC_ERROR 0x0001
-#define WSEC_GEN_REPLAY 0x0002
-#define WSEC_GEN_ICV_ERROR 0x0004
-
-#define WL_SOFT_KEY (1 << 0) /* Indicates this key is using soft encrypt */
-#define WL_PRIMARY_KEY (1 << 1) /* Indicates this key is the primary (ie tx) key */
-#define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */
-#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */
-#define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for an IBSS PEER */
-
-typedef struct wl_wsec_key {
- u32 index; /* key index */
- u32 len; /* key length */
- u8 data[WLAN_MAX_KEY_LEN]; /* key data */
- u32 pad_1[18];
- u32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */
- u32 flags; /* misc flags */
- u32 pad_2[2];
- int pad_3;
- int iv_initialized; /* has IV been initialized already? */
- int pad_4;
- /* Rx IV */
- struct {
- u32 hi; /* upper 32 bits of IV */
- u16 lo; /* lower 16 bits of IV */
- } rxiv;
- u32 pad_5[2];
- u8 ea[ETH_ALEN]; /* per station */
-} wl_wsec_key_t;
-
-#define WSEC_MIN_PSK_LEN 8
-#define WSEC_MAX_PSK_LEN 64
-
-/* Flag for key material needing passhash'ing */
-#define WSEC_PASSPHRASE (1<<0)
-
-/* receptacle for WLC_SET_WSEC_PMK parameter */
-typedef struct {
- unsigned short key_len; /* octets in key material */
- unsigned short flags; /* key handling qualification */
- u8 key[WSEC_MAX_PSK_LEN]; /* PMK material */
-} wsec_pmk_t;
-
-/* wireless security bitvec */
-#define WEP_ENABLED 0x0001
-#define TKIP_ENABLED 0x0002
-#define AES_ENABLED 0x0004
-#define WSEC_SWFLAG 0x0008
-#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */
-
-/* WPA authentication mode bitvec */
-#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
-#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */
-#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */
-#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */
-#define WPA_AUTH_RESERVED1 0x0008
-#define WPA_AUTH_RESERVED2 0x0010
- /* #define WPA_AUTH_8021X 0x0020 *//* 802.1x, reserved */
-#define WPA2_AUTH_RESERVED1 0x0020
-#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */
-#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */
-#define WPA2_AUTH_RESERVED3 0x0200
-#define WPA2_AUTH_RESERVED4 0x0400
-#define WPA2_AUTH_RESERVED5 0x0800
-
-/* pmkid */
-#define MAXPMKID 16
-
-typedef struct _pmkid {
- u8 BSSID[ETH_ALEN];
- u8 PMKID[WLAN_PMKID_LEN];
-} pmkid_t;
-
-typedef struct _pmkid_list {
- u32 npmkid;
- pmkid_t pmkid[1];
-} pmkid_list_t;
-
-typedef struct _pmkid_cand {
- u8 BSSID[ETH_ALEN];
- u8 preauth;
-} pmkid_cand_t;
-
-typedef struct _pmkid_cand_list {
- u32 npmkid_cand;
- pmkid_cand_t pmkid_cand[1];
-} pmkid_cand_list_t;
-
-typedef struct wl_led_info {
- u32 index; /* led index */
- u32 behavior;
- u8 activehi;
-} wl_led_info_t;
-
-/* R_REG and W_REG struct passed through ioctl */
-typedef struct {
- u32 byteoff; /* byte offset of the field in d11regs_t */
- u32 val; /* read/write value of the field */
- u32 size; /* sizeof the field */
- uint band; /* band (optional) */
-} rw_reg_t;
-
-
-#ifdef BRCM_FULLMAC
-/* Used to get specific STA parameters */
-typedef struct {
- u32 val;
- u8 ea[ETH_ALEN];
-} scb_val_t;
-#endif /* BRCM_FULLMAC */
-
-/* channel encoding */
-typedef struct channel_info {
- int hw_channel;
- int target_channel;
- int scan_channel;
-} channel_info_t;
-
-/* For ioctls that take a list of MAC addresses */
-struct maclist {
- uint count; /* number of MAC addresses */
- u8 ea[1][ETH_ALEN]; /* variable length array of MAC addresses */
-};
-
-#ifdef BRCM_FULLMAC
-/* Linux network driver ioctl encoding */
-typedef struct wl_ioctl {
- uint cmd; /* common ioctl definition */
- void *buf; /* pointer to user buffer */
- uint len; /* length of user buffer */
- u8 set; /* get or set request (optional) */
- uint used; /* bytes read or written (optional) */
- uint needed; /* bytes needed (optional) */
-} wl_ioctl_t;
-#endif /* BRCM_FULLMAC */
-
-
-/*
- * Structure for passing hardware and software
- * revision info up from the driver.
- */
-typedef struct wlc_rev_info {
- uint vendorid; /* PCI vendor id */
- uint deviceid; /* device id of chip */
- uint radiorev; /* radio revision */
- uint chiprev; /* chip revision */
- uint corerev; /* core revision */
- uint boardid; /* board identifier (usu. PCI sub-device id) */
- uint boardvendor; /* board vendor (usu. PCI sub-vendor id) */
- uint boardrev; /* board revision */
- uint driverrev; /* driver version */
- uint ucoderev; /* microcode version */
- uint bus; /* bus type */
- uint chipnum; /* chip number */
- uint phytype; /* phy type */
- uint phyrev; /* phy revision */
- uint anarev; /* anacore rev */
- uint chippkg; /* chip package info */
-} wlc_rev_info_t;
-
-#define WL_REV_INFO_LEGACY_LENGTH 48
-
-#ifdef BRCM_FULLMAC
-#define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
-#define WLC_IOCTL_MEDLEN 1536 /* "med" length ioctl buffer required */
-#define WLC_IOCTL_MAXLEN 8192
-#endif
-
-/* common ioctl definitions */
-#define WLC_GET_MAGIC 0
-#define WLC_GET_VERSION 1
-#define WLC_UP 2
-#define WLC_DOWN 3
-#define WLC_GET_LOOP 4
-#define WLC_SET_LOOP 5
-#define WLC_DUMP 6
-#define WLC_GET_MSGLEVEL 7
-#define WLC_SET_MSGLEVEL 8
-#define WLC_GET_PROMISC 9
-#define WLC_SET_PROMISC 10
-#define WLC_OVERLAY_IOCTL 11
-#define WLC_GET_RATE 12
- /* #define WLC_SET_RATE 13 *//* no longer supported */
-#define WLC_GET_INSTANCE 14
- /* #define WLC_GET_FRAG 15 *//* no longer supported */
- /* #define WLC_SET_FRAG 16 *//* no longer supported */
- /* #define WLC_GET_RTS 17 *//* no longer supported */
- /* #define WLC_SET_RTS 18 *//* no longer supported */
-#define WLC_GET_INFRA 19
-#define WLC_SET_INFRA 20
-#define WLC_GET_AUTH 21
-#define WLC_SET_AUTH 22
-#define WLC_GET_BSSID 23
-#define WLC_SET_BSSID 24
-#define WLC_GET_SSID 25
-#define WLC_SET_SSID 26
-#define WLC_RESTART 27
- /* #define WLC_DUMP_SCB 28 *//* no longer supported */
-#define WLC_GET_CHANNEL 29
-#define WLC_SET_CHANNEL 30
-#define WLC_GET_SRL 31
-#define WLC_SET_SRL 32
-#define WLC_GET_LRL 33
-#define WLC_SET_LRL 34
-#define WLC_GET_PLCPHDR 35
-#define WLC_SET_PLCPHDR 36
-#define WLC_GET_RADIO 37
-#define WLC_SET_RADIO 38
-#define WLC_GET_PHYTYPE 39
-#define WLC_DUMP_RATE 40
-#define WLC_SET_RATE_PARAMS 41
-#define WLC_GET_FIXRATE 42
-#define WLC_SET_FIXRATE 43
- /* #define WLC_GET_WEP 42 *//* no longer supported */
- /* #define WLC_SET_WEP 43 *//* no longer supported */
-#define WLC_GET_KEY 44
-#define WLC_SET_KEY 45
-#define WLC_GET_REGULATORY 46
-#define WLC_SET_REGULATORY 47
-#define WLC_GET_PASSIVE_SCAN 48
-#define WLC_SET_PASSIVE_SCAN 49
-#define WLC_SCAN 50
-#define WLC_SCAN_RESULTS 51
-#define WLC_DISASSOC 52
-#define WLC_REASSOC 53
-#define WLC_GET_ROAM_TRIGGER 54
-#define WLC_SET_ROAM_TRIGGER 55
-#define WLC_GET_ROAM_DELTA 56
-#define WLC_SET_ROAM_DELTA 57
-#define WLC_GET_ROAM_SCAN_PERIOD 58
-#define WLC_SET_ROAM_SCAN_PERIOD 59
-#define WLC_EVM 60 /* diag */
-#define WLC_GET_TXANT 61
-#define WLC_SET_TXANT 62
-#define WLC_GET_ANTDIV 63
-#define WLC_SET_ANTDIV 64
- /* #define WLC_GET_TXPWR 65 *//* no longer supported */
- /* #define WLC_SET_TXPWR 66 *//* no longer supported */
-#define WLC_GET_CLOSED 67
-#define WLC_SET_CLOSED 68
-#define WLC_GET_MACLIST 69
-#define WLC_SET_MACLIST 70
-#define WLC_GET_RATESET 71
-#define WLC_SET_RATESET 72
- /* #define WLC_GET_LOCALE 73 *//* no longer supported */
-#define WLC_LONGTRAIN 74
-#define WLC_GET_BCNPRD 75
-#define WLC_SET_BCNPRD 76
-#define WLC_GET_DTIMPRD 77
-#define WLC_SET_DTIMPRD 78
-#define WLC_GET_SROM 79
-#define WLC_SET_SROM 80
-#define WLC_GET_WEP_RESTRICT 81
-#define WLC_SET_WEP_RESTRICT 82
-#define WLC_GET_COUNTRY 83
-#define WLC_SET_COUNTRY 84
-#define WLC_GET_PM 85
-#define WLC_SET_PM 86
-#define WLC_GET_WAKE 87
-#define WLC_SET_WAKE 88
- /* #define WLC_GET_D11CNTS 89 *//* -> "counters" iovar */
-#define WLC_GET_FORCELINK 90 /* ndis only */
-#define WLC_SET_FORCELINK 91 /* ndis only */
-#define WLC_FREQ_ACCURACY 92 /* diag */
-#define WLC_CARRIER_SUPPRESS 93 /* diag */
-#define WLC_GET_PHYREG 94
-#define WLC_SET_PHYREG 95
-#define WLC_GET_RADIOREG 96
-#define WLC_SET_RADIOREG 97
-#define WLC_GET_REVINFO 98
-#define WLC_GET_UCANTDIV 99
-#define WLC_SET_UCANTDIV 100
-#define WLC_R_REG 101
-#define WLC_W_REG 102
-/* #define WLC_DIAG_LOOPBACK 103 old tray diag */
- /* #define WLC_RESET_D11CNTS 104 *//* -> "reset_d11cnts" iovar */
-#define WLC_GET_MACMODE 105
-#define WLC_SET_MACMODE 106
-#define WLC_GET_MONITOR 107
-#define WLC_SET_MONITOR 108
-#define WLC_GET_GMODE 109
-#define WLC_SET_GMODE 110
-#define WLC_GET_LEGACY_ERP 111
-#define WLC_SET_LEGACY_ERP 112
-#define WLC_GET_RX_ANT 113
-#define WLC_GET_CURR_RATESET 114 /* current rateset */
-#define WLC_GET_SCANSUPPRESS 115
-#define WLC_SET_SCANSUPPRESS 116
-#define WLC_GET_AP 117
-#define WLC_SET_AP 118
-#define WLC_GET_EAP_RESTRICT 119
-#define WLC_SET_EAP_RESTRICT 120
-#define WLC_SCB_AUTHORIZE 121
-#define WLC_SCB_DEAUTHORIZE 122
-#define WLC_GET_WDSLIST 123
-#define WLC_SET_WDSLIST 124
-#define WLC_GET_ATIM 125
-#define WLC_SET_ATIM 126
-#define WLC_GET_RSSI 127
-#define WLC_GET_PHYANTDIV 128
-#define WLC_SET_PHYANTDIV 129
-#define WLC_AP_RX_ONLY 130
-#define WLC_GET_TX_PATH_PWR 131
-#define WLC_SET_TX_PATH_PWR 132
-#define WLC_GET_WSEC 133
-#define WLC_SET_WSEC 134
-#define WLC_GET_PHY_NOISE 135
-#define WLC_GET_BSS_INFO 136
-#define WLC_GET_PKTCNTS 137
-#define WLC_GET_LAZYWDS 138
-#define WLC_SET_LAZYWDS 139
-#define WLC_GET_BANDLIST 140
-#define WLC_GET_BAND 141
-#define WLC_SET_BAND 142
-#define WLC_SCB_DEAUTHENTICATE 143
-#define WLC_GET_SHORTSLOT 144
-#define WLC_GET_SHORTSLOT_OVERRIDE 145
-#define WLC_SET_SHORTSLOT_OVERRIDE 146
-#define WLC_GET_SHORTSLOT_RESTRICT 147
-#define WLC_SET_SHORTSLOT_RESTRICT 148
-#define WLC_GET_GMODE_PROTECTION 149
-#define WLC_GET_GMODE_PROTECTION_OVERRIDE 150
-#define WLC_SET_GMODE_PROTECTION_OVERRIDE 151
-#define WLC_UPGRADE 152
- /* #define WLC_GET_MRATE 153 *//* no longer supported */
- /* #define WLC_SET_MRATE 154 *//* no longer supported */
-#define WLC_GET_IGNORE_BCNS 155
-#define WLC_SET_IGNORE_BCNS 156
-#define WLC_GET_SCB_TIMEOUT 157
-#define WLC_SET_SCB_TIMEOUT 158
-#define WLC_GET_ASSOCLIST 159
-#define WLC_GET_CLK 160
-#define WLC_SET_CLK 161
-#define WLC_GET_UP 162
-#define WLC_OUT 163
-#define WLC_GET_WPA_AUTH 164
-#define WLC_SET_WPA_AUTH 165
-#define WLC_GET_UCFLAGS 166
-#define WLC_SET_UCFLAGS 167
-#define WLC_GET_PWRIDX 168
-#define WLC_SET_PWRIDX 169
-#define WLC_GET_TSSI 170
-#define WLC_GET_SUP_RATESET_OVERRIDE 171
-#define WLC_SET_SUP_RATESET_OVERRIDE 172
- /* #define WLC_SET_FAST_TIMER 173 *//* no longer supported */
- /* #define WLC_GET_FAST_TIMER 174 *//* no longer supported */
- /* #define WLC_SET_SLOW_TIMER 175 *//* no longer supported */
- /* #define WLC_GET_SLOW_TIMER 176 *//* no longer supported */
- /* #define WLC_DUMP_PHYREGS 177 *//* no longer supported */
-#define WLC_GET_PROTECTION_CONTROL 178
-#define WLC_SET_PROTECTION_CONTROL 179
-#define WLC_GET_PHYLIST 180
-#define WLC_ENCRYPT_STRENGTH 181 /* ndis only */
-#define WLC_DECRYPT_STATUS 182 /* ndis only */
-#define WLC_GET_KEY_SEQ 183
-#define WLC_GET_SCAN_CHANNEL_TIME 184
-#define WLC_SET_SCAN_CHANNEL_TIME 185
-#define WLC_GET_SCAN_UNASSOC_TIME 186
-#define WLC_SET_SCAN_UNASSOC_TIME 187
-#define WLC_GET_SCAN_HOME_TIME 188
-#define WLC_SET_SCAN_HOME_TIME 189
-#define WLC_GET_SCAN_NPROBES 190
-#define WLC_SET_SCAN_NPROBES 191
-#define WLC_GET_PRB_RESP_TIMEOUT 192
-#define WLC_SET_PRB_RESP_TIMEOUT 193
-#define WLC_GET_ATTEN 194
-#define WLC_SET_ATTEN 195
-#define WLC_GET_SHMEM 196 /* diag */
-#define WLC_SET_SHMEM 197 /* diag */
- /* #define WLC_GET_GMODE_PROTECTION_CTS 198 *//* no longer supported */
- /* #define WLC_SET_GMODE_PROTECTION_CTS 199 *//* no longer supported */
-#define WLC_SET_WSEC_TEST 200
-#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201
-#define WLC_TKIP_COUNTERMEASURES 202
-#define WLC_GET_PIOMODE 203
-#define WLC_SET_PIOMODE 204
-#define WLC_SET_ASSOC_PREFER 205
-#define WLC_GET_ASSOC_PREFER 206
-#define WLC_SET_ROAM_PREFER 207
-#define WLC_GET_ROAM_PREFER 208
-#define WLC_SET_LED 209
-#define WLC_GET_LED 210
-#define WLC_RESERVED6 211
-#define WLC_RESERVED7 212
-#define WLC_GET_CHANNEL_QA 213
-#define WLC_START_CHANNEL_QA 214
-#define WLC_GET_CHANNEL_SEL 215
-#define WLC_START_CHANNEL_SEL 216
-#define WLC_GET_VALID_CHANNELS 217
-#define WLC_GET_FAKEFRAG 218
-#define WLC_SET_FAKEFRAG 219
-#define WLC_GET_PWROUT_PERCENTAGE 220
-#define WLC_SET_PWROUT_PERCENTAGE 221
-#define WLC_SET_BAD_FRAME_PREEMPT 222
-#define WLC_GET_BAD_FRAME_PREEMPT 223
-#define WLC_SET_LEAP_LIST 224
-#define WLC_GET_LEAP_LIST 225
-#define WLC_GET_CWMIN 226
-#define WLC_SET_CWMIN 227
-#define WLC_GET_CWMAX 228
-#define WLC_SET_CWMAX 229
-#define WLC_GET_WET 230
-#define WLC_SET_WET 231
-#define WLC_GET_PUB 232
- /* #define WLC_SET_GLACIAL_TIMER 233 *//* no longer supported */
- /* #define WLC_GET_GLACIAL_TIMER 234 *//* no longer supported */
-#define WLC_GET_KEY_PRIMARY 235
-#define WLC_SET_KEY_PRIMARY 236
- /* #define WLC_DUMP_RADIOREGS 237 *//* no longer supported */
-#define WLC_RESERVED4 238
-#define WLC_RESERVED5 239
-#define WLC_UNSET_CALLBACK 240
-#define WLC_SET_CALLBACK 241
-#define WLC_GET_RADAR 242
-#define WLC_SET_RADAR 243
-#define WLC_SET_SPECT_MANAGMENT 244
-#define WLC_GET_SPECT_MANAGMENT 245
-#define WLC_WDS_GET_REMOTE_HWADDR 246 /* handled in wl_linux.c/wl_vx.c */
-#define WLC_WDS_GET_WPA_SUP 247
-#define WLC_SET_CS_SCAN_TIMER 248
-#define WLC_GET_CS_SCAN_TIMER 249
-#define WLC_MEASURE_REQUEST 250
-#define WLC_INIT 251
-#define WLC_SEND_QUIET 252
-#define WLC_KEEPALIVE 253
-#define WLC_SEND_PWR_CONSTRAINT 254
-#define WLC_UPGRADE_STATUS 255
-#define WLC_CURRENT_PWR 256
-#define WLC_GET_SCAN_PASSIVE_TIME 257
-#define WLC_SET_SCAN_PASSIVE_TIME 258
-#define WLC_LEGACY_LINK_BEHAVIOR 259
-#define WLC_GET_CHANNELS_IN_COUNTRY 260
-#define WLC_GET_COUNTRY_LIST 261
-#define WLC_GET_VAR 262 /* get value of named variable */
-#define WLC_SET_VAR 263 /* set named variable to value */
-#define WLC_NVRAM_GET 264 /* deprecated */
-#define WLC_NVRAM_SET 265
-#define WLC_NVRAM_DUMP 266
-#define WLC_REBOOT 267
-#define WLC_SET_WSEC_PMK 268
-#define WLC_GET_AUTH_MODE 269
-#define WLC_SET_AUTH_MODE 270
-#define WLC_GET_WAKEENTRY 271
-#define WLC_SET_WAKEENTRY 272
-#define WLC_NDCONFIG_ITEM 273 /* currently handled in wl_oid.c */
-#define WLC_NVOTPW 274
-#define WLC_OTPW 275
-#define WLC_IOV_BLOCK_GET 276
-#define WLC_IOV_MODULES_GET 277
-#define WLC_SOFT_RESET 278
-#define WLC_GET_ALLOW_MODE 279
-#define WLC_SET_ALLOW_MODE 280
-#define WLC_GET_DESIRED_BSSID 281
-#define WLC_SET_DESIRED_BSSID 282
-#define WLC_DISASSOC_MYAP 283
-#define WLC_GET_RESERVED10 284
-#define WLC_GET_RESERVED11 285
-#define WLC_GET_RESERVED12 286
-#define WLC_GET_RESERVED13 287
-#define WLC_GET_RESERVED14 288
-#define WLC_SET_RESERVED15 289
-#define WLC_SET_RESERVED16 290
-#define WLC_GET_RESERVED17 291
-#define WLC_GET_RESERVED18 292
-#define WLC_GET_RESERVED19 293
-#define WLC_SET_RESERVED1A 294
-#define WLC_GET_RESERVED1B 295
-#define WLC_GET_RESERVED1C 296
-#define WLC_GET_RESERVED1D 297
-#define WLC_SET_RESERVED1E 298
-#define WLC_GET_RESERVED1F 299
-#define WLC_GET_RESERVED20 300
-#define WLC_GET_RESERVED21 301
-#define WLC_GET_RESERVED22 302
-#define WLC_GET_RESERVED23 303
-#define WLC_GET_RESERVED24 304
-#define WLC_SET_RESERVED25 305
-#define WLC_GET_RESERVED26 306
-#define WLC_NPHY_SAMPLE_COLLECT 307 /* Nphy sample collect mode */
-#define WLC_UM_PRIV 308 /* for usermode driver private ioctl */
-#define WLC_GET_CMD 309
- /* #define WLC_LAST 310 *//* Never used - can be reused */
-#define WLC_RESERVED8 311
-#define WLC_RESERVED9 312
-#define WLC_RESERVED1 313
-#define WLC_RESERVED2 314
-#define WLC_RESERVED3 315
-#define WLC_LAST 316
-
-#ifndef EPICTRL_COOKIE
-#define EPICTRL_COOKIE 0xABADCEDE
-#endif
-
-#define WL_DECRYPT_STATUS_SUCCESS 1
-#define WL_DECRYPT_STATUS_FAILURE 2
-#define WL_DECRYPT_STATUS_UNKNOWN 3
-
-/* allows user-mode app to poll the status of USB image upgrade */
-#define WLC_UPGRADE_SUCCESS 0
-#define WLC_UPGRADE_PENDING 1
-
-/* WLC_GET_AUTH, WLC_SET_AUTH values */
-#define WL_AUTH_OPEN_SYSTEM 0 /* d11 open authentication */
-#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
-#define WL_AUTH_OPEN_SHARED 2 /* try open, then shared if open failed w/rc 13 */
-
-/* Bit masks for radio disabled status - returned by WL_GET_RADIO */
-#define WL_RADIO_SW_DISABLE (1<<0)
-#define WL_RADIO_HW_DISABLE (1<<1)
-#define WL_RADIO_MPC_DISABLE (1<<2)
-#define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */
-
-#define WL_SPURAVOID_OFF 0
-#define WL_SPURAVOID_ON1 1
-#define WL_SPURAVOID_ON2 2
-
-/* Override bit for WLC_SET_TXPWR. if set, ignore other level limits */
-#define WL_TXPWR_OVERRIDE (1U<<31)
-
-#define WL_PHY_PAVARS_LEN 6 /* Phy type, Band range, chain, a1, b0, b1 */
-
-typedef struct wl_po {
- u16 phy_type; /* Phy type */
- u16 band;
- u16 cckpo;
- u32 ofdmpo;
- u16 mcspo[8];
-} wl_po_t;
-
-/* a large TX Power as an init value to factor out of min() calculations,
- * keep low enough to fit in an s8, units are .25 dBm
- */
-#define WLC_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */
-
-/* "diag" iovar argument and error code */
-#define WL_DIAG_INTERRUPT 1 /* d11 loopback interrupt test */
-#define WL_DIAG_LOOPBACK 2 /* d11 loopback data test */
-#define WL_DIAG_MEMORY 3 /* d11 memory test */
-#define WL_DIAG_LED 4 /* LED test */
-#define WL_DIAG_REG 5 /* d11/phy register test */
-#define WL_DIAG_SROM 6 /* srom read/crc test */
-#define WL_DIAG_DMA 7 /* DMA test */
-
-#define WL_DIAGERR_SUCCESS 0
-#define WL_DIAGERR_FAIL_TO_RUN 1 /* unable to run requested diag */
-#define WL_DIAGERR_NOT_SUPPORTED 2 /* diag requested is not supported */
-#define WL_DIAGERR_INTERRUPT_FAIL 3 /* loopback interrupt test failed */
-#define WL_DIAGERR_LOOPBACK_FAIL 4 /* loopback data test failed */
-#define WL_DIAGERR_SROM_FAIL 5 /* srom read failed */
-#define WL_DIAGERR_SROM_BADCRC 6 /* srom crc failed */
-#define WL_DIAGERR_REG_FAIL 7 /* d11/phy register test failed */
-#define WL_DIAGERR_MEMORY_FAIL 8 /* d11 memory test failed */
-#define WL_DIAGERR_NOMEM 9 /* diag test failed due to no memory */
-#define WL_DIAGERR_DMA_FAIL 10 /* DMA test failed */
-
-#define WL_DIAGERR_MEMORY_TIMEOUT 11 /* d11 memory test didn't finish in time */
-#define WL_DIAGERR_MEMORY_BADPATTERN 12 /* d11 memory test resulted in a bad pattern */
-
-/* band types */
-#define WLC_BAND_AUTO 0 /* auto-select */
-#define WLC_BAND_5G 1 /* 5 Ghz */
-#define WLC_BAND_2G 2 /* 2.4 Ghz */
-#define WLC_BAND_ALL 3 /* all bands */
-
-/* band range returned by band_range iovar */
-#define WL_CHAN_FREQ_RANGE_2G 0
-#define WL_CHAN_FREQ_RANGE_5GL 1
-#define WL_CHAN_FREQ_RANGE_5GM 2
-#define WL_CHAN_FREQ_RANGE_5GH 3
-
-/* phy types (returned by WLC_GET_PHYTPE) */
-#define WLC_PHY_TYPE_A 0
-#define WLC_PHY_TYPE_B 1
-#define WLC_PHY_TYPE_G 2
-#define WLC_PHY_TYPE_N 4
-#define WLC_PHY_TYPE_LP 5
-#define WLC_PHY_TYPE_SSN 6
-#define WLC_PHY_TYPE_HT 7
-#define WLC_PHY_TYPE_LCN 8
-#define WLC_PHY_TYPE_NULL 0xf
-
-/* MAC list modes */
-#define WLC_MACMODE_DISABLED 0 /* MAC list disabled */
-#define WLC_MACMODE_DENY 1 /* Deny specified (i.e. allow unspecified) */
-#define WLC_MACMODE_ALLOW 2 /* Allow specified (i.e. deny unspecified) */
-
-/*
- * 54g modes (basic bits may still be overridden)
- *
- * GMODE_LEGACY_B Rateset: 1b, 2b, 5.5, 11
- * Preamble: Long
- * Shortslot: Off
- * GMODE_AUTO Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
- * Extended Rateset: 6, 9, 12, 48
- * Preamble: Long
- * Shortslot: Auto
- * GMODE_ONLY Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54
- * Extended Rateset: 6b, 9, 12b, 48
- * Preamble: Short required
- * Shortslot: Auto
- * GMODE_B_DEFERRED Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
- * Extended Rateset: 6, 9, 12, 48
- * Preamble: Long
- * Shortslot: On
- * GMODE_PERFORMANCE Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54
- * Preamble: Short required
- * Shortslot: On and required
- * GMODE_LRS Rateset: 1b, 2b, 5.5b, 11b
- * Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54
- * Preamble: Long
- * Shortslot: Auto
- */
-#define GMODE_LEGACY_B 0
-#define GMODE_AUTO 1
-#define GMODE_ONLY 2
-#define GMODE_B_DEFERRED 3
-#define GMODE_PERFORMANCE 4
-#define GMODE_LRS 5
-#define GMODE_MAX 6
-
-/* values for PLCPHdr_override */
-#define WLC_PLCP_AUTO -1
-#define WLC_PLCP_SHORT 0
-#define WLC_PLCP_LONG 1
-
-/* values for g_protection_override and n_protection_override */
-#define WLC_PROTECTION_AUTO -1
-#define WLC_PROTECTION_OFF 0
-#define WLC_PROTECTION_ON 1
-#define WLC_PROTECTION_MMHDR_ONLY 2
-#define WLC_PROTECTION_CTS_ONLY 3
-
-/* values for g_protection_control and n_protection_control */
-#define WLC_PROTECTION_CTL_OFF 0
-#define WLC_PROTECTION_CTL_LOCAL 1
-#define WLC_PROTECTION_CTL_OVERLAP 2
-
-/* values for n_protection */
-#define WLC_N_PROTECTION_OFF 0
-#define WLC_N_PROTECTION_OPTIONAL 1
-#define WLC_N_PROTECTION_20IN40 2
-#define WLC_N_PROTECTION_MIXEDMODE 3
-
-/* values for n_preamble_type */
-#define WLC_N_PREAMBLE_MIXEDMODE 0
-#define WLC_N_PREAMBLE_GF 1
-#define WLC_N_PREAMBLE_GF_BRCM 2
-
-/* values for band specific 40MHz capabilities */
-#define WLC_N_BW_20ALL 0
-#define WLC_N_BW_40ALL 1
-#define WLC_N_BW_20IN2G_40IN5G 2
-
-/* values to force tx/rx chain */
-#define WLC_N_TXRX_CHAIN0 0
-#define WLC_N_TXRX_CHAIN1 1
-
-/* bitflags for SGI support (sgi_rx iovar) */
-#define WLC_N_SGI_20 0x01
-#define WLC_N_SGI_40 0x02
-
-/* Values for PM */
-#define PM_OFF 0
-#define PM_MAX 1
-
-/* interference mitigation options */
-#define INTERFERE_OVRRIDE_OFF -1 /* interference override off */
-#define INTERFERE_NONE 0 /* off */
-#define NON_WLAN 1 /* foreign/non 802.11 interference, no auto detect */
-#define WLAN_MANUAL 2 /* ACI: no auto detection */
-#define WLAN_AUTO 3 /* ACI: auto detect */
-#define WLAN_AUTO_W_NOISE 4 /* ACI: auto - detect and non 802.11 interference */
-#define AUTO_ACTIVE (1 << 7) /* Auto is currently active */
-
-#define WL_RSSI_ANT_VERSION 1 /* current version of wl_rssi_ant_t */
-#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */
-#define WL_ANT_HT_RX_MAX 3 /* max 3 receive antennas/cores */
-#define WL_ANT_IDX_1 0 /* antenna index 1 */
-#define WL_ANT_IDX_2 1 /* antenna index 2 */
-
-#ifndef WL_RSSI_ANT_MAX
-#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */
-#elif WL_RSSI_ANT_MAX != 4
-#error "WL_RSSI_ANT_MAX does not match"
-#endif
-
-/* RSSI per antenna */
-typedef struct {
- u32 version; /* version field */
- u32 count; /* number of valid antenna rssi */
- s8 rssi_ant[WL_RSSI_ANT_MAX]; /* rssi per antenna */
-} wl_rssi_ant_t;
-
-#define NUM_PWRCTRL_RATES 12
-
-typedef struct {
- u8 txpwr_band_max[NUM_PWRCTRL_RATES]; /* User set target */
- u8 txpwr_limit[NUM_PWRCTRL_RATES]; /* reg and local power limit */
- u8 txpwr_local_max; /* local max according to the AP */
- u8 txpwr_local_constraint; /* local constraint according to the AP */
- u8 txpwr_chan_reg_max; /* Regulatory max for this channel */
- u8 txpwr_target[2][NUM_PWRCTRL_RATES]; /* Latest target for 2.4 and 5 Ghz */
- u8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 Ghz */
- u8 txpwr_opo[NUM_PWRCTRL_RATES]; /* On G phy, OFDM power offset */
- u8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES]; /* Max CCK power for this band (SROM) */
- u8 txpwr_bphy_ofdm_max; /* Max OFDM power for this band (SROM) */
- u8 txpwr_aphy_max[NUM_PWRCTRL_RATES]; /* Max power for A band (SROM) */
- s8 txpwr_antgain[2]; /* Ant gain for each band - from SROM */
- u8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */
-} tx_power_legacy_t;
-
-#define WL_TX_POWER_RATES_LEGACY 45
-#define WL_TX_POWER_MCS20_FIRST 12
-#define WL_TX_POWER_MCS20_NUM 16
-#define WL_TX_POWER_MCS40_FIRST 28
-#define WL_TX_POWER_MCS40_NUM 17
-
-
-#define WL_TX_POWER_RATES 101
-#define WL_TX_POWER_CCK_FIRST 0
-#define WL_TX_POWER_CCK_NUM 4
-#define WL_TX_POWER_OFDM_FIRST 4 /* Index for first 20MHz OFDM SISO rate */
-#define WL_TX_POWER_OFDM20_CDD_FIRST 12 /* Index for first 20MHz OFDM CDD rate */
-#define WL_TX_POWER_OFDM40_SISO_FIRST 52 /* Index for first 40MHz OFDM SISO rate */
-#define WL_TX_POWER_OFDM40_CDD_FIRST 60 /* Index for first 40MHz OFDM CDD rate */
-#define WL_TX_POWER_OFDM_NUM 8
-#define WL_TX_POWER_MCS20_SISO_FIRST 20 /* Index for first 20MHz MCS SISO rate */
-#define WL_TX_POWER_MCS20_CDD_FIRST 28 /* Index for first 20MHz MCS CDD rate */
-#define WL_TX_POWER_MCS20_STBC_FIRST 36 /* Index for first 20MHz MCS STBC rate */
-#define WL_TX_POWER_MCS20_SDM_FIRST 44 /* Index for first 20MHz MCS SDM rate */
-#define WL_TX_POWER_MCS40_SISO_FIRST 68 /* Index for first 40MHz MCS SISO rate */
-#define WL_TX_POWER_MCS40_CDD_FIRST 76 /* Index for first 40MHz MCS CDD rate */
-#define WL_TX_POWER_MCS40_STBC_FIRST 84 /* Index for first 40MHz MCS STBC rate */
-#define WL_TX_POWER_MCS40_SDM_FIRST 92 /* Index for first 40MHz MCS SDM rate */
-#define WL_TX_POWER_MCS_1_STREAM_NUM 8
-#define WL_TX_POWER_MCS_2_STREAM_NUM 8
-#define WL_TX_POWER_MCS_32 100 /* Index for 40MHz rate MCS 32 */
-#define WL_TX_POWER_MCS_32_NUM 1
-
-/* sslpnphy specifics */
-#define WL_TX_POWER_MCS20_SISO_FIRST_SSN 12 /* Index for first 20MHz MCS SISO rate */
-
-/* tx_power_t.flags bits */
-#define WL_TX_POWER_F_ENABLED 1
-#define WL_TX_POWER_F_HW 2
-#define WL_TX_POWER_F_MIMO 4
-#define WL_TX_POWER_F_SISO 8
-
-typedef struct {
- u32 flags;
- chanspec_t chanspec; /* txpwr report for this channel */
- chanspec_t local_chanspec; /* channel on which we are associated */
- u8 local_max; /* local max according to the AP */
- u8 local_constraint; /* local constraint according to the AP */
- s8 antgain[2]; /* Ant gain for each band - from SROM */
- u8 rf_cores; /* count of RF Cores being reported */
- u8 est_Pout[4]; /* Latest tx power out estimate per RF chain */
- u8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain
- * without adjustment
- */
- u8 est_Pout_cck; /* Latest CCK tx power out estimate */
- u8 tx_power_max[4]; /* Maximum target power among all rates */
- u8 tx_power_max_rate_ind[4]; /* Index of the rate with the max target power */
- u8 user_limit[WL_TX_POWER_RATES]; /* User limit */
- u8 reg_limit[WL_TX_POWER_RATES]; /* Regulatory power limit */
- u8 board_limit[WL_TX_POWER_RATES]; /* Max power board can support (SROM) */
- u8 target[WL_TX_POWER_RATES]; /* Latest target power */
-} tx_power_t;
-
-typedef struct tx_inst_power {
- u8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 Ghz */
- u8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */
-} tx_inst_power_t;
-
-/* Message levels */
-#define WL_ERROR_VAL 0x00000001
-#define WL_TRACE_VAL 0x00000002
-
-/* maximum channels returned by the get valid channels iovar */
-#define WL_NUMCHANNELS 64
-#define WL_NUMCHANSPECS 100
-
-struct tsinfo_arg {
- u8 octets[3];
-};
-
-#define NFIFO 6 /* # tx/rx fifopairs */
-
-struct wl_msglevel2 {
- u32 low;
- u32 high;
-};
-
-/* structure for per-tid ampdu control */
-struct ampdu_tid_control {
- u8 tid; /* tid */
- u8 enable; /* enable/disable */
-};
-
-/* structure for identifying ea/tid for sending addba/delba */
-struct ampdu_ea_tid {
- u8 ea[ETH_ALEN]; /* Station address */
- u8 tid; /* tid */
-};
-/* structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */
-struct ampdu_retry_tid {
- u8 tid; /* tid */
- u8 retry; /* retry value */
-};
-
-
-/* Software feature flag defines used by wlfeatureflag */
-#define WL_SWFL_NOHWRADIO 0x0004
-#define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */
-#define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */
-
-#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */
-
-
-/* Pattern matching filter. Specifies an offset within received packets to
- * start matching, the pattern to match, the size of the pattern, and a bitmask
- * that indicates which bits within the pattern should be matched.
- */
-typedef struct wl_pkt_filter_pattern {
- u32 offset; /* Offset within received packet to start pattern matching.
- * Offset '0' is the first byte of the ethernet header.
- */
- u32 size_bytes; /* Size of the pattern. Bitmask must be the same size. */
- u8 mask_and_pattern[1]; /* Variable length mask and pattern data. mask starts
- * at offset 0. Pattern immediately follows mask.
- */
-} wl_pkt_filter_pattern_t;
-
-/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
-typedef struct wl_pkt_filter {
- u32 id; /* Unique filter id, specified by app. */
- u32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). */
- u32 negate_match; /* Negate the result of filter matches */
- union { /* Filter definitions */
- wl_pkt_filter_pattern_t pattern; /* Pattern matching filter */
- } u;
-} wl_pkt_filter_t;
-
-#define WL_PKT_FILTER_FIXED_LEN offsetof(wl_pkt_filter_t, u)
-#define WL_PKT_FILTER_PATTERN_FIXED_LEN offsetof(wl_pkt_filter_pattern_t, mask_and_pattern)
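/* Editorial sketch, not part of the patch: total "pkt_filter_add" payload
 * length for a pattern filter, derived from the fixed-length macros above.
 * The variable tail holds the mask followed by the pattern, each
 * size_bytes long, hence the factor of two.
 */
static inline u32 wl_pkt_filter_total_len(u32 pattern_size_bytes)
{
	return WL_PKT_FILTER_FIXED_LEN +
	       WL_PKT_FILTER_PATTERN_FIXED_LEN +
	       2 * pattern_size_bytes;
}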
-
-/* IOVAR "pkt_filter_enable" parameter. */
-typedef struct wl_pkt_filter_enable {
- u32 id; /* Unique filter id */
- u32 enable; /* Enable/disable bool */
-} wl_pkt_filter_enable_t;
-
-
-#define WLC_RSSI_INVALID 0 /* invalid RSSI value */
-
-/* n-mode support capability */
-/* 2x2 includes both 1x1 & 2x2 devices
- * reserved #define 2 for future when we want to separate 1x1 & 2x2 and
- * control it independently
- */
-#define WL_11N_2x2 1
-#define WL_11N_3x3 3
-#define WL_11N_4x4 4
-
-/* define 11n feature disable flags */
-#define WLFEATURE_DISABLE_11N 0x00000001
-#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002
-#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004
-#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008
-#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010
-#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
-#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
-#define WLFEATURE_DISABLE_11N_GF 0x00000080
-
-#define WL_EVENTING_MASK_LEN 16
-
-#define TOE_TX_CSUM_OL 0x00000001
-#define TOE_RX_CSUM_OL 0x00000002
-
-#define PM_OFF 0
-#define PM_MAX 1
-#define PM_FAST 2
-
-typedef enum sup_auth_status {
- WLC_SUP_DISCONNECTED = 0,
- WLC_SUP_CONNECTING,
- WLC_SUP_IDREQUIRED,
- WLC_SUP_AUTHENTICATING,
- WLC_SUP_AUTHENTICATED,
- WLC_SUP_KEYXCHANGE,
- WLC_SUP_KEYED,
- WLC_SUP_TIMEOUT,
- WLC_SUP_LAST_BASIC_STATE,
- WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED,
- WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE,
- WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE,
- WLC_SUP_KEYXCHANGE_PREP_M4,
- WLC_SUP_KEYXCHANGE_WAIT_G1,
- WLC_SUP_KEYXCHANGE_PREP_G2
-} sup_auth_status_t;
-#endif /* _wlioctl_h_ */
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 1502d80f6f7..5e65dde5845 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -2,6 +2,7 @@ config COMEDI
tristate "Data acquisition support (comedi)"
default N
depends on m
+ depends on BROKEN || FRV || M32R || MN10300 || SUPERH || TILE || X86
---help---
Enable support for a wide range of data acquisition devices
for Linux.
@@ -160,6 +161,7 @@ config COMEDI_PCL730
config COMEDI_PCL812
tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
@@ -171,6 +173,7 @@ config COMEDI_PCL812
config COMEDI_PCL816
tristate "Advantech PCL-814 and PCL-816 ISA card support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for Advantech PCL-814 and PCL-816 ISA cards
@@ -180,6 +183,7 @@ config COMEDI_PCL816
config COMEDI_PCL818
tristate "Advantech PCL-718 and PCL-818 ISA card support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for Advantech PCL-818 ISA cards
@@ -269,6 +273,7 @@ config COMEDI_DAS800
config COMEDI_DAS1800
tristate "DAS1800 and compatible ISA card support"
+ depends on VIRT_TO_BUS
select COMEDI_FC
default N
---help---
@@ -340,6 +345,7 @@ config COMEDI_DT2817
config COMEDI_DT282X
tristate "Data Translation DT2821 series and DT-EZ ISA card support"
select COMEDI_FC
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for Data Translation DT2821 series including DT-EZ
@@ -419,6 +425,7 @@ config COMEDI_ADQ12B
config COMEDI_NI_AT_A2150
tristate "NI AT-A2150 ISA card support"
depends on COMEDI_NI_COMMON
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for National Instruments AT-A2150 cards
@@ -536,6 +543,7 @@ if COMEDI_PCI_DRIVERS && PCI
config COMEDI_ADDI_APCI_035
tristate "ADDI-DATA APCI_035 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_035 cards
@@ -545,6 +553,7 @@ config COMEDI_ADDI_APCI_035
config COMEDI_ADDI_APCI_1032
tristate "ADDI-DATA APCI_1032 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_1032 cards
@@ -554,6 +563,7 @@ config COMEDI_ADDI_APCI_1032
config COMEDI_ADDI_APCI_1500
tristate "ADDI-DATA APCI_1500 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_1500 cards
@@ -563,6 +573,7 @@ config COMEDI_ADDI_APCI_1500
config COMEDI_ADDI_APCI_1516
tristate "ADDI-DATA APCI_1516 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_1516 cards
@@ -572,6 +583,7 @@ config COMEDI_ADDI_APCI_1516
config COMEDI_ADDI_APCI_1564
tristate "ADDI-DATA APCI_1564 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_1564 cards
@@ -581,6 +593,7 @@ config COMEDI_ADDI_APCI_1564
config COMEDI_ADDI_APCI_16XX
tristate "ADDI-DATA APCI_16xx support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_16xx cards
@@ -590,6 +603,7 @@ config COMEDI_ADDI_APCI_16XX
config COMEDI_ADDI_APCI_2016
tristate "ADDI-DATA APCI_2016 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_2016 cards
@@ -599,6 +613,7 @@ config COMEDI_ADDI_APCI_2016
config COMEDI_ADDI_APCI_2032
tristate "ADDI-DATA APCI_2032 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_2032 cards
@@ -608,6 +623,7 @@ config COMEDI_ADDI_APCI_2032
config COMEDI_ADDI_APCI_2200
tristate "ADDI-DATA APCI_2200 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_2200 cards
@@ -617,6 +633,7 @@ config COMEDI_ADDI_APCI_2200
config COMEDI_ADDI_APCI_3001
tristate "ADDI-DATA APCI_3001 support"
+ depends on VIRT_TO_BUS
select COMEDI_FC
default N
---help---
@@ -627,6 +644,7 @@ config COMEDI_ADDI_APCI_3001
config COMEDI_ADDI_APCI_3120
tristate "ADDI-DATA APCI_3520 support"
+ depends on VIRT_TO_BUS
select COMEDI_FC
default N
---help---
@@ -637,6 +655,7 @@ config COMEDI_ADDI_APCI_3120
config COMEDI_ADDI_APCI_3501
tristate "ADDI-DATA APCI_3501 support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_3501 cards
@@ -646,6 +665,7 @@ config COMEDI_ADDI_APCI_3501
config COMEDI_ADDI_APCI_3XXX
tristate "ADDI-DATA APCI_3xxx support"
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADDI-DATA APCI_3xxx cards
@@ -712,6 +732,7 @@ config COMEDI_ADL_PCI9111
config COMEDI_ADL_PCI9118
tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
select COMEDI_FC
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
@@ -820,6 +841,16 @@ config COMEDI_DT3000
To compile this driver as a module, choose M here: the module will be
called dt3000.
+config COMEDI_DYNA_PCI10XX
+ tristate "Dynalog PCI DAQ series support"
+ default N
+ ---help---
+ Enable support for Dynalog PCI DAQ series
+ PCI-1050
+
+ To compile this driver as a module, choose M here: the module will be
+ called dyna_pci10xx.
+
config COMEDI_UNIOXX5
tristate "Fastwel UNIOxx-5 analog and digital io board support"
default N
@@ -1287,6 +1318,7 @@ config COMEDI_NI_LABPC
depends on COMEDI_MITE
select COMEDI_8255
select COMEDI_FC
+ depends on VIRT_TO_BUS
default N
---help---
Enable support for National Instruments Lab-PC and compatibles
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index 6c900e2756f..14ea35ac015 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -514,12 +514,16 @@
return 0x1 + pfi_channel;
else
return 0xb + pfi_channel;
- } static inline unsigned NI_USUAL_RTSI_SELECT(unsigned rtsi_channel) {
+ }
+
+ static inline unsigned NI_USUAL_RTSI_SELECT(unsigned rtsi_channel)
+ {
if (rtsi_channel < 7)
return 0xb + rtsi_channel;
else
return 0x1b;
}
+
/* mode bits for NI general-purpose counters, set with
* INSN_CONFIG_SET_COUNTER_MODE */
#define NI_GPCT_COUNTING_MODE_SHIFT 16
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index e7e72b8d8cd..e90e3cceb5f 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -383,8 +383,8 @@ static int do_devinfo_ioctl(struct comedi_device *dev,
/* fill devinfo structure */
devinfo.version_code = COMEDI_VERSION_CODE;
devinfo.n_subdevs = dev->n_subdevices;
- memcpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);
- memcpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN);
+ strlcpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);
+ strlcpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN);
if (read_subdev)
devinfo.read_subdevice = read_subdev - dev->subdevices;
@@ -1291,10 +1291,10 @@ static int do_lock_ioctl(struct comedi_device *dev, unsigned int arg,
s->lock = file;
spin_unlock_irqrestore(&s->spin_lock, flags);
+#if 0
if (ret < 0)
return ret;
-#if 0
if (s->lock_f)
ret = s->lock_f(dev, s);
#endif
@@ -2175,9 +2175,8 @@ int comedi_alloc_board_minor(struct device *hardware_device)
return -EBUSY;
}
info->device->minor = i;
- csdev = COMEDI_DEVICE_CREATE(comedi_class, NULL,
- MKDEV(COMEDI_MAJOR, i), NULL,
- hardware_device, "comedi%i", i);
+ csdev = device_create(comedi_class, hardware_device,
+ MKDEV(COMEDI_MAJOR, i), NULL, "comedi%i", i);
if (!IS_ERR(csdev))
info->device->class_dev = csdev;
dev_set_drvdata(csdev, info);
@@ -2276,10 +2275,9 @@ int comedi_alloc_subdevice_minor(struct comedi_device *dev,
return -EBUSY;
}
s->minor = i;
- csdev = COMEDI_DEVICE_CREATE(comedi_class, dev->class_dev,
- MKDEV(COMEDI_MAJOR, i), NULL, NULL,
- "comedi%i_subd%i", dev->minor,
- (int)(s - dev->subdevices));
+ csdev = device_create(comedi_class, dev->class_dev,
+ MKDEV(COMEDI_MAJOR, i), NULL, "comedi%i_subd%i",
+ dev->minor, (int)(s - dev->subdevices));
if (!IS_ERR(csdev))
s->class_dev = csdev;
dev_set_drvdata(csdev, info);
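
(For reference, the COMEDI_DEVICE_CREATE() wrapper removed just below existed only to
pick a parent for the new class device; the calls above can now pass the parent
explicitly because the core helper already takes one. A minimal sketch of the
signature being relied on, as found in the driver core of kernels of this era rather
than in this patch:

	struct device *device_create(struct class *cls, struct device *parent,
				     dev_t devt, void *drvdata,
				     const char *fmt, ...);

Passing hardware_device, or dev->class_dev for subdevices, as the parent yields the
same sysfs placement the removed macro used to compute.)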
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 68aa9176d24..7a0d4bcbc35 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -61,9 +61,6 @@
#define COMEDI_NUM_BOARD_MINORS 0x30
#define COMEDI_FIRST_SUBDEVICE_MINOR COMEDI_NUM_BOARD_MINORS
-#define COMEDI_DEVICE_CREATE(cs, parent, devt, drvdata, device, fmt...) \
- device_create(cs, ((parent) ? (parent) : (device)), devt, drvdata, fmt)
-
struct comedi_subdevice {
struct comedi_device *device;
int type;
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 6d60e91b3a8..db1fd63aaab 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -502,7 +502,11 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
}
if (i == n_pages) {
async->prealloc_buf =
+#ifdef PAGE_KERNEL_NOCACHE
vmap(pages, n_pages, VM_MAP, PAGE_KERNEL_NOCACHE);
+#else
+ vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
+#endif
}
vfree(pages);
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index 354fb7d2984..33bf1f5aad4 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_COMEDI_AMPLC_PCI224) += amplc_pci224.o
obj-$(CONFIG_COMEDI_AMPLC_PCI230) += amplc_pci230.o
obj-$(CONFIG_COMEDI_CONTEC_PCI_DIO) += contec_pci_dio.o
obj-$(CONFIG_COMEDI_DT3000) += dt3000.o
+obj-$(CONFIG_COMEDI_DYNA_PCI10XX) += dyna_pci10xx.o
obj-$(CONFIG_COMEDI_UNIOXX5) += unioxx5.o
obj-$(CONFIG_COMEDI_GSC_HPDI) += gsc_hpdi.o
obj-$(CONFIG_COMEDI_ICP_MULTI) += icp_multi.o
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.c b/drivers/staging/comedi/drivers/addi-data/addi_common.c
index 6cf19ed683a..6fb7594319c 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.c
@@ -58,8 +58,8 @@ You should also find the complete GPL in the COPYING file accompanying this sour
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/gfp.h>
+#include <linux/io.h>
#include "../../comedidev.h"
-#include <asm/io.h>
#if defined(CONFIG_APCI_1710) || defined(CONFIG_APCI_3200) || defined(CONFIG_APCI_3300)
#include <asm/i387.h>
#endif
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index 08b71d9974b..f17654e44ae 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -67,6 +67,7 @@ Configuration options:
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include "amcc_s5933.h"
#include "8253.h"
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index 1b5682104a0..29455a8e88b 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -234,7 +234,7 @@ static int pci1723_insn_read_ao(struct comedi_device *dev,
int n, chan;
chan = CR_CHAN(insn->chanspec);
- DPRINTK(" adv_PCI1723 DEBUG: pci1723_insn_read_ao() ----- \n");
+ DPRINTK(" adv_PCI1723 DEBUG: pci1723_insn_read_ao() -----\n");
for (n = 0; n < insn->n; n++)
data[n] = devpriv->ao_data[chan];
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index 60c2b12d6ff..a6df30b7fd7 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -102,6 +102,7 @@ TODO:
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include "../comedidev.h"
#include <linux/ioport.h>
@@ -198,7 +199,7 @@ static void das1800_flush_dma(struct comedi_device *dev,
struct comedi_subdevice *s);
static void das1800_flush_dma_channel(struct comedi_device *dev,
struct comedi_subdevice *s,
- unsigned int channel, uint16_t * buffer);
+ unsigned int channel, uint16_t *buffer);
static void das1800_handle_fifo_half_full(struct comedi_device *dev,
struct comedi_subdevice *s);
static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
@@ -1050,9 +1051,8 @@ static void munge_data(struct comedi_device *dev, uint16_t * array,
/* convert to unsigned type if we are in a bipolar mode */
if (!unipolar) {
- for (i = 0; i < num_elements; i++) {
+ for (i = 0; i < num_elements; i++)
array[i] = munge_bipolar_sample(dev, array[i]);
- }
}
}
@@ -1060,7 +1060,7 @@ static void munge_data(struct comedi_device *dev, uint16_t * array,
* Assumes dma lock is held */
static void das1800_flush_dma_channel(struct comedi_device *dev,
struct comedi_subdevice *s,
- unsigned int channel, uint16_t * buffer)
+ unsigned int channel, uint16_t *buffer)
{
unsigned int num_bytes, num_samples;
struct comedi_cmd *cmd = &s->async->cmd;
@@ -1153,7 +1153,8 @@ static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
break;
dpnt = inw(dev->iobase + DAS1800_FIFO);
/* convert to unsigned type if we are in a bipolar mode */
- if (!unipolar) ;
+ if (!unipolar)
+ ;
dpnt = munge_bipolar_sample(dev, dpnt);
cfc_write_to_buffer(s, dpnt);
if (cmd->stop_src == TRIG_COUNT)
@@ -1364,9 +1365,8 @@ static int control_a_bits(struct comedi_cmd cmd)
int control_a;
control_a = FFEN; /* enable fifo */
- if (cmd.stop_src == TRIG_EXT) {
+ if (cmd.stop_src == TRIG_EXT)
control_a |= ATEN;
- }
switch (cmd.start_src) {
case TRIG_EXT:
control_a |= TGEN | CGSL;
@@ -1443,9 +1443,8 @@ static int setup_counters(struct comedi_device *dev, struct comedi_cmd cmd)
&(cmd.convert_arg),
cmd.
flags & TRIG_ROUND_MASK);
- if (das1800_set_frequency(dev) < 0) {
+ if (das1800_set_frequency(dev) < 0)
return -1;
- }
}
break;
case TRIG_TIMER: /* in burst mode */
@@ -1454,9 +1453,8 @@ static int setup_counters(struct comedi_device *dev, struct comedi_cmd cmd)
&(devpriv->divisor2),
&(cmd.scan_begin_arg),
cmd.flags & TRIG_ROUND_MASK);
- if (das1800_set_frequency(dev) < 0) {
+ if (das1800_set_frequency(dev) < 0)
return -1;
- }
break;
default:
break;
@@ -1553,11 +1551,10 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
/* disable dma on TRIG_WAKE_EOS, or TRIG_RT
* (because dma in handler is unsafe at hard real-time priority) */
- if (cmd.flags & (TRIG_WAKE_EOS | TRIG_RT)) {
+ if (cmd.flags & (TRIG_WAKE_EOS | TRIG_RT))
devpriv->irq_dma_bits &= ~DMA_ENABLED;
- } else {
+ else
devpriv->irq_dma_bits |= devpriv->dma_bits;
- }
/* interrupt on end of conversion for TRIG_WAKE_EOS */
if (cmd.flags & TRIG_WAKE_EOS) {
/* interrupt fifo not empty */
@@ -1567,9 +1564,8 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
devpriv->irq_dma_bits |= FIMD;
}
/* determine how many conversions we need */
- if (cmd.stop_src == TRIG_COUNT) {
+ if (cmd.stop_src == TRIG_COUNT)
devpriv->count = cmd.stop_arg * cmd.chanlist_len;
- }
das1800_cancel(dev, s);
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index 8cea9dca3d7..95ebc267bb7 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -61,6 +61,7 @@ Notes:
#include <linux/gfp.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <asm/dma.h>
#include "comedi_fc.h"
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
new file mode 100644
index 00000000000..da8a2bf3165
--- /dev/null
+++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c
@@ -0,0 +1,462 @@
+/*
+ * comedi/drivers/dyna_pci10xx.c
+ * Copyright (C) 2011 Prashant Shah, pshah.mumbai@gmail.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ Driver: dyna_pci10xx
+ Devices: Dynalog India PCI DAQ Cards, http://www.dynalogindia.com/
+ Author: Prashant Shah <pshah.mumbai@gmail.com>
+ Developed at Automation Labs, Chemical Dept., IIT Bombay, India.
+ Prof. Kannan Moudgalya <kannan@iitb.ac.in>
+ http://www.iitb.ac.in
+ Status: Stable
+ Version: 1.0
+ Devices supported:
+ - Dynalog PCI 1050
+
+ Notes:
+ - Dynalog India Pvt. Ltd. does not have a registered PCI vendor ID, so these
+ cards use the PLX Technologies vendor ID, since that is the PCI chip used
+ on the card.
+ - Dynalog India Pvt. Ltd. has provided the internal register specification
+ for their cards in their manuals.
+*/
+
+#include "../comedidev.h"
+#include "comedi_pci.h"
+#include <linux/mutex.h>
+
+#define PCI_VENDOR_ID_DYNALOG 0x10b5
+#define DRV_NAME "dyna_pci10xx"
+
+#define READ_TIMEOUT 50
+
+static DEFINE_MUTEX(start_stop_sem);
+
+static DEFINE_PCI_DEVICE_TABLE(dyna_pci10xx_pci_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_DYNALOG, 0x1050) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, dyna_pci10xx_pci_table);
+
+static int dyna_pci10xx_attach(struct comedi_device *dev,
+ struct comedi_devconfig *it);
+static int dyna_pci10xx_detach(struct comedi_device *dev);
+
+static const struct comedi_lrange range_pci1050_ai = { 3, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ UNI_RANGE(10)
+ }
+};
+
+static const char range_codes_pci1050_ai[] = { 0x00, 0x10, 0x30 };
+
+static const struct comedi_lrange range_pci1050_ao = { 1, {
+ UNI_RANGE(10)
+ }
+};
+
+static const char range_codes_pci1050_ao[] = { 0x00 };
+
+struct boardtype {
+ const char *name;
+ int device_id;
+ int ai_chans;
+ int ai_bits;
+ int ao_chans;
+ int ao_bits;
+ int di_chans;
+ int di_bits;
+ int do_chans;
+ int do_bits;
+ const struct comedi_lrange *range_ai;
+ const char *range_codes_ai;
+ const struct comedi_lrange *range_ao;
+ const char *range_codes_ao;
+};
+
+static const struct boardtype boardtypes[] = {
+ {
+ .name = "dyna_pci1050",
+ .device_id = 0x1050,
+ .ai_chans = 16,
+ .ai_bits = 12,
+ .ao_chans = 16,
+ .ao_bits = 12,
+ .di_chans = 16,
+ .di_bits = 16,
+ .do_chans = 16,
+ .do_bits = 16,
+ .range_ai = &range_pci1050_ai,
+ .range_codes_ai = range_codes_pci1050_ai,
+ .range_ao = &range_pci1050_ao,
+ .range_codes_ao = range_codes_pci1050_ao,
+ },
+ /* dummy entry corresponding to driver name */
+ {.name = DRV_NAME},
+};
+
+static struct comedi_driver driver_dyna_pci10xx = {
+ .driver_name = DRV_NAME,
+ .module = THIS_MODULE,
+ .attach = dyna_pci10xx_attach,
+ .detach = dyna_pci10xx_detach,
+ .board_name = &boardtypes[0].name,
+ .offset = sizeof(struct boardtype),
+ .num_names = ARRAY_SIZE(boardtypes),
+};
+
+struct dyna_pci10xx_private {
+ struct pci_dev *pci_dev; /* ptr to PCI device */
+ char valid; /* card is usable */
+ struct mutex mutex;
+
+ /* device base address registers */
+ unsigned long BADR0, BADR1, BADR2, BADR3, BADR4, BADR5;
+};
+
+#define thisboard ((const struct boardtype *)dev->board_ptr)
+#define devpriv ((struct dyna_pci10xx_private *)dev->private)
+
+/******************************************************************************/
+/************************** READ WRITE FUNCTIONS ******************************/
+/******************************************************************************/
+
+/* analog input callback */
+static int dyna_pci10xx_insn_read_ai(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ int n, counter;
+ u16 d = 0;
+ unsigned int chan, range;
+
+ /* get the channel number and range */
+ chan = CR_CHAN(insn->chanspec);
+ range = thisboard->range_codes_ai[CR_RANGE((insn->chanspec))];
+
+ mutex_lock(&devpriv->mutex);
+ /* convert n samples */
+ for (n = 0; n < insn->n; n++) {
+ /* trigger conversion */
+ smp_mb();
+ outw_p(0x0000 + range + chan, devpriv->BADR2 + 2);
+ udelay(10);
+ /* read data */
+ for (counter = 0; counter < READ_TIMEOUT; counter++) {
+ d = inw_p(devpriv->BADR2);
+
+ /* the read is successful when the EOC bit is set */
+ if (d & (1 << 15))
+ goto conv_finish;
+ }
+ data[n] = 0;
+ printk(KERN_DEBUG "comedi: dyna_pci10xx: "
+ "timeout reading analog input\n");
+ continue;
+conv_finish:
+ /* mask off the top 4 status/EOC bits, keeping the 12-bit sample */
+ d &= 0x0FFF;
+ data[n] = d;
+ }
+ mutex_unlock(&devpriv->mutex);
+
+ /* return the number of samples read/written */
+ return n;
+}
+
+/* analog output callback */
+static int dyna_pci10xx_insn_write_ao(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ int n;
+ unsigned int chan, range;
+
+ chan = CR_CHAN(insn->chanspec);
+ range = thisboard->range_codes_ao[CR_RANGE(insn->chanspec)];
+
+ mutex_lock(&devpriv->mutex);
+ for (n = 0; n < insn->n; n++) {
+ smp_mb();
+ /* trigger conversion and write data */
+ outw_p(data[n], devpriv->BADR2);
+ udelay(10);
+ }
+ mutex_unlock(&devpriv->mutex);
+ return n;
+}
+
+/* digital input bit interface */
+static int dyna_pci10xx_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ u16 d = 0;
+
+ if (insn->n != 2)
+ return -EINVAL;
+
+ mutex_lock(&devpriv->mutex);
+ smp_mb();
+ d = inw_p(devpriv->BADR3);
+ udelay(10);
+
+ /* on return, data[0] holds the output state and data[1] the input lines */
+ data[1] = d;
+ data[0] = s->state;
+ mutex_unlock(&devpriv->mutex);
+ return 2;
+}
+
+/* digital output bit interface */
+static int dyna_pci10xx_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data)
+{
+ if (insn->n != 2)
+ return -EINVAL;
+
+ /* The insn data is a mask in data[0] and the new data
+ * in data[1], each channel corresponding to one bit;
+ * s->state holds the previously written output value.
+ */
+ mutex_lock(&devpriv->mutex);
+ if (data[0]) {
+ s->state &= ~data[0];
+ s->state |= (data[0] & data[1]);
+ smp_mb();
+ outw_p(s->state, devpriv->BADR3);
+ udelay(10);
+ }
+
+ /*
+ * On return, data[1] contains the value of the digital
+ * input and output lines. We just return the software copy of the
+ * output values if it was a purely digital output subdevice.
+ */
+ data[1] = s->state;
+ mutex_unlock(&devpriv->mutex);
+ return 2;
+}
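+
+/*
+ * Worked example of the update above (illustrative values only, not taken
+ * from the Dynalog documentation): with s->state = 0x0F0F, a mask of
+ * data[0] = 0x00F0 and new bits data[1] = 0x0030, the result is
+ *
+ *	s->state = (0x0F0F & ~0x00F0) | (0x00F0 & 0x0030) = 0x0F3F
+ *
+ * which is the word latched to BADR3 and reported back in data[1].
+ */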
+
+/******************************************************************************/
+/*********************** INITIALIZATION FUNCTIONS *****************************/
+/******************************************************************************/
+
+static int dyna_pci10xx_attach(struct comedi_device *dev,
+ struct comedi_devconfig *it)
+{
+ struct comedi_subdevice *s;
+ struct pci_dev *pcidev;
+ unsigned int opt_bus, opt_slot;
+ int board_index, i;
+
+ mutex_lock(&start_stop_sem);
+
+ if (alloc_private(dev, sizeof(struct dyna_pci10xx_private)) < 0) {
+ printk(KERN_ERR "comedi: dyna_pci10xx: "
+ "failed to allocate memory!\n");
+ mutex_unlock(&start_stop_sem);
+ return -ENOMEM;
+ }
+
+ opt_bus = it->options[0];
+ opt_slot = it->options[1];
+ dev->board_name = thisboard->name;
+ dev->irq = 0;
+
+ /*
+ * Probe the PCI bus and locate the matching device
+ */
+ for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
+ pcidev != NULL;
+ pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
+
+ board_index = -1;
+ for (i = 0; i < ARRAY_SIZE(boardtypes); ++i) {
+ if ((pcidev->vendor == PCI_VENDOR_ID_DYNALOG) &&
+ (pcidev->device == boardtypes[i].device_id)) {
+ board_index = i;
+ break;
+ }
+ }
+ if (board_index < 0)
+ continue;
+
+ /* Found matching vendor/device. */
+ if (opt_bus || opt_slot) {
+ /* Check bus/slot. */
+ if (opt_bus != pcidev->bus->number
+ || opt_slot != PCI_SLOT(pcidev->devfn))
+ continue; /* no match */
+ }
+
+ goto found;
+ }
+ printk(KERN_ERR "comedi: dyna_pci10xx: no supported device found!\n");
+ mutex_unlock(&start_stop_sem);
+ return -EIO;
+
+found:
+
+ if (!pcidev) {
+ if (opt_bus || opt_slot) {
+ printk(KERN_ERR "comedi: dyna_pci10xx: "
+ "invalid PCI device at b:s %d:%d\n",
+ opt_bus, opt_slot);
+ } else {
+ printk(KERN_ERR "comedi: dyna_pci10xx: "
+ "invalid PCI device\n");
+ }
+ mutex_unlock(&start_stop_sem);
+ return -EIO;
+ }
+
+ if (comedi_pci_enable(pcidev, DRV_NAME)) {
+ printk(KERN_ERR "comedi: dyna_pci10xx: "
+ "failed to enable PCI device and request regions!\n");
+ mutex_unlock(&start_stop_sem);
+ return -EIO;
+ }
+
+ mutex_init(&devpriv->mutex);
+ dev->board_ptr = &boardtypes[board_index];
+ devpriv->pci_dev = pcidev;
+
+ printk(KERN_INFO "comedi: dyna_pci10xx: device found!\n");
+
+ /* initialize device base address registers */
+ devpriv->BADR0 = pci_resource_start(pcidev, 0);
+ devpriv->BADR1 = pci_resource_start(pcidev, 1);
+ devpriv->BADR2 = pci_resource_start(pcidev, 2);
+ devpriv->BADR3 = pci_resource_start(pcidev, 3);
+ devpriv->BADR4 = pci_resource_start(pcidev, 4);
+ devpriv->BADR5 = pci_resource_start(pcidev, 5);
+
+ if (alloc_subdevices(dev, 4) < 0) {
+ printk(KERN_ERR "comedi: dyna_pci10xx: "
+ "failed allocating subdevices\n");
+ mutex_unlock(&start_stop_sem);
+ return -ENOMEM;
+ }
+
+ /* analog input */
+ s = dev->subdevices + 0;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF;
+ s->n_chan = thisboard->ai_chans;
+ s->maxdata = 0x0FFF;
+ s->range_table = thisboard->range_ai;
+ s->len_chanlist = 16;
+ s->insn_read = dyna_pci10xx_insn_read_ai;
+
+ /* analog output */
+ s = dev->subdevices + 1;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = thisboard->ao_chans;
+ s->maxdata = 0x0FFF;
+ s->range_table = thisboard->range_ao;
+ s->len_chanlist = 16;
+ s->insn_write = dyna_pci10xx_insn_write_ao;
+
+ /* digital input */
+ s = dev->subdevices + 2;
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND;
+ s->n_chan = thisboard->di_chans;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->len_chanlist = thisboard->di_chans;
+ s->insn_bits = dyna_pci10xx_di_insn_bits;
+
+ /* digital output */
+ s = dev->subdevices + 3;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
+ s->n_chan = thisboard->do_chans;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->len_chanlist = thisboard->do_chans;
+ s->state = 0;
+ s->insn_bits = dyna_pci10xx_do_insn_bits;
+
+ devpriv->valid = 1;
+ mutex_unlock(&start_stop_sem);
+
+ printk(KERN_INFO "comedi: dyna_pci10xx: %s - device setup completed!\n",
+ boardtypes[board_index].name);
+
+ return 1;
+}
+
+static int dyna_pci10xx_detach(struct comedi_device *dev)
+{
+ if (devpriv && devpriv->pci_dev) {
+ comedi_pci_disable(devpriv->pci_dev);
+ mutex_destroy(&devpriv->mutex);
+ }
+
+ return 0;
+}
+
+static int __devinit driver_dyna_pci10xx_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ return comedi_pci_auto_config(dev, driver_dyna_pci10xx.driver_name);
+}
+
+static void __devexit driver_dyna_pci10xx_pci_remove(struct pci_dev *dev)
+{
+ comedi_pci_auto_unconfig(dev);
+}
+
+static struct pci_driver driver_dyna_pci10xx_pci_driver = {
+ .id_table = dyna_pci10xx_pci_table,
+ .probe = &driver_dyna_pci10xx_pci_probe,
+ .remove = __devexit_p(&driver_dyna_pci10xx_pci_remove)
+};
+
+static int __init driver_dyna_pci10xx_init_module(void)
+{
+ int retval;
+
+ retval = comedi_driver_register(&driver_dyna_pci10xx);
+ if (retval < 0)
+ return retval;
+
+ driver_dyna_pci10xx_pci_driver.name =
+ (char *)driver_dyna_pci10xx.driver_name;
+ return pci_register_driver(&driver_dyna_pci10xx_pci_driver);
+}
+
+static void __exit driver_dyna_pci10xx_cleanup_module(void)
+{
+ pci_unregister_driver(&driver_dyna_pci10xx_pci_driver);
+ comedi_driver_unregister(&driver_dyna_pci10xx);
+}
+
+module_init(driver_dyna_pci10xx_init_module);
+module_exit(driver_dyna_pci10xx_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Prashant Shah <pshah.mumbai@gmail.com>");
+MODULE_DESCRIPTION("Comedi based drivers for Dynalog PCI DAQ cards");
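
(For context, nothing in this driver is reached through a private interface; user
space drives the board through the standard comedi character device. A minimal
sketch of reading one analog input sample with comedilib follows, under the
assumption that the board was attached as /dev/comedi0 and that subdevice 0 is the
AI subdevice set up in dyna_pci10xx_attach(); the comedilib calls below are not part
of this patch:

	#include <stdio.h>
	#include <comedilib.h>

	int main(void)
	{
		comedi_t *dev;
		lsampl_t sample;

		dev = comedi_open("/dev/comedi0");	/* board attached by dyna_pci10xx */
		if (!dev) {
			comedi_perror("comedi_open");
			return 1;
		}
		/* subdevice 0 (AI), channel 0, range 0 (+/-10 V), ground reference */
		if (comedi_data_read(dev, 0, 0, 0, AREF_GROUND, &sample) < 0) {
			comedi_perror("comedi_data_read");
			comedi_close(dev);
			return 1;
		}
		printf("raw 12-bit sample: %u\n", sample);
		comedi_close(dev);
		return 0;
	}

The raw value can then be converted to volts with comedi_to_phys() if required.)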
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index c192b71ec04..32e675e3f0b 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -69,6 +69,7 @@ TRIG_WAKE_EOS
#include "../comedidev.h"
#include <linux/ioport.h>
+#include <linux/io.h>
#include <asm/dma.h>
#include "8253.h"
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 2672629e9ff..e2420123db8 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -51,7 +51,7 @@ IRQ is assigned but not used.
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-static struct pcmcia_device *pcmcia_cur_dev = NULL;
+static struct pcmcia_device *pcmcia_cur_dev;
#define DIO700_SIZE 8 /* size of io region used by board */
@@ -381,11 +381,11 @@ static int dio700_attach(struct comedi_device *dev, struct comedi_devconfig *it)
#endif
break;
default:
- printk("bug! couldn't determine board type\n");
+ printk(KERN_ERR "bug! couldn't determine board type\n");
return -EINVAL;
break;
}
- printk("comedi%d: ni_daq_700: %s, io 0x%lx", dev->minor,
+ printk(KERN_ERR "comedi%d: ni_daq_700: %s, io 0x%lx", dev->minor,
thisboard->name, iobase);
#ifdef incomplete
if (irq)
@@ -396,7 +396,7 @@ static int dio700_attach(struct comedi_device *dev, struct comedi_devconfig *it)
printk("\n");
if (iobase == 0) {
- printk("io base address is zero!\n");
+ printk(KERN_ERR "io base address is zero!\n");
return -EINVAL;
}
@@ -421,7 +421,7 @@ static int dio700_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static int dio700_detach(struct comedi_device *dev)
{
- printk("comedi%d: ni_daq_700: cs-remove\n", dev->minor);
+ printk(KERN_ERR "comedi%d: ni_daq_700: cs-remove\n", dev->minor);
if (dev->subdevices)
subdev_700_cleanup(dev, dev->subdevices + 0);
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index ab8f37022a3..6859af0778c 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -78,6 +78,7 @@ NI manuals:
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include "../comedidev.h"
#include <linux/delay.h>
@@ -212,8 +213,10 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it);
static int labpc_cancel(struct comedi_device *dev, struct comedi_subdevice *s);
static irqreturn_t labpc_interrupt(int irq, void *d);
static int labpc_drain_fifo(struct comedi_device *dev);
+#ifdef CONFIG_ISA_DMA_API
static void labpc_drain_dma(struct comedi_device *dev);
static void handle_isa_dma(struct comedi_device *dev);
+#endif
static void labpc_drain_dregs(struct comedi_device *dev);
static int labpc_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd);
@@ -237,9 +240,9 @@ static int labpc_eeprom_write_insn(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data);
-static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd);
static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd);
#ifdef CONFIG_COMEDI_PCI
+static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd);
static int labpc_find_device(struct comedi_device *dev, int bus, int slot);
#endif
static int labpc_dio_mem_callback(int dir, int port, int data,
@@ -526,7 +529,10 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
{
struct comedi_subdevice *s;
int i;
- unsigned long dma_flags, isr_flags;
+ unsigned long isr_flags;
+#ifdef CONFIG_ISA_DMA_API
+ unsigned long dma_flags;
+#endif
short lsb, msb;
printk(KERN_ERR "comedi%d: ni_labpc: %s, io 0x%lx", dev->minor,
@@ -586,6 +592,7 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
}
dev->irq = irq;
+#ifdef CONFIG_ISA_DMA_API
/* grab dma channel */
if (dma_chan > 3) {
printk(KERN_ERR " invalid dma channel %u\n", dma_chan);
@@ -609,6 +616,7 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase,
set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
release_dma_lock(dma_flags);
}
+#endif
dev->board_name = thisboard->name;
@@ -723,9 +731,15 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* get base address, irq etc. based on bustype */
switch (thisboard->bustype) {
case isa_bustype:
+#ifdef CONFIG_ISA_DMA_API
iobase = it->options[0];
irq = it->options[1];
dma_chan = it->options[2];
+#else
+ printk(KERN_ERR " this driver has not been built with ISA DMA "
+ "support.\n");
+ return -EINVAL;
+#endif
break;
case pci_bustype:
#ifdef CONFIG_COMEDI_PCI
@@ -796,10 +810,12 @@ int labpc_common_detach(struct comedi_device *dev)
if (dev->subdevices)
subdev_8255_cleanup(dev, dev->subdevices + 2);
+#ifdef CONFIG_ISA_DMA_API
/* only free stuff if it has been allocated by _attach */
kfree(devpriv->dma_buffer);
if (devpriv->dma_chan)
free_dma(devpriv->dma_chan);
+#endif
if (dev->irq)
free_irq(dev->irq, dev);
if (thisboard->bustype == isa_bustype && dev->iobase)
@@ -1134,7 +1150,9 @@ static int labpc_ai_cmdtest(struct comedi_device *dev,
static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
int channel, range, aref;
+#ifdef CONFIG_ISA_DMA_API
unsigned long irq_flags;
+#endif
int ret;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
@@ -1181,6 +1199,7 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->write_byte(INIT_A1_BITS,
dev->iobase + COUNTER_A_CONTROL_REG);
+#ifdef CONFIG_ISA_DMA_API
/* figure out what method we will use to transfer data */
if (devpriv->dma_chan && /* need a dma channel allocated */
/*
@@ -1192,7 +1211,9 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
thisboard->bustype == isa_bustype) {
xfer = isa_dma_transfer;
/* pc-plus has no fifo-half full interrupt */
- } else if (thisboard->register_layout == labpc_1200_layout &&
+ } else
+#endif
+ if (thisboard->register_layout == labpc_1200_layout &&
/* wake-end-of-scan should interrupt on fifo not empty */
(cmd->flags & TRIG_WAKE_EOS) == 0 &&
/* make sure we are taking more than just a few points */
@@ -1316,6 +1337,7 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
labpc_clear_adc_fifo(dev);
+#ifdef CONFIG_ISA_DMA_API
/* set up dma transfer */
if (xfer == isa_dma_transfer) {
irq_flags = claim_dma_lock();
@@ -1339,6 +1361,7 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->command3_bits |= DMA_EN_BIT | DMATC_INTR_EN_BIT;
} else
devpriv->command3_bits &= ~DMA_EN_BIT & ~DMATC_INTR_EN_BIT;
+#endif
/* enable error interrupts */
devpriv->command3_bits |= ERR_INTR_EN_BIT;
@@ -1425,6 +1448,7 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
+#ifdef CONFIG_ISA_DMA_API
if (devpriv->current_transfer == isa_dma_transfer) {
/*
* if a dma terminal count of external stop trigger
@@ -1436,6 +1460,7 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
handle_isa_dma(dev);
}
} else
+#endif
labpc_drain_fifo(dev);
if (devpriv->status1_bits & TIMER_BIT) {
@@ -1508,6 +1533,7 @@ static int labpc_drain_fifo(struct comedi_device *dev)
return 0;
}
+#ifdef CONFIG_ISA_DMA_API
static void labpc_drain_dma(struct comedi_device *dev)
{
struct comedi_subdevice *s = dev->read_subdev;
@@ -1570,13 +1596,16 @@ static void handle_isa_dma(struct comedi_device *dev)
/* clear dma tc interrupt */
devpriv->write_byte(0x1, dev->iobase + DMATC_CLEAR_REG);
}
+#endif
/* makes sure all data acquired by board is transferred to comedi (used
* when acquisition is terminated by stop_src == TRIG_EXT). */
static void labpc_drain_dregs(struct comedi_device *dev)
{
+#ifdef CONFIG_ISA_DMA_API
if (devpriv->current_transfer == isa_dma_transfer)
labpc_drain_dma(dev);
+#endif
labpc_drain_fifo(dev);
}
@@ -1768,6 +1797,7 @@ static int labpc_eeprom_write_insn(struct comedi_device *dev,
return 1;
}
+#ifdef CONFIG_ISA_DMA_API
/* utility function that suggests a dma transfer size in bytes */
static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd)
{
@@ -1791,6 +1821,7 @@ static unsigned int labpc_suggest_transfer_size(struct comedi_cmd cmd)
return size;
}
+#endif
/* figures out what counter values to use based on command */
static void labpc_adc_timing(struct comedi_device *dev, struct comedi_cmd *cmd)
diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c
index 09ff4723b22..6fc74645af2 100644
--- a/drivers/staging/comedi/drivers/pcl812.c
+++ b/drivers/staging/comedi/drivers/pcl812.c
@@ -114,6 +114,7 @@
#include <linux/delay.h>
#include <linux/ioport.h>
+#include <linux/io.h>
#include <asm/dma.h>
#include "8253.h"
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index 8f3fc6ee088..0b9bee36eb5 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -38,6 +38,7 @@ Configuration Options:
#include <linux/mc146818rtc.h>
#include <linux/gfp.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <asm/dma.h>
#include "8253.h"
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index 8933e5089bd..b45a9bd8b48 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -104,6 +104,7 @@ A word or two about DMA. Driver support DMA operations at two ways:
#include <linux/mc146818rtc.h>
#include <linux/gfp.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <asm/dma.h>
#include "8253.h"
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 7f09ed755fe..13844196050 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -29,8 +29,8 @@ Status: Works. Only tested on DM7520-8. Not SMP safe.
Configuration options:
[0] - PCI bus of device (optional)
- If bus/slot is not specified, the first available PCI
- device will be used.
+ If bus / slot is not specified, the first available PCI
+ device will be used.
[1] - PCI slot of device (optional)
*/
/*
@@ -186,7 +186,7 @@ Configuration options:
| PLX_DEMAND_MODE_BIT)
#define DMA_TRANSFER_BITS (\
-/* descriptors in PCI memory*/ PLX_DESC_IN_PCI_BIT \
+/* descriptors in PCI memory*/ PLX_DESC_IN_PCI_BIT \
/* interrupt at end of block */ | PLX_INTR_TERM_COUNT \
/* from board to PCI */ | PLX_XFER_LOCAL_TO_PCI)
@@ -869,7 +869,7 @@ static int rtd_attach(struct comedi_device *dev, struct comedi_devconfig *it)
}
/* Show board configuration */
- printk("%s:", dev->board_name);
+ printk(KERN_INFO "%s:", dev->board_name);
/*
* Allocate the subdevice structures. alloc_subdevice() is a
@@ -958,7 +958,7 @@ static int rtd_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return ret;
}
dev->irq = devpriv->pci_dev->irq;
- printk("( irq=%u )", dev->irq);
+ printk(KERN_INFO "( irq=%u )", dev->irq);
ret = rtd520_probe_fifo_depth(dev);
if (ret < 0)
@@ -1026,7 +1026,8 @@ static int rtd_attach(struct comedi_device *dev, struct comedi_devconfig *it)
}
RtdDma0Mode(dev, DMA_MODE_BITS);
- RtdDma0Source(dev, DMAS_ADFIFO_HALF_FULL); /* set DMA trigger source */
+ /* set DMA trigger source */
+ RtdDma0Source(dev, DMAS_ADFIFO_HALF_FULL);
} else {
printk(KERN_INFO "( no IRQ->no DMA )");
}
@@ -1202,11 +1203,13 @@ static unsigned short rtdConvertChanGain(struct comedi_device *dev,
CHAN_ARRAY_SET(devpriv->chanBipolar, chanIndex);
} else if (range < thisboard->rangeUniStart) { /* second batch are +-10 */
r |= 0x100; /* +-10 range */
- r |= ((range - thisboard->range10Start) & 0x7) << 4; /* gain */
+ /* gain */
+ r |= ((range - thisboard->range10Start) & 0x7) << 4;
CHAN_ARRAY_SET(devpriv->chanBipolar, chanIndex);
} else { /* last batch is +10 */
r |= 0x200; /* +10 range */
- r |= ((range - thisboard->rangeUniStart) & 0x7) << 4; /* gain */
+ /* gain */
+ r |= ((range - thisboard->rangeUniStart) & 0x7) << 4;
CHAN_ARRAY_CLEAR(devpriv->chanBipolar, chanIndex);
}
@@ -1336,7 +1339,8 @@ static int rtd_ai_rinsn(struct comedi_device *dev,
/*printk ("rtd520: Got 0x%x after %d usec\n", d, ii+1); */
d = d >> 3; /* low 3 bits are marker lines */
if (CHAN_ARRAY_TEST(devpriv->chanBipolar, 0))
- data[n] = d + 2048; /* convert to comedi unsigned data */
+ /* convert to comedi unsigned data */
+ data[n] = d + 2048;
else
data[n] = d;
}
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index ebfce33f0b4..ade2202b623 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -38,7 +38,7 @@ Status: in development
#include <linux/sched.h>
#include <linux/slab.h>
-#include <asm/termios.h>
+#include <linux/termios.h>
#include <asm/ioctls.h>
#include <linux/serial.h>
#include <linux/poll.h>
@@ -192,9 +192,8 @@ static int tty_read(struct file *f, int timeout)
elapsed =
(1000000 * (now.tv_sec - start.tv_sec) +
now.tv_usec - start.tv_usec);
- if (elapsed > timeout) {
+ if (elapsed > timeout)
break;
- }
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(((timeout -
elapsed) * HZ) / 10000);
@@ -204,9 +203,8 @@ static int tty_read(struct file *f, int timeout)
unsigned char ch;
f->f_pos = 0;
- if (f->f_op->read(f, &ch, 1, &f->f_pos) == 1) {
+ if (f->f_op->read(f, &ch, 1, &f->f_pos) == 1)
result = ch;
- }
}
} else {
/* Device does not support poll, busy wait */
@@ -215,9 +213,8 @@ static int tty_read(struct file *f, int timeout)
unsigned char ch;
retries++;
- if (retries >= timeout) {
+ if (retries >= timeout)
break;
- }
f->f_pos = 0;
if (f->f_op->read(f, &ch, 1, &f->f_pos) == 1) {
@@ -329,7 +326,7 @@ static struct serial_data serial_read(struct file *f, int timeout)
length++;
if (data < 0) {
- printk("serial2002 error\n");
+ printk(KERN_ERR "serial2002 error\n");
break;
} else if (data & 0x80) {
result.value = (result.value << 7) | (data & 0x7f);
@@ -402,7 +399,7 @@ static int serial_2002_open(struct comedi_device *dev)
devpriv->tty = filp_open(port, O_RDWR, 0);
if (IS_ERR(devpriv->tty)) {
result = (int)PTR_ERR(devpriv->tty);
- printk("serial_2002: file open error = %d\n", result);
+ printk(KERN_ERR "serial_2002: file open error = %d\n", result);
} else {
struct config_t {
@@ -516,9 +513,8 @@ static int serial_2002_open(struct comedi_device *dev)
}
break;
}
- if (sign) {
+ if (sign)
min = -min;
- }
cur_config[channel].min
= min;
}
@@ -557,9 +553,8 @@ static int serial_2002_open(struct comedi_device *dev)
}
break;
}
- if (sign) {
+ if (sign)
max = -max;
- }
cur_config[channel].max
= max;
}
@@ -622,9 +617,8 @@ static int serial_2002_open(struct comedi_device *dev)
int j, chan;
for (chan = 0, j = 0; j < 32; j++) {
- if (c[j].kind == kind) {
+ if (c[j].kind == kind)
chan++;
- }
}
s = &dev->subdevices[i];
s->n_chan = chan;
@@ -649,9 +643,8 @@ static int serial_2002_open(struct comedi_device *dev)
}
for (chan = 0, j = 0; j < 32; j++) {
if (c[j].kind == kind) {
- if (mapping) {
+ if (mapping)
mapping[chan] = j;
- }
if (range) {
range[j].length = 1;
range[j].range.min =
@@ -704,9 +697,8 @@ err_alloc_configs:
static void serial_2002_close(struct comedi_device *dev)
{
- if (!IS_ERR(devpriv->tty) && (devpriv->tty != 0)) {
+ if (!IS_ERR(devpriv->tty) && (devpriv->tty != 0))
filp_close(devpriv->tty, 0);
- }
}
static int serial2002_di_rinsn(struct comedi_device *dev,
@@ -723,9 +715,8 @@ static int serial2002_di_rinsn(struct comedi_device *dev,
poll_digital(devpriv->tty, chan);
while (1) {
read = serial_read(devpriv->tty, 1000);
- if (read.kind != is_digital || read.index == chan) {
+ if (read.kind != is_digital || read.index == chan)
break;
- }
}
data[n] = read.value;
}
@@ -765,9 +756,8 @@ static int serial2002_ai_rinsn(struct comedi_device *dev,
poll_channel(devpriv->tty, chan);
while (1) {
read = serial_read(devpriv->tty, 1000);
- if (read.kind != is_channel || read.index == chan) {
+ if (read.kind != is_channel || read.index == chan)
break;
- }
}
data[n] = read.value;
}
@@ -801,9 +791,8 @@ static int serial2002_ao_rinsn(struct comedi_device *dev,
int n;
int chan = CR_CHAN(insn->chanspec);
- for (n = 0; n < insn->n; n++) {
+ for (n = 0; n < insn->n; n++)
data[n] = devpriv->ao_readback[chan];
- }
return n;
}
@@ -822,9 +811,8 @@ static int serial2002_ei_rinsn(struct comedi_device *dev,
poll_channel(devpriv->tty, chan);
while (1) {
read = serial_read(devpriv->tty, 1000);
- if (read.kind != is_channel || read.index == chan) {
+ if (read.kind != is_channel || read.index == chan)
break;
- }
}
data[n] = read.value;
}
@@ -838,9 +826,8 @@ static int serial2002_attach(struct comedi_device *dev,
printk("comedi%d: serial2002: ", dev->minor);
dev->board_name = thisboard->name;
- if (alloc_private(dev, sizeof(struct serial2002_private)) < 0) {
+ if (alloc_private(dev, sizeof(struct serial2002_private)) < 0)
return -ENOMEM;
- }
dev->open = serial_2002_open;
dev->close = serial_2002_close;
devpriv->port = it->options[0];
diff --git a/drivers/staging/comedi/drivers/unioxx5.c b/drivers/staging/comedi/drivers/unioxx5.c
index 598884ec3ed..89e62aa134b 100644
--- a/drivers/staging/comedi/drivers/unioxx5.c
+++ b/drivers/staging/comedi/drivers/unioxx5.c
@@ -75,8 +75,10 @@ Devices: [Fastwel] UNIOxx-5 (unioxx5),
/* 'private' structure for each subdevice */
struct unioxx5_subd_priv {
int usp_iobase;
- unsigned char usp_module_type[12]; /* 12 modules. each can be 70L or 73L */
- unsigned char usp_extra_data[12][4]; /* for saving previous written value for analog modules */
+ /* 12 modules. each can be 70L or 73L */
+ unsigned char usp_module_type[12];
+ /* for saving previous written value for analog modules */
+ unsigned char usp_extra_data[12][4];
unsigned char usp_prev_wr_val[3]; /* previous written value */
unsigned char usp_prev_cn_val[3]; /* previous channel value */
};
@@ -169,7 +171,7 @@ static int unioxx5_attach(struct comedi_device *dev,
return -1;
}
- printk("attached\n");
+ printk(KERN_INFO "attached\n");
return 0;
}
@@ -181,7 +183,8 @@ static int unioxx5_subdev_read(struct comedi_device *dev,
int channel, type;
channel = CR_CHAN(insn->chanspec);
- type = usp->usp_module_type[channel / 2]; /* defining module type(analog or digital) */
+ /* defining module type(analog or digital) */
+ type = usp->usp_module_type[channel / 2];
if (type == MODULE_DIGITAL) {
if (!__unioxx5_digital_read(usp, data, channel, dev->minor))
@@ -202,7 +205,8 @@ static int unioxx5_subdev_write(struct comedi_device *dev,
int channel, type;
channel = CR_CHAN(insn->chanspec);
- type = usp->usp_module_type[channel / 2]; /* defining module type(analog or digital) */
+ /* defining module type(analog or digital) */
+ type = usp->usp_module_type[channel / 2];
if (type == MODULE_DIGITAL) {
if (!__unioxx5_digital_write(usp, data, channel, dev->minor))
@@ -261,9 +265,12 @@ static int unioxx5_insn_config(struct comedi_device *dev,
* change channel type on input or output) *
\* */
outb(1, usp->usp_iobase + 0);
- outb(flags, usp->usp_iobase + channel_offset); /* changes type of _one_ channel */
- outb(0, usp->usp_iobase + 0); /* sets channels bank to 0(allows directly input/output) */
- usp->usp_prev_cn_val[channel_offset - 1] = flags; /* saves written value */
+ /* changes type of _one_ channel */
+ outb(flags, usp->usp_iobase + channel_offset);
+ /* sets channels bank to 0(allows directly input/output) */
+ outb(0, usp->usp_iobase + 0);
+ /* saves written value */
+ usp->usp_prev_cn_val[channel_offset - 1] = flags;
return 0;
}
@@ -304,14 +311,15 @@ static int __unioxx5_subdev_init(struct comedi_subdevice *subdev,
}
usp->usp_iobase = subdev_iobase;
- printk("comedi%d: |", minor);
+ printk(KERN_INFO "comedi%d: |", minor);
/* defining modules types */
for (i = 0; i < 12; i++) {
to = 10000;
__unioxx5_analog_config(usp, i * 2);
- outb(i + 1, subdev_iobase + 5); /* sends channel number to card */
+ /* sends channel number to card */
+ outb(i + 1, subdev_iobase + 5);
outb('H', subdev_iobase + 6); /* requests EEPROM world */
while (!(inb(subdev_iobase + 0) & TxBE))
; /* waits while writting will be allowed */
@@ -346,9 +354,10 @@ static int __unioxx5_subdev_init(struct comedi_subdevice *subdev,
subdev->range_table = &range_digital;
subdev->insn_read = unioxx5_subdev_read;
subdev->insn_write = unioxx5_subdev_write;
- subdev->insn_config = unioxx5_insn_config; /* for digital modules only!!! */
+ /* for digital modules only!!! */
+ subdev->insn_config = unioxx5_insn_config;
- printk("subdevice configured\n");
+ printk(KERN_INFO "subdevice configured\n");
return 0;
}
@@ -367,7 +376,8 @@ static int __unioxx5_digital_write(struct unioxx5_subd_priv *usp,
return 0;
}
- val = usp->usp_prev_wr_val[channel_offset - 1]; /* getting previous written value */
+ /* getting previous written value */
+ val = usp->usp_prev_wr_val[channel_offset - 1];
if (*data)
val |= mask;
@@ -375,7 +385,8 @@ static int __unioxx5_digital_write(struct unioxx5_subd_priv *usp,
val &= ~mask;
outb(val, usp->usp_iobase + channel_offset);
- usp->usp_prev_wr_val[channel_offset - 1] = val; /* saving new written value */
+ /* saving new written value */
+ usp->usp_prev_wr_val[channel_offset - 1] = val;
return 1;
}
@@ -399,7 +410,6 @@ static int __unioxx5_digital_read(struct unioxx5_subd_priv *usp,
if (channel_offset > 1)
channel -= 2 << channel_offset; /* this operation is created for correct readed value to 0 or 1 */
-
*data >>= channel;
return 1;
}
@@ -444,7 +454,8 @@ static int __unioxx5_analog_write(struct unioxx5_subd_priv *usp,
usp->usp_extra_data[module][i] = (unsigned char)((*data & 0xFF00) >> 8);
/* while(!((inb(usp->usp_iobase + 0)) & TxBE)); */
- outb(module + 1, usp->usp_iobase + 5); /* sending module number to card(1 .. 12) */
+ /* sending module number to card(1 .. 12) */
+ outb(module + 1, usp->usp_iobase + 5);
outb('W', usp->usp_iobase + 6); /* sends (W)rite command to module */
/* sending for bytes to module(one byte per cycle iteration) */
@@ -475,7 +486,8 @@ static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp,
}
__unioxx5_analog_config(usp, channel);
- outb(module_no + 1, usp->usp_iobase + 5); /* sends module number to card(1 .. 12) */
+ /* sends module number to card(1 .. 12) */
+ outb(module_no + 1, usp->usp_iobase + 5);
outb('V', usp->usp_iobase + 6); /* sends to module (V)erify command */
control = inb(usp->usp_iobase); /* get control register byte */
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 1d09bfa2edf..bf62e0dd6f6 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -1465,6 +1465,7 @@ static int usbdux_ao_inttrig(struct comedi_device *dev,
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: usbdux_ao_inttrig: invalid trignum\n",
dev->minor);
+ up(&this_usbduxsub->sem);
return -EINVAL;
}
if (!(this_usbduxsub->ao_cmd_running)) {
@@ -1935,11 +1936,8 @@ static int usbdux_pwm_cancel(struct comedi_device *dev,
dev_dbg(&this_usbduxsub->interface->dev,
"comedi %d: sending pwm off command to the usb device.\n",
dev->minor);
- res = send_dux_commands(this_usbduxsub, SENDPWMOFF);
- if (res < 0)
- return res;
- return res;
+ return send_dux_commands(this_usbduxsub, SENDPWMOFF);
}
static void usbduxsub_pwm_irq(struct urb *urb)
@@ -2674,6 +2672,7 @@ static int usbdux_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret < 0) {
dev_err(&udev->interface->dev,
"comedi%d: error alloc space for subdev\n", dev->minor);
+ up(&udev->sem);
up(&start_stop_sem);
return ret;
}
diff --git a/drivers/staging/cs5535_gpio/Kconfig b/drivers/staging/cs5535_gpio/Kconfig
deleted file mode 100644
index a1b3a8d2b86..00000000000
--- a/drivers/staging/cs5535_gpio/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-config CS5535_GPIO
- tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)"
- depends on X86_32
- help
- Note: this driver is DEPRECATED. Please use the cs5535-gpio module
- in the GPIO section instead (CONFIG_GPIO_CS5535).
-
- Give userspace access to the GPIO pins on the AMD CS5535 and
- CS5536 Geode companion devices.
-
- If compiled as a module, it will be called cs5535_gpio.
diff --git a/drivers/staging/cs5535_gpio/Makefile b/drivers/staging/cs5535_gpio/Makefile
deleted file mode 100644
index d67c4b85f19..00000000000
--- a/drivers/staging/cs5535_gpio/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o
diff --git a/drivers/staging/cs5535_gpio/TODO b/drivers/staging/cs5535_gpio/TODO
deleted file mode 100644
index 98d1cd1e236..00000000000
--- a/drivers/staging/cs5535_gpio/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
-This is an obsolete driver for some the CS5535 and CS5536 southbridge GPIOs.
-It has been replaced by a driver that makes use of the Linux GPIO subsystem.
-Please switch to that driver, and let dilinger@queued.net know if there's
-anything missing from the new driver.
-
-This driver is scheduled for removal in 2.6.40.
diff --git a/drivers/staging/cs5535_gpio/cs5535_gpio.c b/drivers/staging/cs5535_gpio/cs5535_gpio.c
deleted file mode 100644
index b25f9d103b3..00000000000
--- a/drivers/staging/cs5535_gpio/cs5535_gpio.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * AMD CS5535/CS5536 GPIO driver.
- * Allows a user space process to play with the GPIO pins.
- *
- * Copyright (c) 2005 Ben Gardner <bgardner@wabtec.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the smems of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- */
-
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/cdev.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-
-#define NAME "cs5535_gpio"
-
-MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
-MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO Pin Driver");
-MODULE_LICENSE("GPL");
-
-static int major;
-module_param(major, int, 0);
-MODULE_PARM_DESC(major, "Major device number");
-
-static ulong mask;
-module_param(mask, ulong, 0);
-MODULE_PARM_DESC(mask, "GPIO channel mask");
-
-#define MSR_LBAR_GPIO 0x5140000C
-
-static u32 gpio_base;
-
-static struct pci_device_id divil_pci[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
- { } /* NULL entry */
-};
-MODULE_DEVICE_TABLE(pci, divil_pci);
-
-static struct cdev cs5535_gpio_cdev;
-
-/* reserve 32 entries even though some aren't usable */
-#define CS5535_GPIO_COUNT 32
-
-/* IO block size */
-#define CS5535_GPIO_SIZE 256
-
-struct gpio_regmap {
- u32 rd_offset;
- u32 wr_offset;
- char on;
- char off;
-};
-static struct gpio_regmap rm[] =
-{
- { 0x30, 0x00, '1', '0' }, /* GPIOx_READ_BACK / GPIOx_OUT_VAL */
- { 0x20, 0x20, 'I', 'i' }, /* GPIOx_IN_EN */
- { 0x04, 0x04, 'O', 'o' }, /* GPIOx_OUT_EN */
- { 0x08, 0x08, 't', 'T' }, /* GPIOx_OUT_OD_EN */
- { 0x18, 0x18, 'P', 'p' }, /* GPIOx_OUT_PU_EN */
- { 0x1c, 0x1c, 'D', 'd' }, /* GPIOx_OUT_PD_EN */
-};
-
-
-/**
- * Gets the register offset for the GPIO bank.
- * Low (0-15) starts at 0x00, high (16-31) starts at 0x80
- */
-static inline u32 cs5535_lowhigh_base(int reg)
-{
- return (reg & 0x10) << 3;
-}
-
-static ssize_t cs5535_gpio_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- u32 m = iminor(file->f_path.dentry->d_inode);
- int i, j;
- u32 base = gpio_base + cs5535_lowhigh_base(m);
- u32 m0, m1;
- char c;
-
- /**
- * Creates the mask for atomic bit programming.
- * The high 16 bits and the low 16 bits are used to set the mask.
- * For example, GPIO 15 maps to 31,15: 0,1 => On; 1,0=> Off
- */
- m1 = 1 << (m & 0x0F);
- m0 = m1 << 16;
-
- for (i = 0; i < len; ++i) {
- if (get_user(c, data+i))
- return -EFAULT;
-
- for (j = 0; j < ARRAY_SIZE(rm); j++) {
- if (c == rm[j].on) {
- outl(m1, base + rm[j].wr_offset);
- /* If enabling output, turn off AUX 1 and AUX 2 */
- if (c == 'O') {
- outl(m0, base + 0x10);
- outl(m0, base + 0x14);
- }
- break;
- } else if (c == rm[j].off) {
- outl(m0, base + rm[j].wr_offset);
- break;
- }
- }
- }
- *ppos = 0;
- return len;
-}
-
-static ssize_t cs5535_gpio_read(struct file *file, char __user *buf,
- size_t len, loff_t *ppos)
-{
- u32 m = iminor(file->f_path.dentry->d_inode);
- u32 base = gpio_base + cs5535_lowhigh_base(m);
- int rd_bit = 1 << (m & 0x0f);
- int i;
- char ch;
- ssize_t count = 0;
-
- if (*ppos >= ARRAY_SIZE(rm))
- return 0;
-
- for (i = *ppos; (i < (*ppos + len)) && (i < ARRAY_SIZE(rm)); i++) {
- ch = (inl(base + rm[i].rd_offset) & rd_bit) ?
- rm[i].on : rm[i].off;
-
- if (put_user(ch, buf+count))
- return -EFAULT;
-
- count++;
- }
-
- /* add a line-feed if there is room */
- if ((i == ARRAY_SIZE(rm)) && (count < len)) {
- if (put_user('\n', buf + count))
- return -EFAULT;
- count++;
- }
-
- *ppos += count;
- return count;
-}
-
-static int cs5535_gpio_open(struct inode *inode, struct file *file)
-{
- u32 m = iminor(inode);
-
- /* the mask says which pins are usable by this driver */
- if ((mask & (1 << m)) == 0)
- return -EINVAL;
-
- return nonseekable_open(inode, file);
-}
-
-static const struct file_operations cs5535_gpio_fops = {
- .owner = THIS_MODULE,
- .write = cs5535_gpio_write,
- .read = cs5535_gpio_read,
- .open = cs5535_gpio_open,
- .llseek = no_llseek,
-};
-
-static int __init cs5535_gpio_init(void)
-{
- dev_t dev_id;
- u32 low, hi;
- int retval;
-
- if (pci_dev_present(divil_pci) == 0) {
- printk(KERN_WARNING NAME ": DIVIL not found\n");
- return -ENODEV;
- }
-
- /* Grab the GPIO I/O range */
- rdmsr(MSR_LBAR_GPIO, low, hi);
-
- /* Check the mask and whether GPIO is enabled (sanity check) */
- if (hi != 0x0000f001) {
- printk(KERN_WARNING NAME ": GPIO not enabled\n");
- return -ENODEV;
- }
-
- /* Mask off the IO base address */
- gpio_base = low & 0x0000ff00;
-
- /**
- * Some GPIO pins
- * 31-29,23 : reserved (always mask out)
- * 28 : Power Button
- * 26 : PME#
- * 22-16 : LPC
- * 14,15 : SMBus
- * 9,8 : UART1
- * 7 : PCI INTB
- * 3,4 : UART2/DDC
- * 2 : IDE_IRQ0
- * 0 : PCI INTA
- *
- * If a mask was not specified, be conservative and only allow:
- * 1,2,5,6,10-13,24,25,27
- */
- if (mask != 0)
- mask &= 0x1f7fffff;
- else
- mask = 0x0b003c66;
-
- if (!request_region(gpio_base, CS5535_GPIO_SIZE, NAME)) {
- printk(KERN_ERR NAME ": can't allocate I/O for GPIO\n");
- return -ENODEV;
- }
-
- if (major) {
- dev_id = MKDEV(major, 0);
- retval = register_chrdev_region(dev_id, CS5535_GPIO_COUNT,
- NAME);
- } else {
- retval = alloc_chrdev_region(&dev_id, 0, CS5535_GPIO_COUNT,
- NAME);
- major = MAJOR(dev_id);
- }
-
- if (retval) {
- release_region(gpio_base, CS5535_GPIO_SIZE);
- return -1;
- }
-
- printk(KERN_DEBUG NAME ": base=%#x mask=%#lx major=%d\n",
- gpio_base, mask, major);
-
- cdev_init(&cs5535_gpio_cdev, &cs5535_gpio_fops);
- cdev_add(&cs5535_gpio_cdev, dev_id, CS5535_GPIO_COUNT);
-
- return 0;
-}
-
-static void __exit cs5535_gpio_cleanup(void)
-{
- dev_t dev_id = MKDEV(major, 0);
-
- cdev_del(&cs5535_gpio_cdev);
- unregister_chrdev_region(dev_id, CS5535_GPIO_COUNT);
- release_region(gpio_base, CS5535_GPIO_SIZE);
-}
-
-module_init(cs5535_gpio_init);
-module_exit(cs5535_gpio_cleanup);
diff --git a/drivers/staging/cxd2099/Kconfig b/drivers/staging/cxd2099/Kconfig
index 9d638c30735..b48aefddc84 100644
--- a/drivers/staging/cxd2099/Kconfig
+++ b/drivers/staging/cxd2099/Kconfig
@@ -1,9 +1,10 @@
config DVB_CXD2099
- tristate "CXD2099AR Common Interface driver"
- depends on DVB_CORE && PCI && I2C && DVB_NGENE
- ---help---
- Support for the CI module found on cineS2 DVB-S2, supported by
- the Micronas PCIe device driver (ngene).
+ tristate "CXD2099AR Common Interface driver"
+ depends on DVB_CORE && PCI && I2C
+ ---help---
+ Support for the CI module found on cards based on
+ - Micronas ngene PCIe bridge: cineS2 etc.
+ - Digital Devices PCIe bridge: Octopus series
For now, data is passed through '/dev/dvb/adapterX/sec0':
- Encrypted data must be written to 'sec0'.
diff --git a/drivers/staging/cxd2099/cxd2099.c b/drivers/staging/cxd2099/cxd2099.c
index b49186c74eb..1c04185bcfd 100644
--- a/drivers/staging/cxd2099/cxd2099.c
+++ b/drivers/staging/cxd2099/cxd2099.c
@@ -1,7 +1,7 @@
/*
* cxd2099.c: Driver for the CXD2099AR Common Interface Controller
*
- * Copyright (C) 2010 DigitalDevices UG
+ * Copyright (C) 2010-2011 Digital Devices GmbH
*
*
* This program is free software; you can redistribute it and/or
@@ -22,7 +22,6 @@
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*/
-#include <linux/version.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -42,13 +41,13 @@ struct cxd {
struct dvb_ca_en50221 en;
struct i2c_adapter *i2c;
- u8 adr;
+ struct cxd2099_cfg cfg;
+
u8 regs[0x23];
u8 lastaddress;
u8 clk_reg_f;
u8 clk_reg_b;
int mode;
- u32 bitrate;
int ready;
int dr;
int slot_stat;
@@ -90,9 +89,9 @@ static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr,
u8 reg, u8 *val)
{
struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
- .buf = &reg, .len = 1 },
+ .buf = &reg, .len = 1},
{.addr = adr, .flags = I2C_M_RD,
- .buf = val, .len = 1 } };
+ .buf = val, .len = 1} };
if (i2c_transfer(adapter, msgs, 2) != 2) {
printk(KERN_ERR "error in i2c_read_reg\n");
@@ -105,9 +104,9 @@ static int i2c_read(struct i2c_adapter *adapter, u8 adr,
u8 reg, u8 *data, u8 n)
{
struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
- .buf = &reg, .len = 1 },
- {.addr = adr, .flags = I2C_M_RD,
- .buf = data, .len = n } };
+ .buf = &reg, .len = 1},
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = data, .len = n} };
if (i2c_transfer(adapter, msgs, 2) != 2) {
printk(KERN_ERR "error in i2c_read\n");
@@ -120,10 +119,10 @@ static int read_block(struct cxd *ci, u8 adr, u8 *data, u8 n)
{
int status;
- status = i2c_write_reg(ci->i2c, ci->adr, 0, adr);
+ status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
if (!status) {
ci->lastaddress = adr;
- status = i2c_read(ci->i2c, ci->adr, 1, data, n);
+ status = i2c_read(ci->i2c, ci->cfg.adr, 1, data, n);
}
return status;
}
@@ -137,24 +136,24 @@ static int read_reg(struct cxd *ci, u8 reg, u8 *val)
static int read_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
{
int status;
- u8 addr[3] = { 2, address&0xff, address>>8 };
+ u8 addr[3] = {2, address & 0xff, address >> 8};
- status = i2c_write(ci->i2c, ci->adr, addr, 3);
+ status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
if (!status)
- status = i2c_read(ci->i2c, ci->adr, 3, data, n);
+ status = i2c_read(ci->i2c, ci->cfg.adr, 3, data, n);
return status;
}
static int write_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
{
int status;
- u8 addr[3] = { 2, address&0xff, address>>8 };
+ u8 addr[3] = {2, address & 0xff, address >> 8};
- status = i2c_write(ci->i2c, ci->adr, addr, 3);
+ status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
if (!status) {
u8 buf[256] = {3};
memcpy(buf+1, data, n);
- status = i2c_write(ci->i2c, ci->adr, buf, n+1);
+ status = i2c_write(ci->i2c, ci->cfg.adr, buf, n+1);
}
return status;
}
@@ -162,39 +161,64 @@ static int write_pccard(struct cxd *ci, u16 address, u8 *data, u8 n)
static int read_io(struct cxd *ci, u16 address, u8 *val)
{
int status;
- u8 addr[3] = { 2, address&0xff, address>>8 };
+ u8 addr[3] = {2, address & 0xff, address >> 8};
- status = i2c_write(ci->i2c, ci->adr, addr, 3);
+ status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
if (!status)
- status = i2c_read(ci->i2c, ci->adr, 3, val, 1);
+ status = i2c_read(ci->i2c, ci->cfg.adr, 3, val, 1);
return status;
}
static int write_io(struct cxd *ci, u16 address, u8 val)
{
int status;
- u8 addr[3] = { 2, address&0xff, address>>8 };
- u8 buf[2] = { 3, val };
+ u8 addr[3] = {2, address & 0xff, address >> 8};
+ u8 buf[2] = {3, val};
- status = i2c_write(ci->i2c, ci->adr, addr, 3);
+ status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
if (!status)
- status = i2c_write(ci->i2c, ci->adr, buf, 2);
-
+ status = i2c_write(ci->i2c, ci->cfg.adr, buf, 2);
return status;
}
+#if 0
+static int read_io_data(struct cxd *ci, u8 *data, u8 n)
+{
+ int status;
+ u8 addr[3] = { 2, 0, 0 };
+
+ status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
+ if (!status)
+ status = i2c_read(ci->i2c, ci->cfg.adr, 3, data, n);
+ return 0;
+}
+
+static int write_io_data(struct cxd *ci, u8 *data, u8 n)
+{
+ int status;
+ u8 addr[3] = {2, 0, 0};
+
+ status = i2c_write(ci->i2c, ci->cfg.adr, addr, 3);
+ if (!status) {
+ u8 buf[256] = {3};
+ memcpy(buf+1, data, n);
+ status = i2c_write(ci->i2c, ci->cfg.adr, buf, n + 1);
+ }
+ return 0;
+}
+#endif
static int write_regm(struct cxd *ci, u8 reg, u8 val, u8 mask)
{
int status;
- status = i2c_write_reg(ci->i2c, ci->adr, 0, reg);
+ status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, reg);
if (!status && reg >= 6 && reg <= 8 && mask != 0xff)
- status = i2c_read_reg(ci->i2c, ci->adr, 1, &ci->regs[reg]);
- ci->regs[reg] = (ci->regs[reg]&(~mask))|val;
+ status = i2c_read_reg(ci->i2c, ci->cfg.adr, 1, &ci->regs[reg]);
+ ci->regs[reg] = (ci->regs[reg] & (~mask)) | val;
if (!status) {
ci->lastaddress = reg;
- status = i2c_write_reg(ci->i2c, ci->adr, 1, ci->regs[reg]);
+ status = i2c_write_reg(ci->i2c, ci->cfg.adr, 1, ci->regs[reg]);
}
if (reg == 0x20)
ci->regs[reg] &= 0x7f;
@@ -212,11 +236,11 @@ static int write_block(struct cxd *ci, u8 adr, u8 *data, int n)
int status;
u8 buf[256] = {1};
- status = i2c_write_reg(ci->i2c, ci->adr, 0, adr);
+ status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
if (!status) {
ci->lastaddress = adr;
- memcpy(buf+1, data, n);
- status = i2c_write(ci->i2c, ci->adr, buf, n+1);
+ memcpy(buf + 1, data, n);
+ status = i2c_write(ci->i2c, ci->cfg.adr, buf, n + 1);
}
return status;
}
@@ -250,12 +274,16 @@ static void cam_mode(struct cxd *ci, int mode)
write_regm(ci, 0x20, 0x80, 0x80);
break;
case 0x01:
+#ifdef BUFFER_MODE
+ if (!ci->en.read_data)
+ return;
printk(KERN_INFO "enable cam buffer mode\n");
/* write_reg(ci, 0x0d, 0x00); */
/* write_reg(ci, 0x0e, 0x01); */
write_regm(ci, 0x08, 0x40, 0x40);
/* read_reg(ci, 0x12, &dummy); */
write_regm(ci, 0x08, 0x80, 0x80);
+#endif
break;
default:
break;
@@ -265,8 +293,6 @@ static void cam_mode(struct cxd *ci, int mode)
-#define CHK_ERROR(s) if ((status = s)) break
-
static int init(struct cxd *ci)
{
int status;
@@ -274,63 +300,160 @@ static int init(struct cxd *ci)
mutex_lock(&ci->lock);
ci->mode = -1;
do {
- CHK_ERROR(write_reg(ci, 0x00, 0x00));
- CHK_ERROR(write_reg(ci, 0x01, 0x00));
- CHK_ERROR(write_reg(ci, 0x02, 0x10));
- CHK_ERROR(write_reg(ci, 0x03, 0x00));
- CHK_ERROR(write_reg(ci, 0x05, 0xFF));
- CHK_ERROR(write_reg(ci, 0x06, 0x1F));
- CHK_ERROR(write_reg(ci, 0x07, 0x1F));
- CHK_ERROR(write_reg(ci, 0x08, 0x28));
- CHK_ERROR(write_reg(ci, 0x14, 0x20));
-
- CHK_ERROR(write_reg(ci, 0x09, 0x4D)); /* Input Mode C, BYPass Serial, TIVAL = low, MSB */
- CHK_ERROR(write_reg(ci, 0x0A, 0xA7)); /* TOSTRT = 8, Mode B (gated clock), falling Edge, Serial, POL=HIGH, MSB */
-
- /* Sync detector */
- CHK_ERROR(write_reg(ci, 0x0B, 0x33));
- CHK_ERROR(write_reg(ci, 0x0C, 0x33));
-
- CHK_ERROR(write_regm(ci, 0x14, 0x00, 0x0F));
- CHK_ERROR(write_reg(ci, 0x15, ci->clk_reg_b));
- CHK_ERROR(write_regm(ci, 0x16, 0x00, 0x0F));
- CHK_ERROR(write_reg(ci, 0x17, ci->clk_reg_f));
-
- CHK_ERROR(write_reg(ci, 0x20, 0x28)); /* Integer Divider, Falling Edge, Internal Sync, */
- CHK_ERROR(write_reg(ci, 0x21, 0x00)); /* MCLKI = TICLK/8 */
- CHK_ERROR(write_reg(ci, 0x22, 0x07)); /* MCLKI = TICLK/8 */
-
-
- CHK_ERROR(write_regm(ci, 0x20, 0x80, 0x80)); /* Reset CAM state machine */
-
- CHK_ERROR(write_regm(ci, 0x03, 0x02, 02)); /* Enable IREQA Interrupt */
- CHK_ERROR(write_reg(ci, 0x01, 0x04)); /* Enable CD Interrupt */
- CHK_ERROR(write_reg(ci, 0x00, 0x31)); /* Enable TS1,Hot Swap,Slot A */
- CHK_ERROR(write_regm(ci, 0x09, 0x08, 0x08)); /* Put TS in bypass */
+ status = write_reg(ci, 0x00, 0x00);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x01, 0x00);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x02, 0x10);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x03, 0x00);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x05, 0xFF);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x06, 0x1F);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x07, 0x1F);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x08, 0x28);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x14, 0x20);
+ if (status < 0)
+ break;
+
+#if 0
+ status = write_reg(ci, 0x09, 0x4D); /* Input Mode C, BYPass Serial, TIVAL = low, MSB */
+ if (status < 0)
+ break;
+#endif
+ status = write_reg(ci, 0x0A, 0xA7); /* TOSTRT = 8, Mode B (gated clock), falling Edge, Serial, POL=HIGH, MSB */
+ if (status < 0)
+ break;
+
+ status = write_reg(ci, 0x0B, 0x33);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x0C, 0x33);
+ if (status < 0)
+ break;
+
+ status = write_regm(ci, 0x14, 0x00, 0x0F);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x15, ci->clk_reg_b);
+ if (status < 0)
+ break;
+ status = write_regm(ci, 0x16, 0x00, 0x0F);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x17, ci->clk_reg_f);
+ if (status < 0)
+ break;
+
+ if (ci->cfg.clock_mode) {
+ if (ci->cfg.polarity) {
+ status = write_reg(ci, 0x09, 0x6f);
+ if (status < 0)
+ break;
+ } else {
+ status = write_reg(ci, 0x09, 0x6d);
+ if (status < 0)
+ break;
+ }
+ status = write_reg(ci, 0x20, 0x68);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x21, 0x00);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x22, 0x02);
+ if (status < 0)
+ break;
+ } else {
+ if (ci->cfg.polarity) {
+ status = write_reg(ci, 0x09, 0x4f);
+ if (status < 0)
+ break;
+ } else {
+ status = write_reg(ci, 0x09, 0x4d);
+ if (status < 0)
+ break;
+ }
+
+ status = write_reg(ci, 0x20, 0x28);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x21, 0x00);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x22, 0x07);
+ if (status < 0)
+ break;
+ }
+
+ status = write_regm(ci, 0x20, 0x80, 0x80);
+ if (status < 0)
+ break;
+ status = write_regm(ci, 0x03, 0x02, 0x02);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x01, 0x04);
+ if (status < 0)
+ break;
+ status = write_reg(ci, 0x00, 0x31);
+ if (status < 0)
+ break;
+
+ /* Put TS in bypass */
+ status = write_regm(ci, 0x09, 0x08, 0x08);
+ if (status < 0)
+ break;
ci->cammode = -1;
-#ifdef BUFFER_MODE
cam_mode(ci, 0);
-#endif
} while (0);
mutex_unlock(&ci->lock);
return 0;
}
-
static int read_attribute_mem(struct dvb_ca_en50221 *ca,
int slot, int address)
{
struct cxd *ci = ca->data;
+#if 0
+ if (ci->amem_read) {
+ if (address <= 0 || address > 1024)
+ return -EIO;
+ return ci->amem[address];
+ }
+
+ mutex_lock(&ci->lock);
+ write_regm(ci, 0x06, 0x00, 0x05);
+ read_pccard(ci, 0, &ci->amem[0], 128);
+ read_pccard(ci, 128, &ci->amem[0], 128);
+ read_pccard(ci, 256, &ci->amem[0], 128);
+ read_pccard(ci, 384, &ci->amem[0], 128);
+ write_regm(ci, 0x06, 0x05, 0x05);
+ mutex_unlock(&ci->lock);
+ return ci->amem[address];
+#else
u8 val;
mutex_lock(&ci->lock);
set_mode(ci, 1);
read_pccard(ci, address, &val, 1);
mutex_unlock(&ci->lock);
+ /* printk(KERN_INFO "%02x:%02x\n", address, val); */
return val;
+#endif
}
-
static int write_attribute_mem(struct dvb_ca_en50221 *ca, int slot,
int address, u8 value)
{
@@ -373,6 +496,15 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
struct cxd *ci = ca->data;
mutex_lock(&ci->lock);
+#if 0
+ write_reg(ci, 0x00, 0x21);
+ write_reg(ci, 0x06, 0x1F);
+ write_reg(ci, 0x00, 0x31);
+#else
+#if 0
+ write_reg(ci, 0x06, 0x1F);
+ write_reg(ci, 0x06, 0x2F);
+#else
cam_mode(ci, 0);
write_reg(ci, 0x00, 0x21);
write_reg(ci, 0x06, 0x1F);
@@ -380,13 +512,25 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
write_regm(ci, 0x20, 0x80, 0x80);
write_reg(ci, 0x03, 0x02);
ci->ready = 0;
+#endif
+#endif
ci->mode = -1;
{
int i;
+#if 0
+ u8 val;
+#endif
for (i = 0; i < 100; i++) {
msleep(10);
+#if 0
+ read_reg(ci, 0x06, &val);
+ printk(KERN_INFO "%d:%02x\n", i, val);
+ if (!(val&0x10))
+ break;
+#else
if (ci->ready)
break;
+#endif
}
}
mutex_unlock(&ci->lock);
@@ -400,12 +544,12 @@ static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
printk(KERN_INFO "slot_shutdown\n");
mutex_lock(&ci->lock);
- /* write_regm(ci, 0x09, 0x08, 0x08); */
- write_regm(ci, 0x20, 0x80, 0x80);
- write_regm(ci, 0x06, 0x07, 0x07);
+ write_regm(ci, 0x09, 0x08, 0x08);
+ write_regm(ci, 0x20, 0x80, 0x80); /* Reset CAM Mode */
+ write_regm(ci, 0x06, 0x07, 0x07); /* Clear IO Mode */
ci->mode = -1;
mutex_unlock(&ci->lock);
- return 0; /* shutdown(ci); */
+ return 0;
}
static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
@@ -460,7 +604,6 @@ static int campoll(struct cxd *ci)
if (istat&8 && ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) {
ci->ready = 1;
ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY;
- printk(KERN_INFO "READY\n");
}
}
return 0;
@@ -511,7 +654,7 @@ static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
struct cxd *ci = ca->data;
mutex_lock(&ci->lock);
- printk(KERN_INFO "write_data %d\n", ecount);
+ printk(KERN_INFO "write_data %d\n", ecount);
write_reg(ci, 0x0d, ecount>>8);
write_reg(ci, 0x0e, ecount&0xff);
write_block(ci, 0x11, ebuf, ecount);
@@ -536,15 +679,15 @@ static struct dvb_ca_en50221 en_templ = {
};
-struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv,
+struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
+ void *priv,
struct i2c_adapter *i2c)
{
struct cxd *ci = 0;
- u32 bitrate = 62000000;
u8 val;
- if (i2c_read_reg(i2c, adr, 0, &val) < 0) {
- printk(KERN_ERR "No CXD2099 detected at %02x\n", adr);
+ if (i2c_read_reg(i2c, cfg->adr, 0, &val) < 0) {
+ printk(KERN_INFO "No CXD2099 detected at %02x\n", cfg->adr);
return 0;
}
@@ -554,21 +697,20 @@ struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv,
memset(ci, 0, sizeof(*ci));
mutex_init(&ci->lock);
+ memcpy(&ci->cfg, cfg, sizeof(struct cxd2099_cfg));
ci->i2c = i2c;
- ci->adr = adr;
ci->lastaddress = 0xff;
ci->clk_reg_b = 0x4a;
ci->clk_reg_f = 0x1b;
- ci->bitrate = bitrate;
memcpy(&ci->en, &en_templ, sizeof(en_templ));
ci->en.data = ci;
init(ci);
- printk(KERN_INFO "Attached CXD2099AR at %02x\n", ci->adr);
+ printk(KERN_INFO "Attached CXD2099AR at %02x\n", ci->cfg.adr);
return &ci->en;
}
EXPORT_SYMBOL(cxd2099_attach);
MODULE_DESCRIPTION("cxd2099");
-MODULE_AUTHOR("Ralph Metzler <rjkm@metzlerbros.de>");
+MODULE_AUTHOR("Ralph Metzler");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/cxd2099/cxd2099.h b/drivers/staging/cxd2099/cxd2099.h
index bed54ff3e30..19c588a5958 100644
--- a/drivers/staging/cxd2099/cxd2099.h
+++ b/drivers/staging/cxd2099/cxd2099.h
@@ -1,7 +1,7 @@
/*
* cxd2099.h: Driver for the CXD2099AR Common Interface Controller
*
- * Copyright (C) 2010 DigitalDevices UG
+ * Copyright (C) 2010-2011 Digital Devices GmbH
*
*
* This program is free software; you can redistribute it and/or
@@ -27,11 +27,21 @@
#include <dvb_ca_en50221.h>
+struct cxd2099_cfg {
+ u32 bitrate;
+ u8 adr;
+ u8 polarity:1;
+ u8 clock_mode:1;
+};
+
#if defined(CONFIG_DVB_CXD2099) || \
- (defined(CONFIG_DVB_CXD2099_MODULE) && defined(MODULE))
-struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv, struct i2c_adapter *i2c);
+ (defined(CONFIG_DVB_CXD2099_MODULE) && defined(MODULE))
+struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
+ void *priv, struct i2c_adapter *i2c);
#else
-static inline struct dvb_ca_en50221 *cxd2099_attach(u8 adr, void *priv, struct i2c_adapter *i2c)
+
+static inline struct dvb_ca_en50221 *cxd2099_attach(struct cxd2099_cfg *cfg,
+ void *priv, struct i2c_adapter *i2c)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
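
With the new signature, bridge drivers describe the chip once in a struct cxd2099_cfg and hand it to cxd2099_attach(). A minimal sketch of a hypothetical caller (example_ci_attach(), the 0x40 I2C address and the zeroed polarity/clock_mode are illustrative; 62000000 is the bitrate the removed code hard-wired):

    #include "cxd2099.h"

    static struct dvb_ca_en50221 *example_ci_attach(struct i2c_adapter *i2c,
                                                    void *bridge_priv)
    {
            struct cxd2099_cfg cfg = {
                    .bitrate    = 62000000, /* default the removed code used */
                    .adr        = 0x40,     /* hypothetical I2C address */
                    .polarity   = 0,
                    .clock_mode = 0,
            };

            /* returns NULL when no CXD2099 answers at cfg.adr */
            return cxd2099_attach(&cfg, bridge_priv, i2c);
    }

The caller then registers the returned dvb_ca_en50221 with the DVB CA core as usual; only the configuration hand-off changed.
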
diff --git a/drivers/staging/cxt1e1/sbecom_inline_linux.h b/drivers/staging/cxt1e1/sbecom_inline_linux.h
index 501a331ded5..c0563e68997 100644
--- a/drivers/staging/cxt1e1/sbecom_inline_linux.h
+++ b/drivers/staging/cxt1e1/sbecom_inline_linux.h
@@ -47,7 +47,6 @@
#include <sys/types.h>
#else
#include <linux/types.h>
-#include <linux/version.h>
#if defined(CONFIG_SMP) && ! defined(__SMP__)
#define __SMP__
#endif
diff --git a/drivers/staging/dt3155v4l/Kconfig b/drivers/staging/dt3155v4l/Kconfig
index 5cd5a575b64..226a1ca90b3 100644
--- a/drivers/staging/dt3155v4l/Kconfig
+++ b/drivers/staging/dt3155v4l/Kconfig
@@ -1,7 +1,7 @@
config VIDEO_DT3155
tristate "DT3155 frame grabber, Video4Linux interface"
depends on PCI && VIDEO_DEV && VIDEO_V4L2
- select VIDEOBUF_DMA_CONTIG
+ select VIDEOBUF2_DMA_CONTIG
default n
---help---
Enables dt3155 device driver for the DataTranslation DT3155 frame grabber.
@@ -18,3 +18,11 @@ config DT3155_CCIR
---help---
Select it for CCIR/50Hz (European region),
or leave it unselected for RS-170/60Hz (North America).
+
+config DT3155_STREAMING
+ bool "Selects streaming capture method"
+ depends on VIDEO_DT3155
+ default y
+ ---help---
+ Select it if you want to use streaming of memory-mapped buffers,
+ or leave it unselected if you want to use the read method (one extra copy).
diff --git a/drivers/staging/dt3155v4l/dt3155v4l.c b/drivers/staging/dt3155v4l/dt3155v4l.c
index 15d7efeed29..05aa41cf875 100644
--- a/drivers/staging/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/dt3155v4l/dt3155v4l.c
@@ -22,9 +22,10 @@
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
-#include <media/videobuf-dma-contig.h>
+#include <media/videobuf2-dma-contig.h>
#include "dt3155v4l.h"
@@ -38,6 +39,12 @@
#define DT3155_BUF_SIZE (768 * 576)
+#ifdef CONFIG_DT3155_STREAMING
+#define DT3155_CAPTURE_METHOD V4L2_CAP_STREAMING
+#else
+#define DT3155_CAPTURE_METHOD V4L2_CAP_READWRITE
+#endif
+
/* global initializers (for all boards) */
#ifdef CONFIG_DT3155_CCIR
static const u8 csr2_init = VT_50HZ;
@@ -197,14 +204,14 @@ static int wait_i2c_reg(void __iomem *addr)
static int
dt3155_start_acq(struct dt3155_priv *pd)
{
- struct videobuf_buffer *vb = pd->curr_buf;
+ struct vb2_buffer *vb = pd->curr_buf;
dma_addr_t dma_addr;
- dma_addr = videobuf_to_dma_contig(vb);
+ dma_addr = vb2_dma_contig_plane_paddr(vb, 0);
iowrite32(dma_addr, pd->regs + EVEN_DMA_START);
- iowrite32(dma_addr + vb->width, pd->regs + ODD_DMA_START);
- iowrite32(vb->width, pd->regs + EVEN_DMA_STRIDE);
- iowrite32(vb->width, pd->regs + ODD_DMA_STRIDE);
+ iowrite32(dma_addr + img_width, pd->regs + ODD_DMA_START);
+ iowrite32(img_width, pd->regs + EVEN_DMA_STRIDE);
+ iowrite32(img_width, pd->regs + ODD_DMA_STRIDE);
/* enable interrupts, clear all irq flags */
iowrite32(FLD_START_EN | FLD_END_ODD_EN | FLD_START |
FLD_END_EVEN | FLD_END_ODD, pd->regs + INT_CSR);
@@ -221,95 +228,110 @@ dt3155_start_acq(struct dt3155_priv *pd)
return 0; /* success */
}
+/*
+ * driver-specific callbacks (vb2_ops)
+ */
static int
-dt3155_stop_acq(struct dt3155_priv *pd)
+dt3155_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned long sizes[],
+ void *alloc_ctxs[])
{
- int tmp;
+ struct dt3155_priv *pd = vb2_get_drv_priv(q);
+ void *ret;
+
+ if (*num_buffers == 0)
+ *num_buffers = 1;
+ *num_planes = 1;
+ sizes[0] = img_width * img_height;
+ if (pd->q->alloc_ctx[0])
+ return 0;
+ ret = vb2_dma_contig_init_ctx(&pd->pdev->dev);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+ pd->q->alloc_ctx[0] = ret;
+ return 0;
+}
- /* stop the board */
- wait_i2c_reg(pd->regs);
- write_i2c_reg(pd->regs, CSR2, pd->csr2);
+static void
+dt3155_wait_prepare(struct vb2_queue *q)
+{
+ struct dt3155_priv *pd = vb2_get_drv_priv(q);
- /* disable all irqs, clear all irq flags */
- iowrite32(FLD_START | FLD_END_EVEN | FLD_END_ODD, pd->regs + INT_CSR);
- write_i2c_reg(pd->regs, EVEN_CSR, CSR_ERROR | CSR_DONE);
- write_i2c_reg(pd->regs, ODD_CSR, CSR_ERROR | CSR_DONE);
- tmp = ioread32(pd->regs + CSR1) & (FLD_CRPT_EVEN | FLD_CRPT_ODD);
- if (tmp)
- printk(KERN_ERR "dt3155: corrupted field %u\n", tmp);
- iowrite32(FIFO_EN | SRST | FLD_CRPT_ODD | FLD_CRPT_EVEN |
- FLD_DN_ODD | FLD_DN_EVEN | CAP_CONT_EVEN | CAP_CONT_ODD,
- pd->regs + CSR1);
- return 0;
+ mutex_unlock(pd->vdev->lock);
+}
+
+static void
+dt3155_wait_finish(struct vb2_queue *q)
+{
+ struct dt3155_priv *pd = vb2_get_drv_priv(q);
+
+ mutex_lock(pd->vdev->lock);
}
-/* Locking: Caller holds q->vb_lock */
static int
-dt3155_buf_setup(struct videobuf_queue *q, unsigned int *count,
- unsigned int *size)
+dt3155_buf_prepare(struct vb2_buffer *vb)
{
- *size = img_width * img_height;
+ vb2_set_plane_payload(vb, 0, img_width * img_height);
return 0;
}
-/* Locking: Caller holds q->vb_lock */
static int
-dt3155_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
- enum v4l2_field field)
+dt3155_start_streaming(struct vb2_queue *q)
{
- int ret = 0;
-
- vb->width = img_width;
- vb->height = img_height;
- vb->size = img_width * img_height;
- vb->field = field;
- if (vb->state == VIDEOBUF_NEEDS_INIT)
- ret = videobuf_iolock(q, vb, NULL);
- if (ret) {
- vb->state = VIDEOBUF_ERROR;
- printk(KERN_ERR "ERROR: videobuf_iolock() failed\n");
- videobuf_dma_contig_free(q, vb); /* FIXME: needed? */
- } else
- vb->state = VIDEOBUF_PREPARED;
- return ret;
+ return 0;
}
-/* Locking: Caller holds q->vb_lock & q->irqlock */
-static void
-dt3155_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
+static int
+dt3155_stop_streaming(struct vb2_queue *q)
{
- struct dt3155_priv *pd = q->priv_data;
-
- if (vb->state != VIDEOBUF_NEEDS_INIT) {
- vb->state = VIDEOBUF_QUEUED;
- list_add_tail(&vb->queue, &pd->dmaq);
- wake_up_interruptible_sync(&pd->do_dma);
- } else
- vb->state = VIDEOBUF_ERROR;
+ struct dt3155_priv *pd = vb2_get_drv_priv(q);
+ struct vb2_buffer *vb;
+
+ spin_lock_irq(&pd->lock);
+ while (!list_empty(&pd->dmaq)) {
+ vb = list_first_entry(&pd->dmaq, typeof(*vb), done_entry);
+ list_del(&vb->done_entry);
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irq(&pd->lock);
+ msleep(45); /* irq handler will stop the hardware */
+ return 0;
}
-/* Locking: Caller holds q->vb_lock */
static void
-dt3155_buf_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
+dt3155_buf_queue(struct vb2_buffer *vb)
{
- if (vb->state == VIDEOBUF_ACTIVE)
- videobuf_waiton(q, vb, 0, 0); /* FIXME: cannot be interrupted */
- videobuf_dma_contig_free(q, vb);
- vb->state = VIDEOBUF_NEEDS_INIT;
+ struct dt3155_priv *pd = vb2_get_drv_priv(vb->vb2_queue);
+
+ /* pd->q->streaming = 1 when dt3155_buf_queue() is invoked */
+ spin_lock_irq(&pd->lock);
+ if (pd->curr_buf)
+ list_add_tail(&vb->done_entry, &pd->dmaq);
+ else {
+ pd->curr_buf = vb;
+ dt3155_start_acq(pd);
+ }
+ spin_unlock_irq(&pd->lock);
}
+/*
+ * end driver-specific callbacks
+ */
-static struct videobuf_queue_ops vbq_ops = {
- .buf_setup = dt3155_buf_setup,
+const struct vb2_ops q_ops = {
+ .queue_setup = dt3155_queue_setup,
+ .wait_prepare = dt3155_wait_prepare,
+ .wait_finish = dt3155_wait_finish,
.buf_prepare = dt3155_buf_prepare,
+ .start_streaming = dt3155_start_streaming,
+ .stop_streaming = dt3155_stop_streaming,
.buf_queue = dt3155_buf_queue,
- .buf_release = dt3155_buf_release,
};
static irqreturn_t
dt3155_irq_handler_even(int irq, void *dev_id)
{
struct dt3155_priv *ipd = dev_id;
- struct videobuf_buffer *ivb;
+ struct vb2_buffer *ivb;
dma_addr_t dma_addr;
u32 tmp;
@@ -341,33 +363,22 @@ dt3155_irq_handler_even(int irq, void *dev_id)
}
spin_lock(&ipd->lock);
- if (ipd->curr_buf && ipd->curr_buf->state == VIDEOBUF_ACTIVE) {
- if (waitqueue_active(&ipd->curr_buf->done)) {
- do_gettimeofday(&ipd->curr_buf->ts);
- ipd->curr_buf->field_count = ipd->field_count;
- ipd->curr_buf->state = VIDEOBUF_DONE;
- wake_up(&ipd->curr_buf->done);
- } else {
- ivb = ipd->curr_buf;
- goto load_dma;
- }
- } else
- goto stop_dma;
- if (list_empty(&ipd->dmaq))
- goto stop_dma;
- ivb = list_first_entry(&ipd->dmaq, typeof(*ivb), queue);
- list_del(&ivb->queue);
- if (ivb->state == VIDEOBUF_QUEUED) {
- ivb->state = VIDEOBUF_ACTIVE;
- ipd->curr_buf = ivb;
- } else
+ if (ipd->curr_buf) {
+ do_gettimeofday(&ipd->curr_buf->v4l2_buf.timestamp);
+ ipd->curr_buf->v4l2_buf.sequence = (ipd->field_count) >> 1;
+ vb2_buffer_done(ipd->curr_buf, VB2_BUF_STATE_DONE);
+ }
+
+ if (!ipd->q->streaming || list_empty(&ipd->dmaq))
goto stop_dma;
-load_dma:
- dma_addr = videobuf_to_dma_contig(ivb);
+ ivb = list_first_entry(&ipd->dmaq, typeof(*ivb), done_entry);
+ list_del(&ivb->done_entry);
+ ipd->curr_buf = ivb;
+ dma_addr = vb2_dma_contig_plane_paddr(ivb, 0);
iowrite32(dma_addr, ipd->regs + EVEN_DMA_START);
- iowrite32(dma_addr + ivb->width, ipd->regs + ODD_DMA_START);
- iowrite32(ivb->width, ipd->regs + EVEN_DMA_STRIDE);
- iowrite32(ivb->width, ipd->regs + ODD_DMA_STRIDE);
+ iowrite32(dma_addr + img_width, ipd->regs + ODD_DMA_START);
+ iowrite32(img_width, ipd->regs + EVEN_DMA_STRIDE);
+ iowrite32(img_width, ipd->regs + ODD_DMA_STRIDE);
mmiowb();
/* enable interrupts, clear all irq flags */
iowrite32(FLD_START_EN | FLD_END_ODD_EN | FLD_START |
@@ -379,6 +390,8 @@ stop_dma:
ipd->curr_buf = NULL;
/* stop the board */
write_i2c_reg_nowait(ipd->regs, CSR2, ipd->csr2);
+ iowrite32(FIFO_EN | SRST | FLD_CRPT_ODD | FLD_CRPT_EVEN |
+ FLD_DN_ODD | FLD_DN_EVEN, ipd->regs + CSR1);
/* disable interrupts, clear all irq flags */
iowrite32(FLD_START | FLD_END_EVEN | FLD_END_ODD, ipd->regs + INT_CSR);
spin_unlock(&ipd->lock);
@@ -386,61 +399,31 @@ stop_dma:
}
static int
-dt3155_threadfn(void *arg)
-{
- struct dt3155_priv *pd = arg;
- struct videobuf_buffer *vb;
- unsigned long flags;
-
- while (1) {
- wait_event_interruptible(pd->do_dma,
- kthread_should_stop() || !list_empty(&pd->dmaq));
- if (kthread_should_stop())
- break;
-
- spin_lock_irqsave(&pd->lock, flags);
- if (pd->curr_buf) /* dma is active */
- goto done;
- if (list_empty(&pd->dmaq)) /* no empty biffers */
- goto done;
- vb = list_first_entry(&pd->dmaq, typeof(*vb), queue);
- list_del(&vb->queue);
- if (vb->state == VIDEOBUF_QUEUED) {
- vb->state = VIDEOBUF_ACTIVE;
- pd->curr_buf = vb;
- spin_unlock_irqrestore(&pd->lock, flags);
- /* start dma */
- dt3155_start_acq(pd);
- continue;
- } else
- printk(KERN_DEBUG "%s(): This is a BUG\n", __func__);
-done:
- spin_unlock_irqrestore(&pd->lock, flags);
- }
- return 0;
-}
-
-static int
dt3155_open(struct file *filp)
{
int ret = 0;
struct dt3155_priv *pd = video_drvdata(filp);
- printk(KERN_INFO "dt3155: open(): minor: %i\n", pd->vdev->minor);
+ printk(KERN_INFO "dt3155: open(): minor: %i, users: %i\n",
+ pd->vdev->minor, pd->users);
- if (mutex_lock_interruptible(&pd->mux) == -EINTR)
- return -ERESTARTSYS;
if (!pd->users) {
- pd->vidq = kzalloc(sizeof(*pd->vidq), GFP_KERNEL);
- if (!pd->vidq) {
+ pd->q = kzalloc(sizeof(*pd->q), GFP_KERNEL);
+ if (!pd->q) {
printk(KERN_ERR "dt3155: error: alloc queue\n");
ret = -ENOMEM;
goto err_alloc_queue;
}
- videobuf_queue_dma_contig_init(pd->vidq, &vbq_ops,
- &pd->pdev->dev, &pd->lock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
- sizeof(struct videobuf_buffer), pd, NULL);
+ pd->q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ pd->q->io_modes = VB2_READ | VB2_MMAP;
+ pd->q->ops = &q_ops;
+ pd->q->mem_ops = &vb2_dma_contig_memops;
+ pd->q->drv_priv = pd;
+ pd->curr_buf = NULL;
+ pd->field_count = 0;
+ vb2_queue_init(pd->q); /* cannot fail */
+ INIT_LIST_HEAD(&pd->dmaq);
+ spin_lock_init(&pd->lock);
/* disable all irqs, clear all irq flags */
iowrite32(FLD_START | FLD_END_EVEN | FLD_END_ODD,
pd->regs + INT_CSR);
@@ -451,26 +434,13 @@ dt3155_open(struct file *filp)
printk(KERN_ERR "dt3155: error: request_irq\n");
goto err_request_irq;
}
- pd->curr_buf = NULL;
- pd->thread = kthread_run(dt3155_threadfn, pd,
- "dt3155_thread_%i", pd->vdev->minor);
- if (IS_ERR(pd->thread)) {
- printk(KERN_ERR "dt3155: kthread_run() failed\n");
- ret = PTR_ERR(pd->thread);
- goto err_thread;
- }
- pd->field_count = 0;
}
pd->users++;
- goto done;
-err_thread:
- free_irq(pd->pdev->irq, pd);
+ return 0; /* success */
err_request_irq:
- kfree(pd->vidq);
- pd->vidq = NULL;
+ kfree(pd->q);
+ pd->q = NULL;
err_alloc_queue:
-done:
- mutex_unlock(&pd->mux);
return ret;
}
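
Since the queue is now assembled by hand in dt3155_open(), the setup is worth seeing in one piece. A condensed sketch, reusing the driver's own types and the q_ops table above (not a separate implementation, just the same steps gathered into one helper):

    /* sketch: the vb2 setup performed on first open, gathered in one helper */
    static int example_setup_vb2_queue(struct dt3155_priv *pd)
    {
            pd->q = kzalloc(sizeof(*pd->q), GFP_KERNEL);
            if (!pd->q)
                    return -ENOMEM;

            pd->q->type     = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            pd->q->io_modes = VB2_READ | VB2_MMAP;  /* read() and mmap() I/O */
            pd->q->ops      = &q_ops;               /* driver callbacks above */
            pd->q->mem_ops  = &vb2_dma_contig_memops;
            pd->q->drv_priv = pd;
            pd->curr_buf    = NULL;
            pd->field_count = 0;

            vb2_queue_init(pd->q);  /* cannot fail with this setup */
            INIT_LIST_HEAD(&pd->dmaq);
            spin_lock_init(&pd->lock);
            return 0;
    }

The last close undoes this in dt3155_release(): vb2_queue_release(), vb2_dma_contig_cleanup_ctx() for the context allocated in queue_setup, then kfree(pd->q).
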
@@ -478,61 +448,29 @@ static int
dt3155_release(struct file *filp)
{
struct dt3155_priv *pd = video_drvdata(filp);
- struct videobuf_buffer *tmp;
- unsigned long flags;
- int ret = 0;
- printk(KERN_INFO "dt3155: release(): minor: %i\n", pd->vdev->minor);
+ printk(KERN_INFO "dt3155: release(): minor: %i, users: %i\n",
+ pd->vdev->minor, pd->users - 1);
- if (mutex_lock_interruptible(&pd->mux) == -EINTR)
- return -ERESTARTSYS;
pd->users--;
BUG_ON(pd->users < 0);
- if (pd->acq_fp == filp) {
- spin_lock_irqsave(&pd->lock, flags);
- INIT_LIST_HEAD(&pd->dmaq); /* queue is emptied */
- tmp = pd->curr_buf;
- spin_unlock_irqrestore(&pd->lock, flags);
- if (tmp)
- videobuf_waiton(pd->vidq, tmp, 0, 1); /* block, interruptible */
- dt3155_stop_acq(pd);
- videobuf_stop(pd->vidq);
- pd->acq_fp = NULL;
- pd->streaming = 0;
- }
if (!pd->users) {
- kthread_stop(pd->thread);
+ vb2_queue_release(pd->q);
free_irq(pd->pdev->irq, pd);
- kfree(pd->vidq);
- pd->vidq = NULL;
+ if (pd->q->alloc_ctx[0])
+ vb2_dma_contig_cleanup_ctx(pd->q->alloc_ctx[0]);
+ kfree(pd->q);
+ pd->q = NULL;
}
- mutex_unlock(&pd->mux);
- return ret;
+ return 0;
}
static ssize_t
dt3155_read(struct file *filp, char __user *user, size_t size, loff_t *loff)
{
struct dt3155_priv *pd = video_drvdata(filp);
- int ret;
-
- if (mutex_lock_interruptible(&pd->mux) == -EINTR)
- return -ERESTARTSYS;
- if (!pd->acq_fp) {
- pd->acq_fp = filp;
- pd->streaming = 0;
- } else if (pd->acq_fp != filp) {
- ret = -EBUSY;
- goto done;
- } else if (pd->streaming == 1) {
- ret = -EINVAL;
- goto done;
- }
- ret = videobuf_read_stream(pd->vidq, user, size, loff, 0,
- filp->f_flags & O_NONBLOCK);
-done:
- mutex_unlock(&pd->mux);
- return ret;
+
+ return vb2_read(pd->q, user, size, loff, filp->f_flags & O_NONBLOCK);
}
static unsigned int
@@ -540,7 +478,7 @@ dt3155_poll(struct file *filp, struct poll_table_struct *polltbl)
{
struct dt3155_priv *pd = video_drvdata(filp);
- return videobuf_poll_stream(filp, pd->vidq, polltbl);
+ return vb2_poll(pd->q, filp, polltbl);
}
static int
@@ -548,7 +486,7 @@ dt3155_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct dt3155_priv *pd = video_drvdata(filp);
- return videobuf_mmap_mapper(pd->vidq, vma);
+ return vb2_mmap(pd->q, vma);
}
static const struct v4l2_file_operations dt3155_fops = {
@@ -565,46 +503,16 @@ static int
dt3155_ioc_streamon(struct file *filp, void *p, enum v4l2_buf_type type)
{
struct dt3155_priv *pd = video_drvdata(filp);
- int ret = -ERESTARTSYS;
-
- if (mutex_lock_interruptible(&pd->mux) == -EINTR)
- return ret;
- if (!pd->acq_fp) {
- ret = videobuf_streamon(pd->vidq);
- if (ret)
- goto unlock;
- pd->acq_fp = filp;
- pd->streaming = 1;
- wake_up_interruptible_sync(&pd->do_dma);
- } else if (pd->acq_fp == filp) {
- pd->streaming = 1;
- ret = videobuf_streamon(pd->vidq);
- if (!ret)
- wake_up_interruptible_sync(&pd->do_dma);
- } else
- ret = -EBUSY;
-unlock:
- mutex_unlock(&pd->mux);
- return ret;
+
+ return vb2_streamon(pd->q, type);
}
static int
dt3155_ioc_streamoff(struct file *filp, void *p, enum v4l2_buf_type type)
{
struct dt3155_priv *pd = video_drvdata(filp);
- struct videobuf_buffer *tmp;
- unsigned long flags;
- int ret;
-
- ret = videobuf_streamoff(pd->vidq);
- if (ret)
- return ret;
- spin_lock_irqsave(&pd->lock, flags);
- tmp = pd->curr_buf;
- spin_unlock_irqrestore(&pd->lock, flags);
- if (tmp)
- videobuf_waiton(pd->vidq, tmp, 0, 1); /* block, interruptible */
- return ret;
+
+ return vb2_streamoff(pd->q, type);
}
static int
@@ -618,8 +526,7 @@ dt3155_ioc_querycap(struct file *filp, void *p, struct v4l2_capability *cap)
cap->version =
KERNEL_VERSION(DT3155_VER_MAJ, DT3155_VER_MIN, DT3155_VER_EXT);
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
+ DT3155_CAPTURE_METHOD;
return 0;
}
@@ -667,93 +574,39 @@ dt3155_ioc_try_fmt_vid_cap(struct file *filp, void *p, struct v4l2_format *f)
static int
dt3155_ioc_s_fmt_vid_cap(struct file *filp, void *p, struct v4l2_format *f)
{
- struct dt3155_priv *pd = video_drvdata(filp);
- int ret = -ERESTARTSYS;
-
- if (mutex_lock_interruptible(&pd->mux) == -EINTR)
- return ret;
- if (!pd->acq_fp) {
- pd->acq_fp = filp;
- pd->streaming = 0;
- } else if (pd->acq_fp != filp) {
- ret = -EBUSY;
- goto done;
- }
-/* FIXME: we don't change the format for now
- if (pd->vidq->streaming || pd->vidq->reading || pd->curr_buff) {
- ret = -EBUSY;
- goto done;
- }
-*/
- ret = dt3155_ioc_g_fmt_vid_cap(filp, p, f);
-done:
- mutex_unlock(&pd->mux);
- return ret;
+ return dt3155_ioc_g_fmt_vid_cap(filp, p, f);
}
static int
dt3155_ioc_reqbufs(struct file *filp, void *p, struct v4l2_requestbuffers *b)
{
struct dt3155_priv *pd = video_drvdata(filp);
- struct videobuf_queue *q = pd->vidq;
- int ret = -ERESTARTSYS;
- if (b->memory != V4L2_MEMORY_MMAP)
- return -EINVAL;
- if (mutex_lock_interruptible(&pd->mux) == -EINTR)
- return ret;
- if (!pd->acq_fp)
- pd->acq_fp = filp;
- else if (pd->acq_fp != filp) {
- ret = -EBUSY;
- goto done;
- }
- pd->streaming = 1;
- ret = 0;
-done:
- mutex_unlock(&pd->mux);
- if (ret)
- return ret;
- if (b->count)
- ret = videobuf_reqbufs(q, b);
- else { /* FIXME: is it necessary? */
- printk(KERN_DEBUG "dt3155: request to free buffers\n");
- /* ret = videobuf_mmap_free(q); */
- ret = dt3155_ioc_streamoff(filp, p,
- V4L2_BUF_TYPE_VIDEO_CAPTURE);
- }
- return ret;
+ return vb2_reqbufs(pd->q, b);
}
static int
dt3155_ioc_querybuf(struct file *filp, void *p, struct v4l2_buffer *b)
{
struct dt3155_priv *pd = video_drvdata(filp);
- struct videobuf_queue *q = pd->vidq;
- return videobuf_querybuf(q, b);
+ return vb2_querybuf(pd->q, b);
}
static int
dt3155_ioc_qbuf(struct file *filp, void *p, struct v4l2_buffer *b)
{
struct dt3155_priv *pd = video_drvdata(filp);
- struct videobuf_queue *q = pd->vidq;
- int ret;
- ret = videobuf_qbuf(q, b);
- if (ret)
- return ret;
- return videobuf_querybuf(q, b);
+ return vb2_qbuf(pd->q, b);
}
static int
dt3155_ioc_dqbuf(struct file *filp, void *p, struct v4l2_buffer *b)
{
struct dt3155_priv *pd = video_drvdata(filp);
- struct videobuf_queue *q = pd->vidq;
- return videobuf_dqbuf(q, b, filp->f_flags & O_NONBLOCK);
+ return vb2_dqbuf(pd->q, b, filp->f_flags & O_NONBLOCK);
}
static int
@@ -880,21 +733,21 @@ static const struct v4l2_ioctl_ops dt3155_ioctl_ops = {
};
static int __devinit
-dt3155_init_board(struct pci_dev *dev)
+dt3155_init_board(struct pci_dev *pdev)
{
- struct dt3155_priv *pd = pci_get_drvdata(dev);
+ struct dt3155_priv *pd = pci_get_drvdata(pdev);
void *buf_cpu;
dma_addr_t buf_dma;
int i;
u8 tmp;
- pci_set_master(dev); /* dt3155 needs it */
+ pci_set_master(pdev); /* dt3155 needs it */
/* resetting the adapter */
iowrite32(FLD_CRPT_ODD | FLD_CRPT_EVEN | FLD_DN_ODD | FLD_DN_EVEN,
pd->regs + CSR1);
mmiowb();
- msleep(10);
+ msleep(20);
/* initializing adapter registers */
iowrite32(FIFO_EN | SRST, pd->regs + CSR1);
@@ -949,7 +802,7 @@ dt3155_init_board(struct pci_dev *dev)
write_i2c_reg(pd->regs, AD_CMD, VIDEO_CNL_1 | SYNC_CNL_1 | SYNC_LVL_3);
/* allocate memory, and initialize the DMA machine */
- buf_cpu = dma_alloc_coherent(&dev->dev, DT3155_BUF_SIZE, &buf_dma,
+ buf_cpu = dma_alloc_coherent(&pdev->dev, DT3155_BUF_SIZE, &buf_dma,
GFP_KERNEL);
if (!buf_cpu) {
printk(KERN_ERR "dt3155: dma_alloc_coherent "
@@ -975,7 +828,7 @@ dt3155_init_board(struct pci_dev *dev)
iowrite32(FIFO_EN | SRST | FLD_DN_EVEN | FLD_DN_ODD, pd->regs + CSR1);
/* deallocate memory */
- dma_free_coherent(&dev->dev, DT3155_BUF_SIZE, buf_cpu, buf_dma);
+ dma_free_coherent(&pdev->dev, DT3155_BUF_SIZE, buf_cpu, buf_dma);
if (tmp & BUSY_EVEN) {
printk(KERN_ERR "dt3155: BUSY_EVEN not cleared\n");
return -EIO;
@@ -996,7 +849,7 @@ static struct video_device dt3155_vdev = {
/* same as in drivers/base/dma-coherent.c */
struct dma_coherent_mem {
void *virt_base;
- u32 device_base;
+ dma_addr_t device_base;
int size;
int flags;
unsigned long *bitmap;
@@ -1058,18 +911,18 @@ dt3155_free_coherent(struct device *dev)
}
static int __devinit
-dt3155_probe(struct pci_dev *dev, const struct pci_device_id *id)
+dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int err;
struct dt3155_priv *pd;
printk(KERN_INFO "dt3155: probe()\n");
- err = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
printk(KERN_ERR "dt3155: cannot set dma_mask\n");
return -ENODEV;
}
- err = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
printk(KERN_ERR "dt3155: cannot set dma_coherent_mask\n");
return -ENODEV;
@@ -1085,31 +938,31 @@ dt3155_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto err_video_device_alloc;
}
*pd->vdev = dt3155_vdev;
- pci_set_drvdata(dev, pd); /* for use in dt3155_remove() */
+ pci_set_drvdata(pdev, pd); /* for use in dt3155_remove() */
video_set_drvdata(pd->vdev, pd); /* for use in video_fops */
pd->users = 0;
- pd->acq_fp = NULL;
- pd->pdev = dev;
+ pd->pdev = pdev;
INIT_LIST_HEAD(&pd->dmaq);
- init_waitqueue_head(&pd->do_dma);
mutex_init(&pd->mux);
+ pd->vdev->lock = &pd->mux; /* for locking v4l2_file_operations */
+ spin_lock_init(&pd->lock);
pd->csr2 = csr2_init;
pd->config = config_init;
- err = pci_enable_device(pd->pdev);
+ err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR "dt3155: pci_dev not enabled\n");
goto err_enable_dev;
}
- err = pci_request_region(pd->pdev, 0, pci_name(pd->pdev));
+ err = pci_request_region(pdev, 0, pci_name(pdev));
if (err)
goto err_req_region;
- pd->regs = pci_iomap(pd->pdev, 0, pci_resource_len(pd->pdev, 0));
+ pd->regs = pci_iomap(pdev, 0, pci_resource_len(pd->pdev, 0));
if (!pd->regs) {
err = -ENOMEM;
printk(KERN_ERR "dt3155: pci_iomap failed\n");
goto err_pci_iomap;
}
- err = dt3155_init_board(pd->pdev);
+ err = dt3155_init_board(pdev);
if (err) {
printk(KERN_ERR "dt3155: dt3155_init_board failed\n");
goto err_init_board;
@@ -1119,7 +972,7 @@ dt3155_probe(struct pci_dev *dev, const struct pci_device_id *id)
printk(KERN_ERR "dt3155: Cannot register video device\n");
goto err_init_board;
}
- err = dt3155_alloc_coherent(&dev->dev, DT3155_CHUNK_SIZE,
+ err = dt3155_alloc_coherent(&pdev->dev, DT3155_CHUNK_SIZE,
DMA_MEMORY_MAP);
if (err)
printk(KERN_INFO "dt3155: preallocated 8 buffers\n");
@@ -1127,11 +980,11 @@ dt3155_probe(struct pci_dev *dev, const struct pci_device_id *id)
return 0; /* success */
err_init_board:
- pci_iounmap(pd->pdev, pd->regs);
+ pci_iounmap(pdev, pd->regs);
err_pci_iomap:
- pci_release_region(pd->pdev, 0);
+ pci_release_region(pdev, 0);
err_req_region:
- pci_disable_device(pd->pdev);
+ pci_disable_device(pdev);
err_enable_dev:
video_device_release(pd->vdev);
err_video_device_alloc:
@@ -1140,16 +993,16 @@ err_video_device_alloc:
}
static void __devexit
-dt3155_remove(struct pci_dev *dev)
+dt3155_remove(struct pci_dev *pdev)
{
- struct dt3155_priv *pd = pci_get_drvdata(dev);
+ struct dt3155_priv *pd = pci_get_drvdata(pdev);
printk(KERN_INFO "dt3155: remove()\n");
- dt3155_free_coherent(&dev->dev);
+ dt3155_free_coherent(&pdev->dev);
video_unregister_device(pd->vdev);
- pci_iounmap(dev, pd->regs);
- pci_release_region(pd->pdev, 0);
- pci_disable_device(pd->pdev);
+ pci_iounmap(pdev, pd->regs);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
/*
* video_device_release() is invoked automatically
* see: struct video_device dt3155_vdev
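
The even-field interrupt handler is also where buffers complete: the current buffer is timestamped, given a sequence number and returned with vb2_buffer_done(), and the next buffer on dmaq (if any) is loaded into the DMA registers. A stripped-down sketch of that handoff, reusing the driver's fields and the same done_entry bookkeeping the patch uses (the stop-DMA register writes and interrupt re-enabling are omitted):

    /* sketch of the completion/reload step inside dt3155_irq_handler_even() */
    static void example_complete_and_reload(struct dt3155_priv *pd)
    {
            struct vb2_buffer *vb;
            dma_addr_t dma_addr;

            spin_lock(&pd->lock);                   /* already in irq context */
            if (pd->curr_buf) {
                    do_gettimeofday(&pd->curr_buf->v4l2_buf.timestamp);
                    pd->curr_buf->v4l2_buf.sequence = pd->field_count >> 1;
                    vb2_buffer_done(pd->curr_buf, VB2_BUF_STATE_DONE);
            }
            if (!pd->q->streaming || list_empty(&pd->dmaq)) {
                    pd->curr_buf = NULL;            /* nothing queued: DMA stops */
                    spin_unlock(&pd->lock);
                    return;
            }
            vb = list_first_entry(&pd->dmaq, typeof(*vb), done_entry);
            list_del(&vb->done_entry);
            pd->curr_buf = vb;
            dma_addr = vb2_dma_contig_plane_paddr(vb, 0);
            iowrite32(dma_addr, pd->regs + EVEN_DMA_START);
            iowrite32(dma_addr + img_width, pd->regs + ODD_DMA_START);
            iowrite32(img_width, pd->regs + EVEN_DMA_STRIDE);
            iowrite32(img_width, pd->regs + ODD_DMA_STRIDE);
            spin_unlock(&pd->lock);
    }
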
diff --git a/drivers/staging/dt3155v4l/dt3155v4l.h b/drivers/staging/dt3155v4l/dt3155v4l.h
index aa68a6f38aa..b0792b3d9b7 100644
--- a/drivers/staging/dt3155v4l/dt3155v4l.h
+++ b/drivers/staging/dt3155v4l/dt3155v4l.h
@@ -179,18 +179,13 @@ struct dt3155_stats {
* struct dt3155_priv - private data structure
*
* @vdev: pointer to video_device structure
- * @acq_fp pointer to filp that starts acquisition
- * @streaming streaming is negotiated
* @pdev: pointer to pci_dev structure
- * @vidq pointer to videobuf_queue structure
+ * @q pointer to vb2_queue structure
* @curr_buf: pointer to current buffer
- * @thread pointer to worker thraed
+ * @mux: mutex to protect the instance
* @irq_handler: irq handler for the driver
- * @qt_ops local copy of dma-contig qtype_ops
* @dmaq queue for dma buffers
- * @do_dma wait queue of the kernel thread
- * @mux: mutex to protect the instance
- * @lock spinlock for videobuf queues
+ * @lock spinlock for dma queue
* @field_count fields counter
* @stats: statistics structure
* @users open count
@@ -200,17 +195,12 @@ struct dt3155_stats {
*/
struct dt3155_priv {
struct video_device *vdev;
- struct file *acq_fp;
- int streaming;
struct pci_dev *pdev;
- struct videobuf_queue *vidq;
- struct videobuf_buffer *curr_buf;
- struct task_struct *thread;
+ struct vb2_queue *q;
+ struct vb2_buffer *curr_buf;
+ struct mutex mux;
irq_handler_t irq_handler;
- struct videobuf_qtype_ops qt_ops;
struct list_head dmaq;
- wait_queue_head_t do_dma;
- struct mutex mux;
spinlock_t lock;
unsigned int field_count;
struct dt3155_stats stats;
diff --git a/drivers/staging/easycap/Kconfig b/drivers/staging/easycap/Kconfig
index 6ed208c6185..a425a6f9cdc 100644
--- a/drivers/staging/easycap/Kconfig
+++ b/drivers/staging/easycap/Kconfig
@@ -1,6 +1,7 @@
config EASYCAP
tristate "EasyCAP USB ID 05e1:0408 support"
- depends on USB && VIDEO_DEV && (SND || SOUND_OSS_CORE)
+ depends on USB && VIDEO_DEV && SND
+ select SND_PCM
---help---
This is an integrated audio/video driver for EasyCAP cards with
@@ -15,35 +16,6 @@ config EASYCAP
To compile this driver as a module, choose M here: the
module will be called easycap
-choice
- prompt "Sound Interface"
- depends on EASYCAP
- default EASYCAP_SND
- ---help---
-
-config EASYCAP_SND
- bool "ALSA"
- depends on SND
- select SND_PCM
-
- ---help---
- Say 'Y' if you want to use ALSA interface
-
- This will disable Open Sound System (OSS) binding.
-
-config EASYCAP_OSS
- bool "OSS (DEPRECATED)"
- depends on SOUND_OSS_CORE
-
- ---help---
- Say 'Y' if you prefer Open Sound System (OSS) interface
-
- This will disable Advanced Linux Sound Architecture (ALSA) binding.
-
- Once binding to ALSA interface will be stable this option will be
- removed.
-endchoice
-
config EASYCAP_DEBUG
bool "Enable EasyCAP driver debugging"
depends on EASYCAP
diff --git a/drivers/staging/easycap/Makefile b/drivers/staging/easycap/Makefile
index b13e9ac473b..a34e75f59c1 100644
--- a/drivers/staging/easycap/Makefile
+++ b/drivers/staging/easycap/Makefile
@@ -4,9 +4,7 @@ easycap-objs += easycap_ioctl.o
easycap-objs += easycap_settings.o
easycap-objs += easycap_testcard.o
easycap-objs += easycap_sound.o
-easycap-$(CONFIG_EASYCAP_OSS) += easycap_sound_oss.o
-
-obj-$(CONFIG_EASYCAP) += easycap.o
+obj-$(CONFIG_EASYCAP) += easycap.o
ccflags-y := -Wall
diff --git a/drivers/staging/easycap/README b/drivers/staging/easycap/README
index 6b5ac0d34bd..796b032384b 100644
--- a/drivers/staging/easycap/README
+++ b/drivers/staging/easycap/README
@@ -27,28 +27,6 @@ BUILD OPTIONS AND DEPENDENCIES
Unless EASYCAP_DEBUG is defined during compilation it will not be possible
to select a debug level at the time of module installation.
-If the parameter EASYCAP_IS_VIDEODEV_CLIENT is undefined during compilation
-the built module is entirely independent of the videodev module, and when
-the EasyCAP is physically plugged into a USB port the special files
-/dev/easycap0 and /dev/easysnd1 are created as video and sound sources
-respectively.
-
-If the parameter EASYCAP_IS_VIDEODEV_CLIENT is defined during compilation
-the built easycap module is configured to register with the videodev module,
-in which case the special files created when the EasyCAP is plugged in are
-/dev/video0 and /dev/easysnd0.
-
-During in-tree builds the following should should be defined whenever the
-parameter EASYCAP_IS_VIDEODEV_CLIENT is defined:
-
-EASYCAP_NEEDS_V4L2_DEVICE_H
-EASYCAP_NEEDS_V4L2_FOPS
-EASYCAP_NEEDS_UNLOCKED_IOCTL
-
-If the build is performed out-of-tree against older kernels the parameters
-to be defined depend on the kernel version in a way which will not be
-discussed here.
-
KNOWN RUNTIME ISSUES
--------------------
@@ -82,8 +60,8 @@ Three module parameters are defined:
debug the easycap module is configured at diagnostic level n (0 to 9)
gain audio gain level n (0 to 31, default is 16)
-bars 0 => testcard bars when incoming video signal is lost
- 1 => testcard bars when incoming video signal is lost (default)
+bars whether to display testcard bars when incoming video signal is lost
+ 0 => no, 1 => yes (default)
SUPPORTED TV STANDARDS AND RESOLUTIONS
@@ -128,8 +106,6 @@ hardware, but as yet it has actually been tested on only a few of these.
I have been unable to test and calibrate the S-video input myself because I
do not possess any equipment with S-video output.
-This driver does not understand the V4L1 IOCTL commands.
-
UDEV RULES
----------
diff --git a/drivers/staging/easycap/easycap.h b/drivers/staging/easycap/easycap.h
index 1f94e2389ef..22b24b6c5a5 100644
--- a/drivers/staging/easycap/easycap.h
+++ b/drivers/staging/easycap/easycap.h
@@ -62,7 +62,6 @@
#include <linux/uaccess.h>
#include <linux/i2c.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/poll.h>
#include <linux/mm.h>
@@ -70,7 +69,6 @@
#include <linux/delay.h>
#include <linux/types.h>
-#ifndef CONFIG_EASYCAP_OSS
#include <linux/vmalloc.h>
#include <linux/sound.h>
#include <sound/core.h>
@@ -79,16 +77,11 @@
#include <sound/info.h>
#include <sound/initval.h>
#include <sound/control.h>
-#endif /* !CONFIG_EASYCAP_OSS */
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <linux/videodev2.h>
#include <linux/soundcard.h>
-#ifndef PAGE_SIZE
-#error "PAGE_SIZE not defined"
-#endif /* PAGE_SIZE */
-
/*---------------------------------------------------------------------------*/
/* VENDOR, PRODUCT: Syntek Semiconductor Co., Ltd
*
@@ -285,8 +278,6 @@ struct inputset {
*/
/*---------------------------------------------------------------------------*/
struct easycap {
-#define TELLTALE "expectedstring"
- char telltale[16];
int isdongle;
int minor;
@@ -420,7 +411,6 @@ struct easycap {
* ALSA
*/
/*---------------------------------------------------------------------------*/
-#ifndef CONFIG_EASYCAP_OSS
struct snd_pcm_hardware alsa_hardware;
struct snd_card *psnd_card;
struct snd_pcm *psnd_pcm;
@@ -428,7 +418,6 @@ struct easycap {
int dma_fill;
int dma_next;
int dma_read;
-#endif /* !CONFIG_EASYCAP_OSS */
/*---------------------------------------------------------------------------*/
/*
* SOUND PROPERTIES
@@ -510,12 +499,8 @@ int adjust_volume(struct easycap *, int);
* AUDIO FUNCTION PROTOTYPES
*/
/*---------------------------------------------------------------------------*/
-#ifndef CONFIG_EASYCAP_OSS
int easycap_alsa_probe(struct easycap *);
void easycap_alsa_complete(struct urb *);
-#else /* CONFIG_EASYCAP_OSS */
-void easyoss_complete(struct urb *);
-#endif /* !CONFIG_EASYCAP_OSS */
int easycap_sound_setup(struct easycap *);
int submit_audio_urbs(struct easycap *);
@@ -603,34 +588,6 @@ extern int easycap_debug;
#define JOM(n, format, args...) do {} while (0)
#endif /* CONFIG_EASYCAP_DEBUG */
-#define MICROSECONDS(X, Y) \
- ((1000000*((long long int)(X.tv_sec - Y.tv_sec))) + \
- (long long int)(X.tv_usec - Y.tv_usec))
-
-/*---------------------------------------------------------------------------*/
-/*
- * (unsigned char *)P pointer to next byte pair
- * (long int *)X pointer to accumulating count
- * (long int *)Y pointer to accumulating sum
- * (long long int *)Z pointer to accumulating sum of squares
- */
-/*---------------------------------------------------------------------------*/
-#define SUMMER(P, X, Y, Z) do { \
- unsigned char *p; \
- unsigned int u0, u1, u2; \
- long int s; \
- p = (unsigned char *)(P); \
- u0 = (unsigned int) (*p); \
- u1 = (unsigned int) (*(p + 1)); \
- u2 = (unsigned int) ((u1 << 8) | u0); \
- if (0x8000 & u2) \
- s = -(long int)(0x7FFF & (~u2)); \
- else \
- s = (long int)(0x7FFF & u2); \
- *((X)) += (long int) 1; \
- *((Y)) += (long int) s; \
- *((Z)) += ((long long int)(s) * (long long int)(s)); \
-} while (0)
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
@@ -644,8 +601,5 @@ extern struct easycap_format easycap_format[];
extern struct v4l2_queryctrl easycap_control[];
extern struct usb_driver easycap_usb_driver;
extern struct easycap_dongle easycapdc60_dongle[];
-#ifdef CONFIG_EASYCAP_OSS
-extern struct usb_class_driver easyoss_class;
-#endif /* !CONFIG_EASYCAP_OSS */
#endif /* !__EASYCAP_H__ */
diff --git a/drivers/staging/easycap/easycap_ioctl.c b/drivers/staging/easycap/easycap_ioctl.c
index b3bd11d5879..0accab97a7f 100644
--- a/drivers/staging/easycap/easycap_ioctl.c
+++ b/drivers/staging/easycap/easycap_ioctl.c
@@ -25,6 +25,7 @@
*/
/*****************************************************************************/
+#include <linux/version.h>
#include "easycap.h"
/*--------------------------------------------------------------------------*/
@@ -966,10 +967,6 @@ long easycap_unlocked_ioctl(struct file *file,
SAY("ERROR: peasycap is NULL\n");
return -1;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap\n");
- return -EFAULT;
- }
p = peasycap->pusb_device;
if (!p) {
SAM("ERROR: peasycap->pusb_device is NULL\n");
@@ -1003,12 +1000,6 @@ long easycap_unlocked_ioctl(struct file *file,
mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
return -ERESTARTSYS;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
- return -EFAULT;
- }
- p = peasycap->pusb_device;
if (!peasycap->pusb_device) {
SAM("ERROR: peasycap->pusb_device is NULL\n");
mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
@@ -2356,14 +2347,8 @@ long easycap_unlocked_ioctl(struct file *file,
/*---------------------------------------------------------------------------*/
JOM(8, "calling wake_up on wq_video and wq_audio\n");
wake_up_interruptible(&(peasycap->wq_video));
-#ifdef CONFIG_EASYCAP_OSS
- wake_up_interruptible(&(peasycap->wq_audio));
-
-#else
if (peasycap->psubstream)
snd_pcm_period_elapsed(peasycap->psubstream);
-#endif /* CONFIG_EASYCAP_OSS */
-/*---------------------------------------------------------------------------*/
break;
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
diff --git a/drivers/staging/easycap/easycap_main.c b/drivers/staging/easycap/easycap_main.c
index 62e07f6a026..bea281624c4 100644
--- a/drivers/staging/easycap/easycap_main.c
+++ b/drivers/staging/easycap/easycap_main.c
@@ -158,10 +158,6 @@ static int easycap_open(struct inode *inode, struct file *file)
SAY("ERROR: peasycap is NULL\n");
return -EFAULT;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap: %p\n", peasycap);
- return -EFAULT;
- }
if (!peasycap->pusb_device) {
SAM("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
@@ -695,10 +691,6 @@ static int videodev_release(struct video_device *pvideo_device)
SAY("ending unsuccessfully\n");
return -EFAULT;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap: %p\n", peasycap);
- return -EFAULT;
- }
if (0 != kill_video_urbs(peasycap)) {
SAM("ERROR: kill_video_urbs() failed\n");
return -EFAULT;
@@ -736,10 +728,6 @@ static void easycap_delete(struct kref *pkref)
SAM("ERROR: peasycap is NULL: cannot perform deletions\n");
return;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap: %p\n", peasycap);
- return;
- }
kd = isdongle(peasycap);
/*---------------------------------------------------------------------------*/
/*
@@ -889,20 +877,6 @@ static void easycap_delete(struct kref *pkref)
JOM(4, "easyoss_delete(): isoc audio buffers freed: %i pages\n",
m * (0x01 << AUDIO_ISOC_ORDER));
/*---------------------------------------------------------------------------*/
-#ifdef CONFIG_EASYCAP_OSS
- JOM(4, "freeing audio buffers.\n");
- gone = 0;
- for (k = 0; k < peasycap->audio_buffer_page_many; k++) {
- if (peasycap->audio_buffer[k].pgo) {
- free_page((unsigned long)peasycap->audio_buffer[k].pgo);
- peasycap->audio_buffer[k].pgo = NULL;
- peasycap->allocation_audio_page -= 1;
- gone++;
- }
- }
- JOM(4, "easyoss_delete(): audio buffers freed: %i pages\n", gone);
-#endif /* CONFIG_EASYCAP_OSS */
-/*---------------------------------------------------------------------------*/
JOM(4, "freeing easycap structure.\n");
allocation_video_urb = peasycap->allocation_video_urb;
allocation_video_page = peasycap->allocation_video_page;
@@ -913,8 +887,6 @@ static void easycap_delete(struct kref *pkref)
allocation_audio_struct = peasycap->allocation_audio_struct;
registered_audio = peasycap->registered_audio;
- kfree(peasycap);
-
if (0 <= kd && DONGLE_MANY > kd) {
if (mutex_lock_interruptible(&mutex_dongle)) {
SAY("ERROR: cannot down mutex_dongle\n");
@@ -929,6 +901,9 @@ static void easycap_delete(struct kref *pkref)
} else {
SAY("ERROR: cannot purge dongle[].peasycap");
}
+
+ kfree(peasycap);
+
/*---------------------------------------------------------------------------*/
SAY("%8i=video urbs after all deletions\n", allocation_video_urb);
SAY("%8i=video pages after all deletions\n", allocation_video_page);
@@ -961,10 +936,6 @@ static unsigned int easycap_poll(struct file *file, poll_table *wait)
SAY("ERROR: peasycap is NULL\n");
return -EFAULT;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap: %p\n", peasycap);
- return -EFAULT;
- }
if (!peasycap->pusb_device) {
SAY("ERROR: peasycap->pusb_device is NULL\n");
return -EFAULT;
@@ -995,11 +966,6 @@ static unsigned int easycap_poll(struct file *file, poll_table *wait)
mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
return -ERESTARTSYS;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap: %p\n", peasycap);
- mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
- return -ERESTARTSYS;
- }
if (!peasycap->pusb_device) {
SAM("ERROR: peasycap->pusb_device is NULL\n");
mutex_unlock(&easycapdc60_dongle[kd].mutex_video);
@@ -2454,10 +2420,6 @@ static void easycap_vma_open(struct vm_area_struct *pvma)
SAY("ERROR: peasycap is NULL\n");
return;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap: %p\n", peasycap);
- return;
- }
peasycap->vma_many++;
JOT(8, "%i=peasycap->vma_many\n", peasycap->vma_many);
return;
@@ -2472,10 +2434,6 @@ static void easycap_vma_close(struct vm_area_struct *pvma)
SAY("ERROR: peasycap is NULL\n");
return;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap: %p\n", peasycap);
- return;
- }
peasycap->vma_many--;
JOT(8, "%i=peasycap->vma_many\n", peasycap->vma_many);
return;
@@ -2606,10 +2564,6 @@ static void easycap_complete(struct urb *purb)
SAY("ERROR: easycap_complete(): peasycap is NULL\n");
return;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap: %p\n", peasycap);
- return;
- }
if (peasycap->video_eof)
return;
for (i = 0; i < VIDEO_ISOC_BUFFER_MANY; i++)
@@ -3000,7 +2954,6 @@ static int easycap_usb_probe(struct usb_interface *intf,
struct easycap_format *peasycap_format;
int fmtidx;
struct inputset *inputset;
- struct v4l2_device *pv4l2_device;
usbdev = interface_to_usbdev(intf);
@@ -3054,7 +3007,6 @@ static int easycap_usb_probe(struct usb_interface *intf,
*/
/*---------------------------------------------------------------------------*/
peasycap->minor = -1;
- strcpy(&peasycap->telltale[0], TELLTALE);
kref_init(&peasycap->kref);
JOM(8, "intf[%i]: after kref_init(..._video) "
"%i=peasycap->kref.refcount.counter\n",
@@ -3267,23 +3219,6 @@ static int easycap_usb_probe(struct usb_interface *intf,
bInterfaceNumber);
return -ENODEV;
}
-/*---------------------------------------------------------------------------*/
-/*
- * SOME VERSIONS OF THE videodev MODULE OVERWRITE THE DATA WHICH HAS
- * BEEN WRITTEN BY THE CALL TO usb_set_intfdata() IN easycap_usb_probe(),
- * REPLACING IT WITH A POINTER TO THE EMBEDDED v4l2_device STRUCTURE.
- * TO DETECT THIS, THE STRING IN THE easycap.telltale[] BUFFER IS CHECKED.
-*/
-/*---------------------------------------------------------------------------*/
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- pv4l2_device = usb_get_intfdata(intf);
- if (!pv4l2_device) {
- SAY("ERROR: pv4l2_device is NULL\n");
- return -ENODEV;
- }
- peasycap = (struct easycap *)
- container_of(pv4l2_device, struct easycap, v4l2_device);
- }
}
/*---------------------------------------------------------------------------*/
if ((USB_CLASS_VIDEO == bInterfaceClass) ||
@@ -3775,14 +3710,12 @@ static int easycap_usb_probe(struct usb_interface *intf,
* THE VIDEO DEVICE CAN BE REGISTERED NOW, AS IT IS READY.
*/
/*--------------------------------------------------------------------------*/
- if (0 != (v4l2_device_register(&(intf->dev),
- &(peasycap->v4l2_device)))) {
+ if (v4l2_device_register(&intf->dev, &peasycap->v4l2_device)) {
SAM("v4l2_device_register() failed\n");
return -ENODEV;
- } else {
- JOM(4, "registered device instance: %s\n",
- &(peasycap->v4l2_device.name[0]));
}
+ JOM(4, "registered device instance: %s\n",
+ peasycap->v4l2_device.name);
/*---------------------------------------------------------------------------*/
/*
* FIXME
@@ -3947,32 +3880,6 @@ static int easycap_usb_probe(struct usb_interface *intf,
INIT_LIST_HEAD(&(peasycap->urb_audio_head));
peasycap->purb_audio_head = &(peasycap->urb_audio_head);
-#ifdef CONFIG_EASYCAP_OSS
- JOM(4, "allocating an audio buffer\n");
- JOM(4, ".... scattered over %i pages\n",
- peasycap->audio_buffer_page_many);
-
- for (k = 0; k < peasycap->audio_buffer_page_many; k++) {
- if (peasycap->audio_buffer[k].pgo) {
- SAM("ERROR: attempting to reallocate audio buffers\n");
- } else {
- pbuf = (void *) __get_free_page(GFP_KERNEL);
- if (!pbuf) {
- SAM("ERROR: Could not allocate audio "
- "buffer page %i\n", k);
- return -ENOMEM;
- } else
- peasycap->allocation_audio_page += 1;
-
- peasycap->audio_buffer[k].pgo = pbuf;
- }
- peasycap->audio_buffer[k].pto = peasycap->audio_buffer[k].pgo;
- }
-
- peasycap->audio_fill = 0;
- peasycap->audio_read = 0;
- JOM(4, "allocation of audio buffer done: %i pages\n", k);
-#endif /* CONFIG_EASYCAP_OSS */
/*---------------------------------------------------------------------------*/
JOM(4, "allocating %i isoc audio buffers of size %i\n",
AUDIO_ISOC_BUFFER_MANY,
@@ -4049,11 +3956,7 @@ static int easycap_usb_probe(struct usb_interface *intf,
"peasycap->audio_isoc_buffer[.].pgo;\n");
JOM(4, " purb->transfer_buffer_length = %i;\n",
peasycap->audio_isoc_buffer_size);
-#ifdef CONFIG_EASYCAP_OSS
- JOM(4, " purb->complete = easyoss_complete;\n");
-#else /* CONFIG_EASYCAP_OSS */
JOM(4, " purb->complete = easycap_alsa_complete;\n");
-#endif /* CONFIG_EASYCAP_OSS */
JOM(4, " purb->context = peasycap;\n");
JOM(4, " purb->start_frame = 0;\n");
JOM(4, " purb->number_of_packets = %i;\n",
@@ -4076,11 +3979,7 @@ static int easycap_usb_probe(struct usb_interface *intf,
purb->transfer_buffer = peasycap->audio_isoc_buffer[k].pgo;
purb->transfer_buffer_length =
peasycap->audio_isoc_buffer_size;
-#ifdef CONFIG_EASYCAP_OSS
- purb->complete = easyoss_complete;
-#else /* CONFIG_EASYCAP_OSS */
purb->complete = easycap_alsa_complete;
-#endif /* CONFIG_EASYCAP_OSS */
purb->context = peasycap;
purb->start_frame = 0;
purb->number_of_packets = peasycap->audio_isoc_framesperdesc;
@@ -4103,7 +4002,6 @@ static int easycap_usb_probe(struct usb_interface *intf,
* THE AUDIO DEVICE CAN BE REGISTERED NOW, AS IT IS READY.
*/
/*---------------------------------------------------------------------------*/
-#ifndef CONFIG_EASYCAP_OSS
JOM(4, "initializing ALSA card\n");
rc = easycap_alsa_probe(peasycap);
@@ -4112,15 +4010,6 @@ static int easycap_usb_probe(struct usb_interface *intf,
return -ENODEV;
}
-#else /* CONFIG_EASYCAP_OSS */
- rc = usb_register_dev(intf, &easyoss_class);
- if (rc) {
- SAY("ERROR: usb_register_dev() failed\n");
- usb_set_intfdata(intf, NULL);
- return -ENODEV;
- }
- SAM("easyoss attached to minor #%d\n", intf->minor);
-#endif /* CONFIG_EASYCAP_OSS */
JOM(8, "kref_get() with %i=kref.refcount.counter\n",
peasycap->kref.refcount.counter);
@@ -4146,7 +4035,7 @@ static int easycap_usb_probe(struct usb_interface *intf,
* WHEN THIS FUNCTION IS CALLED THE EasyCAP HAS ALREADY BEEN PHYSICALLY
* UNPLUGGED. HENCE peasycap->pusb_device IS NO LONGER VALID.
*
- * THIS FUNCTION AFFECTS BOTH OSS AND ALSA. BEWARE.
+ * THIS FUNCTION AFFECTS ALSA. BEWARE.
*/
/*---------------------------------------------------------------------------*/
static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
@@ -4159,7 +4048,6 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
struct list_head *plist_head;
struct data_urb *pdata_urb;
int minor, m, kd;
- struct v4l2_device *pv4l2_device;
JOT(4, "\n");
@@ -4187,29 +4075,6 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
}
/*---------------------------------------------------------------------------*/
/*
- * SOME VERSIONS OF THE videodev MODULE OVERWRITE THE DATA WHICH HAS
- * BEEN WRITTEN BY THE CALL TO usb_set_intfdata() IN easycap_usb_probe(),
- * REPLACING IT WITH A POINTER TO THE EMBEDDED v4l2_device STRUCTURE.
- * TO DETECT THIS, THE STRING IN THE easycap.telltale[] BUFFER IS CHECKED.
-*/
-/*---------------------------------------------------------------------------*/
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- pv4l2_device = usb_get_intfdata(pusb_interface);
- if (!pv4l2_device) {
- SAY("ERROR: pv4l2_device is NULL\n");
- return;
- }
- peasycap = (struct easycap *)
- container_of(pv4l2_device, struct easycap, v4l2_device);
- }
-/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/
-/*---------------------------------------------------------------------------*/
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap: %p\n", peasycap);
- return;
- }
-/*---------------------------------------------------------------------------*/
-/*
* IF THE WAIT QUEUES ARE NOT CLEARED A DEADLOCK IS POSSIBLE. BEWARE.
*/
/*---------------------------------------------------------------------------*/
@@ -4321,19 +4186,12 @@ static void easycap_usb_disconnect(struct usb_interface *pusb_interface)
JOM(4, "locked dongle[%i].mutex_audio\n", kd);
} else
SAY("ERROR: %i=kd is bad: cannot lock dongle\n", kd);
-#ifndef CONFIG_EASYCAP_OSS
if (0 != snd_card_free(peasycap->psnd_card)) {
SAY("ERROR: snd_card_free() failed\n");
} else {
peasycap->psnd_card = NULL;
(peasycap->registered_audio)--;
}
-#else /* CONFIG_EASYCAP_OSS */
- usb_deregister_dev(pusb_interface, &easyoss_class);
- peasycap->registered_audio--;
- JOM(4, "intf[%i]: usb_deregister_dev()\n", bInterfaceNumber);
- SAM("easyoss detached from minor #%d\n", minor);
-#endif /* CONFIG_EASYCAP_OSS */
if (0 <= kd && DONGLE_MANY > kd) {
mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
JOM(4, "unlocked dongle[%i].mutex_audio\n", kd);
diff --git a/drivers/staging/easycap/easycap_settings.c b/drivers/staging/easycap/easycap_settings.c
index 898559dad70..70f59b13c34 100644
--- a/drivers/staging/easycap/easycap_settings.c
+++ b/drivers/staging/easycap/easycap_settings.c
@@ -567,7 +567,7 @@ int fillin_formats(void)
default:
return -3;
}
- bytesperline = width * ((mask3 & 0x00F0) >> 4);
+ bytesperline = width * ((mask3 & 0x00E0) >> 5);
sizeimage = bytesperline * height;
for (m = 0; m < INTERLACE_MANY; m++) {
diff --git a/drivers/staging/easycap/easycap_sound.c b/drivers/staging/easycap/easycap_sound.c
index a3402b00a8b..213d0400b3e 100644
--- a/drivers/staging/easycap/easycap_sound.c
+++ b/drivers/staging/easycap/easycap_sound.c
@@ -30,7 +30,6 @@
#include "easycap.h"
-#ifndef CONFIG_EASYCAP_OSS
/*--------------------------------------------------------------------------*/
/*
* PARAMETERS USED WHEN REGISTERING THE AUDIO INTERFACE
@@ -92,10 +91,6 @@ easycap_alsa_complete(struct urb *purb)
SAY("ERROR: peasycap is NULL\n");
return;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap\n");
- return;
- }
much = 0;
if (peasycap->audio_idle) {
JOM(16, "%i=audio_idle %i=audio_isoc_streaming\n",
@@ -310,10 +305,6 @@ static int easycap_alsa_open(struct snd_pcm_substream *pss)
SAY("ERROR: peasycap is NULL\n");
return -EFAULT;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap\n");
- return -EFAULT;
- }
if (peasycap->psnd_card != psnd_card) {
SAM("ERROR: bad peasycap->psnd_card\n");
return -EFAULT;
@@ -350,10 +341,6 @@ static int easycap_alsa_close(struct snd_pcm_substream *pss)
SAY("ERROR: peasycap is NULL\n");
return -EFAULT;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap\n");
- return -EFAULT;
- }
pss->private_data = NULL;
peasycap->psubstream = NULL;
JOT(4, "ending successfully\n");
@@ -441,10 +428,6 @@ static int easycap_alsa_prepare(struct snd_pcm_substream *pss)
SAY("ERROR: peasycap is NULL\n");
return -EFAULT;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap\n");
- return -EFAULT;
- }
JOM(16, "ALSA decides %8i Hz=rate\n", pss->runtime->rate);
JOM(16, "ALSA decides %8ld =period_size\n", pss->runtime->period_size);
@@ -488,11 +471,6 @@ static int easycap_alsa_trigger(struct snd_pcm_substream *pss, int cmd)
SAY("ERROR: peasycap is NULL\n");
return -EFAULT;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap\n");
- return -EFAULT;
- }
-
switch (cmd) {
case SNDRV_PCM_TRIGGER_START: {
peasycap->audio_idle = 0;
@@ -523,10 +501,6 @@ static snd_pcm_uframes_t easycap_alsa_pointer(struct snd_pcm_substream *pss)
SAY("ERROR: peasycap is NULL\n");
return -EFAULT;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap\n");
- return -EFAULT;
- }
if ((0 != peasycap->audio_eof) || (0 != peasycap->audio_idle)) {
JOM(8, "returning -EIO because "
"%i=audio_idle %i=audio_eof\n",
@@ -584,10 +558,6 @@ int easycap_alsa_probe(struct easycap *peasycap)
SAY("ERROR: peasycap is NULL\n");
return -ENODEV;
}
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap\n");
- return -EFAULT;
- }
if (0 > peasycap->minor) {
SAY("ERROR: no minor\n");
return -ENODEV;
@@ -644,7 +614,6 @@ int easycap_alsa_probe(struct easycap *peasycap)
SAM("registered %s\n", &psnd_card->id[0]);
return 0;
}
-#endif /*! CONFIG_EASYCAP_OSS */
/*****************************************************************************/
/*****************************************************************************/
@@ -762,11 +731,7 @@ submit_audio_urbs(struct easycap *peasycap)
purb->transfer_flags = URB_ISO_ASAP;
purb->transfer_buffer = peasycap->audio_isoc_buffer[isbuf].pgo;
purb->transfer_buffer_length = peasycap->audio_isoc_buffer_size;
-#ifdef CONFIG_EASYCAP_OSS
- purb->complete = easyoss_complete;
-#else /* CONFIG_EASYCAP_OSS */
purb->complete = easycap_alsa_complete;
-#endif /* CONFIG_EASYCAP_OSS */
purb->context = peasycap;
purb->start_frame = 0;
purb->number_of_packets = peasycap->audio_isoc_framesperdesc;
diff --git a/drivers/staging/easycap/easycap_sound_oss.c b/drivers/staging/easycap/easycap_sound_oss.c
deleted file mode 100644
index d92baf22276..00000000000
--- a/drivers/staging/easycap/easycap_sound_oss.c
+++ /dev/null
@@ -1,954 +0,0 @@
-/******************************************************************************
-* *
-* easycap_sound.c *
-* *
-* Audio driver for EasyCAP USB2.0 Video Capture Device DC60 *
-* *
-* *
-******************************************************************************/
-/*
- *
- * Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
- *
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * The software is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this software; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
-*/
-/*****************************************************************************/
-
-#include "easycap.h"
-
-/*****************************************************************************/
-/**************************** **************************/
-/**************************** Open Sound System **************************/
-/**************************** **************************/
-/*****************************************************************************/
-/*--------------------------------------------------------------------------*/
-/*
- * PARAMETERS USED WHEN REGISTERING THE AUDIO INTERFACE
- */
-/*--------------------------------------------------------------------------*/
-/*****************************************************************************/
-/*---------------------------------------------------------------------------*/
-/*
- * ON COMPLETION OF AN AUDIO URB ITS DATA IS COPIED TO THE AUDIO BUFFERS
- * PROVIDED peasycap->audio_idle IS ZERO. REGARDLESS OF THIS BEING TRUE,
- * IT IS RESUBMITTED PROVIDED peasycap->audio_isoc_streaming IS NOT ZERO.
- */
-/*---------------------------------------------------------------------------*/
-void
-easyoss_complete(struct urb *purb)
-{
- struct easycap *peasycap;
- struct data_buffer *paudio_buffer;
- u8 *p1, *p2;
- s16 tmp;
- int i, j, more, much, leap, rc;
-#ifdef UPSAMPLE
- int k;
- s16 oldaudio, newaudio, delta;
-#endif /*UPSAMPLE*/
-
- JOT(16, "\n");
-
- if (!purb) {
- SAY("ERROR: purb is NULL\n");
- return;
- }
- peasycap = purb->context;
- if (!peasycap) {
- SAY("ERROR: peasycap is NULL\n");
- return;
- }
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap\n");
- return;
- }
- much = 0;
- if (peasycap->audio_idle) {
- JOM(16, "%i=audio_idle %i=audio_isoc_streaming\n",
- peasycap->audio_idle, peasycap->audio_isoc_streaming);
- if (peasycap->audio_isoc_streaming) {
- rc = usb_submit_urb(purb, GFP_ATOMIC);
- if (rc) {
- if (-ENODEV != rc && -ENOENT != rc) {
- SAM("ERROR: while %i=audio_idle, "
- "usb_submit_urb() failed with rc: -%s: %d\n",
- peasycap->audio_idle,
- strerror(rc), rc);
- }
- }
- }
- return;
- }
-/*---------------------------------------------------------------------------*/
- if (purb->status) {
- if ((-ESHUTDOWN == purb->status) || (-ENOENT == purb->status)) {
- JOM(16, "urb status -ESHUTDOWN or -ENOENT\n");
- return;
- }
- SAM("ERROR: non-zero urb status: -%s: %d\n",
- strerror(purb->status), purb->status);
- goto resubmit;
- }
-/*---------------------------------------------------------------------------*/
-/*
- * PROCEED HERE WHEN NO ERROR
- */
-/*---------------------------------------------------------------------------*/
-#ifdef UPSAMPLE
- oldaudio = peasycap->oldaudio;
-#endif /*UPSAMPLE*/
-
- for (i = 0; i < purb->number_of_packets; i++) {
- if (!purb->iso_frame_desc[i].status) {
-
- SAM("-%s\n", strerror(purb->iso_frame_desc[i].status));
-
- more = purb->iso_frame_desc[i].actual_length;
-
- if (!more)
- peasycap->audio_mt++;
- else {
- if (peasycap->audio_mt) {
- JOM(12, "%4i empty audio urb frames\n",
- peasycap->audio_mt);
- peasycap->audio_mt = 0;
- }
-
- p1 = (u8 *)(purb->transfer_buffer + purb->iso_frame_desc[i].offset);
-
- leap = 0;
- p1 += leap;
- more -= leap;
- /*
- * COPY more BYTES FROM ISOC BUFFER
- * TO AUDIO BUFFER, CONVERTING
- * 8-BIT MONO TO 16-BIT SIGNED
- * LITTLE-ENDIAN SAMPLES IF NECESSARY
- */
- while (more) {
- if (0 > more) {
- SAM("MISTAKE: more is negative\n");
- return;
- }
- if (peasycap->audio_buffer_page_many <= peasycap->audio_fill) {
- SAM("ERROR: bad peasycap->audio_fill\n");
- return;
- }
-
- paudio_buffer = &peasycap->audio_buffer[peasycap->audio_fill];
- if (PAGE_SIZE < (paudio_buffer->pto - paudio_buffer->pgo)) {
- SAM("ERROR: bad paudio_buffer->pto\n");
- return;
- }
- if (PAGE_SIZE == (paudio_buffer->pto - paudio_buffer->pgo)) {
-
- paudio_buffer->pto = paudio_buffer->pgo;
- (peasycap->audio_fill)++;
- if (peasycap->audio_buffer_page_many <= peasycap->audio_fill)
- peasycap->audio_fill = 0;
-
- JOM(8, "bumped peasycap->"
- "audio_fill to %i\n",
- peasycap->audio_fill);
-
- paudio_buffer = &peasycap->audio_buffer[peasycap->audio_fill];
- paudio_buffer->pto = paudio_buffer->pgo;
-
- if (!(peasycap->audio_fill % peasycap->audio_pages_per_fragment)) {
- JOM(12, "wakeup call on wq_audio, %i=frag reading %i=fragment fill\n",
- (peasycap->audio_read / peasycap->audio_pages_per_fragment),
- (peasycap->audio_fill / peasycap->audio_pages_per_fragment));
- wake_up_interruptible(&(peasycap->wq_audio));
- }
- }
-
- much = PAGE_SIZE - (int)(paudio_buffer->pto - paudio_buffer->pgo);
-
- if (!peasycap->microphone) {
- if (much > more)
- much = more;
-
- memcpy(paudio_buffer->pto, p1, much);
- p1 += much;
- more -= much;
- } else {
-#ifdef UPSAMPLE
- if (much % 16)
- JOM(8, "MISTAKE? much"
- " is not divisible by 16\n");
- if (much > (16 * more))
- much = 16 * more;
- p2 = (u8 *)paudio_buffer->pto;
-
- for (j = 0; j < (much/16); j++) {
- newaudio = ((int) *p1) - 128;
- newaudio = 128 * newaudio;
-
- delta = (newaudio - oldaudio) / 4;
- tmp = oldaudio + delta;
-
- for (k = 0; k < 4; k++) {
- *p2 = (0x00FF & tmp);
- *(p2 + 1) = (0xFF00 & tmp) >> 8;
- p2 += 2;
- *p2 = (0x00FF & tmp);
- *(p2 + 1) = (0xFF00 & tmp) >> 8;
- p2 += 2;
-
- tmp += delta;
- }
- p1++;
- more--;
- oldaudio = tmp;
- }
-#else /*!UPSAMPLE*/
- if (much > (2 * more))
- much = 2 * more;
- p2 = (u8 *)paudio_buffer->pto;
-
- for (j = 0; j < (much / 2); j++) {
- tmp = ((int) *p1) - 128;
- tmp = 128 * tmp;
- *p2 = (0x00FF & tmp);
- *(p2 + 1) = (0xFF00 & tmp) >> 8;
- p1++;
- p2 += 2;
- more--;
- }
-#endif /*UPSAMPLE*/
- }
- (paudio_buffer->pto) += much;
- }
- }
- } else {
- JOM(12, "discarding audio samples because "
- "%i=purb->iso_frame_desc[i].status\n",
- purb->iso_frame_desc[i].status);
- }
-
-#ifdef UPSAMPLE
- peasycap->oldaudio = oldaudio;
-#endif /*UPSAMPLE*/
-
- }
-/*---------------------------------------------------------------------------*/
-/*
- * RESUBMIT THIS URB
- */
-/*---------------------------------------------------------------------------*/
-resubmit:
- if (peasycap->audio_isoc_streaming) {
- rc = usb_submit_urb(purb, GFP_ATOMIC);
- if (rc) {
- if (-ENODEV != rc && -ENOENT != rc) {
- SAM("ERROR: while %i=audio_idle, "
- "usb_submit_urb() failed "
- "with rc: -%s: %d\n", peasycap->audio_idle,
- strerror(rc), rc);
- }
- }
- }
- return;
-}
-/*****************************************************************************/
-/*---------------------------------------------------------------------------*/
-/*
- * THE AUDIO URBS ARE SUBMITTED AT THIS EARLY STAGE SO THAT IT IS POSSIBLE TO
- * STREAM FROM /dev/easyoss1 WITH SIMPLE PROGRAMS SUCH AS cat WHICH DO NOT
- * HAVE AN IOCTL INTERFACE.
- */
-/*---------------------------------------------------------------------------*/
-static int easyoss_open(struct inode *inode, struct file *file)
-{
- struct usb_interface *pusb_interface;
- struct easycap *peasycap;
- int subminor;
- struct v4l2_device *pv4l2_device;
-
- JOT(4, "begins\n");
-
- subminor = iminor(inode);
-
- pusb_interface = usb_find_interface(&easycap_usb_driver, subminor);
- if (!pusb_interface) {
- SAY("ERROR: pusb_interface is NULL\n");
- SAY("ending unsuccessfully\n");
- return -1;
- }
- peasycap = usb_get_intfdata(pusb_interface);
- if (!peasycap) {
- SAY("ERROR: peasycap is NULL\n");
- SAY("ending unsuccessfully\n");
- return -1;
- }
-/*---------------------------------------------------------------------------*/
-/*
- * SOME VERSIONS OF THE videodev MODULE OVERWRITE THE DATA WHICH HAS
- * BEEN WRITTEN BY THE CALL TO usb_set_intfdata() IN easycap_usb_probe(),
- * REPLACING IT WITH A POINTER TO THE EMBEDDED v4l2_device STRUCTURE.
- * TO DETECT THIS, THE STRING IN THE easycap.telltale[] BUFFER IS CHECKED.
-*/
-/*---------------------------------------------------------------------------*/
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- pv4l2_device = usb_get_intfdata(pusb_interface);
- if (!pv4l2_device) {
- SAY("ERROR: pv4l2_device is NULL\n");
- return -EFAULT;
- }
- peasycap = container_of(pv4l2_device,
- struct easycap, v4l2_device);
- }
-/*---------------------------------------------------------------------------*/
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap: %p\n", peasycap);
- return -EFAULT;
- }
-/*---------------------------------------------------------------------------*/
-
- file->private_data = peasycap;
-
- if (0 != easycap_sound_setup(peasycap)) {
- ;
- ;
- }
- return 0;
-}
-/*****************************************************************************/
-static int easyoss_release(struct inode *inode, struct file *file)
-{
- struct easycap *peasycap;
-
- JOT(4, "begins\n");
-
- peasycap = file->private_data;
- if (!peasycap) {
- SAY("ERROR: peasycap is NULL.\n");
- return -EFAULT;
- }
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap: %p\n", peasycap);
- return -EFAULT;
- }
- if (0 != kill_audio_urbs(peasycap)) {
- SAM("ERROR: kill_audio_urbs() failed\n");
- return -EFAULT;
- }
- JOM(4, "ending successfully\n");
- return 0;
-}
-/*****************************************************************************/
-static ssize_t easyoss_read(struct file *file, char __user *puserspacebuffer,
- size_t kount, loff_t *poff)
-{
- struct timeval timeval;
- long long int above, below, mean;
- struct signed_div_result sdr;
- unsigned char *p0;
- long int kount1, more, rc, l0, lm;
- int fragment, kd;
- struct easycap *peasycap;
- struct data_buffer *pdata_buffer;
- size_t szret;
-
-/*---------------------------------------------------------------------------*/
-/*
- * DO A BLOCKING READ TO TRANSFER DATA TO USER SPACE.
- *
- ******************************************************************************
- ***** N.B. IF THIS FUNCTION RETURNS 0, NOTHING IS SEEN IN USER SPACE. ******
- ***** THIS CONDITION SIGNIFIES END-OF-FILE. ******
- ******************************************************************************
- */
-/*---------------------------------------------------------------------------*/
-
- JOT(8, "%5zd=kount %5lld=*poff\n", kount, *poff);
-
- if (!file) {
- SAY("ERROR: file is NULL\n");
- return -ERESTARTSYS;
- }
- peasycap = file->private_data;
- if (!peasycap) {
- SAY("ERROR in easyoss_read(): peasycap is NULL\n");
- return -EFAULT;
- }
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap: %p\n", peasycap);
- return -EFAULT;
- }
- if (!peasycap->pusb_device) {
- SAY("ERROR: peasycap->pusb_device is NULL\n");
- return -EFAULT;
- }
- kd = isdongle(peasycap);
- if (0 <= kd && DONGLE_MANY > kd) {
- if (mutex_lock_interruptible(&(easycapdc60_dongle[kd].mutex_audio))) {
- SAY("ERROR: "
- "cannot lock dongle[%i].mutex_audio\n", kd);
- return -ERESTARTSYS;
- }
- JOM(4, "locked dongle[%i].mutex_audio\n", kd);
- /*
- * MEANWHILE, easycap_usb_disconnect()
- * MAY HAVE FREED POINTER peasycap,
- * IN WHICH CASE A REPEAT CALL TO isdongle() WILL FAIL.
- * IF NECESSARY, BAIL OUT.
- */
- if (kd != isdongle(peasycap))
- return -ERESTARTSYS;
- if (!file) {
- SAY("ERROR: file is NULL\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -ERESTARTSYS;
- }
- peasycap = file->private_data;
- if (!peasycap) {
- SAY("ERROR: peasycap is NULL\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -ERESTARTSYS;
- }
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap: %p\n", peasycap);
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -ERESTARTSYS;
- }
- if (!peasycap->pusb_device) {
- SAM("ERROR: peasycap->pusb_device is NULL\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -ERESTARTSYS;
- }
- } else {
- /*
- * IF easycap_usb_disconnect()
- * HAS ALREADY FREED POINTER peasycap BEFORE THE
- * ATTEMPT TO ACQUIRE THE SEMAPHORE,
- * isdongle() WILL HAVE FAILED. BAIL OUT.
- */
- return -ERESTARTSYS;
- }
-/*---------------------------------------------------------------------------*/
- JOT(16, "%sBLOCKING kount=%zd, *poff=%lld\n",
- (file->f_flags & O_NONBLOCK) ? "NON" : "", kount, *poff);
-
- if ((0 > peasycap->audio_read) ||
- (peasycap->audio_buffer_page_many <= peasycap->audio_read)) {
- SAM("ERROR: peasycap->audio_read out of range\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- pdata_buffer = &peasycap->audio_buffer[peasycap->audio_read];
- if (!pdata_buffer) {
- SAM("ERROR: pdata_buffer is NULL\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- JOM(12, "before wait, %i=frag read %i=frag fill\n",
- (peasycap->audio_read / peasycap->audio_pages_per_fragment),
- (peasycap->audio_fill / peasycap->audio_pages_per_fragment));
- fragment = (peasycap->audio_read / peasycap->audio_pages_per_fragment);
- while ((fragment == (peasycap->audio_fill / peasycap->audio_pages_per_fragment)) ||
- (0 == (PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo)))) {
- if (file->f_flags & O_NONBLOCK) {
- JOM(16, "returning -EAGAIN as instructed\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EAGAIN;
- }
- rc = wait_event_interruptible(peasycap->wq_audio,
- (peasycap->audio_idle || peasycap->audio_eof ||
- ((fragment !=
- (peasycap->audio_fill / peasycap->audio_pages_per_fragment)) &&
- (0 < (PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo))))));
- if (rc) {
- SAM("aborted by signal\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -ERESTARTSYS;
- }
- if (peasycap->audio_eof) {
- JOM(8, "returning 0 because %i=audio_eof\n",
- peasycap->audio_eof);
- kill_audio_urbs(peasycap);
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return 0;
- }
- if (peasycap->audio_idle) {
- JOM(16, "returning 0 because %i=audio_idle\n",
- peasycap->audio_idle);
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return 0;
- }
- if (!peasycap->audio_isoc_streaming) {
- JOM(16, "returning 0 because audio urbs not streaming\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return 0;
- }
- }
- JOM(12, "after wait, %i=frag read %i=frag fill\n",
- (peasycap->audio_read / peasycap->audio_pages_per_fragment),
- (peasycap->audio_fill / peasycap->audio_pages_per_fragment));
- szret = (size_t)0;
- fragment = (peasycap->audio_read / peasycap->audio_pages_per_fragment);
- while (fragment == (peasycap->audio_read / peasycap->audio_pages_per_fragment)) {
- if (!pdata_buffer->pgo) {
- SAM("ERROR: pdata_buffer->pgo is NULL\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- if (!pdata_buffer->pto) {
- SAM("ERROR: pdata_buffer->pto is NULL\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- kount1 = PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo);
- if (0 > kount1) {
- SAM("MISTAKE: kount1 is negative\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -ERESTARTSYS;
- }
- if (!kount1) {
- peasycap->audio_read++;
- if (peasycap->audio_buffer_page_many <= peasycap->audio_read)
- peasycap->audio_read = 0;
- JOM(12, "bumped peasycap->audio_read to %i\n",
- peasycap->audio_read);
-
- if (fragment != (peasycap->audio_read / peasycap->audio_pages_per_fragment))
- break;
-
- if ((0 > peasycap->audio_read) ||
- (peasycap->audio_buffer_page_many <= peasycap->audio_read)) {
- SAM("ERROR: peasycap->audio_read out of range\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- pdata_buffer = &peasycap->audio_buffer[peasycap->audio_read];
- if (!pdata_buffer) {
- SAM("ERROR: pdata_buffer is NULL\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- if (!pdata_buffer->pgo) {
- SAM("ERROR: pdata_buffer->pgo is NULL\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- if (!pdata_buffer->pto) {
- SAM("ERROR: pdata_buffer->pto is NULL\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- kount1 = PAGE_SIZE - (pdata_buffer->pto - pdata_buffer->pgo);
- }
- JOM(12, "ready to send %zd bytes\n", kount1);
- JOM(12, "still to send %li bytes\n", (long int) kount);
- more = kount1;
- if (more > kount)
- more = kount;
- JOM(12, "agreed to send %li bytes from page %i\n",
- more, peasycap->audio_read);
- if (!more)
- break;
-
- /*
- * ACCUMULATE DYNAMIC-RANGE INFORMATION
- */
- p0 = (unsigned char *)pdata_buffer->pgo;
- l0 = 0;
- lm = more/2;
- while (l0 < lm) {
- SUMMER(p0, &peasycap->audio_sample,
- &peasycap->audio_niveau,
- &peasycap->audio_square);
- l0++;
- p0 += 2;
- }
- /*-----------------------------------------------------------*/
- rc = copy_to_user(puserspacebuffer, pdata_buffer->pto, more);
- if (rc) {
- SAM("ERROR: copy_to_user() returned %li\n", rc);
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- *poff += (loff_t)more;
- szret += (size_t)more;
- pdata_buffer->pto += more;
- puserspacebuffer += more;
- kount -= (size_t)more;
- }
- JOM(12, "after read, %i=frag read %i=frag fill\n",
- (peasycap->audio_read / peasycap->audio_pages_per_fragment),
- (peasycap->audio_fill / peasycap->audio_pages_per_fragment));
- if (kount < 0) {
- SAM("MISTAKE: %li=kount %li=szret\n",
- (long int)kount, (long int)szret);
- }
-/*---------------------------------------------------------------------------*/
-/*
- * CALCULATE DYNAMIC RANGE FOR (VAPOURWARE) AUTOMATIC VOLUME CONTROL
- */
-/*---------------------------------------------------------------------------*/
- if (peasycap->audio_sample) {
- below = peasycap->audio_sample;
- above = peasycap->audio_square;
- sdr = signed_div(above, below);
- above = sdr.quotient;
- mean = peasycap->audio_niveau;
- sdr = signed_div(mean, peasycap->audio_sample);
-
- JOM(8, "%8lli=mean %8lli=meansquare after %lli samples, =>\n",
- sdr.quotient, above, peasycap->audio_sample);
-
- sdr = signed_div(above, 32768);
- JOM(8, "audio dynamic range is roughly %lli\n", sdr.quotient);
- }
-/*---------------------------------------------------------------------------*/
-/*
- * UPDATE THE AUDIO CLOCK
- */
-/*---------------------------------------------------------------------------*/
- do_gettimeofday(&timeval);
- if (!peasycap->timeval1.tv_sec) {
- peasycap->audio_bytes = 0;
- peasycap->timeval3 = timeval;
- peasycap->timeval1 = peasycap->timeval3;
- sdr.quotient = 192000;
- } else {
- peasycap->audio_bytes += (long long int) szret;
- below = ((long long int)(1000000)) *
- ((long long int)(timeval.tv_sec - peasycap->timeval3.tv_sec)) +
- (long long int)(timeval.tv_usec - peasycap->timeval3.tv_usec);
- above = 1000000 * ((long long int) peasycap->audio_bytes);
-
- if (below)
- sdr = signed_div(above, below);
- else
- sdr.quotient = 192000;
- }
- JOM(8, "audio streaming at %lli bytes/second\n", sdr.quotient);
- peasycap->dnbydt = sdr.quotient;
-
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- JOM(4, "unlocked easycapdc60_dongle[%i].mutex_audio\n", kd);
- JOM(8, "returning %li\n", (long int)szret);
- return szret;
-
-}
-/*---------------------------------------------------------------------------*/
-static long easyoss_unlocked_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct easycap *peasycap;
- struct usb_device *p;
- int kd;
-
- if (!file) {
- SAY("ERROR: file is NULL\n");
- return -ERESTARTSYS;
- }
- peasycap = file->private_data;
- if (!peasycap) {
- SAY("ERROR: peasycap is NULL.\n");
- return -EFAULT;
- }
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap\n");
- return -EFAULT;
- }
- p = peasycap->pusb_device;
- if (!p) {
- SAM("ERROR: peasycap->pusb_device is NULL\n");
- return -EFAULT;
- }
- kd = isdongle(peasycap);
- if (0 <= kd && DONGLE_MANY > kd) {
- if (mutex_lock_interruptible(&easycapdc60_dongle[kd].mutex_audio)) {
- SAY("ERROR: cannot lock "
- "easycapdc60_dongle[%i].mutex_audio\n", kd);
- return -ERESTARTSYS;
- }
- JOM(4, "locked easycapdc60_dongle[%i].mutex_audio\n", kd);
- /*
- * MEANWHILE, easycap_usb_disconnect()
- * MAY HAVE FREED POINTER peasycap,
- * IN WHICH CASE A REPEAT CALL TO isdongle() WILL FAIL.
- * IF NECESSARY, BAIL OUT.
- */
- if (kd != isdongle(peasycap))
- return -ERESTARTSYS;
- if (!file) {
- SAY("ERROR: file is NULL\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -ERESTARTSYS;
- }
- peasycap = file->private_data;
- if (!peasycap) {
- SAY("ERROR: peasycap is NULL\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -ERESTARTSYS;
- }
- if (memcmp(&peasycap->telltale[0], TELLTALE, strlen(TELLTALE))) {
- SAY("ERROR: bad peasycap\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- p = peasycap->pusb_device;
- if (!peasycap->pusb_device) {
- SAM("ERROR: peasycap->pusb_device is NULL\n");
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -ERESTARTSYS;
- }
- } else {
- /*
- * IF easycap_usb_disconnect()
- * HAS ALREADY FREED POINTER peasycap BEFORE THE
- * ATTEMPT TO ACQUIRE THE SEMAPHORE,
- * isdongle() WILL HAVE FAILED. BAIL OUT.
- */
- return -ERESTARTSYS;
- }
-/*---------------------------------------------------------------------------*/
- switch (cmd) {
- case SNDCTL_DSP_GETCAPS: {
- int caps;
- JOM(8, "SNDCTL_DSP_GETCAPS\n");
-
-#ifdef UPSAMPLE
- if (peasycap->microphone)
- caps = 0x04400000;
- else
- caps = 0x04400000;
-#else
- if (peasycap->microphone)
- caps = 0x02400000;
- else
- caps = 0x04400000;
-#endif /*UPSAMPLE*/
-
- if (copy_to_user((void __user *)arg, &caps, sizeof(int))) {
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- break;
- }
- case SNDCTL_DSP_GETFMTS: {
- int incoming;
- JOM(8, "SNDCTL_DSP_GETFMTS\n");
-
-#ifdef UPSAMPLE
- if (peasycap->microphone)
- incoming = AFMT_S16_LE;
- else
- incoming = AFMT_S16_LE;
-#else
- if (peasycap->microphone)
- incoming = AFMT_S16_LE;
- else
- incoming = AFMT_S16_LE;
-#endif /*UPSAMPLE*/
-
- if (copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- break;
- }
- case SNDCTL_DSP_SETFMT: {
- int incoming, outgoing;
- JOM(8, "SNDCTL_DSP_SETFMT\n");
- if (copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- JOM(8, "........... %i=incoming\n", incoming);
-
-#ifdef UPSAMPLE
- if (peasycap->microphone)
- outgoing = AFMT_S16_LE;
- else
- outgoing = AFMT_S16_LE;
-#else
- if (peasycap->microphone)
- outgoing = AFMT_S16_LE;
- else
- outgoing = AFMT_S16_LE;
-#endif /*UPSAMPLE*/
-
- if (incoming != outgoing) {
- JOM(8, "........... %i=outgoing\n", outgoing);
- JOM(8, " cf. %i=AFMT_S16_LE\n", AFMT_S16_LE);
- JOM(8, " cf. %i=AFMT_U8\n", AFMT_U8);
- if (copy_to_user((void __user *)arg, &outgoing, sizeof(int))) {
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EINVAL ;
- }
- break;
- }
- case SNDCTL_DSP_STEREO: {
- int incoming;
- JOM(8, "SNDCTL_DSP_STEREO\n");
- if (copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- JOM(8, "........... %i=incoming\n", incoming);
-
-#ifdef UPSAMPLE
- if (peasycap->microphone)
- incoming = 1;
- else
- incoming = 1;
-#else
- if (peasycap->microphone)
- incoming = 0;
- else
- incoming = 1;
-#endif /*UPSAMPLE*/
-
- if (copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- break;
- }
- case SNDCTL_DSP_SPEED: {
- int incoming;
- JOM(8, "SNDCTL_DSP_SPEED\n");
- if (copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- JOM(8, "........... %i=incoming\n", incoming);
-
-#ifdef UPSAMPLE
- if (peasycap->microphone)
- incoming = 32000;
- else
- incoming = 48000;
-#else
- if (peasycap->microphone)
- incoming = 8000;
- else
- incoming = 48000;
-#endif /*UPSAMPLE*/
-
- if (copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- break;
- }
- case SNDCTL_DSP_GETTRIGGER: {
- int incoming;
- JOM(8, "SNDCTL_DSP_GETTRIGGER\n");
- if (copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- JOM(8, "........... %i=incoming\n", incoming);
-
- incoming = PCM_ENABLE_INPUT;
- if (copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- break;
- }
- case SNDCTL_DSP_SETTRIGGER: {
- int incoming;
- JOM(8, "SNDCTL_DSP_SETTRIGGER\n");
- if (copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- JOM(8, "........... %i=incoming\n", incoming);
- JOM(8, "........... cf 0x%x=PCM_ENABLE_INPUT "
- "0x%x=PCM_ENABLE_OUTPUT\n",
- PCM_ENABLE_INPUT, PCM_ENABLE_OUTPUT);
- ;
- ;
- ;
- ;
- break;
- }
- case SNDCTL_DSP_GETBLKSIZE: {
- int incoming;
- JOM(8, "SNDCTL_DSP_GETBLKSIZE\n");
- if (copy_from_user(&incoming, (void __user *)arg, sizeof(int))) {
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- JOM(8, "........... %i=incoming\n", incoming);
- incoming = peasycap->audio_bytes_per_fragment;
- if (copy_to_user((void __user *)arg, &incoming, sizeof(int))) {
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- break;
- }
- case SNDCTL_DSP_GETISPACE: {
- struct audio_buf_info audio_buf_info;
-
- JOM(8, "SNDCTL_DSP_GETISPACE\n");
-
- audio_buf_info.bytes = peasycap->audio_bytes_per_fragment;
- audio_buf_info.fragments = 1;
- audio_buf_info.fragsize = 0;
- audio_buf_info.fragstotal = 0;
-
- if (copy_to_user((void __user *)arg, &audio_buf_info, sizeof(int))) {
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -EFAULT;
- }
- break;
- }
- case 0x00005401:
- case 0x00005402:
- case 0x00005403:
- case 0x00005404:
- case 0x00005405:
- case 0x00005406: {
- JOM(8, "SNDCTL_TMR_...: 0x%08X unsupported\n", cmd);
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -ENOIOCTLCMD;
- }
- default: {
- JOM(8, "ERROR: unrecognized DSP IOCTL command: 0x%08X\n", cmd);
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return -ENOIOCTLCMD;
- }
- }
- mutex_unlock(&easycapdc60_dongle[kd].mutex_audio);
- return 0;
-}
-/*****************************************************************************/
-
-static const struct file_operations easyoss_fops = {
- .owner = THIS_MODULE,
- .open = easyoss_open,
- .release = easyoss_release,
- .unlocked_ioctl = easyoss_unlocked_ioctl,
- .read = easyoss_read,
- .llseek = no_llseek,
-};
-struct usb_class_driver easyoss_class = {
- .name = "usb/easyoss%d",
- .fops = &easyoss_fops,
- .minor_base = USB_SKEL_MINOR_BASE,
-};
-/*****************************************************************************/
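
For reference, the heart of the file deleted above: easyoss_complete() copied each isochronous audio packet into page-sized buffers and, for the microphone input, converted 8-bit unsigned mono samples to 16-bit signed little-endian on the way (the UPSAMPLE build additionally interpolated four output steps per input byte and wrote each step twice, sixteen output bytes per input byte). A standalone sketch of the plain, non-UPSAMPLE conversion, with illustrative names and buffer sizes:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the conversion the removed easyoss_complete() performed in its
 * non-UPSAMPLE branch: each 8-bit unsigned mono sample becomes one 16-bit
 * signed little-endian sample, centred and scaled by 128.
 */
static size_t mono_u8_to_s16le(const uint8_t *src, size_t nsamples, uint8_t *dst)
{
	size_t i;

	for (i = 0; i < nsamples; i++) {
		int tmp = ((int)src[i] - 128) * 128;	/* centre, then scale */

		dst[2 * i]     = tmp & 0x00FF;		/* low byte first   */
		dst[2 * i + 1] = (tmp & 0xFF00) >> 8;	/* then high byte   */
	}
	return 2 * nsamples;				/* bytes produced   */
}

int main(void)
{
	uint8_t in[4] = { 0, 128, 192, 255 };
	uint8_t out[8];
	size_t i, n = mono_u8_to_s16le(in, 4, out);

	for (i = 0; i < n; i += 2)
		printf("%6d\n", (int16_t)(out[i] | (out[i + 1] << 8)));
	return 0;
}
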
diff --git a/drivers/staging/echo/echo.c b/drivers/staging/echo/echo.c
index c0adae1bf6d..afbf5442b42 100644
--- a/drivers/staging/echo/echo.c
+++ b/drivers/staging/echo/echo.c
@@ -276,7 +276,6 @@ error_oom:
kfree(ec);
return NULL;
}
-
EXPORT_SYMBOL_GPL(oslec_create);
void oslec_free(struct oslec_state *ec)
@@ -290,14 +289,12 @@ void oslec_free(struct oslec_state *ec)
kfree(ec->snapshot);
kfree(ec);
}
-
EXPORT_SYMBOL_GPL(oslec_free);
void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode)
{
ec->adaption_mode = adaption_mode;
}
-
EXPORT_SYMBOL_GPL(oslec_adaption_mode);
void oslec_flush(struct oslec_state *ec)
@@ -324,14 +321,12 @@ void oslec_flush(struct oslec_state *ec)
ec->curr_pos = ec->taps - 1;
ec->Pstates = 0;
}
-
EXPORT_SYMBOL_GPL(oslec_flush);
void oslec_snapshot(struct oslec_state *ec)
{
memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps * sizeof(int16_t));
}
-
EXPORT_SYMBOL_GPL(oslec_snapshot);
/* Dual Path Echo Canceller */
@@ -406,7 +401,7 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
/* efficient "out with the old and in with the new" algorithm so
we don't have to recalculate over the whole block of
samples. */
- new = (int)tx *(int)tx;
+ new = (int)tx * (int)tx;
old = (int)ec->fir_state.history[ec->fir_state.curr_pos] *
(int)ec->fir_state.history[ec->fir_state.curr_pos];
ec->Pstates +=
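
A standalone sketch of the incremental update described in the comment above: rather than re-summing the whole history each sample, subtract the square of the sample leaving the window and add the square of the one entering it. (The driver keeps a scaled per-state estimate in ec->Pstates; this illustration keeps a plain running sum over a fixed window.)

#include <stdint.h>
#include <stdio.h>

#define TAPS 8	/* window length; the canceller sizes this from ec->taps */

static int16_t history[TAPS];	/* most recent TAPS samples, circular */
static int curr_pos;
static int32_t power;		/* running sum of squares over the window */

static int32_t update_power(int16_t tx)
{
	int32_t newsq = (int32_t)tx * tx;
	int32_t oldsq = (int32_t)history[curr_pos] * history[curr_pos];

	power += newsq - oldsq;		/* out with the old, in with the new */
	history[curr_pos] = tx;		/* overwrite the departing sample    */
	curr_pos = (curr_pos + 1) % TAPS;
	return power;
}

int main(void)
{
	const int16_t samples[] = { 100, -200, 300, -400, 500 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("power after %6d: %ld\n",
		       samples[i], (long)update_power(samples[i]));
	return 0;
}
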
@@ -603,7 +598,6 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
return (int16_t) ec->clean_nlp << 1;
}
-
EXPORT_SYMBOL_GPL(oslec_update);
/* This function is separated from the echo canceller is it is usually called
@@ -628,7 +622,7 @@ EXPORT_SYMBOL_GPL(oslec_update);
giving very clean DC removal.
*/
-int16_t oslec_hpf_tx(struct oslec_state * ec, int16_t tx)
+int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx)
{
int tmp, tmp1;
@@ -657,7 +651,6 @@ int16_t oslec_hpf_tx(struct oslec_state * ec, int16_t tx)
return tx;
}
-
EXPORT_SYMBOL_GPL(oslec_hpf_tx);
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/et131x/et1310_address_map.h b/drivers/staging/et131x/et1310_address_map.h
index 425e9274f28..410677ee22b 100644
--- a/drivers/staging/et131x/et1310_address_map.h
+++ b/drivers/staging/et131x/et1310_address_map.h
@@ -267,19 +267,19 @@ struct txdma_regs { /* Location: */
u32 service_complete; /* 0x1028 */
u32 cache_rd_index; /* 0x102C */
u32 cache_wr_index; /* 0x1030 */
- u32 TxDmaError; /* 0x1034 */
- u32 DescAbortCount; /* 0x1038 */
- u32 PayloadAbortCnt; /* 0x103c */
- u32 WriteBackAbortCnt; /* 0x1040 */
- u32 DescTimeoutCnt; /* 0x1044 */
- u32 PayloadTimeoutCnt; /* 0x1048 */
- u32 WriteBackTimeoutCnt; /* 0x104c */
- u32 DescErrorCount; /* 0x1050 */
- u32 PayloadErrorCnt; /* 0x1054 */
- u32 WriteBackErrorCnt; /* 0x1058 */
- u32 DroppedTLPCount; /* 0x105c */
- u32 NewServiceComplete; /* 0x1060 */
- u32 EthernetPacketCount; /* 0x1064 */
+ u32 tx_dma_error; /* 0x1034 */
+ u32 desc_abort_cnt; /* 0x1038 */
+ u32 payload_abort_cnt; /* 0x103c */
+ u32 writeback_abort_cnt; /* 0x1040 */
+ u32 desc_timeout_cnt; /* 0x1044 */
+ u32 payload_timeout_cnt; /* 0x1048 */
+ u32 writeback_timeout_cnt; /* 0x104c */
+ u32 desc_error_cnt; /* 0x1050 */
+ u32 payload_error_cnt; /* 0x1054 */
+ u32 writeback_error_cnt; /* 0x1058 */
+ u32 dropped_tlp_cnt; /* 0x105c */
+ u32 new_service_complete; /* 0x1060 */
+ u32 ethernet_packet_cnt; /* 0x1064 */
};
/* END OF TXDMA REGISTER ADDRESS MAP */
@@ -700,42 +700,27 @@ struct txmac_regs { /* Location: */
/*
* structure for Wake On Lan Source Address Lo reg in rxmac address map
* located at address 0x4010
+ *
+ * 31-24: sa3
+ * 23-16: sa4
+ * 15-8: sa5
+ * 7-0: sa6
*/
-typedef union _RXMAC_WOL_SA_LO_t {
- u32 value;
- struct {
-#ifdef _BIT_FIELDS_HTOL
- u32 sa3:8; /* bits 24-31 */
- u32 sa4:8; /* bits 16-23 */
- u32 sa5:8; /* bits 8-15 */
- u32 sa6:8; /* bits 0-7 */
-#else
- u32 sa6:8; /* bits 0-7 */
- u32 sa5:8; /* bits 8-15 */
- u32 sa4:8; /* bits 16-23 */
- u32 sa3:8; /* bits 24-31 */
-#endif
- } bits;
-} RXMAC_WOL_SA_LO_t, *PRXMAC_WOL_SA_LO_t;
+
+#define ET_WOL_LO_SA3_SHIFT 24
+#define ET_WOL_LO_SA4_SHIFT 16
+#define ET_WOL_LO_SA5_SHIFT 8
/*
* structure for Wake On Lan Source Address Hi reg in rxmac address map
* located at address 0x4014
+ *
+ * 31-16: reserved
+ * 15-8: sa1
+ * 7-0: sa2
*/
-typedef union _RXMAC_WOL_SA_HI_t {
- u32 value;
- struct {
-#ifdef _BIT_FIELDS_HTOL
- u32 reserved:16; /* bits 16-31 */
- u32 sa1:8; /* bits 8-15 */
- u32 sa2:8; /* bits 0-7 */
-#else
- u32 sa2:8; /* bits 0-7 */
- u32 sa1:8; /* bits 8-15 */
- u32 reserved:16; /* bits 16-31 */
-#endif
- } bits;
-} RXMAC_WOL_SA_HI_t, *PRXMAC_WOL_SA_HI_t;
+
+#define ET_WOL_HI_SA1_SHIFT 8
/*
* structure for Wake On Lan mask reg in rxmac address map
@@ -746,65 +731,45 @@ typedef union _RXMAC_WOL_SA_HI_t {
/*
* structure for Unicast Paket Filter Address 1 reg in rxmac address map
* located at address 0x4068
+ *
+ * 31-24: addr1_3
+ * 23-16: addr1_4
+ * 15-8: addr1_5
+ * 7-0: addr1_6
*/
-typedef union _RXMAC_UNI_PF_ADDR1_t {
- u32 value;
- struct {
-#ifdef _BIT_FIELDS_HTOL
- u32 addr1_3:8; /* bits 24-31 */
- u32 addr1_4:8; /* bits 16-23 */
- u32 addr1_5:8; /* bits 8-15 */
- u32 addr1_6:8; /* bits 0-7 */
-#else
- u32 addr1_6:8; /* bits 0-7 */
- u32 addr1_5:8; /* bits 8-15 */
- u32 addr1_4:8; /* bits 16-23 */
- u32 addr1_3:8; /* bits 24-31 */
-#endif
- } bits;
-} RXMAC_UNI_PF_ADDR1_t, *PRXMAC_UNI_PF_ADDR1_t;
+
+#define ET_UNI_PF_ADDR1_3_SHIFT 24
+#define ET_UNI_PF_ADDR1_4_SHIFT 16
+#define ET_UNI_PF_ADDR1_5_SHIFT 8
/*
* structure for Unicast Paket Filter Address 2 reg in rxmac address map
* located at address 0x406C
+ *
+ * 31-24: addr2_3
+ * 23-16: addr2_4
+ * 15-8: addr2_5
+ * 7-0: addr2_6
*/
-typedef union _RXMAC_UNI_PF_ADDR2_t {
- u32 value;
- struct {
-#ifdef _BIT_FIELDS_HTOL
- u32 addr2_3:8; /* bits 24-31 */
- u32 addr2_4:8; /* bits 16-23 */
- u32 addr2_5:8; /* bits 8-15 */
- u32 addr2_6:8; /* bits 0-7 */
-#else
- u32 addr2_6:8; /* bits 0-7 */
- u32 addr2_5:8; /* bits 8-15 */
- u32 addr2_4:8; /* bits 16-23 */
- u32 addr2_3:8; /* bits 24-31 */
-#endif
- } bits;
-} RXMAC_UNI_PF_ADDR2_t, *PRXMAC_UNI_PF_ADDR2_t;
+
+#define ET_UNI_PF_ADDR2_3_SHIFT 24
+#define ET_UNI_PF_ADDR2_4_SHIFT 16
+#define ET_UNI_PF_ADDR2_5_SHIFT 8
/*
* structure for Unicast Paket Filter Address 1 & 2 reg in rxmac address map
* located at address 0x4070
+ *
+ * 31-24: addr2_1
+ * 23-16: addr2_2
+ * 15-8: addr1_1
+ * 7-0: addr1_2
*/
-typedef union _RXMAC_UNI_PF_ADDR3_t {
- u32 value;
- struct {
-#ifdef _BIT_FIELDS_HTOL
- u32 addr2_1:8; /* bits 24-31 */
- u32 addr2_2:8; /* bits 16-23 */
- u32 addr1_1:8; /* bits 8-15 */
- u32 addr1_2:8; /* bits 0-7 */
-#else
- u32 addr1_2:8; /* bits 0-7 */
- u32 addr1_1:8; /* bits 8-15 */
- u32 addr2_2:8; /* bits 16-23 */
- u32 addr2_1:8; /* bits 24-31 */
-#endif
- } bits;
-} RXMAC_UNI_PF_ADDR3_t, *PRXMAC_UNI_PF_ADDR3_t;
+
+#define ET_UNI_PF_ADDR2_1_SHIFT 24
+#define ET_UNI_PF_ADDR2_2_SHIFT 16
+#define ET_UNI_PF_ADDR1_1_SHIFT 8
+
/*
* structure for Multicast Hash reg in rxmac address map
@@ -888,13 +853,13 @@ typedef union _RXMAC_UNI_PF_ADDR3_t {
/*
* Rx MAC Module of JAGCore Address Mapping
*/
-typedef struct _RXMAC_t { /* Location: */
+struct rxmac_regs { /* Location: */
u32 ctrl; /* 0x4000 */
u32 crc0; /* 0x4004 */
u32 crc12; /* 0x4008 */
u32 crc34; /* 0x400C */
- RXMAC_WOL_SA_LO_t sa_lo; /* 0x4010 */
- RXMAC_WOL_SA_HI_t sa_hi; /* 0x4014 */
+ u32 sa_lo; /* 0x4010 */
+ u32 sa_hi; /* 0x4014 */
u32 mask0_word0; /* 0x4018 */
u32 mask0_word1; /* 0x401C */
u32 mask0_word2; /* 0x4020 */
@@ -915,9 +880,9 @@ typedef struct _RXMAC_t { /* Location: */
u32 mask4_word1; /* 0x405C */
u32 mask4_word2; /* 0x4060 */
u32 mask4_word3; /* 0x4064 */
- RXMAC_UNI_PF_ADDR1_t uni_pf_addr1; /* 0x4068 */
- RXMAC_UNI_PF_ADDR2_t uni_pf_addr2; /* 0x406C */
- RXMAC_UNI_PF_ADDR3_t uni_pf_addr3; /* 0x4070 */
+ u32 uni_pf_addr1; /* 0x4068 */
+ u32 uni_pf_addr2; /* 0x406C */
+ u32 uni_pf_addr3; /* 0x4070 */
u32 multi_hash1; /* 0x4074 */
u32 multi_hash2; /* 0x4078 */
u32 multi_hash3; /* 0x407C */
@@ -930,7 +895,7 @@ typedef struct _RXMAC_t { /* Location: */
u32 mif_ctrl; /* 0x4098 */
u32 err_reg; /* 0x409C */
-} RXMAC_t, *PRXMAC_t;
+};
/* END OF RXMAC REGISTER ADDRESS MAP */
@@ -1123,47 +1088,33 @@ typedef struct _RXMAC_t { /* Location: */
/*
* structure for Mac Station Address, Part 1 reg in mac address map.
* located at address 0x5040
+ *
+ * 31-24: Octet6
+ * 23-16: Octet5
+ * 15-8: Octet4
+ * 7-0: Octet3
*/
-typedef union _MAC_STATION_ADDR1_t {
- u32 value;
- struct {
-#ifdef _BIT_FIELDS_HTOL
- u32 Octet6:8; /* bits 24-31 */
- u32 Octet5:8; /* bits 16-23 */
- u32 Octet4:8; /* bits 8-15 */
- u32 Octet3:8; /* bits 0-7 */
-#else
- u32 Octet3:8; /* bits 0-7 */
- u32 Octet4:8; /* bits 8-15 */
- u32 Octet5:8; /* bits 16-23 */
- u32 Octet6:8; /* bits 24-31 */
-#endif
- } bits;
-} MAC_STATION_ADDR1_t, *PMAC_STATION_ADDR1_t;
+
+#define ET_MAC_STATION_ADDR1_OC6_SHIFT 24
+#define ET_MAC_STATION_ADDR1_OC5_SHIFT 16
+#define ET_MAC_STATION_ADDR1_OC4_SHIFT 8
/*
* structure for Mac Station Address, Part 2 reg in mac address map.
* located at address 0x5044
+ *
+ * 31-24: Octet2
+ * 23-16: Octet1
+ * 15-0: reserved
*/
-typedef union _MAC_STATION_ADDR2_t {
- u32 value;
- struct {
-#ifdef _BIT_FIELDS_HTOL
- u32 Octet2:8; /* bits 24-31 */
- u32 Octet1:8; /* bits 16-23 */
- u32 reserved:16; /* bits 0-15 */
-#else
- u32 reserved:16; /* bit 0-15 */
- u32 Octet1:8; /* bits 16-23 */
- u32 Octet2:8; /* bits 24-31 */
-#endif
- } bits;
-} MAC_STATION_ADDR2_t, *PMAC_STATION_ADDR2_t;
+
+#define ET_MAC_STATION_ADDR2_OC2_SHIFT 24
+#define ET_MAC_STATION_ADDR2_OC1_SHIFT 16
/*
* MAC Module of JAGCore Address Mapping
*/
-typedef struct _MAC_t { /* Location: */
+struct mac_regs { /* Location: */
u32 cfg1; /* 0x5000 */
u32 cfg2; /* 0x5004 */
u32 ipg; /* 0x5008 */
@@ -1180,9 +1131,9 @@ typedef struct _MAC_t { /* Location: */
u32 mii_mgmt_indicator; /* 0x5034 */
u32 if_ctrl; /* 0x5038 */
u32 if_stat; /* 0x503C */
- MAC_STATION_ADDR1_t station_addr_1; /* 0x5040 */
- MAC_STATION_ADDR2_t station_addr_2; /* 0x5044 */
-} MAC_t, *PMAC_t;
+ u32 station_addr_1; /* 0x5040 */
+ u32 station_addr_2; /* 0x5044 */
+};
/* END OF MAC REGISTER ADDRESS MAP */
@@ -1253,148 +1204,148 @@ struct macstat_regs { /* Location: */
u32 pad[32]; /* 0x6000 - 607C */
/* Tx/Rx 0-64 Byte Frame Counter */
- u32 TR64; /* 0x6080 */
+ u32 txrx_0_64_byte_frames; /* 0x6080 */
/* Tx/Rx 65-127 Byte Frame Counter */
- u32 TR127; /* 0x6084 */
+ u32 txrx_65_127_byte_frames; /* 0x6084 */
/* Tx/Rx 128-255 Byte Frame Counter */
- u32 TR255; /* 0x6088 */
+ u32 txrx_128_255_byte_frames; /* 0x6088 */
/* Tx/Rx 256-511 Byte Frame Counter */
- u32 TR511; /* 0x608C */
+ u32 txrx_256_511_byte_frames; /* 0x608C */
/* Tx/Rx 512-1023 Byte Frame Counter */
- u32 TR1K; /* 0x6090 */
+ u32 txrx_512_1023_byte_frames; /* 0x6090 */
/* Tx/Rx 1024-1518 Byte Frame Counter */
- u32 TRMax; /* 0x6094 */
+ u32 txrx_1024_1518_byte_frames; /* 0x6094 */
/* Tx/Rx 1519-1522 Byte Good VLAN Frame Count */
- u32 TRMgv; /* 0x6098 */
+ u32 txrx_1519_1522_gvln_frames; /* 0x6098 */
/* Rx Byte Counter */
- u32 RByt; /* 0x609C */
+ u32 rx_bytes; /* 0x609C */
/* Rx Packet Counter */
- u32 RPkt; /* 0x60A0 */
+ u32 rx_packets; /* 0x60A0 */
/* Rx FCS Error Counter */
- u32 RFcs; /* 0x60A4 */
+ u32 rx_fcs_errs; /* 0x60A4 */
/* Rx Multicast Packet Counter */
- u32 RMca; /* 0x60A8 */
+ u32 rx_multicast_packets; /* 0x60A8 */
/* Rx Broadcast Packet Counter */
- u32 RBca; /* 0x60AC */
+ u32 rx_broadcast_packets; /* 0x60AC */
/* Rx Control Frame Packet Counter */
- u32 RxCf; /* 0x60B0 */
+ u32 rx_control_frames; /* 0x60B0 */
/* Rx Pause Frame Packet Counter */
- u32 RxPf; /* 0x60B4 */
+ u32 rx_pause_frames; /* 0x60B4 */
/* Rx Unknown OP Code Counter */
- u32 RxUo; /* 0x60B8 */
+ u32 rx_unknown_opcodes; /* 0x60B8 */
/* Rx Alignment Error Counter */
- u32 RAln; /* 0x60BC */
+ u32 rx_align_errs; /* 0x60BC */
/* Rx Frame Length Error Counter */
- u32 RFlr; /* 0x60C0 */
+ u32 rx_frame_len_errs; /* 0x60C0 */
/* Rx Code Error Counter */
- u32 RCde; /* 0x60C4 */
+ u32 rx_code_errs; /* 0x60C4 */
/* Rx Carrier Sense Error Counter */
- u32 RCse; /* 0x60C8 */
+ u32 rx_carrier_sense_errs; /* 0x60C8 */
/* Rx Undersize Packet Counter */
- u32 RUnd; /* 0x60CC */
+ u32 rx_undersize_packets; /* 0x60CC */
/* Rx Oversize Packet Counter */
- u32 ROvr; /* 0x60D0 */
+ u32 rx_oversize_packets; /* 0x60D0 */
/* Rx Fragment Counter */
- u32 RFrg; /* 0x60D4 */
+ u32 rx_fragment_packets; /* 0x60D4 */
/* Rx Jabber Counter */
- u32 RJbr; /* 0x60D8 */
+ u32 rx_jabbers; /* 0x60D8 */
/* Rx Drop */
- u32 RDrp; /* 0x60DC */
+ u32 rx_drops; /* 0x60DC */
/* Tx Byte Counter */
- u32 TByt; /* 0x60E0 */
+ u32 tx_bytes; /* 0x60E0 */
/* Tx Packet Counter */
- u32 TPkt; /* 0x60E4 */
+ u32 tx_packets; /* 0x60E4 */
/* Tx Multicast Packet Counter */
- u32 TMca; /* 0x60E8 */
+ u32 tx_multicast_packets; /* 0x60E8 */
/* Tx Broadcast Packet Counter */
- u32 TBca; /* 0x60EC */
+ u32 tx_broadcast_packets; /* 0x60EC */
/* Tx Pause Control Frame Counter */
- u32 TxPf; /* 0x60F0 */
+ u32 tx_pause_frames; /* 0x60F0 */
/* Tx Deferral Packet Counter */
- u32 TDfr; /* 0x60F4 */
+ u32 tx_deferred; /* 0x60F4 */
/* Tx Excessive Deferral Packet Counter */
- u32 TEdf; /* 0x60F8 */
+ u32 tx_excessive_deferred; /* 0x60F8 */
/* Tx Single Collision Packet Counter */
- u32 TScl; /* 0x60FC */
+ u32 tx_single_collisions; /* 0x60FC */
/* Tx Multiple Collision Packet Counter */
- u32 TMcl; /* 0x6100 */
+ u32 tx_multiple_collisions; /* 0x6100 */
/* Tx Late Collision Packet Counter */
- u32 TLcl; /* 0x6104 */
+ u32 tx_late_collisions; /* 0x6104 */
/* Tx Excessive Collision Packet Counter */
- u32 TXcl; /* 0x6108 */
+ u32 tx_excessive_collisions; /* 0x6108 */
/* Tx Total Collision Packet Counter */
- u32 TNcl; /* 0x610C */
+ u32 tx_total_collisions; /* 0x610C */
/* Tx Pause Frame Honored Counter */
- u32 TPfh; /* 0x6110 */
+ u32 tx_pause_honored_frames; /* 0x6110 */
/* Tx Drop Frame Counter */
- u32 TDrp; /* 0x6114 */
+ u32 tx_drops; /* 0x6114 */
/* Tx Jabber Frame Counter */
- u32 TJbr; /* 0x6118 */
+ u32 tx_jabbers; /* 0x6118 */
/* Tx FCS Error Counter */
- u32 TFcs; /* 0x611C */
+ u32 tx_fcs_errs; /* 0x611C */
/* Tx Control Frame Counter */
- u32 TxCf; /* 0x6120 */
+ u32 tx_control_frames; /* 0x6120 */
/* Tx Oversize Frame Counter */
- u32 TOvr; /* 0x6124 */
+ u32 tx_oversize_frames; /* 0x6124 */
/* Tx Undersize Frame Counter */
- u32 TUnd; /* 0x6128 */
+ u32 tx_undersize_frames; /* 0x6128 */
/* Tx Fragments Frame Counter */
- u32 TFrg; /* 0x612C */
+ u32 tx_fragments; /* 0x612C */
/* Carry Register One Register */
- u32 Carry1; /* 0x6130 */
+ u32 carry_reg1; /* 0x6130 */
/* Carry Register Two Register */
- u32 Carry2; /* 0x6134 */
+ u32 carry_reg2; /* 0x6134 */
/* Carry Register One Mask Register */
- u32 Carry1M; /* 0x6138 */
+ u32 carry_reg1_mask; /* 0x6138 */
/* Carry Register Two Mask Register */
- u32 Carry2M; /* 0x613C */
+ u32 carry_reg2_mask; /* 0x613C */
};
/* END OF MAC STAT REGISTER ADDRESS MAP */
@@ -1448,7 +1399,7 @@ struct mmc_regs { /* Location: */
/*
* JAGCore Address Mapping
*/
-typedef struct _ADDRESS_MAP_t {
+struct address_map {
struct global_regs global;
/* unused section of global address map */
u8 unused_global[4096 - sizeof(struct global_regs)];
@@ -1461,12 +1412,12 @@ typedef struct _ADDRESS_MAP_t {
struct txmac_regs txmac;
/* unused section of txmac address map */
u8 unused_txmac[4096 - sizeof(struct txmac_regs)];
- RXMAC_t rxmac;
+ struct rxmac_regs rxmac;
/* unused section of rxmac address map */
- u8 unused_rxmac[4096 - sizeof(RXMAC_t)];
- MAC_t mac;
+ u8 unused_rxmac[4096 - sizeof(struct rxmac_regs)];
+ struct mac_regs mac;
/* unused section of mac address map */
- u8 unused_mac[4096 - sizeof(MAC_t)];
+ u8 unused_mac[4096 - sizeof(struct mac_regs)];
struct macstat_regs macstat;
/* unused section of mac stat address map */
u8 unused_mac_stat[4096 - sizeof(struct macstat_regs)];
@@ -1478,6 +1429,6 @@ typedef struct _ADDRESS_MAP_t {
u8 unused_exp_rom[4096]; /* MGS-size TBD */
u8 unused__[524288]; /* unused section of address map */
-} ADDRESS_MAP_t, *PADDRESS_MAP_t;
+};
#endif /* _ET1310_ADDRESS_MAP_H_ */
diff --git a/drivers/staging/et131x/et1310_mac.c b/drivers/staging/et131x/et1310_mac.c
index 78f72fa5d5e..656be4b99cf 100644
--- a/drivers/staging/et131x/et1310_mac.c
+++ b/drivers/staging/et131x/et1310_mac.c
@@ -104,9 +104,9 @@
*/
void ConfigMACRegs1(struct et131x_adapter *etdev)
{
- struct _MAC_t __iomem *pMac = &etdev->regs->mac;
- MAC_STATION_ADDR1_t station1;
- MAC_STATION_ADDR2_t station2;
+ struct mac_regs __iomem *pMac = &etdev->regs->mac;
+ u32 station1;
+ u32 station2;
u32 ipg;
/* First we need to reset everything. Write to MAC configuration
@@ -136,14 +136,14 @@ void ConfigMACRegs1(struct et131x_adapter *etdev)
* station address is used for generating and checking pause control
* packets.
*/
- station2.bits.Octet1 = etdev->addr[0];
- station2.bits.Octet2 = etdev->addr[1];
- station1.bits.Octet3 = etdev->addr[2];
- station1.bits.Octet4 = etdev->addr[3];
- station1.bits.Octet5 = etdev->addr[4];
- station1.bits.Octet6 = etdev->addr[5];
- writel(station1.value, &pMac->station_addr_1.value);
- writel(station2.value, &pMac->station_addr_2.value);
+ station2 = (etdev->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
+ (etdev->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
+ station1 = (etdev->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
+ (etdev->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
+ (etdev->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
+ etdev->addr[2];
+ writel(station1, &pMac->station_addr_1);
+ writel(station2, &pMac->station_addr_2);
/* Max ethernet packet in bytes that will passed by the mac without
* being truncated. Allow the MAC to pass 4 more than our max packet
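
The converted code above now assembles the two station-address register words with plain shifts instead of the removed MAC_STATION_ADDR1/2 bitfield unions. A standalone sketch of the same packing, using a hypothetical helper name; the shift values are the ones added to et1310_address_map.h:

#include <stdint.h>
#include <stdio.h>

/* Shift definitions introduced by this patch (copied from the header). */
#define ET_MAC_STATION_ADDR1_OC6_SHIFT	24
#define ET_MAC_STATION_ADDR1_OC5_SHIFT	16
#define ET_MAC_STATION_ADDR1_OC4_SHIFT	8
#define ET_MAC_STATION_ADDR2_OC2_SHIFT	24
#define ET_MAC_STATION_ADDR2_OC1_SHIFT	16

/*
 * Hypothetical helper mirroring ConfigMACRegs1(): octets 3-6 of the MAC
 * address go into station_addr_1, octets 1-2 into the top half of
 * station_addr_2 (its low 16 bits are reserved).
 */
static void pack_station_addr(const uint8_t addr[6],
			      uint32_t *station1, uint32_t *station2)
{
	*station1 = ((uint32_t)addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		    ((uint32_t)addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		    ((uint32_t)addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		    addr[2];
	*station2 = ((uint32_t)addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		    ((uint32_t)addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x10, 0x4b, 0xaa, 0xbb, 0xcc };
	uint32_t s1, s2;

	pack_station_addr(mac, &s1, &s2);
	printf("station_addr_1 = 0x%08x\n", (unsigned int)s1);	/* 0xccbbaa4b */
	printf("station_addr_2 = 0x%08x\n", (unsigned int)s2);	/* 0x10000000 */
	return 0;
}

Unlike the removed #ifdef _BIT_FIELDS_HTOL unions, the shift form does not depend on how the compiler lays out bitfields; byte order on the bus remains the business of writel().
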
@@ -165,7 +165,7 @@ void ConfigMACRegs1(struct et131x_adapter *etdev)
void ConfigMACRegs2(struct et131x_adapter *etdev)
{
int32_t delay = 0;
- struct _MAC_t __iomem *pMac = &etdev->regs->mac;
+ struct mac_regs __iomem *pMac = &etdev->regs->mac;
u32 cfg1;
u32 cfg2;
u32 ifctrl;
@@ -229,7 +229,7 @@ void ConfigMACRegs2(struct et131x_adapter *etdev)
writel(ctl, &etdev->regs->txmac.ctl);
/* Ready to start the RXDMA/TXDMA engine */
- if (etdev->Flags & fMP_ADAPTER_LOWER_POWER) {
+ if (etdev->flags & fMP_ADAPTER_LOWER_POWER) {
et131x_rx_dma_enable(etdev);
et131x_tx_dma_enable(etdev);
}
@@ -237,9 +237,9 @@ void ConfigMACRegs2(struct et131x_adapter *etdev)
void ConfigRxMacRegs(struct et131x_adapter *etdev)
{
- struct _RXMAC_t __iomem *pRxMac = &etdev->regs->rxmac;
- RXMAC_WOL_SA_LO_t sa_lo;
- RXMAC_WOL_SA_HI_t sa_hi;
+ struct rxmac_regs __iomem *pRxMac = &etdev->regs->rxmac;
+ u32 sa_lo;
+ u32 sa_hi = 0;
u32 pf_ctrl = 0;
/* Disable the MAC while it is being configured (also disable WOL) */
@@ -280,15 +280,15 @@ void ConfigRxMacRegs(struct et131x_adapter *etdev)
writel(0, &pRxMac->mask4_word3);
/* Lets setup the WOL Source Address */
- sa_lo.bits.sa3 = etdev->addr[2];
- sa_lo.bits.sa4 = etdev->addr[3];
- sa_lo.bits.sa5 = etdev->addr[4];
- sa_lo.bits.sa6 = etdev->addr[5];
- writel(sa_lo.value, &pRxMac->sa_lo.value);
+ sa_lo = (etdev->addr[2] << ET_WOL_LO_SA3_SHIFT) |
+ (etdev->addr[3] << ET_WOL_LO_SA4_SHIFT) |
+ (etdev->addr[4] << ET_WOL_LO_SA5_SHIFT) |
+ etdev->addr[5];
+ writel(sa_lo, &pRxMac->sa_lo);
- sa_hi.bits.sa1 = etdev->addr[0];
- sa_hi.bits.sa2 = etdev->addr[1];
- writel(sa_hi.value, &pRxMac->sa_hi.value);
+ sa_hi = (u32) (etdev->addr[0] << ET_WOL_HI_SA1_SHIFT) |
+ etdev->addr[1];
+ writel(sa_hi, &pRxMac->sa_hi);
/* Disable all Packet Filtering */
writel(0, &pRxMac->pf_ctrl);
@@ -298,9 +298,9 @@ void ConfigRxMacRegs(struct et131x_adapter *etdev)
SetupDeviceForUnicast(etdev);
pf_ctrl |= 4; /* Unicast filter */
} else {
- writel(0, &pRxMac->uni_pf_addr1.value);
- writel(0, &pRxMac->uni_pf_addr2.value);
- writel(0, &pRxMac->uni_pf_addr3.value);
+ writel(0, &pRxMac->uni_pf_addr1);
+ writel(0, &pRxMac->uni_pf_addr2);
+ writel(0, &pRxMac->uni_pf_addr3);
}
/* Let's initialize the Multicast hash */
@@ -384,31 +384,64 @@ void ConfigMacStatRegs(struct et131x_adapter *etdev)
struct macstat_regs __iomem *macstat =
&etdev->regs->macstat;
- /* Next we need to initialize all the MAC_STAT registers to zero on
+ /* Next we need to initialize all the macstat registers to zero on
* the device.
*/
- writel(0, &macstat->RFcs);
- writel(0, &macstat->RAln);
- writel(0, &macstat->RFlr);
- writel(0, &macstat->RDrp);
- writel(0, &macstat->RCde);
- writel(0, &macstat->ROvr);
- writel(0, &macstat->RFrg);
-
- writel(0, &macstat->TScl);
- writel(0, &macstat->TDfr);
- writel(0, &macstat->TMcl);
- writel(0, &macstat->TLcl);
- writel(0, &macstat->TNcl);
- writel(0, &macstat->TOvr);
- writel(0, &macstat->TUnd);
+ writel(0, &macstat->txrx_0_64_byte_frames);
+ writel(0, &macstat->txrx_65_127_byte_frames);
+ writel(0, &macstat->txrx_128_255_byte_frames);
+ writel(0, &macstat->txrx_256_511_byte_frames);
+ writel(0, &macstat->txrx_512_1023_byte_frames);
+ writel(0, &macstat->txrx_1024_1518_byte_frames);
+ writel(0, &macstat->txrx_1519_1522_gvln_frames);
+
+ writel(0, &macstat->rx_bytes);
+ writel(0, &macstat->rx_packets);
+ writel(0, &macstat->rx_fcs_errs);
+ writel(0, &macstat->rx_multicast_packets);
+ writel(0, &macstat->rx_broadcast_packets);
+ writel(0, &macstat->rx_control_frames);
+ writel(0, &macstat->rx_pause_frames);
+ writel(0, &macstat->rx_unknown_opcodes);
+ writel(0, &macstat->rx_align_errs);
+ writel(0, &macstat->rx_frame_len_errs);
+ writel(0, &macstat->rx_code_errs);
+ writel(0, &macstat->rx_carrier_sense_errs);
+ writel(0, &macstat->rx_undersize_packets);
+ writel(0, &macstat->rx_oversize_packets);
+ writel(0, &macstat->rx_fragment_packets);
+ writel(0, &macstat->rx_jabbers);
+ writel(0, &macstat->rx_drops);
+
+ writel(0, &macstat->tx_bytes);
+ writel(0, &macstat->tx_packets);
+ writel(0, &macstat->tx_multicast_packets);
+ writel(0, &macstat->tx_broadcast_packets);
+ writel(0, &macstat->tx_pause_frames);
+ writel(0, &macstat->tx_deferred);
+ writel(0, &macstat->tx_excessive_deferred);
+ writel(0, &macstat->tx_single_collisions);
+ writel(0, &macstat->tx_multiple_collisions);
+ writel(0, &macstat->tx_late_collisions);
+ writel(0, &macstat->tx_excessive_collisions);
+ writel(0, &macstat->tx_total_collisions);
+ writel(0, &macstat->tx_pause_honored_frames);
+ writel(0, &macstat->tx_drops);
+ writel(0, &macstat->tx_jabbers);
+ writel(0, &macstat->tx_fcs_errs);
+ writel(0, &macstat->tx_control_frames);
+ writel(0, &macstat->tx_oversize_frames);
+ writel(0, &macstat->tx_undersize_frames);
+ writel(0, &macstat->tx_fragments);
+ writel(0, &macstat->carry_reg1);
+ writel(0, &macstat->carry_reg2);
/* Unmask any counters that we want to track the overflow of.
* Initially this will be all counters. It may become clear later
* that we do not need to track all counters.
*/
- writel(0xFFFFBE32, &macstat->Carry1M);
- writel(0xFFFE7E8B, &macstat->Carry2M);
+ writel(0xFFFFBE32, &macstat->carry_reg1_mask);
+ writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}
void ConfigFlowControl(struct et131x_adapter *etdev)
@@ -452,26 +485,26 @@ void ConfigFlowControl(struct et131x_adapter *etdev)
*/
void UpdateMacStatHostCounters(struct et131x_adapter *etdev)
{
- struct _ce_stats_t *stats = &etdev->Stats;
+ struct ce_stats *stats = &etdev->stats;
struct macstat_regs __iomem *macstat =
&etdev->regs->macstat;
- stats->collisions += readl(&macstat->TNcl);
- stats->first_collision += readl(&macstat->TScl);
- stats->tx_deferred += readl(&macstat->TDfr);
- stats->excessive_collisions += readl(&macstat->TMcl);
- stats->late_collisions += readl(&macstat->TLcl);
- stats->tx_uflo += readl(&macstat->TUnd);
- stats->max_pkt_error += readl(&macstat->TOvr);
-
- stats->alignment_err += readl(&macstat->RAln);
- stats->crc_err += readl(&macstat->RCde);
- stats->norcvbuf += readl(&macstat->RDrp);
- stats->rx_ov_flow += readl(&macstat->ROvr);
- stats->code_violations += readl(&macstat->RFcs);
- stats->length_err += readl(&macstat->RFlr);
-
- stats->other_errors += readl(&macstat->RFrg);
+ stats->collisions += readl(&macstat->tx_total_collisions);
+ stats->first_collision += readl(&macstat->tx_single_collisions);
+ stats->tx_deferred += readl(&macstat->tx_deferred);
+ stats->excessive_collisions += readl(&macstat->tx_multiple_collisions);
+ stats->late_collisions += readl(&macstat->tx_late_collisions);
+ stats->tx_uflo += readl(&macstat->tx_undersize_frames);
+ stats->max_pkt_error += readl(&macstat->tx_oversize_frames);
+
+ stats->alignment_err += readl(&macstat->rx_align_errs);
+ stats->crc_err += readl(&macstat->rx_code_errs);
+ stats->norcvbuf += readl(&macstat->rx_drops);
+ stats->rx_ov_flow += readl(&macstat->rx_oversize_packets);
+ stats->code_violations += readl(&macstat->rx_fcs_errs);
+ stats->length_err += readl(&macstat->rx_frame_len_errs);
+
+ stats->other_errors += readl(&macstat->rx_fragment_packets);
}
/**
@@ -484,17 +517,17 @@ void UpdateMacStatHostCounters(struct et131x_adapter *etdev)
*/
void HandleMacStatInterrupt(struct et131x_adapter *etdev)
{
- u32 Carry1;
- u32 Carry2;
+ u32 carry_reg1;
+ u32 carry_reg2;
/* Read the interrupt bits from the register(s). These are Clear On
* Write.
*/
- Carry1 = readl(&etdev->regs->macstat.Carry1);
- Carry2 = readl(&etdev->regs->macstat.Carry2);
+ carry_reg1 = readl(&etdev->regs->macstat.carry_reg1);
+ carry_reg2 = readl(&etdev->regs->macstat.carry_reg2);
- writel(Carry1, &etdev->regs->macstat.Carry1);
- writel(Carry2, &etdev->regs->macstat.Carry2);
+ writel(carry_reg1, &etdev->regs->macstat.carry_reg1);
+ writel(carry_reg2, &etdev->regs->macstat.carry_reg2);
/* We need to update the host copy of all the MAC_STAT counters.
* For each counter, check its overflow bit. If the overflow bit is
@@ -502,39 +535,39 @@ void HandleMacStatInterrupt(struct et131x_adapter *etdev)
* revolution of the counter. This routine is called when the counter
* block indicates that one of the counters has wrapped.
*/
- if (Carry1 & (1 << 14))
- etdev->Stats.code_violations += COUNTER_WRAP_16_BIT;
- if (Carry1 & (1 << 8))
- etdev->Stats.alignment_err += COUNTER_WRAP_12_BIT;
- if (Carry1 & (1 << 7))
- etdev->Stats.length_err += COUNTER_WRAP_16_BIT;
- if (Carry1 & (1 << 2))
- etdev->Stats.other_errors += COUNTER_WRAP_16_BIT;
- if (Carry1 & (1 << 6))
- etdev->Stats.crc_err += COUNTER_WRAP_16_BIT;
- if (Carry1 & (1 << 3))
- etdev->Stats.rx_ov_flow += COUNTER_WRAP_16_BIT;
- if (Carry1 & (1 << 0))
- etdev->Stats.norcvbuf += COUNTER_WRAP_16_BIT;
- if (Carry2 & (1 << 16))
- etdev->Stats.max_pkt_error += COUNTER_WRAP_12_BIT;
- if (Carry2 & (1 << 15))
- etdev->Stats.tx_uflo += COUNTER_WRAP_12_BIT;
- if (Carry2 & (1 << 6))
- etdev->Stats.first_collision += COUNTER_WRAP_12_BIT;
- if (Carry2 & (1 << 8))
- etdev->Stats.tx_deferred += COUNTER_WRAP_12_BIT;
- if (Carry2 & (1 << 5))
- etdev->Stats.excessive_collisions += COUNTER_WRAP_12_BIT;
- if (Carry2 & (1 << 4))
- etdev->Stats.late_collisions += COUNTER_WRAP_12_BIT;
- if (Carry2 & (1 << 2))
- etdev->Stats.collisions += COUNTER_WRAP_12_BIT;
+ if (carry_reg1 & (1 << 14))
+ etdev->stats.code_violations += COUNTER_WRAP_16_BIT;
+ if (carry_reg1 & (1 << 8))
+ etdev->stats.alignment_err += COUNTER_WRAP_12_BIT;
+ if (carry_reg1 & (1 << 7))
+ etdev->stats.length_err += COUNTER_WRAP_16_BIT;
+ if (carry_reg1 & (1 << 2))
+ etdev->stats.other_errors += COUNTER_WRAP_16_BIT;
+ if (carry_reg1 & (1 << 6))
+ etdev->stats.crc_err += COUNTER_WRAP_16_BIT;
+ if (carry_reg1 & (1 << 3))
+ etdev->stats.rx_ov_flow += COUNTER_WRAP_16_BIT;
+ if (carry_reg1 & (1 << 0))
+ etdev->stats.norcvbuf += COUNTER_WRAP_16_BIT;
+ if (carry_reg2 & (1 << 16))
+ etdev->stats.max_pkt_error += COUNTER_WRAP_12_BIT;
+ if (carry_reg2 & (1 << 15))
+ etdev->stats.tx_uflo += COUNTER_WRAP_12_BIT;
+ if (carry_reg2 & (1 << 6))
+ etdev->stats.first_collision += COUNTER_WRAP_12_BIT;
+ if (carry_reg2 & (1 << 8))
+ etdev->stats.tx_deferred += COUNTER_WRAP_12_BIT;
+ if (carry_reg2 & (1 << 5))
+ etdev->stats.excessive_collisions += COUNTER_WRAP_12_BIT;
+ if (carry_reg2 & (1 << 4))
+ etdev->stats.late_collisions += COUNTER_WRAP_12_BIT;
+ if (carry_reg2 & (1 << 2))
+ etdev->stats.collisions += COUNTER_WRAP_12_BIT;
}
void SetupDeviceForMulticast(struct et131x_adapter *etdev)
{
- struct _RXMAC_t __iomem *rxmac = &etdev->regs->rxmac;
+ struct rxmac_regs __iomem *rxmac = &etdev->regs->rxmac;
uint32_t nIndex;
uint32_t result;
uint32_t hash1 = 0;
@@ -582,10 +615,10 @@ void SetupDeviceForMulticast(struct et131x_adapter *etdev)
void SetupDeviceForUnicast(struct et131x_adapter *etdev)
{
- struct _RXMAC_t __iomem *rxmac = &etdev->regs->rxmac;
- RXMAC_UNI_PF_ADDR1_t uni_pf1;
- RXMAC_UNI_PF_ADDR2_t uni_pf2;
- RXMAC_UNI_PF_ADDR3_t uni_pf3;
+ struct rxmac_regs __iomem *rxmac = &etdev->regs->rxmac;
+ u32 uni_pf1;
+ u32 uni_pf2;
+ u32 uni_pf3;
u32 pm_csr;
/* Set up unicast packet filter reg 3 to be the first two octets of
@@ -597,25 +630,25 @@ void SetupDeviceForUnicast(struct et131x_adapter *etdev)
* Set up unicast packet filter reg 3 to be the octets 2 - 5 of the
* MAC address for first address
*/
- uni_pf3.bits.addr1_1 = etdev->addr[0];
- uni_pf3.bits.addr1_2 = etdev->addr[1];
- uni_pf3.bits.addr2_1 = etdev->addr[0];
- uni_pf3.bits.addr2_2 = etdev->addr[1];
+ uni_pf3 = (etdev->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
+ (etdev->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
+ (etdev->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
+ etdev->addr[1];
- uni_pf2.bits.addr2_3 = etdev->addr[2];
- uni_pf2.bits.addr2_4 = etdev->addr[3];
- uni_pf2.bits.addr2_5 = etdev->addr[4];
- uni_pf2.bits.addr2_6 = etdev->addr[5];
+ uni_pf2 = (etdev->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
+ (etdev->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
+ (etdev->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
+ etdev->addr[5];
- uni_pf1.bits.addr1_3 = etdev->addr[2];
- uni_pf1.bits.addr1_4 = etdev->addr[3];
- uni_pf1.bits.addr1_5 = etdev->addr[4];
- uni_pf1.bits.addr1_6 = etdev->addr[5];
+ uni_pf1 = (etdev->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
+ (etdev->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
+ (etdev->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
+ etdev->addr[5];
pm_csr = readl(&etdev->regs->global.pm_csr);
if ((pm_csr & ET_PM_PHY_SW_COMA) == 0) {
- writel(uni_pf1.value, &rxmac->uni_pf_addr1.value);
- writel(uni_pf2.value, &rxmac->uni_pf_addr2.value);
- writel(uni_pf3.value, &rxmac->uni_pf_addr3.value);
+ writel(uni_pf1, &rxmac->uni_pf_addr1);
+ writel(uni_pf2, &rxmac->uni_pf_addr2);
+ writel(uni_pf3, &rxmac->uni_pf_addr3);
}
}
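
For reference, the station-address hunks above replace the MAC_STATION_ADDR bitfield unions with explicit shift-and-or packing into plain u32 register words. A minimal standalone sketch of that idiom follows; the octet layout mirrors the code above, but the shift values are assumptions made for the illustration and stand in for the driver's ET_MAC_STATION_ADDR*_SHIFT definitions.

#include <stdint.h>
#include <stdio.h>

#define ADDR2_OC1_SHIFT 24      /* assumed value for the sketch */
#define ADDR2_OC2_SHIFT 16      /* assumed value for the sketch */
#define ADDR1_OC4_SHIFT 8       /* assumed value for the sketch */
#define ADDR1_OC5_SHIFT 16      /* assumed value for the sketch */
#define ADDR1_OC6_SHIFT 24      /* assumed value for the sketch */

int main(void)
{
        const uint8_t addr[6] = { 0x00, 0x0a, 0x4b, 0x01, 0x02, 0x03 };
        uint32_t station1, station2;

        /* Octets 1-2 land in station address register 2 ... */
        station2 = ((uint32_t)addr[1] << ADDR2_OC2_SHIFT) |
                   ((uint32_t)addr[0] << ADDR2_OC1_SHIFT);
        /* ... and octets 3-6 in station address register 1. */
        station1 = ((uint32_t)addr[5] << ADDR1_OC6_SHIFT) |
                   ((uint32_t)addr[4] << ADDR1_OC5_SHIFT) |
                   ((uint32_t)addr[3] << ADDR1_OC4_SHIFT) |
                   addr[2];

        printf("station1 = 0x%08x\n", (unsigned)station1);  /* 0x0302014b */
        printf("station2 = 0x%08x\n", (unsigned)station2);  /* 0x000a0000 */
        return 0;
}
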
diff --git a/drivers/staging/et131x/et1310_phy.c b/drivers/staging/et131x/et1310_phy.c
index 2798a2ff612..0bcb7fb6e2c 100644
--- a/drivers/staging/et131x/et1310_phy.c
+++ b/drivers/staging/et131x/et1310_phy.c
@@ -99,7 +99,7 @@ static void et131x_xcvr_init(struct et131x_adapter *etdev);
/**
* PhyMiRead - Read from the PHY through the MII Interface on the MAC
* @etdev: pointer to our private adapter structure
- * @xcvrAddr: the address of the transciever
+ * @xcvrAddr: the address of the transceiver
* @xcvrReg: the register to read
* @value: pointer to a 16-bit value in which the value will be stored
*
@@ -108,7 +108,7 @@ static void et131x_xcvr_init(struct et131x_adapter *etdev);
int PhyMiRead(struct et131x_adapter *etdev, u8 xcvrAddr,
u8 xcvrReg, u16 *value)
{
- struct _MAC_t __iomem *mac = &etdev->regs->mac;
+ struct mac_regs __iomem *mac = &etdev->regs->mac;
int status = 0;
u32 delay;
u32 miiAddr;
@@ -176,9 +176,9 @@ int PhyMiRead(struct et131x_adapter *etdev, u8 xcvrAddr,
*/
int MiWrite(struct et131x_adapter *etdev, u8 xcvrReg, u16 value)
{
- struct _MAC_t __iomem *mac = &etdev->regs->mac;
+ struct mac_regs __iomem *mac = &etdev->regs->mac;
int status = 0;
- u8 xcvrAddr = etdev->Stats.xcvr_addr;
+ u8 xcvrAddr = etdev->stats.xcvr_addr;
u32 delay;
u32 miiAddr;
u32 miiCmd;
@@ -259,8 +259,8 @@ int et131x_xcvr_find(struct et131x_adapter *etdev)
xcvr_id = (u32) ((idr1 << 16) | idr2);
if (idr1 != 0 && idr1 != 0xffff) {
- etdev->Stats.xcvr_id = xcvr_id;
- etdev->Stats.xcvr_addr = xcvr_addr;
+ etdev->stats.xcvr_id = xcvr_id;
+ etdev->stats.xcvr_addr = xcvr_addr;
return 0;
}
}
@@ -582,7 +582,7 @@ static void et131x_xcvr_init(struct et131x_adapter *etdev)
u16 lcr2;
/* Zero out the adapter structure variable representing BMSR */
- etdev->Bmsr.value = 0;
+ etdev->bmsr = 0;
MiRead(etdev, (u8) offsetof(struct mi_regs, isr), &isr);
MiRead(etdev, (u8) offsetof(struct mi_regs, imr), &imr);
@@ -590,7 +590,7 @@ static void et131x_xcvr_init(struct et131x_adapter *etdev)
/* Set the link status interrupt only. Bad behavior when link status
* and auto neg are set, we run into a nested interrupt problem
*/
- imr |= 0x0105;
+ imr |= 0x0105;
MiWrite(etdev, (u8) offsetof(struct mi_regs, imr), imr);
@@ -729,7 +729,7 @@ static void et131x_xcvr_init(struct et131x_adapter *etdev)
}
void et131x_Mii_check(struct et131x_adapter *etdev,
- MI_BMSR_t bmsr, MI_BMSR_t bmsr_ints)
+ u16 bmsr, u16 bmsr_ints)
{
u8 link_status;
u32 autoneg_status;
@@ -740,8 +740,8 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
u32 polarity;
unsigned long flags;
- if (bmsr_ints.bits.link_status) {
- if (bmsr.bits.link_status) {
+ if (bmsr_ints & MI_BMSR_LINK_STATUS) {
+ if (bmsr & MI_BMSR_LINK_STATUS) {
etdev->boot_coma = 20;
/* Update our state variables and indicate the
@@ -750,7 +750,6 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
spin_lock_irqsave(&etdev->Lock, flags);
etdev->MediaState = NETIF_STATUS_MEDIA_CONNECT;
- etdev->Flags &= ~fMP_ADAPTER_LINK_DETECTION;
spin_unlock_irqrestore(&etdev->Lock, flags);
@@ -780,8 +779,7 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
* Timer expires, we can report disconnected (handled
* in the LinkDetectionDPC).
*/
- if (!(etdev->Flags & fMP_ADAPTER_LINK_DETECTION) ||
- (etdev->MediaState == NETIF_STATUS_MEDIA_DISCONNECT)) {
+ if (etdev->MediaState == NETIF_STATUS_MEDIA_DISCONNECT) {
spin_lock_irqsave(&etdev->Lock, flags);
etdev->MediaState =
NETIF_STATUS_MEDIA_DISCONNECT;
@@ -822,9 +820,10 @@ void et131x_Mii_check(struct et131x_adapter *etdev,
}
}
- if (bmsr_ints.bits.auto_neg_complete ||
- (etdev->AiForceDpx == 3 && bmsr_ints.bits.link_status)) {
- if (bmsr.bits.auto_neg_complete || etdev->AiForceDpx == 3) {
+ if ((bmsr_ints & MI_BMSR_AUTO_NEG_COMPLETE) ||
+ (etdev->AiForceDpx == 3 && (bmsr_ints & MI_BMSR_LINK_STATUS))) {
+ if ((bmsr & MI_BMSR_AUTO_NEG_COMPLETE) ||
+ etdev->AiForceDpx == 3) {
ET1310_PhyLinkStatus(etdev,
&link_status, &autoneg_status,
&speed, &duplex, &mdi_mdix,
diff --git a/drivers/staging/et131x/et1310_phy.h b/drivers/staging/et131x/et1310_phy.h
index 946c0c54740..6b38a3e0cab 100644
--- a/drivers/staging/et131x/et1310_phy.h
+++ b/drivers/staging/et131x/et1310_phy.h
@@ -126,119 +126,66 @@ struct mi_regs {
u8 mi_res4[3]; /* Future use by MI working group(Reg 0x1D - 0x1F) */
};
-/* MI Register 0: Basic mode control register */
-typedef union _MI_BMCR_t {
- u16 value;
- struct {
-#ifdef _BIT_FIELDS_HTOL
- u16 reset:1; /* bit 15 */
- u16 loopback:1; /* bit 14 */
- u16 speed_sel:1; /* bit 13 */
- u16 enable_autoneg:1; /* bit 12 */
- u16 power_down:1; /* bit 11 */
- u16 isolate:1; /* bit 10 */
- u16 restart_autoneg:1; /* bit 9 */
- u16 duplex_mode:1; /* bit 8 */
- u16 col_test:1; /* bit 7 */
- u16 speed_1000_sel:1; /* bit 6 */
- u16 res1:6; /* bits 0-5 */
-#else
- u16 res1:6; /* bits 0-5 */
- u16 speed_1000_sel:1; /* bit 6 */
- u16 col_test:1; /* bit 7 */
- u16 duplex_mode:1; /* bit 8 */
- u16 restart_autoneg:1; /* bit 9 */
- u16 isolate:1; /* bit 10 */
- u16 power_down:1; /* bit 11 */
- u16 enable_autoneg:1; /* bit 12 */
- u16 speed_sel:1; /* bit 13 */
- u16 loopback:1; /* bit 14 */
- u16 reset:1; /* bit 15 */
-#endif
- } bits;
-} MI_BMCR_t, *PMI_BMCR_t;
-
-/* MI Register 1: Basic mode status register */
-typedef union _MI_BMSR_t {
- u16 value;
- struct {
-#ifdef _BIT_FIELDS_HTOL
- u16 link_100T4:1; /* bit 15 */
- u16 link_100fdx:1; /* bit 14 */
- u16 link_100hdx:1; /* bit 13 */
- u16 link_10fdx:1; /* bit 12 */
- u16 link_10hdx:1; /* bit 11 */
- u16 link_100T2fdx:1; /* bit 10 */
- u16 link_100T2hdx:1; /* bit 9 */
- u16 extend_status:1; /* bit 8 */
- u16 res1:1; /* bit 7 */
- u16 preamble_supress:1; /* bit 6 */
- u16 auto_neg_complete:1; /* bit 5 */
- u16 remote_fault:1; /* bit 4 */
- u16 auto_neg_able:1; /* bit 3 */
- u16 link_status:1; /* bit 2 */
- u16 jabber_detect:1; /* bit 1 */
- u16 ext_cap:1; /* bit 0 */
-#else
- u16 ext_cap:1; /* bit 0 */
- u16 jabber_detect:1; /* bit 1 */
- u16 link_status:1; /* bit 2 */
- u16 auto_neg_able:1; /* bit 3 */
- u16 remote_fault:1; /* bit 4 */
- u16 auto_neg_complete:1; /* bit 5 */
- u16 preamble_supress:1; /* bit 6 */
- u16 res1:1; /* bit 7 */
- u16 extend_status:1; /* bit 8 */
- u16 link_100T2hdx:1; /* bit 9 */
- u16 link_100T2fdx:1; /* bit 10 */
- u16 link_10hdx:1; /* bit 11 */
- u16 link_10fdx:1; /* bit 12 */
- u16 link_100hdx:1; /* bit 13 */
- u16 link_100fdx:1; /* bit 14 */
- u16 link_100T4:1; /* bit 15 */
-#endif
- } bits;
-} MI_BMSR_t, *PMI_BMSR_t;
-
-/* MI Register 4: Auto-negotiation advertisement register */
-typedef union _MI_ANAR_t {
- u16 value;
- struct {
-#ifdef _BIT_FIELDS_HTOL
- u16 np_indication:1; /* bit 15 */
- u16 res2:1; /* bit 14 */
- u16 remote_fault:1; /* bit 13 */
- u16 res1:1; /* bit 12 */
- u16 cap_asmpause:1; /* bit 11 */
- u16 cap_pause:1; /* bit 10 */
- u16 cap_100T4:1; /* bit 9 */
- u16 cap_100fdx:1; /* bit 8 */
- u16 cap_100hdx:1; /* bit 7 */
- u16 cap_10fdx:1; /* bit 6 */
- u16 cap_10hdx:1; /* bit 5 */
- u16 selector:5; /* bits 0-4 */
-#else
- u16 selector:5; /* bits 0-4 */
- u16 cap_10hdx:1; /* bit 5 */
- u16 cap_10fdx:1; /* bit 6 */
- u16 cap_100hdx:1; /* bit 7 */
- u16 cap_100fdx:1; /* bit 8 */
- u16 cap_100T4:1; /* bit 9 */
- u16 cap_pause:1; /* bit 10 */
- u16 cap_asmpause:1; /* bit 11 */
- u16 res1:1; /* bit 12 */
- u16 remote_fault:1; /* bit 13 */
- u16 res2:1; /* bit 14 */
- u16 np_indication:1; /* bit 15 */
-#endif
- } bits;
-} MI_ANAR_t, *PMI_ANAR_t;
+/*
+ * MI Register 0: Basic mode control register
+ * 15: reset
+ * 14: loopback
+ * 13: speed_sel
+ * 12: enable_autoneg
+ * 11: power_down
+ * 10: isolate
+ * 9: restart_autoneg
+ * 8: duplex_mode
+ * 7: col_test
+ * 6: speed_1000_sel
+ * 5-0: res1
+ */
+
+/*
+ * MI Register 1: Basic mode status register
+ * 15: link_100T4
+ * 14: link_100fdx
+ * 13: link_100hdx
+ * 12: link_10fdx
+ * 11: link_10hdx
+ * 10: link_100T2fdx
+ * 9: link_100T2hdx
+ * 8: extend_status
+ * 7: res1
+ * 6: preamble_suppress
+ * 5: auto_neg_complete
+ * 4: remote_fault
+ * 3: auto_neg_able
+ * 2: link_status
+ * 1: jabber_detect
+ * 0: ext_cap
+ */
+
+#define MI_BMSR_LINK_STATUS 0x04
+#define MI_BMSR_AUTO_NEG_COMPLETE 0x20
+
+/*
+ * MI Register 4: Auto-negotiation advertisement register
+ *
+ * 15: np_indication
+ * 14: res2
+ * 13: remote_fault
+ * 12: res1
+ * 11: cap_asmpause
+ * 10: cap_pause
+ * 9: cap_100T4
+ * 8: cap_100fdx
+ * 7: cap_100hdx
+ * 6: cap_10fdx
+ * 5: cap_10hdx
+ * 4-0: selector
+ */
/* MI Register 5: Auto-negotiation link partner advertisement register
* 15: np_indication
* 14: acknowledge
* 13: remote_fault
- * 12: res1:1;
+ * 12: res1
* 11: cap_asmpause
* 10: cap_pause
* 9: cap_100T4
@@ -258,7 +205,7 @@ typedef union _MI_ANAR_t {
* 0: lp_an_able
*/
-/* MI Register 7: Auto-negotiation next page transmit reg(0x07)
+/* MI Register 7: Auto-negotiation next page transmit reg(0x07)
* 15: np
* 14: reserved
* 13: msg_page
@@ -267,7 +214,7 @@ typedef union _MI_ANAR_t {
* 10-0 msg
*/
-/* MI Register 8: Link Partner Next Page Reg(0x08)
+/* MI Register 8: Link Partner Next Page Reg(0x08)
* 15: np
* 14: ack
* 13: msg_page
@@ -473,7 +420,7 @@ typedef union _MI_ANAR_t {
#define TRUEPHY_ADV_DUPLEX_FULL 0x01
#define TRUEPHY_ADV_DUPLEX_HALF 0x02
#define TRUEPHY_ADV_DUPLEX_BOTH \
- (TRUEPHY_ADV_DUPLEX_FULL | TRUEPHY_ADV_DUPLEX_HALF)
+ (TRUEPHY_ADV_DUPLEX_FULL | TRUEPHY_ADV_DUPLEX_HALF)
#define PHY_CONTROL 0x00 /* #define TRU_MI_CONTROL_REGISTER 0 */
#define PHY_STATUS 0x01 /* #define TRU_MI_STATUS_REGISTER 1 */
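
With the MI_BMCR/MI_BMSR/MI_ANAR bitfield typedefs gone, callers test the raw 16-bit register value against masks such as the two MI_BMSR_* constants added above (bit 2 for link status, bit 5 for autonegotiation complete). A minimal standalone sketch of that style; the sample register value is invented for the illustration.

#include <stdint.h>
#include <stdio.h>

#define MI_BMSR_LINK_STATUS       0x04  /* bit 2 */
#define MI_BMSR_AUTO_NEG_COMPLETE 0x20  /* bit 5 */

int main(void)
{
        uint16_t bmsr = 0x0024; /* sample: link up, autoneg complete */

        if (bmsr & MI_BMSR_LINK_STATUS)
                printf("link is up\n");
        if (bmsr & MI_BMSR_AUTO_NEG_COMPLETE)
                printf("autonegotiation complete\n");
        return 0;
}
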
diff --git a/drivers/staging/et131x/et1310_pm.c b/drivers/staging/et131x/et1310_pm.c
index 2bc19448d2e..29d4d66d345 100644
--- a/drivers/staging/et131x/et1310_pm.c
+++ b/drivers/staging/et131x/et1310_pm.c
@@ -121,7 +121,7 @@ void EnablePhyComa(struct et131x_adapter *etdev)
/* Stop sending packets. */
spin_lock_irqsave(&etdev->send_hw_lock, flags);
- etdev->Flags |= fMP_ADAPTER_LOWER_POWER;
+ etdev->flags |= fMP_ADAPTER_LOWER_POWER;
spin_unlock_irqrestore(&etdev->send_hw_lock, flags);
/* Wait for outstanding Receive packets */
@@ -172,7 +172,7 @@ void DisablePhyComa(struct et131x_adapter *etdev)
et131x_adapter_setup(etdev);
/* Allow Tx to restart */
- etdev->Flags &= ~fMP_ADAPTER_LOWER_POWER;
+ etdev->flags &= ~fMP_ADAPTER_LOWER_POWER;
/* Need to re-enable Rx. */
et131x_rx_dma_enable(etdev);
diff --git a/drivers/staging/et131x/et1310_rx.c b/drivers/staging/et131x/et1310_rx.c
index fc6bd438366..7e386e07ff9 100644
--- a/drivers/staging/et131x/et1310_rx.c
+++ b/drivers/staging/et131x/et1310_rx.c
@@ -88,7 +88,23 @@
#include "et1310_rx.h"
#include "et131x.h"
-void nic_return_rfd(struct et131x_adapter *etdev, struct rfd *rfd);
+static inline u32 bump_fbr(u32 *fbr, u32 limit)
+{
+ u32 v = *fbr;
+ v++;
+ /* This works for all cases where limit < 1024. The 1023 case
+ works because 1023++ is 1024 which means the if condition is not
+ taken but the carry of the bit into the wrap bit toggles the wrap
+ value correctly */
+ if ((v & ET_DMA10_MASK) > limit) {
+ v &= ~ET_DMA10_MASK;
+ v ^= ET_DMA10_WRAP;
+ }
+ /* For the 1023 case */
+ v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
+ *fbr = v;
+ return v;
+}
/**
* et131x_rx_dma_memory_alloc
@@ -246,7 +262,7 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
&rx_ring->Fbr1MemPa[i]);
if (!rx_ring->Fbr1MemVa[i]) {
- dev_err(&adapter->pdev->dev,
+ dev_err(&adapter->pdev->dev,
"Could not alloc memory\n");
return -ENOMEM;
}
@@ -378,7 +394,7 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
SLAB_HWCACHE_ALIGN,
NULL);
- adapter->Flags |= fMP_ADAPTER_RECV_LOOKASIDE;
+ adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
/* The RFDs are going to be put on lists later on, so initialize the
* lists now.
@@ -491,7 +507,7 @@ void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
/* Free Packet Status Ring */
if (rx_ring->pPSRingVa) {
pktStatRingSize =
- sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;
+ sizeof(struct pkt_stat_desc) * adapter->rx_ring.PsrNumEntries;
pci_free_consistent(adapter->pdev, pktStatRingSize,
rx_ring->pPSRingVa, rx_ring->pPSRingPa);
@@ -512,9 +528,9 @@ void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
/* Free receive packet pool */
/* Destroy the lookaside (RFD) pool */
- if (adapter->Flags & fMP_ADAPTER_RECV_LOOKASIDE) {
+ if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
kmem_cache_destroy(rx_ring->RecvLookaside);
- adapter->Flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
+ adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
}
/* Free the FBR Lookup Table */
@@ -708,6 +724,82 @@ void SetRxDmaTimer(struct et131x_adapter *etdev)
}
/**
+ * nic_return_rfd - Recycle a RFD and put it back onto the receive list
+ * @etdev: pointer to our adapter
+ * @rfd: pointer to the RFD
+ */
+void nic_return_rfd(struct et131x_adapter *etdev, struct rfd *rfd)
+{
+ struct rx_ring *rx_local = &etdev->rx_ring;
+ struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
+ u16 bi = rfd->bufferindex;
+ u8 ri = rfd->ringindex;
+ unsigned long flags;
+
+ /* We don't use any of the OOB data besides status. Otherwise, we
+ * need to clean up OOB data
+ */
+ if (
+#ifdef USE_FBR0
+ (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
+#endif
+ (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
+ spin_lock_irqsave(&etdev->FbrLock, flags);
+
+ if (ri == 1) {
+ struct fbr_desc *next =
+ (struct fbr_desc *) (rx_local->pFbr1RingVa) +
+ INDEX10(rx_local->local_Fbr1_full);
+
+ /* Handle the Free Buffer Ring advancement here. Write
+ * the PA / Buffer Index for the returned buffer into
+ * the oldest (next to be freed) FBR entry
+ */
+ next->addr_hi = rx_local->fbr[1]->bus_high[bi];
+ next->addr_lo = rx_local->fbr[1]->bus_low[bi];
+ next->word2 = bi;
+
+ writel(bump_fbr(&rx_local->local_Fbr1_full,
+ rx_local->Fbr1NumEntries - 1),
+ &rx_dma->fbr1_full_offset);
+ }
+#ifdef USE_FBR0
+ else {
+ struct fbr_desc *next = (struct fbr_desc *)
+ rx_local->pFbr0RingVa +
+ INDEX10(rx_local->local_Fbr0_full);
+
+ /* Handle the Free Buffer Ring advancement here. Write
+ * the PA / Buffer Index for the returned buffer into
+ * the oldest (next to be freed) FBR entry
+ */
+ next->addr_hi = rx_local->fbr[0]->bus_high[bi];
+ next->addr_lo = rx_local->fbr[0]->bus_low[bi];
+ next->word2 = bi;
+
+ writel(bump_fbr(&rx_local->local_Fbr0_full,
+ rx_local->Fbr0NumEntries - 1),
+ &rx_dma->fbr0_full_offset);
+ }
+#endif
+ spin_unlock_irqrestore(&etdev->FbrLock, flags);
+ } else {
+ dev_err(&etdev->pdev->dev,
+ "NICReturnRFD illegal Buffer Index returned\n");
+ }
+
+ /* The processing on this RFD is done, so put it back on the tail of
+ * our list
+ */
+ spin_lock_irqsave(&etdev->rcv_lock, flags);
+ list_add_tail(&rfd->list_node, &rx_local->RecvList);
+ rx_local->nReadyRecv++;
+ spin_unlock_irqrestore(&etdev->rcv_lock, flags);
+
+ WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
+}
+
+/**
* et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
* @etdev: pointer to our adapter structure
*/
@@ -776,7 +868,7 @@ void et131x_rx_dma_enable(struct et131x_adapter *etdev)
* the packet to it, puts the RFD in the RecvPendList, and also returns
* the pointer to the RFD.
*/
-struct rfd * nic_rx_pkts(struct et131x_adapter *etdev)
+struct rfd *nic_rx_pkts(struct et131x_adapter *etdev)
{
struct rx_ring *rx_local = &etdev->rx_ring;
struct rx_status_block *status;
@@ -879,7 +971,7 @@ struct rfd * nic_rx_pkts(struct et131x_adapter *etdev)
* also counted here.
*/
if (len < (NIC_MIN_PACKET_SIZE + 4)) {
- etdev->Stats.other_errors++;
+ etdev->stats.other_errors++;
len = 0;
}
@@ -947,16 +1039,16 @@ struct rfd * nic_rx_pkts(struct et131x_adapter *etdev)
}
if (len > 0)
- etdev->Stats.multircv++;
+ etdev->stats.multircv++;
} else if (word0 & ALCATEL_BROADCAST_PKT)
- etdev->Stats.brdcstrcv++;
+ etdev->stats.brdcstrcv++;
else
/* Not sure what this counter measures in
* promiscuous mode. Perhaps we should check
* the MAC address to see if it is directed
* to us in promiscuous mode.
*/
- etdev->Stats.unircv++;
+ etdev->stats.unircv++;
}
if (len > 0) {
@@ -1034,13 +1126,12 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
* Free buffer ring.
*/
if (!etdev->PacketFilter ||
- !(etdev->Flags & fMP_ADAPTER_LINK_DETECTION) ||
- rfd->len == 0) {
+ !netif_carrier_ok(etdev->netdev) ||
+ rfd->len == 0)
continue;
- }
/* Increment the number of packets we received */
- etdev->Stats.ipackets++;
+ etdev->net_stats.rx_packets++;
/* Set the status on the packet, either resources or success */
if (etdev->rx_ring.nReadyRecv < RFD_LOW_WATER_MARK) {
@@ -1059,96 +1150,3 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
etdev->rx_ring.UnfinishedReceives = false;
}
-static inline u32 bump_fbr(u32 *fbr, u32 limit)
-{
- u32 v = *fbr;
- v++;
- /* This works for all cases where limit < 1024. The 1023 case
- works because 1023++ is 1024 which means the if condition is not
- taken but the carry of the bit into the wrap bit toggles the wrap
- value correctly */
- if ((v & ET_DMA10_MASK) > limit) {
- v &= ~ET_DMA10_MASK;
- v ^= ET_DMA10_WRAP;
- }
- /* For the 1023 case */
- v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
- *fbr = v;
- return v;
-}
-
-/**
- * NICReturnRFD - Recycle a RFD and put it back onto the receive list
- * @etdev: pointer to our adapter
- * @rfd: pointer to the RFD
- */
-void nic_return_rfd(struct et131x_adapter *etdev, struct rfd *rfd)
-{
- struct rx_ring *rx_local = &etdev->rx_ring;
- struct rxdma_regs __iomem *rx_dma = &etdev->regs->rxdma;
- u16 bi = rfd->bufferindex;
- u8 ri = rfd->ringindex;
- unsigned long flags;
-
- /* We don't use any of the OOB data besides status. Otherwise, we
- * need to clean up OOB data
- */
- if (
-#ifdef USE_FBR0
- (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
-#endif
- (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
- spin_lock_irqsave(&etdev->FbrLock, flags);
-
- if (ri == 1) {
- struct fbr_desc *next =
- (struct fbr_desc *) (rx_local->pFbr1RingVa) +
- INDEX10(rx_local->local_Fbr1_full);
-
- /* Handle the Free Buffer Ring advancement here. Write
- * the PA / Buffer Index for the returned buffer into
- * the oldest (next to be freed)FBR entry
- */
- next->addr_hi = rx_local->fbr[1]->bus_high[bi];
- next->addr_lo = rx_local->fbr[1]->bus_low[bi];
- next->word2 = bi;
-
- writel(bump_fbr(&rx_local->local_Fbr1_full,
- rx_local->Fbr1NumEntries - 1),
- &rx_dma->fbr1_full_offset);
- }
-#ifdef USE_FBR0
- else {
- struct fbr_desc *next = (struct fbr_desc *)
- rx_local->pFbr0RingVa +
- INDEX10(rx_local->local_Fbr0_full);
-
- /* Handle the Free Buffer Ring advancement here. Write
- * the PA / Buffer Index for the returned buffer into
- * the oldest (next to be freed) FBR entry
- */
- next->addr_hi = rx_local->fbr[0]->bus_high[bi];
- next->addr_lo = rx_local->fbr[0]->bus_low[bi];
- next->word2 = bi;
-
- writel(bump_fbr(&rx_local->local_Fbr0_full,
- rx_local->Fbr0NumEntries - 1),
- &rx_dma->fbr0_full_offset);
- }
-#endif
- spin_unlock_irqrestore(&etdev->FbrLock, flags);
- } else {
- dev_err(&etdev->pdev->dev,
- "NICReturnRFD illegal Buffer Index returned\n");
- }
-
- /* The processing on this RFD is done, so put it back on the tail of
- * our list
- */
- spin_lock_irqsave(&etdev->rcv_lock, flags);
- list_add_tail(&rfd->list_node, &rx_local->RecvList);
- rx_local->nReadyRecv++;
- spin_unlock_irqrestore(&etdev->rcv_lock, flags);
-
- WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
-}
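
bump_fbr(), which this diff moves ahead of its first caller, advances a free-buffer-ring offset kept as a 10-bit index with a wrap flag in the next bit. A standalone sketch of that arithmetic follows; the 0x3FF index mask and 0x400 wrap bit are assumptions chosen for a 10-bit ring and stand in for the driver's ET_DMA10_MASK and ET_DMA10_WRAP.

#include <stdint.h>
#include <stdio.h>

#define IDX_MASK 0x3FF  /* assumed 10-bit entry index */
#define WRAP_BIT 0x400  /* assumed wrap flag in the next bit */

static uint32_t bump(uint32_t v, uint32_t limit)
{
        v++;
        if ((v & IDX_MASK) > limit) {   /* walked past the last entry */
                v &= ~IDX_MASK;         /* back to index 0 ...        */
                v ^= WRAP_BIT;          /* ... and flip the wrap flag */
        }
        return v & (IDX_MASK | WRAP_BIT); /* covers the limit == 1023 case */
}

int main(void)
{
        uint32_t v = 510;               /* near the end of a 512-entry ring */
        int i;

        for (i = 0; i < 4; i++) {
                v = bump(v, 511);
                printf("index %u wrap %d\n",
                       (unsigned)(v & IDX_MASK), (v & WRAP_BIT) != 0);
        }
        return 0;
}
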
diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
index 4241d2afecc..8fb3051fe28 100644
--- a/drivers/staging/et131x/et1310_tx.c
+++ b/drivers/staging/et131x/et1310_tx.c
@@ -307,7 +307,7 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
/* We need to see if the link is up; if it's not, make the
* netif layer think we're good and drop the packet
*/
- if ((etdev->Flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
+ if ((etdev->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
!netif_carrier_ok(netdev)) {
dev_kfree_skb_any(skb);
skb = NULL;
@@ -635,11 +635,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
struct net_device_stats *stats = &etdev->net_stats;
if (tcb->flags & fMP_DEST_BROAD)
- atomic_inc(&etdev->Stats.brdcstxmt);
+ atomic_inc(&etdev->stats.brdcstxmt);
else if (tcb->flags & fMP_DEST_MULTI)
- atomic_inc(&etdev->Stats.multixmt);
+ atomic_inc(&etdev->stats.multixmt);
else
- atomic_inc(&etdev->Stats.unixmt);
+ atomic_inc(&etdev->stats.unixmt);
if (tcb->skb) {
stats->tx_bytes += tcb->skb->len;
@@ -673,7 +673,7 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
/* Add the TCB to the Ready Q */
spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
- etdev->Stats.opackets++;
+ etdev->net_stats.tx_packets++;
if (etdev->tx_ring.tcb_qtail)
etdev->tx_ring.tcb_qtail->next = tcb;
@@ -747,7 +747,7 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
struct tcb *tcb;
u32 index;
- serviced = readl(&etdev->regs->txdma.NewServiceComplete);
+ serviced = readl(&etdev->regs->txdma.new_service_complete);
index = INDEX10(serviced);
/* Has the ring wrapped? Process any descriptors that do not have
diff --git a/drivers/staging/et131x/et131x.h b/drivers/staging/et131x/et131x.h
index 8aa3365b83c..48ebac0e55c 100644
--- a/drivers/staging/et131x/et131x.h
+++ b/drivers/staging/et131x/et131x.h
@@ -108,12 +108,12 @@ void et131x_setphy_normal(struct et131x_adapter *adapter);
int PhyMiRead(struct et131x_adapter *adapter, u8 xcvrAddr,
u8 xcvrReg, u16 *value);
#define MiRead(adapter, xcvrReg, value) \
- PhyMiRead((adapter), (adapter)->Stats.xcvr_addr, (xcvrReg), (value))
+ PhyMiRead((adapter), (adapter)->stats.xcvr_addr, (xcvrReg), (value))
int32_t MiWrite(struct et131x_adapter *adapter,
u8 xcvReg, u16 value);
void et131x_Mii_check(struct et131x_adapter *pAdapter,
- MI_BMSR_t bmsr, MI_BMSR_t bmsr_ints);
+ u16 bmsr, u16 bmsr_ints);
/* This last is not strictly required (the driver could call the TPAL
* version instead), but this sets the adapter up correctly, and calls the
diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
index c852f867645..408c50ba4f2 100644
--- a/drivers/staging/et131x/et131x_adapter.h
+++ b/drivers/staging/et131x/et131x_adapter.h
@@ -67,7 +67,7 @@
* Do not change these values: if changed, then change also in respective
* TXdma and Rxdma engines
*/
-#define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */
+#define NUM_DESC_PER_RING_TX 512 /* TX Do not change these values */
#define NUM_TCB 64
/*
@@ -98,11 +98,7 @@ struct rfd {
#define FLOW_NONE 3
/* Struct to define some device statistics */
-typedef struct _ce_stats_t {
- /* Link Input/Output stats */
- uint64_t ipackets; /* # of in packets */
- uint64_t opackets; /* # of out packets */
-
+struct ce_stats {
/* MIB II variables
*
* NOTE: atomic_t types are only guaranteed to store 24-bits; if we
@@ -118,7 +114,7 @@ typedef struct _ce_stats_t {
u32 norcvbuf; /* # Rx packets discarded */
u32 noxmtbuf; /* # Tx packets discarded */
- /* Transciever state informations. */
+ /* Transceiver state information. */
u8 xcvr_addr;
u32 xcvr_id;
@@ -143,7 +139,7 @@ typedef struct _ce_stats_t {
u32 SynchrounousIterations;
u32 InterruptStatus;
-} CE_STATS_t, *PCE_STATS_t;
+};
/* The private adapter structure */
@@ -154,7 +150,7 @@ struct et131x_adapter {
struct work_struct task;
/* Flags that indicate current state of the adapter */
- u32 Flags;
+ u32 flags;
u32 HwErrCount;
/* Configuration */
@@ -186,7 +182,7 @@ struct et131x_adapter {
u8 MCList[NIC_MAX_MCAST_LIST][ETH_ALEN];
/* Pointer to the device's PCI register space */
- ADDRESS_MAP_t __iomem *regs;
+ struct address_map __iomem *regs;
/* Registry parameters */
u8 SpeedDuplex; /* speed/duplex */
@@ -226,7 +222,7 @@ struct et131x_adapter {
u32 CachedMaskValue;
/* Xcvr status at last poll */
- MI_BMSR_t Bmsr;
+ u16 bmsr;
/* Tx Memory Variables */
struct tx_ring tx_ring;
@@ -239,10 +235,9 @@ struct et131x_adapter {
u8 ReplicaPhyLoopbkPF; /* Replica Enable Pass/Fail */
/* Stats */
- CE_STATS_t Stats;
+ struct ce_stats stats;
struct net_device_stats net_stats;
- struct net_device_stats net_stats_prev;
};
#endif /* __ET131X_ADAPTER_H__ */
diff --git a/drivers/staging/et131x/et131x_defs.h b/drivers/staging/et131x/et131x_defs.h
index d81fc77a501..3d5193fdb00 100644
--- a/drivers/staging/et131x/et131x_defs.h
+++ b/drivers/staging/et131x/et131x_defs.h
@@ -95,7 +95,6 @@
#define fMP_ADAPTER_HARDWARE_ERROR 0x04000000
#define fMP_ADAPTER_REMOVE_IN_PROGRESS 0x08000000
#define fMP_ADAPTER_HALT_IN_PROGRESS 0x10000000
-#define fMP_ADAPTER_LINK_DETECTION 0x20000000
#define fMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
#define fMP_ADAPTER_NOT_READY_MASK 0x3ff00000
diff --git a/drivers/staging/et131x/et131x_initpci.c b/drivers/staging/et131x/et131x_initpci.c
index 50237acd698..8c8d6b87a25 100644
--- a/drivers/staging/et131x/et131x_initpci.c
+++ b/drivers/staging/et131x/et131x_initpci.c
@@ -113,13 +113,13 @@
static u32 et131x_speed_set;
module_param(et131x_speed_set, uint, 0);
MODULE_PARM_DESC(et131x_speed_set,
- "Set Link speed and dublex manually (0-5) [0]\n \
- 1 : 10Mb Half-Duplex\n \
- 2 : 10Mb Full-Duplex\n \
- 3 : 100Mb Half-Duplex\n \
- 4 : 100Mb Full-Duplex\n \
- 5 : 1000Mb Full-Duplex\n \
- 0 : Auto Speed Auto Dublex");
+ "Set Link speed and dublex manually (0-5) [0]\n"
+ "1 : 10Mb Half-Duplex\n"
+ "2 : 10Mb Full-Duplex\n"
+ "3 : 100Mb Half-Duplex\n"
+ "4 : 100Mb Full-Duplex\n"
+ "5 : 1000Mb Full-Duplex\n"
+ "0 : Auto Speed Auto Dublex");
/**
* et131x_hwaddr_init - set up the MAC Address on the ET1310
@@ -274,14 +274,14 @@ void et131x_error_timer_handler(unsigned long data)
dev_err(&etdev->pdev->dev,
"No interrupts, in PHY coma, pm_csr = 0x%x\n", pm_csr);
- if (!etdev->Bmsr.bits.link_status &&
+ if (!(etdev->bmsr & MI_BMSR_LINK_STATUS) &&
etdev->RegistryPhyComa &&
etdev->boot_coma < 11) {
etdev->boot_coma++;
}
if (etdev->boot_coma == 10) {
- if (!etdev->Bmsr.bits.link_status
+ if (!(etdev->bmsr & MI_BMSR_LINK_STATUS)
&& etdev->RegistryPhyComa) {
if ((pm_csr & ET_PM_PHY_SW_COMA) == 0) {
/* NOTE - This was originally a 'sync with
@@ -312,7 +312,6 @@ void et131x_link_detection_handler(unsigned long data)
spin_lock_irqsave(&etdev->Lock, flags);
etdev->MediaState = NETIF_STATUS_MEDIA_DISCONNECT;
- etdev->Flags &= ~fMP_ADAPTER_LINK_DETECTION;
spin_unlock_irqrestore(&etdev->Lock, flags);
@@ -539,7 +538,8 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
struct et131x_adapter *etdev;
- /* Setup the fundamental net_device and private adapter structure elements */
+ /* Setup the fundamental net_device and private adapter structure
+ * elements */
SET_NETDEV_DEV(netdev, &pdev->dev);
/* Allocate private adapter struct and copy in relevant information */
@@ -807,12 +807,12 @@ static struct pci_device_id et131x_pci_table[] __devinitdata = {
MODULE_DEVICE_TABLE(pci, et131x_pci_table);
static struct pci_driver et131x_driver = {
- .name = DRIVER_NAME,
- .id_table = et131x_pci_table,
- .probe = et131x_pci_setup,
- .remove = __devexit_p(et131x_pci_remove),
- .suspend = NULL, /* et131x_pci_suspend */
- .resume = NULL, /* et131x_pci_resume */
+ .name = DRIVER_NAME,
+ .id_table = et131x_pci_table,
+ .probe = et131x_pci_setup,
+ .remove = __devexit_p(et131x_pci_remove),
+ .suspend = NULL, /* et131x_pci_suspend */
+ .resume = NULL, /* et131x_pci_resume */
};
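
The MODULE_PARM_DESC hunk above drops the backslash line continuations because they keep each following source line's leading whitespace inside the string; adjacent string literals concatenate cleanly instead. A minimal standalone sketch of the difference, with invented strings:

#include <stdio.h>

int main(void)
{
        const char *continued = "line one\n \
        line two";                      /* leading whitespace ends up in the text */
        const char *concatenated = "line one\n"
                                   "line two";  /* clean join, no stray characters */

        printf("--- continued ---\n%s\n", continued);
        printf("--- concatenated ---\n%s\n", concatenated);
        return 0;
}
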
diff --git a/drivers/staging/et131x/et131x_isr.c b/drivers/staging/et131x/et131x_isr.c
index f716e408712..9c33209c840 100644
--- a/drivers/staging/et131x/et131x_isr.c
+++ b/drivers/staging/et131x/et131x_isr.c
@@ -222,7 +222,7 @@ irqreturn_t et131x_isr(int irq, void *dev_id)
* DPC. We will clear the software copy of that in that
* routine.
*/
- adapter->Stats.InterruptStatus = status;
+ adapter->stats.InterruptStatus = status;
/* Schedule the ISR handler as a bottom-half task in the
* kernel's tq_immediate queue, and mark the queue for
@@ -244,8 +244,8 @@ void et131x_isr_handler(struct work_struct *work)
{
struct et131x_adapter *etdev =
container_of(work, struct et131x_adapter, task);
- u32 status = etdev->Stats.InterruptStatus;
- ADDRESS_MAP_t __iomem *iomem = etdev->regs;
+ u32 status = etdev->stats.InterruptStatus;
+ struct address_map __iomem *iomem = etdev->regs;
/*
* These first two are by far the most common. Once handled, we clear
@@ -268,7 +268,7 @@ void et131x_isr_handler(struct work_struct *work)
u32 txdma_err;
/* Following read also clears the register (COR) */
- txdma_err = readl(&iomem->txdma.TxDmaError);
+ txdma_err = readl(&iomem->txdma.tx_dma_error);
dev_warn(&etdev->pdev->dev,
"TXDMA_ERR interrupt, error = %d\n",
@@ -365,7 +365,8 @@ void et131x_isr_handler(struct work_struct *work)
/* Handle the PHY interrupt */
if (status & ET_INTR_PHY) {
u32 pm_csr;
- MI_BMSR_t BmsrInts, BmsrData;
+ u16 bmsr_ints;
+ u16 bmsr_data;
u16 myisr;
/* If we are in coma mode when we get this interrupt,
@@ -390,14 +391,13 @@ void et131x_isr_handler(struct work_struct *work)
if (!etdev->ReplicaPhyLoopbk) {
MiRead(etdev,
(uint8_t) offsetof(struct mi_regs, bmsr),
- &BmsrData.value);
+ &bmsr_data);
- BmsrInts.value =
- etdev->Bmsr.value ^ BmsrData.value;
- etdev->Bmsr.value = BmsrData.value;
+ bmsr_ints = etdev->bmsr ^ bmsr_data;
+ etdev->bmsr = bmsr_data;
/* Do all the cable in / cable out stuff */
- et131x_Mii_check(etdev, BmsrData, BmsrInts);
+ et131x_Mii_check(etdev, bmsr_data, bmsr_ints);
}
}
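
The PHY interrupt hunk above caches the previous BMSR value and XORs it with the fresh read, so the handler sees only the bits that changed since the last poll. A standalone sketch of that change detection, reusing the MI_BMSR_LINK_STATUS mask introduced earlier in this series; the sample register values are invented.

#include <stdint.h>
#include <stdio.h>

#define MI_BMSR_LINK_STATUS 0x04        /* bit 2 */

int main(void)
{
        uint16_t cached = 0x0024;       /* last poll: link up   */
        uint16_t fresh  = 0x0020;       /* this poll: link down */
        uint16_t changed = cached ^ fresh;      /* only changed bits remain */

        if (changed & MI_BMSR_LINK_STATUS) {
                if (fresh & MI_BMSR_LINK_STATUS)
                        printf("link came up\n");
                else
                        printf("link went down\n");
        }
        return 0;
}
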
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c
index b25bae29042..5f25bbad36b 100644
--- a/drivers/staging/et131x/et131x_netdev.c
+++ b/drivers/staging/et131x/et131x_netdev.c
@@ -88,83 +88,18 @@
#include "et131x_adapter.h"
#include "et131x.h"
-struct net_device_stats *et131x_stats(struct net_device *netdev);
-int et131x_open(struct net_device *netdev);
-int et131x_close(struct net_device *netdev);
-int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd);
-void et131x_multicast(struct net_device *netdev);
-int et131x_tx(struct sk_buff *skb, struct net_device *netdev);
-void et131x_tx_timeout(struct net_device *netdev);
-int et131x_change_mtu(struct net_device *netdev, int new_mtu);
-int et131x_set_mac_addr(struct net_device *netdev, void *new_mac);
-void et131x_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
-void et131x_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
-void et131x_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
-
-static const struct net_device_ops et131x_netdev_ops = {
- .ndo_open = et131x_open,
- .ndo_stop = et131x_close,
- .ndo_start_xmit = et131x_tx,
- .ndo_set_multicast_list = et131x_multicast,
- .ndo_tx_timeout = et131x_tx_timeout,
- .ndo_change_mtu = et131x_change_mtu,
- .ndo_set_mac_address = et131x_set_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_get_stats = et131x_stats,
- .ndo_do_ioctl = et131x_ioctl,
-};
-
-/**
- * et131x_device_alloc
- *
- * Returns pointer to the allocated and initialized net_device struct for
- * this device.
- *
- * Create instances of net_device and wl_private for the new adapter and
- * register the device's entry points in the net_device structure.
- */
-struct net_device *et131x_device_alloc(void)
-{
- struct net_device *netdev;
-
- /* Alloc net_device and adapter structs */
- netdev = alloc_etherdev(sizeof(struct et131x_adapter));
-
- if (netdev == NULL) {
- printk(KERN_ERR "et131x: Alloc of net_device struct failed\n");
- return NULL;
- }
-
- /* Setup the function registration table (and other data) for a
- * net_device
- */
- /* netdev->init = &et131x_init; */
- /* netdev->set_config = &et131x_config; */
- netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
- netdev->netdev_ops = &et131x_netdev_ops;
-
- /* netdev->ethtool_ops = &et131x_ethtool_ops; */
-
- /* Poll? */
- /* netdev->poll = &et131x_poll; */
- /* netdev->poll_controller = &et131x_poll_controller; */
- return netdev;
-}
-
/**
* et131x_stats - Return the current device statistics.
* @netdev: device whose stats are being queried
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
-struct net_device_stats *et131x_stats(struct net_device *netdev)
+static struct net_device_stats *et131x_stats(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
struct net_device_stats *stats = &adapter->net_stats;
- CE_STATS_t *devstat = &adapter->Stats;
+ struct ce_stats *devstat = &adapter->stats;
- stats->rx_packets = devstat->ipackets;
- stats->tx_packets = devstat->opackets;
stats->rx_errors = devstat->length_err + devstat->alignment_err +
devstat->crc_err + devstat->code_violations + devstat->other_errors;
stats->tx_errors = devstat->max_pkt_error;
@@ -227,7 +162,7 @@ int et131x_open(struct net_device *netdev)
/* Enable device interrupts */
et131x_enable_interrupts(adapter);
- adapter->Flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
+ adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
/* We're ready to move some data, so start the queue */
netif_start_queue(netdev);
@@ -255,7 +190,7 @@ int et131x_close(struct net_device *netdev)
et131x_disable_interrupts(adapter);
/* Deregistering ISR */
- adapter->Flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
+ adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
free_irq(netdev->irq, netdev);
/* Stop the error timer */
@@ -279,7 +214,7 @@ int et131x_ioctl_mii(struct net_device *netdev, struct ifreq *reqbuf, int cmd)
switch (cmd) {
case SIOCGMIIPHY:
- data->phy_id = etdev->Stats.xcvr_addr;
+ data->phy_id = etdev->stats.xcvr_addr;
break;
case SIOCGMIIREG:
@@ -511,18 +446,14 @@ void et131x_tx_timeout(struct net_device *netdev)
struct tcb *tcb;
unsigned long flags;
- /* Just skip this part if the adapter is doing link detection */
- if (etdev->Flags & fMP_ADAPTER_LINK_DETECTION)
- return;
-
/* Any nonrecoverable hardware error?
* Checks adapter->flags for any failure in phy reading
*/
- if (etdev->Flags & fMP_ADAPTER_NON_RECOVER_ERROR)
+ if (etdev->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
return;
/* Hardware failure? */
- if (etdev->Flags & fMP_ADAPTER_HARDWARE_ERROR) {
+ if (etdev->flags & fMP_ADAPTER_HARDWARE_ERROR) {
dev_err(&etdev->pdev->dev, "hardware error - reset\n");
return;
}
@@ -540,7 +471,7 @@ void et131x_tx_timeout(struct net_device *netdev)
flags);
dev_warn(&etdev->pdev->dev,
- "Send stuck - reset. tcb->WrIndex %x, Flags 0x%08x\n",
+ "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n",
tcb->index,
tcb->flags);
@@ -609,7 +540,7 @@ int et131x_change_mtu(struct net_device *netdev, int new_mtu)
et131x_adapter_setup(adapter);
/* Enable interrupts */
- if (adapter->Flags & fMP_ADAPTER_INTERRUPT_IN_USE)
+ if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
et131x_enable_interrupts(adapter);
/* Restart the Tx and Rx DMA engines */
@@ -691,7 +622,7 @@ int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
et131x_adapter_setup(adapter);
/* Enable interrupts */
- if (adapter->Flags & fMP_ADAPTER_INTERRUPT_IN_USE)
+ if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
et131x_enable_interrupts(adapter);
/* Restart the Tx and Rx DMA engines */
@@ -702,3 +633,54 @@ int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
netif_wake_queue(netdev);
return result;
}
+
+static const struct net_device_ops et131x_netdev_ops = {
+ .ndo_open = et131x_open,
+ .ndo_stop = et131x_close,
+ .ndo_start_xmit = et131x_tx,
+ .ndo_set_multicast_list = et131x_multicast,
+ .ndo_tx_timeout = et131x_tx_timeout,
+ .ndo_change_mtu = et131x_change_mtu,
+ .ndo_set_mac_address = et131x_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats = et131x_stats,
+ .ndo_do_ioctl = et131x_ioctl,
+};
+
+/**
+ * et131x_device_alloc
+ *
+ * Returns pointer to the allocated and initialized net_device struct for
+ * this device.
+ *
+ * Create instances of net_device and wl_private for the new adapter and
+ * register the device's entry points in the net_device structure.
+ */
+struct net_device *et131x_device_alloc(void)
+{
+ struct net_device *netdev;
+
+ /* Alloc net_device and adapter structs */
+ netdev = alloc_etherdev(sizeof(struct et131x_adapter));
+
+ if (netdev == NULL) {
+ printk(KERN_ERR "et131x: Alloc of net_device struct failed\n");
+ return NULL;
+ }
+
+ /* Setup the function registration table (and other data) for a
+ * net_device
+ */
+ /* netdev->init = &et131x_init; */
+ /* netdev->set_config = &et131x_config; */
+ netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
+ netdev->netdev_ops = &et131x_netdev_ops;
+
+ /* netdev->ethtool_ops = &et131x_ethtool_ops; */
+
+ /* Poll? */
+ /* netdev->poll = &et131x_poll; */
+ /* netdev->poll_controller = &et131x_poll_controller; */
+ return netdev;
+}
+
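
The reordering above defines the (now static) handlers before the net_device_ops table so the block of forward declarations can be deleted. A minimal standalone sketch of the same pattern, with names invented for the illustration:

#include <stdio.h>

struct demo_ops {
        int (*open)(void);
        int (*close)(void);
};

/* Handlers are defined first ... */
static int demo_open(void)  { printf("open\n");  return 0; }
static int demo_close(void) { printf("close\n"); return 0; }

/* ... so the ops table needs no prototypes above it. */
static const struct demo_ops ops = {
        .open  = demo_open,
        .close = demo_close,
};

int main(void)
{
        ops.open();
        ops.close();
        return 0;
}
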
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.conf b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.conf
deleted file mode 100644
index e2321a42e31..00000000000
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-device "ft1000_cs"
- class "network" module "ft1000","ft1000_cs"
-
-card "flarion FT1000"
- manfid 0x02cc, 0x0100
- bind "ft1000_cs"
-
-card "flarion FT1000"
- manfid 0x02cc, 0x1000
- bind "ft1000_cs"
-
-card "flarion FT1000"
- manfid 0x02cc, 0x1300
- bind "ft1000_cs"
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
index 61e1cfc8044..3b0130fe608 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
@@ -1,409 +1,89 @@
-//---------------------------------------------------------------------------
-// FT1000 driver for Flarion Flash OFDM NIC Device
-//
-// Copyright (C) 2002 Flarion Technologies, All rights reserved.
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License as published by the Free
-// Software Foundation; either version 2 of the License, or (at your option) any
-// later version. This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-// more details. You should have received a copy of the GNU General Public
-// License along with this program; if not, write to the
-// Free Software Foundation, Inc., 59 Temple Place -
-// Suite 330, Boston, MA 02111-1307, USA.
-//---------------------------------------------------------------------------
-//
-// File: ft1000.h
-//
-// Description: Common structures and defines
-//
-// History:
-// 8/29/02 Whc Ported to Linux.
-// 7/19/04 Whc Drop packet and cmd msg with pseudo header
-// checksum
-// 10/27/04 Whc Added dynamic downloading of test image.
-// 01/11/04 Whc Added support for Magnemite ASIC
-//
-//---------------------------------------------------------------------------
+/*---------------------------------------------------------------------------
+ FT1000 driver for Flarion Flash OFDM NIC Device
+
+ Copyright (C) 2002 Flarion Technologies, All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at your option) any
+ later version. This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details. You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the
+ Free Software Foundation, Inc., 59 Temple Place -
+ Suite 330, Boston, MA 02111-1307, USA.
+---------------------------------------------------------------------------
+ Description: Common structures and defines
+---------------------------------------------------------------------------*/
#ifndef _FT1000H_
#define _FT1000H_
-
-#define FT1000_DRV_VER 0x01010300
-
-#define DSPVERSZ 4
-#define HWSERNUMSZ 16
-#define SKUSZ 20
-#define EUISZ 8
-#define MODESZ 2
-#define CALVERSZ 2
-#define CALDATESZ 6
-
-// Pseudo Header structure
-typedef struct _PSEUDO_HDR
-{
- unsigned short length; // length of msg body
- unsigned char source; // hardware source id
- // Host = 0x10
- // Dsp = 0x20
- unsigned char destination; // hardware destination id (refer to source)
- unsigned char portdest; // software destination port id
- // Host = 0x00
- // Applicaton Broadcast = 0x10
- // Network Stack = 0x20
- // Dsp OAM = 0x80
- // Dsp Airlink = 0x90
- // Dsp Loader = 0xa0
- // Dsp MIP = 0xb0
- unsigned char portsrc; // software source port id (refer to portdest)
- unsigned short sh_str_id; // not used
- unsigned char control; // not used
- unsigned char rsvd1;
- unsigned char seq_num; // message sequence number
- unsigned char rsvd2;
- unsigned short qos_class; // not used
- unsigned short checksum; // pseudo header checksum
-} __attribute__ ((packed)) PSEUDO_HDR, *PPSEUDO_HDR;
-
-// Definitions to maintain compatibility between other platforms
-#define UCHAR u8
-#define USHORT u16
-#define ULONG u32
-#define BOOLEAN u8
-#define PULONG u32 *
-#define PUSHORT u16 *
-#define PUCHAR u8 *
-#define PCHAR u8 *
-#define UINT u32
-
-#define ELECTRABUZZ_ID 0 // ASIC ID for Electrabuzz
-#define MAGNEMITE_ID 0x1a01 // ASIC ID for Magnemite
-
-// MEMORY MAP common to both ELECTRABUZZ and MAGNEMITE
-#define FT1000_REG_DPRAM_ADDR 0x000E // DPADR - Dual Port Ram Indirect Address Register
-#define FT1000_REG_SUP_CTRL 0x0020 // HCTR - Host Control Register
-#define FT1000_REG_SUP_STAT 0x0022 // HSTAT - Host Status Register
-#define FT1000_REG_RESET 0x0024 // HCTR - Host Control Register
-#define FT1000_REG_SUP_ISR 0x0026 // HISR - Host Interrupt Status Register
-#define FT1000_REG_SUP_IMASK 0x0028 // HIMASK - Host Interrupt Mask
-#define FT1000_REG_DOORBELL 0x002a // DBELL - Door Bell Register
-#define FT1000_REG_ASIC_ID 0x002e // ASICID - ASIC Identification Number
- // (Electrabuzz=0 Magnemite=0x1A01)
-
-// MEMORY MAP FOR ELECTRABUZZ ASIC
-
-#define FT1000_REG_UFIFO_STAT 0x0000 // UFSR - Uplink FIFO status register
-#define FT1000_REG_UFIFO_BEG 0x0002 // UFBR - Uplink FIFO beginning register
-#define FT1000_REG_UFIFO_MID 0x0004 // UFMR - Uplink FIFO middle register
-#define FT1000_REG_UFIFO_END 0x0006 // UFER - Uplink FIFO end register
-#define FT1000_REG_DFIFO_STAT 0x0008 // DFSR - Downlink FIFO status register
-#define FT1000_REG_DFIFO 0x000A // DFR - Downlink FIFO Register
-#define FT1000_REG_DPRAM_DATA 0x000C // DPRAM - Dual Port Indirect Data Register
-#define FT1000_REG_WATERMARK 0x0010 // WMARK - Watermark Register
-
-// MEMORY MAP FOR MAGNEMITE
-#define FT1000_REG_MAG_UFDR 0x0000 // UFDR - Uplink FIFO Data Register (32-bits)
-#define FT1000_REG_MAG_UFDRL 0x0000 // UFDRL - Uplink FIFO Data Register low-word (16-bits)
-#define FT1000_REG_MAG_UFDRH 0x0002 // UFDRH - Uplink FIFO Data Register high-word (16-bits)
-#define FT1000_REG_MAG_UFER 0x0004 // UFER - Uplink FIFO End Register
-#define FT1000_REG_MAG_UFSR 0x0006 // UFSR - Uplink FIFO Status Register
-#define FT1000_REG_MAG_DFR 0x0008 // DFR - Downlink FIFO Register (32-bits)
-#define FT1000_REG_MAG_DFRL 0x0008 // DFRL - Downlink FIFO Register low-word (16-bits)
-#define FT1000_REG_MAG_DFRH 0x000a // DFRH - Downlink FIFO Register high-word (16-bits)
-#define FT1000_REG_MAG_DFSR 0x000c // DFSR - Downlink FIFO Status Register
-#define FT1000_REG_MAG_DPDATA 0x0010 // DPDATA - Dual Port RAM Indirect Data Register (32-bits)
-#define FT1000_REG_MAG_DPDATAL 0x0010 // DPDATAL - Dual Port RAM Indirect Data Register low-word (16-bits)
-#define FT1000_REG_MAG_DPDATAH 0x0012 // DPDATAH - Dual Port RAM Indirect Data Register high-word (16-bits)
-#define FT1000_REG_MAG_WATERMARK 0x002c // WMARK - Watermark Register
-
-// Reserved Dual Port RAM offsets for Electrabuzz
-#define FT1000_DPRAM_TX_BASE 0x0002 // Host to PC Card Messaging Area
-#define FT1000_DPRAM_RX_BASE 0x0800 // PC Card to Host Messaging Area
-#define FT1000_FIFO_LEN 0x7FC // total length for DSP FIFO tracking
-#define FT1000_HI_HO 0x7FE // heartbeat with HI/HO
-#define FT1000_DSP_STATUS 0xFFE // dsp status - non-zero is a request to reset dsp
-#define FT1000_DSP_LED 0xFFA // dsp led status for PAD device
-#define FT1000_DSP_CON_STATE 0xFF8 // DSP Connection Status Info
-#define FT1000_DPRAM_FEFE 0x002 // location for dsp ready indicator
-#define FT1000_DSP_TIMER0 0x1FF0 // Timer Field from Basestation
-#define FT1000_DSP_TIMER1 0x1FF2 // Timer Field from Basestation
-#define FT1000_DSP_TIMER2 0x1FF4 // Timer Field from Basestation
-#define FT1000_DSP_TIMER3 0x1FF6 // Timer Field from Basestation
-
-// Reserved Dual Port RAM offsets for Magnemite
-#define FT1000_DPRAM_MAG_TX_BASE 0x0000 // Host to PC Card Messaging Area
-#define FT1000_DPRAM_MAG_RX_BASE 0x0200 // PC Card to Host Messaging Area
-#define FT1000_MAG_FIFO_LEN 0x1FF // total length for DSP FIFO tracking
-#define FT1000_MAG_FIFO_LEN_INDX 0x1 // low-word index
-#define FT1000_MAG_HI_HO 0x1FF // heartbeat with HI/HO
-#define FT1000_MAG_HI_HO_INDX 0x0 // high-word index
-#define FT1000_MAG_DSP_LED 0x3FE // dsp led status for PAD device
-#define FT1000_MAG_DSP_LED_INDX 0x0 // dsp led status for PAD device
-
-#define FT1000_MAG_DSP_CON_STATE 0x3FE // DSP Connection Status Info
-#define FT1000_MAG_DSP_CON_STATE_INDX 0x1 // DSP Connection Status Info
-
-#define FT1000_MAG_DPRAM_FEFE 0x000 // location for dsp ready indicator
-#define FT1000_MAG_DPRAM_FEFE_INDX 0x0 // location for dsp ready indicator
-
-#define FT1000_MAG_DSP_TIMER0 0x3FC // Timer Field from Basestation
-#define FT1000_MAG_DSP_TIMER0_INDX 0x1
-
-#define FT1000_MAG_DSP_TIMER1 0x3FC // Timer Field from Basestation
-#define FT1000_MAG_DSP_TIMER1_INDX 0x0
-
-#define FT1000_MAG_DSP_TIMER2 0x3FD // Timer Field from Basestation
-#define FT1000_MAG_DSP_TIMER2_INDX 0x1
-
-#define FT1000_MAG_DSP_TIMER3 0x3FD // Timer Field from Basestation
-#define FT1000_MAG_DSP_TIMER3_INDX 0x0
-
-#define FT1000_MAG_TOTAL_LEN 0x200
-#define FT1000_MAG_TOTAL_LEN_INDX 0x1
-
-#define FT1000_MAG_PH_LEN 0x200
-#define FT1000_MAG_PH_LEN_INDX 0x0
-
-#define FT1000_MAG_PORT_ID 0x201
-#define FT1000_MAG_PORT_ID_INDX 0x0
-
-#define HOST_INTF_LE 0x0 // Host interface little endian mode
-#define HOST_INTF_BE 0x1 // Host interface big endian mode
-
-// PC Card to Host Doorbell assignments
-#define FT1000_DB_DPRAM_RX 0x0001 // this value indicates that DSP has
- // data for host in DPRAM
-#define FT1000_ASIC_RESET_REQ 0x0004 // DSP requesting host to reset the ASIC
-#define FT1000_DSP_ASIC_RESET 0x0008 // DSP indicating host that it will reset the ASIC
-#define FT1000_DB_COND_RESET 0x0010 // DSP request for a card reset.
-
-// Host to PC Card Doorbell assignments
-#define FT1000_DB_DPRAM_TX 0x0100 // this value indicates that host has
- // data for DSP in DPRAM.
-#define FT1000_ASIC_RESET_DSP 0x0400 // Responds to FT1000_ASIC_RESET_REQ
-#define FT1000_DB_HB 0x1000 // Indicates that supervisor
- // has a heartbeat message for DSP.
-
-#define FT1000_DPRAM_BASE 0x0000 // Dual Port RAM starting offset
-
-#define hi 0x6869 // PC Card heartbeat values
-#define ho 0x686f // PC Card heartbeat values
-
-// Magnemite specific defines
-#define hi_mag 0x6968 // Byte swap hi to avoid additional system call
-#define ho_mag 0x6f68 // Byte swap ho to avoid additional system call
-
-//
-// Bit field definitions for Host Interrupt Status Register
-//
-// Indicate the cause of an interrupt.
-//
-#define ISR_EMPTY 0x00 // no bits set
-#define ISR_DOORBELL_ACK 0x01 // Doorbell acknowledge from DSP
-#define ISR_DOORBELL_PEND 0x02 // Doorbell pending from DSP
-#define ISR_RCV 0x04 // Packet available in Downlink FIFO
-#define ISR_WATERMARK 0x08 // Watermark requirements satisfied
-
-// Bit field definition for Host Interrupt Mask
-#define ISR_MASK_NONE 0x0000 // no bits set
-#define ISR_MASK_DOORBELL_ACK 0x0001 // Doorbell acknowledge mask
-#define ISR_MASK_DOORBELL_PEND 0x0002 // Doorbell pending mask
-#define ISR_MASK_RCV 0x0004 // Downlink Packet available mask
-#define ISR_MASK_WATERMARK 0x0008 // Watermark interrupt mask
-#define ISR_MASK_ALL 0xffff // Mask all interrupts
-
-// Bit field definition for Host Control Register
-#define DSP_RESET_BIT 0x0001 // Bit field to control dsp reset state
- // (0 = out of reset 1 = reset)
-#define ASIC_RESET_BIT 0x0002 // Bit field to control ASIC reset state
- // (0 = out of reset 1 = reset)
-
-// Default interrupt mask (Enable Doorbell pending and Packet available interrupts)
-#define ISR_DEFAULT_MASK 0x7ff9
-
-// Application specific IDs
-#define DSPID 0x20
-#define HOSTID 0x10
-#define DSPAIRID 0x90
-#define DRIVERID 0x00
-#define NETWORKID 0x20
-
-// Size of DPRAM Message
-#define MAX_CMD_SQSIZE 1780
-
-#define ENET_MAX_SIZE 1514
-#define ENET_HEADER_SIZE 14
-
-#define SLOWQ_TYPE 0
-#define FASTQ_TYPE 1
-
-#define MAX_DSP_SESS_REC 1024
-
-#define DSP_QID_OFFSET 4
-#define PSEUDOSZ 16
-#define PSEUDOSZWRD 8
-
-// Maximum number of occurrence of pseudo header errors before resetting PC Card.
-#define MAX_PH_ERR 300
-
-// Driver message types
-#define MEDIA_STATE 0x0010
-#define TIME_UPDATE 0x0020
-#define DSP_PROVISION 0x0030
-#define DSP_INIT_MSG 0x0050
-#define DSP_HIBERNATE 0x0060
-
-#define DSP_STORE_INFO 0x0070
-#define DSP_GET_INFO 0x0071
-#define GET_DRV_ERR_RPT_MSG 0x0073
-#define RSP_DRV_ERR_RPT_MSG 0x0074
-
-// Driver Error Messages for DSP
-#define DSP_HB_INFO 0x7ef0
-#define DSP_FIFO_INFO 0x7ef1
-#define DSP_CONDRESET_INFO 0x7ef2
-#define DSP_CMDLEN_INFO 0x7ef3
-#define DSP_CMDPHCKSUM_INFO 0x7ef4
-#define DSP_PKTPHCKSUM_INFO 0x7ef5
-#define DSP_PKTLEN_INFO 0x7ef6
-#define DSP_USER_RESET 0x7ef7
-#define FIFO_FLUSH_MAXLIMIT 0x7ef8
-#define FIFO_FLUSH_BADCNT 0x7ef9
-#define FIFO_ZERO_LEN 0x7efa
-
-#define HOST_QID_OFFSET 5
-#define QTYPE_OFFSET 13
-
-#define SUCCESS 0x00
-#define FAILURE 0x01
-#define TRUE 0x1
-#define FALSE 0x0
-
-#define MAX_NUM_APP 6
-
-#define MAXIMUM_ASIC_HB_CNT 15
-
-typedef struct _DRVMSG {
- PSEUDO_HDR pseudo;
- u16 type;
- u16 length;
- u8 data[0];
-} __attribute__ ((packed)) DRVMSG, *PDRVMSG;
-
-typedef struct _MEDIAMSG {
- PSEUDO_HDR pseudo;
- u16 type;
- u16 length;
- u16 state;
- u32 ip_addr;
- u32 net_mask;
- u32 gateway;
- u32 dns_1;
- u32 dns_2;
-} __attribute__ ((packed)) MEDIAMSG, *PMEDIAMSG;
-
-typedef struct _TIMEMSG {
- PSEUDO_HDR pseudo;
- u16 type;
- u16 length;
- u8 timeval[8];
-} __attribute__ ((packed)) TIMEMSG, *PTIMEMSG;
-
-typedef struct _DSPINITMSG {
- PSEUDO_HDR pseudo;
- u16 type;
- u16 length;
- u8 DspVer[DSPVERSZ]; // DSP version number
- u8 HwSerNum[HWSERNUMSZ]; // Hardware Serial Number
- u8 Sku[SKUSZ]; // SKU
- u8 eui64[EUISZ]; // EUI64
- u8 ProductMode[MODESZ]; // Product Mode (Market/Production)
- u8 RfCalVer[CALVERSZ]; // Rf Calibration version
- u8 RfCalDate[CALDATESZ]; // Rf Calibration date
-} __attribute__ ((packed)) DSPINITMSG, *PDSPINITMSG;
-
-typedef struct _DSPHIBERNATE {
- PSEUDO_HDR pseudo;
- u16 type;
- u16 length;
- u32 timeout;
- u16 sess_info[0];
-} DSPHIBERNATE, *PDSPHIBERNATE;
-
-typedef struct _APP_INFO_BLOCK
-{
- u32 fileobject; // Application's file object
- u16 app_id; // Application id
-} APP_INFO_BLOCK, *PAPP_INFO_BLOCK;
-
-typedef struct _PROV_RECORD {
- struct list_head list;
- u8 *pprov_data;
-} PROV_RECORD, *PPROV_RECORD;
-
-typedef struct _FT1000_INFO {
- struct net_device_stats stats;
- u16 DrvErrNum;
- u16 AsicID;
- int ASICResetNum;
- int DspAsicReset;
- int PktIntfErr;
- int DSPResetNum;
- int NumIOCTLBufs;
- int IOCTLBufLvl;
- int DeviceCreated;
- int CardReady;
- int DspHibernateFlag;
- int DSPReady;
- u8 DeviceName[15];
- int DeviceMajor;
- int registered;
- int mediastate;
- u16 packetseqnum;
- u8 squeseqnum; // sequence number on slow queue
- spinlock_t dpram_lock;
- u16 CurrentInterruptEnableMask;
- int InterruptsEnabled;
- u16 fifo_cnt;
- u8 DspVer[DSPVERSZ]; // DSP version number
- u8 HwSerNum[HWSERNUMSZ]; // Hardware Serial Number
- u8 Sku[SKUSZ]; // SKU
- u8 eui64[EUISZ]; // EUI64
- time_t ConTm; // Connection Time
- u16 LedStat;
- u16 ConStat;
- u16 ProgConStat;
- u8 ProductMode[MODESZ];
- u8 RfCalVer[CALVERSZ];
- u8 RfCalDate[CALDATESZ];
- u16 DSP_TIME[4];
- struct list_head prov_list;
- int appcnt;
- APP_INFO_BLOCK app_info[MAX_NUM_APP];
- u16 DSPInfoBlklen;
- u16 DrvMsgPend;
+#include "../ft1000.h"
+
+#define FT1000_DRV_VER 0x01010300
+
+#define FT1000_DPRAM_BASE 0x0000 /* Dual Port RAM starting offset */
+
+/* Maximum number of occurrence of pseudo header errors before resetting PC Card. */
+#define MAX_PH_ERR 300
+
+#define SUCCESS 0x00
+#define FAILURE 0x01
+
+struct ft1000_info {
+ struct net_device_stats stats;
+ u16 DrvErrNum;
+ u16 AsicID;
+ int PktIntfErr;
+ int CardReady;
+ int registered;
+ int mediastate;
+ u16 packetseqnum;
+ u8 squeseqnum; /* sequence number on slow queue */
+ spinlock_t dpram_lock;
+ u16 fifo_cnt;
+ u8 DspVer[DSPVERSZ]; /* DSP version number */
+ u8 HwSerNum[HWSERNUMSZ]; /* Hardware Serial Number */
+ u8 Sku[SKUSZ]; /* SKU */
+ u8 eui64[EUISZ]; /* EUI64 */
+ time_t ConTm; /* Connection Time */
+ u16 LedStat;
+ u16 ConStat;
+ u16 ProgConStat;
+ u8 ProductMode[MODESZ];
+ u8 RfCalVer[CALVERSZ];
+ u8 RfCalDate[CALDATESZ];
+ u16 DSP_TIME[4];
+ struct list_head prov_list;
+ u16 DSPInfoBlklen;
int (*ft1000_reset)(void *);
- void *link;
- u16 DSPInfoBlk[MAX_DSP_SESS_REC];
- union {
- u16 Rec[MAX_DSP_SESS_REC];
- u32 MagRec[MAX_DSP_SESS_REC/2];
- } DSPSess;
+ void *link;
+ u16 DSPInfoBlk[MAX_DSP_SESS_REC];
+ union {
+ u16 Rec[MAX_DSP_SESS_REC];
+ u32 MagRec[MAX_DSP_SESS_REC/2];
+ } DSPSess;
struct proc_dir_entry *proc_ft1000;
char netdevname[IFNAMSIZ];
-} FT1000_INFO, *PFT1000_INFO;
+};
-typedef struct _DPRAM_BLK {
- struct list_head list;
- u16 *pbuffer;
-} __attribute__ ((packed)) DPRAM_BLK, *PDPRAM_BLK;
-
-extern u16 ft1000_read_dpram (struct net_device *dev, int offset);
+extern u16 ft1000_read_dpram(struct net_device *dev, int offset);
extern void card_bootload(struct net_device *dev);
-extern u16 ft1000_read_dpram_mag_16 (struct net_device *dev, int offset, int Index);
-extern u32 ft1000_read_dpram_mag_32 (struct net_device *dev, int offset);
-void ft1000_write_dpram_mag_32 (struct net_device *dev, int offset, u32 value);
+extern u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index);
+extern u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset);
+void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value);
+
+/* Read the value of a given ASIC register. */
+static inline u16 ft1000_read_reg(struct net_device *dev, u16 offset)
+{
+ return inw(dev->base_addr + offset);
+}
+
+/* Set the value of a given ASIC register. */
+static inline void ft1000_write_reg(struct net_device *dev, u16 offset, u16 value)
+{
+ outw(value, dev->base_addr + offset);
+}
-#endif // _FT1000H_
+#endif
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
index 68ea035635f..6a1c1d4dcca 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c
@@ -29,52 +29,16 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-
#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/byteorder.h>
-#include <asm/uaccess.h>
-
/*====================================================================*/
-/* Module parameters */
-
-#define INT_MODULE_PARM(n, v) static int n = v; MODULE_PARM(n, "i")
-
MODULE_AUTHOR("Wai Chan");
MODULE_DESCRIPTION("FT1000 PCMCIA driver");
MODULE_LICENSE("GPL");
-/* Newer, simpler way of listing specific interrupts */
-
-/* The old way: bit map of interrupts to choose from */
-/* This means pick from 15, 14, 12, 11, 10, 9, 7, 5, 4, and 3 */
-
-/*
- All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
- you do not define PCMCIA_DEBUG at all, all the debug code will be
- left out. If you compile with PCMCIA_DEBUG=0, the debug code will
- be present but disabled.
-*/
-#ifdef FT_DEBUG
-#define DEBUG(n, args...) printk(KERN_DEBUG args)
-#else
-#define DEBUG(n, args...)
-#endif
-
/*====================================================================*/
struct net_device *init_ft1000_card(struct pcmcia_device *link,
@@ -82,100 +46,39 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
void stop_ft1000_card(struct net_device *);
static int ft1000_config(struct pcmcia_device *link);
-static void ft1000_release(struct pcmcia_device *link);
-
-/*
- The attach() and detach() entry points are used to create and destroy
- "instances" of the driver, where each instance represents everything
- needed to manage one actual PCMCIA card.
-*/
-
static void ft1000_detach(struct pcmcia_device *link);
-static int ft1000_attach(struct pcmcia_device *link);
-
-typedef struct local_info_t {
- struct pcmcia_device *link;
- struct net_device *dev;
-} local_info_t;
-
-#define MAX_ASIC_RESET_CNT 10
-#define COR_DEFAULT 0x55
+static int ft1000_attach(struct pcmcia_device *link);
/*====================================================================*/
-static void ft1000_reset(struct pcmcia_device * link)
+static void ft1000_reset(struct pcmcia_device *link)
{
pcmcia_reset_card(link->socket);
}
-/*======================================================================
-
-
-======================================================================*/
-
static int ft1000_attach(struct pcmcia_device *link)
{
-
- local_info_t *local;
-
- DEBUG(0, "ft1000_cs: ft1000_attach()\n");
-
- local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local) {
- return -ENOMEM;
- }
- local->link = link;
-
- link->priv = local;
- local->dev = NULL;
-
+ link->priv = NULL;
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
return ft1000_config(link);
-
-} /* ft1000_attach */
-
-/*======================================================================
-
- This deletes a driver "instance". The device is de-registered
- with Card Services. If it has been released, all local data
- structures are freed. Otherwise, the structures will be freed
- when the device is released.
-
-======================================================================*/
+}
static void ft1000_detach(struct pcmcia_device *link)
{
- struct net_device *dev = ((local_info_t *) link->priv)->dev;
-
- DEBUG(0, "ft1000_cs: ft1000_detach(0x%p)\n", link);
-
- if (link == NULL) {
- DEBUG(0,"ft1000_cs:ft1000_detach: Got a NULL pointer\n");
- return;
- }
+ struct net_device *dev = link->priv;
- if (dev) {
+ if (dev)
stop_ft1000_card(dev);
- }
pcmcia_disable_device(link);
-
- /* This points to the parent local_info_t struct */
free_netdev(dev);
+}
-} /* ft1000_detach */
-
-/*======================================================================
-
- Check if the io window is configured
-
-======================================================================*/
int ft1000_confcheck(struct pcmcia_device *link, void *priv_data)
{
-
return pcmcia_request_io(link);
-} /* ft1000_confcheck */
+}
/*======================================================================
@@ -187,7 +90,7 @@ int ft1000_confcheck(struct pcmcia_device *link, void *priv_data)
static int ft1000_config(struct pcmcia_device *link)
{
- int ret;
+ int ret;
dev_dbg(&link->dev, "ft1000_cs: ft1000_config(0x%p)\n", link);
@@ -205,9 +108,8 @@ static int ft1000_config(struct pcmcia_device *link)
goto failed;
}
- ((local_info_t *) link->priv)->dev = init_ft1000_card(link,
- &ft1000_reset);
- if (((local_info_t *) link->priv)->dev == NULL) {
+ link->priv = init_ft1000_card(link, &ft1000_reset);
+ if (!link->priv) {
printk(KERN_INFO "ft1000: Could not register as network device\n");
goto failed;
}
@@ -216,57 +118,13 @@ static int ft1000_config(struct pcmcia_device *link)
return 0;
failed:
- ft1000_release(link);
+ pcmcia_disable_device(link);
return -ENODEV;
-
-} /* ft1000_config */
-
-/*======================================================================
-
- After a card is removed, ft1000_release() will unregister the
- device, and release the PCMCIA configuration. If the device is
- still open, this will be postponed until it is closed.
-
-======================================================================*/
-
-static void ft1000_release(struct pcmcia_device * link)
-{
-
- DEBUG(0, "ft1000_cs: ft1000_release(0x%p)\n", link);
-
- /*
- If the device is currently in use, we won't release until it
- is actually closed, because until then, we can't be sure that
- no one will try to access the device or its data structures.
- */
-
- /*
- In a normal driver, additional code may be needed to release
- other kernel data structures associated with this device.
- */
- kfree((local_info_t *) link->priv);
- /* Don't bother checking to see if these succeed or not */
-
- pcmcia_disable_device(link);
-} /* ft1000_release */
-
-/*======================================================================
-
- The card status event handler. Mostly, this schedules other
- stuff to run after an event is received.
-
- When a CARD_REMOVAL event is received, we immediately set a
- private flag to block future accesses to this device. All the
- functions that actually access the device should check this flag
- to make sure the card is still present.
-
-======================================================================*/
+}
static int ft1000_suspend(struct pcmcia_device *link)
{
- struct net_device *dev = ((local_info_t *) link->priv)->dev;
-
- DEBUG(1, "ft1000_cs: ft1000_event(0x%06x)\n", event);
+ struct net_device *dev = link->priv;
if (link->open)
netif_device_detach(dev);
@@ -275,13 +133,9 @@ static int ft1000_suspend(struct pcmcia_device *link)
static int ft1000_resume(struct pcmcia_device *link)
{
-/* struct net_device *dev = link->priv;
- */
return 0;
}
-
-
/*====================================================================*/
static const struct pcmcia_device_id ft1000_ids[] = {
@@ -294,26 +148,22 @@ static const struct pcmcia_device_id ft1000_ids[] = {
MODULE_DEVICE_TABLE(pcmcia, ft1000_ids);
static struct pcmcia_driver ft1000_cs_driver = {
- .owner = THIS_MODULE,
- .drv = {
- .name = "ft1000_cs",
- },
- .probe = ft1000_attach,
- .remove = ft1000_detach,
+ .owner = THIS_MODULE,
+ .name = "ft1000_cs",
+ .probe = ft1000_attach,
+ .remove = ft1000_detach,
.id_table = ft1000_ids,
- .suspend = ft1000_suspend,
- .resume = ft1000_resume,
+ .suspend = ft1000_suspend,
+ .resume = ft1000_resume,
};
static int __init init_ft1000_cs(void)
{
- DEBUG(0, "ft1000_cs: loading\n");
return pcmcia_register_driver(&ft1000_cs_driver);
}
static void __exit exit_ft1000_cs(void)
{
- DEBUG(0, "ft1000_cs: unloading\n");
pcmcia_unregister_driver(&ft1000_cs_driver);
}
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dev.h b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dev.h
deleted file mode 100644
index 0b63f051f27..00000000000
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dev.h
+++ /dev/null
@@ -1,66 +0,0 @@
-//---------------------------------------------------------------------------
-// FT1000 driver for Flarion Flash OFDM NIC Device
-//
-// Copyright (C) 2002 Flarion Technologies, All rights reserved.
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms of the GNU General Public License as published by the Free
-// Software Foundation; either version 2 of the License, or (at your option) any
-// later version. This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-// or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-// more details. You should have received a copy of the GNU General Public
-// License along with this program; if not, write to the
-// Free Software Foundation, Inc., 59 Temple Place -
-// Suite 330, Boston, MA 02111-1307, USA.
-//---------------------------------------------------------------------------
-//
-// File: ft1000_dev.h
-//
-// Description: Register definitions and bit masks for the FT1000 NIC
-//
-// History:
-// 2/5/02 Ivan Bohannon Written.
-// 8/29/02 Whc Ported to Linux.
-//
-//---------------------------------------------------------------------------
-#ifndef _FT1000_DEVH_
-#define _FT1000_DEVH_
-
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_read_reg
-// Description: This function will read the value of a given ASIC register.
-// Input:
-// dev - device structure
-// offset - ASIC register offset
-// Output:
-// data - ASIC register value
-//
-//---------------------------------------------------------------------------
-static inline u16 ft1000_read_reg (struct net_device *dev, u16 offset) {
- u16 data = 0;
-
- data = inw(dev->base_addr + offset);
-
- return (data);
-}
-
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_write_reg
-// Description: This function will set the value for a given ASIC register.
-// Input:
-// dev - device structure
-// offset - ASIC register offset
-// value - value to write
-// Output:
-// None.
-//
-//---------------------------------------------------------------------------
-static inline void ft1000_write_reg (struct net_device *dev, u16 offset, u16 value) {
- outw (value, dev->base_addr + offset);
-}
-
-#endif // _FT1000_DEVH_
-
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
index fb375ea26dd..c956857e2d5 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_dnld.c
@@ -34,7 +34,6 @@
#include <asm/uaccess.h>
#include <linux/vmalloc.h>
-#include "ft1000_dev.h"
#include "ft1000.h"
#include "boot.h"
@@ -87,26 +86,14 @@
#define STATE_DONE_PROV 0x06
#define STATE_DONE_FILE 0x07
-USHORT get_handshake(struct net_device *dev, USHORT expected_value);
-void put_handshake(struct net_device *dev, USHORT handshake_value);
-USHORT get_request_type(struct net_device *dev);
+u16 get_handshake(struct net_device *dev, u16 expected_value);
+void put_handshake(struct net_device *dev, u16 handshake_value);
+u16 get_request_type(struct net_device *dev);
long get_request_value(struct net_device *dev);
void put_request_value(struct net_device *dev, long lvalue);
-USHORT hdr_checksum(PPSEUDO_HDR pHdr);
+u16 hdr_checksum(struct pseudo_hdr *pHdr);
-typedef struct _DSP_FILE_HDR {
- u32 build_date;
- u32 dsp_coff_date;
- u32 loader_code_address;
- u32 loader_code_size;
- u32 loader_code_end;
- u32 dsp_code_address;
- u32 dsp_code_size;
- u32 dsp_code_end;
- u32 reserved[8];
-} __attribute__ ((packed)) DSP_FILE_HDR, *PDSP_FILE_HDR;
-
-typedef struct _DSP_FILE_HDR_5 {
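+/* Header at the start of the DSP firmware image file. */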
+struct dsp_file_hdr {
u32 version_id; // Version ID of this image format.
u32 package_id; // Package ID of code release.
u32 build_date; // Date/time stamp when file was built.
@@ -118,18 +105,9 @@ typedef struct _DSP_FILE_HDR_5 {
u32 version_data_offset; // Offset were scrambled version data begins.
u32 version_data_size; // Size, in words, of scrambled version data.
u32 nDspImages; // Number of DSP images in file.
-} __attribute__ ((packed)) DSP_FILE_HDR_5, *PDSP_FILE_HDR_5;
-
-typedef struct _DSP_IMAGE_INFO {
- u32 coff_date; // Date/time when DSP Coff image was built.
- u32 begin_offset; // Offset in file where image begins.
- u32 end_offset; // Offset in file where image begins.
- u32 run_address; // On chip Start address of DSP code.
- u32 image_size; // Size of image.
- u32 version; // Embedded version # of DSP code.
-} __attribute__ ((packed)) DSP_IMAGE_INFO, *PDSP_IMAGE_INFO;
+} __attribute__ ((packed));
-typedef struct _DSP_IMAGE_INFO_V6 {
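+/* Descriptor for one DSP image contained in the firmware file. */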
+struct dsp_image_info {
u32 coff_date; // Date/time when DSP Coff image was built.
u32 begin_offset; // Offset in file where image begins.
u32 end_offset; // Offset in file where image begins.
@@ -138,20 +116,20 @@ typedef struct _DSP_IMAGE_INFO_V6 {
u32 version; // Embedded version # of DSP code.
unsigned short checksum; // Dsp File checksum
unsigned short pad1;
-} __attribute__ ((packed)) DSP_IMAGE_INFO_V6, *PDSP_IMAGE_INFO_V6;
+} __attribute__ ((packed));
void card_bootload(struct net_device *dev)
{
- FT1000_INFO *info = (PFT1000_INFO) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
unsigned long flags;
- PULONG pdata;
- UINT size;
- UINT i;
- ULONG templong;
+ u32 *pdata;
+ u32 size;
+ u32 i;
+ u32 templong;
DEBUG(0, "card_bootload is called\n");
- pdata = (PULONG) bootimage;
+ pdata = (u32 *) bootimage;
size = sizeof(bootimage);
// check for odd word
@@ -172,11 +150,11 @@ void card_bootload(struct net_device *dev)
spin_unlock_irqrestore(&info->dpram_lock, flags);
}
-USHORT get_handshake(struct net_device *dev, USHORT expected_value)
+u16 get_handshake(struct net_device *dev, u16 expected_value)
{
- FT1000_INFO *info = (PFT1000_INFO) netdev_priv(dev);
- USHORT handshake;
- ULONG tempx;
+ struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ u16 handshake;
+ u32 tempx;
int loopcnt;
loopcnt = 0;
@@ -190,7 +168,7 @@ USHORT get_handshake(struct net_device *dev, USHORT expected_value)
tempx =
ntohl(ft1000_read_dpram_mag_32
(dev, DWNLD_MAG_HANDSHAKE_LOC));
- handshake = (USHORT) tempx;
+ handshake = (u16) tempx;
}
if ((handshake == expected_value)
@@ -207,27 +185,27 @@ USHORT get_handshake(struct net_device *dev, USHORT expected_value)
}
-void put_handshake(struct net_device *dev, USHORT handshake_value)
+void put_handshake(struct net_device *dev, u16 handshake_value)
{
- FT1000_INFO *info = (PFT1000_INFO) netdev_priv(dev);
- ULONG tempx;
+ struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ u32 tempx;
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
DWNLD_HANDSHAKE_LOC);
ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, handshake_value); /* Handshake */
} else {
- tempx = (ULONG) handshake_value;
+ tempx = (u32) handshake_value;
tempx = ntohl(tempx);
ft1000_write_dpram_mag_32(dev, DWNLD_MAG_HANDSHAKE_LOC, tempx); /* Handshake */
}
}
-USHORT get_request_type(struct net_device *dev)
+u16 get_request_type(struct net_device *dev)
{
- FT1000_INFO *info = (PFT1000_INFO) netdev_priv(dev);
- USHORT request_type;
- ULONG tempx;
+ struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ u16 request_type;
+ u32 tempx;
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR, DWNLD_TYPE_LOC);
@@ -235,7 +213,7 @@ USHORT get_request_type(struct net_device *dev)
} else {
tempx = ft1000_read_dpram_mag_32(dev, DWNLD_MAG_TYPE_LOC);
tempx = ntohl(tempx);
- request_type = (USHORT) tempx;
+ request_type = (u16) tempx;
}
return request_type;
@@ -244,9 +222,9 @@ USHORT get_request_type(struct net_device *dev)
long get_request_value(struct net_device *dev)
{
- FT1000_INFO *info = (PFT1000_INFO) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
long value;
- USHORT w_val;
+ u16 w_val;
if (info->AsicID == ELECTRABUZZ_ID) {
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
@@ -273,19 +251,19 @@ long get_request_value(struct net_device *dev)
void put_request_value(struct net_device *dev, long lvalue)
{
- FT1000_INFO *info = (PFT1000_INFO) netdev_priv(dev);
- USHORT size;
- ULONG tempx;
+ struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
+ u16 size;
+ u32 tempx;
if (info->AsicID == ELECTRABUZZ_ID) {
- size = (USHORT) (lvalue >> 16);
+ size = (u16) (lvalue >> 16);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
DWNLD_SIZE_MSW_LOC);
ft1000_write_reg(dev, FT1000_REG_DPRAM_DATA, size);
- size = (USHORT) (lvalue);
+ size = (u16) (lvalue);
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
DWNLD_SIZE_LSW_LOC);
@@ -298,10 +276,10 @@ void put_request_value(struct net_device *dev, long lvalue)
}
-USHORT hdr_checksum(PPSEUDO_HDR pHdr)
+u16 hdr_checksum(struct pseudo_hdr *pHdr)
{
- USHORT *usPtr = (USHORT *) pHdr;
- USHORT chksum;
+ u16 *usPtr = (u16 *) pHdr;
+ u16 chksum;
chksum = ((((((usPtr[0] ^ usPtr[1]) ^ usPtr[2]) ^ usPtr[3]) ^
usPtr[4]) ^ usPtr[5]) ^ usPtr[6]);
@@ -309,32 +287,29 @@ USHORT hdr_checksum(PPSEUDO_HDR pHdr)
return chksum;
}
-int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
+int card_download(struct net_device *dev, const u8 *pFileStart, u32 FileLength)
{
- FT1000_INFO *info = (PFT1000_INFO) netdev_priv(dev);
+ struct ft1000_info *info = (struct ft1000_info *) netdev_priv(dev);
int Status = SUCCESS;
- USHORT DspWordCnt = 0;
- UINT uiState;
- USHORT handshake;
- PPSEUDO_HDR pHdr;
- USHORT usHdrLength;
- PDSP_FILE_HDR pFileHdr;
+ u32 uiState;
+ u16 handshake;
+ struct pseudo_hdr *pHdr;
+ u16 usHdrLength;
long word_length;
- USHORT request;
- USHORT temp;
- PPROV_RECORD pprov_record;
- PUCHAR pbuffer;
- PDSP_FILE_HDR_5 pFileHdr5;
- PDSP_IMAGE_INFO pDspImageInfo = NULL;
- PDSP_IMAGE_INFO_V6 pDspImageInfoV6 = NULL;
+ u16 request;
+ u16 temp;
+ struct prov_record *pprov_record;
+ u8 *pbuffer;
+ struct dsp_file_hdr *pFileHdr5;
+ struct dsp_image_info *pDspImageInfoV6 = NULL;
long requested_version;
- BOOLEAN bGoodVersion = 0;
- PDRVMSG pMailBoxData;
- USHORT *pUsData = NULL;
- USHORT *pUsFile = NULL;
- UCHAR *pUcFile = NULL;
- UCHAR *pBootEnd = NULL;
- UCHAR *pCodeEnd = NULL;
+ bool bGoodVersion = 0;
+ struct drv_msg *pMailBoxData;
+ u16 *pUsData = NULL;
+ u16 *pUsFile = NULL;
+ u8 *pUcFile = NULL;
+ u8 *pBootEnd = NULL;
+ u8 *pCodeEnd = NULL;
int imageN;
long file_version;
long loader_code_address = 0;
@@ -345,36 +320,22 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
unsigned long templong;
unsigned long image_chksum = 0;
- //
- // Get version id of file, at first 4 bytes of file, for newer files.
- //
file_version = *(long *)pFileStart;
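+	/* Only format version 6 firmware files are supported; any other version aborts the download. */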
+ if (file_version != 6) {
+ printk(KERN_ERR "ft1000: unsupported firmware version %ld\n", file_version);
+ Status = FAILURE;
+ }
uiState = STATE_START_DWNLD;
- pFileHdr = (PDSP_FILE_HDR) pFileStart;
- pFileHdr5 = (PDSP_FILE_HDR_5) pFileStart;
-
- switch (file_version) {
- case 5:
- case 6:
- pUsFile =
- (USHORT *) ((long)pFileStart + pFileHdr5->loader_offset);
- pUcFile =
- (UCHAR *) ((long)pFileStart + pFileHdr5->loader_offset);
+ pFileHdr5 = (struct dsp_file_hdr *) pFileStart;
- pBootEnd =
- (UCHAR *) ((long)pFileStart + pFileHdr5->loader_code_end);
-
- loader_code_address = pFileHdr5->loader_code_address;
- loader_code_size = pFileHdr5->loader_code_size;
- bGoodVersion = FALSE;
- break;
-
- default:
- Status = FAILURE;
- break;
- }
+ pUsFile = (u16 *) ((long)pFileStart + pFileHdr5->loader_offset);
+ pUcFile = (u8 *) ((long)pFileStart + pFileHdr5->loader_offset);
+ pBootEnd = (u8 *) ((long)pFileStart + pFileHdr5->loader_code_end);
+ loader_code_address = pFileHdr5->loader_code_address;
+ loader_code_size = pFileHdr5->loader_code_size;
+ bGoodVersion = false;
while ((Status == SUCCESS) && (uiState != STATE_DONE_FILE)) {
@@ -411,8 +372,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
break;
case REQUEST_DONE_BL:
/* Reposition ptrs to beginning of code section */
- pUsFile = (USHORT *) ((long)pBootEnd);
- pUcFile = (UCHAR *) ((long)pBootEnd);
+ pUsFile = (u16 *) ((long)pBootEnd);
+ pUcFile = (u8 *) ((long)pBootEnd);
uiState = STATE_CODE_DWNLD;
break;
case REQUEST_CODE_SEGMENT:
@@ -432,45 +393,24 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
// Provide mutual exclusive access while reading ASIC registers.
spin_lock_irqsave(&info->dpram_lock,
flags);
- if (file_version == 5) {
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
- ft1000_write_reg(dev,
- FT1000_REG_DPRAM_ADDR,
- DWNLD_PS_HDR_LOC);
-
- for (; word_length > 0; word_length--) { /* In words */
- //temp = *pUsFile;
- //temp = RtlUshortByteSwap(temp);
- ft1000_write_reg(dev,
- FT1000_REG_DPRAM_DATA,
- *pUsFile);
- pUsFile++;
- pUcFile += 2;
- DspWordCnt++;
- }
- } else {
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
- outw(DWNLD_MAG_PS_HDR_LOC,
+ /*
+ * Position ASIC DPRAM auto-increment pointer.
+ */
+ outw(DWNLD_MAG_PS_HDR_LOC,
+ dev->base_addr +
+ FT1000_REG_DPRAM_ADDR);
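+			/* Round word_length up to an even number of 16-bit words so the loop below can write 32 bits per outl(). */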
+ if (word_length & 0x01)
+ word_length++;
+ word_length = word_length / 2;
+
+ for (; word_length > 0; word_length--) { /* In words */
+ templong = *pUsFile++;
+ templong |=
+ (*pUsFile++ << 16);
+ pUcFile += 4;
+ outl(templong,
dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
- if (word_length & 0x01) {
- word_length++;
- }
- word_length = word_length / 2;
-
- for (; word_length > 0; word_length--) { /* In words */
- templong = *pUsFile++;
- templong |=
- (*pUsFile++ << 16);
- pUcFile += 4;
- outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
- }
+ FT1000_REG_MAG_DPDATAL);
}
spin_unlock_irqrestore(&info->
dpram_lock,
@@ -520,24 +460,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
break;
case REQUEST_DONE_CL:
/* Reposition ptrs to beginning of provisioning section */
- switch (file_version) {
- case 5:
- case 6:
- pUsFile =
- (USHORT *) ((long)pFileStart
- +
- pFileHdr5->
- commands_offset);
- pUcFile =
- (UCHAR *) ((long)pFileStart
- +
- pFileHdr5->
- commands_offset);
- break;
- default:
- Status = FAILURE;
- break;
- }
+ pUsFile = (u16 *) ((long)pFileStart + pFileHdr5->commands_offset);
+ pUcFile = (u8 *) ((long)pFileStart + pFileHdr5->commands_offset);
uiState = STATE_DONE_DWNLD;
break;
case REQUEST_CODE_SEGMENT:
@@ -558,45 +482,24 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
Status = FAILURE;
break;
}
- if (file_version == 5) {
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
- ft1000_write_reg(dev,
- FT1000_REG_DPRAM_ADDR,
- DWNLD_PS_HDR_LOC);
-
- for (; word_length > 0; word_length--) { /* In words */
- //temp = *pUsFile;
- //temp = RtlUshortByteSwap(temp);
- ft1000_write_reg(dev,
- FT1000_REG_DPRAM_DATA,
- *pUsFile);
- pUsFile++;
- pUcFile += 2;
- DspWordCnt++;
- }
- } else {
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
- outw(DWNLD_MAG_PS_HDR_LOC,
+ /*
+ * Position ASIC DPRAM auto-increment pointer.
+ */
+ outw(DWNLD_MAG_PS_HDR_LOC,
+ dev->base_addr +
+ FT1000_REG_DPRAM_ADDR);
+ if (word_length & 0x01)
+ word_length++;
+ word_length = word_length / 2;
+
+ for (; word_length > 0; word_length--) { /* In words */
+ templong = *pUsFile++;
+ templong |=
+ (*pUsFile++ << 16);
+ pUcFile += 4;
+ outl(templong,
dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
- if (word_length & 0x01) {
- word_length++;
- }
- word_length = word_length / 2;
-
- for (; word_length > 0; word_length--) { /* In words */
- templong = *pUsFile++;
- templong |=
- (*pUsFile++ << 16);
- pUcFile += 4;
- outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
- }
+ FT1000_REG_MAG_DPDATAL);
}
break;
@@ -606,9 +509,9 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
(long)(info->DSPInfoBlklen + 1) / 2;
put_request_value(dev, word_length);
pMailBoxData =
- (PDRVMSG) & info->DSPInfoBlk[0];
+			(struct drv_msg *)&info->DSPInfoBlk[0];
pUsData =
- (USHORT *) & pMailBoxData->data[0];
+			(u16 *)&pMailBoxData->data[0];
// Provide mutual exclusive access while reading ASIC registers.
spin_lock_irqsave(&info->dpram_lock,
flags);
@@ -658,51 +561,32 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
pFileHdr5->version_data_size;
put_request_value(dev, word_length);
pUsFile =
- (USHORT *) ((long)pFileStart +
+ (u16 *) ((long)pFileStart +
pFileHdr5->
version_data_offset);
// Provide mutual exclusive access while reading ASIC registers.
spin_lock_irqsave(&info->dpram_lock,
flags);
- if (file_version == 5) {
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
- ft1000_write_reg(dev,
- FT1000_REG_DPRAM_ADDR,
- DWNLD_PS_HDR_LOC);
-
- for (; word_length > 0; word_length--) { /* In words */
- ft1000_write_reg(dev,
- FT1000_REG_DPRAM_DATA,
- *pUsFile
- /*temp */
- );
- pUsFile++;
- }
- } else {
- /*
- * Position ASIC DPRAM auto-increment pointer.
- */
- outw(DWNLD_MAG_PS_HDR_LOC,
+ /*
+ * Position ASIC DPRAM auto-increment pointer.
+ */
+ outw(DWNLD_MAG_PS_HDR_LOC,
+ dev->base_addr +
+ FT1000_REG_DPRAM_ADDR);
+ if (word_length & 0x01)
+ word_length++;
+ word_length = word_length / 2;
+
+ for (; word_length > 0; word_length--) { /* In words */
+ templong =
+ ntohs(*pUsFile++);
+ temp =
+ ntohs(*pUsFile++);
+ templong |=
+ (temp << 16);
+ outl(templong,
dev->base_addr +
- FT1000_REG_DPRAM_ADDR);
- if (word_length & 0x01) {
- word_length++;
- }
- word_length = word_length / 2;
-
- for (; word_length > 0; word_length--) { /* In words */
- templong =
- ntohs(*pUsFile++);
- temp =
- ntohs(*pUsFile++);
- templong |=
- (temp << 16);
- outl(templong,
- dev->base_addr +
- FT1000_REG_MAG_DPDATAL);
- }
+ FT1000_REG_MAG_DPDATAL);
}
spin_unlock_irqrestore(&info->
dpram_lock,
@@ -710,120 +594,71 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
break;
case REQUEST_CODE_BY_VERSION:
- bGoodVersion = FALSE;
+ bGoodVersion = false;
requested_version =
get_request_value(dev);
- if (file_version == 5) {
- pDspImageInfo =
- (PDSP_IMAGE_INFO) ((long)
- pFileStart
- +
- sizeof
- (DSP_FILE_HDR_5));
- for (imageN = 0;
- imageN <
- pFileHdr5->nDspImages;
- imageN++) {
- if (pDspImageInfo->
- version ==
- requested_version) {
- bGoodVersion =
- TRUE;
- pUsFile =
- (USHORT
- *) ((long)
- pFileStart
- +
- pDspImageInfo->
- begin_offset);
- pUcFile =
- (UCHAR
- *) ((long)
- pFileStart
- +
- pDspImageInfo->
- begin_offset);
- pCodeEnd =
- (UCHAR
- *) ((long)
- pFileStart
- +
- pDspImageInfo->
- end_offset);
- run_address =
- pDspImageInfo->
- run_address;
- run_size =
- pDspImageInfo->
- image_size;
- break;
- }
- pDspImageInfo++;
- }
- } else {
- pDspImageInfoV6 =
- (PDSP_IMAGE_INFO_V6) ((long)
- pFileStart
- +
- sizeof
- (DSP_FILE_HDR_5));
- for (imageN = 0;
- imageN <
- pFileHdr5->nDspImages;
- imageN++) {
- temp = (USHORT)
- (pDspImageInfoV6->
- version);
- templong = temp;
- temp = (USHORT)
- (pDspImageInfoV6->
- version >> 16);
- templong |=
- (temp << 16);
- if (templong ==
- requested_version) {
- bGoodVersion =
- TRUE;
- pUsFile =
- (USHORT
- *) ((long)
- pFileStart
- +
- pDspImageInfoV6->
- begin_offset);
- pUcFile =
- (UCHAR
- *) ((long)
- pFileStart
- +
- pDspImageInfoV6->
- begin_offset);
- pCodeEnd =
- (UCHAR
- *) ((long)
- pFileStart
- +
- pDspImageInfoV6->
- end_offset);
- run_address =
- pDspImageInfoV6->
- run_address;
- run_size =
- pDspImageInfoV6->
- image_size;
- image_chksum =
- (ULONG)
- pDspImageInfoV6->
- checksum;
- DEBUG(0,
- "ft1000_dnld: image_chksum = 0x%8x\n",
- (unsigned
- int)
- image_chksum);
- break;
- }
- pDspImageInfoV6++;
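+				/* Walk the image table looking for an entry that matches the requested version. */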
+				pDspImageInfoV6 = (struct dsp_image_info *)
+					((long)pFileStart + sizeof(struct dsp_file_hdr));
+				for (imageN = 0; imageN < pFileHdr5->nDspImages; imageN++) {
+					temp = (u16)(pDspImageInfoV6->version);
+					templong = temp;
+					temp = (u16)(pDspImageInfoV6->version >> 16);
+					templong |= (temp << 16);
+					if (templong == requested_version) {
+						bGoodVersion = true;
+						pUsFile = (u16 *)((long)pFileStart +
+							pDspImageInfoV6->begin_offset);
+						pUcFile = (u8 *)((long)pFileStart +
+							pDspImageInfoV6->begin_offset);
+						pCodeEnd = (u8 *)((long)pFileStart +
+							pDspImageInfoV6->end_offset);
+						run_address = pDspImageInfoV6->run_address;
+						run_size = pDspImageInfoV6->image_size;
+						image_chksum = (u32)pDspImageInfoV6->checksum;
+						DEBUG(0, "ft1000_dnld: image_chksum = 0x%8x\n",
+						      (unsigned int)image_chksum);
+						break;
}
+ pDspImageInfoV6++;
}
if (!bGoodVersion) {
/*
@@ -852,7 +687,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
break;
}
- pHdr = (PPSEUDO_HDR) pUsFile;
+ pHdr = (struct pseudo_hdr *) pUsFile;
if (pHdr->portdest == 0x80 /* DspOAM */
&& (pHdr->portsrc == 0x00 /* Driver */
@@ -872,7 +707,7 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
case STATE_SECTION_PROV:
- pHdr = (PPSEUDO_HDR) pUcFile;
+ pHdr = (struct pseudo_hdr *) pUcFile;
if (pHdr->checksum == hdr_checksum(pHdr)) {
if (pHdr->portdest != 0x80 /* Dsp OAM */ ) {
@@ -883,15 +718,15 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
// Get buffer for provisioning data
pbuffer =
- kmalloc((usHdrLength + sizeof(PSEUDO_HDR)),
+ kmalloc((usHdrLength + sizeof(struct pseudo_hdr)),
GFP_ATOMIC);
if (pbuffer) {
memcpy(pbuffer, (void *)pUcFile,
- (UINT) (usHdrLength +
- sizeof(PSEUDO_HDR)));
+ (u32) (usHdrLength +
+ sizeof(struct pseudo_hdr)));
// link provisioning data
pprov_record =
- kmalloc(sizeof(PROV_RECORD),
+ kmalloc(sizeof(struct prov_record),
GFP_ATOMIC);
if (pprov_record) {
pprov_record->pprov_data =
@@ -901,8 +736,8 @@ int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength)
&info->prov_list);
// Move to next entry if available
pUcFile =
- (UCHAR *) ((unsigned long) pUcFile +
- (unsigned long) ((usHdrLength + 1) & 0xFFFFFFFE) + sizeof(PSEUDO_HDR));
+ (u8 *) ((unsigned long) pUcFile +
+ (unsigned long) ((usHdrLength + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr));
if ((unsigned long) (pUcFile) -
(unsigned long) (pFileStart) >=
(unsigned long) FileLength) {
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index 830822f86e4..990b2afb3d6 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -54,10 +54,9 @@
#endif
#include <linux/delay.h>
-#include "ft1000_dev.h"
#include "ft1000.h"
-int card_download(struct net_device *dev, const u8 *pFileStart, UINT FileLength);
+int card_download(struct net_device *dev, const u8 *pFileStart, u32 FileLength);
void ft1000InitProc(struct net_device *dev);
void ft1000CleanupProc(struct net_device *dev);
@@ -89,40 +88,6 @@ MODULE_SUPPORTED_DEVICE("FT1000");
//---------------------------------------------------------------------------
//
-// Function: ft1000_asic_read
-// Description: This function will retrieve the value of a specific ASIC
-// register.
-// Input:
-// dev - network device structure
-// offset - ASIC register to read
-// Output:
-// value - value of ASIC register
-//
-//---------------------------------------------------------------------------
-inline u16 ft1000_asic_read(struct net_device *dev, u16 offset)
-{
- return (ft1000_read_reg(dev, offset));
-}
-
-//---------------------------------------------------------------------------
-//
-// Function: ft1000_asic_write
-// Description: This function will set the value of a specific ASIC
-// register.
-// Input:
-// dev - network device structure
-// value - value to set ASIC register
-// Output:
-// none
-//
-//---------------------------------------------------------------------------
-inline void ft1000_asic_write(struct net_device *dev, u16 offset, u16 value)
-{
- ft1000_write_reg(dev, offset, value);
-}
-
-//---------------------------------------------------------------------------
-//
// Function: ft1000_read_fifo_len
// Description: This function will read the ASIC Uplink FIFO status register
// which will return the number of bytes remaining in the Uplink FIFO.
@@ -136,7 +101,7 @@ inline void ft1000_asic_write(struct net_device *dev, u16 offset, u16 value)
//---------------------------------------------------------------------------
static inline u16 ft1000_read_fifo_len(struct net_device *dev)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
if (info->AsicID == ELECTRABUZZ_ID) {
return (ft1000_read_reg(dev, FT1000_REG_UFIFO_STAT) - 16);
@@ -159,7 +124,7 @@ static inline u16 ft1000_read_fifo_len(struct net_device *dev)
//---------------------------------------------------------------------------
u16 ft1000_read_dpram(struct net_device * dev, int offset)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
u16 data;
@@ -188,7 +153,7 @@ u16 ft1000_read_dpram(struct net_device * dev, int offset)
static inline void ft1000_write_dpram(struct net_device *dev,
int offset, u16 value)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
// Provide mutual exclusive access while reading ASIC registers.
@@ -212,7 +177,7 @@ static inline void ft1000_write_dpram(struct net_device *dev,
//---------------------------------------------------------------------------
u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
u16 data;
@@ -246,7 +211,7 @@ u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index)
static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
int offset, u16 value, int Index)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
// Provide mutual exclusive access while reading ASIC registers.
@@ -274,7 +239,7 @@ static inline void ft1000_write_dpram_mag_16(struct net_device *dev,
//---------------------------------------------------------------------------
u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
u32 data;
@@ -302,7 +267,7 @@ u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset)
//---------------------------------------------------------------------------
void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
unsigned long flags;
// Provide mutual exclusive access while reading ASIC registers.
@@ -324,17 +289,14 @@ void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value)
//---------------------------------------------------------------------------
static void ft1000_enable_interrupts(struct net_device *dev)
{
- FT1000_INFO *info = netdev_priv(dev);
u16 tempword;
DEBUG(1, "ft1000_hw:ft1000_enable_interrupts()\n");
- ft1000_write_reg(dev, FT1000_REG_SUP_IMASK,
- info->CurrentInterruptEnableMask);
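+	/* ISR_DEFAULT_MASK enables the doorbell and downlink packet interrupts. */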
+ ft1000_write_reg(dev, FT1000_REG_SUP_IMASK, ISR_DEFAULT_MASK);
tempword = ft1000_read_reg(dev, FT1000_REG_SUP_IMASK);
DEBUG(1,
"ft1000_hw:ft1000_enable_interrupts:current interrupt enable mask = 0x%x\n",
tempword);
- info->InterruptsEnabled = TRUE;
}
//---------------------------------------------------------------------------
@@ -349,7 +311,6 @@ static void ft1000_enable_interrupts(struct net_device *dev)
//---------------------------------------------------------------------------
static void ft1000_disable_interrupts(struct net_device *dev)
{
- FT1000_INFO *info = netdev_priv(dev);
u16 tempword;
DEBUG(1, "ft1000_hw: ft1000_disable_interrupts()\n");
@@ -358,7 +319,6 @@ static void ft1000_disable_interrupts(struct net_device *dev)
DEBUG(1,
"ft1000_hw:ft1000_disable_interrupts:current interrupt enable mask = 0x%x\n",
tempword);
- info->InterruptsEnabled = FALSE;
}
//---------------------------------------------------------------------------
@@ -374,13 +334,12 @@ static void ft1000_disable_interrupts(struct net_device *dev)
//---------------------------------------------------------------------------
static void ft1000_reset_asic(struct net_device *dev)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
u16 tempword;
DEBUG(1, "ft1000_hw:ft1000_reset_asic called\n");
(*info->ft1000_reset) (info->link);
- info->ASICResetNum++;
// Let's use the register provided by the Magnemite ASIC to reset the
// ASIC and DSP.
@@ -412,17 +371,17 @@ static void ft1000_reset_asic(struct net_device *dev)
// Input:
// dev - device structure
// Output:
-// status - FALSE (card reset fail)
-// TRUE (card reset successful)
+// status - false (card reset fail)
+// true (card reset successful)
//
//---------------------------------------------------------------------------
static int ft1000_reset_card(struct net_device *dev)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
u16 tempword;
int i;
unsigned long flags;
- PPROV_RECORD ptr;
+ struct prov_record *ptr;
DEBUG(1, "ft1000_hw:ft1000_reset_card called.....\n");
@@ -437,7 +396,7 @@ static int ft1000_reset_card(struct net_device *dev)
while (list_empty(&info->prov_list) == 0) {
DEBUG(0,
"ft1000_hw:ft1000_reset_card:deleting provisioning record\n");
- ptr = list_entry(info->prov_list.next, PROV_RECORD, list);
+ ptr = list_entry(info->prov_list.next, struct prov_record, list);
list_del(&ptr->list);
kfree(ptr->pprov_data);
kfree(ptr);
@@ -457,14 +416,12 @@ static int ft1000_reset_card(struct net_device *dev)
if (ft1000_card_present == 1) {
spin_lock_irqsave(&info->dpram_lock, flags);
if (info->AsicID == ELECTRABUZZ_ID) {
- if (info->DspHibernateFlag == 0) {
- ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
- FT1000_DPRAM_RX_BASE);
- for (i = 0; i < MAX_DSP_SESS_REC; i++) {
- info->DSPSess.Rec[i] =
- ft1000_read_reg(dev,
- FT1000_REG_DPRAM_DATA);
- }
+ ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
+ FT1000_DPRAM_RX_BASE);
+ for (i = 0; i < MAX_DSP_SESS_REC; i++) {
+ info->DSPSess.Rec[i] =
+ ft1000_read_reg(dev,
+ FT1000_REG_DPRAM_DATA);
}
} else {
ft1000_write_reg(dev, FT1000_REG_DPRAM_ADDR,
@@ -482,8 +439,6 @@ static int ft1000_reset_card(struct net_device *dev)
//reset ASIC
ft1000_reset_asic(dev);
- info->DSPResetNum++;
-
DEBUG(1, "ft1000_hw:ft1000_reset_card:downloading dsp image\n");
if (info->AsicID == MAGNEMITE_ID) {
@@ -517,7 +472,7 @@ static int ft1000_reset_card(struct net_device *dev)
if (i == 50) {
DEBUG(0,
"ft1000_hw:ft1000_reset_card:No FEFE detected from DSP\n");
- return FALSE;
+ return false;
}
} else {
@@ -528,7 +483,7 @@ static int ft1000_reset_card(struct net_device *dev)
if (card_download(dev, fw_entry->data, fw_entry->size)) {
DEBUG(1, "card download unsuccessful\n");
- return FALSE;
+ return false;
} else {
DEBUG(1, "card download successful\n");
}
@@ -564,7 +519,7 @@ static int ft1000_reset_card(struct net_device *dev)
// poll_timer.data = (u_long)dev;
// add_timer(&poll_timer);
- return TRUE;
+ return true;
}
@@ -576,8 +531,8 @@ static int ft1000_reset_card(struct net_device *dev)
// Input:
// dev - device structure
// Output:
-// status - FALSE (device is not present)
-// TRUE (device is present)
+// status - false (device is not present)
+// true (device is present)
//
//---------------------------------------------------------------------------
static int ft1000_chkcard(struct net_device *dev)
@@ -590,7 +545,7 @@ static int ft1000_chkcard(struct net_device *dev)
if (tempword == 0) {
DEBUG(1,
"ft1000_hw:ft1000_chkcard: IMASK = 0 Card not detected\n");
- return FALSE;
+ return false;
}
// The system will return the value of 0xffff for the version register
// if the device is not present.
@@ -598,9 +553,9 @@ static int ft1000_chkcard(struct net_device *dev)
if (tempword == 0xffff) {
DEBUG(1,
"ft1000_hw:ft1000_chkcard: Version = 0xffff Card not detected\n");
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
@@ -619,8 +574,8 @@ static void ft1000_hbchk(u_long data)
{
struct net_device *dev = (struct net_device *)data;
- FT1000_INFO *info;
- USHORT tempword;
+ struct ft1000_info *info;
+ u16 tempword;
info = netdev_priv(dev);
@@ -835,12 +790,12 @@ static void ft1000_hbchk(u_long data)
//---------------------------------------------------------------------------
void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, u16 qtype)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
int i;
u16 tempword;
unsigned long flags;
- size += PSEUDOSZ;
+ size += sizeof(struct pseudo_hdr);
// check for odd byte and increment to 16-bit word align value
if ((size & 0x0001)) {
size++;
@@ -918,9 +873,9 @@ void ft1000_send_cmd (struct net_device *dev, u16 *ptempbuffer, int size, u16 qt
// = 1 (successful)
//
//---------------------------------------------------------------------------
-BOOLEAN ft1000_receive_cmd(struct net_device *dev, u16 * pbuffer, int maxsz, u16 *pnxtph)
+bool ft1000_receive_cmd(struct net_device *dev, u16 *pbuffer, int maxsz, u16 *pnxtph)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
u16 size;
u16 *ppseudohdr;
int i;
@@ -928,18 +883,18 @@ BOOLEAN ft1000_receive_cmd(struct net_device *dev, u16 * pbuffer, int maxsz, u16
unsigned long flags;
if (info->AsicID == ELECTRABUZZ_ID) {
- size = ( ft1000_read_dpram(dev, *pnxtph) ) + PSEUDOSZ;
+		size = ft1000_read_dpram(dev, *pnxtph) + sizeof(struct pseudo_hdr);
} else {
size =
ntohs(ft1000_read_dpram_mag_16
(dev, FT1000_MAG_PH_LEN,
- FT1000_MAG_PH_LEN_INDX)) + PSEUDOSZ;
+ FT1000_MAG_PH_LEN_INDX)) + sizeof(struct pseudo_hdr);
}
if (size > maxsz) {
DEBUG(1,
"FT1000:ft1000_receive_cmd:Invalid command length = %d\n",
size);
- return FALSE;
+ return false;
} else {
ppseudohdr = (u16 *) pbuffer;
spin_lock_irqsave(&info->dpram_lock, flags);
@@ -994,9 +949,9 @@ BOOLEAN ft1000_receive_cmd(struct net_device *dev, u16 * pbuffer, int maxsz, u16
DEBUG(1,
"FT1000:ft1000_receive_cmd:Pseudo header checksum mismatch\n");
// Drop this message
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
}
@@ -1013,17 +968,17 @@ BOOLEAN ft1000_receive_cmd(struct net_device *dev, u16 * pbuffer, int maxsz, u16
//---------------------------------------------------------------------------
void ft1000_proc_drvmsg(struct net_device *dev)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
u16 msgtype;
u16 tempword;
- PMEDIAMSG pmediamsg;
- PDSPINITMSG pdspinitmsg;
- PDRVMSG pdrvmsg;
+ struct media_msg *pmediamsg;
+ struct dsp_init_msg *pdspinitmsg;
+ struct drv_msg *pdrvmsg;
u16 len;
u16 i;
- PPROV_RECORD ptr;
- PPSEUDO_HDR ppseudo_hdr;
- PUSHORT pmsg;
+ struct prov_record *ptr;
+ struct pseudo_hdr *ppseudo_hdr;
+ u16 *pmsg;
struct timeval tv;
union {
u8 byte[2];
@@ -1039,7 +994,7 @@ void ft1000_proc_drvmsg(struct net_device *dev)
if ( ft1000_receive_cmd(dev, &cmdbuffer[0], MAX_CMD_SQSIZE, &tempword) ) {
// Get the message type which is total_len + PSEUDO header + msgtype + message body
- pdrvmsg = (PDRVMSG) & cmdbuffer[0];
+		pdrvmsg = (struct drv_msg *)&cmdbuffer[0];
msgtype = ntohs(pdrvmsg->type);
DEBUG(1, "Command message type = 0x%x\n", msgtype);
switch (msgtype) {
@@ -1062,12 +1017,12 @@ void ft1000_proc_drvmsg(struct net_device *dev)
}
ptr =
list_entry(info->prov_list.next,
- PROV_RECORD, list);
+ struct prov_record, list);
len = *(u16 *) ptr->pprov_data;
len = htons(len);
- pmsg = (PUSHORT) ptr->pprov_data;
- ppseudo_hdr = (PPSEUDO_HDR) pmsg;
+ pmsg = (u16 *) ptr->pprov_data;
+ ppseudo_hdr = (struct pseudo_hdr *) pmsg;
// Insert slow queue sequence number
ppseudo_hdr->seq_num = info->squeseqnum++;
ppseudo_hdr->portsrc = 0;
@@ -1091,7 +1046,7 @@ void ft1000_proc_drvmsg(struct net_device *dev)
info->CardReady = 1;
break;
case MEDIA_STATE:
- pmediamsg = (PMEDIAMSG) & cmdbuffer[0];
+			pmediamsg = (struct media_msg *)&cmdbuffer[0];
if (info->ProgConStat != 0xFF) {
if (pmediamsg->state) {
DEBUG(1, "Media is up\n");
@@ -1123,7 +1078,7 @@ void ft1000_proc_drvmsg(struct net_device *dev)
}
break;
case DSP_INIT_MSG:
- pdspinitmsg = (PDSPINITMSG) & cmdbuffer[0];
+			pdspinitmsg = (struct dsp_init_msg *)&cmdbuffer[0];
memcpy(info->DspVer, pdspinitmsg->DspVer, DSPVERSZ);
DEBUG(1, "DSPVER = 0x%2x 0x%2x 0x%2x 0x%2x\n",
info->DspVer[0], info->DspVer[1], info->DspVer[2],
@@ -1140,7 +1095,7 @@ void ft1000_proc_drvmsg(struct net_device *dev)
dev->dev_addr[5] = info->eui64[7];
if (ntohs(pdspinitmsg->length) ==
- (sizeof(DSPINITMSG) - 20)) {
+ (sizeof(struct dsp_init_msg) - 20)) {
memcpy(info->ProductMode,
pdspinitmsg->ProductMode, MODESZ);
memcpy(info->RfCalVer, pdspinitmsg->RfCalVer,
@@ -1157,7 +1112,7 @@ void ft1000_proc_drvmsg(struct net_device *dev)
tempword = ntohs(pdrvmsg->length);
info->DSPInfoBlklen = tempword;
if (tempword < (MAX_DSP_SESS_REC - 4)) {
- pmsg = (PUSHORT) & pdrvmsg->data[0];
+				pmsg = (u16 *)&pdrvmsg->data[0];
for (i = 0; i < ((tempword + 1) / 2); i++) {
DEBUG(1,
"FT1000:drivermsg:dsp info data = 0x%x\n",
@@ -1169,7 +1124,6 @@ void ft1000_proc_drvmsg(struct net_device *dev)
case DSP_GET_INFO:
DEBUG(1, "FT1000:drivermsg:Got DSP_GET_INFO\n");
// copy dsp info block to dsp
- info->DrvMsgPend = 1;
// allow any outstanding ioctl to finish
mdelay(10);
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
@@ -1185,8 +1139,8 @@ void ft1000_proc_drvmsg(struct net_device *dev)
if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
// Put message into Slow Queue
// Form Pseudo header
- pmsg = (PUSHORT) info->DSPInfoBlk;
- ppseudo_hdr = (PPSEUDO_HDR) pmsg;
+ pmsg = (u16 *) info->DSPInfoBlk;
+ ppseudo_hdr = (struct pseudo_hdr *) pmsg;
ppseudo_hdr->length =
htons(info->DSPInfoBlklen + 4);
ppseudo_hdr->source = 0x10;
@@ -1210,15 +1164,13 @@ void ft1000_proc_drvmsg(struct net_device *dev)
info->DSPInfoBlk[8] = 0x7200;
info->DSPInfoBlk[9] =
htons(info->DSPInfoBlklen);
- ft1000_send_cmd (dev, (PUSHORT)info->DSPInfoBlk, (USHORT)(info->DSPInfoBlklen+4), 0);
+ ft1000_send_cmd (dev, (u16 *)info->DSPInfoBlk, (u16)(info->DSPInfoBlklen+4), 0);
}
- info->DrvMsgPend = 0;
break;
case GET_DRV_ERR_RPT_MSG:
DEBUG(1, "FT1000:drivermsg:Got GET_DRV_ERR_RPT_MSG\n");
// copy driver error message to dsp
- info->DrvMsgPend = 1;
// allow any outstanding ioctl to finish
mdelay(10);
tempword = ft1000_read_reg(dev, FT1000_REG_DOORBELL);
@@ -1234,8 +1186,8 @@ void ft1000_proc_drvmsg(struct net_device *dev)
if ((tempword & FT1000_DB_DPRAM_TX) == 0) {
// Put message into Slow Queue
// Form Pseudo header
- pmsg = (PUSHORT) & tempbuffer[0];
- ppseudo_hdr = (PPSEUDO_HDR) pmsg;
+ pmsg = (u16 *) & tempbuffer[0];
+ ppseudo_hdr = (struct pseudo_hdr *) pmsg;
ppseudo_hdr->length = htons(0x0012);
ppseudo_hdr->source = 0x10;
ppseudo_hdr->destination = 0x20;
@@ -1255,7 +1207,7 @@ void ft1000_proc_drvmsg(struct net_device *dev)
for (i=1; i<7; i++) {
ppseudo_hdr->checksum ^= *pmsg++;
}
- pmsg = (PUSHORT) & tempbuffer[16];
+				pmsg = (u16 *)&tempbuffer[16];
*pmsg++ = htons(RSP_DRV_ERR_RPT_MSG);
*pmsg++ = htons(0x000e);
*pmsg++ = htons(info->DSP_TIME[0]);
@@ -1270,10 +1222,9 @@ void ft1000_proc_drvmsg(struct net_device *dev)
*pmsg++ = convert.wrd;
*pmsg++ = htons(info->DrvErrNum);
- ft1000_send_cmd (dev, (PUSHORT)&tempbuffer[0], (USHORT)(0x0012), 0);
+ ft1000_send_cmd (dev, (u16 *)&tempbuffer[0], (u16)(0x0012), 0);
info->DrvErrNum = 0;
}
- info->DrvMsgPend = 0;
break;
default:
@@ -1296,7 +1247,7 @@ void ft1000_proc_drvmsg(struct net_device *dev)
//---------------------------------------------------------------------------
int ft1000_parse_dpram_msg(struct net_device *dev)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
u16 doorbell;
u16 portid;
u16 nxtph;
@@ -1340,13 +1291,11 @@ int ft1000_parse_dpram_msg(struct net_device *dev)
ft1000_write_reg(dev, FT1000_REG_SUP_CTRL,
HOST_INTF_BE);
}
- info->DspAsicReset = 0;
}
if (doorbell & FT1000_DSP_ASIC_RESET) {
DEBUG(0,
"FT1000:ft1000_parse_dpram_msg: Got a dsp ASIC reset message\n");
- info->DspAsicReset = 1;
ft1000_write_reg(dev, FT1000_REG_DOORBELL,
FT1000_DSP_ASIC_RESET);
udelay(200);
@@ -1368,7 +1317,7 @@ int ft1000_parse_dpram_msg(struct net_device *dev)
}
DEBUG(1, "FT1000:ft1000_parse_dpram_msg:total length = %d\n",
total_len);
- if ((total_len < MAX_CMD_SQSIZE) && (total_len > PSEUDOSZ)) {
+ if ((total_len < MAX_CMD_SQSIZE) && (total_len > sizeof(struct pseudo_hdr))) {
total_len += nxtph;
cnt = 0;
// ft1000_read_reg will return a value that needs to be byteswap
@@ -1453,7 +1402,7 @@ int ft1000_parse_dpram_msg(struct net_device *dev)
//---------------------------------------------------------------------------
static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
u16 i;
u32 templong;
u16 tempword;
@@ -1600,7 +1549,7 @@ static void ft1000_flush_fifo(struct net_device *dev, u16 DrvErrNum)
int ft1000_copy_up_pkt(struct net_device *dev)
{
u16 tempword;
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
u16 len;
struct sk_buff *skb;
u16 i;
@@ -1715,7 +1664,7 @@ int ft1000_copy_up_pkt(struct net_device *dev)
tempword = ft1000_read_reg(dev, FT1000_REG_DFIFO);
*pbuffer++ = (u8) (tempword >> 8);
*pbuffer++ = (u8) tempword;
- if (ft1000_chkcard(dev) == FALSE) {
+ if (ft1000_chkcard(dev) == false) {
kfree_skb(skb);
return FAILURE;
}
@@ -1787,11 +1736,11 @@ int ft1000_copy_up_pkt(struct net_device *dev)
//---------------------------------------------------------------------------
int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
union {
- PSEUDO_HDR blk;
- u16 buff[sizeof(PSEUDO_HDR) >> 1];
- u8 buffc[sizeof(PSEUDO_HDR)];
+ struct pseudo_hdr blk;
+ u16 buff[sizeof(struct pseudo_hdr) >> 1];
+ u8 buffc[sizeof(struct pseudo_hdr)];
} pseudo;
int i;
u32 *plong;
@@ -1947,7 +1896,7 @@ int ft1000_copy_down_pkt(struct net_device *dev, u16 * packet, u16 len)
static struct net_device_stats *ft1000_stats(struct net_device *dev)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
return (&info->stats);
}
@@ -1971,7 +1920,7 @@ static int ft1000_open(struct net_device *dev)
static int ft1000_close(struct net_device *dev)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
DEBUG(0, "ft1000_hw: ft1000_close()\n");
@@ -1993,7 +1942,7 @@ static int ft1000_close(struct net_device *dev)
static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
u8 *pdata;
DEBUG(1, "ft1000_hw: ft1000_start_xmit()\n");
@@ -2030,7 +1979,7 @@ static int ft1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
- FT1000_INFO *info = netdev_priv(dev);
+ struct ft1000_info *info = netdev_priv(dev);
u16 tempword;
u16 inttype;
int cnt;
@@ -2042,7 +1991,7 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
- if (ft1000_chkcard(dev) == FALSE) {
+ if (ft1000_chkcard(dev) == false) {
ft1000_disable_interrupts(dev);
return IRQ_HANDLED;
}
@@ -2095,8 +2044,8 @@ static irqreturn_t ft1000_interrupt(int irq, void *dev_id)
void stop_ft1000_card(struct net_device *dev)
{
- FT1000_INFO *info = netdev_priv(dev);
- PPROV_RECORD ptr;
+ struct ft1000_info *info = netdev_priv(dev);
+ struct prov_record *ptr;
// int cnt;
DEBUG(0, "ft1000_hw: stop_ft1000_card()\n");
@@ -2108,7 +2057,7 @@ void stop_ft1000_card(struct net_device *dev)
// Make sure we free any memory reserve for provisioning
while (list_empty(&info->prov_list) == 0) {
- ptr = list_entry(info->prov_list.next, PROV_RECORD, list);
+ ptr = list_entry(info->prov_list.next, struct prov_record, list);
list_del(&ptr->list);
kfree(ptr->pprov_data);
kfree(ptr);
@@ -2130,7 +2079,7 @@ void stop_ft1000_card(struct net_device *dev)
static void ft1000_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- FT1000_INFO *ft_info;
+ struct ft1000_info *ft_info;
ft_info = netdev_priv(dev);
snprintf(info->driver, 32, "ft1000");
@@ -2142,7 +2091,7 @@ static void ft1000_get_drvinfo(struct net_device *dev,
static u32 ft1000_get_link(struct net_device *dev)
{
- FT1000_INFO *info;
+ struct ft1000_info *info;
info = netdev_priv(dev);
return info->mediastate;
}
@@ -2155,7 +2104,7 @@ static const struct ethtool_ops ops = {
struct net_device *init_ft1000_card(struct pcmcia_device *link,
void *ft1000_reset)
{
- FT1000_INFO *info;
+ struct ft1000_info *info;
struct net_device *dev;
static const struct net_device_ops ft1000ops = // Slavius 21.10.2009 due to kernel changes
@@ -2180,7 +2129,7 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
return NULL;
}
- dev = alloc_etherdev(sizeof(FT1000_INFO));
+ dev = alloc_etherdev(sizeof(struct ft1000_info));
if (!dev) {
printk(KERN_ERR "ft1000: failed to allocate etherdev\n");
return NULL;
@@ -2189,7 +2138,7 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
SET_NETDEV_DEV(dev, &link->dev);
info = netdev_priv(dev);
- memset(info, 0, sizeof(FT1000_INFO));
+ memset(info, 0, sizeof(struct ft1000_info));
DEBUG(1, "address of dev = 0x%8x\n", (u32) dev);
DEBUG(1, "address of dev info = 0x%8x\n", (u32) info);
@@ -2199,16 +2148,11 @@ struct net_device *init_ft1000_card(struct pcmcia_device *link,
spin_lock_init(&info->dpram_lock);
info->DrvErrNum = 0;
- info->ASICResetNum = 0;
info->registered = 1;
info->link = link;
info->ft1000_reset = ft1000_reset;
info->mediastate = 0;
info->fifo_cnt = 0;
- info->DeviceCreated = FALSE;
- info->DeviceMajor = 0;
- info->CurrentInterruptEnableMask = ISR_DEFAULT_MASK;
- info->InterruptsEnabled = FALSE;
info->CardReady = 0;
info->DSP_TIME[0] = 0;
info->DSP_TIME[1] = 0;
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
index bdfb1aec58d..9e728b3415e 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
@@ -22,6 +22,7 @@
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
+#include <asm/io.h>
#include <asm/uaccess.h>
#include "ft1000.h"
@@ -53,7 +54,7 @@ int ft1000ReadProc(char *page, char **start, off_t off,
struct net_device *dev;
int len;
int i;
- FT1000_INFO *info;
+ struct ft1000_info *info;
char *status[] =
{ "Idle (Disconnect)", "Searching", "Active (Connected)",
"Waiting for L2", "Sleep", "No Coverage", "", ""
@@ -75,16 +76,14 @@ int ft1000ReadProc(char *page, char **start, off_t off,
/* Wrap-around */
if (info->AsicID == ELECTRABUZZ_ID) {
- if (info->DspHibernateFlag == 0) {
- if (info->ProgConStat != 0xFF) {
- info->LedStat =
- ft1000_read_dpram(dev, FT1000_DSP_LED);
- info->ConStat =
- ft1000_read_dpram(dev,
- FT1000_DSP_CON_STATE);
- } else {
- info->ConStat = 0xf;
- }
+ if (info->ProgConStat != 0xFF) {
+ info->LedStat =
+ ft1000_read_dpram(dev, FT1000_DSP_LED);
+ info->ConStat =
+ ft1000_read_dpram(dev,
+ FT1000_DSP_CON_STATE);
+ } else {
+ info->ConStat = 0xf;
}
} else {
if (info->ProgConStat != 0xFF) {
@@ -172,7 +171,7 @@ static int ft1000NotifyProc(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = ptr;
- FT1000_INFO *info;
+ struct ft1000_info *info;
info = netdev_priv(dev);
@@ -193,7 +192,7 @@ static struct notifier_block ft1000_netdev_notifier = {
void ft1000InitProc(struct net_device *dev)
{
- FT1000_INFO *info;
+ struct ft1000_info *info;
info = netdev_priv(dev);
@@ -206,7 +205,7 @@ void ft1000InitProc(struct net_device *dev)
void ft1000CleanupProc(struct net_device *dev)
{
- FT1000_INFO *info;
+ struct ft1000_info *info;
info = netdev_priv(dev);
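
The hunks above are largely mechanical: the old FT1000_INFO typedef becomes struct ft1000_info, which lives in the net_device private area obtained with netdev_priv(). The following is a minimal sketch of that idiom, not code from the driver; the names ending in _example are invented here for illustration.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>

struct ft1000_info_example {		/* hypothetical private state */
	int mediastate;
	spinlock_t dpram_lock;
};

static struct net_device *ft1000_alloc_example(void)
{
	/* alloc_etherdev() reserves sizeof(priv) bytes behind the
	 * net_device and zeroes them, so netdev_priv() hands back
	 * ready-to-use storage for the driver's private struct. */
	struct net_device *dev = alloc_etherdev(sizeof(struct ft1000_info_example));
	struct ft1000_info_example *info;

	if (!dev)
		return NULL;
	info = netdev_priv(dev);
	spin_lock_init(&info->dpram_lock);
	return dev;
}

Every netdev_priv() call in the patched code resolves to the same embedded allocation, which is why the conversion amounts to a type rename.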
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index b0a4211f43a..3f303ea1433 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -671,7 +671,6 @@ static int ft1000_reset_card(struct net_device *dev)
return TRUE;
}
-#ifdef HAVE_NET_DEVICE_OPS
static const struct net_device_ops ftnet_ops =
{
.ndo_open = &ft1000_open,
@@ -679,7 +678,6 @@ static const struct net_device_ops ftnet_ops =
.ndo_start_xmit = &ft1000_start_xmit,
.ndo_get_stats = &ft1000_netdev_stats,
};
-#endif
//---------------------------------------------------------------------------
@@ -764,14 +762,7 @@ int init_ft1000_netdev(struct ft1000_device *ft1000dev)
INIT_LIST_HEAD(&pInfo->nodes.list);
-#ifdef HAVE_NET_DEVICE_OPS
netdev->netdev_ops = &ftnet_ops;
-#else
- netdev->hard_start_xmit = &ft1000_start_xmit;
- netdev->get_stats = &ft1000_netdev_stats;
- netdev->open = &ft1000_open;
- netdev->stop = &ft1000_close;
-#endif
ft1000dev->net = netdev;
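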
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h b/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
index 6a8a1969f9e..3f4207fd159 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_ioctl.h
@@ -26,38 +26,6 @@
#ifndef _FT1000IOCTLH_
#define _FT1000IOCTLH_
-#define DSPVERSZ 4
-#define HWSERNUMSZ 16
-#define SKUSZ 20
-#define EUISZ 8
-#define CALVERSZ 2
-#define CALDATESZ 6
-
-#define MAX_DNLD_BLKSZ 1024
-
-// Standard Flarion Pseudo header
-struct pseudo_hdr {
- unsigned short length; //length of msg body
- unsigned char source; //source address (0x10=Host 0x20=DSP)
- unsigned char destination; //destination address (refer to source address)
- unsigned char portdest; //destination port id
- // 0x00=Driver
- // 0x10=Application Broadcast
- // 0x20=Network Stack
- // 0x80=Dsp OAM
- // 0x90=Dsp Airlink
- // 0xa0=Dsp Loader
- // 0xb0=Dsp MIP
- unsigned char portsrc; //source port id (refer to portdest)
- unsigned short sh_str_id; //stream id (Not applicable on Mobile)
- unsigned char control; //stream id (Not applicable on Mobile)
- unsigned char rsvd1; //reserved
- unsigned char seq_num; //sequence number
- unsigned char rsvd2; //reserved
- unsigned short qos_class; //Quality of Service class (Not applicable on Mobile)
- unsigned short checksum; //Pseudo header checksum
-} __attribute__ ((packed));
-
typedef struct _IOCTL_GET_VER
{
unsigned long drv_ver;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
index 0b30020c754..51c084756b4 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
@@ -2,81 +2,18 @@
#define _FT1000_USB_H_
/*Jim*/
+#include "../ft1000.h"
#include "ft1000_ioctl.h"
#define FT1000_DRV_VER 0x01010403
-#define MODESZ 2
#define MAX_NUM_APP 6
#define MAX_MSG_LIMIT 200
#define NUM_OF_FREE_BUFFERS 1500
-// Driver message types
-#define MEDIA_STATE 0x0010
-#define DSP_PROVISION 0x0030
-#define DSP_INIT_MSG 0x0050
-#define DSP_STORE_INFO 0x0070
-#define DSP_GET_INFO 0x0071
-#define GET_DRV_ERR_RPT_MSG 0x0073
-#define RSP_DRV_ERR_RPT_MSG 0x0074
-
-
-// Size of DPRAM Command
-#define MAX_CMD_SQSIZE 1780
-#define SLOWQ_TYPE 0
#define PSEUDOSZ 16
-#define DSP_QID_OFFSET 4
-
-
-// MEMORY MAP FOR ELECTRABUZZ ASIC
-#define FT1000_REG_DFIFO_STAT 0x0008 // Downlink FIFO status register
-#define FT1000_REG_DPRAM_DATA 0x000C // DPRAM VALUE in DPRAM ADDR
-
-#define FT1000_DSP_LED 0xFFA // dsp led status for PAD device
-
-#define FT1000_MAG_DSP_LED 0x3FE // dsp led status for PAD device
-#define FT1000_MAG_DSP_LED_INDX 0x1 // dsp led status for PAD device
#define SUCCESS 0x00
-
-#define DRIVERID 0x00
-
-// Driver Error Messages for DSP
-#define DSP_CONDRESET_INFO 0x7ef2
-#define DSP_HB_INFO 0x7ef0
-
-// Magnemite specific defines
-#define hi_mag 0x6968 // Byte swap hi to avoid additional system call
-#define ho_mag 0x6f68 // Byte swap ho to avoid additional system call
-
-
-
-struct media_msg {
- struct pseudo_hdr pseudo;
- u16 type;
- u16 length;
- u16 state;
- u32 ip_addr;
- u32 net_mask;
- u32 gateway;
- u32 dns_1;
- u32 dns_2;
-} __attribute__ ((packed));
-
-struct dsp_init_msg {
- struct pseudo_hdr pseudo;
- u16 type;
- u16 length;
- u8 DspVer[DSPVERSZ]; // DSP version number
- u8 HwSerNum[HWSERNUMSZ]; // Hardware Serial Number
- u8 Sku[SKUSZ]; // SKU
- u8 eui64[EUISZ]; // EUI64
- u8 ProductMode[MODESZ]; // Product Mode (Market/Production)
- u8 RfCalVer[CALVERSZ]; // Rf Calibration version
- u8 RfCalDate[CALDATESZ]; // Rf Calibration date
-} __attribute__ ((packed));
-
-
struct app_info_block {
u32 nTxMsg; // DPRAM msg sent to DSP with app_id
u32 nRxMsg; // DPRAM msg rcv from dsp with app_id
@@ -90,11 +27,6 @@ struct app_info_block {
struct list_head app_sqlist; // link list of msgs for applicaton on slow queue
} __attribute__((packed));
-struct prov_record {
- struct list_head list;
- u8 *pprov_data;
-};
-
/*end of Jim*/
#define DEBUG(args...) printk(KERN_INFO args)
@@ -108,350 +40,25 @@ struct prov_record {
#define LARGE_TIMEOUT 5000
-#define MAX_DSP_SESS_REC 1024
-
-#define MAX_NUM_CARDS 32
-
-#define DSPVERSZ 4
-#define HWSERNUMSZ 16
-#define SKUSZ 20
-#define EUISZ 8
-#define CALVERSZ 2
-#define CALDATESZ 6
-#define MODESZ 2
-
-#define DSPID 0x20
-#define HOSTID 0x10
-
-#define DSPOAM 0x80
-#define DSPAIRID 0x90
-
-#define DRIVERID 0x00
-#define FMM 0x10
-#define NETWORKID 0x20
-#define AUTOLNCHID 0x30
-#define DSPLPBKID 0x40
-
#define DSPBCMSGID 0x10
-#define ENET_MAX_SIZE 1514
-#define ENET_HEADER_SIZE 14
-
-
-#define CIS_NET_ADDR_OFFSET 0xff0
-
-// MAGNEMITE specific
-
-#define FT1000_REG_MAG_UFDR 0x0000 // Uplink FIFO Data Register.
-
-#define FT1000_REG_MAG_UFDRL 0x0000 // Uplink FIFO Data Register low-word.
-
-#define FT1000_REG_MAG_UFDRH 0x0002 // Uplink FIFO Data Register high-word.
-
-#define FT1000_REG_MAG_UFER 0x0004 // Uplink FIFO End Register
-
-#define FT1000_REG_MAG_UFSR 0x0006 // Uplink FIFO Status Register
-
-#define FT1000_REG_MAG_DFR 0x0008 // Downlink FIFO Register
-
-#define FT1000_REG_MAG_DFRL 0x0008 // Downlink FIFO Register low-word
-
-#define FT1000_REG_MAG_DFRH 0x000a // Downlink FIFO Register high-word
-
-#define FT1000_REG_MAG_DFSR 0x000c // Downlink FIFO Status Register
-
-#define FT1000_REG_MAG_DPDATA 0x0010 // Dual Port RAM Indirect Data Register
-
-#define FT1000_REG_MAG_DPDATAL 0x0010 // Dual Port RAM Indirect Data Register low-word
-
-#define FT1000_REG_MAG_DPDATAH 0x0012 // Dual Port RAM Indirect Data Register high-word
-
-#define FT1000_REG_MAG_WATERMARK 0x002c // Supv. Control Reg. LLC register
-
-#define FT1000_REG_MAG_VERSION 0x0030 // LLC Version LLC register
-
-
-
-// Common
-
-#define FT1000_REG_DPRAM_ADDR 0x000E // DPRAM ADDRESS when card in IO mode
-
-#define FT1000_REG_SUP_CTRL 0x0020 // Supv. Control Reg. LLC register
-
-#define FT1000_REG_SUP_STAT 0x0022 // Supv. Status Reg LLC register
-
-#define FT1000_REG_RESET 0x0024 // Reset Reg LLC register
-
-#define FT1000_REG_SUP_ISR 0x0026 // Supv ISR LLC register
-
-#define FT1000_REG_SUP_IMASK 0x0028 // Supervisor Interrupt Mask LLC register
-
-#define FT1000_REG_DOORBELL 0x002a // Door Bell Reg LLC register
-
-#define FT1000_REG_ASIC_ID 0x002e // ASIC Identification Number
-
- // (Electrabuzz=0 Magnemite=TBD)
-
-
-
-// DSP doorbells
-
-#define FT1000_DB_DPRAM_RX 0x0001 // this value indicates that DSP has
-
- // data for host in DPRAM SlowQ
-
-#define FT1000_DB_DNLD_RX 0x0002 // Downloader handshake doorbell
-
-#define FT1000_ASIC_RESET_REQ 0x0004
-
-#define FT1000_DSP_ASIC_RESET 0x0008
-
-
-
-#define FT1000_DB_COND_RESET 0x0010
-
-
-
-// Host doorbells
-
-#define FT1000_DB_DPRAM_TX 0x0100 // this value indicates that host has
-
- // data for DSP in DPRAM.
-
-#define FT1000_DB_DNLD_TX 0x0200 // Downloader handshake doorbell
-
-#define FT1000_ASIC_RESET_DSP 0x0400
-
-#define FT1000_DB_HB 0x1000 // this value indicates that supervisor
-
-
-
-// Electrabuzz specific DPRAM mapping // has a heartbeat message for DSP.
-
-#define FT1000_DPRAM_BASE 0x1000 // 0x0000 to 0x07FF DPRAM 2Kx16 - R/W from PCMCIA or DSP
-
-#define FT1000_DPRAM_TX_BASE 0x1002 // TX AREA (SlowQ)
-
-#define FT1000_DPRAM_RX_BASE 0x1800 // RX AREA (SlowQ)
-
-#define FT1000_DPRAM_SIZE 0x1000 // 4K bytes
-
-
-
-#define FT1000_DRV_DEBUG 0x17E0 // Debug area for driver
-
-#define FT1000_FIFO_LEN 0x17FC // total length for DSP FIFO tracking
-
-#define FT1000_HI_HO 0x17FE // heartbeat with HI/HO
-
-#define FT1000_DSP_STATUS 0x1FFE // dsp status - non-zero is a request to reset dsp
-
-
-
-#define FT1000_DSP_CON_STATE 0x1FF8 // DSP Connection Status Info
-
-#define FT1000_DSP_LEDS 0x1FFA // DSP LEDS for rcv pwr strength, Rx data, Tx data
-
-#define DSP_TIMESTAMP 0x1FFC // dsp timestamp
-
-#define DSP_TIMESTAMP_DIFF 0x1FFA // difference of dsp timestamp in DPRAM and Pseudo header.
-
-
-
-#define FT1000_DPRAM_FEFE 0x1002 // Dsp Downloader handshake location
-
-
-
-#define FT1000_DSP_TIMER0 0x1FF0
-
-#define FT1000_DSP_TIMER1 0x1FF2
-
-#define FT1000_DSP_TIMER2 0x1FF4
-
-#define FT1000_DSP_TIMER3 0x1FF6
-
-
+/* Electrabuzz specific DPRAM mapping */
+/* this is used by the ft1000_usb driver - isn't that a bug? */
+#undef FT1000_DPRAM_RX_BASE
+#define FT1000_DPRAM_RX_BASE 0x1800 /* RX AREA (SlowQ) */
// MEMORY MAP FOR MAGNEMITE
-
-#define FT1000_DPRAM_MAG_TX_BASE 0x0000 // TX AREA (SlowQ)
-
-#define FT1000_DPRAM_MAG_RX_BASE 0x0200 // RX AREA (SlowQ)
-
-
-
-#define FT1000_MAG_FIFO_LEN 0x1FF // total length for DSP FIFO tracking
-
-#define FT1000_MAG_FIFO_LEN_INDX 0x1 // low-word index
-
-#define FT1000_MAG_HI_HO 0x1FF // heartbeat with HI/HO
-
-#define FT1000_MAG_HI_HO_INDX 0x0 // high-word index
-
-#define FT1000_MAG_DSP_LEDS 0x3FE // dsp led status for PAD device
-
-#define FT1000_MAG_DSP_LEDS_INDX 0x1 // dsp led status for PAD device
-
-
-
-#define FT1000_MAG_DSP_CON_STATE 0x3FE // DSP Connection Status Info
-
-#define FT1000_MAG_DSP_CON_STATE_INDX 0x0 // DSP Connection Status Info
-
-
-
-#define FT1000_MAG_DPRAM_FEFE 0x000 // location for dsp ready indicator
-
-#define FT1000_MAG_DPRAM_FEFE_INDX 0x0 // location for dsp ready indicator
-
-
-
-#define FT1000_MAG_DSP_TIMER0 0x3FC
-
-#define FT1000_MAG_DSP_TIMER0_INDX 0x1
-
-
-
-#define FT1000_MAG_DSP_TIMER1 0x3FC
-
-#define FT1000_MAG_DSP_TIMER1_INDX 0x0
-
-
-
-#define FT1000_MAG_DSP_TIMER2 0x3FD
-
-#define FT1000_MAG_DSP_TIMER2_INDX 0x1
-
-
-
-#define FT1000_MAG_DSP_TIMER3 0x3FD
-
-#define FT1000_MAG_DSP_TIMER3_INDX 0x0
-
-
-
-#define FT1000_MAG_TOTAL_LEN 0x200
-
-#define FT1000_MAG_TOTAL_LEN_INDX 0x1
-
-
-
-#define FT1000_MAG_PH_LEN 0x200
-
-#define FT1000_MAG_PH_LEN_INDX 0x0
-
-
-
-#define FT1000_MAG_PORT_ID 0x201
-
-#define FT1000_MAG_PORT_ID_INDX 0x0
-
-
-
-//
-
-// Constants for the FT1000_REG_SUP_ISR
-
-//
-
-// Indicate the cause of an interrupt.
-
-//
-
-// SUPERVISOR ISR BIT MAPS
-
-
-
-#define ISR_EMPTY (u8)0x00 // no bits set in ISR
-
-#define ISR_DOORBELL_ACK (u8)0x01 // the doorbell i sent has been received.
-
-#define ISR_DOORBELL_PEND (u8)0x02 // doorbell for me
-
-#define ISR_RCV (u8)0x04 // packet received with no errors
-
-#define ISR_WATERMARK (u8)0x08 //
-
-
-
-// Interrupt mask register defines
-
-// note these are different from the ISR BIT MAPS.
-
-#define ISR_MASK_NONE 0x0000
-
-#define ISR_MASK_DOORBELL_ACK 0x0001
-
-#define ISR_MASK_DOORBELL_PEND 0x0002
-
-#define ISR_MASK_RCV 0x0004
-
-#define ISR_MASK_WATERMARK 0x0008 // Normally we will only mask the watermark interrupt when we want to enable interrupts.
-
-#define ISR_MASK_ALL 0xffff
-
-
-
-#define HOST_INTF_LE 0x0000 // Host interface little endian
-
-#define HOST_INTF_BE 0x0001 // Host interface big endian
-
-
-
-#define ISR_DEFAULT_MASK 0x7ff9
-
-
-
-#define hi 0x6869
-
-#define ho 0x686f
-
-
-
-#define FT1000_ASIC_RESET 0x80 // COR value for soft reset to PCMCIA core
-
-#define FT1000_ASIC_BITS 0x51 // Bits set in COR register under normal operation
-
-#define FT1000_ASIC_MAG_BITS 0x55 // Bits set in COR register under normal operation
-
-
-
-#define FT1000_COR_OFFSET 0x100
-
-
-
-#define ELECTRABUZZ_ID 0 // ASIC ID for ELECTRABUZZ
-
-#define MAGNEMITE_ID 0x1a01 // ASIC ID for MAGNEMITE
-
-
+/* the indexes are swapped compared with the PCMCIA driver - is that OK or a bug? */
+#undef FT1000_MAG_DSP_LED_INDX
+#define FT1000_MAG_DSP_LED_INDX 0x1 /* dsp led status for PAD device */
+#undef FT1000_MAG_DSP_CON_STATE_INDX
+#define FT1000_MAG_DSP_CON_STATE_INDX 0x0 /* DSP Connection Status Info */
// Maximum times trying to get ASIC out of reset
-
#define MAX_ASIC_RESET_CNT 20
-
-
-#define DSP_RESET_BIT 0x1
-
-#define ASIC_RESET_BIT 0x2
-
-#define DSP_UNENCRYPTED 0x4
-
-#define DSP_ENCRYPTED 0x8
-
-#define EFUSE_MEM_DISABLE 0x0040
-
-
#define MAX_BUF_SIZE 4096
-struct drv_msg {
- struct pseudo_hdr pseudo;
- u16 type;
- u16 length;
- u8 data[0];
-} __attribute__ ((packed));
-
struct ft1000_device
{
struct usb_device *dev;
diff --git a/drivers/staging/ft1000/ft1000.h b/drivers/staging/ft1000/ft1000.h
new file mode 100644
index 00000000000..03baa577923
--- /dev/null
+++ b/drivers/staging/ft1000/ft1000.h
@@ -0,0 +1,252 @@
+/*
+ * Common structures and definitions for FT1000 Flarion Flash OFDM PCMCIA and USB devices
+ *
+ * Originally copyright (c) 2002 Flarion Technologies
+ *
+ */
+
+#define DSPVERSZ 4
+#define HWSERNUMSZ 16
+#define SKUSZ 20
+#define EUISZ 8
+#define MODESZ 2
+#define CALVERSZ 2
+#define CALDATESZ 6
+
+#define ELECTRABUZZ_ID 0 /* ASIC ID for Electrabuzz */
+#define MAGNEMITE_ID 0x1a01 /* ASIC ID for Magnemite */
+
+/* MEMORY MAP common to both ELECTRABUZZ and MAGNEMITE */
+#define FT1000_REG_DPRAM_ADDR 0x000E /* DPADR - Dual Port Ram Indirect Address Register */
+#define FT1000_REG_SUP_CTRL 0x0020 /* HCTR - Host Control Register */
+#define FT1000_REG_SUP_STAT 0x0022 /* HSTAT - Host Status Register */
+#define FT1000_REG_RESET 0x0024 /* HCTR - Host Control Register */
+#define FT1000_REG_SUP_ISR 0x0026 /* HISR - Host Interrupt Status Register */
+#define FT1000_REG_SUP_IMASK 0x0028 /* HIMASK - Host Interrupt Mask */
+#define FT1000_REG_DOORBELL 0x002a /* DBELL - Door Bell Register */
+#define FT1000_REG_ASIC_ID 0x002e /* ASICID - ASIC Identification Number */
+
+/* MEMORY MAP FOR ELECTRABUZZ ASIC */
+#define FT1000_REG_UFIFO_STAT 0x0000 /* UFSR - Uplink FIFO status register */
+#define FT1000_REG_UFIFO_BEG 0x0002 /* UFBR - Uplink FIFO beginning register */
+#define FT1000_REG_UFIFO_MID 0x0004 /* UFMR - Uplink FIFO middle register */
+#define FT1000_REG_UFIFO_END 0x0006 /* UFER - Uplink FIFO end register */
+#define FT1000_REG_DFIFO_STAT 0x0008 /* DFSR - Downlink FIFO status register */
+#define FT1000_REG_DFIFO 0x000A /* DFR - Downlink FIFO Register */
+#define FT1000_REG_DPRAM_DATA 0x000C /* DPRAM - Dual Port Indirect Data Register */
+#define FT1000_REG_WATERMARK 0x0010 /* WMARK - Watermark Register */
+
+/* MEMORY MAP FOR MAGNEMITE */
+#define FT1000_REG_MAG_UFDR 0x0000 /* UFDR - Uplink FIFO Data Register (32-bits) */
+#define FT1000_REG_MAG_UFDRL 0x0000 /* UFDRL - Uplink FIFO Data Register low-word (16-bits) */
+#define FT1000_REG_MAG_UFDRH 0x0002 /* UFDRH - Uplink FIFO Data Register high-word (16-bits) */
+#define FT1000_REG_MAG_UFER 0x0004 /* UFER - Uplink FIFO End Register */
+#define FT1000_REG_MAG_UFSR 0x0006 /* UFSR - Uplink FIFO Status Register */
+#define FT1000_REG_MAG_DFR 0x0008 /* DFR - Downlink FIFO Register (32-bits) */
+#define FT1000_REG_MAG_DFRL 0x0008 /* DFRL - Downlink FIFO Register low-word (16-bits) */
+#define FT1000_REG_MAG_DFRH 0x000a /* DFRH - Downlink FIFO Register high-word (16-bits) */
+#define FT1000_REG_MAG_DFSR 0x000c /* DFSR - Downlink FIFO Status Register */
+#define FT1000_REG_MAG_DPDATA 0x0010 /* DPDATA - Dual Port RAM Indirect Data Register (32-bits) */
+#define FT1000_REG_MAG_DPDATAL 0x0010 /* DPDATAL - Dual Port RAM Indirect Data Register low-word (16-bits) */
+#define FT1000_REG_MAG_DPDATAH 0x0012 /* DPDATAH - Dual Port RAM Indirect Data Register high-word (16-bits) */
+#define FT1000_REG_MAG_WATERMARK 0x002c /* WMARK - Watermark Register */
+#define FT1000_REG_MAG_VERSION 0x0030 /* LLC Version */
+
+/* Reserved Dual Port RAM offsets for Electrabuzz */
+#define FT1000_DPRAM_TX_BASE 0x0002 /* Host to PC Card Messaging Area */
+#define FT1000_DPRAM_RX_BASE 0x0800 /* PC Card to Host Messaging Area */
+#define FT1000_FIFO_LEN 0x07FC /* total length for DSP FIFO tracking */
+#define FT1000_HI_HO 0x07FE /* heartbeat with HI/HO */
+#define FT1000_DSP_STATUS 0x0FFE /* dsp status - non-zero is a request to reset dsp */
+#define FT1000_DSP_LED 0x0FFA /* dsp led status for PAD device */
+#define FT1000_DSP_CON_STATE 0x0FF8 /* DSP Connection Status Info */
+#define FT1000_DPRAM_FEFE 0x0002 /* location for dsp ready indicator */
+#define FT1000_DSP_TIMER0 0x1FF0 /* Timer Field from Basestation */
+#define FT1000_DSP_TIMER1 0x1FF2 /* Timer Field from Basestation */
+#define FT1000_DSP_TIMER2 0x1FF4 /* Timer Field from Basestation */
+#define FT1000_DSP_TIMER3 0x1FF6 /* Timer Field from Basestation */
+
+/* Reserved Dual Port RAM offsets for Magnemite */
+#define FT1000_DPRAM_MAG_TX_BASE 0x0000 /* Host to PC Card Messaging Area */
+#define FT1000_DPRAM_MAG_RX_BASE 0x0200 /* PC Card to Host Messaging Area */
+
+#define FT1000_MAG_FIFO_LEN 0x1FF /* total length for DSP FIFO tracking */
+#define FT1000_MAG_FIFO_LEN_INDX 0x1 /* low-word index */
+#define FT1000_MAG_HI_HO 0x1FF /* heartbeat with HI/HO */
+#define FT1000_MAG_HI_HO_INDX 0x0 /* high-word index */
+#define FT1000_MAG_DSP_LED 0x3FE /* dsp led status for PAD device */
+#define FT1000_MAG_DSP_LED_INDX 0x0 /* dsp led status for PAD device */
+#define FT1000_MAG_DSP_CON_STATE 0x3FE /* DSP Connection Status Info */
+#define FT1000_MAG_DSP_CON_STATE_INDX 0x1 /* DSP Connection Status Info */
+#define FT1000_MAG_DPRAM_FEFE 0x000 /* location for dsp ready indicator */
+#define FT1000_MAG_DPRAM_FEFE_INDX 0x0 /* location for dsp ready indicator */
+#define FT1000_MAG_DSP_TIMER0 0x3FC /* Timer Field from Basestation */
+#define FT1000_MAG_DSP_TIMER0_INDX 0x1
+#define FT1000_MAG_DSP_TIMER1 0x3FC /* Timer Field from Basestation */
+#define FT1000_MAG_DSP_TIMER1_INDX 0x0
+#define FT1000_MAG_DSP_TIMER2 0x3FD /* Timer Field from Basestation */
+#define FT1000_MAG_DSP_TIMER2_INDX 0x1
+#define FT1000_MAG_DSP_TIMER3 0x3FD /* Timer Field from Basestation */
+#define FT1000_MAG_DSP_TIMER3_INDX 0x0
+#define FT1000_MAG_TOTAL_LEN 0x200
+#define FT1000_MAG_TOTAL_LEN_INDX 0x1
+#define FT1000_MAG_PH_LEN 0x200
+#define FT1000_MAG_PH_LEN_INDX 0x0
+#define FT1000_MAG_PORT_ID 0x201
+#define FT1000_MAG_PORT_ID_INDX 0x0
+
+#define HOST_INTF_LE 0x0 /* Host interface little endian mode */
+#define HOST_INTF_BE 0x1 /* Host interface big endian mode */
+
+/* FT1000 to Host Doorbell assignments */
+#define FT1000_DB_DPRAM_RX 0x0001 /* this value indicates that DSP has data for host in DPRAM */
+#define FT1000_DB_DNLD_RX 0x0002 /* Downloader handshake doorbell */
+#define FT1000_ASIC_RESET_REQ 0x0004 /* DSP requesting host to reset the ASIC */
+#define FT1000_DSP_ASIC_RESET 0x0008 /* DSP indicating host that it will reset the ASIC */
+#define FT1000_DB_COND_RESET 0x0010 /* DSP request for a card reset. */
+
+/* Host to FT1000 Doorbell assignments */
+#define FT1000_DB_DPRAM_TX 0x0100 /* this value indicates that host has data for DSP in DPRAM. */
+#define FT1000_DB_DNLD_TX 0x0200 /* Downloader handshake doorbell */
+#define FT1000_ASIC_RESET_DSP 0x0400 /* Responds to FT1000_ASIC_RESET_REQ */
+#define FT1000_DB_HB 0x1000 /* Indicates that supervisor has a heartbeat message for DSP. */
+
+#define hi 0x6869 /* PC Card heartbeat values */
+#define ho 0x686f /* PC Card heartbeat values */
+
+/* Magnemite specific defines */
+#define hi_mag 0x6968 /* Byte swap hi to avoid additional system call */
+#define ho_mag 0x6f68 /* Byte swap ho to avoid additional system call */
+
+/* Bit field definitions for Host Interrupt Status Register */
+/* Indicate the cause of an interrupt. */
+#define ISR_EMPTY 0x00 /* no bits set */
+#define ISR_DOORBELL_ACK 0x01 /* Doorbell acknowledge from DSP */
+#define ISR_DOORBELL_PEND 0x02 /* Doorbell pending from DSP */
+#define ISR_RCV 0x04 /* Packet available in Downlink FIFO */
+#define ISR_WATERMARK 0x08 /* Watermark requirements satisfied */
+
+/* Bit field definition for Host Interrupt Mask */
+#define ISR_MASK_NONE 0x0000 /* no bits set */
+#define ISR_MASK_DOORBELL_ACK 0x0001 /* Doorbell acknowledge mask */
+#define ISR_MASK_DOORBELL_PEND 0x0002 /* Doorbell pending mask */
+#define ISR_MASK_RCV 0x0004 /* Downlink Packet available mask */
+#define ISR_MASK_WATERMARK 0x0008 /* Watermark interrupt mask */
+#define ISR_MASK_ALL 0xffff /* Mask all interrupts */
+/* Default interrupt mask (Enable Doorbell pending and Packet available interrupts) */
+#define ISR_DEFAULT_MASK 0x7ff9
+
+/* Bit field definition for Host Control Register */
+#define DSP_RESET_BIT 0x0001 /* Bit field to control dsp reset state */
+ /* (0 = out of reset, 1 = reset) */
+#define ASIC_RESET_BIT 0x0002 /* Bit field to control ASIC reset state */
+ /* (0 = out of reset, 1 = reset) */
+#define DSP_UNENCRYPTED 0x0004
+#define DSP_ENCRYPTED 0x0008
+#define EFUSE_MEM_DISABLE 0x0040
+
+/* Application specific IDs */
+#define DSPID 0x20
+#define HOSTID 0x10
+#define DSPAIRID 0x90
+#define DRIVERID 0x00
+#define NETWORKID 0x20
+
+/* Size of DPRAM Message */
+#define MAX_CMD_SQSIZE 1780
+
+#define ENET_MAX_SIZE 1514
+#define ENET_HEADER_SIZE 14
+
+#define SLOWQ_TYPE 0
+#define FASTQ_TYPE 1
+
+#define MAX_DSP_SESS_REC 1024
+
+#define DSP_QID_OFFSET 4
+
+/* Driver message types */
+#define MEDIA_STATE 0x0010
+#define TIME_UPDATE 0x0020
+#define DSP_PROVISION 0x0030
+#define DSP_INIT_MSG 0x0050
+#define DSP_HIBERNATE 0x0060
+#define DSP_STORE_INFO 0x0070
+#define DSP_GET_INFO 0x0071
+#define GET_DRV_ERR_RPT_MSG 0x0073
+#define RSP_DRV_ERR_RPT_MSG 0x0074
+
+/* Driver Error Messages for DSP */
+#define DSP_HB_INFO 0x7ef0
+#define DSP_FIFO_INFO 0x7ef1
+#define DSP_CONDRESET_INFO 0x7ef2
+#define DSP_CMDLEN_INFO 0x7ef3
+#define DSP_CMDPHCKSUM_INFO 0x7ef4
+#define DSP_PKTPHCKSUM_INFO 0x7ef5
+#define DSP_PKTLEN_INFO 0x7ef6
+#define DSP_USER_RESET 0x7ef7
+#define FIFO_FLUSH_MAXLIMIT 0x7ef8
+#define FIFO_FLUSH_BADCNT 0x7ef9
+#define FIFO_ZERO_LEN 0x7efa
+
+/* Pseudo Header structure */
+struct pseudo_hdr {
+ unsigned short length; /* length of msg body */
+ unsigned char source; /* hardware source id */
+ /* Host = 0x10 */
+ /* Dsp = 0x20 */
+ unsigned char destination; /* hardware destination id (refer to source) */
+ unsigned char portdest; /* software destination port id */
+ /* Host = 0x00 */
+ /* Application Broadcast = 0x10 */
+ /* Network Stack = 0x20 */
+ /* Dsp OAM = 0x80 */
+ /* Dsp Airlink = 0x90 */
+ /* Dsp Loader = 0xa0 */
+ /* Dsp MIP = 0xb0 */
+ unsigned char portsrc; /* software source port id (refer to portdest) */
+ unsigned short sh_str_id; /* not used */
+ unsigned char control; /* not used */
+ unsigned char rsvd1;
+ unsigned char seq_num; /* message sequence number */
+ unsigned char rsvd2;
+ unsigned short qos_class; /* not used */
+ unsigned short checksum; /* pseudo header checksum */
+} __packed;
+
+struct drv_msg {
+ struct pseudo_hdr pseudo;
+ u16 type;
+ u16 length;
+ u8 data[0];
+} __packed;
+
+struct media_msg {
+ struct pseudo_hdr pseudo;
+ u16 type;
+ u16 length;
+ u16 state;
+ u32 ip_addr;
+ u32 net_mask;
+ u32 gateway;
+ u32 dns_1;
+ u32 dns_2;
+} __packed;
+
+struct dsp_init_msg {
+ struct pseudo_hdr pseudo;
+ u16 type;
+ u16 length;
+ u8 DspVer[DSPVERSZ]; /* DSP version number */
+ u8 HwSerNum[HWSERNUMSZ]; /* Hardware Serial Number */
+ u8 Sku[SKUSZ]; /* SKU */
+ u8 eui64[EUISZ]; /* EUI64 */
+ u8 ProductMode[MODESZ]; /* Product Mode (Market/Production) */
+ u8 RfCalVer[CALVERSZ]; /* Rf Calibration version */
+ u8 RfCalDate[CALDATESZ]; /* Rf Calibration date */
+} __packed;
+
+struct prov_record {
+ struct list_head list;
+ u8 *pprov_data;
+};
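
The slow-queue hunks earlier in this patch finish each struct pseudo_hdr with a checksum built by XOR-ing the header words (the ppseudo_hdr->checksum ^= *pmsg++ loops). The helper below is a sketch of that calculation against the structure defined above; the function name is invented here, and seeding the sum from word 0 is an assumption, since that line falls outside the visible hunks.

#include <linux/types.h>
#include "ft1000.h"		/* struct pseudo_hdr from the new header above */

/* Sketch only: the checksum appears to be the XOR of the first seven
 * 16-bit words of the 16-byte pseudo header; the checksum field itself
 * is the eighth word and is excluded. */
static u16 pseudo_hdr_checksum(const struct pseudo_hdr *hdr)
{
	const u16 *w = (const u16 *)hdr;
	u16 sum = w[0];		/* assumed seed: first header word */
	int i;

	for (i = 1; i < 7; i++)
		sum ^= w[i];
	return sum;
}

A caller would store the result in the checksum field after the other fields are filled in, which is what the hunks above do inline while forming the DPRAM slow-queue message.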
diff --git a/drivers/staging/generic_serial/Kconfig b/drivers/staging/generic_serial/Kconfig
deleted file mode 100644
index 795daea3775..00000000000
--- a/drivers/staging/generic_serial/Kconfig
+++ /dev/null
@@ -1,45 +0,0 @@
-config A2232
- tristate "Commodore A2232 serial support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && ZORRO && BROKEN
- ---help---
- This option supports the 2232 7-port serial card shipped with the
- Amiga 2000 and other Zorro-bus machines, dating from 1989. At
- a max of 19,200 bps, the ports are served by a 6551 ACIA UART chip
- each, plus a 8520 CIA, and a master 6502 CPU and buffer as well. The
- ports were connected with 8 pin DIN connectors on the card bracket,
- for which 8 pin to DB25 adapters were supplied. The card also had
- jumpers internally to toggle various pinning configurations.
-
- This driver can be built as a module; but then "generic_serial"
- will also be built as a module. This has to be loaded before
- "ser_a2232". If you want to do this, answer M here.
-
-config SX
- tristate "Specialix SX (and SI) card support"
- depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA) && BROKEN
- help
- This is a driver for the SX and SI multiport serial cards.
- Please read the file <file:Documentation/serial/sx.txt> for details.
-
- This driver can only be built as a module ( = code which can be
- inserted in and removed from the running kernel whenever you want).
- The module will be called sx. If you want to do that, say M here.
-
-config RIO
- tristate "Specialix RIO system support"
- depends on SERIAL_NONSTANDARD && BROKEN
- help
- This is a driver for the Specialix RIO, a smart serial card which
- drives an outboard box that can support up to 128 ports. Product
- information is at <http://www.perle.com/support/documentation.html#multiport>.
- There are both ISA and PCI versions.
-
-config RIO_OLDPCI
- bool "Support really old RIO/PCI cards"
- depends on RIO
- help
- Older RIO PCI cards need some initialization-time configuration to
- determine the IRQ and some control addresses. If you have a RIO and
- this doesn't seem to work, try setting this to Y.
-
-
diff --git a/drivers/staging/generic_serial/Makefile b/drivers/staging/generic_serial/Makefile
deleted file mode 100644
index ffc90c8b013..00000000000
--- a/drivers/staging/generic_serial/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-obj-$(CONFIG_MVME147_SCC) += generic_serial.o vme_scc.o
-obj-$(CONFIG_MVME162_SCC) += generic_serial.o vme_scc.o
-obj-$(CONFIG_BVME6000_SCC) += generic_serial.o vme_scc.o
-obj-$(CONFIG_A2232) += ser_a2232.o generic_serial.o
-obj-$(CONFIG_SX) += sx.o generic_serial.o
-obj-$(CONFIG_RIO) += rio/ generic_serial.o
diff --git a/drivers/staging/generic_serial/TODO b/drivers/staging/generic_serial/TODO
deleted file mode 100644
index 88756453ac6..00000000000
--- a/drivers/staging/generic_serial/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
-These are a few tty/serial drivers that either do not build,
-or work if they do build, or if they seem to work, are for obsolete
-hardware, or are full of unfixable races and no one uses them anymore.
-
-If no one steps up to adopt any of these drivers, they will be removed
-in the 2.6.41 release.
diff --git a/drivers/staging/generic_serial/generic_serial.c b/drivers/staging/generic_serial/generic_serial.c
deleted file mode 100644
index f29dda4e9f2..00000000000
--- a/drivers/staging/generic_serial/generic_serial.c
+++ /dev/null
@@ -1,844 +0,0 @@
-/*
- * generic_serial.c
- *
- * Copyright (C) 1998/1999 R.E.Wolff@BitWizard.nl
- *
- * written for the SX serial driver.
- * Contains the code that should be shared over all the serial drivers.
- *
- * Credit for the idea to do it this way might go to Alan Cox.
- *
- *
- * Version 0.1 -- December, 1998. Initial version.
- * Version 0.2 -- March, 1999. Some more routines. Bugfixes. Etc.
- * Version 0.5 -- August, 1999. Some more fixes. Reformat for Linus.
- *
- * BitWizard is actively maintaining this file. We sometimes find
- * that someone submitted changes to this file. We really appreciate
- * your help, but please submit changes through us. We're doing our
- * best to be responsive. -- REW
- * */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/tty.h>
-#include <linux/sched.h>
-#include <linux/serial.h>
-#include <linux/mm.h>
-#include <linux/generic_serial.h>
-#include <linux/interrupt.h>
-#include <linux/tty_flip.h>
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <asm/uaccess.h>
-
-#define DEBUG
-
-static int gs_debug;
-
-#ifdef DEBUG
-#define gs_dprintk(f, str...) if (gs_debug & f) printk (str)
-#else
-#define gs_dprintk(f, str...) /* nothing */
-#endif
-
-#define func_enter() gs_dprintk (GS_DEBUG_FLOW, "gs: enter %s\n", __func__)
-#define func_exit() gs_dprintk (GS_DEBUG_FLOW, "gs: exit %s\n", __func__)
-
-#define RS_EVENT_WRITE_WAKEUP 1
-
-module_param(gs_debug, int, 0644);
-
-
-int gs_put_char(struct tty_struct * tty, unsigned char ch)
-{
- struct gs_port *port;
-
- func_enter ();
-
- port = tty->driver_data;
-
- if (!port) return 0;
-
- if (! (port->port.flags & ASYNC_INITIALIZED)) return 0;
-
- /* Take a lock on the serial tranmit buffer! */
- mutex_lock(& port->port_write_mutex);
-
- if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) {
- /* Sorry, buffer is full, drop character. Update statistics???? -- REW */
- mutex_unlock(&port->port_write_mutex);
- return 0;
- }
-
- port->xmit_buf[port->xmit_head++] = ch;
- port->xmit_head &= SERIAL_XMIT_SIZE - 1;
- port->xmit_cnt++; /* Characters in buffer */
-
- mutex_unlock(&port->port_write_mutex);
- func_exit ();
- return 1;
-}
-
-
-/*
-> Problems to take into account are:
-> -1- Interrupts that empty part of the buffer.
-> -2- page faults on the access to userspace.
-> -3- Other processes that are also trying to do a "write".
-*/
-
-int gs_write(struct tty_struct * tty,
- const unsigned char *buf, int count)
-{
- struct gs_port *port;
- int c, total = 0;
- int t;
-
- func_enter ();
-
- port = tty->driver_data;
-
- if (!port) return 0;
-
- if (! (port->port.flags & ASYNC_INITIALIZED))
- return 0;
-
- /* get exclusive "write" access to this port (problem 3) */
- /* This is not a spinlock because we can have a disk access (page
- fault) in copy_from_user */
- mutex_lock(& port->port_write_mutex);
-
- while (1) {
-
- c = count;
-
- /* This is safe because we "OWN" the "head". No one else can
- change the "head": we own the port_write_mutex. */
- /* Don't overrun the end of the buffer */
- t = SERIAL_XMIT_SIZE - port->xmit_head;
- if (t < c) c = t;
-
- /* This is safe because the xmit_cnt can only decrease. This
- would increase "t", so we might copy too little chars. */
- /* Don't copy past the "head" of the buffer */
- t = SERIAL_XMIT_SIZE - 1 - port->xmit_cnt;
- if (t < c) c = t;
-
- /* Can't copy more? break out! */
- if (c <= 0) break;
-
- memcpy (port->xmit_buf + port->xmit_head, buf, c);
-
- port -> xmit_cnt += c;
- port -> xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE -1);
- buf += c;
- count -= c;
- total += c;
- }
- mutex_unlock(& port->port_write_mutex);
-
- gs_dprintk (GS_DEBUG_WRITE, "write: interrupts are %s\n",
- (port->port.flags & GS_TX_INTEN)?"enabled": "disabled");
-
- if (port->xmit_cnt &&
- !tty->stopped &&
- !tty->hw_stopped &&
- !(port->port.flags & GS_TX_INTEN)) {
- port->port.flags |= GS_TX_INTEN;
- port->rd->enable_tx_interrupts (port);
- }
- func_exit ();
- return total;
-}
-
-
-
-int gs_write_room(struct tty_struct * tty)
-{
- struct gs_port *port = tty->driver_data;
- int ret;
-
- func_enter ();
- ret = SERIAL_XMIT_SIZE - port->xmit_cnt - 1;
- if (ret < 0)
- ret = 0;
- func_exit ();
- return ret;
-}
-
-
-int gs_chars_in_buffer(struct tty_struct *tty)
-{
- struct gs_port *port = tty->driver_data;
- func_enter ();
-
- func_exit ();
- return port->xmit_cnt;
-}
-
-
-static int gs_real_chars_in_buffer(struct tty_struct *tty)
-{
- struct gs_port *port;
- func_enter ();
-
- port = tty->driver_data;
-
- if (!port->rd) return 0;
- if (!port->rd->chars_in_buffer) return 0;
-
- func_exit ();
- return port->xmit_cnt + port->rd->chars_in_buffer (port);
-}
-
-
-static int gs_wait_tx_flushed (void * ptr, unsigned long timeout)
-{
- struct gs_port *port = ptr;
- unsigned long end_jiffies;
- int jiffies_to_transmit, charsleft = 0, rv = 0;
- int rcib;
-
- func_enter();
-
- gs_dprintk (GS_DEBUG_FLUSH, "port=%p.\n", port);
- if (port) {
- gs_dprintk (GS_DEBUG_FLUSH, "xmit_cnt=%x, xmit_buf=%p, tty=%p.\n",
- port->xmit_cnt, port->xmit_buf, port->port.tty);
- }
-
- if (!port || port->xmit_cnt < 0 || !port->xmit_buf) {
- gs_dprintk (GS_DEBUG_FLUSH, "ERROR: !port, !port->xmit_buf or prot->xmit_cnt < 0.\n");
- func_exit();
- return -EINVAL; /* This is an error which we don't know how to handle. */
- }
-
- rcib = gs_real_chars_in_buffer(port->port.tty);
-
- if(rcib <= 0) {
- gs_dprintk (GS_DEBUG_FLUSH, "nothing to wait for.\n");
- func_exit();
- return rv;
- }
- /* stop trying: now + twice the time it would normally take + seconds */
- if (timeout == 0) timeout = MAX_SCHEDULE_TIMEOUT;
- end_jiffies = jiffies;
- if (timeout != MAX_SCHEDULE_TIMEOUT)
- end_jiffies += port->baud?(2 * rcib * 10 * HZ / port->baud):0;
- end_jiffies += timeout;
-
- gs_dprintk (GS_DEBUG_FLUSH, "now=%lx, end=%lx (%ld).\n",
- jiffies, end_jiffies, end_jiffies-jiffies);
-
- /* the expression is actually jiffies < end_jiffies, but that won't
- work around the wraparound. Tricky eh? */
- while ((charsleft = gs_real_chars_in_buffer (port->port.tty)) &&
- time_after (end_jiffies, jiffies)) {
- /* Units check:
- chars * (bits/char) * (jiffies /sec) / (bits/sec) = jiffies!
- check! */
-
- charsleft += 16; /* Allow 16 chars more to be transmitted ... */
- jiffies_to_transmit = port->baud?(1 + charsleft * 10 * HZ / port->baud):0;
- /* ^^^ Round up.... */
- if (jiffies_to_transmit <= 0) jiffies_to_transmit = 1;
-
- gs_dprintk (GS_DEBUG_FLUSH, "Expect to finish in %d jiffies "
- "(%d chars).\n", jiffies_to_transmit, charsleft);
-
- msleep_interruptible(jiffies_to_msecs(jiffies_to_transmit));
- if (signal_pending (current)) {
- gs_dprintk (GS_DEBUG_FLUSH, "Signal pending. Bombing out: ");
- rv = -EINTR;
- break;
- }
- }
-
- gs_dprintk (GS_DEBUG_FLUSH, "charsleft = %d.\n", charsleft);
- set_current_state (TASK_RUNNING);
-
- func_exit();
- return rv;
-}
-
-
-
-void gs_flush_buffer(struct tty_struct *tty)
-{
- struct gs_port *port;
- unsigned long flags;
-
- func_enter ();
-
- port = tty->driver_data;
-
- if (!port) return;
-
- /* XXX Would the write semaphore do? */
- spin_lock_irqsave (&port->driver_lock, flags);
- port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
- spin_unlock_irqrestore (&port->driver_lock, flags);
-
- tty_wakeup(tty);
- func_exit ();
-}
-
-
-void gs_flush_chars(struct tty_struct * tty)
-{
- struct gs_port *port;
-
- func_enter ();
-
- port = tty->driver_data;
-
- if (!port) return;
-
- if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
- !port->xmit_buf) {
- func_exit ();
- return;
- }
-
- /* Beats me -- REW */
- port->port.flags |= GS_TX_INTEN;
- port->rd->enable_tx_interrupts (port);
- func_exit ();
-}
-
-
-void gs_stop(struct tty_struct * tty)
-{
- struct gs_port *port;
-
- func_enter ();
-
- port = tty->driver_data;
-
- if (!port) return;
-
- if (port->xmit_cnt &&
- port->xmit_buf &&
- (port->port.flags & GS_TX_INTEN) ) {
- port->port.flags &= ~GS_TX_INTEN;
- port->rd->disable_tx_interrupts (port);
- }
- func_exit ();
-}
-
-
-void gs_start(struct tty_struct * tty)
-{
- struct gs_port *port;
-
- port = tty->driver_data;
-
- if (!port) return;
-
- if (port->xmit_cnt &&
- port->xmit_buf &&
- !(port->port.flags & GS_TX_INTEN) ) {
- port->port.flags |= GS_TX_INTEN;
- port->rd->enable_tx_interrupts (port);
- }
- func_exit ();
-}
-
-
-static void gs_shutdown_port (struct gs_port *port)
-{
- unsigned long flags;
-
- func_enter();
-
- if (!port) return;
-
- if (!(port->port.flags & ASYNC_INITIALIZED))
- return;
-
- spin_lock_irqsave(&port->driver_lock, flags);
-
- if (port->xmit_buf) {
- free_page((unsigned long) port->xmit_buf);
- port->xmit_buf = NULL;
- }
-
- if (port->port.tty)
- set_bit(TTY_IO_ERROR, &port->port.tty->flags);
-
- port->rd->shutdown_port (port);
-
- port->port.flags &= ~ASYNC_INITIALIZED;
- spin_unlock_irqrestore(&port->driver_lock, flags);
-
- func_exit();
-}
-
-
-void gs_hangup(struct tty_struct *tty)
-{
- struct gs_port *port;
- unsigned long flags;
-
- func_enter ();
-
- port = tty->driver_data;
- tty = port->port.tty;
- if (!tty)
- return;
-
- gs_shutdown_port (port);
- spin_lock_irqsave(&port->port.lock, flags);
- port->port.flags &= ~(ASYNC_NORMAL_ACTIVE|GS_ACTIVE);
- port->port.tty = NULL;
- port->port.count = 0;
- spin_unlock_irqrestore(&port->port.lock, flags);
-
- wake_up_interruptible(&port->port.open_wait);
- func_exit ();
-}
-
-
-int gs_block_til_ready(void *port_, struct file * filp)
-{
- struct gs_port *gp = port_;
- struct tty_port *port = &gp->port;
- DECLARE_WAITQUEUE(wait, current);
- int retval;
- int do_clocal = 0;
- int CD;
- struct tty_struct *tty;
- unsigned long flags;
-
- func_enter ();
-
- if (!port) return 0;
-
- tty = port->tty;
-
- gs_dprintk (GS_DEBUG_BTR, "Entering gs_block_till_ready.\n");
- /*
- * If the device is in the middle of being closed, then block
- * until it's done, and then try again.
- */
- if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
- interruptible_sleep_on(&port->close_wait);
- if (port->flags & ASYNC_HUP_NOTIFY)
- return -EAGAIN;
- else
- return -ERESTARTSYS;
- }
-
- gs_dprintk (GS_DEBUG_BTR, "after hung up\n");
-
- /*
- * If non-blocking mode is set, or the port is not enabled,
- * then make the check up front and then exit.
- */
- if ((filp->f_flags & O_NONBLOCK) ||
- (tty->flags & (1 << TTY_IO_ERROR))) {
- port->flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
- }
-
- gs_dprintk (GS_DEBUG_BTR, "after nonblock\n");
-
- if (C_CLOCAL(tty))
- do_clocal = 1;
-
- /*
- * Block waiting for the carrier detect and the line to become
- * free (i.e., not in use by the callout). While we are in
- * this loop, port->count is dropped by one, so that
- * rs_close() knows when to free things. We restore it upon
- * exit, either normal or abnormal.
- */
- retval = 0;
-
- add_wait_queue(&port->open_wait, &wait);
-
- gs_dprintk (GS_DEBUG_BTR, "after add waitq.\n");
- spin_lock_irqsave(&port->lock, flags);
- if (!tty_hung_up_p(filp)) {
- port->count--;
- }
- port->blocked_open++;
- spin_unlock_irqrestore(&port->lock, flags);
- while (1) {
- CD = tty_port_carrier_raised(port);
- gs_dprintk (GS_DEBUG_BTR, "CD is now %d.\n", CD);
- set_current_state (TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) ||
- !(port->flags & ASYNC_INITIALIZED)) {
- if (port->flags & ASYNC_HUP_NOTIFY)
- retval = -EAGAIN;
- else
- retval = -ERESTARTSYS;
- break;
- }
- if (!(port->flags & ASYNC_CLOSING) &&
- (do_clocal || CD))
- break;
- gs_dprintk (GS_DEBUG_BTR, "signal_pending is now: %d (%lx)\n",
- (int)signal_pending (current), *(long*)(&current->blocked));
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
- schedule();
- }
- gs_dprintk (GS_DEBUG_BTR, "Got out of the loop. (%d)\n",
- port->blocked_open);
- set_current_state (TASK_RUNNING);
- remove_wait_queue(&port->open_wait, &wait);
-
- spin_lock_irqsave(&port->lock, flags);
- if (!tty_hung_up_p(filp)) {
- port->count++;
- }
- port->blocked_open--;
- if (retval == 0)
- port->flags |= ASYNC_NORMAL_ACTIVE;
- spin_unlock_irqrestore(&port->lock, flags);
- func_exit ();
- return retval;
-}
-
-
-void gs_close(struct tty_struct * tty, struct file * filp)
-{
- unsigned long flags;
- struct gs_port *port;
-
- func_enter ();
-
- port = tty->driver_data;
-
- if (!port) return;
-
- if (!port->port.tty) {
- /* This seems to happen when this is called from vhangup. */
- gs_dprintk (GS_DEBUG_CLOSE, "gs: Odd: port->port.tty is NULL\n");
- port->port.tty = tty;
- }
-
- spin_lock_irqsave(&port->port.lock, flags);
-
- if (tty_hung_up_p(filp)) {
- spin_unlock_irqrestore(&port->port.lock, flags);
- if (port->rd->hungup)
- port->rd->hungup (port);
- func_exit ();
- return;
- }
-
- if ((tty->count == 1) && (port->port.count != 1)) {
- printk(KERN_ERR "gs: gs_close port %p: bad port count;"
- " tty->count is 1, port count is %d\n", port, port->port.count);
- port->port.count = 1;
- }
- if (--port->port.count < 0) {
- printk(KERN_ERR "gs: gs_close port %p: bad port count: %d\n", port, port->port.count);
- port->port.count = 0;
- }
-
- if (port->port.count) {
- gs_dprintk(GS_DEBUG_CLOSE, "gs_close port %p: count: %d\n", port, port->port.count);
- spin_unlock_irqrestore(&port->port.lock, flags);
- func_exit ();
- return;
- }
- port->port.flags |= ASYNC_CLOSING;
-
- /*
- * Now we wait for the transmit buffer to clear; and we notify
- * the line discipline to only process XON/XOFF characters.
- */
- tty->closing = 1;
- /* if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, port->closing_wait); */
-
- /*
- * At this point we stop accepting input. To do this, we
- * disable the receive line status interrupts, and tell the
- * interrupt driver to stop checking the data ready bit in the
- * line status register.
- */
-
- spin_lock(&port->driver_lock);
- port->rd->disable_rx_interrupts (port);
- spin_unlock(&port->driver_lock);
- spin_unlock_irqrestore(&port->port.lock, flags);
-
- /* close has no way of returning "EINTR", so discard return value */
- if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
- gs_wait_tx_flushed (port, port->closing_wait);
-
- port->port.flags &= ~GS_ACTIVE;
-
- gs_flush_buffer(tty);
-
- tty_ldisc_flush(tty);
- tty->closing = 0;
-
- spin_lock_irqsave(&port->driver_lock, flags);
- port->event = 0;
- port->rd->close (port);
- port->rd->shutdown_port (port);
- spin_unlock_irqrestore(&port->driver_lock, flags);
-
- spin_lock_irqsave(&port->port.lock, flags);
- port->port.tty = NULL;
-
- if (port->port.blocked_open) {
- if (port->close_delay) {
- spin_unlock_irqrestore(&port->port.lock, flags);
- msleep_interruptible(jiffies_to_msecs(port->close_delay));
- spin_lock_irqsave(&port->port.lock, flags);
- }
- wake_up_interruptible(&port->port.open_wait);
- }
- port->port.flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING | ASYNC_INITIALIZED);
- spin_unlock_irqrestore(&port->port.lock, flags);
- wake_up_interruptible(&port->port.close_wait);
-
- func_exit ();
-}
-
-
-void gs_set_termios (struct tty_struct * tty,
- struct ktermios * old_termios)
-{
- struct gs_port *port;
- int baudrate, tmp, rv;
- struct ktermios *tiosp;
-
- func_enter();
-
- port = tty->driver_data;
-
- if (!port) return;
- if (!port->port.tty) {
- /* This seems to happen when this is called after gs_close. */
- gs_dprintk (GS_DEBUG_TERMIOS, "gs: Odd: port->port.tty is NULL\n");
- port->port.tty = tty;
- }
-
-
- tiosp = tty->termios;
-
- if (gs_debug & GS_DEBUG_TERMIOS) {
- gs_dprintk (GS_DEBUG_TERMIOS, "termios structure (%p):\n", tiosp);
- }
-
- if(old_termios && (gs_debug & GS_DEBUG_TERMIOS)) {
- if(tiosp->c_iflag != old_termios->c_iflag) printk("c_iflag changed\n");
- if(tiosp->c_oflag != old_termios->c_oflag) printk("c_oflag changed\n");
- if(tiosp->c_cflag != old_termios->c_cflag) printk("c_cflag changed\n");
- if(tiosp->c_lflag != old_termios->c_lflag) printk("c_lflag changed\n");
- if(tiosp->c_line != old_termios->c_line) printk("c_line changed\n");
- if(!memcmp(tiosp->c_cc, old_termios->c_cc, NCC)) printk("c_cc changed\n");
- }
-
- baudrate = tty_get_baud_rate(tty);
-
- if ((tiosp->c_cflag & CBAUD) == B38400) {
- if ( (port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
- baudrate = 57600;
- else if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
- baudrate = 115200;
- else if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
- baudrate = 230400;
- else if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
- baudrate = 460800;
- else if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)
- baudrate = (port->baud_base / port->custom_divisor);
- }
-
- /* I recommend using THIS instead of the mess in termios (and
- duplicating the above code). Next we should create a clean
- interface towards this variable. If your card supports arbitrary
- baud rates, (e.g. CD1400 or 16550 based cards) then everything
- will be very easy..... */
- port->baud = baudrate;
-
- /* Two timer ticks seems enough to wakeup something like SLIP driver */
- /* Baudrate/10 is cps. Divide by HZ to get chars per tick. */
- tmp = (baudrate / 10 / HZ) * 2;
-
- if (tmp < 0) tmp = 0;
- if (tmp >= SERIAL_XMIT_SIZE) tmp = SERIAL_XMIT_SIZE-1;
-
- port->wakeup_chars = tmp;
-
- /* We should really wait for the characters to be all sent before
- changing the settings. -- CAL */
- rv = gs_wait_tx_flushed (port, MAX_SCHEDULE_TIMEOUT);
- if (rv < 0) return /* rv */;
-
- rv = port->rd->set_real_termios(port);
- if (rv < 0) return /* rv */;
-
- if ((!old_termios ||
- (old_termios->c_cflag & CRTSCTS)) &&
- !( tiosp->c_cflag & CRTSCTS)) {
- tty->stopped = 0;
- gs_start(tty);
- }
-
-#ifdef tytso_patch_94Nov25_1726
- /* This "makes sense", Why is it commented out? */
-
- if (!(old_termios->c_cflag & CLOCAL) &&
- (tty->termios->c_cflag & CLOCAL))
- wake_up_interruptible(&port->gs.open_wait);
-#endif
-
- func_exit();
- return /* 0 */;
-}
-
-
-
-/* Must be called with interrupts enabled */
-int gs_init_port(struct gs_port *port)
-{
- unsigned long flags;
-
- func_enter ();
-
- if (port->port.flags & ASYNC_INITIALIZED) {
- func_exit ();
- return 0;
- }
- if (!port->xmit_buf) {
- /* We may sleep in get_zeroed_page() */
- unsigned long tmp;
-
- tmp = get_zeroed_page(GFP_KERNEL);
- spin_lock_irqsave (&port->driver_lock, flags);
- if (port->xmit_buf)
- free_page (tmp);
- else
- port->xmit_buf = (unsigned char *) tmp;
- spin_unlock_irqrestore(&port->driver_lock, flags);
- if (!port->xmit_buf) {
- func_exit ();
- return -ENOMEM;
- }
- }
-
- spin_lock_irqsave (&port->driver_lock, flags);
- if (port->port.tty)
- clear_bit(TTY_IO_ERROR, &port->port.tty->flags);
- mutex_init(&port->port_write_mutex);
- port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
- spin_unlock_irqrestore(&port->driver_lock, flags);
- gs_set_termios(port->port.tty, NULL);
- spin_lock_irqsave (&port->driver_lock, flags);
- port->port.flags |= ASYNC_INITIALIZED;
- port->port.flags &= ~GS_TX_INTEN;
-
- spin_unlock_irqrestore(&port->driver_lock, flags);
- func_exit ();
- return 0;
-}
-
-
-int gs_setserial(struct gs_port *port, struct serial_struct __user *sp)
-{
- struct serial_struct sio;
-
- if (copy_from_user(&sio, sp, sizeof(struct serial_struct)))
- return(-EFAULT);
-
- if (!capable(CAP_SYS_ADMIN)) {
- if ((sio.baud_base != port->baud_base) ||
- (sio.close_delay != port->close_delay) ||
- ((sio.flags & ~ASYNC_USR_MASK) !=
- (port->port.flags & ~ASYNC_USR_MASK)))
- return(-EPERM);
- }
-
- port->port.flags = (port->port.flags & ~ASYNC_USR_MASK) |
- (sio.flags & ASYNC_USR_MASK);
-
- port->baud_base = sio.baud_base;
- port->close_delay = sio.close_delay;
- port->closing_wait = sio.closing_wait;
- port->custom_divisor = sio.custom_divisor;
-
- gs_set_termios (port->port.tty, NULL);
-
- return 0;
-}
-
-
-/*****************************************************************************/
-
-/*
- * Generate the serial struct info.
- */
-
-int gs_getserial(struct gs_port *port, struct serial_struct __user *sp)
-{
- struct serial_struct sio;
-
- memset(&sio, 0, sizeof(struct serial_struct));
- sio.flags = port->port.flags;
- sio.baud_base = port->baud_base;
- sio.close_delay = port->close_delay;
- sio.closing_wait = port->closing_wait;
- sio.custom_divisor = port->custom_divisor;
- sio.hub6 = 0;
-
- /* If you want you can override these. */
- sio.type = PORT_UNKNOWN;
- sio.xmit_fifo_size = -1;
- sio.line = -1;
- sio.port = -1;
- sio.irq = -1;
-
- if (port->rd->getserial)
- port->rd->getserial (port, &sio);
-
- if (copy_to_user(sp, &sio, sizeof(struct serial_struct)))
- return -EFAULT;
- return 0;
-
-}
-
-
-void gs_got_break(struct gs_port *port)
-{
- func_enter ();
-
- tty_insert_flip_char(port->port.tty, 0, TTY_BREAK);
- tty_schedule_flip(port->port.tty);
- if (port->port.flags & ASYNC_SAK) {
- do_SAK (port->port.tty);
- }
-
- func_exit ();
-}
-
-
-EXPORT_SYMBOL(gs_put_char);
-EXPORT_SYMBOL(gs_write);
-EXPORT_SYMBOL(gs_write_room);
-EXPORT_SYMBOL(gs_chars_in_buffer);
-EXPORT_SYMBOL(gs_flush_buffer);
-EXPORT_SYMBOL(gs_flush_chars);
-EXPORT_SYMBOL(gs_stop);
-EXPORT_SYMBOL(gs_start);
-EXPORT_SYMBOL(gs_hangup);
-EXPORT_SYMBOL(gs_block_til_ready);
-EXPORT_SYMBOL(gs_close);
-EXPORT_SYMBOL(gs_set_termios);
-EXPORT_SYMBOL(gs_init_port);
-EXPORT_SYMBOL(gs_setserial);
-EXPORT_SYMBOL(gs_getserial);
-EXPORT_SYMBOL(gs_got_break);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/generic_serial/rio/Makefile b/drivers/staging/generic_serial/rio/Makefile
deleted file mode 100644
index 1661875883f..00000000000
--- a/drivers/staging/generic_serial/rio/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Makefile for the linux rio-subsystem.
-#
-# (C) R.E.Wolff@BitWizard.nl
-#
-# This file is GPL. See other files for the full Blurb. I'm lazy today.
-#
-
-obj-$(CONFIG_RIO) += rio.o
-
-rio-y := rio_linux.o rioinit.o rioboot.o riocmd.o rioctrl.o riointr.o \
- rioparam.o rioroute.o riotable.o riotty.o
diff --git a/drivers/staging/generic_serial/rio/board.h b/drivers/staging/generic_serial/rio/board.h
deleted file mode 100644
index bdea633a907..00000000000
--- a/drivers/staging/generic_serial/rio/board.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : board.h
-** SID : 1.2
-** Last Modified : 11/6/98 11:34:07
-** Retrieved : 11/6/98 11:34:20
-**
-** ident @(#)board.h 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __rio_board_h__
-#define __rio_board_h__
-
-/*
-** board.h contains the definitions for the *hardware* of the host cards.
-** It describes the memory overlay for the dual port RAM area.
-*/
-
-#define DP_SRAM1_SIZE 0x7C00
-#define DP_SRAM2_SIZE 0x0200
-#define DP_SRAM3_SIZE 0x7000
-#define DP_SCRATCH_SIZE 0x1000
-#define DP_PARMMAP_ADDR 0x01FE /* offset into SRAM2 */
-#define DP_STARTUP_ADDR 0x01F8 /* offset into SRAM2 */
-
-/*
-** The shape of the Host Control area, at offset 0x7C00, Write Only
-*/
-struct s_Ctrl {
- u8 DpCtl; /* 7C00 */
- u8 Dp_Unused2_[127];
- u8 DpIntSet; /* 7C80 */
- u8 Dp_Unused3_[127];
- u8 DpTpuReset; /* 7D00 */
- u8 Dp_Unused4_[127];
- u8 DpIntReset; /* 7D80 */
- u8 Dp_Unused5_[127];
-};
-
-/*
-** The PROM data area on the host (0x7C00), Read Only
-*/
-struct s_Prom {
- u16 DpSlxCode[2];
- u16 DpRev;
- u16 Dp_Unused6_;
- u16 DpUniq[4];
- u16 DpJahre;
- u16 DpWoche;
- u16 DpHwFeature[5];
- u16 DpOemId;
- u16 DpSiggy[16];
-};
-
-/*
-** Union of the Ctrl and Prom areas
-*/
-union u_CtrlProm { /* This is the control/PROM area (0x7C00) */
- struct s_Ctrl DpCtrl;
- struct s_Prom DpProm;
-};
-
-/*
-** The top end of memory!
-*/
-struct s_ParmMapS { /* Area containing Parm Map Pointer */
- u8 Dp_Unused8_[DP_PARMMAP_ADDR];
- u16 DpParmMapAd;
-};
-
-struct s_StartUpS {
- u8 Dp_Unused9_[DP_STARTUP_ADDR];
- u8 Dp_LongJump[0x4];
- u8 Dp_Unused10_[2];
- u8 Dp_ShortJump[0x2];
-};
-
-union u_Sram2ParmMap { /* This is the top of memory (0x7E00-0x7FFF) */
- u8 DpSramMem[DP_SRAM2_SIZE];
- struct s_ParmMapS DpParmMapS;
- struct s_StartUpS DpStartUpS;
-};
-
-/*
-** This is the DP RAM overlay.
-*/
-struct DpRam {
- u8 DpSram1[DP_SRAM1_SIZE]; /* 0000 - 7BFF */
- union u_CtrlProm DpCtrlProm; /* 7C00 - 7DFF */
- union u_Sram2ParmMap DpSram2ParmMap; /* 7E00 - 7FFF */
- u8 DpScratch[DP_SCRATCH_SIZE]; /* 8000 - 8FFF */
- u8 DpSram3[DP_SRAM3_SIZE]; /* 9000 - FFFF */
-};
-
-#define DpControl DpCtrlProm.DpCtrl.DpCtl
-#define DpSetInt DpCtrlProm.DpCtrl.DpIntSet
-#define DpResetTpu DpCtrlProm.DpCtrl.DpTpuReset
-#define DpResetInt DpCtrlProm.DpCtrl.DpIntReset
-
-#define DpSlx DpCtrlProm.DpProm.DpSlxCode
-#define DpRevision DpCtrlProm.DpProm.DpRev
-#define DpUnique DpCtrlProm.DpProm.DpUniq
-#define DpYear DpCtrlProm.DpProm.DpJahre
-#define DpWeek DpCtrlProm.DpProm.DpWoche
-#define DpSignature DpCtrlProm.DpProm.DpSiggy
-
-#define DpParmMapR DpSram2ParmMap.DpParmMapS.DpParmMapAd
-#define DpSram2 DpSram2ParmMap.DpSramMem
-
-#endif
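
Because the DpRam overlay above only works if the structures pack to the offsets given in the comments, a build-time check along these lines would catch drift (a sketch using the kernel's offsetof/BUILD_BUG_ON helpers; it was not part of the removed driver):

#include <linux/kernel.h>
#include <linux/stddef.h>

/* Sketch only: confirm the DP RAM overlay matches the documented map. */
static inline void dpram_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct DpRam, DpCtrlProm)     != 0x7C00);
	BUILD_BUG_ON(offsetof(struct DpRam, DpSram2ParmMap) != 0x7E00);
	BUILD_BUG_ON(offsetof(struct DpRam, DpScratch)      != 0x8000);
	BUILD_BUG_ON(offsetof(struct DpRam, DpSram3)        != 0x9000);
	BUILD_BUG_ON(sizeof(struct DpRam)                   != 0x10000);
}
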
diff --git a/drivers/staging/generic_serial/rio/cirrus.h b/drivers/staging/generic_serial/rio/cirrus.h
deleted file mode 100644
index 5ab51679caa..00000000000
--- a/drivers/staging/generic_serial/rio/cirrus.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/****************************************************************************
- ******* *******
- ******* CIRRUS.H *******
- ******* *******
- ****************************************************************************
-
- Author : Jeremy Rolls
- Date : 3 Aug 1990
-
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Version : 0.01
-
-
- Mods
- ----------------------------------------------------------------------------
- Date By Description
- ----------------------------------------------------------------------------
-
- ***************************************************************************/
-
-#ifndef _cirrus_h
-#define _cirrus_h 1
-
-/* Bit fields for particular registers shared with driver */
-
-/* COR1 - driver and RTA */
-#define RIOC_COR1_ODD 0x80 /* Odd parity */
-#define RIOC_COR1_EVEN 0x00 /* Even parity */
-#define RIOC_COR1_NOP 0x00 /* No parity */
-#define RIOC_COR1_FORCE 0x20 /* Force parity */
-#define RIOC_COR1_NORMAL 0x40 /* With parity */
-#define RIOC_COR1_1STOP 0x00 /* 1 stop bit */
-#define RIOC_COR1_15STOP 0x04 /* 1.5 stop bits */
-#define RIOC_COR1_2STOP 0x08 /* 2 stop bits */
-#define RIOC_COR1_5BITS 0x00 /* 5 data bits */
-#define RIOC_COR1_6BITS 0x01 /* 6 data bits */
-#define RIOC_COR1_7BITS 0x02 /* 7 data bits */
-#define RIOC_COR1_8BITS 0x03 /* 8 data bits */
-
-#define RIOC_COR1_HOST 0xef /* Safe host bits */
-
-/* RTA only */
-#define RIOC_COR1_CINPCK 0x00 /* Check parity of received characters */
-#define RIOC_COR1_CNINPCK 0x10 /* Don't check parity */
-
-/* COR2 bits for both RTA and driver use */
-#define RIOC_COR2_IXANY 0x80 /* IXANY - any character is XON */
-#define RIOC_COR2_IXON 0x40 /* IXON - enable tx soft flowcontrol */
-#define RIOC_COR2_RTSFLOW 0x02 /* Enable tx hardware flow control */
-
-/* Additional driver bits */
-#define RIOC_COR2_HUPCL 0x20 /* Hang up on close */
-#define RIOC_COR2_CTSFLOW 0x04 /* Enable rx hardware flow control */
-#define RIOC_COR2_IXOFF 0x01 /* Enable rx software flow control */
-#define RIOC_COR2_DTRFLOW 0x08 /* Enable tx hardware flow control */
-
-/* RTA use only */
-#define RIOC_COR2_ETC 0x20 /* Embedded transmit options */
-#define RIOC_COR2_LOCAL 0x10 /* Local loopback mode */
-#define RIOC_COR2_REMOTE 0x08 /* Remote loopback mode */
-#define RIOC_COR2_HOST 0xc2 /* Safe host bits */
-
-/* COR3 - RTA use only */
-#define RIOC_COR3_SCDRNG 0x80 /* Enable special char detect for range */
-#define RIOC_COR3_SCD34 0x40 /* Special character detect for SCHR's 3 + 4 */
-#define RIOC_COR3_FCT 0x20 /* Flow control transparency */
-#define RIOC_COR3_SCD12 0x10 /* Special character detect for SCHR's 1 + 2 */
-#define RIOC_COR3_FIFO12 0x0c /* 12 chars for receive FIFO threshold */
-#define RIOC_COR3_FIFO10 0x0a /* 10 chars for receive FIFO threshold */
-#define RIOC_COR3_FIFO8 0x08 /* 8 chars for receive FIFO threshold */
-#define RIOC_COR3_FIFO6 0x06 /* 6 chars for receive FIFO threshold */
-
-#define RIOC_COR3_THRESHOLD RIOC_COR3_FIFO8 /* MUST BE LESS THAN MCOR_THRESHOLD */
-
-#define RIOC_COR3_DEFAULT (RIOC_COR3_FCT | RIOC_COR3_THRESHOLD)
- /* Default bits for COR3 */
-
-/* COR4 driver and RTA use */
-#define RIOC_COR4_IGNCR 0x80 /* Throw away CR's on input */
-#define RIOC_COR4_ICRNL 0x40 /* Map CR -> NL on input */
-#define RIOC_COR4_INLCR 0x20 /* Map NL -> CR on input */
-#define RIOC_COR4_IGNBRK 0x10 /* Ignore Break */
-#define RIOC_COR4_NBRKINT 0x08 /* No interrupt on break (-BRKINT) */
-#define RIOC_COR4_RAISEMOD 0x01 /* Raise modem output lines on non-zero baud */
-
-
-/* COR4 driver only */
-#define RIOC_COR4_IGNPAR 0x04 /* IGNPAR (ignore characters with errors) */
-#define RIOC_COR4_PARMRK 0x02 /* PARMRK */
-
-#define RIOC_COR4_HOST 0xf8 /* Safe host bits */
-
-/* COR4 RTA only */
-#define RIOC_COR4_CIGNPAR 0x02 /* Throw away bad characters */
-#define RIOC_COR4_CPARMRK 0x04 /* PARMRK characters */
-#define RIOC_COR4_CNPARMRK 0x03 /* Don't PARMRK */
-
-/* COR5 driver and RTA use */
-#define RIOC_COR5_ISTRIP 0x80 /* Strip input chars to 7 bits */
-#define RIOC_COR5_LNE 0x40 /* Enable LNEXT processing */
-#define RIOC_COR5_CMOE 0x20 /* Match good and errored characters */
-#define RIOC_COR5_ONLCR 0x02 /* NL -> CR NL on output */
-#define RIOC_COR5_OCRNL 0x01 /* CR -> NL on output */
-
-/*
-** Spare bits - these are not used in the CIRRUS registers, so we use
-** them to set various other features.
-*/
-/*
-** tstop and tbusy indication
-*/
-#define RIOC_COR5_TSTATE_ON 0x08 /* Turn on monitoring of tbusy and tstop */
-#define RIOC_COR5_TSTATE_OFF 0x04 /* Turn off monitoring of tbusy and tstop */
-/*
-** TAB3
-*/
-#define RIOC_COR5_TAB3 0x10 /* TAB3 mode */
-
-#define RIOC_COR5_HOST 0xc3 /* Safe host bits */
-
-/* CCSR */
-#define RIOC_CCSR_TXFLOFF 0x04 /* Tx is xoffed */
-
-/* MSVR1 */
-/* NB. DTR / CD swapped from Cirrus spec as the pins are also reversed on the
- RTA. This is because otherwise DCD would get lost on the 1 parallel / 3
- serial option.
-*/
-#define RIOC_MSVR1_CD 0x80 /* CD (DSR on Cirrus) */
-#define RIOC_MSVR1_RTS 0x40 /* RTS (CTS on Cirrus) */
-#define RIOC_MSVR1_RI 0x20 /* RI */
-#define RIOC_MSVR1_DTR 0x10 /* DTR (CD on Cirrus) */
-#define RIOC_MSVR1_CTS 0x01 /* CTS output pin (RTS on Cirrus) */
-/* Next two used to indicate state of tbusy and tstop to driver */
-#define RIOC_MSVR1_TSTOP 0x08 /* Set if port flow controlled */
-#define RIOC_MSVR1_TEMPTY 0x04 /* Set if port tx buffer empty */
-
-#define RIOC_MSVR1_HOST 0xf3 /* The bits the host wants */
-
-/* Defines for the subscripts of a CONFIG packet */
-#define RIOC_CONFIG_COR1 1 /* Option register 1 */
-#define RIOC_CONFIG_COR2 2 /* Option register 2 */
-#define RIOC_CONFIG_COR4 3 /* Option register 4 */
-#define RIOC_CONFIG_COR5 4 /* Option register 5 */
-#define RIOC_CONFIG_TXXON 5 /* Tx XON character */
-#define RIOC_CONFIG_TXXOFF 6 /* Tx XOFF character */
-#define RIOC_CONFIG_RXXON 7 /* Rx XON character */
-#define RIOC_CONFIG_RXXOFF 8 /* Rx XOFF character */
-#define RIOC_CONFIG_LNEXT 9 /* LNEXT character */
-#define RIOC_CONFIG_TXBAUD 10 /* Tx baud rate */
-#define RIOC_CONFIG_RXBAUD 11 /* Rx baud rate */
-
-#define RIOC_PRE_EMPTIVE 0x80 /* Pre-emptive bit in command field */
-
-/* Packet types going from Host to remote - with the exception of OPEN, MOPEN,
- CONFIG, SBREAK and MEMDUMP the remaining bytes of the data array will not
- be used
-*/
-#define RIOC_OPEN 0x00 /* Open a port */
-#define RIOC_CONFIG 0x01 /* Configure a port */
-#define RIOC_MOPEN 0x02 /* Modem open (block for DCD) */
-#define RIOC_CLOSE 0x03 /* Close a port */
-#define RIOC_WFLUSH (0x04 | RIOC_PRE_EMPTIVE) /* Write flush */
-#define RIOC_RFLUSH (0x05 | RIOC_PRE_EMPTIVE) /* Read flush */
-#define RIOC_RESUME (0x06 | RIOC_PRE_EMPTIVE) /* Resume if xoffed */
-#define RIOC_SBREAK 0x07 /* Start break */
-#define RIOC_EBREAK 0x08 /* End break */
-#define RIOC_SUSPEND (0x09 | RIOC_PRE_EMPTIVE) /* Susp op (behave as tho xoffed) */
-#define RIOC_FCLOSE (0x0a | RIOC_PRE_EMPTIVE) /* Force close */
-#define RIOC_XPRINT 0x0b /* Xprint packet */
-#define RIOC_MBIS (0x0c | RIOC_PRE_EMPTIVE) /* Set modem lines */
-#define RIOC_MBIC (0x0d | RIOC_PRE_EMPTIVE) /* Clear modem lines */
-#define RIOC_MSET (0x0e | RIOC_PRE_EMPTIVE) /* Set modem lines */
-#define RIOC_PCLOSE 0x0f /* Pseudo close - Leaves rx/tx enabled */
-#define RIOC_MGET (0x10 | RIOC_PRE_EMPTIVE) /* Force update of modem status */
-#define RIOC_MEMDUMP (0x11 | RIOC_PRE_EMPTIVE) /* Send back mem from addr supplied */
-#define RIOC_READ_REGISTER (0x12 | RIOC_PRE_EMPTIVE) /* Read CD1400 register (debug) */
-
-/* "Command" packets going from remote to host COMPLETE and MODEM_STATUS
- use data[4] / data[3] to indicate current state and modem status respectively
-*/
-
-#define RIOC_COMPLETE (0x20 | RIOC_PRE_EMPTIVE)
- /* Command complete */
-#define RIOC_BREAK_RECEIVED (0x21 | RIOC_PRE_EMPTIVE)
- /* Break received */
-#define RIOC_MODEM_STATUS (0x22 | RIOC_PRE_EMPTIVE)
- /* Change in modem status */
-
-/* "Command" packet that could go either way - handshake wake-up */
-#define RIOC_HANDSHAKE (0x23 | RIOC_PRE_EMPTIVE)
- /* Wake-up to HOST / RTA */
-
-#endif
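
As a worked example of the register bits above, an 8N1 line with RTS/CTS flow control reduces to the following COR1/COR2 values (illustrative only):

/* Sketch only: 8 data bits, no parity, 1 stop bit; hardware flow control. */
unsigned char example_cor1 = RIOC_COR1_NOP | RIOC_COR1_1STOP | RIOC_COR1_8BITS; /* 0x03 */
unsigned char example_cor2 = RIOC_COR2_RTSFLOW | RIOC_COR2_CTSFLOW;             /* 0x06 */
/* Pre-emptive commands are simply ORed with bit 7, e.g. RIOC_WFLUSH == 0x84. */
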
diff --git a/drivers/staging/generic_serial/rio/cmdblk.h b/drivers/staging/generic_serial/rio/cmdblk.h
deleted file mode 100644
index 9ed4f861675..00000000000
--- a/drivers/staging/generic_serial/rio/cmdblk.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : cmdblk.h
-** SID : 1.2
-** Last Modified : 11/6/98 11:34:09
-** Retrieved : 11/6/98 11:34:20
-**
-** ident @(#)cmdblk.h 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __rio_cmdblk_h__
-#define __rio_cmdblk_h__
-
-/*
-** the structure of a command block, used to queue commands destined for
-** a rup.
-*/
-
-struct CmdBlk {
- struct CmdBlk *NextP; /* Pointer to next command block */
- struct PKT Packet; /* A packet, to copy to the rup */
- /* The func to call to check if OK */
- int (*PreFuncP) (unsigned long, struct CmdBlk *);
- int PreArg; /* The arg for the func */
- /* The func to call when completed */
- int (*PostFuncP) (unsigned long, struct CmdBlk *);
- int PostArg; /* The arg for the func */
-};
-
-#define NUM_RIO_CMD_BLKS (3 * (MAX_RUP * 4 + LINKS_PER_UNIT * 4))
-#endif
diff --git a/drivers/staging/generic_serial/rio/cmdpkt.h b/drivers/staging/generic_serial/rio/cmdpkt.h
deleted file mode 100644
index c1e7a279807..00000000000
--- a/drivers/staging/generic_serial/rio/cmdpkt.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : cmdpkt.h
-** SID : 1.2
-** Last Modified : 11/6/98 11:34:09
-** Retrieved : 11/6/98 11:34:20
-**
-** ident @(#)cmdpkt.h 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-#ifndef __rio_cmdpkt_h__
-#define __rio_cmdpkt_h__
-
-/*
-** overlays for the data area of a packet. Used in both directions
-** (to build a packet to send, and to interpret a packet that arrives)
-** and is very inconvenient for MIPS, so they appear as two separate
-** structures - those used for modifying/reading packets on the card
-** and those for modifying/reading packets in real memory, which have an _M
-** suffix.
-*/
-
-#define RTA_BOOT_DATA_SIZE (PKT_MAX_DATA_LEN-2)
-
-/*
-** The boot information packet looks like this:
-** This structure overlays a PktCmd->CmdData structure, and so starts
-** at Data[2] in the actual pkt!
-*/
-struct BootSequence {
- u16 NumPackets;
- u16 LoadBase;
- u16 CodeSize;
-};
-
-#define BOOT_SEQUENCE_LEN 8
-
-struct SamTop {
- u8 Unit;
- u8 Link;
-};
-
-struct CmdHdr {
- u8 PcCommand;
- union {
- u8 PcPhbNum;
- u8 PcLinkNum;
- u8 PcIDNum;
- } U0;
-};
-
-
-struct PktCmd {
- union {
- struct {
- struct CmdHdr CmdHdr;
- struct BootSequence PcBootSequence;
- } S1;
- struct {
- u16 PcSequence;
- u8 PcBootData[RTA_BOOT_DATA_SIZE];
- } S2;
- struct {
- u16 __crud__;
- u8 PcUniqNum[4]; /* this is really a uint. */
- u8 PcModuleTypes; /* what modules are fitted */
- } S3;
- struct {
- struct CmdHdr CmdHdr;
- u8 __undefined__;
- u8 PcModemStatus;
- u8 PcPortStatus;
- u8 PcSubCommand; /* commands like mem or register dump */
- u16 PcSubAddr; /* Address for command */
- u8 PcSubData[64]; /* Data area for command */
- } S4;
- struct {
- struct CmdHdr CmdHdr;
- u8 PcCommandText[1];
- u8 __crud__[20];
- u8 PcIDNum2; /* It had to go somewhere! */
- } S5;
- struct {
- struct CmdHdr CmdHdr;
- struct SamTop Topology[LINKS_PER_UNIT];
- } S6;
- } U1;
-};
-
-struct PktCmd_M {
- union {
- struct {
- struct {
- u8 PcCommand;
- union {
- u8 PcPhbNum;
- u8 PcLinkNum;
- u8 PcIDNum;
- } U0;
- } CmdHdr;
- struct {
- u16 NumPackets;
- u16 LoadBase;
- u16 CodeSize;
- } PcBootSequence;
- } S1;
- struct {
- u16 PcSequence;
- u8 PcBootData[RTA_BOOT_DATA_SIZE];
- } S2;
- struct {
- u16 __crud__;
- u8 PcUniqNum[4]; /* this is really a uint. */
- u8 PcModuleTypes; /* what modules are fitted */
- } S3;
- struct {
- u16 __cmd_hdr__;
- u8 __undefined__;
- u8 PcModemStatus;
- u8 PcPortStatus;
- u8 PcSubCommand;
- u16 PcSubAddr;
- u8 PcSubData[64];
- } S4;
- struct {
- u16 __cmd_hdr__;
- u8 PcCommandText[1];
- u8 __crud__[20];
- u8 PcIDNum2; /* Tacked on end */
- } S5;
- struct {
- u16 __cmd_hdr__;
- struct Top Topology[LINKS_PER_UNIT];
- } S6;
- } U1;
-};
-
-#define Command U1.S1.CmdHdr.PcCommand
-#define PhbNum U1.S1.CmdHdr.U0.PcPhbNum
-#define IDNum U1.S1.CmdHdr.U0.PcIDNum
-#define IDNum2 U1.S5.PcIDNum2
-#define LinkNum U1.S1.CmdHdr.U0.PcLinkNum
-#define Sequence U1.S2.PcSequence
-#define BootData U1.S2.PcBootData
-#define BootSequence U1.S1.PcBootSequence
-#define UniqNum U1.S3.PcUniqNum
-#define ModemStatus U1.S4.PcModemStatus
-#define PortStatus U1.S4.PcPortStatus
-#define SubCommand U1.S4.PcSubCommand
-#define SubAddr U1.S4.PcSubAddr
-#define SubData U1.S4.PcSubData
-#define CommandText U1.S5.PcCommandText
-#define RouteTopology U1.S6.Topology
-#define ModuleTypes U1.S3.PcModuleTypes
-
-#endif
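
The #defines above flatten the nested unions into plain field names, so building an in-memory OPEN command looks roughly like this (a sketch, not code from the removed riocmd.c):

#include <linux/string.h>

/* Sketch only: fill a host-memory command packet via the accessor macros. */
static void example_build_open(struct PktCmd_M *cmd, unsigned char phb)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->Command = RIOC_OPEN;	/* i.e. U1.S1.CmdHdr.PcCommand */
	cmd->PhbNum  = phb;		/* i.e. U1.S1.CmdHdr.U0.PcPhbNum */
}
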
diff --git a/drivers/staging/generic_serial/rio/daemon.h b/drivers/staging/generic_serial/rio/daemon.h
deleted file mode 100644
index 4af90323fd0..00000000000
--- a/drivers/staging/generic_serial/rio/daemon.h
+++ /dev/null
@@ -1,307 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : daemon.h
-** SID : 1.3
-** Last Modified : 11/6/98 11:34:09
-** Retrieved : 11/6/98 11:34:21
-**
-** ident @(#)daemon.h 1.3
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __rio_daemon_h__
-#define __rio_daemon_h__
-
-
-/*
-** structures used on /dev/rio
-*/
-
-struct Error {
- unsigned int Error;
- unsigned int Entry;
- unsigned int Other;
-};
-
-struct DownLoad {
- char __user *DataP;
- unsigned int Count;
- unsigned int ProductCode;
-};
-
-/*
-** A few constants....
-*/
-#ifndef MAX_VERSION_LEN
-#define MAX_VERSION_LEN 256
-#endif
-
-#ifndef MAX_XP_CTRL_LEN
-#define MAX_XP_CTRL_LEN 16 /* ALSO IN PORT.H */
-#endif
-
-struct PortSetup {
- unsigned int From; /* Set/Clear XP & IXANY Control from this port.... */
- unsigned int To; /* .... to this port */
- unsigned int XpCps; /* at this speed */
- char XpOn[MAX_XP_CTRL_LEN]; /* this is the start string */
- char XpOff[MAX_XP_CTRL_LEN]; /* this is the stop string */
- u8 IxAny; /* enable/disable IXANY */
- u8 IxOn; /* enable/disable IXON */
- u8 Lock; /* lock port params */
- u8 Store; /* store params across closes */
- u8 Drain; /* close only when drained */
-};
-
-struct LpbReq {
- unsigned int Host;
- unsigned int Link;
- struct LPB __user *LpbP;
-};
-
-struct RupReq {
- unsigned int HostNum;
- unsigned int RupNum;
- struct RUP __user *RupP;
-};
-
-struct PortReq {
- unsigned int SysPort;
- struct Port __user *PortP;
-};
-
-struct StreamInfo {
- unsigned int SysPort;
- int RQueue;
- int WQueue;
-};
-
-struct HostReq {
- unsigned int HostNum;
- struct Host __user *HostP;
-};
-
-struct HostDpRam {
- unsigned int HostNum;
- struct DpRam __user *DpRamP;
-};
-
-struct DebugCtrl {
- unsigned int SysPort;
- unsigned int Debug;
- unsigned int Wait;
-};
-
-struct MapInfo {
- unsigned int FirstPort; /* 8 ports, starting from this (tty) number */
- unsigned int RtaUnique; /* reside on this RTA (unique number) */
-};
-
-struct MapIn {
- unsigned int NumEntries; /* How many port sets are we mapping? */
- struct MapInfo *MapInfoP; /* Pointer to (user space) info */
-};
-
-struct SendPack {
- unsigned int PortNum;
- unsigned char Len;
- unsigned char Data[PKT_MAX_DATA_LEN];
-};
-
-struct SpecialRupCmd {
- struct PKT Packet;
- unsigned short Host;
- unsigned short RupNum;
-};
-
-struct IdentifyRta {
- unsigned long RtaUnique;
- u8 ID;
-};
-
-struct KillNeighbour {
- unsigned long UniqueNum;
- u8 Link;
-};
-
-struct rioVersion {
- char version[MAX_VERSION_LEN];
- char relid[MAX_VERSION_LEN];
- int buildLevel;
- char buildDate[MAX_VERSION_LEN];
-};
-
-
-/*
-** RIOC commands are for the daemon type operations
-**
-** 09.12.1998 ARG - ESIL 0776 part fix
-** Definition for 'RIOC' also appears in rioioctl.h, so we'd better do a
-** #ifndef here first.
-** rioioctl.h also now has #define 'RIO_QUICK_CHECK' as this ioctl is now
-** allowed to be used by customers.
-*/
-#ifndef RIOC
-#define RIOC ('R'<<8)|('i'<<16)|('o'<<24)
-#endif
-
-/*
-** Boot stuff
-*/
-#define RIO_GET_TABLE (RIOC | 100)
-#define RIO_PUT_TABLE (RIOC | 101)
-#define RIO_ASSIGN_RTA (RIOC | 102)
-#define RIO_DELETE_RTA (RIOC | 103)
-#define RIO_HOST_FOAD (RIOC | 104)
-#define RIO_QUICK_CHECK (RIOC | 105)
-#define RIO_SIGNALS_ON (RIOC | 106)
-#define RIO_SIGNALS_OFF (RIOC | 107)
-#define RIO_CHANGE_NAME (RIOC | 108)
-#define RIO_DOWNLOAD (RIOC | 109)
-#define RIO_GET_LOG (RIOC | 110)
-#define RIO_SETUP_PORTS (RIOC | 111)
-#define RIO_ALL_MODEM (RIOC | 112)
-
-/*
-** card state, debug stuff
-*/
-#define RIO_NUM_HOSTS (RIOC | 120)
-#define RIO_HOST_LPB (RIOC | 121)
-#define RIO_HOST_RUP (RIOC | 122)
-#define RIO_HOST_PORT (RIOC | 123)
-#define RIO_PARMS (RIOC | 124)
-#define RIO_HOST_REQ (RIOC | 125)
-#define RIO_READ_CONFIG (RIOC | 126)
-#define RIO_SET_CONFIG (RIOC | 127)
-#define RIO_VERSID (RIOC | 128)
-#define RIO_FLAGS (RIOC | 129)
-#define RIO_SETDEBUG (RIOC | 130)
-#define RIO_GETDEBUG (RIOC | 131)
-#define RIO_READ_LEVELS (RIOC | 132)
-#define RIO_SET_FAST_BUS (RIOC | 133)
-#define RIO_SET_SLOW_BUS (RIOC | 134)
-#define RIO_SET_BYTE_MODE (RIOC | 135)
-#define RIO_SET_WORD_MODE (RIOC | 136)
-#define RIO_STREAM_INFO (RIOC | 137)
-#define RIO_START_POLLER (RIOC | 138)
-#define RIO_STOP_POLLER (RIOC | 139)
-#define RIO_LAST_ERROR (RIOC | 140)
-#define RIO_TICK (RIOC | 141)
-#define RIO_TOCK (RIOC | 241) /* I did this on purpose, you know. */
-#define RIO_SEND_PACKET (RIOC | 142)
-#define RIO_SET_BUSY (RIOC | 143)
-#define SPECIAL_RUP_CMD (RIOC | 144)
-#define RIO_FOAD_RTA (RIOC | 145)
-#define RIO_ZOMBIE_RTA (RIOC | 146)
-#define RIO_IDENTIFY_RTA (RIOC | 147)
-#define RIO_KILL_NEIGHBOUR (RIOC | 148)
-#define RIO_DEBUG_MEM (RIOC | 149)
-/*
-** 150 - 167 used..... See below
-*/
-#define RIO_GET_PORT_SETUP (RIOC | 168)
-#define RIO_RESUME (RIOC | 169)
-#define RIO_MESG (RIOC | 170)
-#define RIO_NO_MESG (RIOC | 171)
-#define RIO_WHAT_MESG (RIOC | 172)
-#define RIO_HOST_DPRAM (RIOC | 173)
-#define RIO_MAP_B50_TO_50 (RIOC | 174)
-#define RIO_MAP_B50_TO_57600 (RIOC | 175)
-#define RIO_MAP_B110_TO_110 (RIOC | 176)
-#define RIO_MAP_B110_TO_115200 (RIOC | 177)
-#define RIO_GET_PORT_PARAMS (RIOC | 178)
-#define RIO_SET_PORT_PARAMS (RIOC | 179)
-#define RIO_GET_PORT_TTY (RIOC | 180)
-#define RIO_SET_PORT_TTY (RIOC | 181)
-#define RIO_SYSLOG_ONLY (RIOC | 182)
-#define RIO_SYSLOG_CONS (RIOC | 183)
-#define RIO_CONS_ONLY (RIOC | 184)
-#define RIO_BLOCK_OPENS (RIOC | 185)
-
-/*
-** 02.03.1999 ARG - ESIL 0820 fix :
-** RIOBootMode is no longer used by the driver, so these ioctls
-** are now obsolete :
-**
-#define RIO_GET_BOOT_MODE (RIOC | 186)
-#define RIO_SET_BOOT_MODE (RIOC | 187)
-**
-*/
-
-#define RIO_MEM_DUMP (RIOC | 189)
-#define RIO_READ_REGISTER (RIOC | 190)
-#define RIO_GET_MODTYPE (RIOC | 191)
-#define RIO_SET_TIMER (RIOC | 192)
-#define RIO_READ_CHECK (RIOC | 196)
-#define RIO_WAITING_FOR_RESTART (RIOC | 197)
-#define RIO_BIND_RTA (RIOC | 198)
-#define RIO_GET_BINDINGS (RIOC | 199)
-#define RIO_PUT_BINDINGS (RIOC | 200)
-
-#define RIO_MAKE_DEV (RIOC | 201)
-#define RIO_MINOR (RIOC | 202)
-
-#define RIO_IDENTIFY_DRIVER (RIOC | 203)
-#define RIO_DISPLAY_HOST_CFG (RIOC | 204)
-
-
-/*
-** MAKE_DEV / MINOR stuff
-*/
-#define RIO_DEV_DIRECT 0x0000
-#define RIO_DEV_MODEM 0x0200
-#define RIO_DEV_XPRINT 0x0400
-#define RIO_DEV_MASK 0x0600
-
-/*
-** port management, xprint stuff
-*/
-#define rIOCN(N) (RIOC|(N))
-#define rIOCR(N,T) (RIOC|(N))
-#define rIOCW(N,T) (RIOC|(N))
-
-#define RIO_GET_XP_ON rIOCR(150,char[16]) /* start xprint string */
-#define RIO_SET_XP_ON rIOCW(151,char[16])
-#define RIO_GET_XP_OFF rIOCR(152,char[16]) /* finish xprint string */
-#define RIO_SET_XP_OFF rIOCW(153,char[16])
-#define RIO_GET_XP_CPS rIOCR(154,int) /* xprint CPS */
-#define RIO_SET_XP_CPS rIOCW(155,int)
-#define RIO_GET_IXANY rIOCR(156,int) /* ixany allowed? */
-#define RIO_SET_IXANY rIOCW(157,int)
-#define RIO_SET_IXANY_ON rIOCN(158) /* allow ixany */
-#define RIO_SET_IXANY_OFF rIOCN(159) /* disallow ixany */
-#define RIO_GET_MODEM rIOCR(160,int) /* port is modem/direct line? */
-#define RIO_SET_MODEM rIOCW(161,int)
-#define RIO_SET_MODEM_ON rIOCN(162) /* port is a modem */
-#define RIO_SET_MODEM_OFF rIOCN(163) /* port is direct */
-#define RIO_GET_IXON rIOCR(164,int) /* ixon allowed? */
-#define RIO_SET_IXON rIOCW(165,int)
-#define RIO_SET_IXON_ON rIOCN(166) /* allow ixon */
-#define RIO_SET_IXON_OFF rIOCN(167) /* disallow ixon */
-
-#define RIO_GET_SIVIEW ((('s')<<8) | 106) /* backwards compatible with SI */
-
-#define RIO_IOCTL_UNKNOWN -2
-
-#endif
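
Unlike modern _IO()/_IOR() encodings, these numbers are plain "RIOC | n" values; rIOCR() and rIOCW() simply ignore their type argument. A small user-space check of the composition (illustrative only):

#include <stdio.h>

#define RIOC (('R'<<8)|('i'<<16)|('o'<<24))

int main(void)
{
	printf("RIO_QUICK_CHECK = 0x%08x\n", (unsigned int)(RIOC | 105));
	printf("RIO_VERSID      = 0x%08x\n", (unsigned int)(RIOC | 128));
	return 0;
}
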
diff --git a/drivers/staging/generic_serial/rio/errors.h b/drivers/staging/generic_serial/rio/errors.h
deleted file mode 100644
index bdb05234090..00000000000
--- a/drivers/staging/generic_serial/rio/errors.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : errors.h
-** SID : 1.2
-** Last Modified : 11/6/98 11:34:10
-** Retrieved : 11/6/98 11:34:21
-**
-** ident @(#)errors.h 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __rio_errors_h__
-#define __rio_errors_h__
-
-/*
-** error codes
-*/
-
-#define NOTHING_WRONG_AT_ALL 0
-#define BAD_CHARACTER_IN_NAME 1
-#define TABLE_ENTRY_ISNT_PROPERLY_NULL 2
-#define UNKNOWN_HOST_NUMBER 3
-#define ZERO_RTA_ID 4
-#define BAD_RTA_ID 5
-#define DUPLICATED_RTA_ID 6
-#define DUPLICATE_UNIQUE_NUMBER 7
-#define BAD_TTY_NUMBER 8
-#define TTY_NUMBER_IN_USE 9
-#define NAME_USED_TWICE 10
-#define HOST_ID_NOT_ZERO 11
-#define BOOT_IN_PROGRESS 12
-#define COPYIN_FAILED 13
-#define HOST_FILE_TOO_LARGE 14
-#define COPYOUT_FAILED 15
-#define NOT_SUPER_USER 16
-#define RIO_ALREADY_POLLING 17
-
-#define ID_NUMBER_OUT_OF_RANGE 18
-#define PORT_NUMBER_OUT_OF_RANGE 19
-#define HOST_NUMBER_OUT_OF_RANGE 20
-#define RUP_NUMBER_OUT_OF_RANGE 21
-#define TTY_NUMBER_OUT_OF_RANGE 22
-#define LINK_NUMBER_OUT_OF_RANGE 23
-
-#define HOST_NOT_RUNNING 24
-#define IOCTL_COMMAND_UNKNOWN 25
-#define RIO_SYSTEM_HALTED 26
-#define WAIT_FOR_DRAIN_BROKEN 27
-#define PORT_NOT_MAPPED_INTO_SYSTEM 28
-#define EXCLUSIVE_USE_SET 29
-#define WAIT_FOR_NOT_CLOSING_BROKEN 30
-#define WAIT_FOR_PORT_TO_OPEN_BROKEN 31
-#define WAIT_FOR_CARRIER_BROKEN 32
-#define WAIT_FOR_NOT_IN_USE_BROKEN 33
-#define WAIT_FOR_CAN_ADD_COMMAND_BROKEN 34
-#define WAIT_FOR_ADD_COMMAND_BROKEN 35
-#define WAIT_FOR_NOT_PARAM_BROKEN 36
-#define WAIT_FOR_RETRY_BROKEN 37
-#define HOST_HAS_ALREADY_BEEN_BOOTED 38
-#define UNIT_IS_IN_USE 39
-#define COULDNT_FIND_ENTRY 40
-#define RTA_UNIQUE_NUMBER_ZERO 41
-#define CLOSE_COMMAND_FAILED 42
-#define WAIT_FOR_CLOSE_BROKEN 43
-#define CPS_VALUE_OUT_OF_RANGE 44
-#define ID_ALREADY_IN_USE 45
-#define SIGNALS_ALREADY_SET 46
-#define NOT_RECEIVING_PROCESS 47
-#define RTA_NUMBER_WRONG 48
-#define NO_SUCH_PRODUCT 49
-#define HOST_SYSPORT_BAD 50
-#define ID_NOT_TENTATIVE 51
-#define XPRINT_CPS_OUT_OF_RANGE 52
-#define NOT_ENOUGH_CORE_FOR_PCI_COPY 53
-
-
-#endif /* __rio_errors_h__ */
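
These codes were handed back to the configuration daemon; a minimal name lookup for a few of them might look like this (sketch only, not from the removed driver):

/* Sketch only: map a handful of the daemon-facing error codes to strings. */
static const char *example_rio_err_name(unsigned int err)
{
	switch (err) {
	case NOTHING_WRONG_AT_ALL:	return "ok";
	case COPYIN_FAILED:		return "copy from user failed";
	case NOT_SUPER_USER:		return "not superuser";
	case HOST_NOT_RUNNING:		return "host not running";
	case IOCTL_COMMAND_UNKNOWN:	return "unknown ioctl command";
	default:			return "unrecognised RIO error";
	}
}
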
diff --git a/drivers/staging/generic_serial/rio/func.h b/drivers/staging/generic_serial/rio/func.h
deleted file mode 100644
index 078d44f85e4..00000000000
--- a/drivers/staging/generic_serial/rio/func.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : func.h
-** SID : 1.3
-** Last Modified : 11/6/98 11:34:10
-** Retrieved : 11/6/98 11:34:21
-**
-** ident @(#)func.h 1.3
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __func_h_def
-#define __func_h_def
-
-#include <linux/kdev_t.h>
-
-/* rioboot.c */
-int RIOBootCodeRTA(struct rio_info *, struct DownLoad *);
-int RIOBootCodeHOST(struct rio_info *, struct DownLoad *);
-int RIOBootCodeUNKNOWN(struct rio_info *, struct DownLoad *);
-void msec_timeout(struct Host *);
-int RIOBootRup(struct rio_info *, unsigned int, struct Host *, struct PKT __iomem *);
-int RIOBootOk(struct rio_info *, struct Host *, unsigned long);
-int RIORtaBound(struct rio_info *, unsigned int);
-void rio_fill_host_slot(int, int, unsigned int, struct Host *);
-
-/* riocmd.c */
-int RIOFoadRta(struct Host *, struct Map *);
-int RIOZombieRta(struct Host *, struct Map *);
-int RIOCommandRta(struct rio_info *, unsigned long, int (*func) (struct Host *, struct Map *));
-int RIOIdentifyRta(struct rio_info *, void __user *);
-int RIOKillNeighbour(struct rio_info *, void __user *);
-int RIOSuspendBootRta(struct Host *, int, int);
-int RIOFoadWakeup(struct rio_info *);
-struct CmdBlk *RIOGetCmdBlk(void);
-void RIOFreeCmdBlk(struct CmdBlk *);
-int RIOQueueCmdBlk(struct Host *, unsigned int, struct CmdBlk *);
-void RIOPollHostCommands(struct rio_info *, struct Host *);
-int RIOWFlushMark(unsigned long, struct CmdBlk *);
-int RIORFlushEnable(unsigned long, struct CmdBlk *);
-int RIOUnUse(unsigned long, struct CmdBlk *);
-
-/* rioctrl.c */
-int riocontrol(struct rio_info *, dev_t, int, unsigned long, int);
-
-int RIOPreemptiveCmd(struct rio_info *, struct Port *, unsigned char);
-
-/* rioinit.c */
-void rioinit(struct rio_info *, struct RioHostInfo *);
-void RIOInitHosts(struct rio_info *, struct RioHostInfo *);
-void RIOISAinit(struct rio_info *, int);
-int RIODoAT(struct rio_info *, int, int);
-caddr_t RIOCheckForATCard(int);
-int RIOAssignAT(struct rio_info *, int, void __iomem *, int);
-int RIOBoardTest(unsigned long, void __iomem *, unsigned char, int);
-void RIOAllocDataStructs(struct rio_info *);
-void RIOSetupDataStructs(struct rio_info *);
-int RIODefaultName(struct rio_info *, struct Host *, unsigned int);
-struct rioVersion *RIOVersid(void);
-void RIOHostReset(unsigned int, struct DpRam __iomem *, unsigned int);
-
-/* riointr.c */
-void RIOTxEnable(char *);
-void RIOServiceHost(struct rio_info *, struct Host *);
-int riotproc(struct rio_info *, struct ttystatics *, int, int);
-
-/* rioparam.c */
-int RIOParam(struct Port *, int, int, int);
-int RIODelay(struct Port *PortP, int);
-int RIODelay_ni(struct Port *PortP, int);
-void ms_timeout(struct Port *);
-int can_add_transmit(struct PKT __iomem **, struct Port *);
-void add_transmit(struct Port *);
-void put_free_end(struct Host *, struct PKT __iomem *);
-int can_remove_receive(struct PKT __iomem **, struct Port *);
-void remove_receive(struct Port *);
-
-/* rioroute.c */
-int RIORouteRup(struct rio_info *, unsigned int, struct Host *, struct PKT __iomem *);
-void RIOFixPhbs(struct rio_info *, struct Host *, unsigned int);
-unsigned int GetUnitType(unsigned int);
-int RIOSetChange(struct rio_info *);
-int RIOFindFreeID(struct rio_info *, struct Host *, unsigned int *, unsigned int *);
-
-
-/* riotty.c */
-
-int riotopen(struct tty_struct *tty, struct file *filp);
-int riotclose(void *ptr);
-int riotioctl(struct rio_info *, struct tty_struct *, int, caddr_t);
-void ttyseth(struct Port *, struct ttystatics *, struct old_sgttyb *sg);
-
-/* riotable.c */
-int RIONewTable(struct rio_info *);
-int RIOApel(struct rio_info *);
-int RIODeleteRta(struct rio_info *, struct Map *);
-int RIOAssignRta(struct rio_info *, struct Map *);
-int RIOReMapPorts(struct rio_info *, struct Host *, struct Map *);
-int RIOChangeName(struct rio_info *, struct Map *);
-
-#if 0
-/* riodrvr.c */
-struct rio_info *rio_install(struct RioHostInfo *);
-int rio_uninstall(struct rio_info *);
-int rio_open(struct rio_info *, int, struct file *);
-int rio_close(struct rio_info *, struct file *);
-int rio_read(struct rio_info *, struct file *, char *, int);
-int rio_write(struct rio_info *, struct file *f, char *, int);
-int rio_ioctl(struct rio_info *, struct file *, int, char *);
-int rio_select(struct rio_info *, struct file *f, int, struct sel *);
-int rio_intr(char *);
-int rio_isr_thread(char *);
-struct rio_info *rio_info_store(int cmd, struct rio_info *p);
-#endif
-
-extern void rio_copy_to_card(void *from, void __iomem *to, int len);
-extern int rio_minor(struct tty_struct *tty);
-extern int rio_ismodem(struct tty_struct *tty);
-
-extern void rio_start_card_running(struct Host *HostP);
-
-#endif /* __func_h_def */
diff --git a/drivers/staging/generic_serial/rio/host.h b/drivers/staging/generic_serial/rio/host.h
deleted file mode 100644
index 78f24540c22..00000000000
--- a/drivers/staging/generic_serial/rio/host.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : host.h
-** SID : 1.2
-** Last Modified : 11/6/98 11:34:10
-** Retrieved : 11/6/98 11:34:21
-**
-** ident @(#)host.h 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __rio_host_h__
-#define __rio_host_h__
-
-/*
-** the host structure - one per host card in the system.
-*/
-
-#define MAX_EXTRA_UNITS 64
-
-/*
-** Host data structure. This is used for the software equiv. of
-** the host.
-*/
-struct Host {
- struct pci_dev *pdev;
- unsigned char Type; /* RIO_EISA, RIO_MCA, ... */
- unsigned char Ivec; /* POLLED or ivec number */
- unsigned char Mode; /* Control stuff */
- unsigned char Slot; /* Slot */
- void __iomem *Caddr; /* KV address of DPRAM */
- struct DpRam __iomem *CardP; /* KV address of DPRAM, with overlay */
- unsigned long PaddrP; /* Phys. address of DPRAM */
- char Name[MAX_NAME_LEN]; /* The name of the host */
- unsigned int UniqueNum; /* host unique number */
- spinlock_t HostLock; /* Lock structure for MPX */
- unsigned int WorkToBeDone; /* set to true each interrupt */
- unsigned int InIntr; /* Being serviced? */
- unsigned int IntSrvDone; /* host's interrupt has been serviced */
- void (*Copy) (void *, void __iomem *, int); /* copy func */
- struct timer_list timer;
- /*
- ** I M P O R T A N T !
- **
- ** The rest of this data structure is cleared to zero after
- ** a RIO_HOST_FOAD command.
- */
-
- unsigned long Flags; /* Whats going down */
-#define RC_WAITING 0
-#define RC_STARTUP 1
-#define RC_RUNNING 2
-#define RC_STUFFED 3
-#define RC_READY 7
-#define RUN_STATE 7
-/*
-** Boot mode applies to the way in which hosts in this system will
-** boot RTAs
-*/
-#define RC_BOOT_ALL 0x8 /* Boot all RTAs attached */
-#define RC_BOOT_OWN 0x10 /* Only boot RTAs bound to this system */
-#define RC_BOOT_NONE 0x20 /* Don't boot any RTAs (slave mode) */
-
- struct Top Topology[LINKS_PER_UNIT]; /* one per link */
- struct Map Mapping[MAX_RUP]; /* Mappings for host */
- struct PHB __iomem *PhbP; /* Pointer to the PHB array */
- unsigned short __iomem *PhbNumP; /* Ptr to Number of PHB's */
- struct LPB __iomem *LinkStrP; /* Link Structure Array */
- struct RUP __iomem *RupP; /* Sixteen real rups here */
- struct PARM_MAP __iomem *ParmMapP; /* points to the parmmap */
- unsigned int ExtraUnits[MAX_EXTRA_UNITS]; /* unknown things */
- unsigned int NumExtraBooted; /* how many of the above */
- /*
- ** Twenty logical rups.
- ** The first sixteen are the real Rup entries (above), the last four
- ** are the link RUPs.
- */
- struct UnixRup UnixRups[MAX_RUP + LINKS_PER_UNIT];
- int timeout_id; /* For calling 100 ms delays */
- int timeout_sem; /* For calling 100 ms delays */
- unsigned long locks; /* long req'd for set_bit --RR */
- char ____end_marker____;
-};
-#define Control CardP->DpControl
-#define SetInt CardP->DpSetInt
-#define ResetTpu CardP->DpResetTpu
-#define ResetInt CardP->DpResetInt
-#define Signature CardP->DpSignature
-#define Sram1 CardP->DpSram1
-#define Sram2 CardP->DpSram2
-#define Sram3 CardP->DpSram3
-#define Scratch CardP->DpScratch
-#define __ParmMapR CardP->DpParmMapR
-#define SLX CardP->DpSlx
-#define Revision CardP->DpRevision
-#define Unique CardP->DpUnique
-#define Year CardP->DpYear
-#define Week CardP->DpWeek
-
-#define RIO_DUMBPARM 0x0860 /* what not to expect */
-
-#endif
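
The shortcut macros above let host-side code poke the DPRAM control bytes through the __iomem overlay, roughly as follows (illustrative only; the written value is an assumption):

#include <linux/io.h>

/* Sketch only: strobe the "interrupt the card" byte via the overlay macros. */
static void example_kick_card(struct Host *HostP)
{
	writeb(0xff, &HostP->SetInt);	/* SetInt expands to CardP->DpSetInt */
}
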
diff --git a/drivers/staging/generic_serial/rio/link.h b/drivers/staging/generic_serial/rio/link.h
deleted file mode 100644
index f3bf11a04d4..00000000000
--- a/drivers/staging/generic_serial/rio/link.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/****************************************************************************
- ******* *******
- ******* L I N K
- ******* *******
- ****************************************************************************
-
- Author : Ian Nandhra / Jeremy Rolls
- Date :
-
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Version : 0.01
-
-
- Mods
- ----------------------------------------------------------------------------
- Date By Description
- ----------------------------------------------------------------------------
-
- ***************************************************************************/
-
-#ifndef _link_h
-#define _link_h 1
-
-/*************************************************
- * Define the Link Status stuff
- ************************************************/
-/* Boot request stuff */
-#define BOOT_REQUEST ((ushort) 0) /* Request for a boot */
-#define BOOT_ABORT ((ushort) 1) /* Abort a boot */
-#define BOOT_SEQUENCE ((ushort) 2) /* Packet with the number of packets
- and load address */
-#define BOOT_COMPLETED ((ushort) 3) /* Boot completed */
-
-
-struct LPB {
- u16 link_number; /* Link Number */
- u16 in_ch; /* Link In Channel */
- u16 out_ch; /* Link Out Channel */
- u8 attached_serial[4]; /* Attached serial number */
- u8 attached_host_serial[4];
- /* Serial number of Host who
- booted the other end */
- u16 descheduled; /* Currently Descheduled */
- u16 state; /* Current state */
- u16 send_poll; /* Send a Poll Packet */
- u16 ltt_p; /* Process Descriptor */
- u16 lrt_p; /* Process Descriptor */
- u16 lrt_status; /* Current lrt status */
- u16 ltt_status; /* Current ltt status */
- u16 timeout; /* Timeout value */
- u16 topology; /* Topology bits */
- u16 mon_ltt;
- u16 mon_lrt;
- u16 WaitNoBoot; /* Secs to hold off booting */
- u16 add_packet_list; /* Add packets to here */
- u16 remove_packet_list; /* Send packets from here */
-
- u16 lrt_fail_chan; /* Lrt's failure channel */
- u16 ltt_fail_chan; /* Ltt's failure channel */
-
- /* RUP structure for HOST to driver communications */
- struct RUP rup;
- struct RUP link_rup; /* RUP for the link (POLL,
- topology etc.) */
- u16 attached_link; /* Number of attached link */
- u16 csum_errors; /* csum errors */
- u16 num_disconnects; /* number of disconnects */
- u16 num_sync_rcvd; /* # sync's received */
- u16 num_sync_rqst; /* # sync requests */
- u16 num_tx; /* Num pkts sent */
- u16 num_rx; /* Num pkts received */
- u16 module_attached; /* Module types of attached modules */
- u16 led_timeout; /* LED timeout */
- u16 first_port; /* First port to service */
- u16 last_port; /* Last port to service */
-};
-
-#endif
-
-/*********** end of file ***********/
diff --git a/drivers/staging/generic_serial/rio/linux_compat.h b/drivers/staging/generic_serial/rio/linux_compat.h
deleted file mode 100644
index 34c0d2899ef..00000000000
--- a/drivers/staging/generic_serial/rio/linux_compat.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * (C) 2000 R.E.Wolff@BitWizard.nl
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/interrupt.h>
-
-
-#define DEBUG_ALL
-
-struct ttystatics {
- struct termios tm;
-};
-
-extern int rio_debug;
-
-#define RIO_DEBUG_INIT 0x000001
-#define RIO_DEBUG_BOOT 0x000002
-#define RIO_DEBUG_CMD 0x000004
-#define RIO_DEBUG_CTRL 0x000008
-#define RIO_DEBUG_INTR 0x000010
-#define RIO_DEBUG_PARAM 0x000020
-#define RIO_DEBUG_ROUTE 0x000040
-#define RIO_DEBUG_TABLE 0x000080
-#define RIO_DEBUG_TTY 0x000100
-#define RIO_DEBUG_FLOW 0x000200
-#define RIO_DEBUG_MODEMSIGNALS 0x000400
-#define RIO_DEBUG_PROBE 0x000800
-#define RIO_DEBUG_CLEANUP 0x001000
-#define RIO_DEBUG_IFLOW 0x002000
-#define RIO_DEBUG_PFE 0x004000
-#define RIO_DEBUG_REC 0x008000
-#define RIO_DEBUG_SPINLOCK 0x010000
-#define RIO_DEBUG_DELAY 0x020000
-#define RIO_DEBUG_MOD_COUNT 0x040000
-
-
-/* Copied over from riowinif.h. This is ugly. The winif file also declares
-much other stuff which is incompatible with the headers from the older
-driver. The older driver includes "brates.h", which shadows the
-definitions from Linux and is incompatible... */
-
-/* RxBaud and TxBaud definitions... */
-#define RIO_B0 0x00 /* RTS / DTR signals dropped */
-#define RIO_B50 0x01 /* 50 baud */
-#define RIO_B75 0x02 /* 75 baud */
-#define RIO_B110 0x03 /* 110 baud */
-#define RIO_B134 0x04 /* 134.5 baud */
-#define RIO_B150 0x05 /* 150 baud */
-#define RIO_B200 0x06 /* 200 baud */
-#define RIO_B300 0x07 /* 300 baud */
-#define RIO_B600 0x08 /* 600 baud */
-#define RIO_B1200 0x09 /* 1200 baud */
-#define RIO_B1800 0x0A /* 1800 baud */
-#define RIO_B2400 0x0B /* 2400 baud */
-#define RIO_B4800 0x0C /* 4800 baud */
-#define RIO_B9600 0x0D /* 9600 baud */
-#define RIO_B19200 0x0E /* 19200 baud */
-#define RIO_B38400 0x0F /* 38400 baud */
-#define RIO_B56000 0x10 /* 56000 baud */
-#define RIO_B57600 0x11 /* 57600 baud */
-#define RIO_B64000 0x12 /* 64000 baud */
-#define RIO_B115200 0x13 /* 115200 baud */
-#define RIO_B2000 0x14 /* 2000 baud */
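
A sketch of how a few termios speeds line up with the RIO_B* codes above (the fallback is an assumption; the real driver's table also honoured the RIO_MAP_B50_TO_57600 family of ioctls from daemon.h):

#include <linux/termios.h>

/* Sketch only: pick the RIO baud code for a handful of termios speeds. */
static int example_rio_baud(speed_t speed)
{
	switch (speed) {
	case B9600:	return RIO_B9600;
	case B19200:	return RIO_B19200;
	case B38400:	return RIO_B38400;
	case B115200:	return RIO_B115200;
	default:	return RIO_B9600;	/* assumed fallback */
	}
}
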
diff --git a/drivers/staging/generic_serial/rio/map.h b/drivers/staging/generic_serial/rio/map.h
deleted file mode 100644
index 28a66129293..00000000000
--- a/drivers/staging/generic_serial/rio/map.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : map.h
-** SID : 1.2
-** Last Modified : 11/6/98 11:34:11
-** Retrieved : 11/6/98 11:34:21
-**
-** ident @(#)map.h 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __rio_map_h__
-#define __rio_map_h__
-
-/*
-** mapping structure passed to and from the config.rio program to
-** determine the current topology of the world
-*/
-
-#define MAX_MAP_ENTRY 17
-#define TOTAL_MAP_ENTRIES (MAX_MAP_ENTRY*RIO_SLOTS)
-#define MAX_NAME_LEN 32
-
-struct Map {
- unsigned int HostUniqueNum; /* Supporting hosts unique number */
- unsigned int RtaUniqueNum; /* Unique number */
- /*
- ** The next two IDs must be swapped on big-endian architectures
- ** when using a v2.04 /etc/rio/config with a v3.00 driver (when
- ** upgrading for example).
- */
- unsigned short ID; /* ID used in the subnet */
- unsigned short ID2; /* ID of 2nd block of 8 for 16 port */
- unsigned long Flags; /* Booted, ID Given, Disconnected */
- unsigned long SysPort; /* First tty mapped to this port */
- struct Top Topology[LINKS_PER_UNIT]; /* ID connected to each link */
- char Name[MAX_NAME_LEN]; /* Cute name by which RTA is known */
-};
-
-/*
-** Flag values:
-*/
-#define RTA_BOOTED 0x00000001
-#define RTA_NEWBOOT 0x00000010
-#define MSG_DONE 0x00000020
-#define RTA_INTERCONNECT 0x00000040
-#define RTA16_SECOND_SLOT 0x00000080
-#define BEEN_HERE 0x00000100
-#define SLOT_TENTATIVE 0x40000000
-#define SLOT_IN_USE 0x80000000
-
-/*
-** HostUniqueNum is the unique number from the host card that this RTA
-** is to be connected to.
-** RtaUniqueNum is the unique number of the RTA concerned. It will be ZERO
-** if the slot in the table is unused. If it is the same as the HostUniqueNum
-** then this slot represents a host card.
-** Flags contains current boot/route state info
-** SysPort is a value in the range 0-504, being the number of the first tty
-** on this RTA. Each RTA supports 8 ports. The SysPort value must be a multiple of 8.
-** SysPort 0-127 correspond to /dev/ttyr001 to /dev/ttyr128, with minor
-** numbers 0-127. SysPort 128-255 correspond to /dev/ttyr129 to /dev/ttyr256,
-** again with minor numbers 0-127, and so on for SysPorts 256-383 and 384-511
-** ID will be in the range 0-16 for a `known' RTA. ID will be 0xFFFF for an
-** unused slot/unknown ID etc.
-** The Topology array contains the ID of the unit connected to each of the
-** four links on this unit. The entry will be 0xFFFF if NOTHING is connected
-** to the link, or will be 0xFF00 if an UNKNOWN unit is connected to the link.
-** The Name field is a null-terminated string, up to 31 characters, containing
-** the 'cute' name that the sysadmin/users know the RTA by. It is permissible
-** for this string to contain any character in the range \040 to \176 inclusive.
-** In particular, ctrl sequences and DEL (0x7F, \177) are not allowed. The
-** special character '%' IS allowable, and needs no special action.
-**
-*/
-
-#endif
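
The constraints spelled out in the comment above reduce to checks along these lines (a sketch; the helper name is made up):

/* Sketch only: is this Map slot a plausible, in-use RTA entry? */
static int example_map_entry_usable(const struct Map *m)
{
	if (!m->RtaUniqueNum)		/* zero means the slot is unused */
		return 0;
	if (m->SysPort % 8)		/* first tty must sit on an 8-port boundary */
		return 0;
	return !!(m->Flags & SLOT_IN_USE);
}
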
diff --git a/drivers/staging/generic_serial/rio/param.h b/drivers/staging/generic_serial/rio/param.h
deleted file mode 100644
index 7e9b6283e8a..00000000000
--- a/drivers/staging/generic_serial/rio/param.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : param.h
-** SID : 1.2
-** Last Modified : 11/6/98 11:34:12
-** Retrieved : 11/6/98 11:34:21
-**
-** ident @(#)param.h 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __rio_param_h__
-#define __rio_param_h__
-
-/*
-** the param command block, as used in OPEN and PARAM calls.
-*/
-
-struct phb_param {
- u8 Cmd; /* It is very important that these line up */
- u8 Cor1; /* with what is expected at the other end. */
- u8 Cor2; /* to confirm that you've got it right, */
- u8 Cor4; /* check with cirrus/cirrus.h */
- u8 Cor5;
- u8 TxXon; /* Transmit X-On character */
- u8 TxXoff; /* Transmit X-Off character */
- u8 RxXon; /* Receive X-On character */
- u8 RxXoff; /* Receive X-Off character */
- u8 LNext; /* Literal-next character */
- u8 TxBaud; /* Transmit baudrate */
- u8 RxBaud; /* Receive baudrate */
-};
-
-#endif
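
Putting the earlier headers together, a CONFIG block for 9600 baud, 8N1 with RTS/CTS flow control would be filled roughly like this, reusing the RIOC_* bits from cirrus.h and the RIO_B* codes from linux_compat.h (sketch only; the DC1/DC3 XON/XOFF characters are the usual convention, not taken from the removed code):

/* Sketch only: an example phb_param block, fields as laid out above. */
static const struct phb_param example_param = {
	.Cmd    = RIOC_CONFIG,
	.Cor1   = RIOC_COR1_NOP | RIOC_COR1_1STOP | RIOC_COR1_8BITS,
	.Cor2   = RIOC_COR2_RTSFLOW | RIOC_COR2_CTSFLOW,
	.Cor4   = RIOC_COR4_RAISEMOD,
	.Cor5   = 0,
	.TxXon  = 0x11,		/* DC1 */
	.TxXoff = 0x13,		/* DC3 */
	.RxXon  = 0x11,
	.RxXoff = 0x13,
	.LNext  = 0,
	.TxBaud = RIO_B9600,
	.RxBaud = RIO_B9600,
};
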
diff --git a/drivers/staging/generic_serial/rio/parmmap.h b/drivers/staging/generic_serial/rio/parmmap.h
deleted file mode 100644
index acc8fa439df..00000000000
--- a/drivers/staging/generic_serial/rio/parmmap.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/****************************************************************************
- ******* *******
- ******* H O S T M E M O R Y M A P
- ******* *******
- ****************************************************************************
-
- Author : Ian Nandhra / Jeremy Rolls
- Date :
-
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Version : 0.01
-
-
- Mods
- ----------------------------------------------------------------------------
- Date By Description
- ----------------------------------------------------------------------------
-6/4/1991 jonb Made changes to accommodate Mips R3230 bus
- ***************************************************************************/
-
-#ifndef _parmap_h
-#define _parmap_h
-
-typedef struct PARM_MAP PARM_MAP;
-
-struct PARM_MAP {
- u16 phb_ptr; /* Pointer to the PHB array */
- u16 phb_num_ptr; /* Ptr to Number of PHB's */
- u16 free_list; /* Free List pointer */
- u16 free_list_end; /* Free List End pointer */
- u16 q_free_list_ptr; /* Ptr to Q_BUF variable */
- u16 unit_id_ptr; /* Unit Id */
- u16 link_str_ptr; /* Link Structure Array */
- u16 bootloader_1; /* 1st Stage Boot Loader */
- u16 bootloader_2; /* 2nd Stage Boot Loader */
- u16 port_route_map_ptr; /* Port Route Map */
- u16 route_ptr; /* Unit Route Map */
- u16 map_present; /* Route Map present */
- s16 pkt_num; /* Total number of packets */
- s16 q_num; /* Total number of Q packets */
- u16 buffers_per_port; /* Number of buffers per port */
- u16 heap_size; /* Initial size of heap */
- u16 heap_left; /* Current Heap left */
- u16 error; /* Error code */
- u16 tx_max; /* Max number of tx pkts per phb */
- u16 rx_max; /* Max number of rx pkts per phb */
- u16 rx_limit; /* For high / low watermarks */
- s16 links; /* Links to use */
- s16 timer; /* Interrupts per second */
- u16 rups; /* Pointer to the RUPs */
- u16 max_phb; /* Mostly for debugging */
- u16 living; /* Just increments!! */
- u16 init_done; /* Initialisation over */
- u16 booting_link;
- u16 idle_count; /* Idle time counter */
- u16 busy_count; /* Busy counter */
- u16 idle_control; /* Control Idle Process */
- u16 tx_intr; /* TX interrupt pending */
- u16 rx_intr; /* RX interrupt pending */
- u16 rup_intr; /* RUP interrupt pending */
-};
-
-#endif
-
-/*********** end of file ***********/
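
The PARM_MAP above is a table of 16-bit offsets and counters that lives in the card's dual-port RAM; the host reads its fields through __iomem accessors. A minimal sketch of such a read (illustrative only, not part of the deleted file; the helper name is made up, the struct and field are the ones defined above):

/* Hypothetical host-side read of one PARM_MAP counter from DP RAM.
 * 'parmmap' is assumed to be an ioremap()ed pointer to the card's
 * PARM_MAP area. */
static u16 rio_read_living(struct PARM_MAP __iomem *parmmap)
{
	return readw(&parmmap->living);	/* free-running counter the firmware keeps incrementing */
}
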
diff --git a/drivers/staging/generic_serial/rio/pci.h b/drivers/staging/generic_serial/rio/pci.h
deleted file mode 100644
index 6032f913595..00000000000
--- a/drivers/staging/generic_serial/rio/pci.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : pci.h
-** SID : 1.2
-** Last Modified : 11/6/98 11:34:12
-** Retrieved : 11/6/98 11:34:21
-**
-** ident @(#)pci.h 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __rio_pci_h__
-#define __rio_pci_h__
-
-/*
-** PCI stuff
-*/
-
-#define PCITpFastClock 0x80
-#define PCITpSlowClock 0x00
-#define PCITpFastLinks 0x40
-#define PCITpSlowLinks 0x00
-#define PCITpIntEnable 0x04
-#define PCITpIntDisable 0x00
-#define PCITpBusEnable 0x02
-#define PCITpBusDisable 0x00
-#define PCITpBootFromRam 0x01
-#define PCITpBootFromLink 0x00
-
-#define RIO_PCI_VENDOR 0x11CB
-#define RIO_PCI_DEVICE 0x8000
-#define RIO_PCI_BASE_CLASS 0x02
-#define RIO_PCI_SUB_CLASS 0x80
-#define RIO_PCI_PROG_IFACE 0x00
-
-#define RIO_PCI_RID 0x0008
-#define RIO_PCI_BADR0 0x0010
-#define RIO_PCI_INTLN 0x003C
-#define RIO_PCI_INTPIN 0x003D
-
-#define RIO_PCI_MEM_SIZE 65536
-
-#define RIO_PCI_TURBO_TP 0x80
-#define RIO_PCI_FAST_LINKS 0x40
-#define RIO_PCI_INT_ENABLE 0x04
-#define RIO_PCI_TP_BUS_ENABLE 0x02
-#define RIO_PCI_BOOT_FROM_RAM 0x01
-
-#define RIO_PCI_DEFAULT_MODE 0x05
-
-#endif /* __rio_pci_h__ */
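
For what it's worth, the default mode value above (0x05) is exactly RIO_PCI_BOOT_FROM_RAM | RIO_PCI_INT_ENABLE. A minimal sketch of that decomposition (illustrative only; the helper is hypothetical):

/* Illustrative only: RIO_PCI_DEFAULT_MODE (0x05) is the OR of the
 * boot-from-RAM (0x01) and interrupt-enable (0x04) bits defined above. */
static inline int rio_pci_mode_is_default(unsigned char mode)
{
	return mode == (RIO_PCI_BOOT_FROM_RAM | RIO_PCI_INT_ENABLE);
}
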
diff --git a/drivers/staging/generic_serial/rio/phb.h b/drivers/staging/generic_serial/rio/phb.h
deleted file mode 100644
index a4c48ae4e36..00000000000
--- a/drivers/staging/generic_serial/rio/phb.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/****************************************************************************
- ******* *******
- ******* P H B H E A D E R *******
- ******* *******
- ****************************************************************************
-
- Author : Ian Nandhra, Jeremy Rolls
- Date :
-
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Version : 0.01
-
-
- Mods
- ----------------------------------------------------------------------------
- Date By Description
- ----------------------------------------------------------------------------
-
- ***************************************************************************/
-
-#ifndef _phb_h
-#define _phb_h 1
-
-/*************************************************
- * Handshake asserted. Deasserted by the LTT(s)
- ************************************************/
-#define PHB_HANDSHAKE_SET ((ushort) 0x001) /* Set by LRT */
-
-#define PHB_HANDSHAKE_RESET ((ushort) 0x002) /* Set by ISR / driver */
-
-#define PHB_HANDSHAKE_FLAGS (PHB_HANDSHAKE_RESET | PHB_HANDSHAKE_SET)
- /* Reset by ltt */
-
-
-/*************************************************
- * Maximum number of PHB's
- ************************************************/
-#define MAX_PHB ((ushort) 128) /* range 0-127 */
-
-/*************************************************
- * Defines for the mode fields
- ************************************************/
-#define TXPKT_INCOMPLETE 0x0001 /* Previous tx packet not completed */
-#define TXINTR_ENABLED 0x0002 /* Tx interrupt is enabled */
-#define TX_TAB3 0x0004 /* TAB3 mode */
-#define TX_OCRNL 0x0008 /* OCRNL mode */
-#define TX_ONLCR 0x0010 /* ONLCR mode */
-#define TX_SENDSPACES 0x0020 /* Send n spaces command needs
- completing */
-#define TX_SENDNULL 0x0040 /* Escaping NULL needs completing */
-#define TX_SENDLF 0x0080 /* LF -> CR LF needs completing */
-#define TX_PARALLELBUG 0x0100 /* CD1400 LF -> CR LF bug on parallel
- port */
-#define TX_HANGOVER (TX_SENDSPACES | TX_SENDLF | TX_SENDNULL)
-#define TX_DTRFLOW 0x0200 /* DTR tx flow control */
-#define TX_DTRFLOWED 0x0400 /* DTR is low - don't allow more data
- into the FIFO */
-#define TX_DATAINFIFO 0x0800 /* There is data in the FIFO */
-#define TX_BUSY 0x1000 /* Data in FIFO, shift or holding regs */
-
-#define RX_SPARE 0x0001 /* SPARE */
-#define RXINTR_ENABLED 0x0002 /* Rx interrupt enabled */
-#define RX_ICRNL 0x0008 /* ICRNL mode */
-#define RX_INLCR 0x0010 /* INLCR mode */
-#define RX_IGNCR 0x0020 /* IGNCR mode */
-#define RX_CTSFLOW 0x0040 /* CTSFLOW enabled */
-#define RX_IXOFF 0x0080 /* IXOFF enabled */
-#define RX_CTSFLOWED 0x0100 /* CTSFLOW and CTS dropped */
-#define RX_IXOFFED 0x0200 /* IXOFF and xoff sent */
-#define RX_BUFFERED 0x0400 /* Try and pass on complete packets */
-
-#define PORT_ISOPEN 0x0001 /* Port open? */
-#define PORT_HUPCL 0x0002 /* Hangup on close? */
-#define PORT_MOPENPEND 0x0004 /* Modem open pending */
-#define PORT_ISPARALLEL 0x0008 /* Parallel port */
-#define PORT_BREAK 0x0010 /* Port on break */
-#define PORT_STATUSPEND 0x0020 /* Status packet pending */
-#define PORT_BREAKPEND 0x0040 /* Break packet pending */
-#define PORT_MODEMPEND 0x0080 /* Modem status packet pending */
-#define PORT_PARALLELBUG 0x0100 /* CD1400 LF -> CR LF bug on parallel
- port */
-#define PORT_FULLMODEM 0x0200 /* Full modem signals */
-#define PORT_RJ45 0x0400 /* RJ45 connector - no RI signal */
-#define PORT_RESTRICTED 0x0600 /* Restricted connector - no RI / DTR */
-
-#define PORT_MODEMBITS 0x0600 /* Mask for modem fields */
-
-#define PORT_WCLOSE 0x0800 /* Waiting for close */
-#define PORT_HANDSHAKEFIX 0x1000 /* Port has H/W flow control fix */
-#define PORT_WASPCLOSED 0x2000 /* Port closed with PCLOSE */
-#define DUMPMODE 0x4000 /* Dump RTA mem */
-#define READ_REG 0x8000 /* Read CD1400 register */
-
-
-
-/**************************************************************************
- * PHB Structure
- * A few words.
- *
- * Normally Packets are added to the end of the list and removed from
- * the start. The pointer tx_add points to a SPACE to put a Packet.
- * The pointer tx_remove points to the next Packet to remove
- *************************************************************************/
-
-struct PHB {
- u8 source;
- u8 handshake;
- u8 status;
- u16 timeout; /* Maximum of 1.9 seconds */
- u8 link; /* Send down this link */
- u8 destination;
- u16 tx_start;
- u16 tx_end;
- u16 tx_add;
- u16 tx_remove;
-
- u16 rx_start;
- u16 rx_end;
- u16 rx_add;
- u16 rx_remove;
-
-};
-
-#endif
-
-/*********** end of file ***********/
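
The tx_add/tx_remove comment above describes a simple producer/consumer ring between tx_start and tx_end. A minimal sketch of the wrap-around step (illustrative only; the helper, the slot_size parameter, and the assumption that tx_end is one slot past the last entry are not from the deleted file):

/* Hypothetical: advance a PHB queue offset by one slot, wrapping from
 * the end of the array back to its start, as the comment above describes.
 * 'slot_size' is the size of one queue entry in card memory (assumed). */
static u16 phb_ring_next(u16 off, u16 start, u16 end, u16 slot_size)
{
	off += slot_size;
	if (off >= end)		/* assumed: 'end' is one past the last slot */
		off = start;
	return off;
}
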
diff --git a/drivers/staging/generic_serial/rio/pkt.h b/drivers/staging/generic_serial/rio/pkt.h
deleted file mode 100644
index a9458164f02..00000000000
--- a/drivers/staging/generic_serial/rio/pkt.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/****************************************************************************
- ******* *******
- ******* P A C K E T H E A D E R F I L E
- ******* *******
- ****************************************************************************
-
- Author : Ian Nandhra / Jeremy Rolls
- Date :
-
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Version : 0.01
-
-
- Mods
- ----------------------------------------------------------------------------
- Date By Description
- ----------------------------------------------------------------------------
-
- ***************************************************************************/
-
-#ifndef _pkt_h
-#define _pkt_h 1
-
-#define PKT_CMD_BIT ((ushort) 0x080)
-#define PKT_CMD_DATA ((ushort) 0x080)
-
-#define PKT_ACK ((ushort) 0x040)
-
-#define PKT_TGL ((ushort) 0x020)
-
-#define PKT_LEN_MASK ((ushort) 0x07f)
-
-#define DATA_WNDW ((ushort) 0x10)
-#define PKT_TTL_MASK ((ushort) 0x0f)
-
-#define PKT_MAX_DATA_LEN 72
-
-#define PKT_LENGTH sizeof(struct PKT)
-#define SYNC_PKT_LENGTH (PKT_LENGTH + 4)
-
-#define CONTROL_PKT_LEN_MASK PKT_LEN_MASK
-#define CONTROL_PKT_CMD_BIT PKT_CMD_BIT
-#define CONTROL_PKT_ACK (PKT_ACK << 8)
-#define CONTROL_PKT_TGL (PKT_TGL << 8)
-#define CONTROL_PKT_TTL_MASK (PKT_TTL_MASK << 8)
-#define CONTROL_DATA_WNDW (DATA_WNDW << 8)
-
-struct PKT {
- u8 dest_unit; /* Destination Unit Id */
- u8 dest_port; /* Destination Port */
- u8 src_unit; /* Source Unit Id */
- u8 src_port; /* Source Port */
- u8 len;
- u8 control;
- u8 data[PKT_MAX_DATA_LEN];
- /* Actual data :-) */
- u16 csum; /* C-SUM */
-};
-#endif
-
-/*********** end of file ***********/
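
The masks above split the one-byte len and control fields of struct PKT; the CONTROL_* forms are the same bits shifted into the high byte of a 16-bit read. A minimal decode sketch (illustrative only; which field each mask applies to is an assumption inferred from those shifted forms):

/* Hypothetical decode of a packet header using the masks above. */
static void pkt_decode(const struct PKT *pkt, int *is_data, int *data_len,
		       int *ack, int *toggle)
{
	*is_data  = (pkt->len & PKT_CMD_BIT) != 0;   /* 0x80, assumed: data vs. command */
	*data_len = pkt->len & PKT_LEN_MASK;         /* low 7 bits: byte count */
	*ack      = (pkt->control & PKT_ACK) != 0;   /* 0x40 */
	*toggle   = (pkt->control & PKT_TGL) != 0;   /* 0x20 */
}
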
diff --git a/drivers/staging/generic_serial/rio/port.h b/drivers/staging/generic_serial/rio/port.h
deleted file mode 100644
index 49cf6d15ee5..00000000000
--- a/drivers/staging/generic_serial/rio/port.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : port.h
-** SID : 1.3
-** Last Modified : 11/6/98 11:34:12
-** Retrieved : 11/6/98 11:34:21
-**
-** ident @(#)port.h 1.3
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __rio_port_h__
-#define __rio_port_h__
-
-/*
-** Port data structure
-*/
-struct Port {
- struct gs_port gs;
- int PortNum; /* RIO port no., 0-511 */
- struct Host *HostP;
- void __iomem *Caddr;
- unsigned short HostPort; /* Port number on host card */
- unsigned char RupNum; /* Number of RUP for port */
- unsigned char ID2; /* Second ID of RTA for port */
- unsigned long State; /* FLAGS for open & xopen */
-#define RIO_LOPEN 0x00001 /* Local open */
-#define RIO_MOPEN 0x00002 /* Modem open */
-#define RIO_WOPEN 0x00004 /* Waiting for open */
-#define RIO_CLOSING 0x00008 /* The port is being closed */
-#define RIO_XPBUSY 0x00010 /* Transparent printer busy */
-#define RIO_BREAKING 0x00020 /* Break in progress */
-#define RIO_DIRECT 0x00040 /* Doing Direct output */
-#define RIO_EXCLUSIVE 0x00080 /* Stream open for exclusive use */
-#define RIO_NDELAY 0x00100 /* Stream is open FNDELAY */
-#define RIO_CARR_ON 0x00200 /* Stream has carrier present */
-#define RIO_XPWANTR 0x00400 /* Stream wanted by Xprint */
-#define RIO_RBLK 0x00800 /* Stream is read-blocked */
-#define RIO_BUSY 0x01000 /* Stream is BUSY for write */
-#define RIO_TIMEOUT 0x02000 /* Stream timeout in progress */
-#define RIO_TXSTOP 0x04000 /* Stream output is stopped */
-#define RIO_WAITFLUSH 0x08000 /* Stream waiting for flush */
-#define RIO_DYNOROD 0x10000 /* Drain failed */
-#define RIO_DELETED 0x20000 /* RTA has been deleted */
-#define RIO_ISSCANCODE 0x40000 /* This line is in scancode mode */
-#define RIO_USING_EUC 0x100000 /* Using extended Unix chars */
-#define RIO_CAN_COOK 0x200000 /* This line can do cooking */
-#define RIO_TRIAD_MODE 0x400000 /* Enable TRIAD special ops. */
-#define RIO_TRIAD_BLOCK 0x800000 /* Next read will block */
-#define RIO_TRIAD_FUNC 0x1000000 /* Seen a function key coming in */
-#define RIO_THROTTLE_RX 0x2000000 /* RX needs to be throttled. */
-
- unsigned long Config; /* FLAGS for NOREAD.... */
-#define RIO_NOREAD 0x0001 /* Are not allowed to read port */
-#define RIO_NOWRITE 0x0002 /* Are not allowed to write port */
-#define RIO_NOXPRINT 0x0004 /* Are not allowed to xprint port */
-#define RIO_NOMASK 0x0007 /* All not allowed things */
-#define RIO_IXANY 0x0008 /* Port is allowed ixany */
-#define RIO_MODEM 0x0010 /* Stream is a modem device */
-#define RIO_IXON 0x0020 /* Port is allowed ixon */
-#define RIO_WAITDRAIN 0x0040 /* Wait for port to completely drain */
-#define RIO_MAP_50_TO_50 0x0080 /* Map 50 baud to 50 baud */
-#define RIO_MAP_110_TO_110 0x0100 /* Map 110 baud to 110 baud */
-
-/*
-** 15.10.1998 ARG - ESIL 0761 prt fix
-** As LynxOS does not appear to support Hardware Flow Control .....
-** Define our own flow control flags in 'Config'.
-*/
-#define RIO_CTSFLOW 0x0200 /* RIO's own CTSFLOW flag */
-#define RIO_RTSFLOW 0x0400 /* RIO's own RTSFLOW flag */
-
-
- struct PHB __iomem *PhbP; /* pointer to PHB for port */
- u16 __iomem *TxAdd; /* Add packets here */
- u16 __iomem *TxStart; /* Start of add array */
- u16 __iomem *TxEnd; /* End of add array */
- u16 __iomem *RxRemove; /* Remove packets here */
- u16 __iomem *RxStart; /* Start of remove array */
- u16 __iomem *RxEnd; /* End of remove array */
- unsigned int RtaUniqueNum; /* Unique number of RTA */
- unsigned short PortState; /* status of port */
- unsigned short ModemState; /* status of modem lines */
- unsigned long ModemLines; /* Modem bits sent to RTA */
- unsigned char CookMode; /* who expands CR/LF? */
- unsigned char ParamSem; /* Prevent write during param */
- unsigned char Mapped; /* if port mapped onto host */
- unsigned char SecondBlock; /* if port belongs to 2nd block
- of 16 port RTA */
- unsigned char InUse; /* how many pre-emptive cmds */
- unsigned char Lock; /* if params locked */
- unsigned char Store; /* if params stored across closes */
- unsigned char FirstOpen; /* TRUE if first time port opened */
- unsigned char FlushCmdBodge; /* if doing a (non)flush */
- unsigned char MagicFlags; /* require intr processing */
-#define MAGIC_FLUSH 0x01 /* mirror of WflushFlag */
-#define MAGIC_REBOOT 0x02 /* RTA re-booted, re-open ports */
-#define MORE_OUTPUT_EYGOR 0x04 /* riotproc failed to empty clists */
- unsigned char WflushFlag; /* 1 How many WFLUSHs active */
-/*
-** Transparent print stuff
-*/
- struct Xprint {
-#ifndef MAX_XP_CTRL_LEN
-#define MAX_XP_CTRL_LEN 16 /* ALSO IN DAEMON.H */
-#endif
- unsigned int XpCps;
- char XpOn[MAX_XP_CTRL_LEN];
- char XpOff[MAX_XP_CTRL_LEN];
- unsigned short XpLen; /* strlen(XpOn)+strlen(XpOff) */
- unsigned char XpActive;
- unsigned char XpLastTickOk; /* TRUE if we can process */
-#define XP_OPEN 00001
-#define XP_RUNABLE 00002
- struct ttystatics *XttyP;
- } Xprint;
- unsigned char RxDataStart;
- unsigned char Cor2Copy; /* copy of COR2 */
- char *Name; /* points to the Rta's name */
- char *TxRingBuffer;
- unsigned short TxBufferIn; /* New data arrives here */
- unsigned short TxBufferOut; /* Intr removes data here */
- unsigned short OldTxBufferOut; /* Indicates if draining */
- int TimeoutId; /* Timeout ID */
- unsigned int Debug;
- unsigned char WaitUntilBooted; /* True if open should block */
- unsigned int statsGather; /* True if gathering stats */
- unsigned long txchars; /* Chars transmitted */
- unsigned long rxchars; /* Chars received */
- unsigned long opens; /* port open count */
- unsigned long closes; /* port close count */
- unsigned long ioctls; /* ioctl count */
- unsigned char LastRxTgl; /* Last state of rx toggle bit */
- spinlock_t portSem; /* Lock using this sem */
- int MonitorTstate; /* Monitoring ? */
- int timeout_id; /* For calling 100 ms delays */
- int timeout_sem; /* For calling 100 ms delays */
- int firstOpen; /* First time open ? */
- char *p; /* save the global struc here .. */
-};
-
-struct ModuleInfo {
- char *Name;
- unsigned int Flags[4]; /* one per port on a module */
-};
-
-/*
-** This struct is required because trying to grab an entire Port structure
-** runs into problems with differing struct sizes between driver and config.
-*/
-struct PortParams {
- unsigned int Port;
- unsigned long Config;
- unsigned long State;
- struct ttystatics *TtyP;
-};
-
-#endif
diff --git a/drivers/staging/generic_serial/rio/protsts.h b/drivers/staging/generic_serial/rio/protsts.h
deleted file mode 100644
index 8ab79401d3e..00000000000
--- a/drivers/staging/generic_serial/rio/protsts.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/****************************************************************************
- ******* *******
- ******* P R O T O C O L S T A T U S S T R U C T U R E *******
- ******* *******
- ****************************************************************************
-
- Author : Ian Nandhra / Jeremy Rolls
- Date :
-
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Version : 0.01
-
-
- Mods
- ----------------------------------------------------------------------------
- Date By Description
- ----------------------------------------------------------------------------
-
- ***************************************************************************/
-
-#ifndef _protsts_h
-#define _protsts_h 1
-
-/*************************************************
- * ACK bit. Last Packet received OK. Set by
- * rxpkt to indicate that the Packet has been
- * received OK and that the LTT must set the ACK
- * bit in the next outward bound Packet
- * and re-set by LTT's after xmit.
- *
- * Gets shoved into rx_status
- ************************************************/
-#define PHB_RX_LAST_PKT_ACKED ((ushort) 0x080)
-
-/*******************************************************
- * The Rx TOGGLE bit.
- * Stuffed into rx_status by RXPKT
- ******************************************************/
-#define PHB_RX_DATA_WNDW ((ushort) 0x040)
-
-/*******************************************************
- * The Rx TOGGLE bit. Matches the setting in PKT.H
- * Stuffed into rx_status
- ******************************************************/
-#define PHB_RX_TGL ((ushort) 0x2000)
-
-
-/*************************************************
- * This bit is set by the LRT to indicate that
- * an ACK (packet) must be returned.
- *
- * Gets shoved into tx_status
- ************************************************/
-#define PHB_TX_SEND_PKT_ACK ((ushort) 0x08)
-
-/*************************************************
- * Set by LTT to indicate that an ACK is required
- *************************************************/
-#define PHB_TX_ACK_RQRD ((ushort) 0x01)
-
-
-/*******************************************************
- * The Tx TOGGLE bit.
- * Stuffed into tx_status by RXPKT from the PKT WndW
- * field. Looked at by the LTT when the NEXT Packet
- * is going to be sent.
- ******************************************************/
-#define PHB_TX_DATA_WNDW ((ushort) 0x04)
-
-
-/*******************************************************
- * The Tx TOGGLE bit. Matches the setting in PKT.H
- * Stuffed into tx_status
- ******************************************************/
-#define PHB_TX_TGL ((ushort) 0x02)
-
-/*******************************************************
- * Request intr bit. Set when the queue has gone quiet
- * and the PHB has requested an interrupt.
- ******************************************************/
-#define PHB_TX_INTR ((ushort) 0x100)
-
-/*******************************************************
- * SET if the PHB cannot send any more data down the
- * Link
- ******************************************************/
-#define PHB_TX_HANDSHAKE ((ushort) 0x010)
-
-
-#define RUP_SEND_WNDW ((ushort) 0x08)
-
-#endif
-
-/*********** end of file ***********/
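
These bits land in the per-port rx_status and tx_status words that the host shares with the card's LTT/LRT processes. A minimal sketch of the kind of test the comments describe (illustrative only; the helper and its __iomem parameter are hypothetical):

/* Hypothetical: did the card note an ACK for the last received packet?
 * The comment above says rxpkt shoves this bit into rx_status. */
static int phb_rx_last_acked(u16 __iomem *rx_status)
{
	return (readw(rx_status) & PHB_RX_LAST_PKT_ACKED) != 0;
}
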
diff --git a/drivers/staging/generic_serial/rio/rio.h b/drivers/staging/generic_serial/rio/rio.h
deleted file mode 100644
index 1bf36223a4e..00000000000
--- a/drivers/staging/generic_serial/rio/rio.h
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 1998 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : rio.h
-** SID : 1.3
-** Last Modified : 11/6/98 11:34:13
-** Retrieved : 11/6/98 11:34:22
-**
-** ident @(#)rio.h 1.3
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __rio_rio_h__
-#define __rio_rio_h__
-
-/*
-** Maximum numbers of things
-*/
-#define RIO_SLOTS 4 /* number of configuration slots */
-#define RIO_HOSTS 4 /* number of hosts that can be found */
-#define PORTS_PER_HOST 128 /* number of ports per host */
-#define LINKS_PER_UNIT 4 /* number of links from a host */
-#define RIO_PORTS (PORTS_PER_HOST * RIO_HOSTS) /* max. no. of ports */
-#define RTAS_PER_HOST (MAX_RUP) /* number of RTAs per host */
-#define PORTS_PER_RTA (PORTS_PER_HOST/RTAS_PER_HOST) /* ports on a rta */
-#define PORTS_PER_MODULE 4 /* number of ports on a plug-in module */
- /* number of modules on an RTA */
-#define MODULES_PER_RTA (PORTS_PER_RTA/PORTS_PER_MODULE)
-#define MAX_PRODUCT 16 /* number of different product codes */
-#define MAX_MODULE_TYPES 16 /* number of different types of module */
-
-#define RIO_CONTROL_DEV 128 /* minor number of host/control device */
-#define RIO_INVALID_MAJOR 0 /* test first host card's major no for validity */
-
-/*
-** number of RTAs that can be bound to a master
-*/
-#define MAX_RTA_BINDINGS (MAX_RUP * RIO_HOSTS)
-
-/*
-** Unit types
-*/
-#define PC_RTA16 0x90000000
-#define PC_RTA8 0xe0000000
-#define TYPE_HOST 0
-#define TYPE_RTA8 1
-#define TYPE_RTA16 2
-
-/*
-** Flag values returned by functions
-*/
-
-#define RIO_FAIL -1
-
-/*
-** SysPort value for something that hasn't any ports
-*/
-#define NO_PORT 0xFFFFFFFF
-
-/*
-** Unit ID Of all hosts
-*/
-#define HOST_ID 0
-
-/*
-** Break bytes into nybbles
-*/
-#define LONYBLE(X) ((X) & 0xF)
-#define HINYBLE(X) (((X)>>4) & 0xF)
-
-/*
-** Flag values passed into some functions
-*/
-#define DONT_SLEEP 0
-#define OK_TO_SLEEP 1
-
-#define DONT_PRINT 1
-#define DO_PRINT 0
-
-#define PRINT_TO_LOG_CONS 0
-#define PRINT_TO_CONS 1
-#define PRINT_TO_LOG 2
-
-/*
-** Timeout has trouble with times of less than 3 ticks...
-*/
-#define MIN_TIMEOUT 3
-
-/*
-** Generally useful constants
-*/
-
-#define HUNDRED_MS ((HZ/10)?(HZ/10):1)
-#define ONE_MEG 0x100000
-#define SIXTY_FOUR_K 0x10000
-
-#define RIO_AT_MEM_SIZE SIXTY_FOUR_K
-#define RIO_EISA_MEM_SIZE SIXTY_FOUR_K
-#define RIO_MCA_MEM_SIZE SIXTY_FOUR_K
-
-#define COOK_WELL 0
-#define COOK_MEDIUM 1
-#define COOK_RAW 2
-
-/*
-** Pointer manipulation stuff
-** RIO_PTR takes hostp->Caddr and the offset into the DP RAM area
-** and produces a UNIX caddr_t (pointer) to the object
-** RIO_OFF takes hostp->Caddr and a UNIX pointer to an object and
-** returns the offset into the DP RAM area.
-*/
-#define RIO_PTR(C,O) (((unsigned char __iomem *)(C))+(0xFFFF&(O)))
-#define RIO_OFF(C,O) ((unsigned char __iomem *)(O)-(unsigned char __iomem *)(C))
-
-/*
-** How to convert from various different device number formats:
-** DEV is a dev number, as passed to open, close etc - NOT a minor
-** number!
-**/
-
-#define RIO_MODEM_MASK 0x1FF
-#define RIO_MODEM_BIT 0x200
-#define RIO_UNMODEM(DEV) (MINOR(DEV) & RIO_MODEM_MASK)
-#define RIO_ISMODEM(DEV) (MINOR(DEV) & RIO_MODEM_BIT)
-#define RIO_PORT(DEV,FIRST_MAJ) (((MAJOR(DEV) - FIRST_MAJ) * PORTS_PER_HOST) \
- + MINOR(DEV))
-#define CSUM(pkt_ptr) (((u16 *)(pkt_ptr))[0] + ((u16 *)(pkt_ptr))[1] + \
- ((u16 *)(pkt_ptr))[2] + ((u16 *)(pkt_ptr))[3] + \
- ((u16 *)(pkt_ptr))[4] + ((u16 *)(pkt_ptr))[5] + \
- ((u16 *)(pkt_ptr))[6] + ((u16 *)(pkt_ptr))[7] + \
- ((u16 *)(pkt_ptr))[8] + ((u16 *)(pkt_ptr))[9] )
-
-#define RIO_LINK_ENABLE 0x80FF /* FF is a hack, mainly for Mips, to */
- /* prevent a really stupid race condition. */
-
-#define NOT_INITIALISED 0
-#define INITIALISED 1
-
-#define NOT_POLLING 0
-#define POLLING 1
-
-#define NOT_CHANGED 0
-#define CHANGED 1
-
-#define NOT_INUSE 0
-
-#define DISCONNECT 0
-#define CONNECT 1
-
-/* ------ Control Codes ------ */
-
-#define CONTROL '^'
-#define IFOAD ( CONTROL + 1 )
-#define IDENTIFY ( CONTROL + 2 )
-#define ZOMBIE ( CONTROL + 3 )
-#define UFOAD ( CONTROL + 4 )
-#define IWAIT ( CONTROL + 5 )
-
-#define IFOAD_MAGIC 0xF0AD /* of course */
-#define ZOMBIE_MAGIC (~0xDEAD) /* not dead -> zombie */
-#define UFOAD_MAGIC 0xD1E /* kill-your-neighbour */
-#define IWAIT_MAGIC 0xB1DE /* Bide your time */
-
-/* ------ Error Codes ------ */
-
-#define E_NO_ERROR ((ushort) 0)
-
-/* ------ Free Lists ------ */
-
-struct rio_free_list {
- u16 next;
- u16 prev;
-};
-
-/* NULL for card side linked lists */
-#define TPNULL ((ushort)(0x8000))
-/* We can add another packet to a transmit queue if the packet pointer pointed
- * to by the TxAdd pointer has PKT_IN_USE clear in its address. */
-#define PKT_IN_USE 0x1
-
-/* ------ Topology ------ */
-
-struct Top {
- u8 Unit;
- u8 Link;
-};
-
-#endif /* __rio_rio_h__ */
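
A short usage sketch for the conversion macros above (illustrative only; the helpers are hypothetical): RIO_PTR maps a 16-bit DP RAM offset onto the host mapping at Caddr, RIO_UNMODEM/RIO_ISMODEM split a device's minor number into port index and modem bit, and PKT_IN_USE is the low bit tested on the offset held at a port's TxAdd slot before queueing another packet.

/* Hypothetical helpers showing how the macros above compose. */
static void __iomem *rio_off_to_ptr(void __iomem *caddr, u16 off)
{
	return RIO_PTR(caddr, off);		/* caddr + (off & 0xFFFF) */
}

static int rio_minor_to_port(dev_t dev, int *is_modem)
{
	*is_modem = RIO_ISMODEM(dev) != 0;	/* modem bit 0x200 in the minor */
	return RIO_UNMODEM(dev);		/* low 9 bits select the port */
}

static int rio_tx_slot_free(struct Port *PortP)
{
	/* Free when the offset at TxAdd has PKT_IN_USE clear (see comment above). */
	return (readw(PortP->TxAdd) & PKT_IN_USE) == 0;
}
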
diff --git a/drivers/staging/generic_serial/rio/rio_linux.c b/drivers/staging/generic_serial/rio/rio_linux.c
deleted file mode 100644
index 5e33293d24e..00000000000
--- a/drivers/staging/generic_serial/rio/rio_linux.c
+++ /dev/null
@@ -1,1204 +0,0 @@
-
-/* rio_linux.c -- Linux driver for the Specialix RIO series cards.
- *
- *
- * (C) 1999 R.E.Wolff@BitWizard.nl
- *
- * Specialix pays for the development and support of this driver.
- * Please DO contact support@specialix.co.uk if you require
- * support. But please read the documentation (rio.txt) first.
- *
- *
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
- * */
-
-#include <linux/module.h>
-#include <linux/kdev_t.h>
-#include <asm/io.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/mm.h>
-#include <linux/serial.h>
-#include <linux/fcntl.h>
-#include <linux/major.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/miscdevice.h>
-#include <linux/init.h>
-
-#include <linux/generic_serial.h>
-#include <asm/uaccess.h>
-
-#include "linux_compat.h"
-#include "pkt.h"
-#include "daemon.h"
-#include "rio.h"
-#include "riospace.h"
-#include "cmdpkt.h"
-#include "map.h"
-#include "rup.h"
-#include "port.h"
-#include "riodrvr.h"
-#include "rioinfo.h"
-#include "func.h"
-#include "errors.h"
-#include "pci.h"
-
-#include "parmmap.h"
-#include "unixrup.h"
-#include "board.h"
-#include "host.h"
-#include "phb.h"
-#include "link.h"
-#include "cmdblk.h"
-#include "route.h"
-#include "cirrus.h"
-#include "rioioctl.h"
-#include "param.h"
-#include "protsts.h"
-#include "rioboard.h"
-
-
-#include "rio_linux.h"
-
-/* I don't think that this driver can handle more than 512 ports on
-one machine. Specialix specifies max 4 boards in one machine. I don't
-know why. If you want to try anyway you'll have to increase the number
-of boards in rio.h. You'll have to allocate more majors if you need
-more than 512 ports.... */
-
-#ifndef RIO_NORMAL_MAJOR0
-/* This allows overriding on the compiler commandline, or in a "major.h"
- include or something like that */
-#define RIO_NORMAL_MAJOR0 154
-#define RIO_NORMAL_MAJOR1 156
-#endif
-
-#ifndef PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8
-#define PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8 0x2000
-#endif
-
-#ifndef RIO_WINDOW_LEN
-#define RIO_WINDOW_LEN 0x10000
-#endif
-
-
-/* Configurable options:
- (Don't be too sure that it'll work if you toggle them) */
-
-/* Am I paranoid or not ? ;-) */
-#undef RIO_PARANOIA_CHECK
-
-
-/* 20 -> 2000 per second. The card should rate-limit interrupts at 1000
- Hz, but it is user configurable. I don't recommend going above 1000
- Hz. The interrupt ratelimit might trigger if the interrupt is
- shared with a very active other device.
- undef this if you want to disable the check....
-*/
-#define IRQ_RATE_LIMIT 200
-
-
-/* These constants are derived from SCO Source */
-static DEFINE_MUTEX(rio_fw_mutex);
-static struct Conf
- RIOConf = {
- /* locator */ "RIO Config here",
- /* startuptime */ HZ * 2,
- /* how long to wait for card to run */
- /* slowcook */ 0,
- /* TRUE -> always use line disc. */
- /* intrpolltime */ 1,
- /* The frequency of OUR polls */
- /* breakinterval */ 25,
- /* x10 mS XXX: units seem to be 1ms not 10! -- REW */
- /* timer */ 10,
- /* mS */
- /* RtaLoadBase */ 0x7000,
- /* HostLoadBase */ 0x7C00,
- /* XpHz */ 5,
- /* number of Xprint hits per second */
- /* XpCps */ 120,
- /* Xprint characters per second */
- /* XpOn */ "\033d#",
- /* start Xprint for a wyse 60 */
- /* XpOff */ "\024",
- /* end Xprint for a wyse 60 */
- /* MaxXpCps */ 2000,
- /* highest Xprint speed */
- /* MinXpCps */ 10,
- /* slowest Xprint speed */
- /* SpinCmds */ 1,
- /* non-zero for mega fast boots */
- /* First Addr */ 0x0A0000,
- /* First address to look at */
- /* Last Addr */ 0xFF0000,
- /* Last address looked at */
- /* BufferSize */ 1024,
- /* Bytes per port of buffering */
- /* LowWater */ 256,
- /* how much data left before wakeup */
- /* LineLength */ 80,
- /* how wide is the console? */
- /* CmdTimeout */ HZ,
- /* how long a close command may take */
-};
-
-
-
-
-/* Function prototypes */
-
-static void rio_disable_tx_interrupts(void *ptr);
-static void rio_enable_tx_interrupts(void *ptr);
-static void rio_disable_rx_interrupts(void *ptr);
-static void rio_enable_rx_interrupts(void *ptr);
-static int rio_carrier_raised(struct tty_port *port);
-static void rio_shutdown_port(void *ptr);
-static int rio_set_real_termios(void *ptr);
-static void rio_hungup(void *ptr);
-static void rio_close(void *ptr);
-static int rio_chars_in_buffer(void *ptr);
-static long rio_fw_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-static int rio_init_drivers(void);
-
-static void my_hd(void *addr, int len);
-
-static struct tty_driver *rio_driver, *rio_driver2;
-
-/* The name "p" is a bit non-descript. But that's what the rio-lynxos
-sources use all over the place. */
-struct rio_info *p;
-
-int rio_debug;
-
-
-/* You can have the driver poll your card.
- - Set rio_poll to 1 to poll every timer tick (10ms on Intel).
- This is used when the card cannot use an interrupt for some reason.
-*/
-static int rio_poll = 1;
-
-
-/* These are the only open spaces in my computer. Yours may have more
- or less.... */
-static int rio_probe_addrs[] = { 0xc0000, 0xd0000, 0xe0000 };
-
-#define NR_RIO_ADDRS ARRAY_SIZE(rio_probe_addrs)
-
-
-/* Set the mask to all-ones. This, alas, only supports 32 interrupts.
- Some architectures may need more. -- Changed to LONG to
- support up to 64 bits on 64bit architectures. -- REW 20/06/99 */
-static long rio_irqmask = -1;
-
-MODULE_AUTHOR("Rogier Wolff <R.E.Wolff@bitwizard.nl>, Patrick van de Lageweg <patrick@bitwizard.nl>");
-MODULE_DESCRIPTION("RIO driver");
-MODULE_LICENSE("GPL");
-module_param(rio_poll, int, 0);
-module_param(rio_debug, int, 0644);
-module_param(rio_irqmask, long, 0);
-
-static struct real_driver rio_real_driver = {
- rio_disable_tx_interrupts,
- rio_enable_tx_interrupts,
- rio_disable_rx_interrupts,
- rio_enable_rx_interrupts,
- rio_shutdown_port,
- rio_set_real_termios,
- rio_chars_in_buffer,
- rio_close,
- rio_hungup,
- NULL
-};
-
-/*
- * Firmware loader driver specific routines
- *
- */
-
-static const struct file_operations rio_fw_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = rio_fw_ioctl,
- .llseek = noop_llseek,
-};
-
-static struct miscdevice rio_fw_device = {
- RIOCTL_MISC_MINOR, "rioctl", &rio_fw_fops
-};
-
-
-
-
-
-#ifdef RIO_PARANOIA_CHECK
-
-/* This doesn't work. Who's paranoid around here? Not me! */
-
-static inline int rio_paranoia_check(struct rio_port const *port, char *name, const char *routine)
-{
-
- static const char *badmagic = KERN_ERR "rio: Warning: bad rio port magic number for device %s in %s\n";
- static const char *badinfo = KERN_ERR "rio: Warning: null rio port for device %s in %s\n";
-
- if (!port) {
- printk(badinfo, name, routine);
- return 1;
- }
- if (port->magic != RIO_MAGIC) {
- printk(badmagic, name, routine);
- return 1;
- }
-
- return 0;
-}
-#else
-#define rio_paranoia_check(a,b,c) 0
-#endif
-
-
-#ifdef DEBUG
-static void my_hd(void *ad, int len)
-{
- int i, j, ch;
- unsigned char *addr = ad;
-
- for (i = 0; i < len; i += 16) {
- rio_dprintk(RIO_DEBUG_PARAM, "%08lx ", (unsigned long) addr + i);
- for (j = 0; j < 16; j++) {
- rio_dprintk(RIO_DEBUG_PARAM, "%02x %s", addr[j + i], (j == 7) ? " " : "");
- }
- for (j = 0; j < 16; j++) {
- ch = addr[j + i];
- rio_dprintk(RIO_DEBUG_PARAM, "%c", (ch < 0x20) ? '.' : ((ch > 0x7f) ? '.' : ch));
- }
- rio_dprintk(RIO_DEBUG_PARAM, "\n");
- }
-}
-#else
-#define my_hd(ad,len) do{/* nothing*/ } while (0)
-#endif
-
-
-/* Delay a number of jiffies, allowing a signal to interrupt */
-int RIODelay(struct Port *PortP, int njiffies)
-{
- func_enter();
-
- rio_dprintk(RIO_DEBUG_DELAY, "delaying %d jiffies\n", njiffies);
- msleep_interruptible(jiffies_to_msecs(njiffies));
- func_exit();
-
- if (signal_pending(current))
- return RIO_FAIL;
- else
- return !RIO_FAIL;
-}
-
-
-/* Delay a number of jiffies, disallowing a signal to interrupt */
-int RIODelay_ni(struct Port *PortP, int njiffies)
-{
- func_enter();
-
- rio_dprintk(RIO_DEBUG_DELAY, "delaying %d jiffies (ni)\n", njiffies);
- msleep(jiffies_to_msecs(njiffies));
- func_exit();
- return !RIO_FAIL;
-}
-
-void rio_copy_to_card(void *from, void __iomem *to, int len)
-{
- rio_copy_toio(to, from, len);
-}
-
-int rio_minor(struct tty_struct *tty)
-{
- return tty->index + ((tty->driver == rio_driver) ? 0 : 256);
-}
-
-static int rio_set_real_termios(void *ptr)
-{
- return RIOParam((struct Port *) ptr, RIOC_CONFIG, 1, 1);
-}
-
-
-static void rio_reset_interrupt(struct Host *HostP)
-{
- func_enter();
-
- switch (HostP->Type) {
- case RIO_AT:
- case RIO_MCA:
- case RIO_PCI:
- writeb(0xFF, &HostP->ResetInt);
- }
-
- func_exit();
-}
-
-
-static irqreturn_t rio_interrupt(int irq, void *ptr)
-{
- struct Host *HostP;
- func_enter();
-
- HostP = ptr; /* &p->RIOHosts[(long)ptr]; */
- rio_dprintk(RIO_DEBUG_IFLOW, "rio: enter rio_interrupt (%d/%d)\n", irq, HostP->Ivec);
-
- /* AAargh! The order in which to do these things is essential and
- not trivial.
-
- - hardware twiddling goes before "recursive". Otherwise when we
- poll the card, and a recursive interrupt happens, we won't
- ack the card, so it might keep on interrupting us. (especially
- level sensitive interrupt systems like PCI).
-
- - Rate limit goes before hardware twiddling. Otherwise we won't
- catch a card that has gone bonkers.
-
- - The "initialized" test goes after the hardware twiddling. Otherwise
- the card will stick us in the interrupt routine again.
-
- - The initialized test goes before recursive.
- */
-
- rio_dprintk(RIO_DEBUG_IFLOW, "rio: We have noticed the interrupt\n");
- if (HostP->Ivec == irq) {
- /* Tell the card we've noticed the interrupt. */
- rio_reset_interrupt(HostP);
- }
-
- if ((HostP->Flags & RUN_STATE) != RC_RUNNING)
- return IRQ_HANDLED;
-
- if (test_and_set_bit(RIO_BOARD_INTR_LOCK, &HostP->locks)) {
- printk(KERN_ERR "Recursive interrupt! (host %p/irq%d)\n", ptr, HostP->Ivec);
- return IRQ_HANDLED;
- }
-
- RIOServiceHost(p, HostP);
-
- rio_dprintk(RIO_DEBUG_IFLOW, "riointr() doing host %p type %d\n", ptr, HostP->Type);
-
- clear_bit(RIO_BOARD_INTR_LOCK, &HostP->locks);
- rio_dprintk(RIO_DEBUG_IFLOW, "rio: exit rio_interrupt (%d/%d)\n", irq, HostP->Ivec);
- func_exit();
- return IRQ_HANDLED;
-}
-
-
-static void rio_pollfunc(unsigned long data)
-{
- func_enter();
-
- rio_interrupt(0, &p->RIOHosts[data]);
- mod_timer(&p->RIOHosts[data].timer, jiffies + rio_poll);
-
- func_exit();
-}
-
-
-/* ********************************************************************** *
- * Here are the routines that actually *
- * interface with the generic_serial driver *
- * ********************************************************************** */
-
-/* Ehhm. I don't know how to fiddle with interrupts on the Specialix
- cards. .... Hmm. Ok I figured it out. You don't. -- REW */
-
-static void rio_disable_tx_interrupts(void *ptr)
-{
- func_enter();
-
- /* port->gs.port.flags &= ~GS_TX_INTEN; */
-
- func_exit();
-}
-
-
-static void rio_enable_tx_interrupts(void *ptr)
-{
- struct Port *PortP = ptr;
- /* int hn; */
-
- func_enter();
-
- /* hn = PortP->HostP - p->RIOHosts;
-
- rio_dprintk (RIO_DEBUG_TTY, "Pushing host %d\n", hn);
- rio_interrupt (-1,(void *) hn, NULL); */
-
- RIOTxEnable((char *) PortP);
-
- /*
- * In general we cannot count on "tx empty" interrupts, although
- * the interrupt routine seems to be able to tell the difference.
- */
- PortP->gs.port.flags &= ~GS_TX_INTEN;
-
- func_exit();
-}
-
-
-static void rio_disable_rx_interrupts(void *ptr)
-{
- func_enter();
- func_exit();
-}
-
-static void rio_enable_rx_interrupts(void *ptr)
-{
- /* struct rio_port *port = ptr; */
- func_enter();
- func_exit();
-}
-
-
-/* Jeez. Isn't this simple? */
-static int rio_carrier_raised(struct tty_port *port)
-{
- struct Port *PortP = container_of(port, struct Port, gs.port);
- int rv;
-
- func_enter();
- rv = (PortP->ModemState & RIOC_MSVR1_CD) != 0;
-
- rio_dprintk(RIO_DEBUG_INIT, "Getting CD status: %d\n", rv);
-
- func_exit();
- return rv;
-}
-
-
-/* Jeez. Isn't this simple? Actually, we can sync with the actual port
- by just pushing stuff into the queue going to the port... */
-static int rio_chars_in_buffer(void *ptr)
-{
- func_enter();
-
- func_exit();
- return 0;
-}
-
-
-/* Nothing special here... */
-static void rio_shutdown_port(void *ptr)
-{
- struct Port *PortP;
-
- func_enter();
-
- PortP = (struct Port *) ptr;
- PortP->gs.port.tty = NULL;
- func_exit();
-}
-
-
-/* I haven't the foggiest why the decrement use count has to happen
- here. The whole linux serial drivers stuff needs to be redesigned.
- My guess is that this is a hack to minimize the impact of a bug
- elsewhere. Thinking about it some more. (try it sometime) Try
- running minicom on a serial port that is driven by a modularized
- driver. Have the modem hangup. Then remove the driver module. Then
- exit minicom. I expect an "oops". -- REW */
-static void rio_hungup(void *ptr)
-{
- struct Port *PortP;
-
- func_enter();
-
- PortP = (struct Port *) ptr;
- PortP->gs.port.tty = NULL;
-
- func_exit();
-}
-
-
-/* The standard serial_close would become shorter if you'd wrap it like
- this.
- rs_close (...){save_flags;cli;real_close();dec_use_count;restore_flags;}
- */
-static void rio_close(void *ptr)
-{
- struct Port *PortP;
-
- func_enter();
-
- PortP = (struct Port *) ptr;
-
- riotclose(ptr);
-
- if (PortP->gs.port.count) {
- printk(KERN_ERR "WARNING port count:%d\n", PortP->gs.port.count);
- PortP->gs.port.count = 0;
- }
-
- PortP->gs.port.tty = NULL;
- func_exit();
-}
-
-
-
-static long rio_fw_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int rc = 0;
- func_enter();
-
- /* The "dev" argument isn't used. */
- mutex_lock(&rio_fw_mutex);
- rc = riocontrol(p, 0, cmd, arg, capable(CAP_SYS_ADMIN));
- mutex_unlock(&rio_fw_mutex);
-
- func_exit();
- return rc;
-}
-
-extern int RIOShortCommand(struct rio_info *p, struct Port *PortP, int command, int len, int arg);
-
-static int rio_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int rc;
- struct Port *PortP;
- int ival;
-
- func_enter();
-
- PortP = (struct Port *) tty->driver_data;
-
- rc = 0;
- switch (cmd) {
- case TIOCSSOFTCAR:
- if ((rc = get_user(ival, (unsigned __user *) argp)) == 0) {
- tty->termios->c_cflag = (tty->termios->c_cflag & ~CLOCAL) | (ival ? CLOCAL : 0);
- }
- break;
- case TIOCGSERIAL:
- rc = -EFAULT;
- if (access_ok(VERIFY_WRITE, argp, sizeof(struct serial_struct)))
- rc = gs_getserial(&PortP->gs, argp);
- break;
- case TCSBRK:
- if (PortP->State & RIO_DELETED) {
- rio_dprintk(RIO_DEBUG_TTY, "BREAK on deleted RTA\n");
- rc = -EIO;
- } else {
- if (RIOShortCommand(p, PortP, RIOC_SBREAK, 2, 250) ==
- RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n");
- rc = -EIO;
- }
- }
- break;
- case TCSBRKP:
- if (PortP->State & RIO_DELETED) {
- rio_dprintk(RIO_DEBUG_TTY, "BREAK on deleted RTA\n");
- rc = -EIO;
- } else {
- int l;
- l = arg ? arg * 100 : 250;
- if (l > 255)
- l = 255;
- if (RIOShortCommand(p, PortP, RIOC_SBREAK, 2,
- arg ? arg * 100 : 250) == RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n");
- rc = -EIO;
- }
- }
- break;
- case TIOCSSERIAL:
- rc = -EFAULT;
- if (access_ok(VERIFY_READ, argp, sizeof(struct serial_struct)))
- rc = gs_setserial(&PortP->gs, argp);
- break;
- default:
- rc = -ENOIOCTLCMD;
- break;
- }
- func_exit();
- return rc;
-}
-
-
-/* The throttle/unthrottle scheme for the Specialix card is different
- * from other drivers and deserves some explanation.
- * The Specialix hardware takes care of XON/XOFF
- * and CTS/RTS flow control itself. This means that all we have to
- * do when signalled by the upper tty layer to throttle/unthrottle is
- * to make a note of it here. When we come to read characters from the
- * rx buffers on the card (rio_receive_chars()) we look to see if the
- * upper layer can accept more (as noted here in rio_rx_throt[]).
- * If it can't, we simply don't remove chars from the card's buffer.
- * When the tty layer can accept chars, we again note that here and when
- * rio_receive_chars() is called it will remove them from the card's buffer.
- * The card will notice that a port's buffer has drained below some low
- * water mark and will unflow control the line itself, using whatever
- * flow control scheme is in use for that port. -- Simon Allen
- */
-
-static void rio_throttle(struct tty_struct *tty)
-{
- struct Port *port = (struct Port *) tty->driver_data;
-
- func_enter();
- /* If the port is using any type of input flow
- * control then throttle the port.
- */
-
- if ((tty->termios->c_cflag & CRTSCTS) || (I_IXOFF(tty))) {
- port->State |= RIO_THROTTLE_RX;
- }
-
- func_exit();
-}
-
-
-static void rio_unthrottle(struct tty_struct *tty)
-{
- struct Port *port = (struct Port *) tty->driver_data;
-
- func_enter();
- /* Always unthrottle even if flow control is not enabled on
- * this port in case we disabled flow control while the port
- * was throttled
- */
-
- port->State &= ~RIO_THROTTLE_RX;
-
- func_exit();
- return;
-}
-
-
-
-
-
-/* ********************************************************************** *
- * Here are the initialization routines. *
- * ********************************************************************** */
-
-
-static struct vpd_prom *get_VPD_PROM(struct Host *hp)
-{
- static struct vpd_prom vpdp;
- char *p;
- int i;
-
- func_enter();
- rio_dprintk(RIO_DEBUG_PROBE, "Going to verify vpd prom at %p.\n", hp->Caddr + RIO_VPD_ROM);
-
- p = (char *) &vpdp;
- for (i = 0; i < sizeof(struct vpd_prom); i++)
- *p++ = readb(hp->Caddr + RIO_VPD_ROM + i * 2);
- /* read_rio_byte (hp, RIO_VPD_ROM + i*2); */
-
- /* Terminate the identifier string.
- *** requires one extra byte in struct vpd_prom *** */
- *p++ = 0;
-
- if (rio_debug & RIO_DEBUG_PROBE)
- my_hd((char *) &vpdp, 0x20);
-
- func_exit();
-
- return &vpdp;
-}
-
-static const struct tty_operations rio_ops = {
- .open = riotopen,
- .close = gs_close,
- .write = gs_write,
- .put_char = gs_put_char,
- .flush_chars = gs_flush_chars,
- .write_room = gs_write_room,
- .chars_in_buffer = gs_chars_in_buffer,
- .flush_buffer = gs_flush_buffer,
- .ioctl = rio_ioctl,
- .throttle = rio_throttle,
- .unthrottle = rio_unthrottle,
- .set_termios = gs_set_termios,
- .stop = gs_stop,
- .start = gs_start,
- .hangup = gs_hangup,
-};
-
-static int rio_init_drivers(void)
-{
- int error = -ENOMEM;
-
- rio_driver = alloc_tty_driver(256);
- if (!rio_driver)
- goto out;
- rio_driver2 = alloc_tty_driver(256);
- if (!rio_driver2)
- goto out1;
-
- func_enter();
-
- rio_driver->owner = THIS_MODULE;
- rio_driver->driver_name = "specialix_rio";
- rio_driver->name = "ttySR";
- rio_driver->major = RIO_NORMAL_MAJOR0;
- rio_driver->type = TTY_DRIVER_TYPE_SERIAL;
- rio_driver->subtype = SERIAL_TYPE_NORMAL;
- rio_driver->init_termios = tty_std_termios;
- rio_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- rio_driver->flags = TTY_DRIVER_REAL_RAW;
- tty_set_operations(rio_driver, &rio_ops);
-
- rio_driver2->owner = THIS_MODULE;
- rio_driver2->driver_name = "specialix_rio";
- rio_driver2->name = "ttySR";
- rio_driver2->major = RIO_NORMAL_MAJOR1;
- rio_driver2->type = TTY_DRIVER_TYPE_SERIAL;
- rio_driver2->subtype = SERIAL_TYPE_NORMAL;
- rio_driver2->init_termios = tty_std_termios;
- rio_driver2->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- rio_driver2->flags = TTY_DRIVER_REAL_RAW;
- tty_set_operations(rio_driver2, &rio_ops);
-
- rio_dprintk(RIO_DEBUG_INIT, "set_termios = %p\n", gs_set_termios);
-
- if ((error = tty_register_driver(rio_driver)))
- goto out2;
- if ((error = tty_register_driver(rio_driver2)))
- goto out3;
- func_exit();
- return 0;
- out3:
- tty_unregister_driver(rio_driver);
- out2:
- put_tty_driver(rio_driver2);
- out1:
- put_tty_driver(rio_driver);
- out:
- printk(KERN_ERR "rio: Couldn't register a rio driver, error = %d\n", error);
- return 1;
-}
-
-static const struct tty_port_operations rio_port_ops = {
- .carrier_raised = rio_carrier_raised,
-};
-
-static int rio_init_datastructures(void)
-{
- int i;
- struct Port *port;
- func_enter();
-
- /* Many drivers statically allocate the maximum number of ports.
- There is no reason not to allocate them dynamically. Is there? -- REW */
- /* However, the RIO driver allows users to configure their first
- RTA as the ports numbered 504-511. We therefore need to allocate
- the whole range. :-( -- REW */
-
-#define RI_SZ sizeof(struct rio_info)
-#define HOST_SZ sizeof(struct Host)
-#define PORT_SZ sizeof(struct Port *)
-#define TMIO_SZ sizeof(struct termios *)
- rio_dprintk(RIO_DEBUG_INIT, "getting : %Zd %Zd %Zd %Zd %Zd bytes\n", RI_SZ, RIO_HOSTS * HOST_SZ, RIO_PORTS * PORT_SZ, RIO_PORTS * TMIO_SZ, RIO_PORTS * TMIO_SZ);
-
- if (!(p = kzalloc(RI_SZ, GFP_KERNEL)))
- goto free0;
- if (!(p->RIOHosts = kzalloc(RIO_HOSTS * HOST_SZ, GFP_KERNEL)))
- goto free1;
- if (!(p->RIOPortp = kzalloc(RIO_PORTS * PORT_SZ, GFP_KERNEL)))
- goto free2;
- p->RIOConf = RIOConf;
- rio_dprintk(RIO_DEBUG_INIT, "Got : %p %p %p\n", p, p->RIOHosts, p->RIOPortp);
-
-#if 1
- for (i = 0; i < RIO_PORTS; i++) {
- port = p->RIOPortp[i] = kzalloc(sizeof(struct Port), GFP_KERNEL);
- if (!port) {
- goto free6;
- }
- rio_dprintk(RIO_DEBUG_INIT, "initing port %d (%d)\n", i, port->Mapped);
- tty_port_init(&port->gs.port);
- port->gs.port.ops = &rio_port_ops;
- port->PortNum = i;
- port->gs.magic = RIO_MAGIC;
- port->gs.close_delay = HZ / 2;
- port->gs.closing_wait = 30 * HZ;
- port->gs.rd = &rio_real_driver;
- spin_lock_init(&port->portSem);
- }
-#else
- /* We could postpone initializing them to when they are configured. */
-#endif
-
-
-
- if (rio_debug & RIO_DEBUG_INIT) {
- my_hd(&rio_real_driver, sizeof(rio_real_driver));
- }
-
-
- func_exit();
- return 0;
-
- free6:for (i--; i >= 0; i--)
- kfree(p->RIOPortp[i]);
-/*free5:
- free4:
- free3:*/ kfree(p->RIOPortp);
- free2:kfree(p->RIOHosts);
- free1:
- rio_dprintk(RIO_DEBUG_INIT, "Not enough memory! %p %p %p\n", p, p->RIOHosts, p->RIOPortp);
- kfree(p);
- free0:
- return -ENOMEM;
-}
-
-static void __exit rio_release_drivers(void)
-{
- func_enter();
- tty_unregister_driver(rio_driver2);
- tty_unregister_driver(rio_driver);
- put_tty_driver(rio_driver2);
- put_tty_driver(rio_driver);
- func_exit();
-}
-
-
-#ifdef CONFIG_PCI
- /* This was written for SX, but applies to RIO too...
- (including bugs....)
-
- There is another bit besides Bit 17. Turning that bit off
- (on boards shipped with the fix in the eeprom) results in a
- hang on the next access to the card.
- */
-
- /********************************************************
- * Setting bit 17 in the CNTRL register of the PLX 9050 *
- * chip forces a retry on writes while a read is pending.*
- * This is to prevent the card locking up on Intel Xeon *
- * multiprocessor systems with the NX chipset. -- NV *
- ********************************************************/
-
-/* Newer cards are produced with this bit set from the configuration
- EEprom. As the bit is read/write for the CPU, we can fix it here,
- if we detect that it isn't set correctly. -- REW */
-
-static void fix_rio_pci(struct pci_dev *pdev)
-{
- unsigned long hwbase;
- unsigned char __iomem *rebase;
- unsigned int t;
-
-#define CNTRL_REG_OFFSET 0x50
-#define CNTRL_REG_GOODVALUE 0x18260000
-
- hwbase = pci_resource_start(pdev, 0);
- rebase = ioremap(hwbase, 0x80);
- t = readl(rebase + CNTRL_REG_OFFSET);
- if (t != CNTRL_REG_GOODVALUE) {
- printk(KERN_DEBUG "rio: performing cntrl reg fix: %08x -> %08x\n", t, CNTRL_REG_GOODVALUE);
- writel(CNTRL_REG_GOODVALUE, rebase + CNTRL_REG_OFFSET);
- }
- iounmap(rebase);
-}
-#endif
-
-
-static int __init rio_init(void)
-{
- int found = 0;
- int i;
- struct Host *hp;
- int retval;
- struct vpd_prom *vpdp;
- int okboard;
-
-#ifdef CONFIG_PCI
- struct pci_dev *pdev = NULL;
- unsigned short tshort;
-#endif
-
- func_enter();
- rio_dprintk(RIO_DEBUG_INIT, "Initing rio module... (rio_debug=%d)\n", rio_debug);
-
- if (abs((long) (&rio_debug) - rio_debug) < 0x10000) {
- printk(KERN_WARNING "rio: rio_debug is an address, instead of a value. " "Assuming -1. Was %x/%p.\n", rio_debug, &rio_debug);
- rio_debug = -1;
- }
-
- if (misc_register(&rio_fw_device) < 0) {
- printk(KERN_ERR "RIO: Unable to register firmware loader driver.\n");
- return -EIO;
- }
-
- retval = rio_init_datastructures();
- if (retval < 0) {
- misc_deregister(&rio_fw_device);
- return retval;
- }
-#ifdef CONFIG_PCI
- /* First look for the JET devices: */
- while ((pdev = pci_get_device(PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8, pdev))) {
- u32 tint;
-
- if (pci_enable_device(pdev))
- continue;
-
- /* Specialix has a whole bunch of cards with
- 0x2000 as the device ID. They say it's because
- the standard requires it. Stupid standard. */
- /* It seems that reading a word doesn't work reliably on 2.0.
- Also, reading a non-aligned dword doesn't work. So we read the
- whole dword at 0x2c and extract the word at 0x2e (SUBSYSTEM_ID)
- ourselves */
- pci_read_config_dword(pdev, 0x2c, &tint);
- tshort = (tint >> 16) & 0xffff;
- rio_dprintk(RIO_DEBUG_PROBE, "Got a specialix card: %x.\n", tint);
- if (tshort != 0x0100) {
- rio_dprintk(RIO_DEBUG_PROBE, "But it's not a RIO card (%d)...\n", tshort);
- continue;
- }
- rio_dprintk(RIO_DEBUG_PROBE, "cp1\n");
-
- hp = &p->RIOHosts[p->RIONumHosts];
- hp->PaddrP = pci_resource_start(pdev, 2);
- hp->Ivec = pdev->irq;
- if (((1 << hp->Ivec) & rio_irqmask) == 0)
- hp->Ivec = 0;
- hp->Caddr = ioremap(p->RIOHosts[p->RIONumHosts].PaddrP, RIO_WINDOW_LEN);
- hp->CardP = (struct DpRam __iomem *) hp->Caddr;
- hp->Type = RIO_PCI;
- hp->Copy = rio_copy_to_card;
- hp->Mode = RIO_PCI_BOOT_FROM_RAM;
- spin_lock_init(&hp->HostLock);
- rio_reset_interrupt(hp);
- rio_start_card_running(hp);
-
- rio_dprintk(RIO_DEBUG_PROBE, "Going to test it (%p/%p).\n", (void *) p->RIOHosts[p->RIONumHosts].PaddrP, p->RIOHosts[p->RIONumHosts].Caddr);
- if (RIOBoardTest(p->RIOHosts[p->RIONumHosts].PaddrP, p->RIOHosts[p->RIONumHosts].Caddr, RIO_PCI, 0) == 0) {
- rio_dprintk(RIO_DEBUG_INIT, "Done RIOBoardTest\n");
- writeb(0xFF, &p->RIOHosts[p->RIONumHosts].ResetInt);
- p->RIOHosts[p->RIONumHosts].UniqueNum =
- ((readb(&p->RIOHosts[p->RIONumHosts].Unique[0]) & 0xFF) << 0) |
- ((readb(&p->RIOHosts[p->RIONumHosts].Unique[1]) & 0xFF) << 8) | ((readb(&p->RIOHosts[p->RIONumHosts].Unique[2]) & 0xFF) << 16) | ((readb(&p->RIOHosts[p->RIONumHosts].Unique[3]) & 0xFF) << 24);
- rio_dprintk(RIO_DEBUG_PROBE, "Hmm Tested ok, uniqid = %x.\n", p->RIOHosts[p->RIONumHosts].UniqueNum);
-
- fix_rio_pci(pdev);
-
- p->RIOHosts[p->RIONumHosts].pdev = pdev;
- pci_dev_get(pdev);
-
- p->RIOLastPCISearch = 0;
- p->RIONumHosts++;
- found++;
- } else {
- iounmap(p->RIOHosts[p->RIONumHosts].Caddr);
- p->RIOHosts[p->RIONumHosts].Caddr = NULL;
- }
- }
-
- /* Then look for the older PCI card.... : */
-
- /* These older PCI cards have problems (only byte-mode access is
- supported), which makes them a bit awkward to support.
- They also have problems sharing interrupts. Be careful.
- (The driver now refuses to share interrupts for these
- cards. This should be sufficient).
- */
-
- /* Then look for the older RIO/PCI devices: */
- while ((pdev = pci_get_device(PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_RIO, pdev))) {
- if (pci_enable_device(pdev))
- continue;
-
-#ifdef CONFIG_RIO_OLDPCI
- hp = &p->RIOHosts[p->RIONumHosts];
- hp->PaddrP = pci_resource_start(pdev, 0);
- hp->Ivec = pdev->irq;
- if (((1 << hp->Ivec) & rio_irqmask) == 0)
- hp->Ivec = 0;
- hp->Ivec |= 0x8000; /* Mark as non-sharable */
- hp->Caddr = ioremap(p->RIOHosts[p->RIONumHosts].PaddrP, RIO_WINDOW_LEN);
- hp->CardP = (struct DpRam __iomem *) hp->Caddr;
- hp->Type = RIO_PCI;
- hp->Copy = rio_copy_to_card;
- hp->Mode = RIO_PCI_BOOT_FROM_RAM;
- spin_lock_init(&hp->HostLock);
-
- rio_dprintk(RIO_DEBUG_PROBE, "Ivec: %x\n", hp->Ivec);
- rio_dprintk(RIO_DEBUG_PROBE, "Mode: %x\n", hp->Mode);
-
- rio_reset_interrupt(hp);
- rio_start_card_running(hp);
- rio_dprintk(RIO_DEBUG_PROBE, "Going to test it (%p/%p).\n", (void *) p->RIOHosts[p->RIONumHosts].PaddrP, p->RIOHosts[p->RIONumHosts].Caddr);
- if (RIOBoardTest(p->RIOHosts[p->RIONumHosts].PaddrP, p->RIOHosts[p->RIONumHosts].Caddr, RIO_PCI, 0) == 0) {
- writeb(0xFF, &p->RIOHosts[p->RIONumHosts].ResetInt);
- p->RIOHosts[p->RIONumHosts].UniqueNum =
- ((readb(&p->RIOHosts[p->RIONumHosts].Unique[0]) & 0xFF) << 0) |
- ((readb(&p->RIOHosts[p->RIONumHosts].Unique[1]) & 0xFF) << 8) |
- ((readb(&p->RIOHosts[p->RIONumHosts].Unique[2]) & 0xFF) << 16) |
- ((readb(&p->RIOHosts[p->RIONumHosts].Unique[3]) & 0xFF) << 24);
- rio_dprintk(RIO_DEBUG_PROBE, "Hmm Tested ok, uniqid = %x.\n", p->RIOHosts[p->RIONumHosts].UniqueNum);
-
- p->RIOHosts[p->RIONumHosts].pdev = pdev;
- pci_dev_get(pdev);
-
- p->RIOLastPCISearch = 0;
- p->RIONumHosts++;
- found++;
- } else {
- iounmap(p->RIOHosts[p->RIONumHosts].Caddr);
- p->RIOHosts[p->RIONumHosts].Caddr = NULL;
- }
-#else
- printk(KERN_ERR "Found an older RIO PCI card, but the driver is not " "compiled to support it.\n");
-#endif
- }
-#endif /* PCI */
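
The two PCI probe loops above assemble the board's 32-bit unique ID from four consecutive byte reads of the card's Unique[0..3] window, least significant byte first. A minimal stand-alone sketch of that little-endian assembly (the helper name and sample bytes are illustrative only, not taken from the driver):

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: mirrors the readb()-based assembly used above. */
static uint32_t rio_unique_from_bytes(const uint8_t b[4])
{
        return ((uint32_t)b[0] << 0) |
               ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) |
               ((uint32_t)b[3] << 24);
}

int main(void)
{
        uint8_t sample[4] = { 0x78, 0x56, 0x34, 0x12 };  /* made-up VPD bytes */

        printf("uniqid = %x\n", rio_unique_from_bytes(sample)); /* 12345678 */
        return 0;
}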
-
- /* Now probe for ISA cards... */
- for (i = 0; i < NR_RIO_ADDRS; i++) {
- hp = &p->RIOHosts[p->RIONumHosts];
- hp->PaddrP = rio_probe_addrs[i];
- /* There was something about the IRQs of these cards. I forget what. -- REW */
- hp->Ivec = 0;
- hp->Caddr = ioremap(p->RIOHosts[p->RIONumHosts].PaddrP, RIO_WINDOW_LEN);
- hp->CardP = (struct DpRam __iomem *) hp->Caddr;
- hp->Type = RIO_AT;
- hp->Copy = rio_copy_to_card; /* AT card PCI???? - PVDL
- * -- YES! this is now a normal copy. Only the
- * old PCI card uses the special PCI copy.
- * Moreover, the ISA card will work with the
- * special PCI copy anyway. -- REW */
- hp->Mode = 0;
- spin_lock_init(&hp->HostLock);
-
- vpdp = get_VPD_PROM(hp);
- rio_dprintk(RIO_DEBUG_PROBE, "Got VPD ROM\n");
- okboard = 0;
- if ((strncmp(vpdp->identifier, RIO_ISA_IDENT, 16) == 0) || (strncmp(vpdp->identifier, RIO_ISA2_IDENT, 16) == 0) || (strncmp(vpdp->identifier, RIO_ISA3_IDENT, 16) == 0)) {
- /* Board is present... */
- if (RIOBoardTest(hp->PaddrP, hp->Caddr, RIO_AT, 0) == 0) {
- /* ... and feeling fine!!!! */
- rio_dprintk(RIO_DEBUG_PROBE, "Hmm Tested ok, uniqid = %x.\n", p->RIOHosts[p->RIONumHosts].UniqueNum);
- if (RIOAssignAT(p, hp->PaddrP, hp->Caddr, 0)) {
- rio_dprintk(RIO_DEBUG_PROBE, "Hmm Tested ok, host%d uniqid = %x.\n", p->RIONumHosts, p->RIOHosts[p->RIONumHosts - 1].UniqueNum);
- okboard++;
- found++;
- }
- }
-
- if (!okboard) {
- iounmap(hp->Caddr);
- hp->Caddr = NULL;
- }
- }
- }
-
-
- for (i = 0; i < p->RIONumHosts; i++) {
- hp = &p->RIOHosts[i];
- if (hp->Ivec) {
- int mode = IRQF_SHARED;
- if (hp->Ivec & 0x8000) {
- mode = 0;
- hp->Ivec &= 0x7fff;
- }
- rio_dprintk(RIO_DEBUG_INIT, "Requesting interrupt hp: %p rio_interrupt: %d Mode: %x\n", hp, hp->Ivec, hp->Mode);
- retval = request_irq(hp->Ivec, rio_interrupt, mode, "rio", hp);
- rio_dprintk(RIO_DEBUG_INIT, "Return value from request_irq: %d\n", retval);
- if (retval) {
- printk(KERN_ERR "rio: Cannot allocate irq %d.\n", hp->Ivec);
- hp->Ivec = 0;
- }
- rio_dprintk(RIO_DEBUG_INIT, "Got irq %d.\n", hp->Ivec);
- if (hp->Ivec != 0) {
- rio_dprintk(RIO_DEBUG_INIT, "Enabling interrupts on rio card.\n");
- hp->Mode |= RIO_PCI_INT_ENABLE;
- } else
- hp->Mode &= ~RIO_PCI_INT_ENABLE;
- rio_dprintk(RIO_DEBUG_INIT, "New Mode: %x\n", hp->Mode);
- rio_start_card_running(hp);
- }
- /* Init the timer "always" to make sure that it can safely be
- deleted when we unload... */
-
- setup_timer(&hp->timer, rio_pollfunc, i);
- if (!hp->Ivec) {
- rio_dprintk(RIO_DEBUG_INIT, "Starting polling at %dj intervals.\n", rio_poll);
- mod_timer(&hp->timer, jiffies + rio_poll);
- }
- }
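
For the older PCI cards the probe code above marks the interrupt as non-sharable by setting bit 15 of Ivec; the request loop just above then strips that bit again and drops IRQF_SHARED. A small stand-alone sketch of that encoding (the macro name is invented for illustration; the driver uses the literal 0x8000):

#include <stdio.h>

#define RIO_IVEC_NOSHARE 0x8000  /* hypothetical name for the driver's 0x8000 marker */

int main(void)
{
        unsigned int ivec = 11 | RIO_IVEC_NOSHARE;      /* old PCI card on IRQ 11 */
        int shared = !(ivec & RIO_IVEC_NOSHARE);        /* would select IRQF_SHARED */
        unsigned int irq = ivec & 0x7fff;               /* the real IRQ number */

        printf("irq=%u shared=%d\n", irq, shared);      /* prints: irq=11 shared=0 */
        return 0;
}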
-
- if (found) {
- rio_dprintk(RIO_DEBUG_INIT, "rio: total of %d boards detected.\n", found);
- rio_init_drivers();
- } else {
- /* deregister the misc device we created earlier */
- misc_deregister(&rio_fw_device);
- }
-
- func_exit();
- return found ? 0 : -EIO;
-}
-
-
-static void __exit rio_exit(void)
-{
- int i;
- struct Host *hp;
-
- func_enter();
-
- for (i = 0, hp = p->RIOHosts; i < p->RIONumHosts; i++, hp++) {
- RIOHostReset(hp->Type, hp->CardP, hp->Slot);
- if (hp->Ivec) {
- free_irq(hp->Ivec, hp);
- rio_dprintk(RIO_DEBUG_INIT, "freed irq %d.\n", hp->Ivec);
- }
- /* It is safe/allowed to del_timer a non-active timer */
- del_timer_sync(&hp->timer);
- if (hp->Caddr)
- iounmap(hp->Caddr);
- if (hp->Type == RIO_PCI)
- pci_dev_put(hp->pdev);
- }
-
- if (misc_deregister(&rio_fw_device) < 0) {
- printk(KERN_INFO "rio: couldn't deregister control-device\n");
- }
-
-
- rio_dprintk(RIO_DEBUG_CLEANUP, "Cleaning up drivers\n");
-
- rio_release_drivers();
-
- /* Release dynamically allocated memory */
- kfree(p->RIOPortp);
- kfree(p->RIOHosts);
- kfree(p);
-
- func_exit();
-}
-
-module_init(rio_init);
-module_exit(rio_exit);
diff --git a/drivers/staging/generic_serial/rio/rio_linux.h b/drivers/staging/generic_serial/rio/rio_linux.h
deleted file mode 100644
index 7f26cd7c815..00000000000
--- a/drivers/staging/generic_serial/rio/rio_linux.h
+++ /dev/null
@@ -1,197 +0,0 @@
-
-/*
- * rio_linux.h
- *
- * Copyright (C) 1998,1999,2000 R.E.Wolff@BitWizard.nl
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * RIO serial driver.
- *
- * Version 1.0 -- July, 1999.
- *
- */
-
-#define RIO_NBOARDS 4
-#define RIO_PORTSPERBOARD 128
-#define RIO_NPORTS (RIO_NBOARDS * RIO_PORTSPERBOARD)
-
-#define MODEM_SUPPORT
-
-#ifdef __KERNEL__
-
-#define RIO_MAGIC 0x12345678
-
-
-struct vpd_prom {
- unsigned short id;
- char hwrev;
- char hwass;
- int uniqid;
- char myear;
- char mweek;
- char hw_feature[5];
- char oem_id;
- char identifier[16];
-};
-
-
-#define RIO_DEBUG_ALL 0xffffffff
-
-#define O_OTHER(tty) \
- ((O_OLCUC(tty)) ||\
- (O_ONLCR(tty)) ||\
- (O_OCRNL(tty)) ||\
- (O_ONOCR(tty)) ||\
- (O_ONLRET(tty)) ||\
- (O_OFILL(tty)) ||\
- (O_OFDEL(tty)) ||\
- (O_NLDLY(tty)) ||\
- (O_CRDLY(tty)) ||\
- (O_TABDLY(tty)) ||\
- (O_BSDLY(tty)) ||\
- (O_VTDLY(tty)) ||\
- (O_FFDLY(tty)))
-
-/* Same for input. */
-#define I_OTHER(tty) \
- ((I_INLCR(tty)) ||\
- (I_IGNCR(tty)) ||\
- (I_ICRNL(tty)) ||\
- (I_IUCLC(tty)) ||\
- (L_ISIG(tty)))
-
-
-#endif /* __KERNEL__ */
-
-
-#define RIO_BOARD_INTR_LOCK 1
-
-
-#ifndef RIOCTL_MISC_MINOR
-/* Allow others to gather this into "major.h" or something like that */
-#define RIOCTL_MISC_MINOR 169
-#endif
-
-
-/* Allow us to debug "in the field" without requiring clients to
- recompile.... */
-#if 1
-#define rio_spin_lock_irqsave(sem, flags) do { \
- rio_dprintk (RIO_DEBUG_SPINLOCK, "spinlockirqsave: %p %s:%d\n", \
- sem, __FILE__, __LINE__);\
- spin_lock_irqsave(sem, flags);\
- } while (0)
-
-#define rio_spin_unlock_irqrestore(sem, flags) do { \
- rio_dprintk (RIO_DEBUG_SPINLOCK, "spinunlockirqrestore: %p %s:%d\n",\
- sem, __FILE__, __LINE__);\
- spin_unlock_irqrestore(sem, flags);\
- } while (0)
-
-#define rio_spin_lock(sem) do { \
- rio_dprintk (RIO_DEBUG_SPINLOCK, "spinlock: %p %s:%d\n",\
- sem, __FILE__, __LINE__);\
- spin_lock(sem);\
- } while (0)
-
-#define rio_spin_unlock(sem) do { \
- rio_dprintk (RIO_DEBUG_SPINLOCK, "spinunlock: %p %s:%d\n",\
- sem, __FILE__, __LINE__);\
- spin_unlock(sem);\
- } while (0)
-#else
-#define rio_spin_lock_irqsave(sem, flags) \
- spin_lock_irqsave(sem, flags)
-
-#define rio_spin_unlock_irqrestore(sem, flags) \
- spin_unlock_irqrestore(sem, flags)
-
-#define rio_spin_lock(sem) \
- spin_lock(sem)
-
-#define rio_spin_unlock(sem) \
- spin_unlock(sem)
-
-#endif
-
-
-
-#ifdef CONFIG_RIO_OLDPCI
-static inline void __iomem *rio_memcpy_toio(void __iomem *dummy, void __iomem *dest, void *source, int n)
-{
- char __iomem *dst = dest;
- char *src = source;
-
- while (n--) {
- writeb(*src++, dst++);
- (void) readb(dummy);
- }
-
- return dest;
-}
-
-static inline void __iomem *rio_copy_toio(void __iomem *dest, void *source, int n)
-{
- char __iomem *dst = dest;
- char *src = source;
-
- while (n--)
- writeb(*src++, dst++);
-
- return dest;
-}
-
-
-static inline void *rio_memcpy_fromio(void *dest, void __iomem *source, int n)
-{
- char *dst = dest;
- char __iomem *src = source;
-
- while (n--)
- *dst++ = readb(src++);
-
- return dest;
-}
-
-#else
-#define rio_memcpy_toio(dummy,dest,source,n) memcpy_toio(dest, source, n)
-#define rio_copy_toio memcpy_toio
-#define rio_memcpy_fromio memcpy_fromio
-#endif
-
-#define DEBUG 1
-
-
-/*
- This driver can spew a whole lot of debugging output at you. If you
- need maximum performance, you should disable the DEBUG define. To
- aid in debugging in the field, I'm leaving the compile-time debug
- features enabled and disabling them at run time. That allows me to
- instruct people with problems to enable debugging without requiring
- them to recompile...
-*/
-
-#ifdef DEBUG
-#define rio_dprintk(f, str...) do { if (rio_debug & f) printk (str);} while (0)
-#define func_enter() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s\n", __func__)
-#define func_exit() rio_dprintk (RIO_DEBUG_FLOW, "rio: exit %s\n", __func__)
-#define func_enter2() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s (port %d)\n",__func__, port->line)
-#else
-#define rio_dprintk(f, str...) /* nothing */
-#define func_enter()
-#define func_exit()
-#define func_enter2()
-#endif
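
The scheme above keeps every debug statement compiled in and masks it at run time through the rio_debug bitmask. A generic, self-contained sketch of the same pattern outside the kernel (the names are illustrative, not the driver's):

#include <stdio.h>

static unsigned int debug_mask;          /* would be a module parameter in-kernel */

#define DBG_PROBE 0x01
#define DBG_INIT  0x02

#define dbg(level, ...) \
        do { if (debug_mask & (level)) printf(__VA_ARGS__); } while (0)

int main(void)
{
        dbg(DBG_PROBE, "not printed, mask is 0\n");
        debug_mask = DBG_PROBE;          /* "enable debugging in the field" */
        dbg(DBG_PROBE, "printed now\n");
        return 0;
}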
diff --git a/drivers/staging/generic_serial/rio/rioboard.h b/drivers/staging/generic_serial/rio/rioboard.h
deleted file mode 100644
index 252230043c8..00000000000
--- a/drivers/staging/generic_serial/rio/rioboard.h
+++ /dev/null
@@ -1,275 +0,0 @@
-/************************************************************************/
-/* */
-/* Title : RIO Host Card Hardware Definitions */
-/* */
-/* Author : N.P.Vassallo */
-/* */
-/* Creation : 26th April 1999 */
-/* */
-/* Version : 1.0.0 */
-/* */
-/* Copyright : (c) Specialix International Ltd. 1999 *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- * */
-/* Description : Prototypes, structures and definitions */
-/* describing the RIO board hardware */
-/* */
-/************************************************************************/
-
-#ifndef _rioboard_h /* If RIOBOARD.H not already defined */
-#define _rioboard_h 1
-
-/*****************************************************************************
-*********************** ***********************
-*********************** Hardware Control Registers ***********************
-*********************** ***********************
-*****************************************************************************/
-
-/* Hardware Registers... */
-
-#define RIO_REG_BASE 0x7C00 /* Base of control registers */
-
-#define RIO_CONFIG RIO_REG_BASE + 0x0000 /* WRITE: Configuration Register */
-#define RIO_INTSET RIO_REG_BASE + 0x0080 /* WRITE: Interrupt Set */
-#define RIO_RESET RIO_REG_BASE + 0x0100 /* WRITE: Host Reset */
-#define RIO_INTRESET RIO_REG_BASE + 0x0180 /* WRITE: Interrupt Reset */
-
-#define RIO_VPD_ROM RIO_REG_BASE + 0x0000 /* READ: Vital Product Data ROM */
-#define RIO_INTSTAT RIO_REG_BASE + 0x0080 /* READ: Interrupt Status (Jet boards only) */
-#define RIO_RESETSTAT RIO_REG_BASE + 0x0100 /* READ: Reset Status (Jet boards only) */
-
-/* RIO_VPD_ROM definitions... */
-#define VPD_SLX_ID1 0x00 /* READ: Specialix Identifier #1 */
-#define VPD_SLX_ID2 0x01 /* READ: Specialix Identifier #2 */
-#define VPD_HW_REV 0x02 /* READ: Hardware Revision */
-#define VPD_HW_ASSEM 0x03 /* READ: Hardware Assembly Level */
-#define VPD_UNIQUEID4 0x04 /* READ: Unique Identifier #4 */
-#define VPD_UNIQUEID3 0x05 /* READ: Unique Identifier #3 */
-#define VPD_UNIQUEID2 0x06 /* READ: Unique Identifier #2 */
-#define VPD_UNIQUEID1 0x07 /* READ: Unique Identifier #1 */
-#define VPD_MANU_YEAR 0x08 /* READ: Year Of Manufacture (0 = 1970) */
-#define VPD_MANU_WEEK 0x09 /* READ: Week Of Manufacture (0 = week 1 Jan) */
-#define VPD_HWFEATURE1 0x0A /* READ: Hardware Feature Byte 1 */
-#define VPD_HWFEATURE2 0x0B /* READ: Hardware Feature Byte 2 */
-#define VPD_HWFEATURE3 0x0C /* READ: Hardware Feature Byte 3 */
-#define VPD_HWFEATURE4 0x0D /* READ: Hardware Feature Byte 4 */
-#define VPD_HWFEATURE5 0x0E /* READ: Hardware Feature Byte 5 */
-#define VPD_OEMID 0x0F /* READ: OEM Identifier */
-#define VPD_IDENT 0x10 /* READ: Identifier string (16 bytes) */
-#define VPD_IDENT_LEN 0x10
-
-/* VPD ROM Definitions... */
-#define SLX_ID1 0x4D
-#define SLX_ID2 0x98
-
-#define PRODUCT_ID(a) ((a>>4)&0xF) /* Use to obtain Product ID from VPD_UNIQUEID1 */
-
-#define ID_SX_ISA 0x2
-#define ID_RIO_EISA 0x3
-#define ID_SX_PCI 0x5
-#define ID_SX_EISA 0x7
-#define ID_RIO_RTA16 0x9
-#define ID_RIO_ISA 0xA
-#define ID_RIO_MCA 0xB
-#define ID_RIO_SBUS 0xC
-#define ID_RIO_PCI 0xD
-#define ID_RIO_RTA8 0xE
-
-/* Transputer bootstrap definitions... */
-
-#define BOOTLOADADDR (0x8000 - 6)
-#define BOOTINDICATE (0x8000 - 2)
-
-/* Firmware load position... */
-
-#define FIRMWARELOADADDR 0x7C00 /* Firmware is loaded _before_ this address */
-
-/*****************************************************************************
-***************************** *****************************
-***************************** RIO (Rev1) ISA *****************************
-***************************** *****************************
-*****************************************************************************/
-
-/* Control Register Definitions... */
-#define RIO_ISA_IDENT "JBJGPGGHINSMJPJR"
-
-#define RIO_ISA_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
-#define RIO_ISA_CFG_BUSENABLE 0x02 /* Enable processor bus */
-#define RIO_ISA_CFG_IRQMASK 0x30 /* Interrupt mask */
-#define RIO_ISA_CFG_IRQ12 0x10 /* Interrupt Level 12 */
-#define RIO_ISA_CFG_IRQ11 0x20 /* Interrupt Level 11 */
-#define RIO_ISA_CFG_IRQ9 0x30 /* Interrupt Level 9 */
-#define RIO_ISA_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
-#define RIO_ISA_CFG_WAITSTATE0 0x80 /* 0 waitstates, else 1 */
-
-/*****************************************************************************
-***************************** *****************************
-***************************** RIO (Rev2) ISA *****************************
-***************************** *****************************
-*****************************************************************************/
-
-/* Control Register Definitions... */
-#define RIO_ISA2_IDENT "JBJGPGGHINSMJPJR"
-
-#define RIO_ISA2_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
-#define RIO_ISA2_CFG_BUSENABLE 0x02 /* Enable processor bus */
-#define RIO_ISA2_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
-#define RIO_ISA2_CFG_16BIT 0x08 /* 16bit mode, else 8bit */
-#define RIO_ISA2_CFG_IRQMASK 0x30 /* Interrupt mask */
-#define RIO_ISA2_CFG_IRQ15 0x00 /* Interrupt Level 15 */
-#define RIO_ISA2_CFG_IRQ12 0x10 /* Interrupt Level 12 */
-#define RIO_ISA2_CFG_IRQ11 0x20 /* Interrupt Level 11 */
-#define RIO_ISA2_CFG_IRQ9 0x30 /* Interrupt Level 9 */
-#define RIO_ISA2_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
-#define RIO_ISA2_CFG_WAITSTATE0 0x80 /* 0 waitstates, else 1 */
-
-/*****************************************************************************
-***************************** ******************************
-***************************** RIO (Jet) ISA ******************************
-***************************** ******************************
-*****************************************************************************/
-
-/* Control Register Definitions... */
-#define RIO_ISA3_IDENT "JET HOST BY KEV#"
-
-#define RIO_ISA3_CFG_BUSENABLE 0x02 /* Enable processor bus */
-#define RIO_ISA3_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
-#define RIO_ISA3_CFG_IRQMASK 0xF0 /* Interrupt mask */
-#define RIO_ISA3_CFG_IRQ15 0xF0 /* Interrupt Level 15 */
-#define RIO_ISA3_CFG_IRQ12 0xC0 /* Interrupt Level 12 */
-#define RIO_ISA3_CFG_IRQ11 0xB0 /* Interrupt Level 11 */
-#define RIO_ISA3_CFG_IRQ10 0xA0 /* Interrupt Level 10 */
-#define RIO_ISA3_CFG_IRQ9 0x90 /* Interrupt Level 9 */
-
-/*****************************************************************************
-********************************* ********************************
-********************************* RIO MCA ********************************
-********************************* ********************************
-*****************************************************************************/
-
-/* Control Register Definitions... */
-#define RIO_MCA_IDENT "JBJGPGGHINSMJPJR"
-
-#define RIO_MCA_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
-#define RIO_MCA_CFG_BUSENABLE 0x02 /* Enable processor bus */
-#define RIO_MCA_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
-
-/*****************************************************************************
-******************************** ********************************
-******************************** RIO EISA ********************************
-******************************** ********************************
-*****************************************************************************/
-
-/* EISA Configuration Space Definitions... */
-#define EISA_PRODUCT_ID1 0xC80
-#define EISA_PRODUCT_ID2 0xC81
-#define EISA_PRODUCT_NUMBER 0xC82
-#define EISA_REVISION_NUMBER 0xC83
-#define EISA_CARD_ENABLE 0xC84
-#define EISA_VPD_UNIQUEID4 0xC88 /* READ: Unique Identifier #4 */
-#define EISA_VPD_UNIQUEID3 0xC8A /* READ: Unique Identifier #3 */
-#define EISA_VPD_UNIQUEID2 0xC90 /* READ: Unique Identifier #2 */
-#define EISA_VPD_UNIQUEID1 0xC92 /* READ: Unique Identifier #1 */
-#define EISA_VPD_MANU_YEAR 0xC98 /* READ: Year Of Manufacture (0 = 1970) */
-#define EISA_VPD_MANU_WEEK 0xC9A /* READ: Week Of Manufacture (0 = week 1 Jan) */
-#define EISA_MEM_ADDR_23_16 0xC00
-#define EISA_MEM_ADDR_31_24 0xC01
-#define EISA_RIO_CONFIG 0xC02 /* WRITE: Configuration Register */
-#define EISA_RIO_INTSET 0xC03 /* WRITE: Interrupt Set */
-#define EISA_RIO_INTRESET 0xC03 /* READ: Interrupt Reset */
-
-/* Control Register Definitions... */
-#define RIO_EISA_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
-#define RIO_EISA_CFG_LINK20 0x02 /* 20Mbps link, else 10Mbps */
-#define RIO_EISA_CFG_BUSENABLE 0x04 /* Enable processor bus */
-#define RIO_EISA_CFG_PROCRUN 0x08 /* Processor running, else reset */
-#define RIO_EISA_CFG_IRQMASK 0xF0 /* Interrupt mask */
-#define RIO_EISA_CFG_IRQ15 0xF0 /* Interrupt Level 15 */
-#define RIO_EISA_CFG_IRQ14 0xE0 /* Interrupt Level 14 */
-#define RIO_EISA_CFG_IRQ12 0xC0 /* Interrupt Level 12 */
-#define RIO_EISA_CFG_IRQ11 0xB0 /* Interrupt Level 11 */
-#define RIO_EISA_CFG_IRQ10 0xA0 /* Interrupt Level 10 */
-#define RIO_EISA_CFG_IRQ9 0x90 /* Interrupt Level 9 */
-#define RIO_EISA_CFG_IRQ7 0x70 /* Interrupt Level 7 */
-#define RIO_EISA_CFG_IRQ6 0x60 /* Interrupt Level 6 */
-#define RIO_EISA_CFG_IRQ5 0x50 /* Interrupt Level 5 */
-#define RIO_EISA_CFG_IRQ4 0x40 /* Interrupt Level 4 */
-#define RIO_EISA_CFG_IRQ3 0x30 /* Interrupt Level 3 */
-
-/*****************************************************************************
-******************************** ********************************
-******************************** RIO SBus ********************************
-******************************** ********************************
-*****************************************************************************/
-
-/* Control Register Definitions... */
-#define RIO_SBUS_IDENT "JBPGK#\0\0\0\0\0\0\0\0\0\0"
-
-#define RIO_SBUS_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
-#define RIO_SBUS_CFG_BUSENABLE 0x02 /* Enable processor bus */
-#define RIO_SBUS_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
-#define RIO_SBUS_CFG_IRQMASK 0x38 /* Interrupt mask */
-#define RIO_SBUS_CFG_IRQNONE 0x00 /* No Interrupt */
-#define RIO_SBUS_CFG_IRQ7 0x38 /* Interrupt Level 7 */
-#define RIO_SBUS_CFG_IRQ6 0x30 /* Interrupt Level 6 */
-#define RIO_SBUS_CFG_IRQ5 0x28 /* Interrupt Level 5 */
-#define RIO_SBUS_CFG_IRQ4 0x20 /* Interrupt Level 4 */
-#define RIO_SBUS_CFG_IRQ3 0x18 /* Interrupt Level 3 */
-#define RIO_SBUS_CFG_IRQ2 0x10 /* Interrupt Level 2 */
-#define RIO_SBUS_CFG_IRQ1 0x08 /* Interrupt Level 1 */
-#define RIO_SBUS_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
-#define RIO_SBUS_CFG_PROC25 0x80 /* 25MHz processor clock, else 20MHz */
-
-/*****************************************************************************
-********************************* ********************************
-********************************* RIO PCI ********************************
-********************************* ********************************
-*****************************************************************************/
-
-/* Control Register Definitions... */
-#define RIO_PCI_IDENT "ECDDPGJGJHJRGSK#"
-
-#define RIO_PCI_CFG_BOOTRAM 0x01 /* Boot from RAM, else Link */
-#define RIO_PCI_CFG_BUSENABLE 0x02 /* Enable processor bus */
-#define RIO_PCI_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
-#define RIO_PCI_CFG_LINK20 0x40 /* 20Mbps link, else 10Mbps */
-#define RIO_PCI_CFG_PROC25 0x80 /* 25MHz processor clock, else 20MHz */
-
-/* PCI Definitions... */
-#define SPX_VENDOR_ID 0x11CB /* Assigned by the PCI SIG */
-#define SPX_DEVICE_ID 0x8000 /* RIO bridge boards */
-#define SPX_PLXDEVICE_ID 0x2000 /* PLX bridge boards */
-#define SPX_SUB_VENDOR_ID SPX_VENDOR_ID /* Same as vendor id */
-#define RIO_SUB_SYS_ID 0x0800 /* RIO PCI board */
-
-/*****************************************************************************
-***************************** ******************************
-***************************** RIO (Jet) PCI ******************************
-***************************** ******************************
-*****************************************************************************/
-
-/* Control Register Definitions... */
-#define RIO_PCI2_IDENT "JET HOST BY KEV#"
-
-#define RIO_PCI2_CFG_BUSENABLE 0x02 /* Enable processor bus */
-#define RIO_PCI2_CFG_INTENABLE 0x04 /* Interrupt enable, else disable */
-
-/* PCI Definitions... */
-#define RIO2_SUB_SYS_ID 0x0100 /* RIO (Jet) PCI board */
-
-#endif /*_rioboard_h */
-
-/* End of RIOBOARD.H */
diff --git a/drivers/staging/generic_serial/rio/rioboot.c b/drivers/staging/generic_serial/rio/rioboot.c
deleted file mode 100644
index ffa01c59021..00000000000
--- a/drivers/staging/generic_serial/rio/rioboot.c
+++ /dev/null
@@ -1,1113 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : rioboot.c
-** SID : 1.3
-** Last Modified : 11/6/98 10:33:36
-** Retrieved : 11/6/98 10:33:48
-**
-** ident @(#)rioboot.c 1.3
-**
-** -----------------------------------------------------------------------------
-*/
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/termios.h>
-#include <linux/serial.h>
-#include <linux/vmalloc.h>
-#include <linux/generic_serial.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/string.h>
-#include <asm/uaccess.h>
-
-
-#include "linux_compat.h"
-#include "rio_linux.h"
-#include "pkt.h"
-#include "daemon.h"
-#include "rio.h"
-#include "riospace.h"
-#include "cmdpkt.h"
-#include "map.h"
-#include "rup.h"
-#include "port.h"
-#include "riodrvr.h"
-#include "rioinfo.h"
-#include "func.h"
-#include "errors.h"
-#include "pci.h"
-
-#include "parmmap.h"
-#include "unixrup.h"
-#include "board.h"
-#include "host.h"
-#include "phb.h"
-#include "link.h"
-#include "cmdblk.h"
-#include "route.h"
-
-static int RIOBootComplete(struct rio_info *p, struct Host *HostP, unsigned int Rup, struct PktCmd __iomem *PktCmdP);
-
-static const unsigned char RIOAtVec2Ctrl[] = {
- /* 0 */ INTERRUPT_DISABLE,
- /* 1 */ INTERRUPT_DISABLE,
- /* 2 */ INTERRUPT_DISABLE,
- /* 3 */ INTERRUPT_DISABLE,
- /* 4 */ INTERRUPT_DISABLE,
- /* 5 */ INTERRUPT_DISABLE,
- /* 6 */ INTERRUPT_DISABLE,
- /* 7 */ INTERRUPT_DISABLE,
- /* 8 */ INTERRUPT_DISABLE,
- /* 9 */ IRQ_9 | INTERRUPT_ENABLE,
- /* 10 */ INTERRUPT_DISABLE,
- /* 11 */ IRQ_11 | INTERRUPT_ENABLE,
- /* 12 */ IRQ_12 | INTERRUPT_ENABLE,
- /* 13 */ INTERRUPT_DISABLE,
- /* 14 */ INTERRUPT_DISABLE,
- /* 15 */ IRQ_15 | INTERRUPT_ENABLE
-};
-
-/**
- * RIOBootCodeRTA - Load RTA boot code
- * @p: RIO to load
- * @rbp: Download descriptor
- *
- * Called when the user process initiates booting of the card firmware.
- * Loads the firmware into the driver's boot packet buffer.
- */
-
-int RIOBootCodeRTA(struct rio_info *p, struct DownLoad * rbp)
-{
- int offset;
-
- func_enter();
-
- rio_dprintk(RIO_DEBUG_BOOT, "Data at user address %p\n", rbp->DataP);
-
- /*
- ** Check that we have set aside enough memory for this
- */
- if (rbp->Count > SIXTY_FOUR_K) {
- rio_dprintk(RIO_DEBUG_BOOT, "RTA Boot Code Too Large!\n");
- p->RIOError.Error = HOST_FILE_TOO_LARGE;
- func_exit();
- return -ENOMEM;
- }
-
- if (p->RIOBooting) {
- rio_dprintk(RIO_DEBUG_BOOT, "RTA Boot Code : BUSY BUSY BUSY!\n");
- p->RIOError.Error = BOOT_IN_PROGRESS;
- func_exit();
- return -EBUSY;
- }
-
- /*
- ** The data we load in must end on a (RTA_BOOT_DATA_SIZE) byte boundary,
- ** so calculate how far we have to move the data up the buffer
- ** to achieve this.
- */
- offset = (RTA_BOOT_DATA_SIZE - (rbp->Count % RTA_BOOT_DATA_SIZE)) % RTA_BOOT_DATA_SIZE;
-
- /*
- ** Be clean, and clear the 'unused' portion of the boot buffer,
- ** because it will (eventually) be part of the Rta run time environment
- ** and so should be zeroed.
- */
- memset(p->RIOBootPackets, 0, offset);
-
- /*
- ** Copy the data from user space into the array
- */
-
- if (copy_from_user(((u8 *)p->RIOBootPackets) + offset, rbp->DataP, rbp->Count)) {
- rio_dprintk(RIO_DEBUG_BOOT, "Bad data copy from user space\n");
- p->RIOError.Error = COPYIN_FAILED;
- func_exit();
- return -EFAULT;
- }
-
- /*
- ** Make sure that our copy of the size includes that offset we discussed
- ** earlier.
- */
- p->RIONumBootPkts = (rbp->Count + offset) / RTA_BOOT_DATA_SIZE;
- p->RIOBootCount = rbp->Count;
-
- func_exit();
- return 0;
-}
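
RIOBootCodeRTA() pads the image so that it ends exactly on an RTA_BOOT_DATA_SIZE boundary: it shifts the data up the buffer by `offset` bytes and zeroes the gap. A quick stand-alone check of that arithmetic (the block size of 56 is purely illustrative; the real constant lives in the RIO headers):

#include <stdio.h>

int main(void)
{
        unsigned int block = 56;        /* illustrative stand-in for RTA_BOOT_DATA_SIZE */
        unsigned int count = 130;       /* illustrative firmware size in bytes */

        /* Same formula as the driver: pad so (count + offset) is a multiple of block. */
        unsigned int offset = (block - (count % block)) % block;

        printf("offset = %u, padded = %u, packets = %u\n",
               offset, count + offset, (count + offset) / block);
        /* offset = 38, padded = 168, packets = 3 */
        return 0;
}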
-
-/**
- * rio_start_card_running - host card start
- * @HostP: The RIO to kick off
- *
- * Start a RIO processor unit running. Encapsulates the knowledge
- * of the card type.
- */
-
-void rio_start_card_running(struct Host *HostP)
-{
- switch (HostP->Type) {
- case RIO_AT:
- rio_dprintk(RIO_DEBUG_BOOT, "Start ISA card running\n");
- writeb(BOOT_FROM_RAM | EXTERNAL_BUS_ON | HostP->Mode | RIOAtVec2Ctrl[HostP->Ivec & 0xF], &HostP->Control);
- break;
- case RIO_PCI:
- /*
- ** PCI is much the same as MCA. Everything is once again memory
- ** mapped, so we are writing to memory registers instead of io
- ** ports.
- */
- rio_dprintk(RIO_DEBUG_BOOT, "Start PCI card running\n");
- writeb(PCITpBootFromRam | PCITpBusEnable | HostP->Mode, &HostP->Control);
- break;
- default:
- rio_dprintk(RIO_DEBUG_BOOT, "Unknown host type %d\n", HostP->Type);
- break;
- }
- return;
-}
-
-/*
-** Load in the host boot code - load it directly onto all halted hosts
-** of the correct type.
-**
-** Put your rubber pants on before messing with this code - even the magic
-** numbers have trouble understanding what they are doing here.
-*/
-
-int RIOBootCodeHOST(struct rio_info *p, struct DownLoad *rbp)
-{
- struct Host *HostP;
- u8 __iomem *Cad;
- PARM_MAP __iomem *ParmMapP;
- int RupN;
- int PortN;
- unsigned int host;
- u8 __iomem *StartP;
- u8 __iomem *DestP;
- int wait_count;
- u16 OldParmMap;
- u16 offset; /* It is very important that this is a u16 */
- u8 *DownCode = NULL;
- unsigned long flags;
-
- HostP = NULL; /* Assure the compiler we've initialized it */
-
-
- /* Walk the hosts */
- for (host = 0; host < p->RIONumHosts; host++) {
- rio_dprintk(RIO_DEBUG_BOOT, "Attempt to boot host %d\n", host);
- HostP = &p->RIOHosts[host];
-
- rio_dprintk(RIO_DEBUG_BOOT, "Host Type = 0x%x, Mode = 0x%x, IVec = 0x%x\n", HostP->Type, HostP->Mode, HostP->Ivec);
-
- /* Don't boot hosts already running */
- if ((HostP->Flags & RUN_STATE) != RC_WAITING) {
- rio_dprintk(RIO_DEBUG_BOOT, "%s %d already running\n", "Host", host);
- continue;
- }
-
- /*
- ** Grab a pointer to the card (ioremapped)
- */
- Cad = HostP->Caddr;
-
- /*
- ** We are going to (try) and load in rbp->Count bytes.
- ** The last byte will reside at p->RIOConf.HostLoadBase-1;
- ** Therefore, we need to start copying at address
- ** (caddr+p->RIOConf.HostLoadBase-rbp->Count)
- */
- StartP = &Cad[p->RIOConf.HostLoadBase - rbp->Count];
-
- rio_dprintk(RIO_DEBUG_BOOT, "kernel virtual address for host is %p\n", Cad);
- rio_dprintk(RIO_DEBUG_BOOT, "kernel virtual address for download is %p\n", StartP);
- rio_dprintk(RIO_DEBUG_BOOT, "host loadbase is 0x%x\n", p->RIOConf.HostLoadBase);
- rio_dprintk(RIO_DEBUG_BOOT, "size of download is 0x%x\n", rbp->Count);
-
- /* Make sure it fits */
- if (p->RIOConf.HostLoadBase < rbp->Count) {
- rio_dprintk(RIO_DEBUG_BOOT, "Bin too large\n");
- p->RIOError.Error = HOST_FILE_TOO_LARGE;
- func_exit();
- return -EFBIG;
- }
- /*
- ** Ensure that the host really is stopped.
- ** Disable its external bus & twang its reset line.
- */
- RIOHostReset(HostP->Type, HostP->CardP, HostP->Slot);
-
- /*
- ** Copy the data directly from user space to the SRAM.
- ** This ain't going to be none too clever if the download
- ** code is bigger than this segment.
- */
- rio_dprintk(RIO_DEBUG_BOOT, "Copy in code\n");
-
- /* Buffer to local memory as we want to use I/O space and
- some cards only do 8 or 16 bit I/O */
-
- DownCode = vmalloc(rbp->Count);
- if (!DownCode) {
- p->RIOError.Error = NOT_ENOUGH_CORE_FOR_PCI_COPY;
- func_exit();
- return -ENOMEM;
- }
- if (copy_from_user(DownCode, rbp->DataP, rbp->Count)) {
- kfree(DownCode);
- p->RIOError.Error = COPYIN_FAILED;
- func_exit();
- return -EFAULT;
- }
- HostP->Copy(DownCode, StartP, rbp->Count);
- vfree(DownCode);
-
- rio_dprintk(RIO_DEBUG_BOOT, "Copy completed\n");
-
- /*
- ** S T O P !
- **
- ** Up to this point the code has been fairly rational, and possibly
- ** even straightforward. What follows is a pile of crud that will
- ** magically turn into six bytes of transputer assembler. Normally
- ** you would expect an array or something, but, being me, I have
- ** chosen [been told] to use a technique whereby the startup code
- ** will be correct if we change the loadbase for the code. Which
- ** brings us onto another issue - the loadbase is the *end* of the
- ** code, not the start.
- **
- ** If I were you I wouldn't start from here.
- */
-
- /*
- ** We now need to insert a short boot section into
- ** the memory at the end of Sram2. This is normally (de)composed
- ** of the last eight bytes of the download code. The
- ** download has been assembled/compiled to expect to be
- ** loaded from 0x7FFF downwards. We have loaded it
- ** at some other address. The startup code goes into the small
- ** ram window at Sram2, in the last 8 bytes, which are really
- ** at addresses 0x7FF8-0x7FFF.
- **
- ** If the loadbase is, say, 0x7C00, then we need to branch to
- ** address 0x7BFE to run the host.bin startup code. We assemble
- ** this jump manually.
- **
- ** The two byte sequence 60 08 is loaded into memory at address
- ** 0x7FFE,F. This is a local branch to location 0x7FF8 (60 is nfix 0,
- ** which adds '0' to the .O register, complements .O, and then shifts
- ** it left by 4 bit positions, 08 is a jump .O+8 instruction. This will
- ** add 8 to .O (which was 0xFFF0), and will branch RELATIVE to the new
- ** location. Now, the branch starts from the value of .PC (or .IP or
- ** whatever the bloody register is called on this chip), and the .PC
- ** will be pointing to the location AFTER the branch, in this case
- ** .PC == 0x8000, so the branch will be to 0x8000+0xFFF8 = 0x7FF8.
- **
- ** A long branch is coded at 0x7FF8. This consists of loading a four
- ** byte offset into .O using nfix (as above) and pfix operators. The
- ** pfix operates in exactly the same way as the nfix operator, but
- ** without the complement operation. The offset, of course, must be
- ** relative to the address of the byte AFTER the branch instruction,
- ** which will be (urm) 0x7FFC, so, our final destination of the branch
- ** (loadbase-2), has to be reached from here. Imagine that the loadbase
- ** is 0x7C00 (which it is), then we will need to branch to 0x7BFE (which
- ** is the first byte of the initial two byte short local branch of the
- ** download code).
- **
- ** To code a jump from 0x7FFC (which is where the branch will start
- ** from) to 0x7BFE, we will need to branch 0xFC02 bytes (0x7FFC+0xFC02)=
- ** 0x7BFE.
- ** This will be coded as four bytes:
- ** 60 2C 20 02
- ** being nfix .O+0
- ** pfix .O+C
- ** pfix .O+0
- ** jump .O+2
- **
- ** The nfix operator is used, so that the startup code will be
- ** compatible with the whole Tp family. (lies, damn lies, it'll never
- ** work in a month of Sundays).
- **
- ** The nfix nybble is the 1s complement of the nybble value you
- ** want to load - in this case we wanted 'F' so we nfix loaded '0'.
- */
-
-
- /*
- ** Dest points to the top 8 bytes of Sram2. The Tp jumps
- ** to 0x7FFE at reset time, and starts executing. This is
- ** a short branch to 0x7FF8, where a long branch is coded.
- */
-
- DestP = &Cad[0x7FF8]; /* <<<---- READ THE ABOVE COMMENTS */
-
-#define NFIX(N) (0x60 | (N)) /* .O = (~(.O + N))<<4 */
-#define PFIX(N) (0x20 | (N)) /* .O = (.O + N)<<4 */
-#define JUMP(N) (0x00 | (N)) /* .PC = .PC + .O */
-
- /*
- ** 0x7FFC is the address of the location following the last byte of
- ** the four byte jump instruction.
- ** READ THE ABOVE COMMENTS
- **
- ** offset is (TO-FROM) % MEMSIZE, but with compound buggering about.
- ** Memsize is 64K for this range of Tp, so offset is a short (unsigned,
- ** cos I don't understand 2's complement).
- */
- offset = (p->RIOConf.HostLoadBase - 2) - 0x7FFC;
-
- writeb(NFIX(((unsigned short) (~offset) >> (unsigned short) 12) & 0xF), DestP);
- writeb(PFIX((offset >> 8) & 0xF), DestP + 1);
- writeb(PFIX((offset >> 4) & 0xF), DestP + 2);
- writeb(JUMP(offset & 0xF), DestP + 3);
-
- writeb(NFIX(0), DestP + 6);
- writeb(JUMP(8), DestP + 7);
-
- rio_dprintk(RIO_DEBUG_BOOT, "host loadbase is 0x%x\n", p->RIOConf.HostLoadBase);
- rio_dprintk(RIO_DEBUG_BOOT, "startup offset is 0x%x\n", offset);
-
- /*
- ** Flag what is going on
- */
- HostP->Flags &= ~RUN_STATE;
- HostP->Flags |= RC_STARTUP;
-
- /*
- ** Grab a copy of the current ParmMap pointer, so we
- ** can tell when it has changed.
- */
- OldParmMap = readw(&HostP->__ParmMapR);
-
- rio_dprintk(RIO_DEBUG_BOOT, "Original parmmap is 0x%x\n", OldParmMap);
-
- /*
- ** And start it running (I hope).
- ** As there is nothing dodgy or obscure about the
- ** above code, this is guaranteed to work every time.
- */
- rio_dprintk(RIO_DEBUG_BOOT, "Host Type = 0x%x, Mode = 0x%x, IVec = 0x%x\n", HostP->Type, HostP->Mode, HostP->Ivec);
-
- rio_start_card_running(HostP);
-
- rio_dprintk(RIO_DEBUG_BOOT, "Set control port\n");
-
- /*
- ** Now, wait for up to five seconds for the Tp to setup the parmmap
- ** pointer:
- */
- for (wait_count = 0; (wait_count < p->RIOConf.StartupTime) && (readw(&HostP->__ParmMapR) == OldParmMap); wait_count++) {
- rio_dprintk(RIO_DEBUG_BOOT, "Checkout %d, 0x%x\n", wait_count, readw(&HostP->__ParmMapR));
- mdelay(100);
-
- }
-
- /*
- ** If the parmmap pointer is unchanged, then the host code
- ** has crashed & burned in a really spectacular way
- */
- if (readw(&HostP->__ParmMapR) == OldParmMap) {
- rio_dprintk(RIO_DEBUG_BOOT, "parmmap 0x%x\n", readw(&HostP->__ParmMapR));
- rio_dprintk(RIO_DEBUG_BOOT, "RIO Mesg Run Fail\n");
- HostP->Flags &= ~RUN_STATE;
- HostP->Flags |= RC_STUFFED;
- RIOHostReset( HostP->Type, HostP->CardP, HostP->Slot );
- continue;
- }
-
- rio_dprintk(RIO_DEBUG_BOOT, "Running 0x%x\n", readw(&HostP->__ParmMapR));
-
- /*
- ** Well, the board thought it was OK, and setup its parmmap
- ** pointer. For the time being, we will pretend that this
- ** board is running, and check out what the error flag says.
- */
-
- /*
- ** Grab a 32 bit pointer to the parmmap structure
- */
- ParmMapP = (PARM_MAP __iomem *) RIO_PTR(Cad, readw(&HostP->__ParmMapR));
- rio_dprintk(RIO_DEBUG_BOOT, "ParmMapP : %p\n", ParmMapP);
- ParmMapP = (PARM_MAP __iomem *)(Cad + readw(&HostP->__ParmMapR));
- rio_dprintk(RIO_DEBUG_BOOT, "ParmMapP : %p\n", ParmMapP);
-
- /*
- ** The links entry should be 0xFFFF; we set it up
- ** with a mask to say how many PHBs to use, and
- ** which links to use.
- */
- if (readw(&ParmMapP->links) != 0xFFFF) {
- rio_dprintk(RIO_DEBUG_BOOT, "RIO Mesg Run Fail %s\n", HostP->Name);
- rio_dprintk(RIO_DEBUG_BOOT, "Links = 0x%x\n", readw(&ParmMapP->links));
- HostP->Flags &= ~RUN_STATE;
- HostP->Flags |= RC_STUFFED;
- RIOHostReset( HostP->Type, HostP->CardP, HostP->Slot );
- continue;
- }
-
- writew(RIO_LINK_ENABLE, &ParmMapP->links);
-
- /*
- ** now wait for the card to set all the parmmap->XXX stuff
- ** this is a wait of up to two seconds....
- */
- rio_dprintk(RIO_DEBUG_BOOT, "Looking for init_done - %d ticks\n", p->RIOConf.StartupTime);
- HostP->timeout_id = 0;
- for (wait_count = 0; (wait_count < p->RIOConf.StartupTime) && !readw(&ParmMapP->init_done); wait_count++) {
- rio_dprintk(RIO_DEBUG_BOOT, "Waiting for init_done\n");
- mdelay(100);
- }
- rio_dprintk(RIO_DEBUG_BOOT, "OK! init_done!\n");
-
- if (readw(&ParmMapP->error) != E_NO_ERROR || !readw(&ParmMapP->init_done)) {
- rio_dprintk(RIO_DEBUG_BOOT, "RIO Mesg Run Fail %s\n", HostP->Name);
- rio_dprintk(RIO_DEBUG_BOOT, "Timedout waiting for init_done\n");
- HostP->Flags &= ~RUN_STATE;
- HostP->Flags |= RC_STUFFED;
- RIOHostReset( HostP->Type, HostP->CardP, HostP->Slot );
- continue;
- }
-
- rio_dprintk(RIO_DEBUG_BOOT, "Got init_done\n");
-
- /*
- ** It runs! It runs!
- */
- rio_dprintk(RIO_DEBUG_BOOT, "Host ID %x Running\n", HostP->UniqueNum);
-
- /*
- ** set the time period between interrupts.
- */
- writew(p->RIOConf.Timer, &ParmMapP->timer);
-
- /*
- ** Translate all the 16 bit pointers in the __ParmMapR into
- ** 32 bit pointers for the driver in ioremap space.
- */
- HostP->ParmMapP = ParmMapP;
- HostP->PhbP = (struct PHB __iomem *) RIO_PTR(Cad, readw(&ParmMapP->phb_ptr));
- HostP->RupP = (struct RUP __iomem *) RIO_PTR(Cad, readw(&ParmMapP->rups));
- HostP->PhbNumP = (unsigned short __iomem *) RIO_PTR(Cad, readw(&ParmMapP->phb_num_ptr));
- HostP->LinkStrP = (struct LPB __iomem *) RIO_PTR(Cad, readw(&ParmMapP->link_str_ptr));
-
- /*
- ** point the UnixRups at the real Rups
- */
- for (RupN = 0; RupN < MAX_RUP; RupN++) {
- HostP->UnixRups[RupN].RupP = &HostP->RupP[RupN];
- HostP->UnixRups[RupN].Id = RupN + 1;
- HostP->UnixRups[RupN].BaseSysPort = NO_PORT;
- spin_lock_init(&HostP->UnixRups[RupN].RupLock);
- }
-
- for (RupN = 0; RupN < LINKS_PER_UNIT; RupN++) {
- HostP->UnixRups[RupN + MAX_RUP].RupP = &HostP->LinkStrP[RupN].rup;
- HostP->UnixRups[RupN + MAX_RUP].Id = 0;
- HostP->UnixRups[RupN + MAX_RUP].BaseSysPort = NO_PORT;
- spin_lock_init(&HostP->UnixRups[RupN + MAX_RUP].RupLock);
- }
-
- /*
- ** point the PortP->Phbs at the real Phbs
- */
- for (PortN = p->RIOFirstPortsMapped; PortN < p->RIOLastPortsMapped + PORTS_PER_RTA; PortN++) {
- if (p->RIOPortp[PortN]->HostP == HostP) {
- struct Port *PortP = p->RIOPortp[PortN];
- struct PHB __iomem *PhbP;
- /* int oldspl; */
-
- if (!PortP->Mapped)
- continue;
-
- PhbP = &HostP->PhbP[PortP->HostPort];
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- PortP->PhbP = PhbP;
-
- PortP->TxAdd = (u16 __iomem *) RIO_PTR(Cad, readw(&PhbP->tx_add));
- PortP->TxStart = (u16 __iomem *) RIO_PTR(Cad, readw(&PhbP->tx_start));
- PortP->TxEnd = (u16 __iomem *) RIO_PTR(Cad, readw(&PhbP->tx_end));
- PortP->RxRemove = (u16 __iomem *) RIO_PTR(Cad, readw(&PhbP->rx_remove));
- PortP->RxStart = (u16 __iomem *) RIO_PTR(Cad, readw(&PhbP->rx_start));
- PortP->RxEnd = (u16 __iomem *) RIO_PTR(Cad, readw(&PhbP->rx_end));
-
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- /*
- ** point the UnixRup at the base SysPort
- */
- if (!(PortN % PORTS_PER_RTA))
- HostP->UnixRups[PortP->RupNum].BaseSysPort = PortN;
- }
- }
-
- rio_dprintk(RIO_DEBUG_BOOT, "Set the card running... \n");
- /*
- ** last thing - show the world that everything is in place
- */
- HostP->Flags &= ~RUN_STATE;
- HostP->Flags |= RC_RUNNING;
- }
- /*
- ** MPX always uses a poller. This is actually patched into the system
- ** configuration and called directly from each clock tick.
- **
- */
- p->RIOPolling = 1;
-
- p->RIOSystemUp++;
-
- rio_dprintk(RIO_DEBUG_BOOT, "Done everything %x\n", HostP->Ivec);
- func_exit();
- return 0;
-}
-
-
-
-/**
- * RIOBootRup - Boot an RTA
- * @p: rio we are working with
- * @Rup: Rup number
- * @HostP: host object
- * @PacketP: packet to use
- *
- * If we have successfully processed this boot, then
- * return 1. If we haven't, then return 0.
- */
-
-int RIOBootRup(struct rio_info *p, unsigned int Rup, struct Host *HostP, struct PKT __iomem *PacketP)
-{
- struct PktCmd __iomem *PktCmdP = (struct PktCmd __iomem *) PacketP->data;
- struct PktCmd_M *PktReplyP;
- struct CmdBlk *CmdBlkP;
- unsigned int sequence;
-
- /*
- ** If we haven't been told what to boot, we can't boot it.
- */
- if (p->RIONumBootPkts == 0) {
- rio_dprintk(RIO_DEBUG_BOOT, "No RTA code to download yet\n");
- return 0;
- }
-
- /*
- ** Special case of boot completed - if we get one of these then we
- ** don't need a command block. For all other cases we do, so handle
- ** this first and then get a command block, then handle every other
- ** case, relinquishing the command block if disaster strikes!
- */
- if ((readb(&PacketP->len) & PKT_CMD_BIT) && (readb(&PktCmdP->Command) == BOOT_COMPLETED))
- return RIOBootComplete(p, HostP, Rup, PktCmdP);
-
- /*
- ** Try to allocate a command block. This is in kernel space
- */
- if (!(CmdBlkP = RIOGetCmdBlk())) {
- rio_dprintk(RIO_DEBUG_BOOT, "No command blocks to boot RTA! come back later.\n");
- return 0;
- }
-
- /*
- ** Fill in the default info on the command block
- */
- CmdBlkP->Packet.dest_unit = Rup < (unsigned short) MAX_RUP ? Rup : 0;
- CmdBlkP->Packet.dest_port = BOOT_RUP;
- CmdBlkP->Packet.src_unit = 0;
- CmdBlkP->Packet.src_port = BOOT_RUP;
-
- CmdBlkP->PreFuncP = CmdBlkP->PostFuncP = NULL;
- PktReplyP = (struct PktCmd_M *) CmdBlkP->Packet.data;
-
- /*
- ** process COMMANDS on the boot rup!
- */
- if (readb(&PacketP->len) & PKT_CMD_BIT) {
- /*
- ** We only expect one type of command - a BOOT_REQUEST!
- */
- if (readb(&PktCmdP->Command) != BOOT_REQUEST) {
- rio_dprintk(RIO_DEBUG_BOOT, "Unexpected command %d on BOOT RUP %d of host %Zd\n", readb(&PktCmdP->Command), Rup, HostP - p->RIOHosts);
- RIOFreeCmdBlk(CmdBlkP);
- return 1;
- }
-
- /*
- ** Build a Boot Sequence command block
- **
- ** We no longer need to use "Boot Mode", we'll always allow
- ** boot requests - the boot will not complete if the device
- ** appears in the bindings table.
- **
- ** We'll just (always) set the command field in packet reply
- ** to allow an attempted boot sequence :
- */
- PktReplyP->Command = BOOT_SEQUENCE;
-
- PktReplyP->BootSequence.NumPackets = p->RIONumBootPkts;
- PktReplyP->BootSequence.LoadBase = p->RIOConf.RtaLoadBase;
- PktReplyP->BootSequence.CodeSize = p->RIOBootCount;
-
- CmdBlkP->Packet.len = BOOT_SEQUENCE_LEN | PKT_CMD_BIT;
-
- memcpy((void *) &CmdBlkP->Packet.data[BOOT_SEQUENCE_LEN], "BOOT", 4);
-
- rio_dprintk(RIO_DEBUG_BOOT, "Boot RTA on Host %Zd Rup %d - %d (0x%x) packets to 0x%x\n", HostP - p->RIOHosts, Rup, p->RIONumBootPkts, p->RIONumBootPkts, p->RIOConf.RtaLoadBase);
-
- /*
- ** If this host is in slave mode, send the RTA an invalid boot
- ** sequence command block to force it to kill the boot. We wait
- ** for half a second before sending this packet to prevent the RTA
- ** attempting to boot too often. The master host should then grab
- ** the RTA and make it its own.
- */
- p->RIOBooting++;
- RIOQueueCmdBlk(HostP, Rup, CmdBlkP);
- return 1;
- }
-
- /*
- ** It is a request for boot data.
- */
- sequence = readw(&PktCmdP->Sequence);
-
- rio_dprintk(RIO_DEBUG_BOOT, "Boot block %d on Host %Zd Rup%d\n", sequence, HostP - p->RIOHosts, Rup);
-
- if (sequence >= p->RIONumBootPkts) {
- rio_dprintk(RIO_DEBUG_BOOT, "Got a request for packet %d, max is %d\n", sequence, p->RIONumBootPkts);
- }
-
- PktReplyP->Sequence = sequence;
- memcpy(PktReplyP->BootData, p->RIOBootPackets[p->RIONumBootPkts - sequence - 1], RTA_BOOT_DATA_SIZE);
- CmdBlkP->Packet.len = PKT_MAX_DATA_LEN;
- RIOQueueCmdBlk(HostP, Rup, CmdBlkP);
- return 1;
-}
-
-/**
- * RIOBootComplete - RTA boot is done
- * @p: RIO we are working with
- * @HostP: Host structure
- * @Rup: RUP being used
- * @PktCmdP: Packet command that was used
- *
- * This function is called when an RTA has been booted.
- * If booted by a host, HostP->HostUniqueNum is the booting host.
- * If booted by an RTA, HostP->Mapping[Rup].RtaUniqueNum is the booting RTA.
- * RtaUniq is the booted RTA.
- */
-
-static int RIOBootComplete(struct rio_info *p, struct Host *HostP, unsigned int Rup, struct PktCmd __iomem *PktCmdP)
-{
- struct Map *MapP = NULL;
- struct Map *MapP2 = NULL;
- int Flag;
- int found;
- int host, rta;
- int EmptySlot = -1;
- int entry, entry2;
- char *MyType, *MyName;
- unsigned int MyLink;
- unsigned short RtaType;
- u32 RtaUniq = (readb(&PktCmdP->UniqNum[0])) + (readb(&PktCmdP->UniqNum[1]) << 8) + (readb(&PktCmdP->UniqNum[2]) << 16) + (readb(&PktCmdP->UniqNum[3]) << 24);
-
- p->RIOBooting = 0;
-
- rio_dprintk(RIO_DEBUG_BOOT, "RTA Boot completed - BootInProgress now %d\n", p->RIOBooting);
-
- /*
- ** Determine type of unit (16/8 port RTA).
- */
-
- RtaType = GetUnitType(RtaUniq);
- if (Rup >= (unsigned short) MAX_RUP)
- rio_dprintk(RIO_DEBUG_BOOT, "RIO: Host %s has booted an RTA(%d) on link %c\n", HostP->Name, 8 * RtaType, readb(&PktCmdP->LinkNum) + 'A');
- else
- rio_dprintk(RIO_DEBUG_BOOT, "RIO: RTA %s has booted an RTA(%d) on link %c\n", HostP->Mapping[Rup].Name, 8 * RtaType, readb(&PktCmdP->LinkNum) + 'A');
-
- rio_dprintk(RIO_DEBUG_BOOT, "UniqNum is 0x%x\n", RtaUniq);
-
- if (RtaUniq == 0x00000000 || RtaUniq == 0xffffffff) {
- rio_dprintk(RIO_DEBUG_BOOT, "Illegal RTA Uniq Number\n");
- return 1;
- }
-
- /*
- ** If this RTA has just booted an RTA which doesn't belong to this
- ** system, or the system is in slave mode, do not attempt to create
- ** a new table entry for it.
- */
-
- if (!RIOBootOk(p, HostP, RtaUniq)) {
- MyLink = readb(&PktCmdP->LinkNum);
- if (Rup < (unsigned short) MAX_RUP) {
- /*
- ** RtaUniq was clone booted (by this RTA). Instruct this RTA
- ** to hold off further attempts to boot on this link for 30
- ** seconds.
- */
- if (RIOSuspendBootRta(HostP, HostP->Mapping[Rup].ID, MyLink)) {
- rio_dprintk(RIO_DEBUG_BOOT, "RTA failed to suspend booting on link %c\n", 'A' + MyLink);
- }
- } else
- /*
- ** RtaUniq was booted by this host. Set the booting link
- ** to hold off for 30 seconds to give another unit a
- ** chance to boot it.
- */
- writew(30, &HostP->LinkStrP[MyLink].WaitNoBoot);
- rio_dprintk(RIO_DEBUG_BOOT, "RTA %x not owned - suspend booting down link %c on unit %x\n", RtaUniq, 'A' + MyLink, HostP->Mapping[Rup].RtaUniqueNum);
- return 1;
- }
-
- /*
- ** Check for a SLOT_IN_USE entry for this RTA attached to the
- ** current host card in the driver table.
- **
- ** If it exists, make a note that we have booted it. Other parts of
- ** the driver are interested in this information at a later date,
- ** in particular when the booting RTA asks for an ID for this unit,
- ** we must have set the BOOTED flag, and the NEWBOOT flag is used
- ** to force an open on any ports that were previously open on this
- ** unit.
- */
- for (entry = 0; entry < MAX_RUP; entry++) {
- unsigned int sysport;
-
- if ((HostP->Mapping[entry].Flags & SLOT_IN_USE) && (HostP->Mapping[entry].RtaUniqueNum == RtaUniq)) {
- HostP->Mapping[entry].Flags |= RTA_BOOTED | RTA_NEWBOOT;
- if ((sysport = HostP->Mapping[entry].SysPort) != NO_PORT) {
- if (sysport < p->RIOFirstPortsBooted)
- p->RIOFirstPortsBooted = sysport;
- if (sysport > p->RIOLastPortsBooted)
- p->RIOLastPortsBooted = sysport;
- /*
- ** For a 16 port RTA, check the second bank of 8 ports
- */
- if (RtaType == TYPE_RTA16) {
- entry2 = HostP->Mapping[entry].ID2 - 1;
- HostP->Mapping[entry2].Flags |= RTA_BOOTED | RTA_NEWBOOT;
- sysport = HostP->Mapping[entry2].SysPort;
- if (sysport < p->RIOFirstPortsBooted)
- p->RIOFirstPortsBooted = sysport;
- if (sysport > p->RIOLastPortsBooted)
- p->RIOLastPortsBooted = sysport;
- }
- }
- if (RtaType == TYPE_RTA16)
- rio_dprintk(RIO_DEBUG_BOOT, "RTA will be given IDs %d+%d\n", entry + 1, entry2 + 1);
- else
- rio_dprintk(RIO_DEBUG_BOOT, "RTA will be given ID %d\n", entry + 1);
- return 1;
- }
- }
-
- rio_dprintk(RIO_DEBUG_BOOT, "RTA not configured for this host\n");
-
- if (Rup >= (unsigned short) MAX_RUP) {
- /*
- ** It was a host that did the booting
- */
- MyType = "Host";
- MyName = HostP->Name;
- } else {
- /*
- ** It was an RTA that did the booting
- */
- MyType = "RTA";
- MyName = HostP->Mapping[Rup].Name;
- }
- MyLink = readb(&PktCmdP->LinkNum);
-
- /*
- ** There is no SLOT_IN_USE entry for this RTA attached to the current
- ** host card in the driver table.
- **
- ** Check for a SLOT_TENTATIVE entry for this RTA attached to the
- ** current host card in the driver table.
- **
- ** If we find one, then we re-use that slot.
- */
- for (entry = 0; entry < MAX_RUP; entry++) {
- if ((HostP->Mapping[entry].Flags & SLOT_TENTATIVE) && (HostP->Mapping[entry].RtaUniqueNum == RtaUniq)) {
- if (RtaType == TYPE_RTA16) {
- entry2 = HostP->Mapping[entry].ID2 - 1;
- if ((HostP->Mapping[entry2].Flags & SLOT_TENTATIVE) && (HostP->Mapping[entry2].RtaUniqueNum == RtaUniq))
- rio_dprintk(RIO_DEBUG_BOOT, "Found previous tentative slots (%d+%d)\n", entry, entry2);
- else
- continue;
- } else
- rio_dprintk(RIO_DEBUG_BOOT, "Found previous tentative slot (%d)\n", entry);
- if (!p->RIONoMessage)
- printk("RTA connected to %s '%s' (%c) not configured.\n", MyType, MyName, MyLink + 'A');
- return 1;
- }
- }
-
- /*
- ** There is no SLOT_IN_USE or SLOT_TENTATIVE entry for this RTA
- ** attached to the current host card in the driver table.
- **
- ** Check if there is a SLOT_IN_USE or SLOT_TENTATIVE entry on another
- ** host for this RTA in the driver table.
- **
- ** For a SLOT_IN_USE entry on another host, we need to delete the RTA
- ** entry from the other host and add it to this host (using some of
- ** the functions from table.c which do this).
- ** For a SLOT_TENTATIVE entry on another host, we must cope with the
- ** following scenario:
- **
- ** + Plug 8 port RTA into host A. (This creates SLOT_TENTATIVE entry
- ** in table)
- ** + Unplug RTA and plug into host B. (We now have 2 SLOT_TENTATIVE
- ** entries)
- ** + Configure RTA on host B. (This slot now becomes SLOT_IN_USE)
- ** + Unplug RTA and plug back into host A.
- ** + Configure RTA on host A. We now have the same RTA configured
- ** with different ports on two different hosts.
- */
- rio_dprintk(RIO_DEBUG_BOOT, "Have we seen RTA %x before?\n", RtaUniq);
- found = 0;
- Flag = 0; /* Convince the compiler this variable is initialized */
- for (host = 0; !found && (host < p->RIONumHosts); host++) {
- for (rta = 0; rta < MAX_RUP; rta++) {
- if ((p->RIOHosts[host].Mapping[rta].Flags & (SLOT_IN_USE | SLOT_TENTATIVE)) && (p->RIOHosts[host].Mapping[rta].RtaUniqueNum == RtaUniq)) {
- Flag = p->RIOHosts[host].Mapping[rta].Flags;
- MapP = &p->RIOHosts[host].Mapping[rta];
- if (RtaType == TYPE_RTA16) {
- MapP2 = &p->RIOHosts[host].Mapping[MapP->ID2 - 1];
- rio_dprintk(RIO_DEBUG_BOOT, "This RTA is units %d+%d from host %s\n", rta + 1, MapP->ID2, p->RIOHosts[host].Name);
- } else
- rio_dprintk(RIO_DEBUG_BOOT, "This RTA is unit %d from host %s\n", rta + 1, p->RIOHosts[host].Name);
- found = 1;
- break;
- }
- }
- }
-
- /*
- ** There is no SLOT_IN_USE or SLOT_TENTATIVE entry for this RTA
- ** attached to the current host card in the driver table.
- **
- ** If we have not found a SLOT_IN_USE or SLOT_TENTATIVE entry on
- ** another host for this RTA in the driver table...
- **
- ** Check for a SLOT_IN_USE entry for this RTA in the config table.
- */
- if (!MapP) {
- rio_dprintk(RIO_DEBUG_BOOT, "Look for RTA %x in RIOSavedTable\n", RtaUniq);
- for (rta = 0; rta < TOTAL_MAP_ENTRIES; rta++) {
- rio_dprintk(RIO_DEBUG_BOOT, "Check table entry %d (%x)", rta, p->RIOSavedTable[rta].RtaUniqueNum);
-
- if ((p->RIOSavedTable[rta].Flags & SLOT_IN_USE) && (p->RIOSavedTable[rta].RtaUniqueNum == RtaUniq)) {
- MapP = &p->RIOSavedTable[rta];
- Flag = p->RIOSavedTable[rta].Flags;
- if (RtaType == TYPE_RTA16) {
- for (entry2 = rta + 1; entry2 < TOTAL_MAP_ENTRIES; entry2++) {
- if (p->RIOSavedTable[entry2].RtaUniqueNum == RtaUniq)
- break;
- }
- MapP2 = &p->RIOSavedTable[entry2];
- rio_dprintk(RIO_DEBUG_BOOT, "This RTA is from table entries %d+%d\n", rta, entry2);
- } else
- rio_dprintk(RIO_DEBUG_BOOT, "This RTA is from table entry %d\n", rta);
- break;
- }
- }
- }
-
- /*
- ** There is no SLOT_IN_USE or SLOT_TENTATIVE entry for this RTA
- ** attached to the current host card in the driver table.
- **
- ** We may have found a SLOT_IN_USE entry on another host for this
- ** RTA in the config table, or a SLOT_IN_USE or SLOT_TENTATIVE entry
- ** on another host for this RTA in the driver table.
- **
- ** Check the driver table for room to fit this newly discovered RTA.
- ** RIOFindFreeID() first looks for free slots and if it does not
- ** find any free slots it will then attempt to oust any
- ** tentative entry in the table.
- */
- EmptySlot = 1;
- if (RtaType == TYPE_RTA16) {
- if (RIOFindFreeID(p, HostP, &entry, &entry2) == 0) {
- RIODefaultName(p, HostP, entry);
- rio_fill_host_slot(entry, entry2, RtaUniq, HostP);
- EmptySlot = 0;
- }
- } else {
- if (RIOFindFreeID(p, HostP, &entry, NULL) == 0) {
- RIODefaultName(p, HostP, entry);
- rio_fill_host_slot(entry, 0, RtaUniq, HostP);
- EmptySlot = 0;
- }
- }
-
- /*
- ** There is no SLOT_IN_USE or SLOT_TENTATIVE entry for this RTA
- ** attached to the current host card in the driver table.
- **
- ** If we found a SLOT_IN_USE entry on another host for this
- ** RTA in the config or driver table, and there are enough free
- ** slots in the driver table, then we need to move it over and
- ** delete it from the other host.
- ** If we found a SLOT_TENTATIVE entry on another host for this
- ** RTA in the driver table, just delete the other host entry.
- */
- if (EmptySlot == 0) {
- if (MapP) {
- if (Flag & SLOT_IN_USE) {
- rio_dprintk(RIO_DEBUG_BOOT, "This RTA configured on another host - move entry to current host (1)\n");
- HostP->Mapping[entry].SysPort = MapP->SysPort;
- memcpy(HostP->Mapping[entry].Name, MapP->Name, MAX_NAME_LEN);
- HostP->Mapping[entry].Flags = SLOT_IN_USE | RTA_BOOTED | RTA_NEWBOOT;
- RIOReMapPorts(p, HostP, &HostP->Mapping[entry]);
- if (HostP->Mapping[entry].SysPort < p->RIOFirstPortsBooted)
- p->RIOFirstPortsBooted = HostP->Mapping[entry].SysPort;
- if (HostP->Mapping[entry].SysPort > p->RIOLastPortsBooted)
- p->RIOLastPortsBooted = HostP->Mapping[entry].SysPort;
- rio_dprintk(RIO_DEBUG_BOOT, "SysPort %d, Name %s\n", (int) MapP->SysPort, MapP->Name);
- } else {
- rio_dprintk(RIO_DEBUG_BOOT, "This RTA has a tentative entry on another host - delete that entry (1)\n");
- HostP->Mapping[entry].Flags = SLOT_TENTATIVE | RTA_BOOTED | RTA_NEWBOOT;
- }
- if (RtaType == TYPE_RTA16) {
- if (Flag & SLOT_IN_USE) {
- HostP->Mapping[entry2].Flags = SLOT_IN_USE | RTA_BOOTED | RTA_NEWBOOT | RTA16_SECOND_SLOT;
- HostP->Mapping[entry2].SysPort = MapP2->SysPort;
- /*
- ** Map second block of ttys for 16 port RTA
- */
- RIOReMapPorts(p, HostP, &HostP->Mapping[entry2]);
- if (HostP->Mapping[entry2].SysPort < p->RIOFirstPortsBooted)
- p->RIOFirstPortsBooted = HostP->Mapping[entry2].SysPort;
- if (HostP->Mapping[entry2].SysPort > p->RIOLastPortsBooted)
- p->RIOLastPortsBooted = HostP->Mapping[entry2].SysPort;
- rio_dprintk(RIO_DEBUG_BOOT, "SysPort %d, Name %s\n", (int) HostP->Mapping[entry2].SysPort, HostP->Mapping[entry].Name);
- } else
- HostP->Mapping[entry2].Flags = SLOT_TENTATIVE | RTA_BOOTED | RTA_NEWBOOT | RTA16_SECOND_SLOT;
- memset(MapP2, 0, sizeof(struct Map));
- }
- memset(MapP, 0, sizeof(struct Map));
- if (!p->RIONoMessage)
- printk("An orphaned RTA has been adopted by %s '%s' (%c).\n", MyType, MyName, MyLink + 'A');
- } else if (!p->RIONoMessage)
- printk("RTA connected to %s '%s' (%c) not configured.\n", MyType, MyName, MyLink + 'A');
- RIOSetChange(p);
- return 1;
- }
-
- /*
- ** There is no room in the driver table to make an entry for the
- ** booted RTA. Keep a note of its Uniq Num in the overflow table,
-	** so we can ignore its ID requests.
- */
- if (!p->RIONoMessage)
- printk("The RTA connected to %s '%s' (%c) cannot be configured. You cannot configure more than 128 ports to one host card.\n", MyType, MyName, MyLink + 'A');
- for (entry = 0; entry < HostP->NumExtraBooted; entry++) {
- if (HostP->ExtraUnits[entry] == RtaUniq) {
- /*
- ** already got it!
- */
- return 1;
- }
- }
- /*
- ** If there is room, add the unit to the list of extras
- */
- if (HostP->NumExtraBooted < MAX_EXTRA_UNITS)
- HostP->ExtraUnits[HostP->NumExtraBooted++] = RtaUniq;
- return 1;
-}
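
The branching above is easier to see as a single decision: once a free (or oustable) slot exists on this host, what happens next depends on what was previously known about the RTA. A minimal standalone sketch of that decision, using a simplified enum instead of the real SLOT_* flags (prior_entry and adopt_action() are illustrative names, not part of the driver):

/* Sketch only: summarises the adoption logic above with toy types. */
enum prior_entry { PRIOR_NONE, PRIOR_IN_USE, PRIOR_TENTATIVE };

const char *adopt_action(enum prior_entry prior)
{
	switch (prior) {
	case PRIOR_IN_USE:
		/* SLOT_IN_USE elsewhere: copy SysPort and name, remap the
		 * ports onto this host, then clear the old host's entry. */
		return "adopt orphaned RTA on this host";
	case PRIOR_TENTATIVE:
		/* SLOT_TENTATIVE elsewhere: keep this host's new slot
		 * tentative and clear the stale entry on the other host. */
		return "drop stale tentative entry";
	default:
		/* Nothing known about the RTA: leave the new slot tentative
		 * and report it as not configured. */
		return "leave unconfigured";
	}
}
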
-
-
-/*
-** If the RTA or its host appears in the RIOBindTab[] structure then
-** we mustn't boot the RTA and should return 0.
-** This operation is slightly different from the other RIO drivers
-** in that it is designed to work with the new utilities rather than
-** config.rio, and is FAR SIMPLER.
-** We no longer support the RIOBootMode variable. It is all done from the
-** "boot/noboot" field in the rio.cf file.
-*/
-int RIOBootOk(struct rio_info *p, struct Host *HostP, unsigned long RtaUniq)
-{
- int Entry;
- unsigned int HostUniq = HostP->UniqueNum;
-
- /*
- ** Search bindings table for RTA or its parent.
- ** If it exists, return 0, else 1.
- */
- for (Entry = 0; (Entry < MAX_RTA_BINDINGS) && (p->RIOBindTab[Entry] != 0); Entry++) {
- if ((p->RIOBindTab[Entry] == HostUniq) || (p->RIOBindTab[Entry] == RtaUniq))
- return 0;
- }
- return 1;
-}
-
-/*
-** Make an empty slot tentative. If this is a 16 port RTA, make both
-** slots tentative, and mark the second one RTA16_SECOND_SLOT as well.
-*/
-
-void rio_fill_host_slot(int entry, int entry2, unsigned int rta_uniq, struct Host *host)
-{
- int link;
-
- rio_dprintk(RIO_DEBUG_BOOT, "rio_fill_host_slot(%d, %d, 0x%x...)\n", entry, entry2, rta_uniq);
-
- host->Mapping[entry].Flags = (RTA_BOOTED | RTA_NEWBOOT | SLOT_TENTATIVE);
- host->Mapping[entry].SysPort = NO_PORT;
- host->Mapping[entry].RtaUniqueNum = rta_uniq;
- host->Mapping[entry].HostUniqueNum = host->UniqueNum;
- host->Mapping[entry].ID = entry + 1;
- host->Mapping[entry].ID2 = 0;
- if (entry2) {
- host->Mapping[entry2].Flags = (RTA_BOOTED | RTA_NEWBOOT | SLOT_TENTATIVE | RTA16_SECOND_SLOT);
- host->Mapping[entry2].SysPort = NO_PORT;
- host->Mapping[entry2].RtaUniqueNum = rta_uniq;
- host->Mapping[entry2].HostUniqueNum = host->UniqueNum;
- host->Mapping[entry2].Name[0] = '\0';
- host->Mapping[entry2].ID = entry2 + 1;
- host->Mapping[entry2].ID2 = entry + 1;
- host->Mapping[entry].ID2 = entry2 + 1;
- }
- /*
- ** Must set these up, so that utilities show
- ** topology of 16 port RTAs correctly
- */
- for (link = 0; link < LINKS_PER_UNIT; link++) {
- host->Mapping[entry].Topology[link].Unit = ROUTE_DISCONNECT;
- host->Mapping[entry].Topology[link].Link = NO_LINK;
- if (entry2) {
- host->Mapping[entry2].Topology[link].Unit = ROUTE_DISCONNECT;
- host->Mapping[entry2].Topology[link].Link = NO_LINK;
- }
- }
-}
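
For a 16 port RTA the two slots filled above end up cross-linked through ID and ID2: ID is always the slot index plus one, and each half's ID2 holds the other half's ID (an 8 port RTA leaves ID2 at 0). A minimal standalone sketch of just that linking, using a toy struct in place of struct Map (toy_map and link_slots are illustrative names):

#include <stdio.h>

struct toy_map { int ID, ID2; };	/* stand-in for the two fields of interest */

/* Mirrors the ID/ID2 handling in rio_fill_host_slot() above. */
void link_slots(struct toy_map *map, int entry, int entry2)
{
	map[entry].ID = entry + 1;
	map[entry].ID2 = 0;			/* 8 port RTA: no second slot */
	if (entry2) {
		map[entry2].ID = entry2 + 1;
		map[entry2].ID2 = entry + 1;	/* second half -> first half */
		map[entry].ID2 = entry2 + 1;	/* first half -> second half */
	}
}

int main(void)
{
	struct toy_map map[8] = { { 0, 0 } };

	link_slots(map, 2, 5);
	/* Prints "3 6 6 3": slots 2 and 5 now reference each other. */
	printf("%d %d %d %d\n", map[2].ID, map[2].ID2, map[5].ID, map[5].ID2);
	return 0;
}
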
diff --git a/drivers/staging/generic_serial/rio/riocmd.c b/drivers/staging/generic_serial/rio/riocmd.c
deleted file mode 100644
index 61efd538e85..00000000000
--- a/drivers/staging/generic_serial/rio/riocmd.c
+++ /dev/null
@@ -1,939 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** ported from the existing SCO driver source
-**
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : riocmd.c
-** SID : 1.2
-** Last Modified : 11/6/98 10:33:41
-** Retrieved : 11/6/98 10:33:49
-**
-** ident @(#)riocmd.c 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/tty.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/string.h>
-#include <asm/uaccess.h>
-
-#include <linux/termios.h>
-#include <linux/serial.h>
-
-#include <linux/generic_serial.h>
-
-#include "linux_compat.h"
-#include "rio_linux.h"
-#include "pkt.h"
-#include "daemon.h"
-#include "rio.h"
-#include "riospace.h"
-#include "cmdpkt.h"
-#include "map.h"
-#include "rup.h"
-#include "port.h"
-#include "riodrvr.h"
-#include "rioinfo.h"
-#include "func.h"
-#include "errors.h"
-#include "pci.h"
-
-#include "parmmap.h"
-#include "unixrup.h"
-#include "board.h"
-#include "host.h"
-#include "phb.h"
-#include "link.h"
-#include "cmdblk.h"
-#include "route.h"
-#include "cirrus.h"
-
-
-static struct IdentifyRta IdRta;
-static struct KillNeighbour KillUnit;
-
-int RIOFoadRta(struct Host *HostP, struct Map *MapP)
-{
- struct CmdBlk *CmdBlkP;
-
- rio_dprintk(RIO_DEBUG_CMD, "FOAD RTA\n");
-
- CmdBlkP = RIOGetCmdBlk();
-
- if (!CmdBlkP) {
- rio_dprintk(RIO_DEBUG_CMD, "FOAD RTA: GetCmdBlk failed\n");
- return -ENXIO;
- }
-
- CmdBlkP->Packet.dest_unit = MapP->ID;
- CmdBlkP->Packet.dest_port = BOOT_RUP;
- CmdBlkP->Packet.src_unit = 0;
- CmdBlkP->Packet.src_port = BOOT_RUP;
- CmdBlkP->Packet.len = 0x84;
- CmdBlkP->Packet.data[0] = IFOAD;
- CmdBlkP->Packet.data[1] = 0;
- CmdBlkP->Packet.data[2] = IFOAD_MAGIC & 0xFF;
- CmdBlkP->Packet.data[3] = (IFOAD_MAGIC >> 8) & 0xFF;
-
- if (RIOQueueCmdBlk(HostP, MapP->ID - 1, CmdBlkP) == RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_CMD, "FOAD RTA: Failed to queue foad command\n");
- return -EIO;
- }
- return 0;
-}
-
-int RIOZombieRta(struct Host *HostP, struct Map *MapP)
-{
- struct CmdBlk *CmdBlkP;
-
- rio_dprintk(RIO_DEBUG_CMD, "ZOMBIE RTA\n");
-
- CmdBlkP = RIOGetCmdBlk();
-
- if (!CmdBlkP) {
- rio_dprintk(RIO_DEBUG_CMD, "ZOMBIE RTA: GetCmdBlk failed\n");
- return -ENXIO;
- }
-
- CmdBlkP->Packet.dest_unit = MapP->ID;
- CmdBlkP->Packet.dest_port = BOOT_RUP;
- CmdBlkP->Packet.src_unit = 0;
- CmdBlkP->Packet.src_port = BOOT_RUP;
- CmdBlkP->Packet.len = 0x84;
- CmdBlkP->Packet.data[0] = ZOMBIE;
- CmdBlkP->Packet.data[1] = 0;
- CmdBlkP->Packet.data[2] = ZOMBIE_MAGIC & 0xFF;
- CmdBlkP->Packet.data[3] = (ZOMBIE_MAGIC >> 8) & 0xFF;
-
- if (RIOQueueCmdBlk(HostP, MapP->ID - 1, CmdBlkP) == RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_CMD, "ZOMBIE RTA: Failed to queue zombie command\n");
- return -EIO;
- }
- return 0;
-}
-
-int RIOCommandRta(struct rio_info *p, unsigned long RtaUnique, int (*func) (struct Host * HostP, struct Map * MapP))
-{
- unsigned int Host;
-
- rio_dprintk(RIO_DEBUG_CMD, "Command RTA 0x%lx func %p\n", RtaUnique, func);
-
- if (!RtaUnique)
- return (0);
-
- for (Host = 0; Host < p->RIONumHosts; Host++) {
- unsigned int Rta;
- struct Host *HostP = &p->RIOHosts[Host];
-
- for (Rta = 0; Rta < RTAS_PER_HOST; Rta++) {
- struct Map *MapP = &HostP->Mapping[Rta];
-
- if (MapP->RtaUniqueNum == RtaUnique) {
- uint Link;
-
- /*
- ** now, lets just check we have a route to it...
- ** IF the routing stuff is working, then one of the
- ** topology entries for this unit will have a legit
-				** route *somewhere*. We care not where - if it's got
- ** any connections, we can get to it.
- */
- for (Link = 0; Link < LINKS_PER_UNIT; Link++) {
- if (MapP->Topology[Link].Unit <= (u8) MAX_RUP) {
- /*
-						** It's worth trying the operation...
- */
- return (*func) (HostP, MapP);
- }
- }
- }
- }
- }
- return -ENXIO;
-}
-
-
-int RIOIdentifyRta(struct rio_info *p, void __user * arg)
-{
- unsigned int Host;
-
- if (copy_from_user(&IdRta, arg, sizeof(IdRta))) {
- rio_dprintk(RIO_DEBUG_CMD, "RIO_IDENTIFY_RTA copy failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
-
- for (Host = 0; Host < p->RIONumHosts; Host++) {
- unsigned int Rta;
- struct Host *HostP = &p->RIOHosts[Host];
-
- for (Rta = 0; Rta < RTAS_PER_HOST; Rta++) {
- struct Map *MapP = &HostP->Mapping[Rta];
-
- if (MapP->RtaUniqueNum == IdRta.RtaUnique) {
- uint Link;
- /*
- ** now, lets just check we have a route to it...
- ** IF the routing stuff is working, then one of the
- ** topology entries for this unit will have a legit
-				** route *somewhere*. We care not where - if it's got
- ** any connections, we can get to it.
- */
- for (Link = 0; Link < LINKS_PER_UNIT; Link++) {
- if (MapP->Topology[Link].Unit <= (u8) MAX_RUP) {
- /*
-						** It's worth trying the operation...
- */
- struct CmdBlk *CmdBlkP;
-
- rio_dprintk(RIO_DEBUG_CMD, "IDENTIFY RTA\n");
-
- CmdBlkP = RIOGetCmdBlk();
-
- if (!CmdBlkP) {
- rio_dprintk(RIO_DEBUG_CMD, "IDENTIFY RTA: GetCmdBlk failed\n");
- return -ENXIO;
- }
-
- CmdBlkP->Packet.dest_unit = MapP->ID;
- CmdBlkP->Packet.dest_port = BOOT_RUP;
- CmdBlkP->Packet.src_unit = 0;
- CmdBlkP->Packet.src_port = BOOT_RUP;
- CmdBlkP->Packet.len = 0x84;
- CmdBlkP->Packet.data[0] = IDENTIFY;
- CmdBlkP->Packet.data[1] = 0;
- CmdBlkP->Packet.data[2] = IdRta.ID;
-
- if (RIOQueueCmdBlk(HostP, MapP->ID - 1, CmdBlkP) == RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_CMD, "IDENTIFY RTA: Failed to queue command\n");
- return -EIO;
- }
- return 0;
- }
- }
- }
- }
- }
- return -ENOENT;
-}
-
-
-int RIOKillNeighbour(struct rio_info *p, void __user * arg)
-{
- uint Host;
- uint ID;
- struct Host *HostP;
- struct CmdBlk *CmdBlkP;
-
- rio_dprintk(RIO_DEBUG_CMD, "KILL HOST NEIGHBOUR\n");
-
- if (copy_from_user(&KillUnit, arg, sizeof(KillUnit))) {
- rio_dprintk(RIO_DEBUG_CMD, "RIO_KILL_NEIGHBOUR copy failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
-
- if (KillUnit.Link > 3)
- return -ENXIO;
-
- CmdBlkP = RIOGetCmdBlk();
-
- if (!CmdBlkP) {
- rio_dprintk(RIO_DEBUG_CMD, "UFOAD: GetCmdBlk failed\n");
- return -ENXIO;
- }
-
- CmdBlkP->Packet.dest_unit = 0;
- CmdBlkP->Packet.src_unit = 0;
- CmdBlkP->Packet.dest_port = BOOT_RUP;
- CmdBlkP->Packet.src_port = BOOT_RUP;
- CmdBlkP->Packet.len = 0x84;
- CmdBlkP->Packet.data[0] = UFOAD;
- CmdBlkP->Packet.data[1] = KillUnit.Link;
- CmdBlkP->Packet.data[2] = UFOAD_MAGIC & 0xFF;
- CmdBlkP->Packet.data[3] = (UFOAD_MAGIC >> 8) & 0xFF;
-
- for (Host = 0; Host < p->RIONumHosts; Host++) {
- ID = 0;
- HostP = &p->RIOHosts[Host];
-
- if (HostP->UniqueNum == KillUnit.UniqueNum) {
- if (RIOQueueCmdBlk(HostP, RTAS_PER_HOST + KillUnit.Link, CmdBlkP) == RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_CMD, "UFOAD: Failed queue command\n");
- return -EIO;
- }
- return 0;
- }
-
- for (ID = 0; ID < RTAS_PER_HOST; ID++) {
- if (HostP->Mapping[ID].RtaUniqueNum == KillUnit.UniqueNum) {
- CmdBlkP->Packet.dest_unit = ID + 1;
- if (RIOQueueCmdBlk(HostP, ID, CmdBlkP) == RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_CMD, "UFOAD: Failed queue command\n");
- return -EIO;
- }
- return 0;
- }
- }
- }
- RIOFreeCmdBlk(CmdBlkP);
- return -ENXIO;
-}
-
-int RIOSuspendBootRta(struct Host *HostP, int ID, int Link)
-{
- struct CmdBlk *CmdBlkP;
-
- rio_dprintk(RIO_DEBUG_CMD, "SUSPEND BOOT ON RTA ID %d, link %c\n", ID, 'A' + Link);
-
- CmdBlkP = RIOGetCmdBlk();
-
- if (!CmdBlkP) {
- rio_dprintk(RIO_DEBUG_CMD, "SUSPEND BOOT ON RTA: GetCmdBlk failed\n");
- return -ENXIO;
- }
-
- CmdBlkP->Packet.dest_unit = ID;
- CmdBlkP->Packet.dest_port = BOOT_RUP;
- CmdBlkP->Packet.src_unit = 0;
- CmdBlkP->Packet.src_port = BOOT_RUP;
- CmdBlkP->Packet.len = 0x84;
- CmdBlkP->Packet.data[0] = IWAIT;
- CmdBlkP->Packet.data[1] = Link;
- CmdBlkP->Packet.data[2] = IWAIT_MAGIC & 0xFF;
- CmdBlkP->Packet.data[3] = (IWAIT_MAGIC >> 8) & 0xFF;
-
- if (RIOQueueCmdBlk(HostP, ID - 1, CmdBlkP) == RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_CMD, "SUSPEND BOOT ON RTA: Failed to queue iwait command\n");
- return -EIO;
- }
- return 0;
-}
-
-int RIOFoadWakeup(struct rio_info *p)
-{
- int port;
- struct Port *PortP;
- unsigned long flags;
-
- for (port = 0; port < RIO_PORTS; port++) {
- PortP = p->RIOPortp[port];
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- PortP->Config = 0;
- PortP->State = 0;
- PortP->InUse = NOT_INUSE;
- PortP->PortState = 0;
- PortP->FlushCmdBodge = 0;
- PortP->ModemLines = 0;
- PortP->ModemState = 0;
- PortP->CookMode = 0;
- PortP->ParamSem = 0;
- PortP->Mapped = 0;
- PortP->WflushFlag = 0;
- PortP->MagicFlags = 0;
- PortP->RxDataStart = 0;
- PortP->TxBufferIn = 0;
- PortP->TxBufferOut = 0;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- }
- return (0);
-}
-
-/*
-** Incoming command on the COMMAND_RUP to be processed.
-*/
-static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, struct PKT __iomem *PacketP)
-{
- struct PktCmd __iomem *PktCmdP = (struct PktCmd __iomem *)PacketP->data;
- struct Port *PortP;
- struct UnixRup *UnixRupP;
- unsigned short SysPort;
- unsigned short ReportedModemStatus;
- unsigned short rup;
- unsigned short subCommand;
- unsigned long flags;
-
- func_enter();
-
- /*
- ** 16 port RTA note:
- ** Command rup packets coming from the RTA will have pkt->data[1] (which
- ** translates to PktCmdP->PhbNum) set to the host port number for the
- ** particular unit. To access the correct BaseSysPort for a 16 port RTA,
- ** we can use PhbNum to get the rup number for the appropriate 8 port
- ** block (for the first block, this should be equal to 'Rup').
- */
- rup = readb(&PktCmdP->PhbNum) / (unsigned short) PORTS_PER_RTA;
- UnixRupP = &HostP->UnixRups[rup];
- SysPort = UnixRupP->BaseSysPort + (readb(&PktCmdP->PhbNum) % (unsigned short) PORTS_PER_RTA);
- rio_dprintk(RIO_DEBUG_CMD, "Command on rup %d, port %d\n", rup, SysPort);
-
- if (UnixRupP->BaseSysPort == NO_PORT) {
- rio_dprintk(RIO_DEBUG_CMD, "OBSCURE ERROR!\n");
- rio_dprintk(RIO_DEBUG_CMD, "Diagnostics follow. Please WRITE THESE DOWN and report them to Specialix Technical Support\n");
- rio_dprintk(RIO_DEBUG_CMD, "CONTROL information: Host number %Zd, name ``%s''\n", HostP - p->RIOHosts, HostP->Name);
- rio_dprintk(RIO_DEBUG_CMD, "CONTROL information: Rup number 0x%x\n", rup);
-
- if (Rup < (unsigned short) MAX_RUP) {
- rio_dprintk(RIO_DEBUG_CMD, "CONTROL information: This is the RUP for RTA ``%s''\n", HostP->Mapping[Rup].Name);
- } else
- rio_dprintk(RIO_DEBUG_CMD, "CONTROL information: This is the RUP for link ``%c'' of host ``%s''\n", ('A' + Rup - MAX_RUP), HostP->Name);
-
- rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Destination 0x%x:0x%x\n", readb(&PacketP->dest_unit), readb(&PacketP->dest_port));
- rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Source 0x%x:0x%x\n", readb(&PacketP->src_unit), readb(&PacketP->src_port));
- rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Length 0x%x (%d)\n", readb(&PacketP->len), readb(&PacketP->len));
- rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Control 0x%x (%d)\n", readb(&PacketP->control), readb(&PacketP->control));
- rio_dprintk(RIO_DEBUG_CMD, "PACKET information: Check 0x%x (%d)\n", readw(&PacketP->csum), readw(&PacketP->csum));
- rio_dprintk(RIO_DEBUG_CMD, "COMMAND information: Host Port Number 0x%x, " "Command Code 0x%x\n", readb(&PktCmdP->PhbNum), readb(&PktCmdP->Command));
- return 1;
- }
- PortP = p->RIOPortp[SysPort];
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- switch (readb(&PktCmdP->Command)) {
- case RIOC_BREAK_RECEIVED:
- rio_dprintk(RIO_DEBUG_CMD, "Received a break!\n");
- /* If the current line disc. is not multi-threading and
- the current processor is not the default, reset rup_intr
- and return 0 to ensure that the command packet is
- not freed. */
- /* Call tmgr HANGUP HERE */
- /* Fix this later when every thing works !!!! RAMRAJ */
- gs_got_break(&PortP->gs);
- break;
-
- case RIOC_COMPLETE:
- rio_dprintk(RIO_DEBUG_CMD, "Command complete on phb %d host %Zd\n", readb(&PktCmdP->PhbNum), HostP - p->RIOHosts);
- subCommand = 1;
- switch (readb(&PktCmdP->SubCommand)) {
- case RIOC_MEMDUMP:
- rio_dprintk(RIO_DEBUG_CMD, "Memory dump cmd (0x%x) from addr 0x%x\n", readb(&PktCmdP->SubCommand), readw(&PktCmdP->SubAddr));
- break;
- case RIOC_READ_REGISTER:
- rio_dprintk(RIO_DEBUG_CMD, "Read register (0x%x)\n", readw(&PktCmdP->SubAddr));
- p->CdRegister = (readb(&PktCmdP->ModemStatus) & RIOC_MSVR1_HOST);
- break;
- default:
- subCommand = 0;
- break;
- }
- if (subCommand)
- break;
- rio_dprintk(RIO_DEBUG_CMD, "New status is 0x%x was 0x%x\n", readb(&PktCmdP->PortStatus), PortP->PortState);
- if (PortP->PortState != readb(&PktCmdP->PortStatus)) {
- rio_dprintk(RIO_DEBUG_CMD, "Mark status & wakeup\n");
- PortP->PortState = readb(&PktCmdP->PortStatus);
- /* What should we do here ...
- wakeup( &PortP->PortState );
- */
- } else
- rio_dprintk(RIO_DEBUG_CMD, "No change\n");
-
- /* FALLTHROUGH */
- case RIOC_MODEM_STATUS:
- /*
- ** Knock out the tbusy and tstop bits, as these are not relevant
- ** to the check for modem status change (they're just there because
- ** it's a convenient place to put them!).
- */
- ReportedModemStatus = readb(&PktCmdP->ModemStatus);
- if ((PortP->ModemState & RIOC_MSVR1_HOST) ==
- (ReportedModemStatus & RIOC_MSVR1_HOST)) {
- rio_dprintk(RIO_DEBUG_CMD, "Modem status unchanged 0x%x\n", PortP->ModemState);
- /*
- ** Update ModemState just in case tbusy or tstop states have
- ** changed.
- */
- PortP->ModemState = ReportedModemStatus;
- } else {
- rio_dprintk(RIO_DEBUG_CMD, "Modem status change from 0x%x to 0x%x\n", PortP->ModemState, ReportedModemStatus);
- PortP->ModemState = ReportedModemStatus;
-#ifdef MODEM_SUPPORT
- if (PortP->Mapped) {
- /***********************************************************\
- *************************************************************
- *** ***
- *** M O D E M S T A T E C H A N G E ***
- *** ***
- *************************************************************
- \***********************************************************/
- /*
- ** If the device is a modem, then check the modem
- ** carrier.
- */
- if (PortP->gs.port.tty == NULL)
- break;
- if (PortP->gs.port.tty->termios == NULL)
- break;
-
- if (!(PortP->gs.port.tty->termios->c_cflag & CLOCAL) && ((PortP->State & (RIO_MOPEN | RIO_WOPEN)))) {
-
- rio_dprintk(RIO_DEBUG_CMD, "Is there a Carrier?\n");
- /*
- ** Is there a carrier?
- */
- if (PortP->ModemState & RIOC_MSVR1_CD) {
- /*
- ** Has carrier just appeared?
- */
- if (!(PortP->State & RIO_CARR_ON)) {
- rio_dprintk(RIO_DEBUG_CMD, "Carrier just came up.\n");
- PortP->State |= RIO_CARR_ON;
- /*
- ** wakeup anyone in WOPEN
- */
- if (PortP->State & (PORT_ISOPEN | RIO_WOPEN))
- wake_up_interruptible(&PortP->gs.port.open_wait);
- }
- } else {
- /*
- ** Has carrier just dropped?
- */
- if (PortP->State & RIO_CARR_ON) {
- if (PortP->State & (PORT_ISOPEN | RIO_WOPEN | RIO_MOPEN))
- tty_hangup(PortP->gs.port.tty);
- PortP->State &= ~RIO_CARR_ON;
-							rio_dprintk(RIO_DEBUG_CMD, "Carrier just went down\n");
- }
- }
- }
- }
-#endif
- }
- break;
-
- default:
- rio_dprintk(RIO_DEBUG_CMD, "Unknown command %d on CMD_RUP of host %Zd\n", readb(&PktCmdP->Command), HostP - p->RIOHosts);
- break;
- }
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
-
- func_exit();
-
- return 1;
-}
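
The PhbNum arithmetic at the top of the function is the part that trips people up for 16 port RTAs, so here it is with concrete numbers. A minimal standalone sketch, assuming PORTS_PER_RTA is 8 as the comment above implies; phb_to_port() and the example BaseSysPort value are illustrative only:

#include <stdio.h>

#define PORTS_PER_RTA 8		/* assumed, per the 16 port RTA note above */

/* Mirrors the rup/SysPort calculation at the top of RIOCommandRup(). */
void phb_to_port(int phb_num, int base_sys_port, int *rup, int *sys_port)
{
	*rup = phb_num / PORTS_PER_RTA;			/* which 8 port block */
	*sys_port = base_sys_port + (phb_num % PORTS_PER_RTA);
}

int main(void)
{
	int rup, sys_port;

	/* PhbNum 11, and the selected rup's BaseSysPort is 64:
	 * block 1, system port 64 + 3 = 67. */
	phb_to_port(11, 64, &rup, &sys_port);
	printf("rup=%d sys_port=%d\n", rup, sys_port);	/* rup=1 sys_port=67 */
	return 0;
}
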
-
-/*
-** The command mechanism:
-** Each rup has a chain of commands associated with it.
-** This chain is maintained by routines in this file.
-** Periodically we are called and we run a quick check of all the
-** active chains to determine if there is a command to be executed,
-** and if the rup is ready to accept it.
-**
-*/
-
-/*
-** Allocate an empty command block.
-*/
-struct CmdBlk *RIOGetCmdBlk(void)
-{
- struct CmdBlk *CmdBlkP;
-
- CmdBlkP = kzalloc(sizeof(struct CmdBlk), GFP_ATOMIC);
- return CmdBlkP;
-}
-
-/*
-** Free a command block.
-*/
-void RIOFreeCmdBlk(struct CmdBlk *CmdBlkP)
-{
- kfree(CmdBlkP);
-}
-
-/*
-** attach a command block to the list of commands to be performed for
-** a given rup.
-*/
-int RIOQueueCmdBlk(struct Host *HostP, uint Rup, struct CmdBlk *CmdBlkP)
-{
- struct CmdBlk **Base;
- struct UnixRup *UnixRupP;
- unsigned long flags;
-
- if (Rup >= (unsigned short) (MAX_RUP + LINKS_PER_UNIT)) {
- rio_dprintk(RIO_DEBUG_CMD, "Illegal rup number %d in RIOQueueCmdBlk\n", Rup);
- RIOFreeCmdBlk(CmdBlkP);
- return RIO_FAIL;
- }
-
- UnixRupP = &HostP->UnixRups[Rup];
-
- rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
-
- /*
- ** If the RUP is currently inactive, then put the request
- ** straight on the RUP....
- */
- if ((UnixRupP->CmdsWaitingP == NULL) && (UnixRupP->CmdPendingP == NULL) && (readw(&UnixRupP->RupP->txcontrol) == TX_RUP_INACTIVE) && (CmdBlkP->PreFuncP ? (*CmdBlkP->PreFuncP) (CmdBlkP->PreArg, CmdBlkP)
- : 1)) {
- rio_dprintk(RIO_DEBUG_CMD, "RUP inactive-placing command straight on. Cmd byte is 0x%x\n", CmdBlkP->Packet.data[0]);
-
- /*
- ** Whammy! blat that pack!
- */
- HostP->Copy(&CmdBlkP->Packet, RIO_PTR(HostP->Caddr, readw(&UnixRupP->RupP->txpkt)), sizeof(struct PKT));
-
- /*
- ** place command packet on the pending position.
- */
- UnixRupP->CmdPendingP = CmdBlkP;
-
- /*
- ** set the command register
- */
- writew(TX_PACKET_READY, &UnixRupP->RupP->txcontrol);
-
- rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
-
- return 0;
- }
-	rio_dprintk(RIO_DEBUG_CMD, "RUP active - enqueueing\n");
-
- if (UnixRupP->CmdsWaitingP != NULL)
- rio_dprintk(RIO_DEBUG_CMD, "Rup active - command waiting\n");
- if (UnixRupP->CmdPendingP != NULL)
- rio_dprintk(RIO_DEBUG_CMD, "Rup active - command pending\n");
- if (readw(&UnixRupP->RupP->txcontrol) != TX_RUP_INACTIVE)
- rio_dprintk(RIO_DEBUG_CMD, "Rup active - command rup not ready\n");
-
- Base = &UnixRupP->CmdsWaitingP;
-
- rio_dprintk(RIO_DEBUG_CMD, "First try to queue cmdblk %p at %p\n", CmdBlkP, Base);
-
- while (*Base) {
- rio_dprintk(RIO_DEBUG_CMD, "Command cmdblk %p here\n", *Base);
- Base = &((*Base)->NextP);
- rio_dprintk(RIO_DEBUG_CMD, "Now try to queue cmd cmdblk %p at %p\n", CmdBlkP, Base);
- }
-
- rio_dprintk(RIO_DEBUG_CMD, "Will queue cmdblk %p at %p\n", CmdBlkP, Base);
-
- *Base = CmdBlkP;
-
- CmdBlkP->NextP = NULL;
-
- rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
-
- return 0;
-}
-
-/*
-** Here we go - if there is an empty rup, fill it!
-** must be called at splrio() or higher.
-*/
-void RIOPollHostCommands(struct rio_info *p, struct Host *HostP)
-{
- struct CmdBlk *CmdBlkP;
- struct UnixRup *UnixRupP;
- struct PKT __iomem *PacketP;
- unsigned short Rup;
- unsigned long flags;
-
-
- Rup = MAX_RUP + LINKS_PER_UNIT;
-
- do { /* do this loop for each RUP */
- /*
- ** locate the rup we are processing & lock it
- */
- UnixRupP = &HostP->UnixRups[--Rup];
-
- spin_lock_irqsave(&UnixRupP->RupLock, flags);
-
- /*
- ** First check for incoming commands:
- */
- if (readw(&UnixRupP->RupP->rxcontrol) != RX_RUP_INACTIVE) {
- int FreeMe;
-
- PacketP = (struct PKT __iomem *) RIO_PTR(HostP->Caddr, readw(&UnixRupP->RupP->rxpkt));
-
- switch (readb(&PacketP->dest_port)) {
- case BOOT_RUP:
- rio_dprintk(RIO_DEBUG_CMD, "Incoming Boot %s packet '%x'\n", readb(&PacketP->len) & 0x80 ? "Command" : "Data", readb(&PacketP->data[0]));
- rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
- FreeMe = RIOBootRup(p, Rup, HostP, PacketP);
- rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
- break;
-
- case COMMAND_RUP:
- /*
- ** Free the RUP lock as loss of carrier causes a
- ** ttyflush which will (eventually) call another
- ** routine that uses the RUP lock.
- */
- rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
- FreeMe = RIOCommandRup(p, Rup, HostP, PacketP);
- if (readb(&PacketP->data[5]) == RIOC_MEMDUMP) {
- rio_dprintk(RIO_DEBUG_CMD, "Memdump from 0x%x complete\n", readw(&(PacketP->data[6])));
- rio_memcpy_fromio(p->RIOMemDump, &(PacketP->data[8]), 32);
- }
- rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
- break;
-
- case ROUTE_RUP:
- rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
- FreeMe = RIORouteRup(p, Rup, HostP, PacketP);
- rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
- break;
-
- default:
- rio_dprintk(RIO_DEBUG_CMD, "Unknown RUP %d\n", readb(&PacketP->dest_port));
- FreeMe = 1;
- break;
- }
-
- if (FreeMe) {
- rio_dprintk(RIO_DEBUG_CMD, "Free processed incoming command packet\n");
- put_free_end(HostP, PacketP);
-
- writew(RX_RUP_INACTIVE, &UnixRupP->RupP->rxcontrol);
-
- if (readw(&UnixRupP->RupP->handshake) == PHB_HANDSHAKE_SET) {
- rio_dprintk(RIO_DEBUG_CMD, "Handshake rup %d\n", Rup);
- writew(PHB_HANDSHAKE_SET | PHB_HANDSHAKE_RESET, &UnixRupP->RupP->handshake);
- }
- }
- }
-
- /*
- ** IF a command was running on the port,
- ** and it has completed, then tidy it up.
- */
- if ((CmdBlkP = UnixRupP->CmdPendingP) && /* ASSIGN! */
- (readw(&UnixRupP->RupP->txcontrol) == TX_RUP_INACTIVE)) {
- /*
- ** we are idle.
- ** there is a command in pending.
- ** Therefore, this command has finished.
- ** So, wakeup whoever is waiting for it (and tell them
- ** what happened).
- */
- if (CmdBlkP->Packet.dest_port == BOOT_RUP)
- rio_dprintk(RIO_DEBUG_CMD, "Free Boot %s Command Block '%x'\n", CmdBlkP->Packet.len & 0x80 ? "Command" : "Data", CmdBlkP->Packet.data[0]);
-
- rio_dprintk(RIO_DEBUG_CMD, "Command %p completed\n", CmdBlkP);
-
- /*
- ** Clear the Rup lock to prevent mutual exclusion.
- */
- if (CmdBlkP->PostFuncP) {
- rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
- (*CmdBlkP->PostFuncP) (CmdBlkP->PostArg, CmdBlkP);
- rio_spin_lock_irqsave(&UnixRupP->RupLock, flags);
- }
-
- /*
- ** ....clear the pending flag....
- */
- UnixRupP->CmdPendingP = NULL;
-
- /*
- ** ....and return the command block to the freelist.
- */
- RIOFreeCmdBlk(CmdBlkP);
- }
-
- /*
- ** If there is a command for this rup, and the rup
- ** is idle, then process the command
- */
- if ((CmdBlkP = UnixRupP->CmdsWaitingP) && /* ASSIGN! */
- (UnixRupP->CmdPendingP == NULL) && (readw(&UnixRupP->RupP->txcontrol) == TX_RUP_INACTIVE)) {
- /*
- ** if the pre-function is non-zero, call it.
- ** If it returns RIO_FAIL then don't
- ** send this command yet!
- */
- if (!(CmdBlkP->PreFuncP ? (*CmdBlkP->PreFuncP) (CmdBlkP->PreArg, CmdBlkP) : 1)) {
- rio_dprintk(RIO_DEBUG_CMD, "Not ready to start command %p\n", CmdBlkP);
- } else {
- rio_dprintk(RIO_DEBUG_CMD, "Start new command %p Cmd byte is 0x%x\n", CmdBlkP, CmdBlkP->Packet.data[0]);
- /*
- ** Whammy! blat that pack!
- */
- HostP->Copy(&CmdBlkP->Packet, RIO_PTR(HostP->Caddr, readw(&UnixRupP->RupP->txpkt)), sizeof(struct PKT));
-
- /*
- ** remove the command from the rup command queue...
- */
- UnixRupP->CmdsWaitingP = CmdBlkP->NextP;
-
- /*
- ** ...and place it on the pending position.
- */
- UnixRupP->CmdPendingP = CmdBlkP;
-
- /*
- ** set the command register
- */
- writew(TX_PACKET_READY, &UnixRupP->RupP->txcontrol);
-
- /*
- ** the command block will be freed
- ** when the command has been processed.
- */
- }
- }
- spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
- } while (Rup);
-}
-
-int RIOWFlushMark(unsigned long iPortP, struct CmdBlk *CmdBlkP)
-{
- struct Port *PortP = (struct Port *) iPortP;
- unsigned long flags;
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- PortP->WflushFlag++;
- PortP->MagicFlags |= MAGIC_FLUSH;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return RIOUnUse(iPortP, CmdBlkP);
-}
-
-int RIORFlushEnable(unsigned long iPortP, struct CmdBlk *CmdBlkP)
-{
- struct Port *PortP = (struct Port *) iPortP;
- struct PKT __iomem *PacketP;
- unsigned long flags;
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- while (can_remove_receive(&PacketP, PortP)) {
- remove_receive(PortP);
- put_free_end(PortP->HostP, PacketP);
- }
-
- if (readw(&PortP->PhbP->handshake) == PHB_HANDSHAKE_SET) {
- /*
- ** MAGIC! (Basically, handshake the RX buffer, so that
- ** the RTAs upstream can be re-enabled.)
- */
- rio_dprintk(RIO_DEBUG_CMD, "Util: Set RX handshake bit\n");
- writew(PHB_HANDSHAKE_SET | PHB_HANDSHAKE_RESET, &PortP->PhbP->handshake);
- }
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return RIOUnUse(iPortP, CmdBlkP);
-}
-
-int RIOUnUse(unsigned long iPortP, struct CmdBlk *CmdBlkP)
-{
- struct Port *PortP = (struct Port *) iPortP;
- unsigned long flags;
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- rio_dprintk(RIO_DEBUG_CMD, "Decrement in use count for port\n");
-
- if (PortP->InUse) {
- if (--PortP->InUse != NOT_INUSE) {
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return 0;
- }
- }
- /*
- ** While PortP->InUse is set (i.e. a preemptive command has been sent to
- ** the RTA and is awaiting completion), any transmit data is prevented from
- ** being transferred from the write queue into the transmit packets
-	** (add_transmit) and no further transmit interrupt will be sent for that
- ** data. The next interrupt will occur up to 500ms later (RIOIntr is called
- ** twice a second as a safety measure). This was the case when kermit was
- ** used to send data into a RIO port. After each packet was sent, TCFLSH
- ** was called to flush the read queue preemptively. PortP->InUse was
- ** incremented, thereby blocking the 6 byte acknowledgement packet
- ** transmitted back. This acknowledgment hung around for 500ms before
-	** being sent, thus reducing input performance substantially!
- ** When PortP->InUse becomes NOT_INUSE, we must ensure that any data
- ** hanging around in the transmit buffer is sent immediately.
- */
- writew(1, &PortP->HostP->ParmMapP->tx_intr);
- /* What to do here ..
- wakeup( (caddr_t)&(PortP->InUse) );
- */
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return 0;
-}
-
-/*
-**
-** How to use this file:
-**
-** To send a command down a rup, you need to allocate a command block, fill
-** in the packet information, fill in the command number, fill in the pre-
-** and post- functions and arguments, and then add the command block to the
-** queue of command blocks for the port in question. When the port is idle,
-** then the pre-function will be called. If this returns RIO_FAIL then the
-** command will be re-queued and tried again at a later date (probably in one
-** clock tick). If the pre-function returns NOT RIO_FAIL, then the command
-** packet will be queued on the RUP, and the txcontrol field set to the
-** command number. When the txcontrol field has changed from being the
-** command number, then the post-function will be called with the argument
-** specified earlier and a pointer to the command block.
-**
-** To allocate a command block, call RIOGetCmdBlk(). This returns a pointer
-** to the command block structure allocated, or NULL if there aren't any.
-** The block will have been zeroed for you.
-**
-** The structure has the following fields:
-**
-** struct CmdBlk
-** {
-** struct CmdBlk *NextP; ** Pointer to next command block **
-** struct PKT Packet; ** A packet, to copy to the rup **
-** int (*PreFuncP)(); ** The func to call to check if OK **
-** int PreArg; ** The arg for the func **
-** int (*PostFuncP)(); ** The func to call when completed **
-** int PostArg; ** The arg for the func **
-** };
-**
-** You need to fill in ALL fields EXCEPT NextP, which is used to link the
-** blocks together either on the free list or on the Rup list.
-**
-** Packet is an actual packet structure to be filled in with the packet
-** information associated with the command. You need to fill in everything,
-** as the command processor doesn't process the command packet in any way.
-**
-** The PreFuncP is called before the packet is enqueued on the host rup.
-** PreFuncP is called as (*PreFuncP)(PreArg, CmdBlkP);. PreFuncP must
-** return !RIO_FAIL to have the packet queued on the rup, and RIO_FAIL
-** if the packet is NOT to be queued.
-**
-** The PostFuncP is called when the command has completed. It is called
-** as (*PostFuncP)(PostArg, CmdBlkP);. PostFuncP is not expected
-** to return a value. PostFuncP does NOT need to free the command block,
-** as this happens automatically after PostFuncP returns.
-**
-** Once the command block has been filled in, it is attached to the correct
-** queue by calling RIOQueueCmdBlk( HostP, Rup, CmdBlkP ) where HostP is
-** a pointer to the struct Host, Rup is the NUMBER of the rup (NOT a pointer
-** to it!), and CmdBlkP is the pointer to the command block allocated using
-** RIOGetCmdBlk().
-**
-*/
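
Putting those steps together, the skeleton that the command senders earlier in this file (RIOFoadRta(), RIOZombieRta() and friends) follow looks roughly like this. A sketch only, reusing the declarations from riocmd.c above; send_simple_cmd() and its parameters are illustrative, and the pre/post hooks are left unset just as the simple senders leave them:

/* Sketch: allocate a block, fill in the packet, queue it on the RTA's rup. */
static int send_simple_cmd(struct Host *HostP, struct Map *MapP, u8 cmd)
{
	struct CmdBlk *CmdBlkP = RIOGetCmdBlk();	/* zeroed block, or NULL */

	if (!CmdBlkP)
		return -ENXIO;

	CmdBlkP->Packet.dest_unit = MapP->ID;		/* which RTA */
	CmdBlkP->Packet.dest_port = BOOT_RUP;
	CmdBlkP->Packet.src_unit = 0;
	CmdBlkP->Packet.src_port = BOOT_RUP;
	CmdBlkP->Packet.len = 0x84;
	CmdBlkP->Packet.data[0] = cmd;			/* e.g. IFOAD or ZOMBIE */

	/* PreFuncP/PostFuncP (and their args) would be filled in here if the
	 * caller needs a readiness check or a completion callback. */

	if (RIOQueueCmdBlk(HostP, MapP->ID - 1, CmdBlkP) == RIO_FAIL)
		return -EIO;
	return 0;
}
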
diff --git a/drivers/staging/generic_serial/rio/rioctrl.c b/drivers/staging/generic_serial/rio/rioctrl.c
deleted file mode 100644
index 780506326a7..00000000000
--- a/drivers/staging/generic_serial/rio/rioctrl.c
+++ /dev/null
@@ -1,1504 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : rioctrl.c
-** SID : 1.3
-** Last Modified : 11/6/98 10:33:42
-** Retrieved : 11/6/98 10:33:49
-**
-** ident @(#)rioctrl.c 1.3
-**
-** -----------------------------------------------------------------------------
-*/
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/string.h>
-#include <asm/uaccess.h>
-
-#include <linux/termios.h>
-#include <linux/serial.h>
-
-#include <linux/generic_serial.h>
-
-
-#include "linux_compat.h"
-#include "rio_linux.h"
-#include "pkt.h"
-#include "daemon.h"
-#include "rio.h"
-#include "riospace.h"
-#include "cmdpkt.h"
-#include "map.h"
-#include "rup.h"
-#include "port.h"
-#include "riodrvr.h"
-#include "rioinfo.h"
-#include "func.h"
-#include "errors.h"
-#include "pci.h"
-
-#include "parmmap.h"
-#include "unixrup.h"
-#include "board.h"
-#include "host.h"
-#include "phb.h"
-#include "link.h"
-#include "cmdblk.h"
-#include "route.h"
-#include "cirrus.h"
-#include "rioioctl.h"
-
-
-static struct LpbReq LpbReq;
-static struct RupReq RupReq;
-static struct PortReq PortReq;
-static struct HostReq HostReq; /* oh really? global? and no locking? */
-static struct HostDpRam HostDpRam;
-static struct DebugCtrl DebugCtrl;
-static struct Map MapEnt;
-static struct PortSetup PortSetup;
-static struct DownLoad DownLoad;
-static struct SendPack SendPack;
-/* static struct StreamInfo StreamInfo; */
-/* static char modemtable[RIO_PORTS]; */
-static struct SpecialRupCmd SpecialRupCmd;
-static struct PortParams PortParams;
-static struct portStats portStats;
-
-static struct SubCmdStruct {
- ushort Host;
- ushort Rup;
- ushort Port;
- ushort Addr;
-} SubCmd;
-
-struct PortTty {
- uint port;
- struct ttystatics Tty;
-};
-
-static struct PortTty PortTty;
-typedef struct ttystatics TERMIO;
-
-/*
-** This table is used when the config.rio downloads bin code to the
-** driver. We index the table using the product code, 0-F, and call
-** the function pointed to by the entry, passing the information
-** about the boot.
-** The RIOBootCodeUNKNOWN entry is there to politely tell the calling
-** process to bog off.
-*/
-static int
- (*RIOBootTable[MAX_PRODUCT]) (struct rio_info *, struct DownLoad *) = {
- /* 0 */ RIOBootCodeHOST,
- /* Host Card */
- /* 1 */ RIOBootCodeRTA,
- /* RTA */
-};
-
-#define drv_makedev(maj, min) ((((uint) maj & 0xff) << 8) | ((uint) min & 0xff))
-
-static int copy_from_io(void __user *to, void __iomem *from, size_t size)
-{
- void *buf = kmalloc(size, GFP_KERNEL);
- int res = -ENOMEM;
- if (buf) {
- rio_memcpy_fromio(buf, from, size);
- res = copy_to_user(to, buf, size);
- kfree(buf);
- }
- return res;
-}
-
-int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su)
-{
- uint Host; /* leave me unsigned! */
- uint port; /* and me! */
- struct Host *HostP;
- ushort loop;
- int Entry;
- struct Port *PortP;
- struct PKT __iomem *PacketP;
- int retval = 0;
- unsigned long flags;
- void __user *argp = (void __user *)arg;
-
- func_enter();
-
-	/* Convince the compiler that we've initialized these */
- Host = 0;
- PortP = NULL;
-
- rio_dprintk(RIO_DEBUG_CTRL, "control ioctl cmd: 0x%x arg: %p\n", cmd, argp);
-
- switch (cmd) {
- /*
- ** RIO_SET_TIMER
- **
- ** Change the value of the host card interrupt timer.
-	** If the host card number is -1 then all host cards are changed;
- ** otherwise just the specified host card will be changed.
- */
- case RIO_SET_TIMER:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_SET_TIMER to %ldms\n", arg);
- {
- int host, value;
- host = (arg >> 16) & 0x0000FFFF;
- value = arg & 0x0000ffff;
- if (host == -1) {
- for (host = 0; host < p->RIONumHosts; host++) {
- if (p->RIOHosts[host].Flags == RC_RUNNING) {
- writew(value, &p->RIOHosts[host].ParmMapP->timer);
- }
- }
- } else if (host >= p->RIONumHosts) {
- return -EINVAL;
- } else {
- if (p->RIOHosts[host].Flags == RC_RUNNING) {
- writew(value, &p->RIOHosts[host].ParmMapP->timer);
- }
- }
- }
- return 0;
-
- case RIO_FOAD_RTA:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_FOAD_RTA\n");
- return RIOCommandRta(p, arg, RIOFoadRta);
-
- case RIO_ZOMBIE_RTA:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_ZOMBIE_RTA\n");
- return RIOCommandRta(p, arg, RIOZombieRta);
-
- case RIO_IDENTIFY_RTA:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_IDENTIFY_RTA\n");
- return RIOIdentifyRta(p, argp);
-
- case RIO_KILL_NEIGHBOUR:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_KILL_NEIGHBOUR\n");
- return RIOKillNeighbour(p, argp);
-
- case SPECIAL_RUP_CMD:
- {
- struct CmdBlk *CmdBlkP;
-
- rio_dprintk(RIO_DEBUG_CTRL, "SPECIAL_RUP_CMD\n");
- if (copy_from_user(&SpecialRupCmd, argp, sizeof(SpecialRupCmd))) {
- rio_dprintk(RIO_DEBUG_CTRL, "SPECIAL_RUP_CMD copy failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- CmdBlkP = RIOGetCmdBlk();
- if (!CmdBlkP) {
- rio_dprintk(RIO_DEBUG_CTRL, "SPECIAL_RUP_CMD GetCmdBlk failed\n");
- return -ENXIO;
- }
- CmdBlkP->Packet = SpecialRupCmd.Packet;
- if (SpecialRupCmd.Host >= p->RIONumHosts)
- SpecialRupCmd.Host = 0;
- rio_dprintk(RIO_DEBUG_CTRL, "Queue special rup command for host %d rup %d\n", SpecialRupCmd.Host, SpecialRupCmd.RupNum);
- if (RIOQueueCmdBlk(&p->RIOHosts[SpecialRupCmd.Host], SpecialRupCmd.RupNum, CmdBlkP) == RIO_FAIL) {
- printk(KERN_WARNING "rio: FAILED TO QUEUE SPECIAL RUP COMMAND\n");
- }
- return 0;
- }
-
- case RIO_DEBUG_MEM:
- return -EPERM;
-
- case RIO_ALL_MODEM:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_ALL_MODEM\n");
- p->RIOError.Error = IOCTL_COMMAND_UNKNOWN;
- return -EINVAL;
-
- case RIO_GET_TABLE:
- /*
- ** Read the routing table from the device driver to user space
- */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_TABLE\n");
-
- if ((retval = RIOApel(p)) != 0)
- return retval;
-
- if (copy_to_user(argp, p->RIOConnectTable, TOTAL_MAP_ENTRIES * sizeof(struct Map))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_TABLE copy failed\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
-
- {
- int entry;
- rio_dprintk(RIO_DEBUG_CTRL, "*****\nMAP ENTRIES\n");
- for (entry = 0; entry < TOTAL_MAP_ENTRIES; entry++) {
- if ((p->RIOConnectTable[entry].ID == 0) && (p->RIOConnectTable[entry].HostUniqueNum == 0) && (p->RIOConnectTable[entry].RtaUniqueNum == 0))
- continue;
-
- rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.HostUniqueNum = 0x%x\n", entry, p->RIOConnectTable[entry].HostUniqueNum);
- rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.RtaUniqueNum = 0x%x\n", entry, p->RIOConnectTable[entry].RtaUniqueNum);
- rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.ID = 0x%x\n", entry, p->RIOConnectTable[entry].ID);
- rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.ID2 = 0x%x\n", entry, p->RIOConnectTable[entry].ID2);
- rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Flags = 0x%x\n", entry, (int) p->RIOConnectTable[entry].Flags);
- rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.SysPort = 0x%x\n", entry, (int) p->RIOConnectTable[entry].SysPort);
- rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[0].Unit = %x\n", entry, p->RIOConnectTable[entry].Topology[0].Unit);
- rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[0].Link = %x\n", entry, p->RIOConnectTable[entry].Topology[0].Link);
- rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[1].Unit = %x\n", entry, p->RIOConnectTable[entry].Topology[1].Unit);
- rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[1].Link = %x\n", entry, p->RIOConnectTable[entry].Topology[1].Link);
- rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[2].Unit = %x\n", entry, p->RIOConnectTable[entry].Topology[2].Unit);
- rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[2].Link = %x\n", entry, p->RIOConnectTable[entry].Topology[2].Link);
- rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[3].Unit = %x\n", entry, p->RIOConnectTable[entry].Topology[3].Unit);
-				rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Top[3].Link = %x\n", entry, p->RIOConnectTable[entry].Topology[3].Link);
- rio_dprintk(RIO_DEBUG_CTRL, "Map entry %d.Name = %s\n", entry, p->RIOConnectTable[entry].Name);
- }
- rio_dprintk(RIO_DEBUG_CTRL, "*****\nEND MAP ENTRIES\n");
- }
-		p->RIOQuickCheck = NOT_CHANGED;	/* the table has been read */
- return 0;
-
- case RIO_PUT_TABLE:
- /*
- ** Write the routing table to the device driver from user space
- */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_PUT_TABLE\n");
-
- if (!su) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_PUT_TABLE !Root\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if (copy_from_user(&p->RIOConnectTable[0], argp, TOTAL_MAP_ENTRIES * sizeof(struct Map))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_PUT_TABLE copy failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
-/*
-***********************************
- {
- int entry;
- rio_dprint(RIO_DEBUG_CTRL, ("*****\nMAP ENTRIES\n") );
- for ( entry=0; entry<TOTAL_MAP_ENTRIES; entry++ )
- {
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.HostUniqueNum = 0x%x\n", entry, p->RIOConnectTable[entry].HostUniqueNum ) );
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.RtaUniqueNum = 0x%x\n", entry, p->RIOConnectTable[entry].RtaUniqueNum ) );
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.ID = 0x%x\n", entry, p->RIOConnectTable[entry].ID ) );
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.ID2 = 0x%x\n", entry, p->RIOConnectTable[entry].ID2 ) );
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.Flags = 0x%x\n", entry, p->RIOConnectTable[entry].Flags ) );
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.SysPort = 0x%x\n", entry, p->RIOConnectTable[entry].SysPort ) );
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.Top[0].Unit = %b\n", entry, p->RIOConnectTable[entry].Topology[0].Unit ) );
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.Top[0].Link = %b\n", entry, p->RIOConnectTable[entry].Topology[0].Link ) );
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.Top[1].Unit = %b\n", entry, p->RIOConnectTable[entry].Topology[1].Unit ) );
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.Top[1].Link = %b\n", entry, p->RIOConnectTable[entry].Topology[1].Link ) );
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.Top[2].Unit = %b\n", entry, p->RIOConnectTable[entry].Topology[2].Unit ) );
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.Top[2].Link = %b\n", entry, p->RIOConnectTable[entry].Topology[2].Link ) );
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.Top[3].Unit = %b\n", entry, p->RIOConnectTable[entry].Topology[3].Unit ) );
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.Top[4].Link = %b\n", entry, p->RIOConnectTable[entry].Topology[3].Link ) );
- rio_dprint(RIO_DEBUG_CTRL, ("Map entry %d.Name = %s\n", entry, p->RIOConnectTable[entry].Name ) );
- }
- rio_dprint(RIO_DEBUG_CTRL, ("*****\nEND MAP ENTRIES\n") );
- }
-***********************************
-*/
- return RIONewTable(p);
-
- case RIO_GET_BINDINGS:
- /*
- ** Send bindings table, containing unique numbers of RTAs owned
- ** by this system to user space
- */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_BINDINGS\n");
-
- if (!su) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_BINDINGS !Root\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if (copy_to_user(argp, p->RIOBindTab, (sizeof(ulong) * MAX_RTA_BINDINGS))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_BINDINGS copy failed\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return 0;
-
- case RIO_PUT_BINDINGS:
- /*
- ** Receive a bindings table, containing unique numbers of RTAs owned
- ** by this system
- */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_PUT_BINDINGS\n");
-
- if (!su) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_PUT_BINDINGS !Root\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if (copy_from_user(&p->RIOBindTab[0], argp, (sizeof(ulong) * MAX_RTA_BINDINGS))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_PUT_BINDINGS copy failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- return 0;
-
- case RIO_BIND_RTA:
- {
- int EmptySlot = -1;
- /*
- ** Bind this RTA to host, so that it will be booted by
- ** host in 'boot owned RTAs' mode.
- */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_BIND_RTA\n");
-
- if (!su) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_BIND_RTA !Root\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- for (Entry = 0; Entry < MAX_RTA_BINDINGS; Entry++) {
- if ((EmptySlot == -1) && (p->RIOBindTab[Entry] == 0L))
- EmptySlot = Entry;
- else if (p->RIOBindTab[Entry] == arg) {
- /*
- ** Already exists - delete
- */
- p->RIOBindTab[Entry] = 0L;
- rio_dprintk(RIO_DEBUG_CTRL, "Removing Rta %ld from p->RIOBindTab\n", arg);
- return 0;
- }
- }
- /*
-		** Doesn't exist - add
- */
- if (EmptySlot != -1) {
- p->RIOBindTab[EmptySlot] = arg;
- rio_dprintk(RIO_DEBUG_CTRL, "Adding Rta %lx to p->RIOBindTab\n", arg);
- } else {
- rio_dprintk(RIO_DEBUG_CTRL, "p->RIOBindTab full! - Rta %lx not added\n", arg);
- return -ENOMEM;
- }
- return 0;
- }
-
- case RIO_RESUME:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME\n");
- port = arg;
- if ((port < 0) || (port > 511)) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME: Bad port number %d\n", port);
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- PortP = p->RIOPortp[port];
- if (!PortP->Mapped) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME: Port %d not mapped\n", port);
- p->RIOError.Error = PORT_NOT_MAPPED_INTO_SYSTEM;
- return -EINVAL;
- }
- if (!(PortP->State & (RIO_LOPEN | RIO_MOPEN))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME: Port %d not open\n", port);
- return -EINVAL;
- }
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- if (RIOPreemptiveCmd(p, (p->RIOPortp[port]), RIOC_RESUME) ==
- RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME failed\n");
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return -EBUSY;
- } else {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME: Port %d resumed\n", port);
- PortP->State |= RIO_BUSY;
- }
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return retval;
-
- case RIO_ASSIGN_RTA:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_ASSIGN_RTA\n");
- if (!su) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_ASSIGN_RTA !Root\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if (copy_from_user(&MapEnt, argp, sizeof(MapEnt))) {
- rio_dprintk(RIO_DEBUG_CTRL, "Copy from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- return RIOAssignRta(p, &MapEnt);
-
- case RIO_CHANGE_NAME:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_CHANGE_NAME\n");
- if (!su) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_CHANGE_NAME !Root\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if (copy_from_user(&MapEnt, argp, sizeof(MapEnt))) {
- rio_dprintk(RIO_DEBUG_CTRL, "Copy from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- return RIOChangeName(p, &MapEnt);
-
- case RIO_DELETE_RTA:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_DELETE_RTA\n");
- if (!su) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_DELETE_RTA !Root\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if (copy_from_user(&MapEnt, argp, sizeof(MapEnt))) {
- rio_dprintk(RIO_DEBUG_CTRL, "Copy from data space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- return RIODeleteRta(p, &MapEnt);
-
- case RIO_QUICK_CHECK:
- if (copy_to_user(argp, &p->RIORtaDisCons, sizeof(unsigned int))) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return 0;
-
- case RIO_LAST_ERROR:
- if (copy_to_user(argp, &p->RIOError, sizeof(struct Error)))
- return -EFAULT;
- return 0;
-
- case RIO_GET_LOG:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_LOG\n");
- return -EINVAL;
-
- case RIO_GET_MODTYPE:
- if (copy_from_user(&port, argp, sizeof(unsigned int))) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- rio_dprintk(RIO_DEBUG_CTRL, "Get module type for port %d\n", port);
- if (port < 0 || port > 511) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_MODTYPE: Bad port number %d\n", port);
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- PortP = (p->RIOPortp[port]);
- if (!PortP->Mapped) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_MODTYPE: Port %d not mapped\n", port);
- p->RIOError.Error = PORT_NOT_MAPPED_INTO_SYSTEM;
- return -EINVAL;
- }
- /*
- ** Return module type of port
- */
- port = PortP->HostP->UnixRups[PortP->RupNum].ModTypes;
- if (copy_to_user(argp, &port, sizeof(unsigned int))) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return (0);
- case RIO_BLOCK_OPENS:
- rio_dprintk(RIO_DEBUG_CTRL, "Opens block until booted\n");
- for (Entry = 0; Entry < RIO_PORTS; Entry++) {
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- p->RIOPortp[Entry]->WaitUntilBooted = 1;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- }
- return 0;
-
- case RIO_SETUP_PORTS:
- rio_dprintk(RIO_DEBUG_CTRL, "Setup ports\n");
- if (copy_from_user(&PortSetup, argp, sizeof(PortSetup))) {
- p->RIOError.Error = COPYIN_FAILED;
- rio_dprintk(RIO_DEBUG_CTRL, "EFAULT");
- return -EFAULT;
- }
- if (PortSetup.From > PortSetup.To || PortSetup.To >= RIO_PORTS) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- rio_dprintk(RIO_DEBUG_CTRL, "ENXIO");
- return -ENXIO;
- }
- if (PortSetup.XpCps > p->RIOConf.MaxXpCps || PortSetup.XpCps < p->RIOConf.MinXpCps) {
- p->RIOError.Error = XPRINT_CPS_OUT_OF_RANGE;
- rio_dprintk(RIO_DEBUG_CTRL, "EINVAL");
- return -EINVAL;
- }
- if (!p->RIOPortp) {
- printk(KERN_ERR "rio: No p->RIOPortp array!\n");
- rio_dprintk(RIO_DEBUG_CTRL, "No p->RIOPortp array!\n");
- return -EIO;
- }
- rio_dprintk(RIO_DEBUG_CTRL, "entering loop (%d %d)!\n", PortSetup.From, PortSetup.To);
- for (loop = PortSetup.From; loop <= PortSetup.To; loop++) {
- rio_dprintk(RIO_DEBUG_CTRL, "in loop (%d)!\n", loop);
- }
- rio_dprintk(RIO_DEBUG_CTRL, "after loop (%d)!\n", loop);
- rio_dprintk(RIO_DEBUG_CTRL, "Retval:%x\n", retval);
- return retval;
-
- case RIO_GET_PORT_SETUP:
- rio_dprintk(RIO_DEBUG_CTRL, "Get port setup\n");
- if (copy_from_user(&PortSetup, argp, sizeof(PortSetup))) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (PortSetup.From >= RIO_PORTS) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
-
- port = PortSetup.To = PortSetup.From;
- PortSetup.IxAny = (p->RIOPortp[port]->Config & RIO_IXANY) ? 1 : 0;
- PortSetup.IxOn = (p->RIOPortp[port]->Config & RIO_IXON) ? 1 : 0;
- PortSetup.Drain = (p->RIOPortp[port]->Config & RIO_WAITDRAIN) ? 1 : 0;
- PortSetup.Store = p->RIOPortp[port]->Store;
- PortSetup.Lock = p->RIOPortp[port]->Lock;
- PortSetup.XpCps = p->RIOPortp[port]->Xprint.XpCps;
- memcpy(PortSetup.XpOn, p->RIOPortp[port]->Xprint.XpOn, MAX_XP_CTRL_LEN);
- memcpy(PortSetup.XpOff, p->RIOPortp[port]->Xprint.XpOff, MAX_XP_CTRL_LEN);
- PortSetup.XpOn[MAX_XP_CTRL_LEN - 1] = '\0';
- PortSetup.XpOff[MAX_XP_CTRL_LEN - 1] = '\0';
-
- if (copy_to_user(argp, &PortSetup, sizeof(PortSetup))) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- case RIO_GET_PORT_PARAMS:
- rio_dprintk(RIO_DEBUG_CTRL, "Get port params\n");
- if (copy_from_user(&PortParams, argp, sizeof(struct PortParams))) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (PortParams.Port >= RIO_PORTS) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- PortP = (p->RIOPortp[PortParams.Port]);
- PortParams.Config = PortP->Config;
- PortParams.State = PortP->State;
- rio_dprintk(RIO_DEBUG_CTRL, "Port %d\n", PortParams.Port);
-
- if (copy_to_user(argp, &PortParams, sizeof(struct PortParams))) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- case RIO_GET_PORT_TTY:
- rio_dprintk(RIO_DEBUG_CTRL, "Get port tty\n");
- if (copy_from_user(&PortTty, argp, sizeof(struct PortTty))) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (PortTty.port >= RIO_PORTS) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
-
- rio_dprintk(RIO_DEBUG_CTRL, "Port %d\n", PortTty.port);
- PortP = (p->RIOPortp[PortTty.port]);
- if (copy_to_user(argp, &PortTty, sizeof(struct PortTty))) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- case RIO_SET_PORT_TTY:
- if (copy_from_user(&PortTty, argp, sizeof(struct PortTty))) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- rio_dprintk(RIO_DEBUG_CTRL, "Set port %d tty\n", PortTty.port);
- if (PortTty.port >= (ushort) RIO_PORTS) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- PortP = (p->RIOPortp[PortTty.port]);
- RIOParam(PortP, RIOC_CONFIG, PortP->State & RIO_MODEM,
- OK_TO_SLEEP);
- return retval;
-
- case RIO_SET_PORT_PARAMS:
- rio_dprintk(RIO_DEBUG_CTRL, "Set port params\n");
- if (copy_from_user(&PortParams, argp, sizeof(PortParams))) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (PortParams.Port >= (ushort) RIO_PORTS) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- PortP = (p->RIOPortp[PortParams.Port]);
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- PortP->Config = PortParams.Config;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return retval;
-
- case RIO_GET_PORT_STATS:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_GET_PORT_STATS\n");
- if (copy_from_user(&portStats, argp, sizeof(struct portStats))) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (portStats.port < 0 || portStats.port >= RIO_PORTS) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- PortP = (p->RIOPortp[portStats.port]);
- portStats.gather = PortP->statsGather;
- portStats.txchars = PortP->txchars;
- portStats.rxchars = PortP->rxchars;
- portStats.opens = PortP->opens;
- portStats.closes = PortP->closes;
- portStats.ioctls = PortP->ioctls;
- if (copy_to_user(argp, &portStats, sizeof(struct portStats))) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- case RIO_RESET_PORT_STATS:
- port = arg;
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESET_PORT_STATS\n");
- if (port >= RIO_PORTS) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- PortP = (p->RIOPortp[port]);
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- PortP->txchars = 0;
- PortP->rxchars = 0;
- PortP->opens = 0;
- PortP->closes = 0;
- PortP->ioctls = 0;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return retval;
-
- case RIO_GATHER_PORT_STATS:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_GATHER_PORT_STATS\n");
- if (copy_from_user(&portStats, argp, sizeof(struct portStats))) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (portStats.port < 0 || portStats.port >= RIO_PORTS) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- PortP = (p->RIOPortp[portStats.port]);
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- PortP->statsGather = portStats.gather;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return retval;
-
- case RIO_READ_CONFIG:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_READ_CONFIG\n");
- if (copy_to_user(argp, &p->RIOConf, sizeof(struct Conf))) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- case RIO_SET_CONFIG:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_SET_CONFIG\n");
- if (!su) {
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if (copy_from_user(&p->RIOConf, argp, sizeof(struct Conf))) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- /*
-				 ** move a few values around
- */
- for (Host = 0; Host < p->RIONumHosts; Host++)
- if ((p->RIOHosts[Host].Flags & RUN_STATE) == RC_RUNNING)
- writew(p->RIOConf.Timer, &p->RIOHosts[Host].ParmMapP->timer);
- return retval;
-
- case RIO_START_POLLER:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_START_POLLER\n");
- return -EINVAL;
-
- case RIO_STOP_POLLER:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_STOP_POLLER\n");
- if (!su) {
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- p->RIOPolling = NOT_POLLING;
- return retval;
-
- case RIO_SETDEBUG:
- case RIO_GETDEBUG:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_SETDEBUG/RIO_GETDEBUG\n");
- if (copy_from_user(&DebugCtrl, argp, sizeof(DebugCtrl))) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (DebugCtrl.SysPort == NO_PORT) {
- if (cmd == RIO_SETDEBUG) {
- if (!su) {
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- p->rio_debug = DebugCtrl.Debug;
- p->RIODebugWait = DebugCtrl.Wait;
- rio_dprintk(RIO_DEBUG_CTRL, "Set global debug to 0x%x set wait to 0x%x\n", p->rio_debug, p->RIODebugWait);
- } else {
- rio_dprintk(RIO_DEBUG_CTRL, "Get global debug 0x%x wait 0x%x\n", p->rio_debug, p->RIODebugWait);
- DebugCtrl.Debug = p->rio_debug;
- DebugCtrl.Wait = p->RIODebugWait;
- if (copy_to_user(argp, &DebugCtrl, sizeof(DebugCtrl))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_SET/GET DEBUG: bad port number %d\n", DebugCtrl.SysPort);
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- }
- } else if (DebugCtrl.SysPort >= RIO_PORTS && DebugCtrl.SysPort != NO_PORT) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_SET/GET DEBUG: bad port number %d\n", DebugCtrl.SysPort);
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- } else if (cmd == RIO_SETDEBUG) {
- if (!su) {
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- p->RIOPortp[DebugCtrl.SysPort]->Debug = DebugCtrl.Debug;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_SETDEBUG 0x%x\n", p->RIOPortp[DebugCtrl.SysPort]->Debug);
- } else {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_GETDEBUG 0x%x\n", p->RIOPortp[DebugCtrl.SysPort]->Debug);
- DebugCtrl.Debug = p->RIOPortp[DebugCtrl.SysPort]->Debug;
- if (copy_to_user(argp, &DebugCtrl, sizeof(DebugCtrl))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_GETDEBUG: Bad copy to user space\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- }
- return retval;
-
- case RIO_VERSID:
- /*
- ** Enquire about the release and version.
- ** We return MAX_VERSION_LEN bytes, being a
-		 ** textual, null-terminated string.
- */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_VERSID\n");
- if (copy_to_user(argp, RIOVersid(), sizeof(struct rioVersion))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_VERSID: Bad copy to user space (host=%d)\n", Host);
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- case RIO_NUM_HOSTS:
- /*
- ** Enquire as to the number of hosts located
- ** at init time.
- */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_NUM_HOSTS\n");
- if (copy_to_user(argp, &p->RIONumHosts, sizeof(p->RIONumHosts))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_NUM_HOSTS: Bad copy to user space\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- case RIO_HOST_FOAD:
- /*
- ** Kill host. This may not be in the final version...
- */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_FOAD %ld\n", arg);
- if (!su) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_FOAD: Not super user\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- p->RIOHalted = 1;
- p->RIOSystemUp = 0;
-
- for (Host = 0; Host < p->RIONumHosts; Host++) {
- (void) RIOBoardTest(p->RIOHosts[Host].PaddrP, p->RIOHosts[Host].Caddr, p->RIOHosts[Host].Type, p->RIOHosts[Host].Slot);
- memset(&p->RIOHosts[Host].Flags, 0, ((char *) &p->RIOHosts[Host].____end_marker____) - ((char *) &p->RIOHosts[Host].Flags));
- p->RIOHosts[Host].Flags = RC_WAITING;
- }
- RIOFoadWakeup(p);
- p->RIONumBootPkts = 0;
- p->RIOBooting = 0;
- printk("HEEEEELP!\n");
-
- for (loop = 0; loop < RIO_PORTS; loop++) {
- spin_lock_init(&p->RIOPortp[loop]->portSem);
- p->RIOPortp[loop]->InUse = NOT_INUSE;
- }
-
- p->RIOSystemUp = 0;
- return retval;
-
- case RIO_DOWNLOAD:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_DOWNLOAD\n");
- if (!su) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_DOWNLOAD: Not super user\n");
- p->RIOError.Error = NOT_SUPER_USER;
- return -EPERM;
- }
- if (copy_from_user(&DownLoad, argp, sizeof(DownLoad))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_DOWNLOAD: Copy in from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- rio_dprintk(RIO_DEBUG_CTRL, "Copied in download code for product code 0x%x\n", DownLoad.ProductCode);
-
- /*
- ** It is important that the product code is an unsigned object!
- */
- if (DownLoad.ProductCode >= MAX_PRODUCT) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_DOWNLOAD: Bad product code %d passed\n", DownLoad.ProductCode);
- p->RIOError.Error = NO_SUCH_PRODUCT;
- return -ENXIO;
- }
- /*
- ** do something!
- */
- retval = (*(RIOBootTable[DownLoad.ProductCode])) (p, &DownLoad);
- /* <-- Panic */
- p->RIOHalted = 0;
- /*
- ** and go back, content with a job well completed.
- */
- return retval;
-
- case RIO_PARMS:
- {
- unsigned int host;
-
- if (copy_from_user(&host, argp, sizeof(host))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_REQ: Copy in from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- /*
- ** Fetch the parmmap
- */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_PARMS\n");
- if (copy_from_io(argp, p->RIOHosts[host].ParmMapP, sizeof(PARM_MAP))) {
- p->RIOError.Error = COPYOUT_FAILED;
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_PARMS: Copy out to user space failed\n");
- return -EFAULT;
- }
- }
- return retval;
-
- case RIO_HOST_REQ:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_REQ\n");
- if (copy_from_user(&HostReq, argp, sizeof(HostReq))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_REQ: Copy in from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (HostReq.HostNum >= p->RIONumHosts) {
- p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_REQ: Illegal host number %d\n", HostReq.HostNum);
- return -ENXIO;
- }
- rio_dprintk(RIO_DEBUG_CTRL, "Request for host %d\n", HostReq.HostNum);
-
- if (copy_to_user(HostReq.HostP, &p->RIOHosts[HostReq.HostNum], sizeof(struct Host))) {
- p->RIOError.Error = COPYOUT_FAILED;
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_REQ: Bad copy to user space\n");
- return -EFAULT;
- }
- return retval;
-
- case RIO_HOST_DPRAM:
- rio_dprintk(RIO_DEBUG_CTRL, "Request for DPRAM\n");
- if (copy_from_user(&HostDpRam, argp, sizeof(HostDpRam))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_DPRAM: Copy in from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (HostDpRam.HostNum >= p->RIONumHosts) {
- p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_DPRAM: Illegal host number %d\n", HostDpRam.HostNum);
- return -ENXIO;
- }
- rio_dprintk(RIO_DEBUG_CTRL, "Request for host %d\n", HostDpRam.HostNum);
-
- if (p->RIOHosts[HostDpRam.HostNum].Type == RIO_PCI) {
- int off;
- /* It's hardware like this that really gets on my tits. */
- static unsigned char copy[sizeof(struct DpRam)];
- for (off = 0; off < sizeof(struct DpRam); off++)
- copy[off] = readb(p->RIOHosts[HostDpRam.HostNum].Caddr + off);
- if (copy_to_user(HostDpRam.DpRamP, copy, sizeof(struct DpRam))) {
- p->RIOError.Error = COPYOUT_FAILED;
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_DPRAM: Bad copy to user space\n");
- return -EFAULT;
- }
- } else if (copy_from_io(HostDpRam.DpRamP, p->RIOHosts[HostDpRam.HostNum].Caddr, sizeof(struct DpRam))) {
- p->RIOError.Error = COPYOUT_FAILED;
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_DPRAM: Bad copy to user space\n");
- return -EFAULT;
- }
- return retval;
-
- case RIO_SET_BUSY:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_SET_BUSY\n");
- if (arg > 511) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_SET_BUSY: Bad port number %ld\n", arg);
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- p->RIOPortp[arg]->State |= RIO_BUSY;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return retval;
-
- case RIO_HOST_PORT:
- /*
-		 ** The daemon wants port information
- ** (probably for debug reasons)
- */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_PORT\n");
- if (copy_from_user(&PortReq, argp, sizeof(PortReq))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_PORT: Copy in from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
-
- if (PortReq.SysPort >= RIO_PORTS) { /* SysPort is unsigned */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_PORT: Illegal port number %d\n", PortReq.SysPort);
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- rio_dprintk(RIO_DEBUG_CTRL, "Request for port %d\n", PortReq.SysPort);
- if (copy_to_user(PortReq.PortP, p->RIOPortp[PortReq.SysPort], sizeof(struct Port))) {
- p->RIOError.Error = COPYOUT_FAILED;
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_PORT: Bad copy to user space\n");
- return -EFAULT;
- }
- return retval;
-
- case RIO_HOST_RUP:
- /*
-		 ** The daemon wants rup information
- ** (probably for debug reasons)
- */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_RUP\n");
- if (copy_from_user(&RupReq, argp, sizeof(RupReq))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_RUP: Copy in from user space failed\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (RupReq.HostNum >= p->RIONumHosts) { /* host is unsigned */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_RUP: Illegal host number %d\n", RupReq.HostNum);
- p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- if (RupReq.RupNum >= MAX_RUP + LINKS_PER_UNIT) { /* eek! */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_RUP: Illegal rup number %d\n", RupReq.RupNum);
- p->RIOError.Error = RUP_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- HostP = &p->RIOHosts[RupReq.HostNum];
-
- if ((HostP->Flags & RUN_STATE) != RC_RUNNING) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_RUP: Host %d not running\n", RupReq.HostNum);
- p->RIOError.Error = HOST_NOT_RUNNING;
- return -EIO;
- }
- rio_dprintk(RIO_DEBUG_CTRL, "Request for rup %d from host %d\n", RupReq.RupNum, RupReq.HostNum);
-
- if (copy_from_io(RupReq.RupP, HostP->UnixRups[RupReq.RupNum].RupP, sizeof(struct RUP))) {
- p->RIOError.Error = COPYOUT_FAILED;
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_RUP: Bad copy to user space\n");
- return -EFAULT;
- }
- return retval;
-
- case RIO_HOST_LPB:
- /*
-		 ** The daemon wants lpb information
- ** (probably for debug reasons)
- */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_LPB\n");
- if (copy_from_user(&LpbReq, argp, sizeof(LpbReq))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_LPB: Bad copy from user space\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (LpbReq.Host >= p->RIONumHosts) { /* host is unsigned */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_LPB: Illegal host number %d\n", LpbReq.Host);
- p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
- if (LpbReq.Link >= LINKS_PER_UNIT) { /* eek! */
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_LPB: Illegal link number %d\n", LpbReq.Link);
- p->RIOError.Error = LINK_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- HostP = &p->RIOHosts[LpbReq.Host];
-
- if ((HostP->Flags & RUN_STATE) != RC_RUNNING) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_LPB: Host %d not running\n", LpbReq.Host);
- p->RIOError.Error = HOST_NOT_RUNNING;
- return -EIO;
- }
- rio_dprintk(RIO_DEBUG_CTRL, "Request for lpb %d from host %d\n", LpbReq.Link, LpbReq.Host);
-
- if (copy_from_io(LpbReq.LpbP, &HostP->LinkStrP[LpbReq.Link], sizeof(struct LPB))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_HOST_LPB: Bad copy to user space\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return retval;
-
- /*
-		 ** Here are 3 ioctls that allow us to change the way in which
-		 ** rio logs errors: send them just to syslog, to both syslog
-		 ** and console, or to just the console.
- **
- ** See RioStrBuf() in util.c for the other half.
- */
- case RIO_SYSLOG_ONLY:
- p->RIOPrintLogState = PRINT_TO_LOG; /* Just syslog */
- return 0;
-
- case RIO_SYSLOG_CONS:
- p->RIOPrintLogState = PRINT_TO_LOG_CONS; /* syslog and console */
- return 0;
-
- case RIO_CONS_ONLY:
- p->RIOPrintLogState = PRINT_TO_CONS; /* Just console */
- return 0;
-
- case RIO_SIGNALS_ON:
- if (p->RIOSignalProcess) {
- p->RIOError.Error = SIGNALS_ALREADY_SET;
- return -EBUSY;
- }
- /* FIXME: PID tracking */
- p->RIOSignalProcess = current->pid;
- p->RIOPrintDisabled = DONT_PRINT;
- return retval;
-
- case RIO_SIGNALS_OFF:
- if (p->RIOSignalProcess != current->pid) {
- p->RIOError.Error = NOT_RECEIVING_PROCESS;
- return -EPERM;
- }
- rio_dprintk(RIO_DEBUG_CTRL, "Clear signal process to zero\n");
- p->RIOSignalProcess = 0;
- return retval;
-
- case RIO_SET_BYTE_MODE:
- for (Host = 0; Host < p->RIONumHosts; Host++)
- if (p->RIOHosts[Host].Type == RIO_AT)
- p->RIOHosts[Host].Mode &= ~WORD_OPERATION;
- return retval;
-
- case RIO_SET_WORD_MODE:
- for (Host = 0; Host < p->RIONumHosts; Host++)
- if (p->RIOHosts[Host].Type == RIO_AT)
- p->RIOHosts[Host].Mode |= WORD_OPERATION;
- return retval;
-
- case RIO_SET_FAST_BUS:
- for (Host = 0; Host < p->RIONumHosts; Host++)
- if (p->RIOHosts[Host].Type == RIO_AT)
- p->RIOHosts[Host].Mode |= FAST_AT_BUS;
- return retval;
-
- case RIO_SET_SLOW_BUS:
- for (Host = 0; Host < p->RIONumHosts; Host++)
- if (p->RIOHosts[Host].Type == RIO_AT)
- p->RIOHosts[Host].Mode &= ~FAST_AT_BUS;
- return retval;
-
- case RIO_MAP_B50_TO_50:
- case RIO_MAP_B50_TO_57600:
- case RIO_MAP_B110_TO_110:
- case RIO_MAP_B110_TO_115200:
- rio_dprintk(RIO_DEBUG_CTRL, "Baud rate mapping\n");
- port = arg;
- if (port < 0 || port > 511) {
- rio_dprintk(RIO_DEBUG_CTRL, "Baud rate mapping: Bad port number %d\n", port);
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- switch (cmd) {
- case RIO_MAP_B50_TO_50:
- p->RIOPortp[port]->Config |= RIO_MAP_50_TO_50;
- break;
- case RIO_MAP_B50_TO_57600:
- p->RIOPortp[port]->Config &= ~RIO_MAP_50_TO_50;
- break;
- case RIO_MAP_B110_TO_110:
- p->RIOPortp[port]->Config |= RIO_MAP_110_TO_110;
- break;
- case RIO_MAP_B110_TO_115200:
- p->RIOPortp[port]->Config &= ~RIO_MAP_110_TO_110;
- break;
- }
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return retval;
-
- case RIO_STREAM_INFO:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_STREAM_INFO\n");
- return -EINVAL;
-
- case RIO_SEND_PACKET:
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_SEND_PACKET\n");
- if (copy_from_user(&SendPack, argp, sizeof(SendPack))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_SEND_PACKET: Bad copy from user space\n");
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- if (SendPack.PortNum >= 128) {
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -ENXIO;
- }
-
- PortP = p->RIOPortp[SendPack.PortNum];
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- if (!can_add_transmit(&PacketP, PortP)) {
- p->RIOError.Error = UNIT_IS_IN_USE;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return -ENOSPC;
- }
-
- for (loop = 0; loop < (ushort) (SendPack.Len & 127); loop++)
- writeb(SendPack.Data[loop], &PacketP->data[loop]);
-
- writeb(SendPack.Len, &PacketP->len);
-
- add_transmit(PortP);
- /*
- ** Count characters transmitted for port statistics reporting
- */
- if (PortP->statsGather)
- PortP->txchars += (SendPack.Len & 127);
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return retval;
-
- case RIO_NO_MESG:
- if (su)
- p->RIONoMessage = 1;
- return su ? 0 : -EPERM;
-
- case RIO_MESG:
- if (su)
- p->RIONoMessage = 0;
- return su ? 0 : -EPERM;
-
- case RIO_WHAT_MESG:
- if (copy_to_user(argp, &p->RIONoMessage, sizeof(p->RIONoMessage))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_WHAT_MESG: Bad copy to user space\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return 0;
-
- case RIO_MEM_DUMP:
- if (copy_from_user(&SubCmd, argp, sizeof(struct SubCmdStruct))) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_MEM_DUMP host %d rup %d addr %x\n", SubCmd.Host, SubCmd.Rup, SubCmd.Addr);
-
- if (SubCmd.Rup >= MAX_RUP + LINKS_PER_UNIT) {
- p->RIOError.Error = RUP_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
-
- if (SubCmd.Host >= p->RIONumHosts) {
- p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
-
- port = p->RIOHosts[SubCmd.Host].UnixRups[SubCmd.Rup].BaseSysPort;
-
- PortP = p->RIOPortp[port];
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- if (RIOPreemptiveCmd(p, PortP, RIOC_MEMDUMP) == RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_MEM_DUMP failed\n");
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return -EBUSY;
- } else
- PortP->State |= RIO_BUSY;
-
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- if (copy_to_user(argp, p->RIOMemDump, MEMDUMP_SIZE)) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_MEM_DUMP copy failed\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return 0;
-
- case RIO_TICK:
- if (arg >= p->RIONumHosts)
- return -EINVAL;
- rio_dprintk(RIO_DEBUG_CTRL, "Set interrupt for host %ld\n", arg);
- writeb(0xFF, &p->RIOHosts[arg].SetInt);
- return 0;
-
- case RIO_TOCK:
- if (arg >= p->RIONumHosts)
- return -EINVAL;
- rio_dprintk(RIO_DEBUG_CTRL, "Clear interrupt for host %ld\n", arg);
- writeb(0xFF, &p->RIOHosts[arg].ResetInt);
- return 0;
-
- case RIO_READ_CHECK:
- /* Check reads for pkts with data[0] the same */
- p->RIOReadCheck = !p->RIOReadCheck;
- if (copy_to_user(argp, &p->RIOReadCheck, sizeof(unsigned int))) {
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return 0;
-
- case RIO_READ_REGISTER:
- if (copy_from_user(&SubCmd, argp, sizeof(struct SubCmdStruct))) {
- p->RIOError.Error = COPYIN_FAILED;
- return -EFAULT;
- }
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_READ_REGISTER host %d rup %d port %d reg %x\n", SubCmd.Host, SubCmd.Rup, SubCmd.Port, SubCmd.Addr);
-
- if (SubCmd.Port > 511) {
- rio_dprintk(RIO_DEBUG_CTRL, "Baud rate mapping: Bad port number %d\n", SubCmd.Port);
- p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
-
- if (SubCmd.Rup >= MAX_RUP + LINKS_PER_UNIT) {
- p->RIOError.Error = RUP_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
-
- if (SubCmd.Host >= p->RIONumHosts) {
- p->RIOError.Error = HOST_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
-
- port = p->RIOHosts[SubCmd.Host].UnixRups[SubCmd.Rup].BaseSysPort + SubCmd.Port;
- PortP = p->RIOPortp[port];
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- if (RIOPreemptiveCmd(p, PortP, RIOC_READ_REGISTER) ==
- RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_READ_REGISTER failed\n");
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return -EBUSY;
- } else
- PortP->State |= RIO_BUSY;
-
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- if (copy_to_user(argp, &p->CdRegister, sizeof(unsigned int))) {
- rio_dprintk(RIO_DEBUG_CTRL, "RIO_READ_REGISTER copy failed\n");
- p->RIOError.Error = COPYOUT_FAILED;
- return -EFAULT;
- }
- return 0;
- /*
- ** rio_make_dev: given port number (0-511) ORed with port type
- ** (RIO_DEV_DIRECT, RIO_DEV_MODEM, RIO_DEV_XPRINT) return dev_t
- ** value to pass to mknod to create the correct device node.
- */
- case RIO_MAKE_DEV:
- {
- unsigned int port = arg & RIO_MODEM_MASK;
- unsigned int ret;
-
- switch (arg & RIO_DEV_MASK) {
- case RIO_DEV_DIRECT:
- ret = drv_makedev(MAJOR(dev), port);
- rio_dprintk(RIO_DEBUG_CTRL, "Makedev direct 0x%x is 0x%x\n", port, ret);
- return ret;
- case RIO_DEV_MODEM:
- ret = drv_makedev(MAJOR(dev), (port | RIO_MODEM_BIT));
- rio_dprintk(RIO_DEBUG_CTRL, "Makedev modem 0x%x is 0x%x\n", port, ret);
- return ret;
- case RIO_DEV_XPRINT:
- ret = drv_makedev(MAJOR(dev), port);
- rio_dprintk(RIO_DEBUG_CTRL, "Makedev printer 0x%x is 0x%x\n", port, ret);
- return ret;
- }
- rio_dprintk(RIO_DEBUG_CTRL, "MAKE Device is called\n");
- return -EINVAL;
- }
- /*
- ** rio_minor: given a dev_t from a stat() call, return
- ** the port number (0-511) ORed with the port type
- ** ( RIO_DEV_DIRECT, RIO_DEV_MODEM, RIO_DEV_XPRINT )
- */
- case RIO_MINOR:
- {
- dev_t dv;
- int mino;
- unsigned long ret;
-
- dv = (dev_t) (arg);
- mino = RIO_UNMODEM(dv);
-
- if (RIO_ISMODEM(dv)) {
- rio_dprintk(RIO_DEBUG_CTRL, "Minor for device 0x%x: modem %d\n", dv, mino);
- ret = mino | RIO_DEV_MODEM;
- } else {
- rio_dprintk(RIO_DEBUG_CTRL, "Minor for device 0x%x: direct %d\n", dv, mino);
- ret = mino | RIO_DEV_DIRECT;
- }
- return ret;
- }
- }
- rio_dprintk(RIO_DEBUG_CTRL, "INVALID DAEMON IOCTL 0x%x\n", cmd);
- p->RIOError.Error = IOCTL_COMMAND_UNKNOWN;
-
- func_exit();
- return -EINVAL;
-}
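
The RIO_MAKE_DEV and RIO_MINOR cases above round-trip a port number (0-511) and a port type through a device minor number. A minimal stand-alone sketch of that encoding follows; the mask and bit values here are assumptions chosen for illustration, not the ones from the deleted rio headers.

/* Minimal sketch of the RIO_MAKE_DEV / RIO_MINOR round trip.
 * The bit layout below is assumed for illustration only; the real
 * values lived in the (deleted) rio headers.
 */
#include <stdio.h>

#define EX_PORT_MASK   0x1ff    /* ports 0-511 (assumed)               */
#define EX_DEV_DIRECT  0x000    /* direct-connect tty (assumed)        */
#define EX_DEV_MODEM   0x200    /* modem-signals device (assumed)      */
#define EX_DEV_XPRINT  0x400    /* transparent-print device (assumed)  */

static unsigned int ex_make_minor(unsigned int port, unsigned int type)
{
	return (port & EX_PORT_MASK) | type;    /* encode: port | type bit */
}

static unsigned int ex_minor_port(unsigned int minor)
{
	return minor & EX_PORT_MASK;            /* decode: strip type bits */
}

int main(void)
{
	unsigned int minor = ex_make_minor(42, EX_DEV_MODEM);

	printf("minor 0x%x -> port %u, %s\n", minor, ex_minor_port(minor),
	       (minor & EX_DEV_MODEM) ? "modem" : "direct");
	return 0;
}
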
-
-/*
-** Pre-emptive commands go on RUPs and are only one byte long.
-*/
-int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)
-{
- struct CmdBlk *CmdBlkP;
- struct PktCmd_M *PktCmdP;
- int Ret;
- ushort rup;
- int port;
-
- if (PortP->State & RIO_DELETED) {
- rio_dprintk(RIO_DEBUG_CTRL, "Preemptive command to deleted RTA ignored\n");
- return RIO_FAIL;
- }
-
- if ((PortP->InUse == (typeof(PortP->InUse))-1) ||
- !(CmdBlkP = RIOGetCmdBlk())) {
- rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block "
- "for command %d on port %d\n", Cmd, PortP->PortNum);
- return RIO_FAIL;
- }
-
- rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n",
- CmdBlkP, PortP->InUse);
-
- PktCmdP = (struct PktCmd_M *)&CmdBlkP->Packet.data[0];
-
- CmdBlkP->Packet.src_unit = 0;
- if (PortP->SecondBlock)
- rup = PortP->ID2;
- else
- rup = PortP->RupNum;
- CmdBlkP->Packet.dest_unit = rup;
- CmdBlkP->Packet.src_port = COMMAND_RUP;
- CmdBlkP->Packet.dest_port = COMMAND_RUP;
- CmdBlkP->Packet.len = PKT_CMD_BIT | 2;
- CmdBlkP->PostFuncP = RIOUnUse;
- CmdBlkP->PostArg = (unsigned long) PortP;
- PktCmdP->Command = Cmd;
- port = PortP->HostPort % (ushort) PORTS_PER_RTA;
- /*
- ** Index ports 8-15 for 2nd block of 16 port RTA.
- */
- if (PortP->SecondBlock)
- port += (ushort) PORTS_PER_RTA;
- PktCmdP->PhbNum = port;
-
- switch (Cmd) {
- case RIOC_MEMDUMP:
- rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p "
- "(addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr);
- PktCmdP->SubCommand = RIOC_MEMDUMP;
- PktCmdP->SubAddr = SubCmd.Addr;
- break;
- case RIOC_FCLOSE:
- rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n",
- CmdBlkP);
- break;
- case RIOC_READ_REGISTER:
- rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) "
- "command blk %p\n", (int) SubCmd.Addr, CmdBlkP);
- PktCmdP->SubCommand = RIOC_READ_REGISTER;
- PktCmdP->SubAddr = SubCmd.Addr;
- break;
- case RIOC_RESUME:
- rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n",
- CmdBlkP);
- break;
- case RIOC_RFLUSH:
- rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n",
- CmdBlkP);
- CmdBlkP->PostFuncP = RIORFlushEnable;
- break;
- case RIOC_SUSPEND:
- rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n",
- CmdBlkP);
- break;
-
- case RIOC_MGET:
- rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n",
- CmdBlkP);
- break;
-
- case RIOC_MSET:
- case RIOC_MBIC:
- case RIOC_MBIS:
- CmdBlkP->Packet.data[4] = (char) PortP->ModemLines;
- rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command "
- "blk %p\n", CmdBlkP);
- break;
-
- case RIOC_WFLUSH:
- /*
- ** If we have queued up the maximum number of Write flushes
- ** allowed then we should not bother sending any more to the
- ** RTA.
- */
- if (PortP->WflushFlag == (typeof(PortP->WflushFlag))-1) {
- rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, "
- "WflushFlag about to wrap!");
- RIOFreeCmdBlk(CmdBlkP);
- return (RIO_FAIL);
- } else {
- rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command "
- "blk %p\n", CmdBlkP);
- CmdBlkP->PostFuncP = RIOWFlushMark;
- }
- break;
- }
-
- PortP->InUse++;
-
- Ret = RIOQueueCmdBlk(PortP->HostP, rup, CmdBlkP);
-
- return Ret;
-}
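
The RIO_MEM_DUMP and RIO_READ_REGISTER handlers above show the calling convention for RIOPreemptiveCmd(): take the port spinlock, queue the command, and mark the port RIO_BUSY only on success. A sketch of that locking pattern, expressed with the driver's own types and helpers, might look like the following; it illustrates the discipline rather than adding anything to the driver.

/* Sketch of the locking pattern used around RIOPreemptiveCmd() in the
 * ioctl handlers above; types and helpers are those of the deleted driver.
 */
static int example_send_preemptive(struct rio_info *p, struct Port *PortP, u8 cmd)
{
	unsigned long flags;

	rio_spin_lock_irqsave(&PortP->portSem, flags);
	if (RIOPreemptiveCmd(p, PortP, cmd) == RIO_FAIL) {
		/* No command block free, or the RTA is gone: give up. */
		rio_spin_unlock_irqrestore(&PortP->portSem, flags);
		return -EBUSY;
	}
	/* Command queued: the post function (RIOUnUse) releases InUse later. */
	PortP->State |= RIO_BUSY;
	rio_spin_unlock_irqrestore(&PortP->portSem, flags);
	return 0;
}
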
diff --git a/drivers/staging/generic_serial/rio/riodrvr.h b/drivers/staging/generic_serial/rio/riodrvr.h
deleted file mode 100644
index 0907e711b35..00000000000
--- a/drivers/staging/generic_serial/rio/riodrvr.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : riodrvr.h
-** SID : 1.3
-** Last Modified : 11/6/98 09:22:46
-** Retrieved : 11/6/98 09:22:46
-**
-** ident @(#)riodrvr.h 1.3
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __riodrvr_h
-#define __riodrvr_h
-
-#include <asm/param.h> /* for HZ */
-
-#define MEMDUMP_SIZE 32
-#define MOD_DISABLE (RIO_NOREAD|RIO_NOWRITE|RIO_NOXPRINT)
-
-
-struct rio_info {
- int mode; /* Intr or polled, word/byte */
- spinlock_t RIOIntrSem; /* Interrupt thread sem */
- int current_chan; /* current channel */
- int RIOFailed; /* Not initialised ? */
- int RIOInstallAttempts; /* no. of rio-install() calls */
- int RIOLastPCISearch; /* status of last search */
- int RIONumHosts; /* Number of RIO Hosts */
- struct Host *RIOHosts; /* RIO Host values */
- struct Port **RIOPortp; /* RIO port values */
-/*
-** 02.03.1999 ARG - ESIL 0820 fix
-** We no longer use RIOBootMode
-**
- int RIOBootMode; * RIO boot mode *
-**
-*/
- int RIOPrintDisabled; /* RIO printing disabled ? */
- int RIOPrintLogState; /* RIO printing state ? */
- int RIOPolling; /* Polling ? */
-/*
-** 09.12.1998 ARG - ESIL 0776 part fix
-** The 'RIO_QUICK_CHECK' ioctl was using RIOHalted.
-** The fix for this ESIL introduces another member (RIORtaDisCons) here to be
-** updated in RIOConCon() - to keep track of RTA connections/disconnections.
-** 'RIO_QUICK_CHECK' now returns the value of RIORtaDisCons.
-*/
- int RIOHalted; /* halted ? */
- int RIORtaDisCons; /* RTA connections/disconnections */
- unsigned int RIOReadCheck; /* Rio read check */
- unsigned int RIONoMessage; /* To display message or not */
- unsigned int RIONumBootPkts; /* how many packets for an RTA */
- unsigned int RIOBootCount; /* size of RTA code */
- unsigned int RIOBooting; /* count of outstanding boots */
- unsigned int RIOSystemUp; /* Booted ?? */
- unsigned int RIOCounting; /* for counting interrupts */
- unsigned int RIOIntCount; /* # of intr since last check */
- unsigned int RIOTxCount; /* number of xmit intrs */
- unsigned int RIORxCount; /* number of rx intrs */
- unsigned int RIORupCount; /* number of rup intrs */
- int RIXTimer;
- int RIOBufferSize; /* Buffersize */
- int RIOBufferMask; /* Buffersize */
-
- int RIOFirstMajor; /* First host card's major no */
-
- unsigned int RIOLastPortsMapped; /* highest port number known */
- unsigned int RIOFirstPortsMapped; /* lowest port number known */
-
- unsigned int RIOLastPortsBooted; /* highest port number running */
- unsigned int RIOFirstPortsBooted; /* lowest port number running */
-
- unsigned int RIOLastPortsOpened; /* highest port number running */
- unsigned int RIOFirstPortsOpened; /* lowest port number running */
-
- /* Flag to say that the topology information has been changed. */
- unsigned int RIOQuickCheck;
- unsigned int CdRegister; /* ??? */
- int RIOSignalProcess; /* Signalling process */
- int rio_debug; /* To debug ... */
- int RIODebugWait; /* For what ??? */
- int tpri; /* Thread prio */
- int tid; /* Thread id */
- unsigned int _RIO_Polled; /* Counter for polling */
- unsigned int _RIO_Interrupted; /* Counter for interrupt */
- int intr_tid; /* iointset return value */
- int TxEnSem; /* TxEnable Semaphore */
-
-
- struct Error RIOError; /* to Identify what went wrong */
- struct Conf RIOConf; /* Configuration ??? */
- struct ttystatics channel[RIO_PORTS]; /* channel information */
- char RIOBootPackets[1 + (SIXTY_FOUR_K / RTA_BOOT_DATA_SIZE)]
- [RTA_BOOT_DATA_SIZE];
- struct Map RIOConnectTable[TOTAL_MAP_ENTRIES];
- struct Map RIOSavedTable[TOTAL_MAP_ENTRIES];
-
- /* RTA to host binding table for master/slave operation */
- unsigned long RIOBindTab[MAX_RTA_BINDINGS];
- /* RTA memory dump variable */
- unsigned char RIOMemDump[MEMDUMP_SIZE];
- struct ModuleInfo RIOModuleTypes[MAX_MODULE_TYPES];
-
-};
-
-
-#ifdef linux
-#define debug(x) printk x
-#else
-#define debug(x) kkprintf x
-#endif
-
-
-
-#define RIO_RESET_INT 0x7d80
-
-#endif /* __riodrvr.h */
diff --git a/drivers/staging/generic_serial/rio/rioinfo.h b/drivers/staging/generic_serial/rio/rioinfo.h
deleted file mode 100644
index 42ff1e79d96..00000000000
--- a/drivers/staging/generic_serial/rio/rioinfo.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : rioinfo.h
-** SID : 1.2
-** Last Modified : 11/6/98 14:07:49
-** Retrieved : 11/6/98 14:07:50
-**
-** ident @(#)rioinfo.h 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __rioinfo_h
-#define __rioinfo_h
-
-/*
-** Host card data structure
-*/
-struct RioHostInfo {
- long location; /* RIO Card Base I/O address */
- long vector; /* RIO Card IRQ vector */
- int bus; /* ISA/EISA/MCA/PCI */
- int mode; /* pointer to host mode - INTERRUPT / POLLED */
- struct old_sgttyb
- *Sg; /* pointer to default term characteristics */
-};
-
-
-/* Mode in rio device info */
-#define INTERRUPTED_MODE 0x01 /* Interrupt is generated */
-#define POLLED_MODE 0x02 /* No interrupt */
-#define AUTO_MODE 0x03 /* Auto mode */
-
-#define WORD_ACCESS_MODE 0x10 /* Word Access Mode */
-#define BYTE_ACCESS_MODE 0x20 /* Byte Access Mode */
-
-
-/* Bus type that RIO supports */
-#define ISA_BUS 0x01 /* The card is ISA */
-#define EISA_BUS 0x02 /* The card is EISA */
-#define MCA_BUS 0x04 /* The card is MCA */
-#define PCI_BUS 0x08 /* The card is PCI */
-
-/*
-** 11.11.1998 ARG - ESIL ???? part fix
-** Moved definition for 'CHAN' here from rioinfo.c (it is now
-** called 'DEF_TERM_CHARACTERISTICS').
-*/
-
-#define DEF_TERM_CHARACTERISTICS \
-{ \
- B19200, B19200, /* input and output speed */ \
- 'H' - '@', /* erase char */ \
- -1, /* 2nd erase char */ \
- 'U' - '@', /* kill char */ \
- ECHO | CRMOD, /* mode */ \
- 'C' - '@', /* interrupt character */ \
- '\\' - '@', /* quit char */ \
- 'Q' - '@', /* start char */ \
- 'S' - '@', /* stop char */ \
- 'D' - '@', /* EOF */ \
- -1, /* brk */ \
- (LCRTBS | LCRTERA | LCRTKIL | LCTLECH), /* local mode word */ \
- 'Z' - '@', /* process stop */ \
- 'Y' - '@', /* delayed stop */ \
- 'R' - '@', /* reprint line */ \
- 'O' - '@', /* flush output */ \
- 'W' - '@', /* word erase */ \
- 'V' - '@' /* literal next char */ \
-}
-
-#endif /* __rioinfo_h */
diff --git a/drivers/staging/generic_serial/rio/rioinit.c b/drivers/staging/generic_serial/rio/rioinit.c
deleted file mode 100644
index fb62b383f1d..00000000000
--- a/drivers/staging/generic_serial/rio/rioinit.c
+++ /dev/null
@@ -1,421 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : rioinit.c
-** SID : 1.3
-** Last Modified : 11/6/98 10:33:43
-** Retrieved : 11/6/98 10:33:49
-**
-** ident @(#)rioinit.c 1.3
-**
-** -----------------------------------------------------------------------------
-*/
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/string.h>
-#include <asm/uaccess.h>
-
-#include <linux/termios.h>
-#include <linux/serial.h>
-
-#include <linux/generic_serial.h>
-
-
-#include "linux_compat.h"
-#include "pkt.h"
-#include "daemon.h"
-#include "rio.h"
-#include "riospace.h"
-#include "cmdpkt.h"
-#include "map.h"
-#include "rup.h"
-#include "port.h"
-#include "riodrvr.h"
-#include "rioinfo.h"
-#include "func.h"
-#include "errors.h"
-#include "pci.h"
-
-#include "parmmap.h"
-#include "unixrup.h"
-#include "board.h"
-#include "host.h"
-#include "phb.h"
-#include "link.h"
-#include "cmdblk.h"
-#include "route.h"
-#include "cirrus.h"
-#include "rioioctl.h"
-#include "rio_linux.h"
-
-int RIOPCIinit(struct rio_info *p, int Mode);
-
-static int RIOScrub(int, u8 __iomem *, int);
-
-
-/**
-** RIOAssignAT :
-**
-** Fill out the fields in the p->RIOHosts structure now we know
-** we have a board present.
-**
-** bits < 0 indicates 8 bit operation requested,
-** bits > 0 indicates 16 bit operation.
-*/
-
-int RIOAssignAT(struct rio_info *p, int Base, void __iomem *virtAddr, int mode)
-{
- int bits;
- struct DpRam __iomem *cardp = (struct DpRam __iomem *)virtAddr;
-
- if ((Base < ONE_MEG) || (mode & BYTE_ACCESS_MODE))
- bits = BYTE_OPERATION;
- else
- bits = WORD_OPERATION;
-
- /*
- ** Board has passed its scrub test. Fill in all the
- ** transient stuff.
- */
- p->RIOHosts[p->RIONumHosts].Caddr = virtAddr;
- p->RIOHosts[p->RIONumHosts].CardP = virtAddr;
-
- /*
- ** Revision 01 AT host cards don't support WORD operations,
- */
- if (readb(&cardp->DpRevision) == 01)
- bits = BYTE_OPERATION;
-
- p->RIOHosts[p->RIONumHosts].Type = RIO_AT;
- p->RIOHosts[p->RIONumHosts].Copy = rio_copy_to_card;
- /* set this later */
- p->RIOHosts[p->RIONumHosts].Slot = -1;
- p->RIOHosts[p->RIONumHosts].Mode = SLOW_LINKS | SLOW_AT_BUS | bits;
- writeb(BOOT_FROM_RAM | EXTERNAL_BUS_OFF | p->RIOHosts[p->RIONumHosts].Mode | INTERRUPT_DISABLE ,
- &p->RIOHosts[p->RIONumHosts].Control);
- writeb(0xFF, &p->RIOHosts[p->RIONumHosts].ResetInt);
- writeb(BOOT_FROM_RAM | EXTERNAL_BUS_OFF | p->RIOHosts[p->RIONumHosts].Mode | INTERRUPT_DISABLE,
- &p->RIOHosts[p->RIONumHosts].Control);
- writeb(0xFF, &p->RIOHosts[p->RIONumHosts].ResetInt);
- p->RIOHosts[p->RIONumHosts].UniqueNum =
- ((readb(&p->RIOHosts[p->RIONumHosts].Unique[0])&0xFF)<<0)|
- ((readb(&p->RIOHosts[p->RIONumHosts].Unique[1])&0xFF)<<8)|
- ((readb(&p->RIOHosts[p->RIONumHosts].Unique[2])&0xFF)<<16)|
- ((readb(&p->RIOHosts[p->RIONumHosts].Unique[3])&0xFF)<<24);
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Uniquenum 0x%x\n",p->RIOHosts[p->RIONumHosts].UniqueNum);
-
- p->RIONumHosts++;
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Tests Passed at 0x%x\n", Base);
- return(1);
-}
-
-static u8 val[] = {
-#ifdef VERY_LONG_TEST
- 0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
- 0xa5, 0xff, 0x5a, 0x00, 0xff, 0xc9, 0x36,
-#endif
- 0xff, 0x00, 0x00 };
-
-#define TEST_END sizeof(val)
-
-/*
-** RAM test a board.
-** Nothing too complicated, just enough to check it out.
-*/
-int RIOBoardTest(unsigned long paddr, void __iomem *caddr, unsigned char type, int slot)
-{
- struct DpRam __iomem *DpRam = caddr;
- void __iomem *ram[4];
- int size[4];
- int op, bank;
- int nbanks;
-
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Reset host type=%d, DpRam=%p, slot=%d\n",
- type, DpRam, slot);
-
- RIOHostReset(type, DpRam, slot);
-
- /*
- ** Scrub the memory. This comes in several banks:
- ** DPsram1 - 7000h bytes
- ** DPsram2 - 200h bytes
- ** DPsram3 - 7000h bytes
- ** scratch - 1000h bytes
- */
-
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Setup ram/size arrays\n");
-
- size[0] = DP_SRAM1_SIZE;
- size[1] = DP_SRAM2_SIZE;
- size[2] = DP_SRAM3_SIZE;
- size[3] = DP_SCRATCH_SIZE;
-
- ram[0] = DpRam->DpSram1;
- ram[1] = DpRam->DpSram2;
- ram[2] = DpRam->DpSram3;
- nbanks = (type == RIO_PCI) ? 3 : 4;
- if (nbanks == 4)
- ram[3] = DpRam->DpScratch;
-
-
- if (nbanks == 3) {
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Memory: %p(0x%x), %p(0x%x), %p(0x%x)\n",
- ram[0], size[0], ram[1], size[1], ram[2], size[2]);
- } else {
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: %p(0x%x), %p(0x%x), %p(0x%x), %p(0x%x)\n",
- ram[0], size[0], ram[1], size[1], ram[2], size[2], ram[3], size[3]);
- }
-
- /*
- ** This scrub operation will test for crosstalk between
- ** banks. TEST_END is a magic number, and relates to the offset
- ** within the 'val' array used by Scrub.
- */
- for (op=0; op<TEST_END; op++) {
- for (bank=0; bank<nbanks; bank++) {
- if (RIOScrub(op, ram[bank], size[bank]) == RIO_FAIL) {
-				rio_dprintk (RIO_DEBUG_INIT, "RIO-init: RIOScrub bank %d, op %d failed\n",
- bank, op);
- return RIO_FAIL;
- }
- }
- }
-
- rio_dprintk (RIO_DEBUG_INIT, "Test completed\n");
- return 0;
-}
-
-
-/*
-** Scrub an area of RAM.
-** Define PRETEST and POSTTEST for a more thorough checking of the
-** state of the memory.
-** Call with op set to an index into the above 'val' array to determine
-** which value will be written into memory.
-** Calling with op set to zero means that the RAM will not be read and
-** checked before it is written.
-** Calling with op non-zero means the RAM will be read and compared with
-** val[op-1] to check that the data from the previous phase was retained.
-*/
-
-static int RIOScrub(int op, u8 __iomem *ram, int size)
-{
- int off;
- unsigned char oldbyte;
- unsigned char newbyte;
- unsigned char invbyte;
- unsigned short oldword;
- unsigned short newword;
- unsigned short invword;
- unsigned short swapword;
-
- if (op) {
- oldbyte = val[op-1];
- oldword = oldbyte | (oldbyte<<8);
- } else
-		oldbyte = oldword = 0; /* Tell the compiler we've initialized them. */
- newbyte = val[op];
- newword = newbyte | (newbyte<<8);
- invbyte = ~newbyte;
- invword = invbyte | (invbyte<<8);
-
- /*
- ** Check that the RAM contains the value that should have been left there
- ** by the previous test (not applicable for pass zero)
- */
- if (op) {
- for (off=0; off<size; off++) {
- if (readb(ram + off) != oldbyte) {
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Byte Pre Check 1: BYTE at offset 0x%x should have been=%x, was=%x\n", off, oldbyte, readb(ram + off));
- return RIO_FAIL;
- }
- }
- for (off=0; off<size; off+=2) {
- if (readw(ram + off) != oldword) {
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Word Pre Check: WORD at offset 0x%x should have been=%x, was=%x\n",off,oldword, readw(ram + off));
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Word Pre Check: BYTE at offset 0x%x is %x BYTE at offset 0x%x is %x\n", off, readb(ram + off), off+1, readb(ram+off+1));
- return RIO_FAIL;
- }
- }
- }
-
- /*
- ** Now write the INVERSE of the test data into every location, using
- ** BYTE write operations, first checking before each byte is written
- ** that the location contains the old value still, and checking after
- ** the write that the location contains the data specified - this is
- ** the BYTE read/write test.
- */
- for (off=0; off<size; off++) {
- if (op && (readb(ram + off) != oldbyte)) {
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Byte Pre Check 2: BYTE at offset 0x%x should have been=%x, was=%x\n", off, oldbyte, readb(ram + off));
- return RIO_FAIL;
- }
- writeb(invbyte, ram + off);
- if (readb(ram + off) != invbyte) {
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Byte Inv Check: BYTE at offset 0x%x should have been=%x, was=%x\n", off, invbyte, readb(ram + off));
- return RIO_FAIL;
- }
- }
-
- /*
- ** now, use WORD operations to write the test value into every location,
- ** check as before that the location contains the previous test value
- ** before overwriting, and that it contains the data value written
- ** afterwards.
- ** This is the WORD operation test.
- */
- for (off=0; off<size; off+=2) {
- if (readw(ram + off) != invword) {
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Word Inv Check: WORD at offset 0x%x should have been=%x, was=%x\n", off, invword, readw(ram + off));
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Word Inv Check: BYTE at offset 0x%x is %x BYTE at offset 0x%x is %x\n", off, readb(ram + off), off+1, readb(ram+off+1));
- return RIO_FAIL;
- }
-
- writew(newword, ram + off);
- if ( readw(ram + off) != newword ) {
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Post Word Check 1: WORD at offset 0x%x should have been=%x, was=%x\n", off, newword, readw(ram + off));
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Post Word Check 1: BYTE at offset 0x%x is %x BYTE at offset 0x%x is %x\n", off, readb(ram + off), off+1, readb(ram + off + 1));
- return RIO_FAIL;
- }
- }
-
- /*
- ** now run through the block of memory again, first in byte mode
- ** then in word mode, and check that all the locations contain the
- ** required test data.
- */
- for (off=0; off<size; off++) {
- if (readb(ram + off) != newbyte) {
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Post Byte Check: BYTE at offset 0x%x should have been=%x, was=%x\n", off, newbyte, readb(ram + off));
- return RIO_FAIL;
- }
- }
-
- for (off=0; off<size; off+=2) {
- if (readw(ram + off) != newword ) {
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Post Word Check 2: WORD at offset 0x%x should have been=%x, was=%x\n", off, newword, readw(ram + off));
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: Post Word Check 2: BYTE at offset 0x%x is %x BYTE at offset 0x%x is %x\n", off, readb(ram + off), off+1, readb(ram + off + 1));
- return RIO_FAIL;
- }
- }
-
- /*
- ** time to check out byte swapping errors
- */
- swapword = invbyte | (newbyte << 8);
-
- for (off=0; off<size; off+=2) {
- writeb(invbyte, &ram[off]);
- writeb(newbyte, &ram[off+1]);
- }
-
- for ( off=0; off<size; off+=2 ) {
- if (readw(ram + off) != swapword) {
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: SwapWord Check 1: WORD at offset 0x%x should have been=%x, was=%x\n", off, swapword, readw(ram + off));
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: SwapWord Check 1: BYTE at offset 0x%x is %x BYTE at offset 0x%x is %x\n", off, readb(ram + off), off+1, readb(ram + off + 1));
- return RIO_FAIL;
- }
- writew(~swapword, ram + off);
- }
-
- for (off=0; off<size; off+=2) {
- if (readb(ram + off) != newbyte) {
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: SwapWord Check 2: BYTE at offset 0x%x should have been=%x, was=%x\n", off, newbyte, readb(ram + off));
- return RIO_FAIL;
- }
- if (readb(ram + off + 1) != invbyte) {
- rio_dprintk (RIO_DEBUG_INIT, "RIO-init: SwapWord Check 2: BYTE at offset 0x%x should have been=%x, was=%x\n", off+1, invbyte, readb(ram + off + 1));
- return RIO_FAIL;
- }
- writew(newword, ram + off);
- }
- return 0;
-}
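
The comment above RIOScrub() describes how the passes chain together: pass op first checks that every location still holds val[op-1] from the previous pass, then writes val[op]. A small stand-alone model of that chaining over an ordinary buffer (ignoring the byte/word and swap tests, and using an illustrative pattern table) is:

/* Toy model of the chained scrub passes described above: pass op verifies
 * that val[op-1] survived, then writes val[op].  Plain memory stands in
 * for the card's dual-port RAM, so readb()/writeb() are not needed, and
 * the pattern table is illustrative.
 */
#include <stdio.h>
#include <string.h>

static const unsigned char val[] = { 0xff, 0x00, 0xa5, 0x5a };

static int scrub_pass(unsigned char *ram, size_t size, size_t op)
{
	size_t off;

	for (off = 0; off < size; off++) {
		if (op && ram[off] != val[op - 1])
			return -1;          /* previous pattern was lost */
		ram[off] = val[op];         /* write this pass's pattern */
	}
	return 0;
}

int main(void)
{
	unsigned char ram[64];
	size_t op;

	memset(ram, 0, sizeof(ram));
	for (op = 0; op < sizeof(val); op++) {
		if (scrub_pass(ram, sizeof(ram), op)) {
			printf("scrub failed at pass %zu\n", op);
			return 1;
		}
	}
	printf("all %zu passes retained their data\n", sizeof(val));
	return 0;
}
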
-
-
-int RIODefaultName(struct rio_info *p, struct Host *HostP, unsigned int UnitId)
-{
- memcpy(HostP->Mapping[UnitId].Name, "UNKNOWN RTA X-XX", 17);
- HostP->Mapping[UnitId].Name[12]='1'+(HostP-p->RIOHosts);
- if ((UnitId+1) > 9) {
- HostP->Mapping[UnitId].Name[14]='0'+((UnitId+1)/10);
- HostP->Mapping[UnitId].Name[15]='0'+((UnitId+1)%10);
- }
- else {
- HostP->Mapping[UnitId].Name[14]='1'+UnitId;
- HostP->Mapping[UnitId].Name[15]=0;
- }
- return 0;
-}
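
RIODefaultName() above pokes digits into the "UNKNOWN RTA X-XX" template to produce names such as "UNKNOWN RTA 1-5". A tiny userspace sketch that reproduces the same naming with snprintf, purely for illustration:

/* Reproduces the "UNKNOWN RTA X-XX" naming of RIODefaultName() with
 * snprintf; host and unit indices are zero-based, as in the driver.
 */
#include <stdio.h>

static void default_name(char *buf, size_t len, int host, unsigned int unit)
{
	snprintf(buf, len, "UNKNOWN RTA %d-%u", host + 1, unit + 1);
}

int main(void)
{
	char name[17];

	default_name(name, sizeof(name), 0, 4);   /* first host, fifth RTA  */
	printf("%s\n", name);                     /* "UNKNOWN RTA 1-5"      */
	default_name(name, sizeof(name), 1, 11);  /* second host, 12th RTA  */
	printf("%s\n", name);                     /* "UNKNOWN RTA 2-12"     */
	return 0;
}
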
-
-#define RIO_RELEASE "Linux"
-#define RELEASE_ID "1.0"
-
-static struct rioVersion stVersion;
-
-struct rioVersion *RIOVersid(void)
-{
- strlcpy(stVersion.version, "RIO driver for linux V1.0",
- sizeof(stVersion.version));
- strlcpy(stVersion.buildDate, "Aug 15 2010",
- sizeof(stVersion.buildDate));
-
- return &stVersion;
-}
-
-void RIOHostReset(unsigned int Type, struct DpRam __iomem *DpRamP, unsigned int Slot)
-{
- /*
- ** Reset the Tpu
- */
- rio_dprintk (RIO_DEBUG_INIT, "RIOHostReset: type 0x%x", Type);
- switch ( Type ) {
- case RIO_AT:
- rio_dprintk (RIO_DEBUG_INIT, " (RIO_AT)\n");
- writeb(BOOT_FROM_RAM | EXTERNAL_BUS_OFF | INTERRUPT_DISABLE | BYTE_OPERATION |
- SLOW_LINKS | SLOW_AT_BUS, &DpRamP->DpControl);
- writeb(0xFF, &DpRamP->DpResetTpu);
- udelay(3);
- rio_dprintk (RIO_DEBUG_INIT, "RIOHostReset: Don't know if it worked. Try reset again\n");
- writeb(BOOT_FROM_RAM | EXTERNAL_BUS_OFF | INTERRUPT_DISABLE |
- BYTE_OPERATION | SLOW_LINKS | SLOW_AT_BUS, &DpRamP->DpControl);
- writeb(0xFF, &DpRamP->DpResetTpu);
- udelay(3);
- break;
- case RIO_PCI:
- rio_dprintk (RIO_DEBUG_INIT, " (RIO_PCI)\n");
- writeb(RIO_PCI_BOOT_FROM_RAM, &DpRamP->DpControl);
- writeb(0xFF, &DpRamP->DpResetInt);
- writeb(0xFF, &DpRamP->DpResetTpu);
- udelay(100);
- break;
- default:
- rio_dprintk (RIO_DEBUG_INIT, " (UNKNOWN)\n");
- break;
- }
- return;
-}
diff --git a/drivers/staging/generic_serial/rio/riointr.c b/drivers/staging/generic_serial/rio/riointr.c
deleted file mode 100644
index 2e71aecae20..00000000000
--- a/drivers/staging/generic_serial/rio/riointr.c
+++ /dev/null
@@ -1,645 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : riointr.c
-** SID : 1.2
-** Last Modified : 11/6/98 10:33:44
-** Retrieved : 11/6/98 10:33:49
-**
-** ident @(#)riointr.c 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/string.h>
-#include <asm/uaccess.h>
-
-#include <linux/termios.h>
-#include <linux/serial.h>
-
-#include <linux/generic_serial.h>
-
-#include <linux/delay.h>
-
-#include "linux_compat.h"
-#include "rio_linux.h"
-#include "pkt.h"
-#include "daemon.h"
-#include "rio.h"
-#include "riospace.h"
-#include "cmdpkt.h"
-#include "map.h"
-#include "rup.h"
-#include "port.h"
-#include "riodrvr.h"
-#include "rioinfo.h"
-#include "func.h"
-#include "errors.h"
-#include "pci.h"
-
-#include "parmmap.h"
-#include "unixrup.h"
-#include "board.h"
-#include "host.h"
-#include "phb.h"
-#include "link.h"
-#include "cmdblk.h"
-#include "route.h"
-#include "cirrus.h"
-#include "rioioctl.h"
-
-
-static void RIOReceive(struct rio_info *, struct Port *);
-
-
-static char *firstchars(char *p, int nch)
-{
- static char buf[2][128];
- static int t = 0;
- t = !t;
- memcpy(buf[t], p, nch);
- buf[t][nch] = 0;
- return buf[t];
-}
-
-
-#define INCR( P, I ) ((P) = (((P)+(I)) & p->RIOBufferMask))
-/* Enable and start the transmission of packets */
-void RIOTxEnable(char *en)
-{
- struct Port *PortP;
- struct rio_info *p;
- struct tty_struct *tty;
- int c;
- struct PKT __iomem *PacketP;
- unsigned long flags;
-
- PortP = (struct Port *) en;
- p = (struct rio_info *) PortP->p;
- tty = PortP->gs.port.tty;
-
-
- rio_dprintk(RIO_DEBUG_INTR, "tx port %d: %d chars queued.\n", PortP->PortNum, PortP->gs.xmit_cnt);
-
- if (!PortP->gs.xmit_cnt)
- return;
-
-
- /* This routine is an order of magnitude simpler than the specialix
- version. One of the disadvantages is that this version will send
- an incomplete packet (usually 64 bytes instead of 72) once for
- every 4k worth of data. Let's just say that this won't influence
- performance significantly..... */
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- while (can_add_transmit(&PacketP, PortP)) {
- c = PortP->gs.xmit_cnt;
- if (c > PKT_MAX_DATA_LEN)
- c = PKT_MAX_DATA_LEN;
-
- /* Don't copy past the end of the source buffer */
- if (c > SERIAL_XMIT_SIZE - PortP->gs.xmit_tail)
- c = SERIAL_XMIT_SIZE - PortP->gs.xmit_tail;
-
- {
- int t;
- t = (c > 10) ? 10 : c;
-
- rio_dprintk(RIO_DEBUG_INTR, "rio: tx port %d: copying %d chars: %s - %s\n", PortP->PortNum, c, firstchars(PortP->gs.xmit_buf + PortP->gs.xmit_tail, t), firstchars(PortP->gs.xmit_buf + PortP->gs.xmit_tail + c - t, t));
- }
- /* If for one reason or another, we can't copy more data,
- we're done! */
- if (c == 0)
- break;
-
- rio_memcpy_toio(PortP->HostP->Caddr, PacketP->data, PortP->gs.xmit_buf + PortP->gs.xmit_tail, c);
- /* udelay (1); */
-
- writeb(c, &(PacketP->len));
- if (!(PortP->State & RIO_DELETED)) {
- add_transmit(PortP);
- /*
- ** Count chars tx'd for port statistics reporting
- */
- if (PortP->statsGather)
- PortP->txchars += c;
- }
- PortP->gs.xmit_tail = (PortP->gs.xmit_tail + c) & (SERIAL_XMIT_SIZE - 1);
- PortP->gs.xmit_cnt -= c;
- }
-
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
-
- if (PortP->gs.xmit_cnt <= (PortP->gs.wakeup_chars + 2 * PKT_MAX_DATA_LEN))
- tty_wakeup(PortP->gs.port.tty);
-
-}
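
The copy loop in RIOTxEnable() above clamps each chunk to the packet payload size and to the distance to the circular transmit buffer's wrap point. That clamp, isolated from the driver and using illustrative sizes, looks like this:

/* Isolated sketch of RIOTxEnable()'s chunk-size clamp: never copy more
 * than one packet payload, nor past the ring buffer's wrap point.
 * Sizes are illustrative, not the driver's real constants.
 */
#include <stdio.h>

#define XMIT_SIZE     4096   /* ring buffer size (power of two, assumed) */
#define PKT_DATA_MAX    72   /* per-packet payload (assumed)             */

static int chunk_len(int xmit_cnt, int xmit_tail)
{
	int c = xmit_cnt;

	if (c > PKT_DATA_MAX)
		c = PKT_DATA_MAX;               /* fits in one packet      */
	if (c > XMIT_SIZE - xmit_tail)
		c = XMIT_SIZE - xmit_tail;      /* don't run past the wrap */
	return c;
}

int main(void)
{
	/* 100 bytes queued, 30 bytes left before the wrap: copy 30 now,
	 * the rest on the next iteration once the tail wraps to 0.
	 */
	printf("%d\n", chunk_len(100, XMIT_SIZE - 30));   /* -> 30 */
	printf("%d\n", chunk_len(100, 0));                /* -> 72 */
	return 0;
}
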
-
-
-/*
-** RIO Host Service routine. Does all the work traditionally associated with an
-** interrupt.
-*/
-static int RupIntr;
-static int RxIntr;
-static int TxIntr;
-
-void RIOServiceHost(struct rio_info *p, struct Host *HostP)
-{
- rio_spin_lock(&HostP->HostLock);
- if ((HostP->Flags & RUN_STATE) != RC_RUNNING) {
- static int t = 0;
- rio_spin_unlock(&HostP->HostLock);
- if ((t++ % 200) == 0)
- rio_dprintk(RIO_DEBUG_INTR, "Interrupt but host not running. flags=%x.\n", (int) HostP->Flags);
- return;
- }
- rio_spin_unlock(&HostP->HostLock);
-
- if (readw(&HostP->ParmMapP->rup_intr)) {
- writew(0, &HostP->ParmMapP->rup_intr);
- p->RIORupCount++;
- RupIntr++;
- rio_dprintk(RIO_DEBUG_INTR, "rio: RUP interrupt on host %Zd\n", HostP - p->RIOHosts);
- RIOPollHostCommands(p, HostP);
- }
-
- if (readw(&HostP->ParmMapP->rx_intr)) {
- int port;
-
- writew(0, &HostP->ParmMapP->rx_intr);
- p->RIORxCount++;
- RxIntr++;
-
- rio_dprintk(RIO_DEBUG_INTR, "rio: RX interrupt on host %Zd\n", HostP - p->RIOHosts);
- /*
- ** Loop through every port. If the port is mapped into
- ** the system ( i.e. has /dev/ttyXXXX associated ) then it is
- ** worth checking. If the port isn't open, grab any packets
- ** hanging on its receive queue and stuff them on the free
- ** list; check for commands on the way.
- */
- for (port = p->RIOFirstPortsBooted; port < p->RIOLastPortsBooted + PORTS_PER_RTA; port++) {
- struct Port *PortP = p->RIOPortp[port];
- struct tty_struct *ttyP;
- struct PKT __iomem *PacketP;
-
- /*
- ** not mapped in - most of the RIOPortp[] information
- ** has not been set up!
- ** Optimise: ports come in bundles of eight.
- */
- if (!PortP->Mapped) {
- port += 7;
- continue; /* with the next port */
- }
-
- /*
- ** If the host board isn't THIS host board, check the next one.
- ** optimise: ports come in bundles of eight.
- */
- if (PortP->HostP != HostP) {
- port += 7;
- continue;
- }
-
- /*
- ** Let us see - is the port open? If not, then don't service it.
- */
- if (!(PortP->PortState & PORT_ISOPEN)) {
- continue;
- }
-
- /*
- ** find corresponding tty structure. The process of mapping
- ** the ports puts these here.
- */
- ttyP = PortP->gs.port.tty;
-
- /*
- ** Lock the port before we begin working on it.
- */
- rio_spin_lock(&PortP->portSem);
-
- /*
- ** Process received data if there is any.
- */
- if (can_remove_receive(&PacketP, PortP))
- RIOReceive(p, PortP);
-
- /*
- ** If there is no data left to be read from the port, and
-			** its handshake bit is set, then we must clear the handshake,
-			** so that the downstream RTA is re-enabled.
- */
- if (!can_remove_receive(&PacketP, PortP) && (readw(&PortP->PhbP->handshake) == PHB_HANDSHAKE_SET)) {
- /*
- ** MAGIC! ( Basically, handshake the RX buffer, so that
- ** the RTAs upstream can be re-enabled. )
- */
- rio_dprintk(RIO_DEBUG_INTR, "Set RX handshake bit\n");
- writew(PHB_HANDSHAKE_SET | PHB_HANDSHAKE_RESET, &PortP->PhbP->handshake);
- }
- rio_spin_unlock(&PortP->portSem);
- }
- }
-
- if (readw(&HostP->ParmMapP->tx_intr)) {
- int port;
-
- writew(0, &HostP->ParmMapP->tx_intr);
-
- p->RIOTxCount++;
- TxIntr++;
- rio_dprintk(RIO_DEBUG_INTR, "rio: TX interrupt on host %Zd\n", HostP - p->RIOHosts);
-
- /*
- ** Loop through every port.
- ** If the port is mapped into the system ( i.e. has /dev/ttyXXXX
- ** associated ) then it is worth checking.
- */
- for (port = p->RIOFirstPortsBooted; port < p->RIOLastPortsBooted + PORTS_PER_RTA; port++) {
- struct Port *PortP = p->RIOPortp[port];
- struct tty_struct *ttyP;
- struct PKT __iomem *PacketP;
-
- /*
- ** not mapped in - most of the RIOPortp[] information
- ** has not been set up!
- */
- if (!PortP->Mapped) {
- port += 7;
- continue; /* with the next port */
- }
-
- /*
- ** If the host board isn't THIS host board, check the next one.
- ** Optimise: ports come in bundles of eight.
- */
- if (PortP->HostP != HostP) {
- port += 7;
- continue; /* with the next port */
- }
-
- /*
- ** Let us see - is the port open? If not, then don't service it.
- */
- if (!(PortP->PortState & PORT_ISOPEN)) {
- continue;
- }
-
- rio_dprintk(RIO_DEBUG_INTR, "rio: Looking into port %d.\n", port);
- /*
- ** Lock the port before we begin working on it.
- */
- rio_spin_lock(&PortP->portSem);
-
- /*
- ** If we can't add anything to the transmit queue, then
- ** we need do none of this processing.
- */
- if (!can_add_transmit(&PacketP, PortP)) {
- rio_dprintk(RIO_DEBUG_INTR, "Can't add to port, so skipping.\n");
- rio_spin_unlock(&PortP->portSem);
- continue;
- }
-
- /*
- ** find corresponding tty structure. The process of mapping
- ** the ports puts these here.
- */
- ttyP = PortP->gs.port.tty;
- /* If ttyP is NULL, the port is getting closed. Forget about it. */
- if (!ttyP) {
- rio_dprintk(RIO_DEBUG_INTR, "no tty, so skipping.\n");
- rio_spin_unlock(&PortP->portSem);
- continue;
- }
- /*
- ** If there is more room available we start up the transmit
- ** data process again. This can be direct I/O, if the cookmode
- ** is set to COOK_RAW or COOK_MEDIUM, or will be a call to the
- ** riotproc( T_OUTPUT ) if we are in COOK_WELL mode, to fetch
- ** characters via the line discipline. We must always call
- ** the line discipline,
- ** so that user input characters can be echoed correctly.
- **
- ** ++++ Update +++++
- ** With the advent of double buffering, we now see if
- ** TxBufferOut-In is non-zero. If so, then we copy a packet
- ** to the output place, and set it going. If this empties
- ** the buffer, then we must issue a wakeup( ) on OUT.
- ** If it frees space in the buffer then we must issue
- ** a wakeup( ) on IN.
- **
- ** ++++ Extra! Extra! If PortP->WflushFlag is set, then we
- ** have to send a WFLUSH command down the PHB, to mark the
- ** end point of a WFLUSH. We also need to clear out any
- ** data from the double buffer! ( note that WflushFlag is a
- ** *count* of the number of WFLUSH commands outstanding! )
- **
- ** ++++ And there's more!
- ** If an RTA is powered off, then on again, and rebooted,
- ** whilst it has ports open, then we need to re-open the ports.
- ** ( reasonable enough ). We can't do this when we spot the
- ** re-boot, in interrupt time, because the queue is probably
- ** full. So, when we come in here, we need to test if any
- ** ports are in this condition, and re-open the port before
- ** we try to send any more data to it. Now, the re-booted
- ** RTA will be discarding packets from the PHB until it
- ** receives this open packet, but don't worry too much
- ** about that. The one thing that is interesting is the
- ** combination of this effect and the WFLUSH effect!
- */
- /* For now don't handle RTA reboots. -- REW.
- Reenabled. Otherwise RTA reboots didn't work. Duh. -- REW */
- if (PortP->MagicFlags) {
- if (PortP->MagicFlags & MAGIC_REBOOT) {
- /*
- ** well, the RTA has been rebooted, and there is room
- ** on its queue to add the open packet that is required.
- **
- ** The messy part of this line is trying to decide if
- ** we need to call the Param function as a tty or as
- ** a modem.
- ** DONT USE CLOCAL AS A TEST FOR THIS!
- **
- ** If we can't param the port, then move on to the
- ** next port.
- */
- PortP->InUse = NOT_INUSE;
-
- rio_spin_unlock(&PortP->portSem);
- if (RIOParam(PortP, RIOC_OPEN, ((PortP->Cor2Copy & (RIOC_COR2_RTSFLOW | RIOC_COR2_CTSFLOW)) == (RIOC_COR2_RTSFLOW | RIOC_COR2_CTSFLOW)) ? 1 : 0, DONT_SLEEP) == RIO_FAIL)
- continue; /* with next port */
- rio_spin_lock(&PortP->portSem);
- PortP->MagicFlags &= ~MAGIC_REBOOT;
- }
-
- /*
- ** As mentioned above, this is a tacky hack to cope
- ** with WFLUSH
- */
- if (PortP->WflushFlag) {
- rio_dprintk(RIO_DEBUG_INTR, "Want to WFLUSH mark this port\n");
-
- if (PortP->InUse)
- rio_dprintk(RIO_DEBUG_INTR, "FAILS - PORT IS IN USE\n");
- }
-
- while (PortP->WflushFlag && can_add_transmit(&PacketP, PortP) && (PortP->InUse == NOT_INUSE)) {
- int p;
- struct PktCmd __iomem *PktCmdP;
-
- rio_dprintk(RIO_DEBUG_INTR, "Add WFLUSH marker to data queue\n");
- /*
- ** make it look just like a WFLUSH command
- */
- PktCmdP = (struct PktCmd __iomem *) &PacketP->data[0];
-
- writeb(RIOC_WFLUSH, &PktCmdP->Command);
-
- p = PortP->HostPort % (u16) PORTS_PER_RTA;
-
- /*
- ** If second block of ports for 16 port RTA, add 8
- ** to index 8-15.
- */
- if (PortP->SecondBlock)
- p += PORTS_PER_RTA;
-
- writeb(p, &PktCmdP->PhbNum);
-
- /*
- ** to make debuggery easier
- */
- writeb('W', &PacketP->data[2]);
- writeb('F', &PacketP->data[3]);
- writeb('L', &PacketP->data[4]);
- writeb('U', &PacketP->data[5]);
- writeb('S', &PacketP->data[6]);
- writeb('H', &PacketP->data[7]);
- writeb(' ', &PacketP->data[8]);
- writeb('0' + PortP->WflushFlag, &PacketP->data[9]);
- writeb(' ', &PacketP->data[10]);
- writeb(' ', &PacketP->data[11]);
- writeb('\0', &PacketP->data[12]);
-
- /*
- ** it's two bytes long!
- */
- writeb(PKT_CMD_BIT | 2, &PacketP->len);
-
- /*
- ** queue it!
- */
- if (!(PortP->State & RIO_DELETED)) {
- add_transmit(PortP);
- /*
- ** Count chars tx'd for port statistics reporting
- */
- if (PortP->statsGather)
- PortP->txchars += 2;
- }
-
- if (--(PortP->WflushFlag) == 0) {
- PortP->MagicFlags &= ~MAGIC_FLUSH;
- }
-
- rio_dprintk(RIO_DEBUG_INTR, "Wflush count now stands at %d\n", PortP->WflushFlag);
- }
- if (PortP->MagicFlags & MORE_OUTPUT_EYGOR) {
- if (PortP->MagicFlags & MAGIC_FLUSH) {
- PortP->MagicFlags |= MORE_OUTPUT_EYGOR;
- } else {
- if (!can_add_transmit(&PacketP, PortP)) {
- rio_spin_unlock(&PortP->portSem);
- continue;
- }
- rio_spin_unlock(&PortP->portSem);
- RIOTxEnable((char *) PortP);
- rio_spin_lock(&PortP->portSem);
- PortP->MagicFlags &= ~MORE_OUTPUT_EYGOR;
- }
- }
- }
-
-
- /*
- ** If we can't add anything to the transmit queue, then
- ** we need do none of the remaining processing.
- */
- if (!can_add_transmit(&PacketP, PortP)) {
- rio_spin_unlock(&PortP->portSem);
- continue;
- }
-
- rio_spin_unlock(&PortP->portSem);
- RIOTxEnable((char *) PortP);
- }
- }
-}
-
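Editor's note: the RX and TX service loops in RIOServiceHost() above both step over unmapped ports eight at a time ("port += 7"), relying on ports arriving in bundles of eight per module. The following standalone sketch only illustrates that skip pattern; the array, field names and which bundle is mapped are invented for the demo and this is not part of the deleted driver.

/* Illustrative sketch: skip unmapped bundles of eight ports, the way the
 * service loops above do with "port += 7; continue;". */
#include <stdio.h>

#define NPORTS 32
#define BUNDLE 8

struct demo_port { int mapped; };

int main(void)
{
    struct demo_port ports[NPORTS] = { { 0 } };
    int port, serviced = 0;

    /* Pretend only the second bundle (ports 8..15) is mapped in. */
    for (port = 8; port < 16; port++)
        ports[port].mapped = 1;

    for (port = 0; port < NPORTS; port++) {
        if (!ports[port].mapped) {
            /* Ports come in bundles of eight: skip the rest of this
             * bundle in one step, exactly like "port += 7" followed
             * by the loop's own port++. */
            port += BUNDLE - 1;
            continue;
        }
        serviced++;
    }
    printf("serviced %d mapped ports\n", serviced);
    return 0;
}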
-/*
-** Routine for handling received data for tty drivers
-*/
-static void RIOReceive(struct rio_info *p, struct Port *PortP)
-{
- struct tty_struct *TtyP;
- unsigned short transCount;
- struct PKT __iomem *PacketP;
- register unsigned int DataCnt;
- unsigned char __iomem *ptr;
- unsigned char *buf;
- int copied = 0;
-
- static int intCount, RxIntCnt;
-
- /*
- ** The receive data process is to remove packets from the
- ** PHB until there aren't any more or the current cblock
- ** is full. When this occurs, there will be some left over
- ** data in the packet, that we must do something with.
- ** As we haven't unhooked the packet from the read list
- ** yet, we can just leave the packet there, having first
- ** made a note of how far we got. This means that we need
- ** a pointer per port saying where we start taking the
- ** data from - this will normally be zero, but when we
- ** run out of space it will be set to the offset of the
- ** next byte to copy from the packet data area. The packet
- ** length field is decremented by the number of bytes that
- ** we successfully removed from the packet. When this reaches
- ** zero, we reset the offset pointer to be zero, and free
- ** the packet from the front of the queue.
- */
-
- intCount++;
-
- TtyP = PortP->gs.port.tty;
- if (!TtyP) {
- rio_dprintk(RIO_DEBUG_INTR, "RIOReceive: tty is null. \n");
- return;
- }
-
- if (PortP->State & RIO_THROTTLE_RX) {
- rio_dprintk(RIO_DEBUG_INTR, "RIOReceive: Throttled. Can't handle more input.\n");
- return;
- }
-
- if (PortP->State & RIO_DELETED) {
- while (can_remove_receive(&PacketP, PortP)) {
- remove_receive(PortP);
- put_free_end(PortP->HostP, PacketP);
- }
- } else {
- /*
- ** loop, just so long as:
- ** i ) there's some data ( i.e. can_remove_receive )
- ** ii ) we haven't been blocked
- ** iii ) there's somewhere to put the data
- ** iv ) we haven't outstayed our welcome
- */
- transCount = 1;
- while (can_remove_receive(&PacketP, PortP)
- && transCount) {
- RxIntCnt++;
-
- /*
- ** check that it is not a command!
- */
- if (readb(&PacketP->len) & PKT_CMD_BIT) {
- rio_dprintk(RIO_DEBUG_INTR, "RIO: unexpected command packet received on PHB\n");
- /* rio_dprint(RIO_DEBUG_INTR, (" sysport = %d\n", p->RIOPortp->PortNum)); */
- rio_dprintk(RIO_DEBUG_INTR, " dest_unit = %d\n", readb(&PacketP->dest_unit));
- rio_dprintk(RIO_DEBUG_INTR, " dest_port = %d\n", readb(&PacketP->dest_port));
- rio_dprintk(RIO_DEBUG_INTR, " src_unit = %d\n", readb(&PacketP->src_unit));
- rio_dprintk(RIO_DEBUG_INTR, " src_port = %d\n", readb(&PacketP->src_port));
- rio_dprintk(RIO_DEBUG_INTR, " len = %d\n", readb(&PacketP->len));
- rio_dprintk(RIO_DEBUG_INTR, " control = %d\n", readb(&PacketP->control));
- rio_dprintk(RIO_DEBUG_INTR, " csum = %d\n", readw(&PacketP->csum));
- rio_dprintk(RIO_DEBUG_INTR, " data bytes: ");
- for (DataCnt = 0; DataCnt < PKT_MAX_DATA_LEN; DataCnt++)
- rio_dprintk(RIO_DEBUG_INTR, "%d\n", readb(&PacketP->data[DataCnt]));
- remove_receive(PortP);
- put_free_end(PortP->HostP, PacketP);
- continue; /* with next packet */
- }
-
- /*
- ** How many characters can we move 'upstream' ?
- **
- ** Determine the minimum of the amount of data
- ** available and the amount of space in which to
- ** put it.
- **
- ** 1. Get the packet length by masking 'len'
- ** for only the length bits.
- ** 2. Available space is [buffer size] - [space used]
- **
- ** Transfer count is the minimum of packet length
- ** and available space.
- */
-
- transCount = tty_buffer_request_room(TtyP, readb(&PacketP->len) & PKT_LEN_MASK);
- rio_dprintk(RIO_DEBUG_REC, "port %d: Copy %d bytes\n", PortP->PortNum, transCount);
- /*
- ** To use the following 'kkprintfs' for debugging - change the '#undef'
- ** to '#define', (this is the only place ___DEBUG_IT___ occurs in the
- ** driver).
- */
- ptr = (unsigned char __iomem *) PacketP->data + PortP->RxDataStart;
-
- tty_prepare_flip_string(TtyP, &buf, transCount);
- rio_memcpy_fromio(buf, ptr, transCount);
- PortP->RxDataStart += transCount;
- writeb(readb(&PacketP->len)-transCount, &PacketP->len);
- copied += transCount;
-
-
-
- if (readb(&PacketP->len) == 0) {
- /*
- ** If we have emptied the packet, then we can
- ** free it, and reset the start pointer for
- ** the next packet.
- */
- remove_receive(PortP);
- put_free_end(PortP->HostP, PacketP);
- PortP->RxDataStart = 0;
- }
- }
- }
- if (copied) {
- rio_dprintk(RIO_DEBUG_REC, "port %d: pushing tty flip buffer: %d total bytes copied.\n", PortP->PortNum, copied);
- tty_flip_buffer_push(TtyP);
- }
-
- return;
-}
-
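Editor's note: RIOReceive() above drains each packet in chunks, keeping PortP->RxDataStart as the offset of the next byte to copy, decrementing the packet's len field, and only freeing the packet once len reaches zero; the transfer size is the minimum of the remaining packet data and the room the tty layer offers. The sketch below is a plain user-space illustration of that bookkeeping; demo_pkt, drain() and the buffer sizes are hypothetical, and the real driver copies out of card memory with rio_memcpy_fromio() into the tty flip buffer.

/* Illustrative sketch: partial drain of a packet with an offset pointer. */
#include <stdio.h>
#include <string.h>

struct demo_pkt {
    unsigned char data[72];
    int len;        /* bytes still to be consumed */
    int start;      /* offset of the next byte to copy out */
};

/* Copy at most 'room' bytes from the packet into 'dst'; return how many
 * were copied.  The packet is "free" once len reaches zero. */
static int drain(struct demo_pkt *p, unsigned char *dst, int room)
{
    int n = p->len < room ? p->len : room;   /* min(available, space) */

    memcpy(dst, p->data + p->start, n);
    p->start += n;
    p->len -= n;
    if (p->len == 0)
        p->start = 0;   /* packet emptied: reset for the next one */
    return n;
}

int main(void)
{
    struct demo_pkt pkt = { "hello, rio receive path", 23, 0 };
    unsigned char buf[8];
    int copied;

    while (pkt.len) {
        copied = drain(&pkt, buf, sizeof(buf));
        printf("copied %d bytes, %d left\n", copied, pkt.len);
    }
    return 0;
}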
diff --git a/drivers/staging/generic_serial/rio/rioioctl.h b/drivers/staging/generic_serial/rio/rioioctl.h
deleted file mode 100644
index e8af5b30519..00000000000
--- a/drivers/staging/generic_serial/rio/rioioctl.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : rioioctl.h
-** SID : 1.2
-** Last Modified : 11/6/98 11:34:13
-** Retrieved : 11/6/98 11:34:22
-**
-** ident @(#)rioioctl.h 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __rioioctl_h__
-#define __rioioctl_h__
-
-/*
-** RIO device driver - user ioctls and associated structures.
-*/
-
-struct portStats {
- int port;
- int gather;
- unsigned long txchars;
- unsigned long rxchars;
- unsigned long opens;
- unsigned long closes;
- unsigned long ioctls;
-};
-
-#define RIOC ('R'<<8)|('i'<<16)|('o'<<24)
-
-#define RIO_QUICK_CHECK (RIOC | 105)
-#define RIO_GATHER_PORT_STATS (RIOC | 193)
-#define RIO_RESET_PORT_STATS (RIOC | 194)
-#define RIO_GET_PORT_STATS (RIOC | 195)
-
-#endif /* __rioioctl_h__ */
diff --git a/drivers/staging/generic_serial/rio/rioparam.c b/drivers/staging/generic_serial/rio/rioparam.c
deleted file mode 100644
index 6415f3f32a7..00000000000
--- a/drivers/staging/generic_serial/rio/rioparam.c
+++ /dev/null
@@ -1,663 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : rioparam.c
-** SID : 1.3
-** Last Modified : 11/6/98 10:33:45
-** Retrieved : 11/6/98 10:33:50
-**
-** ident @(#)rioparam.c 1.3
-**
-** -----------------------------------------------------------------------------
-*/
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/tty.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/string.h>
-#include <asm/uaccess.h>
-
-#include <linux/termios.h>
-#include <linux/serial.h>
-
-#include <linux/generic_serial.h>
-
-
-#include "linux_compat.h"
-#include "rio_linux.h"
-#include "pkt.h"
-#include "daemon.h"
-#include "rio.h"
-#include "riospace.h"
-#include "cmdpkt.h"
-#include "map.h"
-#include "rup.h"
-#include "port.h"
-#include "riodrvr.h"
-#include "rioinfo.h"
-#include "func.h"
-#include "errors.h"
-#include "pci.h"
-
-#include "parmmap.h"
-#include "unixrup.h"
-#include "board.h"
-#include "host.h"
-#include "phb.h"
-#include "link.h"
-#include "cmdblk.h"
-#include "route.h"
-#include "cirrus.h"
-#include "rioioctl.h"
-#include "param.h"
-
-
-
-/*
-** The Scam, based on email from jeremyr@bugs.specialix.co.uk....
-**
-** To send a command on a particular port, you put a packet with the
-** command bit set onto the port. The command bit is in the len field,
-** and gets ORed in with the actual byte count.
-**
-** When you send a packet with the command bit set the first
-** data byte (data[0]) is interpreted as the command to execute.
-** It also governs what data structure overlay should accompany the packet.
-** Commands are defined in cirrus/cirrus.h
-**
-** If you want the command to pre-empt data already on the queue for the
-** port, set the pre-emptive bit in conjunction with the command bit.
-** It is not defined what will happen if you set the preemptive bit
-** on a packet that is NOT a command.
-**
-** Pre-emptive commands should be queued at the head of the queue using
-** add_start(), whereas normal commands and data are enqueued using
-** add_end().
-**
-** Most commands do not use the remaining bytes in the data array. The
-** exceptions are OPEN, MOPEN and CONFIG. (NB. As with the SI, CONFIG and
-** OPEN are currently analogous.) With these three commands the following
-** 11 data bytes are all used to pass config information such as baud rate etc.
-** The fields are also defined in cirrus.h. Some contain straightforward
-** information such as the transmit XON character. Two contain the transmit and
-** receive baud rates respectively. For most baud rates there is a direct
-** mapping between the rates defined in <sys/termio.h> and the byte in the
-** packet. There are additional (non UNIX-standard) rates defined in
-** /u/dos/rio/cirrus/h/brates.h.
-**
-** The rest of the data fields contain approximations to the Cirrus registers
-** that are used to program the number of bits etc. Each register's bit fields are
-** defined in cirrus.h.
-**
-** NB. Only use those bits that are defined as being driver specific
-** or common to the RTA and the driver.
-**
-** All commands going from RTA->Host will be dealt with by the Host code - you
-** will never see them. As with the SI there will be three fields to look out
-** for in each phb (not yet defined - needs defining a.s.a.p).
-**
-** modem_status - current state of handshake pins.
-**
-** port_status - current port status - equivalent to hi_stat for SI, indicates
-** if port is IDLE_OPEN, IDLE_CLOSED etc.
-**
-** break_status - bit X set if break has been received.
-**
-** Happy hacking.
-**
-*/
-
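Editor's note: the comment above states that a command packet is just a packet whose len field has the command bit ORed into the byte count, with data[0] holding the command code. The sketch below illustrates only that encoding; DEMO_CMD_BIT, DEMO_LEN_MASK and the command values are stand-ins invented here, not the driver's real PKT_CMD_BIT/PKT_LEN_MASK values from the deleted headers.

/* Illustrative sketch: build and inspect a "command bit in len" packet. */
#include <stdio.h>

#define DEMO_CMD_BIT   0x80            /* stand-in for PKT_CMD_BIT  */
#define DEMO_LEN_MASK  0x7f            /* stand-in for PKT_LEN_MASK */

struct demo_pkt {
    unsigned char data[12];
    unsigned char len;                 /* byte count | optional command bit */
};

int main(void)
{
    struct demo_pkt pkt;

    /* Build something shaped like the WFLUSH marker queued earlier:
     * data[0] is the command, and the length is "2 | command bit". */
    pkt.data[0] = 0x2a;                /* hypothetical command code */
    pkt.data[1] = 3;                   /* hypothetical phb number   */
    pkt.len = DEMO_CMD_BIT | 2;

    if (pkt.len & DEMO_CMD_BIT)
        printf("command packet, cmd=0x%02x, %d data bytes\n",
               pkt.data[0], pkt.len & DEMO_LEN_MASK);
    else
        printf("data packet, %d bytes\n", pkt.len & DEMO_LEN_MASK);
    return 0;
}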
-/*
-** RIOParam is used to open or configure a port. You pass it a PortP,
-** which will have a tty struct attached to it. You also pass a command,
-** either OPEN or CONFIG. The port's setup is taken from the t_ fields
-** of the tty struct inside the PortP, and the port is either opened
-** or re-configured. You must also tell RIOParam if the device is a modem
-** device or not (i.e. top bit of minor number set or clear - take special
-** care when deciding on this!).
-** RIOParam neither flushes nor waits for drain, and is NOT preemptive.
-**
-** RIOParam assumes it will be called at splrio(), and also assumes
-** that CookMode is set correctly in the port structure.
-**
-** NB. for MPX
-** tty lock must NOT have been previously acquired.
-*/
-int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
-{
- struct tty_struct *TtyP;
- int retval;
- struct phb_param __iomem *phb_param_ptr;
- struct PKT __iomem *PacketP;
- int res;
- u8 Cor1 = 0, Cor2 = 0, Cor4 = 0, Cor5 = 0;
- u8 TxXon = 0, TxXoff = 0, RxXon = 0, RxXoff = 0;
- u8 LNext = 0, TxBaud = 0, RxBaud = 0;
- int retries = 0xff;
- unsigned long flags;
-
- func_enter();
-
- TtyP = PortP->gs.port.tty;
-
- rio_dprintk(RIO_DEBUG_PARAM, "RIOParam: Port:%d cmd:%d Modem:%d SleepFlag:%d Mapped: %d, tty=%p\n", PortP->PortNum, cmd, Modem, SleepFlag, PortP->Mapped, TtyP);
-
- if (!TtyP) {
- rio_dprintk(RIO_DEBUG_PARAM, "Can't call rioparam with null tty.\n");
-
- func_exit();
-
- return RIO_FAIL;
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- if (cmd == RIOC_OPEN) {
- /*
- ** If the port is set to store or lock the parameters, and it is
- ** paramed with OPEN, we want to restore the saved port termio, but
- ** only if StoredTermio has been saved, i.e. NOT 1st open after reboot.
- */
- }
-
- /*
- ** wait for space
- */
- while (!(res = can_add_transmit(&PacketP, PortP)) || (PortP->InUse != NOT_INUSE)) {
- if (retries-- <= 0) {
- break;
- }
- if (PortP->InUse != NOT_INUSE) {
- rio_dprintk(RIO_DEBUG_PARAM, "Port IN_USE for pre-emptive command\n");
- }
-
- if (!res) {
- rio_dprintk(RIO_DEBUG_PARAM, "Port has no space on transmit queue\n");
- }
-
- if (SleepFlag != OK_TO_SLEEP) {
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- func_exit();
-
- return RIO_FAIL;
- }
-
- rio_dprintk(RIO_DEBUG_PARAM, "wait for can_add_transmit\n");
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- retval = RIODelay(PortP, HUNDRED_MS);
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- if (retval == RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_PARAM, "wait for can_add_transmit broken by signal\n");
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- func_exit();
- return -EINTR;
- }
- if (PortP->State & RIO_DELETED) {
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- func_exit();
- return 0;
- }
- }
-
- if (!res) {
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- func_exit();
-
- return RIO_FAIL;
- }
-
- rio_dprintk(RIO_DEBUG_PARAM, "can_add_transmit() returns %x\n", res);
- rio_dprintk(RIO_DEBUG_PARAM, "Packet is %p\n", PacketP);
-
- phb_param_ptr = (struct phb_param __iomem *) PacketP->data;
-
-
- switch (TtyP->termios->c_cflag & CSIZE) {
- case CS5:
- {
- rio_dprintk(RIO_DEBUG_PARAM, "5 bit data\n");
- Cor1 |= RIOC_COR1_5BITS;
- break;
- }
- case CS6:
- {
- rio_dprintk(RIO_DEBUG_PARAM, "6 bit data\n");
- Cor1 |= RIOC_COR1_6BITS;
- break;
- }
- case CS7:
- {
- rio_dprintk(RIO_DEBUG_PARAM, "7 bit data\n");
- Cor1 |= RIOC_COR1_7BITS;
- break;
- }
- case CS8:
- {
- rio_dprintk(RIO_DEBUG_PARAM, "8 bit data\n");
- Cor1 |= RIOC_COR1_8BITS;
- break;
- }
- }
-
- if (TtyP->termios->c_cflag & CSTOPB) {
- rio_dprintk(RIO_DEBUG_PARAM, "2 stop bits\n");
- Cor1 |= RIOC_COR1_2STOP;
- } else {
- rio_dprintk(RIO_DEBUG_PARAM, "1 stop bit\n");
- Cor1 |= RIOC_COR1_1STOP;
- }
-
- if (TtyP->termios->c_cflag & PARENB) {
- rio_dprintk(RIO_DEBUG_PARAM, "Enable parity\n");
- Cor1 |= RIOC_COR1_NORMAL;
- } else {
- rio_dprintk(RIO_DEBUG_PARAM, "Disable parity\n");
- Cor1 |= RIOC_COR1_NOP;
- }
- if (TtyP->termios->c_cflag & PARODD) {
- rio_dprintk(RIO_DEBUG_PARAM, "Odd parity\n");
- Cor1 |= RIOC_COR1_ODD;
- } else {
- rio_dprintk(RIO_DEBUG_PARAM, "Even parity\n");
- Cor1 |= RIOC_COR1_EVEN;
- }
-
- /*
- ** COR 2
- */
- if (TtyP->termios->c_iflag & IXON) {
- rio_dprintk(RIO_DEBUG_PARAM, "Enable start/stop output control\n");
- Cor2 |= RIOC_COR2_IXON;
- } else {
- if (PortP->Config & RIO_IXON) {
- rio_dprintk(RIO_DEBUG_PARAM, "Force enable start/stop output control\n");
- Cor2 |= RIOC_COR2_IXON;
- } else
- rio_dprintk(RIO_DEBUG_PARAM, "IXON has been disabled.\n");
- }
-
- if (TtyP->termios->c_iflag & IXANY) {
- if (PortP->Config & RIO_IXANY) {
- rio_dprintk(RIO_DEBUG_PARAM, "Enable any key to restart output\n");
- Cor2 |= RIOC_COR2_IXANY;
- } else
- rio_dprintk(RIO_DEBUG_PARAM, "IXANY has been disabled due to sanity reasons.\n");
- }
-
- if (TtyP->termios->c_iflag & IXOFF) {
- rio_dprintk(RIO_DEBUG_PARAM, "Enable start/stop input control 2\n");
- Cor2 |= RIOC_COR2_IXOFF;
- }
-
- if (TtyP->termios->c_cflag & HUPCL) {
- rio_dprintk(RIO_DEBUG_PARAM, "Hangup on last close\n");
- Cor2 |= RIOC_COR2_HUPCL;
- }
-
- if (C_CRTSCTS(TtyP)) {
- rio_dprintk(RIO_DEBUG_PARAM, "Rx hardware flow control enabled\n");
- Cor2 |= RIOC_COR2_CTSFLOW;
- Cor2 |= RIOC_COR2_RTSFLOW;
- } else {
- rio_dprintk(RIO_DEBUG_PARAM, "Rx hardware flow control disabled\n");
- Cor2 &= ~RIOC_COR2_CTSFLOW;
- Cor2 &= ~RIOC_COR2_RTSFLOW;
- }
-
-
- if (TtyP->termios->c_cflag & CLOCAL) {
- rio_dprintk(RIO_DEBUG_PARAM, "Local line\n");
- } else {
- rio_dprintk(RIO_DEBUG_PARAM, "Possible Modem line\n");
- }
-
- /*
- ** COR 4 (there is no COR 3)
- */
- if (TtyP->termios->c_iflag & IGNBRK) {
- rio_dprintk(RIO_DEBUG_PARAM, "Ignore break condition\n");
- Cor4 |= RIOC_COR4_IGNBRK;
- }
- if (!(TtyP->termios->c_iflag & BRKINT)) {
- rio_dprintk(RIO_DEBUG_PARAM, "Break generates NULL condition\n");
- Cor4 |= RIOC_COR4_NBRKINT;
- } else {
- rio_dprintk(RIO_DEBUG_PARAM, "Interrupt on break condition\n");
- }
-
- if (TtyP->termios->c_iflag & INLCR) {
- rio_dprintk(RIO_DEBUG_PARAM, "Map newline to carriage return on input\n");
- Cor4 |= RIOC_COR4_INLCR;
- }
-
- if (TtyP->termios->c_iflag & IGNCR) {
- rio_dprintk(RIO_DEBUG_PARAM, "Ignore carriage return on input\n");
- Cor4 |= RIOC_COR4_IGNCR;
- }
-
- if (TtyP->termios->c_iflag & ICRNL) {
- rio_dprintk(RIO_DEBUG_PARAM, "Map carriage return to newline on input\n");
- Cor4 |= RIOC_COR4_ICRNL;
- }
- if (TtyP->termios->c_iflag & IGNPAR) {
- rio_dprintk(RIO_DEBUG_PARAM, "Ignore characters with parity errors\n");
- Cor4 |= RIOC_COR4_IGNPAR;
- }
- if (TtyP->termios->c_iflag & PARMRK) {
- rio_dprintk(RIO_DEBUG_PARAM, "Mark parity errors\n");
- Cor4 |= RIOC_COR4_PARMRK;
- }
-
- /*
- ** Set the RAISEMOD flag to ensure that the modem lines are raised
- ** on reception of a config packet.
- ** The download code handles the zero baud condition.
- */
- Cor4 |= RIOC_COR4_RAISEMOD;
-
- /*
- ** COR 5
- */
-
- Cor5 = RIOC_COR5_CMOE;
-
- /*
- ** Set to monitor tbusy/tstop (or not).
- */
-
- if (PortP->MonitorTstate)
- Cor5 |= RIOC_COR5_TSTATE_ON;
- else
- Cor5 |= RIOC_COR5_TSTATE_OFF;
-
- /*
- ** Could set LNE here if you wanted LNext processing. SVR4 will use it.
- */
- if (TtyP->termios->c_iflag & ISTRIP) {
- rio_dprintk(RIO_DEBUG_PARAM, "Strip input characters\n");
- if (!(PortP->State & RIO_TRIAD_MODE)) {
- Cor5 |= RIOC_COR5_ISTRIP;
- }
- }
-
- if (TtyP->termios->c_oflag & ONLCR) {
- rio_dprintk(RIO_DEBUG_PARAM, "Map newline to carriage-return, newline on output\n");
- if (PortP->CookMode == COOK_MEDIUM)
- Cor5 |= RIOC_COR5_ONLCR;
- }
- if (TtyP->termios->c_oflag & OCRNL) {
- rio_dprintk(RIO_DEBUG_PARAM, "Map carriage return to newline on output\n");
- if (PortP->CookMode == COOK_MEDIUM)
- Cor5 |= RIOC_COR5_OCRNL;
- }
- if ((TtyP->termios->c_oflag & TABDLY) == TAB3) {
- rio_dprintk(RIO_DEBUG_PARAM, "Tab delay 3 set\n");
- if (PortP->CookMode == COOK_MEDIUM)
- Cor5 |= RIOC_COR5_TAB3;
- }
-
- /*
- ** Flow control bytes.
- */
- TxXon = TtyP->termios->c_cc[VSTART];
- TxXoff = TtyP->termios->c_cc[VSTOP];
- RxXon = TtyP->termios->c_cc[VSTART];
- RxXoff = TtyP->termios->c_cc[VSTOP];
- /*
- ** LNEXT byte
- */
- LNext = 0;
-
- /*
- ** Baud rate bytes
- */
- rio_dprintk(RIO_DEBUG_PARAM, "Mapping of rx/tx baud %x (%x)\n", TtyP->termios->c_cflag, CBAUD);
-
- switch (TtyP->termios->c_cflag & CBAUD) {
-#define e(b) case B ## b : RxBaud = TxBaud = RIO_B ## b ;break
- e(50);
- e(75);
- e(110);
- e(134);
- e(150);
- e(200);
- e(300);
- e(600);
- e(1200);
- e(1800);
- e(2400);
- e(4800);
- e(9600);
- e(19200);
- e(38400);
- e(57600);
- e(115200); /* e(230400);e(460800); e(921600); */
- }
-
- rio_dprintk(RIO_DEBUG_PARAM, "tx baud 0x%x, rx baud 0x%x\n", TxBaud, RxBaud);
-
-
- /*
- ** Leftovers
- */
- if (TtyP->termios->c_cflag & CREAD)
- rio_dprintk(RIO_DEBUG_PARAM, "Enable receiver\n");
-#ifdef RCV1EN
- if (TtyP->termios->c_cflag & RCV1EN)
- rio_dprintk(RIO_DEBUG_PARAM, "RCV1EN (?)\n");
-#endif
-#ifdef XMT1EN
- if (TtyP->termios->c_cflag & XMT1EN)
- rio_dprintk(RIO_DEBUG_PARAM, "XMT1EN (?)\n");
-#endif
- if (TtyP->termios->c_lflag & ISIG)
- rio_dprintk(RIO_DEBUG_PARAM, "Input character signal generating enabled\n");
- if (TtyP->termios->c_lflag & ICANON)
- rio_dprintk(RIO_DEBUG_PARAM, "Canonical input: erase and kill enabled\n");
- if (TtyP->termios->c_lflag & XCASE)
- rio_dprintk(RIO_DEBUG_PARAM, "Canonical upper/lower presentation\n");
- if (TtyP->termios->c_lflag & ECHO)
- rio_dprintk(RIO_DEBUG_PARAM, "Enable input echo\n");
- if (TtyP->termios->c_lflag & ECHOE)
- rio_dprintk(RIO_DEBUG_PARAM, "Enable echo erase\n");
- if (TtyP->termios->c_lflag & ECHOK)
- rio_dprintk(RIO_DEBUG_PARAM, "Enable echo kill\n");
- if (TtyP->termios->c_lflag & ECHONL)
- rio_dprintk(RIO_DEBUG_PARAM, "Enable echo newline\n");
- if (TtyP->termios->c_lflag & NOFLSH)
- rio_dprintk(RIO_DEBUG_PARAM, "Disable flush after interrupt or quit\n");
-#ifdef TOSTOP
- if (TtyP->termios->c_lflag & TOSTOP)
- rio_dprintk(RIO_DEBUG_PARAM, "Send SIGTTOU for background output\n");
-#endif
-#ifdef XCLUDE
- if (TtyP->termios->c_lflag & XCLUDE)
- rio_dprintk(RIO_DEBUG_PARAM, "Exclusive use of this line\n");
-#endif
- if (TtyP->termios->c_iflag & IUCLC)
- rio_dprintk(RIO_DEBUG_PARAM, "Map uppercase to lowercase on input\n");
- if (TtyP->termios->c_oflag & OPOST)
- rio_dprintk(RIO_DEBUG_PARAM, "Enable output post-processing\n");
- if (TtyP->termios->c_oflag & OLCUC)
- rio_dprintk(RIO_DEBUG_PARAM, "Map lowercase to uppercase on output\n");
- if (TtyP->termios->c_oflag & ONOCR)
- rio_dprintk(RIO_DEBUG_PARAM, "No carriage return output at column 0\n");
- if (TtyP->termios->c_oflag & ONLRET)
- rio_dprintk(RIO_DEBUG_PARAM, "Newline performs carriage return function\n");
- if (TtyP->termios->c_oflag & OFILL)
- rio_dprintk(RIO_DEBUG_PARAM, "Use fill characters for delay\n");
- if (TtyP->termios->c_oflag & OFDEL)
- rio_dprintk(RIO_DEBUG_PARAM, "Fill character is DEL\n");
- if (TtyP->termios->c_oflag & NLDLY)
- rio_dprintk(RIO_DEBUG_PARAM, "Newline delay set\n");
- if (TtyP->termios->c_oflag & CRDLY)
- rio_dprintk(RIO_DEBUG_PARAM, "Carriage return delay set\n");
- if (TtyP->termios->c_oflag & TABDLY)
- rio_dprintk(RIO_DEBUG_PARAM, "Tab delay set\n");
- /*
- ** These things are kind of useful in a later life!
- */
- PortP->Cor2Copy = Cor2;
-
- if (PortP->State & RIO_DELETED) {
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- func_exit();
-
- return RIO_FAIL;
- }
-
- /*
- ** Actually write the info into the packet to be sent
- */
- writeb(cmd, &phb_param_ptr->Cmd);
- writeb(Cor1, &phb_param_ptr->Cor1);
- writeb(Cor2, &phb_param_ptr->Cor2);
- writeb(Cor4, &phb_param_ptr->Cor4);
- writeb(Cor5, &phb_param_ptr->Cor5);
- writeb(TxXon, &phb_param_ptr->TxXon);
- writeb(RxXon, &phb_param_ptr->RxXon);
- writeb(TxXoff, &phb_param_ptr->TxXoff);
- writeb(RxXoff, &phb_param_ptr->RxXoff);
- writeb(LNext, &phb_param_ptr->LNext);
- writeb(TxBaud, &phb_param_ptr->TxBaud);
- writeb(RxBaud, &phb_param_ptr->RxBaud);
-
- /*
- ** Set the length/command field
- */
- writeb(12 | PKT_CMD_BIT, &PacketP->len);
-
- /*
- ** The packet is formed - now, whack it off
- ** to its final destination:
- */
- add_transmit(PortP);
- /*
- ** Count characters transmitted for port statistics reporting
- */
- if (PortP->statsGather)
- PortP->txchars += 12;
-
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
-
- rio_dprintk(RIO_DEBUG_PARAM, "add_transmit returned.\n");
- /*
- ** job done.
- */
- func_exit();
-
- return 0;
-}
-
-
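Editor's note: RIOParam() above translates the tty's termios c_cflag into the COR register bytes written into the phb_param packet. The sketch below shows only the CSIZE/CSTOPB/PARENB/PARODD part of that translation as a standalone user-space function; the DEMO_COR1_* bit values are invented for illustration and do not claim to match the driver's RIOC_COR1_* definitions, and the real code also handles the "no parity" and "even parity" cases explicitly.

/* Illustrative sketch: map a termios c_cflag to a COR1-style config byte. */
#include <stdio.h>
#include <termios.h>

#define DEMO_COR1_5BITS  0x00
#define DEMO_COR1_6BITS  0x01
#define DEMO_COR1_7BITS  0x02
#define DEMO_COR1_8BITS  0x03
#define DEMO_COR1_2STOP  0x08
#define DEMO_COR1_PARITY 0x20
#define DEMO_COR1_ODD    0x40

static unsigned char cflag_to_cor1(tcflag_t cflag)
{
    unsigned char cor1 = 0;

    switch (cflag & CSIZE) {          /* character size */
    case CS5: cor1 |= DEMO_COR1_5BITS; break;
    case CS6: cor1 |= DEMO_COR1_6BITS; break;
    case CS7: cor1 |= DEMO_COR1_7BITS; break;
    case CS8: cor1 |= DEMO_COR1_8BITS; break;
    }
    if (cflag & CSTOPB)               /* two stop bits */
        cor1 |= DEMO_COR1_2STOP;
    if (cflag & PARENB) {             /* parity enabled */
        cor1 |= DEMO_COR1_PARITY;
        if (cflag & PARODD)
            cor1 |= DEMO_COR1_ODD;
    }
    return cor1;
}

int main(void)
{
    printf("8N1 -> 0x%02x\n", cflag_to_cor1(CS8));
    printf("7E2 -> 0x%02x\n", cflag_to_cor1(CS7 | CSTOPB | PARENB));
    return 0;
}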
-/*
-** We can add another packet to a transmit queue if the packet pointer pointed
-** to by the TxAdd pointer has PKT_IN_USE clear in its address.
-*/
-int can_add_transmit(struct PKT __iomem **PktP, struct Port *PortP)
-{
- struct PKT __iomem *tp;
-
- *PktP = tp = (struct PKT __iomem *) RIO_PTR(PortP->Caddr, readw(PortP->TxAdd));
-
- return !((unsigned long) tp & PKT_IN_USE);
-}
-
-/*
-** To add a packet to the queue, you set the PKT_IN_USE bit in the address,
-** and then move the TxAdd pointer along one position to point to the next
-** packet pointer. You must wrap the pointer from the end back to the start.
-*/
-void add_transmit(struct Port *PortP)
-{
- if (readw(PortP->TxAdd) & PKT_IN_USE) {
- rio_dprintk(RIO_DEBUG_PARAM, "add_transmit: Packet has been stolen!");
- }
- writew(readw(PortP->TxAdd) | PKT_IN_USE, PortP->TxAdd);
- PortP->TxAdd = (PortP->TxAdd == PortP->TxEnd) ? PortP->TxStart : PortP->TxAdd + 1;
- writew(RIO_OFF(PortP->Caddr, PortP->TxAdd), &PortP->PhbP->tx_add);
-}
-
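Editor's note: the pair can_add_transmit()/add_transmit() above implements a producer over a ring of slot words in which the low bit doubles as a PKT_IN_USE flag, and the add pointer wraps from the end of the ring back to the start. The sketch below reproduces only that convention in ordinary host memory; the ring size, offsets and names are hypothetical, and there is no card, readw()/writew() or consumer side here.

/* Illustrative sketch: producer side of a slot ring with an in-use bit. */
#include <stdio.h>

#define RING_SLOTS   4
#define SLOT_IN_USE  0x1                  /* stand-in for PKT_IN_USE */

static unsigned short ring[RING_SLOTS];   /* packet offsets, bit 0 = in use */
static unsigned short *add_ptr = ring;    /* stand-in for PortP->TxAdd      */

static int can_add(void)
{
    return !(*add_ptr & SLOT_IN_USE);     /* free only if bit 0 is clear */
}

static void add(void)
{
    *add_ptr |= SLOT_IN_USE;              /* hand the slot to the consumer */
    add_ptr = (add_ptr == &ring[RING_SLOTS - 1]) ? ring : add_ptr + 1;
}

int main(void)
{
    int queued = 0;

    /* Pretend the slots initially hold even (bit-0-clear) packet offsets. */
    for (int i = 0; i < RING_SLOTS; i++)
        ring[i] = (unsigned short)(0x100 + 2 * i);

    while (can_add()) {                   /* stops once the ring is full */
        add();
        queued++;
    }
    printf("queued %d packets before the ring filled\n", queued);
    return 0;
}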
-/****************************************
- * Put a packet onto the end of the
- * free list
- ****************************************/
-void put_free_end(struct Host *HostP, struct PKT __iomem *PktP)
-{
- struct rio_free_list __iomem *tmp_pointer;
- unsigned short old_end, new_end;
- unsigned long flags;
-
- rio_spin_lock_irqsave(&HostP->HostLock, flags);
-
- /*************************************************
- * Put a packet back onto the back of the free list
- *
- ************************************************/
-
- rio_dprintk(RIO_DEBUG_PFE, "put_free_end(PktP=%p)\n", PktP);
-
- if ((old_end = readw(&HostP->ParmMapP->free_list_end)) != TPNULL) {
- new_end = RIO_OFF(HostP->Caddr, PktP);
- tmp_pointer = (struct rio_free_list __iomem *) RIO_PTR(HostP->Caddr, old_end);
- writew(new_end, &tmp_pointer->next);
- writew(old_end, &((struct rio_free_list __iomem *) PktP)->prev);
- writew(TPNULL, &((struct rio_free_list __iomem *) PktP)->next);
- writew(new_end, &HostP->ParmMapP->free_list_end);
- } else { /* First packet on the free list - this should never happen! */
- rio_dprintk(RIO_DEBUG_PFE, "put_free_end(): This should never happen\n");
- writew(RIO_OFF(HostP->Caddr, PktP), &HostP->ParmMapP->free_list_end);
- tmp_pointer = (struct rio_free_list __iomem *) PktP;
- writew(TPNULL, &tmp_pointer->prev);
- writew(TPNULL, &tmp_pointer->next);
- }
- rio_dprintk(RIO_DEBUG_CMD, "Before unlock: %p\n", &HostP->HostLock);
- rio_spin_unlock_irqrestore(&HostP->HostLock, flags);
-}
-
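Editor's note: put_free_end() above appends a packet to the tail of a doubly linked free list whose links are 16-bit offsets into card memory, with TPNULL marking "no packet". The sketch below models the same append using array indices as the offsets; the pool size, PNULL value and names are invented, and the locking and I/O accessors of the real routine are deliberately left out.

/* Illustrative sketch: append to the tail of an offset-linked free list. */
#include <stdio.h>

#define NPKTS  8
#define PNULL  0xffff                     /* stand-in for TPNULL */

struct demo_pkt { unsigned short prev, next; };

static struct demo_pkt pool[NPKTS];
static unsigned short free_end = PNULL;   /* offset of the list tail */

static void put_free_end(unsigned short pkt)
{
    if (free_end != PNULL) {
        pool[free_end].next = pkt;        /* old tail points at the newcomer */
        pool[pkt].prev = free_end;
        pool[pkt].next = PNULL;
        free_end = pkt;
    } else {
        /* Empty list: the new packet is both head and tail. */
        pool[pkt].prev = PNULL;
        pool[pkt].next = PNULL;
        free_end = pkt;
    }
}

int main(void)
{
    for (unsigned short i = 0; i < NPKTS; i++)
        put_free_end(i);

    /* Walk backwards from the tail to show the list is intact. */
    for (unsigned short p = free_end; p != PNULL; p = pool[p].prev)
        printf("%u ", p);
    printf("\n");
    return 0;
}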
-/*
-** can_remove_receive(PktP,P) returns non-zero if PKT_IN_USE is set
-** for the next packet on the queue. It will also set PktP to point to the
-** relevant packet, [having cleared the PKT_IN_USE bit]. If PKT_IN_USE is clear,
-** then can_remove_receive() returns 0.
-*/
-int can_remove_receive(struct PKT __iomem **PktP, struct Port *PortP)
-{
- if (readw(PortP->RxRemove) & PKT_IN_USE) {
- *PktP = (struct PKT __iomem *) RIO_PTR(PortP->Caddr, readw(PortP->RxRemove) & ~PKT_IN_USE);
- return 1;
- }
- return 0;
-}
-
-/*
-** To remove a packet from the receive queue you clear its PKT_IN_USE bit,
-** and then bump the pointers. Once the pointers get to the end, they must
-** be wrapped back to the start.
-*/
-void remove_receive(struct Port *PortP)
-{
- writew(readw(PortP->RxRemove) & ~PKT_IN_USE, PortP->RxRemove);
- PortP->RxRemove = (PortP->RxRemove == PortP->RxEnd) ? PortP->RxStart : PortP->RxRemove + 1;
- writew(RIO_OFF(PortP->Caddr, PortP->RxRemove), &PortP->PhbP->rx_remove);
-}
diff --git a/drivers/staging/generic_serial/rio/rioroute.c b/drivers/staging/generic_serial/rio/rioroute.c
deleted file mode 100644
index 8757378e832..00000000000
--- a/drivers/staging/generic_serial/rio/rioroute.c
+++ /dev/null
@@ -1,1039 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : rioroute.c
-** SID : 1.3
-** Last Modified : 11/6/98 10:33:46
-** Retrieved : 11/6/98 10:33:50
-**
-** ident @(#)rioroute.c 1.3
-**
-** -----------------------------------------------------------------------------
-*/
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/string.h>
-#include <asm/uaccess.h>
-
-#include <linux/termios.h>
-#include <linux/serial.h>
-
-#include <linux/generic_serial.h>
-
-
-#include "linux_compat.h"
-#include "rio_linux.h"
-#include "pkt.h"
-#include "daemon.h"
-#include "rio.h"
-#include "riospace.h"
-#include "cmdpkt.h"
-#include "map.h"
-#include "rup.h"
-#include "port.h"
-#include "riodrvr.h"
-#include "rioinfo.h"
-#include "func.h"
-#include "errors.h"
-#include "pci.h"
-
-#include "parmmap.h"
-#include "unixrup.h"
-#include "board.h"
-#include "host.h"
-#include "phb.h"
-#include "link.h"
-#include "cmdblk.h"
-#include "route.h"
-#include "cirrus.h"
-#include "rioioctl.h"
-#include "param.h"
-
-static int RIOCheckIsolated(struct rio_info *, struct Host *, unsigned int);
-static int RIOIsolate(struct rio_info *, struct Host *, unsigned int);
-static int RIOCheck(struct Host *, unsigned int);
-static void RIOConCon(struct rio_info *, struct Host *, unsigned int, unsigned int, unsigned int, unsigned int, int);
-
-
-/*
-** Incoming on the ROUTE_RUP
-** I wrote this while I was tired. Forgive me.
-*/
-int RIORouteRup(struct rio_info *p, unsigned int Rup, struct Host *HostP, struct PKT __iomem * PacketP)
-{
- struct PktCmd __iomem *PktCmdP = (struct PktCmd __iomem *) PacketP->data;
- struct PktCmd_M *PktReplyP;
- struct CmdBlk *CmdBlkP;
- struct Port *PortP;
- struct Map *MapP;
- struct Top *TopP;
- int ThisLink, ThisLinkMin, ThisLinkMax;
- int port;
- int Mod, Mod1, Mod2;
- unsigned short RtaType;
- unsigned int RtaUniq;
- unsigned int ThisUnit, ThisUnit2; /* 2 ids to accommodate 16 port RTA */
- unsigned int OldUnit, NewUnit, OldLink, NewLink;
- char *MyType, *MyName;
- int Lies;
- unsigned long flags;
-
- /*
- ** Is this unit telling us its current link topology?
- */
- if (readb(&PktCmdP->Command) == ROUTE_TOPOLOGY) {
- MapP = HostP->Mapping;
-
- /*
- ** The packet can be sent either by the host or by an RTA.
- ** If it comes from the host, then we need to fill in the
- ** Topology array in the host structure. If it came in
- ** from an RTA then we need to fill in the Mapping structure's
- ** Topology array for the unit.
- */
- if (Rup >= (unsigned short) MAX_RUP) {
- ThisUnit = HOST_ID;
- TopP = HostP->Topology;
- MyType = "Host";
- MyName = HostP->Name;
- ThisLinkMin = ThisLinkMax = Rup - MAX_RUP;
- } else {
- ThisUnit = Rup + 1;
- TopP = HostP->Mapping[Rup].Topology;
- MyType = "RTA";
- MyName = HostP->Mapping[Rup].Name;
- ThisLinkMin = 0;
- ThisLinkMax = LINKS_PER_UNIT - 1;
- }
-
- /*
- ** Lies will not be tolerated.
- ** If any pair of links claim to be connected to the same
- ** place, then ignore this packet completely.
- */
- Lies = 0;
- for (ThisLink = ThisLinkMin + 1; ThisLink <= ThisLinkMax; ThisLink++) {
- /*
- ** it won't lie about network interconnect, total disconnects
- ** and no-IDs. (or at least, it doesn't *matter* if it does)
- */
- if (readb(&PktCmdP->RouteTopology[ThisLink].Unit) > (unsigned short) MAX_RUP)
- continue;
-
- for (NewLink = ThisLinkMin; NewLink < ThisLink; NewLink++) {
- if ((readb(&PktCmdP->RouteTopology[ThisLink].Unit) == readb(&PktCmdP->RouteTopology[NewLink].Unit)) && (readb(&PktCmdP->RouteTopology[ThisLink].Link) == readb(&PktCmdP->RouteTopology[NewLink].Link))) {
- Lies++;
- }
- }
- }
-
- if (Lies) {
- rio_dprintk(RIO_DEBUG_ROUTE, "LIES! DAMN LIES! %d LIES!\n", Lies);
- rio_dprintk(RIO_DEBUG_ROUTE, "%d:%c %d:%c %d:%c %d:%c\n",
- readb(&PktCmdP->RouteTopology[0].Unit),
- 'A' + readb(&PktCmdP->RouteTopology[0].Link),
- readb(&PktCmdP->RouteTopology[1].Unit),
- 'A' + readb(&PktCmdP->RouteTopology[1].Link), readb(&PktCmdP->RouteTopology[2].Unit), 'A' + readb(&PktCmdP->RouteTopology[2].Link), readb(&PktCmdP->RouteTopology[3].Unit), 'A' + readb(&PktCmdP->RouteTopology[3].Link));
- return 1;
- }
-
- /*
- ** now, process each link.
- */
- for (ThisLink = ThisLinkMin; ThisLink <= ThisLinkMax; ThisLink++) {
- /*
- ** this is what it was connected to
- */
- OldUnit = TopP[ThisLink].Unit;
- OldLink = TopP[ThisLink].Link;
-
- /*
- ** this is what it is now connected to
- */
- NewUnit = readb(&PktCmdP->RouteTopology[ThisLink].Unit);
- NewLink = readb(&PktCmdP->RouteTopology[ThisLink].Link);
-
- if (OldUnit != NewUnit || OldLink != NewLink) {
- /*
- ** something has changed!
- */
-
- if (NewUnit > MAX_RUP && NewUnit != ROUTE_DISCONNECT && NewUnit != ROUTE_NO_ID && NewUnit != ROUTE_INTERCONNECT) {
- rio_dprintk(RIO_DEBUG_ROUTE, "I have a link from %s %s to unit %d:%d - I don't like it.\n", MyType, MyName, NewUnit, NewLink);
- } else {
- /*
- ** put the new values in
- */
- TopP[ThisLink].Unit = NewUnit;
- TopP[ThisLink].Link = NewLink;
-
- RIOSetChange(p);
-
- if (OldUnit <= MAX_RUP) {
- /*
- ** If something has become bust, then re-enable the messages
- */
- if (!p->RIONoMessage)
- RIOConCon(p, HostP, ThisUnit, ThisLink, OldUnit, OldLink, DISCONNECT);
- }
-
- if ((NewUnit <= MAX_RUP) && !p->RIONoMessage)
- RIOConCon(p, HostP, ThisUnit, ThisLink, NewUnit, NewLink, CONNECT);
-
- if (NewUnit == ROUTE_NO_ID)
- rio_dprintk(RIO_DEBUG_ROUTE, "%s %s (%c) is connected to an unconfigured unit.\n", MyType, MyName, 'A' + ThisLink);
-
- if (NewUnit == ROUTE_INTERCONNECT) {
- if (!p->RIONoMessage)
- printk(KERN_DEBUG "rio: %s '%s' (%c) is connected to another network.\n", MyType, MyName, 'A' + ThisLink);
- }
-
- /*
- ** perform an update for 'the other end', so that these messages
- ** only appear once. Only disconnect the other end if it is pointing
- ** at us!
- */
- if (OldUnit == HOST_ID) {
- if (HostP->Topology[OldLink].Unit == ThisUnit && HostP->Topology[OldLink].Link == ThisLink) {
- rio_dprintk(RIO_DEBUG_ROUTE, "SETTING HOST (%c) TO DISCONNECTED!\n", OldLink + 'A');
- HostP->Topology[OldLink].Unit = ROUTE_DISCONNECT;
- HostP->Topology[OldLink].Link = NO_LINK;
- } else {
- rio_dprintk(RIO_DEBUG_ROUTE, "HOST(%c) WAS NOT CONNECTED TO %s (%c)!\n", OldLink + 'A', HostP->Mapping[ThisUnit - 1].Name, ThisLink + 'A');
- }
- } else if (OldUnit <= MAX_RUP) {
- if (HostP->Mapping[OldUnit - 1].Topology[OldLink].Unit == ThisUnit && HostP->Mapping[OldUnit - 1].Topology[OldLink].Link == ThisLink) {
- rio_dprintk(RIO_DEBUG_ROUTE, "SETTING RTA %s (%c) TO DISCONNECTED!\n", HostP->Mapping[OldUnit - 1].Name, OldLink + 'A');
- HostP->Mapping[OldUnit - 1].Topology[OldLink].Unit = ROUTE_DISCONNECT;
- HostP->Mapping[OldUnit - 1].Topology[OldLink].Link = NO_LINK;
- } else {
- rio_dprintk(RIO_DEBUG_ROUTE, "RTA %s (%c) WAS NOT CONNECTED TO %s (%c)\n", HostP->Mapping[OldUnit - 1].Name, OldLink + 'A', HostP->Mapping[ThisUnit - 1].Name, ThisLink + 'A');
- }
- }
- if (NewUnit == HOST_ID) {
- rio_dprintk(RIO_DEBUG_ROUTE, "MARKING HOST (%c) CONNECTED TO %s (%c)\n", NewLink + 'A', MyName, ThisLink + 'A');
- HostP->Topology[NewLink].Unit = ThisUnit;
- HostP->Topology[NewLink].Link = ThisLink;
- } else if (NewUnit <= MAX_RUP) {
- rio_dprintk(RIO_DEBUG_ROUTE, "MARKING RTA %s (%c) CONNECTED TO %s (%c)\n", HostP->Mapping[NewUnit - 1].Name, NewLink + 'A', MyName, ThisLink + 'A');
- HostP->Mapping[NewUnit - 1].Topology[NewLink].Unit = ThisUnit;
- HostP->Mapping[NewUnit - 1].Topology[NewLink].Link = ThisLink;
- }
- }
- RIOSetChange(p);
- RIOCheckIsolated(p, HostP, OldUnit);
- }
- }
- return 1;
- }
-
- /*
- ** The only other command we recognise is a route_request command
- */
- if (readb(&PktCmdP->Command) != ROUTE_REQUEST) {
- rio_dprintk(RIO_DEBUG_ROUTE, "Unknown command %d received on rup %d host %p ROUTE_RUP\n", readb(&PktCmdP->Command), Rup, HostP);
- return 1;
- }
-
- RtaUniq = (readb(&PktCmdP->UniqNum[0])) + (readb(&PktCmdP->UniqNum[1]) << 8) + (readb(&PktCmdP->UniqNum[2]) << 16) + (readb(&PktCmdP->UniqNum[3]) << 24);
-
- /*
- ** Determine if 8 or 16 port RTA
- */
- RtaType = GetUnitType(RtaUniq);
-
- rio_dprintk(RIO_DEBUG_ROUTE, "Received a request for an ID for serial number %x\n", RtaUniq);
-
- Mod = readb(&PktCmdP->ModuleTypes);
- Mod1 = LONYBLE(Mod);
- if (RtaType == TYPE_RTA16) {
- /*
- ** Only one ident is set for a 16 port RTA. To stay compatible
- ** with the 8 port case, set the 2nd ident in Mod2 to the same as Mod1.
- */
- Mod2 = Mod1;
- rio_dprintk(RIO_DEBUG_ROUTE, "Backplane type is %s (all ports)\n", p->RIOModuleTypes[Mod1].Name);
- } else {
- Mod2 = HINYBLE(Mod);
- rio_dprintk(RIO_DEBUG_ROUTE, "Module types are %s (ports 0-3) and %s (ports 4-7)\n", p->RIOModuleTypes[Mod1].Name, p->RIOModuleTypes[Mod2].Name);
- }
-
- /*
- ** try to unhook a command block from the command free list.
- */
- if (!(CmdBlkP = RIOGetCmdBlk())) {
- rio_dprintk(RIO_DEBUG_ROUTE, "No command blocks to route RTA! come back later.\n");
- return 0;
- }
-
- /*
- ** Fill in the default info on the command block
- */
- CmdBlkP->Packet.dest_unit = Rup;
- CmdBlkP->Packet.dest_port = ROUTE_RUP;
- CmdBlkP->Packet.src_unit = HOST_ID;
- CmdBlkP->Packet.src_port = ROUTE_RUP;
- CmdBlkP->Packet.len = PKT_CMD_BIT | 1;
- CmdBlkP->PreFuncP = CmdBlkP->PostFuncP = NULL;
- PktReplyP = (struct PktCmd_M *) CmdBlkP->Packet.data;
-
- if (!RIOBootOk(p, HostP, RtaUniq)) {
- rio_dprintk(RIO_DEBUG_ROUTE, "RTA %x tried to get an ID, but does not belong - FOAD it!\n", RtaUniq);
- PktReplyP->Command = ROUTE_FOAD;
- memcpy(PktReplyP->CommandText, "RT_FOAD", 7);
- RIOQueueCmdBlk(HostP, Rup, CmdBlkP);
- return 1;
- }
-
- /*
- ** Check to see if the RTA is configured for this host
- */
- for (ThisUnit = 0; ThisUnit < MAX_RUP; ThisUnit++) {
- rio_dprintk(RIO_DEBUG_ROUTE, "Entry %d Flags=%s %s UniqueNum=0x%x\n",
- ThisUnit, HostP->Mapping[ThisUnit].Flags & SLOT_IN_USE ? "Slot-In-Use" : "Not In Use", HostP->Mapping[ThisUnit].Flags & SLOT_TENTATIVE ? "Slot-Tentative" : "Not Tentative", HostP->Mapping[ThisUnit].RtaUniqueNum);
-
- /*
- ** We have an entry for it.
- */
- if ((HostP->Mapping[ThisUnit].Flags & (SLOT_IN_USE | SLOT_TENTATIVE)) && (HostP->Mapping[ThisUnit].RtaUniqueNum == RtaUniq)) {
- if (RtaType == TYPE_RTA16) {
- ThisUnit2 = HostP->Mapping[ThisUnit].ID2 - 1;
- rio_dprintk(RIO_DEBUG_ROUTE, "Found unit 0x%x at slots %d+%d\n", RtaUniq, ThisUnit, ThisUnit2);
- } else
- rio_dprintk(RIO_DEBUG_ROUTE, "Found unit 0x%x at slot %d\n", RtaUniq, ThisUnit);
- /*
- ** If we have no knowledge of booting it, then the host has
- ** been re-booted, and so we must kill the RTA, so that it
- ** will be booted again (potentially with new bins)
- ** and it will then re-ask for an ID, which we will service.
- */
- if ((HostP->Mapping[ThisUnit].Flags & SLOT_IN_USE) && !(HostP->Mapping[ThisUnit].Flags & RTA_BOOTED)) {
- if (!(HostP->Mapping[ThisUnit].Flags & MSG_DONE)) {
- if (!p->RIONoMessage)
- printk(KERN_DEBUG "rio: RTA '%s' is being updated.\n", HostP->Mapping[ThisUnit].Name);
- HostP->Mapping[ThisUnit].Flags |= MSG_DONE;
- }
- PktReplyP->Command = ROUTE_FOAD;
- memcpy(PktReplyP->CommandText, "RT_FOAD", 7);
- RIOQueueCmdBlk(HostP, Rup, CmdBlkP);
- return 1;
- }
-
- /*
- ** Send the ID (entry) to this RTA. The ID number is implicit as
- ** the offset into the table. It is worth noting at this stage
- ** that offset zero in the table contains the entries for the
- ** RTA with ID 1!!!!
- */
- PktReplyP->Command = ROUTE_ALLOCATE;
- PktReplyP->IDNum = ThisUnit + 1;
- if (RtaType == TYPE_RTA16) {
- if (HostP->Mapping[ThisUnit].Flags & SLOT_IN_USE)
- /*
- ** Adjust the phb and tx pkt dest_units for 2nd block of 8
- ** only if the RTA has ports associated (SLOT_IN_USE)
- */
- RIOFixPhbs(p, HostP, ThisUnit2);
- PktReplyP->IDNum2 = ThisUnit2 + 1;
- rio_dprintk(RIO_DEBUG_ROUTE, "RTA '%s' has been allocated IDs %d+%d\n", HostP->Mapping[ThisUnit].Name, PktReplyP->IDNum, PktReplyP->IDNum2);
- } else {
- PktReplyP->IDNum2 = ROUTE_NO_ID;
- rio_dprintk(RIO_DEBUG_ROUTE, "RTA '%s' has been allocated ID %d\n", HostP->Mapping[ThisUnit].Name, PktReplyP->IDNum);
- }
- memcpy(PktReplyP->CommandText, "RT_ALLOCAT", 10);
-
- RIOQueueCmdBlk(HostP, Rup, CmdBlkP);
-
- /*
- ** If this is a freshly booted RTA, then we need to re-open
- ** the ports, if any were open, so that data may once more
- ** flow around the system!
- */
- if ((HostP->Mapping[ThisUnit].Flags & RTA_NEWBOOT) && (HostP->Mapping[ThisUnit].SysPort != NO_PORT)) {
- /*
- ** look at the ports associated with this beast and
- ** see if any were open. If they were, then re-open
- ** them, using the info from the tty flags.
- */
- for (port = 0; port < PORTS_PER_RTA; port++) {
- PortP = p->RIOPortp[port + HostP->Mapping[ThisUnit].SysPort];
- if (PortP->State & (RIO_MOPEN | RIO_LOPEN)) {
- rio_dprintk(RIO_DEBUG_ROUTE, "Re-opened this port\n");
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- PortP->MagicFlags |= MAGIC_REBOOT;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- }
- }
- if (RtaType == TYPE_RTA16) {
- for (port = 0; port < PORTS_PER_RTA; port++) {
- PortP = p->RIOPortp[port + HostP->Mapping[ThisUnit2].SysPort];
- if (PortP->State & (RIO_MOPEN | RIO_LOPEN)) {
- rio_dprintk(RIO_DEBUG_ROUTE, "Re-opened this port\n");
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- PortP->MagicFlags |= MAGIC_REBOOT;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- }
- }
- }
- }
-
- /*
- ** keep a copy of the module types!
- */
- HostP->UnixRups[ThisUnit].ModTypes = Mod;
- if (RtaType == TYPE_RTA16)
- HostP->UnixRups[ThisUnit2].ModTypes = Mod;
-
- /*
- ** If either of the modules on this unit is read-only or write-only
- ** or none-xprint, then we need to transfer that info over to the
- ** relevant ports.
- */
- if (HostP->Mapping[ThisUnit].SysPort != NO_PORT) {
- for (port = 0; port < PORTS_PER_MODULE; port++) {
- p->RIOPortp[port + HostP->Mapping[ThisUnit].SysPort]->Config &= ~RIO_NOMASK;
- p->RIOPortp[port + HostP->Mapping[ThisUnit].SysPort]->Config |= p->RIOModuleTypes[Mod1].Flags[port];
- p->RIOPortp[port + PORTS_PER_MODULE + HostP->Mapping[ThisUnit].SysPort]->Config &= ~RIO_NOMASK;
- p->RIOPortp[port + PORTS_PER_MODULE + HostP->Mapping[ThisUnit].SysPort]->Config |= p->RIOModuleTypes[Mod2].Flags[port];
- }
- if (RtaType == TYPE_RTA16) {
- for (port = 0; port < PORTS_PER_MODULE; port++) {
- p->RIOPortp[port + HostP->Mapping[ThisUnit2].SysPort]->Config &= ~RIO_NOMASK;
- p->RIOPortp[port + HostP->Mapping[ThisUnit2].SysPort]->Config |= p->RIOModuleTypes[Mod1].Flags[port];
- p->RIOPortp[port + PORTS_PER_MODULE + HostP->Mapping[ThisUnit2].SysPort]->Config &= ~RIO_NOMASK;
- p->RIOPortp[port + PORTS_PER_MODULE + HostP->Mapping[ThisUnit2].SysPort]->Config |= p->RIOModuleTypes[Mod2].Flags[port];
- }
- }
- }
-
- /*
- ** Job done, get on with the interrupts!
- */
- return 1;
- }
- }
- /*
- ** There is no table entry for this RTA at all.
- **
- ** Lets check to see if we actually booted this unit - if not,
- ** then we reset it and it will go round the loop of being booted
- ** we can then worry about trying to fit it into the table.
- */
- for (ThisUnit = 0; ThisUnit < HostP->NumExtraBooted; ThisUnit++)
- if (HostP->ExtraUnits[ThisUnit] == RtaUniq)
- break;
- if (ThisUnit == HostP->NumExtraBooted && ThisUnit != MAX_EXTRA_UNITS) {
- /*
- ** if the unit wasn't in the table, and the table wasn't full, then
- ** we reset the unit, because we didn't boot it.
- ** However, if the table is full, it could be that we did boot
- ** this unit, and so we won't reboot it, because it isn't really
- ** all that disastrous to keep the old bins in most cases. This
- ** is a rather tacky feature, but we are on the edge of reality
- ** here, because the implication is that someone has connected
- ** 16+MAX_EXTRA_UNITS onto one host.
- */
- static int UnknownMesgDone = 0;
-
- if (!UnknownMesgDone) {
- if (!p->RIONoMessage)
- printk(KERN_DEBUG "rio: One or more unknown RTAs are being updated.\n");
- UnknownMesgDone = 1;
- }
-
- PktReplyP->Command = ROUTE_FOAD;
- memcpy(PktReplyP->CommandText, "RT_FOAD", 7);
- } else {
- /*
- ** we did boot it (as an extra), and there may now be a table
- ** slot free (because of a delete), so we will try to make
- ** a tentative entry for it, so that the configurator can see it
- ** and fill in the details for us.
- */
- if (RtaType == TYPE_RTA16) {
- if (RIOFindFreeID(p, HostP, &ThisUnit, &ThisUnit2) == 0) {
- RIODefaultName(p, HostP, ThisUnit);
- rio_fill_host_slot(ThisUnit, ThisUnit2, RtaUniq, HostP);
- }
- } else {
- if (RIOFindFreeID(p, HostP, &ThisUnit, NULL) == 0) {
- RIODefaultName(p, HostP, ThisUnit);
- rio_fill_host_slot(ThisUnit, 0, RtaUniq, HostP);
- }
- }
- PktReplyP->Command = ROUTE_USED;
- memcpy(PktReplyP->CommandText, "RT_USED", 7);
- }
- RIOQueueCmdBlk(HostP, Rup, CmdBlkP);
- return 1;
-}
-
-
-void RIOFixPhbs(struct rio_info *p, struct Host *HostP, unsigned int unit)
-{
- unsigned short link, port;
- struct Port *PortP;
- unsigned long flags;
- int PortN = HostP->Mapping[unit].SysPort;
-
- rio_dprintk(RIO_DEBUG_ROUTE, "RIOFixPhbs unit %d sysport %d\n", unit, PortN);
-
- if (PortN != -1) {
- unsigned short dest_unit = HostP->Mapping[unit].ID2;
-
- /*
- ** Get the link number used for the 1st 8 phbs on this unit.
- */
- PortP = p->RIOPortp[HostP->Mapping[dest_unit - 1].SysPort];
-
- link = readw(&PortP->PhbP->link);
-
- for (port = 0; port < PORTS_PER_RTA; port++, PortN++) {
- unsigned short dest_port = port + 8;
- u16 __iomem *TxPktP;
- struct PKT __iomem *Pkt;
-
- PortP = p->RIOPortp[PortN];
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- /*
- ** If RTA is not powered on, the tx packets will be
- ** unset, so go no further.
- */
- if (!PortP->TxStart) {
- rio_dprintk(RIO_DEBUG_ROUTE, "Tx pkts not set up yet\n");
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- break;
- }
-
- /*
- ** For the second slot of a 16 port RTA, the driver needs to
- ** sort out the phb to port mappings. The dest_unit for this
- ** group of 8 phbs is set to the dest_unit of the accompanying
- ** 8 port block. The dest_port of the second unit is set to
- ** be in the range 8-15 (i.e. 8 is added). Thus, for a 16 port
- ** RTA with IDs 5 and 6, traffic bound for port 6 of unit 6
- ** (being the second map ID) will be sent to dest_unit 5, port
- ** 14. When this RTA is deleted, dest_unit for ID 6 will be
- ** restored, and the dest_port will be reduced by 8.
- ** Transmit packets also have a destination field which needs
- ** adjusting in the same manner.
- ** Note that the unit/port bytes in 'dest' are swapped.
- ** We also need to adjust the phb and rup link numbers for the
- ** second block of 8 ttys.
- */
- for (TxPktP = PortP->TxStart; TxPktP <= PortP->TxEnd; TxPktP++) {
- /*
- ** *TxPktP is the pointer to the transmit packet on the host
- ** card. This needs to be translated into a 32 bit pointer
- ** so it can be accessed from the driver.
- */
- Pkt = (struct PKT __iomem *) RIO_PTR(HostP->Caddr, readw(TxPktP));
-
- /*
- ** If the packet is used, reset it.
- */
- Pkt = (struct PKT __iomem *) ((unsigned long) Pkt & ~PKT_IN_USE);
- writeb(dest_unit, &Pkt->dest_unit);
- writeb(dest_port, &Pkt->dest_port);
- }
- rio_dprintk(RIO_DEBUG_ROUTE, "phb dest: Old %x:%x New %x:%x\n", readw(&PortP->PhbP->destination) & 0xff, (readw(&PortP->PhbP->destination) >> 8) & 0xff, dest_unit, dest_port);
- writew(dest_unit + (dest_port << 8), &PortP->PhbP->destination);
- writew(link, &PortP->PhbP->link);
-
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- }
- /*
- ** Now make sure the range of ports to be serviced includes
- ** the 2nd 8 on this 16 port RTA.
- */
- if (link > 3)
- return;
- if (((unit * 8) + 7) > readw(&HostP->LinkStrP[link].last_port)) {
- rio_dprintk(RIO_DEBUG_ROUTE, "last port on host link %d: %d\n", link, (unit * 8) + 7);
- writew((unit * 8) + 7, &HostP->LinkStrP[link].last_port);
- }
- }
-}
-
-/*
-** Check to see if the new disconnection has isolated this unit.
-** If it has, then invalidate all its link information, and tell
-** the world about it. This is done to ensure that the configurator
-** only gets up-to-date information about what is going on.
-*/
-static int RIOCheckIsolated(struct rio_info *p, struct Host *HostP, unsigned int UnitId)
-{
- unsigned long flags;
- rio_spin_lock_irqsave(&HostP->HostLock, flags);
-
- if (RIOCheck(HostP, UnitId)) {
- rio_dprintk(RIO_DEBUG_ROUTE, "Unit %d is NOT isolated\n", UnitId);
- rio_spin_unlock_irqrestore(&HostP->HostLock, flags);
- return (0);
- }
-
- RIOIsolate(p, HostP, UnitId);
- RIOSetChange(p);
- rio_spin_unlock_irqrestore(&HostP->HostLock, flags);
- return 1;
-}
-
-/*
-** Invalidate all the link interconnectivity of this unit, and of
-** all the units attached to it. This will mean that the entire
-** subnet will re-introduce itself.
-*/
-static int RIOIsolate(struct rio_info *p, struct Host *HostP, unsigned int UnitId)
-{
- unsigned int link, unit;
-
- UnitId--; /* this trick relies on the Unit Id being UNSIGNED! */
-
- if (UnitId >= MAX_RUP) /* dontcha just lurv unsigned maths! */
- return (0);
-
- if (HostP->Mapping[UnitId].Flags & BEEN_HERE)
- return (0);
-
- HostP->Mapping[UnitId].Flags |= BEEN_HERE;
-
- if (p->RIOPrintDisabled == DO_PRINT)
- rio_dprintk(RIO_DEBUG_ROUTE, "RIOMesgIsolated %s", HostP->Mapping[UnitId].Name);
-
- for (link = 0; link < LINKS_PER_UNIT; link++) {
- unit = HostP->Mapping[UnitId].Topology[link].Unit;
- HostP->Mapping[UnitId].Topology[link].Unit = ROUTE_DISCONNECT;
- HostP->Mapping[UnitId].Topology[link].Link = NO_LINK;
- RIOIsolate(p, HostP, unit);
- }
- HostP->Mapping[UnitId].Flags &= ~BEEN_HERE;
- return 1;
-}
-
-static int RIOCheck(struct Host *HostP, unsigned int UnitId)
-{
- unsigned char link;
-
-/* rio_dprint(RIO_DEBUG_ROUTE, ("Check to see if unit %d has a route to the host\n",UnitId)); */
- rio_dprintk(RIO_DEBUG_ROUTE, "RIOCheck : UnitID = %d\n", UnitId);
-
- if (UnitId == HOST_ID) {
- /* rio_dprint(RIO_DEBUG_ROUTE, ("Unit %d is NOT isolated - it IS the host!\n", UnitId)); */
- return 1;
- }
-
- UnitId--;
-
- if (UnitId >= MAX_RUP) {
- /* rio_dprint(RIO_DEBUG_ROUTE, ("Unit %d - ignored.\n", UnitId)); */
- return 0;
- }
-
- for (link = 0; link < LINKS_PER_UNIT; link++) {
- if (HostP->Mapping[UnitId].Topology[link].Unit == HOST_ID) {
- /* rio_dprint(RIO_DEBUG_ROUTE, ("Unit %d is connected directly to host via link (%c).\n",
- UnitId, 'A'+link)); */
- return 1;
- }
- }
-
- if (HostP->Mapping[UnitId].Flags & BEEN_HERE) {
- /* rio_dprint(RIO_DEBUG_ROUTE, ("Been to Unit %d before - ignoring\n", UnitId)); */
- return 0;
- }
-
- HostP->Mapping[UnitId].Flags |= BEEN_HERE;
-
- for (link = 0; link < LINKS_PER_UNIT; link++) {
- /* rio_dprint(RIO_DEBUG_ROUTE, ("Unit %d check link (%c)\n", UnitId,'A'+link)); */
- if (RIOCheck(HostP, HostP->Mapping[UnitId].Topology[link].Unit)) {
- /* rio_dprint(RIO_DEBUG_ROUTE, ("Unit %d is connected to something that knows the host via link (%c)\n", UnitId,link+'A')); */
- HostP->Mapping[UnitId].Flags &= ~BEEN_HERE;
- return 1;
- }
- }
-
- HostP->Mapping[UnitId].Flags &= ~BEEN_HERE;
-
- /* rio_dprint(RIO_DEBUG_ROUTE, ("Unit %d DOESN'T KNOW THE HOST!\n", UnitId)); */
-
- return 0;
-}
-
-/*
-** Returns the type of unit (host, 16/8 port RTA)
-*/
-
-unsigned int GetUnitType(unsigned int Uniq)
-{
- switch ((Uniq >> 28) & 0xf) {
- case RIO_AT:
- case RIO_MCA:
- case RIO_EISA:
- case RIO_PCI:
- rio_dprintk(RIO_DEBUG_ROUTE, "Unit type: Host\n");
- return (TYPE_HOST);
- case RIO_RTA_16:
- rio_dprintk(RIO_DEBUG_ROUTE, "Unit type: 16 port RTA\n");
- return (TYPE_RTA16);
- case RIO_RTA:
- rio_dprintk(RIO_DEBUG_ROUTE, "Unit type: 8 port RTA\n");
- return (TYPE_RTA8);
- default:
- rio_dprintk(RIO_DEBUG_ROUTE, "Unit type: Unrecognised\n");
- return (99);
- }
-}
-
-int RIOSetChange(struct rio_info *p)
-{
- if (p->RIOQuickCheck != NOT_CHANGED)
- return (0);
- p->RIOQuickCheck = CHANGED;
- if (p->RIOSignalProcess) {
- rio_dprintk(RIO_DEBUG_ROUTE, "Send SIG-HUP");
- /*
- psignal( RIOSignalProcess, SIGHUP );
- */
- }
- return (0);
-}
-
-static void RIOConCon(struct rio_info *p,
- struct Host *HostP,
- unsigned int FromId,
- unsigned int FromLink,
- unsigned int ToId,
- unsigned int ToLink,
- int Change)
-{
- char *FromName;
- char *FromType;
- char *ToName;
- char *ToType;
- unsigned int tp;
-
-/*
-** 15.10.1998 ARG - ESIL 0759
-** (Part) fix for port being trashed when opened whilst RTA "disconnected"
-**
-** What's this doing in here anyway ?
-** It was causing the port to be 'unmapped' if opened whilst RTA "disconnected"
-**
-** 09.12.1998 ARG - ESIL 0776 - part fix
-** Okay, We've found out what this was all about now !
-** Someone had botched this to use RIOHalted to indicate the number of RTAs
-** 'disconnected'. The value in RIOHalted was then being used in the
-** 'RIO_QUICK_CHECK' ioctl, a non-zero value indicating that at least one RTA
-** is 'disconnected'. The change was put in to satisfy a customer's needs.
-** Having taken this bit of code out, 'RIO_QUICK_CHECK' no longer works for
-** the customer.
-**
- if (Change == CONNECT) {
- if (p->RIOHalted) p->RIOHalted --;
- }
- else {
- p->RIOHalted ++;
- }
-**
-** So - we need to implement it slightly differently - a new member of the
-** rio_info struct - RIORtaDisCons (RIO RTA connections) keeps track of RTA
-** connections and disconnections.
-*/
- if (Change == CONNECT) {
- if (p->RIORtaDisCons)
- p->RIORtaDisCons--;
- } else {
- p->RIORtaDisCons++;
- }
-
- if (p->RIOPrintDisabled == DONT_PRINT)
- return;
-
- if (FromId > ToId) {
- tp = FromId;
- FromId = ToId;
- ToId = tp;
- tp = FromLink;
- FromLink = ToLink;
- ToLink = tp;
- }
-
- FromName = FromId ? HostP->Mapping[FromId - 1].Name : HostP->Name;
- FromType = FromId ? "RTA" : "HOST";
- ToName = ToId ? HostP->Mapping[ToId - 1].Name : HostP->Name;
- ToType = ToId ? "RTA" : "HOST";
-
- rio_dprintk(RIO_DEBUG_ROUTE, "Link between %s '%s' (%c) and %s '%s' (%c) %s.\n", FromType, FromName, 'A' + FromLink, ToType, ToName, 'A' + ToLink, (Change == CONNECT) ? "established" : "disconnected");
- printk(KERN_DEBUG "rio: Link between %s '%s' (%c) and %s '%s' (%c) %s.\n", FromType, FromName, 'A' + FromLink, ToType, ToName, 'A' + ToLink, (Change == CONNECT) ? "established" : "disconnected");
-}
-
-/*
-** RIORemoveFromSavedTable :
-**
-** Delete an RTA entry from the saved table given to us
-** by the configuration program.
-*/
-static int RIORemoveFromSavedTable(struct rio_info *p, struct Map *pMap)
-{
- int entry;
-
- /*
- ** We loop for all entries even after finding an entry and
- ** zeroing it because we may have two entries to delete if
- ** it's a 16 port RTA.
- */
- for (entry = 0; entry < TOTAL_MAP_ENTRIES; entry++) {
- if (p->RIOSavedTable[entry].RtaUniqueNum == pMap->RtaUniqueNum) {
- memset(&p->RIOSavedTable[entry], 0, sizeof(struct Map));
- }
- }
- return 0;
-}
-
-
-/*
-** RIOFreeDisconnected :
-**
-** Scan the unit's links and return zero if the unit is completely
-** disconnected.
-*/
-static int RIOFreeDisconnected(struct rio_info *p, struct Host *HostP, int unit)
-{
- int link;
-
-
- rio_dprintk(RIO_DEBUG_ROUTE, "RIOFreeDisconnect unit %d\n", unit);
- /*
- ** If the slot is tentative and does not belong to the
- ** second half of a 16 port RTA then scan to see if
-	** it is disconnected.
- */
- for (link = 0; link < LINKS_PER_UNIT; link++) {
- if (HostP->Mapping[unit].Topology[link].Unit != ROUTE_DISCONNECT)
- break;
- }
-
- /*
- ** If not all links are disconnected then we can forget about it.
- */
- if (link < LINKS_PER_UNIT)
- return 1;
-
-#ifdef NEED_TO_FIX_THIS
- /* Ok so all the links are disconnected. But we may have only just
- ** made this slot tentative and not yet received a topology update.
-	** Let's check how long ago we made it tentative.
- */
- rio_dprintk(RIO_DEBUG_ROUTE, "Just about to check LBOLT on entry %d\n", unit);
- if (drv_getparm(LBOLT, (ulong_t *) & current_time))
- rio_dprintk(RIO_DEBUG_ROUTE, "drv_getparm(LBOLT,....) Failed.\n");
-
- elapse_time = current_time - TentTime[unit];
- rio_dprintk(RIO_DEBUG_ROUTE, "elapse %d = current %d - tent %d (%d usec)\n", elapse_time, current_time, TentTime[unit], drv_hztousec(elapse_time));
- if (drv_hztousec(elapse_time) < WAIT_TO_FINISH) {
- rio_dprintk(RIO_DEBUG_ROUTE, "Skipping slot %d, not timed out yet %d\n", unit, drv_hztousec(elapse_time));
- return 1;
- }
-#endif
-
- /*
-	** We have found a usable slot.
- ** If it is half of a 16 port RTA then delete the other half.
- */
- if (HostP->Mapping[unit].ID2 != 0) {
- int nOther = (HostP->Mapping[unit].ID2) - 1;
-
- rio_dprintk(RIO_DEBUG_ROUTE, "RioFreedis second slot %d.\n", nOther);
- memset(&HostP->Mapping[nOther], 0, sizeof(struct Map));
- }
- RIORemoveFromSavedTable(p, &HostP->Mapping[unit]);
-
- return 0;
-}
-
-
-/*
-** RIOFindFreeID :
-**
-** This function scans the given host table for either one
-** or two free unit IDs.
-*/
-
-int RIOFindFreeID(struct rio_info *p, struct Host *HostP, unsigned int * pID1, unsigned int * pID2)
-{
- int unit, tempID;
-
- /*
-	** Initialise the IDs to MAX_RUP.
-	** We do this to make the loop for setting the IDs as simple as
- ** possible.
- */
- *pID1 = MAX_RUP;
- if (pID2 != NULL)
- *pID2 = MAX_RUP;
-
- /*
- ** Scan all entries of the host mapping table for free slots.
- ** We scan for free slots first and then if that is not successful
- ** we start all over again looking for tentative slots we can re-use.
- */
- for (unit = 0; unit < MAX_RUP; unit++) {
- rio_dprintk(RIO_DEBUG_ROUTE, "Scanning unit %d\n", unit);
- /*
- ** If the flags are zero then the slot is empty.
- */
- if (HostP->Mapping[unit].Flags == 0) {
- rio_dprintk(RIO_DEBUG_ROUTE, " This slot is empty.\n");
- /*
- ** If we haven't allocated the first ID then do it now.
- */
- if (*pID1 == MAX_RUP) {
- rio_dprintk(RIO_DEBUG_ROUTE, "Make tentative entry for first unit %d\n", unit);
- *pID1 = unit;
-
- /*
- ** If the second ID is not needed then we can return
- ** now.
- */
- if (pID2 == NULL)
- return 0;
- } else {
- /*
- ** Allocate the second slot and return.
- */
- rio_dprintk(RIO_DEBUG_ROUTE, "Make tentative entry for second unit %d\n", unit);
- *pID2 = unit;
- return 0;
- }
- }
- }
-
- /*
- ** If we manage to come out of the free slot loop then we
- ** need to start all over again looking for tentative slots
- ** that we can re-use.
- */
- rio_dprintk(RIO_DEBUG_ROUTE, "Starting to scan for tentative slots\n");
- for (unit = 0; unit < MAX_RUP; unit++) {
- if (((HostP->Mapping[unit].Flags & SLOT_TENTATIVE) || (HostP->Mapping[unit].Flags == 0)) && !(HostP->Mapping[unit].Flags & RTA16_SECOND_SLOT)) {
- rio_dprintk(RIO_DEBUG_ROUTE, " Slot %d looks promising.\n", unit);
-
- if (unit == *pID1) {
- rio_dprintk(RIO_DEBUG_ROUTE, " No it isn't, its the 1st half\n");
- continue;
- }
-
- /*
-			** Slot is Tentative or Empty, but not a tentative second
-			** slot of a 16 porter.
-			** Attempt to free up this slot (and its partner if
-			** it is a 16 port slot; the second slot will become
-			** empty after a call to RIOFreeDisconnected, which is why
-			** we look for empty slots above as well).
- */
- if (HostP->Mapping[unit].Flags != 0)
- if (RIOFreeDisconnected(p, HostP, unit) != 0)
- continue;
- /*
- ** If we haven't allocated the first ID then do it now.
- */
- if (*pID1 == MAX_RUP) {
- rio_dprintk(RIO_DEBUG_ROUTE, "Grab tentative entry for first unit %d\n", unit);
- *pID1 = unit;
-
- /*
- ** Clear out this slot now that we intend to use it.
- */
- memset(&HostP->Mapping[unit], 0, sizeof(struct Map));
-
- /*
- ** If the second ID is not needed then we can return
- ** now.
- */
- if (pID2 == NULL)
- return 0;
- } else {
- /*
- ** Allocate the second slot and return.
- */
- rio_dprintk(RIO_DEBUG_ROUTE, "Grab tentative/empty entry for second unit %d\n", unit);
- *pID2 = unit;
-
- /*
- ** Clear out this slot now that we intend to use it.
- */
- memset(&HostP->Mapping[unit], 0, sizeof(struct Map));
-
- /* At this point under the right(wrong?) conditions
- ** we may have a first unit ID being higher than the
- ** second unit ID. This is a bad idea if we are about
- ** to fill the slots with a 16 port RTA.
- ** Better check and swap them over.
- */
-
- if (*pID1 > *pID2) {
- rio_dprintk(RIO_DEBUG_ROUTE, "Swapping IDS %d %d\n", *pID1, *pID2);
- tempID = *pID1;
- *pID1 = *pID2;
- *pID2 = tempID;
- }
- return 0;
- }
- }
- }
-
- /*
- ** If we manage to get to the end of the second loop then we
- ** can give up and return a failure.
- */
- return 1;
-}
-
-
-/*
-** The link switch scenario.
-**
-** Rta Wun (A) is connected to Tuw (A).
-** The tables are all up to date, and the system is OK.
-**
-** If Wun (A) is now moved to Wun (B) before Wun (A) can
-** become disconnected, then the following happens:
-**
-** Tuw (A) spots the change of unit:link at the other end
-** of its link and Tuw sends a topology packet reflecting
-** the change: Tuw (A) now disconnected from Wun (A), and
-** this is closely followed by a packet indicating that
-** Tuw (A) is now connected to Wun (B).
-**
-** Wun (B) will spot that it has now become connected, and
-** Wun will send a topology packet, which indicates that
-** both Wun (A) and Wun (B) are connected to Tuw (A).
-**
-** Eventually Wun (A) realises that it is now disconnected
-** and Wun will send out a topology packet indicating that
-** Wun (A) is now disconnected.
-*/
diff --git a/drivers/staging/generic_serial/rio/riospace.h b/drivers/staging/generic_serial/rio/riospace.h
deleted file mode 100644
index ffb31d4332b..00000000000
--- a/drivers/staging/generic_serial/rio/riospace.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : riospace.h
-** SID : 1.2
-** Last Modified : 11/6/98 11:34:13
-** Retrieved : 11/6/98 11:34:22
-**
-** ident @(#)riospace.h 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __rio_riospace_h__
-#define __rio_riospace_h__
-
-#define RIO_LOCATOR_LEN 16
-#define MAX_RIO_BOARDS 4
-
-/*
-** DON'T change this file. At all. Unless you can rebuild the entire
-** device driver, which you probably can't, the rest of the
-** driver won't see any changes you make here. So don't make any.
-** In particular, it won't be able to see changes to RIO_SLOTS.
-*/
-
-struct Conf {
- char Locator[24];
- unsigned int StartupTime;
- unsigned int SlowCook;
- unsigned int IntrPollTime;
- unsigned int BreakInterval;
- unsigned int Timer;
- unsigned int RtaLoadBase;
- unsigned int HostLoadBase;
- unsigned int XpHz;
- unsigned int XpCps;
- char *XpOn;
- char *XpOff;
- unsigned int MaxXpCps;
- unsigned int MinXpCps;
- unsigned int SpinCmds;
- unsigned int FirstAddr;
- unsigned int LastAddr;
- unsigned int BufferSize;
- unsigned int LowWater;
- unsigned int LineLength;
- unsigned int CmdTime;
-};
-
-/*
-** Board types - these MUST correspond to product codes!
-*/
-#define RIO_EMPTY 0x0
-#define RIO_EISA 0x3
-#define RIO_RTA_16 0x9
-#define RIO_AT 0xA
-#define RIO_MCA 0xB
-#define RIO_PCI 0xD
-#define RIO_RTA 0xE
-
-/*
-** Board data structure. This is used for configuration info
-*/
-struct Brd {
- unsigned char Type; /* RIO_EISA, RIO_MCA, RIO_AT, RIO_EMPTY... */
- unsigned char Ivec; /* POLLED or ivec number */
- unsigned char Mode; /* Control stuff, see below */
-};
-
-struct Board {
- char Locator[RIO_LOCATOR_LEN];
- int NumSlots;
- struct Brd Boards[MAX_RIO_BOARDS];
-};
-
-#define BOOT_FROM_LINK 0x00
-#define BOOT_FROM_RAM 0x01
-#define EXTERNAL_BUS_OFF 0x00
-#define EXTERNAL_BUS_ON 0x02
-#define INTERRUPT_DISABLE 0x00
-#define INTERRUPT_ENABLE 0x04
-#define BYTE_OPERATION 0x00
-#define WORD_OPERATION 0x08
-#define POLLED INTERRUPT_DISABLE
-#define IRQ_15 (0x00 | INTERRUPT_ENABLE)
-#define IRQ_12 (0x10 | INTERRUPT_ENABLE)
-#define IRQ_11 (0x20 | INTERRUPT_ENABLE)
-#define IRQ_9 (0x30 | INTERRUPT_ENABLE)
-#define SLOW_LINKS 0x00
-#define FAST_LINKS 0x40
-#define SLOW_AT_BUS 0x00
-#define FAST_AT_BUS 0x80
-#define SLOW_PCI_TP 0x00
-#define FAST_PCI_TP 0x80
-/*
-** Debug levels
-*/
-#define DBG_NONE 0x00000000
-
-#define DBG_INIT 0x00000001
-#define DBG_OPEN 0x00000002
-#define DBG_CLOSE 0x00000004
-#define DBG_IOCTL 0x00000008
-
-#define DBG_READ 0x00000010
-#define DBG_WRITE 0x00000020
-#define DBG_INTR 0x00000040
-#define DBG_PROC 0x00000080
-
-#define DBG_PARAM 0x00000100
-#define DBG_CMD 0x00000200
-#define DBG_XPRINT 0x00000400
-#define DBG_POLL 0x00000800
-
-#define DBG_DAEMON 0x00001000
-#define DBG_FAIL 0x00002000
-#define DBG_MODEM 0x00004000
-#define DBG_LIST 0x00008000
-
-#define DBG_ROUTE 0x00010000
-#define DBG_UTIL 0x00020000
-#define DBG_BOOT 0x00040000
-#define DBG_BUFFER 0x00080000
-
-#define DBG_MON 0x00100000
-#define DBG_SPECIAL 0x00200000
-#define DBG_VPIX 0x00400000
-#define DBG_FLUSH 0x00800000
-
-#define DBG_QENABLE 0x01000000
-
-#define DBG_ALWAYS 0x80000000
-
-#endif /* __rio_riospace_h__ */
diff --git a/drivers/staging/generic_serial/rio/riotable.c b/drivers/staging/generic_serial/rio/riotable.c
deleted file mode 100644
index 3d15802dc0f..00000000000
--- a/drivers/staging/generic_serial/rio/riotable.c
+++ /dev/null
@@ -1,941 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : riotable.c
-** SID : 1.2
-** Last Modified : 11/6/98 10:33:47
-** Retrieved : 11/6/98 10:33:50
-**
-** ident @(#)riotable.c 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/string.h>
-
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/string.h>
-#include <asm/uaccess.h>
-
-#include <linux/termios.h>
-#include <linux/serial.h>
-
-#include <linux/generic_serial.h>
-
-
-#include "linux_compat.h"
-#include "rio_linux.h"
-#include "pkt.h"
-#include "daemon.h"
-#include "rio.h"
-#include "riospace.h"
-#include "cmdpkt.h"
-#include "map.h"
-#include "rup.h"
-#include "port.h"
-#include "riodrvr.h"
-#include "rioinfo.h"
-#include "func.h"
-#include "errors.h"
-#include "pci.h"
-
-#include "parmmap.h"
-#include "unixrup.h"
-#include "board.h"
-#include "host.h"
-#include "phb.h"
-#include "link.h"
-#include "cmdblk.h"
-#include "route.h"
-#include "cirrus.h"
-#include "rioioctl.h"
-#include "param.h"
-#include "protsts.h"
-
-/*
-** A configuration table has been loaded. It is now up to us
-** to sort it out and use the information contained therein.
-*/
-int RIONewTable(struct rio_info *p)
-{
- int Host, Host1, Host2, NameIsUnique, Entry, SubEnt;
- struct Map *MapP;
- struct Map *HostMapP;
- struct Host *HostP;
-
- char *cptr;
-
- /*
- ** We have been sent a new table to install. We need to break
- ** it down into little bits and spread it around a bit to see
- ** what we have got.
- */
- /*
- ** Things to check:
- ** (things marked 'xx' aren't checked any more!)
- ** (1) That there are no booted Hosts/RTAs out there.
- ** (2) That the names are properly formed
-	** (3) That blank entries really are blank.
- ** xx (4) That hosts mentioned in the table actually exist. xx
- ** (5) That the IDs are unique (per host).
- ** (6) That host IDs are zero
- ** (7) That port numbers are valid
- ** (8) That port numbers aren't duplicated
- ** (9) That names aren't duplicated
- ** xx (10) That hosts that actually exist are mentioned in the table. xx
- */
- rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(1)\n");
- if (p->RIOSystemUp) { /* (1) */
- p->RIOError.Error = HOST_HAS_ALREADY_BEEN_BOOTED;
- return -EBUSY;
- }
-
- p->RIOError.Error = NOTHING_WRONG_AT_ALL;
- p->RIOError.Entry = -1;
- p->RIOError.Other = -1;
-
- for (Entry = 0; Entry < TOTAL_MAP_ENTRIES; Entry++) {
- MapP = &p->RIOConnectTable[Entry];
- if ((MapP->Flags & RTA16_SECOND_SLOT) == 0) {
- rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(2)\n");
- cptr = MapP->Name; /* (2) */
- cptr[MAX_NAME_LEN - 1] = '\0';
- if (cptr[0] == '\0') {
- memcpy(MapP->Name, MapP->RtaUniqueNum ? "RTA NN" : "HOST NN", 8);
- MapP->Name[5] = '0' + Entry / 10;
- MapP->Name[6] = '0' + Entry % 10;
- }
- while (*cptr) {
- if (*cptr < ' ' || *cptr > '~') {
- p->RIOError.Error = BAD_CHARACTER_IN_NAME;
- p->RIOError.Entry = Entry;
- return -ENXIO;
- }
- cptr++;
- }
- }
-
- /*
- ** If the entry saved was a tentative entry then just forget
- ** about it.
- */
- if (MapP->Flags & SLOT_TENTATIVE) {
- MapP->HostUniqueNum = 0;
- MapP->RtaUniqueNum = 0;
- continue;
- }
-
- rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(3)\n");
- if (!MapP->RtaUniqueNum && !MapP->HostUniqueNum) { /* (3) */
- if (MapP->ID || MapP->SysPort || MapP->Flags) {
- rio_dprintk(RIO_DEBUG_TABLE, "%s pretending to be empty but isn't\n", MapP->Name);
- p->RIOError.Error = TABLE_ENTRY_ISNT_PROPERLY_NULL;
- p->RIOError.Entry = Entry;
- return -ENXIO;
- }
- rio_dprintk(RIO_DEBUG_TABLE, "!RIO: Daemon: test (3) passes\n");
- continue;
- }
-
- rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(4)\n");
- for (Host = 0; Host < p->RIONumHosts; Host++) { /* (4) */
- if (p->RIOHosts[Host].UniqueNum == MapP->HostUniqueNum) {
- HostP = &p->RIOHosts[Host];
- /*
- ** having done the lookup, we don't really want to do
- ** it again, so hang the host number in a safe place
- */
- MapP->Topology[0].Unit = Host;
- break;
- }
- }
-
- if (Host >= p->RIONumHosts) {
- rio_dprintk(RIO_DEBUG_TABLE, "RTA %s has unknown host unique number 0x%x\n", MapP->Name, MapP->HostUniqueNum);
- MapP->HostUniqueNum = 0;
- /* MapP->RtaUniqueNum = 0; */
- /* MapP->ID = 0; */
- /* MapP->Flags = 0; */
- /* MapP->SysPort = 0; */
- /* MapP->Name[0] = 0; */
- continue;
- }
-
- rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(5)\n");
- if (MapP->RtaUniqueNum) { /* (5) */
- if (!MapP->ID) {
- rio_dprintk(RIO_DEBUG_TABLE, "RIO: RTA %s has been allocated an ID of zero!\n", MapP->Name);
- p->RIOError.Error = ZERO_RTA_ID;
- p->RIOError.Entry = Entry;
- return -ENXIO;
- }
- if (MapP->ID > MAX_RUP) {
- rio_dprintk(RIO_DEBUG_TABLE, "RIO: RTA %s has been allocated an invalid ID %d\n", MapP->Name, MapP->ID);
- p->RIOError.Error = ID_NUMBER_OUT_OF_RANGE;
- p->RIOError.Entry = Entry;
- return -ENXIO;
- }
- for (SubEnt = 0; SubEnt < Entry; SubEnt++) {
- if (MapP->HostUniqueNum == p->RIOConnectTable[SubEnt].HostUniqueNum && MapP->ID == p->RIOConnectTable[SubEnt].ID) {
- rio_dprintk(RIO_DEBUG_TABLE, "Dupl. ID number allocated to RTA %s and RTA %s\n", MapP->Name, p->RIOConnectTable[SubEnt].Name);
- p->RIOError.Error = DUPLICATED_RTA_ID;
- p->RIOError.Entry = Entry;
- p->RIOError.Other = SubEnt;
- return -ENXIO;
- }
- /*
- ** If the RtaUniqueNum is the same, it may be looking at both
- ** entries for a 16 port RTA, so check the ids
- */
- if ((MapP->RtaUniqueNum == p->RIOConnectTable[SubEnt].RtaUniqueNum)
- && (MapP->ID2 != p->RIOConnectTable[SubEnt].ID)) {
- rio_dprintk(RIO_DEBUG_TABLE, "RTA %s has duplicate unique number\n", MapP->Name);
- rio_dprintk(RIO_DEBUG_TABLE, "RTA %s has duplicate unique number\n", p->RIOConnectTable[SubEnt].Name);
- p->RIOError.Error = DUPLICATE_UNIQUE_NUMBER;
- p->RIOError.Entry = Entry;
- p->RIOError.Other = SubEnt;
- return -ENXIO;
- }
- }
- rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(7a)\n");
- /* (7a) */
- if ((MapP->SysPort != NO_PORT) && (MapP->SysPort % PORTS_PER_RTA)) {
- rio_dprintk(RIO_DEBUG_TABLE, "TTY Port number %d-RTA %s is not a multiple of %d!\n", (int) MapP->SysPort, MapP->Name, PORTS_PER_RTA);
- p->RIOError.Error = TTY_NUMBER_OUT_OF_RANGE;
- p->RIOError.Entry = Entry;
- return -ENXIO;
- }
- rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(7b)\n");
- /* (7b) */
- if ((MapP->SysPort != NO_PORT) && (MapP->SysPort >= RIO_PORTS)) {
- rio_dprintk(RIO_DEBUG_TABLE, "TTY Port number %d for RTA %s is too big\n", (int) MapP->SysPort, MapP->Name);
- p->RIOError.Error = TTY_NUMBER_OUT_OF_RANGE;
- p->RIOError.Entry = Entry;
- return -ENXIO;
- }
- for (SubEnt = 0; SubEnt < Entry; SubEnt++) {
- if (p->RIOConnectTable[SubEnt].Flags & RTA16_SECOND_SLOT)
- continue;
- if (p->RIOConnectTable[SubEnt].RtaUniqueNum) {
- rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(8)\n");
- /* (8) */
- if ((MapP->SysPort != NO_PORT) && (MapP->SysPort == p->RIOConnectTable[SubEnt].SysPort)) {
- rio_dprintk(RIO_DEBUG_TABLE, "RTA %s:same TTY port # as RTA %s (%d)\n", MapP->Name, p->RIOConnectTable[SubEnt].Name, (int) MapP->SysPort);
- p->RIOError.Error = TTY_NUMBER_IN_USE;
- p->RIOError.Entry = Entry;
- p->RIOError.Other = SubEnt;
- return -ENXIO;
- }
- rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(9)\n");
- if (strcmp(MapP->Name, p->RIOConnectTable[SubEnt].Name) == 0 && !(MapP->Flags & RTA16_SECOND_SLOT)) { /* (9) */
- rio_dprintk(RIO_DEBUG_TABLE, "RTA name %s used twice\n", MapP->Name);
- p->RIOError.Error = NAME_USED_TWICE;
- p->RIOError.Entry = Entry;
- p->RIOError.Other = SubEnt;
- return -ENXIO;
- }
- }
- }
- } else { /* (6) */
- rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: entering(6)\n");
- if (MapP->ID) {
- rio_dprintk(RIO_DEBUG_TABLE, "RIO:HOST %s has been allocated ID that isn't zero!\n", MapP->Name);
- p->RIOError.Error = HOST_ID_NOT_ZERO;
- p->RIOError.Entry = Entry;
- return -ENXIO;
- }
- if (MapP->SysPort != NO_PORT) {
- rio_dprintk(RIO_DEBUG_TABLE, "RIO: HOST %s has been allocated port numbers!\n", MapP->Name);
- p->RIOError.Error = HOST_SYSPORT_BAD;
- p->RIOError.Entry = Entry;
- return -ENXIO;
- }
- }
- }
-
- /*
- ** wow! if we get here then it's a goody!
- */
-
- /*
- ** Zero the (old) entries for each host...
- */
- for (Host = 0; Host < RIO_HOSTS; Host++) {
- for (Entry = 0; Entry < MAX_RUP; Entry++) {
- memset(&p->RIOHosts[Host].Mapping[Entry], 0, sizeof(struct Map));
- }
- memset(&p->RIOHosts[Host].Name[0], 0, sizeof(p->RIOHosts[Host].Name));
- }
-
- /*
- ** Copy in the new table entries
- */
- for (Entry = 0; Entry < TOTAL_MAP_ENTRIES; Entry++) {
- rio_dprintk(RIO_DEBUG_TABLE, "RIONewTable: Copy table for Host entry %d\n", Entry);
- MapP = &p->RIOConnectTable[Entry];
-
- /*
- ** Now, if it is an empty slot ignore it!
- */
- if (MapP->HostUniqueNum == 0)
- continue;
-
- /*
- ** we saved the host number earlier, so grab it back
- */
- HostP = &p->RIOHosts[MapP->Topology[0].Unit];
-
- /*
- ** If it is a host, then we only need to fill in the name field.
- */
- if (MapP->ID == 0) {
- rio_dprintk(RIO_DEBUG_TABLE, "Host entry found. Name %s\n", MapP->Name);
- memcpy(HostP->Name, MapP->Name, MAX_NAME_LEN);
- continue;
- }
-
- /*
-		** It's an RTA entry, so fill in the host mapping entries for it
- ** and the port mapping entries. Notice that entry zero is for
- ** ID one.
- */
- HostMapP = &HostP->Mapping[MapP->ID - 1];
-
- if (MapP->Flags & SLOT_IN_USE) {
- rio_dprintk(RIO_DEBUG_TABLE, "Rta entry found. Name %s\n", MapP->Name);
- /*
- ** structure assign, then sort out the bits we shouldn't have done
- */
- *HostMapP = *MapP;
-
- HostMapP->Flags = SLOT_IN_USE;
- if (MapP->Flags & RTA16_SECOND_SLOT)
- HostMapP->Flags |= RTA16_SECOND_SLOT;
-
- RIOReMapPorts(p, HostP, HostMapP);
- } else {
- rio_dprintk(RIO_DEBUG_TABLE, "TENTATIVE Rta entry found. Name %s\n", MapP->Name);
- }
- }
-
- for (Entry = 0; Entry < TOTAL_MAP_ENTRIES; Entry++) {
- p->RIOSavedTable[Entry] = p->RIOConnectTable[Entry];
- }
-
- for (Host = 0; Host < p->RIONumHosts; Host++) {
- for (SubEnt = 0; SubEnt < LINKS_PER_UNIT; SubEnt++) {
- p->RIOHosts[Host].Topology[SubEnt].Unit = ROUTE_DISCONNECT;
- p->RIOHosts[Host].Topology[SubEnt].Link = NO_LINK;
- }
- for (Entry = 0; Entry < MAX_RUP; Entry++) {
- for (SubEnt = 0; SubEnt < LINKS_PER_UNIT; SubEnt++) {
- p->RIOHosts[Host].Mapping[Entry].Topology[SubEnt].Unit = ROUTE_DISCONNECT;
- p->RIOHosts[Host].Mapping[Entry].Topology[SubEnt].Link = NO_LINK;
- }
- }
- if (!p->RIOHosts[Host].Name[0]) {
- memcpy(p->RIOHosts[Host].Name, "HOST 1", 7);
- p->RIOHosts[Host].Name[5] += Host;
- }
- /*
- ** Check that default name assigned is unique.
- */
- Host1 = Host;
- NameIsUnique = 0;
- while (!NameIsUnique) {
- NameIsUnique = 1;
- for (Host2 = 0; Host2 < p->RIONumHosts; Host2++) {
- if (Host2 == Host)
- continue;
- if (strcmp(p->RIOHosts[Host].Name, p->RIOHosts[Host2].Name)
- == 0) {
- NameIsUnique = 0;
- Host1++;
- if (Host1 >= p->RIONumHosts)
- Host1 = 0;
- p->RIOHosts[Host].Name[5] = '1' + Host1;
- }
- }
- }
- /*
- ** Rename host if name already used.
- */
- if (Host1 != Host) {
- rio_dprintk(RIO_DEBUG_TABLE, "Default name %s already used\n", p->RIOHosts[Host].Name);
- memcpy(p->RIOHosts[Host].Name, "HOST 1", 7);
- p->RIOHosts[Host].Name[5] += Host1;
- }
- rio_dprintk(RIO_DEBUG_TABLE, "Assigning default name %s\n", p->RIOHosts[Host].Name);
- }
- return 0;
-}
-
-/*
-** User process needs the config table - build it from first
-** principles.
-**
-* FIXME: SMP locking
-*/
-int RIOApel(struct rio_info *p)
-{
- int Host;
- int link;
- int Rup;
- int Next = 0;
- struct Map *MapP;
- struct Host *HostP;
- unsigned long flags;
-
- rio_dprintk(RIO_DEBUG_TABLE, "Generating a table to return to config.rio\n");
-
- memset(&p->RIOConnectTable[0], 0, sizeof(struct Map) * TOTAL_MAP_ENTRIES);
-
- for (Host = 0; Host < RIO_HOSTS; Host++) {
- rio_dprintk(RIO_DEBUG_TABLE, "Processing host %d\n", Host);
- HostP = &p->RIOHosts[Host];
- rio_spin_lock_irqsave(&HostP->HostLock, flags);
-
- MapP = &p->RIOConnectTable[Next++];
- MapP->HostUniqueNum = HostP->UniqueNum;
- if ((HostP->Flags & RUN_STATE) != RC_RUNNING) {
- rio_spin_unlock_irqrestore(&HostP->HostLock, flags);
- continue;
- }
- MapP->RtaUniqueNum = 0;
- MapP->ID = 0;
- MapP->Flags = SLOT_IN_USE;
- MapP->SysPort = NO_PORT;
- for (link = 0; link < LINKS_PER_UNIT; link++)
- MapP->Topology[link] = HostP->Topology[link];
- memcpy(MapP->Name, HostP->Name, MAX_NAME_LEN);
- for (Rup = 0; Rup < MAX_RUP; Rup++) {
- if (HostP->Mapping[Rup].Flags & (SLOT_IN_USE | SLOT_TENTATIVE)) {
- p->RIOConnectTable[Next] = HostP->Mapping[Rup];
- if (HostP->Mapping[Rup].Flags & SLOT_IN_USE)
- p->RIOConnectTable[Next].Flags |= SLOT_IN_USE;
- if (HostP->Mapping[Rup].Flags & SLOT_TENTATIVE)
- p->RIOConnectTable[Next].Flags |= SLOT_TENTATIVE;
- if (HostP->Mapping[Rup].Flags & RTA16_SECOND_SLOT)
- p->RIOConnectTable[Next].Flags |= RTA16_SECOND_SLOT;
- Next++;
- }
- }
- rio_spin_unlock_irqrestore(&HostP->HostLock, flags);
- }
- return 0;
-}
-
-/*
-** config.rio has taken a dislike to one of the gross map entries.
-** If the entry is suitably inactive, then we can gob on it and remove
-** it from the table.
-*/
-int RIODeleteRta(struct rio_info *p, struct Map *MapP)
-{
- int host, entry, port, link;
- int SysPort;
- struct Host *HostP;
- struct Map *HostMapP;
- struct Port *PortP;
- int work_done = 0;
- unsigned long lock_flags, sem_flags;
-
- rio_dprintk(RIO_DEBUG_TABLE, "Delete entry on host %x, rta %x\n", MapP->HostUniqueNum, MapP->RtaUniqueNum);
-
- for (host = 0; host < p->RIONumHosts; host++) {
- HostP = &p->RIOHosts[host];
-
- rio_spin_lock_irqsave(&HostP->HostLock, lock_flags);
-
- if ((HostP->Flags & RUN_STATE) != RC_RUNNING) {
- rio_spin_unlock_irqrestore(&HostP->HostLock, lock_flags);
- continue;
- }
-
- for (entry = 0; entry < MAX_RUP; entry++) {
- if (MapP->RtaUniqueNum == HostP->Mapping[entry].RtaUniqueNum) {
- HostMapP = &HostP->Mapping[entry];
- rio_dprintk(RIO_DEBUG_TABLE, "Found entry offset %d on host %s\n", entry, HostP->Name);
-
- /*
- ** Check all four links of the unit are disconnected
- */
- for (link = 0; link < LINKS_PER_UNIT; link++) {
- if (HostMapP->Topology[link].Unit != ROUTE_DISCONNECT) {
- rio_dprintk(RIO_DEBUG_TABLE, "Entry is in use and cannot be deleted!\n");
- p->RIOError.Error = UNIT_IS_IN_USE;
- rio_spin_unlock_irqrestore(&HostP->HostLock, lock_flags);
- return -EBUSY;
- }
- }
- /*
- ** Slot has been allocated, BUT not booted/routed/
- ** connected/selected or anything else-ed
- */
- SysPort = HostMapP->SysPort;
-
- if (SysPort != NO_PORT) {
- for (port = SysPort; port < SysPort + PORTS_PER_RTA; port++) {
- PortP = p->RIOPortp[port];
- rio_dprintk(RIO_DEBUG_TABLE, "Unmap port\n");
-
- rio_spin_lock_irqsave(&PortP->portSem, sem_flags);
-
- PortP->Mapped = 0;
-
- if (PortP->State & (RIO_MOPEN | RIO_LOPEN)) {
-
- rio_dprintk(RIO_DEBUG_TABLE, "Gob on port\n");
- PortP->TxBufferIn = PortP->TxBufferOut = 0;
- /* What should I do
- wakeup( &PortP->TxBufferIn );
- wakeup( &PortP->TxBufferOut);
- */
- PortP->InUse = NOT_INUSE;
- /* What should I do
- wakeup( &PortP->InUse );
- signal(PortP->TtyP->t_pgrp,SIGKILL);
- ttyflush(PortP->TtyP,(FREAD|FWRITE));
- */
- PortP->State |= RIO_CLOSING | RIO_DELETED;
- }
-
- /*
- ** For the second slot of a 16 port RTA, the
- ** driver needs to reset the changes made to
- ** the phb to port mappings in RIORouteRup.
- */
- if (PortP->SecondBlock) {
- u16 dest_unit = HostMapP->ID;
- u16 dest_port = port - SysPort;
- u16 __iomem *TxPktP;
- struct PKT __iomem *Pkt;
-
- for (TxPktP = PortP->TxStart; TxPktP <= PortP->TxEnd; TxPktP++) {
- /*
- ** *TxPktP is the pointer to the
- ** transmit packet on the host card.
- ** This needs to be translated into
- ** a 32 bit pointer so it can be
- ** accessed from the driver.
- */
- Pkt = (struct PKT __iomem *) RIO_PTR(HostP->Caddr, readw(&*TxPktP));
- rio_dprintk(RIO_DEBUG_TABLE, "Tx packet (%x) destination: Old %x:%x New %x:%x\n", readw(TxPktP), readb(&Pkt->dest_unit), readb(&Pkt->dest_port), dest_unit, dest_port);
- writew(dest_unit, &Pkt->dest_unit);
- writew(dest_port, &Pkt->dest_port);
- }
- rio_dprintk(RIO_DEBUG_TABLE, "Port %d phb destination: Old %x:%x New %x:%x\n", port, readb(&PortP->PhbP->destination) & 0xff, (readb(&PortP->PhbP->destination) >> 8) & 0xff, dest_unit, dest_port);
- writew(dest_unit + (dest_port << 8), &PortP->PhbP->destination);
- }
- rio_spin_unlock_irqrestore(&PortP->portSem, sem_flags);
- }
- }
- rio_dprintk(RIO_DEBUG_TABLE, "Entry nulled.\n");
- memset(HostMapP, 0, sizeof(struct Map));
- work_done++;
- }
- }
- rio_spin_unlock_irqrestore(&HostP->HostLock, lock_flags);
- }
-
- /* XXXXX lock me up */
- for (entry = 0; entry < TOTAL_MAP_ENTRIES; entry++) {
- if (p->RIOSavedTable[entry].RtaUniqueNum == MapP->RtaUniqueNum) {
- memset(&p->RIOSavedTable[entry], 0, sizeof(struct Map));
- work_done++;
- }
- if (p->RIOConnectTable[entry].RtaUniqueNum == MapP->RtaUniqueNum) {
- memset(&p->RIOConnectTable[entry], 0, sizeof(struct Map));
- work_done++;
- }
- }
- if (work_done)
- return 0;
-
- rio_dprintk(RIO_DEBUG_TABLE, "Couldn't find entry to be deleted\n");
- p->RIOError.Error = COULDNT_FIND_ENTRY;
- return -ENXIO;
-}
-
-int RIOAssignRta(struct rio_info *p, struct Map *MapP)
-{
- int host;
- struct Map *HostMapP;
- char *sptr;
- int link;
-
-
- rio_dprintk(RIO_DEBUG_TABLE, "Assign entry on host %x, rta %x, ID %d, Sysport %d\n", MapP->HostUniqueNum, MapP->RtaUniqueNum, MapP->ID, (int) MapP->SysPort);
-
- if ((MapP->ID != (u16) - 1) && ((int) MapP->ID < (int) 1 || (int) MapP->ID > MAX_RUP)) {
- rio_dprintk(RIO_DEBUG_TABLE, "Bad ID in map entry!\n");
- p->RIOError.Error = ID_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- if (MapP->RtaUniqueNum == 0) {
- rio_dprintk(RIO_DEBUG_TABLE, "Rta Unique number zero!\n");
- p->RIOError.Error = RTA_UNIQUE_NUMBER_ZERO;
- return -EINVAL;
- }
- if ((MapP->SysPort != NO_PORT) && (MapP->SysPort % PORTS_PER_RTA)) {
- rio_dprintk(RIO_DEBUG_TABLE, "Port %d not multiple of %d!\n", (int) MapP->SysPort, PORTS_PER_RTA);
- p->RIOError.Error = TTY_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
- if ((MapP->SysPort != NO_PORT) && (MapP->SysPort >= RIO_PORTS)) {
- rio_dprintk(RIO_DEBUG_TABLE, "Port %d not valid!\n", (int) MapP->SysPort);
- p->RIOError.Error = TTY_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
-
- /*
- ** Copy the name across to the map entry.
- */
- MapP->Name[MAX_NAME_LEN - 1] = '\0';
- sptr = MapP->Name;
- while (*sptr) {
- if (*sptr < ' ' || *sptr > '~') {
- rio_dprintk(RIO_DEBUG_TABLE, "Name entry contains non-printing characters!\n");
- p->RIOError.Error = BAD_CHARACTER_IN_NAME;
- return -EINVAL;
- }
- sptr++;
- }
-
- for (host = 0; host < p->RIONumHosts; host++) {
- if (MapP->HostUniqueNum == p->RIOHosts[host].UniqueNum) {
- if ((p->RIOHosts[host].Flags & RUN_STATE) != RC_RUNNING) {
- p->RIOError.Error = HOST_NOT_RUNNING;
- return -ENXIO;
- }
-
- /*
- ** Now we have a host we need to allocate an ID
- ** if the entry does not already have one.
- */
- if (MapP->ID == (u16) - 1) {
- int nNewID;
-
- rio_dprintk(RIO_DEBUG_TABLE, "Attempting to get a new ID for rta \"%s\"\n", MapP->Name);
- /*
-				** The idea here is to allow RTAs to be assigned
-				** before they actually appear on the network.
-				** This allows the addition of RTAs without having
-				** to plug them in.
- ** What we do is:
- ** - Find a free ID and allocate it to the RTA.
- ** - If this map entry is the second half of a
- ** 16 port entry then find the other half and
- ** make sure the 2 cross reference each other.
- */
- if (RIOFindFreeID(p, &p->RIOHosts[host], &nNewID, NULL) != 0) {
- p->RIOError.Error = COULDNT_FIND_ENTRY;
- return -EBUSY;
- }
- MapP->ID = (u16) nNewID + 1;
- rio_dprintk(RIO_DEBUG_TABLE, "Allocated ID %d for this new RTA.\n", MapP->ID);
- HostMapP = &p->RIOHosts[host].Mapping[nNewID];
- HostMapP->RtaUniqueNum = MapP->RtaUniqueNum;
- HostMapP->HostUniqueNum = MapP->HostUniqueNum;
- HostMapP->ID = MapP->ID;
- for (link = 0; link < LINKS_PER_UNIT; link++) {
- HostMapP->Topology[link].Unit = ROUTE_DISCONNECT;
- HostMapP->Topology[link].Link = NO_LINK;
- }
- if (MapP->Flags & RTA16_SECOND_SLOT) {
- int unit;
-
- for (unit = 0; unit < MAX_RUP; unit++)
- if (p->RIOHosts[host].Mapping[unit].RtaUniqueNum == MapP->RtaUniqueNum)
- break;
- if (unit == MAX_RUP) {
- p->RIOError.Error = COULDNT_FIND_ENTRY;
- return -EBUSY;
- }
- HostMapP->Flags |= RTA16_SECOND_SLOT;
- HostMapP->ID2 = MapP->ID2 = p->RIOHosts[host].Mapping[unit].ID;
- p->RIOHosts[host].Mapping[unit].ID2 = MapP->ID;
- rio_dprintk(RIO_DEBUG_TABLE, "Cross referenced id %d to ID %d.\n", MapP->ID, p->RIOHosts[host].Mapping[unit].ID);
- }
- }
-
- HostMapP = &p->RIOHosts[host].Mapping[MapP->ID - 1];
-
- if (HostMapP->Flags & SLOT_IN_USE) {
- rio_dprintk(RIO_DEBUG_TABLE, "Map table slot for ID %d is already in use.\n", MapP->ID);
- p->RIOError.Error = ID_ALREADY_IN_USE;
- return -EBUSY;
- }
-
- /*
- ** Assign the sys ports and the name, and mark the slot as
- ** being in use.
- */
- HostMapP->SysPort = MapP->SysPort;
- if ((MapP->Flags & RTA16_SECOND_SLOT) == 0)
- memcpy(HostMapP->Name, MapP->Name, MAX_NAME_LEN);
- HostMapP->Flags = SLOT_IN_USE | RTA_BOOTED;
-#ifdef NEED_TO_FIX
- RIO_SV_BROADCAST(p->RIOHosts[host].svFlags[MapP->ID - 1]);
-#endif
- if (MapP->Flags & RTA16_SECOND_SLOT)
- HostMapP->Flags |= RTA16_SECOND_SLOT;
-
- RIOReMapPorts(p, &p->RIOHosts[host], HostMapP);
- /*
- ** Adjust 2nd block of 8 phbs
- */
- if (MapP->Flags & RTA16_SECOND_SLOT)
- RIOFixPhbs(p, &p->RIOHosts[host], HostMapP->ID - 1);
-
- if (HostMapP->SysPort != NO_PORT) {
- if (HostMapP->SysPort < p->RIOFirstPortsBooted)
- p->RIOFirstPortsBooted = HostMapP->SysPort;
- if (HostMapP->SysPort > p->RIOLastPortsBooted)
- p->RIOLastPortsBooted = HostMapP->SysPort;
- }
- if (MapP->Flags & RTA16_SECOND_SLOT)
- rio_dprintk(RIO_DEBUG_TABLE, "Second map of RTA %s added to configuration\n", p->RIOHosts[host].Mapping[MapP->ID2 - 1].Name);
- else
- rio_dprintk(RIO_DEBUG_TABLE, "RTA %s added to configuration\n", MapP->Name);
- return 0;
- }
- }
- p->RIOError.Error = UNKNOWN_HOST_NUMBER;
- rio_dprintk(RIO_DEBUG_TABLE, "Unknown host %x\n", MapP->HostUniqueNum);
- return -ENXIO;
-}
-
-
-int RIOReMapPorts(struct rio_info *p, struct Host *HostP, struct Map *HostMapP)
-{
- struct Port *PortP;
- unsigned int SubEnt;
- unsigned int HostPort;
- unsigned int SysPort;
- u16 RtaType;
- unsigned long flags;
-
- rio_dprintk(RIO_DEBUG_TABLE, "Mapping sysport %d to id %d\n", (int) HostMapP->SysPort, HostMapP->ID);
-
- /*
- ** We need to tell the UnixRups which sysport the rup corresponds to
- */
- HostP->UnixRups[HostMapP->ID - 1].BaseSysPort = HostMapP->SysPort;
-
- if (HostMapP->SysPort == NO_PORT)
- return (0);
-
- RtaType = GetUnitType(HostMapP->RtaUniqueNum);
- rio_dprintk(RIO_DEBUG_TABLE, "Mapping sysport %d-%d\n", (int) HostMapP->SysPort, (int) HostMapP->SysPort + PORTS_PER_RTA - 1);
-
- /*
- ** now map each of its eight ports
- */
- for (SubEnt = 0; SubEnt < PORTS_PER_RTA; SubEnt++) {
- rio_dprintk(RIO_DEBUG_TABLE, "subent = %d, HostMapP->SysPort = %d\n", SubEnt, (int) HostMapP->SysPort);
- SysPort = HostMapP->SysPort + SubEnt; /* portnumber within system */
- /* portnumber on host */
-
- HostPort = (HostMapP->ID - 1) * PORTS_PER_RTA + SubEnt;
-
- rio_dprintk(RIO_DEBUG_TABLE, "c1 p = %p, p->rioPortp = %p\n", p, p->RIOPortp);
- PortP = p->RIOPortp[SysPort];
- rio_dprintk(RIO_DEBUG_TABLE, "Map port\n");
-
- /*
- ** Point at all the real neat data structures
- */
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- PortP->HostP = HostP;
- PortP->Caddr = HostP->Caddr;
-
- /*
- ** The PhbP cannot be filled in yet
- ** unless the host has been booted
- */
- if ((HostP->Flags & RUN_STATE) == RC_RUNNING) {
- struct PHB __iomem *PhbP = PortP->PhbP = &HostP->PhbP[HostPort];
- PortP->TxAdd = (u16 __iomem *) RIO_PTR(HostP->Caddr, readw(&PhbP->tx_add));
- PortP->TxStart = (u16 __iomem *) RIO_PTR(HostP->Caddr, readw(&PhbP->tx_start));
- PortP->TxEnd = (u16 __iomem *) RIO_PTR(HostP->Caddr, readw(&PhbP->tx_end));
- PortP->RxRemove = (u16 __iomem *) RIO_PTR(HostP->Caddr, readw(&PhbP->rx_remove));
- PortP->RxStart = (u16 __iomem *) RIO_PTR(HostP->Caddr, readw(&PhbP->rx_start));
- PortP->RxEnd = (u16 __iomem *) RIO_PTR(HostP->Caddr, readw(&PhbP->rx_end));
- } else
- PortP->PhbP = NULL;
-
- /*
- ** port related flags
- */
- PortP->HostPort = HostPort;
- /*
- ** For each part of a 16 port RTA, RupNum is ID - 1.
- */
- PortP->RupNum = HostMapP->ID - 1;
- if (HostMapP->Flags & RTA16_SECOND_SLOT) {
- PortP->ID2 = HostMapP->ID2 - 1;
- PortP->SecondBlock = 1;
- } else {
- PortP->ID2 = 0;
- PortP->SecondBlock = 0;
- }
- PortP->RtaUniqueNum = HostMapP->RtaUniqueNum;
-
- /*
-		** If the port was already mapped then that's all we need to do.
- */
- if (PortP->Mapped) {
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- continue;
- } else
- HostMapP->Flags &= ~RTA_NEWBOOT;
-
- PortP->State = 0;
- PortP->Config = 0;
- /*
- ** Check out the module type - if it is special (read only etc.)
- ** then we need to set flags in the PortP->Config.
- ** Note: For 16 port RTA, all ports are of the same type.
- */
- if (RtaType == TYPE_RTA16) {
- PortP->Config |= p->RIOModuleTypes[HostP->UnixRups[HostMapP->ID - 1].ModTypes].Flags[SubEnt % PORTS_PER_MODULE];
- } else {
- if (SubEnt < PORTS_PER_MODULE)
- PortP->Config |= p->RIOModuleTypes[LONYBLE(HostP->UnixRups[HostMapP->ID - 1].ModTypes)].Flags[SubEnt % PORTS_PER_MODULE];
- else
- PortP->Config |= p->RIOModuleTypes[HINYBLE(HostP->UnixRups[HostMapP->ID - 1].ModTypes)].Flags[SubEnt % PORTS_PER_MODULE];
- }
-
- /*
- ** more port related flags
- */
- PortP->PortState = 0;
- PortP->ModemLines = 0;
- PortP->ModemState = 0;
- PortP->CookMode = COOK_WELL;
- PortP->ParamSem = 0;
- PortP->FlushCmdBodge = 0;
- PortP->WflushFlag = 0;
- PortP->MagicFlags = 0;
- PortP->Lock = 0;
- PortP->Store = 0;
- PortP->FirstOpen = 1;
-
- /*
- ** Buffers 'n things
- */
- PortP->RxDataStart = 0;
- PortP->Cor2Copy = 0;
- PortP->Name = &HostMapP->Name[0];
- PortP->statsGather = 0;
- PortP->txchars = 0;
- PortP->rxchars = 0;
- PortP->opens = 0;
- PortP->closes = 0;
- PortP->ioctls = 0;
- if (PortP->TxRingBuffer)
- memset(PortP->TxRingBuffer, 0, p->RIOBufferSize);
- else if (p->RIOBufferSize) {
- PortP->TxRingBuffer = kzalloc(p->RIOBufferSize, GFP_KERNEL);
- }
- PortP->TxBufferOut = 0;
- PortP->TxBufferIn = 0;
- PortP->Debug = 0;
- /*
- ** LastRxTgl stores the state of the rx toggle bit for this
- ** port, to be compared with the state of the next pkt received.
- ** If the same, we have received the same rx pkt from the RTA
- ** twice. Initialise to a value not equal to PHB_RX_TGL or 0.
- */
- PortP->LastRxTgl = ~(u8) PHB_RX_TGL;
-
- /*
- ** and mark the port as usable
- */
- PortP->Mapped = 1;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- }
- if (HostMapP->SysPort < p->RIOFirstPortsMapped)
- p->RIOFirstPortsMapped = HostMapP->SysPort;
- if (HostMapP->SysPort > p->RIOLastPortsMapped)
- p->RIOLastPortsMapped = HostMapP->SysPort;
-
- return 0;
-}
-
-int RIOChangeName(struct rio_info *p, struct Map *MapP)
-{
- int host;
- struct Map *HostMapP;
- char *sptr;
-
- rio_dprintk(RIO_DEBUG_TABLE, "Change name entry on host %x, rta %x, ID %d, Sysport %d\n", MapP->HostUniqueNum, MapP->RtaUniqueNum, MapP->ID, (int) MapP->SysPort);
-
- if (MapP->ID > MAX_RUP) {
- rio_dprintk(RIO_DEBUG_TABLE, "Bad ID in map entry!\n");
- p->RIOError.Error = ID_NUMBER_OUT_OF_RANGE;
- return -EINVAL;
- }
-
- MapP->Name[MAX_NAME_LEN - 1] = '\0';
- sptr = MapP->Name;
-
- while (*sptr) {
- if (*sptr < ' ' || *sptr > '~') {
- rio_dprintk(RIO_DEBUG_TABLE, "Name entry contains non-printing characters!\n");
- p->RIOError.Error = BAD_CHARACTER_IN_NAME;
- return -EINVAL;
- }
- sptr++;
- }
-
- for (host = 0; host < p->RIONumHosts; host++) {
- if (MapP->HostUniqueNum == p->RIOHosts[host].UniqueNum) {
- if ((p->RIOHosts[host].Flags & RUN_STATE) != RC_RUNNING) {
- p->RIOError.Error = HOST_NOT_RUNNING;
- return -ENXIO;
- }
- if (MapP->ID == 0) {
- memcpy(p->RIOHosts[host].Name, MapP->Name, MAX_NAME_LEN);
- return 0;
- }
-
- HostMapP = &p->RIOHosts[host].Mapping[MapP->ID - 1];
-
- if (HostMapP->RtaUniqueNum != MapP->RtaUniqueNum) {
- p->RIOError.Error = RTA_NUMBER_WRONG;
- return -ENXIO;
- }
- memcpy(HostMapP->Name, MapP->Name, MAX_NAME_LEN);
- return 0;
- }
- }
- p->RIOError.Error = UNKNOWN_HOST_NUMBER;
- rio_dprintk(RIO_DEBUG_TABLE, "Unknown host %x\n", MapP->HostUniqueNum);
- return -ENXIO;
-}
diff --git a/drivers/staging/generic_serial/rio/riotty.c b/drivers/staging/generic_serial/rio/riotty.c
deleted file mode 100644
index e7e9911d7a7..00000000000
--- a/drivers/staging/generic_serial/rio/riotty.c
+++ /dev/null
@@ -1,654 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : riotty.c
-** SID : 1.3
-** Last Modified : 11/6/98 10:33:47
-** Retrieved : 11/6/98 10:33:50
-**
-** ident @(#)riotty.c 1.3
-**
-** -----------------------------------------------------------------------------
-*/
-
-#define __EXPLICIT_DEF_H__
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/tty.h>
-#include <linux/string.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/string.h>
-#include <asm/uaccess.h>
-
-#include <linux/termios.h>
-
-#include <linux/serial.h>
-
-#include <linux/generic_serial.h>
-
-
-#include "linux_compat.h"
-#include "rio_linux.h"
-#include "pkt.h"
-#include "daemon.h"
-#include "rio.h"
-#include "riospace.h"
-#include "cmdpkt.h"
-#include "map.h"
-#include "rup.h"
-#include "port.h"
-#include "riodrvr.h"
-#include "rioinfo.h"
-#include "func.h"
-#include "errors.h"
-#include "pci.h"
-
-#include "parmmap.h"
-#include "unixrup.h"
-#include "board.h"
-#include "host.h"
-#include "phb.h"
-#include "link.h"
-#include "cmdblk.h"
-#include "route.h"
-#include "cirrus.h"
-#include "rioioctl.h"
-#include "param.h"
-
-static void RIOClearUp(struct Port *PortP);
-
-/* Below belongs in func.h */
-int RIOShortCommand(struct rio_info *p, struct Port *PortP, int command, int len, int arg);
-
-
-extern struct rio_info *p;
-
-
-int riotopen(struct tty_struct *tty, struct file *filp)
-{
- unsigned int SysPort;
- int repeat_this = 250;
- struct Port *PortP; /* pointer to the port structure */
- unsigned long flags;
- int retval = 0;
-
- func_enter();
-
-	/* Make sure driver_data is NULL in case the rio isn't booted yet. Else gs_close
- is going to oops.
- */
- tty->driver_data = NULL;
-
- SysPort = rio_minor(tty);
-
- if (p->RIOFailed) {
- rio_dprintk(RIO_DEBUG_TTY, "System initialisation failed\n");
- func_exit();
- return -ENXIO;
- }
-
- rio_dprintk(RIO_DEBUG_TTY, "port open SysPort %d (mapped:%d)\n", SysPort, p->RIOPortp[SysPort]->Mapped);
-
- /*
- ** Validate that we have received a legitimate request.
- ** Currently, just check that we are opening a port on
- ** a host card that actually exists, and that the port
- ** has been mapped onto a host.
- */
- if (SysPort >= RIO_PORTS) { /* out of range ? */
- rio_dprintk(RIO_DEBUG_TTY, "Illegal port number %d\n", SysPort);
- func_exit();
- return -ENXIO;
- }
-
- /*
- ** Grab pointer to the port structure
- */
- PortP = p->RIOPortp[SysPort]; /* Get control struc */
- rio_dprintk(RIO_DEBUG_TTY, "PortP: %p\n", PortP);
- if (!PortP->Mapped) { /* we aren't mapped yet! */
- /*
- ** The system doesn't know which RTA this port
- ** corresponds to.
- */
- rio_dprintk(RIO_DEBUG_TTY, "port not mapped into system\n");
- func_exit();
- return -ENXIO;
- }
-
- tty->driver_data = PortP;
-
- PortP->gs.port.tty = tty;
- PortP->gs.port.count++;
-
- rio_dprintk(RIO_DEBUG_TTY, "%d bytes in tx buffer\n", PortP->gs.xmit_cnt);
-
- retval = gs_init_port(&PortP->gs);
- if (retval) {
- PortP->gs.port.count--;
- return -ENXIO;
- }
- /*
- ** If the host hasn't been booted yet, then
- ** fail
- */
- if ((PortP->HostP->Flags & RUN_STATE) != RC_RUNNING) {
- rio_dprintk(RIO_DEBUG_TTY, "Host not running\n");
- func_exit();
- return -ENXIO;
- }
-
- /*
- ** If the RTA has not booted yet and the user has chosen to block
- ** until the RTA is present then we must spin here waiting for
- ** the RTA to boot.
- */
- /* I find the above code a bit hairy. I find the below code
- easier to read and shorter. Now, if it works too that would
- be great... -- REW
- */
- rio_dprintk(RIO_DEBUG_TTY, "Checking if RTA has booted... \n");
- while (!(PortP->HostP->Mapping[PortP->RupNum].Flags & RTA_BOOTED)) {
- if (!PortP->WaitUntilBooted) {
- rio_dprintk(RIO_DEBUG_TTY, "RTA never booted\n");
- func_exit();
- return -ENXIO;
- }
-
- /* Under Linux you'd normally use a wait instead of this
- busy-waiting. I'll stick with the old implementation for
- now. --REW
- */
- if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_TTY, "RTA_wait_for_boot: EINTR in delay \n");
- func_exit();
- return -EINTR;
- }
- if (repeat_this-- <= 0) {
- rio_dprintk(RIO_DEBUG_TTY, "Waiting for RTA to boot timeout\n");
- func_exit();
- return -EIO;
- }
- }
- rio_dprintk(RIO_DEBUG_TTY, "RTA has been booted\n");
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- if (p->RIOHalted) {
- goto bombout;
- }
-
- /*
-	** If the port is in the final throes of being closed,
-	** we should wait here (politely) for it to finish,
-	** so that it doesn't close us!
- */
- while ((PortP->State & RIO_CLOSING) && !p->RIOHalted) {
- rio_dprintk(RIO_DEBUG_TTY, "Waiting for RIO_CLOSING to go away\n");
- if (repeat_this-- <= 0) {
- rio_dprintk(RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n");
- RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
- retval = -EINTR;
- goto bombout;
- }
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- retval = -EINTR;
- goto bombout;
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- }
-
- if (!PortP->Mapped) {
- rio_dprintk(RIO_DEBUG_TTY, "Port unmapped while closing!\n");
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- retval = -ENXIO;
- func_exit();
- return retval;
- }
-
- if (p->RIOHalted) {
- goto bombout;
- }
-
-/*
-** 15.10.1998 ARG - ESIL 0761 part fix
-** RIO has its own CTSFLOW and RTSFLOW flags in 'Config' in the port structure;
-** we need to make sure that the flags are clear when the port is opened.
-*/
- /* Uh? Suppose I turn these on and then another process opens
- the port again? The flags get cleared! Not good. -- REW */
- if (!(PortP->State & (RIO_LOPEN | RIO_MOPEN))) {
- PortP->Config &= ~(RIO_CTSFLOW | RIO_RTSFLOW);
- }
-
- if (!(PortP->firstOpen)) { /* First time ? */
- rio_dprintk(RIO_DEBUG_TTY, "First open for this port\n");
-
-
- PortP->firstOpen++;
- PortP->CookMode = 0; /* XXX RIOCookMode(tp); */
- PortP->InUse = NOT_INUSE;
-
- /* Tentative fix for bug PR27. Didn't work. */
- /* PortP->gs.xmit_cnt = 0; */
-
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
-
- /* Someone explain to me why this delay/config is
- here. If I read the docs correctly the "open"
- command piggybacks the parameters immediately.
- -- REW */
- RIOParam(PortP, RIOC_OPEN, 1, OK_TO_SLEEP); /* Open the port */
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- /*
-		** wait for the port to become open.
- */
- while (!(PortP->PortState & PORT_ISOPEN) && !p->RIOHalted) {
- rio_dprintk(RIO_DEBUG_TTY, "Waiting for PORT_ISOPEN-currently %x\n", PortP->PortState);
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_TTY, "Waiting for open to finish broken by signal\n");
- RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
- func_exit();
- return -EINTR;
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- }
-
- if (p->RIOHalted) {
- retval = -EIO;
- bombout:
- /* RIOClearUp( PortP ); */
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return retval;
- }
- rio_dprintk(RIO_DEBUG_TTY, "PORT_ISOPEN found\n");
- }
- rio_dprintk(RIO_DEBUG_TTY, "Modem - test for carrier\n");
- /*
- ** ACTION
- ** insert test for carrier here. -- ???
- ** I already see that test here. What's the deal? -- REW
- */
- if ((PortP->gs.port.tty->termios->c_cflag & CLOCAL) ||
- (PortP->ModemState & RIOC_MSVR1_CD)) {
- rio_dprintk(RIO_DEBUG_TTY, "open(%d) Modem carr on\n", SysPort);
- /*
- tp->tm.c_state |= CARR_ON;
- wakeup((caddr_t) &tp->tm.c_canq);
- */
- PortP->State |= RIO_CARR_ON;
- wake_up_interruptible(&PortP->gs.port.open_wait);
- } else { /* no carrier - wait for DCD */
- /*
- while (!(PortP->gs.port.tty->termios->c_state & CARR_ON) &&
- !(filp->f_flags & O_NONBLOCK) && !p->RIOHalted )
- */
- while (!(PortP->State & RIO_CARR_ON) && !(filp->f_flags & O_NONBLOCK) && !p->RIOHalted) {
- rio_dprintk(RIO_DEBUG_TTY, "open(%d) sleeping for carr on\n", SysPort);
- /*
- PortP->gs.port.tty->termios->c_state |= WOPEN;
- */
- PortP->State |= RIO_WOPEN;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- /*
- ** ACTION: verify that this is a good thing
- ** to do here. -- ???
- ** I think it's OK. -- REW
- */
- rio_dprintk(RIO_DEBUG_TTY, "open(%d) sleeping for carr broken by signal\n", SysPort);
- RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
- /*
- tp->tm.c_state &= ~WOPEN;
- */
- PortP->State &= ~RIO_WOPEN;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- func_exit();
- return -EINTR;
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- }
- PortP->State &= ~RIO_WOPEN;
- }
- if (p->RIOHalted)
- goto bombout;
- rio_dprintk(RIO_DEBUG_TTY, "Setting RIO_MOPEN\n");
- PortP->State |= RIO_MOPEN;
-
- if (p->RIOHalted)
- goto bombout;
-
- rio_dprintk(RIO_DEBUG_TTY, "high level open done\n");
-
- /*
- ** Count opens for port statistics reporting
- */
- if (PortP->statsGather)
- PortP->opens++;
-
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- rio_dprintk(RIO_DEBUG_TTY, "Returning from open\n");
- func_exit();
- return 0;
-}
-
-/*
-** RIOClose the port.
-** The operating system thinks that this is the last close for the device.
-** As there are two interfaces to the port (Modem and tty), we need to
-** check that both are closed before we close the device.
-*/
-int riotclose(void *ptr)
-{
- struct Port *PortP = ptr; /* pointer to the port structure */
- int deleted = 0;
- int try = -1; /* Disable the timeouts by setting them to -1 */
- int repeat_this = -1; /* Congrats to those having 15 years of
- uptime! (You get to break the driver.) */
- unsigned long end_time;
- struct tty_struct *tty;
- unsigned long flags;
- int rv = 0;
-
- rio_dprintk(RIO_DEBUG_TTY, "port close SysPort %d\n", PortP->PortNum);
-
- /* PortP = p->RIOPortp[SysPort]; */
- rio_dprintk(RIO_DEBUG_TTY, "Port is at address %p\n", PortP);
- /* tp = PortP->TtyP; *//* Get tty */
- tty = PortP->gs.port.tty;
- rio_dprintk(RIO_DEBUG_TTY, "TTY is at address %p\n", tty);
-
- if (PortP->gs.closing_wait)
- end_time = jiffies + PortP->gs.closing_wait;
- else
- end_time = jiffies + MAX_SCHEDULE_TIMEOUT;
-
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- /*
- ** Setting this flag will make any process trying to open
-	** this port block until we have finished closing it.
- */
- PortP->State |= RIO_CLOSING;
-
- if ((PortP->State & RIO_DELETED)) {
- rio_dprintk(RIO_DEBUG_TTY, "Close on deleted RTA\n");
- deleted = 1;
- }
-
- if (p->RIOHalted) {
- RIOClearUp(PortP);
- rv = -EIO;
- goto close_end;
- }
-
- rio_dprintk(RIO_DEBUG_TTY, "Clear bits\n");
- /*
- ** clear the open bits for this device
- */
- PortP->State &= ~RIO_MOPEN;
- PortP->State &= ~RIO_CARR_ON;
- PortP->ModemState &= ~RIOC_MSVR1_CD;
- /*
- ** If the device was open as both a Modem and a tty line
- ** then we need to wimp out here, as the port has not really
- ** been finally closed (gee, whizz!) The test here uses the
- ** bit for the OTHER mode of operation, to see if THAT is
- ** still active!
- */
- if ((PortP->State & (RIO_LOPEN | RIO_MOPEN))) {
- /*
- ** The port is still open for the other task -
- ** return, pretending that we are still active.
- */
- rio_dprintk(RIO_DEBUG_TTY, "Channel %d still open !\n", PortP->PortNum);
- PortP->State &= ~RIO_CLOSING;
- if (PortP->firstOpen)
- PortP->firstOpen--;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return -EIO;
- }
-
- rio_dprintk(RIO_DEBUG_TTY, "Closing down - everything must go!\n");
-
- PortP->State &= ~RIO_DYNOROD;
-
- /*
- ** This is where we wait for the port
- ** to drain down before closing. Bye-bye....
- ** (We never meant to do this)
- */
- rio_dprintk(RIO_DEBUG_TTY, "Timeout 1 starts\n");
-
- if (!deleted)
- while ((PortP->InUse != NOT_INUSE) && !p->RIOHalted && (PortP->TxBufferIn != PortP->TxBufferOut)) {
- if (repeat_this-- <= 0) {
- rv = -EINTR;
- rio_dprintk(RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n");
- RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
- goto close_end;
- }
- rio_dprintk(RIO_DEBUG_TTY, "Calling timeout to flush in closing\n");
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- if (RIODelay_ni(PortP, HUNDRED_MS * 10) == RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_TTY, "RTA EINTR in delay \n");
- rv = -EINTR;
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- goto close_end;
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- }
-
- PortP->TxBufferIn = PortP->TxBufferOut = 0;
- repeat_this = 0xff;
-
- PortP->InUse = 0;
- if ((PortP->State & (RIO_LOPEN | RIO_MOPEN))) {
- /*
- ** The port has been re-opened for the other task -
- ** return, pretending that we are still active.
- */
- rio_dprintk(RIO_DEBUG_TTY, "Channel %d re-open!\n", PortP->PortNum);
- PortP->State &= ~RIO_CLOSING;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- if (PortP->firstOpen)
- PortP->firstOpen--;
- return -EIO;
- }
-
- if (p->RIOHalted) {
- RIOClearUp(PortP);
- goto close_end;
- }
-
- /* Can't call RIOShortCommand with the port locked. */
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
-
- if (RIOShortCommand(p, PortP, RIOC_CLOSE, 1, 0) == RIO_FAIL) {
- RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- goto close_end;
- }
-
- if (!deleted)
- while (try && (PortP->PortState & PORT_ISOPEN)) {
- try--;
- if (time_after(jiffies, end_time)) {
- rio_dprintk(RIO_DEBUG_TTY, "Run out of tries - force the bugger shut!\n");
- RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
- break;
- }
- rio_dprintk(RIO_DEBUG_TTY, "Close: PortState:ISOPEN is %d\n", PortP->PortState & PORT_ISOPEN);
-
- if (p->RIOHalted) {
- RIOClearUp(PortP);
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- goto close_end;
- }
- if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
- rio_dprintk(RIO_DEBUG_TTY, "RTA EINTR in delay \n");
- RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
- break;
- }
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- rio_dprintk(RIO_DEBUG_TTY, "Close: try was %d on completion\n", try);
-
- /* RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE); */
-
-/*
-** 15.10.1998 ARG - ESIL 0761 part fix
-** RIO has its own CTSFLOW and RTSFLOW flags in 'Config' in the port structure,
-** so we need to make sure that the flags are clear when the port is opened.
-*/
- PortP->Config &= ~(RIO_CTSFLOW | RIO_RTSFLOW);
-
- /*
- ** Count opens for port statistics reporting
- */
- if (PortP->statsGather)
- PortP->closes++;
-
-close_end:
- /* XXX: Why would a "DELETED" flag be reset here? I'd have
- thought that a "deleted" flag means that the port was
- permanently gone, but here we can make it reappear by it
- being in close during the "deletion".
- */
- PortP->State &= ~(RIO_CLOSING | RIO_DELETED);
- if (PortP->firstOpen)
- PortP->firstOpen--;
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- rio_dprintk(RIO_DEBUG_TTY, "Return from close\n");
- return rv;
-}
-
-
-
-static void RIOClearUp(struct Port *PortP)
-{
- rio_dprintk(RIO_DEBUG_TTY, "RIOHalted set\n");
- PortP->Config = 0; /* Direct semaphore */
- PortP->PortState = 0;
- PortP->firstOpen = 0;
- PortP->FlushCmdBodge = 0;
- PortP->ModemState = PortP->CookMode = 0;
- PortP->Mapped = 0;
- PortP->WflushFlag = 0;
- PortP->MagicFlags = 0;
- PortP->RxDataStart = 0;
- PortP->TxBufferIn = 0;
- PortP->TxBufferOut = 0;
-}
-
-/*
-** Put a command onto a port.
-** The PortPointer, command, length and arg are passed.
-** The len is the length *inclusive* of the command byte,
-** and so for a command that takes no data, len==1.
-** The arg is a single byte, and is only used if len==2.
-** Other values of len aren't allowed, and will cause
-** a panic.
-*/
-int RIOShortCommand(struct rio_info *p, struct Port *PortP, int command, int len, int arg)
-{
- struct PKT __iomem *PacketP;
- int retries = 20; /* at 10 per second -> 2 seconds */
- unsigned long flags;
-
- rio_dprintk(RIO_DEBUG_TTY, "entering shortcommand.\n");
-
- if (PortP->State & RIO_DELETED) {
- rio_dprintk(RIO_DEBUG_TTY, "Short command to deleted RTA ignored\n");
- return RIO_FAIL;
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
-
- /*
- ** If the port is in use for pre-emptive command, then wait for it to
- ** be free again.
- */
- while ((PortP->InUse != NOT_INUSE) && !p->RIOHalted) {
- rio_dprintk(RIO_DEBUG_TTY, "Waiting for not in use (%d)\n", retries);
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- if (retries-- <= 0) {
- return RIO_FAIL;
- }
- if (RIODelay_ni(PortP, HUNDRED_MS) == RIO_FAIL) {
- return RIO_FAIL;
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- }
- if (PortP->State & RIO_DELETED) {
- rio_dprintk(RIO_DEBUG_TTY, "Short command to deleted RTA ignored\n");
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return RIO_FAIL;
- }
-
- while (!can_add_transmit(&PacketP, PortP) && !p->RIOHalted) {
- rio_dprintk(RIO_DEBUG_TTY, "Waiting to add short command to queue (%d)\n", retries);
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- if (retries-- <= 0) {
- rio_dprintk(RIO_DEBUG_TTY, "out of tries. Failing\n");
- return RIO_FAIL;
- }
- if (RIODelay_ni(PortP, HUNDRED_MS) == RIO_FAIL) {
- return RIO_FAIL;
- }
- rio_spin_lock_irqsave(&PortP->portSem, flags);
- }
-
- if (p->RIOHalted) {
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return RIO_FAIL;
- }
-
- /*
- ** set the command byte and the argument byte
- */
- writeb(command, &PacketP->data[0]);
-
- if (len == 2)
- writeb(arg, &PacketP->data[1]);
-
- /*
- ** set the length of the packet and set the command bit.
- */
- writeb(PKT_CMD_BIT | len, &PacketP->len);
-
- add_transmit(PortP);
- /*
- ** Count characters transmitted for port statistics reporting
- */
- if (PortP->statsGather)
- PortP->txchars += len;
-
- rio_spin_unlock_irqrestore(&PortP->portSem, flags);
- return p->RIOHalted ? RIO_FAIL : ~RIO_FAIL;
-}
-
-
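[Editor's note] The comment above RIOShortCommand() fixes the calling convention: len counts the command byte itself, so a command with no data uses len == 1 and a command carrying a single argument byte uses len == 2. Two hedged example calls follow; the RIOC_CLOSE call mirrors the one in riotclose() above, while the second line only illustrates the len == 2 shape and its command name is an assumption, not a reference.

/* Sketch only: the two legal shapes of a RIOShortCommand() call. */
if (RIOShortCommand(p, PortP, RIOC_CLOSE, 1, 0) == RIO_FAIL)		/* no data byte */
	rio_dprintk(RIO_DEBUG_TTY, "close command could not be queued\n");

if (RIOShortCommand(p, PortP, RIOC_SBREAK, 2, 0x20) == RIO_FAIL)	/* one data byte; command name assumed */
	rio_dprintk(RIO_DEBUG_TTY, "break command could not be queued\n");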
diff --git a/drivers/staging/generic_serial/rio/route.h b/drivers/staging/generic_serial/rio/route.h
deleted file mode 100644
index 46e963771c3..00000000000
--- a/drivers/staging/generic_serial/rio/route.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/****************************************************************************
- ******* *******
- ******* R O U T E H E A D E R
- ******* *******
- ****************************************************************************
-
- Author : Ian Nandhra / Jeremy Rolls
- Date :
-
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Version : 0.01
-
-
- Mods
- ----------------------------------------------------------------------------
- Date By Description
- ----------------------------------------------------------------------------
-
- ***************************************************************************/
-
-#ifndef _route_h
-#define _route_h
-
-#define MAX_LINKS 4
-#define MAX_NODES 17 /* Maximum nodes in a subnet */
-#define NODE_BYTES ((MAX_NODES / 8) + 1) /* Number of bytes needed for
- 1 bit per node */
-#define ROUTE_DATA_SIZE (NODE_BYTES + 2) /* Number of bytes for complete
- info about cost etc. */
-#define ROUTES_PER_PACKET ((PKT_MAX_DATA_LEN -2)/ ROUTE_DATA_SIZE)
- /* Number of nodes we can squeeze
- into one packet */
-#define MAX_TOPOLOGY_PACKETS (MAX_NODES / ROUTES_PER_PACKET + 1)
-/************************************************
- * Define the types of command for the ROUTE RUP.
- ************************************************/
-#define ROUTE_REQUEST 0 /* Request an ID */
-#define ROUTE_FOAD 1 /* Kill the RTA */
-#define ROUTE_ALREADY 2 /* ID given already */
-#define ROUTE_USED 3 /* All ID's used */
-#define ROUTE_ALLOCATE 4 /* Here it is */
-#define ROUTE_REQ_TOP 5 /* I bet you didn't expect....
- the Topological Inquisition */
-#define ROUTE_TOPOLOGY 6 /* Topology request answered FD */
-/*******************************************************************
- * Define the Route Map Structure
- *
- * The route map gives a pointer to a Link Structure to use.
- * This allows Disconnected Links to be checked quickly
- ******************************************************************/
-typedef struct COST_ROUTE COST_ROUTE;
-struct COST_ROUTE {
- unsigned char cost; /* Cost down this link */
- unsigned char route[NODE_BYTES]; /* Nodes through this route */
-};
-
-typedef struct ROUTE_STR ROUTE_STR;
-struct ROUTE_STR {
- COST_ROUTE cost_route[MAX_LINKS];
- /* cost / route for this link */
- ushort favoured; /* favoured link */
-};
-
-
-#define NO_LINK (short) 5 /* Link unattached */
-#define ROUTE_NO_ID (short) 100 /* No Id */
-#define ROUTE_DISCONNECT (ushort) 0xff /* Not connected */
-#define ROUTE_INTERCONNECT (ushort) 0x40 /* Sub-net interconnect */
-
-
-#define SYNC_RUP (ushort) 255
-#define COMMAND_RUP (ushort) 254
-#define ERROR_RUP (ushort) 253
-#define POLL_RUP (ushort) 252
-#define BOOT_RUP (ushort) 251
-#define ROUTE_RUP (ushort) 250
-#define STATUS_RUP (ushort) 249
-#define POWER_RUP (ushort) 248
-
-#define HIGHEST_RUP (ushort) 255 /* Set to Top one */
-#define LOWEST_RUP (ushort) 248 /* Set to bottom one */
-
-#endif
-
-/*********** end of file ***********/
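[Editor's note] route.h above packs one bit per node into NODE_BYTES bytes and then derives how many per-link cost/route records fit into a single packet. Expanding the macros makes the arithmetic concrete; PKT_MAX_DATA_LEN lives in a packet header that is not part of this diff, so the value 72 below is an assumption used only to show the shape of the calculation.

/* Worked expansion of the route.h size macros (PKT_MAX_DATA_LEN assumed). */
#define MAX_NODES            17
#define NODE_BYTES           ((MAX_NODES / 8) + 1)		/* 17/8 + 1 = 3 bytes */
#define ROUTE_DATA_SIZE      (NODE_BYTES + 2)			/* 3 + 2    = 5 bytes */
#define PKT_MAX_DATA_LEN     72					/* assumed for the example */
#define ROUTES_PER_PACKET    ((PKT_MAX_DATA_LEN - 2) / ROUTE_DATA_SIZE)	/* 70/5  = 14 routes  */
#define MAX_TOPOLOGY_PACKETS (MAX_NODES / ROUTES_PER_PACKET + 1)	/* 17/14 + 1 = 2 packets */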
diff --git a/drivers/staging/generic_serial/rio/rup.h b/drivers/staging/generic_serial/rio/rup.h
deleted file mode 100644
index 4ae90cb207a..00000000000
--- a/drivers/staging/generic_serial/rio/rup.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/****************************************************************************
- ******* *******
- ******* R U P S T R U C T U R E
- ******* *******
- ****************************************************************************
-
- Author : Ian Nandhra
- Date :
-
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Version : 0.01
-
-
- Mods
- ----------------------------------------------------------------------------
- Date By Description
- ----------------------------------------------------------------------------
-
- ***************************************************************************/
-
-#ifndef _rup_h
-#define _rup_h 1
-
-#define MAX_RUP ((short) 16)
-#define PKTS_PER_RUP ((short) 2) /* They are always used in pairs */
-
-/*************************************************
- * Define all the packet request stuff
- ************************************************/
-#define TX_RUP_INACTIVE 0 /* Nothing to transmit */
-#define TX_PACKET_READY 1 /* Transmit packet ready */
-#define TX_LOCK_RUP 2 /* Transmit side locked */
-
-#define RX_RUP_INACTIVE 0 /* Nothing received */
-#define RX_PACKET_READY 1 /* Packet received */
-
-#define RUP_NO_OWNER 0xff /* RUP not owned by any process */
-
-struct RUP {
- u16 txpkt; /* Outgoing packet */
- u16 rxpkt; /* Incoming packet */
- u16 link; /* Which link to send down? */
- u8 rup_dest_unit[2]; /* Destination unit */
- u16 handshake; /* For handshaking */
- u16 timeout; /* Timeout */
- u16 status; /* Status */
- u16 txcontrol; /* Transmit control */
- u16 rxcontrol; /* Receive control */
-};
-
-#endif
-
-/*********** end of file ***********/
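[Editor's note] rup.h above defines the handshake states for a Remote Unit Port: the transmit side moves from TX_RUP_INACTIVE to TX_PACKET_READY (with TX_LOCK_RUP while locked), and the fields are reached through __iomem pointers on the host card (see unixrup.h below). A minimal sketch of how host code might respect that state machine when offering a packet; rup_queue_packet() is a hypothetical helper, the meaning of txpkt is inferred, and the real driver drives RUPs from its command and poll machinery instead.

/* Sketch only: honour the txcontrol handshake before handing over a packet. */
static int rup_queue_packet(struct RUP __iomem *rup, u16 pktaddr)
{
	if (readw(&rup->txcontrol) != TX_RUP_INACTIVE)
		return -EBUSY;			/* previous packet not yet collected */

	writew(pktaddr, &rup->txpkt);		/* hand the packet descriptor over */
	writew(TX_PACKET_READY, &rup->txcontrol);
	return 0;
}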
diff --git a/drivers/staging/generic_serial/rio/unixrup.h b/drivers/staging/generic_serial/rio/unixrup.h
deleted file mode 100644
index 7abf0cba0f2..00000000000
--- a/drivers/staging/generic_serial/rio/unixrup.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
-** -----------------------------------------------------------------------------
-**
-** Perle Specialix driver for Linux
-** Ported from existing RIO Driver for SCO sources.
- *
- * (C) 1990 - 2000 Specialix International Ltd., Byfleet, Surrey, UK.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-**
-** Module : unixrup.h
-** SID : 1.2
-** Last Modified : 11/6/98 11:34:20
-** Retrieved : 11/6/98 11:34:22
-**
-** ident @(#)unixrup.h 1.2
-**
-** -----------------------------------------------------------------------------
-*/
-
-#ifndef __rio_unixrup_h__
-#define __rio_unixrup_h__
-
-/*
-** UnixRup data structure. This contains pointers to actual RUPs on the
-** host card, and all the command/boot control stuff.
-*/
-struct UnixRup {
- struct CmdBlk *CmdsWaitingP; /* Commands waiting to be done */
- struct CmdBlk *CmdPendingP; /* The command currently being sent */
- struct RUP __iomem *RupP; /* the Rup to send it to */
- unsigned int Id; /* Id number */
- unsigned int BaseSysPort; /* SysPort of first tty on this RTA */
- unsigned int ModTypes; /* Modules on this RTA */
- spinlock_t RupLock; /* Lock structure for MPX */
- /* struct lockb RupLock; *//* Lock structure for MPX */
-};
-
-#endif /* __rio_unixrup_h__ */
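[Editor's note] struct UnixRup above pairs the command-queue pointers with a per-RUP spinlock. The sketch below shows the locking discipline that implies, using only the fields shown; rup_is_busy() is a hypothetical helper, not a function of this driver.

/* Sketch only: read the command-queue state under RupLock. */
static int rup_is_busy(struct UnixRup *unixrup)
{
	unsigned long flags;
	int busy;

	spin_lock_irqsave(&unixrup->RupLock, flags);
	busy = (unixrup->CmdPendingP != NULL) || (unixrup->CmdsWaitingP != NULL);
	spin_unlock_irqrestore(&unixrup->RupLock, flags);

	return busy;
}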
diff --git a/drivers/staging/generic_serial/ser_a2232.c b/drivers/staging/generic_serial/ser_a2232.c
deleted file mode 100644
index 3f47c2ead8e..00000000000
--- a/drivers/staging/generic_serial/ser_a2232.c
+++ /dev/null
@@ -1,831 +0,0 @@
-/* drivers/char/ser_a2232.c */
-
-/* $Id: ser_a2232.c,v 0.4 2000/01/25 12:00:00 ehaase Exp $ */
-
-/* Linux serial driver for the Amiga A2232 board */
-
-/* This driver is MAINTAINED. Before applying any changes, please contact
- * the author.
- */
-
-/* Copyright (c) 2000-2001 Enver Haase <ehaase@inf.fu-berlin.de>
- * alias The A2232 driver project <A2232@gmx.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-/***************************** Documentation ************************/
-/*
- * This driver is in EXPERIMENTAL state. That means I could not find
- * someone with five A2232 boards with 35 ports running at 19200 bps
- * at the same time and test the machine's behaviour.
- * However, I know that you can performance-tweak this driver (see
- * the source code).
- * One thing to consider is the time this driver consumes during the
- * Amiga's vertical blank interrupt. Everything that is to be done
- * _IS DONE_ when entering the vertical blank interrupt handler of
- * this driver.
- * However, it would be more sane to only do the job for only ONE card
- * instead of ALL cards at a time; or, more generally, to handle only
- * SOME ports instead of ALL ports at a time.
- * However, as long as no-one runs into problems I guess I shouldn't
- * change the driver as it runs fine for me :) .
- *
- * Version history of this file:
- * 0.4 Resolved licensing issues.
- * 0.3 Inclusion in the Linux/m68k tree, small fixes.
- * 0.2 Added documentation, minor typo fixes.
- * 0.1 Initial release.
- *
- * TO DO:
- * - Handle incoming BREAK events. I guess "Stevens: Advanced
- * Programming in the UNIX(R) Environment" is a good reference
- * on what is to be done.
- * - When installing as a module, don't simply 'printk' text, but
- * send it to the TTY used by the user.
- *
- * THANKS TO:
- * - Jukka Marin (65EC02 code).
- * - The other NetBSD developers on whose A2232 driver I had a
- * pretty close look. However, I didn't copy any code so it
- * is okay to put my code under the GPL and include it into
- * Linux.
- */
-/***************************** End of Documentation *****************/
-
-/***************************** Defines ******************************/
-/*
- * Enables experimental 115200 (normal) 230400 (turbo) baud rate.
- * The A2232 specification states it can only operate at speeds up to
- * 19200 bits per second, and I was not able to send a file via
- * "sz"/"rz" and a null-modem cable from one A2232 port to another
- * at 115200 bits per second.
- * However, this might work for you.
- */
-#undef A2232_SPEEDHACK
-/*
- * Default is not to use RTS/CTS so you could be talked to death.
- */
-#define A2232_SUPPRESS_RTSCTS_WARNING
-/************************* End of Defines ***************************/
-
-/***************************** Includes *****************************/
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/tty.h>
-
-#include <asm/setup.h>
-#include <asm/amigaints.h>
-#include <asm/amigahw.h>
-#include <linux/zorro.h>
-#include <asm/irq.h>
-#include <linux/mutex.h>
-
-#include <linux/delay.h>
-
-#include <linux/serial.h>
-#include <linux/generic_serial.h>
-#include <linux/tty_flip.h>
-
-#include "ser_a2232.h"
-#include "ser_a2232fw.h"
-/************************* End of Includes **************************/
-
-/***************************** Prototypes ***************************/
-/* The interrupt service routine */
-static irqreturn_t a2232_vbl_inter(int irq, void *data);
-/* Initialize the port structures */
-static void a2232_init_portstructs(void);
-/* Initialize and register TTY drivers. */
-/* returns 0 IFF successful */
-static int a2232_init_drivers(void);
-
-/* BEGIN GENERIC_SERIAL PROTOTYPES */
-static void a2232_disable_tx_interrupts(void *ptr);
-static void a2232_enable_tx_interrupts(void *ptr);
-static void a2232_disable_rx_interrupts(void *ptr);
-static void a2232_enable_rx_interrupts(void *ptr);
-static int a2232_carrier_raised(struct tty_port *port);
-static void a2232_shutdown_port(void *ptr);
-static int a2232_set_real_termios(void *ptr);
-static int a2232_chars_in_buffer(void *ptr);
-static void a2232_close(void *ptr);
-static void a2232_hungup(void *ptr);
-/* static void a2232_getserial (void *ptr, struct serial_struct *sp); */
-/* END GENERIC_SERIAL PROTOTYPES */
-
-/* Functions that the TTY driver struct expects */
-static int a2232_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg);
-static void a2232_throttle(struct tty_struct *tty);
-static void a2232_unthrottle(struct tty_struct *tty);
-static int a2232_open(struct tty_struct * tty, struct file * filp);
-/************************* End of Prototypes ************************/
-
-/***************************** Global variables *********************/
-/*---------------------------------------------------------------------------
- * Interface from generic_serial.c back here
- *--------------------------------------------------------------------------*/
-static struct real_driver a2232_real_driver = {
- a2232_disable_tx_interrupts,
- a2232_enable_tx_interrupts,
- a2232_disable_rx_interrupts,
- a2232_enable_rx_interrupts,
- a2232_shutdown_port,
- a2232_set_real_termios,
- a2232_chars_in_buffer,
- a2232_close,
- a2232_hungup,
- NULL /* a2232_getserial */
-};
-
-static void *a2232_driver_ID = &a2232_driver_ID; // Some memory address WE own.
-
-/* Ports structs */
-static struct a2232_port a2232_ports[MAX_A2232_BOARDS*NUMLINES];
-
-/* TTY driver structs */
-static struct tty_driver *a2232_driver;
-
-/* nr of cards completely (all ports) and correctly configured */
-static int nr_a2232;
-
-/* zorro_dev structs for the A2232's */
-static struct zorro_dev *zd_a2232[MAX_A2232_BOARDS];
-/***************************** End of Global variables **************/
-
-/* Helper functions */
-
-static inline volatile struct a2232memory *a2232mem(unsigned int board)
-{
- return (volatile struct a2232memory *)ZTWO_VADDR(zd_a2232[board]->resource.start);
-}
-
-static inline volatile struct a2232status *a2232stat(unsigned int board,
- unsigned int portonboard)
-{
- volatile struct a2232memory *mem = a2232mem(board);
- return &(mem->Status[portonboard]);
-}
-
-static inline void a2232_receive_char(struct a2232_port *port, int ch, int err)
-{
-/* Mostly stolen from other drivers.
-   Maybe one could implement a more efficient version by transferring
-   more than one character at a time.
-*/
- struct tty_struct *tty = port->gs.port.tty;
-
-#if 0
- switch(err) {
- case TTY_BREAK:
- break;
- case TTY_PARITY:
- break;
- case TTY_OVERRUN:
- break;
- case TTY_FRAME:
- break;
- }
-#endif
-
- tty_insert_flip_char(tty, ch, err);
- tty_flip_buffer_push(tty);
-}
-
-/***************************** Functions ****************************/
-/*** BEGIN OF REAL_DRIVER FUNCTIONS ***/
-
-static void a2232_disable_tx_interrupts(void *ptr)
-{
- struct a2232_port *port;
- volatile struct a2232status *stat;
- unsigned long flags;
-
- port = ptr;
- stat = a2232stat(port->which_a2232, port->which_port_on_a2232);
- stat->OutDisable = -1;
-
- /* Does this here really have to be? */
- local_irq_save(flags);
- port->gs.port.flags &= ~GS_TX_INTEN;
- local_irq_restore(flags);
-}
-
-static void a2232_enable_tx_interrupts(void *ptr)
-{
- struct a2232_port *port;
- volatile struct a2232status *stat;
- unsigned long flags;
-
- port = ptr;
- stat = a2232stat(port->which_a2232, port->which_port_on_a2232);
- stat->OutDisable = 0;
-
- /* Does this here really have to be? */
- local_irq_save(flags);
- port->gs.port.flags |= GS_TX_INTEN;
- local_irq_restore(flags);
-}
-
-static void a2232_disable_rx_interrupts(void *ptr)
-{
- struct a2232_port *port;
- port = ptr;
- port->disable_rx = -1;
-}
-
-static void a2232_enable_rx_interrupts(void *ptr)
-{
- struct a2232_port *port;
- port = ptr;
- port->disable_rx = 0;
-}
-
-static int a2232_carrier_raised(struct tty_port *port)
-{
- struct a2232_port *ap = container_of(port, struct a2232_port, gs.port);
- return ap->cd_status;
-}
-
-static void a2232_shutdown_port(void *ptr)
-{
- struct a2232_port *port;
- volatile struct a2232status *stat;
- unsigned long flags;
-
- port = ptr;
- stat = a2232stat(port->which_a2232, port->which_port_on_a2232);
-
- local_irq_save(flags);
-
- port->gs.port.flags &= ~GS_ACTIVE;
-
- if (port->gs.port.tty && port->gs.port.tty->termios->c_cflag & HUPCL) {
- /* Set DTR and RTS to Low, flush output.
- The NetBSD driver "msc.c" does it this way. */
- stat->Command = ( (stat->Command & ~A2232CMD_CMask) |
- A2232CMD_Close );
- stat->OutFlush = -1;
- stat->Setup = -1;
- }
-
- local_irq_restore(flags);
-
- /* After analyzing control flow, I think a2232_shutdown_port
- is actually the last call from the system when at application
- level someone issues a "echo Hello >>/dev/ttyY0".
- Therefore I think the MOD_DEC_USE_COUNT should be here and
- not in "a2232_close()". See the comment in "sx.c", too.
- If you run into problems, compile this driver into the
- kernel instead of compiling it as a module. */
-}
-
-static int a2232_set_real_termios(void *ptr)
-{
- unsigned int cflag, baud, chsize, stopb, parity, softflow;
- int rate;
- int a2232_param, a2232_cmd;
- unsigned long flags;
- unsigned int i;
- struct a2232_port *port = ptr;
- volatile struct a2232status *status;
- volatile struct a2232memory *mem;
-
- if (!port->gs.port.tty || !port->gs.port.tty->termios) return 0;
-
- status = a2232stat(port->which_a2232, port->which_port_on_a2232);
- mem = a2232mem(port->which_a2232);
-
- a2232_param = a2232_cmd = 0;
-
- // get baud rate
- baud = port->gs.baud;
- if (baud == 0) {
- /* speed == 0 -> drop DTR, do nothing else */
- local_irq_save(flags);
- // Clear DTR (and RTS... mhhh).
- status->Command = ( (status->Command & ~A2232CMD_CMask) |
- A2232CMD_Close );
- status->OutFlush = -1;
- status->Setup = -1;
-
- local_irq_restore(flags);
- return 0;
- }
-
- rate = A2232_BAUD_TABLE_NOAVAIL;
- for (i=0; i < A2232_BAUD_TABLE_NUM_RATES * 3; i += 3){
- if (a2232_baud_table[i] == baud){
- if (mem->Common.Crystal == A2232_TURBO) rate = a2232_baud_table[i+2];
- else rate = a2232_baud_table[i+1];
- }
- }
- if (rate == A2232_BAUD_TABLE_NOAVAIL){
- printk("a2232: Board %d Port %d unsupported baud rate: %d baud. Using another.\n",port->which_a2232,port->which_port_on_a2232,baud);
- // This is useful for both (turbo or normal) Crystal versions.
- rate = A2232PARAM_B9600;
- }
- a2232_param |= rate;
-
- cflag = port->gs.port.tty->termios->c_cflag;
-
- // get character size
- chsize = cflag & CSIZE;
- switch (chsize){
- case CS8: a2232_param |= A2232PARAM_8Bit; break;
- case CS7: a2232_param |= A2232PARAM_7Bit; break;
- case CS6: a2232_param |= A2232PARAM_6Bit; break;
- case CS5: a2232_param |= A2232PARAM_5Bit; break;
- default: printk("a2232: Board %d Port %d unsupported character size: %d. Using 8 data bits.\n",
- port->which_a2232,port->which_port_on_a2232,chsize);
- a2232_param |= A2232PARAM_8Bit; break;
- }
-
- // get number of stop bits
- stopb = cflag & CSTOPB;
- if (stopb){ // two stop bits instead of one
- printk("a2232: Board %d Port %d 2 stop bits unsupported. Using 1 stop bit.\n",
- port->which_a2232,port->which_port_on_a2232);
- }
-
- // Warn if RTS/CTS not wanted
- if (!(cflag & CRTSCTS)){
-#ifndef A2232_SUPPRESS_RTSCTS_WARNING
- printk("a2232: Board %d Port %d cannot switch off firmware-implemented RTS/CTS hardware flow control.\n",
- port->which_a2232,port->which_port_on_a2232);
-#endif
- }
-
- /* I think this is correct.
- However, IXOFF means _input_ flow control and I wonder
- if one should care about IXON _output_ flow control,
- too. If this makes problems, one should turn the A2232
- firmware XON/XOFF "SoftFlow" flow control off and use
- the conventional way of inserting START/STOP characters
- by hand in throttle()/unthrottle().
- */
- softflow = !!( port->gs.port.tty->termios->c_iflag & IXOFF );
-
- // get Parity (Enabled/Disabled? If Enabled, Odd or Even?)
- parity = cflag & (PARENB | PARODD);
- if (parity & PARENB){
- if (parity & PARODD){
- a2232_cmd |= A2232CMD_OddParity;
- }
- else{
- a2232_cmd |= A2232CMD_EvenParity;
- }
- }
- else a2232_cmd |= A2232CMD_NoParity;
-
-
-	/* Hmm. Maybe a dedicated a2232_port structure
-	   member would be cleaner? */
- if (cflag & CLOCAL)
- port->gs.port.flags &= ~ASYNC_CHECK_CD;
- else
- port->gs.port.flags |= ASYNC_CHECK_CD;
-
-
- /* Now we have all parameters and can go to set them: */
- local_irq_save(flags);
-
- status->Param = a2232_param | A2232PARAM_RcvBaud;
- status->Command = a2232_cmd | A2232CMD_Open | A2232CMD_Enable;
- status->SoftFlow = softflow;
- status->OutDisable = 0;
- status->Setup = -1;
-
- local_irq_restore(flags);
- return 0;
-}
-
-static int a2232_chars_in_buffer(void *ptr)
-{
- struct a2232_port *port;
- volatile struct a2232status *status;
- unsigned char ret; /* we need modulo-256 arithmetics */
- port = ptr;
- status = a2232stat(port->which_a2232, port->which_port_on_a2232);
-#if A2232_IOBUFLEN != 256
-#error "Re-Implement a2232_chars_in_buffer()!"
-#endif
- ret = (status->OutHead - status->OutTail);
- return ret;
-}
-
-static void a2232_close(void *ptr)
-{
- a2232_disable_tx_interrupts(ptr);
- a2232_disable_rx_interrupts(ptr);
- /* see the comment in a2232_shutdown_port above. */
-}
-
-static void a2232_hungup(void *ptr)
-{
- a2232_close(ptr);
-}
-/*** END OF REAL_DRIVER FUNCTIONS ***/
-
-/*** BEGIN FUNCTIONS EXPECTED BY TTY DRIVER STRUCTS ***/
-static int a2232_ioctl( struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-
-static void a2232_throttle(struct tty_struct *tty)
-{
-/* Throttle: the system cannot take any more characters: drop RTS or
- send the STOP char or whatever.
- The A2232 firmware does RTS/CTS anyway, and XON/XOFF
- if switched on. So the only thing we can do at this
- layer here is not taking any characters out of the
- A2232 buffer any more. */
- struct a2232_port *port = tty->driver_data;
- port->throttle_input = -1;
-}
-
-static void a2232_unthrottle(struct tty_struct *tty)
-{
-/* Unthrottle: dual to "throttle()" above. */
- struct a2232_port *port = tty->driver_data;
- port->throttle_input = 0;
-}
-
-static int a2232_open(struct tty_struct * tty, struct file * filp)
-{
-/* More or less stolen from other drivers. */
- int line;
- int retval;
- struct a2232_port *port;
-
- line = tty->index;
- port = &a2232_ports[line];
-
- tty->driver_data = port;
- port->gs.port.tty = tty;
- port->gs.port.count++;
- retval = gs_init_port(&port->gs);
- if (retval) {
- port->gs.port.count--;
- return retval;
- }
- port->gs.port.flags |= GS_ACTIVE;
- retval = gs_block_til_ready(port, filp);
-
- if (retval) {
- port->gs.port.count--;
- return retval;
- }
-
- a2232_enable_rx_interrupts(port);
-
- return 0;
-}
-/*** END OF FUNCTIONS EXPECTED BY TTY DRIVER STRUCTS ***/
-
-static irqreturn_t a2232_vbl_inter(int irq, void *data)
-{
-#if A2232_IOBUFLEN != 256
-#error "Re-Implement a2232_vbl_inter()!"
-#endif
-
-	struct a2232_port *port;
-	volatile struct a2232memory *mem;
-	volatile struct a2232status *status;
-	unsigned char newhead;
-	unsigned char bufpos; /* Must be unsigned char. We need the modulo-256 arithmetics */
-	unsigned char ncd, ocd, ccd; /* names consistent with the NetBSD driver */
-	volatile u_char *ibuf, *cbuf, *obuf;
-	int ch, err, n, p;
- for (n = 0; n < nr_a2232; n++){ /* for every completely initialized A2232 board */
- mem = a2232mem(n);
- for (p = 0; p < NUMLINES; p++){ /* for every port on this board */
- err = 0;
- port = &a2232_ports[n*NUMLINES+p];
- if ( port->gs.port.flags & GS_ACTIVE ){ /* if the port is used */
-
- status = a2232stat(n,p);
-
- if (!port->disable_rx && !port->throttle_input){ /* If input is not disabled */
- newhead = status->InHead; /* 65EC02 write pointer */
- bufpos = status->InTail;
-
- /* check for input for this port */
- if (newhead != bufpos) {
- /* buffer for input chars/events */
- ibuf = mem->InBuf[p];
-
- /* data types of bytes in ibuf */
- cbuf = mem->InCtl[p];
-
- /* do for all chars */
- while (bufpos != newhead) {
- /* which type of input data? */
- switch (cbuf[bufpos]) {
- /* switch on input event (CD, BREAK, etc.) */
- case A2232INCTL_EVENT:
- switch (ibuf[bufpos++]) {
- case A2232EVENT_Break:
- /* TODO: Handle BREAK signal */
- break;
- /* A2232EVENT_CarrierOn and A2232EVENT_CarrierOff are
- handled in a separate queue and should not occur here. */
- case A2232EVENT_Sync:
-							printk("A2232: 65EC02 software sent SYNC event, don't know what to do. Ignoring.\n");
- break;
- default:
- printk("A2232: 65EC02 software broken, unknown event type %d occurred.\n",ibuf[bufpos-1]);
- } /* event type switch */
- break;
- case A2232INCTL_CHAR:
- /* Receive incoming char */
- a2232_receive_char(port, ibuf[bufpos], err);
- bufpos++;
- break;
- default:
- printk("A2232: 65EC02 software broken, unknown data type %d occurred.\n",cbuf[bufpos]);
- bufpos++;
- } /* switch on input data type */
- } /* while there's something in the buffer */
-
- status->InTail = bufpos; /* tell 65EC02 what we've read */
-
- } /* if there was something in the buffer */
- } /* If input is not disabled */
-
- /* Now check if there's something to output */
- obuf = mem->OutBuf[p];
- bufpos = status->OutHead;
- while ( (port->gs.xmit_cnt > 0) &&
- (!port->gs.port.tty->stopped) &&
- (!port->gs.port.tty->hw_stopped) ){ /* While there are chars to transmit */
- if (((bufpos+1) & A2232_IOBUFLENMASK) != status->OutTail) { /* If the A2232 buffer is not full */
- ch = port->gs.xmit_buf[port->gs.xmit_tail]; /* get the next char to transmit */
- port->gs.xmit_tail = (port->gs.xmit_tail+1) & (SERIAL_XMIT_SIZE-1); /* modulo-addition for the gs.xmit_buf ring-buffer */
- obuf[bufpos++] = ch; /* put it into the A2232 buffer */
- port->gs.xmit_cnt--;
- }
-					else { /* If the A2232 buffer is full */
- break; /* simply stop filling it. */
- }
- }
- status->OutHead = bufpos;
-
- /* WakeUp if output buffer runs low */
- if ((port->gs.xmit_cnt <= port->gs.wakeup_chars) && port->gs.port.tty) {
- tty_wakeup(port->gs.port.tty);
- }
- } // if the port is used
- } // for every port on the board
-
- /* Now check the CD message queue */
- newhead = mem->Common.CDHead;
- bufpos = mem->Common.CDTail;
- if (newhead != bufpos){ /* There are CD events in queue */
- ocd = mem->Common.CDStatus; /* get old status bits */
- while (newhead != bufpos){ /* read all events */
- ncd = mem->CDBuf[bufpos++]; /* get one event */
- ccd = ncd ^ ocd; /* mask of changed lines */
- ocd = ncd; /* save new status bits */
- for(p=0; p < NUMLINES; p++){ /* for all ports */
- if (ccd & 1){ /* this one changed */
-
-						struct a2232_port *port = &a2232_ports[n*NUMLINES+p];
- port->cd_status = !(ncd & 1); /* ncd&1 <=> CD is now off */
-
- if (!(port->gs.port.flags & ASYNC_CHECK_CD))
- ; /* Don't report DCD changes */
- else if (port->cd_status) { // if DCD on: DCD went UP!
-
- /* Are we blocking in open?*/
- wake_up_interruptible(&port->gs.port.open_wait);
- }
- else { // if DCD off: DCD went DOWN!
- if (port->gs.port.tty)
- tty_hangup (port->gs.port.tty);
- }
-
- } // if CD changed for this port
- ccd >>= 1;
- ncd >>= 1; /* Shift bits for next line */
- } // for every port
- } // while CD events in queue
- mem->Common.CDStatus = ocd; /* save new status */
- mem->Common.CDTail = bufpos; /* remove events */
- } // if events in CD queue
-
- } // for every completely initialized A2232 board
- return IRQ_HANDLED;
-}
-
-static const struct tty_port_operations a2232_port_ops = {
- .carrier_raised = a2232_carrier_raised,
-};
-
-static void a2232_init_portstructs(void)
-{
- struct a2232_port *port;
- int i;
-
- for (i = 0; i < MAX_A2232_BOARDS*NUMLINES; i++) {
- port = a2232_ports + i;
- tty_port_init(&port->gs.port);
- port->gs.port.ops = &a2232_port_ops;
- port->which_a2232 = i/NUMLINES;
- port->which_port_on_a2232 = i%NUMLINES;
- port->disable_rx = port->throttle_input = port->cd_status = 0;
- port->gs.magic = A2232_MAGIC;
- port->gs.close_delay = HZ/2;
- port->gs.closing_wait = 30 * HZ;
- port->gs.rd = &a2232_real_driver;
- }
-}
-
-static const struct tty_operations a2232_ops = {
- .open = a2232_open,
- .close = gs_close,
- .write = gs_write,
- .put_char = gs_put_char,
- .flush_chars = gs_flush_chars,
- .write_room = gs_write_room,
- .chars_in_buffer = gs_chars_in_buffer,
- .flush_buffer = gs_flush_buffer,
- .ioctl = a2232_ioctl,
- .throttle = a2232_throttle,
- .unthrottle = a2232_unthrottle,
- .set_termios = gs_set_termios,
- .stop = gs_stop,
- .start = gs_start,
- .hangup = gs_hangup,
-};
-
-static int a2232_init_drivers(void)
-{
- int error;
-
- a2232_driver = alloc_tty_driver(NUMLINES * nr_a2232);
- if (!a2232_driver)
- return -ENOMEM;
- a2232_driver->owner = THIS_MODULE;
- a2232_driver->driver_name = "commodore_a2232";
- a2232_driver->name = "ttyY";
- a2232_driver->major = A2232_NORMAL_MAJOR;
- a2232_driver->type = TTY_DRIVER_TYPE_SERIAL;
- a2232_driver->subtype = SERIAL_TYPE_NORMAL;
- a2232_driver->init_termios = tty_std_termios;
- a2232_driver->init_termios.c_cflag =
- B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- a2232_driver->init_termios.c_ispeed = 9600;
- a2232_driver->init_termios.c_ospeed = 9600;
- a2232_driver->flags = TTY_DRIVER_REAL_RAW;
- tty_set_operations(a2232_driver, &a2232_ops);
- if ((error = tty_register_driver(a2232_driver))) {
- printk(KERN_ERR "A2232: Couldn't register A2232 driver, error = %d\n",
- error);
- put_tty_driver(a2232_driver);
- return 1;
- }
- return 0;
-}
-
-static int __init a2232board_init(void)
-{
- struct zorro_dev *z;
-
- unsigned int boardaddr;
- int bcount;
- short start;
- u_char *from;
- volatile u_char *to;
- volatile struct a2232memory *mem;
- int error, i;
-
-#ifdef CONFIG_SMP
- return -ENODEV; /* This driver is not SMP aware. Is there an SMP ZorroII-bus-machine? */
-#endif
-
- if (!MACH_IS_AMIGA){
- return -ENODEV;
- }
-
- printk("Commodore A2232 driver initializing.\n"); /* Say that we're alive. */
-
- z = NULL;
- nr_a2232 = 0;
- while ( (z = zorro_find_device(ZORRO_WILDCARD, z)) ){
- if ( (z->id != ZORRO_PROD_CBM_A2232_PROTOTYPE) &&
- (z->id != ZORRO_PROD_CBM_A2232) ){
- continue; // The board found was no A2232
- }
- if (!zorro_request_device(z,"A2232 driver"))
- continue;
-
- printk("Commodore A2232 found (#%d).\n",nr_a2232);
-
- zd_a2232[nr_a2232] = z;
-
- boardaddr = ZTWO_VADDR( z->resource.start );
- printk("Board is located at address 0x%x, size is 0x%x.\n", boardaddr, (unsigned int) ((z->resource.end+1) - (z->resource.start)));
-
- mem = (volatile struct a2232memory *) boardaddr;
-
- (void) mem->Enable6502Reset; /* copy the code across to the board */
- to = (u_char *)mem; from = a2232_65EC02code; bcount = sizeof(a2232_65EC02code) - 2;
- start = *(short *)from;
- from += sizeof(start);
- to += start;
- while(bcount--) *to++ = *from++;
- printk("65EC02 software uploaded to the A2232 memory.\n");
-
- mem->Common.Crystal = A2232_UNKNOWN; /* use automatic speed check */
-
- /* start 6502 running */
- (void) mem->ResetBoard;
- printk("A2232's 65EC02 CPU up and running.\n");
-
- /* wait until speed detector has finished */
- for (bcount = 0; bcount < 2000; bcount++) {
- udelay(1000);
- if (mem->Common.Crystal)
- break;
- }
- printk((mem->Common.Crystal?"A2232 oscillator crystal detected by 65EC02 software: ":"65EC02 software could not determine A2232 oscillator crystal: "));
- switch (mem->Common.Crystal){
- case A2232_UNKNOWN:
- printk("Unknown crystal.\n");
- break;
- case A2232_NORMAL:
- printk ("Normal crystal.\n");
- break;
- case A2232_TURBO:
- printk ("Turbo crystal.\n");
- break;
- default:
- printk ("0x%x. Huh?\n",mem->Common.Crystal);
- }
-
- nr_a2232++;
-
- }
-
- printk("Total: %d A2232 boards initialized.\n", nr_a2232); /* Some status report if no card was found */
-
- a2232_init_portstructs();
-
- /*
- a2232_init_drivers also registers the drivers. Must be here because all boards
- have to be detected first.
- */
- if (a2232_init_drivers()) return -ENODEV; // maybe we should use a different -Exxx?
-
- error = request_irq(IRQ_AMIGA_VERTB, a2232_vbl_inter, 0,
- "A2232 serial VBL", a2232_driver_ID);
- if (error) {
- for (i = 0; i < nr_a2232; i++)
- zorro_release_device(zd_a2232[i]);
- tty_unregister_driver(a2232_driver);
- put_tty_driver(a2232_driver);
- }
- return error;
-}
-
-static void __exit a2232board_exit(void)
-{
- int i;
-
- for (i = 0; i < nr_a2232; i++) {
- zorro_release_device(zd_a2232[i]);
- }
-
- tty_unregister_driver(a2232_driver);
- put_tty_driver(a2232_driver);
- free_irq(IRQ_AMIGA_VERTB, a2232_driver_ID);
-}
-
-module_init(a2232board_init);
-module_exit(a2232board_exit);
-
-MODULE_AUTHOR("Enver Haase");
-MODULE_DESCRIPTION("Amiga A2232 multi-serial board driver");
-MODULE_LICENSE("GPL");
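[Editor's note] a2232_chars_in_buffer() above guards itself with "#if A2232_IOBUFLEN != 256 / #error" because it relies on unsigned-char wrap-around: with 256-byte buffers the head and tail indices overflow at exactly the type width, so a plain subtraction yields the fill level even after the head has wrapped. A small standalone illustration (ordinary user-space C, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned char head = 5, tail = 250;	/* head has already wrapped past 255 */
	unsigned char fill = head - tail;	/* 5 - 250 == 11 (mod 256) */

	printf("%d characters buffered\n", fill);	/* prints 11 */
	return 0;
}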
diff --git a/drivers/staging/generic_serial/ser_a2232.h b/drivers/staging/generic_serial/ser_a2232.h
deleted file mode 100644
index bc09eb9e118..00000000000
--- a/drivers/staging/generic_serial/ser_a2232.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/* drivers/char/ser_a2232.h */
-
-/* $Id: ser_a2232.h,v 0.4 2000/01/25 12:00:00 ehaase Exp $ */
-
-/* Linux serial driver for the Amiga A2232 board */
-
-/* This driver is MAINTAINED. Before applying any changes, please contact
- * the author.
- */
-
-/* Copyright (c) 2000-2001 Enver Haase <ehaase@inf.fu-berlin.de>
- * alias The A2232 driver project <A2232@gmx.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#ifndef _SER_A2232_H_
-#define _SER_A2232_H_
-
-/*
- How many boards are to be supported at maximum;
- "up to five A2232 Multiport Serial Cards may be installed in a
- single Amiga 2000" states the A2232 User's Guide. If you have
- more slots available, you might want to change the value below.
-*/
-#define MAX_A2232_BOARDS 5
-
-#ifndef A2232_NORMAL_MAJOR
-/* This allows overriding on the compiler commandline, or in a "major.h"
- include or something like that */
-#define A2232_NORMAL_MAJOR 224 /* /dev/ttyY* */
-#define A2232_CALLOUT_MAJOR 225 /* /dev/cuy* */
-#endif
-
-/* Some magic is always good - Who knows :) */
-#define A2232_MAGIC 0x000a2232
-
-/* A2232 port structure to keep track of the
- status of every single line used */
-struct a2232_port{
- struct gs_port gs;
- unsigned int which_a2232;
- unsigned int which_port_on_a2232;
- short disable_rx;
- short throttle_input;
- short cd_status;
-};
-
-#define NUMLINES 7 /* number of lines per board */
-#define A2232_IOBUFLEN 256 /* number of bytes per buffer */
-#define A2232_IOBUFLENMASK 0xff /* mask for maximum number of bytes */
-
-
-#define A2232_UNKNOWN 0 /* crystal not known */
-#define A2232_NORMAL 1 /* normal A2232 (1.8432 MHz oscillator) */
-#define A2232_TURBO 2 /* turbo A2232 (3.6864 MHz oscillator) */
-
-
-struct a2232common {
- char Crystal; /* normal (1) or turbo (2) board? */
- u_char Pad_a;
- u_char TimerH; /* timer value after speed check */
- u_char TimerL;
- u_char CDHead; /* head pointer for CD message queue */
- u_char CDTail; /* tail pointer for CD message queue */
- u_char CDStatus;
- u_char Pad_b;
-};
-
-struct a2232status {
- u_char InHead; /* input queue head */
- u_char InTail; /* input queue tail */
- u_char OutDisable; /* disables output */
- u_char OutHead; /* output queue head */
- u_char OutTail; /* output queue tail */
- u_char OutCtrl; /* soft flow control character to send */
- u_char OutFlush; /* flushes output buffer */
- u_char Setup; /* causes reconfiguration */
- u_char Param; /* parameter byte - see A2232PARAM */
- u_char Command; /* command byte - see A2232CMD */
- u_char SoftFlow; /* enables xon/xoff flow control */
- /* private 65EC02 fields: */
- u_char XonOff; /* stores XON/XOFF enable/disable */
-};
-
-#define A2232_MEMPAD1 \
- (0x0200 - NUMLINES * sizeof(struct a2232status) - \
- sizeof(struct a2232common))
-#define A2232_MEMPAD2 (0x2000 - NUMLINES * A2232_IOBUFLEN - A2232_IOBUFLEN)
-
-struct a2232memory {
- struct a2232status Status[NUMLINES]; /* 0x0000-0x006f status areas */
- struct a2232common Common; /* 0x0070-0x0077 common flags */
- u_char Dummy1[A2232_MEMPAD1]; /* 0x00XX-0x01ff */
- u_char OutBuf[NUMLINES][A2232_IOBUFLEN];/* 0x0200-0x08ff output bufs */
- u_char InBuf[NUMLINES][A2232_IOBUFLEN]; /* 0x0900-0x0fff input bufs */
- u_char InCtl[NUMLINES][A2232_IOBUFLEN]; /* 0x1000-0x16ff control data */
- u_char CDBuf[A2232_IOBUFLEN]; /* 0x1700-0x17ff CD event buffer */
- u_char Dummy2[A2232_MEMPAD2]; /* 0x1800-0x2fff */
- u_char Code[0x1000]; /* 0x3000-0x3fff code area */
- u_short InterruptAck; /* 0x4000 intr ack */
- u_char Dummy3[0x3ffe]; /* 0x4002-0x7fff */
- u_short Enable6502Reset; /* 0x8000 Stop board, */
- /* 6502 RESET line held low */
- u_char Dummy4[0x3ffe]; /* 0x8002-0xbfff */
- u_short ResetBoard; /* 0xc000 reset board & run, */
- /* 6502 RESET line held high */
-};
-
-#undef A2232_MEMPAD1
-#undef A2232_MEMPAD2
-
-#define A2232INCTL_CHAR 0 /* corresponding byte in InBuf is a character */
-#define A2232INCTL_EVENT 1 /* corresponding byte in InBuf is an event */
-
-#define A2232EVENT_Break 1 /* break set */
-#define A2232EVENT_CarrierOn 2 /* carrier raised */
-#define A2232EVENT_CarrierOff 3 /* carrier dropped */
-#define A2232EVENT_Sync 4 /* don't know, defined in 2232.ax */
-
-#define A2232CMD_Enable 0x1 /* enable/DTR bit */
-#define A2232CMD_Close 0x2 /* close the device */
-#define A2232CMD_Open 0xb /* open the device */
-#define A2232CMD_CMask 0xf /* command mask */
-#define A2232CMD_RTSOff 0x0 /* turn off RTS */
-#define A2232CMD_RTSOn 0x8 /* turn on RTS */
-#define A2232CMD_Break 0xd /* transmit a break */
-#define A2232CMD_RTSMask 0xc /* mask for RTS stuff */
-#define A2232CMD_NoParity 0x00 /* don't use parity */
-#define A2232CMD_OddParity 0x20 /* odd parity */
-#define A2232CMD_EvenParity 0x60 /* even parity */
-#define A2232CMD_ParityMask 0xe0 /* parity mask */
-
-#define A2232PARAM_B115200 0x0 /* baud rates */
-#define A2232PARAM_B50 0x1
-#define A2232PARAM_B75 0x2
-#define A2232PARAM_B110 0x3
-#define A2232PARAM_B134 0x4
-#define A2232PARAM_B150 0x5
-#define A2232PARAM_B300 0x6
-#define A2232PARAM_B600 0x7
-#define A2232PARAM_B1200 0x8
-#define A2232PARAM_B1800 0x9
-#define A2232PARAM_B2400 0xa
-#define A2232PARAM_B3600 0xb
-#define A2232PARAM_B4800 0xc
-#define A2232PARAM_B7200 0xd
-#define A2232PARAM_B9600 0xe
-#define A2232PARAM_B19200 0xf
-#define A2232PARAM_BaudMask 0xf /* baud rate mask */
-#define A2232PARAM_RcvBaud 0x10 /* enable receive baud rate */
-#define A2232PARAM_8Bit 0x00 /* numbers of bits */
-#define A2232PARAM_7Bit 0x20
-#define A2232PARAM_6Bit 0x40
-#define A2232PARAM_5Bit 0x60
-#define A2232PARAM_BitMask 0x60 /* numbers of bits mask */
-
-
-/* Standard speeds tables, -1 means unavailable, -2 means 0 baud: switch off line */
-#define A2232_BAUD_TABLE_NOAVAIL -1
-#define A2232_BAUD_TABLE_NUM_RATES (18)
-static int a2232_baud_table[A2232_BAUD_TABLE_NUM_RATES*3] = {
- //Baud //Normal //Turbo
- 50, A2232PARAM_B50, A2232_BAUD_TABLE_NOAVAIL,
- 75, A2232PARAM_B75, A2232_BAUD_TABLE_NOAVAIL,
- 110, A2232PARAM_B110, A2232_BAUD_TABLE_NOAVAIL,
- 134, A2232PARAM_B134, A2232_BAUD_TABLE_NOAVAIL,
- 150, A2232PARAM_B150, A2232PARAM_B75,
- 200, A2232_BAUD_TABLE_NOAVAIL, A2232_BAUD_TABLE_NOAVAIL,
- 300, A2232PARAM_B300, A2232PARAM_B150,
- 600, A2232PARAM_B600, A2232PARAM_B300,
- 1200, A2232PARAM_B1200, A2232PARAM_B600,
- 1800, A2232PARAM_B1800, A2232_BAUD_TABLE_NOAVAIL,
- 2400, A2232PARAM_B2400, A2232PARAM_B1200,
- 4800, A2232PARAM_B4800, A2232PARAM_B2400,
- 9600, A2232PARAM_B9600, A2232PARAM_B4800,
- 19200, A2232PARAM_B19200, A2232PARAM_B9600,
- 38400, A2232_BAUD_TABLE_NOAVAIL, A2232PARAM_B19200,
- 57600, A2232_BAUD_TABLE_NOAVAIL, A2232_BAUD_TABLE_NOAVAIL,
-#ifdef A2232_SPEEDHACK
- 115200, A2232PARAM_B115200, A2232_BAUD_TABLE_NOAVAIL,
- 230400, A2232_BAUD_TABLE_NOAVAIL, A2232PARAM_B115200
-#else
- 115200, A2232_BAUD_TABLE_NOAVAIL, A2232_BAUD_TABLE_NOAVAIL,
- 230400, A2232_BAUD_TABLE_NOAVAIL, A2232_BAUD_TABLE_NOAVAIL
-#endif
-};
-#endif
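[Editor's note] The baud table above stores rows of { rate, code for a normal crystal, code for a turbo crystal }, which is why a2232_set_real_termios() steps through it three entries at a time. Below is a hedged standalone restatement of that lookup; a2232_rate_code() is an illustrative helper, not part of the driver.

/* Sketch only: walk the 3-column baud table exactly as the driver does. */
static int a2232_rate_code(int baud, int turbo_crystal)
{
	int i;

	for (i = 0; i < A2232_BAUD_TABLE_NUM_RATES * 3; i += 3) {
		if (a2232_baud_table[i] == baud)
			return turbo_crystal ? a2232_baud_table[i + 2]
					     : a2232_baud_table[i + 1];
	}
	return A2232_BAUD_TABLE_NOAVAIL;
}

/* e.g. a2232_rate_code(9600, 0) == A2232PARAM_B9600, while
 *      a2232_rate_code(9600, 1) == A2232PARAM_B4800 -- the doubled turbo
 *      oscillator needs the next-lower divisor code for the same line speed. */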
diff --git a/drivers/staging/generic_serial/ser_a2232fw.ax b/drivers/staging/generic_serial/ser_a2232fw.ax
deleted file mode 100644
index 73643803276..00000000000
--- a/drivers/staging/generic_serial/ser_a2232fw.ax
+++ /dev/null
@@ -1,529 +0,0 @@
-;.lib "axm"
-;
-;begin
-;title "A2232 serial board driver"
-;
-;set modules "2232"
-;set executable "2232.bin"
-;
-;;;;set nolink
-;
-;set temporary directory "t:"
-;
-;set assembly options "-m6502 -l60:t:list"
-;set link options "bin"; loadadr"
-;;;bin2c 2232.bin msc6502.h msc6502code
-;end
-;
-;
-; ### Commodore A2232 serial board driver for NetBSD by JM v1.3 ###
-;
-; - Created 950501 by JM -
-;
-;
-; Serial board driver software.
-;
-;
-% Copyright (c) 1995 Jukka Marin <jmarin@jmp.fi>.
-% All rights reserved.
-%
-% Redistribution and use in source and binary forms, with or without
-% modification, are permitted provided that the following conditions
-% are met:
-% 1. Redistributions of source code must retain the above copyright
-% notice, and the entire permission notice in its entirety,
-% including the disclaimer of warranties.
-% 2. Redistributions in binary form must reproduce the above copyright
-% notice, this list of conditions and the following disclaimer in the
-% documentation and/or other materials provided with the distribution.
-% 3. The name of the author may not be used to endorse or promote
-% products derived from this software without specific prior
-% written permission.
-%
-% ALTERNATIVELY, this product may be distributed under the terms of
-% the GNU General Public License, in which case the provisions of the
-% GPL are required INSTEAD OF the above restrictions. (This clause is
-% necessary due to a potential bad interaction between the GPL and
-% the restrictions contained in a BSD-style copyright.)
-%
-% THIS SOFTWARE IS PROVIDED `AS IS'' AND ANY EXPRESS OR IMPLIED
-% WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-% OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-% DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
-% INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-% (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-% HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-% STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-% ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-% OF THE POSSIBILITY OF SUCH DAMAGE.
-;
-;
-; Bugs:
-;
-; - Can't send a break yet
-;
-;
-;
-; Edited:
-;
-; - 950501 by JM -> v0.1 - Created this file.
-; - 951029 by JM -> v1.3 - Carrier Detect events now queued in a separate
-; queue.
-;
-;
-
-
-CODE equ $3800 ; start address for program code
-
-
-CTL_CHAR equ $00 ; byte in ibuf is a character
-CTL_EVENT equ $01 ; byte in ibuf is an event
-
-EVENT_BREAK equ $01
-EVENT_CDON equ $02
-EVENT_CDOFF equ $03
-EVENT_SYNC equ $04
-
-XON equ $11
-XOFF equ $13
-
-
-VARBASE macro *starting_address ; was VARINIT
-_varbase set \1
- endm
-
-VARDEF macro *name space_needs
-\1 equ _varbase
-_varbase set _varbase+\2
- endm
-
-
-stz macro * address
- db $64,\1
- endm
-
-stzax macro * address
- db $9e,<\1,>\1
- endm
-
-
-biti macro * immediate value
- db $89,\1
- endm
-
-smb0 macro * address
- db $87,\1
- endm
-smb1 macro * address
- db $97,\1
- endm
-smb2 macro * address
- db $a7,\1
- endm
-smb3 macro * address
- db $b7,\1
- endm
-smb4 macro * address
- db $c7,\1
- endm
-smb5 macro * address
- db $d7,\1
- endm
-smb6 macro * address
- db $e7,\1
- endm
-smb7 macro * address
- db $f7,\1
- endm
-
-
-
-;-----------------------------------------------------------------------;
-; ;
-; stuff common for all ports, non-critical (run once / loop) ;
-; ;
-DO_SLOW macro * port_number ;
- .local ; ;
- lda CIA+C_PA ; check all CD inputs ;
- cmp CommonCDo ; changed from previous accptd? ;
- beq =over ; nope, do nothing else here ;
- ; ;
- cmp CommonCDb ; bouncing? ;
- beq =nobounce ; nope -> ;
- ; ;
- sta CommonCDb ; save current state ;
- lda #64 ; reinitialize counter ;
- sta CommonCDc ; ;
- jmp =over ; skip CD save ;
- ; ;
-=nobounce dec CommonCDc ; no, decrement bounce counter ;
- bpl =over ; not done yet, so skip CD save ;
- ; ;
-=saveCD ldx CDHead ; get write index ;
- sta cdbuf,x ; save status in buffer ;
- inx ; ;
- cpx CDTail ; buffer full? ;
- .if ne ; no: preserve status: ;
- stx CDHead ; update index in RAM ;
- sta CommonCDo ; save state for the next check ;
- .end ; ;
-=over .end local ;
- endm ;
- ;
-;-----------------------------------------------------------------------;
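[Editor's note] The DO_SLOW macro above debounces the carrier-detect inputs: a changed CD byte is remembered in CommonCDb, a counter is reloaded with 64, and only after the value has stayed stable through the countdown is it queued into cdbuf and accepted into CommonCDo. Purely for readability, the same logic restated as C; queue_cd_event() is a hypothetical stand-in for the cdbuf/CDHead handling and the queue-full check is omitted.

/* Sketch only: C restatement of the DO_SLOW carrier-detect debounce. */
extern void queue_cd_event(unsigned char state);	/* hypothetical */

static unsigned char cd_accepted;	/* CommonCDo: last state handed to the host */
static unsigned char cd_bounce;		/* CommonCDb: candidate state being debounced */
static unsigned char cd_count;		/* CommonCDc: stability countdown */

static void poll_cd(unsigned char cd_now)
{
	if (cd_now == cd_accepted)
		return;				/* nothing changed since the accepted state */

	if (cd_now != cd_bounce) {		/* still bouncing: restart the countdown */
		cd_bounce = cd_now;
		cd_count = 64;
		return;
	}

	if (cd_count--)				/* stable, but not for long enough yet */
		return;

	queue_cd_event(cd_now);			/* DO_SLOW stores it in cdbuf here */
	cd_accepted = cd_now;			/* accept the new state */
}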
-
-
-; port specific stuff (no data transfer)
-
-DO_PORT macro * port_number
- .local ; ;
- lda SetUp\1 ; reconfiguration request? ;
- .if ne ; yes: ;
- lda SoftFlow\1 ; get XON/XOFF flag ;
- sta XonOff\1 ; save it ;
- lda Param\1 ; get parameter ;
- ora #%00010000 ; use baud generator for Rx ;
- sta ACIA\1+A_CTRL ; store in control register ;
- stz OutDisable\1 ; enable transmit output ;
- stz SetUp\1 ; no reconfiguration no more ;
- .end ; ;
- ; ;
- lda InHead\1 ; get write index ;
- sbc InTail\1 ; buffer full soon? ;
- cmp #200 ; 200 chars or more in buffer? ;
- lda Command\1 ; get Command reg value ;
- and #%11110011 ; turn RTS OFF by default ;
- .if cc ; still room in buffer: ;
- ora #%00001000 ; turn RTS ON ;
- .end ; ;
- sta ACIA\1+A_CMD ; set/clear RTS ;
- ; ;
- lda OutFlush\1 ; request to flush output buffer;
- .if ne ; yessh! ;
- lda OutHead\1 ; get head ;
- sta OutTail\1 ; save as tail ;
- stz OutDisable\1 ; enable transmit output ;
- stz OutFlush\1 ; clear request ;
- .end
- .end local
- endm
-
-
-DO_DATA macro * port number
- .local
- lda ACIA\1+A_SR ; read ACIA status register ;
- biti [1<<3] ; something received? ;
- .if ne ; yes: ;
- biti [1<<1] ; framing error? ;
- .if ne ; yes: ;
- lda ACIA\1+A_DATA ; read received character ;
- bne =SEND ; not break -> ignore it ;
- ldx InHead\1 ; get write pointer ;
- lda #CTL_EVENT ; get type of byte ;
- sta ictl\1,x ; save it in InCtl buffer ;
- lda #EVENT_BREAK ; event code ;
- sta ibuf\1,x ; save it as well ;
- inx ; ;
- cpx InTail\1 ; still room in buffer? ;
- .if ne ; absolutely: ;
- stx InHead\1 ; update index in memory ;
- .end ; ;
- jmp =SEND ; go check if anything to send ;
- .end ; ;
- ; normal char received: ;
- ldx InHead\1 ; get write index ;
- lda ACIA\1+A_DATA ; read received character ;
- sta ibuf\1,x ; save char in buffer ;
- stzax ictl\1 ; set type to CTL_CHAR ;
- inx ; ;
- cpx InTail\1 ; buffer full? ;
- .if ne ; no: preserve character: ;
- stx InHead\1 ; update index in RAM ;
- .end ; ;
- and #$7f ; mask off parity if any ;
- cmp #XOFF ; XOFF from remote host? ;
- .if eq ; yes: ;
- lda XonOff\1 ; if XON/XOFF handshaking.. ;
- sta OutDisable\1 ; ..disable transmitter ;
- .end ; ;
- .end ; ;
- ; ;
- ; BUFFER FULL CHECK WAS HERE ;
- ; ;
-=SEND lda ACIA\1+A_SR ; transmit register empty? ;
- and #[1<<4] ; ;
- .if ne ; yes: ;
- ldx OutCtrl\1 ; sending out XON/XOFF? ;
- .if ne ; yes: ;
- lda CIA+C_PB ; check CTS signal ;
- and #[1<<\1] ; (for this port only) ;
- bne =DONE ; not allowed to send -> done ;
- stx ACIA\1+A_DATA ; transmit control char ;
- stz OutCtrl\1 ; clear flag ;
- jmp =DONE ; and we're done ;
- .end ; ;
- ; ;
- ldx OutTail\1 ; anything to transmit? ;
- cpx OutHead\1 ; ;
- .if ne ; yes: ;
- lda OutDisable\1 ; allowed to transmit? ;
- .if eq ; yes: ;
- lda CIA+C_PB ; check CTS signal ;
- and #[1<<\1] ; (for this port only) ;
- bne =DONE ; not allowed to send -> done ;
- lda obuf\1,x ; get a char from buffer ;
- sta ACIA\1+A_DATA ; send it away ;
- inc OutTail\1 ; update read index ;
- .end ; ;
- .end ; ;
- .end ; ;
-=DONE .end local
- endm
-
-
-
-PORTVAR macro * port number
- VARDEF InHead\1 1
- VARDEF InTail\1 1
- VARDEF OutDisable\1 1
- VARDEF OutHead\1 1
- VARDEF OutTail\1 1
- VARDEF OutCtrl\1 1
- VARDEF OutFlush\1 1
- VARDEF SetUp\1 1
- VARDEF Param\1 1
- VARDEF Command\1 1
- VARDEF SoftFlow\1 1
- ; private:
- VARDEF XonOff\1 1
- endm
-
-
- VARBASE 0 ; start variables at address $0000
- PORTVAR 0 ; define variables for port 0
- PORTVAR 1 ; define variables for port 1
- PORTVAR 2 ; define variables for port 2
- PORTVAR 3 ; define variables for port 3
- PORTVAR 4 ; define variables for port 4
- PORTVAR 5 ; define variables for port 5
- PORTVAR 6 ; define variables for port 6
-
-
-
- VARDEF Crystal 1 ; 0 = unknown, 1 = normal, 2 = turbo
- VARDEF Pad_a 1
- VARDEF TimerH 1
- VARDEF TimerL 1
- VARDEF CDHead 1
- VARDEF CDTail 1
- VARDEF CDStatus 1
- VARDEF Pad_b 1
-
- VARDEF CommonCDo 1 ; for carrier detect optimization
- VARDEF CommonCDc 1 ; for carrier detect debouncing
- VARDEF CommonCDb 1 ; for carrier detect debouncing
-
-
- VARBASE $0200
- VARDEF obuf0 256 ; output data (characters only)
- VARDEF obuf1 256
- VARDEF obuf2 256
- VARDEF obuf3 256
- VARDEF obuf4 256
- VARDEF obuf5 256
- VARDEF obuf6 256
-
- VARDEF ibuf0 256 ; input data (characters, events etc - see ictl)
- VARDEF ibuf1 256
- VARDEF ibuf2 256
- VARDEF ibuf3 256
- VARDEF ibuf4 256
- VARDEF ibuf5 256
- VARDEF ibuf6 256
-
- VARDEF ictl0 256 ; input control information (type of data in ibuf)
- VARDEF ictl1 256
- VARDEF ictl2 256
- VARDEF ictl3 256
- VARDEF ictl4 256
- VARDEF ictl5 256
- VARDEF ictl6 256
-
- VARDEF cdbuf 256 ; CD event queue
-
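The ibufN/ictlN queues declared above are plain 256-byte rings indexed by the 8-bit InHeadN/InTailN variables; DO_DATA stores a received byte at the head slot and only commits the new head if it would not collide with the tail, silently dropping the newest byte otherwise. A minimal C sketch of that enqueue step (illustrative names, not part of the firmware):

#include <stdint.h>

struct rx_ring {
	uint8_t head;       /* InHeadN: next slot to write       */
	uint8_t tail;       /* InTailN: next slot the host reads */
	uint8_t data[256];  /* ibufN                             */
	uint8_t ctl[256];   /* ictlN: 0 means "plain character"  */
};

static void rx_put(struct rx_ring *r, uint8_t ch)
{
	uint8_t idx = r->head;

	r->data[idx] = ch;
	r->ctl[idx]  = 0;         /* CTL_CHAR                        */
	idx++;                    /* uint8_t arithmetic wraps at 256 */
	if (idx != r->tail)       /* room left?                      */
		r->head = idx;    /* yes: commit the byte            */
	/* else: ring is full and the newest byte is discarded */
}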
-
-ACIA0 equ $4400
-ACIA1 equ $4c00
-ACIA2 equ $5400
-ACIA3 equ $5c00
-ACIA4 equ $6400
-ACIA5 equ $6c00
-ACIA6 equ $7400
-
-A_DATA equ $00
-A_SR equ $02
-A_CMD equ $04
-A_CTRL equ $06
-; 00 write transmit data read received data
-; 02 reset ACIA read status register
-; 04 write command register read command register
-; 06 write control register read control register
-
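The ACIA register map above, expressed as C constants, may be easier to scan; this is purely illustrative (the firmware uses the assembler equates directly) and the identifier names are invented:

enum a2232_acia_reg {
	A2232_ACIA_DATA = 0x00, /* write: transmit data,  read: received data */
	A2232_ACIA_SR   = 0x02, /* write: reset ACIA,     read: status        */
	A2232_ACIA_CMD  = 0x04, /* command register (read/write)              */
	A2232_ACIA_CTRL = 0x06, /* control register (read/write)              */
};

/* ACIA0..ACIA6 sit 0x800 apart, starting at $4400 */
#define A2232_ACIA_BASE(port)	(0x4400 + (port) * 0x0800)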
-CIA equ $7c00 ; 8520 CIA
-C_PA equ $00 ; port A data register
-C_PB equ $02 ; port B data register
-C_DDRA equ $04 ; data direction register for port A
-C_DDRB equ $06 ; data direction register for port B
-C_TAL equ $08 ; timer A
-C_TAH equ $0a
-C_TBL equ $0c ; timer B
-C_TBH equ $0e
-C_TODL equ $10 ; TOD LSB
-C_TODM equ $12 ; TOD middle byte
-C_TODH equ $14 ; TOD MSB
-C_DATA equ $18 ; serial data register
-C_INTCTRL equ $1a ; interrupt control register
-C_CTRLA equ $1c ; control register A
-C_CTRLB equ $1e ; control register B
-
-
-
-
-
- section main,code,CODE-2
-
- db >CODE,<CODE
-
-;-----------------------------------------------------------------------;
-; here's the initialization code: ;
-; ;
-R_RESET ldx #$ff ;
- txs ; initialize stack pointer ;
- cld ; in case a 6502 is used... ;
- ldx #0 ; ;
- lda #0 ; ;
- ldy #Crystal ; this many bytes to clear ;
-clr_loop sta 0,x ; clear zero page variables ;
- inx ; ;
- dey ; ;
- bne clr_loop ; ;
- ; ;
- stz CommonCDo ; force CD test at boot ;
- stz CommonCDb ; ;
- stz CDHead ; clear queue ;
- stz CDTail ; ;
- ; ;
- lda #0 ; ;
- sta Pad_a ; ;
- lda #170 ; test cmp ;
- cmp #100 ; ;
- .if cs ; ;
- inc Pad_a ; C was set ;
- .end ; ;
- ;
-;-----------------------------------------------------------------------;
-; Speed check ;
-;-----------------------------------------------------------------------;
- ;
- lda Crystal ; speed already set? ;
- beq DoSpeedy ; ;
- jmp LOOP ; yes, skip speed test ;
- ; ;
-DoSpeedy lda #%10011000 ; 8N1, 1200/2400 bps ;
- sta ACIA0+A_CTRL ; ;
- lda #%00001011 ; enable DTR ;
- sta ACIA0+A_CMD ; ;
- lda ACIA0+A_SR ; read status register ;
- ; ;
- lda #%10000000 ; disable all ints (unnecessary);
- sta CIA+C_INTCTRL ; ;
- lda #255 ; program the timer ;
- sta CIA+C_TAL ; ;
- sta CIA+C_TAH ; ;
- ; ;
- ldx #0 ; ;
- stx ACIA0+A_DATA ; transmit a zero ;
- nop ; ;
- nop ; ;
- lda ACIA0+A_SR ; read status ;
- nop ; ;
- nop ; ;
- stx ACIA0+A_DATA ; transmit a zero ;
-Speedy1 lda ACIA0+A_SR ; read status ;
- and #[1<<4] ; transmit data reg empty? ;
- beq Speedy1 ; not yet, wait more ;
- ; ;
- lda #%00010001 ; load & start the timer ;
- stx ACIA0+A_DATA ; transmit one more zero ;
- sta CIA+C_CTRLA ; ;
-Speedy2 lda ACIA0+A_SR ; read status ;
- and #[1<<4] ; transmit data reg empty? ;
- beq Speedy2 ; not yet, wait more ;
- stx CIA+C_CTRLA ; stop the timer ;
- ; ;
- lda CIA+C_TAL ; copy timer value for 68k ;
- sta TimerL ; ;
- lda CIA+C_TAH ; ;
- sta TimerH ; ;
- cmp #$d0 ; turbo or normal? ;
- .if cs ; ;
- lda #2 ; turbo! :-) ;
- .else ; ;
- lda #1 ; normal :-( ;
- .end ; ;
- sta Crystal ; ;
- lda #0 ; ;
- sta ACIA0+A_SR ; ;
- sta ACIA0+A_CTRL ; reset UART ;
- sta ACIA0+A_CMD ; ;
- ;
- jmp LOOP ;
- ;
-; ;
-;-----------------------------------------------------------------------;
-; ;
-; The Real Thing: ;
-; ;
-LOOP DO_SLOW ; do non-critical things ;
- jsr do_input ; check for received data
- DO_PORT 0
- jsr do_input
- DO_PORT 1
- jsr do_input
- DO_PORT 2
- jsr do_input
- DO_PORT 3
- jsr do_input
- DO_PORT 4
- jsr do_input
- DO_PORT 5
- jsr do_input
- DO_PORT 6
- jsr do_input
- jmp LOOP
-
-
-do_input DO_DATA 0
- DO_DATA 1
- DO_DATA 2
- DO_DATA 3
- DO_DATA 4
- DO_DATA 5
- DO_DATA 6
- rts
-
-
-;-----------------------------------------------------------------------;
- section vectors,data,$3ffa
- dw $d0d0
- dw R_RESET
- dw $c0ce
-;-----------------------------------------------------------------------;
-
-
-
- end
-
-
-
diff --git a/drivers/staging/generic_serial/ser_a2232fw.h b/drivers/staging/generic_serial/ser_a2232fw.h
deleted file mode 100644
index e09a30acfe5..00000000000
--- a/drivers/staging/generic_serial/ser_a2232fw.h
+++ /dev/null
@@ -1,306 +0,0 @@
-/* drivers/char/ser_a2232fw.h */
-
-/* $Id: ser_a2232fw.h,v 0.4 2000/01/25 12:00:00 ehaase Exp $ */
-
-/*
- * Copyright (c) 1995 Jukka Marin <jmarin@jmp.fi>.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, and the entire permission notice in its entirety,
- * including the disclaimer of warranties.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote
- * products derived from this software without specific prior
- * written permission.
- *
- * ALTERNATIVELY, this product may be distributed under the terms of
- * the GNU Public License, in which case the provisions of the GPL are
- * required INSTEAD OF the above restrictions. (This clause is
- * necessary due to a potential bad interaction between the GPL and
- * the restrictions contained in a BSD-style copyright.)
- *
- * THIS SOFTWARE IS PROVIDED `AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/* This is the 65EC02 code by Jukka Marin that is executed by
- the A2232's 65EC02 processor (base address: 0x3800)
- Source file: ser_a2232fw.ax
- Version: 1.3 (951029)
- Known Bugs: Cannot send a break yet
-*/
-static unsigned char a2232_65EC02code[] = {
- 0x38, 0x00, 0xA2, 0xFF, 0x9A, 0xD8, 0xA2, 0x00,
- 0xA9, 0x00, 0xA0, 0x54, 0x95, 0x00, 0xE8, 0x88,
- 0xD0, 0xFA, 0x64, 0x5C, 0x64, 0x5E, 0x64, 0x58,
- 0x64, 0x59, 0xA9, 0x00, 0x85, 0x55, 0xA9, 0xAA,
- 0xC9, 0x64, 0x90, 0x02, 0xE6, 0x55, 0xA5, 0x54,
- 0xF0, 0x03, 0x4C, 0x92, 0x38, 0xA9, 0x98, 0x8D,
- 0x06, 0x44, 0xA9, 0x0B, 0x8D, 0x04, 0x44, 0xAD,
- 0x02, 0x44, 0xA9, 0x80, 0x8D, 0x1A, 0x7C, 0xA9,
- 0xFF, 0x8D, 0x08, 0x7C, 0x8D, 0x0A, 0x7C, 0xA2,
- 0x00, 0x8E, 0x00, 0x44, 0xEA, 0xEA, 0xAD, 0x02,
- 0x44, 0xEA, 0xEA, 0x8E, 0x00, 0x44, 0xAD, 0x02,
- 0x44, 0x29, 0x10, 0xF0, 0xF9, 0xA9, 0x11, 0x8E,
- 0x00, 0x44, 0x8D, 0x1C, 0x7C, 0xAD, 0x02, 0x44,
- 0x29, 0x10, 0xF0, 0xF9, 0x8E, 0x1C, 0x7C, 0xAD,
- 0x08, 0x7C, 0x85, 0x57, 0xAD, 0x0A, 0x7C, 0x85,
- 0x56, 0xC9, 0xD0, 0x90, 0x05, 0xA9, 0x02, 0x4C,
- 0x82, 0x38, 0xA9, 0x01, 0x85, 0x54, 0xA9, 0x00,
- 0x8D, 0x02, 0x44, 0x8D, 0x06, 0x44, 0x8D, 0x04,
- 0x44, 0x4C, 0x92, 0x38, 0xAD, 0x00, 0x7C, 0xC5,
- 0x5C, 0xF0, 0x1F, 0xC5, 0x5E, 0xF0, 0x09, 0x85,
- 0x5E, 0xA9, 0x40, 0x85, 0x5D, 0x4C, 0xB8, 0x38,
- 0xC6, 0x5D, 0x10, 0x0E, 0xA6, 0x58, 0x9D, 0x00,
- 0x17, 0xE8, 0xE4, 0x59, 0xF0, 0x04, 0x86, 0x58,
- 0x85, 0x5C, 0x20, 0x23, 0x3A, 0xA5, 0x07, 0xF0,
- 0x0F, 0xA5, 0x0A, 0x85, 0x0B, 0xA5, 0x08, 0x09,
- 0x10, 0x8D, 0x06, 0x44, 0x64, 0x02, 0x64, 0x07,
- 0xA5, 0x00, 0xE5, 0x01, 0xC9, 0xC8, 0xA5, 0x09,
- 0x29, 0xF3, 0xB0, 0x02, 0x09, 0x08, 0x8D, 0x04,
- 0x44, 0xA5, 0x06, 0xF0, 0x08, 0xA5, 0x03, 0x85,
- 0x04, 0x64, 0x02, 0x64, 0x06, 0x20, 0x23, 0x3A,
- 0xA5, 0x13, 0xF0, 0x0F, 0xA5, 0x16, 0x85, 0x17,
- 0xA5, 0x14, 0x09, 0x10, 0x8D, 0x06, 0x4C, 0x64,
- 0x0E, 0x64, 0x13, 0xA5, 0x0C, 0xE5, 0x0D, 0xC9,
- 0xC8, 0xA5, 0x15, 0x29, 0xF3, 0xB0, 0x02, 0x09,
- 0x08, 0x8D, 0x04, 0x4C, 0xA5, 0x12, 0xF0, 0x08,
- 0xA5, 0x0F, 0x85, 0x10, 0x64, 0x0E, 0x64, 0x12,
- 0x20, 0x23, 0x3A, 0xA5, 0x1F, 0xF0, 0x0F, 0xA5,
- 0x22, 0x85, 0x23, 0xA5, 0x20, 0x09, 0x10, 0x8D,
- 0x06, 0x54, 0x64, 0x1A, 0x64, 0x1F, 0xA5, 0x18,
- 0xE5, 0x19, 0xC9, 0xC8, 0xA5, 0x21, 0x29, 0xF3,
- 0xB0, 0x02, 0x09, 0x08, 0x8D, 0x04, 0x54, 0xA5,
- 0x1E, 0xF0, 0x08, 0xA5, 0x1B, 0x85, 0x1C, 0x64,
- 0x1A, 0x64, 0x1E, 0x20, 0x23, 0x3A, 0xA5, 0x2B,
- 0xF0, 0x0F, 0xA5, 0x2E, 0x85, 0x2F, 0xA5, 0x2C,
- 0x09, 0x10, 0x8D, 0x06, 0x5C, 0x64, 0x26, 0x64,
- 0x2B, 0xA5, 0x24, 0xE5, 0x25, 0xC9, 0xC8, 0xA5,
- 0x2D, 0x29, 0xF3, 0xB0, 0x02, 0x09, 0x08, 0x8D,
- 0x04, 0x5C, 0xA5, 0x2A, 0xF0, 0x08, 0xA5, 0x27,
- 0x85, 0x28, 0x64, 0x26, 0x64, 0x2A, 0x20, 0x23,
- 0x3A, 0xA5, 0x37, 0xF0, 0x0F, 0xA5, 0x3A, 0x85,
- 0x3B, 0xA5, 0x38, 0x09, 0x10, 0x8D, 0x06, 0x64,
- 0x64, 0x32, 0x64, 0x37, 0xA5, 0x30, 0xE5, 0x31,
- 0xC9, 0xC8, 0xA5, 0x39, 0x29, 0xF3, 0xB0, 0x02,
- 0x09, 0x08, 0x8D, 0x04, 0x64, 0xA5, 0x36, 0xF0,
- 0x08, 0xA5, 0x33, 0x85, 0x34, 0x64, 0x32, 0x64,
- 0x36, 0x20, 0x23, 0x3A, 0xA5, 0x43, 0xF0, 0x0F,
- 0xA5, 0x46, 0x85, 0x47, 0xA5, 0x44, 0x09, 0x10,
- 0x8D, 0x06, 0x6C, 0x64, 0x3E, 0x64, 0x43, 0xA5,
- 0x3C, 0xE5, 0x3D, 0xC9, 0xC8, 0xA5, 0x45, 0x29,
- 0xF3, 0xB0, 0x02, 0x09, 0x08, 0x8D, 0x04, 0x6C,
- 0xA5, 0x42, 0xF0, 0x08, 0xA5, 0x3F, 0x85, 0x40,
- 0x64, 0x3E, 0x64, 0x42, 0x20, 0x23, 0x3A, 0xA5,
- 0x4F, 0xF0, 0x0F, 0xA5, 0x52, 0x85, 0x53, 0xA5,
- 0x50, 0x09, 0x10, 0x8D, 0x06, 0x74, 0x64, 0x4A,
- 0x64, 0x4F, 0xA5, 0x48, 0xE5, 0x49, 0xC9, 0xC8,
- 0xA5, 0x51, 0x29, 0xF3, 0xB0, 0x02, 0x09, 0x08,
- 0x8D, 0x04, 0x74, 0xA5, 0x4E, 0xF0, 0x08, 0xA5,
- 0x4B, 0x85, 0x4C, 0x64, 0x4A, 0x64, 0x4E, 0x20,
- 0x23, 0x3A, 0x4C, 0x92, 0x38, 0xAD, 0x02, 0x44,
- 0x89, 0x08, 0xF0, 0x3B, 0x89, 0x02, 0xF0, 0x1B,
- 0xAD, 0x00, 0x44, 0xD0, 0x32, 0xA6, 0x00, 0xA9,
- 0x01, 0x9D, 0x00, 0x10, 0xA9, 0x01, 0x9D, 0x00,
- 0x09, 0xE8, 0xE4, 0x01, 0xF0, 0x02, 0x86, 0x00,
- 0x4C, 0x65, 0x3A, 0xA6, 0x00, 0xAD, 0x00, 0x44,
- 0x9D, 0x00, 0x09, 0x9E, 0x00, 0x10, 0xE8, 0xE4,
- 0x01, 0xF0, 0x02, 0x86, 0x00, 0x29, 0x7F, 0xC9,
- 0x13, 0xD0, 0x04, 0xA5, 0x0B, 0x85, 0x02, 0xAD,
- 0x02, 0x44, 0x29, 0x10, 0xF0, 0x2C, 0xA6, 0x05,
- 0xF0, 0x0F, 0xAD, 0x02, 0x7C, 0x29, 0x01, 0xD0,
- 0x21, 0x8E, 0x00, 0x44, 0x64, 0x05, 0x4C, 0x98,
- 0x3A, 0xA6, 0x04, 0xE4, 0x03, 0xF0, 0x13, 0xA5,
- 0x02, 0xD0, 0x0F, 0xAD, 0x02, 0x7C, 0x29, 0x01,
- 0xD0, 0x08, 0xBD, 0x00, 0x02, 0x8D, 0x00, 0x44,
- 0xE6, 0x04, 0xAD, 0x02, 0x4C, 0x89, 0x08, 0xF0,
- 0x3B, 0x89, 0x02, 0xF0, 0x1B, 0xAD, 0x00, 0x4C,
- 0xD0, 0x32, 0xA6, 0x0C, 0xA9, 0x01, 0x9D, 0x00,
- 0x11, 0xA9, 0x01, 0x9D, 0x00, 0x0A, 0xE8, 0xE4,
- 0x0D, 0xF0, 0x02, 0x86, 0x0C, 0x4C, 0xDA, 0x3A,
- 0xA6, 0x0C, 0xAD, 0x00, 0x4C, 0x9D, 0x00, 0x0A,
- 0x9E, 0x00, 0x11, 0xE8, 0xE4, 0x0D, 0xF0, 0x02,
- 0x86, 0x0C, 0x29, 0x7F, 0xC9, 0x13, 0xD0, 0x04,
- 0xA5, 0x17, 0x85, 0x0E, 0xAD, 0x02, 0x4C, 0x29,
- 0x10, 0xF0, 0x2C, 0xA6, 0x11, 0xF0, 0x0F, 0xAD,
- 0x02, 0x7C, 0x29, 0x02, 0xD0, 0x21, 0x8E, 0x00,
- 0x4C, 0x64, 0x11, 0x4C, 0x0D, 0x3B, 0xA6, 0x10,
- 0xE4, 0x0F, 0xF0, 0x13, 0xA5, 0x0E, 0xD0, 0x0F,
- 0xAD, 0x02, 0x7C, 0x29, 0x02, 0xD0, 0x08, 0xBD,
- 0x00, 0x03, 0x8D, 0x00, 0x4C, 0xE6, 0x10, 0xAD,
- 0x02, 0x54, 0x89, 0x08, 0xF0, 0x3B, 0x89, 0x02,
- 0xF0, 0x1B, 0xAD, 0x00, 0x54, 0xD0, 0x32, 0xA6,
- 0x18, 0xA9, 0x01, 0x9D, 0x00, 0x12, 0xA9, 0x01,
- 0x9D, 0x00, 0x0B, 0xE8, 0xE4, 0x19, 0xF0, 0x02,
- 0x86, 0x18, 0x4C, 0x4F, 0x3B, 0xA6, 0x18, 0xAD,
- 0x00, 0x54, 0x9D, 0x00, 0x0B, 0x9E, 0x00, 0x12,
- 0xE8, 0xE4, 0x19, 0xF0, 0x02, 0x86, 0x18, 0x29,
- 0x7F, 0xC9, 0x13, 0xD0, 0x04, 0xA5, 0x23, 0x85,
- 0x1A, 0xAD, 0x02, 0x54, 0x29, 0x10, 0xF0, 0x2C,
- 0xA6, 0x1D, 0xF0, 0x0F, 0xAD, 0x02, 0x7C, 0x29,
- 0x04, 0xD0, 0x21, 0x8E, 0x00, 0x54, 0x64, 0x1D,
- 0x4C, 0x82, 0x3B, 0xA6, 0x1C, 0xE4, 0x1B, 0xF0,
- 0x13, 0xA5, 0x1A, 0xD0, 0x0F, 0xAD, 0x02, 0x7C,
- 0x29, 0x04, 0xD0, 0x08, 0xBD, 0x00, 0x04, 0x8D,
- 0x00, 0x54, 0xE6, 0x1C, 0xAD, 0x02, 0x5C, 0x89,
- 0x08, 0xF0, 0x3B, 0x89, 0x02, 0xF0, 0x1B, 0xAD,
- 0x00, 0x5C, 0xD0, 0x32, 0xA6, 0x24, 0xA9, 0x01,
- 0x9D, 0x00, 0x13, 0xA9, 0x01, 0x9D, 0x00, 0x0C,
- 0xE8, 0xE4, 0x25, 0xF0, 0x02, 0x86, 0x24, 0x4C,
- 0xC4, 0x3B, 0xA6, 0x24, 0xAD, 0x00, 0x5C, 0x9D,
- 0x00, 0x0C, 0x9E, 0x00, 0x13, 0xE8, 0xE4, 0x25,
- 0xF0, 0x02, 0x86, 0x24, 0x29, 0x7F, 0xC9, 0x13,
- 0xD0, 0x04, 0xA5, 0x2F, 0x85, 0x26, 0xAD, 0x02,
- 0x5C, 0x29, 0x10, 0xF0, 0x2C, 0xA6, 0x29, 0xF0,
- 0x0F, 0xAD, 0x02, 0x7C, 0x29, 0x08, 0xD0, 0x21,
- 0x8E, 0x00, 0x5C, 0x64, 0x29, 0x4C, 0xF7, 0x3B,
- 0xA6, 0x28, 0xE4, 0x27, 0xF0, 0x13, 0xA5, 0x26,
- 0xD0, 0x0F, 0xAD, 0x02, 0x7C, 0x29, 0x08, 0xD0,
- 0x08, 0xBD, 0x00, 0x05, 0x8D, 0x00, 0x5C, 0xE6,
- 0x28, 0xAD, 0x02, 0x64, 0x89, 0x08, 0xF0, 0x3B,
- 0x89, 0x02, 0xF0, 0x1B, 0xAD, 0x00, 0x64, 0xD0,
- 0x32, 0xA6, 0x30, 0xA9, 0x01, 0x9D, 0x00, 0x14,
- 0xA9, 0x01, 0x9D, 0x00, 0x0D, 0xE8, 0xE4, 0x31,
- 0xF0, 0x02, 0x86, 0x30, 0x4C, 0x39, 0x3C, 0xA6,
- 0x30, 0xAD, 0x00, 0x64, 0x9D, 0x00, 0x0D, 0x9E,
- 0x00, 0x14, 0xE8, 0xE4, 0x31, 0xF0, 0x02, 0x86,
- 0x30, 0x29, 0x7F, 0xC9, 0x13, 0xD0, 0x04, 0xA5,
- 0x3B, 0x85, 0x32, 0xAD, 0x02, 0x64, 0x29, 0x10,
- 0xF0, 0x2C, 0xA6, 0x35, 0xF0, 0x0F, 0xAD, 0x02,
- 0x7C, 0x29, 0x10, 0xD0, 0x21, 0x8E, 0x00, 0x64,
- 0x64, 0x35, 0x4C, 0x6C, 0x3C, 0xA6, 0x34, 0xE4,
- 0x33, 0xF0, 0x13, 0xA5, 0x32, 0xD0, 0x0F, 0xAD,
- 0x02, 0x7C, 0x29, 0x10, 0xD0, 0x08, 0xBD, 0x00,
- 0x06, 0x8D, 0x00, 0x64, 0xE6, 0x34, 0xAD, 0x02,
- 0x6C, 0x89, 0x08, 0xF0, 0x3B, 0x89, 0x02, 0xF0,
- 0x1B, 0xAD, 0x00, 0x6C, 0xD0, 0x32, 0xA6, 0x3C,
- 0xA9, 0x01, 0x9D, 0x00, 0x15, 0xA9, 0x01, 0x9D,
- 0x00, 0x0E, 0xE8, 0xE4, 0x3D, 0xF0, 0x02, 0x86,
- 0x3C, 0x4C, 0xAE, 0x3C, 0xA6, 0x3C, 0xAD, 0x00,
- 0x6C, 0x9D, 0x00, 0x0E, 0x9E, 0x00, 0x15, 0xE8,
- 0xE4, 0x3D, 0xF0, 0x02, 0x86, 0x3C, 0x29, 0x7F,
- 0xC9, 0x13, 0xD0, 0x04, 0xA5, 0x47, 0x85, 0x3E,
- 0xAD, 0x02, 0x6C, 0x29, 0x10, 0xF0, 0x2C, 0xA6,
- 0x41, 0xF0, 0x0F, 0xAD, 0x02, 0x7C, 0x29, 0x20,
- 0xD0, 0x21, 0x8E, 0x00, 0x6C, 0x64, 0x41, 0x4C,
- 0xE1, 0x3C, 0xA6, 0x40, 0xE4, 0x3F, 0xF0, 0x13,
- 0xA5, 0x3E, 0xD0, 0x0F, 0xAD, 0x02, 0x7C, 0x29,
- 0x20, 0xD0, 0x08, 0xBD, 0x00, 0x07, 0x8D, 0x00,
- 0x6C, 0xE6, 0x40, 0xAD, 0x02, 0x74, 0x89, 0x08,
- 0xF0, 0x3B, 0x89, 0x02, 0xF0, 0x1B, 0xAD, 0x00,
- 0x74, 0xD0, 0x32, 0xA6, 0x48, 0xA9, 0x01, 0x9D,
- 0x00, 0x16, 0xA9, 0x01, 0x9D, 0x00, 0x0F, 0xE8,
- 0xE4, 0x49, 0xF0, 0x02, 0x86, 0x48, 0x4C, 0x23,
- 0x3D, 0xA6, 0x48, 0xAD, 0x00, 0x74, 0x9D, 0x00,
- 0x0F, 0x9E, 0x00, 0x16, 0xE8, 0xE4, 0x49, 0xF0,
- 0x02, 0x86, 0x48, 0x29, 0x7F, 0xC9, 0x13, 0xD0,
- 0x04, 0xA5, 0x53, 0x85, 0x4A, 0xAD, 0x02, 0x74,
- 0x29, 0x10, 0xF0, 0x2C, 0xA6, 0x4D, 0xF0, 0x0F,
- 0xAD, 0x02, 0x7C, 0x29, 0x40, 0xD0, 0x21, 0x8E,
- 0x00, 0x74, 0x64, 0x4D, 0x4C, 0x56, 0x3D, 0xA6,
- 0x4C, 0xE4, 0x4B, 0xF0, 0x13, 0xA5, 0x4A, 0xD0,
- 0x0F, 0xAD, 0x02, 0x7C, 0x29, 0x40, 0xD0, 0x08,
- 0xBD, 0x00, 0x08, 0x8D, 0x00, 0x74, 0xE6, 0x4C,
- 0x60, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xD0, 0xD0, 0x00, 0x38,
- 0xCE, 0xC0,
-};
diff --git a/drivers/staging/generic_serial/sx.c b/drivers/staging/generic_serial/sx.c
deleted file mode 100644
index 4f94aaffbe8..00000000000
--- a/drivers/staging/generic_serial/sx.c
+++ /dev/null
@@ -1,2894 +0,0 @@
-/* sx.c -- driver for the Specialix SX series cards.
- *
- * This driver will also support the older SI, and XIO cards.
- *
- *
- * (C) 1998 - 2004 R.E.Wolff@BitWizard.nl
- *
- * Simon Allen (simonallen@cix.compulink.co.uk) wrote a previous
- * version of this driver. Some fragments may have been copied. (none
- * yet :-)
- *
- * Specialix pays for the development and support of this driver.
- * Please DO contact support@specialix.co.uk if you require
- * support. But please read the documentation (sx.txt) first.
- *
- *
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
- * Revision history:
- * Revision 1.33 2000/03/09 10:00:00 pvdl,wolff
- * - Fixed module and port counting
- * - Fixed signal handling
- * - Fixed an Ooops
- *
- * Revision 1.32 2000/03/07 09:00:00 wolff,pvdl
- * - Fixed some sx_dprintk typos
- * - added detection for an invalid board/module configuration
- *
- * Revision 1.31 2000/03/06 12:00:00 wolff,pvdl
- * - Added support for EISA
- *
- * Revision 1.30 2000/01/21 17:43:06 wolff
- * - Added support for SX+
- *
- * Revision 1.26 1999/08/05 15:22:14 wolff
- * - Port to 2.3.x
- * - Reformatted to Linus' liking.
- *
- * Revision 1.25 1999/07/30 14:24:08 wolff
- * Had accidentally left "gs_debug" set to "-1" instead of "off" (=0).
- *
- * Revision 1.24 1999/07/28 09:41:52 wolff
- * - I noticed the remark about use-count straying in sx.txt. I checked
- * sx_open, and found a few places where that could happen. I hope it's
- * fixed now.
- *
- * Revision 1.23 1999/07/28 08:56:06 wolff
- * - Fixed crash when sx_firmware run twice.
- * - Added sx_slowpoll as a module parameter (I guess nobody really wanted
- * to change it from the default... )
- * - Fixed a stupid editing problem I introduced in 1.22.
- * - Fixed dropping characters on a termios change.
- *
- * Revision 1.22 1999/07/26 21:01:43 wolff
- * Russell Brown noticed that I had overlooked 4 out of six modem control
- * signals in sx_getsignals. Ooops.
- *
- * Revision 1.21 1999/07/23 09:11:33 wolff
- * I forgot to free dynamically allocated memory when the driver is unloaded.
- *
- * Revision 1.20 1999/07/20 06:25:26 wolff
- * The "closing wait" wasn't honoured. Thanks to James Griffiths for
- * reporting this.
- *
- * Revision 1.19 1999/07/11 08:59:59 wolff
- * Fixed an oops in close, when an open was pending. Changed the memtest
- * a bit. Should also test the board in word-mode, however my card fails the
- * memtest then. I still have to figure out what is wrong...
- *
- * Revision 1.18 1999/06/10 09:38:42 wolff
- * Changed the format of the firmware revision from %04x to %x.%02x .
- *
- * Revision 1.17 1999/06/04 09:44:35 wolff
- * fixed problem: reference to pci stuff when config_pci was off...
- * Thanks to Jorge Novo for noticing this.
- *
- * Revision 1.16 1999/06/02 08:30:15 wolff
- * added/removed the workaround for the DCD bug in the Firmware.
- * A bit more debugging code to locate that...
- *
- * Revision 1.15 1999/06/01 11:35:30 wolff
- * when DCD is left low (floating?), on TA's the firmware first tells us
- * that DCD is high, but after a short while suddenly comes to the
- * conclusion that it is low. All this would be fine, if it weren't that
- * Unix requires us to send a "hangup" signal in that case. This usually
- * all happens BEFORE the program has had a chance to ioctl the device
- * into clocal mode..
- *
- * Revision 1.14 1999/05/25 11:18:59 wolff
- * Added PCI-fix.
- * Added checks for return code of sx_sendcommand.
- * Don't issue "reconfig" if port isn't open yet. (bit us on TA modules...)
- *
- * Revision 1.13 1999/04/29 15:18:01 wolff
- * Fixed an "oops" that showed on SuSE 6.0 systems.
- * Activate DTR again after stty 0.
- *
- * Revision 1.12 1999/04/29 07:49:52 wolff
- * Improved "stty 0" handling a bit. (used to change baud to 9600 assuming
- * the connection would be dropped anyway. That is not always the case,
- * and confuses people).
- * Told the card to always monitor the modem signals.
- * Added support for dynamic gs_debug adjustments.
- * Now tells the rest of the system the number of ports.
- *
- * Revision 1.11 1999/04/24 11:11:30 wolff
- * Fixed two stupid typos in the memory test.
- *
- * Revision 1.10 1999/04/24 10:53:39 wolff
- * Added some of Christian's suggestions.
- * Fixed an HW_COOK_IN bug (ISIG was not in I_OTHER. We used to trust the
- * card to send the signal to the process.....)
- *
- * Revision 1.9 1999/04/23 07:26:38 wolff
- * Included Christian Lademann's 2.0 compile-warning fixes and interrupt
- * assignment redesign.
- * Cleanup of some other stuff.
- *
- * Revision 1.8 1999/04/16 13:05:30 wolff
- * fixed a DCD change unnoticed bug.
- *
- * Revision 1.7 1999/04/14 22:19:51 wolff
- * Fixed typo that showed up in 2.0.x builds (get_user instead of Get_user!)
- *
- * Revision 1.6 1999/04/13 18:40:20 wolff
- * changed misc-minor to 161, as assigned by HPA.
- *
- * Revision 1.5 1999/04/13 15:12:25 wolff
- * Fixed use-count leak when "hangup" occurred.
- * Added workaround for a stupid-PCIBIOS bug.
- *
- *
- * Revision 1.4 1999/04/01 22:47:40 wolff
- * Fixed < 1M linux-2.0 problem.
- * (vremap isn't compatible with ioremap in that case)
- *
- * Revision 1.3 1999/03/31 13:45:45 wolff
- * Firmware loading is now done through a separate IOCTL.
- *
- * Revision 1.2 1999/03/28 12:22:29 wolff
- * rcs cleanup
- *
- * Revision 1.1 1999/03/28 12:10:34 wolff
- * Readying for release on 2.0.x (sorry David, 1.01 becomes 1.1 for RCS).
- *
- * Revision 0.12 1999/03/28 09:20:10 wolff
- * Fixed problem in 0.11, continuing cleanup.
- *
- * Revision 0.11 1999/03/28 08:46:44 wolff
- * cleanup. Not good.
- *
- * Revision 0.10 1999/03/28 08:09:43 wolff
- * Fixed losing characters on close.
- *
- * Revision 0.9 1999/03/21 22:52:01 wolff
- * Ported back to 2.2.... (minor things)
- *
- * Revision 0.8 1999/03/21 22:40:33 wolff
- * Port to 2.0
- *
- * Revision 0.7 1999/03/21 19:06:34 wolff
- * Fixed hangup processing.
- *
- * Revision 0.6 1999/02/05 08:45:14 wolff
- * fixed real_raw problems. Inclusion into kernel imminent.
- *
- * Revision 0.5 1998/12/21 23:51:06 wolff
- * Snatched a nasty bug: sx_transmit_chars was getting re-entered, and it
- * shouldn't have. THATs why I want to have transmit interrupts even when
- * the buffer is empty.
- *
- * Revision 0.4 1998/12/17 09:34:46 wolff
- * PPP works. ioctl works. Basically works!
- *
- * Revision 0.3 1998/12/15 13:05:18 wolff
- * It works! Wow! Gotta start implementing IOCTL and stuff....
- *
- * Revision 0.2 1998/12/01 08:33:53 wolff
- * moved over to 2.1.130
- *
- * Revision 0.1 1998/11/03 21:23:51 wolff
- * Initial revision. Detects SX card.
- *
- * */
-
-#define SX_VERSION 1.33
-
-#include <linux/module.h>
-#include <linux/kdev_t.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/mm.h>
-#include <linux/serial.h>
-#include <linux/fcntl.h>
-#include <linux/major.h>
-#include <linux/delay.h>
-#include <linux/eisa.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/miscdevice.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-/* The 3.0.0 version of sxboards/sxwindow.h uses BYTE and WORD.... */
-#define BYTE u8
-#define WORD u16
-
-/* .... but the 3.0.4 version uses _u8 and _u16. */
-#define _u8 u8
-#define _u16 u16
-
-#include "sxboards.h"
-#include "sxwindow.h"
-
-#include <linux/generic_serial.h>
-#include "sx.h"
-
-/* I don't think that this driver can handle more than 256 ports on
- one machine. You'll have to increase the number of boards in sx.h
- if you want more than 4 boards. */
-
-#ifndef PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8
-#define PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8 0x2000
-#endif
-
-/* Configurable options:
- (Don't be too sure that it'll work if you toggle them) */
-
-/* Am I paranoid or not ? ;-) */
-#undef SX_PARANOIA_CHECK
-
-/* 20 -> 2000 per second. The card should rate-limit interrupts at 100
- Hz, but it is user configurable. I don't recommend going above 1000
- Hz. The interrupt ratelimit might trigger if the interrupt is
- shared with a very active other device. */
-#define IRQ_RATE_LIMIT 20
-
-/* Sharing interrupts is possible now. If the other device wants more
- than 2000 interrupts per second, we'd gracefully decline further
- interrupts. That's not what we want. On the other hand, if the
- other device interrupts 2000 times a second, don't use the SX
- interrupt. Use polling. */
-#undef IRQ_RATE_LIMIT
-
-#if 0
-/* Not implemented */
-/*
- * The following defines are mostly for testing purposes. But if you need
- * some nice reporting in your syslog, you can define them also.
- */
-#define SX_REPORT_FIFO
-#define SX_REPORT_OVERRUN
-#endif
-
-/* Function prototypes */
-static void sx_disable_tx_interrupts(void *ptr);
-static void sx_enable_tx_interrupts(void *ptr);
-static void sx_disable_rx_interrupts(void *ptr);
-static void sx_enable_rx_interrupts(void *ptr);
-static int sx_carrier_raised(struct tty_port *port);
-static void sx_shutdown_port(void *ptr);
-static int sx_set_real_termios(void *ptr);
-static void sx_close(void *ptr);
-static int sx_chars_in_buffer(void *ptr);
-static int sx_init_board(struct sx_board *board);
-static int sx_init_portstructs(int nboards, int nports);
-static long sx_fw_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
-static int sx_init_drivers(void);
-
-static struct tty_driver *sx_driver;
-
-static DEFINE_MUTEX(sx_boards_lock);
-static struct sx_board boards[SX_NBOARDS];
-static struct sx_port *sx_ports;
-static int sx_initialized;
-static int sx_nports;
-static int sx_debug;
-
-/* You can have the driver poll your card.
- - Set sx_poll to 1 to poll every timer tick (10ms on Intel).
- This is used when the card cannot use an interrupt for some reason.
-
- - set sx_slowpoll to 100 to do an extra poll once a second (on Intel). If
- the driver misses an interrupt (report this if it DOES happen to you!)
- everything will continue to work....
- */
-static int sx_poll = 1;
-static int sx_slowpoll;
-
-/* The card limits the number of interrupts per second.
- At 115k2 "100" should be sufficient.
- If you're using higher baudrates, you can increase this...
- */
-
-static int sx_maxints = 100;
-
-#ifdef CONFIG_ISA
-
-/* These are the only open spaces in my computer. Yours may have more
- or less.... -- REW
- duh: Card at 0xa0000 is possible on HP Netserver?? -- pvdl
-*/
-static int sx_probe_addrs[] = {
- 0xc0000, 0xd0000, 0xe0000,
- 0xc8000, 0xd8000, 0xe8000
-};
-static int si_probe_addrs[] = {
- 0xc0000, 0xd0000, 0xe0000,
- 0xc8000, 0xd8000, 0xe8000, 0xa0000
-};
-static int si1_probe_addrs[] = {
- 0xd0000
-};
-
-#define NR_SX_ADDRS ARRAY_SIZE(sx_probe_addrs)
-#define NR_SI_ADDRS ARRAY_SIZE(si_probe_addrs)
-#define NR_SI1_ADDRS ARRAY_SIZE(si1_probe_addrs)
-
-module_param_array(sx_probe_addrs, int, NULL, 0);
-module_param_array(si_probe_addrs, int, NULL, 0);
-#endif
-
-/* Set the mask to all-ones. This, alas, only supports 32 interrupts.
- Some architectures may need more. */
-static int sx_irqmask = -1;
-
-module_param(sx_poll, int, 0);
-module_param(sx_slowpoll, int, 0);
-module_param(sx_maxints, int, 0);
-module_param(sx_debug, int, 0);
-module_param(sx_irqmask, int, 0);
-
-MODULE_LICENSE("GPL");
-
-static struct real_driver sx_real_driver = {
- sx_disable_tx_interrupts,
- sx_enable_tx_interrupts,
- sx_disable_rx_interrupts,
- sx_enable_rx_interrupts,
- sx_shutdown_port,
- sx_set_real_termios,
- sx_chars_in_buffer,
- sx_close,
-};
-
-/*
- This driver can spew a whole lot of debugging output at you. If you
- need maximum performance, you should disable the DEBUG define. To
- aid in debugging in the field, I'm leaving the compile-time debug
-  features enabled, and disabling them at runtime. That allows me to
- instruct people with problems to enable debugging without requiring
- them to recompile...
-*/
-#define DEBUG
-
-#ifdef DEBUG
-#define sx_dprintk(f, str...) if (sx_debug & f) printk (str)
-#else
-#define sx_dprintk(f, str...) /* nothing */
-#endif
-
-#define func_enter() sx_dprintk(SX_DEBUG_FLOW, "sx: enter %s\n",__func__)
-#define func_exit() sx_dprintk(SX_DEBUG_FLOW, "sx: exit %s\n",__func__)
-
-#define func_enter2() sx_dprintk(SX_DEBUG_FLOW, "sx: enter %s (port %d)\n", \
- __func__, port->line)
-
-/*
- * Firmware loader driver specific routines
- *
- */
-
-static const struct file_operations sx_fw_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = sx_fw_ioctl,
- .llseek = noop_llseek,
-};
-
-static struct miscdevice sx_fw_device = {
- SXCTL_MISC_MINOR, "sxctl", &sx_fw_fops
-};
-
-#ifdef SX_PARANOIA_CHECK
-
-/* This doesn't work. Who's paranoid around here? Not me! */
-
-static inline int sx_paranoia_check(struct sx_port const *port,
- char *name, const char *routine)
-{
- static const char *badmagic = KERN_ERR "sx: Warning: bad sx port magic "
- "number for device %s in %s\n";
- static const char *badinfo = KERN_ERR "sx: Warning: null sx port for "
- "device %s in %s\n";
-
- if (!port) {
- printk(badinfo, name, routine);
- return 1;
- }
- if (port->magic != SX_MAGIC) {
- printk(badmagic, name, routine);
- return 1;
- }
-
- return 0;
-}
-#else
-#define sx_paranoia_check(a,b,c) 0
-#endif
-
-/* The timeouts. First try 30 times as fast as possible. Then give
- the card some time to breathe between accesses. (Otherwise the
-   processor on the card might not be able to access its OWN bus...) */
-
-#define TIMEOUT_1 30
-#define TIMEOUT_2 1000000
-
-#ifdef DEBUG
-static void my_hd_io(void __iomem *p, int len)
-{
- int i, j, ch;
- unsigned char __iomem *addr = p;
-
- for (i = 0; i < len; i += 16) {
- printk("%p ", addr + i);
- for (j = 0; j < 16; j++) {
- printk("%02x %s", readb(addr + j + i),
- (j == 7) ? " " : "");
- }
- for (j = 0; j < 16; j++) {
- ch = readb(addr + j + i);
- printk("%c", (ch < 0x20) ? '.' :
- ((ch > 0x7f) ? '.' : ch));
- }
- printk("\n");
- }
-}
-static void my_hd(void *p, int len)
-{
- int i, j, ch;
- unsigned char *addr = p;
-
- for (i = 0; i < len; i += 16) {
- printk("%p ", addr + i);
- for (j = 0; j < 16; j++) {
- printk("%02x %s", addr[j + i], (j == 7) ? " " : "");
- }
- for (j = 0; j < 16; j++) {
- ch = addr[j + i];
- printk("%c", (ch < 0x20) ? '.' :
- ((ch > 0x7f) ? '.' : ch));
- }
- printk("\n");
- }
-}
-#endif
-
-/* This needs redoing for Alpha -- REW -- Done. */
-
-static inline void write_sx_byte(struct sx_board *board, int offset, u8 byte)
-{
- writeb(byte, board->base + offset);
-}
-
-static inline u8 read_sx_byte(struct sx_board *board, int offset)
-{
- return readb(board->base + offset);
-}
-
-static inline void write_sx_word(struct sx_board *board, int offset, u16 word)
-{
- writew(word, board->base + offset);
-}
-
-static inline u16 read_sx_word(struct sx_board *board, int offset)
-{
- return readw(board->base + offset);
-}
-
-static int sx_busy_wait_eq(struct sx_board *board,
- int offset, int mask, int correctval)
-{
- int i;
-
- func_enter();
-
- for (i = 0; i < TIMEOUT_1; i++)
- if ((read_sx_byte(board, offset) & mask) == correctval) {
- func_exit();
- return 1;
- }
-
- for (i = 0; i < TIMEOUT_2; i++) {
- if ((read_sx_byte(board, offset) & mask) == correctval) {
- func_exit();
- return 1;
- }
- udelay(1);
- }
-
- func_exit();
- return 0;
-}
-
-static int sx_busy_wait_neq(struct sx_board *board,
- int offset, int mask, int badval)
-{
- int i;
-
- func_enter();
-
- for (i = 0; i < TIMEOUT_1; i++)
- if ((read_sx_byte(board, offset) & mask) != badval) {
- func_exit();
- return 1;
- }
-
- for (i = 0; i < TIMEOUT_2; i++) {
- if ((read_sx_byte(board, offset) & mask) != badval) {
- func_exit();
- return 1;
- }
- udelay(1);
- }
-
- func_exit();
- return 0;
-}
-
-/* 5.6.4 of 6210028 r2.3 */
-static int sx_reset(struct sx_board *board)
-{
- func_enter();
-
- if (IS_SX_BOARD(board)) {
-
- write_sx_byte(board, SX_CONFIG, 0);
- write_sx_byte(board, SX_RESET, 1); /* Value doesn't matter */
-
- if (!sx_busy_wait_eq(board, SX_RESET_STATUS, 1, 0)) {
- printk(KERN_INFO "sx: Card doesn't respond to "
- "reset...\n");
- return 0;
- }
- } else if (IS_EISA_BOARD(board)) {
- outb(board->irq << 4, board->eisa_base + 0xc02);
- } else if (IS_SI1_BOARD(board)) {
- write_sx_byte(board, SI1_ISA_RESET, 0); /*value doesn't matter*/
- } else {
- /* Gory details of the SI/ISA board */
- write_sx_byte(board, SI2_ISA_RESET, SI2_ISA_RESET_SET);
- write_sx_byte(board, SI2_ISA_IRQ11, SI2_ISA_IRQ11_CLEAR);
- write_sx_byte(board, SI2_ISA_IRQ12, SI2_ISA_IRQ12_CLEAR);
- write_sx_byte(board, SI2_ISA_IRQ15, SI2_ISA_IRQ15_CLEAR);
- write_sx_byte(board, SI2_ISA_INTCLEAR, SI2_ISA_INTCLEAR_CLEAR);
- write_sx_byte(board, SI2_ISA_IRQSET, SI2_ISA_IRQSET_CLEAR);
- }
-
- func_exit();
- return 1;
-}
-
-/* This doesn't work on machines where "NULL" isn't 0 */
-/* If you have one of those, someone will need to write
- the equivalent of this, which will amount to about 3 lines. I don't
- want to complicate this right now. -- REW
- (See, I do write comments every now and then :-) */
-#define OFFSETOF(strct, elem) ((long)&(((struct strct *)NULL)->elem))
-
-#define CHAN_OFFSET(port,elem) (port->ch_base + OFFSETOF (_SXCHANNEL, elem))
-#define MODU_OFFSET(board,addr,elem) (addr + OFFSETOF (_SXMODULE, elem))
-#define BRD_OFFSET(board,elem) (OFFSETOF (_SXCARD, elem))
-
-#define sx_write_channel_byte(port, elem, val) \
- write_sx_byte (port->board, CHAN_OFFSET (port, elem), val)
-
-#define sx_read_channel_byte(port, elem) \
- read_sx_byte (port->board, CHAN_OFFSET (port, elem))
-
-#define sx_write_channel_word(port, elem, val) \
- write_sx_word (port->board, CHAN_OFFSET (port, elem), val)
-
-#define sx_read_channel_word(port, elem) \
- read_sx_word (port->board, CHAN_OFFSET (port, elem))
-
-#define sx_write_module_byte(board, addr, elem, val) \
- write_sx_byte (board, MODU_OFFSET (board, addr, elem), val)
-
-#define sx_read_module_byte(board, addr, elem) \
- read_sx_byte (board, MODU_OFFSET (board, addr, elem))
-
-#define sx_write_module_word(board, addr, elem, val) \
- write_sx_word (board, MODU_OFFSET (board, addr, elem), val)
-
-#define sx_read_module_word(board, addr, elem) \
- read_sx_word (board, MODU_OFFSET (board, addr, elem))
-
-#define sx_write_board_byte(board, elem, val) \
- write_sx_byte (board, BRD_OFFSET (board, elem), val)
-
-#define sx_read_board_byte(board, elem) \
- read_sx_byte (board, BRD_OFFSET (board, elem))
-
-#define sx_write_board_word(board, elem, val) \
- write_sx_word (board, BRD_OFFSET (board, elem), val)
-
-#define sx_read_board_word(board, elem) \
- read_sx_word (board, BRD_OFFSET (board, elem))
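The OFFSETOF macro above is the classic null-pointer-cast trick the preceding comment apologizes for; standard C's offsetof() from <stddef.h> yields the same offsets without assuming that NULL is numerically 0. A minimal sketch (OFFSETOF_STD is an invented name; _SXCHANNEL is the struct tag the driver already uses):

#include <stddef.h>

/* Portable equivalent of OFFSETOF above. */
#define OFFSETOF_STD(strct, elem)	((long)offsetof(struct strct, elem))

/* e.g. CHAN_OFFSET(port, hi_txbuf) could equally be written as
 *      port->ch_base + OFFSETOF_STD(_SXCHANNEL, hi_txbuf)            */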
-
-static int sx_start_board(struct sx_board *board)
-{
- if (IS_SX_BOARD(board)) {
- write_sx_byte(board, SX_CONFIG, SX_CONF_BUSEN);
- } else if (IS_EISA_BOARD(board)) {
- write_sx_byte(board, SI2_EISA_OFF, SI2_EISA_VAL);
- outb((board->irq << 4) | 4, board->eisa_base + 0xc02);
- } else if (IS_SI1_BOARD(board)) {
- write_sx_byte(board, SI1_ISA_RESET_CLEAR, 0);
- write_sx_byte(board, SI1_ISA_INTCL, 0);
- } else {
- /* Don't bug me about the clear_set.
- I haven't the foggiest idea what it's about -- REW */
- write_sx_byte(board, SI2_ISA_RESET, SI2_ISA_RESET_CLEAR);
- write_sx_byte(board, SI2_ISA_INTCLEAR, SI2_ISA_INTCLEAR_SET);
- }
- return 1;
-}
-
-#define SX_IRQ_REG_VAL(board) \
- ((board->flags & SX_ISA_BOARD) ? (board->irq << 4) : 0)
-
-/* Note. The SX register is write-only. Therefore, we have to enable the
- bus too. This is a no-op, if you don't mess with this driver... */
-static int sx_start_interrupts(struct sx_board *board)
-{
-
- /* Don't call this with board->irq == 0 */
-
- if (IS_SX_BOARD(board)) {
- write_sx_byte(board, SX_CONFIG, SX_IRQ_REG_VAL(board) |
- SX_CONF_BUSEN | SX_CONF_HOSTIRQ);
- } else if (IS_EISA_BOARD(board)) {
- inb(board->eisa_base + 0xc03);
- } else if (IS_SI1_BOARD(board)) {
- write_sx_byte(board, SI1_ISA_INTCL, 0);
- write_sx_byte(board, SI1_ISA_INTCL_CLEAR, 0);
- } else {
- switch (board->irq) {
- case 11:
- write_sx_byte(board, SI2_ISA_IRQ11, SI2_ISA_IRQ11_SET);
- break;
- case 12:
- write_sx_byte(board, SI2_ISA_IRQ12, SI2_ISA_IRQ12_SET);
- break;
- case 15:
- write_sx_byte(board, SI2_ISA_IRQ15, SI2_ISA_IRQ15_SET);
- break;
- default:
- printk(KERN_INFO "sx: SI/XIO card doesn't support "
- "interrupt %d.\n", board->irq);
- return 0;
- }
- write_sx_byte(board, SI2_ISA_INTCLEAR, SI2_ISA_INTCLEAR_SET);
- }
-
- return 1;
-}
-
-static int sx_send_command(struct sx_port *port,
- int command, int mask, int newstat)
-{
- func_enter2();
- write_sx_byte(port->board, CHAN_OFFSET(port, hi_hstat), command);
- func_exit();
- return sx_busy_wait_eq(port->board, CHAN_OFFSET(port, hi_hstat), mask,
- newstat);
-}
-
-static char *mod_type_s(int module_type)
-{
- switch (module_type) {
- case TA4:
- return "TA4";
- case TA8:
- return "TA8";
- case TA4_ASIC:
- return "TA4_ASIC";
- case TA8_ASIC:
- return "TA8_ASIC";
- case MTA_CD1400:
- return "MTA_CD1400";
- case SXDC:
- return "SXDC";
- default:
- return "Unknown/invalid";
- }
-}
-
-static char *pan_type_s(int pan_type)
-{
- switch (pan_type) {
- case MOD_RS232DB25:
- return "MOD_RS232DB25";
- case MOD_RS232RJ45:
- return "MOD_RS232RJ45";
- case MOD_RS422DB25:
- return "MOD_RS422DB25";
- case MOD_PARALLEL:
- return "MOD_PARALLEL";
- case MOD_2_RS232DB25:
- return "MOD_2_RS232DB25";
- case MOD_2_RS232RJ45:
- return "MOD_2_RS232RJ45";
- case MOD_2_RS422DB25:
- return "MOD_2_RS422DB25";
- case MOD_RS232DB25MALE:
- return "MOD_RS232DB25MALE";
- case MOD_2_PARALLEL:
- return "MOD_2_PARALLEL";
- case MOD_BLANK:
- return "empty";
- default:
- return "invalid";
- }
-}
-
-static int mod_compat_type(int module_type)
-{
- return module_type >> 4;
-}
-
-static void sx_reconfigure_port(struct sx_port *port)
-{
- if (sx_read_channel_byte(port, hi_hstat) == HS_IDLE_OPEN) {
- if (sx_send_command(port, HS_CONFIG, -1, HS_IDLE_OPEN) != 1) {
- printk(KERN_WARNING "sx: Sent reconfigure command, but "
- "card didn't react.\n");
- }
- } else {
- sx_dprintk(SX_DEBUG_TERMIOS, "sx: Not sending reconfigure: "
- "port isn't open (%02x).\n",
- sx_read_channel_byte(port, hi_hstat));
- }
-}
-
-static void sx_setsignals(struct sx_port *port, int dtr, int rts)
-{
- int t;
- func_enter2();
-
- t = sx_read_channel_byte(port, hi_op);
- if (dtr >= 0)
- t = dtr ? (t | OP_DTR) : (t & ~OP_DTR);
- if (rts >= 0)
- t = rts ? (t | OP_RTS) : (t & ~OP_RTS);
- sx_write_channel_byte(port, hi_op, t);
- sx_dprintk(SX_DEBUG_MODEMSIGNALS, "setsignals: %d/%d\n", dtr, rts);
-
- func_exit();
-}
-
-static int sx_getsignals(struct sx_port *port)
-{
- int i_stat, o_stat;
-
- o_stat = sx_read_channel_byte(port, hi_op);
- i_stat = sx_read_channel_byte(port, hi_ip);
-
- sx_dprintk(SX_DEBUG_MODEMSIGNALS, "getsignals: %d/%d (%d/%d) "
- "%02x/%02x\n",
- (o_stat & OP_DTR) != 0, (o_stat & OP_RTS) != 0,
- port->c_dcd, tty_port_carrier_raised(&port->gs.port),
- sx_read_channel_byte(port, hi_ip),
- sx_read_channel_byte(port, hi_state));
-
- return (((o_stat & OP_DTR) ? TIOCM_DTR : 0) |
- ((o_stat & OP_RTS) ? TIOCM_RTS : 0) |
- ((i_stat & IP_CTS) ? TIOCM_CTS : 0) |
- ((i_stat & IP_DCD) ? TIOCM_CAR : 0) |
- ((i_stat & IP_DSR) ? TIOCM_DSR : 0) |
- ((i_stat & IP_RI) ? TIOCM_RNG : 0));
-}
-
-static void sx_set_baud(struct sx_port *port)
-{
- int t;
-
- if (port->board->ta_type == MOD_SXDC) {
- switch (port->gs.baud) {
- /* Save some typing work... */
-#define e(x) case x: t = BAUD_ ## x; break
- e(50);
- e(75);
- e(110);
- e(150);
- e(200);
- e(300);
- e(600);
- e(1200);
- e(1800);
- e(2000);
- e(2400);
- e(4800);
- e(7200);
- e(9600);
- e(14400);
- e(19200);
- e(28800);
- e(38400);
- e(56000);
- e(57600);
- e(64000);
- e(76800);
- e(115200);
- e(128000);
- e(150000);
- e(230400);
- e(256000);
- e(460800);
- e(921600);
- case 134:
- t = BAUD_134_5;
- break;
- case 0:
- t = -1;
- break;
- default:
- /* Can I return "invalid"? */
- t = BAUD_9600;
- printk(KERN_INFO "sx: unsupported baud rate: %d.\n",
- port->gs.baud);
- break;
- }
-#undef e
- if (t > 0) {
-/* The baud rate is not set to 0, so we're enabling DTR... -- REW */
- sx_setsignals(port, 1, -1);
- /* XXX This is not TA & MTA compatible */
- sx_write_channel_byte(port, hi_csr, 0xff);
-
- sx_write_channel_byte(port, hi_txbaud, t);
- sx_write_channel_byte(port, hi_rxbaud, t);
- } else {
- sx_setsignals(port, 0, -1);
- }
- } else {
- switch (port->gs.baud) {
-#define e(x) case x: t = CSR_ ## x; break
- e(75);
- e(150);
- e(300);
- e(600);
- e(1200);
- e(2400);
- e(4800);
- e(1800);
- e(9600);
- e(19200);
- e(57600);
- e(38400);
-/* TA supports 110, but not 115200, MTA supports 115200, but not 110 */
- case 110:
- if (port->board->ta_type == MOD_TA) {
- t = CSR_110;
- break;
- } else {
- t = CSR_9600;
- printk(KERN_INFO "sx: Unsupported baud rate: "
- "%d.\n", port->gs.baud);
- break;
- }
- case 115200:
- if (port->board->ta_type == MOD_TA) {
- t = CSR_9600;
- printk(KERN_INFO "sx: Unsupported baud rate: "
- "%d.\n", port->gs.baud);
- break;
- } else {
- t = CSR_110;
- break;
- }
- case 0:
- t = -1;
- break;
- default:
- t = CSR_9600;
- printk(KERN_INFO "sx: Unsupported baud rate: %d.\n",
- port->gs.baud);
- break;
- }
-#undef e
- if (t >= 0) {
- sx_setsignals(port, 1, -1);
- sx_write_channel_byte(port, hi_csr, t * 0x11);
- } else {
- sx_setsignals(port, 0, -1);
- }
- }
-}
-
-/* Simon Allen's version of this routine was 225 lines long. 85 is a lot
- better. -- REW */
-
-static int sx_set_real_termios(void *ptr)
-{
- struct sx_port *port = ptr;
-
- func_enter2();
-
- if (!port->gs.port.tty)
- return 0;
-
- /* What is this doing here? -- REW
- Ha! figured it out. It is to allow you to get DTR active again
- if you've dropped it with stty 0. Moved to set_baud, where it
- belongs (next to the drop dtr if baud == 0) -- REW */
- /* sx_setsignals (port, 1, -1); */
-
- sx_set_baud(port);
-
-#define CFLAG port->gs.port.tty->termios->c_cflag
- sx_write_channel_byte(port, hi_mr1,
- (C_PARENB(port->gs.port.tty) ? MR1_WITH : MR1_NONE) |
- (C_PARODD(port->gs.port.tty) ? MR1_ODD : MR1_EVEN) |
- (C_CRTSCTS(port->gs.port.tty) ? MR1_RTS_RXFLOW : 0) |
- (((CFLAG & CSIZE) == CS8) ? MR1_8_BITS : 0) |
- (((CFLAG & CSIZE) == CS7) ? MR1_7_BITS : 0) |
- (((CFLAG & CSIZE) == CS6) ? MR1_6_BITS : 0) |
- (((CFLAG & CSIZE) == CS5) ? MR1_5_BITS : 0));
-
- sx_write_channel_byte(port, hi_mr2,
- (C_CRTSCTS(port->gs.port.tty) ? MR2_CTS_TXFLOW : 0) |
- (C_CSTOPB(port->gs.port.tty) ? MR2_2_STOP :
- MR2_1_STOP));
-
- switch (CFLAG & CSIZE) {
- case CS8:
- sx_write_channel_byte(port, hi_mask, 0xff);
- break;
- case CS7:
- sx_write_channel_byte(port, hi_mask, 0x7f);
- break;
- case CS6:
- sx_write_channel_byte(port, hi_mask, 0x3f);
- break;
- case CS5:
- sx_write_channel_byte(port, hi_mask, 0x1f);
- break;
- default:
- printk(KERN_INFO "sx: Invalid wordsize: %u\n",
- (unsigned int)CFLAG & CSIZE);
- break;
- }
-
- sx_write_channel_byte(port, hi_prtcl,
- (I_IXON(port->gs.port.tty) ? SP_TXEN : 0) |
- (I_IXOFF(port->gs.port.tty) ? SP_RXEN : 0) |
- (I_IXANY(port->gs.port.tty) ? SP_TANY : 0) | SP_DCEN);
-
- sx_write_channel_byte(port, hi_break,
- (I_IGNBRK(port->gs.port.tty) ? BR_IGN : 0 |
- I_BRKINT(port->gs.port.tty) ? BR_INT : 0));
-
- sx_write_channel_byte(port, hi_txon, START_CHAR(port->gs.port.tty));
- sx_write_channel_byte(port, hi_rxon, START_CHAR(port->gs.port.tty));
- sx_write_channel_byte(port, hi_txoff, STOP_CHAR(port->gs.port.tty));
- sx_write_channel_byte(port, hi_rxoff, STOP_CHAR(port->gs.port.tty));
-
- sx_reconfigure_port(port);
-
- /* Tell line discipline whether we will do input cooking */
- if (I_OTHER(port->gs.port.tty)) {
- clear_bit(TTY_HW_COOK_IN, &port->gs.port.tty->flags);
- } else {
- set_bit(TTY_HW_COOK_IN, &port->gs.port.tty->flags);
- }
- sx_dprintk(SX_DEBUG_TERMIOS, "iflags: %x(%d) ",
- (unsigned int)port->gs.port.tty->termios->c_iflag,
- I_OTHER(port->gs.port.tty));
-
-/* Tell line discipline whether we will do output cooking.
- * If OPOST is set and no other output flags are set then we can do output
- * processing. Even if only *one* other flag in the O_OTHER group is set
- * we do cooking in software.
- */
- if (O_OPOST(port->gs.port.tty) && !O_OTHER(port->gs.port.tty)) {
- set_bit(TTY_HW_COOK_OUT, &port->gs.port.tty->flags);
- } else {
- clear_bit(TTY_HW_COOK_OUT, &port->gs.port.tty->flags);
- }
- sx_dprintk(SX_DEBUG_TERMIOS, "oflags: %x(%d)\n",
- (unsigned int)port->gs.port.tty->termios->c_oflag,
- O_OTHER(port->gs.port.tty));
- /* port->c_dcd = sx_get_CD (port); */
- func_exit();
- return 0;
-}
-
-/* ********************************************************************** *
- * the interrupt related routines *
- * ********************************************************************** */
-
-/* Note:
- Other drivers use the macro "MIN" to calculate how much to copy.
- This has the disadvantage that it will evaluate parts twice. That's
- expensive when it's IO (and the compiler cannot optimize those away!).
- Moreover, I'm not sure that you're race-free.
-
- I assign a value, and then only allow the value to decrease. This
- is always safe. This makes the code a few lines longer, and you
- know I'm dead against that, but I think it is required in this
- case. */
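The "assign a value, then only allow it to decrease" clamping described above looks like this in isolation; the sketch mirrors what sx_transmit_chars() below does with c, txroom and tx_ip (parameter names are illustrative):

/* Sketch: how many bytes can safely be copied in one go. */
static int clamp_tx_count(int xmit_cnt, int tx_ip, int tx_op,
			  int xmit_tail, int xmit_size)
{
	int c = xmit_cnt;                        /* start from everything pending */
	int txroom = (tx_op - tx_ip - 1) & 0xff; /* free space in the 256-byte
						    hardware transmit ring        */

	if (c > txroom)                          /* only ever decrease c ...      */
		c = txroom;
	if (c > 0x100 - tx_ip)                   /* ... never wrap the hw buffer  */
		c = 0x100 - tx_ip;
	if (c > xmit_size - xmit_tail)           /* ... never wrap the sw buffer  */
		c = xmit_size - xmit_tail;

	return c;	/* 0 means nothing more can be copied this round */
}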
-
-static void sx_transmit_chars(struct sx_port *port)
-{
- int c;
- int tx_ip;
- int txroom;
-
- func_enter2();
- sx_dprintk(SX_DEBUG_TRANSMIT, "Port %p: transmit %d chars\n",
- port, port->gs.xmit_cnt);
-
- if (test_and_set_bit(SX_PORT_TRANSMIT_LOCK, &port->locks)) {
- return;
- }
-
- while (1) {
- c = port->gs.xmit_cnt;
-
- sx_dprintk(SX_DEBUG_TRANSMIT, "Copying %d ", c);
- tx_ip = sx_read_channel_byte(port, hi_txipos);
-
- /* Took me 5 minutes to deduce this formula.
- Luckily it is literally in the manual in section 6.5.4.3.5 */
- txroom = (sx_read_channel_byte(port, hi_txopos) - tx_ip - 1) &
- 0xff;
-
- /* Don't copy more bytes than there is room for in the buffer */
- if (c > txroom)
- c = txroom;
- sx_dprintk(SX_DEBUG_TRANSMIT, " %d(%d) ", c, txroom);
-
- /* Don't copy past the end of the hardware transmit buffer */
- if (c > 0x100 - tx_ip)
- c = 0x100 - tx_ip;
-
- sx_dprintk(SX_DEBUG_TRANSMIT, " %d(%d) ", c, 0x100 - tx_ip);
-
-		/* Don't copy past the end of the source buffer */
- if (c > SERIAL_XMIT_SIZE - port->gs.xmit_tail)
- c = SERIAL_XMIT_SIZE - port->gs.xmit_tail;
-
- sx_dprintk(SX_DEBUG_TRANSMIT, " %d(%ld) \n",
- c, SERIAL_XMIT_SIZE - port->gs.xmit_tail);
-
- /* If for one reason or another, we can't copy more data, we're
- done! */
- if (c == 0)
- break;
-
- memcpy_toio(port->board->base + CHAN_OFFSET(port, hi_txbuf) +
- tx_ip, port->gs.xmit_buf + port->gs.xmit_tail, c);
-
- /* Update the pointer in the card */
- sx_write_channel_byte(port, hi_txipos, (tx_ip + c) & 0xff);
-
- /* Update the kernel buffer end */
- port->gs.xmit_tail = (port->gs.xmit_tail + c) &
- (SERIAL_XMIT_SIZE - 1);
-
- /* This one last. (this is essential)
- It would allow others to start putting more data into the
- buffer! */
- port->gs.xmit_cnt -= c;
- }
-
- if (port->gs.xmit_cnt == 0) {
- sx_disable_tx_interrupts(port);
- }
-
- if ((port->gs.xmit_cnt <= port->gs.wakeup_chars) && port->gs.port.tty) {
- tty_wakeup(port->gs.port.tty);
- sx_dprintk(SX_DEBUG_TRANSMIT, "Waking up.... ldisc (%d)....\n",
- port->gs.wakeup_chars);
- }
-
- clear_bit(SX_PORT_TRANSMIT_LOCK, &port->locks);
- func_exit();
-}
-
-/* Note the symmetry between receiving chars and transmitting them!
- Note: The kernel should have implemented both a receive buffer and
- a transmit buffer. */
-
-/* Inlined: Called only once. Remove the inline when you add another call */
-static inline void sx_receive_chars(struct sx_port *port)
-{
- int c;
- int rx_op;
- struct tty_struct *tty;
- int copied = 0;
- unsigned char *rp;
-
- func_enter2();
- tty = port->gs.port.tty;
- while (1) {
- rx_op = sx_read_channel_byte(port, hi_rxopos);
- c = (sx_read_channel_byte(port, hi_rxipos) - rx_op) & 0xff;
-
- sx_dprintk(SX_DEBUG_RECEIVE, "rxop=%d, c = %d.\n", rx_op, c);
-
- /* Don't copy past the end of the hardware receive buffer */
- if (rx_op + c > 0x100)
- c = 0x100 - rx_op;
-
- sx_dprintk(SX_DEBUG_RECEIVE, "c = %d.\n", c);
-
- /* Don't copy more bytes than there is room for in the buffer */
-
- c = tty_prepare_flip_string(tty, &rp, c);
-
- sx_dprintk(SX_DEBUG_RECEIVE, "c = %d.\n", c);
-
- /* If for one reason or another, we can't copy more data, we're done! */
- if (c == 0)
- break;
-
- sx_dprintk(SX_DEBUG_RECEIVE, "Copying over %d chars. First is "
- "%d at %lx\n", c, read_sx_byte(port->board,
- CHAN_OFFSET(port, hi_rxbuf) + rx_op),
- CHAN_OFFSET(port, hi_rxbuf));
- memcpy_fromio(rp, port->board->base +
- CHAN_OFFSET(port, hi_rxbuf) + rx_op, c);
-
- /* This one last. ( Not essential.)
- It allows the card to start putting more data into the
- buffer!
- Update the pointer in the card */
- sx_write_channel_byte(port, hi_rxopos, (rx_op + c) & 0xff);
-
- copied += c;
- }
- if (copied) {
- struct timeval tv;
-
- do_gettimeofday(&tv);
- sx_dprintk(SX_DEBUG_RECEIVE, "pushing flipq port %d (%3d "
- "chars): %d.%06d (%d/%d)\n", port->line,
- copied, (int)(tv.tv_sec % 60), (int)tv.tv_usec,
- tty->raw, tty->real_raw);
-
- /* Tell the rest of the system the news. Great news. New
- characters! */
- tty_flip_buffer_push(tty);
- /* tty_schedule_flip (tty); */
- }
-
- func_exit();
-}
-
-/* Inlined: it is called only once. Remove the inline if you add another
- call */
-static inline void sx_check_modem_signals(struct sx_port *port)
-{
- int hi_state;
- int c_dcd;
-
- hi_state = sx_read_channel_byte(port, hi_state);
- sx_dprintk(SX_DEBUG_MODEMSIGNALS, "Checking modem signals (%d/%d)\n",
- port->c_dcd, tty_port_carrier_raised(&port->gs.port));
-
- if (hi_state & ST_BREAK) {
- hi_state &= ~ST_BREAK;
- sx_dprintk(SX_DEBUG_MODEMSIGNALS, "got a break.\n");
- sx_write_channel_byte(port, hi_state, hi_state);
- gs_got_break(&port->gs);
- }
- if (hi_state & ST_DCD) {
- hi_state &= ~ST_DCD;
- sx_dprintk(SX_DEBUG_MODEMSIGNALS, "got a DCD change.\n");
- sx_write_channel_byte(port, hi_state, hi_state);
- c_dcd = tty_port_carrier_raised(&port->gs.port);
- sx_dprintk(SX_DEBUG_MODEMSIGNALS, "DCD is now %d\n", c_dcd);
- if (c_dcd != port->c_dcd) {
- port->c_dcd = c_dcd;
- if (tty_port_carrier_raised(&port->gs.port)) {
- /* DCD went UP */
- if ((sx_read_channel_byte(port, hi_hstat) !=
- HS_IDLE_CLOSED) &&
- !(port->gs.port.tty->termios->
- c_cflag & CLOCAL)) {
- /* Are we blocking in open? */
- sx_dprintk(SX_DEBUG_MODEMSIGNALS, "DCD "
- "active, unblocking open\n");
- wake_up_interruptible(&port->gs.port.
- open_wait);
- } else {
- sx_dprintk(SX_DEBUG_MODEMSIGNALS, "DCD "
- "raised. Ignoring.\n");
- }
- } else {
- /* DCD went down! */
- if (!(port->gs.port.tty->termios->c_cflag & CLOCAL)){
- sx_dprintk(SX_DEBUG_MODEMSIGNALS, "DCD "
- "dropped. hanging up....\n");
- tty_hangup(port->gs.port.tty);
- } else {
- sx_dprintk(SX_DEBUG_MODEMSIGNALS, "DCD "
- "dropped. ignoring.\n");
- }
- }
- } else {
- sx_dprintk(SX_DEBUG_MODEMSIGNALS, "Hmmm. card told us "
- "DCD changed, but it didn't.\n");
- }
- }
-}
-
-/* This is what an interrupt routine should look like.
- * Small, elegant, clear.
- */
-
-static irqreturn_t sx_interrupt(int irq, void *ptr)
-{
- struct sx_board *board = ptr;
- struct sx_port *port;
- int i;
-
- func_enter();
- sx_dprintk(SX_DEBUG_FLOW, "sx: enter sx_interrupt (%d/%d)\n", irq,
- board->irq);
-
- /* AAargh! The order in which to do these things is essential and
- not trivial.
-
- - Rate limit goes before "recursive". Otherwise a series of
- recursive calls will hang the machine in the interrupt routine.
-
- - hardware twiddling goes before "recursive". Otherwise, when we
- poll the card and a recursive interrupt happens, we won't
- ack the card, so it might keep on interrupting us (especially
- on level-sensitive interrupt systems like PCI).
-
- - Rate limit goes before hardware twiddling. Otherwise we won't
- catch a card that has gone bonkers.
-
- - The "initialized" test goes after the hardware twiddling. Otherwise
- the card will stick us in the interrupt routine again.
-
- - The initialized test goes before recursive.
- */
-
-#ifdef IRQ_RATE_LIMIT
- /* Aaargh! I'm ashamed. This costs more lines-of-code than the
- actual interrupt routine! (Well, it used to when I wrote that
- comment.) */
- {
- static int lastjif;
- static int nintr = 0;
-
- if (lastjif == jiffies) {
- if (++nintr > IRQ_RATE_LIMIT) {
- free_irq(board->irq, board);
- printk(KERN_ERR "sx: Too many interrupts. "
- "Turning off interrupt %d.\n",
- board->irq);
- }
- } else {
- lastjif = jiffies;
- nintr = 0;
- }
- }
-#endif
-
- if (board->irq == irq) {
- /* Tell the card we've noticed the interrupt. */
-
- sx_write_board_word(board, cc_int_pending, 0);
- if (IS_SX_BOARD(board)) {
- write_sx_byte(board, SX_RESET_IRQ, 1);
- } else if (IS_EISA_BOARD(board)) {
- inb(board->eisa_base + 0xc03);
- write_sx_word(board, 8, 0);
- } else {
- write_sx_byte(board, SI2_ISA_INTCLEAR,
- SI2_ISA_INTCLEAR_CLEAR);
- write_sx_byte(board, SI2_ISA_INTCLEAR,
- SI2_ISA_INTCLEAR_SET);
- }
- }
-
- if (!sx_initialized)
- return IRQ_HANDLED;
- if (!(board->flags & SX_BOARD_INITIALIZED))
- return IRQ_HANDLED;
-
- if (test_and_set_bit(SX_BOARD_INTR_LOCK, &board->locks)) {
- printk(KERN_ERR "Recursive interrupt! (%d)\n", board->irq);
- return IRQ_HANDLED;
- }
-
- for (i = 0; i < board->nports; i++) {
- port = &board->ports[i];
- if (port->gs.port.flags & GS_ACTIVE) {
- if (sx_read_channel_byte(port, hi_state)) {
- sx_dprintk(SX_DEBUG_INTERRUPTS, "Port %d: "
- "modem signal change?... \n",i);
- sx_check_modem_signals(port);
- }
- if (port->gs.xmit_cnt) {
- sx_transmit_chars(port);
- }
- if (!(port->gs.port.flags & SX_RX_THROTTLE)) {
- sx_receive_chars(port);
- }
- }
- }
-
- clear_bit(SX_BOARD_INTR_LOCK, &board->locks);
-
- sx_dprintk(SX_DEBUG_FLOW, "sx: exit sx_interrupt (%d/%d)\n", irq,
- board->irq);
- func_exit();
- return IRQ_HANDLED;
-}
-
-static void sx_pollfunc(unsigned long data)
-{
- struct sx_board *board = (struct sx_board *)data;
-
- func_enter();
-
- sx_interrupt(0, board);
-
- mod_timer(&board->timer, jiffies + sx_poll);
- func_exit();
-}
-
-/* ********************************************************************** *
- * Here are the routines that actually *
- * interface with the generic_serial driver *
- * ********************************************************************** */
-
-/* Ehhm. I don't know how to fiddle with interrupts on the SX card. --REW */
-/* Hmm. Ok I figured it out. You don't. */
-
-static void sx_disable_tx_interrupts(void *ptr)
-{
- struct sx_port *port = ptr;
- func_enter2();
-
- port->gs.port.flags &= ~GS_TX_INTEN;
-
- func_exit();
-}
-
-static void sx_enable_tx_interrupts(void *ptr)
-{
- struct sx_port *port = ptr;
- int data_in_buffer;
- func_enter2();
-
- /* First transmit the characters that we're supposed to */
- sx_transmit_chars(port);
-
- /* The sx card will never interrupt us if we don't fill the buffer
- past 25%, so below that level we keep GS_TX_INTEN cleared. */
- data_in_buffer = (sx_read_channel_byte(port, hi_txipos) -
- sx_read_channel_byte(port, hi_txopos)) & 0xff;
-
- /* XXX Must be "HIGH_WATER" for SI card according to doc. */
- if (data_in_buffer < LOW_WATER)
- port->gs.port.flags &= ~GS_TX_INTEN;
-
- func_exit();
-}
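The fill-level test above (and sx_chars_in_buffer() further down) computes the amount of queued data as (hi_txipos - hi_txopos) & 0xff, i.e. the distance between the card's 8-bit buffer indices modulo 256. A minimal stand-alone sketch of that arithmetic, with index values chosen purely for illustration:

#include <stdio.h>

/* Sketch only: txipos/txopos stand in for the card's hi_txipos/hi_txopos bytes. */
static unsigned int fill_level(unsigned char txipos, unsigned char txopos)
{
	/* Masking to 8 bits makes the subtraction work across the wrap at 0xff. */
	return (txipos - txopos) & 0xff;
}

int main(void)
{
	printf("%u\n", fill_level(0x40, 0x10));	/* 48 bytes queued */
	printf("%u\n", fill_level(0x05, 0xf0));	/* 21 bytes: the input index has wrapped */
	return 0;
}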
-
-static void sx_disable_rx_interrupts(void *ptr)
-{
- /* struct sx_port *port = ptr; */
- func_enter();
-
- func_exit();
-}
-
-static void sx_enable_rx_interrupts(void *ptr)
-{
- /* struct sx_port *port = ptr; */
- func_enter();
-
- func_exit();
-}
-
-/* Jeez. Isn't this simple? */
-static int sx_carrier_raised(struct tty_port *port)
-{
- struct sx_port *sp = container_of(port, struct sx_port, gs.port);
- return ((sx_read_channel_byte(sp, hi_ip) & IP_DCD) != 0);
-}
-
-/* Jeez. Isn't this simple? */
-static int sx_chars_in_buffer(void *ptr)
-{
- struct sx_port *port = ptr;
- func_enter2();
-
- func_exit();
- return ((sx_read_channel_byte(port, hi_txipos) -
- sx_read_channel_byte(port, hi_txopos)) & 0xff);
-}
-
-static void sx_shutdown_port(void *ptr)
-{
- struct sx_port *port = ptr;
-
- func_enter();
-
- port->gs.port.flags &= ~GS_ACTIVE;
- if (port->gs.port.tty && (port->gs.port.tty->termios->c_cflag & HUPCL)) {
- sx_setsignals(port, 0, 0);
- sx_reconfigure_port(port);
- }
-
- func_exit();
-}
-
-/* ********************************************************************** *
- * Here are the routines that actually *
- * interface with the rest of the system *
- * ********************************************************************** */
-
-static int sx_open(struct tty_struct *tty, struct file *filp)
-{
- struct sx_port *port;
- int retval, line;
- unsigned long flags;
-
- func_enter();
-
- if (!sx_initialized) {
- return -EIO;
- }
-
- line = tty->index;
- sx_dprintk(SX_DEBUG_OPEN, "%d: opening line %d. tty=%p ctty=%p, "
- "np=%d)\n", task_pid_nr(current), line, tty,
- current->signal->tty, sx_nports);
-
- if ((line < 0) || (line >= SX_NPORTS) || (line >= sx_nports))
- return -ENODEV;
-
- port = &sx_ports[line];
- port->c_dcd = 0; /* Make sure that the first interrupt doesn't detect a
- 1 -> 0 transition. */
-
- sx_dprintk(SX_DEBUG_OPEN, "port = %p c_dcd = %d\n", port, port->c_dcd);
-
- spin_lock_irqsave(&port->gs.driver_lock, flags);
-
- tty->driver_data = port;
- port->gs.port.tty = tty;
- port->gs.port.count++;
- spin_unlock_irqrestore(&port->gs.driver_lock, flags);
-
- sx_dprintk(SX_DEBUG_OPEN, "starting port\n");
-
- /*
- * Start up serial port
- */
- retval = gs_init_port(&port->gs);
- sx_dprintk(SX_DEBUG_OPEN, "done gs_init\n");
- if (retval) {
- port->gs.port.count--;
- return retval;
- }
-
- port->gs.port.flags |= GS_ACTIVE;
- if (port->gs.port.count <= 1)
- sx_setsignals(port, 1, 1);
-
-#if 0
- if (sx_debug & SX_DEBUG_OPEN)
- my_hd(port, sizeof(*port));
-#else
- if (sx_debug & SX_DEBUG_OPEN)
- my_hd_io(port->board->base + port->ch_base, sizeof(*port));
-#endif
-
- if (port->gs.port.count <= 1) {
- if (sx_send_command(port, HS_LOPEN, -1, HS_IDLE_OPEN) != 1) {
- printk(KERN_ERR "sx: Card didn't respond to LOPEN "
- "command.\n");
- spin_lock_irqsave(&port->gs.driver_lock, flags);
- port->gs.port.count--;
- spin_unlock_irqrestore(&port->gs.driver_lock, flags);
- return -EIO;
- }
- }
-
- retval = gs_block_til_ready(port, filp);
- sx_dprintk(SX_DEBUG_OPEN, "Block til ready returned %d. Count=%d\n",
- retval, port->gs.port.count);
-
- if (retval) {
-/*
- * Don't lower gs.port.count here because sx_close() will be called later
- */
-
- return retval;
- }
- /* tty->low_latency = 1; */
-
- port->c_dcd = sx_carrier_raised(&port->gs.port);
- sx_dprintk(SX_DEBUG_OPEN, "at open: cd=%d\n", port->c_dcd);
-
- func_exit();
- return 0;
-
-}
-
-static void sx_close(void *ptr)
-{
- struct sx_port *port = ptr;
- /* Give the port 5 seconds to close down. */
- int to = 5 * HZ;
-
- func_enter();
-
- sx_setsignals(port, 0, 0);
- sx_reconfigure_port(port);
- sx_send_command(port, HS_CLOSE, 0, 0);
-
- while (to-- && (sx_read_channel_byte(port, hi_hstat) != HS_IDLE_CLOSED))
- if (msleep_interruptible(10))
- break;
- if (sx_read_channel_byte(port, hi_hstat) != HS_IDLE_CLOSED) {
- if (sx_send_command(port, HS_FORCE_CLOSED, -1, HS_IDLE_CLOSED)
- != 1) {
- printk(KERN_ERR "sx: sent the force_close command, but "
- "card didn't react\n");
- } else
- sx_dprintk(SX_DEBUG_CLOSE, "sent the force_close "
- "command.\n");
- }
-
- sx_dprintk(SX_DEBUG_CLOSE, "waited %d jiffies for close. count=%d\n",
- 5 * HZ - to - 1, port->gs.port.count);
-
- if (port->gs.port.count) {
- sx_dprintk(SX_DEBUG_CLOSE, "WARNING port count:%d\n",
- port->gs.port.count);
- /*printk("%s SETTING port count to zero: %p count: %d\n",
- __func__, port, port->gs.port.count);
- port->gs.port.count = 0;*/
- }
-
- func_exit();
-}
-
-/* This is relatively thorough. But then again it is only 20 lines. */
-#define MARCHUP for (i = min; i < max; i++)
-#define MARCHDOWN for (i = max - 1; i >= min; i--)
-#define W0 write_sx_byte(board, i, 0x55)
-#define W1 write_sx_byte(board, i, 0xaa)
-#define R0 if (read_sx_byte(board, i) != 0x55) return 1
-#define R1 if (read_sx_byte(board, i) != 0xaa) return 1
-
-/* This memtest takes a human-noticeable time. You normally only do it
- once a boot, so I guess that it is worth it. */
-static int do_memtest(struct sx_board *board, int min, int max)
-{
- int i;
-
- /* This is a marchb. Theoretically, marchb catches much more than
- simpler tests. In practice, the longer test just catches more
- intermittent errors. -- REW
- (For the theory behind memory testing see:
- Testing Semiconductor Memories by A.J. van de Goor.) */
- MARCHUP {
- W0;
- }
- MARCHUP {
- R0;
- W1;
- R1;
- W0;
- R0;
- W1;
- }
- MARCHUP {
- R1;
- W0;
- W1;
- }
- MARCHDOWN {
- R1;
- W0;
- W1;
- W0;
- }
- MARCHDOWN {
- R0;
- W1;
- W0;
- }
-
- return 0;
-}
-
-#undef MARCHUP
-#undef MARCHDOWN
-#undef W0
-#undef W1
-#undef R0
-#undef R1
-
-#define MARCHUP for (i = min; i < max; i += 2)
-#define MARCHDOWN for (i = max - 1; i >= min; i -= 2)
-#define W0 write_sx_word(board, i, 0x55aa)
-#define W1 write_sx_word(board, i, 0xaa55)
-#define R0 if (read_sx_word(board, i) != 0x55aa) return 1
-#define R1 if (read_sx_word(board, i) != 0xaa55) return 1
-
-#if 0
-/* This memtest takes a human-noticeable time. You normally only do it
- once a boot, so I guess that it is worth it. */
-static int do_memtest_w(struct sx_board *board, int min, int max)
-{
- int i;
-
- MARCHUP {
- W0;
- }
- MARCHUP {
- R0;
- W1;
- R1;
- W0;
- R0;
- W1;
- }
- MARCHUP {
- R1;
- W0;
- W1;
- }
- MARCHDOWN {
- R1;
- W0;
- W1;
- W0;
- }
- MARCHDOWN {
- R0;
- W1;
- W0;
- }
-
- return 0;
-}
-#endif
-
-static long sx_fw_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- long rc = 0;
- int __user *descr = (int __user *)arg;
- int i;
- static struct sx_board *board = NULL;
- int nbytes, offset;
- unsigned long data;
- char *tmp;
-
- func_enter();
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- tty_lock();
-
- sx_dprintk(SX_DEBUG_FIRMWARE, "IOCTL %x: %lx\n", cmd, arg);
-
- if (!board)
- board = &boards[0];
- if (board->flags & SX_BOARD_PRESENT) {
- sx_dprintk(SX_DEBUG_FIRMWARE, "Board present! (%x)\n",
- board->flags);
- } else {
- sx_dprintk(SX_DEBUG_FIRMWARE, "Board not present! (%x) all:",
- board->flags);
- for (i = 0; i < SX_NBOARDS; i++)
- sx_dprintk(SX_DEBUG_FIRMWARE, "<%x> ", boards[i].flags);
- sx_dprintk(SX_DEBUG_FIRMWARE, "\n");
- rc = -EIO;
- goto out;
- }
-
- switch (cmd) {
- case SXIO_SET_BOARD:
- sx_dprintk(SX_DEBUG_FIRMWARE, "set board to %ld\n", arg);
- rc = -EIO;
- if (arg >= SX_NBOARDS)
- break;
- sx_dprintk(SX_DEBUG_FIRMWARE, "not out of range\n");
- if (!(boards[arg].flags & SX_BOARD_PRESENT))
- break;
- sx_dprintk(SX_DEBUG_FIRMWARE, ".. and present!\n");
- board = &boards[arg];
- rc = 0;
- /* FIXME: And this does ... nothing?? */
- break;
- case SXIO_GET_TYPE:
- rc = -ENOENT; /* If we manage to miss one, return error. */
- if (IS_SX_BOARD(board))
- rc = SX_TYPE_SX;
- if (IS_CF_BOARD(board))
- rc = SX_TYPE_CF;
- if (IS_SI_BOARD(board))
- rc = SX_TYPE_SI;
- if (IS_SI1_BOARD(board))
- rc = SX_TYPE_SI;
- if (IS_EISA_BOARD(board))
- rc = SX_TYPE_SI;
- sx_dprintk(SX_DEBUG_FIRMWARE, "returning type= %ld\n", rc);
- break;
- case SXIO_DO_RAMTEST:
- if (sx_initialized) { /* Already initialized: better not ramtest the board. */
- rc = -EPERM;
- break;
- }
- if (IS_SX_BOARD(board)) {
- rc = do_memtest(board, 0, 0x7000);
- if (!rc)
- rc = do_memtest(board, 0, 0x7000);
- /*if (!rc) rc = do_memtest_w (board, 0, 0x7000); */
- } else {
- rc = do_memtest(board, 0, 0x7ff8);
- /* if (!rc) rc = do_memtest_w (board, 0, 0x7ff8); */
- }
- sx_dprintk(SX_DEBUG_FIRMWARE,
- "returning memtest result= %ld\n", rc);
- break;
- case SXIO_DOWNLOAD:
- if (sx_initialized) {/* Already initialized */
- rc = -EEXIST;
- break;
- }
- if (!sx_reset(board)) {
- rc = -EIO;
- break;
- }
- sx_dprintk(SX_DEBUG_INIT, "reset the board...\n");
-
- tmp = kmalloc(SX_CHUNK_SIZE, GFP_USER);
- if (!tmp) {
- rc = -ENOMEM;
- break;
- }
- /* FIXME: check returns */
- get_user(nbytes, descr++);
- get_user(offset, descr++);
- get_user(data, descr++);
- while (nbytes && data) {
- for (i = 0; i < nbytes; i += SX_CHUNK_SIZE) {
- if (copy_from_user(tmp, (char __user *)data + i,
- (i + SX_CHUNK_SIZE > nbytes) ?
- nbytes - i : SX_CHUNK_SIZE)) {
- kfree(tmp);
- rc = -EFAULT;
- goto out;
- }
- memcpy_toio(board->base2 + offset + i, tmp,
- (i + SX_CHUNK_SIZE > nbytes) ?
- nbytes - i : SX_CHUNK_SIZE);
- }
-
- get_user(nbytes, descr++);
- get_user(offset, descr++);
- get_user(data, descr++);
- }
- kfree(tmp);
- sx_nports += sx_init_board(board);
- rc = sx_nports;
- break;
- case SXIO_INIT:
- if (sx_initialized) { /* Already initialized */
- rc = -EEXIST;
- break;
- }
- /* This is not allowed until all boards are initialized... */
- for (i = 0; i < SX_NBOARDS; i++) {
- if ((boards[i].flags & SX_BOARD_PRESENT) &&
- !(boards[i].flags & SX_BOARD_INITIALIZED)) {
- rc = -EIO;
- break;
- }
- }
- for (i = 0; i < SX_NBOARDS; i++)
- if (!(boards[i].flags & SX_BOARD_PRESENT))
- break;
-
- sx_dprintk(SX_DEBUG_FIRMWARE, "initing portstructs, %d boards, "
- "%d channels, first board: %d ports\n",
- i, sx_nports, boards[0].nports);
- rc = sx_init_portstructs(i, sx_nports);
- sx_init_drivers();
- if (rc >= 0)
- sx_initialized++;
- break;
- case SXIO_SETDEBUG:
- sx_debug = arg;
- break;
- case SXIO_GETDEBUG:
- rc = sx_debug;
- break;
- case SXIO_GETGSDEBUG:
- case SXIO_SETGSDEBUG:
- rc = -EINVAL;
- break;
- case SXIO_GETNPORTS:
- rc = sx_nports;
- break;
- default:
- rc = -ENOTTY;
- break;
- }
-out:
- tty_unlock();
- func_exit();
- return rc;
-}
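For reference, the SXIO_DOWNLOAD case above walks a user-supplied array of int triples -- byte count, card offset, user buffer address -- and stops at the first triple whose count or address is zero. Below is a hedged sketch of a user-space loader built on that layout; the /dev/specialix_sxctl node name, the buffer size and the hard-coded SXIO_DOWNLOAD value (SPXL(0x03) from sx.h, removed later in this diff) are assumptions for illustration, not taken from any real firmware tool:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define SXIO_DOWNLOAD	0x53504C03	/* == SPXL(0x03) from sx.h */

int main(void)
{
	static unsigned char image[0x7000];	/* firmware image; contents elided */
	int descr[] = {
		(int)sizeof(image),	/* nbytes */
		0x0000,			/* offset into card memory */
		(int)(long)image,	/* user buffer, packed into an int as this ABI does */
		0, 0, 0			/* terminator: nbytes == 0 stops the download loop */
	};
	int fd = open("/dev/specialix_sxctl", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;
	if (ioctl(fd, SXIO_DOWNLOAD, descr) < 0)
		perror("SXIO_DOWNLOAD");
	close(fd);
	return 0;
}

The pointer-in-an-int packing mirrors how the handler reads the descriptor with get_user() on an int __user *, which is why this interface only really made sense for 32-bit user space.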
-
-static int sx_break(struct tty_struct *tty, int flag)
-{
- struct sx_port *port = tty->driver_data;
- int rv;
-
- func_enter();
- tty_lock();
-
- if (flag)
- rv = sx_send_command(port, HS_START, -1, HS_IDLE_BREAK);
- else
- rv = sx_send_command(port, HS_STOP, -1, HS_IDLE_OPEN);
- if (rv != 1)
- printk(KERN_ERR "sx: couldn't send break (%x).\n",
- read_sx_byte(port->board, CHAN_OFFSET(port, hi_hstat)));
- tty_unlock();
- func_exit();
- return 0;
-}
-
-static int sx_tiocmget(struct tty_struct *tty)
-{
- struct sx_port *port = tty->driver_data;
- return sx_getsignals(port);
-}
-
-static int sx_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct sx_port *port = tty->driver_data;
- int rts = -1, dtr = -1;
-
- if (set & TIOCM_RTS)
- rts = 1;
- if (set & TIOCM_DTR)
- dtr = 1;
- if (clear & TIOCM_RTS)
- rts = 0;
- if (clear & TIOCM_DTR)
- dtr = 0;
-
- sx_setsignals(port, dtr, rts);
- sx_reconfigure_port(port);
- return 0;
-}
-
-static int sx_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- int rc;
- struct sx_port *port = tty->driver_data;
- void __user *argp = (void __user *)arg;
-
- /* func_enter2(); */
-
- rc = 0;
- tty_lock();
- switch (cmd) {
- case TIOCGSERIAL:
- rc = gs_getserial(&port->gs, argp);
- break;
- case TIOCSSERIAL:
- rc = gs_setserial(&port->gs, argp);
- break;
- default:
- rc = -ENOIOCTLCMD;
- break;
- }
- tty_unlock();
-
- /* func_exit(); */
- return rc;
-}
-
-/* The throttle/unthrottle scheme for the Specialix card is different
- * from other drivers and deserves some explanation.
- * The Specialix hardware takes care of XON/XOFF
- * and CTS/RTS flow control itself. This means that all we have to
- * do when signalled by the upper tty layer to throttle/unthrottle is
- * to make a note of it here. When we come to read characters from the
- * rx buffers on the card (sx_receive_chars()) we look to see if the
- * upper layer can accept more (as noted here via the SX_RX_THROTTLE flag).
- * If it can't, we simply don't remove chars from the card's buffer.
- * When the tty layer can accept chars, we again note that here and when
- * sx_receive_chars() is called it will remove them from the card's buffer.
- * The card will notice that a port's buffer has drained below some low
- * water mark and will release flow control on the line itself, using
- * whatever flow control scheme is in use for that port. -- Simon Allen
- */
-
-static void sx_throttle(struct tty_struct *tty)
-{
- struct sx_port *port = tty->driver_data;
-
- func_enter2();
- /* If the port is using any type of input flow
- * control then throttle the port.
- */
- if ((tty->termios->c_cflag & CRTSCTS) || (I_IXOFF(tty))) {
- port->gs.port.flags |= SX_RX_THROTTLE;
- }
- func_exit();
-}
-
-static void sx_unthrottle(struct tty_struct *tty)
-{
- struct sx_port *port = tty->driver_data;
-
- func_enter2();
- /* Always unthrottle even if flow control is not enabled on
- * this port in case we disabled flow control while the port
- * was throttled
- */
- port->gs.port.flags &= ~SX_RX_THROTTLE;
- func_exit();
- return;
-}
-
-/* ********************************************************************** *
- * Here are the initialization routines. *
- * ********************************************************************** */
-
-static int sx_init_board(struct sx_board *board)
-{
- int addr;
- int chans;
- int type;
-
- func_enter();
-
- /* This is preceded by downloading the download code. */
-
- board->flags |= SX_BOARD_INITIALIZED;
-
- if (read_sx_byte(board, 0))
- /* CF boards may need this. */
- write_sx_byte(board, 0, 0);
-
- /* This resets the processor again, to make sure it didn't do any
- foolish things while we were downloading the image */
- if (!sx_reset(board))
- return 0;
-
- sx_start_board(board);
- udelay(10);
- if (!sx_busy_wait_neq(board, 0, 0xff, 0)) {
- printk(KERN_ERR "sx: Ooops. Board won't initialize.\n");
- return 0;
- }
-
- /* Ok. So now the processor on the card is running. It gathered
- some info for us... */
- sx_dprintk(SX_DEBUG_INIT, "The sxcard structure:\n");
- if (sx_debug & SX_DEBUG_INIT)
- my_hd_io(board->base, 0x10);
- sx_dprintk(SX_DEBUG_INIT, "the first sx_module structure:\n");
- if (sx_debug & SX_DEBUG_INIT)
- my_hd_io(board->base + 0x80, 0x30);
-
- sx_dprintk(SX_DEBUG_INIT, "init_status: %x, %dk memory, firmware "
- "V%x.%02x,\n",
- read_sx_byte(board, 0), read_sx_byte(board, 1),
- read_sx_byte(board, 5), read_sx_byte(board, 4));
-
- if (read_sx_byte(board, 0) == 0xff) {
- printk(KERN_INFO "sx: No modules found. Sorry.\n");
- board->nports = 0;
- return 0;
- }
-
- chans = 0;
-
- if (IS_SX_BOARD(board)) {
- sx_write_board_word(board, cc_int_count, sx_maxints);
- } else {
- if (sx_maxints)
- sx_write_board_word(board, cc_int_count,
- SI_PROCESSOR_CLOCK / 8 / sx_maxints);
- }
-
- /* grab the first module type... */
- /* board->ta_type = mod_compat_type (read_sx_byte (board, 0x80 + 0x08)); */
- board->ta_type = mod_compat_type(sx_read_module_byte(board, 0x80,
- mc_chip));
-
- /* XXX byteorder */
- for (addr = 0x80; addr != 0; addr = read_sx_word(board, addr) & 0x7fff){
- type = sx_read_module_byte(board, addr, mc_chip);
- sx_dprintk(SX_DEBUG_INIT, "Module at %x: %d channels\n",
- addr, read_sx_byte(board, addr + 2));
-
- chans += sx_read_module_byte(board, addr, mc_type);
-
- sx_dprintk(SX_DEBUG_INIT, "module is an %s, which has %s/%s "
- "panels\n",
- mod_type_s(type),
- pan_type_s(sx_read_module_byte(board, addr,
- mc_mods) & 0xf),
- pan_type_s(sx_read_module_byte(board, addr,
- mc_mods) >> 4));
-
- sx_dprintk(SX_DEBUG_INIT, "CD1400 versions: %x/%x, ASIC "
- "version: %x\n",
- sx_read_module_byte(board, addr, mc_rev1),
- sx_read_module_byte(board, addr, mc_rev2),
- sx_read_module_byte(board, addr, mc_mtaasic_rev));
-
- /* The following combinations are illegal: they should theoretically
- work, but timing problems make the bus HANG. */
-
- if (mod_compat_type(type) != board->ta_type) {
- printk(KERN_ERR "sx: This is an invalid "
- "configuration.\nDon't mix TA/MTA/SXDC on the "
- "same host adapter.\n");
- chans = 0;
- break;
- }
- if ((IS_EISA_BOARD(board) ||
- IS_SI_BOARD(board)) &&
- (mod_compat_type(type) == 4)) {
- printk(KERN_ERR "sx: This is an invalid "
- "configuration.\nDon't use SXDCs on an SI/XIO "
- "adapter.\n");
- chans = 0;
- break;
- }
-#if 0 /* Problem fixed: firmware 3.05 */
- if (IS_SX_BOARD(board) && (type == TA8)) {
- /* There are some issues with the firmware and the DCD/RTS
- lines. It might work if you tie them together or something.
- It might also work if you get a newer sx_firmware. Therefore
- this is just a warning. */
- printk(KERN_WARNING
- "sx: The SX host doesn't work too well "
- "with the TA8 adapters.\nSpecialix is working on it.\n");
- }
-#endif
- }
-
- if (chans) {
- if (board->irq > 0) {
- /* fixed irq, probably PCI */
- if (sx_irqmask & (1 << board->irq)) { /* may we use this irq? */
- if (request_irq(board->irq, sx_interrupt,
- IRQF_SHARED | IRQF_DISABLED,
- "sx", board)) {
- printk(KERN_ERR "sx: Cannot allocate "
- "irq %d.\n", board->irq);
- board->irq = 0;
- }
- } else
- board->irq = 0;
- } else if (board->irq < 0 && sx_irqmask) {
- /* auto-allocate irq */
- int irqnr;
- int irqmask = sx_irqmask & (IS_SX_BOARD(board) ?
- SX_ISA_IRQ_MASK : SI2_ISA_IRQ_MASK);
- for (irqnr = 15; irqnr > 0; irqnr--)
- if (irqmask & (1 << irqnr))
- if (!request_irq(irqnr, sx_interrupt,
- IRQF_SHARED | IRQF_DISABLED,
- "sx", board))
- break;
- if (!irqnr)
- printk(KERN_ERR "sx: Cannot allocate IRQ.\n");
- board->irq = irqnr;
- } else
- board->irq = 0;
-
- if (board->irq) {
- /* Found a valid interrupt, start up interrupts! */
- sx_dprintk(SX_DEBUG_INIT, "Using irq %d.\n",
- board->irq);
- sx_start_interrupts(board);
- board->poll = sx_slowpoll;
- board->flags |= SX_IRQ_ALLOCATED;
- } else {
- /* no irq: setup board for polled operation */
- board->poll = sx_poll;
- sx_dprintk(SX_DEBUG_INIT, "Using poll-interval %d.\n",
- board->poll);
- }
-
- /* The timer should be initialized anyway: That way we can
- safely del_timer it when the module is unloaded. */
- setup_timer(&board->timer, sx_pollfunc, (unsigned long)board);
-
- if (board->poll)
- mod_timer(&board->timer, jiffies + board->poll);
- } else {
- board->irq = 0;
- }
-
- board->nports = chans;
- sx_dprintk(SX_DEBUG_INIT, "returning %d ports.", board->nports);
-
- func_exit();
- return chans;
-}
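One detail of the interrupt pacing set up in this function: for the SI/XIO cards (the else branch above), when sx_maxints is non-zero the value written to cc_int_count is SI_PROCESSOR_CLOCK / 8 / sx_maxints, so with the 25 MHz clock from sx.h and an illustrative sx_maxints of 100 the card counts 31250 ticks between interrupts. A one-line check of that arithmetic (the sx_maxints value is an assumption, not the driver default):

#include <stdio.h>

#define SI_PROCESSOR_CLOCK	25000000	/* from sx.h */

int main(void)
{
	int sx_maxints = 100;	/* illustrative value of the module parameter */

	printf("cc_int_count = %d\n", SI_PROCESSOR_CLOCK / 8 / sx_maxints);	/* 31250 */
	return 0;
}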
-
-static void __devinit printheader(void)
-{
- static int header_printed;
-
- if (!header_printed) {
- printk(KERN_INFO "Specialix SX driver "
- "(C) 1998/1999 R.E.Wolff@BitWizard.nl\n");
- printk(KERN_INFO "sx: version " __stringify(SX_VERSION) "\n");
- header_printed = 1;
- }
-}
-
-static int __devinit probe_sx(struct sx_board *board)
-{
- struct vpd_prom vpdp;
- char *p;
- int i;
-
- func_enter();
-
- if (!IS_CF_BOARD(board)) {
- sx_dprintk(SX_DEBUG_PROBE, "Going to verify vpd prom at %p.\n",
- board->base + SX_VPD_ROM);
-
- if (sx_debug & SX_DEBUG_PROBE)
- my_hd_io(board->base + SX_VPD_ROM, 0x40);
-
- p = (char *)&vpdp;
- for (i = 0; i < sizeof(struct vpd_prom); i++)
- *p++ = read_sx_byte(board, SX_VPD_ROM + i * 2);
-
- if (sx_debug & SX_DEBUG_PROBE)
- my_hd(&vpdp, 0x20);
-
- sx_dprintk(SX_DEBUG_PROBE, "checking identifier...\n");
-
- if (strncmp(vpdp.identifier, SX_VPD_IDENT_STRING, 16) != 0) {
- sx_dprintk(SX_DEBUG_PROBE, "Got non-SX identifier: "
- "'%s'\n", vpdp.identifier);
- return 0;
- }
- }
-
- printheader();
-
- if (!IS_CF_BOARD(board)) {
- printk(KERN_DEBUG "sx: Found an SX board at %lx\n",
- board->hw_base);
- printk(KERN_DEBUG "sx: hw_rev: %d, assembly level: %d, "
- "uniq ID:%08x, ",
- vpdp.hwrev, vpdp.hwass, vpdp.uniqid);
- printk("Manufactured: %d/%d\n", 1970 + vpdp.myear, vpdp.mweek);
-
- if ((((vpdp.uniqid >> 24) & SX_UNIQUEID_MASK) !=
- SX_PCI_UNIQUEID1) && (((vpdp.uniqid >> 24) &
- SX_UNIQUEID_MASK) != SX_ISA_UNIQUEID1)) {
- /* This might be a bit harsh. This was the primary
- reason the SX/ISA card didn't work at first... */
- printk(KERN_ERR "sx: Hmm. Not an SX/PCI or SX/ISA "
- "card. Sorry: giving up.\n");
- return 0;
- }
-
- if (((vpdp.uniqid >> 24) & SX_UNIQUEID_MASK) ==
- SX_ISA_UNIQUEID1) {
- if (((unsigned long)board->hw_base) & 0x8000) {
- printk(KERN_WARNING "sx: Warning: There may be "
- "hardware problems with the card at "
- "%lx.\n", board->hw_base);
- printk(KERN_WARNING "sx: Read sx.txt for more "
- "info.\n");
- }
- }
- }
-
- board->nports = -1;
-
- /* This resets the processor, and keeps it off the bus. */
- if (!sx_reset(board))
- return 0;
- sx_dprintk(SX_DEBUG_INIT, "reset the board...\n");
-
- func_exit();
- return 1;
-}
-
-#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
-
-/* Specialix probes for this card at 32k increments from 640k to 16M.
- I consider machines with less than 16M unlikely nowadays, so I'm
- not probing above 1Mb. Also, 0xa0000 and 0xb0000 are taken by the VGA
- card, and 0xe0000 and 0xf0000 are taken by the BIOS. That only leaves
- 0xc0000, 0xc8000, 0xd0000 and 0xd8000. */
-
-static int __devinit probe_si(struct sx_board *board)
-{
- int i;
-
- func_enter();
- sx_dprintk(SX_DEBUG_PROBE, "Going to verify SI signature hw %lx at "
- "%p.\n", board->hw_base, board->base + SI2_ISA_ID_BASE);
-
- if (sx_debug & SX_DEBUG_PROBE)
- my_hd_io(board->base + SI2_ISA_ID_BASE, 0x8);
-
- if (!IS_EISA_BOARD(board)) {
- if (IS_SI1_BOARD(board)) {
- for (i = 0; i < 8; i++) {
- write_sx_byte(board, SI2_ISA_ID_BASE + 7 - i,i);
- }
- }
- for (i = 0; i < 8; i++) {
- if ((read_sx_byte(board, SI2_ISA_ID_BASE + 7 - i) & 7)
- != i) {
- func_exit();
- return 0;
- }
- }
- }
-
- /* Now we're pretty much convinced that there is an SI board here,
- but to prevent trouble, we'd better double check that we don't
- have an SI1 board when we're probing for an SI2 board.... */
-
- write_sx_byte(board, SI2_ISA_ID_BASE, 0x10);
- if (IS_SI1_BOARD(board)) {
- /* This should be an SI1 board, which has this
- location writable... */
- if (read_sx_byte(board, SI2_ISA_ID_BASE) != 0x10) {
- func_exit();
- return 0;
- }
- } else {
- /* This should be an SI2 board, which has the bottom
- 3 bits non-writable... */
- if (read_sx_byte(board, SI2_ISA_ID_BASE) == 0x10) {
- func_exit();
- return 0;
- }
- }
-
- printheader();
-
- printk(KERN_DEBUG "sx: Found an SI board at %lx\n", board->hw_base);
- /* Compared to the SX boards, it is a complete guess as to what
- this card is up to... */
-
- board->nports = -1;
-
- /* This resets the processor, and keeps it off the bus. */
- if (!sx_reset(board))
- return 0;
- sx_dprintk(SX_DEBUG_INIT, "reset the board...\n");
-
- func_exit();
- return 1;
-}
-#endif
-
-static const struct tty_operations sx_ops = {
- .break_ctl = sx_break,
- .open = sx_open,
- .close = gs_close,
- .write = gs_write,
- .put_char = gs_put_char,
- .flush_chars = gs_flush_chars,
- .write_room = gs_write_room,
- .chars_in_buffer = gs_chars_in_buffer,
- .flush_buffer = gs_flush_buffer,
- .ioctl = sx_ioctl,
- .throttle = sx_throttle,
- .unthrottle = sx_unthrottle,
- .set_termios = gs_set_termios,
- .stop = gs_stop,
- .start = gs_start,
- .hangup = gs_hangup,
- .tiocmget = sx_tiocmget,
- .tiocmset = sx_tiocmset,
-};
-
-static const struct tty_port_operations sx_port_ops = {
- .carrier_raised = sx_carrier_raised,
-};
-
-static int sx_init_drivers(void)
-{
- int error;
-
- func_enter();
-
- sx_driver = alloc_tty_driver(sx_nports);
- if (!sx_driver)
- return 1;
- sx_driver->owner = THIS_MODULE;
- sx_driver->driver_name = "specialix_sx";
- sx_driver->name = "ttyX";
- sx_driver->major = SX_NORMAL_MAJOR;
- sx_driver->type = TTY_DRIVER_TYPE_SERIAL;
- sx_driver->subtype = SERIAL_TYPE_NORMAL;
- sx_driver->init_termios = tty_std_termios;
- sx_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- sx_driver->init_termios.c_ispeed = 9600;
- sx_driver->init_termios.c_ospeed = 9600;
- sx_driver->flags = TTY_DRIVER_REAL_RAW;
- tty_set_operations(sx_driver, &sx_ops);
-
- if ((error = tty_register_driver(sx_driver))) {
- put_tty_driver(sx_driver);
- printk(KERN_ERR "sx: Couldn't register sx driver, error = %d\n",
- error);
- return 1;
- }
- func_exit();
- return 0;
-}
-
-static int sx_init_portstructs(int nboards, int nports)
-{
- struct sx_board *board;
- struct sx_port *port;
- int i, j;
- int addr, chans;
- int portno;
-
- func_enter();
-
- /* Many drivers statically allocate the maximum number of ports.
- There is no reason not to allocate them dynamically.
- Is there? -- REW */
- sx_ports = kcalloc(nports, sizeof(struct sx_port), GFP_KERNEL);
- if (!sx_ports)
- return -ENOMEM;
-
- port = sx_ports;
- for (i = 0; i < nboards; i++) {
- board = &boards[i];
- board->ports = port;
- for (j = 0; j < boards[i].nports; j++) {
- sx_dprintk(SX_DEBUG_INIT, "initing port %d\n", j);
- tty_port_init(&port->gs.port);
- port->gs.port.ops = &sx_port_ops;
- port->gs.magic = SX_MAGIC;
- port->gs.close_delay = HZ / 2;
- port->gs.closing_wait = 30 * HZ;
- port->board = board;
- port->gs.rd = &sx_real_driver;
-#ifdef NEW_WRITE_LOCKING
- port->gs.port_write_mutex = MUTEX;
-#endif
- spin_lock_init(&port->gs.driver_lock);
- /*
- * Initializing wait queue
- */
- port++;
- }
- }
-
- port = sx_ports;
- portno = 0;
- for (i = 0; i < nboards; i++) {
- board = &boards[i];
- board->port_base = portno;
- /* Possibly the configuration was rejected. */
- sx_dprintk(SX_DEBUG_PROBE, "Board has %d channels\n",
- board->nports);
- if (board->nports <= 0)
- continue;
- /* XXX byteorder ?? */
- for (addr = 0x80; addr != 0;
- addr = read_sx_word(board, addr) & 0x7fff) {
- chans = sx_read_module_byte(board, addr, mc_type);
- sx_dprintk(SX_DEBUG_PROBE, "Module at %x: %d "
- "channels\n", addr, chans);
- sx_dprintk(SX_DEBUG_PROBE, "Port at");
- for (j = 0; j < chans; j++) {
- /* The "sx-way" is the way it SHOULD be done.
- That way in the future, the firmware may for
- example pack the structures a bit more
- efficiently. Neil tells me it isn't going to
- happen anytime soon though. */
- if (IS_SX_BOARD(board))
- port->ch_base = sx_read_module_word(
- board, addr + j * 2,
- mc_chan_pointer);
- else
- port->ch_base = addr + 0x100 + 0x300 *j;
-
- sx_dprintk(SX_DEBUG_PROBE, " %x",
- port->ch_base);
- port->line = portno++;
- port++;
- }
- sx_dprintk(SX_DEBUG_PROBE, "\n");
- }
- /* This has to be done earlier. */
- /* board->flags |= SX_BOARD_INITIALIZED; */
- }
-
- func_exit();
- return 0;
-}
-
-static unsigned int sx_find_free_board(void)
-{
- unsigned int i;
-
- for (i = 0; i < SX_NBOARDS; i++)
- if (!(boards[i].flags & SX_BOARD_PRESENT))
- break;
-
- return i;
-}
-
-static void __exit sx_release_drivers(void)
-{
- func_enter();
- tty_unregister_driver(sx_driver);
- put_tty_driver(sx_driver);
- func_exit();
-}
-
-static void __devexit sx_remove_card(struct sx_board *board,
- struct pci_dev *pdev)
-{
- if (board->flags & SX_BOARD_INITIALIZED) {
- /* The board should stop messing with us. (actually I mean the
- interrupt) */
- sx_reset(board);
- if ((board->irq) && (board->flags & SX_IRQ_ALLOCATED))
- free_irq(board->irq, board);
-
- /* It is safe/allowed to del_timer a non-active timer */
- del_timer(&board->timer);
- if (pdev) {
-#ifdef CONFIG_PCI
- iounmap(board->base2);
- pci_release_region(pdev, IS_CF_BOARD(board) ? 3 : 2);
-#endif
- } else {
- iounmap(board->base);
- release_region(board->hw_base, board->hw_len);
- }
-
- board->flags &= ~(SX_BOARD_INITIALIZED | SX_BOARD_PRESENT);
- }
-}
-
-#ifdef CONFIG_EISA
-
-static int __devinit sx_eisa_probe(struct device *dev)
-{
- struct eisa_device *edev = to_eisa_device(dev);
- struct sx_board *board;
- unsigned long eisa_slot = edev->base_addr;
- unsigned int i;
- int retval = -EIO;
-
- mutex_lock(&sx_boards_lock);
- i = sx_find_free_board();
- if (i == SX_NBOARDS) {
- mutex_unlock(&sx_boards_lock);
- goto err;
- }
- board = &boards[i];
- board->flags |= SX_BOARD_PRESENT;
- mutex_unlock(&sx_boards_lock);
-
- dev_info(dev, "XIO : Signature found in EISA slot %lu, "
- "Product %d Rev %d (REPORT THIS TO LKML)\n",
- eisa_slot >> 12,
- inb(eisa_slot + EISA_VENDOR_ID_OFFSET + 2),
- inb(eisa_slot + EISA_VENDOR_ID_OFFSET + 3));
-
- board->eisa_base = eisa_slot;
- board->flags &= ~SX_BOARD_TYPE;
- board->flags |= SI_EISA_BOARD;
-
- board->hw_base = ((inb(eisa_slot + 0xc01) << 8) +
- inb(eisa_slot + 0xc00)) << 16;
- board->hw_len = SI2_EISA_WINDOW_LEN;
- if (!request_region(board->hw_base, board->hw_len, "sx")) {
- dev_err(dev, "can't request region\n");
- goto err_flag;
- }
- board->base2 =
- board->base = ioremap_nocache(board->hw_base, SI2_EISA_WINDOW_LEN);
- if (!board->base) {
- dev_err(dev, "can't remap memory\n");
- goto err_reg;
- }
-
- sx_dprintk(SX_DEBUG_PROBE, "IO hw_base address: %lx\n", board->hw_base);
- sx_dprintk(SX_DEBUG_PROBE, "base: %p\n", board->base);
- board->irq = inb(eisa_slot + 0xc02) >> 4;
- sx_dprintk(SX_DEBUG_PROBE, "IRQ: %d\n", board->irq);
-
- if (!probe_si(board))
- goto err_unmap;
-
- dev_set_drvdata(dev, board);
-
- return 0;
-err_unmap:
- iounmap(board->base);
-err_reg:
- release_region(board->hw_base, board->hw_len);
-err_flag:
- board->flags &= ~SX_BOARD_PRESENT;
-err:
- return retval;
-}
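The window base computed above places the byte read from eisa_slot + 0xc01 in bits 24-31 of hw_base and the byte from eisa_slot + 0xc00 in bits 16-23. A tiny worked example with made-up register values:

#include <stdio.h>

int main(void)
{
	unsigned int hi = 0x00, lo = 0xc8;	/* illustrative inb(0xc01) / inb(0xc00) results */
	unsigned long hw_base = ((hi << 8) + lo) << 16;

	printf("hw_base = 0x%08lx\n", hw_base);	/* 0x00c80000 */
	return 0;
}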
-
-static int __devexit sx_eisa_remove(struct device *dev)
-{
- struct sx_board *board = dev_get_drvdata(dev);
-
- sx_remove_card(board, NULL);
-
- return 0;
-}
-
-static struct eisa_device_id sx_eisa_tbl[] = {
- { "SLX" },
- { "" }
-};
-
-MODULE_DEVICE_TABLE(eisa, sx_eisa_tbl);
-
-static struct eisa_driver sx_eisadriver = {
- .id_table = sx_eisa_tbl,
- .driver = {
- .name = "sx",
- .probe = sx_eisa_probe,
- .remove = __devexit_p(sx_eisa_remove),
- }
-};
-
-#endif
-
-#ifdef CONFIG_PCI
- /********************************************************
- * Setting bit 17 in the CNTRL register of the PLX 9050 *
- * chip forces a retry on writes while a read is pending.*
- * This is to prevent the card locking up on Intel Xeon *
- * multiprocessor systems with the NX chipset. -- NV *
- ********************************************************/
-
-/* Newer cards are produced with this bit set from the configuration
- EEPROM. As the bit is read/write for the CPU, we can fix it here
- if we detect that it isn't set correctly. -- REW */
-
-static void __devinit fix_sx_pci(struct pci_dev *pdev, struct sx_board *board)
-{
- unsigned int hwbase;
- void __iomem *rebase;
- unsigned int t;
-
-#define CNTRL_REG_OFFSET 0x50
-#define CNTRL_REG_GOODVALUE 0x18260000
-
- pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &hwbase);
- hwbase &= PCI_BASE_ADDRESS_MEM_MASK;
- rebase = ioremap_nocache(hwbase, 0x80);
- t = readl(rebase + CNTRL_REG_OFFSET);
- if (t != CNTRL_REG_GOODVALUE) {
- printk(KERN_DEBUG "sx: performing cntrl reg fix: %08x -> "
- "%08x\n", t, CNTRL_REG_GOODVALUE);
- writel(CNTRL_REG_GOODVALUE, rebase + CNTRL_REG_OFFSET);
- }
- iounmap(rebase);
-}
-#endif
-
-static int __devinit sx_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
-#ifdef CONFIG_PCI
- struct sx_board *board;
- unsigned int i, reg;
- int retval = -EIO;
-
- mutex_lock(&sx_boards_lock);
- i = sx_find_free_board();
- if (i == SX_NBOARDS) {
- mutex_unlock(&sx_boards_lock);
- goto err;
- }
- board = &boards[i];
- board->flags |= SX_BOARD_PRESENT;
- mutex_unlock(&sx_boards_lock);
-
- retval = pci_enable_device(pdev);
- if (retval)
- goto err_flag;
-
- board->flags &= ~SX_BOARD_TYPE;
- board->flags |= (pdev->subsystem_vendor == 0x200) ? SX_PCI_BOARD :
- SX_CFPCI_BOARD;
-
- /* CF boards use base address 3.... */
- reg = IS_CF_BOARD(board) ? 3 : 2;
- retval = pci_request_region(pdev, reg, "sx");
- if (retval) {
- dev_err(&pdev->dev, "can't request region\n");
- goto err_flag;
- }
- board->hw_base = pci_resource_start(pdev, reg);
- board->base2 =
- board->base = ioremap_nocache(board->hw_base, WINDOW_LEN(board));
- if (!board->base) {
- dev_err(&pdev->dev, "ioremap failed\n");
- goto err_reg;
- }
-
- /* Most of the stuff on the CF board is offset by 0x18000 .... */
- if (IS_CF_BOARD(board))
- board->base += 0x18000;
-
- board->irq = pdev->irq;
-
- dev_info(&pdev->dev, "Got a specialix card: %p(%d) %x.\n", board->base,
- board->irq, board->flags);
-
- if (!probe_sx(board)) {
- retval = -EIO;
- goto err_unmap;
- }
-
- fix_sx_pci(pdev, board);
-
- pci_set_drvdata(pdev, board);
-
- return 0;
-err_unmap:
- iounmap(board->base2);
-err_reg:
- pci_release_region(pdev, reg);
-err_flag:
- board->flags &= ~SX_BOARD_PRESENT;
-err:
- return retval;
-#else
- return -ENODEV;
-#endif
-}
-
-static void __devexit sx_pci_remove(struct pci_dev *pdev)
-{
- struct sx_board *board = pci_get_drvdata(pdev);
-
- sx_remove_card(board, pdev);
-}
-
-/* Specialix has a whole bunch of cards with 0x2000 as the device ID. They say
- it's because the standard requires it. So check for SUBVENDOR_ID. */
-static struct pci_device_id sx_pci_tbl[] = {
- { PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8,
- .subvendor = PCI_ANY_ID, .subdevice = 0x0200 },
- { PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8,
- .subvendor = PCI_ANY_ID, .subdevice = 0x0300 },
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, sx_pci_tbl);
-
-static struct pci_driver sx_pcidriver = {
- .name = "sx",
- .id_table = sx_pci_tbl,
- .probe = sx_pci_probe,
- .remove = __devexit_p(sx_pci_remove)
-};
-
-static int __init sx_init(void)
-{
-#ifdef CONFIG_EISA
- int retval1;
-#endif
-#ifdef CONFIG_ISA
- struct sx_board *board;
- unsigned int i;
-#endif
- unsigned int found = 0;
- int retval;
-
- func_enter();
- sx_dprintk(SX_DEBUG_INIT, "Initing sx module... (sx_debug=%d)\n",
- sx_debug);
- if (abs((long)(&sx_debug) - sx_debug) < 0x10000) {
- printk(KERN_WARNING "sx: sx_debug is an address, instead of a "
- "value. Assuming -1.\n(%p)\n", &sx_debug);
- sx_debug = -1;
- }
-
- if (misc_register(&sx_fw_device) < 0) {
- printk(KERN_ERR "SX: Unable to register firmware loader "
- "driver.\n");
- return -EIO;
- }
-#ifdef CONFIG_ISA
- for (i = 0; i < NR_SX_ADDRS; i++) {
- board = &boards[found];
- board->hw_base = sx_probe_addrs[i];
- board->hw_len = SX_WINDOW_LEN;
- if (!request_region(board->hw_base, board->hw_len, "sx"))
- continue;
- board->base2 =
- board->base = ioremap_nocache(board->hw_base, board->hw_len);
- if (!board->base)
- goto err_sx_reg;
- board->flags &= ~SX_BOARD_TYPE;
- board->flags |= SX_ISA_BOARD;
- board->irq = sx_irqmask ? -1 : 0;
-
- if (probe_sx(board)) {
- board->flags |= SX_BOARD_PRESENT;
- found++;
- } else {
- iounmap(board->base);
-err_sx_reg:
- release_region(board->hw_base, board->hw_len);
- }
- }
-
- for (i = 0; i < NR_SI_ADDRS; i++) {
- board = &boards[found];
- board->hw_base = si_probe_addrs[i];
- board->hw_len = SI2_ISA_WINDOW_LEN;
- if (!request_region(board->hw_base, board->hw_len, "sx"))
- continue;
- board->base2 =
- board->base = ioremap_nocache(board->hw_base, board->hw_len);
- if (!board->base)
- goto err_si_reg;
- board->flags &= ~SX_BOARD_TYPE;
- board->flags |= SI_ISA_BOARD;
- board->irq = sx_irqmask ? -1 : 0;
-
- if (probe_si(board)) {
- board->flags |= SX_BOARD_PRESENT;
- found++;
- } else {
- iounmap(board->base);
-err_si_reg:
- release_region(board->hw_base, board->hw_len);
- }
- }
- for (i = 0; i < NR_SI1_ADDRS; i++) {
- board = &boards[found];
- board->hw_base = si1_probe_addrs[i];
- board->hw_len = SI1_ISA_WINDOW_LEN;
- if (!request_region(board->hw_base, board->hw_len, "sx"))
- continue;
- board->base2 =
- board->base = ioremap_nocache(board->hw_base, board->hw_len);
- if (!board->base)
- goto err_si1_reg;
- board->flags &= ~SX_BOARD_TYPE;
- board->flags |= SI1_ISA_BOARD;
- board->irq = sx_irqmask ? -1 : 0;
-
- if (probe_si(board)) {
- board->flags |= SX_BOARD_PRESENT;
- found++;
- } else {
- iounmap(board->base);
-err_si1_reg:
- release_region(board->hw_base, board->hw_len);
- }
- }
-#endif
-#ifdef CONFIG_EISA
- retval1 = eisa_driver_register(&sx_eisadriver);
-#endif
- retval = pci_register_driver(&sx_pcidriver);
-
- if (found) {
- printk(KERN_INFO "sx: total of %d boards detected.\n", found);
- retval = 0;
- } else if (retval) {
-#ifdef CONFIG_EISA
- retval = retval1;
- if (retval1)
-#endif
- misc_deregister(&sx_fw_device);
- }
-
- func_exit();
- return retval;
-}
-
-static void __exit sx_exit(void)
-{
- int i;
-
- func_enter();
-#ifdef CONFIG_EISA
- eisa_driver_unregister(&sx_eisadriver);
-#endif
- pci_unregister_driver(&sx_pcidriver);
-
- for (i = 0; i < SX_NBOARDS; i++)
- sx_remove_card(&boards[i], NULL);
-
- if (misc_deregister(&sx_fw_device) < 0) {
- printk(KERN_INFO "sx: couldn't deregister firmware loader "
- "device\n");
- }
- sx_dprintk(SX_DEBUG_CLEANUP, "Cleaning up drivers (%d)\n",
- sx_initialized);
- if (sx_initialized)
- sx_release_drivers();
-
- kfree(sx_ports);
- func_exit();
-}
-
-module_init(sx_init);
-module_exit(sx_exit);
diff --git a/drivers/staging/generic_serial/sx.h b/drivers/staging/generic_serial/sx.h
deleted file mode 100644
index 87c2defdead..00000000000
--- a/drivers/staging/generic_serial/sx.h
+++ /dev/null
@@ -1,201 +0,0 @@
-
-/*
- * sx.h
- *
- * Copyright (C) 1998/1999 R.E.Wolff@BitWizard.nl
- *
- * SX serial driver.
- * -- Supports SI, XIO and SX host cards.
- * -- Supports TAs, MTAs and SXDCs.
- *
- * Version 1.3 -- March, 1999.
- *
- */
-
-#define SX_NBOARDS 4
-#define SX_PORTSPERBOARD 32
-#define SX_NPORTS (SX_NBOARDS * SX_PORTSPERBOARD)
-
-#ifdef __KERNEL__
-
-#define SX_MAGIC 0x12345678
-
-struct sx_port {
- struct gs_port gs;
- struct wait_queue *shutdown_wait;
- int ch_base;
- int c_dcd;
- struct sx_board *board;
- int line;
- unsigned long locks;
-};
-
-struct sx_board {
- int magic;
- void __iomem *base;
- void __iomem *base2;
- unsigned long hw_base;
- resource_size_t hw_len;
- int eisa_base;
- int port_base; /* Number of the first port */
- struct sx_port *ports;
- int nports;
- int flags;
- int irq;
- int poll;
- int ta_type;
- struct timer_list timer;
- unsigned long locks;
-};
-
-struct vpd_prom {
- unsigned short id;
- char hwrev;
- char hwass;
- int uniqid;
- char myear;
- char mweek;
- char hw_feature[5];
- char oem_id;
- char identifier[16];
-};
-
-#ifndef MOD_RS232DB25MALE
-#define MOD_RS232DB25MALE 0x0a
-#endif
-
-#define SI_ISA_BOARD 0x00000001
-#define SX_ISA_BOARD 0x00000002
-#define SX_PCI_BOARD 0x00000004
-#define SX_CFPCI_BOARD 0x00000008
-#define SX_CFISA_BOARD 0x00000010
-#define SI_EISA_BOARD 0x00000020
-#define SI1_ISA_BOARD 0x00000040
-
-#define SX_BOARD_PRESENT 0x00001000
-#define SX_BOARD_INITIALIZED 0x00002000
-#define SX_IRQ_ALLOCATED 0x00004000
-
-#define SX_BOARD_TYPE 0x000000ff
-
-#define IS_SX_BOARD(board) (board->flags & (SX_PCI_BOARD | SX_CFPCI_BOARD | \
- SX_ISA_BOARD | SX_CFISA_BOARD))
-
-#define IS_SI_BOARD(board) (board->flags & SI_ISA_BOARD)
-#define IS_SI1_BOARD(board) (board->flags & SI1_ISA_BOARD)
-
-#define IS_EISA_BOARD(board) (board->flags & SI_EISA_BOARD)
-
-#define IS_CF_BOARD(board) (board->flags & (SX_CFISA_BOARD | SX_CFPCI_BOARD))
-
-/* The SI processor clock is required to calculate the cc_int_count register
- value for the SI cards. */
-#define SI_PROCESSOR_CLOCK 25000000
-
-
-/* port flags */
-/* Make sure these don't clash with gs flags or async flags */
-#define SX_RX_THROTTLE 0x0000001
-
-
-
-#define SX_PORT_TRANSMIT_LOCK 0
-#define SX_BOARD_INTR_LOCK 0
-
-
-
-/* Debug flags. Add these together to get more debug info. */
-
-#define SX_DEBUG_OPEN 0x00000001
-#define SX_DEBUG_SETTING 0x00000002
-#define SX_DEBUG_FLOW 0x00000004
-#define SX_DEBUG_MODEMSIGNALS 0x00000008
-#define SX_DEBUG_TERMIOS 0x00000010
-#define SX_DEBUG_TRANSMIT 0x00000020
-#define SX_DEBUG_RECEIVE 0x00000040
-#define SX_DEBUG_INTERRUPTS 0x00000080
-#define SX_DEBUG_PROBE 0x00000100
-#define SX_DEBUG_INIT 0x00000200
-#define SX_DEBUG_CLEANUP 0x00000400
-#define SX_DEBUG_CLOSE 0x00000800
-#define SX_DEBUG_FIRMWARE 0x00001000
-#define SX_DEBUG_MEMTEST 0x00002000
-
-#define SX_DEBUG_ALL 0xffffffff
-
-
-#define O_OTHER(tty) \
- ((O_OLCUC(tty)) ||\
- (O_ONLCR(tty)) ||\
- (O_OCRNL(tty)) ||\
- (O_ONOCR(tty)) ||\
- (O_ONLRET(tty)) ||\
- (O_OFILL(tty)) ||\
- (O_OFDEL(tty)) ||\
- (O_NLDLY(tty)) ||\
- (O_CRDLY(tty)) ||\
- (O_TABDLY(tty)) ||\
- (O_BSDLY(tty)) ||\
- (O_VTDLY(tty)) ||\
- (O_FFDLY(tty)))
-
-/* Same for input. */
-#define I_OTHER(tty) \
- ((I_INLCR(tty)) ||\
- (I_IGNCR(tty)) ||\
- (I_ICRNL(tty)) ||\
- (I_IUCLC(tty)) ||\
- (L_ISIG(tty)))
-
-#define MOD_TA ( TA>>4)
-#define MOD_MTA (MTA_CD1400>>4)
-#define MOD_SXDC ( SXDC>>4)
-
-
-/* We copy the download code over to the card in chunks of ... bytes */
-#define SX_CHUNK_SIZE 128
-
-#endif /* __KERNEL__ */
-
-
-
-/* Specialix document 6210046-11 page 3 */
-#define SPX(X) (('S'<<24) | ('P' << 16) | (X))
-
-/* Specialix-Linux specific IOCTLS. */
-#define SPXL(X) (SPX(('L' << 8) | (X)))
-
-
-#define SXIO_SET_BOARD SPXL(0x01)
-#define SXIO_GET_TYPE SPXL(0x02)
-#define SXIO_DOWNLOAD SPXL(0x03)
-#define SXIO_INIT SPXL(0x04)
-#define SXIO_SETDEBUG SPXL(0x05)
-#define SXIO_GETDEBUG SPXL(0x06)
-#define SXIO_DO_RAMTEST SPXL(0x07)
-#define SXIO_SETGSDEBUG SPXL(0x08)
-#define SXIO_GETGSDEBUG SPXL(0x09)
-#define SXIO_GETNPORTS SPXL(0x0a)
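These ioctl numbers are raw 32-bit constants assembled from the 'S', 'P' and 'L' tag bytes rather than the usual _IO() helpers; SXIO_SET_BOARD, for instance, works out to 0x53504C01. A quick stand-alone check of the encoding (macros copied from the definitions above):

#include <stdio.h>

#define SPX(X)	(('S' << 24) | ('P' << 16) | (X))
#define SPXL(X)	(SPX(('L' << 8) | (X)))

int main(void)
{
	printf("SXIO_SET_BOARD = 0x%08X\n", (unsigned)SPXL(0x01));	/* 0x53504C01 */
	printf("SXIO_GETNPORTS = 0x%08X\n", (unsigned)SPXL(0x0a));	/* 0x53504C0A */
	return 0;
}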
-
-
-#ifndef SXCTL_MISC_MINOR
-/* Allow others to gather this into "major.h" or something like that */
-#define SXCTL_MISC_MINOR 167
-#endif
-
-#ifndef SX_NORMAL_MAJOR
-/* This allows overriding on the compiler commandline, or in a "major.h"
- include or something like that */
-#define SX_NORMAL_MAJOR 32
-#define SX_CALLOUT_MAJOR 33
-#endif
-
-
-#define SX_TYPE_SX 0x01
-#define SX_TYPE_SI 0x02
-#define SX_TYPE_CF 0x03
-
-
-#define WINDOW_LEN(board) (IS_CF_BOARD(board)?0x20000:SX_WINDOW_LEN)
-/* Need a #define for ^^^^^^^ !!! */
-
diff --git a/drivers/staging/generic_serial/sxboards.h b/drivers/staging/generic_serial/sxboards.h
deleted file mode 100644
index 427927dc7db..00000000000
--- a/drivers/staging/generic_serial/sxboards.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/************************************************************************/
-/* */
-/* Title : SX/SI/XIO Board Hardware Definitions */
-/* */
-/* Author : N.P.Vassallo */
-/* */
-/* Creation : 16th March 1998 */
-/* */
-/* Version : 3.0.0 */
-/* */
-/* Copyright : (c) Specialix International Ltd. 1998 */
-/* */
-/* Description : Prototypes, structures and definitions */
-/* describing the SX/SI/XIO board hardware */
-/* */
-/************************************************************************/
-
-/* History...
-
-3.0.0 16/03/98 NPV Creation.
-
-*/
-
-#ifndef _sxboards_h /* If SXBOARDS.H not already defined */
-#define _sxboards_h 1
-
-/*****************************************************************************
-******************************* ******************************
-******************************* Board Types ******************************
-******************************* ******************************
-*****************************************************************************/
-
-/* BUS types... */
-#define BUS_ISA 0
-#define BUS_MCA 1
-#define BUS_EISA 2
-#define BUS_PCI 3
-
-/* Board phases... */
-#define SI1_Z280 1
-#define SI2_Z280 2
-#define SI3_T225 3
-
-/* Board types... */
-#define CARD_TYPE(bus,phase) (bus<<4|phase)
-#define CARD_BUS(type) ((type>>4)&0xF)
-#define CARD_PHASE(type) (type&0xF)
-
-#define TYPE_SI1_ISA CARD_TYPE(BUS_ISA,SI1_Z280)
-#define TYPE_SI2_ISA CARD_TYPE(BUS_ISA,SI2_Z280)
-#define TYPE_SI2_EISA CARD_TYPE(BUS_EISA,SI2_Z280)
-#define TYPE_SI2_PCI CARD_TYPE(BUS_PCI,SI2_Z280)
-
-#define TYPE_SX_ISA CARD_TYPE(BUS_ISA,SI3_T225)
-#define TYPE_SX_PCI CARD_TYPE(BUS_PCI,SI3_T225)
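CARD_TYPE() packs the bus code into the high nibble and the phase into the low nibble, and CARD_BUS()/CARD_PHASE() take it apart again, so TYPE_SI2_PCI is (3 << 4) | 2 = 0x32. A minimal round-trip check (macros copied verbatim from above):

#include <stdio.h>

#define BUS_PCI			3
#define SI2_Z280		2
#define CARD_TYPE(bus,phase)	(bus<<4|phase)
#define CARD_BUS(type)		((type>>4)&0xF)
#define CARD_PHASE(type)	(type&0xF)

int main(void)
{
	int t = CARD_TYPE(BUS_PCI, SI2_Z280);	/* 0x32 */

	printf("type 0x%02X: bus %d, phase %d\n", (unsigned)t, CARD_BUS(t), CARD_PHASE(t));
	return 0;
}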
-/*****************************************************************************
-****************************** ******************************
-****************************** Phase 1 Z280 ******************************
-****************************** ******************************
-*****************************************************************************/
-
-/* ISA board details... */
-#define SI1_ISA_WINDOW_LEN 0x10000 /* 64 Kbyte shared memory window */
-//#define SI1_ISA_MEMORY_LEN 0x8000 /* Usable memory - unused define*/
-//#define SI1_ISA_ADDR_LOW 0x0A0000 /* Lowest address = 640 Kbyte */
-//#define SI1_ISA_ADDR_HIGH 0xFF8000 /* Highest address = 16Mbyte - 32Kbyte */
-//#define SI2_ISA_ADDR_STEP SI2_ISA_WINDOW_LEN/* ISA board address step */
-//#define SI2_ISA_IRQ_MASK 0x9800 /* IRQs 15,12,11 */
-
-/* ISA board, register definitions... */
-//#define SI2_ISA_ID_BASE 0x7FF8 /* READ: Board ID string */
-#define SI1_ISA_RESET 0x8000 /* WRITE: Host Reset */
-#define SI1_ISA_RESET_CLEAR 0xc000 /* WRITE: Host Reset clear*/
-#define SI1_ISA_WAIT 0x9000 /* WRITE: Host wait */
-#define SI1_ISA_WAIT_CLEAR 0xd000 /* WRITE: Host wait clear */
-#define SI1_ISA_INTCL 0xa000 /* WRITE: Host Reset */
-#define SI1_ISA_INTCL_CLEAR 0xe000 /* WRITE: Host Reset */
-
-
-/*****************************************************************************
-****************************** ******************************
-****************************** Phase 2 Z280 ******************************
-****************************** ******************************
-*****************************************************************************/
-
-/* ISA board details... */
-#define SI2_ISA_WINDOW_LEN 0x8000 /* 32 Kbyte shared memory window */
-#define SI2_ISA_MEMORY_LEN 0x7FF8 /* Usable memory */
-#define SI2_ISA_ADDR_LOW 0x0A0000 /* Lowest address = 640 Kbyte */
-#define SI2_ISA_ADDR_HIGH 0xFF8000 /* Highest address = 16Mbyte - 32Kbyte */
-#define SI2_ISA_ADDR_STEP SI2_ISA_WINDOW_LEN/* ISA board address step */
-#define SI2_ISA_IRQ_MASK 0x9800 /* IRQs 15,12,11 */
-
-/* ISA board, register definitions... */
-#define SI2_ISA_ID_BASE 0x7FF8 /* READ: Board ID string */
-#define SI2_ISA_RESET SI2_ISA_ID_BASE /* WRITE: Host Reset */
-#define SI2_ISA_IRQ11 (SI2_ISA_ID_BASE+1) /* WRITE: Set IRQ11 */
-#define SI2_ISA_IRQ12 (SI2_ISA_ID_BASE+2) /* WRITE: Set IRQ12 */
-#define SI2_ISA_IRQ15 (SI2_ISA_ID_BASE+3) /* WRITE: Set IRQ15 */
-#define SI2_ISA_IRQSET (SI2_ISA_ID_BASE+4) /* WRITE: Set Host Interrupt */
-#define SI2_ISA_INTCLEAR (SI2_ISA_ID_BASE+5) /* WRITE: Enable Host Interrupt */
-
-#define SI2_ISA_IRQ11_SET 0x10
-#define SI2_ISA_IRQ11_CLEAR 0x00
-#define SI2_ISA_IRQ12_SET 0x10
-#define SI2_ISA_IRQ12_CLEAR 0x00
-#define SI2_ISA_IRQ15_SET 0x10
-#define SI2_ISA_IRQ15_CLEAR 0x00
-#define SI2_ISA_INTCLEAR_SET 0x10
-#define SI2_ISA_INTCLEAR_CLEAR 0x00
-#define SI2_ISA_IRQSET_CLEAR 0x10
-#define SI2_ISA_IRQSET_SET 0x00
-#define SI2_ISA_RESET_SET 0x00
-#define SI2_ISA_RESET_CLEAR 0x10
-
-/* PCI board details... */
-#define SI2_PCI_WINDOW_LEN 0x100000 /* 1 Mbyte memory window */
-
-/* PCI board register definitions... */
-#define SI2_PCI_SET_IRQ 0x40001 /* Set Host Interrupt */
-#define SI2_PCI_RESET 0xC0001 /* Host Reset */
-
-/*****************************************************************************
-****************************** ******************************
-****************************** Phase 3 T225 ******************************
-****************************** ******************************
-*****************************************************************************/
-
-/* General board details... */
-#define SX_WINDOW_LEN 64*1024 /* 64 Kbyte memory window */
-
-/* ISA board details... */
-#define SX_ISA_ADDR_LOW 0x0A0000 /* Lowest address = 640 Kbyte */
-#define SX_ISA_ADDR_HIGH 0xFF8000 /* Highest address = 16Mbyte - 32Kbyte */
-#define SX_ISA_ADDR_STEP SX_WINDOW_LEN /* ISA board address step */
-#define SX_ISA_IRQ_MASK 0x9E00 /* IRQs 15,12,11,10,9 */
-
-/* Hardware register definitions... */
-#define SX_EVENT_STATUS 0x7800 /* READ: T225 Event Status */
-#define SX_EVENT_STROBE 0x7800 /* WRITE: T225 Event Strobe */
-#define SX_EVENT_ENABLE 0x7880 /* WRITE: T225 Event Enable */
-#define SX_VPD_ROM 0x7C00 /* READ: Vital Product Data ROM */
-#define SX_CONFIG 0x7C00 /* WRITE: Host Configuration Register */
-#define SX_IRQ_STATUS 0x7C80 /* READ: Host Interrupt Status */
-#define SX_SET_IRQ 0x7C80 /* WRITE: Set Host Interrupt */
-#define SX_RESET_STATUS 0x7D00 /* READ: Host Reset Status */
-#define SX_RESET 0x7D00 /* WRITE: Host Reset */
-#define SX_RESET_IRQ 0x7D80 /* WRITE: Reset Host Interrupt */
-
-/* SX_VPD_ROM definitions... */
-#define SX_VPD_SLX_ID1 0x00
-#define SX_VPD_SLX_ID2 0x01
-#define SX_VPD_HW_REV 0x02
-#define SX_VPD_HW_ASSEM 0x03
-#define SX_VPD_UNIQUEID4 0x04
-#define SX_VPD_UNIQUEID3 0x05
-#define SX_VPD_UNIQUEID2 0x06
-#define SX_VPD_UNIQUEID1 0x07
-#define SX_VPD_MANU_YEAR 0x08
-#define SX_VPD_MANU_WEEK 0x09
-#define SX_VPD_IDENT 0x10
-#define SX_VPD_IDENT_STRING "JET HOST BY KEV#"
-
-/* SX unique identifiers... */
-#define SX_UNIQUEID_MASK 0xF0
-#define SX_ISA_UNIQUEID1 0x20
-#define SX_PCI_UNIQUEID1 0x50
-
-/* SX_CONFIG definitions... */
-#define SX_CONF_BUSEN 0x02 /* Enable T225 memory and I/O */
-#define SX_CONF_HOSTIRQ 0x04 /* Enable board to host interrupt */
-
-/* SX bootstrap... */
-#define SX_BOOTSTRAP "\x28\x20\x21\x02\x60\x0a"
-#define SX_BOOTSTRAP_SIZE 6
-#define SX_BOOTSTRAP_ADDR (0x8000-SX_BOOTSTRAP_SIZE)
-
-/*****************************************************************************
-********************************** **********************************
-********************************** EISA **********************************
-********************************** **********************************
-*****************************************************************************/
-
-#define SI2_EISA_OFF 0x42
-#define SI2_EISA_VAL 0x01
-#define SI2_EISA_WINDOW_LEN 0x10000
-
-/*****************************************************************************
-*********************************** **********************************
-*********************************** PCI **********************************
-*********************************** **********************************
-*****************************************************************************/
-
-/* General definitions... */
-
-#define SPX_VENDOR_ID 0x11CB /* Assigned by the PCI SIG */
-#define SPX_DEVICE_ID 0x4000 /* SI/XIO boards */
-#define SPX_PLXDEVICE_ID 0x2000 /* SX boards */
-
-#define SPX_SUB_VENDOR_ID SPX_VENDOR_ID /* Same as vendor id */
-#define SI2_SUB_SYS_ID 0x400 /* Phase 2 (Z280) board */
-#define SX_SUB_SYS_ID 0x200 /* Phase 3 (t225) board */
-
-#endif /*_sxboards_h */
-
-/* End of SXBOARDS.H */
diff --git a/drivers/staging/generic_serial/sxwindow.h b/drivers/staging/generic_serial/sxwindow.h
deleted file mode 100644
index cf01b662aef..00000000000
--- a/drivers/staging/generic_serial/sxwindow.h
+++ /dev/null
@@ -1,393 +0,0 @@
-/************************************************************************/
-/* */
-/* Title : SX Shared Memory Window Structure */
-/* */
-/* Author : N.P.Vassallo */
-/* */
-/* Creation : 16th March 1998 */
-/* */
-/* Version : 3.0.0 */
-/* */
-/* Copyright : (c) Specialix International Ltd. 1998 */
-/* */
-/* Description : Prototypes, structures and definitions */
-/* describing the SX/SI/XIO cards shared */
-/* memory window structure: */
-/* SXCARD */
-/* SXMODULE */
-/* SXCHANNEL */
-/* */
-/************************************************************************/
-
-/* History...
-
-3.0.0 16/03/98 NPV Creation. (based on STRUCT.H)
-
-*/
-
-#ifndef _sxwindow_h /* If SXWINDOW.H not already defined */
-#define _sxwindow_h 1
-
-/*****************************************************************************
-*************************** ***************************
-*************************** Common Definitions ***************************
-*************************** ***************************
-*****************************************************************************/
-
-typedef struct _SXCARD *PSXCARD; /* SXCARD structure pointer */
-typedef struct _SXMODULE *PMOD; /* SXMODULE structure pointer */
-typedef struct _SXCHANNEL *PCHAN; /* SXCHANNEL structure pointer */
-
-/*****************************************************************************
-********************************* *********************************
-********************************* SXCARD *********************************
-********************************* *********************************
-*****************************************************************************/
-
-typedef struct _SXCARD
-{
- BYTE cc_init_status; /* 0x00 Initialisation status */
- BYTE cc_mem_size; /* 0x01 Size of memory on card */
- WORD cc_int_count; /* 0x02 Interrupt count */
- WORD cc_revision; /* 0x04 Download code revision */
- BYTE cc_isr_count; /* 0x06 Count when ISR is run */
- BYTE cc_main_count; /* 0x07 Count when main loop is run */
- WORD cc_int_pending; /* 0x08 Interrupt pending */
- WORD cc_poll_count; /* 0x0A Count when poll is run */
- BYTE cc_int_set_count; /* 0x0C Count when host interrupt is set */
- BYTE cc_rfu[0x80 - 0x0D]; /* 0x0D Pad structure to 128 bytes (0x80) */
-
-} SXCARD;
-
-/* SXCARD.cc_init_status definitions... */
-#define ADAPTERS_FOUND (BYTE)0x01
-#define NO_ADAPTERS_FOUND (BYTE)0xFF
-
-/* SXCARD.cc_mem_size definitions... */
-#define SX_MEMORY_SIZE (BYTE)0x40
-
-/* SXCARD.cc_int_count definitions... */
-#define INT_COUNT_DEFAULT 100 /* Hz */
-
-/*****************************************************************************
-******************************** ********************************
-******************************** SXMODULE ********************************
-******************************** ********************************
-*****************************************************************************/
-
-#define TOP_POINTER(a) ((a)|0x8000) /* Sets top bit of word */
-#define UNTOP_POINTER(a) ((a)&~0x8000) /* Clears top bit of word */
-
-typedef struct _SXMODULE
-{
- WORD mc_next; /* 0x00 Next module "pointer" (ORed with 0x8000) */
- BYTE mc_type; /* 0x02 Type of TA in terms of number of channels */
- BYTE mc_mod_no; /* 0x03 Module number on SI bus cable (0 closest to card) */
- BYTE mc_dtr; /* 0x04 Private DTR copy (TA only) */
- BYTE mc_rfu1; /* 0x05 Reserved */
- WORD mc_uart; /* 0x06 UART base address for this module */
- BYTE mc_chip; /* 0x08 Chip type / number of ports */
- BYTE mc_current_uart; /* 0x09 Current uart selected for this module */
-#ifdef DOWNLOAD
- PCHAN mc_chan_pointer[8]; /* 0x0A Pointer to each channel structure */
-#else
- WORD mc_chan_pointer[8]; /* 0x0A Define as WORD if not compiling into download */
-#endif
- WORD mc_rfu2; /* 0x1A Reserved */
- BYTE mc_opens1; /* 0x1C Number of open ports on first four ports on MTA/SXDC */
- BYTE mc_opens2; /* 0x1D Number of open ports on second four ports on MTA/SXDC */
- BYTE mc_mods; /* 0x1E Types of connector module attached to MTA/SXDC */
- BYTE mc_rev1; /* 0x1F Revision of first CD1400 on MTA/SXDC */
- BYTE mc_rev2; /* 0x20 Revision of second CD1400 on MTA/SXDC */
- BYTE mc_mtaasic_rev; /* 0x21 Revision of MTA ASIC 1..4 -> A, B, C, D */
- BYTE mc_rfu3[0x100 - 0x22]; /* 0x22 Pad structure to 256 bytes (0x100) */
-
-} SXMODULE;
-
-/* SXMODULE.mc_type definitions... */
-#define FOUR_PORTS (BYTE)4
-#define EIGHT_PORTS (BYTE)8
-
-/* SXMODULE.mc_chip definitions... */
-#define CHIP_MASK 0xF0
-#define TA (BYTE)0
-#define TA4 (TA | FOUR_PORTS)
-#define TA8 (TA | EIGHT_PORTS)
-#define TA4_ASIC (BYTE)0x0A
-#define TA8_ASIC (BYTE)0x0B
-#define MTA_CD1400 (BYTE)0x28
-#define SXDC (BYTE)0x48
-
-/* SXMODULE.mc_mods definitions... */
-#define MOD_RS232DB25 0x00 /* RS232 DB25 (socket/plug) */
-#define MOD_RS232RJ45 0x01 /* RS232 RJ45 (shielded/opto-isolated) */
-#define MOD_RESERVED_2 0x02 /* Reserved (RS485) */
-#define MOD_RS422DB25 0x03 /* RS422 DB25 Socket */
-#define MOD_RESERVED_4 0x04 /* Reserved */
-#define MOD_PARALLEL 0x05 /* Parallel */
-#define MOD_RESERVED_6 0x06 /* Reserved (RS423) */
-#define MOD_RESERVED_7 0x07 /* Reserved */
-#define MOD_2_RS232DB25 0x08 /* Rev 2.0 RS232 DB25 (socket/plug) */
-#define MOD_2_RS232RJ45 0x09 /* Rev 2.0 RS232 RJ45 */
-#define MOD_RESERVED_A 0x0A /* Rev 2.0 Reserved */
-#define MOD_2_RS422DB25 0x0B /* Rev 2.0 RS422 DB25 */
-#define MOD_RESERVED_C 0x0C /* Rev 2.0 Reserved */
-#define MOD_2_PARALLEL 0x0D /* Rev 2.0 Parallel */
-#define MOD_RESERVED_E 0x0E /* Rev 2.0 Reserved */
-#define MOD_BLANK 0x0F /* Blank Panel */
-
-/*****************************************************************************
-******************************** *******************************
-******************************** SXCHANNEL *******************************
-******************************** *******************************
-*****************************************************************************/
-
-#define TX_BUFF_OFFSET 0x60 /* Transmit buffer offset in channel structure */
-#define BUFF_POINTER(a) (((a)+TX_BUFF_OFFSET)|0x8000)
-#define UNBUFF_POINTER(a) (jet_channel*)(((a)&~0x8000)-TX_BUFF_OFFSET)
-#define BUFFER_SIZE 256
-#define HIGH_WATER ((BUFFER_SIZE / 4) * 3)
-#define LOW_WATER (BUFFER_SIZE / 4)
-
-typedef struct _SXCHANNEL
-{
- WORD next_item; /* 0x00 Offset from window base of next channel's hi_txbuf (ORed with 0x8000) */
- WORD addr_uart; /* 0x02 INTERNAL pointer to uart address. Includes FASTPATH bit */
- WORD module; /* 0x04 Offset from window base of parent SXMODULE structure */
- BYTE type; /* 0x06 Chip type / number of ports (copy of mc_chip) */
- BYTE chan_number; /* 0x07 Channel number on the TA/MTA/SXDC */
- WORD xc_status; /* 0x08 Flow control and I/O status */
- BYTE hi_rxipos; /* 0x0A Receive buffer input index */
- BYTE hi_rxopos; /* 0x0B Receive buffer output index */
- BYTE hi_txopos; /* 0x0C Transmit buffer output index */
- BYTE hi_txipos; /* 0x0D Transmit buffer input index */
- BYTE hi_hstat; /* 0x0E Command register */
- BYTE dtr_bit; /* 0x0F INTERNAL DTR control byte (TA only) */
- BYTE txon; /* 0x10 INTERNAL copy of hi_txon */
- BYTE txoff; /* 0x11 INTERNAL copy of hi_txoff */
- BYTE rxon; /* 0x12 INTERNAL copy of hi_rxon */
- BYTE rxoff; /* 0x13 INTERNAL copy of hi_rxoff */
- BYTE hi_mr1; /* 0x14 Mode Register 1 (databits,parity,RTS rx flow)*/
- BYTE hi_mr2; /* 0x15 Mode Register 2 (stopbits,local,CTS tx flow)*/
- BYTE hi_csr; /* 0x16 Clock Select Register (baud rate) */
- BYTE hi_op; /* 0x17 Modem Output Signal */
- BYTE hi_ip; /* 0x18 Modem Input Signal */
- BYTE hi_state; /* 0x19 Channel status */
- BYTE hi_prtcl; /* 0x1A Channel protocol (flow control) */
- BYTE hi_txon; /* 0x1B Transmit XON character */
- BYTE hi_txoff; /* 0x1C Transmit XOFF character */
- BYTE hi_rxon; /* 0x1D Receive XON character */
- BYTE hi_rxoff; /* 0x1E Receive XOFF character */
- BYTE close_prev; /* 0x1F INTERNAL channel previously closed flag */
- BYTE hi_break; /* 0x20 Break and error control */
- BYTE break_state; /* 0x21 INTERNAL copy of hi_break */
- BYTE hi_mask; /* 0x22 Mask for received data */
- BYTE mask; /* 0x23 INTERNAL copy of hi_mask */
- BYTE mod_type; /* 0x24 MTA/SXDC hardware module type */
- BYTE ccr_state; /* 0x25 INTERNAL MTA/SXDC state of CCR register */
- BYTE ip_mask; /* 0x26 Input handshake mask */
- BYTE hi_parallel; /* 0x27 Parallel port flag */
- BYTE par_error; /* 0x28 Error code for parallel loopback test */
- BYTE any_sent; /* 0x29 INTERNAL data sent flag */
- BYTE asic_txfifo_size; /* 0x2A INTERNAL SXDC transmit FIFO size */
- BYTE rfu1[2]; /* 0x2B Reserved */
- BYTE csr; /* 0x2D INTERNAL copy of hi_csr */
-#ifdef DOWNLOAD
- PCHAN nextp; /* 0x2E Offset from window base of next channel structure */
-#else
- WORD nextp; /* 0x2E Define as WORD if not compiling into download */
-#endif
- BYTE prtcl; /* 0x30 INTERNAL copy of hi_prtcl */
- BYTE mr1; /* 0x31 INTERNAL copy of hi_mr1 */
- BYTE mr2; /* 0x32 INTERNAL copy of hi_mr2 */
- BYTE hi_txbaud; /* 0x33 Extended transmit baud rate (SXDC only if((hi_csr&0x0F)==0x0F)) */
- BYTE hi_rxbaud; /* 0x34 Extended receive baud rate (SXDC only if((hi_csr&0xF0)==0xF0)) */
- BYTE txbreak_state; /* 0x35 INTERNAL MTA/SXDC transmit break state */
- BYTE txbaud; /* 0x36 INTERNAL copy of hi_txbaud */
- BYTE rxbaud; /* 0x37 INTERNAL copy of hi_rxbaud */
- WORD err_framing; /* 0x38 Count of receive framing errors */
- WORD err_parity; /* 0x3A Count of receive parity errors */
- WORD err_overrun; /* 0x3C Count of receive overrun errors */
- WORD err_overflow; /* 0x3E Count of receive buffer overflow errors */
- BYTE rfu2[TX_BUFF_OFFSET - 0x40]; /* 0x40 Reserved until hi_txbuf */
- BYTE hi_txbuf[BUFFER_SIZE]; /* 0x060 Transmit buffer */
- BYTE hi_rxbuf[BUFFER_SIZE]; /* 0x160 Receive buffer */
- BYTE rfu3[0x300 - 0x260]; /* 0x260 Reserved until 768 bytes (0x300) */
-
-} SXCHANNEL;
-
-/* SXCHANNEL.addr_uart definitions... */
-#define FASTPATH 0x1000 /* Set to indicate fast rx/tx processing (TA only) */
-
-/* SXCHANNEL.xc_status definitions... */
-#define X_TANY 0x0001 /* XON is any character (TA only) */
-#define X_TION 0x0001 /* Tx interrupts on (MTA only) */
-#define X_TXEN 0x0002 /* Tx XON/XOFF enabled (TA only) */
-#define X_RTSEN 0x0002 /* RTS FLOW enabled (MTA only) */
-#define X_TXRC 0x0004 /* XOFF received (TA only) */
-#define X_RTSLOW 0x0004 /* RTS dropped (MTA only) */
-#define X_RXEN 0x0008 /* Rx XON/XOFF enabled */
-#define X_ANYXO 0x0010 /* XOFF pending/sent or RTS dropped */
-#define X_RXSE 0x0020 /* Rx XOFF sent */
-#define X_NPEND 0x0040 /* Rx XON pending or XOFF pending */
-#define X_FPEND 0x0080 /* Rx XOFF pending */
-#define C_CRSE 0x0100 /* Carriage return sent (TA only) */
-#define C_TEMR 0x0100 /* Tx empty requested (MTA only) */
-#define C_TEMA 0x0200 /* Tx empty acked (MTA only) */
-#define C_ANYP 0x0200 /* Any protocol bar tx XON/XOFF (TA only) */
-#define C_EN 0x0400 /* Cooking enabled (on MTA means port is also parallel) */
-#define C_HIGH 0x0800 /* Buffer previously hit high water */
-#define C_CTSEN 0x1000 /* CTS automatic flow-control enabled */
-#define C_DCDEN 0x2000 /* DCD/DTR checking enabled */
-#define C_BREAK 0x4000 /* Break detected */
-#define C_RTSEN 0x8000 /* RTS automatic flow control enabled (MTA only) */
-#define C_PARITY 0x8000 /* Parity checking enabled (TA only) */
-
-/* SXCHANNEL.hi_hstat definitions... */
-#define HS_IDLE_OPEN 0x00 /* Channel open state */
-#define HS_LOPEN 0x02 /* Local open command (no modem monitoring) */
-#define HS_MOPEN 0x04 /* Modem open command (wait for DCD signal) */
-#define HS_IDLE_MPEND 0x06 /* Waiting for DCD signal state */
-#define HS_CONFIG 0x08 /* Configuration command */
-#define HS_CLOSE 0x0A /* Close command */
-#define HS_START 0x0C /* Start transmit break command */
-#define HS_STOP 0x0E /* Stop transmit break command */
-#define HS_IDLE_CLOSED 0x10 /* Closed channel state */
-#define HS_IDLE_BREAK 0x12 /* Transmit break state */
-#define HS_FORCE_CLOSED 0x14 /* Force close command */
-#define HS_RESUME 0x16 /* Clear pending XOFF command */
-#define HS_WFLUSH 0x18 /* Flush transmit buffer command */
-#define HS_RFLUSH 0x1A /* Flush receive buffer command */
-#define HS_SUSPEND 0x1C /* Suspend output command (like XOFF received) */
-#define PARALLEL 0x1E /* Parallel port loopback test command (Diagnostics Only) */
-#define ENABLE_RX_INTS 0x20 /* Enable receive interrupts command (Diagnostics Only) */
-#define ENABLE_TX_INTS 0x22 /* Enable transmit interrupts command (Diagnostics Only) */
-#define ENABLE_MDM_INTS 0x24 /* Enable modem interrupts command (Diagnostics Only) */
-#define DISABLE_INTS 0x26 /* Disable interrupts command (Diagnostics Only) */
-
-/* SXCHANNEL.hi_mr1 definitions... */
-#define MR1_BITS 0x03 /* Data bits mask */
-#define MR1_5_BITS 0x00 /* 5 data bits */
-#define MR1_6_BITS 0x01 /* 6 data bits */
-#define MR1_7_BITS 0x02 /* 7 data bits */
-#define MR1_8_BITS 0x03 /* 8 data bits */
-#define MR1_PARITY 0x1C /* Parity mask */
-#define MR1_ODD 0x04 /* Odd parity */
-#define MR1_EVEN 0x00 /* Even parity */
-#define MR1_WITH 0x00 /* Parity enabled */
-#define MR1_FORCE 0x08 /* Force parity */
-#define MR1_NONE 0x10 /* No parity */
-#define MR1_NOPARITY MR1_NONE /* No parity */
-#define MR1_ODDPARITY (MR1_WITH|MR1_ODD) /* Odd parity */
-#define MR1_EVENPARITY (MR1_WITH|MR1_EVEN) /* Even parity */
-#define MR1_MARKPARITY (MR1_FORCE|MR1_ODD) /* Mark parity */
-#define MR1_SPACEPARITY (MR1_FORCE|MR1_EVEN) /* Space parity */
-#define MR1_RTS_RXFLOW 0x80 /* RTS receive flow control */
-
-/* SXCHANNEL.hi_mr2 definitions... */
-#define MR2_STOP 0x0F /* Stop bits mask */
-#define MR2_1_STOP 0x07 /* 1 stop bit */
-#define MR2_2_STOP 0x0F /* 2 stop bits */
-#define MR2_CTS_TXFLOW 0x10 /* CTS transmit flow control */
-#define MR2_RTS_TOGGLE 0x20 /* RTS toggle on transmit */
-#define MR2_NORMAL 0x00 /* Normal mode */
-#define MR2_AUTO 0x40 /* Auto-echo mode (TA only) */
-#define MR2_LOCAL 0x80 /* Local echo mode */
-#define MR2_REMOTE 0xC0 /* Remote echo mode (TA only) */
-
-/* SXCHANNEL.hi_csr definitions... */
-#define CSR_75 0x0 /* 75 baud */
-#define CSR_110 0x1 /* 110 baud (TA), 115200 (MTA/SXDC) */
-#define CSR_38400 0x2 /* 38400 baud */
-#define CSR_150 0x3 /* 150 baud */
-#define CSR_300 0x4 /* 300 baud */
-#define CSR_600 0x5 /* 600 baud */
-#define CSR_1200 0x6 /* 1200 baud */
-#define CSR_2000 0x7 /* 2000 baud */
-#define CSR_2400 0x8 /* 2400 baud */
-#define CSR_4800 0x9 /* 4800 baud */
-#define CSR_1800 0xA /* 1800 baud */
-#define CSR_9600 0xB /* 9600 baud */
-#define CSR_19200 0xC /* 19200 baud */
-#define CSR_57600 0xD /* 57600 baud */
-#define CSR_EXTBAUD 0xF /* Extended baud rate (hi_txbaud/hi_rxbaud) */
-
-/* SXCHANNEL.hi_op definitions... */
-#define OP_RTS 0x01 /* RTS modem output signal */
-#define OP_DTR 0x02 /* DTR modem output signal */
-
-/* SXCHANNEL.hi_ip definitions... */
-#define IP_CTS 0x02 /* CTS modem input signal */
-#define IP_DCD 0x04 /* DCD modem input signal */
-#define IP_DSR 0x20 /* DSR modem input signal */
-#define IP_RI 0x40 /* RI modem input signal */
-
-/* SXCHANNEL.hi_state definitions... */
-#define ST_BREAK 0x01 /* Break received (clear with config) */
-#define ST_DCD 0x02 /* DCD signal changed state */
-
-/* SXCHANNEL.hi_prtcl definitions... */
-#define SP_TANY 0x01 /* Transmit XON/XANY (if SP_TXEN enabled) */
-#define SP_TXEN 0x02 /* Transmit XON/XOFF flow control */
-#define SP_CEN 0x04 /* Cooking enabled */
-#define SP_RXEN 0x08 /* Rx XON/XOFF enabled */
-#define SP_DCEN 0x20 /* DCD / DTR check */
-#define SP_DTR_RXFLOW 0x40 /* DTR receive flow control */
-#define SP_PAEN 0x80 /* Parity checking enabled */
-
-/* SXCHANNEL.hi_break definitions... */
-#define BR_IGN 0x01 /* Ignore any received breaks */
-#define BR_INT 0x02 /* Interrupt on received break */
-#define BR_PARMRK 0x04 /* Enable parmrk parity error processing */
-#define BR_PARIGN 0x08 /* Ignore chars with parity errors */
-#define BR_ERRINT 0x80 /* Treat parity/framing/overrun errors as exceptions */
-
-/* SXCHANNEL.par_error definitions.. */
-#define DIAG_IRQ_RX 0x01 /* Indicate serial receive interrupt (diags only) */
-#define DIAG_IRQ_TX 0x02 /* Indicate serial transmit interrupt (diags only) */
-#define DIAG_IRQ_MD 0x04 /* Indicate serial modem interrupt (diags only) */
-
-/* SXCHANNEL.hi_txbaud/hi_rxbaud definitions... (SXDC only) */
-#define BAUD_75 0x00 /* 75 baud */
-#define BAUD_115200 0x01 /* 115200 baud */
-#define BAUD_38400 0x02 /* 38400 baud */
-#define BAUD_150 0x03 /* 150 baud */
-#define BAUD_300 0x04 /* 300 baud */
-#define BAUD_600 0x05 /* 600 baud */
-#define BAUD_1200 0x06 /* 1200 baud */
-#define BAUD_2000 0x07 /* 2000 baud */
-#define BAUD_2400 0x08 /* 2400 baud */
-#define BAUD_4800 0x09 /* 4800 baud */
-#define BAUD_1800 0x0A /* 1800 baud */
-#define BAUD_9600 0x0B /* 9600 baud */
-#define BAUD_19200 0x0C /* 19200 baud */
-#define BAUD_57600 0x0D /* 57600 baud */
-#define BAUD_230400 0x0E /* 230400 baud */
-#define BAUD_460800 0x0F /* 460800 baud */
-#define BAUD_921600 0x10 /* 921600 baud */
-#define BAUD_50 0x11 /* 50 baud */
-#define BAUD_110 0x12 /* 110 baud */
-#define BAUD_134_5 0x13 /* 134.5 baud */
-#define BAUD_200 0x14 /* 200 baud */
-#define BAUD_7200 0x15 /* 7200 baud */
-#define BAUD_56000 0x16 /* 56000 baud */
-#define BAUD_64000 0x17 /* 64000 baud */
-#define BAUD_76800 0x18 /* 76800 baud */
-#define BAUD_128000 0x19 /* 128000 baud */
-#define BAUD_150000 0x1A /* 150000 baud */
-#define BAUD_14400 0x1B /* 14400 baud */
-#define BAUD_256000 0x1C /* 256000 baud */
-#define BAUD_28800 0x1D /* 28800 baud */
-
-/* SXCHANNEL.txbreak_state definitions... */
-#define TXBREAK_OFF 0 /* Not sending break */
-#define TXBREAK_START 1 /* Begin sending break */
-#define TXBREAK_START1 2 /* Begin sending break, part 1 */
-#define TXBREAK_ON 3 /* Sending break */
-#define TXBREAK_STOP 4 /* Stop sending break */
-#define TXBREAK_STOP1 5 /* Stop sending break, part 1 */
-
-#endif /* _sxwindow_h */
-
-/* End of SXWINDOW.H */
-
diff --git a/drivers/staging/generic_serial/vme_scc.c b/drivers/staging/generic_serial/vme_scc.c
deleted file mode 100644
index 96838640f57..00000000000
--- a/drivers/staging/generic_serial/vme_scc.c
+++ /dev/null
@@ -1,1145 +0,0 @@
-/*
- * drivers/char/vme_scc.c: MVME147, MVME162, BVME6000 SCC serial ports
- * implementation.
- * Copyright 1999 Richard Hirst <richard@sleepie.demon.co.uk>
- *
- * Based on atari_SCC.c which was
- * Copyright 1994-95 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
- * Partially based on PC-Linux serial.c by Linus Torvalds and Theodore Ts'o
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kdev_t.h>
-#include <asm/io.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/mm.h>
-#include <linux/serial.h>
-#include <linux/fcntl.h>
-#include <linux/major.h>
-#include <linux/delay.h>
-#include <linux/miscdevice.h>
-#include <linux/console.h>
-#include <linux/init.h>
-#include <asm/setup.h>
-#include <asm/bootinfo.h>
-
-#ifdef CONFIG_MVME147_SCC
-#include <asm/mvme147hw.h>
-#endif
-#ifdef CONFIG_MVME162_SCC
-#include <asm/mvme16xhw.h>
-#endif
-#ifdef CONFIG_BVME6000_SCC
-#include <asm/bvme6000hw.h>
-#endif
-
-#include <linux/generic_serial.h>
-#include "scc.h"
-
-
-#define CHANNEL_A 0
-#define CHANNEL_B 1
-
-#define SCC_MINOR_BASE 64
-
-/* Shadows for all SCC write registers */
-static unsigned char scc_shadow[2][16];
-
-/* Location to access for SCC register access delay */
-static volatile unsigned char *scc_del = NULL;
-
-/* To keep track of STATUS_REG state for detection of Ext/Status int source */
-static unsigned char scc_last_status_reg[2];
-
-/***************************** Prototypes *****************************/
-
-/* Function prototypes */
-static void scc_disable_tx_interrupts(void * ptr);
-static void scc_enable_tx_interrupts(void * ptr);
-static void scc_disable_rx_interrupts(void * ptr);
-static void scc_enable_rx_interrupts(void * ptr);
-static int scc_carrier_raised(struct tty_port *port);
-static void scc_shutdown_port(void * ptr);
-static int scc_set_real_termios(void *ptr);
-static void scc_hungup(void *ptr);
-static void scc_close(void *ptr);
-static int scc_chars_in_buffer(void * ptr);
-static int scc_open(struct tty_struct * tty, struct file * filp);
-static int scc_ioctl(struct tty_struct * tty,
- unsigned int cmd, unsigned long arg);
-static void scc_throttle(struct tty_struct *tty);
-static void scc_unthrottle(struct tty_struct *tty);
-static irqreturn_t scc_tx_int(int irq, void *data);
-static irqreturn_t scc_rx_int(int irq, void *data);
-static irqreturn_t scc_stat_int(int irq, void *data);
-static irqreturn_t scc_spcond_int(int irq, void *data);
-static void scc_setsignals(struct scc_port *port, int dtr, int rts);
-static int scc_break_ctl(struct tty_struct *tty, int break_state);
-
-static struct tty_driver *scc_driver;
-
-static struct scc_port scc_ports[2];
-
-/*---------------------------------------------------------------------------
- * Interface from generic_serial.c back here
- *--------------------------------------------------------------------------*/
-
-static struct real_driver scc_real_driver = {
- scc_disable_tx_interrupts,
- scc_enable_tx_interrupts,
- scc_disable_rx_interrupts,
- scc_enable_rx_interrupts,
- scc_shutdown_port,
- scc_set_real_termios,
- scc_chars_in_buffer,
- scc_close,
- scc_hungup,
- NULL
-};
-
-
-static const struct tty_operations scc_ops = {
- .open = scc_open,
- .close = gs_close,
- .write = gs_write,
- .put_char = gs_put_char,
- .flush_chars = gs_flush_chars,
- .write_room = gs_write_room,
- .chars_in_buffer = gs_chars_in_buffer,
- .flush_buffer = gs_flush_buffer,
- .ioctl = scc_ioctl,
- .throttle = scc_throttle,
- .unthrottle = scc_unthrottle,
- .set_termios = gs_set_termios,
- .stop = gs_stop,
- .start = gs_start,
- .hangup = gs_hangup,
- .break_ctl = scc_break_ctl,
-};
-
-static const struct tty_port_operations scc_port_ops = {
- .carrier_raised = scc_carrier_raised,
-};
-
-/*----------------------------------------------------------------------------
- * vme_scc_init() and support functions
- *---------------------------------------------------------------------------*/
-
-static int __init scc_init_drivers(void)
-{
- int error;
-
- scc_driver = alloc_tty_driver(2);
- if (!scc_driver)
- return -ENOMEM;
- scc_driver->owner = THIS_MODULE;
- scc_driver->driver_name = "scc";
- scc_driver->name = "ttyS";
- scc_driver->major = TTY_MAJOR;
- scc_driver->minor_start = SCC_MINOR_BASE;
- scc_driver->type = TTY_DRIVER_TYPE_SERIAL;
- scc_driver->subtype = SERIAL_TYPE_NORMAL;
- scc_driver->init_termios = tty_std_termios;
- scc_driver->init_termios.c_cflag =
- B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- scc_driver->init_termios.c_ispeed = 9600;
- scc_driver->init_termios.c_ospeed = 9600;
- scc_driver->flags = TTY_DRIVER_REAL_RAW;
- tty_set_operations(scc_driver, &scc_ops);
-
- if ((error = tty_register_driver(scc_driver))) {
- printk(KERN_ERR "scc: Couldn't register scc driver, error = %d\n",
- error);
- put_tty_driver(scc_driver);
- return 1;
- }
-
- return 0;
-}
-
-
-/* ports[] array is indexed by line no (i.e. [0] for ttyS0, [1] for ttyS1).
- */
-
-static void __init scc_init_portstructs(void)
-{
- struct scc_port *port;
- int i;
-
- for (i = 0; i < 2; i++) {
- port = scc_ports + i;
- tty_port_init(&port->gs.port);
- port->gs.port.ops = &scc_port_ops;
- port->gs.magic = SCC_MAGIC;
- port->gs.close_delay = HZ/2;
- port->gs.closing_wait = 30 * HZ;
- port->gs.rd = &scc_real_driver;
-#ifdef NEW_WRITE_LOCKING
- port->gs.port_write_mutex = MUTEX;
-#endif
- init_waitqueue_head(&port->gs.port.open_wait);
- init_waitqueue_head(&port->gs.port.close_wait);
- }
-}
-
-
-#ifdef CONFIG_MVME147_SCC
-static int __init mvme147_scc_init(void)
-{
- struct scc_port *port;
- int error;
-
- printk(KERN_INFO "SCC: MVME147 Serial Driver\n");
- /* Init channel A */
- port = &scc_ports[0];
- port->channel = CHANNEL_A;
- port->ctrlp = (volatile unsigned char *)M147_SCC_A_ADDR;
- port->datap = port->ctrlp + 1;
- port->port_a = &scc_ports[0];
- port->port_b = &scc_ports[1];
- error = request_irq(MVME147_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED,
- "SCC-A TX", port);
- if (error)
- goto fail;
- error = request_irq(MVME147_IRQ_SCCA_STAT, scc_stat_int, IRQF_DISABLED,
- "SCC-A status", port);
- if (error)
- goto fail_free_a_tx;
- error = request_irq(MVME147_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED,
- "SCC-A RX", port);
- if (error)
- goto fail_free_a_stat;
- error = request_irq(MVME147_IRQ_SCCA_SPCOND, scc_spcond_int,
- IRQF_DISABLED, "SCC-A special cond", port);
- if (error)
- goto fail_free_a_rx;
-
- {
- SCC_ACCESS_INIT(port);
-
- /* disable interrupts for this channel */
- SCCwrite(INT_AND_DMA_REG, 0);
- /* Set the interrupt vector */
- SCCwrite(INT_VECTOR_REG, MVME147_IRQ_SCC_BASE);
- /* Interrupt parameters: vector includes status, status low */
- SCCwrite(MASTER_INT_CTRL, MIC_VEC_INCL_STAT);
- SCCmod(MASTER_INT_CTRL, 0xff, MIC_MASTER_INT_ENAB);
- }
-
- /* Init channel B */
- port = &scc_ports[1];
- port->channel = CHANNEL_B;
- port->ctrlp = (volatile unsigned char *)M147_SCC_B_ADDR;
- port->datap = port->ctrlp + 1;
- port->port_a = &scc_ports[0];
- port->port_b = &scc_ports[1];
- error = request_irq(MVME147_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED,
- "SCC-B TX", port);
- if (error)
- goto fail_free_a_spcond;
- error = request_irq(MVME147_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED,
- "SCC-B status", port);
- if (error)
- goto fail_free_b_tx;
- error = request_irq(MVME147_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED,
- "SCC-B RX", port);
- if (error)
- goto fail_free_b_stat;
- error = request_irq(MVME147_IRQ_SCCB_SPCOND, scc_spcond_int,
- IRQF_DISABLED, "SCC-B special cond", port);
- if (error)
- goto fail_free_b_rx;
-
- {
- SCC_ACCESS_INIT(port);
-
- /* disable interrupts for this channel */
- SCCwrite(INT_AND_DMA_REG, 0);
- }
-
- /* Ensure interrupts are enabled in the PCC chip */
- m147_pcc->serial_cntrl=PCC_LEVEL_SERIAL|PCC_INT_ENAB;
-
- /* Initialise the tty driver structures and register */
- scc_init_portstructs();
- scc_init_drivers();
-
- return 0;
-
-fail_free_b_rx:
- free_irq(MVME147_IRQ_SCCB_RX, port);
-fail_free_b_stat:
- free_irq(MVME147_IRQ_SCCB_STAT, port);
-fail_free_b_tx:
- free_irq(MVME147_IRQ_SCCB_TX, port);
-fail_free_a_spcond:
- free_irq(MVME147_IRQ_SCCA_SPCOND, port);
-fail_free_a_rx:
- free_irq(MVME147_IRQ_SCCA_RX, port);
-fail_free_a_stat:
- free_irq(MVME147_IRQ_SCCA_STAT, port);
-fail_free_a_tx:
- free_irq(MVME147_IRQ_SCCA_TX, port);
-fail:
- return error;
-}
-#endif
-
-
-#ifdef CONFIG_MVME162_SCC
-static int __init mvme162_scc_init(void)
-{
- struct scc_port *port;
- int error;
-
- if (!(mvme16x_config & MVME16x_CONFIG_GOT_SCCA))
- return (-ENODEV);
-
- printk(KERN_INFO "SCC: MVME162 Serial Driver\n");
- /* Init channel A */
- port = &scc_ports[0];
- port->channel = CHANNEL_A;
- port->ctrlp = (volatile unsigned char *)MVME_SCC_A_ADDR;
- port->datap = port->ctrlp + 2;
- port->port_a = &scc_ports[0];
- port->port_b = &scc_ports[1];
- error = request_irq(MVME162_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED,
- "SCC-A TX", port);
- if (error)
- goto fail;
- error = request_irq(MVME162_IRQ_SCCA_STAT, scc_stat_int, IRQF_DISABLED,
- "SCC-A status", port);
- if (error)
- goto fail_free_a_tx;
- error = request_irq(MVME162_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED,
- "SCC-A RX", port);
- if (error)
- goto fail_free_a_stat;
- error = request_irq(MVME162_IRQ_SCCA_SPCOND, scc_spcond_int,
- IRQF_DISABLED, "SCC-A special cond", port);
- if (error)
- goto fail_free_a_rx;
-
- {
- SCC_ACCESS_INIT(port);
-
- /* disable interrupts for this channel */
- SCCwrite(INT_AND_DMA_REG, 0);
- /* Set the interrupt vector */
- SCCwrite(INT_VECTOR_REG, MVME162_IRQ_SCC_BASE);
- /* Interrupt parameters: vector includes status, status low */
- SCCwrite(MASTER_INT_CTRL, MIC_VEC_INCL_STAT);
- SCCmod(MASTER_INT_CTRL, 0xff, MIC_MASTER_INT_ENAB);
- }
-
- /* Init channel B */
- port = &scc_ports[1];
- port->channel = CHANNEL_B;
- port->ctrlp = (volatile unsigned char *)MVME_SCC_B_ADDR;
- port->datap = port->ctrlp + 2;
- port->port_a = &scc_ports[0];
- port->port_b = &scc_ports[1];
- error = request_irq(MVME162_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED,
- "SCC-B TX", port);
- if (error)
- goto fail_free_a_spcond;
- error = request_irq(MVME162_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED,
- "SCC-B status", port);
- if (error)
- goto fail_free_b_tx;
- error = request_irq(MVME162_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED,
- "SCC-B RX", port);
- if (error)
- goto fail_free_b_stat;
- error = request_irq(MVME162_IRQ_SCCB_SPCOND, scc_spcond_int,
- IRQF_DISABLED, "SCC-B special cond", port);
- if (error)
- goto fail_free_b_rx;
-
- {
- SCC_ACCESS_INIT(port); /* Either channel will do */
-
- /* disable interrupts for this channel */
- SCCwrite(INT_AND_DMA_REG, 0);
- }
-
- /* Ensure interrupts are enabled in the MC2 chip */
- *(volatile char *)0xfff4201d = 0x14;
-
- /* Initialise the tty driver structures and register */
- scc_init_portstructs();
- scc_init_drivers();
-
- return 0;
-
-fail_free_b_rx:
- free_irq(MVME162_IRQ_SCCB_RX, port);
-fail_free_b_stat:
- free_irq(MVME162_IRQ_SCCB_STAT, port);
-fail_free_b_tx:
- free_irq(MVME162_IRQ_SCCB_TX, port);
-fail_free_a_spcond:
- free_irq(MVME162_IRQ_SCCA_SPCOND, port);
-fail_free_a_rx:
- free_irq(MVME162_IRQ_SCCA_RX, port);
-fail_free_a_stat:
- free_irq(MVME162_IRQ_SCCA_STAT, port);
-fail_free_a_tx:
- free_irq(MVME162_IRQ_SCCA_TX, port);
-fail:
- return error;
-}
-#endif
-
-
-#ifdef CONFIG_BVME6000_SCC
-static int __init bvme6000_scc_init(void)
-{
- struct scc_port *port;
- int error;
-
- printk(KERN_INFO "SCC: BVME6000 Serial Driver\n");
- /* Init channel A */
- port = &scc_ports[0];
- port->channel = CHANNEL_A;
- port->ctrlp = (volatile unsigned char *)BVME_SCC_A_ADDR;
- port->datap = port->ctrlp + 4;
- port->port_a = &scc_ports[0];
- port->port_b = &scc_ports[1];
- error = request_irq(BVME_IRQ_SCCA_TX, scc_tx_int, IRQF_DISABLED,
- "SCC-A TX", port);
- if (error)
- goto fail;
- error = request_irq(BVME_IRQ_SCCA_STAT, scc_stat_int, IRQF_DISABLED,
- "SCC-A status", port);
- if (error)
- goto fail_free_a_tx;
- error = request_irq(BVME_IRQ_SCCA_RX, scc_rx_int, IRQF_DISABLED,
- "SCC-A RX", port);
- if (error)
- goto fail_free_a_stat;
- error = request_irq(BVME_IRQ_SCCA_SPCOND, scc_spcond_int,
- IRQF_DISABLED, "SCC-A special cond", port);
- if (error)
- goto fail_free_a_rx;
-
- {
- SCC_ACCESS_INIT(port);
-
- /* disable interrupts for this channel */
- SCCwrite(INT_AND_DMA_REG, 0);
- /* Set the interrupt vector */
- SCCwrite(INT_VECTOR_REG, BVME_IRQ_SCC_BASE);
- /* Interrupt parameters: vector includes status, status low */
- SCCwrite(MASTER_INT_CTRL, MIC_VEC_INCL_STAT);
- SCCmod(MASTER_INT_CTRL, 0xff, MIC_MASTER_INT_ENAB);
- }
-
- /* Init channel B */
- port = &scc_ports[1];
- port->channel = CHANNEL_B;
- port->ctrlp = (volatile unsigned char *)BVME_SCC_B_ADDR;
- port->datap = port->ctrlp + 4;
- port->port_a = &scc_ports[0];
- port->port_b = &scc_ports[1];
- error = request_irq(BVME_IRQ_SCCB_TX, scc_tx_int, IRQF_DISABLED,
- "SCC-B TX", port);
- if (error)
- goto fail_free_a_spcond;
- error = request_irq(BVME_IRQ_SCCB_STAT, scc_stat_int, IRQF_DISABLED,
- "SCC-B status", port);
- if (error)
- goto fail_free_b_tx;
- error = request_irq(BVME_IRQ_SCCB_RX, scc_rx_int, IRQF_DISABLED,
- "SCC-B RX", port);
- if (error)
- goto fail_free_b_stat;
- error = request_irq(BVME_IRQ_SCCB_SPCOND, scc_spcond_int,
- IRQF_DISABLED, "SCC-B special cond", port);
- if (error)
- goto fail_free_b_rx;
-
- {
- SCC_ACCESS_INIT(port); /* Either channel will do */
-
- /* disable interrupts for this channel */
- SCCwrite(INT_AND_DMA_REG, 0);
- }
-
- /* Initialise the tty driver structures and register */
- scc_init_portstructs();
- scc_init_drivers();
-
- return 0;
-
-fail_free_b_rx:
- free_irq(BVME_IRQ_SCCB_RX, port);
-fail_free_b_stat:
- free_irq(BVME_IRQ_SCCB_STAT, port);
-fail_free_b_tx:
- free_irq(BVME_IRQ_SCCB_TX, port);
-fail_free_a_spcond:
- free_irq(BVME_IRQ_SCCA_SPCOND, port);
-fail_free_a_rx:
- free_irq(BVME_IRQ_SCCA_RX, port);
-fail_free_a_stat:
- free_irq(BVME_IRQ_SCCA_STAT, port);
-fail_free_a_tx:
- free_irq(BVME_IRQ_SCCA_TX, port);
-fail:
- return error;
-}
-#endif
-
-
-static int __init vme_scc_init(void)
-{
- int res = -ENODEV;
-
-#ifdef CONFIG_MVME147_SCC
- if (MACH_IS_MVME147)
- res = mvme147_scc_init();
-#endif
-#ifdef CONFIG_MVME162_SCC
- if (MACH_IS_MVME16x)
- res = mvme162_scc_init();
-#endif
-#ifdef CONFIG_BVME6000_SCC
- if (MACH_IS_BVME6000)
- res = bvme6000_scc_init();
-#endif
- return res;
-}
-
-module_init(vme_scc_init);
-
-
-/*---------------------------------------------------------------------------
- * Interrupt handlers
- *--------------------------------------------------------------------------*/
-
-static irqreturn_t scc_rx_int(int irq, void *data)
-{
- unsigned char ch;
- struct scc_port *port = data;
- struct tty_struct *tty = port->gs.port.tty;
- SCC_ACCESS_INIT(port);
-
- ch = SCCread_NB(RX_DATA_REG);
- if (!tty) {
- printk(KERN_WARNING "scc_rx_int with NULL tty!\n");
- SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
- return IRQ_HANDLED;
- }
- tty_insert_flip_char(tty, ch, 0);
-
- /* Check if another character is already ready; in that case, the
- * spcond_int() function must be used, because this character may have an
- * error condition that isn't signalled by the interrupt vector used!
- */
- if (SCCread(INT_PENDING_REG) &
- (port->channel == CHANNEL_A ? IPR_A_RX : IPR_B_RX)) {
- scc_spcond_int (irq, data);
- return IRQ_HANDLED;
- }
-
- SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
-
- tty_flip_buffer_push(tty);
- return IRQ_HANDLED;
-}
-
-
-static irqreturn_t scc_spcond_int(int irq, void *data)
-{
- struct scc_port *port = data;
- struct tty_struct *tty = port->gs.port.tty;
- unsigned char stat, ch, err;
- int int_pending_mask = port->channel == CHANNEL_A ?
- IPR_A_RX : IPR_B_RX;
- SCC_ACCESS_INIT(port);
-
- if (!tty) {
- printk(KERN_WARNING "scc_spcond_int with NULL tty!\n");
- SCCwrite(COMMAND_REG, CR_ERROR_RESET);
- SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
- return IRQ_HANDLED;
- }
- do {
- stat = SCCread(SPCOND_STATUS_REG);
- ch = SCCread_NB(RX_DATA_REG);
-
- if (stat & SCSR_RX_OVERRUN)
- err = TTY_OVERRUN;
- else if (stat & SCSR_PARITY_ERR)
- err = TTY_PARITY;
- else if (stat & SCSR_CRC_FRAME_ERR)
- err = TTY_FRAME;
- else
- err = 0;
-
- tty_insert_flip_char(tty, ch, err);
-
- /* ++TeSche: *All* errors have to be cleared manually,
- * else the condition persists for the next chars
- */
- if (err)
- SCCwrite(COMMAND_REG, CR_ERROR_RESET);
-
- } while(SCCread(INT_PENDING_REG) & int_pending_mask);
-
- SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
-
- tty_flip_buffer_push(tty);
- return IRQ_HANDLED;
-}
-
-
-static irqreturn_t scc_tx_int(int irq, void *data)
-{
- struct scc_port *port = data;
- SCC_ACCESS_INIT(port);
-
- if (!port->gs.port.tty) {
- printk(KERN_WARNING "scc_tx_int with NULL tty!\n");
- SCCmod (INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0);
- SCCwrite(COMMAND_REG, CR_TX_PENDING_RESET);
- SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
- return IRQ_HANDLED;
- }
- while ((SCCread_NB(STATUS_REG) & SR_TX_BUF_EMPTY)) {
- if (port->x_char) {
- SCCwrite(TX_DATA_REG, port->x_char);
- port->x_char = 0;
- }
- else if ((port->gs.xmit_cnt <= 0) ||
- port->gs.port.tty->stopped ||
- port->gs.port.tty->hw_stopped)
- break;
- else {
- SCCwrite(TX_DATA_REG, port->gs.xmit_buf[port->gs.xmit_tail++]);
- port->gs.xmit_tail = port->gs.xmit_tail & (SERIAL_XMIT_SIZE-1);
- if (--port->gs.xmit_cnt <= 0)
- break;
- }
- }
- if ((port->gs.xmit_cnt <= 0) || port->gs.port.tty->stopped ||
- port->gs.port.tty->hw_stopped) {
- /* disable tx interrupts */
- SCCmod (INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0);
- SCCwrite(COMMAND_REG, CR_TX_PENDING_RESET); /* disable tx_int on next tx underrun? */
- port->gs.port.flags &= ~GS_TX_INTEN;
- }
- if (port->gs.port.tty && port->gs.xmit_cnt <= port->gs.wakeup_chars)
- tty_wakeup(port->gs.port.tty);
-
- SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
- return IRQ_HANDLED;
-}
-
-
-static irqreturn_t scc_stat_int(int irq, void *data)
-{
- struct scc_port *port = data;
- unsigned channel = port->channel;
- unsigned char last_sr, sr, changed;
- SCC_ACCESS_INIT(port);
-
- last_sr = scc_last_status_reg[channel];
- sr = scc_last_status_reg[channel] = SCCread_NB(STATUS_REG);
- changed = last_sr ^ sr;
-
- if (changed & SR_DCD) {
- port->c_dcd = !!(sr & SR_DCD);
- if (!(port->gs.port.flags & ASYNC_CHECK_CD))
- ; /* Don't report DCD changes */
- else if (port->c_dcd) {
- wake_up_interruptible(&port->gs.port.open_wait);
- }
- else {
- if (port->gs.port.tty)
- tty_hangup (port->gs.port.tty);
- }
- }
- SCCwrite(COMMAND_REG, CR_EXTSTAT_RESET);
- SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
- return IRQ_HANDLED;
-}
-
-
-/*---------------------------------------------------------------------------
- * generic_serial.c callback functions
- *--------------------------------------------------------------------------*/
-
-static void scc_disable_tx_interrupts(void *ptr)
-{
- struct scc_port *port = ptr;
- unsigned long flags;
- SCC_ACCESS_INIT(port);
-
- local_irq_save(flags);
- SCCmod(INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0);
- port->gs.port.flags &= ~GS_TX_INTEN;
- local_irq_restore(flags);
-}
-
-
-static void scc_enable_tx_interrupts(void *ptr)
-{
- struct scc_port *port = ptr;
- unsigned long flags;
- SCC_ACCESS_INIT(port);
-
- local_irq_save(flags);
- SCCmod(INT_AND_DMA_REG, 0xff, IDR_TX_INT_ENAB);
- /* restart the transmitter */
- scc_tx_int (0, port);
- local_irq_restore(flags);
-}
-
-
-static void scc_disable_rx_interrupts(void *ptr)
-{
- struct scc_port *port = ptr;
- unsigned long flags;
- SCC_ACCESS_INIT(port);
-
- local_irq_save(flags);
- SCCmod(INT_AND_DMA_REG,
- ~(IDR_RX_INT_MASK|IDR_PARERR_AS_SPCOND|IDR_EXTSTAT_INT_ENAB), 0);
- local_irq_restore(flags);
-}
-
-
-static void scc_enable_rx_interrupts(void *ptr)
-{
- struct scc_port *port = ptr;
- unsigned long flags;
- SCC_ACCESS_INIT(port);
-
- local_irq_save(flags);
- SCCmod(INT_AND_DMA_REG, 0xff,
- IDR_EXTSTAT_INT_ENAB|IDR_PARERR_AS_SPCOND|IDR_RX_INT_ALL);
- local_irq_restore(flags);
-}
-
-
-static int scc_carrier_raised(struct tty_port *port)
-{
- struct scc_port *sc = container_of(port, struct scc_port, gs.port);
- unsigned channel = sc->channel;
-
- return !!(scc_last_status_reg[channel] & SR_DCD);
-}
-
-
-static void scc_shutdown_port(void *ptr)
-{
- struct scc_port *port = ptr;
-
- port->gs.port.flags &= ~ GS_ACTIVE;
- if (port->gs.port.tty && (port->gs.port.tty->termios->c_cflag & HUPCL)) {
- scc_setsignals (port, 0, 0);
- }
-}
-
-
-static int scc_set_real_termios (void *ptr)
-{
- /* the SCC has char sizes 5,7,6,8 in that order! */
- static int chsize_map[4] = { 0, 2, 1, 3 };
- unsigned cflag, baud, chsize, channel, brgval = 0;
- unsigned long flags;
- struct scc_port *port = ptr;
- SCC_ACCESS_INIT(port);
-
- if (!port->gs.port.tty || !port->gs.port.tty->termios) return 0;
-
- channel = port->channel;
-
- if (channel == CHANNEL_A)
- return 0; /* Settings controlled by boot PROM */
-
- cflag = port->gs.port.tty->termios->c_cflag;
- baud = port->gs.baud;
- chsize = (cflag & CSIZE) >> 4;
-
- if (baud == 0) {
- /* speed == 0 -> drop DTR */
- local_irq_save(flags);
- SCCmod(TX_CTRL_REG, ~TCR_DTR, 0);
- local_irq_restore(flags);
- return 0;
- }
- else if ((MACH_IS_MVME16x && (baud < 50 || baud > 38400)) ||
- (MACH_IS_MVME147 && (baud < 50 || baud > 19200)) ||
- (MACH_IS_BVME6000 &&(baud < 50 || baud > 76800))) {
- printk(KERN_NOTICE "SCC: Bad speed requested, %d\n", baud);
- return 0;
- }
-
- if (cflag & CLOCAL)
- port->gs.port.flags &= ~ASYNC_CHECK_CD;
- else
- port->gs.port.flags |= ASYNC_CHECK_CD;
-
-#ifdef CONFIG_MVME147_SCC
- if (MACH_IS_MVME147)
- brgval = (M147_SCC_PCLK + baud/2) / (16 * 2 * baud) - 2;
-#endif
-#ifdef CONFIG_MVME162_SCC
- if (MACH_IS_MVME16x)
- brgval = (MVME_SCC_PCLK + baud/2) / (16 * 2 * baud) - 2;
-#endif
-#ifdef CONFIG_BVME6000_SCC
- if (MACH_IS_BVME6000)
- brgval = (BVME_SCC_RTxC + baud/2) / (16 * 2 * baud) - 2;
-#endif
- /* Now we have all parameters and can go to set them: */
- local_irq_save(flags);
-
- /* receiver's character size and auto-enables */
- SCCmod(RX_CTRL_REG, ~(RCR_CHSIZE_MASK|RCR_AUTO_ENAB_MODE),
- (chsize_map[chsize] << 6) |
- ((cflag & CRTSCTS) ? RCR_AUTO_ENAB_MODE : 0));
- /* parity and stop bits (both, Tx and Rx), clock mode never changes */
- SCCmod (AUX1_CTRL_REG,
- ~(A1CR_PARITY_MASK | A1CR_MODE_MASK),
- ((cflag & PARENB
- ? (cflag & PARODD ? A1CR_PARITY_ODD : A1CR_PARITY_EVEN)
- : A1CR_PARITY_NONE)
- | (cflag & CSTOPB ? A1CR_MODE_ASYNC_2 : A1CR_MODE_ASYNC_1)));
- /* sender's character size, set DTR for valid baud rate */
- SCCmod(TX_CTRL_REG, ~TCR_CHSIZE_MASK, chsize_map[chsize] << 5 | TCR_DTR);
- /* clock sources never change */
- /* disable BRG before changing the value */
- SCCmod(DPLL_CTRL_REG, ~DCR_BRG_ENAB, 0);
- /* BRG value */
- SCCwrite(TIMER_LOW_REG, brgval & 0xff);
- SCCwrite(TIMER_HIGH_REG, (brgval >> 8) & 0xff);
- /* BRG enable, and clock source never changes */
- SCCmod(DPLL_CTRL_REG, 0xff, DCR_BRG_ENAB);
-
- local_irq_restore(flags);
-
- return 0;
-}
-
-
-static int scc_chars_in_buffer (void *ptr)
-{
- struct scc_port *port = ptr;
- SCC_ACCESS_INIT(port);
-
- return (SCCread (SPCOND_STATUS_REG) & SCSR_ALL_SENT) ? 0 : 1;
-}
-
-
-/* Comment taken from sx.c (2.4.0):
- I haven't the foggiest why the decrement use count has to happen
- here. The whole linux serial drivers stuff needs to be redesigned.
- My guess is that this is a hack to minimize the impact of a bug
- elsewhere. Thinking about it some more. (try it sometime) Try
- running minicom on a serial port that is driven by a modularized
- driver. Have the modem hangup. Then remove the driver module. Then
- exit minicom. I expect an "oops". -- REW */
-
-static void scc_hungup(void *ptr)
-{
- scc_disable_tx_interrupts(ptr);
- scc_disable_rx_interrupts(ptr);
-}
-
-
-static void scc_close(void *ptr)
-{
- scc_disable_tx_interrupts(ptr);
- scc_disable_rx_interrupts(ptr);
-}
-
-
-/*---------------------------------------------------------------------------
- * Internal support functions
- *--------------------------------------------------------------------------*/
-
-static void scc_setsignals(struct scc_port *port, int dtr, int rts)
-{
- unsigned long flags;
- unsigned char t;
- SCC_ACCESS_INIT(port);
-
- local_irq_save(flags);
- t = SCCread(TX_CTRL_REG);
- if (dtr >= 0) t = dtr? (t | TCR_DTR): (t & ~TCR_DTR);
- if (rts >= 0) t = rts? (t | TCR_RTS): (t & ~TCR_RTS);
- SCCwrite(TX_CTRL_REG, t);
- local_irq_restore(flags);
-}
-
-
-static void scc_send_xchar(struct tty_struct *tty, char ch)
-{
- struct scc_port *port = tty->driver_data;
-
- port->x_char = ch;
- if (ch)
- scc_enable_tx_interrupts(port);
-}
-
-
-/*---------------------------------------------------------------------------
- * Driver entrypoints referenced from above
- *--------------------------------------------------------------------------*/
-
-static int scc_open (struct tty_struct * tty, struct file * filp)
-{
- int line = tty->index;
- int retval;
- struct scc_port *port = &scc_ports[line];
- int i, channel = port->channel;
- unsigned long flags;
- SCC_ACCESS_INIT(port);
-#if defined(CONFIG_MVME162_SCC) || defined(CONFIG_MVME147_SCC)
- static const struct {
- unsigned reg, val;
- } mvme_init_tab[] = {
- /* Values for MVME162 and MVME147 */
- /* no parity, 1 stop bit, async, 1:16 */
- { AUX1_CTRL_REG, A1CR_PARITY_NONE|A1CR_MODE_ASYNC_1|A1CR_CLKMODE_x16 },
- /* parity error is special cond, ints disabled, no DMA */
- { INT_AND_DMA_REG, IDR_PARERR_AS_SPCOND | IDR_RX_INT_DISAB },
- /* Rx 8 bits/char, no auto enable, Rx off */
- { RX_CTRL_REG, RCR_CHSIZE_8 },
- /* DTR off, Tx 8 bits/char, RTS off, Tx off */
- { TX_CTRL_REG, TCR_CHSIZE_8 },
- /* special features off */
- { AUX2_CTRL_REG, 0 },
- { CLK_CTRL_REG, CCR_RXCLK_BRG | CCR_TXCLK_BRG },
- { DPLL_CTRL_REG, DCR_BRG_ENAB | DCR_BRG_USE_PCLK },
- /* Start Rx */
- { RX_CTRL_REG, RCR_RX_ENAB | RCR_CHSIZE_8 },
- /* Start Tx */
- { TX_CTRL_REG, TCR_TX_ENAB | TCR_RTS | TCR_DTR | TCR_CHSIZE_8 },
- /* Ext/Stat ints: DCD only */
- { INT_CTRL_REG, ICR_ENAB_DCD_INT },
- /* Reset Ext/Stat ints */
- { COMMAND_REG, CR_EXTSTAT_RESET },
- /* ...again */
- { COMMAND_REG, CR_EXTSTAT_RESET },
- };
-#endif
-#if defined(CONFIG_BVME6000_SCC)
- static const struct {
- unsigned reg, val;
- } bvme_init_tab[] = {
- /* Values for BVME6000 */
- /* no parity, 1 stop bit, async, 1:16 */
- { AUX1_CTRL_REG, A1CR_PARITY_NONE|A1CR_MODE_ASYNC_1|A1CR_CLKMODE_x16 },
- /* parity error is special cond, ints disabled, no DMA */
- { INT_AND_DMA_REG, IDR_PARERR_AS_SPCOND | IDR_RX_INT_DISAB },
- /* Rx 8 bits/char, no auto enable, Rx off */
- { RX_CTRL_REG, RCR_CHSIZE_8 },
- /* DTR off, Tx 8 bits/char, RTS off, Tx off */
- { TX_CTRL_REG, TCR_CHSIZE_8 },
- /* special features off */
- { AUX2_CTRL_REG, 0 },
- { CLK_CTRL_REG, CCR_RTxC_XTAL | CCR_RXCLK_BRG | CCR_TXCLK_BRG },
- { DPLL_CTRL_REG, DCR_BRG_ENAB },
- /* Start Rx */
- { RX_CTRL_REG, RCR_RX_ENAB | RCR_CHSIZE_8 },
- /* Start Tx */
- { TX_CTRL_REG, TCR_TX_ENAB | TCR_RTS | TCR_DTR | TCR_CHSIZE_8 },
- /* Ext/Stat ints: DCD only */
- { INT_CTRL_REG, ICR_ENAB_DCD_INT },
- /* Reset Ext/Stat ints */
- { COMMAND_REG, CR_EXTSTAT_RESET },
- /* ...again */
- { COMMAND_REG, CR_EXTSTAT_RESET },
- };
-#endif
- if (!(port->gs.port.flags & ASYNC_INITIALIZED)) {
- local_irq_save(flags);
-#if defined(CONFIG_MVME147_SCC) || defined(CONFIG_MVME162_SCC)
- if (MACH_IS_MVME147 || MACH_IS_MVME16x) {
- for (i = 0; i < ARRAY_SIZE(mvme_init_tab); ++i)
- SCCwrite(mvme_init_tab[i].reg, mvme_init_tab[i].val);
- }
-#endif
-#if defined(CONFIG_BVME6000_SCC)
- if (MACH_IS_BVME6000) {
- for (i = 0; i < ARRAY_SIZE(bvme_init_tab); ++i)
- SCCwrite(bvme_init_tab[i].reg, bvme_init_tab[i].val);
- }
-#endif
-
- /* remember status register for detection of DCD and CTS changes */
- scc_last_status_reg[channel] = SCCread(STATUS_REG);
-
- port->c_dcd = 0; /* Prevent initial 1->0 interrupt */
- scc_setsignals (port, 1,1);
- local_irq_restore(flags);
- }
-
- tty->driver_data = port;
- port->gs.port.tty = tty;
- port->gs.port.count++;
- retval = gs_init_port(&port->gs);
- if (retval) {
- port->gs.port.count--;
- return retval;
- }
- port->gs.port.flags |= GS_ACTIVE;
- retval = gs_block_til_ready(port, filp);
-
- if (retval) {
- port->gs.port.count--;
- return retval;
- }
-
- port->c_dcd = tty_port_carrier_raised(&port->gs.port);
-
- scc_enable_rx_interrupts(port);
-
- return 0;
-}
-
-
-static void scc_throttle (struct tty_struct * tty)
-{
- struct scc_port *port = tty->driver_data;
- unsigned long flags;
- SCC_ACCESS_INIT(port);
-
- if (tty->termios->c_cflag & CRTSCTS) {
- local_irq_save(flags);
- SCCmod(TX_CTRL_REG, ~TCR_RTS, 0);
- local_irq_restore(flags);
- }
- if (I_IXOFF(tty))
- scc_send_xchar(tty, STOP_CHAR(tty));
-}
-
-
-static void scc_unthrottle (struct tty_struct * tty)
-{
- struct scc_port *port = tty->driver_data;
- unsigned long flags;
- SCC_ACCESS_INIT(port);
-
- if (tty->termios->c_cflag & CRTSCTS) {
- local_irq_save(flags);
- SCCmod(TX_CTRL_REG, 0xff, TCR_RTS);
- local_irq_restore(flags);
- }
- if (I_IXOFF(tty))
- scc_send_xchar(tty, START_CHAR(tty));
-}
-
-
-static int scc_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-
-
-static int scc_break_ctl(struct tty_struct *tty, int break_state)
-{
- struct scc_port *port = tty->driver_data;
- unsigned long flags;
- SCC_ACCESS_INIT(port);
-
- local_irq_save(flags);
- SCCmod(TX_CTRL_REG, ~TCR_SEND_BREAK,
- break_state ? TCR_SEND_BREAK : 0);
- local_irq_restore(flags);
- return 0;
-}
-
-
-/*---------------------------------------------------------------------------
- * Serial console stuff...
- *--------------------------------------------------------------------------*/
-
-#define scc_delay() do { __asm__ __volatile__ (" nop; nop"); } while (0)
-
-static void scc_ch_write (char ch)
-{
- volatile char *p = NULL;
-
-#ifdef CONFIG_MVME147_SCC
- if (MACH_IS_MVME147)
- p = (volatile char *)M147_SCC_A_ADDR;
-#endif
-#ifdef CONFIG_MVME162_SCC
- if (MACH_IS_MVME16x)
- p = (volatile char *)MVME_SCC_A_ADDR;
-#endif
-#ifdef CONFIG_BVME6000_SCC
- if (MACH_IS_BVME6000)
- p = (volatile char *)BVME_SCC_A_ADDR;
-#endif
-
- do {
- scc_delay();
- }
- while (!(*p & 4));
- scc_delay();
- *p = 8;
- scc_delay();
- *p = ch;
-}
-
-/* The console must be locked when we get here. */
-
-static void scc_console_write (struct console *co, const char *str, unsigned count)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- while (count--)
- {
- if (*str == '\n')
- scc_ch_write ('\r');
- scc_ch_write (*str++);
- }
- local_irq_restore(flags);
-}
-
-static struct tty_driver *scc_console_device(struct console *c, int *index)
-{
- *index = c->index;
- return scc_driver;
-}
-
-static struct console sercons = {
- .name = "ttyS",
- .write = scc_console_write,
- .device = scc_console_device,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-
-static int __init vme_scc_console_init(void)
-{
- if (vme_brdtype == VME_TYPE_MVME147 ||
- vme_brdtype == VME_TYPE_MVME162 ||
- vme_brdtype == VME_TYPE_MVME172 ||
- vme_brdtype == VME_TYPE_BVME4000 ||
- vme_brdtype == VME_TYPE_BVME6000)
- register_console(&sercons);
- return 0;
-}
-console_initcall(vme_scc_console_init);
diff --git a/drivers/staging/gma500/Kconfig b/drivers/staging/gma500/Kconfig
index ce8bedaeaac..bfe2166acda 100644
--- a/drivers/staging/gma500/Kconfig
+++ b/drivers/staging/gma500/Kconfig
@@ -1,5 +1,5 @@
config DRM_PSB
- tristate "Intel GMA500 KMS Framebuffer"
+ tristate "Intel GMA5/600 KMS Framebuffer"
depends on DRM && PCI && X86
select FB_CFB_COPYAREA
select FB_CFB_FILLRECT
@@ -7,6 +7,27 @@ config DRM_PSB
select DRM_KMS_HELPER
select DRM_TTM
help
- Say yes for an experimental KMS framebuffer driver for the
- Intel GMA500 ('Poulsbo') graphics support.
+ Say yes for an experimental 2D KMS framebuffer driver for the
+ Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
+ devices.
+config DRM_PSB_MRST
+ bool "Intel GMA600 support (Experimental)"
+ depends on DRM_PSB
+ help
+ Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
+ platforms with LVDS ports. HDMI and MIPI are not currently
+ supported.
+
+config DRM_PSB_MFLD
+ bool "Intel Medfield support (Experimental)"
+ depends on DRM_PSB
+ help
+ Say yes to include support for Intel Medfield platforms with MIPI
+ interfaces.
+
+config DRM_PSB_CDV
+ bool "Intel Cedarview support (Experimental)"
+ depends on DRM_PSB
+ help
+ Say yes to include support for Intel Cedarview platforms
diff --git a/drivers/staging/gma500/Makefile b/drivers/staging/gma500/Makefile
index db73ec6d812..c729868b1b1 100644
--- a/drivers/staging/gma500/Makefile
+++ b/drivers/staging/gma500/Makefile
@@ -3,24 +3,50 @@
#
ccflags-y += -Iinclude/drm
-psb_gfx-y += psb_bl.o \
+psb_gfx-y += gem_glue.o \
+ accel_2d.o \
+ backlight.o \
+ framebuffer.o \
+ gem.o \
+ gtt.o \
+ intel_bios.o \
+ intel_i2c.o \
+ intel_opregion.o \
+ mmu.o \
+ power.o \
psb_drv.o \
- psb_gem.o \
- psb_fb.o \
- psb_2d.o \
- psb_gtt.o \
- psb_intel_bios.o \
- psb_intel_opregion.o \
psb_intel_display.o \
- psb_intel_i2c.o \
psb_intel_lvds.o \
psb_intel_modes.o \
psb_intel_sdvo.o \
psb_lid.o \
- psb_mmu.o \
- psb_powermgmt.o \
psb_irq.o \
+ psb_device.o \
+ mid_bios.o
+
+psb_gfx-$(CONFIG_DRM_PSB_CDV) += cdv_device.o \
+ cdv_intel_crt.o \
+ cdv_intel_display.o \
+ cdv_intel_hdmi.o \
+ cdv_intel_lvds.o
+
+psb_gfx-$(CONFIG_DRM_PSB_MRST) += mrst_device.o \
mrst_crtc.o \
- mrst_lvds.o
+ mrst_lvds.o \
+ mrst_hdmi.o \
+ mrst_hdmi_i2c.o
+
+psb_gfx-$(CONFIG_DRM_PSB_MFLD) += mdfld_device.o \
+ mdfld_output.o \
+ mdfld_pyr_cmd.o \
+ mdfld_tmd_vid.o \
+ mdfld_tpo_cmd.o \
+ mdfld_tpo_vid.o \
+ mdfld_dsi_pkg_sender.o \
+ mdfld_dsi_dpi.o \
+ mdfld_dsi_output.o \
+ mdfld_dsi_dbi.o \
+ mdfld_dsi_dbi_dpu.o \
+ mdfld_intel_display.o
obj-$(CONFIG_DRM_PSB) += psb_gfx.o
diff --git a/drivers/staging/gma500/TODO b/drivers/staging/gma500/TODO
index f692ce1d242..fc836158e74 100644
--- a/drivers/staging/gma500/TODO
+++ b/drivers/staging/gma500/TODO
@@ -1,26 +1,15 @@
-- Test on more platforms
-- Clean up the various chunks of unused code
- Sort out the power management side. Not important for Poulsbo but
- matters for Moorestown
-- Add Moorestown support (single pipe, no BIOS, no stolen memory,
- some other differences)
-- Sort out the bo and ttm code to support userframe buffers and DRM
- interfaces rather than just faking it enough for a framebuffer
+ matters for Moorestown/Medfield
+- Debug Oaktrail/Moorestown support (single pipe, no BIOS on mrst,
+ some other differences)
- Add 2D acceleration via console and DRM
+- Add scrolling acceleration using the GTT to do remapping on the main
+ framebuffer.
+- HDMI testing
+- Oaktrail HDMI and other features
+- Oaktrail MIPI
+- Medfield needs a lot of further love
As per kernel policy and in the interest of the safety of various
kittens there is no support or plans to add hooks for the closed user space
stuff.
-
-
-Why bother ?
-- Proper display configuration
-- Can be made to work on Moorestown where VESA won't
-- Works on systems where the VESA BIOS is bust or the tables are broken
- without hacks
-- 2D acceleration
-
-Currently tested on
-+ Dell Mini 10 100x600
-
-
diff --git a/drivers/staging/gma500/psb_2d.c b/drivers/staging/gma500/accel_2d.c
index 0bd834c982d..14400fcfe8a 100644
--- a/drivers/staging/gma500/psb_2d.c
+++ b/drivers/staging/gma500/accel_2d.c
@@ -1,5 +1,5 @@
/**************************************************************************
- * Copyright (c) 2007, Intel Corporation.
+ * Copyright (c) 2007-2011, Intel Corporation.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
@@ -38,16 +38,23 @@
#include "psb_drv.h"
#include "psb_reg.h"
-#include "psb_drv.h"
-#include "psb_fb.h"
+#include "framebuffer.h"
+/**
+ * psb_spank - reset the 2D engine
+ * @dev_priv: our PSB DRM device
+ *
+ * Soft reset the graphics engine and then reload the necessary registers.
+ * We use this at initialisation time but it will become relevant for
+ * accelerated X later
+ */
void psb_spank(struct drm_psb_private *dev_priv)
{
- PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
+ PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
_PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
_PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
_PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
- (void) PSB_RSGX32(PSB_CR_SOFT_RESET);
+ PSB_RSGX32(PSB_CR_SOFT_RESET);
msleep(1);
@@ -62,16 +69,24 @@ void psb_spank(struct drm_psb_private *dev_priv)
PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
PSB_CR_BIF_CTRL);
(void) PSB_RSGX32(PSB_CR_BIF_CTRL);
- PSB_WSGX32(dev_priv->pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+ PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
}
+/**
+ * psb_2d_wait_available - wait for FIFO room
+ * @dev_priv: our DRM device
+ * @size: size (in dwords) of the command we want to issue
+ *
+ * Wait until there is room to load the FIFO with our data. If the
+ * device is not responding then reset it
+ */
static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
unsigned size)
{
uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
unsigned long t = jiffies + HZ;
- while(avail < size) {
+ while (avail < size) {
avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
if (time_after(jiffies, t)) {
psb_spank(dev_priv);
@@ -81,151 +96,77 @@ static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
return 0;
}
-/* FIXME: Remember if we expose the 2D engine to the DRM we need to serialize
- it with console use */
-
-int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
- unsigned size)
+/**
+ * psbfb_2d_submit - submit a 2D command
+ * @dev_priv: our DRM device
+ * @cmdbuf: command to issue
+ * @size: length (in dwords)
+ *
+ * Issue one or more 2D commands to the accelerator. This needs to be
+ * serialized later when we add the GEM interfaces for acceleration
+ */
+static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
+ unsigned size)
{
int ret = 0;
int i;
unsigned submit_size;
+ mutex_lock(&dev_priv->mutex_2d);
while (size > 0) {
submit_size = (size < 0x60) ? size : 0x60;
size -= submit_size;
ret = psb_2d_wait_available(dev_priv, submit_size);
if (ret)
- return ret;
+ break;
submit_size <<= 2;
- for (i = 0; i < submit_size; i += 4) {
- PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
- }
- (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
- }
- return 0;
-}
-
-static int psb_accel_2d_fillrect(struct drm_psb_private *dev_priv,
- uint32_t dst_offset, uint32_t dst_stride,
- uint32_t dst_format, uint16_t dst_x,
- uint16_t dst_y, uint16_t size_x,
- uint16_t size_y, uint32_t fill)
-{
- uint32_t buffer[10];
- uint32_t *buf;
-
- buf = buffer;
-
- *buf++ = PSB_2D_FENCE_BH;
-
- *buf++ =
- PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
- PSB_2D_DST_STRIDE_SHIFT);
- *buf++ = dst_offset;
-
- *buf++ =
- PSB_2D_BLIT_BH |
- PSB_2D_ROT_NONE |
- PSB_2D_COPYORDER_TL2BR |
- PSB_2D_DSTCK_DISABLE |
- PSB_2D_SRCCK_DISABLE | PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
-
- *buf++ = fill << PSB_2D_FILLCOLOUR_SHIFT;
- *buf++ =
- (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
- PSB_2D_DST_YSTART_SHIFT);
- *buf++ =
- (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
- PSB_2D_DST_YSIZE_SHIFT);
- *buf++ = PSB_2D_FLUSH_BH;
-
- return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
-}
-
-static void psbfb_fillrect_accel(struct fb_info *info,
- const struct fb_fillrect *r)
-{
- struct psb_fbdev *fbdev = info->par;
- struct psb_framebuffer *psbfb = fbdev->pfb;
- struct drm_device *dev = psbfb->base.dev;
- struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- uint32_t offset;
- uint32_t stride;
- uint32_t format;
- if (!fb)
- return;
-
- offset = psbfb->gtt->offset;
- stride = fb->pitch;
+ for (i = 0; i < submit_size; i += 4)
+ PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
- switch (fb->depth) {
- case 8:
- format = PSB_2D_DST_332RGB;
- break;
- case 15:
- format = PSB_2D_DST_555RGB;
- break;
- case 16:
- format = PSB_2D_DST_565RGB;
- break;
- case 24:
- case 32:
- /* this is wrong but since we don't do blending its okay */
- format = PSB_2D_DST_8888ARGB;
- break;
- default:
- /* software fallback */
- cfb_fillrect(info, r);
- return;
+ (void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
}
-
- psb_accel_2d_fillrect(dev_priv,
- offset, stride, format,
- r->dx, r->dy, r->width, r->height, r->color);
+ mutex_unlock(&dev_priv->mutex_2d);
+ return ret;
}
-void psbfb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- if (unlikely(info->state != FBINFO_STATE_RUNNING))
- return;
-
- if (1 || (info->flags & FBINFO_HWACCEL_DISABLED))
- return cfb_fillrect(info, rect);
-
- /*psb_check_power_state(dev, PSB_DEVICE_SGX); */
- psbfb_fillrect_accel(info, rect);
- /* Drop power again here on MRST FIXMEAC */
-}
+/**
+ * psb_accel_2d_copy_direction - compute blit order
+ * @xdir: X direction of move
+ * @ydir: Y direction of move
+ *
+ * Compute the correct order settings to ensure that an overlapping blit
+ * correctly copies all the pixels.
+ */
static u32 psb_accel_2d_copy_direction(int xdir, int ydir)
{
if (xdir < 0)
return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
- PSB_2D_COPYORDER_TR2BL;
+ PSB_2D_COPYORDER_TR2BL;
else
return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
- PSB_2D_COPYORDER_TL2BR;
+ PSB_2D_COPYORDER_TL2BR;
}
-/*
- * @src_offset in bytes
- * @src_stride in bytes
- * @src_format psb 2D format defines
- * @dst_offset in bytes
- * @dst_stride in bytes
- * @dst_format psb 2D format defines
- * @src_x offset in pixels
- * @src_y offset in pixels
- * @dst_x offset in pixels
- * @dst_y offset in pixels
- * @size_x of the copied area
- * @size_y of the copied area
+/**
+ * psb_accel_2d_copy - accelerated 2D copy
+ * @dev_priv: our DRM device
+ * @src_offset: source offset in bytes
+ * @src_stride: source stride in bytes
+ * @src_format: psb 2D format defines
+ * @dst_offset: destination offset in bytes
+ * @dst_stride: destination stride in bytes
+ * @dst_format: psb 2D format defines
+ * @src_x: source offset in pixels
+ * @src_y: source offset in pixels
+ * @dst_x: destination offset in pixels
+ * @dst_y: destination offset in pixels
+ * @size_x: width of the copied area
+ * @size_y: height of the copied area
+ *
+ * Format and issue a 2D accelerated copy command.
*/
static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
uint32_t src_offset, uint32_t src_stride,
@@ -287,11 +228,18 @@ static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
}
+/**
+ * psbfb_copyarea_accel - copyarea acceleration for /dev/fb
+ * @info: our framebuffer
+ * @a: copyarea parameters from the framebuffer core
+ *
+ * Perform a 2D copy via the accelerator
+ */
static void psbfb_copyarea_accel(struct fb_info *info,
const struct fb_copyarea *a)
{
struct psb_fbdev *fbdev = info->par;
- struct psb_framebuffer *psbfb = fbdev->pfb;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_device *dev = psbfb->base.dev;
struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -331,48 +279,56 @@ static void psbfb_copyarea_accel(struct fb_info *info,
return;
}
+ if (!gma_power_begin(dev, false)) {
+ cfb_copyarea(info, a);
+ return;
+ }
psb_accel_2d_copy(dev_priv,
offset, stride, src_format,
offset, stride, dst_format,
a->sx, a->sy, a->dx, a->dy, a->width, a->height);
+ gma_power_end(dev);
}
+/**
+ * psbfb_copyarea - 2D copy interface
+ * @info: our framebuffer
+ * @region: region to copy
+ *
+ * Copy an area of the framebuffer console either by the accelerator
+ * or directly using the cfb helpers according to the request
+ */
void psbfb_copyarea(struct fb_info *info,
const struct fb_copyarea *region)
{
if (unlikely(info->state != FBINFO_STATE_RUNNING))
return;
- if (info->flags & FBINFO_HWACCEL_DISABLED)
+ /* Avoid the 8 pixel erratum */
+ if (region->width == 8 || region->height == 8 ||
+ (info->flags & FBINFO_HWACCEL_DISABLED))
return cfb_copyarea(info, region);
- /* psb_check_power_state(dev, PSB_DEVICE_SGX); */
psbfb_copyarea_accel(info, region);
- /* Need to power back off here for MRST FIXMEAC */
-}
-
-void psbfb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
- /* For now */
- cfb_imageblit(info, image);
}
+/**
+ * psbfb_sync - synchronize 2D
+ * @info: our framebuffer
+ *
+ * Wait for the 2D engine to quiesce so that we can do CPU
+ * access to the framebuffer again
+ */
int psbfb_sync(struct fb_info *info)
{
struct psb_fbdev *fbdev = info->par;
- struct psb_framebuffer *psbfb = fbdev->pfb;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_device *dev = psbfb->base.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long _end = jiffies + DRM_HZ;
int busy = 0;
-#if 0
- /* Just a way to quickly test if cmd issue explodes */
- u32 test[2] = {
- PSB_2D_FENCE_BH,
- };
- psbfb_2d_submit(dev_priv, test, 1);
-#endif
+ mutex_lock(&dev_priv->mutex_2d);
/*
* First idle the 2D engine.
*/
@@ -401,10 +357,56 @@ int psbfb_sync(struct fb_info *info)
_PSB_C2B_STATUS_BUSY) != 0);
out:
+ mutex_unlock(&dev_priv->mutex_2d);
return (busy) ? -EBUSY : 0;
}
-/*
- info->fix.accel = FB_ACCEL_I830;
- info->flags = FBINFO_DEFAULT;
-*/
+int psb_accel_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_2d_op *op = data;
+ u32 *op_ptr = &op->cmd[0];
+ int i;
+ struct drm_gem_object *obj;
+ struct gtt_range *gtt;
+ int err = -EINVAL;
+
+ if (!dev_priv->ops->accel_2d)
+ return -EOPNOTSUPP;
+ if (op->size > PSB_2D_OP_BUFLEN)
+ return -EINVAL;
+
+ /* The GEM object being used. We need to support separate src/dst/etc
+ in the end but for now keep them all the same */
+ obj = drm_gem_object_lookup(dev, file, op->src);
+ if (obj == NULL)
+ return -ENOENT;
+ gtt = container_of(obj, struct gtt_range, gem);
+
+ if (psb_gtt_pin(gtt) < 0)
+ goto bad_2;
+ for (i = 0; i < op->size; i++, op_ptr++) {
+ u32 r = *op_ptr & 0xF0000000;
+ /* Fill in the GTT offsets for the command buffer */
+ if (r == PSB_2D_SRC_SURF_BH ||
+ r == PSB_2D_DST_SURF_BH ||
+ r == PSB_2D_MASK_SURF_BH ||
+ r == PSB_2D_PAT_SURF_BH) {
+ i++;
+ op_ptr++;
+ if (i == op->size)
+ goto bad;
+ if (*op_ptr)
+ goto bad;
+ *op_ptr = gtt->offset;
+ continue;
+ }
+ }
+ psbfb_2d_submit(dev_priv, op->cmd, op->size);
+ err = 0;
+bad:
+ psb_gtt_unpin(gtt);
+bad_2:
+ drm_gem_object_unreference(obj);
+ return err;
+}
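As a sketch of the command-patching convention psb_accel_ioctl() implements, a caller would leave the surface offset dword zero so that the ioctl can substitute the pinned GTT offset of the GEM object named by op->src. The fill sequence below mirrors the psb_accel_2d_fillrect() code removed earlier in this diff, and the drm_psb_2d_op field layout is assumed from its use above; this is illustrative only, not a tested consumer of the interface.

/* Sketch only: build a solid-fill command stream for psb_accel_ioctl(). */
static void example_build_fill(struct drm_psb_2d_op *op, u32 handle,
			       u32 stride, u32 format, u16 x, u16 y,
			       u16 w, u16 h, u32 colour)
{
	u32 *buf = &op->cmd[0];

	*buf++ = PSB_2D_FENCE_BH;
	*buf++ = PSB_2D_DST_SURF_BH | format |
				(stride << PSB_2D_DST_STRIDE_SHIFT);
	*buf++ = 0;		/* dst offset: patched in by the ioctl */
	*buf++ = PSB_2D_BLIT_BH | PSB_2D_ROT_NONE | PSB_2D_COPYORDER_TL2BR |
		 PSB_2D_DSTCK_DISABLE | PSB_2D_SRCCK_DISABLE |
		 PSB_2D_USE_FILL | PSB_2D_ROP3_PATCOPY;
	*buf++ = colour << PSB_2D_FILLCOLOUR_SHIFT;
	*buf++ = (x << PSB_2D_DST_XSTART_SHIFT) |
		 (y << PSB_2D_DST_YSTART_SHIFT);
	*buf++ = (w << PSB_2D_DST_XSIZE_SHIFT) |
		 (h << PSB_2D_DST_YSIZE_SHIFT);
	*buf++ = PSB_2D_FLUSH_BH;

	op->size = buf - &op->cmd[0];	/* 8 dwords, must fit PSB_2D_OP_BUFLEN */
	op->src = handle;		/* GEM handle of the target surface */
}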
diff --git a/drivers/staging/gma500/backlight.c b/drivers/staging/gma500/backlight.c
new file mode 100644
index 00000000000..20793951fca
--- /dev/null
+++ b/drivers/staging/gma500/backlight.c
@@ -0,0 +1,49 @@
+/*
+ * GMA500 Backlight Interface
+ *
+ * Copyright (c) 2009-2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors: Eric Knopp
+ *
+ */
+
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_drv.h"
+#include "intel_bios.h"
+#include "power.h"
+
+int gma_backlight_init(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ return dev_priv->ops->backlight_init(dev);
+#else
+ return 0;
+#endif
+}
+
+void gma_backlight_exit(struct drm_device *dev)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->backlight_device) {
+ dev_priv->backlight_device->props.brightness = 0;
+ backlight_update_status(dev_priv->backlight_device);
+ backlight_device_unregister(dev_priv->backlight_device);
+ }
+#endif
+}
diff --git a/drivers/staging/gma500/cdv_device.c b/drivers/staging/gma500/cdv_device.c
new file mode 100644
index 00000000000..87614e0d396
--- /dev/null
+++ b/drivers/staging/gma500/cdv_device.c
@@ -0,0 +1,351 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "cdv_device.h"
+
+#define VGA_SR_INDEX 0x3c4
+#define VGA_SR_DATA 0x3c5
+
+/* FIXME: should check if we are the active VGA device ?? */
+static void cdv_disable_vga(struct drm_device *dev)
+{
+ u8 sr1;
+ u32 vga_reg;
+
+ vga_reg = VGACNTRL;
+
+ outb(1, VGA_SR_INDEX);
+ sr1 = inb(VGA_SR_DATA);
+ outb(sr1 | 1<<5, VGA_SR_DATA);
+ udelay(300);
+
+ REG_WRITE(vga_reg, VGA_DISP_DISABLE);
+ REG_READ(vga_reg);
+}
+
+static int cdv_output_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ cdv_disable_vga(dev);
+
+ cdv_intel_crt_init(dev, &dev_priv->mode_dev);
+ cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
+
+ /* These bits indicate HDMI not SDVO on CDV, but we don't yet support
+ the HDMI interface */
+ if (REG_READ(SDVOB) & SDVO_DETECTED)
+ cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
+ if (REG_READ(SDVOC) & SDVO_DETECTED)
+ cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
+ return 0;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/*
+ * Poulsbo Backlight Interfaces
+ */
+
+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+
+#define PSB_BLC_PWM_PRECISION_FACTOR 10
+#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
+#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
+
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+
+static int cdv_brightness;
+static struct backlight_device *cdv_backlight_device;
+
+static int cdv_get_brightness(struct backlight_device *bd)
+{
+ /* return locally cached var instead of HW read (due to DPST etc.) */
+ /* FIXME: ideally return actual value in case firmware fiddled with
+ it */
+ return cdv_brightness;
+}
+
+
+static int cdv_backlight_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long core_clock;
+ /* u32 bl_max_freq; */
+ /* unsigned long value; */
+ u16 bl_max_freq;
+ uint32_t value;
+ uint32_t blc_pwm_precision_factor;
+
+ /* get bl_max_freq and pol from dev_priv*/
+ if (!dev_priv->lvds_bl) {
+ dev_err(dev->dev, "Has no valid LVDS backlight info\n");
+ return -ENOENT;
+ }
+ bl_max_freq = dev_priv->lvds_bl->freq;
+ blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+
+ core_clock = dev_priv->core_freq;
+
+ value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+ value *= blc_pwm_precision_factor;
+ value /= bl_max_freq;
+ value /= blc_pwm_precision_factor;
+
+ if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
+ value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
+ return -ERANGE;
+ else {
+ /* FIXME */
+ }
+ return 0;
+}
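To make the units above concrete, a worked example follows. The 200 MHz core clock is one of the values decoded by cdv_get_core_freq() later in this file; the 200 Hz backlight PWM frequency is a hypothetical VBT value for dev_priv->lvds_bl->freq, chosen only for illustration.

/*
 * Illustrative values only:
 *   core_clock  = 200 MHz  (from cdv_get_core_freq())
 *   bl_max_freq = 200 Hz   (hypothetical VBT value)
 *
 *   value = (200 * 1000000) / 32 = 6250000
 *   value *= 10                  = 62500000   (still fits in a u32)
 *   value /= 200                 = 312500
 *   value /= 10                  = 31250
 *
 * 31250 lies between PSB_BLC_MIN_PWM_REG_FREQ (0x2) and
 * PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE), so the range check passes.
 */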
+
+static int cdv_set_brightness(struct backlight_device *bd)
+{
+ int level = bd->props.brightness;
+
+ /* Brightness is a percentage, with 1-100% being valid */
+ if (level < 1)
+ level = 1;
+
+ /*cdv_intel_lvds_set_brightness(dev, level); FIXME */
+ cdv_brightness = level;
+ return 0;
+}
+
+static const struct backlight_ops cdv_ops = {
+ .get_brightness = cdv_get_brightness,
+ .update_status = cdv_set_brightness,
+};
+
+static int cdv_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 100;
+ props.type = BACKLIGHT_PLATFORM;
+
+ cdv_backlight_device = backlight_device_register("psb-bl",
+ NULL, (void *)dev, &cdv_ops, &props);
+ if (IS_ERR(cdv_backlight_device))
+ return PTR_ERR(cdv_backlight_device);
+
+ ret = cdv_backlight_setup(dev);
+ if (ret < 0) {
+ backlight_device_unregister(cdv_backlight_device);
+ cdv_backlight_device = NULL;
+ return ret;
+ }
+ cdv_backlight_device->props.brightness = 100;
+ cdv_backlight_device->props.max_brightness = 100;
+ backlight_update_status(cdv_backlight_device);
+ dev_priv->backlight_device = cdv_backlight_device;
+ return 0;
+}
+
+#endif
+
+/*
+ * Provide the Cedarview specific chip logic and low level methods
+ * for power management
+ *
+ * FIXME: we need to implement the apm/ospm base management bits
+ * for this and the MID devices.
+ */
+
+static inline u32 CDV_MSG_READ32(uint port, uint offset)
+{
+ int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+ uint32_t ret_val = 0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_read_config_dword(pci_root, 0xD4, &ret_val);
+ pci_dev_put(pci_root);
+ return ret_val;
+}
+
+static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
+{
+ int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD4, value);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_dev_put(pci_root);
+}
+
+#define PSB_APM_CMD 0x0
+#define PSB_APM_STS 0x04
+#define PSB_PM_SSC 0x20
+#define PSB_PM_SSS 0x30
+#define PSB_PWRGT_GFX_MASK 0x3
+#define CDV_PWRGT_DISPLAY_CNTR 0x000fc00c
+#define CDV_PWRGT_DISPLAY_STS 0x000fc00c
+
+static void cdv_init_pm(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pwr_cnt;
+ int i;
+
+ dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+ PSB_APMBA) & 0xFFFF;
+ dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+ PSB_OSPMBA) & 0xFFFF;
+
+ /* Force power on for now */
+ pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
+ pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+
+ outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+ for (i = 0; i < 5; i++) {
+ u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
+ if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0)
+ break;
+ udelay(10);
+ }
+ pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
+ pwr_cnt &= ~CDV_PWRGT_DISPLAY_CNTR;
+ outl(pwr_cnt, dev_priv->ospm_base + PSB_PM_SSC);
+ for (i = 0; i < 5; i++) {
+ u32 pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+ if ((pwr_sts & CDV_PWRGT_DISPLAY_STS) == 0)
+ break;
+ udelay(10);
+ }
+}
+
+/**
+ * cdv_save_display_registers - save registers lost on suspend
+ * @dev: our DRM device
+ *
+ * Save the state we need in order to be able to restore the interface
+ * upon resume from suspend
+ *
+ * FIXME: review
+ */
+static int cdv_save_display_registers(struct drm_device *dev)
+{
+ return 0;
+}
+
+/**
+ * cdv_restore_display_registers - restore lost register state
+ * @dev: our DRM device
+ *
+ * Restore register state that was lost during suspend and resume.
+ *
+ * FIXME: review
+ */
+static int cdv_restore_display_registers(struct drm_device *dev)
+{
+ return 0;
+}
+
+static int cdv_power_down(struct drm_device *dev)
+{
+ return 0;
+}
+
+static int cdv_power_up(struct drm_device *dev)
+{
+ return 0;
+}
+
+/* FIXME ? - shared with Poulsbo */
+static void cdv_get_core_freq(struct drm_device *dev)
+{
+ uint32_t clock;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+ pci_read_config_dword(pci_root, 0xD4, &clock);
+ pci_dev_put(pci_root);
+
+ switch (clock & 0x07) {
+ case 0:
+ dev_priv->core_freq = 100;
+ break;
+ case 1:
+ dev_priv->core_freq = 133;
+ break;
+ case 2:
+ dev_priv->core_freq = 150;
+ break;
+ case 3:
+ dev_priv->core_freq = 178;
+ break;
+ case 4:
+ dev_priv->core_freq = 200;
+ break;
+ case 5:
+ case 6:
+ case 7:
+ dev_priv->core_freq = 266;
+ break;
+ default:
+ dev_priv->core_freq = 0;
+ }
+}
+
+static int cdv_chip_setup(struct drm_device *dev)
+{
+ cdv_get_core_freq(dev);
+ gma_intel_opregion_init(dev);
+ psb_intel_init_bios(dev);
+ return 0;
+}
+
+/* CDV is much like Poulsbo but has MID like SGX offsets and PM */
+
+const struct psb_ops cdv_chip_ops = {
+ .name = "Cedartrail",
+ .accel_2d = 0,
+ .pipes = 2,
+ .sgx_offset = MRST_SGX_OFFSET,
+ .chip_setup = cdv_chip_setup,
+
+ .crtc_helper = &cdv_intel_helper_funcs,
+ .crtc_funcs = &cdv_intel_crtc_funcs,
+
+ .output_init = cdv_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = cdv_backlight_init,
+#endif
+
+ .init_pm = cdv_init_pm,
+ .save_regs = cdv_save_display_registers,
+ .restore_regs = cdv_restore_display_registers,
+ .power_down = cdv_power_down,
+ .power_up = cdv_power_up,
+};
diff --git a/drivers/staging/gma500/cdv_device.h b/drivers/staging/gma500/cdv_device.h
new file mode 100644
index 00000000000..2a88b7beb55
--- /dev/null
+++ b/drivers/staging/gma500/cdv_device.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
+extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
+extern void cdv_intel_crt_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void cdv_intel_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev);
+extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev,
+ int reg);
+extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc);
+
+static inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
+{
+ /* Wait for 20ms, i.e. one cycle at 50Hz. */
+ /* FIXME: msleep ?? */
+ mdelay(20);
+}
+
+
diff --git a/drivers/staging/gma500/cdv_intel_crt.c b/drivers/staging/gma500/cdv_intel_crt.c
new file mode 100644
index 00000000000..efda63b97b4
--- /dev/null
+++ b/drivers/staging/gma500/cdv_intel_crt.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+
+static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ u32 temp, reg;
+ reg = ADPA;
+
+ temp = REG_READ(reg);
+ temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ temp &= ~ADPA_DAC_ENABLE;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ temp |= ADPA_DAC_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+ break;
+ }
+
+ REG_WRITE(reg, temp);
+}
+
+static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int max_clock = 0;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* The lowest clock for CDV is 20000 kHz */
+ if (mode->clock < 20000)
+ return MODE_CLOCK_LOW;
+
+ /* The max clock for CDV is 355 MHz instead of 400 MHz */
+ max_clock = 355000;
+ if (mode->clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->hdisplay > 1680 || mode->vdisplay > 1050)
+ return MODE_PANEL;
+
+ return MODE_OK;
+}
+
+static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct psb_intel_crtc *psb_intel_crtc =
+ to_psb_intel_crtc(crtc);
+ int dpll_md_reg;
+ u32 adpa, dpll_md;
+ u32 adpa_reg;
+
+ if (psb_intel_crtc->pipe == 0)
+ dpll_md_reg = DPLL_A_MD;
+ else
+ dpll_md_reg = DPLL_B_MD;
+
+ adpa_reg = ADPA;
+
+ /*
+ * Disable separate mode multiplier used when cloning SDVO to CRT
+ * XXX this needs to be adjusted when we really are cloning
+ */
+ {
+ dpll_md = REG_READ(dpll_md_reg);
+ REG_WRITE(dpll_md_reg,
+ dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+ }
+
+ adpa = 0;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+
+ if (psb_intel_crtc->pipe == 0)
+ adpa |= ADPA_PIPE_A_SELECT;
+ else
+ adpa |= ADPA_PIPE_B_SELECT;
+
+ REG_WRITE(adpa_reg, adpa);
+}
+
+
+/**
+ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+ *
+ * Returns true if a CRT is connected, false if it is disconnected.
+ */
+static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
+ bool force)
+{
+ struct drm_device *dev = connector->dev;
+ u32 hotplug_en;
+ int i, tries = 0, ret = false;
+ u32 adpa_orig;
+
+ /* disable the DAC when doing the hotplug detection */
+
+ adpa_orig = REG_READ(ADPA);
+
+ REG_WRITE(ADPA, adpa_orig & ~(ADPA_DAC_ENABLE));
+
+ /*
+ * On CDV, the CRT detect sequence needs to be done twice
+ * to get a reliable result.
+ */
+ tries = 2;
+
+ hotplug_en = REG_READ(PORT_HOTPLUG_EN);
+ hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
+ hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+
+ for (i = 0; i < tries ; i++) {
+ unsigned long timeout;
+ /* turn on the FORCE_DETECT */
+ REG_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+ timeout = jiffies + msecs_to_jiffies(1000);
+ /* wait for FORCE_DETECT to go off */
+ do {
+ if (!(REG_READ(PORT_HOTPLUG_EN) &
+ CRT_HOTPLUG_FORCE_DETECT))
+ break;
+ msleep(1);
+ } while (time_after(timeout, jiffies));
+ }
+
+ if ((REG_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
+ CRT_HOTPLUG_MONITOR_NONE)
+ ret = true;
+
+ /* Restore the saved ADPA */
+ REG_WRITE(ADPA, adpa_orig);
+ return ret;
+}
+
+static enum drm_connector_status cdv_intel_crt_detect(
+ struct drm_connector *connector, bool force)
+{
+ if (cdv_intel_crt_detect_hotplug(connector, force))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static void cdv_intel_crt_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_output *intel_output = to_psb_intel_output(connector);
+
+ psb_intel_i2c_destroy(intel_output->ddc_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static int cdv_intel_crt_get_modes(struct drm_connector *connector)
+{
+ struct psb_intel_output *intel_output =
+ to_psb_intel_output(connector);
+ return psb_intel_ddc_get_modes(intel_output);
+}
+
+static int cdv_intel_crt_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ return 0;
+}
+
+/*
+ * Routines for controlling stuff on the analog port
+ */
+
+static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
+ .dpms = cdv_intel_crt_dpms,
+ .mode_fixup = cdv_intel_crt_mode_fixup,
+ .prepare = psb_intel_encoder_prepare,
+ .commit = psb_intel_encoder_commit,
+ .mode_set = cdv_intel_crt_mode_set,
+};
+
+static const struct drm_connector_funcs cdv_intel_crt_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = cdv_intel_crt_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = cdv_intel_crt_destroy,
+ .set_property = cdv_intel_crt_set_property,
+};
+
+static const struct drm_connector_helper_funcs
+ cdv_intel_crt_connector_helper_funcs = {
+ .mode_valid = cdv_intel_crt_mode_valid,
+ .get_modes = cdv_intel_crt_get_modes,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
+ .destroy = cdv_intel_crt_enc_destroy,
+};
+
+void cdv_intel_crt_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+
+ struct psb_intel_output *psb_intel_output;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ u32 i2c_reg;
+
+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
+ if (!psb_intel_output)
+ return;
+
+ psb_intel_output->mode_dev = mode_dev;
+ connector = &psb_intel_output->base;
+ drm_connector_init(dev, connector,
+ &cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ encoder = &psb_intel_output->enc;
+ drm_encoder_init(dev, encoder,
+ &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
+
+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
+ &psb_intel_output->enc);
+
+ /* Set up the DDC bus. */
+ i2c_reg = GPIOA;
+ /* Remove the following code for CDV */
+ /*
+ if (dev_priv->crt_ddc_bus != 0)
+ i2c_reg = dev_priv->crt_ddc_bus;
+ */
+ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
+ i2c_reg, "CRTDDC_A");
+ if (!psb_intel_output->ddc_bus) {
+ dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+ "failed.\n");
+ goto failed_ddc;
+ }
+
+ psb_intel_output->type = INTEL_OUTPUT_ANALOG;
+ /*
+ psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
+ psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ */
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_encoder_helper_add(encoder, &cdv_intel_crt_helper_funcs);
+ drm_connector_helper_add(connector,
+ &cdv_intel_crt_connector_helper_funcs);
+
+ drm_sysfs_connector_add(connector);
+
+ return;
+failed_ddc:
+ drm_encoder_cleanup(&psb_intel_output->enc);
+ drm_connector_cleanup(&psb_intel_output->base);
+ kfree(psb_intel_output);
+ return;
+}
diff --git a/drivers/staging/gma500/cdv_intel_display.c b/drivers/staging/gma500/cdv_intel_display.c
new file mode 100644
index 00000000000..7b97c600eff
--- /dev/null
+++ b/drivers/staging/gma500/cdv_intel_display.c
@@ -0,0 +1,1508 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include "framebuffer.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_intel_display.h"
+#include "power.h"
+#include "cdv_device.h"
+
+
+struct cdv_intel_range_t {
+ int min, max;
+};
+
+struct cdv_intel_p2_t {
+ int dot_limit;
+ int p2_slow, p2_fast;
+};
+
+struct cdv_intel_clock_t {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+};
+
+#define INTEL_P2_NUM 2
+
+struct cdv_intel_limit_t {
+ struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
+ struct cdv_intel_p2_t p2;
+};
+
+#define CDV_LIMIT_SINGLE_LVDS_96 0
+#define CDV_LIMIT_SINGLE_LVDS_100 1
+#define CDV_LIMIT_DAC_HDMI_27 2
+#define CDV_LIMIT_DAC_HDMI_96 3
+
+static const struct cdv_intel_limit_t cdv_intel_limits[] = {
+ { /* CDV_SINGLE_LVDS_96MHz */
+ .dot = {.min = 20000, .max = 115500},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 160},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 158},
+ .p = {.min = 28, .max = 140},
+ .p1 = {.min = 2, .max = 10},
+ .p2 = {.dot_limit = 200000,
+ .p2_slow = 14, .p2_fast = 14},
+ },
+ { /* CDV_SINGLE_LVDS_100MHz */
+ .dot = {.min = 20000, .max = 115500},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 160},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 158},
+ .p = {.min = 28, .max = 140},
+ .p1 = {.min = 2, .max = 10},
+ /* The single-channel range is 25-112 MHz, and dual-channel
+ * is 80-224 MHz. Prefer single channel as much as possible.
+ */
+ .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+ },
+ { /* CDV_DAC_HDMI_27MHz */
+ .dot = {.min = 20000, .max = 400000},
+ .vco = {.min = 1809000, .max = 3564000},
+ .n = {.min = 1, .max = 1},
+ .m = {.min = 67, .max = 132},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 65, .max = 130},
+ .p = {.min = 5, .max = 90},
+ .p1 = {.min = 1, .max = 9},
+ .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+ },
+ { /* CDV_DAC_HDMI_96MHz */
+ .dot = {.min = 20000, .max = 400000},
+ .vco = {.min = 1800000, .max = 3600000},
+ .n = {.min = 2, .max = 6},
+ .m = {.min = 60, .max = 160},
+ .m1 = {.min = 0, .max = 0},
+ .m2 = {.min = 58, .max = 158},
+ .p = {.min = 5, .max = 100},
+ .p1 = {.min = 1, .max = 10},
+ .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+ },
+};
+
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W && !in_dbg_master()) \
+ msleep(W); \
+ } \
+ ret__; \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+
+
+static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
+{
+ int ret;
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle before read\n");
+ return ret;
+ }
+
+ REG_WRITE(SB_ADDR, reg);
+ REG_WRITE(SB_PCKT,
+ SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
+ SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+ SET_FIELD(0xf, SB_BYTE_ENABLE));
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle after read\n");
+ return ret;
+ }
+
+ *val = REG_READ(SB_DATA);
+
+ return 0;
+}
+
+static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
+{
+ int ret;
+ static bool dpio_debug = true;
+ u32 temp;
+
+ if (dpio_debug) {
+ if (cdv_sb_read(dev, reg, &temp) == 0)
+ DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
+ DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
+ }
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle before write\n");
+ return ret;
+ }
+
+ REG_WRITE(SB_ADDR, reg);
+ REG_WRITE(SB_DATA, val);
+ REG_WRITE(SB_PCKT,
+ SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
+ SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+ SET_FIELD(0xf, SB_BYTE_ENABLE));
+
+ ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+ if (ret) {
+ DRM_ERROR("timeout waiting for SB to idle after write\n");
+ return ret;
+ }
+
+ if (dpio_debug) {
+ if (cdv_sb_read(dev, reg, &temp) == 0)
+ DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
+ }
+
+ return 0;
+}
+
+/* Reset the DPIO configuration register. The BIOS does this at every
+ * mode set.
+ */
+static void cdv_sb_reset(struct drm_device *dev)
+{
+
+ REG_WRITE(DPIO_CFG, 0);
+ REG_READ(DPIO_CFG);
+ REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
+}
+
+/* Unlike most Intel display engines, on Cedarview the DPLL registers
+ * are behind this sideband bus. They must be programmed while the
+ * DPLL reference clock is on in the DPLL control register, but before
+ * the DPLL is enabled in the DPLL control register.
+ */
+static int
+cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
+ struct cdv_intel_clock_t *clock)
+{
+ struct psb_intel_crtc *psb_crtc =
+ to_psb_intel_crtc(crtc);
+ int pipe = psb_crtc->pipe;
+ u32 m, n_vco, p;
+ int ret = 0;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ u32 ref_value;
+
+ cdv_sb_reset(dev);
+
+ if ((REG_READ(dpll_reg) & DPLL_SYNCLOCK_ENABLE) == 0) {
+ DRM_ERROR("Attempting to set DPLL with refclk disabled\n");
+ return -EBUSY;
+ }
+
+ /* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
+ ref_value = 0x68A701;
+
+ cdv_sb_write(dev, SB_REF_SFR(pipe), ref_value);
+
+ /* We don't know what the other fields of these regs are, so
+ * leave them in place.
+ */
+ ret = cdv_sb_read(dev, SB_M(pipe), &m);
+ if (ret)
+ return ret;
+ m &= ~SB_M_DIVIDER_MASK;
+ m |= ((clock->m2) << SB_M_DIVIDER_SHIFT);
+ ret = cdv_sb_write(dev, SB_M(pipe), m);
+ if (ret)
+ return ret;
+
+ ret = cdv_sb_read(dev, SB_N_VCO(pipe), &n_vco);
+ if (ret)
+ return ret;
+
+ /* Follow the BIOS to program the N_DIVIDER REG */
+ n_vco &= 0xFFFF;
+ n_vco |= 0x107;
+ n_vco &= ~(SB_N_VCO_SEL_MASK |
+ SB_N_DIVIDER_MASK |
+ SB_N_CB_TUNE_MASK);
+
+ n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT);
+
+ if (clock->vco < 2250000) {
+ n_vco |= (2 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (0 << SB_N_VCO_SEL_SHIFT);
+ } else if (clock->vco < 2750000) {
+ n_vco |= (1 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (1 << SB_N_VCO_SEL_SHIFT);
+ } else if (clock->vco < 3300000) {
+ n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (2 << SB_N_VCO_SEL_SHIFT);
+ } else {
+ n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+ n_vco |= (3 << SB_N_VCO_SEL_SHIFT);
+ }
+
+ ret = cdv_sb_write(dev, SB_N_VCO(pipe), n_vco);
+ if (ret)
+ return ret;
+
+ ret = cdv_sb_read(dev, SB_P(pipe), &p);
+ if (ret)
+ return ret;
+ p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
+ p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
+ switch (clock->p2) {
+ case 5:
+ p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
+ break;
+ case 10:
+ p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
+ break;
+ case 14:
+ p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
+ break;
+ case 7:
+ p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
+ break;
+ default:
+ DRM_ERROR("Bad P2 clock: %d\n", clock->p2);
+ return -EINVAL;
+ }
+ ret = cdv_sb_write(dev, SB_P(pipe), p);
+ if (ret)
+ return ret;
+
+ /* Always program the lane registers for pipe A */
+ if (pipe == 0) {
+ /* Program the Lane0/1 for HDMI B */
+ u32 lane_reg, lane_value;
+
+ lane_reg = PSB_LANE0;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE;
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE1;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE;
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ /* Program the Lane2/3 for HDMI C */
+ lane_reg = PSB_LANE2;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE;
+ cdv_sb_write(dev, lane_reg, lane_value);
+
+ lane_reg = PSB_LANE3;
+ cdv_sb_read(dev, lane_reg, &lane_value);
+ lane_value &= ~(LANE_PLL_MASK);
+ lane_value |= LANE_PLL_ENABLE;
+ cdv_sb_write(dev, lane_reg, lane_value);
+ }
+
+ return 0;
+}
+
+/*
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *l_entry;
+
+ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+ struct psb_intel_output *psb_intel_output =
+ to_psb_intel_output(l_entry);
+ if (psb_intel_output->type == type)
+ return true;
+ }
+ }
+ return false;
+}
+
+static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
+ int refclk)
+{
+ const struct cdv_intel_limit_t *limit;
+ if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ /*
+ * Only single-channel LVDS is currently supported on CDV. If that
+ * proves incorrect, dual-channel LVDS support will need to be added.
+ */
+ if (refclk == 96000)
+ limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
+ else
+ limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
+ } else {
+ if (refclk == 27000)
+ limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
+ else
+ limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_96];
+ }
+ return limit;
+}
+
+/* m1 is reserved as 0 in CDV, n is a ring counter */
+static void cdv_intel_clock(struct drm_device *dev,
+ int refclk, struct cdv_intel_clock_t *clock)
+{
+ clock->m = clock->m2 + 2;
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = (refclk * clock->m) / clock->n;
+ clock->dot = clock->vco / clock->p;
+}
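A worked example of this derivation, using illustrative divider values (not taken from any real panel) that satisfy the CDV_LIMIT_SINGLE_LVDS_96 entry of the limit table above:

/*
 * Illustrative values only:
 *   refclk = 96000 kHz, n = 2, m2 = 58, p1 = 2, p2 = 14
 *
 *   m   = m2 + 2         = 60
 *   p   = p1 * p2        = 28
 *   vco = refclk * m / n = 96000 * 60 / 2 = 2880000 kHz
 *   dot = vco / p        = 2880000 / 28  ~= 102857 kHz
 *
 * Both vco and dot land inside the .vco and .dot ranges of that limit
 * entry, so cdv_intel_PLL_is_valid() below would accept this clock.
 */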
+
+
+#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
+static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
+ const struct cdv_intel_limit_t *limit,
+ struct cdv_intel_clock_t *clock)
+{
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ INTELPllInvalid("p1 out of range\n");
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ INTELPllInvalid("p out of range\n");
+ /* unnecessary to check the range of m(m1/M2)/n again */
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ INTELPllInvalid("vco out of range\n");
+ /* XXX: We may need to be checking "Dot clock"
+ * depending on the multiplier, connector, etc.,
+ * rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ INTELPllInvalid("dot out of range\n");
+
+ return true;
+}
+
+static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
+ int refclk,
+ struct cdv_intel_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ struct cdv_intel_clock_t clock;
+ const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
+ int err = target;
+
+
+ if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+ /*
+ * For LVDS, if the panel is on, just rely on its current
+ * settings for dual-channel. We haven't figured out how to
+ * reliably set up different single/dual channel state, if we
+ * even can.
+ */
+ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+ clock.m1 = 0;
+ /* m1 is reserved as 0 in CDV, n is a ring counter.
+ So skip the m1 loop */
+ for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
+ for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
+ clock.m2++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max;
+ clock.p1++) {
+ int this_err;
+
+ cdv_intel_clock(dev, refclk, &clock);
+
+ if (!cdv_intel_PLL_is_valid(crtc,
+ limit, &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+
+ return err != target;
+}
+
+int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+ int pipe = psb_intel_crtc->pipe;
+ unsigned long start, offset;
+ int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
+ int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ u32 dspcntr;
+ int ret = 0;
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ dev_err(dev->dev, "No FB bound\n");
+ goto psb_intel_pipe_cleaner;
+ }
+
+
+ /* We are displaying this buffer, make sure it is actually loaded
+ into the GTT */
+ ret = psb_gtt_pin(psbfb->gtt);
+ if (ret < 0)
+ goto psb_intel_pipe_set_base_exit;
+ start = psbfb->gtt->offset;
+ offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+
+ REG_WRITE(dspstride, crtc->fb->pitch);
+
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown color depth\n");
+ ret = -EINVAL;
+ goto psb_intel_pipe_set_base_exit;
+ }
+ REG_WRITE(dspcntr_reg, dspcntr);
+
+ dev_dbg(dev->dev,
+ "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
+
+ REG_WRITE(dspbase, offset);
+ REG_READ(dspbase);
+ REG_WRITE(dspsurf, start);
+ REG_READ(dspsurf);
+
+psb_intel_pipe_cleaner:
+ /* If there was a previous display we can now unpin it */
+ if (old_fb)
+ psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+psb_intel_pipe_set_base_exit:
+ gma_power_end(dev);
+ return ret;
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ u32 temp;
+ bool enabled;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable the DPLL */
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ REG_WRITE(dpll_reg, temp);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ }
+
+ /* Jim Bish - switch plane and pipe per Scott */
+ /* Enable the plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(dspcntr_reg,
+ temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ }
+
+ udelay(150);
+
+ /* Enable the pipe */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) == 0)
+ REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+
+ psb_intel_crtc_load_lut(crtc);
+
+ /* Give the overlay scaler a chance to enable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* Give the overlay scaler a chance to disable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Jim Bish - changed pipe/plane here as well. */
+
+ /* Wait for vblank for the disable to take effect */
+ cdv_intel_wait_for_vblank(dev);
+
+ /* Next, disable display pipes */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+ REG_READ(pipeconf_reg);
+ }
+
+ /* Wait for vblank for the disable to take effect. */
+ cdv_intel_wait_for_vblank(dev);
+
+ udelay(150);
+
+ /* Disable display plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(dspcntr_reg,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_READ(dspbase_reg);
+ }
+
+ temp = REG_READ(dpll_reg);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+ REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ }
+
+ /* Wait for the clocks to turn off. */
+ udelay(150);
+ break;
+ }
+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+ /*Set FIFO Watermarks*/
+ REG_WRITE(DSPARB, 0x3F3E);
+}
+
+static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void cdv_intel_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ /* lvds has its own version of prepare see cdv_intel_lvds_prepare */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void cdv_intel_encoder_commit(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ /* lvds has its own version of commit see cdv_intel_lvds_commit */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int cdv_intel_panel_fitter_pipe(struct drm_device *dev)
+{
+ u32 pfit_control;
+
+ pfit_control = REG_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+ return (pfit_control >> 29) & 0x3;
+}
+
+static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
+ int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int refclk;
+ struct cdv_intel_clock_t clock;
+ u32 dpll = 0, dspcntr, pipeconf;
+ bool ok, is_sdvo = false, is_dvo = false;
+ bool is_crt = false, is_lvds = false, is_tv = false;
+ bool is_hdmi = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ struct psb_intel_output *psb_intel_output =
+ to_psb_intel_output(connector);
+
+ if (!connector->encoder
+ || connector->encoder->crtc != crtc)
+ continue;
+
+ switch (psb_intel_output->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_SDVO:
+ is_sdvo = true;
+ break;
+ case INTEL_OUTPUT_DVO:
+ is_dvo = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ is_crt = true;
+ break;
+ case INTEL_OUTPUT_HDMI:
+ is_hdmi = true;
+ break;
+ }
+ }
+
+ refclk = 96000;
+
+ /* Hack: use 27MHz as the reference clock for both CRT and HDMI */
+ if (is_crt || is_hdmi)
+ refclk = 27000;
+
+ drm_mode_debug_printmodeline(adjusted_mode);
+
+ ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+ &clock);
+ if (!ok) {
+ dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+ return 0;
+ }
+
+ dpll = DPLL_VGA_MODE_DIS;
+ if (is_tv) {
+ /* XXX: just matching BIOS for now */
+/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
+ dpll |= 3;
+ }
+ dpll |= PLL_REF_INPUT_DREFCLK;
+
+ dpll |= DPLL_SYNCLOCK_ENABLE;
+ dpll |= DPLL_VGA_MODE_DIS;
+ if (is_lvds)
+ dpll |= DPLLB_MODE_LVDS;
+ else
+ dpll |= DPLLB_MODE_DAC_SERIAL;
+ /* dpll |= (2 << 11); */
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(pipeconf_reg);
+
+ /* Set up the display plane register */
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ if (pipe == 0)
+ dspcntr |= DISPPLANE_SEL_PIPE_A;
+ else
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+ pipeconf |= PIPEACONF_ENABLE;
+
+ REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
+ REG_READ(dpll_reg);
+
+ cdv_dpll_set_clock_cdv(dev, crtc, &clock);
+
+ udelay(150);
+
+
+ /* The LVDS pin pair needs to be on before the DPLLs are enabled.
+ * This is an exception to the general rule that mode_set doesn't turn
+ * things on.
+ */
+ if (is_lvds) {
+ u32 lvds = REG_READ(LVDS);
+
+ lvds |=
+ LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
+ LVDS_PIPEB_SELECT;
+ /* Set the B0-B3 data pairs corresponding to
+ * whether we're going to
+ * set the DPLLs for dual-channel mode or not.
+ */
+ if (clock.p2 == 7)
+ lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+ else
+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+
+ /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+ * appropriately here, but we need to look more
+ * thoroughly into how panels behave in the two modes.
+ */
+
+ REG_WRITE(LVDS, lvds);
+ REG_READ(LVDS);
+ }
+
+ dpll |= DPLL_VCO_ENABLE;
+
+ /* Disable the panel fitter if it was on our pipe */
+ if (cdv_intel_panel_fitter_pipe(dev) == pipe)
+ REG_WRITE(PFIT_CONTROL, 0);
+
+ DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ drm_mode_debug_printmodeline(mode);
+
+ REG_WRITE(dpll_reg,
+ (REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to stabilize. */
+ udelay(150); /* 42 usec w/o calibration, 110 with. rounded up. */
+
+ if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
+ dev_err(dev->dev, "Failed to get DPLL lock\n");
+ return -EBUSY;
+ }
+
+ {
+ int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+ REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+ }
+
+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ REG_WRITE(dspsize_reg,
+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(dsppos_reg, 0);
+ REG_WRITE(pipesrc_reg,
+ ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs =
+ crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ cdv_intel_wait_for_vblank(dev);
+
+ return 0;
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int palreg = PALETTE_A;
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled)
+ return;
+
+ switch (psb_intel_crtc->pipe) {
+ case 0:
+ break;
+ case 1:
+ palreg = PALETTE_B;
+ break;
+ case 2:
+ palreg = PALETTE_C;
+ break;
+ default:
+ dev_err(dev->dev, "Illegal Pipe Number.\n");
+ return;
+ }
+
+ if (gma_power_begin(dev, false)) {
+ for (i = 0; i < 256; i++) {
+ REG_WRITE(palreg + 4 * i,
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]));
+ }
+ gma_power_end(dev);
+ } else {
+ for (i = 0; i < 256; i++) {
+ dev_priv->save_palette_a[i] =
+ ((psb_intel_crtc->lut_r[i] +
+ psb_intel_crtc->lut_adj[i]) << 16) |
+ ((psb_intel_crtc->lut_g[i] +
+ psb_intel_crtc->lut_adj[i]) << 8) |
+ (psb_intel_crtc->lut_b[i] +
+ psb_intel_crtc->lut_adj[i]);
+ }
+
+ }
+}
+
+/**
+ * Save HW states of the given crtc
+ */
+static void cdv_intel_crtc_save(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_psb_private *dev_priv =
+ (struct drm_psb_private *)dev->dev_private; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+ int pipeA = (psb_intel_crtc->pipe == 0);
+ uint32_t paletteReg;
+ int i;
+
+ if (!crtc_state) {
+ dev_dbg(dev->dev, "No CRTC state found\n");
+ return;
+ }
+
+ crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
+ crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
+ crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
+ crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
+ crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
+ crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
+ crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
+ crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
+ crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
+ crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
+ crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
+ crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
+ crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+
+ /*NOTE: DSPSIZE DSPPOS only for psb*/
+ crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
+ crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+
+ crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+
+ DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+ crtc_state->saveDSPCNTR,
+ crtc_state->savePIPECONF,
+ crtc_state->savePIPESRC,
+ crtc_state->saveFP0,
+ crtc_state->saveFP1,
+ crtc_state->saveDPLL,
+ crtc_state->saveHTOTAL,
+ crtc_state->saveHBLANK,
+ crtc_state->saveHSYNC,
+ crtc_state->saveVTOTAL,
+ crtc_state->saveVBLANK,
+ crtc_state->saveVSYNC,
+ crtc_state->saveDSPSTRIDE,
+ crtc_state->saveDSPSIZE,
+ crtc_state->saveDSPPOS,
+ crtc_state->saveDSPBASE
+ );
+
+ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ for (i = 0; i < 256; ++i)
+ crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+}
+
+/**
+ * Restore HW states of the given crtc
+ */
+static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_psb_private * dev_priv =
+ (struct drm_psb_private *)dev->dev_private; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+ /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
+ int pipeA = (psb_intel_crtc->pipe == 0);
+ uint32_t paletteReg;
+ int i;
+
+ if (!crtc_state) {
+ dev_dbg(dev->dev, "No crtc state\n");
+ return;
+ }
+
+ DRM_DEBUG(
+ "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+ REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
+ REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
+ REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
+ REG_READ(pipeA ? FPA0 : FPB0),
+ REG_READ(pipeA ? FPA1 : FPB1),
+ REG_READ(pipeA ? DPLL_A : DPLL_B),
+ REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
+ REG_READ(pipeA ? HBLANK_A : HBLANK_B),
+ REG_READ(pipeA ? HSYNC_A : HSYNC_B),
+ REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
+ REG_READ(pipeA ? VBLANK_A : VBLANK_B),
+ REG_READ(pipeA ? VSYNC_A : VSYNC_B),
+ REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
+ REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
+ REG_READ(pipeA ? DSPAPOS : DSPBPOS),
+ REG_READ(pipeA ? DSPABASE : DSPBBASE)
+ );
+
+ DRM_DEBUG(
+ "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+ crtc_state->saveDSPCNTR,
+ crtc_state->savePIPECONF,
+ crtc_state->savePIPESRC,
+ crtc_state->saveFP0,
+ crtc_state->saveFP1,
+ crtc_state->saveDPLL,
+ crtc_state->saveHTOTAL,
+ crtc_state->saveHBLANK,
+ crtc_state->saveHSYNC,
+ crtc_state->saveVTOTAL,
+ crtc_state->saveVBLANK,
+ crtc_state->saveVSYNC,
+ crtc_state->saveDSPSTRIDE,
+ crtc_state->saveDSPSIZE,
+ crtc_state->saveDSPPOS,
+ crtc_state->saveDSPBASE
+ );
+
+
+ if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+ REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+ REG_READ(pipeA ? DPLL_A : DPLL_B);
+ DRM_DEBUG("write dpll: %x\n",
+ REG_READ(pipeA ? DPLL_A : DPLL_B));
+ udelay(150);
+ }
+
+ REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
+ REG_READ(pipeA ? FPA0 : FPB0);
+
+ REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
+ REG_READ(pipeA ? FPA1 : FPB1);
+
+ REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
+ REG_READ(pipeA ? DPLL_A : DPLL_B);
+ udelay(150);
+
+ REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
+ REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
+ REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
+ REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
+ REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
+ REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
+ REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+
+ REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
+ REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+
+ REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
+ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+ REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
+ REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+
+ cdv_intel_wait_for_vblank(dev);
+
+ paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ for (i = 0; i < 256; ++i)
+ REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
+}
+
+static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+ uint32_t temp;
+ size_t addr = 0;
+ struct gtt_range *gt;
+ struct drm_gem_object *obj;
+ int ret;
+
+ /* if we want to turn off the cursor, ignore width and height */
+ if (!handle) {
+ /* turn off the cursor */
+ temp = CURSOR_MODE_DISABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, 0);
+ gma_power_end(dev);
+ }
+
+ /* unpin the old GEM object */
+ if (psb_intel_crtc->cursor_obj) {
+ gt = container_of(psb_intel_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+ psb_intel_crtc->cursor_obj = NULL;
+ }
+
+ return 0;
+ }
+
+ /* Currently we only support 64x64 cursors */
+ if (width != 64 || height != 64) {
+ dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (obj->size < width * height * 4) {
+ dev_dbg(dev->dev, "buffer is to small\n");
+ return -ENOMEM;
+ }
+
+ gt = container_of(obj, struct gtt_range, gem);
+
+ /* Pin the memory into the GTT */
+ ret = psb_gtt_pin(gt);
+ if (ret) {
+ dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+ return ret;
+ }
+
+ addr = gt->offset; /* Or resource.start ??? */
+
+ psb_intel_crtc->cursor_addr = addr;
+
+ temp = 0;
+ /* set the pipe for the cursor */
+ temp |= (pipe << 28);
+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, addr);
+ gma_power_end(dev);
+ }
+
+ /* unpin the old GEM object and remember the new one */
+ if (psb_intel_crtc->cursor_obj) {
+ gt = container_of(psb_intel_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+ }
+ psb_intel_crtc->cursor_obj = obj;
+ return 0;
+}
+
+static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ uint32_t temp = 0;
+ uint32_t adder;
+
+
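+ /*
+ * The cursor position registers use sign/magnitude encoding: one
+ * sign bit plus the absolute value for each axis, so negative
+ * coordinates set the sign bit and are then negated.
+ */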
+ if (x < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+ x = -x;
+ }
+ if (y < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+ y = -y;
+ }
+
+ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+ adder = psb_intel_crtc->cursor_addr;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
+ gma_power_end(dev);
+ }
+ return 0;
+}
+
+static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+ u16 *green, u16 *blue, uint32_t start, uint32_t size)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int i;
+ int end = (start + size > 256) ? 256 : start + size;
+
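+ /*
+ * The DRM core supplies 16-bit per channel gamma values; the
+ * hardware LUT is 8 bits per channel, so keep only the high byte.
+ */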
+ for (i = start; i < end; i++) {
+ psb_intel_crtc->lut_r[i] = red[i] >> 8;
+ psb_intel_crtc->lut_g[i] = green[i] >> 8;
+ psb_intel_crtc->lut_b[i] = blue[i] >> 8;
+ }
+
+ cdv_intel_crtc_load_lut(crtc);
+}
+
+static int cdv_crtc_set_config(struct drm_mode_set *set)
+{
+ int ret = 0;
+ struct drm_device *dev = set->crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->rpm_enabled)
+ return drm_crtc_helper_set_config(set);
+
+ pm_runtime_forbid(&dev->pdev->dev);
+
+ ret = drm_crtc_helper_set_config(set);
+
+ pm_runtime_allow(&dev->pdev->dev);
+
+ return ret;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+
+/* FIXME: why are we using this, should it be cdv_ in this tree ? */
+
+static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
+{
+ clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+ clock->p = clock->p1 * clock->p2;
+ clock->vco = refclk * clock->m / (clock->n + 2);
+ clock->dot = clock->vco / clock->p;
+}
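+
+/*
+ * Illustrative example (divisor values made up purely for the
+ * arithmetic): with refclk = 48000 kHz, m1 = 10, m2 = 8, n = 3,
+ * p1 = 2, p2 = 4 we get m = 5 * 12 + 10 = 70,
+ * vco = 48000 * 70 / 5 = 672000 kHz, p = 8 and therefore
+ * dot = 672000 / 8 = 84000 kHz, i.e. an 84 MHz pixel clock.
+ */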
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+static int cdv_intel_crtc_clock_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ u32 dpll;
+ u32 fp;
+ struct cdv_intel_clock_t clock;
+ bool is_lvds;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (gma_power_begin(dev, false)) {
+ dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+ else
+ fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+ is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+ gma_power_end(dev);
+ } else {
+ dpll = (pipe == 0) ?
+ dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
+
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = (pipe == 0) ?
+ dev_priv->saveFPA0 :
+ dev_priv->saveFPB0;
+ else
+ fp = (pipe == 0) ?
+ dev_priv->saveFPA1 :
+ dev_priv->saveFPB1;
+
+ is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
+ }
+
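+ /*
+ * Decode the divisors: m1/m2/n come from the FP register, while
+ * p1 and p2 are recovered from the DPLL control bits, with the
+ * encoding differing between LVDS and non-LVDS outputs.
+ */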
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+
+ if (is_lvds) {
+ clock.p1 =
+ ffs((dpll &
+ DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+ if (clock.p1 == 0) {
+ clock.p1 = 4;
+ dev_err(dev->dev, "PLL %d\n", dpll);
+ }
+ clock.p2 = 14;
+
+ if ((dpll & PLL_REF_INPUT_MASK) ==
+ PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+ /* XXX: might not be 66MHz */
+ i8xx_clock(66000, &clock);
+ } else
+ i8xx_clock(48000, &clock);
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 =
+ ((dpll &
+ DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+
+ i8xx_clock(48000, &clock);
+ }
+
+ /* XXX: It would be nice to validate the clocks, but we can't reuse
+ * i830PllIsValid() because it relies on the xf86_config connector
+ * configuration being accurate, which it isn't necessarily.
+ */
+
+ return clock.dot;
+}
+
+/** Returns the currently programmed mode of the given pipe. */
+struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+ struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ struct drm_display_mode *mode;
+ int htot;
+ int hsync;
+ int vtot;
+ int vsync;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (gma_power_begin(dev, false)) {
+ htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
+ hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
+ vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
+ vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ gma_power_end(dev);
+ } else {
+ htot = (pipe == 0) ?
+ dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
+ hsync = (pipe == 0) ?
+ dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
+ vtot = (pipe == 0) ?
+ dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
+ vsync = (pipe == 0) ?
+ dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
+ }
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
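+ /*
+ * Each timing register packs two fields: bits 15:0 hold one value
+ * minus one (active/start) and bits 31:16 hold the other minus one
+ * (total/end), hence the shifts and +1 adjustments below.
+ */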
+ mode->clock = cdv_intel_crtc_clock_get(dev, crtc);
+ mode->hdisplay = (htot & 0xffff) + 1;
+ mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+ mode->hsync_start = (hsync & 0xffff) + 1;
+ mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+ mode->vdisplay = (vtot & 0xffff) + 1;
+ mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+ mode->vsync_start = (vsync & 0xffff) + 1;
+ mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ return mode;
+}
+
+static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+ kfree(psb_intel_crtc->crtc_state);
+ drm_crtc_cleanup(crtc);
+ kfree(psb_intel_crtc);
+}
+
+const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
+ .dpms = cdv_intel_crtc_dpms,
+ .mode_fixup = cdv_intel_crtc_mode_fixup,
+ .mode_set = cdv_intel_crtc_mode_set,
+ .mode_set_base = cdv_intel_pipe_set_base,
+ .prepare = cdv_intel_crtc_prepare,
+ .commit = cdv_intel_crtc_commit,
+};
+
+const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
+ .save = cdv_intel_crtc_save,
+ .restore = cdv_intel_crtc_restore,
+ .cursor_set = cdv_intel_crtc_cursor_set,
+ .cursor_move = cdv_intel_crtc_cursor_move,
+ .gamma_set = cdv_intel_crtc_gamma_set,
+ .set_config = cdv_crtc_set_config,
+ .destroy = cdv_intel_crtc_destroy,
+};
+
+/*
+ * Set the default value of cursor control and base register
+ * to zero. This is a workaround for h/w defect on oaktrail
+ */
+void cdv_intel_cursor_init(struct drm_device *dev, int pipe)
+{
+ uint32_t control;
+ uint32_t base;
+
+ switch (pipe) {
+ case 0:
+ control = CURACNTR;
+ base = CURABASE;
+ break;
+ case 1:
+ control = CURBCNTR;
+ base = CURBBASE;
+ break;
+ case 2:
+ control = CURCCNTR;
+ base = CURCBASE;
+ break;
+ default:
+ return;
+ }
+
+ REG_WRITE(control, 0);
+ REG_WRITE(base, 0);
+}
+
diff --git a/drivers/staging/gma500/cdv_intel_hdmi.c b/drivers/staging/gma500/cdv_intel_hdmi.c
new file mode 100644
index 00000000000..cbca2b0c7d5
--- /dev/null
+++ b/drivers/staging/gma500/cdv_intel_hdmi.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ *
+ * FIXME:
+ * We should probably make this generic and share it with Medfield
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "psb_intel_drv.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+#include <linux/pm_runtime.h>
+
+/* hdmi control bits */
+#define HDMI_NULL_PACKETS_DURING_VSYNC (1 << 9)
+#define HDMI_BORDER_ENABLE (1 << 7)
+#define HDMI_AUDIO_ENABLE (1 << 6)
+#define HDMI_VSYNC_ACTIVE_HIGH (1 << 4)
+#define HDMI_HSYNC_ACTIVE_HIGH (1 << 3)
+/* hdmi-b control bits */
+#define HDMIB_PIPE_B_SELECT (1 << 30)
+
+
+struct mid_intel_hdmi_priv {
+ u32 hdmi_reg;
+ u32 save_HDMIB;
+ bool has_hdmi_sink;
+ bool has_hdmi_audio;
+ /* Set when a hotplug event is detected */
+ bool hdmi_device_connected;
+ struct mdfld_hdmi_i2c *i2c_bus;
+ struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */
+ struct drm_device *dev;
+};
+
+static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+ struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
+ u32 hdmib;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+
+ hdmib = (2 << 10);
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ hdmib |= HDMI_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
+
+ if (intel_crtc->pipe == 1)
+ hdmib |= HDMIB_PIPE_B_SELECT;
+
+ if (hdmi_priv->has_hdmi_audio) {
+ hdmib |= HDMI_AUDIO_ENABLE;
+ hdmib |= HDMI_NULL_PACKETS_DURING_VSYNC;
+ }
+
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmib);
+ REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+ struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
+ u32 hdmib;
+
+ hdmib = REG_READ(hdmi_priv->hdmi_reg);
+
+ if (mode != DRM_MODE_DPMS_ON)
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmib & ~HDMIB_PORT_EN);
+ else
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmib | HDMIB_PORT_EN);
+ REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static void cdv_hdmi_save(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct psb_intel_output *output = to_psb_intel_output(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
+
+ hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static void cdv_hdmi_restore(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct psb_intel_output *output = to_psb_intel_output(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
+
+ REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
+ REG_READ(hdmi_priv->hdmi_reg);
+}
+
+static enum drm_connector_status cdv_hdmi_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct psb_intel_output *psb_intel_output =
+ to_psb_intel_output(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_output->dev_priv;
+ struct edid *edid = NULL;
+ enum drm_connector_status status = connector_status_disconnected;
+
+ edid = drm_get_edid(&psb_intel_output->base,
+ psb_intel_output->hdmi_i2c_adapter);
+
+ hdmi_priv->has_hdmi_sink = false;
+ hdmi_priv->has_hdmi_audio = false;
+ if (edid) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ hdmi_priv->has_hdmi_sink =
+ drm_detect_hdmi_monitor(edid);
+ hdmi_priv->has_hdmi_audio =
+ drm_detect_monitor_audio(edid);
+ }
+
+ psb_intel_output->base.display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+ return status;
+}
+
+static int cdv_hdmi_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_encoder *encoder = connector->encoder;
+
+ if (!strcmp(property->name, "scaling mode") && encoder) {
+ struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc);
+ bool centre;
+ uint64_t curValue;
+
+ if (!crtc)
+ return -1;
+
+ switch (value) {
+ case DRM_MODE_SCALE_FULLSCREEN:
+ break;
+ case DRM_MODE_SCALE_NO_SCALE:
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ return -1;
+ }
+
+ if (drm_connector_property_get_value(connector,
+ property, &curValue))
+ return -1;
+
+ if (curValue == value)
+ return 0;
+
+ if (drm_connector_property_set_value(connector,
+ property, value))
+ return -1;
+
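+ /*
+ * Going to or from "no scale" triggers a full mode set; any other
+ * scaling change only reprograms the encoder with the saved mode.
+ */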
+ centre = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
+ (value == DRM_MODE_SCALE_NO_SCALE);
+
+ if (crtc->saved_mode.hdisplay != 0 &&
+ crtc->saved_mode.vdisplay != 0) {
+ if (centre) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc,
+ &crtc->saved_mode,
+ encoder->crtc->x,
+ encoder->crtc->y,
+ encoder->crtc->fb))
+ return -1;
+ } else {
+ struct drm_encoder_helper_funcs *helpers
+ = encoder->helper_private;
+ helpers->mode_set(encoder, &crtc->saved_mode,
+ &crtc->saved_adjusted_mode);
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Return the list of HDMI DDC modes if available.
+ */
+static int cdv_hdmi_get_modes(struct drm_connector *connector)
+{
+ struct psb_intel_output *psb_intel_output =
+ to_psb_intel_output(connector);
+ struct edid *edid = NULL;
+ int ret = 0;
+
+ edid = drm_get_edid(&psb_intel_output->base,
+ psb_intel_output->hdmi_i2c_adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(&psb_intel_output->
+ base, edid);
+ ret = drm_add_edid_modes(&psb_intel_output->base, edid);
+ kfree(edid);
+ }
+ return ret;
+}
+
+static int cdv_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ if (mode->clock < 20000)
+ return MODE_CLOCK_LOW;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ /*
+ * FIXME: for now we limit the size to 1680x1050 on CDV, otherwise it
+ * will go beyond the stolen memory size allocated to the framebuffer
+ */
+ if (mode->hdisplay > 1680)
+ return MODE_PANEL;
+ if (mode->vdisplay > 1050)
+ return MODE_PANEL;
+ return MODE_OK;
+}
+
+static void cdv_hdmi_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_output *psb_intel_output =
+ to_psb_intel_output(connector);
+
+ if (psb_intel_output->ddc_bus)
+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
+ .dpms = cdv_hdmi_dpms,
+ .mode_fixup = cdv_hdmi_mode_fixup,
+ .prepare = psb_intel_encoder_prepare,
+ .mode_set = cdv_hdmi_mode_set,
+ .commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_helper_funcs
+ cdv_hdmi_connector_helper_funcs = {
+ .get_modes = cdv_hdmi_get_modes,
+ .mode_valid = cdv_hdmi_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = cdv_hdmi_save,
+ .restore = cdv_hdmi_restore,
+ .detect = cdv_hdmi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = cdv_hdmi_set_property,
+ .destroy = cdv_hdmi_destroy,
+};
+
+void cdv_hdmi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev, int reg)
+{
+ struct psb_intel_output *psb_intel_output;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct mid_intel_hdmi_priv *hdmi_priv;
+ int ddc_bus;
+
+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output) +
+ sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
+ if (!psb_intel_output)
+ return;
+
+ hdmi_priv = (struct mid_intel_hdmi_priv *)(psb_intel_output + 1);
+ psb_intel_output->mode_dev = mode_dev;
+ connector = &psb_intel_output->base;
+ encoder = &psb_intel_output->enc;
+ drm_connector_init(dev, &psb_intel_output->base,
+ &cdv_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_DVID);
+
+ drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
+ &psb_intel_output->enc);
+ psb_intel_output->type = INTEL_OUTPUT_HDMI;
+ hdmi_priv->hdmi_reg = reg;
+ hdmi_priv->has_hdmi_sink = false;
+ psb_intel_output->dev_priv = hdmi_priv;
+
+ drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
+ drm_connector_helper_add(connector,
+ &cdv_hdmi_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
+
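+ /*
+ * Pick the GPIO pair used for DDC based on which SDVO/HDMI port
+ * register this output was registered with.
+ */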
+ switch (reg) {
+ case SDVOB:
+ ddc_bus = GPIOE;
+ break;
+ case SDVOC:
+ ddc_bus = GPIOD;
+ break;
+ default:
+ DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
+ goto failed_ddc;
+ }
+
+ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
+ ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
+
+ if (!psb_intel_output->ddc_bus) {
+ dev_err(dev->dev, "No ddc adapter available!\n");
+ goto failed_ddc;
+ }
+ psb_intel_output->hdmi_i2c_adapter =
+ &(psb_intel_output->ddc_bus->adapter);
+ hdmi_priv->dev = dev;
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_ddc:
+ drm_encoder_cleanup(&psb_intel_output->enc);
+ drm_connector_cleanup(&psb_intel_output->base);
+ kfree(psb_intel_output);
+}
diff --git a/drivers/staging/gma500/cdv_intel_lvds.c b/drivers/staging/gma500/cdv_intel_lvds.c
new file mode 100644
index 00000000000..988b2d0acf4
--- /dev/null
+++ b/drivers/staging/gma500/cdv_intel_lvds.c
@@ -0,0 +1,721 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ * Jesse Barnes <jesse.barnes@intel.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/dmi.h>
+#include <drm/drmP.h>
+
+#include "intel_bios.h"
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include <linux/pm_runtime.h>
+#include "cdv_device.h"
+
+/**
+ * LVDS I2C backlight control macros
+ */
+#define BRIGHTNESS_MAX_LEVEL 100
+#define BRIGHTNESS_MASK 0xFF
+#define BLC_I2C_TYPE 0x01
+#define BLC_PWM_TYPT 0x02
+
+#define BLC_POLARITY_NORMAL 0
+#define BLC_POLARITY_INVERSE 1
+
+#define PSB_BLC_MAX_PWM_REG_FREQ (0xFFFE)
+#define PSB_BLC_MIN_PWM_REG_FREQ (0x2)
+#define PSB_BLC_PWM_PRECISION_FACTOR (10)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+
+struct cdv_intel_lvds_priv {
+ /**
+ * Saved LVDO output states
+ */
+ uint32_t savePP_ON;
+ uint32_t savePP_OFF;
+ uint32_t saveLVDS;
+ uint32_t savePP_CONTROL;
+ uint32_t savePP_CYCLE;
+ uint32_t savePFIT_CONTROL;
+ uint32_t savePFIT_PGM_RATIOS;
+ uint32_t saveBLC_PWM_CTL;
+};
+
+/*
+ * Returns the maximum level of the backlight duty cycle field.
+ */
+static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 retval;
+
+ if (gma_power_begin(dev, false)) {
+ retval = ((REG_READ(BLC_PWM_CTL) &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ gma_power_end(dev);
+ } else
+ retval = ((dev_priv->saveBLC_PWM_CTL &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ return retval;
+}
+
+/*
+ * Set LVDS backlight level by I2C command
+ */
+static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
+ unsigned int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+ u8 out_buf[2];
+ unsigned int blc_i2c_brightness;
+
+ struct i2c_msg msgs[] = {
+ {
+ .addr = lvds_i2c_bus->slave_addr,
+ .flags = 0,
+ .len = 2,
+ .buf = out_buf,
+ }
+ };
+
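+ /*
+ * Scale the 0..BRIGHTNESS_MAX_LEVEL input to the 8-bit value the
+ * backlight controller expects; inverse polarity panels take the
+ * complement.
+ */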
+ blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+ BRIGHTNESS_MASK /
+ BRIGHTNESS_MAX_LEVEL);
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+ blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+
+ out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+ out_buf[1] = (u8)blc_i2c_brightness;
+
+ if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
+ return 0;
+
+ DRM_ERROR("I2C transfer error\n");
+ return -1;
+}
+
+
+static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ u32 max_pwm_blc;
+ u32 blc_pwm_duty_cycle;
+
+ max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
+
+ /* BLC_PWM_CTL should have been initialised by the backlight device init */
+ BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
+
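+ /*
+ * Convert the level into a duty cycle relative to the PWM period,
+ * invert it for inverse polarity panels and clear the polarity bit
+ * before writing period and duty cycle back together.
+ */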
+ blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+
+ if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+ blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+
+ blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+ REG_WRITE(BLC_PWM_CTL,
+ (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+ (blc_pwm_duty_cycle));
+
+ return 0;
+}
+
+/*
+ * Set LVDS backlight level either by I2C or PWM
+ */
+void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->lvds_bl) {
+ DRM_ERROR("NO LVDS Backlight Info\n");
+ return;
+ }
+
+ if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
+ cdv_lvds_i2c_set_brightness(dev, level);
+ else
+ cdv_lvds_pwm_set_brightness(dev, level);
+}
+
+/**
+ * Sets the backlight level.
+ *
+ * level backlight level, from 0 to cdv_intel_lvds_get_max_backlight().
+ */
+static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 blc_pwm_ctl;
+
+ if (gma_power_begin(dev, false)) {
+ blc_pwm_ctl =
+ REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ REG_WRITE(BLC_PWM_CTL,
+ (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+ gma_power_end(dev);
+ } else {
+ blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
+ ~BACKLIGHT_DUTY_CYCLE_MASK;
+ dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+ (level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+ }
+}
+
+/**
+ * Sets the power state for the panel.
+ */
+static void cdv_intel_lvds_set_power(struct drm_device *dev,
+ struct psb_intel_output *output, bool on)
+{
+ u32 pp_status;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
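+ /*
+ * Program the panel power sequencer target state and poll
+ * PP_STATUS until the panel has actually reached it.
+ */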
+ if (on) {
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+ POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while ((pp_status & PP_ON) == 0);
+
+ cdv_intel_lvds_set_backlight(dev,
+ output->
+ mode_dev->backlight_duty_cycle);
+ } else {
+ cdv_intel_lvds_set_backlight(dev, 0);
+
+ REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+ ~POWER_TARGET_ON);
+ do {
+ pp_status = REG_READ(PP_STATUS);
+ } while (pp_status & PP_ON);
+ }
+ gma_power_end(dev);
+}
+
+static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+ if (mode == DRM_MODE_DPMS_ON)
+ cdv_intel_lvds_set_power(dev, output, true);
+ else
+ cdv_intel_lvds_set_power(dev, output, false);
+ /* XXX: We never power down the LVDS pairs. */
+}
+
+static void cdv_intel_lvds_save(struct drm_connector *connector)
+{
+}
+
+static void cdv_intel_lvds_restore(struct drm_connector *connector)
+{
+}
+
+int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct psb_intel_output *psb_intel_output =
+ to_psb_intel_output(connector);
+ struct drm_display_mode *fixed_mode =
+ psb_intel_output->mode_dev->panel_fixed_mode;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ /* just in case */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+ return MODE_OK;
+}
+
+bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct psb_intel_mode_device *mode_dev =
+ enc_to_psb_intel_output(encoder)->mode_dev;
+ struct drm_device *dev = encoder->dev;
+ struct drm_encoder *tmp_encoder;
+ struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+
+ /* Should never happen!! */
+ list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+ head) {
+ if (tmp_encoder != encoder
+ && tmp_encoder->crtc == encoder->crtc) {
+ printk(KERN_ERR "Can't enable LVDS and another encoder on the same pipe\n");
+ return false;
+ }
+ }
+
+ /*
+ * If we have timings from the BIOS for the panel, put them in
+ * to the adjusted mode. The CRTC will be set up for this mode,
+ * with the panel scaling set up to source from the H/VDisplay
+ * of the original mode.
+ */
+ if (panel_fixed_mode != NULL) {
+ adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+ adjusted_mode->htotal = panel_fixed_mode->htotal;
+ adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+ adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+ adjusted_mode->clock = panel_fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode,
+ CRTC_INTERLACE_HALVE_V);
+ }
+
+ /*
+ * XXX: It would be nice to support lower refresh rates on the
+ * panels to reduce power consumption, and perhaps match the
+ * user's requested refresh rate.
+ */
+
+ return true;
+}
+
+static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
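+ /*
+ * Remember the current PWM control value and duty cycle so the
+ * commit hook can restore a sensible backlight level after the
+ * mode set.
+ */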
+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+
+ cdv_intel_lvds_set_power(dev, output, false);
+
+ gma_power_end(dev);
+}
+
+static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
+
+ if (mode_dev->backlight_duty_cycle == 0)
+ mode_dev->backlight_duty_cycle =
+ cdv_intel_lvds_get_max_backlight(dev);
+
+ cdv_intel_lvds_set_power(dev, output, true);
+}
+
+static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pfit_control;
+
+ /*
+ * The LVDS pin pair will already have been turned on in the
+ * cdv_intel_crtc_mode_set since it has a large impact on the DPLL
+ * settings.
+ */
+
+ /*
+ * Enable automatic panel scaling so that non-native modes fill the
+ * screen. Should be enabled before the pipe is enabled, according to
+ * register description and PRM.
+ */
+ if (mode->hdisplay != adjusted_mode->hdisplay ||
+ mode->vdisplay != adjusted_mode->vdisplay)
+ pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+ HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ else
+ pfit_control = 0;
+
+ if (dev_priv->lvds_dither)
+ pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
+ REG_WRITE(PFIT_CONTROL, pfit_control);
+}
+
+/**
+ * Detect the LVDS connection.
+ *
+ * This always returns CONNECTOR_STATUS_CONNECTED.
+ * This connector should only have
+ * been set up if the LVDS was actually connected anyway.
+ */
+static enum drm_connector_status cdv_intel_lvds_detect(
+ struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+/**
+ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+ */
+static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct psb_intel_output *psb_intel_output =
+ to_psb_intel_output(connector);
+ struct psb_intel_mode_device *mode_dev =
+ psb_intel_output->mode_dev;
+ int ret;
+
+ ret = psb_intel_ddc_get_modes(psb_intel_output);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Didn't get an EDID, so set wide sync ranges to get all modes
+ * handed to valid_mode for checking.
+ */
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+ if (mode_dev->panel_fixed_mode != NULL) {
+ struct drm_display_mode *mode =
+ drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * cdv_intel_lvds_destroy - unregister and free LVDS structures
+ * @connector: connector to free
+ *
+ * Unregister the DDC bus for this connector then free the driver private
+ * structure.
+ */
+void cdv_intel_lvds_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_output *psb_intel_output =
+ to_psb_intel_output(connector);
+
+ if (psb_intel_output->ddc_bus)
+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+int cdv_intel_lvds_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_encoder *encoder = connector->encoder;
+
+ if (!strcmp(property->name, "scaling mode") && encoder) {
+ struct psb_intel_crtc *crtc =
+ to_psb_intel_crtc(encoder->crtc);
+ uint64_t curValue;
+
+ if (!crtc)
+ return -1;
+
+ switch (value) {
+ case DRM_MODE_SCALE_FULLSCREEN:
+ break;
+ case DRM_MODE_SCALE_NO_SCALE:
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ return -1;
+ }
+
+ if (drm_connector_property_get_value(connector,
+ property,
+ &curValue))
+ return -1;
+
+ if (curValue == value)
+ return 0;
+
+ if (drm_connector_property_set_value(connector,
+ property,
+ value))
+ return -1;
+
+ if (crtc->saved_mode.hdisplay != 0 &&
+ crtc->saved_mode.vdisplay != 0) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc,
+ &crtc->saved_mode,
+ encoder->crtc->x,
+ encoder->crtc->y,
+ encoder->crtc->fb))
+ return -1;
+ }
+ } else if (!strcmp(property->name, "backlight") && encoder) {
+ if (drm_connector_property_set_value(connector,
+ property,
+ value))
+ return -1;
+ else {
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ struct drm_psb_private *dev_priv =
+ encoder->dev->dev_private;
+ struct backlight_device *bd =
+ dev_priv->backlight_device;
+ bd->props.brightness = value;
+ backlight_update_status(bd);
+#endif
+ }
+ } else if (!strcmp(property->name, "DPMS") && encoder) {
+ struct drm_encoder_helper_funcs *helpers =
+ encoder->helper_private;
+ helpers->dpms(encoder, value);
+ }
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs
+ cdv_intel_lvds_helper_funcs = {
+ .dpms = cdv_intel_lvds_encoder_dpms,
+ .mode_fixup = cdv_intel_lvds_mode_fixup,
+ .prepare = cdv_intel_lvds_prepare,
+ .mode_set = cdv_intel_lvds_mode_set,
+ .commit = cdv_intel_lvds_commit,
+};
+
+static const struct drm_connector_helper_funcs
+ cdv_intel_lvds_connector_helper_funcs = {
+ .get_modes = cdv_intel_lvds_get_modes,
+ .mode_valid = cdv_intel_lvds_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .save = cdv_intel_lvds_save,
+ .restore = cdv_intel_lvds_restore,
+ .detect = cdv_intel_lvds_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = cdv_intel_lvds_set_property,
+ .destroy = cdv_intel_lvds_destroy,
+};
+
+
+static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
+ .destroy = cdv_intel_lvds_enc_destroy,
+};
+
+/**
+ * cdv_intel_lvds_init - setup LVDS connectors on this device
+ * @dev: drm device
+ *
+ * Create the connector, register the LVDS DDC bus, and try to figure out what
+ * modes we can display on the LVDS panel (if present).
+ */
+void cdv_intel_lvds_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct psb_intel_output *psb_intel_output;
+ struct cdv_intel_lvds_priv *lvds_priv;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_display_mode *scan;
+ struct drm_crtc *crtc;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 lvds;
+ int pipe;
+
+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output) +
+ sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
+ if (!psb_intel_output)
+ return;
+
+ lvds_priv = (struct cdv_intel_lvds_priv *)(psb_intel_output + 1);
+
+ psb_intel_output->dev_priv = lvds_priv;
+
+ psb_intel_output->mode_dev = mode_dev;
+ connector = &psb_intel_output->base;
+ encoder = &psb_intel_output->enc;
+
+
+ drm_connector_init(dev, &psb_intel_output->base,
+ &cdv_intel_lvds_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+
+ drm_encoder_init(dev, &psb_intel_output->enc,
+ &cdv_intel_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS);
+
+
+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
+ &psb_intel_output->enc);
+ psb_intel_output->type = INTEL_OUTPUT_LVDS;
+
+ drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
+ drm_connector_helper_add(connector,
+ &cdv_intel_lvds_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /*Attach connector properties*/
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
+ drm_connector_attach_property(connector,
+ dev_priv->backlight_property,
+ BRIGHTNESS_MAX_LEVEL);
+
+ /*
+ * Set up the I2C bus
+ * FIXME: destroy the i2c_bus on exit
+ */
+ psb_intel_output->i2c_bus = psb_intel_i2c_create(dev,
+ GPIOB,
+ "LVDSBLC_B");
+ if (!psb_intel_output->i2c_bus) {
+ dev_printk(KERN_ERR,
+ &dev->pdev->dev, "I2C bus registration failed.\n");
+ goto failed_blc_i2c;
+ }
+ psb_intel_output->i2c_bus->slave_addr = 0x2C;
+ dev_priv->lvds_i2c_bus = psb_intel_output->i2c_bus;
+
+ /*
+ * LVDS discovery:
+ * 1) check for EDID on DDC
+ * 2) check for VBT data
+ * 3) check to see if LVDS is already on
+ * if none of the above, no panel
+ * 4) make sure lid is open
+ * if closed, act like it's not there for now
+ */
+
+ /* Set up the DDC bus. */
+ psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
+ GPIOC,
+ "LVDSDDC_C");
+ if (!psb_intel_output->ddc_bus) {
+ dev_printk(KERN_ERR, &dev->pdev->dev,
+ "DDC bus registration failed.\n");
+ goto failed_ddc;
+ }
+
+ /*
+ * Attempt to get the fixed panel mode from DDC. Assume that the
+ * preferred mode is the right one.
+ */
+ psb_intel_ddc_get_modes(psb_intel_output);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, scan);
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* Failed to get EDID, what about VBT? do we need this?*/
+ if (dev_priv->lfp_lvds_vbt_mode) {
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+ /*
+ * If we didn't get EDID, try checking if the panel is already turned
+ * on. If so, assume that whatever is currently programmed is the
+ * correct mode.
+ */
+ lvds = REG_READ(LVDS);
+ pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+ crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+
+ if (crtc && (lvds & LVDS_PORT_EN)) {
+ mode_dev->panel_fixed_mode =
+ cdv_intel_crtc_mode_get(dev, crtc);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ goto out; /* FIXME: check for quirks */
+ }
+ }
+
+ /* If we still don't have a mode after all that, give up. */
+ if (!mode_dev->panel_fixed_mode) {
+ DRM_DEBUG("Found no modes on the lvds, ignoring the LVDS\n");
+ goto failed_find;
+ }
+
+out:
+ drm_sysfs_connector_add(connector);
+ return;
+
+failed_find:
+ printk(KERN_ERR "Failed find\n");
+ if (psb_intel_output->ddc_bus)
+ psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+failed_ddc:
+ printk(KERN_ERR "Failed DDC\n");
+ if (psb_intel_output->i2c_bus)
+ psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
+failed_blc_i2c:
+ printk(KERN_ERR "Failed BLC\n");
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
diff --git a/drivers/staging/gma500/displays/hdmi.h b/drivers/staging/gma500/displays/hdmi.h
new file mode 100644
index 00000000000..d58ba9bd010
--- /dev/null
+++ b/drivers/staging/gma500/displays/hdmi.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+ */
+
+#ifndef HDMI_H
+#define HDMI_H
+
+extern void hdmi_init(struct drm_device *dev);
+
+#endif
diff --git a/drivers/staging/gma500/displays/pyr_cmd.h b/drivers/staging/gma500/displays/pyr_cmd.h
new file mode 100644
index 00000000000..84bae5c8c55
--- /dev/null
+++ b/drivers/staging/gma500/displays/pyr_cmd.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+ */
+
+#ifndef PYR_CMD_H
+#define PYR_CMD_H
+
+extern void pyr_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+
+#endif
+
diff --git a/drivers/staging/gma500/displays/pyr_vid.h b/drivers/staging/gma500/displays/pyr_vid.h
new file mode 100644
index 00000000000..ce98860fa68
--- /dev/null
+++ b/drivers/staging/gma500/displays/pyr_vid.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+*/
+
+#ifndef PYR_VID_H
+#define PYR_VID_H
+
+extern void pyr_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+extern struct drm_display_mode *pyr_vid_get_config_mode(struct drm_device *dev);
+
+#endif
diff --git a/drivers/staging/gma500/displays/tmd_cmd.h b/drivers/staging/gma500/displays/tmd_cmd.h
new file mode 100644
index 00000000000..641e85eedec
--- /dev/null
+++ b/drivers/staging/gma500/displays/tmd_cmd.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+ */
+
+#ifndef TMD_CMD_H
+#define TMD_CMD_H
+
+extern void tmd_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+extern struct drm_display_mode *tmd_cmd_get_config_mode(struct drm_device *dev);
+
+#endif
diff --git a/drivers/staging/gma500/displays/tmd_vid.h b/drivers/staging/gma500/displays/tmd_vid.h
new file mode 100644
index 00000000000..7a5fa3b935e
--- /dev/null
+++ b/drivers/staging/gma500/displays/tmd_vid.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+*/
+
+#ifndef TMD_VID_H
+#define TMD_VID_H
+
+extern void tmd_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+extern struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev);
+
+#endif
diff --git a/drivers/staging/gma500/displays/tpo_cmd.h b/drivers/staging/gma500/displays/tpo_cmd.h
new file mode 100644
index 00000000000..610552730d7
--- /dev/null
+++ b/drivers/staging/gma500/displays/tpo_cmd.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+*/
+
+#ifndef TPO_CMD_H
+#define TPO_CMD_H
+
+extern void tpo_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+/* extern struct drm_display_mode * */
+/* tpo_cmd_get_config_mode(struct drm_device *dev); */
+
+#endif
diff --git a/drivers/staging/gma500/displays/tpo_vid.h b/drivers/staging/gma500/displays/tpo_vid.h
new file mode 100644
index 00000000000..c24f05722de
--- /dev/null
+++ b/drivers/staging/gma500/displays/tpo_vid.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+ */
+
+#ifndef TPO_VID_H
+#define TPO_VID_H
+
+extern void tpo_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+
+#endif
diff --git a/drivers/staging/gma500/psb_fb.c b/drivers/staging/gma500/framebuffer.c
index 084c36bbfe8..ebfde13ec18 100644
--- a/drivers/staging/gma500/psb_fb.c
+++ b/drivers/staging/gma500/framebuffer.c
@@ -1,5 +1,5 @@
/**************************************************************************
- * Copyright (c) 2007, Intel Corporation.
+ * Copyright (c) 2007-2011, Intel Corporation.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
@@ -36,7 +36,9 @@
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_drv.h"
-#include "psb_fb.h"
+#include "framebuffer.h"
+
+#include "mdfld_output.h"
static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
@@ -50,14 +52,6 @@ static const struct drm_framebuffer_funcs psb_fb_funcs = {
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
-void *psbfb_vdc_reg(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv;
- dev_priv = (struct drm_psb_private *) dev->dev_private;
- return dev_priv->vdc_reg;
-}
-/*EXPORT_SYMBOL(psbfb_vdc_reg); */
-
static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
@@ -97,109 +91,60 @@ static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 0;
}
-static int psbfb_kms_off(struct drm_device *dev, int suspend)
+
+void psbfb_suspend(struct drm_device *dev)
{
struct drm_framebuffer *fb = 0;
struct psb_framebuffer *psbfb = to_psb_fb(fb);
- DRM_DEBUG("psbfb_kms_off_ioctl\n");
+ console_lock();
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
struct fb_info *info = psbfb->fbdev;
-
- if (suspend) {
- fb_set_suspend(info, 1);
- drm_fb_helper_blank(FB_BLANK_POWERDOWN, info);
- }
+ fb_set_suspend(info, 1);
+ drm_fb_helper_blank(FB_BLANK_POWERDOWN, info);
}
mutex_unlock(&dev->mode_config.mutex);
- return 0;
-}
-
-int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret;
-
- if (drm_psb_no_fb)
- return 0;
- console_lock();
- ret = psbfb_kms_off(dev, 0);
console_unlock();
-
- return ret;
}
-static int psbfb_kms_on(struct drm_device *dev, int resume)
+void psbfb_resume(struct drm_device *dev)
{
struct drm_framebuffer *fb = 0;
struct psb_framebuffer *psbfb = to_psb_fb(fb);
- DRM_DEBUG("psbfb_kms_on_ioctl\n");
-
+ console_lock();
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
struct fb_info *info = psbfb->fbdev;
-
- if (resume) {
- fb_set_suspend(info, 0);
- drm_fb_helper_blank(FB_BLANK_UNBLANK, info);
- }
+ fb_set_suspend(info, 0);
+ drm_fb_helper_blank(FB_BLANK_UNBLANK, info);
}
mutex_unlock(&dev->mode_config.mutex);
-
- return 0;
-}
-
-int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret;
-
- if (drm_psb_no_fb)
- return 0;
- console_lock();
- ret = psbfb_kms_on(dev, 0);
- console_unlock();
- drm_helper_disable_unused_functions(dev);
- return ret;
-}
-
-void psbfb_suspend(struct drm_device *dev)
-{
- console_lock();
- psbfb_kms_off(dev, 1);
- console_unlock();
-}
-
-void psbfb_resume(struct drm_device *dev)
-{
- console_lock();
- psbfb_kms_on(dev, 1);
console_unlock();
drm_helper_disable_unused_functions(dev);
}
static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- int page_num = 0;
- int i;
- unsigned long address = 0;
- int ret;
- unsigned long pfn;
struct psb_framebuffer *psbfb = vma->vm_private_data;
struct drm_device *dev = psbfb->base.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ int page_num;
+ int i;
+ unsigned long address;
+ int ret;
+ unsigned long pfn;
+ /* FIXME: assumes fb at stolen base which may not be true */
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-
address = (unsigned long)vmf->virtual_address;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
for (i = 0; i < page_num; i++) {
- pfn = (phys_addr >> PAGE_SHIFT); /* phys_to_pfn(phys_addr); */
+ pfn = (phys_addr >> PAGE_SHIFT);
ret = vm_insert_mixed(vma, address, pfn);
if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
@@ -208,22 +153,18 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
return ret;
}
-
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
}
-
return VM_FAULT_NOPAGE;
}
static void psbfb_vm_open(struct vm_area_struct *vma)
{
- DRM_DEBUG("vm_open\n");
}
static void psbfb_vm_close(struct vm_area_struct *vma)
{
- DRM_DEBUG("vm_close\n");
}
static struct vm_operations_struct psbfb_vm_ops = {
@@ -235,10 +176,7 @@ static struct vm_operations_struct psbfb_vm_ops = {
static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct psb_fbdev *fbdev = info->par;
- struct psb_framebuffer *psbfb = fbdev->pfb;
- char *fb_screen_base = NULL;
- struct drm_device *dev = psbfb->base.dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
if (vma->vm_pgoff != 0)
return -EINVAL;
@@ -247,51 +185,22 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
if (!psbfb->addr_space)
psbfb->addr_space = vma->vm_file->f_mapping;
-
- fb_screen_base = (char *)info->screen_base;
-
- DRM_DEBUG("vm_pgoff 0x%lx, screen base %p vram_addr %p\n",
- vma->vm_pgoff, fb_screen_base,
- dev_priv->vram_addr);
-
- /* FIXME: ultimately this needs to become 'if entirely stolen memory' */
- if (1 || fb_screen_base == dev_priv->vram_addr) {
- vma->vm_ops = &psbfb_vm_ops;
- vma->vm_private_data = (void *)psbfb;
- vma->vm_flags |= VM_RESERVED | VM_IO |
- VM_MIXEDMAP | VM_DONTEXPAND;
- } else {
- /* GTT memory backed by kernel/user pages, needs a different
- approach ? - GEM ? */
- }
-
+ /*
+ * If this is a GEM object then info->screen_base is the virtual
+ * kernel remapping of the object. FIXME: Review if this is
+ * suitable for our mmap work
+ */
+ vma->vm_ops = &psbfb_vm_ops;
+ vma->vm_private_data = (void *)psbfb;
+ vma->vm_flags |= VM_RESERVED | VM_IO |
+ VM_MIXEDMAP | VM_DONTEXPAND;
return 0;
}
-static int psbfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg)
{
- struct psb_fbdev *fbdev = info->par;
- struct psb_framebuffer *psbfb = fbdev->pfb;
- struct drm_device *dev = psbfb->base.dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- u32 __user *p = (u32 __user *)arg;
- u32 l;
- u32 buf[32];
- switch (cmd) {
- case 0x12345678:
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- if (get_user(l, p))
- return -EFAULT;
- if (l > 32)
- return -EMSGSIZE;
- if (copy_from_user(buf, p + 1, l * sizeof(u32)))
- return -EFAULT;
- psbfb_2d_submit(dev_priv, buf, l);
- return 0;
- default:
- return -ENOTTY;
- }
+ return -ENOTTY;
}
static struct fb_ops psbfb_ops = {
@@ -300,16 +209,80 @@ static struct fb_ops psbfb_ops = {
.fb_set_par = drm_fb_helper_set_par,
.fb_blank = drm_fb_helper_blank,
.fb_setcolreg = psbfb_setcolreg,
- .fb_fillrect = psbfb_fillrect,
+ .fb_fillrect = cfb_fillrect,
.fb_copyarea = psbfb_copyarea,
- .fb_imageblit = psbfb_imageblit,
+ .fb_imageblit = cfb_imageblit,
.fb_mmap = psbfb_mmap,
.fb_sync = psbfb_sync,
.fb_ioctl = psbfb_ioctl,
};
+static struct fb_ops psbfb_unaccel_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcolreg = psbfb_setcolreg,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_mmap = psbfb_mmap,
+ .fb_ioctl = psbfb_ioctl,
+};
+
+/**
+ * psb_framebuffer_init - initialize a framebuffer
+ * @dev: our DRM device
+ * @fb: framebuffer to set up
+ * @mode_cmd: mode description
+ * @gt: backing object
+ *
+ * Configure and fill in the boilerplate for our frame buffer. Return
+ * 0 on success or an error code if we fail.
+ */
+static int psb_framebuffer_init(struct drm_device *dev,
+ struct psb_framebuffer *fb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct gtt_range *gt)
+{
+ int ret;
+
+ if (mode_cmd->pitch & 63)
+ return -EINVAL;
+ switch (mode_cmd->bpp) {
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+ fb->gtt = gt;
+ return 0;
+}
+
+/**
+ * psb_framebuffer_create - create a framebuffer backed by gt
+ * @dev: our DRM device
+ * @mode_cmd: the description of the requested mode
+ * @gt: the backing object
+ *
+ * Create a framebuffer object backed by the gt, and fill in the
+ * boilerplate required
+ *
+ * TODO: review object references
+ */
+
static struct drm_framebuffer *psb_framebuffer_create
- (struct drm_device *dev, struct drm_mode_fb_cmd *r,
+ (struct drm_device *dev,
+ struct drm_mode_fb_cmd *mode_cmd,
struct gtt_range *gt)
{
struct psb_framebuffer *fb;
@@ -317,22 +290,14 @@ static struct drm_framebuffer *psb_framebuffer_create
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (!fb)
- return NULL;
-
- ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
-
- if (ret)
- goto err;
-
- drm_helper_mode_fill_fb_struct(&fb->base, r);
-
- fb->gtt = gt;
+ return ERR_PTR(-ENOMEM);
+ ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
+ if (ret) {
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
return &fb->base;
-
-err:
- kfree(fb);
- return NULL;
}
/**
@@ -345,15 +310,22 @@ err:
* stolen memory or the system has no stolen memory we allocate a range
* and back it with a GEM object.
*
- * In this case the GEM object has no handle.
+ * In this case the GEM object has no handle.
+ *
+ * FIXME: console speed up - allocate twice the space if room and use
+ * hardware scrolling for acceleration.
*/
static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
{
struct gtt_range *backing;
/* Begin by trying to use stolen memory backing */
backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
- if (backing)
- return backing;
+ if (backing) {
+ if (drm_gem_private_object_init(dev,
+ &backing->gem, aligned_size) == 0)
+ return backing;
+ psb_gtt_free_range(dev, backing);
+ }
/* Next try using GEM host memory */
backing = psb_gtt_alloc_range(dev, aligned_size, "fb(gem)", 0);
if (backing == NULL)
@@ -366,7 +338,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
}
return backing;
}
-
+
/**
* psbfb_create - create a framebuffer
* @fbdev: the framebuffer device
@@ -381,74 +353,96 @@ static int psbfb_create(struct psb_fbdev *fbdev,
struct drm_psb_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
- struct psb_framebuffer *psbfb;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_mode_fb_cmd mode_cmd;
struct device *device = &dev->pdev->dev;
- int size, aligned_size;
+ int size;
int ret;
struct gtt_range *backing;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
+ mode_cmd.bpp = sizes->surface_bpp;
+
+ /* No 24bit packed */
+ if (mode_cmd.bpp == 24)
+ mode_cmd.bpp = 32;
- mode_cmd.bpp = 32;
/* HW requires pitch to be 64 byte aligned */
- mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
- mode_cmd.depth = 24;
+ mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
+ mode_cmd.depth = sizes->surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
- aligned_size = ALIGN(size, PAGE_SIZE);
+ size = ALIGN(size, PAGE_SIZE);
/* Allocate the framebuffer in the GTT with stolen page backing */
- backing = psbfb_alloc(dev, aligned_size);
+ backing = psbfb_alloc(dev, size);
if (backing == NULL)
- return -ENOMEM;
+ return -ENOMEM;
mutex_lock(&dev->struct_mutex);
- fb = psb_framebuffer_create(dev, &mode_cmd, backing);
- if (!fb) {
- DRM_ERROR("failed to allocate fb.\n");
- ret = -ENOMEM;
- goto out_err1;
- }
- psbfb = to_psb_fb(fb);
- info = framebuffer_alloc(sizeof(struct psb_fbdev), device);
+ info = framebuffer_alloc(0, device);
if (!info) {
ret = -ENOMEM;
- goto out_err0;
+ goto out_err1;
}
-
info->par = fbdev;
+ ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
+ if (ret)
+ goto out_unref;
+
+ fb = &psbfb->base;
psbfb->fbdev = info;
fbdev->psb_fb_helper.fb = fb;
fbdev->psb_fb_helper.fbdev = info;
- fbdev->pfb = psbfb;
strcpy(info->fix.id, "psbfb");
info->flags = FBINFO_DEFAULT;
- info->fbops = &psbfb_ops;
+ /* No 2D engine */
+ if (!dev_priv->ops->accel_2d)
+ info->fbops = &psbfb_unaccel_ops;
+ else
+ info->fbops = &psbfb_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+
info->fix.smem_start = dev->mode_config.fb_base;
info->fix.smem_len = size;
- /* Accessed via stolen memory directly, This only works for stolem
- memory however. Need to address this once we start using gtt
- pages we allocate */
- info->screen_base = (char *)dev_priv->vram_addr + backing->offset;
+ if (backing->stolen) {
+ /* Accessed stolen memory directly */
+ info->screen_base = (char *)dev_priv->vram_addr +
+ backing->offset;
+ } else {
+ /* Pin the pages into the GTT and create a mapping to them */
+ psb_gtt_pin(backing);
+ info->screen_base = vm_map_ram(backing->pages, backing->npage,
+ -1, PAGE_KERNEL);
+ if (info->screen_base == NULL) {
+ psb_gtt_unpin(backing);
+ ret = -ENOMEM;
+ goto out_unref;
+ }
+ psbfb->vm_map = 1;
+ }
info->screen_size = size;
- memset(info->screen_base, 0, size);
- if (dev_priv->pg->stolen_size) {
+ if (dev_priv->gtt.stolen_size) {
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
- goto out_err0;
+ goto out_unref;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
- info->apertures->ranges[0].size = dev_priv->pg->stolen_size;
+ info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
@@ -464,17 +458,19 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
- DRM_DEBUG("fb depth is %d\n", fb->depth);
- DRM_DEBUG(" pitch is %d\n", fb->pitch);
-
- printk(KERN_INFO"allocated %dx%d fb\n",
- psbfb->base.width, psbfb->base.height);
+ dev_info(dev->dev, "allocated %dx%d fb\n",
+ psbfb->base.width, psbfb->base.height);
mutex_unlock(&dev->struct_mutex);
-
return 0;
-out_err0:
- fb->funcs->destroy(fb);
+out_unref:
+ if (backing->stolen)
+ psb_gtt_free_range(dev, backing);
+ else {
+ if (psbfb->vm_map)
+ vm_unmap_ram(info->screen_base, backing->npage);
+ drm_gem_object_unreference(&backing->gem);
+ }
out_err1:
mutex_unlock(&dev->struct_mutex);
psb_gtt_free_range(dev, backing);
@@ -493,45 +489,30 @@ static struct drm_framebuffer *psb_user_framebuffer_create
(struct drm_device *dev, struct drm_file *filp,
struct drm_mode_fb_cmd *cmd)
{
- struct gtt_range *r;
- struct drm_gem_object *obj;
- struct psb_framebuffer *psbfb;
+ struct gtt_range *r;
+ struct drm_gem_object *obj;
- /* Find the GEM object and thus the gtt range object that is
- to back this space */
+ /*
+ * Find the GEM object and thus the gtt range object that is
+ * to back this space
+ */
obj = drm_gem_object_lookup(dev, filp, cmd->handle);
if (obj == NULL)
- return ERR_PTR(-ENOENT);
-
- /* Allocate a framebuffer */
- psbfb = kzalloc(sizeof(*psbfb), GFP_KERNEL);
- if (psbfb == NULL) {
- drm_gem_object_unreference_unlocked(obj);
- return ERR_PTR(-ENOMEM);
- }
-
- /* Let the core code do all the work */
- r = container_of(obj, struct gtt_range, gem);
- if (psb_framebuffer_create(dev, cmd, r) == NULL) {
- drm_gem_object_unreference_unlocked(obj);
- kfree(psbfb);
- return ERR_PTR(-EINVAL);
- }
- /* Return the drm_framebuffer contained within the psb fbdev which
- has been initialized by the framebuffer creation */
- return &psbfb->base;
+ return ERR_PTR(-ENOENT);
+
+ /* Let the core code do all the work */
+ r = container_of(obj, struct gtt_range, gem);
+ return psb_framebuffer_create(dev, cmd, r);
}
static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
- DRM_DEBUG("%s\n", __func__);
}
static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
u16 *green, u16 *blue, int regno)
{
- DRM_DEBUG("%s\n", __func__);
}
static int psbfb_probe(struct drm_fb_helper *helper,
@@ -541,8 +522,6 @@ static int psbfb_probe(struct drm_fb_helper *helper,
int new_fb = 0;
int ret;
- DRM_DEBUG("%s\n", __func__);
-
if (!helper->fb) {
ret = psbfb_create(psb_fbdev, sizes);
if (ret)
@@ -561,24 +540,27 @@ struct drm_fb_helper_funcs psb_fb_helper_funcs = {
int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
{
struct fb_info *info;
- struct psb_framebuffer *psbfb = fbdev->pfb;
+ struct psb_framebuffer *psbfb = &fbdev->pfb;
if (fbdev->psb_fb_helper.fbdev) {
info = fbdev->psb_fb_helper.fbdev;
- /* FIXME: this is a bit more inside knowledge than I'd like
- but I don't see how to make a fake GEM object of the
- stolen space nicely */
- if (psbfb->gtt->stolen)
- psb_gtt_free_range(dev, psbfb->gtt);
- else
- drm_gem_object_unreference(&psbfb->gtt->gem);
+
+ /* If this is our base framebuffer then kill any virtual map
+ for the framebuffer layer and unpin it */
+ if (psbfb->vm_map) {
+ vm_unmap_ram(info->screen_base, psbfb->gtt->npage);
+ psb_gtt_unpin(psbfb->gtt);
+ }
unregister_framebuffer(info);
- iounmap(info->screen_base);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
-
drm_fb_helper_fini(&fbdev->psb_fb_helper);
drm_framebuffer_cleanup(&psbfb->base);
+
+ if (psbfb->gtt)
+ drm_gem_object_unreference(&psbfb->gtt->gem);
return 0;
}
@@ -586,20 +568,17 @@ int psb_fbdev_init(struct drm_device *dev)
{
struct psb_fbdev *fbdev;
struct drm_psb_private *dev_priv = dev->dev_private;
- int num_crtc;
fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
if (!fbdev) {
- DRM_ERROR("no memory\n");
+ dev_err(dev->dev, "no memory\n");
return -ENOMEM;
}
dev_priv->fbdev = fbdev;
fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
- num_crtc = 2;
-
- drm_fb_helper_init(dev, &fbdev->psb_fb_helper, num_crtc,
+ drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
INTELFB_CONN_LIMIT);
drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
@@ -626,22 +605,6 @@ static void psbfb_output_poll_changed(struct drm_device *dev)
drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
}
-int psbfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
-{
- struct fb_info *info;
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
-
- if (drm_psb_no_fb)
- return 0;
-
- info = psbfb->fbdev;
-
- if (info)
- framebuffer_release(info);
- return 0;
-}
-/*EXPORT_SYMBOL(psbfb_remove); */
-
/**
 * psb_user_framebuffer_create_handle - add handle to a framebuffer
* @fb: framebuffer
@@ -656,11 +619,9 @@ static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
- struct gtt_range *r = psbfb->gtt;
- if (r->stolen)
- return -EOPNOTSUPP;
- return drm_gem_handle_create(file_priv, &r->gem, handle);
+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct gtt_range *r = psbfb->gtt;
+ return drm_gem_handle_create(file_priv, &r->gem, handle);
}
/**
@@ -672,18 +633,37 @@ static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
*/
static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
- struct drm_device *dev = fb->dev;
struct psb_framebuffer *psbfb = to_psb_fb(fb);
struct gtt_range *r = psbfb->gtt;
-
- if (psbfb->fbdev)
- psbfb_remove(dev, fb);
-
- /* Let DRM do its clean up */
+ struct drm_device *dev = fb->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_fbdev *fbdev = dev_priv->fbdev;
+ struct drm_crtc *crtc;
+ int reset = 0;
+
+ /* Should never get stolen memory for a user fb */
+ WARN_ON(r->stolen);
+
+ /* Check if we are erroneously live */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ if (crtc->fb == fb)
+ reset = 1;
+
+ if (reset)
+ /*
+ * Now force a sane response before we permit the DRM CRTC
+ * layer to do stupid things like blank the display. Instead
+ * we reset this framebuffer as if the user had forced a reset.
+ * We must do this before the cleanup so that the DRM layer
+ * doesn't get a chance to stick its oar in where it isn't
+ * wanted.
+ */
+ drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
+
+ /* Let DRM do its clean up */
drm_framebuffer_cleanup(fb);
/* We are no longer using the resource in GEM */
drm_gem_object_unreference_unlocked(&r->gem);
-
kfree(fb);
}
@@ -694,17 +674,14 @@ static const struct drm_mode_config_funcs psb_mode_funcs = {
static int psb_create_backlight_property(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv
- = (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_property *backlight;
if (dev_priv->backlight_property)
return 0;
- backlight = drm_property_create(dev,
- DRM_MODE_PROP_RANGE,
- "backlight",
- 2);
+ backlight = drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "backlight", 2);
backlight->values[0] = 0;
backlight->values[1] = 100;
@@ -715,25 +692,13 @@ static int psb_create_backlight_property(struct drm_device *dev)
static void psb_setup_outputs(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
- PSB_DEBUG_ENTRY("\n");
-
drm_mode_create_scaling_mode_property(dev);
-
psb_create_backlight_property(dev);
- if (IS_MRST(dev)) {
- if (dev_priv->iLVDS_enable)
- mrst_lvds_init(dev, &dev_priv->mode_dev);
- else
- DRM_ERROR("DSI is not supported\n");
- } else {
- psb_intel_lvds_init(dev, &dev_priv->mode_dev);
- psb_intel_sdvo_init(dev, SDVOB);
- }
+ dev_priv->ops->output_init(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
@@ -744,12 +709,15 @@ static void psb_setup_outputs(struct drm_device *dev)
/* valid crtcs */
switch (psb_intel_output->type) {
+ case INTEL_OUTPUT_ANALOG:
+ crtc_mask = (1 << 0);
+ clone_mask = (1 << INTEL_OUTPUT_ANALOG);
+ break;
case INTEL_OUTPUT_SDVO:
crtc_mask = ((1 << 0) | (1 << 1));
clone_mask = (1 << INTEL_OUTPUT_SDVO);
break;
case INTEL_OUTPUT_LVDS:
- PSB_DEBUG_ENTRY("LVDS.\n");
if (IS_MRST(dev))
crtc_mask = (1 << 0);
else
@@ -757,26 +725,24 @@ static void psb_setup_outputs(struct drm_device *dev)
clone_mask = (1 << INTEL_OUTPUT_LVDS);
break;
case INTEL_OUTPUT_MIPI:
- PSB_DEBUG_ENTRY("MIPI.\n");
crtc_mask = (1 << 0);
clone_mask = (1 << INTEL_OUTPUT_MIPI);
break;
case INTEL_OUTPUT_MIPI2:
- PSB_DEBUG_ENTRY("MIPI2.\n");
crtc_mask = (1 << 2);
clone_mask = (1 << INTEL_OUTPUT_MIPI2);
break;
case INTEL_OUTPUT_HDMI:
- PSB_DEBUG_ENTRY("HDMI.\n");
- crtc_mask = (1 << 1);
+ if (IS_MFLD(dev))
+ crtc_mask = (1 << 1);
+ else /* FIXME: review Oaktrail */
+ crtc_mask = (1 << 0); /* Cedarview */
clone_mask = (1 << INTEL_OUTPUT_HDMI);
break;
}
-
encoder->possible_crtcs = crtc_mask;
encoder->possible_clones =
psb_intel_connector_clones(dev, clone_mask);
-
}
}
@@ -787,8 +753,6 @@ void psb_modeset_init(struct drm_device *dev)
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
int i;
- PSB_DEBUG_ENTRY("\n");
-
drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
@@ -809,9 +773,6 @@ void psb_modeset_init(struct drm_device *dev)
dev->mode_config.max_height = 2048;
psb_setup_outputs(dev);
-
- /* setup fbs */
- /* drm_initial_config(dev); */
}
void psb_modeset_cleanup(struct drm_device *dev)
@@ -820,7 +781,6 @@ void psb_modeset_cleanup(struct drm_device *dev)
drm_kms_helper_poll_fini(dev);
psb_fbdev_fini(dev);
-
drm_mode_config_cleanup(dev);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/staging/gma500/psb_fb.h b/drivers/staging/gma500/framebuffer.h
index c8ec0d6febb..d1b2289447f 100644
--- a/drivers/staging/gma500/psb_fb.h
+++ b/drivers/staging/gma500/framebuffer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, Intel Corporation
+ * Copyright (c) 2008-2011, Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -19,10 +19,9 @@
*
*/
-#ifndef _PSB_FB_H_
-#define _PSB_FB_H_
+#ifndef _FRAMEBUFFER_H_
+#define _FRAMEBUFFER_H_
-#include <linux/version.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
@@ -33,14 +32,14 @@ struct psb_framebuffer {
struct address_space *addr_space;
struct fb_info *fbdev;
struct gtt_range *gtt;
+ bool vm_map; /* True if we must undo a vm_map_ram */
};
struct psb_fbdev {
struct drm_fb_helper psb_fb_helper;
- struct psb_framebuffer *pfb;
+ struct psb_framebuffer pfb;
};
-
#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
diff --git a/drivers/staging/gma500/psb_gem.c b/drivers/staging/gma500/gem.c
index 76ff7bacd35..65fdd6b8ab1 100644
--- a/drivers/staging/gma500/psb_gem.c
+++ b/drivers/staging/gma500/gem.c
@@ -19,12 +19,8 @@
* Authors: Alan Cox
*
* TODO:
- * - we don't actually put GEM objects into the GART yet
- * - we need to work out if the MMU is relevant as well (eg for
+ * - we need to work out if the MMU is relevant (eg for
* accelerated operations on a GEM object)
- * - cache coherency
- *
- * ie this is just an initial framework to get us going.
*/
#include <drm/drmP.h>
@@ -40,17 +36,9 @@ int psb_gem_init_object(struct drm_gem_object *obj)
void psb_gem_free_object(struct drm_gem_object *obj)
{
struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
+ drm_gem_object_release_wrap(obj);
+ /* This must occur last as it frees up the memory of the GEM object */
psb_gtt_free_range(obj->dev, gtt);
- if (obj->map_list.map) {
- /* Do things GEM should do for us */
- struct drm_gem_mm *mm = obj->dev->mm_private;
- struct drm_map_list *list = &obj->map_list;
- drm_ht_remove_item(&mm->offset_hash, &list->hash);
- drm_mm_put_block(list->file_offset_node);
- kfree(list->map);
- list->map = NULL;
- }
- drm_gem_object_release(obj);
}
int psb_gem_get_aperture(struct drm_device *dev, void *data,
@@ -60,59 +48,6 @@ int psb_gem_get_aperture(struct drm_device *dev, void *data,
}
/**
- * psb_gem_create_mmap_offset - invent an mmap offset
- * @obj: our object
- *
- * This is basically doing by hand a pile of ugly crap which should
- * be done automatically by the GEM library code but isn't
- */
-static int psb_gem_create_mmap_offset(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
- struct drm_local_map *map;
- int ret;
-
- list = &obj->map_list;
- list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
- if (list->map == NULL)
- return -ENOMEM;
- map = list->map;
- map->type = _DRM_GEM;
- map->size = obj->size;
- map->handle =obj;
-
- list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- obj->size / PAGE_SIZE, 0, 0);
- if (!list->file_offset_node) {
- DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
- ret = -ENOSPC;
- goto free_it;
- }
- list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- obj->size / PAGE_SIZE, 0);
- if (!list->file_offset_node) {
- ret = -ENOMEM;
- goto free_it;
- }
- list->hash.key = list->file_offset_node->start;
- ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
- if (ret) {
- DRM_ERROR("failed to add to map hash\n");
- goto free_mm;
- }
- return 0;
-
-free_mm:
- drm_mm_put_block(list->file_offset_node);
-free_it:
- kfree(list->map);
- list->map = NULL;
- return ret;
-}
-
-/**
* psb_gem_dumb_map_gtt - buffer mapping for dumb interface
* @file: our drm client file
* @dev: drm device
@@ -129,7 +64,7 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
-
+
mutex_lock(&dev->struct_mutex);
/* GEM does all our handle to object mapping */
@@ -139,10 +74,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
goto unlock;
}
/* What validation is needed here ? */
-
+
/* Make it mmapable */
if (!obj->map_list.map) {
- ret = psb_gem_create_mmap_offset(obj);
+ ret = gem_create_mmap_offset(obj);
if (ret)
goto out;
}
@@ -175,21 +110,26 @@ static int psb_gem_create(struct drm_file *file,
size = roundup(size, PAGE_SIZE);
- /* Allocate our object - for now a direct gtt range which is not
+ /* Allocate our object - for now a direct gtt range which is not
stolen memory backed */
r = psb_gtt_alloc_range(dev, size, "gem", 0);
- if (r == NULL)
+ if (r == NULL) {
+ dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
return -ENOSPC;
+ }
/* Initialize the extra goodies GEM needs to do all the hard work */
if (drm_gem_object_init(dev, &r->gem, size) != 0) {
psb_gtt_free_range(dev, r);
/* GEM doesn't give an error code and we don't have an
EGEMSUCKS so make something up for now - FIXME */
+ dev_err(dev->dev, "GEM init failed for %lld\n", size);
return -ENOMEM;
}
/* Give the object a handle so we can carry it more easily */
ret = drm_gem_handle_create(file, &r->gem, &handle);
if (ret) {
+ dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
+ &r->gem, size);
drm_gem_object_release(&r->gem);
psb_gtt_free_range(dev, r);
return ret;
@@ -245,19 +185,13 @@ int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
* but we need to do the actual page work.
*
* This code eventually needs to handle faulting objects in and out
- * of the GART and repacking it when we run out of space. We can put
+ * of the GTT and repacking it when we run out of space. We can put
* that off for now and for our simple uses
*
* The VMA was set up by GEM. In doing so it also ensured that the
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*
- * To avoid aliasing and cache funnies we want to map the object
- * through the GART. For the moment this is slightly hackish. It would
- * be nicer if GEM provided mmap opened/closed hooks for us giving
- * the object so that we could track things nicely. That needs changes
- * to the core GEM code so must be tackled post staging
- *
* FIXME
*/
int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -268,9 +202,11 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long pfn;
pgoff_t page_offset;
struct drm_device *dev;
+ struct drm_psb_private *dev_priv;
obj = vma->vm_private_data; /* GEM object */
dev = obj->dev;
+ dev_priv = dev->dev_private;
r = container_of(obj, struct gtt_range, gem); /* Get the gtt range */
@@ -283,30 +219,26 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (r->mmapping == 0) {
ret = psb_gtt_pin(r);
if (ret < 0) {
- DRM_ERROR("gma500: pin failed: %d\n", ret);
- goto fail;
- }
+ dev_err(dev->dev, "gma500: pin failed: %d\n", ret);
+ goto fail;
+ }
r->mmapping = 1;
}
- /* FIXME: Locking. We may also need to repack the GART sometimes */
-
- /* Page relative to the VMA start */
+ /* Page relative to the VMA start - we must calculate this ourselves
+ because vmf->pgoff is the fake GEM offset */
page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
>> PAGE_SHIFT;
- /* Bus address of the page is gart + object offset + page offset */
- /* Assumes gtt allocations are page aligned */
- pfn = (r->resource.start >> PAGE_SHIFT) + page_offset;
-
- pr_debug("Object GTT base at %p\n", (void *)(r->resource.start));
- pr_debug("Inserting %p pfn %lx, pa %lx\n", vmf->virtual_address,
- pfn, pfn << PAGE_SHIFT);
-
+ /* CPU view of the page, don't go via the GART for CPU writes */
+ if (r->stolen)
+ pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
+ else
+ pfn = page_to_pfn(r->pages[page_offset]);
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
fail:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
switch (ret) {
case 0:
case -ERESTARTSYS:
@@ -318,3 +250,46 @@ fail:
return VM_FAULT_SIGBUS;
}
}
+
+static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
+ int size, u32 *handle)
+{
+ struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
+ if (gtt == NULL)
+ return -ENOMEM;
+ if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
+ goto free_gtt;
+ if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
+ return 0;
+free_gtt:
+ psb_gtt_free_range(dev, gtt);
+ return -ENOMEM;
+}
+
+/*
+ * GEM interfaces for our specific client
+ */
+int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_psb_gem_create *args = data;
+ int ret;
+ if (args->flags & PSB_GEM_CREATE_STOLEN) {
+ ret = psb_gem_create_stolen(file, dev, args->size,
+ &args->handle);
+ if (ret == 0)
+ return 0;
+ /* Fall throguh */
+ args->flags &= ~PSB_GEM_CREATE_STOLEN;
+ }
+ return psb_gem_create(file, dev, args->size, &args->handle);
+}
+
+int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_psb_gem_mmap *args = data;
+ return dev->driver->dumb_map_offset(file, dev,
+ args->handle, &args->offset);
+}
+
diff --git a/drivers/staging/gma500/gem_glue.c b/drivers/staging/gma500/gem_glue.c
new file mode 100644
index 00000000000..daac1212065
--- /dev/null
+++ b/drivers/staging/gma500/gem_glue.c
@@ -0,0 +1,89 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+
+void drm_gem_object_release_wrap(struct drm_gem_object *obj)
+{
+ /* Remove the list map if one is present */
+ if (obj->map_list.map) {
+ struct drm_gem_mm *mm = obj->dev->mm_private;
+ struct drm_map_list *list = &obj->map_list;
+ drm_ht_remove_item(&mm->offset_hash, &list->hash);
+ drm_mm_put_block(list->file_offset_node);
+ kfree(list->map);
+ list->map = NULL;
+ }
+ drm_gem_object_release(obj);
+}
+
+/**
+ * gem_create_mmap_offset - invent an mmap offset
+ * @obj: our object
+ *
+ * Standard implementation of offset generation for mmap as is
+ * duplicated in several drivers. This belongs in GEM.
+ */
+int gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_map_list *list;
+ struct drm_local_map *map;
+ int ret;
+
+ list = &obj->map_list;
+ list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+ if (list->map == NULL)
+ return -ENOMEM;
+ map = list->map;
+ map->type = _DRM_GEM;
+ map->size = obj->size;
+ map->handle = obj;
+
+ list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+ obj->size / PAGE_SIZE, 0, 0);
+ if (!list->file_offset_node) {
+ dev_err(dev->dev, "failed to allocate offset for bo %d\n",
+ obj->name);
+ ret = -ENOSPC;
+ goto free_it;
+ }
+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ obj->size / PAGE_SIZE, 0);
+ if (!list->file_offset_node) {
+ ret = -ENOMEM;
+ goto free_it;
+ }
+ list->hash.key = list->file_offset_node->start;
+ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+ if (ret) {
+ dev_err(dev->dev, "failed to add to map hash\n");
+ goto free_mm;
+ }
+ return 0;
+
+free_mm:
+ drm_mm_put_block(list->file_offset_node);
+free_it:
+ kfree(list->map);
+ list->map = NULL;
+ return ret;
+}
diff --git a/drivers/staging/gma500/gem_glue.h b/drivers/staging/gma500/gem_glue.h
new file mode 100644
index 00000000000..ce5ce30f74d
--- /dev/null
+++ b/drivers/staging/gma500/gem_glue.h
@@ -0,0 +1,2 @@
+extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
+extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/staging/gma500/psb_gtt.c b/drivers/staging/gma500/gtt.c
index 74c5a6569d0..461ead251bb 100644
--- a/drivers/staging/gma500/psb_gtt.c
+++ b/drivers/staging/gma500/gtt.c
@@ -28,11 +28,11 @@
*/
/**
- * psb_gtt_mask_pte - generate GART pte entry
+ * psb_gtt_mask_pte - generate GTT pte entry
* @pfn: page number to encode
- * @type: type of memory in the GART
+ * @type: type of memory in the GTT
*
- * Set the GART entry for the appropriate memory type.
+ * Set the GTT entry for the appropriate memory type.
*/
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
@@ -49,16 +49,16 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
}
/**
- * psb_gtt_entry - find the GART entries for a gtt_range
+ * psb_gtt_entry - find the GTT entries for a gtt_range
* @dev: our DRM device
* @r: our GTT range
- *
- * Given a gtt_range object return the GART offset of the page table
+ *
+ * Given a gtt_range object return the GTT offset of the page table
* entries for this gtt_range
*/
u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long offset;
offset = r->resource.start - dev_priv->gtt_mem->start;
@@ -67,20 +67,18 @@ u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
}
/**
- * psb_gtt_insert - put an object into the GART
+ * psb_gtt_insert - put an object into the GTT
* @dev: our DRM device
* @r: our GTT range
*
* Take our preallocated GTT range and insert the GEM object into
- * the GART.
+ * the GTT.
*
* FIXME: gtt lock ?
*/
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
u32 *gtt_slot, pte;
- int numpages = (r->resource.end + 1 - r->resource.start) >> PAGE_SHIFT;
struct page **pages;
int i;
@@ -94,26 +92,25 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
gtt_slot = psb_gtt_entry(dev, r);
pages = r->pages;
- /* Make sure we have no alias present */
- wbinvd();
+ /* Make sure changes are visible to the GPU */
+ set_pages_array_uc(pages, r->npage);
- /* Write our page entries into the GART itself */
- for (i = 0; i < numpages; i++) {
+ /* Write our page entries into the GTT itself */
+ for (i = 0; i < r->npage; i++) {
pte = psb_gtt_mask_pte(page_to_pfn(*pages++), 0/*type*/);
iowrite32(pte, gtt_slot++);
}
/* Make sure all the entries are set before we return */
ioread32(gtt_slot - 1);
-
return 0;
}
/**
- * psb_gtt_remove - remove an object from the GART
+ * psb_gtt_remove - remove an object from the GTT
* @dev: our DRM device
* @r: our GTT range
*
- * Remove a preallocated GTT range from the GART. Overwrite all the
+ * Remove a preallocated GTT range from the GTT. Overwrite all the
* page table entries with the dummy page
*/
@@ -121,17 +118,17 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 *gtt_slot, pte;
- int numpages = (r->resource.end + 1 - r->resource.start) >> PAGE_SHIFT;
int i;
WARN_ON(r->stolen);
gtt_slot = psb_gtt_entry(dev, r);
- pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);;
+ pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
- for (i = 0; i < numpages; i++)
+ for (i = 0; i < r->npage; i++)
iowrite32(pte, gtt_slot++);
ioread32(gtt_slot - 1);
+ set_pages_array_wb(r->pages, r->npage);
}
/**
@@ -140,8 +137,6 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
*
* Pin and build an in kernel list of the pages that back our GEM object.
* While we hold this the pages cannot be swapped out
- *
- * FIXME: Do we need to cache flush when we update the GTT
*/
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
@@ -149,7 +144,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
struct address_space *mapping;
int i;
struct page *p;
- int pages = (gt->resource.end + 1 - gt->resource.start) >> PAGE_SHIFT;
+ int pages = gt->gem.size / PAGE_SIZE;
WARN_ON(gt->pages);
@@ -160,6 +155,8 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
if (gt->pages == NULL)
return -ENOMEM;
+ gt->npage = pages;
+
for (i = 0; i < pages; i++) {
/* FIXME: review flags later */
p = read_cache_page_gfp(mapping, i,
@@ -183,20 +180,15 @@ err:
* @gt: the gtt range
*
* Undo the effect of psb_gtt_attach_pages. At this point the pages
- * must have been removed from the GART as they could now be paged out
+ * must have been removed from the GTT as they could now be paged out
* and move bus address.
- *
- * FIXME: Do we need to cache flush when we update the GTT
*/
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
int i;
- int pages = (gt->resource.end + 1 - gt->resource.start) >> PAGE_SHIFT;
-
- for (i = 0; i < pages; i++) {
+ for (i = 0; i < gt->npage; i++) {
/* FIXME: do we need to force dirty */
set_page_dirty(gt->pages[i]);
- /* Undo the reference we took when populating the table */
page_cache_release(gt->pages[i]);
}
kfree(gt->pages);
@@ -215,7 +207,7 @@ static void psb_gtt_detach_pages(struct gtt_range *gt)
*/
int psb_gtt_pin(struct gtt_range *gt)
{
- int ret;
+ int ret = 0;
struct drm_device *dev = gt->gem.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -264,7 +256,7 @@ void psb_gtt_unpin(struct gtt_range *gt)
}
mutex_unlock(&dev_priv->gtt_mutex);
}
-
+
/*
* GTT resource allocator - allocate and manage GTT address space
*/
@@ -291,50 +283,45 @@ struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
struct resource *r = dev_priv->gtt_mem;
int ret;
unsigned long start, end;
-
+
if (backed) {
- /* The start of the GTT is the stolen pages */
- start = r->start;
- end = r->start + dev_priv->pg->stolen_size - 1;
- } else {
- /* The rest we will use for GEM backed objects */
- start = r->start + dev_priv->pg->stolen_size;
- end = r->end;
- }
+ /* The start of the GTT is the stolen pages */
+ start = r->start;
+ end = r->start + dev_priv->gtt.stolen_size - 1;
+ } else {
+ /* The rest we will use for GEM backed objects */
+ start = r->start + dev_priv->gtt.stolen_size;
+ end = r->end;
+ }
gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
if (gt == NULL)
return NULL;
- gt->resource.name = name;
- gt->stolen = backed;
- gt->in_gart = backed;
- /* Ensure this is set for non GEM objects */
- gt->gem.dev = dev;
- kref_init(&gt->kref);
-
+ gt->resource.name = name;
+ gt->stolen = backed;
+ gt->in_gart = backed;
+ /* Ensure this is set for non GEM objects */
+ gt->gem.dev = dev;
ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
len, start, end, PAGE_SIZE, NULL, NULL);
if (ret == 0) {
- gt->offset = gt->resource.start - r->start;
+ gt->offset = gt->resource.start - r->start;
return gt;
- }
+ }
kfree(gt);
return NULL;
}
/**
- * psb_gtt_destroy - final free up of a gtt
- * @kref: the kref of the gtt
- *
- * Called from the kernel kref put when the final reference to our
- * GTT object is dropped. At that point we can free up the resources.
+ * psb_gtt_free_range - release GTT address space
+ * @dev: our DRM device
+ * @gt: a mapping created with psb_gtt_alloc_range
*
- * For now we handle mmap clean up here to work around limits in GEM
+ * Release a resource that was allocated with psb_gtt_alloc_range. If the
+ * object has been pinned by mmap users we clean this up here currently.
*/
-static void psb_gtt_destroy(struct kref *kref)
+void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
- struct gtt_range *gt = container_of(kref, struct gtt_range, kref);
-
/* Undo the mmap pin if we are destroying the object */
if (gt->mmapping) {
psb_gtt_unpin(gt);
@@ -345,48 +332,16 @@ static void psb_gtt_destroy(struct kref *kref)
kfree(gt);
}
-/**
- * psb_gtt_kref_put - drop reference to a GTT object
- * @gt: the GT being dropped
- *
- * Drop a reference to a psb gtt
- */
-void psb_gtt_kref_put(struct gtt_range *gt)
+void psb_gtt_alloc(struct drm_device *dev)
{
- kref_put(&gt->kref, psb_gtt_destroy);
-}
-
-/**
- * psb_gtt_free_range - release GTT address space
- * @dev: our DRM device
- * @gt: a mapping created with psb_gtt_alloc_range
- *
- * Release a resource that was allocated with psb_gtt_alloc_range
- */
-void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
-{
- psb_gtt_kref_put(gt);
-}
-
-
-struct psb_gtt *psb_gtt_alloc(struct drm_device *dev)
-{
- struct psb_gtt *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
-
- if (!tmp)
- return NULL;
-
- init_rwsem(&tmp->sem);
- tmp->dev = dev;
-
- return tmp;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ init_rwsem(&dev_priv->gtt.sem);
}
void psb_gtt_takedown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- /* FIXME: iounmap dev_priv->vram_addr etc */
if (dev_priv->gtt_map) {
iounmap(dev_priv->gtt_map);
dev_priv->gtt_map = NULL;
@@ -397,8 +352,8 @@ void psb_gtt_takedown(struct drm_device *dev)
PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
(void) PSB_RVDC32(PSB_PGETBL_CTL);
}
- kfree(dev_priv->pg);
- dev_priv->pg = NULL;
+ if (dev_priv->vram_addr)
+ iounmap(dev_priv->gtt_map);
}
int psb_gtt_init(struct drm_device *dev, int resume)
@@ -409,8 +364,6 @@ int psb_gtt_init(struct drm_device *dev, int resume)
unsigned i, num_pages;
unsigned pfn_base;
uint32_t vram_pages;
- uint32_t tt_pages;
- uint32_t *ttm_gtt_map;
uint32_t dvmt_mode = 0;
struct psb_gtt *pg;
@@ -419,10 +372,10 @@ int psb_gtt_init(struct drm_device *dev, int resume)
mutex_init(&dev_priv->gtt_mutex);
- dev_priv->pg = pg = psb_gtt_alloc(dev);
- if (pg == NULL)
- return -ENOMEM;
+ psb_gtt_alloc(dev);
+ pg = &dev_priv->gtt;
+ /* Enable the GTT */
pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
@@ -432,31 +385,53 @@ int psb_gtt_init(struct drm_device *dev, int resume)
(void) PSB_RVDC32(PSB_PGETBL_CTL);
/* The root resource we allocate address space from */
- dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
-
dev_priv->gtt_initialized = 1;
pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
- pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
- /* fix me: video mmu has hw bug to access 0x0D0000000,
- * then make gatt start at 0x0e000,0000 */
+ /*
+ * FIXME: video mmu has hw bug to access 0x0D0000000,
+ * then make gatt start at 0x0e000,0000
+ */
pg->mmu_gatt_start = 0xE0000000;
+
pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
- gtt_pages =
- pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT;
+ gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
+ >> PAGE_SHIFT;
+ /* CDV workaround */
+ if (pg->gtt_start == 0 || gtt_pages == 0) {
+ dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
+ gtt_pages = 64;
+ pg->gtt_start = dev_priv->pge_ctl;
+ }
+
+ pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
- >> PAGE_SHIFT;
+ >> PAGE_SHIFT;
+ dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
+
+ if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
+ static struct resource fudge; /* Preferably peppermint */
+
+ /* This can occur on CDV SDV systems. Fudge it in this case.
+ We really don't care what imaginary space is being allocated
+ at this point */
+ dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
+ pg->gatt_start = 0x40000000;
+ pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
+ fudge.start = 0x40000000;
+ fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
+ fudge.name = "fudge";
+ fudge.flags = IORESOURCE_MEM;
+ dev_priv->gtt_mem = &fudge;
+ }
pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
- vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;
+ vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
+ - PAGE_SIZE;
stolen_size = vram_stolen_size;
- printk(KERN_INFO"GMMADR(region 0) start: 0x%08x (%dM).\n",
- pg->gatt_start, pg->gatt_pages/256);
- printk(KERN_INFO"GTTADR(region 3) start: 0x%08x (can map %dM RAM), and actual RAM base 0x%08x.\n",
- pg->gtt_start, gtt_pages * 4, pg->gtt_phys_start);
printk(KERN_INFO "Stolen memory information\n");
printk(KERN_INFO " base in RAM: 0x%x\n", dev_priv->stolen_base);
printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
@@ -467,7 +442,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
if (resume && (gtt_pages != pg->gtt_pages) &&
(stolen_size != pg->stolen_size)) {
- DRM_ERROR("GTT resume error.\n");
+ dev_err(dev->dev, "GTT resume error.\n");
ret = -EINVAL;
goto out_err;
}
@@ -475,62 +450,48 @@ int psb_gtt_init(struct drm_device *dev, int resume)
pg->gtt_pages = gtt_pages;
pg->stolen_size = stolen_size;
dev_priv->vram_stolen_size = vram_stolen_size;
- dev_priv->gtt_map =
- ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
+
+ /*
+ * Map the GTT and the stolen memory area
+ */
+ dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+ gtt_pages << PAGE_SHIFT);
if (!dev_priv->gtt_map) {
- DRM_ERROR("Failure to map gtt.\n");
+ dev_err(dev->dev, "Failure to map gtt.\n");
ret = -ENOMEM;
goto out_err;
}
dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
if (!dev_priv->vram_addr) {
- DRM_ERROR("Failure to map stolen base.\n");
+ dev_err(dev->dev, "Failure to map stolen base.\n");
ret = -ENOMEM;
goto out_err;
}
- DRM_DEBUG("%s: vram kernel virtual address %p\n", dev_priv->vram_addr);
-
- tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
- (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
-
- ttm_gtt_map = dev_priv->gtt_map + tt_pages / 2;
-
/*
- * insert vram stolen pages.
+ * Insert vram stolen pages into the GTT
*/
pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
- num_pages, pfn_base, 0);
+ num_pages, pfn_base << PAGE_SHIFT, 0);
for (i = 0; i < num_pages; ++i) {
pte = psb_gtt_mask_pte(pfn_base + i, 0);
iowrite32(pte, dev_priv->gtt_map + i);
}
/*
- * Init rest of gtt managed by IMG.
- */
- pfn_base = page_to_pfn(dev_priv->scratch_page);
- pte = psb_gtt_mask_pte(pfn_base, 0);
- for (; i < tt_pages / 2 - 1; ++i)
- iowrite32(pte, dev_priv->gtt_map + i);
-
- /*
- * Init rest of gtt managed by TTM.
+ * Init rest of GTT to the scratch page to avoid accidents or scribbles
*/
pfn_base = page_to_pfn(dev_priv->scratch_page);
pte = psb_gtt_mask_pte(pfn_base, 0);
- PSB_DEBUG_INIT("Initializing the rest of a total "
- "of %d gtt pages.\n", pg->gatt_pages);
+ for (; i < gtt_pages; ++i)
+ iowrite32(pte, dev_priv->gtt_map + i);
- for (; i < pg->gatt_pages - tt_pages / 2; ++i)
- iowrite32(pte, ttm_gtt_map + i);
(void) ioread32(dev_priv->gtt_map + i - 1);
-
return 0;
out_err:
diff --git a/drivers/staging/gma500/psb_gtt.h b/drivers/staging/gma500/gtt.h
index 535ae00f2ab..e0e1cb6f9bd 100644
--- a/drivers/staging/gma500/psb_gtt.h
+++ b/drivers/staging/gma500/gtt.h
@@ -22,8 +22,8 @@
#include <drm/drmP.h>
+/* This wants cleaning up with respect to the psb_dev and un-needed stuff */
struct psb_gtt {
- struct drm_device *dev;
uint32_t gatt_start;
uint32_t mmu_gatt_start;
uint32_t gtt_start;
@@ -35,20 +35,20 @@ struct psb_gtt {
struct rw_semaphore sem;
};
-/*Exported functions*/
+/* Exported functions */
extern int psb_gtt_init(struct drm_device *dev, int resume);
extern void psb_gtt_takedown(struct drm_device *dev);
/* Each gtt_range describes an allocation in the GTT area */
struct gtt_range {
- struct resource resource;
- u32 offset;
- struct kref kref;
+ struct resource resource; /* Resource for our allocation */
+ u32 offset; /* GTT offset of our object */
struct drm_gem_object gem; /* GEM high level stuff */
int in_gart; /* Currently in the GART (ref ct) */
- bool stolen; /* Backed from stolen RAM */
- bool mmapping; /* Is mmappable */
+ bool stolen; /* Backed from stolen RAM */
+ bool mmapping; /* Is mmappable */
struct page **pages; /* Backing pages if present */
+ int npage; /* Number of backing pages */
};
extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
diff --git a/drivers/staging/gma500/psb_intel_bios.c b/drivers/staging/gma500/intel_bios.c
index 417965da5e2..096757f9bc8 100644
--- a/drivers/staging/gma500/psb_intel_bios.c
+++ b/drivers/staging/gma500/intel_bios.c
@@ -24,7 +24,7 @@
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "psb_intel_bios.h"
+#include "intel_bios.h"
static void *find_section(struct bdb_header *bdb, int section_id)
@@ -96,25 +96,20 @@ static void parse_backlight_data(struct drm_psb_private *dev_priv,
dev_priv->lvds_bl = NULL;
- if (lvds_opts) {
- DRM_DEBUG("lvds_options found at %p\n", lvds_opts);
+ if (lvds_opts)
p_type = lvds_opts->panel_type;
- } else {
- DRM_DEBUG("no lvds_options\n");
+ else
return;
- }
bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
if (!lvds_bl) {
- DRM_DEBUG("No memory\n");
+ dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
return;
}
-
memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
-
dev_priv->lvds_bl = lvds_bl;
}
@@ -144,26 +139,28 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
if (!lvds_lfp_data)
return;
- dev_priv->lvds_vbt = 1;
entry = &lvds_lfp_data->data[lvds_options->panel_type];
dvo_timing = &entry->dvo_timing;
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
GFP_KERNEL);
+ if (panel_fixed_mode == NULL) {
+ dev_err(dev_priv->dev->dev, "out of memory for fixed panel mode\n");
+ return;
+ }
+ dev_priv->lvds_vbt = 1;
fill_detail_timing_data(panel_fixed_mode, dvo_timing);
if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
- DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
} else {
- DRM_DEBUG("Ignoring bogus LVDS VBT mode.\n");
+ dev_dbg(dev_priv->dev->dev, "ignoring invalid LVDS VBT\n");
dev_priv->lvds_vbt = 0;
kfree(panel_fixed_mode);
}
-
return;
}
@@ -257,7 +254,7 @@ bool psb_intel_init_bios(struct drm_device *dev)
}
if (!vbt) {
- DRM_ERROR("VBT signature missing\n");
+ dev_err(dev->dev, "VBT signature missing\n");
pci_unmap_rom(pdev, bios);
return -1;
}
diff --git a/drivers/staging/gma500/psb_intel_bios.h b/drivers/staging/gma500/intel_bios.h
index 70f1bf01818..70f1bf01818 100644
--- a/drivers/staging/gma500/psb_intel_bios.h
+++ b/drivers/staging/gma500/intel_bios.h
diff --git a/drivers/staging/gma500/psb_intel_i2c.c b/drivers/staging/gma500/intel_i2c.c
index e33432df510..e33432df510 100644
--- a/drivers/staging/gma500/psb_intel_i2c.c
+++ b/drivers/staging/gma500/intel_i2c.c
diff --git a/drivers/staging/gma500/psb_intel_opregion.c b/drivers/staging/gma500/intel_opregion.c
index 65e3e9b8dc1..d2e60376982 100644
--- a/drivers/staging/gma500/psb_intel_opregion.c
+++ b/drivers/staging/gma500/intel_opregion.c
@@ -33,24 +33,23 @@ struct opregion_header {
u8 driver_ver[16];
u32 mboxes;
u8 reserved[164];
-} __attribute__((packed));
+} __packed;
struct opregion_apci {
/*FIXME: add it later*/
-} __attribute__((packed));
+} __packed;
struct opregion_swsci {
/*FIXME: add it later*/
-} __attribute__((packed));
+} __packed;
struct opregion_acpi {
/*FIXME: add it later*/
-} __attribute__((packed));
+} __packed;
-int psb_intel_opregion_init(struct drm_device *dev)
+int gma_intel_opregion_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- /*struct psb_intel_opregion * opregion = &dev_priv->opregion;*/
u32 opregion_phy;
void *base;
u32 *lid_state;
@@ -58,11 +57,8 @@ int psb_intel_opregion_init(struct drm_device *dev)
dev_priv->lid_state = NULL;
pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
- if (opregion_phy == 0) {
- DRM_DEBUG("Opregion not supported, won't support lid-switch\n");
+ if (opregion_phy == 0)
return -ENOTSUPP;
- }
- DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
base = ioremap(opregion_phy, 8*1024);
if (!base)
@@ -70,9 +66,15 @@ int psb_intel_opregion_init(struct drm_device *dev)
lid_state = base + 0x01ac;
- DRM_DEBUG("Lid switch state 0x%08x\n", *lid_state);
-
dev_priv->lid_state = lid_state;
- dev_priv->lid_last_state = *lid_state;
+ dev_priv->lid_last_state = readl(lid_state);
+ return 0;
+}
+
+int gma_intel_opregion_exit(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->lid_state)
+ iounmap(dev_priv->lid_state);
return 0;
}
diff --git a/drivers/staging/gma500/mdfld_device.c b/drivers/staging/gma500/mdfld_device.c
new file mode 100644
index 00000000000..f47aeb7a203
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_device.c
@@ -0,0 +1,714 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "psb_drm.h"
+#include "psb_drv.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_output.h"
+#include "mid_bios.h"
+
+/*
+ * Provide the Medfield specific backlight management
+ */
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+static int mdfld_brightness;
+struct backlight_device *mdfld_backlight_device;
+
+static int mfld_set_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(mdfld_backlight_device);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int level = bd->props.brightness;
+
+	/* Valid brightness range is 1-100% */
+ if (level < 1)
+ level = 1;
+
+ if (gma_power_begin(dev, 0)) {
+ /* Calculate and set the brightness value */
+ u32 adjusted_level;
+
+ /* Adjust the backlight level with the percent in
+ * dev_priv->blc_adj2;
+ */
+ adjusted_level = level * dev_priv->blc_adj2;
+ adjusted_level = adjusted_level / 100;
+#if 0
+#ifndef CONFIG_MDFLD_DSI_DPU
+ if(!(dev_priv->dsr_fb_update & MDFLD_DSR_MIPI_CONTROL) &&
+ (dev_priv->dbi_panel_on || dev_priv->dbi_panel_on2)){
+ mdfld_dsi_dbi_exit_dsr(dev,MDFLD_DSR_MIPI_CONTROL, 0, 0);
+ dev_dbg(dev->dev, "Out of DSR before set brightness to %d.\n",adjusted_level);
+ }
+#endif
+ mdfld_dsi_brightness_control(dev, 0, adjusted_level);
+
+ if ((dev_priv->dbi_panel_on2) || (dev_priv->dpi_panel_on2))
+ mdfld_dsi_brightness_control(dev, 2, adjusted_level);
+#endif
+ gma_power_end(dev);
+ }
+ mdfld_brightness = level;
+ return 0;
+}
+
+int psb_get_brightness(struct backlight_device *bd)
+{
+ /* return locally cached var instead of HW read (due to DPST etc.) */
+ /* FIXME: ideally return actual value in case firmware fiddled with
+ it */
+ return mdfld_brightness;
+}
+
+static const struct backlight_ops mfld_ops = {
+ .get_brightness = psb_get_brightness,
+ .update_status = mfld_set_brightness,
+};
+
+static int mdfld_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct backlight_properties props;
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 100;
+ props.type = BACKLIGHT_PLATFORM;
+
+ mdfld_backlight_device = backlight_device_register("mfld-bl",
+ NULL, (void *)dev, &mfld_ops, &props);
+
+ if (IS_ERR(mdfld_backlight_device))
+ return PTR_ERR(mdfld_backlight_device);
+
+ dev_priv->blc_adj1 = 100;
+ dev_priv->blc_adj2 = 100;
+ mdfld_backlight_device->props.brightness = 100;
+ mdfld_backlight_device->props.max_brightness = 100;
+ backlight_update_status(mdfld_backlight_device);
+ dev_priv->backlight_device = mdfld_backlight_device;
+ return 0;
+}
+
+#endif
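
/*
 * Worked example (illustration only, not part of the driver): the adjusted
 * level above is plain integer percentage math, with blc_adj2 acting as a
 * platform-tuned ceiling. A requested level of 60 against blc_adj2 = 80
 * gives (60 * 80) / 100 = 48. A minimal stand-alone sketch of the same
 * scaling (mdfld_scale_brightness is a hypothetical helper):
 */
static inline u32 mdfld_scale_brightness(u32 level, u32 adj_percent)
{
	/* clamp to the 1-100% range accepted by mfld_set_brightness() */
	if (level < 1)
		level = 1;
	if (level > 100)
		level = 100;
	return (level * adj_percent) / 100;
}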
+
+/*
+ * Provide the Medfield specific chip logic and low level methods for
+ * power management.
+ */
+
+static void mdfld_init_pm(struct drm_device *dev)
+{
+ /* No work needed here yet */
+}
+
+/**
+ * mdfld_save_display_registers - save registers for pipe
+ * @dev: our device
+ * @pipe: pipe to save
+ *
+ * Save the pipe state of the device before we power it off. Keep everything
+ * we need to put it back again
+ */
+static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* register */
+ u32 dpll_reg = MRST_DPLL_A;
+ u32 fp_reg = MRST_FPA0;
+ u32 pipeconf_reg = PIPEACONF;
+ u32 htot_reg = HTOTAL_A;
+ u32 hblank_reg = HBLANK_A;
+ u32 hsync_reg = HSYNC_A;
+ u32 vtot_reg = VTOTAL_A;
+ u32 vblank_reg = VBLANK_A;
+ u32 vsync_reg = VSYNC_A;
+ u32 pipesrc_reg = PIPEASRC;
+ u32 dspstride_reg = DSPASTRIDE;
+ u32 dsplinoff_reg = DSPALINOFF;
+ u32 dsptileoff_reg = DSPATILEOFF;
+ u32 dspsize_reg = DSPASIZE;
+ u32 dsppos_reg = DSPAPOS;
+ u32 dspsurf_reg = DSPASURF;
+ u32 mipi_reg = MIPI;
+ u32 dspcntr_reg = DSPACNTR;
+ u32 dspstatus_reg = PIPEASTAT;
+ u32 palette_reg = PALETTE_A;
+
+ /* pointer to values */
+ u32 *dpll_val = &dev_priv->saveDPLL_A;
+ u32 *fp_val = &dev_priv->saveFPA0;
+ u32 *pipeconf_val = &dev_priv->savePIPEACONF;
+ u32 *htot_val = &dev_priv->saveHTOTAL_A;
+ u32 *hblank_val = &dev_priv->saveHBLANK_A;
+ u32 *hsync_val = &dev_priv->saveHSYNC_A;
+ u32 *vtot_val = &dev_priv->saveVTOTAL_A;
+ u32 *vblank_val = &dev_priv->saveVBLANK_A;
+ u32 *vsync_val = &dev_priv->saveVSYNC_A;
+ u32 *pipesrc_val = &dev_priv->savePIPEASRC;
+ u32 *dspstride_val = &dev_priv->saveDSPASTRIDE;
+ u32 *dsplinoff_val = &dev_priv->saveDSPALINOFF;
+ u32 *dsptileoff_val = &dev_priv->saveDSPATILEOFF;
+ u32 *dspsize_val = &dev_priv->saveDSPASIZE;
+ u32 *dsppos_val = &dev_priv->saveDSPAPOS;
+ u32 *dspsurf_val = &dev_priv->saveDSPASURF;
+ u32 *mipi_val = &dev_priv->saveMIPI;
+ u32 *dspcntr_val = &dev_priv->saveDSPACNTR;
+ u32 *dspstatus_val = &dev_priv->saveDSPASTATUS;
+ u32 *palette_val = dev_priv->save_palette_a;
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ /* register */
+ dpll_reg = MDFLD_DPLL_B;
+ fp_reg = MDFLD_DPLL_DIV0;
+ pipeconf_reg = PIPEBCONF;
+ htot_reg = HTOTAL_B;
+ hblank_reg = HBLANK_B;
+ hsync_reg = HSYNC_B;
+ vtot_reg = VTOTAL_B;
+ vblank_reg = VBLANK_B;
+ vsync_reg = VSYNC_B;
+ pipesrc_reg = PIPEBSRC;
+ dspstride_reg = DSPBSTRIDE;
+ dsplinoff_reg = DSPBLINOFF;
+ dsptileoff_reg = DSPBTILEOFF;
+ dspsize_reg = DSPBSIZE;
+ dsppos_reg = DSPBPOS;
+ dspsurf_reg = DSPBSURF;
+ dspcntr_reg = DSPBCNTR;
+ dspstatus_reg = PIPEBSTAT;
+ palette_reg = PALETTE_B;
+
+ /* values */
+ dpll_val = &dev_priv->saveDPLL_B;
+ fp_val = &dev_priv->saveFPB0;
+ pipeconf_val = &dev_priv->savePIPEBCONF;
+ htot_val = &dev_priv->saveHTOTAL_B;
+ hblank_val = &dev_priv->saveHBLANK_B;
+ hsync_val = &dev_priv->saveHSYNC_B;
+ vtot_val = &dev_priv->saveVTOTAL_B;
+ vblank_val = &dev_priv->saveVBLANK_B;
+ vsync_val = &dev_priv->saveVSYNC_B;
+ pipesrc_val = &dev_priv->savePIPEBSRC;
+ dspstride_val = &dev_priv->saveDSPBSTRIDE;
+ dsplinoff_val = &dev_priv->saveDSPBLINOFF;
+ dsptileoff_val = &dev_priv->saveDSPBTILEOFF;
+ dspsize_val = &dev_priv->saveDSPBSIZE;
+ dsppos_val = &dev_priv->saveDSPBPOS;
+ dspsurf_val = &dev_priv->saveDSPBSURF;
+ dspcntr_val = &dev_priv->saveDSPBCNTR;
+ dspstatus_val = &dev_priv->saveDSPBSTATUS;
+ palette_val = dev_priv->save_palette_b;
+ break;
+ case 2:
+ /* register */
+ pipeconf_reg = PIPECCONF;
+ htot_reg = HTOTAL_C;
+ hblank_reg = HBLANK_C;
+ hsync_reg = HSYNC_C;
+ vtot_reg = VTOTAL_C;
+ vblank_reg = VBLANK_C;
+ vsync_reg = VSYNC_C;
+ pipesrc_reg = PIPECSRC;
+ dspstride_reg = DSPCSTRIDE;
+ dsplinoff_reg = DSPCLINOFF;
+ dsptileoff_reg = DSPCTILEOFF;
+ dspsize_reg = DSPCSIZE;
+ dsppos_reg = DSPCPOS;
+ dspsurf_reg = DSPCSURF;
+ mipi_reg = MIPI_C;
+ dspcntr_reg = DSPCCNTR;
+ dspstatus_reg = PIPECSTAT;
+ palette_reg = PALETTE_C;
+
+ /* pointer to values */
+ pipeconf_val = &dev_priv->savePIPECCONF;
+ htot_val = &dev_priv->saveHTOTAL_C;
+ hblank_val = &dev_priv->saveHBLANK_C;
+ hsync_val = &dev_priv->saveHSYNC_C;
+ vtot_val = &dev_priv->saveVTOTAL_C;
+ vblank_val = &dev_priv->saveVBLANK_C;
+ vsync_val = &dev_priv->saveVSYNC_C;
+ pipesrc_val = &dev_priv->savePIPECSRC;
+ dspstride_val = &dev_priv->saveDSPCSTRIDE;
+ dsplinoff_val = &dev_priv->saveDSPCLINOFF;
+ dsptileoff_val = &dev_priv->saveDSPCTILEOFF;
+ dspsize_val = &dev_priv->saveDSPCSIZE;
+ dsppos_val = &dev_priv->saveDSPCPOS;
+ dspsurf_val = &dev_priv->saveDSPCSURF;
+ mipi_val = &dev_priv->saveMIPI_C;
+ dspcntr_val = &dev_priv->saveDSPCCNTR;
+ dspstatus_val = &dev_priv->saveDSPCSTATUS;
+ palette_val = dev_priv->save_palette_c;
+ break;
+ default:
+ DRM_ERROR("%s, invalid pipe number.\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Pipe & plane A info */
+ *dpll_val = PSB_RVDC32(dpll_reg);
+ *fp_val = PSB_RVDC32(fp_reg);
+ *pipeconf_val = PSB_RVDC32(pipeconf_reg);
+ *htot_val = PSB_RVDC32(htot_reg);
+ *hblank_val = PSB_RVDC32(hblank_reg);
+ *hsync_val = PSB_RVDC32(hsync_reg);
+ *vtot_val = PSB_RVDC32(vtot_reg);
+ *vblank_val = PSB_RVDC32(vblank_reg);
+ *vsync_val = PSB_RVDC32(vsync_reg);
+ *pipesrc_val = PSB_RVDC32(pipesrc_reg);
+ *dspstride_val = PSB_RVDC32(dspstride_reg);
+ *dsplinoff_val = PSB_RVDC32(dsplinoff_reg);
+ *dsptileoff_val = PSB_RVDC32(dsptileoff_reg);
+ *dspsize_val = PSB_RVDC32(dspsize_reg);
+ *dsppos_val = PSB_RVDC32(dsppos_reg);
+ *dspsurf_val = PSB_RVDC32(dspsurf_reg);
+ *dspcntr_val = PSB_RVDC32(dspcntr_reg);
+ *dspstatus_val = PSB_RVDC32(dspstatus_reg);
+
+ /*save palette (gamma) */
+ for (i = 0; i < 256; i++)
+ palette_val[i] = PSB_RVDC32(palette_reg + (i<<2));
+
+ if (pipe == 1) {
+ dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
+ dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
+ dev_priv->saveHDMIPHYMISCCTL = PSB_RVDC32(HDMIPHYMISCCTL);
+ dev_priv->saveHDMIB_CONTROL = PSB_RVDC32(HDMIB_CONTROL);
+ return 0;
+ }
+ *mipi_val = PSB_RVDC32(mipi_reg);
+ return 0;
+}
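
/*
 * The per-pipe save above is one long list of "read register X into
 * dev_priv->saveX" pairs. As a sketch only (hypothetical helpers, not part
 * of the driver; assumes the driver's PSB_RVDC32() accessor, which expects
 * a dev_priv in scope), the same pattern written as a table walk:
 */
struct mdfld_reg_save {
	u32 reg;	/* VDC register offset */
	u32 *val;	/* storage slot in dev_priv */
};

static void mdfld_save_reg_table(struct drm_psb_private *dev_priv,
				 const struct mdfld_reg_save *tbl, int n)
{
	int i;

	for (i = 0; i < n; i++)
		*tbl[i].val = PSB_RVDC32(tbl[i].reg);
}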
+
+/**
+ * mdfld_save_cursor_overlay_registers - save cursor overlay info
+ * @dev: our device
+ *
+ * Save the cursor and overlay register state
+ */
+static int mdfld_save_cursor_overlay_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /* Save cursor regs */
+ dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
+ dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
+ dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
+
+ dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
+ dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
+ dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
+
+ dev_priv->saveDSPCCURSOR_CTRL = PSB_RVDC32(CURCCNTR);
+ dev_priv->saveDSPCCURSOR_BASE = PSB_RVDC32(CURCBASE);
+ dev_priv->saveDSPCCURSOR_POS = PSB_RVDC32(CURCPOS);
+
+ /* HW overlay */
+ dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
+ dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
+ dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
+ dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
+ dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
+ dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
+ dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
+
+ dev_priv->saveOV_OVADD_C = PSB_RVDC32(OV_OVADD + OV_C_OFFSET);
+ dev_priv->saveOV_OGAMC0_C = PSB_RVDC32(OV_OGAMC0 + OV_C_OFFSET);
+ dev_priv->saveOV_OGAMC1_C = PSB_RVDC32(OV_OGAMC1 + OV_C_OFFSET);
+ dev_priv->saveOV_OGAMC2_C = PSB_RVDC32(OV_OGAMC2 + OV_C_OFFSET);
+ dev_priv->saveOV_OGAMC3_C = PSB_RVDC32(OV_OGAMC3 + OV_C_OFFSET);
+ dev_priv->saveOV_OGAMC4_C = PSB_RVDC32(OV_OGAMC4 + OV_C_OFFSET);
+ dev_priv->saveOV_OGAMC5_C = PSB_RVDC32(OV_OGAMC5 + OV_C_OFFSET);
+
+ return 0;
+}
+/**
+ * mdfld_restore_display_registers - restore the state of a pipe
+ * @dev: our device
+ * @pipe: the pipe to restore
+ *
+ * Restore the state of a pipe to that which was saved by the register save
+ * functions.
+ */
+static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
+{
+ /* To get panel out of ULPS mode */
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dsi_config *dsi_config = NULL;
+ u32 i = 0;
+ u32 dpll = 0;
+ u32 timeout = 0;
+ u32 reg_offset = 0;
+
+ /* register */
+ u32 dpll_reg = MRST_DPLL_A;
+ u32 fp_reg = MRST_FPA0;
+ u32 pipeconf_reg = PIPEACONF;
+ u32 htot_reg = HTOTAL_A;
+ u32 hblank_reg = HBLANK_A;
+ u32 hsync_reg = HSYNC_A;
+ u32 vtot_reg = VTOTAL_A;
+ u32 vblank_reg = VBLANK_A;
+ u32 vsync_reg = VSYNC_A;
+ u32 pipesrc_reg = PIPEASRC;
+ u32 dspstride_reg = DSPASTRIDE;
+ u32 dsplinoff_reg = DSPALINOFF;
+ u32 dsptileoff_reg = DSPATILEOFF;
+ u32 dspsize_reg = DSPASIZE;
+ u32 dsppos_reg = DSPAPOS;
+ u32 dspsurf_reg = DSPASURF;
+ u32 dspstatus_reg = PIPEASTAT;
+ u32 mipi_reg = MIPI;
+ u32 dspcntr_reg = DSPACNTR;
+ u32 palette_reg = PALETTE_A;
+
+ /* values */
+ u32 dpll_val = dev_priv->saveDPLL_A & ~DPLL_VCO_ENABLE;
+ u32 fp_val = dev_priv->saveFPA0;
+ u32 pipeconf_val = dev_priv->savePIPEACONF;
+ u32 htot_val = dev_priv->saveHTOTAL_A;
+ u32 hblank_val = dev_priv->saveHBLANK_A;
+ u32 hsync_val = dev_priv->saveHSYNC_A;
+ u32 vtot_val = dev_priv->saveVTOTAL_A;
+ u32 vblank_val = dev_priv->saveVBLANK_A;
+ u32 vsync_val = dev_priv->saveVSYNC_A;
+ u32 pipesrc_val = dev_priv->savePIPEASRC;
+ u32 dspstride_val = dev_priv->saveDSPASTRIDE;
+ u32 dsplinoff_val = dev_priv->saveDSPALINOFF;
+ u32 dsptileoff_val = dev_priv->saveDSPATILEOFF;
+ u32 dspsize_val = dev_priv->saveDSPASIZE;
+ u32 dsppos_val = dev_priv->saveDSPAPOS;
+ u32 dspsurf_val = dev_priv->saveDSPASURF;
+ u32 dspstatus_val = dev_priv->saveDSPASTATUS;
+ u32 mipi_val = dev_priv->saveMIPI;
+ u32 dspcntr_val = dev_priv->saveDSPACNTR;
+ u32 *palette_val = dev_priv->save_palette_a;
+
+ switch (pipe) {
+ case 0:
+ dsi_config = dev_priv->dsi_configs[0];
+ break;
+ case 1:
+ /* register */
+ dpll_reg = MDFLD_DPLL_B;
+ fp_reg = MDFLD_DPLL_DIV0;
+ pipeconf_reg = PIPEBCONF;
+ htot_reg = HTOTAL_B;
+ hblank_reg = HBLANK_B;
+ hsync_reg = HSYNC_B;
+ vtot_reg = VTOTAL_B;
+ vblank_reg = VBLANK_B;
+ vsync_reg = VSYNC_B;
+ pipesrc_reg = PIPEBSRC;
+ dspstride_reg = DSPBSTRIDE;
+ dsplinoff_reg = DSPBLINOFF;
+ dsptileoff_reg = DSPBTILEOFF;
+ dspsize_reg = DSPBSIZE;
+ dsppos_reg = DSPBPOS;
+ dspsurf_reg = DSPBSURF;
+ dspcntr_reg = DSPBCNTR;
+ palette_reg = PALETTE_B;
+ dspstatus_reg = PIPEBSTAT;
+
+ /* values */
+ dpll_val = dev_priv->saveDPLL_B & ~DPLL_VCO_ENABLE;
+ fp_val = dev_priv->saveFPB0;
+ pipeconf_val = dev_priv->savePIPEBCONF;
+ htot_val = dev_priv->saveHTOTAL_B;
+ hblank_val = dev_priv->saveHBLANK_B;
+ hsync_val = dev_priv->saveHSYNC_B;
+ vtot_val = dev_priv->saveVTOTAL_B;
+ vblank_val = dev_priv->saveVBLANK_B;
+ vsync_val = dev_priv->saveVSYNC_B;
+ pipesrc_val = dev_priv->savePIPEBSRC;
+ dspstride_val = dev_priv->saveDSPBSTRIDE;
+ dsplinoff_val = dev_priv->saveDSPBLINOFF;
+ dsptileoff_val = dev_priv->saveDSPBTILEOFF;
+ dspsize_val = dev_priv->saveDSPBSIZE;
+ dsppos_val = dev_priv->saveDSPBPOS;
+ dspsurf_val = dev_priv->saveDSPBSURF;
+ dspcntr_val = dev_priv->saveDSPBCNTR;
+ dspstatus_val = dev_priv->saveDSPBSTATUS;
+ palette_val = dev_priv->save_palette_b;
+ break;
+ case 2:
+ reg_offset = MIPIC_REG_OFFSET;
+
+ /* register */
+ pipeconf_reg = PIPECCONF;
+ htot_reg = HTOTAL_C;
+ hblank_reg = HBLANK_C;
+ hsync_reg = HSYNC_C;
+ vtot_reg = VTOTAL_C;
+ vblank_reg = VBLANK_C;
+ vsync_reg = VSYNC_C;
+ pipesrc_reg = PIPECSRC;
+ dspstride_reg = DSPCSTRIDE;
+ dsplinoff_reg = DSPCLINOFF;
+ dsptileoff_reg = DSPCTILEOFF;
+ dspsize_reg = DSPCSIZE;
+ dsppos_reg = DSPCPOS;
+ dspsurf_reg = DSPCSURF;
+ mipi_reg = MIPI_C;
+ dspcntr_reg = DSPCCNTR;
+ palette_reg = PALETTE_C;
+ dspstatus_reg = PIPECSTAT;
+
+ /* values */
+ pipeconf_val = dev_priv->savePIPECCONF;
+ htot_val = dev_priv->saveHTOTAL_C;
+ hblank_val = dev_priv->saveHBLANK_C;
+ hsync_val = dev_priv->saveHSYNC_C;
+ vtot_val = dev_priv->saveVTOTAL_C;
+ vblank_val = dev_priv->saveVBLANK_C;
+ vsync_val = dev_priv->saveVSYNC_C;
+ pipesrc_val = dev_priv->savePIPECSRC;
+ dspstride_val = dev_priv->saveDSPCSTRIDE;
+ dsplinoff_val = dev_priv->saveDSPCLINOFF;
+ dsptileoff_val = dev_priv->saveDSPCTILEOFF;
+ dspsize_val = dev_priv->saveDSPCSIZE;
+ dsppos_val = dev_priv->saveDSPCPOS;
+ dspsurf_val = dev_priv->saveDSPCSURF;
+ dspstatus_val = dev_priv->saveDSPCSTATUS;
+ mipi_val = dev_priv->saveMIPI_C;
+ dspcntr_val = dev_priv->saveDSPCCNTR;
+ palette_val = dev_priv->save_palette_c;
+
+ dsi_config = dev_priv->dsi_configs[1];
+ break;
+ default:
+ DRM_ERROR("%s, invalid pipe number.\n", __func__);
+ return -EINVAL;
+ }
+
+	/* Make sure the VGA plane is off; it initializes to on after reset! */
+ PSB_WVDC32(0x80000000, VGACNTRL);
+ if (pipe == 1) {
+ PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, dpll_reg);
+ PSB_RVDC32(dpll_reg);
+
+ PSB_WVDC32(fp_val, fp_reg);
+ } else {
+ dpll = PSB_RVDC32(dpll_reg);
+
+ if (!(dpll & DPLL_VCO_ENABLE)) {
+
+			/* When ungating the DPLL power, wait 0.5us before enabling the VCO */
+ if (dpll & MDFLD_PWR_GATE_EN) {
+ dpll &= ~MDFLD_PWR_GATE_EN;
+ PSB_WVDC32(dpll, dpll_reg);
+ udelay(500); /* FIXME: 1 ? */
+ }
+
+ PSB_WVDC32(fp_val, fp_reg);
+ PSB_WVDC32(dpll_val, dpll_reg);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+
+ dpll_val |= DPLL_VCO_ENABLE;
+ PSB_WVDC32(dpll_val, dpll_reg);
+ PSB_RVDC32(dpll_reg);
+
+ /* wait for DSI PLL to lock */
+ while ((timeout < 20000) && !(PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ udelay(150);
+ timeout++;
+ }
+
+ if (timeout == 20000) {
+ DRM_ERROR("%s, can't lock DSIPLL.\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+ }
+ /* Restore mode */
+ PSB_WVDC32(htot_val, htot_reg);
+ PSB_WVDC32(hblank_val, hblank_reg);
+ PSB_WVDC32(hsync_val, hsync_reg);
+ PSB_WVDC32(vtot_val, vtot_reg);
+ PSB_WVDC32(vblank_val, vblank_reg);
+ PSB_WVDC32(vsync_val, vsync_reg);
+ PSB_WVDC32(pipesrc_val, pipesrc_reg);
+ PSB_WVDC32(dspstatus_val, dspstatus_reg);
+
+ /* Set up the plane */
+ PSB_WVDC32(dspstride_val, dspstride_reg);
+ PSB_WVDC32(dsplinoff_val, dsplinoff_reg);
+ PSB_WVDC32(dsptileoff_val, dsptileoff_reg);
+ PSB_WVDC32(dspsize_val, dspsize_reg);
+ PSB_WVDC32(dsppos_val, dsppos_reg);
+ PSB_WVDC32(dspsurf_val, dspsurf_reg);
+
+ if (pipe == 1) {
+ PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
+ PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
+ PSB_WVDC32(dev_priv->saveHDMIPHYMISCCTL, HDMIPHYMISCCTL);
+ PSB_WVDC32(dev_priv->saveHDMIB_CONTROL, HDMIB_CONTROL);
+
+ } else {
+ /* Set up pipe related registers */
+ PSB_WVDC32(mipi_val, mipi_reg);
+ /* Setup MIPI adapter + MIPI IP registers */
+ mdfld_dsi_controller_init(dsi_config, pipe);
+ msleep(20);
+ }
+ /* Enable the plane */
+ PSB_WVDC32(dspcntr_val, dspcntr_reg);
+ msleep(20);
+ /* Enable the pipe */
+ PSB_WVDC32(pipeconf_val, pipeconf_reg);
+
+ for (i = 0; i < 256; i++)
+ PSB_WVDC32(palette_val[i], palette_reg + (i<<2));
+ if (pipe == 1)
+ return 0;
+ if (!mdfld_panel_dpi(dev))
+ mdfld_enable_te(dev, pipe);
+ return 0;
+}
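
/*
 * The DSI PLL lock wait above is a bounded poll: up to 20000 iterations of
 * 150us (roughly 3 seconds) on the PIPECONF_DSIPLL_LOCK bit. Factored out
 * as a sketch (hypothetical helper, not part of the driver):
 */
static int mdfld_wait_for_dsipll_lock(struct drm_psb_private *dev_priv,
				      u32 pipeconf_reg)
{
	int tries = 20000;

	while (tries--) {
		if (PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)
			return 0;
		udelay(150);
	}
	return -EINVAL;		/* PLL failed to lock, as above */
}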
+
+/**
+ * mdfld_restore_cursor_overlay_registers - restore cursor
+ * @dev: our device
+ *
+ * Restore the cursor and overlay state that was saved earlier
+ */
+static int mdfld_restore_cursor_overlay_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /* Enable Cursor A */
+ PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
+ PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
+ PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
+
+ PSB_WVDC32(dev_priv->saveDSPBCURSOR_CTRL, CURBCNTR);
+ PSB_WVDC32(dev_priv->saveDSPBCURSOR_POS, CURBPOS);
+ PSB_WVDC32(dev_priv->saveDSPBCURSOR_BASE, CURBBASE);
+
+ PSB_WVDC32(dev_priv->saveDSPCCURSOR_CTRL, CURCCNTR);
+ PSB_WVDC32(dev_priv->saveDSPCCURSOR_POS, CURCPOS);
+ PSB_WVDC32(dev_priv->saveDSPCCURSOR_BASE, CURCBASE);
+
+ /* Restore HW overlay */
+ PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
+
+ PSB_WVDC32(dev_priv->saveOV_OVADD_C, OV_OVADD + OV_C_OFFSET);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC0_C, OV_OGAMC0 + OV_C_OFFSET);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC1_C, OV_OGAMC1 + OV_C_OFFSET);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC2_C, OV_OGAMC2 + OV_C_OFFSET);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC3_C, OV_OGAMC3 + OV_C_OFFSET);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC4_C, OV_OGAMC4 + OV_C_OFFSET);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC5_C, OV_OGAMC5 + OV_C_OFFSET);
+
+ return 0;
+}
+
+/**
+ * mdfld_save_registers - save registers lost on suspend
+ * @dev: our DRM device
+ *
+ * Save the state we need in order to be able to restore the interface
+ * upon resume from suspend
+ */
+static int mdfld_save_registers(struct drm_device *dev)
+{
+ /* FIXME: We need to shut down panels here if using them
+ and once the right bits are merged */
+ mdfld_save_cursor_overlay_registers(dev);
+ mdfld_save_display_registers(dev, 0);
+ mdfld_save_display_registers(dev, 2);
+ mdfld_save_display_registers(dev, 1);
+ mdfld_disable_crtc(dev, 0);
+ mdfld_disable_crtc(dev, 2);
+ mdfld_disable_crtc(dev, 1);
+ return 0;
+}
+
+/**
+ * mdfld_restore_registers - restore lost register state
+ * @dev: our DRM device
+ *
+ * Restore register state that was lost during suspend and resume.
+ */
+static int mdfld_restore_registers(struct drm_device *dev)
+{
+ mdfld_restore_display_registers(dev, 1);
+ mdfld_restore_display_registers(dev, 0);
+ mdfld_restore_display_registers(dev, 2);
+ mdfld_restore_cursor_overlay_registers(dev);
+ return 0;
+}
+
+static int mdfld_power_down(struct drm_device *dev)
+{
+ /* FIXME */
+ return 0;
+}
+
+static int mdfld_power_up(struct drm_device *dev)
+{
+ /* FIXME */
+ return 0;
+}
+
+const struct psb_ops mdfld_chip_ops = {
+ .name = "Medfield",
+ .accel_2d = 0,
+ .pipes = 3,
+ .crtcs = 2,
+ .sgx_offset = MRST_SGX_OFFSET,
+
+ .chip_setup = mid_chip_setup,
+
+ .crtc_helper = &mdfld_helper_funcs,
+ .crtc_funcs = &mdfld_intel_crtc_funcs,
+
+ .output_init = mdfld_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = mdfld_backlight_init,
+#endif
+
+ .init_pm = mdfld_init_pm,
+ .save_regs = mdfld_save_registers,
+ .restore_regs = mdfld_restore_registers,
+ .power_down = mdfld_power_down,
+ .power_up = mdfld_power_up,
+};
+
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.c b/drivers/staging/gma500/mdfld_dsi_dbi.c
new file mode 100644
index 00000000000..fd211f3467c
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_dsi_dbi.c
@@ -0,0 +1,761 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dbi_dpu.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+#include "power.h"
+#include <linux/pm_runtime.h>
+
+int enable_gfx_rtpm;
+
+extern struct drm_device *gpDrmDevice;
+extern int gfxrtdelay;
+int enter_dsr;
+struct mdfld_dsi_dbi_output *gdbi_output;
+extern bool gbgfxsuspended;
+
+#define MDFLD_DSR_MAX_IDLE_COUNT 2
+
+/*
+ * Set the panel refresh area
+ */
+int mdfld_dsi_dbi_update_area(struct mdfld_dsi_dbi_output *dbi_output,
+ u16 x1, u16 y1, u16 x2, u16 y2)
+{
+ struct mdfld_dsi_pkg_sender *sender =
+ mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
+ u8 param[4];
+ u8 cmd;
+ int err;
+
+ if (!sender) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* Set column */
+ cmd = DCS_SET_COLUMN_ADDRESS;
+ param[0] = x1 >> 8;
+ param[1] = x1;
+ param[2] = x2 >> 8;
+ param[3] = x2;
+
+ err = mdfld_dsi_send_dcs(sender,
+ cmd,
+ param,
+ 4,
+ CMD_DATA_SRC_SYSTEM_MEM,
+ MDFLD_DSI_QUEUE_PACKAGE);
+ if (err) {
+		dev_err(sender->dev->dev, "DCS 0x%x send failed\n", cmd);
+ goto err_out;
+ }
+
+ /* Set page */
+ cmd = DCS_SET_PAGE_ADDRESS;
+ param[0] = y1 >> 8;
+ param[1] = y1;
+ param[2] = y2 >> 8;
+ param[3] = y2;
+
+ err = mdfld_dsi_send_dcs(sender,
+ cmd,
+ param,
+ 4,
+ CMD_DATA_SRC_SYSTEM_MEM,
+ MDFLD_DSI_QUEUE_PACKAGE);
+ if (err) {
+		dev_err(sender->dev->dev, "DCS 0x%x send failed\n", cmd);
+ goto err_out;
+ }
+
+ /*update screen*/
+ err = mdfld_dsi_send_dcs(sender,
+ write_mem_start,
+ NULL,
+ 0,
+ CMD_DATA_SRC_PIPE,
+ MDFLD_DSI_QUEUE_PACKAGE);
+ if (err) {
+		dev_err(sender->dev->dev, "DCS 0x%x send failed\n",
+			write_mem_start);
+ goto err_out;
+ }
+ mdfld_dsi_cmds_kick_out(sender);
+err_out:
+ return err;
+}
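
/*
 * Worked example (illustration only): DCS_SET_COLUMN_ADDRESS and
 * DCS_SET_PAGE_ADDRESS take the 16-bit start/end coordinates as big-endian
 * byte pairs, exactly as packed above. For x1 = 0 and x2 = 863 (0x035f)
 * the parameters are 0x00 0x00 0x03 0x5f. As a sketch (hypothetical
 * helper, not part of the driver):
 */
static inline void mdfld_pack_dcs_range(u8 param[4], u16 start, u16 end)
{
	param[0] = start >> 8;		/* MSB first */
	param[1] = start & 0xff;
	param[2] = end >> 8;
	param[3] = end & 0xff;
}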
+
+/*
+ * set panel's power state
+ */
+int mdfld_dsi_dbi_update_power(struct mdfld_dsi_dbi_output *dbi_output,
+ int mode)
+{
+ struct drm_device *dev = dbi_output->dev;
+ struct mdfld_dsi_pkg_sender *sender =
+ mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
+ u8 param = 0;
+ u32 err = 0;
+
+ if (!sender) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ /* Exit sleep mode */
+ err = mdfld_dsi_send_dcs(sender,
+ DCS_EXIT_SLEEP_MODE,
+ NULL,
+ 0,
+ CMD_DATA_SRC_SYSTEM_MEM,
+ MDFLD_DSI_QUEUE_PACKAGE);
+ if (err) {
+			dev_err(dev->dev, "DCS 0x%x send failed\n",
+ DCS_EXIT_SLEEP_MODE);
+ goto power_err;
+ }
+
+ /* Set display on */
+ err = mdfld_dsi_send_dcs(sender,
+ DCS_SET_DISPLAY_ON,
+ NULL,
+ 0,
+ CMD_DATA_SRC_SYSTEM_MEM,
+ MDFLD_DSI_QUEUE_PACKAGE);
+ if (err) {
+			dev_err(dev->dev, "DCS 0x%x send failed\n",
+ DCS_SET_DISPLAY_ON);
+ goto power_err;
+ }
+
+ /* set tear effect on */
+ err = mdfld_dsi_send_dcs(sender,
+ DCS_SET_TEAR_ON,
+ &param,
+ 1,
+ CMD_DATA_SRC_SYSTEM_MEM,
+ MDFLD_DSI_QUEUE_PACKAGE);
+ if (err) {
+			dev_err(dev->dev, "DCS 0x%x send failed\n",
+				DCS_SET_TEAR_ON);
+ goto power_err;
+ }
+
+		/*
+ * FIXME: remove this later
+ */
+ err = mdfld_dsi_send_dcs(sender,
+ DCS_WRITE_MEM_START,
+ NULL,
+ 0,
+ CMD_DATA_SRC_PIPE,
+ MDFLD_DSI_QUEUE_PACKAGE);
+ if (err) {
+			dev_err(dev->dev, "DCS 0x%x send failed\n",
+ DCS_WRITE_MEM_START);
+ goto power_err;
+ }
+ } else {
+ /* Set tear effect off */
+ err = mdfld_dsi_send_dcs(sender,
+ DCS_SET_TEAR_OFF,
+ NULL,
+ 0,
+ CMD_DATA_SRC_SYSTEM_MEM,
+ MDFLD_DSI_QUEUE_PACKAGE);
+ if (err) {
+			dev_err(dev->dev, "DCS 0x%x send failed\n",
+ DCS_SET_TEAR_OFF);
+ goto power_err;
+ }
+
+ /* Turn display off */
+ err = mdfld_dsi_send_dcs(sender,
+ DCS_SET_DISPLAY_OFF,
+ NULL,
+ 0,
+ CMD_DATA_SRC_SYSTEM_MEM,
+ MDFLD_DSI_QUEUE_PACKAGE);
+ if (err) {
+			dev_err(dev->dev, "DCS 0x%x send failed\n",
+ DCS_SET_DISPLAY_OFF);
+ goto power_err;
+ }
+
+ /* Now enter sleep mode */
+ err = mdfld_dsi_send_dcs(sender,
+ DCS_ENTER_SLEEP_MODE,
+ NULL,
+ 0,
+ CMD_DATA_SRC_SYSTEM_MEM,
+ MDFLD_DSI_QUEUE_PACKAGE);
+ if (err) {
+			dev_err(dev->dev, "DCS 0x%x send failed\n",
+ DCS_ENTER_SLEEP_MODE);
+ goto power_err;
+ }
+ }
+ mdfld_dsi_cmds_kick_out(sender);
+power_err:
+ return err;
+}
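
/*
 * For reference, the DPMS sequence implemented above is:
 *   on:  DCS_EXIT_SLEEP_MODE -> DCS_SET_DISPLAY_ON -> DCS_SET_TEAR_ON ->
 *        DCS_WRITE_MEM_START (temporary, see the FIXME)
 *   off: DCS_SET_TEAR_OFF -> DCS_SET_DISPLAY_OFF -> DCS_ENTER_SLEEP_MODE
 * with every command queued as a package and then flushed in one batch by
 * mdfld_dsi_cmds_kick_out().
 */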
+
+/*
+ * send a generic DCS command with a parameter list
+ */
+int mdfld_dsi_dbi_send_dcs(struct mdfld_dsi_dbi_output *dbi_output,
+ u8 dcs, u8 *param, u32 num, u8 data_src)
+{
+ struct mdfld_dsi_pkg_sender *sender =
+ mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
+ int ret;
+
+ if (!sender) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ ret = mdfld_dsi_send_dcs(sender,
+ dcs,
+ param,
+ num,
+ data_src,
+ MDFLD_DSI_SEND_PACKAGE);
+
+ return ret;
+}
+
+/*
+ * Enter DSR
+ */
+void mdfld_dsi_dbi_enter_dsr(struct mdfld_dsi_dbi_output *dbi_output, int pipe)
+{
+ u32 reg_val;
+ struct drm_device *dev = dbi_output->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dbi_output->base.base.crtc;
+ struct psb_intel_crtc *psb_crtc = (crtc) ?
+ to_psb_intel_crtc(crtc) : NULL;
+ u32 dpll_reg = MRST_DPLL_A;
+ u32 pipeconf_reg = PIPEACONF;
+ u32 dspcntr_reg = DSPACNTR;
+
+ if (!dbi_output)
+ return;
+
+ /* FIXME check if can go */
+ dev_priv->is_in_idle = true;
+
+ gdbi_output = dbi_output;
+ if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
+ (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
+ return;
+
+ if (pipe == 2) {
+ dpll_reg = MRST_DPLL_A;
+ pipeconf_reg = PIPECCONF;
+ dspcntr_reg = DSPCCNTR;
+ }
+
+ if (!gma_power_begin(dev, true)) {
+ dev_err(dev->dev, "hw begin failed\n");
+ return;
+ }
+ /* Disable te interrupts */
+ mdfld_disable_te(dev, pipe);
+
+ /* Disable plane */
+ reg_val = REG_READ(dspcntr_reg);
+	if (reg_val & DISPLAY_PLANE_ENABLE) {
+ REG_WRITE(dspcntr_reg, reg_val & ~DISPLAY_PLANE_ENABLE);
+ REG_READ(dspcntr_reg);
+ }
+
+ /* Disable pipe */
+ reg_val = REG_READ(pipeconf_reg);
+	if (reg_val & PIPEACONF_ENABLE) {
+		reg_val &= ~PIPEACONF_ENABLE;
+ reg_val |= (PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF);
+ REG_WRITE(pipeconf_reg, reg_val);
+ REG_READ(pipeconf_reg);
+ mdfldWaitForPipeDisable(dev, pipe);
+ }
+
+ /* Disable DPLL */
+ reg_val = REG_READ(dpll_reg);
+	if (reg_val & DPLL_VCO_ENABLE) {
+ reg_val &= ~DPLL_VCO_ENABLE;
+ REG_WRITE(dpll_reg, reg_val);
+ REG_READ(dpll_reg);
+ udelay(500);
+ }
+
+ gma_power_end(dev);
+ dbi_output->mode_flags |= MODE_SETTING_IN_DSR;
+ if (pipe == 2) {
+ enter_dsr = 1;
+ /* pm_schedule_suspend(&dev->pdev->dev, gfxrtdelay); */
+ }
+}
+
+static void mdfld_dbi_output_exit_dsr(struct mdfld_dsi_dbi_output *dbi_output,
+ int pipe)
+{
+ struct drm_device *dev = dbi_output->dev;
+ struct drm_crtc *crtc = dbi_output->base.base.crtc;
+ struct psb_intel_crtc *psb_crtc = (crtc) ?
+ to_psb_intel_crtc(crtc) : NULL;
+ u32 reg_val;
+ u32 dpll_reg = MRST_DPLL_A;
+ u32 pipeconf_reg = PIPEACONF;
+ u32 dspcntr_reg = DSPACNTR;
+ u32 reg_offset = 0;
+
+ /*if mode setting on-going, back off*/
+ if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
+ (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
+ return;
+
+ if (pipe == 2) {
+ dpll_reg = MRST_DPLL_A;
+ pipeconf_reg = PIPECCONF;
+ dspcntr_reg = DSPCCNTR;
+ reg_offset = MIPIC_REG_OFFSET;
+ }
+
+ if (!gma_power_begin(dev, true)) {
+ dev_err(dev->dev, "hw begin failed\n");
+ return;
+ }
+
+ /* Enable DPLL */
+ reg_val = REG_READ(dpll_reg);
+ if (!(reg_val & DPLL_VCO_ENABLE)) {
+ if (reg_val & MDFLD_PWR_GATE_EN) {
+ reg_val &= ~MDFLD_PWR_GATE_EN;
+ REG_WRITE(dpll_reg, reg_val);
+ REG_READ(dpll_reg);
+ udelay(500);
+ }
+
+ reg_val |= DPLL_VCO_ENABLE;
+ REG_WRITE(dpll_reg, reg_val);
+ REG_READ(dpll_reg);
+ udelay(500);
+
+ /* Add timeout */
+ while (!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK))
+ cpu_relax();
+ }
+
+ /* Enable pipe */
+ reg_val = REG_READ(pipeconf_reg);
+ if (!(reg_val & PIPEACONF_ENABLE)) {
+ reg_val |= PIPEACONF_ENABLE;
+ REG_WRITE(pipeconf_reg, reg_val);
+ REG_READ(pipeconf_reg);
+ udelay(500);
+ mdfldWaitForPipeEnable(dev, pipe);
+ }
+
+ /* Enable plane */
+ reg_val = REG_READ(dspcntr_reg);
+ if (!(reg_val & DISPLAY_PLANE_ENABLE)) {
+ reg_val |= DISPLAY_PLANE_ENABLE;
+ REG_WRITE(dspcntr_reg, reg_val);
+ REG_READ(dspcntr_reg);
+ udelay(500);
+ }
+
+ /* Enable TE interrupt on this pipe */
+ mdfld_enable_te(dev, pipe);
+ gma_power_end(dev);
+
+ /*clean IN_DSR flag*/
+ dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
+}
+
+/*
+ * Exit from DSR
+ */
+void mdfld_dsi_dbi_exit_dsr(struct drm_device *dev, u32 update_src)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
+ struct mdfld_dsi_dbi_output **dbi_output;
+ int i;
+ int pipe;
+
+ /* FIXME can go ? */
+ dev_priv->is_in_idle = false;
+ dbi_output = dsr_info->dbi_outputs;
+
+#ifdef CONFIG_PM_RUNTIME
+ if (!enable_gfx_rtpm) {
+/* pm_runtime_allow(&gpDrmDevice->pdev->dev); */
+/* schedule_delayed_work(&rtpm_work, 30 * 1000);*/ /* FIXME: HZ ? */
+ }
+#endif
+
+ /* For each output, exit dsr */
+ for (i = 0; i < dsr_info->dbi_output_num; i++) {
+ /* If panel has been turned off, skip */
+ if (!dbi_output[i] || !dbi_output[i]->dbi_panel_on)
+ continue;
+ pipe = dbi_output[i]->channel_num ? 2 : 0;
+ enter_dsr = 0;
+ mdfld_dbi_output_exit_dsr(dbi_output[i], pipe);
+ }
+ dev_priv->dsr_fb_update |= update_src;
+}
+
+static bool mdfld_dbi_is_in_dsr(struct drm_device *dev)
+{
+ if (REG_READ(MRST_DPLL_A) & DPLL_VCO_ENABLE)
+ return false;
+ if ((REG_READ(PIPEACONF) & PIPEACONF_ENABLE) ||
+ (REG_READ(PIPECCONF) & PIPEACONF_ENABLE))
+ return false;
+ if ((REG_READ(DSPACNTR) & DISPLAY_PLANE_ENABLE) ||
+ (REG_READ(DSPCCNTR) & DISPLAY_PLANE_ENABLE))
+ return false;
+
+ return true;
+}
+
+/* Periodically update dbi panel */
+void mdfld_dbi_update_panel(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
+ struct mdfld_dsi_dbi_output **dbi_outputs;
+ struct mdfld_dsi_dbi_output *dbi_output;
+ int i;
+ int can_enter_dsr = 0;
+ u32 damage_mask;
+
+ dbi_outputs = dsr_info->dbi_outputs;
+ dbi_output = pipe ? dbi_outputs[1] : dbi_outputs[0];
+
+ if (!dbi_output)
+ return;
+
+ if (pipe == 0)
+ damage_mask = dev_priv->dsr_fb_update & MDFLD_DSR_DAMAGE_MASK_0;
+ else if (pipe == 2)
+ damage_mask = dev_priv->dsr_fb_update & MDFLD_DSR_DAMAGE_MASK_2;
+ else
+ return;
+
+	/* If the FB is damaged and the panel is on, update the on-panel FB */
+ if (damage_mask && dbi_output->dbi_panel_on) {
+ dbi_output->dsr_fb_update_done = false;
+
+ if (dbi_output->p_funcs->update_fb)
+ dbi_output->p_funcs->update_fb(dbi_output, pipe);
+
+ if (dev_priv->dsr_enable && dbi_output->dsr_fb_update_done)
+ dev_priv->dsr_fb_update &= ~damage_mask;
+
+ /*clean IN_DSR flag*/
+ dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
+
+ dbi_output->dsr_idle_count = 0;
+ } else {
+ dbi_output->dsr_idle_count++;
+ }
+
+ switch (dsr_info->dbi_output_num) {
+ case 1:
+ if (dbi_output->dsr_idle_count > MDFLD_DSR_MAX_IDLE_COUNT)
+ can_enter_dsr = 1;
+ break;
+ case 2:
+ if (dbi_outputs[0]->dsr_idle_count > MDFLD_DSR_MAX_IDLE_COUNT
+ && dbi_outputs[1]->dsr_idle_count > MDFLD_DSR_MAX_IDLE_COUNT)
+ can_enter_dsr = 1;
+ break;
+ default:
+ DRM_ERROR("Wrong DBI output number\n");
+ }
+
+ /* Try to enter DSR */
+ if (can_enter_dsr) {
+ for (i = 0; i < dsr_info->dbi_output_num; i++) {
+ if (!mdfld_dbi_is_in_dsr(dev) && dbi_outputs[i] &&
+ !(dbi_outputs[i]->mode_flags & MODE_SETTING_ON_GOING)) {
+ mdfld_dsi_dbi_enter_dsr(dbi_outputs[i],
+ dbi_outputs[i]->channel_num ? 2 : 0);
+#if 0
+ enter_dsr = 1;
+ pr_err("%s: enter_dsr = 1\n", __func__);
+#endif
+ }
+ }
+ /*schedule rpm suspend after gfxrtdelay*/
+#ifdef CONFIG_GFX_RTPM
+ if (!dev_priv->rpm_enabled
+ || !enter_dsr
+ /* || (REG_READ(HDMIB_CONTROL) & HDMIB_PORT_EN) */
+ || pm_schedule_suspend(&dev->pdev->dev, gfxrtdelay))
+ dev_warn(dev->dev,
+ "Runtime PM schedule suspend failed, rpm %d\n",
+ dev_priv->rpm_enabled);
+#endif
+ }
+}
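
/*
 * Sketch of the idle heuristic used above (hypothetical helper, not part
 * of the driver): an output is considered DSR-ready once it has gone more
 * than MDFLD_DSR_MAX_IDLE_COUNT update cycles without framebuffer damage.
 */
static bool mdfld_dbi_output_idle(const struct mdfld_dsi_dbi_output *out)
{
	return out && out->dsr_idle_count > MDFLD_DSR_MAX_IDLE_COUNT;
}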
+
+int mdfld_dbi_dsr_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
+
+ if (!dsr_info || IS_ERR(dsr_info)) {
+ dsr_info = kzalloc(sizeof(struct mdfld_dbi_dsr_info),
+ GFP_KERNEL);
+ if (!dsr_info) {
+ dev_err(dev->dev, "No memory\n");
+ return -ENOMEM;
+ }
+ dev_priv->dbi_dsr_info = dsr_info;
+ }
+ return 0;
+}
+
+void mdfld_dbi_dsr_exit(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
+
+ if (dsr_info) {
+ kfree(dsr_info);
+ dev_priv->dbi_dsr_info = NULL;
+ }
+}
+
+void mdfld_dsi_controller_dbi_init(struct mdfld_dsi_config *dsi_config,
+ int pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+ int lane_count = dsi_config->lane_count;
+ u32 val = 0;
+
+ dev_dbg(dev->dev, "Init DBI interface on pipe %d...\n", pipe);
+
+ /* Un-ready device */
+ REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
+
+ /* Init dsi adapter before kicking off */
+ REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
+
+ /* TODO: figure out how to setup these registers */
+ REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
+ REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset),
+ 0x000a0014);
+ REG_WRITE((MIPIA_DBI_BW_CTRL_REG + reg_offset), 0x00000400);
+ REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000001);
+ REG_WRITE((MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset), 0x00000000);
+
+ /* Enable all interrupts */
+ REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
+ /* Max value: 20 clock cycles of txclkesc */
+ REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x0000001f);
+ /* Min 21 txclkesc, max: ffffh */
+ REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0x0000ffff);
+ /* Min: 7d0 max: 4e20 */
+ REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x00000fa0);
+
+ /* Set up func_prg */
+ val |= lane_count;
+ val |= (dsi_config->channel_num << DSI_DBI_VIRT_CHANNEL_OFFSET);
+ val |= DSI_DBI_COLOR_FORMAT_OPTION2;
+ REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
+
+ REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 0x3fffff);
+ REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff);
+
+ /* De-assert dbi_stall when half of DBI FIFO is empty */
+ /* REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000000); */
+
+ REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
+ REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
+ REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
+ REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
+}
+
+#if 0
+/*DBI encoder helper funcs*/
+static const struct drm_encoder_helper_funcs mdfld_dsi_dbi_helper_funcs = {
+ .dpms = mdfld_dsi_dbi_dpms,
+ .mode_fixup = mdfld_dsi_dbi_mode_fixup,
+ .prepare = mdfld_dsi_dbi_prepare,
+ .mode_set = mdfld_dsi_dbi_mode_set,
+ .commit = mdfld_dsi_dbi_commit,
+};
+
+/*DBI encoder funcs*/
+static const struct drm_encoder_funcs mdfld_dsi_dbi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+#endif
+
+/*
+ * Init DSI DBI encoder.
+ * Allocate an mdfld_dsi_encoder and attach it to given @dsi_connector
+ * return pointer of newly allocated DBI encoder, NULL on error
+ */
+struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev,
+ struct mdfld_dsi_connector *dsi_connector,
+ struct panel_funcs *p_funcs)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dsi_dbi_output *dbi_output = NULL;
+ struct mdfld_dsi_config *dsi_config;
+ struct drm_connector *connector = NULL;
+ struct drm_encoder *encoder = NULL;
+ struct drm_display_mode *fixed_mode = NULL;
+ struct psb_gtt *pg = dev_priv ? (&dev_priv->gtt) : NULL;
+ struct mdfld_dbi_dpu_info *dpu_info = dev_priv ? (dev_priv->dbi_dpu_info) : NULL;
+ struct mdfld_dbi_dsr_info *dsr_info = dev_priv ? (dev_priv->dbi_dsr_info) : NULL;
+ u32 data = 0;
+ int pipe;
+ int ret;
+
+ if (!pg || !dsi_connector || !p_funcs) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ dsi_config = mdfld_dsi_get_config(dsi_connector);
+ pipe = dsi_connector->pipe;
+
+ /*panel hard-reset*/
+ if (p_funcs->reset) {
+ ret = p_funcs->reset(pipe);
+ if (ret) {
+ DRM_ERROR("Panel %d hard-reset failed\n", pipe);
+ return NULL;
+ }
+ }
+ /* Panel drvIC init */
+ if (p_funcs->drv_ic_init)
+ p_funcs->drv_ic_init(dsi_config, pipe);
+
+ /* Panel power mode detect */
+ ret = mdfld_dsi_get_power_mode(dsi_config,
+ &data,
+ MDFLD_DSI_HS_TRANSMISSION);
+ if (ret) {
+ DRM_ERROR("Panel %d get power mode failed\n", pipe);
+ dsi_connector->status = connector_status_disconnected;
+ } else {
+ DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
+ dsi_connector->status = connector_status_connected;
+ }
+
+ /*TODO: get panel info from DDB*/
+
+ dbi_output = kzalloc(sizeof(struct mdfld_dsi_dbi_output), GFP_KERNEL);
+ if (!dbi_output) {
+ dev_err(dev->dev, "No memory\n");
+ return NULL;
+ }
+
+ if (dsi_connector->pipe == 0) {
+ dbi_output->channel_num = 0;
+ dev_priv->dbi_output = dbi_output;
+ } else if (dsi_connector->pipe == 2) {
+ dbi_output->channel_num = 1;
+ dev_priv->dbi_output2 = dbi_output;
+ } else {
+		dev_err(dev->dev, "only two DSI outputs are supported\n");
+ goto out_err1;
+ }
+
+ dbi_output->dev = dev;
+ dbi_output->p_funcs = p_funcs;
+ fixed_mode = dsi_config->fixed_mode;
+ dbi_output->panel_fixed_mode = fixed_mode;
+
+ /* Create drm encoder object */
+ connector = &dsi_connector->base.base;
+ encoder = &dbi_output->base.base;
+ /* Review this if we ever get MIPI-HDMI bridges or similar */
+ drm_encoder_init(dev,
+ encoder,
+ p_funcs->encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs);
+
+ /* Attach to given connector */
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ /* Set possible CRTCs and clones */
+ if (dsi_connector->pipe) {
+ encoder->possible_crtcs = (1 << 2);
+ encoder->possible_clones = (1 << 1);
+ } else {
+ encoder->possible_crtcs = (1 << 0);
+ encoder->possible_clones = (1 << 0);
+ }
+
+ dev_priv->dsr_fb_update = 0;
+ dev_priv->dsr_enable = false;
+ dev_priv->exit_idle = mdfld_dsi_dbi_exit_dsr;
+
+ dbi_output->first_boot = true;
+ dbi_output->mode_flags = MODE_SETTING_IN_ENCODER;
+
+ /* Add this output to dpu_info if in DPU mode */
+ if (dpu_info && dsi_connector->status == connector_status_connected) {
+ if (dsi_connector->pipe == 0)
+ dpu_info->dbi_outputs[0] = dbi_output;
+ else
+ dpu_info->dbi_outputs[1] = dbi_output;
+
+ dpu_info->dbi_output_num++;
+ } else if (dsi_connector->status == connector_status_connected) {
+ /* Add this output to dsr_info if not */
+ if (dsi_connector->pipe == 0)
+ dsr_info->dbi_outputs[0] = dbi_output;
+ else
+ dsr_info->dbi_outputs[1] = dbi_output;
+
+ dsr_info->dbi_output_num++;
+ }
+ return &dbi_output->base;
+out_err1:
+ kfree(dbi_output);
+ return NULL;
+}
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.h b/drivers/staging/gma500/mdfld_dsi_dbi.h
new file mode 100644
index 00000000000..f0fa986fd93
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_dsi_dbi.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_DBI_H__
+#define __MDFLD_DSI_DBI_H__
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_output.h"
+
+/*
+ * DBI encoder which inherits from mdfld_dsi_encoder
+ */
+struct mdfld_dsi_dbi_output {
+ struct mdfld_dsi_encoder base;
+ struct drm_display_mode *panel_fixed_mode;
+ u8 last_cmd;
+ u8 lane_count;
+ u8 channel_num;
+ struct drm_device *dev;
+
+ /* Backlight operations */
+
+ /* DSR timer */
+ u32 dsr_idle_count;
+ bool dsr_fb_update_done;
+
+ /* Mode setting flags */
+ u32 mode_flags;
+
+ /* Panel status */
+ bool dbi_panel_on;
+ bool first_boot;
+ struct panel_funcs *p_funcs;
+
+ /* DPU */
+ u32 *dbi_cb_addr;
+ u32 dbi_cb_phy;
+ spinlock_t cb_lock;
+ u32 cb_write;
+};
+
+#define MDFLD_DSI_DBI_OUTPUT(dsi_encoder) \
+ container_of(dsi_encoder, struct mdfld_dsi_dbi_output, base)
+
+struct mdfld_dbi_dsr_info {
+ int dbi_output_num;
+ struct mdfld_dsi_dbi_output *dbi_outputs[2];
+
+ u32 dsr_idle_count;
+};
+
+#define DBI_CB_TIMEOUT_COUNT 0xffff
+
+/* Offsets */
+#define CMD_MEM_ADDR_OFFSET 0
+
+#define CMD_DATA_SRC_SYSTEM_MEM 0
+#define CMD_DATA_SRC_PIPE 1
+
+static inline int mdfld_dsi_dbi_fifo_ready(struct mdfld_dsi_dbi_output *dbi_output)
+{
+ struct drm_device *dev = dbi_output->dev;
+ u32 retry = DBI_CB_TIMEOUT_COUNT;
+ int reg_offset = (dbi_output->channel_num == 1) ? MIPIC_REG_OFFSET : 0;
+ int ret = 0;
+
+ /* Query the dbi fifo status*/
+	while (--retry) {
+ if (REG_READ(MIPIA_GEN_FIFO_STAT_REG + reg_offset) & (1 << 27))
+ break;
+ }
+
+ if (!retry) {
+ DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
+ ret = -EAGAIN;
+ }
+ return ret;
+}
+
+static inline int mdfld_dsi_dbi_cmd_sent(struct mdfld_dsi_dbi_output *dbi_output)
+{
+ struct drm_device *dev = dbi_output->dev;
+ u32 retry = DBI_CB_TIMEOUT_COUNT;
+ int reg_offset = (dbi_output->channel_num == 1) ? MIPIC_REG_OFFSET : 0;
+ int ret = 0;
+
+ /* Query the command execution status */
+	while (--retry)
+ if (!(REG_READ(MIPIA_CMD_ADD_REG + reg_offset) & (1 << 0)))
+ break;
+
+ if (!retry) {
+ DRM_ERROR("Timeout waiting for DBI command status\n");
+ ret = -EAGAIN;
+ }
+
+ return ret;
+}
+
+static inline int mdfld_dsi_dbi_cb_ready(struct mdfld_dsi_dbi_output *dbi_output)
+{
+ int ret = 0;
+
+ /* Query the command execution status*/
+ ret = mdfld_dsi_dbi_cmd_sent(dbi_output);
+	if (ret) {
+		DRM_ERROR("Peripheral is busy\n");
+		return -EAGAIN;
+	}
+ /* Query the dbi fifo status*/
+ ret = mdfld_dsi_dbi_fifo_ready(dbi_output);
+ if (ret) {
+ DRM_ERROR("DBI FIFO is not empty\n");
+ ret = -EAGAIN;
+ }
+ return ret;
+}
+
+extern void mdfld_dsi_dbi_output_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev, int pipe);
+extern void mdfld_dsi_dbi_exit_dsr(struct drm_device *dev, u32 update_src);
+extern void mdfld_dsi_dbi_enter_dsr(struct mdfld_dsi_dbi_output *dbi_output,
+ int pipe);
+extern int mdfld_dbi_dsr_init(struct drm_device *dev);
+extern void mdfld_dbi_dsr_exit(struct drm_device *dev);
+extern struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev,
+ struct mdfld_dsi_connector *dsi_connector,
+ struct panel_funcs *p_funcs);
+extern int mdfld_dsi_dbi_send_dcs(struct mdfld_dsi_dbi_output *dbi_output,
+ u8 dcs, u8 *param, u32 num, u8 data_src);
+extern int mdfld_dsi_dbi_update_area(struct mdfld_dsi_dbi_output *dbi_output,
+ u16 x1, u16 y1, u16 x2, u16 y2);
+extern int mdfld_dsi_dbi_update_power(struct mdfld_dsi_dbi_output *dbi_output,
+ int mode);
+extern void mdfld_dsi_controller_dbi_init(struct mdfld_dsi_config *dsi_config,
+ int pipe);
+
+#endif /*__MDFLD_DSI_DBI_H__*/
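
/*
 * Usage sketch (hypothetical caller, not part of the driver): the inline
 * polling helpers above are meant to gate command emission, e.g.:
 *
 *	if (mdfld_dsi_dbi_cb_ready(dbi_output))
 *		return -EAGAIN;		(FIFO busy or command still pending)
 *	return mdfld_dsi_dbi_send_dcs(dbi_output, dcs, param, num,
 *				      CMD_DATA_SRC_SYSTEM_MEM);
 */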
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi_dpu.c b/drivers/staging/gma500/mdfld_dsi_dbi_dpu.c
new file mode 100644
index 00000000000..a4e2ff442b1
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_dsi_dbi_dpu.c
@@ -0,0 +1,778 @@
+/*
+ * Copyright © 2010-2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jim Liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_dbi_dpu.h"
+#include "mdfld_dsi_dbi.h"
+
+/*
+ * NOTE: all mdfld_*_damage functions must be called with dpu_update_lock held
+ */
+
+static int mdfld_cursor_damage(struct mdfld_dbi_dpu_info *dpu_info,
+ mdfld_plane_t plane,
+ struct psb_drm_dpu_rect *damaged_rect)
+{
+ int x, y;
+ int new_x, new_y;
+ struct psb_drm_dpu_rect *rect;
+ struct psb_drm_dpu_rect *pipe_rect;
+ int cursor_size;
+ struct mdfld_cursor_info *cursor;
+ mdfld_plane_t fb_plane;
+
+ if (plane == MDFLD_CURSORA) {
+ cursor = &dpu_info->cursors[0];
+ x = dpu_info->cursors[0].x;
+ y = dpu_info->cursors[0].y;
+ cursor_size = dpu_info->cursors[0].size;
+ pipe_rect = &dpu_info->damage_pipea;
+ fb_plane = MDFLD_PLANEA;
+ } else {
+ cursor = &dpu_info->cursors[1];
+ x = dpu_info->cursors[1].x;
+ y = dpu_info->cursors[1].y;
+ cursor_size = dpu_info->cursors[1].size;
+ pipe_rect = &dpu_info->damage_pipec;
+ fb_plane = MDFLD_PLANEC;
+ }
+ new_x = damaged_rect->x;
+ new_y = damaged_rect->y;
+
+ if (x == new_x && y == new_y)
+ return 0;
+
+ rect = &dpu_info->damaged_rects[plane];
+ /* Move to right */
+ if (new_x >= x) {
+ if (new_y > y) {
+ rect->x = x;
+ rect->y = y;
+ rect->width = (new_x + cursor_size) - x;
+ rect->height = (new_y + cursor_size) - y;
+ goto cursor_out;
+ } else {
+ rect->x = x;
+ rect->y = new_y;
+ rect->width = (new_x + cursor_size) - x;
+ rect->height = (y - new_y);
+ goto cursor_out;
+ }
+ } else {
+ if (new_y > y) {
+ rect->x = new_x;
+ rect->y = y;
+ rect->width = (x + cursor_size) - new_x;
+ rect->height = new_y - y;
+ goto cursor_out;
+ } else {
+ rect->x = new_x;
+ rect->y = new_y;
+ rect->width = (x + cursor_size) - new_x;
+ rect->height = (y + cursor_size) - new_y;
+ }
+ }
+cursor_out:
+ if (new_x < 0)
+ cursor->x = 0;
+ else if (new_x > 864)
+ cursor->x = 864;
+ else
+ cursor->x = new_x;
+
+ if (new_y < 0)
+ cursor->y = 0;
+ else if (new_y > 480)
+ cursor->y = 480;
+ else
+ cursor->y = new_y;
+
+ /*
+ * FIXME: this is a workaround for cursor plane update,
+ * remove it later!
+ */
+ rect->x = 0;
+ rect->y = 0;
+ rect->width = 864;
+ rect->height = 480;
+
+ mdfld_check_boundary(dpu_info, rect);
+ mdfld_dpu_region_extent(pipe_rect, rect);
+
+ /* Update pending status of dpu_info */
+ dpu_info->pending |= (1 << plane);
+ /* Update fb panel as well */
+ dpu_info->pending |= (1 << fb_plane);
+ return 0;
+}
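
/*
 * Illustration only: the four direction cases above are computing the
 * bounding box of the old and new cursor squares. A compact sketch of that
 * intent (hypothetical helper, not part of the driver; min()/max() are the
 * usual kernel macros):
 */
static void mdfld_cursor_bounding_rect(struct psb_drm_dpu_rect *rect,
				       int old_x, int old_y,
				       int new_x, int new_y, int size)
{
	rect->x = min(old_x, new_x);
	rect->y = min(old_y, new_y);
	rect->width = max(old_x, new_x) + size - rect->x;
	rect->height = max(old_y, new_y) + size - rect->y;
}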
+
+static int mdfld_fb_damage(struct mdfld_dbi_dpu_info *dpu_info,
+ mdfld_plane_t plane,
+ struct psb_drm_dpu_rect *damaged_rect)
+{
+ struct psb_drm_dpu_rect *rect;
+
+ if (plane == MDFLD_PLANEA)
+ rect = &dpu_info->damage_pipea;
+ else
+ rect = &dpu_info->damage_pipec;
+
+ mdfld_check_boundary(dpu_info, damaged_rect);
+
+ /* Add fb damage area to this pipe */
+ mdfld_dpu_region_extent(rect, damaged_rect);
+
+ /* Update pending status of dpu_info */
+ dpu_info->pending |= (1 << plane);
+ return 0;
+}
+
+/* Do nothing here, right now */
+static int mdfld_overlay_damage(struct mdfld_dbi_dpu_info *dpu_info,
+ mdfld_plane_t plane,
+ struct psb_drm_dpu_rect *damaged_rect)
+{
+ return 0;
+}
+
+int mdfld_dbi_dpu_report_damage(struct drm_device *dev,
+ mdfld_plane_t plane,
+ struct psb_drm_dpu_rect *rect)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+ int ret = 0;
+
+ /* DPU not in use, no damage reporting needed */
+ if (dpu_info == NULL)
+ return 0;
+
+ spin_lock(&dpu_info->dpu_update_lock);
+
+ switch (plane) {
+ case MDFLD_PLANEA:
+ case MDFLD_PLANEC:
+ mdfld_fb_damage(dpu_info, plane, rect);
+ break;
+ case MDFLD_CURSORA:
+ case MDFLD_CURSORC:
+ mdfld_cursor_damage(dpu_info, plane, rect);
+ break;
+ case MDFLD_OVERLAYA:
+ case MDFLD_OVERLAYC:
+ mdfld_overlay_damage(dpu_info, plane, rect);
+ break;
+ default:
+ DRM_ERROR("Invalid plane type %d\n", plane);
+ ret = -EINVAL;
+ }
+ spin_unlock(&dpu_info->dpu_update_lock);
+ return ret;
+}
+
+int mdfld_dbi_dpu_report_fullscreen_damage(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv;
+ struct mdfld_dbi_dpu_info *dpu_info;
+ struct mdfld_dsi_config *dsi_config;
+ struct psb_drm_dpu_rect rect;
+ int i;
+
+ if (!dev) {
+ DRM_ERROR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ dev_priv = dev->dev_private;
+ dpu_info = dev_priv->dbi_dpu_info;
+
+ /* This is fine - we may be in non DPU mode */
+ if (!dpu_info)
+ return -EINVAL;
+
+ for (i = 0; i < dpu_info->dbi_output_num; i++) {
+ dsi_config = dev_priv->dsi_configs[i];
+ if (dsi_config) {
+ rect.x = rect.y = 0;
+ rect.width = dsi_config->fixed_mode->hdisplay;
+ rect.height = dsi_config->fixed_mode->vdisplay;
+ mdfld_dbi_dpu_report_damage(dev,
+ i ? (MDFLD_PLANEC) : (MDFLD_PLANEA),
+ &rect);
+ }
+ }
+ /* Exit DSR state */
+ mdfld_dpu_exit_dsr(dev);
+ return 0;
+}
+
+int mdfld_dsi_dbi_dsr_off(struct drm_device *dev,
+ struct psb_drm_dpu_rect *rect)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+
+ mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, rect);
+
+ /* If dual display mode */
+ if (dpu_info->dbi_output_num == 2)
+ mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, rect);
+
+ /* Force dsi to exit DSR mode */
+ mdfld_dpu_exit_dsr(dev);
+ return 0;
+}
+
+static void mdfld_dpu_cursor_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
+ mdfld_plane_t plane)
+{
+ struct drm_device *dev = dpu_info->dev;
+ u32 curpos_reg = CURAPOS;
+ u32 curbase_reg = CURABASE;
+ u32 curcntr_reg = CURACNTR;
+ struct mdfld_cursor_info *cursor = &dpu_info->cursors[0];
+
+ if (plane == MDFLD_CURSORC) {
+ curpos_reg = CURCPOS;
+ curbase_reg = CURCBASE;
+ curcntr_reg = CURCCNTR;
+ cursor = &dpu_info->cursors[1];
+ }
+
+ REG_WRITE(curcntr_reg, REG_READ(curcntr_reg));
+ REG_WRITE(curpos_reg,
+ (((cursor->x & CURSOR_POS_MASK) << CURSOR_X_SHIFT) |
+ ((cursor->y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT)));
+ REG_WRITE(curbase_reg, REG_READ(curbase_reg));
+}
+
+static void mdfld_dpu_fb_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
+ mdfld_plane_t plane)
+{
+ u32 pipesrc_reg = PIPEASRC;
+ u32 dspsize_reg = DSPASIZE;
+ u32 dspoff_reg = DSPALINOFF;
+ u32 dspsurf_reg = DSPASURF;
+ u32 dspstride_reg = DSPASTRIDE;
+ u32 stride;
+ struct psb_drm_dpu_rect *rect = &dpu_info->damage_pipea;
+ struct drm_device *dev = dpu_info->dev;
+
+ if (plane == MDFLD_PLANEC) {
+ pipesrc_reg = PIPECSRC;
+ dspsize_reg = DSPCSIZE;
+ dspoff_reg = DSPCLINOFF;
+ dspsurf_reg = DSPCSURF;
+ dspstride_reg = DSPCSTRIDE;
+ rect = &dpu_info->damage_pipec;
+ }
+
+ stride = REG_READ(dspstride_reg);
+ /* FIXME: should I do the pipe src update here? */
+ REG_WRITE(pipesrc_reg, ((rect->width - 1) << 16) | (rect->height - 1));
+ /* Flush plane */
+ REG_WRITE(dspsize_reg, ((rect->height - 1) << 16) | (rect->width - 1));
+ REG_WRITE(dspoff_reg, ((rect->x * 4) + (rect->y * stride)));
+ REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
+
+ /*
+ * TODO: wait for flip finished and restore the pipesrc reg,
+ * or cursor will be show at a wrong position
+ */
+}
+
+static void mdfld_dpu_overlay_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
+ mdfld_plane_t plane)
+{
+}
+
+/*
+ * TODO: we are still in DBI normal mode; switch to partial mode later.
+ */
+static int mdfld_dbi_prepare_cb(struct mdfld_dsi_dbi_output *dbi_output,
+ struct mdfld_dbi_dpu_info *dpu_info, int pipe)
+{
+ u8 *cb_addr = (u8 *)dbi_output->dbi_cb_addr;
+ u32 *index;
+ struct psb_drm_dpu_rect *rect = pipe ?
+ (&dpu_info->damage_pipec) : (&dpu_info->damage_pipea);
+
+ /* FIXME: lock command buffer, this may lead to a deadlock,
+ as we already hold the dpu_update_lock */
+ if (!spin_trylock(&dbi_output->cb_lock)) {
+ DRM_ERROR("lock command buffer failed, try again\n");
+ return -EAGAIN;
+ }
+
+ index = &dbi_output->cb_write;
+
+	if (*index) {
+		DRM_ERROR("DBI command buffer unclean\n");
+		spin_unlock(&dbi_output->cb_lock);
+		return -EAGAIN;
+	}
+
+ /* Column address */
+ *(cb_addr + ((*index)++)) = set_column_address;
+ *(cb_addr + ((*index)++)) = rect->x >> 8;
+ *(cb_addr + ((*index)++)) = rect->x;
+ *(cb_addr + ((*index)++)) = (rect->x + rect->width - 1) >> 8;
+ *(cb_addr + ((*index)++)) = (rect->x + rect->width - 1);
+
+ *index = 8;
+
+ /* Page address */
+ *(cb_addr + ((*index)++)) = set_page_addr;
+ *(cb_addr + ((*index)++)) = rect->y >> 8;
+ *(cb_addr + ((*index)++)) = rect->y;
+ *(cb_addr + ((*index)++)) = (rect->y + rect->height - 1) >> 8;
+ *(cb_addr + ((*index)++)) = (rect->y + rect->height - 1);
+
+ *index = 16;
+
+ /*write memory*/
+ *(cb_addr + ((*index)++)) = write_mem_start;
+
+ return 0;
+}
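
/*
 * Resulting command buffer layout (illustration of the code above): three
 * DCS packets at fixed 8-byte slots, which the 0x010505 length word
 * programmed in mdfld_dbi_flush_cb() appears to describe (5 + 5 + 1
 * payload bytes):
 *
 *	offset  0: set_column_address, xs_hi, xs_lo, xe_hi, xe_lo
 *	offset  8: set_page_addr,      ys_hi, ys_lo, ye_hi, ye_lo
 *	offset 16: write_mem_start
 */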
+
+static int mdfld_dbi_flush_cb(struct mdfld_dsi_dbi_output *dbi_output, int pipe)
+{
+ u32 cmd_phy;
+ u32 *index;
+ int reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+ struct drm_device *dev;
+
+ if (!dbi_output)
+ return 0;
+
+ cmd_phy = dbi_output->dbi_cb_phy;
+ index = &dbi_output->cb_write;
+ dev = dbi_output->dev;
+
+ if (*index == 0)
+ return 0;
+
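+ /*
+ * The value 0x010505 appears to encode the byte counts of the three
+ * commands queued above: 5 for set_column_address, 5 for
+ * set_page_addr and 1 for write_mem_start.
+ */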
+ REG_WRITE((MIPIA_CMD_LEN_REG + reg_offset), 0x010505);
+ REG_WRITE((MIPIA_CMD_ADD_REG + reg_offset), cmd_phy | 3);
+
+ *index = 0;
+
+ /* FIXME: unlock command buffer */
+ spin_unlock(&dbi_output->cb_lock);
+ return 0;
+}
+
+static int mdfld_dpu_update_pipe(struct mdfld_dsi_dbi_output *dbi_output,
+ struct mdfld_dbi_dpu_info *dpu_info, int pipe)
+{
+ struct drm_device *dev = dbi_output->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ mdfld_plane_t cursor_plane = MDFLD_CURSORA;
+ mdfld_plane_t fb_plane = MDFLD_PLANEA;
+ mdfld_plane_t overlay_plane = MDFLD_OVERLAYA;
+ int ret = 0;
+ u32 plane_mask = MDFLD_PIPEA_PLANE_MASK;
+
+ /* Damaged rects on this pipe */
+ if (pipe) {
+ cursor_plane = MDFLD_CURSORC;
+ fb_plane = MDFLD_PLANEC;
+ overlay_plane = MDFLD_OVERLAYC;
+ plane_mask = MDFLD_PIPEC_PLANE_MASK;
+ }
+
+ /* Update the cursor assigned to @pipe */
+ if (dpu_info->pending & (1 << cursor_plane))
+ mdfld_dpu_cursor_plane_flush(dpu_info, cursor_plane);
+
+ /* Update the fb assigned to @pipe */
+ if (dpu_info->pending & (1 << fb_plane))
+ mdfld_dpu_fb_plane_flush(dpu_info, fb_plane);
+
+ /* TODO: update overlay */
+ if (dpu_info->pending & (1 << overlay_plane))
+ mdfld_dpu_overlay_plane_flush(dpu_info, overlay_plane);
+
+ /* Flush damage area to panel fb */
+ if (dpu_info->pending & plane_mask) {
+ ret = mdfld_dbi_prepare_cb(dbi_output, dpu_info, pipe);
+ /*
+ * TODO: remove the dsr_enable check later; it was added
+ * so that the text console could boot smoothly
+ */
+ /* Clean pending flags on this pipe */
+ if (!ret && dev_priv->dsr_enable) {
+ dpu_info->pending &= ~plane_mask;
+ /* Reset overlay pipe damage rect */
+ mdfld_dpu_init_damage(dpu_info, pipe);
+ }
+ }
+ return ret;
+}
+
+static int mdfld_dpu_update_fb(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ struct psb_intel_crtc *psb_crtc;
+ struct mdfld_dsi_dbi_output **dbi_output;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+ bool pipe_updated[2];
+ unsigned long irq_flags;
+ u32 dpll_reg = MRST_DPLL_A;
+ u32 dspcntr_reg = DSPACNTR;
+ u32 pipeconf_reg = PIPEACONF;
+ u32 dsplinoff_reg = DSPALINOFF;
+ u32 dspsurf_reg = DSPASURF;
+ u32 mipi_state_reg = MIPIA_INTR_STAT_REG;
+ u32 reg_offset = 0;
+ int pipe;
+ int i;
+ int ret;
+
+ dbi_output = dpu_info->dbi_outputs;
+ pipe_updated[0] = pipe_updated[1] = false;
+
+ if (!gma_power_begin(dev, true))
+ return -EAGAIN;
+
+ /* Try to prevent any new damage reports */
+ if (!spin_trylock_irqsave(&dpu_info->dpu_update_lock, irq_flags))
+ return -EAGAIN;
+
+ for (i = 0; i < dpu_info->dbi_output_num; i++) {
+ crtc = dbi_output[i]->base.base.crtc;
+ psb_crtc = (crtc) ? to_psb_intel_crtc(crtc) : NULL;
+
+ pipe = dbi_output[i]->channel_num ? 2 : 0;
+
+ if (pipe == 2) {
+ dspcntr_reg = DSPCCNTR;
+ pipeconf_reg = PIPECCONF;
+ dsplinoff_reg = DSPCLINOFF;
+ dspsurf_reg = DSPCSURF;
+ reg_offset = MIPIC_REG_OFFSET;
+ }
+
+ if (!(REG_READ((MIPIA_GEN_FIFO_STAT_REG + reg_offset))
+ & (1 << 27)) ||
+ !(REG_READ(dpll_reg) & DPLL_VCO_ENABLE) ||
+ !(REG_READ(dspcntr_reg) & DISPLAY_PLANE_ENABLE) ||
+ !(REG_READ(pipeconf_reg) & DISPLAY_PLANE_ENABLE)) {
+ dev_err(dev->dev,
+ "DBI FIFO is busy, DSI %d state %x\n",
+ pipe,
+ REG_READ(mipi_state_reg + reg_offset));
+ continue;
+ }
+
+ /*
+ * If the DBI output is in an exclusive state, the pipe
+ * change won't be updated
+ */
+ if (dbi_output[i]->dbi_panel_on &&
+ !(dbi_output[i]->mode_flags & MODE_SETTING_ON_GOING) &&
+ !(psb_crtc &&
+ psb_crtc->mode_flags & MODE_SETTING_ON_GOING) &&
+ !(dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)) {
+ ret = mdfld_dpu_update_pipe(dbi_output[i],
+ dpu_info, dbi_output[i]->channel_num ? 2 : 0);
+ if (!ret)
+ pipe_updated[i] = true;
+ }
+ }
+
+ for (i = 0; i < dpu_info->dbi_output_num; i++)
+ if (pipe_updated[i])
+ mdfld_dbi_flush_cb(dbi_output[i],
+ dbi_output[i]->channel_num ? 2 : 0);
+
+ spin_unlock_irqrestore(&dpu_info->dpu_update_lock, irq_flags);
+ gma_power_end(dev);
+ return 0;
+}
+
+static int __mdfld_dbi_exit_dsr(struct mdfld_dsi_dbi_output *dbi_output,
+ int pipe)
+{
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct psb_intel_crtc *psb_crtc;
+ u32 reg_val;
+ u32 dpll_reg = MRST_DPLL_A;
+ u32 pipeconf_reg = PIPEACONF;
+ u32 dspcntr_reg = DSPACNTR;
+ u32 dspbase_reg = DSPABASE;
+ u32 dspsurf_reg = DSPASURF;
+ u32 reg_offset = 0;
+
+ if (!dbi_output)
+ return 0;
+
+ dev = dbi_output->dev;
+ crtc = dbi_output->base.base.crtc;
+ psb_crtc = (crtc) ? to_psb_intel_crtc(crtc) : NULL;
+
+ /* If mode setting on-going, back off */
+ if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
+ (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
+ return -EAGAIN;
+
+ if (pipe == 2) {
+ dpll_reg = MRST_DPLL_A;
+ pipeconf_reg = PIPECCONF;
+ dspcntr_reg = DSPCCNTR;
+ dspbase_reg = MDFLD_DSPCBASE;
+ dspsurf_reg = DSPCSURF;
+
+ reg_offset = MIPIC_REG_OFFSET;
+ }
+
+ if (!gma_power_begin(dev, true))
+ return -EAGAIN;
+
+ /* Enable DPLL */
+ reg_val = REG_READ(dpll_reg);
+ if (!(reg_val & DPLL_VCO_ENABLE)) {
+
+ if (reg_val & MDFLD_PWR_GATE_EN) {
+ reg_val &= ~MDFLD_PWR_GATE_EN;
+ REG_WRITE(dpll_reg, reg_val);
+ REG_READ(dpll_reg);
+ udelay(500);
+ }
+
+ reg_val |= DPLL_VCO_ENABLE;
+ REG_WRITE(dpll_reg, reg_val);
+ REG_READ(dpll_reg);
+ udelay(500);
+
+ /* FIXME: add timeout */
+ while (!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK))
+ cpu_relax();
+ }
+
+ /* Enable pipe */
+ reg_val = REG_READ(pipeconf_reg);
+ if (!(reg_val & PIPEACONF_ENABLE)) {
+ reg_val |= PIPEACONF_ENABLE;
+ REG_WRITE(pipeconf_reg, reg_val);
+ REG_READ(pipeconf_reg);
+ udelay(500);
+ mdfldWaitForPipeEnable(dev, pipe);
+ }
+
+ /* Enable plane */
+ reg_val = REG_READ(dspcntr_reg);
+ if (!(reg_val & DISPLAY_PLANE_ENABLE)) {
+ reg_val |= DISPLAY_PLANE_ENABLE;
+ REG_WRITE(dspcntr_reg, reg_val);
+ REG_READ(dspcntr_reg);
+ udelay(500);
+ }
+
+ gma_power_end(dev);
+
+ /* Clean IN_DSR flag */
+ dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
+
+ return 0;
+}
+
+int mdfld_dpu_exit_dsr(struct drm_device *dev)
+{
+ struct mdfld_dsi_dbi_output **dbi_output;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+ int i;
+ int pipe;
+
+ dbi_output = dpu_info->dbi_outputs;
+
+ for (i = 0; i < dpu_info->dbi_output_num; i++) {
+ /* If this output is not in DSR mode, don't call exit dsr */
+ if (dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)
+ __mdfld_dbi_exit_dsr(dbi_output[i],
+ dbi_output[i]->channel_num ? 2 : 0);
+ }
+
+ /* Enable TE interrupt */
+ for (i = 0; i < dpu_info->dbi_output_num; i++) {
+ /* Enable TE on the pipe this panel is attached to */
+ pipe = dbi_output[i]->channel_num ? 2 : 0;
+ if (dbi_output[i]->dbi_panel_on && pipe) {
+ mdfld_disable_te(dev, 0);
+ mdfld_enable_te(dev, 2);
+ } else if (dbi_output[i]->dbi_panel_on && !pipe) {
+ mdfld_disable_te(dev, 2);
+ mdfld_enable_te(dev, 0);
+ }
+ }
+ return 0;
+}
+
+static int mdfld_dpu_enter_dsr(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+ struct mdfld_dsi_dbi_output **dbi_output;
+ int i;
+
+ dbi_output = dpu_info->dbi_outputs;
+
+ for (i = 0; i < dpu_info->dbi_output_num; i++) {
+ /* If output is off or already in DSR state, don't re-enter */
+ if (dbi_output[i]->dbi_panel_on &&
+ !(dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)) {
+ mdfld_dsi_dbi_enter_dsr(dbi_output[i],
+ dbi_output[i]->channel_num ? 2 : 0);
+ }
+ }
+
+ return 0;
+}
+
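+/*
+ * Periodic DPU refresh timer: if there is pending damage, flush it to
+ * the panel and reset the idle counter; otherwise count idle ticks and
+ * enter DSR once MDFLD_MAX_IDLE_COUNT is reached, in which case the
+ * timer is not re-armed until mdfld_dbi_dpu_timer_start() is called again.
+ */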
+static void mdfld_dbi_dpu_timer_func(unsigned long data)
+{
+ struct drm_device *dev = (struct drm_device *)data;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+ struct timer_list *dpu_timer = &dpu_info->dpu_timer;
+ unsigned long flags;
+
+ if (dpu_info->pending) {
+ dpu_info->idle_count = 0;
+ /* Update panel fb with damaged area */
+ mdfld_dpu_update_fb(dev);
+ } else {
+ dpu_info->idle_count++;
+ }
+
+ if (dpu_info->idle_count >= MDFLD_MAX_IDLE_COUNT) {
+ mdfld_dpu_enter_dsr(dev);
+ /* Stop the timer by returning without re-arming it */
+ return;
+ }
+
+ spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
+ if (!timer_pending(dpu_timer)) {
+ dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
+ add_timer(dpu_timer);
+ }
+ spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
+}
+
+void mdfld_dpu_update_panel(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+
+ if (dpu_info->pending) {
+ dpu_info->idle_count = 0;
+
+ /*update panel fb with damaged area*/
+ mdfld_dpu_update_fb(dev);
+ } else {
+ dpu_info->idle_count++;
+ }
+
+ if (dpu_info->idle_count >= MDFLD_MAX_IDLE_COUNT) {
+ /*enter dsr*/
+ mdfld_dpu_enter_dsr(dev);
+ }
+}
+
+static int mdfld_dbi_dpu_timer_init(struct drm_device *dev,
+ struct mdfld_dbi_dpu_info *dpu_info)
+{
+ struct timer_list *dpu_timer = &dpu_info->dpu_timer;
+ unsigned long flags;
+
+ spin_lock_init(&dpu_info->dpu_timer_lock);
+ spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
+
+ init_timer(dpu_timer);
+
+ dpu_timer->data = (unsigned long)dev;
+ dpu_timer->function = mdfld_dbi_dpu_timer_func;
+ dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
+
+ spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
+
+ return 0;
+}
+
+void mdfld_dbi_dpu_timer_start(struct mdfld_dbi_dpu_info *dpu_info)
+{
+ struct timer_list *dpu_timer = &dpu_info->dpu_timer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
+ if (!timer_pending(dpu_timer)) {
+ dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
+ add_timer(dpu_timer);
+ }
+ spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
+}
+
+int mdfld_dbi_dpu_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+
+ if (!dpu_info || IS_ERR(dpu_info)) {
+ dpu_info = kzalloc(sizeof(struct mdfld_dbi_dpu_info),
+ GFP_KERNEL);
+ if (!dpu_info) {
+ DRM_ERROR("No memory\n");
+ return -ENOMEM;
+ }
+ dev_priv->dbi_dpu_info = dpu_info;
+ }
+
+ dpu_info->dev = dev;
+
+ dpu_info->cursors[0].size = MDFLD_CURSOR_SIZE;
+ dpu_info->cursors[1].size = MDFLD_CURSOR_SIZE;
+
+ /*init dpu_update_lock*/
+ spin_lock_init(&dpu_info->dpu_update_lock);
+
+ /*init dpu refresh timer*/
+ mdfld_dbi_dpu_timer_init(dev, dpu_info);
+
+ /*init pipe damage area*/
+ mdfld_dpu_init_damage(dpu_info, 0);
+ mdfld_dpu_init_damage(dpu_info, 2);
+
+ return 0;
+}
+
+void mdfld_dbi_dpu_exit(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+
+ if (!dpu_info)
+ return;
+
+ del_timer_sync(&dpu_info->dpu_timer);
+ kfree(dpu_info);
+ dev_priv->dbi_dpu_info = NULL;
+}
+
+
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi_dpu.h b/drivers/staging/gma500/mdfld_dsi_dbi_dpu.h
new file mode 100644
index 00000000000..42367ed48c0
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_dsi_dbi_dpu.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_DBI_DPU_H__
+#define __MDFLD_DSI_DBI_DPU_H__
+
+#include "mdfld_dsi_dbi.h"
+
+typedef enum {
+ MDFLD_PLANEA,
+ MDFLD_PLANEC,
+ MDFLD_CURSORA,
+ MDFLD_CURSORC,
+ MDFLD_OVERLAYA,
+ MDFLD_OVERLAYC,
+ MDFLD_PLANE_NUM,
+} mdfld_plane_t;
+
+#define MDFLD_PIPEA_PLANE_MASK 0x15
+#define MDFLD_PIPEC_PLANE_MASK 0x2A
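+/*
+ * The plane masks are bitmasks over mdfld_plane_t: bits 0, 2 and 4
+ * (plane, cursor and overlay A) give 0x15 for pipe A; bits 1, 3 and 5
+ * give 0x2A for pipe C.
+ */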
+
+struct mdfld_cursor_info {
+ int x, y;
+ int size;
+};
+
+#define MDFLD_CURSOR_SIZE 64
+
+/*
+ * enter DSR mode if screen has no update for 2 frames.
+ */
+#define MDFLD_MAX_IDLE_COUNT 2
+
+struct mdfld_dbi_dpu_info {
+ struct drm_device *dev;
+ /* Lock */
+ spinlock_t dpu_update_lock;
+
+ /* Cursor position */
+ struct mdfld_cursor_info cursors[2];
+
+ /* Damaged area for each plane */
+ struct psb_drm_dpu_rect damaged_rects[MDFLD_PLANE_NUM];
+
+ /* Final damaged area */
+ struct psb_drm_dpu_rect damage_pipea;
+ struct psb_drm_dpu_rect damage_pipec;
+
+ /* Pending */
+ u32 pending;
+
+ /* DPU timer */
+ struct timer_list dpu_timer;
+ spinlock_t dpu_timer_lock;
+
+ /* DPU idle count */
+ u32 idle_count;
+
+ /* DSI outputs */
+ struct mdfld_dsi_dbi_output *dbi_outputs[2];
+ int dbi_output_num;
+};
+
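+/*
+ * Grow @origin so that it also covers @rect (bounding-box union).
+ * For example, extending (x 0, y 0, 10x10) by (x 5, y 5, 10x10)
+ * yields (x 0, y 0, 15x15).
+ */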
+static inline int mdfld_dpu_region_extent(struct psb_drm_dpu_rect *origin,
+ struct psb_drm_dpu_rect *rect)
+{
+ int x1, y1, x2, y2;
+
+ x1 = origin->x + origin->width;
+ y1 = origin->y + origin->height;
+
+ x2 = rect->x + rect->width;
+ y2 = rect->y + rect->height;
+
+ origin->x = min(origin->x, rect->x);
+ origin->y = min(origin->y, rect->y);
+ origin->width = max(x1, x2) - origin->x;
+ origin->height = max(y1, y2) - origin->y;
+
+ return 0;
+}
+
+static inline void mdfld_check_boundary(struct mdfld_dbi_dpu_info *dpu_info,
+ struct psb_drm_dpu_rect *rect)
+{
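+ /* Clamp the damage rect to the 864x480 panel and keep it non-empty */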
+ if (rect->x < 0)
+ rect->x = 0;
+ if (rect->y < 0)
+ rect->y = 0;
+
+ if (rect->x + rect->width > 864)
+ rect->width = 864 - rect->x;
+ if (rect->y + rect->height > 480)
+ rect->height = 480 - rect->y;
+
+ if (!rect->width)
+ rect->width = 1;
+ if (!rect->height)
+ rect->height = 1;
+}
+
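+/*
+ * Reset the damage rect to an "inverted" empty rect (origin at the far
+ * corner, negative size) so that the first damage report folded in via
+ * mdfld_dpu_region_extent() collapses it to the reported rect.
+ */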
+static inline void mdfld_dpu_init_damage(struct mdfld_dbi_dpu_info *dpu_info,
+ int pipe)
+{
+ struct psb_drm_dpu_rect *rect;
+
+ if (pipe == 0)
+ rect = &dpu_info->damage_pipea;
+ else
+ rect = &dpu_info->damage_pipec;
+
+ rect->x = 864;
+ rect->y = 480;
+ rect->width = -864;
+ rect->height = -480;
+}
+
+extern int mdfld_dsi_dbi_dsr_off(struct drm_device *dev,
+ struct psb_drm_dpu_rect *rect);
+extern int mdfld_dbi_dpu_report_damage(struct drm_device *dev,
+ mdfld_plane_t plane,
+ struct psb_drm_dpu_rect *rect);
+extern int mdfld_dbi_dpu_report_fullscreen_damage(struct drm_device *dev);
+extern int mdfld_dpu_exit_dsr(struct drm_device *dev);
+extern void mdfld_dbi_dpu_timer_start(struct mdfld_dbi_dpu_info *dpu_info);
+extern int mdfld_dbi_dpu_init(struct drm_device *dev);
+extern void mdfld_dbi_dpu_exit(struct drm_device *dev);
+extern void mdfld_dpu_update_panel(struct drm_device *dev);
+
+#endif /*__MDFLD_DSI_DBI_DPU_H__*/
diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.c b/drivers/staging/gma500/mdfld_dsi_dpi.c
new file mode 100644
index 00000000000..e685f1217ba
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_dsi_dpi.c
@@ -0,0 +1,805 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+
+static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe)
+{
+ u32 gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
+ int timeout = 0;
+
+ if (pipe == 2)
+ gen_fifo_stat_reg += MIPIC_REG_OFFSET;
+
+ udelay(500);
+
+ /* This will time out after approximately 2+ seconds */
+ while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_DATA_FULL)) {
+ udelay(100);
+ timeout++;
+ }
+
+ if (timeout == 20000)
+ dev_warn(dev->dev, "MIPI: HS Data FIFO was never cleared!\n");
+}
+
+static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe)
+{
+ u32 gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
+ int timeout = 0;
+
+ if (pipe == 2)
+ gen_fifo_stat_reg += MIPIC_REG_OFFSET;
+
+ udelay(500);
+
+ /* This will time out after approximately 2+ seconds */
+ while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_CTRL_FULL)) {
+ udelay(100);
+ timeout++;
+ }
+ if (timeout == 20000)
+ dev_warn(dev->dev, "MIPI: HS CMD FIFO was never cleared!\n");
+}
+
+static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe)
+{
+ u32 gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
+ int timeout = 0;
+
+ if (pipe == 2)
+ gen_fifo_stat_reg += MIPIC_REG_OFFSET;
+
+ udelay(500);
+
+ /* This will time out after approximately 2+ seconds */
+ while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) & DPI_FIFO_EMPTY)
+ != DPI_FIFO_EMPTY)) {
+ udelay(100);
+ timeout++;
+ }
+
+ if (timeout == 20000)
+ dev_warn(dev->dev, "MIPI: DPI FIFO was never cleared!\n");
+}
+
+static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe)
+{
+ u32 intr_stat_reg = MIPIA_INTR_STAT_REG;
+ int timeout = 0;
+
+ if (pipe == 2)
+ intr_stat_reg += MIPIC_REG_OFFSET;
+
+ udelay(500);
+
+ /* This will time out after approximately 2+ seconds */
+ while ((timeout < 20000) && (!(REG_READ(intr_stat_reg) & DSI_INTR_STATE_SPL_PKG_SENT))) {
+ udelay(100);
+ timeout++;
+ }
+
+ if (timeout == 20000)
+ dev_warn(dev->dev, "MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n");
+}
+
+
+/* ************************************************************************* *\
+ * FUNCTION: mdfld_dsi_tpo_ic_init
+ *
+ * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
+ * restore_display_registers. since this function does not
+ * acquire the mutex, it is important that the calling function
+ * does!
+\* ************************************************************************* */
+void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ u32 dcsChannelNumber = dsi_config->channel_num;
+ u32 gen_data_reg = MIPIA_HS_GEN_DATA_REG;
+ u32 gen_ctrl_reg = MIPIA_HS_GEN_CTRL_REG;
+ u32 gen_ctrl_val = GEN_LONG_WRITE;
+
+ if (pipe == 2) {
+ gen_data_reg = HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
+ gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
+ }
+
+ gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
+
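+ /*
+ * Each panel init command below is queued by filling the HS data
+ * FIFO one dword at a time and then writing the control register
+ * with the payload length (in bytes) in the WORD_COUNTS field to
+ * kick off the long write.
+ */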
+ /* Flip page order */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00008036);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
+
+ /* 0xF0 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x005a5af0);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+ /* Write protection key */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x005a5af1);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+ /* 0xFC */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x005a5afc);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+ /* 0xB7 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x770000b7);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000044);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS));
+
+ /* 0xB6 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x000a0ab6);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+
+ /* 0xF2 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x081010f2);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x4a070708);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x000000c5);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+
+ /* 0xF8 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x024003f8);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x01030a04);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x0e020220);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000004);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS));
+
+ /* 0xE2 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x398fc3e2);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x0000916f);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS));
+
+ /* 0xB0 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x000000b0);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
+
+ /* 0xF4 */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x240242f4);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x78ee2002);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x2a071050);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x507fee10);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x10300710);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS));
+
+ /* 0xBA */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x19fe07ba);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x101c0a31);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000010);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+
+ /* 0xBB */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x28ff07bb);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x24280a31);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000034);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+
+ /* 0xFB */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x535d05fb);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1b1a2130);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x221e180e);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x131d2120);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x535d0508);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1c1a2131);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x231f160d);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x111b2220);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x535c2008);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1f1d2433);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x2c251a10);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x2c34372d);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000023);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
+
+ /* 0xFA */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x525c0bfa);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1c1c232f);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x2623190e);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x18212625);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x545d0d0e);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1e1d2333);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x26231a10);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x1a222725);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x545d280f);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x21202635);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x31292013);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x31393d33);
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x00000029);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
+
+ /* Set DM */
+ mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+ REG_WRITE(gen_data_reg, 0x000100f7);
+ mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+ REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+}
+
+static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count,
+ int num_lane, int bpp)
+{
+ return (u16)((pixel_clock_count * bpp) / (num_lane * 8));
+}
+
+/*
+ * Calculate the DPI timings based on the given drm mode @mode;
+ * return 0 on success.
+ * FIXME: the proposed mode values are used for the calculation; we may
+ * need to use the crtc mode values later
+ */
+int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
+ struct mdfld_dsi_dpi_timing *dpi_timing,
+ int num_lane, int bpp)
+{
+ int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive;
+ int pclk_vsync, pclk_vfp, pclk_vbp, pclk_vactive;
+
+ if(!mode || !dpi_timing) {
+ DRM_ERROR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ pclk_hactive = mode->hdisplay;
+ pclk_hfp = mode->hsync_start - mode->hdisplay;
+ pclk_hsync = mode->hsync_end - mode->hsync_start;
+ pclk_hbp = mode->htotal - mode->hsync_end;
+
+ pclk_vactive = mode->vdisplay;
+ pclk_vfp = mode->vsync_start - mode->vdisplay;
+ pclk_vsync = mode->vsync_end - mode->vsync_start;
+ pclk_vbp = mode->vtotal - mode->vsync_end;
+
+ /*
+ * byte clock counts were calculated by following formula
+ * bclock_count = pclk_count * bpp / num_lane / 8
+ */
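+ /*
+ * e.g. a 32 pixel-clock hsync at 24 bpp over 2 lanes becomes
+ * 32 * 24 / (2 * 8) = 48 byte clocks.
+ */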
+ dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hsync, num_lane, bpp);
+ dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hbp, num_lane, bpp);
+ dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hfp, num_lane, bpp);
+ dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hactive, num_lane, bpp);
+ dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_vsync, num_lane, bpp);
+ dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_vbp, num_lane, bpp);
+ dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_vfp, num_lane, bpp);
+
+ return 0;
+}
+
+void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+ int lane_count = dsi_config->lane_count;
+ struct mdfld_dsi_dpi_timing dpi_timing;
+ struct drm_display_mode *mode = dsi_config->mode;
+ u32 val = 0;
+
+ /*un-ready device*/
+ REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
+
+ /*init dsi adapter before kicking off*/
+ REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
+
+ /*enable all interrupts*/
+ REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
+
+
+ /*set up func_prg*/
+ val |= lane_count;
+ val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
+
+ switch(dsi_config->bpp) {
+ case 16:
+ val |= DSI_DPI_COLOR_FORMAT_RGB565;
+ break;
+ case 18:
+ val |= DSI_DPI_COLOR_FORMAT_RGB666;
+ break;
+ case 24:
+ val |= DSI_DPI_COLOR_FORMAT_RGB888;
+ break;
+ default:
+ DRM_ERROR("unsupported color format, bpp = %d\n", dsi_config->bpp);
+ }
+ REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
+
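+ /*
+ * HS TX timeout: roughly one frame worth of byte clocks
+ * (vtotal * htotal * bpp / (8 * lane_count)).
+ */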
+ REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset),
+ (mode->vtotal * mode->htotal * dsi_config->bpp / (8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
+ REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff & DSI_LP_RX_TIMEOUT_MASK);
+
+ /*max value: 20 clock cycles of txclkesc*/
+ REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
+
+ /*min 21 txclkesc, max: ffffh*/
+ REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0xffff & DSI_RESET_TIMER_MASK);
+
+ REG_WRITE((MIPIA_DPI_RESOLUTION_REG + reg_offset), mode->vdisplay << 16 | mode->hdisplay);
+
+ /*set DPI timing registers*/
+ mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing, dsi_config->lane_count, dsi_config->bpp);
+
+ REG_WRITE((MIPIA_HSYNC_COUNT_REG + reg_offset), dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE((MIPIA_HBP_COUNT_REG + reg_offset), dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE((MIPIA_HFP_COUNT_REG + reg_offset), dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE((MIPIA_HACTIVE_COUNT_REG + reg_offset), dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE((MIPIA_VSYNC_COUNT_REG + reg_offset), dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE((MIPIA_VBP_COUNT_REG + reg_offset), dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE((MIPIA_VFP_COUNT_REG + reg_offset), dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
+
+ REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
+
+ /*min: 7d0 max: 4e20*/
+ REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x000007d0);
+
+ /*set up video mode*/
+ val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
+ REG_WRITE((MIPIA_VIDEO_MODE_FORMAT_REG + reg_offset), val);
+
+ REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
+
+ REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
+
+ /*TODO: figure out how to setup these registers*/
+ REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
+
+ REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset), (0xa << 16) | 0x14);
+ /*set device ready*/
+ REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
+}
+
+void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe)
+{
+ struct drm_device *dev = output->dev;
+ u32 reg_offset = 0;
+
+ if(output->panel_on)
+ return;
+
+ if(pipe)
+ reg_offset = MIPIC_REG_OFFSET;
+
+ /* clear special packet sent bit */
+ if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
+ REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
+ }
+
+ /*send turn on package*/
+ REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_TURN_ON);
+
+ /*wait for SPL_PKG_SENT interrupt*/
+ mdfld_wait_for_SPL_PKG_SENT(dev, pipe);
+
+ if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
+ REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
+ }
+
+ output->panel_on = 1;
+
+ /* FIXME the following is disabled to WA the X slow start issue for TMD panel */
+ /* if(pipe == 2) */
+ /* dev_priv->dpi_panel_on2 = true; */
+ /* else if (pipe == 0) */
+ /* dev_priv->dpi_panel_on = true; */
+}
+
+static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output, int pipe)
+{
+ struct drm_device *dev = output->dev;
+ u32 reg_offset = 0;
+
+ /* If the panel is already off, or mode setting hasn't happened yet, ignore this */
+ if((!output->panel_on) || output->first_boot) {
+ output->first_boot = 0;
+ return;
+ }
+
+ if(pipe)
+ reg_offset = MIPIC_REG_OFFSET;
+
+ /* Wait for dpi fifo to empty */
+ mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe);
+
+ /* Clear the special packet interrupt bit if set */
+ if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
+ REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
+ }
+
+ if(REG_READ(MIPIA_DPI_CONTROL_REG + reg_offset) == DSI_DPI_CTRL_HS_SHUTDOWN) {
+ dev_warn(dev->dev, "try to send the same package again, abort!");
+ goto shutdown_out;
+ }
+
+ REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_SHUTDOWN);
+
+shutdown_out:
+ output->panel_on = 0;
+ output->first_boot = 0;
+
+ /* FIXME the following is disabled to WA the X slow start issue for TMD panel */
+ /* if(pipe == 2) */
+ /* dev_priv->dpi_panel_on2 = false; */
+ /* else if (pipe == 0) */
+ /* dev_priv->dpi_panel_on = false; */
+ /* #ifdef CONFIG_PM_RUNTIME*/
+ /* if (drm_psb_ospm && !enable_gfx_rtpm) { */
+ /* pm_runtime_allow(&gpDrmDevice->pdev->dev); */
+ /* schedule_delayed_work(&dev_priv->rtpm_work, 30 * 1000); */
+ /* } */
+ /*if (enable_gfx_rtpm) */
+ /* pm_schedule_suspend(&dev->pdev->dev, gfxrtdelay); */
+ /* #endif */
+}
+
+void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+ struct mdfld_dsi_dpi_output *dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+ struct mdfld_dsi_config *dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
+ int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+ struct drm_device *dev = dsi_config->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 mipi_reg = MIPI;
+ u32 pipeconf_reg = PIPEACONF;
+
+ if(pipe) {
+ mipi_reg = MIPI_C;
+ pipeconf_reg = PIPECCONF;
+ }
+
+ /* Start up display island if it was shutdown */
+ if (!gma_power_begin(dev, true))
+ return;
+
+ if(on) {
+ if (mdfld_get_panel_type(dev, pipe) == TMD_VID){
+ mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+ } else {
+ /* Enable mipi port */
+ REG_WRITE(mipi_reg, (REG_READ(mipi_reg) | (1 << 31)));
+ REG_READ(mipi_reg);
+
+ mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+ mdfld_dsi_tpo_ic_init(dsi_config, pipe);
+ }
+
+ if(pipe == 2) {
+ dev_priv->dpi_panel_on2 = true;
+ }
+ else {
+ dev_priv->dpi_panel_on = true;
+ }
+
+ } else {
+ if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
+ mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+ } else {
+ mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+ /* Disable mipi port */
+ REG_WRITE(mipi_reg, (REG_READ(mipi_reg) & ~(1<<31)));
+ REG_READ(mipi_reg);
+ }
+
+ if(pipe == 2)
+ dev_priv->dpi_panel_on2 = false;
+ else
+ dev_priv->dpi_panel_on = false;
+ }
+ gma_power_end(dev);
+}
+
+void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
+{
+ dev_dbg(encoder->dev->dev, "DPMS %s\n",
+ (mode == DRM_MODE_DPMS_ON ? "on":"off"));
+
+ if (mode == DRM_MODE_DPMS_ON)
+ mdfld_dsi_dpi_set_power(encoder, true);
+ else {
+ mdfld_dsi_dpi_set_power(encoder, false);
+#if 0 /* FIXME */
+#ifdef CONFIG_PM_RUNTIME
+ if (enable_gfx_rtpm)
+ pm_schedule_suspend(&gpDrmDevice->pdev->dev, gfxrtdelay);
+#endif
+#endif
+ }
+}
+
+bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+ struct mdfld_dsi_config *dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
+ struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
+
+ if(fixed_mode) {
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+ adjusted_mode->clock = fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ }
+
+ return true;
+}
+
+void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder)
+{
+ mdfld_dsi_dpi_set_power(encoder, false);
+}
+
+void mdfld_dsi_dpi_commit(struct drm_encoder *encoder)
+{
+ mdfld_dsi_dpi_set_power(encoder, true);
+}
+
+void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+ struct mdfld_dsi_dpi_output *dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+ struct mdfld_dsi_config *dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
+ struct drm_device *dev = dsi_config->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+
+ u32 pipeconf_reg = PIPEACONF;
+ u32 dspcntr_reg = DSPACNTR;
+ u32 mipi_reg = MIPI;
+ u32 reg_offset = 0;
+
+ u32 pipeconf = dev_priv->pipeconf;
+ u32 dspcntr = dev_priv->dspcntr;
+ u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+
+ dev_dbg(dev->dev, "set mode %dx%d on pipe %d\n",
+ mode->hdisplay, mode->vdisplay, pipe);
+
+ if(pipe) {
+ pipeconf_reg = PIPECCONF;
+ dspcntr_reg = DSPCCNTR;
+ mipi_reg = MIPI_C;
+ reg_offset = MIPIC_REG_OFFSET;
+ } else {
+ mipi |= 2;
+ }
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ /* Set up mipi port FIXME: do at init time */
+ REG_WRITE(mipi_reg, mipi);
+ REG_READ(mipi_reg);
+
+ /* Set up DSI controller DPI interface */
+ mdfld_dsi_dpi_controller_init(dsi_config, pipe);
+
+ if (mdfld_get_panel_type(dev, pipe) != TMD_VID) {
+ /* Turn on DPI interface */
+ mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+ }
+
+ /* Set up pipe */
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ /* Set up display plane */
+ REG_WRITE(dspcntr_reg, dspcntr);
+ REG_READ(dspcntr_reg);
+
+ msleep(20); /* FIXME: this should wait for vblank */
+
+ dev_dbg(dev->dev, "State %x, power %d\n",
+ REG_READ(MIPIA_INTR_STAT_REG + reg_offset),
+ dpi_output->panel_on);
+
+ if (mdfld_get_panel_type(dev, pipe) != TMD_VID) {
+ /* Init driver ic */
+ mdfld_dsi_tpo_ic_init(dsi_config, pipe);
+ /* Init backlight */
+ mdfld_dsi_brightness_init(dsi_config, pipe);
+ }
+ gma_power_end(dev);
+}
+
+
+/*
+ * Init DSI DPI encoder.
+ * Allocate an mdfld_dsi_encoder and attach it to given @dsi_connector
+ * return pointer of newly allocated DPI encoder, NULL on error
+ */
+struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
+ struct mdfld_dsi_connector *dsi_connector,
+ struct panel_funcs *p_funcs)
+{
+ struct mdfld_dsi_dpi_output *dpi_output = NULL;
+ struct mdfld_dsi_config *dsi_config;
+ struct drm_connector *connector = NULL;
+ struct drm_encoder *encoder = NULL;
+ struct drm_display_mode *fixed_mode = NULL;
+ int pipe;
+ u32 data;
+ int ret;
+
+ if (!dsi_connector || !p_funcs) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ dsi_config = mdfld_dsi_get_config(dsi_connector);
+ pipe = dsi_connector->pipe;
+
+ /* Panel hard-reset */
+ if (p_funcs->reset) {
+ ret = p_funcs->reset(pipe);
+ if (ret) {
+ DRM_ERROR("Panel %d hard-reset failed\n", pipe);
+ return NULL;
+ }
+ }
+
+ /* Panel drvIC init */
+ if (p_funcs->drv_ic_init)
+ p_funcs->drv_ic_init(dsi_config, pipe);
+
+ /* Panel power mode detect */
+ ret = mdfld_dsi_get_power_mode(dsi_config,
+ &data,
+ MDFLD_DSI_LP_TRANSMISSION);
+ if (ret) {
+ DRM_ERROR("Panel %d get power mode failed\n", pipe);
+ dsi_connector->status = connector_status_disconnected;
+ } else {
+ DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
+ dsi_connector->status = connector_status_connected;
+ }
+
+ dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
+ if(!dpi_output) {
+ dev_err(dev->dev, "No memory for dsi_dpi_output\n");
+ return NULL;
+ }
+
+ dpi_output->panel_on = 0;
+
+ dpi_output->dev = dev;
+ dpi_output->p_funcs = p_funcs;
+ dpi_output->first_boot = 1;
+
+ /* Get fixed mode */
+ dsi_config = mdfld_dsi_get_config(dsi_connector);
+ fixed_mode = dsi_config->fixed_mode;
+
+ /* Create drm encoder object */
+ connector = &dsi_connector->base.base;
+ encoder = &dpi_output->base.base;
+ /*
+ * On existing hardware this will be a panel of some form,
+ * if future devices also have HDMI bridges this will need
+ * revisiting
+ */
+ drm_encoder_init(dev,
+ encoder,
+ p_funcs->encoder_funcs,
+ DRM_MODE_ENCODER_LVDS);
+ drm_encoder_helper_add(encoder,
+ p_funcs->encoder_helper_funcs);
+
+ /* Attach to given connector */
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ /* Set possible crtcs and clones */
+ if(dsi_connector->pipe) {
+ encoder->possible_crtcs = (1 << 2);
+ encoder->possible_clones = (1 << 1);
+ } else {
+ encoder->possible_crtcs = (1 << 0);
+ encoder->possible_clones = (1 << 0);
+ }
+ return &dpi_output->base;
+}
+
diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.h b/drivers/staging/gma500/mdfld_dsi_dpi.h
new file mode 100644
index 00000000000..ed92d45ee74
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_dsi_dpi.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_DPI_H__
+#define __MDFLD_DSI_DPI_H__
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_output.h"
+
+struct mdfld_dsi_dpi_timing {
+ u16 hsync_count;
+ u16 hbp_count;
+ u16 hfp_count;
+ u16 hactive_count;
+ u16 vsync_count;
+ u16 vbp_count;
+ u16 vfp_count;
+};
+
+struct mdfld_dsi_dpi_output {
+ struct mdfld_dsi_encoder base;
+ struct drm_device *dev;
+
+ int panel_on;
+ int first_boot;
+
+ struct panel_funcs *p_funcs;
+};
+
+#define MDFLD_DSI_DPI_OUTPUT(dsi_encoder) \
+ container_of(dsi_encoder, struct mdfld_dsi_dpi_output, base)
+
+extern int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
+ struct mdfld_dsi_dpi_timing *dpi_timing,
+ int num_lane, int bpp);
+extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
+ struct mdfld_dsi_connector *dsi_connector,
+ struct panel_funcs *p_funcs);
+
+/* Medfield DPI helper functions */
+extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
+extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
+extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);
+extern void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output,
+ int pipe);
+extern void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *si_config,
+ int pipe);
+#endif /*__MDFLD_DSI_DPI_H__*/
diff --git a/drivers/staging/gma500/mdfld_dsi_output.c b/drivers/staging/gma500/mdfld_dsi_output.c
new file mode 100644
index 00000000000..9050c0f78b1
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_dsi_output.c
@@ -0,0 +1,1013 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_output.h"
+#include <asm/intel_scu_ipc.h>
+#include "mdfld_dsi_pkg_sender.h"
+#include <linux/pm_runtime.h>
+
+#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100
+
+static int CABC_control = 1;
+static int LABC_control = 1;
+
+module_param (CABC_control, int, 0644);
+module_param (LABC_control, int, 0644);
+
+/*
+ * Make these MCS commands global so we don't need to reload them
+ * every time we send them.
+ * FIXME: this data was provided by the OEM; we should get it from the GCT.
+ */
+static u32 mdfld_dbi_mcs_hysteresis[] = {
+ 0x42000f57, 0x8c006400, 0xff00bf00, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0x38000aff, 0x82005000, 0xff00ab00, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0x000000ff,
+};
+
+static u32 mdfld_dbi_mcs_display_profile[] = {
+ 0x50281450, 0x0000c882, 0x00000000, 0x00000000,
+ 0x00000000,
+};
+
+static u32 mdfld_dbi_mcs_kbbc_profile[] = {
+ 0x00ffcc60, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static u32 mdfld_dbi_mcs_gamma_profile[] = {
+ 0x81111158, 0x88888888, 0x88888888,
+};
+
+/*
+ * write hysteresis values.
+ */
+static void mdfld_dsi_write_hysteresis (struct mdfld_dsi_config *dsi_config,
+ int pipe)
+{
+ struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
+
+ if(!sender) {
+ WARN_ON(1);
+ return;
+ }
+ mdfld_dsi_send_mcs_long_hs(sender,
+ mdfld_dbi_mcs_hysteresis,
+ 17,
+ MDFLD_DSI_SEND_PACKAGE);
+}
+
+/*
+ * write display profile values.
+ */
+static void mdfld_dsi_write_display_profile(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+ struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
+
+ if(!sender) {
+ WARN_ON(1);
+ return;
+ }
+ mdfld_dsi_send_mcs_long_hs(sender,
+ mdfld_dbi_mcs_display_profile,
+ 5,
+ MDFLD_DSI_SEND_PACKAGE);
+}
+
+/*
+ * write KBBC profile values.
+ */
+static void mdfld_dsi_write_kbbc_profile (struct mdfld_dsi_config * dsi_config, int pipe)
+{
+ struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
+
+ if(!sender) {
+ WARN_ON(1);
+ return;
+ }
+ mdfld_dsi_send_mcs_long_hs(sender,
+ mdfld_dbi_mcs_kbbc_profile,
+ 4,
+ MDFLD_DSI_SEND_PACKAGE);
+}
+
+/*
+ * write gamma setting.
+ */
+static void mdfld_dsi_write_gamma_setting (struct mdfld_dsi_config *dsi_config, int pipe)
+{
+ struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
+
+ if(!sender) {
+ WARN_ON(1);
+ return;
+ }
+ mdfld_dsi_send_mcs_long_hs(sender,
+ mdfld_dbi_mcs_gamma_profile,
+ 3,
+ MDFLD_DSI_SEND_PACKAGE);
+}
+
+/*
+ * Check and see if the generic control or data buffer is empty and ready.
+ */
+void mdfld_dsi_gen_fifo_ready (struct drm_device *dev, u32 gen_fifo_stat_reg, u32 fifo_stat)
+{
+ u32 GEN_BF_time_out_count = 0;
+
+ /* Check MIPI Adatper command registers */
+ for (GEN_BF_time_out_count = 0; GEN_BF_time_out_count < GEN_FB_TIME_OUT; GEN_BF_time_out_count++)
+ {
+ if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat)
+ break;
+ udelay (100);
+ }
+
+ if (GEN_BF_time_out_count == GEN_FB_TIME_OUT)
+ dev_err(dev->dev,
+ "mdfld_dsi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x. \n",
+ gen_fifo_stat_reg);
+}
+
+/*
+ * Manage the DSI MIPI keyboard and display brightness.
+ * FIXME: this is exported to OSPM code. We should work out a specific
+ * display interface to OSPM.
+ */
+void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+ struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
+ struct drm_device *dev;
+ struct drm_psb_private *dev_priv;
+ u32 gen_ctrl_val;
+
+ if(!sender) {
+ WARN_ON(1);
+ return;
+ }
+
+ dev = sender->dev;
+ dev_priv = dev->dev_private;
+
+ /* Set default display backlight value to 85% (0xd8)*/
+ mdfld_dsi_send_mcs_short_hs(sender,
+ write_display_brightness,
+ 0xd8,
+ 1,
+ MDFLD_DSI_SEND_PACKAGE);
+
+ /* Set minimum brightness setting of CABC function to 20% (0x33)*/
+ mdfld_dsi_send_mcs_short_hs(sender,
+ write_cabc_min_bright,
+ 0x33,
+ 1,
+ MDFLD_DSI_SEND_PACKAGE);
+
+ mdfld_dsi_write_hysteresis(dsi_config, pipe);
+ mdfld_dsi_write_display_profile (dsi_config, pipe);
+ mdfld_dsi_write_kbbc_profile (dsi_config, pipe);
+ mdfld_dsi_write_gamma_setting (dsi_config, pipe);
+
+ /* Enable backlight or/and LABC */
+ gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | DISPLAY_DIMMING_ON | BACKLIGHT_ON;
+ if (LABC_control == 1 || CABC_control == 1)
+ gen_ctrl_val |= DISPLAY_DIMMING_ON | DISPLAY_BRIGHTNESS_AUTO | GAMMA_AUTO;
+
+ if (LABC_control == 1)
+ gen_ctrl_val |= AMBIENT_LIGHT_SENSE_ON;
+
+ dev_priv->mipi_ctrl_display = gen_ctrl_val;
+
+ mdfld_dsi_send_mcs_short_hs(sender,
+ write_ctrl_display,
+ (u8)gen_ctrl_val,
+ 1,
+ MDFLD_DSI_SEND_PACKAGE);
+
+ if (CABC_control == 0)
+ return;
+ mdfld_dsi_send_mcs_short_hs(sender,
+ write_ctrl_cabc,
+ UI_IMAGE,
+ 1,
+ MDFLD_DSI_SEND_PACKAGE);
+}
+
+/*
+ * Manage the mipi display brightness.
+ * TODO: refine this interface later
+ */
+void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, int level)
+{
+ struct mdfld_dsi_pkg_sender *sender;
+ struct drm_psb_private *dev_priv;
+ struct mdfld_dsi_config *dsi_config;
+ u32 gen_ctrl_val;
+ int p_type;
+
+ if (!dev || (pipe != 0 && pipe != 2)) {
+ dev_err(dev->dev, "Invalid parameter\n");
+ return;
+ }
+
+ p_type = mdfld_get_panel_type(dev, 0);
+
+ dev_priv = dev->dev_private;
+
+ if(pipe)
+ dsi_config = dev_priv->dsi_configs[1];
+ else
+ dsi_config = dev_priv->dsi_configs[0];
+
+ sender = mdfld_dsi_get_pkg_sender(dsi_config);
+
+ if(!sender) {
+ WARN_ON(1);
+ return;
+ }
+
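+ /*
+ * Scale the 0..MDFLD_DSI_BRIGHTNESS_MAX_LEVEL level to the 0..255
+ * DCS brightness range, e.g. level 50 maps to 50 * 255 / 100 = 127.
+ */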
+ gen_ctrl_val = ((level * 0xff) / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff;
+
+ dev_dbg(dev->dev,
+ "pipe = %d, gen_ctrl_val = %d. \n", pipe, gen_ctrl_val);
+
+ if(p_type == TMD_VID || p_type == TMD_CMD){
+ /* Set display backlight value */
+ mdfld_dsi_send_mcs_short_hs(sender,
+ tmd_write_display_brightness,
+ (u8)gen_ctrl_val,
+ 1,
+ MDFLD_DSI_SEND_PACKAGE);
+ } else {
+ /* Set display backlight value */
+ mdfld_dsi_send_mcs_short_hs(sender,
+ write_display_brightness,
+ (u8)gen_ctrl_val,
+ 1,
+ MDFLD_DSI_SEND_PACKAGE);
+
+
+ /* Enable backlight control */
+ if (level == 0)
+ gen_ctrl_val = 0;
+ else
+ gen_ctrl_val = dev_priv->mipi_ctrl_display;
+
+ mdfld_dsi_send_mcs_short_hs(sender,
+ write_ctrl_display,
+ (u8)gen_ctrl_val,
+ 1,
+ MDFLD_DSI_SEND_PACKAGE);
+ }
+}
+
+/*
+ * shut down DSI controller
+ */
+void mdfld_dsi_controller_shutdown(struct mdfld_dsi_config * dsi_config, int pipe)
+{
+ struct drm_device * dev;
+ u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+ int retry = 100;
+
+ if (!dsi_config) {
+ WARN_ON(1);
+ return;
+ }
+
+ dev = dsi_config->dev;
+
+ if (!gma_power_begin(dev, true)) {
+ dev_err(dev->dev, "hw begin failed\n");
+ return;
+ }
+
+ if(!(REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) & DSI_DEVICE_READY))
+ goto shutdown_out;
+
+ /* Send shut down package, clean packet send bit first */
+ if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
+ REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset),
+ (REG_READ(MIPIA_INTR_STAT_REG + reg_offset) | DSI_INTR_STATE_SPL_PKG_SENT));
+ }
+
+ /*send shut down package in HS*/
+ REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_SHUTDOWN);
+
+
+ /*
+ * Make sure the shutdown package has been sent;
+ * give up after a bounded number of polls.
+ */
+ while(!(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT)) {
+ retry--;
+
+ if(!retry) {
+ dev_err(dev->dev, "timeout\n");
+ break;
+ }
+ }
+
+ /* Sleep 100 ms to ensure the shutdown has finished */
+ msleep(100);
+
+ /*un-ready device*/
+ REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset),
+ (REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) & ~DSI_DEVICE_READY));
+
+shutdown_out:
+ gma_power_end(dev);
+}
+
+void mdfld_dsi_controller_startup(struct mdfld_dsi_config * dsi_config, int pipe)
+{
+ struct drm_device * dev;
+ u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+ int retry = 100;
+
+
+ if (!dsi_config) {
+ WARN_ON(1);
+ return;
+ }
+
+ dev = dsi_config->dev;
+ dev_dbg(dev->dev, "starting up DSI controller on pipe %d...\n", pipe);
+
+ if (!gma_power_begin(dev, true)) {
+ dev_err(dev->dev, "hw begin failed\n");
+ return;
+ }
+
+ if((REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) & DSI_DEVICE_READY))
+ goto startup_out;
+
+ /*if config DPI, turn on DPI interface*/
+ if(dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
+ if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
+ REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
+ }
+
+ REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_TURN_ON);
+
+ /*
+ * Make sure the turn-on package has been sent;
+ * give up after a bounded number of polls.
+ */
+ while(!(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT)) {
+ retry--;
+ if(!retry) {
+ dev_err(dev->dev, "timeout\n");
+ break;
+ }
+ }
+
+ msleep(100);
+ }
+
+ /*set device ready*/
+ REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset),
+ (REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) | DSI_DEVICE_READY));
+
+startup_out:
+ gma_power_end(dev);
+}
+
+
+static int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config,
+ u8 dcs,
+ u32 *data,
+ u8 transmission)
+{
+ struct mdfld_dsi_pkg_sender *sender
+ = mdfld_dsi_get_pkg_sender(dsi_config);
+
+ if (!sender || !data) {
+ DRM_ERROR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ if (transmission == MDFLD_DSI_HS_TRANSMISSION)
+ return mdfld_dsi_read_mcs_hs(sender, dcs, data, 1);
+ else if (transmission == MDFLD_DSI_LP_TRANSMISSION)
+ return mdfld_dsi_read_mcs_lp(sender, dcs, data, 1);
+ else
+ return -EINVAL;
+}
+
+int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
+ u32 *mode,
+ u8 transmission)
+{
+ if (!dsi_config || !mode) {
+ DRM_ERROR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ return mdfld_dsi_get_panel_status(dsi_config, 0x0a, mode, transmission);
+}
+
+int mdfld_dsi_get_diagnostic_result(struct mdfld_dsi_config *dsi_config,
+ u32 *result,
+ u8 transmission)
+{
+ if (!dsi_config || !result) {
+ DRM_ERROR("Invalid parameter\n");
+ return -EINVAL;
+ }
+
+ return mdfld_dsi_get_panel_status(dsi_config, 0x0f, result,
+ transmission);
+}
+
+/*
+ * NOTE: this function is used by OSPM.
+ * TODO: it will be removed later; we should work out display interfaces for OSPM
+ */
+void mdfld_dsi_controller_init(struct mdfld_dsi_config * dsi_config, int pipe)
+{
+ if(!dsi_config || ((pipe != 0) && (pipe != 2))) {
+ WARN_ON(1);
+ return;
+ }
+
+ if(dsi_config->type)
+ mdfld_dsi_dpi_controller_init(dsi_config, pipe);
+ else
+ mdfld_dsi_controller_dbi_init(dsi_config, pipe);
+}
+
+static void mdfld_dsi_connector_save(struct drm_connector * connector)
+{
+}
+
+static void mdfld_dsi_connector_restore(struct drm_connector * connector)
+{
+}
+
+static enum drm_connector_status mdfld_dsi_connector_detect(struct drm_connector * connector, bool force)
+{
+ struct psb_intel_output *psb_output
+ = to_psb_intel_output(connector);
+ struct mdfld_dsi_connector *dsi_connector
+ = MDFLD_DSI_CONNECTOR(psb_output);
+ return dsi_connector->status;
+}
+
+static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t value)
+{
+ struct drm_encoder *encoder = connector->encoder;
+
+ if (!strcmp(property->name, "scaling mode") && encoder) {
+ struct psb_intel_crtc * psb_crtc = to_psb_intel_crtc(encoder->crtc);
+ bool bTransitionFromToCentered;
+ uint64_t curValue;
+
+ if (!psb_crtc)
+ goto set_prop_error;
+
+ switch (value) {
+ case DRM_MODE_SCALE_FULLSCREEN:
+ break;
+ case DRM_MODE_SCALE_NO_SCALE:
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ break;
+ default:
+ goto set_prop_error;
+ }
+
+ if (drm_connector_property_get_value(connector, property, &curValue))
+ goto set_prop_error;
+
+ if (curValue == value)
+ goto set_prop_done;
+
+ if (drm_connector_property_set_value(connector, property, value))
+ goto set_prop_error;
+
+ bTransitionFromToCentered = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
+ (value == DRM_MODE_SCALE_NO_SCALE);
+
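+ /* Switching to or from DRM_MODE_SCALE_NO_SCALE needs a full mode set
+ * below; other scaling changes only re-run the encoder mode_set. */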
+ if (psb_crtc->saved_mode.hdisplay != 0 &&
+ psb_crtc->saved_mode.vdisplay != 0) {
+ if (bTransitionFromToCentered) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc, &psb_crtc->saved_mode,
+ encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
+ goto set_prop_error;
+ } else {
+ struct drm_encoder_helper_funcs *pEncHFuncs = encoder->helper_private;
+ pEncHFuncs->mode_set(encoder, &psb_crtc->saved_mode,
+ &psb_crtc->saved_adjusted_mode);
+ }
+ }
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ } else if (!strcmp(property->name, "backlight") && encoder) {
+ struct drm_psb_private *dev_priv = encoder->dev->dev_private;
+ struct backlight_device *psb_bd = dev_priv->backlight_device;
+ dev_dbg(encoder->dev->dev, "backlight level = %d\n", (int)value);
+ if (drm_connector_property_set_value(connector, property, value))
+ goto set_prop_error;
+ else {
+ dev_dbg(encoder->dev->dev,
+ "set brightness to %d", (int)value);
+ if (psb_bd) {
+ psb_bd->props.brightness = value;
+ backlight_update_status(psb_bd);
+ }
+ }
+#endif
+ }
+set_prop_done:
+ return 0;
+set_prop_error:
+ return -1;
+}
+
+static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_output * psb_output = to_psb_intel_output(connector);
+ struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
+ struct mdfld_dsi_pkg_sender * sender;
+
+ if(!dsi_connector)
+ return;
+
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+
+ sender = dsi_connector->pkg_sender;
+
+ mdfld_dsi_pkg_sender_destroy(sender);
+
+ kfree(dsi_connector);
+}
+
+static int mdfld_dsi_connector_get_modes(struct drm_connector * connector)
+{
+ struct psb_intel_output * psb_output = to_psb_intel_output(connector);
+ struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
+ struct mdfld_dsi_config * dsi_config = mdfld_dsi_get_config(dsi_connector);
+ struct drm_display_mode * fixed_mode = dsi_config->fixed_mode;
+ struct drm_display_mode * dup_mode = NULL;
+ struct drm_device * dev = connector->dev;
+
+ connector->display_info.min_vfreq = 0;
+ connector->display_info.max_vfreq = 200;
+ connector->display_info.min_hfreq = 0;
+ connector->display_info.max_hfreq = 200;
+
+ if(fixed_mode) {
+ dev_dbg(dev->dev, "fixed_mode %dx%d\n",
+ fixed_mode->hdisplay, fixed_mode->vdisplay);
+
+ dup_mode = drm_mode_duplicate(dev, fixed_mode);
+ drm_mode_probed_add(connector, dup_mode);
+ return 1;
+ }
+ dev_err(dev->dev, "Didn't get any modes!\n");
+ return 0;
+}
+
+static int mdfld_dsi_connector_mode_valid(struct drm_connector * connector, struct drm_display_mode * mode)
+{
+ struct psb_intel_output * psb_output = to_psb_intel_output(connector);
+ struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
+ struct mdfld_dsi_config * dsi_config = mdfld_dsi_get_config(dsi_connector);
+ struct drm_display_mode * fixed_mode = dsi_config->fixed_mode;
+
+ dev_dbg(connector->dev->dev, "mode %p, fixed mode %p\n",
+ mode, fixed_mode);
+
+ if(mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if(mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ /*
+ * FIXME: the current display controller has no panel fitting unit, so
+ * reject any mode that does not match the fixed mode. Up-scaling
+ * (panel fitting) may be supported later.
+ */
+ if(fixed_mode) {
+ if(mode->hdisplay != fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if(mode->vdisplay != fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+ dev_dbg(connector->dev->dev, "mode ok\n");
+
+ return MODE_OK;
+}
+
+static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
+{
+#ifdef CONFIG_PM_RUNTIME
+ struct drm_device * dev = connector->dev;
+ struct drm_psb_private * dev_priv = dev->dev_private;
+ bool panel_on, panel_on2;
+#endif
+ /* First, execute DPMS */
+ drm_helper_connector_dpms(connector, mode);
+
+#ifdef CONFIG_PM_RUNTIME
+ if(mdfld_panel_dpi(dev)) {
+ /* DPI panel */
+ panel_on = dev_priv->dpi_panel_on;
+ panel_on2 = dev_priv->dpi_panel_on2;
+ } else {
+ /* DBI panel */
+ panel_on = dev_priv->dbi_panel_on;
+ panel_on2 = dev_priv->dbi_panel_on2;
+ }
+
+ /* Then check all display panels + monitors status */
+ /* Make sure the Display (B) sub-system is not in S0i3 while the DC
+ * registers are read or written, otherwise a "Fabric error" would
+ * occur during the S0i3 state. */
+ if(!panel_on && !panel_on2 && !(REG_READ(HDMIB_CONTROL)
+ & HDMIB_PORT_EN)) {
+ /* Request rpm idle */
+ if(dev_priv->rpm_enabled)
+ pm_request_idle(&dev->pdev->dev);
+ }
+ /*
+ * if rpm wasn't enabled yet, try to allow it
+ * FIXME: won't enable rpm for DPI since DPI
+ * CRTC setting is a little messy now.
+ * Enable it later!
+ */
+#if 0
+ if(!dev_priv->rpm_enabled && !mdfld_panel_dpi(dev))
+ ospm_runtime_pm_allow(dev);
+#endif
+#endif
+}
+
+static struct drm_encoder *mdfld_dsi_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct psb_intel_output * psb_output = to_psb_intel_output(connector);
+ struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
+ struct mdfld_dsi_config * dsi_config = mdfld_dsi_get_config(dsi_connector);
+ struct mdfld_dsi_encoder * encoder = NULL;
+
+ if(dsi_config->type == MDFLD_DSI_ENCODER_DBI)
+ encoder = dsi_config->encoders[MDFLD_DSI_ENCODER_DBI];
+ else if (dsi_config->type == MDFLD_DSI_ENCODER_DPI)
+ encoder = dsi_config->encoders[MDFLD_DSI_ENCODER_DPI];
+
+ dev_dbg(connector->dev->dev, "get encoder %p\n", encoder);
+
+ if(!encoder) {
+ dev_err(connector->dev->dev,
+ "Invalid encoder for type %d\n", dsi_config->type);
+ return NULL;
+ }
+ dsi_config->encoder = encoder;
+ return &encoder->base;
+}
+
+/* DSI connector funcs */
+static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
+ .dpms = mdfld_dsi_connector_dpms,
+ .save = mdfld_dsi_connector_save,
+ .restore = mdfld_dsi_connector_restore,
+ .detect = mdfld_dsi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = mdfld_dsi_connector_set_property,
+ .destroy = mdfld_dsi_connector_destroy,
+};
+
+/* DSI connector helper funcs */
+static const struct drm_connector_helper_funcs mdfld_dsi_connector_helper_funcs = {
+ .get_modes = mdfld_dsi_connector_get_modes,
+ .mode_valid = mdfld_dsi_connector_mode_valid,
+ .best_encoder = mdfld_dsi_connector_best_encoder,
+};
+
+static int mdfld_dsi_get_default_config(struct drm_device * dev,
+ struct mdfld_dsi_config * config, int pipe)
+{
+ if(!dev || !config) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ config->bpp = 24;
+ config->type = mdfld_panel_dpi(dev);
+ config->lane_count = 2;
+ config->channel_num = 0;
+ /*NOTE: video mode is ignored when type is MDFLD_DSI_ENCODER_DBI*/
+ if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
+ config->video_mode = MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE;
+ } else {
+ config->video_mode = MDFLD_DSI_VIDEO_BURST_MODE;
+ }
+
+ return 0;
+}
+
+/*
+ * Returns the panel fixed mode from configuration.
+ */
+struct drm_display_mode *
+mdfld_dsi_get_configuration_mode(struct mdfld_dsi_config * dsi_config, int pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ struct drm_display_mode *mode;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
+ bool use_gct = false;
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode) {
+ dev_err(dev->dev, "Out of memory for mode\n");
+ return NULL;
+ }
+ if (use_gct) {
+ dev_dbg(dev->dev, "gct find MIPI panel.\n");
+
+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+ mode->hsync_start = mode->hdisplay + \
+ ((ti->hsync_offset_hi << 8) | \
+ ti->hsync_offset_lo);
+ mode->hsync_end = mode->hsync_start + \
+ ((ti->hsync_pulse_width_hi << 8) | \
+ ti->hsync_pulse_width_lo);
+ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
+ ti->hblank_lo);
+ mode->vsync_start = \
+ mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
+ ti->vsync_offset_lo);
+ mode->vsync_end = \
+ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
+ ti->vsync_pulse_width_lo);
+ mode->vtotal = mode->vdisplay + \
+ ((ti->vblank_hi << 8) | ti->vblank_lo);
+ mode->clock = ti->pixel_clock * 10;
+ } else {
+ if(dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
+ if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
+ mode->hdisplay = 480;
+ mode->vdisplay = 854;
+ mode->hsync_start = 487;
+ mode->hsync_end = 490;
+ mode->htotal = 499;
+ mode->vsync_start = 861;
+ mode->vsync_end = 865;
+ mode->vtotal = 873;
+ mode->clock = 33264;
+ } else {
+ mode->hdisplay = 864;
+ mode->vdisplay = 480;
+ mode->hsync_start = 873;
+ mode->hsync_end = 876;
+ mode->htotal = 887;
+ mode->vsync_start = 487;
+ mode->vsync_end = 490;
+ mode->vtotal = 499;
+ mode->clock = 33264;
+ }
+ } else if(dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
+ mode->hdisplay = 864;
+ mode->vdisplay = 480;
+ mode->hsync_start = 872;
+ mode->hsync_end = 876;
+ mode->htotal = 884;
+ mode->vsync_start = 482;
+ mode->vsync_end = 494;
+ mode->vtotal = 486;
+ mode->clock = 25777;
+
+ }
+ }
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ return mode;
+}
+
+int mdfld_dsi_panel_reset(int pipe)
+{
+ unsigned gpio;
+ int ret = 0;
+
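+ /* Panel reset GPIO, presumably board specific: 128 drives the pipe 0
+ * (MIPI A) panel, 34 the pipe 2 (MIPI C) panel. */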
+ switch (pipe) {
+ case 0:
+ gpio = 128;
+ break;
+ case 2:
+ gpio = 34;
+ break;
+ default:
+ DRM_ERROR("Invalid output\n");
+ return -EINVAL;
+ }
+
+ ret = gpio_request(gpio, "gfx");
+ if (ret) {
+ DRM_ERROR("gpio_rqueset failed\n");
+ return ret;
+ }
+
+ ret = gpio_direction_output(gpio, 1);
+ if (ret) {
+ DRM_ERROR("gpio_direction_output failed\n");
+ goto gpio_error;
+ }
+
+ gpio_get_value(gpio);
+
+gpio_error:
+ if (gpio_is_valid(gpio))
+ gpio_free(gpio);
+
+ return ret;
+}
+
+/*
+ * MIPI output init
+ * @dev: drm device
+ * @pipe: pipe number, 0 or 2
+ * @config: optional DSI configuration; a default one is allocated when NULL
+ *
+ * Initialize a MIPI output on @pipe, including creation of the DRM
+ * connector and encoder objects for the DSI output.
+ */
+void mdfld_dsi_output_init(struct drm_device *dev,
+ int pipe,
+ struct mdfld_dsi_config *config,
+ struct panel_funcs* p_cmd_funcs,
+ struct panel_funcs* p_vid_funcs)
+{
+ struct mdfld_dsi_config * dsi_config;
+ struct mdfld_dsi_connector * dsi_connector;
+ struct psb_intel_output * psb_output;
+ struct drm_connector * connector;
+ struct mdfld_dsi_encoder * encoder;
+ struct drm_psb_private * dev_priv = dev->dev_private;
+ struct panel_info dsi_panel_info;
+ u32 width_mm, height_mm;
+
+ dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
+
+ if(!dev || ((pipe != 0) && (pipe != 2))) {
+ WARN_ON(1);
+ return;
+ }
+
+ /*create a new connector*/
+ dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
+ if(!dsi_connector) {
+ DRM_ERROR("No memory");
+ return;
+ }
+
+ dsi_connector->pipe = pipe;
+
+ /*set DSI config*/
+ if(config) {
+ dsi_config = config;
+ } else {
+ dsi_config = kzalloc(sizeof(struct mdfld_dsi_config), GFP_KERNEL);
+ if(!dsi_config) {
+ dev_err(dev->dev,
+ "cannot allocate memory for DSI config\n");
+ goto dsi_init_err0;
+ }
+
+ mdfld_dsi_get_default_config(dev, dsi_config, pipe);
+ }
+
+ dsi_connector->private = dsi_config;
+
+ dsi_config->changed = 1;
+ dsi_config->dev = dev;
+
+ /* Init fixed mode based on DSI config type */
+ if(dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
+ dsi_config->fixed_mode = p_cmd_funcs->get_config_mode(dev);
+ if(p_cmd_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
+ goto dsi_init_err0;
+ } else if(dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
+ dsi_config->fixed_mode = p_vid_funcs->get_config_mode(dev);
+ if(p_vid_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
+ goto dsi_init_err0;
+ }
+
+ width_mm = dsi_panel_info.width_mm;
+ height_mm = dsi_panel_info.height_mm;
+
+ dsi_config->mode = dsi_config->fixed_mode;
+ dsi_config->connector = dsi_connector;
+
+ if(!dsi_config->fixed_mode) {
+ dev_err(dev->dev, "No pannel fixed mode was found\n");
+ goto dsi_init_err0;
+ }
+
+ if(pipe && dev_priv->dsi_configs[0]) {
+ dsi_config->dvr_ic_inited = 0;
+ dev_priv->dsi_configs[1] = dsi_config;
+ } else if(pipe == 0) {
+ dsi_config->dvr_ic_inited = 1;
+ dev_priv->dsi_configs[0] = dsi_config;
+ } else {
+ dev_err(dev->dev, "Trying to init MIPI1 before MIPI0\n");
+ goto dsi_init_err0;
+ }
+
+ /*init drm connector object*/
+ psb_output = &dsi_connector->base;
+
+ psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2;
+
+ connector = &psb_output->base;
+ /* Revisit type if MIPI/HDMI bridges ever appear on Medfield */
+ drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
+
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->display_info.width_mm = width_mm;
+ connector->display_info.height_mm = height_mm;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ /* Attach properties */
+ drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
+ drm_connector_attach_property(connector, dev_priv->backlight_property, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
+
+ /* Init DSI package sender on this output */
+ if (mdfld_dsi_pkg_sender_init(dsi_connector, pipe)) {
+ DRM_ERROR("Package Sender initialization failed on pipe %d\n", pipe);
+ goto dsi_init_err0;
+ }
+
+ /* Init DBI & DPI encoders */
+ if (p_cmd_funcs) {
+ encoder = mdfld_dsi_dbi_init(dev, dsi_connector, p_cmd_funcs);
+ if(!encoder) {
+ dev_err(dev->dev, "Create DBI encoder failed\n");
+ goto dsi_init_err1;
+ }
+ encoder->private = dsi_config;
+ dsi_config->encoders[MDFLD_DSI_ENCODER_DBI] = encoder;
+ }
+
+ if(p_vid_funcs) {
+ encoder = mdfld_dsi_dpi_init(dev, dsi_connector, p_vid_funcs);
+ if(!encoder) {
+ dev_err(dev->dev, "Create DPI encoder failed\n");
+ goto dsi_init_err1;
+ }
+ encoder->private = dsi_config;
+ dsi_config->encoders[MDFLD_DSI_ENCODER_DPI] = encoder;
+ }
+
+ drm_sysfs_connector_add(connector);
+ return;
+
+ /*TODO: add code to destroy outputs on error*/
+dsi_init_err1:
+ /*destroy sender*/
+ mdfld_dsi_pkg_sender_destroy(dsi_connector->pkg_sender);
+
+ drm_connector_cleanup(connector);
+ kfree(dsi_config->fixed_mode);
+ kfree(dsi_config);
+dsi_init_err0:
+ kfree(dsi_connector);
+}
diff --git a/drivers/staging/gma500/mdfld_dsi_output.h b/drivers/staging/gma500/mdfld_dsi_output.h
new file mode 100644
index 00000000000..4699267efd6
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_dsi_output.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#ifndef __MDFLD_DSI_OUTPUT_H__
+#define __MDFLD_DSI_OUTPUT_H__
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+#include "psb_drv.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "power.h"
+#include "mdfld_output.h"
+
+#include <asm/mrst.h>
+
+
+static inline struct mdfld_dsi_config *
+ mdfld_dsi_get_config(struct mdfld_dsi_connector *connector)
+{
+ if (!connector)
+ return NULL;
+ return (struct mdfld_dsi_config *)connector->private;
+}
+
+static inline void *mdfld_dsi_get_pkg_sender(struct mdfld_dsi_config *config)
+{
+ struct mdfld_dsi_connector *dsi_connector;
+
+ if (!config)
+ return NULL;
+
+ dsi_connector = config->connector;
+
+ if (!dsi_connector)
+ return NULL;
+
+ return dsi_connector->pkg_sender;
+}
+
+static inline struct mdfld_dsi_config *
+ mdfld_dsi_encoder_get_config(struct mdfld_dsi_encoder *encoder)
+{
+ if (!encoder)
+ return NULL;
+ return (struct mdfld_dsi_config *)encoder->private;
+}
+
+static inline struct mdfld_dsi_connector *
+ mdfld_dsi_encoder_get_connector(struct mdfld_dsi_encoder *encoder)
+{
+ struct mdfld_dsi_config *config;
+
+ if (!encoder)
+ return NULL;
+
+ config = mdfld_dsi_encoder_get_config(encoder);
+ if (!config)
+ return NULL;
+
+ return config->connector;
+}
+
+static inline void *mdfld_dsi_encoder_get_pkg_sender(
+ struct mdfld_dsi_encoder *encoder)
+{
+ struct mdfld_dsi_config *dsi_config;
+
+ dsi_config = mdfld_dsi_encoder_get_config(encoder);
+ if (!dsi_config)
+ return NULL;
+
+ return mdfld_dsi_get_pkg_sender(dsi_config);
+}
+
+static inline int mdfld_dsi_encoder_get_pipe(struct mdfld_dsi_encoder *encoder)
+{
+ struct mdfld_dsi_connector *connector;
+
+ if (!encoder)
+ return -1;
+
+ connector = mdfld_dsi_encoder_get_connector(encoder);
+ if (!connector)
+ return -1;
+
+ return connector->pipe;
+}
+
+extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev,
+ u32 gen_fifo_stat_reg, u32 fifo_stat);
+extern void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config,
+ int pipe);
+extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe,
+ int level);
+extern void mdfld_dsi_output_init(struct drm_device *dev, int pipe,
+ struct mdfld_dsi_config *config,
+ struct panel_funcs *p_cmd_funcs,
+ struct panel_funcs *p_vid_funcs);
+extern void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config,
+ int pipe);
+extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
+ u32 *mode,
+ u8 transmission);
+extern int mdfld_dsi_get_diagnostic_result(struct mdfld_dsi_config *dsi_config,
+ u32 *result,
+ u8 transmission);
+extern int mdfld_dsi_panel_reset(int pipe);
+
+#endif /*__MDFLD_DSI_OUTPUT_H__*/
diff --git a/drivers/staging/gma500/mdfld_dsi_pkg_sender.c b/drivers/staging/gma500/mdfld_dsi_pkg_sender.c
new file mode 100644
index 00000000000..9b96a5c9abc
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_dsi_pkg_sender.c
@@ -0,0 +1,1484 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li<yaodong.li@intel.com>
+ */
+
+#include <linux/freezer.h>
+
+#include "mdfld_dsi_output.h"
+#include "mdfld_dsi_pkg_sender.h"
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dpi.h"
+
+#define MDFLD_DSI_DBI_FIFO_TIMEOUT 100
+#define MDFLD_DSI_MAX_RETURN_PACKET_SIZE 512
+#define MDFLD_DSI_READ_MAX_COUNT 5000
+
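+/*
+ * Human readable names for bits 0-31 of the MIPI interrupt status
+ * register; dsi_error_handler() indexes this array by bit number.
+ */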
+static const char * const dsi_errors[] = {
+ "RX SOT Error",
+ "RX SOT Sync Error",
+ "RX EOT Sync Error",
+ "RX Escape Mode Entry Error",
+ "RX LP TX Sync Error",
+ "RX HS Receive Timeout Error",
+ "RX False Control Error",
+ "RX ECC Single Bit Error",
+ "RX ECC Multibit Error",
+ "RX Checksum Error",
+ "RX DSI Data Type Not Recognised",
+ "RX DSI VC ID Invalid",
+ "TX False Control Error",
+ "TX ECC Single Bit Error",
+ "TX ECC Multibit Error",
+ "TX Checksum Error",
+ "TX DSI Data Type Not Recognised",
+ "TX DSI VC ID invalid",
+ "High Contention",
+ "Low contention",
+ "DPI FIFO Under run",
+ "HS TX Timeout",
+ "LP RX Timeout",
+ "Turn Around ACK Timeout",
+ "ACK With No Error",
+ "RX Invalid TX Length",
+ "RX Prot Violation",
+ "HS Generic Write FIFO Full",
+ "LP Generic Write FIFO Full",
+ "Generic Read Data Avail",
+ "Special Packet Sent",
+ "Tearing Effect",
+};
+
+static int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender,
+ u32 mask)
+{
+ struct drm_device *dev = sender->dev;
+ u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg;
+ int retry = 0xffff;
+
+ while (retry--) {
+ if ((mask & REG_READ(gen_fifo_stat_reg)) == mask)
+ return 0;
+ udelay(100);
+ }
+ dev_err(dev->dev, "fifo is NOT empty 0x%08x\n",
+ REG_READ(gen_fifo_stat_reg));
+ return -EIO;
+}
+
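+/*
+ * The masks below pick the FIFO-empty status bits of the generic FIFO
+ * status register: bits 2 and 18 for the HS generic FIFOs, 10 and 26 for
+ * the LP generic FIFOs, 27 for the DBI FIFO and, presumably, 28 for DPI.
+ */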
+static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+ return wait_for_gen_fifo_empty(sender, (1 << 2) | (1 << 10) | (1 << 18)
+ | (1 << 26) | (1 << 27) | (1 << 28));
+}
+
+static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+ return wait_for_gen_fifo_empty(sender, (1 << 10) | (1 << 26));
+}
+
+static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+ return wait_for_gen_fifo_empty(sender, (1 << 2) | (1 << 18));
+}
+
+static int wait_for_dbi_fifo_empty(struct mdfld_dsi_pkg_sender *sender)
+{
+ return wait_for_gen_fifo_empty(sender, (1 << 27));
+}
+
+static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
+{
+ u32 intr_stat_reg = sender->mipi_intr_stat_reg;
+ struct drm_device *dev = sender->dev;
+
+ switch (mask) {
+ case (1 << 0):
+ case (1 << 1):
+ case (1 << 2):
+ case (1 << 3):
+ case (1 << 4):
+ case (1 << 5):
+ case (1 << 6):
+ case (1 << 7):
+ case (1 << 8):
+ case (1 << 9):
+ case (1 << 10):
+ case (1 << 11):
+ case (1 << 12):
+ case (1 << 13):
+ break;
+ case (1 << 14):
+ /*wait for all fifo empty*/
+ /* wait_for_all_fifos_empty(sender); */
+ break;
+ case (1 << 15):
+ break;
+ case (1 << 16):
+ break;
+ case (1 << 17):
+ break;
+ case (1 << 18):
+ case (1 << 19):
+ /*wait for contention recovery time*/
+ /*mdelay(10);*/
+ /*wait for all fifo empty*/
+ if (0)
+ wait_for_all_fifos_empty(sender);
+ break;
+ case (1 << 20):
+ break;
+ case (1 << 21):
+ /*wait for all fifo empty*/
+ /*wait_for_all_fifos_empty(sender);*/
+ break;
+ case (1 << 22):
+ break;
+ case (1 << 23):
+ case (1 << 24):
+ case (1 << 25):
+ case (1 << 26):
+ case (1 << 27):
+ /* HS Gen fifo full */
+ REG_WRITE(intr_stat_reg, mask);
+ wait_for_hs_fifos_empty(sender);
+ break;
+ case (1 << 28):
+ /* LP Gen fifo full */
+ REG_WRITE(intr_stat_reg, mask);
+ wait_for_lp_fifos_empty(sender);
+ break;
+ case (1 << 29):
+ case (1 << 30):
+ case (1 << 31):
+ break;
+ }
+
+ if (mask & REG_READ(intr_stat_reg))
+ dev_warn(dev->dev, "Cannot clear interrupt 0x%08x\n", mask);
+
+ return 0;
+}
+
+static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender)
+{
+ struct drm_device *dev = sender->dev;
+ u32 intr_stat_reg = sender->mipi_intr_stat_reg;
+ u32 mask;
+ u32 intr_stat;
+ int i;
+ int err = 0;
+
+ intr_stat = REG_READ(intr_stat_reg);
+
+ for (i = 0; i < 32; i++) {
+ mask = (0x00000001UL) << i;
+ if (intr_stat & mask) {
+ dev_dbg(dev->dev, "[DSI]: %s\n", dsi_errors[i]);
+ err = handle_dsi_error(sender, mask);
+ if (err)
+ dev_err(dev->dev, "Cannot handle error\n");
+ }
+ }
+ return err;
+}
+
+static inline int dbi_cmd_sent(struct mdfld_dsi_pkg_sender *sender)
+{
+ struct drm_device *dev = sender->dev;
+ u32 retry = 0xffff;
+ u32 dbi_cmd_addr_reg = sender->mipi_cmd_addr_reg;
+
+ /* Poll bit 0; it stays set while the DBI command is still executing */
+ while (retry--) {
+ if (!(REG_READ(dbi_cmd_addr_reg) & (1 << 0)))
+ return 0;
+ }
+
+ dev_err(dev->dev, "Timeout waiting for DBI command completion\n");
+ return -EAGAIN;
+}
+
+/*
+ * NOTE: this interface is abandoned except for the write_mem_start DCS;
+ * all other DCS commands are sent via the generic pkg interfaces.
+ */
+static int send_dcs_pkg(struct mdfld_dsi_pkg_sender *sender,
+ struct mdfld_dsi_pkg *pkg)
+{
+ struct drm_device *dev = sender->dev;
+ struct mdfld_dsi_dcs_pkg *dcs_pkg = &pkg->pkg.dcs_pkg;
+ u32 dbi_cmd_len_reg = sender->mipi_cmd_len_reg;
+ u32 dbi_cmd_addr_reg = sender->mipi_cmd_addr_reg;
+ u32 cb_phy = sender->dbi_cb_phy;
+ u32 index = 0;
+ u8 *cb = (u8 *)sender->dbi_cb_addr;
+ int i;
+ int ret;
+
+ if (!sender->dbi_pkg_support) {
+ dev_err(dev->dev, "Trying to send DCS on a non DBI output, abort!\n");
+ return -ENOTSUPP;
+ }
+
+ /*wait for DBI fifo empty*/
+ wait_for_dbi_fifo_empty(sender);
+
+ *(cb + (index++)) = dcs_pkg->cmd;
+ if (dcs_pkg->param_num) {
+ for (i = 0; i < dcs_pkg->param_num; i++)
+ *(cb + (index++)) = *(dcs_pkg->param + i);
+ }
+
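+ /* Point the adapter at the command buffer; bit 0 starts the command
+ * (and reads back as busy), bit 1 selects the display pipe as the
+ * data source when requested. */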
+ REG_WRITE(dbi_cmd_len_reg, (1 + dcs_pkg->param_num));
+ REG_WRITE(dbi_cmd_addr_reg,
+ (cb_phy << CMD_MEM_ADDR_OFFSET)
+ | (1 << 0)
+ | ((dcs_pkg->data_src == CMD_DATA_SRC_PIPE) ? (1 << 1) : 0));
+
+ ret = dbi_cmd_sent(sender);
+ if (ret) {
+ dev_err(dev->dev, "command 0x%x not complete\n", dcs_pkg->cmd);
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static int __send_short_pkg(struct mdfld_dsi_pkg_sender *sender,
+ struct mdfld_dsi_pkg *pkg)
+{
+ struct drm_device *dev = sender->dev;
+ u32 hs_gen_ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
+ u32 lp_gen_ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
+ u32 gen_ctrl_val = 0;
+ struct mdfld_dsi_gen_short_pkg *short_pkg = &pkg->pkg.short_pkg;
+
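+ /* Pack the short packet into the generic control word: command,
+ * virtual channel 0, packet data type and the single parameter byte. */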
+ gen_ctrl_val |= short_pkg->cmd << MCS_COMMANDS_POS;
+ gen_ctrl_val |= 0 << DCS_CHANNEL_NUMBER_POS;
+ gen_ctrl_val |= pkg->pkg_type;
+ gen_ctrl_val |= short_pkg->param << MCS_PARAMETER_POS;
+
+ if (pkg->transmission_type == MDFLD_DSI_HS_TRANSMISSION) {
+ /* wait for hs fifo empty */
+ /* wait_for_hs_fifos_empty(sender); */
+ /* Send pkg */
+ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
+ } else if (pkg->transmission_type == MDFLD_DSI_LP_TRANSMISSION) {
+ /* wait_for_lp_fifos_empty(sender); */
+ /* Send pkg*/
+ REG_WRITE(lp_gen_ctrl_reg, gen_ctrl_val);
+ } else {
+ dev_err(dev->dev, "Unknown transmission type %d\n",
+ pkg->transmission_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __send_long_pkg(struct mdfld_dsi_pkg_sender *sender,
+ struct mdfld_dsi_pkg *pkg)
+{
+ struct drm_device *dev = sender->dev;
+ u32 hs_gen_ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
+ u32 hs_gen_data_reg = sender->mipi_hs_gen_data_reg;
+ u32 lp_gen_ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
+ u32 lp_gen_data_reg = sender->mipi_lp_gen_data_reg;
+ u32 gen_ctrl_val = 0;
+ u32 *dp;
+ int i;
+ struct mdfld_dsi_gen_long_pkg *long_pkg = &pkg->pkg.long_pkg;
+
+ dp = long_pkg->data;
+
+ /*
+ * Set up word count for long pkg
+ * FIXME: double check word count field.
+ * currently, using the byte counts of the payload as the word count.
+ * ------------------------------------------------------------
+ * | DI | WC | ECC| PAYLOAD |CHECKSUM|
+ * ------------------------------------------------------------
+ */
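+ /* long_pkg->len counts 32-bit words, so "len << 2" is the payload
+ * length in bytes */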
+ gen_ctrl_val |= (long_pkg->len << 2) << WORD_COUNTS_POS;
+ gen_ctrl_val |= 0 << DCS_CHANNEL_NUMBER_POS;
+ gen_ctrl_val |= pkg->pkg_type;
+
+ if (pkg->transmission_type == MDFLD_DSI_HS_TRANSMISSION) {
+ /* Wait for hs ctrl and data fifos to be empty */
+ /* wait_for_hs_fifos_empty(sender); */
+ for (i = 0; i < long_pkg->len; i++)
+ REG_WRITE(hs_gen_data_reg, *(dp + i));
+ REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
+ } else if (pkg->transmission_type == MDFLD_DSI_LP_TRANSMISSION) {
+ /* wait_for_lp_fifos_empty(sender); */
+ for (i = 0; i < long_pkg->len; i++)
+ REG_WRITE(lp_gen_data_reg, *(dp + i));
+ REG_WRITE(lp_gen_ctrl_reg, gen_ctrl_val);
+ } else {
+ dev_err(dev->dev, "Unknown transmission type %d\n",
+ pkg->transmission_type);
+ return -EINVAL;
+ }
+
+ return 0;
+
+}
+
+static int send_mcs_short_pkg(struct mdfld_dsi_pkg_sender *sender,
+ struct mdfld_dsi_pkg *pkg)
+{
+ return __send_short_pkg(sender, pkg);
+}
+
+static int send_mcs_long_pkg(struct mdfld_dsi_pkg_sender *sender,
+ struct mdfld_dsi_pkg *pkg)
+{
+ return __send_long_pkg(sender, pkg);
+}
+
+static int send_gen_short_pkg(struct mdfld_dsi_pkg_sender *sender,
+ struct mdfld_dsi_pkg *pkg)
+{
+ return __send_short_pkg(sender, pkg);
+}
+
+static int send_gen_long_pkg(struct mdfld_dsi_pkg_sender *sender,
+ struct mdfld_dsi_pkg *pkg)
+{
+ return __send_long_pkg(sender, pkg);
+}
+
+static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender,
+ struct mdfld_dsi_pkg *pkg)
+{
+ u8 cmd;
+ u8 *data;
+
+ switch (pkg->pkg_type) {
+ case MDFLD_DSI_PKG_DCS:
+ cmd = pkg->pkg.dcs_pkg.cmd;
+ break;
+ case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
+ case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
+ cmd = pkg->pkg.short_pkg.cmd;
+ break;
+ case MDFLD_DSI_PKG_MCS_LONG_WRITE:
+ data = (u8 *)pkg->pkg.long_pkg.data;
+ cmd = *data;
+ break;
+ default:
+ return 0;
+ }
+
+ /* Mark the sender busy so no other package is sent while we may sleep */
+ sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
+
+ /* Check the command against the current panel mode */
+ if ((sender->panel_mode & MDFLD_DSI_PANEL_MODE_SLEEP) &&
+ cmd != exit_sleep_mode) {
+ dev_err(sender->dev->dev,
+ "sending 0x%x when panel sleep in\n", cmd);
+ sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+ return -EINVAL;
+ }
+
+ /* Wait 120 ms in case exit_sleep_mode was just sent */
+ if (cmd == DCS_ENTER_SLEEP_MODE) {
+ /*TODO: replace it with msleep later*/
+ mdelay(120);
+ }
+ return 0;
+}
+
+static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender,
+ struct mdfld_dsi_pkg *pkg)
+{
+ u8 cmd;
+ u8 *data;
+
+ switch (pkg->pkg_type) {
+ case MDFLD_DSI_PKG_DCS:
+ cmd = pkg->pkg.dcs_pkg.cmd;
+ break;
+ case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
+ case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
+ cmd = pkg->pkg.short_pkg.cmd;
+ break;
+ case MDFLD_DSI_PKG_MCS_LONG_WRITE:
+ data = (u8 *)pkg->pkg.long_pkg.data;
+ cmd = *data;
+ break;
+ default:
+ return 0;
+ }
+
+ /* Update panel status */
+ if (cmd == DCS_ENTER_SLEEP_MODE) {
+ sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
+ /*TODO: replace it with msleep later*/
+ mdelay(120);
+ } else if (cmd == DCS_EXIT_SLEEP_MODE) {
+ sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
+ /*TODO: replace it with msleep later*/
+ mdelay(120);
+ } else if (unlikely(cmd == DCS_SOFT_RESET)) {
+ /*TODO: replace it with msleep later*/
+ mdelay(5);
+ }
+ sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+ return 0;
+
+}
+
+static int do_send_pkg(struct mdfld_dsi_pkg_sender *sender,
+ struct mdfld_dsi_pkg *pkg)
+{
+ int ret;
+
+ if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) {
+ dev_err(sender->dev->dev, "sender is busy\n");
+ return -EAGAIN;
+ }
+
+ ret = send_pkg_prepare(sender, pkg);
+ if (ret) {
+ dev_err(sender->dev->dev, "send_pkg_prepare error\n");
+ return ret;
+ }
+
+ switch (pkg->pkg_type) {
+ case MDFLD_DSI_PKG_DCS:
+ ret = send_dcs_pkg(sender, pkg);
+ break;
+ case MDFLD_DSI_PKG_GEN_SHORT_WRITE_0:
+ case MDFLD_DSI_PKG_GEN_SHORT_WRITE_1:
+ case MDFLD_DSI_PKG_GEN_SHORT_WRITE_2:
+ case MDFLD_DSI_PKG_GEN_READ_0:
+ case MDFLD_DSI_PKG_GEN_READ_1:
+ case MDFLD_DSI_PKG_GEN_READ_2:
+ ret = send_gen_short_pkg(sender, pkg);
+ break;
+ case MDFLD_DSI_PKG_GEN_LONG_WRITE:
+ ret = send_gen_long_pkg(sender, pkg);
+ break;
+ case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
+ case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
+ case MDFLD_DSI_PKG_MCS_READ:
+ ret = send_mcs_short_pkg(sender, pkg);
+ break;
+ case MDFLD_DSI_PKG_MCS_LONG_WRITE:
+ ret = send_mcs_long_pkg(sender, pkg);
+ break;
+ default:
+ dev_err(sender->dev->dev, "Invalid pkg type 0x%x\n",
+ pkg->pkg_type);
+ ret = -EINVAL;
+ }
+ send_pkg_done(sender, pkg);
+ return ret;
+}
+
+static int send_pkg(struct mdfld_dsi_pkg_sender *sender,
+ struct mdfld_dsi_pkg *pkg)
+{
+ int err;
+
+ /* Handle DSI error */
+ err = dsi_error_handler(sender);
+ if (err) {
+ dev_err(sender->dev->dev, "Error handling failed\n");
+ err = -EAGAIN;
+ goto send_pkg_err;
+ }
+
+ /* Send pkg */
+ err = do_send_pkg(sender, pkg);
+ if (err) {
+ dev_err(sender->dev->dev, "sent pkg failed\n");
+ err = -EAGAIN;
+ goto send_pkg_err;
+ }
+
+ /* FIXME: should I query complete and fifo empty here? */
+send_pkg_err:
+ return err;
+}
+
+static struct mdfld_dsi_pkg *pkg_sender_get_pkg_locked(
+ struct mdfld_dsi_pkg_sender *sender)
+{
+ struct mdfld_dsi_pkg *pkg;
+
+ if (list_empty(&sender->free_list)) {
+ dev_err(sender->dev->dev, "No free pkg left\n");
+ return NULL;
+ }
+ pkg = list_first_entry(&sender->free_list, struct mdfld_dsi_pkg, entry);
+ /* Detach from free list */
+ list_del_init(&pkg->entry);
+ return pkg;
+}
+
+static void pkg_sender_put_pkg_locked(struct mdfld_dsi_pkg_sender *sender,
+ struct mdfld_dsi_pkg *pkg)
+{
+ memset(pkg, 0, sizeof(struct mdfld_dsi_pkg));
+ INIT_LIST_HEAD(&pkg->entry);
+ list_add_tail(&pkg->entry, &sender->free_list);
+}
+
+static int mdfld_dbi_cb_init(struct mdfld_dsi_pkg_sender *sender,
+ struct psb_gtt *pg, int pipe)
+{
+ unsigned long phys;
+ void *virt_addr = NULL;
+
+ switch (pipe) {
+ case 0:
+ /* FIXME: Doesn't this collide with stolen space ? */
+ phys = pg->gtt_phys_start - 0x1000;
+ break;
+ case 2:
+ phys = pg->gtt_phys_start - 0x800;
+ break;
+ default:
+ dev_err(sender->dev->dev, "Unsupported channel %d\n", pipe);
+ return -EINVAL;
+ }
+
+ virt_addr = ioremap_nocache(phys, 0x800);
+ if (!virt_addr) {
+ dev_err(sender->dev->dev, "Map DBI command buffer error\n");
+ return -ENOMEM;
+ }
+ sender->dbi_cb_phy = phys;
+ sender->dbi_cb_addr = virt_addr;
+ return 0;
+}
+
+static void mdfld_dbi_cb_destroy(struct mdfld_dsi_pkg_sender *sender)
+{
+ if (sender && sender->dbi_cb_addr)
+ iounmap(sender->dbi_cb_addr);
+}
+
+static void pkg_sender_queue_pkg(struct mdfld_dsi_pkg_sender *sender,
+ struct mdfld_dsi_pkg *pkg,
+ int delay)
+{
+ unsigned long flags;
+
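+ /* delay == 0: send synchronously under the lock; otherwise queue the
+ * pkg until mdfld_dsi_cmds_kick_out() processes the list. */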
+ spin_lock_irqsave(&sender->lock, flags);
+
+ if (!delay) {
+ send_pkg(sender, pkg);
+ pkg_sender_put_pkg_locked(sender, pkg);
+ } else {
+ /* Queue it */
+ list_add_tail(&pkg->entry, &sender->pkg_list);
+ }
+ spin_unlock_irqrestore(&sender->lock, flags);
+}
+
+static void process_pkg_list(struct mdfld_dsi_pkg_sender *sender)
+{
+ struct mdfld_dsi_pkg *pkg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sender->lock, flags);
+
+ while (!list_empty(&sender->pkg_list)) {
+ pkg = list_first_entry(&sender->pkg_list,
+ struct mdfld_dsi_pkg, entry);
+ send_pkg(sender, pkg);
+ list_del_init(&pkg->entry);
+ pkg_sender_put_pkg_locked(sender, pkg);
+ }
+
+ spin_unlock_irqrestore(&sender->lock, flags);
+}
+
+static int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender,
+ u32 *data, u32 len, u8 transmission, int delay)
+{
+ struct mdfld_dsi_pkg *pkg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sender->lock, flags);
+ pkg = pkg_sender_get_pkg_locked(sender);
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ if (!pkg) {
+ dev_err(sender->dev->dev, "No memory\n");
+ return -ENOMEM;
+ }
+ pkg->pkg_type = MDFLD_DSI_PKG_MCS_LONG_WRITE;
+ pkg->transmission_type = transmission;
+ pkg->pkg.long_pkg.data = data;
+ pkg->pkg.long_pkg.len = len;
+ INIT_LIST_HEAD(&pkg->entry);
+
+ pkg_sender_queue_pkg(sender, pkg, delay);
+ return 0;
+}
+
+static int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender,
+ u8 cmd, u8 param, u8 param_num,
+ u8 transmission,
+ int delay)
+{
+ struct mdfld_dsi_pkg *pkg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sender->lock, flags);
+ pkg = pkg_sender_get_pkg_locked(sender);
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ if (!pkg) {
+ dev_err(sender->dev->dev, "No memory\n");
+ return -ENOMEM;
+ }
+
+ if (param_num) {
+ pkg->pkg_type = MDFLD_DSI_PKG_MCS_SHORT_WRITE_1;
+ pkg->pkg.short_pkg.param = param;
+ } else {
+ pkg->pkg_type = MDFLD_DSI_PKG_MCS_SHORT_WRITE_0;
+ pkg->pkg.short_pkg.param = 0;
+ }
+ pkg->transmission_type = transmission;
+ pkg->pkg.short_pkg.cmd = cmd;
+ INIT_LIST_HEAD(&pkg->entry);
+
+ pkg_sender_queue_pkg(sender, pkg, delay);
+ return 0;
+}
+
+static int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender,
+ u8 param0, u8 param1, u8 param_num,
+ u8 transmission,
+ int delay)
+{
+ struct mdfld_dsi_pkg *pkg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sender->lock, flags);
+ pkg = pkg_sender_get_pkg_locked(sender);
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ if (!pkg) {
+ dev_err(sender->dev->dev, "No pkg memory\n");
+ return -ENOMEM;
+ }
+
+ switch (param_num) {
+ case 0:
+ pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_0;
+ pkg->pkg.short_pkg.cmd = 0;
+ pkg->pkg.short_pkg.param = 0;
+ break;
+ case 1:
+ pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_1;
+ pkg->pkg.short_pkg.cmd = param0;
+ pkg->pkg.short_pkg.param = 0;
+ break;
+ case 2:
+ pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_2;
+ pkg->pkg.short_pkg.cmd = param0;
+ pkg->pkg.short_pkg.param = param1;
+ break;
+ }
+
+ pkg->transmission_type = transmission;
+ INIT_LIST_HEAD(&pkg->entry);
+
+ pkg_sender_queue_pkg(sender, pkg, delay);
+ return 0;
+}
+
+static int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender,
+ u32 *data, u32 len, u8 transmission, int delay)
+{
+ struct mdfld_dsi_pkg *pkg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sender->lock, flags);
+ pkg = pkg_sender_get_pkg_locked(sender);
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ if (!pkg) {
+ dev_err(sender->dev->dev, "No pkg memory\n");
+ return -ENOMEM;
+ }
+
+ pkg->pkg_type = MDFLD_DSI_PKG_GEN_LONG_WRITE;
+ pkg->transmission_type = transmission;
+ pkg->pkg.long_pkg.data = data;
+ pkg->pkg.long_pkg.len = len;
+
+ INIT_LIST_HEAD(&pkg->entry);
+
+ pkg_sender_queue_pkg(sender, pkg, delay);
+
+ return 0;
+}
+
+static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender,
+ struct mdfld_dsi_pkg *pkg,
+ u32 *data,
+ u16 len)
+{
+ unsigned long flags;
+ struct drm_device *dev = sender->dev;
+ int i;
+ u32 gen_data_reg;
+ int retry = MDFLD_DSI_READ_MAX_COUNT;
+ u8 transmission = pkg->transmission_type;
+
+ /*
+ * do reading.
+ * 0) send out generic read request
+ * 1) polling read data avail interrupt
+ * 2) read data
+ */
+ spin_lock_irqsave(&sender->lock, flags);
+
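+ /* Bit 29 is the "Generic Read Data Avail" interrupt; clear any stale
+ * indication before issuing the read request. */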
+ REG_WRITE(sender->mipi_intr_stat_reg, 1 << 29);
+
+ if ((REG_READ(sender->mipi_intr_stat_reg) & (1 << 29)))
+ DRM_ERROR("Can NOT clean read data valid interrupt\n");
+
+ /*send out read request*/
+ send_pkg(sender, pkg);
+
+ pkg_sender_put_pkg_locked(sender, pkg);
+
+ /*polling read data avail interrupt*/
+ while (retry && !(REG_READ(sender->mipi_intr_stat_reg) & (1 << 29))) {
+ udelay(100);
+ retry--;
+ }
+
+ if (!retry) {
+ spin_unlock_irqrestore(&sender->lock, flags);
+ return -ETIMEDOUT;
+ }
+
+ REG_WRITE(sender->mipi_intr_stat_reg, (1 << 29));
+
+ /*read data*/
+ if (transmission == MDFLD_DSI_HS_TRANSMISSION)
+ gen_data_reg = sender->mipi_hs_gen_data_reg;
+ else if (transmission == MDFLD_DSI_LP_TRANSMISSION)
+ gen_data_reg = sender->mipi_lp_gen_data_reg;
+ else {
+ DRM_ERROR("Unknown transmission");
+ spin_unlock_irqrestore(&sender->lock, flags);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < len; i++)
+ *(data + i) = REG_READ(gen_data_reg);
+
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ return 0;
+}
+
+static int mdfld_dsi_read_gen(struct mdfld_dsi_pkg_sender *sender,
+ u8 param0,
+ u8 param1,
+ u8 param_num,
+ u32 *data,
+ u16 len,
+ u8 transmission)
+{
+ struct mdfld_dsi_pkg *pkg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sender->lock, flags);
+
+ pkg = pkg_sender_get_pkg_locked(sender);
+
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ if (!pkg) {
+ dev_err(sender->dev->dev, "No pkg memory\n");
+ return -ENOMEM;
+ }
+
+ switch (param_num) {
+ case 0:
+ pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_0;
+ pkg->pkg.short_pkg.cmd = 0;
+ pkg->pkg.short_pkg.param = 0;
+ break;
+ case 1:
+ pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_1;
+ pkg->pkg.short_pkg.cmd = param0;
+ pkg->pkg.short_pkg.param = 0;
+ break;
+ case 2:
+ pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_2;
+ pkg->pkg.short_pkg.cmd = param0;
+ pkg->pkg.short_pkg.param = param1;
+ break;
+ }
+
+ pkg->transmission_type = transmission;
+
+ INIT_LIST_HEAD(&pkg->entry);
+
+ return __read_panel_data(sender, pkg, data, len);
+}
+
+static int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender,
+ u8 cmd,
+ u32 *data,
+ u16 len,
+ u8 transmission)
+{
+ struct mdfld_dsi_pkg *pkg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sender->lock, flags);
+
+ pkg = pkg_sender_get_pkg_locked(sender);
+
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ if (!pkg) {
+ dev_err(sender->dev->dev, "No pkg memory\n");
+ return -ENOMEM;
+ }
+
+ pkg->pkg_type = MDFLD_DSI_PKG_MCS_READ;
+ pkg->pkg.short_pkg.cmd = cmd;
+ pkg->pkg.short_pkg.param = 0;
+
+ pkg->transmission_type = transmission;
+
+ INIT_LIST_HEAD(&pkg->entry);
+
+ return __read_panel_data(sender, pkg, data, len);
+}
+
+void dsi_controller_dbi_init(struct mdfld_dsi_config * dsi_config, int pipe)
+{
+ struct drm_device * dev = dsi_config->dev;
+ u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+ int lane_count = dsi_config->lane_count;
+ u32 val = 0;
+
+ /*un-ready device*/
+ REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
+
+ /*init dsi adapter before kicking off*/
+ REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
+
+ /*TODO: figure out how to setup these registers*/
+ REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
+ REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset), 0x000a0014);
+ REG_WRITE((MIPIA_DBI_BW_CTRL_REG + reg_offset), 0x00000400);
+ REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000001);
+ REG_WRITE((MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset), 0x00000000);
+
+ /*enable all interrupts*/
+ REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
+ /*max value: 20 clock cycles of txclkesc*/
+ REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x0000001f);
+ /*min 21 txclkesc, max: ffffh*/
+ REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0x0000ffff);
+ /*min: 7d0 max: 4e20*/
+ REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x00000fa0);
+
+ /*set up max return packet size*/
+ REG_WRITE((MIPIA_MAX_RETURN_PACK_SIZE_REG + reg_offset),
+ MDFLD_DSI_MAX_RETURN_PACKET_SIZE);
+
+ /*set up func_prg*/
+ val |= lane_count;
+ val |= (dsi_config->channel_num << DSI_DBI_VIRT_CHANNEL_OFFSET);
+ val |= DSI_DBI_COLOR_FORMAT_OPTION2;
+ REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
+
+ REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 0x3fffff);
+ REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff);
+
+ REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
+ REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
+ REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
+ REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
+}
+
+void dsi_controller_dpi_init(struct mdfld_dsi_config * dsi_config, int pipe)
+{
+ struct drm_device * dev = dsi_config->dev;
+ u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+ int lane_count = dsi_config->lane_count;
+ struct mdfld_dsi_dpi_timing dpi_timing;
+ struct drm_display_mode * mode = dsi_config->mode;
+ u32 val = 0;
+
+ /*un-ready device*/
+ REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
+
+ /*init dsi adapter before kicking off*/
+ REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
+
+ /*enable all interrupts*/
+ REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
+
+ /*set up func_prg*/
+ val |= lane_count;
+ val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
+
+ switch(dsi_config->bpp) {
+ case 16:
+ val |= DSI_DPI_COLOR_FORMAT_RGB565;
+ break;
+ case 18:
+ val |= DSI_DPI_COLOR_FORMAT_RGB666;
+ break;
+ case 24:
+ val |= DSI_DPI_COLOR_FORMAT_RGB888;
+ break;
+ default:
+ DRM_ERROR("unsupported color format, bpp = %d\n", dsi_config->bpp);
+ }
+
+ REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
+
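+ /* HS TX timeout: one frame worth of data per lane
+ * (pixels * bpp / 8 / lane_count), masked to the register width. */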
+ REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset),
+ (mode->vtotal * mode->htotal * dsi_config->bpp / (8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
+ REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff & DSI_LP_RX_TIMEOUT_MASK);
+
+ /*max value: 20 clock cycles of txclkesc*/
+ REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
+
+ /*min 21 txclkesc, max: ffffh*/
+ REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0xffff & DSI_RESET_TIMER_MASK);
+
+ REG_WRITE((MIPIA_DPI_RESOLUTION_REG + reg_offset), mode->vdisplay << 16 | mode->hdisplay);
+
+ /*set DPI timing registers*/
+ mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing, dsi_config->lane_count, dsi_config->bpp);
+
+ REG_WRITE((MIPIA_HSYNC_COUNT_REG + reg_offset), dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE((MIPIA_HBP_COUNT_REG + reg_offset), dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE((MIPIA_HFP_COUNT_REG + reg_offset), dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE((MIPIA_HACTIVE_COUNT_REG + reg_offset), dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE((MIPIA_VSYNC_COUNT_REG + reg_offset), dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE((MIPIA_VBP_COUNT_REG + reg_offset), dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
+ REG_WRITE((MIPIA_VFP_COUNT_REG + reg_offset), dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
+
+ REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
+
+ /*min: 7d0 max: 4e20*/
+ REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x000007d0);
+
+ /*set up video mode*/
+ val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
+ REG_WRITE((MIPIA_VIDEO_MODE_FORMAT_REG + reg_offset), val);
+
+ REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
+
+ REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
+
+ /*TODO: figure out how to setup these registers*/
+ REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
+
+ REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset), (0xa << 16) | 0x14);
+
+ /*set device ready*/
+ REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
+}
+
+static void dsi_controller_init(struct mdfld_dsi_config * dsi_config, int pipe)
+{
+ if (!dsi_config || ((pipe != 0) && (pipe != 2))) {
+ DRM_ERROR("Invalid parameters\n");
+ return;
+ }
+
+ if (dsi_config->type == MDFLD_DSI_ENCODER_DPI)
+ dsi_controller_dpi_init(dsi_config, pipe);
+ else if (dsi_config->type == MDFLD_DSI_ENCODER_DBI)
+ dsi_controller_dbi_init(dsi_config, pipe);
+ else
+ DRM_ERROR("Bad DSI encoder type\n");
+}
+
+void mdfld_dsi_cmds_kick_out(struct mdfld_dsi_pkg_sender *sender)
+{
+ process_pkg_list(sender);
+}
+
+int mdfld_dsi_send_dcs(struct mdfld_dsi_pkg_sender *sender,
+ u8 dcs, u8 *param, u32 param_num, u8 data_src,
+ int delay)
+{
+ struct mdfld_dsi_pkg *pkg;
+ u32 cb_phy;
+ struct drm_device *dev;
+ u32 index = 0;
+ u8 *cb;
+ unsigned long flags;
+ int retry;
+ u8 *dst = NULL;
+ u32 len;
+
+ /* Check the sender before it is dereferenced below */
+ if (!sender) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ cb_phy = sender->dbi_cb_phy;
+ dev = sender->dev;
+ cb = (u8 *)sender->dbi_cb_addr;
+
+ if (!sender->dbi_pkg_support) {
+ dev_err(dev->dev, "No DBI pkg sending on this sender\n");
+ return -ENOTSUPP;
+ }
+
+ if (param_num > MDFLD_MAX_DCS_PARAM) {
+ dev_err(dev->dev, "Sender only supports up to %d DCS params\n",
+ MDFLD_MAX_DCS_PARAM);
+ return -EINVAL;
+ }
+
+ /*
+ * If dcs is write_mem_start, send it directly using DSI adapter
+ * interface
+ */
+ if (dcs == DCS_WRITE_MEM_START) {
+ if (!spin_trylock(&sender->lock))
+ return -EAGAIN;
+
+ /*
+ * query whether the DBI FIFO is empty,
+ * and if not, wait for it to become empty
+ */
+ retry = MDFLD_DSI_DBI_FIFO_TIMEOUT;
+ while (retry &&
+ !(REG_READ(sender->mipi_gen_fifo_stat_reg) & (1 << 27))) {
+ udelay(500);
+ retry--;
+ }
+
+ /* If DBI FIFO timeout, drop this frame */
+ if (!retry) {
+ spin_unlock(&sender->lock);
+ return 0;
+ }
+
+ *(cb + (index++)) = write_mem_start;
+
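+ /* Single byte command; bit 0 starts the DBI transfer and bit 1 sources
+ * the pixel data from the display pipe. */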
+ REG_WRITE(sender->mipi_cmd_len_reg, 1);
+ REG_WRITE(sender->mipi_cmd_addr_reg,
+ cb_phy | (1 << 0) | (1 << 1));
+
+ retry = MDFLD_DSI_DBI_FIFO_TIMEOUT;
+ while (retry &&
+ (REG_READ(sender->mipi_cmd_addr_reg) & (1 << 0))) {
+ udelay(1);
+ retry--;
+ }
+
+ spin_unlock(&sender->lock);
+ return 0;
+ }
+
+ /* Get a free pkg */
+ spin_lock_irqsave(&sender->lock, flags);
+ pkg = pkg_sender_get_pkg_locked(sender);
+ spin_unlock_irqrestore(&sender->lock, flags);
+
+ if (!pkg) {
+ dev_err(dev->dev, "No packages memory\n");
+ return -ENOMEM;
+ }
+
+ dst = pkg->pkg.dcs_pkg.param;
+ memcpy(dst, param, param_num);
+
+ pkg->pkg_type = MDFLD_DSI_PKG_DCS;
+ pkg->transmission_type = MDFLD_DSI_DCS;
+ pkg->pkg.dcs_pkg.cmd = dcs;
+ pkg->pkg.dcs_pkg.param_num = param_num;
+ pkg->pkg.dcs_pkg.data_src = data_src;
+
+ INIT_LIST_HEAD(&pkg->entry);
+
+ if (param_num == 0)
+ return mdfld_dsi_send_mcs_short_hs(sender, dcs, 0, 0, delay);
+ else if (param_num == 1)
+ return mdfld_dsi_send_mcs_short_hs(sender, dcs,
+ param[0], 1, delay);
+ else if (param_num > 1) {
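+ /* Round the command byte plus parameters up to 32-bit words
+ * for the long write */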
+ len = (param_num + 1) / 4;
+ if ((param_num + 1) % 4)
+ len++;
+ return mdfld_dsi_send_mcs_long_hs(sender,
+ (u32 *)&pkg->pkg.dcs_pkg, len, delay);
+ }
+ return 0;
+}
+
+int mdfld_dsi_send_mcs_short_hs(struct mdfld_dsi_pkg_sender *sender,
+ u8 cmd, u8 param, u8 param_num, int delay)
+{
+ if (!sender) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return mdfld_dsi_send_mcs_short(sender, cmd, param, param_num,
+ MDFLD_DSI_HS_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_send_mcs_short_lp(struct mdfld_dsi_pkg_sender *sender,
+ u8 cmd, u8 param, u8 param_num, int delay)
+{
+ if (!sender) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return mdfld_dsi_send_mcs_short(sender, cmd, param, param_num,
+ MDFLD_DSI_LP_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_send_mcs_long_hs(struct mdfld_dsi_pkg_sender *sender,
+ u32 *data,
+ u32 len,
+ int delay)
+{
+ if (!sender || !data || !len) {
+ DRM_ERROR("Invalid parameters\n");
+ return -EINVAL;
+ }
+ return mdfld_dsi_send_mcs_long(sender, data, len,
+ MDFLD_DSI_HS_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_send_mcs_long_lp(struct mdfld_dsi_pkg_sender *sender,
+ u32 *data,
+ u32 len,
+ int delay)
+{
+ if (!sender || !data || !len) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return mdfld_dsi_send_mcs_long(sender, data, len,
+ MDFLD_DSI_LP_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_send_gen_short_hs(struct mdfld_dsi_pkg_sender *sender,
+ u8 param0, u8 param1, u8 param_num, int delay)
+{
+ if (!sender) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return mdfld_dsi_send_gen_short(sender, param0, param1, param_num,
+ MDFLD_DSI_HS_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_send_gen_short_lp(struct mdfld_dsi_pkg_sender *sender,
+ u8 param0, u8 param1, u8 param_num, int delay)
+{
+ if (!sender || param_num > 2) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return mdfld_dsi_send_gen_short(sender, param0, param1, param_num,
+ MDFLD_DSI_LP_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_send_gen_long_hs(struct mdfld_dsi_pkg_sender *sender,
+ u32 *data,
+ u32 len,
+ int delay)
+{
+ if (!sender || !data || !len) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return mdfld_dsi_send_gen_long(sender, data, len,
+ MDFLD_DSI_HS_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_send_gen_long_lp(struct mdfld_dsi_pkg_sender *sender,
+ u32 *data,
+ u32 len,
+ int delay)
+{
+ if (!sender || !data || !len) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return mdfld_dsi_send_gen_long(sender, data, len,
+ MDFLD_DSI_LP_TRANSMISSION, delay);
+}
+
+int mdfld_dsi_read_gen_hs(struct mdfld_dsi_pkg_sender *sender,
+ u8 param0,
+ u8 param1,
+ u8 param_num,
+ u32 *data,
+ u16 len)
+{
+ if (!sender || !data || param_num > 2 || !len) {
+ DRM_ERROR("Invalid parameters\n");
+ return -EINVAL;
+ }
+
+ return mdfld_dsi_read_gen(sender, param0, param1, param_num,
+ data, len, MDFLD_DSI_HS_TRANSMISSION);
+
+}
+
+int mdfld_dsi_read_gen_lp(struct mdfld_dsi_pkg_sender *sender,
+ u8 param0,
+ u8 param1,
+ u8 param_num,
+ u32 *data,
+ u16 len)
+{
+ if (!sender || !data || param_num > 2 || !len) {
+ DRM_ERROR("Invalid parameters\n");
+ return -EINVAL;
+ }
+
+ return mdfld_dsi_read_gen(sender, param0, param1, param_num,
+ data, len, MDFLD_DSI_LP_TRANSMISSION);
+}
+
+int mdfld_dsi_read_mcs_hs(struct mdfld_dsi_pkg_sender *sender,
+ u8 cmd,
+ u32 *data,
+ u16 len)
+{
+ if (!sender || !data || !len) {
+ DRM_ERROR("Invalid parameters\n");
+ return -EINVAL;
+ }
+
+ return mdfld_dsi_read_mcs(sender, cmd, data, len,
+ MDFLD_DSI_HS_TRANSMISSION);
+}
+
+int mdfld_dsi_read_mcs_lp(struct mdfld_dsi_pkg_sender *sender,
+ u8 cmd,
+ u32 *data,
+ u16 len)
+{
+ if (!sender || !data || !len) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return mdfld_dsi_read_mcs(sender, cmd, data, len,
+ MDFLD_DSI_LP_TRANSMISSION);
+}
+
+int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
+ int pipe)
+{
+ int ret;
+ struct mdfld_dsi_pkg_sender *pkg_sender;
+ struct mdfld_dsi_config *dsi_config;
+ struct drm_device *dev;
+ struct drm_psb_private *dev_priv;
+ struct psb_gtt *pg;
+ int i;
+ struct mdfld_dsi_pkg *pkg, *tmp;
+ u32 mipi_val = 0;
+
+ /* Check the connector before it is dereferenced below */
+ if (!dsi_connector) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ dsi_config = mdfld_dsi_get_config(dsi_connector);
+ dev = dsi_config->dev;
+ dev_priv = dev->dev_private;
+ pg = &dev_priv->gtt;
+
+ pkg_sender = dsi_connector->pkg_sender;
+
+ if (!pkg_sender || IS_ERR(pkg_sender)) {
+ pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender),
+ GFP_KERNEL);
+ if (!pkg_sender) {
+ dev_err(dev->dev, "Create DSI pkg sender failed\n");
+ return -ENOMEM;
+ }
+
+ dsi_connector->pkg_sender = (void *)pkg_sender;
+ }
+
+ pkg_sender->dev = dev;
+ pkg_sender->dsi_connector = dsi_connector;
+ pkg_sender->pipe = pipe;
+ pkg_sender->pkg_num = 0;
+ pkg_sender->panel_mode = 0;
+ pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+
+ /* Init dbi command buffer*/
+
+ if (dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
+ pkg_sender->dbi_pkg_support = 1;
+ ret = mdfld_dbi_cb_init(pkg_sender, pg, pipe);
+ if (ret) {
+ dev_err(dev->dev, "DBI command buffer map failed\n");
+ goto mapping_err;
+ }
+ }
+
+ /* Init regs */
+ if (pipe == 0) {
+ pkg_sender->dpll_reg = MRST_DPLL_A;
+ pkg_sender->dspcntr_reg = DSPACNTR;
+ pkg_sender->pipeconf_reg = PIPEACONF;
+ pkg_sender->dsplinoff_reg = DSPALINOFF;
+ pkg_sender->dspsurf_reg = DSPASURF;
+ pkg_sender->pipestat_reg = PIPEASTAT;
+
+ pkg_sender->mipi_intr_stat_reg = MIPIA_INTR_STAT_REG;
+ pkg_sender->mipi_lp_gen_data_reg = MIPIA_LP_GEN_DATA_REG;
+ pkg_sender->mipi_hs_gen_data_reg = MIPIA_HS_GEN_DATA_REG;
+ pkg_sender->mipi_lp_gen_ctrl_reg = MIPIA_LP_GEN_CTRL_REG;
+ pkg_sender->mipi_hs_gen_ctrl_reg = MIPIA_HS_GEN_CTRL_REG;
+ pkg_sender->mipi_gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
+ pkg_sender->mipi_data_addr_reg = MIPIA_DATA_ADD_REG;
+ pkg_sender->mipi_data_len_reg = MIPIA_DATA_LEN_REG;
+ pkg_sender->mipi_cmd_addr_reg = MIPIA_CMD_ADD_REG;
+ pkg_sender->mipi_cmd_len_reg = MIPIA_CMD_LEN_REG;
+ } else if (pipe == 2) {
+ pkg_sender->dpll_reg = MRST_DPLL_A;
+ pkg_sender->dspcntr_reg = DSPCCNTR;
+ pkg_sender->pipeconf_reg = PIPECCONF;
+ pkg_sender->dsplinoff_reg = DSPCLINOFF;
+ pkg_sender->dspsurf_reg = DSPCSURF;
+ pkg_sender->pipestat_reg = PIPECSTAT;
+
+ pkg_sender->mipi_intr_stat_reg =
+ MIPIA_INTR_STAT_REG + MIPIC_REG_OFFSET;
+ pkg_sender->mipi_lp_gen_data_reg =
+ MIPIA_LP_GEN_DATA_REG + MIPIC_REG_OFFSET;
+ pkg_sender->mipi_hs_gen_data_reg =
+ MIPIA_HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
+ pkg_sender->mipi_lp_gen_ctrl_reg =
+ MIPIA_LP_GEN_CTRL_REG + MIPIC_REG_OFFSET;
+ pkg_sender->mipi_hs_gen_ctrl_reg =
+ MIPIA_HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
+ pkg_sender->mipi_gen_fifo_stat_reg =
+ MIPIA_GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
+ pkg_sender->mipi_data_addr_reg =
+ MIPIA_DATA_ADD_REG + MIPIC_REG_OFFSET;
+ pkg_sender->mipi_data_len_reg =
+ MIPIA_DATA_LEN_REG + MIPIC_REG_OFFSET;
+ pkg_sender->mipi_cmd_addr_reg =
+ MIPIA_CMD_ADD_REG + MIPIC_REG_OFFSET;
+ pkg_sender->mipi_cmd_len_reg =
+ MIPIA_CMD_LEN_REG + MIPIC_REG_OFFSET;
+ }
+
+ /* Init pkg list */
+ INIT_LIST_HEAD(&pkg_sender->pkg_list);
+ INIT_LIST_HEAD(&pkg_sender->free_list);
+
+ spin_lock_init(&pkg_sender->lock);
+
+ /* Allocate free pkg pool */
+ for (i = 0; i < MDFLD_MAX_PKG_NUM; i++) {
+ pkg = kzalloc(sizeof(struct mdfld_dsi_pkg), GFP_KERNEL);
+ if (!pkg) {
+			dev_err(dev->dev, "Out of memory allocating pkg pool\n");
+ ret = -ENOMEM;
+ goto pkg_alloc_err;
+ }
+ INIT_LIST_HEAD(&pkg->entry);
+ list_add_tail(&pkg->entry, &pkg_sender->free_list);
+ }
+
+ /*
+ * For video mode, don't enable DPI timing output here,
+ * will init the DPI timing output during mode setting.
+ */
+ if (dsi_config->type == MDFLD_DSI_ENCODER_DPI)
+ mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+ else if (dsi_config->type == MDFLD_DSI_ENCODER_DBI)
+ mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX
+ | TE_TRIGGER_GPIO_PIN;
+ else
+ DRM_ERROR("Bad DSI encoder type\n");
+
+ if (pipe == 0) {
+ mipi_val |= 0x2;
+ REG_WRITE(MIPI, mipi_val);
+ REG_READ(MIPI);
+ } else if (pipe == 2) {
+ REG_WRITE(MIPI_C, mipi_val);
+ REG_READ(MIPI_C);
+ }
+
+ /*do dsi controller init*/
+ dsi_controller_init(dsi_config, pipe);
+
+ return 0;
+
+pkg_alloc_err:
+ list_for_each_entry_safe(pkg, tmp, &pkg_sender->free_list, entry) {
+ list_del(&pkg->entry);
+ kfree(pkg);
+ }
+
+ /* Free mapped command buffer */
+ mdfld_dbi_cb_destroy(pkg_sender);
+mapping_err:
+ kfree(pkg_sender);
+ dsi_connector->pkg_sender = NULL;
+ return ret;
+}
+
+void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender)
+{
+ struct mdfld_dsi_pkg *pkg, *tmp;
+
+ if (!sender || IS_ERR(sender))
+ return;
+
+ /* Free pkg pool */
+ list_for_each_entry_safe(pkg, tmp, &sender->free_list, entry) {
+ list_del(&pkg->entry);
+ kfree(pkg);
+ }
+ /* Free pkg list */
+ list_for_each_entry_safe(pkg, tmp, &sender->pkg_list, entry) {
+ list_del(&pkg->entry);
+ kfree(pkg);
+ }
+ mdfld_dbi_cb_destroy(sender); /* free mapped command buffer */
+ kfree(sender);
+}
diff --git a/drivers/staging/gma500/mdfld_dsi_pkg_sender.h b/drivers/staging/gma500/mdfld_dsi_pkg_sender.h
new file mode 100644
index 00000000000..f24abc70068
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_dsi_pkg_sender.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jackie Li<yaodong.li@intel.com>
+ */
+#ifndef __MDFLD_DSI_PKG_SENDER_H__
+#define __MDFLD_DSI_PKG_SENDER_H__
+
+#include <linux/kthread.h>
+
+#define MDFLD_MAX_DCS_PARAM 8
+#define MDFLD_MAX_PKG_NUM 2048
+
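+/*
+ * DSI packet types. The non-zero values match the standard MIPI DSI data
+ * type IDs (e.g. 0x29 generic long write, 0x39 DCS long write);
+ * MDFLD_DSI_PKG_DCS is a driver-internal value.
+ */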
+enum {
+ MDFLD_DSI_PKG_DCS,
+ MDFLD_DSI_PKG_GEN_SHORT_WRITE_0 = 0x03,
+ MDFLD_DSI_PKG_GEN_SHORT_WRITE_1 = 0x13,
+ MDFLD_DSI_PKG_GEN_SHORT_WRITE_2 = 0x23,
+ MDFLD_DSI_PKG_GEN_READ_0 = 0x04,
+ MDFLD_DSI_PKG_GEN_READ_1 = 0x14,
+ MDFLD_DSI_PKG_GEN_READ_2 = 0x24,
+ MDFLD_DSI_PKG_GEN_LONG_WRITE = 0x29,
+ MDFLD_DSI_PKG_MCS_SHORT_WRITE_0 = 0x05,
+ MDFLD_DSI_PKG_MCS_SHORT_WRITE_1 = 0x15,
+ MDFLD_DSI_PKG_MCS_READ = 0x06,
+ MDFLD_DSI_PKG_MCS_LONG_WRITE = 0x39,
+};
+
+enum {
+ MDFLD_DSI_LP_TRANSMISSION,
+ MDFLD_DSI_HS_TRANSMISSION,
+ MDFLD_DSI_DCS,
+};
+
+enum {
+ MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
+};
+
+enum {
+ MDFLD_DSI_PKG_SENDER_FREE = 0x0,
+ MDFLD_DSI_PKG_SENDER_BUSY = 0x1,
+};
+
+enum {
+ MDFLD_DSI_SEND_PACKAGE,
+ MDFLD_DSI_QUEUE_PACKAGE,
+};
+
+struct mdfld_dsi_gen_short_pkg {
+ u8 cmd;
+ u8 param;
+};
+
+struct mdfld_dsi_gen_long_pkg {
+ u32 *data;
+ u32 len;
+};
+
+struct mdfld_dsi_dcs_pkg {
+ u8 cmd;
+ u8 param[MDFLD_MAX_DCS_PARAM];
+ u32 param_num;
+ u8 data_src;
+};
+
+struct mdfld_dsi_pkg {
+ u8 pkg_type;
+ u8 transmission_type;
+
+ union {
+ struct mdfld_dsi_gen_short_pkg short_pkg;
+ struct mdfld_dsi_gen_long_pkg long_pkg;
+ struct mdfld_dsi_dcs_pkg dcs_pkg;
+ } pkg;
+
+ struct list_head entry;
+};
+
+struct mdfld_dsi_pkg_sender {
+ struct drm_device *dev;
+ struct mdfld_dsi_connector *dsi_connector;
+ u32 status;
+
+ u32 panel_mode;
+
+ int pipe;
+
+ spinlock_t lock;
+ struct list_head pkg_list;
+ struct list_head free_list;
+
+ u32 pkg_num;
+
+ int dbi_pkg_support;
+
+ u32 dbi_cb_phy;
+ void *dbi_cb_addr;
+
+ /* Registers */
+ u32 dpll_reg;
+ u32 dspcntr_reg;
+ u32 pipeconf_reg;
+ u32 pipestat_reg;
+ u32 dsplinoff_reg;
+ u32 dspsurf_reg;
+
+ u32 mipi_intr_stat_reg;
+ u32 mipi_lp_gen_data_reg;
+ u32 mipi_hs_gen_data_reg;
+ u32 mipi_lp_gen_ctrl_reg;
+ u32 mipi_hs_gen_ctrl_reg;
+ u32 mipi_gen_fifo_stat_reg;
+ u32 mipi_data_addr_reg;
+ u32 mipi_data_len_reg;
+ u32 mipi_cmd_addr_reg;
+ u32 mipi_cmd_len_reg;
+};
+
+/* DCS definitions */
+#define DCS_SOFT_RESET 0x01
+#define DCS_ENTER_SLEEP_MODE 0x10
+#define DCS_EXIT_SLEEP_MODE 0x11
+#define DCS_SET_DISPLAY_OFF 0x28
+#define DCS_SET_DISPLAY_ON 0x29
+#define DCS_SET_COLUMN_ADDRESS 0x2a
+#define DCS_SET_PAGE_ADDRESS 0x2b
+#define DCS_WRITE_MEM_START 0x2c
+#define DCS_SET_TEAR_OFF 0x34
+#define DCS_SET_TEAR_ON 0x35
+
+extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
+ int pipe);
+extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
+extern int mdfld_dsi_send_dcs(struct mdfld_dsi_pkg_sender *sender, u8 dcs,
+ u8 *param, u32 param_num, u8 data_src, int delay);
+extern int mdfld_dsi_send_mcs_short_hs(struct mdfld_dsi_pkg_sender *sender,
+ u8 cmd, u8 param, u8 param_num, int delay);
+extern int mdfld_dsi_send_mcs_short_lp(struct mdfld_dsi_pkg_sender *sender,
+ u8 cmd, u8 param, u8 param_num, int delay);
+extern int mdfld_dsi_send_mcs_long_hs(struct mdfld_dsi_pkg_sender *sender,
+ u32 *data, u32 len, int delay);
+extern int mdfld_dsi_send_mcs_long_lp(struct mdfld_dsi_pkg_sender *sender,
+ u32 *data, u32 len, int delay);
+extern int mdfld_dsi_send_gen_short_hs(struct mdfld_dsi_pkg_sender *sender,
+ u8 param0, u8 param1, u8 param_num, int delay);
+extern int mdfld_dsi_send_gen_short_lp(struct mdfld_dsi_pkg_sender *sender,
+ u8 param0, u8 param1, u8 param_num, int delay);
+extern int mdfld_dsi_send_gen_long_hs(struct mdfld_dsi_pkg_sender *sender,
+ u32 *data, u32 len, int delay);
+extern int mdfld_dsi_send_gen_long_lp(struct mdfld_dsi_pkg_sender *sender,
+ u32 *data, u32 len, int delay);
+
+extern int mdfld_dsi_read_gen_hs(struct mdfld_dsi_pkg_sender *sender,
+ u8 param0, u8 param1, u8 param_num, u32 *data, u16 len);
+extern int mdfld_dsi_read_gen_lp(struct mdfld_dsi_pkg_sender *sender,
+ u8 param0, u8 param1, u8 param_num, u32 *data, u16 len);
+extern int mdfld_dsi_read_mcs_hs(struct mdfld_dsi_pkg_sender *sender,
+ u8 cmd, u32 *data, u16 len);
+extern int mdfld_dsi_read_mcs_lp(struct mdfld_dsi_pkg_sender *sender,
+ u8 cmd, u32 *data, u16 len);
+
+extern void mdfld_dsi_cmds_kick_out(struct mdfld_dsi_pkg_sender *sender);
+
+#endif /* __MDFLD_DSI_PKG_SENDER_H__ */
diff --git a/drivers/staging/gma500/mdfld_intel_display.c b/drivers/staging/gma500/mdfld_intel_display.c
new file mode 100644
index 00000000000..aa2ff559383
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_intel_display.c
@@ -0,0 +1,1402 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#include "framebuffer.h"
+#include "psb_intel_display.h"
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_dbi_dpu.h"
+
+#include <linux/pm_runtime.h>
+
+#ifdef MIN
+#undef MIN
+#endif
+
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+
+/* Hardcoded currently */
+static int ksel = KSEL_CRYSTAL_19;
+
+extern void mdfld_save_display(struct drm_device *dev);
+extern bool gbgfxsuspended;
+
+struct psb_intel_range_t {
+ int min, max;
+};
+
+struct mdfld_limit_t {
+ struct psb_intel_range_t dot, m, p1;
+};
+
+struct mdfld_intel_clock_t {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+};
+
+
+
+#define COUNT_MAX 0x10000000
+
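+/*
+ * Wait for the pipe state bit of the given pipe's PIPExCONF register to
+ * clear. During power-on bring-up the poll is bypassed by an early vblank
+ * wait (see the FIXME below).
+ */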
+void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
+{
+ int count, temp;
+ u32 pipeconf_reg = PIPEACONF;
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ pipeconf_reg = PIPEBCONF;
+ break;
+ case 2:
+ pipeconf_reg = PIPECCONF;
+ break;
+ default:
+		DRM_ERROR("Illegal Pipe Number.\n");
+ return;
+ }
+
+	/*
+	 * FIXME JLIU7_PO: during power-on bring-up a vblank wait is used
+	 * instead; the polling loop below is unreachable until the early
+	 * return is removed.
+	 */
+	psb_intel_wait_for_vblank(dev);
+	return;
+
+	/* Wait for the pipe disable to take effect. */
+ for (count = 0; count < COUNT_MAX; count++) {
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_PIPE_STATE) == 0)
+ break;
+ }
+}
+
+void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
+{
+ int count, temp;
+ u32 pipeconf_reg = PIPEACONF;
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ pipeconf_reg = PIPEBCONF;
+ break;
+ case 2:
+ pipeconf_reg = PIPECCONF;
+ break;
+ default:
+ dev_err(dev->dev, "Illegal Pipe Number.\n");
+ return;
+ }
+
+	/*
+	 * FIXME JLIU7_PO: see mdfldWaitForPipeDisable(); the polling loop
+	 * below is unreachable until the early return is removed.
+	 */
+	psb_intel_wait_for_vblank(dev);
+	return;
+
+	/* Wait for the pipe enable to take effect. */
+ for (count = 0; count < COUNT_MAX; count++) {
+ temp = REG_READ(pipeconf_reg);
+		if (temp & PIPEACONF_PIPE_STATE)
+ break;
+ }
+}
+
+
+static int mdfld_intel_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ uint32_t control = CURACNTR;
+ uint32_t base = CURABASE;
+ uint32_t temp;
+ size_t addr = 0;
+ struct gtt_range *gt;
+ struct drm_gem_object *obj;
+ int ret;
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ control = CURBCNTR;
+ base = CURBBASE;
+ break;
+ case 2:
+ control = CURCCNTR;
+ base = CURCBASE;
+ break;
+ default:
+		dev_err(dev->dev, "Illegal Pipe Number.\n");
+ return -EINVAL;
+ }
+
+#if 1 /* FIXME_JLIU7 cannot enable cursor B/C due to a HW issue; remove after the HW fix */
+ if (pipe != 0)
+ return 0;
+#endif
+	/* If we want to turn off the cursor, ignore width and height */
+ if (!handle) {
+ dev_dbg(dev->dev, "cursor off\n");
+ /* turn off the cursor */
+ temp = 0;
+ temp |= CURSOR_MODE_DISABLE;
+
+ if (gma_power_begin(dev, true)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, 0);
+ gma_power_end(dev);
+ }
+ /* Unpin the old GEM object */
+ if (psb_intel_crtc->cursor_obj) {
+ gt = container_of(psb_intel_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+ psb_intel_crtc->cursor_obj = NULL;
+ }
+ return 0;
+ }
+
+ /* Currently we only support 64x64 cursors */
+ if (width != 64 || height != 64) {
+ DRM_ERROR("we currently only support 64x64 cursors\n");
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (obj->size < width * height * 4) {
+		dev_dbg(dev->dev, "buffer is too small\n");
+ return -ENOMEM;
+ }
+
+ gt = container_of(obj, struct gtt_range, gem);
+
+ /* Pin the memory into the GTT */
+ ret = psb_gtt_pin(gt);
+ if (ret) {
+ dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+ return ret;
+ }
+
+
+ addr = gt->offset; /* Or resource.start ??? */
+
+ psb_intel_crtc->cursor_addr = addr;
+
+ temp = 0;
+ /* set the pipe for the cursor */
+ temp |= (pipe << 28);
+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+ if (gma_power_begin(dev, true)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, addr);
+ gma_power_end(dev);
+ }
+	/* Unpin and release the old GEM object, then track the new one */
+	if (psb_intel_crtc->cursor_obj) {
+		gt = container_of(psb_intel_crtc->cursor_obj,
+						struct gtt_range, gem);
+		psb_gtt_unpin(gt);
+		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+	}
+	psb_intel_crtc->cursor_obj = obj;
+ return 0;
+}
+
+static int mdfld_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+	struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+ struct psb_drm_dpu_rect rect;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ uint32_t pos = CURAPOS;
+ uint32_t base = CURABASE;
+ uint32_t temp = 0;
+ uint32_t addr;
+
+ switch (pipe) {
+ case 0:
+ if (dpu_info) {
+ rect.x = x;
+ rect.y = y;
+
+ mdfld_dbi_dpu_report_damage(dev, MDFLD_CURSORA, &rect);
+ mdfld_dpu_exit_dsr(dev);
+ } else if (!(dev_priv->dsr_fb_update & MDFLD_DSR_CURSOR_0))
+ mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_CURSOR_0);
+ break;
+ case 1:
+ pos = CURBPOS;
+ base = CURBBASE;
+ break;
+ case 2:
+		if (dpu_info) {
+			rect.x = x;
+			rect.y = y;
+
+			mdfld_dbi_dpu_report_damage(dev, MDFLD_CURSORC, &rect);
+			mdfld_dpu_exit_dsr(dev);
+ } else if (!(dev_priv->dsr_fb_update & MDFLD_DSR_CURSOR_2))
+ mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_CURSOR_2);
+ pos = CURCPOS;
+ base = CURCBASE;
+ break;
+ default:
+		DRM_ERROR("Illegal Pipe Number.\n");
+ return -EINVAL;
+ }
+
+#if 1 /* FIXME_JLIU7 cannot enable cursor B/C due to a HW issue; remove after the HW fix */
+ if (pipe != 0)
+ return 0;
+#endif
+ if (x < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+ x = -x;
+ }
+ if (y < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+ y = -y;
+ }
+
+ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+ addr = psb_intel_crtc->cursor_addr;
+
+ if (gma_power_begin(dev, true)) {
+ REG_WRITE(pos, temp);
+ REG_WRITE(base, addr);
+ gma_power_end(dev);
+ }
+
+ return 0;
+}
+
+const struct drm_crtc_funcs mdfld_intel_crtc_funcs = {
+ .cursor_set = mdfld_intel_crtc_cursor_set,
+ .cursor_move = mdfld_intel_crtc_cursor_move,
+ .gamma_set = psb_intel_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = psb_intel_crtc_destroy,
+};
+
+static struct drm_device global_dev;
+
+void mdfld__intel_plane_set_alpha(int enable)
+{
+	struct drm_device *dev = &global_dev;
+ int dspcntr_reg = DSPACNTR;
+ u32 dspcntr;
+
+ dspcntr = REG_READ(dspcntr_reg);
+
+ if (enable) {
+ dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA;
+ dspcntr |= DISPPLANE_32BPP;
+ } else {
+ dspcntr &= ~DISPPLANE_32BPP;
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ }
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+}
+
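+/*
+ * Pin the new framebuffer into the GTT, program the plane stride, pixel
+ * format and surface/linear offsets, and unpin the previous framebuffer.
+ * Also snapshots the drm_device into global_dev for
+ * mdfld__intel_plane_set_alpha().
+ */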
+int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ /* struct drm_i915_master_private *master_priv; */
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+ int pipe = psb_intel_crtc->pipe;
+ unsigned long start, offset;
+ int dsplinoff = DSPALINOFF;
+ int dspsurf = DSPASURF;
+ int dspstride = DSPASTRIDE;
+ int dspcntr_reg = DSPACNTR;
+ u32 dspcntr;
+ int ret = 0;
+
+	memcpy(&global_dev, dev, sizeof(struct drm_device));
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ dev_err(dev->dev, "No FB bound\n");
+ goto psb_intel_pipe_cleaner;
+ }
+
+ switch (pipe) {
+ case 0:
+ dsplinoff = DSPALINOFF;
+ break;
+ case 1:
+ dsplinoff = DSPBLINOFF;
+ dspsurf = DSPBSURF;
+ dspstride = DSPBSTRIDE;
+ dspcntr_reg = DSPBCNTR;
+ break;
+ case 2:
+ dsplinoff = DSPCLINOFF;
+ dspsurf = DSPCSURF;
+ dspstride = DSPCSTRIDE;
+ dspcntr_reg = DSPCCNTR;
+ break;
+ default:
+ dev_err(dev->dev, "Illegal Pipe Number.\n");
+ return -EINVAL;
+ }
+
+ ret = psb_gtt_pin(psbfb->gtt);
+ if (ret < 0)
+ goto psb_intel_pipe_set_base_exit;
+
+ start = psbfb->gtt->offset;
+ offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+
+ REG_WRITE(dspstride, crtc->fb->pitch);
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown color depth\n");
+ ret = -EINVAL;
+ goto psb_intel_pipe_set_base_exit;
+ }
+ REG_WRITE(dspcntr_reg, dspcntr);
+
+ dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
+ start, offset, x, y);
+
+ REG_WRITE(dsplinoff, offset);
+ REG_READ(dsplinoff);
+ REG_WRITE(dspsurf, start);
+ REG_READ(dspsurf);
+
+psb_intel_pipe_cleaner:
+ /* If there was a previous display we can now unpin it */
+ if (old_fb)
+ psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+psb_intel_pipe_set_base_exit:
+ gma_power_end(dev);
+ return ret;
+}
+
+/*
+ * Disable the pipe, plane and pll.
+ */
+void mdfld_disable_crtc(struct drm_device *dev, int pipe)
+{
+ int dpll_reg = MRST_DPLL_A;
+ int dspcntr_reg = DSPACNTR;
+ int dspbase_reg = MRST_DSPABASE;
+ int pipeconf_reg = PIPEACONF;
+ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
+ u32 temp;
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ dpll_reg = MDFLD_DPLL_B;
+ dspcntr_reg = DSPBCNTR;
+ dspbase_reg = DSPBSURF;
+ pipeconf_reg = PIPEBCONF;
+ break;
+ case 2:
+ dpll_reg = MRST_DPLL_A;
+ dspcntr_reg = DSPCCNTR;
+ dspbase_reg = MDFLD_DSPCBASE;
+ pipeconf_reg = PIPECCONF;
+ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
+ break;
+ default:
+		dev_err(dev->dev, "Illegal Pipe Number.\n");
+ return;
+ }
+
+	if (pipe != 1)
+		mdfld_dsi_gen_fifo_ready(dev, gen_fifo_stat_reg,
+				HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
+
+ /* Disable display plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(dspcntr_reg,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_READ(dspbase_reg);
+ }
+
+ /* FIXME_JLIU7 MDFLD_PO revisit */
+ /* Wait for vblank for the disable to take effect */
+// MDFLD_PO_JLIU7 psb_intel_wait_for_vblank(dev);
+
+ /* Next, disable display pipes */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ temp &= ~PIPEACONF_ENABLE;
+ temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+ REG_WRITE(pipeconf_reg, temp);
+ REG_READ(pipeconf_reg);
+
+		/* Wait for the pipe disable to take effect. */
+ mdfldWaitForPipeDisable(dev, pipe);
+ }
+
+ temp = REG_READ(dpll_reg);
+ if (temp & DPLL_VCO_ENABLE) {
+ if (((pipe != 1) && !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
+ || (pipe == 1)){
+ temp &= ~(DPLL_VCO_ENABLE);
+ REG_WRITE(dpll_reg, temp);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to turn off. */
+ /* FIXME_MDFLD PO may need more delay */
+ udelay(500);
+
+ if (!(temp & MDFLD_PWR_GATE_EN)) {
+ /* gating power of DPLL */
+ REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(5000);
+ }
+ }
+ }
+
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ int pipe = psb_intel_crtc->pipe;
+ int dpll_reg = MRST_DPLL_A;
+ int dspcntr_reg = DSPACNTR;
+ int dspbase_reg = MRST_DSPABASE;
+ int pipeconf_reg = PIPEACONF;
+ u32 pipestat_reg = PIPEASTAT;
+ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
+ u32 pipeconf = dev_priv->pipeconf;
+ u32 dspcntr = dev_priv->dspcntr;
+ u32 mipi_enable_reg = MIPIA_DEVICE_READY_REG;
+ u32 temp;
+ bool enabled;
+ int timeout = 0;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+	/* Ignore if the system is already in DSR and in the suspended state. */
+	if (/* gbgfxsuspended */ 0 && dev_priv->dispstatus == false && mode == 3) {
+		if (dev_priv->rpm_enabled && pipe == 1) {
+			/* dev_priv->is_mipi_on = false; */
+			pm_request_idle(&dev->pdev->dev);
+		}
+		return;
+	} else if (mode == 0) {
+		/*
+		 * No need to set gbdispstatus = true in the crtc; it is set
+		 * in the encoder, e.g. mdfld_dsi_dbi_dpms().
+		 */
+	}
+
+
+/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */
+/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ dpll_reg = DPLL_B;
+ dspcntr_reg = DSPBCNTR;
+ dspbase_reg = MRST_DSPBBASE;
+ pipeconf_reg = PIPEBCONF;
+ pipeconf = dev_priv->pipeconf1;
+ dspcntr = dev_priv->dspcntr1;
+ dpll_reg = MDFLD_DPLL_B;
+ break;
+ case 2:
+ dpll_reg = MRST_DPLL_A;
+ dspcntr_reg = DSPCCNTR;
+ dspbase_reg = MDFLD_DSPCBASE;
+ pipeconf_reg = PIPECCONF;
+ pipestat_reg = PIPECSTAT;
+ pipeconf = dev_priv->pipeconf2;
+ dspcntr = dev_priv->dspcntr2;
+ gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
+ mipi_enable_reg = MIPIA_DEVICE_READY_REG + MIPIC_REG_OFFSET;
+ break;
+ default:
+ dev_err(dev->dev, "Illegal Pipe Number.\n");
+ return;
+ }
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable the DPLL */
+ temp = REG_READ(dpll_reg);
+
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+			/* When ungating DPLL power, wait 0.5us before enabling the VCO */
+ if (temp & MDFLD_PWR_GATE_EN) {
+ temp &= ~MDFLD_PWR_GATE_EN;
+ REG_WRITE(dpll_reg, temp);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+ }
+
+ REG_WRITE(dpll_reg, temp);
+ REG_READ(dpll_reg);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+
+ REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+ REG_READ(dpll_reg);
+
+ /**
+ * wait for DSI PLL to lock
+ * NOTE: only need to poll status of pipe 0 and pipe 1,
+ * since both MIPI pipes share the same PLL.
+ */
+			while ((pipe != 2) && (timeout < 20000) &&
+			       !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+				udelay(150);
+				timeout++;
+			}
+ }
+
+ /* Enable the plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(dspcntr_reg,
+ temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ }
+
+ /* Enable the pipe */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(pipeconf_reg, pipeconf);
+
+			/* Wait for the pipe enable to take effect. */
+ mdfldWaitForPipeEnable(dev, pipe);
+ }
+
+		/*
+		 * Workaround for sighting 3741701: random X blank display.
+		 * Perform the workaround in video mode only, on pipe A or C.
+		 */
+		if ((pipe == 0 || pipe == 2) && mdfld_panel_dpi(dev)) {
+			REG_WRITE(pipestat_reg, REG_READ(pipestat_reg));
+			msleep(100);
+			if (PIPE_VBLANK_STATUS & REG_READ(pipestat_reg)) {
+				printk(KERN_ALERT "OK");
+			} else {
+				printk(KERN_ALERT "STUCK!!!!");
+ /*shutdown controller*/
+ temp = REG_READ(dspcntr_reg);
+ REG_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ /*mdfld_dsi_dpi_shut_down(dev, pipe);*/
+ REG_WRITE(0xb048, 1);
+ msleep(100);
+ temp = REG_READ(pipeconf_reg);
+ temp &= ~PIPEACONF_ENABLE;
+ REG_WRITE(pipeconf_reg, temp);
+ msleep(100); /*wait for pipe disable*/
+ /*printk(KERN_ALERT "70008 is %x\n", REG_READ(0x70008));
+ printk(KERN_ALERT "b074 is %x\n", REG_READ(0xb074));*/
+ REG_WRITE(mipi_enable_reg, 0);
+ msleep(100);
+ printk(KERN_ALERT "70008 is %x\n", REG_READ(0x70008));
+ printk(KERN_ALERT "b074 is %x\n", REG_READ(0xb074));
+ REG_WRITE(0xb004, REG_READ(0xb004));
+ /* try to bring the controller back up again*/
+ REG_WRITE(mipi_enable_reg, 1);
+ temp = REG_READ(dspcntr_reg);
+ REG_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ /*mdfld_dsi_dpi_turn_on(dev, pipe);*/
+ REG_WRITE(0xb048, 2);
+ msleep(100);
+ temp = REG_READ(pipeconf_reg);
+ temp |= PIPEACONF_ENABLE;
+ REG_WRITE(pipeconf_reg, temp);
+ }
+ }
+
+ psb_intel_crtc_load_lut(crtc);
+
+ /* Give the overlay scaler a chance to enable
+ if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+
+ break;
+ case DRM_MODE_DPMS_OFF:
+ /* Give the overlay scaler a chance to disable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+		if (pipe != 1)
+			mdfld_dsi_gen_fifo_ready(dev, gen_fifo_stat_reg,
+					HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable display plane */
+ temp = REG_READ(dspcntr_reg);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(dspcntr_reg,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_READ(dspbase_reg);
+ }
+
+ /* FIXME_JLIU7 MDFLD_PO revisit */
+ /* Wait for vblank for the disable to take effect */
+// MDFLD_PO_JLIU7 psb_intel_wait_for_vblank(dev);
+
+ /* Next, disable display pipes */
+ temp = REG_READ(pipeconf_reg);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ temp &= ~PIPEACONF_ENABLE;
+ temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+ REG_WRITE(pipeconf_reg, temp);
+// REG_WRITE(pipeconf_reg, 0);
+ REG_READ(pipeconf_reg);
+
+			/* Wait for the pipe disable to take effect. */
+ mdfldWaitForPipeDisable(dev, pipe);
+ }
+
+ temp = REG_READ(dpll_reg);
+ if (temp & DPLL_VCO_ENABLE) {
+ if (((pipe != 1) && !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
+ || (pipe == 1)){
+ temp &= ~(DPLL_VCO_ENABLE);
+ REG_WRITE(dpll_reg, temp);
+ REG_READ(dpll_reg);
+ /* Wait for the clocks to turn off. */
+ /* FIXME_MDFLD PO may need more delay */
+ udelay(500);
+#if 0 /* MDFLD_PO_JLIU7 */
+ if (!(temp & MDFLD_PWR_GATE_EN)) {
+ /* gating power of DPLL */
+ REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(5000);
+ }
+#endif /* MDFLD_PO_JLIU7 */
+ }
+ }
+ break;
+ }
+
+ enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+
+#if 0 /* JB: Add vblank support later */
+ if (enabled)
+ dev_priv->vblank_pipe |= (1 << pipe);
+ else
+ dev_priv->vblank_pipe &= ~(1 << pipe);
+#endif
+
+ gma_power_end(dev);
+}
+
+
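+/*
+ * Indices into mdfld_limits[]: one limit set per combination of PLL type
+ * (DPLL for HDMI, DSI PLL for MIPI) and reference clock / core frequency.
+ */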
+#define MDFLD_LIMT_DPLL_19 0
+#define MDFLD_LIMT_DPLL_25 1
+#define MDFLD_LIMT_DPLL_83 2
+#define MDFLD_LIMT_DPLL_100 3
+#define MDFLD_LIMT_DSIPLL_19 4
+#define MDFLD_LIMT_DSIPLL_25 5
+#define MDFLD_LIMT_DSIPLL_83 6
+#define MDFLD_LIMT_DSIPLL_100 7
+
+#define MDFLD_DOT_MIN 19750 /* FIXME_MDFLD JLIU7 need to find out min & max for MDFLD */
+#define MDFLD_DOT_MAX 120000
+#define MDFLD_DPLL_M_MIN_19 113
+#define MDFLD_DPLL_M_MAX_19 155
+#define MDFLD_DPLL_P1_MIN_19 2
+#define MDFLD_DPLL_P1_MAX_19 10
+#define MDFLD_DPLL_M_MIN_25 101
+#define MDFLD_DPLL_M_MAX_25 130
+#define MDFLD_DPLL_P1_MIN_25 2
+#define MDFLD_DPLL_P1_MAX_25 10
+#define MDFLD_DPLL_M_MIN_83 64
+#define MDFLD_DPLL_M_MAX_83 64
+#define MDFLD_DPLL_P1_MIN_83 2
+#define MDFLD_DPLL_P1_MAX_83 2
+#define MDFLD_DPLL_M_MIN_100 64
+#define MDFLD_DPLL_M_MAX_100 64
+#define MDFLD_DPLL_P1_MIN_100 2
+#define MDFLD_DPLL_P1_MAX_100 2
+#define MDFLD_DSIPLL_M_MIN_19 131
+#define MDFLD_DSIPLL_M_MAX_19 175
+#define MDFLD_DSIPLL_P1_MIN_19 3
+#define MDFLD_DSIPLL_P1_MAX_19 8
+#define MDFLD_DSIPLL_M_MIN_25 97
+#define MDFLD_DSIPLL_M_MAX_25 140
+#define MDFLD_DSIPLL_P1_MIN_25 3
+#define MDFLD_DSIPLL_P1_MAX_25 9
+#define MDFLD_DSIPLL_M_MIN_83 33
+#define MDFLD_DSIPLL_M_MAX_83 92
+#define MDFLD_DSIPLL_P1_MIN_83 2
+#define MDFLD_DSIPLL_P1_MAX_83 3
+#define MDFLD_DSIPLL_M_MIN_100 97
+#define MDFLD_DSIPLL_M_MAX_100 140
+#define MDFLD_DSIPLL_P1_MIN_100 3
+#define MDFLD_DSIPLL_P1_MAX_100 9
+
+static const struct mdfld_limit_t mdfld_limits[] = {
+ { /* MDFLD_LIMT_DPLL_19 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19},
+ .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19},
+ },
+ { /* MDFLD_LIMT_DPLL_25 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25},
+ .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25},
+ },
+ { /* MDFLD_LIMT_DPLL_83 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83},
+ .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83},
+ },
+ { /* MDFLD_LIMT_DPLL_100 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100},
+ .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100},
+ },
+ { /* MDFLD_LIMT_DSIPLL_19 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19},
+ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19},
+ },
+ { /* MDFLD_LIMT_DSIPLL_25 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25},
+ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25},
+ },
+ { /* MDFLD_LIMT_DSIPLL_83 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83},
+ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83},
+ },
+ { /* MDFLD_LIMT_DSIPLL_100 */
+ .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+ .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100},
+ .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100},
+ },
+};
+
+#define MDFLD_M_MIN 21
+#define MDFLD_M_MAX 180
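+/*
+ * The hardware expects the PLL M divisor as a 9-bit LFSR-encoded value; the
+ * table below maps each integer M in [MDFLD_M_MIN, MDFLD_M_MAX] to that
+ * encoding and is indexed by (m - MDFLD_M_MIN).
+ */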
+static const u32 mdfld_m_converts[] = {
+/* M configuration table from 9-bit LFSR table */
+ 224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
+ 173, 342, 171, 85, 298, 149, 74, 37, 18, 265, /* 31 - 40 */
+ 388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
+ 83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */
+ 341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
+ 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
+ 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
+ 71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */
+ 253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
+ 478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
+ 477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
+ 210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */
+ 145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
+ 380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
+ 103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */
+ 396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */
+};
+
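+/*
+ * Select the PLL limit table for the crtc, based on the attached output type
+ * (MIPI vs HDMI), the reference clock selection (ksel) and the SoC core
+ * frequency.
+ */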
+static const struct mdfld_limit_t *mdfld_limit(struct drm_crtc *crtc)
+{
+ const struct mdfld_limit_t *limit = NULL;
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
+ || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
+ if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
+ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
+ else if (ksel == KSEL_BYPASS_25)
+ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25];
+ else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 166))
+ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83];
+ else if ((ksel == KSEL_BYPASS_83_100) &&
+ (dev_priv->core_freq == 100 || dev_priv->core_freq == 200))
+ limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
+ } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+ if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
+ limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
+ else if (ksel == KSEL_BYPASS_25)
+ limit = &mdfld_limits[MDFLD_LIMT_DPLL_25];
+ else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 166))
+ limit = &mdfld_limits[MDFLD_LIMT_DPLL_83];
+ else if ((ksel == KSEL_BYPASS_83_100) &&
+ (dev_priv->core_freq == 100 || dev_priv->core_freq == 200))
+ limit = &mdfld_limits[MDFLD_LIMT_DPLL_100];
+ } else {
+ limit = NULL;
+ dev_err(dev->dev, "mdfld_limit Wrong display type.\n");
+ }
+
+ return limit;
+}
+
+/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+static void mdfld_clock(int refclk, struct mdfld_intel_clock_t *clock)
+{
+ clock->dot = (refclk * clock->m) / clock->p1;
+}
+
+/*
+ * Find divisors for the desired target clock with the given refclk by brute
+ * force over the m and p1 ranges of the limit table, minimising the error
+ * between the resulting dot clock (refclk * m / p1) and the target. Returns
+ * true if a set of divisors was found, false otherwise.
+ */
+static bool
+mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
+ struct mdfld_intel_clock_t *best_clock)
+{
+ struct mdfld_intel_clock_t clock;
+ const struct mdfld_limit_t *limit = mdfld_limit(crtc);
+ int err = target;
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+ for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+ clock.p1++) {
+ int this_err;
+
+ mdfld_clock(refclk, &clock);
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ return err != target;
+}
+
+/**
+ * Return the pipe currently connected to the panel fitter,
+ * or -1 if the panel fitter is not present or not in use
+ */
+static int mdfld_panel_fitter_pipe(struct drm_device *dev)
+{
+ u32 pfit_control;
+
+ pfit_control = REG_READ(PFIT_CONTROL);
+
+ /* See if the panel fitter is in use */
+ if ((pfit_control & PFIT_ENABLE) == 0)
+ return -1;
+ return (pfit_control >> 29) & 3;
+}
+
+static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int pipe = psb_intel_crtc->pipe;
+ int fp_reg = MRST_FPA0;
+ int dpll_reg = MRST_DPLL_A;
+ int dspcntr_reg = DSPACNTR;
+ int pipeconf_reg = PIPEACONF;
+ int htot_reg = HTOTAL_A;
+ int hblank_reg = HBLANK_A;
+ int hsync_reg = HSYNC_A;
+ int vtot_reg = VTOTAL_A;
+ int vblank_reg = VBLANK_A;
+ int vsync_reg = VSYNC_A;
+ int dspsize_reg = DSPASIZE;
+ int dsppos_reg = DSPAPOS;
+ int pipesrc_reg = PIPEASRC;
+ u32 *pipeconf = &dev_priv->pipeconf;
+ u32 *dspcntr = &dev_priv->dspcntr;
+ int refclk = 0;
+ int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, clk_tmp = 0;
+ struct mdfld_intel_clock_t clock;
+ bool ok;
+ u32 dpll = 0, fp = 0;
+ bool is_crt = false, is_lvds = false, is_tv = false;
+ bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct psb_intel_output *psb_intel_output = NULL;
+ uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int timeout = 0;
+
+ dev_dbg(dev->dev, "pipe = 0x%x \n", pipe);
+
+ switch (pipe) {
+ case 0:
+ break;
+ case 1:
+ fp_reg = FPB0;
+ dpll_reg = DPLL_B;
+ dspcntr_reg = DSPBCNTR;
+ pipeconf_reg = PIPEBCONF;
+ htot_reg = HTOTAL_B;
+ hblank_reg = HBLANK_B;
+ hsync_reg = HSYNC_B;
+ vtot_reg = VTOTAL_B;
+ vblank_reg = VBLANK_B;
+ vsync_reg = VSYNC_B;
+ dspsize_reg = DSPBSIZE;
+ dsppos_reg = DSPBPOS;
+ pipesrc_reg = PIPEBSRC;
+ pipeconf = &dev_priv->pipeconf1;
+ dspcntr = &dev_priv->dspcntr1;
+ fp_reg = MDFLD_DPLL_DIV0;
+ dpll_reg = MDFLD_DPLL_B;
+ break;
+ case 2:
+ dpll_reg = MRST_DPLL_A;
+ dspcntr_reg = DSPCCNTR;
+ pipeconf_reg = PIPECCONF;
+ htot_reg = HTOTAL_C;
+ hblank_reg = HBLANK_C;
+ hsync_reg = HSYNC_C;
+ vtot_reg = VTOTAL_C;
+ vblank_reg = VBLANK_C;
+ vsync_reg = VSYNC_C;
+ dspsize_reg = DSPCSIZE;
+ dsppos_reg = DSPCPOS;
+ pipesrc_reg = PIPECSRC;
+ pipeconf = &dev_priv->pipeconf2;
+ dspcntr = &dev_priv->dspcntr2;
+ break;
+ default:
+		DRM_ERROR("Illegal Pipe Number.\n");
+ return 0;
+ }
+
+ dev_dbg(dev->dev, "adjusted_hdisplay = %d\n",
+ adjusted_mode->hdisplay);
+ dev_dbg(dev->dev, "adjusted_vdisplay = %d\n",
+ adjusted_mode->vdisplay);
+ dev_dbg(dev->dev, "adjusted_hsync_start = %d\n",
+ adjusted_mode->hsync_start);
+ dev_dbg(dev->dev, "adjusted_hsync_end = %d\n",
+ adjusted_mode->hsync_end);
+ dev_dbg(dev->dev, "adjusted_htotal = %d\n",
+ adjusted_mode->htotal);
+ dev_dbg(dev->dev, "adjusted_vsync_start = %d\n",
+ adjusted_mode->vsync_start);
+ dev_dbg(dev->dev, "adjusted_vsync_end = %d\n",
+ adjusted_mode->vsync_end);
+ dev_dbg(dev->dev, "adjusted_vtotal = %d\n",
+ adjusted_mode->vtotal);
+ dev_dbg(dev->dev, "adjusted_clock = %d\n",
+ adjusted_mode->clock);
+ dev_dbg(dev->dev, "hdisplay = %d\n",
+ mode->hdisplay);
+ dev_dbg(dev->dev, "vdisplay = %d\n",
+ mode->vdisplay);
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ memcpy(&psb_intel_crtc->saved_mode, mode, sizeof(struct drm_display_mode));
+ memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode, sizeof(struct drm_display_mode));
+
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+
+ encoder = connector->encoder;
+
+		if (!encoder)
+ continue;
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ psb_intel_output = to_psb_intel_output(connector);
+
+ dev_dbg(dev->dev, "output->type = 0x%x \n", psb_intel_output->type);
+
+ switch (psb_intel_output->type) {
+ case INTEL_OUTPUT_LVDS:
+ is_lvds = true;
+ break;
+ case INTEL_OUTPUT_TVOUT:
+ is_tv = true;
+ break;
+ case INTEL_OUTPUT_ANALOG:
+ is_crt = true;
+ break;
+ case INTEL_OUTPUT_MIPI:
+ is_mipi = true;
+ break;
+ case INTEL_OUTPUT_MIPI2:
+ is_mipi2 = true;
+ break;
+ case INTEL_OUTPUT_HDMI:
+ is_hdmi = true;
+ break;
+ }
+ }
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Disable the panel fitter if it was on our pipe */
+ if (mdfld_panel_fitter_pipe(dev) == pipe)
+ REG_WRITE(PFIT_CONTROL, 0);
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ if (pipe == 1) {
+		/*
+		 * FIXME: To make HDMI work with 864x480 (TPO), 480x864 (PYR)
+		 * or 480x854 (TMD), set the sprite width/height and source
+		 * image size registers with the adjusted mode for pipe B.
+		 *
+		 * The defined sprite rectangle must always be completely
+		 * contained within the displayable area of the screen image
+		 * (frame buffer).
+		 */
+ REG_WRITE(dspsize_reg, ((MIN(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
+ | (MIN(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
+ /* Set the CRTC with encoder mode. */
+ REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16)
+ | (mode->crtc_vdisplay - 1));
+ } else {
+ REG_WRITE(dspsize_reg, ((mode->crtc_vdisplay - 1) << 16) | (mode->crtc_hdisplay - 1));
+ REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+ }
+
+ REG_WRITE(dsppos_reg, 0);
+
+ if (psb_intel_output)
+ drm_connector_property_get_value(&psb_intel_output->base,
+ dev->mode_config.scaling_mode_property, &scalingType);
+
+ if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
+		/*
+		 * Moorestown has no register support for centering, so adjust
+		 * the h/vblank and h/vsync start and end values to center the
+		 * image instead.
+		 */
+ int offsetX = 0, offsetY = 0;
+
+ offsetX = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+ offsetY = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
+
+ REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - offsetX - 1) |
+ ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - offsetX - 1) |
+ ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - offsetY - 1) |
+ ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - offsetY - 1) |
+ ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
+ } else {
+ REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ }
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs =
+ crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ /* setup pipeconf */
+ *pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
+
+ /* Set up the display plane register */
+ *dspcntr = REG_READ(dspcntr_reg);
+ *dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
+ *dspcntr |= DISPLAY_PLANE_ENABLE;
+/* MDFLD_PO_JLIU7 dspcntr |= DISPPLANE_BOTTOM; */
+/* MDFLD_PO_JLIU7 dspcntr |= DISPPLANE_GAMMA_ENABLE; */
+
+	if (is_mipi2)
+		goto mrst_crtc_mode_set_exit;
+/* FIXME JLIU7 Add MDFLD HDMI supports */
+/* FIXME_MDFLD JLIU7 DSIPLL clock *= 8? */
+/* FIXME_MDFLD JLIU7 need to revisit for dual MIPI support */
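+	/*
+	 * The PLL target (clk_tmp) is the pixel clock scaled by the reference
+	 * divider (clk_n), the post divider (clk_p2) and, for MIPI outputs,
+	 * the bytes-per-pixel-clock factor (clk_byte).
+	 */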
+ clk = adjusted_mode->clock;
+
+	if (is_hdmi) {
+		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) {
+			refclk = 19200;
+
+			if (is_mipi || is_mipi2)
+				clk_n = 1, clk_p2 = 8;
+			else if (is_hdmi)
+				clk_n = 1, clk_p2 = 10;
+		} else if (ksel == KSEL_BYPASS_25) {
+			refclk = 25000;
+
+			if (is_mipi || is_mipi2)
+				clk_n = 1, clk_p2 = 8;
+			else if (is_hdmi)
+				clk_n = 1, clk_p2 = 10;
+		} else if ((ksel == KSEL_BYPASS_83_100) &&
+			   (dev_priv->core_freq == 166)) {
+			refclk = 83000;
+
+			if (is_mipi || is_mipi2)
+				clk_n = 4, clk_p2 = 8;
+			else if (is_hdmi)
+				clk_n = 4, clk_p2 = 10;
+		} else if ((ksel == KSEL_BYPASS_83_100) &&
+			   (dev_priv->core_freq == 100 ||
+			    dev_priv->core_freq == 200)) {
+			refclk = 100000;
+
+			if (is_mipi || is_mipi2)
+				clk_n = 4, clk_p2 = 8;
+			else if (is_hdmi)
+				clk_n = 4, clk_p2 = 10;
+		}
+
+ if (is_mipi)
+ clk_byte = dev_priv->bpp / 8;
+ else if (is_mipi2)
+ clk_byte = dev_priv->bpp2 / 8;
+
+ clk_tmp = clk * clk_n * clk_p2 * clk_byte;
+
+ dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d. \n", clk, clk_n, clk_p2);
+ dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d. \n", adjusted_mode->clock, clk_tmp);
+
+ ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);
+
+ if (!ok) {
+ dev_err(dev->dev,
+ "mdfldFindBestPLL fail in mdfld_crtc_mode_set. \n");
+ } else {
+ m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];
+
+ dev_dbg(dev->dev, "dot clock = %d,"
+ "m = %d, p1 = %d, m_conv = %d. \n", clock.dot, clock.m,
+ clock.p1, m_conv);
+ }
+
+ dpll = REG_READ(dpll_reg);
+
+ if (dpll & DPLL_VCO_ENABLE) {
+ dpll &= ~DPLL_VCO_ENABLE;
+ REG_WRITE(dpll_reg, dpll);
+ REG_READ(dpll_reg);
+
+ /* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+
+ /* reset M1, N1 & P1 */
+ REG_WRITE(fp_reg, 0);
+ dpll &= ~MDFLD_P1_MASK;
+ REG_WRITE(dpll_reg, dpll);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+ }
+
+	/* When ungating DPLL power, wait 0.5us before enabling the VCO */
+ if (dpll & MDFLD_PWR_GATE_EN) {
+ dpll &= ~MDFLD_PWR_GATE_EN;
+ REG_WRITE(dpll_reg, dpll);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+ }
+
+ dpll = 0;
+
+#if 0 /* FIXME revisit later */
+ if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19) || (ksel == KSEL_BYPASS_25)) {
+ dpll &= ~MDFLD_INPUT_REF_SEL;
+ } else if (ksel == KSEL_BYPASS_83_100) {
+ dpll |= MDFLD_INPUT_REF_SEL;
+ }
+#endif /* FIXME revisit later */
+
+ if (is_hdmi)
+ dpll |= MDFLD_VCO_SEL;
+
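+	/*
+	 * FP register layout used here: clk_n / 2 in the upper half, the
+	 * LFSR-encoded M value (m_conv) in the low bits.
+	 */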
+ fp = (clk_n / 2) << 16;
+ fp |= m_conv;
+
+ /* compute bitmask from p1 value */
+ dpll |= (1 << (clock.p1 - 2)) << 17;
+
+#if 0 /* 1080p30 & 720p */
+ dpll = 0x00050000;
+ fp = 0x000001be;
+#endif
+#if 0 /* 480p */
+ dpll = 0x02010000;
+ fp = 0x000000d2;
+#endif
+	} else {
+#if 0	/* DBI_TPO_480x864, values from the panel spec */
+		dpll = 0x00020000;
+		fp = 0x00000156;
+#endif
+
+		dpll = 0x00800000;
+		fp = 0x000000c1;
+	}
+
+ REG_WRITE(fp_reg, fp);
+ REG_WRITE(dpll_reg, dpll);
+ /* FIXME_MDFLD PO - change 500 to 1 after PO */
+ udelay(500);
+
+ dpll |= DPLL_VCO_ENABLE;
+ REG_WRITE(dpll_reg, dpll);
+ REG_READ(dpll_reg);
+
+ /* wait for DSI PLL to lock */
+	while ((timeout < 20000) &&
+	       !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+		udelay(150);
+		timeout++;
+ }
+
+ if (is_mipi)
+ goto mrst_crtc_mode_set_exit;
+
+ dev_dbg(dev->dev, "is_mipi = 0x%x \n", is_mipi);
+
+ REG_WRITE(pipeconf_reg, *pipeconf);
+ REG_READ(pipeconf_reg);
+
+	/* Wait for the pipe enable to take effect. */
+//FIXME_JLIU7 HDMI mrstWaitForPipeEnable(dev);
+
+ REG_WRITE(dspcntr_reg, *dspcntr);
+ psb_intel_wait_for_vblank(dev);
+
+mrst_crtc_mode_set_exit:
+
+ gma_power_end(dev);
+
+ return 0;
+}
+
+static void mdfld_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void mdfld_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static bool mdfld_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
+ .dpms = mdfld_crtc_dpms,
+ .mode_fixup = mdfld_crtc_mode_fixup,
+ .mode_set = mdfld_crtc_mode_set,
+ .mode_set_base = mdfld__intel_pipe_set_base,
+ .prepare = mdfld_crtc_prepare,
+ .commit = mdfld_crtc_commit,
+};
diff --git a/drivers/staging/gma500/mdfld_msic.h b/drivers/staging/gma500/mdfld_msic.h
new file mode 100644
index 00000000000..a7ad6547249
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_msic.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jim Liu <jim.liu@intel.com>
+ */
+
+#define MSIC_PCI_DEVICE_ID 0x831
+
+int msic_regsiter_driver(void);
+int msic_unregister_driver(void);
+extern void hpd_notify_um(void);
diff --git a/drivers/staging/gma500/mdfld_output.c b/drivers/staging/gma500/mdfld_output.c
new file mode 100644
index 00000000000..ee55f87ba1f
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_output.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+*/
+
+#include <linux/init.h>
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_output.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_dbi_dpu.h"
+
+#include "displays/tpo_cmd.h"
+#include "displays/tpo_vid.h"
+#include "displays/tmd_cmd.h"
+#include "displays/tmd_vid.h"
+#include "displays/pyr_cmd.h"
+#include "displays/pyr_vid.h"
+/* #include "displays/hdmi.h" */
+
+static int mdfld_dual_mipi;
+static int mdfld_hdmi;
+static int mdfld_dpu;
+
+module_param(mdfld_dual_mipi, int, 0600);
+MODULE_PARM_DESC(mdfld_dual_mipi, "Enable dual MIPI configuration");
+module_param(mdfld_hdmi, int, 0600);
+MODULE_PARM_DESC(mdfld_hdmi, "Enable Medfield HDMI");
+module_param(mdfld_dpu, int, 0600);
+MODULE_PARM_DESC(mdfld_dpu, "Enable Medfield DPU");
+
+/* For now a single type per device is all we cope with */
+int mdfld_get_panel_type(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ return dev_priv->panel_id;
+}
+
+int mdfld_panel_dpi(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->panel_id) {
+ case TMD_VID:
+ case TPO_VID:
+ case PYR_VID:
+ return true;
+ case TMD_CMD:
+ case TPO_CMD:
+ case PYR_CMD:
+ default:
+ return false;
+ }
+}
+
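+/*
+ * Allocate the command/video mode callback tables for the panel and register
+ * the DSI output on the given MIPI pipe for the requested panel type.
+ */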
+static int init_panel(struct drm_device *dev, int mipi_pipe, int p_type)
+{
+ struct panel_funcs *p_cmd_funcs;
+ struct panel_funcs *p_vid_funcs;
+
+ /* Oh boy ... FIXME */
+ p_cmd_funcs = kzalloc(sizeof(struct panel_funcs), GFP_KERNEL);
+ if (p_cmd_funcs == NULL)
+ return -ENODEV;
+ p_vid_funcs = kzalloc(sizeof(struct panel_funcs), GFP_KERNEL);
+ if (p_vid_funcs == NULL) {
+ kfree(p_cmd_funcs);
+ return -ENODEV;
+ }
+
+ switch (p_type) {
+ case TPO_CMD:
+ tpo_cmd_init(dev, p_cmd_funcs);
+ mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL);
+ break;
+ case TPO_VID:
+ tpo_vid_init(dev, p_vid_funcs);
+ mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs);
+ break;
+ case TMD_CMD:
+ /*tmd_cmd_init(dev, p_cmd_funcs); */
+ mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL);
+ break;
+ case TMD_VID:
+ tmd_vid_init(dev, p_vid_funcs);
+ mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs);
+ break;
+ case PYR_CMD:
+ pyr_cmd_init(dev, p_cmd_funcs);
+ mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL);
+ break;
+ case PYR_VID:
+ mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs);
+ break;
+ case TPO: /* TPO panel supports both cmd & vid interfaces */
+ tpo_cmd_init(dev, p_cmd_funcs);
+ tpo_vid_init(dev, p_vid_funcs);
+ mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs,
+ p_vid_funcs);
+ break;
+ case TMD:
+ break;
+ case PYR:
+ break;
+#if 0
+ case HDMI:
+ dev_dbg(dev->dev, "Initializing HDMI");
+ mdfld_hdmi_init(dev, &dev_priv->mode_dev);
+ break;
+#endif
+	default:
+		dev_err(dev->dev, "Unsupported interface %d\n", p_type);
+		kfree(p_cmd_funcs);
+		kfree(p_vid_funcs);
+		return -ENODEV;
+ }
+ return 0;
+}
+
+int mdfld_output_init(struct drm_device *dev)
+{
+ int type;
+
+ /* MIPI panel 1 */
+ type = mdfld_get_panel_type(dev, 0);
+ dev_info(dev->dev, "panel 1: type is %d\n", type);
+ init_panel(dev, 0, type);
+
+ if (mdfld_dual_mipi) {
+ /* MIPI panel 2 */
+ type = mdfld_get_panel_type(dev, 2);
+ dev_info(dev->dev, "panel 2: type is %d\n", type);
+ init_panel(dev, 2, type);
+ }
+ if (mdfld_hdmi)
+ /* HDMI panel */
+ init_panel(dev, 0, HDMI);
+ return 0;
+}
+
+void mdfld_output_setup(struct drm_device *dev)
+{
+ /* FIXME: this is not the right place for this stuff ! */
+ if (IS_MFLD(dev)) {
+ if (mdfld_dpu)
+ mdfld_dbi_dpu_init(dev);
+ else
+ mdfld_dbi_dsr_init(dev);
+ }
+}
\ No newline at end of file
diff --git a/drivers/staging/gma500/mdfld_output.h b/drivers/staging/gma500/mdfld_output.h
new file mode 100644
index 00000000000..daf33e7df9d
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_output.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+*/
+
+#ifndef MDFLD_OUTPUT_H
+#define MDFLD_OUTPUT_H
+
+int mdfld_output_init(struct drm_device *dev);
+int mdfld_panel_dpi(struct drm_device *dev);
+int mdfld_get_panel_type(struct drm_device *dev, int pipe);
+void mdfld_disable_crtc (struct drm_device *dev, int pipe);
+
+extern const struct drm_crtc_helper_funcs mdfld_helper_funcs;
+extern const struct drm_crtc_funcs mdfld_intel_crtc_funcs;
+
+extern void mdfld_output_setup(struct drm_device *dev);
+
+#endif
diff --git a/drivers/staging/gma500/mdfld_pyr_cmd.c b/drivers/staging/gma500/mdfld_pyr_cmd.c
new file mode 100644
index 00000000000..523f2d8fe4f
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_pyr_cmd.c
@@ -0,0 +1,558 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+ */
+
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_output.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_dbi_dpu.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+#include "displays/pyr_cmd.h"
+
+static struct drm_display_mode *pyr_cmd_get_config_mode(struct drm_device *dev)
+{
+ struct drm_display_mode *mode;
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode) {
+ dev_err(dev->dev, "Out of memory\n");
+ return NULL;
+ }
+
+ dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
+ dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
+ dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
+ dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
+ dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
+ dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
+ dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
+ dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
+ dev_dbg(dev->dev, "clock is %d\n", mode->clock);
+
+ mode->hdisplay = 480;
+ mode->vdisplay = 864;
+ mode->hsync_start = 487;
+ mode->hsync_end = 490;
+ mode->htotal = 499;
+ mode->vsync_start = 874;
+ mode->vsync_end = 878;
+ mode->vtotal = 886;
+ mode->clock = 25777;
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ return mode;
+}
+
+static bool pyr_dsi_dbi_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_display_mode *fixed_mode = pyr_cmd_get_config_mode(dev);
+
+ if (fixed_mode) {
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+ adjusted_mode->clock = fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ kfree(fixed_mode);
+ }
+ return true;
+}
+
+static void pyr_dsi_dbi_set_power(struct drm_encoder *encoder, bool on)
+{
+ int ret = 0;
+ struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+ struct mdfld_dsi_dbi_output *dbi_output =
+ MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 reg_offset = 0;
+ int pipe = (dbi_output->channel_num == 0) ? 0 : 2;
+
+ dev_dbg(dev->dev, "pipe %d : %s, panel on: %s\n", pipe,
+ on ? "On" : "Off",
+ dbi_output->dbi_panel_on ? "True" : "False");
+
+ if (pipe == 2) {
+ if (on)
+ dev_priv->dual_mipi = true;
+ else
+ dev_priv->dual_mipi = false;
+
+ reg_offset = MIPIC_REG_OFFSET;
+ } else {
+ if (!on)
+ dev_priv->dual_mipi = false;
+ }
+
+ if (!gma_power_begin(dev, true)) {
+ dev_err(dev->dev, "hw begin failed\n");
+ return;
+ }
+
+
+ if (on) {
+ if (dbi_output->dbi_panel_on)
+ goto out_err;
+
+ ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_ON);
+ if (ret) {
+ dev_err(dev->dev, "power on error\n");
+ goto out_err;
+ }
+
+ dbi_output->dbi_panel_on = true;
+
+ if (pipe == 2) {
+ dev_priv->dbi_panel_on2 = true;
+ } else {
+ dev_priv->dbi_panel_on = true;
+ mdfld_enable_te(dev, 0);
+ }
+ } else {
+ if (!dbi_output->dbi_panel_on && !dbi_output->first_boot)
+ goto out_err;
+
+ dbi_output->dbi_panel_on = false;
+ dbi_output->first_boot = false;
+
+ if (pipe == 2) {
+ dev_priv->dbi_panel_on2 = false;
+ mdfld_disable_te(dev, 2);
+ } else {
+ dev_priv->dbi_panel_on = false;
+ mdfld_disable_te(dev, 0);
+
+ if (dev_priv->dbi_panel_on2)
+ mdfld_enable_te(dev, 2);
+ }
+
+ ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_OFF);
+ if (ret) {
+ dev_err(dev->dev, "power on error\n");
+ goto out_err;
+ }
+ }
+
+out_err:
+ gma_power_end(dev);
+
+ if (ret)
+ dev_err(dev->dev, "failed\n");
+}
+
+static void pyr_dsi_controller_dbi_init(struct mdfld_dsi_config *dsi_config,
+ int pipe)
+{
+ struct drm_device *dev = dsi_config->dev;
+ u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+ int lane_count = dsi_config->lane_count;
+ u32 val = 0;
+
+ dev_dbg(dev->dev, "Init DBI interface on pipe %d...\n", pipe);
+
+ /* Un-ready device */
+ REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
+
+ /* Init dsi adapter before kicking off */
+ REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
+
+ /* TODO: figure out how to setup these registers */
+ REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c600F);
+ REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset),
+ 0x000a0014);
+ REG_WRITE((MIPIA_DBI_BW_CTRL_REG + reg_offset), 0x00000400);
+ REG_WRITE((MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset), 0x00000000);
+
+ /* Enable all interrupts */
+ REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
+ /* Max value: 20 clock cycles of txclkesc */
+ REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x0000001f);
+ /* Min 21 txclkesc, max: ffffh */
+ REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0x0000ffff);
+ /* Min: 7d0 max: 4e20 */
+ REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x00000fa0);
+
+ /* Set up func_prg */
+ val |= lane_count;
+ val |= (dsi_config->channel_num << DSI_DBI_VIRT_CHANNEL_OFFSET);
+ val |= DSI_DBI_COLOR_FORMAT_OPTION2;
+ REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
+
+ REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 0x3fffff);
+ REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff);
+
+ /* De-assert dbi_stall when half of DBI FIFO is empty */
+ /* REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000000); */
+
+ REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
+ REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000002);
+ REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
+ REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
+}
+
+static void pyr_dsi_dbi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ int ret = 0;
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+ struct mdfld_dsi_dbi_output *dsi_output =
+ MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_encoder_get_config(dsi_encoder);
+ struct mdfld_dsi_connector *dsi_connector = dsi_config->connector;
+ int pipe = dsi_connector->pipe;
+ u8 param = 0;
+
+ /* Regs */
+ u32 mipi_reg = MIPI;
+ u32 dspcntr_reg = DSPACNTR;
+ u32 pipeconf_reg = PIPEACONF;
+ u32 reg_offset = 0;
+
+ /* Values */
+ u32 dspcntr_val = dev_priv->dspcntr;
+ u32 pipeconf_val = dev_priv->pipeconf;
+ u32 h_active_area = mode->hdisplay;
+ u32 v_active_area = mode->vdisplay;
+ u32 mipi_val = (PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX |
+ TE_TRIGGER_GPIO_PIN);
+
+ dev_dbg(dev->dev, "mipi_val =0x%x\n", mipi_val);
+
+ dev_dbg(dev->dev, "type %s\n", (pipe == 2) ? "MIPI2" : "MIPI");
+ dev_dbg(dev->dev, "h %d v %d\n", mode->hdisplay, mode->vdisplay);
+
+ if (pipe == 2) {
+ mipi_reg = MIPI_C;
+ dspcntr_reg = DSPCCNTR;
+ pipeconf_reg = PIPECCONF;
+
+ reg_offset = MIPIC_REG_OFFSET;
+
+ dspcntr_val = dev_priv->dspcntr2;
+ pipeconf_val = dev_priv->pipeconf2;
+ } else {
+ mipi_val |= 0x2; /* Two lanes for port A and C respectively */
+ }
+
+ if (!gma_power_begin(dev, true)) {
+ dev_err(dev->dev, "hw begin failed\n");
+ return;
+ }
+
+ /* Set up pipe related registers */
+ REG_WRITE(mipi_reg, mipi_val);
+ REG_READ(mipi_reg);
+
+ pyr_dsi_controller_dbi_init(dsi_config, pipe);
+
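+	/* Delay after controller init, presumably to let the DSI link settle */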
+ msleep(20);
+
+ REG_WRITE(dspcntr_reg, dspcntr_val);
+ REG_READ(dspcntr_reg);
+
+ /* 20ms delay before sending exit_sleep_mode */
+ msleep(20);
+
+ /* Send exit_sleep_mode DCS */
+ ret = mdfld_dsi_dbi_send_dcs(dsi_output, exit_sleep_mode, NULL,
+ 0, CMD_DATA_SRC_SYSTEM_MEM);
+ if (ret) {
+ dev_err(dev->dev, "sent exit_sleep_mode faild\n");
+ goto out_err;
+ }
+
+	/* Send set_tear_on DCS (param 0: TE pulse on V-blank only) */
+ ret = mdfld_dsi_dbi_send_dcs(dsi_output, set_tear_on,
+ &param, 1, CMD_DATA_SRC_SYSTEM_MEM);
+ if (ret) {
+ dev_err(dev->dev, "%s - sent set_tear_on faild\n", __func__);
+ goto out_err;
+ }
+
+ /* Do some init stuff */
+ mdfld_dsi_brightness_init(dsi_config, pipe);
+ mdfld_dsi_gen_fifo_ready(dev, (MIPIA_GEN_FIFO_STAT_REG + reg_offset),
+ HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
+
+ REG_WRITE(pipeconf_reg, pipeconf_val | PIPEACONF_DSR);
+ REG_READ(pipeconf_reg);
+
+ /* TODO: this looks ugly, try to move it to CRTC mode setting */
+ if (pipe == 2)
+ dev_priv->pipeconf2 |= PIPEACONF_DSR;
+ else
+ dev_priv->pipeconf |= PIPEACONF_DSR;
+
+ dev_dbg(dev->dev, "pipeconf %x\n", REG_READ(pipeconf_reg));
+
+ ret = mdfld_dsi_dbi_update_area(dsi_output, 0, 0,
+ h_active_area - 1, v_active_area - 1);
+ if (ret) {
+ dev_err(dev->dev, "update area failed\n");
+ goto out_err;
+ }
+
+out_err:
+ gma_power_end(dev);
+
+ if (ret)
+ dev_err(dev->dev, "mode set failed\n");
+ else
+ dev_dbg(dev->dev, "mode set done successfully\n");
+}
+
+static void pyr_dsi_dbi_prepare(struct drm_encoder *encoder)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+ struct mdfld_dsi_dbi_output *dbi_output =
+ MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+
+ dbi_output->mode_flags |= MODE_SETTING_IN_ENCODER;
+ dbi_output->mode_flags &= ~MODE_SETTING_ENCODER_DONE;
+
+ pyr_dsi_dbi_set_power(encoder, false);
+}
+
+static void pyr_dsi_dbi_commit(struct drm_encoder *encoder)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+ struct mdfld_dsi_dbi_output *dbi_output =
+ MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+ struct drm_device *dev = dbi_output->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_drm_dpu_rect rect;
+
+ pyr_dsi_dbi_set_power(encoder, true);
+
+ dbi_output->mode_flags &= ~MODE_SETTING_IN_ENCODER;
+
+ rect.x = rect.y = 0;
+ rect.width = 864;
+ rect.height = 480;
+
+ if (dbi_output->channel_num == 1) {
+ dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_2;
+ /* If DPU enabled report a fullscreen damage */
+ mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, &rect);
+ } else {
+ dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_0;
+ mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, &rect);
+ }
+ dbi_output->mode_flags |= MODE_SETTING_ENCODER_DONE;
+}
+
+static void pyr_dsi_dbi_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+ struct mdfld_dsi_dbi_output *dbi_output =
+ MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+ struct drm_device *dev = dbi_output->dev;
+
+ dev_dbg(dev->dev, "%s\n", (mode == DRM_MODE_DPMS_ON ? "on" : "off"));
+
+ if (mode == DRM_MODE_DPMS_ON)
+ pyr_dsi_dbi_set_power(encoder, true);
+ else
+ pyr_dsi_dbi_set_power(encoder, false);
+}
+
+/*
+ * Update the DBI MIPI Panel Frame Buffer.
+ */
+static void pyr_dsi_dbi_update_fb(struct mdfld_dsi_dbi_output *dbi_output,
+ int pipe)
+{
+ struct mdfld_dsi_pkg_sender *sender =
+ mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
+ struct drm_device *dev = dbi_output->dev;
+ struct drm_crtc *crtc = dbi_output->base.base.crtc;
+ struct psb_intel_crtc *psb_crtc = (crtc) ?
+ to_psb_intel_crtc(crtc) : NULL;
+
+ u32 dpll_reg = MRST_DPLL_A;
+ u32 dspcntr_reg = DSPACNTR;
+ u32 pipeconf_reg = PIPEACONF;
+ u32 dsplinoff_reg = DSPALINOFF;
+ u32 dspsurf_reg = DSPASURF;
+ u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
+ u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
+ u32 reg_offset = 0;
+
+ u32 intr_status;
+ u32 fifo_stat_reg_val;
+ u32 dpll_reg_val;
+ u32 dspcntr_reg_val;
+ u32 pipeconf_reg_val;
+
+ /* If mode setting on-going, back off */
+ if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
+ (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING) ||
+ !(dbi_output->mode_flags & MODE_SETTING_ENCODER_DONE))
+ return;
+
+ /*
+ * Look for errors here. In particular we're checking for whatever
+ * error status might have appeared during the last frame transmit
+ * (memory write).
+ *
+ * Normally, the bits we're testing here would be set infrequently,
+ * if at all. However, one panel (at least) returns at least one
+ * error bit on most frames. So we've disabled the kernel message
+ * for now.
+ *
+ * Still clear whatever error bits are set, except don't clear the
+ * ones that would make the Penwell DSI controller reset if we
+ * cleared them.
+ */
+ intr_status = REG_READ(INTR_STAT_REG);
+ if ((intr_status & 0x26FFFFFF) != 0) {
+ /* dev_err(dev->dev, "DSI status: 0x%08X\n", intr_status); */
+ intr_status &= 0x26F3FFFF;
+ REG_WRITE(INTR_STAT_REG, intr_status);
+ }
+
+ if (pipe == 2) {
+ dspcntr_reg = DSPCCNTR;
+ pipeconf_reg = PIPECCONF;
+ dsplinoff_reg = DSPCLINOFF;
+ dspsurf_reg = DSPCSURF;
+
+ hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
+		gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
+
+ reg_offset = MIPIC_REG_OFFSET;
+ }
+
+ if (!gma_power_begin(dev, true)) {
+ dev_err(dev->dev, "hw begin failed\n");
+ return;
+ }
+
+ fifo_stat_reg_val = REG_READ(MIPIA_GEN_FIFO_STAT_REG + reg_offset);
+ dpll_reg_val = REG_READ(dpll_reg);
+ dspcntr_reg_val = REG_READ(dspcntr_reg);
+ pipeconf_reg_val = REG_READ(pipeconf_reg);
+
+ if (!(fifo_stat_reg_val & (1 << 27)) ||
+ (dpll_reg_val & DPLL_VCO_ENABLE) ||
+ !(dspcntr_reg_val & DISPLAY_PLANE_ENABLE) ||
+ !(pipeconf_reg_val & DISPLAY_PLANE_ENABLE)) {
+ goto update_fb_out0;
+ }
+
+ /* Refresh plane changes */
+ REG_WRITE(dsplinoff_reg, REG_READ(dsplinoff_reg));
+ REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
+ REG_READ(dspsurf_reg);
+
+ mdfld_dsi_send_dcs(sender,
+ write_mem_start,
+ NULL,
+ 0,
+ CMD_DATA_SRC_PIPE,
+ MDFLD_DSI_SEND_PACKAGE);
+
+ /*
+ * The idea here is to transmit a Generic Read command after the
+ * Write Memory Start/Continue commands finish. This asks for
+ * the panel to return an "ACK No Errors," or (if it has errors
+ * to report) an Error Report. This allows us to monitor the
+ * panel's perception of the health of the DSI.
+ */
+ mdfld_dsi_gen_fifo_ready(dev, gen_fifo_stat_reg,
+ HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
+ REG_WRITE(hs_gen_ctrl_reg, (1 << WORD_COUNTS_POS) | GEN_READ_0);
+
+ dbi_output->dsr_fb_update_done = true;
+update_fb_out0:
+ gma_power_end(dev);
+}
+
+/*
+ * TODO: will be removed later, should work out display interfaces for power
+ */
+void pyr_dsi_adapter_init(struct mdfld_dsi_config *dsi_config, int pipe)
+{
+ if (!dsi_config || (pipe != 0 && pipe != 2)) {
+ WARN_ON(1);
+ return;
+ }
+ pyr_dsi_controller_dbi_init(dsi_config, pipe);
+}
+
+static int pyr_cmd_get_panel_info(struct drm_device *dev, int pipe,
+ struct panel_info *pi)
+{
+ if (!dev || !pi)
+ return -EINVAL;
+
+ pi->width_mm = PYR_PANEL_WIDTH;
+ pi->height_mm = PYR_PANEL_HEIGHT;
+
+ return 0;
+}
+
+/* PYR DBI encoder helper funcs */
+static const struct drm_encoder_helper_funcs pyr_dsi_dbi_helper_funcs = {
+ .dpms = pyr_dsi_dbi_dpms,
+ .mode_fixup = pyr_dsi_dbi_mode_fixup,
+ .prepare = pyr_dsi_dbi_prepare,
+ .mode_set = pyr_dsi_dbi_mode_set,
+ .commit = pyr_dsi_dbi_commit,
+};
+
+/* PYR DBI encoder funcs */
+static const struct drm_encoder_funcs mdfld_dsi_dbi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+void pyr_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+{
+ p_funcs->encoder_funcs = &mdfld_dsi_dbi_encoder_funcs;
+ p_funcs->encoder_helper_funcs = &pyr_dsi_dbi_helper_funcs;
+ p_funcs->get_config_mode = &pyr_cmd_get_config_mode;
+ p_funcs->update_fb = pyr_dsi_dbi_update_fb;
+ p_funcs->get_panel_info = pyr_cmd_get_panel_info;
+}
diff --git a/drivers/staging/gma500/mdfld_tmd_vid.c b/drivers/staging/gma500/mdfld_tmd_vid.c
new file mode 100644
index 00000000000..affdc09c676
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_tmd_vid.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jim Liu <jim.liu@intel.com>
+ * Jackie Li <yaodong.li@intel.com>
+ * Gideon Eaton <eaton.
+ * Scott Rowe <scott.m.rowe@intel.com>
+ */
+
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_output.h"
+#include "mdfld_output.h"
+
+#include "mdfld_dsi_pkg_sender.h"
+
+#include "displays/tmd_vid.h"
+
+/* FIXME: static ? */
+struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev)
+{
+ struct drm_display_mode *mode;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
+ bool use_gct = false; /*Disable GCT for now*/
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode) {
+ dev_err(dev->dev, "Out of memory\n");
+ return NULL;
+ }
+
+ if (use_gct) {
+ dev_dbg(dev->dev, "gct find MIPI panel.\n");
+
+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+ mode->hsync_start = mode->hdisplay +
+ ((ti->hsync_offset_hi << 8) |
+ ti->hsync_offset_lo);
+ mode->hsync_end = mode->hsync_start +
+ ((ti->hsync_pulse_width_hi << 8) |
+ ti->hsync_pulse_width_lo);
+ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) |
+ ti->hblank_lo);
+ mode->vsync_start = \
+ mode->vdisplay + ((ti->vsync_offset_hi << 8) |
+ ti->vsync_offset_lo);
+ mode->vsync_end = \
+ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
+ ti->vsync_pulse_width_lo);
+ mode->vtotal = mode->vdisplay +
+ ((ti->vblank_hi << 8) | ti->vblank_lo);
+ mode->clock = ti->pixel_clock * 10;
+
+ dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
+ dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
+ dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
+ dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
+ dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
+ dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
+ dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
+ dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
+ dev_dbg(dev->dev, "clock is %d\n", mode->clock);
+ } else {
+ mode->hdisplay = 480;
+ mode->vdisplay = 854;
+ mode->hsync_start = 487;
+ mode->hsync_end = 490;
+ mode->htotal = 499;
+ mode->vsync_start = 861;
+ mode->vsync_end = 865;
+ mode->vtotal = 873;
+ mode->clock = 33264;
+ }
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ return mode;
+}
+
+static int tmd_vid_get_panel_info(struct drm_device *dev,
+ int pipe,
+ struct panel_info *pi)
+{
+ if (!dev || !pi)
+ return -EINVAL;
+
+ pi->width_mm = TMD_PANEL_WIDTH;
+ pi->height_mm = TMD_PANEL_HEIGHT;
+
+ return 0;
+}
+
+/*
+ * mdfld_dsi_tmd_drv_ic_init - initialise the TMD panel driver IC
+ * @dsi_config: configuration
+ * @pipe: pipe to configure
+ *
+ * This function is called only by mrst_dsi_mode_set and
+ * restore_display_registers. Since this function does not
+ * acquire the mutex, it is important that the calling function
+ * does!
+ */
+
+static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config,
+ int pipe)
+{
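+	/*
+	 * Panel init command sequences. Each 32-bit word appears to be packed
+	 * LSB first, so the low byte carries the DSI command and the upper
+	 * bytes its parameters (e.g. 0x000000b2 looks like command 0xb2 with
+	 * zero parameters).
+	 */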
+ static u32 tmd_cmd_mcap_off[] = {0x000000b2};
+ static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef};
+ static u32 tmd_cmd_set_lane_num[] = {0x006360ef};
+ static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef};
+ static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef};
+ static u32 tmd_cmd_set_mode[] = {0x000000b3};
+ static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef};
+ static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df};
+ static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055};
+ static u32 tmd_cmd_set_video_mode[] = {0x00000153};
+	/* No automatic backlight control yet; to be added in the future */
+ static u32 tmd_cmd_enable_backlight[] = {0x00005ab4};
+ static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd};
+
+ struct mdfld_dsi_pkg_sender *sender
+ = mdfld_dsi_get_pkg_sender(dsi_config);
+
+ DRM_INFO("Enter mdfld init TMD MIPI display.\n");
+
+ if (!sender) {
+ DRM_ERROR("Cannot get sender\n");
+ return;
+ }
+
+ if (dsi_config->dvr_ic_inited)
+ return;
+
+ msleep(3);
+
+ mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_mcap_off, 1, 0);
+ mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_enable_lane_switch, 1, 0);
+ mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_lane_num, 1, 0);
+ mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_pushing_clock0, 1, 0);
+ mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_pushing_clock1, 1, 0);
+ mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_mode, 1, 0);
+ mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_sync_pulse_mode, 1, 0);
+ mdfld_dsi_send_mcs_long_lp(sender, tmd_cmd_set_column, 2, 0);
+ mdfld_dsi_send_mcs_long_lp(sender, tmd_cmd_set_page, 2, 0);
+ mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_video_mode, 1, 0);
+ mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_enable_backlight, 1, 0);
+ mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_backlight_dimming, 1, 0);
+
+ dsi_config->dvr_ic_inited = 1;
+}
+
+/* TMD DPI encoder helper funcs */
+static const struct drm_encoder_helper_funcs
+ mdfld_tpo_dpi_encoder_helper_funcs = {
+ .dpms = mdfld_dsi_dpi_dpms,
+ .mode_fixup = mdfld_dsi_dpi_mode_fixup,
+ .prepare = mdfld_dsi_dpi_prepare,
+ .mode_set = mdfld_dsi_dpi_mode_set,
+ .commit = mdfld_dsi_dpi_commit,
+};
+
+/* TMD DPI encoder funcs */
+static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+void tmd_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+{
+ if (!dev || !p_funcs) {
+ dev_err(dev->dev, "Invalid parameters\n");
+ return;
+ }
+
+ p_funcs->encoder_funcs = &mdfld_tpo_dpi_encoder_funcs;
+ p_funcs->encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs;
+ p_funcs->get_config_mode = &tmd_vid_get_config_mode;
+ p_funcs->update_fb = NULL;
+ p_funcs->get_panel_info = tmd_vid_get_panel_info;
+ p_funcs->reset = mdfld_dsi_panel_reset;
+ p_funcs->drv_ic_init = mdfld_dsi_tmd_drv_ic_init;
+}
diff --git a/drivers/staging/gma500/mdfld_tpo_cmd.c b/drivers/staging/gma500/mdfld_tpo_cmd.c
new file mode 100644
index 00000000000..c7f7c9c19bc
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_tpo_cmd.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Eaton <thomas.g.eaton@intel.com>
+ * Scott Rowe <scott.m.rowe@intel.com>
+ */
+
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_output.h"
+#include "mdfld_output.h"
+#include "mdfld_dsi_dbi_dpu.h"
+#include "mdfld_dsi_pkg_sender.h"
+
+#include "displays/tpo_cmd.h"
+
+static struct drm_display_mode *tpo_cmd_get_config_mode(struct drm_device *dev)
+{
+ struct drm_display_mode *mode;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
+ bool use_gct = false;
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ return NULL;
+
+ if (use_gct) {
+ dev_dbg(dev->dev, "gct find MIPI panel.\n");
+
+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+ mode->hsync_start = mode->hdisplay + \
+ ((ti->hsync_offset_hi << 8) | \
+ ti->hsync_offset_lo);
+ mode->hsync_end = mode->hsync_start + \
+ ((ti->hsync_pulse_width_hi << 8) | \
+ ti->hsync_pulse_width_lo);
+ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
+ ti->hblank_lo);
+ mode->vsync_start = \
+ mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
+ ti->vsync_offset_lo);
+ mode->vsync_end = \
+ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
+ ti->vsync_pulse_width_lo);
+ mode->vtotal = mode->vdisplay + \
+ ((ti->vblank_hi << 8) | ti->vblank_lo);
+ mode->clock = ti->pixel_clock * 10;
+
+ dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
+ dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
+ dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
+ dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
+ dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
+ dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
+ dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
+ dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
+ dev_dbg(dev->dev, "clock is %d\n", mode->clock);
+ } else {
+ mode->hdisplay = 864;
+ mode->vdisplay = 480;
+ mode->hsync_start = 872;
+ mode->hsync_end = 876;
+ mode->htotal = 884;
+ mode->vsync_start = 482;
+ mode->vsync_end = 494;
+ mode->vtotal = 486;
+ mode->clock = 25777;
+ }
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ return mode;
+}
+
+static bool mdfld_dsi_dbi_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_display_mode *fixed_mode = tpo_cmd_get_config_mode(dev);
+
+ if (fixed_mode) {
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+ adjusted_mode->clock = fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ kfree(fixed_mode);
+ }
+ return true;
+}
+
+static void mdfld_dsi_dbi_set_power(struct drm_encoder *encoder, bool on)
+{
+ int ret = 0;
+ struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+ struct mdfld_dsi_dbi_output *dbi_output =
+ MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_encoder_get_config(dsi_encoder);
+ struct mdfld_dsi_pkg_sender *sender =
+ mdfld_dsi_encoder_get_pkg_sender(dsi_encoder);
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 reg_offset = 0;
+ int pipe = (dbi_output->channel_num == 0) ? 0 : 2;
+ u32 data = 0;
+
+ dev_dbg(dev->dev, "pipe %d : %s, panel on: %s\n",
+ pipe, on ? "On" : "Off",
+ dbi_output->dbi_panel_on ? "True" : "False");
+
+ if (pipe == 2) {
+ if (on)
+ dev_priv->dual_mipi = true;
+ else
+ dev_priv->dual_mipi = false;
+ reg_offset = MIPIC_REG_OFFSET;
+ } else {
+ if (!on)
+ dev_priv->dual_mipi = false;
+ }
+
+ if (!gma_power_begin(dev, true)) {
+ dev_err(dev->dev, "hw begin failed\n");
+ return;
+ }
+
+ if (on) {
+ if (dbi_output->dbi_panel_on)
+ goto out_err;
+
+ ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_ON);
+ if (ret) {
+ dev_err(dev->dev, "power on error\n");
+ goto out_err;
+ }
+
+ dbi_output->dbi_panel_on = true;
+
+ if (pipe == 2)
+ dev_priv->dbi_panel_on2 = true;
+ else
+ dev_priv->dbi_panel_on = true;
+ mdfld_enable_te(dev, pipe);
+ } else {
+ if (!dbi_output->dbi_panel_on && !dbi_output->first_boot)
+ goto out_err;
+
+ dbi_output->dbi_panel_on = false;
+ dbi_output->first_boot = false;
+
+ if (pipe == 2)
+ dev_priv->dbi_panel_on2 = false;
+ else
+ dev_priv->dbi_panel_on = false;
+
+ mdfld_disable_te(dev, pipe);
+
+ ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_OFF);
+ if (ret) {
+ dev_err(dev->dev, "power on error\n");
+ goto out_err;
+ }
+ }
+
+ /*
+	 * FIXME: this is a workaround for a TPO panel crash after roughly
+	 * 83 DPMS on/off cycles. The root cause is that the booster in the
+	 * driver IC crashes. The workaround lets us resume the driver IC
+	 * once we detect that the booster has failed.
+ */
+ mdfld_dsi_get_power_mode(dsi_config,
+ &data,
+ MDFLD_DSI_HS_TRANSMISSION);
+
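+	/*
+	 * Bit 7 of the DCS power mode reply is the booster status; if the
+	 * booster dropped out while turning the panel on, soft reset and
+	 * re-init the driver IC.
+	 */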
+ if (on && data && !(data & (1 << 7))) {
+ /* Soft reset */
+ mdfld_dsi_send_dcs(sender,
+ DCS_SOFT_RESET,
+ NULL,
+ 0,
+ CMD_DATA_SRC_PIPE,
+ MDFLD_DSI_SEND_PACKAGE);
+
+ /* Init drvIC */
+ if (dbi_output->p_funcs->drv_ic_init)
+ dbi_output->p_funcs->drv_ic_init(dsi_config,
+ pipe);
+ }
+
+out_err:
+ gma_power_end(dev);
+ if (ret)
+ dev_err(dev->dev, "failed\n");
+}
+
+
+static void mdfld_dsi_dbi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ int ret = 0;
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+ struct mdfld_dsi_dbi_output *dsi_output =
+ MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+ struct mdfld_dsi_config *dsi_config =
+ mdfld_dsi_encoder_get_config(dsi_encoder);
+ struct mdfld_dsi_connector *dsi_connector = dsi_config->connector;
+ int pipe = dsi_connector->pipe;
+ u8 param = 0;
+
+ /* Regs */
+ u32 mipi_reg = MIPI;
+ u32 dspcntr_reg = DSPACNTR;
+ u32 pipeconf_reg = PIPEACONF;
+ u32 reg_offset = 0;
+
+ /* Values */
+ u32 dspcntr_val = dev_priv->dspcntr;
+ u32 pipeconf_val = dev_priv->pipeconf;
+ u32 h_active_area = mode->hdisplay;
+ u32 v_active_area = mode->vdisplay;
+ u32 mipi_val;
+
+ mipi_val = (PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX |
+ TE_TRIGGER_GPIO_PIN);
+
+ dev_dbg(dev->dev, "mipi_val =0x%x\n", mipi_val);
+
+ dev_dbg(dev->dev, "type %s\n", (pipe == 2) ? "MIPI2" : "MIPI");
+ dev_dbg(dev->dev, "h %d v %d\n", mode->hdisplay, mode->vdisplay);
+
+ if (pipe == 2) {
+ mipi_reg = MIPI_C;
+ dspcntr_reg = DSPCCNTR;
+ pipeconf_reg = PIPECCONF;
+
+ reg_offset = MIPIC_REG_OFFSET;
+
+ dspcntr_val = dev_priv->dspcntr2;
+ pipeconf_val = dev_priv->pipeconf2;
+ } else {
+ mipi_val |= 0x2; /*two lanes for port A and C respectively*/
+ }
+
+ if (!gma_power_begin(dev, true)) {
+ dev_err(dev->dev, "hw begin failed\n");
+ return;
+ }
+
+ REG_WRITE(dspcntr_reg, dspcntr_val);
+ REG_READ(dspcntr_reg);
+
+ /* 20ms delay before sending exit_sleep_mode */
+ msleep(20);
+
+ /* Send exit_sleep_mode DCS */
+ ret = mdfld_dsi_dbi_send_dcs(dsi_output, DCS_EXIT_SLEEP_MODE,
+ NULL, 0, CMD_DATA_SRC_SYSTEM_MEM);
+ if (ret) {
+ dev_err(dev->dev, "sent exit_sleep_mode faild\n");
+ goto out_err;
+ }
+
+ /* Send set_tear_on DCS */
+ ret = mdfld_dsi_dbi_send_dcs(dsi_output, DCS_SET_TEAR_ON,
+ &param, 1, CMD_DATA_SRC_SYSTEM_MEM);
+ if (ret) {
+ dev_err(dev->dev, "%s - sent set_tear_on faild\n", __func__);
+ goto out_err;
+ }
+
+ /* Do some init stuff */
+ REG_WRITE(pipeconf_reg, pipeconf_val | PIPEACONF_DSR);
+ REG_READ(pipeconf_reg);
+
+ /* TODO: this looks ugly, try to move it to CRTC mode setting*/
+ if (pipe == 2)
+ dev_priv->pipeconf2 |= PIPEACONF_DSR;
+ else
+ dev_priv->pipeconf |= PIPEACONF_DSR;
+
+ dev_dbg(dev->dev, "pipeconf %x\n", REG_READ(pipeconf_reg));
+
+ ret = mdfld_dsi_dbi_update_area(dsi_output, 0, 0,
+ h_active_area - 1, v_active_area - 1);
+ if (ret) {
+ dev_err(dev->dev, "update area failed\n");
+ goto out_err;
+ }
+
+out_err:
+ gma_power_end(dev);
+
+ if (ret)
+ dev_err(dev->dev, "mode set failed\n");
+}
+
+static void mdfld_dsi_dbi_prepare(struct drm_encoder *encoder)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+ struct mdfld_dsi_dbi_output *dbi_output
+ = MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+
+ dbi_output->mode_flags |= MODE_SETTING_IN_ENCODER;
+ dbi_output->mode_flags &= ~MODE_SETTING_ENCODER_DONE;
+
+ mdfld_dsi_dbi_set_power(encoder, false);
+}
+
+static void mdfld_dsi_dbi_commit(struct drm_encoder *encoder)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+ struct mdfld_dsi_dbi_output *dbi_output =
+ MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+ struct drm_device *dev = dbi_output->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_drm_dpu_rect rect;
+
+ mdfld_dsi_dbi_set_power(encoder, true);
+ dbi_output->mode_flags &= ~MODE_SETTING_IN_ENCODER;
+
+ rect.x = rect.y = 0;
+ rect.width = 864;
+ rect.height = 480;
+
+ if (dbi_output->channel_num == 1) {
+ dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_2;
+ /*if dpu enabled report a fullscreen damage*/
+ mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, &rect);
+ } else {
+ dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_0;
+ mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, &rect);
+ }
+ dbi_output->mode_flags |= MODE_SETTING_ENCODER_DONE;
+}
+
+static void mdfld_dsi_dbi_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+ struct mdfld_dsi_dbi_output *dbi_output
+ = MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+ struct drm_device *dev = dbi_output->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
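+	/* Remembers whether a previous DPMS call switched the display off */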
+ static bool bdispoff;
+
+ dev_dbg(dev->dev, "%s\n", (mode == DRM_MODE_DPMS_ON ? "on" : "off"));
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ /*
+ * FIXME: in case I am wrong!
+ * we don't need to exit dsr here to wake up plane/pipe/pll
+ * if everything goes right, hw_begin will resume them all
+ * during set_power.
+ */
+ if (bdispoff /* FIXME && gbgfxsuspended */) {
+ mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_2D_3D);
+ bdispoff = false;
+ dev_priv->dispstatus = true;
+ }
+
+ mdfld_dsi_dbi_set_power(encoder, true);
+ /* FIXME if (gbgfxsuspended)
+ gbgfxsuspended = false; */
+ } else {
+ /*
+ * I am not sure whether this is the perfect place to
+		 * turn rpm on, since we still have a lot of CRTC turn-on
+		 * work to do.
+ */
+ bdispoff = true;
+ dev_priv->dispstatus = false;
+ mdfld_dsi_dbi_set_power(encoder, false);
+ }
+}
+
+
+/*
+ * Update the DBI MIPI Panel Frame Buffer.
+ */
+static void mdfld_dsi_dbi_update_fb(struct mdfld_dsi_dbi_output *dbi_output,
+ int pipe)
+{
+ struct mdfld_dsi_pkg_sender *sender =
+ mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
+ struct drm_device *dev = dbi_output->dev;
+ struct drm_crtc *crtc = dbi_output->base.base.crtc;
+ struct psb_intel_crtc *psb_crtc = (crtc) ?
+ to_psb_intel_crtc(crtc) : NULL;
+ u32 dpll_reg = MRST_DPLL_A;
+ u32 dspcntr_reg = DSPACNTR;
+ u32 pipeconf_reg = PIPEACONF;
+ u32 dsplinoff_reg = DSPALINOFF;
+ u32 dspsurf_reg = DSPASURF;
+ u32 reg_offset = 0;
+
+ /* If mode setting on-going, back off */
+ if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
+ (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING) ||
+ !(dbi_output->mode_flags & MODE_SETTING_ENCODER_DONE))
+ return;
+
+ if (pipe == 2) {
+ dspcntr_reg = DSPCCNTR;
+ pipeconf_reg = PIPECCONF;
+ dsplinoff_reg = DSPCLINOFF;
+ dspsurf_reg = DSPCSURF;
+ reg_offset = MIPIC_REG_OFFSET;
+ }
+
+ if (!gma_power_begin(dev, true)) {
+ dev_err(dev->dev, "hw begin failed\n");
+ return;
+ }
+
+	/* Bail out unless the DPLL, display plane and pipe are all enabled */
+ if (!(REG_READ(dpll_reg) & DPLL_VCO_ENABLE) ||
+ !(REG_READ(dspcntr_reg) & DISPLAY_PLANE_ENABLE) ||
+ !(REG_READ(pipeconf_reg) & DISPLAY_PLANE_ENABLE))
+ goto update_fb_out0;
+
+ /* Refresh plane changes */
+ REG_WRITE(dsplinoff_reg, REG_READ(dsplinoff_reg));
+ REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
+ REG_READ(dspsurf_reg);
+
+ mdfld_dsi_send_dcs(sender,
+ DCS_WRITE_MEM_START,
+ NULL,
+ 0,
+ CMD_DATA_SRC_PIPE,
+ MDFLD_DSI_SEND_PACKAGE);
+
+ dbi_output->dsr_fb_update_done = true;
+update_fb_out0:
+ gma_power_end(dev);
+}
+
+static int tpo_cmd_get_panel_info(struct drm_device *dev,
+ int pipe,
+ struct panel_info *pi)
+{
+ if (!dev || !pi)
+ return -EINVAL;
+
+ pi->width_mm = TPO_PANEL_WIDTH;
+ pi->height_mm = TPO_PANEL_HEIGHT;
+
+ return 0;
+}
+
+
+/* TPO DBI encoder helper funcs */
+static const struct drm_encoder_helper_funcs mdfld_dsi_dbi_helper_funcs = {
+ .dpms = mdfld_dsi_dbi_dpms,
+ .mode_fixup = mdfld_dsi_dbi_mode_fixup,
+ .prepare = mdfld_dsi_dbi_prepare,
+ .mode_set = mdfld_dsi_dbi_mode_set,
+ .commit = mdfld_dsi_dbi_commit,
+};
+
+/* TPO DBI encoder funcs */
+static const struct drm_encoder_funcs mdfld_dsi_dbi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+void tpo_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+{
+ p_funcs->encoder_funcs = &mdfld_dsi_dbi_encoder_funcs;
+ p_funcs->encoder_helper_funcs = &mdfld_dsi_dbi_helper_funcs;
+ p_funcs->get_config_mode = &tpo_cmd_get_config_mode;
+ p_funcs->update_fb = mdfld_dsi_dbi_update_fb;
+ p_funcs->get_panel_info = tpo_cmd_get_panel_info;
+ p_funcs->reset = mdfld_dsi_panel_reset;
+ p_funcs->drv_ic_init = mdfld_dsi_brightness_init;
+}
diff --git a/drivers/staging/gma500/mdfld_tpo_vid.c b/drivers/staging/gma500/mdfld_tpo_vid.c
new file mode 100644
index 00000000000..95490175176
--- /dev/null
+++ b/drivers/staging/gma500/mdfld_tpo_vid.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * jim liu <jim.liu@intel.com>
+ * Jackie Li <yaodong.li@intel.com>
+ */
+
+#include "mdfld_dsi_dbi.h"
+#include "mdfld_dsi_dpi.h"
+#include "mdfld_dsi_output.h"
+#include "mdfld_output.h"
+
+#include "mdfld_dsi_pkg_sender.h"
+
+#include "displays/tpo_vid.h"
+
+static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev)
+{
+ struct drm_display_mode *mode;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
+ bool use_gct = false;
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode) {
+ dev_err(dev->dev, "out of memory\n");
+ return NULL;
+ }
+
+ if (use_gct) {
+ mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+ mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+ mode->hsync_start = mode->hdisplay + \
+ ((ti->hsync_offset_hi << 8) | \
+ ti->hsync_offset_lo);
+ mode->hsync_end = mode->hsync_start + \
+ ((ti->hsync_pulse_width_hi << 8) | \
+ ti->hsync_pulse_width_lo);
+ mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
+ ti->hblank_lo);
+ mode->vsync_start = \
+ mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
+ ti->vsync_offset_lo);
+ mode->vsync_end = \
+ mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
+ ti->vsync_pulse_width_lo);
+ mode->vtotal = mode->vdisplay + \
+ ((ti->vblank_hi << 8) | ti->vblank_lo);
+ mode->clock = ti->pixel_clock * 10;
+
+ dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
+ dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
+ dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
+ dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
+ dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
+ dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
+ dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
+ dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
+ dev_dbg(dev->dev, "clock is %d\n", mode->clock);
+ } else {
+ mode->hdisplay = 864;
+ mode->vdisplay = 480;
+ mode->hsync_start = 873;
+ mode->hsync_end = 876;
+ mode->htotal = 887;
+ mode->vsync_start = 487;
+ mode->vsync_end = 490;
+ mode->vtotal = 499;
+ mode->clock = 33264;
+ }
+
+ drm_mode_set_name(mode);
+ drm_mode_set_crtcinfo(mode, 0);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ return mode;
+}
+
+static int tpo_vid_get_panel_info(struct drm_device *dev,
+ int pipe,
+ struct panel_info *pi)
+{
+ if (!dev || !pi)
+ return -EINVAL;
+
+ pi->width_mm = TPO_PANEL_WIDTH;
+ pi->height_mm = TPO_PANEL_HEIGHT;
+
+ return 0;
+}
+
+/*TPO DPI encoder helper funcs*/
+static const struct drm_encoder_helper_funcs
+ mdfld_tpo_dpi_encoder_helper_funcs = {
+ .dpms = mdfld_dsi_dpi_dpms,
+ .mode_fixup = mdfld_dsi_dpi_mode_fixup,
+ .prepare = mdfld_dsi_dpi_prepare,
+ .mode_set = mdfld_dsi_dpi_mode_set,
+ .commit = mdfld_dsi_dpi_commit,
+};
+
+/*TPO DPI encoder funcs*/
+static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+void tpo_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+{
+ if (!dev || !p_funcs) {
+ dev_err(dev->dev, "tpo_vid_init: Invalid parameters\n");
+ return;
+ }
+
+ p_funcs->encoder_funcs = &mdfld_tpo_dpi_encoder_funcs;
+ p_funcs->encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs;
+ p_funcs->get_config_mode = &tpo_vid_get_config_mode;
+ p_funcs->update_fb = NULL;
+ p_funcs->get_panel_info = tpo_vid_get_panel_info;
+}
diff --git a/drivers/staging/gma500/medfield.h b/drivers/staging/gma500/medfield.h
new file mode 100644
index 00000000000..09e9687431f
--- /dev/null
+++ b/drivers/staging/gma500/medfield.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/* Medfield DSI controller registers */
+
+#define MIPIA_DEVICE_READY_REG 0xb000
+#define MIPIA_INTR_STAT_REG 0xb004
+#define MIPIA_INTR_EN_REG 0xb008
+#define MIPIA_DSI_FUNC_PRG_REG 0xb00c
+#define MIPIA_HS_TX_TIMEOUT_REG 0xb010
+#define MIPIA_LP_RX_TIMEOUT_REG 0xb014
+#define MIPIA_TURN_AROUND_TIMEOUT_REG 0xb018
+#define MIPIA_DEVICE_RESET_TIMER_REG 0xb01c
+#define MIPIA_DPI_RESOLUTION_REG 0xb020
+#define MIPIA_DBI_FIFO_THROTTLE_REG 0xb024
+#define MIPIA_HSYNC_COUNT_REG 0xb028
+#define MIPIA_HBP_COUNT_REG 0xb02c
+#define MIPIA_HFP_COUNT_REG 0xb030
+#define MIPIA_HACTIVE_COUNT_REG 0xb034
+#define MIPIA_VSYNC_COUNT_REG 0xb038
+#define MIPIA_VBP_COUNT_REG 0xb03c
+#define MIPIA_VFP_COUNT_REG 0xb040
+#define MIPIA_HIGH_LOW_SWITCH_COUNT_REG 0xb044
+#define MIPIA_DPI_CONTROL_REG 0xb048
+#define MIPIA_DPI_DATA_REG 0xb04c
+#define MIPIA_INIT_COUNT_REG 0xb050
+#define MIPIA_MAX_RETURN_PACK_SIZE_REG 0xb054
+#define MIPIA_VIDEO_MODE_FORMAT_REG 0xb058
+#define MIPIA_EOT_DISABLE_REG 0xb05c
+#define MIPIA_LP_BYTECLK_REG 0xb060
+#define MIPIA_LP_GEN_DATA_REG 0xb064
+#define MIPIA_HS_GEN_DATA_REG 0xb068
+#define MIPIA_LP_GEN_CTRL_REG 0xb06c
+#define MIPIA_HS_GEN_CTRL_REG 0xb070
+#define MIPIA_GEN_FIFO_STAT_REG 0xb074
+#define MIPIA_HS_LS_DBI_ENABLE_REG 0xb078
+#define MIPIA_DPHY_PARAM_REG 0xb080
+#define MIPIA_DBI_BW_CTRL_REG 0xb084
+#define MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG 0xb088
+
+#define DSI_DEVICE_READY (0x1)
+#define DSI_POWER_STATE_ULPS_ENTER (0x2 << 1)
+#define DSI_POWER_STATE_ULPS_EXIT (0x1 << 1)
+#define DSI_POWER_STATE_ULPS_OFFSET (0x1)
+
+
+#define DSI_ONE_DATA_LANE (0x1)
+#define DSI_TWO_DATA_LANE (0x2)
+#define DSI_THREE_DATA_LANE (0X3)
+#define DSI_FOUR_DATA_LANE (0x4)
+#define DSI_DPI_VIRT_CHANNEL_OFFSET (0x3)
+#define DSI_DBI_VIRT_CHANNEL_OFFSET (0x5)
+#define DSI_DPI_COLOR_FORMAT_RGB565 (0x01 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB666 (0x02 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB666_UNPACK (0x03 << 7)
+#define DSI_DPI_COLOR_FORMAT_RGB888 (0x04 << 7)
+#define DSI_DBI_COLOR_FORMAT_OPTION2 (0x05 << 13)
+
+#define DSI_INTR_STATE_RXSOTERROR 1
+
+#define DSI_INTR_STATE_SPL_PKG_SENT (1 << 30)
+#define DSI_INTR_STATE_TE (1 << 31)
+
+#define DSI_HS_TX_TIMEOUT_MASK (0xffffff)
+
+#define DSI_LP_RX_TIMEOUT_MASK (0xffffff)
+
+#define DSI_TURN_AROUND_TIMEOUT_MASK (0x3f)
+
+#define DSI_RESET_TIMER_MASK (0xffff)
+
+#define DSI_DBI_FIFO_WM_HALF (0x0)
+#define DSI_DBI_FIFO_WM_QUARTER (0x1)
+#define DSI_DBI_FIFO_WM_LOW (0x2)
+
+#define DSI_DPI_TIMING_MASK (0xffff)
+
+#define DSI_INIT_TIMER_MASK (0xffff)
+
+#define DSI_DBI_RETURN_PACK_SIZE_MASK (0x3ff)
+
+#define DSI_LP_BYTECLK_MASK (0x0ffff)
+
+#define DSI_HS_CTRL_GEN_SHORT_W0 (0x03)
+#define DSI_HS_CTRL_GEN_SHORT_W1 (0x13)
+#define DSI_HS_CTRL_GEN_SHORT_W2 (0x23)
+#define DSI_HS_CTRL_GEN_R0 (0x04)
+#define DSI_HS_CTRL_GEN_R1 (0x14)
+#define DSI_HS_CTRL_GEN_R2 (0x24)
+#define DSI_HS_CTRL_GEN_LONG_W (0x29)
+#define DSI_HS_CTRL_MCS_SHORT_W0 (0x05)
+#define DSI_HS_CTRL_MCS_SHORT_W1 (0x15)
+#define DSI_HS_CTRL_MCS_R0 (0x06)
+#define DSI_HS_CTRL_MCS_LONG_W (0x39)
+#define DSI_HS_CTRL_VC_OFFSET (0x06)
+#define DSI_HS_CTRL_WC_OFFSET (0x08)
+
+#define DSI_FIFO_GEN_HS_DATA_FULL (1 << 0)
+#define DSI_FIFO_GEN_HS_DATA_HALF_EMPTY (1 << 1)
+#define DSI_FIFO_GEN_HS_DATA_EMPTY (1 << 2)
+#define DSI_FIFO_GEN_LP_DATA_FULL (1 << 8)
+#define DSI_FIFO_GEN_LP_DATA_HALF_EMPTY (1 << 9)
+#define DSI_FIFO_GEN_LP_DATA_EMPTY (1 << 10)
+#define DSI_FIFO_GEN_HS_CTRL_FULL (1 << 16)
+#define DSI_FIFO_GEN_HS_CTRL_HALF_EMPTY (1 << 17)
+#define DSI_FIFO_GEN_HS_CTRL_EMPTY (1 << 18)
+#define DSI_FIFO_GEN_LP_CTRL_FULL (1 << 24)
+#define DSI_FIFO_GEN_LP_CTRL_HALF_EMPTY (1 << 25)
+#define DSI_FIFO_GEN_LP_CTRL_EMPTY (1 << 26)
+#define DSI_FIFO_DBI_EMPTY (1 << 27)
+#define DSI_FIFO_DPI_EMPTY (1 << 28)
+
+#define DSI_DBI_HS_LP_SWITCH_MASK (0x1)
+
+#define DSI_HS_LP_SWITCH_COUNTER_OFFSET (0x0)
+#define DSI_LP_HS_SWITCH_COUNTER_OFFSET (0x16)
+
+#define DSI_DPI_CTRL_HS_SHUTDOWN (0x00000001)
+#define DSI_DPI_CTRL_HS_TURN_ON (0x00000002)
+
+/* Medfield DSI adapter registers */
+#define MIPIA_CONTROL_REG 0xb104
+#define MIPIA_DATA_ADD_REG 0xb108
+#define MIPIA_DATA_LEN_REG 0xb10c
+#define MIPIA_CMD_ADD_REG 0xb110
+#define MIPIA_CMD_LEN_REG 0xb114
+
+/*dsi power modes*/
+#define DSI_POWER_MODE_DISPLAY_ON (1 << 2)
+#define DSI_POWER_MODE_NORMAL_ON (1 << 3)
+#define DSI_POWER_MODE_SLEEP_OUT (1 << 4)
+#define DSI_POWER_MODE_PARTIAL_ON (1 << 5)
+#define DSI_POWER_MODE_IDLE_ON (1 << 6)
+
+enum {
+ MDFLD_DSI_ENCODER_DBI = 0,
+ MDFLD_DSI_ENCODER_DPI,
+};
+
+enum {
+ MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE = 1,
+ MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS = 2,
+ MDFLD_DSI_VIDEO_BURST_MODE = 3,
+};
+
+#define DSI_DPI_COMPLETE_LAST_LINE (1 << 2)
+#define DSI_DPI_DISABLE_BTA (1 << 3)
+/* Panel types */
+enum {
+ TPO_CMD,
+ TPO_VID,
+ TMD_CMD,
+ TMD_VID,
+ PYR_CMD,
+ PYR_VID,
+ TPO,
+ TMD,
+ PYR,
+ HDMI,
+ GCT_DETECT
+};
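+/* GCT_DETECT means the panel type is picked from the firmware GCT (see mid_bios.c) */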
+
+/* Junk that belongs elsewhere */
+#define TPO_PANEL_WIDTH 84
+#define TPO_PANEL_HEIGHT 46
+#define TMD_PANEL_WIDTH 39
+#define TMD_PANEL_HEIGHT 71
+#define PYR_PANEL_WIDTH 53
+#define PYR_PANEL_HEIGHT 95
+
+/* Panel interface */
+struct panel_info {
+ u32 width_mm;
+ u32 height_mm;
+};
+
+struct mdfld_dsi_dbi_output;
+
+struct mdfld_dsi_connector_state {
+ u32 mipi_ctrl_reg;
+};
+
+struct mdfld_dsi_encoder_state {
+
+};
+
+struct mdfld_dsi_connector {
+ /*
+ * This is ugly, but I have to use connector in it! :-(
+ * FIXME: use drm_connector instead.
+ */
+ struct psb_intel_output base;
+
+ int pipe;
+ void *private;
+ void *pkg_sender;
+
+ /* Connection status */
+ enum drm_connector_status status;
+};
+
+struct mdfld_dsi_encoder {
+ struct drm_encoder base;
+ void *private;
+};
+
+/*
+ * DSI configuration: one DSI connector and two DSI encoders.
+ * DRM picks up the DSI encoder based on the active configuration.
+ */
+struct mdfld_dsi_config {
+ struct drm_device *dev;
+ struct drm_display_mode *fixed_mode;
+ struct drm_display_mode *mode;
+
+ struct mdfld_dsi_connector *connector;
+ struct mdfld_dsi_encoder *encoders[DRM_CONNECTOR_MAX_ENCODER];
+ struct mdfld_dsi_encoder *encoder;
+
+ int changed;
+
+ int bpp;
+ int type;
+ int lane_count;
+	/* Virtual channel number for this encoder */
+	int channel_num;
+	/* Video mode configuration */
+ int video_mode;
+
+ int dvr_ic_inited;
+};
+
+#define MDFLD_DSI_CONNECTOR(psb_output) \
+ (container_of(psb_output, struct mdfld_dsi_connector, base))
+
+#define MDFLD_DSI_ENCODER(encoder) \
+ (container_of(encoder, struct mdfld_dsi_encoder, base))
+
+struct panel_funcs {
+ const struct drm_encoder_funcs *encoder_funcs;
+ const struct drm_encoder_helper_funcs *encoder_helper_funcs;
+ struct drm_display_mode *(*get_config_mode) (struct drm_device *);
+ void (*update_fb) (struct mdfld_dsi_dbi_output *, int);
+ int (*get_panel_info) (struct drm_device *, int, struct panel_info *);
+ int (*reset)(int pipe);
+ void (*drv_ic_init)(struct mdfld_dsi_config *dsi_config, int pipe);
+};
+
diff --git a/drivers/staging/gma500/mid_bios.c b/drivers/staging/gma500/mid_bios.c
new file mode 100644
index 00000000000..8cfe301f8fb
--- /dev/null
+++ b/drivers/staging/gma500/mid_bios.c
@@ -0,0 +1,269 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+/* TODO
+ * - Split functions by vbt type
+ * - Make them all take drm_device
+ * - Check ioremap failures
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_drm.h"
+#include "psb_drv.h"
+#include "mid_bios.h"
+#include "mdfld_output.h"
+
+static int panel_id = GCT_DETECT;
+module_param_named(panel_id, panel_id, int, 0600);
+MODULE_PARM_DESC(panel_id, "Panel Identifier");
+
+
+static void mid_get_fuse_settings(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ uint32_t fuse_value = 0;
+ uint32_t fuse_value_tmp = 0;
+
+#define FB_REG06 0xD0810600
+#define FB_MIPI_DISABLE (1 << 11)
+#define FB_REG09 0xD0810900
+#define FB_SKU_MASK 0x7000
+#define FB_SKU_SHIFT 12
+#define FB_SKU_100 0
+#define FB_SKU_100L 1
+#define FB_SKU_83 2
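+	/*
+	 * Fuse settings are read indirectly: the register address is written
+	 * to config offset 0xD0 on the bus 0 root device and the value is
+	 * read back from offset 0xD4 (the MID message bus mechanism).
+	 */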
+ pci_write_config_dword(pci_root, 0xD0, FB_REG06);
+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+ /* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
+ if (IS_MRST(dev))
+ dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
+
+ DRM_INFO("internal display is %s\n",
+ dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
+
+ /* Prevent runtime suspend at start*/
+ if (dev_priv->iLVDS_enable) {
+ dev_priv->is_lvds_on = true;
+ dev_priv->is_mipi_on = false;
+ } else {
+ dev_priv->is_mipi_on = true;
+ dev_priv->is_lvds_on = false;
+ }
+
+ dev_priv->video_device_fuse = fuse_value;
+
+ pci_write_config_dword(pci_root, 0xD0, FB_REG09);
+ pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+
+ dev_dbg(dev->dev, "SKU values is 0x%x.\n", fuse_value);
+ fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
+
+ dev_priv->fuse_reg_value = fuse_value;
+
+ switch (fuse_value_tmp) {
+ case FB_SKU_100:
+ dev_priv->core_freq = 200;
+ break;
+ case FB_SKU_100L:
+ dev_priv->core_freq = 100;
+ break;
+ case FB_SKU_83:
+ dev_priv->core_freq = 166;
+ break;
+ default:
+ dev_warn(dev->dev, "Invalid SKU values, SKU value = 0x%08x\n",
+ fuse_value_tmp);
+ dev_priv->core_freq = 0;
+ }
+ dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
+ pci_dev_put(pci_root);
+}
+
+/*
+ * Get the revision ID, B0:D2:F0;0x08
+ */
+static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
+{
+ uint32_t platform_rev_id = 0;
+ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+
+ pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
+ dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
+ pci_dev_put(pci_gfx_root);
+ dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
+ dev_priv->platform_rev_id);
+}
+
+static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct mrst_vbt *vbt = &dev_priv->vbt_data;
+ u32 addr;
+ u16 new_size;
+ u8 *vbt_virtual;
+ u8 bpi;
+ u8 number_desc = 0;
+ struct mrst_timing_info *dp_ti = &dev_priv->gct_data.DTD;
+ struct gct_r10_timing_info ti;
+ void *pGCT;
+ struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+
+ /* Get the address of the platform config vbt, B0:D2:F0;0xFC */
+ pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
+ pci_dev_put(pci_gfx_root);
+
+ dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
+
+	/* A platform config address of 0 means the firmware does not support the VBT */
+
+ if (addr == 0) {
+ vbt->size = 0;
+ return;
+ }
+
+ /* get the virtual address of the vbt */
+ vbt_virtual = ioremap(addr, sizeof(*vbt));
+
+ memcpy(vbt, vbt_virtual, sizeof(*vbt));
+ iounmap(vbt_virtual); /* Free virtual address space */
+
+ dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);
+
+ switch (vbt->revision) {
+ case 0:
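+ /*
+  * Map the GCT body that follows the on-flash header; sizeof(*vbt)
+  * minus 4 skips the in-memory mrst_gct pointer field, which is not
+  * part of the header stored by firmware (assumes a 32-bit pointer).
+  */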
+ vbt->mrst_gct = ioremap(addr + sizeof(*vbt) - 4,
+ vbt->size - sizeof(*vbt) + 4);
+ pGCT = vbt->mrst_gct;
+ bpi = ((struct mrst_gct_v1 *)pGCT)->PD.BootPanelIndex;
+ dev_priv->gct_data.bpi = bpi;
+ dev_priv->gct_data.pt =
+ ((struct mrst_gct_v1 *)pGCT)->PD.PanelType;
+ memcpy(&dev_priv->gct_data.DTD,
+ &((struct mrst_gct_v1 *)pGCT)->panel[bpi].DTD,
+ sizeof(struct mrst_timing_info));
+ dev_priv->gct_data.Panel_Port_Control =
+ ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+ break;
+ case 1:
+ vbt->mrst_gct = ioremap(addr + sizeof(*vbt) - 4,
+ vbt->size - sizeof(*vbt) + 4);
+ pGCT = vbt->mrst_gct;
+ bpi = ((struct mrst_gct_v2 *)pGCT)->PD.BootPanelIndex;
+ dev_priv->gct_data.bpi = bpi;
+ dev_priv->gct_data.pt =
+ ((struct mrst_gct_v2 *)pGCT)->PD.PanelType;
+ memcpy(&dev_priv->gct_data.DTD,
+ &((struct mrst_gct_v2 *)pGCT)->panel[bpi].DTD,
+ sizeof(struct mrst_timing_info));
+ dev_priv->gct_data.Panel_Port_Control =
+ ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+ break;
+ case 0x10:
+ /* Header definition changed from rev 01 (v2) to rev 10h, */
+ /* so some values have changed location */
+ new_size = vbt->checksum; /*checksum contains lo size byte*/
+ /*LSB of mrst_gct contains hi size byte*/
+ new_size |= ((0xff & (unsigned int)vbt->mrst_gct)) << 8;
+
+ vbt->checksum = vbt->size; /*size contains the checksum*/
+ if (new_size > 0xff)
+ vbt->size = 0xff; /*restrict size to 255*/
+ else
+ vbt->size = new_size;
+
+ /* number of descriptors defined in the GCT */
+ number_desc = ((0xff00 & (unsigned int)vbt->mrst_gct)) >> 8;
+ bpi = ((0xff0000 & (unsigned int)vbt->mrst_gct)) >> 16;
+ vbt->mrst_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
+ GCT_R10_DISPLAY_DESC_SIZE * number_desc);
+ pGCT = vbt->mrst_gct;
+ pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
+ dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
+
+ /*copy the GCT display timings into a temp structure*/
+ memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
+
+ /*now copy the temp struct into the dev_priv->gct_data*/
+ dp_ti->pixel_clock = ti.pixel_clock;
+ dp_ti->hactive_hi = ti.hactive_hi;
+ dp_ti->hactive_lo = ti.hactive_lo;
+ dp_ti->hblank_hi = ti.hblank_hi;
+ dp_ti->hblank_lo = ti.hblank_lo;
+ dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
+ dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
+ dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
+ dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
+ dp_ti->vactive_hi = ti.vactive_hi;
+ dp_ti->vactive_lo = ti.vactive_lo;
+ dp_ti->vblank_hi = ti.vblank_hi;
+ dp_ti->vblank_lo = ti.vblank_lo;
+ dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
+ dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
+ dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
+ dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
+
+ /* Move the MIPI_Display_Descriptor data from GCT to dev priv */
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+ *((u8 *)pGCT + 0x0d);
+ dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
+ (*((u8 *)pGCT + 0x0e)) << 8;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown revision of GCT!\n");
+ vbt->size = 0;
+ }
+ if (IS_MFLD(dev_priv->dev)) {
+ if (panel_id == GCT_DETECT) {
+ if (dev_priv->gct_data.bpi == 2) {
+ dev_info(dev->dev, "[GFX] PYR Panel Detected\n");
+ dev_priv->panel_id = PYR_CMD;
+ panel_id = PYR_CMD;
+ } else if (dev_priv->gct_data.bpi == 0) {
+ dev_info(dev->dev, "[GFX] TMD Panel Detected.\n");
+ dev_priv->panel_id = TMD_VID;
+ panel_id = TMD_VID;
+ } else {
+ dev_info(dev->dev, "[GFX] Default Panel (TPO)\n");
+ dev_priv->panel_id = TPO_CMD;
+ panel_id = TPO_CMD;
+ }
+ } else {
+ dev_info(dev->dev, "[GFX] Panel Parameter Passed in through cmd line\n");
+ dev_priv->panel_id = panel_id;
+ }
+ }
+}
+
+int mid_chip_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ mid_get_fuse_settings(dev);
+ mid_get_vbt_data(dev_priv);
+ mid_get_pci_revID(dev_priv);
+ return 0;
+}
diff --git a/drivers/staging/gma500/mid_bios.h b/drivers/staging/gma500/mid_bios.h
new file mode 100644
index 00000000000..00e7d564b7e
--- /dev/null
+++ b/drivers/staging/gma500/mid_bios.h
@@ -0,0 +1,21 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+extern int mid_chip_setup(struct drm_device *dev);
+
diff --git a/drivers/staging/gma500/psb_mmu.c b/drivers/staging/gma500/mmu.c
index c904d73b1de..c904d73b1de 100644
--- a/drivers/staging/gma500/psb_mmu.c
+++ b/drivers/staging/gma500/mmu.c
diff --git a/drivers/staging/gma500/mrst.h b/drivers/staging/gma500/mrst.h
index 5e4aaeb3711..b563dbc7310 100644
--- a/drivers/staging/gma500/mrst.h
+++ b/drivers/staging/gma500/mrst.h
@@ -25,7 +25,7 @@ struct mrst_vbt {
u8 size;
u8 checksum;
void *mrst_gct;
-} __attribute__ ((packed));
+} __packed;
struct mrst_timing_info {
u16 pixel_clock;
@@ -58,7 +58,7 @@ struct mrst_timing_info {
u8 stereo:1;
u8 unknown6:1;
u8 interlaced:1;
-} __attribute__((packed));
+} __packed;
struct gct_r10_timing_info {
u16 pixel_clock;
@@ -82,7 +82,7 @@ struct gct_r10_timing_info {
u16 vsync_pulse_width_hi:2;
u16 vsync_positive:1;
u16 rsvd_2:3;
-} __attribute__((packed));
+} __packed;
struct mrst_panel_descriptor_v1 {
u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
@@ -111,7 +111,7 @@ struct mrst_panel_descriptor_v1 {
/* Bit 6, Reserved, 2 bits, 00b */
/* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
/* Bit 14, Reserved, 2 bits, 00b */
-} __attribute__ ((packed));
+} __packed;
struct mrst_panel_descriptor_v2 {
u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
@@ -141,10 +141,10 @@ struct mrst_panel_descriptor_v2 {
/* Bit 6, Reserved, 2 bits, 00b */
/* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
/* Bit 14, Reserved, 2 bits, 00b */
-} __attribute__ ((packed));
+} __packed;
union mrst_panel_rx {
- struct{
+ struct {
u16 NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
/* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
u16 MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
@@ -159,10 +159,10 @@ union mrst_panel_rx {
u16 Rsvd:5;/*5 bits,00000b */
} panelrx;
u16 panel_receiver;
-} __attribute__ ((packed));
+} __packed;
struct mrst_gct_v1 {
- union{ /*8 bits,Defined as follows: */
+ union { /*8 bits,Defined as follows: */
struct {
u8 PanelType:4; /*4 bits, Bit field for panels*/
/* 0 - 3: 0 = LVDS, 1 = MIPI*/
@@ -176,10 +176,10 @@ struct mrst_gct_v1 {
};
struct mrst_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
union mrst_panel_rx panelrx[4]; /* panel receivers*/
-} __attribute__ ((packed));
+} __packed;
struct mrst_gct_v2 {
- union{ /*8 bits,Defined as follows: */
+ union { /*8 bits,Defined as follows: */
struct {
u8 PanelType:4; /*4 bits, Bit field for panels*/
/* 0 - 3: 0 = LVDS, 1 = MIPI*/
@@ -193,7 +193,7 @@ struct mrst_gct_v2 {
};
struct mrst_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
union mrst_panel_rx panelrx[4]; /* panel receivers*/
-} __attribute__ ((packed));
+} __packed;
struct mrst_gct_data {
u8 bpi; /* boot panel index, number of panel used during boot */
@@ -205,13 +205,48 @@ struct mrst_gct_data {
u32 PP_Cycle_Delay;
u16 Panel_Backlight_Inverter_Descriptor;
u16 Panel_MIPI_Display_Descriptor;
-} __attribute__ ((packed));
-
-#define MODE_SETTING_IN_CRTC 0x1
-#define MODE_SETTING_IN_ENCODER 0x2
-#define MODE_SETTING_ON_GOING 0x3
-#define MODE_SETTING_IN_DSR 0x4
-#define MODE_SETTING_ENCODER_DONE 0x8
-#define GCT_R10_HEADER_SIZE 16
+} __packed;
+
+#define MODE_SETTING_IN_CRTC 0x1
+#define MODE_SETTING_IN_ENCODER 0x2
+#define MODE_SETTING_ON_GOING 0x3
+#define MODE_SETTING_IN_DSR 0x4
+#define MODE_SETTING_ENCODER_DONE 0x8
+
+#define GCT_R10_HEADER_SIZE 16
#define GCT_R10_DISPLAY_DESC_SIZE 28
+/*
+ * Moorestown HDMI interfaces
+ */
+
+struct mrst_hdmi_dev {
+ struct pci_dev *dev;
+ void __iomem *regs;
+ unsigned int mmio, mmio_len;
+ int dpms_mode;
+ struct hdmi_i2c_dev *i2c_dev;
+
+ /* register state */
+ u32 saveDPLL_CTRL;
+ u32 saveDPLL_DIV_CTRL;
+ u32 saveDPLL_ADJUST;
+ u32 saveDPLL_UPDATE;
+ u32 saveDPLL_CLK_ENABLE;
+ u32 savePCH_HTOTAL_B;
+ u32 savePCH_HBLANK_B;
+ u32 savePCH_HSYNC_B;
+ u32 savePCH_VTOTAL_B;
+ u32 savePCH_VBLANK_B;
+ u32 savePCH_VSYNC_B;
+ u32 savePCH_PIPEBCONF;
+ u32 savePCH_PIPEBSRC;
+};
+
+extern void mrst_hdmi_setup(struct drm_device *dev);
+extern void mrst_hdmi_teardown(struct drm_device *dev);
+extern int mrst_hdmi_i2c_init(struct pci_dev *dev);
+extern void mrst_hdmi_i2c_exit(struct pci_dev *dev);
+extern void mrst_hdmi_save(struct drm_device *dev);
+extern void mrst_hdmi_restore(struct drm_device *dev);
+extern void mrst_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
diff --git a/drivers/staging/gma500/mrst_crtc.c b/drivers/staging/gma500/mrst_crtc.c
index e4a0c033b5b..72464dd0f23 100644
--- a/drivers/staging/gma500/mrst_crtc.c
+++ b/drivers/staging/gma500/mrst_crtc.c
@@ -19,12 +19,12 @@
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
-#include "psb_fb.h"
+#include "framebuffer.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_display.h"
-#include "psb_powermgmt.h"
+#include "power.h"
struct psb_intel_range_t {
int min, max;
@@ -86,7 +86,7 @@ static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
{
const struct mrst_limit_t *limit = NULL;
struct drm_device *dev = crtc->dev;
- DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
|| psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
@@ -103,7 +103,7 @@ static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
}
} else {
limit = NULL;
- PSB_DEBUG_ENTRY("mrst_limit Wrong display type.\n");
+ dev_err(dev->dev, "mrst_limit Wrong display type.\n");
}
return limit;
@@ -117,7 +117,7 @@ static void mrst_clock(int refclk, struct mrst_clock_t *clock)
void mrstPrintPll(char *prefix, struct mrst_clock_t *clock)
{
- PSB_DEBUG_ENTRY("%s: dotclock = %d, m = %d, p1 = %d.\n",
+ pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n",
prefix, clock->dot, clock->m, clock->p1);
}
@@ -149,8 +149,7 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
}
}
}
- DRM_DEBUG("mrstFindBestPLL err = %d.\n", err);
-
+ dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
return err != target;
}
@@ -172,8 +171,6 @@ static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
u32 temp;
bool enabled;
- PSB_DEBUG_ENTRY("mode = %d, pipe = %d\n", mode, pipe);
-
if (!gma_power_begin(dev, true))
return;
@@ -296,7 +293,7 @@ static int mrst_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = psb_intel_crtc->pipe;
int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
@@ -320,8 +317,6 @@ static int mrst_crtc_mode_set(struct drm_crtc *crtc,
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
struct drm_encoder *encoder;
- PSB_DEBUG_ENTRY("pipe = 0x%x\n", pipe);
-
if (!gma_power_begin(dev, true))
return 0;
@@ -446,10 +441,9 @@ static int mrst_crtc_mode_set(struct drm_crtc *crtc,
ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
- PSB_DEBUG_ENTRY(
- "mrstFindBestPLL fail in mrst_crtc_mode_set.\n");
+ dev_dbg(dev->dev, "mrstFindBestPLL fail in mrst_crtc_mode_set.\n");
} else {
- PSB_DEBUG_ENTRY("mrst_crtc_mode_set pixel clock = %d,"
+ dev_dbg(dev->dev, "mrst_crtc_mode_set pixel clock = %d,"
"m = %x, p1 = %x.\n", clock.dot, clock.m,
clock.p1);
}
@@ -540,11 +534,9 @@ int mrst_pipe_set_base(struct drm_crtc *crtc,
u32 dspcntr;
int ret = 0;
- PSB_DEBUG_ENTRY("\n");
-
/* no fb bound */
if (!crtc->fb) {
- DRM_DEBUG("No FB bound\n");
+ dev_dbg(dev->dev, "No FB bound\n");
return 0;
}
@@ -574,13 +566,12 @@ int mrst_pipe_set_base(struct drm_crtc *crtc,
dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
default:
- DRM_ERROR("Unknown color depth\n");
+ dev_err(dev->dev, "Unknown color depth\n");
ret = -EINVAL;
goto pipe_set_base_exit;
}
REG_WRITE(dspcntr_reg, dspcntr);
- DRM_DEBUG("Writing base %08lX %08lX %d %d\n", start, offset, x, y);
if (0 /* FIXMEAC - check what PSB needs */) {
REG_WRITE(dspbase, offset);
REG_READ(dspbase);
diff --git a/drivers/staging/gma500/mrst_device.c b/drivers/staging/gma500/mrst_device.c
new file mode 100644
index 00000000000..6707fafbfa1
--- /dev/null
+++ b/drivers/staging/gma500/mrst_device.c
@@ -0,0 +1,634 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include <asm/mrst.h>
+#include <asm/intel_scu_ipc.h>
+#include "mid_bios.h"
+
+static int devtype;
+
+module_param_named(type, devtype, int, 0600);
+MODULE_PARM_DESC(type, "Moorestown/Oaktrail device type");
+
+#define DEVICE_MOORESTOWN 1
+#define DEVICE_OAKTRAIL 2
+#define DEVICE_MOORESTOWN_MM 3
+
+static int mrst_device_ident(struct drm_device *dev)
+{
+ /* User forced */
+ if (devtype)
+ return devtype;
+ if (dmi_match(DMI_PRODUCT_NAME, "OakTrail") ||
+ dmi_match(DMI_PRODUCT_NAME, "OakTrail platform"))
+ return DEVICE_OAKTRAIL;
+#if defined(CONFIG_X86_MRST)
+ if (dmi_match(DMI_PRODUCT_NAME, "MM") ||
+ dmi_match(DMI_PRODUCT_NAME, "MM 10"))
+ return DEVICE_MOORESTOWN_MM;
+ if (mrst_identify_cpu())
+ return DEVICE_MOORESTOWN;
+#endif
+ return DEVICE_OAKTRAIL;
+}
+
+
+/* IPC message and command defines used to enable/disable mipi panel voltages */
+#define IPC_MSG_PANEL_ON_OFF 0xE9
+#define IPC_CMD_PANEL_ON 1
+#define IPC_CMD_PANEL_OFF 0
+
+static int mrst_output_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->iLVDS_enable)
+ mrst_lvds_init(dev, &dev_priv->mode_dev);
+ else
+ dev_err(dev->dev, "DSI is not supported\n");
+ if (dev_priv->hdmi_priv)
+ mrst_hdmi_init(dev, &dev_priv->mode_dev);
+ return 0;
+}
+
+/*
+ * Provide the low level interfaces for the Moorestown backlight
+ */
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+#define BLC_ADJUSTMENT_MAX 100
+
+static struct backlight_device *mrst_backlight_device;
+static int mrst_brightness;
+
+static int mrst_set_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(mrst_backlight_device);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int level = bd->props.brightness;
+ u32 blc_pwm_ctl;
+ u32 max_pwm_blc;
+
+ /* Percentage 1-100% being valid */
+ if (level < 1)
+ level = 1;
+
+ if (gma_power_begin(dev, 0)) {
+ /* Calculate and set the brightness value */
+ max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;
+ blc_pwm_ctl = level * max_pwm_blc / 100;
+
+ /* Adjust the backlight level with the percent in
+ * dev_priv->blc_adj1;
+ */
+ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
+ blc_pwm_ctl = blc_pwm_ctl / 100;
+
+ /* Adjust the backlight level with the percent in
+ * dev_priv->blc_adj2;
+ */
+ blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
+ blc_pwm_ctl = blc_pwm_ctl / 100;
+
+ /* force PWM bit on */
+ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+ REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
+ gma_power_end(dev);
+ }
+ mrst_brightness = level;
+ return 0;
+}
+
+static int mrst_get_brightness(struct backlight_device *bd)
+{
+ /* return locally cached var instead of HW read (due to DPST etc.) */
+ /* FIXME: ideally return actual value in case firmware fiddled with
+ it */
+ return mrst_brightness;
+}
+
+static int device_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long core_clock;
+ u16 bl_max_freq;
+ uint32_t value;
+ uint32_t blc_pwm_precision_factor;
+
+ dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
+ dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
+ bl_max_freq = 256;
+ /* this needs to be set elsewhere */
+ blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
+
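+ /*
+  * PWM period in core-clock ticks: core_freq (in MHz) is scaled to Hz,
+  * then divided by the calculation constant and the maximum backlight
+  * frequency; the precision factor is multiplied in and divided back
+  * out, so it only affects rounding.
+  */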
+ core_clock = dev_priv->core_freq;
+
+ value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+ value *= blc_pwm_precision_factor;
+ value /= bl_max_freq;
+ value /= blc_pwm_precision_factor;
+
+ if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
+ return -ERANGE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+ REG_WRITE(BLC_PWM_CTL, value | (value << 16));
+ gma_power_end(dev);
+ }
+ return 0;
+}
+
+static const struct backlight_ops mrst_ops = {
+ .get_brightness = mrst_get_brightness,
+ .update_status = mrst_set_brightness,
+};
+
+int mrst_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 100;
+ props.type = BACKLIGHT_PLATFORM;
+
+ mrst_backlight_device = backlight_device_register("mrst-bl",
+ NULL, (void *)dev, &mrst_ops, &props);
+
+ if (IS_ERR(mrst_backlight_device))
+ return PTR_ERR(mrst_backlight_device);
+
+ ret = device_backlight_init(dev);
+ if (ret < 0) {
+ backlight_device_unregister(mrst_backlight_device);
+ return ret;
+ }
+ mrst_backlight_device->props.brightness = 100;
+ mrst_backlight_device->props.max_brightness = 100;
+ backlight_update_status(mrst_backlight_device);
+ dev_priv->backlight_device = mrst_backlight_device;
+ return 0;
+}
+
+#endif
+
+/*
+ * Provide the Moorestown specific chip logic and low level methods
+ * for power management
+ */
+
+static void mrst_init_pm(struct drm_device *dev)
+{
+}
+
+/**
+ * mrst_save_display_registers - save registers lost on suspend
+ * @dev: our DRM device
+ *
+ * Save the state we need in order to be able to restore the interface
+ * upon resume from suspend
+ */
+static int mrst_save_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int i;
+ u32 pp_stat;
+
+ /* Display arbitration control + watermarks */
+ dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
+ dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+ dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+ dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+ dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+ dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+ dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+ dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+
+ /* Pipe & plane A info */
+ dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
+ dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
+ dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
+ dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
+ dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
+ dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
+ dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
+ dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
+ dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
+ dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
+ dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
+ dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
+ dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
+ dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
+ dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
+ dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
+ dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
+ dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
+
+ /* Save cursor regs */
+ dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
+ dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
+ dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
+
+ /* Save palette (gamma) */
+ for (i = 0; i < 256; i++)
+ dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
+
+ if (dev_priv->hdmi_priv)
+ mrst_hdmi_save(dev);
+
+ /* Save performance state */
+ dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
+
+ /* LVDS state */
+ dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
+ dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
+ dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
+ dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
+ dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
+ dev_priv->saveLVDS = PSB_RVDC32(LVDS);
+ dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
+ dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
+ dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
+ dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
+
+ /* HW overlay */
+ dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
+ dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
+ dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
+ dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
+ dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
+ dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
+ dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
+
+ /* DPST registers */
+ dev_priv->saveHISTOGRAM_INT_CONTROL_REG =
+ PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+ dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG =
+ PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+ dev_priv->savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC);
+
+ if (dev_priv->iLVDS_enable) {
+ /* Shut down the panel */
+ PSB_WVDC32(0, PP_CONTROL);
+
+ do {
+ pp_stat = PSB_RVDC32(PP_STATUS);
+ } while (pp_stat & 0x80000000);
+
+ /* Turn off the plane */
+ PSB_WVDC32(0x58000000, DSPACNTR);
+ /* Trigger the plane disable */
+ PSB_WVDC32(0, DSPASURF);
+
+ /* Wait ~4 ticks */
+ msleep(4);
+
+ /* Turn off pipe */
+ PSB_WVDC32(0x0, PIPEACONF);
+ /* Wait ~8 ticks */
+ msleep(8);
+
+ /* Turn off PLLs */
+ PSB_WVDC32(0, MRST_DPLL_A);
+ }
+ return 0;
+}
+
+/**
+ * mrst_restore_display_registers - restore lost register state
+ * @dev: our DRM device
+ *
+ * Restore register state that was lost during suspend and resume.
+ */
+static int mrst_restore_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pp_stat;
+ int i;
+
+ /* Display arbitration + watermarks */
+ PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
+ PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
+ PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
+ PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
+ PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
+ PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
+ PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
+ PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
+
+ /* Make sure VGA plane is off; it initializes to on after reset! */
+ PSB_WVDC32(0x80000000, VGACNTRL);
+
+ /* set the plls */
+ PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
+ PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
+
+ /* Actually enable it */
+ PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
+ DRM_UDELAY(150);
+
+ /* Restore mode */
+ PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
+ PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
+ PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
+ PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
+ PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
+ PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
+ PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
+ PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
+
+ /* Restore performance mode*/
+ PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
+
+ /* Enable the pipe*/
+ if (dev_priv->iLVDS_enable)
+ PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
+
+ /* Set up the plane*/
+ PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
+ PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
+ PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
+
+ /* Enable the plane */
+ PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
+ PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
+
+ /* Enable Cursor A */
+ PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
+ PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
+ PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
+
+ /* Restore palette (gamma) */
+ for (i = 0; i < 256; i++)
+ PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i << 2));
+
+ if (dev_priv->hdmi_priv)
+ mrst_hdmi_restore(dev);
+
+ if (dev_priv->iLVDS_enable) {
+ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
+ PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
+ PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
+ PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
+ PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
+ PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
+ PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
+ PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
+ PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
+ PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
+ }
+
+ /* Wait for cycle delay */
+ do {
+ pp_stat = PSB_RVDC32(PP_STATUS);
+ } while (pp_stat & 0x08000000);
+
+ /* Wait for panel power up */
+ do {
+ pp_stat = PSB_RVDC32(PP_STATUS);
+ } while (pp_stat & 0x10000000);
+
+ /* Restore HW overlay */
+ PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
+ PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
+
+ /* DPST registers */
+ PSB_WVDC32(dev_priv->saveHISTOGRAM_INT_CONTROL_REG,
+ HISTOGRAM_INT_CONTROL);
+ PSB_WVDC32(dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG,
+ HISTOGRAM_LOGIC_CONTROL);
+ PSB_WVDC32(dev_priv->savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC);
+
+ return 0;
+}
+
+/**
+ * mrst_power_down - power down the display island
+ * @dev: our DRM device
+ *
+ * Power down the display interface of our device
+ */
+static int mrst_power_down(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pwr_mask;
+ u32 pwr_sts;
+
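+ /*
+  * Request power-down of the display islands by writing the mask to
+  * PSB_PM_SSC, then poll PSB_PM_SSS until the hardware reports the
+  * islands as gated.
+  */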
+ pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+ outl(pwr_mask, dev_priv->ospm_base + PSB_PM_SSC);
+
+ while (true) {
+ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+ if ((pwr_sts & pwr_mask) == pwr_mask)
+ break;
+ else
+ udelay(10);
+ }
+ return 0;
+}
+
+/*
+ * mrst_power_up
+ *
+ * Restore power to the specified island(s) (powergating)
+ */
+static int mrst_power_up(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+ u32 pwr_sts, pwr_cnt;
+
+ pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
+ pwr_cnt &= ~pwr_mask;
+ outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
+
+ while (true) {
+ pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+ if ((pwr_sts & pwr_mask) == 0)
+ break;
+ else
+ udelay(10);
+ }
+ return 0;
+}
+
+#if defined(CONFIG_X86_MRST)
+static void mrst_lvds_cache_bl(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ intel_scu_ipc_ioread8(0x28, &(dev_priv->saveBKLTCNT));
+ intel_scu_ipc_ioread8(0x29, &(dev_priv->saveBKLTREQ));
+ intel_scu_ipc_ioread8(0x2A, &(dev_priv->saveBKLTBRTL));
+}
+
+static void mrst_mm_bl_power(struct drm_device *dev, bool on)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (on) {
+ intel_scu_ipc_iowrite8(0x2A, dev_priv->saveBKLTBRTL);
+ intel_scu_ipc_iowrite8(0x28, dev_priv->saveBKLTCNT);
+ intel_scu_ipc_iowrite8(0x29, dev_priv->saveBKLTREQ);
+ } else {
+ intel_scu_ipc_iowrite8(0x2A, 0);
+ intel_scu_ipc_iowrite8(0x28, 0);
+ intel_scu_ipc_iowrite8(0x29, 0);
+ }
+}
+
+static const struct psb_ops mrst_mm_chip_ops = {
+ .name = "Moorestown MM ",
+ .accel_2d = 1,
+ .pipes = 1,
+ .crtcs = 1,
+ .sgx_offset = MRST_SGX_OFFSET,
+
+ .crtc_helper = &mrst_helper_funcs,
+ .crtc_funcs = &psb_intel_crtc_funcs,
+
+ .output_init = mrst_output_init,
+
+ .lvds_bl_power = mrst_mm_bl_power,
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = mrst_backlight_init,
+#endif
+
+ .init_pm = mrst_init_pm,
+ .save_regs = mrst_save_display_registers,
+ .restore_regs = mrst_restore_display_registers,
+ .power_down = mrst_power_down,
+ .power_up = mrst_power_up,
+
+ .i2c_bus = 0,
+};
+
+#endif
+
+static void oaktrail_teardown(struct drm_device *dev)
+{
+ mrst_hdmi_teardown(dev);
+}
+
+static const struct psb_ops oaktrail_chip_ops = {
+ .name = "Oaktrail",
+ .accel_2d = 1,
+ .pipes = 2,
+ .crtcs = 2,
+ .sgx_offset = MRST_SGX_OFFSET,
+
+ .chip_setup = mid_chip_setup,
+ .chip_teardown = oaktrail_teardown,
+ .crtc_helper = &mrst_helper_funcs,
+ .crtc_funcs = &psb_intel_crtc_funcs,
+
+ .output_init = mrst_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = mrst_backlight_init,
+#endif
+
+ .init_pm = mrst_init_pm,
+ .save_regs = mrst_save_display_registers,
+ .restore_regs = mrst_restore_display_registers,
+ .power_down = mrst_power_down,
+ .power_up = mrst_power_up,
+
+ .i2c_bus = 1,
+};
+
+/**
+ * mrst_chip_setup - perform the initial chip init
+ * @dev: Our drm_device
+ *
+ * Figure out which incarnation we are and then scan the firmware for
+ * tables and information.
+ */
+static int mrst_chip_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ switch (mrst_device_ident(dev)) {
+ case DEVICE_OAKTRAIL:
+ /* Dual CRTC, PC compatible, HDMI, I2C #2 */
+ dev_priv->ops = &oaktrail_chip_ops;
+ mrst_hdmi_setup(dev);
+ return mid_chip_setup(dev);
+#if defined(CONFIG_X86_MRST)
+ case DEVICE_MOORESTOWN_MM:
+ /* Single CRTC, No HDMI, I2C #0, BL control */
+ mrst_lvds_cache_bl(dev);
+ dev_priv->ops = &mrst_mm_chip_ops;
+ return mid_chip_setup(dev);
+ case DEVICE_MOORESTOWN:
+ /* Dual CRTC, No HDMI(?), I2C #1 */
+ return mid_chip_setup(dev);
+#endif
+ default:
+ dev_err(dev->dev, "unsupported device type.\n");
+ return -ENODEV;
+ }
+}
+
+const struct psb_ops mrst_chip_ops = {
+ .name = "Moorestown",
+ .accel_2d = 1,
+ .pipes = 2,
+ .crtcs = 2,
+ .sgx_offset = MRST_SGX_OFFSET,
+
+ .chip_setup = mrst_chip_setup,
+ .crtc_helper = &mrst_helper_funcs,
+ .crtc_funcs = &psb_intel_crtc_funcs,
+
+ .output_init = mrst_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = mrst_backlight_init,
+#endif
+
+ .init_pm = mrst_init_pm,
+ .save_regs = mrst_save_display_registers,
+ .restore_regs = mrst_restore_display_registers,
+ .power_down = mrst_power_down,
+ .power_up = mrst_power_up,
+
+ .i2c_bus = 2,
+};
+
diff --git a/drivers/staging/gma500/mrst_hdmi.c b/drivers/staging/gma500/mrst_hdmi.c
new file mode 100644
index 00000000000..e66607eb3d3
--- /dev/null
+++ b/drivers/staging/gma500/mrst_hdmi.c
@@ -0,0 +1,852 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Li Peng <peng.li@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_drv.h"
+
+#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
+#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
+
+#define HDMI_HCR 0x1000
+#define HCR_ENABLE_HDCP (1 << 5)
+#define HCR_ENABLE_AUDIO (1 << 2)
+#define HCR_ENABLE_PIXEL (1 << 1)
+#define HCR_ENABLE_TMDS (1 << 0)
+
+#define HDMI_HICR 0x1004
+#define HDMI_HSR 0x1008
+#define HDMI_HISR 0x100C
+#define HDMI_DETECT_HDP (1 << 0)
+
+#define HDMI_VIDEO_REG 0x3000
+#define HDMI_UNIT_EN (1 << 7)
+#define HDMI_MODE_OUTPUT (1 << 0)
+#define HDMI_HBLANK_A 0x3100
+
+#define HDMI_AUDIO_CTRL 0x4000
+#define HDMI_ENABLE_AUDIO (1 << 0)
+
+#define PCH_HTOTAL_B 0x3100
+#define PCH_HBLANK_B 0x3104
+#define PCH_HSYNC_B 0x3108
+#define PCH_VTOTAL_B 0x310C
+#define PCH_VBLANK_B 0x3110
+#define PCH_VSYNC_B 0x3114
+#define PCH_PIPEBSRC 0x311C
+
+#define PCH_PIPEB_DSL 0x3800
+#define PCH_PIPEB_SLC 0x3804
+#define PCH_PIPEBCONF 0x3808
+#define PCH_PIPEBSTAT 0x3824
+
+#define CDVO_DFT 0x5000
+#define CDVO_SLEWRATE 0x5004
+#define CDVO_STRENGTH 0x5008
+#define CDVO_RCOMP 0x500C
+
+#define DPLL_CTRL 0x6000
+#define DPLL_PDIV_SHIFT 16
+#define DPLL_PDIV_MASK (0xf << 16)
+#define DPLL_PWRDN (1 << 4)
+#define DPLL_RESET (1 << 3)
+#define DPLL_FASTEN (1 << 2)
+#define DPLL_ENSTAT (1 << 1)
+#define DPLL_DITHEN (1 << 0)
+
+#define DPLL_DIV_CTRL 0x6004
+#define DPLL_CLKF_MASK 0xffffffc0
+#define DPLL_CLKR_MASK (0x3f)
+
+#define DPLL_CLK_ENABLE 0x6008
+#define DPLL_EN_DISP (1 << 31)
+#define DPLL_SEL_HDMI (1 << 8)
+#define DPLL_EN_HDMI (1 << 1)
+#define DPLL_EN_VGA (1 << 0)
+
+#define DPLL_ADJUST 0x600C
+#define DPLL_STATUS 0x6010
+#define DPLL_UPDATE 0x6014
+#define DPLL_DFT 0x6020
+
+struct intel_range {
+ int min, max;
+};
+
+struct mrst_hdmi_limit {
+ struct intel_range vco, np, nr, nf;
+};
+
+struct mrst_hdmi_clock {
+ int np;
+ int nr;
+ int nf;
+ int dot;
+};
+
+#define VCO_MIN 320000
+#define VCO_MAX 1650000
+#define NP_MIN 1
+#define NP_MAX 15
+#define NR_MIN 1
+#define NR_MAX 64
+#define NF_MIN 2
+#define NF_MAX 4095
+
+static const struct mrst_hdmi_limit mrst_hdmi_limit = {
+ .vco = { .min = VCO_MIN, .max = VCO_MAX },
+ .np = { .min = NP_MIN, .max = NP_MAX },
+ .nr = { .min = NR_MIN, .max = NR_MAX },
+ .nf = { .min = NF_MIN, .max = NF_MAX },
+};
+
+static void wait_for_vblank(struct drm_device *dev)
+{
+ /* FIXME: Can we do this as a sleep ? */
+ /* Wait for 20ms, i.e. one cycle at 50 Hz. */
+ mdelay(20);
+}
+
+static void scu_busy_loop(void *scu_base)
+{
+ u32 status = 0;
+ u32 loop_count = 0;
+
+ status = readl(scu_base + 0x04);
+ while (status & 1) {
+ udelay(1); /* SCU processing time is a few microseconds */
+ status = readl(scu_base + 0x04);
+ loop_count++;
+ /* give up if the SCU doesn't clear its busy bit after many retries */
+ if (loop_count > 1000) {
+ DRM_DEBUG_KMS("SCU IPC timed out");
+ return;
+ }
+ }
+}
+
+static void mrst_hdmi_reset(struct drm_device *dev)
+{
+ void *base;
+ /* FIXME: at least make these defines */
+ unsigned int scu_ipc_mmio = 0xff11c000;
+ int scu_len = 1024;
+
+ base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
+ if (base == NULL) {
+ DRM_ERROR("failed to map SCU mmio\n");
+ return;
+ }
+
+ /* scu ipc: assert hdmi controller reset */
+ writel(0xff11d118, base + 0x0c);
+ writel(0x7fffffdf, base + 0x80);
+ writel(0x42005, base + 0x0);
+ scu_busy_loop(base);
+
+ /* scu ipc: de-assert hdmi controller reset */
+ writel(0xff11d118, base + 0x0c);
+ writel(0x7fffffff, base + 0x80);
+ writel(0x42005, base + 0x0);
+ scu_busy_loop(base);
+
+ iounmap(base);
+}
+
+static void mrst_hdmi_audio_enable(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+
+ HDMI_WRITE(HDMI_HCR, 0x67);
+ HDMI_READ(HDMI_HCR);
+
+ HDMI_WRITE(0x51a8, 0x10);
+ HDMI_READ(0x51a8);
+
+ HDMI_WRITE(HDMI_AUDIO_CTRL, 0x1);
+ HDMI_READ(HDMI_AUDIO_CTRL);
+}
+
+static void mrst_hdmi_audio_disable(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+
+ HDMI_WRITE(0x51a8, 0x0);
+ HDMI_READ(0x51a8);
+
+ HDMI_WRITE(HDMI_AUDIO_CTRL, 0x0);
+ HDMI_READ(HDMI_AUDIO_CTRL);
+
+ HDMI_WRITE(HDMI_HCR, 0x47);
+ HDMI_READ(HDMI_HCR);
+}
+
+void mrst_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ u32 temp;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_OFF:
+ /* Disable VGACNTRL */
+ REG_WRITE(VGACNTRL, 0x80000000);
+
+ /* Disable plane */
+ temp = REG_READ(DSPBCNTR);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
+ REG_READ(DSPBCNTR);
+ /* Flush the plane changes */
+ REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+ REG_READ(DSPBSURF);
+ }
+
+ /* Disable pipe B */
+ temp = REG_READ(PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+ REG_READ(PIPEBCONF);
+ }
+
+ /* Disable LNW Pipes, etc */
+ temp = REG_READ(PCH_PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+ REG_READ(PCH_PIPEBCONF);
+ }
+ /* wait for pipe off */
+ udelay(150);
+ /* Disable dpll */
+ temp = REG_READ(DPLL_CTRL);
+ if ((temp & DPLL_PWRDN) == 0) {
+ REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
+ REG_WRITE(DPLL_STATUS, 0x1);
+ }
+ /* wait for dpll off */
+ udelay(150);
+ break;
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ /* Enable dpll */
+ temp = REG_READ(DPLL_CTRL);
+ if ((temp & DPLL_PWRDN) != 0) {
+ REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
+ temp = REG_READ(DPLL_CLK_ENABLE);
+ REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
+ REG_READ(DPLL_CLK_ENABLE);
+ }
+ /* wait for dpll warm up */
+ udelay(150);
+
+ /* Enable pipe B */
+ temp = REG_READ(PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
+ REG_READ(PIPEBCONF);
+ }
+
+ /* Enable LNW Pipe B */
+ temp = REG_READ(PCH_PIPEBCONF);
+ if ((temp & PIPEACONF_ENABLE) == 0) {
+ REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
+ REG_READ(PCH_PIPEBCONF);
+ }
+ wait_for_vblank(dev);
+
+ /* Enable plane */
+ temp = REG_READ(DSPBCNTR);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+ REG_READ(DSPBSURF);
+ }
+ psb_intel_crtc_load_lut(crtc);
+ }
+ /* DSPARB */
+ REG_WRITE(DSPARB, 0x00003fbf);
+ /* FW1 */
+ REG_WRITE(0x70034, 0x3f880a0a);
+ /* FW2 */
+ REG_WRITE(0x70038, 0x0b060808);
+ /* FW4 */
+ REG_WRITE(0x70050, 0x08030404);
+ /* FW5 */
+ REG_WRITE(0x70054, 0x04040404);
+ /* LNC Chicken Bits */
+ REG_WRITE(0x70400, 0x4000);
+}
+
+
+static void mrst_hdmi_dpms(struct drm_encoder *encoder, int mode)
+{
+ static int dpms_mode = -1;
+
+ struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ u32 temp;
+
+ if (dpms_mode == mode)
+ return;
+
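+ /*
+  * 0x99 turns the video unit on (HDMI_UNIT_EN | HDMI_MODE_OUTPUT plus
+  * two bits that are not named here); 0 switches it back off.
+  */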
+ if (mode != DRM_MODE_DPMS_ON)
+ temp = 0x0;
+ else
+ temp = 0x99;
+
+ dpms_mode = mode;
+ HDMI_WRITE(HDMI_VIDEO_REG, temp);
+}
+
+static unsigned int htotal_calculate(struct drm_display_mode *mode)
+{
+ u32 htotal, new_crtc_htotal;
+
+ htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
+
+ /*
+ * 1024 x 768 new_crtc_htotal = 0x1024;
+ * 1280 x 1024 new_crtc_htotal = 0x0c34;
+ */
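+ /*
+  * Rescale the horizontal total as if the pixel clock were 200 MHz
+  * (mode->clock is in kHz); the active width in the low word is left
+  * unchanged.
+  */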
+ new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
+
+ return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
+}
+
+static void mrst_hdmi_find_dpll(struct drm_crtc *crtc, int target,
+ int refclk, struct mrst_hdmi_clock *best_clock)
+{
+ int np_min, np_max, nr_min, nr_max;
+ int np, nr, nf;
+
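+ /*
+  * Pick np/nr so the dividers stay within the limit table, then choose
+  * nf so that refclk * nf / (np * nr) is ten times the target dot clock.
+  */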
+ np_min = DIV_ROUND_UP(mrst_hdmi_limit.vco.min, target * 10);
+ np_max = mrst_hdmi_limit.vco.max / (target * 10);
+ if (np_min < mrst_hdmi_limit.np.min)
+ np_min = mrst_hdmi_limit.np.min;
+ if (np_max > mrst_hdmi_limit.np.max)
+ np_max = mrst_hdmi_limit.np.max;
+
+ nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
+ nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
+ if (nr_min < mrst_hdmi_limit.nr.min)
+ nr_min = mrst_hdmi_limit.nr.min;
+ if (nr_max > mrst_hdmi_limit.nr.max)
+ nr_max = mrst_hdmi_limit.nr.max;
+
+ np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
+ nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
+ nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
+ DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
+
+ /*
+ * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
+ * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
+ */
+ best_clock->np = np;
+ best_clock->nr = nr - 1;
+ best_clock->nf = (nf << 14);
+}
+
+int mrst_crtc_hdmi_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ int pipe = 1;
+ int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+ int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+ int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+ int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+ int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+ int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+ int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+ int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+ int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ int refclk;
+ struct mrst_hdmi_clock clock;
+ u32 dspcntr, pipeconf, dpll, temp;
+ int dspcntr_reg = DSPBCNTR;
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* XXX: Disable the panel fitter if it was on our pipe */
+
+ /* Disable dpll if necessary */
+ dpll = REG_READ(DPLL_CTRL);
+ if ((dpll & DPLL_PWRDN) == 0) {
+ REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
+ REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
+ REG_WRITE(DPLL_STATUS, 0x1);
+ }
+ udelay(150);
+
+ /* reset controller: FIXME - can we sort out the ioremap mess ? */
+ iounmap(hdmi_dev->regs);
+ mrst_hdmi_reset(dev);
+
+ /* program and enable dpll */
+ refclk = 25000;
+ mrst_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
+
+ /* Setting DPLL */
+ dpll = REG_READ(DPLL_CTRL);
+ dpll &= ~DPLL_PDIV_MASK;
+ dpll &= ~(DPLL_PWRDN | DPLL_RESET);
+ REG_WRITE(DPLL_CTRL, 0x00000008);
+ REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
+ REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
+ REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
+ REG_WRITE(DPLL_UPDATE, 0x80000000);
+ REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
+ udelay(150);
+
+ hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
+ if (hdmi_dev->regs == NULL) {
+ DRM_ERROR("failed to do hdmi mmio mapping\n");
+ return -ENOMEM;
+ }
+
+ /* configure HDMI */
+ HDMI_WRITE(0x1004, 0x1fd);
+ HDMI_WRITE(0x2000, 0x1);
+ HDMI_WRITE(0x2008, 0x0);
+ HDMI_WRITE(0x3130, 0x8);
+ HDMI_WRITE(0x101c, 0x1800810);
+
+ temp = htotal_calculate(adjusted_mode);
+ REG_WRITE(htot_reg, temp);
+ REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ REG_WRITE(pipesrc_reg,
+ ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+
+ REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+ REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+ REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+ REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+ REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ REG_WRITE(PCH_PIPEBSRC,
+ ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+
+ temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+ HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);
+
+ REG_WRITE(dspsize_reg,
+ ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+ REG_WRITE(dsppos_reg, 0);
+
+ /* Flush the plane changes */
+ {
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ }
+
+ /* Set up the display plane register */
+ dspcntr = REG_READ(dspcntr_reg);
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+
+ /* setup pipeconf */
+ pipeconf = REG_READ(pipeconf_reg);
+ pipeconf |= PIPEACONF_ENABLE;
+
+ REG_WRITE(pipeconf_reg, pipeconf);
+ REG_READ(pipeconf_reg);
+
+ REG_WRITE(PCH_PIPEBCONF, pipeconf);
+ REG_READ(PCH_PIPEBCONF);
+ wait_for_vblank(dev);
+
+ REG_WRITE(dspcntr_reg, dspcntr);
+ wait_for_vblank(dev);
+
+ return 0;
+}
+
+static int mrst_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ if (mode->clock < 20000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ return MODE_OK;
+}
+
+static bool mrst_hdmi_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static enum drm_connector_status
+mrst_hdmi_detect(struct drm_connector *connector, bool force)
+{
+ enum drm_connector_status status;
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ u32 temp;
+
+ temp = HDMI_READ(HDMI_HSR);
+ DRM_DEBUG_KMS("HDMI_HSR %x\n", temp);
+
+ if ((temp & HDMI_DETECT_HDP) != 0)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ return status;
+}
+
+static const unsigned char raw_edid[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x10, 0xac, 0x2f, 0xa0,
+ 0x53, 0x55, 0x33, 0x30, 0x16, 0x13, 0x01, 0x03, 0x0e, 0x3a, 0x24, 0x78,
+ 0xea, 0xe9, 0xf5, 0xac, 0x51, 0x30, 0xb4, 0x25, 0x11, 0x50, 0x54, 0xa5,
+ 0x4b, 0x00, 0x81, 0x80, 0xa9, 0x40, 0x71, 0x4f, 0xb3, 0x00, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
+ 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x46, 0x6c, 0x21, 0x00, 0x00, 0x1a,
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x47, 0x4e, 0x37, 0x32, 0x31, 0x39, 0x35,
+ 0x52, 0x30, 0x33, 0x55, 0x53, 0x0a, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x44,
+ 0x45, 0x4c, 0x4c, 0x20, 0x32, 0x37, 0x30, 0x39, 0x57, 0x0a, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x38, 0x4c, 0x1e, 0x53, 0x11, 0x00, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x8d
+};
+
+static int mrst_hdmi_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct i2c_adapter *i2c_adap;
+ struct edid *edid;
+ struct drm_display_mode *mode, *t;
+ int i = 0, ret = 0;
+
+ i2c_adap = i2c_get_adapter(3);
+ if (i2c_adap == NULL) {
+ DRM_ERROR("No ddc adapter available!\n");
+ edid = (struct edid *)raw_edid;
+ } else {
+ edid = (struct edid *)raw_edid;
+ /* FIXME ? edid = drm_get_edid(connector, i2c_adap); */
+ }
+
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
+ }
+
+ /*
+ * prune modes that require frame buffer bigger than stolen mem
+ */
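+ /* (assumes a 32bpp framebuffer, hence 4 bytes per pixel) */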
+ list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+ if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
+ i++;
+ drm_mode_remove(connector, mode);
+ }
+ }
+ return ret - i;
+}
+
+static void mrst_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+
+ mrst_hdmi_audio_enable(dev);
+ return;
+}
+
+static void mrst_hdmi_destroy(struct drm_connector *connector)
+{
+ return;
+}
+
+static const struct drm_encoder_helper_funcs mrst_hdmi_helper_funcs = {
+ .dpms = mrst_hdmi_dpms,
+ .mode_fixup = mrst_hdmi_mode_fixup,
+ .prepare = psb_intel_encoder_prepare,
+ .mode_set = mrst_hdmi_mode_set,
+ .commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_helper_funcs
+ mrst_hdmi_connector_helper_funcs = {
+ .get_modes = mrst_hdmi_get_modes,
+ .mode_valid = mrst_hdmi_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static const struct drm_connector_funcs mrst_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = mrst_hdmi_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = mrst_hdmi_destroy,
+};
+
+static void mrst_hdmi_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs mrst_hdmi_enc_funcs = {
+ .destroy = mrst_hdmi_enc_destroy,
+};
+
+void mrst_hdmi_init(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
+{
+ struct psb_intel_output *psb_intel_output;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
+ if (!psb_intel_output)
+ return;
+
+ psb_intel_output->mode_dev = mode_dev;
+ connector = &psb_intel_output->base;
+ encoder = &psb_intel_output->enc;
+ drm_connector_init(dev, &psb_intel_output->base,
+ &mrst_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_DVID);
+
+ drm_encoder_init(dev, &psb_intel_output->enc,
+ &mrst_hdmi_enc_funcs,
+ DRM_MODE_ENCODER_TMDS);
+
+ drm_mode_connector_attach_encoder(&psb_intel_output->base,
+ &psb_intel_output->enc);
+
+ psb_intel_output->type = INTEL_OUTPUT_HDMI;
+ drm_encoder_helper_add(encoder, &mrst_hdmi_helper_funcs);
+ drm_connector_helper_add(connector, &mrst_hdmi_connector_helper_funcs);
+
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ drm_sysfs_connector_add(connector);
+
+ return;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
+ {}
+};
+
+void mrst_hdmi_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev;
+ struct mrst_hdmi_dev *hdmi_dev;
+ int ret;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x080d, NULL);
+ if (!pdev)
+ return;
+
+ hdmi_dev = kzalloc(sizeof(struct mrst_hdmi_dev), GFP_KERNEL);
+ if (!hdmi_dev) {
+ dev_err(dev->dev, "failed to allocate memory\n");
+ goto out;
+ }
+
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable hdmi controller\n");
+ goto free;
+ }
+
+ hdmi_dev->mmio = pci_resource_start(pdev, 0);
+ hdmi_dev->mmio_len = pci_resource_len(pdev, 0);
+ hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
+ if (!hdmi_dev->regs) {
+ dev_err(dev->dev, "failed to map hdmi mmio\n");
+ goto free;
+ }
+
+ hdmi_dev->dev = pdev;
+ pci_set_drvdata(pdev, hdmi_dev);
+
+ /* Initialize i2c controller */
+ ret = mrst_hdmi_i2c_init(hdmi_dev->dev);
+ if (ret)
+ dev_err(dev->dev, "HDMI I2C initialization failed\n");
+
+ dev_priv->hdmi_priv = hdmi_dev;
+ mrst_hdmi_audio_disable(dev);
+ return;
+
+free:
+ kfree(hdmi_dev);
+out:
+ return;
+}
+
+void mrst_hdmi_teardown(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct pci_dev *pdev;
+
+ if (hdmi_dev) {
+ pdev = hdmi_dev->dev;
+ pci_set_drvdata(pdev, NULL);
+ mrst_hdmi_i2c_exit(pdev);
+ iounmap(hdmi_dev->regs);
+ kfree(hdmi_dev);
+ pci_dev_put(pdev);
+ }
+}
+
+/* save HDMI register state */
+void mrst_hdmi_save(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ int i;
+
+ /* dpll */
+ hdmi_dev->saveDPLL_CTRL = PSB_RVDC32(DPLL_CTRL);
+ hdmi_dev->saveDPLL_DIV_CTRL = PSB_RVDC32(DPLL_DIV_CTRL);
+ hdmi_dev->saveDPLL_ADJUST = PSB_RVDC32(DPLL_ADJUST);
+ hdmi_dev->saveDPLL_UPDATE = PSB_RVDC32(DPLL_UPDATE);
+ hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
+
+ /* pipe B */
+ dev_priv->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
+ dev_priv->savePIPEBSRC = PSB_RVDC32(PIPEBSRC);
+ dev_priv->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B);
+ dev_priv->saveHBLANK_B = PSB_RVDC32(HBLANK_B);
+ dev_priv->saveHSYNC_B = PSB_RVDC32(HSYNC_B);
+ dev_priv->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B);
+ dev_priv->saveVBLANK_B = PSB_RVDC32(VBLANK_B);
+ dev_priv->saveVSYNC_B = PSB_RVDC32(VSYNC_B);
+
+ hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
+ hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
+ hdmi_dev->savePCH_HTOTAL_B = PSB_RVDC32(PCH_HTOTAL_B);
+ hdmi_dev->savePCH_HBLANK_B = PSB_RVDC32(PCH_HBLANK_B);
+ hdmi_dev->savePCH_HSYNC_B = PSB_RVDC32(PCH_HSYNC_B);
+ hdmi_dev->savePCH_VTOTAL_B = PSB_RVDC32(PCH_VTOTAL_B);
+ hdmi_dev->savePCH_VBLANK_B = PSB_RVDC32(PCH_VBLANK_B);
+ hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B);
+
+ /* plane */
+ dev_priv->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
+ dev_priv->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
+ dev_priv->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
+ dev_priv->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
+ dev_priv->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
+ dev_priv->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
+
+ /* cursor B */
+ dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
+ dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
+ dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
+
+ /* save palette */
+ for (i = 0; i < 256; i++)
+ dev_priv->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
+}
+
+/* restore HDMI register state */
+void mrst_hdmi_restore(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ int i;
+
+ /* dpll */
+ PSB_WVDC32(hdmi_dev->saveDPLL_CTRL, DPLL_CTRL);
+ PSB_WVDC32(hdmi_dev->saveDPLL_DIV_CTRL, DPLL_DIV_CTRL);
+ PSB_WVDC32(hdmi_dev->saveDPLL_ADJUST, DPLL_ADJUST);
+ PSB_WVDC32(hdmi_dev->saveDPLL_UPDATE, DPLL_UPDATE);
+ PSB_WVDC32(hdmi_dev->saveDPLL_CLK_ENABLE, DPLL_CLK_ENABLE);
+ DRM_UDELAY(150);
+
+ /* pipe */
+ PSB_WVDC32(dev_priv->savePIPEBSRC, PIPEBSRC);
+ PSB_WVDC32(dev_priv->saveHTOTAL_B, HTOTAL_B);
+ PSB_WVDC32(dev_priv->saveHBLANK_B, HBLANK_B);
+ PSB_WVDC32(dev_priv->saveHSYNC_B, HSYNC_B);
+ PSB_WVDC32(dev_priv->saveVTOTAL_B, VTOTAL_B);
+ PSB_WVDC32(dev_priv->saveVBLANK_B, VBLANK_B);
+ PSB_WVDC32(dev_priv->saveVSYNC_B, VSYNC_B);
+
+ PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
+ PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
+ PSB_WVDC32(hdmi_dev->savePCH_HBLANK_B, PCH_HBLANK_B);
+ PSB_WVDC32(hdmi_dev->savePCH_HSYNC_B, PCH_HSYNC_B);
+ PSB_WVDC32(hdmi_dev->savePCH_VTOTAL_B, PCH_VTOTAL_B);
+ PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
+ PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B);
+
+ PSB_WVDC32(dev_priv->savePIPEBCONF, PIPEBCONF);
+ PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
+
+ /* plane */
+ PSB_WVDC32(dev_priv->saveDSPBLINOFF, DSPBLINOFF);
+ PSB_WVDC32(dev_priv->saveDSPBSTRIDE, DSPBSTRIDE);
+ PSB_WVDC32(dev_priv->saveDSPBTILEOFF, DSPBTILEOFF);
+ PSB_WVDC32(dev_priv->saveDSPBCNTR, DSPBCNTR);
+ PSB_WVDC32(dev_priv->saveDSPBSURF, DSPBSURF);
+
+ /* cursor B */
+ PSB_WVDC32(dev_priv->saveDSPBCURSOR_CTRL, CURBCNTR);
+ PSB_WVDC32(dev_priv->saveDSPBCURSOR_POS, CURBPOS);
+ PSB_WVDC32(dev_priv->saveDSPBCURSOR_BASE, CURBBASE);
+
+ /* restore palette */
+ for (i = 0; i < 256; i++)
+ PSB_WVDC32(dev_priv->save_palette_b[i], PALETTE_B + (i << 2));
+}
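As a point of reference, a minimal sketch of how these save/restore helpers would typically be called from a chip-level save_regs/restore_regs hook (the function names here are illustrative, not the actual Moorestown implementation added elsewhere in this series):

/* Sketch only: illustrative wrappers; the real Moorestown hooks also
 * handle pipe A / LVDS state before touching the HDMI block. */
static int example_mrst_save_display_registers(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->hdmi_priv)	/* HDMI bridge present on this board */
		mrst_hdmi_save(dev);
	return 0;
}

static int example_mrst_restore_display_registers(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->hdmi_priv)
		mrst_hdmi_restore(dev);
	return 0;
}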
diff --git a/drivers/staging/gma500/mrst_hdmi_i2c.c b/drivers/staging/gma500/mrst_hdmi_i2c.c
new file mode 100644
index 00000000000..351b9d897b9
--- /dev/null
+++ b/drivers/staging/gma500/mrst_hdmi_i2c.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Li Peng <peng.li@intel.com>
+ */
+
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include "psb_drv.h"
+
+#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
+#define HDMI_WRITE(reg, val) writel(val, hdmi_dev->regs + (reg))
+
+#define HDMI_HCR 0x1000
+#define HCR_DETECT_HDP (1 << 6)
+#define HCR_ENABLE_HDCP (1 << 5)
+#define HCR_ENABLE_AUDIO (1 << 2)
+#define HCR_ENABLE_PIXEL (1 << 1)
+#define HCR_ENABLE_TMDS (1 << 0)
+#define HDMI_HICR 0x1004
+#define HDMI_INTR_I2C_ERROR (1 << 4)
+#define HDMI_INTR_I2C_FULL (1 << 3)
+#define HDMI_INTR_I2C_DONE (1 << 2)
+#define HDMI_INTR_HPD (1 << 0)
+#define HDMI_HSR 0x1008
+#define HDMI_HISR 0x100C
+#define HDMI_HI2CRDB0 0x1200
+#define HDMI_HI2CHCR 0x1240
+#define HI2C_HDCP_WRITE (0 << 2)
+#define HI2C_HDCP_RI_READ (1 << 2)
+#define HI2C_HDCP_READ (2 << 2)
+#define HI2C_EDID_READ (3 << 2)
+#define HI2C_READ_CONTINUE (1 << 1)
+#define HI2C_ENABLE_TRANSACTION (1 << 0)
+
+#define HDMI_ICRH 0x1100
+#define HDMI_HI2CTDR0 0x1244
+#define HDMI_HI2CTDR1 0x1248
+
+#define I2C_STAT_INIT 0
+#define I2C_READ_DONE 1
+#define I2C_TRANSACTION_DONE 2
+
+struct hdmi_i2c_dev {
+ struct i2c_adapter *adap;
+ struct mutex i2c_lock;
+ struct completion complete;
+ int status;
+ struct i2c_msg *msg;
+ int buf_offset;
+};
+
+static void hdmi_i2c_irq_enable(struct mrst_hdmi_dev *hdmi_dev)
+{
+ u32 temp;
+
+ temp = HDMI_READ(HDMI_HICR);
+ temp |= (HDMI_INTR_I2C_ERROR | HDMI_INTR_I2C_FULL | HDMI_INTR_I2C_DONE);
+ HDMI_WRITE(HDMI_HICR, temp);
+ HDMI_READ(HDMI_HICR);
+}
+
+static void hdmi_i2c_irq_disable(struct mrst_hdmi_dev *hdmi_dev)
+{
+ HDMI_WRITE(HDMI_HICR, 0x0);
+ HDMI_READ(HDMI_HICR);
+}
+
+static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+ struct mrst_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ u32 temp;
+
+ i2c_dev->status = I2C_STAT_INIT;
+ i2c_dev->msg = pmsg;
+ i2c_dev->buf_offset = 0;
+ INIT_COMPLETION(i2c_dev->complete);
+
+ /* Enable I2C transaction */
+ temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
+ HDMI_WRITE(HDMI_HI2CHCR, temp);
+ HDMI_READ(HDMI_HI2CHCR);
+
+ while (i2c_dev->status != I2C_TRANSACTION_DONE)
+ wait_for_completion_interruptible_timeout(&i2c_dev->complete,
+ 10 * HZ);
+
+ return 0;
+}
+
+static int xfer_write(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+{
+ /*
+	 * XXX: i2c write doesn't seem to be needed for EDID probe, so do nothing
+ */
+ return 0;
+}
+
+static int mrst_hdmi_i2c_access(struct i2c_adapter *adap,
+ struct i2c_msg *pmsg,
+ int num)
+{
+ struct mrst_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ int i, err = 0;
+
+ mutex_lock(&i2c_dev->i2c_lock);
+
+ /* Enable i2c unit */
+ HDMI_WRITE(HDMI_ICRH, 0x00008760);
+
+ /* Enable irq */
+ hdmi_i2c_irq_enable(hdmi_dev);
+ for (i = 0; i < num; i++) {
+ if (pmsg->len && pmsg->buf) {
+ if (pmsg->flags & I2C_M_RD)
+ err = xfer_read(adap, pmsg);
+ else
+ err = xfer_write(adap, pmsg);
+ }
+ pmsg++; /* next message */
+ }
+
+ /* Disable irq */
+ hdmi_i2c_irq_disable(hdmi_dev);
+
+ mutex_unlock(&i2c_dev->i2c_lock);
+
+ return i;
+}
+
+static u32 mrst_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm mrst_hdmi_i2c_algorithm = {
+ .master_xfer = mrst_hdmi_i2c_access,
+ .functionality = mrst_hdmi_i2c_func,
+};
+
+static struct i2c_adapter mrst_hdmi_i2c_adapter = {
+ .name = "mrst_hdmi_i2c",
+ .nr = 3,
+ .owner = THIS_MODULE,
+ .class = I2C_CLASS_DDC,
+ .algo = &mrst_hdmi_i2c_algorithm,
+};
+
+static void hdmi_i2c_read(struct mrst_hdmi_dev *hdmi_dev)
+{
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ struct i2c_msg *msg = i2c_dev->msg;
+ u8 *buf = msg->buf;
+ u32 temp;
+ int i, offset;
+
+ offset = i2c_dev->buf_offset;
+ for (i = 0; i < 0x10; i++) {
+ temp = HDMI_READ(HDMI_HI2CRDB0 + (i * 4));
+ memcpy(buf + (offset + i * 4), &temp, 4);
+ }
+ i2c_dev->buf_offset += (0x10 * 4);
+
+ /* clearing read buffer full intr */
+ temp = HDMI_READ(HDMI_HISR);
+ HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_FULL);
+ HDMI_READ(HDMI_HISR);
+
+ /* continue read transaction */
+ temp = HDMI_READ(HDMI_HI2CHCR);
+ HDMI_WRITE(HDMI_HI2CHCR, temp | HI2C_READ_CONTINUE);
+ HDMI_READ(HDMI_HI2CHCR);
+
+ i2c_dev->status = I2C_READ_DONE;
+ return;
+}
+
+static void hdmi_i2c_transaction_done(struct mrst_hdmi_dev *hdmi_dev)
+{
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ u32 temp;
+
+ /* clear transaction done intr */
+ temp = HDMI_READ(HDMI_HISR);
+ HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_DONE);
+ HDMI_READ(HDMI_HISR);
+
+
+ temp = HDMI_READ(HDMI_HI2CHCR);
+ HDMI_WRITE(HDMI_HI2CHCR, temp & ~HI2C_ENABLE_TRANSACTION);
+ HDMI_READ(HDMI_HI2CHCR);
+
+ i2c_dev->status = I2C_TRANSACTION_DONE;
+ return;
+}
+
+static irqreturn_t mrst_hdmi_i2c_handler(int this_irq, void *dev)
+{
+ struct mrst_hdmi_dev *hdmi_dev = dev;
+ struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+ u32 stat;
+
+ stat = HDMI_READ(HDMI_HISR);
+
+ if (stat & HDMI_INTR_HPD) {
+ HDMI_WRITE(HDMI_HISR, stat | HDMI_INTR_HPD);
+ HDMI_READ(HDMI_HISR);
+ }
+
+ if (stat & HDMI_INTR_I2C_FULL)
+ hdmi_i2c_read(hdmi_dev);
+
+ if (stat & HDMI_INTR_I2C_DONE)
+ hdmi_i2c_transaction_done(hdmi_dev);
+
+ complete(&i2c_dev->complete);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * choose alternate function 2 of GPIO pins 52 and 53,
+ * which are used by the HDMI I2C logic
+ */
+static void mrst_hdmi_i2c_gpio_fix(void)
+{
+ void *base;
+ unsigned int gpio_base = 0xff12c000;
+ int gpio_len = 0x1000;
+ u32 temp;
+
+ base = ioremap((resource_size_t)gpio_base, gpio_len);
+ if (base == NULL) {
+ DRM_ERROR("gpio ioremap fail\n");
+ return;
+ }
+
+ temp = readl(base + 0x44);
+ DRM_DEBUG_DRIVER("old gpio val %x\n", temp);
+ writel((temp | 0x00000a00), (base + 0x44));
+ temp = readl(base + 0x44);
+ DRM_DEBUG_DRIVER("new gpio val %x\n", temp);
+
+ iounmap(base);
+}
+
+int mrst_hdmi_i2c_init(struct pci_dev *dev)
+{
+ struct mrst_hdmi_dev *hdmi_dev;
+ struct hdmi_i2c_dev *i2c_dev;
+ int ret;
+
+ hdmi_dev = pci_get_drvdata(dev);
+
+ i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
+ if (i2c_dev == NULL) {
+ DRM_ERROR("Can't allocate interface\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_dev->adap = &mrst_hdmi_i2c_adapter;
+ i2c_dev->status = I2C_STAT_INIT;
+ init_completion(&i2c_dev->complete);
+ mutex_init(&i2c_dev->i2c_lock);
+ i2c_set_adapdata(&mrst_hdmi_i2c_adapter, hdmi_dev);
+ hdmi_dev->i2c_dev = i2c_dev;
+
+ /* Enable HDMI I2C function on gpio */
+ mrst_hdmi_i2c_gpio_fix();
+
+ /* request irq */
+ ret = request_irq(dev->irq, mrst_hdmi_i2c_handler, IRQF_SHARED,
+ mrst_hdmi_i2c_adapter.name, hdmi_dev);
+ if (ret) {
+ DRM_ERROR("Failed to request IRQ for I2C controller\n");
+ goto err;
+ }
+
+ /* Adapter registration */
+ ret = i2c_add_numbered_adapter(&mrst_hdmi_i2c_adapter);
+ return ret;
+
+err:
+ kfree(i2c_dev);
+exit:
+ return ret;
+}
+
+void mrst_hdmi_i2c_exit(struct pci_dev *dev)
+{
+ struct mrst_hdmi_dev *hdmi_dev;
+ struct hdmi_i2c_dev *i2c_dev;
+
+ hdmi_dev = pci_get_drvdata(dev);
+ if (i2c_del_adapter(&mrst_hdmi_i2c_adapter))
+ DRM_DEBUG_DRIVER("Failed to delete hdmi-i2c adapter\n");
+
+ i2c_dev = hdmi_dev->i2c_dev;
+ kfree(i2c_dev);
+ free_irq(dev->irq, hdmi_dev);
+}
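Since only the read path is implemented (the write handler above is a no-op), a DDC/EDID fetch through this adapter reduces to a standard two-message transfer. A minimal sketch, assuming the usual DDC slave address 0x50 and the bus number 3 registered above:

/* Sketch only: standard i2c-core calls; error handling kept minimal. */
static int example_read_edid_block(u8 *edid)	/* edid: 128-byte buffer */
{
	struct i2c_adapter *adap = i2c_get_adapter(3);	/* "mrst_hdmi_i2c" */
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
	};
	int ret;

	if (!adap)
		return -ENODEV;
	ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
	i2c_put_adapter(adap);
	return ret < 0 ? ret : 0;
}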
diff --git a/drivers/staging/gma500/mrst_lvds.c b/drivers/staging/gma500/mrst_lvds.c
index 4a08b74f5ff..e7999a2a379 100644
--- a/drivers/staging/gma500/mrst_lvds.c
+++ b/drivers/staging/gma500/mrst_lvds.c
@@ -24,11 +24,11 @@
#include <drm/drmP.h>
#include <asm/mrst.h>
-#include "psb_intel_bios.h"
+#include "intel_bios.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "psb_powermgmt.h"
+#include "power.h"
#include <linux/pm_runtime.h>
/* The max/min PWM frequency in BPCR[31:17] - */
@@ -46,8 +46,7 @@ static void mrst_lvds_set_power(struct drm_device *dev,
struct psb_intel_output *output, bool on)
{
u32 pp_status;
- DRM_DRIVER_PRIVATE_T *dev_priv = dev->dev_private;
- PSB_DEBUG_ENTRY("\n");
+ struct drm_psb_private *dev_priv = dev->dev_private;
if (!gma_power_begin(dev, true))
return;
@@ -59,7 +58,11 @@ static void mrst_lvds_set_power(struct drm_device *dev,
pp_status = REG_READ(PP_STATUS);
} while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
dev_priv->is_lvds_on = true;
+ if (dev_priv->ops->lvds_bl_power)
+ dev_priv->ops->lvds_bl_power(dev, true);
} else {
+ if (dev_priv->ops->lvds_bl_power)
+ dev_priv->ops->lvds_bl_power(dev, false);
REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
~POWER_TARGET_ON);
do {
@@ -68,7 +71,6 @@ static void mrst_lvds_set_power(struct drm_device *dev,
dev_priv->is_lvds_on = false;
pm_request_idle(&dev->pdev->dev);
}
-
gma_power_end(dev);
}
@@ -77,8 +79,6 @@ static void mrst_lvds_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
- PSB_DEBUG_ENTRY("\n");
-
if (mode == DRM_MODE_DPMS_ON)
mrst_lvds_set_power(dev, output, true);
else
@@ -94,11 +94,10 @@ static void mrst_lvds_mode_set(struct drm_encoder *encoder,
struct psb_intel_mode_device *mode_dev =
enc_to_psb_intel_output(encoder)->mode_dev;
struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
u32 lvds_port;
uint64_t v = DRM_MODE_SCALE_FULLSCREEN;
- PSB_DEBUG_ENTRY("\n");
-
if (!gma_power_begin(dev, true))
return;
@@ -112,7 +111,9 @@ static void mrst_lvds_mode_set(struct drm_encoder *encoder,
LVDS_PORT_EN |
LVDS_BORDER_EN;
- if (mode_dev->panel_wants_dither)
+	/* If the firmware says dither on Moorestown, or the BIOS does
+	   on Oaktrail, then enable dithering */
+ if (mode_dev->panel_wants_dither || dev_priv->lvds_dither)
lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
REG_WRITE(LVDS, lvds_port);
@@ -146,13 +147,59 @@ static void mrst_lvds_mode_set(struct drm_encoder *encoder,
gma_power_end(dev);
}
+static void mrst_lvds_prepare(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
+
+ if (!gma_power_begin(dev, true))
+ return;
+
+ mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+ mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+ BACKLIGHT_DUTY_CYCLE_MASK);
+ mrst_lvds_set_power(dev, output, false);
+ gma_power_end(dev);
+}
+
+static u32 mrst_lvds_get_max_backlight(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ u32 ret;
+
+ if (gma_power_begin(dev, false)) {
+ ret = ((REG_READ(BLC_PWM_CTL) &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ gma_power_end(dev);
+ } else
+ ret = ((dev_priv->saveBLC_PWM_CTL &
+ BACKLIGHT_MODULATION_FREQ_MASK) >>
+ BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+
+ return ret;
+}
+
+static void mrst_lvds_commit(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+ struct psb_intel_mode_device *mode_dev = output->mode_dev;
+
+ if (mode_dev->backlight_duty_cycle == 0)
+ mode_dev->backlight_duty_cycle =
+ mrst_lvds_get_max_backlight(dev);
+ mrst_lvds_set_power(dev, output, true);
+}
static const struct drm_encoder_helper_funcs mrst_lvds_helper_funcs = {
.dpms = mrst_lvds_dpms,
.mode_fixup = psb_intel_lvds_mode_fixup,
- .prepare = psb_intel_lvds_prepare,
+ .prepare = mrst_lvds_prepare,
.mode_set = mrst_lvds_mode_set,
- .commit = psb_intel_lvds_commit,
+ .commit = mrst_lvds_commit,
};
static struct drm_display_mode lvds_configuration_modes[] = {
@@ -252,8 +299,6 @@ void mrst_lvds_init(struct drm_device *dev,
struct i2c_adapter *i2c_adap;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
- PSB_DEBUG_ENTRY("\n");
-
psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
if (!psb_intel_output)
return;
@@ -302,16 +347,10 @@ void mrst_lvds_init(struct drm_device *dev,
* if closed, act like it's not there for now
*/
- /* This ifdef can go once the cpu ident stuff is cleaned up in arch */
-#if defined(CONFIG_X86_MRST)
- if (mrst_identify_cpu())
- i2c_adap = i2c_get_adapter(2);
- else /* Oaktrail uses I2C 1 */
-#endif
- i2c_adap = i2c_get_adapter(1);
+ i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
if (i2c_adap == NULL)
- printk(KERN_ALERT "No ddc adapter available!\n");
+ dev_err(dev->dev, "No ddc adapter available!\n");
/*
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
@@ -333,7 +372,6 @@ void mrst_lvds_init(struct drm_device *dev,
}
}
}
-
/*
 * If we didn't get EDID, try getting panel timing
* from configuration data
@@ -341,15 +379,13 @@ void mrst_lvds_init(struct drm_device *dev,
mode_dev->panel_fixed_mode = mrst_lvds_get_configuration_mode(dev);
if (mode_dev->panel_fixed_mode) {
- mode_dev->panel_fixed_mode->type |=
- DRM_MODE_TYPE_PREFERRED;
+ mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out; /* FIXME: check for quirks */
}
/* If we still don't have a mode after all that, give up. */
if (!mode_dev->panel_fixed_mode) {
- DRM_DEBUG
- ("Found no modes on the lvds, ignoring the LVDS\n");
+ dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
goto failed_find;
}
@@ -358,7 +394,7 @@ out:
return;
failed_find:
- DRM_DEBUG("No LVDS modes found, disabling.\n");
+ dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
if (psb_intel_output->ddc_bus)
psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
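The prepare/commit pair above saves and restores the PWM duty cycle; a minimal sketch of the duty-cycle arithmetic this implies, mirroring the old mrst_set_brightness() logic from psb_bl.c (removed later in this patch), with percent in the 1-100 range:

/* Sketch only: scale a 1-100% level into the BLC_PWM_CTL duty field,
 * keeping the modulation frequency in the top 16 bits unchanged. */
static void example_set_backlight_percent(struct drm_device *dev, int percent)
{
	if (gma_power_begin(dev, false)) {
		u32 max = REG_READ(BLC_PWM_CTL) >> 16;	/* modulation freq */
		u32 duty = max * percent / 100;

		REG_WRITE(BLC_PWM_CTL, (max << 16) | duty);
		gma_power_end(dev);
	}
}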
diff --git a/drivers/staging/gma500/psb_powermgmt.c b/drivers/staging/gma500/power.c
index 1495415be6c..972bea7c1af 100644
--- a/drivers/staging/gma500/psb_powermgmt.c
+++ b/drivers/staging/gma500/power.c
@@ -1,7 +1,7 @@
/**************************************************************************
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009-2011, Intel Corporation.
* All Rights Reserved.
-
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -27,14 +27,16 @@
* Massively reworked
* Alan Cox <alan@linux.intel.com>
*/
-#include "psb_powermgmt.h"
+
+#include "power.h"
#include "psb_drv.h"
#include "psb_reg.h"
#include "psb_intel_reg.h"
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
-static struct mutex power_mutex;
+static struct mutex power_mutex; /* Serialize power ops */
+static spinlock_t power_ctrl_lock; /* Serialize power claim */
/**
* gma_power_init - initialise power manager
@@ -46,23 +48,17 @@ void gma_power_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ /* FIXME: Move APM/OSPM base into relevant device code */
dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
dev_priv->ospm_base &= 0xffff;
dev_priv->display_power = true; /* We start active */
dev_priv->display_count = 0; /* Currently no users */
dev_priv->suspended = false; /* And not suspended */
+ spin_lock_init(&power_ctrl_lock);
mutex_init(&power_mutex);
- if (!IS_MRST(dev)) {
- /* FIXME: wants further review */
- u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
- /* Disable 2D clock gating */
- gating &= ~3;
- gating |= 1;
- PSB_WSGX32(gating, PSB_CR_CLKGATECTL);
- PSB_RSGX32(PSB_CR_CLKGATECTL);
- }
+ dev_priv->ops->init_pm(dev);
}
/**
@@ -73,114 +69,10 @@ void gma_power_init(struct drm_device *dev)
*/
void gma_power_uninit(struct drm_device *dev)
{
- mutex_destroy(&power_mutex);
pm_runtime_disable(&dev->pdev->dev);
pm_runtime_set_suspended(&dev->pdev->dev);
}
-
-/**
- * save_display_registers - save registers lost on suspend
- * @dev: our DRM device
- *
- * Save the state we need in order to be able to restore the interface
- * upon resume from suspend
- */
-static int save_display_registers(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- struct drm_connector *connector;
-
- /* Display arbitration control + watermarks */
- dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
- dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
- dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
- dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
- dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
- dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
- dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
- dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
-
- /* Save crtc and output state */
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (drm_helper_crtc_in_use(crtc))
- crtc->funcs->save(crtc);
- }
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- connector->funcs->save(connector);
-
- mutex_unlock(&dev->mode_config.mutex);
- return 0;
-}
-
-/**
- * restore_display_registers - restore lost register state
- * @dev: our DRM device
- *
- * Restore register state that was lost during suspend and resume.
- */
-static int restore_display_registers(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- struct drm_connector *connector;
-
- /* Display arbitration + watermarks */
- PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
- PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
- PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
- PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
- PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
- PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
- PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
- PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
-
- /*make sure VGA plane is off. it initializes to on after reset!*/
- PSB_WVDC32(0x80000000, VGACNTRL);
-
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- if (drm_helper_crtc_in_use(crtc))
- crtc->funcs->restore(crtc);
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- connector->funcs->restore(connector);
-
- mutex_unlock(&dev->mode_config.mutex);
- return 0;
-}
-
-/**
- * power_down - power down the display island
- * @dev: our DRM device
- *
- * Power down the display interface of our device
- */
-static void power_down(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- u32 pwr_mask ;
- u32 pwr_sts;
-
- if (IS_MRST(dev)) {
- pwr_mask = PSB_PWRGT_DISPLAY_MASK;
- outl(pwr_mask, dev_priv->ospm_base + PSB_PM_SSC);
-
- while (true) {
- pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
- if ((pwr_sts & pwr_mask) == pwr_mask)
- break;
- else
- udelay(10);
- }
- dev_priv->display_power = false;
- }
-}
-
-
/**
* gma_suspend_display - suspend the display logic
* @dev: our DRM device
@@ -190,73 +82,12 @@ static void power_down(struct drm_device *dev)
static void gma_suspend_display(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- int pp_stat;
if (dev_priv->suspended)
return;
-
- save_display_registers(dev);
-
- if (dev_priv->iLVDS_enable) {
- /*shutdown the panel*/
- PSB_WVDC32(0, PP_CONTROL);
-
- do {
- pp_stat = PSB_RVDC32(PP_STATUS);
- } while (pp_stat & 0x80000000);
-
- /*turn off the plane*/
- PSB_WVDC32(0x58000000, DSPACNTR);
- PSB_WVDC32(0, DSPASURF);/*trigger the plane disable*/
- /*wait ~4 ticks*/
- msleep(4);
-
- /*turn off pipe*/
- PSB_WVDC32(0x0, PIPEACONF);
- /*wait ~8 ticks*/
- msleep(8);
-
- /*turn off PLLs*/
- PSB_WVDC32(0, MRST_DPLL_A);
- } else {
- PSB_WVDC32(DPI_SHUT_DOWN, DPI_CONTROL_REG);
- PSB_WVDC32(0x0, PIPEACONF);
- PSB_WVDC32(0x2faf0000, BLC_PWM_CTL);
- while (REG_READ(0x70008) & 0x40000000);
- while ((PSB_RVDC32(GEN_FIFO_STAT_REG) & DPI_FIFO_EMPTY)
- != DPI_FIFO_EMPTY);
- PSB_WVDC32(0, DEVICE_READY_REG);
- /* turn off panel power */
- }
- power_down(dev);
-}
-
-/*
- * power_up
- *
- * Description: Restore power to the specified island(s) (powergating)
- */
-static void power_up(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
- u32 pwr_sts, pwr_cnt;
-
- if (IS_MRST(dev)) {
- pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
- pwr_cnt &= ~pwr_mask;
- outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
-
- while (true) {
- pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
- if ((pwr_sts & pwr_mask) == 0)
- break;
- else
- udelay(10);
- }
- }
- dev_priv->suspended = false;
- dev_priv->display_power = true;
+ dev_priv->ops->save_regs(dev);
+ dev_priv->ops->power_down(dev);
+ dev_priv->display_power = false;
}
/**
@@ -274,20 +105,14 @@ static void gma_resume_display(struct pci_dev *pdev)
return;
/* turn on the display power island */
- power_up(dev);
+ dev_priv->ops->power_up(dev);
+ dev_priv->suspended = false;
+ dev_priv->display_power = true;
PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
pci_write_config_word(pdev, PSB_GMCH_CTRL,
dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
-
- /* Don't reinitialize the GTT as it is unnecessary. The gtt is
- * stored in memory so it will automatically be restored. All
- * we need to do is restore the PGETBL_CTL which we already do
- * above.
- */
- /*psb_gtt_init(dev_priv->pg, 1);*/
-
- restore_display_registers(dev);
+ dev_priv->ops->restore_regs(dev);
}
/**
@@ -339,7 +164,7 @@ static bool gma_resume_pci(struct pci_dev *pdev)
pci_restore_state(pdev);
pci_write_config_dword(pdev, 0x5c, dev_priv->saveBSM);
pci_write_config_dword(pdev, 0xFC, dev_priv->saveVBT);
- /* retoring MSI address and data in PCIx space */
+ /* restoring MSI address and data in PCIx space */
pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
ret = pci_enable_device(pdev);
@@ -360,8 +185,9 @@ static bool gma_resume_pci(struct pci_dev *pdev)
* perform the necessary shut down steps and save enough state that
* we can undo this when resume is called.
*/
-int gma_power_suspend(struct pci_dev *pdev, pm_message_t state)
+int gma_power_suspend(struct device *_dev)
{
+ struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -379,15 +205,15 @@ int gma_power_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-
/**
* gma_power_resume - resume power
* @pdev: PCI device
*
* Resume the PCI side of the graphics and then the displays
*/
-int gma_power_resume(struct pci_dev *pdev)
+int gma_power_resume(struct device *_dev)
{
+ struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
struct drm_device *dev = pci_get_drvdata(pdev);
mutex_lock(&power_mutex);
@@ -399,8 +225,6 @@ int gma_power_resume(struct pci_dev *pdev)
return 0;
}
-
-
/**
 * gma_power_is_on - return true if power is on
* @dev: our DRM device
@@ -413,7 +237,6 @@ bool gma_power_is_on(struct drm_device *dev)
return dev_priv->display_power;
}
-
/**
* gma_power_begin - begin requiring power
* @dev: our DRM device
@@ -421,22 +244,23 @@ bool gma_power_is_on(struct drm_device *dev)
*
* Begin an action that requires the display power island is enabled.
* We refcount the islands.
- *
- * FIXME: locking
*/
bool gma_power_begin(struct drm_device *dev, bool force_on)
{
struct drm_psb_private *dev_priv = dev->dev_private;
int ret;
+ unsigned long flags;
+ spin_lock_irqsave(&power_ctrl_lock, flags);
/* Power already on ? */
if (dev_priv->display_power) {
dev_priv->display_count++;
pm_runtime_get(&dev->pdev->dev);
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
return true;
}
if (force_on == false)
- return false;
+ goto out_false;
/* Ok power up needed */
ret = gma_resume_pci(dev->pdev);
@@ -445,12 +269,14 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
psb_irq_postinstall(dev);
pm_runtime_get(&dev->pdev->dev);
dev_priv->display_count++;
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
return true;
}
+out_false:
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
return false;
}
-
/**
* gma_power_end - end use of power
* @dev: Our DRM device
@@ -461,15 +287,17 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
void gma_power_end(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+ spin_lock_irqsave(&power_ctrl_lock, flags);
dev_priv->display_count--;
WARN_ON(dev_priv->display_count < 0);
+ spin_unlock_irqrestore(&power_ctrl_lock, flags);
pm_runtime_put(&dev->pdev->dev);
}
int psb_runtime_suspend(struct device *dev)
{
- static pm_message_t dummy;
- return gma_power_suspend(to_pci_dev(dev), dummy);
+ return gma_power_suspend(dev);
}
int psb_runtime_resume(struct device *dev)
@@ -486,4 +314,3 @@ int psb_runtime_idle(struct device *dev)
else
return 1;
}
-
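With the spinlock-protected refcounting above, every hardware access is expected to sit inside a begin/end pair; force_on selects between an opportunistic access and a mandatory power-up. A minimal caller-side sketch:

/* Sketch only: the usage pattern this rework expects from callers. */
static u32 example_read_pipe_conf(struct drm_device *dev)
{
	u32 val = 0;

	if (gma_power_begin(dev, false)) {	/* don't force a power-up */
		val = REG_READ(PIPEACONF);
		gma_power_end(dev);
	}
	return val;
}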
diff --git a/drivers/staging/gma500/psb_powermgmt.h b/drivers/staging/gma500/power.h
index e005229af79..1969d2ecb32 100644
--- a/drivers/staging/gma500/psb_powermgmt.h
+++ b/drivers/staging/gma500/power.h
@@ -1,5 +1,5 @@
/**************************************************************************
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009-2011, Intel Corporation.
* All Rights Reserved.
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -39,8 +39,8 @@ void gma_power_uninit(struct drm_device *dev);
/*
* The kernel bus power management will call these functions
*/
-int gma_power_suspend(struct pci_dev *pdev, pm_message_t state);
-int gma_power_resume(struct pci_dev *pdev);
+int gma_power_suspend(struct device *dev);
+int gma_power_resume(struct device *dev);
/*
* These are the functions the driver should use to wrap all hw access
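Switching the prototypes to take a struct device means the same entry points can be wired straight into a dev_pm_ops table. A hedged sketch (the table name is illustrative, not the driver's actual PM table):

/* Sketch only: how the device-based prototypes above slot into the
 * generic PM framework; the psb_runtime_* helpers are from power.c. */
static const struct dev_pm_ops example_psb_pm_ops = {
	.suspend	 = gma_power_suspend,
	.resume		 = gma_power_resume,
	.freeze		 = gma_power_suspend,
	.thaw		 = gma_power_resume,
	.runtime_suspend = psb_runtime_suspend,
	.runtime_resume	 = psb_runtime_resume,
	.runtime_idle	 = psb_runtime_idle,
};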
diff --git a/drivers/staging/gma500/psb_bl.c b/drivers/staging/gma500/psb_bl.c
deleted file mode 100644
index 5dffc71c512..00000000000
--- a/drivers/staging/gma500/psb_bl.c
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * psb backlight interface
- *
- * Copyright (c) 2009, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Authors: Eric Knopp
- *
- */
-
-#include <linux/backlight.h>
-#include <linux/version.h>
-#include "psb_drv.h"
-#include "psb_intel_reg.h"
-#include "psb_intel_drv.h"
-#include "psb_intel_bios.h"
-#include "psb_powermgmt.h"
-
-#define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF
-#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
-#define BLC_PWM_FREQ_CALC_CONSTANT 32
-#define MHz 1000000
-#define BRIGHTNESS_MIN_LEVEL 1
-#define BRIGHTNESS_MASK 0xFF
-#define BLC_POLARITY_NORMAL 0
-#define BLC_POLARITY_INVERSE 1
-#define BLC_ADJUSTMENT_MAX 100
-
-#define PSB_BLC_PWM_PRECISION_FACTOR 10
-#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
-#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
-
-#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
-#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
-
-static int psb_brightness;
-static struct backlight_device *psb_backlight_device;
-static u8 blc_brightnesscmd;
-static u8 blc_pol;
-static u8 blc_type;
-
-int psb_set_brightness(struct backlight_device *bd)
-{
- struct drm_device *dev = bl_get_data(psb_backlight_device);
- int level = bd->props.brightness;
-
- DRM_DEBUG_DRIVER("backlight level set to %d\n", level);
-
- /* Percentage 1-100% being valid */
- if (level < 1)
- level = 1;
-
- psb_intel_lvds_set_brightness(dev, level);
- psb_brightness = level;
- return 0;
-}
-
-int mrst_set_brightness(struct backlight_device *bd)
-{
- struct drm_device *dev = bl_get_data(psb_backlight_device);
- struct drm_psb_private *dev_priv = dev->dev_private;
- int level = bd->props.brightness;
- u32 blc_pwm_ctl;
- u32 max_pwm_blc;
-
- DRM_DEBUG_DRIVER("backlight level set to %d\n", level);
-
- /* Percentage 1-100% being valid */
- if (level < 1)
- level = 1;
-
- if (gma_power_begin(dev, 0)) {
- /* Calculate and set the brightness value */
- max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;
- blc_pwm_ctl = level * max_pwm_blc / 100;
-
- /* Adjust the backlight level with the percent in
- * dev_priv->blc_adj1;
- */
- blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
- blc_pwm_ctl = blc_pwm_ctl / 100;
-
- /* Adjust the backlight level with the percent in
- * dev_priv->blc_adj2;
- */
- blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
- blc_pwm_ctl = blc_pwm_ctl / 100;
-
- if (blc_pol == BLC_POLARITY_INVERSE)
- blc_pwm_ctl = max_pwm_blc - blc_pwm_ctl;
- /* force PWM bit on */
- REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
- REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
- gma_power_end(dev);
- }
- psb_brightness = level;
- return 0;
-}
-
-int psb_get_brightness(struct backlight_device *bd)
-{
- DRM_DEBUG_DRIVER("brightness = 0x%x\n", psb_brightness);
-
- /* return locally cached var instead of HW read (due to DPST etc.) */
- /* FIXME: ideally return actual value in case firmware fiddled with
- it */
- return psb_brightness;
-}
-
-static const struct backlight_ops psb_ops = {
- .get_brightness = psb_get_brightness,
- .update_status = psb_set_brightness,
-};
-
-static int device_backlight_init(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- unsigned long core_clock;
- /* u32 bl_max_freq; */
- /* unsigned long value; */
- u16 bl_max_freq;
- uint32_t value;
- uint32_t blc_pwm_precision_factor;
-
- if (IS_MRST(dev)) {
- dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
- dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
- bl_max_freq = 256;
- /* this needs to be set elsewhere */
- blc_pol = BLC_POLARITY_NORMAL;
- blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
- } else {
- /* get bl_max_freq and pol from dev_priv*/
- if (!dev_priv->lvds_bl) {
- DRM_ERROR("Has no valid LVDS backlight info\n");
- return 1;
- }
- bl_max_freq = dev_priv->lvds_bl->freq;
- blc_pol = dev_priv->lvds_bl->pol;
- blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
- blc_brightnesscmd = dev_priv->lvds_bl->brightnesscmd;
- blc_type = dev_priv->lvds_bl->type;
- }
-
- core_clock = dev_priv->core_freq;
-
- value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
- value *= blc_pwm_precision_factor;
- value /= bl_max_freq;
- value /= blc_pwm_precision_factor;
-
- if (gma_power_begin(dev, false)) {
- if (IS_MRST(dev)) {
- if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
- return 2;
- else {
- REG_WRITE(BLC_PWM_CTL2,
- (0x80000000 | REG_READ(BLC_PWM_CTL2)));
- REG_WRITE(BLC_PWM_CTL, value | (value << 16));
- }
- } else {
- if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
- value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
- return 2;
- else {
- value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
- REG_WRITE(BLC_PWM_CTL,
- (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
- (value));
- }
- }
- gma_power_end(dev);
- }
- return 0;
-}
-
-int psb_backlight_init(struct drm_device *dev)
-{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- int ret = 0;
-
- struct backlight_properties props;
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = 100;
- props.type = BACKLIGHT_PLATFORM;
-
- psb_backlight_device = backlight_device_register("psb-bl", NULL,
- (void *)dev, &psb_ops, &props);
- if (IS_ERR(psb_backlight_device))
- return PTR_ERR(psb_backlight_device);
-
- ret = device_backlight_init(dev);
- if (ret < 0)
- return ret;
-
- psb_backlight_device->props.brightness = 100;
- psb_backlight_device->props.max_brightness = 100;
- backlight_update_status(psb_backlight_device);
-#endif
- return 0;
-}
-
-void psb_backlight_exit(void)
-{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- psb_backlight_device->props.brightness = 0;
- backlight_update_status(psb_backlight_device);
- backlight_device_unregister(psb_backlight_device);
-#endif
-}
-
-struct backlight_device *psb_get_backlight_device(void)
-{
- return psb_backlight_device;
-}
diff --git a/drivers/staging/gma500/psb_device.c b/drivers/staging/gma500/psb_device.c
new file mode 100644
index 00000000000..46591323595
--- /dev/null
+++ b/drivers/staging/gma500/psb_device.c
@@ -0,0 +1,353 @@
+/**************************************************************************
+ * Copyright (c) 2011, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <linux/backlight.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "psb_drm.h"
+#include "psb_drv.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+
+
+static int psb_output_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ psb_intel_lvds_init(dev, &dev_priv->mode_dev);
+ psb_intel_sdvo_init(dev, SDVOB);
+ return 0;
+}
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+
+/*
+ * Poulsbo Backlight Interfaces
+ */
+
+#define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */
+#define BLC_PWM_FREQ_CALC_CONSTANT 32
+#define MHz 1000000
+
+#define PSB_BLC_PWM_PRECISION_FACTOR 10
+#define PSB_BLC_MAX_PWM_REG_FREQ 0xFFFE
+#define PSB_BLC_MIN_PWM_REG_FREQ 0x2
+
+#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+#define PSB_BACKLIGHT_PWM_CTL_SHIFT (16)
+
+static int psb_brightness;
+static struct backlight_device *psb_backlight_device;
+
+static int psb_get_brightness(struct backlight_device *bd)
+{
+ /* return locally cached var instead of HW read (due to DPST etc.) */
+ /* FIXME: ideally return actual value in case firmware fiddled with
+ it */
+ return psb_brightness;
+}
+
+
+static int psb_backlight_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long core_clock;
+ /* u32 bl_max_freq; */
+ /* unsigned long value; */
+ u16 bl_max_freq;
+ uint32_t value;
+ uint32_t blc_pwm_precision_factor;
+
+ /* get bl_max_freq and pol from dev_priv*/
+ if (!dev_priv->lvds_bl) {
+		dev_err(dev->dev, "No valid LVDS backlight info\n");
+ return -ENOENT;
+ }
+ bl_max_freq = dev_priv->lvds_bl->freq;
+ blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+
+ core_clock = dev_priv->core_freq;
+
+ value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+ value *= blc_pwm_precision_factor;
+ value /= bl_max_freq;
+ value /= blc_pwm_precision_factor;
+
+ if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
+ value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
+ return -ERANGE;
+ else {
+ value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+ REG_WRITE(BLC_PWM_CTL,
+ (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
+ }
+ return 0;
+}
+
+static int psb_set_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(psb_backlight_device);
+ int level = bd->props.brightness;
+
+ /* Percentage 1-100% being valid */
+ if (level < 1)
+ level = 1;
+
+ psb_intel_lvds_set_brightness(dev, level);
+ psb_brightness = level;
+ return 0;
+}
+
+static const struct backlight_ops psb_ops = {
+ .get_brightness = psb_get_brightness,
+ .update_status = psb_set_brightness,
+};
+
+static int psb_backlight_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.max_brightness = 100;
+ props.type = BACKLIGHT_PLATFORM;
+
+ psb_backlight_device = backlight_device_register("psb-bl",
+ NULL, (void *)dev, &psb_ops, &props);
+ if (IS_ERR(psb_backlight_device))
+ return PTR_ERR(psb_backlight_device);
+
+ ret = psb_backlight_setup(dev);
+ if (ret < 0) {
+ backlight_device_unregister(psb_backlight_device);
+ psb_backlight_device = NULL;
+ return ret;
+ }
+ psb_backlight_device->props.brightness = 100;
+ psb_backlight_device->props.max_brightness = 100;
+ backlight_update_status(psb_backlight_device);
+ dev_priv->backlight_device = psb_backlight_device;
+ return 0;
+}
+
+#endif
+
+/*
+ * Provide the Poulsbo specific chip logic and low level methods
+ * for power management
+ */
+
+static void psb_init_pm(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
+ gating &= ~3; /* Disable 2D clock gating */
+ gating |= 1;
+ PSB_WSGX32(gating, PSB_CR_CLKGATECTL);
+ PSB_RSGX32(PSB_CR_CLKGATECTL);
+}
+
+/**
+ * psb_save_display_registers - save registers lost on suspend
+ * @dev: our DRM device
+ *
+ * Save the state we need in order to be able to restore the interface
+ * upon resume from suspend
+ */
+static int psb_save_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+
+ /* Display arbitration control + watermarks */
+ dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
+ dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+ dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+ dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+ dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+ dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+ dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+ dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+
+ /* Save crtc and output state */
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (drm_helper_crtc_in_use(crtc))
+ crtc->funcs->save(crtc);
+ }
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ connector->funcs->save(connector);
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return 0;
+}
+
+/**
+ * psb_restore_display_registers - restore lost register state
+ * @dev: our DRM device
+ *
+ * Restore register state that was lost during suspend and resume.
+ */
+static int psb_restore_display_registers(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ int pp_stat;
+
+ /* Display arbitration + watermarks */
+ PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
+ PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
+ PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
+ PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
+ PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
+ PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
+ PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
+ PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
+
+ /*make sure VGA plane is off. it initializes to on after reset!*/
+ PSB_WVDC32(0x80000000, VGACNTRL);
+
+ mutex_lock(&dev->mode_config.mutex);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ if (drm_helper_crtc_in_use(crtc))
+ crtc->funcs->restore(crtc);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ connector->funcs->restore(connector);
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (dev_priv->iLVDS_enable) {
+ /*shutdown the panel*/
+ PSB_WVDC32(0, PP_CONTROL);
+ do {
+ pp_stat = PSB_RVDC32(PP_STATUS);
+ } while (pp_stat & 0x80000000);
+
+ /* Turn off the plane */
+ PSB_WVDC32(0x58000000, DSPACNTR);
+ PSB_WVDC32(0, DSPASURF);/*trigger the plane disable*/
+ /* Wait ~4 ticks */
+ msleep(4);
+ /* Turn off pipe */
+ PSB_WVDC32(0x0, PIPEACONF);
+ /* Wait ~8 ticks */
+ msleep(8);
+
+ /* Turn off PLLs */
+ PSB_WVDC32(0, MRST_DPLL_A);
+ } else {
+ PSB_WVDC32(DPI_SHUT_DOWN, DPI_CONTROL_REG);
+ PSB_WVDC32(0x0, PIPEACONF);
+ PSB_WVDC32(0x2faf0000, BLC_PWM_CTL);
+ while (REG_READ(0x70008) & 0x40000000)
+ cpu_relax();
+ while ((PSB_RVDC32(GEN_FIFO_STAT_REG) & DPI_FIFO_EMPTY)
+ != DPI_FIFO_EMPTY)
+ cpu_relax();
+ PSB_WVDC32(0, DEVICE_READY_REG);
+ }
+ return 0;
+}
+
+static int psb_power_down(struct drm_device *dev)
+{
+ return 0;
+}
+
+static int psb_power_up(struct drm_device *dev)
+{
+ return 0;
+}
+
+static void psb_get_core_freq(struct drm_device *dev)
+{
+ uint32_t clock;
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
+ /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
+
+ pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+ pci_read_config_dword(pci_root, 0xD4, &clock);
+ pci_dev_put(pci_root);
+
+ switch (clock & 0x07) {
+ case 0:
+ dev_priv->core_freq = 100;
+ break;
+ case 1:
+ dev_priv->core_freq = 133;
+ break;
+ case 2:
+ dev_priv->core_freq = 150;
+ break;
+ case 3:
+ dev_priv->core_freq = 178;
+ break;
+ case 4:
+ dev_priv->core_freq = 200;
+ break;
+ case 5:
+ case 6:
+ case 7:
+		dev_priv->core_freq = 266;
+		break;
+	default:
+ dev_priv->core_freq = 0;
+ }
+}
+
+static int psb_chip_setup(struct drm_device *dev)
+{
+ psb_get_core_freq(dev);
+ gma_intel_opregion_init(dev);
+ psb_intel_init_bios(dev);
+ return 0;
+}
+
+const struct psb_ops psb_chip_ops = {
+ .name = "Poulsbo",
+ .accel_2d = 1,
+ .pipes = 2,
+ .crtcs = 2,
+ .sgx_offset = PSB_SGX_OFFSET,
+ .chip_setup = psb_chip_setup,
+
+ .crtc_helper = &psb_intel_helper_funcs,
+ .crtc_funcs = &psb_intel_crtc_funcs,
+
+ .output_init = psb_output_init,
+
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ .backlight_init = psb_backlight_init,
+#endif
+
+ .init_pm = psb_init_pm,
+ .save_regs = psb_save_display_registers,
+ .restore_regs = psb_restore_display_registers,
+ .power_down = psb_power_down,
+ .power_up = psb_power_up,
+};
+
diff --git a/drivers/staging/gma500/psb_drm.h b/drivers/staging/gma500/psb_drm.h
index 49ffdd5b90e..0da84683568 100644
--- a/drivers/staging/gma500/psb_drm.h
+++ b/drivers/staging/gma500/psb_drm.h
@@ -1,5 +1,5 @@
/**************************************************************************
- * Copyright (c) 2007, Intel Corporation.
+ * Copyright (c) 2007-2011, Intel Corporation.
* All Rights Reserved.
* Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
* All Rights Reserved.
@@ -22,84 +22,8 @@
#ifndef _PSB_DRM_H_
#define _PSB_DRM_H_
-#if defined(__linux__) && !defined(__KERNEL__)
-#include<stdint.h>
-#include <linux/types.h>
-#include "drm_mode.h"
-#endif
-
-#define DRM_PSB_SAREA_MAJOR 0
-#define DRM_PSB_SAREA_MINOR 2
-#define PSB_FIXED_SHIFT 16
-
#define PSB_NUM_PIPE 3
-/*
- * Public memory types.
- */
-
-typedef s32 psb_fixed;
-typedef u32 psb_ufixed;
-
-static inline s32 psb_int_to_fixed(int a)
-{
- return a * (1 << PSB_FIXED_SHIFT);
-}
-
-static inline u32 psb_unsigned_to_ufixed(unsigned int a)
-{
- return a << PSB_FIXED_SHIFT;
-}
-
-/*Status of the command sent to the gfx device.*/
-typedef enum {
- DRM_CMD_SUCCESS,
- DRM_CMD_FAILED,
- DRM_CMD_HANG
-} drm_cmd_status_t;
-
-struct drm_psb_scanout {
- u32 buffer_id; /* DRM buffer object ID */
- u32 rotation; /* Rotation as in RR_rotation definitions */
- u32 stride; /* Buffer stride in bytes */
- u32 depth; /* Buffer depth in bits (NOT) bpp */
- u32 width; /* Buffer width in pixels */
- u32 height; /* Buffer height in lines */
- s32 transform[3][3]; /* Buffer composite transform */
- /* (scaling, rot, reflect) */
-};
-
-#define DRM_PSB_SAREA_OWNERS 16
-#define DRM_PSB_SAREA_OWNER_2D 0
-#define DRM_PSB_SAREA_OWNER_3D 1
-
-#define DRM_PSB_SAREA_SCANOUTS 3
-
-struct drm_psb_sarea {
- /* Track changes of this data structure */
-
- u32 major;
- u32 minor;
-
- /* Last context to touch part of hw */
- u32 ctx_owners[DRM_PSB_SAREA_OWNERS];
-
- /* Definition of front- and rotated buffers */
- u32 num_scanouts;
- struct drm_psb_scanout scanouts[DRM_PSB_SAREA_SCANOUTS];
-
- int planeA_x;
- int planeA_y;
- int planeA_w;
- int planeA_h;
- int planeB_x;
- int planeB_y;
- int planeB_w;
- int planeB_h;
- /* Number of active scanouts */
- u32 num_active_scanouts;
-};
-
#define PSB_GPU_ACCESS_READ (1ULL << 32)
#define PSB_GPU_ACCESS_WRITE (1ULL << 33)
#define PSB_GPU_ACCESS_MASK (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)
@@ -194,10 +118,10 @@ struct drm_psb_register_rw_arg {
u32 OGAMC3;
u32 OGAMC4;
u32 OGAMC5;
- u32 IEP_ENABLED;
- u32 IEP_BLE_MINMAX;
- u32 IEP_BSSCC_CONTROL;
- u32 b_wait_vblank;
+ u32 IEP_ENABLED;
+ u32 IEP_BLE_MINMAX;
+ u32 IEP_BSSCC_CONTROL;
+ u32 b_wait_vblank;
} overlay;
u32 sprite_enable_mask;
@@ -221,44 +145,27 @@ struct drm_psb_register_rw_arg {
/* Controlling the kernel modesetting buffers */
-#define DRM_PSB_KMS_OFF 0x00
-#define DRM_PSB_KMS_ON 0x01
-#define DRM_PSB_VT_LEAVE 0x02
-#define DRM_PSB_VT_ENTER 0x03
-#define DRM_PSB_EXTENSION 0x06
#define DRM_PSB_SIZES 0x07
#define DRM_PSB_FUSE_REG 0x08
-#define DRM_PSB_VBT 0x09
#define DRM_PSB_DC_STATE 0x0A
#define DRM_PSB_ADB 0x0B
#define DRM_PSB_MODE_OPERATION 0x0C
#define DRM_PSB_STOLEN_MEMORY 0x0D
#define DRM_PSB_REGISTER_RW 0x0E
-#define DRM_PSB_GTT_MAP 0x0F
-#define DRM_PSB_GTT_UNMAP 0x10
-#define DRM_PSB_GETPAGEADDRS 0x11
-/**
+
+/*
* NOTE: Add new commands here, but increment
* the values below and increment their
* corresponding defines where they're
* defined elsewhere.
*/
-#define DRM_PVR_RESERVED1 0x12
-#define DRM_PVR_RESERVED2 0x13
-#define DRM_PVR_RESERVED3 0x14
-#define DRM_PVR_RESERVED4 0x15
-#define DRM_PVR_RESERVED5 0x16
-
-#define DRM_PSB_HIST_ENABLE 0x17
-#define DRM_PSB_HIST_STATUS 0x18
-#define DRM_PSB_UPDATE_GUARD 0x19
-#define DRM_PSB_INIT_COMM 0x1A
+
+#define DRM_PSB_GEM_CREATE 0x10
+#define DRM_PSB_2D_OP 0x11
+#define DRM_PSB_GEM_MMAP 0x12
#define DRM_PSB_DPST 0x1B
#define DRM_PSB_GAMMA 0x1C
#define DRM_PSB_DPST_BL 0x1D
-
-#define DRM_PVR_RESERVED6 0x1E
-
#define DRM_PSB_GET_PIPE_FROM_CRTC_ID 0x1F
#define PSB_MODE_OPERATION_MODE_VALID 0x01
@@ -272,4 +179,41 @@ struct drm_psb_get_pipe_from_crtc_id_arg {
u32 pipe;
};
+/* FIXME: move this into a medfield header once we are sure it isn't needed for an
+ ioctl */
+struct psb_drm_dpu_rect {
+ int x, y;
+ int width, height;
+};
+
+struct drm_psb_gem_create {
+ __u64 size;
+ __u32 handle;
+ __u32 flags;
+#define PSB_GEM_CREATE_STOLEN 1 /* Stolen memory can be used */
+};
+
+#define PSB_2D_OP_BUFLEN 16
+
+struct drm_psb_2d_op {
+ __u32 src; /* Handles, only src supported right now */
+ __u32 dst;
+ __u32 mask;
+ __u32 pat;
+ __u32 size; /* In dwords of command */
+ __u32 spare; /* And bumps array to u64 align */
+ __u32 cmd[PSB_2D_OP_BUFLEN];
+};
+
+struct drm_psb_gem_mmap {
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+};
+
#endif
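For the new GEM ioctls, the userspace flow is the usual create-then-mmap-by-fake-offset dance. A hedged sketch, assuming the DRM_PSB_GEM_* command numbers and structures above are exposed to userspace via this header and the standard DRM_COMMAND_BASE/DRM_IOWR macros from libdrm:

/* Sketch only: userspace side of the GEM create/mmap path; error
 * handling trimmed.  The ioctl wrappers are built here from the command
 * numbers above rather than taken from a kernel-private header. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm.h>		/* DRM_COMMAND_BASE, DRM_IOWR */

#define EX_IOCTL_GEM_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_GEM_CREATE, struct drm_psb_gem_create)
#define EX_IOCTL_GEM_MMAP \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_PSB_GEM_MMAP, struct drm_psb_gem_mmap)

static void *example_create_and_map(int drm_fd, uint64_t size)
{
	struct drm_psb_gem_create create = { .size = size, .flags = 0 };
	struct drm_psb_gem_mmap map = { 0 };
	void *ptr;

	if (ioctl(drm_fd, EX_IOCTL_GEM_CREATE, &create))
		return NULL;
	map.handle = create.handle;
	if (ioctl(drm_fd, EX_IOCTL_GEM_MMAP, &map))
		return NULL;
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   drm_fd, (off_t)map.offset);
	return ptr == MAP_FAILED ? NULL : ptr;
}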
diff --git a/drivers/staging/gma500/psb_drv.c b/drivers/staging/gma500/psb_drv.c
index aa87b1b6a44..b2cdce7b97e 100644
--- a/drivers/staging/gma500/psb_drv.c
+++ b/drivers/staging/gma500/psb_drv.c
@@ -1,5 +1,5 @@
/**************************************************************************
- * Copyright (c) 2007, Intel Corporation.
+ * Copyright (c) 2007-2011, Intel Corporation.
* All Rights Reserved.
* Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
* All Rights Reserved.
@@ -23,44 +23,65 @@
#include <drm/drm.h>
#include "psb_drm.h"
#include "psb_drv.h"
-#include "psb_fb.h"
+#include "framebuffer.h"
#include "psb_reg.h"
#include "psb_intel_reg.h"
-#include "psb_intel_bios.h"
+#include "intel_bios.h"
+#include "mid_bios.h"
+#include "mdfld_dsi_dbi.h"
#include <drm/drm_pciids.h>
-#include "psb_powermgmt.h"
+#include "power.h"
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <acpi/video.h>
-int drm_psb_debug;
static int drm_psb_trap_pagefaults;
int drm_psb_no_fb;
static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(no_fb, "Disable FBdev");
MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
-module_param_named(debug, drm_psb_debug, int, 0600);
module_param_named(no_fb, drm_psb_no_fb, int, 0600);
module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
-static struct pci_device_id pciidlist[] = {
- { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108 },
- { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109 },
- { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
- { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
- { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
- { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
- { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
- { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
- { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
- { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MRST_4100},
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+ { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+#if defined(CONFIG_DRM_PSB_MRST)
+ { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+ { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+ { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+ { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+ { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+ { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+ { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+ { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+#endif
+#if defined(CONFIG_DRM_PSB_MFLD)
+ { 0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ { 0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ { 0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ { 0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ { 0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ { 0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ { 0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+ { 0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+#endif
+#if defined(CONFIG_DRM_PSB_CDV)
+ { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+#endif
{ 0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -69,10 +90,6 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
* Standard IOCTLs.
*/
-#define DRM_IOCTL_PSB_KMS_OFF \
- DRM_IO(DRM_PSB_KMS_OFF + DRM_COMMAND_BASE)
-#define DRM_IOCTL_PSB_KMS_ON \
- DRM_IO(DRM_PSB_KMS_ON + DRM_COMMAND_BASE)
#define DRM_IOCTL_PSB_SIZES \
DRM_IOR(DRM_PSB_SIZES + DRM_COMMAND_BASE, \
struct drm_psb_sizes_arg)
@@ -104,6 +121,15 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID \
DRM_IOWR(DRM_PSB_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
struct drm_psb_get_pipe_from_crtc_id_arg)
+#define DRM_IOCTL_PSB_GEM_CREATE \
+ DRM_IOWR(DRM_PSB_GEM_CREATE + DRM_COMMAND_BASE, \
+ struct drm_psb_gem_create)
+#define DRM_IOCTL_PSB_2D_OP \
+ DRM_IOW(DRM_PSB_2D_OP + DRM_COMMAND_BASE, \
+ struct drm_psb_2d_op)
+#define DRM_IOCTL_PSB_GEM_MMAP \
+ DRM_IOWR(DRM_PSB_GEM_MMAP + DRM_COMMAND_BASE, \
+ struct drm_psb_gem_mmap)
static int psb_sizes_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -128,11 +154,6 @@ static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
[DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
static struct drm_ioctl_desc psb_ioctls[] = {
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_OFF, psbfb_kms_off_ioctl,
- DRM_ROOT_ONLY),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_KMS_ON,
- psbfb_kms_on_ioctl,
- DRM_ROOT_ONLY),
PSB_IOCTL_DEF(DRM_IOCTL_PSB_SIZES, psb_sizes_ioctl, DRM_AUTH),
PSB_IOCTL_DEF(DRM_IOCTL_PSB_DC_STATE, psb_dc_state_ioctl, DRM_AUTH),
PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
@@ -147,7 +168,12 @@ static struct drm_ioctl_desc psb_ioctls[] = {
PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID,
psb_intel_get_pipe_from_crtc_id, 0),
-
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_CREATE, psb_gem_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_2D_OP, psb_accel_ioctl,
+			DRM_UNLOCKED | DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_MMAP, psb_gem_mmap_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
};
static void psb_lastclose(struct drm_device *dev)
@@ -160,262 +186,17 @@ static void psb_do_takedown(struct drm_device *dev)
/* FIXME: do we need to clean up the gtt here ? */
}
-void mrst_get_fuse_settings(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
- uint32_t fuse_value = 0;
- uint32_t fuse_value_tmp = 0;
-
-#define FB_REG06 0xD0810600
-#define FB_MIPI_DISABLE (1 << 11)
-#define FB_REG09 0xD0810900
-#define FB_REG09 0xD0810900
-#define FB_SKU_MASK 0x7000
-#define FB_SKU_SHIFT 12
-#define FB_SKU_100 0
-#define FB_SKU_100L 1
-#define FB_SKU_83 2
- pci_write_config_dword(pci_root, 0xD0, FB_REG06);
- pci_read_config_dword(pci_root, 0xD4, &fuse_value);
-
- dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
-
- DRM_INFO("internal display is %s\n",
- dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
-
- /*prevent Runtime suspend at start*/
- if (dev_priv->iLVDS_enable) {
- dev_priv->is_lvds_on = true;
- dev_priv->is_mipi_on = false;
- }
- else {
- dev_priv->is_mipi_on = true;
- dev_priv->is_lvds_on = false;
- }
-
- dev_priv->video_device_fuse = fuse_value;
-
- pci_write_config_dword(pci_root, 0xD0, FB_REG09);
- pci_read_config_dword(pci_root, 0xD4, &fuse_value);
-
- DRM_INFO("SKU values is 0x%x. \n", fuse_value);
- fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
-
- dev_priv->fuse_reg_value = fuse_value;
-
- switch (fuse_value_tmp) {
- case FB_SKU_100:
- dev_priv->core_freq = 200;
- break;
- case FB_SKU_100L:
- dev_priv->core_freq = 100;
- break;
- case FB_SKU_83:
- dev_priv->core_freq = 166;
- break;
- default:
- DRM_ERROR("Invalid SKU values, SKU value = 0x%08x\n", fuse_value_tmp);
- dev_priv->core_freq = 0;
- }
- DRM_INFO("LNC core clk is %dMHz.\n", dev_priv->core_freq);
- pci_dev_put(pci_root);
-}
-
-void mid_get_pci_revID (struct drm_psb_private *dev_priv)
-{
- uint32_t platform_rev_id = 0;
- struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
-
- /*get the revison ID, B0:D2:F0;0x08 */
- pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
- dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
- pci_dev_put(pci_gfx_root);
- PSB_DEBUG_ENTRY("platform_rev_id is %x\n", dev_priv->platform_rev_id);
-}
-
-void mrst_get_vbt_data(struct drm_psb_private *dev_priv)
-{
- struct mrst_vbt *vbt = &dev_priv->vbt_data;
- u32 platform_config_address;
- u16 new_size;
- u8 *vbt_virtual;
- u8 bpi;
- u8 number_desc = 0;
- struct mrst_timing_info *dp_ti = &dev_priv->gct_data.DTD;
- struct gct_r10_timing_info ti;
- void *pGCT;
- struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
-
- /*get the address of the platform config vbt, B0:D2:F0;0xFC */
- pci_read_config_dword(pci_gfx_root, 0xFC, &platform_config_address);
- pci_dev_put(pci_gfx_root);
- DRM_INFO("drm platform config address is %x\n",
- platform_config_address);
-
- /* check for platform config address == 0. */
- /* this means fw doesn't support vbt */
-
- if (platform_config_address == 0) {
- vbt->size = 0;
- return;
- }
-
- /* get the virtual address of the vbt */
- vbt_virtual = ioremap(platform_config_address, sizeof(*vbt));
-
- memcpy(vbt, vbt_virtual, sizeof(*vbt));
- iounmap(vbt_virtual); /* Free virtual address space */
-
- printk(KERN_ALERT "GCT revision is %x\n", vbt->revision);
-
- switch (vbt->revision) {
- case 0:
- vbt->mrst_gct = NULL;
- vbt->mrst_gct = \
- ioremap(platform_config_address + sizeof(*vbt) - 4,
- vbt->size - sizeof(*vbt) + 4);
- pGCT = vbt->mrst_gct;
- bpi = ((struct mrst_gct_v1 *)pGCT)->PD.BootPanelIndex;
- dev_priv->gct_data.bpi = bpi;
- dev_priv->gct_data.pt =
- ((struct mrst_gct_v1 *)pGCT)->PD.PanelType;
- memcpy(&dev_priv->gct_data.DTD,
- &((struct mrst_gct_v1 *)pGCT)->panel[bpi].DTD,
- sizeof(struct mrst_timing_info));
- dev_priv->gct_data.Panel_Port_Control =
- ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
- ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
- break;
- case 1:
- vbt->mrst_gct = NULL;
- vbt->mrst_gct = \
- ioremap(platform_config_address + sizeof(*vbt) - 4,
- vbt->size - sizeof(*vbt) + 4);
- pGCT = vbt->mrst_gct;
- bpi = ((struct mrst_gct_v2 *)pGCT)->PD.BootPanelIndex;
- dev_priv->gct_data.bpi = bpi;
- dev_priv->gct_data.pt =
- ((struct mrst_gct_v2 *)pGCT)->PD.PanelType;
- memcpy(&dev_priv->gct_data.DTD,
- &((struct mrst_gct_v2 *)pGCT)->panel[bpi].DTD,
- sizeof(struct mrst_timing_info));
- dev_priv->gct_data.Panel_Port_Control =
- ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
- ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
- break;
- case 0x10:
- /*header definition changed from rev 01 (v2) to rev 10h. */
- /*so, some values have changed location*/
- new_size = vbt->checksum; /*checksum contains lo size byte*/
- /*LSB of mrst_gct contains hi size byte*/
- new_size |= ((0xff & (unsigned int)vbt->mrst_gct)) << 8;
-
- vbt->checksum = vbt->size; /*size contains the checksum*/
- if (new_size > 0xff)
- vbt->size = 0xff; /*restrict size to 255*/
- else
- vbt->size = new_size;
-
- /* number of descriptors defined in the GCT */
- number_desc = ((0xff00 & (unsigned int)vbt->mrst_gct)) >> 8;
- bpi = ((0xff0000 & (unsigned int)vbt->mrst_gct)) >> 16;
- vbt->mrst_gct = NULL;
- vbt->mrst_gct = \
- ioremap(platform_config_address + GCT_R10_HEADER_SIZE,
- GCT_R10_DISPLAY_DESC_SIZE * number_desc);
- pGCT = vbt->mrst_gct;
- pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
- dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
-
- /*copy the GCT display timings into a temp structure*/
- memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
-
- /*now copy the temp struct into the dev_priv->gct_data*/
- dp_ti->pixel_clock = ti.pixel_clock;
- dp_ti->hactive_hi = ti.hactive_hi;
- dp_ti->hactive_lo = ti.hactive_lo;
- dp_ti->hblank_hi = ti.hblank_hi;
- dp_ti->hblank_lo = ti.hblank_lo;
- dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
- dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
- dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
- dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
- dp_ti->vactive_hi = ti.vactive_hi;
- dp_ti->vactive_lo = ti.vactive_lo;
- dp_ti->vblank_hi = ti.vblank_hi;
- dp_ti->vblank_lo = ti.vblank_lo;
- dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
- dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
- dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
- dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
-
- /*mov the MIPI_Display_Descriptor data from GCT to dev priv*/
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
- *((u8 *)pGCT + 0x0d);
- dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
- (*((u8 *)pGCT + 0x0e)) << 8;
- break;
- default:
- printk(KERN_ERR "Unknown revision of GCT!\n");
- vbt->size = 0;
- }
-}
-
-static void psb_get_core_freq(struct drm_device *dev)
-{
- uint32_t clock;
- struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- /*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
- /*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
-
- pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
- pci_read_config_dword(pci_root, 0xD4, &clock);
- pci_dev_put(pci_root);
-
- switch (clock & 0x07) {
- case 0:
- dev_priv->core_freq = 100;
- break;
- case 1:
- dev_priv->core_freq = 133;
- break;
- case 2:
- dev_priv->core_freq = 150;
- break;
- case 3:
- dev_priv->core_freq = 178;
- break;
- case 4:
- dev_priv->core_freq = 200;
- break;
- case 5:
- case 6:
- case 7:
- dev_priv->core_freq = 266;
- default:
- dev_priv->core_freq = 0;
- }
-}
-
static int psb_do_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
- struct psb_gtt *pg = dev_priv->pg;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_gtt *pg = &dev_priv->gtt;
uint32_t stolen_gtt;
- uint32_t tt_start;
- uint32_t tt_pages;
int ret = -ENOMEM;
if (pg->mmu_gatt_start & 0x0FFFFFFF) {
- DRM_ERROR("Gatt must be 256M aligned. This is a bug.\n");
+ dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
ret = -EINVAL;
goto out_err;
}
@@ -448,24 +229,17 @@ static int psb_do_init(struct drm_device *dev)
spin_lock_init(&dev_priv->irqmask_lock);
-
- tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
- pg->gatt_pages : PSB_TT_PRIV0_PLIMIT;
- tt_start = dev_priv->gatt_free_offset - pg->mmu_gatt_start;
- tt_pages -= tt_start >> PAGE_SHIFT;
- /* FIXME: can we kill ta_mem_size ? */
- dev_priv->sizes.ta_mem_size = 0;
+ mutex_init(&dev_priv->mutex_2d);
PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
PSB_RSGX32(PSB_CR_BIF_BANK1);
- PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
PSB_CR_BIF_CTRL);
psb_spank(dev_priv);
/* mmu_gatt ?? */
- PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
-
+ PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
return 0;
out_err:
psb_do_takedown(dev);
@@ -474,19 +248,21 @@ out_err:
static int psb_driver_unload(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
/* Kill vblank etc here */
- psb_backlight_exit(); /*writes minimum value to backlight HW reg */
+ gma_backlight_exit(dev);
if (drm_psb_no_fb == 0)
psb_modeset_cleanup(dev);
if (dev_priv) {
psb_lid_timer_takedown(dev_priv);
+ gma_intel_opregion_exit(dev);
+ if (dev_priv->ops->chip_teardown)
+ dev_priv->ops->chip_teardown(dev);
psb_do_takedown(dev);
@@ -495,7 +271,7 @@ static int psb_driver_unload(struct drm_device *dev)
dev_priv->pf_pd = NULL;
}
if (dev_priv->mmu) {
- struct psb_gtt *pg = dev_priv->pg;
+ struct psb_gtt *pg = &dev_priv->gtt;
down_read(&pg->sem);
psb_mmu_remove_pfn_sequence(
@@ -549,17 +325,12 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv == NULL)
return -ENOMEM;
- if (IS_MRST(dev))
- dev_priv->num_pipe = 1;
- else
- dev_priv->num_pipe = 2;
-
+ dev_priv->ops = (struct psb_ops *)chipset;
dev_priv->dev = dev;
-
dev->dev_private = (void *) dev_priv;
- dev_priv->chipset = chipset;
- PSB_DEBUG_INIT("Mapping MMIO\n");
+ dev_priv->num_pipe = dev_priv->ops->pipes;
+
resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
dev_priv->vdc_reg =
@@ -567,25 +338,14 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (!dev_priv->vdc_reg)
goto out_err;
- if (IS_MRST(dev))
- dev_priv->sgx_reg = ioremap(resource_start + MRST_SGX_OFFSET,
- PSB_SGX_SIZE);
- else
- dev_priv->sgx_reg = ioremap(resource_start + PSB_SGX_OFFSET,
+ dev_priv->sgx_reg = ioremap(resource_start + dev_priv->ops->sgx_offset,
PSB_SGX_SIZE);
-
if (!dev_priv->sgx_reg)
goto out_err;
- if (IS_MRST(dev)) {
- mrst_get_fuse_settings(dev);
- mrst_get_vbt_data(dev_priv);
- mid_get_pci_revID(dev_priv);
- } else {
- psb_get_core_freq(dev);
- psb_intel_opregion_init(dev);
- psb_intel_init_bios(dev);
- }
+ ret = dev_priv->ops->chip_setup(dev);
+ if (ret)
+ goto out_err;
/* Init OSPM support */
gma_power_init(dev);
@@ -608,7 +368,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (!dev_priv->mmu)
goto out_err;
- pg = dev_priv->pg;
+ pg = &dev_priv->gtt;
tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
(pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
@@ -659,6 +419,10 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
dev->driver->get_vblank_counter = psb_get_vblank_counter;
+#if defined(CONFIG_DRM_PSB_MFLD)
+ /* FIXME: this is not the right place for this stuff ! */
+ mdfld_output_setup(dev);
+#endif
if (drm_psb_no_fb == 0) {
psb_modeset_init(dev);
psb_fbdev_init(dev);
@@ -672,7 +436,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
switch (psb_intel_output->type) {
case INTEL_OUTPUT_LVDS:
- ret = psb_backlight_init(dev);
+ case INTEL_OUTPUT_MIPI:
+ ret = gma_backlight_init(dev);
break;
}
}
@@ -685,7 +450,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
pm_runtime_set_active(&dev->pdev->dev);
#endif
/*Intel drm driver load is done, continue doing pvr load*/
- DRM_DEBUG("Pvr driver load\n");
return 0;
out_err:
psb_driver_unload(dev);
@@ -720,6 +484,11 @@ static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
struct drm_psb_dc_state_arg *arg =
(struct drm_psb_dc_state_arg *)data;
+
+ /* Double check MRST case */
+ if (IS_MRST(dev) || IS_MFLD(dev))
+ return -EOPNOTSUPP;
+
flags = arg->flags;
obj_id = arg->obj_id;
@@ -727,7 +496,7 @@ static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
obj = drm_mode_object_find(dev, obj_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
- DRM_DEBUG("Invalid CRTC object.\n");
+ dev_dbg(dev->dev, "Invalid CRTC object.\n");
return -EINVAL;
}
@@ -747,7 +516,7 @@ static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
obj = drm_mode_object_find(dev, obj_id,
DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
- DRM_DEBUG("Invalid connector id.\n");
+ dev_dbg(dev->dev, "Invalid connector id.\n");
return -EINVAL;
}
@@ -759,23 +528,27 @@ static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
return 0;
}
-
- DRM_DEBUG("Bad flags 0x%x\n", flags);
return -EINVAL;
}
+static inline void get_brightness(struct backlight_device *bd)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ if (bd) {
+ bd->props.brightness = bd->ops->get_brightness(bd);
+ backlight_update_status(bd);
+ }
+#endif
+}
+
static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_psb_private *dev_priv = psb_priv(dev);
uint32_t *arg = data;
- struct backlight_device bd;
- dev_priv->blc_adj2 = *arg;
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- bd.props.brightness = psb_get_brightness(&bd);
- psb_set_brightness(&bd);
-#endif
+ dev_priv->blc_adj2 = *arg;
+ get_brightness(dev_priv->backlight_device);
return 0;
}
@@ -784,13 +557,9 @@ static int psb_adb_ioctl(struct drm_device *dev, void *data,
{
struct drm_psb_private *dev_priv = psb_priv(dev);
uint32_t *arg = data;
- struct backlight_device bd;
- dev_priv->blc_adj1 = *arg;
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- bd.props.brightness = psb_get_brightness(&bd);
- psb_set_brightness(&bd);
-#endif
+ dev_priv->blc_adj1 = *arg;
+ get_brightness(dev_priv->backlight_device);
return 0;
}
@@ -838,7 +607,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
obj_id = lut_arg->output_id;
obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
- DRM_DEBUG("Invalid Connector object.\n");
+ dev_dbg(dev->dev, "Invalid Connector object.\n");
return -EINVAL;
}
@@ -879,7 +648,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
case PSB_MODE_OPERATION_SET_DC_BASE:
obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_FB);
if (!obj) {
- DRM_ERROR("Invalid FB id %d\n", obj_id);
+ dev_dbg(dev->dev, "Invalid FB id %d\n", obj_id);
return -EINVAL;
}
@@ -951,7 +720,7 @@ mode_op_out:
return ret;
default:
- DRM_DEBUG("Unsupported psb mode operation");
+ dev_dbg(dev->dev, "Unsupported psb mode operation\n");
return -EOPNOTSUPP;
}
@@ -970,6 +739,7 @@ static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
return 0;
}
+/* FIXME: needs Medfield changes */
static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1342,9 +1112,6 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
struct drm_device *dev = file_priv->minor->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
static unsigned int runtime_allowed;
- unsigned int nr = DRM_IOCTL_NR(cmd);
-
- DRM_DEBUG("cmd = %x, nr = %x\n", cmd, nr);
if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
runtime_allowed++;
@@ -1352,7 +1119,6 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
dev_priv->rpm_enabled = 1;
}
return drm_ioctl(filp, cmd, arg);
-
/* FIXME: do we need to wrap the other side of this */
}
@@ -1371,6 +1137,8 @@ static void psb_remove(struct pci_dev *pdev)
}
static const struct dev_pm_ops psb_pm_ops = {
+ .resume = gma_power_resume,
+ .suspend = gma_power_suspend,
.runtime_suspend = psb_runtime_suspend,
.runtime_resume = psb_runtime_resume,
.runtime_idle = psb_runtime_idle,
@@ -1384,7 +1152,7 @@ static struct vm_operations_struct psb_gem_vm_ops = {
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
- DRIVER_IRQ_VBL | DRIVER_MODESET| DRIVER_GEM ,
+ DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM,
.load = psb_driver_load,
.unload = psb_driver_unload,
@@ -1432,20 +1200,16 @@ static struct drm_driver driver = {
static struct pci_driver psb_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
- .resume = gma_power_resume,
- .suspend = gma_power_suspend,
.probe = psb_probe,
.remove = psb_remove,
-#ifdef CONFIG_PM
.driver.pm = &psb_pm_ops,
-#endif
};
static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
/* MLD Added this from Inaky's patch */
if (pci_enable_msi(pdev))
- DRM_ERROR("Enable MSI failed!\n");
+ dev_warn(&pdev->dev, "Enable MSI failed!\n");
return drm_get_pci_dev(pdev, ent, &driver);
}
@@ -1462,6 +1226,6 @@ static void __exit psb_exit(void)
late_initcall(psb_init);
module_exit(psb_exit);
-MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_AUTHOR("Alan Cox <alan@linux.intel.com> and others");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gma500/psb_drv.h b/drivers/staging/gma500/psb_drv.h
index e19a4547875..fd4732dd783 100644
--- a/drivers/staging/gma500/psb_drv.h
+++ b/drivers/staging/gma500/psb_drv.h
@@ -1,5 +1,5 @@
/**************************************************************************
- * Copyright (c) 2007-2008, Intel Corporation.
+ * Copyright (c) 2007-2011, Intel Corporation.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
@@ -20,52 +20,47 @@
#ifndef _PSB_DRV_H_
#define _PSB_DRV_H_
-#include <linux/version.h>
#include <linux/kref.h>
#include <drm/drmP.h>
#include "drm_global.h"
+#include "gem_glue.h"
#include "psb_drm.h"
#include "psb_reg.h"
#include "psb_intel_drv.h"
-#include "psb_gtt.h"
-#include "psb_powermgmt.h"
+#include "gtt.h"
+#include "power.h"
#include "mrst.h"
+#include "medfield.h"
-/*Append new drm mode definition here, align with libdrm definition*/
-#define DRM_MODE_SCALE_NO_SCALE 2
+/* Append new drm mode definition here, align with libdrm definition */
+#define DRM_MODE_SCALE_NO_SCALE 2
enum {
- CHIP_PSB_8108 = 0,
- CHIP_PSB_8109 = 1,
- CHIP_MRST_4100 = 2,
+ CHIP_PSB_8108 = 0, /* Poulsbo */
+ CHIP_PSB_8109 = 1, /* Poulsbo */
+ CHIP_MRST_4100 = 2, /* Moorestown/Oaktrail */
+ CHIP_MFLD_0130 = 3, /* Medfield */
};
#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
+#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
/*
- *Hardware bugfixes
+ * Driver definitions
*/
-#define DRIVER_NAME "pvrsrvkm"
-#define DRIVER_DESC "drm driver for the Intel GMA500"
-#define DRIVER_AUTHOR "Intel Corporation"
+#define DRIVER_NAME "gma500"
+#define DRIVER_DESC "DRM driver for the Intel GMA500"
-#define PSB_DRM_DRIVER_DATE "2009-03-10"
-#define PSB_DRM_DRIVER_MAJOR 8
-#define PSB_DRM_DRIVER_MINOR 1
+#define PSB_DRM_DRIVER_DATE "2011-06-06"
+#define PSB_DRM_DRIVER_MAJOR 1
+#define PSB_DRM_DRIVER_MINOR 0
#define PSB_DRM_DRIVER_PATCHLEVEL 0
/*
- *TTM driver private offsets.
+ * Hardware offsets
*/
-
-#define DRM_PSB_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
-#define PSB_OBJECT_HASH_ORDER 13
-#define PSB_FILE_OBJECT_HASH_ORDER 12
-#define PSB_BO_HASH_ORDER 12
-
#define PSB_VDC_OFFSET 0x00000000
#define PSB_VDC_SIZE 0x000080000
#define MRST_MMIO_SIZE 0x0000C0000
@@ -73,42 +68,52 @@ enum {
#define PSB_SGX_SIZE 0x8000
#define PSB_SGX_OFFSET 0x00040000
#define MRST_SGX_OFFSET 0x00080000
+/*
+ * PCI resource identifiers
+ */
#define PSB_MMIO_RESOURCE 0
#define PSB_GATT_RESOURCE 2
#define PSB_GTT_RESOURCE 3
+/*
+ * PCI configuration
+ */
#define PSB_GMCH_CTRL 0x52
#define PSB_BSM 0x5C
#define _PSB_GMCH_ENABLED 0x4
#define PSB_PGETBL_CTL 0x2020
#define _PSB_PGETBL_ENABLED 0x00000001
#define PSB_SGX_2D_SLAVE_PORT 0x4000
+
+/* To get rid of */
#define PSB_TT_PRIV0_LIMIT (256*1024*1024)
#define PSB_TT_PRIV0_PLIMIT (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
-#define PSB_NUM_VALIDATE_BUFFERS 2048
/*
- *Flags for external memory type field.
+ * SGX side MMU definitions (these can probably go)
*/
+/*
+ * Flags for external memory type field.
+ */
#define PSB_MMU_CACHED_MEMORY 0x0001 /* Bind to MMU only */
#define PSB_MMU_RO_MEMORY 0x0002 /* MMU RO memory */
#define PSB_MMU_WO_MEMORY 0x0004 /* MMU WO memory */
-
/*
- *PTE's and PDE's
+ * PTE's and PDE's
*/
-
#define PSB_PDE_MASK 0x003FFFFF
#define PSB_PDE_SHIFT 22
#define PSB_PTE_SHIFT 12
-
+/*
+ * Cache control
+ */
#define PSB_PTE_VALID 0x0001 /* PTE / PDE valid */
#define PSB_PTE_WO 0x0002 /* Write only */
#define PSB_PTE_RO 0x0004 /* Read only */
#define PSB_PTE_CACHED 0x0008 /* CPU cache coherent */
/*
- *VDC registers and bits
+ * VDC registers and bits
*/
#define PSB_MSVDX_CLOCKGATING 0x2064
#define PSB_TOPAZ_CLOCKGATING 0x2068
@@ -130,8 +135,12 @@ enum {
#define _LNC_IRQ_TOPAZ_FLAG (1<<20)
/* This flag includes all the display IRQ bits excepts the vblank irqs. */
-#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | _MDFLD_PIPEB_EVENT_FLAG | \
- _PSB_PIPEA_EVENT_FLAG | _PSB_VSYNC_PIPEA_FLAG | _MDFLD_MIPIA_FLAG | _MDFLD_MIPIC_FLAG)
+#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
+ _MDFLD_PIPEB_EVENT_FLAG | \
+ _PSB_PIPEA_EVENT_FLAG | \
+ _PSB_VSYNC_PIPEA_FLAG | \
+ _MDFLD_MIPIA_FLAG | \
+ _MDFLD_MIPIC_FLAG)
#define PSB_INT_IDENTITY_R 0x20A4
#define PSB_INT_MASK_R 0x20A8
#define PSB_INT_ENABLE_R 0x20A0
@@ -197,10 +206,25 @@ enum {
#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
#define PSB_LID_DELAY (DRM_HZ / 10)
-#define MDFLD_PNW_A0 0x00
#define MDFLD_PNW_B0 0x04
#define MDFLD_PNW_C0 0x08
+#define MDFLD_DSR_2D_3D_0 (1 << 0)
+#define MDFLD_DSR_2D_3D_2 (1 << 1)
+#define MDFLD_DSR_CURSOR_0 (1 << 2)
+#define MDFLD_DSR_CURSOR_2 (1 << 3)
+#define MDFLD_DSR_OVERLAY_0 (1 << 4)
+#define MDFLD_DSR_OVERLAY_2 (1 << 5)
+#define MDFLD_DSR_MIPI_CONTROL (1 << 6)
+#define MDFLD_DSR_DAMAGE_MASK_0 ((1 << 0) | (1 << 2) | (1 << 4))
+#define MDFLD_DSR_DAMAGE_MASK_2 ((1 << 1) | (1 << 3) | (1 << 5))
+#define MDFLD_DSR_2D_3D (MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
+
+#define MDFLD_DSR_RR 45
+#define MDFLD_DPU_ENABLE (1 << 31)
+#define MDFLD_DSR_FULLSCREEN (1 << 30)
+#define MDFLD_DSR_DELAY (DRM_HZ / MDFLD_DSR_RR)
+
#define PSB_PWR_STATE_ON 1
#define PSB_PWR_STATE_OFF 2
@@ -214,6 +238,12 @@ enum {
#define PSB_PCIx_MSI_ADDR_LOC 0x94
#define PSB_PCIx_MSI_DATA_LOC 0x98
+/* Medfield crystal settings */
+#define KSEL_CRYSTAL_19 1
+#define KSEL_BYPASS_19 5
+#define KSEL_BYPASS_25 6
+#define KSEL_BYPASS_83_100 7
+
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -227,13 +257,13 @@ struct psb_intel_opregion {
int enabled;
};
+struct psb_ops;
struct drm_psb_private {
struct drm_device *dev;
+ const struct psb_ops *ops;
- unsigned long chipset;
-
- struct psb_gtt *pg;
+ struct psb_gtt gtt;
/* GTT Memory manager */
struct psb_gtt_mm *gtt_mm;
@@ -271,14 +301,14 @@ struct drm_psb_private {
/*
* Power
- */
+ */
bool suspended;
bool display_power;
int display_count;
/*
- *Modesetting
+ * Modesetting
*/
struct psb_intel_mode_device mode_dev;
@@ -287,12 +317,8 @@ struct drm_psb_private {
uint32_t num_pipe;
/*
- *Memory managers
+ * OSPM info (Power management base) (can go ?)
*/
-
- /*
- *OSPM info
- */
uint32_t ospm_base;
/*
@@ -304,11 +330,11 @@ struct drm_psb_private {
u32 fuse_reg_value;
u32 video_device_fuse;
- /* pci revision id for B0:D2:F0 */
+ /* PCI revision ID for B0:D2:F0 */
uint8_t platform_rev_id;
/*
- *LVDS info
+ * LVDS info
*/
int backlight_duty_cycle; /* restore backlight to this value */
bool panel_wants_dither;
@@ -316,10 +342,10 @@ struct drm_psb_private {
struct drm_display_mode *lfp_lvds_vbt_mode;
struct drm_display_mode *sdvo_lvds_vbt_mode;
- struct bdb_lvds_backlight *lvds_bl; /*LVDS backlight info from VBT*/
+ struct bdb_lvds_backlight *lvds_bl; /* LVDS backlight info from VBT */
struct psb_intel_i2c_chan *lvds_i2c_bus;
- /* Feature bits from the VBIOS*/
+ /* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
unsigned int lvds_vbt:1;
@@ -328,17 +354,30 @@ struct drm_psb_private {
int lvds_ssc_freq;
bool is_lvds_on;
bool is_mipi_on;
+ u32 mipi_ctrl_display;
unsigned int core_freq;
uint32_t iLVDS_enable;
- /*runtime PM state*/
+ /* Runtime PM state */
int rpm_enabled;
- /* Moorestown specific */
+ /* MID specific */
struct mrst_vbt vbt_data;
struct mrst_gct_data gct_data;
+ /* MIPI Panel type etc */
+ int panel_id;
+ bool dual_mipi; /* dual display - DPI & DBI */
+ bool dpi_panel_on; /* The DPI panel power is on */
+ bool dpi_panel_on2; /* The second DPI panel power is on */
+ bool dbi_panel_on; /* The DBI panel power is on */
+ bool dbi_panel_on2; /* The second DBI panel power is on */
+ u32 dsr_fb_update; /* DSR FB update counter */
+
+ /* Moorestown HDMI state */
+ struct mrst_hdmi_dev *hdmi_priv;
+
/* Moorestown pipe config register value cache */
uint32_t pipeconf;
uint32_t pipeconf1;
@@ -349,8 +388,13 @@ struct drm_psb_private {
uint32_t dspcntr1;
uint32_t dspcntr2;
+ /* Moorestown MM backlight cache */
+ uint8_t saveBKLTCNT;
+ uint8_t saveBKLTREQ;
+ uint8_t saveBKLTBRTL;
+
/*
- *Register state
+ * Register state
*/
uint32_t saveDSPACNTR;
uint32_t saveDSPBCNTR;
@@ -373,6 +417,7 @@ struct drm_psb_private {
uint32_t saveDSPAPOS;
uint32_t saveDSPABASE;
uint32_t saveDSPASURF;
+ uint32_t saveDSPASTATUS;
uint32_t saveFPB0;
uint32_t saveFPB1;
uint32_t saveDPLL_B;
@@ -388,6 +433,7 @@ struct drm_psb_private {
uint32_t saveDSPBPOS;
uint32_t saveDSPBBASE;
uint32_t saveDSPBSURF;
+ uint32_t saveDSPBSTATUS;
uint32_t saveVCLK_DIVISOR_VGA0;
uint32_t saveVCLK_DIVISOR_VGA1;
uint32_t saveVCLK_POST_DIV;
@@ -458,6 +504,77 @@ struct drm_psb_private {
uint32_t msi_addr;
uint32_t msi_data;
+ /* Medfield specific register save state */
+ uint32_t saveHDMIPHYMISCCTL;
+ uint32_t saveHDMIB_CONTROL;
+ uint32_t saveDSPCCNTR;
+ uint32_t savePIPECCONF;
+ uint32_t savePIPECSRC;
+ uint32_t saveHTOTAL_C;
+ uint32_t saveHBLANK_C;
+ uint32_t saveHSYNC_C;
+ uint32_t saveVTOTAL_C;
+ uint32_t saveVBLANK_C;
+ uint32_t saveVSYNC_C;
+ uint32_t saveDSPCSTRIDE;
+ uint32_t saveDSPCSIZE;
+ uint32_t saveDSPCPOS;
+ uint32_t saveDSPCSURF;
+ uint32_t saveDSPCSTATUS;
+ uint32_t saveDSPCLINOFF;
+ uint32_t saveDSPCTILEOFF;
+ uint32_t saveDSPCCURSOR_CTRL;
+ uint32_t saveDSPCCURSOR_BASE;
+ uint32_t saveDSPCCURSOR_POS;
+ uint32_t save_palette_c[256];
+ uint32_t saveOV_OVADD_C;
+ uint32_t saveOV_OGAMC0_C;
+ uint32_t saveOV_OGAMC1_C;
+ uint32_t saveOV_OGAMC2_C;
+ uint32_t saveOV_OGAMC3_C;
+ uint32_t saveOV_OGAMC4_C;
+ uint32_t saveOV_OGAMC5_C;
+
+ /* DSI register save */
+ uint32_t saveDEVICE_READY_REG;
+ uint32_t saveINTR_EN_REG;
+ uint32_t saveDSI_FUNC_PRG_REG;
+ uint32_t saveHS_TX_TIMEOUT_REG;
+ uint32_t saveLP_RX_TIMEOUT_REG;
+ uint32_t saveTURN_AROUND_TIMEOUT_REG;
+ uint32_t saveDEVICE_RESET_REG;
+ uint32_t saveDPI_RESOLUTION_REG;
+ uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
+ uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
+ uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
+ uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
+ uint32_t saveVERT_SYNC_PAD_COUNT_REG;
+ uint32_t saveVERT_BACK_PORCH_COUNT_REG;
+ uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
+ uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
+ uint32_t saveINIT_COUNT_REG;
+ uint32_t saveMAX_RET_PAK_REG;
+ uint32_t saveVIDEO_FMT_REG;
+ uint32_t saveEOT_DISABLE_REG;
+ uint32_t saveLP_BYTECLK_REG;
+ uint32_t saveHS_LS_DBI_ENABLE_REG;
+ uint32_t saveTXCLKESC_REG;
+ uint32_t saveDPHY_PARAM_REG;
+ uint32_t saveMIPI_CONTROL_REG;
+ uint32_t saveMIPI;
+ uint32_t saveMIPI_C;
+
+ /* DPST register save */
+ uint32_t saveHISTOGRAM_INT_CONTROL_REG;
+ uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
+ uint32_t savePWM_CONTROL_LOGIC;
+
+ /*
+ * DSI info.
+ */
+ void *dbi_dsr_info;
+ void *dbi_dpu_info;
+ void *dsi_configs[2];
/*
* LID-Switch
*/
@@ -468,7 +585,7 @@ struct drm_psb_private {
u32 lid_last_state;
/*
- *Watchdog
+ * Watchdog
*/
uint32_t apm_reg;
@@ -478,14 +595,71 @@ struct drm_psb_private {
* Used for modifying backlight from
* xrandr -- consider removing and using HAL instead
*/
+ struct backlight_device *backlight_device;
struct drm_property *backlight_property;
uint32_t blc_adj1;
uint32_t blc_adj2;
- void * fbdev;
+ void *fbdev;
+ /* DPST state */
+ uint32_t dsr_idle_count;
+ bool is_in_idle;
+ bool dsr_enable;
+ void (*exit_idle)(struct drm_device *dev, u32 update_src);
+
+ /* 2D acceleration */
+ struct mutex mutex_2d;
+
+ /* FIXME: Arrays anyone ? */
+ struct mdfld_dsi_encoder *encoder0;
+ struct mdfld_dsi_encoder *encoder2;
+ struct mdfld_dsi_dbi_output *dbi_output;
+ struct mdfld_dsi_dbi_output *dbi_output2;
+ u32 bpp;
+ u32 bpp2;
+
+ bool dispstatus;
+};
+
+
+/*
+ * Operations for each board type
+ */
+
+struct psb_ops {
+ const char *name;
+ unsigned int accel_2d:1;
+ int pipes; /* Number of output pipes */
+ int crtcs; /* Number of CRTCs */
+ int sgx_offset; /* Base offset of SGX device */
+
+ /* Sub functions */
+ struct drm_crtc_helper_funcs const *crtc_helper;
+ struct drm_crtc_funcs const *crtc_funcs;
+
+ /* Setup hooks */
+ int (*chip_setup)(struct drm_device *dev);
+ void (*chip_teardown)(struct drm_device *dev);
+
+ /* Display management hooks */
+ int (*output_init)(struct drm_device *dev);
+ /* Power management hooks */
+ void (*init_pm)(struct drm_device *dev);
+ int (*save_regs)(struct drm_device *dev);
+ int (*restore_regs)(struct drm_device *dev);
+ int (*power_up)(struct drm_device *dev);
+ int (*power_down)(struct drm_device *dev);
+
+ void (*lvds_bl_power)(struct drm_device *dev, bool on);
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ /* Backlight */
+ int (*backlight_init)(struct drm_device *dev);
+#endif
+ int i2c_bus; /* I2C bus identifier for Moorestown */
};
+
struct psb_mmu_driver;
extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
@@ -497,7 +671,7 @@ static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
}
/*
- *MMU stuff.
+ * MMU stuff.
*/
extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
@@ -525,7 +699,7 @@ extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
unsigned long *pfn);
/*
- *Enable / disable MMU for different requestors.
+ * Enable / disable MMU for different requestors.
*/
@@ -552,7 +726,7 @@ extern void psb_irq_turn_on_dpst(struct drm_device *dev);
extern void psb_irq_turn_off_dpst(struct drm_device *dev);
extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
-extern int psb_vblank_wait2(struct drm_device *dev,unsigned int *sequence);
+extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
extern int psb_enable_vblank(struct drm_device *dev, int crtc);
extern void psb_disable_vblank(struct drm_device *dev, int crtc);
@@ -564,41 +738,33 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int mdfld_enable_te(struct drm_device *dev, int pipe);
+extern void mdfld_disable_te(struct drm_device *dev, int pipe);
+
/*
- * psb_opregion.c
+ * intel_opregion.c
*/
-extern int psb_intel_opregion_init(struct drm_device *dev);
+extern int gma_intel_opregion_init(struct drm_device *dev);
+extern int gma_intel_opregion_exit(struct drm_device *dev);
/*
- *psb_fb.c
+ * framebuffer.c
*/
extern int psbfb_probed(struct drm_device *dev);
extern int psbfb_remove(struct drm_device *dev,
struct drm_framebuffer *fb);
-extern int psbfb_kms_off_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int psbfb_kms_on_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern void *psbfb_vdc_reg(struct drm_device* dev);
-
/*
- * psb_2d.c
+ * accel_2d.c
*/
-extern void psbfb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect);
extern void psbfb_copyarea(struct fb_info *info,
const struct fb_copyarea *region);
-extern void psbfb_imageblit(struct fb_info *info,
- const struct fb_image *image);
extern int psbfb_sync(struct fb_info *info);
-
extern void psb_spank(struct drm_psb_private *dev_priv);
-
-extern int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
- unsigned size);
+extern int psb_accel_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/*
- *psb_reset.c
+ * psb_reset.c
*/
extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
@@ -608,14 +774,11 @@ extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
/* modesetting */
extern void psb_modeset_init(struct drm_device *dev);
extern void psb_modeset_cleanup(struct drm_device *dev);
-extern int psb_fbdev_init(struct drm_device * dev);
+extern int psb_fbdev_init(struct drm_device *dev);
-/* psb_bl.c */
-int psb_backlight_init(struct drm_device *dev);
-void psb_backlight_exit(void);
-int psb_set_brightness(struct backlight_device *bd);
-int psb_get_brightness(struct backlight_device *bd);
-struct backlight_device * psb_get_backlight_device(void);
+/* backlight.c */
+int gma_backlight_init(struct drm_device *dev);
+void gma_backlight_exit(struct drm_device *dev);
/* mrst_crtc.c */
extern const struct drm_crtc_helper_funcs mrst_helper_funcs;
@@ -624,26 +787,43 @@ extern const struct drm_crtc_helper_funcs mrst_helper_funcs;
extern void mrst_lvds_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev);
+/* psb_intel_display.c */
+extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs;
+extern const struct drm_crtc_funcs psb_intel_crtc_funcs;
+
/* psb_intel_lvds.c */
-extern void psb_intel_lvds_prepare(struct drm_encoder *encoder);
-extern void psb_intel_lvds_commit(struct drm_encoder *encoder);
extern const struct drm_connector_helper_funcs
psb_intel_lvds_connector_helper_funcs;
extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
-/* psb_gem.c */
+/* gem.c */
extern int psb_gem_init_object(struct drm_gem_object *obj);
extern void psb_gem_free_object(struct drm_gem_object *obj);
extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
- struct drm_file *file);
+ struct drm_file *file);
extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
uint32_t handle);
extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
+ uint32_t handle, uint64_t *offset);
extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+/* psb_device.c */
+extern const struct psb_ops psb_chip_ops;
+
+/* mrst_device.c */
+extern const struct psb_ops mrst_chip_ops;
+
+/* mdfld_device.c */
+extern const struct psb_ops mdfld_chip_ops;
+/* cdv_device.c */
+extern const struct psb_ops cdv_chip_ops;
/*
* Debug print bits setting
@@ -661,110 +841,62 @@ extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
#define PSB_D_MSVDX (1 << 9)
#define PSB_D_TOPAZ (1 << 10)
-#ifndef DRM_DEBUG_CODE
-/* To enable debug printout, set drm_psb_debug in psb_drv.c
- * to any combination of above print flags.
- */
-/* #define DRM_DEBUG_CODE 2 */
-#endif
-
-extern int drm_psb_debug;
extern int drm_psb_no_fb;
extern int drm_idle_check_interval;
-#define PSB_DEBUG_GENERAL(_fmt, _arg...) \
- PSB_DEBUG(PSB_D_GENERAL, _fmt, ##_arg)
-#define PSB_DEBUG_INIT(_fmt, _arg...) \
- PSB_DEBUG(PSB_D_INIT, _fmt, ##_arg)
-#define PSB_DEBUG_IRQ(_fmt, _arg...) \
- PSB_DEBUG(PSB_D_IRQ, _fmt, ##_arg)
-#define PSB_DEBUG_ENTRY(_fmt, _arg...) \
- PSB_DEBUG(PSB_D_ENTRY, _fmt, ##_arg)
-#define PSB_DEBUG_HV(_fmt, _arg...) \
- PSB_DEBUG(PSB_D_HV, _fmt, ##_arg)
-#define PSB_DEBUG_DBI_BF(_fmt, _arg...) \
- PSB_DEBUG(PSB_D_DBI_BF, _fmt, ##_arg)
-#define PSB_DEBUG_PM(_fmt, _arg...) \
- PSB_DEBUG(PSB_D_PM, _fmt, ##_arg)
-#define PSB_DEBUG_RENDER(_fmt, _arg...) \
- PSB_DEBUG(PSB_D_RENDER, _fmt, ##_arg)
-#define PSB_DEBUG_REG(_fmt, _arg...) \
- PSB_DEBUG(PSB_D_REG, _fmt, ##_arg)
-#define PSB_DEBUG_MSVDX(_fmt, _arg...) \
- PSB_DEBUG(PSB_D_MSVDX, _fmt, ##_arg)
-#define PSB_DEBUG_TOPAZ(_fmt, _arg...) \
- PSB_DEBUG(PSB_D_TOPAZ, _fmt, ##_arg)
-
-#if DRM_DEBUG_CODE
-#define PSB_DEBUG(_flag, _fmt, _arg...) \
- do { \
- if (unlikely((_flag) & drm_psb_debug)) \
- printk(KERN_DEBUG \
- "[psb:0x%02x:%s] " _fmt , _flag, \
- __func__ , ##_arg); \
- } while (0)
-#else
-#define PSB_DEBUG(_fmt, _arg...) do { } while (0)
-#endif
-
/*
- *Utilities
+ * Utilities
*/
-#define DRM_DRIVER_PRIVATE_T struct drm_psb_private
static inline u32 MRST_MSG_READ32(uint port, uint offset)
{
int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
uint32_t ret_val = 0;
- struct pci_dev *pci_root = pci_get_bus_and_slot (0, 0);
- pci_write_config_dword (pci_root, 0xD0, mcr);
- pci_read_config_dword (pci_root, 0xD4, &ret_val);
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_read_config_dword(pci_root, 0xD4, &ret_val);
pci_dev_put(pci_root);
return ret_val;
}
static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value)
{
int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
- struct pci_dev *pci_root = pci_get_bus_and_slot (0, 0);
- pci_write_config_dword (pci_root, 0xD4, value);
- pci_write_config_dword (pci_root, 0xD0, mcr);
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD4, value);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
pci_dev_put(pci_root);
}
static inline u32 MDFLD_MSG_READ32(uint port, uint offset)
{
int mcr = (0x10<<24) | (port << 16) | (offset << 8);
uint32_t ret_val = 0;
- struct pci_dev *pci_root = pci_get_bus_and_slot (0, 0);
- pci_write_config_dword (pci_root, 0xD0, mcr);
- pci_read_config_dword (pci_root, 0xD4, &ret_val);
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
+ pci_read_config_dword(pci_root, 0xD4, &ret_val);
pci_dev_put(pci_root);
return ret_val;
}
static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value)
{
int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
- struct pci_dev *pci_root = pci_get_bus_and_slot (0, 0);
- pci_write_config_dword (pci_root, 0xD4, value);
- pci_write_config_dword (pci_root, 0xD0, mcr);
+ struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+ pci_write_config_dword(pci_root, 0xD4, value);
+ pci_write_config_dword(pci_root, 0xD0, mcr);
pci_dev_put(pci_root);
}
static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- int reg_val = ioread32(dev_priv->vdc_reg + (reg));
- PSB_DEBUG_REG("reg = 0x%x. reg_val = 0x%x. \n", reg, reg_val);
- return reg_val;
+ return ioread32(dev_priv->vdc_reg + reg);
}
#define REG_READ(reg) REGISTER_READ(dev, (reg))
+
static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
uint32_t val)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- if ((reg < 0x70084 || reg >0x70088) && (reg < 0xa000 || reg >0xa3ff))
- PSB_DEBUG_REG("reg = 0x%x, val = 0x%x. \n", reg, val);
-
iowrite32((val), dev_priv->vdc_reg + (reg));
}
@@ -774,9 +906,6 @@ static inline void REGISTER_WRITE16(struct drm_device *dev,
uint32_t reg, uint32_t val)
{
struct drm_psb_private *dev_priv = dev->dev_private;
-
- PSB_DEBUG_REG("reg = 0x%x, val = 0x%x. \n", reg, val);
-
iowrite16((val), dev_priv->vdc_reg + (reg));
}
@@ -786,60 +915,34 @@ static inline void REGISTER_WRITE8(struct drm_device *dev,
uint32_t reg, uint32_t val)
{
struct drm_psb_private *dev_priv = dev->dev_private;
-
- PSB_DEBUG_REG("reg = 0x%x, val = 0x%x. \n", reg, val);
-
iowrite8((val), dev_priv->vdc_reg + (reg));
}
-#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
+#define REG_WRITE8(reg, val) REGISTER_WRITE8(dev, (reg), (val))
-#define PSB_ALIGN_TO(_val, _align) \
- (((_val) + ((_align) - 1)) & ~((_align) - 1))
-#define PSB_WVDC32(_val, _offs) \
- iowrite32(_val, dev_priv->vdc_reg + (_offs))
-#define PSB_RVDC32(_offs) \
- ioread32(dev_priv->vdc_reg + (_offs))
+#define PSB_WVDC32(_val, _offs) iowrite32(_val, dev_priv->vdc_reg + (_offs))
+#define PSB_RVDC32(_offs) ioread32(dev_priv->vdc_reg + (_offs))
/* #define TRAP_SGX_PM_FAULT 1 */
#ifdef TRAP_SGX_PM_FAULT
-#define PSB_RSGX32(_offs) \
-({ \
- if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
- printk(KERN_ERR "access sgx when it's off!! (READ) %s, %d\n", \
- __FILE__, __LINE__); \
- mdelay(1000); \
- } \
- ioread32(dev_priv->sgx_reg + (_offs)); \
+#define PSB_RSGX32(_offs) \
+({ \
+ if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) { \
+ printk(KERN_ERR \
+ "access sgx when it's off!! (READ) %s, %d\n", \
+ __FILE__, __LINE__); \
+ mdelay(1000); \
+ } \
+ ioread32(dev_priv->sgx_reg + (_offs)); \
})
#else
-#define PSB_RSGX32(_offs) \
- ioread32(dev_priv->sgx_reg + (_offs))
+#define PSB_RSGX32(_offs) ioread32(dev_priv->sgx_reg + (_offs))
#endif
-#define PSB_WSGX32(_val, _offs) \
- iowrite32(_val, dev_priv->sgx_reg + (_offs))
+#define PSB_WSGX32(_val, _offs) iowrite32(_val, dev_priv->sgx_reg + (_offs))
#define MSVDX_REG_DUMP 0
-#if MSVDX_REG_DUMP
-
-#define PSB_WMSVDX32(_val, _offs) \
- printk("MSVDX: write %08x to reg 0x%08x\n", (unsigned int)(_val), (unsigned int)(_offs));\
- iowrite32(_val, dev_priv->msvdx_reg + (_offs))
-#define PSB_RMSVDX32(_offs) \
- ioread32(dev_priv->msvdx_reg + (_offs))
-
-#else
-
-#define PSB_WMSVDX32(_val, _offs) \
- iowrite32(_val, dev_priv->msvdx_reg + (_offs))
-#define PSB_RMSVDX32(_offs) \
- ioread32(dev_priv->msvdx_reg + (_offs))
-
-#endif
-#define PSB_ALPL(_val, _base) \
- (((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT))
-#define PSB_ALPLM(_val, _base) \
- ((((_val) >> (_base ## _ALIGNSHIFT)) << (_base ## _SHIFT)) & (_base ## _MASK))
+#define PSB_WMSVDX32(_val, _offs) iowrite32(_val, dev_priv->msvdx_reg + (_offs))
+#define PSB_RMSVDX32(_offs) ioread32(dev_priv->msvdx_reg + (_offs))
#endif
diff --git a/drivers/staging/gma500/psb_intel_display.c b/drivers/staging/gma500/psb_intel_display.c
index 4f47d09d65d..4afa671f974 100644
--- a/drivers/staging/gma500/psb_intel_display.c
+++ b/drivers/staging/gma500/psb_intel_display.c
@@ -1,5 +1,5 @@
/*
- * Copyright © 2006-2007 Intel Corporation
+ * Copyright © 2006-2011 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -22,13 +22,14 @@
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
-#include "psb_fb.h"
+#include "framebuffer.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_display.h"
-#include "psb_powermgmt.h"
+#include "power.h"
+#include "mdfld_output.h"
struct psb_intel_clock_t {
/* given values */
@@ -331,7 +332,7 @@ static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
void psb_intel_wait_for_vblank(struct drm_device *dev)
{
/* Wait for 20ms, i.e. one cycle at 50hz. */
- udelay(20000);
+ mdelay(20);
}
int psb_intel_pipe_set_base(struct drm_crtc *crtc,
@@ -350,17 +351,15 @@ int psb_intel_pipe_set_base(struct drm_crtc *crtc,
u32 dspcntr;
int ret = 0;
- PSB_DEBUG_ENTRY("\n");
+ if (!gma_power_begin(dev, true))
+ return 0;
/* no fb bound */
if (!crtc->fb) {
- DRM_DEBUG("No FB bound\n");
- return 0;
+ dev_dbg(dev->dev, "No FB bound\n");
+ goto psb_intel_pipe_cleaner;
}
- if (!gma_power_begin(dev, true))
- return 0;
-
/* We are displaying this buffer, make sure it is actually loaded
into the GTT */
ret = psb_gtt_pin(psbfb->gtt);
@@ -390,7 +389,7 @@ int psb_intel_pipe_set_base(struct drm_crtc *crtc,
dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
default:
- DRM_ERROR("Unknown color depth\n");
+ dev_err(dev->dev, "Unknown color depth\n");
ret = -EINVAL;
psb_gtt_unpin(psbfb->gtt);
goto psb_intel_pipe_set_base_exit;
@@ -398,7 +397,6 @@ int psb_intel_pipe_set_base(struct drm_crtc *crtc,
REG_WRITE(dspcntr_reg, dspcntr);
- DRM_DEBUG("Writing base %08lX %08lX %d %d\n", start, offset, x, y);
if (0 /* FIXMEAC - check what PSB needs */) {
REG_WRITE(dspbase, offset);
REG_READ(dspbase);
@@ -409,6 +407,7 @@ int psb_intel_pipe_set_base(struct drm_crtc *crtc,
REG_READ(dspbase);
}
+psb_intel_pipe_cleaner:
/* If there was a previous display we can now unpin it */
if (old_fb)
psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
@@ -588,6 +587,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
int pipe = psb_intel_crtc->pipe;
int fp_reg = (pipe == 0) ? FPA0 : FPB0;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
@@ -610,6 +610,12 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ /* No scan out no play */
+ if (crtc->fb == NULL) {
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+ return 0;
+ }
+
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct psb_intel_output *psb_intel_output =
to_psb_intel_output(connector);
@@ -642,7 +648,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
&clock);
if (!ok) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
return 0;
}
@@ -706,7 +712,6 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
if (psb_intel_panel_fitter_pipe(dev) == pipe)
REG_WRITE(PFIT_CONTROL, 0);
- DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
if (dpll & DPLL_VCO_ENABLE) {
@@ -723,17 +728,18 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds) {
u32 lvds = REG_READ(LVDS);
- lvds |=
- LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
- LVDS_PIPEB_SELECT;
+ lvds &= ~LVDS_PIPEB_SELECT;
+ if (pipe == 1)
+ lvds |= LVDS_PIPEB_SELECT;
+
+ lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
/* Set the B0-B3 data pairs corresponding to
* whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
+ lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
if (clock.p2 == 7)
lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
- else
- lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more
@@ -785,11 +791,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(dspcntr_reg, dspcntr);
/* Flush the plane changes */
- {
- struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
- crtc_funcs->mode_set_base(crtc, x, y, old_fb);
- }
+ crtc_funcs->mode_set_base(crtc, x, y, old_fb);
psb_intel_wait_for_vblank(dev);
@@ -820,7 +822,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
palreg = PALETTE_C;
break;
default:
- DRM_ERROR("Illegal Pipe Number.\n");
+ dev_err(dev->dev, "Illegal Pipe Number.\n");
return;
}
@@ -863,10 +865,8 @@ static void psb_intel_crtc_save(struct drm_crtc *crtc)
uint32_t paletteReg;
int i;
- DRM_DEBUG("\n");
-
if (!crtc_state) {
- DRM_DEBUG("No CRTC state found\n");
+ dev_err(dev->dev, "No CRTC state found\n");
return;
}
@@ -890,25 +890,6 @@ static void psb_intel_crtc_save(struct drm_crtc *crtc)
crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
- DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
- crtc_state->saveDSPCNTR,
- crtc_state->savePIPECONF,
- crtc_state->savePIPESRC,
- crtc_state->saveFP0,
- crtc_state->saveFP1,
- crtc_state->saveDPLL,
- crtc_state->saveHTOTAL,
- crtc_state->saveHBLANK,
- crtc_state->saveHSYNC,
- crtc_state->saveVTOTAL,
- crtc_state->saveVBLANK,
- crtc_state->saveVSYNC,
- crtc_state->saveDSPSTRIDE,
- crtc_state->saveDSPSIZE,
- crtc_state->saveDSPPOS,
- crtc_state->saveDSPBASE
- );
-
paletteReg = pipeA ? PALETTE_A : PALETTE_B;
for (i = 0; i < 256; ++i)
crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
@@ -929,60 +910,15 @@ static void psb_intel_crtc_restore(struct drm_crtc *crtc)
uint32_t paletteReg;
int i;
- DRM_DEBUG("\n");
-
if (!crtc_state) {
- DRM_DEBUG("No crtc state\n");
+ dev_err(dev->dev, "No crtc state\n");
return;
}
- DRM_DEBUG(
- "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
- REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
- REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
- REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
- REG_READ(pipeA ? FPA0 : FPB0),
- REG_READ(pipeA ? FPA1 : FPB1),
- REG_READ(pipeA ? DPLL_A : DPLL_B),
- REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
- REG_READ(pipeA ? HBLANK_A : HBLANK_B),
- REG_READ(pipeA ? HSYNC_A : HSYNC_B),
- REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
- REG_READ(pipeA ? VBLANK_A : VBLANK_B),
- REG_READ(pipeA ? VSYNC_A : VSYNC_B),
- REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
- REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
- REG_READ(pipeA ? DSPAPOS : DSPBPOS),
- REG_READ(pipeA ? DSPABASE : DSPBBASE)
- );
-
- DRM_DEBUG(
- "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
- crtc_state->saveDSPCNTR,
- crtc_state->savePIPECONF,
- crtc_state->savePIPESRC,
- crtc_state->saveFP0,
- crtc_state->saveFP1,
- crtc_state->saveDPLL,
- crtc_state->saveHTOTAL,
- crtc_state->saveHBLANK,
- crtc_state->saveHSYNC,
- crtc_state->saveVTOTAL,
- crtc_state->saveVBLANK,
- crtc_state->saveVSYNC,
- crtc_state->saveDSPSTRIDE,
- crtc_state->saveDSPSIZE,
- crtc_state->saveDSPPOS,
- crtc_state->saveDSPBASE
- );
-
-
if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
REG_WRITE(pipeA ? DPLL_A : DPLL_B,
crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
REG_READ(pipeA ? DPLL_A : DPLL_B);
- DRM_DEBUG("write dpll: %x\n",
- REG_READ(pipeA ? DPLL_A : DPLL_B));
udelay(150);
}
@@ -1039,11 +975,8 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_gem_object *obj;
int ret;
- DRM_DEBUG("\n");
-
/* if we want to turn off the cursor, ignore width and height */
if (!handle) {
- DRM_DEBUG("cursor off\n");
/* turn off the cursor */
temp = CURSOR_MODE_DISABLE;
@@ -1067,7 +1000,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
/* Currently we only support 64x64 cursors */
if (width != 64 || height != 64) {
- DRM_ERROR("we currently only support 64x64 cursors\n");
+ dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
return -EINVAL;
}
@@ -1076,7 +1009,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
return -ENOENT;
if (obj->size < width * height * 4) {
- DRM_ERROR("buffer is to small\n");
+ dev_dbg(dev->dev, "buffer is to small\n");
return -ENOMEM;
}
@@ -1085,7 +1018,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
/* Pin the memory into the GTT */
ret = psb_gtt_pin(gt);
if (ret) {
- DRM_ERROR("Can not pin down handle 0x%x\n", handle);
+ dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
return ret;
}
@@ -1106,14 +1039,13 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
}
/* unpin the old bo */
- if (psb_intel_crtc->cursor_obj && psb_intel_crtc->cursor_obj != obj) {
+ if (psb_intel_crtc->cursor_obj) {
gt = container_of(psb_intel_crtc->cursor_obj,
struct gtt_range, gem);
psb_gtt_unpin(gt);
drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
psb_intel_crtc->cursor_obj = obj;
}
-
return 0;
}
@@ -1148,7 +1080,7 @@ static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
-static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
u16 *green, u16 *blue, uint32_t type, uint32_t size)
{
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
@@ -1309,7 +1241,7 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
+void psb_intel_crtc_destroy(struct drm_crtc *crtc)
{
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct gtt_range *gt;
@@ -1327,7 +1259,7 @@ static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
kfree(psb_intel_crtc);
}
-static const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
+const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
.dpms = psb_intel_crtc_dpms,
.mode_fixup = psb_intel_crtc_mode_fixup,
.mode_set = psb_intel_crtc_mode_set,
@@ -1346,6 +1278,19 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
.destroy = psb_intel_crtc_destroy,
};
+/*
+ * Set the default values of the cursor control and base registers
+ * to zero. This is a workaround for a h/w defect on Oaktrail.
+ */
+static void psb_intel_cursor_init(struct drm_device *dev, int pipe)
+{
+ u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
+ u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
+
+ REG_WRITE(control[pipe], 0);
+ REG_WRITE(base[pipe], 0);
+}
+
void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct psb_intel_mode_device *mode_dev)
{
@@ -1354,8 +1299,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
int i;
uint16_t *r_base, *g_base, *b_base;
- PSB_DEBUG_ENTRY("\n");
-
/* We allocate a extra array of drm_connector pointers
* for fbdev after the crtc */
psb_intel_crtc =
@@ -1368,12 +1311,13 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
psb_intel_crtc->crtc_state =
kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
if (!psb_intel_crtc->crtc_state) {
- DRM_INFO("Crtc state error: No memory\n");
+ dev_err(dev->dev, "Crtc state error: No memory\n");
kfree(psb_intel_crtc);
return;
}
- drm_crtc_init(dev, &psb_intel_crtc->base, &psb_intel_crtc_funcs);
+ /* Set the CRTC operations from the chip specific data */
+ drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs);
drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
psb_intel_crtc->pipe = pipe;
@@ -1396,12 +1340,8 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
psb_intel_crtc->mode_dev = mode_dev;
psb_intel_crtc->cursor_addr = 0;
- if (IS_MRST(dev))
- drm_crtc_helper_add(&psb_intel_crtc->base,
- &mrst_helper_funcs);
- else
- drm_crtc_helper_add(&psb_intel_crtc->base,
- &psb_intel_helper_funcs);
+ drm_crtc_helper_add(&psb_intel_crtc->base,
+ dev_priv->ops->crtc_helper);
/* Setup the array of drm_connector pointer array */
psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
@@ -1414,6 +1354,7 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
psb_intel_crtc->mode_set.connectors =
(struct drm_connector **) (psb_intel_crtc + 1);
psb_intel_crtc->mode_set.num_connectors = 0;
+ psb_intel_cursor_init(dev, pipe);
}
int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -1425,7 +1366,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct psb_intel_crtc *crtc;
if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
+ dev_err(dev->dev, "called with no initialization\n");
return -EINVAL;
}
@@ -1433,7 +1374,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
DRM_MODE_OBJECT_CRTC);
if (!drmmode_obj) {
- DRM_ERROR("no such CRTC id\n");
+ dev_err(dev->dev, "no such CRTC id\n");
return -EINVAL;
}
diff --git a/drivers/staging/gma500/psb_intel_display.h b/drivers/staging/gma500/psb_intel_display.h
index 3724b971e91..535b49a5e40 100644
--- a/drivers/staging/gma500/psb_intel_display.h
+++ b/drivers/staging/gma500/psb_intel_display.h
@@ -21,5 +21,8 @@
#define _INTEL_DISPLAY_H_
bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
+void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+ u16 *green, u16 *blue, uint32_t type, uint32_t size);
+void psb_intel_crtc_destroy(struct drm_crtc *crtc);
#endif
diff --git a/drivers/staging/gma500/psb_intel_drv.h b/drivers/staging/gma500/psb_intel_drv.h
index 6006ddd993f..36b554b5c33 100644
--- a/drivers/staging/gma500/psb_intel_drv.h
+++ b/drivers/staging/gma500/psb_intel_drv.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009-2011, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -26,11 +26,6 @@
#include <linux/gpio.h>
/*
- * MOORESTOWN defines
- */
-#define DELAY_TIME1 2000 /* 1000 = 1ms */
-
-/*
* Display related stuff
*/
@@ -61,16 +56,10 @@
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
-enum mipi_panel_type {
- NSC_800X480 = 1,
- LGE_480X1024 = 2,
- TPO_864X480 = 3
-};
-
-/**
+/*
* Hold information usually put in the device driver's private structure here,
* since it needs to be shared across the private data of multiple drivers.
-*/
+ */
struct psb_intel_mode_device {
/*
@@ -79,7 +68,7 @@ struct psb_intel_mode_device {
size_t(*bo_offset) (struct drm_device *dev, void *bo);
/*
- * Cursor
+ * Cursor (Can go ?)
*/
int cursor_needs_physical;
@@ -116,7 +105,7 @@ struct psb_intel_output {
void *dev_priv;
struct psb_intel_mode_device *mode_dev;
-
+ struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */
};
struct psb_intel_crtc_state {
@@ -235,4 +224,7 @@ extern int psb_intel_lvds_set_property(struct drm_connector *connector,
extern void psb_intel_lvds_destroy(struct drm_connector *connector);
extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
+extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe);
+extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe);
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/staging/gma500/psb_intel_lvds.c b/drivers/staging/gma500/psb_intel_lvds.c
index b0a225b9f56..c6436da6073 100644
--- a/drivers/staging/gma500/psb_intel_lvds.c
+++ b/drivers/staging/gma500/psb_intel_lvds.c
@@ -21,21 +21,16 @@
*/
#include <linux/i2c.h>
-/* #include <drm/drm_crtc.h> */
-/* #include <drm/drm_edid.h> */
#include <drm/drmP.h>
-#include "psb_intel_bios.h"
+#include "intel_bios.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "psb_powermgmt.h"
+#include "power.h"
#include <linux/pm_runtime.h>
-u32 CoreClock;
-u32 PWMControlRegFreq;
-
-/**
+/*
* LVDS I2C backlight control macros
*/
#define BRIGHTNESS_MAX_LEVEL 100
@@ -53,7 +48,7 @@ u32 PWMControlRegFreq;
#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
struct psb_intel_lvds_priv {
- /**
+ /*
* Saved LVDO output states
*/
uint32_t savePP_ON;
@@ -66,9 +61,8 @@ struct psb_intel_lvds_priv {
uint32_t saveBLC_PWM_CTL;
};
-/* MRST defines end */
-/**
+/*
* Returns the maximum level of the backlight duty cycle field.
*/
static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
@@ -126,13 +120,13 @@ static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
out_buf[1] = (u8)blc_i2c_brightness;
if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
- DRM_DEBUG("I2C set brightness.(command, value) (%d, %d)\n",
+ dev_dbg(dev->dev, "I2C set brightness.(command, value) (%d, %d)\n",
dev_priv->lvds_bl->brightnesscmd,
blc_i2c_brightness);
return 0;
}
- DRM_ERROR("I2C transfer error\n");
+ dev_err(dev->dev, "I2C transfer error\n");
return -1;
}
@@ -163,7 +157,7 @@ static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
return 0;
}
-/**
+/*
* Set LVDS backlight level either by I2C or PWM
*/
void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
@@ -172,10 +166,10 @@ void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
struct drm_psb_private *dev_priv =
(struct drm_psb_private *)dev->dev_private;
- DRM_DEBUG("backlight level is %d\n", level);
+ dev_dbg(dev->dev, "backlight level is %d\n", level);
if (!dev_priv->lvds_bl) {
- DRM_ERROR("NO LVDS Backlight Info\n");
+ dev_err(dev->dev, "NO LVDS Backlight Info\n");
return;
}
@@ -185,10 +179,10 @@ void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
psb_lvds_pwm_set_brightness(dev, level);
}
-/**
+/*
* Sets the backlight level.
*
- * \param level backlight level, from 0 to psb_intel_lvds_get_max_backlight().
+ * level: backlight level, from 0 to psb_intel_lvds_get_max_backlight().
*/
static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
{
@@ -210,7 +204,7 @@ static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
}
}
-/**
+/*
* Sets the power state for the panel.
*/
static void psb_intel_lvds_set_power(struct drm_device *dev,
@@ -289,7 +283,7 @@ static void psb_intel_lvds_save(struct drm_connector *connector)
dev_priv->backlight_duty_cycle =
psb_intel_lvds_get_max_backlight(dev);
- DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+ dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
lvds_priv->savePP_ON,
lvds_priv->savePP_OFF,
lvds_priv->saveLVDS,
@@ -310,7 +304,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
struct psb_intel_lvds_priv *lvds_priv =
(struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
- DRM_DEBUG("(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+ dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
lvds_priv->savePP_ON,
lvds_priv->savePP_OFF,
lvds_priv->saveLVDS,
@@ -351,8 +345,6 @@ int psb_intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *fixed_mode =
psb_intel_output->mode_dev->panel_fixed_mode;
- PSB_DEBUG_ENTRY("\n");
-
if (psb_intel_output->type == INTEL_OUTPUT_MIPI2)
fixed_mode = psb_intel_output->mode_dev->panel_fixed_mode2;
@@ -387,12 +379,10 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct psb_intel_output *psb_intel_output =
enc_to_psb_intel_output(encoder);
- PSB_DEBUG_ENTRY("type = 0x%x, pipe = %d.\n",
- psb_intel_output->type, psb_intel_crtc->pipe);
-
if (psb_intel_output->type == INTEL_OUTPUT_MIPI2)
panel_fixed_mode = mode_dev->panel_fixed_mode2;
+ /* FIXME: review for Medfield */
/* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */
if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) {
printk(KERN_ERR "Can't support LVDS on pipe A\n");
@@ -442,14 +432,12 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
return true;
}
-void psb_intel_lvds_prepare(struct drm_encoder *encoder)
+static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
struct psb_intel_mode_device *mode_dev = output->mode_dev;
- PSB_DEBUG_ENTRY("\n");
-
if (!gma_power_begin(dev, true))
return;
@@ -462,14 +450,12 @@ void psb_intel_lvds_prepare(struct drm_encoder *encoder)
gma_power_end(dev);
}
-void psb_intel_lvds_commit(struct drm_encoder *encoder)
+static void psb_intel_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
struct psb_intel_mode_device *mode_dev = output->mode_dev;
- PSB_DEBUG_ENTRY("\n");
-
if (mode_dev->backlight_duty_cycle == 0)
mode_dev->backlight_duty_cycle =
psb_intel_lvds_get_max_backlight(dev);
@@ -481,9 +467,8 @@ static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct psb_intel_mode_device *mode_dev =
- enc_to_psb_intel_output(encoder)->mode_dev;
struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
u32 pfit_control;
/*
@@ -505,13 +490,13 @@ static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
else
pfit_control = 0;
- if (mode_dev->panel_wants_dither)
+ if (dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
REG_WRITE(PFIT_CONTROL, pfit_control);
}
-/**
+/*
* Detect the LVDS connection.
*
* This always returns CONNECTOR_STATUS_CONNECTED.
@@ -524,7 +509,7 @@ static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
return connector_status_connected;
}
-/**
+/*
* Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
*/
static int psb_intel_lvds_get_modes(struct drm_connector *connector)
@@ -536,7 +521,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
psb_intel_output->mode_dev;
int ret = 0;
- ret = psb_intel_ddc_get_modes(psb_intel_output);
+ if (!IS_MRST(dev))
+ ret = psb_intel_ddc_get_modes(psb_intel_output);
if (ret)
return ret;
@@ -583,18 +569,17 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
- struct drm_encoder *pEncoder = connector->encoder;
+ struct drm_encoder *encoder = connector->encoder;
- PSB_DEBUG_ENTRY("\n");
+ if (!encoder)
+ return -1;
- if (!strcmp(property->name, "scaling mode") && pEncoder) {
- struct psb_intel_crtc *pPsbCrtc =
- to_psb_intel_crtc(pEncoder->crtc);
- uint64_t curValue;
+ if (!strcmp(property->name, "scaling mode")) {
+ struct psb_intel_crtc *crtc =
+ to_psb_intel_crtc(encoder->crtc);
+ uint64_t curval;
- PSB_DEBUG_ENTRY("scaling mode\n");
-
- if (!pPsbCrtc)
+ if (!crtc)
goto set_prop_error;
switch (value) {
@@ -610,10 +595,10 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
if (drm_connector_property_get_value(connector,
property,
- &curValue))
+ &curval))
goto set_prop_error;
- if (curValue == value)
+ if (curval == value)
goto set_prop_done;
if (drm_connector_property_set_value(connector,
@@ -621,34 +606,34 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
value))
goto set_prop_error;
- if (pPsbCrtc->saved_mode.hdisplay != 0 &&
- pPsbCrtc->saved_mode.vdisplay != 0) {
- if (!drm_crtc_helper_set_mode(pEncoder->crtc,
- &pPsbCrtc->saved_mode,
- pEncoder->crtc->x,
- pEncoder->crtc->y,
- pEncoder->crtc->fb))
+ if (crtc->saved_mode.hdisplay != 0 &&
+ crtc->saved_mode.vdisplay != 0) {
+ if (!drm_crtc_helper_set_mode(encoder->crtc,
+ &crtc->saved_mode,
+ encoder->crtc->x,
+ encoder->crtc->y,
+ encoder->crtc->fb))
goto set_prop_error;
}
- } else if (!strcmp(property->name, "backlight") && pEncoder) {
- PSB_DEBUG_ENTRY("backlight\n");
-
+ } else if (!strcmp(property->name, "backlight")) {
if (drm_connector_property_set_value(connector,
property,
value))
goto set_prop_error;
else {
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct backlight_device bd;
- bd.props.brightness = value;
- psb_set_brightness(&bd);
+ struct drm_psb_private *devp = encoder->dev->dev_private;
+ struct backlight_device *bd = devp->backlight_device;
+ if (bd) {
+ bd->props.brightness = value;
+ backlight_update_status(bd);
+ }
#endif
}
- } else if (!strcmp(property->name, "DPMS") && pEncoder) {
- struct drm_encoder_helper_funcs *pEncHFuncs
- = pEncoder->helper_private;
- PSB_DEBUG_ENTRY("DPMS\n");
- pEncHFuncs->dpms(pEncoder, value);
+ } else if (!strcmp(property->name, "DPMS")) {
+ struct drm_encoder_helper_funcs *hfuncs
+ = encoder->helper_private;
+ hfuncs->dpms(encoder, value);
}
set_prop_done:
@@ -722,7 +707,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
if (!lvds_priv) {
kfree(psb_intel_output);
- DRM_DEBUG("LVDS private allocation error\n");
+ dev_err(dev->dev, "LVDS private allocation error\n");
return;
}
@@ -758,7 +743,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
dev_priv->backlight_property,
BRIGHTNESS_MAX_LEVEL);
- /**
+ /*
* Set up I2C bus
 * FIXME: destroy i2c_bus on exit
*/
@@ -806,7 +791,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
}
}
- /* Failed to get EDID, what about VBT? do we need this?*/
+ /* Failed to get EDID, what about VBT? do we need this? */
if (mode_dev->vbt_mode)
mode_dev->panel_fixed_mode =
drm_mode_duplicate(dev, mode_dev->vbt_mode);
@@ -838,8 +823,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
/* If we still don't have a mode after all that, give up. */
if (!mode_dev->panel_fixed_mode) {
- DRM_DEBUG
- ("Found no modes on the lvds, ignoring the LVDS\n");
+ dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
goto failed_find;
}
@@ -849,26 +833,6 @@ void psb_intel_lvds_init(struct drm_device *dev,
*/
out:
drm_sysfs_connector_add(connector);
-
- PSB_DEBUG_ENTRY("hdisplay = %d\n",
- mode_dev->panel_fixed_mode->hdisplay);
- PSB_DEBUG_ENTRY(" vdisplay = %d\n",
- mode_dev->panel_fixed_mode->vdisplay);
- PSB_DEBUG_ENTRY(" hsync_start = %d\n",
- mode_dev->panel_fixed_mode->hsync_start);
- PSB_DEBUG_ENTRY(" hsync_end = %d\n",
- mode_dev->panel_fixed_mode->hsync_end);
- PSB_DEBUG_ENTRY(" htotal = %d\n",
- mode_dev->panel_fixed_mode->htotal);
- PSB_DEBUG_ENTRY(" vsync_start = %d\n",
- mode_dev->panel_fixed_mode->vsync_start);
- PSB_DEBUG_ENTRY(" vsync_end = %d\n",
- mode_dev->panel_fixed_mode->vsync_end);
- PSB_DEBUG_ENTRY(" vtotal = %d\n",
- mode_dev->panel_fixed_mode->vtotal);
- PSB_DEBUG_ENTRY(" clock = %d\n",
- mode_dev->panel_fixed_mode->clock);
-
return;
failed_find:
diff --git a/drivers/staging/gma500/psb_intel_reg.h b/drivers/staging/gma500/psb_intel_reg.h
index 1c283140bcc..1ac16aa791c 100644
--- a/drivers/staging/gma500/psb_intel_reg.h
+++ b/drivers/staging/gma500/psb_intel_reg.h
@@ -28,8 +28,8 @@
*
* The actual value is this field multiplied by two.
*/
-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
-#define BLM_LEGACY_MODE (1 << 16)
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE (1 << 16)
/*
* This is the number of cycles out of the backlight modulation cycle for which
* the backlight is on.
@@ -37,55 +37,55 @@
* This field must be no greater than the number of cycles in the complete
* backlight modulation cycle.
*/
-#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
-#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
#define I915_GCFGC 0xf0
-#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
-#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
-#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
-#define I915_DISPLAY_CLOCK_MASK (7 << 4)
+#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
+#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define I915_DISPLAY_CLOCK_MASK (7 << 4)
#define I855_HPLLCC 0xc0
-#define I855_CLOCK_CONTROL_MASK (3 << 0)
-#define I855_CLOCK_133_200 (0 << 0)
-#define I855_CLOCK_100_200 (1 << 0)
-#define I855_CLOCK_100_133 (2 << 0)
-#define I855_CLOCK_166_250 (3 << 0)
+#define I855_CLOCK_CONTROL_MASK (3 << 0)
+#define I855_CLOCK_133_200 (0 << 0)
+#define I855_CLOCK_100_200 (1 << 0)
+#define I855_CLOCK_100_133 (2 << 0)
+#define I855_CLOCK_166_250 (3 << 0)
/* I830 CRTC registers */
-#define HTOTAL_A 0x60000
-#define HBLANK_A 0x60004
-#define HSYNC_A 0x60008
-#define VTOTAL_A 0x6000c
-#define VBLANK_A 0x60010
-#define VSYNC_A 0x60014
-#define PIPEASRC 0x6001c
-#define BCLRPAT_A 0x60020
-#define VSYNCSHIFT_A 0x60028
-
-#define HTOTAL_B 0x61000
-#define HBLANK_B 0x61004
-#define HSYNC_B 0x61008
-#define VTOTAL_B 0x6100c
-#define VBLANK_B 0x61010
-#define VSYNC_B 0x61014
-#define PIPEBSRC 0x6101c
-#define BCLRPAT_B 0x61020
-#define VSYNCSHIFT_B 0x61028
-
-#define HTOTAL_C 0x62000
-#define HBLANK_C 0x62004
-#define HSYNC_C 0x62008
-#define VTOTAL_C 0x6200c
-#define VBLANK_C 0x62010
-#define VSYNC_C 0x62014
-#define PIPECSRC 0x6201c
-#define BCLRPAT_C 0x62020
-#define VSYNCSHIFT_C 0x62028
-
-#define PP_STATUS 0x61200
-# define PP_ON (1 << 31)
+#define HTOTAL_A 0x60000
+#define HBLANK_A 0x60004
+#define HSYNC_A 0x60008
+#define VTOTAL_A 0x6000c
+#define VBLANK_A 0x60010
+#define VSYNC_A 0x60014
+#define PIPEASRC 0x6001c
+#define BCLRPAT_A 0x60020
+#define VSYNCSHIFT_A 0x60028
+
+#define HTOTAL_B 0x61000
+#define HBLANK_B 0x61004
+#define HSYNC_B 0x61008
+#define VTOTAL_B 0x6100c
+#define VBLANK_B 0x61010
+#define VSYNC_B 0x61014
+#define PIPEBSRC 0x6101c
+#define BCLRPAT_B 0x61020
+#define VSYNCSHIFT_B 0x61028
+
+#define HTOTAL_C 0x62000
+#define HBLANK_C 0x62004
+#define HSYNC_C 0x62008
+#define VTOTAL_C 0x6200c
+#define VBLANK_C 0x62010
+#define VSYNC_C 0x62014
+#define PIPECSRC 0x6201c
+#define BCLRPAT_C 0x62020
+#define VSYNCSHIFT_C 0x62028
+
+#define PP_STATUS 0x61200
+# define PP_ON (1 << 31)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -93,56 +93,57 @@
* - pipe enabled
* - LVDS/DVOB/DVOC on
*/
-# define PP_READY (1 << 30)
-# define PP_SEQUENCE_NONE (0 << 28)
-# define PP_SEQUENCE_ON (1 << 28)
-# define PP_SEQUENCE_OFF (2 << 28)
-# define PP_SEQUENCE_MASK 0x30000000
-#define PP_CONTROL 0x61204
-# define POWER_TARGET_ON (1 << 0)
-
-#define LVDSPP_ON 0x61208
-#define LVDSPP_OFF 0x6120c
-#define PP_CYCLE 0x61210
-
-#define PFIT_CONTROL 0x61230
-# define PFIT_ENABLE (1 << 31)
-# define PFIT_PIPE_MASK (3 << 29)
-# define PFIT_PIPE_SHIFT 29
-# define PFIT_SCALING_MODE_PILLARBOX (1 << 27)
-# define PFIT_SCALING_MODE_LETTERBOX (3 << 26)
-# define VERT_INTERP_DISABLE (0 << 10)
-# define VERT_INTERP_BILINEAR (1 << 10)
-# define VERT_INTERP_MASK (3 << 10)
-# define VERT_AUTO_SCALE (1 << 9)
-# define HORIZ_INTERP_DISABLE (0 << 6)
-# define HORIZ_INTERP_BILINEAR (1 << 6)
-# define HORIZ_INTERP_MASK (3 << 6)
-# define HORIZ_AUTO_SCALE (1 << 5)
-# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
-
-#define PFIT_PGM_RATIOS 0x61234
-# define PFIT_VERT_SCALE_MASK 0xfff00000
-# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+#define PP_READY (1 << 30)
+#define PP_SEQUENCE_NONE (0 << 28)
+#define PP_SEQUENCE_ON (1 << 28)
+#define PP_SEQUENCE_OFF (2 << 28)
+#define PP_SEQUENCE_MASK 0x30000000
+#define PP_CONTROL 0x61204
+#define POWER_TARGET_ON (1 << 0)
+
+#define LVDSPP_ON 0x61208
+#define LVDSPP_OFF 0x6120c
+#define PP_CYCLE 0x61210
+
+#define PFIT_CONTROL 0x61230
+#define PFIT_ENABLE (1 << 31)
+#define PFIT_PIPE_MASK (3 << 29)
+#define PFIT_PIPE_SHIFT 29
+#define PFIT_SCALING_MODE_PILLARBOX (1 << 27)
+#define PFIT_SCALING_MODE_LETTERBOX (3 << 26)
+#define VERT_INTERP_DISABLE (0 << 10)
+#define VERT_INTERP_BILINEAR (1 << 10)
+#define VERT_INTERP_MASK (3 << 10)
+#define VERT_AUTO_SCALE (1 << 9)
+#define HORIZ_INTERP_DISABLE (0 << 6)
+#define HORIZ_INTERP_BILINEAR (1 << 6)
+#define HORIZ_INTERP_MASK (3 << 6)
+#define HORIZ_AUTO_SCALE (1 << 5)
+#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
+
+#define PFIT_PGM_RATIOS 0x61234
+#define PFIT_VERT_SCALE_MASK 0xfff00000
+#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
#define PFIT_AUTO_RATIOS 0x61238
+#define DPLL_A 0x06014
+#define DPLL_B 0x06018
+#define DPLL_VCO_ENABLE (1 << 31)
+#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_SYNCLOCK_ENABLE (1 << 29)
+#define DPLL_VGA_MODE_DIS (1 << 28)
+#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+#define DPLL_MODE_MASK (3 << 26)
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
+#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+#define DPLL_LOCK (1 << 15) /* CDV */
-#define DPLL_A 0x06014
-#define DPLL_B 0x06018
-# define DPLL_VCO_ENABLE (1 << 31)
-# define DPLL_DVO_HIGH_SPEED (1 << 30)
-# define DPLL_SYNCLOCK_ENABLE (1 << 29)
-# define DPLL_VGA_MODE_DIS (1 << 28)
-# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
-# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
-# define DPLL_MODE_MASK (3 << 26)
-# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
-# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
-# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
-# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
-# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
-# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
/*
* The i830 generation, in DAC/serial mode, defines p1 as two plus this
* bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
@@ -152,35 +153,35 @@
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
* this field (only one bit may be set).
*/
-# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
-# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
-# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
+#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+#define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required
* in DVO non-gang */
-# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
-# define PLL_REF_INPUT_DREFCLK (0 << 13)
-# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
-# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
+# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
+#define PLL_REF_INPUT_DREFCLK (0 << 13)
+#define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
+#define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO
* TVCLKIN */
-# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
-# define PLL_REF_INPUT_MASK (3 << 13)
-# define PLL_LOAD_PULSE_PHASE_SHIFT 9
+#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+#define PLL_REF_INPUT_MASK (3 << 13)
+#define PLL_LOAD_PULSE_PHASE_SHIFT 9
/*
* Parallel to Serial Load Pulse phase selection.
* Selects the phase for the 10X DPLL clock for the PCIe
* digital display port. The range is 4 to 13; 10 or more
* is just a flip delay. The default is 6
*/
-# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
-# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
+#define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
/*
* SDVO multiplier for 945G/GM. Not used on 965.
*
* DPLL_MD_UDI_MULTIPLIER_MASK
*/
-# define SDVO_MULTIPLIER_MASK 0x000000ff
-# define SDVO_MULTIPLIER_SHIFT_HIRES 4
-# define SDVO_MULTIPLIER_SHIFT_VGA 0
+#define SDVO_MULTIPLIER_MASK 0x000000ff
+#define SDVO_MULTIPLIER_SHIFT_HIRES 4
+#define SDVO_MULTIPLIER_SHIFT_VGA 0
/*
* PLL_MD
@@ -194,11 +195,11 @@
*
* Value is pixels minus 1. Must be set to 1 pixel for SDVO.
*/
-# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
-# define DPLL_MD_UDI_DIVIDER_SHIFT 24
+#define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
+#define DPLL_MD_UDI_DIVIDER_SHIFT 24
/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
-# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
-# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
+#define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
+#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
/*
* SDVO/UDI pixel multiplier.
*
@@ -216,80 +217,94 @@
* This register field has values of multiplication factor minus 1, with
* a maximum multiplier of 5 for SDVO.
*/
-# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
-# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
+#define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
+#define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
/*
* SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
* This best be set to the default value (3) or the CRT won't work. No,
* I don't entirely understand what this does...
*/
-# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
-# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define DPLL_TEST 0x606c
-# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
-# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
-# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
-# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
-# define DPLLB_TEST_N_BYPASS (1 << 19)
-# define DPLLB_TEST_M_BYPASS (1 << 18)
-# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
-# define DPLLA_TEST_N_BYPASS (1 << 3)
-# define DPLLA_TEST_M_BYPASS (1 << 2)
-# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
+#define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
+#define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
+#define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
+#define DPLLB_TEST_N_BYPASS (1 << 19)
+#define DPLLB_TEST_M_BYPASS (1 << 18)
+#define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
+#define DPLLA_TEST_N_BYPASS (1 << 3)
+#define DPLLA_TEST_M_BYPASS (1 << 2)
+#define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
#define ADPA 0x61100
-#define ADPA_DAC_ENABLE (1<<31)
-#define ADPA_DAC_DISABLE 0
-#define ADPA_PIPE_SELECT_MASK (1<<30)
-#define ADPA_PIPE_A_SELECT 0
-#define ADPA_PIPE_B_SELECT (1<<30)
-#define ADPA_USE_VGA_HVPOLARITY (1<<15)
-#define ADPA_SETS_HVPOLARITY 0
-#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
-#define ADPA_VSYNC_CNTL_ENABLE 0
-#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
-#define ADPA_HSYNC_CNTL_ENABLE 0
-#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
-#define ADPA_VSYNC_ACTIVE_LOW 0
-#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
-#define ADPA_HSYNC_ACTIVE_LOW 0
-
-#define FPA0 0x06040
-#define FPA1 0x06044
-#define FPB0 0x06048
-#define FPB1 0x0604c
-# define FP_N_DIV_MASK 0x003f0000
-# define FP_N_DIV_SHIFT 16
-# define FP_M1_DIV_MASK 0x00003f00
-# define FP_M1_DIV_SHIFT 8
-# define FP_M2_DIV_MASK 0x0000003f
-# define FP_M2_DIV_SHIFT 0
-
+#define ADPA_DAC_ENABLE (1 << 31)
+#define ADPA_DAC_DISABLE 0
+#define ADPA_PIPE_SELECT_MASK (1 << 30)
+#define ADPA_PIPE_A_SELECT 0
+#define ADPA_PIPE_B_SELECT (1 << 30)
+#define ADPA_USE_VGA_HVPOLARITY (1 << 15)
+#define ADPA_SETS_HVPOLARITY 0
+#define ADPA_VSYNC_CNTL_DISABLE (1 << 11)
+#define ADPA_VSYNC_CNTL_ENABLE 0
+#define ADPA_HSYNC_CNTL_DISABLE (1 << 10)
+#define ADPA_HSYNC_CNTL_ENABLE 0
+#define ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
+#define ADPA_VSYNC_ACTIVE_LOW 0
+#define ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
+#define ADPA_HSYNC_ACTIVE_LOW 0
+
+#define FPA0 0x06040
+#define FPA1 0x06044
+#define FPB0 0x06048
+#define FPB1 0x0604c
+#define FP_N_DIV_MASK 0x003f0000
+#define FP_N_DIV_SHIFT 16
+#define FP_M1_DIV_MASK 0x00003f00
+#define FP_M1_DIV_SHIFT 8
+#define FP_M2_DIV_MASK 0x0000003f
+#define FP_M2_DIV_SHIFT 0
#define PORT_HOTPLUG_EN 0x61110
-# define SDVOB_HOTPLUG_INT_EN (1 << 26)
-# define SDVOC_HOTPLUG_INT_EN (1 << 25)
-# define TV_HOTPLUG_INT_EN (1 << 18)
-# define CRT_HOTPLUG_INT_EN (1 << 9)
-# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+#define SDVOB_HOTPLUG_INT_EN (1 << 26)
+#define SDVOC_HOTPLUG_INT_EN (1 << 25)
+#define TV_HOTPLUG_INT_EN (1 << 18)
+#define CRT_HOTPLUG_INT_EN (1 << 9)
+#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+/* CDV.. */
+#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
+#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
+#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
+#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
+#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
+#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
+#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
+#define CRT_HOTPLUG_DETECT_MASK 0x000000F8
#define PORT_HOTPLUG_STAT 0x61114
-# define CRT_HOTPLUG_INT_STATUS (1 << 11)
-# define TV_HOTPLUG_INT_STATUS (1 << 10)
-# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
-# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
-# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
-# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
-# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
-# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
+#define CRT_HOTPLUG_INT_STATUS (1 << 11)
+#define TV_HOTPLUG_INT_STATUS (1 << 10)
+#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
#define SDVOB 0x61140
#define SDVOC 0x61160
-#define SDVO_ENABLE (1 << 31)
-#define SDVO_PIPE_B_SELECT (1 << 30)
-#define SDVO_STALL_SELECT (1 << 29)
-#define SDVO_INTERRUPT_ENABLE (1 << 26)
+#define SDVO_ENABLE (1 << 31)
+#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_STALL_SELECT (1 << 29)
+#define SDVO_INTERRUPT_ENABLE (1 << 26)
+
/**
* 915G/GM SDVO pixel multiplier.
*
@@ -297,18 +312,18 @@
*
* DPLL_MD_UDI_MULTIPLIER_MASK
*/
-#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
-#define SDVO_PORT_MULTIPLY_SHIFT 23
-#define SDVO_PHASE_SELECT_MASK (15 << 19)
-#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
-#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
-#define SDVOC_GANG_MODE (1 << 16)
-#define SDVO_BORDER_ENABLE (1 << 7)
-#define SDVOB_PCIE_CONCURRENCY (1 << 3)
-#define SDVO_DETECTED (1 << 2)
+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT 23
+#define SDVO_PHASE_SELECT_MASK (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+#define SDVOC_GANG_MODE (1 << 16)
+#define SDVO_BORDER_ENABLE (1 << 7)
+#define SDVOB_PCIE_CONCURRENCY (1 << 3)
+#define SDVO_DETECTED (1 << 2)
/* Bits to be preserved when writing */
#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
-#define SDVOC_PRESERVE_MASK (1 << 17)
+#define SDVOC_PRESERVE_MASK (1 << 17)
/*
* This register controls the LVDS output enable, pipe selection, and data
@@ -321,116 +336,116 @@
* Enables the LVDS port. This bit must be set before DPLLs are enabled, as
* the DPLL semantics change when the LVDS is assigned to that pipe.
*/
-# define LVDS_PORT_EN (1 << 31)
+#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
-# define LVDS_PIPEB_SELECT (1 << 30)
+#define LVDS_PIPEB_SELECT (1 << 30)
/* Turns on border drawing to allow centered display. */
-# define LVDS_BORDER_EN (1 << 15)
+#define LVDS_BORDER_EN (1 << 15)
/*
* Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
* pixel.
*/
-# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
-# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
-# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
+#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
+#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
+#define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
/*
* Controls the A3 data pair, which contains the additional LSBs for 24 bit
* mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
* on.
*/
-# define LVDS_A3_POWER_MASK (3 << 6)
-# define LVDS_A3_POWER_DOWN (0 << 6)
-# define LVDS_A3_POWER_UP (3 << 6)
+#define LVDS_A3_POWER_MASK (3 << 6)
+#define LVDS_A3_POWER_DOWN (0 << 6)
+#define LVDS_A3_POWER_UP (3 << 6)
/*
* Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
* is set.
*/
-# define LVDS_CLKB_POWER_MASK (3 << 4)
-# define LVDS_CLKB_POWER_DOWN (0 << 4)
-# define LVDS_CLKB_POWER_UP (3 << 4)
+#define LVDS_CLKB_POWER_MASK (3 << 4)
+#define LVDS_CLKB_POWER_DOWN (0 << 4)
+#define LVDS_CLKB_POWER_UP (3 << 4)
/*
* Controls the B0-B3 data pairs. This must be set to match the DPLL p2
* setting for whether we are in dual-channel mode. The B3 pair will
* additionally only be powered up when LVDS_A3_POWER_UP is set.
*/
-# define LVDS_B0B3_POWER_MASK (3 << 2)
-# define LVDS_B0B3_POWER_DOWN (0 << 2)
-# define LVDS_B0B3_POWER_UP (3 << 2)
-
-#define PIPEACONF 0x70008
-#define PIPEACONF_ENABLE (1<<31)
-#define PIPEACONF_DISABLE 0
-#define PIPEACONF_DOUBLE_WIDE (1<<30)
-#define PIPECONF_ACTIVE (1<<30)
-#define I965_PIPECONF_ACTIVE (1<<30)
-#define PIPECONF_DSIPLL_LOCK (1<<29)
-#define PIPEACONF_SINGLE_WIDE 0
-#define PIPEACONF_PIPE_UNLOCKED 0
-#define PIPEACONF_DSR (1<<26)
-#define PIPEACONF_PIPE_LOCKED (1<<25)
-#define PIPEACONF_PALETTE 0
-#define PIPECONF_FORCE_BORDER (1<<25)
-#define PIPEACONF_GAMMA (1<<24)
-#define PIPECONF_PROGRESSIVE (0 << 21)
+#define LVDS_B0B3_POWER_MASK (3 << 2)
+#define LVDS_B0B3_POWER_DOWN (0 << 2)
+#define LVDS_B0B3_POWER_UP (3 << 2)
+
+#define PIPEACONF 0x70008
+#define PIPEACONF_ENABLE (1 << 31)
+#define PIPEACONF_DISABLE 0
+#define PIPEACONF_DOUBLE_WIDE (1 << 30)
+#define PIPECONF_ACTIVE (1 << 30)
+#define I965_PIPECONF_ACTIVE (1 << 30)
+#define PIPECONF_DSIPLL_LOCK (1 << 29)
+#define PIPEACONF_SINGLE_WIDE 0
+#define PIPEACONF_PIPE_UNLOCKED 0
+#define PIPEACONF_DSR (1 << 26)
+#define PIPEACONF_PIPE_LOCKED (1 << 25)
+#define PIPEACONF_PALETTE 0
+#define PIPECONF_FORCE_BORDER (1 << 25)
+#define PIPEACONF_GAMMA (1 << 24)
+#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
-#define PIPECONF_PLANE_OFF (1<<19)
-#define PIPECONF_CURSOR_OFF (1<<18)
-
+#define PIPECONF_PLANE_OFF (1 << 19)
+#define PIPECONF_CURSOR_OFF (1 << 18)
-#define PIPEBCONF 0x71008
-#define PIPEBCONF_ENABLE (1<<31)
-#define PIPEBCONF_DISABLE 0
-#define PIPEBCONF_DOUBLE_WIDE (1<<30)
-#define PIPEBCONF_DISABLE 0
-#define PIPEBCONF_GAMMA (1<<24)
-#define PIPEBCONF_PALETTE 0
+#define PIPEBCONF 0x71008
+#define PIPEBCONF_ENABLE (1 << 31)
+#define PIPEBCONF_DISABLE 0
+#define PIPEBCONF_DOUBLE_WIDE (1 << 30)
+#define PIPEBCONF_DISABLE 0
+#define PIPEBCONF_GAMMA (1 << 24)
+#define PIPEBCONF_PALETTE 0
-#define PIPECCONF 0x72008
+#define PIPECCONF 0x72008
#define PIPEBGCMAXRED 0x71010
#define PIPEBGCMAXGREEN 0x71014
#define PIPEBGCMAXBLUE 0x71018
-#define PIPEASTAT 0x70024
+#define PIPEASTAT 0x70024
#define PIPEBSTAT 0x71024
#define PIPECSTAT 0x72024
-#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
-#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2)
-#define PIPE_VBLANK_CLEAR (1 << 1)
-#define PIPE_VBLANK_STATUS (1 << 1)
-#define PIPE_TE_STATUS (1UL<<6)
-#define PIPE_DPST_EVENT_STATUS (1UL<<7)
-#define PIPE_VSYNC_CLEAR (1UL<<9)
-#define PIPE_VSYNC_STATUS (1UL<<9)
-#define PIPE_HDMI_AUDIO_UNDERRUN_STATUS (1UL<<10)
-#define PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS (1UL<<11)
-#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18)
-#define PIPE_TE_ENABLE (1UL<<22)
-#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
-#define PIPE_VSYNC_ENABL (1UL<<25)
-#define PIPE_HDMI_AUDIO_UNDERRUN (1UL<<26)
-#define PIPE_HDMI_AUDIO_BUFFER_DONE (1UL<<27)
-#define PIPE_HDMI_AUDIO_INT_MASK (PIPE_HDMI_AUDIO_UNDERRUN | PIPE_HDMI_AUDIO_BUFFER_DONE)
+#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1)
+#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2)
+#define PIPE_VBLANK_CLEAR (1 << 1)
+#define PIPE_VBLANK_STATUS (1 << 1)
+#define PIPE_TE_STATUS (1UL << 6)
+#define PIPE_DPST_EVENT_STATUS (1UL << 7)
+#define PIPE_VSYNC_CLEAR (1UL << 9)
+#define PIPE_VSYNC_STATUS (1UL << 9)
+#define PIPE_HDMI_AUDIO_UNDERRUN_STATUS (1UL << 10)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS (1UL << 11)
+#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17)
+#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18)
+#define PIPE_TE_ENABLE (1UL << 22)
+#define PIPE_DPST_EVENT_ENABLE (1UL << 23)
+#define PIPE_VSYNC_ENABL (1UL << 25)
+#define PIPE_HDMI_AUDIO_UNDERRUN (1UL << 26)
+#define PIPE_HDMI_AUDIO_BUFFER_DONE (1UL << 27)
+#define PIPE_HDMI_AUDIO_INT_MASK (PIPE_HDMI_AUDIO_UNDERRUN | \
+ PIPE_HDMI_AUDIO_BUFFER_DONE)
#define PIPE_EVENT_MASK ((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)|(1 << 20)|(1 << 16))
#define PIPE_VBLANK_MASK ((1 << 25)|(1 << 24)|(1 << 18)|(1 << 17))
#define HISTOGRAM_INT_CONTROL 0x61268
#define HISTOGRAM_BIN_DATA 0X61264
#define HISTOGRAM_LOGIC_CONTROL 0x61260
#define PWM_CONTROL_LOGIC 0x61250
-#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
-#define HISTOGRAM_INTERRUPT_ENABLE (1UL<<31)
-#define HISTOGRAM_LOGIC_ENABLE (1UL<<31)
-#define PWM_LOGIC_ENABLE (1UL<<31)
-#define PWM_PHASEIN_ENABLE (1UL<<25)
-#define PWM_PHASEIN_INT_ENABLE (1UL<<24)
-#define PWM_PHASEIN_VB_COUNT 0x00001f00
-#define PWM_PHASEIN_INC 0x0000001f
-#define HISTOGRAM_INT_CTRL_CLEAR (1UL<<30)
-#define DPST_YUV_LUMA_MODE 0
+#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10)
+#define HISTOGRAM_INTERRUPT_ENABLE (1UL << 31)
+#define HISTOGRAM_LOGIC_ENABLE (1UL << 31)
+#define PWM_LOGIC_ENABLE (1UL << 31)
+#define PWM_PHASEIN_ENABLE (1UL << 25)
+#define PWM_PHASEIN_INT_ENABLE (1UL << 24)
+#define PWM_PHASEIN_VB_COUNT 0x00001f00
+#define PWM_PHASEIN_INC 0x0000001f
+#define HISTOGRAM_INT_CTRL_CLEAR (1UL << 30)
+#define DPST_YUV_LUMA_MODE 0
struct dpst_ie_histogram_control {
union {
@@ -470,12 +485,12 @@ struct dpst_guardband {
#define PIPEBFRAMEPIXEL 0x71044
#define PIPECFRAMEHIGH 0x72040
#define PIPECFRAMEPIXEL 0x72044
-#define PIPE_FRAME_HIGH_MASK 0x0000ffff
-#define PIPE_FRAME_HIGH_SHIFT 0
-#define PIPE_FRAME_LOW_MASK 0xff000000
-#define PIPE_FRAME_LOW_SHIFT 24
-#define PIPE_PIXEL_MASK 0x00ffffff
-#define PIPE_PIXEL_SHIFT 0
+#define PIPE_FRAME_HIGH_MASK 0x0000ffff
+#define PIPE_FRAME_HIGH_SHIFT 0
+#define PIPE_FRAME_LOW_MASK 0xff000000
+#define PIPE_FRAME_LOW_SHIFT 24
+#define PIPE_PIXEL_MASK 0x00ffffff
+#define PIPE_PIXEL_SHIFT 0
#define DSPARB 0x70030
#define DSPFW1 0x70034
@@ -488,30 +503,30 @@ struct dpst_guardband {
#define DSPACNTR 0x70180
#define DSPBCNTR 0x71180
#define DSPCCNTR 0x72180
-#define DISPLAY_PLANE_ENABLE (1<<31)
+#define DISPLAY_PLANE_ENABLE (1 << 31)
#define DISPLAY_PLANE_DISABLE 0
-#define DISPPLANE_GAMMA_ENABLE (1<<30)
+#define DISPPLANE_GAMMA_ENABLE (1 << 30)
#define DISPPLANE_GAMMA_DISABLE 0
-#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
-#define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_15_16BPP (0x4<<26)
-#define DISPPLANE_16BPP (0x5<<26)
-#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
-#define DISPPLANE_32BPP (0x7<<26)
-#define DISPPLANE_STEREO_ENABLE (1<<25)
+#define DISPPLANE_PIXFORMAT_MASK (0xf << 26)
+#define DISPPLANE_8BPP (0x2 << 26)
+#define DISPPLANE_15_16BPP (0x4 << 26)
+#define DISPPLANE_16BPP (0x5 << 26)
+#define DISPPLANE_32BPP_NO_ALPHA (0x6 << 26)
+#define DISPPLANE_32BPP (0x7 << 26)
+#define DISPPLANE_STEREO_ENABLE (1 << 25)
#define DISPPLANE_STEREO_DISABLE 0
-#define DISPPLANE_SEL_PIPE_MASK (1<<24)
+#define DISPPLANE_SEL_PIPE_MASK (1 << 24)
#define DISPPLANE_SEL_PIPE_POS 24
#define DISPPLANE_SEL_PIPE_A 0
-#define DISPPLANE_SEL_PIPE_B (1<<24)
-#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
+#define DISPPLANE_SEL_PIPE_B (1 << 24)
+#define DISPPLANE_SRC_KEY_ENABLE (1 << 22)
#define DISPPLANE_SRC_KEY_DISABLE 0
-#define DISPPLANE_LINE_DOUBLE (1<<20)
+#define DISPPLANE_LINE_DOUBLE (1 << 20)
#define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0
-#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
+#define DISPPLANE_STEREO_POLARITY_SECOND (1 << 18)
/* plane B only */
-#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
+#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
@@ -548,25 +563,25 @@ struct dpst_guardband {
#define DSPCSURF 0x7219C
#define DSPCTILEOFF 0x721A4
-#define DSPCKEYMAXVAL 0x721A0
-#define DSPCKEYMINVAL 0x72194
-#define DSPCKEYMSK 0x72198
+#define DSPCKEYMAXVAL 0x721A0
+#define DSPCKEYMINVAL 0x72194
+#define DSPCKEYMSK 0x72198
#define VGACNTRL 0x71400
-# define VGA_DISP_DISABLE (1 << 31)
-# define VGA_2X_MODE (1 << 30)
-# define VGA_PIPE_B_SELECT (1 << 29)
+#define VGA_DISP_DISABLE (1 << 31)
+#define VGA_2X_MODE (1 << 30)
+#define VGA_PIPE_B_SELECT (1 << 29)
/*
* Overlay registers
*/
#define OV_C_OFFSET 0x08000
#define OV_OVADD 0x30000
-#define OV_DOVASTA 0x30008
-# define OV_PIPE_SELECT ((1 << 6)|(1 << 7))
-# define OV_PIPE_SELECT_POS 6
-# define OV_PIPE_A 0
-# define OV_PIPE_C 1
+#define OV_DOVASTA 0x30008
+# define OV_PIPE_SELECT ((1 << 6)|(1 << 7))
+# define OV_PIPE_SELECT_POS 6
+# define OV_PIPE_A 0
+# define OV_PIPE_C 1
#define OV_OGAMC5 0x30010
#define OV_OGAMC4 0x30014
#define OV_OGAMC3 0x30018
@@ -574,7 +589,7 @@ struct dpst_guardband {
#define OV_OGAMC1 0x30020
#define OV_OGAMC0 0x30024
#define OVC_OVADD 0x38000
-#define OVC_DOVCSTA 0x38008
+#define OVC_DOVCSTA 0x38008
#define OVC_OGAMC5 0x38010
#define OVC_OGAMC4 0x38014
#define OVC_OGAMC3 0x38018
@@ -627,16 +642,16 @@ struct dpst_guardband {
/* Cursor A & B regs */
#define CURACNTR 0x70080
-#define CURSOR_MODE_DISABLE 0x00
-#define CURSOR_MODE_64_32B_AX 0x07
-#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
-#define MCURSOR_GAMMA_ENABLE (1 << 26)
+#define CURSOR_MODE_DISABLE 0x00
+#define CURSOR_MODE_64_32B_AX 0x07
+#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define MCURSOR_GAMMA_ENABLE (1 << 26)
#define CURABASE 0x70084
#define CURAPOS 0x70088
-#define CURSOR_POS_MASK 0x007FF
-#define CURSOR_POS_SIGN 0x8000
-#define CURSOR_X_SHIFT 0
-#define CURSOR_Y_SHIFT 16
+#define CURSOR_POS_MASK 0x007FF
+#define CURSOR_POS_SIGN 0x8000
+#define CURSOR_X_SHIFT 0
+#define CURSOR_Y_SHIFT 16
#define CURBCNTR 0x700c0
#define CURBBASE 0x700c4
#define CURBPOS 0x700c8
@@ -647,22 +662,22 @@ struct dpst_guardband {
/*
* Interrupt Registers
*/
-#define IER 0x020a0
-#define IIR 0x020a4
-#define IMR 0x020a8
-#define ISR 0x020ac
+#define IER 0x020a0
+#define IIR 0x020a4
+#define IMR 0x020a8
+#define ISR 0x020ac
/*
* MOORESTOWN delta registers
*/
#define MRST_DPLL_A 0x0f014
#define MDFLD_DPLL_B 0x0f018
-#define MDFLD_INPUT_REF_SEL (1 << 14)
-#define MDFLD_VCO_SEL (1 << 16)
-#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
-#define MDFLD_PLL_LATCHEN (1 << 28)
-#define MDFLD_PWR_GATE_EN (1 << 30)
-#define MDFLD_P1_MASK (0x1FF << 17)
+#define MDFLD_INPUT_REF_SEL (1 << 14)
+#define MDFLD_VCO_SEL (1 << 16)
+#define DPLLA_MODE_LVDS (2 << 26) /* mrst */
+#define MDFLD_PLL_LATCHEN (1 << 28)
+#define MDFLD_PWR_GATE_EN (1 << 30)
+#define MDFLD_P1_MASK (0x1FF << 17)
#define MRST_FPA0 0x0f040
#define MRST_FPA1 0x0f044
#define MDFLD_DPLL_DIV0 0x0f048
@@ -672,45 +687,45 @@ struct dpst_guardband {
/*
* MEDFIELD HDMI registers
*/
-#define HDMIPHYMISCCTL 0x61134
-# define HDMI_PHY_POWER_DOWN 0x7f
-#define HDMIB_CONTROL 0x61140
-# define HDMIB_PORT_EN (1 << 31)
-# define HDMIB_PIPE_B_SELECT (1 << 30)
-# define HDMIB_NULL_PACKET (1 << 9)
-#define HDMIB_HDCP_PORT (1 << 5)
+#define HDMIPHYMISCCTL 0x61134
+#define HDMI_PHY_POWER_DOWN 0x7f
+#define HDMIB_CONTROL 0x61140
+#define HDMIB_PORT_EN (1 << 31)
+#define HDMIB_PIPE_B_SELECT (1 << 30)
+#define HDMIB_NULL_PACKET (1 << 9)
+#define HDMIB_HDCP_PORT (1 << 5)
/* #define LVDS 0x61180 */
-# define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
-# define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
-# define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
+#define MRST_PANEL_8TO6_DITHER_ENABLE (1 << 25)
+#define MRST_PANEL_24_DOT_1_FORMAT (1 << 24)
+#define LVDS_A3_POWER_UP_0_OUTPUT (1 << 6)
#define MIPI 0x61190
#define MIPI_C 0x62190
-# define MIPI_PORT_EN (1 << 31)
+#define MIPI_PORT_EN (1 << 31)
/* Turns on border drawing to allow centered display. */
-# define SEL_FLOPPED_HSTX (1 << 23)
-# define PASS_FROM_SPHY_TO_AFE (1 << 16)
-# define MIPI_BORDER_EN (1 << 15)
-# define MIPIA_3LANE_MIPIC_1LANE 0x1
-# define MIPIA_2LANE_MIPIC_2LANE 0x2
-# define TE_TRIGGER_DSI_PROTOCOL (1 << 2)
-# define TE_TRIGGER_GPIO_PIN (1 << 3)
-#define MIPI_TE_COUNT 0x61194
+#define SEL_FLOPPED_HSTX (1 << 23)
+#define PASS_FROM_SPHY_TO_AFE (1 << 16)
+#define MIPI_BORDER_EN (1 << 15)
+#define MIPIA_3LANE_MIPIC_1LANE 0x1
+#define MIPIA_2LANE_MIPIC_2LANE 0x2
+#define TE_TRIGGER_DSI_PROTOCOL (1 << 2)
+#define TE_TRIGGER_GPIO_PIN (1 << 3)
+#define MIPI_TE_COUNT 0x61194
/* #define PP_CONTROL 0x61204 */
-# define POWER_DOWN_ON_RESET (1 << 1)
+#define POWER_DOWN_ON_RESET (1 << 1)
/* #define PFIT_CONTROL 0x61230 */
-# define PFIT_PIPE_SELECT (3 << 29)
-# define PFIT_PIPE_SELECT_SHIFT (29)
+#define PFIT_PIPE_SELECT (3 << 29)
+#define PFIT_PIPE_SELECT_SHIFT (29)
/* #define BLC_PWM_CTL 0x61254 */
-#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
-#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
+#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT (16)
+#define MRST_BACKLIGHT_MODULATION_FREQ_MASK (0xffff << 16)
/* #define PIPEACONF 0x70008 */
-#define PIPEACONF_PIPE_STATE (1<<30)
+#define PIPEACONF_PIPE_STATE (1 << 30)
/* #define DSPACNTR 0x70180 */
#define MRST_DSPABASE 0x7019c
@@ -724,281 +739,286 @@ struct dpst_guardband {
/*
* MIPI IP registers
*/
-#define MIPIC_REG_OFFSET 0x800
-#define DEVICE_READY_REG 0xb000
-#define LP_OUTPUT_HOLD (1 << 16)
-#define EXIT_ULPS_DEV_READY 0x3
-#define LP_OUTPUT_HOLD_RELEASE 0x810000
-# define ENTERING_ULPS (2 << 1)
-# define EXITING_ULPS (1 << 1)
-# define ULPS_MASK (3 << 1)
-# define BUS_POSSESSION (1 << 3)
-#define INTR_STAT_REG 0xb004
-#define RX_SOT_ERROR (1 << 0)
-#define RX_SOT_SYNC_ERROR (1 << 1)
-#define RX_ESCAPE_MODE_ENTRY_ERROR (1 << 3)
-#define RX_LP_TX_SYNC_ERROR (1 << 4)
-#define RX_HS_RECEIVE_TIMEOUT_ERROR (1 << 5)
-#define RX_FALSE_CONTROL_ERROR (1 << 6)
-#define RX_ECC_SINGLE_BIT_ERROR (1 << 7)
-#define RX_ECC_MULTI_BIT_ERROR (1 << 8)
-#define RX_CHECKSUM_ERROR (1 << 9)
-#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED (1 << 10)
-#define RX_DSI_VC_ID_INVALID (1 << 11)
-#define TX_FALSE_CONTROL_ERROR (1 << 12)
-#define TX_ECC_SINGLE_BIT_ERROR (1 << 13)
-#define TX_ECC_MULTI_BIT_ERROR (1 << 14)
-#define TX_CHECKSUM_ERROR (1 << 15)
-#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED (1 << 16)
-#define TX_DSI_VC_ID_INVALID (1 << 17)
-#define HIGH_CONTENTION (1 << 18)
-#define LOW_CONTENTION (1 << 19)
-#define DPI_FIFO_UNDER_RUN (1 << 20)
-#define HS_TX_TIMEOUT (1 << 21)
-#define LP_RX_TIMEOUT (1 << 22)
-#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
-#define ACK_WITH_NO_ERROR (1 << 24)
-#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
-#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
-#define SPL_PKT_SENT (1 << 30)
-#define INTR_EN_REG 0xb008
-#define DSI_FUNC_PRG_REG 0xb00c
-#define DPI_CHANNEL_NUMBER_POS 0x03
-#define DBI_CHANNEL_NUMBER_POS 0x05
-#define FMT_DPI_POS 0x07
-#define FMT_DBI_POS 0x0A
-#define DBI_DATA_WIDTH_POS 0x0D
+#define MIPIC_REG_OFFSET 0x800
+
+#define DEVICE_READY_REG 0xb000
+#define LP_OUTPUT_HOLD (1 << 16)
+#define EXIT_ULPS_DEV_READY 0x3
+#define LP_OUTPUT_HOLD_RELEASE 0x810000
+# define ENTERING_ULPS (2 << 1)
+# define EXITING_ULPS (1 << 1)
+# define ULPS_MASK (3 << 1)
+# define BUS_POSSESSION (1 << 3)
+#define INTR_STAT_REG 0xb004
+#define RX_SOT_ERROR (1 << 0)
+#define RX_SOT_SYNC_ERROR (1 << 1)
+#define RX_ESCAPE_MODE_ENTRY_ERROR (1 << 3)
+#define RX_LP_TX_SYNC_ERROR (1 << 4)
+#define RX_HS_RECEIVE_TIMEOUT_ERROR (1 << 5)
+#define RX_FALSE_CONTROL_ERROR (1 << 6)
+#define RX_ECC_SINGLE_BIT_ERROR (1 << 7)
+#define RX_ECC_MULTI_BIT_ERROR (1 << 8)
+#define RX_CHECKSUM_ERROR (1 << 9)
+#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED (1 << 10)
+#define RX_DSI_VC_ID_INVALID (1 << 11)
+#define TX_FALSE_CONTROL_ERROR (1 << 12)
+#define TX_ECC_SINGLE_BIT_ERROR (1 << 13)
+#define TX_ECC_MULTI_BIT_ERROR (1 << 14)
+#define TX_CHECKSUM_ERROR (1 << 15)
+#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED (1 << 16)
+#define TX_DSI_VC_ID_INVALID (1 << 17)
+#define HIGH_CONTENTION (1 << 18)
+#define LOW_CONTENTION (1 << 19)
+#define DPI_FIFO_UNDER_RUN (1 << 20)
+#define HS_TX_TIMEOUT (1 << 21)
+#define LP_RX_TIMEOUT (1 << 22)
+#define TURN_AROUND_ACK_TIMEOUT (1 << 23)
+#define ACK_WITH_NO_ERROR (1 << 24)
+#define HS_GENERIC_WR_FIFO_FULL (1 << 27)
+#define LP_GENERIC_WR_FIFO_FULL (1 << 28)
+#define SPL_PKT_SENT (1 << 30)
+#define INTR_EN_REG 0xb008
+#define DSI_FUNC_PRG_REG 0xb00c
+#define DPI_CHANNEL_NUMBER_POS 0x03
+#define DBI_CHANNEL_NUMBER_POS 0x05
+#define FMT_DPI_POS 0x07
+#define FMT_DBI_POS 0x0A
+#define DBI_DATA_WIDTH_POS 0x0D
+
/* DPI PIXEL FORMATS */
-#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
-#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
-#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
- * 666 FORMAT
- */
-#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
-#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
-#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
-#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
-#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
-#define DBI_NOT_SUPPORTED 0x00 /* command mode
- * is not supported
- */
-#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
+#define RGB_565_FMT 0x01 /* RGB 565 FORMAT */
+#define RGB_666_FMT 0x02 /* RGB 666 FORMAT */
+#define LRGB_666_FMT 0x03 /* RGB LOOSELY PACKED
+ * 666 FORMAT
+ */
+#define RGB_888_FMT 0x04 /* RGB 888 FORMAT */
+#define VIRTUAL_CHANNEL_NUMBER_0 0x00 /* Virtual channel 0 */
+#define VIRTUAL_CHANNEL_NUMBER_1 0x01 /* Virtual channel 1 */
+#define VIRTUAL_CHANNEL_NUMBER_2 0x02 /* Virtual channel 2 */
+#define VIRTUAL_CHANNEL_NUMBER_3 0x03 /* Virtual channel 3 */
+
+#define DBI_NOT_SUPPORTED 0x00 /* command mode
+ * is not supported
+ */
+#define DBI_DATA_WIDTH_16BIT 0x01 /* 16 bit data */
#define DBI_DATA_WIDTH_9BIT 0x02 /* 9 bit data */
#define DBI_DATA_WIDTH_8BIT 0x03 /* 8 bit data */
-#define DBI_DATA_WIDTH_OPT1 0x04 /* option 1 */
-#define DBI_DATA_WIDTH_OPT2 0x05 /* option 2 */
-#define HS_TX_TIMEOUT_REG 0xb010
-#define LP_RX_TIMEOUT_REG 0xb014
-#define TURN_AROUND_TIMEOUT_REG 0xb018
-#define DEVICE_RESET_REG 0xb01C
-#define DPI_RESOLUTION_REG 0xb020
-#define RES_V_POS 0x10
-#define DBI_RESOLUTION_REG 0xb024 /* Reserved for MDFLD */
-#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
-#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
-#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
-#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
-#define VERT_SYNC_PAD_COUNT_REG 0xb038
-#define VERT_BACK_PORCH_COUNT_REG 0xb03c
-#define VERT_FRONT_PORCH_COUNT_REG 0xb040
-#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
-#define DPI_CONTROL_REG 0xb048
-#define DPI_SHUT_DOWN (1 << 0)
-#define DPI_TURN_ON (1 << 1)
-#define DPI_COLOR_MODE_ON (1 << 2)
-#define DPI_COLOR_MODE_OFF (1 << 3)
-#define DPI_BACK_LIGHT_ON (1 << 4)
-#define DPI_BACK_LIGHT_OFF (1 << 5)
-#define DPI_LP (1 << 6)
-#define DPI_DATA_REG 0xb04c
-#define DPI_BACK_LIGHT_ON_DATA 0x07
-#define DPI_BACK_LIGHT_OFF_DATA 0x17
-#define INIT_COUNT_REG 0xb050
-#define MAX_RET_PAK_REG 0xb054
-#define VIDEO_FMT_REG 0xb058
-#define COMPLETE_LAST_PCKT (1 << 2)
-#define EOT_DISABLE_REG 0xb05c
-#define ENABLE_CLOCK_STOPPING (1 << 1)
-#define LP_BYTECLK_REG 0xb060
-#define LP_GEN_DATA_REG 0xb064
-#define HS_GEN_DATA_REG 0xb068
-#define LP_GEN_CTRL_REG 0xb06C
-#define HS_GEN_CTRL_REG 0xb070
-#define DCS_CHANNEL_NUMBER_POS 0x06
-#define MCS_COMMANDS_POS 0x8
-#define WORD_COUNTS_POS 0x8
-#define MCS_PARAMETER_POS 0x10
-#define GEN_FIFO_STAT_REG 0xb074
-#define HS_DATA_FIFO_FULL (1 << 0)
-#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
-#define HS_DATA_FIFO_EMPTY (1 << 2)
-#define LP_DATA_FIFO_FULL (1 << 8)
-#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
-#define LP_DATA_FIFO_EMPTY (1 << 10)
-#define HS_CTRL_FIFO_FULL (1 << 16)
-#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
-#define HS_CTRL_FIFO_EMPTY (1 << 18)
-#define LP_CTRL_FIFO_FULL (1 << 24)
-#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
-#define LP_CTRL_FIFO_EMPTY (1 << 26)
-#define DBI_FIFO_EMPTY (1 << 27)
-#define DPI_FIFO_EMPTY (1 << 28)
-#define HS_LS_DBI_ENABLE_REG 0xb078
-#define TXCLKESC_REG 0xb07c
-#define DPHY_PARAM_REG 0xb080
-#define DBI_BW_CTRL_REG 0xb084
-#define CLK_LANE_SWT_REG 0xb088
+#define DBI_DATA_WIDTH_OPT1 0x04 /* option 1 */
+#define DBI_DATA_WIDTH_OPT2 0x05 /* option 2 */
+
+#define HS_TX_TIMEOUT_REG 0xb010
+#define LP_RX_TIMEOUT_REG 0xb014
+#define TURN_AROUND_TIMEOUT_REG 0xb018
+#define DEVICE_RESET_REG 0xb01C
+#define DPI_RESOLUTION_REG 0xb020
+#define RES_V_POS 0x10
+#define DBI_RESOLUTION_REG 0xb024 /* Reserved for MDFLD */
+#define HORIZ_SYNC_PAD_COUNT_REG 0xb028
+#define HORIZ_BACK_PORCH_COUNT_REG 0xb02C
+#define HORIZ_FRONT_PORCH_COUNT_REG 0xb030
+#define HORIZ_ACTIVE_AREA_COUNT_REG 0xb034
+#define VERT_SYNC_PAD_COUNT_REG 0xb038
+#define VERT_BACK_PORCH_COUNT_REG 0xb03c
+#define VERT_FRONT_PORCH_COUNT_REG 0xb040
+#define HIGH_LOW_SWITCH_COUNT_REG 0xb044
+#define DPI_CONTROL_REG 0xb048
+#define DPI_SHUT_DOWN (1 << 0)
+#define DPI_TURN_ON (1 << 1)
+#define DPI_COLOR_MODE_ON (1 << 2)
+#define DPI_COLOR_MODE_OFF (1 << 3)
+#define DPI_BACK_LIGHT_ON (1 << 4)
+#define DPI_BACK_LIGHT_OFF (1 << 5)
+#define DPI_LP (1 << 6)
+#define DPI_DATA_REG 0xb04c
+#define DPI_BACK_LIGHT_ON_DATA 0x07
+#define DPI_BACK_LIGHT_OFF_DATA 0x17
+#define INIT_COUNT_REG 0xb050
+#define MAX_RET_PAK_REG 0xb054
+#define VIDEO_FMT_REG 0xb058
+#define COMPLETE_LAST_PCKT (1 << 2)
+#define EOT_DISABLE_REG 0xb05c
+#define ENABLE_CLOCK_STOPPING (1 << 1)
+#define LP_BYTECLK_REG 0xb060
+#define LP_GEN_DATA_REG 0xb064
+#define HS_GEN_DATA_REG 0xb068
+#define LP_GEN_CTRL_REG 0xb06C
+#define HS_GEN_CTRL_REG 0xb070
+#define DCS_CHANNEL_NUMBER_POS 0x6
+#define MCS_COMMANDS_POS 0x8
+#define WORD_COUNTS_POS 0x8
+#define MCS_PARAMETER_POS 0x10
+#define GEN_FIFO_STAT_REG 0xb074
+#define HS_DATA_FIFO_FULL (1 << 0)
+#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
+#define HS_DATA_FIFO_EMPTY (1 << 2)
+#define LP_DATA_FIFO_FULL (1 << 8)
+#define LP_DATA_FIFO_HALF_EMPTY (1 << 9)
+#define LP_DATA_FIFO_EMPTY (1 << 10)
+#define HS_CTRL_FIFO_FULL (1 << 16)
+#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17)
+#define HS_CTRL_FIFO_EMPTY (1 << 18)
+#define LP_CTRL_FIFO_FULL (1 << 24)
+#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25)
+#define LP_CTRL_FIFO_EMPTY (1 << 26)
+#define DBI_FIFO_EMPTY (1 << 27)
+#define DPI_FIFO_EMPTY (1 << 28)
+#define HS_LS_DBI_ENABLE_REG 0xb078
+#define TXCLKESC_REG 0xb07c
+#define DPHY_PARAM_REG 0xb080
+#define DBI_BW_CTRL_REG 0xb084
+#define CLK_LANE_SWT_REG 0xb088
/*
* MIPI Adapter registers
*/
-#define MIPI_CONTROL_REG 0xb104
-#define MIPI_2X_CLOCK_BITS ((1 << 0) | (1 << 1))
-#define MIPI_DATA_ADDRESS_REG 0xb108
-#define MIPI_DATA_LENGTH_REG 0xb10C
-#define MIPI_COMMAND_ADDRESS_REG 0xb110
-#define MIPI_COMMAND_LENGTH_REG 0xb114
-#define MIPI_READ_DATA_RETURN_REG0 0xb118
-#define MIPI_READ_DATA_RETURN_REG1 0xb11C
-#define MIPI_READ_DATA_RETURN_REG2 0xb120
-#define MIPI_READ_DATA_RETURN_REG3 0xb124
-#define MIPI_READ_DATA_RETURN_REG4 0xb128
-#define MIPI_READ_DATA_RETURN_REG5 0xb12C
-#define MIPI_READ_DATA_RETURN_REG6 0xb130
-#define MIPI_READ_DATA_RETURN_REG7 0xb134
-#define MIPI_READ_DATA_VALID_REG 0xb138
+#define MIPI_CONTROL_REG 0xb104
+#define MIPI_2X_CLOCK_BITS ((1 << 0) | (1 << 1))
+#define MIPI_DATA_ADDRESS_REG 0xb108
+#define MIPI_DATA_LENGTH_REG 0xb10C
+#define MIPI_COMMAND_ADDRESS_REG 0xb110
+#define MIPI_COMMAND_LENGTH_REG 0xb114
+#define MIPI_READ_DATA_RETURN_REG0 0xb118
+#define MIPI_READ_DATA_RETURN_REG1 0xb11C
+#define MIPI_READ_DATA_RETURN_REG2 0xb120
+#define MIPI_READ_DATA_RETURN_REG3 0xb124
+#define MIPI_READ_DATA_RETURN_REG4 0xb128
+#define MIPI_READ_DATA_RETURN_REG5 0xb12C
+#define MIPI_READ_DATA_RETURN_REG6 0xb130
+#define MIPI_READ_DATA_RETURN_REG7 0xb134
+#define MIPI_READ_DATA_VALID_REG 0xb138
+
/* DBI COMMANDS */
-#define soft_reset 0x01
+#define soft_reset 0x01
/*
* The display module performs a software reset.
* Registers are written with their SW Reset default values.
*/
-#define get_power_mode 0x0a
+#define get_power_mode 0x0a
/*
* The display module returns the current power mode
*/
-#define get_address_mode 0x0b
+#define get_address_mode 0x0b
/*
* The display module returns the current status.
*/
-#define get_pixel_format 0x0c
+#define get_pixel_format 0x0c
/*
* This command gets the pixel format for the RGB image data
* used by the interface.
*/
-#define get_display_mode 0x0d
+#define get_display_mode 0x0d
/*
* The display module returns the Display Image Mode status.
*/
-#define get_signal_mode 0x0e
+#define get_signal_mode 0x0e
/*
* The display module returns the Display Signal Mode.
*/
-#define get_diagnostic_result 0x0f
+#define get_diagnostic_result 0x0f
/*
* The display module returns the self-diagnostic results following
* a Sleep Out command.
*/
-#define enter_sleep_mode 0x10
+#define enter_sleep_mode 0x10
/*
* This command causes the display module to enter the Sleep mode.
* In this mode, all unnecessary blocks inside the display module are
* disabled except interface communication. This is the lowest power
* mode the display module supports.
*/
-#define exit_sleep_mode 0x11
+#define exit_sleep_mode 0x11
/*
* This command causes the display module to exit Sleep mode.
* All blocks inside the display module are enabled.
*/
-#define enter_partial_mode 0x12
+#define enter_partial_mode 0x12
/*
* This command causes the display module to enter the Partial Display
* Mode. The Partial Display Mode window is described by the
* set_partial_area command.
*/
-#define enter_normal_mode 0x13
+#define enter_normal_mode 0x13
/*
* This command causes the display module to enter the Normal mode.
* Normal Mode is defined as Partial Display mode and Scroll mode are off
*/
-#define exit_invert_mode 0x20
+#define exit_invert_mode 0x20
/*
* This command causes the display module to stop inverting the image
* data on the display device. The frame memory contents remain unchanged.
* No status bits are changed.
*/
-#define enter_invert_mode 0x21
+#define enter_invert_mode 0x21
/*
* This command causes the display module to invert the image data only on
* the display device. The frame memory contents remain unchanged.
* No status bits are changed.
*/
-#define set_gamma_curve 0x26
+#define set_gamma_curve 0x26
/*
* This command selects the desired gamma curve for the display device.
* Four fixed gamma curves are defined in section DCS spec.
*/
-#define set_display_off 0x28
+#define set_display_off 0x28
/* ************************************************************************* *\
This command causes the display module to stop displaying the image data
on the display device. The frame memory contents remain unchanged.
No status bits are changed.
\* ************************************************************************* */
-#define set_display_on 0x29
+#define set_display_on 0x29
/* ************************************************************************* *\
This command causes the display module to start displaying the image data
on the display device. The frame memory contents remain unchanged.
No status bits are changed.
\* ************************************************************************* */
-#define set_column_address 0x2a
+#define set_column_address 0x2a
/*
* This command defines the column extent of the frame memory accessed by
 * the host processor with the read_memory_continue and
* write_memory_continue commands.
* No status bits are changed.
*/
-#define set_page_addr 0x2b
+#define set_page_addr 0x2b
/*
* This command defines the page extent of the frame memory accessed by
* the host processor with the write_memory_continue and
- * read_memory_continue command.
+ * read_memory_continue command.
* No status bits are changed.
*/
-#define write_mem_start 0x2c
+#define write_mem_start 0x2c
/*
* This command transfers image data from the host processor to the
- * display module s frame memory starting at the pixel location specified
+ * display module's frame memory starting at the pixel location specified
* by preceding set_column_address and set_page_address commands.
*/
-#define set_partial_area 0x30
+#define set_partial_area 0x30
/*
 * This command defines the Partial Display mode's display area.
* There are two parameters associated with this command, the first
* defines the Start Row (SR) and the second the End Row (ER). SR and ER
* refer to the Frame Memory Line Pointer.
*/
-#define set_scroll_area 0x33
+#define set_scroll_area 0x33
/*
 * This command defines the display module's Vertical Scrolling Area.
*/
-#define set_tear_off 0x34
+#define set_tear_off 0x34
/*
 * This command turns off the display module's Tearing Effect output
* signal on the TE signal line.
*/
-#define set_tear_on 0x35
+#define set_tear_on 0x35
/*
 * This command turns on the display module's Tearing Effect output signal
* on the TE signal line.
*/
-#define set_address_mode 0x36
+#define set_address_mode 0x36
/*
* This command sets the data order for transfers from the host processor
 * to the display module's frame memory, bits B[7:5] and B3, and from the
 * display module's frame memory to the display device, bits B[2:0] and B4.
*/
-#define set_scroll_start 0x37
+#define set_scroll_start 0x37
/*
* This command sets the start of the vertical scrolling area in the frame
* memory. The vertical scrolling area is fully defined when this command
@@ -1007,18 +1027,18 @@ No status bits are changed.
* line in the frame memory that is written to the display device as the
* first line of the vertical scroll area.
*/
-#define exit_idle_mode 0x38
+#define exit_idle_mode 0x38
/*
* This command causes the display module to exit Idle mode.
*/
-#define enter_idle_mode 0x39
+#define enter_idle_mode 0x39
/*
* This command causes the display module to enter Idle Mode.
* In Idle Mode, color expression is reduced. Colors are shown on the
* display device using the MSB of each of the R, G and B color
* components in the frame memory
*/
-#define set_pixel_format 0x3a
+#define set_pixel_format 0x3a
/*
* This command sets the pixel format for the RGB image data used by the
* interface.
@@ -1026,25 +1046,27 @@ No status bits are changed.
* Bits D[2:0] DBI Pixel Format Definition
* Bits D7 and D3 are not used.
*/
- #define DCS_PIXEL_FORMAT_3bbp 0x1
- #define DCS_PIXEL_FORMAT_8bbp 0x2
- #define DCS_PIXEL_FORMAT_12bbp 0x3
- #define DCS_PIXEL_FORMAT_16bbp 0x5
- #define DCS_PIXEL_FORMAT_18bbp 0x6
- #define DCS_PIXEL_FORMAT_24bbp 0x7
-#define write_mem_cont 0x3c
+#define DCS_PIXEL_FORMAT_3bpp 0x1
+#define DCS_PIXEL_FORMAT_8bpp 0x2
+#define DCS_PIXEL_FORMAT_12bpp 0x3
+#define DCS_PIXEL_FORMAT_16bpp 0x5
+#define DCS_PIXEL_FORMAT_18bpp 0x6
+#define DCS_PIXEL_FORMAT_24bpp 0x7
+
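/*
 * Illustrative sketch (editor's aside, not part of this patch): a DCS
 * set_pixel_format packet is the opcode plus one parameter byte selecting
 * one of the formats above; such packets are queued through the driver's
 * DBI command buffer.
 */
static const u8 example_set_24bpp[] = {
	set_pixel_format, DCS_PIXEL_FORMAT_24bpp,
};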
+#define write_mem_cont 0x3c
+
/*
* This command transfers image data from the host processor to the
* display module's frame memory continuing from the pixel location
* following the previous write_memory_continue or write_memory_start
* command.
*/
-#define set_tear_scanline 0x44
+#define set_tear_scanline 0x44
/*
 * This command turns on the display module's Tearing Effect output signal
* on the TE signal line when the display module reaches line N.
*/
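/*
 * Illustrative sketch (editor's aside, not part of this patch):
 * set_tear_scanline carries the 16-bit line number N, most significant
 * byte first; line 240 here is an arbitrary example value.
 */
static const u8 example_tear_scanline[] = {
	set_tear_scanline, 0x00, 0xf0,	/* N = 240 */
};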
-#define get_scanline 0x45
+#define get_scanline 0x45
/*
* The display module returns the current scanline, N, used to update the
* display device. The total number of scanlines on a display device is
@@ -1094,22 +1116,22 @@ No status bits are changed.
#define GAMMA_AUTO (1 << 0)
/* DCS Interface Pixel Formats */
-#define DCS_PIXEL_FORMAT_3BPP 0x1
-#define DCS_PIXEL_FORMAT_8BPP 0x2
-#define DCS_PIXEL_FORMAT_12BPP 0x3
-#define DCS_PIXEL_FORMAT_16BPP 0x5
-#define DCS_PIXEL_FORMAT_18BPP 0x6
-#define DCS_PIXEL_FORMAT_24BPP 0x7
+#define DCS_PIXEL_FORMAT_3BPP 0x1
+#define DCS_PIXEL_FORMAT_8BPP 0x2
+#define DCS_PIXEL_FORMAT_12BPP 0x3
+#define DCS_PIXEL_FORMAT_16BPP 0x5
+#define DCS_PIXEL_FORMAT_18BPP 0x6
+#define DCS_PIXEL_FORMAT_24BPP 0x7
/* ONE PARAMETER READ DATA */
-#define addr_mode_data 0xfc
-#define diag_res_data 0x00
-#define disp_mode_data 0x23
-#define pxl_fmt_data 0x77
-#define pwr_mode_data 0x74
-#define sig_mode_data 0x00
+#define addr_mode_data 0xfc
+#define diag_res_data 0x00
+#define disp_mode_data 0x23
+#define pxl_fmt_data 0x77
+#define pwr_mode_data 0x74
+#define sig_mode_data 0x00
/* TWO PARAMETERS READ DATA */
-#define scanline_data1 0xff
-#define scanline_data2 0xff
+#define scanline_data1 0xff
+#define scanline_data2 0xff
#define NON_BURST_MODE_SYNC_PULSE 0x01 /* Non Burst Mode
* with Sync Pulse
*/
@@ -1117,7 +1139,8 @@ No status bits are changed.
* with Sync events
*/
#define BURST_MODE 0x03 /* Burst Mode */
-#define DBI_COMMAND_BUFFER_SIZE 0x240 /* 0x32 */ /* 0x120 */ /* Allocate at least
+#define DBI_COMMAND_BUFFER_SIZE 0x240 /* 0x32 */ /* 0x120 */
+ /* Allocate at least
* 0x100 Byte with 32
* byte alignment
*/
@@ -1125,13 +1148,88 @@ No status bits are changed.
* 0x100 Byte with 32
* byte alignment
*/
-#define DBI_CB_TIME_OUT 0xFFFF
-
-#define GEN_FB_TIME_OUT 2000
-#define ALIGNMENT_32BYTE_MASK (~((1 << 0)|(1 << 1)|(1 << 2)|(1 << 3)|(1 << 4)))
-#define SKU_83 0x01
-#define SKU_100 0x02
-#define SKU_100L 0x04
-#define SKU_BYPASS 0x08
+#define DBI_CB_TIME_OUT 0xFFFF
+
+#define GEN_FB_TIME_OUT 2000
+
+#define SKU_83 0x01
+#define SKU_100 0x02
+#define SKU_100L 0x04
+#define SKU_BYPASS 0x08
+
+/* Some handy macros for playing with bitfields. */
+#define PSB_MASK(high, low) (((1<<((high)-(low)+1))-1)<<(low))
+#define SET_FIELD(value, field) (((value) << field ## _SHIFT) & field ## _MASK)
+#define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
+
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+
+/* PCI config space */
+
+#define SB_PCKT 0x02100 /* cedarview */
+# define SB_OPCODE_MASK PSB_MASK(31, 16)
+# define SB_OPCODE_SHIFT 16
+# define SB_OPCODE_READ 0
+# define SB_OPCODE_WRITE 1
+# define SB_DEST_MASK PSB_MASK(15, 8)
+# define SB_DEST_SHIFT 8
+# define SB_DEST_DPLL 0x88
+# define SB_BYTE_ENABLE_MASK PSB_MASK(7, 4)
+# define SB_BYTE_ENABLE_SHIFT 4
+# define SB_BUSY (1 << 0)
+
+
+/* 32-bit value read/written from the DPIO reg. */
+#define SB_DATA 0x02104 /* cedarview */
+/* 32-bit address of the DPIO reg to be read/written. */
+#define SB_ADDR 0x02108 /* cedarview */
+#define DPIO_CFG 0x02110 /* cedarview */
+# define DPIO_MODE_SELECT_1 (1 << 3)
+# define DPIO_MODE_SELECT_0 (1 << 2)
+# define DPIO_SFR_BYPASS (1 << 1)
+/* reset is active low */
+# define DPIO_CMN_RESET_N (1 << 0)
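/*
 * Illustrative sketch (editor's aside, not part of this patch): how the
 * SET_FIELD() helper combines with the SB_* definitions above for a
 * Cedarview sideband (DPIO) register read.  The __iomem base argument,
 * the use of plain ioread32()/iowrite32() from <linux/io.h> and the
 * simple busy-wait are assumptions for illustration only; the real driver
 * goes through its own MMIO accessors and timeout handling.
 */
static u32 example_sb_read(void __iomem *regs, u32 dpio_reg)
{
	u32 cmd;

	/* Latch the DPIO register address, then issue a read packet. */
	iowrite32(dpio_reg, regs + SB_ADDR);
	cmd = SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
	      SET_FIELD(SB_DEST_DPLL, SB_DEST) |
	      SET_FIELD(0xf, SB_BYTE_ENABLE) |
	      SB_BUSY;
	iowrite32(cmd, regs + SB_PCKT);

	/* Wait for the sideband transaction to complete. */
	while (ioread32(regs + SB_PCKT) & SB_BUSY)
		cpu_relax();

	return ioread32(regs + SB_DATA);
}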
+
+/* Cedarview sideband registers */
+#define _SB_M_A 0x8008
+#define _SB_M_B 0x8028
+#define SB_M(pipe) _PIPE(pipe, _SB_M_A, _SB_M_B)
+# define SB_M_DIVIDER_MASK (0xFF << 24)
+# define SB_M_DIVIDER_SHIFT 24
+
+#define _SB_N_VCO_A 0x8014
+#define _SB_N_VCO_B 0x8034
+#define SB_N_VCO(pipe) _PIPE(pipe, _SB_N_VCO_A, _SB_N_VCO_B)
+#define SB_N_VCO_SEL_MASK PSB_MASK(31, 30)
+#define SB_N_VCO_SEL_SHIFT 30
+#define SB_N_DIVIDER_MASK PSB_MASK(29, 26)
+#define SB_N_DIVIDER_SHIFT 26
+#define SB_N_CB_TUNE_MASK PSB_MASK(25, 24)
+#define SB_N_CB_TUNE_SHIFT 24
+
+#define _SB_REF_A 0x8018
+#define _SB_REF_B 0x8038
+#define SB_REF_SFR(pipe) _PIPE(pipe, _SB_REF_A, _SB_REF_B)
+
+#define _SB_P_A 0x801c
+#define _SB_P_B 0x803c
+#define SB_P(pipe) _PIPE(pipe, _SB_P_A, _SB_P_B)
+#define SB_P2_DIVIDER_MASK PSB_MASK(31, 30)
+#define SB_P2_DIVIDER_SHIFT 30
+#define SB_P2_10 0 /* HDMI, DP, DAC */
+#define SB_P2_5 1 /* DAC */
+#define SB_P2_14 2 /* LVDS single */
+#define SB_P2_7 3 /* LVDS double */
+#define SB_P1_DIVIDER_MASK PSB_MASK(15, 12)
+#define SB_P1_DIVIDER_SHIFT 12
+
+#define PSB_LANE0 0x120
+#define PSB_LANE1 0x220
+#define PSB_LANE2 0x2320
+#define PSB_LANE3 0x2420
+
+#define LANE_PLL_MASK (0x7 << 20)
+#define LANE_PLL_ENABLE (0x3 << 20)
+
#endif
diff --git a/drivers/staging/gma500/psb_intel_sdvo.c b/drivers/staging/gma500/psb_intel_sdvo.c
index df1c006ecfa..a4bad1af4b7 100644
--- a/drivers/staging/gma500/psb_intel_sdvo.c
+++ b/drivers/staging/gma500/psb_intel_sdvo.c
@@ -107,14 +107,10 @@ static bool psb_intel_sdvo_read_byte(
ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
if (ret == 2) {
- /* DRM_DEBUG("got back from addr %02X = %02x\n",
- * out_buf[0], buf[0]);
- */
*ch = buf[0];
return true;
}
- DRM_DEBUG("i2c transfer returned %d\n", ret);
return false;
}
@@ -205,24 +201,25 @@ static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output,
int i;
if (0) {
- DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
+ printk(KERN_DEBUG "%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
for (i = 0; i < args_len; i++)
- printk(KERN_INFO"%02X ", ((u8 *) args)[i]);
+ printk(KERN_CONT "%02X ", ((u8 *) args)[i]);
for (; i < 8; i++)
- printk(" ");
+ printk(KERN_CONT " ");
for (i = 0;
i <
sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]);
i++) {
if (cmd == sdvo_cmd_names[i].cmd) {
- printk("(%s)", sdvo_cmd_names[i].name);
+ printk(KERN_CONT
+ "(%s)", sdvo_cmd_names[i].name);
break;
}
}
if (i ==
sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
- printk("(%02X)", cmd);
- printk("\n");
+ printk(KERN_CONT "(%02X)", cmd);
+ printk(KERN_CONT "\n");
}
for (i = 0; i < args_len; i++) {
@@ -267,17 +264,17 @@ static u8 psb_intel_sdvo_read_response(
&status);
if (0) {
- DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
+ pr_debug("%s: R: ", SDVO_NAME(sdvo_priv));
for (i = 0; i < response_len; i++)
- printk(KERN_INFO"%02X ", ((u8 *) response)[i]);
+ printk(KERN_CONT "%02X ", ((u8 *) response)[i]);
for (; i < 8; i++)
printk(" ");
if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- printk(KERN_INFO"(%s)",
+ printk(KERN_CONT "(%s)",
cmd_status_names[status]);
else
- printk(KERN_INFO"(??? %d)", status);
- printk("\n");
+ printk(KERN_CONT "(??? %d)", status);
+ printk(KERN_CONT "\n");
}
if (status != SDVO_CMD_STATUS_PENDING)
@@ -997,7 +994,6 @@ int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
u8 response[2];
u8 status;
struct psb_intel_output *psb_intel_output;
- DRM_DEBUG("\n");
if (!connector)
return 0;
@@ -1198,7 +1194,7 @@ void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) {
- DRM_DEBUG("No SDVO device found on SDVO%c\n",
+ dev_dbg(dev->dev, "No SDVO device found on SDVO%c\n",
output_device == SDVOB ? 'B' : 'C');
goto err_i2c;
}
@@ -1242,8 +1238,7 @@ void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
unsigned char bytes[2];
memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
- DRM_DEBUG
- ("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
+ dev_dbg(dev->dev, "%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
SDVO_NAME(sdvo_priv), bytes[0], bytes[1]);
goto err_i2c;
}
@@ -1267,7 +1262,7 @@ void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
pixel_clock_max);
- DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
+ dev_dbg(dev->dev, "%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
"input 1: %c, input 2: %c, "
"output 1: %c, output 2: %c\n",
diff --git a/drivers/staging/gma500/psb_intel_sdvo_regs.h b/drivers/staging/gma500/psb_intel_sdvo_regs.h
index c7107a37e33..96862ea65ab 100644
--- a/drivers/staging/gma500/psb_intel_sdvo_regs.h
+++ b/drivers/staging/gma500/psb_intel_sdvo_regs.h
@@ -51,7 +51,7 @@ struct psb_intel_sdvo_caps {
unsigned int stall_support:1;
unsigned int pad:1;
u16 output_flags;
-} __attribute__ ((packed));
+} __packed;
/** This matches the EDID DTD structure, more or less */
struct psb_intel_sdvo_dtd {
@@ -82,18 +82,18 @@ struct psb_intel_sdvo_dtd {
u8 v_sync_off_high;
u8 reserved;
} part2;
-} __attribute__ ((packed));
+} __packed;
struct psb_intel_sdvo_pixel_clock_range {
u16 min; /**< pixel clock, in 10kHz units */
u16 max; /**< pixel clock, in 10kHz units */
-} __attribute__ ((packed));
+} __packed;
struct psb_intel_sdvo_preferred_input_timing_args {
u16 clock;
u16 width;
u16 height;
-} __attribute__ ((packed));
+} __packed;
/* I2C registers for SDVO */
#define SDVO_I2C_ARG_0 0x07
@@ -147,7 +147,7 @@ struct psb_intel_sdvo_get_trained_inputs_response {
unsigned int input0_trained:1;
unsigned int input1_trained:1;
unsigned int pad:6;
-} __attribute__ ((packed));
+} __packed;
/** Returns a struct psb_intel_sdvo_output_flags of active outputs. */
#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
@@ -201,7 +201,7 @@ struct psb_intel_sdvo_get_interrupt_event_source_response {
u16 interrupt_status;
unsigned int ambient_light_interrupt:1;
unsigned int pad:7;
-} __attribute__ ((packed));
+} __packed;
/**
* Selects which input is affected by future input commands.
@@ -214,7 +214,7 @@ struct psb_intel_sdvo_get_interrupt_event_source_response {
struct psb_intel_sdvo_set_target_input_args {
unsigned int target_1:1;
unsigned int pad:7;
-} __attribute__ ((packed));
+} __packed;
/**
* Takes a struct psb_intel_sdvo_output_flags of which outputs are targeted by
diff --git a/drivers/staging/gma500/psb_irq.c b/drivers/staging/gma500/psb_irq.c
index 9ea37e58887..4a0fa42893f 100644
--- a/drivers/staging/gma500/psb_irq.c
+++ b/drivers/staging/gma500/psb_irq.c
@@ -26,8 +26,8 @@
#include "psb_drv.h"
#include "psb_reg.h"
#include "psb_intel_reg.h"
-#include "psb_powermgmt.h"
-
+#include "power.h"
+#include "mdfld_output.h"
/*
* inline functions
@@ -187,7 +187,8 @@ static void mid_pipe_event_handler(struct drm_device *dev, uint32_t pipe)
}
if (i == WAIT_STATUS_CLEAR_LOOP_COUNT)
- DRM_ERROR("%s, can't clear the status bits in pipe_stat_reg, its value = 0x%x.\n",
+ dev_err(dev->dev,
+ "%s, can't clear the status bits in pipe_stat_reg, its value = 0x%x.\n",
__func__, PSB_RVDC32(pipe_stat_reg));
if (pipe_stat_val & PIPE_VBLANK_STATUS)
@@ -219,21 +220,11 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
- if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG) {
- PSB_DEBUG_IRQ("Got DISP interrupt\n");
+ if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
dsp_int = 1;
- }
- if (vdc_stat & _PSB_IRQ_SGX_FLAG) {
- PSB_DEBUG_IRQ("Got SGX interrupt\n");
+ if (vdc_stat & _PSB_IRQ_SGX_FLAG)
sgx_int = 1;
- }
- if (vdc_stat & _PSB_IRQ_MSVDX_FLAG)
- PSB_DEBUG_IRQ("Got MSVDX interrupt\n");
-
- if (vdc_stat & _LNC_IRQ_TOPAZ_FLAG)
- PSB_DEBUG_IRQ("Got TOPAZ interrupt\n");
-
vdc_stat &= dev_priv->vdc_irq_mask;
spin_unlock(&dev_priv->irqmask_lock);
@@ -293,8 +284,6 @@ int psb_irq_postinstall(struct drm_device *dev)
(struct drm_psb_private *) dev->dev_private;
unsigned long irqflags;
- PSB_DEBUG_ENTRY("\n");
-
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
/* This register is safe even if display island is off */
@@ -326,8 +315,6 @@ void psb_irq_uninstall(struct drm_device *dev)
(struct drm_psb_private *) dev->dev_private;
unsigned long irqflags;
- PSB_DEBUG_ENTRY("\n");
-
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
@@ -395,8 +382,6 @@ int psb_irq_enable_dpst(struct drm_device *dev)
(struct drm_psb_private *) dev->dev_private;
unsigned long irqflags;
- PSB_DEBUG_ENTRY("\n");
-
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
/* enable DPST */
@@ -435,8 +420,6 @@ int psb_irq_disable_dpst(struct drm_device *dev)
(struct drm_psb_private *) dev->dev_private;
unsigned long irqflags;
- PSB_DEBUG_ENTRY("\n");
-
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
mid_disable_pipe_event(dev_priv, 0);
@@ -472,8 +455,12 @@ int psb_enable_vblank(struct drm_device *dev, int pipe)
uint32_t reg_val = 0;
uint32_t pipeconf_reg = mid_pipeconf(pipe);
- PSB_DEBUG_ENTRY("\n");
-
+#if defined(CONFIG_DRM_PSB_MFLD)
+ /* Medfield is different - we should perhaps extract out vblank
+	   and backlight etc ops */
+ if (IS_MFLD(dev) && !mdfld_panel_dpi(dev))
+ return mdfld_enable_te(dev, pipe);
+#endif
if (gma_power_begin(dev, false)) {
reg_val = REG_READ(pipeconf_reg);
gma_power_end(dev);
@@ -500,8 +487,10 @@ void psb_disable_vblank(struct drm_device *dev, int pipe)
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- PSB_DEBUG_ENTRY("\n");
-
+#if defined(CONFIG_DRM_PSB_MFLD)
+ if (IS_MFLD(dev) && !mdfld_panel_dpi(dev))
+ mdfld_disable_te(dev, pipe);
+#endif
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
mid_disable_pipe_event(dev_priv, pipe);
@@ -510,6 +499,58 @@ void psb_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
+/**
+ * mdfld_enable_te - enable TE events
+ * @dev: our DRM device
+ * @pipe: which pipe to work on
+ *
+ * Enable TE events on a Medfield display pipe. Medfield specific.
+ */
+int mdfld_enable_te(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+ uint32_t reg_val = 0;
+ uint32_t pipeconf_reg = mid_pipeconf(pipe);
+
+ if (gma_power_begin(dev, false)) {
+ reg_val = REG_READ(pipeconf_reg);
+ gma_power_end(dev);
+ }
+
+ if (!(reg_val & PIPEACONF_ENABLE))
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, flags);
+
+ mid_enable_pipe_event(dev_priv, pipe);
+ psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, flags);
+
+ return 0;
+}
+
+/**
+ * mdfld_disable_te - disable TE events
+ * @dev: our DRM device
+ * @pipe: which pipe to work on
+ *
+ * Disable TE events on a Medfield display pipe. Medfield specific.
+ */
+void mdfld_disable_te(struct drm_device *dev, int pipe)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->irqmask_lock, flags);
+
+ mid_disable_pipe_event(dev_priv, pipe);
+ psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
+
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, flags);
+}
+
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
@@ -535,7 +576,7 @@ u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
pipeconf_reg = PIPECCONF;
break;
default:
- DRM_ERROR("%s, invalded pipe.\n", __func__);
+ dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
return 0;
}
@@ -545,7 +586,7 @@ u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
reg_val = REG_READ(pipeconf_reg);
if (!(reg_val & PIPEACONF_ENABLE)) {
- DRM_ERROR("trying to get vblank count for disabled pipe %d\n",
+ dev_err(dev->dev, "trying to get vblank count for disabled pipe %d\n",
pipe);
goto psb_get_vblank_counter_exit;
}
diff --git a/drivers/staging/gma500/psb_irq.h b/drivers/staging/gma500/psb_irq.h
index 3e56f33efa6..216fda38b57 100644
--- a/drivers/staging/gma500/psb_irq.h
+++ b/drivers/staging/gma500/psb_irq.h
@@ -1,5 +1,5 @@
/**************************************************************************
- * Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2009-2011, Intel Corporation.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
@@ -34,10 +34,6 @@ int psb_irq_postinstall(struct drm_device *dev);
void psb_irq_uninstall(struct drm_device *dev);
irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
-void psb_irq_preinstall_islands(struct drm_device *dev, int hw_islands);
-int psb_irq_postinstall_islands(struct drm_device *dev, int hw_islands);
-void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
-
int psb_irq_enable_dpst(struct drm_device *dev);
int psb_irq_disable_dpst(struct drm_device *dev);
void psb_irq_turn_on_dpst(struct drm_device *dev);
@@ -46,4 +42,4 @@ int psb_enable_vblank(struct drm_device *dev, int pipe);
void psb_disable_vblank(struct drm_device *dev, int pipe);
u32 psb_get_vblank_counter(struct drm_device *dev, int pipe);
-#endif //_SYSIRQ_H_
+#endif /* _SYSIRQ_H_ */
diff --git a/drivers/staging/gma500/psb_lid.c b/drivers/staging/gma500/psb_lid.c
index 21fd202f293..af328516561 100644
--- a/drivers/staging/gma500/psb_lid.c
+++ b/drivers/staging/gma500/psb_lid.c
@@ -32,10 +32,10 @@ static void psb_lid_timer_func(unsigned long data)
u32 *lid_state = dev_priv->lid_state;
u32 pp_status;
- if (*lid_state == dev_priv->lid_last_state)
+ if (readl(lid_state) == dev_priv->lid_last_state)
goto lid_timer_schedule;
- if ((*lid_state) & 0x01) {
+ if ((readl(lid_state)) & 0x01) {
/*lid state is open*/
REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
do {
@@ -54,7 +54,7 @@ static void psb_lid_timer_func(unsigned long data)
}
/* printk(KERN_INFO"%s: lid: closed\n", __FUNCTION__); */
- dev_priv->lid_last_state = *lid_state;
+ dev_priv->lid_last_state = readl(lid_state);
lid_timer_schedule:
spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
diff --git a/drivers/staging/gma500/psb_reg.h b/drivers/staging/gma500/psb_reg.h
index 9ad49892070..b81c7c1e9c2 100644
--- a/drivers/staging/gma500/psb_reg.h
+++ b/drivers/staging/gma500/psb_reg.h
@@ -22,161 +22,157 @@
#ifndef _PSB_REG_H_
#define _PSB_REG_H_
-#define PSB_CR_CLKGATECTL 0x0000
-#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
-#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
-#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
-#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
-#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
-#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
-#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
-#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
-#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
-#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
-#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
-#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
-#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
-#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
-#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
-#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
-
-#define PSB_CR_CORE_ID 0x0010
-#define _PSB_CC_ID_ID_SHIFT (16)
-#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
-#define _PSB_CC_ID_CONFIG_SHIFT (0)
-#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
-
-#define PSB_CR_CORE_REVISION 0x0014
-#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
-#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
-#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
-#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
-#define _PSB_CC_REVISION_MINOR_SHIFT (8)
-#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
-#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
-#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
-
-#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
-
-#define PSB_CR_SOFT_RESET 0x0080
-#define _PSB_CS_RESET_TSP_RESET (1 << 6)
-#define _PSB_CS_RESET_ISP_RESET (1 << 5)
-#define _PSB_CS_RESET_USE_RESET (1 << 4)
-#define _PSB_CS_RESET_TA_RESET (1 << 3)
-#define _PSB_CS_RESET_DPM_RESET (1 << 2)
-#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
-#define _PSB_CS_RESET_BIF_RESET (1 << 0)
-
-#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
-
-#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
-
-#define PSB_CR_EVENT_STATUS2 0x0118
-
-#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
-#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
-
-#define PSB_CR_EVENT_STATUS 0x012C
-
-#define PSB_CR_EVENT_HOST_ENABLE 0x0130
-
-#define PSB_CR_EVENT_HOST_CLEAR 0x0134
-#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
-#define _PSB_CE_TA_DPM_FAULT (1 << 28)
-#define _PSB_CE_TWOD_COMPLETE (1 << 27)
-#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
-#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
-#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
-#define _PSB_CE_SW_EVENT (1 << 14)
-#define _PSB_CE_TA_FINISHED (1 << 13)
-#define _PSB_CE_TA_TERMINATE (1 << 12)
-#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
-#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
-#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
-#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
-
-
-#define PSB_USE_OFFSET_MASK 0x0007FFFF
-#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
-#define PSB_CR_USE_CODE_BASE0 0x0A0C
-#define PSB_CR_USE_CODE_BASE1 0x0A10
-#define PSB_CR_USE_CODE_BASE2 0x0A14
-#define PSB_CR_USE_CODE_BASE3 0x0A18
-#define PSB_CR_USE_CODE_BASE4 0x0A1C
-#define PSB_CR_USE_CODE_BASE5 0x0A20
-#define PSB_CR_USE_CODE_BASE6 0x0A24
-#define PSB_CR_USE_CODE_BASE7 0x0A28
-#define PSB_CR_USE_CODE_BASE8 0x0A2C
-#define PSB_CR_USE_CODE_BASE9 0x0A30
-#define PSB_CR_USE_CODE_BASE10 0x0A34
-#define PSB_CR_USE_CODE_BASE11 0x0A38
-#define PSB_CR_USE_CODE_BASE12 0x0A3C
-#define PSB_CR_USE_CODE_BASE13 0x0A40
-#define PSB_CR_USE_CODE_BASE14 0x0A44
-#define PSB_CR_USE_CODE_BASE15 0x0A48
-#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
-#define _PSB_CUC_BASE_DM_SHIFT (25)
-#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
-#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
-#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
-#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
-#define _PSB_CUC_DM_VERTEX (0)
-#define _PSB_CUC_DM_PIXEL (1)
-#define _PSB_CUC_DM_RESERVED (2)
-#define _PSB_CUC_DM_EDM (3)
-
-#define PSB_CR_PDS_EXEC_BASE 0x0AB8
-#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
-#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
-
-#define PSB_CR_EVENT_KICKER 0x0AC4
-#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
-
-#define PSB_CR_EVENT_KICK 0x0AC8
-#define _PSB_CE_KICK_NOW (1 << 0)
-
-
-#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
-
-#define PSB_CR_BIF_CTRL 0x0C00
-#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
-#define _PSB_CB_CTRL_INVALDC (1 << 3)
-#define _PSB_CB_CTRL_FLUSH (1 << 2)
-
-#define PSB_CR_BIF_INT_STAT 0x0C04
-
-#define PSB_CR_BIF_FAULT 0x0C08
-#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
-#define _PSB_CBI_STAT_FAULT_SHIFT (0)
-#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
-#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
-#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
-#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
-#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
-#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
-#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
-#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
-#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
-#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
-
-#define PSB_CR_BIF_BANK0 0x0C78
-
-#define PSB_CR_BIF_BANK1 0x0C7C
-
-#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
-
-#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
-#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
-
-#define PSB_CR_2D_SOCIF 0x0E18
-#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
-#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
-#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
-
-#define PSB_CR_2D_BLIT_STATUS 0x0E04
-#define _PSB_C2B_STATUS_BUSY (1 << 24)
-#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
-#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
+#define PSB_CR_CLKGATECTL 0x0000
+#define _PSB_C_CLKGATECTL_AUTO_MAN_REG (1 << 24)
+#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT (20)
+#define _PSB_C_CLKGATECTL_USE_CLKG_MASK (0x3 << 20)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT (16)
+#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK (0x3 << 16)
+#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT (12)
+#define _PSB_C_CLKGATECTL_TA_CLKG_MASK (0x3 << 12)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT (8)
+#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK (0x3 << 8)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT (4)
+#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK (0x3 << 4)
+#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT (0)
+#define _PSB_C_CLKGATECTL_2D_CLKG_MASK (0x3 << 0)
+#define _PSB_C_CLKGATECTL_CLKG_ENABLED (0)
+#define _PSB_C_CLKGATECTL_CLKG_DISABLED (1)
+#define _PSB_C_CLKGATECTL_CLKG_AUTO (2)
+
+#define PSB_CR_CORE_ID 0x0010
+#define _PSB_CC_ID_ID_SHIFT (16)
+#define _PSB_CC_ID_ID_MASK (0xFFFF << 16)
+#define _PSB_CC_ID_CONFIG_SHIFT (0)
+#define _PSB_CC_ID_CONFIG_MASK (0xFFFF << 0)
+
+#define PSB_CR_CORE_REVISION 0x0014
+#define _PSB_CC_REVISION_DESIGNER_SHIFT (24)
+#define _PSB_CC_REVISION_DESIGNER_MASK (0xFF << 24)
+#define _PSB_CC_REVISION_MAJOR_SHIFT (16)
+#define _PSB_CC_REVISION_MAJOR_MASK (0xFF << 16)
+#define _PSB_CC_REVISION_MINOR_SHIFT (8)
+#define _PSB_CC_REVISION_MINOR_MASK (0xFF << 8)
+#define _PSB_CC_REVISION_MAINTENANCE_SHIFT (0)
+#define _PSB_CC_REVISION_MAINTENANCE_MASK (0xFF << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD1 0x0018
+
+#define PSB_CR_SOFT_RESET 0x0080
+#define _PSB_CS_RESET_TSP_RESET (1 << 6)
+#define _PSB_CS_RESET_ISP_RESET (1 << 5)
+#define _PSB_CS_RESET_USE_RESET (1 << 4)
+#define _PSB_CS_RESET_TA_RESET (1 << 3)
+#define _PSB_CS_RESET_DPM_RESET (1 << 2)
+#define _PSB_CS_RESET_TWOD_RESET (1 << 1)
+#define _PSB_CS_RESET_BIF_RESET (1 << 0)
+
+#define PSB_CR_DESIGNER_REV_FIELD2 0x001C
+
+#define PSB_CR_EVENT_HOST_ENABLE2 0x0110
+
+#define PSB_CR_EVENT_STATUS2 0x0118
+
+#define PSB_CR_EVENT_HOST_CLEAR2 0x0114
+#define _PSB_CE2_BIF_REQUESTER_FAULT (1 << 4)
+
+#define PSB_CR_EVENT_STATUS 0x012C
+
+#define PSB_CR_EVENT_HOST_ENABLE 0x0130
+
+#define PSB_CR_EVENT_HOST_CLEAR 0x0134
+#define _PSB_CE_MASTER_INTERRUPT (1 << 31)
+#define _PSB_CE_TA_DPM_FAULT (1 << 28)
+#define _PSB_CE_TWOD_COMPLETE (1 << 27)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS (1 << 25)
+#define _PSB_CE_DPM_TA_MEM_FREE (1 << 24)
+#define _PSB_CE_PIXELBE_END_RENDER (1 << 18)
+#define _PSB_CE_SW_EVENT (1 << 14)
+#define _PSB_CE_TA_FINISHED (1 << 13)
+#define _PSB_CE_TA_TERMINATE (1 << 12)
+#define _PSB_CE_DPM_REACHED_MEM_THRESH (1 << 3)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL (1 << 2)
+#define _PSB_CE_DPM_OUT_OF_MEMORY_MT (1 << 1)
+#define _PSB_CE_DPM_3D_MEM_FREE (1 << 0)
+
+
+#define PSB_USE_OFFSET_MASK 0x0007FFFF
+#define PSB_USE_OFFSET_SIZE (PSB_USE_OFFSET_MASK + 1)
+#define PSB_CR_USE_CODE_BASE0 0x0A0C
+#define PSB_CR_USE_CODE_BASE1 0x0A10
+#define PSB_CR_USE_CODE_BASE2 0x0A14
+#define PSB_CR_USE_CODE_BASE3 0x0A18
+#define PSB_CR_USE_CODE_BASE4 0x0A1C
+#define PSB_CR_USE_CODE_BASE5 0x0A20
+#define PSB_CR_USE_CODE_BASE6 0x0A24
+#define PSB_CR_USE_CODE_BASE7 0x0A28
+#define PSB_CR_USE_CODE_BASE8 0x0A2C
+#define PSB_CR_USE_CODE_BASE9 0x0A30
+#define PSB_CR_USE_CODE_BASE10 0x0A34
+#define PSB_CR_USE_CODE_BASE11 0x0A38
+#define PSB_CR_USE_CODE_BASE12 0x0A3C
+#define PSB_CR_USE_CODE_BASE13 0x0A40
+#define PSB_CR_USE_CODE_BASE14 0x0A44
+#define PSB_CR_USE_CODE_BASE15 0x0A48
+#define PSB_CR_USE_CODE_BASE(_i) (0x0A0C + ((_i) << 2))
+#define _PSB_CUC_BASE_DM_SHIFT (25)
+#define _PSB_CUC_BASE_DM_MASK (0x3 << 25)
+#define _PSB_CUC_BASE_ADDR_SHIFT (0) /* 1024-bit aligned address? */
+#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT (7)
+#define _PSB_CUC_BASE_ADDR_MASK (0x1FFFFFF << 0)
+#define _PSB_CUC_DM_VERTEX (0)
+#define _PSB_CUC_DM_PIXEL (1)
+#define _PSB_CUC_DM_RESERVED (2)
+#define _PSB_CUC_DM_EDM (3)
+
+#define PSB_CR_PDS_EXEC_BASE 0x0AB8
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT (20) /* 1MB aligned address */
+#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT (20)
+
+#define PSB_CR_EVENT_KICKER 0x0AC4
+#define _PSB_CE_KICKER_ADDRESS_SHIFT (4) /* 128-bit aligned address */
+
+#define PSB_CR_EVENT_KICK 0x0AC8
+#define _PSB_CE_KICK_NOW (1 << 0)
+
+#define PSB_CR_BIF_DIR_LIST_BASE1 0x0C38
+
+#define PSB_CR_BIF_CTRL 0x0C00
+#define _PSB_CB_CTRL_CLEAR_FAULT (1 << 4)
+#define _PSB_CB_CTRL_INVALDC (1 << 3)
+#define _PSB_CB_CTRL_FLUSH (1 << 2)
+
+#define PSB_CR_BIF_INT_STAT 0x0C04
+
+#define PSB_CR_BIF_FAULT 0x0C08
+#define _PSB_CBI_STAT_PF_N_RW (1 << 14)
+#define _PSB_CBI_STAT_FAULT_SHIFT (0)
+#define _PSB_CBI_STAT_FAULT_MASK (0x3FFF << 0)
+#define _PSB_CBI_STAT_FAULT_CACHE (1 << 1)
+#define _PSB_CBI_STAT_FAULT_TA (1 << 2)
+#define _PSB_CBI_STAT_FAULT_VDM (1 << 3)
+#define _PSB_CBI_STAT_FAULT_2D (1 << 4)
+#define _PSB_CBI_STAT_FAULT_PBE (1 << 5)
+#define _PSB_CBI_STAT_FAULT_TSP (1 << 6)
+#define _PSB_CBI_STAT_FAULT_ISP (1 << 7)
+#define _PSB_CBI_STAT_FAULT_USSEPDS (1 << 8)
+#define _PSB_CBI_STAT_FAULT_HOST (1 << 9)
+
+#define PSB_CR_BIF_BANK0 0x0C78
+#define PSB_CR_BIF_BANK1 0x0C7C
+#define PSB_CR_BIF_DIR_LIST_BASE0 0x0C84
+#define PSB_CR_BIF_TWOD_REQ_BASE 0x0C88
+#define PSB_CR_BIF_3D_REQ_BASE 0x0CAC
+
+#define PSB_CR_2D_SOCIF 0x0E18
+#define _PSB_C2_SOCIF_FREESPACE_SHIFT (0)
+#define _PSB_C2_SOCIF_FREESPACE_MASK (0xFF << 0)
+#define _PSB_C2_SOCIF_EMPTY (0x80 << 0)
+
+#define PSB_CR_2D_BLIT_STATUS 0x0E04
+#define _PSB_C2B_STATUS_BUSY (1 << 24)
+#define _PSB_C2B_STATUS_COMPLETE_SHIFT (0)
+#define _PSB_C2B_STATUS_COMPLETE_MASK (0xFFFFFF << 0)
/*
* 2D defs.
@@ -186,121 +182,121 @@
* 2D Slave Port Data : Block Header's Object Type
*/
-#define PSB_2D_CLIP_BH (0x00000000)
-#define PSB_2D_PAT_BH (0x10000000)
-#define PSB_2D_CTRL_BH (0x20000000)
-#define PSB_2D_SRC_OFF_BH (0x30000000)
-#define PSB_2D_MASK_OFF_BH (0x40000000)
-#define PSB_2D_RESERVED1_BH (0x50000000)
-#define PSB_2D_RESERVED2_BH (0x60000000)
-#define PSB_2D_FENCE_BH (0x70000000)
-#define PSB_2D_BLIT_BH (0x80000000)
-#define PSB_2D_SRC_SURF_BH (0x90000000)
-#define PSB_2D_DST_SURF_BH (0xA0000000)
-#define PSB_2D_PAT_SURF_BH (0xB0000000)
-#define PSB_2D_SRC_PAL_BH (0xC0000000)
-#define PSB_2D_PAT_PAL_BH (0xD0000000)
-#define PSB_2D_MASK_SURF_BH (0xE0000000)
-#define PSB_2D_FLUSH_BH (0xF0000000)
+#define PSB_2D_CLIP_BH (0x00000000)
+#define PSB_2D_PAT_BH (0x10000000)
+#define PSB_2D_CTRL_BH (0x20000000)
+#define PSB_2D_SRC_OFF_BH (0x30000000)
+#define PSB_2D_MASK_OFF_BH (0x40000000)
+#define PSB_2D_RESERVED1_BH (0x50000000)
+#define PSB_2D_RESERVED2_BH (0x60000000)
+#define PSB_2D_FENCE_BH (0x70000000)
+#define PSB_2D_BLIT_BH (0x80000000)
+#define PSB_2D_SRC_SURF_BH (0x90000000)
+#define PSB_2D_DST_SURF_BH (0xA0000000)
+#define PSB_2D_PAT_SURF_BH (0xB0000000)
+#define PSB_2D_SRC_PAL_BH (0xC0000000)
+#define PSB_2D_PAT_PAL_BH (0xD0000000)
+#define PSB_2D_MASK_SURF_BH (0xE0000000)
+#define PSB_2D_FLUSH_BH (0xF0000000)
/*
* Clip Definition block (PSB_2D_CLIP_BH)
*/
-#define PSB_2D_CLIPCOUNT_MAX (1)
-#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
-#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
-#define PSB_2D_CLIPCOUNT_SHIFT (0)
+#define PSB_2D_CLIPCOUNT_MAX (1)
+#define PSB_2D_CLIPCOUNT_MASK (0x00000000)
+#define PSB_2D_CLIPCOUNT_CLRMASK (0xFFFFFFFF)
+#define PSB_2D_CLIPCOUNT_SHIFT (0)
/* clip rectangle min & max */
-#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
-#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
-#define PSB_2D_CLIP_XMAX_SHIFT (12)
-#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
-#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
-#define PSB_2D_CLIP_XMIN_SHIFT (0)
+#define PSB_2D_CLIP_XMAX_MASK (0x00FFF000)
+#define PSB_2D_CLIP_XMAX_CLRMASK (0xFF000FFF)
+#define PSB_2D_CLIP_XMAX_SHIFT (12)
+#define PSB_2D_CLIP_XMIN_MASK (0x00000FFF)
+#define PSB_2D_CLIP_XMIN_CLRMASK (0x00FFF000)
+#define PSB_2D_CLIP_XMIN_SHIFT (0)
/* clip rectangle offset */
-#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
-#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
-#define PSB_2D_CLIP_YMAX_SHIFT (12)
-#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
-#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
-#define PSB_2D_CLIP_YMIN_SHIFT (0)
+#define PSB_2D_CLIP_YMAX_MASK (0x00FFF000)
+#define PSB_2D_CLIP_YMAX_CLRMASK (0xFF000FFF)
+#define PSB_2D_CLIP_YMAX_SHIFT (12)
+#define PSB_2D_CLIP_YMIN_MASK (0x00000FFF)
+#define PSB_2D_CLIP_YMIN_CLRMASK (0x00FFF000)
+#define PSB_2D_CLIP_YMIN_SHIFT (0)
/*
* Pattern Control (PSB_2D_PAT_BH)
*/
-#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
-#define PSB_2D_PAT_HEIGHT_SHIFT (0)
-#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
-#define PSB_2D_PAT_WIDTH_SHIFT (5)
-#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
-#define PSB_2D_PAT_YSTART_SHIFT (10)
-#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
-#define PSB_2D_PAT_XSTART_SHIFT (15)
+#define PSB_2D_PAT_HEIGHT_MASK (0x0000001F)
+#define PSB_2D_PAT_HEIGHT_SHIFT (0)
+#define PSB_2D_PAT_WIDTH_MASK (0x000003E0)
+#define PSB_2D_PAT_WIDTH_SHIFT (5)
+#define PSB_2D_PAT_YSTART_MASK (0x00007C00)
+#define PSB_2D_PAT_YSTART_SHIFT (10)
+#define PSB_2D_PAT_XSTART_MASK (0x000F8000)
+#define PSB_2D_PAT_XSTART_SHIFT (15)
/*
* 2D Control block (PSB_2D_CTRL_BH)
*/
/* Present Flags */
-#define PSB_2D_SRCCK_CTRL (0x00000001)
-#define PSB_2D_DSTCK_CTRL (0x00000002)
-#define PSB_2D_ALPHA_CTRL (0x00000004)
+#define PSB_2D_SRCCK_CTRL (0x00000001)
+#define PSB_2D_DSTCK_CTRL (0x00000002)
+#define PSB_2D_ALPHA_CTRL (0x00000004)
/* Colour Key Colour (SRC/DST)*/
-#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
-#define PSB_2D_CK_COL_CLRMASK (0x00000000)
-#define PSB_2D_CK_COL_SHIFT (0)
+#define PSB_2D_CK_COL_MASK (0xFFFFFFFF)
+#define PSB_2D_CK_COL_CLRMASK (0x00000000)
+#define PSB_2D_CK_COL_SHIFT (0)
/* Colour Key Mask (SRC/DST)*/
-#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
-#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
-#define PSB_2D_CK_MASK_SHIFT (0)
+#define PSB_2D_CK_MASK_MASK (0xFFFFFFFF)
+#define PSB_2D_CK_MASK_CLRMASK (0x00000000)
+#define PSB_2D_CK_MASK_SHIFT (0)
/* Alpha Control (Alpha/RGB)*/
-#define PSB_2D_GBLALPHA_MASK (0x000FF000)
-#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
-#define PSB_2D_GBLALPHA_SHIFT (12)
-#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
-#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
-#define PSB_2D_SRCALPHA_OP_SHIFT (20)
-#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
-#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
-#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
-#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
-#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
-#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
-#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
-#define PSB_2D_SRCALPHA_INVERT (0x00800000)
-#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
-#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
-#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
-#define PSB_2D_DSTALPHA_OP_SHIFT (24)
-#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
-#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
-#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
-#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
-#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
-#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
-#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
-#define PSB_2D_DSTALPHA_INVERT (0x08000000)
-#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
-
-#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
-#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
-#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
-#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
+#define PSB_2D_GBLALPHA_MASK (0x000FF000)
+#define PSB_2D_GBLALPHA_CLRMASK (0xFFF00FFF)
+#define PSB_2D_GBLALPHA_SHIFT (12)
+#define PSB_2D_SRCALPHA_OP_MASK (0x00700000)
+#define PSB_2D_SRCALPHA_OP_CLRMASK (0xFF8FFFFF)
+#define PSB_2D_SRCALPHA_OP_SHIFT (20)
+#define PSB_2D_SRCALPHA_OP_ONE (0x00000000)
+#define PSB_2D_SRCALPHA_OP_SRC (0x00100000)
+#define PSB_2D_SRCALPHA_OP_DST (0x00200000)
+#define PSB_2D_SRCALPHA_OP_SG (0x00300000)
+#define PSB_2D_SRCALPHA_OP_DG (0x00400000)
+#define PSB_2D_SRCALPHA_OP_GBL (0x00500000)
+#define PSB_2D_SRCALPHA_OP_ZERO (0x00600000)
+#define PSB_2D_SRCALPHA_INVERT (0x00800000)
+#define PSB_2D_SRCALPHA_INVERT_CLR (0xFF7FFFFF)
+#define PSB_2D_DSTALPHA_OP_MASK (0x07000000)
+#define PSB_2D_DSTALPHA_OP_CLRMASK (0xF8FFFFFF)
+#define PSB_2D_DSTALPHA_OP_SHIFT (24)
+#define PSB_2D_DSTALPHA_OP_ONE (0x00000000)
+#define PSB_2D_DSTALPHA_OP_SRC (0x01000000)
+#define PSB_2D_DSTALPHA_OP_DST (0x02000000)
+#define PSB_2D_DSTALPHA_OP_SG (0x03000000)
+#define PSB_2D_DSTALPHA_OP_DG (0x04000000)
+#define PSB_2D_DSTALPHA_OP_GBL (0x05000000)
+#define PSB_2D_DSTALPHA_OP_ZERO (0x06000000)
+#define PSB_2D_DSTALPHA_INVERT (0x08000000)
+#define PSB_2D_DSTALPHA_INVERT_CLR (0xF7FFFFFF)
+
+#define PSB_2D_PRE_MULTIPLICATION_ENABLE (0x10000000)
+#define PSB_2D_PRE_MULTIPLICATION_CLRMASK (0xEFFFFFFF)
+#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE (0x20000000)
+#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK (0xDFFFFFFF)
/*
*Source Offset (PSB_2D_SRC_OFF_BH)
*/
-#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
-#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
-#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
-#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
+#define PSB_2D_SRCOFF_XSTART_MASK ((0x00000FFF) << 12)
+#define PSB_2D_SRCOFF_XSTART_SHIFT (12)
+#define PSB_2D_SRCOFF_YSTART_MASK (0x00000FFF)
+#define PSB_2D_SRCOFF_YSTART_SHIFT (0)
/*
* Mask Offset (PSB_2D_MASK_OFF_BH)
*/
-#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
-#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
-#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
-#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
+#define PSB_2D_MASKOFF_XSTART_MASK ((0x00000FFF) << 12)
+#define PSB_2D_MASKOFF_XSTART_SHIFT (12)
+#define PSB_2D_MASKOFF_YSTART_MASK (0x00000FFF)
+#define PSB_2D_MASKOFF_YSTART_SHIFT (0)
/*
* 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
@@ -310,118 +306,118 @@
*Blit Rectangle (PSB_2D_BLIT_BH)
*/
-#define PSB_2D_ROT_MASK (3<<25)
-#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
-#define PSB_2D_ROT_NONE (0<<25)
-#define PSB_2D_ROT_90DEGS (1<<25)
-#define PSB_2D_ROT_180DEGS (2<<25)
-#define PSB_2D_ROT_270DEGS (3<<25)
-
-#define PSB_2D_COPYORDER_MASK (3<<23)
-#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
-#define PSB_2D_COPYORDER_TL2BR (0<<23)
-#define PSB_2D_COPYORDER_BR2TL (1<<23)
-#define PSB_2D_COPYORDER_TR2BL (2<<23)
-#define PSB_2D_COPYORDER_BL2TR (3<<23)
-
-#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
-#define PSB_2D_DSTCK_DISABLE (0x00000000)
-#define PSB_2D_DSTCK_PASS (0x00200000)
-#define PSB_2D_DSTCK_REJECT (0x00400000)
-
-#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
-#define PSB_2D_SRCCK_DISABLE (0x00000000)
-#define PSB_2D_SRCCK_PASS (0x00080000)
-#define PSB_2D_SRCCK_REJECT (0x00100000)
-
-#define PSB_2D_CLIP_ENABLE (0x00040000)
-
-#define PSB_2D_ALPHA_ENABLE (0x00020000)
-
-#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
-#define PSB_2D_PAT_MASK (0x00010000)
-#define PSB_2D_USE_PAT (0x00010000)
-#define PSB_2D_USE_FILL (0x00000000)
+#define PSB_2D_ROT_MASK (3 << 25)
+#define PSB_2D_ROT_CLRMASK (~PSB_2D_ROT_MASK)
+#define PSB_2D_ROT_NONE (0 << 25)
+#define PSB_2D_ROT_90DEGS (1 << 25)
+#define PSB_2D_ROT_180DEGS (2 << 25)
+#define PSB_2D_ROT_270DEGS (3 << 25)
+
+#define PSB_2D_COPYORDER_MASK (3 << 23)
+#define PSB_2D_COPYORDER_CLRMASK (~PSB_2D_COPYORDER_MASK)
+#define PSB_2D_COPYORDER_TL2BR (0 << 23)
+#define PSB_2D_COPYORDER_BR2TL (1 << 23)
+#define PSB_2D_COPYORDER_TR2BL (2 << 23)
+#define PSB_2D_COPYORDER_BL2TR (3 << 23)
+
+#define PSB_2D_DSTCK_CLRMASK (0xFF9FFFFF)
+#define PSB_2D_DSTCK_DISABLE (0x00000000)
+#define PSB_2D_DSTCK_PASS (0x00200000)
+#define PSB_2D_DSTCK_REJECT (0x00400000)
+
+#define PSB_2D_SRCCK_CLRMASK (0xFFE7FFFF)
+#define PSB_2D_SRCCK_DISABLE (0x00000000)
+#define PSB_2D_SRCCK_PASS (0x00080000)
+#define PSB_2D_SRCCK_REJECT (0x00100000)
+
+#define PSB_2D_CLIP_ENABLE (0x00040000)
+
+#define PSB_2D_ALPHA_ENABLE (0x00020000)
+
+#define PSB_2D_PAT_CLRMASK (0xFFFEFFFF)
+#define PSB_2D_PAT_MASK (0x00010000)
+#define PSB_2D_USE_PAT (0x00010000)
+#define PSB_2D_USE_FILL (0x00000000)
/*
* Tungsten Graphics note on rop codes: If rop A and rop B are
* identical, the mask surface will not be read and need not be
* set up.
*/
-#define PSB_2D_ROP3B_MASK (0x0000FF00)
-#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
-#define PSB_2D_ROP3B_SHIFT (8)
+#define PSB_2D_ROP3B_MASK (0x0000FF00)
+#define PSB_2D_ROP3B_CLRMASK (0xFFFF00FF)
+#define PSB_2D_ROP3B_SHIFT (8)
/* rop code A */
-#define PSB_2D_ROP3A_MASK (0x000000FF)
-#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
-#define PSB_2D_ROP3A_SHIFT (0)
+#define PSB_2D_ROP3A_MASK (0x000000FF)
+#define PSB_2D_ROP3A_CLRMASK (0xFFFFFF00)
+#define PSB_2D_ROP3A_SHIFT (0)
-#define PSB_2D_ROP4_MASK (0x0000FFFF)
+#define PSB_2D_ROP4_MASK (0x0000FFFF)
/*
* DWORD0: (Only pass if Pattern control == Use Fill Colour)
* Fill Colour RGBA8888
*/
-#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
-#define PSB_2D_FILLCOLOUR_SHIFT (0)
+#define PSB_2D_FILLCOLOUR_MASK (0xFFFFFFFF)
+#define PSB_2D_FILLCOLOUR_SHIFT (0)
/*
* DWORD1: (Always Present)
* X Start (Dest)
* Y Start (Dest)
*/
-#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
-#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
-#define PSB_2D_DST_XSTART_SHIFT (12)
-#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
-#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
-#define PSB_2D_DST_YSTART_SHIFT (0)
+#define PSB_2D_DST_XSTART_MASK (0x00FFF000)
+#define PSB_2D_DST_XSTART_CLRMASK (0xFF000FFF)
+#define PSB_2D_DST_XSTART_SHIFT (12)
+#define PSB_2D_DST_YSTART_MASK (0x00000FFF)
+#define PSB_2D_DST_YSTART_CLRMASK (0xFFFFF000)
+#define PSB_2D_DST_YSTART_SHIFT (0)
/*
* DWORD2: (Always Present)
* X Size (Dest)
* Y Size (Dest)
*/
-#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
-#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
-#define PSB_2D_DST_XSIZE_SHIFT (12)
-#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
-#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
-#define PSB_2D_DST_YSIZE_SHIFT (0)
+#define PSB_2D_DST_XSIZE_MASK (0x00FFF000)
+#define PSB_2D_DST_XSIZE_CLRMASK (0xFF000FFF)
+#define PSB_2D_DST_XSIZE_SHIFT (12)
+#define PSB_2D_DST_YSIZE_MASK (0x00000FFF)
+#define PSB_2D_DST_YSIZE_CLRMASK (0xFFFFF000)
+#define PSB_2D_DST_YSIZE_SHIFT (0)
/*
* Source Surface (PSB_2D_SRC_SURF_BH)
*/
/*
- * WORD 0
+ * WORD 0
*/
-#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
-#define PSB_2D_SRC_1_PAL (0x00000000)
-#define PSB_2D_SRC_2_PAL (0x00008000)
-#define PSB_2D_SRC_4_PAL (0x00010000)
-#define PSB_2D_SRC_8_PAL (0x00018000)
-#define PSB_2D_SRC_8_ALPHA (0x00020000)
-#define PSB_2D_SRC_4_ALPHA (0x00028000)
-#define PSB_2D_SRC_332RGB (0x00030000)
-#define PSB_2D_SRC_4444ARGB (0x00038000)
-#define PSB_2D_SRC_555RGB (0x00040000)
-#define PSB_2D_SRC_1555ARGB (0x00048000)
-#define PSB_2D_SRC_565RGB (0x00050000)
-#define PSB_2D_SRC_0888ARGB (0x00058000)
-#define PSB_2D_SRC_8888ARGB (0x00060000)
-#define PSB_2D_SRC_8888UYVY (0x00068000)
-#define PSB_2D_SRC_RESERVED (0x00070000)
-#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
-
-
-#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
-#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
-#define PSB_2D_SRC_STRIDE_SHIFT (0)
+#define PSB_2D_SRC_FORMAT_MASK (0x00078000)
+#define PSB_2D_SRC_1_PAL (0x00000000)
+#define PSB_2D_SRC_2_PAL (0x00008000)
+#define PSB_2D_SRC_4_PAL (0x00010000)
+#define PSB_2D_SRC_8_PAL (0x00018000)
+#define PSB_2D_SRC_8_ALPHA (0x00020000)
+#define PSB_2D_SRC_4_ALPHA (0x00028000)
+#define PSB_2D_SRC_332RGB (0x00030000)
+#define PSB_2D_SRC_4444ARGB (0x00038000)
+#define PSB_2D_SRC_555RGB (0x00040000)
+#define PSB_2D_SRC_1555ARGB (0x00048000)
+#define PSB_2D_SRC_565RGB (0x00050000)
+#define PSB_2D_SRC_0888ARGB (0x00058000)
+#define PSB_2D_SRC_8888ARGB (0x00060000)
+#define PSB_2D_SRC_8888UYVY (0x00068000)
+#define PSB_2D_SRC_RESERVED (0x00070000)
+#define PSB_2D_SRC_1555ARGB_LOOKUP (0x00078000)
+
+
+#define PSB_2D_SRC_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_SRC_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_SRC_STRIDE_SHIFT (0)
/*
* WORD 1 - Base Address
*/
-#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
-#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
-#define PSB_2D_SRC_ADDR_SHIFT (2)
-#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
+#define PSB_2D_SRC_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_SRC_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_SRC_ADDR_SHIFT (2)
+#define PSB_2D_SRC_ADDR_ALIGNSHIFT (2)
/*
* Pattern Surface (PSB_2D_PAT_SURF_BH)
@@ -430,31 +426,31 @@
* WORD 0
*/
-#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
-#define PSB_2D_PAT_1_PAL (0x00000000)
-#define PSB_2D_PAT_2_PAL (0x00008000)
-#define PSB_2D_PAT_4_PAL (0x00010000)
-#define PSB_2D_PAT_8_PAL (0x00018000)
-#define PSB_2D_PAT_8_ALPHA (0x00020000)
-#define PSB_2D_PAT_4_ALPHA (0x00028000)
-#define PSB_2D_PAT_332RGB (0x00030000)
-#define PSB_2D_PAT_4444ARGB (0x00038000)
-#define PSB_2D_PAT_555RGB (0x00040000)
-#define PSB_2D_PAT_1555ARGB (0x00048000)
-#define PSB_2D_PAT_565RGB (0x00050000)
-#define PSB_2D_PAT_0888ARGB (0x00058000)
-#define PSB_2D_PAT_8888ARGB (0x00060000)
-
-#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
-#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
-#define PSB_2D_PAT_STRIDE_SHIFT (0)
+#define PSB_2D_PAT_FORMAT_MASK (0x00078000)
+#define PSB_2D_PAT_1_PAL (0x00000000)
+#define PSB_2D_PAT_2_PAL (0x00008000)
+#define PSB_2D_PAT_4_PAL (0x00010000)
+#define PSB_2D_PAT_8_PAL (0x00018000)
+#define PSB_2D_PAT_8_ALPHA (0x00020000)
+#define PSB_2D_PAT_4_ALPHA (0x00028000)
+#define PSB_2D_PAT_332RGB (0x00030000)
+#define PSB_2D_PAT_4444ARGB (0x00038000)
+#define PSB_2D_PAT_555RGB (0x00040000)
+#define PSB_2D_PAT_1555ARGB (0x00048000)
+#define PSB_2D_PAT_565RGB (0x00050000)
+#define PSB_2D_PAT_0888ARGB (0x00058000)
+#define PSB_2D_PAT_8888ARGB (0x00060000)
+
+#define PSB_2D_PAT_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_PAT_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_PAT_STRIDE_SHIFT (0)
/*
* WORD 1 - Base Address
*/
-#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
-#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
-#define PSB_2D_PAT_ADDR_SHIFT (2)
-#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
+#define PSB_2D_PAT_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_PAT_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_PAT_ADDR_SHIFT (2)
+#define PSB_2D_PAT_ADDR_ALIGNSHIFT (2)
/*
* Destination Surface (PSB_2D_DST_SURF_BH)
@@ -463,26 +459,26 @@
* WORD 0
*/
-#define PSB_2D_DST_FORMAT_MASK (0x00078000)
-#define PSB_2D_DST_332RGB (0x00030000)
-#define PSB_2D_DST_4444ARGB (0x00038000)
-#define PSB_2D_DST_555RGB (0x00040000)
-#define PSB_2D_DST_1555ARGB (0x00048000)
-#define PSB_2D_DST_565RGB (0x00050000)
-#define PSB_2D_DST_0888ARGB (0x00058000)
-#define PSB_2D_DST_8888ARGB (0x00060000)
-#define PSB_2D_DST_8888AYUV (0x00070000)
-
-#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
-#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
-#define PSB_2D_DST_STRIDE_SHIFT (0)
+#define PSB_2D_DST_FORMAT_MASK (0x00078000)
+#define PSB_2D_DST_332RGB (0x00030000)
+#define PSB_2D_DST_4444ARGB (0x00038000)
+#define PSB_2D_DST_555RGB (0x00040000)
+#define PSB_2D_DST_1555ARGB (0x00048000)
+#define PSB_2D_DST_565RGB (0x00050000)
+#define PSB_2D_DST_0888ARGB (0x00058000)
+#define PSB_2D_DST_8888ARGB (0x00060000)
+#define PSB_2D_DST_8888AYUV (0x00070000)
+
+#define PSB_2D_DST_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_DST_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_DST_STRIDE_SHIFT (0)
/*
* WORD 1 - Base Address
*/
-#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
-#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
-#define PSB_2D_DST_ADDR_SHIFT (2)
-#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
+#define PSB_2D_DST_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_DST_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_DST_ADDR_SHIFT (2)
+#define PSB_2D_DST_ADDR_ALIGNSHIFT (2)
/*
* Mask Surface (PSB_2D_MASK_SURF_BH)
@@ -490,99 +486,97 @@
/*
* WORD 0
*/
-#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
-#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
-#define PSB_2D_MASK_STRIDE_SHIFT (0)
+#define PSB_2D_MASK_STRIDE_MASK (0x00007FFF)
+#define PSB_2D_MASK_STRIDE_CLRMASK (0xFFFF8000)
+#define PSB_2D_MASK_STRIDE_SHIFT (0)
/*
* WORD 1 - Base Address
*/
-#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
-#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
-#define PSB_2D_MASK_ADDR_SHIFT (2)
-#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
+#define PSB_2D_MASK_ADDR_MASK (0x0FFFFFFC)
+#define PSB_2D_MASK_ADDR_CLRMASK (0x00000003)
+#define PSB_2D_MASK_ADDR_SHIFT (2)
+#define PSB_2D_MASK_ADDR_ALIGNSHIFT (2)
/*
* Source Palette (PSB_2D_SRC_PAL_BH)
*/
-#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
-#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
-#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
-#define PSB_2D_SRCPAL_BYTEALIGN (1024)
+#define PSB_2D_SRCPAL_ADDR_SHIFT (0)
+#define PSB_2D_SRCPAL_ADDR_CLRMASK (0xF0000007)
+#define PSB_2D_SRCPAL_ADDR_MASK (0x0FFFFFF8)
+#define PSB_2D_SRCPAL_BYTEALIGN (1024)
/*
* Pattern Palette (PSB_2D_PAT_PAL_BH)
*/
-#define PSB_2D_PATPAL_ADDR_SHIFT (0)
-#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
-#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
-#define PSB_2D_PATPAL_BYTEALIGN (1024)
+#define PSB_2D_PATPAL_ADDR_SHIFT (0)
+#define PSB_2D_PATPAL_ADDR_CLRMASK (0xF0000007)
+#define PSB_2D_PATPAL_ADDR_MASK (0x0FFFFFF8)
+#define PSB_2D_PATPAL_BYTEALIGN (1024)
/*
* Rop3 Codes (2 LS bytes)
*/
-#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
-#define PSB_2D_ROP3_PATCOPY (0xF0F0)
-#define PSB_2D_ROP3_WHITENESS (0xFFFF)
-#define PSB_2D_ROP3_BLACKNESS (0x0000)
-#define PSB_2D_ROP3_SRC (0xCC)
-#define PSB_2D_ROP3_PAT (0xF0)
-#define PSB_2D_ROP3_DST (0xAA)
-
+#define PSB_2D_ROP3_SRCCOPY (0xCCCC)
+#define PSB_2D_ROP3_PATCOPY (0xF0F0)
+#define PSB_2D_ROP3_WHITENESS (0xFFFF)
+#define PSB_2D_ROP3_BLACKNESS (0x0000)
+#define PSB_2D_ROP3_SRC (0xCC)
+#define PSB_2D_ROP3_PAT (0xF0)
+#define PSB_2D_ROP3_DST (0xAA)
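
/*
 * Illustrative sketch (editor's aside, not part of this patch): packing the
 * rop field of a 2D blit header with the masks and shifts above.  Per the
 * Tungsten Graphics note, passing the same code for rop A and rop B (e.g.
 * PSB_2D_ROP3_SRC for a plain source copy) means the mask surface is never
 * read and need not be set up.
 */
static inline u32 example_blit_rop(u8 rop_a, u8 rop_b)
{
	return (((u32)rop_b << PSB_2D_ROP3B_SHIFT) & PSB_2D_ROP3B_MASK) |
	       (((u32)rop_a << PSB_2D_ROP3A_SHIFT) & PSB_2D_ROP3A_MASK);
}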
/*
* Sizes.
*/
-#define PSB_SCENE_HW_COOKIE_SIZE 16
-#define PSB_TA_MEM_HW_COOKIE_SIZE 16
+#define PSB_SCENE_HW_COOKIE_SIZE 16
+#define PSB_TA_MEM_HW_COOKIE_SIZE 16
/*
* Scene stuff.
*/
-#define PSB_NUM_HW_SCENES 2
+#define PSB_NUM_HW_SCENES 2
/*
* Scheduler completion actions.
*/
-#define PSB_RASTER_BLOCK 0
-#define PSB_RASTER 1
-#define PSB_RETURN 2
-#define PSB_TA 3
-
-
-/*Power management*/
-#define PSB_PUNIT_PORT 0x04
-#define PSB_OSPMBA 0x78
-#define PSB_APMBA 0x7a
-#define PSB_APM_CMD 0x0
-#define PSB_APM_STS 0x04
-#define PSB_PWRGT_VID_ENC_MASK 0x30
-#define PSB_PWRGT_VID_DEC_MASK 0xc
-#define PSB_PWRGT_GL3_MASK 0xc0
-
-#define PSB_PM_SSC 0x20
-#define PSB_PM_SSS 0x30
-#define PSB_PWRGT_DISPLAY_MASK 0xc /*on a different BA than video/gfx*/
-#define MDFLD_PWRGT_DISPLAY_A_CNTR 0x0000000c
-#define MDFLD_PWRGT_DISPLAY_B_CNTR 0x0000c000
-#define MDFLD_PWRGT_DISPLAY_C_CNTR 0x00030000
-#define MDFLD_PWRGT_DISP_MIPI_CNTR 0x000c0000
-#define MDFLD_PWRGT_DISPLAY_CNTR (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR)// 0x000fc00c
-// Display SSS register bits are different in A0 vs. B0
-#define PSB_PWRGT_GFX_MASK 0x3
-#define MDFLD_PWRGT_DISPLAY_A_STS 0x000000c0
-#define MDFLD_PWRGT_DISPLAY_B_STS 0x00000300
-#define MDFLD_PWRGT_DISPLAY_C_STS 0x00000c00
-#define PSB_PWRGT_GFX_MASK_B0 0xc3
+#define PSB_RASTER_BLOCK 0
+#define PSB_RASTER 1
+#define PSB_RETURN 2
+#define PSB_TA 3
+
+/* Power management */
+#define PSB_PUNIT_PORT 0x04
+#define PSB_OSPMBA 0x78
+#define PSB_APMBA 0x7a
+#define PSB_APM_CMD 0x0
+#define PSB_APM_STS 0x04
+#define PSB_PWRGT_VID_ENC_MASK 0x30
+#define PSB_PWRGT_VID_DEC_MASK 0xc
+#define PSB_PWRGT_GL3_MASK 0xc0
+
+#define PSB_PM_SSC 0x20
+#define PSB_PM_SSS 0x30
+#define PSB_PWRGT_DISPLAY_MASK 0xc /*on a different BA than video/gfx*/
+#define MDFLD_PWRGT_DISPLAY_A_CNTR 0x0000000c
+#define MDFLD_PWRGT_DISPLAY_B_CNTR 0x0000c000
+#define MDFLD_PWRGT_DISPLAY_C_CNTR 0x00030000
+#define MDFLD_PWRGT_DISP_MIPI_CNTR 0x000c0000
+#define MDFLD_PWRGT_DISPLAY_CNTR (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */
+/* Display SSS register bits are different in A0 vs. B0 */
+#define PSB_PWRGT_GFX_MASK 0x3
+#define MDFLD_PWRGT_DISPLAY_A_STS 0x000000c0
+#define MDFLD_PWRGT_DISPLAY_B_STS 0x00000300
+#define MDFLD_PWRGT_DISPLAY_C_STS 0x00000c00
+#define PSB_PWRGT_GFX_MASK_B0 0xc3
#define MDFLD_PWRGT_DISPLAY_A_STS_B0 0x0000000c
-#define MDFLD_PWRGT_DISPLAY_B_STS_B0 0x0000c000
-#define MDFLD_PWRGT_DISPLAY_C_STS_B0 0x00030000
-#define MDFLD_PWRGT_DISP_MIPI_STS 0x000c0000
-#define MDFLD_PWRGT_DISPLAY_STS_A0 (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS)// 0x000fc00c
-#define MDFLD_PWRGT_DISPLAY_STS_B0 (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS)// 0x000fc00c
+#define MDFLD_PWRGT_DISPLAY_B_STS_B0 0x0000c000
+#define MDFLD_PWRGT_DISPLAY_C_STS_B0 0x00030000
+#define MDFLD_PWRGT_DISP_MIPI_STS 0x000c0000
+#define MDFLD_PWRGT_DISPLAY_STS_A0 (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
+#define MDFLD_PWRGT_DISPLAY_STS_B0 (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
#endif
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index 46daade7a9e..d286b222318 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -325,7 +325,7 @@ static int blkvsc_do_operation(struct block_device_context *blkdev,
page_buf = alloc_page(GFP_KERNEL);
if (!page_buf) {
- kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
+ kmem_cache_free(blkdev->request_pool, blkvsc_req);
return -ENOMEM;
}
@@ -422,7 +422,7 @@ cleanup:
__free_page(page_buf);
- kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
+ kmem_cache_free(blkdev->request_pool, blkvsc_req);
return ret;
}
@@ -518,22 +518,18 @@ static int blkvsc_remove(struct hv_device *dev)
blkvsc_do_operation(blkdev, DO_FLUSH);
- blk_cleanup_queue(blkdev->gd->queue);
+ if (blkdev->users == 0) {
+ del_gendisk(blkdev->gd);
+ put_disk(blkdev->gd);
+ blk_cleanup_queue(blkdev->gd->queue);
- /*
- * Call to the vsc driver to let it know that the device is being
- * removed
- */
- storvsc_dev_remove(dev);
-
- del_gendisk(blkdev->gd);
+ storvsc_dev_remove(blkdev->device_ctx);
- kmem_cache_destroy(blkdev->request_pool);
-
- kfree(blkdev);
+ kmem_cache_destroy(blkdev->request_pool);
+ kfree(blkdev);
+ }
return 0;
-
}
static void blkvsc_shutdown(struct hv_device *dev)
@@ -568,13 +564,23 @@ static int blkvsc_release(struct gendisk *disk, fmode_t mode)
struct block_device_context *blkdev = disk->private_data;
unsigned long flags;
- if (blkdev->users == 1) {
+ spin_lock_irqsave(&blkdev->lock, flags);
+
+ if ((--blkdev->users == 0) && (blkdev->shutting_down)) {
+ blk_stop_queue(blkdev->gd->queue);
+ spin_unlock_irqrestore(&blkdev->lock, flags);
+
blkvsc_do_operation(blkdev, DO_FLUSH);
- }
+ del_gendisk(blkdev->gd);
+ put_disk(blkdev->gd);
+ blk_cleanup_queue(blkdev->gd->queue);
- spin_lock_irqsave(&blkdev->lock, flags);
- blkdev->users--;
- spin_unlock_irqrestore(&blkdev->lock, flags);
+ storvsc_dev_remove(blkdev->device_ctx);
+
+ kmem_cache_destroy(blkdev->request_pool);
+ kfree(blkdev);
+ } else
+ spin_unlock_irqrestore(&blkdev->lock, flags);
return 0;
}
@@ -824,7 +830,6 @@ static int blkvsc_drv_init(void)
BUILD_BUG_ON(sizeof(sector_t) != 8);
memcpy(&drv->dev_type, &dev_type, sizeof(struct hv_guid));
- drv->name = drv_name;
drv->driver.name = drv_name;
/* The driver belongs to vmbus */
@@ -921,7 +926,6 @@ static int blkvsc_probe(struct hv_device *dev)
else
blkdev->gd->first_minor = 0;
blkdev->gd->fops = &block_ops;
- blkdev->gd->events = DISK_EVENT_MEDIA_CHANGE;
blkdev->gd->private_data = blkdev;
blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
sprintf(blkdev->gd->disk_name, "hd%c", 'a' + major_info.index);
diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c
index f655e59a9a8..455f47a891f 100644
--- a/drivers/staging/hv/channel.c
+++ b/drivers/staging/hv/channel.c
@@ -39,7 +39,6 @@ static int create_gpadl_header(
u32 size, /* page-size multiple */
struct vmbus_channel_msginfo **msginfo,
u32 *messagecount);
-static void dump_vmbus_channel(struct vmbus_channel *channel);
static void vmbus_setevent(struct vmbus_channel *channel);
/*
@@ -186,12 +185,12 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
openMsg = (struct vmbus_channel_open_channel *)openInfo->msg;
openMsg->header.msgtype = CHANNELMSG_OPENCHANNEL;
- openMsg->openid = newchannel->offermsg.child_relid; /* FIXME */
+ openMsg->openid = newchannel->offermsg.child_relid;
openMsg->child_relid = newchannel->offermsg.child_relid;
openMsg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
openMsg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
PAGE_SHIFT;
- openMsg->server_contextarea_gpadlhandle = 0; /* TODO */
+ openMsg->server_contextarea_gpadlhandle = 0;
if (userdatalen > MAX_USER_DEFINED_BYTES) {
err = -EINVAL;
@@ -210,9 +209,9 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
sizeof(struct vmbus_channel_open_channel));
if (ret != 0)
- goto Cleanup;
+ goto cleanup;
- t = wait_for_completion_timeout(&openInfo->waitevent, HZ);
+ t = wait_for_completion_timeout(&openInfo->waitevent, 5*HZ);
if (t == 0) {
err = -ETIMEDOUT;
goto errorout;
@@ -222,7 +221,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
if (openInfo->response.open_result.status)
err = openInfo->response.open_result.status;
-Cleanup:
+cleanup:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&openInfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
@@ -356,20 +355,35 @@ static int create_gpadl_header(void *kbuffer, u32 size,
sizeof(struct vmbus_channel_gpadl_body) +
pfncurr * sizeof(u64);
msgbody = kzalloc(msgsize, GFP_KERNEL);
- /* FIXME: we probably need to more if this fails */
- if (!msgbody)
+
+ if (!msgbody) {
+ struct vmbus_channel_msginfo *pos = NULL;
+ struct vmbus_channel_msginfo *tmp = NULL;
+ /*
+ * Free up all the allocated messages.
+ */
+ list_for_each_entry_safe(pos, tmp,
+ &msgheader->submsglist,
+ msglistentry) {
+
+ list_del(&pos->msglistentry);
+ kfree(pos);
+ }
+
goto nomem;
+ }
+
msgbody->msgsize = msgsize;
(*messagecount)++;
gpadl_body =
(struct vmbus_channel_gpadl_body *)msgbody->msg;
/*
- * FIXME:
* Gpadl is u32 and we are using a pointer which could
* be 64-bit
+ * This is governed by the guest/host protocol and
+ * so the hypervisor guarantees that this is ok.
*/
- /* gpadl_body->Gpadl = kbuffer; */
for (i = 0; i < pfncurr; i++)
gpadl_body->pfn[i] = pfn + pfnsum + i;
@@ -458,12 +472,11 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
sizeof(*msginfo));
if (ret != 0)
- goto Cleanup;
+ goto cleanup;
if (msgcount > 1) {
list_for_each(curr, &msginfo->submsglist) {
- /* FIXME: should this use list_entry() instead ? */
submsginfo = (struct vmbus_channel_msginfo *)curr;
gpadl_body =
(struct vmbus_channel_gpadl_body *)submsginfo->msg;
@@ -478,18 +491,18 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
submsginfo->msgsize -
sizeof(*submsginfo));
if (ret != 0)
- goto Cleanup;
+ goto cleanup;
}
}
- t = wait_for_completion_timeout(&msginfo->waitevent, HZ);
+ t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
BUG_ON(t == 0);
/* At this point, we received the gpadl created msg */
*gpadl_handle = gpadlmsg->gpadl;
-Cleanup:
+cleanup:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
@@ -532,7 +545,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
sizeof(struct vmbus_channel_gpadl_teardown));
BUG_ON(ret != 0);
- t = wait_for_completion_timeout(&info->waitevent, HZ);
+ t = wait_for_completion_timeout(&info->waitevent, 5*HZ);
BUG_ON(t == 0);
/* Received a torndown response */
@@ -551,24 +564,15 @@ EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
void vmbus_close(struct vmbus_channel *channel)
{
struct vmbus_channel_close_channel *msg;
- struct vmbus_channel_msginfo *info;
- unsigned long flags;
int ret;
/* Stop callback and cancel the timer asap */
channel->onchannel_callback = NULL;
- del_timer_sync(&channel->poll_timer);
/* Send a closing message */
- info = kmalloc(sizeof(*info) +
- sizeof(struct vmbus_channel_close_channel), GFP_KERNEL);
- /* FIXME: can't do anything other than return here because the
- * function is void */
- if (!info)
- return;
+ msg = &channel->close_msg.msg;
- msg = (struct vmbus_channel_close_channel *)info->msg;
msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
msg->child_relid = channel->offermsg.child_relid;
@@ -580,8 +584,6 @@ void vmbus_close(struct vmbus_channel *channel)
vmbus_teardown_gpadl(channel,
channel->ringbuffer_gpadlhandle);
- /* TODO: Send a msg to release the childRelId */
-
/* Cleanup the ring buffers for this channel */
hv_ringbuffer_cleanup(&channel->outbound);
hv_ringbuffer_cleanup(&channel->inbound);
@@ -589,21 +591,7 @@ void vmbus_close(struct vmbus_channel *channel)
free_pages((unsigned long)channel->ringbuffer_pages,
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
- kfree(info);
-
- /*
- * If we are closing the channel during an error path in
- * opening the channel, don't free the channel since the
- * caller will free the channel
- */
- if (channel->state == CHANNEL_OPEN_STATE) {
- spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
- list_del(&channel->listentry);
- spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
-
- free_channel(channel);
- }
}
EXPORT_SYMBOL_GPL(vmbus_close);
@@ -632,7 +620,6 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
u64 aligned_data = 0;
int ret;
- dump_vmbus_channel(channel);
/* Setup the descriptor */
desc.type = type; /* VmbusPacketTypeDataInBand; */
@@ -650,7 +637,6 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
- /* TODO: We should determine if this is optional */
if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
vmbus_setevent(channel);
@@ -679,7 +665,6 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;
- dump_vmbus_channel(channel);
/*
* Adjust the size down since vmbus_channel_packet_page_buffer is the
@@ -713,7 +698,6 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
- /* TODO: We should determine if this is optional */
if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
vmbus_setevent(channel);
@@ -739,7 +723,6 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
- dump_vmbus_channel(channel);
if ((pfncount < 0) || (pfncount > MAX_MULTIPAGE_BUFFER_COUNT))
return -EINVAL;
@@ -777,7 +760,6 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
- /* TODO: We should determine if this is optional */
if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
vmbus_setevent(channel);
@@ -829,7 +811,7 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
pr_err("Buffer too small - got %d needs %d\n",
bufferlen, userlen);
- return -1;
+ return -ETOOSMALL;
}
*requestid = desc.trans_id;
@@ -893,36 +875,3 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
return 0;
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);
-
-/*
- * vmbus_onchannel_event - Channel event callback
- */
-void vmbus_onchannel_event(struct vmbus_channel *channel)
-{
- dump_vmbus_channel(channel);
-
- channel->onchannel_callback(channel->channel_callback_context);
-
- mod_timer(&channel->poll_timer, jiffies + usecs_to_jiffies(100));
-}
-
-/*
- * vmbus_ontimer - Timer event callback
- */
-void vmbus_ontimer(unsigned long data)
-{
- struct vmbus_channel *channel = (struct vmbus_channel *)data;
-
- if (channel->onchannel_callback)
- channel->onchannel_callback(channel->channel_callback_context);
-}
-
-/*
- * dump_vmbus_channel- Dump vmbus channel info to the console
- */
-static void dump_vmbus_channel(struct vmbus_channel *channel)
-{
- DPRINT_DBG(VMBUS, "Channel (%d)", channel->offermsg.child_relid);
- hv_dump_ring_info(&channel->outbound, "Outbound ");
- hv_dump_ring_info(&channel->inbound, "Inbound ");
-}
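Several hunks in this file, and in the files below, replace a one-second wait with a five-second one and convert bare -1 returns into real errno values. A minimal sketch of the post-and-wait idiom they converge on, assuming a hypothetical request structure (example_request, example_post_msg and the status field are placeholders):

	/*
	 * Illustrative only: post a message to the host and wait up to
	 * five seconds for the completion to be signalled.
	 */
	static int example_post_and_wait(struct example_request *req)
	{
		unsigned long t;
		int ret;

		init_completion(&req->waitevent);

		ret = example_post_msg(req);
		if (ret != 0)
			return ret;

		/* wait_for_completion_timeout() returns 0 on timeout. */
		t = wait_for_completion_timeout(&req->waitevent, 5 * HZ);
		if (t == 0)
			return -ETIMEDOUT;

		return req->status;
	}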
diff --git a/drivers/staging/hv/channel_mgmt.c b/drivers/staging/hv/channel_mgmt.c
index 957d61ee4ce..bf011f3fb85 100644
--- a/drivers/staging/hv/channel_mgmt.c
+++ b/drivers/staging/hv/channel_mgmt.c
@@ -283,10 +283,6 @@ static struct vmbus_channel *alloc_channel(void)
spin_lock_init(&channel->inbound_lock);
- init_timer(&channel->poll_timer);
- channel->poll_timer.data = (unsigned long)channel;
- channel->poll_timer.function = vmbus_ontimer;
-
channel->controlwq = create_workqueue("hv_vmbus_ctl");
if (!channel->controlwq) {
kfree(channel);
@@ -315,7 +311,6 @@ static void release_channel(struct work_struct *work)
*/
void free_channel(struct vmbus_channel *channel)
{
- del_timer_sync(&channel->poll_timer);
/*
* We have to release the channel's workqueue/thread in the vmbus's
@@ -482,7 +477,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
newchannel->monitor_grp = (u8)offer->monitorid / 32;
newchannel->monitor_bit = (u8)offer->monitorid % 32;
- /* TODO: Make sure the offer comes from our parent partition */
INIT_WORK(&newchannel->work, vmbus_process_offer);
queue_work(newchannel->controlwq, &newchannel->work);
}
@@ -773,7 +767,7 @@ int vmbus_request_offers(void)
goto cleanup;
}
- t = wait_for_completion_timeout(&msginfo->waitevent, HZ);
+ t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
diff --git a/drivers/staging/hv/connection.c b/drivers/staging/hv/connection.c
index 37bbf770ef1..e6b40392e08 100644
--- a/drivers/staging/hv/connection.c
+++ b/drivers/staging/hv/connection.c
@@ -51,13 +51,13 @@ int vmbus_connect(void)
/* Make sure we are not connecting or connected */
if (vmbus_connection.conn_state != DISCONNECTED)
- return -1;
+ return -EISCONN;
/* Initialize the vmbus connection */
vmbus_connection.conn_state = CONNECTING;
vmbus_connection.work_queue = create_workqueue("hv_vmbus_con");
if (!vmbus_connection.work_queue) {
- ret = -1;
+ ret = -ENOMEM;
goto cleanup;
}
@@ -74,7 +74,7 @@ int vmbus_connect(void)
vmbus_connection.int_page =
(void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 0);
if (vmbus_connection.int_page == NULL) {
- ret = -1;
+ ret = -ENOMEM;
goto cleanup;
}
@@ -90,7 +90,7 @@ int vmbus_connect(void)
vmbus_connection.monitor_pages =
(void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 1);
if (vmbus_connection.monitor_pages == NULL) {
- ret = -1;
+ ret = -ENOMEM;
goto cleanup;
}
@@ -135,7 +135,7 @@ int vmbus_connect(void)
}
/* Wait for the connection response */
- t = wait_for_completion_timeout(&msginfo->waitevent, HZ);
+ t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
if (t == 0) {
spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
flags);
@@ -157,7 +157,7 @@ int vmbus_connect(void)
pr_err("Unable to connect, "
"Version %d not supported by Hyper-V\n",
VMBUS_REVISION_NUMBER);
- ret = -1;
+ ret = -ECONNREFUSED;
goto cleanup;
}
@@ -185,44 +185,6 @@ cleanup:
return ret;
}
-/*
- * vmbus_disconnect -
- * Sends a disconnect request on the partition service connection
- */
-int vmbus_disconnect(void)
-{
- int ret = 0;
- struct vmbus_channel_message_header *msg;
-
- /* Make sure we are connected */
- if (vmbus_connection.conn_state != CONNECTED)
- return -1;
-
- msg = kzalloc(sizeof(struct vmbus_channel_message_header), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- msg->msgtype = CHANNELMSG_UNLOAD;
-
- ret = vmbus_post_msg(msg,
- sizeof(struct vmbus_channel_message_header));
- if (ret != 0)
- goto cleanup;
-
- free_pages((unsigned long)vmbus_connection.int_page, 0);
- free_pages((unsigned long)vmbus_connection.monitor_pages, 1);
-
- /* TODO: iterate thru the msg list and free up */
- destroy_workqueue(vmbus_connection.work_queue);
-
- vmbus_connection.conn_state = DISCONNECTED;
-
- pr_info("hv_vmbus disconnected\n");
-
-cleanup:
- kfree(msg);
- return ret;
-}
/*
* relid2channel - Get the channel object given its
@@ -262,7 +224,7 @@ static void process_chn_event(u32 relid)
channel = relid2channel(relid);
if (channel) {
- vmbus_onchannel_event(channel);
+ channel->onchannel_callback(channel->channel_callback_context);
} else {
pr_err("channel not found for relid - %u\n", relid);
}
diff --git a/drivers/staging/hv/hv.c b/drivers/staging/hv/hv.c
index a2cc0911de5..824f81679ae 100644
--- a/drivers/staging/hv/hv.c
+++ b/drivers/staging/hv/hv.c
@@ -277,11 +277,11 @@ u16 hv_post_message(union hv_connection_id connection_id,
unsigned long addr;
if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
- return -1;
+ return -EMSGSIZE;
addr = (unsigned long)kmalloc(sizeof(struct aligned_input), GFP_ATOMIC);
if (!addr)
- return -1;
+ return -ENOMEM;
aligned_msg = (struct hv_input_post_message *)
(ALIGN(addr, HV_HYPERCALL_PARAM_ALIGN));
diff --git a/drivers/staging/hv/hv_mouse.c b/drivers/staging/hv/hv_mouse.c
index 359e73741c4..d957fc22801 100644
--- a/drivers/staging/hv/hv_mouse.c
+++ b/drivers/staging/hv/hv_mouse.c
@@ -24,7 +24,6 @@
#include <linux/hiddev.h>
#include <linux/pci.h>
#include <linux/dmi.h>
-#include <linux/delay.h>
#include "hyperv.h"
@@ -936,7 +935,6 @@ static int __init mousevsc_init(void)
sizeof(struct hv_guid));
drv->driver.name = driver_name;
- drv->name = driver_name;
/* The driver belongs to vmbus */
vmbus_child_driver_register(&drv->driver);
diff --git a/drivers/staging/hv/hv_timesource.c b/drivers/staging/hv/hv_timesource.c
index 0efb0491525..2b0f9aaf912 100644
--- a/drivers/staging/hv/hv_timesource.c
+++ b/drivers/staging/hv/hv_timesource.c
@@ -22,7 +22,6 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/version.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
diff --git a/drivers/staging/hv/hyperv.h b/drivers/staging/hv/hyperv.h
index 3310e9bdf56..1747a2404f6 100644
--- a/drivers/staging/hv/hyperv.h
+++ b/drivers/staging/hv/hyperv.h
@@ -523,46 +523,6 @@ enum vmbus_channel_state {
CHANNEL_OPEN_STATE,
};
-struct vmbus_channel {
- struct list_head listentry;
-
- struct hv_device *device_obj;
-
- struct timer_list poll_timer; /* SA-111 workaround */
- struct work_struct work;
-
- enum vmbus_channel_state state;
- /*
- * For util channels, stash the
- * the service index for easy access.
- */
- s8 util_index;
-
- struct vmbus_channel_offer_channel offermsg;
- /*
- * These are based on the OfferMsg.MonitorId.
- * Save it here for easy access.
- */
- u8 monitor_grp;
- u8 monitor_bit;
-
- u32 ringbuffer_gpadlhandle;
-
- /* Allocated memory for ring buffer */
- void *ringbuffer_pages;
- u32 ringbuffer_pagecount;
- struct hv_ring_buffer_info outbound; /* send to parent */
- struct hv_ring_buffer_info inbound; /* receive from parent */
- spinlock_t inbound_lock;
- struct workqueue_struct *controlwq;
-
- /* Channel callback are invoked in this workqueue context */
- /* HANDLE dataWorkQueue; */
-
- void (*onchannel_callback)(void *context);
- void *channel_callback_context;
-};
-
struct vmbus_channel_debug_info {
u32 relid;
enum vmbus_channel_state state;
@@ -609,6 +569,51 @@ struct vmbus_channel_msginfo {
unsigned char msg[0];
};
+struct vmbus_close_msg {
+ struct vmbus_channel_msginfo info;
+ struct vmbus_channel_close_channel msg;
+};
+
+struct vmbus_channel {
+ struct list_head listentry;
+
+ struct hv_device *device_obj;
+
+ struct work_struct work;
+
+ enum vmbus_channel_state state;
+ /*
+ * For util channels, stash the
+ * service index for easy access.
+ */
+ s8 util_index;
+
+ struct vmbus_channel_offer_channel offermsg;
+ /*
+ * These are based on the OfferMsg.MonitorId.
+ * Save it here for easy access.
+ */
+ u8 monitor_grp;
+ u8 monitor_bit;
+
+ u32 ringbuffer_gpadlhandle;
+
+ /* Allocated memory for ring buffer */
+ void *ringbuffer_pages;
+ u32 ringbuffer_pagecount;
+ struct hv_ring_buffer_info outbound; /* send to parent */
+ struct hv_ring_buffer_info inbound; /* receive from parent */
+ spinlock_t inbound_lock;
+ struct workqueue_struct *controlwq;
+
+ struct vmbus_close_msg close_msg;
+
+ /* Channel callback are invoked in this workqueue context */
+ /* HANDLE dataWorkQueue; */
+
+ void (*onchannel_callback)(void *context);
+ void *channel_callback_context;
+};
void free_channel(struct vmbus_channel *channel);
@@ -691,7 +696,6 @@ extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
u32 *buffer_actual_len,
u64 *requestid);
-extern void vmbus_onchannel_event(struct vmbus_channel *channel);
extern void vmbus_get_debug_info(struct vmbus_channel *channel,
struct vmbus_channel_debug_info *debug);
diff --git a/drivers/staging/hv/hyperv_net.h b/drivers/staging/hv/hyperv_net.h
index 315097df799..27f987b48df 100644
--- a/drivers/staging/hv/hyperv_net.h
+++ b/drivers/staging/hv/hyperv_net.h
@@ -99,9 +99,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
int netvsc_initialize(struct hv_driver *drv);
int rndis_filter_open(struct hv_device *dev);
int rndis_filter_close(struct hv_device *dev);
-int rndis_filte_device_add(struct hv_device *dev,
+int rndis_filter_device_add(struct hv_device *dev,
void *additional_info);
-int rndis_filter_device_remove(struct hv_device *dev);
+void rndis_filter_device_remove(struct hv_device *dev);
int rndis_filter_receive(struct hv_device *dev,
struct hv_netvsc_packet *pkt);
@@ -355,10 +355,6 @@ struct nvsp_message {
/* #define NVSC_MIN_PROTOCOL_VERSION 1 */
/* #define NVSC_MAX_PROTOCOL_VERSION 1 */
-#define NETVSC_SEND_BUFFER_SIZE (64*1024) /* 64K */
-#define NETVSC_SEND_BUFFER_ID 0xface
-
-
#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024) /* 1MB */
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
@@ -383,12 +379,6 @@ struct netvsc_device {
struct list_head recv_pkt_list;
spinlock_t recv_pkt_list_lock;
- /* Send buffer allocated by us but manages by NetVSP */
- void *send_buf;
- u32 send_buf_size;
- u32 send_buf_gpadl_handle;
- u32 send_section_size;
-
/* Receive buffer allocated by us but managed by NetVSP */
void *recv_buf;
u32 recv_buf_size;
diff --git a/drivers/staging/hv/hyperv_vmbus.h b/drivers/staging/hv/hyperv_vmbus.h
index bf30a425b64..349ad80ce32 100644
--- a/drivers/staging/hv/hyperv_vmbus.h
+++ b/drivers/staging/hv/hyperv_vmbus.h
@@ -619,8 +619,6 @@ struct vmbus_channel *relid2channel(u32 relid);
int vmbus_connect(void);
-int vmbus_disconnect(void);
-
int vmbus_post_msg(void *buffer, size_t buflen);
int vmbus_set_event(u32 child_relid);
diff --git a/drivers/staging/hv/netvsc.c b/drivers/staging/hv/netvsc.c
index 41cbb26eccb..dc5e5c488e3 100644
--- a/drivers/staging/hv/netvsc.c
+++ b/drivers/staging/hv/netvsc.c
@@ -270,7 +270,7 @@ static int netvsc_init_recv_buf(struct hv_device *device)
goto cleanup;
}
- t = wait_for_completion_timeout(&net_device->channel_init_wait, HZ);
+ t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
BUG_ON(t == 0);
@@ -323,162 +323,6 @@ exit:
return ret;
}
-static int netvsc_destroy_send_buf(struct netvsc_device *net_device)
-{
- struct nvsp_message *revoke_packet;
- int ret = 0;
-
- /*
- * If we got a section count, it means we received a
- * SendReceiveBufferComplete msg (ie sent
- * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
- * to send a revoke msg here
- */
- if (net_device->send_section_size) {
- /* Send the revoke send buffer */
- revoke_packet = &net_device->revoke_packet;
- memset(revoke_packet, 0, sizeof(struct nvsp_message));
-
- revoke_packet->hdr.msg_type =
- NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
- revoke_packet->msg.v1_msg.
- revoke_send_buf.id = NETVSC_SEND_BUFFER_ID;
-
- ret = vmbus_sendpacket(net_device->dev->channel,
- revoke_packet,
- sizeof(struct nvsp_message),
- (unsigned long)revoke_packet,
- VM_PKT_DATA_INBAND, 0);
- /*
- * If we failed here, we might as well return and have a leak
- * rather than continue and a bugchk
- */
- if (ret != 0) {
- dev_err(&net_device->dev->device, "unable to send "
- "revoke send buffer to netvsp");
- return -1;
- }
- }
-
- /* Teardown the gpadl on the vsp end */
- if (net_device->send_buf_gpadl_handle) {
- ret = vmbus_teardown_gpadl(net_device->dev->channel,
- net_device->send_buf_gpadl_handle);
-
- /*
- * If we failed here, we might as well return and have a leak
- * rather than continue and a bugchk
- */
- if (ret != 0) {
- dev_err(&net_device->dev->device,
- "unable to teardown send buffer's gpadl");
- return -1;
- }
- net_device->send_buf_gpadl_handle = 0;
- }
-
- if (net_device->send_buf) {
- /* Free up the receive buffer */
- free_pages((unsigned long)net_device->send_buf,
- get_order(net_device->send_buf_size));
- net_device->send_buf = NULL;
- }
-
- return ret;
-}
-
-static int netvsc_init_send_buf(struct hv_device *device)
-{
- int ret = 0;
- int t;
- struct netvsc_device *net_device;
- struct nvsp_message *init_packet;
-
- net_device = get_outbound_net_device(device);
- if (!net_device) {
- dev_err(&device->device, "unable to get net device..."
- "device being destroyed?");
- return -1;
- }
- if (net_device->send_buf_size <= 0) {
- ret = -EINVAL;
- goto cleanup;
- }
-
- net_device->send_buf =
- (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
- get_order(net_device->send_buf_size));
- if (!net_device->send_buf) {
- dev_err(&device->device, "unable to allocate send "
- "buffer of size %d", net_device->send_buf_size);
- ret = -1;
- goto cleanup;
- }
-
- /*
- * Establish the gpadl handle for this buffer on this
- * channel. Note: This call uses the vmbus connection rather
- * than the channel to establish the gpadl handle.
- */
- ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
- net_device->send_buf_size,
- &net_device->send_buf_gpadl_handle);
- if (ret != 0) {
- dev_err(&device->device, "unable to establish send buffer's gpadl");
- goto cleanup;
- }
-
- /* Notify the NetVsp of the gpadl handle */
- init_packet = &net_device->channel_init_pkt;
-
- memset(init_packet, 0, sizeof(struct nvsp_message));
-
- init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
- init_packet->msg.v1_msg.send_recv_buf.
- gpadl_handle = net_device->send_buf_gpadl_handle;
- init_packet->msg.v1_msg.send_recv_buf.id =
- NETVSC_SEND_BUFFER_ID;
-
- /* Send the gpadl notification request */
- ret = vmbus_sendpacket(device->channel, init_packet,
- sizeof(struct nvsp_message),
- (unsigned long)init_packet,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0) {
- dev_err(&device->device,
- "unable to send receive buffer's gpadl to netvsp");
- goto cleanup;
- }
-
- t = wait_for_completion_timeout(&net_device->channel_init_wait, HZ);
-
- BUG_ON(t == 0);
-
- /* Check the response */
- if (init_packet->msg.v1_msg.
- send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
- dev_err(&device->device, "Unable to complete send buffer "
- "initialzation with NetVsp - status %d",
- init_packet->msg.v1_msg.
- send_send_buf_complete.status);
- ret = -1;
- goto cleanup;
- }
-
- net_device->send_section_size = init_packet->
- msg.v1_msg.send_send_buf_complete.section_size;
-
- goto exit;
-
-cleanup:
- netvsc_destroy_send_buf(net_device);
-
-exit:
- put_net_device(device);
- return ret;
-}
-
static int netvsc_connect_vsp(struct hv_device *device)
{
@@ -513,7 +357,7 @@ static int netvsc_connect_vsp(struct hv_device *device)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&net_device->channel_init_wait, HZ);
+ t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
@@ -556,8 +400,6 @@ static int netvsc_connect_vsp(struct hv_device *device)
/* Post the big receive buffer to NetVSP */
ret = netvsc_init_recv_buf(device);
- if (ret == 0)
- ret = netvsc_init_send_buf(device);
cleanup:
put_net_device(device);
@@ -567,7 +409,6 @@ cleanup:
static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
netvsc_destroy_recv_buf(net_device);
- netvsc_destroy_send_buf(net_device);
}
/*
@@ -698,10 +539,10 @@ int netvsc_send(struct hv_device *device,
(unsigned long)packet);
} else {
ret = vmbus_sendpacket(device->channel, &sendMessage,
- sizeof(struct nvsp_message),
- (unsigned long)packet,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ sizeof(struct nvsp_message),
+ (unsigned long)packet,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
@@ -1099,8 +940,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
spin_lock_init(&net_device->recv_pkt_list_lock);
- net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
-
INIT_LIST_HEAD(&net_device->recv_pkt_list);
for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 7b9c229f729..61989f0d9f0 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -21,6 +21,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
+#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
@@ -45,8 +46,8 @@
struct net_device_context {
/* point back to our device context */
struct hv_device *device_ctx;
- unsigned long avail;
- struct work_struct work;
+ atomic_t avail;
+ struct delayed_work dwork;
};
@@ -118,9 +119,10 @@ static void netvsc_xmit_completion(void *context)
dev_kfree_skb_any(skb);
- net_device_ctx->avail += num_pages;
- if (net_device_ctx->avail >= PACKET_PAGES_HIWATER)
- netif_wake_queue(net);
+ atomic_add(num_pages, &net_device_ctx->avail);
+ if (atomic_read(&net_device_ctx->avail) >=
+ PACKET_PAGES_HIWATER)
+ netif_wake_queue(net);
}
}
@@ -133,7 +135,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
/* Add 1 for skb->data and additional one for RNDIS */
num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
- if (num_pages > net_device_ctx->avail)
+ if (num_pages > atomic_read(&net_device_ctx->avail))
return NETDEV_TX_BUSY;
/* Allocate a netvsc packet based on # of frags. */
@@ -156,9 +158,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
/* Setup the rndis header */
packet->page_buf_cnt = num_pages;
- /* TODO: Flush all write buffers/ memory fence ??? */
- /* wmb(); */
-
/* Initialize it from the skb */
packet->total_data_buflen = skb->len;
@@ -188,8 +187,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
net->stats.tx_bytes += skb->len;
net->stats.tx_packets++;
- net_device_ctx->avail -= num_pages;
- if (net_device_ctx->avail < PACKET_PAGES_LOWATER)
+ atomic_sub(num_pages, &net_device_ctx->avail);
+ if (atomic_read(&net_device_ctx->avail) < PACKET_PAGES_LOWATER)
netif_stop_queue(net);
} else {
/* we are shutting down or bus overloaded, just drop packet */
@@ -220,7 +219,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
netif_wake_queue(net);
netif_notify_peers(net);
ndev_ctx = netdev_priv(net);
- schedule_work(&ndev_ctx->work);
+ schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
} else {
netif_carrier_off(net);
netif_stop_queue(net);
@@ -318,7 +317,7 @@ static const struct net_device_ops device_ops = {
* Send GARP packet to network peers after migrations.
* After Quick Migration, the network is not immediately operational in the
* current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
- * another netif_notify_peers() into a scheduled work, otherwise GARP packet
+ * another netif_notify_peers() into a delayed work, otherwise GARP packet
* will not be sent after quick migration, and cause network disconnection.
*/
static void netvsc_send_garp(struct work_struct *w)
@@ -326,8 +325,7 @@ static void netvsc_send_garp(struct work_struct *w)
struct net_device_context *ndev_ctx;
struct net_device *net;
- msleep(20);
- ndev_ctx = container_of(w, struct net_device_context, work);
+ ndev_ctx = container_of(w, struct net_device_context, dwork.work);
net = dev_get_drvdata(&ndev_ctx->device_ctx->device);
netif_notify_peers(net);
}
@@ -349,13 +347,13 @@ static int netvsc_probe(struct hv_device *dev)
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
- net_device_ctx->avail = ring_size;
+ atomic_set(&net_device_ctx->avail, ring_size);
dev_set_drvdata(&dev->device, net);
- INIT_WORK(&net_device_ctx->work, netvsc_send_garp);
+ INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
/* Notify the netvsc driver of the new device */
device_info.ring_size = ring_size;
- ret = rndis_filte_device_add(dev, &device_info);
+ ret = rndis_filter_device_add(dev, &device_info);
if (ret != 0) {
free_netdev(net);
dev_set_drvdata(&dev->device, NULL);
@@ -364,17 +362,7 @@ static int netvsc_probe(struct hv_device *dev)
return ret;
}
- /*
- * If carrier is still off ie we did not get a link status callback,
- * update it if necessary
- */
- /*
- * FIXME: We should use a atomic or test/set instead to avoid getting
- * out of sync with the device's link status
- */
- if (!netif_carrier_ok(net))
- if (!device_info.link_state)
- netif_carrier_on(net);
+ netif_carrier_on(net);
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
@@ -400,16 +388,18 @@ static int netvsc_probe(struct hv_device *dev)
static int netvsc_remove(struct hv_device *dev)
{
struct net_device *net = dev_get_drvdata(&dev->device);
- int ret;
+ struct net_device_context *ndev_ctx;
if (net == NULL) {
dev_err(&dev->device, "No net device to remove\n");
return 0;
}
+ ndev_ctx = netdev_priv(net);
+ cancel_delayed_work_sync(&ndev_ctx->dwork);
+
/* Stop outbound asap */
netif_stop_queue(net);
- /* netif_carrier_off(net); */
unregister_netdev(net);
@@ -417,14 +407,10 @@ static int netvsc_remove(struct hv_device *dev)
* Call to the vsc driver to let it know that the device is being
* removed
*/
- ret = rndis_filter_device_remove(dev);
- if (ret != 0) {
- /* TODO: */
- netdev_err(net, "unable to remove vsc device (ret %d)\n", ret);
- }
+ rndis_filter_device_remove(dev);
free_netdev(net);
- return ret;
+ return 0;
}
/* The one and only one */
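The avail-counter hunks above replace a plain counter with an atomic_t so the transmit and completion paths can adjust it without holding the device lock, with queue state driven by low/high watermarks. A minimal sketch of that flow control, with hypothetical names (ex_net_ctx and the watermark constants are placeholders):

	/* Illustrative only: page-count flow control around an atomic_t. */
	#define EX_PAGES_LOWATER	8
	#define EX_PAGES_HIWATER	32

	struct ex_net_ctx {
		atomic_t avail;		/* pages free for outbound packets */
	};

	static int ex_start_xmit(struct ex_net_ctx *ctx, struct net_device *net,
				 int num_pages)
	{
		if (num_pages > atomic_read(&ctx->avail))
			return NETDEV_TX_BUSY;

		atomic_sub(num_pages, &ctx->avail);
		if (atomic_read(&ctx->avail) < EX_PAGES_LOWATER)
			netif_stop_queue(net);
		return NETDEV_TX_OK;
	}

	static void ex_xmit_done(struct ex_net_ctx *ctx, struct net_device *net,
				 int num_pages)
	{
		atomic_add(num_pages, &ctx->avail);
		if (atomic_read(&ctx->avail) >= EX_PAGES_HIWATER)
			netif_wake_queue(net);
	}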
diff --git a/drivers/staging/hv/ring_buffer.c b/drivers/staging/hv/ring_buffer.c
index 3da333018b5..42f76728429 100644
--- a/drivers/staging/hv/ring_buffer.c
+++ b/drivers/staging/hv/ring_buffer.c
@@ -50,6 +50,8 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
{
u32 read_loc, write_loc;
+ smp_read_barrier_depends();
+
/* Capture the read/write indices before they changed */
read_loc = rbi->ring_buffer->read_index;
write_loc = rbi->ring_buffer->write_index;
@@ -411,7 +413,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
sizeof(u64));
/* Make sure we flush all writes before updating the writeIndex */
- mb();
+ smp_wmb();
/* Now, update the write location */
hv_set_next_write_location(outring_info, next_write_location);
@@ -513,7 +515,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
/* Make sure all reads are done before we update the read index since */
/* the writer may start writing to the read area once the read index */
/*is updated */
- mb();
+ smp_mb();
/* Update the read index */
hv_set_next_read_location(inring_info, next_read_location);
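The barrier changes above keep the usual producer/consumer ordering for a shared ring: the payload is written before the write index is published, and the payload is read before the read index is advanced. A sketch of that ordering with placeholder helpers (ex_ring, copy_in, copy_out and next_location are hypothetical):

	/*
	 * Illustrative only: the ordering the smp_wmb()/smp_mb() calls
	 * above enforce between payload access and index updates.
	 */
	static void ex_ring_write(struct ex_ring *ring, const void *data, u32 len)
	{
		copy_in(ring, ring->write_index, data, len);	/* 1. store payload */
		smp_wmb();					/* 2. order stores before publishing */
		ring->write_index = next_location(ring, len);	/* 3. publish to the reader */
	}

	static void ex_ring_read(struct ex_ring *ring, void *buf, u32 len)
	{
		copy_out(ring, ring->read_index, buf, len);	/* 1. load payload */
		smp_mb();					/* 2. finish loads before freeing space */
		ring->read_index = next_location(ring, len);	/* 3. let the writer reuse the slot */
	}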
diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/staging/hv/rndis_filter.c
index 60ebdb1b608..dbb52019975 100644
--- a/drivers/staging/hv/rndis_filter.c
+++ b/drivers/staging/hv/rndis_filter.c
@@ -139,14 +139,17 @@ static void put_rndis_request(struct rndis_device *dev,
kfree(req);
}
-static void dump_rndis_message(struct rndis_message *rndis_msg)
+static void dump_rndis_message(struct hv_device *hv_dev,
+ struct rndis_message *rndis_msg)
{
+ struct net_device *netdev = dev_get_drvdata(&hv_dev->device);
+
switch (rndis_msg->ndis_msg_type) {
case REMOTE_NDIS_PACKET_MSG:
- DPRINT_DBG(NETVSC, "REMOTE_NDIS_PACKET_MSG (len %u, "
+ netdev_dbg(netdev, "REMOTE_NDIS_PACKET_MSG (len %u, "
"data offset %u data len %u, # oob %u, "
"oob offset %u, oob len %u, pkt offset %u, "
- "pkt len %u",
+ "pkt len %u\n",
rndis_msg->msg_len,
rndis_msg->msg.pkt.data_offset,
rndis_msg->msg.pkt.data_len,
@@ -158,10 +161,10 @@ static void dump_rndis_message(struct rndis_message *rndis_msg)
break;
case REMOTE_NDIS_INITIALIZE_CMPLT:
- DPRINT_DBG(NETVSC, "REMOTE_NDIS_INITIALIZE_CMPLT "
+ netdev_dbg(netdev, "REMOTE_NDIS_INITIALIZE_CMPLT "
"(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
"device flags %d, max xfer size 0x%x, max pkts %u, "
- "pkt aligned %u)",
+ "pkt aligned %u)\n",
rndis_msg->msg_len,
rndis_msg->msg.init_complete.req_id,
rndis_msg->msg.init_complete.status,
@@ -176,9 +179,9 @@ static void dump_rndis_message(struct rndis_message *rndis_msg)
break;
case REMOTE_NDIS_QUERY_CMPLT:
- DPRINT_DBG(NETVSC, "REMOTE_NDIS_QUERY_CMPLT "
+ netdev_dbg(netdev, "REMOTE_NDIS_QUERY_CMPLT "
"(len %u, id 0x%x, status 0x%x, buf len %u, "
- "buf offset %u)",
+ "buf offset %u)\n",
rndis_msg->msg_len,
rndis_msg->msg.query_complete.req_id,
rndis_msg->msg.query_complete.status,
@@ -189,16 +192,16 @@ static void dump_rndis_message(struct rndis_message *rndis_msg)
break;
case REMOTE_NDIS_SET_CMPLT:
- DPRINT_DBG(NETVSC,
- "REMOTE_NDIS_SET_CMPLT (len %u, id 0x%x, status 0x%x)",
+ netdev_dbg(netdev,
+ "REMOTE_NDIS_SET_CMPLT (len %u, id 0x%x, status 0x%x)\n",
rndis_msg->msg_len,
rndis_msg->msg.set_complete.req_id,
rndis_msg->msg.set_complete.status);
break;
case REMOTE_NDIS_INDICATE_STATUS_MSG:
- DPRINT_DBG(NETVSC, "REMOTE_NDIS_INDICATE_STATUS_MSG "
- "(len %u, status 0x%x, buf len %u, buf offset %u)",
+ netdev_dbg(netdev, "REMOTE_NDIS_INDICATE_STATUS_MSG "
+ "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
rndis_msg->msg_len,
rndis_msg->msg.indicate_status.status,
rndis_msg->msg.indicate_status.status_buflen,
@@ -206,7 +209,7 @@ static void dump_rndis_message(struct rndis_message *rndis_msg)
break;
default:
- DPRINT_DBG(NETVSC, "0x%x (len %u)",
+ netdev_dbg(netdev, "0x%x (len %u)\n",
rndis_msg->ndis_msg_type,
rndis_msg->msg_len);
break;
@@ -372,24 +375,6 @@ int rndis_filter_receive(struct hv_device *dev,
pkt->page_buf[0].offset);
/* Make sure we got a valid rndis message */
- /*
- * FIXME: There seems to be a bug in set completion msg where its
- * MessageLength is 16 bytes but the ByteCount field in the xfer page
- * range shows 52 bytes
- * */
-#if 0
- if (pkt->total_data_buflen != rndis_hdr->msg_len) {
- kunmap_atomic(rndis_hdr - pkt->page_buf[0].offset,
- KM_IRQ0);
-
- dev_err(&dev->device, "invalid rndis message? (expected %u "
- "bytes got %u)...dropping this message!\n",
- rndis_hdr->msg_len,
- pkt->total_data_buflen);
- return -1;
- }
-#endif
-
if ((rndis_hdr->ndis_msg_type != REMOTE_NDIS_PACKET_MSG) &&
(rndis_hdr->msg_len > sizeof(struct rndis_message))) {
dev_err(&dev->device, "incoming rndis message buffer overflow "
@@ -405,7 +390,7 @@ int rndis_filter_receive(struct hv_device *dev,
kunmap_atomic(rndis_hdr - pkt->page_buf[0].offset, KM_IRQ0);
- dump_rndis_message(&rndis_msg);
+ dump_rndis_message(dev, &rndis_msg);
switch (rndis_msg.ndis_msg_type) {
case REMOTE_NDIS_PACKET_MSG:
@@ -467,7 +452,7 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
if (ret != 0)
goto Cleanup;
- t = wait_for_completion_timeout(&request->wait_event, HZ);
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto Cleanup;
@@ -543,7 +528,7 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
if (ret != 0)
goto Cleanup;
- t = wait_for_completion_timeout(&request->wait_event, HZ);
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -1;
@@ -600,7 +585,7 @@ static int rndis_filter_init_device(struct rndis_device *dev)
}
- t = wait_for_completion_timeout(&request->wait_event, HZ);
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
@@ -681,7 +666,7 @@ static int rndis_filter_close_device(struct rndis_device *dev)
return ret;
}
-int rndis_filte_device_add(struct hv_device *dev,
+int rndis_filter_device_add(struct hv_device *dev,
void *additional_info)
{
int ret;
@@ -741,7 +726,7 @@ int rndis_filte_device_add(struct hv_device *dev,
return ret;
}
-int rndis_filter_device_remove(struct hv_device *dev)
+void rndis_filter_device_remove(struct hv_device *dev)
{
struct netvsc_device *net_dev = dev->ext;
struct rndis_device *rndis_dev = net_dev->extension;
@@ -753,8 +738,6 @@ int rndis_filter_device_remove(struct hv_device *dev)
net_dev->extension = NULL;
netvsc_device_remove(dev);
-
- return 0;
}
diff --git a/drivers/staging/hv/storvsc.c b/drivers/staging/hv/storvsc.c
index 06cd3276813..30297861194 100644
--- a/drivers/staging/hv/storvsc.c
+++ b/drivers/staging/hv/storvsc.c
@@ -135,7 +135,7 @@ static int storvsc_channel_init(struct hv_device *device)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, HZ);
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
@@ -163,7 +163,7 @@ static int storvsc_channel_init(struct hv_device *device)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, HZ);
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
@@ -192,7 +192,7 @@ static int storvsc_channel_init(struct hv_device *device)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, HZ);
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
@@ -222,7 +222,7 @@ static int storvsc_channel_init(struct hv_device *device)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, HZ);
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index 942cc5f98db..7effaf32e25 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
+#include <linux/dmi.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@@ -44,7 +45,7 @@ MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
static const char *driver_name = "storvsc";
/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
-static const struct hv_guid gStorVscDeviceType = {
+static const struct hv_guid stor_vsci_device_type = {
.data = {
0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f
@@ -92,12 +93,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
STORVSC_MAX_IO_REQUESTS);
- DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting max segment size to %ld",
- sdevice, PAGE_SIZE);
blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
- DPRINT_INFO(STORVSC_DRV, "sdev (%p) - adding merge bio vec routine",
- sdevice);
blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);
blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
@@ -308,31 +305,21 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
}
-/*
- * storvsc_remove - Callback when our device is removed
- */
static int storvsc_remove(struct hv_device *dev)
{
struct Scsi_Host *host = dev_get_drvdata(&dev->device);
struct hv_host_device *host_dev =
(struct hv_host_device *)host->hostdata;
- /*
- * Call to the vsc driver to let it know that the device is being
- * removed
- */
- storvsc_dev_remove(dev);
+ scsi_remove_host(host);
+
+ scsi_host_put(host);
+ storvsc_dev_remove(dev);
if (host_dev->request_pool) {
kmem_cache_destroy(host_dev->request_pool);
host_dev->request_pool = NULL;
}
-
- DPRINT_INFO(STORVSC, "removing host adapter (%p)...", host);
- scsi_remove_host(host);
-
- DPRINT_INFO(STORVSC, "releasing host adapter (%p)...", host);
- scsi_host_put(host);
return 0;
}
@@ -357,9 +344,6 @@ static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
info[1] = sectors_pt;
info[2] = (int)cylinders;
- DPRINT_INFO(STORVSC_DRV, "CHS (%d, %d, %d)", (int)cylinders, heads,
- sectors_pt);
-
return 0;
}
@@ -370,7 +354,6 @@ static int storvsc_host_reset(struct hv_device *device)
struct vstor_packet *vstor_packet;
int ret, t;
- DPRINT_INFO(STORVSC, "resetting host adapter...");
stor_device = get_stor_device(device);
if (!stor_device)
@@ -393,13 +376,12 @@ static int storvsc_host_reset(struct hv_device *device)
if (ret != 0)
goto cleanup;
- t = wait_for_completion_timeout(&request->wait_event, HZ);
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
}
- DPRINT_INFO(STORVSC, "host adapter reset completed");
/*
* At this point, all outstanding requests in the adapter
@@ -422,17 +404,10 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
(struct hv_host_device *)scmnd->device->host->hostdata;
struct hv_device *dev = host_dev->dev;
- DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host resetting...",
- scmnd->device, dev);
-
- /* Invokes the vsc to reset the host/bus */
ret = storvsc_host_reset(dev);
if (ret != 0)
return ret;
- DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host reseted",
- scmnd->device, dev);
-
return ret;
}
@@ -479,7 +454,6 @@ static void storvsc_commmand_completion(struct hv_storvsc_request *request)
scmnd->host_scribble = NULL;
scmnd->scsi_done = NULL;
- /* !!DO NOT MODIFY the scmnd after this call */
scsi_done_fn(scmnd);
kmem_cache_free(host_dev->request_pool, cmd_request);
@@ -510,8 +484,6 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
cmd_request =
(struct storvsc_cmd_request *)scmnd->host_scribble;
- DPRINT_INFO(STORVSC_DRV, "retrying scmnd %p cmd_request %p",
- scmnd, cmd_request);
goto retry_request;
}
@@ -752,11 +724,28 @@ static struct hv_driver storvsc_drv = {
.remove = storvsc_remove,
};
-
/*
- * storvsc_drv_init - StorVsc driver initialization.
+ * We use a DMI table to determine if we should autoload this driver. This is
+ * needed by distro tools to determine if the hyperv drivers should be
+ * installed and/or configured. We don't do anything else with the table, but
+ * it needs to be present.
*/
-static int storvsc_drv_init(void)
+
+static const struct dmi_system_id __initconst
+hv_stor_dmi_table[] __maybe_unused = {
+ {
+ .ident = "Hyper-V",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
+ DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
+ },
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(dmi, hv_stor_dmi_table);
+
+static int __init storvsc_drv_init(void)
{
int ret;
struct hv_driver *drv = &storvsc_drv;
@@ -775,14 +764,13 @@ static int storvsc_drv_init(void)
sizeof(struct vstor_packet) + sizeof(u64),
sizeof(u64)));
- memcpy(&drv->dev_type, &gStorVscDeviceType,
+ memcpy(&drv->dev_type, &stor_vsci_device_type,
sizeof(struct hv_guid));
if (max_outstanding_req_per_channel <
STORVSC_MAX_IO_REQUESTS)
return -1;
- drv->name = driver_name;
drv->driver.name = driver_name;
@@ -792,27 +780,13 @@ static int storvsc_drv_init(void)
return ret;
}
-static void storvsc_drv_exit(void)
+static void __exit storvsc_drv_exit(void)
{
vmbus_child_driver_unregister(&storvsc_drv.driver);
}
-static int __init storvsc_init(void)
-{
- int ret;
-
- DPRINT_INFO(STORVSC_DRV, "Storvsc initializing....");
- ret = storvsc_drv_init();
- return ret;
-}
-
-static void __exit storvsc_exit(void)
-{
- storvsc_drv_exit();
-}
-
MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
-module_init(storvsc_init);
-module_exit(storvsc_exit);
+module_init(storvsc_drv_init);
+module_exit(storvsc_drv_exit);
diff --git a/drivers/staging/hv/tools/hv_kvp_daemon.c b/drivers/staging/hv/tools/hv_kvp_daemon.c
index 33f0f1c8ad7..a4a407f7052 100644
--- a/drivers/staging/hv/tools/hv_kvp_daemon.c
+++ b/drivers/staging/hv/tools/hv_kvp_daemon.c
@@ -35,7 +35,6 @@
#include <arpa/inet.h>
#include <linux/connector.h>
#include <linux/netlink.h>
-#include <sys/socket.h>
#include <ifaddrs.h>
#include <netdb.h>
#include <syslog.h>
diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
index ec1d38cd481..1c949f5fb71 100644
--- a/drivers/staging/hv/vmbus_drv.c
+++ b/drivers/staging/hv/vmbus_drv.c
@@ -39,7 +39,7 @@
#include "hyperv_vmbus.h"
-static struct pci_dev *hv_pci_dev;
+static struct acpi_device *hv_acpi_dev;
static struct tasklet_struct msg_dpc;
static struct tasklet_struct event_dpc;
@@ -49,7 +49,6 @@ EXPORT_SYMBOL(vmbus_loglevel);
/* (ALL_MODULES << 16 | DEBUG_LVL_ENTEREXIT); */
/* (((VMBUS | VMBUS_DRV)<<16) | DEBUG_LVL_ENTEREXIT); */
-static int pci_probe_error;
static struct completion probe_event;
static int irq;
@@ -108,12 +107,12 @@ static ssize_t vmbus_show_device_attr(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
- struct hv_device *device_ctx = device_to_hv_device(dev);
+ struct hv_device *hv_dev = device_to_hv_device(dev);
struct hv_device_info device_info;
memset(&device_info, 0, sizeof(struct hv_device_info));
- get_channel_info(device_ctx, &device_info);
+ get_channel_info(hv_dev, &device_info);
if (!strcmp(dev_attr->attr.name, "class_id")) {
return sprintf(buf, "{%02x%02x%02x%02x-%02x%02x-%02x%02x-"
@@ -301,10 +300,10 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
{
int match = 0;
struct hv_driver *drv = drv_to_hv_drv(driver);
- struct hv_device *device_ctx = device_to_hv_device(device);
+ struct hv_device *hv_dev = device_to_hv_device(device);
/* We found our driver ? */
- if (memcmp(&device_ctx->dev_type, &drv->dev_type,
+ if (memcmp(&hv_dev->dev_type, &drv->dev_type,
sizeof(struct hv_guid)) == 0)
match = 1;
@@ -330,7 +329,7 @@ static int vmbus_probe(struct device *child_device)
} else {
pr_err("probe not set for driver %s\n",
dev_name(child_device));
- ret = -1;
+ ret = -ENODEV;
}
return ret;
}
@@ -353,7 +352,7 @@ static int vmbus_remove(struct device *child_device)
} else {
pr_err("remove not set for driver %s\n",
dev_name(child_device));
- ret = -1;
+ ret = -ENODEV;
}
}
@@ -388,9 +387,9 @@ static void vmbus_shutdown(struct device *child_device)
*/
static void vmbus_device_release(struct device *device)
{
- struct hv_device *device_ctx = device_to_hv_device(device);
+ struct hv_device *hv_dev = device_to_hv_device(device);
- kfree(device_ctx);
+ kfree(hv_dev);
}
@@ -456,7 +455,7 @@ static void vmbus_on_msg_dpc(unsigned long data)
* will not deliver any more messages since there is
* no empty slot
*/
- mb();
+ smp_mb();
if (msg->header.message_flags.msg_pending) {
/*
@@ -487,7 +486,6 @@ static int vmbus_on_isr(void)
if (msg->header.message_type != HVMSG_NONE)
ret |= 0x1;
- /* TODO: Check if there are events to be process */
page_addr = hv_context.synic_event_page[cpu];
event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
@@ -528,7 +526,7 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
* - get the irq resource
* - retrieve the channel offers
*/
-static int vmbus_bus_init(struct pci_dev *pdev)
+static int vmbus_bus_init(int irq)
{
int ret;
unsigned int vector;
@@ -537,7 +535,7 @@ static int vmbus_bus_init(struct pci_dev *pdev)
ret = hv_init();
if (ret != 0) {
pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
- goto cleanup;
+ return ret;
}
/* Initialize the bus context */
@@ -546,27 +544,23 @@ static int vmbus_bus_init(struct pci_dev *pdev)
/* Now, register the bus with LDM */
ret = bus_register(&hv_bus);
- if (ret) {
- ret = -1;
- goto cleanup;
- }
+ if (ret)
+ return ret;
/* Get the interrupt resource */
- ret = request_irq(pdev->irq, vmbus_isr,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM,
- driver_name, pdev);
+ ret = request_irq(irq, vmbus_isr, IRQF_SAMPLE_RANDOM,
+ driver_name, hv_acpi_dev);
if (ret != 0) {
pr_err("Unable to request IRQ %d\n",
- pdev->irq);
+ irq);
bus_unregister(&hv_bus);
- ret = -1;
- goto cleanup;
+ return ret;
}
- vector = IRQ0_VECTOR + pdev->irq;
+ vector = IRQ0_VECTOR + irq;
/*
* Notify the hypervisor of our irq and
@@ -575,16 +569,15 @@ static int vmbus_bus_init(struct pci_dev *pdev)
on_each_cpu(hv_synic_init, (void *)&vector, 1);
ret = vmbus_connect();
if (ret) {
- free_irq(pdev->irq, pdev);
+ free_irq(irq, hv_acpi_dev);
bus_unregister(&hv_bus);
- goto cleanup;
+ return ret;
}
vmbus_request_offers();
-cleanup:
- return ret;
+ return 0;
}
/**
@@ -631,7 +624,6 @@ void vmbus_child_driver_unregister(struct device_driver *drv)
driver_unregister(drv);
- drv->bus = NULL;
}
EXPORT_SYMBOL(vmbus_child_driver_unregister);
@@ -676,7 +668,7 @@ int vmbus_child_device_register(struct hv_device *child_device_obj)
/* The new device belongs to this bus */
child_device_obj->device.bus = &hv_bus; /* device->dev.bus; */
- child_device_obj->device.parent = &hv_pci_dev->dev;
+ child_device_obj->device.parent = &hv_acpi_dev->dev;
child_device_obj->device.release = vmbus_device_release;
/*
@@ -733,6 +725,8 @@ static int vmbus_acpi_add(struct acpi_device *device)
{
acpi_status result;
+ hv_acpi_dev = device;
+
result =
acpi_walk_resources(device->handle, METHOD_NAME__CRS,
vmbus_walk_resources, &irq);
@@ -747,6 +741,7 @@ static int vmbus_acpi_add(struct acpi_device *device)
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
{"VMBUS", 0},
+ {"VMBus", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
@@ -759,53 +754,6 @@ static struct acpi_driver vmbus_acpi_driver = {
},
};
-static int vmbus_acpi_init(void)
-{
- int result;
-
-
- result = acpi_bus_register_driver(&vmbus_acpi_driver);
- if (result < 0)
- return result;
-
- return 0;
-}
-
-static void vmbus_acpi_exit(void)
-{
- acpi_bus_unregister_driver(&vmbus_acpi_driver);
-
- return;
-}
-
-
-static int __devinit hv_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- hv_pci_dev = pdev;
-
- pci_probe_error = pci_enable_device(pdev);
- if (pci_probe_error)
- goto probe_cleanup;
-
- /*
- * If the PCI sub-sytem did not assign us an
- * irq, use the bios provided one.
- */
-
- if (pdev->irq == 0)
- pdev->irq = irq;
-
- pci_probe_error = vmbus_bus_init(pdev);
-
- if (pci_probe_error)
- pci_disable_device(pdev);
-
-probe_cleanup:
- complete(&probe_event);
- return pci_probe_error;
-}
-
/*
* We use a PCI table to determine if we should autoload this driver. This is
* needed by distro tools to determine if the hyperv drivers should be
@@ -818,13 +766,7 @@ static const struct pci_device_id microsoft_hv_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, microsoft_hv_pci_table);
-static struct pci_driver hv_bus_driver = {
- .name = "hv_bus",
- .probe = hv_pci_probe,
- .id_table = microsoft_hv_pci_table,
-};
-
-static int __init hv_pci_init(void)
+static int __init hv_acpi_init(void)
{
int ret;
@@ -834,32 +776,22 @@ static int __init hv_pci_init(void)
* Get irq resources first.
*/
- ret = vmbus_acpi_init();
+ ret = acpi_bus_register_driver(&vmbus_acpi_driver);
+
if (ret)
return ret;
wait_for_completion(&probe_event);
if (irq <= 0) {
- vmbus_acpi_exit();
+ acpi_bus_unregister_driver(&vmbus_acpi_driver);
return -ENODEV;
}
- vmbus_acpi_exit();
- init_completion(&probe_event);
- ret = pci_register_driver(&hv_bus_driver);
+ ret = vmbus_bus_init(irq);
if (ret)
- return ret;
- /*
- * All the vmbus initialization occurs within the
- * hv_pci_probe() function. Wait for hv_pci_probe()
- * to complete.
- */
- wait_for_completion(&probe_event);
-
- if (pci_probe_error)
- pci_unregister_driver(&hv_bus_driver);
- return pci_probe_error;
+ acpi_bus_unregister_driver(&vmbus_acpi_driver);
+ return ret;
}
@@ -867,4 +799,4 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
module_param(vmbus_loglevel, int, S_IRUGO|S_IWUSR);
-module_init(hv_pci_init);
+module_init(hv_acpi_init);
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-light b/drivers/staging/iio/Documentation/sysfs-bus-iio-light
index 21d27740581..edbf470e4e3 100644
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-light
+++ b/drivers/staging/iio/Documentation/sysfs-bus-iio-light
@@ -75,3 +75,11 @@ KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
Description:
This property gets/sets the sensors ADC analog integration time.
+
+What: /sys/bus/iio/devices/device[n]/illuminance0_calibscale
+KernelVersion: 2.6.37
+Contact: linux-iio@vger.kernel.org
+Description:
+ Hardware- or software-applied calibration scale factor, assumed
+ to account for attenuation due to industrial design (glass
+ filters or aperture holes).
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index f96d5b5d514..d329635fb5c 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -4,7 +4,7 @@
menuconfig IIO
tristate "Industrial I/O support"
- depends on !S390
+ depends on GENERIC_HARDIRQS
help
The industrial I/O subsystem provides a unified framework for
drivers for many different types of embedded sensors using a
diff --git a/drivers/staging/iio/accel/adis16201.h b/drivers/staging/iio/accel/adis16201.h
index 4cc1a5bfab4..dac5540b5a8 100644
--- a/drivers/staging/iio/accel/adis16201.h
+++ b/drivers/staging/iio/accel/adis16201.h
@@ -64,19 +64,17 @@
/**
* struct adis16201_state - device instance specific data
* @us: actual spi_device
- * @indio_dev: industrial I/O device structure
* @trig: data ready trigger registered with iio
* @tx: transmit buffer
* @rx: receive buffer
* @buf_lock: mutex to protect tx and rx
**/
struct adis16201_state {
- struct spi_device *us;
- struct iio_dev *indio_dev;
- struct iio_trigger *trig;
- u8 *tx;
- u8 *rx;
- struct mutex buf_lock;
+ struct spi_device *us;
+ struct iio_trigger *trig;
+ struct mutex buf_lock;
+ u8 tx[14] ____cacheline_aligned;
+ u8 rx[14];
};
int adis16201_set_irq(struct iio_dev *indio_dev, bool enable);
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
index e4c49f00d13..2fd01aecdf9 100644
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -24,8 +24,6 @@
#include "adis16201.h"
-#define DRIVER_NAME "adis16201"
-
enum adis16201_chan {
in_supply,
temp,
@@ -42,13 +40,12 @@ enum adis16201_chan {
* @reg_address: the address of the register to be written
* @val: the value to write
**/
-static int adis16201_spi_write_reg_8(struct device *dev,
+static int adis16201_spi_write_reg_8(struct iio_dev *indio_dev,
u8 reg_address,
u8 val)
{
int ret;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16201_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = ADIS16201_WRITE_REG(reg_address);
@@ -73,7 +70,7 @@ static int adis16201_spi_write_reg_16(struct iio_dev *indio_dev,
{
int ret;
struct spi_message msg;
- struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16201_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
{
.tx_buf = st->tx,
@@ -114,7 +111,7 @@ static int adis16201_spi_read_reg_16(struct iio_dev *indio_dev,
u16 *val)
{
struct spi_message msg;
- struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16201_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -151,14 +148,16 @@ error_ret:
return ret;
}
-static int adis16201_reset(struct device *dev)
+static int adis16201_reset(struct iio_dev *indio_dev)
{
int ret;
- ret = adis16201_spi_write_reg_8(dev,
+ struct adis16201_state *st = iio_priv(indio_dev);
+
+ ret = adis16201_spi_write_reg_8(indio_dev,
ADIS16201_GLOB_CMD,
ADIS16201_GLOB_CMD_SW_RESET);
if (ret)
- dev_err(dev, "problem resetting device");
+ dev_err(&st->us->dev, "problem resetting device");
return ret;
}
@@ -167,15 +166,15 @@ static ssize_t adis16201_write_reset(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
+ int ret;
+ bool res;
+
if (len < 1)
return -EINVAL;
- switch (buf[0]) {
- case '1':
- case 'y':
- case 'Y':
- return adis16201_reset(dev);
- }
- return -EINVAL;
+ ret = strtobool(buf, &res);
+ if (ret || !res)
+ return ret;
+ return adis16201_reset(dev_get_drvdata(dev));
}
int adis16201_set_irq(struct iio_dev *indio_dev, bool enable)
@@ -245,41 +244,38 @@ err_ret:
return ret;
}
-static int adis16201_initial_setup(struct adis16201_state *st)
+static int adis16201_initial_setup(struct iio_dev *indio_dev)
{
int ret;
- struct device *dev = &st->indio_dev->dev;
+ struct device *dev = &indio_dev->dev;
/* Disable IRQ */
- ret = adis16201_set_irq(st->indio_dev, false);
+ ret = adis16201_set_irq(indio_dev, false);
if (ret) {
dev_err(dev, "disable irq failed");
goto err_ret;
}
/* Do self test */
- ret = adis16201_self_test(st->indio_dev);
+ ret = adis16201_self_test(indio_dev);
if (ret) {
dev_err(dev, "self test failure");
goto err_ret;
}
/* Read status register to check the result */
- ret = adis16201_check_status(st->indio_dev);
+ ret = adis16201_check_status(indio_dev);
if (ret) {
- adis16201_reset(dev);
+ adis16201_reset(indio_dev);
dev_err(dev, "device not playing ball -> reset");
msleep(ADIS16201_STARTUP_DELAY);
- ret = adis16201_check_status(st->indio_dev);
+ ret = adis16201_check_status(indio_dev);
if (ret) {
dev_err(dev, "giving up");
goto err_ret;
}
}
- printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
- st->us->chip_select, st->us->irq);
-
err_ret:
return ret;
}
@@ -309,13 +305,17 @@ static int adis16201_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
addr = adis16201_addresses[chan->address][0];
ret = adis16201_spi_read_reg_16(indio_dev, addr, &val16);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
if (val16 & ADIS16201_ERROR_ACTIVE) {
ret = adis16201_check_status(indio_dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
}
val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
if (chan->scan_type.sign == 's')
@@ -467,53 +467,40 @@ static const struct iio_info adis16201_info = {
static int __devinit adis16201_probe(struct spi_device *spi)
{
int ret, regdone = 0;
- struct adis16201_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
+ struct adis16201_state *st;
+ struct iio_dev *indio_dev;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
goto error_ret;
}
+ st = iio_priv(indio_dev);
/* this is only used for removal purposes */
- spi_set_drvdata(spi, st);
+ spi_set_drvdata(spi, indio_dev);
- /* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADIS16201_MAX_RX, GFP_KERNEL);
- if (st->rx == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->tx = kzalloc(sizeof(*st->tx)*ADIS16201_MAX_TX, GFP_KERNEL);
- if (st->tx == NULL) {
- ret = -ENOMEM;
- goto error_free_rx;
- }
st->us = spi;
mutex_init(&st->buf_lock);
- /* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_tx;
- }
- st->indio_dev->name = spi->dev.driver->name;
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->info = &adis16201_info;
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adis16201_info;
- st->indio_dev->channels = adis16201_channels;
- st->indio_dev->num_channels = ARRAY_SIZE(adis16201_channels);
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = adis16201_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16201_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = adis16201_configure_ring(st->indio_dev);
+ ret = adis16201_configure_ring(indio_dev);
if (ret)
goto error_free_dev;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_unreg_ring_funcs;
regdone = 1;
- ret = iio_ring_buffer_register_ex(st->indio_dev->ring, 0,
+ ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
adis16201_channels,
ARRAY_SIZE(adis16201_channels));
if (ret) {
@@ -522,50 +509,40 @@ static int __devinit adis16201_probe(struct spi_device *spi)
}
if (spi->irq) {
- ret = adis16201_probe_trigger(st->indio_dev);
+ ret = adis16201_probe_trigger(indio_dev);
if (ret)
goto error_uninitialize_ring;
}
/* Get the device into a sane initial state */
- ret = adis16201_initial_setup(st);
+ ret = adis16201_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
return 0;
error_remove_trigger:
- adis16201_remove_trigger(st->indio_dev);
+ adis16201_remove_trigger(indio_dev);
error_uninitialize_ring:
- iio_ring_buffer_unregister(st->indio_dev->ring);
+ iio_ring_buffer_unregister(indio_dev->ring);
error_unreg_ring_funcs:
- adis16201_unconfigure_ring(st->indio_dev);
+ adis16201_unconfigure_ring(indio_dev);
error_free_dev:
if (regdone)
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
else
- iio_free_device(st->indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
static int adis16201_remove(struct spi_device *spi)
{
- struct adis16201_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
adis16201_remove_trigger(indio_dev);
iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
adis16201_unconfigure_ring(indio_dev);
- kfree(st->tx);
- kfree(st->rx);
- kfree(st);
return 0;
}
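That probe/remove rework is the heart of the whole series: the driver state is allocated together with the iio_dev via iio_allocate_device(sizeof(*st)), fetched back with iio_priv(), and spi_set_drvdata() now stores the iio_dev itself, so remove() no longer needs a back-pointer kept inside the state. A stripped-down sketch of that shape, with ring/trigger setup and most error handling omitted and all foo_* names hypothetical (the include path matches the staging layout of this era and is an assumption):

#include <linux/spi/spi.h>
#include <linux/mutex.h>
#include "../iio.h"

struct foo_state {
	struct spi_device	*us;
	struct mutex		buf_lock;
};

static int foo_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	struct foo_state *st;
	int ret;

	/* state lives in the same allocation as the iio_dev */
	indio_dev = iio_allocate_device(sizeof(*st));
	if (indio_dev == NULL)
		return -ENOMEM;
	st = iio_priv(indio_dev);
	st->us = spi;
	mutex_init(&st->buf_lock);

	/* remove() only ever needs the iio_dev, so that is what is stashed */
	spi_set_drvdata(spi, indio_dev);

	indio_dev->name = spi->dev.driver->name;
	indio_dev->dev.parent = &spi->dev;
	indio_dev->modes = INDIO_DIRECT_MODE;

	ret = iio_device_register(indio_dev);
	if (ret)
		iio_free_device(indio_dev);
	return ret;
}

static int foo_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);

	/* no kfree()s left: unregistering releases the iio_dev and the state */
	iio_device_unregister(indio_dev);
	return 0;
}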
diff --git a/drivers/staging/iio/accel/adis16201_ring.c b/drivers/staging/iio/accel/adis16201_ring.c
index c61f981255c..66e708ddf8b 100644
--- a/drivers/staging/iio/accel/adis16201_ring.c
+++ b/drivers/staging/iio/accel/adis16201_ring.c
@@ -23,7 +23,7 @@
static int adis16201_read_ring_data(struct iio_dev *indio_dev, u8 *rx)
{
struct spi_message msg;
- struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16201_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[ADIS16201_OUTPUTS + 1];
int ret;
int i;
@@ -63,7 +63,7 @@ static irqreturn_t adis16201_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->private_data;
- struct adis16201_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16201_state *st = iio_priv(indio_dev);
struct iio_ring_buffer *ring = indio_dev->ring;
int i = 0;
@@ -77,7 +77,7 @@ static irqreturn_t adis16201_trigger_handler(int irq, void *p)
}
if (ring->scan_count)
- if (adis16201_read_ring_data(st->indio_dev, st->rx) >= 0)
+ if (adis16201_read_ring_data(indio_dev, st->rx) >= 0)
for (; i < ring->scan_count; i++)
data[i] = be16_to_cpup(
(__be16 *)&(st->rx[i*2]));
@@ -88,7 +88,7 @@ static irqreturn_t adis16201_trigger_handler(int irq, void *p)
ring->access->store_to(ring, (u8 *)data, pf->timestamp);
- iio_trigger_notify_done(st->indio_dev->trig);
+ iio_trigger_notify_done(indio_dev->trig);
kfree(data);
return IRQ_HANDLED;
diff --git a/drivers/staging/iio/accel/adis16201_trigger.c b/drivers/staging/iio/accel/adis16201_trigger.c
index bea917e03b4..3a95c083b45 100644
--- a/drivers/staging/iio/accel/adis16201_trigger.c
+++ b/drivers/staging/iio/accel/adis16201_trigger.c
@@ -17,17 +17,16 @@
static int adis16201_data_rdy_trigger_set_state(struct iio_trigger *trig,
bool state)
{
- struct adis16201_state *st = trig->private_data;
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = trig->private_data;
dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
- return adis16201_set_irq(st->indio_dev, state);
+ return adis16201_set_irq(indio_dev, state);
}
int adis16201_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
- struct adis16201_state *st = indio_dev->dev_data;
+ struct adis16201_state *st = iio_priv(indio_dev);
st->trig = iio_allocate_trigger("adis16201-dev%d", indio_dev->id);
if (st->trig == NULL) {
@@ -43,7 +42,7 @@ int adis16201_probe_trigger(struct iio_dev *indio_dev)
goto error_free_trig;
st->trig->dev.parent = &st->us->dev;
st->trig->owner = THIS_MODULE;
- st->trig->private_data = st;
+ st->trig->private_data = indio_dev;
st->trig->set_trigger_state = &adis16201_data_rdy_trigger_set_state;
ret = iio_trigger_register(st->trig);
@@ -64,7 +63,7 @@ error_ret:
void adis16201_remove_trigger(struct iio_dev *indio_dev)
{
- struct adis16201_state *state = indio_dev->dev_data;
+ struct adis16201_state *state = iio_priv(indio_dev);
iio_trigger_unregister(state->trig);
free_irq(state->us->irq, state->trig);
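With the back-pointer gone from the state, the trigger's private_data switches from the state to the iio_dev, and the set_state callback recovers whatever it needs through iio_priv(). A sketch of the resulting wiring, built against the staging-era ../iio.h and ../trigger.h headers and assuming the foo_state sketched earlier gains a struct iio_trigger *trig member; foo_* names are hypothetical and error unwinding is trimmed:

/* hypothetical stand-in for adis16201_set_irq() */
static int foo_set_irq(struct iio_dev *indio_dev, bool enable);

static int foo_data_rdy_trigger_set_state(struct iio_trigger *trig,
					   bool state)
{
	struct iio_dev *indio_dev = trig->private_data;

	return foo_set_irq(indio_dev, state);
}

static int foo_probe_trigger(struct iio_dev *indio_dev)
{
	struct foo_state *st = iio_priv(indio_dev);

	st->trig = iio_allocate_trigger("foo-dev%d", indio_dev->id);
	if (st->trig == NULL)
		return -ENOMEM;

	st->trig->dev.parent = &st->us->dev;
	st->trig->owner = THIS_MODULE;
	st->trig->private_data = indio_dev;	/* the iio_dev, not st */
	st->trig->set_trigger_state = &foo_data_rdy_trigger_set_state;

	return iio_trigger_register(st->trig);
}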
diff --git a/drivers/staging/iio/accel/adis16203.h b/drivers/staging/iio/accel/adis16203.h
index 175e21bb9b4..4071bc0d69a 100644
--- a/drivers/staging/iio/accel/adis16203.h
+++ b/drivers/staging/iio/accel/adis16203.h
@@ -59,19 +59,17 @@
/**
* struct adis16203_state - device instance specific data
* @us: actual spi_device
- * @indio_dev: industrial I/O device structure
* @trig: data ready trigger registered with iio
* @tx: transmit buffer
* @rx: receive buffer
* @buf_lock: mutex to protect tx and rx
**/
struct adis16203_state {
- struct spi_device *us;
- struct iio_dev *indio_dev;
- struct iio_trigger *trig;
- u8 *tx;
- u8 *rx;
- struct mutex buf_lock;
+ struct spi_device *us;
+ struct iio_trigger *trig;
+ struct mutex buf_lock;
+ u8 tx[ADIS16203_MAX_TX] ____cacheline_aligned;
+ u8 rx[ADIS16203_MAX_RX];
};
int adis16203_set_irq(struct iio_dev *indio_dev, bool enable);
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index 36be4d5dc61..cf5d15da76a 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -36,7 +36,7 @@ static int adis16203_spi_write_reg_8(struct iio_dev *indio_dev,
u8 val)
{
int ret;
- struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16203_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = ADIS16203_WRITE_REG(reg_address);
@@ -61,7 +61,7 @@ static int adis16203_spi_write_reg_16(struct iio_dev *indio_dev,
{
int ret;
struct spi_message msg;
- struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16203_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
{
.tx_buf = st->tx,
@@ -102,7 +102,7 @@ static int adis16203_spi_read_reg_16(struct iio_dev *indio_dev,
u16 *val)
{
struct spi_message msg;
- struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16203_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -311,13 +311,17 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
addr = adis16203_addresses[chan->address][0];
ret = adis16203_spi_read_reg_16(indio_dev, addr, &val16);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
if (val16 & ADIS16203_ERROR_ACTIVE) {
ret = adis16203_check_status(indio_dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
}
val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
if (chan->scan_type.sign == 's')
@@ -418,51 +422,38 @@ static const struct iio_info adis16203_info = {
static int __devinit adis16203_probe(struct spi_device *spi)
{
int ret, regdone = 0;
- struct adis16203_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
- goto error_ret;
- }
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, st);
+ struct iio_dev *indio_dev;
+ struct adis16203_state *st;
- /* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADIS16203_MAX_RX, GFP_KERNEL);
- if (st->rx == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->tx = kzalloc(sizeof(*st->tx)*ADIS16203_MAX_TX, GFP_KERNEL);
- if (st->tx == NULL) {
+ /* setup the industrialio driver allocated elements */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
- goto error_free_rx;
+ goto error_ret;
}
+ st = iio_priv(indio_dev);
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, indio_dev);
st->us = spi;
mutex_init(&st->buf_lock);
- /* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_tx;
- }
- st->indio_dev->name = spi->dev.driver->name;
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->channels = adis16203_channels;
- st->indio_dev->num_channels = ARRAY_SIZE(adis16203_channels);
- st->indio_dev->info = &adis16203_info;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis16203_configure_ring(st->indio_dev);
+
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->channels = adis16203_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16203_channels);
+ indio_dev->info = &adis16203_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = adis16203_configure_ring(indio_dev);
if (ret)
goto error_free_dev;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_unreg_ring_funcs;
regdone = 1;
- ret = iio_ring_buffer_register_ex(st->indio_dev->ring, 0,
+ ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
adis16203_channels,
ARRAY_SIZE(adis16203_channels));
if (ret) {
@@ -471,50 +462,40 @@ static int __devinit adis16203_probe(struct spi_device *spi)
}
if (spi->irq) {
- ret = adis16203_probe_trigger(st->indio_dev);
+ ret = adis16203_probe_trigger(indio_dev);
if (ret)
goto error_uninitialize_ring;
}
/* Get the device into a sane initial state */
- ret = adis16203_initial_setup(st->indio_dev);
+ ret = adis16203_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
return 0;
error_remove_trigger:
- adis16203_remove_trigger(st->indio_dev);
+ adis16203_remove_trigger(indio_dev);
error_uninitialize_ring:
- iio_ring_buffer_unregister(st->indio_dev->ring);
+ iio_ring_buffer_unregister(indio_dev->ring);
error_unreg_ring_funcs:
- adis16203_unconfigure_ring(st->indio_dev);
+ adis16203_unconfigure_ring(indio_dev);
error_free_dev:
if (regdone)
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
else
- iio_free_device(st->indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
static int adis16203_remove(struct spi_device *spi)
{
- struct adis16203_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
adis16203_remove_trigger(indio_dev);
iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
adis16203_unconfigure_ring(indio_dev);
- kfree(st->tx);
- kfree(st->rx);
- kfree(st);
return 0;
}
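The read_raw hunks in adis16201, adis16203, adis16204, adis16209 and adis16240 all plug the same leak: the early returns after the register read and after the status check used to leave indio_dev->mlock held. A condensed sketch of the corrected flow (a single goto-based unlock label would be an equally valid shape); the foo_* helpers and FOO_ERROR_ACTIVE are placeholders:

/* hypothetical register-access helpers */
static int foo_spi_read_reg_16(struct iio_dev *indio_dev, u8 addr, u16 *val);
static int foo_check_status(struct iio_dev *indio_dev);

#define FOO_ERROR_ACTIVE	(1 << 14)	/* placeholder error flag */

static int foo_read_reg_checked(struct iio_dev *indio_dev, u8 addr, u16 *out)
{
	u16 val16;
	int ret;

	mutex_lock(&indio_dev->mlock);
	ret = foo_spi_read_reg_16(indio_dev, addr, &val16);
	if (ret) {
		mutex_unlock(&indio_dev->mlock);	/* previously skipped */
		return ret;
	}
	if (val16 & FOO_ERROR_ACTIVE) {
		ret = foo_check_status(indio_dev);
		if (ret) {
			mutex_unlock(&indio_dev->mlock);	/* likewise */
			return ret;
		}
	}
	*out = val16;
	mutex_unlock(&indio_dev->mlock);
	return 0;
}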
diff --git a/drivers/staging/iio/accel/adis16203_ring.c b/drivers/staging/iio/accel/adis16203_ring.c
index a9a789d79c0..d2c07c52746 100644
--- a/drivers/staging/iio/accel/adis16203_ring.c
+++ b/drivers/staging/iio/accel/adis16203_ring.c
@@ -26,7 +26,7 @@ static int adis16203_read_ring_data(struct device *dev, u8 *rx)
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16203_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[ADIS16203_OUTPUTS + 1];
int ret;
int i;
@@ -68,7 +68,7 @@ static irqreturn_t adis16203_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->private_data;
- struct adis16203_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16203_state *st = iio_priv(indio_dev);
struct iio_ring_buffer *ring = indio_dev->ring;
int i = 0;
@@ -82,7 +82,7 @@ static irqreturn_t adis16203_trigger_handler(int irq, void *p)
}
if (ring->scan_count)
- if (adis16203_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
+ if (adis16203_read_ring_data(&indio_dev->dev, st->rx) >= 0)
for (; i < ring->scan_count; i++)
data[i] = be16_to_cpup(
(__be16 *)&(st->rx[i*2]));
@@ -95,7 +95,7 @@ static irqreturn_t adis16203_trigger_handler(int irq, void *p)
(u8 *)data,
pf->timestamp);
- iio_trigger_notify_done(st->indio_dev->trig);
+ iio_trigger_notify_done(indio_dev->trig);
kfree(data);
return IRQ_HANDLED;
diff --git a/drivers/staging/iio/accel/adis16203_trigger.c b/drivers/staging/iio/accel/adis16203_trigger.c
index ca5db173198..3caf3e8bc9d 100644
--- a/drivers/staging/iio/accel/adis16203_trigger.c
+++ b/drivers/staging/iio/accel/adis16203_trigger.c
@@ -18,17 +18,16 @@
static int adis16203_data_rdy_trigger_set_state(struct iio_trigger *trig,
bool state)
{
- struct adis16203_state *st = trig->private_data;
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = trig->private_data;
dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
- return adis16203_set_irq(st->indio_dev, state);
+ return adis16203_set_irq(indio_dev, state);
}
int adis16203_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
- struct adis16203_state *st = indio_dev->dev_data;
+ struct adis16203_state *st = iio_priv(indio_dev);
st->trig = iio_allocate_trigger("adis16203-dev%d", indio_dev->id);
if (st->trig == NULL) {
@@ -46,7 +45,7 @@ int adis16203_probe_trigger(struct iio_dev *indio_dev)
st->trig->dev.parent = &st->us->dev;
st->trig->owner = THIS_MODULE;
- st->trig->private_data = st;
+ st->trig->private_data = indio_dev;
st->trig->set_trigger_state = &adis16203_data_rdy_trigger_set_state;
ret = iio_trigger_register(st->trig);
@@ -67,9 +66,9 @@ error_ret:
void adis16203_remove_trigger(struct iio_dev *indio_dev)
{
- struct adis16203_state *state = indio_dev->dev_data;
+ struct adis16203_state *st = iio_priv(indio_dev);
- iio_trigger_unregister(state->trig);
- free_irq(state->us->irq, state->trig);
- iio_free_trigger(state->trig);
+ iio_trigger_unregister(st->trig);
+ free_irq(st->us->irq, st->trig);
+ iio_free_trigger(st->trig);
}
diff --git a/drivers/staging/iio/accel/adis16204.h b/drivers/staging/iio/accel/adis16204.h
index 5310a429768..3bb0490465f 100644
--- a/drivers/staging/iio/accel/adis16204.h
+++ b/drivers/staging/iio/accel/adis16204.h
@@ -67,24 +67,21 @@
/**
* struct adis16204_state - device instance specific data
* @us: actual spi_device
- * @indio_dev: industrial I/O device structure
* @trig: data ready trigger registered with iio
* @tx: transmit buffer
* @rx: receive buffer
* @buf_lock: mutex to protect tx and rx
**/
struct adis16204_state {
- struct spi_device *us;
- struct iio_dev *indio_dev;
- struct iio_trigger *trig;
- u8 *tx;
- u8 *rx;
- struct mutex buf_lock;
+ struct spi_device *us;
+ struct iio_trigger *trig;
+ struct mutex buf_lock;
+ u8 tx[ADIS16204_MAX_TX] ____cacheline_aligned;
+ u8 rx[ADIS16204_MAX_RX];
};
int adis16204_set_irq(struct iio_dev *indio_dev, bool enable);
-#ifdef CONFIG_IIO_RING_BUFFER
enum adis16204_scan {
ADIS16204_SCAN_SUPPLY,
ADIS16204_SCAN_ACC_X,
@@ -93,6 +90,7 @@ enum adis16204_scan {
ADIS16204_SCAN_TEMP,
};
+#ifdef CONFIG_IIO_RING_BUFFER
void adis16204_remove_trigger(struct iio_dev *indio_dev);
int adis16204_probe_trigger(struct iio_dev *indio_dev);
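adis16204.h (and the adis16209/adis16240 headers further down) also pull the scan index definitions out of the #ifdef CONFIG_IIO_RING_BUFFER block, since the core file uses them whether or not ring buffer support is built. The resulting header layout, sketched with placeholder names:

/* scan indexes: needed unconditionally by the core driver */
enum foo_scan {
	FOO_SCAN_SUPPLY,
	FOO_SCAN_ACC_X,
	FOO_SCAN_ACC_Y,
	FOO_SCAN_AUX_ADC,
	FOO_SCAN_TEMP,
};

#ifdef CONFIG_IIO_RING_BUFFER
/* only the trigger and ring plumbing stays conditional */
void foo_remove_trigger(struct iio_dev *indio_dev);
int foo_probe_trigger(struct iio_dev *indio_dev);
#endif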
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
index 16806704bf4..3e2b62654b7 100644
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -39,7 +39,7 @@ static int adis16204_spi_write_reg_8(struct iio_dev *indio_dev,
u8 val)
{
int ret;
- struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16204_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = ADIS16204_WRITE_REG(reg_address);
@@ -64,7 +64,7 @@ static int adis16204_spi_write_reg_16(struct iio_dev *indio_dev,
{
int ret;
struct spi_message msg;
- struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16204_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
{
.tx_buf = st->tx,
@@ -106,7 +106,7 @@ static int adis16204_spi_read_reg_16(struct iio_dev *indio_dev,
u16 *val)
{
struct spi_message msg;
- struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16204_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -341,13 +341,17 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
addr = adis16204_addresses[chan->address][0];
ret = adis16204_spi_read_reg_16(indio_dev, addr, &val16);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
if (val16 & ADIS16204_ERROR_ACTIVE) {
ret = adis16204_check_status(indio_dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
}
val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
if (chan->scan_type.sign == 's')
@@ -483,52 +487,38 @@ static const struct iio_info adis16204_info = {
static int __devinit adis16204_probe(struct spi_device *spi)
{
int ret, regdone = 0;
- struct adis16204_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
- goto error_ret;
- }
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, st);
+ struct adis16204_state *st;
+ struct iio_dev *indio_dev;
- /* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADIS16204_MAX_RX, GFP_KERNEL);
- if (st->rx == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->tx = kzalloc(sizeof(*st->tx)*ADIS16204_MAX_TX, GFP_KERNEL);
- if (st->tx == NULL) {
+ /* setup the industrialio driver allocated elements */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
- goto error_free_rx;
+ goto error_ret;
}
+ st = iio_priv(indio_dev);
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, indio_dev);
st->us = spi;
mutex_init(&st->buf_lock);
- /* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_tx;
- }
- st->indio_dev->name = spi->dev.driver->name;
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->info = &adis16204_info;
- st->indio_dev->channels = adis16204_channels;
- st->indio_dev->num_channels = ARRAY_SIZE(adis16204_channels);
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adis16204_info;
+ indio_dev->channels = adis16204_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16204_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = adis16204_configure_ring(st->indio_dev);
+ ret = adis16204_configure_ring(indio_dev);
if (ret)
goto error_free_dev;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_unreg_ring_funcs;
regdone = 1;
- ret = iio_ring_buffer_register_ex(st->indio_dev->ring, 0,
+ ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
adis16204_channels,
ARRAY_SIZE(adis16204_channels));
if (ret) {
@@ -537,50 +527,40 @@ static int __devinit adis16204_probe(struct spi_device *spi)
}
if (spi->irq) {
- ret = adis16204_probe_trigger(st->indio_dev);
+ ret = adis16204_probe_trigger(indio_dev);
if (ret)
goto error_uninitialize_ring;
}
/* Get the device into a sane initial state */
- ret = adis16204_initial_setup(st->indio_dev);
+ ret = adis16204_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
return 0;
error_remove_trigger:
- adis16204_remove_trigger(st->indio_dev);
+ adis16204_remove_trigger(indio_dev);
error_uninitialize_ring:
- iio_ring_buffer_unregister(st->indio_dev->ring);
+ iio_ring_buffer_unregister(indio_dev->ring);
error_unreg_ring_funcs:
- adis16204_unconfigure_ring(st->indio_dev);
+ adis16204_unconfigure_ring(indio_dev);
error_free_dev:
if (regdone)
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
else
- iio_free_device(st->indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
static int adis16204_remove(struct spi_device *spi)
{
- struct adis16204_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
adis16204_remove_trigger(indio_dev);
- iio_ring_buffer_unregister(st->indio_dev->ring);
+ iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
adis16204_unconfigure_ring(indio_dev);
- kfree(st->tx);
- kfree(st->rx);
- kfree(st);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16204_ring.c b/drivers/staging/iio/accel/adis16204_ring.c
index a2d36fb822e..852df06684d 100644
--- a/drivers/staging/iio/accel/adis16204_ring.c
+++ b/drivers/staging/iio/accel/adis16204_ring.c
@@ -26,7 +26,7 @@ static int adis16204_read_ring_data(struct device *dev, u8 *rx)
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16204_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[ADIS16204_OUTPUTS + 1];
int ret;
int i;
@@ -66,7 +66,7 @@ static irqreturn_t adis16204_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->private_data;
- struct adis16204_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16204_state *st = iio_priv(indio_dev);
struct iio_ring_buffer *ring = indio_dev->ring;
int i = 0;
s16 *data;
@@ -79,7 +79,7 @@ static irqreturn_t adis16204_trigger_handler(int irq, void *p)
}
if (ring->scan_count)
- if (adis16204_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
+ if (adis16204_read_ring_data(&indio_dev->dev, st->rx) >= 0)
for (; i < ring->scan_count; i++)
data[i] = be16_to_cpup(
(__be16 *)&(st->rx[i*2]));
@@ -90,7 +90,7 @@ static irqreturn_t adis16204_trigger_handler(int irq, void *p)
ring->access->store_to(ring, (u8 *)data, pf->timestamp);
- iio_trigger_notify_done(st->indio_dev->trig);
+ iio_trigger_notify_done(indio_dev->trig);
kfree(data);
return IRQ_HANDLED;
diff --git a/drivers/staging/iio/accel/adis16204_trigger.c b/drivers/staging/iio/accel/adis16204_trigger.c
index 5e1f9ae9d5c..01f73b9b888 100644
--- a/drivers/staging/iio/accel/adis16204_trigger.c
+++ b/drivers/staging/iio/accel/adis16204_trigger.c
@@ -18,17 +18,16 @@
static int adis16204_data_rdy_trigger_set_state(struct iio_trigger *trig,
bool state)
{
- struct adis16204_state *st = trig->private_data;
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = trig->private_data;
dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
- return adis16204_set_irq(st->indio_dev, state);
+ return adis16204_set_irq(indio_dev, state);
}
int adis16204_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
- struct adis16204_state *st = indio_dev->dev_data;
+ struct adis16204_state *st = iio_priv(indio_dev);
st->trig = iio_allocate_trigger("adis16204-dev%d", indio_dev->id);
if (st->trig == NULL) {
@@ -46,7 +45,7 @@ int adis16204_probe_trigger(struct iio_dev *indio_dev)
st->trig->dev.parent = &st->us->dev;
st->trig->owner = THIS_MODULE;
- st->trig->private_data = st;
+ st->trig->private_data = indio_dev;
st->trig->set_trigger_state = &adis16204_data_rdy_trigger_set_state;
ret = iio_trigger_register(st->trig);
@@ -67,7 +66,7 @@ error_ret:
void adis16204_remove_trigger(struct iio_dev *indio_dev)
{
- struct adis16204_state *state = indio_dev->dev_data;
+ struct adis16204_state *state = iio_priv(indio_dev);
iio_trigger_unregister(state->trig);
free_irq(state->us->irq, state->trig);
diff --git a/drivers/staging/iio/accel/adis16209.h b/drivers/staging/iio/accel/adis16209.h
index 58d08db6f9b..c8b7b00d417 100644
--- a/drivers/staging/iio/accel/adis16209.h
+++ b/drivers/staging/iio/accel/adis16209.h
@@ -104,25 +104,21 @@
/**
* struct adis16209_state - device instance specific data
* @us: actual spi_device
- * @indio_dev: industrial I/O device structure
* @trig: data ready trigger registered with iio
* @tx: transmit buffer
* @rx: receive buffer
* @buf_lock: mutex to protect tx and rx
**/
struct adis16209_state {
- struct spi_device *us;
- struct iio_dev *indio_dev;
- struct iio_trigger *trig;
- u8 *tx;
- u8 *rx;
- struct mutex buf_lock;
+ struct spi_device *us;
+ struct iio_trigger *trig;
+ struct mutex buf_lock;
+ u8 tx[ADIS16209_MAX_TX] ____cacheline_aligned;
+ u8 rx[ADIS16209_MAX_RX];
};
int adis16209_set_irq(struct iio_dev *indio_dev, bool enable);
-#ifdef CONFIG_IIO_RING_BUFFER
-
#define ADIS16209_SCAN_SUPPLY 0
#define ADIS16209_SCAN_ACC_X 1
#define ADIS16209_SCAN_ACC_Y 2
@@ -132,6 +128,8 @@ int adis16209_set_irq(struct iio_dev *indio_dev, bool enable);
#define ADIS16209_SCAN_INCLI_Y 6
#define ADIS16209_SCAN_ROT 7
+#ifdef CONFIG_IIO_RING_BUFFER
+
void adis16209_remove_trigger(struct iio_dev *indio_dev);
int adis16209_probe_trigger(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index c423cc96025..bec1fa8de9b 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -37,7 +37,7 @@ static int adis16209_spi_write_reg_8(struct iio_dev *indio_dev,
u8 val)
{
int ret;
- struct adis16209_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16209_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = ADIS16209_WRITE_REG(reg_address);
@@ -62,7 +62,7 @@ static int adis16209_spi_write_reg_16(struct iio_dev *indio_dev,
{
int ret;
struct spi_message msg;
- struct adis16209_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16209_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
{
.tx_buf = st->tx,
@@ -105,7 +105,7 @@ static int adis16209_spi_read_reg_16(struct iio_dev *indio_dev,
u16 *val)
{
struct spi_message msg;
- struct adis16209_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16209_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -337,13 +337,17 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
addr = adis16209_addresses[chan->address][0];
ret = adis16209_spi_read_reg_16(indio_dev, addr, &val16);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
if (val16 & ADIS16209_ERROR_ACTIVE) {
ret = adis16209_check_status(indio_dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
}
val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
if (chan->scan_type.sign == 's')
@@ -466,52 +470,38 @@ static const struct iio_info adis16209_info = {
static int __devinit adis16209_probe(struct spi_device *spi)
{
int ret, regdone = 0;
- struct adis16209_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
- goto error_ret;
- }
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, st);
+ struct adis16209_state *st;
+ struct iio_dev *indio_dev;
- /* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADIS16209_MAX_RX, GFP_KERNEL);
- if (st->rx == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->tx = kzalloc(sizeof(*st->tx)*ADIS16209_MAX_TX, GFP_KERNEL);
- if (st->tx == NULL) {
+ /* setup the industrialio driver allocated elements */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
- goto error_free_rx;
+ goto error_ret;
}
+ st = iio_priv(indio_dev);
+ /* this is only used for removal purposes */
+ spi_set_drvdata(spi, indio_dev);
st->us = spi;
mutex_init(&st->buf_lock);
- /* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_tx;
- }
- st->indio_dev->name = spi->dev.driver->name;
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->info = &adis16209_info;
- st->indio_dev->channels = adis16209_channels;
- st->indio_dev->num_channels = ARRAY_SIZE(adis16209_channels);
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adis16209_info;
+ indio_dev->channels = adis16209_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16209_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = adis16209_configure_ring(st->indio_dev);
+ ret = adis16209_configure_ring(indio_dev);
if (ret)
goto error_free_dev;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_unreg_ring_funcs;
regdone = 1;
- ret = iio_ring_buffer_register_ex(st->indio_dev->ring, 0,
+ ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
adis16209_channels,
ARRAY_SIZE(adis16209_channels));
if (ret) {
@@ -520,42 +510,35 @@ static int __devinit adis16209_probe(struct spi_device *spi)
}
if (spi->irq) {
- ret = adis16209_probe_trigger(st->indio_dev);
+ ret = adis16209_probe_trigger(indio_dev);
if (ret)
goto error_uninitialize_ring;
}
/* Get the device into a sane initial state */
- ret = adis16209_initial_setup(st->indio_dev);
+ ret = adis16209_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
return 0;
error_remove_trigger:
- adis16209_remove_trigger(st->indio_dev);
+ adis16209_remove_trigger(indio_dev);
error_uninitialize_ring:
- iio_ring_buffer_unregister(st->indio_dev->ring);
+ iio_ring_buffer_unregister(indio_dev->ring);
error_unreg_ring_funcs:
- adis16209_unconfigure_ring(st->indio_dev);
+ adis16209_unconfigure_ring(indio_dev);
error_free_dev:
if (regdone)
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
else
- iio_free_device(st->indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
static int adis16209_remove(struct spi_device *spi)
{
- struct adis16209_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
flush_scheduled_work();
@@ -563,9 +546,6 @@ static int adis16209_remove(struct spi_device *spi)
iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
adis16209_unconfigure_ring(indio_dev);
- kfree(st->tx);
- kfree(st->rx);
- kfree(st);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
index 390908b3f02..45017d3f02f 100644
--- a/drivers/staging/iio/accel/adis16209_ring.c
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -26,7 +26,7 @@ static int adis16209_read_ring_data(struct device *dev, u8 *rx)
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16209_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16209_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[ADIS16209_OUTPUTS + 1];
int ret;
int i;
@@ -66,7 +66,7 @@ static irqreturn_t adis16209_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->private_data;
- struct adis16209_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16209_state *st = iio_priv(indio_dev);
struct iio_ring_buffer *ring = indio_dev->ring;
int i = 0;
@@ -80,7 +80,7 @@ static irqreturn_t adis16209_trigger_handler(int irq, void *p)
}
if (ring->scan_count &&
- adis16209_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
+ adis16209_read_ring_data(&indio_dev->dev, st->rx) >= 0)
for (; i < ring->scan_count; i++)
data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
@@ -90,7 +90,7 @@ static irqreturn_t adis16209_trigger_handler(int irq, void *p)
ring->access->store_to(ring, (u8 *)data, pf->timestamp);
- iio_trigger_notify_done(st->indio_dev->trig);
+ iio_trigger_notify_done(indio_dev->trig);
kfree(data);
return IRQ_HANDLED;
diff --git a/drivers/staging/iio/accel/adis16209_trigger.c b/drivers/staging/iio/accel/adis16209_trigger.c
index 211ee704569..6df7b47ec7b 100644
--- a/drivers/staging/iio/accel/adis16209_trigger.c
+++ b/drivers/staging/iio/accel/adis16209_trigger.c
@@ -27,17 +27,16 @@ static irqreturn_t adis16209_data_rdy_trig_poll(int irq, void *trig)
static int adis16209_data_rdy_trigger_set_state(struct iio_trigger *trig,
bool state)
{
- struct adis16209_state *st = trig->private_data;
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = trig->private_data;
dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
- return adis16209_set_irq(st->indio_dev, state);
+ return adis16209_set_irq(indio_dev, state);
}
int adis16209_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
- struct adis16209_state *st = indio_dev->dev_data;
+ struct adis16209_state *st = iio_priv(indio_dev);
st->trig = iio_allocate_trigger("adis16209-dev%d", indio_dev->id);
if (st->trig == NULL) {
@@ -54,7 +53,7 @@ int adis16209_probe_trigger(struct iio_dev *indio_dev)
goto error_free_trig;
st->trig->dev.parent = &st->us->dev;
st->trig->owner = THIS_MODULE;
- st->trig->private_data = st;
+ st->trig->private_data = indio_dev;
st->trig->set_trigger_state = &adis16209_data_rdy_trigger_set_state;
ret = iio_trigger_register(st->trig);
@@ -75,9 +74,9 @@ error_ret:
void adis16209_remove_trigger(struct iio_dev *indio_dev)
{
- struct adis16209_state *state = indio_dev->dev_data;
+ struct adis16209_state *st = iio_priv(indio_dev);
- iio_trigger_unregister(state->trig);
- free_irq(state->us->irq, state->trig);
- iio_free_trigger(state->trig);
+ iio_trigger_unregister(st->trig);
+ free_irq(st->us->irq, st->trig);
+ iio_free_trigger(st->trig);
}
diff --git a/drivers/staging/iio/accel/adis16220.h b/drivers/staging/iio/accel/adis16220.h
index 4d5758c2c04..024313cf5cf 100644
--- a/drivers/staging/iio/accel/adis16220.h
+++ b/drivers/staging/iio/accel/adis16220.h
@@ -126,21 +126,15 @@
/**
* struct adis16220_state - device instance specific data
* @us: actual spi_device
- * @work_trigger_to_ring: bh for triggered event handling
- * @inter: used to check if new interrupt has been triggered
- * @last_timestamp: passing timestamp from th to bh of interrupt handler
- * @indio_dev: industrial I/O device structure
- * @trig: data ready trigger registered with iio
* @tx: transmit buffer
* @rx: receive buffer
* @buf_lock: mutex to protect tx and rx
**/
struct adis16220_state {
- struct spi_device *us;
- struct iio_dev *indio_dev;
- u8 *tx;
- u8 *rx;
- struct mutex buf_lock;
+ struct spi_device *us;
+ struct mutex buf_lock;
+ u8 tx[ADIS16220_MAX_TX] ____cacheline_aligned;
+ u8 rx[ADIS16220_MAX_RX];
};
#endif /* SPI_ADIS16220_H_ */
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index 605a75ea399..bf9ba07c038 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -39,7 +39,7 @@ static int adis16220_spi_write_reg_8(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16220_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16220_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = ADIS16220_WRITE_REG(reg_address);
@@ -65,7 +65,7 @@ static int adis16220_spi_write_reg_16(struct device *dev,
int ret;
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16220_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16220_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
{
.tx_buf = st->tx,
@@ -110,7 +110,7 @@ static int adis16220_spi_read_reg_16(struct device *dev,
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16220_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16220_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -316,10 +316,10 @@ err_ret:
return ret;
}
-static int adis16220_initial_setup(struct adis16220_state *st)
+static int adis16220_initial_setup(struct iio_dev *indio_dev)
{
int ret;
- struct device *dev = &st->indio_dev->dev;
+ struct device *dev = &indio_dev->dev;
/* Do self test */
ret = adis16220_self_test(dev);
@@ -341,19 +341,17 @@ static int adis16220_initial_setup(struct adis16220_state *st)
}
}
- printk(KERN_INFO DRIVER_NAME ": at CS%d (irq %d)\n",
- st->us->chip_select, st->us->irq);
-
err_ret:
return ret;
}
-static ssize_t adis16220_capture_buffer_read(struct adis16220_state *st,
+static ssize_t adis16220_capture_buffer_read(struct iio_dev *indio_dev,
char *buf,
loff_t off,
size_t count,
int addr)
{
+ struct adis16220_state *st = iio_priv(indio_dev);
struct spi_message msg;
struct spi_transfer xfers[] = {
{
@@ -383,7 +381,7 @@ static ssize_t adis16220_capture_buffer_read(struct adis16220_state *st,
count = ADIS16220_CAPTURE_SIZE - off;
/* write the begin position of capture buffer */
- ret = adis16220_spi_write_reg_16(&st->indio_dev->dev,
+ ret = adis16220_spi_write_reg_16(&indio_dev->dev,
ADIS16220_CAPT_PNTR,
off > 1);
if (ret)
@@ -422,9 +420,8 @@ static ssize_t adis16220_accel_bin_read(struct file *filp, struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16220_state *st = iio_dev_get_devdata(indio_dev);
- return adis16220_capture_buffer_read(st, buf,
+ return adis16220_capture_buffer_read(indio_dev, buf,
off, count,
ADIS16220_CAPT_BUFA);
}
@@ -445,9 +442,8 @@ static ssize_t adis16220_adc1_bin_read(struct file *filp, struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16220_state *st = iio_dev_get_devdata(indio_dev);
- return adis16220_capture_buffer_read(st, buf,
+ return adis16220_capture_buffer_read(indio_dev, buf,
off, count,
ADIS16220_CAPT_BUF1);
}
@@ -468,9 +464,8 @@ static ssize_t adis16220_adc2_bin_read(struct file *filp, struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16220_state *st = iio_dev_get_devdata(indio_dev);
- return adis16220_capture_buffer_read(st, buf,
+ return adis16220_capture_buffer_read(indio_dev, buf,
off, count,
ADIS16220_CAPT_BUF2);
}
@@ -551,98 +546,76 @@ static const struct iio_info adis16220_info = {
static int __devinit adis16220_probe(struct spi_device *spi)
{
int ret, regdone = 0;
- struct adis16220_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
+ struct adis16220_state *st;
+ struct iio_dev *indio_dev;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
goto error_ret;
}
+
+ st = iio_priv(indio_dev);
/* this is only used for removal purposes */
- spi_set_drvdata(spi, st);
+ spi_set_drvdata(spi, indio_dev);
- /* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADIS16220_MAX_RX, GFP_KERNEL);
- if (st->rx == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->tx = kzalloc(sizeof(*st->tx)*ADIS16220_MAX_TX, GFP_KERNEL);
- if (st->tx == NULL) {
- ret = -ENOMEM;
- goto error_free_rx;
- }
st->us = spi;
mutex_init(&st->buf_lock);
- /* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_tx;
- }
- st->indio_dev->name = spi->dev.driver->name;
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->info = &adis16220_info;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adis16220_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
regdone = 1;
- ret = sysfs_create_bin_file(&st->indio_dev->dev.kobj, &accel_bin);
+ ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &accel_bin);
if (ret)
goto error_free_dev;
- ret = sysfs_create_bin_file(&st->indio_dev->dev.kobj, &adc1_bin);
+ ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc1_bin);
if (ret)
goto error_rm_accel_bin;
- ret = sysfs_create_bin_file(&st->indio_dev->dev.kobj, &adc2_bin);
+ ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc2_bin);
if (ret)
goto error_rm_adc1_bin;
/* Get the device into a sane initial state */
- ret = adis16220_initial_setup(st);
+ ret = adis16220_initial_setup(indio_dev);
if (ret)
goto error_rm_adc2_bin;
return 0;
error_rm_adc2_bin:
- sysfs_remove_bin_file(&st->indio_dev->dev.kobj, &adc2_bin);
+ sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
error_rm_adc1_bin:
- sysfs_remove_bin_file(&st->indio_dev->dev.kobj, &adc1_bin);
+ sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
error_rm_accel_bin:
- sysfs_remove_bin_file(&st->indio_dev->dev.kobj, &accel_bin);
+ sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
error_free_dev:
if (regdone)
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
else
- iio_free_device(st->indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
static int adis16220_remove(struct spi_device *spi)
{
- struct adis16220_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
flush_scheduled_work();
- sysfs_remove_bin_file(&st->indio_dev->dev.kobj, &adc2_bin);
- sysfs_remove_bin_file(&st->indio_dev->dev.kobj, &adc1_bin);
- sysfs_remove_bin_file(&st->indio_dev->dev.kobj, &accel_bin);
+ sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
+ sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
+ sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
iio_device_unregister(indio_dev);
- kfree(st->tx);
- kfree(st->rx);
- kfree(st);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16240.h b/drivers/staging/iio/accel/adis16240.h
index 162b1f468a1..f1dd047aa5e 100644
--- a/drivers/staging/iio/accel/adis16240.h
+++ b/drivers/staging/iio/accel/adis16240.h
@@ -126,24 +126,21 @@
/**
* struct adis16240_state - device instance specific data
* @us: actual spi_device
- * @indio_dev: industrial I/O device structure
* @trig: data ready trigger registered with iio
* @tx: transmit buffer
* @rx: receive buffer
* @buf_lock: mutex to protect tx and rx
**/
struct adis16240_state {
- struct spi_device *us;
- struct iio_dev *indio_dev;
- struct iio_trigger *trig;
- u8 *tx;
- u8 *rx;
- struct mutex buf_lock;
+ struct spi_device *us;
+ struct iio_trigger *trig;
+ struct mutex buf_lock;
+ u8 tx[ADIS16240_MAX_TX] ____cacheline_aligned;
+ u8 rx[ADIS16240_MAX_RX];
};
int adis16240_set_irq(struct iio_dev *indio_dev, bool enable);
-#ifdef CONFIG_IIO_RING_BUFFER
/* At the moment triggers are only used for ring buffer
* filling. This may change!
*/
@@ -155,6 +152,7 @@ int adis16240_set_irq(struct iio_dev *indio_dev, bool enable);
#define ADIS16240_SCAN_AUX_ADC 4
#define ADIS16240_SCAN_TEMP 5
+#ifdef CONFIG_IIO_RING_BUFFER
void adis16240_remove_trigger(struct iio_dev *indio_dev);
int adis16240_probe_trigger(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index ac6038557b0..aee8b69173c 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -41,7 +41,7 @@ static int adis16240_spi_write_reg_8(struct iio_dev *indio_dev,
u8 val)
{
int ret;
- struct adis16240_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16240_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = ADIS16240_WRITE_REG(reg_address);
@@ -66,7 +66,7 @@ static int adis16240_spi_write_reg_16(struct iio_dev *indio_dev,
{
int ret;
struct spi_message msg;
- struct adis16240_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16240_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
{
.tx_buf = st->tx,
@@ -109,7 +109,7 @@ static int adis16240_spi_read_reg_16(struct iio_dev *indio_dev,
u16 *val)
{
struct spi_message msg;
- struct adis16240_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16240_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -370,13 +370,17 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
addr = adis16240_addresses[chan->address][0];
ret = adis16240_spi_read_reg_16(indio_dev, addr, &val16);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
if (val16 & ADIS16240_ERROR_ACTIVE) {
ret = adis16240_check_status(indio_dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
}
val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
if (chan->scan_type.sign == 's')
@@ -517,52 +521,39 @@ static const struct iio_info adis16240_info = {
static int __devinit adis16240_probe(struct spi_device *spi)
{
int ret, regdone = 0;
- struct adis16240_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
+ struct adis16240_state *st;
+ struct iio_dev *indio_dev;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
goto error_ret;
}
+ st = iio_priv(indio_dev);
/* this is only used for removal purposes */
- spi_set_drvdata(spi, st);
+ spi_set_drvdata(spi, indio_dev);
- /* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADIS16240_MAX_RX, GFP_KERNEL);
- if (st->rx == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->tx = kzalloc(sizeof(*st->tx)*ADIS16240_MAX_TX, GFP_KERNEL);
- if (st->tx == NULL) {
- ret = -ENOMEM;
- goto error_free_rx;
- }
st->us = spi;
mutex_init(&st->buf_lock);
- /* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_tx;
- }
- st->indio_dev->name = spi->dev.driver->name;
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->info = &adis16240_info;
- st->indio_dev->channels = adis16240_channels;
- st->indio_dev->num_channels = ARRAY_SIZE(adis16240_channels);
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adis16240_info;
+ indio_dev->channels = adis16240_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16240_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = adis16240_configure_ring(st->indio_dev);
+ ret = adis16240_configure_ring(indio_dev);
if (ret)
goto error_free_dev;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_unreg_ring_funcs;
regdone = 1;
- ret = iio_ring_buffer_register_ex(st->indio_dev->ring, 0,
+ ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
adis16240_channels,
ARRAY_SIZE(adis16240_channels));
if (ret) {
@@ -571,42 +562,36 @@ static int __devinit adis16240_probe(struct spi_device *spi)
}
if (spi->irq) {
- ret = adis16240_probe_trigger(st->indio_dev);
+ ret = adis16240_probe_trigger(indio_dev);
if (ret)
goto error_uninitialize_ring;
}
/* Get the device into a sane initial state */
- ret = adis16240_initial_setup(st->indio_dev);
+ ret = adis16240_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
return 0;
error_remove_trigger:
- adis16240_remove_trigger(st->indio_dev);
+ adis16240_remove_trigger(indio_dev);
error_uninitialize_ring:
- iio_ring_buffer_unregister(st->indio_dev->ring);
+ iio_ring_buffer_unregister(indio_dev->ring);
error_unreg_ring_funcs:
- adis16240_unconfigure_ring(st->indio_dev);
+ adis16240_unconfigure_ring(indio_dev);
error_free_dev:
if (regdone)
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
else
- iio_free_device(st->indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
static int adis16240_remove(struct spi_device *spi)
{
- struct adis16240_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
+
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
flush_scheduled_work();
@@ -614,9 +599,6 @@ static int adis16240_remove(struct spi_device *spi)
iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
adis16240_unconfigure_ring(indio_dev);
- kfree(st->tx);
- kfree(st->rx);
- kfree(st);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16240_ring.c b/drivers/staging/iio/accel/adis16240_ring.c
index 0c6d781d94c..c812a34daca 100644
--- a/drivers/staging/iio/accel/adis16240_ring.c
+++ b/drivers/staging/iio/accel/adis16240_ring.c
@@ -26,7 +26,7 @@ static int adis16240_read_ring_data(struct device *dev, u8 *rx)
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16240_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16240_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[ADIS16240_OUTPUTS + 1];
int ret;
int i;
@@ -63,7 +63,7 @@ static irqreturn_t adis16240_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->private_data;
- struct adis16240_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16240_state *st = iio_priv(indio_dev);
struct iio_ring_buffer *ring = indio_dev->ring;
int i = 0;
@@ -77,7 +77,7 @@ static irqreturn_t adis16240_trigger_handler(int irq, void *p)
}
if (ring->scan_count &&
- adis16240_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
+ adis16240_read_ring_data(&indio_dev->dev, st->rx) >= 0)
for (; i < ring->scan_count; i++)
data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
@@ -87,7 +87,7 @@ static irqreturn_t adis16240_trigger_handler(int irq, void *p)
ring->access->store_to(ring, (u8 *)data, pf->timestamp);
- iio_trigger_notify_done(st->indio_dev->trig);
+ iio_trigger_notify_done(indio_dev->trig);
kfree(data);
return IRQ_HANDLED;
diff --git a/drivers/staging/iio/accel/adis16240_trigger.c b/drivers/staging/iio/accel/adis16240_trigger.c
index ece3ca8fb7e..17135fc33c9 100644
--- a/drivers/staging/iio/accel/adis16240_trigger.c
+++ b/drivers/staging/iio/accel/adis16240_trigger.c
@@ -27,17 +27,16 @@ static irqreturn_t adis16240_data_rdy_trig_poll(int irq, void *trig)
static int adis16240_data_rdy_trigger_set_state(struct iio_trigger *trig,
bool state)
{
- struct adis16240_state *st = trig->private_data;
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = trig->private_data;
dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
- return adis16240_set_irq(st->indio_dev, state);
+ return adis16240_set_irq(indio_dev, state);
}
int adis16240_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
- struct adis16240_state *st = indio_dev->dev_data;
+ struct adis16240_state *st = iio_priv(indio_dev);
st->trig = iio_allocate_trigger("adis16240-dev%d", indio_dev->id);
if (st->trig == NULL) {
@@ -55,7 +54,7 @@ int adis16240_probe_trigger(struct iio_dev *indio_dev)
st->trig->dev.parent = &st->us->dev;
st->trig->owner = THIS_MODULE;
- st->trig->private_data = st;
+ st->trig->private_data = indio_dev;
st->trig->set_trigger_state = &adis16240_data_rdy_trigger_set_state;
ret = iio_trigger_register(st->trig);
@@ -76,9 +75,9 @@ error_ret:
void adis16240_remove_trigger(struct iio_dev *indio_dev)
{
- struct adis16240_state *state = indio_dev->dev_data;
+ struct adis16240_state *st = iio_priv(indio_dev);
- iio_trigger_unregister(state->trig);
- free_irq(state->us->irq, state->trig);
- iio_free_trigger(state->trig);
+ iio_trigger_unregister(st->trig);
+ free_irq(st->us->irq, st->trig);
+ iio_free_trigger(st->trig);
}
diff --git a/drivers/staging/iio/accel/kxsd9.c b/drivers/staging/iio/accel/kxsd9.c
index 973156e7577..c8a358a5df8 100644
--- a/drivers/staging/iio/accel/kxsd9.c
+++ b/drivers/staging/iio/accel/kxsd9.c
@@ -56,17 +56,15 @@
/**
* struct kxsd9_state - device related storage
* @buf_lock: protect the rx and tx buffers.
- * @indio_dev: associated industrial IO device
* @us: spi device
* @rx: single rx buffer storage
* @tx: single tx buffer storage
**/
struct kxsd9_state {
struct mutex buf_lock;
- struct iio_dev *indio_dev;
struct spi_device *us;
- u8 *rx;
- u8 *tx;
+ u8 rx[KXSD9_STATE_RX_SIZE] ____cacheline_aligned;
+ u8 tx[KXSD9_STATE_TX_SIZE];
};
/* This may want to move to mili g to allow for non integer ranges */
@@ -77,7 +75,7 @@ static ssize_t kxsd9_read_scale(struct device *dev,
int ret;
ssize_t len = 0;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct kxsd9_state *st = indio_dev->dev_data;
+ struct kxsd9_state *st = iio_priv(indio_dev);
struct spi_transfer xfer = {
.bits_per_word = 8,
.len = 2,
@@ -125,7 +123,7 @@ static ssize_t kxsd9_write_scale(struct device *dev,
struct spi_message msg;
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct kxsd9_state *st = indio_dev->dev_data;
+ struct kxsd9_state *st = iio_priv(indio_dev);
u8 val;
struct spi_transfer xfers[] = {
{
@@ -190,7 +188,7 @@ static ssize_t kxsd9_read_accel(struct device *dev,
u16 val;
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct kxsd9_state *st = indio_dev->dev_data;
+ struct kxsd9_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
{
.bits_per_word = 8,
@@ -253,52 +251,32 @@ static const struct attribute_group kxsd9_attribute_group = {
.attrs = kxsd9_attributes,
};
-static int __devinit kxsd9_power_up(struct spi_device *spi)
+static int __devinit kxsd9_power_up(struct kxsd9_state *st)
{
- int ret;
struct spi_transfer xfers[2] = {
{
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
+ .tx_buf = st->tx,
}, {
.bits_per_word = 8,
.len = 2,
.cs_change = 1,
+ .tx_buf = st->tx + 2,
},
};
struct spi_message msg;
- u8 *tx2;
- u8 *tx = kmalloc(2, GFP_KERNEL);
-
- if (tx == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- tx2 = kmalloc(2, GFP_KERNEL);
- if (tx2 == NULL) {
- ret = -ENOMEM;
- goto error_free_tx;
- }
- tx[0] = 0x0d;
- tx[1] = 0x40;
+ st->tx[0] = 0x0d;
+ st->tx[1] = 0x40;
+ st->tx[2] = 0x0c;
+ st->tx[3] = 0x9b;
- tx2[0] = 0x0c;
- tx2[1] = 0x9b;
-
- xfers[0].tx_buf = tx;
- xfers[1].tx_buf = tx2;
spi_message_init(&msg);
spi_message_add_tail(&xfers[0], &msg);
spi_message_add_tail(&xfers[1], &msg);
- ret = spi_sync(spi, &msg);
-
- kfree(tx2);
-error_free_tx:
- kfree(tx);
-error_ret:
- return ret;
+ return spi_sync(st->us, &msg);
};
static const struct iio_info kxsd9_info = {
@@ -308,72 +286,44 @@ static const struct iio_info kxsd9_info = {
static int __devinit kxsd9_probe(struct spi_device *spi)
{
-
+ struct iio_dev *indio_dev;
struct kxsd9_state *st;
int ret = 0;
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
- spi_set_drvdata(spi, st);
-
- st->rx = kmalloc(sizeof(*st->rx)*KXSD9_STATE_RX_SIZE,
- GFP_KERNEL);
- if (st->rx == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->tx = kmalloc(sizeof(*st->tx)*KXSD9_STATE_TX_SIZE,
- GFP_KERNEL);
- if (st->tx == NULL) {
- ret = -ENOMEM;
- goto error_free_rx;
- }
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
st->us = spi;
mutex_init(&st->buf_lock);
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_tx;
- }
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->info = &kxsd9_info;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->indio_dev);
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &kxsd9_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
spi->mode = SPI_MODE_0;
spi_setup(spi);
- kxsd9_power_up(spi);
+ kxsd9_power_up(st);
return 0;
error_free_dev:
- iio_free_device(st->indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
static int __devexit kxsd9_remove(struct spi_device *spi)
{
- struct kxsd9_state *st = spi_get_drvdata(spi);
-
- iio_device_unregister(st->indio_dev);
- kfree(st->tx);
- kfree(st->rx);
- kfree(st);
+ iio_device_unregister(spi_get_drvdata(spi));
return 0;
}
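Taken together, the kxsd9 hunks show the conversion applied throughout this series: one iio_allocate_device(sizeof(*st)) call provides both the iio_dev and the state, iio_priv() retrieves the state, and the bus drvdata now carries the iio_dev. Condensed sketch (probe name illustrative; the calls are the ones used above):

static int __devinit example_spi_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	struct kxsd9_state *st;
	int ret;

	indio_dev = iio_allocate_device(sizeof(*st));
	if (indio_dev == NULL)
		return -ENOMEM;

	st = iio_priv(indio_dev);
	spi_set_drvdata(spi, indio_dev);	/* drvdata is the iio_dev now */
	st->us = spi;
	mutex_init(&st->buf_lock);

	indio_dev->dev.parent = &spi->dev;
	indio_dev->info = &kxsd9_info;
	indio_dev->modes = INDIO_DIRECT_MODE;

	ret = iio_device_register(indio_dev);
	if (ret)
		iio_free_device(indio_dev);	/* frees st with it */
	return ret;
}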
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index ba5bc679204..a29dfd27d44 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -676,7 +676,7 @@ static int __devinit lis3l02dq_probe(struct spi_device *spi)
}
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
- spi_set_drvdata(spi, st);
+ spi_set_drvdata(spi, indio_dev);
st->us = spi;
mutex_init(&st->buf_lock);
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
index cf0751d917a..1e396cefdf6 100644
--- a/drivers/staging/iio/accel/sca3000.h
+++ b/drivers/staging/iio/accel/sca3000.h
@@ -173,7 +173,6 @@
struct sca3000_state {
struct spi_device *us;
const struct sca3000_chip_info *info;
- struct iio_dev *indio_dev;
struct work_struct interrupt_handler_ws;
s64 last_timestamp;
int mo_det_use_count;
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index f213b8698eb..603f5bca797 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -242,7 +242,7 @@ static int sca3000_check_status(struct device *dev)
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
mutex_lock(&st->lock);
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_STATUS, 1);
@@ -269,7 +269,7 @@ static ssize_t sca3000_show_rev(struct device *dev,
{
int len = 0, ret;
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct sca3000_state *st = dev_info->dev_data;
+ struct sca3000_state *st = iio_priv(dev_info);
mutex_lock(&st->lock);
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_REVID, 1);
@@ -297,7 +297,7 @@ sca3000_show_available_measurement_modes(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct sca3000_state *st = dev_info->dev_data;
+ struct sca3000_state *st = iio_priv(dev_info);
int len = 0;
len += sprintf(buf + len, "0 - normal mode");
@@ -329,7 +329,7 @@ sca3000_show_measurement_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct sca3000_state *st = dev_info->dev_data;
+ struct sca3000_state *st = iio_priv(dev_info);
int len = 0, ret;
mutex_lock(&st->lock);
@@ -380,7 +380,7 @@ sca3000_store_measurement_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct sca3000_state *st = dev_info->dev_data;
+ struct sca3000_state *st = iio_priv(dev_info);
int ret;
int mask = 0x03;
long val;
@@ -453,7 +453,7 @@ static int sca3000_read_raw(struct iio_dev *indio_dev,
int *val2,
long mask)
{
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
int ret;
u8 address;
@@ -500,7 +500,7 @@ static ssize_t sca3000_read_av_freq(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
int len = 0, ret, val;
mutex_lock(&st->lock);
@@ -571,7 +571,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
int ret, len = 0, base_freq = 0, val;
mutex_lock(&st->lock);
@@ -613,7 +613,7 @@ static ssize_t sca3000_set_frequency(struct device *dev,
size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
int ret, base_freq = 0;
int ctrlval;
long val;
@@ -673,7 +673,7 @@ static ssize_t sca3000_read_temp(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
int ret;
int val;
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_TEMP_MSB, 2);
@@ -699,7 +699,7 @@ static int sca3000_read_thresh(struct iio_dev *indio_dev,
int *val)
{
int ret, i;
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
int num = IIO_EVENT_CODE_EXTRACT_MODIFIER(e);
mutex_lock(&st->lock);
ret = sca3000_read_ctrl_reg(st, sca3000_addresses[num][1]);
@@ -726,7 +726,7 @@ static int sca3000_write_thresh(struct iio_dev *indio_dev,
int e,
int val)
{
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
int num = IIO_EVENT_CODE_EXTRACT_MODIFIER(e);
int ret;
int i;
@@ -798,11 +798,10 @@ static const struct attribute_group sca3000_attribute_group_with_temp = {
static irqreturn_t sca3000_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
- struct sca3000_state *st;
+ struct sca3000_state *st = iio_priv(indio_dev);
int ret, val;
s64 last_timestamp = iio_get_time_ns();
- st = indio_dev->dev_data;
/* Could lead if badly timed to an extra read of status reg,
* but ensures no interrupt is missed.
*/
@@ -813,10 +812,10 @@ static irqreturn_t sca3000_event_handler(int irq, void *private)
if (ret)
goto done;
- sca3000_ring_int_process(val, st->indio_dev->ring);
+ sca3000_ring_int_process(val, indio_dev->ring);
if (val & SCA3000_INT_STATUS_FREE_FALL)
- iio_push_event(st->indio_dev, 0,
+ iio_push_event(indio_dev, 0,
IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
0,
IIO_EV_MOD_X_AND_Y_AND_Z,
@@ -825,7 +824,7 @@ static irqreturn_t sca3000_event_handler(int irq, void *private)
last_timestamp);
if (val & SCA3000_INT_STATUS_Y_TRIGGER)
- iio_push_event(st->indio_dev, 0,
+ iio_push_event(indio_dev, 0,
IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
0,
IIO_EV_MOD_Y,
@@ -834,7 +833,7 @@ static irqreturn_t sca3000_event_handler(int irq, void *private)
last_timestamp);
if (val & SCA3000_INT_STATUS_X_TRIGGER)
- iio_push_event(st->indio_dev, 0,
+ iio_push_event(indio_dev, 0,
IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
0,
IIO_EV_MOD_X,
@@ -843,7 +842,7 @@ static irqreturn_t sca3000_event_handler(int irq, void *private)
last_timestamp);
if (val & SCA3000_INT_STATUS_Z_TRIGGER)
- iio_push_event(st->indio_dev, 0,
+ iio_push_event(indio_dev, 0,
IIO_MOD_EVENT_CODE(IIO_EV_CLASS_ACCEL,
0,
IIO_EV_MOD_Z,
@@ -861,7 +860,7 @@ done:
static int sca3000_read_event_config(struct iio_dev *indio_dev,
int e)
{
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
int ret;
u8 protect_mask = 0x03;
int num = IIO_EVENT_CODE_EXTRACT_MODIFIER(e);
@@ -895,7 +894,7 @@ static ssize_t sca3000_query_free_fall_mode(struct device *dev,
{
int ret, len;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
int val;
mutex_lock(&st->lock);
@@ -923,7 +922,7 @@ static ssize_t sca3000_set_free_fall_mode(struct device *dev,
size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
long val;
int ret;
u8 protect_mask = SCA3000_FREE_FALL_DETECT;
@@ -965,7 +964,7 @@ static int sca3000_write_event_config(struct iio_dev *indio_dev,
int e,
int state)
{
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
int ret, ctrlval;
u8 protect_mask = 0x03;
int num = IIO_EVENT_CODE_EXTRACT_MODIFIER(e);
@@ -1126,42 +1125,38 @@ static int __devinit sca3000_probe(struct spi_device *spi)
{
int ret, regdone = 0;
struct sca3000_state *st;
+ struct iio_dev *indio_dev;
- st = kzalloc(sizeof(struct sca3000_state), GFP_KERNEL);
- if (st == NULL) {
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
- spi_set_drvdata(spi, st);
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
st->us = spi;
mutex_init(&st->lock);
st->info = &sca3000_spi_chip_info_tbl[spi_get_device_id(spi)
->driver_data];
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_clear_st;
- }
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
if (st->info->temp_output)
- st->indio_dev->info = &sca3000_info_with_temp;
+ indio_dev->info = &sca3000_info_with_temp;
else {
- st->indio_dev->info = &sca3000_info;
- st->indio_dev->channels = sca3000_channels;
- st->indio_dev->num_channels = ARRAY_SIZE(sca3000_channels);
+ indio_dev->info = &sca3000_info;
+ indio_dev->channels = sca3000_channels;
+ indio_dev->num_channels = ARRAY_SIZE(sca3000_channels);
}
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- sca3000_configure_ring(st->indio_dev);
- ret = iio_device_register(st->indio_dev);
+ sca3000_configure_ring(indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret < 0)
goto error_free_dev;
regdone = 1;
- ret = iio_ring_buffer_register_ex(st->indio_dev->ring, 0,
+ ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
sca3000_channels,
ARRAY_SIZE(sca3000_channels));
if (ret < 0)
@@ -1172,11 +1167,11 @@ static int __devinit sca3000_probe(struct spi_device *spi)
&sca3000_event_handler,
IRQF_TRIGGER_FALLING,
"sca3000",
- st->indio_dev);
+ indio_dev);
if (ret)
goto error_unregister_ring;
}
- sca3000_register_ring_funcs(st->indio_dev);
+ sca3000_register_ring_funcs(indio_dev);
ret = sca3000_clean_setup(st);
if (ret)
goto error_free_irq;
@@ -1184,17 +1179,16 @@ static int __devinit sca3000_probe(struct spi_device *spi)
error_free_irq:
if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0)
- free_irq(spi->irq, st->indio_dev);
+ free_irq(spi->irq, indio_dev);
error_unregister_ring:
- iio_ring_buffer_unregister(st->indio_dev->ring);
+ iio_ring_buffer_unregister(indio_dev->ring);
error_unregister_dev:
error_free_dev:
if (regdone)
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
else
- iio_free_device(st->indio_dev);
-error_clear_st:
- kfree(st);
+ iio_free_device(indio_dev);
+
error_ret:
return ret;
}
@@ -1219,8 +1213,8 @@ error_ret:
static int sca3000_remove(struct spi_device *spi)
{
- struct sca3000_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct sca3000_state *st = iio_priv(indio_dev);
int ret;
/* Must ensure no interrupts can be generated after this!*/
ret = sca3000_stop_all_interrupts(st);
@@ -1232,8 +1226,6 @@ static int sca3000_remove(struct spi_device *spi)
sca3000_unconfigure_ring(indio_dev);
iio_device_unregister(indio_dev);
- kfree(st);
-
return 0;
}
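Because indio_dev is what gets passed as the dev_id to request_threaded_irq() (and to the matching free_irq() calls), the event handler recovers the state without the old st->indio_dev back-pointer. Stripped-down sketch (handler name illustrative, body elided):

static irqreturn_t example_event_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct sca3000_state *st = iio_priv(indio_dev);
	s64 ts = iio_get_time_ns();

	/* ... read the interrupt source through st, then report with
	 * iio_push_event(indio_dev, ...) as in the hunks above ... */
	dev_dbg(&st->us->dev, "event at %lld\n", ts);
	return IRQ_HANDLED;
}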
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index 7c4ff0b1df0..a704c75fffc 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -89,7 +89,7 @@ static int sca3000_read_first_n_hw_rb(struct iio_ring_buffer *r,
{
struct iio_hw_ring_buffer *hw_ring = iio_to_hw_ring_buf(r);
struct iio_dev *indio_dev = hw_ring->private;
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
u8 *rx;
int ret, i, num_available, num_read = 0;
int bytes_per_sample = 1;
@@ -168,7 +168,7 @@ static ssize_t sca3000_query_ring_int(struct device *dev,
int ret, val;
struct iio_ring_buffer *ring = dev_get_drvdata(dev);
struct iio_dev *indio_dev = ring->indio_dev;
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
mutex_lock(&st->lock);
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
@@ -190,7 +190,7 @@ static ssize_t sca3000_set_ring_int(struct device *dev,
{
struct iio_ring_buffer *ring = dev_get_drvdata(dev);
struct iio_dev *indio_dev = ring->indio_dev;
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
long val;
int ret;
@@ -240,7 +240,7 @@ static ssize_t sca3000_show_ring_bpse(struct device *dev,
int len = 0, ret;
struct iio_ring_buffer *ring = dev_get_drvdata(dev);
struct iio_dev *indio_dev = ring->indio_dev;
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
mutex_lock(&st->lock);
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
@@ -270,7 +270,7 @@ static ssize_t sca3000_store_ring_bpse(struct device *dev,
{
struct iio_ring_buffer *ring = dev_get_drvdata(dev);
struct iio_dev *indio_dev = ring->indio_dev;
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
int ret;
mutex_lock(&st->lock);
@@ -300,7 +300,7 @@ static ssize_t sca3000_show_buffer_scale(struct device *dev,
{
struct iio_ring_buffer *ring = dev_get_drvdata(dev);
struct iio_dev *indio_dev = ring->indio_dev;
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
return sprintf(buf, "0.%06d\n", 4*st->info->scale);
}
@@ -397,7 +397,7 @@ void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
static inline
int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
{
- struct sca3000_state *st = indio_dev->dev_data;
+ struct sca3000_state *st = iio_priv(indio_dev);
int ret;
mutex_lock(&st->lock);
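The ring-buffer sysfs attributes use the same retrieval one step removed: the attribute's drvdata is the ring, the ring points back at its iio_dev, and iio_priv() gives the state. Sketch (attribute callback name hypothetical; the chain is the one used above):

static ssize_t example_show_ring_attr(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	struct sca3000_state *st = iio_priv(indio_dev);

	return sprintf(buf, "%d\n", st->mo_det_use_count);
}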
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index 8c751c46ddd..b39f2e1c1fe 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -130,6 +130,20 @@ config AD7780
To compile this driver as a module, choose M here: the
module will be called ad7780.
+config AD7793
+ tristate "Analog Devices AD7792 AD7793 ADC driver"
+ depends on SPI
+ select IIO_RING_BUFFER
+ select IIO_SW_RING
+ select IIO_TRIGGER
+ help
+ Say yes here to build support for Analog Devices
+ AD7792 and AD7793 SPI analog to digital converters (ADC).
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7793.
+
config AD7745
tristate "Analog Devices AD7745, AD7746 AD7747 capacitive sensor driver"
depends on I2C
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index 1d9b3f582ea..f0203513997 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_AD7291) += ad7291.o
obj-$(CONFIG_AD7314) += ad7314.o
obj-$(CONFIG_AD7745) += ad7745.o
obj-$(CONFIG_AD7780) += ad7780.o
+obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7816) += ad7816.o
obj-$(CONFIG_ADT75) += adt75.o
obj-$(CONFIG_ADT7310) += adt7310.o
diff --git a/drivers/staging/iio/adc/ad7150.c b/drivers/staging/iio/adc/ad7150.c
index ca32b6778a9..04017ef6688 100644
--- a/drivers/staging/iio/adc/ad7150.c
+++ b/drivers/staging/iio/adc/ad7150.c
@@ -59,7 +59,6 @@
struct ad7150_chip_info {
struct i2c_client *client;
- struct iio_dev *indio_dev;
bool inter;
u16 ch1_threshold; /* Ch1 Threshold (in fixed threshold mode) */
u8 ch1_sensitivity; /* Ch1 Sensitivity (in adaptive threshold mode) */
@@ -184,7 +183,7 @@ static ssize_t ad7150_show_conversion_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%s\n", chip->conversion_mode);
}
@@ -195,7 +194,7 @@ static ssize_t ad7150_store_conversion_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
u8 cfg;
int i;
@@ -234,7 +233,7 @@ static ssize_t ad7150_show_ch1_value(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
u8 data[2];
ad7150_i2c_read(chip, AD7150_CH1_DATA_HIGH, data, 2);
@@ -248,7 +247,7 @@ static ssize_t ad7150_show_ch2_value(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
u8 data[2];
ad7150_i2c_read(chip, AD7150_CH2_DATA_HIGH, data, 2);
@@ -262,7 +261,7 @@ static ssize_t ad7150_show_threshold_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%s\n", chip->threshold_mode);
}
@@ -273,7 +272,7 @@ static ssize_t ad7150_store_threshold_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
u8 cfg;
ad7150_i2c_read(chip, AD7150_CFG, &cfg, 1);
@@ -305,7 +304,7 @@ static ssize_t ad7150_show_ch1_threshold(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", chip->ch1_threshold);
}
@@ -316,7 +315,7 @@ static ssize_t ad7150_store_ch1_threshold(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -341,7 +340,7 @@ static ssize_t ad7150_show_ch2_threshold(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", chip->ch2_threshold);
}
@@ -352,7 +351,7 @@ static ssize_t ad7150_store_ch2_threshold(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -377,7 +376,7 @@ static ssize_t ad7150_show_ch1_sensitivity(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", chip->ch1_sensitivity);
}
@@ -388,7 +387,7 @@ static ssize_t ad7150_store_ch1_sensitivity(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -412,7 +411,7 @@ static ssize_t ad7150_show_ch2_sensitivity(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", chip->ch2_sensitivity);
}
@@ -423,7 +422,7 @@ static ssize_t ad7150_store_ch2_sensitivity(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -447,7 +446,7 @@ static ssize_t ad7150_show_ch1_timeout(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", chip->ch1_timeout);
}
@@ -458,7 +457,7 @@ static ssize_t ad7150_store_ch1_timeout(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -482,7 +481,7 @@ static ssize_t ad7150_show_ch2_timeout(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", chip->ch2_timeout);
}
@@ -493,7 +492,7 @@ static ssize_t ad7150_store_ch2_timeout(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -517,7 +516,7 @@ static ssize_t ad7150_show_ch1_setup(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "0x%02x\n", chip->ch1_setup);
}
@@ -528,7 +527,7 @@ static ssize_t ad7150_store_ch1_setup(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -553,7 +552,7 @@ static ssize_t ad7150_show_ch2_setup(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "0x%02x\n", chip->ch2_setup);
}
@@ -564,7 +563,7 @@ static ssize_t ad7150_store_ch2_setup(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -588,7 +587,7 @@ static ssize_t ad7150_show_powerdown_timer(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "0x%02x\n", chip->powerdown_timer);
}
@@ -599,7 +598,7 @@ static ssize_t ad7150_store_powerdown_timer(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7150_chip_info *chip = dev_info->dev_data;
+ struct ad7150_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -645,7 +644,7 @@ static const struct attribute_group ad7150_attribute_group = {
static irqreturn_t ad7150_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
- struct ad7150_chip_info *chip = iio_dev_get_devdata(indio_dev);
+ struct ad7150_chip_info *chip = iio_priv(indio_dev);
u8 int_status;
s64 timestamp = iio_get_time_ns();
@@ -714,33 +713,29 @@ static int __devinit ad7150_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret = 0, regdone = 0;
- struct ad7150_chip_info *chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- if (chip == NULL) {
+ struct ad7150_chip_info *chip;
+ struct iio_dev *indio_dev;
+
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
-
+ chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
- i2c_set_clientdata(client, chip);
+ i2c_set_clientdata(client, indio_dev);
chip->client = client;
- chip->indio_dev = iio_allocate_device(0);
- if (chip->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_chip;
- }
-
/* Establish that the iio_dev is a child of the i2c device */
- chip->indio_dev->name = id->name;
- chip->indio_dev->dev.parent = &client->dev;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
- chip->indio_dev->info = &ad7150_info;
- chip->indio_dev->dev_data = (void *)(chip);
+ indio_dev->info = &ad7150_info;
- chip->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(chip->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
regdone = 1;
@@ -752,7 +747,7 @@ static int __devinit ad7150_probe(struct i2c_client *client,
IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING,
"ad7150",
- chip->indio_dev);
+ indio_dev);
if (ret)
goto error_free_dev;
}
@@ -763,24 +758,20 @@ static int __devinit ad7150_probe(struct i2c_client *client,
error_free_dev:
if (regdone)
- iio_device_unregister(chip->indio_dev);
+ iio_device_unregister(indio_dev);
else
- iio_free_device(chip->indio_dev);
-error_free_chip:
- kfree(chip);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
static int __devexit ad7150_remove(struct i2c_client *client)
{
- struct ad7150_chip_info *chip = i2c_get_clientdata(client);
- struct iio_dev *indio_dev = chip->indio_dev;
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
if (client->irq)
free_irq(client->irq, indio_dev);
iio_device_unregister(indio_dev);
- kfree(chip);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7152.c b/drivers/staging/iio/adc/ad7152.c
index 7a38bcbbe1a..21f5f380fb5 100644
--- a/drivers/staging/iio/adc/ad7152.c
+++ b/drivers/staging/iio/adc/ad7152.c
@@ -51,7 +51,6 @@
struct ad7152_chip_info {
struct i2c_client *client;
- struct iio_dev *indio_dev;
u16 ch1_offset; /* Channel 1 offset calibration coefficient */
u16 ch1_gain; /* Channel 1 gain coefficient */
u8 ch1_setup;
@@ -166,7 +165,7 @@ static ssize_t ad7152_show_ch1_value(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
u8 data[2];
ad7152_i2c_read(chip, AD7152_CH1_DATA_HIGH, data, 2);
@@ -180,7 +179,7 @@ static ssize_t ad7152_show_ch2_value(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
u8 data[2];
ad7152_i2c_read(chip, AD7152_CH2_DATA_HIGH, data, 2);
@@ -194,7 +193,7 @@ static ssize_t ad7152_show_conversion_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%s\n", chip->conversion_mode);
}
@@ -205,7 +204,7 @@ static ssize_t ad7152_store_conversion_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
u8 cfg;
int i;
@@ -234,7 +233,7 @@ static ssize_t ad7152_show_ch1_offset(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", chip->ch1_offset);
}
@@ -245,7 +244,7 @@ static ssize_t ad7152_store_ch1_offset(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -270,7 +269,7 @@ static ssize_t ad7152_show_ch2_offset(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", chip->ch2_offset);
}
@@ -281,7 +280,7 @@ static ssize_t ad7152_store_ch2_offset(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -306,7 +305,7 @@ static ssize_t ad7152_show_ch1_gain(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", chip->ch1_gain);
}
@@ -317,7 +316,7 @@ static ssize_t ad7152_store_ch1_gain(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -342,7 +341,7 @@ static ssize_t ad7152_show_ch2_gain(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", chip->ch2_gain);
}
@@ -353,7 +352,7 @@ static ssize_t ad7152_store_ch2_gain(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -378,7 +377,7 @@ static ssize_t ad7152_show_ch1_setup(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "0x%02x\n", chip->ch1_setup);
}
@@ -389,7 +388,7 @@ static ssize_t ad7152_store_ch1_setup(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -413,7 +412,7 @@ static ssize_t ad7152_show_ch2_setup(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "0x%02x\n", chip->ch2_setup);
}
@@ -424,7 +423,7 @@ static ssize_t ad7152_store_ch2_setup(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -448,7 +447,7 @@ static ssize_t ad7152_show_filter_rate_setup(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "0x%02x\n", chip->filter_rate_setup);
}
@@ -459,7 +458,7 @@ static ssize_t ad7152_store_filter_rate_setup(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7152_chip_info *chip = dev_info->dev_data;
+ struct ad7152_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -509,31 +508,27 @@ static int __devinit ad7152_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret = 0;
- struct ad7152_chip_info *chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- if (chip == NULL) {
+ struct ad7152_chip_info *chip;
+ struct iio_dev *indio_dev;
+
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
-
+ chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
- i2c_set_clientdata(client, chip);
+ i2c_set_clientdata(client, indio_dev);
chip->client = client;
- chip->indio_dev = iio_allocate_device(0);
- if (chip->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_chip;
- }
-
/* Establish that the iio_dev is a child of the i2c device */
- chip->indio_dev->name = id->name;
- chip->indio_dev->dev.parent = &client->dev;
- chip->indio_dev->info = &ad7152_info;
- chip->indio_dev->dev_data = (void *)(chip);
- chip->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &ad7152_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(chip->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
@@ -542,20 +537,16 @@ static int __devinit ad7152_probe(struct i2c_client *client,
return 0;
error_free_dev:
- iio_free_device(chip->indio_dev);
-error_free_chip:
- kfree(chip);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
static int __devexit ad7152_remove(struct i2c_client *client)
{
- struct ad7152_chip_info *chip = i2c_get_clientdata(client);
- struct iio_dev *indio_dev = chip->indio_dev;
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
- kfree(chip);
return 0;
}
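The simplified error ladders are the other half of the change: with a single allocation instead of three, the unwind collapses to one iio_free_device(). Outline (probe name illustrative; this mirrors the ad7152 ladder above):

static int __devinit example_i2c_probe(struct i2c_client *client,
				       const struct i2c_device_id *id)
{
	struct ad7152_chip_info *chip;
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = iio_allocate_device(sizeof(*chip));
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	chip = iio_priv(indio_dev);
	i2c_set_clientdata(client, indio_dev);
	chip->client = client;

	indio_dev->name = id->name;
	indio_dev->dev.parent = &client->dev;
	indio_dev->info = &ad7152_info;
	indio_dev->modes = INDIO_DIRECT_MODE;

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_free_dev;
	return 0;

error_free_dev:
	iio_free_device(indio_dev);	/* the only undo still needed */
error_ret:
	return ret;
}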
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index 1be3453479b..96cbb17bc2c 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -61,7 +61,6 @@
struct ad7291_chip_info {
struct i2c_client *client;
- struct iio_dev *indio_dev;
u16 command;
u8 channels; /* Active voltage channels */
};
@@ -157,7 +156,7 @@ static ssize_t ad7291_show_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
if (chip->command & AD7291_AUTOCYCLE)
return sprintf(buf, "autocycle\n");
@@ -171,7 +170,7 @@ static ssize_t ad7291_store_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
u16 command;
int ret;
@@ -208,7 +207,7 @@ static ssize_t ad7291_store_reset(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
u16 command;
int ret;
@@ -231,7 +230,7 @@ static ssize_t ad7291_show_ext_ref(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", !!(chip->command & AD7291_EXT_REF));
}
@@ -242,7 +241,7 @@ static ssize_t ad7291_store_ext_ref(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
u16 command;
int ret;
@@ -269,7 +268,7 @@ static ssize_t ad7291_show_noise_delay(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", !!(chip->command & AD7291_NOISE_DELAY));
}
@@ -280,7 +279,7 @@ static ssize_t ad7291_store_noise_delay(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
u16 command;
int ret;
@@ -307,7 +306,7 @@ static ssize_t ad7291_show_t_sense(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
u16 data;
char sign = ' ';
int ret;
@@ -334,7 +333,7 @@ static ssize_t ad7291_show_t_average(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
u16 data;
char sign = ' ';
int ret;
@@ -361,7 +360,7 @@ static ssize_t ad7291_show_voltage(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
u16 data[AD7291_VOLTAGE_LIMIT_COUNT];
int i, size, ret;
@@ -390,7 +389,7 @@ static ssize_t ad7291_show_channel_mask(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "0x%x\n", (chip->command & AD7291_VOLTAGE_MASK) >>
AD7291_VOLTAGE_OFFSET);
@@ -402,7 +401,7 @@ static ssize_t ad7291_store_channel_mask(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
u16 command;
unsigned long data;
int i, ret;
@@ -457,7 +456,7 @@ static const struct attribute_group ad7291_attribute_group = {
static irqreturn_t ad7291_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
- struct ad7291_chip_info *chip = iio_dev_get_devdata(private);
+ struct ad7291_chip_info *chip = iio_priv(private);
u16 t_status, v_status;
u16 command;
int i;
@@ -532,7 +531,7 @@ static inline ssize_t ad7291_show_t_bound(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
u16 data;
char sign = ' ';
@@ -560,7 +559,7 @@ static inline ssize_t ad7291_set_t_bound(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
long tmp1, tmp2;
u16 data;
@@ -608,7 +607,7 @@ static inline ssize_t ad7291_show_v_bound(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
u16 data;
int ret;
@@ -633,7 +632,7 @@ static inline ssize_t ad7291_set_v_bound(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7291_chip_info *chip = dev_info->dev_data;
+ struct ad7291_chip_info *chip = iio_priv(dev_info);
unsigned long value;
u16 data;
int ret;
@@ -792,32 +791,27 @@ static int __devinit ad7291_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ad7291_chip_info *chip;
+ struct iio_dev *indio_dev;
int ret = 0;
- chip = kzalloc(sizeof(struct ad7291_chip_info), GFP_KERNEL);
-
- if (chip == NULL)
- return -ENOMEM;
-
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
- i2c_set_clientdata(client, chip);
+ i2c_set_clientdata(client, indio_dev);
chip->client = client;
chip->command = AD7291_NOISE_DELAY | AD7291_T_SENSE_MASK;
- chip->indio_dev = iio_allocate_device(0);
- if (chip->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_chip;
- }
-
- chip->indio_dev->name = id->name;
- chip->indio_dev->dev.parent = &client->dev;
- chip->indio_dev->info = &ad7291_info;
- chip->indio_dev->dev_data = (void *)chip;
- chip->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &ad7291_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(chip->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
@@ -827,7 +821,7 @@ static int __devinit ad7291_probe(struct i2c_client *client,
&ad7291_event_handler,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
id->name,
- chip->indio_dev);
+ indio_dev);
if (ret)
goto error_unreg_dev;
@@ -847,27 +841,23 @@ static int __devinit ad7291_probe(struct i2c_client *client,
return 0;
error_unreg_irq:
- free_irq(client->irq, chip->indio_dev);
+ free_irq(client->irq, indio_dev);
error_unreg_dev:
- iio_device_unregister(chip->indio_dev);
+ iio_device_unregister(indio_dev);
error_free_dev:
- iio_free_device(chip->indio_dev);
-error_free_chip:
- kfree(chip);
-
+ iio_free_device(indio_dev);
+error_ret:
return ret;
}
static int __devexit ad7291_remove(struct i2c_client *client)
{
- struct ad7291_chip_info *chip = i2c_get_clientdata(client);
- struct iio_dev *indio_dev = chip->indio_dev;
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
if (client->irq)
- free_irq(client->irq, chip->indio_dev);
+ free_irq(client->irq, indio_dev);
iio_device_unregister(indio_dev);
- iio_free_device(chip->indio_dev);
- kfree(chip);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7314.c b/drivers/staging/iio/adc/ad7314.c
index 98bb16fcff2..9070d9cac72 100644
--- a/drivers/staging/iio/adc/ad7314.c
+++ b/drivers/staging/iio/adc/ad7314.c
@@ -43,7 +43,6 @@
struct ad7314_chip_info {
struct spi_device *spi_dev;
- struct iio_dev *indio_dev;
s64 last_timestamp;
u8 mode;
};
@@ -87,7 +86,7 @@ static ssize_t ad7314_show_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7314_chip_info *chip = dev_info->dev_data;
+ struct ad7314_chip_info *chip = iio_priv(dev_info);
if (chip->mode)
return sprintf(buf, "power-save\n");
@@ -101,7 +100,7 @@ static ssize_t ad7314_store_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7314_chip_info *chip = dev_info->dev_data;
+ struct ad7314_chip_info *chip = iio_priv(dev_info);
u16 mode = 0;
int ret;
@@ -136,7 +135,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7314_chip_info *chip = dev_info->dev_data;
+ struct ad7314_chip_info *chip = iio_priv(dev_info);
u16 data;
char sign = ' ';
int ret;
@@ -202,54 +201,45 @@ static const struct iio_info ad7314_info = {
static int __devinit ad7314_probe(struct spi_device *spi_dev)
{
struct ad7314_chip_info *chip;
+ struct iio_dev *indio_dev;
int ret = 0;
- chip = kzalloc(sizeof(struct ad7314_chip_info), GFP_KERNEL);
-
- if (chip == NULL)
- return -ENOMEM;
-
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
- dev_set_drvdata(&spi_dev->dev, chip);
+ dev_set_drvdata(&spi_dev->dev, indio_dev);
chip->spi_dev = spi_dev;
- chip->indio_dev = iio_allocate_device(0);
- if (chip->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_chip;
- }
+ indio_dev->name = spi_get_device_id(spi_dev)->name;
+ indio_dev->dev.parent = &spi_dev->dev;
+ indio_dev->info = &ad7314_info;
- chip->indio_dev->name = spi_get_device_id(spi_dev)->name;
- chip->indio_dev->dev.parent = &spi_dev->dev;
- chip->indio_dev->info = &ad7314_info;
- chip->indio_dev->dev_data = (void *)chip;
-
- ret = iio_device_register(chip->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
dev_info(&spi_dev->dev, "%s temperature sensor registered.\n",
- chip->indio_dev->name);
+ indio_dev->name);
return 0;
error_free_dev:
- iio_free_device(chip->indio_dev);
-error_free_chip:
- kfree(chip);
-
+ iio_free_device(indio_dev);
+error_ret:
return ret;
}
static int __devexit ad7314_remove(struct spi_device *spi_dev)
{
- struct ad7314_chip_info *chip = dev_get_drvdata(&spi_dev->dev);
- struct iio_dev *indio_dev = chip->indio_dev;
+ struct iio_dev *indio_dev = dev_get_drvdata(&spi_dev->dev);
dev_set_drvdata(&spi_dev->dev, NULL);
iio_device_unregister(indio_dev);
- iio_free_device(chip->indio_dev);
- kfree(chip);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7476.h b/drivers/staging/iio/adc/ad7476.h
index 01a70211f4f..0d44976e846 100644
--- a/drivers/staging/iio/adc/ad7476.h
+++ b/drivers/staging/iio/adc/ad7476.h
@@ -24,7 +24,6 @@ struct ad7476_chip_info {
};
struct ad7476_state {
- struct iio_dev *indio_dev;
struct spi_device *spi;
const struct ad7476_chip_info *chip_info;
struct regulator *reg;
@@ -51,11 +50,11 @@ enum ad7476_supported_device_ids {
};
#ifdef CONFIG_IIO_RING_BUFFER
-int ad7476_scan_from_ring(struct ad7476_state *st);
+int ad7476_scan_from_ring(struct iio_dev *indio_dev);
int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7476_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_IIO_RING_BUFFER */
-static inline int ad7476_scan_from_ring(struct ad7476_state *st)
+static inline int ad7476_scan_from_ring(struct iio_dev *indio_dev)
{
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7476_core.c b/drivers/staging/iio/adc/ad7476_core.c
index 50cedb42283..c21089894d2 100644
--- a/drivers/staging/iio/adc/ad7476_core.c
+++ b/drivers/staging/iio/adc/ad7476_core.c
@@ -39,14 +39,14 @@ static int ad7476_read_raw(struct iio_dev *dev_info,
long m)
{
int ret;
- struct ad7476_state *st = dev_info->dev_data;
+ struct ad7476_state *st = iio_priv(dev_info);
unsigned int scale_uv;
switch (m) {
case 0:
mutex_lock(&dev_info->mlock);
if (iio_ring_enabled(dev_info))
- ret = ad7476_scan_from_ring(st);
+ ret = ad7476_scan_from_ring(dev_info);
else
ret = ad7476_scan_direct(st);
mutex_unlock(&dev_info->mlock);
@@ -127,23 +127,26 @@ static int __devinit ad7476_probe(struct spi_device *spi)
{
struct ad7476_platform_data *pdata = spi->dev.platform_data;
struct ad7476_state *st;
+ struct iio_dev *indio_dev;
int ret, voltage_uv = 0;
+ bool reg_done = false;
+ struct regulator *reg;
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
-
- st->reg = regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
+ st = iio_priv(indio_dev);
+ reg = regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(reg)) {
+ ret = regulator_enable(reg);
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(st->reg);
+ voltage_uv = regulator_get_voltage(reg);
}
-
+ st->reg = reg;
st->chip_info =
&ad7476_chip_info_tbl[spi_get_device_id(spi)->driver_data];
@@ -156,24 +159,17 @@ static int __devinit ad7476_probe(struct spi_device *spi)
else
dev_warn(&spi->dev, "reference voltage unspecified\n");
- spi_set_drvdata(spi, st);
+ spi_set_drvdata(spi, indio_dev);
st->spi = spi;
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_disable_reg;
- }
-
/* Establish that the iio_dev is a child of the spi device */
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->name = spi_get_device_id(spi)->name;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
- st->indio_dev->channels = st->chip_info->channel;
- st->indio_dev->num_channels = 2;
- st->indio_dev->info = &ad7476_info;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->chip_info->channel;
+ indio_dev->num_channels = 2;
+ indio_dev->info = &ad7476_info;
/* Setup default message */
st->xfer.rx_buf = &st->data;
@@ -182,15 +178,15 @@ static int __devinit ad7476_probe(struct spi_device *spi)
spi_message_init(&st->msg);
spi_message_add_tail(&st->xfer, &st->msg);
- ret = ad7476_register_ring_funcs_and_init(st->indio_dev);
+ ret = ad7476_register_ring_funcs_and_init(indio_dev);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
- ret = iio_ring_buffer_register_ex(st->indio_dev->ring, 0,
+ ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
st->chip_info->channel,
ARRAY_SIZE(st->chip_info->channel));
if (ret)
@@ -198,33 +194,35 @@ static int __devinit ad7476_probe(struct spi_device *spi)
return 0;
error_cleanup_ring:
- ad7476_ring_cleanup(st->indio_dev);
- iio_device_unregister(st->indio_dev);
-error_free_device:
- iio_free_device(st->indio_dev);
+ ad7476_ring_cleanup(indio_dev);
+ iio_device_unregister(indio_dev);
error_disable_reg:
- if (!IS_ERR(st->reg))
+ if (!IS_ERR(reg))
regulator_disable(st->reg);
error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
- kfree(st);
+ if (!IS_ERR(reg))
+ regulator_put(reg);
+ if (!reg_done)
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
static int ad7476_remove(struct spi_device *spi)
{
- struct ad7476_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad7476_state *st = iio_priv(indio_dev);
+ /* copy needed as st will have been freed */
+ struct regulator *reg = st->reg;
+
iio_ring_buffer_unregister(indio_dev->ring);
ad7476_ring_cleanup(indio_dev);
iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg)) {
- regulator_disable(st->reg);
- regulator_put(st->reg);
+ if (!IS_ERR(reg)) {
+ regulator_disable(reg);
+ regulator_put(reg);
}
- kfree(st);
+
return 0;
}
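One ordering subtlety shows up in the remove paths: in this generation of the core, unregistering appears to release the iio_dev and with it the iio_priv() area (hence the "copy needed as st will have been freed" comment above, and the regdone guards in several probes). A hedged sketch of the resulting ordering, reusing the ad7476 names:

static int example_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);
	struct ad7476_state *st = iio_priv(indio_dev);
	struct regulator *reg = st->reg;	/* take a copy while st is valid */

	iio_ring_buffer_unregister(indio_dev->ring);
	ad7476_ring_cleanup(indio_dev);
	iio_device_unregister(indio_dev);	/* do not touch st after this */
	if (!IS_ERR(reg)) {
		regulator_disable(reg);
		regulator_put(reg);
	}
	return 0;
}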
diff --git a/drivers/staging/iio/adc/ad7476_ring.c b/drivers/staging/iio/adc/ad7476_ring.c
index b1b2ee2c56b..a92fc5a1a60 100644
--- a/drivers/staging/iio/adc/ad7476_ring.c
+++ b/drivers/staging/iio/adc/ad7476_ring.c
@@ -22,9 +22,9 @@
#include "ad7476.h"
-int ad7476_scan_from_ring(struct ad7476_state *st)
+int ad7476_scan_from_ring(struct iio_dev *indio_dev)
{
- struct iio_ring_buffer *ring = st->indio_dev->ring;
+ struct iio_ring_buffer *ring = indio_dev->ring;
int ret;
u8 *ring_data;
@@ -55,7 +55,7 @@ error_ret:
**/
static int ad7476_ring_preenable(struct iio_dev *indio_dev)
{
- struct ad7476_state *st = indio_dev->dev_data;
+ struct ad7476_state *st = iio_priv(indio_dev);
struct iio_ring_buffer *ring = indio_dev->ring;
st->d_size = ring->scan_count *
@@ -79,7 +79,7 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->private_data;
- struct ad7476_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad7476_state *st = iio_priv(indio_dev);
s64 time_ns;
__u8 *rxbuf;
int b_sent;
@@ -115,7 +115,7 @@ static const struct iio_ring_setup_ops ad7476_ring_setup_ops = {
int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
- struct ad7476_state *st = indio_dev->dev_data;
+ struct ad7476_state *st = iio_priv(indio_dev);
int ret = 0;
indio_dev->ring = iio_sw_rb_allocate(indio_dev);
diff --git a/drivers/staging/iio/adc/ad7745.c b/drivers/staging/iio/adc/ad7745.c
index 1944223ef16..4c13f26aa9a 100644
--- a/drivers/staging/iio/adc/ad7745.c
+++ b/drivers/staging/iio/adc/ad7745.c
@@ -54,7 +54,6 @@
struct ad774x_chip_info {
struct i2c_client *client;
- struct iio_dev *indio_dev;
bool inter;
u16 cap_offs; /* Capacitive offset */
u16 cap_gain; /* Capacitive gain calibration */
@@ -169,7 +168,7 @@ static ssize_t ad774x_show_conversion_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%s\n", chip->conversion_mode);
}
@@ -180,7 +179,7 @@ static ssize_t ad774x_store_conversion_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
u8 cfg;
int i;
@@ -210,7 +209,7 @@ static ssize_t ad774x_show_dac_value(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
u8 data;
@@ -225,7 +224,7 @@ static ssize_t ad774x_store_dac_value(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
unsigned long data;
int ret;
@@ -256,7 +255,7 @@ static ssize_t ad774x_show_cap_setup(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "0x%02x\n", chip->cap_setup);
}
@@ -267,7 +266,7 @@ static ssize_t ad774x_store_cap_setup(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -291,7 +290,7 @@ static ssize_t ad774x_show_vt_setup(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "0x%02x\n", chip->vt_setup);
}
@@ -302,7 +301,7 @@ static ssize_t ad774x_store_vt_setup(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -326,7 +325,7 @@ static ssize_t ad774x_show_exec_setup(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "0x%02x\n", chip->exec_setup);
}
@@ -337,7 +336,7 @@ static ssize_t ad774x_store_exec_setup(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -361,7 +360,7 @@ static ssize_t ad774x_show_volt_gain(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", chip->volt_gain);
}
@@ -372,7 +371,7 @@ static ssize_t ad774x_store_volt_gain(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -397,7 +396,7 @@ static ssize_t ad774x_show_cap_data(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
unsigned long data;
char tmp[3];
@@ -414,7 +413,7 @@ static ssize_t ad774x_show_vt_data(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
unsigned long data;
char tmp[3];
@@ -431,7 +430,7 @@ static ssize_t ad774x_show_cap_offs(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", chip->cap_offs);
}
@@ -442,7 +441,7 @@ static ssize_t ad774x_store_cap_offs(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -467,7 +466,7 @@ static ssize_t ad774x_show_cap_gain(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", chip->cap_gain);
}
@@ -478,7 +477,7 @@ static ssize_t ad774x_store_cap_gain(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad774x_chip_info *chip = dev_info->dev_data;
+ struct ad774x_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -534,7 +533,7 @@ static const struct attribute_group ad774x_attribute_group = {
static irqreturn_t ad774x_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
- struct ad774x_chip_info *chip = iio_dev_get_devdata(indio_dev);
+ struct ad774x_chip_info *chip = iio_priv(indio_dev);
u8 int_status;
ad774x_i2c_read(chip, AD774X_STATUS, &int_status, 1);
@@ -579,31 +578,27 @@ static int __devinit ad774x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret = 0, regdone = 0;
- struct ad774x_chip_info *chip = kzalloc(sizeof(*chip), GFP_KERNEL);
- if (chip == NULL) {
+ struct ad774x_chip_info *chip;
+ struct iio_dev *indio_dev;
+
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
-
+ chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
- i2c_set_clientdata(client, chip);
+ i2c_set_clientdata(client, indio_dev);
chip->client = client;
- chip->indio_dev = iio_allocate_device(0);
- if (chip->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_chip;
- }
-
/* Establish that the iio_dev is a child of the i2c device */
- chip->indio_dev->name = id->name;
- chip->indio_dev->dev.parent = &client->dev;
- chip->indio_dev->info = &ad774x_info;
- chip->indio_dev->dev_data = (void *)(chip);
- chip->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &ad774x_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(chip->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
regdone = 1;
@@ -614,7 +609,7 @@ static int __devinit ad774x_probe(struct i2c_client *client,
&ad774x_event_handler,
IRQF_TRIGGER_FALLING,
"ad774x",
- chip->indio_dev);
+ indio_dev);
if (ret)
goto error_free_dev;
}
@@ -625,24 +620,20 @@ static int __devinit ad774x_probe(struct i2c_client *client,
error_free_dev:
if (regdone)
- free_irq(client->irq, chip->indio_dev);
+ free_irq(client->irq, indio_dev);
else
- iio_free_device(chip->indio_dev);
-error_free_chip:
- kfree(chip);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
static int __devexit ad774x_remove(struct i2c_client *client)
{
- struct ad774x_chip_info *chip = i2c_get_clientdata(client);
- struct iio_dev *indio_dev = chip->indio_dev;
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
if (client->irq)
free_irq(client->irq, indio_dev);
iio_device_unregister(indio_dev);
- kfree(chip);
return 0;
}
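
The ad774x hunks above, and the ad7816/adt7310/adt7410/adt75 hunks further down, all apply the same conversion: the chip state is no longer kzalloc()ed separately and reached through dev_data, but embedded in the iio_dev allocation and fetched back with iio_priv(). A minimal sketch of the resulting probe/remove shape, using only calls that already appear in this patch; the "foo" names are hypothetical placeholders, not part of the patch:

/* Illustrative sketch only; "foo" is a placeholder driver name. */
#include <linux/i2c.h>
#include <linux/module.h>
#include "../iio.h"		/* staging IIO header, as used by the drivers above */

struct foo_chip_info {
	struct i2c_client *client;
	u8 config;
};

static const struct iio_info foo_info = {
	.driver_module = THIS_MODULE,
};

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct iio_dev *indio_dev;
	struct foo_chip_info *chip;
	int ret;

	/* state is allocated together with the iio_dev... */
	indio_dev = iio_allocate_device(sizeof(*chip));
	if (indio_dev == NULL)
		return -ENOMEM;

	/* ...and retrieved through iio_priv() instead of dev_data */
	chip = iio_priv(indio_dev);
	chip->client = client;

	/* only the iio_dev is kept as driver data, for removal purposes */
	i2c_set_clientdata(client, indio_dev);

	indio_dev->name = id->name;
	indio_dev->dev.parent = &client->dev;
	indio_dev->info = &foo_info;
	indio_dev->modes = INDIO_DIRECT_MODE;

	ret = iio_device_register(indio_dev);
	if (ret)
		iio_free_device(indio_dev);	/* frees the embedded state too */
	return ret;
}

static int foo_remove(struct i2c_client *client)
{
	struct iio_dev *indio_dev = i2c_get_clientdata(client);

	/* mirrors ad774x_remove() above: no separate kfree() of the state */
	iio_device_unregister(indio_dev);
	return 0;
}
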
diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c
new file mode 100644
index 00000000000..90f6c039d6c
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7793.c
@@ -0,0 +1,987 @@
+/*
+ * AD7792/AD7793 SPI ADC driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../ring_generic.h"
+#include "../ring_sw.h"
+#include "../trigger.h"
+#include "adc.h"
+
+#include "ad7793.h"
+
+/* NOTE:
+ * The AD7792/AD7793 features a dual use data out ready DOUT/RDY output.
+ * In order to avoid contentions on the SPI bus, it's therefore necessary
+ * to use spi bus locking.
+ *
+ * The DOUT/RDY output must also be wired to an interrupt capable GPIO.
+ */
+
+struct ad7793_chip_info {
+ struct iio_chan_spec channel[7];
+};
+
+struct ad7793_state {
+ struct spi_device *spi;
+ struct iio_trigger *trig;
+ const struct ad7793_chip_info *chip_info;
+ struct regulator *reg;
+ struct ad7793_platform_data *pdata;
+ wait_queue_head_t wq_data_avail;
+ bool done;
+ bool irq_dis;
+ u16 int_vref_mv;
+ u16 mode;
+ u16 conf;
+ u32 scale_avail[8][2];
+ u32 available_scan_masks[7];
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ u8 data[4] ____cacheline_aligned;
+};
+
+enum ad7793_supported_device_ids {
+ ID_AD7792,
+ ID_AD7793,
+};
+
+static int __ad7793_write_reg(struct ad7793_state *st, bool locked,
+ bool cs_change, unsigned char reg,
+ unsigned size, unsigned val)
+{
+ u8 *data = st->data;
+ struct spi_transfer t = {
+ .tx_buf = data,
+ .len = size + 1,
+ .cs_change = cs_change,
+ };
+ struct spi_message m;
+
+ data[0] = AD7793_COMM_WRITE | AD7793_COMM_ADDR(reg);
+
+ switch (size) {
+ case 3:
+ data[1] = val >> 16;
+ data[2] = val >> 8;
+ data[3] = val;
+ break;
+ case 2:
+ data[1] = val >> 8;
+ data[2] = val;
+ break;
+ case 1:
+ data[1] = val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+
+ if (locked)
+ return spi_sync_locked(st->spi, &m);
+ else
+ return spi_sync(st->spi, &m);
+}
+
+static int ad7793_write_reg(struct ad7793_state *st,
+ unsigned reg, unsigned size, unsigned val)
+{
+ return __ad7793_write_reg(st, false, false, reg, size, val);
+}
+
+static int __ad7793_read_reg(struct ad7793_state *st, bool locked,
+ bool cs_change, unsigned char reg,
+ int *val, unsigned size)
+{
+ u8 *data = st->data;
+ int ret;
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = data,
+ .len = 1,
+ }, {
+ .rx_buf = data,
+ .len = size,
+ .cs_change = cs_change,
+ },
+ };
+ struct spi_message m;
+
+ data[0] = AD7793_COMM_READ | AD7793_COMM_ADDR(reg);
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ if (locked)
+ ret = spi_sync_locked(st->spi, &m);
+ else
+ ret = spi_sync(st->spi, &m);
+
+ if (ret < 0)
+ return ret;
+
+ switch (size) {
+ case 3:
+ *val = data[0] << 16 | data[1] << 8 | data[2];
+ break;
+ case 2:
+ *val = data[0] << 8 | data[1];
+ break;
+ case 1:
+ *val = data[0];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ad7793_read_reg(struct ad7793_state *st,
+ unsigned reg, int *val, unsigned size)
+{
+ return __ad7793_read_reg(st, 0, 0, reg, val, size);
+}
+
+static int ad7793_read(struct ad7793_state *st, unsigned ch,
+ unsigned len, int *val)
+{
+ int ret;
+ st->conf = (st->conf & ~AD7793_CONF_CHAN(-1)) | AD7793_CONF_CHAN(ch);
+ st->mode = (st->mode & ~AD7793_MODE_SEL(-1)) |
+ AD7793_MODE_SEL(AD7793_MODE_SINGLE);
+
+ ad7793_write_reg(st, AD7793_REG_CONF, sizeof(st->conf), st->conf);
+
+ spi_bus_lock(st->spi->master);
+ st->done = false;
+
+ ret = __ad7793_write_reg(st, 1, 1, AD7793_REG_MODE,
+ sizeof(st->mode), st->mode);
+ if (ret < 0)
+ goto out;
+
+ st->irq_dis = false;
+ enable_irq(st->spi->irq);
+ wait_event_interruptible(st->wq_data_avail, st->done);
+
+ ret = __ad7793_read_reg(st, 1, 0, AD7793_REG_DATA, val, len);
+out:
+ spi_bus_unlock(st->spi->master);
+
+ return ret;
+}
+
+static int ad7793_calibrate(struct ad7793_state *st, unsigned mode, unsigned ch)
+{
+ int ret;
+
+ st->conf = (st->conf & ~AD7793_CONF_CHAN(-1)) | AD7793_CONF_CHAN(ch);
+ st->mode = (st->mode & ~AD7793_MODE_SEL(-1)) | AD7793_MODE_SEL(mode);
+
+ ad7793_write_reg(st, AD7793_REG_CONF, sizeof(st->conf), st->conf);
+
+ spi_bus_lock(st->spi->master);
+ st->done = false;
+
+ ret = __ad7793_write_reg(st, 1, 1, AD7793_REG_MODE,
+ sizeof(st->mode), st->mode);
+ if (ret < 0)
+ goto out;
+
+ st->irq_dis = false;
+ enable_irq(st->spi->irq);
+ wait_event_interruptible(st->wq_data_avail, st->done);
+
+ st->mode = (st->mode & ~AD7793_MODE_SEL(-1)) |
+ AD7793_MODE_SEL(AD7793_MODE_IDLE);
+
+ ret = __ad7793_write_reg(st, 1, 0, AD7793_REG_MODE,
+ sizeof(st->mode), st->mode);
+out:
+ spi_bus_unlock(st->spi->master);
+
+ return ret;
+}
+
+static const u8 ad7793_calib_arr[6][2] = {
+ {AD7793_MODE_CAL_INT_ZERO, AD7793_CH_AIN1P_AIN1M},
+ {AD7793_MODE_CAL_INT_FULL, AD7793_CH_AIN1P_AIN1M},
+ {AD7793_MODE_CAL_INT_ZERO, AD7793_CH_AIN2P_AIN2M},
+ {AD7793_MODE_CAL_INT_FULL, AD7793_CH_AIN2P_AIN2M},
+ {AD7793_MODE_CAL_INT_ZERO, AD7793_CH_AIN3P_AIN3M},
+ {AD7793_MODE_CAL_INT_FULL, AD7793_CH_AIN3P_AIN3M}
+};
+
+static int ad7793_calibrate_all(struct ad7793_state *st)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(ad7793_calib_arr); i++) {
+ ret = ad7793_calibrate(st, ad7793_calib_arr[i][0],
+ ad7793_calib_arr[i][1]);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ dev_err(&st->spi->dev, "Calibration failed\n");
+ return ret;
+}
+
+static int ad7793_setup(struct ad7793_state *st)
+{
+ int i, ret = -1;
+ unsigned long long scale_uv;
+ u32 id;
+
+ /* reset the serial interface */
+ ret = spi_write(st->spi, (u8 *)&ret, sizeof(ret));
+ if (ret < 0)
+ goto out;
+ msleep(1); /* Wait for at least 500us */
+
+ /* write/read test for device presence */
+ ret = ad7793_read_reg(st, AD7793_REG_ID, &id, 1);
+ if (ret)
+ goto out;
+
+ id &= AD7793_ID_MASK;
+
+ if (!((id == AD7792_ID) || (id == AD7793_ID))) {
+ dev_err(&st->spi->dev, "device ID query failed\n");
+ goto out;
+ }
+
+ st->mode = (st->pdata->mode & ~AD7793_MODE_SEL(-1)) |
+ AD7793_MODE_SEL(AD7793_MODE_IDLE);
+ st->conf = st->pdata->conf & ~AD7793_CONF_CHAN(-1);
+
+ ret = ad7793_write_reg(st, AD7793_REG_MODE, sizeof(st->mode), st->mode);
+ if (ret)
+ goto out;
+
+ ret = ad7793_write_reg(st, AD7793_REG_CONF, sizeof(st->conf), st->conf);
+ if (ret)
+ goto out;
+
+ ret = ad7793_write_reg(st, AD7793_REG_IO,
+ sizeof(st->pdata->io), st->pdata->io);
+ if (ret)
+ goto out;
+
+ ret = ad7793_calibrate_all(st);
+ if (ret)
+ goto out;
+
+ /* Populate available ADC input ranges */
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) {
+ scale_uv = ((u64)st->int_vref_mv * 100000000)
+ >> (st->chip_info->channel[0].scan_type.realbits -
+ (!!(st->conf & AD7793_CONF_UNIPOLAR) ? 0 : 1));
+ scale_uv >>= i;
+
+ st->scale_avail[i][1] = do_div(scale_uv, 100000000) * 10;
+ st->scale_avail[i][0] = scale_uv;
+ }
+
+ return 0;
+out:
+ dev_err(&st->spi->dev, "setup failed\n");
+ return ret;
+}
+
+static int ad7793_scan_from_ring(struct ad7793_state *st, unsigned ch, int *val)
+{
+ struct iio_ring_buffer *ring = iio_priv_to_dev(st)->ring;
+ int ret;
+ s64 dat64[2];
+ u32 *dat32 = (u32 *)dat64;
+
+ if (!(ring->scan_mask & (1 << ch)))
+ return -EBUSY;
+
+ ret = ring->access->read_last(ring, (u8 *) &dat64);
+ if (ret)
+ return ret;
+
+ *val = *dat32;
+
+ return 0;
+}
+
+static int ad7793_ring_preenable(struct iio_dev *indio_dev)
+{
+ struct ad7793_state *st = iio_priv(indio_dev);
+ struct iio_ring_buffer *ring = indio_dev->ring;
+ size_t d_size;
+ unsigned channel;
+
+ if (!ring->scan_count)
+ return -EINVAL;
+
+ channel = __ffs(ring->scan_mask);
+
+ d_size = ring->scan_count *
+ indio_dev->channels[0].scan_type.storagebits / 8;
+
+ if (ring->scan_timestamp) {
+ d_size += sizeof(s64);
+
+ if (d_size % sizeof(s64))
+ d_size += sizeof(s64) - (d_size % sizeof(s64));
+ }
+
+ if (indio_dev->ring->access->set_bytes_per_datum)
+ indio_dev->ring->access->set_bytes_per_datum(indio_dev->ring,
+ d_size);
+
+ st->mode = (st->mode & ~AD7793_MODE_SEL(-1)) |
+ AD7793_MODE_SEL(AD7793_MODE_CONT);
+ st->conf = (st->conf & ~AD7793_CONF_CHAN(-1)) |
+ AD7793_CONF_CHAN(indio_dev->channels[channel].address);
+
+ ad7793_write_reg(st, AD7793_REG_CONF, sizeof(st->conf), st->conf);
+
+ spi_bus_lock(st->spi->master);
+ __ad7793_write_reg(st, 1, 1, AD7793_REG_MODE,
+ sizeof(st->mode), st->mode);
+
+ st->irq_dis = false;
+ enable_irq(st->spi->irq);
+
+ return 0;
+}
+
+static int ad7793_ring_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad7793_state *st = iio_priv(indio_dev);
+
+ st->mode = (st->mode & ~AD7793_MODE_SEL(-1)) |
+ AD7793_MODE_SEL(AD7793_MODE_IDLE);
+
+ st->done = false;
+ wait_event_interruptible(st->wq_data_avail, st->done);
+
+ if (!st->irq_dis)
+ disable_irq_nosync(st->spi->irq);
+
+ __ad7793_write_reg(st, 1, 0, AD7793_REG_MODE,
+ sizeof(st->mode), st->mode);
+
+ return spi_bus_unlock(st->spi->master);
+}
+
+/**
+ * ad7793_trigger_handler() - bottom half of trigger-launched polling to the ring buffer
+ **/
+
+static irqreturn_t ad7793_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->private_data;
+ struct iio_ring_buffer *ring = indio_dev->ring;
+ struct ad7793_state *st = iio_priv(indio_dev);
+ s64 dat64[2];
+ s32 *dat32 = (s32 *)dat64;
+
+ if (ring->scan_count)
+ __ad7793_read_reg(st, 1, 1, AD7793_REG_DATA,
+ dat32,
+ indio_dev->channels[0].scan_type.realbits/8);
+
+ /* Guaranteed to be aligned to an 8 byte boundary */
+ if (ring->scan_timestamp)
+ dat64[1] = pf->timestamp;
+
+ ring->access->store_to(ring, (u8 *)dat64, pf->timestamp);
+
+ iio_trigger_notify_done(indio_dev->trig);
+ st->irq_dis = false;
+ enable_irq(st->spi->irq);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_ring_setup_ops ad7793_ring_setup_ops = {
+ .preenable = &ad7793_ring_preenable,
+ .postenable = &iio_triggered_ring_postenable,
+ .predisable = &iio_triggered_ring_predisable,
+ .postdisable = &ad7793_ring_postdisable,
+};
+
+static int ad7793_register_ring_funcs_and_init(struct iio_dev *indio_dev)
+{
+ int ret;
+
+ indio_dev->ring = iio_sw_rb_allocate(indio_dev);
+ if (!indio_dev->ring) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ /* Effectively select the ring buffer implementation */
+ indio_dev->ring->access = &ring_sw_access_funcs;
+ indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
+ &ad7793_trigger_handler,
+ IRQF_ONESHOT,
+ indio_dev,
+ "ad7793_consumer%d",
+ indio_dev->id);
+ if (indio_dev->pollfunc == NULL) {
+ ret = -ENOMEM;
+ goto error_deallocate_sw_rb;
+ }
+
+ /* Ring buffer functions - here trigger setup related */
+ indio_dev->ring->setup_ops = &ad7793_ring_setup_ops;
+
+ /* Flag that polled ring buffering is possible */
+ indio_dev->modes |= INDIO_RING_TRIGGERED;
+ return 0;
+
+error_deallocate_sw_rb:
+ iio_sw_rb_free(indio_dev->ring);
+error_ret:
+ return ret;
+}
+
+static void ad7793_ring_cleanup(struct iio_dev *indio_dev)
+{
+ /* ensure that the trigger has been detached */
+ if (indio_dev->trig) {
+ iio_put_trigger(indio_dev->trig);
+ iio_trigger_dettach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc);
+ }
+ iio_dealloc_pollfunc(indio_dev->pollfunc);
+ iio_sw_rb_free(indio_dev->ring);
+}
+
+/**
+ * ad7793_data_rdy_trig_poll() - the event handler for the data ready trigger
+ **/
+static irqreturn_t ad7793_data_rdy_trig_poll(int irq, void *private)
+{
+ struct ad7793_state *st = iio_priv(private);
+
+ st->done = true;
+ wake_up_interruptible(&st->wq_data_avail);
+ disable_irq_nosync(irq);
+ st->irq_dis = true;
+ iio_trigger_poll(st->trig, iio_get_time_ns());
+
+ return IRQ_HANDLED;
+}
+
+static int ad7793_probe_trigger(struct iio_dev *indio_dev)
+{
+ struct ad7793_state *st = iio_priv(indio_dev);
+ int ret;
+
+ st->trig = iio_allocate_trigger("%s-dev%d",
+ spi_get_device_id(st->spi)->name,
+ indio_dev->id);
+ if (st->trig == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+
+ ret = request_irq(st->spi->irq,
+ ad7793_data_rdy_trig_poll,
+ IRQF_TRIGGER_LOW,
+ spi_get_device_id(st->spi)->name,
+ indio_dev);
+ if (ret)
+ goto error_free_trig;
+
+ disable_irq_nosync(st->spi->irq);
+ st->irq_dis = true;
+ st->trig->dev.parent = &st->spi->dev;
+ st->trig->owner = THIS_MODULE;
+ st->trig->private_data = indio_dev;
+
+ ret = iio_trigger_register(st->trig);
+
+ /* select default trigger */
+ indio_dev->trig = st->trig;
+ if (ret)
+ goto error_free_irq;
+
+ return 0;
+
+error_free_irq:
+ free_irq(st->spi->irq, indio_dev);
+error_free_trig:
+ iio_free_trigger(st->trig);
+error_ret:
+ return ret;
+}
+
+static void ad7793_remove_trigger(struct iio_dev *indio_dev)
+{
+ struct ad7793_state *st = iio_priv(indio_dev);
+
+ iio_trigger_unregister(st->trig);
+ free_irq(st->spi->irq, indio_dev);
+ iio_free_trigger(st->trig);
+}
+
+static const u16 sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39, 33, 19,
+ 17, 16, 12, 10, 8, 6, 4};
+
+static ssize_t ad7793_read_frequency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7793_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n",
+ sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
+}
+
+static ssize_t ad7793_write_frequency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7793_state *st = iio_priv(indio_dev);
+ long lval;
+ int i, ret;
+
+ mutex_lock(&indio_dev->mlock);
+ if (iio_ring_enabled(indio_dev)) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EBUSY;
+ }
+ mutex_unlock(&indio_dev->mlock);
+
+ ret = strict_strtol(buf, 10, &lval);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(sample_freq_avail); i++)
+ if (lval == sample_freq_avail[i]) {
+ mutex_lock(&indio_dev->mlock);
+ st->mode &= ~AD7793_MODE_RATE(-1);
+ st->mode |= AD7793_MODE_RATE(i);
+ ad7793_write_reg(st, AD7793_REG_MODE,
+ sizeof(st->mode), st->mode);
+ mutex_unlock(&indio_dev->mlock);
+ ret = 0;
+ }
+
+ return ret ? ret : len;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+ ad7793_read_frequency,
+ ad7793_write_frequency);
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+ "470 242 123 62 50 39 33 19 17 16 12 10 8 6 4");
+
+static ssize_t ad7793_show_scale_available(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad7793_state *st = iio_priv(indio_dev);
+ int i, len = 0;
+
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
+ len += sprintf(buf + len, "%d.%09u ", st->scale_avail[i][0],
+ st->scale_avail[i][1]);
+
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, in-in_scale_available,
+ S_IRUGO, ad7793_show_scale_available, NULL, 0);
+
+static struct attribute *ad7793_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ad7793_attribute_group = {
+ .attrs = ad7793_attributes,
+};
+
+static int ad7793_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad7793_state *st = iio_priv(indio_dev);
+ int ret, smpl = 0;
+ unsigned long long scale_uv;
+ bool unipolar = !!(st->conf & AD7793_CONF_UNIPOLAR);
+
+ switch (m) {
+ case 0:
+ mutex_lock(&indio_dev->mlock);
+ if (iio_ring_enabled(indio_dev))
+ ret = ad7793_scan_from_ring(st,
+ chan->scan_index, &smpl);
+ else
+ ret = ad7793_read(st, chan->address,
+ chan->scan_type.realbits / 8, &smpl);
+ mutex_unlock(&indio_dev->mlock);
+
+ if (ret < 0)
+ return ret;
+
+ *val = (smpl >> chan->scan_type.shift) &
+ ((1 << (chan->scan_type.realbits)) - 1);
+
+ if (!unipolar)
+ *val -= (1 << (chan->scan_type.realbits - 1));
+
+ return IIO_VAL_INT;
+
+ case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ *val = st->scale_avail[(st->conf >> 8) & 0x7][0];
+ *val2 = st->scale_avail[(st->conf >> 8) & 0x7][1];
+
+ return IIO_VAL_INT_PLUS_NANO;
+
+ case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ switch (chan->type) {
+ case IIO_IN:
+ /* 1170mV / 2^23 * 6 */
+ scale_uv = (1170ULL * 100000000ULL * 6ULL)
+ >> (chan->scan_type.realbits -
+ (unipolar ? 0 : 1));
+ break;
+ case IIO_TEMP:
+ /* Always uses unity gain and internal ref */
+ scale_uv = (2500ULL * 100000000ULL)
+ >> (chan->scan_type.realbits -
+ (unipolar ? 0 : 1));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *val2 = do_div(scale_uv, 100000000) * 10;
+ *val = scale_uv;
+
+ return IIO_VAL_INT_PLUS_NANO;
+ }
+ return -EINVAL;
+}
+
+static int ad7793_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad7793_state *st = iio_priv(indio_dev);
+ int ret, i;
+ unsigned int tmp;
+
+ mutex_lock(&indio_dev->mlock);
+ if (iio_ring_enabled(indio_dev)) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EBUSY;
+ }
+
+ switch (mask) {
+ case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
+ if (val2 == st->scale_avail[i][1]) {
+ tmp = st->conf;
+ st->conf &= ~AD7793_CONF_GAIN(-1);
+ st->conf |= AD7793_CONF_GAIN(i);
+
+ if (tmp != st->conf) {
+ ad7793_write_reg(st, AD7793_REG_CONF,
+ sizeof(st->conf),
+ st->conf);
+ ad7793_calibrate_all(st);
+ }
+ ret = 0;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+}
+
+static int ad7793_validate_trigger(struct iio_dev *indio_dev,
+ struct iio_trigger *trig)
+{
+ if (indio_dev->trig != trig)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ad7793_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static const struct iio_info ad7793_info = {
+ .read_raw = &ad7793_read_raw,
+ .write_raw = &ad7793_write_raw,
+ .write_raw_get_fmt = &ad7793_write_raw_get_fmt,
+ .attrs = &ad7793_attribute_group,
+ .validate_trigger = ad7793_validate_trigger,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
+ [ID_AD7793] = {
+ .channel[0] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 0, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD7793_CH_AIN1P_AIN1M,
+ 0, IIO_ST('s', 24, 32, 0), 0),
+ .channel[1] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 1, 1,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD7793_CH_AIN2P_AIN2M,
+ 1, IIO_ST('s', 24, 32, 0), 0),
+ .channel[2] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 2, 2,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD7793_CH_AIN3P_AIN3M,
+ 2, IIO_ST('s', 24, 32, 0), 0),
+ .channel[3] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, "shorted", 0, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD7793_CH_AIN1M_AIN1M,
+ 3, IIO_ST('s', 24, 32, 0), 0),
+ .channel[4] = IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ AD7793_CH_TEMP,
+ 4, IIO_ST('s', 24, 32, 0), 0),
+ .channel[5] = IIO_CHAN(IIO_IN, 0, 1, 0, "supply", 4, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ AD7793_CH_AVDD_MONITOR,
+ 5, IIO_ST('s', 24, 32, 0), 0),
+ .channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6),
+ },
+ [ID_AD7792] = {
+ .channel[0] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 0, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD7793_CH_AIN1P_AIN1M,
+ 0, IIO_ST('s', 16, 32, 0), 0),
+ .channel[1] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 1, 1,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD7793_CH_AIN2P_AIN2M,
+ 1, IIO_ST('s', 16, 32, 0), 0),
+ .channel[2] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, NULL, 2, 2,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD7793_CH_AIN3P_AIN3M,
+ 2, IIO_ST('s', 16, 32, 0), 0),
+ .channel[3] = IIO_CHAN(IIO_IN_DIFF, 0, 1, 0, "shorted", 0, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD7793_CH_AIN1M_AIN1M,
+ 3, IIO_ST('s', 16, 32, 0), 0),
+ .channel[4] = IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ AD7793_CH_TEMP,
+ 4, IIO_ST('s', 16, 32, 0), 0),
+ .channel[5] = IIO_CHAN(IIO_IN, 0, 1, 0, "supply", 4, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ AD7793_CH_AVDD_MONITOR,
+ 5, IIO_ST('s', 16, 32, 0), 0),
+ .channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6),
+ },
+};
+
+static int __devinit ad7793_probe(struct spi_device *spi)
+{
+ struct ad7793_platform_data *pdata = spi->dev.platform_data;
+ struct ad7793_state *st;
+ struct iio_dev *indio_dev;
+ int ret, i, voltage_uv = 0, regdone = 0;
+
+ if (!pdata) {
+ dev_err(&spi->dev, "no platform data?\n");
+ return -ENODEV;
+ }
+
+ if (!spi->irq) {
+ dev_err(&spi->dev, "no IRQ?\n");
+ return -ENODEV;
+ }
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ st->reg = regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ goto error_put_reg;
+
+ voltage_uv = regulator_get_voltage(st->reg);
+ }
+
+ st->chip_info =
+ &ad7793_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+
+ st->pdata = pdata;
+
+ if (pdata && pdata->vref_mv)
+ st->int_vref_mv = pdata->vref_mv;
+ else if (voltage_uv)
+ st->int_vref_mv = voltage_uv / 1000;
+ else
+ st->int_vref_mv = 2500; /* Built-in ref */
+
+ spi_set_drvdata(spi, indio_dev);
+ st->spi = spi;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->chip_info->channel;
+ indio_dev->available_scan_masks = st->available_scan_masks;
+ indio_dev->num_channels = 7;
+ indio_dev->info = &ad7793_info;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ st->available_scan_masks[i] = (1 << i) | (1 <<
+ indio_dev->channels[indio_dev->num_channels - 1].
+ scan_index);
+
+ init_waitqueue_head(&st->wq_data_avail);
+
+ ret = ad7793_register_ring_funcs_and_init(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_unreg_ring;
+ regdone = 1;
+
+ ret = ad7793_probe_trigger(indio_dev);
+ if (ret)
+ goto error_unreg_ring;
+
+ ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
+ indio_dev->channels,
+ indio_dev->num_channels);
+ if (ret)
+ goto error_remove_trigger;
+
+ ret = ad7793_setup(st);
+ if (ret)
+ goto error_uninitialize_ring;
+
+ return 0;
+
+error_uninitialize_ring:
+ iio_ring_buffer_unregister(indio_dev->ring);
+error_remove_trigger:
+ ad7793_remove_trigger(indio_dev);
+error_unreg_ring:
+ ad7793_ring_cleanup(indio_dev);
+error_disable_reg:
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+error_put_reg:
+ if (!IS_ERR(st->reg))
+ regulator_put(st->reg);
+
+ if (regdone)
+ iio_device_unregister(indio_dev);
+ else
+ iio_free_device(indio_dev);
+
+ return ret;
+}
+
+static int ad7793_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad7793_state *st = iio_priv(indio_dev);
+
+ iio_ring_buffer_unregister(indio_dev->ring);
+ ad7793_remove_trigger(indio_dev);
+ ad7793_ring_cleanup(indio_dev);
+
+ if (!IS_ERR(st->reg)) {
+ regulator_disable(st->reg);
+ regulator_put(st->reg);
+ }
+
+ iio_device_unregister(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id ad7793_id[] = {
+ {"ad7792", ID_AD7792},
+ {"ad7793", ID_AD7793},
+ {}
+};
+
+static struct spi_driver ad7793_driver = {
+ .driver = {
+ .name = "ad7793",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ad7793_probe,
+ .remove = __devexit_p(ad7793_remove),
+ .id_table = ad7793_id,
+};
+
+static int __init ad7793_init(void)
+{
+ return spi_register_driver(&ad7793_driver);
+}
+module_init(ad7793_init);
+
+static void __exit ad7793_exit(void)
+{
+ spi_unregister_driver(&ad7793_driver);
+}
+module_exit(ad7793_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD7792/3 ADC");
+MODULE_LICENSE("GPL v2");
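
ad7793_setup() above precomputes the eight gain-dependent scales that ad7793_read_raw() later reports as integer-plus-nano pairs (raw * scale giving millivolts). A standalone sketch of that arithmetic, not part of the patch, using plain 64-bit division in place of the kernel's do_div(); the 1170 mV reference and 24-bit bipolar channel are example inputs, not asserted hardware defaults:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* example inputs only: 1170 mV reference, 24-bit bipolar channel */
	const unsigned vref_mv = 1170, realbits = 24, unipolar = 0;
	unsigned i;

	for (i = 0; i < 8; i++) {
		/* scale in mV per LSB for gain 2^i, split into integer and nano parts */
		uint64_t scale_uv = ((uint64_t)vref_mv * 100000000ULL)
					>> (realbits - (unipolar ? 0 : 1));
		unsigned integer, nano;

		scale_uv >>= i;
		nano = (unsigned)(scale_uv % 100000000ULL) * 10;
		integer = (unsigned)(scale_uv / 100000000ULL);

		/* gain index 0 works out to 0.000139470 mV (~139.5 nV) per LSB */
		printf("gain %3u: %u.%09u mV/LSB\n", 1u << i, integer, nano);
	}
	return 0;
}
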
diff --git a/drivers/staging/iio/adc/ad7793.h b/drivers/staging/iio/adc/ad7793.h
new file mode 100644
index 00000000000..64f7d41dc45
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7793.h
@@ -0,0 +1,107 @@
+/*
+ * AD7792/AD7793 SPI ADC driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+#ifndef IIO_ADC_AD7793_H_
+#define IIO_ADC_AD7793_H_
+
+/*
+ * TODO: struct ad7793_platform_data needs to go into include/linux/iio
+ */
+
+/* Registers */
+#define AD7793_REG_COMM 0 /* Communications Register (WO, 8-bit) */
+#define AD7793_REG_STAT 0 /* Status Register (RO, 8-bit) */
+#define AD7793_REG_MODE 1 /* Mode Register (RW, 16-bit) */
+#define AD7793_REG_CONF 2 /* Configuration Register (RW, 16-bit) */
+#define AD7793_REG_DATA 3 /* Data Register (RO, 16-/24-bit) */
+#define AD7793_REG_ID 4 /* ID Register (RO, 8-bit) */
+#define AD7793_REG_IO 5 /* IO Register (RO, 8-bit) */
+#define AD7793_REG_OFFSET 6 /* Offset Register (RW, 16-bit
+ * (AD7792)/24-bit (AD7793)) */
+#define AD7793_REG_FULLSALE 7 /* Full-Scale Register
+ * (RW, 16-bit (AD7792)/24-bit (AD7793)) */
+
+/* Communications Register Bit Designations (AD7793_REG_COMM) */
+#define AD7793_COMM_WEN (1 << 7) /* Write Enable */
+#define AD7793_COMM_WRITE (0 << 6) /* Write Operation */
+#define AD7793_COMM_READ (1 << 6) /* Read Operation */
+#define AD7793_COMM_ADDR(x) (((x) & 0x7) << 3) /* Register Address */
+#define AD7793_COMM_CREAD (1 << 2) /* Continuous Read of Data Register */
+
+/* Status Register Bit Designations (AD7793_REG_STAT) */
+#define AD7793_STAT_RDY (1 << 7) /* Ready */
+#define AD7793_STAT_ERR (1 << 6) /* Error (Overrange, Underrange) */
+#define AD7793_STAT_CH3 (1 << 2) /* Channel 3 */
+#define AD7793_STAT_CH2 (1 << 1) /* Channel 2 */
+#define AD7793_STAT_CH1 (1 << 0) /* Channel 1 */
+
+/* Mode Register Bit Designations (AD7793_REG_MODE) */
+#define AD7793_MODE_SEL(x) (((x) & 0x7) << 13) /* Operation Mode Select */
+#define AD7793_MODE_CLKSRC(x) (((x) & 0x3) << 6) /* ADC Clock Source Select */
+#define AD7793_MODE_RATE(x) ((x) & 0xF) /* Filter Update Rate Select */
+
+#define AD7793_MODE_CONT 0 /* Continuous Conversion Mode */
+#define AD7793_MODE_SINGLE 1 /* Single Conversion Mode */
+#define AD7793_MODE_IDLE 2 /* Idle Mode */
+#define AD7793_MODE_PWRDN 3 /* Power-Down Mode */
+#define AD7793_MODE_CAL_INT_ZERO 4 /* Internal Zero-Scale Calibration */
+#define AD7793_MODE_CAL_INT_FULL 5 /* Internal Full-Scale Calibration */
+#define AD7793_MODE_CAL_SYS_ZERO 6 /* System Zero-Scale Calibration */
+#define AD7793_MODE_CAL_SYS_FULL 7 /* System Full-Scale Calibration */
+
+#define AD7793_CLK_INT 0 /* Internal 64 kHz Clock not
+ * available at the CLK pin */
+#define AD7793_CLK_INT_CO 1 /* Internal 64 kHz Clock available
+ * at the CLK pin */
+#define AD7793_CLK_EXT 2 /* External 64 kHz Clock */
+#define AD7793_CLK_EXT_DIV2 3 /* External Clock divided by 2 */
+
+/* Configuration Register Bit Designations (AD7793_REG_CONF) */
+#define AD7793_CONF_VBIAS(x) (((x) & 0x3) << 14) /* Bias Voltage
+ * Generator Enable */
+#define AD7793_CONF_BO_EN (1 << 13) /* Burnout Current Enable */
+#define AD7793_CONF_UNIPOLAR (1 << 12) /* Unipolar/Bipolar Enable */
+#define AD7793_CONF_BOOST (1 << 11) /* Boost Enable */
+#define AD7793_CONF_GAIN(x) (((x) & 0x7) << 8) /* Gain Select */
+#define AD7793_CONF_REFSEL (1 << 7) /* INT/EXT Reference Select */
+#define AD7793_CONF_BUF (1 << 4) /* Buffered Mode Enable */
+#define AD7793_CONF_CHAN(x) ((x) & 0x7) /* Channel select */
+
+#define AD7793_CH_AIN1P_AIN1M 0 /* AIN1(+) - AIN1(-) */
+#define AD7793_CH_AIN2P_AIN2M 1 /* AIN2(+) - AIN2(-) */
+#define AD7793_CH_AIN3P_AIN3M 2 /* AIN3(+) - AIN3(-) */
+#define AD7793_CH_AIN1M_AIN1M 3 /* AIN1(-) - AIN1(-) */
+#define AD7793_CH_TEMP 6 /* Temp Sensor */
+#define AD7793_CH_AVDD_MONITOR 7 /* AVDD Monitor */
+
+/* ID Register Bit Designations (AD7793_REG_ID) */
+#define AD7792_ID 0xA
+#define AD7793_ID 0xB
+#define AD7793_ID_MASK 0xF
+
+/* IO (Excitation Current Sources) Register Bit Designations (AD7793_REG_IO) */
+#define AD7793_IO_IEXC1_IOUT1_IEXC2_IOUT2 0 /* IEXC1 connect to IOUT1,
+ * IEXC2 connect to IOUT2 */
+#define AD7793_IO_IEXC1_IOUT2_IEXC2_IOUT1 1 /* IEXC1 connect to IOUT2,
+ * IEXC2 connect to IOUT1 */
+#define AD7793_IO_IEXC1_IEXC2_IOUT1 2 /* Both current sources
+ * IEXC1,2 connect to IOUT1 */
+#define AD7793_IO_IEXC1_IEXC2_IOUT2 3 /* Both current sources
+ * IEXC1,2 connect to IOUT2 */
+
+#define AD7793_IO_IXCEN_10uA (1 << 0) /* Excitation Current 10uA */
+#define AD7793_IO_IXCEN_210uA (2 << 0) /* Excitation Current 210uA */
+#define AD7793_IO_IXCEN_1mA (3 << 0) /* Excitation Current 1mA */
+
+struct ad7793_platform_data {
+ u16 vref_mv;
+ u16 mode;
+ u16 conf;
+ u8 io;
+};
+
+#endif /* IIO_ADC_AD7793_H_ */
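
The mode, conf and io fields of struct ad7793_platform_data are raw register images; ad7793_setup() masks out the mode-select and channel fields itself and writes the rest to the chip. A hypothetical board-file sketch, not part of the patch, composing them from the macros above; the particular choices (470 Hz update rate, gain of 1, buffered inputs, 10 uA excitation current) are illustrative only:

#include "ad7793.h"	/* hypothetical board code */

static struct ad7793_platform_data foo_ad7793_pdata = {
	.vref_mv = 2500,	/* reference voltage in millivolts */
	.mode	 = AD7793_MODE_CLKSRC(AD7793_CLK_INT) |
		   AD7793_MODE_RATE(1),			/* 470 Hz, per sample_freq_avail[] */
	.conf	 = AD7793_CONF_GAIN(0) |		/* gain of 1 */
		   AD7793_CONF_BUF,			/* buffered analog inputs */
	.io	 = AD7793_IO_IEXC1_IOUT1_IEXC2_IOUT2 |
		   AD7793_IO_IXCEN_10uA,
};

Since ad7793_probe() refuses to bind without an interrupt ("no IRQ?"), the matching spi_board_info would also need a valid .irq wired to the DOUT/RDY line.
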
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 11379e469b0..0c84217bde3 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -43,7 +43,6 @@
struct ad7816_chip_info {
struct spi_device *spi_dev;
- struct iio_dev *indio_dev;
u16 rdwr_pin;
u16 convert_pin;
u16 busy_pin;
@@ -113,7 +112,7 @@ static ssize_t ad7816_show_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7816_chip_info *chip = dev_info->dev_data;
+ struct ad7816_chip_info *chip = iio_priv(dev_info);
if (chip->mode)
return sprintf(buf, "power-save\n");
@@ -127,7 +126,7 @@ static ssize_t ad7816_store_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7816_chip_info *chip = dev_info->dev_data;
+ struct ad7816_chip_info *chip = iio_priv(dev_info);
if (strcmp(buf, "full")) {
gpio_set_value(chip->rdwr_pin, 1);
@@ -159,7 +158,7 @@ static ssize_t ad7816_show_channel(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7816_chip_info *chip = dev_info->dev_data;
+ struct ad7816_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", chip->channel_id);
}
@@ -170,7 +169,7 @@ static ssize_t ad7816_store_channel(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7816_chip_info *chip = dev_info->dev_data;
+ struct ad7816_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
@@ -208,7 +207,7 @@ static ssize_t ad7816_show_value(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7816_chip_info *chip = dev_info->dev_data;
+ struct ad7816_chip_info *chip = iio_priv(dev_info);
u16 data;
s8 value;
int ret;
@@ -265,7 +264,7 @@ static ssize_t ad7816_show_oti(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7816_chip_info *chip = dev_info->dev_data;
+ struct ad7816_chip_info *chip = iio_priv(dev_info);
int value;
if (chip->channel_id > AD7816_CS_MAX) {
@@ -286,7 +285,7 @@ static inline ssize_t ad7816_set_oti(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad7816_chip_info *chip = dev_info->dev_data;
+ struct ad7816_chip_info *chip = iio_priv(dev_info);
long value;
u8 data;
int ret;
@@ -345,6 +344,7 @@ static const struct iio_info ad7816_info = {
static int __devinit ad7816_probe(struct spi_device *spi_dev)
{
struct ad7816_chip_info *chip;
+ struct iio_dev *indio_dev;
unsigned short *pins = spi_dev->dev.platform_data;
int ret = 0;
int i;
@@ -354,13 +354,14 @@ static int __devinit ad7816_probe(struct spi_device *spi_dev)
return -EINVAL;
}
- chip = kzalloc(sizeof(struct ad7816_chip_info), GFP_KERNEL);
-
- if (chip == NULL)
- return -ENOMEM;
-
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
- dev_set_drvdata(&spi_dev->dev, chip);
+ dev_set_drvdata(&spi_dev->dev, indio_dev);
chip->spi_dev = spi_dev;
for (i = 0; i <= AD7816_CS_MAX; i++)
@@ -373,7 +374,7 @@ static int __devinit ad7816_probe(struct spi_device *spi_dev)
if (ret) {
dev_err(&spi_dev->dev, "Fail to request rdwr gpio PIN %d.\n",
chip->rdwr_pin);
- goto error_free_chip;
+ goto error_free_device;
}
gpio_direction_input(chip->rdwr_pin);
ret = gpio_request(chip->convert_pin, spi_get_device_id(spi_dev)->name);
@@ -391,20 +392,14 @@ static int __devinit ad7816_probe(struct spi_device *spi_dev)
}
gpio_direction_input(chip->busy_pin);
- chip->indio_dev = iio_allocate_device(0);
- if (chip->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_gpio;
- }
- chip->indio_dev->name = spi_get_device_id(spi_dev)->name;
- chip->indio_dev->dev.parent = &spi_dev->dev;
- chip->indio_dev->info = &ad7816_info;
- chip->indio_dev->dev_data = (void *)chip;
- chip->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = spi_get_device_id(spi_dev)->name;
+ indio_dev->dev.parent = &spi_dev->dev;
+ indio_dev->info = &ad7816_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(chip->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ goto error_free_gpio;
if (spi_dev->irq) {
/* Only low trigger is supported in ad7816/7/8 */
@@ -412,47 +407,44 @@ static int __devinit ad7816_probe(struct spi_device *spi_dev)
NULL,
&ad7816_event_handler,
IRQF_TRIGGER_LOW,
- chip->indio_dev->name,
- chip->indio_dev);
+ indio_dev->name,
+ indio_dev);
if (ret)
goto error_unreg_dev;
}
dev_info(&spi_dev->dev, "%s temperature sensor and ADC registered.\n",
- chip->indio_dev->name);
+ indio_dev->name);
return 0;
error_unreg_dev:
- iio_device_unregister(chip->indio_dev);
-error_free_dev:
- iio_free_device(chip->indio_dev);
+ iio_device_unregister(indio_dev);
error_free_gpio:
gpio_free(chip->busy_pin);
error_free_gpio_convert:
gpio_free(chip->convert_pin);
error_free_gpio_rdwr:
gpio_free(chip->rdwr_pin);
-error_free_chip:
- kfree(chip);
-
+error_free_device:
+ iio_free_device(indio_dev);
+error_ret:
return ret;
}
static int __devexit ad7816_remove(struct spi_device *spi_dev)
{
- struct ad7816_chip_info *chip = dev_get_drvdata(&spi_dev->dev);
- struct iio_dev *indio_dev = chip->indio_dev;
+ struct iio_dev *indio_dev = dev_get_drvdata(&spi_dev->dev);
+ struct ad7816_chip_info *chip = iio_priv(indio_dev);
dev_set_drvdata(&spi_dev->dev, NULL);
if (spi_dev->irq)
free_irq(spi_dev->irq, indio_dev);
- iio_device_unregister(indio_dev);
- iio_free_device(chip->indio_dev);
gpio_free(chip->busy_pin);
gpio_free(chip->convert_pin);
gpio_free(chip->rdwr_pin);
- kfree(chip);
+ iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/ad7887_core.c b/drivers/staging/iio/adc/ad7887_core.c
index de14b174cef..3d9121e5c37 100644
--- a/drivers/staging/iio/adc/ad7887_core.c
+++ b/drivers/staging/iio/adc/ad7887_core.c
@@ -37,7 +37,7 @@ static int ad7887_read_raw(struct iio_dev *dev_info,
long m)
{
int ret;
- struct ad7887_state *st = dev_info->dev_data;
+ struct ad7887_state *st = iio_priv(dev_info);
unsigned int scale_uv;
switch (m) {
@@ -118,7 +118,6 @@ static int __devinit ad7887_probe(struct spi_device *spi)
/* Establish that the iio_dev is a child of the spi device */
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
- indio_dev->dev_data = (void *)(st);
indio_dev->info = &ad7887_info;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/staging/iio/adc/ad7887_ring.c b/drivers/staging/iio/adc/ad7887_ring.c
index 0e4a5f4fd89..0ac7c0b9d71 100644
--- a/drivers/staging/iio/adc/ad7887_ring.c
+++ b/drivers/staging/iio/adc/ad7887_ring.c
@@ -64,7 +64,7 @@ error_ret:
**/
static int ad7887_ring_preenable(struct iio_dev *indio_dev)
{
- struct ad7887_state *st = indio_dev->dev_data;
+ struct ad7887_state *st = iio_priv(indio_dev);
struct iio_ring_buffer *ring = indio_dev->ring;
st->d_size = ring->scan_count *
@@ -100,7 +100,7 @@ static int ad7887_ring_preenable(struct iio_dev *indio_dev)
static int ad7887_ring_postdisable(struct iio_dev *indio_dev)
{
- struct ad7887_state *st = indio_dev->dev_data;
+ struct ad7887_state *st = iio_priv(indio_dev);
/* dummy read: restore default CH0 setting */
return spi_sync(st->spi, &st->msg[AD7887_CH0]);
@@ -116,7 +116,7 @@ static irqreturn_t ad7887_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->private_data;
- struct ad7887_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad7887_state *st = iio_priv(indio_dev);
struct iio_ring_buffer *ring = indio_dev->ring;
s64 time_ns;
__u8 *buf;
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 29bfbcf8206..92cfe2e3ea4 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -143,7 +143,7 @@ static int ad799x_read_raw(struct iio_dev *dev_info,
long m)
{
int ret;
- struct ad799x_state *st = dev_info->dev_data;
+ struct ad799x_state *st = iio_priv(dev_info);
unsigned int scale_uv;
switch (m) {
@@ -176,7 +176,7 @@ static ssize_t ad799x_read_frequency(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad799x_state *st = iio_dev_get_devdata(dev_info);
+ struct ad799x_state *st = iio_priv(dev_info);
int ret, len = 0;
u8 val;
@@ -221,7 +221,7 @@ static ssize_t ad799x_write_frequency(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad799x_state *st = iio_dev_get_devdata(dev_info);
+ struct ad799x_state *st = iio_priv(dev_info);
long val;
int ret;
@@ -281,7 +281,7 @@ static ssize_t ad799x_read_channel_config(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad799x_state *st = iio_dev_get_devdata(dev_info);
+ struct ad799x_state *st = iio_priv(dev_info);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
@@ -299,7 +299,7 @@ static ssize_t ad799x_write_channel_config(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad799x_state *st = iio_dev_get_devdata(dev_info);
+ struct ad799x_state *st = iio_priv(dev_info);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
long val;
@@ -319,7 +319,7 @@ static ssize_t ad799x_write_channel_config(struct device *dev,
static irqreturn_t ad799x_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
- struct ad799x_state *st = iio_dev_get_devdata(private);
+ struct ad799x_state *st = iio_priv(private);
u8 status;
int i, ret;
@@ -686,7 +686,6 @@ static int __devinit ad799x_probe(struct i2c_client *client,
indio_dev->name = id->name;
indio_dev->info = st->chip_info->info;
indio_dev->name = id->name;
- indio_dev->dev_data = (void *)(st);
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->chip_info->channel;
diff --git a/drivers/staging/iio/adc/ad799x_ring.c b/drivers/staging/iio/adc/ad799x_ring.c
index 1ae8857b3d2..0376a826c26 100644
--- a/drivers/staging/iio/adc/ad799x_ring.c
+++ b/drivers/staging/iio/adc/ad799x_ring.c
@@ -72,7 +72,7 @@ error_ret:
static int ad799x_ring_preenable(struct iio_dev *indio_dev)
{
struct iio_ring_buffer *ring = indio_dev->ring;
- struct ad799x_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad799x_state *st = iio_priv(indio_dev);
/*
* Need to figure out the current mode based upon the requested
@@ -109,7 +109,7 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->private_data;
- struct ad799x_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad799x_state *st = iio_priv(indio_dev);
struct iio_ring_buffer *ring = indio_dev->ring;
s64 time_ns;
__u8 *rxbuf;
diff --git a/drivers/staging/iio/adc/adt7310.c b/drivers/staging/iio/adc/adt7310.c
index 68eca0b99ac..1a41b803440 100644
--- a/drivers/staging/iio/adc/adt7310.c
+++ b/drivers/staging/iio/adc/adt7310.c
@@ -80,7 +80,6 @@
struct adt7310_chip_info {
struct spi_device *spi_dev;
- struct iio_dev *indio_dev;
u8 config;
};
@@ -176,7 +175,7 @@ static ssize_t adt7310_show_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7310_chip_info *chip = dev_info->dev_data;
+ struct adt7310_chip_info *chip = iio_priv(dev_info);
u8 config;
config = chip->config & ADT7310_MODE_MASK;
@@ -199,7 +198,7 @@ static ssize_t adt7310_store_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7310_chip_info *chip = dev_info->dev_data;
+ struct adt7310_chip_info *chip = iio_priv(dev_info);
u16 config;
int ret;
@@ -243,7 +242,7 @@ static ssize_t adt7310_show_resolution(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7310_chip_info *chip = dev_info->dev_data;
+ struct adt7310_chip_info *chip = iio_priv(dev_info);
int ret;
int bits;
@@ -265,7 +264,7 @@ static ssize_t adt7310_store_resolution(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7310_chip_info *chip = dev_info->dev_data;
+ struct adt7310_chip_info *chip = iio_priv(dev_info);
unsigned long data;
u16 config;
int ret;
@@ -301,7 +300,7 @@ static ssize_t adt7310_show_id(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7310_chip_info *chip = dev_info->dev_data;
+ struct adt7310_chip_info *chip = iio_priv(dev_info);
u8 id;
int ret;
@@ -351,7 +350,7 @@ static ssize_t adt7310_show_value(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7310_chip_info *chip = dev_info->dev_data;
+ struct adt7310_chip_info *chip = iio_priv(dev_info);
u8 status;
u16 data;
int ret, i = 0;
@@ -390,7 +389,7 @@ static const struct attribute_group adt7310_attribute_group = {
static irqreturn_t adt7310_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
- struct adt7310_chip_info *chip = iio_dev_get_devdata(indio_dev);
+ struct adt7310_chip_info *chip = iio_priv(indio_dev);
s64 timestamp = iio_get_time_ns();
u8 status;
int ret;
@@ -425,7 +424,7 @@ static ssize_t adt7310_show_event_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7310_chip_info *chip = dev_info->dev_data;
+ struct adt7310_chip_info *chip = iio_priv(dev_info);
int ret;
ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
@@ -444,7 +443,7 @@ static ssize_t adt7310_set_event_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7310_chip_info *chip = dev_info->dev_data;
+ struct adt7310_chip_info *chip = iio_priv(dev_info);
u16 config;
int ret;
@@ -477,7 +476,7 @@ static ssize_t adt7310_show_fault_queue(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7310_chip_info *chip = dev_info->dev_data;
+ struct adt7310_chip_info *chip = iio_priv(dev_info);
int ret;
ret = adt7310_spi_read_byte(chip, ADT7310_CONFIG, &chip->config);
@@ -493,7 +492,7 @@ static ssize_t adt7310_set_fault_queue(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7310_chip_info *chip = dev_info->dev_data;
+ struct adt7310_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
u8 config;
@@ -523,7 +522,7 @@ static inline ssize_t adt7310_show_t_bound(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7310_chip_info *chip = dev_info->dev_data;
+ struct adt7310_chip_info *chip = iio_priv(dev_info);
u16 data;
int ret;
@@ -541,7 +540,7 @@ static inline ssize_t adt7310_set_t_bound(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7310_chip_info *chip = dev_info->dev_data;
+ struct adt7310_chip_info *chip = iio_priv(dev_info);
long tmp1, tmp2;
u16 data;
char *pos;
@@ -661,7 +660,7 @@ static ssize_t adt7310_show_t_hyst(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7310_chip_info *chip = dev_info->dev_data;
+ struct adt7310_chip_info *chip = iio_priv(dev_info);
int ret;
u8 t_hyst;
@@ -678,7 +677,7 @@ static inline ssize_t adt7310_set_t_hyst(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7310_chip_info *chip = dev_info->dev_data;
+ struct adt7310_chip_info *chip = iio_priv(dev_info);
int ret;
unsigned long data;
u8 t_hyst;
@@ -760,33 +759,28 @@ static const struct iio_info adt7310_info = {
static int __devinit adt7310_probe(struct spi_device *spi_dev)
{
struct adt7310_chip_info *chip;
+ struct iio_dev *indio_dev;
int ret = 0;
unsigned long *adt7310_platform_data = spi_dev->dev.platform_data;
unsigned long irq_flags;
- chip = kzalloc(sizeof(struct adt7310_chip_info), GFP_KERNEL);
-
- if (chip == NULL)
- return -ENOMEM;
-
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
- dev_set_drvdata(&spi_dev->dev, chip);
+ dev_set_drvdata(&spi_dev->dev, indio_dev);
chip->spi_dev = spi_dev;
- chip->indio_dev = iio_allocate_device(0);
- if (chip->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_chip;
- }
-
- chip->indio_dev->dev.parent = &spi_dev->dev;
- chip->indio_dev->name = spi_get_device_id(spi_dev)->name;
- chip->indio_dev->info = &adt7310_info;
- chip->indio_dev->dev_data = (void *)chip;
- chip->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &spi_dev->dev;
+ indio_dev->name = spi_get_device_id(spi_dev)->name;
+ indio_dev->info = &adt7310_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(chip->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
@@ -800,8 +794,8 @@ static int __devinit adt7310_probe(struct spi_device *spi_dev)
NULL,
&adt7310_event_handler,
irq_flags,
- chip->indio_dev->name,
- chip->indio_dev);
+ indio_dev->name,
+ indio_dev);
if (ret)
goto error_unreg_dev;
}
@@ -812,8 +806,8 @@ static int __devinit adt7310_probe(struct spi_device *spi_dev)
NULL,
&adt7310_event_handler,
adt7310_platform_data[1],
- chip->indio_dev->name,
- chip->indio_dev);
+ indio_dev->name,
+ indio_dev);
if (ret)
goto error_unreg_ct_irq;
}
@@ -841,38 +835,34 @@ static int __devinit adt7310_probe(struct spi_device *spi_dev)
}
dev_info(&spi_dev->dev, "%s temperature sensor registered.\n",
- chip->indio_dev->name);
+ indio_dev->name);
return 0;
error_unreg_int_irq:
- free_irq(adt7310_platform_data[0], chip->indio_dev);
+ free_irq(adt7310_platform_data[0], indio_dev);
error_unreg_ct_irq:
- free_irq(spi_dev->irq, chip->indio_dev);
+ free_irq(spi_dev->irq, indio_dev);
error_unreg_dev:
- iio_device_unregister(chip->indio_dev);
+ iio_device_unregister(indio_dev);
error_free_dev:
- iio_free_device(chip->indio_dev);
-error_free_chip:
- kfree(chip);
-
+ iio_free_device(indio_dev);
+error_ret:
return ret;
}
static int __devexit adt7310_remove(struct spi_device *spi_dev)
{
- struct adt7310_chip_info *chip = dev_get_drvdata(&spi_dev->dev);
- struct iio_dev *indio_dev = chip->indio_dev;
+ struct iio_dev *indio_dev = dev_get_drvdata(&spi_dev->dev);
unsigned long *adt7310_platform_data = spi_dev->dev.platform_data;
dev_set_drvdata(&spi_dev->dev, NULL);
if (adt7310_platform_data[0])
- free_irq(adt7310_platform_data[0], chip->indio_dev);
+ free_irq(adt7310_platform_data[0], indio_dev);
if (spi_dev->irq)
- free_irq(spi_dev->irq, chip->indio_dev);
+ free_irq(spi_dev->irq, indio_dev);
iio_device_unregister(indio_dev);
- iio_free_device(chip->indio_dev);
- kfree(chip);
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/adt7410.c b/drivers/staging/iio/adc/adt7410.c
index c40a84f9c2f..76aa0639a55 100644
--- a/drivers/staging/iio/adc/adt7410.c
+++ b/drivers/staging/iio/adc/adt7410.c
@@ -75,7 +75,6 @@
struct adt7410_chip_info {
struct i2c_client *client;
- struct iio_dev *indio_dev;
u8 config;
};
@@ -144,7 +143,7 @@ static ssize_t adt7410_show_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7410_chip_info *chip = dev_info->dev_data;
+ struct adt7410_chip_info *chip = iio_priv(dev_info);
u8 config;
config = chip->config & ADT7410_MODE_MASK;
@@ -167,7 +166,7 @@ static ssize_t adt7410_store_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7410_chip_info *chip = dev_info->dev_data;
+ struct adt7410_chip_info *chip = iio_priv(dev_info);
u16 config;
int ret;
@@ -211,7 +210,7 @@ static ssize_t adt7410_show_resolution(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7410_chip_info *chip = dev_info->dev_data;
+ struct adt7410_chip_info *chip = iio_priv(dev_info);
int ret;
int bits;
@@ -233,7 +232,7 @@ static ssize_t adt7410_store_resolution(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7410_chip_info *chip = dev_info->dev_data;
+ struct adt7410_chip_info *chip = iio_priv(dev_info);
unsigned long data;
u16 config;
int ret;
@@ -269,7 +268,7 @@ static ssize_t adt7410_show_id(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7410_chip_info *chip = dev_info->dev_data;
+ struct adt7410_chip_info *chip = iio_priv(dev_info);
u8 id;
int ret;
@@ -319,7 +318,7 @@ static ssize_t adt7410_show_value(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7410_chip_info *chip = dev_info->dev_data;
+ struct adt7410_chip_info *chip = iio_priv(dev_info);
u8 status;
u16 data;
int ret, i = 0;
@@ -358,7 +357,7 @@ static const struct attribute_group adt7410_attribute_group = {
static irqreturn_t adt7410_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
- struct adt7410_chip_info *chip = iio_dev_get_devdata(indio_dev);
+ struct adt7410_chip_info *chip = iio_priv(indio_dev);
s64 timestamp = iio_get_time_ns();
u8 status;
@@ -392,7 +391,7 @@ static ssize_t adt7410_show_event_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7410_chip_info *chip = dev_info->dev_data;
+ struct adt7410_chip_info *chip = iio_priv(dev_info);
int ret;
ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
@@ -411,7 +410,7 @@ static ssize_t adt7410_set_event_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7410_chip_info *chip = dev_info->dev_data;
+ struct adt7410_chip_info *chip = iio_priv(dev_info);
u16 config;
int ret;
@@ -444,7 +443,7 @@ static ssize_t adt7410_show_fault_queue(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7410_chip_info *chip = dev_info->dev_data;
+ struct adt7410_chip_info *chip = iio_priv(dev_info);
int ret;
ret = adt7410_i2c_read_byte(chip, ADT7410_CONFIG, &chip->config);
@@ -460,7 +459,7 @@ static ssize_t adt7410_set_fault_queue(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7410_chip_info *chip = dev_info->dev_data;
+ struct adt7410_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
u8 config;
@@ -490,7 +489,7 @@ static inline ssize_t adt7410_show_t_bound(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7410_chip_info *chip = dev_info->dev_data;
+ struct adt7410_chip_info *chip = iio_priv(dev_info);
u16 data;
int ret;
@@ -508,7 +507,7 @@ static inline ssize_t adt7410_set_t_bound(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7410_chip_info *chip = dev_info->dev_data;
+ struct adt7410_chip_info *chip = iio_priv(dev_info);
long tmp1, tmp2;
u16 data;
char *pos;
@@ -628,7 +627,7 @@ static ssize_t adt7410_show_t_hyst(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7410_chip_info *chip = dev_info->dev_data;
+ struct adt7410_chip_info *chip = iio_priv(dev_info);
int ret;
u8 t_hyst;
@@ -645,7 +644,7 @@ static inline ssize_t adt7410_set_t_hyst(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7410_chip_info *chip = dev_info->dev_data;
+ struct adt7410_chip_info *chip = iio_priv(dev_info);
int ret;
unsigned long data;
u8 t_hyst;
@@ -728,31 +727,27 @@ static int __devinit adt7410_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adt7410_chip_info *chip;
+ struct iio_dev *indio_dev;
int ret = 0;
unsigned long *adt7410_platform_data = client->dev.platform_data;
- chip = kzalloc(sizeof(struct adt7410_chip_info), GFP_KERNEL);
-
- if (chip == NULL)
- return -ENOMEM;
-
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
- i2c_set_clientdata(client, chip);
+ i2c_set_clientdata(client, indio_dev);
chip->client = client;
- chip->indio_dev = iio_allocate_device(0);
- if (chip->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_chip;
- }
- chip->indio_dev->name = id->name;
- chip->indio_dev->dev.parent = &client->dev;
- chip->indio_dev->info = &adt7410_info;
- chip->indio_dev->dev_data = (void *)chip;
- chip->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &adt7410_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(chip->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
@@ -763,7 +758,7 @@ static int __devinit adt7410_probe(struct i2c_client *client,
&adt7410_event_handler,
IRQF_TRIGGER_LOW,
id->name,
- chip->indio_dev);
+ indio_dev);
if (ret)
goto error_unreg_dev;
}
@@ -775,7 +770,7 @@ static int __devinit adt7410_probe(struct i2c_client *client,
&adt7410_event_handler,
adt7410_platform_data[1],
id->name,
- chip->indio_dev);
+ indio_dev);
if (ret)
goto error_unreg_ct_irq;
}
@@ -809,32 +804,27 @@ static int __devinit adt7410_probe(struct i2c_client *client,
return 0;
error_unreg_int_irq:
- free_irq(adt7410_platform_data[0], chip->indio_dev);
+ free_irq(adt7410_platform_data[0], indio_dev);
error_unreg_ct_irq:
- free_irq(client->irq, chip->indio_dev);
+ free_irq(client->irq, indio_dev);
error_unreg_dev:
- iio_device_unregister(chip->indio_dev);
+ iio_device_unregister(indio_dev);
error_free_dev:
- iio_free_device(chip->indio_dev);
-error_free_chip:
- kfree(chip);
-
+ iio_free_device(indio_dev);
+error_ret:
return ret;
}
static int __devexit adt7410_remove(struct i2c_client *client)
{
- struct adt7410_chip_info *chip = i2c_get_clientdata(client);
- struct iio_dev *indio_dev = chip->indio_dev;
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
unsigned long *adt7410_platform_data = client->dev.platform_data;
if (adt7410_platform_data[0])
- free_irq(adt7410_platform_data[0], chip->indio_dev);
+ free_irq(adt7410_platform_data[0], indio_dev);
if (client->irq)
- free_irq(client->irq, chip->indio_dev);
+ free_irq(client->irq, indio_dev);
iio_device_unregister(indio_dev);
- iio_free_device(chip->indio_dev);
- kfree(chip);
return 0;
}
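
The adt7410 hunks above capture the shape of the whole conversion: the driver state is embedded in the iio_dev allocation and recovered with iio_priv(), so the separate kzalloc()/kfree() and the indio_dev back-pointer disappear. A minimal sketch of that pattern, using a hypothetical foo_state and foo_probe/foo_remove rather than any actual driver, and omitting the chip-specific setup:

#include <linux/i2c.h>
#include <linux/module.h>

#include "../iio.h"		/* staging IIO headers, as in the drivers above */

struct foo_state {			/* hypothetical driver state */
	struct i2c_client	*client;
	u8			config;
};

static int __devinit foo_probe(struct i2c_client *client,
			       const struct i2c_device_id *id)
{
	struct iio_dev *indio_dev;
	struct foo_state *st;
	int ret;

	/* one allocation carries both the iio_dev and the driver state */
	indio_dev = iio_allocate_device(sizeof(*st));
	if (indio_dev == NULL)
		return -ENOMEM;

	st = iio_priv(indio_dev);
	st->client = client;

	/* the iio_dev, not the private struct, is the i2c clientdata */
	i2c_set_clientdata(client, indio_dev);

	indio_dev->name = id->name;
	indio_dev->dev.parent = &client->dev;
	indio_dev->modes = INDIO_DIRECT_MODE;
	/* a real driver also sets indio_dev->info before registering */

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_free_dev;

	return 0;

error_free_dev:
	iio_free_device(indio_dev);	/* frees the embedded state as well */
	return ret;
}

static int __devexit foo_remove(struct i2c_client *client)
{
	struct iio_dev *indio_dev = i2c_get_clientdata(client);

	iio_device_unregister(indio_dev);
	iio_free_device(indio_dev);	/* no separate kfree() of the state */
	return 0;
}
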
diff --git a/drivers/staging/iio/adc/adt75.c b/drivers/staging/iio/adc/adt75.c
index 1171fb9c178..38f141de6a4 100644
--- a/drivers/staging/iio/adc/adt75.c
+++ b/drivers/staging/iio/adc/adt75.c
@@ -51,7 +51,6 @@
struct adt75_chip_info {
struct i2c_client *client;
- struct iio_dev *indio_dev;
u8 config;
};
@@ -59,8 +58,9 @@ struct adt75_chip_info {
* adt75 register access by I2C
*/
-static int adt75_i2c_read(struct adt75_chip_info *chip, u8 reg, u8 *data)
+static int adt75_i2c_read(struct iio_dev *dev_info, u8 reg, u8 *data)
{
+ struct adt75_chip_info *chip = iio_priv(dev_info);
struct i2c_client *client = chip->client;
int ret = 0, len;
@@ -84,8 +84,9 @@ static int adt75_i2c_read(struct adt75_chip_info *chip, u8 reg, u8 *data)
return ret;
}
-static int adt75_i2c_write(struct adt75_chip_info *chip, u8 reg, u8 data)
+static int adt75_i2c_write(struct iio_dev *dev_info, u8 reg, u8 data)
{
+ struct adt75_chip_info *chip = iio_priv(dev_info);
struct i2c_client *client = chip->client;
int ret = 0;
@@ -104,8 +105,7 @@ static ssize_t adt75_show_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = dev_info->dev_data;
+ struct adt75_chip_info *chip = iio_priv(dev_get_drvdata(dev));
if (chip->config & ADT75_PD)
return sprintf(buf, "power-save\n");
@@ -119,11 +119,11 @@ static ssize_t adt75_store_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = dev_info->dev_data;
+ struct adt75_chip_info *chip = iio_priv(dev_info);
int ret;
u8 config;
- ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
if (ret)
return -EIO;
@@ -131,7 +131,7 @@ static ssize_t adt75_store_mode(struct device *dev,
if (!strcmp(buf, "full"))
config |= ADT75_PD;
- ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+ ret = adt75_i2c_write(dev_info, ADT75_CONFIG, config);
if (ret)
return -EIO;
@@ -158,8 +158,7 @@ static ssize_t adt75_show_oneshot(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = dev_info->dev_data;
+ struct adt75_chip_info *chip = iio_priv(dev_get_drvdata(dev));
return sprintf(buf, "%d\n", !!(chip->config & ADT75_ONESHOT));
}
@@ -170,7 +169,7 @@ static ssize_t adt75_store_oneshot(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = dev_info->dev_data;
+ struct adt75_chip_info *chip = iio_priv(dev_info);
unsigned long data = 0;
int ret;
u8 config;
@@ -180,7 +179,7 @@ static ssize_t adt75_store_oneshot(struct device *dev,
return -EINVAL;
- ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
if (ret)
return -EIO;
@@ -188,7 +187,7 @@ static ssize_t adt75_store_oneshot(struct device *dev,
if (data)
config |= ADT75_ONESHOT;
- ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+ ret = adt75_i2c_write(dev_info, ADT75_CONFIG, config);
if (ret)
return -EIO;
@@ -207,7 +206,7 @@ static ssize_t adt75_show_value(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = dev_info->dev_data;
+ struct adt75_chip_info *chip = iio_priv(dev_info);
u16 data;
char sign = ' ';
int ret;
@@ -224,7 +223,7 @@ static ssize_t adt75_show_value(struct device *dev,
return -EIO;
}
- ret = adt75_i2c_read(chip, ADT75_TEMPERATURE, (u8 *)&data);
+ ret = adt75_i2c_read(dev_info, ADT75_TEMPERATURE, (u8 *)&data);
if (ret)
return -EIO;
@@ -277,11 +276,11 @@ static ssize_t adt75_show_oti_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = dev_info->dev_data;
+ struct adt75_chip_info *chip = iio_priv(dev_info);
int ret;
/* retrive ALART status */
- ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
if (ret)
return -EIO;
@@ -297,12 +296,12 @@ static ssize_t adt75_set_oti_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = dev_info->dev_data;
+ struct adt75_chip_info *chip = iio_priv(dev_info);
int ret;
u8 config;
/* retrive ALART status */
- ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
if (ret)
return -EIO;
@@ -310,7 +309,7 @@ static ssize_t adt75_set_oti_mode(struct device *dev,
if (strcmp(buf, "comparator") != 0)
config |= ADT75_OS_INT;
- ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+ ret = adt75_i2c_write(dev_info, ADT75_CONFIG, config);
if (ret)
return -EIO;
@@ -331,11 +330,11 @@ static ssize_t adt75_show_smbus_alart(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = dev_info->dev_data;
+ struct adt75_chip_info *chip = iio_priv(dev_info);
int ret;
/* retrive ALART status */
- ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
if (ret)
return -EIO;
@@ -348,7 +347,7 @@ static ssize_t adt75_set_smbus_alart(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = dev_info->dev_data;
+ struct adt75_chip_info *chip = iio_priv(dev_info);
unsigned long data = 0;
int ret;
u8 config;
@@ -358,7 +357,7 @@ static ssize_t adt75_set_smbus_alart(struct device *dev,
return -EINVAL;
/* retrive ALART status */
- ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
if (ret)
return -EIO;
@@ -366,7 +365,7 @@ static ssize_t adt75_set_smbus_alart(struct device *dev,
if (data)
config |= ADT75_SMBUS_ALART;
- ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+ ret = adt75_i2c_write(dev_info, ADT75_CONFIG, config);
if (ret)
return -EIO;
@@ -380,11 +379,11 @@ static ssize_t adt75_show_fault_queue(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = dev_info->dev_data;
+ struct adt75_chip_info *chip = iio_priv(dev_info);
int ret;
/* retrive ALART status */
- ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
if (ret)
return -EIO;
@@ -398,7 +397,7 @@ static ssize_t adt75_set_fault_queue(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = dev_info->dev_data;
+ struct adt75_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
u8 config;
@@ -408,13 +407,13 @@ static ssize_t adt75_set_fault_queue(struct device *dev,
return -EINVAL;
/* retrive ALART status */
- ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ ret = adt75_i2c_read(dev_info, ADT75_CONFIG, &chip->config);
if (ret)
return -EIO;
config = chip->config & ~ADT75_FAULT_QUEUE_MASK;
config |= (data << ADT75_FAULT_QUEUE_OFFSET);
- ret = adt75_i2c_write(chip, ADT75_CONFIG, config);
+ ret = adt75_i2c_write(dev_info, ADT75_CONFIG, config);
if (ret)
return -EIO;
@@ -428,12 +427,11 @@ static inline ssize_t adt75_show_t_bound(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = dev_info->dev_data;
u16 data;
char sign = ' ';
int ret;
- ret = adt75_i2c_read(chip, this_attr->address, (u8 *)&data);
+ ret = adt75_i2c_read(dev_info, this_attr->address, (u8 *)&data);
if (ret)
return -EIO;
@@ -456,7 +454,6 @@ static inline ssize_t adt75_set_t_bound(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt75_chip_info *chip = dev_info->dev_data;
long tmp1, tmp2;
u16 data;
char *pos;
@@ -491,7 +488,7 @@ static inline ssize_t adt75_set_t_bound(struct device *dev,
data <<= ADT75_VALUE_OFFSET;
data = swab16(data);
- ret = adt75_i2c_write(chip, this_attr->address, (u8)data);
+ ret = adt75_i2c_write(dev_info, this_attr->address, (u8)data);
if (ret)
return -EIO;
@@ -549,31 +546,27 @@ static int __devinit adt75_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adt75_chip_info *chip;
+ struct iio_dev *indio_dev;
int ret = 0;
- chip = kzalloc(sizeof(struct adt75_chip_info), GFP_KERNEL);
-
- if (chip == NULL)
- return -ENOMEM;
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
- i2c_set_clientdata(client, chip);
+ i2c_set_clientdata(client, indio_dev);
chip->client = client;
- chip->indio_dev = iio_allocate_device(0);
- if (chip->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_chip;
- }
-
- chip->indio_dev->name = id->name;
- chip->indio_dev->dev.parent = &client->dev;
- chip->indio_dev->info = &adt75_info;
- chip->indio_dev->dev_data = (void *)chip;
- chip->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &adt75_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(chip->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
@@ -582,12 +575,12 @@ static int __devinit adt75_probe(struct i2c_client *client,
NULL,
&adt75_event_handler,
IRQF_TRIGGER_LOW,
- chip->indio_dev->name,
- chip->indio_dev);
+ indio_dev->name,
+ indio_dev);
if (ret)
goto error_unreg_dev;
- ret = adt75_i2c_read(chip, ADT75_CONFIG, &chip->config);
+ ret = adt75_i2c_read(indio_dev, ADT75_CONFIG, &chip->config);
if (ret) {
ret = -EIO;
goto error_unreg_irq;
@@ -596,7 +589,7 @@ static int __devinit adt75_probe(struct i2c_client *client,
/* set irq polarity low level */
chip->config &= ~ADT75_OS_POLARITY;
- ret = adt75_i2c_write(chip, ADT75_CONFIG, chip->config);
+ ret = adt75_i2c_write(indio_dev, ADT75_CONFIG, chip->config);
if (ret) {
ret = -EIO;
goto error_unreg_irq;
@@ -604,31 +597,27 @@ static int __devinit adt75_probe(struct i2c_client *client,
}
dev_info(&client->dev, "%s temperature sensor registered.\n",
- chip->indio_dev->name);
+ indio_dev->name);
return 0;
error_unreg_irq:
- free_irq(client->irq, chip->indio_dev);
+ free_irq(client->irq, indio_dev);
error_unreg_dev:
- iio_device_unregister(chip->indio_dev);
+ iio_device_unregister(indio_dev);
error_free_dev:
- iio_free_device(chip->indio_dev);
-error_free_chip:
- kfree(chip);
-
+ iio_free_device(indio_dev);
+error_ret:
return ret;
}
static int __devexit adt75_remove(struct i2c_client *client)
{
- struct adt75_chip_info *chip = i2c_get_clientdata(client);
- struct iio_dev *indio_dev = chip->indio_dev;
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
if (client->irq)
- free_irq(client->irq, chip->indio_dev);
+ free_irq(client->irq, indio_dev);
iio_device_unregister(indio_dev);
- iio_free_device(chip->indio_dev);
- kfree(chip);
+ iio_free_device(indio_dev);
return 0;
}
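
The adt75 hunks additionally change the register-access helpers to take the struct iio_dev itself, so each caller hands over the device and the helper resolves the private state once through iio_priv(). A sketch of a helper and a sysfs callback in that style; foo_i2c_read, foo_show_config and the register address are hypothetical:

static int foo_i2c_read(struct iio_dev *indio_dev, u8 reg, u8 *data)
{
	struct foo_state *st = iio_priv(indio_dev);
	int ret;

	ret = i2c_smbus_read_byte_data(st->client, reg);
	if (ret < 0)
		return ret;

	*data = ret;
	return 0;
}

static ssize_t foo_show_config(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct foo_state *st = iio_priv(indio_dev);
	int ret;

	ret = foo_i2c_read(indio_dev, 0x01 /* hypothetical register */,
			   &st->config);
	if (ret)
		return -EIO;

	return sprintf(buf, "0x%x\n", st->config);
}
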
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/staging/iio/adc/max1363_core.c
index 98cebd26310..72b0917412e 100644
--- a/drivers/staging/iio/adc/max1363_core.c
+++ b/drivers/staging/iio/adc/max1363_core.c
@@ -1255,12 +1255,15 @@ static int __devinit max1363_probe(struct i2c_client *client,
struct regulator *reg;
reg = regulator_get(&client->dev, "vcc");
- if (!IS_ERR(reg)) {
- ret = regulator_enable(reg);
- if (ret)
- goto error_put_reg;
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ goto error_out;
}
+ ret = regulator_enable(reg);
+ if (ret)
+ goto error_put_reg;
+
indio_dev = iio_allocate_device(sizeof(struct max1363_state));
if (indio_dev == NULL) {
ret = -ENOMEM;
@@ -1323,6 +1326,7 @@ static int __devinit max1363_probe(struct i2c_client *client,
}
return 0;
+
error_uninit_ring:
iio_ring_buffer_unregister(indio_dev->ring);
error_cleanup_ring:
@@ -1335,12 +1339,10 @@ error_free_device:
else
iio_device_unregister(indio_dev);
error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ regulator_disable(reg);
error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
-
+ regulator_put(reg);
+error_out:
return ret;
}
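
The max1363 hunk tightens the regulator handling: a failed regulator_get() now bails out immediately, and the error labels unwind in strict reverse order of acquisition using the local handle. A sketch of that ordering under hypothetical names:

static int __devinit foo_spi_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	struct regulator *reg;
	int ret;

	reg = regulator_get(&spi->dev, "vcc");
	if (IS_ERR(reg))
		return PTR_ERR(reg);		/* nothing acquired yet */

	ret = regulator_enable(reg);
	if (ret)
		goto error_put_reg;

	indio_dev = iio_allocate_device(sizeof(struct foo_state));
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_disable_reg;
	}

	/* ... set up and register the device, ring buffer, IRQs ... */

	return 0;

error_disable_reg:
	regulator_disable(reg);		/* local handle, valid even before
					 * any private state exists */
error_put_reg:
	regulator_put(reg);
	return ret;
}
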
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 7097deb0f30..637316f79f7 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -174,7 +174,6 @@
*/
struct adt7316_chip_info {
- struct iio_dev *indio_dev;
struct adt7316_bus bus;
u16 ldac_pin;
u16 int_mask; /* 0x2f */
@@ -220,7 +219,7 @@ static ssize_t adt7316_show_enabled(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_EN));
}
@@ -252,7 +251,7 @@ static ssize_t adt7316_store_enabled(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
int enable;
if (!memcmp(buf, "1", 1))
@@ -276,7 +275,7 @@ static ssize_t adt7316_show_select_ex_temp(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
return -EPERM;
@@ -290,7 +289,7 @@ static ssize_t adt7316_store_select_ex_temp(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config1;
int ret;
@@ -320,7 +319,7 @@ static ssize_t adt7316_show_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
if (chip->config2 & ADT7316_AD_SINGLE_CH_MODE)
return sprintf(buf, "single_channel\n");
@@ -334,7 +333,7 @@ static ssize_t adt7316_store_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config2;
int ret;
@@ -370,7 +369,7 @@ static ssize_t adt7316_show_ad_channel(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE))
return -EPERM;
@@ -409,7 +408,7 @@ static ssize_t adt7316_store_ad_channel(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config2;
unsigned long data = 0;
int ret;
@@ -455,7 +454,7 @@ static ssize_t adt7316_show_all_ad_channels(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE))
return -EPERM;
@@ -477,7 +476,7 @@ static ssize_t adt7316_show_disable_averaging(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n",
!!(chip->config2 & ADT7316_DISABLE_AVERAGING));
@@ -489,7 +488,7 @@ static ssize_t adt7316_store_disable_averaging(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config2;
int ret;
@@ -516,7 +515,7 @@ static ssize_t adt7316_show_enable_smbus_timeout(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n",
!!(chip->config2 & ADT7316_EN_SMBUS_TIMEOUT));
@@ -528,7 +527,7 @@ static ssize_t adt7316_store_enable_smbus_timeout(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config2;
int ret;
@@ -557,7 +556,7 @@ static ssize_t adt7316_store_reset(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config2;
int ret;
@@ -580,7 +579,7 @@ static ssize_t adt7316_show_powerdown(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_PD));
}
@@ -591,7 +590,7 @@ static ssize_t adt7316_store_powerdown(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config1;
int ret;
@@ -618,7 +617,7 @@ static ssize_t adt7316_show_fast_ad_clock(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", !!(chip->config3 & ADT7316_ADCLK_22_5));
}
@@ -629,7 +628,7 @@ static ssize_t adt7316_store_fast_ad_clock(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config3;
int ret;
@@ -656,7 +655,7 @@ static ssize_t adt7316_show_da_high_resolution(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
if (chip->config3 & ADT7316_DA_HIGH_RESOLUTION) {
if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
@@ -674,7 +673,7 @@ static ssize_t adt7316_store_da_high_resolution(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config3;
int ret;
@@ -708,7 +707,7 @@ static ssize_t adt7316_show_AIN_internal_Vref(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
return -EPERM;
@@ -723,7 +722,7 @@ static ssize_t adt7316_store_AIN_internal_Vref(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config3;
int ret;
@@ -755,7 +754,7 @@ static ssize_t adt7316_show_enable_prop_DACA(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n",
!!(chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA));
@@ -767,7 +766,7 @@ static ssize_t adt7316_store_enable_prop_DACA(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config3;
int ret;
@@ -794,7 +793,7 @@ static ssize_t adt7316_show_enable_prop_DACB(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n",
!!(chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB));
@@ -806,7 +805,7 @@ static ssize_t adt7316_store_enable_prop_DACB(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config3;
int ret;
@@ -833,7 +832,7 @@ static ssize_t adt7316_show_DAC_2Vref_ch_mask(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "0x%x\n",
chip->dac_config & ADT7316_DA_2VREF_CH_MASK);
@@ -845,7 +844,7 @@ static ssize_t adt7316_store_DAC_2Vref_ch_mask(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 dac_config;
unsigned long data = 0;
int ret;
@@ -876,7 +875,7 @@ static ssize_t adt7316_show_DAC_update_mode(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA))
return sprintf(buf, "manual\n");
@@ -900,7 +899,7 @@ static ssize_t adt7316_store_DAC_update_mode(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 dac_config;
unsigned long data;
int ret;
@@ -934,7 +933,7 @@ static ssize_t adt7316_show_all_DAC_update_modes(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA)
return sprintf(buf, "0 - auto at any MSB DAC writing\n"
@@ -955,7 +954,7 @@ static ssize_t adt7316_store_update_DAC(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 ldac_config;
unsigned long data;
int ret;
@@ -994,7 +993,7 @@ static ssize_t adt7316_show_DA_AB_Vref_bypass(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
return -EPERM;
@@ -1009,7 +1008,7 @@ static ssize_t adt7316_store_DA_AB_Vref_bypass(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 dac_config;
int ret;
@@ -1039,7 +1038,7 @@ static ssize_t adt7316_show_DA_CD_Vref_bypass(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
return -EPERM;
@@ -1054,7 +1053,7 @@ static ssize_t adt7316_store_DA_CD_Vref_bypass(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 dac_config;
int ret;
@@ -1084,7 +1083,7 @@ static ssize_t adt7316_show_DAC_internal_Vref(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
return sprintf(buf, "0x%x\n",
@@ -1101,7 +1100,7 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 ldac_config;
unsigned long data;
int ret;
@@ -1220,7 +1219,7 @@ static ssize_t adt7316_show_VDD(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_VDD, buf);
}
@@ -1231,7 +1230,7 @@ static ssize_t adt7316_show_in_temp(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_IN, buf);
}
@@ -1243,7 +1242,7 @@ static ssize_t adt7316_show_ex_temp_AIN1(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_EX, buf);
}
@@ -1256,7 +1255,7 @@ static ssize_t adt7316_show_AIN2(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN2, buf);
}
@@ -1267,7 +1266,7 @@ static ssize_t adt7316_show_AIN3(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN3, buf);
}
@@ -1278,7 +1277,7 @@ static ssize_t adt7316_show_AIN4(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN4, buf);
}
@@ -1330,7 +1329,7 @@ static ssize_t adt7316_show_in_temp_offset(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_temp_offset(chip, ADT7316_IN_TEMP_OFFSET, buf);
}
@@ -1341,7 +1340,7 @@ static ssize_t adt7316_store_in_temp_offset(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_store_temp_offset(chip, ADT7316_IN_TEMP_OFFSET, buf, len);
}
@@ -1355,7 +1354,7 @@ static ssize_t adt7316_show_ex_temp_offset(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_temp_offset(chip, ADT7316_EX_TEMP_OFFSET, buf);
}
@@ -1366,7 +1365,7 @@ static ssize_t adt7316_store_ex_temp_offset(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_store_temp_offset(chip, ADT7316_EX_TEMP_OFFSET, buf, len);
}
@@ -1380,7 +1379,7 @@ static ssize_t adt7316_show_in_analog_temp_offset(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_temp_offset(chip,
ADT7316_IN_ANALOG_TEMP_OFFSET, buf);
@@ -1392,7 +1391,7 @@ static ssize_t adt7316_store_in_analog_temp_offset(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_store_temp_offset(chip,
ADT7316_IN_ANALOG_TEMP_OFFSET, buf, len);
@@ -1407,7 +1406,7 @@ static ssize_t adt7316_show_ex_analog_temp_offset(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_temp_offset(chip,
ADT7316_EX_ANALOG_TEMP_OFFSET, buf);
@@ -1419,7 +1418,7 @@ static ssize_t adt7316_store_ex_analog_temp_offset(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_store_temp_offset(chip,
ADT7316_EX_ANALOG_TEMP_OFFSET, buf, len);
@@ -1504,7 +1503,7 @@ static ssize_t adt7316_show_DAC_A(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_DAC(chip, 0, buf);
}
@@ -1515,7 +1514,7 @@ static ssize_t adt7316_store_DAC_A(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_store_DAC(chip, 0, buf, len);
}
@@ -1528,7 +1527,7 @@ static ssize_t adt7316_show_DAC_B(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_DAC(chip, 1, buf);
}
@@ -1539,7 +1538,7 @@ static ssize_t adt7316_store_DAC_B(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_store_DAC(chip, 1, buf, len);
}
@@ -1552,7 +1551,7 @@ static ssize_t adt7316_show_DAC_C(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_DAC(chip, 2, buf);
}
@@ -1563,7 +1562,7 @@ static ssize_t adt7316_store_DAC_C(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_store_DAC(chip, 2, buf, len);
}
@@ -1576,7 +1575,7 @@ static ssize_t adt7316_show_DAC_D(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_show_DAC(chip, 3, buf);
}
@@ -1587,7 +1586,7 @@ static ssize_t adt7316_store_DAC_D(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return adt7316_store_DAC(chip, 3, buf, len);
}
@@ -1600,7 +1599,7 @@ static ssize_t adt7316_show_device_id(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 id;
int ret;
@@ -1618,7 +1617,7 @@ static ssize_t adt7316_show_manufactorer_id(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 id;
int ret;
@@ -1637,7 +1636,7 @@ static ssize_t adt7316_show_device_rev(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 rev;
int ret;
@@ -1655,7 +1654,7 @@ static ssize_t adt7316_show_bus_type(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 stat;
int ret;
@@ -1765,7 +1764,7 @@ static const struct attribute_group adt7516_attribute_group = {
static irqreturn_t adt7316_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
- struct adt7316_chip_info *chip = iio_dev_get_devdata(indio_dev);
+ struct adt7316_chip_info *chip = iio_priv(indio_dev);
u8 stat1, stat2;
int ret;
s64 time;
@@ -1777,43 +1776,43 @@ static irqreturn_t adt7316_event_handler(int irq, void *private)
time = iio_get_time_ns();
if (stat1 & (1 << 0))
- iio_push_event(chip->indio_dev, 0,
+ iio_push_event(indio_dev, 0,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
time);
if (stat1 & (1 << 1))
- iio_push_event(chip->indio_dev, 0,
+ iio_push_event(indio_dev, 0,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
time);
if (stat1 & (1 << 2))
- iio_push_event(chip->indio_dev, 0,
+ iio_push_event(indio_dev, 0,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 1,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING),
time);
if (stat1 & (1 << 3))
- iio_push_event(chip->indio_dev, 0,
+ iio_push_event(indio_dev, 0,
IIO_UNMOD_EVENT_CODE(IIO_TEMP, 1,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_FALLING),
time);
if (stat1 & (1 << 5))
- iio_push_event(chip->indio_dev, 0,
+ iio_push_event(indio_dev, 0,
IIO_UNMOD_EVENT_CODE(IIO_IN, 1,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
time);
if (stat1 & (1 << 6))
- iio_push_event(chip->indio_dev, 0,
+ iio_push_event(indio_dev, 0,
IIO_UNMOD_EVENT_CODE(IIO_IN, 2,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
time);
if (stat1 & (1 << 7))
- iio_push_event(chip->indio_dev, 0,
+ iio_push_event(indio_dev, 0,
IIO_UNMOD_EVENT_CODE(IIO_IN, 3,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
@@ -1822,7 +1821,7 @@ static irqreturn_t adt7316_event_handler(int irq, void *private)
ret = chip->bus.read(chip->bus.client, ADT7316_INT_STAT2, &stat2);
if (!ret) {
if (stat2 & ADT7316_INT_MASK2_VDD)
- iio_push_event(chip->indio_dev, 0,
+ iio_push_event(indio_dev, 0,
IIO_UNMOD_EVENT_CODE(IIO_IN,
0,
IIO_EV_TYPE_THRESH,
@@ -1841,7 +1840,7 @@ static ssize_t adt7316_show_int_mask(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "0x%x\n", chip->int_mask);
}
@@ -1855,7 +1854,7 @@ static ssize_t adt7316_set_int_mask(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
unsigned long data;
int ret;
u8 mask;
@@ -1895,7 +1894,7 @@ static inline ssize_t adt7316_show_ad_bound(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 val;
int data;
int ret;
@@ -1926,7 +1925,7 @@ static inline ssize_t adt7316_set_ad_bound(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
long data;
u8 val;
int ret;
@@ -1965,7 +1964,7 @@ static ssize_t adt7316_show_int_enabled(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_INT_EN));
}
@@ -1976,7 +1975,7 @@ static ssize_t adt7316_set_int_enabled(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
u8 config1;
int ret;
@@ -2090,7 +2089,7 @@ static struct attribute_group adt7516_event_attribute_group = {
int adt7316_disable(struct device *dev)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return _adt7316_store_enabled(chip, 0);
}
@@ -2099,7 +2098,7 @@ EXPORT_SYMBOL(adt7316_disable);
int adt7316_enable(struct device *dev)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct adt7316_chip_info *chip = iio_priv(dev_info);
return _adt7316_store_enabled(chip, 1);
}
@@ -2127,16 +2126,18 @@ int __devinit adt7316_probe(struct device *dev, struct adt7316_bus *bus,
const char *name)
{
struct adt7316_chip_info *chip;
+ struct iio_dev *indio_dev;
unsigned short *adt7316_platform_data = dev->platform_data;
int ret = 0;
- chip = kzalloc(sizeof(struct adt7316_chip_info), GFP_KERNEL);
-
- if (chip == NULL)
- return -ENOMEM;
-
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_ret;
+ }
+ chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
- dev_set_drvdata(dev, chip);
+ dev_set_drvdata(dev, indio_dev);
chip->bus = *bus;
@@ -2157,22 +2158,15 @@ int __devinit adt7316_probe(struct device *dev, struct adt7316_bus *bus,
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
chip->int_mask |= ADT7516_AIN_INT_MASK;
- chip->indio_dev = iio_allocate_device(0);
- if (chip->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_chip;
- }
-
- chip->indio_dev->dev.parent = dev;
+ indio_dev->dev.parent = dev;
if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX)
- chip->indio_dev->info = &adt7516_info;
+ indio_dev->info = &adt7516_info;
else
- chip->indio_dev->info = &adt7316_info;
- chip->indio_dev->name = name;
- chip->indio_dev->dev_data = (void *)chip;
- chip->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &adt7316_info;
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(chip->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
@@ -2184,8 +2178,8 @@ int __devinit adt7316_probe(struct device *dev, struct adt7316_bus *bus,
NULL,
&adt7316_event_handler,
chip->bus.irq_flags | IRQF_ONESHOT,
- chip->indio_dev->name,
- chip->indio_dev);
+ indio_dev->name,
+ indio_dev);
if (ret)
goto error_unreg_dev;
@@ -2206,35 +2200,31 @@ int __devinit adt7316_probe(struct device *dev, struct adt7316_bus *bus,
}
dev_info(dev, "%s temperature sensor, ADC and DAC registered.\n",
- chip->indio_dev->name);
+ indio_dev->name);
return 0;
error_unreg_irq:
- free_irq(chip->bus.irq, chip->indio_dev);
+ free_irq(chip->bus.irq, indio_dev);
error_unreg_dev:
- iio_device_unregister(chip->indio_dev);
+ iio_device_unregister(indio_dev);
error_free_dev:
- iio_free_device(chip->indio_dev);
-error_free_chip:
- kfree(chip);
-
+ iio_free_device(indio_dev);
+error_ret:
return ret;
}
EXPORT_SYMBOL(adt7316_probe);
int __devexit adt7316_remove(struct device *dev)
{
-
- struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct adt7316_chip_info *chip = dev_info->dev_data;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct adt7316_chip_info *chip = iio_priv(indio_dev);
dev_set_drvdata(dev, NULL);
if (chip->bus.irq)
- free_irq(chip->bus.irq, chip->indio_dev);
- iio_device_unregister(chip->indio_dev);
- iio_free_device(chip->indio_dev);
- kfree(chip);
+ free_irq(chip->bus.irq, indio_dev);
+ iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
return 0;
}
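
The adt7316 hunks also rework the threaded interrupt handler: the iio_dev is now the dev_id cookie handed to request_threaded_irq(), so events are pushed on it directly rather than through the old back-pointer. A sketch of that handler shape; the status register, its address and the single event bit are hypothetical:

static irqreturn_t foo_event_handler(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct foo_state *st = iio_priv(indio_dev);
	s64 timestamp = iio_get_time_ns();
	int stat;

	stat = i2c_smbus_read_byte_data(st->client, 0x02 /* hypothetical */);
	if (stat < 0)
		return IRQ_HANDLED;

	if (stat & (1 << 0))
		iio_push_event(indio_dev, 0,
			       IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
						    IIO_EV_TYPE_THRESH,
						    IIO_EV_DIR_RISING),
			       timestamp);

	return IRQ_HANDLED;
}

/* requested in probe with the iio_dev as the cookie, e.g.:
 *	request_threaded_irq(irq, NULL, foo_event_handler,
 *			     IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 *			     indio_dev->name, indio_dev);
 */
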
diff --git a/drivers/staging/iio/dac/Kconfig b/drivers/staging/iio/dac/Kconfig
index d5a5556cf98..7ddae357f20 100644
--- a/drivers/staging/iio/dac/Kconfig
+++ b/drivers/staging/iio/dac/Kconfig
@@ -42,6 +42,17 @@ config AD5791
To compile this driver as a module, choose M here: the
module will be called ad5791.
+config AD5686
+ tristate "Analog Devices AD5686R/AD5685R/AD5684R DAC SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5686R, AD5685R,
+ AD5684R Voltage Output Digital to Analog
+ Converters.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5686.
+
config MAX517
tristate "Maxim MAX517/518/519 DAC driver"
depends on I2C && EXPERIMENTAL
diff --git a/drivers/staging/iio/dac/Makefile b/drivers/staging/iio/dac/Makefile
index 83196de7a54..7f4f2ed031e 100644
--- a/drivers/staging/iio/dac/Makefile
+++ b/drivers/staging/iio/dac/Makefile
@@ -6,4 +6,5 @@ obj-$(CONFIG_AD5624R_SPI) += ad5624r_spi.o
obj-$(CONFIG_AD5504) += ad5504.o
obj-$(CONFIG_AD5446) += ad5446.o
obj-$(CONFIG_AD5791) += ad5791.o
+obj-$(CONFIG_AD5686) += ad5686.o
obj-$(CONFIG_MAX517) += max517.o
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
index 86cb08ce199..e8a9d0bf1ed 100644
--- a/drivers/staging/iio/dac/ad5446.c
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -68,7 +68,7 @@ static ssize_t ad5446_write(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad5446_state *st = dev_info->dev_data;
+ struct ad5446_state *st = iio_priv(dev_info);
int ret;
long val;
@@ -98,7 +98,7 @@ static ssize_t ad5446_show_scale(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_dev_get_devdata(dev_info);
+ struct ad5446_state *st = iio_priv(dev_info);
/* Corresponds to Vref / 2^(bits) */
unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
@@ -111,7 +111,7 @@ static ssize_t ad5446_write_powerdown_mode(struct device *dev,
const char *buf, size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad5446_state *st = dev_info->dev_data;
+ struct ad5446_state *st = iio_priv(dev_info);
if (sysfs_streq(buf, "1kohm_to_gnd"))
st->pwr_down_mode = MODE_PWRDWN_1k;
@@ -129,7 +129,7 @@ static ssize_t ad5446_read_powerdown_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad5446_state *st = dev_info->dev_data;
+ struct ad5446_state *st = iio_priv(dev_info);
char mode[][15] = {"", "1kohm_to_gnd", "100kohm_to_gnd", "three_state"};
@@ -141,7 +141,7 @@ static ssize_t ad5446_read_dac_powerdown(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad5446_state *st = dev_info->dev_data;
+ struct ad5446_state *st = iio_priv(dev_info);
return sprintf(buf, "%d\n", st->pwr_down);
}
@@ -151,7 +151,7 @@ static ssize_t ad5446_write_dac_powerdown(struct device *dev,
const char *buf, size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad5446_state *st = dev_info->dev_data;
+ struct ad5446_state *st = iio_priv(dev_info);
unsigned long readin;
int ret;
@@ -201,7 +201,7 @@ static mode_t ad5446_attr_is_visible(struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_dev_get_devdata(dev_info);
+ struct ad5446_state *st = iio_priv(dev_info);
mode_t mode = attr->mode;
@@ -342,42 +342,37 @@ static const struct iio_info ad5446_info = {
static int __devinit ad5446_probe(struct spi_device *spi)
{
struct ad5446_state *st;
+ struct iio_dev *indio_dev;
+ struct regulator *reg;
int ret, voltage_uv = 0;
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- st->reg = regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
+ reg = regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(reg)) {
+ ret = regulator_enable(reg);
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(st->reg);
+ voltage_uv = regulator_get_voltage(reg);
}
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_disable_reg;
+ }
+ st = iio_priv(indio_dev);
st->chip_info =
&ad5446_chip_info_tbl[spi_get_device_id(spi)->driver_data];
- spi_set_drvdata(spi, st);
-
+ spi_set_drvdata(spi, indio_dev);
+ st->reg = reg;
st->spi = spi;
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_disable_reg;
- }
-
/* Estabilish that the iio_dev is a child of the spi device */
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->name = spi_get_device_id(spi)->name;
- st->indio_dev->info = &ad5446_info;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5446_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
/* Setup default message */
@@ -404,36 +399,35 @@ static int __devinit ad5446_probe(struct spi_device *spi)
"reference voltage unspecified\n");
}
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_device;
return 0;
error_free_device:
- iio_free_device(st->indio_dev);
+ iio_free_device(indio_dev);
error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ if (!IS_ERR(reg))
+ regulator_disable(reg);
error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
- kfree(st);
-error_ret:
+ if (!IS_ERR(reg))
+ regulator_put(reg);
+
return ret;
}
static int ad5446_remove(struct spi_device *spi)
{
- struct ad5446_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5446_state *st = iio_priv(indio_dev);
+ struct regulator *reg = st->reg;
iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg)) {
- regulator_disable(st->reg);
- regulator_put(st->reg);
+ if (!IS_ERR(reg)) {
+ regulator_disable(reg);
+ regulator_put(reg);
}
- kfree(st);
return 0;
}
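
The ad5446 hunks show the SPI side of the same conversion: the iio_dev becomes the spi drvdata and the regulator handle is parked in the private state so teardown can reach it. A sketch of the resulting remove() path, with a hypothetical foo_spi_state holding the handle:

struct foo_spi_state {			/* hypothetical SPI driver state */
	struct spi_device	*spi;
	struct regulator	*reg;	/* stored at probe time */
	unsigned short		vref_mv;
};

static int foo_spi_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);
	struct foo_spi_state *st = iio_priv(indio_dev);
	struct regulator *reg = st->reg;

	iio_device_unregister(indio_dev);
	if (!IS_ERR(reg)) {
		regulator_disable(reg);
		regulator_put(reg);
	}
	/* ad5446 above stops here; other drivers in this series
	 * additionally call iio_free_device(indio_dev) */
	return 0;
}
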
diff --git a/drivers/staging/iio/dac/ad5446.h b/drivers/staging/iio/dac/ad5446.h
index e6ffd2bb7c7..7118d653ac3 100644
--- a/drivers/staging/iio/dac/ad5446.h
+++ b/drivers/staging/iio/dac/ad5446.h
@@ -33,7 +33,6 @@
/**
* struct ad5446_state - driver instance specific data
- * @indio_dev: the industrial I/O device
* @spi: spi_device
* @chip_info: chip model specific constants, available modes etc
* @reg: supply regulator
@@ -45,7 +44,6 @@
*/
struct ad5446_state {
- struct iio_dev *indio_dev;
struct spi_device *spi;
const struct ad5446_chip_info *chip_info;
struct regulator *reg;
diff --git a/drivers/staging/iio/dac/ad5504.c b/drivers/staging/iio/dac/ad5504.c
index ed029cdff30..1915f459868 100644
--- a/drivers/staging/iio/dac/ad5504.c
+++ b/drivers/staging/iio/dac/ad5504.c
@@ -55,7 +55,7 @@ static ssize_t ad5504_write_dac(struct device *dev,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5504_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5504_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
long readin;
int ret;
@@ -73,7 +73,7 @@ static ssize_t ad5504_read_dac(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5504_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5504_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
u16 val;
@@ -89,7 +89,7 @@ static ssize_t ad5504_read_powerdown_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5504_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5504_state *st = iio_priv(indio_dev);
const char mode[][14] = {"20kohm_to_gnd", "three_state"};
@@ -101,7 +101,7 @@ static ssize_t ad5504_write_powerdown_mode(struct device *dev,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5504_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5504_state *st = iio_priv(indio_dev);
int ret;
if (sysfs_streq(buf, "20kohm_to_gnd"))
@@ -119,7 +119,7 @@ static ssize_t ad5504_read_dac_powerdown(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5504_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5504_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
return sprintf(buf, "%d\n",
@@ -133,7 +133,7 @@ static ssize_t ad5504_write_dac_powerdown(struct device *dev,
long readin;
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5504_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5504_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
ret = strict_strtol(buf, 10, &readin);
@@ -162,7 +162,7 @@ static ssize_t ad5504_show_scale(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5504_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5504_state *st = iio_priv(indio_dev);
/* Corresponds to Vref / 2^(bits) */
unsigned int scale_uv = (st->vref_mv * 1000) >> AD5505_BITS;
@@ -277,26 +277,27 @@ static const struct iio_info ad5501_info = {
static int __devinit ad5504_probe(struct spi_device *spi)
{
struct ad5504_platform_data *pdata = spi->dev.platform_data;
+ struct iio_dev *indio_dev;
struct ad5504_state *st;
+ struct regulator *reg;
int ret, voltage_uv = 0;
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- spi_set_drvdata(spi, st);
-
- st->reg = regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
+ reg = regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(reg)) {
+ ret = regulator_enable(reg);
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(st->reg);
+ voltage_uv = regulator_get_voltage(reg);
}
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_disable_reg;
+ }
+ spi_set_drvdata(spi, indio_dev);
+ st = iio_priv(indio_dev);
if (voltage_uv)
st->vref_mv = voltage_uv / 1000;
else if (pdata)
@@ -304,22 +305,17 @@ static int __devinit ad5504_probe(struct spi_device *spi)
else
dev_warn(&spi->dev, "reference voltage unspecified\n");
+ st->reg = reg;
st->spi = spi;
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_disable_reg;
- }
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->name = spi_get_device_id(st->spi)->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(st->spi)->name;
if (spi_get_device_id(st->spi)->driver_data == ID_AD5501)
- st->indio_dev->info = &ad5501_info;
+ indio_dev->info = &ad5501_info;
else
- st->indio_dev->info = &ad5504_info;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ad5504_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
@@ -329,7 +325,7 @@ static int __devinit ad5504_probe(struct spi_device *spi)
&ad5504_event_handler,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
spi_get_device_id(st->spi)->name,
- st->indio_dev);
+ indio_dev);
if (ret)
goto error_unreg_iio_device;
}
@@ -337,37 +333,34 @@ static int __devinit ad5504_probe(struct spi_device *spi)
return 0;
error_unreg_iio_device:
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
error_free_dev:
- iio_free_device(st->indio_dev);
+ iio_free_device(indio_dev);
error_disable_reg:
- if (!IS_ERR(st->reg))
+ if (!IS_ERR(reg))
regulator_disable(st->reg);
error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
+ if (!IS_ERR(reg))
+ regulator_put(reg);
- kfree(st);
-error_ret:
return ret;
}
static int __devexit ad5504_remove(struct spi_device *spi)
{
- struct ad5504_state *st = spi_get_drvdata(spi);
-
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5504_state *st = iio_priv(indio_dev);
+ struct regulator *reg = st->reg;
if (spi->irq)
- free_irq(spi->irq, st->indio_dev);
+ free_irq(spi->irq, indio_dev);
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg)) {
- regulator_disable(st->reg);
- regulator_put(st->reg);
+ if (!IS_ERR(reg)) {
+ regulator_disable(reg);
+ regulator_put(reg);
}
- kfree(st);
-
return 0;
}
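
The ad5504 probe keeps the same fallback chain for the reference voltage that the other DAC drivers in this series use. A compact sketch of that selection, assuming a vref_mv field in both the state and a hypothetical platform-data struct:

static void foo_pick_vref(struct iio_dev *indio_dev,
			  struct foo_platform_data *pdata,
			  int voltage_uv)
{
	struct foo_spi_state *st = iio_priv(indio_dev);

	if (voltage_uv)				/* regulator supplied a value */
		st->vref_mv = voltage_uv / 1000;
	else if (pdata)				/* board-specific fallback */
		st->vref_mv = pdata->vref_mv;
	else
		dev_warn(&indio_dev->dev, "reference voltage unspecified\n");
}
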
diff --git a/drivers/staging/iio/dac/ad5504.h b/drivers/staging/iio/dac/ad5504.h
index 13ef3539913..85beb1dd29b 100644
--- a/drivers/staging/iio/dac/ad5504.h
+++ b/drivers/staging/iio/dac/ad5504.h
@@ -41,7 +41,6 @@ struct ad5504_platform_data {
/**
* struct ad5446_state - driver instance specific data
- * @indio_dev: the industrial I/O device
* @us: spi_device
* @reg: supply regulator
* @vref_mv: actual reference voltage used
@@ -50,7 +49,6 @@ struct ad5504_platform_data {
*/
struct ad5504_state {
- struct iio_dev *indio_dev;
struct spi_device *spi;
struct regulator *reg;
unsigned short vref_mv;
diff --git a/drivers/staging/iio/dac/ad5624r.h b/drivers/staging/iio/dac/ad5624r.h
index c16df4ed52c..b71c6a03e78 100644
--- a/drivers/staging/iio/dac/ad5624r.h
+++ b/drivers/staging/iio/dac/ad5624r.h
@@ -53,7 +53,6 @@ struct ad5624r_chip_info {
*/
struct ad5624r_state {
- struct iio_dev *indio_dev;
struct spi_device *us;
const struct ad5624r_chip_info *chip_info;
struct regulator *reg;
diff --git a/drivers/staging/iio/dac/ad5624r_spi.c b/drivers/staging/iio/dac/ad5624r_spi.c
index c679981f014..a5b3776718e 100644
--- a/drivers/staging/iio/dac/ad5624r_spi.c
+++ b/drivers/staging/iio/dac/ad5624r_spi.c
@@ -77,7 +77,7 @@ static ssize_t ad5624r_write_dac(struct device *dev,
long readin;
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5624r_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5624r_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
ret = strict_strtol(buf, 10, &readin);
@@ -94,7 +94,7 @@ static ssize_t ad5624r_read_powerdown_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5624r_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5624r_state *st = iio_priv(indio_dev);
char mode[][15] = {"", "1kohm_to_gnd", "100kohm_to_gnd", "three_state"};
@@ -106,7 +106,7 @@ static ssize_t ad5624r_write_powerdown_mode(struct device *dev,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5624r_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5624r_state *st = iio_priv(indio_dev);
int ret;
if (sysfs_streq(buf, "1kohm_to_gnd"))
@@ -126,7 +126,7 @@ static ssize_t ad5624r_read_dac_powerdown(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5624r_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5624r_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
return sprintf(buf, "%d\n",
@@ -140,7 +140,7 @@ static ssize_t ad5624r_write_dac_powerdown(struct device *dev,
long readin;
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5624r_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5624r_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
ret = strict_strtol(buf, 10, &readin);
@@ -166,7 +166,7 @@ static ssize_t ad5624r_show_scale(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5624r_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5624r_state *st = iio_priv(indio_dev);
/* Corresponds to Vref / 2^(bits) */
unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
@@ -226,24 +226,26 @@ static const struct iio_info ad5624r_info = {
static int __devinit ad5624r_probe(struct spi_device *spi)
{
struct ad5624r_state *st;
+ struct iio_dev *indio_dev;
+ struct regulator *reg;
int ret, voltage_uv = 0;
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- spi_set_drvdata(spi, st);
-
- st->reg = regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
+ reg = regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(reg)) {
+ ret = regulator_enable(reg);
if (ret)
goto error_put_reg;
- voltage_uv = regulator_get_voltage(st->reg);
+ voltage_uv = regulator_get_voltage(reg);
}
-
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_disable_reg;
+ }
+ st = iio_priv(indio_dev);
+ st->reg = reg;
+ spi_set_drvdata(spi, indio_dev);
st->chip_info =
&ad5624r_chip_info_tbl[spi_get_device_id(spi)->driver_data];
@@ -253,18 +255,13 @@ static int __devinit ad5624r_probe(struct spi_device *spi)
st->vref_mv = st->chip_info->int_vref_mv;
st->us = spi;
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_disable_reg;
- }
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->name = spi_get_device_id(spi)->name;
- st->indio_dev->info = &ad5624r_info;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->indio_dev);
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5624r_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
@@ -276,32 +273,29 @@ static int __devinit ad5624r_probe(struct spi_device *spi)
return 0;
error_free_dev:
- iio_free_device(st->indio_dev);
+ iio_free_device(indio_dev);
error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ if (!IS_ERR(reg))
+ regulator_disable(reg);
error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
+ if (!IS_ERR(reg))
+ regulator_put(reg);
- kfree(st);
-error_ret:
return ret;
}
static int __devexit ad5624r_remove(struct spi_device *spi)
{
- struct ad5624r_state *st = spi_get_drvdata(spi);
-
- iio_device_unregister(st->indio_dev);
-
- if (!IS_ERR(st->reg)) {
- regulator_disable(st->reg);
- regulator_put(st->reg);
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5624r_state *st = iio_priv(indio_dev);
+ struct regulator *reg = st->reg;
+
+ iio_device_unregister(indio_dev);
+ if (!IS_ERR(reg)) {
+ regulator_disable(reg);
+ regulator_put(reg);
}
- kfree(st);
-
return 0;
}
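The ad5624r change above is the template for every conversion in this series: the driver state is no longer allocated on its own and no longer carries a pointer back to its iio_dev; instead iio_allocate_device() is passed the state size and iio_priv() returns the embedded state. A minimal sketch of the resulting shape, using a hypothetical "foo" driver purely for illustration:

#include <linux/spi/spi.h>
#include "../iio.h"

/*
 * Illustrative only: hypothetical foo driver showing the allocation
 * pattern used throughout this patch. The iio_dev and the private
 * state are one allocation; the SPI drvdata now carries the iio_dev.
 */
struct foo_state {
        struct spi_device *spi;
};

static int foo_probe(struct spi_device *spi)
{
        struct iio_dev *indio_dev;
        struct foo_state *st;
        int ret;

        indio_dev = iio_allocate_device(sizeof(*st));
        if (indio_dev == NULL)
                return -ENOMEM;

        st = iio_priv(indio_dev);         /* state embedded in indio_dev */
        st->spi = spi;
        spi_set_drvdata(spi, indio_dev);  /* remove() fetches this back  */

        indio_dev->dev.parent = &spi->dev;
        indio_dev->modes = INDIO_DIRECT_MODE;

        ret = iio_device_register(indio_dev);
        if (ret)
                iio_free_device(indio_dev); /* no separate kfree(st) */
        return ret;
}

The matching remove() paths in the hunks that follow simply recover the iio_dev from drvdata and hand it back to the core.
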
diff --git a/drivers/staging/iio/dac/ad5686.c b/drivers/staging/iio/dac/ad5686.c
new file mode 100644
index 00000000000..fd67cfa5edb
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5686.c
@@ -0,0 +1,497 @@
+/*
+ * AD5686R, AD5685R, AD5684R Digital to analog converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dac.h"
+
+#define AD5686_DAC_CHANNELS 4
+
+#define AD5686_ADDR(x) ((x) << 16)
+#define AD5686_CMD(x) ((x) << 20)
+
+#define AD5686_ADDR_DAC0 0x1
+#define AD5686_ADDR_DAC1 0x2
+#define AD5686_ADDR_DAC2 0x4
+#define AD5686_ADDR_DAC3 0x8
+#define AD5686_ADDR_ALL_DAC 0xF
+
+#define AD5686_CMD_NOOP 0x0
+#define AD5686_CMD_WRITE_INPUT_N 0x1
+#define AD5686_CMD_UPDATE_DAC_N 0x2
+#define AD5686_CMD_WRITE_INPUT_N_UPDATE_N 0x3
+#define AD5686_CMD_POWERDOWN_DAC 0x4
+#define AD5686_CMD_LDAC_MASK 0x5
+#define AD5686_CMD_RESET 0x6
+#define AD5686_CMD_INTERNAL_REFER_SETUP 0x7
+#define AD5686_CMD_DAISY_CHAIN_ENABLE 0x8
+#define AD5686_CMD_READBACK_ENABLE 0x9
+
+#define AD5686_LDAC_PWRDN_NONE 0x0
+#define AD5686_LDAC_PWRDN_1K 0x1
+#define AD5686_LDAC_PWRDN_100K 0x2
+#define AD5686_LDAC_PWRDN_3STATE 0x3
+
+/**
+ * struct ad5686_chip_info - chip specific information
+ * @int_vref_mv: AD5684R/85R/86R: the internal reference voltage
+ * @channel: channel specification
+*/
+
+struct ad5686_chip_info {
+ u16 int_vref_mv;
+ struct iio_chan_spec channel[AD5686_DAC_CHANNELS];
+};
+
+/**
+ * struct ad5686_state - driver instance specific data
+ * @spi: spi_device
+ * @chip_info: chip model specific constants, available modes etc
+ * @reg: supply regulator
+ * @vref_mv: actual reference voltage used
+ * @pwr_down_mask: power down mask
+ * @pwr_down_mode: current power down mode
+ * @data: spi transfer buffers
+ */
+
+struct ad5686_state {
+ struct spi_device *spi;
+ const struct ad5686_chip_info *chip_info;
+ struct regulator *reg;
+ unsigned short vref_mv;
+ unsigned pwr_down_mask;
+ unsigned pwr_down_mode;
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+
+ union {
+ u32 d32;
+ u8 d8[4];
+ } data[3] ____cacheline_aligned;
+};
+
+/**
+ * enum ad5686_supported_device_ids - supported parts (chip_info table index)
+ */
+
+enum ad5686_supported_device_ids {
+ ID_AD5684,
+ ID_AD5685,
+ ID_AD5686,
+};
+
+static const struct ad5686_chip_info ad5686_chip_info_tbl[] = {
+ [ID_AD5684] = {
+ .channel[0] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 0, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD5686_ADDR_DAC0,
+ 0, IIO_ST('u', 12, 16, 4), 0),
+ .channel[1] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 1, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD5686_ADDR_DAC1,
+ 1, IIO_ST('u', 12, 16, 4), 0),
+ .channel[2] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 2, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD5686_ADDR_DAC2,
+ 2, IIO_ST('u', 12, 16, 4), 0),
+ .channel[3] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 3, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD5686_ADDR_DAC3,
+ 3, IIO_ST('u', 12, 16, 4), 0),
+ .int_vref_mv = 2500,
+ },
+ [ID_AD5685] = {
+ .channel[0] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 0, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD5686_ADDR_DAC0,
+ 0, IIO_ST('u', 14, 16, 2), 0),
+ .channel[1] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 1, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD5686_ADDR_DAC1,
+ 1, IIO_ST('u', 14, 16, 2), 0),
+ .channel[2] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 2, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD5686_ADDR_DAC2,
+ 2, IIO_ST('u', 14, 16, 2), 0),
+ .channel[3] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 3, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD5686_ADDR_DAC3,
+ 3, IIO_ST('u', 14, 16, 2), 0),
+ .int_vref_mv = 2500,
+ },
+ [ID_AD5686] = {
+ .channel[0] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 0, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD5686_ADDR_DAC0,
+ 0, IIO_ST('u', 16, 16, 0), 0),
+ .channel[1] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 1, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD5686_ADDR_DAC1,
+ 1, IIO_ST('u', 16, 16, 0), 0),
+ .channel[2] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 2, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD5686_ADDR_DAC2,
+ 2, IIO_ST('u', 16, 16, 0), 0),
+ .channel[3] = IIO_CHAN(IIO_OUT, 0, 1, 0, NULL, 3, 0,
+ (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ AD5686_ADDR_DAC3,
+ 3, IIO_ST('u', 16, 16, 0), 0),
+ .int_vref_mv = 2500,
+ },
+};
+
+static int ad5686_spi_write(struct ad5686_state *st,
+ u8 cmd, u8 addr, u16 val, u8 shift)
+{
+ val <<= shift;
+
+ st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) |
+ AD5686_ADDR(addr) |
+ val);
+
+ return spi_write(st->spi, &st->data[0].d8[1], 3);
+}
+
+static int ad5686_spi_read(struct ad5686_state *st, u8 addr)
+{
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->data[0].d8[1],
+ .len = 3,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &st->data[1].d8[1],
+ .rx_buf = &st->data[2].d8[1],
+ .len = 3,
+ },
+ };
+ struct spi_message m;
+ int ret;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ st->data[0].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_READBACK_ENABLE) |
+ AD5686_ADDR(addr));
+ st->data[1].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_NOOP));
+
+ ret = spi_sync(st->spi, &m);
+ if (ret < 0)
+ return ret;
+
+ return be32_to_cpu(st->data[2].d32);
+}
+
+static ssize_t ad5686_read_powerdown_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5686_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ char mode[][15] = {"", "1kohm_to_gnd", "100kohm_to_gnd", "three_state"};
+
+ return sprintf(buf, "%s\n", mode[(st->pwr_down_mode >>
+ (this_attr->address * 2)) & 0x3]);
+}
+
+static ssize_t ad5686_write_powerdown_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5686_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ unsigned mode;
+
+ if (sysfs_streq(buf, "1kohm_to_gnd"))
+ mode = AD5686_LDAC_PWRDN_1K;
+ else if (sysfs_streq(buf, "100kohm_to_gnd"))
+ mode = AD5686_LDAC_PWRDN_100K;
+ else if (sysfs_streq(buf, "three_state"))
+ mode = AD5686_LDAC_PWRDN_3STATE;
+ else
+ return -EINVAL;
+
+ st->pwr_down_mode &= ~(0x3 << (this_attr->address * 2));
+ st->pwr_down_mode |= (mode << (this_attr->address * 2));
+
+ return len;
+}
+
+static ssize_t ad5686_read_dac_powerdown(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5686_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ return sprintf(buf, "%d\n", !!(st->pwr_down_mask &
+ (0x3 << (this_attr->address * 2))));
+}
+
+static ssize_t ad5686_write_dac_powerdown(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ bool readin;
+ int ret;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5686_state *st = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ ret = strtobool(buf, &readin);
+ if (ret)
+ return ret;
+
+ if (readin == true)
+ st->pwr_down_mask |= (0x3 << (this_attr->address * 2));
+ else
+ st->pwr_down_mask &= ~(0x3 << (this_attr->address * 2));
+
+ ret = ad5686_spi_write(st, AD5686_CMD_POWERDOWN_DAC, 0,
+ st->pwr_down_mask & st->pwr_down_mode, 0);
+
+ return ret ? ret : len;
+}
+
+static IIO_CONST_ATTR(out_powerdown_mode_available,
+ "1kohm_to_gnd 100kohm_to_gnd three_state");
+
+#define IIO_DEV_ATTR_DAC_POWERDOWN_MODE(_num) \
+ IIO_DEVICE_ATTR(out##_num##_powerdown_mode, S_IRUGO | S_IWUSR, \
+ ad5686_read_powerdown_mode, \
+ ad5686_write_powerdown_mode, _num)
+
+static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(0);
+static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(1);
+static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(2);
+static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(3);
+
+#define IIO_DEV_ATTR_DAC_POWERDOWN(_num) \
+ IIO_DEVICE_ATTR(out##_num##_powerdown, S_IRUGO | S_IWUSR, \
+ ad5686_read_dac_powerdown, \
+ ad5686_write_dac_powerdown, _num)
+
+static IIO_DEV_ATTR_DAC_POWERDOWN(0);
+static IIO_DEV_ATTR_DAC_POWERDOWN(1);
+static IIO_DEV_ATTR_DAC_POWERDOWN(2);
+static IIO_DEV_ATTR_DAC_POWERDOWN(3);
+
+static struct attribute *ad5686_attributes[] = {
+ &iio_dev_attr_out0_powerdown.dev_attr.attr,
+ &iio_dev_attr_out1_powerdown.dev_attr.attr,
+ &iio_dev_attr_out2_powerdown.dev_attr.attr,
+ &iio_dev_attr_out3_powerdown.dev_attr.attr,
+ &iio_dev_attr_out0_powerdown_mode.dev_attr.attr,
+ &iio_dev_attr_out1_powerdown_mode.dev_attr.attr,
+ &iio_dev_attr_out2_powerdown_mode.dev_attr.attr,
+ &iio_dev_attr_out3_powerdown_mode.dev_attr.attr,
+ &iio_const_attr_out_powerdown_mode_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad5686_attribute_group = {
+ .attrs = ad5686_attributes,
+};
+
+static int ad5686_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad5686_state *st = iio_priv(indio_dev);
+ unsigned long scale_uv;
+ int ret;
+
+ switch (m) {
+ case 0:
+ mutex_lock(&indio_dev->mlock);
+ ret = ad5686_spi_read(st, chan->address);
+ mutex_unlock(&indio_dev->mlock);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ break;
+ case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ scale_uv = (st->vref_mv * 100000)
+ >> (chan->scan_type.realbits);
+ *val = scale_uv / 100000;
+ *val2 = (scale_uv % 100000) * 10;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ }
+ return -EINVAL;
+}
+
+static int ad5686_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad5686_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case 0:
+ if (val > (1 << chan->scan_type.realbits))
+ return -EINVAL;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad5686_spi_write(st,
+ AD5686_CMD_WRITE_INPUT_N_UPDATE_N,
+ chan->address,
+ val,
+ chan->scan_type.shift);
+ mutex_unlock(&indio_dev->mlock);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct iio_info ad5686_info = {
+ .read_raw = ad5686_read_raw,
+ .write_raw = ad5686_write_raw,
+ .attrs = &ad5686_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int __devinit ad5686_probe(struct spi_device *spi)
+{
+ struct ad5686_state *st;
+ struct iio_dev *indio_dev;
+ int ret, regdone = 0, voltage_uv = 0;
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->reg = regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(st->reg)) {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ goto error_put_reg;
+
+ voltage_uv = regulator_get_voltage(st->reg);
+ }
+
+ st->chip_info =
+ &ad5686_chip_info_tbl[spi_get_device_id(spi)->driver_data];
+
+ if (voltage_uv)
+ st->vref_mv = voltage_uv / 1000;
+ else
+ st->vref_mv = st->chip_info->int_vref_mv;
+
+ st->spi = spi;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5686_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->chip_info->channel;
+ indio_dev->num_channels = AD5686_DAC_CHANNELS;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto error_disable_reg;
+
+ regdone = 1;
+ ret = ad5686_spi_write(st, AD5686_CMD_INTERNAL_REFER_SETUP, 0,
+ !!voltage_uv, 0);
+ if (ret)
+ goto error_disable_reg;
+
+ return 0;
+
+error_disable_reg:
+ if (!IS_ERR(st->reg))
+ regulator_disable(st->reg);
+error_put_reg:
+ if (!IS_ERR(st->reg))
+ regulator_put(st->reg);
+
+ if (regdone)
+ iio_device_unregister(indio_dev);
+ else
+ iio_free_device(indio_dev);
+
+ return ret;
+}
+
+static int __devexit ad5686_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5686_state *st = iio_priv(indio_dev);
+ struct regulator *reg = st->reg;
+
+ if (!IS_ERR(reg)) {
+ regulator_disable(reg);
+ regulator_put(reg);
+ }
+
+ iio_device_unregister(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5686_id[] = {
+ {"ad5684", ID_AD5684},
+ {"ad5685", ID_AD5685},
+ {"ad5686", ID_AD5686},
+ {}
+};
+
+static struct spi_driver ad5686_driver = {
+ .driver = {
+ .name = "ad5686",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5686_probe,
+ .remove = __devexit_p(ad5686_remove),
+ .id_table = ad5686_id,
+};
+
+static __init int ad5686_spi_init(void)
+{
+ return spi_register_driver(&ad5686_driver);
+}
+module_init(ad5686_spi_init);
+
+static __exit void ad5686_spi_exit(void)
+{
+ spi_unregister_driver(&ad5686_driver);
+}
+module_exit(ad5686_spi_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD5686/85/84 DAC");
+MODULE_LICENSE("GPL v2");
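Each transfer built by ad5686_spi_write() above is a 24-bit frame: command in bits 23:20, channel address in bits 19:16, and the DAC code (shifted left by the channel's scan_type.shift) in bits 15:0; the word is stored big-endian in d32 and only bytes d8[1]..d8[3] go out on the wire. A standalone sketch of that byte layout, with a hypothetical ad5686_frame() helper for illustration:

/*
 * Illustrative userspace sketch (not driver code): how the 24-bit
 * AD5686 frame maps onto the three bytes sent over SPI, MSB first.
 */
#include <stdint.h>
#include <stdio.h>

#define AD5686_CMD(x)   ((uint32_t)(x) << 20)
#define AD5686_ADDR(x)  ((uint32_t)(x) << 16)

static void ad5686_frame(uint8_t cmd, uint8_t addr, uint16_t code,
                         uint8_t shift, uint8_t out[3])
{
        uint32_t w = AD5686_CMD(cmd) | AD5686_ADDR(addr) |
                     ((uint32_t)code << shift);

        out[0] = w >> 16;       /* d8[1] in the driver: cmd | addr  */
        out[1] = w >> 8;        /* d8[2]: high byte of shifted code */
        out[2] = w;             /* d8[3]: low byte of shifted code  */
}

int main(void)
{
        uint8_t buf[3];

        /* write-and-update DAC 1 of an AD5684 (12-bit code, shift = 4) */
        ad5686_frame(0x3 /* WRITE_INPUT_N_UPDATE_N */, 0x2 /* DAC1 */,
                     0xABC, 4, buf);
        printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]); /* 32 ab c0 */
        return 0;
}

The shift of 4 matches the AD5684 channel definition IIO_ST('u', 12, 16, 4); the 16-bit AD5686 channels use shift 0.
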
diff --git a/drivers/staging/iio/dac/ad5791.c b/drivers/staging/iio/dac/ad5791.c
index 4eda25cba87..64770d2a1b4 100644
--- a/drivers/staging/iio/dac/ad5791.c
+++ b/drivers/staging/iio/dac/ad5791.c
@@ -76,7 +76,7 @@ static ssize_t ad5791_write_dac(struct device *dev,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5791_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5791_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
long readin;
int ret;
@@ -98,7 +98,7 @@ static ssize_t ad5791_read_dac(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5791_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5791_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
int val;
@@ -118,7 +118,7 @@ static ssize_t ad5791_read_powerdown_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5791_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5791_state *st = iio_priv(indio_dev);
const char mode[][14] = {"6kohm_to_gnd", "three_state"};
@@ -130,7 +130,7 @@ static ssize_t ad5791_write_powerdown_mode(struct device *dev,
const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5791_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5791_state *st = iio_priv(indio_dev);
int ret;
if (sysfs_streq(buf, "6kohm_to_gnd"))
@@ -148,7 +148,7 @@ static ssize_t ad5791_read_dac_powerdown(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5791_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5791_state *st = iio_priv(indio_dev);
return sprintf(buf, "%d\n", st->pwr_down);
}
@@ -160,7 +160,7 @@ static ssize_t ad5791_write_dac_powerdown(struct device *dev,
long readin;
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5791_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5791_state *st = iio_priv(indio_dev);
ret = strict_strtol(buf, 10, &readin);
if (ret)
@@ -188,7 +188,7 @@ static ssize_t ad5791_show_scale(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5791_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5791_state *st = iio_priv(indio_dev);
/* Corresponds to Vref / 2^(bits) */
unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
@@ -201,7 +201,7 @@ static ssize_t ad5791_show_name(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5791_state *st = iio_dev_get_devdata(indio_dev);
+ struct ad5791_state *st = iio_priv(indio_dev);
return sprintf(buf, "%s\n", spi_get_device_id(st->spi)->name);
}
@@ -295,36 +295,39 @@ static const struct iio_info ad5791_info = {
static int __devinit ad5791_probe(struct spi_device *spi)
{
struct ad5791_platform_data *pdata = spi->dev.platform_data;
+ struct iio_dev *indio_dev;
+ struct regulator *reg_vdd, *reg_vss;
struct ad5791_state *st;
int ret, pos_voltage_uv = 0, neg_voltage_uv = 0;
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- spi_set_drvdata(spi, st);
-
- st->reg_vdd = regulator_get(&spi->dev, "vdd");
- if (!IS_ERR(st->reg_vdd)) {
- ret = regulator_enable(st->reg_vdd);
+ reg_vdd = regulator_get(&spi->dev, "vdd");
+ if (!IS_ERR(reg_vdd)) {
+ ret = regulator_enable(reg_vdd);
if (ret)
goto error_put_reg_pos;
- pos_voltage_uv = regulator_get_voltage(st->reg_vdd);
+ pos_voltage_uv = regulator_get_voltage(reg_vdd);
}
- st->reg_vss = regulator_get(&spi->dev, "vss");
- if (!IS_ERR(st->reg_vss)) {
- ret = regulator_enable(st->reg_vss);
+ reg_vss = regulator_get(&spi->dev, "vss");
+ if (!IS_ERR(reg_vss)) {
+ ret = regulator_enable(reg_vss);
if (ret)
goto error_put_reg_neg;
- neg_voltage_uv = regulator_get_voltage(st->reg_vss);
+ neg_voltage_uv = regulator_get_voltage(reg_vss);
}
- if (!IS_ERR(st->reg_vss) && !IS_ERR(st->reg_vdd))
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
+ goto error_disable_reg_neg;
+ }
+ st = iio_priv(indio_dev);
+ st->pwr_down = true;
+ st->spi = spi;
+
+ if (!IS_ERR(reg_vss) && !IS_ERR(reg_vdd))
st->vref_mv = (pos_voltage_uv - neg_voltage_uv) / 1000;
else if (pdata)
st->vref_mv = pdata->vref_pos_mv - pdata->vref_neg_mv;
@@ -333,7 +336,7 @@ static int __devinit ad5791_probe(struct spi_device *spi)
ret = ad5791_spi_write(spi, AD5791_ADDR_SW_CTRL, AD5791_SWCTRL_RESET);
if (ret)
- goto error_disable_reg_neg;
+ goto error_free_dev;
st->chip_info =
&ad5791_chip_info_tbl[spi_get_device_id(spi)->driver_data];
@@ -346,66 +349,61 @@ static int __devinit ad5791_probe(struct spi_device *spi)
ret = ad5791_spi_write(spi, AD5791_ADDR_CTRL, st->ctrl |
AD5791_CTRL_OPGND | AD5791_CTRL_DACTRI);
if (ret)
- goto error_disable_reg_neg;
+ goto error_free_dev;
- st->pwr_down = true;
+ st->reg_vdd = reg_vdd;
+ st->reg_vss = reg_vss;
- st->spi = spi;
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_disable_reg_neg;
- }
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->info = &ad5791_info;
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ spi_set_drvdata(spi, indio_dev);
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &ad5791_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
return 0;
error_free_dev:
- iio_free_device(st->indio_dev);
+ iio_free_device(indio_dev);
error_disable_reg_neg:
- if (!IS_ERR(st->reg_vss))
- regulator_disable(st->reg_vss);
+ if (!IS_ERR(reg_vss))
+ regulator_disable(reg_vss);
error_put_reg_neg:
- if (!IS_ERR(st->reg_vss))
- regulator_put(st->reg_vss);
+ if (!IS_ERR(reg_vss))
+ regulator_put(reg_vss);
- if (!IS_ERR(st->reg_vdd))
- regulator_disable(st->reg_vdd);
+ if (!IS_ERR(reg_vdd))
+ regulator_disable(reg_vdd);
error_put_reg_pos:
- if (!IS_ERR(st->reg_vdd))
- regulator_put(st->reg_vdd);
+ if (!IS_ERR(reg_vdd))
+ regulator_put(reg_vdd);
- kfree(st);
error_ret:
return ret;
}
static int __devexit ad5791_remove(struct spi_device *spi)
{
- struct ad5791_state *st = spi_get_drvdata(spi);
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5791_state *st = iio_priv(indio_dev);
+ struct regulator *reg_vdd = st->reg_vdd;
+ struct regulator *reg_vss = st->reg_vss;
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
if (!IS_ERR(st->reg_vdd)) {
- regulator_disable(st->reg_vdd);
- regulator_put(st->reg_vdd);
+ regulator_disable(reg_vdd);
+ regulator_put(reg_vdd);
}
if (!IS_ERR(st->reg_vss)) {
- regulator_disable(st->reg_vss);
- regulator_put(st->reg_vss);
+ regulator_disable(reg_vss);
+ regulator_put(reg_vss);
}
- kfree(st);
-
return 0;
}
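Both ad5791 and the new ad5686 report one LSB as Vref / 2^bits; ad5791 derives Vref from the vdd/vss span when both supplies are present, and ad5686_read_raw() computes the scale in fixed point before splitting it into the integer and micro parts of IIO_VAL_INT_PLUS_MICRO. A small standalone sketch of that arithmetic (illustrative, userspace only):

/*
 * Illustrative sketch of the Vref / 2^bits scale arithmetic used by
 * these DAC drivers: fixed point, then split into integer and micro
 * parts the way read_raw() reports them.
 */
#include <stdio.h>

int main(void)
{
        unsigned int vref_mv = 2500;    /* e.g. the internal reference */
        unsigned int bits = 12;         /* AD5684-style channel        */
        unsigned long scale = ((unsigned long)vref_mv * 100000) >> bits;

        /* 2500 mV / 4096 = 0.610351... mV per LSB */
        printf("scale = %lu.%06lu mV/LSB\n",
               scale / 100000, (scale % 100000) * 10);
        return 0;
}
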
diff --git a/drivers/staging/iio/dac/ad5791.h b/drivers/staging/iio/dac/ad5791.h
index f09ad9a430c..c807f26539d 100644
--- a/drivers/staging/iio/dac/ad5791.h
+++ b/drivers/staging/iio/dac/ad5791.h
@@ -81,7 +81,6 @@ struct ad5791_chip_info {
/**
* struct ad5791_state - driver instance specific data
- * @indio_dev: the industrial I/O device
* @us: spi_device
* @reg_vdd: positive supply regulator
* @reg_vss: negative supply regulator
@@ -91,7 +90,6 @@ struct ad5791_chip_info {
*/
struct ad5791_state {
- struct iio_dev *indio_dev;
struct spi_device *spi;
struct regulator *reg_vdd;
struct regulator *reg_vss;
diff --git a/drivers/staging/iio/dac/max517.c b/drivers/staging/iio/dac/max517.c
index 2fe34d21b6a..ed5d351b238 100644
--- a/drivers/staging/iio/dac/max517.c
+++ b/drivers/staging/iio/dac/max517.c
@@ -59,7 +59,7 @@ static ssize_t max517_set_value(struct device *dev,
const char *buf, size_t count, int channel)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct max517_data *data = iio_dev_get_devdata(dev_info);
+ struct max517_data *data = iio_priv(dev_info);
struct i2c_client *client = data->client;
u8 outbuf[4]; /* 1x or 2x command + value */
int outbuf_size = 0;
@@ -127,7 +127,7 @@ static ssize_t max517_show_scale(struct device *dev,
char *buf, int channel)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct max517_data *data = iio_dev_get_devdata(dev_info);
+ struct max517_data *data = iio_priv(dev_info);
/* Corresponds to Vref / 2^(bits) */
unsigned int scale_uv = (data->vref_mv[channel - 1] * 1000) >> 8;
@@ -203,35 +203,28 @@ static int max517_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max517_data *data;
+ struct iio_dev *indio_dev;
struct max517_platform_data *platform_data = client->dev.platform_data;
int err;
- data = kzalloc(sizeof(struct max517_data), GFP_KERNEL);
- if (!data) {
+ indio_dev = iio_allocate_device(sizeof(*data));
+ if (indio_dev == NULL) {
err = -ENOMEM;
goto exit;
}
-
- i2c_set_clientdata(client, data);
-
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->indio_dev = iio_allocate_device(0);
- if (data->indio_dev == NULL) {
- err = -ENOMEM;
- goto exit_free_data;
- }
-
/* establish that the iio_dev is a child of the i2c device */
- data->indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.parent = &client->dev;
/* reduced attribute set for MAX517 */
if (id->driver_data == ID_MAX517)
- data->indio_dev->info = &max517_info;
+ indio_dev->info = &max517_info;
else
- data->indio_dev->info = &max518_info;
- data->indio_dev->dev_data = (void *)(data);
- data->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &max518_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
/*
* Reference voltage on MAX518 and default is 5V, else take vref_mv
@@ -244,7 +237,7 @@ static int max517_probe(struct i2c_client *client,
data->vref_mv[1] = platform_data->vref_mv[1];
}
- err = iio_device_register(data->indio_dev);
+ err = iio_device_register(indio_dev);
if (err)
goto exit_free_device;
@@ -253,19 +246,14 @@ static int max517_probe(struct i2c_client *client,
return 0;
exit_free_device:
- iio_free_device(data->indio_dev);
-exit_free_data:
- kfree(data);
+ iio_free_device(indio_dev);
exit:
return err;
}
static int max517_remove(struct i2c_client *client)
{
- struct max517_data *data = i2c_get_clientdata(client);
-
- iio_free_device(data->indio_dev);
- kfree(data);
+ iio_free_device(i2c_get_clientdata(client));
return 0;
}
diff --git a/drivers/staging/iio/dds/ad5930.c b/drivers/staging/iio/dds/ad5930.c
index 490c3637bc8..0b2aa4cafdd 100644
--- a/drivers/staging/iio/dds/ad5930.c
+++ b/drivers/staging/iio/dds/ad5930.c
@@ -35,7 +35,6 @@ struct ad5903_config {
struct ad5930_state {
struct mutex lock;
- struct iio_dev *idev;
struct spi_device *sdev;
};
@@ -49,7 +48,7 @@ static ssize_t ad5930_set_parameter(struct device *dev,
int ret;
struct ad5903_config *config = (struct ad5903_config *)buf;
struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad5930_state *st = idev->dev_data;
+ struct ad5930_state *st = iio_priv(idev);
config->control = (config->control & ~value_mask);
config->incnum = (config->control & ~value_mask) | (1 << addr_shift);
@@ -83,42 +82,35 @@ static struct attribute *ad5930_attributes[] = {
};
static const struct attribute_group ad5930_attribute_group = {
- .name = DRV_NAME,
.attrs = ad5930_attributes,
};
static const struct iio_info ad5930_info = {
.attrs = &ad5930_attribute_group,
-
.driver_module = THIS_MODULE,
};
static int __devinit ad5930_probe(struct spi_device *spi)
{
struct ad5930_state *st;
+ struct iio_dev *idev;
int ret = 0;
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
+ idev = iio_allocate_device(sizeof(*st));
+ if (idev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
- spi_set_drvdata(spi, st);
+ spi_set_drvdata(spi, idev);
+ st = iio_priv(idev);
mutex_init(&st->lock);
st->sdev = spi;
+ idev->dev.parent = &spi->dev;
+ idev->info = &ad5930_info;
+ idev->modes = INDIO_DIRECT_MODE;
- st->idev = iio_allocate_device(0);
- if (st->idev == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->idev->dev.parent = &spi->dev;
- st->idev->dev_data = (void *)(st);
- st->idev->info = &ad5930_info;
- st->idev->modes = INDIO_DIRECT_MODE;
-
- ret = iio_device_register(st->idev);
+ ret = iio_device_register(idev);
if (ret)
goto error_free_dev;
spi->max_speed_hz = 2000000;
@@ -129,19 +121,14 @@ static int __devinit ad5930_probe(struct spi_device *spi)
return 0;
error_free_dev:
- iio_free_device(st->idev);
-error_free_st:
- kfree(st);
+ iio_free_device(idev);
error_ret:
return ret;
}
static int __devexit ad5930_remove(struct spi_device *spi)
{
- struct ad5930_state *st = spi_get_drvdata(spi);
-
- iio_device_unregister(st->idev);
- kfree(st);
+ iio_device_unregister(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/dds/ad9832.c b/drivers/staging/iio/dds/ad9832.c
index e8fe1426a32..e3e61a469bb 100644
--- a/drivers/staging/iio/dds/ad9832.c
+++ b/drivers/staging/iio/dds/ad9832.c
@@ -77,7 +77,7 @@ static ssize_t ad9832_write(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad9832_state *st = dev_info->dev_data;
+ struct ad9832_state *st = iio_priv(dev_info);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
long val;
@@ -203,7 +203,9 @@ static const struct iio_info ad9832_info = {
static int __devinit ad9832_probe(struct spi_device *spi)
{
struct ad9832_platform_data *pdata = spi->dev.platform_data;
+ struct iio_dev *indio_dev;
struct ad9832_state *st;
+ struct regulator *reg;
int ret;
if (!pdata) {
@@ -211,35 +213,28 @@ static int __devinit ad9832_probe(struct spi_device *spi)
return -ENODEV;
}
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- st->reg = regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
+ reg = regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(reg)) {
+ ret = regulator_enable(reg);
if (ret)
goto error_put_reg;
}
- st->mclk = pdata->mclk;
-
- spi_set_drvdata(spi, st);
- st->spi = spi;
-
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_disable_reg;
}
+ spi_set_drvdata(spi, indio_dev);
+ st = iio_priv(indio_dev);
+ st->reg = reg;
+ st->mclk = pdata->mclk;
+ st->spi = spi;
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->name = spi_get_device_id(spi)->name;
- st->indio_dev->info = &ad9832_info;
- st->indio_dev->dev_data = (void *) st;
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad9832_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
/* Setup default messages */
@@ -310,35 +305,35 @@ static int __devinit ad9832_probe(struct spi_device *spi)
if (ret)
goto error_free_device;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_device;
return 0;
error_free_device:
- iio_free_device(st->indio_dev);
+ iio_free_device(indio_dev);
error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ if (!IS_ERR(reg))
+ regulator_disable(reg);
error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
- kfree(st);
-error_ret:
+ if (!IS_ERR(reg))
+ regulator_put(reg);
+
return ret;
}
static int __devexit ad9832_remove(struct spi_device *spi)
{
- struct ad9832_state *st = spi_get_drvdata(spi);
-
- iio_device_unregister(st->indio_dev);
- if (!IS_ERR(st->reg)) {
- regulator_disable(st->reg);
- regulator_put(st->reg);
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad9832_state *st = iio_priv(indio_dev);
+ struct regulator *reg = st->reg;
+
+ iio_device_unregister(indio_dev);
+ if (!IS_ERR(reg)) {
+ regulator_disable(reg);
+ regulator_put(reg);
}
- kfree(st);
return 0;
}
diff --git a/drivers/staging/iio/dds/ad9832.h b/drivers/staging/iio/dds/ad9832.h
index 5d474543dfc..c5b701f8aab 100644
--- a/drivers/staging/iio/dds/ad9832.h
+++ b/drivers/staging/iio/dds/ad9832.h
@@ -57,7 +57,6 @@
/**
* struct ad9832_state - driver instance specific data
- * @indio_dev: the industrial I/O device
* @spi: spi_device
* @reg: supply regulator
* @mclk: external master clock
@@ -76,7 +75,6 @@
*/
struct ad9832_state {
- struct iio_dev *indio_dev;
struct spi_device *spi;
struct regulator *reg;
unsigned long mclk;
diff --git a/drivers/staging/iio/dds/ad9834.c b/drivers/staging/iio/dds/ad9834.c
index 0ebe8d58e92..e6454d58fe4 100644
--- a/drivers/staging/iio/dds/ad9834.c
+++ b/drivers/staging/iio/dds/ad9834.c
@@ -66,7 +66,7 @@ static ssize_t ad9834_write(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad9834_state *st = dev_info->dev_data;
+ struct ad9834_state *st = iio_priv(dev_info);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
long val;
@@ -145,7 +145,7 @@ static ssize_t ad9834_store_wavetype(struct device *dev,
size_t len)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad9834_state *st = dev_info->dev_data;
+ struct ad9834_state *st = iio_priv(dev_info);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret = 0;
bool is_ad9833_7 = (st->devid == ID_AD9833) || (st->devid == ID_AD9837);
@@ -203,7 +203,7 @@ static ssize_t ad9834_show_out0_wavetype_available(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad9834_state *st = iio_dev_get_devdata(dev_info);
+ struct ad9834_state *st = iio_priv(dev_info);
char *str;
if ((st->devid == ID_AD9833) || (st->devid == ID_AD9837))
@@ -225,7 +225,7 @@ static ssize_t ad9834_show_out1_wavetype_available(struct device *dev,
char *buf)
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad9834_state *st = iio_dev_get_devdata(dev_info);
+ struct ad9834_state *st = iio_priv(dev_info);
char *str;
if (st->control & AD9834_MODE)
@@ -285,7 +285,7 @@ static mode_t ad9834_attr_is_visible(struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *dev_info = dev_get_drvdata(dev);
- struct ad9834_state *st = iio_dev_get_devdata(dev_info);
+ struct ad9834_state *st = iio_priv(dev_info);
mode_t mode = attr->mode;
@@ -314,6 +314,8 @@ static int __devinit ad9834_probe(struct spi_device *spi)
{
struct ad9834_platform_data *pdata = spi->dev.platform_data;
struct ad9834_state *st;
+ struct iio_dev *indio_dev;
+ struct regulator *reg;
int ret;
if (!pdata) {
@@ -321,37 +323,28 @@ static int __devinit ad9834_probe(struct spi_device *spi)
return -ENODEV;
}
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
-
- st->reg = regulator_get(&spi->dev, "vcc");
- if (!IS_ERR(st->reg)) {
- ret = regulator_enable(st->reg);
+ reg = regulator_get(&spi->dev, "vcc");
+ if (!IS_ERR(reg)) {
+ ret = regulator_enable(reg);
if (ret)
goto error_put_reg;
}
- st->mclk = pdata->mclk;
-
- spi_set_drvdata(spi, st);
-
- st->spi = spi;
- st->devid = spi_get_device_id(spi)->driver_data;
-
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_disable_reg;
}
-
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->name = spi_get_device_id(spi)->name;
- st->indio_dev->info = &ad9834_info;
- st->indio_dev->dev_data = (void *) st;
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ spi_set_drvdata(spi, indio_dev);
+ st = iio_priv(indio_dev);
+ st->mclk = pdata->mclk;
+ st->spi = spi;
+ st->devid = spi_get_device_id(spi)->driver_data;
+ st->reg = reg;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad9834_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
/* Setup default messages */
@@ -402,35 +395,35 @@ static int __devinit ad9834_probe(struct spi_device *spi)
if (ret)
goto error_free_device;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_device;
return 0;
error_free_device:
- iio_free_device(st->indio_dev);
+ iio_free_device(indio_dev);
error_disable_reg:
- if (!IS_ERR(st->reg))
- regulator_disable(st->reg);
+ if (!IS_ERR(reg))
+ regulator_disable(reg);
error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
- kfree(st);
-error_ret:
+ if (!IS_ERR(reg))
+ regulator_put(reg);
return ret;
}
static int __devexit ad9834_remove(struct spi_device *spi)
{
- struct ad9834_state *st = spi_get_drvdata(spi);
-
- iio_device_unregister(st->indio_dev);
- if (!IS_ERR(st->reg)) {
- regulator_disable(st->reg);
- regulator_put(st->reg);
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad9834_state *st = iio_priv(indio_dev);
+ struct regulator *reg = st->reg;
+
+ iio_device_unregister(indio_dev);
+ if (!IS_ERR(reg)) {
+ regulator_disable(reg);
+ regulator_put(reg);
}
- kfree(st);
+
return 0;
}
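The ad9832 and ad9834 probes above keep the same unwind ladder: the vcc regulator is acquired and enabled before the iio_dev is allocated, so each error label releases exactly what was set up before the failure, in reverse order. A condensed sketch of that control flow, with hypothetical bar_* names:

#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include "../iio.h"

/* Condensed, hypothetical sketch of the error-unwind ordering. */
struct bar_state {
        struct regulator *reg;
};

static int bar_probe(struct spi_device *spi)
{
        struct iio_dev *indio_dev;
        struct regulator *reg;
        int ret;

        reg = regulator_get(&spi->dev, "vcc");
        if (!IS_ERR(reg)) {
                ret = regulator_enable(reg);
                if (ret)
                        goto error_put_reg;
        }

        indio_dev = iio_allocate_device(sizeof(struct bar_state));
        if (indio_dev == NULL) {
                ret = -ENOMEM;
                goto error_disable_reg;
        }

        ret = iio_device_register(indio_dev);
        if (ret)
                goto error_free_device;

        return 0;

error_free_device:
        iio_free_device(indio_dev);
error_disable_reg:
        if (!IS_ERR(reg))
                regulator_disable(reg);
error_put_reg:
        if (!IS_ERR(reg))
                regulator_put(reg);
        return ret;
}
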
diff --git a/drivers/staging/iio/dds/ad9834.h b/drivers/staging/iio/dds/ad9834.h
index 2abd63587e0..ed5ed8d0007 100644
--- a/drivers/staging/iio/dds/ad9834.h
+++ b/drivers/staging/iio/dds/ad9834.h
@@ -38,7 +38,6 @@
/**
* struct ad9834_state - driver instance specific data
- * @indio_dev: the industrial I/O device
* @spi: spi_device
* @reg: supply regulator
* @mclk: external master clock
@@ -52,7 +51,6 @@
*/
struct ad9834_state {
- struct iio_dev *indio_dev;
struct spi_device *spi;
struct regulator *reg;
unsigned int mclk;
diff --git a/drivers/staging/iio/dds/ad9850.c b/drivers/staging/iio/dds/ad9850.c
index b580d852a1e..d7c9d05f635 100644
--- a/drivers/staging/iio/dds/ad9850.c
+++ b/drivers/staging/iio/dds/ad9850.c
@@ -30,7 +30,6 @@ struct ad9850_config {
struct ad9850_state {
struct mutex lock;
- struct iio_dev *idev;
struct spi_device *sdev;
};
@@ -44,7 +43,7 @@ static ssize_t ad9850_set_parameter(struct device *dev,
int ret;
struct ad9850_config *config = (struct ad9850_config *)buf;
struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad9850_state *st = idev->dev_data;
+ struct ad9850_state *st = iio_priv(idev);
xfer.len = len;
xfer.tx_buf = config;
@@ -69,7 +68,6 @@ static struct attribute *ad9850_attributes[] = {
};
static const struct attribute_group ad9850_attribute_group = {
- .name = DRV_NAME,
.attrs = ad9850_attributes,
};
@@ -81,30 +79,24 @@ static const struct iio_info ad9850_info = {
static int __devinit ad9850_probe(struct spi_device *spi)
{
struct ad9850_state *st;
+ struct iio_dev *idev;
int ret = 0;
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
+ idev = iio_allocate_device(sizeof(*st));
+ if (idev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
- spi_set_drvdata(spi, st);
-
+ spi_set_drvdata(spi, idev);
+ st = iio_priv(idev);
mutex_init(&st->lock);
st->sdev = spi;
- st->idev = iio_allocate_device(0);
- if (st->idev == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->idev->dev.parent = &spi->dev;
-
- st->idev->info = &ad9850_info;
- st->idev->dev_data = (void *)(st);
- st->idev->modes = INDIO_DIRECT_MODE;
+ idev->dev.parent = &spi->dev;
+ idev->info = &ad9850_info;
+ idev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->idev);
+ ret = iio_device_register(idev);
if (ret)
goto error_free_dev;
spi->max_speed_hz = 2000000;
@@ -115,19 +107,14 @@ static int __devinit ad9850_probe(struct spi_device *spi)
return 0;
error_free_dev:
- iio_free_device(st->idev);
-error_free_st:
- kfree(st);
+ iio_free_device(idev);
error_ret:
return ret;
}
static int __devexit ad9850_remove(struct spi_device *spi)
{
- struct ad9850_state *st = spi_get_drvdata(spi);
-
- iio_device_unregister(st->idev);
- kfree(st);
+ iio_device_unregister(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/dds/ad9852.c b/drivers/staging/iio/dds/ad9852.c
index 08020f96300..0184585425d 100644
--- a/drivers/staging/iio/dds/ad9852.c
+++ b/drivers/staging/iio/dds/ad9852.c
@@ -58,7 +58,6 @@ struct ad9852_config {
struct ad9852_state {
struct mutex lock;
- struct iio_dev *idev;
struct spi_device *sdev;
};
@@ -72,7 +71,7 @@ static ssize_t ad9852_set_parameter(struct device *dev,
int ret;
struct ad9852_config *config = (struct ad9852_config *)buf;
struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad9852_state *st = idev->dev_data;
+ struct ad9852_state *st = iio_priv(idev);
xfer.len = 3;
xfer.tx_buf = &config->phajst0[0];
@@ -230,30 +229,24 @@ static const struct iio_info ad9852_info = {
static int __devinit ad9852_probe(struct spi_device *spi)
{
struct ad9852_state *st;
+ struct iio_dev *idev;
int ret = 0;
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
+ idev = iio_allocate_device(sizeof(*st));
+ if (idev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
- spi_set_drvdata(spi, st);
-
+ st = iio_priv(idev);
+ spi_set_drvdata(spi, idev);
mutex_init(&st->lock);
st->sdev = spi;
- st->idev = iio_allocate_device(0);
- if (st->idev == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->idev->dev.parent = &spi->dev;
+ idev->dev.parent = &spi->dev;
+ idev->info = &ad9852_info;
+ idev->modes = INDIO_DIRECT_MODE;
- st->idev->info = &ad9852_info;
- st->idev->dev_data = (void *)(st);
- st->idev->modes = INDIO_DIRECT_MODE;
-
- ret = iio_device_register(st->idev);
+ ret = iio_device_register(idev);
if (ret)
goto error_free_dev;
spi->max_speed_hz = 2000000;
@@ -261,22 +254,19 @@ static int __devinit ad9852_probe(struct spi_device *spi)
spi->bits_per_word = 8;
spi_setup(spi);
ad9852_init(st);
+
return 0;
error_free_dev:
- iio_free_device(st->idev);
-error_free_st:
- kfree(st);
+ iio_free_device(idev);
+
error_ret:
return ret;
}
static int __devexit ad9852_remove(struct spi_device *spi)
{
- struct ad9852_state *st = spi_get_drvdata(spi);
-
- iio_device_unregister(st->idev);
- kfree(st);
+ iio_device_unregister(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/dds/ad9910.c b/drivers/staging/iio/dds/ad9910.c
index 97d75d75582..0fa217f7b90 100644
--- a/drivers/staging/iio/dds/ad9910.c
+++ b/drivers/staging/iio/dds/ad9910.c
@@ -110,7 +110,6 @@ struct ad9910_config {
struct ad9910_state {
struct mutex lock;
- struct iio_dev *idev;
struct spi_device *sdev;
};
@@ -124,7 +123,7 @@ static ssize_t ad9910_set_parameter(struct device *dev,
int ret;
struct ad9910_config *config = (struct ad9910_config *)buf;
struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad9910_state *st = idev->dev_data;
+ struct ad9910_state *st = iio_priv(idev);
xfer.len = 5;
xfer.tx_buf = &config->auxdac[0];
@@ -365,30 +364,24 @@ static const struct iio_info ad9910_info = {
static int __devinit ad9910_probe(struct spi_device *spi)
{
struct ad9910_state *st;
+ struct iio_dev *idev;
int ret = 0;
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
+ idev = iio_allocate_device(sizeof(*st));
+ if (idev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
- spi_set_drvdata(spi, st);
-
+ spi_set_drvdata(spi, idev);
+ st = iio_priv(idev);
mutex_init(&st->lock);
st->sdev = spi;
- st->idev = iio_allocate_device(0);
- if (st->idev == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->idev->dev.parent = &spi->dev;
-
- st->idev->info = &ad9910_info;
- st->idev->dev_data = (void *)(st);
- st->idev->modes = INDIO_DIRECT_MODE;
+ idev->dev.parent = &spi->dev;
+ idev->info = &ad9910_info;
+ idev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->idev);
+ ret = iio_device_register(idev);
if (ret)
goto error_free_dev;
spi->max_speed_hz = 2000000;
@@ -399,19 +392,14 @@ static int __devinit ad9910_probe(struct spi_device *spi)
return 0;
error_free_dev:
- iio_free_device(st->idev);
-error_free_st:
- kfree(st);
+ iio_free_device(idev);
error_ret:
return ret;
}
static int __devexit ad9910_remove(struct spi_device *spi)
{
- struct ad9910_state *st = spi_get_drvdata(spi);
-
- iio_device_unregister(st->idev);
- kfree(st);
+ iio_device_unregister(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/dds/ad9951.c b/drivers/staging/iio/dds/ad9951.c
index d4dfcd41d5f..d361d1f125d 100644
--- a/drivers/staging/iio/dds/ad9951.c
+++ b/drivers/staging/iio/dds/ad9951.c
@@ -51,7 +51,6 @@ struct ad9951_config {
struct ad9951_state {
struct mutex lock;
- struct iio_dev *idev;
struct spi_device *sdev;
};
@@ -65,7 +64,7 @@ static ssize_t ad9951_set_parameter(struct device *dev,
int ret;
struct ad9951_config *config = (struct ad9951_config *)buf;
struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad9951_state *st = idev->dev_data;
+ struct ad9951_state *st = iio_priv(idev);
xfer.len = 3;
xfer.tx_buf = &config->asf[0];
@@ -174,30 +173,25 @@ static const struct iio_info ad9951_info = {
static int __devinit ad9951_probe(struct spi_device *spi)
{
struct ad9951_state *st;
+ struct iio_dev *idev;
int ret = 0;
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
+ idev = iio_allocate_device(sizeof(*st));
+ if (idev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
- spi_set_drvdata(spi, st);
-
+ spi_set_drvdata(spi, idev);
+ st = iio_priv(idev);
mutex_init(&st->lock);
st->sdev = spi;
- st->idev = iio_allocate_device(0);
- if (st->idev == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->idev->dev.parent = &spi->dev;
+ idev->dev.parent = &spi->dev;
- st->idev->info = &ad9951_info;
- st->idev->dev_data = (void *)(st);
- st->idev->modes = INDIO_DIRECT_MODE;
+ idev->info = &ad9951_info;
+ idev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->idev);
+ ret = iio_device_register(idev);
if (ret)
goto error_free_dev;
spi->max_speed_hz = 2000000;
@@ -208,19 +202,15 @@ static int __devinit ad9951_probe(struct spi_device *spi)
return 0;
error_free_dev:
- iio_free_device(st->idev);
-error_free_st:
- kfree(st);
+ iio_free_device(idev);
+
error_ret:
return ret;
}
static int __devexit ad9951_remove(struct spi_device *spi)
{
- struct ad9951_state *st = spi_get_drvdata(spi);
-
- iio_device_unregister(st->idev);
- kfree(st);
+ iio_device_unregister(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
index edf9e3bf3ef..afa52d1961a 100644
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -29,27 +29,25 @@
* struct adis16060_state - device instance specific data
* @us_w: actual spi_device to write config
* @us_r: actual spi_device to read back data
- * @indio_dev: industrial I/O device structure
* @buf: transmit or receive buffer
* @buf_lock: mutex to protect tx and rx
**/
struct adis16060_state {
struct spi_device *us_w;
struct spi_device *us_r;
- struct iio_dev *indio_dev;
struct mutex buf_lock;
u8 buf[3] ____cacheline_aligned;
};
-static struct adis16060_state *adis16060_st;
+static struct iio_dev *adis16060_iio_dev;
static int adis16060_spi_write(struct device *dev,
u8 val)
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16060_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16060_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->buf[2] = val; /* The last 8 bits clocked in are latched */
@@ -64,7 +62,7 @@ static int adis16060_spi_read(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16060_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16060_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
@@ -141,43 +139,38 @@ static const struct iio_info adis16060_info = {
static int __devinit adis16060_r_probe(struct spi_device *spi)
{
int ret, regdone = 0;
- struct adis16060_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
+ struct adis16060_state *st;
+ struct iio_dev *indio_dev;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
goto error_ret;
}
/* this is only used for removal purposes */
- spi_set_drvdata(spi, st);
-
+ spi_set_drvdata(spi, indio_dev);
+ st = iio_priv(indio_dev);
st->us_r = spi;
mutex_init(&st->buf_lock);
- /* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->info = &adis16060_info;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adis16060_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
regdone = 1;
- adis16060_st = st;
+ adis16060_iio_dev = indio_dev;
return 0;
error_free_dev:
if (regdone)
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
else
- iio_free_device(st->indio_dev);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -185,11 +178,7 @@ error_ret:
/* fixme, confirm ordering in this function */
static int adis16060_r_remove(struct spi_device *spi)
{
- struct adis16060_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
-
- iio_device_unregister(indio_dev);
- kfree(st);
+ iio_device_unregister(spi_get_drvdata(spi));
return 0;
}
@@ -197,12 +186,14 @@ static int adis16060_r_remove(struct spi_device *spi)
static int __devinit adis16060_w_probe(struct spi_device *spi)
{
int ret;
- struct adis16060_state *st = adis16060_st;
- if (!st) {
+ struct iio_dev *indio_dev = adis16060_iio_dev;
+ struct adis16060_state *st;
+ if (!indio_dev) {
ret = -ENODEV;
goto error_ret;
}
- spi_set_drvdata(spi, st);
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
st->us_w = spi;
return 0;
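adis16060 is the odd one out in this series: the part is wired through two SPI devices (one for writes, one for reads), so the read-side probe parks the freshly allocated iio_dev in a file-scope pointer and the write-side probe later attaches itself to the same private state. A minimal sketch of that hand-off, with hypothetical qux_* names:

#include <linux/spi/spi.h>
#include "../iio.h"

/* Illustrative sketch of two SPI probes sharing one iio_dev. */
struct qux_state {
        struct spi_device *us_r;
        struct spi_device *us_w;
};

static struct iio_dev *qux_iio_dev;     /* published by the read-side probe */

static int qux_w_probe(struct spi_device *spi)
{
        struct iio_dev *indio_dev = qux_iio_dev;
        struct qux_state *st;

        if (!indio_dev)
                return -ENODEV;         /* read side not probed yet */

        st = iio_priv(indio_dev);
        spi_set_drvdata(spi, indio_dev);
        st->us_w = spi;                 /* remember the write device */
        return 0;
}
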
diff --git a/drivers/staging/iio/gyro/adis16080_core.c b/drivers/staging/iio/gyro/adis16080_core.c
index d42690bea06..ad2db4d723d 100644
--- a/drivers/staging/iio/gyro/adis16080_core.c
+++ b/drivers/staging/iio/gyro/adis16080_core.c
@@ -34,13 +34,11 @@
/**
* struct adis16080_state - device instance specific data
* @us: actual spi_device to write data
- * @indio_dev: industrial I/O device structure
* @buf: transmit or receive buffer
* @buf_lock: mutex to protect tx and rx
**/
struct adis16080_state {
struct spi_device *us;
- struct iio_dev *indio_dev;
struct mutex buf_lock;
u8 buf[2] ____cacheline_aligned;
@@ -51,7 +49,7 @@ static int adis16080_spi_write(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16080_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16080_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->buf[0] = val >> 8;
@@ -68,7 +66,7 @@ static int adis16080_spi_read(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16080_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16080_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
@@ -131,31 +129,29 @@ static const struct iio_info adis16080_info = {
static int __devinit adis16080_probe(struct spi_device *spi)
{
int ret, regdone = 0;
- struct adis16080_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
+ struct adis16080_state *st;
+ struct iio_dev *indio_dev;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
goto error_ret;
}
+ st = iio_priv(indio_dev);
/* this is only used for removal purposes */
- spi_set_drvdata(spi, st);
+ spi_set_drvdata(spi, indio_dev);
/* Allocate the comms buffers */
st->us = spi;
mutex_init(&st->buf_lock);
- /* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->indio_dev->name = spi->dev.driver->name;
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->info = &adis16080_info;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adis16080_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
regdone = 1;
@@ -164,11 +160,9 @@ static int __devinit adis16080_probe(struct spi_device *spi)
error_free_dev:
if (regdone)
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
else
- iio_free_device(st->indio_dev);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -176,11 +170,7 @@ error_ret:
/* fixme, confirm ordering in this function */
static int adis16080_remove(struct spi_device *spi)
{
- struct adis16080_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
-
- iio_device_unregister(indio_dev);
- kfree(st);
+ iio_device_unregister(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/gyro/adis16130_core.c b/drivers/staging/iio/gyro/adis16130_core.c
index 14d5a34ab44..c80e908d8ac 100644
--- a/drivers/staging/iio/gyro/adis16130_core.c
+++ b/drivers/staging/iio/gyro/adis16130_core.c
@@ -41,14 +41,12 @@
/**
* struct adis16130_state - device instance specific data
* @us: actual spi_device to write data
- * @indio_dev: industrial I/O device structure
* @mode: 24 bits (1) or 16 bits (0)
* @buf_lock: mutex to protect tx and rx
* @buf: unified tx/rx buffer
**/
struct adis16130_state {
struct spi_device *us;
- struct iio_dev *indio_dev;
u32 mode;
struct mutex buf_lock;
u8 buf[4] ____cacheline_aligned;
@@ -59,7 +57,7 @@ static int adis16130_spi_write(struct device *dev, u8 reg_addr,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16130_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16130_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->buf[0] = reg_addr;
@@ -76,7 +74,7 @@ static int adis16130_spi_read(struct device *dev, u8 reg_addr,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16130_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16130_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
@@ -125,7 +123,7 @@ static ssize_t adis16130_bitsmode_read(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16130_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16130_state *st = iio_priv(indio_dev);
if (st->mode == 1)
return sprintf(buf, "s24\n");
@@ -183,39 +181,35 @@ static const struct iio_info adis16130_info = {
static int __devinit adis16130_probe(struct spi_device *spi)
{
int ret;
- struct adis16130_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
+ struct adis16130_state *st;
+ struct iio_dev *indio_dev;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
goto error_ret;
}
+ st = iio_priv(indio_dev);
/* this is only used for removal purposes */
- spi_set_drvdata(spi, st);
+ spi_set_drvdata(spi, indio_dev);
st->us = spi;
mutex_init(&st->buf_lock);
- /* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
-
- st->indio_dev->name = spi->dev.driver->name;
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->info = &adis16130_info;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adis16130_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
st->mode = 1;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
return 0;
error_free_dev:
- iio_free_device(st->indio_dev);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
+
error_ret:
return ret;
}
@@ -223,11 +217,7 @@ error_ret:
/* fixme, confirm ordering in this function */
static int adis16130_remove(struct spi_device *spi)
{
- struct adis16130_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
-
- iio_device_unregister(indio_dev);
- kfree(st);
+ iio_device_unregister(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/gyro/adis16260.h b/drivers/staging/iio/gyro/adis16260.h
index 702dc982f62..969b624be6d 100644
--- a/drivers/staging/iio/gyro/adis16260.h
+++ b/drivers/staging/iio/gyro/adis16260.h
@@ -85,26 +85,23 @@
/**
* struct adis16260_state - device instance specific data
* @us: actual spi_device
- * @indio_dev: industrial I/O device structure
* @trig: data ready trigger registered with iio
- * @tx: transmit buffer
- * @rx: receive buffer
* @buf_lock: mutex to protect tx and rx
* @negate: negate the scale parameter
+ * @tx: transmit buffer
+ * @rx: receive buffer
**/
struct adis16260_state {
- struct spi_device *us;
- struct iio_dev *indio_dev;
- struct iio_trigger *trig;
- u8 *tx;
- u8 *rx;
- struct mutex buf_lock;
- unsigned negate:1;
+ struct spi_device *us;
+ struct iio_trigger *trig;
+ struct mutex buf_lock;
+ unsigned negate:1;
+ u8 tx[ADIS16260_MAX_TX] ____cacheline_aligned;
+ u8 rx[ADIS16260_MAX_RX];
};
int adis16260_set_irq(struct iio_dev *indio_dev, bool enable);
-#ifdef CONFIG_IIO_RING_BUFFER
/* At the moment triggers are only used for ring buffer
* filling. This may change!
*/
@@ -115,6 +112,7 @@ int adis16260_set_irq(struct iio_dev *indio_dev, bool enable);
#define ADIS16260_SCAN_TEMP 3
#define ADIS16260_SCAN_ANGL 4
+#ifdef CONFIG_IIO_RING_BUFFER
void adis16260_remove_trigger(struct iio_dev *indio_dev);
int adis16260_probe_trigger(struct iio_dev *indio_dev);
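The adis16260.h change above also folds the tx/rx buffers into the state instead of kzalloc()ing them separately; because the state now lives inside the iio_dev allocation and the buffers may be targets of SPI DMA, they are placed at the end of the struct and cache-line aligned so DMA cannot dirty neighbouring fields. A sketch of that layout rule, with a hypothetical baz_state:

/* Illustrative only: DMA-safe buffer placement inside iio_priv() data. */
struct baz_state {
        struct spi_device *us;
        struct mutex buf_lock;
        /*
         * Everything from here on starts on its own cache line, so DMA
         * to/from tx and rx can never corrupt the fields above.
         */
        u8 tx[16] ____cacheline_aligned;
        u8 rx[16];
};
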
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
index 3dc9a272749..f2d43cfcc49 100644
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ b/drivers/staging/iio/gyro/adis16260_core.c
@@ -41,7 +41,7 @@ static int adis16260_spi_write_reg_8(struct iio_dev *indio_dev,
u8 val)
{
int ret;
- struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16260_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = ADIS16260_WRITE_REG(reg_address);
@@ -66,7 +66,7 @@ static int adis16260_spi_write_reg_16(struct iio_dev *indio_dev,
{
int ret;
struct spi_message msg;
- struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16260_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[] = {
{
.tx_buf = st->tx,
@@ -109,7 +109,7 @@ static int adis16260_spi_read_reg_16(struct iio_dev *indio_dev,
u16 *val)
{
struct spi_message msg;
- struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16260_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -152,7 +152,7 @@ static ssize_t adis16260_read_frequency_available(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16260_state *st = iio_priv(indio_dev);
if (spi_get_device_id(st->us)->driver_data)
return sprintf(buf, "%s\n", "0.129 ~ 256");
else
@@ -164,7 +164,7 @@ static ssize_t adis16260_read_frequency(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16260_state *st = iio_priv(indio_dev);
int ret, len = 0;
u16 t;
int sps;
@@ -189,7 +189,7 @@ static ssize_t adis16260_write_frequency(struct device *dev,
size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16260_state *st = iio_priv(indio_dev);
long val;
int ret;
u8 t;
@@ -435,7 +435,7 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
int *val, int *val2,
long mask)
{
- struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16260_state *st = iio_priv(indio_dev);
int ret;
int bits;
u8 addr;
@@ -446,13 +446,17 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
addr = adis16260_addresses[chan->address][0];
ret = adis16260_spi_read_reg_16(indio_dev, addr, &val16);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
if (val16 & ADIS16260_ERROR_ACTIVE) {
ret = adis16260_check_status(indio_dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&indio_dev->mlock);
return ret;
+ }
}
val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
if (chan->scan_type.sign == 's')
@@ -576,71 +580,58 @@ static int __devinit adis16260_probe(struct spi_device *spi)
{
int ret, regdone = 0;
struct adis16260_platform_data *pd = spi->dev.platform_data;
- struct adis16260_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
+ struct adis16260_state *st;
+ struct iio_dev *indio_dev;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
goto error_ret;
}
+ st = iio_priv(indio_dev);
if (pd)
st->negate = pd->negate;
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
- /* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADIS16260_MAX_RX, GFP_KERNEL);
- if (st->rx == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->tx = kzalloc(sizeof(*st->tx)*ADIS16260_MAX_TX, GFP_KERNEL);
- if (st->tx == NULL) {
- ret = -ENOMEM;
- goto error_free_rx;
- }
st->us = spi;
mutex_init(&st->buf_lock);
- /* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_tx;
- }
- st->indio_dev->name = spi_get_device_id(st->us)->name;
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->info = &adis16260_info;
- st->indio_dev->num_channels
+ indio_dev->name = spi_get_device_id(st->us)->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adis16260_info;
+ indio_dev->num_channels
= ARRAY_SIZE(adis16260_channels_x);
if (pd && pd->direction)
switch (pd->direction) {
case 'x':
- st->indio_dev->channels = adis16260_channels_x;
+ indio_dev->channels = adis16260_channels_x;
break;
case 'y':
- st->indio_dev->channels = adis16260_channels_y;
+ indio_dev->channels = adis16260_channels_y;
break;
case 'z':
- st->indio_dev->channels = adis16260_channels_z;
+ indio_dev->channels = adis16260_channels_z;
break;
default:
return -EINVAL;
}
else
- st->indio_dev->channels = adis16260_channels_x;
+ indio_dev->channels = adis16260_channels_x;
+ indio_dev->num_channels = ARRAY_SIZE(adis16260_channels_x);
+ indio_dev->modes = INDIO_DIRECT_MODE;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis16260_configure_ring(st->indio_dev);
+ ret = adis16260_configure_ring(indio_dev);
if (ret)
goto error_free_dev;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_unreg_ring_funcs;
regdone = 1;
- ret = iio_ring_buffer_register_ex(st->indio_dev->ring, 0,
- st->indio_dev->channels,
+ ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
+ indio_dev->channels,
ARRAY_SIZE(adis16260_channels_x));
if (ret) {
printk(KERN_ERR "failed to initialize the ring\n");
@@ -648,34 +639,28 @@ static int __devinit adis16260_probe(struct spi_device *spi)
}
if (spi->irq) {
- ret = adis16260_probe_trigger(st->indio_dev);
+ ret = adis16260_probe_trigger(indio_dev);
if (ret)
goto error_uninitialize_ring;
}
/* Get the device into a sane initial state */
- ret = adis16260_initial_setup(st->indio_dev);
+ ret = adis16260_initial_setup(indio_dev);
if (ret)
goto error_remove_trigger;
return 0;
error_remove_trigger:
- adis16260_remove_trigger(st->indio_dev);
+ adis16260_remove_trigger(indio_dev);
error_uninitialize_ring:
- iio_ring_buffer_unregister(st->indio_dev->ring);
+ iio_ring_buffer_unregister(indio_dev->ring);
error_unreg_ring_funcs:
- adis16260_unconfigure_ring(st->indio_dev);
+ adis16260_unconfigure_ring(indio_dev);
error_free_dev:
if (regdone)
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
else
- iio_free_device(st->indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -683,8 +668,7 @@ error_ret:
static int adis16260_remove(struct spi_device *spi)
{
int ret;
- struct adis16260_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
ret = adis16260_stop_device(indio_dev);
if (ret)
@@ -693,13 +677,9 @@ static int adis16260_remove(struct spi_device *spi)
flush_scheduled_work();
adis16260_remove_trigger(indio_dev);
-
- iio_ring_buffer_unregister(st->indio_dev->ring);
+ iio_ring_buffer_unregister(indio_dev->ring);
iio_device_unregister(indio_dev);
adis16260_unconfigure_ring(indio_dev);
- kfree(st->tx);
- kfree(st->rx);
- kfree(st);
err_ret:
return ret;
diff --git a/drivers/staging/iio/gyro/adis16260_ring.c b/drivers/staging/iio/gyro/adis16260_ring.c
index a0925044eaa..a4df8b32251 100644
--- a/drivers/staging/iio/gyro/adis16260_ring.c
+++ b/drivers/staging/iio/gyro/adis16260_ring.c
@@ -17,7 +17,6 @@
#include "../trigger.h"
#include "adis16260.h"
-
/**
* adis16260_read_ring_data() reads the data registers which will be placed into the ring
* @dev: device associated with child of actual device (iio_dev or iio_trig)
@@ -27,7 +26,7 @@ static int adis16260_read_ring_data(struct device *dev, u8 *rx)
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16260_state *st = iio_priv(indio_dev);
struct spi_transfer xfers[ADIS16260_OUTPUTS + 1];
int ret;
int i;
@@ -70,7 +69,7 @@ static irqreturn_t adis16260_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->private_data;
- struct adis16260_state *st = iio_dev_get_devdata(indio_dev);
+ struct adis16260_state *st = iio_priv(indio_dev);
struct iio_ring_buffer *ring = indio_dev->ring;
int i = 0;
s16 *data;
@@ -83,7 +82,7 @@ static irqreturn_t adis16260_trigger_handler(int irq, void *p)
}
if (ring->scan_count &&
- adis16260_read_ring_data(&st->indio_dev->dev, st->rx) >= 0)
+ adis16260_read_ring_data(&indio_dev->dev, st->rx) >= 0)
for (; i < ring->scan_count; i++)
data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
@@ -93,7 +92,7 @@ static irqreturn_t adis16260_trigger_handler(int irq, void *p)
ring->access->store_to(ring, (u8 *)data, pf->timestamp);
- iio_trigger_notify_done(st->indio_dev->trig);
+ iio_trigger_notify_done(indio_dev->trig);
kfree(data);
return IRQ_HANDLED;
diff --git a/drivers/staging/iio/gyro/adis16260_trigger.c b/drivers/staging/iio/gyro/adis16260_trigger.c
index 4f10fb54335..01094d0e714 100644
--- a/drivers/staging/iio/gyro/adis16260_trigger.c
+++ b/drivers/staging/iio/gyro/adis16260_trigger.c
@@ -18,8 +18,7 @@
static int adis16260_data_rdy_trigger_set_state(struct iio_trigger *trig,
bool state)
{
- struct adis16260_state *st = trig->private_data;
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = trig->private_data;
dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
return adis16260_set_irq(indio_dev, state);
@@ -28,7 +27,7 @@ static int adis16260_data_rdy_trigger_set_state(struct iio_trigger *trig,
int adis16260_probe_trigger(struct iio_dev *indio_dev)
{
int ret;
- struct adis16260_state *st = indio_dev->dev_data;
+ struct adis16260_state *st = iio_priv(indio_dev);
st->trig = iio_allocate_trigger("%s-dev%d",
spi_get_device_id(st->us)->name,
@@ -48,7 +47,7 @@ int adis16260_probe_trigger(struct iio_dev *indio_dev)
st->trig->dev.parent = &st->us->dev;
st->trig->owner = THIS_MODULE;
- st->trig->private_data = st;
+ st->trig->private_data = indio_dev;
st->trig->set_trigger_state = &adis16260_data_rdy_trigger_set_state;
ret = iio_trigger_register(st->trig);
@@ -69,9 +68,9 @@ error_ret:
void adis16260_remove_trigger(struct iio_dev *indio_dev)
{
- struct adis16260_state *state = indio_dev->dev_data;
+ struct adis16260_state *st = iio_priv(indio_dev);
- iio_trigger_unregister(state->trig);
- free_irq(state->us->irq, state->trig);
- iio_free_trigger(state->trig);
+ iio_trigger_unregister(st->trig);
+ free_irq(st->us->irq, st->trig);
+ iio_free_trigger(st->trig);
}
diff --git a/drivers/staging/iio/gyro/adxrs450.h b/drivers/staging/iio/gyro/adxrs450.h
index c92f6945f00..b6b68287640 100644
--- a/drivers/staging/iio/gyro/adxrs450.h
+++ b/drivers/staging/iio/gyro/adxrs450.h
@@ -42,17 +42,16 @@
/**
* struct adxrs450_state - device instance specific data
* @us: actual spi_device
- * @indio_dev: industrial I/O device structure
+ * @buf_lock: mutex to protect tx and rx
* @tx: transmit buffer
* @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
**/
struct adxrs450_state {
- struct spi_device *us;
- struct iio_dev *indio_dev;
- u8 *tx;
- u8 *rx;
- struct mutex buf_lock;
+ struct spi_device *us;
+ struct mutex buf_lock;
+ u8 tx[ADXRS450_MAX_RX] ____cacheline_aligned;
+ u8 rx[ADXRS450_MAX_TX];
+
};
#endif /* SPI_ADXRS450_H_ */
diff --git a/drivers/staging/iio/gyro/adxrs450_core.c b/drivers/staging/iio/gyro/adxrs450_core.c
index 3714e4aadc2..7502a264770 100644
--- a/drivers/staging/iio/gyro/adxrs450_core.c
+++ b/drivers/staging/iio/gyro/adxrs450_core.c
@@ -38,7 +38,7 @@ static int adxrs450_spi_read_reg_16(struct device *dev,
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adxrs450_state *st = iio_dev_get_devdata(indio_dev);
+ struct adxrs450_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -92,7 +92,7 @@ static int adxrs450_spi_write_reg_16(struct device *dev,
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adxrs450_state *st = iio_dev_get_devdata(indio_dev);
+ struct adxrs450_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers = {
.tx_buf = st->tx,
@@ -130,7 +130,7 @@ static int adxrs450_spi_sensor_data(struct device *dev, s16 *val)
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct adxrs450_state *st = iio_dev_get_devdata(indio_dev);
+ struct adxrs450_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -267,12 +267,13 @@ static ssize_t adxrs450_read_sensor_data(struct device *dev,
}
/* Recommended Startup Sequence by spec */
-static int adxrs450_initial_setup(struct adxrs450_state *st)
+static int adxrs450_initial_setup(struct iio_dev *indio_dev)
{
u32 t;
u16 data;
int ret;
- struct device *dev = &st->indio_dev->dev;
+ struct device *dev = &indio_dev->dev;
+ struct adxrs450_state *st = iio_priv(indio_dev);
msleep(ADXRS450_STARTUP_DELAY*2);
ret = adxrs450_spi_initial(st, &t, 1);
@@ -357,46 +358,32 @@ static const struct iio_info adxrs450_info = {
static int __devinit adxrs450_probe(struct spi_device *spi)
{
int ret, regdone = 0;
- struct adxrs450_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
- goto error_ret;
- }
- /* This is only used for removal purposes */
- spi_set_drvdata(spi, st);
+ struct adxrs450_state *st;
+ struct iio_dev *indio_dev;
- /* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADXRS450_MAX_RX, GFP_KERNEL);
- if (st->rx == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->tx = kzalloc(sizeof(*st->tx)*ADXRS450_MAX_TX, GFP_KERNEL);
- if (st->tx == NULL) {
+ /* setup the industrialio driver allocated elements */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
- goto error_free_rx;
+ goto error_ret;
}
+ st = iio_priv(indio_dev);
st->us = spi;
mutex_init(&st->buf_lock);
- /* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_tx;
- }
+ /* This is only used for removal purposes */
+ spi_set_drvdata(spi, indio_dev);
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->info = &adxrs450_info;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adxrs450_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
regdone = 1;
/* Get the device into a sane initial state */
- ret = adxrs450_initial_setup(st);
+ ret = adxrs450_initial_setup(indio_dev);
if (ret)
goto error_initial;
return 0;
@@ -404,27 +391,17 @@ static int __devinit adxrs450_probe(struct spi_device *spi)
error_initial:
error_free_dev:
if (regdone)
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
else
- iio_free_device(st->indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
+
error_ret:
return ret;
}
static int adxrs450_remove(struct spi_device *spi)
{
- struct adxrs450_state *st = spi_get_drvdata(spi);
-
- iio_device_unregister(st->indio_dev);
- kfree(st->tx);
- kfree(st->rx);
- kfree(st);
+ iio_device_unregister(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/iio.h b/drivers/staging/iio/iio.h
index 38f1425f464..7a6ce4d0fb7 100644
--- a/drivers/staging/iio/iio.h
+++ b/drivers/staging/iio/iio.h
@@ -30,6 +30,7 @@
enum iio_chan_type {
/* real channel types */
IIO_IN,
+ IIO_OUT,
IIO_CURRENT,
IIO_POWER,
IIO_ACCEL,
@@ -202,6 +203,9 @@ static inline s64 iio_get_time_ns(void)
* call to iio_device_register. */
#define IIO_VAL_INT 1
#define IIO_VAL_INT_PLUS_MICRO 2
+#define IIO_VAL_INT_PLUS_NANO 3
+
+struct iio_trigger; /* forward declaration */
/**
* struct iio_info - constant information about device
@@ -217,12 +221,17 @@ static inline s64 iio_get_time_ns(void)
* contain the elements making up the returned value.
* @write_raw: function to write a value to the device.
* Parameters are the same as for read_raw.
+ * @write_raw_get_fmt: callback function to query the expected
+ * format/precision. If not set by the driver, write_raw
+ * returns IIO_VAL_INT_PLUS_MICRO.
* @read_event_config: find out if the event is enabled.
* @write_event_config: set if the event is enabled.
* @read_event_value: read a value associated with the event. Meaning
* is event dependent. event_code specifies which event.
* @write_event_value: write the value associated with the event.
* Meaning is event dependent.
+ * @validate_trigger: function to validate the trigger when the
+ * current trigger gets changed.
**/
struct iio_info {
struct module *driver_module;
@@ -242,6 +251,10 @@ struct iio_info {
int val2,
long mask);
+ int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask);
+
int (*read_event_config)(struct iio_dev *indio_dev,
int event_code);
@@ -255,6 +268,9 @@ struct iio_info {
int (*write_event_value)(struct iio_dev *indio_dev,
int event_code,
int val);
+ int (*validate_trigger)(struct iio_dev *indio_dev,
+ struct iio_trigger *trig);
+
};
/**
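
The new write_raw_get_fmt callback and the IIO_VAL_INT_PLUS_NANO code let a driver ask the core to parse written values with nine fractional digits and to print read values with the matching "%d.%09u" format. A hedged sketch of a driver-side pair (the bar_* names and the fixed 0.000000125 scale are illustrative only):

static int bar_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	if (mask == (1 << IIO_CHAN_INFO_SCALE_SEPARATE)) {
		*val = 0;
		*val2 = 125;	/* reported by the core as 0.000000125 */
		return IIO_VAL_INT_PLUS_NANO;
	}
	return -EINVAL;
}

static int bar_write_raw_get_fmt(struct iio_dev *indio_dev,
				 struct iio_chan_spec const *chan,
				 long mask)
{
	/* written values for this attribute are parsed at nano precision */
	return IIO_VAL_INT_PLUS_NANO;
}

static const struct iio_info bar_info = {
	.driver_module		= THIS_MODULE,
	.read_raw		= &bar_read_raw,
	.write_raw_get_fmt	= &bar_write_raw_get_fmt,
	/* a matching .write_raw would consume the parsed val/val2 pair */
};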
diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h
index db184d11dfc..1f8f0c60c2c 100644
--- a/drivers/staging/iio/imu/adis16400.h
+++ b/drivers/staging/iio/imu/adis16400.h
@@ -41,6 +41,9 @@
#define ADIS16350_YTEMP_OUT 0x12 /* Y-axis gyroscope temperature measurement */
#define ADIS16350_ZTEMP_OUT 0x14 /* Z-axis gyroscope temperature measurement */
+#define ADIS16300_PITCH_OUT 0x12 /* X axis inclinometer output measurement */
+#define ADIS16300_ROLL_OUT 0x14 /* Y axis inclinometer output measurement */
+
/* Calibration parameters */
#define ADIS16400_XGYRO_OFF 0x1A /* X-axis gyroscope bias offset factor */
#define ADIS16400_YGYRO_OFF 0x1C /* Y-axis gyroscope bias offset factor */
@@ -158,7 +161,6 @@ struct adis16400_state {
int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
-#ifdef CONFIG_IIO_RING_BUFFER
/* At the moment triggers are only used for ring buffer
* filling. This may change!
*/
@@ -182,6 +184,7 @@ int adis16400_set_irq(struct iio_dev *indio_dev, bool enable);
#define ADIS16300_SCAN_INCLI_X 12
#define ADIS16300_SCAN_INCLI_Y 13
+#ifdef CONFIG_IIO_RING_BUFFER
void adis16400_remove_trigger(struct iio_dev *indio_dev);
int adis16400_probe_trigger(struct iio_dev *indio_dev);
diff --git a/drivers/staging/iio/imu/adis16400_core.c b/drivers/staging/iio/imu/adis16400_core.c
index fe89802e3fe..a2c3b67dcbd 100644
--- a/drivers/staging/iio/imu/adis16400_core.c
+++ b/drivers/staging/iio/imu/adis16400_core.c
@@ -441,10 +441,12 @@ enum adis16400_chan {
magn_z,
temp,
temp0, temp1, temp2,
- in1
+ in1,
+ incli_x,
+ incli_y,
};
-static u8 adis16400_addresses[16][2] = {
+static u8 adis16400_addresses[17][2] = {
[in_supply] = { ADIS16400_SUPPLY_OUT, 0 },
[gyro_x] = { ADIS16400_XGYRO_OUT, ADIS16400_XGYRO_OFF },
[gyro_y] = { ADIS16400_YGYRO_OUT, ADIS16400_YGYRO_OFF },
@@ -459,7 +461,9 @@ static u8 adis16400_addresses[16][2] = {
[temp0] = { ADIS16350_XTEMP_OUT },
[temp1] = { ADIS16350_YTEMP_OUT },
[temp2] = { ADIS16350_ZTEMP_OUT },
- [in1] = { ADIS16400_AUX_ADC , 0 },
+ [in1] = { ADIS16400_AUX_ADC, 0 },
+ [incli_x] = { ADIS16300_PITCH_OUT, 0 },
+ [incli_y] = { ADIS16300_ROLL_OUT, 0 }
};
static int adis16400_write_raw(struct iio_dev *indio_dev,
@@ -612,82 +616,82 @@ static struct iio_chan_spec adis16400_channels[] = {
static struct iio_chan_spec adis16350_channels[] = {
IIO_CHAN(IIO_IN, 0, 1, 0, "supply", 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- 0, ADIS16400_SCAN_SUPPLY, IIO_ST('u', 12, 16, 0), 0),
+ in_supply, ADIS16400_SCAN_SUPPLY, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_GYRO, 1, 0, 0, NULL, 0, IIO_MOD_X,
(1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
(1 << IIO_CHAN_INFO_SCALE_SHARED),
- 1, ADIS16400_SCAN_GYRO_X, IIO_ST('s', 14, 16, 0), 0),
+ gyro_x, ADIS16400_SCAN_GYRO_X, IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_GYRO, 1, 0, 0, NULL, 0, IIO_MOD_Y,
(1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
(1 << IIO_CHAN_INFO_SCALE_SHARED),
- 2, ADIS16400_SCAN_GYRO_Y, IIO_ST('s', 14, 16, 0), 0),
+ gyro_y, ADIS16400_SCAN_GYRO_Y, IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_GYRO, 1, 0, 0, NULL, 0, IIO_MOD_Z,
(1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
(1 << IIO_CHAN_INFO_SCALE_SHARED),
- 3, ADIS16400_SCAN_GYRO_Z, IIO_ST('s', 14, 16, 0), 0),
+ gyro_z, ADIS16400_SCAN_GYRO_Z, IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
(1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
(1 << IIO_CHAN_INFO_SCALE_SHARED),
- 4, ADIS16400_SCAN_ACC_X, IIO_ST('s', 14, 16, 0), 0),
+ accel_x, ADIS16400_SCAN_ACC_X, IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
(1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
(1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, ADIS16400_SCAN_ACC_Y, IIO_ST('s', 14, 16, 0), 0),
+ accel_y, ADIS16400_SCAN_ACC_Y, IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Z,
(1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
(1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, ADIS16400_SCAN_ACC_Z, IIO_ST('s', 14, 16, 0), 0),
+ accel_z, ADIS16400_SCAN_ACC_Z, IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, "x", 0, 0,
(1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- 0, ADIS16350_SCAN_TEMP_X, IIO_ST('s', 12, 16, 0), 0),
+ temp0, ADIS16350_SCAN_TEMP_X, IIO_ST('s', 12, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, "y", 1, 0,
(1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- 0, ADIS16350_SCAN_TEMP_Y, IIO_ST('s', 12, 16, 0), 0),
+ temp1, ADIS16350_SCAN_TEMP_Y, IIO_ST('s', 12, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, "z", 2, 0,
(1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- 0, ADIS16350_SCAN_TEMP_Z, IIO_ST('s', 12, 16, 0), 0),
+ temp2, ADIS16350_SCAN_TEMP_Z, IIO_ST('s', 12, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- 0, ADIS16350_SCAN_ADC_0, IIO_ST('s', 12, 16, 0), 0),
+ in1, ADIS16350_SCAN_ADC_0, IIO_ST('s', 12, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(11)
};
static struct iio_chan_spec adis16300_channels[] = {
IIO_CHAN(IIO_IN, 0, 1, 0, "supply", 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- 0, ADIS16400_SCAN_SUPPLY, IIO_ST('u', 12, 16, 0), 0),
+ in_supply, ADIS16400_SCAN_SUPPLY, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_GYRO, 1, 0, 0, NULL, 0, IIO_MOD_X,
(1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
(1 << IIO_CHAN_INFO_SCALE_SHARED),
- 1, ADIS16400_SCAN_GYRO_X, IIO_ST('s', 14, 16, 0), 0),
+ gyro_x, ADIS16400_SCAN_GYRO_X, IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
(1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
(1 << IIO_CHAN_INFO_SCALE_SHARED),
- 4, ADIS16400_SCAN_ACC_X, IIO_ST('s', 14, 16, 0), 0),
+ accel_x, ADIS16400_SCAN_ACC_X, IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
(1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
(1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, ADIS16400_SCAN_ACC_Y, IIO_ST('s', 14, 16, 0), 0),
+ accel_y, ADIS16400_SCAN_ACC_Y, IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Z,
(1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
(1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, ADIS16400_SCAN_ACC_Z, IIO_ST('s', 14, 16, 0), 0),
+ accel_z, ADIS16400_SCAN_ACC_Z, IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- 0, ADIS16400_SCAN_TEMP, IIO_ST('s', 12, 16, 0), 0),
+ temp, ADIS16400_SCAN_TEMP, IIO_ST('s', 12, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
(1 << IIO_CHAN_INFO_SCALE_SEPARATE),
- 0, ADIS16350_SCAN_ADC_0, IIO_ST('s', 12, 16, 0), 0),
+ in1, ADIS16350_SCAN_ADC_0, IIO_ST('s', 12, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, ADIS16300_SCAN_INCLI_X, IIO_ST('s', 13, 16, 0), 0),
+ incli_x, ADIS16300_SCAN_INCLI_X, IIO_ST('s', 13, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
- 0, ADIS16300_SCAN_INCLI_Y, IIO_ST('s', 13, 16, 0), 0),
+ incli_y, ADIS16300_SCAN_INCLI_Y, IIO_ST('s', 13, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(14)
};
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 94d3bfaa061..19819e7578c 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -44,20 +44,22 @@ struct bus_type iio_bus_type = {
EXPORT_SYMBOL(iio_bus_type);
static const char * const iio_chan_type_name_spec_shared[] = {
- [IIO_TIMESTAMP] = "timestamp",
- [IIO_ACCEL] = "accel",
[IIO_IN] = "in",
+ [IIO_OUT] = "out",
[IIO_CURRENT] = "current",
[IIO_POWER] = "power",
+ [IIO_ACCEL] = "accel",
[IIO_IN_DIFF] = "in-in",
[IIO_GYRO] = "gyro",
- [IIO_TEMP] = "temp",
[IIO_MAGN] = "magn",
+ [IIO_LIGHT] = "illuminance",
+ [IIO_INTENSITY] = "intensity",
+ [IIO_PROXIMITY] = "proximity",
+ [IIO_TEMP] = "temp",
[IIO_INCLI] = "incli",
[IIO_ROT] = "rot",
- [IIO_INTENSITY] = "intensity",
- [IIO_LIGHT] = "illuminance",
[IIO_ANGL] = "angl",
+ [IIO_TIMESTAMP] = "timestamp",
};
static const char * const iio_chan_type_name_spec_complex[] = {
@@ -396,6 +398,11 @@ static ssize_t iio_read_channel_info(struct device *dev,
return sprintf(buf, "-%d.%06u\n", val, -val2);
else
return sprintf(buf, "%d.%06u\n", val, val2);
+ } else if (ret == IIO_VAL_INT_PLUS_NANO) {
+ if (val2 < 0)
+ return sprintf(buf, "-%d.%09u\n", val, -val2);
+ else
+ return sprintf(buf, "%d.%09u\n", val, val2);
} else
return 0;
}
@@ -407,25 +414,40 @@ static ssize_t iio_write_channel_info(struct device *dev,
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- int ret, integer = 0, micro = 0, micro_mult = 100000;
+ int ret, integer = 0, fract = 0, fract_mult = 100000;
bool integer_part = true, negative = false;
/* Assumes decimal - precision based on number of digits */
if (!indio_dev->info->write_raw)
return -EINVAL;
+
+ if (indio_dev->info->write_raw_get_fmt)
+ switch (indio_dev->info->write_raw_get_fmt(indio_dev,
+ this_attr->c, this_attr->address)) {
+ case IIO_VAL_INT_PLUS_MICRO:
+ fract_mult = 100000;
+ break;
+ case IIO_VAL_INT_PLUS_NANO:
+ fract_mult = 100000000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
if (buf[0] == '-') {
negative = true;
buf++;
}
+
while (*buf) {
if ('0' <= *buf && *buf <= '9') {
if (integer_part)
integer = integer*10 + *buf - '0';
else {
- micro += micro_mult*(*buf - '0');
- if (micro_mult == 1)
+ fract += fract_mult*(*buf - '0');
+ if (fract_mult == 1)
break;
- micro_mult /= 10;
+ fract_mult /= 10;
}
} else if (*buf == '\n') {
if (*(buf + 1) == '\0')
@@ -443,11 +465,11 @@ static ssize_t iio_write_channel_info(struct device *dev,
if (integer)
integer = -integer;
else
- micro = -micro;
+ fract = -fract;
}
ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
- integer, micro, this_attr->address);
+ integer, fract, this_attr->address);
if (ret)
return ret;
@@ -655,7 +677,8 @@ static int iio_device_add_channel_sysfs(struct iio_dev *dev_info,
else
ret = __iio_add_chan_devattr("raw", NULL, chan,
&iio_read_channel_info,
- NULL,
+ (chan->type == IIO_OUT ?
+ &iio_write_channel_info : NULL),
0,
0,
&dev_info->dev,
diff --git a/drivers/staging/iio/industrialio-trigger.c b/drivers/staging/iio/industrialio-trigger.c
index d504aa251ce..90ca2df23ea 100644
--- a/drivers/staging/iio/industrialio-trigger.c
+++ b/drivers/staging/iio/industrialio-trigger.c
@@ -340,6 +340,9 @@ static ssize_t iio_trigger_write_current(struct device *dev,
{
struct iio_dev *dev_info = dev_get_drvdata(dev);
struct iio_trigger *oldtrig = dev_info->trig;
+ struct iio_trigger *trig;
+ int ret;
+
mutex_lock(&dev_info->mlock);
if (dev_info->currentmode == INDIO_RING_TRIGGERED) {
mutex_unlock(&dev_info->mlock);
@@ -347,7 +350,22 @@ static ssize_t iio_trigger_write_current(struct device *dev,
}
mutex_unlock(&dev_info->mlock);
- dev_info->trig = iio_trigger_find_by_name(buf, len);
+ trig = iio_trigger_find_by_name(buf, len);
+
+ if (trig && dev_info->info->validate_trigger) {
+ ret = dev_info->info->validate_trigger(dev_info, trig);
+ if (ret)
+ return ret;
+ }
+
+ if (trig && trig->validate_device) {
+ ret = trig->validate_device(trig, dev_info);
+ if (ret)
+ return ret;
+ }
+
+ dev_info->trig = trig;
+
if (oldtrig && dev_info->trig != oldtrig)
iio_put_trigger(oldtrig);
if (dev_info->trig)
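
With the two validate hooks above, either side can veto a trigger change before dev_info->trig is updated. A minimal sketch of the device-side hook, assuming a driver that only accepts the trigger it registered itself (the foo_* names are illustrative; st->trig mirrors the adis16260_state field):

static int foo_validate_trigger(struct iio_dev *indio_dev,
				struct iio_trigger *trig)
{
	struct foo_state *st = iio_priv(indio_dev);

	/* reject any trigger other than the driver's own data-ready one */
	if (st->trig != trig)
		return -EINVAL;

	return 0;
}

static const struct iio_info foo_info = {
	.driver_module		= THIS_MODULE,
	.validate_trigger	= foo_validate_trigger,
};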
diff --git a/drivers/staging/iio/kfifo_buf.c b/drivers/staging/iio/kfifo_buf.c
index cc14b96d814..6002368fdcf 100644
--- a/drivers/staging/iio/kfifo_buf.c
+++ b/drivers/staging/iio/kfifo_buf.c
@@ -110,9 +110,7 @@ struct iio_ring_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
iio_ring_buffer_init(&kf->ring, indio_dev);
__iio_init_kfifo(kf);
kf->ring.dev.type = &iio_kfifo_type;
- device_initialize(&kf->ring.dev);
kf->ring.dev.parent = &indio_dev->dev;
- kf->ring.dev.bus = &iio_bus_type;
dev_set_drvdata(&kf->ring.dev, (void *)&(kf->ring));
return &kf->ring;
diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig
index 46d62d1b037..1ad2d56c8ba 100644
--- a/drivers/staging/iio/light/Kconfig
+++ b/drivers/staging/iio/light/Kconfig
@@ -1,4 +1,4 @@
-\#
+#
# Light sensors
#
comment "Light sensors"
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 4794ffd5e44..426b6af7080 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -54,9 +54,9 @@
#define ISL29018_MAX_REGS ISL29018_REG_ADD_DATA_MSB
struct isl29018_chip {
- struct iio_dev *indio_dev;
struct i2c_client *client;
struct mutex lock;
+ unsigned int lux_scale;
unsigned int range;
unsigned int adc_bit;
int prox_scheme;
@@ -68,7 +68,7 @@ static int isl29018_write_data(struct i2c_client *client, u8 reg,
{
u8 regval;
int ret = 0;
- struct isl29018_chip *chip = i2c_get_clientdata(client);
+ struct isl29018_chip *chip = iio_priv(i2c_get_clientdata(client));
regval = chip->reg_cache[reg];
regval &= ~mask;
@@ -158,7 +158,7 @@ static int isl29018_read_sensor_input(struct i2c_client *client, int mode)
static int isl29018_read_lux(struct i2c_client *client, int *lux)
{
int lux_data;
- struct isl29018_chip *chip = i2c_get_clientdata(client);
+ struct isl29018_chip *chip = iio_priv(i2c_get_clientdata(client));
lux_data = isl29018_read_sensor_input(client,
COMMMAND1_OPMODE_ALS_ONCE);
@@ -166,7 +166,7 @@ static int isl29018_read_lux(struct i2c_client *client, int *lux)
if (lux_data < 0)
return lux_data;
- *lux = (lux_data * chip->range) >> chip->adc_bit;
+ *lux = (lux_data * chip->range * chip->lux_scale) >> chip->adc_bit;
return 0;
}
@@ -224,52 +224,13 @@ static int isl29018_read_proximity_ir(struct i2c_client *client, int scheme,
return 0;
}
-static ssize_t get_sensor_data(struct device *dev, char *buf, int mode)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct isl29018_chip *chip = indio_dev->dev_data;
- struct i2c_client *client = chip->client;
- int value = 0;
- int status;
-
- mutex_lock(&chip->lock);
- switch (mode) {
- case COMMMAND1_OPMODE_PROX_ONCE:
- status = isl29018_read_proximity_ir(client,
- chip->prox_scheme, &value);
- break;
-
- case COMMMAND1_OPMODE_ALS_ONCE:
- status = isl29018_read_lux(client, &value);
- break;
-
- case COMMMAND1_OPMODE_IR_ONCE:
- status = isl29018_read_ir(client, &value);
- break;
-
- default:
- dev_err(&client->dev, "Mode %d is not supported\n", mode);
- mutex_unlock(&chip->lock);
- return -EBUSY;
- }
- if (status < 0) {
- dev_err(&client->dev, "Error in Reading data");
- mutex_unlock(&chip->lock);
- return status;
- }
-
- mutex_unlock(&chip->lock);
-
- return sprintf(buf, "%d\n", value);
-}
-
/* Sysfs interface */
/* range */
static ssize_t show_range(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct isl29018_chip *chip = indio_dev->dev_data;
+ struct isl29018_chip *chip = iio_priv(indio_dev);
return sprintf(buf, "%u\n", chip->range);
}
@@ -278,7 +239,7 @@ static ssize_t store_range(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct isl29018_chip *chip = indio_dev->dev_data;
+ struct isl29018_chip *chip = iio_priv(indio_dev);
struct i2c_client *client = chip->client;
int status;
unsigned long lval;
@@ -311,7 +272,7 @@ static ssize_t show_resolution(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct isl29018_chip *chip = indio_dev->dev_data;
+ struct isl29018_chip *chip = iio_priv(indio_dev);
return sprintf(buf, "%u\n", chip->adc_bit);
}
@@ -320,7 +281,7 @@ static ssize_t store_resolution(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct isl29018_chip *chip = indio_dev->dev_data;
+ struct isl29018_chip *chip = iio_priv(indio_dev);
struct i2c_client *client = chip->client;
int status;
unsigned long lval;
@@ -351,7 +312,7 @@ static ssize_t show_prox_infrared_supression(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct isl29018_chip *chip = indio_dev->dev_data;
+ struct isl29018_chip *chip = iio_priv(indio_dev);
/* return the "proximity scheme", i.e. whether the chip performs on-chip
infrared suppression (1 means perform on-chip suppression) */
@@ -362,7 +323,7 @@ static ssize_t store_prox_infrared_supression(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct isl29018_chip *chip = indio_dev->dev_data;
+ struct isl29018_chip *chip = iio_priv(indio_dev);
unsigned long lval;
if (strict_strtoul(buf, 10, &lval))
@@ -381,27 +342,87 @@ static ssize_t store_prox_infrared_supression(struct device *dev,
return count;
}
-/* Read lux */
-static ssize_t show_lux(struct device *dev,
- struct device_attribute *devattr, char *buf)
+/* Channel IO */
+static int isl29018_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
{
- return get_sensor_data(dev, buf, COMMMAND1_OPMODE_ALS_ONCE);
-}
+ struct isl29018_chip *chip = iio_priv(indio_dev);
+ int ret = -EINVAL;
-/* Read ir */
-static ssize_t show_ir(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- return get_sensor_data(dev, buf, COMMMAND1_OPMODE_IR_ONCE);
+ mutex_lock(&chip->lock);
+ if (mask == (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) &&
+ chan->type == IIO_LIGHT) {
+ chip->lux_scale = val;
+ ret = 0;
+ }
+ mutex_unlock(&chip->lock);
+
+ return ret;
}
-/* Read nearest ir */
-static ssize_t show_proxim_ir(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int isl29018_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
{
- return get_sensor_data(dev, buf, COMMMAND1_OPMODE_PROX_ONCE);
+ int ret = -EINVAL;
+ struct isl29018_chip *chip = iio_priv(indio_dev);
+ struct i2c_client *client = chip->client;
+
+ mutex_lock(&chip->lock);
+ switch (mask) {
+ case 0:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = isl29018_read_lux(client, val);
+ break;
+ case IIO_INTENSITY:
+ ret = isl29018_read_ir(client, val);
+ break;
+ case IIO_PROXIMITY:
+ ret = isl29018_read_proximity_ir(client,
+ chip->prox_scheme, val);
+ break;
+ default:
+ break;
+ }
+ if (!ret)
+ ret = IIO_VAL_INT;
+ break;
+ case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ if (chan->type == IIO_LIGHT) {
+ *val = chip->lux_scale;
+ ret = IIO_VAL_INT;
+ }
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&chip->lock);
+ return ret;
}
+static const struct iio_chan_spec isl29018_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .indexed = 1,
+ .channel = 0,
+ .processed_val = 1,
+ .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE),
+ }, {
+ .type = IIO_INTENSITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ }, {
+ /* Unindexed in current ABI. But perhaps it should be. */
+ .type = IIO_PROXIMITY,
+ }
+};
+
static IIO_DEVICE_ATTR(range, S_IRUGO | S_IWUSR, show_range, store_range, 0);
static IIO_CONST_ATTR(range_available, "1000 4000 16000 64000");
static IIO_CONST_ATTR(adc_resolution_available, "4 8 12 16");
@@ -411,9 +432,6 @@ static IIO_DEVICE_ATTR(proximity_on_chip_ambient_infrared_supression,
S_IRUGO | S_IWUSR,
show_prox_infrared_supression,
store_prox_infrared_supression, 0);
-static IIO_DEVICE_ATTR(illuminance0_input, S_IRUGO, show_lux, NULL, 0);
-static IIO_DEVICE_ATTR(intensity_infrared_raw, S_IRUGO, show_ir, NULL, 0);
-static IIO_DEVICE_ATTR(proximity_raw, S_IRUGO, show_proxim_ir, NULL, 0);
#define ISL29018_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr)
#define ISL29018_CONST_ATTR(name) (&iio_const_attr_##name.dev_attr.attr)
@@ -423,9 +441,6 @@ static struct attribute *isl29018_attributes[] = {
ISL29018_DEV_ATTR(adc_resolution),
ISL29018_CONST_ATTR(adc_resolution_available),
ISL29018_DEV_ATTR(proximity_on_chip_ambient_infrared_supression),
- ISL29018_DEV_ATTR(illuminance0_input),
- ISL29018_DEV_ATTR(intensity_infrared_raw),
- ISL29018_DEV_ATTR(proximity_raw),
NULL
};
@@ -435,7 +450,7 @@ static const struct attribute_group isl29108_group = {
static int isl29018_chip_init(struct i2c_client *client)
{
- struct isl29018_chip *chip = i2c_get_clientdata(client);
+ struct isl29018_chip *chip = iio_priv(i2c_get_clientdata(client));
int status;
int new_adc_bit;
unsigned int new_range;
@@ -458,44 +473,45 @@ static int isl29018_chip_init(struct i2c_client *client)
static const struct iio_info isl29108_info = {
.attrs = &isl29108_group,
.driver_module = THIS_MODULE,
+ .read_raw = &isl29018_read_raw,
+ .write_raw = &isl29018_write_raw,
};
static int __devinit isl29018_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct isl29018_chip *chip;
+ struct iio_dev *indio_dev;
int err;
- chip = kzalloc(sizeof(struct isl29018_chip), GFP_KERNEL);
- if (!chip) {
- dev_err(&client->dev, "Memory allocation fails\n");
+ indio_dev = iio_allocate_device(sizeof(*chip));
+ if (indio_dev == NULL) {
+ dev_err(&client->dev, "iio allocation fails\n");
err = -ENOMEM;
goto exit;
}
+ chip = iio_priv(indio_dev);
- i2c_set_clientdata(client, chip);
+ i2c_set_clientdata(client, indio_dev);
chip->client = client;
mutex_init(&chip->lock);
+ chip->lux_scale = 1;
chip->range = 1000;
chip->adc_bit = 16;
err = isl29018_chip_init(client);
if (err)
- goto exit_free;
+ goto exit_iio_free;
- chip->indio_dev = iio_allocate_device(0);
- if (!chip->indio_dev) {
- dev_err(&client->dev, "iio allocation fails\n");
- goto exit_free;
- }
- chip->indio_dev->info = &isl29108_info;
- chip->indio_dev->name = id->name;
- chip->indio_dev->dev.parent = &client->dev;
- chip->indio_dev->dev_data = (void *)(chip);
- chip->indio_dev->modes = INDIO_DIRECT_MODE;
- err = iio_device_register(chip->indio_dev);
+ indio_dev->info = &isl29108_info;
+ indio_dev->channels = isl29018_channels;
+ indio_dev->num_channels = ARRAY_SIZE(isl29018_channels);
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ err = iio_device_register(indio_dev);
if (err) {
dev_err(&client->dev, "iio registration fails\n");
goto exit_iio_free;
@@ -503,20 +519,17 @@ static int __devinit isl29018_probe(struct i2c_client *client,
return 0;
exit_iio_free:
- iio_free_device(chip->indio_dev);
-exit_free:
- kfree(chip);
+ iio_free_device(indio_dev);
exit:
return err;
}
static int __devexit isl29018_remove(struct i2c_client *client)
{
- struct isl29018_chip *chip = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
dev_dbg(&client->dev, "%s()\n", __func__);
- iio_device_unregister(chip->indio_dev);
- kfree(chip);
+ iio_device_unregister(indio_dev);
return 0;
}
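
The read path above folds the new calibration multiplier into the existing lux conversion, lux = (raw * range * lux_scale) >> adc_bit. A small illustrative helper (not part of the driver), using the probe defaults range = 1000, adc_bit = 16, lux_scale = 1:

/* With the defaults, a raw count of 32768 maps to
 * (32768 * 1000 * 1) >> 16 = 500 lux; writing 2 to the new
 * CALIBSCALE channel info doubles the result to 1000 lux.
 * Like the driver, this keeps the intermediate product in int,
 * which is fine for the default range but not range-checked here. */
static inline int example_lux(int raw, unsigned int range,
			      unsigned int lux_scale, unsigned int adc_bit)
{
	return (raw * range * lux_scale) >> adc_bit;
}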
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/staging/iio/light/tsl2563.c
index 9cffa2ecb0e..f25243b0847 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/staging/iio/light/tsl2563.c
@@ -137,37 +137,14 @@ struct tsl2563_chip {
u32 data1;
};
-static int tsl2563_write(struct i2c_client *client, u8 reg, u8 value)
-{
- int ret;
- u8 buf[2];
-
- buf[0] = TSL2563_CMD | reg;
- buf[1] = value;
-
- ret = i2c_master_send(client, buf, sizeof(buf));
- return (ret == sizeof(buf)) ? 0 : ret;
-}
-
-static int tsl2563_read(struct i2c_client *client, u8 reg, void *buf, int len)
-{
- int ret;
- u8 cmd = TSL2563_CMD | reg;
-
- ret = i2c_master_send(client, &cmd, sizeof(cmd));
- if (ret != sizeof(cmd))
- return ret;
-
- return i2c_master_recv(client, buf, len);
-}
-
static int tsl2563_set_power(struct tsl2563_chip *chip, int on)
{
struct i2c_client *client = chip->client;
u8 cmd;
cmd = on ? TSL2563_CMD_POWER_ON : TSL2563_CMD_POWER_OFF;
- return tsl2563_write(client, TSL2563_REG_CTRL, cmd);
+ return i2c_smbus_write_byte_data(client,
+ TSL2563_CMD | TSL2563_REG_CTRL, cmd);
}
/*
@@ -178,36 +155,40 @@ static int tsl2563_get_power(struct tsl2563_chip *chip)
{
struct i2c_client *client = chip->client;
int ret;
- u8 val;
- ret = tsl2563_read(client, TSL2563_REG_CTRL, &val, sizeof(val));
- if (ret != sizeof(val))
+ ret = i2c_smbus_read_byte_data(client, TSL2563_CMD | TSL2563_REG_CTRL);
+ if (ret < 0)
return ret;
- return (val & TSL2563_CTRL_POWER_MASK) == TSL2563_CMD_POWER_ON;
+ return (ret & TSL2563_CTRL_POWER_MASK) == TSL2563_CMD_POWER_ON;
}
static int tsl2563_configure(struct tsl2563_chip *chip)
{
int ret;
- ret = tsl2563_write(chip->client, TSL2563_REG_TIMING,
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_TIMING,
chip->gainlevel->gaintime);
if (ret)
goto error_ret;
- ret = tsl2563_write(chip->client, TSL2563_REG_HIGHLOW,
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_HIGHLOW,
chip->high_thres & 0xFF);
if (ret)
goto error_ret;
- ret = tsl2563_write(chip->client, TSL2563_REG_HIGHHIGH,
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_HIGHHIGH,
(chip->high_thres >> 8) & 0xFF);
if (ret)
goto error_ret;
- ret = tsl2563_write(chip->client, TSL2563_REG_LOWLOW,
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_LOWLOW,
chip->low_thres & 0xFF);
if (ret)
goto error_ret;
- ret = tsl2563_write(chip->client, TSL2563_REG_LOWHIGH,
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_LOWHIGH,
(chip->low_thres >> 8) & 0xFF);
/* The interrupt register is written automatically anyway when it is relevant,
so it is not written here */
@@ -242,8 +223,8 @@ static int tsl2563_read_id(struct tsl2563_chip *chip, u8 *id)
struct i2c_client *client = chip->client;
int ret;
- ret = tsl2563_read(client, TSL2563_REG_ID, id, sizeof(*id));
- if (ret != sizeof(*id))
+ ret = i2c_smbus_read_byte_data(client, TSL2563_CMD | TSL2563_REG_ID);
+ if (ret < 0)
return ret;
return 0;
@@ -313,8 +294,9 @@ static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
(adc > chip->gainlevel->max) ?
chip->gainlevel++ : chip->gainlevel--;
- tsl2563_write(client, TSL2563_REG_TIMING,
- chip->gainlevel->gaintime);
+ i2c_smbus_write_byte_data(client,
+ TSL2563_CMD | TSL2563_REG_TIMING,
+ chip->gainlevel->gaintime);
tsl2563_wait_adc(chip);
tsl2563_wait_adc(chip);
@@ -327,7 +309,6 @@ static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
static int tsl2563_get_adc(struct tsl2563_chip *chip)
{
struct i2c_client *client = chip->client;
- u8 buf0[2], buf1[2];
u16 adc0, adc1;
int retry = 1;
int ret = 0;
@@ -350,19 +331,17 @@ static int tsl2563_get_adc(struct tsl2563_chip *chip)
}
while (retry) {
- ret = tsl2563_read(client,
- TSL2563_REG_DATA0LOW,
- buf0, sizeof(buf0));
- if (ret != sizeof(buf0))
+ ret = i2c_smbus_read_word_data(client,
+ TSL2563_CMD | TSL2563_REG_DATA0LOW);
+ if (ret < 0)
goto out;
+ adc0 = ret;
- ret = tsl2563_read(client, TSL2563_REG_DATA1LOW,
- buf1, sizeof(buf1));
- if (ret != sizeof(buf1))
+ ret = i2c_smbus_read_word_data(client,
+ TSL2563_CMD | TSL2563_REG_DATA1LOW);
+ if (ret < 0)
goto out;
-
- adc0 = (buf0[1] << 8) + buf0[0];
- adc1 = (buf1[1] << 8) + buf1[0];
+ adc1 = ret;
retry = tsl2563_adjust_gainlevel(chip, adc0);
}
@@ -548,15 +527,16 @@ error_ret:
return ret;
}
+#define INFO_MASK (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE)
+#define EVENT_MASK (IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) | \
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING))
+#define IIO_CHAN_2563(type, mod, proc, chan, imask, emask) \
+ IIO_CHAN(type, mod, 1, proc, NULL, chan, 0, imask, 0, 0, {}, emask)
+
static const struct iio_chan_spec tsl2563_channels[] = {
- IIO_CHAN(IIO_LIGHT, 0, 1, 1, NULL, 0, 0, 0, 0, 0, {}, 0),
- IIO_CHAN(IIO_INTENSITY, 1, 1, 0, "both", 0,
- (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE), 0, 0, 0, {},
- IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
- IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING)),
- IIO_CHAN(IIO_INTENSITY, 1, 1, 0, "ir", 1,
- (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE), 0, 0, 0, {},
- 0)
+ IIO_CHAN_2563(IIO_LIGHT, 0, 1, 0, 0, 0),
+ IIO_CHAN_2563(IIO_INTENSITY, 1, 0, 0, INFO_MASK, EVENT_MASK),
+ IIO_CHAN_2563(IIO_INTENSITY, 1, 0, 1, INFO_MASK, 0),
};
static int tsl2563_read_thresh(struct iio_dev *indio_dev,
@@ -592,11 +572,13 @@ static ssize_t tsl2563_write_thresh(struct iio_dev *indio_dev,
else
address = TSL2563_REG_LOWLOW;
mutex_lock(&chip->lock);
- ret = tsl2563_write(chip->client, address, val & 0xFF);
+ ret = i2c_smbus_write_byte_data(chip->client, TSL2563_CMD | address,
+ val & 0xFF);
if (ret)
goto error_ret;
- ret = tsl2563_write(chip->client, address + 1,
- (val >> 8) & 0xFF);
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | (address + 1),
+ (val >> 8) & 0xFF);
if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_RISING)
chip->high_thres = val;
else
@@ -612,7 +594,6 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
{
struct iio_dev *dev_info = private;
struct tsl2563_chip *chip = iio_priv(dev_info);
- u8 cmd = TSL2563_CMD | TSL2563_CLEARINT;
iio_push_event(dev_info, 0,
IIO_UNMOD_EVENT_CODE(IIO_EV_CLASS_LIGHT,
@@ -622,7 +603,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
iio_get_time_ns());
/* clear the interrupt and push the event */
- i2c_master_send(chip->client, &cmd, sizeof(cmd));
+ i2c_smbus_write_byte(chip->client, TSL2563_CMD | TSL2563_CLEARINT);
return IRQ_HANDLED;
}
@@ -647,13 +628,17 @@ static int tsl2563_write_interrupt_config(struct iio_dev *indio_dev,
if (ret)
goto out;
}
- ret = tsl2563_write(chip->client, TSL2563_REG_INT, chip->intr);
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_INT,
+ chip->intr);
chip->int_enabled = true;
}
if (!state && (chip->intr & 0x30)) {
chip->intr &= ~0x30;
- ret = tsl2563_write(chip->client, TSL2563_REG_INT, chip->intr);
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_INT,
+ chip->intr);
chip->int_enabled = false;
/* now the interrupt is not enabled, we can go to sleep */
schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
@@ -668,16 +653,15 @@ static int tsl2563_read_interrupt_config(struct iio_dev *indio_dev,
int event_code)
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
- u8 rxbuf;
int ret;
mutex_lock(&chip->lock);
- ret = tsl2563_read(chip->client, TSL2563_REG_INT,
- &rxbuf, sizeof(rxbuf));
+ ret = i2c_smbus_read_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_INT);
mutex_unlock(&chip->lock);
if (ret < 0)
goto error_ret;
- ret = !!(rxbuf & 0x30);
+ ret = !!(ret & 0x30);
error_ret:
return ret;
@@ -690,6 +674,8 @@ static struct i2c_driver tsl2563_i2c_driver;
static const struct iio_info tsl2563_info_no_irq = {
.driver_module = THIS_MODULE,
+ .read_raw = &tsl2563_read_raw,
+ .write_raw = &tsl2563_write_raw,
};
static const struct iio_info tsl2563_info = {
@@ -797,7 +783,8 @@ static int tsl2563_remove(struct i2c_client *client)
cancel_delayed_work(&chip->poweroff_work);
/* Ensure that interrupts are disabled - then flush any bottom halves */
chip->intr &= ~0x30;
- tsl2563_write(chip->client, TSL2563_REG_INT, chip->intr);
+ i2c_smbus_write_byte_data(chip->client, TSL2563_CMD | TSL2563_REG_INT,
+ chip->intr);
flush_scheduled_work();
tsl2563_set_power(chip, 0);
if (client->irq)
diff --git a/drivers/staging/iio/magnetometer/ak8975.c b/drivers/staging/iio/magnetometer/ak8975.c
index 700f96c7027..33919e87e7c 100644
--- a/drivers/staging/iio/magnetometer/ak8975.c
+++ b/drivers/staging/iio/magnetometer/ak8975.c
@@ -89,7 +89,6 @@
*/
struct ak8975_data {
struct i2c_client *client;
- struct iio_dev *indio_dev;
struct attribute_group attrs;
struct mutex lock;
u8 asa[3];
@@ -221,7 +220,7 @@ static ssize_t show_mode(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ak8975_data *data = indio_dev->dev_data;
+ struct ak8975_data *data = iio_priv(indio_dev);
return sprintf(buf, "%lu\n", data->mode);
}
@@ -234,7 +233,7 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ak8975_data *data = indio_dev->dev_data;
+ struct ak8975_data *data = iio_priv(indio_dev);
struct i2c_client *client = data->client;
unsigned long oval;
int ret;
@@ -310,7 +309,7 @@ static ssize_t show_scale(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ak8975_data *data = indio_dev->dev_data;
+ struct ak8975_data *data = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(devattr);
return sprintf(buf, "%ld\n", data->raw_to_gauss[this_attr->address]);
@@ -376,7 +375,7 @@ static ssize_t show_raw(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ak8975_data *data = indio_dev->dev_data;
+ struct ak8975_data *data = iio_priv(indio_dev);
struct i2c_client *client = data->client;
struct iio_dev_attr *this_attr = to_iio_dev_attr(devattr);
u16 meas_reg;
@@ -483,46 +482,41 @@ static int ak8975_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ak8975_data *data;
+ struct iio_dev *indio_dev;
+ int eoc_gpio;
int err;
- /* Allocate our device context. */
- data = kzalloc(sizeof(struct ak8975_data), GFP_KERNEL);
- if (!data) {
- dev_err(&client->dev, "Memory allocation fails\n");
- err = -ENOMEM;
- goto exit;
- }
-
- i2c_set_clientdata(client, data);
- data->client = client;
-
- mutex_init(&data->lock);
-
/* Grab and set up the supplied GPIO. */
- data->eoc_irq = client->irq;
- data->eoc_gpio = irq_to_gpio(client->irq);
+ eoc_gpio = irq_to_gpio(client->irq);
/* We may not have a GPIO-based IRQ to scan; that is fine, we will
poll in that case */
- if (data->eoc_gpio > 0) {
- err = gpio_request(data->eoc_gpio, "ak_8975");
+ if (eoc_gpio > 0) {
+ err = gpio_request(eoc_gpio, "ak_8975");
if (err < 0) {
dev_err(&client->dev,
"failed to request GPIO %d, error %d\n",
- data->eoc_gpio, err);
- goto exit_free;
+ eoc_gpio, err);
+ goto exit;
}
- err = gpio_direction_input(data->eoc_gpio);
+ err = gpio_direction_input(eoc_gpio);
if (err < 0) {
dev_err(&client->dev,
"Failed to configure input direction for GPIO %d, error %d\n",
- data->eoc_gpio, err);
+ eoc_gpio, err);
goto exit_gpio;
}
} else
- data->eoc_gpio = 0; /* No GPIO available */
+ eoc_gpio = 0; /* No GPIO available */
+ /* Register with IIO */
+ indio_dev = iio_allocate_device(sizeof(*data));
+ if (indio_dev == NULL) {
+ err = -ENOMEM;
+ goto exit_gpio;
+ }
+ data = iio_priv(indio_dev);
/* Perform some basic start-of-day setup of the device. */
err = ak8975_setup(client);
if (err < 0) {
@@ -530,46 +524,41 @@ static int ak8975_probe(struct i2c_client *client,
goto exit_gpio;
}
- /* Register with IIO */
- data->indio_dev = iio_allocate_device(0);
- if (data->indio_dev == NULL) {
- err = -ENOMEM;
- goto exit_gpio;
- }
-
- data->indio_dev->dev.parent = &client->dev;
- data->indio_dev->info = &ak8975_info;
- data->indio_dev->dev_data = (void *)(data);
- data->indio_dev->modes = INDIO_DIRECT_MODE;
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ mutex_init(&data->lock);
+ data->eoc_irq = client->irq;
+ data->eoc_gpio = eoc_gpio;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &ak8975_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- err = iio_device_register(data->indio_dev);
+ err = iio_device_register(indio_dev);
if (err < 0)
goto exit_free_iio;
return 0;
exit_free_iio:
- iio_free_device(data->indio_dev);
+ iio_free_device(indio_dev);
exit_gpio:
- if (data->eoc_gpio)
- gpio_free(data->eoc_gpio);
-exit_free:
- kfree(data);
+ if (eoc_gpio)
+ gpio_free(eoc_gpio);
exit:
return err;
}
static int ak8975_remove(struct i2c_client *client)
{
- struct ak8975_data *data = i2c_get_clientdata(client);
-
- iio_device_unregister(data->indio_dev);
- iio_free_device(data->indio_dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak8975_data *data = iio_priv(indio_dev);
+ int eoc_gpio = data->eoc_gpio;
- if (data->eoc_gpio)
- gpio_free(data->eoc_gpio);
+ iio_device_unregister(indio_dev);
+ iio_free_device(indio_dev);
- kfree(data);
+ if (eoc_gpio)
+ gpio_free(eoc_gpio);
return 0;
}
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index dd9a3bb6aa0..b44c273a91a 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -131,7 +131,6 @@ static const unsigned short normal_i2c[] = { HMC5843_I2C_ADDRESS,
/* Each client has this additional data */
struct hmc5843_data {
- struct iio_dev *indio_dev;
struct mutex lock;
u8 rate;
u8 meas_conf;
@@ -159,7 +158,7 @@ static ssize_t hmc5843_read_measurement(struct device *dev,
struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
s16 coordinate_val;
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct hmc5843_data *data = indio_dev->dev_data;
+ struct hmc5843_data *data = iio_priv(indio_dev);
s32 result;
mutex_lock(&data->lock);
@@ -202,7 +201,7 @@ static ssize_t hmc5843_show_operating_mode(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct hmc5843_data *data = indio_dev->dev_data;
+ struct hmc5843_data *data = iio_priv(indio_dev);
return sprintf(buf, "%d\n", data->operating_mode);
}
@@ -213,7 +212,7 @@ static ssize_t hmc5843_set_operating_mode(struct device *dev,
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
- struct hmc5843_data *data = indio_dev->dev_data;
+ struct hmc5843_data *data = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
unsigned long operating_mode = 0;
s32 status;
@@ -278,7 +277,7 @@ static ssize_t hmc5843_show_measurement_configuration(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct hmc5843_data *data = indio_dev->dev_data;
+ struct hmc5843_data *data = iio_priv(indio_dev);
return sprintf(buf, "%d\n", data->meas_conf);
}
@@ -350,7 +349,7 @@ static ssize_t set_sampling_frequency(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
- struct hmc5843_data *data = indio_dev->dev_data;
+ struct hmc5843_data *data = iio_priv(indio_dev);
unsigned long rate = 0;
if (strncmp(buf, "0.5" , 3) == 0)
@@ -422,7 +421,7 @@ static ssize_t show_range(struct device *dev,
{
u8 range;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct hmc5843_data *data = indio_dev->dev_data;
+ struct hmc5843_data *data = iio_priv(indio_dev);
range = data->range;
return sprintf(buf, "%d\n", regval_to_input_field_mg[range]);
@@ -436,7 +435,7 @@ static ssize_t set_range(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct hmc5843_data *data = indio_dev->dev_data;
+ struct hmc5843_data *data = iio_priv(indio_dev);
unsigned long range = 0;
int error;
mutex_lock(&data->lock);
@@ -473,7 +472,7 @@ static ssize_t show_scale(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct hmc5843_data *data = indio_dev->dev_data;
+ struct hmc5843_data *data = iio_priv(indio_dev);
return strlen(strcpy(buf, regval_to_scale[data->range]));
}
static IIO_DEVICE_ATTR(magn_scale,
@@ -538,53 +537,46 @@ static int hmc5843_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct hmc5843_data *data;
+ struct iio_dev *indio_dev;
int err = 0;
- data = kzalloc(sizeof(struct hmc5843_data), GFP_KERNEL);
- if (!data) {
+ indio_dev = iio_allocate_device(sizeof(*data));
+ if (indio_dev == NULL) {
err = -ENOMEM;
goto exit;
}
-
+ data = iio_priv(indio_dev);
/* default settings at probe */
data->meas_conf = CONF_NORMAL;
data->range = RANGE_1_0;
data->operating_mode = MODE_CONVERSION_CONTINUOUS;
- i2c_set_clientdata(client, data);
+ i2c_set_clientdata(client, indio_dev);
/* Initialize the HMC5843 chip */
hmc5843_init_client(client);
- data->indio_dev = iio_allocate_device(0);
- if (!data->indio_dev) {
- err = -ENOMEM;
- goto exit_free1;
- }
- data->indio_dev->info = &hmc5843_info;
- data->indio_dev->dev.parent = &client->dev;
- data->indio_dev->dev_data = (void *)(data);
- data->indio_dev->modes = INDIO_DIRECT_MODE;
- err = iio_device_register(data->indio_dev);
+ indio_dev->info = &hmc5843_info;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ err = iio_device_register(indio_dev);
if (err)
goto exit_free2;
return 0;
exit_free2:
- iio_free_device(data->indio_dev);
-exit_free1:
- kfree(data);
+ iio_free_device(indio_dev);
exit:
return err;
}
static int hmc5843_remove(struct i2c_client *client)
{
- struct hmc5843_data *data = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
/* sleep mode to save power */
hmc5843_configure(client, MODE_SLEEP);
- iio_device_unregister(data->indio_dev);
- kfree(i2c_get_clientdata(client));
+ iio_device_unregister(indio_dev);
+
return 0;
}
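
The hmc5843 hunks above show the conversion pattern the rest of this series repeats: the per-driver state is carved out of the iio_dev allocation itself (iio_allocate_device(sizeof(*data)) plus iio_priv()) instead of being kzalloc'd separately and wired up through indio_dev->dev_data, and the bus drvdata now carries the iio_dev rather than the private struct. A minimal sketch of the pattern for a hypothetical I2C driver follows; the foo_* names and the single "mode" field are placeholders, not part of the patch.

	#include <linux/i2c.h>
	#include <linux/module.h>
	#include <linux/mutex.h>

	#include "../iio.h"

	struct foo_data {
		struct mutex lock;
		u8 mode;
	};

	static const struct iio_info foo_info = {
		.driver_module = THIS_MODULE,
	};

	static int foo_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
	{
		struct iio_dev *indio_dev;
		struct foo_data *data;
		int err;

		/* one allocation covers both the iio_dev and the private state */
		indio_dev = iio_allocate_device(sizeof(*data));
		if (indio_dev == NULL)
			return -ENOMEM;

		data = iio_priv(indio_dev);	/* replaces indio_dev->dev_data */
		mutex_init(&data->lock);

		/* drvdata now points at the iio_dev, not the private struct */
		i2c_set_clientdata(client, indio_dev);

		indio_dev->info = &foo_info;
		indio_dev->dev.parent = &client->dev;
		indio_dev->modes = INDIO_DIRECT_MODE;

		err = iio_device_register(indio_dev);
		if (err)
			iio_free_device(indio_dev);	/* not yet registered */
		return err;
	}

	static int foo_remove(struct i2c_client *client)
	{
		struct iio_dev *indio_dev = i2c_get_clientdata(client);

		/* as in the hunks above: no explicit free after unregister */
		iio_device_unregister(indio_dev);
		return 0;
	}

Sysfs callbacks then recover the state with iio_priv(dev_get_drvdata(dev)), exactly as the converted attribute handlers above do.
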
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 6c9c23fc4ae..4d1bd42ff9e 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -29,7 +29,7 @@ static int ade7753_spi_write_reg_8(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7753_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = ADE7753_WRITE_REG(reg_address);
@@ -47,7 +47,7 @@ static int ade7753_spi_write_reg_16(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7753_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = ADE7753_WRITE_REG(reg_address);
@@ -64,7 +64,7 @@ static int ade7753_spi_read_reg_8(struct device *dev,
u8 *val)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7753_state *st = iio_priv(indio_dev);
ssize_t ret;
ret = spi_w8r8(st->us, ADE7753_READ_REG(reg_address));
@@ -83,7 +83,7 @@ static int ade7753_spi_read_reg_16(struct device *dev,
u16 *val)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7753_state *st = iio_priv(indio_dev);
ssize_t ret;
ret = spi_w8r16(st->us, ADE7753_READ_REG(reg_address));
@@ -105,7 +105,7 @@ static int ade7753_spi_read_reg_24(struct device *dev,
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7753_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -369,10 +369,11 @@ static int ade7753_stop_device(struct device *dev)
return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
}
-static int ade7753_initial_setup(struct ade7753_state *st)
+static int ade7753_initial_setup(struct iio_dev *indio_dev)
{
int ret;
- struct device *dev = &st->indio_dev->dev;
+ struct device *dev = &indio_dev->dev;
+ struct ade7753_state *st = iio_priv(indio_dev);
/* use low spi speed for init */
st->us->mode = SPI_MODE_3;
@@ -397,9 +398,9 @@ static ssize_t ade7753_read_frequency(struct device *dev,
char *buf)
{
int ret, len = 0;
- u8 t;
+ u16 t;
int sps;
- ret = ade7753_spi_read_reg_8(dev, ADE7753_MODE, &t);
+ ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &t);
if (ret)
return ret;
@@ -416,7 +417,7 @@ static ssize_t ade7753_write_frequency(struct device *dev,
size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7753_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7753_state *st = iio_priv(indio_dev);
unsigned long val;
int ret;
u16 reg, t;
@@ -512,62 +513,44 @@ static const struct iio_info ade7753_info = {
static int __devinit ade7753_probe(struct spi_device *spi)
{
int ret, regdone = 0;
- struct ade7753_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
+ struct ade7753_state *st;
+ struct iio_dev *indio_dev;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
goto error_ret;
}
/* this is only used for removal purposes */
- spi_set_drvdata(spi, st);
+ spi_set_drvdata(spi, indio_dev);
- /* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADE7753_MAX_RX, GFP_KERNEL);
- if (st->rx == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->tx = kzalloc(sizeof(*st->tx)*ADE7753_MAX_TX, GFP_KERNEL);
- if (st->tx == NULL) {
- ret = -ENOMEM;
- goto error_free_rx;
- }
+ st = iio_priv(indio_dev);
st->us = spi;
mutex_init(&st->buf_lock);
- /* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_tx;
- }
- st->indio_dev->name = spi->dev.driver->name;
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->info = &ade7753_info;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &ade7753_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
regdone = 1;
/* Get the device into a sane initial state */
- ret = ade7753_initial_setup(st);
+ ret = ade7753_initial_setup(indio_dev);
if (ret)
goto error_free_dev;
return 0;
error_free_dev:
if (regdone)
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
else
- iio_free_device(st->indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
+
error_ret:
return ret;
}
@@ -576,19 +559,13 @@ error_ret:
static int ade7753_remove(struct spi_device *spi)
{
int ret;
- struct ade7753_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
ret = ade7753_stop_device(&(indio_dev->dev));
if (ret)
goto err_ret;
iio_device_unregister(indio_dev);
- kfree(st->tx);
- kfree(st->rx);
- kfree(st);
-
- return 0;
err_ret:
return ret;
diff --git a/drivers/staging/iio/meter/ade7753.h b/drivers/staging/iio/meter/ade7753.h
index 3b9c7f6a50e..3f059d3d939 100644
--- a/drivers/staging/iio/meter/ade7753.h
+++ b/drivers/staging/iio/meter/ade7753.h
@@ -60,17 +60,15 @@
/**
* struct ade7753_state - device instance specific data
* @us: actual spi_device
- * @indio_dev: industrial I/O device structure
* @tx: transmit buffer
* @rx: receive buffer
* @buf_lock: mutex to protect tx and rx
**/
struct ade7753_state {
- struct spi_device *us;
- struct iio_dev *indio_dev;
- u8 *tx;
- u8 *rx;
- struct mutex buf_lock;
+ struct spi_device *us;
+ struct mutex buf_lock;
+ u8 tx[ADE7753_MAX_TX] ____cacheline_aligned;
+ u8 rx[ADE7753_MAX_RX];
};
#endif
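
The new ade7753_state layout above drops the separately kzalloc'd tx/rx pointers in favour of fixed-size arrays embedded at the end of the state, with the first of them marked ____cacheline_aligned. Since the state lives inside the kmalloc'd memory returned by iio_allocate_device(), buffers that start on a cache-line boundary can be handed to the SPI core for DMA without sharing a line with the mutex and other fields. A sketch of the same idea with hypothetical foo_* names:

	#include <linux/cache.h>
	#include <linux/mutex.h>
	#include <linux/spi/spi.h>

	#define FOO_MAX_TX 4
	#define FOO_MAX_RX 4

	struct foo_state {
		struct spi_device	*us;
		struct mutex		buf_lock;
		/*
		 * Keep the DMA-able buffers at the end and align the first
		 * one, so transfers into rx/tx never share a cache line with
		 * the fields above.
		 */
		u8			tx[FOO_MAX_TX] ____cacheline_aligned;
		u8			rx[FOO_MAX_RX];
	};

	static int foo_write_reg_8(struct foo_state *st, u8 reg, u8 val)
	{
		int ret;

		mutex_lock(&st->buf_lock);
		st->tx[0] = reg;
		st->tx[1] = val;
		ret = spi_write(st->us, st->tx, 2);
		mutex_unlock(&st->buf_lock);

		return ret;
	}

Besides the DMA-safety argument, embedding the buffers removes two allocations and two error labels from every probe(), which is what the ade7753 probe hunk above is cleaning up.
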
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index 378f2c87086..f4f85fd5619 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -29,7 +29,7 @@ static int ade7754_spi_write_reg_8(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7754_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = ADE7754_WRITE_REG(reg_address);
@@ -47,7 +47,7 @@ static int ade7754_spi_write_reg_16(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7754_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = ADE7754_WRITE_REG(reg_address);
@@ -64,7 +64,7 @@ static int ade7754_spi_read_reg_8(struct device *dev,
u8 *val)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7754_state *st = iio_priv(indio_dev);
int ret;
ret = spi_w8r8(st->us, ADE7754_READ_REG(reg_address));
@@ -83,7 +83,7 @@ static int ade7754_spi_read_reg_16(struct device *dev,
u16 *val)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7754_state *st = iio_priv(indio_dev);
int ret;
ret = spi_w8r16(st->us, ADE7754_READ_REG(reg_address));
@@ -105,7 +105,7 @@ static int ade7754_spi_read_reg_24(struct device *dev,
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7754_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -388,10 +388,11 @@ static int ade7754_stop_device(struct device *dev)
return ade7754_spi_write_reg_8(dev, ADE7754_OPMODE, val);
}
-static int ade7754_initial_setup(struct ade7754_state *st)
+static int ade7754_initial_setup(struct iio_dev *indio_dev)
{
int ret;
- struct device *dev = &st->indio_dev->dev;
+ struct ade7754_state *st = iio_priv(indio_dev);
+ struct device *dev = &indio_dev->dev;
/* use low spi speed for init */
st->us->mode = SPI_MODE_3;
@@ -436,7 +437,7 @@ static ssize_t ade7754_write_frequency(struct device *dev,
size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7754_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7754_state *st = iio_priv(indio_dev);
unsigned long val;
int ret;
u8 reg, t;
@@ -535,62 +536,44 @@ static const struct iio_info ade7754_info = {
static int __devinit ade7754_probe(struct spi_device *spi)
{
int ret, regdone = 0;
- struct ade7754_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
+ struct ade7754_state *st;
+ struct iio_dev *indio_dev;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
goto error_ret;
}
/* this is only used for removal purposes */
- spi_set_drvdata(spi, st);
+ spi_set_drvdata(spi, indio_dev);
- /* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADE7754_MAX_RX, GFP_KERNEL);
- if (st->rx == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->tx = kzalloc(sizeof(*st->tx)*ADE7754_MAX_TX, GFP_KERNEL);
- if (st->tx == NULL) {
- ret = -ENOMEM;
- goto error_free_rx;
- }
+ st = iio_priv(indio_dev);
st->us = spi;
mutex_init(&st->buf_lock);
- /* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_tx;
- }
- st->indio_dev->name = spi->dev.driver->name;
- st->indio_dev->dev.parent = &spi->dev;
- st->indio_dev->info = &ade7754_info;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &ade7754_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
regdone = 1;
/* Get the device into a sane initial state */
- ret = ade7754_initial_setup(st);
+ ret = ade7754_initial_setup(indio_dev);
if (ret)
goto error_free_dev;
return 0;
error_free_dev:
if (regdone)
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
else
- iio_free_device(st->indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
+
error_ret:
return ret;
}
@@ -599,22 +582,17 @@ error_ret:
static int ade7754_remove(struct spi_device *spi)
{
int ret;
- struct ade7754_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
ret = ade7754_stop_device(&(indio_dev->dev));
if (ret)
goto err_ret;
iio_device_unregister(indio_dev);
- kfree(st->tx);
- kfree(st->rx);
- kfree(st);
-
- return 0;
err_ret:
return ret;
+
}
static struct spi_driver ade7754_driver = {
diff --git a/drivers/staging/iio/meter/ade7754.h b/drivers/staging/iio/meter/ade7754.h
index 0aa0522a33a..6121125520f 100644
--- a/drivers/staging/iio/meter/ade7754.h
+++ b/drivers/staging/iio/meter/ade7754.h
@@ -78,17 +78,15 @@
/**
* struct ade7754_state - device instance specific data
* @us: actual spi_device
- * @indio_dev: industrial I/O device structure
+ * @buf_lock: mutex to protect tx and rx
* @tx: transmit buffer
* @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
**/
struct ade7754_state {
- struct spi_device *us;
- struct iio_dev *indio_dev;
- u8 *tx;
- u8 *rx;
- struct mutex buf_lock;
+ struct spi_device *us;
+ struct mutex buf_lock;
+ u8 tx[ADE7754_MAX_TX] ____cacheline_aligned;
+ u8 rx[ADE7754_MAX_RX];
};
#endif
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 730f6d9074a..a51a64cad03 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -29,7 +29,7 @@ static int ade7759_spi_write_reg_8(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7759_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = ADE7759_WRITE_REG(reg_address);
@@ -47,7 +47,7 @@ static int ade7759_spi_write_reg_16(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7759_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = ADE7759_WRITE_REG(reg_address);
@@ -64,7 +64,7 @@ static int ade7759_spi_read_reg_8(struct device *dev,
u8 *val)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7759_state *st = iio_priv(indio_dev);
int ret;
ret = spi_w8r8(st->us, ADE7759_READ_REG(reg_address));
@@ -83,7 +83,7 @@ static int ade7759_spi_read_reg_16(struct device *dev,
u16 *val)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7759_state *st = iio_priv(indio_dev);
int ret;
ret = spi_w8r16(st->us, ADE7759_READ_REG(reg_address));
@@ -105,7 +105,7 @@ static int ade7759_spi_read_reg_40(struct device *dev,
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7759_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -328,10 +328,11 @@ static int ade7759_stop_device(struct device *dev)
return ade7759_spi_write_reg_16(dev, ADE7759_MODE, val);
}
-static int ade7759_initial_setup(struct ade7759_state *st)
+static int ade7759_initial_setup(struct iio_dev *indio_dev)
{
int ret;
- struct device *dev = &st->indio_dev->dev;
+ struct ade7759_state *st = iio_priv(indio_dev);
+ struct device *dev = &indio_dev->dev;
/* use low spi speed for init */
st->us->mode = SPI_MODE_3;
@@ -376,7 +377,7 @@ static ssize_t ade7759_write_frequency(struct device *dev,
size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7759_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7759_state *st = iio_priv(indio_dev);
unsigned long val;
int ret;
u16 reg, t;
@@ -458,62 +459,41 @@ static const struct iio_info ade7759_info = {
static int __devinit ade7759_probe(struct spi_device *spi)
{
int ret;
- struct ade7759_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
+ struct ade7759_state *st;
+ struct iio_dev *indio_dev;
+
+ /* setup the industrialio driver allocated elements */
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ ret = -ENOMEM;
goto error_ret;
}
/* this is only used for removal purposes */
- spi_set_drvdata(spi, st);
+ spi_set_drvdata(spi, indio_dev);
- /* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADE7759_MAX_RX, GFP_KERNEL);
- if (st->rx == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->tx = kzalloc(sizeof(*st->tx)*ADE7759_MAX_TX, GFP_KERNEL);
- if (st->tx == NULL) {
- ret = -ENOMEM;
- goto error_free_rx;
- }
+ st = iio_priv(indio_dev);
st->us = spi;
mutex_init(&st->buf_lock);
- /* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_tx;
- }
-
- st->indio_dev->name = spi->dev.driver->name;
- st->indio_dev->dev.parent = &spi->dev;
-
- st->indio_dev->info = &ade7759_info;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = spi->dev.driver->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &ade7759_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
/* Get the device into a sane initial state */
- ret = ade7759_initial_setup(st);
+ ret = ade7759_initial_setup(indio_dev);
if (ret)
goto error_unreg_dev;
return 0;
error_unreg_dev:
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
error_free_dev:
- iio_free_device(st->indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
error_ret:
return ret;
}
@@ -522,19 +502,13 @@ error_ret:
static int ade7759_remove(struct spi_device *spi)
{
int ret;
- struct ade7759_state *st = spi_get_drvdata(spi);
- struct iio_dev *indio_dev = st->indio_dev;
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
ret = ade7759_stop_device(&(indio_dev->dev));
if (ret)
goto err_ret;
iio_device_unregister(indio_dev);
- kfree(st->tx);
- kfree(st->rx);
- kfree(st);
-
- return 0;
err_ret:
return ret;
diff --git a/drivers/staging/iio/meter/ade7759.h b/drivers/staging/iio/meter/ade7759.h
index cc76c2c4c03..c81d23d730d 100644
--- a/drivers/staging/iio/meter/ade7759.h
+++ b/drivers/staging/iio/meter/ade7759.h
@@ -41,17 +41,15 @@
/**
* struct ade7759_state - device instance specific data
* @us: actual spi_device
- * @indio_dev: industrial I/O device structure
+ * @buf_lock: mutex to protect tx and rx
* @tx: transmit buffer
* @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
**/
struct ade7759_state {
- struct spi_device *us;
- struct iio_dev *indio_dev;
- u8 *tx;
- u8 *rx;
- struct mutex buf_lock;
+ struct spi_device *us;
+ struct mutex buf_lock;
+ u8 tx[ADE7759_MAX_TX] ____cacheline_aligned;
+ u8 rx[ADE7759_MAX_RX];
};
#endif
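
With the indio_dev back-pointer gone from these state structures, helpers that used to take the state now take the struct iio_dev * and look the state up themselves, as the ade77xx_initial_setup() hunks show. The shape of that change, reusing the hypothetical foo_state/foo_write_reg_8 sketch from earlier:

	static int foo_initial_setup(struct iio_dev *indio_dev)
	{
		struct foo_state *st = iio_priv(indio_dev);

		/* use a conservative SPI mode for the first transfers */
		st->us->mode = SPI_MODE_3;
		spi_setup(st->us);

		/* 0x00/0x01 are placeholder register/value, not chip-specific */
		return foo_write_reg_8(st, 0x00, 0x01);
	}
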
diff --git a/drivers/staging/iio/meter/ade7854-i2c.c b/drivers/staging/iio/meter/ade7854-i2c.c
index 4578e7b7f46..dd723435340 100644
--- a/drivers/staging/iio/meter/ade7854-i2c.c
+++ b/drivers/staging/iio/meter/ade7854-i2c.c
@@ -20,7 +20,7 @@ static int ade7854_i2c_write_reg_8(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = (reg_address >> 8) & 0xFF;
@@ -39,7 +39,7 @@ static int ade7854_i2c_write_reg_16(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = (reg_address >> 8) & 0xFF;
@@ -59,7 +59,7 @@ static int ade7854_i2c_write_reg_24(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = (reg_address >> 8) & 0xFF;
@@ -80,7 +80,7 @@ static int ade7854_i2c_write_reg_32(struct device *dev,
{
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
mutex_lock(&st->buf_lock);
st->tx[0] = (reg_address >> 8) & 0xFF;
@@ -101,7 +101,7 @@ static int ade7854_i2c_read_reg_8(struct device *dev,
u8 *val)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
int ret;
mutex_lock(&st->buf_lock);
@@ -127,7 +127,7 @@ static int ade7854_i2c_read_reg_16(struct device *dev,
u16 *val)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
int ret;
mutex_lock(&st->buf_lock);
@@ -153,7 +153,7 @@ static int ade7854_i2c_read_reg_24(struct device *dev,
u32 *val)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
int ret;
mutex_lock(&st->buf_lock);
@@ -179,7 +179,7 @@ static int ade7854_i2c_read_reg_32(struct device *dev,
u32 *val)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
int ret;
mutex_lock(&st->buf_lock);
@@ -204,13 +204,14 @@ static int __devinit ade7854_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret;
- struct ade7854_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
- return ret;
- }
-
- i2c_set_clientdata(client, st);
+ struct ade7854_state *st;
+ struct iio_dev *indio_dev;
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+ st = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
st->read_reg_8 = ade7854_i2c_read_reg_8;
st->read_reg_16 = ade7854_i2c_read_reg_16;
st->read_reg_24 = ade7854_i2c_read_reg_24;
@@ -222,11 +223,9 @@ static int __devinit ade7854_i2c_probe(struct i2c_client *client,
st->i2c = client;
st->irq = client->irq;
- ret = ade7854_probe(st, &client->dev);
- if (ret) {
- kfree(st);
- return ret;
- }
+ ret = ade7854_probe(indio_dev, &client->dev);
+ if (ret)
+ iio_free_device(indio_dev);
return ret;
}
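
ade7854-i2c.c above and ade7854-spi.c below are thin bus glue: each allocates the iio_dev, fills the read_reg_*/write_reg_* function pointers in the shared ade7854_state, and hands the iio_dev to the common ade7854_probe(). The core code then never touches the bus directly; a sketch of how a core sysfs callback dispatches through those pointers (foo_read_16bit mirrors the real ade7854_read_16bit further down, with the usual ../iio.h and ../sysfs.h includes assumed):

	static ssize_t foo_read_16bit(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
	{
		struct iio_dev *indio_dev = dev_get_drvdata(dev);
		struct ade7854_state *st = iio_priv(indio_dev);
		struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
		u16 val = 0;
		int ret;

		/* st->read_reg_16 was set by the I2C or SPI probe */
		ret = st->read_reg_16(dev, this_attr->address, &val);
		if (ret)
			return ret;

		return sprintf(buf, "%u\n", val);
	}
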
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index 84da8fbde02..e0d10865590 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -21,7 +21,7 @@ static int ade7854_spi_write_reg_8(struct device *dev,
int ret;
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
struct spi_transfer xfer = {
.tx_buf = st->tx,
.bits_per_word = 8,
@@ -49,7 +49,7 @@ static int ade7854_spi_write_reg_16(struct device *dev,
int ret;
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
struct spi_transfer xfer = {
.tx_buf = st->tx,
.bits_per_word = 8,
@@ -78,7 +78,7 @@ static int ade7854_spi_write_reg_24(struct device *dev,
int ret;
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
struct spi_transfer xfer = {
.tx_buf = st->tx,
.bits_per_word = 8,
@@ -108,7 +108,7 @@ static int ade7854_spi_write_reg_32(struct device *dev,
int ret;
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
struct spi_transfer xfer = {
.tx_buf = st->tx,
.bits_per_word = 8,
@@ -138,7 +138,7 @@ static int ade7854_spi_read_reg_8(struct device *dev,
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -180,7 +180,7 @@ static int ade7854_spi_read_reg_16(struct device *dev,
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -221,7 +221,7 @@ static int ade7854_spi_read_reg_24(struct device *dev,
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -263,7 +263,7 @@ static int ade7854_spi_read_reg_32(struct device *dev,
{
struct spi_message msg;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
int ret;
struct spi_transfer xfers[] = {
{
@@ -302,13 +302,14 @@ error_ret:
static int __devinit ade7854_spi_probe(struct spi_device *spi)
{
int ret;
- struct ade7854_state *st = kzalloc(sizeof *st, GFP_KERNEL);
- if (!st) {
- ret = -ENOMEM;
- return ret;
- }
-
- spi_set_drvdata(spi, st);
+ struct ade7854_state *st;
+ struct iio_dev *indio_dev;
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
st->read_reg_8 = ade7854_spi_read_reg_8;
st->read_reg_16 = ade7854_spi_read_reg_16;
st->read_reg_24 = ade7854_spi_read_reg_24;
@@ -320,11 +321,10 @@ static int __devinit ade7854_spi_probe(struct spi_device *spi)
st->irq = spi->irq;
st->spi = spi;
- ret = ade7854_probe(st, &spi->dev);
- if (ret) {
- kfree(st);
- return ret;
- }
+
+ ret = ade7854_probe(indio_dev, &spi->dev);
+ if (ret)
+ iio_free_device(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index 44cd3ec546a..b82659f43bc 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -29,7 +29,7 @@ static ssize_t ade7854_read_8bit(struct device *dev,
int ret;
u8 val = 0;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
ret = st->read_reg_8(dev, this_attr->address, &val);
@@ -46,7 +46,7 @@ static ssize_t ade7854_read_16bit(struct device *dev,
int ret;
u16 val = 0;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
ret = st->read_reg_16(dev, this_attr->address, &val);
@@ -63,7 +63,7 @@ static ssize_t ade7854_read_24bit(struct device *dev,
int ret;
u32 val;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
ret = st->read_reg_24(dev, this_attr->address, &val);
@@ -81,7 +81,7 @@ static ssize_t ade7854_read_32bit(struct device *dev,
u32 val = 0;
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
ret = st->read_reg_32(dev, this_attr->address, &val);
if (ret)
@@ -97,7 +97,7 @@ static ssize_t ade7854_write_8bit(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
int ret;
long val;
@@ -118,7 +118,7 @@ static ssize_t ade7854_write_16bit(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
int ret;
long val;
@@ -139,7 +139,7 @@ static ssize_t ade7854_write_24bit(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
int ret;
long val;
@@ -160,7 +160,7 @@ static ssize_t ade7854_write_32bit(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
int ret;
long val;
@@ -177,7 +177,7 @@ error_ret:
static int ade7854_reset(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
u16 val;
st->read_reg_16(dev, ADE7854_CONFIG, &val);
@@ -426,7 +426,7 @@ static IIO_DEV_ATTR_CVAHR(ade7854_read_32bit,
static int ade7854_set_irq(struct device *dev, bool enable)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ade7854_state *st = iio_dev_get_devdata(indio_dev);
+ struct ade7854_state *st = iio_priv(indio_dev);
int ret;
u32 irqen;
@@ -449,10 +449,10 @@ error_ret:
return ret;
}
-static int ade7854_initial_setup(struct ade7854_state *st)
+static int ade7854_initial_setup(struct iio_dev *indio_dev)
{
int ret;
- struct device *dev = &st->indio_dev->dev;
+ struct device *dev = &indio_dev->dev;
/* Disable IRQ */
ret = ade7854_set_irq(dev, false);
@@ -556,68 +556,40 @@ static const struct iio_info ade7854_info = {
.driver_module = THIS_MODULE,
};
-int ade7854_probe(struct ade7854_state *st, struct device *dev)
+int ade7854_probe(struct iio_dev *indio_dev, struct device *dev)
{
int ret;
-
- /* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADE7854_MAX_RX, GFP_KERNEL);
- if (st->rx == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->tx = kzalloc(sizeof(*st->tx)*ADE7854_MAX_TX, GFP_KERNEL);
- if (st->tx == NULL) {
- ret = -ENOMEM;
- goto error_free_rx;
- }
- mutex_init(&st->buf_lock);
+ struct ade7854_state *st = iio_priv(indio_dev);
/* setup the industrialio driver allocated elements */
- st->indio_dev = iio_allocate_device(0);
- if (st->indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_free_tx;
- }
+ mutex_init(&st->buf_lock);
- st->indio_dev->dev.parent = dev;
- st->indio_dev->info = &ade7854_info;
- st->indio_dev->dev_data = (void *)(st);
- st->indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &ade7854_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->indio_dev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
/* Get the device into a sane initial state */
- ret = ade7854_initial_setup(st);
+ ret = ade7854_initial_setup(indio_dev);
if (ret)
goto error_unreg_dev;
return 0;
error_unreg_dev:
- iio_device_unregister(st->indio_dev);
+ iio_device_unregister(indio_dev);
error_free_dev:
- iio_free_device(st->indio_dev);
-error_free_tx:
- kfree(st->tx);
-error_free_rx:
- kfree(st->rx);
-error_free_st:
- kfree(st);
-
+ iio_free_device(indio_dev);
+error_ret:
return ret;
}
EXPORT_SYMBOL(ade7854_probe);
-int ade7854_remove(struct ade7854_state *st)
+int ade7854_remove(struct iio_dev *indio_dev)
{
- struct iio_dev *indio_dev = st->indio_dev;
-
iio_device_unregister(indio_dev);
- kfree(st->tx);
- kfree(st->rx);
- kfree(st);
return 0;
}
diff --git a/drivers/staging/iio/meter/ade7854.h b/drivers/staging/iio/meter/ade7854.h
index 79a21109f4e..2c96e8695d5 100644
--- a/drivers/staging/iio/meter/ade7854.h
+++ b/drivers/staging/iio/meter/ade7854.h
@@ -148,29 +148,29 @@
* struct ade7854_state - device instance specific data
* @spi: actual spi_device
* @indio_dev: industrial I/O device structure
+ * @buf_lock: mutex to protect tx and rx
* @tx: transmit buffer
* @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
**/
struct ade7854_state {
- struct spi_device *spi;
- struct i2c_client *i2c;
- struct iio_dev *indio_dev;
- u8 *tx;
- u8 *rx;
- int (*read_reg_8) (struct device *, u16, u8 *);
- int (*read_reg_16) (struct device *, u16, u16 *);
- int (*read_reg_24) (struct device *, u16, u32 *);
- int (*read_reg_32) (struct device *, u16, u32 *);
- int (*write_reg_8) (struct device *, u16, u8);
- int (*write_reg_16) (struct device *, u16, u16);
- int (*write_reg_24) (struct device *, u16, u32);
- int (*write_reg_32) (struct device *, u16, u32);
- int irq;
- struct mutex buf_lock;
+ struct spi_device *spi;
+ struct i2c_client *i2c;
+ int (*read_reg_8) (struct device *, u16, u8 *);
+ int (*read_reg_16) (struct device *, u16, u16 *);
+ int (*read_reg_24) (struct device *, u16, u32 *);
+ int (*read_reg_32) (struct device *, u16, u32 *);
+ int (*write_reg_8) (struct device *, u16, u8);
+ int (*write_reg_16) (struct device *, u16, u16);
+ int (*write_reg_24) (struct device *, u16, u32);
+ int (*write_reg_32) (struct device *, u16, u32);
+ int irq;
+ struct mutex buf_lock;
+ u8 tx[ADE7854_MAX_TX] ____cacheline_aligned;
+ u8 rx[ADE7854_MAX_RX];
+
};
-extern int ade7854_probe(struct ade7854_state *st, struct device *dev);
-extern int ade7854_remove(struct ade7854_state *st);
+extern int ade7854_probe(struct iio_dev *indio_dev, struct device *dev);
+extern int ade7854_remove(struct iio_dev *indio_dev);
#endif
diff --git a/drivers/staging/iio/resolver/Kconfig b/drivers/staging/iio/resolver/Kconfig
index a4a36342935..6ecd79e3003 100644
--- a/drivers/staging/iio/resolver/Kconfig
+++ b/drivers/staging/iio/resolver/Kconfig
@@ -25,30 +25,3 @@ config AD2S1210
Say yes here to build support for Analog Devices spi resolver
to digital converters, ad2s1210, provides direct access via sysfs.
-choice
- prompt "Resolution Control"
- depends on AD2S1210
- default AD2S1210_GPIO_NONE
- help
- In normal mode, the resolution of the digital output is selected
- using the RES0 and RES1 input pins. In configuration mode, the
- resolution is selected by setting the RES0 and RES1 bits in the
- control regsiter. When switching between normal mode and configuration
- mode, there are some schemes to keep them matchs.
-
-config AD2S1210_GPIO_INPUT
- bool "read resolution from gpio pins"
- help
- GPIO pins are sampling RES0 and RES1 pins, read the resolution
- settings from the GPIO pins.
-
-config AD2S1210_GPIO_OUTPUT
- bool "set gpio pins to set resolution"
- help
- RES0 and RES1 pins are controlled by GPIOs, setting GPIO pins to
- set the resolution.
-
-config AD2S1210_GPIO_NONE
- bool "take the responsibility by user"
-
-endchoice
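
The Kconfig choice removed above made the RES0/RES1 pin handling a compile-time decision; the reworked ad2s1210.c below moves it to runtime platform data, with st->pdata->gpioin selecting between reading the resolution pins back and driving them from software. The new ad2s1210.h is not part of this listing, so the layout below is only inferred from the st->pdata-> accesses visible in the hunks (a[], res[], gpioin), with a SAMPLE pin entry assumed; the real header may differ:

	/* inferred layout, not taken from the patch */
	struct ad2s1210_platform_data {
		unsigned	sample;		/* SAMPLE gpio */
		unsigned	a[2];		/* A0/A1 mode-select gpios */
		unsigned	res[2];		/* RES0/RES1 gpios */
		bool		gpioin;		/* true: resolution pins are inputs,
						 * read them back after a write;
						 * false: drive them from software */
	};

	/* board code would pass this via spi_board_info.platform_data;
	 * the pin numbers here are made up for the example */
	static struct ad2s1210_platform_data board_ad2s1210_pdata = {
		.sample	= 10,
		.a	= { 11, 12 },
		.res	= { 13, 14 },
		.gpioin	= true,
	};
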
diff --git a/drivers/staging/iio/resolver/ad2s120x.c b/drivers/staging/iio/resolver/ad2s120x.c
index f83e1422fd2..bed4c725f2d 100644
--- a/drivers/staging/iio/resolver/ad2s120x.c
+++ b/drivers/staging/iio/resolver/ad2s120x.c
@@ -32,161 +32,46 @@
struct ad2s120x_state {
struct mutex lock;
- struct iio_dev *idev;
struct spi_device *sdev;
- unsigned short sample;
- unsigned short rdvel;
- u8 rx[2];
- u8 tx[2];
+ int sample;
+ int rdvel;
+ u8 rx[2] ____cacheline_aligned;
};
-static ssize_t ad2s120x_show_pos_vel(struct device *dev,
+static ssize_t ad2s120x_show_val(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct spi_message msg;
- struct spi_transfer xfer;
int ret = 0;
ssize_t len = 0;
u16 pos;
s16 vel;
u8 status;
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s120x_state *st = idev->dev_data;
+ struct ad2s120x_state *st = iio_priv(dev_get_drvdata(dev));
+ struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
- xfer.len = 1;
- xfer.tx_buf = st->tx;
- xfer.rx_buf = st->rx;
mutex_lock(&st->lock);
gpio_set_value(st->sample, 0);
/* delay (6 * AD2S120X_TSCLK + 20) nano seconds */
udelay(1);
gpio_set_value(st->sample, 1);
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
- if (ret)
- goto error_ret;
- status = st->rx[1];
- pos = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
- len = sprintf(buf, "%d %c%c%c%c ", pos,
- (status & 0x8) ? 'P' : 'V',
- (status & 0x4) ? 'd' : '_',
- (status & 0x2) ? 'l' : '_',
- (status & 0x1) ? '1' : '0');
-
- /* delay 18 ns */
- /* ndelay(18); */
-
- gpio_set_value(st->rdvel, 0);
- /* ndelay(5);*/
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
- if (ret)
+ gpio_set_value(st->rdvel, iattr->address);
+ ret = spi_read(st->sdev, st->rx, 2);
+ if (ret < 0)
goto error_ret;
status = st->rx[1];
- vel = (st->rx[0] & 0x80) ? 0xf000 : 0;
- vel |= (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
- len += sprintf(buf + len, "%d %c%c%c%c\n", vel,
- (status & 0x8) ? 'P' : 'V',
- (status & 0x4) ? 'd' : '_',
- (status & 0x2) ? 'l' : '_',
- (status & 0x1) ? '1' : '0');
-error_ret:
- gpio_set_value(st->rdvel, 1);
- /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */
- udelay(1);
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-static ssize_t ad2s120x_show_pos(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct spi_message msg;
- struct spi_transfer xfer;
- int ret = 0;
- ssize_t len = 0;
- u16 pos;
- u8 status;
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s120x_state *st = idev->dev_data;
-
- xfer.len = 1;
- xfer.tx_buf = st->tx;
- xfer.rx_buf = st->rx;
- mutex_lock(&st->lock);
-
- gpio_set_value(st->sample, 0);
- /* delay (6 * AD2S120X_TSCLK + 20) nano seconds */
- udelay(1);
- gpio_set_value(st->sample, 1);
- gpio_set_value(st->rdvel, 1);
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
- if (ret)
- goto error_ret;
- status = st->rx[1];
- pos = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
- len = sprintf(buf, "%d %c%c%c%c ", pos,
- (status & 0x8) ? 'P' : 'V',
- (status & 0x4) ? 'd' : '_',
- (status & 0x2) ? 'l' : '_',
- (status & 0x1) ? '1' : '0');
-error_ret:
- /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */
- udelay(1);
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-static ssize_t ad2s120x_show_vel(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct spi_message msg;
- struct spi_transfer xfer;
- int ret = 0;
- ssize_t len = 0;
- s16 vel;
- u8 status;
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s120x_state *st = idev->dev_data;
-
- xfer.len = 1;
- xfer.tx_buf = st->tx;
- xfer.rx_buf = st->rx;
- mutex_lock(&st->lock);
-
- gpio_set_value(st->sample, 0);
- /* delay (6 * AD2S120X_TSCLK + 20) nano seconds */
- udelay(1);
- gpio_set_value(st->sample, 1);
-
- gpio_set_value(st->rdvel, 0);
- /* ndelay(5);*/
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
- if (ret)
- goto error_ret;
- status = st->rx[1];
- vel = (st->rx[0] & 0x80) ? 0xf000 : 0;
- vel |= (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
- len += sprintf(buf + len, "%d %c%c%c%c\n", vel,
+ if (iattr->address)
+ pos = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+ else {
+ vel = (st->rx[0] & 0x80) ? 0xf000 : 0;
+ vel |= (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
+ }
+ len = sprintf(buf, "%d %c%c%c%c ", iattr->address ? pos : vel,
(status & 0x8) ? 'P' : 'V',
(status & 0x4) ? 'd' : '_',
(status & 0x2) ? 'l' : '_',
(status & 0x1) ? '1' : '0');
error_ret:
- gpio_set_value(st->rdvel, 1);
/* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */
udelay(1);
mutex_unlock(&st->lock);
@@ -194,15 +79,10 @@ error_ret:
return ret ? ret : len;
}
-static IIO_CONST_ATTR(description,
- "12-Bit R/D Converter with Reference Oscillator");
-static IIO_DEVICE_ATTR(pos_vel, S_IRUGO, ad2s120x_show_pos_vel, NULL, 0);
-static IIO_DEVICE_ATTR(pos, S_IRUGO, ad2s120x_show_pos, NULL, 0);
-static IIO_DEVICE_ATTR(vel, S_IRUGO, ad2s120x_show_vel, NULL, 0);
+static IIO_DEVICE_ATTR(pos, S_IRUGO, ad2s120x_show_val, NULL, 1);
+static IIO_DEVICE_ATTR(vel, S_IRUGO, ad2s120x_show_val, NULL, 0);
static struct attribute *ad2s120x_attributes[] = {
- &iio_const_attr_description.dev_attr.attr,
- &iio_dev_attr_pos_vel.dev_attr.attr,
&iio_dev_attr_pos.dev_attr.attr,
&iio_dev_attr_vel.dev_attr.attr,
NULL,
@@ -220,42 +100,33 @@ static const struct iio_info ad2s120x_info = {
static int __devinit ad2s120x_probe(struct spi_device *spi)
{
struct ad2s120x_state *st;
+ struct iio_dev *indio_dev;
int pn, ret = 0;
unsigned short *pins = spi->dev.platform_data;
- for (pn = 0; pn < AD2S120X_PN; pn++) {
- if (gpio_request(pins[pn], DRV_NAME)) {
+ for (pn = 0; pn < AD2S120X_PN; pn++)
+ if (gpio_request_one(pins[pn], GPIOF_DIR_OUT, DRV_NAME)) {
pr_err("%s: request gpio pin %d failed\n",
DRV_NAME, pins[pn]);
goto error_ret;
}
- gpio_direction_output(pins[pn], 1);
- }
-
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
- spi_set_drvdata(spi, st);
-
+ spi_set_drvdata(spi, indio_dev);
+ st = iio_priv(indio_dev);
mutex_init(&st->lock);
st->sdev = spi;
st->sample = pins[0];
st->rdvel = pins[1];
- st->idev = iio_allocate_device(0);
- if (st->idev == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->idev->dev.parent = &spi->dev;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &ad2s120x_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- st->idev->info = &ad2s120x_info;
- st->idev->dev_data = (void *)(st);
- st->idev->modes = INDIO_DIRECT_MODE;
-
- ret = iio_device_register(st->idev);
+ ret = iio_device_register(indio_dev);
if (ret)
goto error_free_dev;
@@ -266,9 +137,7 @@ static int __devinit ad2s120x_probe(struct spi_device *spi)
return 0;
error_free_dev:
- iio_free_device(st->idev);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
error_ret:
for (--pn; pn >= 0; pn--)
gpio_free(pins[pn]);
@@ -277,10 +146,7 @@ error_ret:
static int __devexit ad2s120x_remove(struct spi_device *spi)
{
- struct ad2s120x_state *st = spi_get_drvdata(spi);
-
- iio_device_unregister(st->idev);
- kfree(st);
+ iio_device_unregister(spi_get_drvdata(spi));
return 0;
}
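
The ad2s120x rewrite above folds three nearly identical show routines (pos, vel, pos_vel) into a single ad2s120x_show_val() keyed on the address field of IIO_DEVICE_ATTR, which also drives the RDVEL pin. The general shape of that trick, with hypothetical foo_* names and foo_read_one() standing in for the GPIO/SPI sequence in the hunk (../iio.h and ../sysfs.h assumed):

	static ssize_t foo_show_val(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
		struct foo_state *st = iio_priv(dev_get_drvdata(dev));

		/*
		 * iattr->address carries the per-attribute constant passed to
		 * IIO_DEVICE_ATTR(); here 1 selects position, 0 velocity.
		 */
		return foo_read_one(st, iattr->address, buf);
	}

	static IIO_DEVICE_ATTR(pos, S_IRUGO, foo_show_val, NULL, 1);
	static IIO_DEVICE_ATTR(vel, S_IRUGO, foo_show_val, NULL, 0);

	static struct attribute *foo_attributes[] = {
		&iio_dev_attr_pos.dev_attr.attr,
		&iio_dev_attr_vel.dev_attr.attr,
		NULL,
	};
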
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 09f4fcfda73..ecaf7bb790f 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -19,44 +19,41 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "ad2s1210.h"
#define DRV_NAME "ad2s1210"
-#define DEF_CONTROL 0x7E
-
-#define MSB_IS_HIGH 0x80
-#define MSB_IS_LOW 0x7F
-#define PHASE_LOCK_RANGE_44 0x20
-#define ENABLE_HYSTERESIS 0x10
-#define SET_ENRES1 0x08
-#define SET_ENRES0 0x04
-#define SET_RES1 0x02
-#define SET_RES0 0x01
-
-#define SET_ENRESOLUTION (SET_ENRES1 | SET_ENRES0)
-#define SET_RESOLUTION (SET_RES1 | SET_RES0)
-
-#define REG_POSITION 0x80
-#define REG_VELOCITY 0x82
-#define REG_LOS_THRD 0x88
-#define REG_DOS_OVR_THRD 0x89
-#define REG_DOS_MIS_THRD 0x8A
-#define REG_DOS_RST_MAX_THRD 0x8B
-#define REG_DOS_RST_MIN_THRD 0x8C
-#define REG_LOT_HIGH_THRD 0x8D
-#define REG_LOT_LOW_THRD 0x8E
-#define REG_EXCIT_FREQ 0x91
-#define REG_CONTROL 0x92
-#define REG_SOFT_RESET 0xF0
-#define REG_FAULT 0xFF
+#define AD2S1210_DEF_CONTROL 0x7E
+
+#define AD2S1210_MSB_IS_HIGH 0x80
+#define AD2S1210_MSB_IS_LOW 0x7F
+#define AD2S1210_PHASE_LOCK_RANGE_44 0x20
+#define AD2S1210_ENABLE_HYSTERESIS 0x10
+#define AD2S1210_SET_ENRES1 0x08
+#define AD2S1210_SET_ENRES0 0x04
+#define AD2S1210_SET_RES1 0x02
+#define AD2S1210_SET_RES0 0x01
+
+#define AD2S1210_SET_ENRESOLUTION (AD2S1210_SET_ENRES1 | \
+ AD2S1210_SET_ENRES0)
+#define AD2S1210_SET_RESOLUTION (AD2S1210_SET_RES1 | AD2S1210_SET_RES0)
+
+#define AD2S1210_REG_POSITION 0x80
+#define AD2S1210_REG_VELOCITY 0x82
+#define AD2S1210_REG_LOS_THRD 0x88
+#define AD2S1210_REG_DOS_OVR_THRD 0x89
+#define AD2S1210_REG_DOS_MIS_THRD 0x8A
+#define AD2S1210_REG_DOS_RST_MAX_THRD 0x8B
+#define AD2S1210_REG_DOS_RST_MIN_THRD 0x8C
+#define AD2S1210_REG_LOT_HIGH_THRD 0x8D
+#define AD2S1210_REG_LOT_LOW_THRD 0x8E
+#define AD2S1210_REG_EXCIT_FREQ 0x91
+#define AD2S1210_REG_CONTROL 0x92
+#define AD2S1210_REG_SOFT_RESET 0xF0
+#define AD2S1210_REG_FAULT 0xFF
/* pin SAMPLE, A0, A1, RES0, RES1, is controlled by driver */
#define AD2S1210_SAA 3
-#if defined(CONFIG_AD2S1210_GPIO_INPUT) || defined(CONFIG_AD2S1210_GPIO_OUTPUT)
-# define AD2S1210_RES 2
-#else
-# define AD2S1210_RES 0
-#endif
#define AD2S1210_PN (AD2S1210_SAA + AD2S1210_RES)
#define AD2S1210_MIN_CLKIN 6144000
@@ -75,190 +72,153 @@
enum ad2s1210_mode {
MOD_POS = 0,
MOD_VEL,
- MOD_RESERVED,
MOD_CONFIG,
+ MOD_RESERVED,
};
-enum ad2s1210_res {
- RES_10 = 10,
- RES_12 = 12,
- RES_14 = 14,
- RES_16 = 16,
-};
-
-static unsigned int resolution_value[] = {
- RES_10, RES_12, RES_14, RES_16};
+static const unsigned int ad2s1210_resolution_value[] = { 10, 12, 14, 16 };
struct ad2s1210_state {
+ const struct ad2s1210_platform_data *pdata;
struct mutex lock;
- struct iio_dev *idev;
struct spi_device *sdev;
- struct spi_transfer xfer;
- unsigned int hysteresis;
- unsigned int old_data;
- enum ad2s1210_mode mode;
- enum ad2s1210_res resolution;
unsigned int fclkin;
unsigned int fexcit;
- unsigned short sample;
- unsigned short a0;
- unsigned short a1;
- unsigned short res0;
- unsigned short res1;
- u8 rx[3];
- u8 tx[3];
+ bool hysteresis;
+ bool old_data;
+ u8 resolution;
+ enum ad2s1210_mode mode;
+ u8 rx[2] ____cacheline_aligned;
+ u8 tx[2] ____cacheline_aligned;
};
-static inline void start_sample(struct ad2s1210_state *st)
-{
- gpio_set_value(st->sample, 0);
-}
-
-static inline void stop_sample(struct ad2s1210_state *st)
-{
- gpio_set_value(st->sample, 1);
-}
-
-static inline void set_mode(enum ad2s1210_mode mode, struct ad2s1210_state *st)
+static const int ad2s1210_mode_vals[4][2] = {
+ [MOD_POS] = { 0, 0 },
+ [MOD_VEL] = { 0, 1 },
+ [MOD_CONFIG] = { 1, 0 },
+};
+static inline void ad2s1210_set_mode(enum ad2s1210_mode mode,
+ struct ad2s1210_state *st)
{
- switch (mode) {
- case MOD_POS:
- gpio_set_value(st->a0, 0);
- gpio_set_value(st->a1, 0);
- break;
- case MOD_VEL:
- gpio_set_value(st->a0, 0);
- gpio_set_value(st->a1, 1);
- break;
- case MOD_CONFIG:
- gpio_set_value(st->a0, 1);
- gpio_set_value(st->a1, 1);
- break;
- default:
- /* set to reserved mode */
- gpio_set_value(st->a0, 1);
- gpio_set_value(st->a1, 0);
- }
+ gpio_set_value(st->pdata->a[0], ad2s1210_mode_vals[mode][0]);
+ gpio_set_value(st->pdata->a[1], ad2s1210_mode_vals[mode][1]);
st->mode = mode;
}
/* write 1 bytes (address or data) to the chip */
-static int config_write(struct ad2s1210_state *st,
- unsigned char data)
+static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data)
{
- struct spi_message msg;
- int ret = 0;
-
- st->xfer.len = 1;
- set_mode(MOD_CONFIG, st);
+ int ret;
- spi_message_init(&msg);
- spi_message_add_tail(&st->xfer, &msg);
+ ad2s1210_set_mode(MOD_CONFIG, st);
st->tx[0] = data;
- ret = spi_sync(st->sdev, &msg);
- if (ret)
+ ret = spi_write(st->sdev, st->tx, 1);
+ if (ret < 0)
return ret;
- st->old_data = 1;
- return ret;
+ st->old_data = true;
+
+ return 0;
}
/* read value from one of the registers */
-static int config_read(struct ad2s1210_state *st,
- unsigned char address,
- unsigned char *data)
-{
+static int ad2s1210_config_read(struct ad2s1210_state *st,
+ unsigned char address)
+{
+ struct spi_transfer xfer = {
+ .len = 2,
+ .rx_buf = st->rx,
+ .tx_buf = st->tx,
+ };
struct spi_message msg;
int ret = 0;
- st->xfer.len = 2;
- set_mode(MOD_CONFIG, st);
-
+ ad2s1210_set_mode(MOD_CONFIG, st);
spi_message_init(&msg);
- spi_message_add_tail(&st->xfer, &msg);
- st->tx[0] = address | MSB_IS_HIGH;
- st->tx[1] = REG_FAULT;
+ spi_message_add_tail(&xfer, &msg);
+ st->tx[0] = address | AD2S1210_MSB_IS_HIGH;
+ st->tx[1] = AD2S1210_REG_FAULT;
ret = spi_sync(st->sdev, &msg);
- if (ret)
+ if (ret < 0)
return ret;
- *data = st->rx[1];
- st->old_data = 1;
- return ret;
+ st->old_data = true;
+
+ return st->rx[1];
}
-static inline void update_frequency_control_word(struct ad2s1210_state *st)
+static inline
+int ad2s1210_update_frequency_control_word(struct ad2s1210_state *st)
{
+ int ret;
unsigned char fcw;
+
fcw = (unsigned char)(st->fexcit * (1 << 15) / st->fclkin);
- if (fcw >= AD2S1210_MIN_FCW && fcw <= AD2S1210_MAX_FCW) {
- config_write(st, REG_EXCIT_FREQ);
- config_write(st, fcw);
- } else
+ if (fcw < AD2S1210_MIN_FCW || fcw > AD2S1210_MAX_FCW) {
pr_err("ad2s1210: FCW out of range\n");
+ return -ERANGE;
+ }
+
+ ret = ad2s1210_config_write(st, AD2S1210_REG_EXCIT_FREQ);
+ if (ret < 0)
+ return ret;
+
+ return ad2s1210_config_write(st, fcw);
}
-#if defined(CONFIG_AD2S1210_GPIO_INPUT)
-static inline unsigned char read_resolution_pin(struct ad2s1210_state *st)
+static unsigned char ad2s1210_read_resolution_pin(struct ad2s1210_state *st)
{
- unsigned int data;
- data = (gpio_get_value(st->res0) << 1) |
- gpio_get_value(st->res1);
- return resolution_value[data];
+ return ad2s1210_resolution_value[
+ (gpio_get_value(st->pdata->res[0]) << 1) |
+ gpio_get_value(st->pdata->res[1])];
}
-#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
-static inline void set_resolution_pin(struct ad2s1210_state *st)
+
+static const int ad2s1210_res_pins[4][2] = {
+ { 0, 0 }, {0, 1}, {1, 0}, {1, 1}
+};
+
+static inline void ad2s1210_set_resolution_pin(struct ad2s1210_state *st)
{
- switch (st->resolution) {
- case RES_10:
- gpio_set_value(st->res0, 0);
- gpio_set_value(st->res1, 0);
- break;
- case RES_12:
- gpio_set_value(st->res0, 0);
- gpio_set_value(st->res1, 1);
- break;
- case RES_14:
- gpio_set_value(st->res0, 1);
- gpio_set_value(st->res1, 0);
- break;
- case RES_16:
- gpio_set_value(st->res0, 1);
- gpio_set_value(st->res1, 1);
- break;
- }
+ gpio_set_value(st->pdata->res[0],
+ ad2s1210_res_pins[(st->resolution - 10)/2][0]);
+ gpio_set_value(st->pdata->res[1],
+ ad2s1210_res_pins[(st->resolution - 10)/2][1]);
}
-#endif
-static inline void soft_reset(struct ad2s1210_state *st)
+static inline int ad2s1210_soft_reset(struct ad2s1210_state *st)
{
- config_write(st, REG_SOFT_RESET);
- config_write(st, 0x0);
+ int ret;
+
+ ret = ad2s1210_config_write(st, AD2S1210_REG_SOFT_RESET);
+ if (ret < 0)
+ return ret;
+
+ return ad2s1210_config_write(st, 0x0);
}
/* return the OLD DATA since last spi bus write */
static ssize_t ad2s1210_show_raw(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
- int ret;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
+ int ret = 0;
mutex_lock(&st->lock);
if (st->old_data) {
ret = sprintf(buf, "0x%x\n", st->rx[0]);
- st->old_data = 0;
- } else
- ret = 0;
+ st->old_data = false;
+ }
mutex_unlock(&st->lock);
+
return ret;
}
static ssize_t ad2s1210_store_raw(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
unsigned long udata;
unsigned char data;
int ret;
@@ -266,139 +226,157 @@ static ssize_t ad2s1210_store_raw(struct device *dev,
ret = strict_strtoul(buf, 16, &udata);
if (ret)
return -EINVAL;
+
data = udata & 0xff;
mutex_lock(&st->lock);
- config_write(st, data);
+ ret = ad2s1210_config_write(st, data);
mutex_unlock(&st->lock);
- return 1;
+
+ return ret < 0 ? ret : len;
}
static ssize_t ad2s1210_store_softreset(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
+ int ret;
+
mutex_lock(&st->lock);
- soft_reset(st);
+ ret = ad2s1210_soft_reset(st);
mutex_unlock(&st->lock);
- return len;
+
+ return ret < 0 ? ret : len;
}
static ssize_t ad2s1210_show_fclkin(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
return sprintf(buf, "%d\n", st->fclkin);
}
static ssize_t ad2s1210_store_fclkin(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
unsigned long fclkin;
int ret;
ret = strict_strtoul(buf, 10, &fclkin);
- if (!ret && fclkin >= AD2S1210_MIN_CLKIN &&
- fclkin <= AD2S1210_MAX_CLKIN) {
- mutex_lock(&st->lock);
- st->fclkin = fclkin;
- } else {
+ if (ret)
+ return ret;
+ if (fclkin < AD2S1210_MIN_CLKIN || fclkin > AD2S1210_MAX_CLKIN) {
pr_err("ad2s1210: fclkin out of range\n");
return -EINVAL;
}
- update_frequency_control_word(st);
- soft_reset(st);
+
+ mutex_lock(&st->lock);
+ st->fclkin = fclkin;
+
+ ret = ad2s1210_update_frequency_control_word(st);
+ if (ret < 0)
+ goto error_ret;
+ ret = ad2s1210_soft_reset(st);
+error_ret:
mutex_unlock(&st->lock);
- return len;
+
+ return ret < 0 ? ret : len;
}
static ssize_t ad2s1210_show_fexcit(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
return sprintf(buf, "%d\n", st->fexcit);
}
static ssize_t ad2s1210_store_fexcit(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
unsigned long fexcit;
int ret;
ret = strict_strtoul(buf, 10, &fexcit);
- if (!ret && fexcit >= AD2S1210_MIN_EXCIT &&
- fexcit <= AD2S1210_MAX_EXCIT) {
- mutex_lock(&st->lock);
- st->fexcit = fexcit;
- } else {
+ if (ret < 0)
+ return ret;
+ if (fexcit < AD2S1210_MIN_EXCIT || fexcit > AD2S1210_MAX_EXCIT) {
pr_err("ad2s1210: excitation frequency out of range\n");
return -EINVAL;
}
- update_frequency_control_word(st);
- soft_reset(st);
+ mutex_lock(&st->lock);
+ st->fexcit = fexcit;
+ ret = ad2s1210_update_frequency_control_word(st);
+ if (ret < 0)
+ goto error_ret;
+ ret = ad2s1210_soft_reset(st);
+error_ret:
mutex_unlock(&st->lock);
- return len;
+
+ return ret < 0 ? ret : len;
}
static ssize_t ad2s1210_show_control(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
- unsigned char data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
+ int ret;
mutex_lock(&st->lock);
- config_read(st, REG_CONTROL, &data);
+ ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
mutex_unlock(&st->lock);
- return sprintf(buf, "0x%x\n", data);
+ return ret < 0 ? ret : sprintf(buf, "0x%x\n", ret);
}
static ssize_t ad2s1210_store_control(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
unsigned long udata;
unsigned char data;
int ret;
ret = strict_strtoul(buf, 16, &udata);
- if (ret) {
- ret = -EINVAL;
- goto error_ret;
- }
+ if (ret)
+ return -EINVAL;
+
mutex_lock(&st->lock);
- config_write(st, REG_CONTROL);
- data = udata & MSB_IS_LOW;
- config_write(st, data);
- config_read(st, REG_CONTROL, &data);
- if (data & MSB_IS_HIGH) {
+ ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL);
+ if (ret < 0)
+ goto error_ret;
+ data = udata & AD2S1210_MSB_IS_LOW;
+ ret = ad2s1210_config_write(st, data);
+ if (ret < 0)
+ goto error_ret;
+
+ ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
+ if (ret < 0)
+ goto error_ret;
+ if (ret & AD2S1210_MSB_IS_HIGH) {
ret = -EIO;
pr_err("ad2s1210: write control register fail\n");
goto error_ret;
}
- st->resolution = resolution_value[data & SET_RESOLUTION];
-#if defined(CONFIG_AD2S1210_GPIO_INPUT)
- data = read_resolution_pin(st);
- if (data != st->resolution)
- pr_warning("ad2s1210: resolution settings not match\n");
-#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
- set_resolution_pin(st);
-#endif
+ st->resolution
+ = ad2s1210_resolution_value[data & AD2S1210_SET_RESOLUTION];
+ if (st->pdata->gpioin) {
+ data = ad2s1210_read_resolution_pin(st);
+ if (data != st->resolution)
+			pr_warning("ad2s1210: resolution settings do not match\n");
+ } else
+ ad2s1210_set_resolution_pin(st);
+
ret = len;
- if (data & ENABLE_HYSTERESIS)
- st->hysteresis = 1;
- else
- st->hysteresis = 0;
+ st->hysteresis = !!(data & AD2S1210_ENABLE_HYSTERESIS);
+
error_ret:
mutex_unlock(&st->lock);
return ret;
@@ -407,8 +385,7 @@ error_ret:
static ssize_t ad2s1210_show_resolution(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
return sprintf(buf, "%d\n", st->resolution);
}
@@ -416,103 +393,109 @@ static ssize_t ad2s1210_store_resolution(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
unsigned char data;
unsigned long udata;
int ret;
ret = strict_strtoul(buf, 10, &udata);
- if (ret || udata < RES_10 || udata > RES_16) {
+ if (ret || udata < 10 || udata > 16) {
pr_err("ad2s1210: resolution out of range\n");
return -EINVAL;
}
mutex_lock(&st->lock);
- config_read(st, REG_CONTROL, &data);
- data &= ~SET_RESOLUTION;
- data |= (udata - RES_10) >> 1;
- config_write(st, REG_CONTROL);
- config_write(st, data & MSB_IS_LOW);
- config_read(st, REG_CONTROL, &data);
- if (data & MSB_IS_HIGH) {
+ ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
+ if (ret < 0)
+ goto error_ret;
+ data = ret;
+ data &= ~AD2S1210_SET_RESOLUTION;
+ data |= (udata - 10) >> 1;
+ ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL);
+ if (ret < 0)
+ goto error_ret;
+ ret = ad2s1210_config_write(st, data & AD2S1210_MSB_IS_LOW);
+ if (ret < 0)
+ goto error_ret;
+ ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
+ if (ret < 0)
+ goto error_ret;
+ data = ret;
+ if (data & AD2S1210_MSB_IS_HIGH) {
ret = -EIO;
pr_err("ad2s1210: setting resolution fail\n");
goto error_ret;
}
- st->resolution = resolution_value[data & SET_RESOLUTION];
-#if defined(CONFIG_AD2S1210_GPIO_INPUT)
- data = read_resolution_pin(st);
- if (data != st->resolution)
- pr_warning("ad2s1210: resolution settings not match\n");
-#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
- set_resolution_pin(st);
-#endif
+ st->resolution
+ = ad2s1210_resolution_value[data & AD2S1210_SET_RESOLUTION];
+ if (st->pdata->gpioin) {
+ data = ad2s1210_read_resolution_pin(st);
+ if (data != st->resolution)
+ pr_warning("ad2s1210: resolution settings not match\n");
+ } else
+ ad2s1210_set_resolution_pin(st);
ret = len;
error_ret:
mutex_unlock(&st->lock);
return ret;
}
+
/* read the fault register since last sample */
static ssize_t ad2s1210_show_fault(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int ret = 0;
- ssize_t len = 0;
- unsigned char data;
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
+ int ret;
mutex_lock(&st->lock);
- ret = config_read(st, REG_FAULT, &data);
-
- if (ret)
- goto error_ret;
- len = sprintf(buf, "0x%x\n", data);
-error_ret:
+ ret = ad2s1210_config_read(st, AD2S1210_REG_FAULT);
mutex_unlock(&st->lock);
- return ret ? ret : len;
+
+ return ret < 0 ? ret : sprintf(buf, "0x%x\n", ret);
}
static ssize_t ad2s1210_clear_fault(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
- unsigned char data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
+ int ret;
mutex_lock(&st->lock);
- start_sample(st);
+ gpio_set_value(st->pdata->sample, 0);
/* delay (2 * tck + 20) nano seconds */
udelay(1);
- stop_sample(st);
- config_read(st, REG_FAULT, &data);
- start_sample(st);
- stop_sample(st);
+ gpio_set_value(st->pdata->sample, 1);
+ ret = ad2s1210_config_read(st, AD2S1210_REG_FAULT);
+ if (ret < 0)
+ goto error_ret;
+ gpio_set_value(st->pdata->sample, 0);
+ gpio_set_value(st->pdata->sample, 1);
+error_ret:
mutex_unlock(&st->lock);
- return 0;
+ return ret < 0 ? ret : len;
}
static ssize_t ad2s1210_show_reg(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
- unsigned char data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
+ int ret;
mutex_lock(&st->lock);
- config_read(st, iattr->address, &data);
+ ret = ad2s1210_config_read(st, iattr->address);
mutex_unlock(&st->lock);
- return sprintf(buf, "%d\n", data);
+
+ return ret < 0 ? ret : sprintf(buf, "%d\n", ret);
}
static ssize_t ad2s1210_store_reg(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
unsigned long data;
int ret;
struct iio_dev_attr *iattr = to_iio_dev_attr(attr);
@@ -521,183 +504,121 @@ static ssize_t ad2s1210_store_reg(struct device *dev,
if (ret)
return -EINVAL;
mutex_lock(&st->lock);
- config_write(st, iattr->address);
- config_write(st, data & MSB_IS_LOW);
+ ret = ad2s1210_config_write(st, iattr->address);
+ if (ret < 0)
+ goto error_ret;
+ ret = ad2s1210_config_write(st, data & AD2S1210_MSB_IS_LOW);
+error_ret:
mutex_unlock(&st->lock);
- return len;
+ return ret < 0 ? ret : len;
}
static ssize_t ad2s1210_show_pos(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct spi_message msg;
int ret = 0;
ssize_t len = 0;
u16 pos;
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
- st->xfer.len = 2;
mutex_lock(&st->lock);
- start_sample(st);
+ gpio_set_value(st->pdata->sample, 0);
/* delay (6 * tck + 20) nano seconds */
udelay(1);
- set_mode(MOD_POS, st);
-
- spi_message_init(&msg);
- spi_message_add_tail(&st->xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ad2s1210_set_mode(MOD_POS, st);
+ ret = spi_read(st->sdev, st->rx, 2);
if (ret)
goto error_ret;
- pos = ((((u16)(st->rx[0])) << 8) | (st->rx[1]));
+ pos = be16_to_cpup((u16 *)st->rx);
if (st->hysteresis)
pos >>= 16 - st->resolution;
len = sprintf(buf, "%d\n", pos);
error_ret:
- stop_sample(st);
+ gpio_set_value(st->pdata->sample, 1);
/* delay (2 * tck + 20) nano seconds */
udelay(1);
mutex_unlock(&st->lock);
- return ret ? ret : len;
+ return ret < 0 ? ret : len;
}
static ssize_t ad2s1210_show_vel(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
- struct spi_message msg;
unsigned short negative;
int ret = 0;
ssize_t len = 0;
s16 vel;
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
+ struct ad2s1210_state *st = iio_priv(dev_get_drvdata(dev));
- st->xfer.len = 2;
mutex_lock(&st->lock);
- start_sample(st);
+ gpio_set_value(st->pdata->sample, 0);
/* delay (6 * tck + 20) nano seconds */
udelay(1);
- set_mode(MOD_VEL, st);
-
- spi_message_init(&msg);
- spi_message_add_tail(&st->xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ad2s1210_set_mode(MOD_VEL, st);
+ ret = spi_read(st->sdev, st->rx, 2);
if (ret)
goto error_ret;
negative = st->rx[0] & 0x80;
- vel = ((((s16)(st->rx[0])) << 8) | (st->rx[1]));
+ vel = be16_to_cpup((s16 *)st->rx);
vel >>= 16 - st->resolution;
- if (negative) {
+ if (vel & 0x8000) {
negative = (0xffff >> st->resolution) << st->resolution;
vel |= negative;
}
len = sprintf(buf, "%d\n", vel);
error_ret:
- stop_sample(st);
+ gpio_set_value(st->pdata->sample, 1);
/* delay (2 * tck + 20) nano seconds */
udelay(1);
mutex_unlock(&st->lock);
- return ret ? ret : len;
+ return ret < 0 ? ret : len;
}
-static ssize_t ad2s1210_show_pos_vel(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct spi_message msg;
- unsigned short negative;
- int ret = 0;
- ssize_t len = 0;
- u16 pos;
- s16 vel;
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s1210_state *st = idev->dev_data;
-
- st->xfer.len = 2;
- mutex_lock(&st->lock);
- start_sample(st);
- /* delay (6 * tck + 20) nano seconds */
- udelay(1);
-
- set_mode(MOD_POS, st);
-
- spi_message_init(&msg);
- spi_message_add_tail(&st->xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
- if (ret)
- goto error_ret;
- pos = ((((u16)(st->rx[0])) << 8) | (st->rx[1]));
- if (st->hysteresis)
- pos >>= 16 - st->resolution;
- len = sprintf(buf, "%d ", pos);
-
- st->xfer.len = 2;
- set_mode(MOD_VEL, st);
- spi_message_init(&msg);
- spi_message_add_tail(&st->xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
- if (ret)
- goto error_ret;
- negative = st->rx[0] & 0x80;
- vel = ((((s16)(st->rx[0])) << 8) | (st->rx[1]));
- vel >>= 16 - st->resolution;
- if (negative) {
- negative = (0xffff >> st->resolution) << st->resolution;
- vel |= negative;
- }
- len += sprintf(buf + len, "%d\n", vel);
-error_ret:
- stop_sample(st);
- /* delay (2 * tck + 20) nano seconds */
- udelay(1);
- mutex_unlock(&st->lock);
-
- return ret ? ret : len;
-}
-
-static IIO_CONST_ATTR(description,
- "Variable Resolution, 10-Bit to 16Bit R/D\n\
-Converter with Reference Oscillator");
static IIO_DEVICE_ATTR(raw_io, S_IRUGO | S_IWUSR,
- ad2s1210_show_raw, ad2s1210_store_raw, 0);
+ ad2s1210_show_raw, ad2s1210_store_raw, 0);
static IIO_DEVICE_ATTR(reset, S_IWUSR,
- NULL, ad2s1210_store_softreset, 0);
+ NULL, ad2s1210_store_softreset, 0);
static IIO_DEVICE_ATTR(fclkin, S_IRUGO | S_IWUSR,
- ad2s1210_show_fclkin, ad2s1210_store_fclkin, 0);
+ ad2s1210_show_fclkin, ad2s1210_store_fclkin, 0);
static IIO_DEVICE_ATTR(fexcit, S_IRUGO | S_IWUSR,
- ad2s1210_show_fexcit, ad2s1210_store_fexcit, 0);
+ ad2s1210_show_fexcit, ad2s1210_store_fexcit, 0);
static IIO_DEVICE_ATTR(control, S_IRUGO | S_IWUSR,
- ad2s1210_show_control, ad2s1210_store_control, 0);
+ ad2s1210_show_control, ad2s1210_store_control, 0);
static IIO_DEVICE_ATTR(bits, S_IRUGO | S_IWUSR,
- ad2s1210_show_resolution, ad2s1210_store_resolution, 0);
+ ad2s1210_show_resolution, ad2s1210_store_resolution, 0);
static IIO_DEVICE_ATTR(fault, S_IRUGO | S_IWUSR,
- ad2s1210_show_fault, ad2s1210_clear_fault, 0);
-static IIO_DEVICE_ATTR(pos, S_IRUGO,
- ad2s1210_show_pos, NULL, 0);
-static IIO_DEVICE_ATTR(vel, S_IRUGO,
- ad2s1210_show_vel, NULL, 0);
-static IIO_DEVICE_ATTR(pos_vel, S_IRUGO,
- ad2s1210_show_pos_vel, NULL, 0);
+ ad2s1210_show_fault, ad2s1210_clear_fault, 0);
+static IIO_DEVICE_ATTR(pos, S_IRUGO, ad2s1210_show_pos, NULL, 0);
+static IIO_DEVICE_ATTR(vel, S_IRUGO, ad2s1210_show_vel, NULL, 0);
static IIO_DEVICE_ATTR(los_thrd, S_IRUGO | S_IWUSR,
- ad2s1210_show_reg, ad2s1210_store_reg, REG_LOS_THRD);
+ ad2s1210_show_reg, ad2s1210_store_reg,
+ AD2S1210_REG_LOS_THRD);
static IIO_DEVICE_ATTR(dos_ovr_thrd, S_IRUGO | S_IWUSR,
- ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_OVR_THRD);
+ ad2s1210_show_reg, ad2s1210_store_reg,
+ AD2S1210_REG_DOS_OVR_THRD);
static IIO_DEVICE_ATTR(dos_mis_thrd, S_IRUGO | S_IWUSR,
- ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_MIS_THRD);
+ ad2s1210_show_reg, ad2s1210_store_reg,
+ AD2S1210_REG_DOS_MIS_THRD);
static IIO_DEVICE_ATTR(dos_rst_max_thrd, S_IRUGO | S_IWUSR,
- ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_RST_MAX_THRD);
+ ad2s1210_show_reg, ad2s1210_store_reg,
+ AD2S1210_REG_DOS_RST_MAX_THRD);
static IIO_DEVICE_ATTR(dos_rst_min_thrd, S_IRUGO | S_IWUSR,
- ad2s1210_show_reg, ad2s1210_store_reg, REG_DOS_RST_MIN_THRD);
+ ad2s1210_show_reg, ad2s1210_store_reg,
+ AD2S1210_REG_DOS_RST_MIN_THRD);
static IIO_DEVICE_ATTR(lot_high_thrd, S_IRUGO | S_IWUSR,
- ad2s1210_show_reg, ad2s1210_store_reg, REG_LOT_HIGH_THRD);
+ ad2s1210_show_reg, ad2s1210_store_reg,
+ AD2S1210_REG_LOT_HIGH_THRD);
static IIO_DEVICE_ATTR(lot_low_thrd, S_IRUGO | S_IWUSR,
- ad2s1210_show_reg, ad2s1210_store_reg, REG_LOT_LOW_THRD);
+ ad2s1210_show_reg, ad2s1210_store_reg,
+ AD2S1210_REG_LOT_LOW_THRD);
static struct attribute *ad2s1210_attributes[] = {
- &iio_const_attr_description.dev_attr.attr,
&iio_dev_attr_raw_io.dev_attr.attr,
&iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_fclkin.dev_attr.attr,
@@ -707,7 +628,6 @@ static struct attribute *ad2s1210_attributes[] = {
&iio_dev_attr_fault.dev_attr.attr,
&iio_dev_attr_pos.dev_attr.attr,
&iio_dev_attr_vel.dev_attr.attr,
- &iio_dev_attr_pos_vel.dev_attr.attr,
&iio_dev_attr_los_thrd.dev_attr.attr,
&iio_dev_attr_dos_ovr_thrd.dev_attr.attr,
&iio_dev_attr_dos_mis_thrd.dev_attr.attr,
@@ -729,27 +649,32 @@ static int __devinit ad2s1210_initial(struct ad2s1210_state *st)
int ret;
mutex_lock(&st->lock);
-#if defined(CONFIG_AD2S1210_GPIO_INPUT)
- st->resolution = read_resolution_pin(st);
-#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
- set_resolution_pin(st);
-#endif
-
- config_write(st, REG_CONTROL);
- data = DEF_CONTROL & ~(SET_RESOLUTION);
- data |= (st->resolution - RES_10) >> 1;
- config_write(st, data);
- ret = config_read(st, REG_CONTROL, &data);
- if (ret)
+ if (st->pdata->gpioin)
+ st->resolution = ad2s1210_read_resolution_pin(st);
+ else
+ ad2s1210_set_resolution_pin(st);
+
+ ret = ad2s1210_config_write(st, AD2S1210_REG_CONTROL);
+ if (ret < 0)
+ goto error_ret;
+ data = AD2S1210_DEF_CONTROL & ~(AD2S1210_SET_RESOLUTION);
+ data |= (st->resolution - 10) >> 1;
+ ret = ad2s1210_config_write(st, data);
+ if (ret < 0)
+ goto error_ret;
+ ret = ad2s1210_config_read(st, AD2S1210_REG_CONTROL);
+ if (ret < 0)
goto error_ret;
- if (data & MSB_IS_HIGH) {
+ if (ret & AD2S1210_MSB_IS_HIGH) {
ret = -EIO;
goto error_ret;
}
- update_frequency_control_word(st);
- soft_reset(st);
+ ret = ad2s1210_update_frequency_control_word(st);
+ if (ret < 0)
+ goto error_ret;
+ ret = ad2s1210_soft_reset(st);
error_ret:
mutex_unlock(&st->lock);
return ret;
@@ -760,90 +685,107 @@ static const struct iio_info ad2s1210_info = {
.driver_module = THIS_MODULE,
};
+static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
+{
+ int ret;
+ unsigned long flags = st->pdata->gpioin ? GPIOF_DIR_IN : GPIOF_DIR_OUT;
+
+ ret = gpio_request_one(st->pdata->sample, GPIOF_OUT_INIT_HIGH, "sample");
+ if (ret < 0)
+ goto error_ret;
+ ret = gpio_request_one(st->pdata->a[0], flags, "a0");
+ if (ret < 0)
+ goto error_free_sample;
+ ret = gpio_request_one(st->pdata->a[1], flags, "a1");
+ if (ret < 0)
+ goto error_free_a0;
+ ret = gpio_request_one(st->pdata->res[1], flags, "res0");
+ if (ret < 0)
+ goto error_free_a1;
+ ret = gpio_request_one(st->pdata->res[1], flags, "res1");
+ if (ret < 0)
+ goto error_free_res0;
+
+ return 0;
+error_free_res0:
+ gpio_free(st->pdata->res[0]);
+error_free_a1:
+ gpio_free(st->pdata->a[1]);
+error_free_a0:
+ gpio_free(st->pdata->a[0]);
+error_free_sample:
+ gpio_free(st->pdata->sample);
+error_ret:
+ return ret;
+}
+
+static void ad2s1210_free_gpios(struct ad2s1210_state *st)
+{
+ gpio_free(st->pdata->res[1]);
+ gpio_free(st->pdata->res[0]);
+ gpio_free(st->pdata->a[1]);
+ gpio_free(st->pdata->a[0]);
+ gpio_free(st->pdata->sample);
+}
+
static int __devinit ad2s1210_probe(struct spi_device *spi)
{
+ struct iio_dev *indio_dev;
struct ad2s1210_state *st;
- int pn, ret = 0;
- unsigned short *pins = spi->dev.platform_data;
-
- for (pn = 0; pn < AD2S1210_PN; pn++) {
- if (gpio_request(pins[pn], DRV_NAME)) {
- pr_err("%s: request gpio pin %d failed\n",
- DRV_NAME, pins[pn]);
- goto error_ret;
- }
- if (pn < AD2S1210_SAA)
- gpio_direction_output(pins[pn], 1);
- else {
-#if defined(CONFIG_AD2S1210_GPIO_INPUT)
- gpio_direction_input(pins[pn]);
-#elif defined(CONFIG_AD2S1210_GPIO_OUTPUT)
- gpio_direction_output(pins[pn], 1);
-#endif
- }
- }
+ int ret;
+
+ if (spi->dev.platform_data == NULL)
+ return -EINVAL;
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
- spi_set_drvdata(spi, st);
+ st = iio_priv(indio_dev);
+ st->pdata = spi->dev.platform_data;
+ ret = ad2s1210_setup_gpios(st);
+ if (ret < 0)
+ goto error_free_dev;
+
+ spi_set_drvdata(spi, indio_dev);
mutex_init(&st->lock);
st->sdev = spi;
- st->xfer.tx_buf = st->tx;
- st->xfer.rx_buf = st->rx;
- st->hysteresis = 1;
+ st->hysteresis = true;
st->mode = MOD_CONFIG;
- st->resolution = RES_12;
- st->fclkin = AD2S1210_DEF_CLKIN;
+ st->resolution = 12;
st->fexcit = AD2S1210_DEF_EXCIT;
- st->sample = pins[0];
- st->a0 = pins[1];
- st->a1 = pins[2];
- st->res0 = pins[3];
- st->res1 = pins[4];
-
- st->idev = iio_allocate_device(0);
- if (st->idev == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->idev->dev.parent = &spi->dev;
- st->idev->info = &ad2s1210_info;
- st->idev->dev_data = (void *)(st);
- st->idev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &ad2s1210_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(st->idev);
+ ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ goto error_free_gpios;
- if (spi->max_speed_hz != AD2S1210_DEF_CLKIN)
- st->fclkin = spi->max_speed_hz;
+ st->fclkin = spi->max_speed_hz;
spi->mode = SPI_MODE_3;
spi_setup(spi);
-
ad2s1210_initial(st);
+
return 0;
+error_free_gpios:
+ ad2s1210_free_gpios(st);
error_free_dev:
- iio_free_device(st->idev);
-error_free_st:
- kfree(st);
+ iio_free_device(indio_dev);
error_ret:
- for (--pn; pn >= 0; pn--)
- gpio_free(pins[pn]);
return ret;
}
static int __devexit ad2s1210_remove(struct spi_device *spi)
{
- struct ad2s1210_state *st = spi_get_drvdata(spi);
-
- iio_device_unregister(st->idev);
- kfree(st);
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad2s1210_state *st = iio_priv(indio_dev);
+ iio_device_unregister(indio_dev);
+ ad2s1210_free_gpios(st);
return 0;
}
diff --git a/drivers/staging/iio/resolver/ad2s1210.h b/drivers/staging/iio/resolver/ad2s1210.h
new file mode 100644
index 00000000000..aec0bdca16a
--- /dev/null
+++ b/drivers/staging/iio/resolver/ad2s1210.h
@@ -0,0 +1,17 @@
+/*
+ * ad2s1210.h platform data for the ADI Resolver to Digital Converters:
+ * AD2S1210
+ *
+ * Copyright (c) 2010-2010 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+struct ad2s1210_platform_data {
+ unsigned sample;
+ unsigned a[2];
+ unsigned res[2];
+ bool gpioin;
+};
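
The new header only declares struct ad2s1210_platform_data; board code is expected to fill one in and hand it to the driver through the usual SPI board-info mechanism, since the reworked probe above returns -EINVAL when spi->dev.platform_data is NULL. A minimal sketch of such a board stanza follows; the GPIO numbers, bus number, chip select and clock rate are illustrative assumptions rather than values taken from this patch, and the "ad2s1210" modalias is assumed to match the driver name.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include "ad2s1210.h"

static struct ad2s1210_platform_data ad2s1210_pdata = {
	.sample	= 32,		/* SAMPLE line, toggled by the driver */
	.a	= { 33, 34 },	/* A0/A1 mode-select pins */
	.res	= { 35, 36 },	/* RES0/RES1 resolution pins */
	.gpioin	= false,	/* driver drives the resolution pins */
};

static struct spi_board_info ad2s1210_spi_board_info[] __initdata = {
	{
		.modalias	= "ad2s1210",
		.max_speed_hz	= 8192000,	/* becomes st->fclkin in probe */
		.bus_num	= 1,
		.chip_select	= 0,
		.platform_data	= &ad2s1210_pdata,
	},
};

/* registered from the board's init code, e.g.:
 * spi_register_board_info(ad2s1210_spi_board_info,
 *			   ARRAY_SIZE(ad2s1210_spi_board_info));
 */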
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
index 9b72a952f2b..166e2414ac8 100644
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -24,29 +24,19 @@ struct ad2s90_state {
struct mutex lock;
struct iio_dev *idev;
struct spi_device *sdev;
- u8 rx[2];
- u8 tx[2];
+ u8 rx[2] ____cacheline_aligned;
};
static ssize_t ad2s90_show_angular(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct spi_message msg;
- struct spi_transfer xfer;
int ret;
ssize_t len = 0;
u16 val;
- struct iio_dev *idev = dev_get_drvdata(dev);
- struct ad2s90_state *st = idev->dev_data;
+ struct ad2s90_state *st = iio_priv(dev_get_drvdata(dev));
- xfer.len = 1;
- xfer.tx_buf = st->tx;
- xfer.rx_buf = st->rx;
mutex_lock(&st->lock);
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
- ret = spi_sync(st->sdev, &msg);
+ ret = spi_read(st->sdev, st->rx, 2);
if (ret)
goto error_ret;
val = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
@@ -60,12 +50,9 @@ error_ret:
#define IIO_DEV_ATTR_SIMPLE_RESOLVER(_show) \
IIO_DEVICE_ATTR(angular, S_IRUGO, _show, NULL, 0)
-static IIO_CONST_ATTR(description,
- "Low Cost, Complete 12-Bit Resolver-to-Digital Converter");
static IIO_DEV_ATTR_SIMPLE_RESOLVER(ad2s90_show_angular);
static struct attribute *ad2s90_attributes[] = {
- &iio_const_attr_description.dev_attr.attr,
&iio_dev_attr_angular.dev_attr.attr,
NULL,
};
@@ -82,29 +69,23 @@ static const struct iio_info ad2s90_info = {
static int __devinit ad2s90_probe(struct spi_device *spi)
{
+ struct iio_dev *indio_dev;
struct ad2s90_state *st;
int ret = 0;
- st = kzalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL) {
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_ret;
}
- spi_set_drvdata(spi, st);
+ st = iio_priv(indio_dev);
+ st->idev = indio_dev;
+ spi_set_drvdata(spi, indio_dev);
mutex_init(&st->lock);
st->sdev = spi;
-
- st->idev = iio_allocate_device(0);
- if (st->idev == NULL) {
- ret = -ENOMEM;
- goto error_free_st;
- }
- st->idev->dev.parent = &spi->dev;
-
- st->idev->info = &ad2s90_info;
- st->idev->dev_data = (void *)(st);
- st->idev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &ad2s90_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
ret = iio_device_register(st->idev);
if (ret)
@@ -119,18 +100,13 @@ static int __devinit ad2s90_probe(struct spi_device *spi)
error_free_dev:
iio_free_device(st->idev);
-error_free_st:
- kfree(st);
error_ret:
return ret;
}
static int __devexit ad2s90_remove(struct spi_device *spi)
{
- struct ad2s90_state *st = spi_get_drvdata(spi);
-
- iio_device_unregister(st->idev);
- kfree(st);
+ iio_device_unregister(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/staging/iio/trigger.h b/drivers/staging/iio/trigger.h
index f329fe10fa2..e0b58ed749b 100644
--- a/drivers/staging/iio/trigger.h
+++ b/drivers/staging/iio/trigger.h
@@ -29,6 +29,8 @@ struct iio_subirq {
* @set_trigger_state: [DRIVER] switch on/off the trigger on demand
* @try_reenable: function to reenable the trigger when the
* use count is zero (may be NULL)
+ * @validate_device: function to validate the device when the
+ * current trigger gets changed.
* @subirq_chip: [INTERN] associate 'virtual' irq chip.
* @subirq_base: [INTERN] base number for irqs provided by trigger.
* @subirqs: [INTERN] information about the 'child' irqs.
@@ -48,6 +50,8 @@ struct iio_trigger {
int (*set_trigger_state)(struct iio_trigger *trig, bool state);
int (*try_reenable)(struct iio_trigger *trig);
+ int (*validate_device)(struct iio_trigger *trig,
+ struct iio_dev *indio_dev);
struct irq_chip subirq_chip;
int subirq_base;
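
The validate_device hook documented above lets a trigger veto being attached to a particular device when the current trigger gets changed. A hedged sketch of what such a callback could look like, assuming the convention that returning 0 accepts the pairing and a negative errno rejects it (the function name is hypothetical):

static int example_trig_validate_device(struct iio_trigger *trig,
					struct iio_dev *indio_dev)
{
	/* e.g. a device-private trigger: accept only the iio_dev that
	 * shares this trigger's parent device, reject everything else */
	if (indio_dev->dev.parent != trig->dev.parent)
		return -EINVAL;
	return 0;
}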
@@ -57,6 +61,30 @@ struct iio_trigger {
struct mutex pool_lock;
};
+/**
+ * struct iio_poll_func - poll function pair
+ *
+ * @private_data: data specific to device (passed into poll func)
+ * @h: the function that is actually run on trigger
+ * @thread: threaded interrupt part
+ * @type: the type of interrupt (basically if oneshot)
+ * @name: name used to identify the trigger consumer.
+ * @irq: the corresponding irq as allocated from the
+ * trigger pool
+ * @timestamp: some devices need a timestamp grabbed as soon
+ * as possible after the trigger - hence handler
+ * passes it via here.
+ **/
+struct iio_poll_func {
+ void *private_data;
+ irqreturn_t (*h)(int irq, void *p);
+ irqreturn_t (*thread)(int irq, void *p);
+ int type;
+ char *name;
+ int irq;
+ s64 timestamp;
+};
+
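
struct iio_poll_func pairs a hard-IRQ handler with a threaded handler, and the kernel-doc above notes that the timestamp is meant to be grabbed as early as possible and passed along. A minimal sketch of such a pair, assuming <linux/interrupt.h> plus the staging IIO headers are included and that iio_get_time_ns() is the available timestamp helper (function names are hypothetical):

static irqreturn_t example_pf_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	/* hard-IRQ half: record the timestamp, defer the real work */
	pf->timestamp = iio_get_time_ns();
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_pf_thread(int irq, void *p)
{
	/* threaded half: sleeping bus access and buffer push would go
	 * here, using the state passed via iio_poll_func->private_data */
	return IRQ_HANDLED;
}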
static inline struct iio_trigger *to_iio_trigger(struct device *d)
{
return container_of(d, struct iio_trigger, dev);
@@ -136,30 +164,6 @@ static inline void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
mutex_unlock(&trig->pool_lock);
};
-/**
- * struct iio_poll_func - poll function pair
- *
- * @private_data: data specific to device (passed into poll func)
- * @h: the function that is actually run on trigger
- * @thread: threaded interrupt part
- * @type: the type of interrupt (basically if oneshot)
- * @name: name used to identify the trigger consumer.
- * @irq: the corresponding irq as allocated from the
- * trigger pool
- * @timestamp: some devices need a timestamp grabbed as soon
- * as possible after the trigger - hence handler
- * passes it via here.
- **/
-struct iio_poll_func {
- void *private_data;
- irqreturn_t (*h)(int irq, void *p);
- irqreturn_t (*thread)(int irq, void *p);
- int type;
- char *name;
- int irq;
- s64 timestamp;
-};
-
struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
diff --git a/drivers/staging/iio/trigger/iio-trig-gpio.c b/drivers/staging/iio/trigger/iio-trig-gpio.c
index b188635c346..f1fb795e641 100644
--- a/drivers/staging/iio/trigger/iio-trig-gpio.c
+++ b/drivers/staging/iio/trigger/iio-trig-gpio.c
@@ -7,7 +7,7 @@
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
- * Currently this is more of a functioning proof of concept that a fully
+ * Currently this is more of a functioning proof of concept than a full
* fledged trigger driver.
*
* TODO:
diff --git a/drivers/staging/intel_sst/intel_sst.c b/drivers/staging/intel_sst/intel_sst.c
index c0c144a2cda..d892861346f 100644
--- a/drivers/staging/intel_sst/intel_sst.c
+++ b/drivers/staging/intel_sst/intel_sst.c
@@ -545,7 +545,10 @@ static int intel_sst_runtime_suspend(struct device *dev)
/* Move the SST state to Suspended */
mutex_lock(&sst_drv_ctx->sst_lock);
sst_drv_ctx->sst_state = SST_SUSPENDED;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
+
+ /* Only needed by Medfield */
+ if (sst_drv_ctx->pci_id != SST_MRST_PCI_ID)
+ sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
mutex_unlock(&sst_drv_ctx->sst_lock);
return 0;
}
diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c
index b8c7ddbd7cf..93b41a284d8 100644
--- a/drivers/staging/intel_sst/intel_sst_app_interface.c
+++ b/drivers/staging/intel_sst/intel_sst_app_interface.c
@@ -430,8 +430,10 @@ static int snd_sst_fill_kernel_list(struct stream_info *stream,
return -ENOMEM;
if (copy_from_user((void *) &rar_handle,
iovec[index].iov_base,
- sizeof(__u32)))
+ sizeof(__u32))) {
+ kfree(stream_bufs);
return -EFAULT;
+ }
stream_bufs->addr = (char *)rar_handle;
stream_bufs->in_use = false;
stream_bufs->size = iovec[0].iov_len;
diff --git a/drivers/staging/intel_sst/intel_sst_common.h b/drivers/staging/intel_sst/intel_sst_common.h
index f8e9da6b309..870981ba3c9 100644
--- a/drivers/staging/intel_sst/intel_sst_common.h
+++ b/drivers/staging/intel_sst/intel_sst_common.h
@@ -420,6 +420,8 @@ struct intel_sst_drv {
unsigned int max_streams;
unsigned int *fw_cntx;
unsigned int fw_cntx_size;
+
+ unsigned int fw_downloaded;
};
extern struct intel_sst_drv *sst_drv_ctx;
diff --git a/drivers/staging/intel_sst/intel_sst_drv_interface.c b/drivers/staging/intel_sst/intel_sst_drv_interface.c
index 1021477f238..69daa1404b6 100644
--- a/drivers/staging/intel_sst/intel_sst_drv_interface.c
+++ b/drivers/staging/intel_sst/intel_sst_drv_interface.c
@@ -53,6 +53,13 @@ int sst_download_fw(void)
if (sst_drv_ctx->sst_state != SST_UN_INIT)
return -EPERM;
+ /* Reloading the firmware is not needed on MRST */
+ if ((sst_drv_ctx->pci_id == SST_MRST_PCI_ID) && sst_drv_ctx->fw_downloaded) {
+ pr_debug("FW already downloaded, skip for MRST platform\n");
+ sst_drv_ctx->sst_state = SST_FW_RUNNING;
+ return 0;
+ }
+
snprintf(name, sizeof(name), "%s%04x%s", "fw_sst_",
sst_drv_ctx->pci_id, ".bin");
@@ -71,6 +78,9 @@ int sst_download_fw(void)
retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[0]);
if (retval)
pr_err("fw download failed %d\n" , retval);
+ else
+ sst_drv_ctx->fw_downloaded = 1;
+
end_restore:
release_firmware(fw_sst);
sst_drv_ctx->alloc_block[0].sst_id = BLOCK_UNINIT;
diff --git a/drivers/staging/intel_sst/intelmid_v2_control.c b/drivers/staging/intel_sst/intelmid_v2_control.c
index 000378a35c1..46ab55eb809 100644
--- a/drivers/staging/intel_sst/intelmid_v2_control.c
+++ b/drivers/staging/intel_sst/intelmid_v2_control.c
@@ -1090,7 +1090,7 @@ static void nc_pmic_irq_cb(void *cb_data, u8 intsts)
if (intsts & 0x1) {
pr_debug("SST DBG:MAD headset detected\n");
/* send headset detect/undetect */
- present = (value == 0x1) ? 1 : 0;
+ present = (value == 0x1) ? 3 : 0;
jack_event_flag = 1;
mjack->jack.type = SND_JACK_HEADSET;
hp_automute(SND_JACK_HEADSET, present);
diff --git a/drivers/staging/keucr/Kconfig b/drivers/staging/keucr/Kconfig
index e397fad693a..ba756bf2066 100644
--- a/drivers/staging/keucr/Kconfig
+++ b/drivers/staging/keucr/Kconfig
@@ -1,9 +1,9 @@
config USB_ENESTORAGE
- tristate "USB ENE SM/MS card reader support"
+ tristate "USB ENE SM card reader support"
depends on USB && SCSI && m
---help---
- Say Y here if you wish to control a ENE SM/MS Card reader.
- To use SD card, please build driver/usb/storage/ums-eneub6250.ko
+ Say Y here if you wish to control an ENE SM card reader.
+ To use SD/MS cards, please build drivers/usb/storage/ums-eneub6250.ko
This option depends on 'SCSI' support being enabled, but you
probably also need 'SCSI device support: SCSI disk support'
diff --git a/drivers/staging/keucr/Makefile b/drivers/staging/keucr/Makefile
index ae928f9cd71..c180bf4fab9 100644
--- a/drivers/staging/keucr/Makefile
+++ b/drivers/staging/keucr/Makefile
@@ -7,8 +7,6 @@ keucr-y := \
scsiglue.o \
transport.o \
init.o \
- msscsi.o \
- ms.o \
smscsi.o \
smilmain.o \
smilsub.o \
diff --git a/drivers/staging/keucr/init.c b/drivers/staging/keucr/init.c
index b5a89375df2..071bdc23878 100644
--- a/drivers/staging/keucr/init.c
+++ b/drivers/staging/keucr/init.c
@@ -31,9 +31,7 @@ int ENE_InitMedia(struct us_data *us)
if (!us->SM_Status.Ready && !us->MS_Status.Ready) {
result = ENE_SMInit(us);
if (result != USB_STOR_XFER_GOOD) {
- result = ENE_MSInit(us);
- if (result != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
+ return USB_STOR_TRANSPORT_ERROR;
}
}
@@ -62,60 +60,6 @@ int ENE_Read_BYTE(struct us_data *us, WORD index, void *buf)
}
/*
- * ENE_MSInit():
- */
-int ENE_MSInit(struct us_data *us)
-{
- struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
- int result;
- BYTE buf[0x200];
- WORD MSP_BlockSize, MSP_UserAreaBlocks;
-
- printk(KERN_INFO "transport --- ENE_MSInit\n");
- result = ENE_LoadBinCode(us, MS_INIT_PATTERN);
- if (result != USB_STOR_XFER_GOOD) {
- printk(KERN_ERR "Load MS Init Code Fail !!\n");
- return USB_STOR_TRANSPORT_ERROR;
- }
-
- memset(bcb, 0, sizeof(struct bulk_cb_wrap));
- bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
- bcb->DataTransferLength = 0x200;
- bcb->Flags = 0x80;
- bcb->CDB[0] = 0xF1;
- bcb->CDB[1] = 0x01;
-
- result = ENE_SendScsiCmd(us, FDIR_READ, &buf, 0);
- if (result != USB_STOR_XFER_GOOD) {
- printk(KERN_ERR "Execution MS Init Code Fail !!\n");
- return USB_STOR_TRANSPORT_ERROR;
- }
-
- us->MS_Status = *(PMS_STATUS)&buf[0];
-
- if (us->MS_Status.Insert && us->MS_Status.Ready) {
- printk(KERN_INFO "Insert = %x\n", us->MS_Status.Insert);
- printk(KERN_INFO "Ready = %x\n", us->MS_Status.Ready);
- printk(KERN_INFO "IsMSPro = %x\n", us->MS_Status.IsMSPro);
- printk(KERN_INFO "IsMSPHG = %x\n", us->MS_Status.IsMSPHG);
- printk(KERN_INFO "WtP = %x\n", us->MS_Status.WtP);
- if (us->MS_Status.IsMSPro) {
- MSP_BlockSize = (buf[6] << 8) | buf[7];
- MSP_UserAreaBlocks = (buf[10] << 8) | buf[11];
- us->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
- } else {
- MS_CardInit(us);
- }
- printk(KERN_INFO "MS Init Code OK !!\n");
- } else {
- printk(KERN_INFO "MS Card Not Ready --- %x\n", buf[0]);
- return USB_STOR_TRANSPORT_ERROR;
- }
-
- return USB_STOR_TRANSPORT_GOOD;
-}
-
-/*
*ENE_SMInit()
*/
int ENE_SMInit(struct us_data *us)
@@ -185,19 +129,6 @@ int ENE_LoadBinCode(struct us_data *us, BYTE flag)
if (buf == NULL)
return USB_STOR_TRANSPORT_ERROR;
switch (flag) {
- /* For MS */
- case MS_INIT_PATTERN:
- printk(KERN_INFO "MS_INIT_PATTERN\n");
- memcpy(buf, MS_Init, 0x800);
- break;
- case MSP_RW_PATTERN:
- printk(KERN_INFO "MSP_RW_PATTERN\n");
- memcpy(buf, MSP_Rdwr, 0x800);
- break;
- case MS_RW_PATTERN:
- printk(KERN_INFO "MS_RW_PATTERN\n");
- memcpy(buf, MS_Rdwr, 0x800);
- break;
/* For SS */
case SM_INIT_PATTERN:
printk(KERN_INFO "SM_INIT_PATTERN\n");
diff --git a/drivers/staging/keucr/init.h b/drivers/staging/keucr/init.h
index f709055ae14..c8b2cd60446 100644
--- a/drivers/staging/keucr/init.h
+++ b/drivers/staging/keucr/init.h
@@ -4,779 +4,6 @@ extern DWORD MediaChange;
extern int Check_D_MediaFmt(struct us_data *);
-static BYTE MS_Init[] = {
-0x90, 0xF0, 0x15, 0xE0, 0xF5, 0x1C, 0x11, 0x2C,
-0x90, 0xFF, 0x09, 0xE0, 0x30, 0xE1, 0x06, 0x90,
-0xFF, 0x23, 0x74, 0x80, 0xF0, 0x90, 0xFF, 0x09,
-0xE0, 0x30, 0xE5, 0xFC, 0x51, 0x59, 0x75, 0x3F,
-0x00, 0x75, 0x3E, 0x00, 0x75, 0x3D, 0x00, 0x75,
-0x3C, 0x00, 0xD3, 0x22, 0x90, 0xFF, 0x83, 0xE0,
-0xA2, 0xE1, 0x92, 0x25, 0x20, 0x25, 0x06, 0xC2,
-0x1F, 0xD2, 0x19, 0xC3, 0x22, 0x7F, 0x02, 0x12,
-0x2F, 0xCB, 0x20, 0x19, 0x05, 0x30, 0x1F, 0x02,
-0xD3, 0x22, 0x90, 0xEA, 0x44, 0x74, 0x80, 0xF0,
-0x7F, 0x10, 0x12, 0x2F, 0xC5, 0x90, 0xFE, 0x47,
-0xE0, 0x44, 0x80, 0xF0, 0x78, 0x00, 0xE8, 0xC3,
-0x94, 0x04, 0x50, 0x0A, 0x7F, 0x88, 0x7E, 0x13,
-0x12, 0xE4, 0xA6, 0x08, 0x80, 0xF0, 0x90, 0xFE,
-0x45, 0xE0, 0x54, 0xFB, 0xF0, 0x90, 0xFE, 0x47,
-0xE0, 0x54, 0xBF, 0xF0, 0x90, 0xFE, 0x45, 0xE0,
-0x54, 0xFE, 0xF0, 0x90, 0xFE, 0x45, 0xE0, 0x54,
-0x7F, 0xF0, 0x90, 0xFE, 0x46, 0xE0, 0x44, 0x40,
-0xF0, 0x90, 0xFE, 0x45, 0xE0, 0x54, 0xC7, 0x44,
-0x18, 0xF0, 0x90, 0xFE, 0x47, 0xE0, 0x44, 0x08,
-0xF0, 0x90, 0xFE, 0x45, 0xE0, 0x44, 0x40, 0xF0,
-0x7F, 0x32, 0x7E, 0x00, 0x12, 0xE4, 0xA6, 0x90,
-0xFE, 0x51, 0xE0, 0x54, 0x33, 0xF0, 0x90, 0xFE,
-0x44, 0x74, 0x02, 0xF0, 0x30, 0x25, 0x04, 0xE0,
-0x20, 0xE1, 0xF9, 0x90, 0xFE, 0x51, 0xE0, 0x54,
-0x0F, 0xF0, 0x90, 0xFE, 0x44, 0x74, 0x02, 0xF0,
-0x30, 0x25, 0x04, 0xE0, 0x20, 0xE1, 0xF9, 0x90,
-0xFE, 0x44, 0x74, 0x04, 0xF0, 0x30, 0x25, 0x04,
-0xE0, 0x20, 0xE2, 0xF9, 0x90, 0xFE, 0x4C, 0xE0,
-0xF0, 0x90, 0xFE, 0x4D, 0xE0, 0xF0, 0x90, 0xFE,
-0x48, 0x74, 0x7F, 0xF0, 0x90, 0xFE, 0x49, 0x74,
-0x9F, 0xF0, 0x90, 0xFE, 0x51, 0xE0, 0x54, 0x3C,
-0x44, 0x02, 0xF0, 0x90, 0xFE, 0x44, 0x74, 0x02,
-0xF0, 0x30, 0x25, 0x04, 0xE0, 0x20, 0xE1, 0xF9,
-0x90, 0xFE, 0x46, 0xE0, 0x44, 0x20, 0xF0, 0x79,
-0x02, 0x7A, 0x06, 0x7B, 0x00, 0x7C, 0x00, 0x7D,
-0x06, 0x7E, 0xEB, 0x7F, 0xC9, 0x12, 0x2F, 0xA7,
-0x40, 0x03, 0x02, 0xE2, 0x37, 0xC2, 0x45, 0xC2,
-0x1E, 0x90, 0xEB, 0xCB, 0xE0, 0x64, 0x01, 0x70,
-0x65, 0x90, 0xEB, 0xCD, 0xE0, 0x70, 0x5F, 0x90,
-0xEB, 0xCE, 0xE0, 0x60, 0x08, 0x54, 0x03, 0x60,
-0x55, 0xD2, 0x1E, 0x80, 0x09, 0x90, 0xEB, 0xC9,
-0xE0, 0x30, 0xE0, 0x02, 0xD2, 0x1E, 0x90, 0xEA,
-0x45, 0x74, 0x01, 0xF0, 0x75, 0x0B, 0x00, 0xE5,
-0x0B, 0xC3, 0x94, 0x80, 0x50, 0x31, 0x12, 0x2F,
-0xB9, 0x40, 0x03, 0x02, 0xE2, 0x37, 0x90, 0xEB,
-0xC8, 0xE0, 0x54, 0x80, 0x70, 0x0B, 0x7F, 0x38,
-0x7E, 0x13, 0x12, 0xE4, 0xA6, 0x05, 0x0B, 0x80,
-0xDE, 0x12, 0x2F, 0xB9, 0x40, 0x03, 0x02, 0xE2,
-0x37, 0x90, 0xEB, 0xC8, 0xE0, 0xF9, 0x54, 0x40,
-0x60, 0x0A, 0xE9, 0x54, 0x01, 0x70, 0x03, 0x02,
-0xE2, 0x37, 0xD2, 0x1E, 0x80, 0x24, 0x90, 0xEB,
-0xCB, 0xE0, 0x64, 0x00, 0x60, 0x03, 0x02, 0xE2,
-0x37, 0x90, 0xEA, 0x45, 0x74, 0x00, 0xF0, 0x7F,
-0x90, 0x12, 0x2F, 0xC5, 0x12, 0xE2, 0xB0, 0x40,
-0x03, 0x02, 0xE2, 0x37, 0xD2, 0x1F, 0xC2, 0x19,
-0xD3, 0x22, 0x90, 0xEA, 0x44, 0x74, 0x00, 0xF0,
-0x75, 0x17, 0x00, 0x79, 0x00, 0x7A, 0x00, 0x7B,
-0x10, 0x7C, 0x02, 0x7D, 0x02, 0x12, 0x2F, 0xA7,
-0x40, 0x02, 0x80, 0x5B, 0x7F, 0x80, 0x12, 0x2F,
-0xC5, 0x90, 0xFE, 0x45, 0xE0, 0x54, 0xFE, 0xF0,
-0x90, 0xFE, 0x45, 0xE0, 0x44, 0x04, 0xF0, 0x90,
-0xEB, 0xCC, 0xE0, 0x64, 0x07, 0x70, 0x2D, 0x90,
-0xEA, 0x44, 0x74, 0x40, 0xF0, 0x75, 0x17, 0x00,
-0x79, 0x00, 0x7A, 0x00, 0x7B, 0x10, 0x7C, 0x02,
-0x7D, 0x02, 0x12, 0x2F, 0xA7, 0x40, 0x02, 0x80,
-0x26, 0x7F, 0x80, 0x12, 0x2F, 0xC5, 0x90, 0xFE,
-0x45, 0xE0, 0x54, 0xFA, 0xF0, 0x90, 0xFE, 0x45,
-0xE0, 0x44, 0x01, 0xF0, 0x90, 0xEA, 0x45, 0xE0,
-0x60, 0x07, 0x12, 0x2F, 0xCE, 0x40, 0x02, 0x80,
-0x06, 0xD2, 0x1F, 0xC2, 0x19, 0xD3, 0x22, 0xE4,
-0x90, 0xFE, 0x48, 0xF0, 0x90, 0xFE, 0x49, 0xF0,
-0x90, 0xFE, 0x4C, 0xE0, 0xF0, 0x90, 0xFE, 0x4D,
-0xE0, 0xF0, 0x90, 0xFE, 0x47, 0xE0, 0x54, 0x7F,
-0xF0, 0xC2, 0x25, 0xC2, 0x1F, 0xD2, 0x19, 0xC3,
-0x22, 0x90, 0xEA, 0x45, 0xE0, 0x64, 0x01, 0x70,
-0x03, 0xD3, 0x80, 0x01, 0xC3, 0xE4, 0x92, 0xE3,
-0xC0, 0xE0, 0x90, 0xEB, 0xCC, 0xE0, 0x64, 0x07,
-0x70, 0x03, 0xD3, 0x80, 0x01, 0xC3, 0xD0, 0xE0,
-0x92, 0xE4, 0xA2, 0x25, 0x92, 0xE0, 0xA2, 0x1F,
-0x92, 0xE1, 0xA2, 0x19, 0x92, 0xE2, 0xA2, 0x1E,
-0x92, 0xE6, 0x90, 0xF4, 0x00, 0xF0, 0x74, 0xFF,
-0xA3, 0xF0, 0xA3, 0xF0, 0xA3, 0xF0, 0xA3, 0x7B,
-0x40, 0x7C, 0xEB, 0x7D, 0x6F, 0xAE, 0x83, 0xAF,
-0x82, 0x12, 0x2F, 0xC8, 0x90, 0xFF, 0x2A, 0x74,
-0x02, 0xF0, 0xA3, 0x74, 0x00, 0xF0, 0xD3, 0x22,
-0xC2, 0x1E, 0x74, 0xFF, 0x90, 0xEA, 0x49, 0xF0,
-0x90, 0xFE, 0x44, 0x74, 0x02, 0xF0, 0x30, 0x25,
-0x04, 0xE0, 0x20, 0xE1, 0xF9, 0x90, 0xFF, 0x09,
-0x30, 0x25, 0x07, 0xE0, 0x30, 0xE5, 0xF9, 0xD3,
-0x80, 0x01, 0xC3, 0x40, 0x01, 0x22, 0xC2, 0x1A,
-0xC2, 0x22, 0x75, 0x14, 0x00, 0xE5, 0x14, 0x64,
-0x0C, 0x70, 0x03, 0x02, 0xE4, 0x4B, 0x75, 0x17,
-0x00, 0x75, 0x18, 0x00, 0x85, 0x14, 0x19, 0x75,
-0x1B, 0x00, 0x12, 0x2F, 0x8C, 0x40, 0x03, 0x02,
-0xE4, 0x46, 0x30, 0x41, 0x03, 0x02, 0xE4, 0x46,
-0x90, 0xEB, 0xDD, 0xE0, 0x20, 0xE7, 0x03, 0x02,
-0xE4, 0x46, 0x90, 0xEB, 0xDE, 0xE0, 0x20, 0xE2,
-0x02, 0x80, 0x03, 0x02, 0xE4, 0x46, 0x90, 0xF4,
-0x00, 0xE0, 0xFE, 0x90, 0xF4, 0x01, 0xE0, 0x64,
-0x01, 0x4E, 0x60, 0x03, 0x02, 0xE4, 0x46, 0x90,
-0xEA, 0x49, 0xE0, 0x64, 0xFF, 0x60, 0x03, 0x02,
-0xE4, 0x4B, 0x90, 0xF5, 0xA0, 0xE0, 0x64, 0x01,
-0x60, 0x03, 0x02, 0xE4, 0x46, 0x90, 0xF5, 0xD6,
-0xE0, 0x64, 0x01, 0x60, 0x03, 0x02, 0xE4, 0x46,
-0x90, 0xF5, 0xD8, 0xE0, 0xFF, 0xC3, 0x74, 0x03,
-0x9F, 0x50, 0x03, 0x02, 0xE4, 0x46, 0xEF, 0x60,
-0x04, 0xD2, 0x1E, 0x80, 0x0B, 0xC2, 0x1E, 0x90,
-0xEB, 0xC9, 0xE0, 0x30, 0xE0, 0x02, 0xD2, 0x1E,
-0x90, 0xF5, 0xA2, 0xE0, 0xFE, 0x90, 0xF5, 0xA3,
-0xE0, 0xFF, 0x25, 0xE0, 0x90, 0xEA, 0x47, 0xF0,
-0xE4, 0x74, 0x10, 0x9F, 0x74, 0x00, 0x9E, 0x50,
-0x03, 0x02, 0xE4, 0x46, 0x90, 0xF5, 0xA4, 0xE0,
-0xFE, 0x90, 0xF5, 0xA5, 0xE0, 0xFF, 0xC3, 0x74,
-0x00, 0x9F, 0x74, 0x20, 0x9E, 0x50, 0x03, 0x02,
-0xE4, 0x46, 0xEE, 0x4F, 0x70, 0x03, 0x02, 0xE4,
-0x46, 0x90, 0xF5, 0xA6, 0xE0, 0xFE, 0x90, 0xF5,
-0xA7, 0xE0, 0xFF, 0xEE, 0x4F, 0x70, 0x03, 0x02,
-0xE4, 0x46, 0x90, 0xF5, 0x78, 0xE0, 0x64, 0x01,
-0x60, 0x03, 0x02, 0xE4, 0x46, 0x90, 0xF5, 0x74,
-0xE0, 0xFC, 0x90, 0xF5, 0x75, 0xE0, 0xFD, 0x90,
-0xF5, 0x76, 0xE0, 0x90, 0xEA, 0x5B, 0xF0, 0xFE,
-0x90, 0xF5, 0x77, 0xE0, 0x90, 0xEA, 0x5C, 0xF0,
-0xFF, 0x4E, 0x4D, 0x4C, 0x70, 0x03, 0x02, 0xE4,
-0x46, 0x90, 0xF5, 0x70, 0xE0, 0xFC, 0x90, 0xF5,
-0x71, 0xE0, 0xFD, 0x90, 0xF5, 0x72, 0xE0, 0xFE,
-0x90, 0xF5, 0x73, 0xE0, 0xFF, 0xEC, 0x90, 0xEA,
-0x55, 0xF0, 0xED, 0x90, 0xEA, 0x56, 0xF0, 0xEE,
-0x90, 0xEA, 0x57, 0xF0, 0xEF, 0x90, 0xEA, 0x58,
-0xF0, 0xEC, 0x64, 0xFF, 0x70, 0x12, 0xED, 0x64,
-0xFF, 0x70, 0x0D, 0xEE, 0x64, 0xFF, 0x70, 0x08,
-0xEF, 0x64, 0xFF, 0x70, 0x03, 0x02, 0xE4, 0x46,
-0xC2, 0x3F, 0x90, 0xF5, 0xD3, 0xE0, 0x64, 0x01,
-0x70, 0x02, 0xD2, 0x3F, 0x75, 0x17, 0x00, 0x75,
-0x18, 0x00, 0x85, 0x14, 0x19, 0x75, 0x1B, 0x01,
-0x12, 0x2F, 0x8C, 0x40, 0x03, 0x02, 0xE4, 0x46,
-0x90, 0xEA, 0x49, 0xE5, 0x14, 0xF0, 0x05, 0x14,
-0x02, 0xE2, 0xDD, 0xD2, 0x22, 0x90, 0xEA, 0x49,
-0xE0, 0x64, 0xFF, 0x70, 0x02, 0x80, 0x02, 0x80,
-0x12, 0x90, 0xFE, 0x44, 0x74, 0x02, 0xF0, 0x30,
-0x25, 0x04, 0xE0, 0x20, 0xE1, 0xF9, 0x12, 0x2F,
-0x9E, 0xC3, 0x22, 0x30, 0x3F, 0x36, 0x74, 0x88,
-0x90, 0xEA, 0x44, 0xF0, 0x75, 0x17, 0x00, 0x79,
-0x00, 0x7A, 0x00, 0x7B, 0x10, 0x7C, 0x02, 0x7D,
-0x02, 0x12, 0x2F, 0xA7, 0x7F, 0x80, 0x12, 0x2F,
-0xC5, 0x90, 0xFE, 0x45, 0xE0, 0x54, 0xFE, 0xF0,
-0x90, 0xFE, 0x45, 0xE0, 0x44, 0x04, 0xF0, 0x90,
-0xFE, 0x44, 0x74, 0x02, 0xF0, 0x30, 0x25, 0x04,
-0xE0, 0x20, 0xE1, 0xF9, 0xD3, 0x22, 0x75, 0x8A,
-0x00, 0x75, 0x8C, 0xCE, 0xC2, 0x8D, 0x90, 0xEA,
-0x65, 0xE4, 0xF0, 0xA3, 0xF0, 0xD2, 0x8C, 0x90,
-0xEA, 0x65, 0xE0, 0xFC, 0xA3, 0xE0, 0xFD, 0xEC,
-0xC3, 0x9E, 0x40, 0xF3, 0x70, 0x05, 0xED, 0xC3,
-0x9F, 0x40, 0xEC, 0xC2, 0x8C, 0x22, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x4D, 0x53, 0x2D, 0x49, 0x6E, 0x69, 0x74, 0x20,
-0x20, 0x20, 0x20, 0x31, 0x30, 0x30, 0x30, 0x30 };
-
-static BYTE MSP_Rdwr[] = {
-0x90, 0xF0, 0x10, 0xE0, 0x90, 0xEA, 0x46, 0xF0,
-0xB4, 0x04, 0x03, 0x02, 0xE1, 0x1E, 0x90, 0xFF,
-0x09, 0xE0, 0x30, 0xE1, 0x06, 0x90, 0xFF, 0x23,
-0x74, 0x80, 0xF0, 0x90, 0xFF, 0x09, 0xE0, 0x30,
-0xE5, 0xFC, 0x90, 0xFF, 0x83, 0xE0, 0xA2, 0xE1,
-0x92, 0x25, 0x40, 0x01, 0x22, 0x20, 0x1F, 0x02,
-0xC3, 0x22, 0x30, 0x45, 0x02, 0xC3, 0x22, 0xC3,
-0xE5, 0x3D, 0x13, 0xF5, 0x08, 0xE5, 0x3E, 0x13,
-0xF5, 0x09, 0x78, 0x96, 0x79, 0x20, 0xAA, 0x08,
-0xAB, 0x09, 0x12, 0xE2, 0x53, 0x20, 0x1D, 0x10,
-0x90, 0xFF, 0x83, 0xE0, 0xA2, 0xE1, 0x92, 0x25,
-0x30, 0x25, 0x03, 0x30, 0x24, 0xEF, 0xD2, 0x24,
-0x20, 0x23, 0x10, 0x90, 0xFF, 0x83, 0xE0, 0xA2,
-0xE1, 0x92, 0x25, 0x30, 0x25, 0x03, 0x30, 0x24,
-0xEF, 0xD2, 0x24, 0x30, 0x24, 0x02, 0xC3, 0x22,
-0xC2, 0x24, 0xC2, 0x23, 0x90, 0xEA, 0x4B, 0xE0,
-0x30, 0xE3, 0x0B, 0xC2, 0x25, 0x90, 0xFF, 0x85,
-0xE0, 0x54, 0xFD, 0xF0, 0xC3, 0x22, 0x30, 0xE2,
-0x78, 0x90, 0xFF, 0x09, 0x90, 0xFF, 0x83, 0xE0,
-0xA2, 0xE1, 0x92, 0x25, 0x30, 0x25, 0x0A, 0x90,
-0xFF, 0x09, 0xE0, 0x30, 0xE5, 0xEE, 0xD3, 0x80,
-0x01, 0xC3, 0x40, 0x01, 0x22, 0x79, 0x00, 0x90,
-0xFE, 0x46, 0xE0, 0x54, 0xF0, 0x49, 0xF0, 0x78,
-0x2D, 0x12, 0x2F, 0xAA, 0x7E, 0xF4, 0x7F, 0x00,
-0x7D, 0x00, 0x7C, 0x02, 0x12, 0x2F, 0xC2, 0x20,
-0x1D, 0x10, 0x90, 0xFF, 0x83, 0xE0, 0xA2, 0xE1,
-0x92, 0x25, 0x30, 0x25, 0x03, 0x30, 0x24, 0xEF,
-0xD2, 0x24, 0x30, 0x24, 0x13, 0x75, 0x3F, 0x00,
-0xC3, 0xE5, 0x09, 0x33, 0xF5, 0x3E, 0xE5, 0x08,
-0x33, 0xF5, 0x3D, 0x75, 0x3C, 0x00, 0xC3, 0x22,
-0x90, 0xFF, 0x2A, 0x74, 0x02, 0xF0, 0xA3, 0x74,
-0x00, 0xF0, 0xE5, 0x09, 0x24, 0xFF, 0xF5, 0x09,
-0xE5, 0x08, 0x34, 0xFF, 0xF5, 0x08, 0x02, 0xE0,
-0x60, 0x90, 0xEA, 0x4B, 0xE0, 0x20, 0xE0, 0x03,
-0x02, 0xE0, 0x60, 0xE4, 0xF5, 0x3F, 0xF5, 0x3E,
-0xF5, 0x3D, 0xF5, 0x3C, 0xD3, 0x22, 0x90, 0xFF,
-0x09, 0xE0, 0x30, 0xE1, 0x06, 0x90, 0xFF, 0x23,
-0x74, 0x80, 0xF0, 0x90, 0xFF, 0x09, 0xE0, 0x30,
-0xE5, 0xFC, 0x90, 0xFF, 0x83, 0xE0, 0xA2, 0xE1,
-0x92, 0x25, 0x40, 0x01, 0x22, 0x20, 0x1F, 0x02,
-0xC3, 0x22, 0x30, 0x1E, 0x02, 0xC3, 0x22, 0xC3,
-0xE5, 0x3D, 0x13, 0xF5, 0x08, 0xE5, 0x3E, 0x13,
-0xF5, 0x09, 0x78, 0x96, 0x79, 0x21, 0xAA, 0x08,
-0xAB, 0x09, 0x12, 0xE2, 0x53, 0x20, 0x1D, 0x10,
-0x90, 0xFF, 0x83, 0xE0, 0xA2, 0xE1, 0x92, 0x25,
-0x30, 0x25, 0x03, 0x30, 0x24, 0xEF, 0xD2, 0x24,
-0x30, 0x2D, 0x05, 0x75, 0x0A, 0x01, 0x80, 0x03,
-0x75, 0x0A, 0x08, 0x20, 0x23, 0x10, 0x90, 0xFF,
-0x83, 0xE0, 0xA2, 0xE1, 0x92, 0x25, 0x30, 0x25,
-0x03, 0x30, 0x24, 0xEF, 0xD2, 0x24, 0x30, 0x24,
-0x02, 0xC3, 0x22, 0xC2, 0x24, 0xC2, 0x23, 0x90,
-0xEA, 0x4B, 0xE0, 0x30, 0xE1, 0x0B, 0xC2, 0x25,
-0x90, 0xFF, 0x85, 0xE0, 0x54, 0xFD, 0xF0, 0xC3,
-0x22, 0x20, 0xE2, 0x03, 0x02, 0xE2, 0x3E, 0x79,
-0x0F, 0x90, 0xFE, 0x46, 0xE0, 0x54, 0xF0, 0x49,
-0xF0, 0x75, 0x0B, 0x00, 0xE5, 0x0B, 0xC3, 0x95,
-0x0A, 0x50, 0x43, 0x90, 0xFF, 0x09, 0x30, 0x25,
-0x0B, 0xE0, 0x30, 0xE1, 0xF9, 0x90, 0xFF, 0x09,
-0xF0, 0xD3, 0x80, 0x01, 0xC3, 0x50, 0x0F, 0xAF,
-0x0B, 0x7C, 0xF0, 0x7D, 0x00, 0xAB, 0x4D, 0xAA,
-0x4C, 0x12, 0x2F, 0xBF, 0x40, 0x0F, 0x90, 0xFF,
-0x09, 0xE0, 0x30, 0xE1, 0x06, 0x90, 0xFF, 0x23,
-0x74, 0x80, 0xF0, 0xC3, 0x22, 0x90, 0xFF, 0x09,
-0xE0, 0x30, 0xE1, 0x06, 0x90, 0xFF, 0x23, 0x74,
-0x80, 0xF0, 0x05, 0x0B, 0x80, 0xB6, 0x20, 0x1D,
-0x10, 0x90, 0xFF, 0x83, 0xE0, 0xA2, 0xE1, 0x92,
-0x25, 0x30, 0x25, 0x03, 0x30, 0x24, 0xEF, 0xD2,
-0x24, 0x30, 0x24, 0x13, 0x75, 0x3F, 0x00, 0xC3,
-0xE5, 0x09, 0x33, 0xF5, 0x3E, 0xE5, 0x08, 0x33,
-0xF5, 0x3D, 0x75, 0x3C, 0x00, 0xC3, 0x22, 0xE5,
-0x09, 0x24, 0xFF, 0xF5, 0x09, 0xE5, 0x08, 0x34,
-0xFF, 0xF5, 0x08, 0x02, 0xE1, 0x7B, 0x90, 0xEA,
-0x4B, 0xE0, 0x20, 0xE0, 0x03, 0x02, 0xE1, 0x7B,
-0xE4, 0xF5, 0x3F, 0xF5, 0x3E, 0xF5, 0x3D, 0xF5,
-0x3C, 0xD3, 0x22, 0x90, 0xFE, 0x4C, 0xE0, 0xF0,
-0x90, 0xFE, 0x4D, 0xE0, 0xF0, 0xC2, 0x24, 0xC2,
-0x23, 0xC2, 0x1D, 0x90, 0xFE, 0x50, 0xE8, 0xF0,
-0x90, 0xFE, 0x40, 0xE9, 0xF0, 0x90, 0xFE, 0x40,
-0xEA, 0xF0, 0x90, 0xFE, 0x40, 0xEB, 0xF0, 0x90,
-0xEB, 0x2A, 0xE0, 0x90, 0xFE, 0x40, 0xF0, 0x90,
-0xEB, 0x2B, 0xE0, 0x90, 0xFE, 0x40, 0xF0, 0x90,
-0xEB, 0x2C, 0xE0, 0x90, 0xFE, 0x40, 0xF0, 0x90,
-0xEB, 0x2D, 0xE0, 0x90, 0xFE, 0x40, 0xF0, 0x90,
-0xFE, 0x44, 0x74, 0x01, 0xF0, 0x22, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x4D, 0x53, 0x50, 0x2D, 0x52, 0x57, 0x20, 0x20,
-0x20, 0x20, 0x20, 0x31, 0x30, 0x30, 0x30, 0x30 };
-
-static BYTE MS_Rdwr[] = {
-0x90, 0xF0, 0x10, 0xE0, 0x90, 0xEA, 0x46, 0xF0,
-0xB4, 0x02, 0x02, 0x80, 0x36, 0x90, 0xF0, 0x11,
-0xE0, 0xF5, 0x17, 0x90, 0xF0, 0x12, 0xE0, 0xF5,
-0x18, 0x90, 0xF0, 0x13, 0xE0, 0xF5, 0x19, 0x90,
-0xF0, 0x14, 0xE0, 0xF5, 0x1B, 0x90, 0xF0, 0x15,
-0xE0, 0xF5, 0x1C, 0x90, 0xF0, 0x16, 0xE0, 0xF5,
-0x1D, 0x90, 0xF0, 0x17, 0xE0, 0xF5, 0x1E, 0x90,
-0xF0, 0x18, 0xE0, 0xF5, 0x1F, 0x90, 0xF0, 0x19,
-0xE0, 0xF5, 0x10, 0x90, 0xFF, 0x09, 0xE0, 0x30,
-0xE1, 0x06, 0x90, 0xFF, 0x23, 0x74, 0x80, 0xF0,
-0x90, 0xFF, 0x09, 0xE0, 0x30, 0xE5, 0xFC, 0x90,
-0xFF, 0x83, 0xE0, 0xA2, 0xE1, 0x92, 0x25, 0x40,
-0x01, 0x22, 0x90, 0xEA, 0x46, 0xE0, 0xB4, 0x02,
-0x02, 0x80, 0x2B, 0xB4, 0x03, 0x03, 0x02, 0xE0,
-0x96, 0xB4, 0x04, 0x05, 0xD2, 0x21, 0x02, 0xE2,
-0xBC, 0xB4, 0x08, 0x0E, 0x85, 0x1C, 0x11, 0x85,
-0x1D, 0x12, 0x85, 0x10, 0x1B, 0xC2, 0x21, 0x02,
-0xE2, 0xBC, 0xB4, 0x06, 0x03, 0x02, 0xE2, 0x2F,
-0xB4, 0x05, 0x03, 0x02, 0xE2, 0x7A, 0x20, 0x1F,
-0x02, 0xC3, 0x22, 0x90, 0xEA, 0x46, 0xE0, 0xB4,
-0x03, 0x03, 0x02, 0xE1, 0x94, 0xC3, 0xE5, 0x3D,
-0x13, 0xF5, 0x14, 0xE5, 0x3E, 0x13, 0xF5, 0x15,
-0x90, 0xEB, 0x2A, 0xE0, 0xFC, 0x90, 0xEB, 0x2B,
-0xE0, 0xFD, 0x90, 0xEB, 0x2C, 0xE0, 0xFE, 0x90,
-0xEB, 0x2D, 0xE0, 0xFF, 0x90, 0xEA, 0x47, 0xE0,
-0x14, 0xFB, 0x60, 0x12, 0xC3, 0xEC, 0x13, 0xFC,
-0xED, 0x13, 0xFD, 0xEE, 0x13, 0xFE, 0xEF, 0x13,
-0xFF, 0xC3, 0xEB, 0x13, 0x80, 0xEB, 0x8E, 0x1E,
-0x8F, 0x1F, 0x90, 0xEB, 0x2D, 0xE0, 0xFF, 0x90,
-0xEA, 0x47, 0xE0, 0x14, 0x5F, 0xF5, 0x1B, 0xD2,
-0x1A, 0x90, 0xEA, 0x47, 0xE0, 0xC3, 0x95, 0x1B,
-0xF5, 0x16, 0xE5, 0x14, 0x70, 0x0A, 0xE5, 0x16,
-0xD3, 0x95, 0x15, 0x40, 0x03, 0x85, 0x15, 0x16,
-0xE5, 0x1E, 0xF5, 0x18, 0xE5, 0x1F, 0xF5, 0x19,
-0x75, 0x17, 0x00, 0x90, 0xEA, 0x5C, 0xE0, 0xF8,
-0x90, 0xEB, 0x6D, 0xE0, 0x65, 0x18, 0x70, 0x08,
-0xA3, 0xE0, 0x65, 0x19, 0x70, 0x03, 0x80, 0x07,
-0xA3, 0xA3, 0xD8, 0xEF, 0xC3, 0x80, 0x01, 0xD3,
-0x40, 0x4F, 0xE5, 0x16, 0x64, 0x01, 0x70, 0x07,
-0x12, 0x2F, 0x8C, 0x50, 0x41, 0x80, 0x07, 0xAB,
-0x16, 0x12, 0xE5, 0x60, 0x50, 0x38, 0xC3, 0xE5,
-0x15, 0x95, 0x16, 0xF5, 0x15, 0xE5, 0x14, 0x94,
-0x00, 0xF5, 0x14, 0xE5, 0x14, 0x45, 0x15, 0x60,
-0x17, 0x05, 0x0D, 0xE5, 0x0D, 0x70, 0x02, 0x05,
-0x0C, 0x05, 0x1F, 0xE5, 0x1F, 0x70, 0x02, 0x05,
-0x1E, 0x74, 0x00, 0xF5, 0x1B, 0x02, 0xE0, 0xF1,
-0x75, 0x3F, 0x00, 0x75, 0x3E, 0x00, 0x75, 0x3D,
-0x00, 0x75, 0x3C, 0x00, 0xD3, 0x22, 0x12, 0x2F,
-0x9E, 0x75, 0x3F, 0x00, 0xC3, 0xE5, 0x15, 0x33,
-0xF5, 0x3E, 0xE5, 0x14, 0x33, 0xF5, 0x3D, 0x75,
-0x3C, 0x00, 0xC3, 0x22, 0xE5, 0x1C, 0x70, 0x03,
-0x75, 0x1C, 0x01, 0xC3, 0x94, 0x80, 0x40, 0x03,
-0x75, 0x1C, 0x80, 0xAA, 0x1C, 0xAD, 0x1B, 0x90,
-0xF4, 0x00, 0xC0, 0x83, 0xC0, 0x82, 0xEA, 0x60,
-0x5F, 0xAE, 0x18, 0xAF, 0x19, 0xE4, 0x90, 0xFE,
-0x48, 0xF0, 0x90, 0xFE, 0x49, 0xF0, 0x12, 0x2F,
-0x8F, 0x90, 0xFE, 0x48, 0x74, 0x7F, 0xF0, 0x90,
-0xFE, 0x49, 0x74, 0x9F, 0xF0, 0x90, 0xEB, 0xDD,
-0xE0, 0xD0, 0x82, 0xD0, 0x83, 0xF0, 0xA3, 0xC0,
-0x83, 0xC0, 0x82, 0x90, 0xEB, 0xDE, 0xE0, 0xD0,
-0x82, 0xD0, 0x83, 0xF0, 0xA3, 0xC0, 0x83, 0xC0,
-0x82, 0x90, 0xEB, 0xDF, 0xE0, 0xD0, 0x82, 0xD0,
-0x83, 0xF0, 0xA3, 0xC0, 0x83, 0xC0, 0x82, 0x90,
-0xEB, 0xE0, 0xE0, 0xD0, 0x82, 0xD0, 0x83, 0xF0,
-0xA3, 0xC0, 0x83, 0xC0, 0x82, 0x1A, 0x05, 0x19,
-0xE5, 0x19, 0x70, 0x02, 0x05, 0x18, 0x80, 0x9E,
-0xD0, 0x82, 0xD0, 0x83, 0xE5, 0x1C, 0x25, 0xE0,
-0xFF, 0x74, 0x00, 0x33, 0xFE, 0xEF, 0x25, 0xE0,
-0xFF, 0xEE, 0x33, 0xFE, 0x90, 0xFF, 0x2A, 0xEE,
-0xF0, 0xA3, 0xEF, 0xF0, 0x02, 0xE1, 0x70, 0x20,
-0x1F, 0x02, 0xC3, 0x22, 0x30, 0x1E, 0x02, 0x80,
-0xF9, 0xD2, 0x1A, 0x75, 0x17, 0x00, 0x75, 0x3F,
-0x00, 0x75, 0x3E, 0x00, 0x75, 0x3D, 0x00, 0x75,
-0x3C, 0x00, 0x90, 0xEA, 0x5C, 0xE0, 0xF8, 0x90,
-0xEB, 0x6D, 0xE0, 0x65, 0x18, 0x70, 0x08, 0xA3,
-0xE0, 0x65, 0x19, 0x70, 0x03, 0x80, 0x07, 0xA3,
-0xA3, 0xD8, 0xEF, 0xC3, 0x80, 0x01, 0xD3, 0x40,
-0x0E, 0x75, 0x1C, 0xF8, 0x75, 0x1D, 0xFF, 0x12,
-0xE7, 0x77, 0x40, 0x05, 0x12, 0x2F, 0x9E, 0xC3,
-0x22, 0x22, 0x20, 0x1F, 0x02, 0xC3, 0x22, 0x30,
-0x1E, 0x02, 0x80, 0xF9, 0xD2, 0x1A, 0x75, 0x3F,
-0x00, 0x75, 0x3E, 0x00, 0x75, 0x3D, 0x00, 0x75,
-0x3C, 0x00, 0x90, 0xEA, 0x5C, 0xE0, 0xF8, 0x90,
-0xEB, 0x6D, 0xE0, 0x65, 0x18, 0x70, 0x08, 0xA3,
-0xE0, 0x65, 0x19, 0x70, 0x03, 0x80, 0x07, 0xA3,
-0xA3, 0xD8, 0xEF, 0xC3, 0x80, 0x01, 0xD3, 0x40,
-0x08, 0x12, 0xE6, 0x6F, 0x40, 0x05, 0x12, 0x2F,
-0x9E, 0xC3, 0x22, 0x22, 0x20, 0x1F, 0x02, 0xC3,
-0x22, 0x30, 0x1E, 0x02, 0x80, 0xF9, 0xC3, 0xE5,
-0x3D, 0x13, 0xF5, 0x14, 0xE5, 0x3E, 0x13, 0xF5,
-0x15, 0x30, 0x21, 0x39, 0x90, 0xEB, 0x2A, 0xE0,
-0xFC, 0xA3, 0xE0, 0xFD, 0xA3, 0xE0, 0xFE, 0xA3,
-0xE0, 0xFF, 0x90, 0xEA, 0x47, 0xE0, 0x14, 0xFB,
-0x60, 0x12, 0xC3, 0xEC, 0x13, 0xFC, 0xED, 0x13,
-0xFD, 0xEE, 0x13, 0xFE, 0xEF, 0x13, 0xFF, 0xC3,
-0xEB, 0x13, 0x80, 0xEB, 0x8E, 0x18, 0x8F, 0x19,
-0x90, 0xEB, 0x2D, 0xE0, 0xFF, 0x90, 0xEA, 0x47,
-0xE0, 0x14, 0x5F, 0xF5, 0x1B, 0xD2, 0x1C, 0xC3,
-0x90, 0xEA, 0x47, 0xE0, 0x95, 0x1B, 0xF5, 0x16,
-0xE5, 0x14, 0x70, 0x0A, 0xD3, 0xE5, 0x16, 0x95,
-0x15, 0x40, 0x03, 0x85, 0x15, 0x16, 0x90, 0xEA,
-0x5C, 0xE0, 0xF8, 0x90, 0xEB, 0x6D, 0xE0, 0x65,
-0x18, 0x70, 0x08, 0xA3, 0xE0, 0x65, 0x19, 0x70,
-0x03, 0x80, 0x07, 0xA3, 0xA3, 0xD8, 0xEF, 0xC3,
-0x80, 0x01, 0xD3, 0x50, 0x03, 0x02, 0xE4, 0x34,
-0x20, 0x21, 0x2F, 0xC2, 0x42, 0x75, 0x10, 0x00,
-0xE5, 0x10, 0x65, 0x1B, 0x70, 0x03, 0x02, 0xE3,
-0x7A, 0x12, 0x2F, 0x89, 0x40, 0x03, 0x02, 0xE4,
-0x31, 0xE5, 0x10, 0x70, 0x11, 0xC0, 0x1C, 0xC0,
-0x1B, 0x75, 0x1B, 0x00, 0x75, 0x1C, 0xEF, 0x12,
-0x2F, 0x95, 0xD0, 0x1B, 0xD0, 0x1C, 0x05, 0x10,
-0x80, 0xD6, 0x75, 0x17, 0x00, 0x30, 0x21, 0x06,
-0xC0, 0x18, 0xC0, 0x19, 0x80, 0x10, 0x75, 0x1C,
-0xF8, 0x75, 0x1D, 0xFF, 0xC0, 0x18, 0xC0, 0x19,
-0x85, 0x11, 0x18, 0x85, 0x12, 0x19, 0xE5, 0x16,
-0xB4, 0x01, 0x0C, 0x12, 0xE5, 0x11, 0x40, 0x13,
-0xD0, 0x19, 0xD0, 0x18, 0x02, 0xE4, 0x31, 0x12,
-0x2F, 0x92, 0x40, 0x07, 0xD0, 0x19, 0xD0, 0x18,
-0x02, 0xE4, 0x31, 0xD0, 0x19, 0xD0, 0x18, 0xE5,
-0x10, 0x25, 0x16, 0xF5, 0x10, 0x20, 0x21, 0x3A,
-0x90, 0xEA, 0x47, 0xE0, 0x65, 0x10, 0x60, 0x0C,
-0x12, 0x2F, 0x89, 0x40, 0x03, 0x02, 0xE4, 0x31,
-0x05, 0x10, 0x80, 0xEC, 0x20, 0x42, 0x05, 0x12,
-0xE7, 0x77, 0x80, 0x09, 0x75, 0x1B, 0x00, 0x75,
-0x1C, 0x7F, 0x12, 0x2F, 0x95, 0x75, 0x17, 0x00,
-0x85, 0x11, 0x18, 0x85, 0x12, 0x19, 0x75, 0x1B,
-0x00, 0x75, 0x1C, 0xF8, 0x75, 0x1D, 0xFF, 0x12,
-0xE6, 0x6F, 0xC3, 0xE5, 0x15, 0x95, 0x16, 0xF5,
-0x15, 0xE5, 0x14, 0x94, 0x00, 0xF5, 0x14, 0xE5,
-0x15, 0x45, 0x14, 0x60, 0x16, 0x05, 0x19, 0xE5,
-0x19, 0x70, 0x02, 0x05, 0x18, 0x05, 0x0D, 0xE5,
-0x0D, 0x70, 0x02, 0x05, 0x0C, 0x75, 0x1B, 0x00,
-0x02, 0xE3, 0x0F, 0x75, 0x3F, 0x00, 0x75, 0x3E,
-0x00, 0x75, 0x3D, 0x00, 0x75, 0x3C, 0x00, 0xD3,
-0x22, 0x12, 0x2F, 0x9E, 0x90, 0xFF, 0x09, 0xE0,
-0x30, 0xE1, 0x06, 0x90, 0xFF, 0x23, 0x74, 0x80,
-0xF0, 0x75, 0x3F, 0x00, 0xC3, 0xE5, 0x15, 0x33,
-0xF5, 0x3E, 0xE5, 0x14, 0x33, 0xF5, 0x3D, 0x75,
-0x3C, 0x00, 0xC3, 0x22, 0x75, 0x1A, 0x20, 0x12,
-0x2F, 0xA4, 0x40, 0x03, 0x02, 0xE5, 0x0F, 0x79,
-0x0F, 0x90, 0xFE, 0x46, 0xE0, 0x54, 0xF0, 0x49,
-0xF0, 0x78, 0xD2, 0x12, 0x2F, 0xAA, 0x30, 0x1C,
-0x5A, 0x30, 0x2D, 0x05, 0x75, 0x16, 0x01, 0x80,
-0x03, 0x75, 0x16, 0x08, 0x75, 0x08, 0x00, 0xE5,
-0x08, 0x65, 0x16, 0x70, 0x02, 0x80, 0x55, 0x90,
-0xFF, 0x09, 0x30, 0x25, 0x0B, 0xE0, 0x30, 0xE1,
-0xF9, 0x90, 0xFF, 0x09, 0xF0, 0xD3, 0x80, 0x01,
-0xC3, 0x50, 0x0F, 0xAF, 0x08, 0x7C, 0xF0, 0x7D,
-0x00, 0xAB, 0x4D, 0xAA, 0x4C, 0x12, 0x2F, 0xBF,
-0x40, 0x10, 0x90, 0xFF, 0x09, 0xE0, 0x30, 0xE1,
-0x06, 0x90, 0xFF, 0x23, 0x74, 0x80, 0xF0, 0x02,
-0xE5, 0x0A, 0x90, 0xFF, 0x09, 0xE0, 0x30, 0xE1,
-0x06, 0x90, 0xFF, 0x23, 0x74, 0x80, 0xF0, 0x05,
-0x08, 0x80, 0xB4, 0x7C, 0xF0, 0x7D, 0x00, 0x7B,
-0x00, 0x7A, 0x02, 0x7F, 0x00, 0x12, 0x2F, 0xBF,
-0x40, 0x02, 0x80, 0x2E, 0x20, 0x1D, 0x08, 0x30,
-0x25, 0x03, 0x30, 0x24, 0xF7, 0xD2, 0x24, 0x30,
-0x24, 0x02, 0xC3, 0x22, 0x79, 0x55, 0x7A, 0x01,
-0x12, 0x2F, 0xAD, 0x40, 0x02, 0x80, 0x18, 0x12,
-0x2F, 0xB0, 0x30, 0x24, 0x02, 0xC3, 0x22, 0xEF,
-0x54, 0xC1, 0x64, 0x80, 0x60, 0x02, 0x80, 0x02,
-0xD3, 0x22, 0x79, 0xC3, 0x12, 0x2F, 0x9B, 0xC3,
-0x22, 0xC0, 0x16, 0x30, 0x1E, 0x03, 0x02, 0xE5,
-0x5C, 0x75, 0x09, 0x00, 0x7C, 0x08, 0x30, 0x2D,
-0x02, 0x7C, 0x20, 0x20, 0x25, 0x03, 0x02, 0xE5,
-0x5C, 0xC0, 0x04, 0x12, 0xE4, 0x54, 0xD0, 0x04,
-0x50, 0x04, 0xD0, 0x16, 0xD3, 0x22, 0xA9, 0x09,
-0xE9, 0x54, 0x07, 0x60, 0x0C, 0x90, 0xFE, 0x4C,
-0xE0, 0xF0, 0x90, 0xFE, 0x4D, 0xE0, 0xF0, 0x80,
-0x09, 0x20, 0x25, 0x03, 0x02, 0xE5, 0x5C, 0x12,
-0x2F, 0xB3, 0x05, 0x09, 0xE5, 0x09, 0x6C, 0x60,
-0x03, 0x02, 0xE5, 0x23, 0xD0, 0x16, 0xC3, 0x22,
-0xC0, 0x03, 0x75, 0x1A, 0x00, 0x12, 0x2F, 0xB6,
-0x40, 0x04, 0xD0, 0x03, 0xC3, 0x22, 0xC2, 0x41,
-0x79, 0xAA, 0x7A, 0x00, 0x12, 0x2F, 0xAD, 0x50,
-0xF1, 0xD0, 0x03, 0x1B, 0x8B, 0x08, 0xC2, 0x40,
-0x20, 0x20, 0x08, 0x30, 0x25, 0x03, 0x30, 0x24,
-0xF7, 0xD2, 0x24, 0x30, 0x24, 0x02, 0xC3, 0x22,
-0x12, 0x2F, 0xB0, 0xC2, 0x20, 0xC2, 0x24, 0xEF,
-0x54, 0xE1, 0xFF, 0x30, 0xE0, 0x03, 0x02, 0xE6,
-0x6D, 0x20, 0xE6, 0x0F, 0x30, 0xE7, 0x02, 0xD2,
-0x40, 0x20, 0xE5, 0x19, 0x64, 0x80, 0x70, 0x03,
-0x02, 0xE6, 0x4B, 0x12, 0x2F, 0xB9, 0x40, 0x03,
-0x02, 0xE6, 0x68, 0x90, 0xEB, 0xCA, 0xE0, 0x54,
-0x15, 0x60, 0x02, 0xD2, 0x41, 0xE5, 0x08, 0x70,
-0x0E, 0x20, 0x40, 0x0B, 0x79, 0x33, 0x7A, 0x01,
-0x12, 0x2F, 0xAD, 0x40, 0x02, 0xC1, 0x6D, 0x12,
-0x2F, 0xBC, 0x40, 0x02, 0xC1, 0x6D, 0x90, 0xEB,
-0xDE, 0xE0, 0x54, 0x30, 0x64, 0x30, 0x60, 0x02,
-0xC1, 0x6D, 0x79, 0x00, 0x90, 0xFE, 0x46, 0xE0,
-0x54, 0xF0, 0x49, 0xF0, 0x79, 0x00, 0x78, 0x2D,
-0x12, 0x2F, 0xAA, 0x90, 0xFF, 0x09, 0x30, 0x25,
-0x07, 0xE0, 0x30, 0xE5, 0xF9, 0xD3, 0x80, 0x01,
-0xC3, 0x40, 0x02, 0x80, 0x5B, 0xC0, 0x01, 0x7E,
-0xF4, 0x7F, 0x00, 0x7D, 0x00, 0x7C, 0x02, 0x12,
-0x2F, 0xC2, 0xD0, 0x01, 0x40, 0x09, 0x09, 0xE9,
-0x64, 0x20, 0x70, 0xD2, 0x02, 0xE6, 0x68, 0x90,
-0xFF, 0x2A, 0x74, 0x02, 0xF0, 0xA3, 0x74, 0x00,
-0xF0, 0x20, 0x1D, 0x08, 0x30, 0x25, 0x03, 0x30,
-0x24, 0xF7, 0xD2, 0x24, 0x30, 0x24, 0x02, 0xC3,
-0x22, 0x30, 0x40, 0x02, 0x80, 0x05, 0x15, 0x08,
-0x02, 0xE5, 0x80, 0x30, 0x41, 0x16, 0x79, 0xCC,
-0x12, 0x2F, 0x9B, 0xC2, 0x1A, 0x90, 0xEA, 0x47,
-0xE0, 0x65, 0x1B, 0x60, 0x07, 0x12, 0x2F, 0x8C,
-0x05, 0x1B, 0x80, 0xF1, 0xD2, 0x1A, 0xD3, 0x22,
-0x79, 0xC3, 0x12, 0x2F, 0x9B, 0xC3, 0x22, 0xC0,
-0x08, 0x30, 0x1E, 0x02, 0x80, 0x33, 0x75, 0x1A,
-0x40, 0x75, 0x1D, 0xFF, 0x75, 0x08, 0x00, 0x20,
-0x25, 0x02, 0x80, 0x25, 0x12, 0xE6, 0xAD, 0x50,
-0x04, 0xD0, 0x08, 0xD3, 0x22, 0xA9, 0x08, 0xE9,
-0x54, 0x07, 0x60, 0x02, 0x80, 0x08, 0x20, 0x25,
-0x02, 0x80, 0x0E, 0x12, 0x2F, 0xB3, 0x05, 0x08,
-0xE5, 0x08, 0x64, 0x20, 0x60, 0x03, 0x02, 0xE6,
-0x7F, 0xD0, 0x08, 0xC3, 0x22, 0x90, 0xFE, 0x4C,
-0xE0, 0xF0, 0x90, 0xFE, 0x4D, 0xE0, 0xF0, 0xC2,
-0x1D, 0xC2, 0x24, 0x90, 0xFE, 0x50, 0x74, 0x87,
-0xF0, 0x90, 0xFE, 0x40, 0x74, 0x00, 0xF0, 0x90,
-0xFE, 0x40, 0x74, 0x00, 0xF0, 0x90, 0xFE, 0x40,
-0x74, 0x10, 0xF0, 0x90, 0xFE, 0x40, 0x74, 0x0F,
-0xF0, 0x90, 0xFE, 0x57, 0x74, 0x0F, 0xF0, 0x90,
-0xFE, 0x44, 0x74, 0x01, 0xF0, 0x20, 0x1D, 0x08,
-0x30, 0x25, 0x03, 0x30, 0x24, 0xF7, 0xD2, 0x24,
-0x30, 0x24, 0x02, 0xC3, 0x22, 0x79, 0x00, 0x90,
-0xFE, 0x46, 0xE0, 0x54, 0xF0, 0x49, 0xF0, 0x90,
-0xFE, 0x4D, 0x30, 0x25, 0x07, 0xE0, 0x30, 0xE5,
-0xF9, 0xD3, 0x80, 0x01, 0xC3, 0x40, 0x01, 0x22,
-0x78, 0xB4, 0x12, 0x2F, 0xAA, 0x90, 0xEA, 0x44,
-0xE0, 0x90, 0xFE, 0x40, 0xF0, 0x78, 0x17, 0x7D,
-0x09, 0xE6, 0x08, 0x90, 0xFE, 0x40, 0xF0, 0xDD,
-0xF8, 0x74, 0xFF, 0x90, 0xFE, 0x40, 0xF0, 0xF0,
-0xF0, 0xF0, 0xC2, 0x1D, 0xC2, 0x24, 0xF0, 0x20,
-0x1D, 0x08, 0x30, 0x25, 0x03, 0x30, 0x24, 0xF7,
-0xD2, 0x24, 0x30, 0x24, 0x02, 0xC3, 0x22, 0x90,
-0xFE, 0x4E, 0x30, 0x25, 0x07, 0xE0, 0x30, 0xE6,
-0xF9, 0xD3, 0x80, 0x01, 0xC3, 0x79, 0x55, 0x7A,
-0x01, 0x12, 0x2F, 0xAD, 0x40, 0x02, 0x80, 0x13,
-0x12, 0x2F, 0xB0, 0x30, 0x24, 0x02, 0xC3, 0x22,
-0xEF, 0x20, 0xE0, 0x07, 0x54, 0xC0, 0xB4, 0x80,
-0x02, 0x80, 0x02, 0xC3, 0x22, 0xD3, 0x22, 0x30,
-0x1E, 0x02, 0x80, 0x0A, 0x12, 0xE7, 0x88, 0x40,
-0x03, 0x02, 0xE7, 0x86, 0xD3, 0x22, 0xC3, 0x22,
-0xC0, 0x08, 0x75, 0x08, 0x00, 0x20, 0x25, 0x02,
-0x80, 0x25, 0x12, 0x2F, 0xA1, 0x50, 0x03, 0xD0,
-0x08, 0x22, 0xA9, 0x08, 0xE9, 0x54, 0x07, 0x60,
-0x02, 0x80, 0x09, 0xA2, 0x25, 0x40, 0x02, 0x80,
-0x0E, 0x12, 0x2F, 0xB3, 0x05, 0x08, 0xE5, 0x08,
-0x64, 0x20, 0x60, 0x03, 0x02, 0xE7, 0x8D, 0xD0,
-0x08, 0xC3, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-0x4D, 0x53, 0x2D, 0x52, 0x57, 0x20, 0x20, 0x20,
-0x20, 0x20, 0x20, 0x31, 0x30, 0x30, 0x30, 0x30 };
static BYTE SM_Init[] = {
0x7B, 0x09, 0x7C, 0xF0, 0x7D, 0x10, 0x7E, 0xE9,
diff --git a/drivers/staging/keucr/ms.c b/drivers/staging/keucr/ms.c
deleted file mode 100644
index 087ad73ff70..00000000000
--- a/drivers/staging/keucr/ms.c
+++ /dev/null
@@ -1,1034 +0,0 @@
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-
-#include "usb.h"
-#include "scsiglue.h"
-#include "transport.h"
-#include "ms.h"
-
-/*
- * MS_ReaderCopyBlock()
- */
-int MS_ReaderCopyBlock(struct us_data *us, WORD oldphy, WORD newphy,
- WORD PhyBlockAddr, BYTE PageNum, PBYTE buf, WORD len)
-{
- struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
- int result;
-
- /* printk(KERN_INFO "MS_ReaderCopyBlock --- PhyBlockAddr = %x,
- PageNum = %x\n", PhyBlockAddr, PageNum); */
- result = ENE_LoadBinCode(us, MS_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
-
- memset(bcb, 0, sizeof(struct bulk_cb_wrap));
- bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
- bcb->DataTransferLength = 0x200*len;
- bcb->Flags = 0x00;
- bcb->CDB[0] = 0xF0;
- bcb->CDB[1] = 0x08;
- bcb->CDB[4] = (BYTE)(oldphy);
- bcb->CDB[3] = (BYTE)(oldphy>>8);
- bcb->CDB[2] = 0; /* (BYTE)(oldphy>>16) */
- bcb->CDB[7] = (BYTE)(newphy);
- bcb->CDB[6] = (BYTE)(newphy>>8);
- bcb->CDB[5] = 0; /* (BYTE)(newphy>>16) */
- bcb->CDB[9] = (BYTE)(PhyBlockAddr);
- bcb->CDB[8] = (BYTE)(PhyBlockAddr>>8);
- bcb->CDB[10] = PageNum;
-
- result = ENE_SendScsiCmd(us, FDIR_WRITE, buf, 0);
- if (result != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
-
- return USB_STOR_TRANSPORT_GOOD;
-}
-
-/*
- * MS_ReaderReadPage()
- */
-int MS_ReaderReadPage(struct us_data *us, DWORD PhyBlockAddr,
- BYTE PageNum, PDWORD PageBuf, MS_LibTypeExtdat *ExtraDat)
-{
- struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
- int result;
- BYTE ExtBuf[4];
- DWORD bn = PhyBlockAddr * 0x20 + PageNum;
-
- /* printk(KERN_INFO "MS --- MS_ReaderReadPage,
- PhyBlockAddr = %x, PageNum = %x\n", PhyBlockAddr, PageNum); */
-
- result = ENE_LoadBinCode(us, MS_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
-
- /* Read Page Data */
- memset(bcb, 0, sizeof(struct bulk_cb_wrap));
- bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
- bcb->DataTransferLength = 0x200;
- bcb->Flags = 0x80;
- bcb->CDB[0] = 0xF1;
- bcb->CDB[1] = 0x02;
- bcb->CDB[5] = (BYTE)(bn);
- bcb->CDB[4] = (BYTE)(bn>>8);
- bcb->CDB[3] = (BYTE)(bn>>16);
- bcb->CDB[2] = (BYTE)(bn>>24);
-
- result = ENE_SendScsiCmd(us, FDIR_READ, PageBuf, 0);
- if (result != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
-
- /* Read Extra Data */
- memset(bcb, 0, sizeof(struct bulk_cb_wrap));
- bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
- bcb->DataTransferLength = 0x4;
- bcb->Flags = 0x80;
- bcb->CDB[0] = 0xF1;
- bcb->CDB[1] = 0x03;
- bcb->CDB[5] = (BYTE)(PageNum);
- bcb->CDB[4] = (BYTE)(PhyBlockAddr);
- bcb->CDB[3] = (BYTE)(PhyBlockAddr>>8);
- bcb->CDB[2] = (BYTE)(PhyBlockAddr>>16);
- bcb->CDB[6] = 0x01;
-
- result = ENE_SendScsiCmd(us, FDIR_READ, &ExtBuf, 0);
- if (result != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
-
- ExtraDat->reserved = 0;
-	ExtraDat->intr = 0x80; /* Not yet, firmware support */
-	ExtraDat->status0 = 0x10; /* Not yet, firmware support */
-	ExtraDat->status1 = 0x00; /* Not yet, firmware support */
- ExtraDat->ovrflg = ExtBuf[0];
- ExtraDat->mngflg = ExtBuf[1];
- ExtraDat->logadr = MemStickLogAddr(ExtBuf[2], ExtBuf[3]);
-
- return USB_STOR_TRANSPORT_GOOD;
-}
-
-/*
- * MS_ReaderEraseBlock()
- */
-int MS_ReaderEraseBlock(struct us_data *us, DWORD PhyBlockAddr)
-{
- struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
- int result;
- DWORD bn = PhyBlockAddr;
-
- /* printk(KERN_INFO "MS --- MS_ReaderEraseBlock,
- PhyBlockAddr = %x\n", PhyBlockAddr); */
- result = ENE_LoadBinCode(us, MS_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
-
- memset(bcb, 0, sizeof(struct bulk_cb_wrap));
- bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
- bcb->DataTransferLength = 0x200;
- bcb->Flags = 0x80;
- bcb->CDB[0] = 0xF2;
- bcb->CDB[1] = 0x06;
- bcb->CDB[4] = (BYTE)(bn);
- bcb->CDB[3] = (BYTE)(bn>>8);
- bcb->CDB[2] = (BYTE)(bn>>16);
-
- result = ENE_SendScsiCmd(us, FDIR_READ, NULL, 0);
- if (result != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
-
- return USB_STOR_TRANSPORT_GOOD;
-}
-
-/*
- * MS_CardInit()
- */
-int MS_CardInit(struct us_data *us)
-{
- DWORD result = 0;
- WORD TmpBlock;
- PBYTE PageBuffer0 = NULL, PageBuffer1 = NULL;
- MS_LibTypeExtdat extdat;
- WORD btBlk1st, btBlk2nd;
- DWORD btBlk1stErred;
-
- printk(KERN_INFO "MS_CardInit start\n");
-
- MS_LibFreeAllocatedArea(us);
-
- PageBuffer0 = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
- PageBuffer1 = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
- if ((PageBuffer0 == NULL) || (PageBuffer1 == NULL)) {
- result = MS_NO_MEMORY_ERROR;
- goto exit;
- }
-
- btBlk1st = btBlk2nd = MS_LB_NOT_USED;
- btBlk1stErred = 0;
-
- for (TmpBlock = 0; TmpBlock < MS_MAX_INITIAL_ERROR_BLOCKS+2;
- TmpBlock++) {
- switch (MS_ReaderReadPage(us, TmpBlock, 0,
- (DWORD *)PageBuffer0, &extdat)) {
- case MS_STATUS_SUCCESS:
- break;
- case MS_STATUS_INT_ERROR:
- break;
- case MS_STATUS_ERROR:
- default:
- continue;
- }
-
- if ((extdat.ovrflg & MS_REG_OVR_BKST) == MS_REG_OVR_BKST_NG)
- continue;
-
- if (((extdat.mngflg & MS_REG_MNG_SYSFLG) == MS_REG_MNG_SYSFLG_USER) ||
- (be16_to_cpu(((MemStickBootBlockPage0 *)PageBuffer0)->header.wBlockID) != MS_BOOT_BLOCK_ID) ||
- (be16_to_cpu(((MemStickBootBlockPage0 *)PageBuffer0)->header.wFormatVersion) != MS_BOOT_BLOCK_FORMAT_VERSION) ||
- (((MemStickBootBlockPage0 *)PageBuffer0)->header.bNumberOfDataEntry != MS_BOOT_BLOCK_DATA_ENTRIES))
- continue;
-
- if (btBlk1st != MS_LB_NOT_USED) {
- btBlk2nd = TmpBlock;
- break;
- }
-
- btBlk1st = TmpBlock;
- memcpy(PageBuffer1, PageBuffer0, MS_BYTES_PER_PAGE);
- if (extdat.status1 &
- (MS_REG_ST1_DTER | MS_REG_ST1_EXER | MS_REG_ST1_FGER))
- btBlk1stErred = 1;
- }
-
- if (btBlk1st == MS_LB_NOT_USED) {
- result = MS_STATUS_ERROR;
- goto exit;
- }
-
- /* write protect */
- if ((extdat.status0 & MS_REG_ST0_WP) == MS_REG_ST0_WP_ON)
- MS_LibCtrlSet(us, MS_LIB_CTRL_WRPROTECT);
-
- result = MS_STATUS_ERROR;
- /* 1st Boot Block */
- if (btBlk1stErred == 0)
- result = MS_LibProcessBootBlock(us, btBlk1st, PageBuffer1);
- /* 1st */
- /* 2nd Boot Block */
- if (result && (btBlk2nd != MS_LB_NOT_USED))
- result = MS_LibProcessBootBlock(us, btBlk2nd, PageBuffer0);
-
- if (result) {
- result = MS_STATUS_ERROR;
- goto exit;
- }
-
- for (TmpBlock = 0; TmpBlock < btBlk1st; TmpBlock++)
- us->MS_Lib.Phy2LogMap[TmpBlock] = MS_LB_INITIAL_ERROR;
-
- us->MS_Lib.Phy2LogMap[btBlk1st] = MS_LB_BOOT_BLOCK;
-
- if (btBlk2nd != MS_LB_NOT_USED) {
- for (TmpBlock = btBlk1st + 1; TmpBlock < btBlk2nd; TmpBlock++)
- us->MS_Lib.Phy2LogMap[TmpBlock] = MS_LB_INITIAL_ERROR;
- us->MS_Lib.Phy2LogMap[btBlk2nd] = MS_LB_BOOT_BLOCK;
- }
-
- result = MS_LibScanLogicalBlockNumber(us, btBlk1st);
- if (result)
- goto exit;
-
- for (TmpBlock = MS_PHYSICAL_BLOCKS_PER_SEGMENT;
- TmpBlock < us->MS_Lib.NumberOfPhyBlock;
- TmpBlock += MS_PHYSICAL_BLOCKS_PER_SEGMENT) {
- if (MS_CountFreeBlock(us, TmpBlock) == 0) {
- MS_LibCtrlSet(us, MS_LIB_CTRL_WRPROTECT);
- break;
- }
- }
-
- /* write */
- if (MS_LibAllocWriteBuf(us)) {
- result = MS_NO_MEMORY_ERROR;
- goto exit;
- }
-
- result = MS_STATUS_SUCCESS;
-
-exit:
- kfree(PageBuffer1);
- kfree(PageBuffer0);
-
- printk(KERN_INFO "MS_CardInit end\n");
- return result;
-}
-
-/*
- * MS_LibCheckDisableBlock()
- */
-int MS_LibCheckDisableBlock(struct us_data *us, WORD PhyBlock)
-{
- PWORD PageBuf = NULL;
- DWORD result = MS_STATUS_SUCCESS;
- DWORD blk, index = 0;
- MS_LibTypeExtdat extdat;
-
- PageBuf = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
- if (PageBuf == NULL) {
- result = MS_NO_MEMORY_ERROR;
- goto exit;
- }
-
- MS_ReaderReadPage(us, PhyBlock, 1, (DWORD *)PageBuf, &extdat);
- do {
- blk = be16_to_cpu(PageBuf[index]);
- if (blk == MS_LB_NOT_USED)
- break;
- if (blk == us->MS_Lib.Log2PhyMap[0]) {
- result = MS_ERROR_FLASH_READ;
- break;
- }
- index++;
- } while (1);
-
-exit:
- kfree(PageBuf);
- return result;
-}
-
-/*
- * MS_LibFreeAllocatedArea()
- */
-void MS_LibFreeAllocatedArea(struct us_data *us)
-{
- MS_LibFreeWriteBuf(us);
- MS_LibFreeLogicalMap(us);
-
- us->MS_Lib.flags = 0;
- us->MS_Lib.BytesPerSector = 0;
- us->MS_Lib.SectorsPerCylinder = 0;
-
- us->MS_Lib.cardType = 0;
- us->MS_Lib.blockSize = 0;
- us->MS_Lib.PagesPerBlock = 0;
-
- us->MS_Lib.NumberOfPhyBlock = 0;
- us->MS_Lib.NumberOfLogBlock = 0;
-}
-
-/*
- * MS_LibFreeWriteBuf()
- */
-void MS_LibFreeWriteBuf(struct us_data *us)
-{
- us->MS_Lib.wrtblk = (WORD)-1; /* set to -1 */
-
- /* memset((fdoExt)->MS_Lib.pagemap, 0,
- sizeof((fdoExt)->MS_Lib.pagemap)) */
- MS_LibClearPageMap(us);
-
- if (us->MS_Lib.blkpag) {
- kfree((BYTE *)(us->MS_Lib.blkpag)); /* Arnold test ... */
- us->MS_Lib.blkpag = NULL;
- }
-
- if (us->MS_Lib.blkext) {
- kfree((BYTE *)(us->MS_Lib.blkext)); /* Arnold test ... */
- us->MS_Lib.blkext = NULL;
- }
-}
-
-/*
- * MS_LibFreeLogicalMap()
- */
-int MS_LibFreeLogicalMap(struct us_data *us)
-{
- kfree(us->MS_Lib.Phy2LogMap);
- us->MS_Lib.Phy2LogMap = NULL;
-
- kfree(us->MS_Lib.Log2PhyMap);
- us->MS_Lib.Log2PhyMap = NULL;
-
- return 0;
-}
-
-/*
- * MS_LibProcessBootBlock()
- */
-int MS_LibProcessBootBlock(struct us_data *us, WORD PhyBlock, BYTE *PageData)
-{
- MemStickBootBlockSysEnt *SysEntry;
- MemStickBootBlockSysInf *SysInfo;
- DWORD i, result;
- BYTE PageNumber;
- BYTE *PageBuffer;
- MS_LibTypeExtdat ExtraData;
-
-
- PageBuffer = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
- if (PageBuffer == NULL)
- return (DWORD)-1;
-
- result = (DWORD)-1;
-
- SysInfo = &(((MemStickBootBlockPage0 *)PageData)->sysinf);
-
- if ((SysInfo->bMsClass != MS_SYSINF_MSCLASS_TYPE_1) ||
- (be16_to_cpu(SysInfo->wPageSize) != MS_SYSINF_PAGE_SIZE) ||
- ((SysInfo->bSecuritySupport & MS_SYSINF_SECURITY) == MS_SYSINF_SECURITY_SUPPORT) ||
- (SysInfo->bReserved1 != MS_SYSINF_RESERVED1) ||
- (SysInfo->bReserved2 != MS_SYSINF_RESERVED2) ||
- (SysInfo->bFormatType != MS_SYSINF_FORMAT_FAT) ||
- (SysInfo->bUsage != MS_SYSINF_USAGE_GENERAL))
- goto exit;
-
- switch (us->MS_Lib.cardType = SysInfo->bCardType) {
- case MS_SYSINF_CARDTYPE_RDONLY:
- MS_LibCtrlSet(us, MS_LIB_CTRL_RDONLY);
- break;
- case MS_SYSINF_CARDTYPE_RDWR:
- MS_LibCtrlReset(us, MS_LIB_CTRL_RDONLY);
- break;
- case MS_SYSINF_CARDTYPE_HYBRID:
- default:
- goto exit;
- }
-
- us->MS_Lib.blockSize = be16_to_cpu(SysInfo->wBlockSize);
- us->MS_Lib.NumberOfPhyBlock = be16_to_cpu(SysInfo->wBlockNumber);
- us->MS_Lib.NumberOfLogBlock = be16_to_cpu(SysInfo->wTotalBlockNumber)
- -2;
- us->MS_Lib.PagesPerBlock = us->MS_Lib.blockSize * SIZE_OF_KIRO /
- MS_BYTES_PER_PAGE;
- us->MS_Lib.NumberOfSegment = us->MS_Lib.NumberOfPhyBlock /
- MS_PHYSICAL_BLOCKS_PER_SEGMENT;
- us->MS_Model = be16_to_cpu(SysInfo->wMemorySize);
-
-	/* Allocate maps for all logical and physical blocks */
- if (MS_LibAllocLogicalMap(us))
- goto exit;
-
-	/* Mark the boot block */
- MS_LibSetBootBlockMark(us, PhyBlock);
-
- SysEntry = &(((MemStickBootBlockPage0 *)PageData)->sysent);
-
- for (i = 0; i < MS_NUMBER_OF_SYSTEM_ENTRY; i++) {
- DWORD EntryOffset, EntrySize;
-
- EntryOffset = be32_to_cpu(SysEntry->entry[i].dwStart);
-
- if (EntryOffset == 0xffffff)
- continue;
- EntrySize = be32_to_cpu(SysEntry->entry[i].dwSize);
-
- if (EntrySize == 0)
- continue;
-
- if (EntryOffset + MS_BYTES_PER_PAGE + EntrySize >
- us->MS_Lib.blockSize * (DWORD)SIZE_OF_KIRO)
- continue;
-
- if (i == 0) {
- BYTE PrevPageNumber = 0;
- WORD phyblk;
-
- if (SysEntry->entry[i].bType !=
- MS_SYSENT_TYPE_INVALID_BLOCK)
- goto exit;
-
- while (EntrySize > 0) {
-
- PageNumber = (BYTE)(EntryOffset /
- MS_BYTES_PER_PAGE + 1);
- if (PageNumber != PrevPageNumber) {
- switch (MS_ReaderReadPage(us, PhyBlock,
- PageNumber, (DWORD *)PageBuffer,
- &ExtraData)) {
- case MS_STATUS_SUCCESS:
- break;
- case MS_STATUS_WRITE_PROTECT:
- case MS_ERROR_FLASH_READ:
- case MS_STATUS_ERROR:
- default:
- goto exit;
- }
-
- PrevPageNumber = PageNumber;
- }
-
- phyblk = be16_to_cpu(*(WORD *)(PageBuffer +
- (EntryOffset % MS_BYTES_PER_PAGE)));
- if (phyblk < 0x0fff)
- MS_LibSetInitialErrorBlock(us, phyblk);
-
- EntryOffset += 2;
- EntrySize -= 2;
- }
- } else if (i == 1) { /* CIS/IDI */
- MemStickBootBlockIDI *idi;
-
- if (SysEntry->entry[i].bType != MS_SYSENT_TYPE_CIS_IDI)
- goto exit;
-
- switch (MS_ReaderReadPage(us, PhyBlock,
- (BYTE)(EntryOffset / MS_BYTES_PER_PAGE + 1),
- (DWORD *)PageBuffer, &ExtraData)) {
- case MS_STATUS_SUCCESS:
- break;
- case MS_STATUS_WRITE_PROTECT:
- case MS_ERROR_FLASH_READ:
- case MS_STATUS_ERROR:
- default:
- goto exit;
- }
-
- idi = &((MemStickBootBlockCIS_IDI *)(PageBuffer +
- (EntryOffset % MS_BYTES_PER_PAGE)))->idi.idi;
- if (le16_to_cpu(idi->wIDIgeneralConfiguration) !=
- MS_IDI_GENERAL_CONF)
- goto exit;
-
- us->MS_Lib.BytesPerSector =
- le16_to_cpu(idi->wIDIbytesPerSector);
- if (us->MS_Lib.BytesPerSector != MS_BYTES_PER_PAGE)
- goto exit;
- }
- } /* End for .. */
-
- result = 0;
-
-exit:
- if (result)
- MS_LibFreeLogicalMap(us);
-
- kfree(PageBuffer);
-
- result = 0;
- return result;
-}
-
-/*
- * MS_LibAllocLogicalMap()
- */
-int MS_LibAllocLogicalMap(struct us_data *us)
-{
- DWORD i;
-
-
- us->MS_Lib.Phy2LogMap = kmalloc(us->MS_Lib.NumberOfPhyBlock *
- sizeof(WORD), GFP_KERNEL);
- us->MS_Lib.Log2PhyMap = kmalloc(us->MS_Lib.NumberOfLogBlock *
- sizeof(WORD), GFP_KERNEL);
-
- if ((us->MS_Lib.Phy2LogMap == NULL) ||
- (us->MS_Lib.Log2PhyMap == NULL)) {
- MS_LibFreeLogicalMap(us);
- return (DWORD)-1;
- }
-
- for (i = 0; i < us->MS_Lib.NumberOfPhyBlock; i++)
- us->MS_Lib.Phy2LogMap[i] = MS_LB_NOT_USED;
-
- for (i = 0; i < us->MS_Lib.NumberOfLogBlock; i++)
- us->MS_Lib.Log2PhyMap[i] = MS_LB_NOT_USED;
-
- return 0;
-}
-
-/*
- * MS_LibSetBootBlockMark()
- */
-int MS_LibSetBootBlockMark(struct us_data *us, WORD phyblk)
-{
- return MS_LibSetLogicalBlockMark(us, phyblk, MS_LB_BOOT_BLOCK);
-}
-
-/*
- * MS_LibSetLogicalBlockMark()
- */
-int MS_LibSetLogicalBlockMark(struct us_data *us, WORD phyblk, WORD mark)
-{
- if (phyblk >= us->MS_Lib.NumberOfPhyBlock)
- return (DWORD)-1;
-
- us->MS_Lib.Phy2LogMap[phyblk] = mark;
-
- return 0;
-}
-
-/*
- * MS_LibSetInitialErrorBlock()
- */
-int MS_LibSetInitialErrorBlock(struct us_data *us, WORD phyblk)
-{
- return MS_LibSetLogicalBlockMark(us, phyblk, MS_LB_INITIAL_ERROR);
-}
-
-/*
- * MS_LibScanLogicalBlockNumber()
- */
-int MS_LibScanLogicalBlockNumber(struct us_data *us, WORD btBlk1st)
-{
- WORD PhyBlock, newblk, i;
- WORD LogStart, LogEnde;
- MS_LibTypeExtdat extdat;
- BYTE buf[0x200];
- DWORD count = 0, index = 0;
-
- for (PhyBlock = 0; PhyBlock < us->MS_Lib.NumberOfPhyBlock;) {
- MS_LibPhy2LogRange(PhyBlock, &LogStart, &LogEnde);
-
- for (i = 0; i < MS_PHYSICAL_BLOCKS_PER_SEGMENT;
- i++, PhyBlock++) {
- switch (MS_LibConv2Logical(us, PhyBlock)) {
- case MS_STATUS_ERROR:
- continue;
- default:
- break;
- }
-
- if (count == PhyBlock) {
- MS_LibReadExtraBlock(us, PhyBlock,
- 0, 0x80, &buf);
- count += 0x80;
- }
- index = (PhyBlock % 0x80) * 4;
-
- extdat.ovrflg = buf[index];
- extdat.mngflg = buf[index+1];
- extdat.logadr = MemStickLogAddr(buf[index+2],
- buf[index+3]);
-
- if ((extdat.ovrflg & MS_REG_OVR_BKST) !=
- MS_REG_OVR_BKST_OK) {
- MS_LibSetAcquiredErrorBlock(us, PhyBlock);
- continue;
- }
-
- if ((extdat.mngflg & MS_REG_MNG_ATFLG) ==
- MS_REG_MNG_ATFLG_ATTBL) {
- MS_LibErasePhyBlock(us, PhyBlock);
- continue;
- }
-
- if (extdat.logadr != MS_LB_NOT_USED) {
- if ((extdat.logadr < LogStart) ||
- (LogEnde <= extdat.logadr)) {
- MS_LibErasePhyBlock(us, PhyBlock);
- continue;
- }
-
- newblk = MS_LibConv2Physical(us, extdat.logadr);
-
- if (newblk != MS_LB_NOT_USED) {
- if (extdat.logadr == 0) {
- MS_LibSetLogicalPair(us,
- extdat.logadr,
- PhyBlock);
- if (MS_LibCheckDisableBlock(us,
- btBlk1st)) {
- MS_LibSetLogicalPair(us,
- extdat.logadr, newblk);
- continue;
- }
- }
-
- MS_LibReadExtra(us, newblk, 0, &extdat);
- if ((extdat.ovrflg & MS_REG_OVR_UDST) ==
- MS_REG_OVR_UDST_UPDATING) {
- MS_LibErasePhyBlock(us,
- PhyBlock);
- continue;
- } else {
- MS_LibErasePhyBlock(us, newblk);
- }
- }
-
- MS_LibSetLogicalPair(us, extdat.logadr,
- PhyBlock);
- }
- }
- } /* End for ... */
-
- return MS_STATUS_SUCCESS;
-}
-
-/*
- * MS_LibAllocWriteBuf()
- */
-int MS_LibAllocWriteBuf(struct us_data *us)
-{
- us->MS_Lib.wrtblk = (WORD)-1;
-
- us->MS_Lib.blkpag = kmalloc(us->MS_Lib.PagesPerBlock *
- us->MS_Lib.BytesPerSector, GFP_KERNEL);
- us->MS_Lib.blkext = kmalloc(us->MS_Lib.PagesPerBlock *
- sizeof(MS_LibTypeExtdat), GFP_KERNEL);
-
- if ((us->MS_Lib.blkpag == NULL) || (us->MS_Lib.blkext == NULL)) {
- MS_LibFreeWriteBuf(us);
- return (DWORD)-1;
- }
-
- MS_LibClearWriteBuf(us);
-
- return 0;
-}
-
-/*
- * MS_LibClearWriteBuf()
- */
-void MS_LibClearWriteBuf(struct us_data *us)
-{
- int i;
-
- us->MS_Lib.wrtblk = (WORD)-1;
- MS_LibClearPageMap(us);
-
- if (us->MS_Lib.blkpag)
- memset(us->MS_Lib.blkpag, 0xff,
- us->MS_Lib.PagesPerBlock * us->MS_Lib.BytesPerSector);
-
- if (us->MS_Lib.blkext) {
- for (i = 0; i < us->MS_Lib.PagesPerBlock; i++) {
- us->MS_Lib.blkext[i].status1 = MS_REG_ST1_DEFAULT;
- us->MS_Lib.blkext[i].ovrflg = MS_REG_OVR_DEFAULT;
- us->MS_Lib.blkext[i].mngflg = MS_REG_MNG_DEFAULT;
- us->MS_Lib.blkext[i].logadr = MS_LB_NOT_USED;
- }
- }
-}
-
-/*
- * MS_LibPhy2LogRange()
- */
-void MS_LibPhy2LogRange(WORD PhyBlock, WORD *LogStart, WORD *LogEnde)
-{
- PhyBlock /= MS_PHYSICAL_BLOCKS_PER_SEGMENT;
-
- if (PhyBlock) {
- *LogStart = MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT +
- (PhyBlock - 1) * MS_LOGICAL_BLOCKS_PER_SEGMENT;/*496*/
- *LogEnde = *LogStart + MS_LOGICAL_BLOCKS_PER_SEGMENT;/*496*/
- } else {
- *LogStart = 0;
- *LogEnde = MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT;/*494*/
- }
-}
-
-/*
- * MS_LibReadExtraBlock()
- */
-int MS_LibReadExtraBlock(struct us_data *us, DWORD PhyBlock,
- BYTE PageNum, BYTE blen, void *buf)
-{
- struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
- int result;
-
- /* printk("MS_LibReadExtraBlock --- PhyBlock = %x,
- PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen); */
-
- /* Read Extra Data */
- memset(bcb, 0, sizeof(struct bulk_cb_wrap));
- bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
- bcb->DataTransferLength = 0x4 * blen;
- bcb->Flags = 0x80;
- bcb->CDB[0] = 0xF1;
- bcb->CDB[1] = 0x03;
- bcb->CDB[5] = (BYTE)(PageNum);
- bcb->CDB[4] = (BYTE)(PhyBlock);
- bcb->CDB[3] = (BYTE)(PhyBlock>>8);
- bcb->CDB[2] = (BYTE)(PhyBlock>>16);
- bcb->CDB[6] = blen;
-
- result = ENE_SendScsiCmd(us, FDIR_READ, buf, 0);
- if (result != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
-
- return USB_STOR_TRANSPORT_GOOD;
-}
-
-/*
- * MS_LibReadExtra()
- */
-int MS_LibReadExtra(struct us_data *us, DWORD PhyBlock,
- BYTE PageNum, MS_LibTypeExtdat *ExtraDat)
-{
- struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
- int result;
- BYTE ExtBuf[4];
-
- /* printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n"
- , PhyBlock, PageNum); */
- memset(bcb, 0, sizeof(struct bulk_cb_wrap));
- bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
- bcb->DataTransferLength = 0x4;
- bcb->Flags = 0x80;
- bcb->CDB[0] = 0xF1;
- bcb->CDB[1] = 0x03;
- bcb->CDB[5] = (BYTE)(PageNum);
- bcb->CDB[4] = (BYTE)(PhyBlock);
- bcb->CDB[3] = (BYTE)(PhyBlock>>8);
- bcb->CDB[2] = (BYTE)(PhyBlock>>16);
- bcb->CDB[6] = 0x01;
-
- result = ENE_SendScsiCmd(us, FDIR_READ, &ExtBuf, 0);
- if (result != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
-
- ExtraDat->reserved = 0;
-	ExtraDat->intr = 0x80; /* Not yet, waiting for firmware support */
-	ExtraDat->status0 = 0x10; /* Not yet, waiting for firmware support */
-	ExtraDat->status1 = 0x00; /* Not yet, waiting for firmware support */
- ExtraDat->ovrflg = ExtBuf[0];
- ExtraDat->mngflg = ExtBuf[1];
- ExtraDat->logadr = MemStickLogAddr(ExtBuf[2], ExtBuf[3]);
-
- return USB_STOR_TRANSPORT_GOOD;
-}
-
-/*
- * MS_LibSetAcquiredErrorBlock()
- */
-int MS_LibSetAcquiredErrorBlock(struct us_data *us, WORD phyblk)
-{
- WORD log;
-
- if (phyblk >= us->MS_Lib.NumberOfPhyBlock)
- return (DWORD)-1;
-
- log = us->MS_Lib.Phy2LogMap[phyblk];
-
- if (log < us->MS_Lib.NumberOfLogBlock)
- us->MS_Lib.Log2PhyMap[log] = MS_LB_NOT_USED;
-
- if (us->MS_Lib.Phy2LogMap[phyblk] != MS_LB_INITIAL_ERROR)
- us->MS_Lib.Phy2LogMap[phyblk] = MS_LB_ACQUIRED_ERROR;
-
- return 0;
-}
-
-/*
- * MS_LibErasePhyBlock()
- */
-int MS_LibErasePhyBlock(struct us_data *us, WORD phyblk)
-{
- WORD log;
-
- if (phyblk >= us->MS_Lib.NumberOfPhyBlock)
- return MS_STATUS_ERROR;
-
- log = us->MS_Lib.Phy2LogMap[phyblk];
-
- if (log < us->MS_Lib.NumberOfLogBlock)
- us->MS_Lib.Log2PhyMap[log] = MS_LB_NOT_USED;
-
- us->MS_Lib.Phy2LogMap[phyblk] = MS_LB_NOT_USED;
-
- if (MS_LibIsWritable(us)) {
- switch (MS_ReaderEraseBlock(us, phyblk)) {
- case MS_STATUS_SUCCESS:
- us->MS_Lib.Phy2LogMap[phyblk] = MS_LB_NOT_USED_ERASED;
- return MS_STATUS_SUCCESS;
- case MS_ERROR_FLASH_ERASE:
- case MS_STATUS_INT_ERROR:
- MS_LibErrorPhyBlock(us, phyblk);
- return MS_ERROR_FLASH_ERASE;
- case MS_STATUS_ERROR:
- default:
- MS_LibCtrlSet(us, MS_LIB_CTRL_RDONLY);
- MS_LibSetAcquiredErrorBlock(us, phyblk);
- return MS_STATUS_ERROR;
- }
- }
-
- MS_LibSetAcquiredErrorBlock(us, phyblk);
-
- return MS_STATUS_SUCCESS;
-}
-
-/*
- * MS_LibErrorPhyBlock()
- */
-int MS_LibErrorPhyBlock(struct us_data *us, WORD phyblk)
-{
- if (phyblk >= us->MS_Lib.NumberOfPhyBlock)
- return MS_STATUS_ERROR;
-
- MS_LibSetAcquiredErrorBlock(us, phyblk);
-
- if (MS_LibIsWritable(us))
- return MS_LibOverwriteExtra(us, phyblk, 0,
- (BYTE)(~MS_REG_OVR_BKST & BYTE_MASK));
-
-
- return MS_STATUS_SUCCESS;
-}
-
-/*
- * MS_LibOverwriteExtra()
- */
-int MS_LibOverwriteExtra(struct us_data *us, DWORD PhyBlockAddr,
- BYTE PageNum, BYTE OverwriteFlag)
-{
- struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
- int result;
-
- /* printk("MS --- MS_LibOverwriteExtra, \
- PhyBlockAddr = %x, PageNum = %x\n", PhyBlockAddr, PageNum); */
- result = ENE_LoadBinCode(us, MS_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
-
- memset(bcb, 0, sizeof(struct bulk_cb_wrap));
- bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
- bcb->DataTransferLength = 0x4;
- bcb->Flags = 0x80;
- bcb->CDB[0] = 0xF2;
- bcb->CDB[1] = 0x05;
- bcb->CDB[5] = (BYTE)(PageNum);
- bcb->CDB[4] = (BYTE)(PhyBlockAddr);
- bcb->CDB[3] = (BYTE)(PhyBlockAddr>>8);
- bcb->CDB[2] = (BYTE)(PhyBlockAddr>>16);
- bcb->CDB[6] = OverwriteFlag;
- bcb->CDB[7] = 0xFF;
- bcb->CDB[8] = 0xFF;
- bcb->CDB[9] = 0xFF;
-
- result = ENE_SendScsiCmd(us, FDIR_READ, NULL, 0);
- if (result != USB_STOR_XFER_GOOD)
- return USB_STOR_TRANSPORT_ERROR;
-
- return USB_STOR_TRANSPORT_GOOD;
-}
-
-/*
- * MS_LibForceSetLogicalPair()
- */
-int MS_LibForceSetLogicalPair(struct us_data *us, WORD logblk, WORD phyblk)
-{
- if (logblk == MS_LB_NOT_USED)
- return 0;
-
- if ((logblk >= us->MS_Lib.NumberOfLogBlock) ||
- (phyblk >= us->MS_Lib.NumberOfPhyBlock))
- return (DWORD)-1;
-
- us->MS_Lib.Phy2LogMap[phyblk] = logblk;
- us->MS_Lib.Log2PhyMap[logblk] = phyblk;
-
- return 0;
-}
-
-/*
- * MS_LibSetLogicalPair()
- */
-int MS_LibSetLogicalPair(struct us_data *us, WORD logblk, WORD phyblk)
-{
- if ((logblk >= us->MS_Lib.NumberOfLogBlock) ||
- (phyblk >= us->MS_Lib.NumberOfPhyBlock))
- return (DWORD)-1;
-
- us->MS_Lib.Phy2LogMap[phyblk] = logblk;
- us->MS_Lib.Log2PhyMap[logblk] = phyblk;
-
- return 0;
-}
-
-/*
- * MS_CountFreeBlock()
- */
-int MS_CountFreeBlock(struct us_data *us, WORD PhyBlock)
-{
- DWORD Ende, Count;
-
- Ende = PhyBlock + MS_PHYSICAL_BLOCKS_PER_SEGMENT;
- for (Count = 0; PhyBlock < Ende; PhyBlock++) {
- switch (us->MS_Lib.Phy2LogMap[PhyBlock]) {
- case MS_LB_NOT_USED:
- case MS_LB_NOT_USED_ERASED:
- Count++;
- default:
- break;
- }
- }
-
- return Count;
-}
-
-/*
- * MS_LibSearchBlockFromPhysical()
- */
-int MS_LibSearchBlockFromPhysical(struct us_data *us, WORD phyblk)
-{
- WORD Newblk;
- WORD blk;
- MS_LibTypeExtdat extdat;
-
- if (phyblk >= us->MS_Lib.NumberOfPhyBlock)
- return MS_LB_ERROR;
-
- for (blk = phyblk + 1; blk != phyblk; blk++) {
- if ((blk & MS_PHYSICAL_BLOCKS_PER_SEGMENT_MASK) == 0)
- blk -= MS_PHYSICAL_BLOCKS_PER_SEGMENT;
-
- Newblk = us->MS_Lib.Phy2LogMap[blk];
- if (us->MS_Lib.Phy2LogMap[blk] == MS_LB_NOT_USED_ERASED)
- return blk;
- else if (us->MS_Lib.Phy2LogMap[blk] == MS_LB_NOT_USED) {
- switch (MS_LibReadExtra(us, blk, 0, &extdat)) {
- case MS_STATUS_SUCCESS:
- case MS_STATUS_SUCCESS_WITH_ECC:
- break;
- case MS_NOCARD_ERROR:
- return MS_NOCARD_ERROR;
- case MS_STATUS_INT_ERROR:
- return MS_LB_ERROR;
- case MS_ERROR_FLASH_READ:
- default:
- MS_LibSetAcquiredErrorBlock(us, blk);
- /* MS_LibErrorPhyBlock(fdoExt, blk); */
- continue;
- } /* End switch */
-
- if ((extdat.ovrflg & MS_REG_OVR_BKST) !=
- MS_REG_OVR_BKST_OK) {
- MS_LibSetAcquiredErrorBlock(us, blk);
- continue;
- }
-
- switch (MS_LibErasePhyBlock(us, blk)) {
- case MS_STATUS_SUCCESS:
- return blk;
- case MS_STATUS_ERROR:
- return MS_LB_ERROR;
- case MS_ERROR_FLASH_ERASE:
- default:
- MS_LibErrorPhyBlock(us, blk);
- break;
- }
- }
- } /* End for */
-
- return MS_LB_ERROR;
-}
-
-/*
- * MS_LibSearchBlockFromLogical()
- */
-int MS_LibSearchBlockFromLogical(struct us_data *us, WORD logblk)
-{
- WORD phyblk;
-
- phyblk = MS_LibConv2Physical(us, logblk);
- if (phyblk >= MS_LB_ERROR) {
- if (logblk >= us->MS_Lib.NumberOfLogBlock)
- return MS_LB_ERROR;
-
- phyblk = (logblk + MS_NUMBER_OF_BOOT_BLOCK) /
- MS_LOGICAL_BLOCKS_PER_SEGMENT;
- phyblk *= MS_PHYSICAL_BLOCKS_PER_SEGMENT;
- phyblk += MS_PHYSICAL_BLOCKS_PER_SEGMENT - 1;
- }
-
- return MS_LibSearchBlockFromPhysical(us, phyblk);
-}
diff --git a/drivers/staging/keucr/ms.h b/drivers/staging/keucr/ms.h
deleted file mode 100644
index a3da4be3f55..00000000000
--- a/drivers/staging/keucr/ms.h
+++ /dev/null
@@ -1,401 +0,0 @@
-#ifndef MS_INCD
-#define MS_INCD
-
-#include <linux/blkdev.h>
-#include "common.h"
-
-/* MemoryStick Register */
-/* Status Register 0 */
-#define MS_REG_ST0_MB 0x80 /* media busy */
-#define MS_REG_ST0_FB0 0x40 /* flush busy 0 */
-#define MS_REG_ST0_BE 0x20 /* buffer empty */
-#define MS_REG_ST0_BF 0x10 /* buffer full */
-#define MS_REG_ST0_SL 0x02 /* sleep */
-#define MS_REG_ST0_WP 0x01 /* write protected */
-#define MS_REG_ST0_WP_ON MS_REG_ST0_WP
-#define MS_REG_ST0_WP_OFF 0x00
-
-/* Status Register 1 */
-#define MS_REG_ST1_MB 0x80 /* media busy */
-#define MS_REG_ST1_FB1 0x40 /* flush busy 1 */
-#define MS_REG_ST1_DTER 0x20 /* error on data(corrected) */
-#define MS_REG_ST1_UCDT 0x10 /* unable to correct data */
-#define MS_REG_ST1_EXER 0x08 /* error on extra(corrected) */
-#define MS_REG_ST1_UCEX 0x04 /* unable to correct extra */
-#define MS_REG_ST1_FGER 0x02 /* error on overwrite flag(corrected) */
-#define MS_REG_ST1_UCFG 0x01 /* unable to correct overwrite flag */
-#define MS_REG_ST1_DEFAULT (MS_REG_ST1_MB | MS_REG_ST1_FB1 | \
- MS_REG_ST1_DTER | MS_REG_ST1_UCDT | \
- MS_REG_ST1_EXER | MS_REG_ST1_UCEX | \
- MS_REG_ST1_FGER | MS_REG_ST1_UCFG)
-
-/* System Parameter */
-#define MS_REG_SYSPAR_BAMD 0x80 /* block address mode */
-#define MS_REG_SYSPAR_BAND_LINEAR MS_REG_SYSPAR_BAMD /* linear mode */
-#define MS_REG_SYSPAR_BAND_CHIP 0x00 /* chip mode */
-#define MS_REG_SYSPAR_ATEN 0x40 /* attribute ROM enable */
-#define MS_REG_SYSPAR_ATEN_ENABLE MS_REG_SYSPAR_ATEN /* enable */
-#define MS_REG_SYSPAR_ATEN_DISABLE 0x00 /* disable */
-#define MS_REG_SYSPAR_RESERVED 0x2f
-
-/* Command Parameter */
-#define MS_REG_CMDPAR_CP2 0x80
-#define MS_REG_CMDPAR_CP1 0x40
-#define MS_REG_CMDPAR_CP0 0x20
-#define MS_REG_CMDPAR_BLOCK_ACCESS 0
-#define MS_REG_CMDPAR_PAGE_ACCESS MS_REG_CMDPAR_CP0
-#define MS_REG_CMDPAR_EXTRA_DATA MS_REG_CMDPAR_CP1
-#define MS_REG_CMDPAR_OVERWRITE MS_REG_CMDPAR_CP2
-#define MS_REG_CMDPAR_RESERVED 0x1f
-
-/* Overwrite Area */
-#define MS_REG_OVR_BKST 0x80 /* block status */
-#define MS_REG_OVR_BKST_OK MS_REG_OVR_BKST /* OK */
-#define MS_REG_OVR_BKST_NG 0x00 /* NG */
-#define MS_REG_OVR_PGST0 0x40 /* page status */
-#define MS_REG_OVR_PGST1 0x20
-#define MS_REG_OVR_PGST_MASK (MS_REG_OVR_PGST0 | MS_REG_OVR_PGST1)
-#define MS_REG_OVR_PGST_OK (MS_REG_OVR_PGST0 | MS_REG_OVR_PGST1) /* OK */
-#define MS_REG_OVR_PGST_NG MS_REG_OVR_PGST1 /* NG */
-#define MS_REG_OVR_PGST_DATA_ERROR 0x00 /* data error */
-#define MS_REG_OVR_UDST 0x10 /* update status */
-#define MS_REG_OVR_UDST_UPDATING 0x00 /* updating */
-#define MS_REG_OVR_UDST_NO_UPDATE MS_REG_OVR_UDST
-#define MS_REG_OVR_RESERVED 0x08
-#define MS_REG_OVR_DEFAULT (MS_REG_OVR_BKST_OK | \
- MS_REG_OVR_PGST_OK | \
- MS_REG_OVR_UDST_NO_UPDATE | \
- MS_REG_OVR_RESERVED)
-/* Management Flag */
-#define MS_REG_MNG_SCMS0 0x20 /* serial copy management system */
-#define MS_REG_MNG_SCMS1 0x10
-#define MS_REG_MNG_SCMS_MASK (MS_REG_MNG_SCMS0 | MS_REG_MNG_SCMS1)
-#define MS_REG_MNG_SCMS_COPY_OK (MS_REG_MNG_SCMS0 | MS_REG_MNG_SCMS1)
-#define MS_REG_MNG_SCMS_ONE_COPY MS_REG_MNG_SCMS1
-#define MS_REG_MNG_SCMS_NO_COPY 0x00
-#define MS_REG_MNG_ATFLG 0x08 /* address transfer table flag */
-#define MS_REG_MNG_ATFLG_OTHER MS_REG_MNG_ATFLG /* other */
-#define MS_REG_MNG_ATFLG_ATTBL 0x00 /* address transfer table */
-#define MS_REG_MNG_SYSFLG 0x04 /* system flag */
-#define MS_REG_MNG_SYSFLG_USER MS_REG_MNG_SYSFLG /* user block */
-#define MS_REG_MNG_SYSFLG_BOOT 0x00 /* system block */
-#define MS_REG_MNG_RESERVED 0xc3
-#define MS_REG_MNG_DEFAULT (MS_REG_MNG_SCMS_COPY_OK | \
- MS_REG_MNG_ATFLG_OTHER | \
- MS_REG_MNG_SYSFLG_USER | \
- MS_REG_MNG_RESERVED)
-
-/* Error codes */
-#define MS_STATUS_SUCCESS 0x0000
-#define MS_ERROR_OUT_OF_SPACE 0x0103
-#define MS_STATUS_WRITE_PROTECT 0x0106
-#define MS_ERROR_READ_DATA 0x8002
-#define MS_ERROR_FLASH_READ 0x8003
-#define MS_ERROR_FLASH_WRITE 0x8004
-#define MS_ERROR_FLASH_ERASE 0x8005
-#define MS_ERROR_FLASH_COPY 0x8006
-
-#define MS_STATUS_ERROR 0xfffe
-#define MS_FIFO_ERROR 0xfffd
-#define MS_UNDEFINED_ERROR 0xfffc
-#define MS_KETIMEOUT_ERROR 0xfffb
-#define MS_STATUS_INT_ERROR 0xfffa
-#define MS_NO_MEMORY_ERROR 0xfff9
-#define MS_NOCARD_ERROR 0xfff8
-#define MS_LB_NOT_USED 0xffff
-#define MS_LB_ERROR 0xfff0
-#define MS_LB_BOOT_BLOCK 0xfff1
-#define MS_LB_INITIAL_ERROR 0xfff2
-#define MS_STATUS_SUCCESS_WITH_ECC 0xfff3
-#define MS_LB_ACQUIRED_ERROR 0xfff4
-#define MS_LB_NOT_USED_ERASED 0xfff5
-
-#define MS_LibConv2Physical(pdx, LogBlock) \
- (((LogBlock) >= (pdx)->MS_Lib.NumberOfLogBlock) ? \
- MS_STATUS_ERROR : (pdx)->MS_Lib.Log2PhyMap[LogBlock])
-#define MS_LibConv2Logical(pdx, PhyBlock) \
- (((PhyBlock) >= (pdx)->MS_Lib.NumberOfPhyBlock) ? \
- MS_STATUS_ERROR : (pdx)->MS_Lib.Phy2LogMap[PhyBlock])
-	/* phy->log table */
-
-#define MS_LIB_CTRL_RDONLY 0
-#define MS_LIB_CTRL_WRPROTECT 1
-#define MS_LibCtrlCheck(pdx, Flag) ((pdx)->MS_Lib.flags & (1 << (Flag)))
-
-#define MS_LibCtrlSet(pdx, Flag) ((pdx)->MS_Lib.flags |= (1 << (Flag)))
-#define MS_LibCtrlReset(pdx, Flag) ((pdx)->MS_Lib.flags &= ~(1 << (Flag)))
-#define MS_LibIsWritable(pdx) \
- ((MS_LibCtrlCheck((pdx), MS_LIB_CTRL_RDONLY) == 0) && \
- (MS_LibCtrlCheck(pdx, MS_LIB_CTRL_WRPROTECT) == 0))
-
-#define MS_MAX_PAGES_PER_BLOCK 32
-#define MS_LIB_BITS_PER_BYTE 8
-
-#define MS_LibPageMapIdx(n) ((n) / MS_LIB_BITS_PER_BYTE)
-#define MS_LibPageMapBit(n) (1 << ((n) % MS_LIB_BITS_PER_BYTE))
-#define MS_LibCheckPageMapBit(pdx, n) \
- ((pdx)->MS_Lib.pagemap[MS_LibPageMapIdx(n)] & MS_LibPageMapBit(n))
-#define MS_LibSetPageMapBit(pdx, n) \
- ((pdx)->MS_Lib.pagemap[MS_LibPageMapIdx(n)] |= MS_LibPageMapBit(n))
-#define MS_LibResetPageMapBit(pdx, n) \
- ((pdx)->MS_Lib.pagemap[MS_LibPageMapIdx(n)] &= ~MS_LibPageMapBit(n))
-#define MS_LibClearPageMap(pdx) \
- memset((pdx)->MS_Lib.pagemap, 0, sizeof((pdx)->MS_Lib.pagemap))
-
-
-#define MemStickLogAddr(logadr1, logadr0) \
- ((((WORD)(logadr1)) << 8) | (logadr0))
-
-#define MS_BYTES_PER_PAGE 512
-
-#define MS_MAX_INITIAL_ERROR_BLOCKS 10
-#define MS_NUMBER_OF_PAGES_FOR_BOOT_BLOCK 3
-#define MS_NUMBER_OF_PAGES_FOR_LPCTBL 2
-
-#define MS_NUMBER_OF_BOOT_BLOCK 2
-#define MS_NUMBER_OF_SYSTEM_BLOCK 4
-#define MS_LOGICAL_BLOCKS_PER_SEGMENT 496
-#define MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT 494
-#define MS_PHYSICAL_BLOCKS_PER_SEGMENT 0x200 /* 512 */
-#define MS_PHYSICAL_BLOCKS_PER_SEGMENT_MASK 0x1ff
-
-#define MS_SECTOR_SIZE 512
-#define MBR_SIGNATURE 0xAA55
-#define PBR_SIGNATURE 0xAA55
-
-#define PARTITION_FAT_12 1
-#define PARTITION_FAT_16 2
-
-#define MS_BOOT_BLOCK_ID 0x0001
-#define MS_BOOT_BLOCK_FORMAT_VERSION 0x0100
-#define MS_BOOT_BLOCK_DATA_ENTRIES 2
-
-#define MS_SYSINF_MSCLASS_TYPE_1 1
-#define MS_SYSINF_CARDTYPE_RDONLY 1
-#define MS_SYSINF_CARDTYPE_RDWR 2
-#define MS_SYSINF_CARDTYPE_HYBRID 3
-#define MS_SYSINF_SECURITY 0x01
-#define MS_SYSINF_SECURITY_NO_SUPPORT MS_SYSINF_SECURITY
-#define MS_SYSINF_SECURITY_SUPPORT 0
-#define MS_SYSINF_FORMAT_MAT 0 /* ? */
-#define MS_SYSINF_FORMAT_FAT 1
-#define MS_SYSINF_USAGE_GENERAL 0
-#define MS_SYSINF_PAGE_SIZE MS_BYTES_PER_PAGE /* fixed */
-#define MS_SYSINF_RESERVED1 1
-#define MS_SYSINF_RESERVED2 1
-
-#define MS_SYSENT_TYPE_INVALID_BLOCK 0x01
-#define MS_SYSENT_TYPE_CIS_IDI 0x0a /* CIS/IDI */
-
-#define SIZE_OF_KIRO 1024
-
-/* BOOT BLOCK */
-#define MS_NUMBER_OF_SYSTEM_ENTRY 4
-
-/*
- * MemStickRegisters
- */
-/* Status registers (16 bytes) */
-typedef struct {
- BYTE Reserved0; /* 00 */
- BYTE INTRegister; /* 01 */
- BYTE StatusRegister0; /* 02 */
- BYTE StatusRegister1; /* 03 */
- BYTE Reserved1[12]; /* 04-0F */
-} MemStickStatusRegisters;
-
-/* Parameter registers (6 bytes) */
-typedef struct {
- BYTE SystemParameter; /* 10 */
- BYTE BlockAddress2; /* 11 */
- BYTE BlockAddress1; /* 12 */
- BYTE BlockAddress0; /* 13 */
- BYTE CMDParameter; /* 14 */
- BYTE PageAddress; /* 15 */
-} MemStickParameterRegisters;
-
-/* Extra registers (9 bytes) */
-typedef struct {
- BYTE OverwriteFlag; /* 16 */
- BYTE ManagementFlag; /* 17 */
- BYTE LogicalAddress1; /* 18 */
- BYTE LogicalAddress0; /* 19 */
- BYTE ReservedArea[5]; /* 1A-1E */
-} MemStickExtraDataRegisters;
-
-/* All registers in Memory Stick (32 bytes, includes 1 byte padding) */
-typedef struct {
- MemStickStatusRegisters status;
- MemStickParameterRegisters param;
- MemStickExtraDataRegisters extra;
- BYTE padding;
-} MemStickRegisters, *PMemStickRegisters;
-
-/*
- * MemStickBootBlockPage0
- */
-typedef struct {
- WORD wBlockID;
- WORD wFormatVersion;
- BYTE bReserved1[184];
- BYTE bNumberOfDataEntry;
- BYTE bReserved2[179];
-} MemStickBootBlockHeader;
-
-typedef struct {
- DWORD dwStart;
- DWORD dwSize;
- BYTE bType;
- BYTE bReserved[3];
-} MemStickBootBlockSysEntRec;
-
-typedef struct {
- MemStickBootBlockSysEntRec entry[MS_NUMBER_OF_SYSTEM_ENTRY];
-} MemStickBootBlockSysEnt;
-
-typedef struct {
- BYTE bMsClass; /* must be 1 */
- BYTE bCardType; /* see below */
- WORD wBlockSize; /* n KB */
- WORD wBlockNumber; /* number of physical block */
- WORD wTotalBlockNumber; /* number of logical block */
- WORD wPageSize; /* must be 0x200 */
- BYTE bExtraSize; /* 0x10 */
- BYTE bSecuritySupport;
- BYTE bAssemblyDate[8];
- BYTE bFactoryArea[4];
- BYTE bAssemblyMakerCode;
- BYTE bAssemblyMachineCode[3];
- WORD wMemoryMakerCode;
- WORD wMemoryDeviceCode;
- WORD wMemorySize;
- BYTE bReserved1;
- BYTE bReserved2;
- BYTE bVCC;
- BYTE bVPP;
- WORD wControllerChipNumber;
- WORD wControllerFunction; /* New MS */
- BYTE bReserved3[9]; /* New MS */
- BYTE bParallelSupport; /* New MS */
- WORD wFormatValue; /* New MS */
- BYTE bFormatType;
- BYTE bUsage;
- BYTE bDeviceType;
- BYTE bReserved4[22];
- BYTE bFUValue3;
- BYTE bFUValue4;
- BYTE bReserved5[15];
-} MemStickBootBlockSysInf;
-
-typedef struct {
- MemStickBootBlockHeader header;
- MemStickBootBlockSysEnt sysent;
- MemStickBootBlockSysInf sysinf;
-} MemStickBootBlockPage0;
-
-/*
- * MemStickBootBlockCIS_IDI
- */
-typedef struct {
- BYTE bCistplDEVICE[6]; /* 0 */
- BYTE bCistplDEVICE0C[6]; /* 6 */
- BYTE bCistplJEDECC[4]; /* 12 */
- BYTE bCistplMANFID[6]; /* 16 */
- BYTE bCistplVER1[32]; /* 22 */
- BYTE bCistplFUNCID[4]; /* 54 */
- BYTE bCistplFUNCE0[4]; /* 58 */
- BYTE bCistplFUNCE1[5]; /* 62 */
- BYTE bCistplCONF[7]; /* 67 */
- BYTE bCistplCFTBLENT0[10]; /* 74 */
- BYTE bCistplCFTBLENT1[8]; /* 84 */
- BYTE bCistplCFTBLENT2[12]; /* 92 */
- BYTE bCistplCFTBLENT3[8]; /* 104 */
- BYTE bCistplCFTBLENT4[17]; /* 112 */
- BYTE bCistplCFTBLENT5[8]; /* 129 */
- BYTE bCistplCFTBLENT6[17]; /* 137 */
- BYTE bCistplCFTBLENT7[8]; /* 154 */
- BYTE bCistplNOLINK[3]; /* 162 */
-} MemStickBootBlockCIS;
-
-typedef struct {
-#define MS_IDI_GENERAL_CONF 0x848A
- WORD wIDIgeneralConfiguration; /* 0 */
- WORD wIDInumberOfCylinder; /* 1 */
- WORD wIDIreserved0; /* 2 */
- WORD wIDInumberOfHead; /* 3 */
- WORD wIDIbytesPerTrack; /* 4 */
- WORD wIDIbytesPerSector; /* 5 */
- WORD wIDIsectorsPerTrack; /* 6 */
- WORD wIDItotalSectors[2]; /* 7-8 high,low */
- WORD wIDIreserved1[11]; /* 9-19 */
- WORD wIDIbufferType; /* 20 */
- WORD wIDIbufferSize; /* 21 */
- WORD wIDIlongCmdECC; /* 22 */
- WORD wIDIfirmVersion[4]; /* 23-26 */
- WORD wIDImodelName[20]; /* 27-46 */
- WORD wIDIreserved2; /* 47 */
- WORD wIDIlongWordSupported; /* 48 */
- WORD wIDIdmaSupported; /* 49 */
- WORD wIDIreserved3; /* 50 */
- WORD wIDIpioTiming; /* 51 */
- WORD wIDIdmaTiming; /* 52 */
- WORD wIDItransferParameter; /* 53 */
- WORD wIDIformattedCylinder; /* 54 */
- WORD wIDIformattedHead; /* 55 */
- WORD wIDIformattedSectorsPerTrack; /* 56 */
- WORD wIDIformattedTotalSectors[2]; /* 57-58 */
- WORD wIDImultiSector; /* 59 */
- WORD wIDIlbaSectors[2]; /* 60-61 */
- WORD wIDIsingleWordDMA; /* 62 */
- WORD wIDImultiWordDMA; /* 63 */
- WORD wIDIreserved4[192]; /* 64-255 */
-} MemStickBootBlockIDI;
-
-typedef struct {
- union {
- MemStickBootBlockCIS cis;
- BYTE dmy[256];
- } cis;
-
- union {
- MemStickBootBlockIDI idi;
- BYTE dmy[256];
- } idi;
-
-} MemStickBootBlockCIS_IDI;
-
-/*
- * MS_LibControl
- */
-typedef struct {
- BYTE reserved;
- BYTE intr;
- BYTE status0;
- BYTE status1;
- BYTE ovrflg;
- BYTE mngflg;
- WORD logadr;
-} MS_LibTypeExtdat;
-
-typedef struct {
- DWORD flags;
- DWORD BytesPerSector;
- DWORD NumberOfCylinder;
- DWORD SectorsPerCylinder;
- WORD cardType; /* R/W, RO, Hybrid */
- WORD blockSize;
- WORD PagesPerBlock;
- WORD NumberOfPhyBlock;
- WORD NumberOfLogBlock;
- WORD NumberOfSegment;
- WORD *Phy2LogMap; /* phy2log table */
- WORD *Log2PhyMap; /* log2phy table */
- WORD wrtblk;
- BYTE pagemap[(MS_MAX_PAGES_PER_BLOCK + (MS_LIB_BITS_PER_BYTE-1)) /
- MS_LIB_BITS_PER_BYTE];
- BYTE *blkpag;
- MS_LibTypeExtdat *blkext;
- BYTE copybuf[512];
-} MS_LibControl;
-
-#endif
diff --git a/drivers/staging/keucr/msscsi.c b/drivers/staging/keucr/msscsi.c
deleted file mode 100644
index cb7190e0e18..00000000000
--- a/drivers/staging/keucr/msscsi.c
+++ /dev/null
@@ -1,344 +0,0 @@
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_eh.h>
-#include <scsi/scsi_device.h>
-
-#include "usb.h"
-#include "scsiglue.h"
-#include "transport.h"
-
-/*
- * MS_SCSI_Test_Unit_Ready()
- */
-int MS_SCSI_Test_Unit_Ready(struct us_data *us, struct scsi_cmnd *srb)
-{
- /* pr_info("MS_SCSI_Test_Unit_Ready\n"); */
- if (us->MS_Status.Insert && us->MS_Status.Ready)
- return USB_STOR_TRANSPORT_GOOD;
- else {
- ENE_MSInit(us);
- return USB_STOR_TRANSPORT_GOOD;
- }
-
- return USB_STOR_TRANSPORT_GOOD;
-}
-
-/*
- * MS_SCSI_Inquiry()
- */
-int MS_SCSI_Inquiry(struct us_data *us, struct scsi_cmnd *srb)
-{
- /* pr_info("MS_SCSI_Inquiry\n"); */
- BYTE data_ptr[36] = {0x00, 0x80, 0x02, 0x00, 0x1F, 0x00,
- 0x00, 0x00, 0x55, 0x53, 0x42, 0x32,
- 0x2E, 0x30, 0x20, 0x20, 0x43, 0x61,
- 0x72, 0x64, 0x52, 0x65, 0x61, 0x64,
- 0x65, 0x72, 0x20, 0x20, 0x20, 0x20,
- 0x20, 0x20, 0x30, 0x31, 0x30, 0x30};
-
- usb_stor_set_xfer_buf(us, data_ptr, 36, srb, TO_XFER_BUF);
- return USB_STOR_TRANSPORT_GOOD;
-}
-
-/*
- * MS_SCSI_Mode_Sense()
- */
-int MS_SCSI_Mode_Sense(struct us_data *us, struct scsi_cmnd *srb)
-{
- BYTE mediaNoWP[12] = {0x0b, 0x00, 0x00, 0x08, 0x00, 0x00,
- 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00};
- BYTE mediaWP[12] = {0x0b, 0x00, 0x80, 0x08, 0x00, 0x00,
- 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00};
-
- if (us->MS_Status.WtP)
- usb_stor_set_xfer_buf(us, mediaWP, 12, srb, TO_XFER_BUF);
- else
- usb_stor_set_xfer_buf(us, mediaNoWP, 12, srb, TO_XFER_BUF);
-
-
- return USB_STOR_TRANSPORT_GOOD;
-}
-
-/*
- * MS_SCSI_Read_Capacity()
- */
-int MS_SCSI_Read_Capacity(struct us_data *us, struct scsi_cmnd *srb)
-{
- unsigned int offset = 0;
- struct scatterlist *sg = NULL;
- DWORD bl_num;
- WORD bl_len;
- BYTE buf[8];
-
- pr_info("MS_SCSI_Read_Capacity\n");
-
- bl_len = 0x200;
- if (us->MS_Status.IsMSPro)
- bl_num = us->MSP_TotalBlock - 1;
- else
- bl_num = us->MS_Lib.NumberOfLogBlock *
- us->MS_Lib.blockSize * 2 - 1;
-
- us->bl_num = bl_num;
- pr_info("bl_len = %x\n", bl_len);
- pr_info("bl_num = %x\n", bl_num);
-
- /* srb->request_bufflen = 8; */
- buf[0] = (bl_num >> 24) & 0xff;
- buf[1] = (bl_num >> 16) & 0xff;
- buf[2] = (bl_num >> 8) & 0xff;
- buf[3] = (bl_num >> 0) & 0xff;
- buf[4] = (bl_len >> 24) & 0xff;
- buf[5] = (bl_len >> 16) & 0xff;
- buf[6] = (bl_len >> 8) & 0xff;
- buf[7] = (bl_len >> 0) & 0xff;
-
- usb_stor_access_xfer_buf(us, buf, 8, srb, &sg, &offset, TO_XFER_BUF);
- /* usb_stor_set_xfer_buf(us, buf, srb->request_bufflen,
- srb, TO_XFER_BUF); */
-
- return USB_STOR_TRANSPORT_GOOD;
-}
-
-/*
- * MS_SCSI_Read()
- */
-int MS_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
-{
- struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
- int result = 0;
- PBYTE Cdb = srb->cmnd;
- DWORD bn = ((Cdb[2] << 24) & 0xff000000) |
- ((Cdb[3] << 16) & 0x00ff0000) |
- ((Cdb[4] << 8) & 0x0000ff00) |
- ((Cdb[5] << 0) & 0x000000ff);
- WORD blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
- DWORD blenByte = blen * 0x200;
-
- /* pr_info("SCSIOP_READ --- bn = %X, blen = %X, srb->use_sg = %X\n",
- bn, blen, srb->use_sg); */
-
- if (bn > us->bl_num)
- return USB_STOR_TRANSPORT_ERROR;
-
- if (us->MS_Status.IsMSPro) {
- result = ENE_LoadBinCode(us, MSP_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD) {
- pr_info("Load MSP RW pattern Fail !!\n");
- return USB_STOR_TRANSPORT_ERROR;
- }
-
- /* set up the command wrapper */
- memset(bcb, 0, sizeof(struct bulk_cb_wrap));
- bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
- bcb->DataTransferLength = blenByte;
- bcb->Flags = 0x80;
- bcb->CDB[0] = 0xF1;
- bcb->CDB[1] = 0x02;
- bcb->CDB[5] = (BYTE)(bn);
- bcb->CDB[4] = (BYTE)(bn>>8);
- bcb->CDB[3] = (BYTE)(bn>>16);
- bcb->CDB[2] = (BYTE)(bn>>24);
-
- result = ENE_SendScsiCmd(us, FDIR_READ, scsi_sglist(srb), 1);
- } else {
- void *buf;
- int offset = 0;
- WORD phyblk, logblk;
- BYTE PageNum;
- WORD len;
- DWORD blkno;
-
- buf = kmalloc(blenByte, GFP_KERNEL);
- if (buf == NULL)
- return USB_STOR_TRANSPORT_ERROR;
-
- result = ENE_LoadBinCode(us, MS_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD) {
- pr_info("Load MS RW pattern Fail !!\n");
- result = USB_STOR_TRANSPORT_ERROR;
- goto exit;
- }
-
- logblk = (WORD)(bn / us->MS_Lib.PagesPerBlock);
- PageNum = (BYTE)(bn % us->MS_Lib.PagesPerBlock);
-
- while (1) {
- if (blen > (us->MS_Lib.PagesPerBlock-PageNum))
- len = us->MS_Lib.PagesPerBlock-PageNum;
- else
- len = blen;
-
- phyblk = MS_LibConv2Physical(us, logblk);
- blkno = phyblk * 0x20 + PageNum;
-
- /* set up the command wrapper */
- memset(bcb, 0, sizeof(struct bulk_cb_wrap));
- bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
- bcb->DataTransferLength = 0x200 * len;
- bcb->Flags = 0x80;
- bcb->CDB[0] = 0xF1;
- bcb->CDB[1] = 0x02;
- bcb->CDB[5] = (BYTE)(blkno);
- bcb->CDB[4] = (BYTE)(blkno>>8);
- bcb->CDB[3] = (BYTE)(blkno>>16);
- bcb->CDB[2] = (BYTE)(blkno>>24);
-
- result = ENE_SendScsiCmd(us, FDIR_READ, buf+offset, 0);
- if (result != USB_STOR_XFER_GOOD) {
- pr_info("MS_SCSI_Read --- result = %x\n",
- result);
- result = USB_STOR_TRANSPORT_ERROR;
- goto exit;
- }
-
- blen -= len;
- if (blen <= 0)
- break;
- logblk++;
- PageNum = 0;
- offset += MS_BYTES_PER_PAGE*len;
- }
- usb_stor_set_xfer_buf(us, buf, blenByte, srb, TO_XFER_BUF);
-exit:
- kfree(buf);
- }
- return result;
-}
-
-/*
- * MS_SCSI_Write()
- */
-int MS_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
-{
- struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
- int result = 0;
- PBYTE Cdb = srb->cmnd;
- DWORD bn = ((Cdb[2] << 24) & 0xff000000) |
- ((Cdb[3] << 16) & 0x00ff0000) |
- ((Cdb[4] << 8) & 0x0000ff00) |
- ((Cdb[5] << 0) & 0x000000ff);
- WORD blen = ((Cdb[7] << 8) & 0xff00) | ((Cdb[8] << 0) & 0x00ff);
- DWORD blenByte = blen * 0x200;
-
- if (bn > us->bl_num)
- return USB_STOR_TRANSPORT_ERROR;
-
- if (us->MS_Status.IsMSPro) {
- result = ENE_LoadBinCode(us, MSP_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD) {
- pr_info("Load MSP RW pattern Fail !!\n");
- return USB_STOR_TRANSPORT_ERROR;
- }
-
- /* set up the command wrapper */
- memset(bcb, 0, sizeof(struct bulk_cb_wrap));
- bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
- bcb->DataTransferLength = blenByte;
- bcb->Flags = 0x00;
- bcb->CDB[0] = 0xF0;
- bcb->CDB[1] = 0x04;
- bcb->CDB[5] = (BYTE)(bn);
- bcb->CDB[4] = (BYTE)(bn>>8);
- bcb->CDB[3] = (BYTE)(bn>>16);
- bcb->CDB[2] = (BYTE)(bn>>24);
-
- result = ENE_SendScsiCmd(us, FDIR_WRITE, scsi_sglist(srb), 1);
- } else {
- void *buf;
- int offset = 0;
- WORD PhyBlockAddr;
- BYTE PageNum;
- DWORD result;
- WORD len, oldphy, newphy;
-
- buf = kmalloc(blenByte, GFP_KERNEL);
- if (buf == NULL)
- return USB_STOR_TRANSPORT_ERROR;
- usb_stor_set_xfer_buf(us, buf, blenByte, srb, FROM_XFER_BUF);
-
- result = ENE_LoadBinCode(us, MS_RW_PATTERN);
- if (result != USB_STOR_XFER_GOOD) {
- pr_info("Load MS RW pattern Fail !!\n");
- result = USB_STOR_TRANSPORT_ERROR;
- goto exit;
- }
-
- PhyBlockAddr = (WORD)(bn / us->MS_Lib.PagesPerBlock);
- PageNum = (BYTE)(bn % us->MS_Lib.PagesPerBlock);
-
- while (1) {
- if (blen > (us->MS_Lib.PagesPerBlock-PageNum))
- len = us->MS_Lib.PagesPerBlock-PageNum;
- else
- len = blen;
-
- oldphy = MS_LibConv2Physical(us, PhyBlockAddr);
- newphy = MS_LibSearchBlockFromLogical(us, PhyBlockAddr);
-
- result = MS_ReaderCopyBlock(us, oldphy, newphy,
- PhyBlockAddr, PageNum,
- buf+offset, len);
- if (result != USB_STOR_XFER_GOOD) {
- pr_info("MS_SCSI_Write --- result = %x\n",
- result);
- result = USB_STOR_TRANSPORT_ERROR;
- goto exit;
- }
-
- us->MS_Lib.Phy2LogMap[oldphy] = MS_LB_NOT_USED_ERASED;
- MS_LibForceSetLogicalPair(us, PhyBlockAddr, newphy);
-
- blen -= len;
- if (blen <= 0)
- break;
- PhyBlockAddr++;
- PageNum = 0;
- offset += MS_BYTES_PER_PAGE*len;
- }
-exit:
- kfree(buf);
- }
- return result;
-}
-
-/*
- * MS_SCSIIrp()
- */
-int MS_SCSIIrp(struct us_data *us, struct scsi_cmnd *srb)
-{
- int result;
-
- us->SrbStatus = SS_SUCCESS;
- switch (srb->cmnd[0]) {
- case TEST_UNIT_READY:
- result = MS_SCSI_Test_Unit_Ready(us, srb);
- break; /* 0x00 */
- case INQUIRY:
- result = MS_SCSI_Inquiry(us, srb);
- break; /* 0x12 */
- case MODE_SENSE:
- result = MS_SCSI_Mode_Sense(us, srb);
- break; /* 0x1A */
- case READ_CAPACITY:
- result = MS_SCSI_Read_Capacity(us, srb);
- break; /* 0x25 */
- case READ_10:
- result = MS_SCSI_Read(us, srb);
- break; /* 0x28 */
- case WRITE_10:
- result = MS_SCSI_Write(us, srb);
- break; /* 0x2A */
- default:
- us->SrbStatus = SS_ILLEGAL_REQUEST;
- result = USB_STOR_TRANSPORT_FAILED;
- break;
- }
- return result;
-}
-
diff --git a/drivers/staging/keucr/transport.c b/drivers/staging/keucr/transport.c
index 0274cb0edd0..1a8837df076 100644
--- a/drivers/staging/keucr/transport.c
+++ b/drivers/staging/keucr/transport.c
@@ -432,7 +432,7 @@ void ENE_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
usb_stor_print_cmd(srb);
/* send the command to the transport layer */
scsi_set_resid(srb, 0);
- if (!(us->MS_Status.Ready || us->SM_Status.Ready))
+ if (!(us->SM_Status.Ready))
result = ENE_InitMedia(us);
if (us->Power_IsResum == true) {
@@ -440,8 +440,6 @@ void ENE_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
us->Power_IsResum = false;
}
- if (us->MS_Status.Ready)
- result = MS_SCSIIrp(us, srb);
if (us->SM_Status.Ready)
result = SM_SCSIIrp(us, srb);
diff --git a/drivers/staging/keucr/transport.h b/drivers/staging/keucr/transport.h
index 75296152af7..4ae57d0145b 100644
--- a/drivers/staging/keucr/transport.h
+++ b/drivers/staging/keucr/transport.h
@@ -95,7 +95,6 @@ extern void usb_stor_set_xfer_buf(struct us_data*, unsigned char *buffer,
*/
extern void ENE_stor_invoke_transport(struct scsi_cmnd *, struct us_data *);
extern int ENE_InitMedia(struct us_data *);
-extern int ENE_MSInit(struct us_data *);
extern int ENE_SMInit(struct us_data *);
extern int ENE_SendScsiCmd(struct us_data*, BYTE, void*, int);
extern int ENE_LoadBinCode(struct us_data*, BYTE);
@@ -107,51 +106,6 @@ extern void BuildSenseBuffer(struct scsi_cmnd *, int);
/*
* ENE scsi function
*/
-extern int MS_SCSIIrp(struct us_data *us, struct scsi_cmnd *srb);
extern int SM_SCSIIrp(struct us_data *us, struct scsi_cmnd *srb);
-/*
- * ENE MS function
- */
-extern int MS_CardInit(struct us_data *us);
-extern void MS_LibFreeAllocatedArea(struct us_data *us);
-extern void MS_LibFreeWriteBuf(struct us_data *us);
-extern int MS_LibFreeLogicalMap(struct us_data *us);
-extern int MS_LibForceSetLogicalPair(struct us_data *us, WORD logblk,
- WORD phyblk);
-extern int MS_ReaderReadPage(struct us_data *us, DWORD PhyBlockAddr,
- BYTE PageNum, DWORD *PageBuf,
- MS_LibTypeExtdat *ExtraDat);
-extern int MS_ReaderCopyBlock(struct us_data *us, WORD oldphy,
- WORD newphy, WORD PhyBlockAddr,
- BYTE PageNum, PBYTE buf, WORD len);
-extern int MS_ReaderEraseBlock(struct us_data *us, DWORD PhyBlockAddr);
-extern int MS_LibProcessBootBlock(struct us_data *us, WORD PhyBlock,
- BYTE *PageData);
-extern int MS_LibAllocLogicalMap(struct us_data *us);
-extern int MS_LibSetBootBlockMark(struct us_data *us, WORD phyblk);
-extern int MS_LibSetLogicalBlockMark(struct us_data *us, WORD phyblk,
- WORD mark);
-extern int MS_LibSetInitialErrorBlock(struct us_data *us, WORD phyblk);
-extern int MS_LibScanLogicalBlockNumber(struct us_data *us, WORD phyblk);
-extern int MS_LibAllocWriteBuf(struct us_data *us);
-void MS_LibClearWriteBuf(struct us_data *us);
-void MS_LibPhy2LogRange(WORD PhyBlock, WORD *LogStart,
- WORD *LogEnde);
-extern int MS_LibReadExtra(struct us_data *us, DWORD PhyBlock,
- BYTE PageNum, MS_LibTypeExtdat *ExtraDat);
-extern int MS_LibReadExtraBlock(struct us_data *us, DWORD PhyBlock,
- BYTE PageNum, BYTE blen, void *buf);
-extern int MS_LibSetAcquiredErrorBlock(struct us_data *us, WORD phyblk);
-extern int MS_LibErasePhyBlock(struct us_data *us, WORD phyblk);
-extern int MS_LibErrorPhyBlock(struct us_data *us, WORD phyblk);
-extern int MS_LibOverwriteExtra(struct us_data *us, DWORD PhyBlockAddr,
- BYTE PageNum, BYTE OverwriteFlag);
-extern int MS_LibSetLogicalPair(struct us_data *us,
- WORD logblk, WORD phyblk);
-extern int MS_LibCheckDisableBlock(struct us_data *us, WORD PhyBlock);
-extern int MS_CountFreeBlock(struct us_data *us, WORD PhyBlock);
-extern int MS_LibSearchBlockFromLogical(struct us_data *us, WORD logblk);
-extern int MS_LibSearchBlockFromPhysical(struct us_data *us, WORD phyblk);
-
#endif
diff --git a/drivers/staging/keucr/usb.c b/drivers/staging/keucr/usb.c
index d8c5c626be5..66aad3a0d1f 100644
--- a/drivers/staging/keucr/usb.c
+++ b/drivers/staging/keucr/usb.c
@@ -75,7 +75,6 @@ static int eucr_resume(struct usb_interface *iface)
us->Power_IsResum = true;
//
//us->SD_Status.Ready = 0; //??
- us->MS_Status = *(PMS_STATUS)&tmp;
us->SM_Status = *(PSM_STATUS)&tmp;
return 0;
@@ -98,7 +97,6 @@ static int eucr_reset_resume(struct usb_interface *iface)
us->Power_IsResum = true;
//
//us->SD_Status.Ready = 0; //??
- us->MS_Status = *(PMS_STATUS)&tmp;
us->SM_Status = *(PSM_STATUS)&tmp;
return 0;
}
@@ -640,8 +638,7 @@ static int eucr_probe(struct usb_interface *intf, const struct usb_device_id *id
/* Start up the thread for delayed SCSI-device scanning */
th = kthread_create(usb_stor_scan_thread, us, "eucr-stor-scan");
- if (IS_ERR(th))
- {
+ if (IS_ERR(th)) {
pr_info("Unable to start the device-scanning thread\n");
complete(&us->scanning_done);
quiesce_and_remove_host(us);
diff --git a/drivers/staging/keucr/usb.h b/drivers/staging/keucr/usb.h
index bbf578ad631..a5f7a16c11c 100644
--- a/drivers/staging/keucr/usb.h
+++ b/drivers/staging/keucr/usb.h
@@ -10,7 +10,6 @@
#include <linux/mutex.h>
#include <scsi/scsi_host.h>
#include "common.h"
-#include "ms.h"
struct us_data;
struct scsi_cmnd;
@@ -201,7 +200,7 @@ struct us_data {
//----- MS Control Data ----------------
BOOLEAN MS_SWWP;
DWORD MSP_TotalBlock;
- MS_LibControl MS_Lib;
+ /* MS_LibControl MS_Lib; */
BOOLEAN MS_IsRWPage;
WORD MS_Model;
diff --git a/drivers/staging/lirc/lirc_bt829.c b/drivers/staging/lirc/lirc_bt829.c
index 33881025426..c5a0d27a02d 100644
--- a/drivers/staging/lirc/lirc_bt829.c
+++ b/drivers/staging/lirc/lirc_bt829.c
@@ -122,10 +122,10 @@ int init_module(void)
pdev = do_pci_probe();
if (pdev == NULL)
- return 1;
+ return -ENODEV;
if (!atir_init_start())
- return 1;
+ return -ENODEV;
strcpy(atir_driver.name, "ATIR");
atir_driver.minor = -1;
diff --git a/drivers/staging/lirc/lirc_imon.c b/drivers/staging/lirc/lirc_imon.c
index 4039eda2a15..4a9e563f40f 100644
--- a/drivers/staging/lirc/lirc_imon.c
+++ b/drivers/staging/lirc/lirc_imon.c
@@ -672,8 +672,6 @@ static void imon_incoming_packet(struct imon_context *context,
static void usb_rx_callback(struct urb *urb)
{
struct imon_context *context;
- unsigned char *buf;
- int len;
int intfnum = 0;
if (!urb)
@@ -683,9 +681,6 @@ static void usb_rx_callback(struct urb *urb)
if (!context)
return;
- buf = urb->transfer_buffer;
- len = urb->actual_length;
-
switch (urb->status) {
case -ENOENT: /* usbcore unlink successful! */
return;
@@ -728,7 +723,6 @@ static int imon_probe(struct usb_interface *interface,
int ir_ep_found = 0;
int alloc_status = 0;
int vfd_proto_6p = 0;
- int code_length;
struct imon_context *context = NULL;
int i;
u16 vendor, product;
@@ -749,8 +743,6 @@ static int imon_probe(struct usb_interface *interface,
else
context->display = 1;
- code_length = BUF_CHUNK_SIZE * 8;
-
usbdev = usb_get_dev(interface_to_usbdev(interface));
iface_desc = interface->cur_altsetting;
num_endpts = iface_desc->desc.bNumEndpoints;
@@ -856,7 +848,7 @@ static int imon_probe(struct usb_interface *interface,
strcpy(driver->name, MOD_NAME);
driver->minor = -1;
- driver->code_length = sizeof(int) * 8;
+ driver->code_length = BUF_CHUNK_SIZE * 8;
driver->sample_rate = 0;
driver->features = LIRC_CAN_REC_MODE2;
driver->data = context;
diff --git a/drivers/staging/lirc/lirc_parallel.c b/drivers/staging/lirc/lirc_parallel.c
index 50724c4e248..792aac0a8e7 100644
--- a/drivers/staging/lirc/lirc_parallel.c
+++ b/drivers/staging/lirc/lirc_parallel.c
@@ -615,9 +615,6 @@ static struct platform_driver lirc_parallel_driver = {
},
};
-static int pf(void *handle);
-static void kf(void *handle);
-
static int pf(void *handle)
{
parport_disable_irq(pport);
@@ -730,6 +727,9 @@ static void __exit lirc_parallel_exit(void)
{
parport_unregister_device(ppdevice);
lirc_unregister_driver(driver.minor);
+
+ platform_device_unregister(lirc_parallel_dev);
+ platform_driver_unregister(&lirc_parallel_driver);
}
module_init(lirc_parallel_init);
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c
index 4a3cca03224..805df913bb6 100644
--- a/drivers/staging/lirc/lirc_serial.c
+++ b/drivers/staging/lirc/lirc_serial.c
@@ -838,7 +838,23 @@ static int hardware_init_port(void)
static int init_port(void)
{
- int i, nlow, nhigh;
+ int i, nlow, nhigh, result;
+
+ result = request_irq(irq, irq_handler,
+ IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0),
+ LIRC_DRIVER_NAME, (void *)&hardware);
+
+ switch (result) {
+ case -EBUSY:
+ printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq);
+ return -EBUSY;
+ case -EINVAL:
+ printk(KERN_ERR LIRC_DRIVER_NAME
+ ": Bad irq number or handler\n");
+ return -EINVAL;
+ default:
+ break;
+ };
/* Reserve io region. */
/*
@@ -893,34 +909,17 @@ static int init_port(void)
printk(KERN_INFO LIRC_DRIVER_NAME ": Manually using active "
"%s receiver\n", sense ? "low" : "high");
+ dprintk("Interrupt %d, port %04x obtained\n", irq, io);
return 0;
}
static int set_use_inc(void *data)
{
- int result;
unsigned long flags;
/* initialize timestamp */
do_gettimeofday(&lasttv);
- result = request_irq(irq, irq_handler,
- IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0),
- LIRC_DRIVER_NAME, (void *)&hardware);
-
- switch (result) {
- case -EBUSY:
- printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq);
- return -EBUSY;
- case -EINVAL:
- printk(KERN_ERR LIRC_DRIVER_NAME
- ": Bad irq number or handler\n");
- return -EINVAL;
- default:
- dprintk("Interrupt %d, port %04x obtained\n", irq, io);
- break;
- }
-
spin_lock_irqsave(&hardware[type].lock, flags);
/* Set DLAB 0. */
@@ -945,10 +944,6 @@ static void set_use_dec(void *data)
soutp(UART_IER, sinp(UART_IER) &
(~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
spin_unlock_irqrestore(&hardware[type].lock, flags);
-
- free_irq(irq, (void *)&hardware);
-
- dprintk("freed IRQ %d\n", irq);
}
static ssize_t lirc_write(struct file *file, const char *buf,
@@ -1256,6 +1251,9 @@ exit_serial_exit:
static void __exit lirc_serial_exit_module(void)
{
lirc_serial_exit();
+
+ free_irq(irq, (void *)&hardware);
+
if (iommap != 0)
release_mem_region(iommap, 8 << ioshift);
else
diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/lirc/lirc_sir.c
index a7b46f24f24..0d3864594b1 100644
--- a/drivers/staging/lirc/lirc_sir.c
+++ b/drivers/staging/lirc/lirc_sir.c
@@ -739,23 +739,16 @@ static void send_space(unsigned long len)
static void send_pulse(unsigned long len)
{
long bytes_out = len / TIME_CONST;
- long time_left;
- time_left = (long)len - (long)bytes_out * (long)TIME_CONST;
- if (bytes_out == 0) {
+ if (bytes_out == 0)
bytes_out++;
- time_left = 0;
- }
+
while (bytes_out--) {
outb(PULSE, io + UART_TX);
/* FIXME treba seriozne cakanie z char/serial.c */
while (!(inb(io + UART_LSR) & UART_LSR_THRE))
;
}
-#if 0
- if (time_left > 0)
- safe_udelay(time_left);
-#endif
}
#endif
diff --git a/drivers/staging/lirc/lirc_ttusbir.c b/drivers/staging/lirc/lirc_ttusbir.c
index e345ab9a004..e4b329b8caf 100644
--- a/drivers/staging/lirc/lirc_ttusbir.c
+++ b/drivers/staging/lirc/lirc_ttusbir.c
@@ -30,7 +30,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/lirc/lirc_zilog.c
index dd6a57c3c3a..0302d82a12f 100644
--- a/drivers/staging/lirc/lirc_zilog.c
+++ b/drivers/staging/lirc/lirc_zilog.c
@@ -39,8 +39,6 @@
*
*/
-
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
@@ -475,14 +473,14 @@ static int lirc_thread(void *arg)
dprintk("poll thread started\n");
while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
/* if device not opened, we can sleep half a second */
if (atomic_read(&ir->open_count) == 0) {
schedule_timeout(HZ/2);
continue;
}
- set_current_state(TASK_INTERRUPTIBLE);
-
/*
* This is ~113*2 + 24 + jitter (2*repeat gap + code length).
* We use this interval as the chip resets every time you poll
diff --git a/drivers/staging/mei/init.c b/drivers/staging/mei/init.c
index d1ffa32cd14..0fa8216fd0e 100644
--- a/drivers/staging/mei/init.c
+++ b/drivers/staging/mei/init.c
@@ -29,54 +29,29 @@ const uuid_le mei_amthi_guid = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, 0xac,
0x81, 0x4c);
/**
- * mei_initialize_list - Sets up a queue list.
+ * mei_io_list_init - Sets up a queue list.
*
- * @list: An instance of our list structure
+ * @list: An instance io list structure
* @dev: the device structure
*/
-void mei_initialize_list(struct mei_io_list *list, struct mei_device *dev)
+void mei_io_list_init(struct mei_io_list *list)
{
/* initialize our queue list */
INIT_LIST_HEAD(&list->mei_cb.cb_list);
list->status = 0;
- list->device_extension = dev;
}
/**
- * mei_flush_queues - flushes queue lists belonging to cl.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- */
-void mei_flush_queues(struct mei_device *dev, struct mei_cl *cl)
-{
- int i;
-
- if (!dev || !cl)
- return;
-
- for (i = 0; i < MEI_IO_LISTS_NUMBER; i++) {
- dev_dbg(&dev->pdev->dev, "remove list entry belonging to cl\n");
- mei_flush_list(dev->io_list_array[i], cl);
- }
-}
-
-
-/**
- * mei_flush_list - removes list entry belonging to cl.
+ * mei_io_list_flush - removes list entry belonging to cl.
*
* @list: An instance of our list structure
* @cl: private data of the file object
*/
-void mei_flush_list(struct mei_io_list *list, struct mei_cl *cl)
+void mei_io_list_flush(struct mei_io_list *list, struct mei_cl *cl)
{
- struct mei_cl *cl_tmp;
struct mei_cl_cb *cb_pos = NULL;
struct mei_cl_cb *cb_next = NULL;
- if (!list || !cl)
- return;
-
if (list->status != 0)
return;
@@ -86,14 +61,36 @@ void mei_flush_list(struct mei_io_list *list, struct mei_cl *cl)
list_for_each_entry_safe(cb_pos, cb_next,
&list->mei_cb.cb_list, cb_list) {
if (cb_pos) {
- cl_tmp = (struct mei_cl *)
- cb_pos->file_private;
- if (cl_tmp &&
- mei_fe_same_id(cl, cl_tmp))
+ struct mei_cl *cl_tmp;
+ cl_tmp = (struct mei_cl *)cb_pos->file_private;
+ if (mei_cl_cmp_id(cl, cl_tmp))
list_del(&cb_pos->cb_list);
}
}
}
+/**
+ * mei_cl_flush_queues - flushes queue lists belonging to cl.
+ *
+ * @dev: the device structure
+ * @cl: private data of the file object
+ */
+int mei_cl_flush_queues(struct mei_cl *cl)
+{
+ if (!cl || !cl->dev)
+ return -EINVAL;
+
+ dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
+ mei_io_list_flush(&cl->dev->read_list, cl);
+ mei_io_list_flush(&cl->dev->write_list, cl);
+ mei_io_list_flush(&cl->dev->write_waiting_list, cl);
+ mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
+ mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&cl->dev->amthi_cmd_list, cl);
+ mei_io_list_flush(&cl->dev->amthi_read_complete_list, cl);
+ return 0;
+}
+
+
/**
* mei_reset_iamthif_params - initializes mei device iamthif
@@ -106,8 +103,8 @@ static void mei_reset_iamthif_params(struct mei_device *dev)
dev->iamthif_current_cb = NULL;
dev->iamthif_msg_buf_size = 0;
dev->iamthif_msg_buf_index = 0;
- dev->iamthif_canceled = 0;
- dev->iamthif_ioctl = 0;
+ dev->iamthif_canceled = false;
+ dev->iamthif_ioctl = false;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
dev->iamthif_timer = 0;
}
@@ -119,9 +116,8 @@ static void mei_reset_iamthif_params(struct mei_device *dev)
*
* returns The mei_device_device pointer on success, NULL on failure.
*/
-struct mei_device *init_mei_device(struct pci_dev *pdev)
+struct mei_device *mei_device_init(struct pci_dev *pdev)
{
- int i;
struct mei_device *dev;
dev = kzalloc(sizeof(struct mei_device), GFP_KERNEL);
@@ -129,13 +125,6 @@ struct mei_device *init_mei_device(struct pci_dev *pdev)
return NULL;
/* setup our list array */
- dev->io_list_array[0] = &dev->read_list;
- dev->io_list_array[1] = &dev->write_list;
- dev->io_list_array[2] = &dev->write_waiting_list;
- dev->io_list_array[3] = &dev->ctrl_wr_list;
- dev->io_list_array[4] = &dev->ctrl_rd_list;
- dev->io_list_array[5] = &dev->amthi_cmd_list;
- dev->io_list_array[6] = &dev->amthi_read_complete_list;
INIT_LIST_HEAD(&dev->file_list);
INIT_LIST_HEAD(&dev->wd_cl.link);
INIT_LIST_HEAD(&dev->iamthif_cl.link);
@@ -144,8 +133,15 @@ struct mei_device *init_mei_device(struct pci_dev *pdev)
init_waitqueue_head(&dev->wait_stop_wd);
dev->mei_state = MEI_INITIALIZING;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
- for (i = 0; i < MEI_IO_LISTS_NUMBER; i++)
- mei_initialize_list(dev->io_list_array[i], dev);
+
+
+ mei_io_list_init(&dev->read_list);
+ mei_io_list_init(&dev->write_list);
+ mei_io_list_init(&dev->write_waiting_list);
+ mei_io_list_init(&dev->ctrl_wr_list);
+ mei_io_list_init(&dev->ctrl_rd_list);
+ mei_io_list_init(&dev->amthi_cmd_list);
+ mei_io_list_init(&dev->amthi_read_complete_list);
dev->pdev = pdev;
return dev;
}
@@ -173,7 +169,7 @@ int mei_hw_init(struct mei_device *dev)
if ((dev->host_hw_state & H_IS) == H_IS)
mei_reg_write(dev, H_CSR, dev->host_hw_state);
- dev->recvd_msg = 0;
+ dev->recvd_msg = false;
dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n");
mei_reset(dev, 1);
@@ -189,7 +185,7 @@ int mei_hw_init(struct mei_device *dev)
mutex_lock(&dev->device_lock);
}
- if (!err && !dev->recvd_msg) {
+ if (err <= 0 && !dev->recvd_msg) {
dev->mei_state = MEI_DISABLED;
dev_dbg(&dev->pdev->dev,
"wait_event_interruptible_timeout failed"
@@ -223,7 +219,7 @@ int mei_hw_init(struct mei_device *dev)
goto out;
}
- dev->recvd_msg = 0;
+ dev->recvd_msg = false;
dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
dev->host_hw_state, dev->me_hw_state);
dev_dbg(&dev->pdev->dev, "ME turn on ME_RDY and host turn on H_RDY.\n");
@@ -267,7 +263,7 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
bool unexpected;
if (dev->mei_state == MEI_RECOVERING_FROM_RESET) {
- dev->need_reset = 1;
+ dev->need_reset = true;
return;
}
@@ -291,7 +287,7 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
dev_dbg(&dev->pdev->dev, "currently saved host_hw_state = 0x%08x.\n",
dev->host_hw_state);
- dev->need_reset = 0;
+ dev->need_reset = false;
if (dev->mei_state != MEI_INITIALIZING) {
if (dev->mei_state != MEI_DISABLED &&
@@ -318,10 +314,10 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
dev->extra_write_index = 0;
}
- dev->num_mei_me_clients = 0;
+ dev->me_clients_num = 0;
dev->rd_msg_hdr = 0;
- dev->stop = 0;
- dev->wd_pending = 0;
+ dev->stop = false;
+ dev->wd_pending = false;
/* update the state of the registers after reset */
dev->host_hw_state = mei_hcsr_read(dev);
@@ -363,7 +359,7 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
*
* returns none.
*/
-void host_start_message(struct mei_device *dev)
+void mei_host_start_message(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr;
struct hbm_host_version_request *host_start_req;
@@ -382,7 +378,7 @@ void host_start_message(struct mei_device *dev)
host_start_req->cmd.cmd = HOST_START_REQ_CMD;
host_start_req->host_version.major_version = HBM_MAJOR_VERSION;
host_start_req->host_version.minor_version = HBM_MINOR_VERSION;
- dev->recvd_msg = 0;
+ dev->recvd_msg = false;
if (!mei_write_message(dev, mei_hdr,
(unsigned char *) (host_start_req),
mei_hdr->length)) {
@@ -402,7 +398,7 @@ void host_start_message(struct mei_device *dev)
*
* returns none.
*/
-void host_enum_clients_message(struct mei_device *dev)
+void mei_host_enum_clients_message(struct mei_device *dev)
{
struct mei_msg_hdr *mei_hdr;
struct hbm_host_enum_request *host_enum_req;
@@ -437,16 +433,16 @@ void host_enum_clients_message(struct mei_device *dev)
*
* returns none.
*/
-void allocate_me_clients_storage(struct mei_device *dev)
+void mei_allocate_me_clients_storage(struct mei_device *dev)
{
struct mei_me_client *clients;
int b;
/* count how many ME clients we have */
for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
- dev->num_mei_me_clients++;
+ dev->me_clients_num++;
- if (dev->num_mei_me_clients <= 0)
+ if (dev->me_clients_num <= 0)
return ;
@@ -455,9 +451,9 @@ void allocate_me_clients_storage(struct mei_device *dev)
dev->me_clients = NULL;
}
dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
- dev->num_mei_me_clients * sizeof(struct mei_me_client));
+ dev->me_clients_num * sizeof(struct mei_me_client));
/* allocate storage for ME clients representation */
- clients = kcalloc(dev->num_mei_me_clients,
+ clients = kcalloc(dev->me_clients_num,
sizeof(struct mei_me_client), GFP_KERNEL);
if (!clients) {
dev_dbg(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
@@ -475,7 +471,7 @@ void allocate_me_clients_storage(struct mei_device *dev)
*
* returns none.
*/
-void host_client_properties(struct mei_device *dev)
+void mei_host_client_properties(struct mei_device *dev)
{
struct mei_msg_hdr *mei_header;
struct hbm_props_request *host_cli_req;
@@ -521,7 +517,6 @@ void host_client_properties(struct mei_device *dev)
* with associated host client
*/
bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
- dev->write_hang = -1;
dev->open_handle_count = 0;
bitmap_set(dev->host_clients_map, 0, 3);
dev->mei_state = MEI_ENABLED;
@@ -536,7 +531,7 @@ void host_client_properties(struct mei_device *dev)
* @priv: private file structure to be initialized
* @file: the file structure
*/
-void mei_init_file_private(struct mei_cl *priv, struct mei_device *dev)
+void mei_cl_init(struct mei_cl *priv, struct mei_device *dev)
{
memset(priv, 0, sizeof(struct mei_cl));
init_waitqueue_head(&priv->wait);
@@ -552,7 +547,7 @@ int mei_find_me_client_index(const struct mei_device *dev, uuid_le cuuid)
{
int i, res = -1;
- for (i = 0; i < dev->num_mei_me_clients; ++i)
+ for (i = 0; i < dev->me_clients_num; ++i)
if (uuid_le_cmp(cuuid,
dev->me_clients[i].props.protocol_name) == 0) {
res = i;
@@ -601,12 +596,12 @@ u8 mei_find_me_client_update_filext(struct mei_device *dev, struct mei_cl *priv,
* @dev: the device structure
*
*/
-void host_init_iamthif(struct mei_device *dev)
+void mei_host_init_iamthif(struct mei_device *dev)
{
u8 i;
unsigned char *msg_buf;
- mei_init_file_private(&dev->iamthif_cl, dev);
+ mei_cl_init(&dev->iamthif_cl, dev);
dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
/* find ME amthi client */
@@ -656,17 +651,17 @@ void host_init_iamthif(struct mei_device *dev)
*
* returns The allocated file or NULL on failure
*/
-struct mei_cl *mei_alloc_file_private(struct mei_device *dev)
+struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
- struct mei_cl *priv;
+ struct mei_cl *cl;
- priv = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
- if (!priv)
+ cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
+ if (!cl)
return NULL;
- mei_init_file_private(priv, dev);
+ mei_cl_init(cl, dev);
- return priv;
+ return cl;
}
@@ -701,7 +696,7 @@ int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
cb->file_private = cl;
cb->major_file_operations = MEI_CLOSE;
if (dev->mei_host_buffer_is_empty) {
- dev->mei_host_buffer_is_empty = 0;
+ dev->mei_host_buffer_is_empty = false;
if (mei_disconnect(dev, cl)) {
mdelay(10); /* Wait for hardware disconnection ready */
list_add_tail(&cb->cb_list,
@@ -739,8 +734,8 @@ int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
}
- mei_flush_list(&dev->ctrl_rd_list, cl);
- mei_flush_list(&dev->ctrl_wr_list, cl);
+ mei_io_list_flush(&dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
mei_free_cb_private(cb);
return rets;
diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c
index 4959aae37b8..cfec92dfc1c 100644
--- a/drivers/staging/mei/interface.c
+++ b/drivers/staging/mei/interface.c
@@ -179,7 +179,6 @@ int mei_write_message(struct mei_device *dev,
if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
return 0;
- dev->write_hang = 0;
return 1;
}
@@ -256,13 +255,13 @@ int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl)
{
int i;
- if (!dev->num_mei_me_clients)
+ if (!dev->me_clients_num)
return 0;
if (cl->mei_flow_ctrl_creds > 0)
return 1;
- for (i = 0; i < dev->num_mei_me_clients; i++) {
+ for (i = 0; i < dev->me_clients_num; i++) {
struct mei_me_client *me_cl = &dev->me_clients[i];
if (me_cl->client_id == cl->me_client_id) {
if (me_cl->mei_flow_ctrl_creds) {
@@ -291,10 +290,10 @@ int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
{
int i;
- if (!dev->num_mei_me_clients)
+ if (!dev->me_clients_num)
return -ENOENT;
- for (i = 0; i < dev->num_mei_me_clients; i++) {
+ for (i = 0; i < dev->me_clients_num; i++) {
struct mei_me_client *me_cl = &dev->me_clients[i];
if (me_cl->client_id == cl->me_client_id) {
if (me_cl->props.single_recv_buf != 0) {
diff --git a/drivers/staging/mei/interrupt.c b/drivers/staging/mei/interrupt.c
index d1b9214c10c..9cb186bf187 100644
--- a/drivers/staging/mei/interrupt.c
+++ b/drivers/staging/mei/interrupt.c
@@ -94,7 +94,7 @@ static void _mei_cmpl_iamthif(struct mei_device *dev, struct mei_cl_cb *cb_pos)
dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
dev->iamthif_timer);
} else {
- run_next_iamthif_cmd(dev);
+ mei_run_next_iamthif_cmd(dev);
}
dev_dbg(&dev->pdev->dev, "completing amthi call back.\n");
@@ -195,7 +195,7 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
{
struct mei_cl *cl;
struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
- unsigned char *buffer;
+ unsigned char *buffer = NULL;
dev_dbg(&dev->pdev->dev, "start client msg\n");
if (!(dev->read_list.status == 0 &&
@@ -280,7 +280,7 @@ static int _mei_irq_thread_iamthif_read(struct mei_device *dev, s32 *slots)
} else {
dev_dbg(&dev->pdev->dev, "iamthif flow control success\n");
dev->iamthif_state = MEI_IAMTHIF_READING;
- dev->iamthif_flow_control_pending = 0;
+ dev->iamthif_flow_control_pending = false;
dev->iamthif_msg_buf_index = 0;
dev->iamthif_msg_buf_size = 0;
dev->iamthif_stall_timer = IAMTHIF_STALL_TIMER;
@@ -396,7 +396,7 @@ static void mei_client_connect_response(struct mei_device *dev,
dev->wd_due_counter = (dev->wd_timeout) ? 1 : 0;
dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
- host_init_iamthif(dev);
+ mei_host_init_iamthif(dev);
return;
}
@@ -499,7 +499,7 @@ static void add_single_flow_creds(struct mei_device *dev,
struct mei_me_client *client;
int i;
- for (i = 0; i < dev->num_mei_me_clients; i++) {
+ for (i = 0; i < dev->me_clients_num; i++) {
client = &dev->me_clients[i];
if (client && flow->me_addr == client->client_id) {
if (client->props.single_recv_buf) {
@@ -593,7 +593,7 @@ static void mei_client_disconnect_request(struct mei_device *dev,
cl_pos->timer_count = 0;
if (cl_pos == &dev->wd_cl) {
dev->wd_due_counter = 0;
- dev->wd_pending = 0;
+ dev->wd_pending = false;
} else if (cl_pos == &dev->iamthif_cl)
dev->iamthif_timer = 0;
@@ -659,9 +659,9 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
if (dev->mei_state == MEI_INIT_CLIENTS &&
dev->init_clients_state == MEI_START_MESSAGE) {
dev->init_clients_timer = 0;
- host_enum_clients_message(dev);
+ mei_host_enum_clients_message(dev);
} else {
- dev->recvd_msg = 0;
+ dev->recvd_msg = false;
dev_dbg(&dev->pdev->dev, "IMEI reset due to received host start response bus message.\n");
mei_reset(dev, 1);
return;
@@ -690,7 +690,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
return;
}
- dev->recvd_msg = 1;
+ dev->recvd_msg = true;
dev_dbg(&dev->pdev->dev, "host start response message received.\n");
break;
@@ -734,7 +734,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
MEI_CLIENT_PROPERTIES_MESSAGE) {
dev->me_client_index++;
dev->me_client_presentation_num++;
- host_client_properties(dev);
+ mei_host_client_properties(dev);
} else {
dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message");
mei_reset(dev, 1);
@@ -755,10 +755,10 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
dev->init_clients_timer = 0;
dev->me_client_presentation_num = 0;
dev->me_client_index = 0;
- allocate_me_clients_storage(dev);
+ mei_allocate_me_clients_storage(dev);
dev->init_clients_state =
MEI_CLIENT_PROPERTIES_MESSAGE;
- host_client_properties(dev);
+ mei_host_client_properties(dev);
} else {
dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
mei_reset(dev, 1);
@@ -1028,7 +1028,7 @@ static int _mei_irq_thread_cmpl_iamthif(struct mei_device *dev, s32 *slots,
cb_pos->information = dev->iamthif_msg_buf_index;
cl->status = 0;
dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
- dev->iamthif_flow_control_pending = 1;
+ dev->iamthif_flow_control_pending = true;
/* save iamthif cb sent to amthi client */
dev->iamthif_current_cb = cb_pos;
list_move_tail(&cb_pos->cb_list,
@@ -1192,7 +1192,6 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
return 0;
}
- dev->write_hang = -1;
*slots = mei_count_empty_write_slots(dev);
/* complete all waiting for write CB */
dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");
@@ -1232,7 +1231,7 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
}
if (dev->stop && !dev->wd_pending) {
- dev->wd_stopped = 1;
+ dev->wd_stopped = true;
wake_up_interruptible(&dev->wait_stop_wd);
return 0;
}
@@ -1256,7 +1255,7 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
if (mei_flow_ctrl_reduce(dev, &dev->wd_cl))
return -ENODEV;
- dev->wd_pending = 0;
+ dev->wd_pending = false;
if (dev->wd_timeout) {
*slots -= (sizeof(struct mei_msg_hdr) +
@@ -1427,7 +1426,7 @@ void mei_wd_timer(struct work_struct *work)
if (--dev->wd_due_counter == 0) {
if (dev->mei_host_buffer_is_empty &&
mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
- dev->mei_host_buffer_is_empty = 0;
+ dev->mei_host_buffer_is_empty = false;
dev_dbg(&dev->pdev->dev, "send watchdog.\n");
if (mei_wd_send(dev))
@@ -1442,7 +1441,7 @@ void mei_wd_timer(struct work_struct *work)
dev->wd_due_counter = 0;
} else
- dev->wd_pending = 1;
+ dev->wd_pending = true;
}
}
@@ -1452,8 +1451,8 @@ void mei_wd_timer(struct work_struct *work)
mei_reset(dev, 1);
dev->iamthif_msg_buf_size = 0;
dev->iamthif_msg_buf_index = 0;
- dev->iamthif_canceled = 0;
- dev->iamthif_ioctl = 1;
+ dev->iamthif_canceled = false;
+ dev->iamthif_ioctl = true;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
dev->iamthif_timer = 0;
@@ -1462,7 +1461,7 @@ void mei_wd_timer(struct work_struct *work)
dev->iamthif_file_object = NULL;
dev->iamthif_current_cb = NULL;
- run_next_iamthif_cmd(dev);
+ mei_run_next_iamthif_cmd(dev);
}
}
@@ -1506,7 +1505,7 @@ void mei_wd_timer(struct work_struct *work)
dev->iamthif_file_object = NULL;
dev->iamthif_current_cb = NULL;
dev->iamthif_timer = 0;
- run_next_iamthif_cmd(dev);
+ mei_run_next_iamthif_cmd(dev);
}
}
@@ -1539,7 +1538,7 @@ irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
/* initialize our complete list */
mutex_lock(&dev->device_lock);
- mei_initialize_list(&complete_list, dev);
+ mei_io_list_init(&complete_list);
dev->host_hw_state = mei_hcsr_read(dev);
dev->me_hw_state = mei_mecsr_read(dev);
@@ -1564,7 +1563,7 @@ irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
/* link is established
* start sending messages.
*/
- host_start_message(dev);
+ mei_host_start_message(dev);
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
} else {
diff --git a/drivers/staging/mei/iorw.c b/drivers/staging/mei/iorw.c
index 697a2773d7c..8a61d126651 100644
--- a/drivers/staging/mei/iorw.c
+++ b/drivers/staging/mei/iorw.c
@@ -121,7 +121,7 @@ int mei_ioctl_connect_client(struct file *file,
clear_bit(cl->host_client_id, dev->host_clients_map);
list_for_each_entry_safe(cl_pos, cl_next,
&dev->file_list, link) {
- if (mei_fe_same_id(cl, cl_pos)) {
+ if (mei_cl_cmp_id(cl, cl_pos)) {
dev_dbg(&dev->pdev->dev,
"remove file private data node host"
" client = %d, ME client = %d.\n",
@@ -161,7 +161,7 @@ int mei_ioctl_connect_client(struct file *file,
if (dev->mei_host_buffer_is_empty
&& !mei_other_client_is_connecting(dev, cl)) {
dev_dbg(&dev->pdev->dev, "Sending Connect Message\n");
- dev->mei_host_buffer_is_empty = 0;
+ dev->mei_host_buffer_is_empty = false;
if (!mei_connect(dev, cl)) {
dev_dbg(&dev->pdev->dev, "Sending connect message - failed\n");
rets = -ENODEV;
@@ -204,8 +204,8 @@ int mei_ioctl_connect_client(struct file *file,
}
rets = -EFAULT;
- mei_flush_list(&dev->ctrl_rd_list, cl);
- mei_flush_list(&dev->ctrl_wr_list, cl);
+ mei_io_list_flush(&dev->ctrl_rd_list, cl);
+ mei_io_list_flush(&dev->ctrl_wr_list, cl);
goto end;
}
rets = 0;
@@ -277,13 +277,13 @@ int amthi_read(struct mei_device *dev, struct file *file,
return -ETIMEDOUT;
}
- for (i = 0; i < dev->num_mei_me_clients; i++) {
+ for (i = 0; i < dev->me_clients_num; i++) {
if (dev->me_clients[i].client_id ==
dev->iamthif_cl.me_client_id)
break;
}
- if (i == dev->num_mei_me_clients) {
+ if (i == dev->me_clients_num) {
dev_dbg(&dev->pdev->dev, "amthi client not found.\n");
return -ENODEV;
}
@@ -409,7 +409,7 @@ int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
dev_dbg(&dev->pdev->dev, "allocation call back successful. host client = %d, ME client = %d\n",
cl->host_client_id, cl->me_client_id);
- for (i = 0; i < dev->num_mei_me_clients; i++) {
+ for (i = 0; i < dev->me_clients_num; i++) {
if (dev->me_clients[i].client_id == cl->me_client_id)
break;
@@ -420,7 +420,7 @@ int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
goto unlock;
}
- if (i == dev->num_mei_me_clients) {
+ if (i == dev->me_clients_num) {
rets = -ENODEV;
goto unlock;
}
@@ -439,7 +439,7 @@ int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
cb->file_private = (void *) cl;
cl->read_cb = cb;
if (dev->mei_host_buffer_is_empty) {
- dev->mei_host_buffer_is_empty = 0;
+ dev->mei_host_buffer_is_empty = false;
if (!mei_send_flow_control(dev, cl)) {
rets = -ENODEV;
goto unlock;
@@ -478,8 +478,8 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
dev->iamthif_state = MEI_IAMTHIF_WRITING;
dev->iamthif_current_cb = cb;
dev->iamthif_file_object = cb->file_object;
- dev->iamthif_canceled = 0;
- dev->iamthif_ioctl = 1;
+ dev->iamthif_canceled = false;
+ dev->iamthif_ioctl = true;
dev->iamthif_msg_buf_size = cb->request_buffer.size;
memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
cb->request_buffer.size);
@@ -490,7 +490,7 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
if (ret && dev->mei_host_buffer_is_empty) {
ret = 0;
- dev->mei_host_buffer_is_empty = 0;
+ dev->mei_host_buffer_is_empty = false;
if (cb->request_buffer.size >
(((dev->host_hw_state & H_CBD) >> 24) * sizeof(u32))
-sizeof(struct mei_msg_hdr)) {
@@ -515,7 +515,7 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
if (mei_hdr.msg_complete) {
if (mei_flow_ctrl_reduce(dev, &dev->iamthif_cl))
return -ENODEV;
- dev->iamthif_flow_control_pending = 1;
+ dev->iamthif_flow_control_pending = true;
dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
dev_dbg(&dev->pdev->dev, "add amthi cb to write waiting list\n");
dev->iamthif_current_cb = cb;
@@ -547,7 +547,7 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
*
* returns 0 on success, <0 on failure.
*/
-void run_next_iamthif_cmd(struct mei_device *dev)
+void mei_run_next_iamthif_cmd(struct mei_device *dev)
{
struct mei_cl *cl_tmp;
struct mei_cl_cb *cb_pos = NULL;
@@ -559,8 +559,8 @@ void run_next_iamthif_cmd(struct mei_device *dev)
dev->iamthif_msg_buf_size = 0;
dev->iamthif_msg_buf_index = 0;
- dev->iamthif_canceled = 0;
- dev->iamthif_ioctl = 1;
+ dev->iamthif_canceled = false;
+ dev->iamthif_ioctl = true;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
dev->iamthif_timer = 0;
dev->iamthif_file_object = NULL;
diff --git a/drivers/staging/mei/main.c b/drivers/staging/mei/main.c
index bfd1b46ec74..de8825fcd8c 100644
--- a/drivers/staging/mei/main.c
+++ b/drivers/staging/mei/main.c
@@ -14,8 +14,6 @@
*
*/
-
-
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -30,7 +28,6 @@
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
-#include <linux/version.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
@@ -142,7 +139,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
goto disable_device;
}
/* allocates and initializes the mei dev structure */
- dev = init_mei_device(pdev);
+ dev = mei_device_init(pdev);
if (!dev) {
err = -ENOMEM;
goto release_regions;
@@ -240,7 +237,7 @@ static void __devexit mei_remove(struct pci_dev *pdev)
mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);
dev->iamthif_current_cb = NULL;
- dev->num_mei_me_clients = 0;
+ dev->me_clients_num = 0;
mutex_unlock(&dev->device_lock);
@@ -362,7 +359,6 @@ static struct mei_cl_cb *find_read_list_entry(
{
struct mei_cl_cb *cb_pos = NULL;
struct mei_cl_cb *cb_next = NULL;
- struct mei_cl *cl_list_temp;
if (!dev->read_list.status &&
!list_empty(&dev->read_list.mei_cb.cb_list)) {
@@ -370,14 +366,11 @@ static struct mei_cl_cb *find_read_list_entry(
dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
list_for_each_entry_safe(cb_pos, cb_next,
&dev->read_list.mei_cb.cb_list, cb_list) {
+ struct mei_cl *cl_temp;
+ cl_temp = (struct mei_cl *)cb_pos->file_private;
- cl_list_temp = (struct mei_cl *)
- cb_pos->file_private;
-
- if (cl_list_temp &&
- mei_fe_same_id(cl, cl_list_temp))
+ if (mei_cl_cmp_id(cl, cl_temp))
return cb_pos;
-
}
}
return NULL;
@@ -407,7 +400,7 @@ static int mei_open(struct inode *inode, struct file *file)
mutex_lock(&dev->device_lock);
err = -ENOMEM;
- cl = mei_alloc_file_private(dev);
+ cl = mei_cl_allocate(dev);
if (!cl)
goto out;
@@ -478,7 +471,7 @@ static int mei_release(struct inode *inode, struct file *file)
cl->me_client_id);
rets = mei_disconnect_host_client(dev, cl);
}
- mei_flush_queues(dev, cl);
+ mei_cl_flush_queues(cl);
dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
cl->host_client_id,
cl->me_client_id);
@@ -519,10 +512,10 @@ static int mei_release(struct inode *inode, struct file *file)
dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
dev->iamthif_state);
- dev->iamthif_canceled = 1;
+ dev->iamthif_canceled = true;
if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
- run_next_iamthif_cmd(dev);
+ mei_run_next_iamthif_cmd(dev);
}
}
@@ -800,7 +793,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
rets = -ENODEV;
goto unlock_dev;
}
- for (i = 0; i < dev->num_mei_me_clients; i++) {
+ for (i = 0; i < dev->me_clients_num; i++) {
if (dev->me_clients[i].client_id ==
dev->iamthif_cl.me_client_id)
break;
@@ -810,7 +803,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
rets = -ENODEV;
goto unlock_dev;
}
- if (i == dev->num_mei_me_clients ||
+ if (i == dev->me_clients_num ||
(dev->me_clients[i].client_id !=
dev->iamthif_cl.me_client_id)) {
rets = -ENODEV;
@@ -868,7 +861,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
cl->me_client_id);
goto unlock_dev;
}
- for (i = 0; i < dev->num_mei_me_clients; i++) {
+ for (i = 0; i < dev->me_clients_num; i++) {
if (dev->me_clients[i].client_id ==
cl->me_client_id)
break;
@@ -877,7 +870,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
rets = -ENODEV;
goto unlock_dev;
}
- if (i == dev->num_mei_me_clients) {
+ if (i == dev->me_clients_num) {
rets = -ENODEV;
goto unlock_dev;
}
@@ -893,7 +886,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
if (rets && dev->mei_host_buffer_is_empty) {
rets = 0;
- dev->mei_host_buffer_is_empty = 0;
+ dev->mei_host_buffer_is_empty = false;
if (length > ((((dev->host_hw_state & H_CBD) >> 24) *
sizeof(u32)) - sizeof(struct mei_msg_hdr))) {
@@ -1066,7 +1059,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
dev->iamthif_file_object == file) {
mask |= (POLLIN | POLLRDNORM);
dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
- run_next_iamthif_cmd(dev);
+ mei_run_next_iamthif_cmd(dev);
}
goto out;
}
@@ -1333,9 +1326,9 @@ module_init(mei_init_module);
*/
static void __exit mei_exit_module(void)
{
- pci_unregister_driver(&mei_driver);
mei_sysfs_device_remove();
mei_unregister_cdev();
+ pci_unregister_driver(&mei_driver);
pr_debug("mei: Driver unloaded successfully.\n");
}
diff --git a/drivers/staging/mei/mei_dev.h b/drivers/staging/mei/mei_dev.h
index 6f3ec068ed6..d7bc10c612b 100644
--- a/drivers/staging/mei/mei_dev.h
+++ b/drivers/staging/mei/mei_dev.h
@@ -62,11 +62,6 @@ extern const u8 mei_wd_state_independence_msg[3][4];
#define MEI_MAX_OPEN_HANDLE_COUNT 253
/*
- * Number of queue lists used by this driver
- */
-#define MEI_IO_LISTS_NUMBER 7
-
-/*
* Number of Maximum MEI Clients
*/
#define MEI_CLIENTS_MAX 255
@@ -169,7 +164,6 @@ struct mei_cl {
struct mei_io_list {
struct mei_cl_cb mei_cb;
int status;
- struct mei_device *device_extension;
};
/* MEI private device struct */
@@ -179,7 +173,6 @@ struct mei_device {
* lists of queues
*/
/* array of pointers to aio lists */
- struct mei_io_list *io_list_array[MEI_IO_LISTS_NUMBER];
struct mei_io_list read_list; /* driver read queue */
struct mei_io_list write_list; /* driver write queue */
struct mei_io_list write_waiting_list; /* write waiting queue */
@@ -193,6 +186,7 @@ struct mei_device {
* list of files
*/
struct list_head file_list;
+ long open_handle_count;
/*
* memory of device
*/
@@ -203,8 +197,8 @@ struct mei_device {
* lock for the device
*/
struct mutex device_lock; /* device lock */
- int recvd_msg;
struct delayed_work wd_work; /* watch dog deleye work */
+ bool recvd_msg;
/*
* hw states of host and fw(ME)
*/
@@ -222,7 +216,8 @@ struct mei_device {
enum mei_states mei_state;
enum mei_init_clients_states init_clients_state;
u16 init_clients_timer;
- int stop;
+ bool stop;
+ bool need_reset;
u32 extra_write_index;
u32 rd_msg_buf[128]; /* used for control messages */
@@ -232,81 +227,105 @@ struct mei_device {
struct hbm_version version;
- int mei_host_buffer_is_empty;
- struct mei_cl wd_cl;
struct mei_me_client *me_clients; /* Note: memory has to be allocated */
DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
- u8 num_mei_me_clients;
+ u8 me_clients_num;
u8 me_client_presentation_num;
u8 me_client_index;
+ bool mei_host_buffer_is_empty;
- int wd_pending;
- int wd_stopped;
+ struct mei_cl wd_cl;
+ bool wd_pending;
+ bool wd_stopped;
+ bool wd_bypass; /* if false, don't refresh watchdog ME client */
u16 wd_timeout; /* seconds ((wd_data[1] << 8) + wd_data[0]) */
+ u16 wd_due_counter;
unsigned char wd_data[MEI_START_WD_DATA_SIZE];
- u16 wd_due_counter;
- bool wd_bypass; /* if false, don't refresh watchdog ME client */
struct file *iamthif_file_object;
struct mei_cl iamthif_cl;
- int iamthif_ioctl;
- int iamthif_canceled;
+ struct mei_cl_cb *iamthif_current_cb;
int iamthif_mtu;
unsigned long iamthif_timer;
u32 iamthif_stall_timer;
unsigned char *iamthif_msg_buf; /* Note: memory has to be allocated */
u32 iamthif_msg_buf_size;
u32 iamthif_msg_buf_index;
- int iamthif_flow_control_pending;
enum iamthif_states iamthif_state;
- struct mei_cl_cb *iamthif_current_cb;
- u8 write_hang;
- int need_reset;
- long open_handle_count;
-
+ bool iamthif_flow_control_pending;
+ bool iamthif_ioctl;
+ bool iamthif_canceled;
};
/*
* mei init function prototypes
*/
-struct mei_device *init_mei_device(struct pci_dev *pdev);
+struct mei_device *mei_device_init(struct pci_dev *pdev);
void mei_reset(struct mei_device *dev, int interrupts);
int mei_hw_init(struct mei_device *dev);
int mei_task_initialize_clients(void *data);
int mei_initialize_clients(struct mei_device *dev);
-struct mei_cl *mei_alloc_file_private(struct mei_device *dev);
int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl);
-void mei_initialize_list(struct mei_io_list *list,
- struct mei_device *dev);
-void mei_flush_list(struct mei_io_list *list, struct mei_cl *cl);
-void mei_flush_queues(struct mei_device *dev, struct mei_cl *cl);
-void mei_remove_client_from_file_list(struct mei_device *dev,
- u8 host_client_id);
-void host_init_iamthif(struct mei_device *dev);
-void mei_init_file_private(struct mei_cl *priv, struct mei_device *dev);
-void allocate_me_clients_storage(struct mei_device *dev);
-
-void host_start_message(struct mei_device *dev);
-void host_enum_clients_message(struct mei_device *dev);
-void host_client_properties(struct mei_device *dev);
+void mei_remove_client_from_file_list(struct mei_device *dev, u8 host_client_id);
+void mei_host_init_iamthif(struct mei_device *dev);
+void mei_allocate_me_clients_storage(struct mei_device *dev);
+
u8 mei_find_me_client_update_filext(struct mei_device *dev,
struct mei_cl *priv,
const uuid_le *cguid, u8 client_id);
/*
- * interrupt functions prototype
+ * MEI IO List Functions
+ */
+void mei_io_list_init(struct mei_io_list *list);
+void mei_io_list_flush(struct mei_io_list *list, struct mei_cl *cl);
+
+/*
+ * MEI ME Client Functions
+ */
+
+struct mei_cl *mei_cl_allocate(struct mei_device *dev);
+void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
+int mei_cl_flush_queues(struct mei_cl *cl);
+/**
+ * mei_cl_cmp_id - tells if file private data have same id
+ *
+ * @fe1: private data of 1. file object
+ * @fe2: private data of 2. file object
+ *
+ * returns true - if ids are the same and not NULL
+ */
+static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
+ const struct mei_cl *cl2)
+{
+ return cl1 && cl2 &&
+ (cl1->host_client_id == cl2->host_client_id) &&
+ (cl1->me_client_id == cl2->me_client_id);
+}
+
+
+
+/*
+ * MEI Host Client Functions
+ */
+void mei_host_start_message(struct mei_device *dev);
+void mei_host_enum_clients_message(struct mei_device *dev);
+void mei_host_client_properties(struct mei_device *dev);
+
+/*
+ * MEI interrupt functions prototype
*/
irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id);
-irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id);
+irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id);
void mei_wd_timer(struct work_struct *work);
/*
- * input output function prototype
+ * MEI input output function prototype
*/
int mei_ioctl_connect_client(struct file *file,
struct mei_connect_client_data *data);
@@ -321,7 +340,7 @@ int amthi_read(struct mei_device *dev, struct file *file,
struct mei_cl_cb *find_amthi_read_list_entry(struct mei_device *dev,
struct file *file);
-void run_next_iamthif_cmd(struct mei_device *dev);
+void mei_run_next_iamthif_cmd(struct mei_device *dev);
void mei_free_cb_private(struct mei_cl_cb *priv_cb);
@@ -339,8 +358,7 @@ int mei_find_me_client_index(const struct mei_device *dev, uuid_le cuuid);
*
* returns the byte read.
*/
-static inline u32 mei_reg_read(struct mei_device *dev,
- unsigned long offset)
+static inline u32 mei_reg_read(struct mei_device *dev, unsigned long offset)
{
return ioread32(dev->mem_addr + offset);
}
@@ -404,19 +422,4 @@ void mei_csr_clear_his(struct mei_device *dev);
void mei_enable_interrupts(struct mei_device *dev);
void mei_disable_interrupts(struct mei_device *dev);
-/**
- * mei_fe_same_id - tells if file private data have same id
- *
- * @fe1: private data of 1. file object
- * @fe2: private data of 2. file object
- *
- * returns !=0 - if ids are the same, 0 - if differ.
- */
-static inline int mei_fe_same_id(const struct mei_cl *fe1,
- const struct mei_cl *fe2)
-{
- return ((fe1->host_client_id == fe2->host_client_id) &&
- (fe1->me_client_id == fe2->me_client_id));
-}
-
#endif
diff --git a/drivers/staging/mei/wd.c b/drivers/staging/mei/wd.c
index 2564b038636..42f04efc90e 100644
--- a/drivers/staging/mei/wd.c
+++ b/drivers/staging/mei/wd.c
@@ -65,7 +65,7 @@ void mei_wd_start_setup(struct mei_device *dev)
*/
void mei_wd_host_init(struct mei_device *dev)
{
- mei_init_file_private(&dev->wd_cl, dev);
+ mei_cl_init(&dev->wd_cl, dev);
/* look for WD client and connect to it */
dev->wd_cl.state = MEI_FILE_DISCONNECTED;
@@ -83,18 +83,18 @@ void mei_wd_host_init(struct mei_device *dev)
dev_dbg(&dev->pdev->dev, "Failed to connect to WD client\n");
dev->wd_cl.state = MEI_FILE_DISCONNECTED;
dev->wd_cl.host_client_id = 0;
- host_init_iamthif(dev) ;
+ mei_host_init_iamthif(dev) ;
} else {
dev->wd_cl.timer_count = CONNECT_TIMEOUT;
}
} else {
dev_dbg(&dev->pdev->dev, "Failed to find WD client\n");
- host_init_iamthif(dev) ;
+ mei_host_init_iamthif(dev) ;
}
} else {
dev->wd_bypass = true;
dev_dbg(&dev->pdev->dev, "WD requested to be disabled\n");
- host_init_iamthif(dev) ;
+ mei_host_init_iamthif(dev) ;
}
}
@@ -141,7 +141,7 @@ int mei_wd_stop(struct mei_device *dev, bool preserve)
dev->wd_timeout = 0;
dev->wd_due_counter = 0;
memcpy(dev->wd_data, mei_stop_wd_params, MEI_WD_PARAMS_SIZE);
- dev->stop = 1;
+ dev->stop = true;
ret = mei_flow_ctrl_creds(dev, &dev->wd_cl);
if (ret < 0)
@@ -149,7 +149,7 @@ int mei_wd_stop(struct mei_device *dev, bool preserve)
if (ret && dev->mei_host_buffer_is_empty) {
ret = 0;
- dev->mei_host_buffer_is_empty = 0;
+ dev->mei_host_buffer_is_empty = false;
if (!mei_wd_send(dev)) {
ret = mei_flow_ctrl_reduce(dev, &dev->wd_cl);
@@ -159,20 +159,25 @@ int mei_wd_stop(struct mei_device *dev, bool preserve)
dev_dbg(&dev->pdev->dev, "send stop WD failed\n");
}
- dev->wd_pending = 0;
+ dev->wd_pending = false;
} else {
- dev->wd_pending = 1;
+ dev->wd_pending = true;
}
- dev->wd_stopped = 0;
+ dev->wd_stopped = false;
mutex_unlock(&dev->device_lock);
ret = wait_event_interruptible_timeout(dev->wait_stop_wd,
dev->wd_stopped, 10 * HZ);
mutex_lock(&dev->device_lock);
- if (!dev->wd_stopped)
- dev_dbg(&dev->pdev->dev, "stop wd failed to complete.\n");
- else
- dev_dbg(&dev->pdev->dev, "stop wd complete.\n");
+ if (dev->wd_stopped) {
+ dev_dbg(&dev->pdev->dev, "stop wd complete ret=%d.\n", ret);
+ ret = 0;
+ } else {
+ if (!ret)
+ ret = -ETIMEDOUT;
+ dev_warn(&dev->pdev->dev,
+ "stop wd failed to complete ret=%d.\n", ret);
+ }
if (preserve)
dev->wd_timeout = wd_timeout;
diff --git a/drivers/staging/msm/Kconfig b/drivers/staging/msm/Kconfig
deleted file mode 100644
index c5309eec58f..00000000000
--- a/drivers/staging/msm/Kconfig
+++ /dev/null
@@ -1,124 +0,0 @@
-config MSM_STAGING
- tristate "MSM Frame Buffer Support"
- depends on FB && ARCH_MSM && !FB_MSM
- select FB_BACKLIGHT if FB_MSM_BACKLIGHT
- select NEW_LEDS
- select LEDS_CLASS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- ---help---
- Support for MSM Framebuffer.
-
-if MSM_STAGING
-
-config FB_MSM_LCDC_HW
- bool
- default n
-
-choice
- prompt "MDP HW version"
- default FB_MSM_MDP31
-
-config FB_MSM_MDP31
- select FB_MSM_LCDC_HW
- bool "MDP HW ver3.1"
- ---help---
- Support for MSM MDP HW revision 3.1
- Say Y here if this is msm8x50 variant platform.
-endchoice
-
-config FB_MSM_LCDC
- bool
- default n
-
-config FB_MSM_TVOUT
- bool
- default n
-
-config FB_MSM_LCDC_PANEL
- bool
- select FB_MSM_LCDC
- default n
-
-config FB_MSM_LCDC_PRISM_WVGA
- bool
- select FB_MSM_LCDC_PANEL
- default n
-
-config FB_MSM_LCDC_ST15_WXGA
- bool
- select FB_MSM_LCDC_PANEL
- default n
-
-choice
- prompt "LCD Panel"
- default FB_MSM_LCDC_ST15_PANEL
-
-config FB_MSM_LCDC_PRISM_WVGA_PANEL
- depends on FB_MSM_LCDC_HW
- bool "LCDC Prism WVGA Panel"
- select FB_MSM_LCDC_PRISM_WVGA
- ---help---
- Support for LCDC Prism WVGA (800x480) panel
-
-
-config FB_MSM_LCDC_ST15_PANEL
- depends on FB_MSM_LCDC_HW
- bool "LCDC ST1.5 Panel"
- select FB_MSM_LCDC_ST15_WXGA
- ---help---
- Support for ST1.5 WXGA (1366x768) panel
-
-config FB_MSM_PANEL_NONE
- bool "NONE"
- ---help---
- This will disable LCD panel
-endchoice
-
-choice
- prompt "Secondary LCD Panel"
- depends on FB_MSM_MDP31
- default FB_MSM_SECONDARY_PANEL_NONE
-
-config FB_MSM_SECONDARY_PANEL_NONE
- bool "NONE"
- ---help---
- No secondary panel
-endchoice
-
-config FB_MSM_TVOUT_NTSC
- bool
- select FB_MSM_TVOUT
- default n
-
-config FB_MSM_TVOUT_PAL
- bool
- select FB_MSM_TVOUT
- default n
-
-choice
- depends on (FB_MSM_MDP22 || FB_MSM_MDP31)
- prompt "TVOut Region"
- default FB_MSM_TVOUT_NTSC_M
-
-config FB_MSM_TVOUT_NTSC_M
- bool "NTSC M"
- select FB_MSM_TVOUT_NTSC
- ---help---
- Support for NTSC M region (North American and Korea)
-
-config FB_MSM_TVOUT_NONE
- bool "NONE"
- ---help---
- This will disable TV Out functionality.
-endchoice
-
-config PMEM_KERNEL_SIZE
- int "PMEM for kernel components (in MB)"
- default 2
- depends on ARCH_QSD8X50
- help
- Configures the amount of PMEM for use by kernel components
- (in MB; minimum 2MB)
-endif
diff --git a/drivers/staging/msm/Makefile b/drivers/staging/msm/Makefile
deleted file mode 100644
index 07a89ecfcc2..00000000000
--- a/drivers/staging/msm/Makefile
+++ /dev/null
@@ -1,88 +0,0 @@
-obj-y := msm_fb.o staging-devices.o memory.o
-
-obj-$(CONFIG_FB_MSM_LOGO) += logo.o
-obj-$(CONFIG_FB_BACKLIGHT) += msm_fb_bl.o
-
-# MDP
-obj-y += mdp.o
-
-ifeq ($(CONFIG_FB_MSM_MDP40),y)
-obj-y += mdp4_util.o
-obj-$(CONFIG_DEBUG_FS) += mdp4_debugfs.o
-else
-obj-y += mdp_hw_init.o
-obj-y += mdp_ppp.o
-ifeq ($(CONFIG_FB_MSM_MDP31),y)
-obj-y += mdp_ppp_v31.o
-obj-$(CONFIG_MDP_PPP_ASYNC_OP) += mdp_ppp_dq.o
-else
-obj-y += mdp_ppp_v20.o
-endif
-endif
-
-ifeq ($(CONFIG_FB_MSM_OVERLAY),y)
-obj-y += mdp4_overlay.o
-obj-y += mdp4_overlay_lcdc.o
-obj-y += mdp4_overlay_mddi.o
-else
-obj-y += mdp_dma_lcdc.o
-endif
-
-obj-y += mdp_dma.o
-obj-y += mdp_dma_s.o
-obj-y += mdp_vsync.o
-obj-y += mdp_cursor.o
-obj-y += mdp_dma_tv.o
-
-# EBI2
-obj-$(CONFIG_FB_MSM_EBI2) += ebi2_lcd.o
-
-# LCDC
-obj-$(CONFIG_FB_MSM_LCDC) += lcdc.o
-
-# MDDI
-msm_mddi-y := mddi.o mddihost.o mddihosti.o
-obj-$(CONFIG_FB_MSM_MDDI) += msm_mddi.o
-
-# External MDDI
-msm_mddi_ext-y := mddihost_e.o mddi_ext.o
-obj-$(CONFIG_FB_MSM_EXTMDDI) += msm_mddi_ext.o
-
-# TVEnc
-obj-$(CONFIG_FB_MSM_TVOUT) += tvenc.o
-
-# MSM FB Panel
-obj-y += msm_fb_panel.o
-obj-$(CONFIG_FB_MSM_EBI2_TMD_QVGA_EPSON_QCIF) += ebi2_tmd20.o
-obj-$(CONFIG_FB_MSM_EBI2_TMD_QVGA_EPSON_QCIF) += ebi2_l2f.o
-
-ifeq ($(CONFIG_FB_MSM_MDDI_AUTO_DETECT),y)
-obj-y += mddi_prism.o
-obj-y += mddi_toshiba.o
-obj-y += mddi_toshiba_vga.o
-obj-y += mddi_toshiba_wvga_pt.o
-obj-y += mddi_sharp.o
-else
-obj-$(CONFIG_FB_MSM_MDDI_PRISM_WVGA) += mddi_prism.o
-obj-$(CONFIG_FB_MSM_MDDI_TOSHIBA_COMMON) += mddi_toshiba.o
-obj-$(CONFIG_FB_MSM_MDDI_TOSHIBA_COMMON_VGA) += mddi_toshiba_vga.o
-obj-$(CONFIG_FB_MSM_MDDI_TOSHIBA_WVGA_PORTRAIT) += mddi_toshiba_wvga_pt.o
-obj-$(CONFIG_FB_MSM_MDDI_SHARP_QVGA_128x128) += mddi_sharp.o
-endif
-
-obj-$(CONFIG_FB_MSM_LCDC_PANEL) += lcdc_panel.o
-obj-$(CONFIG_FB_MSM_LCDC_PRISM_WVGA) += lcdc_prism.o
-obj-$(CONFIG_FB_MSM_LCDC_EXTERNAL_WXGA) += lcdc_external.o
-obj-$(CONFIG_FB_MSM_LCDC_GORDON_VGA) += lcdc_gordon.o
-obj-$(CONFIG_FB_MSM_LCDC_TOSHIBA_WVGA_PT) += lcdc_toshiba_wvga_pt.o
-obj-$(CONFIG_FB_MSM_LCDC_SHARP_WVGA_PT) += lcdc_sharp_wvga_pt.o
-obj-$(CONFIG_FB_MSM_LCDC_ST15_WXGA) += lcdc_st15.o
-obj-$(CONFIG_FB_MSM_HDMI_SII_EXTERNAL_720P) += hdmi_sii9022.o
-
-obj-$(CONFIG_FB_MSM_TVOUT_NTSC) += tv_ntsc.o
-obj-$(CONFIG_FB_MSM_TVOUT_PAL) += tv_pal.o
-
-obj-$(CONFIG_FB_MSM_EXTMDDI_SVGA) += mddi_ext_lcd.o
-
-clean:
- rm *.o .*cmd
diff --git a/drivers/staging/msm/TODO b/drivers/staging/msm/TODO
deleted file mode 100644
index 05107a7d516..00000000000
--- a/drivers/staging/msm/TODO
+++ /dev/null
@@ -1,3 +0,0 @@
-- Merge this code with the existing MSM framebuffer
-- General style clean ups.
-
diff --git a/drivers/staging/msm/ebi2_l2f.c b/drivers/staging/msm/ebi2_l2f.c
deleted file mode 100644
index eea891d8f0f..00000000000
--- a/drivers/staging/msm/ebi2_l2f.c
+++ /dev/null
@@ -1,569 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-
-#include <linux/memory.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include "linux/proc_fs.h"
-
-#include <linux/delay.h>
-
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-
-/* The following are for MSM5100 on Gator
-*/
-#ifdef FEATURE_PM1000
-#include "pm1000.h"
-#endif /* FEATURE_PM1000 */
-/* The following are for MSM6050 on Bambi
-*/
-#ifdef FEATURE_PMIC_LCDKBD_LED_DRIVER
-#include "pm.h"
-#endif /* FEATURE_PMIC_LCDKBD_LED_DRIVER */
-
-#ifdef DISP_DEVICE_18BPP
-#undef DISP_DEVICE_18BPP
-#define DISP_DEVICE_16BPP
-#endif
-
-#define QCIF_WIDTH 176
-#define QCIF_HEIGHT 220
-
-static void *DISP_CMD_PORT;
-static void *DISP_DATA_PORT;
-
-#define DISP_CMD_DISON 0xaf
-#define DISP_CMD_DISOFF 0xae
-#define DISP_CMD_DISNOR 0xa6
-#define DISP_CMD_DISINV 0xa7
-#define DISP_CMD_DISCTL 0xca
-#define DISP_CMD_GCP64 0xcb
-#define DISP_CMD_GCP16 0xcc
-#define DISP_CMD_GSSET 0xcd
-#define DISP_GS_2 0x02
-#define DISP_GS_16 0x01
-#define DISP_GS_64 0x00
-#define DISP_CMD_SLPIN 0x95
-#define DISP_CMD_SLPOUT 0x94
-#define DISP_CMD_SD_PSET 0x75
-#define DISP_CMD_MD_PSET 0x76
-#define DISP_CMD_SD_CSET 0x15
-#define DISP_CMD_MD_CSET 0x16
-#define DISP_CMD_DATCTL 0xbc
-#define DISP_DATCTL_666 0x08
-#define DISP_DATCTL_565 0x28
-#define DISP_DATCTL_444 0x38
-#define DISP_CMD_RAMWR 0x5c
-#define DISP_CMD_RAMRD 0x5d
-#define DISP_CMD_PTLIN 0xa8
-#define DISP_CMD_PTLOUT 0xa9
-#define DISP_CMD_ASCSET 0xaa
-#define DISP_CMD_SCSTART 0xab
-#define DISP_CMD_VOLCTL 0xc6
-#define DISP_VOLCTL_TONE 0x80
-#define DISP_CMD_NOp 0x25
-#define DISP_CMD_OSSEL 0xd0
-#define DISP_CMD_3500KSET 0xd1
-#define DISP_CMD_3500KEND 0xd2
-#define DISP_CMD_14MSET 0xd3
-#define DISP_CMD_14MEND 0xd4
-
-#define DISP_CMD_OUT(cmd) outpw(DISP_CMD_PORT, cmd);
-
-#define DISP_DATA_OUT(data) outpw(DISP_DATA_PORT, data);
-
-#define DISP_DATA_IN() inpw(DISP_DATA_PORT);
-
-/* Epson device column number starts at 2
-*/
-#define DISP_SET_RECT(ulhc_row, lrhc_row, ulhc_col, lrhc_col) \
- DISP_CMD_OUT(DISP_CMD_SD_PSET) \
- DISP_DATA_OUT((ulhc_row) & 0xFF) \
- DISP_DATA_OUT((ulhc_row) >> 8) \
- DISP_DATA_OUT((lrhc_row) & 0xFF) \
- DISP_DATA_OUT((lrhc_row) >> 8) \
- DISP_CMD_OUT(DISP_CMD_SD_CSET) \
- DISP_DATA_OUT(((ulhc_col)+2) & 0xFF) \
- DISP_DATA_OUT(((ulhc_col)+2) >> 8) \
- DISP_DATA_OUT(((lrhc_col)+2) & 0xFF) \
- DISP_DATA_OUT(((lrhc_col)+2) >> 8)
-
-#define DISP_MIN_CONTRAST 0
-#define DISP_MAX_CONTRAST 127
-#define DISP_DEFAULT_CONTRAST 80
-
-#define DISP_MIN_BACKLIGHT 0
-#define DISP_MAX_BACKLIGHT 15
-#define DISP_DEFAULT_BACKLIGHT 2
-
-#define WAIT_SEC(sec) mdelay((sec)/1000)
-
-static word disp_area_start_row;
-static word disp_area_end_row;
-static byte disp_contrast = DISP_DEFAULT_CONTRAST;
-static boolean disp_powered_up;
-static boolean disp_initialized = FALSE;
-/* For some reason the contrast set at init time is not good. Need to do
- * it again
- */
-static boolean display_on = FALSE;
-static void epsonQcif_disp_init(struct platform_device *pdev);
-static void epsonQcif_disp_set_contrast(word contrast);
-static void epsonQcif_disp_set_display_area(word start_row, word end_row);
-static int epsonQcif_disp_off(struct platform_device *pdev);
-static int epsonQcif_disp_on(struct platform_device *pdev);
-static void epsonQcif_disp_set_rect(int x, int y, int xres, int yres);
-
-volatile word databack;
-static void epsonQcif_disp_init(struct platform_device *pdev)
-{
- struct msm_fb_data_type *mfd;
-
- int i;
-
- if (disp_initialized)
- return;
-
- mfd = platform_get_drvdata(pdev);
-
- DISP_CMD_PORT = mfd->cmd_port;
- DISP_DATA_PORT = mfd->data_port;
-
- /* Sleep in */
- DISP_CMD_OUT(DISP_CMD_SLPIN);
-
- /* Display off */
- DISP_CMD_OUT(DISP_CMD_DISOFF);
-
- /* Display normal */
- DISP_CMD_OUT(DISP_CMD_DISNOR);
-
- /* Set data mode */
- DISP_CMD_OUT(DISP_CMD_DATCTL);
- DISP_DATA_OUT(DISP_DATCTL_565);
-
- /* Set display timing */
- DISP_CMD_OUT(DISP_CMD_DISCTL);
- DISP_DATA_OUT(0x1c); /* p1 */
- DISP_DATA_OUT(0x02); /* p1 */
- DISP_DATA_OUT(0x82); /* p2 */
- DISP_DATA_OUT(0x00); /* p3 */
- DISP_DATA_OUT(0x00); /* p4 */
- DISP_DATA_OUT(0xe0); /* p5 */
- DISP_DATA_OUT(0x00); /* p5 */
- DISP_DATA_OUT(0xdc); /* p6 */
- DISP_DATA_OUT(0x00); /* p6 */
- DISP_DATA_OUT(0x02); /* p7 */
- DISP_DATA_OUT(0x00); /* p8 */
-
- /* Set 64 gray scale level */
- DISP_CMD_OUT(DISP_CMD_GCP64);
- DISP_DATA_OUT(0x08); /* p01 */
- DISP_DATA_OUT(0x00);
- DISP_DATA_OUT(0x2a); /* p02 */
- DISP_DATA_OUT(0x00);
- DISP_DATA_OUT(0x4e); /* p03 */
- DISP_DATA_OUT(0x00);
- DISP_DATA_OUT(0x6b); /* p04 */
- DISP_DATA_OUT(0x00);
- DISP_DATA_OUT(0x88); /* p05 */
- DISP_DATA_OUT(0x00);
- DISP_DATA_OUT(0xa3); /* p06 */
- DISP_DATA_OUT(0x00);
- DISP_DATA_OUT(0xba); /* p07 */
- DISP_DATA_OUT(0x00);
- DISP_DATA_OUT(0xd1); /* p08 */
- DISP_DATA_OUT(0x00);
- DISP_DATA_OUT(0xe5); /* p09 */
- DISP_DATA_OUT(0x00);
- DISP_DATA_OUT(0xf3); /* p10 */
- DISP_DATA_OUT(0x00);
- DISP_DATA_OUT(0x03); /* p11 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x13); /* p12 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x22); /* p13 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x2f); /* p14 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x3b); /* p15 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x46); /* p16 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x51); /* p17 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x5b); /* p18 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x64); /* p19 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x6c); /* p20 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x74); /* p21 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x7c); /* p22 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x83); /* p23 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x8a); /* p24 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x91); /* p25 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x98); /* p26 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x9f); /* p27 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xa6); /* p28 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xac); /* p29 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xb2); /* p30 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xb7); /* p31 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xbc); /* p32 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xc1); /* p33 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xc6); /* p34 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xcb); /* p35 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xd0); /* p36 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xd4); /* p37 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xd8); /* p38 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xdc); /* p39 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xe0); /* p40 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xe4); /* p41 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xe8); /* p42 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xec); /* p43 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xf0); /* p44 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xf4); /* p45 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xf8); /* p46 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xfb); /* p47 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xfe); /* p48 */
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0x01); /* p49 */
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x03); /* p50 */
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x05); /* p51 */
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x07); /* p52 */
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x09); /* p53 */
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x0b); /* p54 */
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x0d); /* p55 */
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x0f); /* p56 */
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x11); /* p57 */
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x13); /* p58 */
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x15); /* p59 */
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x17); /* p60 */
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x19); /* p61 */
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x1b); /* p62 */
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x1c); /* p63 */
- DISP_DATA_OUT(0x02);
-
- /* Set 16 gray scale level */
- DISP_CMD_OUT(DISP_CMD_GCP16);
- DISP_DATA_OUT(0x1a); /* p01 */
- DISP_DATA_OUT(0x32); /* p02 */
- DISP_DATA_OUT(0x42); /* p03 */
- DISP_DATA_OUT(0x4c); /* p04 */
- DISP_DATA_OUT(0x58); /* p05 */
- DISP_DATA_OUT(0x5f); /* p06 */
- DISP_DATA_OUT(0x66); /* p07 */
- DISP_DATA_OUT(0x6b); /* p08 */
- DISP_DATA_OUT(0x70); /* p09 */
- DISP_DATA_OUT(0x74); /* p10 */
- DISP_DATA_OUT(0x78); /* p11 */
- DISP_DATA_OUT(0x7b); /* p12 */
- DISP_DATA_OUT(0x7e); /* p13 */
- DISP_DATA_OUT(0x80); /* p14 */
- DISP_DATA_OUT(0x82); /* p15 */
-
- /* Set DSP column */
- DISP_CMD_OUT(DISP_CMD_MD_CSET);
- DISP_DATA_OUT(0xff);
- DISP_DATA_OUT(0x03);
- DISP_DATA_OUT(0xff);
- DISP_DATA_OUT(0x03);
-
- /* Set DSP page */
- DISP_CMD_OUT(DISP_CMD_MD_PSET);
- DISP_DATA_OUT(0xff);
- DISP_DATA_OUT(0x01);
- DISP_DATA_OUT(0xff);
- DISP_DATA_OUT(0x01);
-
- /* Set ARM column */
- DISP_CMD_OUT(DISP_CMD_SD_CSET);
- DISP_DATA_OUT(0x02);
- DISP_DATA_OUT(0x00);
- DISP_DATA_OUT((QCIF_WIDTH + 1) & 0xFF);
- DISP_DATA_OUT((QCIF_WIDTH + 1) >> 8);
-
- /* Set ARM page */
- DISP_CMD_OUT(DISP_CMD_SD_PSET);
- DISP_DATA_OUT(0x00);
- DISP_DATA_OUT(0x00);
- DISP_DATA_OUT((QCIF_HEIGHT - 1) & 0xFF);
- DISP_DATA_OUT((QCIF_HEIGHT - 1) >> 8);
-
- /* Set 64 gray scales */
- DISP_CMD_OUT(DISP_CMD_GSSET);
- DISP_DATA_OUT(DISP_GS_64);
-
- DISP_CMD_OUT(DISP_CMD_OSSEL);
- DISP_DATA_OUT(0);
-
- /* Sleep out */
- DISP_CMD_OUT(DISP_CMD_SLPOUT);
-
- WAIT_SEC(40000);
-
- /* Initialize power IC */
- DISP_CMD_OUT(DISP_CMD_VOLCTL);
- DISP_DATA_OUT(DISP_VOLCTL_TONE);
-
- WAIT_SEC(40000);
-
- /* Set electronic volume, d'xx */
- DISP_CMD_OUT(DISP_CMD_VOLCTL);
- DISP_DATA_OUT(DISP_DEFAULT_CONTRAST); /* value from 0 to 127 */
-
- /* Initialize display data */
- DISP_SET_RECT(0, (QCIF_HEIGHT - 1), 0, (QCIF_WIDTH - 1));
- DISP_CMD_OUT(DISP_CMD_RAMWR);
- for (i = 0; i < QCIF_HEIGHT * QCIF_WIDTH; i++)
- DISP_DATA_OUT(0xffff);
-
- DISP_CMD_OUT(DISP_CMD_RAMRD);
- databack = DISP_DATA_IN();
- databack = DISP_DATA_IN();
- databack = DISP_DATA_IN();
- databack = DISP_DATA_IN();
-
- WAIT_SEC(80000);
-
- DISP_CMD_OUT(DISP_CMD_DISON);
-
- disp_area_start_row = 0;
- disp_area_end_row = QCIF_HEIGHT - 1;
- disp_powered_up = TRUE;
- disp_initialized = TRUE;
- epsonQcif_disp_set_display_area(0, QCIF_HEIGHT - 1);
- display_on = TRUE;
-}
-
-static void epsonQcif_disp_set_rect(int x, int y, int xres, int yres)
-{
- if (!disp_initialized)
- return;
-
- DISP_SET_RECT(y, y + yres - 1, x, x + xres - 1);
- DISP_CMD_OUT(DISP_CMD_RAMWR);
-}
-
-static void epsonQcif_disp_set_display_area(word start_row, word end_row)
-{
- if (!disp_initialized)
- return;
-
- if ((start_row == disp_area_start_row)
- && (end_row == disp_area_end_row))
- return;
- disp_area_start_row = start_row;
- disp_area_end_row = end_row;
-
- /* Range checking
- */
- if (end_row >= QCIF_HEIGHT)
- end_row = QCIF_HEIGHT - 1;
- if (start_row > end_row)
- start_row = end_row;
-
- /* When display is not the full screen, gray scale is set to
- ** 2; otherwise it is set to 64.
- */
- if ((start_row == 0) && (end_row == (QCIF_HEIGHT - 1))) {
- /* The whole screen */
- DISP_CMD_OUT(DISP_CMD_PTLOUT);
- WAIT_SEC(10000);
- DISP_CMD_OUT(DISP_CMD_DISOFF);
- WAIT_SEC(100000);
- DISP_CMD_OUT(DISP_CMD_GSSET);
- DISP_DATA_OUT(DISP_GS_64);
- WAIT_SEC(100000);
- DISP_CMD_OUT(DISP_CMD_DISON);
- } else {
- /* partial screen */
- DISP_CMD_OUT(DISP_CMD_PTLIN);
- DISP_DATA_OUT(start_row);
- DISP_DATA_OUT(start_row >> 8);
- DISP_DATA_OUT(end_row);
- DISP_DATA_OUT(end_row >> 8);
- DISP_CMD_OUT(DISP_CMD_GSSET);
- DISP_DATA_OUT(DISP_GS_2);
- }
-}
-
-static int epsonQcif_disp_off(struct platform_device *pdev)
-{
- if (!disp_initialized)
- epsonQcif_disp_init(pdev);
-
- if (display_on) {
- DISP_CMD_OUT(DISP_CMD_DISOFF);
- DISP_CMD_OUT(DISP_CMD_SLPIN);
- display_on = FALSE;
- }
-
- return 0;
-}
-
-static int epsonQcif_disp_on(struct platform_device *pdev)
-{
- if (!disp_initialized)
- epsonQcif_disp_init(pdev);
-
- if (!display_on) {
- DISP_CMD_OUT(DISP_CMD_SLPOUT);
- WAIT_SEC(40000);
- DISP_CMD_OUT(DISP_CMD_DISON);
- epsonQcif_disp_set_contrast(disp_contrast);
- display_on = TRUE;
- }
-
- return 0;
-}
-
-static void epsonQcif_disp_set_contrast(word contrast)
-{
- if (!disp_initialized)
- return;
-
- /* Initialize power IC, d'24 */
- DISP_CMD_OUT(DISP_CMD_VOLCTL);
- DISP_DATA_OUT(DISP_VOLCTL_TONE);
-
- WAIT_SEC(40000);
-
- /* Set electronic volume, d'xx */
- DISP_CMD_OUT(DISP_CMD_VOLCTL);
- if (contrast > 127)
- contrast = 127;
- DISP_DATA_OUT(contrast); /* value from 0 to 127 */
- disp_contrast = (byte) contrast;
-} /* End disp_set_contrast */
-
-static void epsonQcif_disp_clear_screen_area(
- word start_row, word end_row, word start_column, word end_column) {
- int32 i;
-
- /* Clear the display screen */
- DISP_SET_RECT(start_row, end_row, start_column, end_column);
- DISP_CMD_OUT(DISP_CMD_RAMWR);
- i = (end_row - start_row + 1) * (end_column - start_column + 1);
- for (; i > 0; i--)
- DISP_DATA_OUT(0xffff);
-}
-
-static int __init epsonQcif_probe(struct platform_device *pdev)
-{
- msm_fb_add_device(pdev);
-
- return 0;
-}
-
-static struct platform_driver this_driver = {
- .probe = epsonQcif_probe,
- .driver = {
- .name = "ebi2_epson_qcif",
- },
-};
-
-static struct msm_fb_panel_data epsonQcif_panel_data = {
- .on = epsonQcif_disp_on,
- .off = epsonQcif_disp_off,
- .set_rect = epsonQcif_disp_set_rect,
-};
-
-static struct platform_device this_device = {
- .name = "ebi2_epson_qcif",
- .id = 0,
- .dev = {
- .platform_data = &epsonQcif_panel_data,
- }
-};
-
-static int __init epsonQcif_init(void)
-{
- int ret;
- struct msm_panel_info *pinfo;
-
- ret = platform_driver_register(&this_driver);
- if (!ret) {
- pinfo = &epsonQcif_panel_data.panel_info;
- pinfo->xres = QCIF_WIDTH;
- pinfo->yres = QCIF_HEIGHT;
- pinfo->type = EBI2_PANEL;
- pinfo->pdest = DISPLAY_2;
- pinfo->wait_cycle = 0x808000;
- pinfo->bpp = 16;
- pinfo->fb_num = 2;
- pinfo->lcd.vsync_enable = FALSE;
-
- ret = platform_device_register(&this_device);
- if (ret)
- platform_driver_unregister(&this_driver);
- }
-
- return ret;
-}
-
-module_init(epsonQcif_init);
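
The Epson QCIF driver removed above programs the panel's drawing window through DISP_SET_RECT, which sends each 16-bit row/column bound as a low byte followed by a high byte and adds the panel's fixed 2-column offset. The short user-space sketch below only mirrors that byte-splitting arithmetic under those assumptions; the names (emit_bound, EPSON_COL_OFFSET) are illustrative and are not part of the original driver or any kernel API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the DISP_SET_RECT arithmetic from the deleted
 * ebi2_l2f.c: each bound goes out low byte first, then high byte, and
 * column values carry the Epson panel's fixed offset of 2. */
#define EPSON_COL_OFFSET 2

static void emit_bound(const char *label, uint16_t value)
{
	printf("%s: low=0x%02x high=0x%02x\n", label,
	       (unsigned)(value & 0xFF), (unsigned)(value >> 8));
}

int main(void)
{
	uint16_t ulhc_row = 0, lrhc_row = 219;   /* QCIF_HEIGHT - 1 */
	uint16_t ulhc_col = 0, lrhc_col = 175;   /* QCIF_WIDTH - 1  */

	emit_bound("row start", ulhc_row);
	emit_bound("row end  ", lrhc_row);
	emit_bound("col start", ulhc_col + EPSON_COL_OFFSET);
	emit_bound("col end  ", lrhc_col + EPSON_COL_OFFSET);
	return 0;
}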
diff --git a/drivers/staging/msm/ebi2_lcd.c b/drivers/staging/msm/ebi2_lcd.c
deleted file mode 100644
index b41e1230cec..00000000000
--- a/drivers/staging/msm/ebi2_lcd.c
+++ /dev/null
@@ -1,250 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/uaccess.h>
-#include <linux/workqueue.h>
-#include <linux/string.h>
-#include <linux/version.h>
-#include <linux/proc_fs.h>
-#include <linux/vmalloc.h>
-#include <linux/debugfs.h>
-
-#include "msm_fb.h"
-
-static int ebi2_lcd_probe(struct platform_device *pdev);
-static int ebi2_lcd_remove(struct platform_device *pdev);
-
-static struct platform_driver ebi2_lcd_driver = {
- .probe = ebi2_lcd_probe,
- .remove = ebi2_lcd_remove,
- .suspend = NULL,
- .suspend_late = NULL,
- .resume_early = NULL,
- .resume = NULL,
- .shutdown = NULL,
- .driver = {
- .name = "ebi2_lcd",
- },
-};
-
-static void *ebi2_base;
-static void *ebi2_lcd_cfg0;
-static void *ebi2_lcd_cfg1;
-static void __iomem *lcd01_base;
-static void __iomem *lcd02_base;
-static int ebi2_lcd_resource_initialized;
-
-static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
-static int pdev_list_cnt;
-
-static int ebi2_lcd_probe(struct platform_device *pdev)
-{
- struct msm_fb_data_type *mfd;
- struct platform_device *mdp_dev = NULL;
- struct msm_fb_panel_data *pdata = NULL;
- int rc, i;
-
- if (pdev->id == 0) {
- for (i = 0; i < pdev->num_resources; i++) {
- if (!strncmp(pdev->resource[i].name, "base", 4)) {
- ebi2_base = ioremap(pdev->resource[i].start,
- pdev->resource[i].end -
- pdev->resource[i].start + 1);
- if (!ebi2_base) {
- printk(KERN_ERR
- "ebi2_base ioremap failed!\n");
- return -ENOMEM;
- }
- ebi2_lcd_cfg0 = (void *)(ebi2_base + 0x20);
- ebi2_lcd_cfg1 = (void *)(ebi2_base + 0x24);
- } else if (!strncmp(pdev->resource[i].name,
- "lcd01", 5)) {
- lcd01_base = ioremap(pdev->resource[i].start,
- pdev->resource[i].end -
- pdev->resource[i].start + 1);
- if (!lcd01_base) {
- printk(KERN_ERR
- "lcd01_base ioremap failed!\n");
- return -ENOMEM;
- }
- } else if (!strncmp(pdev->resource[i].name,
- "lcd02", 5)) {
- lcd02_base = ioremap(pdev->resource[i].start,
- pdev->resource[i].end -
- pdev->resource[i].start + 1);
- if (!lcd02_base) {
- printk(KERN_ERR
- "lcd02_base ioremap failed!\n");
- return -ENOMEM;
- }
- }
- }
- ebi2_lcd_resource_initialized = 1;
- return 0;
- }
-
- if (!ebi2_lcd_resource_initialized)
- return -EPERM;
-
- mfd = platform_get_drvdata(pdev);
-
- if (!mfd)
- return -ENODEV;
-
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
- return -ENOMEM;
-
- if (ebi2_base == NULL)
- return -ENOMEM;
-
- mdp_dev = platform_device_alloc("mdp", pdev->id);
- if (!mdp_dev)
- return -ENOMEM;
-
- /* link to the latest pdev */
- mfd->pdev = mdp_dev;
- mfd->dest = DISPLAY_LCD;
-
- /* add panel data */
- if (platform_device_add_data
- (mdp_dev, pdev->dev.platform_data,
- sizeof(struct msm_fb_panel_data))) {
- printk(KERN_ERR "ebi2_lcd_probe: platform_device_add_data failed!\n");
- platform_device_put(mdp_dev);
- return -ENOMEM;
- }
-
- /* data chain */
- pdata = mdp_dev->dev.platform_data;
- pdata->on = panel_next_on;
- pdata->off = panel_next_off;
- pdata->next = pdev;
-
- /* get/set panel specific fb info */
- mfd->panel_info = pdata->panel_info;
-
- if (mfd->panel_info.bpp == 24)
- mfd->fb_imgType = MDP_RGB_888;
- else
- mfd->fb_imgType = MDP_RGB_565;
-
- /* config msm ebi2 lcd register */
- if (mfd->panel_info.pdest == DISPLAY_1) {
- outp32(ebi2_base,
- (inp32(ebi2_base) & (~(EBI2_PRIM_LCD_CLR))) |
- EBI2_PRIM_LCD_SEL);
- /*
- * current design has one set of cfg0/1 register to control
- * both EBI2 channels. so, we're using the PRIM channel to
- * configure both.
- */
- outp32(ebi2_lcd_cfg0, mfd->panel_info.wait_cycle);
- if (mfd->panel_info.bpp == 18)
- outp32(ebi2_lcd_cfg1, 0x01000000);
- else
- outp32(ebi2_lcd_cfg1, 0x0);
- } else {
-#ifdef DEBUG_EBI2_LCD
- /*
- * conflicting with QCOM SURF FPGA CS.
- * OEM should enable below for their CS mapping
- */
- outp32(ebi2_base, (inp32(ebi2_base)&(~(EBI2_SECD_LCD_CLR)))
- |EBI2_SECD_LCD_SEL);
-#endif
- }
-
- /*
- * map cs (chip select) address
- */
- if (mfd->panel_info.pdest == DISPLAY_1) {
- mfd->cmd_port = lcd01_base;
- mfd->data_port =
- (void *)((uint32) mfd->cmd_port + EBI2_PRIM_LCD_RS_PIN);
- mfd->data_port_phys =
- (void *)(LCD_PRIM_BASE_PHYS + EBI2_PRIM_LCD_RS_PIN);
- } else {
- mfd->cmd_port = lcd01_base;
- mfd->data_port =
- (void *)((uint32) mfd->cmd_port + EBI2_SECD_LCD_RS_PIN);
- mfd->data_port_phys =
- (void *)(LCD_SECD_BASE_PHYS + EBI2_SECD_LCD_RS_PIN);
- }
-
- /*
- * set driver data
- */
- platform_set_drvdata(mdp_dev, mfd);
-
- /*
- * register in mdp driver
- */
- rc = platform_device_add(mdp_dev);
- if (rc) {
- goto ebi2_lcd_probe_err;
- }
-
- pdev_list[pdev_list_cnt++] = pdev;
- return 0;
-
- ebi2_lcd_probe_err:
- platform_device_put(mdp_dev);
- return rc;
-}
-
-static int ebi2_lcd_remove(struct platform_device *pdev)
-{
- struct msm_fb_data_type *mfd;
-
- mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
-
- if (!mfd)
- return 0;
-
- if (mfd->key != MFD_KEY)
- return 0;
-
- iounmap(mfd->cmd_port);
-
- return 0;
-}
-
-static int ebi2_lcd_register_driver(void)
-{
- return platform_driver_register(&ebi2_lcd_driver);
-}
-
-static int __init ebi2_lcd_driver_init(void)
-{
- return ebi2_lcd_register_driver();
-}
-
-module_init(ebi2_lcd_driver_init); \ No newline at end of file
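
The ebi2_lcd probe in the hunk above wires the MDP device and the panel device together through a small callback chain: it copies the panel's msm_fb_panel_data, points ->on/->off at panel_next_on/panel_next_off, and stores the underlying panel pdev in ->next, so a power call made against the MDP device falls through to the real panel driver. The sketch below is a simplified user-space model of that chaining idea only; the types and names (device_model, panel_data, chain_on, leaf_panel_on) are hypothetical and do not reflect the real kernel structures.

#include <stdio.h>

/* Simplified, hypothetical model of the "data chain" pattern used in the
 * deleted ebi2_lcd.c: an upper device's on/off callbacks delegate to the
 * next (lower) device's callbacks. */

struct panel_data;

struct device_model {
	const char *name;
	struct panel_data *pdata;      /* platform data attached to this device */
};

struct panel_data {
	int (*on)(struct device_model *dev);
	int (*off)(struct device_model *dev);
	struct device_model *next;     /* lower device in the chain */
};

/* Leaf panel implementation (stands in for a panel driver's on() hook). */
static int leaf_panel_on(struct device_model *dev)
{
	printf("%s: panel powered on\n", dev->name);
	return 0;
}

/* Upper-layer helper (stands in for panel_next_on): walk to ->next. */
static int chain_on(struct device_model *dev)
{
	struct device_model *next = dev->pdata->next;

	if (!next || !next->pdata || !next->pdata->on)
		return -1;
	return next->pdata->on(next);
}

int main(void)
{
	struct panel_data leaf_pdata = { .on = leaf_panel_on };
	struct device_model panel_dev = { .name = "ebi2_epson_qcif",
					  .pdata = &leaf_pdata };

	/* The EBI2 layer builds a new pdata whose ->on delegates downward. */
	struct panel_data mdp_pdata = { .on = chain_on, .next = &panel_dev };
	struct device_model mdp_dev = { .name = "mdp", .pdata = &mdp_pdata };

	/* A power-on request against the MDP device reaches the leaf panel. */
	return mdp_dev.pdata->on(&mdp_dev);
}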
diff --git a/drivers/staging/msm/ebi2_tmd20.c b/drivers/staging/msm/ebi2_tmd20.c
deleted file mode 100644
index d66d0397825..00000000000
--- a/drivers/staging/msm/ebi2_tmd20.c
+++ /dev/null
@@ -1,1122 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-
-#include <linux/memory.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include "linux/proc_fs.h"
-
-#include <linux/delay.h>
-
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-
-/* #define TMD20QVGA_LCD_18BPP */
-#define QVGA_WIDTH 240
-#define QVGA_HEIGHT 320
-
-#ifdef TMD20QVGA_LCD_18BPP
-#define DISP_QVGA_18BPP(x) ((((x)<<2) & 0x3FC00)|(( (x)<<1)& 0x1FE))
-#define DISP_REG(name) uint32 register_##name;
-#define OUTPORT(x, y) outpdw(x, y)
-#define INPORT(x) inpdw(x)
-#else
-#define DISP_QVGA_18BPP(x) (x)
-#define DISP_REG(name) uint16 register_##name;
-#define OUTPORT(x, y) outpw(x, y)
-#define INPORT(x) inpw(x)
-#endif
-
-static void *DISP_CMD_PORT;
-static void *DISP_DATA_PORT;
-
-#define DISP_RNTI 0x10
-
-#define DISP_CMD_OUT(cmd) OUTPORT(DISP_CMD_PORT, DISP_QVGA_18BPP(cmd))
-#define DISP_DATA_OUT(data) OUTPORT(DISP_DATA_PORT, data)
-#define DISP_DATA_IN() INPORT(DISP_DATA_PORT)
-
-#if (defined(TMD20QVGA_LCD_18BPP))
-#define DISP_DATA_OUT_16TO18BPP(x) \
- DISP_DATA_OUT((((x)&0xf800)<<2|((x)&0x80000)>>3) \
- | (((x)&0x7e0)<<1) \
- | (((x)&0x1F)<<1|((x)&0x10)>>4))
-#else
-#define DISP_DATA_OUT_16TO18BPP(x) \
- DISP_DATA_OUT(x)
-#endif
-
-#define DISP_WRITE_OUT(addr, data) \
- register_##addr = DISP_QVGA_18BPP(data); \
- DISP_CMD_OUT(addr); \
- DISP_DATA_OUT(register_##addr);
-
-#define DISP_UPDATE_VALUE(addr, bitmask, data) \
- DISP_WRITE_OUT(##addr, (register_##addr & ~(bitmask)) | (data));
-
-#define DISP_VAL_IF(bitvalue, bitmask) \
- ((bitvalue) ? (bitmask) : 0)
-
-/* QVGA = 256 x 320 */
-/* actual display is 240 x 320...offset by 0x10 */
-#define DISP_ROW_COL_TO_ADDR(row, col) ((row) * 0x100 + col)
-#define DISP_SET_RECT(ulhc_row, lrhc_row, ulhc_col, lrhc_col) \
- { \
- DISP_WRITE_OUT(DISP_HORZ_RAM_ADDR_POS_1_ADDR, (ulhc_col) + tmd20qvga_panel_offset); \
- DISP_WRITE_OUT(DISP_HORZ_RAM_ADDR_POS_2_ADDR, (lrhc_col) + tmd20qvga_panel_offset); \
- DISP_WRITE_OUT(DISP_VERT_RAM_ADDR_POS_1_ADDR, (ulhc_row)); \
- DISP_WRITE_OUT(DISP_VERT_RAM_ADDR_POS_2_ADDR, (lrhc_row)); \
- DISP_WRITE_OUT(DISP_RAM_ADDR_SET_1_ADDR, (ulhc_col) + tmd20qvga_panel_offset); \
- DISP_WRITE_OUT(DISP_RAM_ADDR_SET_2_ADDR, (ulhc_row)); \
- }
-
-#define WAIT_MSEC(msec) mdelay(msec)
-
-/*
- * TMD QVGA Address
- */
-/* Display Control */
-#define DISP_START_OSCILLATION_ADDR 0x000
-DISP_REG(DISP_START_OSCILLATION_ADDR)
-#define DISP_DRIVER_OUTPUT_CTL_ADDR 0x001
- DISP_REG(DISP_DRIVER_OUTPUT_CTL_ADDR)
-#define DISP_LCD_DRIVING_SIG_ADDR 0x002
- DISP_REG(DISP_LCD_DRIVING_SIG_ADDR)
-#define DISP_ENTRY_MODE_ADDR 0x003
- DISP_REG(DISP_ENTRY_MODE_ADDR)
-#define DISP_DISPLAY_CTL_1_ADDR 0x007
- DISP_REG(DISP_DISPLAY_CTL_1_ADDR)
-#define DISP_DISPLAY_CTL_2_ADDR 0x008
- DISP_REG(DISP_DISPLAY_CTL_2_ADDR)
-
-/* DISPLAY MODE 0x009 partial display not supported */
-#define DISP_POWER_SUPPLY_INTF_ADDR 0x00A
- DISP_REG(DISP_POWER_SUPPLY_INTF_ADDR)
-
-/* DISPLAY MODE 0x00B xZoom feature is not supported */
-#define DISP_EXT_DISPLAY_CTL_1_ADDR 0x00C
- DISP_REG(DISP_EXT_DISPLAY_CTL_1_ADDR)
-
-#define DISP_FRAME_CYCLE_CTL_ADDR 0x00D
- DISP_REG(DISP_FRAME_CYCLE_CTL_ADDR)
-
-#define DISP_EXT_DISPLAY_CTL_2_ADDR 0x00E
- DISP_REG(DISP_EXT_DISPLAY_CTL_2_ADDR)
-
-#define DISP_EXT_DISPLAY_CTL_3_ADDR 0x00F
- DISP_REG(DISP_EXT_DISPLAY_CTL_3_ADDR)
-
-#define DISP_LTPS_CTL_1_ADDR 0x012
- DISP_REG(DISP_LTPS_CTL_1_ADDR)
-#define DISP_LTPS_CTL_2_ADDR 0x013
- DISP_REG(DISP_LTPS_CTL_2_ADDR)
-#define DISP_LTPS_CTL_3_ADDR 0x014
- DISP_REG(DISP_LTPS_CTL_3_ADDR)
-#define DISP_LTPS_CTL_4_ADDR 0x018
- DISP_REG(DISP_LTPS_CTL_4_ADDR)
-#define DISP_LTPS_CTL_5_ADDR 0x019
- DISP_REG(DISP_LTPS_CTL_5_ADDR)
-#define DISP_LTPS_CTL_6_ADDR 0x01A
- DISP_REG(DISP_LTPS_CTL_6_ADDR)
-#define DISP_AMP_SETTING_ADDR 0x01C
- DISP_REG(DISP_AMP_SETTING_ADDR)
-#define DISP_MODE_SETTING_ADDR 0x01D
- DISP_REG(DISP_MODE_SETTING_ADDR)
-#define DISP_POFF_LN_SETTING_ADDR 0x01E
- DISP_REG(DISP_POFF_LN_SETTING_ADDR)
-/* Power Control */
-#define DISP_POWER_CTL_1_ADDR 0x100
- DISP_REG(DISP_POWER_CTL_1_ADDR)
-#define DISP_POWER_CTL_2_ADDR 0x101
- DISP_REG(DISP_POWER_CTL_2_ADDR)
-#define DISP_POWER_CTL_3_ADDR 0x102
- DISP_REG(DISP_POWER_CTL_3_ADDR)
-#define DISP_POWER_CTL_4_ADDR 0x103
- DISP_REG(DISP_POWER_CTL_4_ADDR)
-#define DISP_POWER_CTL_5_ADDR 0x104
- DISP_REG(DISP_POWER_CTL_5_ADDR)
-#define DISP_POWER_CTL_6_ADDR 0x105
- DISP_REG(DISP_POWER_CTL_6_ADDR)
-#define DISP_POWER_CTL_7_ADDR 0x106
- DISP_REG(DISP_POWER_CTL_7_ADDR)
-/* RAM Access */
-#define DISP_RAM_ADDR_SET_1_ADDR 0x200
- DISP_REG(DISP_RAM_ADDR_SET_1_ADDR)
-#define DISP_RAM_ADDR_SET_2_ADDR 0x201
- DISP_REG(DISP_RAM_ADDR_SET_2_ADDR)
-#define DISP_CMD_RAMRD DISP_CMD_RAMWR
-#define DISP_CMD_RAMWR 0x202
- DISP_REG(DISP_CMD_RAMWR)
-#define DISP_RAM_DATA_MASK_1_ADDR 0x203
- DISP_REG(DISP_RAM_DATA_MASK_1_ADDR)
-#define DISP_RAM_DATA_MASK_2_ADDR 0x204
- DISP_REG(DISP_RAM_DATA_MASK_2_ADDR)
-/* Gamma Control, Contrast, Gray Scale Setting */
-#define DISP_GAMMA_CONTROL_1_ADDR 0x300
- DISP_REG(DISP_GAMMA_CONTROL_1_ADDR)
-#define DISP_GAMMA_CONTROL_2_ADDR 0x301
- DISP_REG(DISP_GAMMA_CONTROL_2_ADDR)
-#define DISP_GAMMA_CONTROL_3_ADDR 0x302
- DISP_REG(DISP_GAMMA_CONTROL_3_ADDR)
-#define DISP_GAMMA_CONTROL_4_ADDR 0x303
- DISP_REG(DISP_GAMMA_CONTROL_4_ADDR)
-#define DISP_GAMMA_CONTROL_5_ADDR 0x304
- DISP_REG(DISP_GAMMA_CONTROL_5_ADDR)
-/* Coordinate Control */
-#define DISP_VERT_SCROLL_CTL_1_ADDR 0x400
- DISP_REG(DISP_VERT_SCROLL_CTL_1_ADDR)
-#define DISP_VERT_SCROLL_CTL_2_ADDR 0x401
- DISP_REG(DISP_VERT_SCROLL_CTL_2_ADDR)
-#define DISP_SCREEN_1_DRV_POS_1_ADDR 0x402
- DISP_REG(DISP_SCREEN_1_DRV_POS_1_ADDR)
-#define DISP_SCREEN_1_DRV_POS_2_ADDR 0x403
- DISP_REG(DISP_SCREEN_1_DRV_POS_2_ADDR)
-#define DISP_SCREEN_2_DRV_POS_1_ADDR 0x404
- DISP_REG(DISP_SCREEN_2_DRV_POS_1_ADDR)
-#define DISP_SCREEN_2_DRV_POS_2_ADDR 0x405
- DISP_REG(DISP_SCREEN_2_DRV_POS_2_ADDR)
-#define DISP_HORZ_RAM_ADDR_POS_1_ADDR 0x406
- DISP_REG(DISP_HORZ_RAM_ADDR_POS_1_ADDR)
-#define DISP_HORZ_RAM_ADDR_POS_2_ADDR 0x407
- DISP_REG(DISP_HORZ_RAM_ADDR_POS_2_ADDR)
-#define DISP_VERT_RAM_ADDR_POS_1_ADDR 0x408
- DISP_REG(DISP_VERT_RAM_ADDR_POS_1_ADDR)
-#define DISP_VERT_RAM_ADDR_POS_2_ADDR 0x409
- DISP_REG(DISP_VERT_RAM_ADDR_POS_2_ADDR)
-#define DISP_TMD_700_ADDR 0x700 /* 0x700 */
- DISP_REG(DISP_TMD_700_ADDR)
-#define DISP_TMD_015_ADDR 0x015 /* 0x700 */
- DISP_REG(DISP_TMD_015_ADDR)
-#define DISP_TMD_305_ADDR 0x305 /* 0x700 */
- DISP_REG(DISP_TMD_305_ADDR)
-
-/*
- * TMD QVGA Bit Definitions
- */
-
-#define DISP_BIT_IB15 0x8000
-#define DISP_BIT_IB14 0x4000
-#define DISP_BIT_IB13 0x2000
-#define DISP_BIT_IB12 0x1000
-#define DISP_BIT_IB11 0x0800
-#define DISP_BIT_IB10 0x0400
-#define DISP_BIT_IB09 0x0200
-#define DISP_BIT_IB08 0x0100
-#define DISP_BIT_IB07 0x0080
-#define DISP_BIT_IB06 0x0040
-#define DISP_BIT_IB05 0x0020
-#define DISP_BIT_IB04 0x0010
-#define DISP_BIT_IB03 0x0008
-#define DISP_BIT_IB02 0x0004
-#define DISP_BIT_IB01 0x0002
-#define DISP_BIT_IB00 0x0001
-/*
- * Display Control
- * DISP_START_OSCILLATION_ADDR Start Oscillation
- * DISP_DRIVER_OUTPUT_CTL_ADDR Driver Output Control
- */
-#define DISP_BITMASK_SS DISP_BIT_IB08
-#define DISP_BITMASK_NL5 DISP_BIT_IB05
-#define DISP_BITMASK_NL4 DISP_BIT_IB04
-#define DISP_BITMASK_NL3 DISP_BIT_IB03
-#define DISP_BITMASK_NL2 DISP_BIT_IB02
-#define DISP_BITMASK_NL1 DISP_BIT_IB01
-#define DISP_BITMASK_NL0 DISP_BIT_IB00
-/* DISP_LCD_DRIVING_SIG_ADDR LCD Driving Signal Setting */
-#define DISP_BITMASK_BC DISP_BIT_IB09
-/* DISP_ENTRY_MODE_ADDR Entry Mode */
-#define DISP_BITMASK_TRI DISP_BIT_IB15
-#define DISP_BITMASK_DFM1 DISP_BIT_IB14
-#define DISP_BITMASK_DFM0 DISP_BIT_IB13
-#define DISP_BITMASK_BGR DISP_BIT_IB12
-#define DISP_BITMASK_HWM0 DISP_BIT_IB08
-#define DISP_BITMASK_ID1 DISP_BIT_IB05
-#define DISP_BITMASK_ID0 DISP_BIT_IB04
-#define DISP_BITMASK_AM DISP_BIT_IB03
-/* DISP_DISPLAY_CTL_1_ADDR Display Control (1) */
-#define DISP_BITMASK_COL1 DISP_BIT_IB15
-#define DISP_BITMASK_COL0 DISP_BIT_IB14
-#define DISP_BITMASK_VLE2 DISP_BIT_IB10
-#define DISP_BITMASK_VLE1 DISP_BIT_IB09
-#define DISP_BITMASK_SPT DISP_BIT_IB08
-#define DISP_BITMASK_PT1 DISP_BIT_IB07
-#define DISP_BITMASK_PT0 DISP_BIT_IB06
-#define DISP_BITMASK_REV DISP_BIT_IB02
-/* DISP_DISPLAY_CTL_2_ADDR Display Control (2) */
-#define DISP_BITMASK_FP3 DISP_BIT_IB11
-#define DISP_BITMASK_FP2 DISP_BIT_IB10
-#define DISP_BITMASK_FP1 DISP_BIT_IB09
-#define DISP_BITMASK_FP0 DISP_BIT_IB08
-#define DISP_BITMASK_BP3 DISP_BIT_IB03
-#define DISP_BITMASK_BP2 DISP_BIT_IB02
-#define DISP_BITMASK_BP1 DISP_BIT_IB01
-#define DISP_BITMASK_BP0 DISP_BIT_IB00
-/* DISP_POWER_SUPPLY_INTF_ADDR Power Supply IC Interface Control */
-#define DISP_BITMASK_CSE DISP_BIT_IB12
-#define DISP_BITMASK_TE DISP_BIT_IB08
-#define DISP_BITMASK_IX3 DISP_BIT_IB03
-#define DISP_BITMASK_IX2 DISP_BIT_IB02
-#define DISP_BITMASK_IX1 DISP_BIT_IB01
-#define DISP_BITMASK_IX0 DISP_BIT_IB00
-/* DISP_EXT_DISPLAY_CTL_1_ADDR External Display Interface Control (1) */
-#define DISP_BITMASK_RM DISP_BIT_IB08
-#define DISP_BITMASK_DM1 DISP_BIT_IB05
-#define DISP_BITMASK_DM0 DISP_BIT_IB04
-#define DISP_BITMASK_RIM1 DISP_BIT_IB01
-#define DISP_BITMASK_RIM0 DISP_BIT_IB00
-/* DISP_FRAME_CYCLE_CTL_ADDR Frame Frequency Adjustment Control */
-#define DISP_BITMASK_DIVI1 DISP_BIT_IB09
-#define DISP_BITMASK_DIVI0 DISP_BIT_IB08
-#define DISP_BITMASK_RTNI4 DISP_BIT_IB04
-#define DISP_BITMASK_RTNI3 DISP_BIT_IB03
-#define DISP_BITMASK_RTNI2 DISP_BIT_IB02
-#define DISP_BITMASK_RTNI1 DISP_BIT_IB01
-#define DISP_BITMASK_RTNI0 DISP_BIT_IB00
-/* DISP_EXT_DISPLAY_CTL_2_ADDR External Display Interface Control (2) */
-#define DISP_BITMASK_DIVE1 DISP_BIT_IB09
-#define DISP_BITMASK_DIVE0 DISP_BIT_IB08
-#define DISP_BITMASK_RTNE7 DISP_BIT_IB07
-#define DISP_BITMASK_RTNE6 DISP_BIT_IB06
-#define DISP_BITMASK_RTNE5 DISP_BIT_IB05
-#define DISP_BITMASK_RTNE4 DISP_BIT_IB04
-#define DISP_BITMASK_RTNE3 DISP_BIT_IB03
-#define DISP_BITMASK_RTNE2 DISP_BIT_IB02
-#define DISP_BITMASK_RTNE1 DISP_BIT_IB01
-#define DISP_BITMASK_RTNE0 DISP_BIT_IB00
-/* DISP_EXT_DISPLAY_CTL_3_ADDR External Display Interface Control (3) */
-#define DISP_BITMASK_VSPL DISP_BIT_IB04
-#define DISP_BITMASK_HSPL DISP_BIT_IB03
-#define DISP_BITMASK_VPL DISP_BIT_IB02
-#define DISP_BITMASK_EPL DISP_BIT_IB01
-#define DISP_BITMASK_DPL DISP_BIT_IB00
-/* DISP_LTPS_CTL_1_ADDR LTPS Interface Control (1) */
-#define DISP_BITMASK_CLWI3 DISP_BIT_IB11
-#define DISP_BITMASK_CLWI2 DISP_BIT_IB10
-#define DISP_BITMASK_CLWI1 DISP_BIT_IB09
-#define DISP_BITMASK_CLWI0 DISP_BIT_IB08
-#define DISP_BITMASK_CLTI1 DISP_BIT_IB01
-#define DISP_BITMASK_CLTI0 DISP_BIT_IB00
-/* DISP_LTPS_CTL_2_ADDR LTPS Interface Control (2) */
-#define DISP_BITMASK_OEVBI1 DISP_BIT_IB09
-#define DISP_BITMASK_OEVBI0 DISP_BIT_IB08
-#define DISP_BITMASK_OEVFI1 DISP_BIT_IB01
-#define DISP_BITMASK_OEVFI0 DISP_BIT_IB00
-/* DISP_LTPS_CTL_3_ADDR LTPS Interface Control (3) */
-#define DISP_BITMASK_SHI1 DISP_BIT_IB01
-#define DISP_BITMASK_SHI0 DISP_BIT_IB00
-/* DISP_LTPS_CTL_4_ADDR LTPS Interface Control (4) */
-#define DISP_BITMASK_CLWE5 DISP_BIT_IB13
-#define DISP_BITMASK_CLWE4 DISP_BIT_IB12
-#define DISP_BITMASK_CLWE3 DISP_BIT_IB11
-#define DISP_BITMASK_CLWE2 DISP_BIT_IB10
-#define DISP_BITMASK_CLWE1 DISP_BIT_IB09
-#define DISP_BITMASK_CLWE0 DISP_BIT_IB08
-#define DISP_BITMASK_CLTE3 DISP_BIT_IB03
-#define DISP_BITMASK_CLTE2 DISP_BIT_IB02
-#define DISP_BITMASK_CLTE1 DISP_BIT_IB01
-#define DISP_BITMASK_CLTE0 DISP_BIT_IB00
-/* DISP_LTPS_CTL_5_ADDR LTPS Interface Control (5) */
-#define DISP_BITMASK_OEVBE3 DISP_BIT_IB11
-#define DISP_BITMASK_OEVBE2 DISP_BIT_IB10
-#define DISP_BITMASK_OEVBE1 DISP_BIT_IB09
-#define DISP_BITMASK_OEVBE0 DISP_BIT_IB08
-#define DISP_BITMASK_OEVFE3 DISP_BIT_IB03
-#define DISP_BITMASK_OEVFE2 DISP_BIT_IB02
-#define DISP_BITMASK_OEVFE1 DISP_BIT_IB01
-#define DISP_BITMASK_OEVFE0 DISP_BIT_IB00
-/* DISP_LTPS_CTL_6_ADDR LTPS Interface Control (6) */
-#define DISP_BITMASK_SHE3 DISP_BIT_IB03
-#define DISP_BITMASK_SHE2 DISP_BIT_IB02
-#define DISP_BITMASK_SHE1 DISP_BIT_IB01
-#define DISP_BITMASK_SHE0 DISP_BIT_IB00
-/* DISP_AMP_SETTING_ADDR Amplify Setting */
-#define DISP_BITMASK_ABSW1 DISP_BIT_IB01
-#define DISP_BITMASK_ABSW0 DISP_BIT_IB00
-/* DISP_MODE_SETTING_ADDR Mode Setting */
-#define DISP_BITMASK_DSTB DISP_BIT_IB02
-#define DISP_BITMASK_STB DISP_BIT_IB00
-/* DISP_POFF_LN_SETTING_ADDR Power Off Line Setting */
-#define DISP_BITMASK_POFH3 DISP_BIT_IB03
-#define DISP_BITMASK_POFH2 DISP_BIT_IB02
-#define DISP_BITMASK_POFH1 DISP_BIT_IB01
-#define DISP_BITMASK_POFH0 DISP_BIT_IB00
-
-/* Power Control */
-/* DISP_POWER_CTL_1_ADDR Power Control (1) */
-#define DISP_BITMASK_PO DISP_BIT_IB11
-#define DISP_BITMASK_VCD DISP_BIT_IB09
-#define DISP_BITMASK_VSC DISP_BIT_IB08
-#define DISP_BITMASK_CON DISP_BIT_IB07
-#define DISP_BITMASK_ASW1 DISP_BIT_IB06
-#define DISP_BITMASK_ASW0 DISP_BIT_IB05
-#define DISP_BITMASK_OEV DISP_BIT_IB04
-#define DISP_BITMASK_OEVE DISP_BIT_IB03
-#define DISP_BITMASK_FR DISP_BIT_IB02
-#define DISP_BITMASK_D1 DISP_BIT_IB01
-#define DISP_BITMASK_D0 DISP_BIT_IB00
-/* DISP_POWER_CTL_2_ADDR Power Control (2) */
-#define DISP_BITMASK_DC4 DISP_BIT_IB15
-#define DISP_BITMASK_DC3 DISP_BIT_IB14
-#define DISP_BITMASK_SAP2 DISP_BIT_IB13
-#define DISP_BITMASK_SAP1 DISP_BIT_IB12
-#define DISP_BITMASK_SAP0 DISP_BIT_IB11
-#define DISP_BITMASK_BT2 DISP_BIT_IB10
-#define DISP_BITMASK_BT1 DISP_BIT_IB09
-#define DISP_BITMASK_BT0 DISP_BIT_IB08
-#define DISP_BITMASK_DC2 DISP_BIT_IB07
-#define DISP_BITMASK_DC1 DISP_BIT_IB06
-#define DISP_BITMASK_DC0 DISP_BIT_IB05
-#define DISP_BITMASK_AP2 DISP_BIT_IB04
-#define DISP_BITMASK_AP1 DISP_BIT_IB03
-#define DISP_BITMASK_AP0 DISP_BIT_IB02
-/* DISP_POWER_CTL_3_ADDR Power Control (3) */
-#define DISP_BITMASK_VGL4 DISP_BIT_IB10
-#define DISP_BITMASK_VGL3 DISP_BIT_IB09
-#define DISP_BITMASK_VGL2 DISP_BIT_IB08
-#define DISP_BITMASK_VGL1 DISP_BIT_IB07
-#define DISP_BITMASK_VGL0 DISP_BIT_IB06
-#define DISP_BITMASK_VGH4 DISP_BIT_IB04
-#define DISP_BITMASK_VGH3 DISP_BIT_IB03
-#define DISP_BITMASK_VGH2 DISP_BIT_IB02
-#define DISP_BITMASK_VGH1 DISP_BIT_IB01
-#define DISP_BITMASK_VGH0 DISP_BIT_IB00
-/* DISP_POWER_CTL_4_ADDR Power Control (4) */
-#define DISP_BITMASK_VC2 DISP_BIT_IB02
-#define DISP_BITMASK_VC1 DISP_BIT_IB01
-#define DISP_BITMASK_VC0 DISP_BIT_IB00
-/* DISP_POWER_CTL_5_ADDR Power Control (5) */
-#define DISP_BITMASK_VRL3 DISP_BIT_IB11
-#define DISP_BITMASK_VRL2 DISP_BIT_IB10
-#define DISP_BITMASK_VRL1 DISP_BIT_IB09
-#define DISP_BITMASK_VRL0 DISP_BIT_IB08
-#define DISP_BITMASK_PON DISP_BIT_IB04
-#define DISP_BITMASK_VRH3 DISP_BIT_IB03
-#define DISP_BITMASK_VRH2 DISP_BIT_IB02
-#define DISP_BITMASK_VRH1 DISP_BIT_IB01
-#define DISP_BITMASK_VRH0 DISP_BIT_IB00
-/* DISP_POWER_CTL_6_ADDR Power Control (6) */
-#define DISP_BITMASK_VCOMG DISP_BIT_IB13
-#define DISP_BITMASK_VDV4 DISP_BIT_IB12
-#define DISP_BITMASK_VDV3 DISP_BIT_IB11
-#define DISP_BITMASK_VDV2 DISP_BIT_IB10
-#define DISP_BITMASK_VDV1 DISP_BIT_IB09
-#define DISP_BITMASK_VDV0 DISP_BIT_IB08
-#define DISP_BITMASK_VCM4 DISP_BIT_IB04
-#define DISP_BITMASK_VCM3 DISP_BIT_IB03
-#define DISP_BITMASK_VCM2 DISP_BIT_IB02
-#define DISP_BITMASK_VCM1 DISP_BIT_IB01
-#define DISP_BITMASK_VCM0 DISP_BIT_IB00
-/* RAM Access */
-/* DISP_RAM_ADDR_SET_1_ADDR RAM Address Set (1) */
-#define DISP_BITMASK_AD7 DISP_BIT_IB07
-#define DISP_BITMASK_AD6 DISP_BIT_IB06
-#define DISP_BITMASK_AD5 DISP_BIT_IB05
-#define DISP_BITMASK_AD4 DISP_BIT_IB04
-#define DISP_BITMASK_AD3 DISP_BIT_IB03
-#define DISP_BITMASK_AD2 DISP_BIT_IB02
-#define DISP_BITMASK_AD1 DISP_BIT_IB01
-#define DISP_BITMASK_AD0 DISP_BIT_IB00
-/* DISP_RAM_ADDR_SET_2_ADDR RAM Address Set (2) */
-#define DISP_BITMASK_AD16 DISP_BIT_IB08
-#define DISP_BITMASK_AD15 DISP_BIT_IB07
-#define DISP_BITMASK_AD14 DISP_BIT_IB06
-#define DISP_BITMASK_AD13 DISP_BIT_IB05
-#define DISP_BITMASK_AD12 DISP_BIT_IB04
-#define DISP_BITMASK_AD11 DISP_BIT_IB03
-#define DISP_BITMASK_AD10 DISP_BIT_IB02
-#define DISP_BITMASK_AD9 DISP_BIT_IB01
-#define DISP_BITMASK_AD8 DISP_BIT_IB00
-/*
- * DISP_CMD_RAMWR RAM Data Read/Write
- * Use Data Bit Configuration
- */
-/* DISP_RAM_DATA_MASK_1_ADDR RAM Write Data Mask (1) */
-#define DISP_BITMASK_WM11 DISP_BIT_IB13
-#define DISP_BITMASK_WM10 DISP_BIT_IB12
-#define DISP_BITMASK_WM9 DISP_BIT_IB11
-#define DISP_BITMASK_WM8 DISP_BIT_IB10
-#define DISP_BITMASK_WM7 DISP_BIT_IB09
-#define DISP_BITMASK_WM6 DISP_BIT_IB08
-#define DISP_BITMASK_WM5 DISP_BIT_IB05
-#define DISP_BITMASK_WM4 DISP_BIT_IB04
-#define DISP_BITMASK_WM3 DISP_BIT_IB03
-#define DISP_BITMASK_WM2 DISP_BIT_IB02
-#define DISP_BITMASK_WM1 DISP_BIT_IB01
-#define DISP_BITMASK_WM0 DISP_BIT_IB00
-/* DISP_RAM_DATA_MASK_2_ADDR RAM Write Data Mask (2) */
-#define DISP_BITMASK_WM17 DISP_BIT_IB05
-#define DISP_BITMASK_WM16 DISP_BIT_IB04
-#define DISP_BITMASK_WM15 DISP_BIT_IB03
-#define DISP_BITMASK_WM14 DISP_BIT_IB02
-#define DISP_BITMASK_WM13 DISP_BIT_IB01
-#define DISP_BITMASK_WM12 DISP_BIT_IB00
-/*Gamma Control */
-/* DISP_GAMMA_CONTROL_1_ADDR Gamma Control (1) */
-#define DISP_BITMASK_PKP12 DISP_BIT_IB10
-#define DISP_BITMASK_PKP11 DISP_BIT_IB08
-#define DISP_BITMASK_PKP10 DISP_BIT_IB09
-#define DISP_BITMASK_PKP02 DISP_BIT_IB02
-#define DISP_BITMASK_PKP01 DISP_BIT_IB01
-#define DISP_BITMASK_PKP00 DISP_BIT_IB00
-/* DISP_GAMMA_CONTROL_2_ADDR Gamma Control (2) */
-#define DISP_BITMASK_PKP32 DISP_BIT_IB10
-#define DISP_BITMASK_PKP31 DISP_BIT_IB09
-#define DISP_BITMASK_PKP30 DISP_BIT_IB08
-#define DISP_BITMASK_PKP22 DISP_BIT_IB02
-#define DISP_BITMASK_PKP21 DISP_BIT_IB01
-#define DISP_BITMASK_PKP20 DISP_BIT_IB00
-/* DISP_GAMMA_CONTROL_3_ADDR Gamma Control (3) */
-#define DISP_BITMASK_PKP52 DISP_BIT_IB10
-#define DISP_BITMASK_PKP51 DISP_BIT_IB09
-#define DISP_BITMASK_PKP50 DISP_BIT_IB08
-#define DISP_BITMASK_PKP42 DISP_BIT_IB02
-#define DISP_BITMASK_PKP41 DISP_BIT_IB01
-#define DISP_BITMASK_PKP40 DISP_BIT_IB00
-/* DISP_GAMMA_CONTROL_4_ADDR Gamma Control (4) */
-#define DISP_BITMASK_PRP12 DISP_BIT_IB10
-#define DISP_BITMASK_PRP11 DISP_BIT_IB08
-#define DISP_BITMASK_PRP10 DISP_BIT_IB09
-#define DISP_BITMASK_PRP02 DISP_BIT_IB02
-#define DISP_BITMASK_PRP01 DISP_BIT_IB01
-#define DISP_BITMASK_PRP00 DISP_BIT_IB00
-/* DISP_GAMMA_CONTROL_5_ADDR Gamma Control (5) */
-#define DISP_BITMASK_VRP14 DISP_BIT_IB12
-#define DISP_BITMASK_VRP13 DISP_BIT_IB11
-#define DISP_BITMASK_VRP12 DISP_BIT_IB10
-#define DISP_BITMASK_VRP11 DISP_BIT_IB08
-#define DISP_BITMASK_VRP10 DISP_BIT_IB09
-#define DISP_BITMASK_VRP03 DISP_BIT_IB03
-#define DISP_BITMASK_VRP02 DISP_BIT_IB02
-#define DISP_BITMASK_VRP01 DISP_BIT_IB01
-#define DISP_BITMASK_VRP00 DISP_BIT_IB00
-/* DISP_GAMMA_CONTROL_6_ADDR Gamma Control (6) */
-#define DISP_BITMASK_PKN12 DISP_BIT_IB10
-#define DISP_BITMASK_PKN11 DISP_BIT_IB08
-#define DISP_BITMASK_PKN10 DISP_BIT_IB09
-#define DISP_BITMASK_PKN02 DISP_BIT_IB02
-#define DISP_BITMASK_PKN01 DISP_BIT_IB01
-#define DISP_BITMASK_PKN00 DISP_BIT_IB00
-/* DISP_GAMMA_CONTROL_7_ADDR Gamma Control (7) */
-#define DISP_BITMASK_PKN32 DISP_BIT_IB10
-#define DISP_BITMASK_PKN31 DISP_BIT_IB08
-#define DISP_BITMASK_PKN30 DISP_BIT_IB09
-#define DISP_BITMASK_PKN22 DISP_BIT_IB02
-#define DISP_BITMASK_PKN21 DISP_BIT_IB01
-#define DISP_BITMASK_PKN20 DISP_BIT_IB00
-/* DISP_GAMMA_CONTROL_8_ADDR Gamma Control (8) */
-#define DISP_BITMASK_PKN52 DISP_BIT_IB10
-#define DISP_BITMASK_PKN51 DISP_BIT_IB08
-#define DISP_BITMASK_PKN50 DISP_BIT_IB09
-#define DISP_BITMASK_PKN42 DISP_BIT_IB02
-#define DISP_BITMASK_PKN41 DISP_BIT_IB01
-#define DISP_BITMASK_PKN40 DISP_BIT_IB00
-/* DISP_GAMMA_CONTROL_9_ADDR Gamma Control (9) */
-#define DISP_BITMASK_PRN12 DISP_BIT_IB10
-#define DISP_BITMASK_PRN11 DISP_BIT_IB08
-#define DISP_BITMASK_PRN10 DISP_BIT_IB09
-#define DISP_BITMASK_PRN02 DISP_BIT_IB02
-#define DISP_BITMASK_PRN01 DISP_BIT_IB01
-#define DISP_BITMASK_PRN00 DISP_BIT_IB00
-/* DISP_GAMMA_CONTROL_10_ADDR Gamma Control (10) */
-#define DISP_BITMASK_VRN14 DISP_BIT_IB12
-#define DISP_BITMASK_VRN13 DISP_BIT_IB11
-#define DISP_BITMASK_VRN12 DISP_BIT_IB10
-#define DISP_BITMASK_VRN11 DISP_BIT_IB08
-#define DISP_BITMASK_VRN10 DISP_BIT_IB09
-#define DISP_BITMASK_VRN03 DISP_BIT_IB03
-#define DISP_BITMASK_VRN02 DISP_BIT_IB02
-#define DISP_BITMASK_VRN01 DISP_BIT_IB01
-#define DISP_BITMASK_VRN00 DISP_BIT_IB00
-/* Coordinate Control */
-/* DISP_VERT_SCROLL_CTL_1_ADDR Vertical Scroll Control (1) */
-#define DISP_BITMASK_VL18 DISP_BIT_IB08
-#define DISP_BITMASK_VL17 DISP_BIT_IB07
-#define DISP_BITMASK_VL16 DISP_BIT_IB06
-#define DISP_BITMASK_VL15 DISP_BIT_IB05
-#define DISP_BITMASK_VL14 DISP_BIT_IB04
-#define DISP_BITMASK_VL13 DISP_BIT_IB03
-#define DISP_BITMASK_VL12 DISP_BIT_IB02
-#define DISP_BITMASK_VL11 DISP_BIT_IB01
-#define DISP_BITMASK_VL10 DISP_BIT_IB00
-/* DISP_VERT_SCROLL_CTL_2_ADDR Vertical Scroll Control (2) */
-#define DISP_BITMASK_VL28 DISP_BIT_IB08
-#define DISP_BITMASK_VL27 DISP_BIT_IB07
-#define DISP_BITMASK_VL26 DISP_BIT_IB06
-#define DISP_BITMASK_VL25 DISP_BIT_IB05
-#define DISP_BITMASK_VL24 DISP_BIT_IB04
-#define DISP_BITMASK_VL23 DISP_BIT_IB03
-#define DISP_BITMASK_VL22 DISP_BIT_IB02
-#define DISP_BITMASK_VL21 DISP_BIT_IB01
-#define DISP_BITMASK_VL20 DISP_BIT_IB00
-/* DISP_SCREEN_1_DRV_POS_1_ADDR First Screen Driving Position (1) */
-#define DISP_BITMASK_SS18 DISP_BIT_IB08
-#define DISP_BITMASK_SS17 DISP_BIT_IB07
-#define DISP_BITMASK_SS16 DISP_BIT_IB06
-#define DISP_BITMASK_SS15 DISP_BIT_IB05
-#define DISP_BITMASK_SS14 DISP_BIT_IB04
-#define DISP_BITMASK_SS13 DISP_BIT_IB03
-#define DISP_BITMASK_SS12 DISP_BIT_IB02
-#define DISP_BITMASK_SS11 DISP_BIT_IB01
-#define DISP_BITMASK_SS10 DISP_BIT_IB00
-/* DISP_SCREEN_1_DRV_POS_2_ADDR First Screen Driving Position (2) */
-#define DISP_BITMASK_SE18 DISP_BIT_IB08
-#define DISP_BITMASK_SE17 DISP_BIT_IB07
-#define DISP_BITMASK_SE16 DISP_BIT_IB06
-#define DISP_BITMASK_SE15 DISP_BIT_IB05
-#define DISP_BITMASK_SE14 DISP_BIT_IB04
-#define DISP_BITMASK_SE13 DISP_BIT_IB03
-#define DISP_BITMASK_SE12 DISP_BIT_IB02
-#define DISP_BITMASK_SE11 DISP_BIT_IB01
-#define DISP_BITMASK_SE10 DISP_BIT_IB00
-/* DISP_SCREEN_2_DRV_POS_1_ADDR Second Screen Driving Position (1) */
-#define DISP_BITMASK_SS28 DISP_BIT_IB08
-#define DISP_BITMASK_SS27 DISP_BIT_IB07
-#define DISP_BITMASK_SS26 DISP_BIT_IB06
-#define DISP_BITMASK_SS25 DISP_BIT_IB05
-#define DISP_BITMASK_SS24 DISP_BIT_IB04
-#define DISP_BITMASK_SS23 DISP_BIT_IB03
-#define DISP_BITMASK_SS22 DISP_BIT_IB02
-#define DISP_BITMASK_SS21 DISP_BIT_IB01
-#define DISP_BITMASK_SS20 DISP_BIT_IB00
-/* DISP_SCREEN_2_DRV_POS_2_ADDR Second Screen Driving Position (2) */
-#define DISP_BITMASK_SE28 DISP_BIT_IB08
-#define DISP_BITMASK_SE27 DISP_BIT_IB07
-#define DISP_BITMASK_SE26 DISP_BIT_IB06
-#define DISP_BITMASK_SE25 DISP_BIT_IB05
-#define DISP_BITMASK_SE24 DISP_BIT_IB04
-#define DISP_BITMASK_SE23 DISP_BIT_IB03
-#define DISP_BITMASK_SE22 DISP_BIT_IB02
-#define DISP_BITMASK_SE21 DISP_BIT_IB01
-#define DISP_BITMASK_SE20 DISP_BIT_IB00
-/* DISP_HORZ_RAM_ADDR_POS_1_ADDR Horizontal RAM Address Position (1) */
-#define DISP_BITMASK_HSA7 DISP_BIT_IB07
-#define DISP_BITMASK_HSA6 DISP_BIT_IB06
-#define DISP_BITMASK_HSA5 DISP_BIT_IB05
-#define DISP_BITMASK_HSA4 DISP_BIT_IB04
-#define DISP_BITMASK_HSA3 DISP_BIT_IB03
-#define DISP_BITMASK_HSA2 DISP_BIT_IB02
-#define DISP_BITMASK_HSA1 DISP_BIT_IB01
-#define DISP_BITMASK_HSA0 DISP_BIT_IB00
-/* DISP_HORZ_RAM_ADDR_POS_2_ADDR Horizontal RAM Address Position (2) */
-#define DISP_BITMASK_HEA7 DISP_BIT_IB07
-#define DISP_BITMASK_HEA6 DISP_BIT_IB06
-#define DISP_BITMASK_HEA5 DISP_BIT_IB05
-#define DISP_BITMASK_HEA4 DISP_BIT_IB04
-#define DISP_BITMASK_HEA3 DISP_BIT_IB03
-#define DISP_BITMASK_HEA2 DISP_BIT_IB02
-#define DISP_BITMASK_HEA1 DISP_BIT_IB01
-#define DISP_BITMASK_HEA0 DISP_BIT_IB00
-/* DISP_VERT_RAM_ADDR_POS_1_ADDR Vertical RAM Address Position (1) */
-#define DISP_BITMASK_VSA8 DISP_BIT_IB08
-#define DISP_BITMASK_VSA7 DISP_BIT_IB07
-#define DISP_BITMASK_VSA6 DISP_BIT_IB06
-#define DISP_BITMASK_VSA5 DISP_BIT_IB05
-#define DISP_BITMASK_VSA4 DISP_BIT_IB04
-#define DISP_BITMASK_VSA3 DISP_BIT_IB03
-#define DISP_BITMASK_VSA2 DISP_BIT_IB02
-#define DISP_BITMASK_VSA1 DISP_BIT_IB01
-#define DISP_BITMASK_VSA0 DISP_BIT_IB00
-/* DISP_VERT_RAM_ADDR_POS_2_ADDR Vertical RAM Address Position (2) */
-#define DISP_BITMASK_VEA8 DISP_BIT_IB08
-#define DISP_BITMASK_VEA7 DISP_BIT_IB07
-#define DISP_BITMASK_VEA6 DISP_BIT_IB06
-#define DISP_BITMASK_VEA5 DISP_BIT_IB05
-#define DISP_BITMASK_VEA4 DISP_BIT_IB04
-#define DISP_BITMASK_VEA3 DISP_BIT_IB03
-#define DISP_BITMASK_VEA2 DISP_BIT_IB02
-#define DISP_BITMASK_VEA1 DISP_BIT_IB01
-#define DISP_BITMASK_VEA0 DISP_BIT_IB00
-static word disp_area_start_row;
-static word disp_area_end_row;
-static boolean disp_initialized = FALSE;
-/* For some reason the contrast set at init time is not good. Need to do
-* it again
-*/
-static boolean display_on = FALSE;
-
-static uint32 tmd20qvga_lcd_rev;
-uint16 tmd20qvga_panel_offset;
-
-#ifdef DISP_DEVICE_8BPP
-static word convert_8_to_16_tbl[256] = {
- 0x0000, 0x2000, 0x4000, 0x6000, 0x8000, 0xA000, 0xC000, 0xE000,
- 0x0100, 0x2100, 0x4100, 0x6100, 0x8100, 0xA100, 0xC100, 0xE100,
- 0x0200, 0x2200, 0x4200, 0x6200, 0x8200, 0xA200, 0xC200, 0xE200,
- 0x0300, 0x2300, 0x4300, 0x6300, 0x8300, 0xA300, 0xC300, 0xE300,
- 0x0400, 0x2400, 0x4400, 0x6400, 0x8400, 0xA400, 0xC400, 0xE400,
- 0x0500, 0x2500, 0x4500, 0x6500, 0x8500, 0xA500, 0xC500, 0xE500,
- 0x0600, 0x2600, 0x4600, 0x6600, 0x8600, 0xA600, 0xC600, 0xE600,
- 0x0700, 0x2700, 0x4700, 0x6700, 0x8700, 0xA700, 0xC700, 0xE700,
- 0x0008, 0x2008, 0x4008, 0x6008, 0x8008, 0xA008, 0xC008, 0xE008,
- 0x0108, 0x2108, 0x4108, 0x6108, 0x8108, 0xA108, 0xC108, 0xE108,
- 0x0208, 0x2208, 0x4208, 0x6208, 0x8208, 0xA208, 0xC208, 0xE208,
- 0x0308, 0x2308, 0x4308, 0x6308, 0x8308, 0xA308, 0xC308, 0xE308,
- 0x0408, 0x2408, 0x4408, 0x6408, 0x8408, 0xA408, 0xC408, 0xE408,
- 0x0508, 0x2508, 0x4508, 0x6508, 0x8508, 0xA508, 0xC508, 0xE508,
- 0x0608, 0x2608, 0x4608, 0x6608, 0x8608, 0xA608, 0xC608, 0xE608,
- 0x0708, 0x2708, 0x4708, 0x6708, 0x8708, 0xA708, 0xC708, 0xE708,
- 0x0010, 0x2010, 0x4010, 0x6010, 0x8010, 0xA010, 0xC010, 0xE010,
- 0x0110, 0x2110, 0x4110, 0x6110, 0x8110, 0xA110, 0xC110, 0xE110,
- 0x0210, 0x2210, 0x4210, 0x6210, 0x8210, 0xA210, 0xC210, 0xE210,
- 0x0310, 0x2310, 0x4310, 0x6310, 0x8310, 0xA310, 0xC310, 0xE310,
- 0x0410, 0x2410, 0x4410, 0x6410, 0x8410, 0xA410, 0xC410, 0xE410,
- 0x0510, 0x2510, 0x4510, 0x6510, 0x8510, 0xA510, 0xC510, 0xE510,
- 0x0610, 0x2610, 0x4610, 0x6610, 0x8610, 0xA610, 0xC610, 0xE610,
- 0x0710, 0x2710, 0x4710, 0x6710, 0x8710, 0xA710, 0xC710, 0xE710,
- 0x0018, 0x2018, 0x4018, 0x6018, 0x8018, 0xA018, 0xC018, 0xE018,
- 0x0118, 0x2118, 0x4118, 0x6118, 0x8118, 0xA118, 0xC118, 0xE118,
- 0x0218, 0x2218, 0x4218, 0x6218, 0x8218, 0xA218, 0xC218, 0xE218,
- 0x0318, 0x2318, 0x4318, 0x6318, 0x8318, 0xA318, 0xC318, 0xE318,
- 0x0418, 0x2418, 0x4418, 0x6418, 0x8418, 0xA418, 0xC418, 0xE418,
- 0x0518, 0x2518, 0x4518, 0x6518, 0x8518, 0xA518, 0xC518, 0xE518,
- 0x0618, 0x2618, 0x4618, 0x6618, 0x8618, 0xA618, 0xC618, 0xE618,
- 0x0718, 0x2718, 0x4718, 0x6718, 0x8718, 0xA718, 0xC718, 0xE718
-};
-#endif /* DISP_DEVICE_8BPP */
-
-static void tmd20qvga_disp_set_rect(int x, int y, int xres, int yres);
-static void tmd20qvga_disp_init(struct platform_device *pdev);
-static void tmd20qvga_disp_set_contrast(void);
-static void tmd20qvga_disp_set_display_area(word start_row, word end_row);
-static int tmd20qvga_disp_off(struct platform_device *pdev);
-static int tmd20qvga_disp_on(struct platform_device *pdev);
-static void tmd20qvga_set_revId(int);
-
-/* future use */
-void tmd20qvga_disp_clear_screen_area(word start_row, word end_row,
- word start_column, word end_column);
-
-static void tmd20qvga_set_revId(int id)
-{
-
- tmd20qvga_lcd_rev = id;
-
- if (tmd20qvga_lcd_rev == 1)
- tmd20qvga_panel_offset = 0x10;
- else
- tmd20qvga_panel_offset = 0;
-}
-
-static void tmd20qvga_disp_init(struct platform_device *pdev)
-{
- struct msm_fb_data_type *mfd;
-
- if (disp_initialized)
- return;
-
- mfd = platform_get_drvdata(pdev);
-
- DISP_CMD_PORT = mfd->cmd_port;
- DISP_DATA_PORT = mfd->data_port;
-
-#ifdef TMD20QVGA_LCD_18BPP
- tmd20qvga_set_revId(2);
-#else
- tmd20qvga_set_revId(1);
-#endif
-
- disp_initialized = TRUE;
- tmd20qvga_disp_set_contrast();
- tmd20qvga_disp_set_display_area(0, QVGA_HEIGHT - 1);
-}
-
-static void tmd20qvga_disp_set_rect(int x, int y, int xres, int yres)
-{
- if (!disp_initialized)
- return;
-
- DISP_SET_RECT(y, y + yres - 1, x, x + xres - 1);
-
- DISP_CMD_OUT(DISP_CMD_RAMWR);
-}
-
-static void tmd20qvga_disp_set_display_area(word start_row, word end_row)
-{
- word start_driving = start_row;
- word end_driving = end_row;
-
- if (!disp_initialized)
- return;
-
- /* Range checking
- */
- if (end_driving >= QVGA_HEIGHT)
- end_driving = QVGA_HEIGHT - 1;
- if (start_driving > end_driving) {
- /* Probably Backwards Switch */
- start_driving = end_driving;
- end_driving = start_row; /* Has not changed */
- if (end_driving >= QVGA_HEIGHT)
- end_driving = QVGA_HEIGHT - 1;
- }
-
- if ((start_driving == disp_area_start_row)
- && (end_driving == disp_area_end_row))
- return;
-
- disp_area_start_row = start_driving;
- disp_area_end_row = end_driving;
-
- DISP_WRITE_OUT(DISP_SCREEN_1_DRV_POS_1_ADDR,
- DISP_VAL_IF(start_driving & 0x100,
- DISP_BITMASK_SS18) |
- DISP_VAL_IF(start_driving & 0x080,
- DISP_BITMASK_SS17) |
- DISP_VAL_IF(start_driving & 0x040,
- DISP_BITMASK_SS16) |
- DISP_VAL_IF(start_driving & 0x020,
- DISP_BITMASK_SS15) |
- DISP_VAL_IF(start_driving & 0x010,
- DISP_BITMASK_SS14) |
- DISP_VAL_IF(start_driving & 0x008,
- DISP_BITMASK_SS13) |
- DISP_VAL_IF(start_driving & 0x004,
- DISP_BITMASK_SS12) |
- DISP_VAL_IF(start_driving & 0x002,
- DISP_BITMASK_SS11) |
- DISP_VAL_IF(start_driving & 0x001, DISP_BITMASK_SS10));
-
- DISP_WRITE_OUT(DISP_SCREEN_1_DRV_POS_2_ADDR,
- DISP_VAL_IF(end_driving & 0x100, DISP_BITMASK_SE18) |
- DISP_VAL_IF(end_driving & 0x080, DISP_BITMASK_SE17) |
- DISP_VAL_IF(end_driving & 0x040, DISP_BITMASK_SE16) |
- DISP_VAL_IF(end_driving & 0x020, DISP_BITMASK_SE15) |
- DISP_VAL_IF(end_driving & 0x010, DISP_BITMASK_SE14) |
- DISP_VAL_IF(end_driving & 0x008, DISP_BITMASK_SE13) |
- DISP_VAL_IF(end_driving & 0x004, DISP_BITMASK_SE12) |
- DISP_VAL_IF(end_driving & 0x002, DISP_BITMASK_SE11) |
- DISP_VAL_IF(end_driving & 0x001, DISP_BITMASK_SE10));
-}
-
-static int tmd20qvga_disp_off(struct platform_device *pdev)
-{
- if (!disp_initialized)
- tmd20qvga_disp_init(pdev);
-
- if (display_on) {
- if (tmd20qvga_lcd_rev == 2) {
- DISP_WRITE_OUT(DISP_POFF_LN_SETTING_ADDR, 0x000A);
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0xFFEE);
- WAIT_MSEC(40);
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0xF812);
- WAIT_MSEC(40);
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0xE811);
- WAIT_MSEC(40);
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0xC011);
- WAIT_MSEC(40);
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x4011);
- WAIT_MSEC(20);
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x0010);
-
- } else {
- DISP_WRITE_OUT(DISP_POFF_LN_SETTING_ADDR, 0x000F);
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x0BFE);
- DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0100);
- WAIT_MSEC(40);
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x0BED);
- DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0100);
- WAIT_MSEC(40);
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x00CD);
- DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0100);
- WAIT_MSEC(20);
- DISP_WRITE_OUT(DISP_START_OSCILLATION_ADDR, 0x0);
- }
-
- DISP_WRITE_OUT(DISP_MODE_SETTING_ADDR, 0x0004);
- DISP_WRITE_OUT(DISP_MODE_SETTING_ADDR, 0x0000);
-
- display_on = FALSE;
- }
-
- return 0;
-}
-
-static int tmd20qvga_disp_on(struct platform_device *pdev)
-{
- if (!disp_initialized)
- tmd20qvga_disp_init(pdev);
-
- if (!display_on) {
- /* Deep Stand-by -> Stand-by */
- DISP_CMD_OUT(DISP_START_OSCILLATION_ADDR);
- WAIT_MSEC(1);
- DISP_CMD_OUT(DISP_START_OSCILLATION_ADDR);
- WAIT_MSEC(1);
- DISP_CMD_OUT(DISP_START_OSCILLATION_ADDR);
- WAIT_MSEC(1);
-
- /* OFF -> Deep Stand-by -> Stand-by */
- /* let's change the state from "Stand-by" to "Sleep" */
- DISP_WRITE_OUT(DISP_MODE_SETTING_ADDR, 0x0005);
- WAIT_MSEC(1);
-
- /* Sleep -> Displaying */
- DISP_WRITE_OUT(DISP_START_OSCILLATION_ADDR, 0x0001);
- DISP_WRITE_OUT(DISP_DRIVER_OUTPUT_CTL_ADDR, 0x0127);
- DISP_WRITE_OUT(DISP_LCD_DRIVING_SIG_ADDR, 0x200);
- /* fast write mode */
- DISP_WRITE_OUT(DISP_ENTRY_MODE_ADDR, 0x0130);
- if (tmd20qvga_lcd_rev == 2)
- DISP_WRITE_OUT(DISP_TMD_700_ADDR, 0x0003);
- /* back porch = 14 + front porch = 2 --> 16 lines */
- if (tmd20qvga_lcd_rev == 2) {
-#ifdef TMD20QVGA_LCD_18BPP
- /* 256k color */
- DISP_WRITE_OUT(DISP_DISPLAY_CTL_1_ADDR, 0x0000);
-#else
- /* 65k color */
- DISP_WRITE_OUT(DISP_DISPLAY_CTL_1_ADDR, 0x4000);
-#endif
- DISP_WRITE_OUT(DISP_DISPLAY_CTL_2_ADDR, 0x0302);
- } else {
-#ifdef TMD20QVGA_LCD_18BPP
- /* 256k color */
- DISP_WRITE_OUT(DISP_DISPLAY_CTL_1_ADDR, 0x0004);
-#else
- /* 65k color */
- DISP_WRITE_OUT(DISP_DISPLAY_CTL_1_ADDR, 0x4004);
-#endif
- DISP_WRITE_OUT(DISP_DISPLAY_CTL_2_ADDR, 0x020E);
- }
- /* 16 bit one transfer */
- if (tmd20qvga_lcd_rev == 2) {
- DISP_WRITE_OUT(DISP_EXT_DISPLAY_CTL_1_ADDR, 0x0000);
- DISP_WRITE_OUT(DISP_FRAME_CYCLE_CTL_ADDR, 0x0010);
- DISP_WRITE_OUT(DISP_LTPS_CTL_1_ADDR, 0x0302);
- DISP_WRITE_OUT(DISP_LTPS_CTL_2_ADDR, 0x0102);
- DISP_WRITE_OUT(DISP_LTPS_CTL_3_ADDR, 0x0000);
- DISP_WRITE_OUT(DISP_TMD_015_ADDR, 0x2000);
-
- DISP_WRITE_OUT(DISP_AMP_SETTING_ADDR, 0x0000);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_1_ADDR, 0x0403);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_2_ADDR, 0x0304);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_3_ADDR, 0x0403);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_4_ADDR, 0x0303);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_5_ADDR, 0x0101);
- DISP_WRITE_OUT(DISP_TMD_305_ADDR, 0);
-
- DISP_WRITE_OUT(DISP_SCREEN_1_DRV_POS_1_ADDR, 0x0000);
- DISP_WRITE_OUT(DISP_SCREEN_1_DRV_POS_2_ADDR, 0x013F);
-
- DISP_WRITE_OUT(DISP_POWER_CTL_3_ADDR, 0x077D);
-
- DISP_WRITE_OUT(DISP_POWER_CTL_4_ADDR, 0x0005);
- DISP_WRITE_OUT(DISP_POWER_CTL_5_ADDR, 0x0000);
- DISP_WRITE_OUT(DISP_POWER_CTL_6_ADDR, 0x0015);
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0xC010);
- WAIT_MSEC(1);
-
- DISP_WRITE_OUT(DISP_POWER_CTL_2_ADDR, 0x0001);
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0xFFFE);
- WAIT_MSEC(60);
- } else {
- DISP_WRITE_OUT(DISP_EXT_DISPLAY_CTL_1_ADDR, 0x0001);
- DISP_WRITE_OUT(DISP_FRAME_CYCLE_CTL_ADDR, 0x0010);
- DISP_WRITE_OUT(DISP_LTPS_CTL_1_ADDR, 0x0301);
- DISP_WRITE_OUT(DISP_LTPS_CTL_2_ADDR, 0x0001);
- DISP_WRITE_OUT(DISP_LTPS_CTL_3_ADDR, 0x0000);
- DISP_WRITE_OUT(DISP_AMP_SETTING_ADDR, 0x0000);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_1_ADDR, 0x0507);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_2_ADDR, 0x0405);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_3_ADDR, 0x0607);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_4_ADDR, 0x0502);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_5_ADDR, 0x0301);
- DISP_WRITE_OUT(DISP_SCREEN_1_DRV_POS_1_ADDR, 0x0000);
- DISP_WRITE_OUT(DISP_SCREEN_1_DRV_POS_2_ADDR, 0x013F);
- DISP_WRITE_OUT(DISP_POWER_CTL_3_ADDR, 0x0795);
-
- DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0102);
- WAIT_MSEC(1);
-
- DISP_WRITE_OUT(DISP_POWER_CTL_4_ADDR, 0x0450);
- DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0103);
- WAIT_MSEC(1);
-
- DISP_WRITE_OUT(DISP_POWER_CTL_5_ADDR, 0x0008);
- DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0104);
- WAIT_MSEC(1);
-
- DISP_WRITE_OUT(DISP_POWER_CTL_6_ADDR, 0x0C00);
- DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0105);
- WAIT_MSEC(1);
-
- DISP_WRITE_OUT(DISP_POWER_CTL_7_ADDR, 0x0000);
- DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0106);
- WAIT_MSEC(1);
-
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x0801);
- DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0100);
- WAIT_MSEC(1);
-
- DISP_WRITE_OUT(DISP_POWER_CTL_2_ADDR, 0x001F);
- DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0101);
- WAIT_MSEC(60);
-
- DISP_WRITE_OUT(DISP_POWER_CTL_2_ADDR, 0x009F);
- DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0101);
- WAIT_MSEC(10);
-
- DISP_WRITE_OUT(DISP_HORZ_RAM_ADDR_POS_1_ADDR, 0x0010);
- DISP_WRITE_OUT(DISP_HORZ_RAM_ADDR_POS_2_ADDR, 0x00FF);
- DISP_WRITE_OUT(DISP_VERT_RAM_ADDR_POS_1_ADDR, 0x0000);
- DISP_WRITE_OUT(DISP_VERT_RAM_ADDR_POS_2_ADDR, 0x013F);
- /* RAM starts at address 0x10 */
- DISP_WRITE_OUT(DISP_RAM_ADDR_SET_1_ADDR, 0x0010);
- DISP_WRITE_OUT(DISP_RAM_ADDR_SET_2_ADDR, 0x0000);
-
- /* lcd controller uses internal clock, not ext. vsync */
- DISP_CMD_OUT(DISP_CMD_RAMWR);
-
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x0881);
- DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0100);
- WAIT_MSEC(40);
-
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x0BE1);
- DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0100);
- WAIT_MSEC(40);
-
- DISP_WRITE_OUT(DISP_POWER_CTL_1_ADDR, 0x0BFF);
- DISP_WRITE_OUT(DISP_POWER_SUPPLY_INTF_ADDR, 0x0100);
- }
- display_on = TRUE;
- }
-
- return 0;
-}
-
-static void tmd20qvga_disp_set_contrast(void)
-{
-#if (defined(TMD20QVGA_LCD_18BPP))
-
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_1_ADDR, 0x0403);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_2_ADDR, 0x0302);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_3_ADDR, 0x0403);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_4_ADDR, 0x0303);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_5_ADDR, 0x0F07);
-
-#else
- int newcontrast = 0x46;
-
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_1_ADDR, 0x0403);
-
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_2_ADDR,
- DISP_VAL_IF(newcontrast & 0x0001, DISP_BITMASK_PKP20) |
- DISP_VAL_IF(newcontrast & 0x0002, DISP_BITMASK_PKP21) |
- DISP_VAL_IF(newcontrast & 0x0004, DISP_BITMASK_PKP22) |
- DISP_VAL_IF(newcontrast & 0x0010, DISP_BITMASK_PKP30) |
- DISP_VAL_IF(newcontrast & 0x0020, DISP_BITMASK_PKP31) |
- DISP_VAL_IF(newcontrast & 0x0040, DISP_BITMASK_PKP32));
-
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_3_ADDR,
- DISP_VAL_IF(newcontrast & 0x0010, DISP_BITMASK_PKP40) |
- DISP_VAL_IF(newcontrast & 0x0020, DISP_BITMASK_PKP41) |
- DISP_VAL_IF(newcontrast & 0x0040, DISP_BITMASK_PKP42) |
- DISP_VAL_IF(newcontrast & 0x0001, DISP_BITMASK_PKP50) |
- DISP_VAL_IF(newcontrast & 0x0002, DISP_BITMASK_PKP51) |
- DISP_VAL_IF(newcontrast & 0x0004, DISP_BITMASK_PKP52));
-
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_4_ADDR, 0x0303);
- DISP_WRITE_OUT(DISP_GAMMA_CONTROL_5_ADDR, 0x0F07);
-
-#endif /* defined(TMD20QVGA_LCD_18BPP) */
-
-} /* End disp_set_contrast */
-
-void tmd20qvga_disp_clear_screen_area
- (word start_row, word end_row, word start_column, word end_column) {
- int32 i;
-
- /* Clear the display screen */
- DISP_SET_RECT(start_row, end_row, start_column, end_column);
- DISP_CMD_OUT(DISP_CMD_RAMWR);
- i = (end_row - start_row + 1) * (end_column - start_column + 1);
- for (; i > 0; i--)
- DISP_DATA_OUT_16TO18BPP(0x0);
-}
-
-static int __init tmd20qvga_probe(struct platform_device *pdev)
-{
- msm_fb_add_device(pdev);
-
- return 0;
-}
-
-static struct platform_driver this_driver = {
- .probe = tmd20qvga_probe,
- .driver = {
- .name = "ebi2_tmd_qvga",
- },
-};
-
-static struct msm_fb_panel_data tmd20qvga_panel_data = {
- .on = tmd20qvga_disp_on,
- .off = tmd20qvga_disp_off,
- .set_rect = tmd20qvga_disp_set_rect,
-};
-
-static struct platform_device this_device = {
- .name = "ebi2_tmd_qvga",
- .id = 0,
- .dev = {
- .platform_data = &tmd20qvga_panel_data,
- }
-};
-
-static int __init tmd20qvga_init(void)
-{
- int ret;
- struct msm_panel_info *pinfo;
-
- ret = platform_driver_register(&this_driver);
- if (!ret) {
- pinfo = &tmd20qvga_panel_data.panel_info;
- pinfo->xres = 240;
- pinfo->yres = 320;
- pinfo->type = EBI2_PANEL;
- pinfo->pdest = DISPLAY_1;
- pinfo->wait_cycle = 0x808000;
-#ifdef TMD20QVGA_LCD_18BPP
- pinfo->bpp = 18;
-#else
- pinfo->bpp = 16;
-#endif
- pinfo->fb_num = 2;
- pinfo->lcd.vsync_enable = TRUE;
- pinfo->lcd.refx100 = 6000;
- pinfo->lcd.v_back_porch = 16;
- pinfo->lcd.v_front_porch = 4;
- pinfo->lcd.v_pulse_width = 0;
- pinfo->lcd.hw_vsync_mode = FALSE;
- pinfo->lcd.vsync_notifier_period = 0;
-
- ret = platform_device_register(&this_device);
- if (ret)
- platform_driver_unregister(&this_driver);
- }
-
- return ret;
-}
-
-module_init(tmd20qvga_init);
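Note: the DISP_VAL_IF() chains above (for example in tmd20qvga_disp_set_contrast()) scatter individual bits of a wider value into named register bit positions. The macro and the SS1x masks are defined in the driver's header, which is not part of this hunk, so the sketch below assumes the obvious conditional-mask definition and invented SS1x bit positions purely to illustrate the idiom.

/* Sketch of the bit-scatter idiom used by DISP_VAL_IF() above. Both the
 * macro body and the SS1x bit positions are assumptions made for this
 * illustration; the real definitions live in the driver's header. */
#include <stdint.h>
#include <stdio.h>

#define DISP_VAL_IF(cond, mask)	((cond) ? (mask) : 0)

#define DISP_BITMASK_SS10	(1u << 4)	/* assumed register bit positions */
#define DISP_BITMASK_SS11	(1u << 5)
#define DISP_BITMASK_SS12	(1u << 6)

/* Scatter the low three bits of start_driving into the assumed SS1x field. */
static uint16_t pack_start_driving(uint16_t start_driving)
{
	return DISP_VAL_IF(start_driving & 0x004, DISP_BITMASK_SS12) |
	       DISP_VAL_IF(start_driving & 0x002, DISP_BITMASK_SS11) |
	       DISP_VAL_IF(start_driving & 0x001, DISP_BITMASK_SS10);
}

int main(void)
{
	printf("0x%04x\n", pack_start_driving(0x0005));	/* prints 0x0050 */
	return 0;
}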
diff --git a/drivers/staging/msm/hdmi_sii9022.c b/drivers/staging/msm/hdmi_sii9022.c
deleted file mode 100644
index 6b82b56a77b..00000000000
--- a/drivers/staging/msm/hdmi_sii9022.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/i2c.h>
-#include <linux/delay.h>
-#include "msm_fb.h"
-
-#define DEVICE_NAME "sii9022"
-#define SII9022_DEVICE_ID 0xB0
-
-struct sii9022_i2c_addr_data{
- u8 addr;
- u8 data;
-};
-
-/* video mode data */
-static u8 video_mode_data[] = {
- 0x00,
- 0xF9, 0x1C, 0x70, 0x17, 0x72, 0x06, 0xEE, 0x02,
-};
-
-static u8 avi_io_format[] = {
- 0x09,
- 0x00, 0x00,
-};
-
-/* power state */
-static struct sii9022_i2c_addr_data regset0[] = {
- { 0x60, 0x04 },
- { 0x63, 0x00 },
- { 0x1E, 0x00 },
-};
-
-static u8 video_infoframe[] = {
- 0x0C,
- 0xF0, 0x00, 0x68, 0x00, 0x04, 0x00, 0x19, 0x00,
- 0xE9, 0x02, 0x04, 0x01, 0x04, 0x06,
-};
-
-/* configure audio */
-static struct sii9022_i2c_addr_data regset1[] = {
- { 0x26, 0x90 },
- { 0x20, 0x90 },
- { 0x1F, 0x80 },
- { 0x26, 0x80 },
- { 0x24, 0x02 },
- { 0x25, 0x0B },
- { 0xBC, 0x02 },
- { 0xBD, 0x24 },
- { 0xBE, 0x02 },
-};
-
-/* enable audio */
-static u8 misc_infoframe[] = {
- 0xBF,
- 0xC2, 0x84, 0x01, 0x0A, 0x6F, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-/* set HDMI, active */
-static struct sii9022_i2c_addr_data regset2[] = {
- { 0x1A, 0x01 },
- { 0x3D, 0x00 },
-};
-
-static int send_i2c_data(struct i2c_client *client,
- struct sii9022_i2c_addr_data *regset,
- int size)
-{
- int i;
- int rc = 0;
-
- for (i = 0; i < size; i++) {
- rc = i2c_smbus_write_byte_data(
- client,
- regset[i].addr, regset[i].data);
- if (rc)
- break;
- }
- return rc;
-}
-
-static int hdmi_sii_enable(struct i2c_client *client)
-{
- int rc;
- int retries = 10;
- int count;
-
- rc = i2c_smbus_write_byte_data(client, 0xC7, 0x00);
- if (rc)
- goto enable_exit;
-
- do {
- msleep(1);
- rc = i2c_smbus_read_byte_data(client, 0x1B);
- } while ((rc != SII9022_DEVICE_ID) && retries--);
-
- if (rc != SII9022_DEVICE_ID)
- return -ENODEV;
-
- rc = i2c_smbus_write_byte_data(client, 0x1A, 0x11);
- if (rc)
- goto enable_exit;
-
- count = ARRAY_SIZE(video_mode_data);
- rc = i2c_master_send(client, video_mode_data, count);
- if (rc != count) {
- rc = -EIO;
- goto enable_exit;
- }
-
- rc = i2c_smbus_write_byte_data(client, 0x08, 0x20);
- if (rc)
- goto enable_exit;
- count = ARRAY_SIZE(avi_io_format);
- rc = i2c_master_send(client, avi_io_format, count);
- if (rc != count) {
- rc = -EIO;
- goto enable_exit;
- }
-
- rc = send_i2c_data(client, regset0, ARRAY_SIZE(regset0));
- if (rc)
- goto enable_exit;
-
- count = ARRAY_SIZE(video_infoframe);
- rc = i2c_master_send(client, video_infoframe, count);
- if (rc != count) {
- rc = -EIO;
- goto enable_exit;
- }
-
- rc = send_i2c_data(client, regset1, ARRAY_SIZE(regset1));
- if (rc)
- goto enable_exit;
-
- count = ARRAY_SIZE(misc_infoframe);
- rc = i2c_master_send(client, misc_infoframe, count);
- if (rc != count) {
- rc = -EIO;
- goto enable_exit;
- }
-
- rc = send_i2c_data(client, regset2, ARRAY_SIZE(regset2));
- if (rc)
- goto enable_exit;
-
- return 0;
-enable_exit:
- printk(KERN_ERR "%s: exited rc=%d\n", __func__, rc);
- return rc;
-}
-
-static const struct i2c_device_id hmdi_sii_id[] = {
- { DEVICE_NAME, 0 },
- { }
-};
-
-static int hdmi_sii_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int rc;
-
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
- return -ENODEV;
- rc = hdmi_sii_enable(client);
- return rc;
-}
-
-
-static struct i2c_driver hdmi_sii_i2c_driver = {
- .driver = {
- .name = DEVICE_NAME,
- .owner = THIS_MODULE,
- },
- .probe = hdmi_sii_probe,
- .remove = __exit_p(hdmi_sii_remove),
- .id_table = hmdi_sii_id,
-};
-
-static int __init hdmi_sii_init(void)
-{
- int ret;
- struct msm_panel_info pinfo;
-
- if (msm_fb_detect_client("hdmi_sii9022"))
- return 0;
-
- pinfo.xres = 1280;
- pinfo.yres = 720;
- pinfo.type = HDMI_PANEL;
- pinfo.pdest = DISPLAY_1;
- pinfo.wait_cycle = 0;
- pinfo.bpp = 24;
- pinfo.fb_num = 2;
- pinfo.clk_rate = 74250000;
-
- pinfo.lcdc.h_back_porch = 124;
- pinfo.lcdc.h_front_porch = 110;
- pinfo.lcdc.h_pulse_width = 136;
- pinfo.lcdc.v_back_porch = 19;
- pinfo.lcdc.v_front_porch = 5;
- pinfo.lcdc.v_pulse_width = 6;
- pinfo.lcdc.border_clr = 0;
- pinfo.lcdc.underflow_clr = 0xff;
- pinfo.lcdc.hsync_skew = 0;
-
- ret = lcdc_device_register(&pinfo);
- if (ret) {
- printk(KERN_ERR "%s: failed to register device\n", __func__);
- goto init_exit;
- }
-
- ret = i2c_add_driver(&hdmi_sii_i2c_driver);
- if (ret)
- printk(KERN_ERR "%s: failed to add i2c driver\n", __func__);
-
-init_exit:
- return ret;
-}
-
-static void __exit hdmi_sii_exit(void)
-{
- i2c_del_driver(&hdmi_sii_i2c_driver);
-}
-
-module_init(hdmi_sii_init);
-module_exit(hdmi_sii_exit);
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION("0.1");
-MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
-MODULE_DESCRIPTION("SiI9022 HDMI driver");
-MODULE_ALIAS("platform:hdmi-sii9022");
diff --git a/drivers/staging/msm/lcdc.c b/drivers/staging/msm/lcdc.c
deleted file mode 100644
index 8183394aef7..00000000000
--- a/drivers/staging/msm/lcdc.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/uaccess.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/pm_qos_params.h>
-
-#include "msm_fb.h"
-
-static int lcdc_probe(struct platform_device *pdev);
-static int lcdc_remove(struct platform_device *pdev);
-
-static int lcdc_off(struct platform_device *pdev);
-static int lcdc_on(struct platform_device *pdev);
-
-static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
-static int pdev_list_cnt;
-
-static struct clk *mdp_lcdc_pclk_clk;
-static struct clk *mdp_lcdc_pad_pclk_clk;
-
-int mdp_lcdc_pclk_clk_rate;
-int mdp_lcdc_pad_pclk_clk_rate;
-
-static struct platform_driver lcdc_driver = {
- .probe = lcdc_probe,
- .remove = lcdc_remove,
- .suspend = NULL,
- .resume = NULL,
- .shutdown = NULL,
- .driver = {
- .name = "lcdc",
- },
-};
-
-static struct lcdc_platform_data *lcdc_pdata;
-
-static int lcdc_off(struct platform_device *pdev)
-{
- int ret = 0;
-
- ret = panel_next_off(pdev);
-
- clk_disable(mdp_lcdc_pclk_clk);
- clk_disable(mdp_lcdc_pad_pclk_clk);
-
- if (lcdc_pdata && lcdc_pdata->lcdc_power_save)
- lcdc_pdata->lcdc_power_save(0);
-
- if (lcdc_pdata && lcdc_pdata->lcdc_gpio_config)
- ret = lcdc_pdata->lcdc_gpio_config(0);
-
-// pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ , "lcdc",
-// PM_QOS_DEFAULT_VALUE);
-
- return ret;
-}
-
-static int lcdc_on(struct platform_device *pdev)
-{
- int ret = 0;
- struct msm_fb_data_type *mfd;
- unsigned long panel_pixclock_freq , pm_qos_freq;
-
- mfd = platform_get_drvdata(pdev);
- panel_pixclock_freq = mfd->fbi->var.pixclock;
-
- if (panel_pixclock_freq > 58000000)
-		/* pm_qos_freq should be in kHz */
- pm_qos_freq = panel_pixclock_freq / 1000 ;
- else
- pm_qos_freq = 58000;
-
-// pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ , "lcdc",
-// pm_qos_freq);
- mfd = platform_get_drvdata(pdev);
-
- clk_enable(mdp_lcdc_pclk_clk);
- clk_enable(mdp_lcdc_pad_pclk_clk);
-
- if (lcdc_pdata && lcdc_pdata->lcdc_power_save)
- lcdc_pdata->lcdc_power_save(1);
- if (lcdc_pdata && lcdc_pdata->lcdc_gpio_config)
- ret = lcdc_pdata->lcdc_gpio_config(1);
-
- clk_set_rate(mdp_lcdc_pclk_clk, mfd->fbi->var.pixclock);
- clk_set_rate(mdp_lcdc_pad_pclk_clk, mfd->fbi->var.pixclock);
- mdp_lcdc_pclk_clk_rate = clk_get_rate(mdp_lcdc_pclk_clk);
- mdp_lcdc_pad_pclk_clk_rate = clk_get_rate(mdp_lcdc_pad_pclk_clk);
-
- ret = panel_next_on(pdev);
- return ret;
-}
-
-static int lcdc_probe(struct platform_device *pdev)
-{
- struct msm_fb_data_type *mfd;
- struct fb_info *fbi;
- struct platform_device *mdp_dev = NULL;
- struct msm_fb_panel_data *pdata = NULL;
- int rc;
-
- if (pdev->id == 0) {
- lcdc_pdata = pdev->dev.platform_data;
- return 0;
- }
-
- mfd = platform_get_drvdata(pdev);
-
- if (!mfd)
- return -ENODEV;
-
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
- return -ENOMEM;
-
- mdp_dev = platform_device_alloc("mdp", pdev->id);
- if (!mdp_dev)
- return -ENOMEM;
-
- /*
- * link to the latest pdev
- */
- mfd->pdev = mdp_dev;
- mfd->dest = DISPLAY_LCDC;
-
- /*
- * alloc panel device data
- */
- if (platform_device_add_data
- (mdp_dev, pdev->dev.platform_data,
- sizeof(struct msm_fb_panel_data))) {
- printk(KERN_ERR "lcdc_probe: platform_device_add_data failed!\n");
- platform_device_put(mdp_dev);
- return -ENOMEM;
- }
- /*
- * data chain
- */
- pdata = (struct msm_fb_panel_data *)mdp_dev->dev.platform_data;
- pdata->on = lcdc_on;
- pdata->off = lcdc_off;
- pdata->next = pdev;
-
- /*
- * get/set panel specific fb info
- */
- mfd->panel_info = pdata->panel_info;
- mfd->fb_imgType = MDP_RGB_565;
-
- fbi = mfd->fbi;
- fbi->var.pixclock = mfd->panel_info.clk_rate;
- fbi->var.left_margin = mfd->panel_info.lcdc.h_back_porch;
- fbi->var.right_margin = mfd->panel_info.lcdc.h_front_porch;
- fbi->var.upper_margin = mfd->panel_info.lcdc.v_back_porch;
- fbi->var.lower_margin = mfd->panel_info.lcdc.v_front_porch;
- fbi->var.hsync_len = mfd->panel_info.lcdc.h_pulse_width;
- fbi->var.vsync_len = mfd->panel_info.lcdc.v_pulse_width;
-
- /*
- * set driver data
- */
- platform_set_drvdata(mdp_dev, mfd);
-
- /*
- * register in mdp driver
- */
- rc = platform_device_add(mdp_dev);
- if (rc)
- goto lcdc_probe_err;
-
- pdev_list[pdev_list_cnt++] = pdev;
- return 0;
-
-lcdc_probe_err:
- platform_device_put(mdp_dev);
- return rc;
-}
-
-static int lcdc_remove(struct platform_device *pdev)
-{
-// pm_qos_remove_requirement(PM_QOS_SYSTEM_BUS_FREQ , "lcdc");
- return 0;
-}
-
-static int lcdc_register_driver(void)
-{
- return platform_driver_register(&lcdc_driver);
-}
-
-static int __init lcdc_driver_init(void)
-{
- mdp_lcdc_pclk_clk = clk_get(NULL, "mdp_lcdc_pclk_clk");
- if (IS_ERR(mdp_lcdc_pclk_clk)) {
- printk(KERN_ERR "error: can't get mdp_lcdc_pclk_clk!\n");
- return PTR_ERR(mdp_lcdc_pclk_clk);
- }
- mdp_lcdc_pad_pclk_clk = clk_get(NULL, "mdp_lcdc_pad_pclk_clk");
- if (IS_ERR(mdp_lcdc_pad_pclk_clk)) {
- printk(KERN_ERR "error: can't get mdp_lcdc_pad_pclk_clk!\n");
- return PTR_ERR(mdp_lcdc_pad_pclk_clk);
- }
-// pm_qos_add_requirement(PM_QOS_SYSTEM_BUS_FREQ , "lcdc",
-// PM_QOS_DEFAULT_VALUE);
- return lcdc_register_driver();
-}
-
-module_init(lcdc_driver_init);
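Note: lcdc_on() above drives the two MDP LCDC clocks with the generic clk API, enabling them and only afterwards setting their rate to the panel pixclock. A conventional bring-up typically sets the rate before ungating the clock; the sketch below shows that ordering with the same clock names as lcdc_driver_init(), and is an illustration of the API rather than a drop-in replacement.

/* Sketch: conventional bring-up of the two LCDC clocks used above.
 * Clock names match lcdc_driver_init(); error handling is minimal. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>

static int lcdc_clocks_on(unsigned long pixclock)
{
	struct clk *pclk = clk_get(NULL, "mdp_lcdc_pclk_clk");
	struct clk *pad  = clk_get(NULL, "mdp_lcdc_pad_pclk_clk");

	if (IS_ERR(pclk) || IS_ERR(pad))
		return -ENODEV;

	clk_set_rate(pclk, pixclock);	/* program the rate first ... */
	clk_set_rate(pad, pixclock);
	clk_enable(pclk);		/* ... then gate the clocks on */
	clk_enable(pad);
	return 0;
}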
diff --git a/drivers/staging/msm/lcdc_external.c b/drivers/staging/msm/lcdc_external.c
deleted file mode 100644
index 45ff7852711..00000000000
--- a/drivers/staging/msm/lcdc_external.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-
-static int __init lcdc_external_init(void)
-{
- int ret;
- struct msm_panel_info pinfo;
-
- if (msm_fb_detect_client("lcdc_external"))
- return 0;
-
- pinfo.xres = 1280;
- pinfo.yres = 720;
- pinfo.type = LCDC_PANEL;
- pinfo.pdest = DISPLAY_1;
- pinfo.wait_cycle = 0;
- pinfo.bpp = 24;
- pinfo.fb_num = 2;
- pinfo.clk_rate = 74250000;
-
- pinfo.lcdc.h_back_porch = 124;
- pinfo.lcdc.h_front_porch = 110;
- pinfo.lcdc.h_pulse_width = 136;
- pinfo.lcdc.v_back_porch = 19;
- pinfo.lcdc.v_front_porch = 5;
- pinfo.lcdc.v_pulse_width = 6;
- pinfo.lcdc.border_clr = 0; /* blk */
- pinfo.lcdc.underflow_clr = 0xff; /* blue */
- pinfo.lcdc.hsync_skew = 0;
-
- ret = lcdc_device_register(&pinfo);
- if (ret)
- printk(KERN_ERR "%s: failed to register device!\n", __func__);
-
- return ret;
-}
-
-module_init(lcdc_external_init);
diff --git a/drivers/staging/msm/lcdc_gordon.c b/drivers/staging/msm/lcdc_gordon.c
deleted file mode 100644
index 399ec8c791e..00000000000
--- a/drivers/staging/msm/lcdc_gordon.c
+++ /dev/null
@@ -1,446 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/delay.h>
-#include <mach/gpio.h>
-#include "msm_fb.h"
-
-/* registers */
-#define GORDON_REG_NOP 0x00
-#define GORDON_REG_IMGCTL1 0x10
-#define GORDON_REG_IMGCTL2 0x11
-#define GORDON_REG_IMGSET1 0x12
-#define GORDON_REG_IMGSET2 0x13
-#define GORDON_REG_IVBP1 0x14
-#define GORDON_REG_IHBP1 0x15
-#define GORDON_REG_IVNUM1 0x16
-#define GORDON_REG_IHNUM1 0x17
-#define GORDON_REG_IVBP2 0x18
-#define GORDON_REG_IHBP2 0x19
-#define GORDON_REG_IVNUM2 0x1A
-#define GORDON_REG_IHNUM2 0x1B
-#define GORDON_REG_LCDIFCTL1 0x30
-#define GORDON_REG_VALTRAN 0x31
-#define GORDON_REG_AVCTL 0x33
-#define GORDON_REG_LCDIFCTL2 0x34
-#define GORDON_REG_LCDIFCTL3 0x35
-#define GORDON_REG_LCDIFSET1 0x36
-#define GORDON_REG_PCCTL 0x3C
-#define GORDON_REG_TPARAM1 0x40
-#define GORDON_REG_TLCDIF1 0x41
-#define GORDON_REG_TSSPB_ST1 0x42
-#define GORDON_REG_TSSPB_ED1 0x43
-#define GORDON_REG_TSCK_ST1 0x44
-#define GORDON_REG_TSCK_WD1 0x45
-#define GORDON_REG_TGSPB_VST1 0x46
-#define GORDON_REG_TGSPB_VED1 0x47
-#define GORDON_REG_TGSPB_CH1 0x48
-#define GORDON_REG_TGCK_ST1 0x49
-#define GORDON_REG_TGCK_ED1 0x4A
-#define GORDON_REG_TPCTL_ST1 0x4B
-#define GORDON_REG_TPCTL_ED1 0x4C
-#define GORDON_REG_TPCHG_ED1 0x4D
-#define GORDON_REG_TCOM_CH1 0x4E
-#define GORDON_REG_THBP1 0x4F
-#define GORDON_REG_TPHCTL1 0x50
-#define GORDON_REG_EVPH1 0x51
-#define GORDON_REG_EVPL1 0x52
-#define GORDON_REG_EVNH1 0x53
-#define GORDON_REG_EVNL1 0x54
-#define GORDON_REG_TBIAS1 0x55
-#define GORDON_REG_TPARAM2 0x56
-#define GORDON_REG_TLCDIF2 0x57
-#define GORDON_REG_TSSPB_ST2 0x58
-#define GORDON_REG_TSSPB_ED2 0x59
-#define GORDON_REG_TSCK_ST2 0x5A
-#define GORDON_REG_TSCK_WD2 0x5B
-#define GORDON_REG_TGSPB_VST2 0x5C
-#define GORDON_REG_TGSPB_VED2 0x5D
-#define GORDON_REG_TGSPB_CH2 0x5E
-#define GORDON_REG_TGCK_ST2 0x5F
-#define GORDON_REG_TGCK_ED2 0x60
-#define GORDON_REG_TPCTL_ST2 0x61
-#define GORDON_REG_TPCTL_ED2 0x62
-#define GORDON_REG_TPCHG_ED2 0x63
-#define GORDON_REG_TCOM_CH2 0x64
-#define GORDON_REG_THBP2 0x65
-#define GORDON_REG_TPHCTL2 0x66
-#define GORDON_REG_POWCTL 0x80
-
-static int lcdc_gordon_panel_off(struct platform_device *pdev);
-
-static int spi_cs;
-static int spi_sclk;
-static int spi_sdo;
-static int spi_sdi;
-static int spi_dac;
-static unsigned char bit_shift[8] = { (1 << 7), /* MSB */
- (1 << 6),
- (1 << 5),
- (1 << 4),
- (1 << 3),
- (1 << 2),
- (1 << 1),
- (1 << 0) /* LSB */
-};
-
-struct gordon_state_type{
- boolean disp_initialized;
- boolean display_on;
- boolean disp_powered_up;
-};
-
-static struct gordon_state_type gordon_state = { 0 };
-static struct msm_panel_common_pdata *lcdc_gordon_pdata;
-
-static void serigo(uint16 reg, uint8 data)
-{
- unsigned int tx_val = ((0x00FF & reg) << 8) | data;
- unsigned char i, val = 0;
-
- /* Enable the Chip Select */
- gpio_set_value(spi_cs, 1);
- udelay(33);
-
- /* Transmit it in two parts, Higher Byte first, then Lower Byte */
- val = (unsigned char)((tx_val & 0xFF00) >> 8);
-
- /* Clock should be Low before entering ! */
- for (i = 0; i < 8; i++) {
- /* #1: Drive the Data (High or Low) */
- if (val & bit_shift[i])
- gpio_set_value(spi_sdi, 1);
- else
- gpio_set_value(spi_sdi, 0);
-
- /* #2: Drive the Clk High and then Low */
- udelay(33);
- gpio_set_value(spi_sclk, 1);
- udelay(33);
- gpio_set_value(spi_sclk, 0);
- }
-
- /* Idle state of SDO (MOSI) is Low */
- gpio_set_value(spi_sdi, 0);
- /* ..then Lower Byte */
- val = (uint8) (tx_val & 0x00FF);
- /* Before we enter here the Clock should be Low ! */
-
- for (i = 0; i < 8; i++) {
- /* #1: Drive the Data (High or Low) */
- if (val & bit_shift[i])
- gpio_set_value(spi_sdi, 1);
- else
- gpio_set_value(spi_sdi, 0);
-
- /* #2: Drive the Clk High and then Low */
- udelay(33);
-
- gpio_set_value(spi_sclk, 1);
- udelay(33);
- gpio_set_value(spi_sclk, 0);
- }
-
- /* Idle state of SDO (MOSI) is Low */
- gpio_set_value(spi_sdi, 0);
-
- /* Now Disable the Chip Select */
- udelay(33);
- gpio_set_value(spi_cs, 0);
-}
-
-static void spi_init(void)
-{
- /* Setting the Default GPIO's */
- spi_sclk = *(lcdc_gordon_pdata->gpio_num);
- spi_cs = *(lcdc_gordon_pdata->gpio_num + 1);
- spi_sdi = *(lcdc_gordon_pdata->gpio_num + 2);
- spi_sdo = *(lcdc_gordon_pdata->gpio_num + 3);
-
-	/* Set the output so that we don't disturb the slave device */
- gpio_set_value(spi_sclk, 0);
- gpio_set_value(spi_sdi, 0);
-
- /* Set the Chip Select De-asserted */
- gpio_set_value(spi_cs, 0);
-
-}
-
-static void gordon_disp_powerup(void)
-{
- if (!gordon_state.disp_powered_up && !gordon_state.display_on) {
- /* Reset the hardware first */
- /* Include DAC power up implementation here */
- gordon_state.disp_powered_up = TRUE;
- }
-}
-
-static void gordon_init(void)
-{
- /* Image interface settings */
- serigo(GORDON_REG_IMGCTL2, 0x00);
- serigo(GORDON_REG_IMGSET1, 0x00);
-
- /* Exchange the RGB signal for J510(Softbank mobile) */
- serigo(GORDON_REG_IMGSET2, 0x12);
- serigo(GORDON_REG_LCDIFSET1, 0x00);
-
- /* Pre-charge settings */
- serigo(GORDON_REG_PCCTL, 0x09);
- serigo(GORDON_REG_LCDIFCTL2, 0x7B);
-
- mdelay(1);
-}
-
-static void gordon_disp_on(void)
-{
- if (gordon_state.disp_powered_up && !gordon_state.display_on) {
- gordon_init();
- mdelay(20);
- /* gordon_dispmode setting */
- serigo(GORDON_REG_TPARAM1, 0x30);
- serigo(GORDON_REG_TLCDIF1, 0x00);
- serigo(GORDON_REG_TSSPB_ST1, 0x8B);
- serigo(GORDON_REG_TSSPB_ED1, 0x93);
- serigo(GORDON_REG_TSCK_ST1, 0x88);
- serigo(GORDON_REG_TSCK_WD1, 0x00);
- serigo(GORDON_REG_TGSPB_VST1, 0x01);
- serigo(GORDON_REG_TGSPB_VED1, 0x02);
- serigo(GORDON_REG_TGSPB_CH1, 0x5E);
- serigo(GORDON_REG_TGCK_ST1, 0x80);
- serigo(GORDON_REG_TGCK_ED1, 0x3C);
- serigo(GORDON_REG_TPCTL_ST1, 0x50);
- serigo(GORDON_REG_TPCTL_ED1, 0x74);
- serigo(GORDON_REG_TPCHG_ED1, 0x78);
- serigo(GORDON_REG_TCOM_CH1, 0x50);
- serigo(GORDON_REG_THBP1, 0x84);
- serigo(GORDON_REG_TPHCTL1, 0x00);
- serigo(GORDON_REG_EVPH1, 0x70);
- serigo(GORDON_REG_EVPL1, 0x64);
- serigo(GORDON_REG_EVNH1, 0x56);
- serigo(GORDON_REG_EVNL1, 0x48);
- serigo(GORDON_REG_TBIAS1, 0x88);
-
- /* QVGA settings */
- serigo(GORDON_REG_TPARAM2, 0x28);
- serigo(GORDON_REG_TLCDIF2, 0x14);
- serigo(GORDON_REG_TSSPB_ST2, 0x49);
- serigo(GORDON_REG_TSSPB_ED2, 0x4B);
- serigo(GORDON_REG_TSCK_ST2, 0x4A);
- serigo(GORDON_REG_TSCK_WD2, 0x02);
- serigo(GORDON_REG_TGSPB_VST2, 0x02);
- serigo(GORDON_REG_TGSPB_VED2, 0x03);
- serigo(GORDON_REG_TGSPB_CH2, 0x2F);
- serigo(GORDON_REG_TGCK_ST2, 0x40);
- serigo(GORDON_REG_TGCK_ED2, 0x1E);
- serigo(GORDON_REG_TPCTL_ST2, 0x2C);
- serigo(GORDON_REG_TPCTL_ED2, 0x3A);
- serigo(GORDON_REG_TPCHG_ED2, 0x3C);
- serigo(GORDON_REG_TCOM_CH2, 0x28);
- serigo(GORDON_REG_THBP2, 0x4D);
- serigo(GORDON_REG_TPHCTL2, 0x1A);
-
- /* VGA settings */
- serigo(GORDON_REG_IVBP1, 0x02);
- serigo(GORDON_REG_IHBP1, 0x90);
- serigo(GORDON_REG_IVNUM1, 0xA0);
- serigo(GORDON_REG_IHNUM1, 0x78);
-
- /* QVGA settings */
- serigo(GORDON_REG_IVBP2, 0x02);
- serigo(GORDON_REG_IHBP2, 0x48);
- serigo(GORDON_REG_IVNUM2, 0x50);
- serigo(GORDON_REG_IHNUM2, 0x3C);
-
- /* Gordon Charge pump settings and ON */
- serigo(GORDON_REG_POWCTL, 0x03);
- mdelay(15);
- serigo(GORDON_REG_POWCTL, 0x07);
- mdelay(15);
-
- serigo(GORDON_REG_POWCTL, 0x0F);
- mdelay(15);
-
- serigo(GORDON_REG_AVCTL, 0x03);
- mdelay(15);
-
- serigo(GORDON_REG_POWCTL, 0x1F);
- mdelay(15);
-
- serigo(GORDON_REG_POWCTL, 0x5F);
- mdelay(15);
-
- serigo(GORDON_REG_POWCTL, 0x7F);
- mdelay(15);
-
- serigo(GORDON_REG_LCDIFCTL1, 0x02);
- mdelay(15);
-
- serigo(GORDON_REG_IMGCTL1, 0x00);
- mdelay(15);
-
- serigo(GORDON_REG_LCDIFCTL3, 0x00);
- mdelay(15);
-
- serigo(GORDON_REG_VALTRAN, 0x01);
- mdelay(15);
-
- serigo(GORDON_REG_LCDIFCTL1, 0x03);
- mdelay(1);
- gordon_state.display_on = TRUE;
- }
-}
-
-static int lcdc_gordon_panel_on(struct platform_device *pdev)
-{
- if (!gordon_state.disp_initialized) {
- /* Configure reset GPIO that drives DAC */
- lcdc_gordon_pdata->panel_config_gpio(1);
- spi_dac = *(lcdc_gordon_pdata->gpio_num + 4);
- gpio_set_value(spi_dac, 0);
- udelay(15);
- gpio_set_value(spi_dac, 1);
- spi_init(); /* LCD needs SPI */
- gordon_disp_powerup();
- gordon_disp_on();
- gordon_state.disp_initialized = TRUE;
- }
- return 0;
-}
-
-static int lcdc_gordon_panel_off(struct platform_device *pdev)
-{
- if (gordon_state.disp_powered_up && gordon_state.display_on) {
- serigo(GORDON_REG_LCDIFCTL2, 0x7B);
- serigo(GORDON_REG_VALTRAN, 0x01);
- serigo(GORDON_REG_LCDIFCTL1, 0x02);
- serigo(GORDON_REG_LCDIFCTL3, 0x01);
- mdelay(20);
- serigo(GORDON_REG_VALTRAN, 0x01);
- serigo(GORDON_REG_IMGCTL1, 0x01);
- serigo(GORDON_REG_LCDIFCTL1, 0x00);
- mdelay(20);
-
- serigo(GORDON_REG_POWCTL, 0x1F);
- mdelay(40);
-
- serigo(GORDON_REG_POWCTL, 0x07);
- mdelay(40);
-
- serigo(GORDON_REG_POWCTL, 0x03);
- mdelay(40);
-
- serigo(GORDON_REG_POWCTL, 0x00);
- mdelay(40);
- lcdc_gordon_pdata->panel_config_gpio(0);
- gordon_state.display_on = FALSE;
- gordon_state.disp_initialized = FALSE;
- }
- return 0;
-}
-
-static void lcdc_gordon_set_backlight(struct msm_fb_data_type *mfd)
-{
- int bl_level = mfd->bl_level;
-
- if (bl_level <= 1) {
- /* keep back light OFF */
- serigo(GORDON_REG_LCDIFCTL2, 0x0B);
- udelay(15);
- serigo(GORDON_REG_VALTRAN, 0x01);
- } else {
- /* keep back light ON */
- serigo(GORDON_REG_LCDIFCTL2, 0x7B);
- udelay(15);
- serigo(GORDON_REG_VALTRAN, 0x01);
- }
-}
-
-static int __init gordon_probe(struct platform_device *pdev)
-{
- if (pdev->id == 0) {
- lcdc_gordon_pdata = pdev->dev.platform_data;
- return 0;
- }
- msm_fb_add_device(pdev);
- return 0;
-}
-
-static struct platform_driver this_driver = {
- .probe = gordon_probe,
- .driver = {
- .name = "lcdc_gordon_vga",
- },
-};
-
-static struct msm_fb_panel_data gordon_panel_data = {
- .on = lcdc_gordon_panel_on,
- .off = lcdc_gordon_panel_off,
- .set_backlight = lcdc_gordon_set_backlight,
-};
-
-static struct platform_device this_device = {
- .name = "lcdc_gordon_vga",
- .id = 1,
- .dev = {
- .platform_data = &gordon_panel_data,
- }
-};
-
-static int __init lcdc_gordon_panel_init(void)
-{
- int ret;
- struct msm_panel_info *pinfo;
-
-#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
- if (msm_fb_detect_client("lcdc_gordon_vga"))
- return 0;
-#endif
- ret = platform_driver_register(&this_driver);
- if (ret)
- return ret;
-
- pinfo = &gordon_panel_data.panel_info;
- pinfo->xres = 480;
- pinfo->yres = 640;
- pinfo->type = LCDC_PANEL;
- pinfo->pdest = DISPLAY_1;
- pinfo->wait_cycle = 0;
- pinfo->bpp = 24;
- pinfo->fb_num = 2;
- pinfo->clk_rate = 24500000;
- pinfo->bl_max = 4;
- pinfo->bl_min = 1;
-
- pinfo->lcdc.h_back_porch = 84;
- pinfo->lcdc.h_front_porch = 33;
- pinfo->lcdc.h_pulse_width = 60;
- pinfo->lcdc.v_back_porch = 0;
- pinfo->lcdc.v_front_porch = 2;
- pinfo->lcdc.v_pulse_width = 2;
- pinfo->lcdc.border_clr = 0; /* blk */
- pinfo->lcdc.underflow_clr = 0xff; /* blue */
- pinfo->lcdc.hsync_skew = 0;
-
- ret = platform_device_register(&this_device);
- if (ret)
- platform_driver_unregister(&this_driver);
-
- return ret;
-}
-
-module_init(lcdc_gordon_panel_init);
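Note: serigo() above bit-bangs a 16-bit frame (register byte, then data byte) to the Gordon panel over GPIOs, most significant bit first with roughly 33 us half-periods. The per-byte step can be written compactly as in the sketch below, which reuses the driver's own gpio_set_value()/udelay() calls and GPIO roles and is meant only to illustrate the shift-out loop.

/* Sketch: one MSB-first byte of the GPIO bit-banged transfer in serigo().
 * sclk/sdi play the same roles as the spi_sclk/spi_sdi platform-data GPIOs. */
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/types.h>

static void gordon_shift_out(int sclk, int sdi, u8 val)
{
	int i;

	for (i = 7; i >= 0; i--) {
		gpio_set_value(sdi, (val >> i) & 1);	/* present the data bit */
		udelay(33);
		gpio_set_value(sclk, 1);		/* clock it in on the rising edge */
		udelay(33);
		gpio_set_value(sclk, 0);		/* return the clock low */
	}
	gpio_set_value(sdi, 0);				/* idle the data line low, as serigo() does */
}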
diff --git a/drivers/staging/msm/lcdc_panel.c b/drivers/staging/msm/lcdc_panel.c
deleted file mode 100644
index b40974e1f27..00000000000
--- a/drivers/staging/msm/lcdc_panel.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-
-static int lcdc_panel_on(struct platform_device *pdev)
-{
- return 0;
-}
-
-static int lcdc_panel_off(struct platform_device *pdev)
-{
- return 0;
-}
-
-static int __init lcdc_panel_probe(struct platform_device *pdev)
-{
- msm_fb_add_device(pdev);
-
- return 0;
-}
-
-static struct platform_driver this_driver = {
- .probe = lcdc_panel_probe,
- .driver = {
- .name = "lcdc_panel",
- },
-};
-
-static struct msm_fb_panel_data lcdc_panel_data = {
- .on = lcdc_panel_on,
- .off = lcdc_panel_off,
-};
-
-static int lcdc_dev_id;
-
-int lcdc_device_register(struct msm_panel_info *pinfo)
-{
- struct platform_device *pdev = NULL;
- int ret;
-
- pdev = platform_device_alloc("lcdc_panel", ++lcdc_dev_id);
- if (!pdev)
- return -ENOMEM;
-
- lcdc_panel_data.panel_info = *pinfo;
- ret = platform_device_add_data(pdev, &lcdc_panel_data,
- sizeof(lcdc_panel_data));
- if (ret) {
- printk(KERN_ERR
- "%s: platform_device_add_data failed!\n", __func__);
- goto err_device_put;
- }
-
- ret = platform_device_add(pdev);
- if (ret) {
- printk(KERN_ERR
- "%s: platform_device_register failed!\n", __func__);
- goto err_device_put;
- }
-
- return 0;
-
-err_device_put:
- platform_device_put(pdev);
- return ret;
-}
-
-static int __init lcdc_panel_init(void)
-{
- return platform_driver_register(&this_driver);
-}
-
-module_init(lcdc_panel_init);
diff --git a/drivers/staging/msm/lcdc_prism.c b/drivers/staging/msm/lcdc_prism.c
deleted file mode 100644
index d102c98447c..00000000000
--- a/drivers/staging/msm/lcdc_prism.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-
-#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
-#include "mddihosti.h"
-#endif
-
-static int __init lcdc_prism_init(void)
-{
- int ret;
- struct msm_panel_info pinfo;
-
-#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
- ret = msm_fb_detect_client("lcdc_prism_wvga");
- if (ret == -ENODEV)
- return 0;
-
- if (ret && (mddi_get_client_id() != 0))
- return 0;
-#endif
-
- pinfo.xres = 800;
- pinfo.yres = 480;
- pinfo.type = LCDC_PANEL;
- pinfo.pdest = DISPLAY_1;
- pinfo.wait_cycle = 0;
- pinfo.bpp = 24;
- pinfo.fb_num = 2;
- pinfo.clk_rate = 38460000;
-
- pinfo.lcdc.h_back_porch = 21;
- pinfo.lcdc.h_front_porch = 81;
- pinfo.lcdc.h_pulse_width = 60;
- pinfo.lcdc.v_back_porch = 18;
- pinfo.lcdc.v_front_porch = 27;
- pinfo.lcdc.v_pulse_width = 2;
- pinfo.lcdc.border_clr = 0; /* blk */
- pinfo.lcdc.underflow_clr = 0xff; /* blue */
- pinfo.lcdc.hsync_skew = 0;
-
- ret = lcdc_device_register(&pinfo);
- if (ret)
- printk(KERN_ERR "%s: failed to register device!\n", __func__);
-
- return ret;
-}
-
-module_init(lcdc_prism_init);
diff --git a/drivers/staging/msm/lcdc_sharp_wvga_pt.c b/drivers/staging/msm/lcdc_sharp_wvga_pt.c
deleted file mode 100644
index 1f08cf9bc21..00000000000
--- a/drivers/staging/msm/lcdc_sharp_wvga_pt.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/delay.h>
-#ifdef CONFIG_ARCH_MSM7X30
-#include <linux/mfd/pmic8058.h>
-#endif
-#include <mach/gpio.h>
-#include "msm_fb.h"
-
-static int lcdc_sharp_panel_off(struct platform_device *pdev);
-
-static int spi_cs;
-static int spi_sclk;
-static int spi_mosi;
-static int spi_miso;
-static unsigned char bit_shift[8] = { (1 << 7), /* MSB */
- (1 << 6),
- (1 << 5),
- (1 << 4),
- (1 << 3),
- (1 << 2),
- (1 << 1),
- (1 << 0) /* LSB */
-};
-
-struct sharp_state_type {
- boolean disp_initialized;
- boolean display_on;
- boolean disp_powered_up;
-};
-
-struct sharp_spi_data {
- u8 addr;
- u8 data;
-};
-
-static struct sharp_spi_data init_sequence[] = {
- { 15, 0x01 },
- { 5, 0x01 },
- { 7, 0x10 },
- { 9, 0x1E },
- { 10, 0x04 },
- { 17, 0xFF },
- { 21, 0x8A },
- { 22, 0x00 },
- { 23, 0x82 },
- { 24, 0x24 },
- { 25, 0x22 },
- { 26, 0x6D },
- { 27, 0xEB },
- { 28, 0xB9 },
- { 29, 0x3A },
- { 49, 0x1A },
- { 50, 0x16 },
- { 51, 0x05 },
- { 55, 0x7F },
- { 56, 0x15 },
- { 57, 0x7B },
- { 60, 0x05 },
- { 61, 0x0C },
- { 62, 0x80 },
- { 63, 0x00 },
- { 92, 0x90 },
- { 97, 0x01 },
- { 98, 0xFF },
- { 113, 0x11 },
- { 114, 0x02 },
- { 115, 0x08 },
- { 123, 0xAB },
- { 124, 0x04 },
- { 6, 0x02 },
- { 133, 0x00 },
- { 134, 0xFE },
- { 135, 0x22 },
- { 136, 0x0B },
- { 137, 0xFF },
- { 138, 0x0F },
- { 139, 0x00 },
- { 140, 0xFE },
- { 141, 0x22 },
- { 142, 0x0B },
- { 143, 0xFF },
- { 144, 0x0F },
- { 145, 0x00 },
- { 146, 0xFE },
- { 147, 0x22 },
- { 148, 0x0B },
- { 149, 0xFF },
- { 150, 0x0F },
- { 202, 0x30 },
- { 30, 0x01 },
- { 4, 0x01 },
- { 31, 0x41 },
-};
-
-static struct sharp_state_type sharp_state = { 0 };
-static struct msm_panel_common_pdata *lcdc_sharp_pdata;
-
-static void sharp_spi_write_byte(u8 val)
-{
- int i;
-
- /* Clock should be Low before entering */
- for (i = 0; i < 8; i++) {
- /* #1: Drive the Data (High or Low) */
- if (val & bit_shift[i])
- gpio_set_value(spi_mosi, 1);
- else
- gpio_set_value(spi_mosi, 0);
-
- /* #2: Drive the Clk High and then Low */
- gpio_set_value(spi_sclk, 1);
- gpio_set_value(spi_sclk, 0);
- }
-}
-
-static void serigo(u8 reg, u8 data)
-{
- /* Enable the Chip Select - low */
- gpio_set_value(spi_cs, 0);
- udelay(1);
-
- /* Transmit register address first, then data */
- sharp_spi_write_byte(reg);
-
- /* Idle state of MOSI is Low */
- gpio_set_value(spi_mosi, 0);
- udelay(1);
- sharp_spi_write_byte(data);
-
- gpio_set_value(spi_mosi, 0);
- gpio_set_value(spi_cs, 1);
-}
-
-static void sharp_spi_init(void)
-{
- spi_sclk = *(lcdc_sharp_pdata->gpio_num);
- spi_cs = *(lcdc_sharp_pdata->gpio_num + 1);
- spi_mosi = *(lcdc_sharp_pdata->gpio_num + 2);
- spi_miso = *(lcdc_sharp_pdata->gpio_num + 3);
-
- /* Set the output so that we don't disturb the slave device */
- gpio_set_value(spi_sclk, 0);
- gpio_set_value(spi_mosi, 0);
-
- /* Set the Chip Select deasserted (active low) */
- gpio_set_value(spi_cs, 1);
-}
-
-static void sharp_disp_powerup(void)
-{
- if (!sharp_state.disp_powered_up && !sharp_state.display_on)
- sharp_state.disp_powered_up = TRUE;
-}
-
-static void sharp_disp_on(void)
-{
- int i;
-
- if (sharp_state.disp_powered_up && !sharp_state.display_on) {
- for (i = 0; i < ARRAY_SIZE(init_sequence); i++) {
- serigo(init_sequence[i].addr,
- init_sequence[i].data);
- }
- mdelay(10);
- serigo(31, 0xC1);
- mdelay(10);
- serigo(31, 0xD9);
- serigo(31, 0xDF);
-
- sharp_state.display_on = TRUE;
- }
-}
-
-static int lcdc_sharp_panel_on(struct platform_device *pdev)
-{
- if (!sharp_state.disp_initialized) {
- lcdc_sharp_pdata->panel_config_gpio(1);
- sharp_spi_init();
- sharp_disp_powerup();
- sharp_disp_on();
- sharp_state.disp_initialized = TRUE;
- }
- return 0;
-}
-
-static int lcdc_sharp_panel_off(struct platform_device *pdev)
-{
- if (sharp_state.disp_powered_up && sharp_state.display_on) {
- serigo(4, 0x00);
- mdelay(40);
- serigo(31, 0xC1);
- mdelay(40);
- serigo(31, 0x00);
- mdelay(100);
- sharp_state.display_on = FALSE;
- sharp_state.disp_initialized = FALSE;
- }
- return 0;
-}
-
-static int __init sharp_probe(struct platform_device *pdev)
-{
- if (pdev->id == 0) {
- lcdc_sharp_pdata = pdev->dev.platform_data;
- return 0;
- }
- msm_fb_add_device(pdev);
- return 0;
-}
-
-static struct platform_driver this_driver = {
- .probe = sharp_probe,
- .driver = {
- .name = "lcdc_sharp_wvga",
- },
-};
-
-static struct msm_fb_panel_data sharp_panel_data = {
- .on = lcdc_sharp_panel_on,
- .off = lcdc_sharp_panel_off,
-};
-
-static struct platform_device this_device = {
- .name = "lcdc_sharp_wvga",
- .id = 1,
- .dev = {
- .platform_data = &sharp_panel_data,
- }
-};
-
-static int __init lcdc_sharp_panel_init(void)
-{
- int ret;
- struct msm_panel_info *pinfo;
-
-#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
- if (msm_fb_detect_client("lcdc_sharp_wvga_pt"))
- return 0;
-#endif
-
- ret = platform_driver_register(&this_driver);
- if (ret)
- return ret;
-
- pinfo = &sharp_panel_data.panel_info;
- pinfo->xres = 480;
- pinfo->yres = 800;
- pinfo->type = LCDC_PANEL;
- pinfo->pdest = DISPLAY_1;
- pinfo->wait_cycle = 0;
- pinfo->bpp = 18;
- pinfo->fb_num = 2;
- pinfo->clk_rate = 24500000;
- pinfo->bl_max = 4;
- pinfo->bl_min = 1;
-
- pinfo->lcdc.h_back_porch = 20;
- pinfo->lcdc.h_front_porch = 10;
- pinfo->lcdc.h_pulse_width = 10;
- pinfo->lcdc.v_back_porch = 2;
- pinfo->lcdc.v_front_porch = 2;
- pinfo->lcdc.v_pulse_width = 2;
- pinfo->lcdc.border_clr = 0;
- pinfo->lcdc.underflow_clr = 0xff;
- pinfo->lcdc.hsync_skew = 0;
-
- ret = platform_device_register(&this_device);
- if (ret)
- platform_driver_unregister(&this_driver);
-
- return ret;
-}
-
-module_init(lcdc_sharp_panel_init);
diff --git a/drivers/staging/msm/lcdc_st15.c b/drivers/staging/msm/lcdc_st15.c
deleted file mode 100644
index fed8278eb15..00000000000
--- a/drivers/staging/msm/lcdc_st15.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/i2c.h>
-#include <linux/delay.h>
-#include "msm_fb.h"
-
-#define DEVICE_NAME "sii9022"
-#define SII9022_DEVICE_ID 0xB0
-
-struct sii9022_i2c_addr_data{
- u8 addr;
- u8 data;
-};
-
-/* video mode data */
-static u8 video_mode_data[] = {
- 0x00,
- 0xF9, 0x1C, 0x70, 0x17, 0x72, 0x06, 0xEE, 0x02,
-};
-
-static u8 avi_io_format[] = {
- 0x09,
- 0x00, 0x00,
-};
-
-/* power state */
-static struct sii9022_i2c_addr_data regset0[] = {
- { 0x60, 0x04 },
- { 0x63, 0x00 },
- { 0x1E, 0x00 },
-};
-
-static u8 video_infoframe[] = {
- 0x0C,
- 0xF0, 0x00, 0x68, 0x00, 0x04, 0x00, 0x19, 0x00,
- 0xE9, 0x02, 0x04, 0x01, 0x04, 0x06,
-};
-
-/* configure audio */
-static struct sii9022_i2c_addr_data regset1[] = {
- { 0x26, 0x90 },
- { 0x20, 0x90 },
- { 0x1F, 0x80 },
- { 0x26, 0x80 },
- { 0x24, 0x02 },
- { 0x25, 0x0B },
- { 0xBC, 0x02 },
- { 0xBD, 0x24 },
- { 0xBE, 0x02 },
-};
-
-/* enable audio */
-static u8 misc_infoframe[] = {
- 0xBF,
- 0xC2, 0x84, 0x01, 0x0A, 0x6F, 0x02, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-/* set HDMI, active */
-static struct sii9022_i2c_addr_data regset2[] = {
- { 0x1A, 0x01 },
- { 0x3D, 0x00 },
-};
-
-static int send_i2c_data(struct i2c_client *client,
- struct sii9022_i2c_addr_data *regset,
- int size)
-{
- int i;
- int rc = 0;
-
- for (i = 0; i < size; i++) {
- rc = i2c_smbus_write_byte_data(
- client,
- regset[i].addr, regset[i].data);
- if (rc)
- break;
- }
- return rc;
-}
-
-static int hdmi_sii_enable(struct i2c_client *client)
-{
- int rc;
- int retries = 10;
- int count;
-
- rc = i2c_smbus_write_byte_data(client, 0xC7, 0x00);
- if (rc)
- goto enable_exit;
-
- do {
- msleep(1);
- rc = i2c_smbus_read_byte_data(client, 0x1B);
- } while ((rc != SII9022_DEVICE_ID) && retries--);
-
- if (rc != SII9022_DEVICE_ID)
- return -ENODEV;
-
- rc = i2c_smbus_write_byte_data(client, 0x1A, 0x11);
- if (rc)
- goto enable_exit;
-
- count = ARRAY_SIZE(video_mode_data);
- rc = i2c_master_send(client, video_mode_data, count);
- if (rc != count) {
- rc = -EIO;
- goto enable_exit;
- }
-
- rc = i2c_smbus_write_byte_data(client, 0x08, 0x20);
- if (rc)
- goto enable_exit;
- count = ARRAY_SIZE(avi_io_format);
- rc = i2c_master_send(client, avi_io_format, count);
- if (rc != count) {
- rc = -EIO;
- goto enable_exit;
- }
-
- rc = send_i2c_data(client, regset0, ARRAY_SIZE(regset0));
- if (rc)
- goto enable_exit;
-
- count = ARRAY_SIZE(video_infoframe);
- rc = i2c_master_send(client, video_infoframe, count);
- if (rc != count) {
- rc = -EIO;
- goto enable_exit;
- }
-
- rc = send_i2c_data(client, regset1, ARRAY_SIZE(regset1));
- if (rc)
- goto enable_exit;
-
- count = ARRAY_SIZE(misc_infoframe);
- rc = i2c_master_send(client, misc_infoframe, count);
- if (rc != count) {
- rc = -EIO;
- goto enable_exit;
- }
-
- rc = send_i2c_data(client, regset2, ARRAY_SIZE(regset2));
- if (rc)
- goto enable_exit;
-
- return 0;
-enable_exit:
- printk(KERN_ERR "%s: exited rc=%d\n", __func__, rc);
- return rc;
-}
-
-static const struct i2c_device_id hmdi_sii_id[] = {
- { DEVICE_NAME, 0 },
- { }
-};
-
-static int hdmi_sii_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int rc;
-
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
- return -ENODEV;
- rc = hdmi_sii_enable(client);
- return rc;
-}
-
-
-static struct i2c_driver hdmi_sii_i2c_driver = {
- .driver = {
- .name = DEVICE_NAME,
- .owner = THIS_MODULE,
- },
- .probe = hdmi_sii_probe,
- .remove = __exit_p(hdmi_sii_remove),
- .id_table = hmdi_sii_id,
-};
-
-static int __init lcdc_st15_init(void)
-{
- int ret;
- struct msm_panel_info pinfo;
-
- if (msm_fb_detect_client("lcdc_st15"))
- return 0;
-
- pinfo.xres = 1366;
- pinfo.yres = 768;
- pinfo.type = LCDC_PANEL;
- pinfo.pdest = DISPLAY_1;
- pinfo.wait_cycle = 0;
- pinfo.bpp = 24;
- pinfo.fb_num = 2;
- pinfo.clk_rate = 74250000;
-
- pinfo.lcdc.h_back_porch = 120;
- pinfo.lcdc.h_front_porch = 20;
- pinfo.lcdc.h_pulse_width = 40;
- pinfo.lcdc.v_back_porch = 25;
- pinfo.lcdc.v_front_porch = 1;
- pinfo.lcdc.v_pulse_width = 7;
- pinfo.lcdc.border_clr = 0; /* blk */
- pinfo.lcdc.underflow_clr = 0xff; /* blue */
- pinfo.lcdc.hsync_skew = 0;
-
- ret = lcdc_device_register(&pinfo);
- if (ret) {
- printk(KERN_ERR "%s: failed to register device!\n", __func__);
- goto init_exit;
- }
-
- ret = i2c_add_driver(&hdmi_sii_i2c_driver);
- if (ret)
- printk(KERN_ERR "%s: failed to add i2c driver\n", __func__);
-
-init_exit:
- return ret;
-}
-
-module_init(lcdc_st15_init);
diff --git a/drivers/staging/msm/lcdc_toshiba_wvga_pt.c b/drivers/staging/msm/lcdc_toshiba_wvga_pt.c
deleted file mode 100644
index edba78a3afc..00000000000
--- a/drivers/staging/msm/lcdc_toshiba_wvga_pt.c
+++ /dev/null
@@ -1,374 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <mach/gpio.h>
-#include <mach/pmic.h>
-#include "msm_fb.h"
-
-#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
-#include "mddihosti.h"
-#endif
-
-static int spi_cs;
-static int spi_sclk;
-static int spi_mosi;
-static int spi_miso;
-
-struct toshiba_state_type{
- boolean disp_initialized;
- boolean display_on;
- boolean disp_powered_up;
-};
-
-static struct toshiba_state_type toshiba_state = { 0 };
-static struct msm_panel_common_pdata *lcdc_toshiba_pdata;
-
-static void toshiba_spi_write_byte(char dc, uint8 data)
-{
- uint32 bit;
- int bnum;
-
- gpio_set_value(spi_sclk, 0); /* clk low */
- /* dc: 0 for command, 1 for parameter */
- gpio_set_value(spi_mosi, dc);
- udelay(1); /* at least 20 ns */
- gpio_set_value(spi_sclk, 1); /* clk high */
- udelay(1); /* at least 20 ns */
- bnum = 8; /* 8 data bits */
- bit = 0x80;
- while (bnum) {
- gpio_set_value(spi_sclk, 0); /* clk low */
- if (data & bit)
- gpio_set_value(spi_mosi, 1);
- else
- gpio_set_value(spi_mosi, 0);
- udelay(1);
- gpio_set_value(spi_sclk, 1); /* clk high */
- udelay(1);
- bit >>= 1;
- bnum--;
- }
-}
-
-static void toshiba_spi_write(char cmd, uint32 data, int num)
-{
- char *bp;
-
- gpio_set_value(spi_cs, 1); /* cs high */
-
- /* command byte first */
- toshiba_spi_write_byte(0, cmd);
-
- /* followed by parameter bytes */
- if (num) {
- bp = (char *)&data;
- bp += (num - 1);
- while (num) {
- toshiba_spi_write_byte(1, *bp);
- num--;
- bp--;
- }
- }
-
- gpio_set_value(spi_cs, 0); /* cs low */
- udelay(1);
-}
-
-void toshiba_spi_read_bytes(char cmd, uint32 *data, int num)
-{
- uint32 dbit, bits;
- int bnum;
-
- gpio_set_value(spi_cs, 1); /* cs high */
-
- /* command byte first */
- toshiba_spi_write_byte(0, cmd);
-
- if (num > 1) {
- /* extra dc bit */
- gpio_set_value(spi_sclk, 0); /* clk low */
- udelay(1);
- dbit = gpio_get_value(spi_miso);/* dc bit */
- udelay(1);
- gpio_set_value(spi_sclk, 1); /* clk high */
- }
-
- /* followed by data bytes */
- bnum = num * 8; /* number of bits */
- bits = 0;
- while (bnum) {
- bits <<= 1;
- gpio_set_value(spi_sclk, 0); /* clk low */
- udelay(1);
- dbit = gpio_get_value(spi_miso);
- udelay(1);
- gpio_set_value(spi_sclk, 1); /* clk high */
- bits |= dbit;
- bnum--;
- }
-
- *data = bits;
-
- udelay(1);
- gpio_set_value(spi_cs, 0); /* cs low */
- udelay(1);
-}
-
-static void spi_pin_assign(void)
-{
- /* Setting the Default GPIO's */
- spi_sclk = *(lcdc_toshiba_pdata->gpio_num);
- spi_cs = *(lcdc_toshiba_pdata->gpio_num + 1);
- spi_mosi = *(lcdc_toshiba_pdata->gpio_num + 2);
- spi_miso = *(lcdc_toshiba_pdata->gpio_num + 3);
-}
-
-static void toshiba_disp_powerup(void)
-{
- if (!toshiba_state.disp_powered_up && !toshiba_state.display_on) {
- /* Reset the hardware first */
- /* Include DAC power up implementation here */
- toshiba_state.disp_powered_up = TRUE;
- }
-}
-
-static void toshiba_disp_on(void)
-{
- uint32 data;
-
- gpio_set_value(spi_cs, 0); /* low */
- gpio_set_value(spi_sclk, 1); /* high */
- gpio_set_value(spi_mosi, 0);
- gpio_set_value(spi_miso, 0);
-
- if (toshiba_state.disp_powered_up && !toshiba_state.display_on) {
- toshiba_spi_write(0, 0, 0);
- mdelay(7);
- toshiba_spi_write(0, 0, 0);
- mdelay(7);
- toshiba_spi_write(0, 0, 0);
- mdelay(7);
- toshiba_spi_write(0xba, 0x11, 1);
- toshiba_spi_write(0x36, 0x00, 1);
- mdelay(1);
- toshiba_spi_write(0x3a, 0x60, 1);
- toshiba_spi_write(0xb1, 0x5d, 1);
- mdelay(1);
- toshiba_spi_write(0xb2, 0x33, 1);
- toshiba_spi_write(0xb3, 0x22, 1);
- mdelay(1);
- toshiba_spi_write(0xb4, 0x02, 1);
- toshiba_spi_write(0xb5, 0x1e, 1); /* vcs -- adjust brightness */
- mdelay(1);
- toshiba_spi_write(0xb6, 0x27, 1);
- toshiba_spi_write(0xb7, 0x03, 1);
- mdelay(1);
- toshiba_spi_write(0xb9, 0x24, 1);
- toshiba_spi_write(0xbd, 0xa1, 1);
- mdelay(1);
- toshiba_spi_write(0xbb, 0x00, 1);
- toshiba_spi_write(0xbf, 0x01, 1);
- mdelay(1);
- toshiba_spi_write(0xbe, 0x00, 1);
- toshiba_spi_write(0xc0, 0x11, 1);
- mdelay(1);
- toshiba_spi_write(0xc1, 0x11, 1);
- toshiba_spi_write(0xc2, 0x11, 1);
- mdelay(1);
- toshiba_spi_write(0xc3, 0x3232, 2);
- mdelay(1);
- toshiba_spi_write(0xc4, 0x3232, 2);
- mdelay(1);
- toshiba_spi_write(0xc5, 0x3232, 2);
- mdelay(1);
- toshiba_spi_write(0xc6, 0x3232, 2);
- mdelay(1);
- toshiba_spi_write(0xc7, 0x6445, 2);
- mdelay(1);
- toshiba_spi_write(0xc8, 0x44, 1);
- toshiba_spi_write(0xc9, 0x52, 1);
- mdelay(1);
- toshiba_spi_write(0xca, 0x00, 1);
- mdelay(1);
- toshiba_spi_write(0xec, 0x02a4, 2); /* 0x02a4 */
- mdelay(1);
- toshiba_spi_write(0xcf, 0x01, 1);
- mdelay(1);
- toshiba_spi_write(0xd0, 0xc003, 2); /* c003 */
- mdelay(1);
- toshiba_spi_write(0xd1, 0x01, 1);
- mdelay(1);
- toshiba_spi_write(0xd2, 0x0028, 2);
- mdelay(1);
- toshiba_spi_write(0xd3, 0x0028, 2);
- mdelay(1);
- toshiba_spi_write(0xd4, 0x26a4, 2);
- mdelay(1);
- toshiba_spi_write(0xd5, 0x20, 1);
- mdelay(1);
- toshiba_spi_write(0xef, 0x3200, 2);
- mdelay(32);
- toshiba_spi_write(0xbc, 0x80, 1); /* wvga pass through */
- toshiba_spi_write(0x3b, 0x00, 1);
- mdelay(1);
- toshiba_spi_write(0xb0, 0x16, 1);
- mdelay(1);
- toshiba_spi_write(0xb8, 0xfff5, 2);
- mdelay(1);
- toshiba_spi_write(0x11, 0, 0);
- mdelay(5);
- toshiba_spi_write(0x29, 0, 0);
- mdelay(5);
- toshiba_state.display_on = TRUE;
- }
-
- data = 0;
- toshiba_spi_read_bytes(0x04, &data, 3);
- printk(KERN_INFO "toshiba_disp_on: id=%x\n", data);
-
-}
-
-static int lcdc_toshiba_panel_on(struct platform_device *pdev)
-{
- if (!toshiba_state.disp_initialized) {
- /* Configure reset GPIO that drives DAC */
- if (lcdc_toshiba_pdata->panel_config_gpio)
- lcdc_toshiba_pdata->panel_config_gpio(1);
- toshiba_disp_powerup();
- toshiba_disp_on();
- toshiba_state.disp_initialized = TRUE;
- }
- return 0;
-}
-
-static int lcdc_toshiba_panel_off(struct platform_device *pdev)
-{
- if (toshiba_state.disp_powered_up && toshiba_state.display_on) {
- /* Main panel power off (Deep standby in) */
-
- toshiba_spi_write(0x28, 0, 0); /* display off */
- mdelay(1);
- toshiba_spi_write(0xb8, 0x8002, 2); /* output control */
- mdelay(1);
- toshiba_spi_write(0x10, 0x00, 1); /* sleep mode in */
- mdelay(85); /* wait 85 msec */
- toshiba_spi_write(0xb0, 0x00, 1); /* deep standby in */
- mdelay(1);
- if (lcdc_toshiba_pdata->panel_config_gpio)
- lcdc_toshiba_pdata->panel_config_gpio(0);
- toshiba_state.display_on = FALSE;
- toshiba_state.disp_initialized = FALSE;
- }
- return 0;
-}
-
-static void lcdc_toshiba_set_backlight(struct msm_fb_data_type *mfd)
-{
- int bl_level;
- int ret = -EPERM;
-
- bl_level = mfd->bl_level;
- ret = pmic_set_led_intensity(LED_LCD, bl_level);
-
- if (ret)
- printk(KERN_WARNING "%s: can't set lcd backlight!\n",
- __func__);
-}
-
-static int __init toshiba_probe(struct platform_device *pdev)
-{
- if (pdev->id == 0) {
- lcdc_toshiba_pdata = pdev->dev.platform_data;
- spi_pin_assign();
- return 0;
- }
- msm_fb_add_device(pdev);
- return 0;
-}
-
-static struct platform_driver this_driver = {
- .probe = toshiba_probe,
- .driver = {
- .name = "lcdc_toshiba_wvga",
- },
-};
-
-static struct msm_fb_panel_data toshiba_panel_data = {
- .on = lcdc_toshiba_panel_on,
- .off = lcdc_toshiba_panel_off,
- .set_backlight = lcdc_toshiba_set_backlight,
-};
-
-static struct platform_device this_device = {
- .name = "lcdc_toshiba_wvga",
- .id = 1,
- .dev = {
- .platform_data = &toshiba_panel_data,
- }
-};
-
-static int __init lcdc_toshiba_panel_init(void)
-{
- int ret;
- struct msm_panel_info *pinfo;
-#ifdef CONFIG_FB_MSM_TRY_MDDI_CATCH_LCDC_PRISM
- if (mddi_get_client_id() != 0)
- return 0;
-
- ret = msm_fb_detect_client("lcdc_toshiba_wvga_pt");
- if (ret)
- return 0;
-
-#endif
-
- ret = platform_driver_register(&this_driver);
- if (ret)
- return ret;
-
- pinfo = &toshiba_panel_data.panel_info;
- pinfo->xres = 480;
- pinfo->yres = 800;
- pinfo->type = LCDC_PANEL;
- pinfo->pdest = DISPLAY_1;
- pinfo->wait_cycle = 0;
- pinfo->bpp = 18;
- pinfo->fb_num = 2;
- /* 27.648 MHz mdp_lcdc_pclk and mdp_lcdc_pad_pcl */
- pinfo->clk_rate = 27648000;
- pinfo->bl_max = 15;
- pinfo->bl_min = 1;
-
- pinfo->lcdc.h_back_porch = 184; /* hsw = 8 + hbp=184 */
- pinfo->lcdc.h_front_porch = 4;
- pinfo->lcdc.h_pulse_width = 8;
- pinfo->lcdc.v_back_porch = 2; /* vsw=1 + vbp = 2 */
- pinfo->lcdc.v_front_porch = 3;
- pinfo->lcdc.v_pulse_width = 1;
- pinfo->lcdc.border_clr = 0; /* blk */
- pinfo->lcdc.underflow_clr = 0xff; /* blue */
- pinfo->lcdc.hsync_skew = 0;
-
- ret = platform_device_register(&this_device);
- if (ret)
- platform_driver_unregister(&this_driver);
-
- return ret;
-}
-
-device_initcall(lcdc_toshiba_panel_init);
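
Aside on the timing above (illustrative only, not part of the patch): the refresh rate implied by the removed lcdc_toshiba_panel_init() settings follows from the pixel clock and the total horizontal/vertical line lengths. A minimal standalone C sketch of that arithmetic, with the values copied from the code above:

#include <stdio.h>

int main(void)
{
	/* Values taken from lcdc_toshiba_panel_init() above. */
	unsigned long pclk = 27648000UL;            /* pinfo->clk_rate */
	unsigned long h_total = 8 + 184 + 480 + 4;  /* hsw + hbp + xres + hfp */
	unsigned long v_total = 1 + 2 + 800 + 3;    /* vsw + vbp + yres + vfp */
	unsigned long frame = h_total * v_total;    /* pixel clocks per frame */

	/* refresh = pclk / (h_total * v_total), roughly 50.7 Hz here */
	printf("implied refresh ~= %lu.%02lu Hz\n",
	       pclk / frame, (pclk * 100UL / frame) % 100UL);
	return 0;
}
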
diff --git a/drivers/staging/msm/logo.c b/drivers/staging/msm/logo.c
deleted file mode 100644
index 7272765f48c..00000000000
--- a/drivers/staging/msm/logo.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/* drivers/video/msm/logo.c
- *
- * Show Logo in RLE 565 format
- *
- * Copyright (C) 2008 Google Incorporated
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/fb.h>
-#include <linux/vt_kern.h>
-#include <linux/unistd.h>
-#include <linux/syscalls.h>
-
-#include <linux/irq.h>
-#include <asm/system.h>
-
-#define fb_width(fb) ((fb)->var.xres)
-#define fb_height(fb) ((fb)->var.yres)
-#define fb_size(fb) ((fb)->var.xres * (fb)->var.yres * 2)
-
-static void memset16(void *_ptr, unsigned short val, unsigned count)
-{
- unsigned short *ptr = _ptr;
- count >>= 1;
- while (count--)
- *ptr++ = val;
-}
-
-/* 565RLE image format: [count(2 bytes), rle(2 bytes)] */
-int load_565rle_image(char *filename)
-{
- struct fb_info *info;
- int fd, err = 0;
- unsigned count, max;
- unsigned short *data, *bits, *ptr;
-
- info = registered_fb[0];
- if (!info) {
- printk(KERN_WARNING "%s: Can not access framebuffer\n",
- __func__);
- return -ENODEV;
- }
-
- fd = sys_open(filename, O_RDONLY, 0);
- if (fd < 0) {
- printk(KERN_WARNING "%s: Can not open %s\n",
- __func__, filename);
- return -ENOENT;
- }
- count = (unsigned)sys_lseek(fd, (off_t)0, 2);
- if (count == 0) {
- err = -EIO;
- goto err_logo_close_file;
- }
- sys_lseek(fd, (off_t)0, 0);
- data = kmalloc(count, GFP_KERNEL);
- if (!data) {
- printk(KERN_WARNING "%s: Can not alloc data\n", __func__);
- err = -ENOMEM;
- goto err_logo_close_file;
- }
- if ((unsigned)sys_read(fd, (char *)data, count) != count) {
- err = -EIO;
- goto err_logo_free_data;
- }
-
- max = fb_width(info) * fb_height(info);
- ptr = data;
- bits = (unsigned short *)(info->screen_base);
- while (count > 3) {
- unsigned n = ptr[0];
- if (n > max)
- break;
- memset16(bits, ptr[1], n << 1);
- bits += n;
- max -= n;
- ptr += 2;
- count -= 4;
- }
-
-err_logo_free_data:
- kfree(data);
-err_logo_close_file:
- sys_close(fd);
- return err;
-}
-EXPORT_SYMBOL(load_565rle_image);
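
Aside on the loop above (illustrative only, not part of the patch): load_565rle_image() treats the image as a stream of 16-bit [count, rgb565] pairs, each expanding to count identical pixels. A standalone, userspace-style sketch of the same expansion; the function name and signature here are illustrative, not kernel API:

#include <stddef.h>
#include <stdint.h>

/* Expand a 565RLE stream of [count, rgb565] pairs into 'out'.
 * Returns the number of pixels written; stops early rather than
 * overrunning the output buffer, mirroring the 'n > max' check above.
 */
static size_t rle565_expand(const uint16_t *in, size_t in_words,
			    uint16_t *out, size_t out_pixels)
{
	size_t written = 0;
	size_t i;

	while (in_words >= 2) {
		uint16_t count = in[0];
		uint16_t value = in[1];

		if (count > out_pixels - written)
			break;		/* would overrun the buffer */
		for (i = 0; i < count; i++)
			out[written++] = value;
		in += 2;
		in_words -= 2;
	}
	return written;
}
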
diff --git a/drivers/staging/msm/mddi.c b/drivers/staging/msm/mddi.c
deleted file mode 100644
index 132eb1adff1..00000000000
--- a/drivers/staging/msm/mddi.c
+++ /dev/null
@@ -1,375 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <mach/hardware.h>
-#include <asm/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/uaccess.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-
-#include "msm_fb.h"
-#include "mddihosti.h"
-#include "mddihost.h"
-#include <mach/gpio.h>
-#include <mach/clk.h>
-
-static int mddi_probe(struct platform_device *pdev);
-static int mddi_remove(struct platform_device *pdev);
-
-static int mddi_off(struct platform_device *pdev);
-static int mddi_on(struct platform_device *pdev);
-
-static int mddi_suspend(struct platform_device *pdev, pm_message_t state);
-static int mddi_resume(struct platform_device *pdev);
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
-static void mddi_early_suspend(struct early_suspend *h);
-static void mddi_early_resume(struct early_suspend *h);
-#endif
-
-static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
-static int pdev_list_cnt;
-static struct clk *mddi_clk;
-static struct clk *mddi_pclk;
-static struct mddi_platform_data *mddi_pdata;
-
-static struct platform_driver mddi_driver = {
- .probe = mddi_probe,
- .remove = mddi_remove,
-#ifndef CONFIG_HAS_EARLYSUSPEND
-#ifdef CONFIG_PM
- .suspend = mddi_suspend,
- .resume = mddi_resume,
-#endif
-#endif
- .suspend_late = NULL,
- .resume_early = NULL,
- .shutdown = NULL,
- .driver = {
- .name = "mddi",
- },
-};
-
-extern int int_mddi_pri_flag;
-
-static int mddi_off(struct platform_device *pdev)
-{
- int ret = 0;
-
- ret = panel_next_off(pdev);
-
- if (mddi_pdata && mddi_pdata->mddi_power_save)
- mddi_pdata->mddi_power_save(0);
-
- return ret;
-}
-
-static int mddi_on(struct platform_device *pdev)
-{
- int ret = 0;
- u32 clk_rate;
- struct msm_fb_data_type *mfd;
-
- mfd = platform_get_drvdata(pdev);
-
- if (mddi_pdata && mddi_pdata->mddi_power_save)
- mddi_pdata->mddi_power_save(1);
-
- clk_rate = mfd->fbi->var.pixclock;
- clk_rate = min(clk_rate, mfd->panel_info.clk_max);
-
- if (mddi_pdata &&
- mddi_pdata->mddi_sel_clk &&
- mddi_pdata->mddi_sel_clk(&clk_rate))
- printk(KERN_ERR
- "%s: can't select mddi io clk targate rate = %d\n",
- __func__, clk_rate);
-
- if (clk_set_min_rate(mddi_clk, clk_rate) < 0)
- printk(KERN_ERR "%s: clk_set_min_rate failed\n",
- __func__);
-
- ret = panel_next_on(pdev);
-
- return ret;
-}
-
-static int mddi_resource_initialized;
-
-static int mddi_probe(struct platform_device *pdev)
-{
- struct msm_fb_data_type *mfd;
- struct platform_device *mdp_dev = NULL;
- struct msm_fb_panel_data *pdata = NULL;
- int rc;
- resource_size_t size;
- u32 clk_rate;
-
- if ((pdev->id == 0) && (pdev->num_resources >= 0)) {
- mddi_pdata = pdev->dev.platform_data;
-
- size = resource_size(&pdev->resource[0]);
- msm_pmdh_base = ioremap(pdev->resource[0].start, size);
-
- MSM_FB_INFO("primary mddi base phy_addr = 0x%x virt = 0x%x\n",
- pdev->resource[0].start, (int) msm_pmdh_base);
-
- if (unlikely(!msm_pmdh_base))
- return -ENOMEM;
-
- if (mddi_pdata && mddi_pdata->mddi_power_save)
- mddi_pdata->mddi_power_save(1);
-
- mddi_resource_initialized = 1;
- return 0;
- }
-
- if (!mddi_resource_initialized)
- return -EPERM;
-
- mfd = platform_get_drvdata(pdev);
-
- if (!mfd)
- return -ENODEV;
-
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
- return -ENOMEM;
-
- mdp_dev = platform_device_alloc("mdp", pdev->id);
- if (!mdp_dev)
- return -ENOMEM;
-
- /*
- * link to the latest pdev
- */
- mfd->pdev = mdp_dev;
- mfd->dest = DISPLAY_LCD;
-
- /*
- * alloc panel device data
- */
- if (platform_device_add_data
- (mdp_dev, pdev->dev.platform_data,
- sizeof(struct msm_fb_panel_data))) {
- printk(KERN_ERR "mddi_probe: platform_device_add_data failed!\n");
- platform_device_put(mdp_dev);
- return -ENOMEM;
- }
- /*
- * data chain
- */
- pdata = mdp_dev->dev.platform_data;
- pdata->on = mddi_on;
- pdata->off = mddi_off;
- pdata->next = pdev;
-
- /*
- * get/set panel specific fb info
- */
- mfd->panel_info = pdata->panel_info;
- mfd->fb_imgType = MDP_RGB_565;
-
- clk_rate = mfd->panel_info.clk_max;
- if (mddi_pdata &&
- mddi_pdata->mddi_sel_clk &&
- mddi_pdata->mddi_sel_clk(&clk_rate))
- printk(KERN_ERR
- "%s: can't select mddi io clk targate rate = %d\n",
- __func__, clk_rate);
-
- if (clk_set_max_rate(mddi_clk, clk_rate) < 0)
- printk(KERN_ERR "%s: clk_set_max_rate failed\n", __func__);
- mfd->panel_info.clk_rate = mfd->panel_info.clk_min;
-
- /*
- * set driver data
- */
- platform_set_drvdata(mdp_dev, mfd);
-
- /*
- * register in mdp driver
- */
- rc = platform_device_add(mdp_dev);
- if (rc)
- goto mddi_probe_err;
-
- pdev_list[pdev_list_cnt++] = pdev;
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
- mfd->mddi_early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
- mfd->mddi_early_suspend.suspend = mddi_early_suspend;
- mfd->mddi_early_suspend.resume = mddi_early_resume;
- register_early_suspend(&mfd->mddi_early_suspend);
-#endif
-
- return 0;
-
-mddi_probe_err:
- platform_device_put(mdp_dev);
- return rc;
-}
-
-static int mddi_pad_ctrl;
-static int mddi_power_locked;
-static int mddi_is_in_suspend;
-
-void mddi_disable(int lock)
-{
- mddi_host_type host_idx = MDDI_HOST_PRIM;
-
- if (mddi_power_locked)
- return;
-
- if (lock)
- mddi_power_locked = 1;
-
- if (mddi_host_timer.function)
- del_timer_sync(&mddi_host_timer);
-
- mddi_pad_ctrl = mddi_host_reg_in(PAD_CTL);
- mddi_host_reg_out(PAD_CTL, 0x0);
-
- if (clk_set_min_rate(mddi_clk, 0) < 0)
- printk(KERN_ERR "%s: clk_set_min_rate failed\n", __func__);
-
- clk_disable(mddi_clk);
- if (mddi_pclk)
- clk_disable(mddi_pclk);
- disable_irq(INT_MDDI_PRI);
-
- if (mddi_pdata && mddi_pdata->mddi_power_save)
- mddi_pdata->mddi_power_save(0);
-}
-
-static int mddi_suspend(struct platform_device *pdev, pm_message_t state)
-{
- if (mddi_is_in_suspend)
- return 0;
-
- mddi_is_in_suspend = 1;
- mddi_disable(0);
- return 0;
-}
-
-static int mddi_resume(struct platform_device *pdev)
-{
- mddi_host_type host_idx = MDDI_HOST_PRIM;
-
- if (!mddi_is_in_suspend)
- return 0;
-
- mddi_is_in_suspend = 0;
-
- if (mddi_power_locked)
- return 0;
-
- enable_irq(INT_MDDI_PRI);
- clk_enable(mddi_clk);
- if (mddi_pclk)
- clk_enable(mddi_pclk);
- mddi_host_reg_out(PAD_CTL, mddi_pad_ctrl);
-
- if (mddi_host_timer.function)
- mddi_host_timer_service(0);
-
- return 0;
-}
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
-static void mddi_early_suspend(struct early_suspend *h)
-{
- pm_message_t state;
- struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
- mddi_early_suspend);
-
- state.event = PM_EVENT_SUSPEND;
- mddi_suspend(mfd->pdev, state);
-}
-
-static void mddi_early_resume(struct early_suspend *h)
-{
- struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
- mddi_early_suspend);
- mddi_resume(mfd->pdev);
-}
-#endif
-
-static int mddi_remove(struct platform_device *pdev)
-{
- if (mddi_host_timer.function)
- del_timer_sync(&mddi_host_timer);
-
- iounmap(msm_pmdh_base);
-
- return 0;
-}
-
-static int mddi_register_driver(void)
-{
- return platform_driver_register(&mddi_driver);
-}
-
-static int __init mddi_driver_init(void)
-{
- int ret;
-
- mddi_clk = clk_get(NULL, "mddi_clk");
- if (IS_ERR(mddi_clk)) {
- printk(KERN_ERR "can't find mddi_clk \n");
- return PTR_ERR(mddi_clk);
- }
- clk_enable(mddi_clk);
-
- mddi_pclk = clk_get(NULL, "mddi_pclk");
- if (IS_ERR(mddi_pclk))
- mddi_pclk = NULL;
- else
- clk_enable(mddi_pclk);
-
- ret = mddi_register_driver();
- if (ret) {
- clk_disable(mddi_clk);
- clk_put(mddi_clk);
- if (mddi_pclk) {
- clk_disable(mddi_pclk);
- clk_put(mddi_pclk);
- }
- printk(KERN_ERR "mddi_register_driver() failed!\n");
- return ret;
- }
-
- mddi_init();
-
- return ret;
-}
-
-module_init(mddi_driver_init);
diff --git a/drivers/staging/msm/mddi_ext.c b/drivers/staging/msm/mddi_ext.c
deleted file mode 100644
index c0c168c7199..00000000000
--- a/drivers/staging/msm/mddi_ext.c
+++ /dev/null
@@ -1,320 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <mach/hardware.h>
-#include <asm/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/uaccess.h>
-#include <linux/clk.h>
-#include <mach/clk.h>
-#include <linux/platform_device.h>
-
-#include "msm_fb.h"
-#include "mddihosti.h"
-
-static int mddi_ext_probe(struct platform_device *pdev);
-static int mddi_ext_remove(struct platform_device *pdev);
-
-static int mddi_ext_off(struct platform_device *pdev);
-static int mddi_ext_on(struct platform_device *pdev);
-
-static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
-static int pdev_list_cnt;
-
-static int mddi_ext_suspend(struct platform_device *pdev, pm_message_t state);
-static int mddi_ext_resume(struct platform_device *pdev);
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
-static void mddi_ext_early_suspend(struct early_suspend *h);
-static void mddi_ext_early_resume(struct early_suspend *h);
-#endif
-
-static struct platform_driver mddi_ext_driver = {
- .probe = mddi_ext_probe,
- .remove = mddi_ext_remove,
-#ifndef CONFIG_HAS_EARLYSUSPEND
-#ifdef CONFIG_PM
- .suspend = mddi_ext_suspend,
- .resume = mddi_ext_resume,
-#endif
-#endif
- .resume_early = NULL,
- .suspend_late = NULL,
- .shutdown = NULL,
- .driver = {
- .name = "mddi_ext",
- },
-};
-
-static struct clk *mddi_ext_clk;
-static struct mddi_platform_data *mddi_ext_pdata;
-
-extern int int_mddi_ext_flag;
-
-static int mddi_ext_off(struct platform_device *pdev)
-{
- int ret = 0;
-
- ret = panel_next_off(pdev);
- mddi_host_stop_ext_display();
-
- return ret;
-}
-
-static int mddi_ext_on(struct platform_device *pdev)
-{
- int ret = 0;
- u32 clk_rate;
- struct msm_fb_data_type *mfd;
-
- mfd = platform_get_drvdata(pdev);
-
- clk_rate = mfd->fbi->var.pixclock;
- clk_rate = min(clk_rate, mfd->panel_info.clk_max);
-
- if (mddi_ext_pdata &&
- mddi_ext_pdata->mddi_sel_clk &&
- mddi_ext_pdata->mddi_sel_clk(&clk_rate))
- printk(KERN_ERR
- "%s: can't select mddi io clk targate rate = %d\n",
- __func__, clk_rate);
-
- if (clk_set_min_rate(mddi_ext_clk, clk_rate) < 0)
- printk(KERN_ERR "%s: clk_set_min_rate failed\n",
- __func__);
-
- mddi_host_start_ext_display();
- ret = panel_next_on(pdev);
-
- return ret;
-}
-
-static int mddi_ext_resource_initialized;
-
-static int mddi_ext_probe(struct platform_device *pdev)
-{
- struct msm_fb_data_type *mfd;
- struct platform_device *mdp_dev = NULL;
- struct msm_fb_panel_data *pdata = NULL;
- int rc;
- resource_size_t size;
- u32 clk_rate;
-
- if ((pdev->id == 0) && (pdev->num_resources >= 0)) {
- mddi_ext_pdata = pdev->dev.platform_data;
-
- size = resource_size(&pdev->resource[0]);
- msm_emdh_base = ioremap(pdev->resource[0].start, size);
-
- MSM_FB_INFO("external mddi base address = 0x%x\n",
- pdev->resource[0].start);
-
- if (unlikely(!msm_emdh_base))
- return -ENOMEM;
-
- mddi_ext_resource_initialized = 1;
- return 0;
- }
-
- if (!mddi_ext_resource_initialized)
- return -EPERM;
-
- mfd = platform_get_drvdata(pdev);
-
- if (!mfd)
- return -ENODEV;
-
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
- return -ENOMEM;
-
- mdp_dev = platform_device_alloc("mdp", pdev->id);
- if (!mdp_dev)
- return -ENOMEM;
-
- /*
- * link to the latest pdev
- */
- mfd->pdev = mdp_dev;
- mfd->dest = DISPLAY_EXT_MDDI;
-
- /*
- * alloc panel device data
- */
- if (platform_device_add_data
- (mdp_dev, pdev->dev.platform_data,
- sizeof(struct msm_fb_panel_data))) {
- printk(KERN_ERR "mddi_ext_probe: platform_device_add_data failed!\n");
- platform_device_put(mdp_dev);
- return -ENOMEM;
- }
- /*
- * data chain
- */
- pdata = mdp_dev->dev.platform_data;
- pdata->on = mddi_ext_on;
- pdata->off = mddi_ext_off;
- pdata->next = pdev;
-
- /*
- * get/set panel specific fb info
- */
- mfd->panel_info = pdata->panel_info;
- mfd->fb_imgType = MDP_RGB_565;
-
- clk_rate = mfd->panel_info.clk_max;
- if (mddi_ext_pdata &&
- mddi_ext_pdata->mddi_sel_clk &&
- mddi_ext_pdata->mddi_sel_clk(&clk_rate))
- printk(KERN_ERR
- "%s: can't select mddi io clk targate rate = %d\n",
- __func__, clk_rate);
-
- if (clk_set_max_rate(mddi_ext_clk, clk_rate) < 0)
- printk(KERN_ERR "%s: clk_set_max_rate failed\n", __func__);
- mfd->panel_info.clk_rate = mfd->panel_info.clk_min;
-
- /*
- * set driver data
- */
- platform_set_drvdata(mdp_dev, mfd);
-
- /*
- * register in mdp driver
- */
- rc = platform_device_add(mdp_dev);
- if (rc)
- goto mddi_ext_probe_err;
-
- pdev_list[pdev_list_cnt++] = pdev;
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
- mfd->mddi_ext_early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
- mfd->mddi_ext_early_suspend.suspend = mddi_ext_early_suspend;
- mfd->mddi_ext_early_suspend.resume = mddi_ext_early_resume;
- register_early_suspend(&mfd->mddi_ext_early_suspend);
-#endif
-
- return 0;
-
-mddi_ext_probe_err:
- platform_device_put(mdp_dev);
- return rc;
-}
-
-static int mddi_ext_is_in_suspend;
-
-static int mddi_ext_suspend(struct platform_device *pdev, pm_message_t state)
-{
- if (mddi_ext_is_in_suspend)
- return 0;
-
- mddi_ext_is_in_suspend = 1;
-
- if (clk_set_min_rate(mddi_ext_clk, 0) < 0)
- printk(KERN_ERR "%s: clk_set_min_rate failed\n", __func__);
-
- clk_disable(mddi_ext_clk);
- disable_irq(INT_MDDI_EXT);
-
- return 0;
-}
-
-static int mddi_ext_resume(struct platform_device *pdev)
-{
- struct msm_fb_data_type *mfd;
-
- mfd = platform_get_drvdata(pdev);
-
- if (!mddi_ext_is_in_suspend)
- return 0;
-
- mddi_ext_is_in_suspend = 0;
- enable_irq(INT_MDDI_EXT);
-
- clk_enable(mddi_ext_clk);
-
- return 0;
-}
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
-static void mddi_ext_early_suspend(struct early_suspend *h)
-{
- pm_message_t state;
- struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
- mddi_ext_early_suspend);
-
- state.event = PM_EVENT_SUSPEND;
- mddi_ext_suspend(mfd->pdev, state);
-}
-
-static void mddi_ext_early_resume(struct early_suspend *h)
-{
- struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
- mddi_ext_early_suspend);
- mddi_ext_resume(mfd->pdev);
-}
-#endif
-
-static int mddi_ext_remove(struct platform_device *pdev)
-{
- iounmap(msm_emdh_base);
- return 0;
-}
-
-static int mddi_ext_register_driver(void)
-{
- return platform_driver_register(&mddi_ext_driver);
-}
-
-static int __init mddi_ext_driver_init(void)
-{
- int ret;
-
- mddi_ext_clk = clk_get(NULL, "emdh_clk");
- if (IS_ERR(mddi_ext_clk)) {
- printk(KERN_ERR "can't find emdh_clk\n");
- return PTR_ERR(mddi_ext_clk);
- }
- clk_enable(mddi_ext_clk);
-
- ret = mddi_ext_register_driver();
- if (ret) {
- clk_disable(mddi_ext_clk);
- clk_put(mddi_ext_clk);
- printk(KERN_ERR "mddi_ext_register_driver() failed!\n");
- return ret;
- }
- mddi_init();
-
- return ret;
-}
-
-module_init(mddi_ext_driver_init);
diff --git a/drivers/staging/msm/mddi_ext_lcd.c b/drivers/staging/msm/mddi_ext_lcd.c
deleted file mode 100644
index 502e80d17ec..00000000000
--- a/drivers/staging/msm/mddi_ext_lcd.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-#include "mddihost.h"
-#include "mddihosti.h"
-
-static int mddi_ext_lcd_on(struct platform_device *pdev);
-static int mddi_ext_lcd_off(struct platform_device *pdev);
-
-static int mddi_ext_lcd_on(struct platform_device *pdev)
-{
- return 0;
-}
-
-static int mddi_ext_lcd_off(struct platform_device *pdev)
-{
- return 0;
-}
-
-static int __init mddi_ext_lcd_probe(struct platform_device *pdev)
-{
- msm_fb_add_device(pdev);
-
- return 0;
-}
-
-static struct platform_driver this_driver = {
- .probe = mddi_ext_lcd_probe,
- .driver = {
- .name = "extmddi_svga",
- },
-};
-
-static struct msm_fb_panel_data mddi_ext_lcd_panel_data = {
- .panel_info.xres = 800,
- .panel_info.yres = 600,
- .panel_info.type = EXT_MDDI_PANEL,
- .panel_info.pdest = DISPLAY_1,
- .panel_info.wait_cycle = 0,
- .panel_info.bpp = 18,
- .panel_info.fb_num = 2,
- .panel_info.clk_rate = 122880000,
- .panel_info.clk_min = 120000000,
- .panel_info.clk_max = 125000000,
- .on = mddi_ext_lcd_on,
- .off = mddi_ext_lcd_off,
-};
-
-static struct platform_device this_device = {
- .name = "extmddi_svga",
- .id = 0,
- .dev = {
- .platform_data = &mddi_ext_lcd_panel_data,
- }
-};
-
-static int __init mddi_ext_lcd_init(void)
-{
- int ret;
- struct msm_panel_info *pinfo;
-
- ret = platform_driver_register(&this_driver);
- if (!ret) {
- pinfo = &mddi_ext_lcd_panel_data.panel_info;
- pinfo->lcd.vsync_enable = FALSE;
- pinfo->mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
-
- ret = platform_device_register(&this_device);
- if (ret)
- platform_driver_unregister(&this_driver);
- }
-
- return ret;
-}
-
-module_init(mddi_ext_lcd_init);
diff --git a/drivers/staging/msm/mddi_prism.c b/drivers/staging/msm/mddi_prism.c
deleted file mode 100644
index 489d40405a5..00000000000
--- a/drivers/staging/msm/mddi_prism.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-#include "mddihost.h"
-#include "mddihosti.h"
-
-static int prism_lcd_on(struct platform_device *pdev);
-static int prism_lcd_off(struct platform_device *pdev);
-
-static int prism_lcd_on(struct platform_device *pdev)
-{
- /* Set the MDP pixel data attributes for Primary Display */
- mddi_host_write_pix_attr_reg(0x00C3);
-
- return 0;
-}
-
-static int prism_lcd_off(struct platform_device *pdev)
-{
- return 0;
-}
-
-static int __init prism_probe(struct platform_device *pdev)
-{
- msm_fb_add_device(pdev);
-
- return 0;
-}
-
-static struct platform_driver this_driver = {
- .probe = prism_probe,
- .driver = {
- .name = "mddi_prism_wvga",
- },
-};
-
-static struct msm_fb_panel_data prism_panel_data = {
- .on = prism_lcd_on,
- .off = prism_lcd_off,
-};
-
-static struct platform_device this_device = {
- .name = "mddi_prism_wvga",
- .id = 0,
- .dev = {
- .platform_data = &prism_panel_data,
- }
-};
-
-static int __init prism_init(void)
-{
- int ret;
- struct msm_panel_info *pinfo;
-
-#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
- u32 id;
-
- ret = msm_fb_detect_client("mddi_prism_wvga");
- if (ret == -ENODEV)
- return 0;
-
- if (ret) {
- id = mddi_get_client_id();
-
- if (((id >> 16) != 0x4474) || ((id & 0xffff) == 0x8960))
- return 0;
- }
-#endif
- ret = platform_driver_register(&this_driver);
- if (!ret) {
- pinfo = &prism_panel_data.panel_info;
- pinfo->xres = 800;
- pinfo->yres = 480;
- pinfo->type = MDDI_PANEL;
- pinfo->pdest = DISPLAY_1;
- pinfo->mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
- pinfo->wait_cycle = 0;
- pinfo->bpp = 18;
- pinfo->fb_num = 2;
- pinfo->clk_rate = 153600000;
- pinfo->clk_min = 150000000;
- pinfo->clk_max = 160000000;
- pinfo->lcd.vsync_enable = TRUE;
- pinfo->lcd.refx100 = 6050;
- pinfo->lcd.v_back_porch = 23;
- pinfo->lcd.v_front_porch = 20;
- pinfo->lcd.v_pulse_width = 105;
- pinfo->lcd.hw_vsync_mode = TRUE;
- pinfo->lcd.vsync_notifier_period = 0;
-
- ret = platform_device_register(&this_device);
- if (ret)
- platform_driver_unregister(&this_driver);
- }
-
- return ret;
-}
-
-module_init(prism_init);
diff --git a/drivers/staging/msm/mddi_sharp.c b/drivers/staging/msm/mddi_sharp.c
deleted file mode 100644
index 1da1be4052d..00000000000
--- a/drivers/staging/msm/mddi_sharp.c
+++ /dev/null
@@ -1,892 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-#include "mddihost.h"
-#include "mddihosti.h"
-
-#define SHARP_QVGA_PRIM 1
-#define SHARP_128X128_SECD 2
-
-extern uint32 mddi_host_core_version;
-static boolean mddi_debug_prim_wait = FALSE;
-static boolean mddi_sharp_vsync_wake = TRUE;
-static boolean mddi_sharp_monitor_refresh_value = TRUE;
-static boolean mddi_sharp_report_refresh_measurements = FALSE;
-static uint32 mddi_sharp_rows_per_second = 13830; /* 5200000/376 */
-static uint32 mddi_sharp_rows_per_refresh = 338;
-static uint32 mddi_sharp_usecs_per_refresh = 24440; /* (376*338)/5200000 */
-static boolean mddi_sharp_debug_60hz_refresh = FALSE;
-
-extern mddi_gpio_info_type mddi_gpio;
-extern boolean mddi_vsync_detect_enabled;
-static msm_fb_vsync_handler_type mddi_sharp_vsync_handler;
-static void *mddi_sharp_vsync_handler_arg;
-static uint16 mddi_sharp_vsync_attempts;
-
-static void mddi_sharp_prim_lcd_init(void);
-static void mddi_sharp_sub_lcd_init(void);
-static void mddi_sharp_lcd_set_backlight(struct msm_fb_data_type *mfd);
-static void mddi_sharp_vsync_set_handler(msm_fb_vsync_handler_type handler,
- void *);
-static void mddi_sharp_lcd_vsync_detected(boolean detected);
-static struct msm_panel_common_pdata *mddi_sharp_pdata;
-
-#define REG_SYSCTL 0x0000
-#define REG_INTR 0x0006
-#define REG_CLKCNF 0x000C
-#define REG_CLKDIV1 0x000E
-#define REG_CLKDIV2 0x0010
-
-#define REG_GIOD 0x0040
-#define REG_GIOA 0x0042
-
-#define REG_AGM 0x010A
-#define REG_FLFT 0x0110
-#define REG_FRGT 0x0112
-#define REG_FTOP 0x0114
-#define REG_FBTM 0x0116
-#define REG_FSTRX 0x0118
-#define REG_FSTRY 0x011A
-#define REG_VRAM 0x0202
-#define REG_SSDCTL 0x0330
-#define REG_SSD0 0x0332
-#define REG_PSTCTL1 0x0400
-#define REG_PSTCTL2 0x0402
-#define REG_PTGCTL 0x042A
-#define REG_PTHP 0x042C
-#define REG_PTHB 0x042E
-#define REG_PTHW 0x0430
-#define REG_PTHF 0x0432
-#define REG_PTVP 0x0434
-#define REG_PTVB 0x0436
-#define REG_PTVW 0x0438
-#define REG_PTVF 0x043A
-#define REG_VBLKS 0x0458
-#define REG_VBLKE 0x045A
-#define REG_SUBCTL 0x0700
-#define REG_SUBTCMD 0x0702
-#define REG_SUBTCMDD 0x0704
-#define REG_REVBYTE 0x0A02
-#define REG_REVCNT 0x0A04
-#define REG_REVATTR 0x0A06
-#define REG_REVFMT 0x0A08
-
-#define SHARP_SUB_UNKNOWN 0xffffffff
-#define SHARP_SUB_HYNIX 1
-#define SHARP_SUB_ROHM 2
-
-static uint32 sharp_subpanel_type = SHARP_SUB_UNKNOWN;
-
-static void sub_through_write(int sub_rs, uint32 sub_data)
-{
- mddi_queue_register_write(REG_SUBTCMDD, sub_data, FALSE, 0);
-
- /* CS=1,RD=1,WE=1,RS=sub_rs */
- mddi_queue_register_write(REG_SUBTCMD, 0x000e | sub_rs, FALSE, 0);
-
- /* CS=0,RD=1,WE=1,RS=sub_rs */
- mddi_queue_register_write(REG_SUBTCMD, 0x0006 | sub_rs, FALSE, 0);
-
- /* CS=0,RD=1,WE=0,RS=sub_rs */
- mddi_queue_register_write(REG_SUBTCMD, 0x0004 | sub_rs, FALSE, 0);
-
- /* CS=0,RD=1,WE=1,RS=sub_rs */
- mddi_queue_register_write(REG_SUBTCMD, 0x0006 | sub_rs, FALSE, 0);
-
- /* CS=1,RD=1,WE=1,RS=sub_rs */
- mddi_queue_register_write(REG_SUBTCMD, 0x000e | sub_rs, TRUE, 0);
-}
-
-static uint32 sub_through_read(int sub_rs)
-{
- uint32 sub_data;
-
- /* CS=1,RD=1,WE=1,RS=sub_rs */
- mddi_queue_register_write(REG_SUBTCMD, 0x000e | sub_rs, FALSE, 0);
-
- /* CS=0,RD=1,WE=1,RS=sub_rs */
- mddi_queue_register_write(REG_SUBTCMD, 0x0006 | sub_rs, FALSE, 0);
-
- /* CS=0,RD=1,WE=0,RS=sub_rs */
- mddi_queue_register_write(REG_SUBTCMD, 0x0002 | sub_rs, TRUE, 0);
-
- mddi_queue_register_read(REG_SUBTCMDD, &sub_data, TRUE, 0);
-
- /* CS=0,RD=1,WE=1,RS=sub_rs */
- mddi_queue_register_write(REG_SUBTCMD, 0x0006 | sub_rs, FALSE, 0);
-
- /* CS=1,RD=1,WE=1,RS=sub_rs */
- mddi_queue_register_write(REG_SUBTCMD, 0x000e | sub_rs, TRUE, 0);
-
- return sub_data;
-}
-
-static void serigo(uint32 ssd)
-{
- uint32 ssdctl;
-
- mddi_queue_register_read(REG_SSDCTL, &ssdctl, TRUE, 0);
- ssdctl = ((ssdctl & 0xE7) | 0x02);
-
- mddi_queue_register_write(REG_SSD0, ssd, FALSE, 0);
- mddi_queue_register_write(REG_SSDCTL, ssdctl, TRUE, 0);
-
- do {
- mddi_queue_register_read(REG_SSDCTL, &ssdctl, TRUE, 0);
- } while ((ssdctl & 0x0002) != 0);
-
- if (mddi_debug_prim_wait)
- mddi_wait(2);
-}
-
-static void mddi_sharp_lcd_powerdown(void)
-{
- serigo(0x0131);
- serigo(0x0300);
- mddi_wait(40);
- serigo(0x0135);
- mddi_wait(20);
- serigo(0x2122);
- mddi_wait(20);
- serigo(0x0201);
- mddi_wait(20);
- serigo(0x2100);
- mddi_wait(20);
- serigo(0x2000);
- mddi_wait(20);
-
- mddi_queue_register_write(REG_PSTCTL1, 0x1, TRUE, 0);
- mddi_wait(100);
- mddi_queue_register_write(REG_PSTCTL1, 0x0, TRUE, 0);
- mddi_wait(2);
- mddi_queue_register_write(REG_SYSCTL, 0x1, TRUE, 0);
- mddi_wait(2);
- mddi_queue_register_write(REG_CLKDIV1, 0x3, TRUE, 0);
- mddi_wait(2);
- mddi_queue_register_write(REG_SSDCTL, 0x0000, TRUE, 0); /* SSDRESET */
- mddi_queue_register_write(REG_SYSCTL, 0x0, TRUE, 0);
- mddi_wait(2);
-}
-
-static void mddi_sharp_lcd_set_backlight(struct msm_fb_data_type *mfd)
-{
- uint32 regdata;
- int32 level;
- int max = mfd->panel_info.bl_max;
- int min = mfd->panel_info.bl_min;
-
- if (mddi_sharp_pdata && mddi_sharp_pdata->backlight_level) {
- level = mddi_sharp_pdata->backlight_level(mfd->bl_level,
- max,
- min);
-
- if (level < 0)
- return;
-
- /* use Rodem GPIO(2:0) to give 8 levels of backlight (7-0) */
- /* Set lower 3 GPIOs as Outputs (set to 0) */
- mddi_queue_register_read(REG_GIOA, &regdata, TRUE, 0);
- mddi_queue_register_write(REG_GIOA, regdata & 0xfff8, TRUE, 0);
-
- /* Set lower 3 GPIOs as level */
- mddi_queue_register_read(REG_GIOD, &regdata, TRUE, 0);
- mddi_queue_register_write(REG_GIOD,
- (regdata & 0xfff8) | (0x07 & level), TRUE, 0);
- }
-}
-
-static void mddi_sharp_prim_lcd_init(void)
-{
- mddi_queue_register_write(REG_SYSCTL, 0x4000, TRUE, 0);
- mddi_wait(1);
- mddi_queue_register_write(REG_SYSCTL, 0x0000, TRUE, 0);
- mddi_wait(5);
- mddi_queue_register_write(REG_SYSCTL, 0x0001, FALSE, 0);
- mddi_queue_register_write(REG_CLKDIV1, 0x000b, FALSE, 0);
-
- /* new reg write below */
- if (mddi_sharp_debug_60hz_refresh)
- mddi_queue_register_write(REG_CLKCNF, 0x070d, FALSE, 0);
- else
- mddi_queue_register_write(REG_CLKCNF, 0x0708, FALSE, 0);
-
- mddi_queue_register_write(REG_SYSCTL, 0x0201, FALSE, 0);
- mddi_queue_register_write(REG_PTGCTL, 0x0010, FALSE, 0);
- mddi_queue_register_write(REG_PTHP, 4, FALSE, 0);
- mddi_queue_register_write(REG_PTHB, 40, FALSE, 0);
- mddi_queue_register_write(REG_PTHW, 240, FALSE, 0);
- if (mddi_sharp_debug_60hz_refresh)
- mddi_queue_register_write(REG_PTHF, 12, FALSE, 0);
- else
- mddi_queue_register_write(REG_PTHF, 92, FALSE, 0);
-
- mddi_wait(1);
-
- mddi_queue_register_write(REG_PTVP, 1, FALSE, 0);
- mddi_queue_register_write(REG_PTVB, 2, FALSE, 0);
- mddi_queue_register_write(REG_PTVW, 320, FALSE, 0);
- mddi_queue_register_write(REG_PTVF, 15, FALSE, 0);
-
- mddi_wait(1);
-
- /* vram_color set REG_AGM???? */
- mddi_queue_register_write(REG_AGM, 0x0000, TRUE, 0);
-
- mddi_queue_register_write(REG_SSDCTL, 0x0000, FALSE, 0);
- mddi_queue_register_write(REG_SSDCTL, 0x0001, TRUE, 0);
- mddi_wait(1);
- mddi_queue_register_write(REG_PSTCTL1, 0x0001, TRUE, 0);
- mddi_wait(10);
-
- serigo(0x0701);
- /* software reset */
- mddi_wait(1);
- /* Wait over 50us */
-
- serigo(0x0400);
- /* DCLK~ACHSYNC~ACVSYNC polarity setting */
- serigo(0x2900);
- /* EEPROM start read address setting */
- serigo(0x2606);
- /* EEPROM start read register setting */
- mddi_wait(20);
- /* Wait over 20ms */
-
- serigo(0x0503);
- /* Horizontal timing setting */
- serigo(0x062C);
- /* Vertical timing setting */
- serigo(0x2001);
- /* power initialize setting(VDC2) */
- mddi_wait(20);
- /* Wait over 20ms */
-
- serigo(0x2120);
- /* Initialize power setting(CPS) */
- mddi_wait(20);
- /* Wait over 20ms */
-
- serigo(0x2130);
- /* Initialize power setting(CPS) */
- mddi_wait(20);
- /* Wait over 20ms */
-
- serigo(0x2132);
- /* Initialize power setting(CPS) */
- mddi_wait(10);
- /* Wait over 10ms */
-
- serigo(0x2133);
- /* Initialize power setting(CPS) */
- mddi_wait(20);
- /* Wait over 20ms */
-
- serigo(0x0200);
- /* Panel initialize release(INIT) */
- mddi_wait(1);
- /* Wait over 1ms */
-
- serigo(0x0131);
- /* Panel setting(CPS) */
- mddi_wait(1);
- /* Wait over 1ms */
-
- mddi_queue_register_write(REG_PSTCTL1, 0x0003, TRUE, 0);
-
- /* if (FFA LCD is upside down) -> serigo(0x0100); */
- serigo(0x0130);
-
- /* Black mask release(display ON) */
- mddi_wait(1);
- /* Wait over 1ms */
-
- if (mddi_sharp_vsync_wake) {
- mddi_queue_register_write(REG_VBLKS, 0x1001, TRUE, 0);
- mddi_queue_register_write(REG_VBLKE, 0x1002, TRUE, 0);
- }
-
- /* Set the MDP pixel data attributes for Primary Display */
- mddi_host_write_pix_attr_reg(0x00C3);
- return;
-
-}
-
-void mddi_sharp_sub_lcd_init(void)
-{
-
- mddi_queue_register_write(REG_SYSCTL, 0x4000, FALSE, 0);
- mddi_queue_register_write(REG_SYSCTL, 0x0000, TRUE, 0);
- mddi_wait(100);
-
- mddi_queue_register_write(REG_SYSCTL, 0x0001, FALSE, 0);
- mddi_queue_register_write(REG_CLKDIV1, 0x000b, FALSE, 0);
- mddi_queue_register_write(REG_CLKCNF, 0x0708, FALSE, 0);
- mddi_queue_register_write(REG_SYSCTL, 0x0201, FALSE, 0);
- mddi_queue_register_write(REG_PTGCTL, 0x0010, FALSE, 0);
- mddi_queue_register_write(REG_PTHP, 4, FALSE, 0);
- mddi_queue_register_write(REG_PTHB, 40, FALSE, 0);
- mddi_queue_register_write(REG_PTHW, 128, FALSE, 0);
- mddi_queue_register_write(REG_PTHF, 92, FALSE, 0);
- mddi_queue_register_write(REG_PTVP, 1, FALSE, 0);
- mddi_queue_register_write(REG_PTVB, 2, FALSE, 0);
- mddi_queue_register_write(REG_PTVW, 128, FALSE, 0);
- mddi_queue_register_write(REG_PTVF, 15, FALSE, 0);
-
- /* Now the sub display..... */
- /* Reset High */
- mddi_queue_register_write(REG_SUBCTL, 0x0200, FALSE, 0);
- /* CS=1,RD=1,WE=1,RS=1 */
- mddi_queue_register_write(REG_SUBTCMD, 0x000f, TRUE, 0);
- mddi_wait(1);
- /* Wait 5us */
-
- if (sharp_subpanel_type == SHARP_SUB_UNKNOWN) {
- uint32 data;
-
- sub_through_write(1, 0x05);
- sub_through_write(1, 0x6A);
- sub_through_write(1, 0x1D);
- sub_through_write(1, 0x05);
- data = sub_through_read(1);
- if (data == 0x6A) {
- sharp_subpanel_type = SHARP_SUB_HYNIX;
- } else {
- sub_through_write(0, 0x36);
- sub_through_write(1, 0xA8);
- sub_through_write(0, 0x09);
- data = sub_through_read(1);
- data = sub_through_read(1);
- if (data == 0x54) {
- sub_through_write(0, 0x36);
- sub_through_write(1, 0x00);
- sharp_subpanel_type = SHARP_SUB_ROHM;
- }
- }
- }
-
- if (sharp_subpanel_type == SHARP_SUB_HYNIX) {
- sub_through_write(1, 0x00); /* Display setting 1 */
- sub_through_write(1, 0x04);
- sub_through_write(1, 0x01);
- sub_through_write(1, 0x05);
- sub_through_write(1, 0x0280);
- sub_through_write(1, 0x0301);
- sub_through_write(1, 0x0402);
- sub_through_write(1, 0x0500);
- sub_through_write(1, 0x0681);
- sub_through_write(1, 0x077F);
- sub_through_write(1, 0x08C0);
- sub_through_write(1, 0x0905);
- sub_through_write(1, 0x0A02);
- sub_through_write(1, 0x0B00);
- sub_through_write(1, 0x0C00);
- sub_through_write(1, 0x0D00);
- sub_through_write(1, 0x0E00);
- sub_through_write(1, 0x0F00);
-
- sub_through_write(1, 0x100B); /* Display setting 2 */
- sub_through_write(1, 0x1103);
- sub_through_write(1, 0x1237);
- sub_through_write(1, 0x1300);
- sub_through_write(1, 0x1400);
- sub_through_write(1, 0x1500);
- sub_through_write(1, 0x1605);
- sub_through_write(1, 0x1700);
- sub_through_write(1, 0x1800);
- sub_through_write(1, 0x192E);
- sub_through_write(1, 0x1A00);
- sub_through_write(1, 0x1B00);
- sub_through_write(1, 0x1C00);
-
- sub_through_write(1, 0x151A); /* Power setting */
-
- sub_through_write(1, 0x2002); /* Gradation Palette setting */
- sub_through_write(1, 0x2107);
- sub_through_write(1, 0x220C);
- sub_through_write(1, 0x2310);
- sub_through_write(1, 0x2414);
- sub_through_write(1, 0x2518);
- sub_through_write(1, 0x261C);
- sub_through_write(1, 0x2720);
- sub_through_write(1, 0x2824);
- sub_through_write(1, 0x2928);
- sub_through_write(1, 0x2A2B);
- sub_through_write(1, 0x2B2E);
- sub_through_write(1, 0x2C31);
- sub_through_write(1, 0x2D34);
- sub_through_write(1, 0x2E37);
- sub_through_write(1, 0x2F3A);
- sub_through_write(1, 0x303C);
- sub_through_write(1, 0x313E);
- sub_through_write(1, 0x323F);
- sub_through_write(1, 0x3340);
- sub_through_write(1, 0x3441);
- sub_through_write(1, 0x3543);
- sub_through_write(1, 0x3646);
- sub_through_write(1, 0x3749);
- sub_through_write(1, 0x384C);
- sub_through_write(1, 0x394F);
- sub_through_write(1, 0x3A52);
- sub_through_write(1, 0x3B59);
- sub_through_write(1, 0x3C60);
- sub_through_write(1, 0x3D67);
- sub_through_write(1, 0x3E6E);
- sub_through_write(1, 0x3F7F);
- sub_through_write(1, 0x4001);
- sub_through_write(1, 0x4107);
- sub_through_write(1, 0x420C);
- sub_through_write(1, 0x4310);
- sub_through_write(1, 0x4414);
- sub_through_write(1, 0x4518);
- sub_through_write(1, 0x461C);
- sub_through_write(1, 0x4720);
- sub_through_write(1, 0x4824);
- sub_through_write(1, 0x4928);
- sub_through_write(1, 0x4A2B);
- sub_through_write(1, 0x4B2E);
- sub_through_write(1, 0x4C31);
- sub_through_write(1, 0x4D34);
- sub_through_write(1, 0x4E37);
- sub_through_write(1, 0x4F3A);
- sub_through_write(1, 0x503C);
- sub_through_write(1, 0x513E);
- sub_through_write(1, 0x523F);
- sub_through_write(1, 0x5340);
- sub_through_write(1, 0x5441);
- sub_through_write(1, 0x5543);
- sub_through_write(1, 0x5646);
- sub_through_write(1, 0x5749);
- sub_through_write(1, 0x584C);
- sub_through_write(1, 0x594F);
- sub_through_write(1, 0x5A52);
- sub_through_write(1, 0x5B59);
- sub_through_write(1, 0x5C60);
- sub_through_write(1, 0x5D67);
- sub_through_write(1, 0x5E6E);
- sub_through_write(1, 0x5F7E);
- sub_through_write(1, 0x6000);
- sub_through_write(1, 0x6107);
- sub_through_write(1, 0x620C);
- sub_through_write(1, 0x6310);
- sub_through_write(1, 0x6414);
- sub_through_write(1, 0x6518);
- sub_through_write(1, 0x661C);
- sub_through_write(1, 0x6720);
- sub_through_write(1, 0x6824);
- sub_through_write(1, 0x6928);
- sub_through_write(1, 0x6A2B);
- sub_through_write(1, 0x6B2E);
- sub_through_write(1, 0x6C31);
- sub_through_write(1, 0x6D34);
- sub_through_write(1, 0x6E37);
- sub_through_write(1, 0x6F3A);
- sub_through_write(1, 0x703C);
- sub_through_write(1, 0x713E);
- sub_through_write(1, 0x723F);
- sub_through_write(1, 0x7340);
- sub_through_write(1, 0x7441);
- sub_through_write(1, 0x7543);
- sub_through_write(1, 0x7646);
- sub_through_write(1, 0x7749);
- sub_through_write(1, 0x784C);
- sub_through_write(1, 0x794F);
- sub_through_write(1, 0x7A52);
- sub_through_write(1, 0x7B59);
- sub_through_write(1, 0x7C60);
- sub_through_write(1, 0x7D67);
- sub_through_write(1, 0x7E6E);
- sub_through_write(1, 0x7F7D);
-
- sub_through_write(1, 0x1851); /* Display on */
-
- mddi_queue_register_write(REG_AGM, 0x0000, TRUE, 0);
-
- /* 1 pixel / 1 post clock */
- mddi_queue_register_write(REG_CLKDIV2, 0x3b00, FALSE, 0);
-
- /* SUB LCD select */
- mddi_queue_register_write(REG_PSTCTL2, 0x0080, FALSE, 0);
-
- /* RS=0,command initiate number=0,select master mode */
- mddi_queue_register_write(REG_SUBCTL, 0x0202, FALSE, 0);
-
- /* Sub LCD Data transform start */
- mddi_queue_register_write(REG_PSTCTL1, 0x0003, FALSE, 0);
-
- } else if (sharp_subpanel_type == SHARP_SUB_ROHM) {
-
- sub_through_write(0, 0x01); /* Display setting */
- sub_through_write(1, 0x00);
-
- mddi_wait(1);
- /* Wait 100us (Update 2005/01/24) */
-
- sub_through_write(0, 0xB6);
- sub_through_write(1, 0x0C);
- sub_through_write(1, 0x4A);
- sub_through_write(1, 0x20);
- sub_through_write(0, 0x3A);
- sub_through_write(1, 0x05);
- sub_through_write(0, 0xB7);
- sub_through_write(1, 0x01);
- sub_through_write(0, 0xBA);
- sub_through_write(1, 0x20);
- sub_through_write(1, 0x02);
- sub_through_write(0, 0x25);
- sub_through_write(1, 0x4F);
- sub_through_write(0, 0xBB);
- sub_through_write(1, 0x00);
- sub_through_write(0, 0x36);
- sub_through_write(1, 0x00);
- sub_through_write(0, 0xB1);
- sub_through_write(1, 0x05);
- sub_through_write(0, 0xBE);
- sub_through_write(1, 0x80);
- sub_through_write(0, 0x26);
- sub_through_write(1, 0x01);
- sub_through_write(0, 0x2A);
- sub_through_write(1, 0x02);
- sub_through_write(1, 0x81);
- sub_through_write(0, 0x2B);
- sub_through_write(1, 0x00);
- sub_through_write(1, 0x7F);
-
- sub_through_write(0, 0x2C);
- sub_through_write(0, 0x11); /* Sleep mode off */
-
- mddi_wait(1);
- /* Wait 100 ms (Update 2005/01/24) */
-
- sub_through_write(0, 0x29); /* Display on */
- sub_through_write(0, 0xB3);
- sub_through_write(1, 0x20);
- sub_through_write(1, 0xAA);
- sub_through_write(1, 0xA0);
- sub_through_write(1, 0x20);
- sub_through_write(1, 0x30);
- sub_through_write(1, 0xA6);
- sub_through_write(1, 0xFF);
- sub_through_write(1, 0x9A);
- sub_through_write(1, 0x9F);
- sub_through_write(1, 0xAF);
- sub_through_write(1, 0xBC);
- sub_through_write(1, 0xCF);
- sub_through_write(1, 0xDF);
- sub_through_write(1, 0x20);
- sub_through_write(1, 0x9C);
- sub_through_write(1, 0x8A);
-
- sub_through_write(0, 0x002C); /* Display on */
-
- /* 1 pixel / 2 post clock */
- mddi_queue_register_write(REG_CLKDIV2, 0x7b00, FALSE, 0);
-
- /* SUB LCD select */
- mddi_queue_register_write(REG_PSTCTL2, 0x0080, FALSE, 0);
-
- /* RS=1,command initiate number=0,select master mode */
- mddi_queue_register_write(REG_SUBCTL, 0x0242, FALSE, 0);
-
- /* Sub LCD Data transform start */
- mddi_queue_register_write(REG_PSTCTL1, 0x0003, FALSE, 0);
-
- }
-
- /* Set the MDP pixel data attributes for Sub Display */
- mddi_host_write_pix_attr_reg(0x00C0);
-}
-
-void mddi_sharp_lcd_vsync_detected(boolean detected)
-{
- /* static timetick_type start_time = 0; */
- static struct timeval start_time;
- static boolean first_time = TRUE;
- /* uint32 mdp_cnt_val = 0; */
- /* timetick_type elapsed_us; */
- struct timeval now;
- uint32 elapsed_us;
- uint32 num_vsyncs;
-
- if ((detected) || (mddi_sharp_vsync_attempts > 5)) {
- if ((detected) && (mddi_sharp_monitor_refresh_value)) {
- /* if (start_time != 0) */
- if (!first_time) {
- jiffies_to_timeval(jiffies, &now);
- elapsed_us =
- (now.tv_sec - start_time.tv_sec) * 1000000 +
- now.tv_usec - start_time.tv_usec;
- /*
- * LCD is configured for a refresh every
- * mddi_sharp_usecs_per_refresh usecs, so to determine the
- * number of vsyncs that have occurred since the last
- * measurement, add half the refresh period to the elapsed
- * time and divide by the refresh period.
- */
- num_vsyncs = (elapsed_us +
- (mddi_sharp_usecs_per_refresh >>
- 1)) /
- mddi_sharp_usecs_per_refresh;
- /*
- * LCD is configured for mddi_sharp_rows_per_refresh hsyncs
- * (rows) per refresh cycle. Calculate new rows_per_second
- * value based upon these new measurements.
- * MDP can update with this new value.
- */
- mddi_sharp_rows_per_second =
- (mddi_sharp_rows_per_refresh * 1000 *
- num_vsyncs) / (elapsed_us / 1000);
- }
- /* start_time = timetick_get(); */
- first_time = FALSE;
- jiffies_to_timeval(jiffies, &start_time);
- if (mddi_sharp_report_refresh_measurements) {
- /* mdp_cnt_val = MDP_LINE_COUNT; */
- }
- }
- /* if detected = TRUE, client initiated wakeup was detected */
- if (mddi_sharp_vsync_handler != NULL) {
- (*mddi_sharp_vsync_handler)
- (mddi_sharp_vsync_handler_arg);
- mddi_sharp_vsync_handler = NULL;
- }
- mddi_vsync_detect_enabled = FALSE;
- mddi_sharp_vsync_attempts = 0;
- /* need to clear this vsync wakeup */
- if (!mddi_queue_register_write_int(REG_INTR, 0x0000)) {
- MDDI_MSG_ERR("Vsync interrupt clear failed!\n");
- }
- if (!detected) {
- /* give up after 5 failed attempts but show error */
- MDDI_MSG_NOTICE("Vsync detection failed!\n");
- } else if ((mddi_sharp_monitor_refresh_value) &&
- (mddi_sharp_report_refresh_measurements)) {
- MDDI_MSG_NOTICE(" Lines Per Second=%d!\n",
- mddi_sharp_rows_per_second);
- }
- } else
- /* if detected = FALSE, we woke up from hibernation, but did not
- * detect client initiated wakeup.
- */
- mddi_sharp_vsync_attempts++;
-}
-
-/* ISR to be executed */
-void mddi_sharp_vsync_set_handler(msm_fb_vsync_handler_type handler, void *arg)
-{
- boolean error = FALSE;
- unsigned long flags;
-
- /* Disable interrupts */
- spin_lock_irqsave(&mddi_host_spin_lock, flags);
- /* INTLOCK(); */
-
- if (mddi_sharp_vsync_handler != NULL)
- error = TRUE;
-
- /* Register the handler for this particular GROUP interrupt source */
- mddi_sharp_vsync_handler = handler;
- mddi_sharp_vsync_handler_arg = arg;
-
- /* Restore interrupts */
- spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
- /* INTFREE(); */
-
- if (error)
- MDDI_MSG_ERR("MDDI: Previous Vsync handler never called\n");
-
- /* Enable the vsync wakeup */
- mddi_queue_register_write(REG_INTR, 0x8100, FALSE, 0);
-
- mddi_sharp_vsync_attempts = 1;
- mddi_vsync_detect_enabled = TRUE;
-} /* mddi_sharp_vsync_set_handler */
-
-static int mddi_sharp_lcd_on(struct platform_device *pdev)
-{
- struct msm_fb_data_type *mfd;
-
- mfd = platform_get_drvdata(pdev);
-
- if (!mfd)
- return -ENODEV;
-
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- if (mfd->panel.id == SHARP_QVGA_PRIM)
- mddi_sharp_prim_lcd_init();
- else
- mddi_sharp_sub_lcd_init();
-
- return 0;
-}
-
-static int mddi_sharp_lcd_off(struct platform_device *pdev)
-{
- mddi_sharp_lcd_powerdown();
- return 0;
-}
-
-static int __init mddi_sharp_probe(struct platform_device *pdev)
-{
- if (pdev->id == 0) {
- mddi_sharp_pdata = pdev->dev.platform_data;
- return 0;
- }
-
- msm_fb_add_device(pdev);
-
- return 0;
-}
-
-static struct platform_driver this_driver = {
- .probe = mddi_sharp_probe,
- .driver = {
- .name = "mddi_sharp_qvga",
- },
-};
-
-static struct msm_fb_panel_data mddi_sharp_panel_data0 = {
- .on = mddi_sharp_lcd_on,
- .off = mddi_sharp_lcd_off,
- .set_backlight = mddi_sharp_lcd_set_backlight,
- .set_vsync_notifier = mddi_sharp_vsync_set_handler,
-};
-
-static struct platform_device this_device_0 = {
- .name = "mddi_sharp_qvga",
- .id = SHARP_QVGA_PRIM,
- .dev = {
- .platform_data = &mddi_sharp_panel_data0,
- }
-};
-
-static struct msm_fb_panel_data mddi_sharp_panel_data1 = {
- .on = mddi_sharp_lcd_on,
- .off = mddi_sharp_lcd_off,
-};
-
-static struct platform_device this_device_1 = {
- .name = "mddi_sharp_qvga",
- .id = SHARP_128X128_SECD,
- .dev = {
- .platform_data = &mddi_sharp_panel_data1,
- }
-};
-
-static int __init mddi_sharp_init(void)
-{
- int ret;
- struct msm_panel_info *pinfo;
-
-#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
- u32 id;
-
- ret = msm_fb_detect_client("mddi_sharp_qvga");
- if (ret == -ENODEV)
- return 0;
-
- if (ret) {
- id = mddi_get_client_id();
-
- if (((id >> 16) != 0x0) || ((id & 0xffff) != 0x8835))
- return 0;
- }
-#endif
- if (mddi_host_core_version > 8) {
- /* can use faster refresh with newer hw revisions */
- mddi_sharp_debug_60hz_refresh = TRUE;
-
- /* Timing variables for tracking vsync */
- /* dot_clock = 6.00MHz
- * horizontal count = 296
- * vertical count = 338
- * refresh rate = 6000000/(296*338) = 60Hz
- */
- mddi_sharp_rows_per_second = 20270; /* 6000000/296 */
- mddi_sharp_rows_per_refresh = 338;
- mddi_sharp_usecs_per_refresh = 16674; /* (296*338)/6000000 */
- } else {
- /* Timing variables for tracking vsync */
- /* dot_clock = 5.20MHz
- * horizontal count = 376
- * vertical count = 338
- * refresh rate = 5200000/(376*338) = 41Hz
- */
- mddi_sharp_rows_per_second = 13830; /* 5200000/376 */
- mddi_sharp_rows_per_refresh = 338;
- mddi_sharp_usecs_per_refresh = 24440; /* (376*338)/5200000 */
- }
-
- ret = platform_driver_register(&this_driver);
- if (!ret) {
- pinfo = &mddi_sharp_panel_data0.panel_info;
- pinfo->xres = 240;
- pinfo->yres = 320;
- pinfo->type = MDDI_PANEL;
- pinfo->pdest = DISPLAY_1;
- pinfo->mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
- pinfo->wait_cycle = 0;
- pinfo->bpp = 18;
- pinfo->fb_num = 2;
- pinfo->clk_rate = 122880000;
- pinfo->clk_min = 120000000;
- pinfo->clk_max = 125000000;
- pinfo->lcd.vsync_enable = TRUE;
- pinfo->lcd.refx100 =
- (mddi_sharp_rows_per_second * 100) /
- mddi_sharp_rows_per_refresh;
- pinfo->lcd.v_back_porch = 12;
- pinfo->lcd.v_front_porch = 6;
- pinfo->lcd.v_pulse_width = 0;
- pinfo->lcd.hw_vsync_mode = FALSE;
- pinfo->lcd.vsync_notifier_period = (1 * HZ);
- pinfo->bl_max = 7;
- pinfo->bl_min = 1;
-
- ret = platform_device_register(&this_device_0);
- if (ret)
- platform_driver_unregister(&this_driver);
-
- pinfo = &mddi_sharp_panel_data1.panel_info;
- pinfo->xres = 128;
- pinfo->yres = 128;
- pinfo->type = MDDI_PANEL;
- pinfo->pdest = DISPLAY_2;
- pinfo->mddi.vdopkt = 0x400;
- pinfo->wait_cycle = 0;
- pinfo->bpp = 18;
- pinfo->clk_rate = 122880000;
- pinfo->clk_min = 120000000;
- pinfo->clk_max = 125000000;
- pinfo->fb_num = 2;
-
- ret = platform_device_register(&this_device_1);
- if (ret) {
- platform_device_unregister(&this_device_0);
- platform_driver_unregister(&this_driver);
- }
- }
-
- if (!ret)
- mddi_lcd.vsync_detected = mddi_sharp_lcd_vsync_detected;
-
- return ret;
-}
-
-module_init(mddi_sharp_init);
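
Aside on the constants above (illustrative only, not part of the patch): the vsync bookkeeping values that mddi_sharp_init() selects for host core versions above 8 follow directly from the 6.00 MHz dot clock and the 296 x 338 horizontal/vertical counts. A minimal standalone C sketch of the arithmetic, reproducing the 20270 and 16674 figures used above:

#include <stdio.h>

int main(void)
{
	/* Timings used for MDDI host core versions > 8 (see above). */
	unsigned long dot_clock = 6000000UL;	/* Hz */
	unsigned long h_count = 296;		/* dot clocks per row */
	unsigned long v_count = 338;		/* rows per refresh */

	printf("rows_per_second   = %lu\n", dot_clock / h_count);
	printf("usecs_per_refresh = %lu\n",
	       (h_count * v_count) / (dot_clock / 1000000UL));
	printf("refresh rate     ~= %lu.%02lu Hz\n",
	       dot_clock / (h_count * v_count),
	       (dot_clock * 100UL / (h_count * v_count)) % 100UL);
	return 0;
}
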
diff --git a/drivers/staging/msm/mddi_toshiba.c b/drivers/staging/msm/mddi_toshiba.c
deleted file mode 100644
index e96342d477a..00000000000
--- a/drivers/staging/msm/mddi_toshiba.c
+++ /dev/null
@@ -1,1741 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-#include "mddihost.h"
-#include "mddihosti.h"
-#include "mddi_toshiba.h"
-
-#define TM_GET_DID(id) ((id) & 0xff)
-#define TM_GET_PID(id) (((id) & 0xff00)>>8)
-
-#define MDDI_CLIENT_CORE_BASE 0x108000
-#define LCD_CONTROL_BLOCK_BASE 0x110000
-#define SPI_BLOCK_BASE 0x120000
-#define PWM_BLOCK_BASE 0x140000
-#define SYSTEM_BLOCK1_BASE 0x160000
-
-#define TTBUSSEL (MDDI_CLIENT_CORE_BASE|0x18)
-#define DPSET0 (MDDI_CLIENT_CORE_BASE|0x1C)
-#define DPSET1 (MDDI_CLIENT_CORE_BASE|0x20)
-#define DPSUS (MDDI_CLIENT_CORE_BASE|0x24)
-#define DPRUN (MDDI_CLIENT_CORE_BASE|0x28)
-#define SYSCKENA (MDDI_CLIENT_CORE_BASE|0x2C)
-
-#define BITMAP0 (MDDI_CLIENT_CORE_BASE|0x44)
-#define BITMAP1 (MDDI_CLIENT_CORE_BASE|0x48)
-#define BITMAP2 (MDDI_CLIENT_CORE_BASE|0x4C)
-#define BITMAP3 (MDDI_CLIENT_CORE_BASE|0x50)
-#define BITMAP4 (MDDI_CLIENT_CORE_BASE|0x54)
-
-#define SRST (LCD_CONTROL_BLOCK_BASE|0x00)
-#define PORT_ENB (LCD_CONTROL_BLOCK_BASE|0x04)
-#define START (LCD_CONTROL_BLOCK_BASE|0x08)
-#define PORT (LCD_CONTROL_BLOCK_BASE|0x0C)
-
-#define INTFLG (LCD_CONTROL_BLOCK_BASE|0x18)
-#define INTMSK (LCD_CONTROL_BLOCK_BASE|0x1C)
-#define MPLFBUF (LCD_CONTROL_BLOCK_BASE|0x20)
-
-#define PXL (LCD_CONTROL_BLOCK_BASE|0x30)
-#define HCYCLE (LCD_CONTROL_BLOCK_BASE|0x34)
-#define HSW (LCD_CONTROL_BLOCK_BASE|0x38)
-#define HDE_START (LCD_CONTROL_BLOCK_BASE|0x3C)
-#define HDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x40)
-#define VCYCLE (LCD_CONTROL_BLOCK_BASE|0x44)
-#define VSW (LCD_CONTROL_BLOCK_BASE|0x48)
-#define VDE_START (LCD_CONTROL_BLOCK_BASE|0x4C)
-#define VDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x50)
-#define WAKEUP (LCD_CONTROL_BLOCK_BASE|0x54)
-#define REGENB (LCD_CONTROL_BLOCK_BASE|0x5C)
-#define VSYNIF (LCD_CONTROL_BLOCK_BASE|0x60)
-#define WRSTB (LCD_CONTROL_BLOCK_BASE|0x64)
-#define RDSTB (LCD_CONTROL_BLOCK_BASE|0x68)
-#define ASY_DATA (LCD_CONTROL_BLOCK_BASE|0x6C)
-#define ASY_DATB (LCD_CONTROL_BLOCK_BASE|0x70)
-#define ASY_DATC (LCD_CONTROL_BLOCK_BASE|0x74)
-#define ASY_DATD (LCD_CONTROL_BLOCK_BASE|0x78)
-#define ASY_DATE (LCD_CONTROL_BLOCK_BASE|0x7C)
-#define ASY_DATF (LCD_CONTROL_BLOCK_BASE|0x80)
-#define ASY_DATG (LCD_CONTROL_BLOCK_BASE|0x84)
-#define ASY_DATH (LCD_CONTROL_BLOCK_BASE|0x88)
-#define ASY_CMDSET (LCD_CONTROL_BLOCK_BASE|0x8C)
-#define MONI (LCD_CONTROL_BLOCK_BASE|0xB0)
-#define VPOS (LCD_CONTROL_BLOCK_BASE|0xC0)
-
-#define SSICTL (SPI_BLOCK_BASE|0x00)
-#define SSITIME (SPI_BLOCK_BASE|0x04)
-#define SSITX (SPI_BLOCK_BASE|0x08)
-#define SSIINTS (SPI_BLOCK_BASE|0x14)
-
-#define TIMER0LOAD (PWM_BLOCK_BASE|0x00)
-#define TIMER0CTRL (PWM_BLOCK_BASE|0x08)
-#define PWM0OFF (PWM_BLOCK_BASE|0x1C)
-#define TIMER1LOAD (PWM_BLOCK_BASE|0x20)
-#define TIMER1CTRL (PWM_BLOCK_BASE|0x28)
-#define PWM1OFF (PWM_BLOCK_BASE|0x3C)
-#define TIMER2LOAD (PWM_BLOCK_BASE|0x40)
-#define TIMER2CTRL (PWM_BLOCK_BASE|0x48)
-#define PWM2OFF (PWM_BLOCK_BASE|0x5C)
-#define PWMCR (PWM_BLOCK_BASE|0x68)
-
-#define GPIOIS (GPIO_BLOCK_BASE|0x08)
-#define GPIOIEV (GPIO_BLOCK_BASE|0x10)
-#define GPIOIC (GPIO_BLOCK_BASE|0x20)
-
-#define WKREQ (SYSTEM_BLOCK1_BASE|0x00)
-#define CLKENB (SYSTEM_BLOCK1_BASE|0x04)
-#define DRAMPWR (SYSTEM_BLOCK1_BASE|0x08)
-#define INTMASK (SYSTEM_BLOCK1_BASE|0x0C)
-#define CNT_DIS (SYSTEM_BLOCK1_BASE|0x10)
-
-typedef enum {
- TOSHIBA_STATE_OFF,
- TOSHIBA_STATE_PRIM_SEC_STANDBY,
- TOSHIBA_STATE_PRIM_SEC_READY,
- TOSHIBA_STATE_PRIM_NORMAL_MODE,
- TOSHIBA_STATE_SEC_NORMAL_MODE
-} mddi_toshiba_state_t;
-
-static uint32 mddi_toshiba_curr_vpos;
-static boolean mddi_toshiba_monitor_refresh_value = FALSE;
-static boolean mddi_toshiba_report_refresh_measurements = FALSE;
-
-boolean mddi_toshiba_61Hz_refresh = TRUE;
-
-/* Modifications to timing to increase refresh rate to > 60Hz.
- * 20MHz dot clock.
- * 646 total rows.
- * 506 total columns.
- * refresh rate = 61.19Hz
- */
-static uint32 mddi_toshiba_rows_per_second = 39526;
-static uint32 mddi_toshiba_usecs_per_refresh = 16344;
-static uint32 mddi_toshiba_rows_per_refresh = 646;
-extern boolean mddi_vsync_detect_enabled;
-
-static msm_fb_vsync_handler_type mddi_toshiba_vsync_handler;
-static void *mddi_toshiba_vsync_handler_arg;
-static uint16 mddi_toshiba_vsync_attempts;
-
-static mddi_toshiba_state_t toshiba_state = TOSHIBA_STATE_OFF;
-
-static struct msm_panel_common_pdata *mddi_toshiba_pdata;
-
-static int mddi_toshiba_lcd_on(struct platform_device *pdev);
-static int mddi_toshiba_lcd_off(struct platform_device *pdev);
-
-static void mddi_toshiba_state_transition(mddi_toshiba_state_t a,
- mddi_toshiba_state_t b)
-{
- if (toshiba_state != a) {
- MDDI_MSG_ERR("toshiba state trans. (%d->%d) found %d\n", a, b,
- toshiba_state);
- }
- toshiba_state = b;
-}
-
-#define GORDON_REG_IMGCTL1 0x10 /* Image interface control 1 */
-#define GORDON_REG_IMGCTL2 0x11 /* Image interface control 2 */
-#define GORDON_REG_IMGSET1 0x12 /* Image interface settings 1 */
-#define GORDON_REG_IMGSET2 0x13 /* Image interface settings 2 */
-#define GORDON_REG_IVBP1 0x14 /* DM0: Vert back porch */
-#define GORDON_REG_IHBP1 0x15 /* DM0: Horiz back porch */
-#define GORDON_REG_IVNUM1 0x16 /* DM0: Num of vert lines */
-#define GORDON_REG_IHNUM1 0x17 /* DM0: Num of pixels per line */
-#define GORDON_REG_IVBP2 0x18 /* DM1: Vert back porch */
-#define GORDON_REG_IHBP2 0x19 /* DM1: Horiz back porch */
-#define GORDON_REG_IVNUM2 0x1A /* DM1: Num of vert lines */
-#define GORDON_REG_IHNUM2 0x1B /* DM1: Num of pixels per line */
-#define GORDON_REG_LCDIFCTL1 0x30 /* LCD interface control 1 */
-#define GORDON_REG_VALTRAN 0x31 /* LCD IF ctl: VALTRAN sync flag */
-#define GORDON_REG_AVCTL 0x33
-#define GORDON_REG_LCDIFCTL2 0x34 /* LCD interface control 2 */
-#define GORDON_REG_LCDIFCTL3 0x35 /* LCD interface control 3 */
-#define GORDON_REG_LCDIFSET1 0x36 /* LCD interface settings 1 */
-#define GORDON_REG_PCCTL 0x3C
-#define GORDON_REG_TPARAM1 0x40
-#define GORDON_REG_TLCDIF1 0x41
-#define GORDON_REG_TSSPB_ST1 0x42
-#define GORDON_REG_TSSPB_ED1 0x43
-#define GORDON_REG_TSCK_ST1 0x44
-#define GORDON_REG_TSCK_WD1 0x45
-#define GORDON_REG_TGSPB_VST1 0x46
-#define GORDON_REG_TGSPB_VED1 0x47
-#define GORDON_REG_TGSPB_CH1 0x48
-#define GORDON_REG_TGCK_ST1 0x49
-#define GORDON_REG_TGCK_ED1 0x4A
-#define GORDON_REG_TPCTL_ST1 0x4B
-#define GORDON_REG_TPCTL_ED1 0x4C
-#define GORDON_REG_TPCHG_ED1 0x4D
-#define GORDON_REG_TCOM_CH1 0x4E
-#define GORDON_REG_THBP1 0x4F
-#define GORDON_REG_TPHCTL1 0x50
-#define GORDON_REG_EVPH1 0x51
-#define GORDON_REG_EVPL1 0x52
-#define GORDON_REG_EVNH1 0x53
-#define GORDON_REG_EVNL1 0x54
-#define GORDON_REG_TBIAS1 0x55
-#define GORDON_REG_TPARAM2 0x56
-#define GORDON_REG_TLCDIF2 0x57
-#define GORDON_REG_TSSPB_ST2 0x58
-#define GORDON_REG_TSSPB_ED2 0x59
-#define GORDON_REG_TSCK_ST2 0x5A
-#define GORDON_REG_TSCK_WD2 0x5B
-#define GORDON_REG_TGSPB_VST2 0x5C
-#define GORDON_REG_TGSPB_VED2 0x5D
-#define GORDON_REG_TGSPB_CH2 0x5E
-#define GORDON_REG_TGCK_ST2 0x5F
-#define GORDON_REG_TGCK_ED2 0x60
-#define GORDON_REG_TPCTL_ST2 0x61
-#define GORDON_REG_TPCTL_ED2 0x62
-#define GORDON_REG_TPCHG_ED2 0x63
-#define GORDON_REG_TCOM_CH2 0x64
-#define GORDON_REG_THBP2 0x65
-#define GORDON_REG_TPHCTL2 0x66
-#define GORDON_REG_EVPH2 0x67
-#define GORDON_REG_EVPL2 0x68
-#define GORDON_REG_EVNH2 0x69
-#define GORDON_REG_EVNL2 0x6A
-#define GORDON_REG_TBIAS2 0x6B
-#define GORDON_REG_POWCTL 0x80
-#define GORDON_REG_POWOSC1 0x81
-#define GORDON_REG_POWOSC2 0x82
-#define GORDON_REG_POWSET 0x83
-#define GORDON_REG_POWTRM1 0x85
-#define GORDON_REG_POWTRM2 0x86
-#define GORDON_REG_POWTRM3 0x87
-#define GORDON_REG_POWTRMSEL 0x88
-#define GORDON_REG_POWHIZ 0x89
-
-void serigo(uint16 reg, uint8 data)
-{
- uint32 mddi_val = 0;
- mddi_queue_register_read(SSIINTS, &mddi_val, TRUE, 0);
- if (mddi_val & (1 << 8))
- mddi_wait(1);
- /* No De-assert of CS and send 2 bytes */
- mddi_val = 0x90000 | ((0x00FF & reg) << 8) | data;
- mddi_queue_register_write(SSITX, mddi_val, TRUE, 0);
-}
-
-void gordon_init(void)
-{
- /* Image interface settings ***/
- serigo(GORDON_REG_IMGCTL2, 0x00);
- serigo(GORDON_REG_IMGSET1, 0x01);
-
- /* Exchange the RGB signal for J510(Softbank mobile) */
- serigo(GORDON_REG_IMGSET2, 0x12);
- serigo(GORDON_REG_LCDIFSET1, 0x00);
- mddi_wait(2);
-
- /* Pre-charge settings */
- serigo(GORDON_REG_PCCTL, 0x09);
- serigo(GORDON_REG_LCDIFCTL2, 0x1B);
- mddi_wait(1);
-}
-
-void gordon_disp_on(void)
-{
- /*gordon_dispmode setting */
- /*VGA settings */
- serigo(GORDON_REG_TPARAM1, 0x30);
- serigo(GORDON_REG_TLCDIF1, 0x00);
- serigo(GORDON_REG_TSSPB_ST1, 0x8B);
- serigo(GORDON_REG_TSSPB_ED1, 0x93);
- mddi_wait(2);
- serigo(GORDON_REG_TSCK_ST1, 0x88);
- serigo(GORDON_REG_TSCK_WD1, 0x00);
- serigo(GORDON_REG_TGSPB_VST1, 0x01);
- serigo(GORDON_REG_TGSPB_VED1, 0x02);
- mddi_wait(2);
- serigo(GORDON_REG_TGSPB_CH1, 0x5E);
- serigo(GORDON_REG_TGCK_ST1, 0x80);
- serigo(GORDON_REG_TGCK_ED1, 0x3C);
- serigo(GORDON_REG_TPCTL_ST1, 0x50);
- mddi_wait(2);
- serigo(GORDON_REG_TPCTL_ED1, 0x74);
- serigo(GORDON_REG_TPCHG_ED1, 0x78);
- serigo(GORDON_REG_TCOM_CH1, 0x50);
- serigo(GORDON_REG_THBP1, 0x84);
- mddi_wait(2);
- serigo(GORDON_REG_TPHCTL1, 0x00);
- serigo(GORDON_REG_EVPH1, 0x70);
- serigo(GORDON_REG_EVPL1, 0x64);
- serigo(GORDON_REG_EVNH1, 0x56);
- mddi_wait(2);
- serigo(GORDON_REG_EVNL1, 0x48);
- serigo(GORDON_REG_TBIAS1, 0x88);
- mddi_wait(2);
- serigo(GORDON_REG_TPARAM2, 0x28);
- serigo(GORDON_REG_TLCDIF2, 0x14);
- serigo(GORDON_REG_TSSPB_ST2, 0x49);
- serigo(GORDON_REG_TSSPB_ED2, 0x4B);
- mddi_wait(2);
- serigo(GORDON_REG_TSCK_ST2, 0x4A);
- serigo(GORDON_REG_TSCK_WD2, 0x02);
- serigo(GORDON_REG_TGSPB_VST2, 0x02);
- serigo(GORDON_REG_TGSPB_VED2, 0x03);
- mddi_wait(2);
- serigo(GORDON_REG_TGSPB_CH2, 0x2F);
- serigo(GORDON_REG_TGCK_ST2, 0x40);
- serigo(GORDON_REG_TGCK_ED2, 0x1E);
- serigo(GORDON_REG_TPCTL_ST2, 0x2C);
- mddi_wait(2);
- serigo(GORDON_REG_TPCTL_ED2, 0x3A);
- serigo(GORDON_REG_TPCHG_ED2, 0x3C);
- serigo(GORDON_REG_TCOM_CH2, 0x28);
- serigo(GORDON_REG_THBP2, 0x4D);
- mddi_wait(2);
- serigo(GORDON_REG_TPHCTL2, 0x1A);
- mddi_wait(2);
- serigo(GORDON_REG_IVBP1, 0x02);
- serigo(GORDON_REG_IHBP1, 0x90);
- serigo(GORDON_REG_IVNUM1, 0xA0);
- serigo(GORDON_REG_IHNUM1, 0x78);
- mddi_wait(2);
- serigo(GORDON_REG_IVBP2, 0x02);
- serigo(GORDON_REG_IHBP2, 0x48);
- serigo(GORDON_REG_IVNUM2, 0x50);
- serigo(GORDON_REG_IHNUM2, 0x3C);
- mddi_wait(2);
- serigo(GORDON_REG_POWCTL, 0x03);
- mddi_wait(15);
- serigo(GORDON_REG_POWCTL, 0x07);
- mddi_wait(15);
- serigo(GORDON_REG_POWCTL, 0x0F);
- mddi_wait(15);
- serigo(GORDON_REG_AVCTL, 0x03);
- mddi_wait(15);
- serigo(GORDON_REG_POWCTL, 0x1F);
- mddi_wait(15);
- serigo(GORDON_REG_POWCTL, 0x5F);
- mddi_wait(15);
- serigo(GORDON_REG_POWCTL, 0x7F);
- mddi_wait(15);
- serigo(GORDON_REG_LCDIFCTL1, 0x02);
- mddi_wait(15);
- serigo(GORDON_REG_IMGCTL1, 0x00);
- mddi_wait(15);
- serigo(GORDON_REG_LCDIFCTL3, 0x00);
- mddi_wait(15);
- serigo(GORDON_REG_VALTRAN, 0x01);
- mddi_wait(15);
- serigo(GORDON_REG_LCDIFCTL1, 0x03);
- serigo(GORDON_REG_LCDIFCTL1, 0x03);
- mddi_wait(1);
-}
-
-void gordon_disp_off(void)
-{
- serigo(GORDON_REG_LCDIFCTL2, 0x7B);
- serigo(GORDON_REG_VALTRAN, 0x01);
- serigo(GORDON_REG_LCDIFCTL1, 0x02);
- serigo(GORDON_REG_LCDIFCTL3, 0x01);
- mddi_wait(20);
- serigo(GORDON_REG_VALTRAN, 0x01);
- serigo(GORDON_REG_IMGCTL1, 0x01);
- serigo(GORDON_REG_LCDIFCTL1, 0x00);
- mddi_wait(20);
- serigo(GORDON_REG_POWCTL, 0x1F);
- mddi_wait(40);
- serigo(GORDON_REG_POWCTL, 0x07);
- mddi_wait(40);
- serigo(GORDON_REG_POWCTL, 0x03);
- mddi_wait(40);
- serigo(GORDON_REG_POWCTL, 0x00);
- mddi_wait(40);
-}
-
-void gordon_disp_init(void)
-{
- gordon_init();
- mddi_wait(20);
- gordon_disp_on();
-}
-
-static void toshiba_common_initial_setup(struct msm_fb_data_type *mfd)
-{
- if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT) {
- write_client_reg(DPSET0 , 0x4bec0066, TRUE);
- write_client_reg(DPSET1 , 0x00000113, TRUE);
- write_client_reg(DPSUS , 0x00000000, TRUE);
- write_client_reg(DPRUN , 0x00000001, TRUE);
- mddi_wait(5);
- write_client_reg(SYSCKENA , 0x00000001, TRUE);
- write_client_reg(CLKENB , 0x0000a0e9, TRUE);
-
- write_client_reg(GPIODATA , 0x03FF0000, TRUE);
- write_client_reg(GPIODIR , 0x0000024D, TRUE);
- write_client_reg(GPIOSEL , 0x00000173, TRUE);
- write_client_reg(GPIOPC , 0x03C300C0, TRUE);
- write_client_reg(WKREQ , 0x00000000, TRUE);
- write_client_reg(GPIOIS , 0x00000000, TRUE);
- write_client_reg(GPIOIEV , 0x00000001, TRUE);
- write_client_reg(GPIOIC , 0x000003FF, TRUE);
- write_client_reg(GPIODATA , 0x00040004, TRUE);
-
- write_client_reg(GPIODATA , 0x00080008, TRUE);
- write_client_reg(DRAMPWR , 0x00000001, TRUE);
- write_client_reg(CLKENB , 0x0000a0eb, TRUE);
- write_client_reg(PWMCR , 0x00000000, TRUE);
- mddi_wait(1);
-
- write_client_reg(SSICTL , 0x00060399, TRUE);
- write_client_reg(SSITIME , 0x00000100, TRUE);
- write_client_reg(CNT_DIS , 0x00000002, TRUE);
- write_client_reg(SSICTL , 0x0006039b, TRUE);
-
- write_client_reg(SSITX , 0x00000000, TRUE);
- mddi_wait(7);
- write_client_reg(SSITX , 0x00000000, TRUE);
- mddi_wait(7);
- write_client_reg(SSITX , 0x00000000, TRUE);
- mddi_wait(7);
-
- write_client_reg(SSITX , 0x000800BA, TRUE);
- write_client_reg(SSITX , 0x00000111, TRUE);
- write_client_reg(SSITX , 0x00080036, TRUE);
- write_client_reg(SSITX , 0x00000100, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x0008003A, TRUE);
- write_client_reg(SSITX , 0x00000160, TRUE);
- write_client_reg(SSITX , 0x000800B1, TRUE);
- write_client_reg(SSITX , 0x0000015D, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800B2, TRUE);
- write_client_reg(SSITX , 0x00000133, TRUE);
- write_client_reg(SSITX , 0x000800B3, TRUE);
- write_client_reg(SSITX , 0x00000122, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800B4, TRUE);
- write_client_reg(SSITX , 0x00000102, TRUE);
- write_client_reg(SSITX , 0x000800B5, TRUE);
- write_client_reg(SSITX , 0x0000011E, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800B6, TRUE);
- write_client_reg(SSITX , 0x00000127, TRUE);
- write_client_reg(SSITX , 0x000800B7, TRUE);
- write_client_reg(SSITX , 0x00000103, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800B9, TRUE);
- write_client_reg(SSITX , 0x00000124, TRUE);
- write_client_reg(SSITX , 0x000800BD, TRUE);
- write_client_reg(SSITX , 0x000001A1, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800BB, TRUE);
- write_client_reg(SSITX , 0x00000100, TRUE);
- write_client_reg(SSITX , 0x000800BF, TRUE);
- write_client_reg(SSITX , 0x00000101, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800BE, TRUE);
- write_client_reg(SSITX , 0x00000100, TRUE);
- write_client_reg(SSITX , 0x000800C0, TRUE);
- write_client_reg(SSITX , 0x00000111, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800C1, TRUE);
- write_client_reg(SSITX , 0x00000111, TRUE);
- write_client_reg(SSITX , 0x000800C2, TRUE);
- write_client_reg(SSITX , 0x00000111, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800C3, TRUE);
- write_client_reg(SSITX , 0x00080132, TRUE);
- write_client_reg(SSITX , 0x00000132, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800C4, TRUE);
- write_client_reg(SSITX , 0x00080132, TRUE);
- write_client_reg(SSITX , 0x00000132, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800C5, TRUE);
- write_client_reg(SSITX , 0x00080132, TRUE);
- write_client_reg(SSITX , 0x00000132, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800C6, TRUE);
- write_client_reg(SSITX , 0x00080132, TRUE);
- write_client_reg(SSITX , 0x00000132, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800C7, TRUE);
- write_client_reg(SSITX , 0x00080164, TRUE);
- write_client_reg(SSITX , 0x00000145, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800C8, TRUE);
- write_client_reg(SSITX , 0x00000144, TRUE);
- write_client_reg(SSITX , 0x000800C9, TRUE);
- write_client_reg(SSITX , 0x00000152, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800CA, TRUE);
- write_client_reg(SSITX , 0x00000100, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800EC, TRUE);
- write_client_reg(SSITX , 0x00080101, TRUE);
- write_client_reg(SSITX , 0x000001FC, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800CF, TRUE);
- write_client_reg(SSITX , 0x00000101, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800D0, TRUE);
- write_client_reg(SSITX , 0x00080110, TRUE);
- write_client_reg(SSITX , 0x00000104, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800D1, TRUE);
- write_client_reg(SSITX , 0x00000101, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800D2, TRUE);
- write_client_reg(SSITX , 0x00080100, TRUE);
- write_client_reg(SSITX , 0x00000128, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800D3, TRUE);
- write_client_reg(SSITX , 0x00080100, TRUE);
- write_client_reg(SSITX , 0x00000128, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800D4, TRUE);
- write_client_reg(SSITX , 0x00080126, TRUE);
- write_client_reg(SSITX , 0x000001A4, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800D5, TRUE);
- write_client_reg(SSITX , 0x00000120, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800EF, TRUE);
- write_client_reg(SSITX , 0x00080132, TRUE);
- write_client_reg(SSITX , 0x00000100, TRUE);
- mddi_wait(1);
-
- write_client_reg(BITMAP0 , 0x032001E0, TRUE);
- write_client_reg(BITMAP1 , 0x032001E0, TRUE);
- write_client_reg(BITMAP2 , 0x014000F0, TRUE);
- write_client_reg(BITMAP3 , 0x014000F0, TRUE);
- write_client_reg(BITMAP4 , 0x014000F0, TRUE);
- write_client_reg(CLKENB , 0x0000A1EB, TRUE);
- write_client_reg(PORT_ENB , 0x00000001, TRUE);
- write_client_reg(PORT , 0x00000004, TRUE);
- write_client_reg(PXL , 0x00000002, TRUE);
- write_client_reg(MPLFBUF , 0x00000000, TRUE);
- write_client_reg(HCYCLE , 0x000000FD, TRUE);
- write_client_reg(HSW , 0x00000003, TRUE);
- write_client_reg(HDE_START , 0x00000007, TRUE);
- write_client_reg(HDE_SIZE , 0x000000EF, TRUE);
- write_client_reg(VCYCLE , 0x00000325, TRUE);
- write_client_reg(VSW , 0x00000001, TRUE);
- write_client_reg(VDE_START , 0x00000003, TRUE);
- write_client_reg(VDE_SIZE , 0x0000031F, TRUE);
- write_client_reg(START , 0x00000001, TRUE);
- mddi_wait(32);
- write_client_reg(SSITX , 0x000800BC, TRUE);
- write_client_reg(SSITX , 0x00000180, TRUE);
- write_client_reg(SSITX , 0x0008003B, TRUE);
- write_client_reg(SSITX , 0x00000100, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800B0, TRUE);
- write_client_reg(SSITX , 0x00000116, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x000800B8, TRUE);
- write_client_reg(SSITX , 0x000801FF, TRUE);
- write_client_reg(SSITX , 0x000001F5, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX , 0x00000011, TRUE);
- mddi_wait(5);
- write_client_reg(SSITX , 0x00000029, TRUE);
- return;
- }
-
- if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) {
- write_client_reg(DPSET0, 0x4BEC0066, TRUE);
- write_client_reg(DPSET1, 0x00000113, TRUE);
- write_client_reg(DPSUS, 0x00000000, TRUE);
- write_client_reg(DPRUN, 0x00000001, TRUE);
- mddi_wait(14);
- write_client_reg(SYSCKENA, 0x00000001, TRUE);
- write_client_reg(CLKENB, 0x000000EF, TRUE);
- write_client_reg(GPIO_BLOCK_BASE, 0x03FF0000, TRUE);
- write_client_reg(GPIODIR, 0x0000024D, TRUE);
- write_client_reg(SYSTEM_BLOCK2_BASE, 0x00000173, TRUE);
- write_client_reg(GPIOPC, 0x03C300C0, TRUE);
- write_client_reg(SYSTEM_BLOCK1_BASE, 0x00000000, TRUE);
- write_client_reg(GPIOIS, 0x00000000, TRUE);
- write_client_reg(GPIOIEV, 0x00000001, TRUE);
- write_client_reg(GPIOIC, 0x000003FF, TRUE);
- write_client_reg(GPIO_BLOCK_BASE, 0x00060006, TRUE);
- write_client_reg(GPIO_BLOCK_BASE, 0x00080008, TRUE);
- write_client_reg(GPIO_BLOCK_BASE, 0x02000200, TRUE);
- write_client_reg(DRAMPWR, 0x00000001, TRUE);
- write_client_reg(TIMER0CTRL, 0x00000060, TRUE);
- write_client_reg(PWM_BLOCK_BASE, 0x00001388, TRUE);
- write_client_reg(PWM0OFF, 0x00001387, TRUE);
- write_client_reg(TIMER1CTRL, 0x00000060, TRUE);
- write_client_reg(TIMER1LOAD, 0x00001388, TRUE);
- write_client_reg(PWM1OFF, 0x00001387, TRUE);
- write_client_reg(TIMER0CTRL, 0x000000E0, TRUE);
- write_client_reg(TIMER1CTRL, 0x000000E0, TRUE);
- write_client_reg(PWMCR, 0x00000003, TRUE);
- mddi_wait(1);
- write_client_reg(SPI_BLOCK_BASE, 0x00063111, TRUE);
- write_client_reg(SSITIME, 0x00000100, TRUE);
- write_client_reg(SPI_BLOCK_BASE, 0x00063113, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX, 0x00000000, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX, 0x00000000, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX, 0x00000000, TRUE);
- mddi_wait(1);
- write_client_reg(CLKENB, 0x0000A1EF, TRUE);
- write_client_reg(START, 0x00000000, TRUE);
- write_client_reg(WRSTB, 0x0000003F, TRUE);
- write_client_reg(RDSTB, 0x00000432, TRUE);
- write_client_reg(PORT_ENB, 0x00000002, TRUE);
- write_client_reg(VSYNIF, 0x00000000, TRUE);
- write_client_reg(ASY_DATA, 0x80000000, TRUE);
- write_client_reg(ASY_DATB, 0x00000001, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(10);
- write_client_reg(ASY_DATA, 0x80000000, TRUE);
- write_client_reg(ASY_DATB, 0x80000000, TRUE);
- write_client_reg(ASY_DATC, 0x80000000, TRUE);
- write_client_reg(ASY_DATD, 0x80000000, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
- write_client_reg(ASY_DATA, 0x80000007, TRUE);
- write_client_reg(ASY_DATB, 0x00004005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(20);
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- write_client_reg(ASY_DATB, 0x00000000, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
-
- write_client_reg(VSYNIF, 0x00000001, TRUE);
- write_client_reg(PORT_ENB, 0x00000001, TRUE);
- } else {
- write_client_reg(DPSET0, 0x4BEC0066, TRUE);
- write_client_reg(DPSET1, 0x00000113, TRUE);
- write_client_reg(DPSUS, 0x00000000, TRUE);
- write_client_reg(DPRUN, 0x00000001, TRUE);
- mddi_wait(14);
- write_client_reg(SYSCKENA, 0x00000001, TRUE);
- write_client_reg(CLKENB, 0x000000EF, TRUE);
- write_client_reg(GPIODATA, 0x03FF0000, TRUE);
- write_client_reg(GPIODIR, 0x0000024D, TRUE);
- write_client_reg(GPIOSEL, 0x00000173, TRUE);
- write_client_reg(GPIOPC, 0x03C300C0, TRUE);
- write_client_reg(WKREQ, 0x00000000, TRUE);
- write_client_reg(GPIOIS, 0x00000000, TRUE);
- write_client_reg(GPIOIEV, 0x00000001, TRUE);
- write_client_reg(GPIOIC, 0x000003FF, TRUE);
- write_client_reg(GPIODATA, 0x00060006, TRUE);
- write_client_reg(GPIODATA, 0x00080008, TRUE);
- write_client_reg(GPIODATA, 0x02000200, TRUE);
-
- if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA) {
- mddi_wait(400);
- write_client_reg(DRAMPWR, 0x00000001, TRUE);
-
- write_client_reg(CNT_DIS, 0x00000002, TRUE);
- write_client_reg(BITMAP0, 0x01E00320, TRUE);
- write_client_reg(PORT_ENB, 0x00000001, TRUE);
- write_client_reg(PORT, 0x00000004, TRUE);
- write_client_reg(PXL, 0x0000003A, TRUE);
- write_client_reg(MPLFBUF, 0x00000000, TRUE);
- write_client_reg(HCYCLE, 0x00000253, TRUE);
- write_client_reg(HSW, 0x00000003, TRUE);
- write_client_reg(HDE_START, 0x00000017, TRUE);
- write_client_reg(HDE_SIZE, 0x0000018F, TRUE);
- write_client_reg(VCYCLE, 0x000001FF, TRUE);
- write_client_reg(VSW, 0x00000001, TRUE);
- write_client_reg(VDE_START, 0x00000003, TRUE);
- write_client_reg(VDE_SIZE, 0x000001DF, TRUE);
- write_client_reg(START, 0x00000001, TRUE);
- mddi_wait(1);
- write_client_reg(TIMER0CTRL, 0x00000060, TRUE);
- write_client_reg(TIMER0LOAD, 0x00001388, TRUE);
- write_client_reg(TIMER1CTRL, 0x00000060, TRUE);
- write_client_reg(TIMER1LOAD, 0x00001388, TRUE);
- write_client_reg(PWM1OFF, 0x00000087, TRUE);
- } else {
- write_client_reg(DRAMPWR, 0x00000001, TRUE);
- write_client_reg(TIMER0CTRL, 0x00000060, TRUE);
- write_client_reg(TIMER0LOAD, 0x00001388, TRUE);
- write_client_reg(TIMER1CTRL, 0x00000060, TRUE);
- write_client_reg(TIMER1LOAD, 0x00001388, TRUE);
- write_client_reg(PWM1OFF, 0x00001387, TRUE);
- }
-
- write_client_reg(TIMER0CTRL, 0x000000E0, TRUE);
- write_client_reg(TIMER1CTRL, 0x000000E0, TRUE);
- write_client_reg(PWMCR, 0x00000003, TRUE);
- mddi_wait(1);
- write_client_reg(SSICTL, 0x00000799, TRUE);
- write_client_reg(SSITIME, 0x00000100, TRUE);
- write_client_reg(SSICTL, 0x0000079b, TRUE);
- write_client_reg(SSITX, 0x00000000, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX, 0x00000000, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX, 0x00000000, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX, 0x000800BA, TRUE);
- write_client_reg(SSITX, 0x00000111, TRUE);
- write_client_reg(SSITX, 0x00080036, TRUE);
- write_client_reg(SSITX, 0x00000100, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800BB, TRUE);
- write_client_reg(SSITX, 0x00000100, TRUE);
- write_client_reg(SSITX, 0x0008003A, TRUE);
- write_client_reg(SSITX, 0x00000160, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800BF, TRUE);
- write_client_reg(SSITX, 0x00000100, TRUE);
- write_client_reg(SSITX, 0x000800B1, TRUE);
- write_client_reg(SSITX, 0x0000015D, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800B2, TRUE);
- write_client_reg(SSITX, 0x00000133, TRUE);
- write_client_reg(SSITX, 0x000800B3, TRUE);
- write_client_reg(SSITX, 0x00000122, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800B4, TRUE);
- write_client_reg(SSITX, 0x00000102, TRUE);
- write_client_reg(SSITX, 0x000800B5, TRUE);
- write_client_reg(SSITX, 0x0000011F, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800B6, TRUE);
- write_client_reg(SSITX, 0x00000128, TRUE);
- write_client_reg(SSITX, 0x000800B7, TRUE);
- write_client_reg(SSITX, 0x00000103, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800B9, TRUE);
- write_client_reg(SSITX, 0x00000120, TRUE);
- write_client_reg(SSITX, 0x000800BD, TRUE);
- write_client_reg(SSITX, 0x00000102, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800BE, TRUE);
- write_client_reg(SSITX, 0x00000100, TRUE);
- write_client_reg(SSITX, 0x000800C0, TRUE);
- write_client_reg(SSITX, 0x00000111, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800C1, TRUE);
- write_client_reg(SSITX, 0x00000111, TRUE);
- write_client_reg(SSITX, 0x000800C2, TRUE);
- write_client_reg(SSITX, 0x00000111, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800C3, TRUE);
- write_client_reg(SSITX, 0x0008010A, TRUE);
- write_client_reg(SSITX, 0x0000010A, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800C4, TRUE);
- write_client_reg(SSITX, 0x00080160, TRUE);
- write_client_reg(SSITX, 0x00000160, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800C5, TRUE);
- write_client_reg(SSITX, 0x00080160, TRUE);
- write_client_reg(SSITX, 0x00000160, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800C6, TRUE);
- write_client_reg(SSITX, 0x00080160, TRUE);
- write_client_reg(SSITX, 0x00000160, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800C7, TRUE);
- write_client_reg(SSITX, 0x00080133, TRUE);
- write_client_reg(SSITX, 0x00000143, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800C8, TRUE);
- write_client_reg(SSITX, 0x00000144, TRUE);
- write_client_reg(SSITX, 0x000800C9, TRUE);
- write_client_reg(SSITX, 0x00000133, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800CA, TRUE);
- write_client_reg(SSITX, 0x00000100, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800EC, TRUE);
- write_client_reg(SSITX, 0x00080102, TRUE);
- write_client_reg(SSITX, 0x00000118, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800CF, TRUE);
- write_client_reg(SSITX, 0x00000101, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800D0, TRUE);
- write_client_reg(SSITX, 0x00080110, TRUE);
- write_client_reg(SSITX, 0x00000104, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800D1, TRUE);
- write_client_reg(SSITX, 0x00000101, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800D2, TRUE);
- write_client_reg(SSITX, 0x00080100, TRUE);
- write_client_reg(SSITX, 0x0000013A, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800D3, TRUE);
- write_client_reg(SSITX, 0x00080100, TRUE);
- write_client_reg(SSITX, 0x0000013A, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800D4, TRUE);
- write_client_reg(SSITX, 0x00080124, TRUE);
- write_client_reg(SSITX, 0x0000016E, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX, 0x000800D5, TRUE);
- write_client_reg(SSITX, 0x00000124, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800ED, TRUE);
- write_client_reg(SSITX, 0x00080101, TRUE);
- write_client_reg(SSITX, 0x0000010A, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800D6, TRUE);
- write_client_reg(SSITX, 0x00000101, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800D7, TRUE);
- write_client_reg(SSITX, 0x00080110, TRUE);
- write_client_reg(SSITX, 0x0000010A, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800D8, TRUE);
- write_client_reg(SSITX, 0x00000101, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800D9, TRUE);
- write_client_reg(SSITX, 0x00080100, TRUE);
- write_client_reg(SSITX, 0x00000114, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800DE, TRUE);
- write_client_reg(SSITX, 0x00080100, TRUE);
- write_client_reg(SSITX, 0x00000114, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800DF, TRUE);
- write_client_reg(SSITX, 0x00080112, TRUE);
- write_client_reg(SSITX, 0x0000013F, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800E0, TRUE);
- write_client_reg(SSITX, 0x0000010B, TRUE);
- write_client_reg(SSITX, 0x000800E2, TRUE);
- write_client_reg(SSITX, 0x00000101, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800E3, TRUE);
- write_client_reg(SSITX, 0x00000136, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800E4, TRUE);
- write_client_reg(SSITX, 0x00080100, TRUE);
- write_client_reg(SSITX, 0x00000103, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800E5, TRUE);
- write_client_reg(SSITX, 0x00080102, TRUE);
- write_client_reg(SSITX, 0x00000104, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800E6, TRUE);
- write_client_reg(SSITX, 0x00000103, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800E7, TRUE);
- write_client_reg(SSITX, 0x00080104, TRUE);
- write_client_reg(SSITX, 0x0000010A, TRUE);
- mddi_wait(2);
- write_client_reg(SSITX, 0x000800E8, TRUE);
- write_client_reg(SSITX, 0x00000104, TRUE);
- write_client_reg(CLKENB, 0x000001EF, TRUE);
- write_client_reg(START, 0x00000000, TRUE);
- write_client_reg(WRSTB, 0x0000003F, TRUE);
- write_client_reg(RDSTB, 0x00000432, TRUE);
- write_client_reg(PORT_ENB, 0x00000002, TRUE);
- write_client_reg(VSYNIF, 0x00000000, TRUE);
- write_client_reg(ASY_DATA, 0x80000000, TRUE);
- write_client_reg(ASY_DATB, 0x00000001, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(10);
- write_client_reg(ASY_DATA, 0x80000000, TRUE);
- write_client_reg(ASY_DATB, 0x80000000, TRUE);
- write_client_reg(ASY_DATC, 0x80000000, TRUE);
- write_client_reg(ASY_DATD, 0x80000000, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
- write_client_reg(ASY_DATA, 0x80000007, TRUE);
- write_client_reg(ASY_DATB, 0x00004005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(20);
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- write_client_reg(ASY_DATB, 0x00000000, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- write_client_reg(VSYNIF, 0x00000001, TRUE);
- write_client_reg(PORT_ENB, 0x00000001, TRUE);
- }
-
- mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_SEC_STANDBY,
- TOSHIBA_STATE_PRIM_SEC_READY);
-}
-
-static void toshiba_prim_start(struct msm_fb_data_type *mfd)
-{
- if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
- return;
-
- if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) {
- write_client_reg(BITMAP1, 0x01E000F0, TRUE);
- write_client_reg(BITMAP2, 0x01E000F0, TRUE);
- write_client_reg(BITMAP3, 0x01E000F0, TRUE);
- write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
- write_client_reg(CLKENB, 0x000001EF, TRUE);
- write_client_reg(PORT_ENB, 0x00000001, TRUE);
- write_client_reg(PORT, 0x00000016, TRUE);
- write_client_reg(PXL, 0x00000002, TRUE);
- write_client_reg(MPLFBUF, 0x00000000, TRUE);
- write_client_reg(HCYCLE, 0x00000185, TRUE);
- write_client_reg(HSW, 0x00000018, TRUE);
- write_client_reg(HDE_START, 0x0000004A, TRUE);
- write_client_reg(HDE_SIZE, 0x000000EF, TRUE);
- write_client_reg(VCYCLE, 0x0000028E, TRUE);
- write_client_reg(VSW, 0x00000004, TRUE);
- write_client_reg(VDE_START, 0x00000009, TRUE);
- write_client_reg(VDE_SIZE, 0x0000027F, TRUE);
- write_client_reg(START, 0x00000001, TRUE);
- write_client_reg(SYSTEM_BLOCK1_BASE, 0x00000002, TRUE);
- } else{
-
- write_client_reg(VSYNIF, 0x00000001, TRUE);
- write_client_reg(PORT_ENB, 0x00000001, TRUE);
- write_client_reg(BITMAP1, 0x01E000F0, TRUE);
- write_client_reg(BITMAP2, 0x01E000F0, TRUE);
- write_client_reg(BITMAP3, 0x01E000F0, TRUE);
- write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
- write_client_reg(CLKENB, 0x000001EF, TRUE);
- write_client_reg(PORT_ENB, 0x00000001, TRUE);
- write_client_reg(PORT, 0x00000004, TRUE);
- write_client_reg(PXL, 0x00000002, TRUE);
- write_client_reg(MPLFBUF, 0x00000000, TRUE);
-
- if (mddi_toshiba_61Hz_refresh) {
- write_client_reg(HCYCLE, 0x000000FC, TRUE);
- mddi_toshiba_rows_per_second = 39526;
- mddi_toshiba_rows_per_refresh = 646;
- mddi_toshiba_usecs_per_refresh = 16344;
- } else {
- write_client_reg(HCYCLE, 0x0000010b, TRUE);
- mddi_toshiba_rows_per_second = 37313;
- mddi_toshiba_rows_per_refresh = 646;
- mddi_toshiba_usecs_per_refresh = 17313;
- }
-
- write_client_reg(HSW, 0x00000003, TRUE);
- write_client_reg(HDE_START, 0x00000007, TRUE);
- write_client_reg(HDE_SIZE, 0x000000EF, TRUE);
- write_client_reg(VCYCLE, 0x00000285, TRUE);
- write_client_reg(VSW, 0x00000001, TRUE);
- write_client_reg(VDE_START, 0x00000003, TRUE);
- write_client_reg(VDE_SIZE, 0x0000027F, TRUE);
- write_client_reg(START, 0x00000001, TRUE);
- mddi_wait(10);
- write_client_reg(SSITX, 0x000800BC, TRUE);
- write_client_reg(SSITX, 0x00000180, TRUE);
- write_client_reg(SSITX, 0x0008003B, TRUE);
- write_client_reg(SSITX, 0x00000100, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX, 0x000800B0, TRUE);
- write_client_reg(SSITX, 0x00000116, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX, 0x000800B8, TRUE);
- write_client_reg(SSITX, 0x000801FF, TRUE);
- write_client_reg(SSITX, 0x000001F5, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX, 0x00000011, TRUE);
- write_client_reg(SSITX, 0x00000029, TRUE);
- write_client_reg(WKREQ, 0x00000000, TRUE);
- write_client_reg(WAKEUP, 0x00000000, TRUE);
- write_client_reg(INTMSK, 0x00000001, TRUE);
- }
-
- mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_SEC_READY,
- TOSHIBA_STATE_PRIM_NORMAL_MODE);
-}
-
-static void toshiba_sec_start(struct msm_fb_data_type *mfd)
-{
- if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
- return;
-
- write_client_reg(VSYNIF, 0x00000000, TRUE);
- write_client_reg(PORT_ENB, 0x00000002, TRUE);
- write_client_reg(CLKENB, 0x000011EF, TRUE);
- write_client_reg(BITMAP0, 0x028001E0, TRUE);
- write_client_reg(BITMAP1, 0x00000000, TRUE);
- write_client_reg(BITMAP2, 0x00000000, TRUE);
- write_client_reg(BITMAP3, 0x00000000, TRUE);
- write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
- write_client_reg(PORT, 0x00000000, TRUE);
- write_client_reg(PXL, 0x00000000, TRUE);
- write_client_reg(MPLFBUF, 0x00000004, TRUE);
- write_client_reg(HCYCLE, 0x0000006B, TRUE);
- write_client_reg(HSW, 0x00000003, TRUE);
- write_client_reg(HDE_START, 0x00000007, TRUE);
- write_client_reg(HDE_SIZE, 0x00000057, TRUE);
- write_client_reg(VCYCLE, 0x000000E6, TRUE);
- write_client_reg(VSW, 0x00000001, TRUE);
- write_client_reg(VDE_START, 0x00000003, TRUE);
- write_client_reg(VDE_SIZE, 0x000000DB, TRUE);
- write_client_reg(ASY_DATA, 0x80000001, TRUE);
- write_client_reg(ASY_DATB, 0x0000011B, TRUE);
- write_client_reg(ASY_DATC, 0x80000002, TRUE);
- write_client_reg(ASY_DATD, 0x00000700, TRUE);
- write_client_reg(ASY_DATE, 0x80000003, TRUE);
- write_client_reg(ASY_DATF, 0x00000230, TRUE);
- write_client_reg(ASY_DATG, 0x80000008, TRUE);
- write_client_reg(ASY_DATH, 0x00000402, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
- write_client_reg(ASY_DATA, 0x80000009, TRUE);
- write_client_reg(ASY_DATB, 0x00000000, TRUE);
- write_client_reg(ASY_DATC, 0x8000000B, TRUE);
- write_client_reg(ASY_DATD, 0x00000000, TRUE);
- write_client_reg(ASY_DATE, 0x8000000C, TRUE);
- write_client_reg(ASY_DATF, 0x00000000, TRUE);
- write_client_reg(ASY_DATG, 0x8000000D, TRUE);
- write_client_reg(ASY_DATH, 0x00000409, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
- write_client_reg(ASY_DATA, 0x8000000E, TRUE);
- write_client_reg(ASY_DATB, 0x00000409, TRUE);
- write_client_reg(ASY_DATC, 0x80000030, TRUE);
- write_client_reg(ASY_DATD, 0x00000000, TRUE);
- write_client_reg(ASY_DATE, 0x80000031, TRUE);
- write_client_reg(ASY_DATF, 0x00000100, TRUE);
- write_client_reg(ASY_DATG, 0x80000032, TRUE);
- write_client_reg(ASY_DATH, 0x00000104, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
- write_client_reg(ASY_DATA, 0x80000033, TRUE);
- write_client_reg(ASY_DATB, 0x00000400, TRUE);
- write_client_reg(ASY_DATC, 0x80000034, TRUE);
- write_client_reg(ASY_DATD, 0x00000306, TRUE);
- write_client_reg(ASY_DATE, 0x80000035, TRUE);
- write_client_reg(ASY_DATF, 0x00000706, TRUE);
- write_client_reg(ASY_DATG, 0x80000036, TRUE);
- write_client_reg(ASY_DATH, 0x00000707, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
- write_client_reg(ASY_DATA, 0x80000037, TRUE);
- write_client_reg(ASY_DATB, 0x00000004, TRUE);
- write_client_reg(ASY_DATC, 0x80000038, TRUE);
- write_client_reg(ASY_DATD, 0x00000000, TRUE);
- write_client_reg(ASY_DATE, 0x80000039, TRUE);
- write_client_reg(ASY_DATF, 0x00000000, TRUE);
- write_client_reg(ASY_DATG, 0x8000003A, TRUE);
- write_client_reg(ASY_DATH, 0x00000001, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
- write_client_reg(ASY_DATA, 0x80000044, TRUE);
- write_client_reg(ASY_DATB, 0x0000AF00, TRUE);
- write_client_reg(ASY_DATC, 0x80000045, TRUE);
- write_client_reg(ASY_DATD, 0x0000DB00, TRUE);
- write_client_reg(ASY_DATE, 0x08000042, TRUE);
- write_client_reg(ASY_DATF, 0x0000DB00, TRUE);
- write_client_reg(ASY_DATG, 0x80000021, TRUE);
- write_client_reg(ASY_DATH, 0x00000000, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
- write_client_reg(PXL, 0x0000000C, TRUE);
- write_client_reg(VSYNIF, 0x00000001, TRUE);
- write_client_reg(ASY_DATA, 0x80000022, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000003, TRUE);
- write_client_reg(START, 0x00000001, TRUE);
- mddi_wait(60);
- write_client_reg(PXL, 0x00000000, TRUE);
- write_client_reg(VSYNIF, 0x00000000, TRUE);
- write_client_reg(START, 0x00000000, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
- write_client_reg(ASY_DATA, 0x80000050, TRUE);
- write_client_reg(ASY_DATB, 0x00000000, TRUE);
- write_client_reg(ASY_DATC, 0x80000051, TRUE);
- write_client_reg(ASY_DATD, 0x00000E00, TRUE);
- write_client_reg(ASY_DATE, 0x80000052, TRUE);
- write_client_reg(ASY_DATF, 0x00000D01, TRUE);
- write_client_reg(ASY_DATG, 0x80000053, TRUE);
- write_client_reg(ASY_DATH, 0x00000000, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
- write_client_reg(ASY_DATA, 0x80000058, TRUE);
- write_client_reg(ASY_DATB, 0x00000000, TRUE);
- write_client_reg(ASY_DATC, 0x8000005A, TRUE);
- write_client_reg(ASY_DATD, 0x00000E01, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
- write_client_reg(ASY_DATA, 0x80000011, TRUE);
- write_client_reg(ASY_DATB, 0x00000812, TRUE);
- write_client_reg(ASY_DATC, 0x80000012, TRUE);
- write_client_reg(ASY_DATD, 0x00000003, TRUE);
- write_client_reg(ASY_DATE, 0x80000013, TRUE);
- write_client_reg(ASY_DATF, 0x00000909, TRUE);
- write_client_reg(ASY_DATG, 0x80000010, TRUE);
- write_client_reg(ASY_DATH, 0x00000040, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
- mddi_wait(40);
- write_client_reg(ASY_DATA, 0x80000010, TRUE);
- write_client_reg(ASY_DATB, 0x00000340, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(60);
- write_client_reg(ASY_DATA, 0x80000010, TRUE);
- write_client_reg(ASY_DATB, 0x00003340, TRUE);
- write_client_reg(ASY_DATC, 0x80000007, TRUE);
- write_client_reg(ASY_DATD, 0x00004007, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
- mddi_wait(1);
- write_client_reg(ASY_DATA, 0x80000007, TRUE);
- write_client_reg(ASY_DATB, 0x00004017, TRUE);
- write_client_reg(ASY_DATC, 0x8000005B, TRUE);
- write_client_reg(ASY_DATD, 0x00000000, TRUE);
- write_client_reg(ASY_DATE, 0x80000059, TRUE);
- write_client_reg(ASY_DATF, 0x00000011, TRUE);
- write_client_reg(ASY_CMDSET, 0x0000000D, TRUE);
- write_client_reg(ASY_CMDSET, 0x0000000C, TRUE);
- mddi_wait(20);
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- /* LTPS I/F control */
- write_client_reg(ASY_DATB, 0x00000019, TRUE);
- /* Direct cmd transfer enable */
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- /* Direct cmd transfer disable */
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(20);
- /* Index setting of SUB LCDD */
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- /* LTPS I/F control */
- write_client_reg(ASY_DATB, 0x00000079, TRUE);
- /* Direct cmd transfer enable */
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- /* Direct cmd transfer disable */
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(20);
- /* Index setting of SUB LCDD */
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- /* LTPS I/F control */
- write_client_reg(ASY_DATB, 0x000003FD, TRUE);
- /* Direct cmd transfer enable */
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- /* Direct cmd transfer disable */
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(20);
- mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_SEC_READY,
- TOSHIBA_STATE_SEC_NORMAL_MODE);
-}
-
-static void toshiba_prim_lcd_off(struct msm_fb_data_type *mfd)
-{
- if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) {
- gordon_disp_off();
- } else{
-
- /* Main panel power off (Deep standby in) */
- write_client_reg(SSITX, 0x000800BC, TRUE);
- write_client_reg(SSITX, 0x00000100, TRUE);
- write_client_reg(SSITX, 0x00000028, TRUE);
- mddi_wait(1);
- write_client_reg(SSITX, 0x000800B8, TRUE);
- write_client_reg(SSITX, 0x00000180, TRUE);
- write_client_reg(SSITX, 0x00000102, TRUE);
- write_client_reg(SSITX, 0x00000010, TRUE);
- }
- write_client_reg(PORT, 0x00000003, TRUE);
- write_client_reg(REGENB, 0x00000001, TRUE);
- mddi_wait(1);
- write_client_reg(PXL, 0x00000000, TRUE);
- write_client_reg(START, 0x00000000, TRUE);
- write_client_reg(REGENB, 0x00000001, TRUE);
- mddi_wait(3);
- if (TM_GET_PID(mfd->panel.id) != LCD_SHARP_2P4_VGA) {
- write_client_reg(SSITX, 0x000800B0, TRUE);
- write_client_reg(SSITX, 0x00000100, TRUE);
- }
- mddi_toshiba_state_transition(TOSHIBA_STATE_PRIM_NORMAL_MODE,
- TOSHIBA_STATE_PRIM_SEC_STANDBY);
-}
-
-static void toshiba_sec_lcd_off(struct msm_fb_data_type *mfd)
-{
- if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
- return;
-
- write_client_reg(VSYNIF, 0x00000000, TRUE);
- write_client_reg(PORT_ENB, 0x00000002, TRUE);
- write_client_reg(ASY_DATA, 0x80000007, TRUE);
- write_client_reg(ASY_DATB, 0x00004016, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(2);
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- write_client_reg(ASY_DATB, 0x00000019, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(2);
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- write_client_reg(ASY_DATB, 0x0000000B, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(2);
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- write_client_reg(ASY_DATB, 0x00000002, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(4);
- write_client_reg(ASY_DATA, 0x80000010, TRUE);
- write_client_reg(ASY_DATB, 0x00000300, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(4);
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- write_client_reg(ASY_DATB, 0x00000000, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(2);
- write_client_reg(ASY_DATA, 0x80000007, TRUE);
- write_client_reg(ASY_DATB, 0x00004004, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(2);
- write_client_reg(PORT, 0x00000000, TRUE);
- write_client_reg(PXL, 0x00000000, TRUE);
- write_client_reg(START, 0x00000000, TRUE);
- write_client_reg(VSYNIF, 0x00000001, TRUE);
- write_client_reg(PORT_ENB, 0x00000001, TRUE);
- write_client_reg(REGENB, 0x00000001, TRUE);
- mddi_toshiba_state_transition(TOSHIBA_STATE_SEC_NORMAL_MODE,
- TOSHIBA_STATE_PRIM_SEC_STANDBY);
-}
-
-static void toshiba_sec_cont_update_start(struct msm_fb_data_type *mfd)
-{
-
- if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
- return;
-
- write_client_reg(VSYNIF, 0x00000000, TRUE);
- write_client_reg(PORT_ENB, 0x00000002, TRUE);
- write_client_reg(INTMASK, 0x00000001, TRUE);
- write_client_reg(TTBUSSEL, 0x0000000B, TRUE);
- write_client_reg(MONI, 0x00000008, TRUE);
- write_client_reg(CLKENB, 0x000000EF, TRUE);
- write_client_reg(CLKENB, 0x000010EF, TRUE);
- write_client_reg(CLKENB, 0x000011EF, TRUE);
- write_client_reg(BITMAP4, 0x00DC00B0, TRUE);
- write_client_reg(HCYCLE, 0x0000006B, TRUE);
- write_client_reg(HSW, 0x00000003, TRUE);
- write_client_reg(HDE_START, 0x00000002, TRUE);
- write_client_reg(HDE_SIZE, 0x00000057, TRUE);
- write_client_reg(VCYCLE, 0x000000E6, TRUE);
- write_client_reg(VSW, 0x00000001, TRUE);
- write_client_reg(VDE_START, 0x00000003, TRUE);
- write_client_reg(VDE_SIZE, 0x000000DB, TRUE);
- write_client_reg(WRSTB, 0x00000015, TRUE);
- write_client_reg(MPLFBUF, 0x00000004, TRUE);
- write_client_reg(ASY_DATA, 0x80000021, TRUE);
- write_client_reg(ASY_DATB, 0x00000000, TRUE);
- write_client_reg(ASY_DATC, 0x80000022, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000007, TRUE);
- write_client_reg(PXL, 0x00000089, TRUE);
- write_client_reg(VSYNIF, 0x00000001, TRUE);
- mddi_wait(2);
-}
-
-static void toshiba_sec_cont_update_stop(struct msm_fb_data_type *mfd)
-{
- if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
- return;
-
- write_client_reg(PXL, 0x00000000, TRUE);
- write_client_reg(VSYNIF, 0x00000000, TRUE);
- write_client_reg(START, 0x00000000, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
- mddi_wait(3);
- write_client_reg(SRST, 0x00000002, TRUE);
- mddi_wait(3);
- write_client_reg(SRST, 0x00000003, TRUE);
-}
-
-static void toshiba_sec_backlight_on(struct msm_fb_data_type *mfd)
-{
- if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
- return;
-
- write_client_reg(TIMER0CTRL, 0x00000060, TRUE);
- write_client_reg(TIMER0LOAD, 0x00001388, TRUE);
- write_client_reg(PWM0OFF, 0x00000001, TRUE);
- write_client_reg(TIMER1CTRL, 0x00000060, TRUE);
- write_client_reg(TIMER1LOAD, 0x00001388, TRUE);
- write_client_reg(PWM1OFF, 0x00001387, TRUE);
- write_client_reg(TIMER0CTRL, 0x000000E0, TRUE);
- write_client_reg(TIMER1CTRL, 0x000000E0, TRUE);
- write_client_reg(PWMCR, 0x00000003, TRUE);
-}
-
-static void toshiba_sec_sleep_in(struct msm_fb_data_type *mfd)
-{
- if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
- return;
-
- write_client_reg(VSYNIF, 0x00000000, TRUE);
- write_client_reg(PORT_ENB, 0x00000002, TRUE);
- write_client_reg(ASY_DATA, 0x80000007, TRUE);
- write_client_reg(ASY_DATB, 0x00004016, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(2);
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- write_client_reg(ASY_DATB, 0x00000019, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(2);
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- write_client_reg(ASY_DATB, 0x0000000B, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(2);
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- write_client_reg(ASY_DATB, 0x00000002, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(4);
- write_client_reg(ASY_DATA, 0x80000010, TRUE);
- write_client_reg(ASY_DATB, 0x00000300, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(4);
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- write_client_reg(ASY_DATB, 0x00000000, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(2);
- write_client_reg(ASY_DATA, 0x80000007, TRUE);
- write_client_reg(ASY_DATB, 0x00004004, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(2);
- write_client_reg(PORT, 0x00000000, TRUE);
- write_client_reg(PXL, 0x00000000, TRUE);
- write_client_reg(START, 0x00000000, TRUE);
- write_client_reg(REGENB, 0x00000001, TRUE);
- /* Sleep in sequence */
- write_client_reg(ASY_DATA, 0x80000010, TRUE);
- write_client_reg(ASY_DATB, 0x00000302, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
-}
-
-static void toshiba_sec_sleep_out(struct msm_fb_data_type *mfd)
-{
- if (TM_GET_PID(mfd->panel.id) == LCD_TOSHIBA_2P4_WVGA_PT)
- return;
-
- write_client_reg(VSYNIF, 0x00000000, TRUE);
- write_client_reg(PORT_ENB, 0x00000002, TRUE);
- write_client_reg(ASY_DATA, 0x80000010, TRUE);
- write_client_reg(ASY_DATB, 0x00000300, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- /* Display ON sequence */
- write_client_reg(ASY_DATA, 0x80000011, TRUE);
- write_client_reg(ASY_DATB, 0x00000812, TRUE);
- write_client_reg(ASY_DATC, 0x80000012, TRUE);
- write_client_reg(ASY_DATD, 0x00000003, TRUE);
- write_client_reg(ASY_DATE, 0x80000013, TRUE);
- write_client_reg(ASY_DATF, 0x00000909, TRUE);
- write_client_reg(ASY_DATG, 0x80000010, TRUE);
- write_client_reg(ASY_DATH, 0x00000040, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000001, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000000, TRUE);
- mddi_wait(4);
- write_client_reg(ASY_DATA, 0x80000010, TRUE);
- write_client_reg(ASY_DATB, 0x00000340, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(6);
- write_client_reg(ASY_DATA, 0x80000010, TRUE);
- write_client_reg(ASY_DATB, 0x00003340, TRUE);
- write_client_reg(ASY_DATC, 0x80000007, TRUE);
- write_client_reg(ASY_DATD, 0x00004007, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000009, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000008, TRUE);
- mddi_wait(1);
- write_client_reg(ASY_DATA, 0x80000007, TRUE);
- write_client_reg(ASY_DATB, 0x00004017, TRUE);
- write_client_reg(ASY_DATC, 0x8000005B, TRUE);
- write_client_reg(ASY_DATD, 0x00000000, TRUE);
- write_client_reg(ASY_DATE, 0x80000059, TRUE);
- write_client_reg(ASY_DATF, 0x00000011, TRUE);
- write_client_reg(ASY_CMDSET, 0x0000000D, TRUE);
- write_client_reg(ASY_CMDSET, 0x0000000C, TRUE);
- mddi_wait(2);
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- write_client_reg(ASY_DATB, 0x00000019, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(2);
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- write_client_reg(ASY_DATB, 0x00000079, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(2);
- write_client_reg(ASY_DATA, 0x80000059, TRUE);
- write_client_reg(ASY_DATB, 0x000003FD, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000005, TRUE);
- write_client_reg(ASY_CMDSET, 0x00000004, TRUE);
- mddi_wait(2);
-}
-
-static void mddi_toshiba_lcd_set_backlight(struct msm_fb_data_type *mfd)
-{
- int32 level;
- int ret = -EPERM;
- int max = mfd->panel_info.bl_max;
- int min = mfd->panel_info.bl_min;
-
- if (mddi_toshiba_pdata && mddi_toshiba_pdata->pmic_backlight) {
- ret = mddi_toshiba_pdata->pmic_backlight(mfd->bl_level);
- if (!ret)
- return;
- }
-
- if (ret && mddi_toshiba_pdata && mddi_toshiba_pdata->backlight_level) {
- level = mddi_toshiba_pdata->backlight_level(mfd->bl_level,
- max, min);
-
- if (level < 0)
- return;
-
- if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA)
- write_client_reg(TIMER0LOAD, 0x00001388, TRUE);
- } else {
- if (!max)
- level = 0;
- else
- level = (mfd->bl_level * 4999) / max;
- }
-
- write_client_reg(PWM0OFF, level, TRUE);
-}
-
-static void mddi_toshiba_vsync_set_handler(msm_fb_vsync_handler_type handler, /* ISR to be executed */
- void *arg)
-{
- boolean error = FALSE;
- unsigned long flags;
-
- /* Disable interrupts */
- spin_lock_irqsave(&mddi_host_spin_lock, flags);
- /* INTLOCK(); */
-
- if (mddi_toshiba_vsync_handler != NULL) {
- error = TRUE;
- } else {
- /* Register the handler for this particular GROUP interrupt source */
- mddi_toshiba_vsync_handler = handler;
- mddi_toshiba_vsync_handler_arg = arg;
- }
-
- /* Restore interrupts */
- spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
- /* MDDI_INTFREE(); */
- if (error) {
- MDDI_MSG_ERR("MDDI: Previous Vsync handler never called\n");
- } else {
- /* Enable the vsync wakeup */
- mddi_queue_register_write(INTMSK, 0x0000, FALSE, 0);
-
- mddi_toshiba_vsync_attempts = 1;
- mddi_vsync_detect_enabled = TRUE;
- }
-} /* mddi_toshiba_vsync_set_handler */
-
-static void mddi_toshiba_lcd_vsync_detected(boolean detected)
-{
- /* static timetick_type start_time = 0; */
- static struct timeval start_time;
- static boolean first_time = TRUE;
- /* uint32 mdp_cnt_val = 0; */
- /* timetick_type elapsed_us; */
- struct timeval now;
- uint32 elapsed_us;
- uint32 num_vsyncs;
-
- if ((detected) || (mddi_toshiba_vsync_attempts > 5)) {
- if ((detected) && (mddi_toshiba_monitor_refresh_value)) {
- /* if (start_time != 0) */
- if (!first_time) {
- jiffies_to_timeval(jiffies, &now);
- elapsed_us =
- (now.tv_sec - start_time.tv_sec) * 1000000 +
- now.tv_usec - start_time.tv_usec;
- /*
- * LCD is configured for a refresh every usecs,
- * so to determine the number of vsyncs that
- * have occurred since the last measurement
- * add half that to the time difference and
- * divide by the refresh rate.
- */
- num_vsyncs = (elapsed_us +
- (mddi_toshiba_usecs_per_refresh >>
- 1)) /
- mddi_toshiba_usecs_per_refresh;
- /*
- * LCD is configured for * hsyncs (rows) per
- * refresh cycle. Calculate new rows_per_second
- * value based upon these new measurements.
- * MDP can update with this new value.
- */
- mddi_toshiba_rows_per_second =
- (mddi_toshiba_rows_per_refresh * 1000 *
- num_vsyncs) / (elapsed_us / 1000);
- }
- /* start_time = timetick_get(); */
- first_time = FALSE;
- jiffies_to_timeval(jiffies, &start_time);
- if (mddi_toshiba_report_refresh_measurements) {
- (void)mddi_queue_register_read_int(VPOS,
- &mddi_toshiba_curr_vpos);
- /* mdp_cnt_val = MDP_LINE_COUNT; */
- }
- }
- /* if detected = TRUE, client initiated wakeup was detected */
- if (mddi_toshiba_vsync_handler != NULL) {
- (*mddi_toshiba_vsync_handler)
- (mddi_toshiba_vsync_handler_arg);
- mddi_toshiba_vsync_handler = NULL;
- }
- mddi_vsync_detect_enabled = FALSE;
- mddi_toshiba_vsync_attempts = 0;
- /* need to disable the interrupt wakeup */
- if (!mddi_queue_register_write_int(INTMSK, 0x0001))
- MDDI_MSG_ERR("Vsync interrupt disable failed!\n");
- if (!detected) {
- /* give up after 5 failed attempts but show error */
- MDDI_MSG_NOTICE("Vsync detection failed!\n");
- } else if ((mddi_toshiba_monitor_refresh_value) &&
- (mddi_toshiba_report_refresh_measurements)) {
- MDDI_MSG_NOTICE(" Last Line Counter=%d!\n",
- mddi_toshiba_curr_vpos);
- /* MDDI_MSG_NOTICE(" MDP Line Counter=%d!\n",mdp_cnt_val); */
- MDDI_MSG_NOTICE(" Lines Per Second=%d!\n",
- mddi_toshiba_rows_per_second);
- }
- /* clear the interrupt */
- if (!mddi_queue_register_write_int(INTFLG, 0x0001))
- MDDI_MSG_ERR("Vsync interrupt clear failed!\n");
- } else {
- /* if detected = FALSE, we woke up from hibernation, but did not
- * detect client initiated wakeup.
- */
- mddi_toshiba_vsync_attempts++;
- }
-}
-
-static void mddi_toshiba_prim_init(struct msm_fb_data_type *mfd)
-{
-
- switch (toshiba_state) {
- case TOSHIBA_STATE_PRIM_SEC_READY:
- break;
- case TOSHIBA_STATE_OFF:
- toshiba_state = TOSHIBA_STATE_PRIM_SEC_STANDBY;
- toshiba_common_initial_setup(mfd);
- break;
- case TOSHIBA_STATE_PRIM_SEC_STANDBY:
- toshiba_common_initial_setup(mfd);
- break;
- case TOSHIBA_STATE_SEC_NORMAL_MODE:
- toshiba_sec_cont_update_stop(mfd);
- toshiba_sec_sleep_in(mfd);
- toshiba_sec_sleep_out(mfd);
- toshiba_sec_lcd_off(mfd);
- toshiba_common_initial_setup(mfd);
- break;
- default:
- MDDI_MSG_ERR("mddi_toshiba_prim_init from state %d\n",
- toshiba_state);
- }
-
- toshiba_prim_start(mfd);
- if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA)
- gordon_disp_init();
- mddi_host_write_pix_attr_reg(0x00C3);
-}
-
-static void mddi_toshiba_sec_init(struct msm_fb_data_type *mfd)
-{
-
- switch (toshiba_state) {
- case TOSHIBA_STATE_PRIM_SEC_READY:
- break;
- case TOSHIBA_STATE_PRIM_SEC_STANDBY:
- toshiba_common_initial_setup(mfd);
- break;
- case TOSHIBA_STATE_PRIM_NORMAL_MODE:
- toshiba_prim_lcd_off(mfd);
- toshiba_common_initial_setup(mfd);
- break;
- default:
- MDDI_MSG_ERR("mddi_toshiba_sec_init from state %d\n",
- toshiba_state);
- }
-
- toshiba_sec_start(mfd);
- toshiba_sec_backlight_on(mfd);
- toshiba_sec_cont_update_start(mfd);
- mddi_host_write_pix_attr_reg(0x0400);
-}
-
-static void mddi_toshiba_lcd_powerdown(struct msm_fb_data_type *mfd)
-{
- switch (toshiba_state) {
- case TOSHIBA_STATE_PRIM_SEC_READY:
- mddi_toshiba_prim_init(mfd);
- mddi_toshiba_lcd_powerdown(mfd);
- return;
- case TOSHIBA_STATE_PRIM_SEC_STANDBY:
- break;
- case TOSHIBA_STATE_PRIM_NORMAL_MODE:
- toshiba_prim_lcd_off(mfd);
- break;
- case TOSHIBA_STATE_SEC_NORMAL_MODE:
- toshiba_sec_cont_update_stop(mfd);
- toshiba_sec_sleep_in(mfd);
- toshiba_sec_sleep_out(mfd);
- toshiba_sec_lcd_off(mfd);
- break;
- default:
- MDDI_MSG_ERR("mddi_toshiba_lcd_powerdown from state %d\n",
- toshiba_state);
- }
-}
-
-static int mddi_sharpgordon_firsttime = 1;
-
-static int mddi_toshiba_lcd_on(struct platform_device *pdev)
-{
- struct msm_fb_data_type *mfd;
- mfd = platform_get_drvdata(pdev);
- if (!mfd)
- return -ENODEV;
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- if (TM_GET_DID(mfd->panel.id) == TOSHIBA_VGA_PRIM)
- mddi_toshiba_prim_init(mfd);
- else
- mddi_toshiba_sec_init(mfd);
- if (TM_GET_PID(mfd->panel.id) == LCD_SHARP_2P4_VGA) {
- if (mddi_sharpgordon_firsttime) {
- mddi_sharpgordon_firsttime = 0;
- write_client_reg(REGENB, 0x00000001, TRUE);
- }
- }
- return 0;
-}
-
-static int mddi_toshiba_lcd_off(struct platform_device *pdev)
-{
- mddi_toshiba_lcd_powerdown(platform_get_drvdata(pdev));
- return 0;
-}
-
-static int __init mddi_toshiba_lcd_probe(struct platform_device *pdev)
-{
- if (pdev->id == 0) {
- mddi_toshiba_pdata = pdev->dev.platform_data;
- return 0;
- }
-
- msm_fb_add_device(pdev);
-
- return 0;
-}
-
-static struct platform_driver this_driver = {
- .probe = mddi_toshiba_lcd_probe,
- .driver = {
- .name = "mddi_toshiba",
- },
-};
-
-static struct msm_fb_panel_data toshiba_panel_data = {
- .on = mddi_toshiba_lcd_on,
- .off = mddi_toshiba_lcd_off,
-};
-
-static int ch_used[3];
-
-int mddi_toshiba_device_register(struct msm_panel_info *pinfo,
- u32 channel, u32 panel)
-{
- struct platform_device *pdev = NULL;
- int ret;
-
- if ((channel >= 3) || ch_used[channel])
- return -ENODEV;
-
- if ((channel != TOSHIBA_VGA_PRIM) &&
- mddi_toshiba_pdata && mddi_toshiba_pdata->panel_num)
- if (mddi_toshiba_pdata->panel_num() < 2)
- return -ENODEV;
-
- ch_used[channel] = TRUE;
-
- pdev = platform_device_alloc("mddi_toshiba", (panel << 8)|channel);
- if (!pdev)
- return -ENOMEM;
-
- if (channel == TOSHIBA_VGA_PRIM) {
- toshiba_panel_data.set_backlight =
- mddi_toshiba_lcd_set_backlight;
-
- if (pinfo->lcd.vsync_enable) {
- toshiba_panel_data.set_vsync_notifier =
- mddi_toshiba_vsync_set_handler;
- mddi_lcd.vsync_detected =
- mddi_toshiba_lcd_vsync_detected;
- }
- } else {
- toshiba_panel_data.set_backlight = NULL;
- toshiba_panel_data.set_vsync_notifier = NULL;
- }
-
- toshiba_panel_data.panel_info = *pinfo;
-
- ret = platform_device_add_data(pdev, &toshiba_panel_data,
- sizeof(toshiba_panel_data));
- if (ret) {
- printk(KERN_ERR
- "%s: platform_device_add_data failed!\n", __func__);
- goto err_device_put;
- }
-
- ret = platform_device_add(pdev);
- if (ret) {
- printk(KERN_ERR
- "%s: platform_device_register failed!\n", __func__);
- goto err_device_put;
- }
-
- return 0;
-
-err_device_put:
- platform_device_put(pdev);
- return ret;
-}
-
-static int __init mddi_toshiba_lcd_init(void)
-{
- return platform_driver_register(&this_driver);
-}
-
-module_init(mddi_toshiba_lcd_init);
diff --git a/drivers/staging/msm/mddi_toshiba.h b/drivers/staging/msm/mddi_toshiba.h
deleted file mode 100644
index cbeea0a26d6..00000000000
--- a/drivers/staging/msm/mddi_toshiba.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef MDDI_TOSHIBA_H
-#define MDDI_TOSHIBA_H
-
-#define TOSHIBA_VGA_PRIM 1
-#define TOSHIBA_VGA_SECD 2
-
-#define LCD_TOSHIBA_2P4_VGA 0
-#define LCD_TOSHIBA_2P4_WVGA 1
-#define LCD_TOSHIBA_2P4_WVGA_PT 2
-#define LCD_SHARP_2P4_VGA 3
-
-#define GPIO_BLOCK_BASE 0x150000
-#define SYSTEM_BLOCK2_BASE 0x170000
-
-#define GPIODIR (GPIO_BLOCK_BASE|0x04)
-#define GPIOSEL (SYSTEM_BLOCK2_BASE|0x00)
-#define GPIOPC (GPIO_BLOCK_BASE|0x28)
-#define GPIODATA (GPIO_BLOCK_BASE|0x00)
-
-#define write_client_reg(__X, __Y, __Z) {\
- mddi_queue_register_write(__X, __Y, TRUE, 0);\
-}
-
-#endif /* MDDI_TOSHIBA_H */
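
As an aside, a minimal sketch of how a panel driver could combine the write_client_reg() macro above with the mddi_queue_register_read() wrapper from mddihost.h for a read-modify-write on a client GPIO register; toshiba_gpio_set_bit() is a hypothetical helper used only for illustration and is not part of the deleted file.

/* Illustration only: read-modify-write of the client GPIODATA register.
 * Assumes "mddihost.h" is included alongside this header.
 */
static void toshiba_gpio_set_bit(uint32 mask)
{
	uint32 val;

	/* blocking read of the current GPIO state */
	mddi_queue_register_read(GPIODATA, &val, TRUE, 0);
	/* queue the write back with the requested bit set */
	write_client_reg(GPIODATA, val | mask, TRUE);
}
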
diff --git a/drivers/staging/msm/mddi_toshiba_vga.c b/drivers/staging/msm/mddi_toshiba_vga.c
deleted file mode 100644
index 7e61d3a5b8f..00000000000
--- a/drivers/staging/msm/mddi_toshiba_vga.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-#include "mddihost.h"
-#include "mddihosti.h"
-#include "mddi_toshiba.h"
-
-static uint32 read_client_reg(uint32 addr)
-{
- uint32 val;
- mddi_queue_register_read(addr, &val, TRUE, 0);
- return val;
-}
-
-static uint32 toshiba_lcd_gpio_read(void)
-{
- uint32 val;
-
- write_client_reg(GPIODIR, 0x0000000C, TRUE);
- write_client_reg(GPIOSEL, 0x00000000, TRUE);
- write_client_reg(GPIOSEL, 0x00000000, TRUE);
- write_client_reg(GPIOPC, 0x03CF00C0, TRUE);
- val = read_client_reg(GPIODATA) & 0x2C0;
-
- return val;
-}
-
-static u32 mddi_toshiba_panel_detect(void)
-{
- mddi_host_type host_idx = MDDI_HOST_PRIM;
- uint32 lcd_gpio;
- u32 mddi_toshiba_lcd = LCD_TOSHIBA_2P4_VGA;
-
- /* Toshiba display requires larger drive_lo value */
- mddi_host_reg_out(DRIVE_LO, 0x0050);
-
- lcd_gpio = toshiba_lcd_gpio_read();
- switch (lcd_gpio) {
- case 0x0080:
- mddi_toshiba_lcd = LCD_SHARP_2P4_VGA;
- break;
-
- case 0x00C0:
- default:
- mddi_toshiba_lcd = LCD_TOSHIBA_2P4_VGA;
- break;
- }
-
- return mddi_toshiba_lcd;
-}
-
-static int __init mddi_toshiba_vga_init(void)
-{
- int ret;
- struct msm_panel_info pinfo;
- u32 panel;
-
-#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
- u32 id;
-
- ret = msm_fb_detect_client("mddi_toshiba_vga");
- if (ret == -ENODEV)
- return 0;
-
- if (ret) {
- id = mddi_get_client_id();
- if ((id >> 16) != 0xD263)
- return 0;
- }
-#endif
-
- panel = mddi_toshiba_panel_detect();
-
- pinfo.xres = 480;
- pinfo.yres = 640;
- pinfo.type = MDDI_PANEL;
- pinfo.pdest = DISPLAY_1;
- pinfo.mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
- pinfo.wait_cycle = 0;
- pinfo.bpp = 18;
- pinfo.lcd.vsync_enable = TRUE;
- pinfo.lcd.refx100 = 6118;
- pinfo.lcd.v_back_porch = 6;
- pinfo.lcd.v_front_porch = 0;
- pinfo.lcd.v_pulse_width = 0;
- pinfo.lcd.hw_vsync_mode = FALSE;
- pinfo.lcd.vsync_notifier_period = (1 * HZ);
- pinfo.bl_max = 99;
- pinfo.bl_min = 1;
- pinfo.clk_rate = 122880000;
- pinfo.clk_min = 120000000;
- pinfo.clk_max = 200000000;
- pinfo.fb_num = 2;
-
- ret = mddi_toshiba_device_register(&pinfo, TOSHIBA_VGA_PRIM, panel);
- if (ret) {
- printk(KERN_ERR "%s: failed to register device!\n", __func__);
- return ret;
- }
-
- pinfo.xres = 176;
- pinfo.yres = 220;
- pinfo.type = MDDI_PANEL;
- pinfo.pdest = DISPLAY_2;
- pinfo.mddi.vdopkt = 0x400;
- pinfo.wait_cycle = 0;
- pinfo.bpp = 18;
- pinfo.clk_rate = 122880000;
- pinfo.clk_min = 120000000;
- pinfo.clk_max = 200000000;
- pinfo.fb_num = 2;
-
- ret = mddi_toshiba_device_register(&pinfo, TOSHIBA_VGA_SECD, panel);
- if (ret)
- printk(KERN_WARNING
- "%s: failed to register device!\n", __func__);
-
- return ret;
-}
-
-module_init(mddi_toshiba_vga_init);
diff --git a/drivers/staging/msm/mddi_toshiba_wvga_pt.c b/drivers/staging/msm/mddi_toshiba_wvga_pt.c
deleted file mode 100644
index fc7d4e0d294..00000000000
--- a/drivers/staging/msm/mddi_toshiba_wvga_pt.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "msm_fb.h"
-#include "mddihost.h"
-#include "mddihosti.h"
-#include "mddi_toshiba.h"
-
-static int __init mddi_toshiba_wvga_pt_init(void)
-{
- int ret;
- struct msm_panel_info pinfo;
-#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
- uint id;
-
- ret = msm_fb_detect_client("mddi_toshiba_wvga_pt");
- if (ret == -ENODEV)
- return 0;
-
- if (ret) {
- id = mddi_get_client_id();
- if (id != 0xd2638722)
- return 0;
- }
-#endif
-
- pinfo.xres = 480;
- pinfo.yres = 800;
- pinfo.type = MDDI_PANEL;
- pinfo.pdest = DISPLAY_1;
- pinfo.mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
- pinfo.wait_cycle = 0;
- pinfo.bpp = 18;
- pinfo.lcd.vsync_enable = FALSE;
- pinfo.bl_max = 15;
- pinfo.bl_min = 1;
- pinfo.clk_rate = 192000000;
- pinfo.clk_min = 190000000;
- pinfo.clk_max = 200000000;
- pinfo.fb_num = 2;
-
- ret = mddi_toshiba_device_register(&pinfo, TOSHIBA_VGA_PRIM,
- LCD_TOSHIBA_2P4_WVGA_PT);
- if (ret)
- printk(KERN_ERR "%s: failed to register device!\n", __func__);
-
- return ret;
-}
-
-module_init(mddi_toshiba_wvga_pt_init);
diff --git a/drivers/staging/msm/mddihost.c b/drivers/staging/msm/mddihost.c
deleted file mode 100644
index 58a86d5d995..00000000000
--- a/drivers/staging/msm/mddihost.c
+++ /dev/null
@@ -1,377 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-
-#include "msm_fb.h"
-#include "mddihost.h"
-#include "mddihosti.h"
-
-#include <linux/clk.h>
-#include <mach/clk.h>
-
-struct semaphore mddi_host_mutex;
-
-struct clk *mddi_io_clk;
-static boolean mddi_host_powered = FALSE;
-static boolean mddi_host_initialized = FALSE;
-extern uint32 *mddi_reg_read_value_ptr;
-
-mddi_lcd_func_type mddi_lcd;
-
-extern mddi_client_capability_type mddi_client_capability_pkt;
-
-#ifdef FEATURE_MDDI_HITACHI
-extern void mddi_hitachi_window_adjust(uint16 x1,
- uint16 x2, uint16 y1, uint16 y2);
-#endif
-
-extern void mddi_toshiba_lcd_init(void);
-
-#ifdef FEATURE_MDDI_S6D0142
-extern void mddi_s6d0142_lcd_init(void);
-extern void mddi_s6d0142_window_adjust(uint16 x1,
- uint16 x2,
- uint16 y1,
- uint16 y2,
- mddi_llist_done_cb_type done_cb);
-#endif
-
-void mddi_init(void)
-{
- if (mddi_host_initialized)
- return;
-
- mddi_host_initialized = TRUE;
-
- sema_init(&mddi_host_mutex, 1);
-
- if (!mddi_host_powered) {
- down(&mddi_host_mutex);
- mddi_host_init(MDDI_HOST_PRIM);
- mddi_host_powered = TRUE;
- up(&mddi_host_mutex);
- mdelay(10);
- }
-}
-
-int mddi_host_register_read(uint32 reg_addr,
- uint32 *reg_value_ptr, boolean wait, mddi_host_type host) {
- mddi_linked_list_type *curr_llist_ptr;
- mddi_register_access_packet_type *regacc_pkt_ptr;
- uint16 curr_llist_idx;
- int ret = 0;
-
- if (in_interrupt())
- MDDI_MSG_CRIT("Called from ISR context\n");
-
- if (!mddi_host_powered) {
- MDDI_MSG_ERR("MDDI powered down!\n");
- mddi_init();
- }
-
- down(&mddi_host_mutex);
-
- mddi_reg_read_value_ptr = reg_value_ptr;
- curr_llist_idx = mddi_get_reg_read_llist_item(host, TRUE);
- if (curr_llist_idx == UNASSIGNED_INDEX) {
- up(&mddi_host_mutex);
-
- /* need to change this to some sort of wait */
- MDDI_MSG_ERR("Attempting to queue up more than 1 reg read\n");
- return -EINVAL;
- }
-
- curr_llist_ptr = &llist_extern[host][curr_llist_idx];
- curr_llist_ptr->link_controller_flags = 0x11;
- curr_llist_ptr->packet_header_count = 14;
- curr_llist_ptr->packet_data_count = 0;
-
- curr_llist_ptr->next_packet_pointer = NULL;
- curr_llist_ptr->packet_data_pointer = NULL;
- curr_llist_ptr->reserved = 0;
-
- regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;
-
- regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count;
- regacc_pkt_ptr->packet_type = 146; /* register access packet */
- regacc_pkt_ptr->bClient_ID = 0;
- regacc_pkt_ptr->read_write_info = 0x8001;
- regacc_pkt_ptr->register_address = reg_addr;
-
- /* now adjust pointers */
- mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, wait,
- NULL, host);
- /* need to check if we can write the pointer or not */
-
- up(&mddi_host_mutex);
-
- if (wait) {
- int wait_ret;
-
- mddi_linked_list_notify_type *llist_notify_ptr;
- llist_notify_ptr = &llist_extern_notify[host][curr_llist_idx];
- wait_ret = wait_for_completion_timeout(
- &(llist_notify_ptr->done_comp), 5 * HZ);
-
- if (wait_ret <= 0)
- ret = -EBUSY;
-
- if (wait_ret < 0)
- printk(KERN_ERR "%s: failed to wait for completion!\n",
- __func__);
- else if (!wait_ret)
- printk(KERN_ERR "%s: Timed out waiting!\n", __func__);
- }
-
- MDDI_MSG_DEBUG("Reg Read value=0x%x\n", *reg_value_ptr);
-
- return ret;
-} /* mddi_host_register_read */
-
-int mddi_host_register_write(uint32 reg_addr,
- uint32 reg_val, enum mddi_data_packet_size_type packet_size,
- boolean wait, mddi_llist_done_cb_type done_cb, mddi_host_type host) {
- mddi_linked_list_type *curr_llist_ptr;
- mddi_linked_list_type *curr_llist_dma_ptr;
- mddi_register_access_packet_type *regacc_pkt_ptr;
- uint16 curr_llist_idx;
- int ret = 0;
-
- if (in_interrupt())
- MDDI_MSG_CRIT("Called from ISR context\n");
-
- if (!mddi_host_powered) {
- MDDI_MSG_ERR("MDDI powered down!\n");
- mddi_init();
- }
-
- down(&mddi_host_mutex);
-
- curr_llist_idx = mddi_get_next_free_llist_item(host, TRUE);
- curr_llist_ptr = &llist_extern[host][curr_llist_idx];
- curr_llist_dma_ptr = &llist_dma_extern[host][curr_llist_idx];
-
- curr_llist_ptr->link_controller_flags = 1;
- curr_llist_ptr->packet_header_count = 14;
- curr_llist_ptr->packet_data_count = 4;
-
- curr_llist_ptr->next_packet_pointer = NULL;
- curr_llist_ptr->reserved = 0;
-
- regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;
-
- regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count +
- (uint16)packet_size;
- regacc_pkt_ptr->packet_type = 146; /* register access packet */
- regacc_pkt_ptr->bClient_ID = 0;
- regacc_pkt_ptr->read_write_info = 0x0001;
- regacc_pkt_ptr->register_address = reg_addr;
- regacc_pkt_ptr->register_data_list = reg_val;
-
- MDDI_MSG_DEBUG("Reg Access write reg=0x%x, value=0x%x\n",
- regacc_pkt_ptr->register_address,
- regacc_pkt_ptr->register_data_list);
-
- regacc_pkt_ptr = &curr_llist_dma_ptr->packet_header.register_pkt;
- curr_llist_ptr->packet_data_pointer =
- (void *)(&regacc_pkt_ptr->register_data_list);
-
- /* now adjust pointers */
- mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, wait,
- done_cb, host);
-
- up(&mddi_host_mutex);
-
- if (wait) {
- int wait_ret;
-
- mddi_linked_list_notify_type *llist_notify_ptr;
- llist_notify_ptr = &llist_extern_notify[host][curr_llist_idx];
- wait_ret = wait_for_completion_timeout(
- &(llist_notify_ptr->done_comp), 5 * HZ);
-
- if (wait_ret <= 0)
- ret = -EBUSY;
-
- if (wait_ret < 0)
- printk(KERN_ERR "%s: failed to wait for completion!\n",
- __func__);
- else if (!wait_ret)
- printk(KERN_ERR "%s: Timed out waiting!\n", __func__);
- }
-
- return ret;
-} /* mddi_host_register_write */
-
-boolean mddi_host_register_read_int
- (uint32 reg_addr, uint32 *reg_value_ptr, mddi_host_type host) {
- mddi_linked_list_type *curr_llist_ptr;
- mddi_register_access_packet_type *regacc_pkt_ptr;
- uint16 curr_llist_idx;
-
- if (!in_interrupt())
- MDDI_MSG_CRIT("Called from TASK context\n");
-
- if (!mddi_host_powered) {
- MDDI_MSG_ERR("MDDI powered down!\n");
- return FALSE;
- }
-
- if (down_trylock(&mddi_host_mutex) != 0)
- return FALSE;
-
- mddi_reg_read_value_ptr = reg_value_ptr;
- curr_llist_idx = mddi_get_reg_read_llist_item(host, FALSE);
- if (curr_llist_idx == UNASSIGNED_INDEX) {
- up(&mddi_host_mutex);
- return FALSE;
- }
-
- curr_llist_ptr = &llist_extern[host][curr_llist_idx];
- curr_llist_ptr->link_controller_flags = 0x11;
- curr_llist_ptr->packet_header_count = 14;
- curr_llist_ptr->packet_data_count = 0;
-
- curr_llist_ptr->next_packet_pointer = NULL;
- curr_llist_ptr->packet_data_pointer = NULL;
- curr_llist_ptr->reserved = 0;
-
- regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;
-
- regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count;
- regacc_pkt_ptr->packet_type = 146; /* register access packet */
- regacc_pkt_ptr->bClient_ID = 0;
- regacc_pkt_ptr->read_write_info = 0x8001;
- regacc_pkt_ptr->register_address = reg_addr;
-
- /* now adjust pointers */
- mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, FALSE,
- NULL, host);
- /* need to check if we can write the pointer or not */
-
- up(&mddi_host_mutex);
-
- return TRUE;
-
-} /* mddi_host_register_read_int */
-
-boolean mddi_host_register_write_int
- (uint32 reg_addr,
- uint32 reg_val, mddi_llist_done_cb_type done_cb, mddi_host_type host) {
- mddi_linked_list_type *curr_llist_ptr;
- mddi_linked_list_type *curr_llist_dma_ptr;
- mddi_register_access_packet_type *regacc_pkt_ptr;
- uint16 curr_llist_idx;
-
- if (!in_interrupt())
- MDDI_MSG_CRIT("Called from TASK context\n");
-
- if (!mddi_host_powered) {
- MDDI_MSG_ERR("MDDI powered down!\n");
- return FALSE;
- }
-
- if (down_trylock(&mddi_host_mutex) != 0)
- return FALSE;
-
- curr_llist_idx = mddi_get_next_free_llist_item(host, FALSE);
- if (curr_llist_idx == UNASSIGNED_INDEX) {
- up(&mddi_host_mutex);
- return FALSE;
- }
-
- curr_llist_ptr = &llist_extern[host][curr_llist_idx];
- curr_llist_dma_ptr = &llist_dma_extern[host][curr_llist_idx];
-
- curr_llist_ptr->link_controller_flags = 1;
- curr_llist_ptr->packet_header_count = 14;
- curr_llist_ptr->packet_data_count = 4;
-
- curr_llist_ptr->next_packet_pointer = NULL;
- curr_llist_ptr->reserved = 0;
-
- regacc_pkt_ptr = &curr_llist_ptr->packet_header.register_pkt;
-
- regacc_pkt_ptr->packet_length = curr_llist_ptr->packet_header_count + 4;
- regacc_pkt_ptr->packet_type = 146; /* register access packet */
- regacc_pkt_ptr->bClient_ID = 0;
- regacc_pkt_ptr->read_write_info = 0x0001;
- regacc_pkt_ptr->register_address = reg_addr;
- regacc_pkt_ptr->register_data_list = reg_val;
-
- regacc_pkt_ptr = &curr_llist_dma_ptr->packet_header.register_pkt;
- curr_llist_ptr->packet_data_pointer =
- (void *)(&(regacc_pkt_ptr->register_data_list));
-
- /* now adjust pointers */
- mddi_queue_forward_packets(curr_llist_idx, curr_llist_idx, FALSE,
- done_cb, host);
- up(&mddi_host_mutex);
-
- return TRUE;
-
-} /* mddi_host_register_write_int */
-
-void mddi_wait(uint16 time_ms)
-{
- mdelay(time_ms);
-}
-
-void mddi_client_lcd_vsync_detected(boolean detected)
-{
- if (mddi_lcd.vsync_detected)
- (*mddi_lcd.vsync_detected) (detected);
-}
-
-/* extended version of function includes done callback */
-void mddi_window_adjust_ext(struct msm_fb_data_type *mfd,
- uint16 x1,
- uint16 x2,
- uint16 y1,
- uint16 y2, mddi_llist_done_cb_type done_cb)
-{
-#ifdef FEATURE_MDDI_HITACHI
- if (mfd->panel.id == HITACHI)
- mddi_hitachi_window_adjust(x1, x2, y1, y2);
-#elif defined(FEATURE_MDDI_S6D0142)
- if (mfd->panel.id == MDDI_LCD_S6D0142)
- mddi_s6d0142_window_adjust(x1, x2, y1, y2, done_cb);
-#else
- /* Do nothing then... except avoid lint/compiler warnings */
- (void)x1;
- (void)x2;
- (void)y1;
- (void)y2;
- (void)done_cb;
-#endif
-}
-
-void mddi_window_adjust(struct msm_fb_data_type *mfd,
- uint16 x1, uint16 x2, uint16 y1, uint16 y2)
-{
- mddi_window_adjust_ext(mfd, x1, x2, y1, y2, NULL);
-}
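
For context, the file above exposes two write paths with different locking behaviour: mddi_host_register_write() takes the host mutex and may sleep on a completion, while mddi_host_register_write_int() uses down_trylock() and simply reports failure when it cannot queue. Below is a hedged sketch of how a caller might choose between them; client_write_any_context() is a hypothetical helper, not part of this patch.

/* Illustration only: pick the write path that is safe for the current
 * context. The register address and value are caller-supplied.
 */
static void client_write_any_context(uint32 reg, uint32 val)
{
	if (in_interrupt()) {
		/* never sleeps; FALSE means the packet was not queued */
		if (!mddi_host_register_write_int(reg, val, NULL,
						  MDDI_HOST_PRIM))
			MDDI_MSG_ERR("deferred register write dropped\n");
	} else {
		/* may sleep on the host mutex and wait for completion */
		mddi_host_register_write(reg, val, MDDI_DATA_PACKET_4_BYTES,
					 TRUE, NULL, MDDI_HOST_PRIM);
	}
}
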
diff --git a/drivers/staging/msm/mddihost.h b/drivers/staging/msm/mddihost.h
deleted file mode 100644
index 8f532d05f83..00000000000
--- a/drivers/staging/msm/mddihost.h
+++ /dev/null
@@ -1,207 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef MDDIHOST_H
-#define MDDIHOST_H
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include "linux/proc_fs.h"
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/clk.h>
-
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-
-#include "msm_fb_panel.h"
-
-#undef FEATURE_MDDI_MC4
-#undef FEATURE_MDDI_S6D0142
-#undef FEATURE_MDDI_HITACHI
-#define FEATURE_MDDI_SHARP
-#define FEATURE_MDDI_TOSHIBA
-#undef FEATURE_MDDI_E751
-#define FEATURE_MDDI_CORONA
-#define FEATURE_MDDI_PRISM
-
-#define T_MSM7500
-
-typedef enum {
- format_16bpp,
- format_18bpp,
- format_24bpp
-} mddi_video_format;
-
-typedef enum {
- MDDI_LCD_NONE = 0,
- MDDI_LCD_MC4,
- MDDI_LCD_S6D0142,
- MDDI_LCD_SHARP,
- MDDI_LCD_E751,
- MDDI_LCD_CORONA,
- MDDI_LCD_HITACHI,
- MDDI_LCD_TOSHIBA,
- MDDI_LCD_PRISM,
- MDDI_LCD_TP2,
- MDDI_NUM_LCD_TYPES,
- MDDI_LCD_DEFAULT = MDDI_LCD_TOSHIBA
-} mddi_lcd_type;
-
-typedef enum {
- MDDI_HOST_PRIM = 0,
- MDDI_HOST_EXT,
- MDDI_NUM_HOST_CORES
-} mddi_host_type;
-
-typedef enum {
- MDDI_DRIVER_RESET, /* host core registers have not been written. */
- MDDI_DRIVER_DISABLED, /* registers written, interrupts disabled. */
- MDDI_DRIVER_ENABLED /* registers written, interrupts enabled. */
-} mddi_host_driver_state_type;
-
-typedef enum {
- MDDI_GPIO_INT_0 = 0,
- MDDI_GPIO_INT_1,
- MDDI_GPIO_INT_2,
- MDDI_GPIO_INT_3,
- MDDI_GPIO_INT_4,
- MDDI_GPIO_INT_5,
- MDDI_GPIO_INT_6,
- MDDI_GPIO_INT_7,
- MDDI_GPIO_INT_8,
- MDDI_GPIO_INT_9,
- MDDI_GPIO_INT_10,
- MDDI_GPIO_INT_11,
- MDDI_GPIO_INT_12,
- MDDI_GPIO_INT_13,
- MDDI_GPIO_INT_14,
- MDDI_GPIO_INT_15,
- MDDI_GPIO_NUM_INTS
-} mddi_gpio_int_type;
-
-enum mddi_data_packet_size_type {
- MDDI_DATA_PACKET_4_BYTES = 4,
- MDDI_DATA_PACKET_8_BYTES = 8,
- MDDI_DATA_PACKET_12_BYTES = 12,
- MDDI_DATA_PACKET_16_BYTES = 16,
- MDDI_DATA_PACKET_24_BYTES = 24
-};
-
-typedef struct {
- uint32 addr;
- uint32 value;
-} mddi_reg_write_type;
-
-boolean mddi_vsync_set_handler(msm_fb_vsync_handler_type handler, void *arg);
-
-typedef void (*mddi_llist_done_cb_type) (void);
-
-typedef void (*mddi_rev_handler_type) (void *);
-
-boolean mddi_set_rev_handler(mddi_rev_handler_type handler, uint16 pkt_type);
-
-#define MDDI_DEFAULT_PRIM_PIX_ATTR 0xC3
-#define MDDI_DEFAULT_SECD_PIX_ATTR 0xC0
-
-typedef int gpio_int_polarity_type;
-typedef int gpio_int_handler_type;
-
-typedef struct {
- void (*vsync_detected) (boolean);
-} mddi_lcd_func_type;
-
-extern mddi_lcd_func_type mddi_lcd;
-void mddi_init(void);
-
-void mddi_powerdown(void);
-
-void mddi_host_start_ext_display(void);
-void mddi_host_stop_ext_display(void);
-
-extern spinlock_t mddi_host_spin_lock;
-#ifdef T_MSM7500
-void mddi_reset(void);
-#ifdef FEATURE_DUAL_PROC_MODEM_DISPLAY
-void mddi_host_switch_proc_control(boolean on);
-#endif
-#endif
-void mddi_host_exit_power_collapse(void);
-
-void mddi_queue_splash_screen
- (void *buf_ptr,
- boolean clear_area,
- int16 src_width,
- int16 src_starting_row,
- int16 src_starting_column,
- int16 num_of_rows,
- int16 num_of_columns, int16 dst_starting_row, int16 dst_starting_column);
-
-void mddi_queue_image
- (void *buf_ptr,
- uint8 stereo_video,
- boolean clear_area,
- int16 src_width,
- int16 src_starting_row,
- int16 src_starting_column,
- int16 num_of_rows,
- int16 num_of_columns, int16 dst_starting_row, int16 dst_starting_column);
-
-int mddi_host_register_read
- (uint32 reg_addr,
- uint32 *reg_value_ptr, boolean wait, mddi_host_type host_idx);
-int mddi_host_register_write
- (uint32 reg_addr, uint32 reg_val,
- enum mddi_data_packet_size_type packet_size,
- boolean wait, mddi_llist_done_cb_type done_cb, mddi_host_type host);
-boolean mddi_host_register_write_int
- (uint32 reg_addr,
- uint32 reg_val, mddi_llist_done_cb_type done_cb, mddi_host_type host);
-boolean mddi_host_register_read_int
- (uint32 reg_addr, uint32 *reg_value_ptr, mddi_host_type host_idx);
-void mddi_queue_register_write_static
- (uint32 reg_addr,
- uint32 reg_val, boolean wait, mddi_llist_done_cb_type done_cb);
-void mddi_queue_static_window_adjust
- (const mddi_reg_write_type *reg_write,
- uint16 num_writes, mddi_llist_done_cb_type done_cb);
-
-#define mddi_queue_register_read(reg, val_ptr, wait, sig) \
- mddi_host_register_read(reg, val_ptr, wait, MDDI_HOST_PRIM)
-#define mddi_queue_register_write(reg, val, wait, sig) \
- mddi_host_register_write(reg, val, MDDI_DATA_PACKET_4_BYTES,\
- wait, NULL, MDDI_HOST_PRIM)
-#define mddi_queue_register_write_extn(reg, val, pkt_size, wait, sig) \
- mddi_host_register_write(reg, val, pkt_size, \
- wait, NULL, MDDI_HOST_PRIM)
-#define mddi_queue_register_write_int(reg, val) \
- mddi_host_register_write_int(reg, val, NULL, MDDI_HOST_PRIM)
-#define mddi_queue_register_read_int(reg, val_ptr) \
- mddi_host_register_read_int(reg, val_ptr, MDDI_HOST_PRIM)
-#define mddi_queue_register_writes(reg_ptr, val, wait, sig) \
- mddi_host_register_writes(reg_ptr, val, wait, sig, MDDI_HOST_PRIM)
-
-void mddi_wait(uint16 time_ms);
-void mddi_assign_max_pkt_dimensions(uint16 image_cols,
- uint16 image_rows,
- uint16 bpp,
- uint16 *max_cols, uint16 * max_rows);
-uint16 mddi_assign_pkt_height(uint16 pkt_width, uint16 pkt_height, uint16 bpp);
-void mddi_queue_reverse_encapsulation(boolean wait);
-void mddi_disable(int lock);
-#endif /* MDDIHOST_H */
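
The callback type declared above (mddi_llist_done_cb_type, a void function taking no arguments) allows register writes to be queued without blocking. A minimal sketch of an asynchronous write using that hook follows; my_write_done() and queue_write_async() are hypothetical names used only for illustration.

/* Illustration only: queue a register write and get notified via a
 * completion callback instead of waiting in place.
 */
static void my_write_done(void)
{
	MDDI_MSG_DEBUG("register write completed\n");
}

static void queue_write_async(uint32 reg, uint32 val)
{
	mddi_host_register_write(reg, val, MDDI_DATA_PACKET_4_BYTES,
				 FALSE, my_write_done, MDDI_HOST_PRIM);
}
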
diff --git a/drivers/staging/msm/mddihost_e.c b/drivers/staging/msm/mddihost_e.c
deleted file mode 100644
index 7de5eda71ce..00000000000
--- a/drivers/staging/msm/mddihost_e.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-
-#include "msm_fb.h"
-#include "mddihost.h"
-#include "mddihosti.h"
-
-#include <linux/clk.h>
-#include <mach/clk.h>
-
-extern struct semaphore mddi_host_mutex;
-static boolean mddi_host_ext_powered = FALSE;
-
-void mddi_host_start_ext_display(void)
-{
- down(&mddi_host_mutex);
-
- if (!mddi_host_ext_powered) {
- mddi_host_init(MDDI_HOST_EXT);
-
- mddi_host_ext_powered = TRUE;
- }
-
- up(&mddi_host_mutex);
-}
-
-void mddi_host_stop_ext_display(void)
-{
- down(&mddi_host_mutex);
-
- if (mddi_host_ext_powered) {
- mddi_host_powerdown(MDDI_HOST_EXT);
-
- mddi_host_ext_powered = FALSE;
- }
-
- up(&mddi_host_mutex);
-}
diff --git a/drivers/staging/msm/mddihosti.c b/drivers/staging/msm/mddihosti.c
deleted file mode 100644
index f9d6e91e8d5..00000000000
--- a/drivers/staging/msm/mddihosti.c
+++ /dev/null
@@ -1,2239 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-
-#include "msm_fb_panel.h"
-#include "mddihost.h"
-#include "mddihosti.h"
-
-#define FEATURE_MDDI_UNDERRUN_RECOVERY
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
-static void mddi_read_rev_packet(byte *data_ptr);
-#endif
-
-struct timer_list mddi_host_timer;
-
-#define MDDI_DEFAULT_TIMER_LENGTH 5000 /* 5 seconds */
-uint32 mddi_rtd_frequency = 60000; /* send RTD every 60 seconds */
-uint32 mddi_client_status_frequency = 60000; /* get status pkt every 60 secs */
-
-boolean mddi_vsync_detect_enabled = FALSE;
-mddi_gpio_info_type mddi_gpio;
-
-uint32 mddi_host_core_version;
-boolean mddi_debug_log_statistics = FALSE;
-/* #define FEATURE_MDDI_HOST_ENABLE_EARLY_HIBERNATION */
-/* default to TRUE in case MDP does not vote */
-static boolean mddi_host_mdp_active_flag = TRUE;
-static uint32 mddi_log_stats_counter;
-uint32 mddi_log_stats_frequency = 4000;
-
-#define MDDI_DEFAULT_REV_PKT_SIZE 0x20
-
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
-static boolean mddi_rev_ptr_workaround = TRUE;
-static uint32 mddi_reg_read_retry;
-static uint32 mddi_reg_read_retry_max = 20;
-static boolean mddi_enable_reg_read_retry = TRUE;
-static boolean mddi_enable_reg_read_retry_once = FALSE;
-
-#define MDDI_MAX_REV_PKT_SIZE 0x60
-
-#define MDDI_CLIENT_CAPABILITY_REV_PKT_SIZE 0x60
-
-#define MDDI_VIDEO_REV_PKT_SIZE 0x40
-#define MDDI_REV_BUFFER_SIZE MDDI_MAX_REV_PKT_SIZE
-static byte rev_packet_data[MDDI_MAX_REV_PKT_SIZE];
-#endif /* FEATURE_MDDI_DISABLE_REVERSE */
-/* leave these variables so graphics will compile */
-
-#define MDDI_MAX_REV_DATA_SIZE 128
-/*lint -d__align(x) */
-boolean mddi_debug_clear_rev_data = TRUE;
-
-uint32 *mddi_reg_read_value_ptr;
-
-mddi_client_capability_type mddi_client_capability_pkt;
-static boolean mddi_client_capability_request = FALSE;
-
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
-
-#define MAX_MDDI_REV_HANDLERS 2
-#define INVALID_PKT_TYPE 0xFFFF
-
-typedef struct {
- mddi_rev_handler_type handler; /* ISR to be executed */
- uint16 pkt_type;
-} mddi_rev_pkt_handler_type;
-static mddi_rev_pkt_handler_type mddi_rev_pkt_handler[MAX_MDDI_REV_HANDLERS] =
- { {NULL, INVALID_PKT_TYPE}, {NULL, INVALID_PKT_TYPE} };
-
-static boolean mddi_rev_encap_user_request = FALSE;
-static mddi_linked_list_notify_type mddi_rev_user;
-
-spinlock_t mddi_host_spin_lock;
-extern uint32 mdp_in_processing;
-#endif
-
-typedef enum {
- MDDI_REV_IDLE
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- , MDDI_REV_REG_READ_ISSUED,
- MDDI_REV_REG_READ_SENT,
- MDDI_REV_ENCAP_ISSUED,
- MDDI_REV_STATUS_REQ_ISSUED,
- MDDI_REV_CLIENT_CAP_ISSUED
-#endif
-} mddi_rev_link_state_type;
-
-typedef enum {
- MDDI_LINK_DISABLED,
- MDDI_LINK_HIBERNATING,
- MDDI_LINK_ACTIVATING,
- MDDI_LINK_ACTIVE
-} mddi_host_link_state_type;
-
-typedef struct {
- uint32 count;
- uint32 in_count;
- uint32 disp_req_count;
- uint32 state_change_count;
- uint32 ll_done_count;
- uint32 rev_avail_count;
- uint32 error_count;
- uint32 rev_encap_count;
- uint32 llist_ptr_write_1;
- uint32 llist_ptr_write_2;
-} mddi_host_int_type;
-
-typedef struct {
- uint32 fwd_crc_count;
- uint32 rev_crc_count;
- uint32 pri_underflow;
- uint32 sec_underflow;
- uint32 rev_overflow;
- uint32 pri_overwrite;
- uint32 sec_overwrite;
- uint32 rev_overwrite;
- uint32 dma_failure;
- uint32 rtd_failure;
- uint32 reg_read_failure;
-#ifdef FEATURE_MDDI_UNDERRUN_RECOVERY
- uint32 pri_underrun_detected;
-#endif
-} mddi_host_stat_type;
-
-typedef struct {
- uint32 rtd_cnt;
- uint32 rev_enc_cnt;
- uint32 vid_cnt;
- uint32 reg_acc_cnt;
- uint32 cli_stat_cnt;
- uint32 cli_cap_cnt;
- uint32 reg_read_cnt;
- uint32 link_active_cnt;
- uint32 link_hibernate_cnt;
- uint32 vsync_response_cnt;
- uint32 fwd_crc_cnt;
- uint32 rev_crc_cnt;
-} mddi_log_params_struct_type;
-
-typedef struct {
- uint32 rtd_value;
- uint32 rtd_counter;
- uint32 client_status_cnt;
- boolean rev_ptr_written;
- uint8 *rev_ptr_start;
- uint8 *rev_ptr_curr;
- uint32 mddi_rev_ptr_write_val;
- dma_addr_t rev_data_dma_addr;
- uint16 rev_pkt_size;
- mddi_rev_link_state_type rev_state;
- mddi_host_link_state_type link_state;
- mddi_host_driver_state_type driver_state;
- boolean disable_hibernation;
- uint32 saved_int_reg;
- uint32 saved_int_en;
- mddi_linked_list_type *llist_ptr;
- dma_addr_t llist_dma_addr;
- mddi_linked_list_type *llist_dma_ptr;
- uint32 *rev_data_buf;
- struct completion mddi_llist_avail_comp;
- boolean mddi_waiting_for_llist_avail;
- mddi_host_int_type int_type;
- mddi_host_stat_type stats;
- mddi_log_params_struct_type log_parms;
- mddi_llist_info_type llist_info;
- mddi_linked_list_notify_type llist_notify[MDDI_MAX_NUM_LLIST_ITEMS];
-} mddi_host_cntl_type;
-
-static mddi_host_type mddi_curr_host = MDDI_HOST_PRIM;
-static mddi_host_cntl_type mhctl[MDDI_NUM_HOST_CORES];
-mddi_linked_list_type *llist_extern[MDDI_NUM_HOST_CORES];
-mddi_linked_list_type *llist_dma_extern[MDDI_NUM_HOST_CORES];
-mddi_linked_list_notify_type *llist_extern_notify[MDDI_NUM_HOST_CORES];
-static mddi_log_params_struct_type prev_parms[MDDI_NUM_HOST_CORES];
-
-extern uint32 mdp_total_vdopkts;
-
-static boolean mddi_host_io_clock_on = FALSE;
-static boolean mddi_host_hclk_on = FALSE;
-
-int int_mddi_pri_flag = FALSE;
-int int_mddi_ext_flag = FALSE;
-
-static void mddi_report_errors(uint32 int_reg)
-{
- mddi_host_type host_idx = mddi_curr_host;
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
-
- if (int_reg & MDDI_INT_PRI_UNDERFLOW) {
- pmhctl->stats.pri_underflow++;
- MDDI_MSG_ERR("!!! MDDI Primary Underflow !!!\n");
- }
- if (int_reg & MDDI_INT_SEC_UNDERFLOW) {
- pmhctl->stats.sec_underflow++;
- MDDI_MSG_ERR("!!! MDDI Secondary Underflow !!!\n");
- }
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- if (int_reg & MDDI_INT_REV_OVERFLOW) {
- pmhctl->stats.rev_overflow++;
- MDDI_MSG_ERR("!!! MDDI Reverse Overflow !!!\n");
- pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
- mddi_host_reg_out(REV_PTR, pmhctl->mddi_rev_ptr_write_val);
-
- }
- if (int_reg & MDDI_INT_CRC_ERROR)
- MDDI_MSG_ERR("!!! MDDI Reverse CRC Error !!!\n");
-#endif
- if (int_reg & MDDI_INT_PRI_OVERWRITE) {
- pmhctl->stats.pri_overwrite++;
- MDDI_MSG_ERR("!!! MDDI Primary Overwrite !!!\n");
- }
- if (int_reg & MDDI_INT_SEC_OVERWRITE) {
- pmhctl->stats.sec_overwrite++;
- MDDI_MSG_ERR("!!! MDDI Secondary Overwrite !!!\n");
- }
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- if (int_reg & MDDI_INT_REV_OVERWRITE) {
- pmhctl->stats.rev_overwrite++;
- /* This will show up normally and is not a problem */
- MDDI_MSG_DEBUG("MDDI Reverse Overwrite!\n");
- }
- if (int_reg & MDDI_INT_RTD_FAILURE) {
- mddi_host_reg_outm(INTEN, MDDI_INT_RTD_FAILURE, 0);
- pmhctl->stats.rtd_failure++;
- MDDI_MSG_ERR("!!! MDDI RTD Failure !!!\n");
- }
-#endif
- if (int_reg & MDDI_INT_DMA_FAILURE) {
- pmhctl->stats.dma_failure++;
- MDDI_MSG_ERR("!!! MDDI DMA Abort !!!\n");
- }
-}
-
-static void mddi_host_enable_io_clock(void)
-{
- if (!MDDI_HOST_IS_IO_CLOCK_ON)
- MDDI_HOST_ENABLE_IO_CLOCK;
-}
-
-static void mddi_host_enable_hclk(void)
-{
-
- if (!MDDI_HOST_IS_HCLK_ON)
- MDDI_HOST_ENABLE_HCLK;
-}
-
-static void mddi_host_disable_io_clock(void)
-{
-#ifndef FEATURE_MDDI_HOST_IO_CLOCK_CONTROL_DISABLE
- if (MDDI_HOST_IS_IO_CLOCK_ON)
- MDDI_HOST_DISABLE_IO_CLOCK;
-#endif
-}
-
-static void mddi_host_disable_hclk(void)
-{
-#ifndef FEATURE_MDDI_HOST_HCLK_CONTROL_DISABLE
- if (MDDI_HOST_IS_HCLK_ON)
- MDDI_HOST_DISABLE_HCLK;
-#endif
-}
-
-static void mddi_vote_to_sleep(mddi_host_type host_idx, boolean sleep)
-{
- uint16 vote_mask;
-
- if (host_idx == MDDI_HOST_PRIM)
- vote_mask = 0x01;
- else
- vote_mask = 0x02;
-}
-
-static void mddi_report_state_change(uint32 int_reg)
-{
- mddi_host_type host_idx = mddi_curr_host;
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
-
- if ((pmhctl->saved_int_reg & MDDI_INT_IN_HIBERNATION) &&
- (pmhctl->saved_int_reg & MDDI_INT_LINK_ACTIVE)) {
-		/* recover from a condition where the io_clock was turned off by the
-		   clock driver during a transition to hibernation. The io_clock
-		   disable is to prevent MDP/MDDI underruns when changing ARM
-		   clock speeds. In the process of halting the ARM, the hclk
-		   divider needs to be set to 1. When it is set to 1, there is
-		   a small time (usecs) when hclk is off or slow, and this can
-		   cause an underrun. To prevent the underrun, the clock driver
-		   turns off the MDDI io_clock before making the change. */
- mddi_host_reg_out(CMD, MDDI_CMD_POWERUP);
- }
-
- if (int_reg & MDDI_INT_LINK_ACTIVE) {
- pmhctl->link_state = MDDI_LINK_ACTIVE;
- pmhctl->log_parms.link_active_cnt++;
- pmhctl->rtd_value = mddi_host_reg_in(RTD_VAL);
- MDDI_MSG_DEBUG("!!! MDDI Active RTD:0x%x!!!\n",
- pmhctl->rtd_value);
- /* now interrupt on hibernation */
- mddi_host_reg_outm(INTEN,
- (MDDI_INT_IN_HIBERNATION |
- MDDI_INT_LINK_ACTIVE),
- MDDI_INT_IN_HIBERNATION);
-
-#ifdef DEBUG_MDDIHOSTI
- /* if gpio interrupt is enabled, start polling at fastest
- * registered rate
- */
- if (mddi_gpio.polling_enabled) {
- timer_reg(&mddi_gpio_poll_timer,
- mddi_gpio_poll_timer_cb, 0, mddi_gpio.polling_interval, 0);
- }
-#endif
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- if (mddi_rev_ptr_workaround) {
- /* HW CR: need to reset reverse register stuff */
- pmhctl->rev_ptr_written = FALSE;
- pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
- }
-#endif
- /* vote on sleep */
- mddi_vote_to_sleep(host_idx, FALSE);
-
- if (host_idx == MDDI_HOST_PRIM) {
- if (mddi_vsync_detect_enabled) {
- /*
- * Indicate to client specific code that vsync
- * was enabled, but we did not detect a client
-				 * initiated wakeup. The client specific
- * handler can either reassert vsync detection,
- * or treat this as a valid vsync.
- */
- mddi_client_lcd_vsync_detected(FALSE);
- pmhctl->log_parms.vsync_response_cnt++;
- }
- }
- }
- if (int_reg & MDDI_INT_IN_HIBERNATION) {
- pmhctl->link_state = MDDI_LINK_HIBERNATING;
- pmhctl->log_parms.link_hibernate_cnt++;
- MDDI_MSG_DEBUG("!!! MDDI Hibernating !!!\n");
- /* now interrupt on link_active */
-#ifdef FEATURE_MDDI_DISABLE_REVERSE
- mddi_host_reg_outm(INTEN,
- (MDDI_INT_MDDI_IN |
- MDDI_INT_IN_HIBERNATION |
- MDDI_INT_LINK_ACTIVE),
- MDDI_INT_LINK_ACTIVE);
-#else
- mddi_host_reg_outm(INTEN,
- (MDDI_INT_MDDI_IN |
- MDDI_INT_IN_HIBERNATION |
- MDDI_INT_LINK_ACTIVE),
- (MDDI_INT_MDDI_IN | MDDI_INT_LINK_ACTIVE));
-
- pmhctl->rtd_counter = mddi_rtd_frequency;
-
- if (pmhctl->rev_state != MDDI_REV_IDLE) {
- /* a rev_encap will not wake up the link, so we do that here */
- pmhctl->link_state = MDDI_LINK_ACTIVATING;
- mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
- }
-#endif
-
- if (pmhctl->disable_hibernation) {
- mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE);
- mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
- pmhctl->link_state = MDDI_LINK_ACTIVATING;
- }
-#ifdef FEATURE_MDDI_UNDERRUN_RECOVERY
- if ((pmhctl->llist_info.transmitting_start_idx !=
- UNASSIGNED_INDEX)
- &&
- ((pmhctl->
- saved_int_reg & (MDDI_INT_PRI_LINK_LIST_DONE |
- MDDI_INT_PRI_PTR_READ)) ==
- MDDI_INT_PRI_PTR_READ)) {
- mddi_linked_list_type *llist_dma;
- llist_dma = pmhctl->llist_dma_ptr;
- /*
- * All indications are that we have not received a
- * linked list done interrupt, due to an underrun
- * condition. Recovery attempt is to send again.
- */
- dma_coherent_pre_ops();
- /* Write to primary pointer register again */
- mddi_host_reg_out(PRI_PTR,
- &llist_dma[pmhctl->llist_info.
- transmitting_start_idx]);
- pmhctl->stats.pri_underrun_detected++;
- }
-#endif
-
- /* vote on sleep */
- if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
- mddi_vote_to_sleep(host_idx, TRUE);
- }
-
-#ifdef DEBUG_MDDIHOSTI
- /* need to stop polling timer */
- if (mddi_gpio.polling_enabled) {
- (void) timer_clr(&mddi_gpio_poll_timer, T_NONE);
- }
-#endif
- }
-}
-
-void mddi_host_timer_service(unsigned long data)
-{
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- unsigned long flags;
-#endif
- mddi_host_type host_idx;
- mddi_host_cntl_type *pmhctl;
-
- unsigned long time_ms = MDDI_DEFAULT_TIMER_LENGTH;
- init_timer(&mddi_host_timer);
- mddi_host_timer.function = mddi_host_timer_service;
- mddi_host_timer.data = 0;
-
- mddi_host_timer.expires = jiffies + ((time_ms * HZ) / 1000);
- add_timer(&mddi_host_timer);
-
- for (host_idx = MDDI_HOST_PRIM; host_idx < MDDI_NUM_HOST_CORES;
- host_idx++) {
- pmhctl = &(mhctl[host_idx]);
- mddi_log_stats_counter += (uint32) time_ms;
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- pmhctl->rtd_counter += (uint32) time_ms;
- pmhctl->client_status_cnt += (uint32) time_ms;
-
- if (host_idx == MDDI_HOST_PRIM) {
- if (pmhctl->client_status_cnt >=
- mddi_client_status_frequency) {
- if ((pmhctl->link_state ==
- MDDI_LINK_HIBERNATING)
- && (pmhctl->client_status_cnt >
- mddi_client_status_frequency)) {
- /*
- * special case where we are hibernating
- * and mddi_host_isr is not firing, so
- * kick the link so that the status can
- * be retrieved
- */
-
- /* need to wake up link before issuing
- * rev encap command
- */
- MDDI_MSG_INFO("wake up link!\n");
- spin_lock_irqsave(&mddi_host_spin_lock,
- flags);
- mddi_host_enable_hclk();
- mddi_host_enable_io_clock();
- pmhctl->link_state =
- MDDI_LINK_ACTIVATING;
- mddi_host_reg_out(CMD,
- MDDI_CMD_LINK_ACTIVE);
- spin_unlock_irqrestore
- (&mddi_host_spin_lock, flags);
- } else
- if ((pmhctl->link_state == MDDI_LINK_ACTIVE)
- && pmhctl->disable_hibernation) {
- /*
- * special case where we have disabled
- * hibernation and mddi_host_isr
- * is not firing, so enable interrupt
- * for no pkts pending, which will
- * generate an interrupt
- */
- MDDI_MSG_INFO("kick isr!\n");
- spin_lock_irqsave(&mddi_host_spin_lock,
- flags);
- mddi_host_enable_hclk();
- mddi_host_reg_outm(INTEN,
- MDDI_INT_NO_CMD_PKTS_PEND,
- MDDI_INT_NO_CMD_PKTS_PEND);
- spin_unlock_irqrestore
- (&mddi_host_spin_lock, flags);
- }
- }
- }
-#endif /* #ifndef FEATURE_MDDI_DISABLE_REVERSE */
- }
-
- /* Check if logging is turned on */
- for (host_idx = MDDI_HOST_PRIM; host_idx < MDDI_NUM_HOST_CORES;
- host_idx++) {
- mddi_log_params_struct_type *prev_ptr = &(prev_parms[host_idx]);
- pmhctl = &(mhctl[host_idx]);
-
- if (mddi_debug_log_statistics) {
-
- /* get video pkt count from MDP, since MDDI sw cannot know this */
- pmhctl->log_parms.vid_cnt = mdp_total_vdopkts;
-
- if (mddi_log_stats_counter >= mddi_log_stats_frequency) {
- /* mddi_log_stats_counter = 0; */
- if (mddi_debug_log_statistics) {
- MDDI_MSG_NOTICE
- ("MDDI Statistics since last report:\n");
- MDDI_MSG_NOTICE(" Packets sent:\n");
- MDDI_MSG_NOTICE
- (" %d RTD packet(s)\n",
- pmhctl->log_parms.rtd_cnt -
- prev_ptr->rtd_cnt);
- if (prev_ptr->rtd_cnt !=
- pmhctl->log_parms.rtd_cnt) {
- unsigned long flags;
- spin_lock_irqsave
- (&mddi_host_spin_lock,
- flags);
- mddi_host_enable_hclk();
- pmhctl->rtd_value =
- mddi_host_reg_in(RTD_VAL);
- spin_unlock_irqrestore
- (&mddi_host_spin_lock,
- flags);
- MDDI_MSG_NOTICE
- (" RTD value=%d\n",
- pmhctl->rtd_value);
- }
- MDDI_MSG_NOTICE
- (" %d VIDEO packets\n",
- pmhctl->log_parms.vid_cnt -
- prev_ptr->vid_cnt);
- MDDI_MSG_NOTICE
- (" %d Register Access packets\n",
- pmhctl->log_parms.reg_acc_cnt -
- prev_ptr->reg_acc_cnt);
- MDDI_MSG_NOTICE
- (" %d Reverse Encapsulation packet(s)\n",
- pmhctl->log_parms.rev_enc_cnt -
- prev_ptr->rev_enc_cnt);
- if (prev_ptr->rev_enc_cnt !=
- pmhctl->log_parms.rev_enc_cnt) {
- /* report # of reverse CRC errors */
- MDDI_MSG_NOTICE
- (" %d reverse CRC errors detected\n",
- pmhctl->log_parms.
- rev_crc_cnt -
- prev_ptr->rev_crc_cnt);
- }
- MDDI_MSG_NOTICE
- (" Packets received:\n");
- MDDI_MSG_NOTICE
- (" %d Client Status packets",
- pmhctl->log_parms.cli_stat_cnt -
- prev_ptr->cli_stat_cnt);
- if (prev_ptr->cli_stat_cnt !=
- pmhctl->log_parms.cli_stat_cnt) {
- MDDI_MSG_NOTICE
- (" %d forward CRC errors reported\n",
- pmhctl->log_parms.
- fwd_crc_cnt -
- prev_ptr->fwd_crc_cnt);
- }
- MDDI_MSG_NOTICE
- (" %d Register Access Read packets\n",
- pmhctl->log_parms.reg_read_cnt -
- prev_ptr->reg_read_cnt);
-
- if (pmhctl->link_state ==
- MDDI_LINK_ACTIVE) {
- MDDI_MSG_NOTICE
- (" Current Link Status: Active\n");
- } else
- if ((pmhctl->link_state ==
- MDDI_LINK_HIBERNATING)
- || (pmhctl->link_state ==
- MDDI_LINK_ACTIVATING)) {
- MDDI_MSG_NOTICE
- (" Current Link Status: Hibernation\n");
- } else {
- MDDI_MSG_NOTICE
- (" Current Link Status: Inactive\n");
- }
- MDDI_MSG_NOTICE
- (" Active state entered %d times\n",
- pmhctl->log_parms.link_active_cnt -
- prev_ptr->link_active_cnt);
- MDDI_MSG_NOTICE
- (" Hibernation state entered %d times\n",
- pmhctl->log_parms.
- link_hibernate_cnt -
- prev_ptr->link_hibernate_cnt);
- }
- }
- prev_parms[host_idx] = pmhctl->log_parms;
- }
- }
- if (mddi_log_stats_counter >= mddi_log_stats_frequency)
- mddi_log_stats_counter = 0;
-
- return;
-} /* mddi_host_timer_service */
-
-static void mddi_process_link_list_done(void)
-{
- mddi_host_type host_idx = mddi_curr_host;
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
-
- /* normal forward linked list packet(s) were sent */
- if (pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) {
- MDDI_MSG_ERR("**** getting LL done, but no list ****\n");
- } else {
- uint16 idx;
-
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- if (pmhctl->rev_state == MDDI_REV_REG_READ_ISSUED) {
- /* special case where a register read packet was sent */
- pmhctl->rev_state = MDDI_REV_REG_READ_SENT;
- if (pmhctl->llist_info.reg_read_idx == UNASSIGNED_INDEX) {
- MDDI_MSG_ERR
- ("**** getting LL done, but no list ****\n");
- }
- }
-#endif
- for (idx = pmhctl->llist_info.transmitting_start_idx;;) {
- uint16 next_idx = pmhctl->llist_notify[idx].next_idx;
- /* with reg read we don't release the waiting tcb until after
- * the reverse encapsulation has completed.
- */
- if (idx != pmhctl->llist_info.reg_read_idx) {
- /* notify task that may be waiting on this completion */
- if (pmhctl->llist_notify[idx].waiting) {
- complete(&
- (pmhctl->llist_notify[idx].
- done_comp));
- }
- if (pmhctl->llist_notify[idx].done_cb != NULL) {
- (*(pmhctl->llist_notify[idx].done_cb))
- ();
- }
-
- pmhctl->llist_notify[idx].in_use = FALSE;
- pmhctl->llist_notify[idx].waiting = FALSE;
- pmhctl->llist_notify[idx].done_cb = NULL;
- if (idx < MDDI_NUM_DYNAMIC_LLIST_ITEMS) {
- /* static LLIST items are configured only once */
- pmhctl->llist_notify[idx].next_idx =
- UNASSIGNED_INDEX;
- }
- /*
- * currently, all linked list packets are
- * register access, so we can increment the
- * counter for that packet type here.
- */
- pmhctl->log_parms.reg_acc_cnt++;
- }
- if (idx == pmhctl->llist_info.transmitting_end_idx)
- break;
- idx = next_idx;
- if (idx == UNASSIGNED_INDEX)
- MDDI_MSG_CRIT("MDDI linked list corruption!\n");
- }
-
- pmhctl->llist_info.transmitting_start_idx = UNASSIGNED_INDEX;
- pmhctl->llist_info.transmitting_end_idx = UNASSIGNED_INDEX;
-
- if (pmhctl->mddi_waiting_for_llist_avail) {
- if (!
- (pmhctl->
- llist_notify[pmhctl->llist_info.next_free_idx].
- in_use)) {
- pmhctl->mddi_waiting_for_llist_avail = FALSE;
- complete(&(pmhctl->mddi_llist_avail_comp));
- }
- }
- }
-
- /* Turn off MDDI_INT_PRI_LINK_LIST_DONE interrupt */
- mddi_host_reg_outm(INTEN, MDDI_INT_PRI_LINK_LIST_DONE, 0);
-
-}
-
-static void mddi_queue_forward_linked_list(void)
-{
- uint16 first_pkt_index;
- mddi_linked_list_type *llist_dma;
- mddi_host_type host_idx = mddi_curr_host;
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
- llist_dma = pmhctl->llist_dma_ptr;
-
- first_pkt_index = UNASSIGNED_INDEX;
-
- if (pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) {
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- if (pmhctl->llist_info.reg_read_waiting) {
- if (pmhctl->rev_state == MDDI_REV_IDLE) {
- /*
- * we have a register read to send and
- * can send it now
- */
- pmhctl->rev_state = MDDI_REV_REG_READ_ISSUED;
- mddi_reg_read_retry = 0;
- first_pkt_index =
- pmhctl->llist_info.waiting_start_idx;
- pmhctl->llist_info.reg_read_waiting = FALSE;
- }
- } else
-#endif
- {
- /*
-		 * no register read to worry about, go ahead and write
- * anything that may be on the waiting list.
- */
- first_pkt_index = pmhctl->llist_info.waiting_start_idx;
- }
- }
-
- if (first_pkt_index != UNASSIGNED_INDEX) {
- pmhctl->llist_info.transmitting_start_idx =
- pmhctl->llist_info.waiting_start_idx;
- pmhctl->llist_info.transmitting_end_idx =
- pmhctl->llist_info.waiting_end_idx;
- pmhctl->llist_info.waiting_start_idx = UNASSIGNED_INDEX;
- pmhctl->llist_info.waiting_end_idx = UNASSIGNED_INDEX;
-
- /* write to the primary pointer register */
- MDDI_MSG_DEBUG("MDDI writing primary ptr with idx=%d\n",
- first_pkt_index);
-
- pmhctl->int_type.llist_ptr_write_2++;
-
- dma_coherent_pre_ops();
- mddi_host_reg_out(PRI_PTR, &llist_dma[first_pkt_index]);
-
- /* enable interrupt when complete */
- mddi_host_reg_outm(INTEN, MDDI_INT_PRI_LINK_LIST_DONE,
- MDDI_INT_PRI_LINK_LIST_DONE);
-
- }
-
-}
-
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
-static void mddi_read_rev_packet(byte *data_ptr)
-{
- uint16 i, length;
- mddi_host_type host_idx = mddi_curr_host;
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
-
- uint8 *rev_ptr_overflow =
- (pmhctl->rev_ptr_start + MDDI_REV_BUFFER_SIZE);
-
- /* first determine the length and handle invalid lengths */
- length = *pmhctl->rev_ptr_curr++;
- if (pmhctl->rev_ptr_curr >= rev_ptr_overflow)
- pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
- length |= ((*pmhctl->rev_ptr_curr++) << 8);
- if (pmhctl->rev_ptr_curr >= rev_ptr_overflow)
- pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
- if (length > (pmhctl->rev_pkt_size - 2)) {
- MDDI_MSG_ERR("Invalid rev pkt length %d\n", length);
- /* rev_pkt_size should always be <= rev_ptr_size so limit to packet size */
- length = pmhctl->rev_pkt_size - 2;
- }
-
- /* If the data pointer is NULL, just increment the pmhctl->rev_ptr_curr.
- * Loop around if necessary. Don't bother reading the data.
- */
- if (data_ptr == NULL) {
- pmhctl->rev_ptr_curr += length;
- if (pmhctl->rev_ptr_curr >= rev_ptr_overflow)
- pmhctl->rev_ptr_curr -= MDDI_REV_BUFFER_SIZE;
- return;
- }
-
- data_ptr[0] = length & 0x0ff;
- data_ptr[1] = length >> 8;
- data_ptr += 2;
- /* copy the data to data_ptr byte-at-a-time */
- for (i = 0; (i < length) && (pmhctl->rev_ptr_curr < rev_ptr_overflow);
- i++)
- *data_ptr++ = *pmhctl->rev_ptr_curr++;
- if (pmhctl->rev_ptr_curr >= rev_ptr_overflow)
- pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
- for (; (i < length) && (pmhctl->rev_ptr_curr < rev_ptr_overflow); i++)
- *data_ptr++ = *pmhctl->rev_ptr_curr++;
-}
-
-static void mddi_process_rev_packets(void)
-{
- uint32 rev_packet_count;
- word i;
- uint32 crc_errors;
- boolean mddi_reg_read_successful = FALSE;
- mddi_host_type host_idx = mddi_curr_host;
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
-
- pmhctl->log_parms.rev_enc_cnt++;
- if ((pmhctl->rev_state != MDDI_REV_ENCAP_ISSUED) &&
- (pmhctl->rev_state != MDDI_REV_STATUS_REQ_ISSUED) &&
- (pmhctl->rev_state != MDDI_REV_CLIENT_CAP_ISSUED)) {
- MDDI_MSG_ERR("Wrong state %d for reverse int\n",
- pmhctl->rev_state);
- }
- /* Turn off MDDI_INT_REV_AVAIL interrupt */
- mddi_host_reg_outm(INTEN, MDDI_INT_REV_DATA_AVAIL, 0);
-
- /* Clear rev data avail int */
- mddi_host_reg_out(INT, MDDI_INT_REV_DATA_AVAIL);
-
- /* Get Number of packets */
- rev_packet_count = mddi_host_reg_in(REV_PKT_CNT);
-
-#ifndef T_MSM7500
- /* Clear out rev packet counter */
- mddi_host_reg_out(REV_PKT_CNT, 0x0000);
-#endif
-
-#if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP40)
- if ((pmhctl->rev_state == MDDI_REV_CLIENT_CAP_ISSUED) &&
- (rev_packet_count > 0) &&
- (mddi_host_core_version == 0x28 ||
- mddi_host_core_version == 0x30)) {
-
- uint32 int_reg;
- uint32 max_count = 0;
-
- mddi_host_reg_out(REV_PTR, pmhctl->mddi_rev_ptr_write_val);
- int_reg = mddi_host_reg_in(INT);
- while ((int_reg & 0x100000) == 0) {
- udelay(3);
- int_reg = mddi_host_reg_in(INT);
- if (++max_count > 100)
- break;
- }
- }
-#endif
-
- /* Get CRC error count */
- crc_errors = mddi_host_reg_in(REV_CRC_ERR);
- if (crc_errors != 0) {
- pmhctl->log_parms.rev_crc_cnt += crc_errors;
- pmhctl->stats.rev_crc_count += crc_errors;
- MDDI_MSG_ERR("!!! MDDI %d Reverse CRC Error(s) !!!\n",
- crc_errors);
-#ifndef T_MSM7500
- /* Clear CRC error count */
- mddi_host_reg_out(REV_CRC_ERR, 0x0000);
-#endif
- /* also issue an RTD to attempt recovery */
- pmhctl->rtd_counter = mddi_rtd_frequency;
- }
-
- pmhctl->rtd_value = mddi_host_reg_in(RTD_VAL);
-
- MDDI_MSG_DEBUG("MDDI rev pkt cnt=%d, ptr=0x%x, RTD:0x%x\n",
- rev_packet_count,
- pmhctl->rev_ptr_curr - pmhctl->rev_ptr_start,
- pmhctl->rtd_value);
-
- if (rev_packet_count >= 1) {
- mddi_invalidate_cache_lines((uint32 *) pmhctl->rev_ptr_start,
- MDDI_REV_BUFFER_SIZE);
- }
- /* order the reads */
- dma_coherent_post_ops();
- for (i = 0; i < rev_packet_count; i++) {
- mddi_rev_packet_type *rev_pkt_ptr;
-
- mddi_read_rev_packet(rev_packet_data);
-
- rev_pkt_ptr = (mddi_rev_packet_type *) rev_packet_data;
-
- if (rev_pkt_ptr->packet_length > pmhctl->rev_pkt_size) {
- MDDI_MSG_ERR("!!!invalid packet size: %d\n",
- rev_pkt_ptr->packet_length);
- }
-
- MDDI_MSG_DEBUG("MDDI rev pkt 0x%x size 0x%x\n",
- rev_pkt_ptr->packet_type,
- rev_pkt_ptr->packet_length);
-
- /* Do whatever you want to do with the data based on the packet type */
- switch (rev_pkt_ptr->packet_type) {
- case 66: /* Client Capability */
- {
- mddi_client_capability_type
- *client_capability_pkt_ptr;
-
- client_capability_pkt_ptr =
- (mddi_client_capability_type *)
- rev_packet_data;
- MDDI_MSG_NOTICE
- ("Client Capability: Week=%d, Year=%d\n",
- client_capability_pkt_ptr->
- Week_of_Manufacture,
- client_capability_pkt_ptr->
- Year_of_Manufacture);
- memcpy((void *)&mddi_client_capability_pkt,
- (void *)rev_packet_data,
- sizeof(mddi_client_capability_type));
- pmhctl->log_parms.cli_cap_cnt++;
- }
- break;
-
- case 70: /* Display Status */
- {
- mddi_client_status_type *client_status_pkt_ptr;
-
- client_status_pkt_ptr =
- (mddi_client_status_type *) rev_packet_data;
- if ((client_status_pkt_ptr->crc_error_count !=
- 0)
- || (client_status_pkt_ptr->
- reverse_link_request != 0)) {
- MDDI_MSG_ERR
- ("Client Status: RevReq=%d, CrcErr=%d\n",
- client_status_pkt_ptr->
- reverse_link_request,
- client_status_pkt_ptr->
- crc_error_count);
- } else {
- MDDI_MSG_DEBUG
- ("Client Status: RevReq=%d, CrcErr=%d\n",
- client_status_pkt_ptr->
- reverse_link_request,
- client_status_pkt_ptr->
- crc_error_count);
- }
- pmhctl->log_parms.fwd_crc_cnt +=
- client_status_pkt_ptr->crc_error_count;
- pmhctl->stats.fwd_crc_count +=
- client_status_pkt_ptr->crc_error_count;
- pmhctl->log_parms.cli_stat_cnt++;
- }
- break;
-
- case 146: /* register access packet */
- {
- mddi_register_access_packet_type
- * regacc_pkt_ptr;
-
- regacc_pkt_ptr =
- (mddi_register_access_packet_type *)
- rev_packet_data;
-
- MDDI_MSG_DEBUG
- ("Reg Acc parse reg=0x%x, value=0x%x\n",
- regacc_pkt_ptr->register_address,
- regacc_pkt_ptr->register_data_list);
-
- /* Copy register value to location passed in */
- if (mddi_reg_read_value_ptr) {
-#if defined(T_MSM6280) && !defined(T_MSM7200)
- /* only least significant 16 bits are valid with 6280 */
- *mddi_reg_read_value_ptr =
- regacc_pkt_ptr->
- register_data_list & 0x0000ffff;
-#else
- *mddi_reg_read_value_ptr =
- regacc_pkt_ptr->register_data_list;
-#endif
- mddi_reg_read_successful = TRUE;
- mddi_reg_read_value_ptr = NULL;
- }
-
-#ifdef DEBUG_MDDIHOSTI
- if ((mddi_gpio.polling_enabled) &&
- (regacc_pkt_ptr->register_address ==
- mddi_gpio.polling_reg)) {
- /*
-				 * TODO: need to call the Linux GPIO API
-				 * here...
- */
- mddi_client_lcd_gpio_poll(
- regacc_pkt_ptr->register_data_list);
- }
-#endif
- pmhctl->log_parms.reg_read_cnt++;
- }
- break;
-
- default: /* any other packet */
- {
- uint16 hdlr;
-
- for (hdlr = 0; hdlr < MAX_MDDI_REV_HANDLERS;
- hdlr++) {
- if (mddi_rev_pkt_handler[hdlr].
- pkt_type ==
- rev_pkt_ptr->packet_type) {
- (*
- (mddi_rev_pkt_handler[hdlr].
- handler)) (rev_pkt_ptr);
- /* pmhctl->rev_state = MDDI_REV_IDLE; */
- break;
- }
- }
- if (hdlr >= MAX_MDDI_REV_HANDLERS)
- MDDI_MSG_ERR("MDDI unknown rev pkt\n");
- }
- break;
- }
- }
- if ((pmhctl->rev_ptr_curr + pmhctl->rev_pkt_size) >=
- (pmhctl->rev_ptr_start + MDDI_REV_BUFFER_SIZE)) {
- pmhctl->rev_ptr_written = FALSE;
- }
-
- if (pmhctl->rev_state == MDDI_REV_ENCAP_ISSUED) {
- pmhctl->rev_state = MDDI_REV_IDLE;
- if (mddi_rev_user.waiting) {
- mddi_rev_user.waiting = FALSE;
- complete(&(mddi_rev_user.done_comp));
- } else if (pmhctl->llist_info.reg_read_idx == UNASSIGNED_INDEX) {
- MDDI_MSG_ERR
- ("Reverse Encap state, but no reg read in progress\n");
- } else {
- if ((!mddi_reg_read_successful) &&
- (mddi_reg_read_retry < mddi_reg_read_retry_max) &&
- (mddi_enable_reg_read_retry)) {
- /*
- * There is a race condition that can happen
- * where the reverse encapsulation message is
- * sent out by the MDDI host before the register
- * read packet is sent. As a work-around for
- * that problem we issue the reverse
- * encapsulation one more time before giving up.
- */
- if (mddi_enable_reg_read_retry_once)
- mddi_reg_read_retry =
- mddi_reg_read_retry_max;
- pmhctl->rev_state = MDDI_REV_REG_READ_SENT;
- pmhctl->stats.reg_read_failure++;
- } else {
- uint16 reg_read_idx =
- pmhctl->llist_info.reg_read_idx;
-
- mddi_reg_read_retry = 0;
- if (pmhctl->llist_notify[reg_read_idx].waiting) {
- complete(&
- (pmhctl->
- llist_notify[reg_read_idx].
- done_comp));
- }
- pmhctl->llist_info.reg_read_idx =
- UNASSIGNED_INDEX;
- if (pmhctl->llist_notify[reg_read_idx].
- done_cb != NULL) {
- (*
- (pmhctl->llist_notify[reg_read_idx].
- done_cb)) ();
- }
- pmhctl->llist_notify[reg_read_idx].next_idx =
- UNASSIGNED_INDEX;
- pmhctl->llist_notify[reg_read_idx].in_use =
- FALSE;
- pmhctl->llist_notify[reg_read_idx].waiting =
- FALSE;
- pmhctl->llist_notify[reg_read_idx].done_cb =
- NULL;
- if (!mddi_reg_read_successful)
- pmhctl->stats.reg_read_failure++;
- }
- }
- } else if (pmhctl->rev_state == MDDI_REV_CLIENT_CAP_ISSUED) {
-#if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP40)
- if (mddi_host_core_version == 0x28 ||
- mddi_host_core_version == 0x30) {
- mddi_host_reg_out(FIFO_ALLOC, 0x00);
- pmhctl->rev_ptr_written = TRUE;
- mddi_host_reg_out(REV_PTR,
- pmhctl->mddi_rev_ptr_write_val);
- pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
- mddi_host_reg_out(CMD, 0xC00);
- }
-#endif
-
- if (mddi_rev_user.waiting) {
- mddi_rev_user.waiting = FALSE;
- complete(&(mddi_rev_user.done_comp));
- }
- pmhctl->rev_state = MDDI_REV_IDLE;
- } else {
- pmhctl->rev_state = MDDI_REV_IDLE;
- }
-
- /* pmhctl->rev_state = MDDI_REV_IDLE; */
-
- /* Re-enable interrupt */
- mddi_host_reg_outm(INTEN, MDDI_INT_REV_DATA_AVAIL,
- MDDI_INT_REV_DATA_AVAIL);
-
-}
-
-static void mddi_issue_reverse_encapsulation(void)
-{
- mddi_host_type host_idx = mddi_curr_host;
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
- /* Only issue a reverse encapsulation packet if:
- * 1) another reverse is not in progress (MDDI_REV_IDLE).
- * 2) a register read has been sent (MDDI_REV_REG_READ_SENT).
- * 3) forward is not in progress, because of a hw bug in client that
- * causes forward crc errors on packet immediately after rev encap.
- */
- if (((pmhctl->rev_state == MDDI_REV_IDLE) ||
- (pmhctl->rev_state == MDDI_REV_REG_READ_SENT)) &&
- (pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) &&
- (!mdp_in_processing)) {
- uint32 mddi_command = MDDI_CMD_SEND_REV_ENCAP;
-
- if ((pmhctl->rev_state == MDDI_REV_REG_READ_SENT) ||
- (mddi_rev_encap_user_request == TRUE)) {
- mddi_host_enable_io_clock();
- if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
- /* need to wake up link before issuing rev encap command */
- MDDI_MSG_DEBUG("wake up link!\n");
- pmhctl->link_state = MDDI_LINK_ACTIVATING;
- mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
- } else {
- if (pmhctl->rtd_counter >= mddi_rtd_frequency) {
- MDDI_MSG_DEBUG
- ("mddi sending RTD command!\n");
- mddi_host_reg_out(CMD,
- MDDI_CMD_SEND_RTD);
- pmhctl->rtd_counter = 0;
- pmhctl->log_parms.rtd_cnt++;
- }
- if (pmhctl->rev_state != MDDI_REV_REG_READ_SENT) {
- /* this is a generic reverse request by the user, so
- * reset the waiting flag. */
- mddi_rev_encap_user_request = FALSE;
- }
- /* link is active so send reverse encap to get register read results */
- pmhctl->rev_state = MDDI_REV_ENCAP_ISSUED;
- mddi_command = MDDI_CMD_SEND_REV_ENCAP;
- MDDI_MSG_DEBUG("sending rev encap!\n");
- }
- } else
- if ((pmhctl->client_status_cnt >=
- mddi_client_status_frequency)
- || mddi_client_capability_request) {
- mddi_host_enable_io_clock();
- if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
- /* only wake up the link if the client status is overdue */
- if ((pmhctl->client_status_cnt >=
- (mddi_client_status_frequency * 2))
- || mddi_client_capability_request) {
- /* need to wake up link before issuing rev encap command */
- MDDI_MSG_DEBUG("wake up link!\n");
- pmhctl->link_state =
- MDDI_LINK_ACTIVATING;
- mddi_host_reg_out(CMD,
- MDDI_CMD_LINK_ACTIVE);
- }
- } else {
- if (pmhctl->rtd_counter >= mddi_rtd_frequency) {
- MDDI_MSG_DEBUG
- ("mddi sending RTD command!\n");
- mddi_host_reg_out(CMD,
- MDDI_CMD_SEND_RTD);
- pmhctl->rtd_counter = 0;
- pmhctl->log_parms.rtd_cnt++;
- }
- /* periodically get client status */
- MDDI_MSG_DEBUG
- ("mddi sending rev enc! (get status)\n");
- if (mddi_client_capability_request) {
- pmhctl->rev_state =
- MDDI_REV_CLIENT_CAP_ISSUED;
- mddi_command = MDDI_CMD_GET_CLIENT_CAP;
- mddi_client_capability_request = FALSE;
- } else {
- pmhctl->rev_state =
- MDDI_REV_STATUS_REQ_ISSUED;
- pmhctl->client_status_cnt = 0;
- mddi_command =
- MDDI_CMD_GET_CLIENT_STATUS;
- }
- }
- }
- if ((pmhctl->rev_state == MDDI_REV_ENCAP_ISSUED) ||
- (pmhctl->rev_state == MDDI_REV_STATUS_REQ_ISSUED) ||
- (pmhctl->rev_state == MDDI_REV_CLIENT_CAP_ISSUED)) {
- pmhctl->int_type.rev_encap_count++;
-#if defined(T_MSM6280) && !defined(T_MSM7200)
- mddi_rev_pointer_written = TRUE;
- mddi_host_reg_out(REV_PTR, mddi_rev_ptr_write_val);
- mddi_rev_ptr_curr = mddi_rev_ptr_start;
- /* force new rev ptr command */
- mddi_host_reg_out(CMD, 0xC00);
-#else
- if (!pmhctl->rev_ptr_written) {
- MDDI_MSG_DEBUG("writing reverse pointer!\n");
- pmhctl->rev_ptr_written = TRUE;
-#if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP40)
- if ((pmhctl->rev_state ==
- MDDI_REV_CLIENT_CAP_ISSUED) &&
- (mddi_host_core_version == 0x28 ||
- mddi_host_core_version == 0x30)) {
- pmhctl->rev_ptr_written = FALSE;
- mddi_host_reg_out(FIFO_ALLOC, 0x02);
- } else
- mddi_host_reg_out(REV_PTR,
- pmhctl->
- mddi_rev_ptr_write_val);
-#else
- mddi_host_reg_out(REV_PTR,
- pmhctl->
- mddi_rev_ptr_write_val);
-#endif
- }
-#endif
- if (mddi_debug_clear_rev_data) {
- uint16 i;
- for (i = 0; i < MDDI_MAX_REV_DATA_SIZE / 4; i++)
- pmhctl->rev_data_buf[i] = 0xdddddddd;
- /* clean cache */
- mddi_flush_cache_lines(pmhctl->rev_data_buf,
- MDDI_MAX_REV_DATA_SIZE);
- }
-
- /* send reverse encapsulation to get needed data */
- mddi_host_reg_out(CMD, mddi_command);
- }
- }
-
-}
-
-static void mddi_process_client_initiated_wakeup(void)
-{
- mddi_host_type host_idx = mddi_curr_host;
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
-
- /* Disable the MDDI_INT interrupt; we detect a client initiated wakeup one
- * time for each entry into hibernation */
- mddi_host_reg_outm(INTEN, MDDI_INT_MDDI_IN, 0);
-
- if (host_idx == MDDI_HOST_PRIM) {
- if (mddi_vsync_detect_enabled) {
- mddi_host_enable_io_clock();
-#ifndef MDDI_HOST_DISP_LISTEN
- /* issue command to bring up link */
- /* need to do this to clear the vsync condition */
- if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
- pmhctl->link_state = MDDI_LINK_ACTIVATING;
- mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
- }
-#endif
- /*
- * Indicate to client specific code that vsync was
- * enabled, and we did not detect a client initiated
- * wakeup. The client specific handler can clear the
- * condition if necessary to prevent subsequent
- * client initiated wakeups.
- */
- mddi_client_lcd_vsync_detected(TRUE);
- pmhctl->log_parms.vsync_response_cnt++;
- MDDI_MSG_NOTICE("MDDI_INT_IN condition\n");
-
- }
- }
-
- if (mddi_gpio.polling_enabled) {
- mddi_host_enable_io_clock();
- /* check interrupt status now */
- (void)mddi_queue_register_read_int(mddi_gpio.polling_reg,
- &mddi_gpio.polling_val);
- }
-}
-#endif /* FEATURE_MDDI_DISABLE_REVERSE */
-
-static void mddi_host_isr(void)
-{
- uint32 int_reg, int_en;
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- uint32 status_reg;
-#endif
- mddi_host_type host_idx = mddi_curr_host;
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
-
- if (!MDDI_HOST_IS_HCLK_ON) {
- MDDI_HOST_ENABLE_HCLK;
- MDDI_MSG_DEBUG("HCLK disabled, but isr is firing\n");
- }
- int_reg = mddi_host_reg_in(INT);
- int_en = mddi_host_reg_in(INTEN);
- pmhctl->saved_int_reg = int_reg;
- pmhctl->saved_int_en = int_en;
- int_reg = int_reg & int_en;
- pmhctl->int_type.count++;
-
-
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- status_reg = mddi_host_reg_in(STAT);
-
- if ((int_reg & MDDI_INT_MDDI_IN) ||
- ((int_en & MDDI_INT_MDDI_IN) &&
- ((int_reg == 0) || (status_reg & MDDI_STAT_CLIENT_WAKEUP_REQ)))) {
- /*
- * The MDDI_IN condition will clear itself, and so it is
- * possible that MDDI_IN was the reason for the isr firing,
- * even though the interrupt register does not have the
- * MDDI_IN bit set. To check if this was the case we need to
- * look at the status register bit that signifies a client
- * initiated wakeup. If the status register bit is set, as well
- * as the MDDI_IN interrupt enabled, then we treat this as a
- * client initiated wakeup.
- */
- if (int_reg & MDDI_INT_MDDI_IN)
- pmhctl->int_type.in_count++;
- mddi_process_client_initiated_wakeup();
- }
-#endif
-
- if (int_reg & MDDI_INT_LINK_STATE_CHANGES) {
- pmhctl->int_type.state_change_count++;
- mddi_report_state_change(int_reg);
- }
-
- if (int_reg & MDDI_INT_PRI_LINK_LIST_DONE) {
- pmhctl->int_type.ll_done_count++;
- mddi_process_link_list_done();
- }
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- if (int_reg & MDDI_INT_REV_DATA_AVAIL) {
- pmhctl->int_type.rev_avail_count++;
- mddi_process_rev_packets();
- }
-#endif
-
- if (int_reg & MDDI_INT_ERROR_CONDITIONS) {
- pmhctl->int_type.error_count++;
- mddi_report_errors(int_reg);
-
- mddi_host_reg_out(INT, int_reg & MDDI_INT_ERROR_CONDITIONS);
- }
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- mddi_issue_reverse_encapsulation();
-
- if ((pmhctl->rev_state != MDDI_REV_ENCAP_ISSUED) &&
- (pmhctl->rev_state != MDDI_REV_STATUS_REQ_ISSUED))
-#endif
- /* don't want simultaneous reverse and forward with Eagle */
- mddi_queue_forward_linked_list();
-
- if (int_reg & MDDI_INT_NO_CMD_PKTS_PEND) {
- /* this interrupt is used to kick the isr when hibernation is disabled */
- mddi_host_reg_outm(INTEN, MDDI_INT_NO_CMD_PKTS_PEND, 0);
- }
-
- if ((!mddi_host_mdp_active_flag) &&
- (!mddi_vsync_detect_enabled) &&
- (pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) &&
- (pmhctl->llist_info.waiting_start_idx == UNASSIGNED_INDEX) &&
- (pmhctl->rev_state == MDDI_REV_IDLE)) {
- if (pmhctl->link_state == MDDI_LINK_HIBERNATING) {
- mddi_host_disable_io_clock();
- mddi_host_disable_hclk();
- }
-#ifdef FEATURE_MDDI_HOST_ENABLE_EARLY_HIBERNATION
- else if ((pmhctl->link_state == MDDI_LINK_ACTIVE) &&
- (!pmhctl->disable_hibernation)) {
- mddi_host_reg_out(CMD, MDDI_CMD_POWERDOWN);
- }
-#endif
- }
-}
-
-static void mddi_host_isr_primary(void)
-{
- mddi_curr_host = MDDI_HOST_PRIM;
- mddi_host_isr();
-}
-
-irqreturn_t mddi_pmdh_isr_proxy(int irq, void *ptr)
-{
- mddi_host_isr_primary();
- return IRQ_HANDLED;
-}
-
-static void mddi_host_isr_external(void)
-{
- mddi_curr_host = MDDI_HOST_EXT;
- mddi_host_isr();
- mddi_curr_host = MDDI_HOST_PRIM;
-}
-
-irqreturn_t mddi_emdh_isr_proxy(int irq, void *ptr)
-{
- mddi_host_isr_external();
- return IRQ_HANDLED;
-}
-
-static void mddi_host_initialize_registers(mddi_host_type host_idx)
-{
- uint32 pad_reg_val;
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
-
- if (pmhctl->driver_state == MDDI_DRIVER_ENABLED)
- return;
-
- /* turn on HCLK to MDDI host core */
- mddi_host_enable_hclk();
-
- /* MDDI Reset command */
- mddi_host_reg_out(CMD, MDDI_CMD_RESET);
-
- /* Version register (= 0x01) */
- mddi_host_reg_out(VERSION, 0x0001);
-
- /* Bytes per subframe register */
- mddi_host_reg_out(BPS, MDDI_HOST_BYTES_PER_SUBFRAME);
-
- /* Subframes per media frames register (= 0x03) */
- mddi_host_reg_out(SPM, 0x0003);
-
- /* Turn Around 1 register (= 0x05) */
- mddi_host_reg_out(TA1_LEN, 0x0005);
-
- /* Turn Around 2 register (= 0x0C) */
- mddi_host_reg_out(TA2_LEN, MDDI_HOST_TA2_LEN);
-
- /* Drive hi register (= 0x96) */
- mddi_host_reg_out(DRIVE_HI, 0x0096);
-
- /* Drive lo register (= 0x32) */
- mddi_host_reg_out(DRIVE_LO, 0x0032);
-
- /* Display wakeup count register (= 0x3c) */
- mddi_host_reg_out(DISP_WAKE, 0x003c);
-
- /* Reverse Rate Divisor register (= 0x2) */
- mddi_host_reg_out(REV_RATE_DIV, MDDI_HOST_REV_RATE_DIV);
-
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- /* Reverse Pointer Size */
- mddi_host_reg_out(REV_SIZE, MDDI_REV_BUFFER_SIZE);
-
- /* Rev Encap Size */
- mddi_host_reg_out(REV_ENCAP_SZ, pmhctl->rev_pkt_size);
-#endif
-
- /* Periodic Rev Encap */
- /* don't send periodically */
- mddi_host_reg_out(CMD, MDDI_CMD_PERIODIC_REV_ENCAP);
-
- pad_reg_val = mddi_host_reg_in(PAD_CTL);
- if (pad_reg_val == 0) {
- /* If we are turning on band gap, need to wait 5us before turning
- * on the rest of the PAD */
- mddi_host_reg_out(PAD_CTL, 0x08000);
- udelay(5);
- }
-#ifdef T_MSM7200
- /* Recommendation from PAD hw team */
- mddi_host_reg_out(PAD_CTL, 0xa850a);
-#else
- /* Recommendation from PAD hw team */
- mddi_host_reg_out(PAD_CTL, 0xa850f);
-#endif
-
-#if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP40)
- mddi_host_reg_out(PAD_IO_CTL, 0x00320000);
- mddi_host_reg_out(PAD_CAL, 0x00220020);
-#endif
-
- mddi_host_core_version = mddi_host_reg_inm(CORE_VER, 0xffff);
-
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- if (mddi_host_core_version >= 8)
- mddi_rev_ptr_workaround = FALSE;
- pmhctl->rev_ptr_curr = pmhctl->rev_ptr_start;
-#endif
-
- if ((mddi_host_core_version > 8) && (mddi_host_core_version < 0x19))
- mddi_host_reg_out(TEST, 0x2);
-
- /* Need an even number for counts */
- mddi_host_reg_out(DRIVER_START_CNT, 0x60006);
-
-#ifndef T_MSM7500
- /* Setup defaults for MDP related register */
- mddi_host_reg_out(MDP_VID_FMT_DES, 0x5666);
- mddi_host_reg_out(MDP_VID_PIX_ATTR, 0x00C3);
- mddi_host_reg_out(MDP_VID_CLIENTID, 0);
-#endif
-
- /* automatically hibernate after 1 empty subframe */
- if (pmhctl->disable_hibernation)
- mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE);
- else
- mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE | 1);
-
- /* Bring up link if display (client) requests it */
-#ifdef MDDI_HOST_DISP_LISTEN
- mddi_host_reg_out(CMD, MDDI_CMD_DISP_LISTEN);
-#else
- mddi_host_reg_out(CMD, MDDI_CMD_DISP_IGNORE);
-#endif
-
-}
-
-void mddi_host_configure_interrupts(mddi_host_type host_idx, boolean enable)
-{
- unsigned long flags;
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
-
- spin_lock_irqsave(&mddi_host_spin_lock, flags);
-
- /* turn on HCLK to MDDI host core if it has been disabled */
- mddi_host_enable_hclk();
- /* Clear MDDI Interrupt enable reg */
- mddi_host_reg_out(INTEN, 0);
-
- spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
-
- if (enable) {
- pmhctl->driver_state = MDDI_DRIVER_ENABLED;
-
- if (host_idx == MDDI_HOST_PRIM) {
- if (request_irq
- (INT_MDDI_PRI, mddi_pmdh_isr_proxy, IRQF_DISABLED,
- "PMDH", 0) != 0)
- printk(KERN_ERR
- "a mddi: unable to request_irq\n");
- else
- int_mddi_pri_flag = TRUE;
- } else {
- if (request_irq
- (INT_MDDI_EXT, mddi_emdh_isr_proxy, IRQF_DISABLED,
- "EMDH", 0) != 0)
- printk(KERN_ERR
- "b mddi: unable to request_irq\n");
- else
- int_mddi_ext_flag = TRUE;
- }
-
- /* Set MDDI Interrupt enable reg -- Enable Reverse data avail */
-#ifdef FEATURE_MDDI_DISABLE_REVERSE
- mddi_host_reg_out(INTEN,
- MDDI_INT_ERROR_CONDITIONS |
- MDDI_INT_LINK_STATE_CHANGES);
-#else
- /* Reverse Pointer register */
- pmhctl->rev_ptr_written = FALSE;
-
- mddi_host_reg_out(INTEN,
- MDDI_INT_REV_DATA_AVAIL |
- MDDI_INT_ERROR_CONDITIONS |
- MDDI_INT_LINK_STATE_CHANGES);
- pmhctl->rtd_counter = mddi_rtd_frequency;
- pmhctl->client_status_cnt = 0;
-#endif
- } else {
- if (pmhctl->driver_state == MDDI_DRIVER_ENABLED)
- pmhctl->driver_state = MDDI_DRIVER_DISABLED;
- }
-
-}
-
-static void mddi_host_powerup(mddi_host_type host_idx)
-{
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
-
- if (pmhctl->link_state != MDDI_LINK_DISABLED)
- return;
-
- /* enable IO_CLK and hclk to MDDI host core */
- mddi_host_enable_io_clock();
-
- mddi_host_initialize_registers(host_idx);
- mddi_host_configure_interrupts(host_idx, TRUE);
-
- pmhctl->link_state = MDDI_LINK_ACTIVATING;
-
- /* Link activate command */
- mddi_host_reg_out(CMD, MDDI_CMD_LINK_ACTIVE);
-
-#ifdef CLKRGM_MDDI_IO_CLOCK_IN_MHZ
- MDDI_MSG_NOTICE("MDDI Host: Activating Link %d Mbps\n",
- CLKRGM_MDDI_IO_CLOCK_IN_MHZ * 2);
-#else
- MDDI_MSG_NOTICE("MDDI Host: Activating Link\n");
-#endif
-
- /* Initialize the timer */
- if (host_idx == MDDI_HOST_PRIM)
- mddi_host_timer_service(0);
-}
-
-void mddi_host_init(mddi_host_type host_idx)
-/* Write out the MDDI configuration registers */
-{
- static boolean initialized = FALSE;
- mddi_host_cntl_type *pmhctl;
-
- if (host_idx >= MDDI_NUM_HOST_CORES) {
- MDDI_MSG_ERR("Invalid host core index\n");
- return;
- }
-
- if (!initialized) {
- uint16 idx;
- mddi_host_type host;
- for (host = MDDI_HOST_PRIM; host < MDDI_NUM_HOST_CORES; host++) {
- pmhctl = &(mhctl[host]);
- initialized = TRUE;
-
- pmhctl->llist_ptr =
- dma_alloc_coherent(NULL, MDDI_LLIST_POOL_SIZE,
- &(pmhctl->llist_dma_addr),
- GFP_KERNEL);
- pmhctl->llist_dma_ptr =
- (mddi_linked_list_type *) (void *)pmhctl->
- llist_dma_addr;
-#ifdef FEATURE_MDDI_DISABLE_REVERSE
- pmhctl->rev_data_buf = NULL;
- if (pmhctl->llist_ptr == NULL)
-#else
- mddi_rev_user.waiting = FALSE;
- init_completion(&(mddi_rev_user.done_comp));
- pmhctl->rev_data_buf =
- dma_alloc_coherent(NULL, MDDI_MAX_REV_DATA_SIZE,
- &(pmhctl->rev_data_dma_addr),
- GFP_KERNEL);
- if ((pmhctl->llist_ptr == NULL)
- || (pmhctl->rev_data_buf == NULL))
-#endif
- {
- MDDI_MSG_CRIT
- ("unable to alloc non-cached memory\n");
- }
- llist_extern[host] = pmhctl->llist_ptr;
- llist_dma_extern[host] = pmhctl->llist_dma_ptr;
- llist_extern_notify[host] = pmhctl->llist_notify;
-
- for (idx = 0; idx < UNASSIGNED_INDEX; idx++) {
- init_completion(&
- (pmhctl->llist_notify[idx].
- done_comp));
- }
- init_completion(&(pmhctl->mddi_llist_avail_comp));
- spin_lock_init(&mddi_host_spin_lock);
- pmhctl->mddi_waiting_for_llist_avail = FALSE;
- pmhctl->mddi_rev_ptr_write_val =
- (uint32) (void *)(pmhctl->rev_data_dma_addr);
- pmhctl->rev_ptr_start = (void *)pmhctl->rev_data_buf;
-
- pmhctl->rev_pkt_size = MDDI_DEFAULT_REV_PKT_SIZE;
- pmhctl->rev_state = MDDI_REV_IDLE;
-#ifdef IMAGE_MODEM_PROC
- /* assume hibernation state is last state from APPS proc, so that
- * we don't reinitialize the host core */
- pmhctl->link_state = MDDI_LINK_HIBERNATING;
-#else
- pmhctl->link_state = MDDI_LINK_DISABLED;
-#endif
- pmhctl->driver_state = MDDI_DRIVER_DISABLED;
- pmhctl->disable_hibernation = FALSE;
-
- /* initialize llist variables */
- pmhctl->llist_info.transmitting_start_idx =
- UNASSIGNED_INDEX;
- pmhctl->llist_info.transmitting_end_idx =
- UNASSIGNED_INDEX;
- pmhctl->llist_info.waiting_start_idx = UNASSIGNED_INDEX;
- pmhctl->llist_info.waiting_end_idx = UNASSIGNED_INDEX;
- pmhctl->llist_info.reg_read_idx = UNASSIGNED_INDEX;
- pmhctl->llist_info.next_free_idx =
- MDDI_FIRST_DYNAMIC_LLIST_IDX;
- pmhctl->llist_info.reg_read_waiting = FALSE;
-
- mddi_vsync_detect_enabled = FALSE;
- mddi_gpio.polling_enabled = FALSE;
-
- pmhctl->int_type.count = 0;
- pmhctl->int_type.in_count = 0;
- pmhctl->int_type.disp_req_count = 0;
- pmhctl->int_type.state_change_count = 0;
- pmhctl->int_type.ll_done_count = 0;
- pmhctl->int_type.rev_avail_count = 0;
- pmhctl->int_type.error_count = 0;
- pmhctl->int_type.rev_encap_count = 0;
- pmhctl->int_type.llist_ptr_write_1 = 0;
- pmhctl->int_type.llist_ptr_write_2 = 0;
-
- pmhctl->stats.fwd_crc_count = 0;
- pmhctl->stats.rev_crc_count = 0;
- pmhctl->stats.pri_underflow = 0;
- pmhctl->stats.sec_underflow = 0;
- pmhctl->stats.rev_overflow = 0;
- pmhctl->stats.pri_overwrite = 0;
- pmhctl->stats.sec_overwrite = 0;
- pmhctl->stats.rev_overwrite = 0;
- pmhctl->stats.dma_failure = 0;
- pmhctl->stats.rtd_failure = 0;
- pmhctl->stats.reg_read_failure = 0;
-#ifdef FEATURE_MDDI_UNDERRUN_RECOVERY
- pmhctl->stats.pri_underrun_detected = 0;
-#endif
-
- pmhctl->log_parms.rtd_cnt = 0;
- pmhctl->log_parms.rev_enc_cnt = 0;
- pmhctl->log_parms.vid_cnt = 0;
- pmhctl->log_parms.reg_acc_cnt = 0;
- pmhctl->log_parms.cli_stat_cnt = 0;
- pmhctl->log_parms.cli_cap_cnt = 0;
- pmhctl->log_parms.reg_read_cnt = 0;
- pmhctl->log_parms.link_active_cnt = 0;
- pmhctl->log_parms.link_hibernate_cnt = 0;
- pmhctl->log_parms.fwd_crc_cnt = 0;
- pmhctl->log_parms.rev_crc_cnt = 0;
- pmhctl->log_parms.vsync_response_cnt = 0;
-
- prev_parms[host_idx] = pmhctl->log_parms;
- mddi_client_capability_pkt.packet_length = 0;
- }
-
-#ifndef T_MSM7500
- /* tell clock driver we are user of this PLL */
- MDDI_HOST_ENABLE_IO_CLOCK;
-#endif
- }
-
- mddi_host_powerup(host_idx);
- pmhctl = &(mhctl[host_idx]);
-}
-
-#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
-static uint32 mddi_client_id;
-
-uint32 mddi_get_client_id(void)
-{
-
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- mddi_host_type host_idx = MDDI_HOST_PRIM;
- static boolean client_detection_try = FALSE;
- mddi_host_cntl_type *pmhctl;
- unsigned long flags;
- uint16 saved_rev_pkt_size;
-
- if (!client_detection_try) {
- /* Toshiba display requires larger drive_lo value */
- mddi_host_reg_out(DRIVE_LO, 0x0050);
-
- pmhctl = &(mhctl[MDDI_HOST_PRIM]);
-
- saved_rev_pkt_size = pmhctl->rev_pkt_size;
-
- /* Increase Rev Encap Size */
- pmhctl->rev_pkt_size = MDDI_CLIENT_CAPABILITY_REV_PKT_SIZE;
- mddi_host_reg_out(REV_ENCAP_SZ, pmhctl->rev_pkt_size);
-
- /* disable hibernation temporarily */
- if (!pmhctl->disable_hibernation)
- mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE);
-
- mddi_rev_user.waiting = TRUE;
- INIT_COMPLETION(mddi_rev_user.done_comp);
-
- spin_lock_irqsave(&mddi_host_spin_lock, flags);
-
- /* turn on clock(s), if they have been disabled */
- mddi_host_enable_hclk();
- mddi_host_enable_io_clock();
-
- mddi_client_capability_request = TRUE;
-
- if (pmhctl->rev_state == MDDI_REV_IDLE) {
- /* attempt to send the reverse encapsulation now */
- mddi_issue_reverse_encapsulation();
- }
- spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
-
- wait_for_completion_killable(&(mddi_rev_user.done_comp));
-
- /* Set Rev Encap Size back to its original value */
- pmhctl->rev_pkt_size = saved_rev_pkt_size;
- mddi_host_reg_out(REV_ENCAP_SZ, pmhctl->rev_pkt_size);
-
- /* reenable auto-hibernate */
- if (!pmhctl->disable_hibernation)
- mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE | 1);
-
- mddi_host_reg_out(DRIVE_LO, 0x0032);
- client_detection_try = TRUE;
-
- mddi_client_id = (mddi_client_capability_pkt.Mfr_Name<<16) |
- mddi_client_capability_pkt.Product_Code;
-
- if (!mddi_client_id)
- mddi_disable(1);
- }
-
-#if 0
- switch (mddi_client_capability_pkt.Mfr_Name) {
- case 0x4474:
- if ((mddi_client_capability_pkt.Product_Code != 0x8960) &&
- (target == DISPLAY_1)) {
- ret = PRISM_WVGA;
- }
- break;
-
- case 0xD263:
- if (target == DISPLAY_1)
- ret = TOSHIBA_VGA_PRIM;
- else if (target == DISPLAY_2)
- ret = TOSHIBA_QCIF_SECD;
- break;
-
- case 0:
- if (mddi_client_capability_pkt.Product_Code == 0x8835) {
- if (target == DISPLAY_1)
- ret = SHARP_QVGA_PRIM;
- else if (target == DISPLAY_2)
- ret = SHARP_128x128_SECD;
- }
- break;
-
- default:
- break;
- }
-
- if ((!client_detection_try) && (ret != TOSHIBA_VGA_PRIM)
- && (ret != TOSHIBA_QCIF_SECD)) {
- /* Not a Toshiba display, so change drive_lo back to default value */
- mddi_host_reg_out(DRIVE_LO, 0x0032);
- }
-#endif
-
-#endif
-
- return mddi_client_id;
-}
-#endif
-
-void mddi_host_powerdown(mddi_host_type host_idx)
-{
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
-
- if (host_idx >= MDDI_NUM_HOST_CORES) {
- MDDI_MSG_ERR("Invalid host core index\n");
- return;
- }
-
- if (pmhctl->driver_state == MDDI_DRIVER_RESET) {
- return;
- }
-
- if (host_idx == MDDI_HOST_PRIM) {
- /* disable timer */
- del_timer(&mddi_host_timer);
- }
-
- mddi_host_configure_interrupts(host_idx, FALSE);
-
- /* turn on HCLK to MDDI host core if it has been disabled */
- mddi_host_enable_hclk();
-
- /* MDDI Reset command */
- mddi_host_reg_out(CMD, MDDI_CMD_RESET);
-
- /* Pad Control Register */
- mddi_host_reg_out(PAD_CTL, 0x0);
-
- /* disable IO_CLK and hclk to MDDI host core */
- mddi_host_disable_io_clock();
- mddi_host_disable_hclk();
-
- pmhctl->link_state = MDDI_LINK_DISABLED;
- pmhctl->driver_state = MDDI_DRIVER_RESET;
-
- MDDI_MSG_NOTICE("MDDI Host: Disabling Link\n");
-
-}
-
-uint16 mddi_get_next_free_llist_item(mddi_host_type host_idx, boolean wait)
-{
- unsigned long flags;
- uint16 ret_idx;
- boolean forced_wait = FALSE;
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
-
- ret_idx = pmhctl->llist_info.next_free_idx;
-
- pmhctl->llist_info.next_free_idx++;
- if (pmhctl->llist_info.next_free_idx >= MDDI_NUM_DYNAMIC_LLIST_ITEMS)
- pmhctl->llist_info.next_free_idx = MDDI_FIRST_DYNAMIC_LLIST_IDX;
- spin_lock_irqsave(&mddi_host_spin_lock, flags);
- if (pmhctl->llist_notify[ret_idx].in_use) {
- if (!wait) {
- pmhctl->llist_info.next_free_idx = ret_idx;
- ret_idx = UNASSIGNED_INDEX;
- } else {
- forced_wait = TRUE;
- INIT_COMPLETION(pmhctl->mddi_llist_avail_comp);
- }
- }
- spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
-
- if (forced_wait) {
- wait_for_completion_killable(&
- (pmhctl->
- mddi_llist_avail_comp));
- MDDI_MSG_ERR("task waiting on mddi llist item\n");
- }
-
- if (ret_idx != UNASSIGNED_INDEX) {
- pmhctl->llist_notify[ret_idx].waiting = FALSE;
- pmhctl->llist_notify[ret_idx].done_cb = NULL;
- pmhctl->llist_notify[ret_idx].in_use = TRUE;
- pmhctl->llist_notify[ret_idx].next_idx = UNASSIGNED_INDEX;
- }
-
- return ret_idx;
-}
-
-uint16 mddi_get_reg_read_llist_item(mddi_host_type host_idx, boolean wait)
-{
-#ifdef FEATURE_MDDI_DISABLE_REVERSE
- MDDI_MSG_CRIT("No reverse link available\n");
- (void)wait;
- return FALSE;
-#else
- unsigned long flags;
- uint16 ret_idx;
- boolean error = FALSE;
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
-
- spin_lock_irqsave(&mddi_host_spin_lock, flags);
- if (pmhctl->llist_info.reg_read_idx != UNASSIGNED_INDEX) {
- /* need to block here or is this an error condition? */
- error = TRUE;
- ret_idx = UNASSIGNED_INDEX;
- }
- spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
-
- if (!error) {
- ret_idx = pmhctl->llist_info.reg_read_idx =
- mddi_get_next_free_llist_item(host_idx, wait);
- /* clear the reg_read_waiting flag */
- pmhctl->llist_info.reg_read_waiting = FALSE;
- }
-
- if (error)
- MDDI_MSG_ERR("***** Reg read still in progress! ****\n");
- return ret_idx;
-#endif
-
-}
-
-void mddi_queue_forward_packets(uint16 first_llist_idx,
- uint16 last_llist_idx,
- boolean wait,
- mddi_llist_done_cb_type llist_done_cb,
- mddi_host_type host_idx)
-{
- unsigned long flags;
- mddi_linked_list_type *llist;
- mddi_linked_list_type *llist_dma;
- mddi_host_cntl_type *pmhctl = &(mhctl[host_idx]);
-
- if ((first_llist_idx >= UNASSIGNED_INDEX) ||
- (last_llist_idx >= UNASSIGNED_INDEX)) {
- MDDI_MSG_ERR("MDDI queueing invalid linked list\n");
- return;
- }
-
- if (pmhctl->link_state == MDDI_LINK_DISABLED)
- MDDI_MSG_CRIT("MDDI host powered down!\n");
-
- llist = pmhctl->llist_ptr;
- llist_dma = pmhctl->llist_dma_ptr;
-
- /* clean cache so MDDI host can read data */
- memory_barrier();
-
- pmhctl->llist_notify[last_llist_idx].waiting = wait;
- if (wait)
- INIT_COMPLETION(pmhctl->llist_notify[last_llist_idx].done_comp);
- pmhctl->llist_notify[last_llist_idx].done_cb = llist_done_cb;
-
- spin_lock_irqsave(&mddi_host_spin_lock, flags);
-
- if ((pmhctl->llist_info.transmitting_start_idx == UNASSIGNED_INDEX) &&
- (pmhctl->llist_info.waiting_start_idx == UNASSIGNED_INDEX) &&
- (pmhctl->rev_state == MDDI_REV_IDLE)) {
- /* no packets are currently transmitting */
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- if (first_llist_idx == pmhctl->llist_info.reg_read_idx) {
- /* This is the special case where the packet is a register read. */
- pmhctl->rev_state = MDDI_REV_REG_READ_ISSUED;
- mddi_reg_read_retry = 0;
- /* mddi_rev_reg_read_attempt = 1; */
- }
-#endif
- /* assign transmitting index values */
- pmhctl->llist_info.transmitting_start_idx = first_llist_idx;
- pmhctl->llist_info.transmitting_end_idx = last_llist_idx;
-
- /* turn on clock(s), if they have been disabled */
- mddi_host_enable_hclk();
- mddi_host_enable_io_clock();
- pmhctl->int_type.llist_ptr_write_1++;
- /* Write to primary pointer register */
- dma_coherent_pre_ops();
- mddi_host_reg_out(PRI_PTR, &llist_dma[first_llist_idx]);
-
- /* enable interrupt when complete */
- mddi_host_reg_outm(INTEN, MDDI_INT_PRI_LINK_LIST_DONE,
- MDDI_INT_PRI_LINK_LIST_DONE);
-
- } else if (pmhctl->llist_info.waiting_start_idx == UNASSIGNED_INDEX) {
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- if (first_llist_idx == pmhctl->llist_info.reg_read_idx) {
- /*
- * we have a register read to send but need to wait
- * for current reverse activity to end or there are
- * packets currently transmitting
- */
- /* mddi_rev_reg_read_attempt = 0; */
- pmhctl->llist_info.reg_read_waiting = TRUE;
- }
-#endif
-
- /* assign waiting index values */
- pmhctl->llist_info.waiting_start_idx = first_llist_idx;
- pmhctl->llist_info.waiting_end_idx = last_llist_idx;
- } else {
- uint16 prev_end_idx = pmhctl->llist_info.waiting_end_idx;
-#ifndef FEATURE_MDDI_DISABLE_REVERSE
- if (first_llist_idx == pmhctl->llist_info.reg_read_idx) {
- /*
- * we have a register read to send but need to wait
- * for current reverse activity to end or there are
- * packets currently transmitting
- */
- /* mddi_rev_reg_read_attempt = 0; */
- pmhctl->llist_info.reg_read_waiting = TRUE;
- }
-#endif
-
- llist = pmhctl->llist_ptr;
-
- /* clear end flag in previous last packet */
- llist[prev_end_idx].link_controller_flags = 0;
- pmhctl->llist_notify[prev_end_idx].next_idx = first_llist_idx;
-
- /* set the next_packet_pointer of the previous last packet */
- llist[prev_end_idx].next_packet_pointer =
- (void *)(&llist_dma[first_llist_idx]);
-
- /* clean cache so MDDI host can read data */
- memory_barrier();
-
- /* assign new waiting last index value */
- pmhctl->llist_info.waiting_end_idx = last_llist_idx;
- }
-
- spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
-
-}
-
-void mddi_host_write_pix_attr_reg(uint32 value)
-{
- (void)value;
-}
-
-void mddi_queue_reverse_encapsulation(boolean wait)
-{
-#ifdef FEATURE_MDDI_DISABLE_REVERSE
- MDDI_MSG_CRIT("No reverse link available\n");
- (void)wait;
-#else
- unsigned long flags;
- boolean error = FALSE;
- mddi_host_type host_idx = MDDI_HOST_PRIM;
- mddi_host_cntl_type *pmhctl = &(mhctl[MDDI_HOST_PRIM]);
-
- spin_lock_irqsave(&mddi_host_spin_lock, flags);
-
- /* turn on clock(s), if they have been disabled */
- mddi_host_enable_hclk();
- mddi_host_enable_io_clock();
-
- if (wait) {
- if (!mddi_rev_user.waiting) {
- mddi_rev_user.waiting = TRUE;
- INIT_COMPLETION(mddi_rev_user.done_comp);
- } else
- error = TRUE;
- }
- mddi_rev_encap_user_request = TRUE;
-
- if (pmhctl->rev_state == MDDI_REV_IDLE) {
- /* attempt to send the reverse encapsulation now */
- mddi_host_type orig_host_idx = mddi_curr_host;
- mddi_curr_host = host_idx;
- mddi_issue_reverse_encapsulation();
- mddi_curr_host = orig_host_idx;
- }
- spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
-
- if (error) {
- MDDI_MSG_ERR("Reverse Encap request already in progress\n");
- } else if (wait)
- wait_for_completion_killable(&(mddi_rev_user.done_comp));
-#endif
-}
-
-/* ISR to be executed */
-boolean mddi_set_rev_handler(mddi_rev_handler_type handler, uint16 pkt_type)
-{
-#ifdef FEATURE_MDDI_DISABLE_REVERSE
- MDDI_MSG_CRIT("No reverse link available\n");
- (void)handler;
- (void)pkt_type;
- return (FALSE);
-#else
- unsigned long flags;
- uint16 hdlr;
- boolean handler_set = FALSE;
- boolean overwrite = FALSE;
- mddi_host_type host_idx = MDDI_HOST_PRIM;
- mddi_host_cntl_type *pmhctl = &(mhctl[MDDI_HOST_PRIM]);
-
- /* Disable interrupts */
- spin_lock_irqsave(&mddi_host_spin_lock, flags);
-
- for (hdlr = 0; hdlr < MAX_MDDI_REV_HANDLERS; hdlr++) {
- if (mddi_rev_pkt_handler[hdlr].pkt_type == pkt_type) {
- mddi_rev_pkt_handler[hdlr].handler = handler;
- if (handler == NULL) {
- /* clearing handler from table */
- mddi_rev_pkt_handler[hdlr].pkt_type =
- INVALID_PKT_TYPE;
- handler_set = TRUE;
- if (pkt_type == 0x10) { /* video stream packet */
- /* ensure HCLK on to MDDI host core before register write */
- mddi_host_enable_hclk();
- /* No longer getting video, so reset rev encap size to default */
- pmhctl->rev_pkt_size =
- MDDI_DEFAULT_REV_PKT_SIZE;
- mddi_host_reg_out(REV_ENCAP_SZ,
- pmhctl->rev_pkt_size);
- }
- } else {
- /* already a handler for this packet */
- overwrite = TRUE;
- }
- break;
- }
- }
- if ((hdlr >= MAX_MDDI_REV_HANDLERS) && (handler != NULL)) {
- /* assigning new handler */
- for (hdlr = 0; hdlr < MAX_MDDI_REV_HANDLERS; hdlr++) {
- if (mddi_rev_pkt_handler[hdlr].pkt_type ==
- INVALID_PKT_TYPE) {
- if ((pkt_type == 0x10) && /* video stream packet */
- (pmhctl->rev_pkt_size <
- MDDI_VIDEO_REV_PKT_SIZE)) {
- /* ensure HCLK on to MDDI host core before register write */
- mddi_host_enable_hclk();
- /* Increase Rev Encap Size */
- pmhctl->rev_pkt_size =
- MDDI_VIDEO_REV_PKT_SIZE;
- mddi_host_reg_out(REV_ENCAP_SZ,
- pmhctl->rev_pkt_size);
- }
- mddi_rev_pkt_handler[hdlr].handler = handler;
- mddi_rev_pkt_handler[hdlr].pkt_type = pkt_type;
- handler_set = TRUE;
- break;
- }
- }
- }
-
- /* Restore interrupts */
- spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
-
- if (overwrite)
- MDDI_MSG_ERR("Overwriting previous rev packet handler\n");
-
- return handler_set;
-
-#endif
-} /* mddi_set_rev_handler */
-
-void mddi_host_disable_hibernation(boolean disable)
-{
- mddi_host_type host_idx = MDDI_HOST_PRIM;
- mddi_host_cntl_type *pmhctl = &(mhctl[MDDI_HOST_PRIM]);
-
- if (disable) {
- pmhctl->disable_hibernation = TRUE;
- /* hibernation will be turned off by isr next time it is entered */
- } else {
- if (pmhctl->disable_hibernation) {
- unsigned long flags;
- spin_lock_irqsave(&mddi_host_spin_lock, flags);
- if (!MDDI_HOST_IS_HCLK_ON)
- MDDI_HOST_ENABLE_HCLK;
- mddi_host_reg_out(CMD, MDDI_CMD_HIBERNATE | 1);
- spin_unlock_irqrestore(&mddi_host_spin_lock, flags);
- pmhctl->disable_hibernation = FALSE;
- }
- }
-}
-
-void mddi_mhctl_remove(mddi_host_type host_idx)
-{
- mddi_host_cntl_type *pmhctl;
-
- pmhctl = &(mhctl[host_idx]);
-
- dma_free_coherent(NULL, MDDI_LLIST_POOL_SIZE, (void *)pmhctl->llist_ptr,
- pmhctl->llist_dma_addr);
-
- dma_free_coherent(NULL, MDDI_MAX_REV_DATA_SIZE,
- (void *)pmhctl->rev_data_buf,
- pmhctl->rev_data_dma_addr);
-}
diff --git a/drivers/staging/msm/mddihosti.h b/drivers/staging/msm/mddihosti.h
deleted file mode 100644
index 79eb39914ac..00000000000
--- a/drivers/staging/msm/mddihosti.h
+++ /dev/null
@@ -1,531 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef MDDIHOSTI_H
-#define MDDIHOSTI_H
-
-#include "msm_fb.h"
-#include "mddihost.h"
-#include <linux/clk.h>
-
-/* Register offsets in MDDI, applies to both msm_pmdh_base and
- * (u32)msm_emdh_base. */
-#define MDDI_CMD 0x0000
-#define MDDI_VERSION 0x0004
-#define MDDI_PRI_PTR 0x0008
-#define MDDI_BPS 0x0010
-#define MDDI_SPM 0x0014
-#define MDDI_INT 0x0018
-#define MDDI_INTEN 0x001c
-#define MDDI_REV_PTR 0x0020
-#define MDDI_REV_SIZE 0x0024
-#define MDDI_STAT 0x0028
-#define MDDI_REV_RATE_DIV 0x002c
-#define MDDI_REV_CRC_ERR 0x0030
-#define MDDI_TA1_LEN 0x0034
-#define MDDI_TA2_LEN 0x0038
-#define MDDI_TEST 0x0040
-#define MDDI_REV_PKT_CNT 0x0044
-#define MDDI_DRIVE_HI 0x0048
-#define MDDI_DRIVE_LO 0x004c
-#define MDDI_DISP_WAKE 0x0050
-#define MDDI_REV_ENCAP_SZ 0x0054
-#define MDDI_RTD_VAL 0x0058
-#define MDDI_PAD_CTL 0x0068
-#define MDDI_DRIVER_START_CNT 0x006c
-#define MDDI_CORE_VER 0x008c
-#define MDDI_FIFO_ALLOC 0x0090
-#define MDDI_PAD_IO_CTL 0x00a0
-#define MDDI_PAD_CAL 0x00a4
-
-extern u32 mddi_msg_level;
-
-/* No longer need to write to clear these registers */
-#define xxxx_mddi_host_reg_outm(reg, mask, val) \
-do { \
- if (host_idx == MDDI_HOST_PRIM) \
- mddi_host_reg_outm_pmdh(reg, mask, val); \
- else \
- mddi_host_reg_outm_emdh(reg, mask, val); \
-} while (0)
-
-#define mddi_host_reg_outm(reg, mask, val) \
-do { \
- unsigned long __addr; \
- if (host_idx == MDDI_HOST_PRIM) \
- __addr = (u32)msm_pmdh_base + MDDI_##reg; \
- else \
- __addr = (u32)msm_emdh_base + MDDI_##reg; \
- writel((readl(__addr) & ~(mask)) | ((val) & (mask)), __addr); \
-} while (0)
-
-#define xxxx_mddi_host_reg_out(reg, val) \
-do { \
- if (host_idx == MDDI_HOST_PRIM) \
- mddi_host_reg_out_pmdh(reg, val); \
- else \
- mddi_host_reg_out_emdh(reg, val); \
- } while (0)
-
-#define mddi_host_reg_out(reg, val) \
-do { \
- if (host_idx == MDDI_HOST_PRIM) \
- writel(val, (u32)msm_pmdh_base + MDDI_##reg); \
- else \
- writel(val, (u32)msm_emdh_base + MDDI_##reg); \
-} while (0)
-
-#define xxxx_mddi_host_reg_in(reg) \
- ((host_idx) ? \
- mddi_host_reg_in_emdh(reg) : mddi_host_reg_in_pmdh(reg));
-
-#define mddi_host_reg_in(reg) \
-((host_idx) ? \
- readl((u32)msm_emdh_base + MDDI_##reg) : \
- readl((u32)msm_pmdh_base + MDDI_##reg)) \
-
-#define xxxx_mddi_host_reg_inm(reg, mask) \
- ((host_idx) ? \
- mddi_host_reg_inm_emdh(reg, mask) : \
- mddi_host_reg_inm_pmdh(reg, mask);)
-
-#define mddi_host_reg_inm(reg, mask) \
-((host_idx) ? \
- readl((u32)msm_emdh_base + MDDI_##reg) & (mask) : \
- readl((u32)msm_pmdh_base + MDDI_##reg) & (mask)) \
-
-/* Using non-cacheable pmem, so do nothing */
-#define mddi_invalidate_cache_lines(addr_start, num_bytes)
-/*
- * Using non-cacheable pmem, so do nothing with cache
- * but, ensure write goes out to memory
- */
-#define mddi_flush_cache_lines(addr_start, num_bytes) \
- (void) addr_start; \
- (void) num_bytes; \
- memory_barrier()
-
-/* Since this translates to Remote Procedure Calls to check on clock status
-* just use a local variable to keep track of io_clock */
-#define MDDI_HOST_IS_IO_CLOCK_ON mddi_host_io_clock_on
-#define MDDI_HOST_ENABLE_IO_CLOCK
-#define MDDI_HOST_DISABLE_IO_CLOCK
-#define MDDI_HOST_IS_HCLK_ON mddi_host_hclk_on
-#define MDDI_HOST_ENABLE_HCLK
-#define MDDI_HOST_DISABLE_HCLK
-#define FEATURE_MDDI_HOST_IO_CLOCK_CONTROL_DISABLE
-#define FEATURE_MDDI_HOST_HCLK_CONTROL_DISABLE
-
-#define TRAMP_MDDI_HOST_ISR TRAMP_MDDI_PRI_ISR
-#define TRAMP_MDDI_HOST_EXT_ISR TRAMP_MDDI_EXT_ISR
-#define MDP_LINE_COUNT_BMSK 0x3ff
-#define MDP_SYNC_STATUS 0x000c
-#define MDP_LINE_COUNT \
-(readl(msm_mdp_base + MDP_SYNC_STATUS) & MDP_LINE_COUNT_BMSK)
-
-/* MDP sends 256 pixel packets, so lower value hibernates more without
-* significantly increasing latency of waiting for next subframe */
-#define MDDI_HOST_BYTES_PER_SUBFRAME 0x3C00
-
-#if defined(CONFIG_FB_MSM_MDP31) || defined(CONFIG_FB_MSM_MDP40)
-#define MDDI_HOST_TA2_LEN 0x001a
-#define MDDI_HOST_REV_RATE_DIV 0x0004
-#else
-#define MDDI_HOST_TA2_LEN 0x000c
-#define MDDI_HOST_REV_RATE_DIV 0x0002
-#endif
-
-#define MDDI_MSG_EMERG(msg, ...) \
- if (mddi_msg_level > 0) \
- printk(KERN_EMERG msg, ## __VA_ARGS__);
-#define MDDI_MSG_ALERT(msg, ...) \
- if (mddi_msg_level > 1) \
- printk(KERN_ALERT msg, ## __VA_ARGS__);
-#define MDDI_MSG_CRIT(msg, ...) \
- if (mddi_msg_level > 2) \
- printk(KERN_CRIT msg, ## __VA_ARGS__);
-#define MDDI_MSG_ERR(msg, ...) \
- if (mddi_msg_level > 3) \
- printk(KERN_ERR msg, ## __VA_ARGS__);
-#define MDDI_MSG_WARNING(msg, ...) \
- if (mddi_msg_level > 4) \
- printk(KERN_WARNING msg, ## __VA_ARGS__);
-#define MDDI_MSG_NOTICE(msg, ...) \
- if (mddi_msg_level > 5) \
- printk(KERN_NOTICE msg, ## __VA_ARGS__);
-#define MDDI_MSG_INFO(msg, ...) \
- if (mddi_msg_level > 6) \
- printk(KERN_INFO msg, ## __VA_ARGS__);
-#define MDDI_MSG_DEBUG(msg, ...) \
- if (mddi_msg_level > 7) \
- printk(KERN_DEBUG msg, ## __VA_ARGS__);
-
-#define GCC_PACKED __attribute__((packed))
-typedef struct GCC_PACKED {
- uint16 packet_length;
- /* total # of bytes in the packet not including
- the packet_length field. */
-
- uint16 packet_type;
- /* A Packet Type of 70 identifies the packet as
- a Client status Packet. */
-
- uint16 bClient_ID;
- /* This field is reserved for future use and shall
- be set to zero. */
-
-} mddi_rev_packet_type;
-
-typedef struct GCC_PACKED {
- uint16 packet_length;
- /* total # of bytes in the packet not including
- the packet_length field. */
-
- uint16 packet_type;
- /* A Packet Type of 70 identifies the packet as
- a Client status Packet. */
-
- uint16 bClient_ID;
- /* This field is reserved for future use and shall
- be set to zero. */
-
- uint16 reverse_link_request;
- /* 16-bit unsigned integer with the number of bytes the client
- needs in the reverse encapsulation message
- to transmit data. */
-
- uint8 crc_error_count;
- uint8 capability_change;
- uint16 graphics_busy_flags;
-
- uint16 parameter_CRC;
- /* 16-bit CRC of all the bytes in the packet
- including Packet Length. */
-
-} mddi_client_status_type;
-
-typedef struct GCC_PACKED {
- uint16 packet_length;
- /* total # of bytes in the packet not including
- the packet_length field. */
-
- uint16 packet_type;
- /* A Packet Type of 66 identifies the packet as
- a Client Capability Packet. */
-
- uint16 bClient_ID;
- /* This field is reserved for future use and
- shall be set to zero. */
-
- uint16 Protocol_Version;
- uint16 Minimum_Protocol_Version;
- uint16 Data_Rate_Capability;
- uint8 Interface_Type_Capability;
- uint8 Number_of_Alt_Displays;
- uint16 PostCal_Data_Rate;
- uint16 Bitmap_Width;
- uint16 Bitmap_Height;
- uint16 Display_Window_Width;
- uint16 Display_Window_Height;
- uint32 Color_Map_Size;
- uint16 Color_Map_RGB_Width;
- uint16 RGB_Capability;
- uint8 Monochrome_Capability;
- uint8 Reserved_1;
- uint16 Y_Cb_Cr_Capability;
- uint16 Bayer_Capability;
- uint16 Alpha_Cursor_Image_Planes;
- uint32 Client_Feature_Capability_Indicators;
- uint8 Maximum_Video_Frame_Rate_Capability;
- uint8 Minimum_Video_Frame_Rate_Capability;
- uint16 Minimum_Sub_frame_Rate;
- uint16 Audio_Buffer_Depth;
- uint16 Audio_Channel_Capability;
- uint16 Audio_Sample_Rate_Capability;
- uint8 Audio_Sample_Resolution;
- uint8 Mic_Audio_Sample_Resolution;
- uint16 Mic_Sample_Rate_Capability;
- uint8 Keyboard_Data_Format;
- uint8 pointing_device_data_format;
- uint16 content_protection_type;
- uint16 Mfr_Name;
- uint16 Product_Code;
- uint16 Reserved_3;
- uint32 Serial_Number;
- uint8 Week_of_Manufacture;
- uint8 Year_of_Manufacture;
-
- uint16 parameter_CRC;
- /* 16-bit CRC of all the bytes in the packet including Packet Length. */
-
-} mddi_client_capability_type;
-
-typedef struct GCC_PACKED {
- uint16 packet_length;
- /* total # of bytes in the packet not including the packet_length field. */
-
- uint16 packet_type;
- /* A Packet Type of 16 identifies the packet as a Video Stream Packet. */
-
- uint16 bClient_ID;
- /* This field is reserved for future use and shall be set to zero. */
-
- uint16 video_data_format_descriptor;
- /* format of each pixel in the Pixel Data in the present stream in the
- * present packet.
- * If bits [15:13] = 000 monochrome
- * If bits [15:13] = 001 color pixels (palette).
- * If bits [15:13] = 010 color pixels in raw RGB
- * If bits [15:13] = 011 data in 4:2:2 Y Cb Cr format
- * If bits [15:13] = 100 Bayer pixels
- */
-
- uint16 pixel_data_attributes;
- /* interpreted as follows:
- * Bits [1:0] = 11 pixel data is displayed to both eyes
- * Bits [1:0] = 10 pixel data is routed to the left eye only.
- * Bits [1:0] = 01 pixel data is routed to the right eye only.
- * Bits [1:0] = 00 pixel data is routed to the alternate display.
- * Bit 2 is 0 Pixel Data is in the standard progressive format.
- * Bit 2 is 1 Pixel Data is in interlace format.
- * Bit 3 is 0 Pixel Data is in the standard progressive format.
- * Bit 3 is 1 Pixel Data is in alternate pixel format.
- * Bit 4 is 0 Pixel Data is to or from the display frame buffer.
- * Bit 4 is 1 Pixel Data is to or from the camera.
- * Bit 5 is 0 pixel data contains the next consecutive row of pixels.
- * Bit 5 is 1 X Left Edge, Y Top Edge, X Right Edge, Y Bottom Edge,
- * X Start, and Y Start parameters are not defined and
- * shall be ignored by the client.
- * Bits [7:6] = 01 Pixel data is written to the offline image buffer.
- * Bits [7:6] = 00 Pixel data is written to the buffer to refresh display.
- * Bits [7:6] = 11 Pixel data is written to all image buffers.
- * Bits [7:6] = 10 Invalid. Reserved for future use.
- * Bits 8 through 11 alternate display number.
- * Bits 12 through 14 are reserved for future use and shall be set to zero.
- * Bit 15 is 1 the row of pixels is the last row of pixels in a frame.
- */
-
- uint16 x_left_edge;
- uint16 y_top_edge;
- /* X,Y coordinate of the top left edge of the screen window */
-
- uint16 x_right_edge;
- uint16 y_bottom_edge;
- /* X,Y coordinate of the bottom right edge of the window being updated. */
-
- uint16 x_start;
- uint16 y_start;
- /* (X Start, Y Start) is the first pixel in the Pixel Data field below. */
-
- uint16 pixel_count;
- /* number of pixels in the Pixel Data field below. */
-
- uint16 parameter_CRC;
- /* 16-bit CRC of all bytes from the Packet Length to the Pixel Count. */
-
- uint16 reserved;
- /* 16-bit variable to make structure align on 4 byte boundary */
-
-} mddi_video_stream_packet_type;
-
-typedef struct GCC_PACKED {
- uint16 packet_length;
- /* total # of bytes in the packet not including the packet_length field. */
-
- uint16 packet_type;
- /* A Packet Type of 146 identifies the packet as a Register Access Packet. */
-
- uint16 bClient_ID;
- /* This field is reserved for future use and shall be set to zero. */
-
- uint16 read_write_info;
- /* Bits 13:0 a 14-bit unsigned integer that specifies the number of
- * 32-bit Register Data List items to be transferred in the
- * Register Data List field.
- * Bits[15:14] = 00 Write to register(s);
- * Bits[15:14] = 10 Read from register(s);
- * Bits[15:14] = 11 Response to a Read.
- * Bits[15:14] = 01 this value is reserved for future use. */
-
- uint32 register_address;
- /* the register address that is to be written to or read from. */
-
- uint16 parameter_CRC;
- /* 16-bit CRC of all bytes from the Packet Length to the Register Address. */
-
- uint32 register_data_list;
- /* list of 4-byte register data values for/from client registers */
-
-} mddi_register_access_packet_type;
-
-typedef union GCC_PACKED {
- mddi_video_stream_packet_type video_pkt;
- mddi_register_access_packet_type register_pkt;
- /* add 48 byte pad to ensure 64 byte llist struct, that can be
- * manipulated easily with cache */
- uint32 alignment_pad[12]; /* 48 bytes */
-} mddi_packet_header_type;
-
-typedef struct GCC_PACKED mddi_host_llist_struct {
- uint16 link_controller_flags;
- uint16 packet_header_count;
- uint16 packet_data_count;
- void *packet_data_pointer;
- struct mddi_host_llist_struct *next_packet_pointer;
- uint16 reserved;
- mddi_packet_header_type packet_header;
-} mddi_linked_list_type;
-
-typedef struct {
- struct completion done_comp;
- mddi_llist_done_cb_type done_cb;
- uint16 next_idx;
- boolean waiting;
- boolean in_use;
-} mddi_linked_list_notify_type;
-
-#define MDDI_LLIST_POOL_SIZE 0x1000
-#define MDDI_MAX_NUM_LLIST_ITEMS (MDDI_LLIST_POOL_SIZE / \
- sizeof(mddi_linked_list_type))
-#define UNASSIGNED_INDEX MDDI_MAX_NUM_LLIST_ITEMS
-#define MDDI_FIRST_DYNAMIC_LLIST_IDX 0
-
-/* Static llist items can be used for applications that frequently send
- * the same set of packets using the linked list interface. */
-/* Here we configure for 6 static linked list items:
- * The 1st is used for the adaptive backlight setting,
- * and the remaining 5 are used for sending window adjustments for
- * MDDI clients that need windowing info sent separately from video
- * packets. */
-#define MDDI_NUM_STATIC_ABL_ITEMS 1
-#define MDDI_NUM_STATIC_WINDOW_ITEMS 5
-#define MDDI_NUM_STATIC_LLIST_ITEMS (MDDI_NUM_STATIC_ABL_ITEMS + \
- MDDI_NUM_STATIC_WINDOW_ITEMS)
-#define MDDI_NUM_DYNAMIC_LLIST_ITEMS (MDDI_MAX_NUM_LLIST_ITEMS - \
- MDDI_NUM_STATIC_LLIST_ITEMS)
-
-#define MDDI_FIRST_STATIC_LLIST_IDX MDDI_NUM_DYNAMIC_LLIST_ITEMS
-#define MDDI_FIRST_STATIC_ABL_IDX MDDI_FIRST_STATIC_LLIST_IDX
-#define MDDI_FIRST_STATIC_WINDOW_IDX (MDDI_FIRST_STATIC_LLIST_IDX + \
- MDDI_NUM_STATIC_ABL_ITEMS)
-
-/* GPIO registers */
-#define VSYNC_WAKEUP_REG 0x80
-#define GPIO_REG 0x81
-#define GPIO_OUTPUT_REG 0x82
-#define GPIO_INTERRUPT_REG 0x83
-#define GPIO_INTERRUPT_ENABLE_REG 0x84
-#define GPIO_POLARITY_REG 0x85
-
-/* Interrupt Bits */
-#define MDDI_INT_PRI_PTR_READ 0x0001
-#define MDDI_INT_SEC_PTR_READ 0x0002
-#define MDDI_INT_REV_DATA_AVAIL 0x0004
-#define MDDI_INT_DISP_REQ 0x0008
-#define MDDI_INT_PRI_UNDERFLOW 0x0010
-#define MDDI_INT_SEC_UNDERFLOW 0x0020
-#define MDDI_INT_REV_OVERFLOW 0x0040
-#define MDDI_INT_CRC_ERROR 0x0080
-#define MDDI_INT_MDDI_IN 0x0100
-#define MDDI_INT_PRI_OVERWRITE 0x0200
-#define MDDI_INT_SEC_OVERWRITE 0x0400
-#define MDDI_INT_REV_OVERWRITE 0x0800
-#define MDDI_INT_DMA_FAILURE 0x1000
-#define MDDI_INT_LINK_ACTIVE 0x2000
-#define MDDI_INT_IN_HIBERNATION 0x4000
-#define MDDI_INT_PRI_LINK_LIST_DONE 0x8000
-#define MDDI_INT_SEC_LINK_LIST_DONE 0x10000
-#define MDDI_INT_NO_CMD_PKTS_PEND 0x20000
-#define MDDI_INT_RTD_FAILURE 0x40000
-
-#define MDDI_INT_ERROR_CONDITIONS ( \
- MDDI_INT_PRI_UNDERFLOW | MDDI_INT_SEC_UNDERFLOW | \
- MDDI_INT_REV_OVERFLOW | MDDI_INT_CRC_ERROR | \
- MDDI_INT_PRI_OVERWRITE | MDDI_INT_SEC_OVERWRITE | \
- MDDI_INT_RTD_FAILURE | \
- MDDI_INT_REV_OVERWRITE | MDDI_INT_DMA_FAILURE)
-
-#define MDDI_INT_LINK_STATE_CHANGES ( \
- MDDI_INT_LINK_ACTIVE | MDDI_INT_IN_HIBERNATION)
-
-/* Status Bits */
-#define MDDI_STAT_LINK_ACTIVE 0x0001
-#define MDDI_STAT_NEW_REV_PTR 0x0002
-#define MDDI_STAT_NEW_PRI_PTR 0x0004
-#define MDDI_STAT_NEW_SEC_PTR 0x0008
-#define MDDI_STAT_IN_HIBERNATION 0x0010
-#define MDDI_STAT_PRI_LINK_LIST_DONE 0x0020
-#define MDDI_STAT_SEC_LINK_LIST_DONE 0x0040
-#define MDDI_STAT_PENDING_TIMING_PKT 0x0080
-#define MDDI_STAT_PENDING_REV_ENCAP 0x0100
-#define MDDI_STAT_PENDING_POWERDOWN 0x0200
-#define MDDI_STAT_RTD_MEAS_FAIL 0x0800
-#define MDDI_STAT_CLIENT_WAKEUP_REQ 0x1000
-
-/* Command Bits */
-#define MDDI_CMD_POWERDOWN 0x0100
-#define MDDI_CMD_POWERUP 0x0200
-#define MDDI_CMD_HIBERNATE 0x0300
-#define MDDI_CMD_RESET 0x0400
-#define MDDI_CMD_DISP_IGNORE 0x0501
-#define MDDI_CMD_DISP_LISTEN 0x0500
-#define MDDI_CMD_SEND_REV_ENCAP 0x0600
-#define MDDI_CMD_GET_CLIENT_CAP 0x0601
-#define MDDI_CMD_GET_CLIENT_STATUS 0x0602
-#define MDDI_CMD_SEND_RTD 0x0700
-#define MDDI_CMD_LINK_ACTIVE 0x0900
-#define MDDI_CMD_PERIODIC_REV_ENCAP 0x0A00
-
-extern void mddi_host_init(mddi_host_type host);
-extern void mddi_host_powerdown(mddi_host_type host);
-extern uint16 mddi_get_next_free_llist_item(mddi_host_type host, boolean wait);
-extern uint16 mddi_get_reg_read_llist_item(mddi_host_type host, boolean wait);
-extern void mddi_queue_forward_packets(uint16 first_llist_idx,
- uint16 last_llist_idx,
- boolean wait,
- mddi_llist_done_cb_type llist_done_cb,
- mddi_host_type host);
-
-extern void mddi_host_write_pix_attr_reg(uint32 value);
-extern void mddi_client_lcd_gpio_poll(uint32 poll_reg_val);
-extern void mddi_client_lcd_vsync_detected(boolean detected);
-extern void mddi_host_disable_hibernation(boolean disable);
-
-extern mddi_linked_list_type *llist_extern[];
-extern mddi_linked_list_type *llist_dma_extern[];
-extern mddi_linked_list_notify_type *llist_extern_notify[];
-extern struct timer_list mddi_host_timer;
-
-typedef struct {
- uint16 transmitting_start_idx;
- uint16 transmitting_end_idx;
- uint16 waiting_start_idx;
- uint16 waiting_end_idx;
- uint16 reg_read_idx;
- uint16 next_free_idx;
- boolean reg_read_waiting;
-} mddi_llist_info_type;
-
-extern mddi_llist_info_type mddi_llist;
-
-#define MDDI_GPIO_DEFAULT_POLLING_INTERVAL 200
-typedef struct {
- uint32 polling_reg;
- uint32 polling_val;
- uint32 polling_interval;
- boolean polling_enabled;
-} mddi_gpio_info_type;
-
-uint32 mddi_get_client_id(void);
-void mddi_mhctl_remove(mddi_host_type host_idx);
-void mddi_host_timer_service(unsigned long data);
-#endif /* MDDIHOSTI_H */
diff --git a/drivers/staging/msm/mdp.c b/drivers/staging/msm/mdp.c
deleted file mode 100644
index 58cb4046293..00000000000
--- a/drivers/staging/msm/mdp.c
+++ /dev/null
@@ -1,1113 +0,0 @@
-/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/hrtimer.h>
-#include <linux/clk.h>
-#include <mach/hardware.h>
-#include <linux/io.h>
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/uaccess.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-#ifdef CONFIG_FB_MSM_MDP40
-#include "mdp4.h"
-#endif
-
-static struct clk *mdp_clk;
-static struct clk *mdp_pclk;
-
-struct completion mdp_ppp_comp;
-struct semaphore mdp_ppp_mutex;
-struct semaphore mdp_pipe_ctrl_mutex;
-
-unsigned long mdp_timer_duration = (HZ); /* 1 sec */
-/* unsigned long mdp_mdp_timer_duration=0; */
-
-boolean mdp_ppp_waiting = FALSE;
-uint32 mdp_tv_underflow_cnt;
-uint32 mdp_lcdc_underflow_cnt;
-
-boolean mdp_current_clk_on = FALSE;
-boolean mdp_is_in_isr = FALSE;
-
-/*
- * legacy mdp_in_processing is only for DMA2-MDDI
- * this applies to DMA2 block only
- */
-uint32 mdp_in_processing = FALSE;
-
-#ifdef CONFIG_FB_MSM_MDP40
-uint32 mdp_intr_mask = MDP4_ANY_INTR_MASK;
-#else
-uint32 mdp_intr_mask = MDP_ANY_INTR_MASK;
-#endif
-
-MDP_BLOCK_TYPE mdp_debug[MDP_MAX_BLOCK];
-
-int32 mdp_block_power_cnt[MDP_MAX_BLOCK];
-
-spinlock_t mdp_spin_lock;
-struct workqueue_struct *mdp_dma_wq; /*mdp dma wq */
-struct workqueue_struct *mdp_vsync_wq; /*mdp vsync wq */
-
-static struct workqueue_struct *mdp_pipe_ctrl_wq; /* mdp pipe ctrl wq */
-static struct delayed_work mdp_pipe_ctrl_worker;
-
-#ifdef CONFIG_FB_MSM_MDP40
-struct mdp_dma_data dma2_data;
-struct mdp_dma_data dma_s_data;
-struct mdp_dma_data dma_e_data;
-#else
-static struct mdp_dma_data dma2_data;
-static struct mdp_dma_data dma_s_data;
-static struct mdp_dma_data dma_e_data;
-#endif
-static struct mdp_dma_data dma3_data;
-
-extern ktime_t mdp_dma2_last_update_time;
-
-extern uint32 mdp_dma2_update_time_in_usec;
-extern int mdp_lcd_rd_cnt_offset_slow;
-extern int mdp_lcd_rd_cnt_offset_fast;
-extern int mdp_usec_diff_threshold;
-
-#ifdef CONFIG_FB_MSM_LCDC
-extern int mdp_lcdc_pclk_clk_rate;
-extern int mdp_lcdc_pad_pclk_clk_rate;
-extern int first_pixel_start_x;
-extern int first_pixel_start_y;
-#endif
-
-#ifdef MSM_FB_ENABLE_DBGFS
-struct dentry *mdp_dir;
-#endif
-
-#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
-static int mdp_suspend(struct platform_device *pdev, pm_message_t state);
-#else
-#define mdp_suspend NULL
-#endif
-
-struct timeval mdp_dma2_timeval;
-struct timeval mdp_ppp_timeval;
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
-static struct early_suspend early_suspend;
-#endif
-
-#ifndef CONFIG_FB_MSM_MDP22
-DEFINE_MUTEX(mdp_lut_push_sem);
-static int mdp_lut_i;
-static int mdp_lut_hw_update(struct fb_cmap *cmap)
-{
- int i;
- u16 *c[3];
- u16 r, g, b;
-
- c[0] = cmap->green;
- c[1] = cmap->blue;
- c[2] = cmap->red;
-
- for (i = 0; i < cmap->len; i++) {
- if (copy_from_user(&r, cmap->red++, sizeof(r)) ||
- copy_from_user(&g, cmap->green++, sizeof(g)) ||
- copy_from_user(&b, cmap->blue++, sizeof(b)))
- return -EFAULT;
-
-#ifdef CONFIG_FB_MSM_MDP40
- MDP_OUTP(MDP_BASE + 0x94800 +
-#else
- MDP_OUTP(MDP_BASE + 0x93800 +
-#endif
- (0x400*mdp_lut_i) + cmap->start*4 + i*4,
- ((g & 0xff) |
- ((b & 0xff) << 8) |
- ((r & 0xff) << 16)));
- }
-
- return 0;
-}
-
-static int mdp_lut_push;
-static int mdp_lut_push_i;
-static int mdp_lut_update_nonlcdc(struct fb_info *info, struct fb_cmap *cmap)
-{
- int ret;
-
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- ret = mdp_lut_hw_update(cmap);
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-
- if (ret)
- return ret;
-
- mutex_lock(&mdp_lut_push_sem);
- mdp_lut_push = 1;
- mdp_lut_push_i = mdp_lut_i;
- mutex_unlock(&mdp_lut_push_sem);
-
- mdp_lut_i = (mdp_lut_i + 1)%2;
-
- return 0;
-}
-
-static int mdp_lut_update_lcdc(struct fb_info *info, struct fb_cmap *cmap)
-{
- int ret;
-
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- ret = mdp_lut_hw_update(cmap);
-
- if (ret) {
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- return ret;
- }
-
- MDP_OUTP(MDP_BASE + 0x90070, (mdp_lut_i << 10) | 0x17);
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- mdp_lut_i = (mdp_lut_i + 1)%2;
-
- return 0;
-}
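
A short summary of the double-buffered LUT handling above, worked out from the offsets used in the code (the 256-entry figure is an inference from the 0x400 bank stride):

	/*
	 * Two LUT banks sit 0x400 bytes apart (room for 256 four-byte
	 * entries each); mdp_lut_hw_update() always fills the bank selected
	 * by mdp_lut_i.  mdp_lut_update_lcdc() switches banks immediately by
	 * writing (mdp_lut_i << 10) | 0x17 to MDP_BASE + 0x90070, while
	 * mdp_lut_update_nonlcdc() only records the request in mdp_lut_push;
	 * mdp_pipe_kickoff() issues the same register write just before it
	 * starts the next DMA2 transfer.
	 */
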
-
-#define MDP_HIST_MAX_BIN 32
-static __u32 mdp_hist_r[MDP_HIST_MAX_BIN];
-static __u32 mdp_hist_g[MDP_HIST_MAX_BIN];
-static __u32 mdp_hist_b[MDP_HIST_MAX_BIN];
-
-#ifdef CONFIG_FB_MSM_MDP40
-struct mdp_histogram mdp_hist;
-struct completion mdp_hist_comp;
-#else
-static struct mdp_histogram mdp_hist;
-static struct completion mdp_hist_comp;
-#endif
-
-static int mdp_do_histogram(struct fb_info *info, struct mdp_histogram *hist)
-{
- int ret = 0;
-
- if (!hist->frame_cnt || (hist->bin_cnt == 0) ||
- (hist->bin_cnt > MDP_HIST_MAX_BIN))
- return -EINVAL;
-
- INIT_COMPLETION(mdp_hist_comp);
-
- mdp_hist.bin_cnt = hist->bin_cnt;
- mdp_hist.r = (hist->r) ? mdp_hist_r : 0;
- mdp_hist.g = (hist->g) ? mdp_hist_g : 0;
- mdp_hist.b = (hist->b) ? mdp_hist_b : 0;
-
-#ifdef CONFIG_FB_MSM_MDP40
- MDP_OUTP(MDP_BASE + 0x95004, hist->frame_cnt);
- MDP_OUTP(MDP_BASE + 0x95000, 1);
-#else
- MDP_OUTP(MDP_BASE + 0x94004, hist->frame_cnt);
- MDP_OUTP(MDP_BASE + 0x94000, 1);
-#endif
- wait_for_completion_killable(&mdp_hist_comp);
-
- if (hist->r) {
- ret = copy_to_user(hist->r, mdp_hist.r, hist->bin_cnt*4);
- if (ret)
- goto hist_err;
- }
- if (hist->g) {
- ret = copy_to_user(hist->g, mdp_hist.g, hist->bin_cnt*4);
- if (ret)
- goto hist_err;
- }
- if (hist->b) {
- ret = copy_to_user(hist->b, mdp_hist.b, hist->bin_cnt*4);
- if (ret)
- goto hist_err;
- }
- return 0;
-
-hist_err:
- printk(KERN_ERR "%s: invalid hist buffer\n", __func__);
-	return -EFAULT;
-}
-#endif
-
-/* Returns < 0 on error, 0 on timeout, or > 0 on successful wait */
-
-int mdp_ppp_pipe_wait(void)
-{
- int ret = 1;
-
- /* wait 5 seconds for the operation to complete before declaring
- the MDP hung */
-
- if (mdp_ppp_waiting == TRUE) {
- ret = wait_for_completion_interruptible_timeout(&mdp_ppp_comp,
- 5 * HZ);
-
- if (!ret)
- printk(KERN_ERR "%s: Timed out waiting for the MDP.\n",
- __func__);
- }
-
- return ret;
-}
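
A hypothetical caller, only to illustrate the return convention documented above (< 0 error, 0 timeout, > 0 success); the helper name is illustrative and not part of the driver:

	static int example_wait_for_ppp(void)
	{
		int ret = mdp_ppp_pipe_wait();

		if (ret == 0)
			printk(KERN_ERR "PPP wait timed out\n");
		else if (ret < 0)
			printk(KERN_ERR "PPP wait interrupted: %d\n", ret);

		return ret;
	}
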
-
-static DEFINE_SPINLOCK(mdp_lock);
-static int mdp_irq_mask;
-static int mdp_irq_enabled;
-
-void mdp_enable_irq(uint32 term)
-{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&mdp_lock, irq_flags);
- if (mdp_irq_mask & term) {
- printk(KERN_ERR "MDP IRQ term-0x%x is already set\n", term);
- } else {
- mdp_irq_mask |= term;
- if (mdp_irq_mask && !mdp_irq_enabled) {
- mdp_irq_enabled = 1;
- enable_irq(INT_MDP);
- }
- }
- spin_unlock_irqrestore(&mdp_lock, irq_flags);
-}
-
-void mdp_disable_irq(uint32 term)
-{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&mdp_lock, irq_flags);
- if (!(mdp_irq_mask & term)) {
- printk(KERN_ERR "MDP IRQ term-0x%x is not set\n", term);
- } else {
- mdp_irq_mask &= ~term;
- if (!mdp_irq_mask && mdp_irq_enabled) {
- mdp_irq_enabled = 0;
- disable_irq(INT_MDP);
- }
- }
- spin_unlock_irqrestore(&mdp_lock, irq_flags);
-}
-
-void mdp_disable_irq_nolock(uint32 term)
-{
-
- if (!(mdp_irq_mask & term)) {
- printk(KERN_ERR "MDP IRQ term-0x%x is not set\n", term);
- } else {
- mdp_irq_mask &= ~term;
- if (!mdp_irq_mask && mdp_irq_enabled) {
- mdp_irq_enabled = 0;
- disable_irq(INT_MDP);
- }
- }
-}
-
-void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd)
-{
-
- dmb(); /* memory barrier */
-
- /* kick off PPP engine */
- if (term == MDP_PPP_TERM) {
- if (mdp_debug[MDP_PPP_BLOCK])
- jiffies_to_timeval(jiffies, &mdp_ppp_timeval);
-
- /* let's turn on PPP block */
- mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- mdp_enable_irq(term);
- INIT_COMPLETION(mdp_ppp_comp);
- mdp_ppp_waiting = TRUE;
- outpdw(MDP_BASE + 0x30, 0x1000);
- wait_for_completion_killable(&mdp_ppp_comp);
- mdp_disable_irq(term);
-
- if (mdp_debug[MDP_PPP_BLOCK]) {
- struct timeval now;
-
- jiffies_to_timeval(jiffies, &now);
- mdp_ppp_timeval.tv_usec =
- now.tv_usec - mdp_ppp_timeval.tv_usec;
- MSM_FB_INFO("MDP-PPP: %d\n",
- (int)mdp_ppp_timeval.tv_usec);
- }
- } else if (term == MDP_DMA2_TERM) {
- if (mdp_debug[MDP_DMA2_BLOCK]) {
- MSM_FB_INFO("MDP-DMA2: %d\n",
- (int)mdp_dma2_timeval.tv_usec);
- jiffies_to_timeval(jiffies, &mdp_dma2_timeval);
- }
- /* DMA update timestamp */
- mdp_dma2_last_update_time = ktime_get_real();
- /* let's turn on DMA2 block */
-#if 0
- mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-#endif
-#ifdef CONFIG_FB_MSM_MDP22
- outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x0044, 0x0);/* start DMA */
-#else
- if (mdp_lut_push) {
- mutex_lock(&mdp_lut_push_sem);
- mdp_lut_push = 0;
- MDP_OUTP(MDP_BASE + 0x90070,
- (mdp_lut_push_i << 10) | 0x17);
- mutex_unlock(&mdp_lut_push_sem);
- }
-#ifdef CONFIG_FB_MSM_MDP40
- outpdw(MDP_BASE + 0x000c, 0x0); /* start DMA */
-#else
- outpdw(MDP_BASE + 0x0044, 0x0); /* start DMA */
-#endif
-#endif
-#ifdef CONFIG_FB_MSM_MDP40
- } else if (term == MDP_DMA_S_TERM) {
- mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- outpdw(MDP_BASE + 0x0010, 0x0); /* start DMA */
- } else if (term == MDP_DMA_E_TERM) {
- mdp_pipe_ctrl(MDP_DMA_E_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- outpdw(MDP_BASE + 0x0014, 0x0); /* start DMA */
- } else if (term == MDP_OVERLAY0_TERM) {
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- outpdw(MDP_BASE + 0x0004, 0);
- } else if (term == MDP_OVERLAY1_TERM) {
- mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- outpdw(MDP_BASE + 0x0008, 0);
- }
-#else
- } else if (term == MDP_DMA_S_TERM) {
- mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- outpdw(MDP_BASE + 0x0048, 0x0); /* start DMA */
- }
-#endif
-}
-
-static void mdp_pipe_ctrl_workqueue_handler(struct work_struct *work)
-{
- mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-}
-
-void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
- boolean isr)
-{
- boolean mdp_all_blocks_off = TRUE;
- int i;
- unsigned long flag;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (MDP_BLOCK_POWER_ON == state) {
- mdp_block_power_cnt[block]++;
-
- if (MDP_DMA2_BLOCK == block)
- mdp_in_processing = TRUE;
- } else {
- mdp_block_power_cnt[block]--;
-
- if (mdp_block_power_cnt[block] < 0) {
-			/*
-			 * The master block always has to serve power-off
-			 * requests.  It also has a timer-driven power-off, so
-			 * if the timer expires first and DMA2 finishes later,
-			 * the master may have to power off twice.  No other
-			 * block should ever see multiple power-off requests.
-			 */
- if (block != MDP_MASTER_BLOCK) {
-				MSM_FB_INFO("mdp_block_power_cnt[block=%d] "
-					"multiple power-off request\n", block);
- }
- mdp_block_power_cnt[block] = 0;
- }
-
- if (MDP_DMA2_BLOCK == block)
- mdp_in_processing = FALSE;
- }
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- /*
- * If it's in isr, we send our request to workqueue.
- * Otherwise, processing happens in the current context
- */
- if (isr) {
- /* checking all blocks power state */
- for (i = 0; i < MDP_MAX_BLOCK; i++) {
- if (mdp_block_power_cnt[i] > 0)
- mdp_all_blocks_off = FALSE;
- }
-
- if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
- /* send workqueue to turn off mdp power */
- queue_delayed_work(mdp_pipe_ctrl_wq,
- &mdp_pipe_ctrl_worker,
- mdp_timer_duration);
- }
- } else {
- down(&mdp_pipe_ctrl_mutex);
- /* checking all blocks power state */
- for (i = 0; i < MDP_MAX_BLOCK; i++) {
- if (mdp_block_power_cnt[i] > 0)
- mdp_all_blocks_off = FALSE;
- }
-
- /*
- * find out whether a delayable work item is currently
- * pending
- */
-
- if (delayed_work_pending(&mdp_pipe_ctrl_worker)) {
-			/*
-			 * Try to cancel the pending work.  If it fails to
-			 * stop (meaning del_timer can't remove it from the
-			 * list because it is about to expire and run), we
-			 * have to let it run; queue_delayed_work() won't
-			 * accept the next job then, which is the same as
-			 * calling queue_delayed_work() with
-			 * mdp_timer_duration = 0.
-			 */
- cancel_delayed_work(&mdp_pipe_ctrl_worker);
- }
-
- if ((mdp_all_blocks_off) && (mdp_current_clk_on)) {
- if (block == MDP_MASTER_BLOCK) {
- mdp_current_clk_on = FALSE;
- /* turn off MDP clks */
- if (mdp_clk != NULL) {
- clk_disable(mdp_clk);
- MSM_FB_DEBUG("MDP CLK OFF\n");
- }
- if (mdp_pclk != NULL) {
- clk_disable(mdp_pclk);
- MSM_FB_DEBUG("MDP PCLK OFF\n");
- }
- } else {
- /* send workqueue to turn off mdp power */
- queue_delayed_work(mdp_pipe_ctrl_wq,
- &mdp_pipe_ctrl_worker,
- mdp_timer_duration);
- }
- } else if ((!mdp_all_blocks_off) && (!mdp_current_clk_on)) {
- mdp_current_clk_on = TRUE;
- /* turn on MDP clks */
- if (mdp_clk != NULL) {
- clk_enable(mdp_clk);
- MSM_FB_DEBUG("MDP CLK ON\n");
- }
- if (mdp_pclk != NULL) {
- clk_enable(mdp_pclk);
- MSM_FB_DEBUG("MDP PCLK ON\n");
- }
- }
- up(&mdp_pipe_ctrl_mutex);
- }
-}
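
The reference counting above is easiest to see from the caller's side: every block brackets its hardware activity with a POWER_ON/POWER_OFF pair, and only the overall balance (plus the delayed worker) decides when the MDP clocks are actually gated. A minimal sketch of a hypothetical caller (the function name is illustrative; the usual mdp.h declarations are assumed to be in scope):

	static void example_dma2_update(struct msm_fb_data_type *mfd)
	{
		/* take a reference on DMA2; clocks come up if they were off */
		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

		/* program the DMA2 registers, then start the transfer */
		mdp_pipe_kickoff(MDP_DMA2_TERM, mfd);

		/*
		 * The matching POWER_OFF comes from the ISR (isr == TRUE),
		 * which only queues mdp_pipe_ctrl_worker; the clocks are
		 * gated later, after mdp_timer_duration, and only if no
		 * block still holds a reference.
		 */
	}
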
-
-#ifndef CONFIG_FB_MSM_MDP40
-irqreturn_t mdp_isr(int irq, void *ptr)
-{
- uint32 mdp_interrupt = 0;
- struct mdp_dma_data *dma;
-
- mdp_is_in_isr = TRUE;
- do {
- mdp_interrupt = inp32(MDP_INTR_STATUS);
- outp32(MDP_INTR_CLEAR, mdp_interrupt);
-
- mdp_interrupt &= mdp_intr_mask;
-
- if (mdp_interrupt & TV_ENC_UNDERRUN) {
- mdp_interrupt &= ~(TV_ENC_UNDERRUN);
- mdp_tv_underflow_cnt++;
- }
-
- if (!mdp_interrupt)
- break;
-
- /* DMA3 TV-Out Start */
- if (mdp_interrupt & TV_OUT_DMA3_START) {
- /* let's disable TV out interrupt */
- mdp_intr_mask &= ~TV_OUT_DMA3_START;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-
- dma = &dma3_data;
- if (dma->waiting) {
- dma->waiting = FALSE;
- complete(&dma->comp);
- }
- }
-#ifndef CONFIG_FB_MSM_MDP22
- if (mdp_interrupt & MDP_HIST_DONE) {
- outp32(MDP_BASE + 0x94018, 0x3);
- outp32(MDP_INTR_CLEAR, MDP_HIST_DONE);
- if (mdp_hist.r)
- memcpy(mdp_hist.r, MDP_BASE + 0x94100,
- mdp_hist.bin_cnt*4);
- if (mdp_hist.g)
- memcpy(mdp_hist.g, MDP_BASE + 0x94200,
- mdp_hist.bin_cnt*4);
- if (mdp_hist.b)
- memcpy(mdp_hist.b, MDP_BASE + 0x94300,
- mdp_hist.bin_cnt*4);
- complete(&mdp_hist_comp);
- }
-
- /* LCDC UnderFlow */
- if (mdp_interrupt & LCDC_UNDERFLOW) {
- mdp_lcdc_underflow_cnt++;
- }
- /* LCDC Frame Start */
- if (mdp_interrupt & LCDC_FRAME_START) {
- /* let's disable LCDC interrupt */
- mdp_intr_mask &= ~LCDC_FRAME_START;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-
- dma = &dma2_data;
- if (dma->waiting) {
- dma->waiting = FALSE;
- complete(&dma->comp);
- }
- }
-
-		/* DMA_S LCD-Out Complete */
- if (mdp_interrupt & MDP_DMA_S_DONE) {
- dma = &dma_s_data;
- dma->busy = FALSE;
- mdp_pipe_ctrl(MDP_DMA_S_BLOCK, MDP_BLOCK_POWER_OFF,
- TRUE);
- complete(&dma->comp);
- }
-#endif
-
- /* DMA2 LCD-Out Complete */
- if (mdp_interrupt & MDP_DMA_P_DONE) {
- struct timeval now;
- ktime_t now_k;
-
- now_k = ktime_get_real();
- mdp_dma2_last_update_time.tv.sec =
- now_k.tv.sec - mdp_dma2_last_update_time.tv.sec;
- mdp_dma2_last_update_time.tv.nsec =
- now_k.tv.nsec - mdp_dma2_last_update_time.tv.nsec;
-
- if (mdp_debug[MDP_DMA2_BLOCK]) {
- jiffies_to_timeval(jiffies, &now);
- mdp_dma2_timeval.tv_usec =
- now.tv_usec - mdp_dma2_timeval.tv_usec;
- }
-
- dma = &dma2_data;
- dma->busy = FALSE;
- mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF,
- TRUE);
- complete(&dma->comp);
- }
- /* PPP Complete */
- if (mdp_interrupt & MDP_PPP_DONE) {
-#ifdef CONFIG_MDP_PPP_ASYNC_OP
- mdp_ppp_djob_done();
-#else
- mdp_pipe_ctrl(MDP_PPP_BLOCK,
- MDP_BLOCK_POWER_OFF, TRUE);
- if (mdp_ppp_waiting) {
- mdp_ppp_waiting = FALSE;
- complete(&mdp_ppp_comp);
- }
-#endif
- }
- } while (1);
-
- mdp_is_in_isr = FALSE;
-
- return IRQ_HANDLED;
-}
-#endif
-
-static void mdp_drv_init(void)
-{
- int i;
-
- for (i = 0; i < MDP_MAX_BLOCK; i++) {
- mdp_debug[i] = 0;
- }
-
- /* initialize spin lock and workqueue */
- spin_lock_init(&mdp_spin_lock);
- mdp_dma_wq = create_singlethread_workqueue("mdp_dma_wq");
- mdp_vsync_wq = create_singlethread_workqueue("mdp_vsync_wq");
- mdp_pipe_ctrl_wq = create_singlethread_workqueue("mdp_pipe_ctrl_wq");
- INIT_DELAYED_WORK(&mdp_pipe_ctrl_worker,
- mdp_pipe_ctrl_workqueue_handler);
-#ifdef CONFIG_MDP_PPP_ASYNC_OP
- mdp_ppp_dq_init();
-#endif
-
- /* initialize semaphore */
- init_completion(&mdp_ppp_comp);
- sema_init(&mdp_ppp_mutex, 1);
- sema_init(&mdp_pipe_ctrl_mutex, 1);
-
- dma2_data.busy = FALSE;
- dma2_data.waiting = FALSE;
- init_completion(&dma2_data.comp);
- sema_init(&dma2_data.mutex, 1);
- mutex_init(&dma2_data.ov_mutex);
-
- dma3_data.busy = FALSE;
- dma3_data.waiting = FALSE;
- init_completion(&dma3_data.comp);
- sema_init(&dma3_data.mutex, 1);
-
- dma_s_data.busy = FALSE;
- dma_s_data.waiting = FALSE;
- init_completion(&dma_s_data.comp);
- sema_init(&dma_s_data.mutex, 1);
-
- dma_e_data.busy = FALSE;
- dma_e_data.waiting = FALSE;
- init_completion(&dma_e_data.comp);
-
-#ifndef CONFIG_FB_MSM_MDP22
- init_completion(&mdp_hist_comp);
-#endif
-
- /* initializing mdp power block counter to 0 */
- for (i = 0; i < MDP_MAX_BLOCK; i++) {
- mdp_block_power_cnt[i] = 0;
- }
-
-#ifdef MSM_FB_ENABLE_DBGFS
- {
- struct dentry *root;
- char sub_name[] = "mdp";
-
- root = msm_fb_get_debugfs_root();
- if (root != NULL) {
- mdp_dir = debugfs_create_dir(sub_name, root);
-
- if (mdp_dir) {
- msm_fb_debugfs_file_create(mdp_dir,
- "dma2_update_time_in_usec",
- (u32 *) &mdp_dma2_update_time_in_usec);
- msm_fb_debugfs_file_create(mdp_dir,
- "vs_rdcnt_slow",
- (u32 *) &mdp_lcd_rd_cnt_offset_slow);
- msm_fb_debugfs_file_create(mdp_dir,
- "vs_rdcnt_fast",
- (u32 *) &mdp_lcd_rd_cnt_offset_fast);
- msm_fb_debugfs_file_create(mdp_dir,
- "mdp_usec_diff_threshold",
- (u32 *) &mdp_usec_diff_threshold);
- msm_fb_debugfs_file_create(mdp_dir,
- "mdp_current_clk_on",
- (u32 *) &mdp_current_clk_on);
-#ifdef CONFIG_FB_MSM_LCDC
- msm_fb_debugfs_file_create(mdp_dir,
- "lcdc_start_x",
- (u32 *) &first_pixel_start_x);
- msm_fb_debugfs_file_create(mdp_dir,
- "lcdc_start_y",
- (u32 *) &first_pixel_start_y);
- msm_fb_debugfs_file_create(mdp_dir,
- "mdp_lcdc_pclk_clk_rate",
- (u32 *) &mdp_lcdc_pclk_clk_rate);
- msm_fb_debugfs_file_create(mdp_dir,
- "mdp_lcdc_pad_pclk_clk_rate",
- (u32 *) &mdp_lcdc_pad_pclk_clk_rate);
-#endif
- }
- }
- }
-#endif
-}
-
-static int mdp_probe(struct platform_device *pdev);
-static int mdp_remove(struct platform_device *pdev);
-
-static struct platform_driver mdp_driver = {
- .probe = mdp_probe,
- .remove = mdp_remove,
-#ifndef CONFIG_HAS_EARLYSUSPEND
- .suspend = mdp_suspend,
- .resume = NULL,
-#endif
- .shutdown = NULL,
- .driver = {
- /*
- * Driver name must match the device name added in
- * platform.c.
- */
- .name = "mdp",
- },
-};
-
-static int mdp_off(struct platform_device *pdev)
-{
- int ret = 0;
-
-#ifdef MDP_HW_VSYNC
- struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
-#endif
-
- ret = panel_next_off(pdev);
-
-#ifdef MDP_HW_VSYNC
- mdp_hw_vsync_clk_disable(mfd);
-#endif
-
- return ret;
-}
-
-static int mdp_on(struct platform_device *pdev)
-{
-#ifdef MDP_HW_VSYNC
- struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
-#endif
-
- int ret = 0;
-
-#ifdef MDP_HW_VSYNC
- mdp_hw_vsync_clk_enable(mfd);
-#endif
-
- ret = panel_next_on(pdev);
-
- return ret;
-}
-
-static int mdp_irq_clk_setup(void)
-{
- int ret;
-
-#ifdef CONFIG_FB_MSM_MDP40
- ret = request_irq(INT_MDP, mdp4_isr, IRQF_DISABLED, "MDP", 0);
-#else
- ret = request_irq(INT_MDP, mdp_isr, IRQF_DISABLED, "MDP", 0);
-#endif
- if (ret) {
- printk(KERN_ERR "mdp request_irq() failed!\n");
- return ret;
- }
- disable_irq(INT_MDP);
-
- mdp_clk = clk_get(NULL, "mdp_clk");
-
- if (IS_ERR(mdp_clk)) {
- ret = PTR_ERR(mdp_clk);
- printk(KERN_ERR "can't get mdp_clk error:%d!\n", ret);
- free_irq(INT_MDP, 0);
- return ret;
- }
-
- mdp_pclk = clk_get(NULL, "mdp_pclk");
- if (IS_ERR(mdp_pclk))
- mdp_pclk = NULL;
-
-
-#ifdef CONFIG_FB_MSM_MDP40
-	/*
-	 * mdp_clk should always be greater than mdp_pclk
-	 */
-	clk_set_rate(mdp_clk, 122880000);	/* 122.88 MHz */
- printk(KERN_INFO "mdp_clk: mdp_clk=%d mdp_pclk=%d\n",
- (int)clk_get_rate(mdp_clk), (int)clk_get_rate(mdp_pclk));
-#endif
-
- return 0;
-}
-
-static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
-static int pdev_list_cnt;
-static int mdp_resource_initialized;
-static struct msm_panel_common_pdata *mdp_pdata;
-
-static int mdp_probe(struct platform_device *pdev)
-{
- struct platform_device *msm_fb_dev = NULL;
- struct msm_fb_data_type *mfd;
- struct msm_fb_panel_data *pdata = NULL;
- int rc;
-	resource_size_t size;
-#ifdef CONFIG_FB_MSM_MDP40
- int intf, if_no;
-#else
- unsigned long flag;
-#endif
-
- if ((pdev->id == 0) && (pdev->num_resources > 0)) {
- mdp_pdata = pdev->dev.platform_data;
-
- size = resource_size(&pdev->resource[0]);
- msm_mdp_base = ioremap(pdev->resource[0].start, size);
-
- MSM_FB_INFO("MDP HW Base phy_Address = 0x%x virt = 0x%x\n",
- (int)pdev->resource[0].start, (int)msm_mdp_base);
-
- if (unlikely(!msm_mdp_base))
- return -ENOMEM;
-
- printk("irq clk setup\n");
- rc = mdp_irq_clk_setup();
- printk("irq clk setup done\n");
- if (rc)
- return rc;
-
- /* initializing mdp hw */
-#ifdef CONFIG_FB_MSM_MDP40
- mdp4_hw_init();
-#else
- mdp_hw_init();
-#endif
-
- mdp_resource_initialized = 1;
- return 0;
- }
-
- if (!mdp_resource_initialized)
- return -EPERM;
-
- mfd = platform_get_drvdata(pdev);
-
- if (!mfd)
- return -ENODEV;
-
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
- return -ENOMEM;
-
- msm_fb_dev = platform_device_alloc("msm_fb", pdev->id);
- if (!msm_fb_dev)
- return -ENOMEM;
-
- /* link to the latest pdev */
- mfd->pdev = msm_fb_dev;
-
- /* add panel data */
- if (platform_device_add_data
- (msm_fb_dev, pdev->dev.platform_data,
- sizeof(struct msm_fb_panel_data))) {
- printk(KERN_ERR "mdp_probe: platform_device_add_data failed!\n");
- rc = -ENOMEM;
- goto mdp_probe_err;
- }
- /* data chain */
- pdata = msm_fb_dev->dev.platform_data;
- pdata->on = mdp_on;
- pdata->off = mdp_off;
- pdata->next = pdev;
-
- switch (mfd->panel.type) {
- case EXT_MDDI_PANEL:
- case MDDI_PANEL:
- case EBI2_PANEL:
- INIT_WORK(&mfd->dma_update_worker,
- mdp_lcd_update_workqueue_handler);
- INIT_WORK(&mfd->vsync_resync_worker,
- mdp_vsync_resync_workqueue_handler);
- mfd->hw_refresh = FALSE;
-
- if (mfd->panel.type == EXT_MDDI_PANEL) {
- /* 15 fps -> 66 msec */
- mfd->refresh_timer_duration = (66 * HZ / 1000);
- } else {
- /* 24 fps -> 42 msec */
- mfd->refresh_timer_duration = (42 * HZ / 1000);
- }
-
-#ifdef CONFIG_FB_MSM_MDP22
- mfd->dma_fnc = mdp_dma2_update;
- mfd->dma = &dma2_data;
-#else
- if (mfd->panel_info.pdest == DISPLAY_1) {
-#ifdef CONFIG_FB_MSM_OVERLAY
- mfd->dma_fnc = mdp4_mddi_overlay;
-#else
- mfd->dma_fnc = mdp_dma2_update;
-#endif
- mfd->dma = &dma2_data;
- mfd->lut_update = mdp_lut_update_nonlcdc;
- mfd->do_histogram = mdp_do_histogram;
- } else {
- mfd->dma_fnc = mdp_dma_s_update;
- mfd->dma = &dma_s_data;
- }
-#endif
- if (mdp_pdata)
- mfd->vsync_gpio = mdp_pdata->gpio;
- else
- mfd->vsync_gpio = -1;
-
-#ifdef CONFIG_FB_MSM_MDP40
- if (mfd->panel.type == EBI2_PANEL)
- intf = EBI2_INTF;
- else
- intf = MDDI_INTF;
-
- if (mfd->panel_info.pdest == DISPLAY_1)
- if_no = PRIMARY_INTF_SEL;
- else
- if_no = SECONDARY_INTF_SEL;
-
- mdp4_display_intf_sel(if_no, intf);
-#endif
- mdp_config_vsync(mfd);
- break;
-
- case HDMI_PANEL:
- case LCDC_PANEL:
- pdata->on = mdp_lcdc_on;
- pdata->off = mdp_lcdc_off;
- mfd->hw_refresh = TRUE;
- mfd->cursor_update = mdp_hw_cursor_update;
-#ifndef CONFIG_FB_MSM_MDP22
- mfd->lut_update = mdp_lut_update_lcdc;
- mfd->do_histogram = mdp_do_histogram;
-#endif
-#ifdef CONFIG_FB_MSM_OVERLAY
- mfd->dma_fnc = mdp4_lcdc_overlay;
-#else
- mfd->dma_fnc = mdp_lcdc_update;
-#endif
-
-#ifdef CONFIG_FB_MSM_MDP40
- if (mfd->panel.type == HDMI_PANEL) {
- mfd->dma = &dma_e_data;
- mdp4_display_intf_sel(EXTERNAL_INTF_SEL, LCDC_RGB_INTF);
- } else {
- mfd->dma = &dma2_data;
- mdp4_display_intf_sel(PRIMARY_INTF_SEL, LCDC_RGB_INTF);
- }
-#else
- mfd->dma = &dma2_data;
- spin_lock_irqsave(&mdp_spin_lock, flag);
- mdp_intr_mask &= ~MDP_DMA_P_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-#endif
- break;
-
- case TV_PANEL:
- pdata->on = mdp_dma3_on;
- pdata->off = mdp_dma3_off;
- mfd->hw_refresh = TRUE;
- mfd->dma_fnc = mdp_dma3_update;
- mfd->dma = &dma3_data;
- break;
-
- default:
- printk(KERN_ERR "mdp_probe: unknown device type!\n");
- rc = -ENODEV;
- goto mdp_probe_err;
- }
-
- /* set driver data */
- platform_set_drvdata(msm_fb_dev, mfd);
-
- rc = platform_device_add(msm_fb_dev);
- if (rc) {
- goto mdp_probe_err;
- }
-
- pdev_list[pdev_list_cnt++] = pdev;
- return 0;
-
- mdp_probe_err:
- platform_device_put(msm_fb_dev);
- return rc;
-}
-
-static void mdp_suspend_sub(void)
-{
- /* cancel pipe ctrl worker */
- cancel_delayed_work(&mdp_pipe_ctrl_worker);
-
-	/* in case the worker can't be cancelled, flush the queue */
- flush_workqueue(mdp_pipe_ctrl_wq);
-
- /* let's wait for PPP completion */
- while (mdp_block_power_cnt[MDP_PPP_BLOCK] > 0) ;
-
- /* try to power down */
- mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-}
-
-#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
-static int mdp_suspend(struct platform_device *pdev, pm_message_t state)
-{
- mdp_suspend_sub();
- return 0;
-}
-#endif
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
-static void mdp_early_suspend(struct early_suspend *h)
-{
- mdp_suspend_sub();
-}
-#endif
-
-static int mdp_remove(struct platform_device *pdev)
-{
- iounmap(msm_mdp_base);
- return 0;
-}
-
-static int mdp_register_driver(void)
-{
-#ifdef CONFIG_HAS_EARLYSUSPEND
- early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 1;
- early_suspend.suspend = mdp_early_suspend;
- register_early_suspend(&early_suspend);
-#endif
-
- return platform_driver_register(&mdp_driver);
-}
-
-static int __init mdp_driver_init(void)
-{
- int ret;
-
- mdp_drv_init();
-
- ret = mdp_register_driver();
- if (ret) {
- printk(KERN_ERR "mdp_register_driver() failed!\n");
- return ret;
- }
-
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_FB_MSM_MDP40)
- mdp4_debugfs_init();
-#endif
-
- return 0;
-
-}
-
-module_init(mdp_driver_init);
diff --git a/drivers/staging/msm/mdp.h b/drivers/staging/msm/mdp.h
deleted file mode 100644
index 44b114700da..00000000000
--- a/drivers/staging/msm/mdp.h
+++ /dev/null
@@ -1,679 +0,0 @@
-/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef MDP_H
-#define MDP_H
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/fb.h>
-#include <linux/hrtimer.h>
-#include "msm_mdp.h"
-
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-
-#include "msm_fb_panel.h"
-
-#ifdef CONFIG_MDP_PPP_ASYNC_OP
-#include "mdp_ppp_dq.h"
-#endif
-
-#ifdef BIT
-#undef BIT
-#endif
-
-#define BIT(x) (1<<(x))
-
-#define MDPOP_NOP 0
-#define MDPOP_LR BIT(0) /* left to right flip */
-#define MDPOP_UD BIT(1) /* up and down flip */
-#define MDPOP_ROT90 BIT(2) /* rotate image to 90 degree */
-#define MDPOP_ROT180 (MDPOP_UD|MDPOP_LR)
-#define MDPOP_ROT270 (MDPOP_ROT90|MDPOP_UD|MDPOP_LR)
-#define MDPOP_ASCALE BIT(7)
-#define MDPOP_ALPHAB BIT(8) /* enable alpha blending */
-#define MDPOP_TRANSP BIT(9) /* enable transparency */
-#define MDPOP_DITHER BIT(10) /* enable dither */
-#define MDPOP_SHARPENING BIT(11) /* enable sharpening */
-#define MDPOP_BLUR BIT(12) /* enable blur */
-#define MDPOP_FG_PM_ALPHA BIT(13)
-
-struct mdp_table_entry {
- uint32_t reg;
- uint32_t val;
-};
-
-extern struct mdp_ccs mdp_ccs_yuv2rgb;
-extern struct mdp_ccs mdp_ccs_rgb2yuv;
-
-/*
- * MDP Image Structure
- */
-typedef struct mdpImg_ {
- uint32 imgType; /* Image type */
- uint32 *bmy_addr; /* bitmap or y addr */
- uint32 *cbcr_addr; /* cbcr addr */
- uint32 width; /* image width */
-	uint32 mdpOp;		/* image operation (rotation, flip up/down, alpha/tp) */
- uint32 tpVal; /* transparency color */
- uint32 alpha; /* alpha percentage 0%(0x0) ~ 100%(0x100) */
- int sp_value; /* sharpening strength */
-} MDPIMG;
-
-#ifdef CONFIG_MDP_PPP_ASYNC_OP
-#define MDP_OUTP(addr, data) mdp_ppp_outdw((uint32_t)(addr), \
- (uint32_t)(data))
-#else
-#define MDP_OUTP(addr, data) outpdw((addr), (data))
-#endif
-
-#define MDP_KTIME2USEC(kt) (kt.tv.sec*1000000 + kt.tv.nsec/1000)
-
-#define MDP_BASE msm_mdp_base
-
-typedef enum {
- MDP_BC_SCALE_POINT2_POINT4,
- MDP_BC_SCALE_POINT4_POINT6,
- MDP_BC_SCALE_POINT6_POINT8,
- MDP_BC_SCALE_POINT8_1,
- MDP_BC_SCALE_UP,
- MDP_PR_SCALE_POINT2_POINT4,
- MDP_PR_SCALE_POINT4_POINT6,
- MDP_PR_SCALE_POINT6_POINT8,
- MDP_PR_SCALE_POINT8_1,
- MDP_PR_SCALE_UP,
- MDP_SCALE_BLUR,
- MDP_INIT_SCALE
-} MDP_SCALE_MODE;
-
-typedef enum {
- MDP_BLOCK_POWER_OFF,
- MDP_BLOCK_POWER_ON
-} MDP_BLOCK_POWER_STATE;
-
-typedef enum {
- MDP_MASTER_BLOCK,
- MDP_CMD_BLOCK,
- MDP_PPP_BLOCK,
- MDP_DMA2_BLOCK,
- MDP_DMA3_BLOCK,
- MDP_DMA_S_BLOCK,
- MDP_DMA_E_BLOCK,
- MDP_OVERLAY0_BLOCK,
- MDP_OVERLAY1_BLOCK,
- MDP_MAX_BLOCK
-} MDP_BLOCK_TYPE;
-
-/* Let's keep Q Factor power of 2 for optimization */
-#define MDP_SCALE_Q_FACTOR 512
-
-#ifdef CONFIG_FB_MSM_MDP31
-#define MDP_MAX_X_SCALE_FACTOR (MDP_SCALE_Q_FACTOR*8)
-#define MDP_MIN_X_SCALE_FACTOR (MDP_SCALE_Q_FACTOR/8)
-#define MDP_MAX_Y_SCALE_FACTOR (MDP_SCALE_Q_FACTOR*8)
-#define MDP_MIN_Y_SCALE_FACTOR (MDP_SCALE_Q_FACTOR/8)
-#else
-#define MDP_MAX_X_SCALE_FACTOR (MDP_SCALE_Q_FACTOR*4)
-#define MDP_MIN_X_SCALE_FACTOR (MDP_SCALE_Q_FACTOR/4)
-#define MDP_MAX_Y_SCALE_FACTOR (MDP_SCALE_Q_FACTOR*4)
-#define MDP_MIN_Y_SCALE_FACTOR (MDP_SCALE_Q_FACTOR/4)
-#endif
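
For orientation, MDP_SCALE_Q_FACTOR = 512 makes the limits above Q9 fixed-point ratios: 4x (or 8x on MDP31) up- and down-scaling per axis. A minimal range check, assuming the factor is the dst/src ratio in that format (the direction is an assumption here; the real computation lives in the PPP scale code, not in this header):

	static inline int mdp_scale_x_in_range(uint32 src_w, uint32 dst_w)
	{
		/* Q9 fixed point: 512 == 1.0 (assumed dst/src direction) */
		uint32 factor = (dst_w * MDP_SCALE_Q_FACTOR) / src_w;

		return factor >= MDP_MIN_X_SCALE_FACTOR &&
		       factor <= MDP_MAX_X_SCALE_FACTOR;
	}
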
-
-/* SHIM Q Factor */
-#define PHI_Q_FACTOR 29
-#define PQF_PLUS_5 (PHI_Q_FACTOR + 5) /* due to 32 phases */
-#define PQF_PLUS_4 (PHI_Q_FACTOR + 4)
-#define PQF_PLUS_2 (PHI_Q_FACTOR + 2) /* to get 4.0 */
-#define PQF_MINUS_2 (PHI_Q_FACTOR - 2) /* to get 0.25 */
-#define PQF_PLUS_5_PLUS_2 (PQF_PLUS_5 + 2)
-#define PQF_PLUS_5_MINUS_2 (PQF_PLUS_5 - 2)
-
-#define MDP_CONVTP(tpVal) (((tpVal&0xF800)<<8)|((tpVal&0x7E0)<<5)|((tpVal&0x1F)<<3))
-
-#define MDPOP_ROTATION (MDPOP_ROT90|MDPOP_LR|MDPOP_UD)
-#define MDP_CHKBIT(val, bit) ((bit) == ((val) & (bit)))
-
-/* overlay interface API defines */
-typedef enum {
- MORE_IBUF,
- FINAL_IBUF,
- COMPLETE_IBUF
-} MDP_IBUF_STATE;
-
-struct mdp_dirty_region {
- __u32 xoffset; /* source origin in the x-axis */
- __u32 yoffset; /* source origin in the y-axis */
- __u32 width; /* number of pixels in the x-axis */
- __u32 height; /* number of pixels in the y-axis */
-};
-
-/*
- * MDP extended data types
- */
-typedef struct mdp_roi_s {
- uint32 x;
- uint32 y;
- uint32 width;
- uint32 height;
- int32 lcd_x;
- int32 lcd_y;
- uint32 dst_width;
- uint32 dst_height;
-} MDP_ROI;
-
-typedef struct mdp_ibuf_s {
- uint8 *buf;
- uint32 bpp;
- uint32 ibuf_type;
- uint32 ibuf_width;
- uint32 ibuf_height;
-
- MDP_ROI roi;
- MDPIMG mdpImg;
-
- int32 dma_x;
- int32 dma_y;
- uint32 dma_w;
- uint32 dma_h;
-
- uint32 vsync_enable;
- uint32 visible_swapped;
-} MDPIBUF;
-
-struct mdp_dma_data {
- boolean busy;
- boolean waiting;
- struct mutex ov_mutex;
- struct semaphore mutex;
- struct completion comp;
-};
-
-#define MDP_CMD_DEBUG_ACCESS_BASE (MDP_BASE+0x10000)
-
-#define MDP_DMA2_TERM 0x1
-#define MDP_DMA3_TERM 0x2
-#define MDP_PPP_TERM 0x4
-#define MDP_DMA_S_TERM 0x8
-#ifdef CONFIG_FB_MSM_MDP40
-#define MDP_DMA_E_TERM 0x10
-#define MDP_OVERLAY0_TERM 0x20
-#define MDP_OVERLAY1_TERM 0x40
-#endif
-
-#define ACTIVE_START_X_EN BIT(31)
-#define ACTIVE_START_Y_EN BIT(31)
-#define ACTIVE_HIGH 0
-#define ACTIVE_LOW 1
-#define MDP_DMA_S_DONE BIT(2)
-#define LCDC_FRAME_START BIT(15)
-#define LCDC_UNDERFLOW BIT(16)
-
-#ifdef CONFIG_FB_MSM_MDP22
-#define MDP_DMA_P_DONE BIT(2)
-#else
-#define MDP_DMA_P_DONE BIT(14)
-#endif
-
-#define MDP_PPP_DONE BIT(0)
-#define TV_OUT_DMA3_DONE BIT(6)
-#define TV_ENC_UNDERRUN BIT(7)
-#define TV_OUT_DMA3_START BIT(13)
-#define MDP_HIST_DONE BIT(20)
-
-#ifdef CONFIG_FB_MSM_MDP22
-#define MDP_ANY_INTR_MASK (MDP_PPP_DONE| \
- MDP_DMA_P_DONE| \
- TV_ENC_UNDERRUN)
-#else
-#define MDP_ANY_INTR_MASK (MDP_PPP_DONE| \
- MDP_DMA_P_DONE| \
- MDP_DMA_S_DONE| \
- LCDC_UNDERFLOW| \
- MDP_HIST_DONE| \
- TV_ENC_UNDERRUN)
-#endif
-
-#define MDP_TOP_LUMA 16
-#define MDP_TOP_CHROMA 0
-#define MDP_BOTTOM_LUMA 19
-#define MDP_BOTTOM_CHROMA 3
-#define MDP_LEFT_LUMA 22
-#define MDP_LEFT_CHROMA 6
-#define MDP_RIGHT_LUMA 25
-#define MDP_RIGHT_CHROMA 9
-
-#define CLR_G 0x0
-#define CLR_B 0x1
-#define CLR_R 0x2
-#define CLR_ALPHA 0x3
-
-#define CLR_Y CLR_G
-#define CLR_CB CLR_B
-#define CLR_CR CLR_R
-
-/* from lsb to msb */
-#define MDP_GET_PACK_PATTERN(a,x,y,z,bit) (((a)<<(bit*3))|((x)<<(bit*2))|((y)<<bit)|(z))
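
To make the packing macro concrete, here is how the RGB pattern used by DMA_PACK_PATTERN_RGB further down expands; this is plain arithmetic on the definitions above:

	/*
	 * MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 2)
	 *	= (0 << 6) | (CLR_R << 4) | (CLR_G << 2) | CLR_B
	 *	= (0x2 << 4) | (0x0 << 2) | 0x1
	 *	= 0x21
	 * DMA_PACK_PATTERN_RGB then shifts this value into bits 8..15 of
	 * the DMA config register.
	 */
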
-
-/*
- * 0x0000 0x0004 0x0008 MDP sync config
- */
-#ifdef CONFIG_FB_MSM_MDP22
-#define MDP_SYNCFG_HGT_LOC 22
-#define MDP_SYNCFG_VSYNC_EXT_EN BIT(21)
-#define MDP_SYNCFG_VSYNC_INT_EN BIT(20)
-#else
-#define MDP_SYNCFG_HGT_LOC 21
-#define MDP_SYNCFG_VSYNC_EXT_EN BIT(20)
-#define MDP_SYNCFG_VSYNC_INT_EN BIT(19)
-#define MDP_HW_VSYNC
-#endif
-
-/*
- * 0x0018 MDP VSYNC THRESHOLD
- */
-#define MDP_PRIM_BELOW_LOC 0
-#define MDP_PRIM_ABOVE_LOC 8
-
-/*
- * MDP_PRIMARY_VSYNC_OUT_CTRL
- * 0x0080,84,88 internal vsync pulse config
- */
-#define VSYNC_PULSE_EN BIT(31)
-#define VSYNC_PULSE_INV BIT(30)
-
-/*
- * 0x008c MDP VSYNC CONTROL
- */
-#define DISP0_VSYNC_MAP_VSYNC0 0
-#define DISP0_VSYNC_MAP_VSYNC1 BIT(0)
-#define DISP0_VSYNC_MAP_VSYNC2 BIT(0)|BIT(1)
-
-#define DISP1_VSYNC_MAP_VSYNC0 0
-#define DISP1_VSYNC_MAP_VSYNC1 BIT(2)
-#define DISP1_VSYNC_MAP_VSYNC2 BIT(2)|BIT(3)
-
-#define PRIMARY_LCD_SYNC_EN BIT(4)
-#define PRIMARY_LCD_SYNC_DISABLE 0
-
-#define SECONDARY_LCD_SYNC_EN BIT(5)
-#define SECONDARY_LCD_SYNC_DISABLE 0
-
-#define EXTERNAL_LCD_SYNC_EN BIT(6)
-#define EXTERNAL_LCD_SYNC_DISABLE 0
-
-/*
- * 0x101f0 MDP VSYNC Threshold
- */
-#define VSYNC_THRESHOLD_ABOVE_LOC 0
-#define VSYNC_THRESHOLD_BELOW_LOC 16
-#define VSYNC_ANTI_TEAR_EN BIT(31)
-
-/*
- * 0x10004 command config
- */
-#define MDP_CMD_DBGBUS_EN BIT(0)
-
-/*
- * 0x10124 or 0x101d4 PPP source config
- */
-#define PPP_SRC_C0G_8BITS (BIT(1)|BIT(0))
-#define PPP_SRC_C1B_8BITS (BIT(3)|BIT(2))
-#define PPP_SRC_C2R_8BITS (BIT(5)|BIT(4))
-#define PPP_SRC_C3A_8BITS (BIT(7)|BIT(6))
-
-#define PPP_SRC_C0G_6BITS BIT(1)
-#define PPP_SRC_C1B_6BITS BIT(3)
-#define PPP_SRC_C2R_6BITS BIT(5)
-
-#define PPP_SRC_C0G_5BITS BIT(0)
-#define PPP_SRC_C1B_5BITS BIT(2)
-#define PPP_SRC_C2R_5BITS BIT(4)
-
-#define PPP_SRC_C3_ALPHA_EN BIT(8)
-
-#define PPP_SRC_BPP_INTERLVD_1BYTES 0
-#define PPP_SRC_BPP_INTERLVD_2BYTES BIT(9)
-#define PPP_SRC_BPP_INTERLVD_3BYTES BIT(10)
-#define PPP_SRC_BPP_INTERLVD_4BYTES (BIT(10)|BIT(9))
-
-#define PPP_SRC_BPP_ROI_ODD_X BIT(11)
-#define PPP_SRC_BPP_ROI_ODD_Y BIT(12)
-#define PPP_SRC_INTERLVD_2COMPONENTS BIT(13)
-#define PPP_SRC_INTERLVD_3COMPONENTS BIT(14)
-#define PPP_SRC_INTERLVD_4COMPONENTS (BIT(14)|BIT(13))
-
-/*
- * RGB666 unpack format
- * TIGHT means R6+G6+B6 together
- * LOOSE means R6+2 + G6+2 + B6+2 (with MSB)
- *       or    2+R6 + 2+G6 + 2+B6 (with LSB)
- */
-#define PPP_SRC_UNPACK_TIGHT BIT(17)
-#define PPP_SRC_UNPACK_LOOSE 0
-#define PPP_SRC_UNPACK_ALIGN_LSB 0
-#define PPP_SRC_UNPACK_ALIGN_MSB BIT(18)
-
-#define PPP_SRC_FETCH_PLANES_INTERLVD 0
-#define PPP_SRC_FETCH_PLANES_PSEUDOPLNR BIT(20)
-
-#define PPP_SRC_WMV9_MODE BIT(21)	/* Windows Media Video 9 */
-
-/*
- * 0x10138 PPP operation config
- */
-#define PPP_OP_SCALE_X_ON BIT(0)
-#define PPP_OP_SCALE_Y_ON BIT(1)
-
-#define PPP_OP_CONVERT_RGB2YCBCR 0
-#define PPP_OP_CONVERT_YCBCR2RGB BIT(2)
-#define PPP_OP_CONVERT_ON BIT(3)
-
-#define PPP_OP_CONVERT_MATRIX_PRIMARY 0
-#define PPP_OP_CONVERT_MATRIX_SECONDARY BIT(4)
-
-#define PPP_OP_LUT_C0_ON BIT(5)
-#define PPP_OP_LUT_C1_ON BIT(6)
-#define PPP_OP_LUT_C2_ON BIT(7)
-
-/* rotate or blend enable */
-#define PPP_OP_ROT_ON BIT(8)
-
-#define PPP_OP_ROT_90 BIT(9)
-#define PPP_OP_FLIP_LR BIT(10)
-#define PPP_OP_FLIP_UD BIT(11)
-
-#define PPP_OP_BLEND_ON BIT(12)
-
-#define PPP_OP_BLEND_SRCPIXEL_ALPHA 0
-#define PPP_OP_BLEND_DSTPIXEL_ALPHA BIT(13)
-#define PPP_OP_BLEND_CONSTANT_ALPHA BIT(14)
-#define PPP_OP_BLEND_SRCPIXEL_TRANSP (BIT(13)|BIT(14))
-
-#define PPP_OP_BLEND_ALPHA_BLEND_NORMAL 0
-#define PPP_OP_BLEND_ALPHA_BLEND_REVERSE BIT(15)
-
-#define PPP_OP_DITHER_EN BIT(16)
-
-#define PPP_OP_COLOR_SPACE_RGB 0
-#define PPP_OP_COLOR_SPACE_YCBCR BIT(17)
-
-#define PPP_OP_SRC_CHROMA_RGB 0
-#define PPP_OP_SRC_CHROMA_H2V1 BIT(18)
-#define PPP_OP_SRC_CHROMA_H1V2 BIT(19)
-#define PPP_OP_SRC_CHROMA_420 (BIT(18)|BIT(19))
-#define PPP_OP_SRC_CHROMA_COSITE 0
-#define PPP_OP_SRC_CHROMA_OFFSITE BIT(20)
-
-#define PPP_OP_DST_CHROMA_RGB 0
-#define PPP_OP_DST_CHROMA_H2V1 BIT(21)
-#define PPP_OP_DST_CHROMA_H1V2 BIT(22)
-#define PPP_OP_DST_CHROMA_420 (BIT(21)|BIT(22))
-#define PPP_OP_DST_CHROMA_COSITE 0
-#define PPP_OP_DST_CHROMA_OFFSITE BIT(23)
-
-#define PPP_BLEND_CALPHA_TRNASP BIT(24)
-
-#define PPP_OP_BG_CHROMA_RGB 0
-#define PPP_OP_BG_CHROMA_H2V1 BIT(25)
-#define PPP_OP_BG_CHROMA_H1V2 BIT(26)
-#define PPP_OP_BG_CHROMA_420 (BIT(25)|BIT(26))
-#define PPP_OP_BG_CHROMA_SITE_COSITE 0
-#define PPP_OP_BG_CHROMA_SITE_OFFSITE BIT(27)
-#define PPP_OP_DEINT_EN BIT(29)
-
-#define PPP_BLEND_BG_USE_ALPHA_SEL (1 << 0)
-#define PPP_BLEND_BG_ALPHA_REVERSE (1 << 3)
-#define PPP_BLEND_BG_SRCPIXEL_ALPHA (0 << 1)
-#define PPP_BLEND_BG_DSTPIXEL_ALPHA (1 << 1)
-#define PPP_BLEND_BG_CONSTANT_ALPHA (2 << 1)
-#define PPP_BLEND_BG_CONST_ALPHA_VAL(x) ((x) << 24)
-
-#define PPP_OP_DST_RGB 0
-#define PPP_OP_DST_YCBCR BIT(30)
-/*
- * 0x10150 PPP destination config
- */
-#define PPP_DST_C0G_8BIT (BIT(0)|BIT(1))
-#define PPP_DST_C1B_8BIT (BIT(3)|BIT(2))
-#define PPP_DST_C2R_8BIT (BIT(5)|BIT(4))
-#define PPP_DST_C3A_8BIT (BIT(7)|BIT(6))
-
-#define PPP_DST_C0G_6BIT BIT(1)
-#define PPP_DST_C1B_6BIT BIT(3)
-#define PPP_DST_C2R_6BIT BIT(5)
-
-#define PPP_DST_C0G_5BIT BIT(0)
-#define PPP_DST_C1B_5BIT BIT(2)
-#define PPP_DST_C2R_5BIT BIT(4)
-
-#define PPP_DST_C3A_8BIT (BIT(7)|BIT(6))
-#define PPP_DST_C3ALPHA_EN BIT(8)
-
-#define PPP_DST_PACKET_CNT_INTERLVD_2ELEM BIT(9)
-#define PPP_DST_PACKET_CNT_INTERLVD_3ELEM BIT(10)
-#define PPP_DST_PACKET_CNT_INTERLVD_4ELEM (BIT(10)|BIT(9))
-#define PPP_DST_PACKET_CNT_INTERLVD_6ELEM (BIT(11)|BIT(9))
-
-#define PPP_DST_PACK_LOOSE 0
-#define PPP_DST_PACK_TIGHT BIT(13)
-#define PPP_DST_PACK_ALIGN_LSB 0
-#define PPP_DST_PACK_ALIGN_MSB BIT(14)
-
-#define PPP_DST_OUT_SEL_AXI 0
-#define PPP_DST_OUT_SEL_MDDI BIT(15)
-
-#define PPP_DST_BPP_2BYTES BIT(16)
-#define PPP_DST_BPP_3BYTES BIT(17)
-#define PPP_DST_BPP_4BYTES (BIT(17)|BIT(16))
-
-#define PPP_DST_PLANE_INTERLVD 0
-#define PPP_DST_PLANE_PLANAR BIT(18)
-#define PPP_DST_PLANE_PSEUDOPLN BIT(19)
-
-#define PPP_DST_TO_TV BIT(20)
-
-#define PPP_DST_MDDI_PRIMARY 0
-#define PPP_DST_MDDI_SECONDARY BIT(21)
-#define PPP_DST_MDDI_EXTERNAL BIT(22)
-
-/*
- * 0x10180 DMA config
- */
-#define DMA_DSTC0G_8BITS (BIT(1)|BIT(0))
-#define DMA_DSTC1B_8BITS (BIT(3)|BIT(2))
-#define DMA_DSTC2R_8BITS (BIT(5)|BIT(4))
-
-#define DMA_DSTC0G_6BITS BIT(1)
-#define DMA_DSTC1B_6BITS BIT(3)
-#define DMA_DSTC2R_6BITS BIT(5)
-
-#define DMA_DSTC0G_5BITS BIT(0)
-#define DMA_DSTC1B_5BITS BIT(2)
-#define DMA_DSTC2R_5BITS BIT(4)
-
-#define DMA_PACK_TIGHT BIT(6)
-#define DMA_PACK_LOOSE 0
-#define DMA_PACK_ALIGN_LSB 0
-/*
- * use DMA_PACK_ALIGN_MSB if the upper 6 of the 8 bits output by the
- * LCDC block map onto the 6 pins going out to the panel
- */
-#define DMA_PACK_ALIGN_MSB BIT(7)
-#define DMA_PACK_PATTERN_RGB \
- (MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 2)<<8)
-#define DMA_PACK_PATTERN_BGR \
- (MDP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 2)<<8)
-#define DMA_OUT_SEL_AHB 0
-#define DMA_OUT_SEL_LCDC BIT(20)
-#define DMA_IBUF_FORMAT_RGB888 0
-#define DMA_IBUF_FORMAT_xRGB8888_OR_ARGB8888 BIT(26)
-
-#ifdef CONFIG_FB_MSM_MDP22
-#define DMA_OUT_SEL_MDDI BIT(14)
-#define DMA_AHBM_LCD_SEL_PRIMARY 0
-#define DMA_AHBM_LCD_SEL_SECONDARY BIT(15)
-#define DMA_IBUF_C3ALPHA_EN BIT(16)
-#define DMA_DITHER_EN BIT(17)
-#define DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY 0
-#define DMA_MDDI_DMAOUT_LCD_SEL_SECONDARY BIT(18)
-#define DMA_MDDI_DMAOUT_LCD_SEL_EXTERNAL BIT(19)
-#define DMA_IBUF_FORMAT_RGB565 BIT(20)
-#define DMA_IBUF_FORMAT_RGB888_OR_ARGB8888 0
-#define DMA_IBUF_NONCONTIGUOUS BIT(21)
-#else
-#define DMA_OUT_SEL_MDDI BIT(19)
-#define DMA_AHBM_LCD_SEL_PRIMARY 0
-#define DMA_AHBM_LCD_SEL_SECONDARY 0
-#define DMA_IBUF_C3ALPHA_EN 0
-#define DMA_DITHER_EN BIT(24)
-#define DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY 0
-#define DMA_MDDI_DMAOUT_LCD_SEL_SECONDARY 0
-#define DMA_MDDI_DMAOUT_LCD_SEL_EXTERNAL 0
-#define DMA_IBUF_FORMAT_RGB565 BIT(25)
-#define DMA_IBUF_NONCONTIGUOUS 0
-#endif
-
-/*
- * MDDI Register
- */
-#define MDDI_VDO_PACKET_DESC 0x5666
-
-#ifdef CONFIG_FB_MSM_MDP40
-#define MDP_INTR_ENABLE (msm_mdp_base + 0x0050)
-#define MDP_INTR_STATUS (msm_mdp_base + 0x0054)
-#define MDP_INTR_CLEAR (msm_mdp_base + 0x0058)
-#define MDP_EBI2_LCD0 (msm_mdp_base + 0x0060)
-#define MDP_EBI2_LCD1 (msm_mdp_base + 0x0064)
-#define MDP_EBI2_PORTMAP_MODE (msm_mdp_base + 0x0070)
-
-#define MDP_DMA_P_HIST_INTR_STATUS (msm_mdp_base + 0x95014)
-#define MDP_DMA_P_HIST_INTR_CLEAR (msm_mdp_base + 0x95018)
-#define MDP_DMA_P_HIST_INTR_ENABLE (msm_mdp_base + 0x9501C)
-#else
-#define MDP_INTR_ENABLE (msm_mdp_base + 0x0020)
-#define MDP_INTR_STATUS (msm_mdp_base + 0x0024)
-#define MDP_INTR_CLEAR (msm_mdp_base + 0x0028)
-#define MDP_EBI2_LCD0 (msm_mdp_base + 0x003c)
-#define MDP_EBI2_LCD1 (msm_mdp_base + 0x0040)
-#define MDP_EBI2_PORTMAP_MODE (msm_mdp_base + 0x005c)
-#endif
-
-#define MDP_FULL_BYPASS_WORD43 (msm_mdp_base + 0x101ac)
-
-#define MDP_CSC_PFMVn(n) (msm_mdp_base + 0x40400 + 4 * (n))
-#define MDP_CSC_PRMVn(n) (msm_mdp_base + 0x40440 + 4 * (n))
-#define MDP_CSC_PRE_BV1n(n) (msm_mdp_base + 0x40500 + 4 * (n))
-#define MDP_CSC_PRE_BV2n(n) (msm_mdp_base + 0x40540 + 4 * (n))
-#define MDP_CSC_POST_BV1n(n) (msm_mdp_base + 0x40580 + 4 * (n))
-#define MDP_CSC_POST_BV2n(n) (msm_mdp_base + 0x405c0 + 4 * (n))
-
-#ifdef CONFIG_FB_MSM_MDP31
-#define MDP_CSC_PRE_LV1n(n) (msm_mdp_base + 0x40600 + 4 * (n))
-#define MDP_CSC_PRE_LV2n(n) (msm_mdp_base + 0x40640 + 4 * (n))
-#define MDP_CSC_POST_LV1n(n) (msm_mdp_base + 0x40680 + 4 * (n))
-#define MDP_CSC_POST_LV2n(n) (msm_mdp_base + 0x406c0 + 4 * (n))
-#define MDP_PPP_SCALE_COEFF_LSBn(n) (msm_mdp_base + 0x50400 + 8 * (n))
-#define MDP_PPP_SCALE_COEFF_MSBn(n) (msm_mdp_base + 0x50404 + 8 * (n))
-
-#define SCALE_D0_SET 0
-#define SCALE_D1_SET BIT(0)
-#define SCALE_D2_SET BIT(1)
-#define SCALE_U1_SET (BIT(0)|BIT(1))
-
-#else
-#define MDP_CSC_PRE_LV1n(n) (msm_mdp_base + 0x40580 + 4 * (n))
-#endif
-
-#define MDP_CURSOR_WIDTH 64
-#define MDP_CURSOR_HEIGHT 64
-#define MDP_CURSOR_SIZE (MDP_CURSOR_WIDTH*MDP_CURSOR_HEIGHT*4)
-
-#define MDP_DMA_P_LUT_C0_EN BIT(0)
-#define MDP_DMA_P_LUT_C1_EN BIT(1)
-#define MDP_DMA_P_LUT_C2_EN BIT(2)
-#define MDP_DMA_P_LUT_POST BIT(4)
-
-void mdp_hw_init(void);
-int mdp_ppp_pipe_wait(void);
-void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd);
-void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
- boolean isr);
-void mdp_set_dma_pan_info(struct fb_info *info, struct mdp_dirty_region *dirty,
- boolean sync);
-void mdp_dma_pan_update(struct fb_info *info);
-void mdp_refresh_screen(unsigned long data);
-int mdp_ppp_blit(struct fb_info *info, struct mdp_blit_req *req,
- struct file **pp_src, struct file **pp_dest);
-void mdp_lcd_update_workqueue_handler(struct work_struct *work);
-void mdp_vsync_resync_workqueue_handler(struct work_struct *work);
-void mdp_dma2_update(struct msm_fb_data_type *mfd);
-void mdp_config_vsync(struct msm_fb_data_type *);
-uint32 mdp_get_lcd_line_counter(struct msm_fb_data_type *mfd);
-enum hrtimer_restart mdp_dma2_vsync_hrtimer_handler(struct hrtimer *ht);
-void mdp_set_scale(MDPIBUF *iBuf,
- uint32 dst_roi_width,
- uint32 dst_roi_height,
- boolean inputRGB, boolean outputRGB, uint32 *pppop_reg_ptr);
-void mdp_init_scale_table(void);
-void mdp_adjust_start_addr(uint8 **src0,
- uint8 **src1,
- int v_slice,
- int h_slice,
- int x,
- int y,
- uint32 width,
- uint32 height, int bpp, MDPIBUF *iBuf, int layer);
-void mdp_set_blend_attr(MDPIBUF *iBuf,
- uint32 *alpha,
- uint32 *tpVal,
- uint32 perPixelAlpha, uint32 *pppop_reg_ptr);
-
-int mdp_dma3_on(struct platform_device *pdev);
-int mdp_dma3_off(struct platform_device *pdev);
-void mdp_dma3_update(struct msm_fb_data_type *mfd);
-
-int mdp_lcdc_on(struct platform_device *pdev);
-int mdp_lcdc_off(struct platform_device *pdev);
-void mdp_lcdc_update(struct msm_fb_data_type *mfd);
-int mdp_hw_cursor_update(struct fb_info *info, struct fb_cursor *cursor);
-void mdp_enable_irq(uint32 term);
-void mdp_disable_irq(uint32 term);
-void mdp_disable_irq_nolock(uint32 term);
-uint32_t mdp_get_bytes_per_pixel(uint32_t format);
-
-#ifdef MDP_HW_VSYNC
-void mdp_hw_vsync_clk_enable(struct msm_fb_data_type *mfd);
-void mdp_hw_vsync_clk_disable(struct msm_fb_data_type *mfd);
-#endif
-
-void mdp_dma_s_update(struct msm_fb_data_type *mfd);
-
-/* Added to support flipping */
-void mdp_set_offset_info(struct fb_info *info, uint32 address, uint32 interval);
-
-int get_gem_img(struct mdp_img *img, unsigned long *start,
- unsigned long *len);
-int get_img(struct mdp_img *img, struct fb_info *info,
- unsigned long *start, unsigned long *len,
- struct file **pp_file);
-
-
-/*int get_img(struct msmfb_data *img, struct fb_info *info,
- unsigned long *start, unsigned long *len, struct file **pp_file);*/
-#endif /* MDP_H */
diff --git a/drivers/staging/msm/mdp4.h b/drivers/staging/msm/mdp4.h
deleted file mode 100644
index 96997d9c908..00000000000
--- a/drivers/staging/msm/mdp4.h
+++ /dev/null
@@ -1,336 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef MDP4_H
-#define MDP4_H
-
-extern struct mdp_dma_data dma2_data;
-extern struct mdp_dma_data dma_s_data;
-extern struct mdp_dma_data dma_e_data;
-extern struct mdp_histogram mdp_hist;
-extern struct completion mdp_hist_comp;
-extern boolean mdp_is_in_isr;
-extern uint32 mdp_intr_mask;
-extern spinlock_t mdp_spin_lock;
-
-
-#define MDP4_NONBLOCKING /* enable non blocking ioctl */
-
-#define MDP4_OVERLAYPROC0_BASE 0x10000
-#define MDP4_OVERLAYPROC1_BASE 0x18000
-
-#define MDP4_VIDEO_BASE 0x20000
-#define MDP4_VIDEO_OFF 0x10000
-
-#define MDP4_RGB_BASE 0x40000
-#define MDP4_RGB_OFF 0x10000
-
-enum { /* display */
- PRIMARY_INTF_SEL,
- SECONDARY_INTF_SEL,
- EXTERNAL_INTF_SEL
-};
-
-enum {
- LCDC_RGB_INTF,
- DTV_INTF = LCDC_RGB_INTF,
- MDDI_LCDC_INTF,
- MDDI_INTF,
- EBI2_INTF
-};
-
-enum {
- MDDI_PRIMARY_SET,
- MDDI_SECONDARY_SET,
- MDDI_EXTERNAL_SET
-};
-
-enum {
- EBI2_LCD0,
- EBI2_LCD1
-};
-
-enum {
- OVERLAY_MODE_NONE,
- OVERLAY_MODE_BLT
-};
-
-enum {
- OVERLAY_REFRESH_ON_DEMAND,
- OVERLAY_REFRESH_VSYNC,
- OVERLAY_REFRESH_VSYNC_HALF,
- OVERLAY_REFRESH_VSYNC_QUARTER
-};
-
-enum {
- OVERLAY_FRAMEBUF,
- OVERLAY_DIRECTOUT
-};
-
-/* system interrupts */
-#define INTR_OVERLAY0_DONE BIT(0)
-#define INTR_OVERLAY1_DONE BIT(1)
-#define INTR_DMA_S_DONE BIT(2)
-#define INTR_DMA_E_DONE BIT(3)
-#define INTR_DMA_P_DONE BIT(4)
-#define INTR_VG1_HISTOGRAM BIT(5)
-#define INTR_VG2_HISTOGRAM BIT(6)
-#define INTR_PRIMARY_VSYNC BIT(7)
-#define INTR_PRIMARY_INTF_UDERRUN BIT(8)
-#define INTR_EXTERNAL_VSYNC BIT(9)
-#define INTR_EXTERNAL_INTF_UDERRUN BIT(10)
-#define INTR_DMA_P_HISTOGRAM BIT(17)
-
-/* histogram interrupts */
-#define INTR_HIST_DONE BIT(0)
-#define INTR_HIST_RESET_SEQ_DONE BIT(1)
-
-
-#ifdef CONFIG_FB_MSM_OVERLAY
-#define MDP4_ANY_INTR_MASK (INTR_OVERLAY0_DONE)
-#else
-#define MDP4_ANY_INTR_MASK (INTR_DMA_P_DONE)
-#endif
-
-enum {
- OVERLAY_PIPE_RGB1,
- OVERLAY_PIPE_RGB2,
-};
-
-enum {
- OVERLAY_PIPE_VG1, /* video/graphic */
- OVERLAY_PIPE_VG2
-};
-
-enum {
- OVERLAY_TYPE_RGB,
- OVERLAY_TYPE_VG /* video/graphic */
-};
-
-enum {
- MDP4_MIXER0,
- MDP4_MIXER1
-};
-
-#define MDP4_MAX_MIXER 2
-
-enum {
- OVERLAY_PLANE_INTERLEAVED,
- OVERLAY_PLANE_PLANAR,
- OVERLAY_PLANE_PSEUDO_PLANAR
-};
-
-enum {
- MDP4_MIXER_STAGE_UNUNSED, /* pipe not used */
- MDP4_MIXER_STAGE_BASE,
- MDP4_MIXER_STAGE0, /* zorder 0 */
- MDP4_MIXER_STAGE1, /* zorder 1 */
- MDP4_MIXER_STAGE2 /* zorder 2 */
-};
-
-#define MDP4_MAX_STAGE 4
-
-enum {
- MDP4_FRAME_FORMAT_LINEAR,
- MDP4_FRAME_FORMAT_ARGB_TILE,
- MDP4_FRAME_FORMAT_VIDEO_SUPERTILE
-};
-
-enum {
- MDP4_CHROMA_RGB,
- MDP4_CHROMA_H2V1,
- MDP4_CHROMA_H1V2,
- MDP4_CHROMA_420
-};
-
-#define MDP4_BLEND_BG_TRANSP_EN BIT(9)
-#define MDP4_BLEND_FG_TRANSP_EN BIT(8)
-#define MDP4_BLEND_BG_MOD_ALPHA BIT(7)
-#define MDP4_BLEND_BG_INV_ALPHA BIT(6)
-#define MDP4_BLEND_BG_ALPHA_FG_CONST (0 << 4)
-#define MDP4_BLEND_BG_ALPHA_BG_CONST (1 << 4)
-#define MDP4_BLEND_BG_ALPHA_FG_PIXEL (2 << 4)
-#define MDP4_BLEND_BG_ALPHA_BG_PIXEL (3 << 4)
-#define MDP4_BLEND_FG_MOD_ALPHA BIT(3)
-#define MDP4_BLEND_FG_INV_ALPHA BIT(2)
-#define MDP4_BLEND_FG_ALPHA_FG_CONST (0 << 0)
-#define MDP4_BLEND_FG_ALPHA_BG_CONST (1 << 0)
-#define MDP4_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
-#define MDP4_BLEND_FG_ALPHA_BG_PIXEL (3 << 0)
-
-#define MDP4_FORMAT_SOLID_FILL BIT(22)
-#define MDP4_FORMAT_UNPACK_ALIGN_MSB BIT(18)
-#define MDP4_FORMAT_UNPACK_TIGHT BIT(17)
-#define MDP4_FORMAT_90_ROTATED BIT(12)
-#define MDP4_FORMAT_ALPHA_ENABLE BIT(8)
-
-#define MDP4_OP_DEINT_ODD_REF BIT(19)
-#define MDP4_OP_IGC_LUT_EN BIT(16)
-#define MDP4_OP_DITHER_EN BIT(15)
-#define MDP4_OP_FLIP_UD BIT(14)
-#define MDP4_OP_FLIP_LR BIT(13)
-#define MDP4_OP_CSC_EN BIT(11)
-#define MDP4_OP_SRC_DATA_YCBCR BIT(9)
-#define MDP4_OP_SCALEY_FIR (0 << 4)
-#define MDP4_OP_SCALEY_MN_PHASE (1 << 4)
-#define MDP4_OP_SCALEY_PIXEL_RPT (2 << 4)
-#define MDP4_OP_SCALEX_FIR (0 << 2)
-#define MDP4_OP_SCALEX_MN_PHASE (1 << 2)
-#define MDP4_OP_SCALEX_PIXEL_RPT (2 << 2)
-#define MDP4_OP_SCALEY_EN BIT(1)
-#define MDP4_OP_SCALEX_EN BIT(0)
-
-#define MDP4_PIPE_PER_MIXER 2
-
-#define MDP4_MAX_PLANE 4
-
-#define MDP4_MAX_VIDEO_PIPE 2
-#define MDP4_MAX_RGB_PIPE 2
-#define MDP4_MAX_OVERLAY_PIPE 16
-
-
-struct mdp4_overlay_pipe {
- uint32 pipe_type; /* rgb, video/graphic */
- uint32 pipe_num;
- uint32 pipe_ndx;
- uint32 mixer_num; /* which mixer used */
- uint32 mixer_stage; /* which stage of mixer used */
- uint32 src_format;
- uint32 src_width; /* source img width */
- uint32 src_height; /* source img height */
- uint32 src_w; /* roi */
- uint32 src_h; /* roi */
- uint32 src_x; /* roi */
- uint32 src_y; /* roi */
- uint32 dst_w; /* roi */
- uint32 dst_h; /* roi */
- uint32 dst_x; /* roi */
- uint32 dst_y; /* roi */
- uint32 op_mode;
- uint32 transp;
- uint32 blend_op;
- uint32 phasex_step;
- uint32 phasey_step;
- uint32 alpha;
- uint32 is_fg; /* control alpha & color key */
- uint32 srcp0_addr; /* interleave, luma */
- uint32 srcp0_ystride;
- uint32 srcp1_addr; /* pseudoplanar, chroma plane */
- uint32 srcp1_ystride;
- uint32 srcp2_addr; /* planar color 2*/
- uint32 srcp2_ystride;
- uint32 srcp3_addr; /* alpha/color 3 */
- uint32 srcp3_ystride;
- uint32 fetch_plane;
- uint32 frame_format; /* video */
- uint32 chroma_site; /* video */
- uint32 chroma_sample; /* video */
- uint32 solid_fill;
- uint32 vc1_reduce; /* video */
- uint32 fatch_planes; /* video */
- uint32 unpack_align_msb;/* 0 to LSB, 1 to MSB */
- uint32 unpack_tight;/* 0 for loose, 1 for tight */
- uint32 unpack_count;/* 0 = 1 component, 1 = 2 component ... */
- uint32 rotated_90; /* has been rotated 90 degree */
- uint32 bpp; /* byte per pixel */
- uint32 alpha_enable;/* source has alpha */
- /*
- * number of bits for source component,
- * 0 = 1 bit, 1 = 2 bits, 2 = 6 bits, 3 = 8 bits
- */
- uint32 a_bit; /* component 3, alpha */
- uint32 r_bit; /* component 2, R_Cr */
- uint32 b_bit; /* component 1, B_Cb */
-	uint32 g_bit;	/* component 0, G_luma */
- /*
- * unpack pattern
- * A = C3, R = C2, B = C1, G = C0
- */
- uint32 element3; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
- uint32 element2; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
- uint32 element1; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
- uint32 element0; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
- struct completion comp;
- struct mdp_overlay req_data;
-};
-
-void mdp4_sw_reset(unsigned long bits);
-void mdp4_display_intf_sel(int output, unsigned long intf);
-void mdp4_overlay_cfg(int layer, int blt_mode, int refresh, int direct_out);
-void mdp4_ebi2_lcd_setup(int lcd, unsigned long base, int ystride);
-void mdp4_mddi_setup(int which, unsigned long id);
-unsigned long mdp4_display_status(void);
-void mdp4_enable_clk_irq(void);
-void mdp4_disable_clk_irq(void);
-void mdp4_dma_p_update(struct msm_fb_data_type *mfd);
-void mdp4_dma_s_update(struct msm_fb_data_type *mfd);
-void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
- boolean isr);
-void mdp4_pipe_kickoff(uint32 pipe, struct msm_fb_data_type *mfd);
-int mdp4_lcdc_on(struct platform_device *pdev);
-int mdp4_lcdc_off(struct platform_device *pdev);
-void mdp4_lcdc_update(struct msm_fb_data_type *mfd);
-void mdp4_intr_clear_set(ulong clear, ulong set);
-void mdp4_dma_p_cfg(void);
-void mdp4_hw_init(void);
-void mdp4_isr_read(int);
-void mdp4_clear_lcdc(void);
-void mdp4_mixer_blend_init(int mixer_num);
-void mdp4_vg_qseed_init(int vg_num);
-void mdp4_vg_csc_mv_setup(int vp_num);
-void mdp4_vg_csc_pre_bv_setup(int vp_num);
-void mdp4_vg_csc_post_bv_setup(int vp_num);
-void mdp4_vg_csc_pre_lv_setup(int vp_num);
-void mdp4_vg_csc_post_lv_setup(int vp_num);
-irqreturn_t mdp4_isr(int irq, void *ptr);
-void mdp4_overlay_format_to_pipe(uint32 format, struct mdp4_overlay_pipe *pipe);
-uint32 mdp4_overlay_format(struct mdp4_overlay_pipe *pipe);
-uint32 mdp4_overlay_unpack_pattern(struct mdp4_overlay_pipe *pipe);
-uint32 mdp4_overlay_op_mode(struct mdp4_overlay_pipe *pipe);
-void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd);
-void mdp4_overlay_rgb_setup(struct mdp4_overlay_pipe *pipe);
-void mdp4_overlay_reg_flush(struct mdp4_overlay_pipe *pipe, int all);
-void mdp4_mixer_blend_setup(struct mdp4_overlay_pipe *pipe);
-void mdp4_mixer_stage_up(struct mdp4_overlay_pipe *pipe);
-void mdp4_mixer_stage_down(struct mdp4_overlay_pipe *pipe);
-int mdp4_mixer_stage_can_run(struct mdp4_overlay_pipe *pipe);
-void mdp4_overlayproc_cfg(struct mdp4_overlay_pipe *pipe);
-void mdp4_mddi_overlay(struct msm_fb_data_type *mfd);
-int mdp4_overlay_format2type(uint32 format);
-int mdp4_overlay_format2pipe(struct mdp4_overlay_pipe *pipe);
-int mdp4_overlay_get(struct fb_info *info, struct mdp_overlay *req);
-int mdp4_overlay_set(struct fb_info *info, struct mdp_overlay *req);
-int mdp4_overlay_unset(struct fb_info *info, int ndx);
-int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req,
- struct file **pp_src_file);
-struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(void);
-void mdp4_overlay_pipe_free(struct mdp4_overlay_pipe *pipe);
-void mdp4_overlay_dmap_cfg(struct msm_fb_data_type *mfd, int lcdc);
-void mdp4_overlay_dmap_xy(struct mdp4_overlay_pipe *pipe);
-int mdp4_overlay_active(int mixer);
-void mdp4_overlay0_done_lcdc(void);
-void mdp4_overlay0_done_mddi(void);
-void mdp4_mddi_overlay_restore(void);
-void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe);
-void mdp4_rgb_igc_lut_setup(int num);
-void mdp4_vg_igc_lut_setup(int num);
-void mdp4_mixer_gc_lut_setup(int mixer_num);
-
-#ifdef CONFIG_DEBUG_FS
-int mdp4_debugfs_init(void);
-#endif
-
-int mdp_ppp_blit(struct fb_info *info, struct mdp_blit_req *req,
- struct file **pp_src_file, struct file **pp_dst_file);
-
-#endif /* MDP4_H */
diff --git a/drivers/staging/msm/mdp4_debugfs.c b/drivers/staging/msm/mdp4_debugfs.c
deleted file mode 100644
index 36954e89478..00000000000
--- a/drivers/staging/msm/mdp4_debugfs.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/hrtimer.h>
-#include <linux/clk.h>
-#include <mach/hardware.h>
-#include <linux/io.h>
-#include <linux/debugfs.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/uaccess.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-#include "mdp4.h"
-
-
-#define MDP4_DEBUG_BUF 128
-
-
-static char mdp4_debug_buf[MDP4_DEBUG_BUF];
-static ulong mdp4_debug_offset;
-static ulong mdp4_base_addr;
-
-static int mdp4_offset_set(void *data, u64 val)
-{
- mdp4_debug_offset = (int)val;
- return 0;
-}
-
-static int mdp4_offset_get(void *data, u64 *val)
-{
- *val = (u64)mdp4_debug_offset;
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(
- mdp4_offset_fops,
- mdp4_offset_get,
- mdp4_offset_set,
- "%llx\n");
-
-
-static int mdp4_debugfs_release(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-static ssize_t mdp4_debugfs_write(
- struct file *file,
- const char __user *buff,
- size_t count,
- loff_t *ppos)
-{
- int cnt;
- unsigned int data;
-
- printk(KERN_INFO "%s: offset=%d count=%d *ppos=%d\n",
- __func__, (int)mdp4_debug_offset, (int)count, (int)*ppos);
-
-	if (count >= sizeof(mdp4_debug_buf))
- return -EFAULT;
-
- if (copy_from_user(mdp4_debug_buf, buff, count))
- return -EFAULT;
-
-
- mdp4_debug_buf[count] = 0; /* end of string */
-
- cnt = sscanf(mdp4_debug_buf, "%x", &data);
- if (cnt < 1) {
-		printk(KERN_ERR "%s: sscanf failed cnt=%d\n", __func__, cnt);
- return -EINVAL;
- }
-
-	writel(data, mdp4_base_addr + mdp4_debug_offset);
-
- return 0;
-}
-
-static ssize_t mdp4_debugfs_read(
- struct file *file,
- char __user *buff,
- size_t count,
- loff_t *ppos)
-{
- int len = 0;
- unsigned int data;
-
- printk(KERN_INFO "%s: offset=%d count=%d *ppos=%d\n",
- __func__, (int)mdp4_debug_offset, (int)count, (int)*ppos);
-
- if (*ppos)
- return 0; /* the end */
-
- data = readl(mdp4_base_addr + mdp4_debug_offset);
-
- len = snprintf(mdp4_debug_buf, 4, "%x\n", data);
-
- if (len > 0) {
- if (len > count)
- len = count;
- if (copy_to_user(buff, mdp4_debug_buf, len))
- return -EFAULT;
- }
-
- printk(KERN_INFO "%s: len=%d\n", __func__, len);
-
- if (len < 0)
- return 0;
-
- *ppos += len; /* increase offset */
-
- return len;
-}
-
-static const struct file_operations mdp4_debugfs_fops = {
- .open = nonseekable_open,
- .release = mdp4_debugfs_release,
- .read = mdp4_debugfs_read,
- .write = mdp4_debugfs_write,
- .llseek = no_llseek,
-};
-
-int mdp4_debugfs_init(void)
-{
- struct dentry *dent = debugfs_create_dir("mdp4", NULL);
-
- if (IS_ERR(dent)) {
- printk(KERN_ERR "%s(%d): debugfs_create_dir fail, error %ld\n",
- __FILE__, __LINE__, PTR_ERR(dent));
- return -1;
- }
-
- if (debugfs_create_file("offset", 0644, dent, 0, &mdp4_offset_fops)
- == NULL) {
- printk(KERN_ERR "%s(%d): debugfs_create_file: offset fail\n",
- __FILE__, __LINE__);
- return -1;
- }
-
- if (debugfs_create_file("regs", 0644, dent, 0, &mdp4_debugfs_fops)
- == NULL) {
- printk(KERN_ERR "%s(%d): debugfs_create_file: regs fail\n",
- __FILE__, __LINE__);
- return -1;
- }
-
- mdp4_debug_offset = 0;
- mdp4_base_addr = (ulong) msm_mdp_base; /* defined at msm_fb_def.h */
-
- return 0;
-}
diff --git a/drivers/staging/msm/mdp4_overlay.c b/drivers/staging/msm/mdp4_overlay.c
deleted file mode 100644
index b9acf529929..00000000000
--- a/drivers/staging/msm/mdp4_overlay.c
+++ /dev/null
@@ -1,1259 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/hrtimer.h>
-#include <linux/clk.h>
-#include <mach/hardware.h>
-#include <linux/io.h>
-#include <linux/debugfs.h>
-#include <linux/fb.h>
-#include <msm_mdp.h>
-#include <linux/file.h>
-#include "android_pmem.h"
-#include <linux/major.h>
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/uaccess.h>
-#include <linux/mutex.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-#include "mdp4.h"
-
-
-struct mdp4_overlay_ctrl {
- struct mdp4_overlay_pipe plist[MDP4_MAX_OVERLAY_PIPE];
- struct mdp4_overlay_pipe *stage[MDP4_MAX_MIXER][MDP4_MAX_STAGE];
-} mdp4_overlay_db;
-
-static struct mdp4_overlay_ctrl *ctrl = &mdp4_overlay_db;
-
-
-void mdp4_overlay_dmap_cfg(struct msm_fb_data_type *mfd, int lcdc)
-{
- uint32 dma2_cfg_reg;
-
- dma2_cfg_reg = DMA_DITHER_EN;
-
- if (mfd->fb_imgType == MDP_BGR_565)
- dma2_cfg_reg |= DMA_PACK_PATTERN_BGR;
- else
- dma2_cfg_reg |= DMA_PACK_PATTERN_RGB;
-
-
- if (mfd->panel_info.bpp == 18) {
- dma2_cfg_reg |= DMA_DSTC0G_6BITS | /* 666 18BPP */
- DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
- } else if (mfd->panel_info.bpp == 16) {
- dma2_cfg_reg |= DMA_DSTC0G_6BITS | /* 565 16BPP */
- DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
- } else {
- dma2_cfg_reg |= DMA_DSTC0G_8BITS | /* 888 24BPP */
- DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
- }
-
- if (lcdc)
- dma2_cfg_reg |= DMA_PACK_ALIGN_MSB;
-
- /* dma2 config register */
- MDP_OUTP(MDP_BASE + 0x90000, dma2_cfg_reg);
-
-}
-
-void mdp4_overlay_dmap_xy(struct mdp4_overlay_pipe *pipe)
-{
-
- /* dma_p source */
- MDP_OUTP(MDP_BASE + 0x90004,
- (pipe->src_height << 16 | pipe->src_width));
- MDP_OUTP(MDP_BASE + 0x90008, pipe->srcp0_addr);
- MDP_OUTP(MDP_BASE + 0x9000c, pipe->srcp0_ystride);
-
- /* dma_p dest */
- MDP_OUTP(MDP_BASE + 0x90010, (pipe->dst_y << 16 | pipe->dst_x));
-}
-
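-/* phase step is a fixed-point scale ratio: 0x20000000 == 2^29 == 1.0 */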
-#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
-#define MDP4_VG_PHASE_STEP_SHIFT 29
-
-static int mdp4_leading_0(uint32 num)
-{
- uint32 bit = 0x80000000;
- int i;
-
- for (i = 0; i < 32; i++) {
- if (bit & num)
- return i;
- bit >>= 1;
- }
-
- return i;
-}
-
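-/*
- * Return src/dst as an f_num-bit fixed-point phase step.  src is
- * pre-shifted by its leading-zero count before the divide to keep as
- * much precision as possible, then shifted up to the full f_num format.
- */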
-static uint32 mdp4_scale_phase_step(int f_num, uint32 src, uint32 dst)
-{
- uint32 val;
- int n;
-
- n = mdp4_leading_0(src);
- if (n > f_num)
- n = f_num;
- val = src << n; /* shift up as far as possible to reduce loss of resolution */
- val /= dst;
- if (n < f_num) {
- n = f_num - n;
- val <<= n;
- }
-
- return val;
-}
-
-static void mdp4_scale_setup(struct mdp4_overlay_pipe *pipe)
-{
-
- pipe->phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
- pipe->phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
-
- if (pipe->dst_h && pipe->src_h != pipe->dst_h) {
- if (pipe->dst_h >= pipe->src_h * 8) /* too much */
- return;
- pipe->op_mode |= MDP4_OP_SCALEY_EN;
-
- if (pipe->pipe_type == OVERLAY_TYPE_VG) {
- if (pipe->dst_h <= (pipe->src_h / 4))
- pipe->op_mode |= MDP4_OP_SCALEY_MN_PHASE;
- else
- pipe->op_mode |= MDP4_OP_SCALEY_FIR;
- }
-
- pipe->phasey_step = mdp4_scale_phase_step(29,
- pipe->src_h, pipe->dst_h);
- }
-
- if (pipe->dst_w && pipe->src_w != pipe->dst_w) {
- if (pipe->dst_w >= pipe->src_w * 8) /* too much */
- return;
- pipe->op_mode |= MDP4_OP_SCALEX_EN;
-
- if (pipe->pipe_type == OVERLAY_TYPE_VG) {
- if (pipe->dst_w <= (pipe->src_w / 4))
- pipe->op_mode |= MDP4_OP_SCALEY_MN_PHASE;
- else
- pipe->op_mode |= MDP4_OP_SCALEY_FIR;
- }
-
- pipe->phasex_step = mdp4_scale_phase_step(29,
- pipe->src_w, pipe->dst_w);
- }
-}
-
-void mdp4_overlay_rgb_setup(struct mdp4_overlay_pipe *pipe)
-{
- char *rgb_base;
- uint32 src_size, src_xy, dst_size, dst_xy;
- uint32 format, pattern;
-
- rgb_base = MDP_BASE + MDP4_RGB_BASE;
- rgb_base += (MDP4_RGB_OFF * pipe->pipe_num);
-
- src_size = ((pipe->src_h << 16) | pipe->src_w);
- src_xy = ((pipe->src_y << 16) | pipe->src_x);
- dst_size = ((pipe->dst_h << 16) | pipe->dst_w);
- dst_xy = ((pipe->dst_y << 16) | pipe->dst_x);
-
- format = mdp4_overlay_format(pipe);
- pattern = mdp4_overlay_unpack_pattern(pipe);
-
- pipe->op_mode |= MDP4_OP_IGC_LUT_EN;
-
- mdp4_scale_setup(pipe);
-
- outpdw(rgb_base + 0x0000, src_size); /* MDP_RGB_SRC_SIZE */
- outpdw(rgb_base + 0x0004, src_xy); /* MDP_RGB_SRC_XY */
- outpdw(rgb_base + 0x0008, dst_size); /* MDP_RGB_DST_SIZE */
- outpdw(rgb_base + 0x000c, dst_xy); /* MDP_RGB_DST_XY */
-
- outpdw(rgb_base + 0x0010, pipe->srcp0_addr);
- outpdw(rgb_base + 0x0040, pipe->srcp0_ystride);
-
- outpdw(rgb_base + 0x0050, format);/* MDP_RGB_SRC_FORMAT */
- outpdw(rgb_base + 0x0054, pattern);/* MDP_RGB_SRC_UNPACK_PATTERN */
- outpdw(rgb_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
- outpdw(rgb_base + 0x005c, pipe->phasex_step);
- outpdw(rgb_base + 0x0060, pipe->phasey_step);
-
- /* 16 bytes-burst x 3 req <= 48 bytes */
- outpdw(rgb_base + 0x1004, 0xc2); /* MDP_RGB_FETCH_CFG */
-}
-
-void mdp4_overlay_vg_setup(struct mdp4_overlay_pipe *pipe)
-{
- char *vg_base;
- uint32 frame_size, src_size, src_xy, dst_size, dst_xy;
- uint32 format, pattern;
-
- vg_base = MDP_BASE + MDP4_VIDEO_BASE;
- vg_base += (MDP4_VIDEO_OFF * pipe->pipe_num);
-
- frame_size = ((pipe->src_height << 16) | pipe->src_width);
- src_size = ((pipe->src_h << 16) | pipe->src_w);
- src_xy = ((pipe->src_y << 16) | pipe->src_x);
- dst_size = ((pipe->dst_h << 16) | pipe->dst_w);
- dst_xy = ((pipe->dst_y << 16) | pipe->dst_x);
-
- format = mdp4_overlay_format(pipe);
- pattern = mdp4_overlay_unpack_pattern(pipe);
-
- pipe->op_mode |= (MDP4_OP_CSC_EN | MDP4_OP_SRC_DATA_YCBCR |
- MDP4_OP_IGC_LUT_EN);
-
- mdp4_scale_setup(pipe);
-
- outpdw(vg_base + 0x0000, src_size); /* MDP_RGB_SRC_SIZE */
- outpdw(vg_base + 0x0004, src_xy); /* MDP_RGB_SRC_XY */
- outpdw(vg_base + 0x0008, dst_size); /* MDP_RGB_DST_SIZE */
- outpdw(vg_base + 0x000c, dst_xy); /* MDP_RGB_DST_XY */
- outpdw(vg_base + 0x0048, frame_size); /* TILE frame size */
-
- /* luma component plane */
- outpdw(vg_base + 0x0010, pipe->srcp0_addr);
-
- /* chroma component plane */
- outpdw(vg_base + 0x0014, pipe->srcp1_addr);
-
- outpdw(vg_base + 0x0040,
- pipe->srcp1_ystride << 16 | pipe->srcp0_ystride);
-
- outpdw(vg_base + 0x0050, format); /* MDP_RGB_SRC_FORMAT */
- outpdw(vg_base + 0x0054, pattern); /* MDP_RGB_SRC_UNPACK_PATTERN */
- outpdw(vg_base + 0x0058, pipe->op_mode);/* MDP_RGB_OP_MODE */
- outpdw(vg_base + 0x005c, pipe->phasex_step);
- outpdw(vg_base + 0x0060, pipe->phasey_step);
-
- if (pipe->op_mode & MDP4_OP_DITHER_EN) {
- outpdw(vg_base + 0x0068,
- pipe->r_bit << 4 | pipe->b_bit << 2 | pipe->g_bit);
- }
-
- /* 16 bytes-burst x 3 req <= 48 bytes */
- outpdw(vg_base + 0x1004, 0xc2); /* MDP_VG_FETCH_CFG */
-}
-
-int mdp4_overlay_format2type(uint32 format)
-{
- switch (format) {
- case MDP_RGB_565:
- case MDP_RGB_888:
- case MDP_BGR_565:
- case MDP_ARGB_8888:
- case MDP_RGBA_8888:
- case MDP_BGRA_8888:
- return OVERLAY_TYPE_RGB;
- case MDP_YCRYCB_H2V1:
- case MDP_Y_CRCB_H2V1:
- case MDP_Y_CBCR_H2V1:
- case MDP_Y_CRCB_H2V2:
- case MDP_Y_CBCR_H2V2:
- case MDP_Y_CBCR_H2V2_TILE:
- case MDP_Y_CRCB_H2V2_TILE:
- return OVERLAY_TYPE_VG;
- default:
- return -ERANGE;
- }
-
-}
-
-#define C3_ALPHA 3 /* alpha */
-#define C2_R_Cr 2 /* R/Cr */
-#define C1_B_Cb 1 /* B/Cb */
-#define C0_G_Y 0 /* G/luma */
-
-int mdp4_overlay_format2pipe(struct mdp4_overlay_pipe *pipe)
-{
- switch (pipe->src_format) {
- case MDP_RGB_565:
- pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
- pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
- pipe->a_bit = 0;
- pipe->r_bit = 1; /* R, 5 bits */
- pipe->b_bit = 1; /* B, 5 bits */
- pipe->g_bit = 2; /* G, 6 bits */
- pipe->alpha_enable = 0;
- pipe->unpack_tight = 1;
- pipe->unpack_align_msb = 0;
- pipe->unpack_count = 2;
- pipe->element2 = C2_R_Cr; /* R */
- pipe->element1 = C0_G_Y; /* G */
- pipe->element0 = C1_B_Cb; /* B */
- pipe->bpp = 2; /* 2 bpp */
- break;
- case MDP_RGB_888:
- pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
- pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
- pipe->a_bit = 0;
- pipe->r_bit = 3; /* R, 8 bits */
- pipe->b_bit = 3; /* B, 8 bits */
- pipe->g_bit = 3; /* G, 8 bits */
- pipe->alpha_enable = 0;
- pipe->unpack_tight = 1;
- pipe->unpack_align_msb = 0;
- pipe->unpack_count = 2;
- pipe->element2 = C2_R_Cr; /* R */
- pipe->element1 = C0_G_Y; /* G */
- pipe->element0 = C1_B_Cb; /* B */
- pipe->bpp = 3; /* 3 bpp */
- break;
- case MDP_BGR_565:
- pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
- pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
- pipe->a_bit = 0;
- pipe->r_bit = 1; /* R, 5 bits */
- pipe->b_bit = 1; /* B, 5 bits */
- pipe->g_bit = 2; /* G, 6 bits */
- pipe->alpha_enable = 0;
- pipe->unpack_tight = 1;
- pipe->unpack_align_msb = 0;
- pipe->unpack_count = 2;
- pipe->element2 = C1_B_Cb; /* B */
- pipe->element1 = C0_G_Y; /* G */
- pipe->element0 = C2_R_Cr; /* R */
- pipe->bpp = 2; /* 2 bpp */
- break;
- case MDP_ARGB_8888:
- pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
- pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
- pipe->a_bit = 3; /* alpha, 8 bits */
- pipe->r_bit = 3; /* R, 8 bits */
- pipe->b_bit = 3; /* B, 8 bits */
- pipe->g_bit = 3; /* G, 8 bits */
- pipe->alpha_enable = 1;
- pipe->unpack_tight = 1;
- pipe->unpack_align_msb = 0;
- pipe->unpack_count = 3;
- pipe->element3 = C3_ALPHA; /* alpha */
- pipe->element2 = C2_R_Cr; /* R */
- pipe->element1 = C0_G_Y; /* G */
- pipe->element0 = C1_B_Cb; /* B */
- pipe->bpp = 4; /* 4 bpp */
- break;
- case MDP_RGBA_8888:
- pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
- pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
- pipe->a_bit = 3; /* alpha, 8 bits */
- pipe->r_bit = 3; /* R, 8 bits */
- pipe->b_bit = 3; /* B, 8 bits */
- pipe->g_bit = 3; /* G, 8 bits */
- pipe->alpha_enable = 1;
- pipe->unpack_tight = 1;
- pipe->unpack_align_msb = 0;
- pipe->unpack_count = 3;
- pipe->element3 = C2_R_Cr; /* R */
- pipe->element2 = C0_G_Y; /* G */
- pipe->element1 = C1_B_Cb; /* B */
- pipe->element0 = C3_ALPHA; /* alpha */
- pipe->bpp = 4; /* 4 bpp */
- break;
- case MDP_BGRA_8888:
- pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
- pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
- pipe->a_bit = 3; /* alpha, 8 bits */
- pipe->r_bit = 3; /* R, 8 bits */
- pipe->b_bit = 3; /* B, 8 bits */
- pipe->g_bit = 3; /* G, 8 bits */
- pipe->alpha_enable = 1;
- pipe->unpack_tight = 1;
- pipe->unpack_align_msb = 0;
- pipe->unpack_count = 3;
- pipe->element3 = C1_B_Cb; /* B */
- pipe->element2 = C0_G_Y; /* G */
- pipe->element1 = C2_R_Cr; /* R */
- pipe->element0 = C3_ALPHA; /* alpha */
- pipe->bpp = 4; /* 4 bpp */
- break;
- case MDP_YCRYCB_H2V1:
- pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
- pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
- pipe->a_bit = 0; /* no alpha */
- pipe->r_bit = 3; /* R, 8 bits */
- pipe->b_bit = 3; /* B, 8 bits */
- pipe->g_bit = 3; /* G, 8 bits */
- pipe->alpha_enable = 0;
- pipe->unpack_tight = 1;
- pipe->unpack_align_msb = 0;
- pipe->unpack_count = 3;
- pipe->element3 = C0_G_Y; /* G */
- pipe->element2 = C2_R_Cr; /* R */
- pipe->element1 = C0_G_Y; /* G */
- pipe->element0 = C1_B_Cb; /* B */
- pipe->bpp = 2; /* 2 bpp */
- pipe->chroma_sample = MDP4_CHROMA_H2V1;
- break;
- case MDP_Y_CRCB_H2V1:
- case MDP_Y_CBCR_H2V1:
- case MDP_Y_CRCB_H2V2:
- case MDP_Y_CBCR_H2V2:
- pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
- pipe->fetch_plane = OVERLAY_PLANE_PSEUDO_PLANAR;
- pipe->a_bit = 0;
- pipe->r_bit = 3; /* R, 8 bits */
- pipe->b_bit = 3; /* B, 8 bits */
- pipe->g_bit = 3; /* G, 8 bits */
- pipe->alpha_enable = 0;
- pipe->unpack_tight = 1;
- pipe->unpack_align_msb = 0;
- pipe->unpack_count = 1; /* 2 */
- pipe->element3 = C0_G_Y; /* not used */
- pipe->element2 = C0_G_Y; /* not used */
- if (pipe->src_format == MDP_Y_CRCB_H2V1) {
- pipe->element1 = C2_R_Cr; /* R */
- pipe->element0 = C1_B_Cb; /* B */
- pipe->chroma_sample = MDP4_CHROMA_H2V1;
- } else if (pipe->src_format == MDP_Y_CBCR_H2V1) {
- pipe->element1 = C1_B_Cb; /* B */
- pipe->element0 = C2_R_Cr; /* R */
- pipe->chroma_sample = MDP4_CHROMA_H2V1;
- } else if (pipe->src_format == MDP_Y_CRCB_H2V2) {
- pipe->element1 = C2_R_Cr; /* R */
- pipe->element0 = C1_B_Cb; /* B */
- pipe->chroma_sample = MDP4_CHROMA_420;
- } else if (pipe->src_format == MDP_Y_CBCR_H2V2) {
- pipe->element1 = C1_B_Cb; /* B */
- pipe->element0 = C2_R_Cr; /* R */
- pipe->chroma_sample = MDP4_CHROMA_420;
- }
- pipe->bpp = 2; /* 2 bpp */
- break;
- case MDP_Y_CBCR_H2V2_TILE:
- case MDP_Y_CRCB_H2V2_TILE:
- pipe->frame_format = MDP4_FRAME_FORMAT_VIDEO_SUPERTILE;
- pipe->fetch_plane = OVERLAY_PLANE_PSEUDO_PLANAR;
- pipe->a_bit = 0;
- pipe->r_bit = 3; /* R, 8 bits */
- pipe->b_bit = 3; /* B, 8 bits */
- pipe->g_bit = 3; /* G, 8 bits */
- pipe->alpha_enable = 0;
- pipe->unpack_tight = 1;
- pipe->unpack_align_msb = 0;
- pipe->unpack_count = 1; /* 2 */
- pipe->element3 = C0_G_Y; /* not used */
- pipe->element2 = C0_G_Y; /* not used */
- if (pipe->src_format == MDP_Y_CRCB_H2V2_TILE) {
- pipe->element1 = C2_R_Cr; /* R */
- pipe->element0 = C1_B_Cb; /* B */
- pipe->chroma_sample = MDP4_CHROMA_420;
- } else if (pipe->src_format == MDP_Y_CBCR_H2V2_TILE) {
- pipe->element1 = C1_B_Cb; /* B */
- pipe->element0 = C2_R_Cr; /* R */
- pipe->chroma_sample = MDP4_CHROMA_420;
- }
- pipe->bpp = 2; /* 2 bpp */
- break;
- default:
- /* not likely */
- return -ERANGE;
- }
-
- return 0;
-}
-
-/*
- * color_key_convert: output with 12 bits color key
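- * each component is expanded to 12 bits by bit replication,
- * e.g. a 5-bit 0x1f becomes (0x1f << 7) + (0x1f << 2) + (0x1f >> 3) = 0xfff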
- */
-static uint32 color_key_convert(int start, int num, uint32 color)
-{
-
- uint32 data;
-
- data = (color >> start) & ((1 << num) - 1);
-
- if (num == 5)
- data = (data << 7) + (data << 2) + (data >> 3);
- else if (num == 6)
- data = (data << 6) + data;
- else /* 8 bits */
- data = (data << 4) + (data >> 4);
-
- return data;
-
-}
-
-void transp_color_key(int format, uint32 transp,
- uint32 *c0, uint32 *c1, uint32 *c2)
-{
- int b_start, g_start, r_start;
- int b_num, g_num, r_num;
-
- switch (format) {
- case MDP_RGB_565:
- b_start = 0;
- g_start = 5;
- r_start = 11;
- r_num = 5;
- g_num = 6;
- b_num = 5;
- break;
- case MDP_RGB_888:
- case MDP_XRGB_8888:
- case MDP_ARGB_8888:
- b_start = 0;
- g_start = 8;
- r_start = 16;
- r_num = 8;
- g_num = 8;
- b_num = 8;
- break;
- case MDP_BGR_565:
- b_start = 11;
- g_start = 5;
- r_start = 0;
- r_num = 5;
- g_num = 6;
- b_num = 5;
- break;
- case MDP_Y_CBCR_H2V2:
- case MDP_Y_CBCR_H2V1:
- b_start = 8;
- g_start = 16;
- r_start = 0;
- r_num = 8;
- g_num = 8;
- b_num = 8;
- break;
- case MDP_Y_CRCB_H2V2:
- case MDP_Y_CRCB_H2V1:
- b_start = 0;
- g_start = 16;
- r_start = 8;
- r_num = 8;
- g_num = 8;
- b_num = 8;
- break;
- default:
- b_start = 0;
- g_start = 8;
- r_start = 16;
- r_num = 8;
- g_num = 8;
- b_num = 8;
- break;
- }
-
- *c0 = color_key_convert(g_start, g_num, transp);
- *c1 = color_key_convert(b_start, b_num, transp);
- *c2 = color_key_convert(r_start, r_num, transp);
-}
-
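-/*
- * Pack the pipe's unpack, bpp and per-component bit-depth attributes
- * into the SRC_FORMAT register value used by both the RGB and VG
- * pipe setup paths.
- */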
-uint32 mdp4_overlay_format(struct mdp4_overlay_pipe *pipe)
-{
- uint32 format;
-
- format = 0;
-
- if (pipe->solid_fill)
- format |= MDP4_FORMAT_SOLID_FILL;
-
- if (pipe->unpack_align_msb)
- format |= MDP4_FORMAT_UNPACK_ALIGN_MSB;
-
- if (pipe->unpack_tight)
- format |= MDP4_FORMAT_UNPACK_TIGHT;
-
- if (pipe->alpha_enable)
- format |= MDP4_FORMAT_ALPHA_ENABLE;
-
- format |= (pipe->unpack_count << 13);
- format |= ((pipe->bpp - 1) << 9);
- format |= (pipe->a_bit << 6);
- format |= (pipe->r_bit << 4);
- format |= (pipe->b_bit << 2);
- format |= pipe->g_bit;
-
- format |= (pipe->frame_format << 29);
-
- if (pipe->fetch_plane == OVERLAY_PLANE_PSEUDO_PLANAR) {
- /* video/graphic */
- format |= (pipe->fetch_plane << 19);
- format |= (pipe->chroma_site << 28);
- format |= (pipe->chroma_sample << 26);
- }
-
- return format;
-}
-
-uint32 mdp4_overlay_unpack_pattern(struct mdp4_overlay_pipe *pipe)
-{
- return (pipe->element3 << 24) | (pipe->element2 << 16) |
- (pipe->element1 << 8) | pipe->element0;
-}
-
-void mdp4_overlayproc_cfg(struct mdp4_overlay_pipe *pipe)
-{
- uint32 data;
- char *overlay_base;
-
- if (pipe->mixer_num == MDP4_MIXER1)
- overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
- else
- overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
-
- /* MDP_OVERLAYPROC_CFG */
- outpdw(overlay_base + 0x0004, 0x01); /* directout */
- data = pipe->src_height;
- data <<= 16;
- data |= pipe->src_width;
- outpdw(overlay_base + 0x0008, data); /* ROI, height + width */
- outpdw(overlay_base + 0x000c, pipe->srcp0_addr);
- outpdw(overlay_base + 0x0010, pipe->srcp0_ystride);
- outpdw(overlay_base + 0x0014, 0x4); /* GC_LUT_EN, 888 */
-}
-
-int mdp4_overlay_active(int mixer)
-{
- uint32 data, mask, i;
- int p1, p2;
-
- data = inpdw(MDP_BASE + 0x10100);
- p1 = 0;
- p2 = 0;
- for (i = 0; i < 8; i++) {
- mask = data & 0x0f;
- if (mask) {
- if (mask <= 4)
- p1++;
- else
- p2++;
- }
- data >>= 4;
- }
-
- if (mixer)
- return p2;
- else
- return p1;
-}
-
-void mdp4_mixer_stage_up(struct mdp4_overlay_pipe *pipe)
-{
- uint32 data, mask, snum, stage, mixer;
-
- stage = pipe->mixer_stage;
- mixer = pipe->mixer_num;
-
- /* MDP_LAYERMIXER_IN_CFG, shared by both mixer 0 and 1 */
- data = inpdw(MDP_BASE + 0x10100);
-
- if (mixer == MDP4_MIXER1)
- stage += 8;
-
- if (pipe->pipe_type == OVERLAY_TYPE_VG) {/* VG1 and VG2 */
- snum = 0;
- snum += (4 * pipe->pipe_num);
- } else {
- snum = 8;
- snum += (4 * pipe->pipe_num); /* RGB1 and RGB2 */
- }
-
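- /* each pipe owns a 4-bit stage field within MDP_LAYERMIXER_IN_CFG */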
- mask = 0x0f;
- mask <<= snum;
- stage <<= snum;
- data &= ~mask; /* clear old bits */
-
- data |= stage;
-
- outpdw(MDP_BASE + 0x10100, data); /* MDP_LAYERMIXER_IN_CFG */
-
- data = inpdw(MDP_BASE + 0x10100);
-
- ctrl->stage[pipe->mixer_num][pipe->mixer_stage] = pipe; /* keep it */
-}
-
-void mdp4_mixer_stage_down(struct mdp4_overlay_pipe *pipe)
-{
- uint32 data, mask, snum, stage, mixer;
-
- stage = pipe->mixer_stage;
- mixer = pipe->mixer_num;
-
- if (pipe != ctrl->stage[mixer][stage]) /* not running */
- return;
-
- /* MDP_LAYERMIXER_IN_CFG, shared by both mixer 0 and 1 */
- data = inpdw(MDP_BASE + 0x10100);
-
- if (mixer == MDP4_MIXER1)
- stage += 8;
-
- if (pipe->pipe_type == OVERLAY_TYPE_VG) {/* VG1 and VG2 */
- snum = 0;
- snum += (4 * pipe->pipe_num);
- } else {
- snum = 8;
- snum += (4 * pipe->pipe_num); /* RGB1 and RGB2 */
- }
-
- mask = 0x0f;
- mask <<= snum;
- data &= ~mask; /* clear old bits */
-
- outpdw(MDP_BASE + 0x10100, data); /* MDP_LAYERMIXER_IN_CFG */
-
- data = inpdw(MDP_BASE + 0x10100);
-
- ctrl->stage[pipe->mixer_num][pipe->mixer_stage] = NULL; /* clear it */
-}
-
-void mdp4_mixer_blend_setup(struct mdp4_overlay_pipe *pipe)
-{
- unsigned char *overlay_base;
- uint32 c0, c1, c2, blend_op;
- int off;
-
- if (pipe->mixer_num) /* mixer number, /dev/fb0, /dev/fb1 */
- overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
- else
- overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
-
- /* stage 0 to stage 2 */
- off = 0x20 * (pipe->mixer_stage - MDP4_MIXER_STAGE0);
-
- blend_op = 0;
- if (pipe->alpha_enable) /* ARGB */
- blend_op = MDP4_BLEND_FG_ALPHA_FG_PIXEL |
- MDP4_BLEND_BG_ALPHA_FG_PIXEL;
- else
- blend_op = (MDP4_BLEND_BG_ALPHA_BG_CONST |
- MDP4_BLEND_FG_ALPHA_FG_CONST);
-
-
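- /* constant-alpha blend: split pipe->alpha between FG and BG */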
- if (pipe->alpha_enable == 0) { /* not ARGB */
- if (pipe->is_fg) {
- outpdw(overlay_base + off + 0x108, pipe->alpha);
- outpdw(overlay_base + off + 0x10c, 0xff - pipe->alpha);
- } else {
- outpdw(overlay_base + off + 0x108, 0xff - pipe->alpha);
- outpdw(overlay_base + off + 0x10c, pipe->alpha);
- }
- }
-
- if (pipe->transp != MDP_TRANSP_NOP) {
- transp_color_key(pipe->src_format, pipe->transp, &c0, &c1, &c2);
- if (pipe->is_fg) {
- blend_op |= MDP4_BLEND_FG_TRANSP_EN; /* Fg blocked */
- /* lower limit */
- if (c0 > 0x10)
- c0 -= 0x10;
- if (c1 > 0x10)
- c1 -= 0x10;
- if (c2 > 0x10)
- c2 -= 0x10;
- outpdw(overlay_base + off + 0x110,
- (c1 << 16 | c0));/* low */
- outpdw(overlay_base + off + 0x114, c2);/* low */
- /* upper limit */
- if ((c0 + 0x20) < 0x0fff)
- c0 += 0x20;
- else
- c0 = 0x0fff;
- if ((c1 + 0x20) < 0x0fff)
- c1 += 0x20;
- else
- c1 = 0x0fff;
- if ((c2 + 0x20) < 0x0fff)
- c2 += 0x20;
- else
- c2 = 0x0fff;
- outpdw(overlay_base + off + 0x118,
- (c1 << 16 | c0));/* high */
- outpdw(overlay_base + off + 0x11c, c2);/* high */
- } else {
- blend_op |= MDP4_BLEND_BG_TRANSP_EN; /* bg blocked */
- /* lower limit */
- if (c0 > 0x10)
- c0 -= 0x10;
- if (c1 > 0x10)
- c1 -= 0x10;
- if (c2 > 0x10)
- c2 -= 0x10;
- outpdw(overlay_base + 0x180,
- (c1 << 16 | c0));/* low */
- outpdw(overlay_base + 0x184, c2);/* low */
- /* upper limit */
- if ((c0 + 0x20) < 0x0fff)
- c0 += 0x20;
- else
- c0 = 0x0fff;
- if ((c1 + 0x20) < 0x0fff)
- c1 += 0x20;
- else
- c1 = 0x0fff;
- if ((c2 + 0x20) < 0x0fff)
- c2 += 0x20;
- else
- c2 = 0x0fff;
- outpdw(overlay_base + 0x188,
- (c1 << 16 | c0));/* high */
- outpdw(overlay_base + 0x18c, c2);/* high */
- }
- }
- outpdw(overlay_base + off + 0x104, blend_op);
-}
-
-void mdp4_overlay_reg_flush(struct mdp4_overlay_pipe *pipe, int all)
-{
- uint32 bits = 0;
-
- if (pipe->mixer_num == MDP4_MIXER1)
- bits |= 0x02;
- else
- bits |= 0x01;
-
- if (all) {
- if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
- if (pipe->pipe_num == OVERLAY_PIPE_RGB2)
- bits |= 0x20;
- else
- bits |= 0x10;
- } else {
- if (pipe->pipe_num == OVERLAY_PIPE_VG2)
- bits |= 0x08;
- else
- bits |= 0x04;
- }
- }
-
- outpdw(MDP_BASE + 0x18000, bits); /* MDP_OVERLAY_REG_FLUSH */
-
- while (inpdw(MDP_BASE + 0x18000) & bits) /* self clear when complete */
- ;
-}
-
-struct mdp4_overlay_pipe *mdp4_overlay_ndx2pipe(int ndx)
-{
- struct mdp4_overlay_pipe *pipe;
-
- if (ndx <= 0 || ndx > MDP4_MAX_OVERLAY_PIPE)
- return NULL;
-
- pipe = &ctrl->plist[ndx - 1]; /* ndx start from 1 */
-
- if (pipe->pipe_ndx == 0)
- return NULL;
-
- return pipe;
-}
-
-struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(void)
-{
- int i;
- struct mdp4_overlay_pipe *pipe;
-
- pipe = &ctrl->plist[0];
- for (i = 0; i < MDP4_MAX_OVERLAY_PIPE; i++) {
- if (pipe->pipe_ndx == 0) {
- pipe->pipe_ndx = i + 1; /* start from 1 */
- init_completion(&pipe->comp);
- printk(KERN_INFO "mdp4_overlay_pipe_alloc: pipe=%p ndx=%d\n",
- pipe, pipe->pipe_ndx);
- return pipe;
- }
- pipe++;
- }
-
- return NULL;
-}
-
-
-void mdp4_overlay_pipe_free(struct mdp4_overlay_pipe *pipe)
-{
- printk(KERN_INFO "mdp4_overlay_pipe_free: pipe=%p ndx=%d\n",
- pipe, pipe->pipe_ndx);
- memset(pipe, 0, sizeof(*pipe));
-}
-
-static int get_pipe_num(int ptype, int stage)
-{
- if (ptype == OVERLAY_TYPE_RGB) {
- if (stage == MDP4_MIXER_STAGE_BASE)
- return OVERLAY_PIPE_RGB1;
- else
- return OVERLAY_PIPE_RGB2;
- } else {
- if (stage == MDP4_MIXER_STAGE0)
- return OVERLAY_PIPE_VG1;
- else
- return OVERLAY_PIPE_VG2;
- }
-}
-
-int mdp4_overlay_req_check(uint32 id, uint32 z_order, uint32 mixer)
-{
- struct mdp4_overlay_pipe *pipe;
-
- pipe = ctrl->stage[mixer][z_order];
-
- if (pipe == NULL)
- return 0;
-
- if (pipe->pipe_ndx == id) /* same req, recycle */
- return 0;
-
- return -EPERM;
-}
-
-static int mdp4_overlay_req2pipe(struct mdp_overlay *req, int mixer,
- struct mdp4_overlay_pipe **ppipe)
-{
- struct mdp4_overlay_pipe *pipe;
- int ret, ptype;
-
- if (mixer >= MDP4_MAX_MIXER) {
- printk(KERN_ERR "mpd_overlay_req2pipe: mixer out of range!\n");
- return -ERANGE;
- }
-
- if (req->z_order < 0 || req->z_order > 2) {
- printk(KERN_ERR "mpd_overlay_req2pipe: z_order=%d out of range!\n",
- req->z_order);
- return -ERANGE;
- }
-
- if (req->src_rect.h == 0 || req->src_rect.w == 0) {
- printk(KERN_ERR "mpd_overlay_req2pipe: src img of zero size!\n");
- return -EINVAL;
- }
-
- ret = mdp4_overlay_req_check(req->id, req->z_order, mixer);
- if (ret < 0)
- return ret;
-
- ptype = mdp4_overlay_format2type(req->src.format);
- if (ptype < 0)
- return ptype;
-
- if (req->id == MSMFB_NEW_REQUEST) /* new request */
- pipe = mdp4_overlay_pipe_alloc();
- else
- pipe = mdp4_overlay_ndx2pipe(req->id);
-
- if (pipe == NULL)
- return -ENOMEM;
-
- pipe->src_format = req->src.format;
- ret = mdp4_overlay_format2pipe(pipe);
-
- if (ret < 0)
- return ret;
-
- /*
- * base layer == 1, reserved for frame buffer
- * zorder 0 == stage 0 == 2
- * zorder 1 == stage 1 == 3
- * zorder 2 == stage 2 == 4
- */
- if (req->id == MSMFB_NEW_REQUEST) { /* new request */
- pipe->mixer_stage = req->z_order + MDP4_MIXER_STAGE0;
- pipe->pipe_type = ptype;
- pipe->pipe_num = get_pipe_num(ptype, pipe->mixer_stage);
- printk(KERN_INFO "mpd4_overlay_req2pipe: zorder=%d pipe_num=%d\n",
- req->z_order, pipe->pipe_num);
- }
-
- pipe->src_width = req->src.width & 0x07ff; /* source img width */
- pipe->src_height = req->src.height & 0x07ff; /* source img height */
- pipe->src_h = req->src_rect.h & 0x07ff;
- pipe->src_w = req->src_rect.w & 0x07ff;
- pipe->src_y = req->src_rect.y & 0x07ff;
- pipe->src_x = req->src_rect.x & 0x07ff;
- pipe->dst_h = req->dst_rect.h & 0x07ff;
- pipe->dst_w = req->dst_rect.w & 0x07ff;
- pipe->dst_y = req->dst_rect.y & 0x07ff;
- pipe->dst_x = req->dst_rect.x & 0x07ff;
-
- if (req->flags & MDP_FLIP_LR)
- pipe->op_mode |= MDP4_OP_FLIP_LR;
-
- if (req->flags & MDP_FLIP_UD)
- pipe->op_mode |= MDP4_OP_FLIP_UD;
-
- if (req->flags & MDP_DITHER)
- pipe->op_mode |= MDP4_OP_DITHER_EN;
-
- if (req->flags & MDP_DEINTERLACE)
- pipe->op_mode |= MDP4_OP_DEINT_ODD_REF;
-
- pipe->is_fg = req->is_fg;/* control alpha and color key */
-
- pipe->alpha = req->alpha & 0x0ff;
-
- pipe->transp = req->transp_mask;
-
- *ppipe = pipe;
-
- return 0;
-}
-
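-/*
- * Resolve a user-supplied memory_id (a PMEM or framebuffer fd) to the
- * physical start address and length of the backing memory.
- */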
-int get_img(struct msmfb_data *img, struct fb_info *info,
- unsigned long *start, unsigned long *len, struct file **pp_file)
-{
- int put_needed, ret = 0;
- struct file *file;
-#ifdef CONFIG_ANDROID_PMEM
- unsigned long vstart;
-#endif
-
-#ifdef CONFIG_ANDROID_PMEM
- if (!get_pmem_file(img->memory_id, start, &vstart, len, pp_file))
- return 0;
-#endif
- file = fget_light(img->memory_id, &put_needed);
- if (file == NULL)
- return -1;
-
- if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
- *start = info->fix.smem_start;
- *len = info->fix.smem_len;
- *pp_file = file;
- } else {
- ret = -1;
- fput_light(file, put_needed);
- }
- return ret;
-}
-int mdp4_overlay_get(struct fb_info *info, struct mdp_overlay *req)
-{
- struct mdp4_overlay_pipe *pipe;
-
- pipe = mdp4_overlay_ndx2pipe(req->id);
- if (pipe == NULL)
- return -ENODEV;
-
- *req = pipe->req_data;
-
- return 0;
-}
-
-int mdp4_overlay_set(struct fb_info *info, struct mdp_overlay *req)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- int ret, mixer;
- struct mdp4_overlay_pipe *pipe;
- int lcdc;
-
- if (mfd == NULL)
- return -ENODEV;
-
- if (req->src.format == MDP_FB_FORMAT)
- req->src.format = mfd->fb_imgType;
-
- if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
- return -EINTR;
-
- mixer = info->node; /* minor number of char device */
-
- ret = mdp4_overlay_req2pipe(req, mixer, &pipe);
- if (ret < 0) {
- mutex_unlock(&mfd->dma->ov_mutex);
- return ret;
- }
-
- lcdc = inpdw(MDP_BASE + 0xc0000);
-
- if (lcdc == 0) { /* mddi */
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- }
-
- /* return id back to user */
- req->id = pipe->pipe_ndx; /* pipe_ndx start from 1 */
- pipe->req_data = *req; /* keep original req */
-
- mutex_unlock(&mfd->dma->ov_mutex);
-
- return 0;
-}
-
-int mdp4_overlay_unset(struct fb_info *info, int ndx)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- struct mdp4_overlay_pipe *pipe;
- int lcdc;
-
- if (mfd == NULL)
- return -ENODEV;
-
- if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
- return -EINTR;
-
- pipe = mdp4_overlay_ndx2pipe(ndx);
-
- if (pipe == NULL) {
- mutex_unlock(&mfd->dma->ov_mutex);
- return -ENODEV;
- }
-
- lcdc = inpdw(MDP_BASE + 0xc0000);
-
- mdp4_mixer_stage_down(pipe);
-
- if (lcdc == 0) { /* mddi */
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- }
-
- if (lcdc) /* LCDC mode */
- mdp4_overlay_reg_flush(pipe, 0);
-
- mdp4_overlay_pipe_free(pipe);
-
- if (lcdc == 0) { /* mddi */
- mdp4_mddi_overlay_restore();
- }
-
- mutex_unlock(&mfd->dma->ov_mutex);
-
- return 0;
-}
-
-struct tile_desc {
- uint32 width; /* tile's width */
- uint32 height; /* tile's height */
- uint32 row_tile_w; /* tiles per row's width */
- uint32 row_tile_h; /* tiles per row's height */
-};
-
-void tile_samsung(struct tile_desc *tp)
-{
- /*
- * each row of a samsung tile consists of two tiles in height
- * and two tiles in width, which means the width should align to
- * 64 x 2 bytes and the height to 32 x 2 bytes.
- * the video decoder generates two tiles in width and one tile
- * in height, which leaves the height aligned to 32 x 1 bytes.
- */
- tp->width = 64; /* 64 bytes */
- tp->row_tile_w = 2; /* 2 tiles per row's width */
- tp->height = 32; /* 32 bytes */
- tp->row_tile_h = 1; /* 1 tiles per row's height */
-}
-
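-/*
- * Size of the luma plane rounded up to whole macro-tiles; in the
- * pseudo-planar tile formats the chroma plane starts right after this.
- */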
-uint32 tile_mem_size(struct mdp4_overlay_pipe *pipe, struct tile_desc *tp)
-{
- uint32 tile_w, tile_h;
- uint32 row_num_w, row_num_h;
-
-
- tile_w = tp->width * tp->row_tile_w;
- tile_h = tp->height * tp->row_tile_h;
-
- row_num_w = (pipe->src_width + tile_w - 1) / tile_w;
- row_num_h = (pipe->src_height + tile_h - 1) / tile_h;
-
- return row_num_w * row_num_h * tile_w * tile_h;
-}
-
-int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req,
- struct file **pp_src_file)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- struct msmfb_data *img;
- struct mdp4_overlay_pipe *pipe;
- ulong start, addr;
- ulong len = 0;
- struct file *p_src_file = 0;
- int lcdc;
-
- if (mfd == NULL)
- return -ENODEV;
-
- pipe = mdp4_overlay_ndx2pipe(req->id);
- if (pipe == NULL)
- return -ENODEV;
-
- if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
- return -EINTR;
-
- img = &req->data;
- get_img(img, info, &start, &len, &p_src_file);
- if (len == 0) {
- mutex_unlock(&mfd->dma->ov_mutex);
- printk(KERN_ERR "mdp_overlay_play: could not retrieve"
- " image from memory\n");
- return -1;
- }
- *pp_src_file = p_src_file;
-
- addr = start + img->offset;
- pipe->srcp0_addr = addr;
- pipe->srcp0_ystride = pipe->src_width * pipe->bpp;
-
- if (pipe->fetch_plane == OVERLAY_PLANE_PSEUDO_PLANAR) {
- if (pipe->frame_format == MDP4_FRAME_FORMAT_VIDEO_SUPERTILE) {
- struct tile_desc tile;
-
- tile_samsung(&tile);
- pipe->srcp1_addr = addr + tile_mem_size(pipe, &tile);
- } else
- pipe->srcp1_addr = addr +
- pipe->src_width * pipe->src_height;
-
- pipe->srcp0_ystride = pipe->src_width;
- pipe->srcp1_ystride = pipe->src_width;
- }
-
- lcdc = inpdw(MDP_BASE + 0xc0000);
- lcdc &= 0x01; /* LCDC mode */
-
- if (pipe->pipe_type == OVERLAY_TYPE_VG)
- mdp4_overlay_vg_setup(pipe); /* video/graphic pipe */
- else
- mdp4_overlay_rgb_setup(pipe); /* rgb pipe */
-
- mdp4_mixer_blend_setup(pipe);
- mdp4_mixer_stage_up(pipe);
-
- if (lcdc) { /* LCDC mode */
- mdp4_overlay_reg_flush(pipe, 1);
- }
-
- if (lcdc) { /* LCDC mode */
- if (pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) { /* done */
- mutex_unlock(&mfd->dma->ov_mutex);
- return 0;
- }
- }
-
- if (lcdc == 0) { /* MDDI mode */
-#ifdef MDP4_NONBLOCKING
- if (mfd->panel_power_on)
-#else
- if (!mfd->dma->busy && mfd->panel_power_on)
-#endif
- mdp4_mddi_overlay_kickoff(mfd, pipe);
- }
-
- mutex_unlock(&mfd->dma->ov_mutex);
-
- return 0;
-}
diff --git a/drivers/staging/msm/mdp4_overlay_lcdc.c b/drivers/staging/msm/mdp4_overlay_lcdc.c
deleted file mode 100644
index a6ab8ec83f5..00000000000
--- a/drivers/staging/msm/mdp4_overlay_lcdc.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
-#include <linux/delay.h>
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/spinlock.h>
-
-#include <linux/fb.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-#include "mdp4.h"
-
-#ifdef CONFIG_FB_MSM_MDP40
-#define LCDC_BASE 0xC0000
-#else
-#define LCDC_BASE 0xE0000
-#endif
-
-int first_pixel_start_x;
-int first_pixel_start_y;
-
-static struct mdp4_overlay_pipe *lcdc_pipe;
-
-int mdp_lcdc_on(struct platform_device *pdev)
-{
- int lcdc_width;
- int lcdc_height;
- int lcdc_bpp;
- int lcdc_border_clr;
- int lcdc_underflow_clr;
- int lcdc_hsync_skew;
-
- int hsync_period;
- int hsync_ctrl;
- int vsync_period;
- int display_hctl;
- int display_v_start;
- int display_v_end;
- int active_hctl;
- int active_h_start;
- int active_h_end;
- int active_v_start;
- int active_v_end;
- int ctrl_polarity;
- int h_back_porch;
- int h_front_porch;
- int v_back_porch;
- int v_front_porch;
- int hsync_pulse_width;
- int vsync_pulse_width;
- int hsync_polarity;
- int vsync_polarity;
- int data_en_polarity;
- int hsync_start_x;
- int hsync_end_x;
- uint8 *buf;
- int bpp, ptype;
- uint32 format;
- struct fb_info *fbi;
- struct fb_var_screeninfo *var;
- struct msm_fb_data_type *mfd;
- struct mdp4_overlay_pipe *pipe;
- int ret;
-
- mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
-
- if (!mfd)
- return -ENODEV;
-
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- fbi = mfd->fbi;
- var = &fbi->var;
-
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- bpp = fbi->var.bits_per_pixel / 8;
- buf = (uint8 *) fbi->fix.smem_start;
- buf += fbi->var.xoffset * bpp +
- fbi->var.yoffset * fbi->fix.line_length;
-
- if (bpp == 2)
- format = MDP_RGB_565;
- else if (bpp == 3)
- format = MDP_RGB_888;
- else
- format = MDP_ARGB_8888;
-
-
- if (lcdc_pipe == NULL) {
- ptype = mdp4_overlay_format2type(format);
- pipe = mdp4_overlay_pipe_alloc();
- pipe->pipe_type = ptype;
- /* use RGB1 pipe */
- pipe->pipe_num = OVERLAY_PIPE_RGB1;
- pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
- pipe->mixer_num = MDP4_MIXER0;
- pipe->src_format = format;
- mdp4_overlay_format2pipe(pipe);
-
- lcdc_pipe = pipe; /* keep it */
- } else {
- pipe = lcdc_pipe;
- }
-
- pipe->src_height = fbi->var.yres;
- pipe->src_width = fbi->var.xres;
- pipe->src_h = fbi->var.yres;
- pipe->src_w = fbi->var.xres;
- pipe->src_y = 0;
- pipe->src_x = 0;
- pipe->srcp0_addr = (uint32) buf;
- pipe->srcp0_ystride = fbi->fix.line_length;
-
- mdp4_overlay_dmap_xy(pipe);
- mdp4_overlay_dmap_cfg(mfd, 1);
-
- mdp4_overlay_rgb_setup(pipe);
-
- mdp4_mixer_stage_up(pipe);
-
- mdp4_overlayproc_cfg(pipe);
-
- /*
- * LCDC timing setting
- */
- h_back_porch = var->left_margin;
- h_front_porch = var->right_margin;
- v_back_porch = var->upper_margin;
- v_front_porch = var->lower_margin;
- hsync_pulse_width = var->hsync_len;
- vsync_pulse_width = var->vsync_len;
- lcdc_border_clr = mfd->panel_info.lcdc.border_clr;
- lcdc_underflow_clr = mfd->panel_info.lcdc.underflow_clr;
- lcdc_hsync_skew = mfd->panel_info.lcdc.hsync_skew;
-
- lcdc_width = mfd->panel_info.xres;
- lcdc_height = mfd->panel_info.yres;
- lcdc_bpp = mfd->panel_info.bpp;
-
- hsync_period =
- hsync_pulse_width + h_back_porch + lcdc_width + h_front_porch;
- hsync_ctrl = (hsync_period << 16) | hsync_pulse_width;
- hsync_start_x = hsync_pulse_width + h_back_porch;
- hsync_end_x = hsync_period - h_front_porch - 1;
- display_hctl = (hsync_end_x << 16) | hsync_start_x;
-
- vsync_period =
- (vsync_pulse_width + v_back_porch + lcdc_height +
- v_front_porch) * hsync_period;
- display_v_start =
- (vsync_pulse_width + v_back_porch) * hsync_period + lcdc_hsync_skew;
- display_v_end =
- vsync_period - (v_front_porch * hsync_period) + lcdc_hsync_skew - 1;
-
- if (lcdc_width != var->xres) {
- active_h_start = hsync_start_x + first_pixel_start_x;
- active_h_end = active_h_start + var->xres - 1;
- active_hctl =
- ACTIVE_START_X_EN | (active_h_end << 16) | active_h_start;
- } else {
- active_hctl = 0;
- }
-
- if (lcdc_height != var->yres) {
- active_v_start =
- display_v_start + first_pixel_start_y * hsync_period;
- active_v_end = active_v_start + (var->yres) * hsync_period - 1;
- active_v_start |= ACTIVE_START_Y_EN;
- } else {
- active_v_start = 0;
- active_v_end = 0;
- }
-
-
-#ifdef CONFIG_FB_MSM_MDP40
- hsync_polarity = 1;
- vsync_polarity = 1;
- lcdc_underflow_clr |= 0x80000000; /* enable recovery */
-#else
- hsync_polarity = 0;
- vsync_polarity = 0;
-#endif
- data_en_polarity = 0;
-
- ctrl_polarity =
- (data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
-
- MDP_OUTP(MDP_BASE + LCDC_BASE + 0x4, hsync_ctrl);
- MDP_OUTP(MDP_BASE + LCDC_BASE + 0x8, vsync_period);
- MDP_OUTP(MDP_BASE + LCDC_BASE + 0xc, vsync_pulse_width * hsync_period);
- MDP_OUTP(MDP_BASE + LCDC_BASE + 0x10, display_hctl);
- MDP_OUTP(MDP_BASE + LCDC_BASE + 0x14, display_v_start);
- MDP_OUTP(MDP_BASE + LCDC_BASE + 0x18, display_v_end);
- MDP_OUTP(MDP_BASE + LCDC_BASE + 0x28, lcdc_border_clr);
- MDP_OUTP(MDP_BASE + LCDC_BASE + 0x2c, lcdc_underflow_clr);
- MDP_OUTP(MDP_BASE + LCDC_BASE + 0x30, lcdc_hsync_skew);
- MDP_OUTP(MDP_BASE + LCDC_BASE + 0x38, ctrl_polarity);
- MDP_OUTP(MDP_BASE + LCDC_BASE + 0x1c, active_hctl);
- MDP_OUTP(MDP_BASE + LCDC_BASE + 0x20, active_v_start);
- MDP_OUTP(MDP_BASE + LCDC_BASE + 0x24, active_v_end);
-
- ret = panel_next_on(pdev);
- if (ret == 0) {
- /* enable LCDC block */
- MDP_OUTP(MDP_BASE + LCDC_BASE, 1);
- mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- }
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-
- return ret;
-}
-
-int mdp_lcdc_off(struct platform_device *pdev)
-{
- int ret = 0;
- struct mdp4_overlay_pipe *pipe;
-
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- MDP_OUTP(MDP_BASE + LCDC_BASE, 0);
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-
- ret = panel_next_off(pdev);
-
- /* delay to make sure the last frame finishes */
- mdelay(100);
-
- /* dis-engage rgb0 from mixer */
- pipe = lcdc_pipe;
- mdp4_mixer_stage_down(pipe);
-
- return ret;
-}
-
-/*
- * mdp4_overlay0_done_lcdc: called from isr
- */
-void mdp4_overlay0_done_lcdc(void)
-{
- complete(&lcdc_pipe->comp);
-}
-
-void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd)
-{
- struct fb_info *fbi = mfd->fbi;
- uint8 *buf;
- int bpp;
- unsigned long flag;
- struct mdp4_overlay_pipe *pipe;
-
- if (!mfd->panel_power_on)
- return;
-
- /* no need to power on cmd block since it's lcdc mode */
- bpp = fbi->var.bits_per_pixel / 8;
- buf = (uint8 *) fbi->fix.smem_start;
- buf += fbi->var.xoffset * bpp +
- fbi->var.yoffset * fbi->fix.line_length;
-
- mutex_lock(&mfd->dma->ov_mutex);
-
- pipe = lcdc_pipe;
- pipe->srcp0_addr = (uint32) buf;
- mdp4_overlay_rgb_setup(pipe);
- mdp4_overlay_reg_flush(pipe, 1); /* rgb1 and mixer0 */
-
- /* enable irq */
- spin_lock_irqsave(&mdp_spin_lock, flag);
- mdp_enable_irq(MDP_OVERLAY0_TERM);
- INIT_COMPLETION(lcdc_pipe->comp);
- mfd->dma->waiting = TRUE;
- outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
- mdp_intr_mask |= INTR_OVERLAY0_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
- wait_for_completion_killable(&lcdc_pipe->comp);
- mdp_disable_irq(MDP_OVERLAY0_TERM);
-
- mutex_unlock(&mfd->dma->ov_mutex);
-}
diff --git a/drivers/staging/msm/mdp4_overlay_mddi.c b/drivers/staging/msm/mdp4_overlay_mddi.c
deleted file mode 100644
index be1b2874185..00000000000
--- a/drivers/staging/msm/mdp4_overlay_mddi.c
+++ /dev/null
@@ -1,254 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
-#include <linux/delay.h>
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/spinlock.h>
-
-#include <linux/fb.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-#include "mdp4.h"
-
-static struct mdp4_overlay_pipe *mddi_pipe;
-static struct mdp4_overlay_pipe *pending_pipe;
-static struct msm_fb_data_type *mddi_mfd;
-
-#define WHOLESCREEN
-
-void mdp4_overlay_update_lcd(struct msm_fb_data_type *mfd)
-{
- MDPIBUF *iBuf = &mfd->ibuf;
- uint8 *src;
- int bpp, ptype;
- uint32 format;
- uint32 mddi_ld_param;
- uint16 mddi_vdo_packet_reg;
- struct mdp4_overlay_pipe *pipe;
-
- if (mfd->key != MFD_KEY)
- return;
-
- mddi_mfd = mfd; /* keep it */
-
- bpp = iBuf->bpp;
-
- if (bpp == 2)
- format = MDP_RGB_565;
- else if (bpp == 3)
- format = MDP_RGB_888;
- else
- format = MDP_ARGB_8888;
-
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- if (mddi_pipe == NULL) {
- ptype = mdp4_overlay_format2type(format);
- pipe = mdp4_overlay_pipe_alloc();
- pipe->pipe_type = ptype;
- /* use RGB1 pipe */
- pipe->pipe_num = OVERLAY_PIPE_RGB1;
- pipe->mixer_num = MDP4_MIXER0;
- pipe->src_format = format;
- mdp4_overlay_format2pipe(pipe);
-
- mddi_pipe = pipe; /* keep it */
-
- mddi_ld_param = 0;
- mddi_vdo_packet_reg = mfd->panel_info.mddi.vdopkt;
-
- if (mfd->panel_info.type == MDDI_PANEL) {
- if (mfd->panel_info.pdest == DISPLAY_1)
- mddi_ld_param = 0;
- else
- mddi_ld_param = 1;
- } else {
- mddi_ld_param = 2;
- }
-
- MDP_OUTP(MDP_BASE + 0x00090, mddi_ld_param);
- MDP_OUTP(MDP_BASE + 0x00094,
- (MDDI_VDO_PACKET_DESC << 16) | mddi_vdo_packet_reg);
- } else {
- pipe = mddi_pipe;
- }
-
-
- src = (uint8 *) iBuf->buf;
-
-#ifdef WHOLESCREEN
- {
- struct fb_info *fbi;
-
- fbi = mfd->fbi;
- pipe->src_height = fbi->var.yres;
- pipe->src_width = fbi->var.xres;
- pipe->src_h = fbi->var.yres;
- pipe->src_w = fbi->var.xres;
- pipe->src_y = 0;
- pipe->src_x = 0;
- pipe->dst_h = fbi->var.yres;
- pipe->dst_w = fbi->var.xres;
- pipe->dst_y = 0;
- pipe->dst_x = 0;
- pipe->srcp0_addr = (uint32)src;
- pipe->srcp0_ystride = fbi->var.xres_virtual * bpp;
- }
-
-#else
- if (mdp4_overlay_active(MDP4_MIXER0)) {
- struct fb_info *fbi;
-
- fbi = mfd->fbi;
- pipe->src_height = fbi->var.yres;
- pipe->src_width = fbi->var.xres;
- pipe->src_h = fbi->var.yres;
- pipe->src_w = fbi->var.xres;
- pipe->src_y = 0;
- pipe->src_x = 0;
- pipe->dst_h = fbi->var.yres;
- pipe->dst_w = fbi->var.xres;
- pipe->dst_y = 0;
- pipe->dst_x = 0;
- pipe->srcp0_addr = (uint32) src;
- pipe->srcp0_ystride = fbi->var.xres_virtual * bpp;
- } else {
- /* starting input address */
- src += (iBuf->dma_x + iBuf->dma_y * iBuf->ibuf_width) * bpp;
-
- pipe->src_height = iBuf->dma_h;
- pipe->src_width = iBuf->dma_w;
- pipe->src_h = iBuf->dma_h;
- pipe->src_w = iBuf->dma_w;
- pipe->src_y = 0;
- pipe->src_x = 0;
- pipe->dst_h = iBuf->dma_h;
- pipe->dst_w = iBuf->dma_w;
- pipe->dst_y = iBuf->dma_y;
- pipe->dst_x = iBuf->dma_x;
- pipe->srcp0_addr = (uint32) src;
- pipe->srcp0_ystride = iBuf->ibuf_width * bpp;
- }
-#endif
-
- pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
-
- mdp4_overlay_rgb_setup(pipe);
-
- mdp4_mixer_stage_up(pipe);
-
- mdp4_overlayproc_cfg(pipe);
-
- mdp4_overlay_dmap_xy(pipe);
-
- mdp4_overlay_dmap_cfg(mfd, 0);
-
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-
-}
-
-/*
- * mdp4_overlay0_done_mddi: called from isr
- */
-void mdp4_overlay0_done_mddi(void)
-{
- if (pending_pipe)
- complete(&pending_pipe->comp);
-}
-
-void mdp4_mddi_overlay_restore(void)
-{
- /* mutex held by caller */
- mdp4_overlay_update_lcd(mddi_mfd);
- mdp4_mddi_overlay_kickoff(mddi_mfd, mddi_pipe);
-}
-
-void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
- struct mdp4_overlay_pipe *pipe)
-{
-#ifdef MDP4_NONBLOCKING
- unsigned long flag;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- if (mfd->dma->busy == TRUE) {
- INIT_COMPLETION(pipe->comp);
- pending_pipe = pipe;
- }
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- if (pending_pipe != NULL) {
- /* wait until DMA finishes the current job */
- wait_for_completion_killable(&pipe->comp);
- pending_pipe = NULL;
- }
- down(&mfd->sem);
- mdp_enable_irq(MDP_OVERLAY0_TERM);
- mfd->dma->busy = TRUE;
- /* start OVERLAY pipe */
- mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
- up(&mfd->sem);
-#else
- down(&mfd->sem);
- mdp_enable_irq(MDP_OVERLAY0_TERM);
- mfd->dma->busy = TRUE;
- INIT_COMPLETION(pipe->comp);
- pending_pipe = pipe;
-
- /* start OVERLAY pipe */
- mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
- up(&mfd->sem);
-
- /* wait until DMA finishes the current job */
- wait_for_completion_killable(&pipe->comp);
- mdp_disable_irq(MDP_OVERLAY0_TERM);
-#endif
-
-}
-
-void mdp4_mddi_overlay(struct msm_fb_data_type *mfd)
-{
- mutex_lock(&mfd->dma->ov_mutex);
-
- if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
- mdp4_overlay_update_lcd(mfd);
-
- mdp4_mddi_overlay_kickoff(mfd, mddi_pipe);
-
- /* signal if pan function is waiting for the update completion */
- if (mfd->pan_waiting) {
- mfd->pan_waiting = FALSE;
- complete(&mfd->pan_comp);
- }
- }
-
- mutex_unlock(&mfd->dma->ov_mutex);
-}
diff --git a/drivers/staging/msm/mdp4_util.c b/drivers/staging/msm/mdp4_util.c
deleted file mode 100644
index fd97f520599..00000000000
--- a/drivers/staging/msm/mdp4_util.c
+++ /dev/null
@@ -1,1686 +0,0 @@
-
-/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/hrtimer.h>
-#include <linux/clk.h>
-#include <mach/hardware.h>
-#include <linux/io.h>
-#include <linux/debugfs.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/uaccess.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-#include "mdp4.h"
-
-void mdp4_sw_reset(ulong bits)
-{
- bits &= 0x1f; /* 5 bits */
- outpdw(MDP_BASE + 0x001c, bits); /* MDP_SW_RESET */
-
- while (inpdw(MDP_BASE + 0x001c) & bits) /* self clear when complete */
- ;
- MSM_FB_INFO("mdp4_sw_reset: 0x%x\n", (int)bits);
-}
-
-void mdp4_overlay_cfg(int overlayer, int blt_mode, int refresh, int direct_out)
-{
- ulong bits = 0;
-
- if (blt_mode)
- bits |= (1 << 3);
- refresh &= 0x03; /* 2 bits */
- bits |= (refresh << 1);
- direct_out &= 0x01;
- bits |= direct_out;
-
- if (overlayer == MDP4_MIXER0)
- outpdw(MDP_BASE + 0x10004, bits); /* MDP_OVERLAY0_CFG */
- else
- outpdw(MDP_BASE + 0x18004, bits); /* MDP_OVERLAY1_CFG */
-
- MSM_FB_INFO("mdp4_overlay_cfg: 0x%x\n", (int)inpdw(MDP_BASE + 0x10004));
-}
-
-void mdp4_display_intf_sel(int output, ulong intf)
-{
- ulong bits, mask;
-
- bits = inpdw(MDP_BASE + 0x0038); /* MDP_DISP_INTF_SEL */
-
- mask = 0x03; /* 2 bits */
- intf &= 0x03; /* 2 bits */
-
- switch (output) {
- case EXTERNAL_INTF_SEL:
- intf <<= 4;
- mask <<= 4;
- break;
- case SECONDARY_INTF_SEL:
- intf &= 0x02; /* only MDDI and EBI2 support */
- intf <<= 2;
- mask <<= 2;
- break;
- default:
- break;
- }
-
-
- bits &= ~mask;
- bits |= intf;
-
- outpdw(MDP_BASE + 0x0038, bits); /* MDP_DISP_INTF_SEL */
-
- MSM_FB_INFO("mdp4_display_intf_sel: 0x%x\n", (int)inpdw(MDP_BASE + 0x0038));
-}
-
-unsigned long mdp4_display_status(void)
-{
- return inpdw(MDP_BASE + 0x0018) & 0x3ff; /* MDP_DISPLAY_STATUS */
-}
-
-void mdp4_ebi2_lcd_setup(int lcd, ulong base, int ystride)
-{
- /* always use memory map */
- ystride &= 0x01fff; /* 13 bits */
- if (lcd == EBI2_LCD0) {
- outpdw(MDP_BASE + 0x0060, base);/* MDP_EBI2_LCD0 */
- outpdw(MDP_BASE + 0x0068, ystride);/* MDP_EBI2_LCD0_YSTRIDE */
- } else {
- outpdw(MDP_BASE + 0x0064, base);/* MDP_EBI2_LCD1 */
- outpdw(MDP_BASE + 0x006c, ystride);/* MDP_EBI2_LCD1_YSTRIDE */
- }
-}
-
-void mdp4_mddi_setup(int mddi, unsigned long id)
-{
- ulong bits;
-
- if (mddi == MDDI_EXTERNAL_SET)
- bits = 0x02;
- else if (mddi == MDDI_SECONDARY_SET)
- bits = 0x01;
- else
- bits = 0; /* PRIMARY_SET */
-
- id <<= 16;
-
- bits |= id;
-
- outpdw(MDP_BASE + 0x0090, bits); /* MDP_MDDI_PARAM_WR_SEL */
-}
-
-int mdp_ppp_blit(struct fb_info *info, struct mdp_blit_req *req,
- struct file **pp_src_file, struct file **pp_dst_file)
-{
-
- /* not implemented yet */
- return -1;
-}
-
-void mdp4_hw_init(void)
-{
- ulong bits;
-
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
-#ifdef MDP4_ERROR
- /*
- * Issuing a software reset on DMA_P causes the DMA_P dma engine to
- * stall in LCDC mode. However, DMA_P does not stall in MDDI mode.
- * This needs further investigation.
- */
- mdp4_sw_reset(0x17);
-#endif
-
- mdp4_clear_lcdc();
-
- mdp4_mixer_blend_init(0);
- mdp4_mixer_blend_init(1);
- mdp4_vg_qseed_init(0);
- mdp4_vg_qseed_init(1);
- mdp4_vg_csc_mv_setup(0);
- mdp4_vg_csc_mv_setup(1);
- mdp4_vg_csc_pre_bv_setup(0);
- mdp4_vg_csc_pre_bv_setup(1);
- mdp4_vg_csc_post_bv_setup(0);
- mdp4_vg_csc_post_bv_setup(1);
- mdp4_vg_csc_pre_lv_setup(0);
- mdp4_vg_csc_pre_lv_setup(1);
- mdp4_vg_csc_post_lv_setup(0);
- mdp4_vg_csc_post_lv_setup(1);
-
- mdp4_mixer_gc_lut_setup(0);
- mdp4_mixer_gc_lut_setup(1);
-
- mdp4_vg_igc_lut_setup(0);
- mdp4_vg_igc_lut_setup(1);
-
- mdp4_rgb_igc_lut_setup(0);
- mdp4_rgb_igc_lut_setup(1);
-
- outp32(MDP_EBI2_PORTMAP_MODE, 0x3);
-
- /* system interrupts */
-
- bits = mdp_intr_mask;
- outpdw(MDP_BASE + 0x0050, bits);/* enable specified interrupts */
-
- /* histogram */
- MDP_OUTP(MDP_BASE + 0x95010, 1); /* auto clear HIST */
-
- /* enable histogram interrupts */
- outpdw(MDP_BASE + 0x9501c, INTR_HIST_DONE);
-
- /* For the max read pending cmd config below, if the MDP clock */
- /* is less than the AXI clock, then we must use 3 pending */
- /* requests. Otherwise, we should use 8 pending requests. */
- /* In the future we should do this detection automatically. */
-
- /* max read pending cmd config */
- outpdw(MDP_BASE + 0x004c, 0x02222); /* 3 pending requests */
-
- /* dma_p fetch config */
- outpdw(MDP_BASE + 0x91004, 0x27); /* burst size of 8 */
-
-#ifndef CONFIG_FB_MSM_OVERLAY
- /* both REFRESH_MODE and DIRECT_OUT are ignored at BLT mode */
- mdp4_overlay_cfg(MDP4_MIXER0, OVERLAY_MODE_BLT, 0, 0);
- mdp4_overlay_cfg(MDP4_MIXER1, OVERLAY_MODE_BLT, 0, 0);
-#endif
-
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-}
-
-
-void mdp4_clear_lcdc(void)
-{
- uint32 bits;
-
- bits = inpdw(MDP_BASE + 0xc0000);
- if (bits & 0x01) /* enabled already */
- return;
-
- outpdw(MDP_BASE + 0xc0004, 0); /* vsync ctrl out */
- outpdw(MDP_BASE + 0xc0008, 0); /* vsync period */
- outpdw(MDP_BASE + 0xc000c, 0); /* vsync pulse width */
- outpdw(MDP_BASE + 0xc0010, 0); /* lcdc display HCTL */
- outpdw(MDP_BASE + 0xc0014, 0); /* lcdc display v start */
- outpdw(MDP_BASE + 0xc0018, 0); /* lcdc display v end */
- outpdw(MDP_BASE + 0xc001c, 0); /* lcdc active hctl */
- outpdw(MDP_BASE + 0xc0020, 0); /* lcdc active v start */
- outpdw(MDP_BASE + 0xc0024, 0); /* lcdc active v end */
- outpdw(MDP_BASE + 0xc0028, 0); /* lcdc border color */
- outpdw(MDP_BASE + 0xc002c, 0); /* lcdc underflow ctrl */
- outpdw(MDP_BASE + 0xc0030, 0); /* lcdc hsync skew */
- outpdw(MDP_BASE + 0xc0034, 0); /* lcdc test ctl */
- outpdw(MDP_BASE + 0xc0038, 0); /* lcdc ctl polarity */
-}
-
-static struct mdp_dma_data overlay1_data;
-static int intr_dma_p;
-static int intr_dma_s;
-static int intr_dma_e;
-static int intr_overlay0;
-static int intr_overlay1;
-
-irqreturn_t mdp4_isr(int irq, void *ptr)
-{
- uint32 isr, mask, lcdc;
- struct mdp_dma_data *dma;
-
- mdp_is_in_isr = TRUE;
-
- while (1) {
- isr = inpdw(MDP_INTR_STATUS);
- if (isr == 0)
- break;
-
- mask = inpdw(MDP_INTR_ENABLE);
- outpdw(MDP_INTR_CLEAR, isr);
-
- isr &= mask;
-
- if (unlikely(isr == 0))
- break;
-
- if (isr & INTR_DMA_P_DONE) {
- intr_dma_p++;
- lcdc = inpdw(MDP_BASE + 0xc0000);
- dma = &dma2_data;
- if (lcdc & 0x01) { /* LCDC enable */
- /* disable LCDC interrupt */
- mdp_intr_mask &= ~INTR_DMA_P_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->waiting = FALSE;
- } else {
- dma->busy = FALSE;
- mdp_pipe_ctrl(MDP_DMA2_BLOCK,
- MDP_BLOCK_POWER_OFF, TRUE);
- }
- complete(&dma->comp);
- }
- if (isr & INTR_DMA_S_DONE) {
- intr_dma_s++;
- dma = &dma_s_data;
- dma->busy = FALSE;
- mdp_pipe_ctrl(MDP_DMA_S_BLOCK,
- MDP_BLOCK_POWER_OFF, TRUE);
- complete(&dma->comp);
- }
- if (isr & INTR_DMA_E_DONE) {
- intr_dma_e++;
- dma = &dma_e_data;
- mdp_intr_mask &= ~INTR_DMA_E_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->busy = FALSE;
-
- if (dma->waiting) {
- dma->waiting = FALSE;
- complete(&dma->comp);
- }
- }
- if (isr & INTR_OVERLAY0_DONE) {
- intr_overlay0++;
- lcdc = inpdw(MDP_BASE + 0xc0000);
- dma = &dma2_data;
- if (lcdc & 0x01) { /* LCDC enable */
- /* disable LCDC interrupt */
- mdp_intr_mask &= ~INTR_OVERLAY0_DONE;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- dma->waiting = FALSE;
- mdp4_overlay0_done_lcdc();
- } else { /* MDDI */
- dma->busy = FALSE;
-#ifdef MDP4_NONBLOCKING
- mdp_disable_irq_nolock(MDP_OVERLAY0_TERM);
-#endif
- mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK,
- MDP_BLOCK_POWER_OFF, TRUE);
- mdp4_overlay0_done_mddi();
- }
- }
- if (isr & INTR_OVERLAY1_DONE) {
- intr_overlay1++;
- dma = &overlay1_data;
- dma->busy = FALSE;
- mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK,
- MDP_BLOCK_POWER_OFF, TRUE);
- complete(&dma->comp);
- }
- if (isr & INTR_DMA_P_HISTOGRAM) {
- isr = inpdw(MDP_DMA_P_HIST_INTR_STATUS);
- mask = inpdw(MDP_DMA_P_HIST_INTR_ENABLE);
- outpdw(MDP_DMA_P_HIST_INTR_CLEAR, isr);
- isr &= mask;
- if (isr & INTR_HIST_DONE) {
- if (mdp_hist.r)
- memcpy(mdp_hist.r, MDP_BASE + 0x95100,
- mdp_hist.bin_cnt*4);
- if (mdp_hist.g)
- memcpy(mdp_hist.g, MDP_BASE + 0x95200,
- mdp_hist.bin_cnt*4);
- if (mdp_hist.b)
- memcpy(mdp_hist.b, MDP_BASE + 0x95300,
- mdp_hist.bin_cnt*4);
- complete(&mdp_hist_comp);
- }
- }
- }
-
- mdp_is_in_isr = FALSE;
-
- return IRQ_HANDLED;
-}
-
-
-/*
- * QSEED tables
- */
-
-static uint32 vg_qseed_table0[] = {
- 0x5556aaff, 0x00000000, 0x00000000, 0x00000000
-};
-
-static uint32 vg_qseed_table1[] = {
- 0x76543210, 0xfedcba98
-};
-
-static uint32 vg_qseed_table2[] = {
- 0x02000000, 0x00000000, 0x02060ff2, 0x00000008,
- 0x02090fe4, 0x00000013, 0x020a0fd9, 0x0ffc0021,
- 0x02080fce, 0x0ffa0030, 0x02030fc5, 0x0ff60042,
- 0x01fd0fbe, 0x0ff10054, 0x01f50fb6, 0x0fed0068,
- 0x01e90fb1, 0x0fe60080, 0x01dc0fae, 0x0fe10095,
- 0x01ca0fae, 0x0fda00ae, 0x01b70fad, 0x0fd600c6,
- 0x01a40fad, 0x0fcf00e0, 0x018f0faf, 0x0fc800fa,
- 0x01780fb1, 0x0fc30114, 0x015f0fb5, 0x0fbf012d,
- 0x01490fb7, 0x0fb70149, 0x012d0fbf, 0x0fb5015f,
- 0x01140fc3, 0x0fb10178, 0x00fa0fc8, 0x0faf018f,
- 0x00e00fcf, 0x0fad01a4, 0x00c60fd6, 0x0fad01b7,
- 0x00ae0fda, 0x0fae01ca, 0x00950fe1, 0x0fae01dc,
- 0x00800fe6, 0x0fb101e9, 0x00680fed, 0x0fb601f5,
- 0x00540ff1, 0x0fbe01fd, 0x00420ff6, 0x0fc50203,
- 0x00300ffa, 0x0fce0208, 0x00210ffc, 0x0fd9020a,
- 0x00130000, 0x0fe40209, 0x00080000, 0x0ff20206,
- 0x02000000, 0x00000000, 0x02040ff2, 0x0000000a,
- 0x02040fe4, 0x00000018, 0x02010fda, 0x0ffc0029,
- 0x01fc0fcf, 0x0ffa003b, 0x01f30fc7, 0x0ff60050,
- 0x01e90fc0, 0x0ff20065, 0x01dc0fba, 0x0fee007c,
- 0x01cc0fb6, 0x0fe80096, 0x01ba0fb4, 0x0fe400ae,
- 0x01a70fb4, 0x0fdd00c8, 0x018f0fb5, 0x0fda00e2,
- 0x017a0fb5, 0x0fd400fd, 0x01630fb8, 0x0fce0117,
- 0x014c0fba, 0x0fca0130, 0x01320fbf, 0x0fc70148,
- 0x011b0fc1, 0x0fc10163, 0x01010fc8, 0x0fc00177,
- 0x00e90fcd, 0x0fbd018d, 0x00d10fd1, 0x0fbc01a2,
- 0x00ba0fd7, 0x0fbb01b4, 0x00a30fdd, 0x0fbc01c4,
- 0x008e0fe1, 0x0fbd01d4, 0x00790fe7, 0x0fbe01e2,
- 0x00670feb, 0x0fc001ee, 0x00540ff1, 0x0fc501f6,
- 0x00430ff4, 0x0fcb01fe, 0x00340ff8, 0x0fd10203,
- 0x00260ffb, 0x0fd80207, 0x001a0ffd, 0x0fe10208,
- 0x000f0000, 0x0fea0207, 0x00060000, 0x0ff50205,
- 0x02000000, 0x00000000, 0x02020ff2, 0x0000000c,
- 0x02000fe4, 0x0000001c, 0x01fa0fda, 0x0ffc0030,
- 0x01f10fd0, 0x0ffa0045, 0x01e50fc8, 0x0ff6005d,
- 0x01d60fc3, 0x0ff30074, 0x01c60fbd, 0x0fef008e,
- 0x01b30fba, 0x0fe900aa, 0x019e0fb9, 0x0fe500c4,
- 0x01870fba, 0x0fe000df, 0x016f0fbb, 0x0fdd00f9,
- 0x01580fbc, 0x0fd80114, 0x01400fbf, 0x0fd3012e,
- 0x01280fc2, 0x0fd00146, 0x010f0fc6, 0x0fce015d,
- 0x00f90fc9, 0x0fc90175, 0x00e00fcf, 0x0fc90188,
- 0x00ca0fd4, 0x0fc6019c, 0x00b40fd8, 0x0fc601ae,
- 0x009f0fdd, 0x0fc501bf, 0x008b0fe3, 0x0fc601cc,
- 0x00780fe6, 0x0fc701db, 0x00660feb, 0x0fc801e7,
- 0x00560fef, 0x0fcb01f0, 0x00460ff3, 0x0fcf01f8,
- 0x00380ff6, 0x0fd401fe, 0x002c0ff9, 0x0fd90202,
- 0x00200ffc, 0x0fdf0205, 0x00160ffe, 0x0fe60206,
- 0x000c0000, 0x0fed0207, 0x00050000, 0x0ff70204,
- 0x02000000, 0x00000000, 0x01fe0ff3, 0x0000000f,
- 0x01f60fe5, 0x00000025, 0x01ea0fdb, 0x0ffd003e,
- 0x01db0fd2, 0x0ffb0058, 0x01c80fcc, 0x0ff70075,
- 0x01b50fc7, 0x0ff40090, 0x01a00fc3, 0x0ff000ad,
- 0x01880fc1, 0x0feb00cc, 0x01700fc1, 0x0fe800e7,
- 0x01550fc3, 0x0fe40104, 0x013b0fc5, 0x0fe2011e,
- 0x01240fc6, 0x0fde0138, 0x010c0fca, 0x0fda0150,
- 0x00f40fcd, 0x0fd90166, 0x00dd0fd1, 0x0fd7017b,
- 0x00c80fd4, 0x0fd40190, 0x00b20fd9, 0x0fd401a1,
- 0x009f0fdd, 0x0fd301b1, 0x008c0fe1, 0x0fd301c0,
- 0x007b0fe5, 0x0fd301cd, 0x006a0fea, 0x0fd401d8,
- 0x005c0fec, 0x0fd501e3, 0x004d0ff0, 0x0fd601ed,
- 0x00410ff3, 0x0fd801f4, 0x00340ff7, 0x0fdb01fa,
- 0x002a0ff9, 0x0fdf01fe, 0x00200ffb, 0x0fe30202,
- 0x00180ffd, 0x0fe70204, 0x00100ffe, 0x0fed0205,
- 0x00090000, 0x0ff20205, 0x00040000, 0x0ff90203,
- 0x02000000, 0x00000000, 0x02050ff5, 0x00000006,
- 0x02070fea, 0x0000000f, 0x02080fe1, 0x0ffd001a,
- 0x02070fd8, 0x0ffb0026, 0x02030fd1, 0x0ff80034,
- 0x01fe0fcb, 0x0ff40043, 0x01f60fc5, 0x0ff10054,
- 0x01ee0fc0, 0x0feb0067, 0x01e20fbe, 0x0fe70079,
- 0x01d40fbd, 0x0fe1008e, 0x01c40fbc, 0x0fdd00a3,
- 0x01b40fbb, 0x0fd700ba, 0x01a20fbc, 0x0fd100d1,
- 0x018d0fbd, 0x0fcd00e9, 0x01770fc0, 0x0fc80101,
- 0x01630fc1, 0x0fc1011b, 0x01480fc7, 0x0fbf0132,
- 0x01300fca, 0x0fba014c, 0x01170fce, 0x0fb80163,
- 0x00fd0fd4, 0x0fb5017a, 0x00e20fda, 0x0fb5018f,
- 0x00c80fdd, 0x0fb401a7, 0x00ae0fe4, 0x0fb401ba,
- 0x00960fe8, 0x0fb601cc, 0x007c0fee, 0x0fba01dc,
- 0x00650ff2, 0x0fc001e9, 0x00500ff6, 0x0fc701f3,
- 0x003b0ffa, 0x0fcf01fc, 0x00290ffc, 0x0fda0201,
- 0x00180000, 0x0fe40204, 0x000a0000, 0x0ff20204,
- 0x02000000, 0x00000000, 0x02030ff5, 0x00000008,
- 0x02030fea, 0x00000013, 0x02020fe1, 0x0ffd0020,
- 0x01fc0fd9, 0x0ffc002f, 0x01f60fd2, 0x0ff80040,
- 0x01ed0fcd, 0x0ff50051, 0x01e30fc7, 0x0ff10065,
- 0x01d70fc3, 0x0fec007a, 0x01c60fc2, 0x0fe9008f,
- 0x01b60fc1, 0x0fe300a6, 0x01a20fc1, 0x0fe000bd,
- 0x018f0fc1, 0x0fdb00d5, 0x017b0fc2, 0x0fd500ee,
- 0x01640fc4, 0x0fd20106, 0x014d0fc8, 0x0fce011d,
- 0x01370fc9, 0x0fc90137, 0x011d0fce, 0x0fc8014d,
- 0x01060fd2, 0x0fc40164, 0x00ee0fd5, 0x0fc2017b,
- 0x00d50fdb, 0x0fc1018f, 0x00bd0fe0, 0x0fc101a2,
- 0x00a60fe3, 0x0fc101b6, 0x008f0fe9, 0x0fc201c6,
- 0x007a0fec, 0x0fc301d7, 0x00650ff1, 0x0fc701e3,
- 0x00510ff5, 0x0fcd01ed, 0x00400ff8, 0x0fd201f6,
- 0x002f0ffc, 0x0fd901fc, 0x00200ffd, 0x0fe10202,
- 0x00130000, 0x0fea0203, 0x00080000, 0x0ff50203,
- 0x02000000, 0x00000000, 0x02020ff5, 0x00000009,
- 0x01ff0fea, 0x00000017, 0x01fb0fe2, 0x0ffd0026,
- 0x01f30fda, 0x0ffc0037, 0x01ea0fd3, 0x0ff8004b,
- 0x01df0fce, 0x0ff5005e, 0x01d10fc9, 0x0ff20074,
- 0x01c10fc6, 0x0fed008c, 0x01ae0fc5, 0x0fea00a3,
- 0x019b0fc5, 0x0fe500bb, 0x01850fc6, 0x0fe200d3,
- 0x01700fc6, 0x0fde00ec, 0x015a0fc8, 0x0fd90105,
- 0x01430fca, 0x0fd6011d, 0x012b0fcd, 0x0fd30135,
- 0x01150fcf, 0x0fcf014d, 0x00fc0fd4, 0x0fce0162,
- 0x00e50fd8, 0x0fcc0177, 0x00cf0fdb, 0x0fca018c,
- 0x00b80fe0, 0x0fc9019f, 0x00a20fe5, 0x0fca01af,
- 0x008e0fe8, 0x0fcb01bf, 0x00790fec, 0x0fcb01d0,
- 0x00670fef, 0x0fcd01dd, 0x00550ff4, 0x0fd001e7,
- 0x00440ff7, 0x0fd501f0, 0x00350ffa, 0x0fda01f7,
- 0x00270ffc, 0x0fdf01fe, 0x001b0ffe, 0x0fe70200,
- 0x00100000, 0x0fee0202, 0x00060000, 0x0ff70203,
- 0x02000000, 0x00000000, 0x01ff0ff5, 0x0000000c,
- 0x01f80fea, 0x0000001e, 0x01ef0fe2, 0x0ffd0032,
- 0x01e20fdb, 0x0ffc0047, 0x01d30fd5, 0x0ff9005f,
- 0x01c20fd1, 0x0ff60077, 0x01b00fcd, 0x0ff30090,
- 0x019b0fcb, 0x0fef00ab, 0x01850fcb, 0x0fec00c4,
- 0x016e0fcc, 0x0fe800de, 0x01550fcd, 0x0fe600f8,
- 0x013f0fce, 0x0fe20111, 0x01280fd0, 0x0fdf0129,
- 0x01110fd2, 0x0fdd0140, 0x00f90fd6, 0x0fdb0156,
- 0x00e40fd8, 0x0fd8016c, 0x00cd0fdd, 0x0fd8017e,
- 0x00b80fe0, 0x0fd60192, 0x00a40fe3, 0x0fd601a3,
- 0x00910fe7, 0x0fd501b3, 0x007f0feb, 0x0fd601c0,
- 0x006e0fed, 0x0fd701ce, 0x005d0ff1, 0x0fd701db,
- 0x004f0ff3, 0x0fd901e5, 0x00400ff7, 0x0fdc01ed,
- 0x00330ff9, 0x0fe001f4, 0x00280ffb, 0x0fe301fa,
- 0x001d0ffd, 0x0fe801fe, 0x00140ffe, 0x0fed0201,
- 0x000c0000, 0x0ff20202, 0x00050000, 0x0ff90202,
- 0x02000000, 0x00000000, 0x02040ff7, 0x00000005,
- 0x02070fed, 0x0000000c, 0x02060fe6, 0x0ffe0016,
- 0x02050fdf, 0x0ffc0020, 0x02020fd9, 0x0ff9002c,
- 0x01fe0fd4, 0x0ff60038, 0x01f80fcf, 0x0ff30046,
- 0x01f00fcb, 0x0fef0056, 0x01e70fc8, 0x0feb0066,
- 0x01db0fc7, 0x0fe60078, 0x01cc0fc6, 0x0fe3008b,
- 0x01bf0fc5, 0x0fdd009f, 0x01ae0fc6, 0x0fd800b4,
- 0x019c0fc6, 0x0fd400ca, 0x01880fc9, 0x0fcf00e0,
- 0x01750fc9, 0x0fc900f9, 0x015d0fce, 0x0fc6010f,
- 0x01460fd0, 0x0fc20128, 0x012e0fd3, 0x0fbf0140,
- 0x01140fd8, 0x0fbc0158, 0x00f90fdd, 0x0fbb016f,
- 0x00df0fe0, 0x0fba0187, 0x00c40fe5, 0x0fb9019e,
- 0x00aa0fe9, 0x0fba01b3, 0x008e0fef, 0x0fbd01c6,
- 0x00740ff3, 0x0fc301d6, 0x005d0ff6, 0x0fc801e5,
- 0x00450ffa, 0x0fd001f1, 0x00300ffc, 0x0fda01fa,
- 0x001c0000, 0x0fe40200, 0x000c0000, 0x0ff20202,
- 0x02000000, 0x00000000, 0x02030ff7, 0x00000006,
- 0x02020fee, 0x00000010, 0x02000fe7, 0x0ffe001b,
- 0x01fe0fdf, 0x0ffc0027, 0x01f70fda, 0x0ffa0035,
- 0x01f00fd5, 0x0ff70044, 0x01e70fd0, 0x0ff40055,
- 0x01dd0fcd, 0x0fef0067, 0x01d00fcb, 0x0fec0079,
- 0x01bf0fcb, 0x0fe8008e, 0x01af0fca, 0x0fe500a2,
- 0x019f0fc9, 0x0fe000b8, 0x018c0fca, 0x0fdb00cf,
- 0x01770fcc, 0x0fd800e5, 0x01620fce, 0x0fd400fc,
- 0x014d0fcf, 0x0fcf0115, 0x01350fd3, 0x0fcd012b,
- 0x011d0fd6, 0x0fca0143, 0x01050fd9, 0x0fc8015a,
- 0x00ec0fde, 0x0fc60170, 0x00d30fe2, 0x0fc60185,
- 0x00bb0fe5, 0x0fc5019b, 0x00a30fea, 0x0fc501ae,
- 0x008c0fed, 0x0fc601c1, 0x00740ff2, 0x0fc901d1,
- 0x005e0ff5, 0x0fce01df, 0x004b0ff8, 0x0fd301ea,
- 0x00370ffc, 0x0fda01f3, 0x00260ffd, 0x0fe201fb,
- 0x00170000, 0x0fea01ff, 0x00090000, 0x0ff50202,
- 0x02000000, 0x00000000, 0x02010ff7, 0x00000008,
- 0x01ff0fee, 0x00000013, 0x01fb0fe7, 0x0ffe0020,
- 0x01f60fe0, 0x0ffc002e, 0x01ed0fda, 0x0ffa003f,
- 0x01e40fd6, 0x0ff7004f, 0x01d80fd2, 0x0ff40062,
- 0x01ca0fcf, 0x0ff00077, 0x01bb0fcd, 0x0fed008b,
- 0x01a90fcd, 0x0fe900a1, 0x01960fcd, 0x0fe600b7,
- 0x01830fcd, 0x0fe200ce, 0x016d0fcf, 0x0fde00e6,
- 0x01580fd0, 0x0fdb00fd, 0x01410fd3, 0x0fd80114,
- 0x012c0fd4, 0x0fd4012c, 0x01140fd8, 0x0fd30141,
- 0x00fd0fdb, 0x0fd00158, 0x00e60fde, 0x0fcf016d,
- 0x00ce0fe2, 0x0fcd0183, 0x00b70fe6, 0x0fcd0196,
- 0x00a10fe9, 0x0fcd01a9, 0x008b0fed, 0x0fcd01bb,
- 0x00770ff0, 0x0fcf01ca, 0x00620ff4, 0x0fd201d8,
- 0x004f0ff7, 0x0fd601e4, 0x003f0ffa, 0x0fda01ed,
- 0x002e0ffc, 0x0fe001f6, 0x00200ffe, 0x0fe701fb,
- 0x00130000, 0x0fee01ff, 0x00080000, 0x0ff70201,
- 0x02000000, 0x00000000, 0x01ff0ff7, 0x0000000a,
- 0x01f90fee, 0x00000019, 0x01f10fe7, 0x0ffe002a,
- 0x01e60fe1, 0x0ffd003c, 0x01d90fdc, 0x0ffa0051,
- 0x01cc0fd8, 0x0ff70065, 0x01bb0fd5, 0x0ff5007b,
- 0x01a80fd3, 0x0ff10094, 0x01950fd2, 0x0fef00aa,
- 0x01800fd2, 0x0feb00c3, 0x016a0fd3, 0x0fe900da,
- 0x01540fd3, 0x0fe600f3, 0x013f0fd5, 0x0fe2010a,
- 0x01280fd7, 0x0fe00121, 0x01100fda, 0x0fde0138,
- 0x00fb0fdb, 0x0fdb014f, 0x00e40fdf, 0x0fdb0162,
- 0x00ce0fe2, 0x0fd90177, 0x00b90fe4, 0x0fd8018b,
- 0x00a50fe8, 0x0fd8019b, 0x00910fec, 0x0fd801ab,
- 0x007e0fee, 0x0fd801bc, 0x006c0ff2, 0x0fd901c9,
- 0x005c0ff4, 0x0fda01d6, 0x004b0ff7, 0x0fdd01e1,
- 0x003c0ff9, 0x0fe001eb, 0x002f0ffb, 0x0fe401f2,
- 0x00230ffd, 0x0fe801f8, 0x00180ffe, 0x0fed01fd,
- 0x000e0000, 0x0ff20200, 0x00060000, 0x0ff90201,
- 0x02000000, 0x00000000, 0x02030ff9, 0x00000004,
- 0x02050ff2, 0x00000009, 0x02050fed, 0x0ffe0010,
- 0x02040fe7, 0x0ffd0018, 0x02020fe3, 0x0ffb0020,
- 0x01fe0fdf, 0x0ff9002a, 0x01fa0fdb, 0x0ff70034,
- 0x01f40fd8, 0x0ff30041, 0x01ed0fd6, 0x0ff0004d,
- 0x01e30fd5, 0x0fec005c, 0x01d80fd4, 0x0fea006a,
- 0x01cd0fd3, 0x0fe5007b, 0x01c00fd3, 0x0fe1008c,
- 0x01b10fd3, 0x0fdd009f, 0x01a10fd4, 0x0fd900b2,
- 0x01900fd4, 0x0fd400c8, 0x017b0fd7, 0x0fd100dd,
- 0x01660fd9, 0x0fcd00f4, 0x01500fda, 0x0fca010c,
- 0x01380fde, 0x0fc60124, 0x011e0fe2, 0x0fc5013b,
- 0x01040fe4, 0x0fc30155, 0x00e70fe8, 0x0fc10170,
- 0x00cc0feb, 0x0fc10188, 0x00ad0ff0, 0x0fc301a0,
- 0x00900ff4, 0x0fc701b5, 0x00750ff7, 0x0fcc01c8,
- 0x00580ffb, 0x0fd201db, 0x003e0ffd, 0x0fdb01ea,
- 0x00250000, 0x0fe501f6, 0x000f0000, 0x0ff301fe,
- 0x02000000, 0x00000000, 0x02020ff9, 0x00000005,
- 0x02020ff2, 0x0000000c, 0x02010fed, 0x0ffe0014,
- 0x01fe0fe8, 0x0ffd001d, 0x01fa0fe3, 0x0ffb0028,
- 0x01f40fe0, 0x0ff90033, 0x01ed0fdc, 0x0ff70040,
- 0x01e50fd9, 0x0ff3004f, 0x01db0fd7, 0x0ff1005d,
- 0x01ce0fd7, 0x0fed006e, 0x01c00fd6, 0x0feb007f,
- 0x01b30fd5, 0x0fe70091, 0x01a30fd6, 0x0fe300a4,
- 0x01920fd6, 0x0fe000b8, 0x017e0fd8, 0x0fdd00cd,
- 0x016c0fd8, 0x0fd800e4, 0x01560fdb, 0x0fd600f9,
- 0x01400fdd, 0x0fd20111, 0x01290fdf, 0x0fd00128,
- 0x01110fe2, 0x0fce013f, 0x00f80fe6, 0x0fcd0155,
- 0x00de0fe8, 0x0fcc016e, 0x00c40fec, 0x0fcb0185,
- 0x00ab0fef, 0x0fcb019b, 0x00900ff3, 0x0fcd01b0,
- 0x00770ff6, 0x0fd101c2, 0x005f0ff9, 0x0fd501d3,
- 0x00470ffc, 0x0fdb01e2, 0x00320ffd, 0x0fe201ef,
- 0x001e0000, 0x0fea01f8, 0x000c0000, 0x0ff501ff,
- 0x02000000, 0x00000000, 0x02010ff9, 0x00000006,
- 0x02000ff2, 0x0000000e, 0x01fd0fed, 0x0ffe0018,
- 0x01f80fe8, 0x0ffd0023, 0x01f20fe4, 0x0ffb002f,
- 0x01eb0fe0, 0x0ff9003c, 0x01e10fdd, 0x0ff7004b,
- 0x01d60fda, 0x0ff4005c, 0x01c90fd9, 0x0ff2006c,
- 0x01bc0fd8, 0x0fee007e, 0x01ab0fd8, 0x0fec0091,
- 0x019b0fd8, 0x0fe800a5, 0x018b0fd8, 0x0fe400b9,
- 0x01770fd9, 0x0fe200ce, 0x01620fdb, 0x0fdf00e4,
- 0x014f0fdb, 0x0fdb00fb, 0x01380fde, 0x0fda0110,
- 0x01210fe0, 0x0fd70128, 0x010a0fe2, 0x0fd5013f,
- 0x00f30fe6, 0x0fd30154, 0x00da0fe9, 0x0fd3016a,
- 0x00c30feb, 0x0fd20180, 0x00aa0fef, 0x0fd20195,
- 0x00940ff1, 0x0fd301a8, 0x007b0ff5, 0x0fd501bb,
- 0x00650ff7, 0x0fd801cc, 0x00510ffa, 0x0fdc01d9,
- 0x003c0ffd, 0x0fe101e6, 0x002a0ffe, 0x0fe701f1,
- 0x00190000, 0x0fee01f9, 0x000a0000, 0x0ff701ff,
- 0x02000000, 0x00000000, 0x01ff0ff9, 0x00000008,
- 0x01fb0ff2, 0x00000013, 0x01f50fed, 0x0ffe0020,
- 0x01ed0fe8, 0x0ffd002e, 0x01e30fe4, 0x0ffb003e,
- 0x01d80fe1, 0x0ff9004e, 0x01cb0fde, 0x0ff70060,
- 0x01bc0fdc, 0x0ff40074, 0x01ac0fdb, 0x0ff20087,
- 0x019a0fdb, 0x0fef009c, 0x01870fdb, 0x0fed00b1,
- 0x01740fdb, 0x0fea00c7, 0x01600fdc, 0x0fe700dd,
- 0x014b0fdd, 0x0fe500f3, 0x01350fdf, 0x0fe30109,
- 0x01200fe0, 0x0fe00120, 0x01090fe3, 0x0fdf0135,
- 0x00f30fe5, 0x0fdd014b, 0x00dd0fe7, 0x0fdc0160,
- 0x00c70fea, 0x0fdb0174, 0x00b10fed, 0x0fdb0187,
- 0x009c0fef, 0x0fdb019a, 0x00870ff2, 0x0fdb01ac,
- 0x00740ff4, 0x0fdc01bc, 0x00600ff7, 0x0fde01cb,
- 0x004e0ff9, 0x0fe101d8, 0x003e0ffb, 0x0fe401e3,
- 0x002e0ffd, 0x0fe801ed, 0x00200ffe, 0x0fed01f5,
- 0x00130000, 0x0ff201fb, 0x00080000, 0x0ff901ff,
- 0x02000000, 0x00000000, 0x02060ff2, 0x00000008,
- 0x02090fe4, 0x00000013, 0x020a0fd9, 0x0ffc0021,
- 0x02080fce, 0x0ffa0030, 0x02030fc5, 0x0ff60042,
- 0x01fd0fbe, 0x0ff10054, 0x01f50fb6, 0x0fed0068,
- 0x01e90fb1, 0x0fe60080, 0x01dc0fae, 0x0fe10095,
- 0x01ca0fae, 0x0fda00ae, 0x01b70fad, 0x0fd600c6,
- 0x01a40fad, 0x0fcf00e0, 0x018f0faf, 0x0fc800fa,
- 0x01780fb1, 0x0fc30114, 0x015f0fb5, 0x0fbf012d,
- 0x01490fb7, 0x0fb70149, 0x012d0fbf, 0x0fb5015f,
- 0x01140fc3, 0x0fb10178, 0x00fa0fc8, 0x0faf018f,
- 0x00e00fcf, 0x0fad01a4, 0x00c60fd6, 0x0fad01b7,
- 0x00ae0fda, 0x0fae01ca, 0x00950fe1, 0x0fae01dc,
- 0x00800fe6, 0x0fb101e9, 0x00680fed, 0x0fb601f5,
- 0x00540ff1, 0x0fbe01fd, 0x00420ff6, 0x0fc50203,
- 0x00300ffa, 0x0fce0208, 0x00210ffc, 0x0fd9020a,
- 0x00130000, 0x0fe40209, 0x00080000, 0x0ff20206,
- 0x02000000, 0x00000000, 0x02040ff2, 0x0000000a,
- 0x02040fe4, 0x00000018, 0x02010fda, 0x0ffc0029,
- 0x01fc0fcf, 0x0ffa003b, 0x01f30fc7, 0x0ff60050,
- 0x01e90fc0, 0x0ff20065, 0x01dc0fba, 0x0fee007c,
- 0x01cc0fb6, 0x0fe80096, 0x01ba0fb4, 0x0fe400ae,
- 0x01a70fb4, 0x0fdd00c8, 0x018f0fb5, 0x0fda00e2,
- 0x017a0fb5, 0x0fd400fd, 0x01630fb8, 0x0fce0117,
- 0x014c0fba, 0x0fca0130, 0x01320fbf, 0x0fc70148,
- 0x011b0fc1, 0x0fc10163, 0x01010fc8, 0x0fc00177,
- 0x00e90fcd, 0x0fbd018d, 0x00d10fd1, 0x0fbc01a2,
- 0x00ba0fd7, 0x0fbb01b4, 0x00a30fdd, 0x0fbc01c4,
- 0x008e0fe1, 0x0fbd01d4, 0x00790fe7, 0x0fbe01e2,
- 0x00670feb, 0x0fc001ee, 0x00540ff1, 0x0fc501f6,
- 0x00430ff4, 0x0fcb01fe, 0x00340ff8, 0x0fd10203,
- 0x00260ffb, 0x0fd80207, 0x001a0ffd, 0x0fe10208,
- 0x000f0000, 0x0fea0207, 0x00060000, 0x0ff50205,
- 0x02000000, 0x00000000, 0x02020ff2, 0x0000000c,
- 0x02000fe4, 0x0000001c, 0x01fa0fda, 0x0ffc0030,
- 0x01f10fd0, 0x0ffa0045, 0x01e50fc8, 0x0ff6005d,
- 0x01d60fc3, 0x0ff30074, 0x01c60fbd, 0x0fef008e,
- 0x01b30fba, 0x0fe900aa, 0x019e0fb9, 0x0fe500c4,
- 0x01870fba, 0x0fe000df, 0x016f0fbb, 0x0fdd00f9,
- 0x01580fbc, 0x0fd80114, 0x01400fbf, 0x0fd3012e,
- 0x01280fc2, 0x0fd00146, 0x010f0fc6, 0x0fce015d,
- 0x00f90fc9, 0x0fc90175, 0x00e00fcf, 0x0fc90188,
- 0x00ca0fd4, 0x0fc6019c, 0x00b40fd8, 0x0fc601ae,
- 0x009f0fdd, 0x0fc501bf, 0x008b0fe3, 0x0fc601cc,
- 0x00780fe6, 0x0fc701db, 0x00660feb, 0x0fc801e7,
- 0x00560fef, 0x0fcb01f0, 0x00460ff3, 0x0fcf01f8,
- 0x00380ff6, 0x0fd401fe, 0x002c0ff9, 0x0fd90202,
- 0x00200ffc, 0x0fdf0205, 0x00160ffe, 0x0fe60206,
- 0x000c0000, 0x0fed0207, 0x00050000, 0x0ff70204,
- 0x02000000, 0x00000000, 0x01fe0ff3, 0x0000000f,
- 0x01f60fe5, 0x00000025, 0x01ea0fdb, 0x0ffd003e,
- 0x01db0fd2, 0x0ffb0058, 0x01c80fcc, 0x0ff70075,
- 0x01b50fc7, 0x0ff40090, 0x01a00fc3, 0x0ff000ad,
- 0x01880fc1, 0x0feb00cc, 0x01700fc1, 0x0fe800e7,
- 0x01550fc3, 0x0fe40104, 0x013b0fc5, 0x0fe2011e,
- 0x01240fc6, 0x0fde0138, 0x010c0fca, 0x0fda0150,
- 0x00f40fcd, 0x0fd90166, 0x00dd0fd1, 0x0fd7017b,
- 0x00c80fd4, 0x0fd40190, 0x00b20fd9, 0x0fd401a1,
- 0x009f0fdd, 0x0fd301b1, 0x008c0fe1, 0x0fd301c0,
- 0x007b0fe5, 0x0fd301cd, 0x006a0fea, 0x0fd401d8,
- 0x005c0fec, 0x0fd501e3, 0x004d0ff0, 0x0fd601ed,
- 0x00410ff3, 0x0fd801f4, 0x00340ff7, 0x0fdb01fa,
- 0x002a0ff9, 0x0fdf01fe, 0x00200ffb, 0x0fe30202,
- 0x00180ffd, 0x0fe70204, 0x00100ffe, 0x0fed0205,
- 0x00090000, 0x0ff20205, 0x00040000, 0x0ff90203,
- 0x02000000, 0x00000000, 0x02050ff5, 0x00000006,
- 0x02070fea, 0x0000000f, 0x02080fe1, 0x0ffd001a,
- 0x02070fd8, 0x0ffb0026, 0x02030fd1, 0x0ff80034,
- 0x01fe0fcb, 0x0ff40043, 0x01f60fc5, 0x0ff10054,
- 0x01ee0fc0, 0x0feb0067, 0x01e20fbe, 0x0fe70079,
- 0x01d40fbd, 0x0fe1008e, 0x01c40fbc, 0x0fdd00a3,
- 0x01b40fbb, 0x0fd700ba, 0x01a20fbc, 0x0fd100d1,
- 0x018d0fbd, 0x0fcd00e9, 0x01770fc0, 0x0fc80101,
- 0x01630fc1, 0x0fc1011b, 0x01480fc7, 0x0fbf0132,
- 0x01300fca, 0x0fba014c, 0x01170fce, 0x0fb80163,
- 0x00fd0fd4, 0x0fb5017a, 0x00e20fda, 0x0fb5018f,
- 0x00c80fdd, 0x0fb401a7, 0x00ae0fe4, 0x0fb401ba,
- 0x00960fe8, 0x0fb601cc, 0x007c0fee, 0x0fba01dc,
- 0x00650ff2, 0x0fc001e9, 0x00500ff6, 0x0fc701f3,
- 0x003b0ffa, 0x0fcf01fc, 0x00290ffc, 0x0fda0201,
- 0x00180000, 0x0fe40204, 0x000a0000, 0x0ff20204,
- 0x02000000, 0x00000000, 0x02030ff5, 0x00000008,
- 0x02030fea, 0x00000013, 0x02020fe1, 0x0ffd0020,
- 0x01fc0fd9, 0x0ffc002f, 0x01f60fd2, 0x0ff80040,
- 0x01ed0fcd, 0x0ff50051, 0x01e30fc7, 0x0ff10065,
- 0x01d70fc3, 0x0fec007a, 0x01c60fc2, 0x0fe9008f,
- 0x01b60fc1, 0x0fe300a6, 0x01a20fc1, 0x0fe000bd,
- 0x018f0fc1, 0x0fdb00d5, 0x017b0fc2, 0x0fd500ee,
- 0x01640fc4, 0x0fd20106, 0x014d0fc8, 0x0fce011d,
- 0x01370fc9, 0x0fc90137, 0x011d0fce, 0x0fc8014d,
- 0x01060fd2, 0x0fc40164, 0x00ee0fd5, 0x0fc2017b,
- 0x00d50fdb, 0x0fc1018f, 0x00bd0fe0, 0x0fc101a2,
- 0x00a60fe3, 0x0fc101b6, 0x008f0fe9, 0x0fc201c6,
- 0x007a0fec, 0x0fc301d7, 0x00650ff1, 0x0fc701e3,
- 0x00510ff5, 0x0fcd01ed, 0x00400ff8, 0x0fd201f6,
- 0x002f0ffc, 0x0fd901fc, 0x00200ffd, 0x0fe10202,
- 0x00130000, 0x0fea0203, 0x00080000, 0x0ff50203,
- 0x02000000, 0x00000000, 0x02020ff5, 0x00000009,
- 0x01ff0fea, 0x00000017, 0x01fb0fe2, 0x0ffd0026,
- 0x01f30fda, 0x0ffc0037, 0x01ea0fd3, 0x0ff8004b,
- 0x01df0fce, 0x0ff5005e, 0x01d10fc9, 0x0ff20074,
- 0x01c10fc6, 0x0fed008c, 0x01ae0fc5, 0x0fea00a3,
- 0x019b0fc5, 0x0fe500bb, 0x01850fc6, 0x0fe200d3,
- 0x01700fc6, 0x0fde00ec, 0x015a0fc8, 0x0fd90105,
- 0x01430fca, 0x0fd6011d, 0x012b0fcd, 0x0fd30135,
- 0x01150fcf, 0x0fcf014d, 0x00fc0fd4, 0x0fce0162,
- 0x00e50fd8, 0x0fcc0177, 0x00cf0fdb, 0x0fca018c,
- 0x00b80fe0, 0x0fc9019f, 0x00a20fe5, 0x0fca01af,
- 0x008e0fe8, 0x0fcb01bf, 0x00790fec, 0x0fcb01d0,
- 0x00670fef, 0x0fcd01dd, 0x00550ff4, 0x0fd001e7,
- 0x00440ff7, 0x0fd501f0, 0x00350ffa, 0x0fda01f7,
- 0x00270ffc, 0x0fdf01fe, 0x001b0ffe, 0x0fe70200,
- 0x00100000, 0x0fee0202, 0x00060000, 0x0ff70203,
- 0x02000000, 0x00000000, 0x01ff0ff5, 0x0000000c,
- 0x01f80fea, 0x0000001e, 0x01ef0fe2, 0x0ffd0032,
- 0x01e20fdb, 0x0ffc0047, 0x01d30fd5, 0x0ff9005f,
- 0x01c20fd1, 0x0ff60077, 0x01b00fcd, 0x0ff30090,
- 0x019b0fcb, 0x0fef00ab, 0x01850fcb, 0x0fec00c4,
- 0x016e0fcc, 0x0fe800de, 0x01550fcd, 0x0fe600f8,
- 0x013f0fce, 0x0fe20111, 0x01280fd0, 0x0fdf0129,
- 0x01110fd2, 0x0fdd0140, 0x00f90fd6, 0x0fdb0156,
- 0x00e40fd8, 0x0fd8016c, 0x00cd0fdd, 0x0fd8017e,
- 0x00b80fe0, 0x0fd60192, 0x00a40fe3, 0x0fd601a3,
- 0x00910fe7, 0x0fd501b3, 0x007f0feb, 0x0fd601c0,
- 0x006e0fed, 0x0fd701ce, 0x005d0ff1, 0x0fd701db,
- 0x004f0ff3, 0x0fd901e5, 0x00400ff7, 0x0fdc01ed,
- 0x00330ff9, 0x0fe001f4, 0x00280ffb, 0x0fe301fa,
- 0x001d0ffd, 0x0fe801fe, 0x00140ffe, 0x0fed0201,
- 0x000c0000, 0x0ff20202, 0x00050000, 0x0ff90202,
- 0x02000000, 0x00000000, 0x02040ff7, 0x00000005,
- 0x02070fed, 0x0000000c, 0x02060fe6, 0x0ffe0016,
- 0x02050fdf, 0x0ffc0020, 0x02020fd9, 0x0ff9002c,
- 0x01fe0fd4, 0x0ff60038, 0x01f80fcf, 0x0ff30046,
- 0x01f00fcb, 0x0fef0056, 0x01e70fc8, 0x0feb0066,
- 0x01db0fc7, 0x0fe60078, 0x01cc0fc6, 0x0fe3008b,
- 0x01bf0fc5, 0x0fdd009f, 0x01ae0fc6, 0x0fd800b4,
- 0x019c0fc6, 0x0fd400ca, 0x01880fc9, 0x0fcf00e0,
- 0x01750fc9, 0x0fc900f9, 0x015d0fce, 0x0fc6010f,
- 0x01460fd0, 0x0fc20128, 0x012e0fd3, 0x0fbf0140,
- 0x01140fd8, 0x0fbc0158, 0x00f90fdd, 0x0fbb016f,
- 0x00df0fe0, 0x0fba0187, 0x00c40fe5, 0x0fb9019e,
- 0x00aa0fe9, 0x0fba01b3, 0x008e0fef, 0x0fbd01c6,
- 0x00740ff3, 0x0fc301d6, 0x005d0ff6, 0x0fc801e5,
- 0x00450ffa, 0x0fd001f1, 0x00300ffc, 0x0fda01fa,
- 0x001c0000, 0x0fe40200, 0x000c0000, 0x0ff20202,
- 0x02000000, 0x00000000, 0x02030ff7, 0x00000006,
- 0x02020fee, 0x00000010, 0x02000fe7, 0x0ffe001b,
- 0x01fe0fdf, 0x0ffc0027, 0x01f70fda, 0x0ffa0035,
- 0x01f00fd5, 0x0ff70044, 0x01e70fd0, 0x0ff40055,
- 0x01dd0fcd, 0x0fef0067, 0x01d00fcb, 0x0fec0079,
- 0x01bf0fcb, 0x0fe8008e, 0x01af0fca, 0x0fe500a2,
- 0x019f0fc9, 0x0fe000b8, 0x018c0fca, 0x0fdb00cf,
- 0x01770fcc, 0x0fd800e5, 0x01620fce, 0x0fd400fc,
- 0x014d0fcf, 0x0fcf0115, 0x01350fd3, 0x0fcd012b,
- 0x011d0fd6, 0x0fca0143, 0x01050fd9, 0x0fc8015a,
- 0x00ec0fde, 0x0fc60170, 0x00d30fe2, 0x0fc60185,
- 0x00bb0fe5, 0x0fc5019b, 0x00a30fea, 0x0fc501ae,
- 0x008c0fed, 0x0fc601c1, 0x00740ff2, 0x0fc901d1,
- 0x005e0ff5, 0x0fce01df, 0x004b0ff8, 0x0fd301ea,
- 0x00370ffc, 0x0fda01f3, 0x00260ffd, 0x0fe201fb,
- 0x00170000, 0x0fea01ff, 0x00090000, 0x0ff50202,
- 0x02000000, 0x00000000, 0x02010ff7, 0x00000008,
- 0x01ff0fee, 0x00000013, 0x01fb0fe7, 0x0ffe0020,
- 0x01f60fe0, 0x0ffc002e, 0x01ed0fda, 0x0ffa003f,
- 0x01e40fd6, 0x0ff7004f, 0x01d80fd2, 0x0ff40062,
- 0x01ca0fcf, 0x0ff00077, 0x01bb0fcd, 0x0fed008b,
- 0x01a90fcd, 0x0fe900a1, 0x01960fcd, 0x0fe600b7,
- 0x01830fcd, 0x0fe200ce, 0x016d0fcf, 0x0fde00e6,
- 0x01580fd0, 0x0fdb00fd, 0x01410fd3, 0x0fd80114,
- 0x012c0fd4, 0x0fd4012c, 0x01140fd8, 0x0fd30141,
- 0x00fd0fdb, 0x0fd00158, 0x00e60fde, 0x0fcf016d,
- 0x00ce0fe2, 0x0fcd0183, 0x00b70fe6, 0x0fcd0196,
- 0x00a10fe9, 0x0fcd01a9, 0x008b0fed, 0x0fcd01bb,
- 0x00770ff0, 0x0fcf01ca, 0x00620ff4, 0x0fd201d8,
- 0x004f0ff7, 0x0fd601e4, 0x003f0ffa, 0x0fda01ed,
- 0x002e0ffc, 0x0fe001f6, 0x00200ffe, 0x0fe701fb,
- 0x00130000, 0x0fee01ff, 0x00080000, 0x0ff70201,
- 0x02000000, 0x00000000, 0x01ff0ff7, 0x0000000a,
- 0x01f90fee, 0x00000019, 0x01f10fe7, 0x0ffe002a,
- 0x01e60fe1, 0x0ffd003c, 0x01d90fdc, 0x0ffa0051,
- 0x01cc0fd8, 0x0ff70065, 0x01bb0fd5, 0x0ff5007b,
- 0x01a80fd3, 0x0ff10094, 0x01950fd2, 0x0fef00aa,
- 0x01800fd2, 0x0feb00c3, 0x016a0fd3, 0x0fe900da,
- 0x01540fd3, 0x0fe600f3, 0x013f0fd5, 0x0fe2010a,
- 0x01280fd7, 0x0fe00121, 0x01100fda, 0x0fde0138,
- 0x00fb0fdb, 0x0fdb014f, 0x00e40fdf, 0x0fdb0162,
- 0x00ce0fe2, 0x0fd90177, 0x00b90fe4, 0x0fd8018b,
- 0x00a50fe8, 0x0fd8019b, 0x00910fec, 0x0fd801ab,
- 0x007e0fee, 0x0fd801bc, 0x006c0ff2, 0x0fd901c9,
- 0x005c0ff4, 0x0fda01d6, 0x004b0ff7, 0x0fdd01e1,
- 0x003c0ff9, 0x0fe001eb, 0x002f0ffb, 0x0fe401f2,
- 0x00230ffd, 0x0fe801f8, 0x00180ffe, 0x0fed01fd,
- 0x000e0000, 0x0ff20200, 0x00060000, 0x0ff90201,
- 0x02000000, 0x00000000, 0x02030ff9, 0x00000004,
- 0x02050ff2, 0x00000009, 0x02050fed, 0x0ffe0010,
- 0x02040fe7, 0x0ffd0018, 0x02020fe3, 0x0ffb0020,
- 0x01fe0fdf, 0x0ff9002a, 0x01fa0fdb, 0x0ff70034,
- 0x01f40fd8, 0x0ff30041, 0x01ed0fd6, 0x0ff0004d,
- 0x01e30fd5, 0x0fec005c, 0x01d80fd4, 0x0fea006a,
- 0x01cd0fd3, 0x0fe5007b, 0x01c00fd3, 0x0fe1008c,
- 0x01b10fd3, 0x0fdd009f, 0x01a10fd4, 0x0fd900b2,
- 0x01900fd4, 0x0fd400c8, 0x017b0fd7, 0x0fd100dd,
- 0x01660fd9, 0x0fcd00f4, 0x01500fda, 0x0fca010c,
- 0x01380fde, 0x0fc60124, 0x011e0fe2, 0x0fc5013b,
- 0x01040fe4, 0x0fc30155, 0x00e70fe8, 0x0fc10170,
- 0x00cc0feb, 0x0fc10188, 0x00ad0ff0, 0x0fc301a0,
- 0x00900ff4, 0x0fc701b5, 0x00750ff7, 0x0fcc01c8,
- 0x00580ffb, 0x0fd201db, 0x003e0ffd, 0x0fdb01ea,
- 0x00250000, 0x0fe501f6, 0x000f0000, 0x0ff301fe,
- 0x02000000, 0x00000000, 0x02020ff9, 0x00000005,
- 0x02020ff2, 0x0000000c, 0x02010fed, 0x0ffe0014,
- 0x01fe0fe8, 0x0ffd001d, 0x01fa0fe3, 0x0ffb0028,
- 0x01f40fe0, 0x0ff90033, 0x01ed0fdc, 0x0ff70040,
- 0x01e50fd9, 0x0ff3004f, 0x01db0fd7, 0x0ff1005d,
- 0x01ce0fd7, 0x0fed006e, 0x01c00fd6, 0x0feb007f,
- 0x01b30fd5, 0x0fe70091, 0x01a30fd6, 0x0fe300a4,
- 0x01920fd6, 0x0fe000b8, 0x017e0fd8, 0x0fdd00cd,
- 0x016c0fd8, 0x0fd800e4, 0x01560fdb, 0x0fd600f9,
- 0x01400fdd, 0x0fd20111, 0x01290fdf, 0x0fd00128,
- 0x01110fe2, 0x0fce013f, 0x00f80fe6, 0x0fcd0155,
- 0x00de0fe8, 0x0fcc016e, 0x00c40fec, 0x0fcb0185,
- 0x00ab0fef, 0x0fcb019b, 0x00900ff3, 0x0fcd01b0,
- 0x00770ff6, 0x0fd101c2, 0x005f0ff9, 0x0fd501d3,
- 0x00470ffc, 0x0fdb01e2, 0x00320ffd, 0x0fe201ef,
- 0x001e0000, 0x0fea01f8, 0x000c0000, 0x0ff501ff,
- 0x02000000, 0x00000000, 0x02010ff9, 0x00000006,
- 0x02000ff2, 0x0000000e, 0x01fd0fed, 0x0ffe0018,
- 0x01f80fe8, 0x0ffd0023, 0x01f20fe4, 0x0ffb002f,
- 0x01eb0fe0, 0x0ff9003c, 0x01e10fdd, 0x0ff7004b,
- 0x01d60fda, 0x0ff4005c, 0x01c90fd9, 0x0ff2006c,
- 0x01bc0fd8, 0x0fee007e, 0x01ab0fd8, 0x0fec0091,
- 0x019b0fd8, 0x0fe800a5, 0x018b0fd8, 0x0fe400b9,
- 0x01770fd9, 0x0fe200ce, 0x01620fdb, 0x0fdf00e4,
- 0x014f0fdb, 0x0fdb00fb, 0x01380fde, 0x0fda0110,
- 0x01210fe0, 0x0fd70128, 0x010a0fe2, 0x0fd5013f,
- 0x00f30fe6, 0x0fd30154, 0x00da0fe9, 0x0fd3016a,
- 0x00c30feb, 0x0fd20180, 0x00aa0fef, 0x0fd20195,
- 0x00940ff1, 0x0fd301a8, 0x007b0ff5, 0x0fd501bb,
- 0x00650ff7, 0x0fd801cc, 0x00510ffa, 0x0fdc01d9,
- 0x003c0ffd, 0x0fe101e6, 0x002a0ffe, 0x0fe701f1,
- 0x00190000, 0x0fee01f9, 0x000a0000, 0x0ff701ff,
- 0x02000000, 0x00000000, 0x01ff0ff9, 0x00000008,
- 0x01fb0ff2, 0x00000013, 0x01f50fed, 0x0ffe0020,
- 0x01ed0fe8, 0x0ffd002e, 0x01e30fe4, 0x0ffb003e,
- 0x01d80fe1, 0x0ff9004e, 0x01cb0fde, 0x0ff70060,
- 0x01bc0fdc, 0x0ff40074, 0x01ac0fdb, 0x0ff20087,
- 0x019a0fdb, 0x0fef009c, 0x01870fdb, 0x0fed00b1,
- 0x01740fdb, 0x0fea00c7, 0x01600fdc, 0x0fe700dd,
- 0x014b0fdd, 0x0fe500f3, 0x01350fdf, 0x0fe30109,
- 0x01200fe0, 0x0fe00120, 0x01090fe3, 0x0fdf0135,
- 0x00f30fe5, 0x0fdd014b, 0x00dd0fe7, 0x0fdc0160,
- 0x00c70fea, 0x0fdb0174, 0x00b10fed, 0x0fdb0187,
- 0x009c0fef, 0x0fdb019a, 0x00870ff2, 0x0fdb01ac,
- 0x00740ff4, 0x0fdc01bc, 0x00600ff7, 0x0fde01cb,
- 0x004e0ff9, 0x0fe101d8, 0x003e0ffb, 0x0fe401e3,
- 0x002e0ffd, 0x0fe801ed, 0x00200ffe, 0x0fed01f5,
- 0x00130000, 0x0ff201fb, 0x00080000, 0x0ff901ff
-};
-
-
-#define MDP4_QSEED_TABLE0_OFF 0x8100
-#define MDP4_QSEED_TABLE1_OFF 0x8200
-#define MDP4_QSEED_TABLE2_OFF 0x9000
-
-void mdp4_vg_qseed_init(int vp_num)
-{
- uint32 *off;
- int i, voff;
-
- voff = MDP4_VIDEO_OFF * vp_num;
- off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
- MDP4_QSEED_TABLE0_OFF);
- for (i = 0; i < (sizeof(vg_qseed_table0) / sizeof(uint32)); i++) {
- outpdw(off, vg_qseed_table0[i]);
- off++;
- }
-
- off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
- MDP4_QSEED_TABLE1_OFF);
- for (i = 0; i < (sizeof(vg_qseed_table1) / sizeof(uint32)); i++) {
- outpdw(off, vg_qseed_table1[i]);
- off++;
- }
-
- off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
- MDP4_QSEED_TABLE2_OFF);
- for (i = 0; i < (sizeof(vg_qseed_table2) / sizeof(uint32)); i++) {
- outpdw(off, vg_qseed_table2[i]);
- off++;
- }
-
-}
-
-void mdp4_mixer_blend_init(int mixer_num)
-{
- unsigned char *overlay_base;
- int off;
-
- if (mixer_num) /* mixer number, /dev/fb0, /dev/fb1 */
- overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
- else
- overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
-
- /* stage 0 to stage 2 */
- off = 0;
- outpdw(overlay_base + off + 0x104, 0x010);
- outpdw(overlay_base + off + 0x108, 0xff);/* FG */
- outpdw(overlay_base + off + 0x10c, 0x00);/* BG */
-
- off += 0x20;
- outpdw(overlay_base + off + 0x104, 0x010);
- outpdw(overlay_base + off + 0x108, 0xff);/* FG */
- outpdw(overlay_base + off + 0x10c, 0x00);/* BG */
-
- off += 0x20;
- outpdw(overlay_base + off + 0x104, 0x010);
- outpdw(overlay_base + off + 0x108, 0xff);/* FG */
- outpdw(overlay_base + off + 0x10c, 0x00);/* BG */
-}
-
-
-static uint32 csc_matrix_tab[9] = {
- 0x0254, 0x0000, 0x0331,
- 0x0254, 0xff37, 0xfe60,
- 0x0254, 0x0409, 0x0000
-};
-
-static uint32 csc_pre_bv_tab[3] = {0xfff0, 0xff80, 0xff80 };
-static uint32 csc_post_bv_tab[3] = {0, 0, 0 };
-
-static uint32 csc_pre_lv_tab[6] = {0, 0xff, 0, 0xff, 0, 0xff };
-static uint32 csc_post_lv_tab[6] = {0, 0xff, 0, 0xff, 0, 0xff };
-
-#define MDP4_CSC_MV_OFF 0x4400
-#define MDP4_CSC_PRE_BV_OFF 0x4500
-#define MDP4_CSC_POST_BV_OFF 0x4580
-#define MDP4_CSC_PRE_LV_OFF 0x4600
-#define MDP4_CSC_POST_LV_OFF 0x4680
-
-void mdp4_vg_csc_mv_setup(int vp_num)
-{
- uint32 *off;
- int i, voff;
-
- voff = MDP4_VIDEO_OFF * vp_num;
- off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
- MDP4_CSC_MV_OFF);
- for (i = 0; i < 9; i++) {
- outpdw(off, csc_matrix_tab[i]);
- off++;
- }
-}
-
-void mdp4_vg_csc_pre_bv_setup(int vp_num)
-{
- uint32 *off;
- int i, voff;
-
- voff = MDP4_VIDEO_OFF * vp_num;
- off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
- MDP4_CSC_PRE_BV_OFF);
- for (i = 0; i < 3; i++) {
- outpdw(off, csc_pre_bv_tab[i]);
- off++;
- }
-}
-
-void mdp4_vg_csc_post_bv_setup(int vp_num)
-{
- uint32 *off;
- int i, voff;
-
- voff = MDP4_VIDEO_OFF * vp_num;
- off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
- MDP4_CSC_POST_BV_OFF);
- for (i = 0; i < 3; i++) {
- outpdw(off, csc_post_bv_tab[i]);
- off++;
- }
-}
-
-void mdp4_vg_csc_pre_lv_setup(int vp_num)
-{
- uint32 *off;
- int i, voff;
-
- voff = MDP4_VIDEO_OFF * vp_num;
- off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
- MDP4_CSC_PRE_LV_OFF);
-
- for (i = 0; i < 6; i++) {
- outpdw(off, csc_pre_lv_tab[i]);
- off++;
- }
-}
-
-void mdp4_vg_csc_post_lv_setup(int vp_num)
-{
- uint32 *off;
- int i, voff;
-
- voff = MDP4_VIDEO_OFF * vp_num;
- off = (uint32 *)(MDP_BASE + MDP4_VIDEO_BASE + voff +
- MDP4_CSC_POST_LV_OFF);
-
- for (i = 0; i < 6; i++) {
- outpdw(off, csc_post_lv_tab[i]);
- off++;
- }
-}
-
-char gc_lut[] = {
- 0x0, 0x1, 0x2, 0x2, 0x3, 0x4, 0x5, 0x6,
- 0x6, 0x7, 0x8, 0x9, 0xA, 0xA, 0xB, 0xC,
- 0xD, 0xD, 0xE, 0xF, 0xF, 0x10, 0x10, 0x11,
- 0x12, 0x12, 0x13, 0x13, 0x14, 0x14, 0x15, 0x15,
- 0x16, 0x16, 0x17, 0x17, 0x17, 0x18, 0x18, 0x19,
- 0x19, 0x19, 0x1A, 0x1A, 0x1B, 0x1B, 0x1B, 0x1C,
- 0x1C, 0x1D, 0x1D, 0x1D, 0x1E, 0x1E, 0x1E, 0x1F,
- 0x1F, 0x1F, 0x20, 0x20, 0x20, 0x21, 0x21, 0x21,
- 0x22, 0x22, 0x22, 0x22, 0x23, 0x23, 0x23, 0x24,
- 0x24, 0x24, 0x25, 0x25, 0x25, 0x25, 0x26, 0x26,
- 0x26, 0x26, 0x27, 0x27, 0x27, 0x28, 0x28, 0x28,
- 0x28, 0x29, 0x29, 0x29, 0x29, 0x2A, 0x2A, 0x2A,
- 0x2A, 0x2B, 0x2B, 0x2B, 0x2B, 0x2B, 0x2C, 0x2C,
- 0x2C, 0x2C, 0x2D, 0x2D, 0x2D, 0x2D, 0x2E, 0x2E,
- 0x2E, 0x2E, 0x2E, 0x2F, 0x2F, 0x2F, 0x2F, 0x30,
- 0x30, 0x30, 0x30, 0x30, 0x31, 0x31, 0x31, 0x31,
- 0x31, 0x32, 0x32, 0x32, 0x32, 0x32, 0x33, 0x33,
- 0x33, 0x33, 0x33, 0x34, 0x34, 0x34, 0x34, 0x34,
- 0x35, 0x35, 0x35, 0x35, 0x35, 0x36, 0x36, 0x36,
- 0x36, 0x36, 0x37, 0x37, 0x37, 0x37, 0x37, 0x37,
- 0x38, 0x38, 0x38, 0x38, 0x38, 0x39, 0x39, 0x39,
- 0x39, 0x39, 0x39, 0x3A, 0x3A, 0x3A, 0x3A, 0x3A,
- 0x3A, 0x3B, 0x3B, 0x3B, 0x3B, 0x3B, 0x3B, 0x3C,
- 0x3C, 0x3C, 0x3C, 0x3C, 0x3C, 0x3D, 0x3D, 0x3D,
- 0x3D, 0x3D, 0x3D, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E,
- 0x3E, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x40,
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x41, 0x41,
- 0x41, 0x41, 0x41, 0x41, 0x42, 0x42, 0x42, 0x42,
- 0x42, 0x42, 0x42, 0x43, 0x43, 0x43, 0x43, 0x43,
- 0x43, 0x43, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44,
- 0x44, 0x45, 0x45, 0x45, 0x45, 0x45, 0x45, 0x45,
- 0x46, 0x46, 0x46, 0x46, 0x46, 0x46, 0x46, 0x47,
- 0x47, 0x47, 0x47, 0x47, 0x47, 0x47, 0x48, 0x48,
- 0x48, 0x48, 0x48, 0x48, 0x48, 0x48, 0x49, 0x49,
- 0x49, 0x49, 0x49, 0x49, 0x49, 0x4A, 0x4A, 0x4A,
- 0x4A, 0x4A, 0x4A, 0x4A, 0x4A, 0x4B, 0x4B, 0x4B,
- 0x4B, 0x4B, 0x4B, 0x4B, 0x4B, 0x4C, 0x4C, 0x4C,
- 0x4C, 0x4C, 0x4C, 0x4C, 0x4D, 0x4D, 0x4D, 0x4D,
- 0x4D, 0x4D, 0x4D, 0x4D, 0x4E, 0x4E, 0x4E, 0x4E,
- 0x4E, 0x4E, 0x4E, 0x4E, 0x4E, 0x4F, 0x4F, 0x4F,
- 0x4F, 0x4F, 0x4F, 0x4F, 0x4F, 0x50, 0x50, 0x50,
- 0x50, 0x50, 0x50, 0x50, 0x50, 0x51, 0x51, 0x51,
- 0x51, 0x51, 0x51, 0x51, 0x51, 0x51, 0x52, 0x52,
- 0x52, 0x52, 0x52, 0x52, 0x52, 0x52, 0x53, 0x53,
- 0x53, 0x53, 0x53, 0x53, 0x53, 0x53, 0x53, 0x54,
- 0x54, 0x54, 0x54, 0x54, 0x54, 0x54, 0x54, 0x54,
- 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55,
- 0x55, 0x56, 0x56, 0x56, 0x56, 0x56, 0x56, 0x56,
- 0x56, 0x56, 0x57, 0x57, 0x57, 0x57, 0x57, 0x57,
- 0x57, 0x57, 0x57, 0x58, 0x58, 0x58, 0x58, 0x58,
- 0x58, 0x58, 0x58, 0x58, 0x58, 0x59, 0x59, 0x59,
- 0x59, 0x59, 0x59, 0x59, 0x59, 0x59, 0x5A, 0x5A,
- 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A,
- 0x5B, 0x5B, 0x5B, 0x5B, 0x5B, 0x5B, 0x5B, 0x5B,
- 0x5B, 0x5B, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
- 0x5C, 0x5C, 0x5C, 0x5C, 0x5D, 0x5D, 0x5D, 0x5D,
- 0x5D, 0x5D, 0x5D, 0x5D, 0x5D, 0x5D, 0x5E, 0x5E,
- 0x5E, 0x5E, 0x5E, 0x5E, 0x5E, 0x5E, 0x5E, 0x5E,
- 0x5F, 0x5F, 0x5F, 0x5F, 0x5F, 0x5F, 0x5F, 0x5F,
- 0x5F, 0x5F, 0x60, 0x60, 0x60, 0x60, 0x60, 0x60,
- 0x60, 0x60, 0x60, 0x60, 0x60, 0x61, 0x61, 0x61,
- 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x62,
- 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62,
- 0x62, 0x62, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
- 0x63, 0x63, 0x63, 0x63, 0x63, 0x64, 0x64, 0x64,
- 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64,
- 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
- 0x65, 0x65, 0x65, 0x66, 0x66, 0x66, 0x66, 0x66,
- 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x67, 0x67,
- 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67,
- 0x67, 0x67, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68,
- 0x68, 0x68, 0x68, 0x68, 0x68, 0x69, 0x69, 0x69,
- 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69, 0x69,
- 0x69, 0x6A, 0x6A, 0x6A, 0x6A, 0x6A, 0x6A, 0x6A,
- 0x6A, 0x6A, 0x6A, 0x6A, 0x6A, 0x6B, 0x6B, 0x6B,
- 0x6B, 0x6B, 0x6B, 0x6B, 0x6B, 0x6B, 0x6B, 0x6B,
- 0x6B, 0x6C, 0x6C, 0x6C, 0x6C, 0x6C, 0x6C, 0x6C,
- 0x6C, 0x6C, 0x6C, 0x6C, 0x6C, 0x6D, 0x6D, 0x6D,
- 0x6D, 0x6D, 0x6D, 0x6D, 0x6D, 0x6D, 0x6D, 0x6D,
- 0x6D, 0x6E, 0x6E, 0x6E, 0x6E, 0x6E, 0x6E, 0x6E,
- 0x6E, 0x6E, 0x6E, 0x6E, 0x6E, 0x6F, 0x6F, 0x6F,
- 0x6F, 0x6F, 0x6F, 0x6F, 0x6F, 0x6F, 0x6F, 0x6F,
- 0x6F, 0x6F, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70,
- 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x71, 0x71,
- 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71, 0x71,
- 0x71, 0x71, 0x71, 0x72, 0x72, 0x72, 0x72, 0x72,
- 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
- 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73, 0x73,
- 0x73, 0x73, 0x73, 0x73, 0x73, 0x74, 0x74, 0x74,
- 0x74, 0x74, 0x74, 0x74, 0x74, 0x74, 0x74, 0x74,
- 0x74, 0x74, 0x75, 0x75, 0x75, 0x75, 0x75, 0x75,
- 0x75, 0x75, 0x75, 0x75, 0x75, 0x75, 0x75, 0x75,
- 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76,
- 0x76, 0x76, 0x76, 0x76, 0x76, 0x77, 0x77, 0x77,
- 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77,
- 0x77, 0x77, 0x77, 0x78, 0x78, 0x78, 0x78, 0x78,
- 0x78, 0x78, 0x78, 0x78, 0x78, 0x78, 0x78, 0x78,
- 0x78, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79,
- 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x7A, 0x7A,
- 0x7A, 0x7A, 0x7A, 0x7A, 0x7A, 0x7A, 0x7A, 0x7A,
- 0x7A, 0x7A, 0x7A, 0x7A, 0x7A, 0x7B, 0x7B, 0x7B,
- 0x7B, 0x7B, 0x7B, 0x7B, 0x7B, 0x7B, 0x7B, 0x7B,
- 0x7B, 0x7B, 0x7B, 0x7C, 0x7C, 0x7C, 0x7C, 0x7C,
- 0x7C, 0x7C, 0x7C, 0x7C, 0x7C, 0x7C, 0x7C, 0x7C,
- 0x7C, 0x7D, 0x7D, 0x7D, 0x7D, 0x7D, 0x7D, 0x7D,
- 0x7D, 0x7D, 0x7D, 0x7D, 0x7D, 0x7D, 0x7D, 0x7D,
- 0x7E, 0x7E, 0x7E, 0x7E, 0x7E, 0x7E, 0x7E, 0x7E,
- 0x7E, 0x7E, 0x7E, 0x7E, 0x7E, 0x7E, 0x7F, 0x7F,
- 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F,
- 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x80, 0x80, 0x80,
- 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
- 0x80, 0x80, 0x80, 0x80, 0x81, 0x81, 0x81, 0x81,
- 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81,
- 0x81, 0x81, 0x81, 0x82, 0x82, 0x82, 0x82, 0x82,
- 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82, 0x82,
- 0x82, 0x82, 0x83, 0x83, 0x83, 0x83, 0x83, 0x83,
- 0x83, 0x83, 0x83, 0x83, 0x83, 0x83, 0x83, 0x83,
- 0x83, 0x83, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84,
- 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84,
- 0x84, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85,
- 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85, 0x85,
- 0x85, 0x86, 0x86, 0x86, 0x86, 0x86, 0x86, 0x86,
- 0x86, 0x86, 0x86, 0x86, 0x86, 0x86, 0x86, 0x86,
- 0x86, 0x87, 0x87, 0x87, 0x87, 0x87, 0x87, 0x87,
- 0x87, 0x87, 0x87, 0x87, 0x87, 0x87, 0x87, 0x87,
- 0x87, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
- 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
- 0x88, 0x89, 0x89, 0x89, 0x89, 0x89, 0x89, 0x89,
- 0x89, 0x89, 0x89, 0x89, 0x89, 0x89, 0x89, 0x89,
- 0x89, 0x8A, 0x8A, 0x8A, 0x8A, 0x8A, 0x8A, 0x8A,
- 0x8A, 0x8A, 0x8A, 0x8A, 0x8A, 0x8A, 0x8A, 0x8A,
- 0x8A, 0x8B, 0x8B, 0x8B, 0x8B, 0x8B, 0x8B, 0x8B,
- 0x8B, 0x8B, 0x8B, 0x8B, 0x8B, 0x8B, 0x8B, 0x8B,
- 0x8B, 0x8B, 0x8C, 0x8C, 0x8C, 0x8C, 0x8C, 0x8C,
- 0x8C, 0x8C, 0x8C, 0x8C, 0x8C, 0x8C, 0x8C, 0x8C,
- 0x8C, 0x8C, 0x8C, 0x8D, 0x8D, 0x8D, 0x8D, 0x8D,
- 0x8D, 0x8D, 0x8D, 0x8D, 0x8D, 0x8D, 0x8D, 0x8D,
- 0x8D, 0x8D, 0x8D, 0x8D, 0x8E, 0x8E, 0x8E, 0x8E,
- 0x8E, 0x8E, 0x8E, 0x8E, 0x8E, 0x8E, 0x8E, 0x8E,
- 0x8E, 0x8E, 0x8E, 0x8E, 0x8E, 0x8F, 0x8F, 0x8F,
- 0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x8F,
- 0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x8F, 0x90, 0x90,
- 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
- 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x91,
- 0x91, 0x91, 0x91, 0x91, 0x91, 0x91, 0x91, 0x91,
- 0x91, 0x91, 0x91, 0x91, 0x91, 0x91, 0x91, 0x91,
- 0x91, 0x92, 0x92, 0x92, 0x92, 0x92, 0x92, 0x92,
- 0x92, 0x92, 0x92, 0x92, 0x92, 0x92, 0x92, 0x92,
- 0x92, 0x92, 0x93, 0x93, 0x93, 0x93, 0x93, 0x93,
- 0x93, 0x93, 0x93, 0x93, 0x93, 0x93, 0x93, 0x93,
- 0x93, 0x93, 0x93, 0x93, 0x94, 0x94, 0x94, 0x94,
- 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
- 0x94, 0x94, 0x94, 0x94, 0x94, 0x94, 0x95, 0x95,
- 0x95, 0x95, 0x95, 0x95, 0x95, 0x95, 0x95, 0x95,
- 0x95, 0x95, 0x95, 0x95, 0x95, 0x95, 0x95, 0x95,
- 0x96, 0x96, 0x96, 0x96, 0x96, 0x96, 0x96, 0x96,
- 0x96, 0x96, 0x96, 0x96, 0x96, 0x96, 0x96, 0x96,
- 0x96, 0x96, 0x96, 0x97, 0x97, 0x97, 0x97, 0x97,
- 0x97, 0x97, 0x97, 0x97, 0x97, 0x97, 0x97, 0x97,
- 0x97, 0x97, 0x97, 0x97, 0x97, 0x98, 0x98, 0x98,
- 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98,
- 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98,
- 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
- 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
- 0x99, 0x99, 0x9A, 0x9A, 0x9A, 0x9A, 0x9A, 0x9A,
- 0x9A, 0x9A, 0x9A, 0x9A, 0x9A, 0x9A, 0x9A, 0x9A,
- 0x9A, 0x9A, 0x9A, 0x9A, 0x9A, 0x9B, 0x9B, 0x9B,
- 0x9B, 0x9B, 0x9B, 0x9B, 0x9B, 0x9B, 0x9B, 0x9B,
- 0x9B, 0x9B, 0x9B, 0x9B, 0x9B, 0x9B, 0x9B, 0x9B,
- 0x9C, 0x9C, 0x9C, 0x9C, 0x9C, 0x9C, 0x9C, 0x9C,
- 0x9C, 0x9C, 0x9C, 0x9C, 0x9C, 0x9C, 0x9C, 0x9C,
- 0x9C, 0x9C, 0x9C, 0x9C, 0x9D, 0x9D, 0x9D, 0x9D,
- 0x9D, 0x9D, 0x9D, 0x9D, 0x9D, 0x9D, 0x9D, 0x9D,
- 0x9D, 0x9D, 0x9D, 0x9D, 0x9D, 0x9D, 0x9D, 0x9E,
- 0x9E, 0x9E, 0x9E, 0x9E, 0x9E, 0x9E, 0x9E, 0x9E,
- 0x9E, 0x9E, 0x9E, 0x9E, 0x9E, 0x9E, 0x9E, 0x9E,
- 0x9E, 0x9E, 0x9F, 0x9F, 0x9F, 0x9F, 0x9F, 0x9F,
- 0x9F, 0x9F, 0x9F, 0x9F, 0x9F, 0x9F, 0x9F, 0x9F,
- 0x9F, 0x9F, 0x9F, 0x9F, 0x9F, 0x9F, 0xA0, 0xA0,
- 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0,
- 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0, 0xA0,
- 0xA0, 0xA0, 0xA1, 0xA1, 0xA1, 0xA1, 0xA1, 0xA1,
- 0xA1, 0xA1, 0xA1, 0xA1, 0xA1, 0xA1, 0xA1, 0xA1,
- 0xA1, 0xA1, 0xA1, 0xA1, 0xA1, 0xA1, 0xA2, 0xA2,
- 0xA2, 0xA2, 0xA2, 0xA2, 0xA2, 0xA2, 0xA2, 0xA2,
- 0xA2, 0xA2, 0xA2, 0xA2, 0xA2, 0xA2, 0xA2, 0xA2,
- 0xA2, 0xA2, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3,
- 0xA3, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3,
- 0xA3, 0xA3, 0xA3, 0xA3, 0xA3, 0xA3, 0xA4, 0xA4,
- 0xA4, 0xA4, 0xA4, 0xA4, 0xA4, 0xA4, 0xA4, 0xA4,
- 0xA4, 0xA4, 0xA4, 0xA4, 0xA4, 0xA4, 0xA4, 0xA4,
- 0xA4, 0xA4, 0xA4, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5,
- 0xA5, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5,
- 0xA5, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5, 0xA5,
- 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6,
- 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6,
- 0xA6, 0xA6, 0xA6, 0xA6, 0xA7, 0xA7, 0xA7, 0xA7,
- 0xA7, 0xA7, 0xA7, 0xA7, 0xA7, 0xA7, 0xA7, 0xA7,
- 0xA7, 0xA7, 0xA7, 0xA7, 0xA7, 0xA7, 0xA7, 0xA7,
- 0xA7, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8,
- 0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8,
- 0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA8, 0xA9,
- 0xA9, 0xA9, 0xA9, 0xA9, 0xA9, 0xA9, 0xA9, 0xA9,
- 0xA9, 0xA9, 0xA9, 0xA9, 0xA9, 0xA9, 0xA9, 0xA9,
- 0xA9, 0xA9, 0xA9, 0xA9, 0xAA, 0xAA, 0xAA, 0xAA,
- 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
- 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
- 0xAA, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB,
- 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB,
- 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAB, 0xAC,
- 0xAC, 0xAC, 0xAC, 0xAC, 0xAC, 0xAC, 0xAC, 0xAC,
- 0xAC, 0xAC, 0xAC, 0xAC, 0xAC, 0xAC, 0xAC, 0xAC,
- 0xAC, 0xAC, 0xAC, 0xAC, 0xAC, 0xAD, 0xAD, 0xAD,
- 0xAD, 0xAD, 0xAD, 0xAD, 0xAD, 0xAD, 0xAD, 0xAD,
- 0xAD, 0xAD, 0xAD, 0xAD, 0xAD, 0xAD, 0xAD, 0xAD,
- 0xAD, 0xAD, 0xAD, 0xAE, 0xAE, 0xAE, 0xAE, 0xAE,
- 0xAE, 0xAE, 0xAE, 0xAE, 0xAE, 0xAE, 0xAE, 0xAE,
- 0xAE, 0xAE, 0xAE, 0xAE, 0xAE, 0xAE, 0xAE, 0xAE,
- 0xAE, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF,
- 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF,
- 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xAF, 0xB0,
- 0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB0,
- 0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB0,
- 0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB0, 0xB1, 0xB1,
- 0xB1, 0xB1, 0xB1, 0xB1, 0xB1, 0xB1, 0xB1, 0xB1,
- 0xB1, 0xB1, 0xB1, 0xB1, 0xB1, 0xB1, 0xB1, 0xB1,
- 0xB1, 0xB1, 0xB1, 0xB1, 0xB2, 0xB2, 0xB2, 0xB2,
- 0xB2, 0xB2, 0xB2, 0xB2, 0xB2, 0xB2, 0xB2, 0xB2,
- 0xB2, 0xB2, 0xB2, 0xB2, 0xB2, 0xB2, 0xB2, 0xB2,
- 0xB2, 0xB2, 0xB2, 0xB3, 0xB3, 0xB3, 0xB3, 0xB3,
- 0xB3, 0xB3, 0xB3, 0xB3, 0xB3, 0xB3, 0xB3, 0xB3,
- 0xB3, 0xB3, 0xB3, 0xB3, 0xB3, 0xB3, 0xB3, 0xB3,
- 0xB3, 0xB3, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4,
- 0xB4, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4,
- 0xB4, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4, 0xB4,
- 0xB4, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5,
- 0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5,
- 0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5, 0xB5,
- 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6,
- 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6,
- 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6, 0xB6,
- 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7,
- 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7,
- 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB7, 0xB8,
- 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8,
- 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8,
- 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB8, 0xB9,
- 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9,
- 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9,
- 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xB9, 0xBA,
- 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA,
- 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA,
- 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBA, 0xBB,
- 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB,
- 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB,
- 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB,
- 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC,
- 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC,
- 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC, 0xBC,
- 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD,
- 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD,
- 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD, 0xBD,
- 0xBD, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE,
- 0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE,
- 0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE, 0xBE,
- 0xBE, 0xBE, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF,
- 0xBF, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF,
- 0xBF, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF, 0xBF,
- 0xBF, 0xBF, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0,
- 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0,
- 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0, 0xC0,
- 0xC0, 0xC0, 0xC0, 0xC0, 0xC1, 0xC1, 0xC1, 0xC1,
- 0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xC1,
- 0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xC1,
- 0xC1, 0xC1, 0xC1, 0xC1, 0xC1, 0xC2, 0xC2, 0xC2,
- 0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC2,
- 0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC2,
- 0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC2, 0xC3, 0xC3,
- 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3,
- 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3,
- 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3, 0xC3,
- 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4,
- 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4,
- 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4, 0xC4,
- 0xC4, 0xC4, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5,
- 0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5,
- 0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5, 0xC5,
- 0xC5, 0xC5, 0xC5, 0xC5, 0xC6, 0xC6, 0xC6, 0xC6,
- 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6,
- 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6,
- 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC6, 0xC7, 0xC7,
- 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7,
- 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7,
- 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7, 0xC7,
- 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
- 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
- 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
- 0xC8, 0xC8, 0xC8, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9,
- 0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9,
- 0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9,
- 0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xC9, 0xCA, 0xCA,
- 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA,
- 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA,
- 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA, 0xCA,
- 0xCA, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB,
- 0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB,
- 0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB, 0xCB,
- 0xCB, 0xCB, 0xCB, 0xCB, 0xCC, 0xCC, 0xCC, 0xCC,
- 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC,
- 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC,
- 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCC, 0xCD,
- 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD,
- 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD,
- 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD, 0xCD,
- 0xCD, 0xCD, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE,
- 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE,
- 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE,
- 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCE, 0xCF, 0xCF,
- 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF,
- 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF,
- 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF, 0xCF,
- 0xCF, 0xCF, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0,
- 0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0,
- 0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD0,
- 0xD0, 0xD0, 0xD0, 0xD0, 0xD0, 0xD1, 0xD1, 0xD1,
- 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1,
- 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1,
- 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1, 0xD1,
- 0xD1, 0xD1, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2,
- 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2,
- 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2,
- 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD2, 0xD3, 0xD3,
- 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3,
- 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3,
- 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3, 0xD3,
- 0xD3, 0xD3, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4,
- 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4,
- 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4,
- 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD4, 0xD5,
- 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5,
- 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5,
- 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5, 0xD5,
- 0xD5, 0xD5, 0xD5, 0xD5, 0xD6, 0xD6, 0xD6, 0xD6,
- 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6,
- 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6,
- 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6, 0xD6,
- 0xD6, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7,
- 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7,
- 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7,
- 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD7, 0xD8, 0xD8,
- 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8,
- 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8,
- 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8, 0xD8,
- 0xD8, 0xD8, 0xD8, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9,
- 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9,
- 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9,
- 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9, 0xD9,
- 0xD9, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA,
- 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA,
- 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA,
- 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDB, 0xDB,
- 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB,
- 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB,
- 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB, 0xDB,
- 0xDB, 0xDB, 0xDB, 0xDB, 0xDC, 0xDC, 0xDC, 0xDC,
- 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC,
- 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC,
- 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC, 0xDC,
- 0xDC, 0xDC, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD,
- 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD,
- 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD,
- 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD, 0xDD,
- 0xDD, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE,
- 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE,
- 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE,
- 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDE, 0xDF,
- 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF,
- 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF,
- 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF,
- 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xDF, 0xE0, 0xE0,
- 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0,
- 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0,
- 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0, 0xE0,
- 0xE0, 0xE0, 0xE0, 0xE0, 0xE1, 0xE1, 0xE1, 0xE1,
- 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1,
- 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1,
- 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1, 0xE1,
- 0xE1, 0xE1, 0xE1, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2,
- 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2,
- 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2,
- 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2, 0xE2,
- 0xE2, 0xE2, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3,
- 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3,
- 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3,
- 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3, 0xE3,
- 0xE3, 0xE3, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4,
- 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4,
- 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4,
- 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4, 0xE4,
- 0xE4, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5,
- 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5,
- 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5,
- 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5, 0xE5,
- 0xE5, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6,
- 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6,
- 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6,
- 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6, 0xE6,
- 0xE6, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7,
- 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7,
- 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7,
- 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7, 0xE7,
- 0xE7, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8,
- 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8,
- 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8,
- 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8, 0xE8,
- 0xE8, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9,
- 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9,
- 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9,
- 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9, 0xE9,
- 0xE9, 0xE9, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA,
- 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA,
- 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA,
- 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA, 0xEA,
- 0xEA, 0xEA, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB,
- 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB,
- 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB,
- 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB, 0xEB,
- 0xEB, 0xEB, 0xEB, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC,
- 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC,
- 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC,
- 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC, 0xEC,
- 0xEC, 0xEC, 0xEC, 0xEC, 0xED, 0xED, 0xED, 0xED,
- 0xED, 0xED, 0xED, 0xED, 0xED, 0xED, 0xED, 0xED,
- 0xED, 0xED, 0xED, 0xED, 0xED, 0xED, 0xED, 0xED,
- 0xED, 0xED, 0xED, 0xED, 0xED, 0xED, 0xED, 0xED,
- 0xED, 0xED, 0xED, 0xED, 0xED, 0xEE, 0xEE, 0xEE,
- 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
- 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
- 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
- 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEF, 0xEF,
- 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF,
- 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF,
- 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF,
- 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF, 0xEF,
- 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0,
- 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0,
- 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0,
- 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0, 0xF0,
- 0xF0, 0xF0, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1,
- 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1,
- 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1,
- 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1, 0xF1,
- 0xF1, 0xF1, 0xF1, 0xF1, 0xF2, 0xF2, 0xF2, 0xF2,
- 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2,
- 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2,
- 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2,
- 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF2, 0xF3, 0xF3,
- 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3,
- 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3,
- 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3,
- 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3, 0xF3,
- 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4,
- 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4,
- 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4,
- 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4, 0xF4,
- 0xF4, 0xF4, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5,
- 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5,
- 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5,
- 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF5,
- 0xF5, 0xF5, 0xF5, 0xF5, 0xF5, 0xF6, 0xF6, 0xF6,
- 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6,
- 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6,
- 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6,
- 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6, 0xF6,
- 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7,
- 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7,
- 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7,
- 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7, 0xF7,
- 0xF7, 0xF7, 0xF7, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8,
- 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8,
- 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8,
- 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8,
- 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF8, 0xF9, 0xF9,
- 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9,
- 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9,
- 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9,
- 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9, 0xF9,
- 0xF9, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA,
- 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA,
- 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA,
- 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFA,
- 0xFA, 0xFA, 0xFA, 0xFA, 0xFA, 0xFB, 0xFB, 0xFB,
- 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB,
- 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB,
- 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB,
- 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB, 0xFB,
- 0xFB, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC,
- 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC,
- 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC,
- 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFC,
- 0xFC, 0xFC, 0xFC, 0xFC, 0xFC, 0xFD, 0xFD, 0xFD,
- 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD,
- 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD,
- 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD,
- 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD, 0xFD,
- 0xFD, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE,
- 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE,
- 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE,
- 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE,
- 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-};
-
-void mdp4_mixer_gc_lut_setup(int mixer_num)
-{
- unsigned char *base;
- uint32 data;
- char val;
- int i, off;
-
- if (mixer_num) /* mixer number, /dev/fb0, /dev/fb1 */
- base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
- else
- base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
-
- base += 0x4000; /* GC_LUT offset */
-
- off = 0;
- for (i = 0; i < 4096; i++) {
- val = gc_lut[i];
- data = (val << 16 | val << 8 | val); /* R, G, and B are the same */
- outpdw(base + off, data);
- off += 4;
- }
-}
-
-uint32 igc_video_lut[] = { /* non linear */
- 0x0, 0x1, 0x2, 0x4, 0x5, 0x6, 0x7, 0x9,
- 0xA, 0xB, 0xC, 0xE, 0xF, 0x10, 0x12, 0x14,
- 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F, 0x21, 0x23,
- 0x25, 0x28, 0x2A, 0x2D, 0x30, 0x32, 0x35, 0x38,
- 0x3B, 0x3E, 0x42, 0x45, 0x48, 0x4C, 0x4F, 0x53,
- 0x57, 0x5B, 0x5F, 0x63, 0x67, 0x6B, 0x70, 0x74,
- 0x79, 0x7E, 0x83, 0x88, 0x8D, 0x92, 0x97, 0x9C,
- 0xA2, 0xA8, 0xAD, 0xB3, 0xB9, 0xBF, 0xC5, 0xCC,
- 0xD2, 0xD8, 0xDF, 0xE6, 0xED, 0xF4, 0xFB, 0x102,
- 0x109, 0x111, 0x118, 0x120, 0x128, 0x130, 0x138, 0x140,
- 0x149, 0x151, 0x15A, 0x162, 0x16B, 0x174, 0x17D, 0x186,
- 0x190, 0x199, 0x1A3, 0x1AC, 0x1B6, 0x1C0, 0x1CA, 0x1D5,
- 0x1DF, 0x1EA, 0x1F4, 0x1FF, 0x20A, 0x215, 0x220, 0x22B,
- 0x237, 0x242, 0x24E, 0x25A, 0x266, 0x272, 0x27F, 0x28B,
- 0x298, 0x2A4, 0x2B1, 0x2BE, 0x2CB, 0x2D8, 0x2E6, 0x2F3,
- 0x301, 0x30F, 0x31D, 0x32B, 0x339, 0x348, 0x356, 0x365,
- 0x374, 0x383, 0x392, 0x3A1, 0x3B1, 0x3C0, 0x3D0, 0x3E0,
- 0x3F0, 0x400, 0x411, 0x421, 0x432, 0x443, 0x454, 0x465,
- 0x476, 0x487, 0x499, 0x4AB, 0x4BD, 0x4CF, 0x4E1, 0x4F3,
- 0x506, 0x518, 0x52B, 0x53E, 0x551, 0x565, 0x578, 0x58C,
- 0x5A0, 0x5B3, 0x5C8, 0x5DC, 0x5F0, 0x605, 0x61A, 0x62E,
- 0x643, 0x659, 0x66E, 0x684, 0x699, 0x6AF, 0x6C5, 0x6DB,
- 0x6F2, 0x708, 0x71F, 0x736, 0x74D, 0x764, 0x77C, 0x793,
- 0x7AB, 0x7C3, 0x7DB, 0x7F3, 0x80B, 0x824, 0x83D, 0x855,
- 0x86F, 0x888, 0x8A1, 0x8BB, 0x8D4, 0x8EE, 0x908, 0x923,
- 0x93D, 0x958, 0x973, 0x98E, 0x9A9, 0x9C4, 0x9DF, 0x9FB,
- 0xA17, 0xA33, 0xA4F, 0xA6C, 0xA88, 0xAA5, 0xAC2, 0xADF,
- 0xAFC, 0xB19, 0xB37, 0xB55, 0xB73, 0xB91, 0xBAF, 0xBCE,
- 0xBEC, 0xC0B, 0xC2A, 0xC4A, 0xC69, 0xC89, 0xCA8, 0xCC8,
- 0xCE8, 0xD09, 0xD29, 0xD4A, 0xD6B, 0xD8C, 0xDAD, 0xDCF,
- 0xDF0, 0xE12, 0xE34, 0xE56, 0xE79, 0xE9B, 0xEBE, 0xEE1,
- 0xF04, 0xF27, 0xF4B, 0xF6E, 0xF92, 0xFB6, 0xFDB, 0xFFF,
-};
-
-void mdp4_vg_igc_lut_setup(int vp_num)
-{
- unsigned char *base;
- int i, voff, off;
- uint32 data, val;
-
- voff = MDP4_VIDEO_OFF * vp_num;
- base = MDP_BASE + MDP4_VIDEO_BASE + voff + 0x5000;
-
- off = 0;
- for (i = 0; i < 256; i++) {
- val = igc_video_lut[i];
- data = (val << 16 | val); /* color 0 and 1 */
- outpdw(base + off, data);
- outpdw(base + off + 0x800, val); /* color 2 */
- off += 4;
- }
-}
-
-uint32 igc_rgb_lut[] = { /* linear */
- 0x0, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70,
- 0x80, 0x91, 0xA1, 0xB1, 0xC1, 0xD1, 0xE1, 0xF1,
- 0x101, 0x111, 0x121, 0x131, 0x141, 0x151, 0x161, 0x171,
- 0x181, 0x191, 0x1A2, 0x1B2, 0x1C2, 0x1D2, 0x1E2, 0x1F2,
- 0x202, 0x212, 0x222, 0x232, 0x242, 0x252, 0x262, 0x272,
- 0x282, 0x292, 0x2A2, 0x2B3, 0x2C3, 0x2D3, 0x2E3, 0x2F3,
- 0x303, 0x313, 0x323, 0x333, 0x343, 0x353, 0x363, 0x373,
- 0x383, 0x393, 0x3A3, 0x3B3, 0x3C4, 0x3D4, 0x3E4, 0x3F4,
- 0x404, 0x414, 0x424, 0x434, 0x444, 0x454, 0x464, 0x474,
- 0x484, 0x494, 0x4A4, 0x4B4, 0x4C4, 0x4D5, 0x4E5, 0x4F5,
- 0x505, 0x515, 0x525, 0x535, 0x545, 0x555, 0x565, 0x575,
- 0x585, 0x595, 0x5A5, 0x5B5, 0x5C5, 0x5D5, 0x5E6, 0x5F6,
- 0x606, 0x616, 0x626, 0x636, 0x646, 0x656, 0x666, 0x676,
- 0x686, 0x696, 0x6A6, 0x6B6, 0x6C6, 0x6D6, 0x6E6, 0x6F7,
- 0x707, 0x717, 0x727, 0x737, 0x747, 0x757, 0x767, 0x777,
- 0x787, 0x797, 0x7A7, 0x7B7, 0x7C7, 0x7D7, 0x7E7, 0x7F7,
- 0x808, 0x818, 0x828, 0x838, 0x848, 0x858, 0x868, 0x878,
- 0x888, 0x898, 0x8A8, 0x8B8, 0x8C8, 0x8D8, 0x8E8, 0x8F8,
- 0x908, 0x919, 0x929, 0x939, 0x949, 0x959, 0x969, 0x979,
- 0x989, 0x999, 0x9A9, 0x9B9, 0x9C9, 0x9D9, 0x9E9, 0x9F9,
- 0xA09, 0xA19, 0xA2A, 0xA3A, 0xA4A, 0xA5A, 0xA6A, 0xA7A,
- 0xA8A, 0xA9A, 0xAAA, 0xABA, 0xACA, 0xADA, 0xAEA, 0xAFA,
- 0xB0A, 0xB1A, 0xB2A, 0xB3B, 0xB4B, 0xB5B, 0xB6B, 0xB7B,
- 0xB8B, 0xB9B, 0xBAB, 0xBBB, 0xBCB, 0xBDB, 0xBEB, 0xBFB,
- 0xC0B, 0xC1B, 0xC2B, 0xC3B, 0xC4C, 0xC5C, 0xC6C, 0xC7C,
- 0xC8C, 0xC9C, 0xCAC, 0xCBC, 0xCCC, 0xCDC, 0xCEC, 0xCFC,
- 0xD0C, 0xD1C, 0xD2C, 0xD3C, 0xD4C, 0xD5D, 0xD6D, 0xD7D,
- 0xD8D, 0xD9D, 0xDAD, 0xDBD, 0xDCD, 0xDDD, 0xDED, 0xDFD,
- 0xE0D, 0xE1D, 0xE2D, 0xE3D, 0xE4D, 0xE5D, 0xE6E, 0xE7E,
- 0xE8E, 0xE9E, 0xEAE, 0xEBE, 0xECE, 0xEDE, 0xEEE, 0xEFE,
- 0xF0E, 0xF1E, 0xF2E, 0xF3E, 0xF4E, 0xF5E, 0xF6E, 0xF7F,
- 0xF8F, 0xF9F, 0xFAF, 0xFBF, 0xFCF, 0xFDF, 0xFEF, 0xFFF,
-};
-
-void mdp4_rgb_igc_lut_setup(int num)
-{
- unsigned char *base;
- int i, voff, off;
- uint32 data, val;
-
- voff = MDP4_RGB_OFF * num;
- base = MDP_BASE + MDP4_RGB_BASE + voff + 0x5000;
-
- off = 0;
- for (i = 0; i < 256; i++) {
- val = igc_rgb_lut[i];
- data = (val << 16 | val); /* color 0 and 1 */
- outpdw(base + off, data);
- outpdw(base + off + 0x800, val); /* color 2 */
- off += 4;
- }
-}
diff --git a/drivers/staging/msm/mdp_cursor.c b/drivers/staging/msm/mdp_cursor.c
deleted file mode 100644
index 7d28f30d931..00000000000
--- a/drivers/staging/msm/mdp_cursor.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
-
-#include <mach/hardware.h>
-#include <asm/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/spinlock.h>
-
-#include <linux/fb.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-
-static int cursor_enabled;
-
-int mdp_hw_cursor_update(struct fb_info *info, struct fb_cursor *cursor)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- struct fb_image *img = &cursor->image;
- int calpha_en, transp_en;
- int alpha;
- int ret = 0;
-
- if ((img->width > MDP_CURSOR_WIDTH) ||
- (img->height > MDP_CURSOR_HEIGHT) ||
- (img->depth != 32))
- return -EINVAL;
-
- if (cursor->set & FB_CUR_SETPOS)
- MDP_OUTP(MDP_BASE + 0x9004c, (img->dy << 16) | img->dx);
-
- if (cursor->set & FB_CUR_SETIMAGE) {
- ret = copy_from_user(mfd->cursor_buf, img->data,
- img->width*img->height*4);
- if (ret)
- return ret;
-
- if (img->bg_color == 0xffffffff)
- transp_en = 0;
- else
- transp_en = 1;
-
- alpha = (img->fg_color & 0xff000000) >> 24;
-
- if (alpha)
- calpha_en = 0x2; /* xrgb */
- else
- calpha_en = 0x1; /* argb */
-
- MDP_OUTP(MDP_BASE + 0x90044, (img->height << 16) | img->width);
- MDP_OUTP(MDP_BASE + 0x90048, mfd->cursor_buf_phys);
- /* order the writes the cursor_buf before updating the
- * hardware */
-// dma_coherent_pre_ops();
- MDP_OUTP(MDP_BASE + 0x90060,
- (transp_en << 3) | (calpha_en << 1) |
- (inp32(MDP_BASE + 0x90060) & 0x1));
-#ifdef CONFIG_FB_MSM_MDP40
- MDP_OUTP(MDP_BASE + 0x90064, (alpha << 24));
- MDP_OUTP(MDP_BASE + 0x90068, (0xffffff & img->bg_color));
- MDP_OUTP(MDP_BASE + 0x9006C, (0xffffff & img->bg_color));
-#else
- MDP_OUTP(MDP_BASE + 0x90064,
- (alpha << 24) | (0xffffff & img->bg_color));
- MDP_OUTP(MDP_BASE + 0x90068, 0);
-#endif
- }
-
- if ((cursor->enable) && (!cursor_enabled)) {
- cursor_enabled = 1;
- MDP_OUTP(MDP_BASE + 0x90060, inp32(MDP_BASE + 0x90060) | 0x1);
- } else if ((!cursor->enable) && (cursor_enabled)) {
- cursor_enabled = 0;
- MDP_OUTP(MDP_BASE + 0x90060,
- inp32(MDP_BASE + 0x90060) & (~0x1));
- }
-
- return 0;
-}
diff --git a/drivers/staging/msm/mdp_dma.c b/drivers/staging/msm/mdp_dma.c
deleted file mode 100644
index 639918b143b..00000000000
--- a/drivers/staging/msm/mdp_dma.c
+++ /dev/null
@@ -1,561 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
-
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/spinlock.h>
-
-#include <linux/fb.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-#include "mddihost.h"
-
-static uint32 mdp_last_dma2_update_width;
-static uint32 mdp_last_dma2_update_height;
-static uint32 mdp_curr_dma2_update_width;
-static uint32 mdp_curr_dma2_update_height;
-
-ktime_t mdp_dma2_last_update_time = { 0 };
-
-int mdp_lcd_rd_cnt_offset_slow = 20;
-int mdp_lcd_rd_cnt_offset_fast = 20;
-int mdp_vsync_usec_wait_line_too_short = 5;
-uint32 mdp_dma2_update_time_in_usec;
-uint32 mdp_total_vdopkts;
-
-extern u32 msm_fb_debug_enabled;
-extern struct workqueue_struct *mdp_dma_wq;
-
-int vsync_start_y_adjust = 4;
-
-static void mdp_dma2_update_lcd(struct msm_fb_data_type *mfd)
-{
- MDPIBUF *iBuf = &mfd->ibuf;
- int mddi_dest = FALSE;
- uint32 outBpp = iBuf->bpp;
- uint32 dma2_cfg_reg;
- uint8 *src;
- uint32 mddi_ld_param;
- uint16 mddi_vdo_packet_reg;
- struct msm_fb_panel_data *pdata =
- (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
- uint32 ystride = mfd->fbi->fix.line_length;
-
- dma2_cfg_reg = DMA_PACK_TIGHT | DMA_PACK_ALIGN_LSB |
- DMA_OUT_SEL_AHB | DMA_IBUF_NONCONTIGUOUS;
-
-#ifdef CONFIG_FB_MSM_MDP30
- /*
- * Software workaround: On 7x25/7x27, the MDP will not
- * respond if dma_w is 1 pixel. Set the update width to
- * 2 pixels and adjust the x offset if needed.
- */
- if (iBuf->dma_w == 1) {
- iBuf->dma_w = 2;
- if (iBuf->dma_x == (iBuf->ibuf_width - 2))
- iBuf->dma_x--;
- }
-#endif
-
- if (mfd->fb_imgType == MDP_BGR_565)
- dma2_cfg_reg |= DMA_PACK_PATTERN_BGR;
- else
- dma2_cfg_reg |= DMA_PACK_PATTERN_RGB;
-
- if (outBpp == 4)
- dma2_cfg_reg |= DMA_IBUF_C3ALPHA_EN;
-
- if (outBpp == 2)
- dma2_cfg_reg |= DMA_IBUF_FORMAT_RGB565;
-
- mddi_ld_param = 0;
- mddi_vdo_packet_reg = mfd->panel_info.mddi.vdopkt;
-
- if ((mfd->panel_info.type == MDDI_PANEL) ||
- (mfd->panel_info.type == EXT_MDDI_PANEL)) {
- dma2_cfg_reg |= DMA_OUT_SEL_MDDI;
- mddi_dest = TRUE;
-
- if (mfd->panel_info.type == MDDI_PANEL) {
- mdp_total_vdopkts++;
- if (mfd->panel_info.pdest == DISPLAY_1) {
- dma2_cfg_reg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY;
- mddi_ld_param = 0;
-#ifdef MDDI_HOST_WINDOW_WORKAROUND
- mddi_window_adjust(mfd, iBuf->dma_x,
- iBuf->dma_w - 1, iBuf->dma_y,
- iBuf->dma_h - 1);
-#endif
- } else {
- dma2_cfg_reg |=
- DMA_MDDI_DMAOUT_LCD_SEL_SECONDARY;
- mddi_ld_param = 1;
-#ifdef MDDI_HOST_WINDOW_WORKAROUND
- mddi_window_adjust(mfd, iBuf->dma_x,
- iBuf->dma_w - 1, iBuf->dma_y,
- iBuf->dma_h - 1);
-#endif
- }
- } else {
- dma2_cfg_reg |= DMA_MDDI_DMAOUT_LCD_SEL_EXTERNAL;
- mddi_ld_param = 2;
- }
- } else {
- if (mfd->panel_info.pdest == DISPLAY_1) {
- dma2_cfg_reg |= DMA_AHBM_LCD_SEL_PRIMARY;
- outp32(MDP_EBI2_LCD0, mfd->data_port_phys);
- } else {
- dma2_cfg_reg |= DMA_AHBM_LCD_SEL_SECONDARY;
- outp32(MDP_EBI2_LCD1, mfd->data_port_phys);
- }
- }
-
- dma2_cfg_reg |= DMA_DITHER_EN;
-
- src = (uint8 *) iBuf->buf;
- /* starting input address */
- src += iBuf->dma_x * outBpp + iBuf->dma_y * ystride;
-
- mdp_curr_dma2_update_width = iBuf->dma_w;
- mdp_curr_dma2_update_height = iBuf->dma_h;
-
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
-#ifdef CONFIG_FB_MSM_MDP22
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0184,
- (iBuf->dma_h << 16 | iBuf->dma_w));
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0188, src);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x018C, ystride);
-#else
- MDP_OUTP(MDP_BASE + 0x90004, (iBuf->dma_h << 16 | iBuf->dma_w));
- MDP_OUTP(MDP_BASE + 0x90008, src);
- MDP_OUTP(MDP_BASE + 0x9000c, ystride);
-#endif
-
- if (mfd->panel_info.bpp == 18) {
- dma2_cfg_reg |= DMA_DSTC0G_6BITS | /* 666 18BPP */
- DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
- } else {
- dma2_cfg_reg |= DMA_DSTC0G_6BITS | /* 565 16BPP */
- DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
- }
-
- if (mddi_dest) {
-#ifdef CONFIG_FB_MSM_MDP22
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0194,
- (iBuf->dma_y << 16) | iBuf->dma_x);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0, mddi_ld_param);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4,
- (MDDI_VDO_PACKET_DESC << 16) | mddi_vdo_packet_reg);
-#else
- MDP_OUTP(MDP_BASE + 0x90010, (iBuf->dma_y << 16) | iBuf->dma_x);
- MDP_OUTP(MDP_BASE + 0x00090, mddi_ld_param);
- MDP_OUTP(MDP_BASE + 0x00094,
- (MDDI_VDO_PACKET_DESC << 16) | mddi_vdo_packet_reg);
-#endif
- } else {
- /* setting EBI2 LCDC write window */
- pdata->set_rect(iBuf->dma_x, iBuf->dma_y, iBuf->dma_w,
- iBuf->dma_h);
- }
-
- /* dma2 config register */
-#ifdef MDP_HW_VSYNC
- MDP_OUTP(MDP_BASE + 0x90000, dma2_cfg_reg);
-
- if ((mfd->use_mdp_vsync) &&
- (mfd->ibuf.vsync_enable) && (mfd->panel_info.lcd.vsync_enable)) {
- uint32 start_y;
-
- if (vsync_start_y_adjust <= iBuf->dma_y)
- start_y = iBuf->dma_y - vsync_start_y_adjust;
- else
- start_y =
- (mfd->total_lcd_lines - 1) - (vsync_start_y_adjust -
- iBuf->dma_y);
-
- /*
- * MDP VSYNC clock must be On by now so, we don't have to
- * re-enable it
- */
- MDP_OUTP(MDP_BASE + 0x210, start_y);
- MDP_OUTP(MDP_BASE + 0x20c, 1); /* enable prim vsync */
- } else {
- MDP_OUTP(MDP_BASE + 0x20c, 0); /* disable prim vsync */
- }
-#else
-#ifdef CONFIG_FB_MSM_MDP22
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0180, dma2_cfg_reg);
-#else
- MDP_OUTP(MDP_BASE + 0x90000, dma2_cfg_reg);
-#endif
-#endif /* MDP_HW_VSYNC */
-
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-}
-
-static ktime_t vt = { 0 };
-int mdp_usec_diff_threshold = 100;
-int mdp_expected_usec_wait;
-
-enum hrtimer_restart mdp_dma2_vsync_hrtimer_handler(struct hrtimer *ht)
-{
- struct msm_fb_data_type *mfd = NULL;
-
- mfd = container_of(ht, struct msm_fb_data_type, dma_hrtimer);
-
- mdp_pipe_kickoff(MDP_DMA2_TERM, mfd);
-
- if (msm_fb_debug_enabled) {
- ktime_t t;
- int usec_diff;
- int actual_wait;
-
- t = ktime_get_real();
-
- actual_wait =
- (t.tv.sec - vt.tv.sec) * 1000000 + (t.tv.nsec -
- vt.tv.nsec) / 1000;
- usec_diff = actual_wait - mdp_expected_usec_wait;
-
- if ((mdp_usec_diff_threshold < usec_diff) || (usec_diff < 0))
- MSM_FB_DEBUG
- ("HRT Diff = %d usec Exp=%d usec Act=%d usec\n",
- usec_diff, mdp_expected_usec_wait, actual_wait);
- }
-
- return HRTIMER_NORESTART;
-}
-
-static void mdp_dma_schedule(struct msm_fb_data_type *mfd, uint32 term)
-{
- /*
- * dma2 configure VSYNC block
- * vsync supported on Primary LCD only for now
- */
- int32 mdp_lcd_rd_cnt;
- uint32 usec_wait_time;
- uint32 start_y;
-
- /*
- * ToDo: if we can move HRT timer callback to workqueue, we can
- * move DMA2 power on under mdp_pipe_kickoff().
- * This will save a power for hrt time wait.
- * However if the latency for context switch (hrt irq -> workqueue)
- * is too big, we will miss the vsync timing.
- */
- if (term == MDP_DMA2_TERM)
- mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- mdp_dma2_update_time_in_usec =
- MDP_KTIME2USEC(mdp_dma2_last_update_time);
-
- if ((!mfd->ibuf.vsync_enable) || (!mfd->panel_info.lcd.vsync_enable)
- || (mfd->use_mdp_vsync)) {
- mdp_pipe_kickoff(term, mfd);
- return;
- }
- /* SW vsync logic starts here */
-
- /* get current rd counter */
- mdp_lcd_rd_cnt = mdp_get_lcd_line_counter(mfd);
- if (mdp_dma2_update_time_in_usec != 0) {
- uint32 num, den;
-
- /*
- * roi width boundary calculation to know the size of pixel
- * width that MDP can send faster or slower than LCD read
- * pointer
- */
-
- num = mdp_last_dma2_update_width * mdp_last_dma2_update_height;
- den =
- (((mfd->panel_info.lcd.refx100 * mfd->total_lcd_lines) /
- 1000) * (mdp_dma2_update_time_in_usec / 100)) / 1000;
-
- if (den == 0)
- mfd->vsync_width_boundary[mdp_last_dma2_update_width] =
- mfd->panel_info.xres + 1;
- else
- mfd->vsync_width_boundary[mdp_last_dma2_update_width] =
- (int)(num / den);
- }
-
- if (mfd->vsync_width_boundary[mdp_last_dma2_update_width] >
- mdp_curr_dma2_update_width) {
- /* MDP wrp is faster than LCD rdp */
- mdp_lcd_rd_cnt += mdp_lcd_rd_cnt_offset_fast;
- } else {
- /* MDP wrp is slower than LCD rdp */
- mdp_lcd_rd_cnt -= mdp_lcd_rd_cnt_offset_slow;
- }
-
- if (mdp_lcd_rd_cnt < 0)
- mdp_lcd_rd_cnt = mfd->total_lcd_lines + mdp_lcd_rd_cnt;
- else if (mdp_lcd_rd_cnt > mfd->total_lcd_lines)
- mdp_lcd_rd_cnt = mdp_lcd_rd_cnt - mfd->total_lcd_lines - 1;
-
- /* get wrt pointer position */
- start_y = mfd->ibuf.dma_y;
-
- /* measure line difference between start_y and rd counter */
- if (start_y > mdp_lcd_rd_cnt) {
- /*
- * *100 for lcd_ref_hzx100 was already multiplied by 100
- * *1000000 is for usec conversion
- */
-
- if ((start_y - mdp_lcd_rd_cnt) <=
- mdp_vsync_usec_wait_line_too_short)
- usec_wait_time = 0;
- else
- usec_wait_time =
- ((start_y -
- mdp_lcd_rd_cnt) * 1000000) /
- ((mfd->total_lcd_lines *
- mfd->panel_info.lcd.refx100) / 100);
- } else {
- if ((start_y + (mfd->total_lcd_lines - mdp_lcd_rd_cnt)) <=
- mdp_vsync_usec_wait_line_too_short)
- usec_wait_time = 0;
- else
- usec_wait_time =
- ((start_y +
- (mfd->total_lcd_lines -
- mdp_lcd_rd_cnt)) * 1000000) /
- ((mfd->total_lcd_lines *
- mfd->panel_info.lcd.refx100) / 100);
- }
-
- mdp_last_dma2_update_width = mdp_curr_dma2_update_width;
- mdp_last_dma2_update_height = mdp_curr_dma2_update_height;
-
- if (usec_wait_time == 0) {
- mdp_pipe_kickoff(term, mfd);
- } else {
- ktime_t wait_time;
-
- wait_time.tv.sec = 0;
- wait_time.tv.nsec = usec_wait_time * 1000;
-
- if (msm_fb_debug_enabled) {
- vt = ktime_get_real();
- mdp_expected_usec_wait = usec_wait_time;
- }
- hrtimer_start(&mfd->dma_hrtimer, wait_time, HRTIMER_MODE_REL);
- }
-}
-
-#ifdef MDDI_HOST_WINDOW_WORKAROUND
-void mdp_dma2_update(struct msm_fb_data_type *mfd)
-{
- MDPIBUF *iBuf;
- uint32 upper_height;
-
- if (mfd->panel.type == EXT_MDDI_PANEL) {
- mdp_dma2_update_sub(mfd);
- return;
- }
-
- iBuf = &mfd->ibuf;
-
- upper_height =
- (uint32) mddi_assign_pkt_height((uint16) iBuf->dma_w,
- (uint16) iBuf->dma_h, 18);
-
- if (upper_height >= iBuf->dma_h) {
- mdp_dma2_update_sub(mfd);
- } else {
- uint32 lower_height;
-
- /* sending the upper region first */
- lower_height = iBuf->dma_h - upper_height;
- iBuf->dma_h = upper_height;
- mdp_dma2_update_sub(mfd);
-
- /* sending the lower region second */
- iBuf->dma_h = lower_height;
- iBuf->dma_y += upper_height;
- iBuf->vsync_enable = FALSE;
- mdp_dma2_update_sub(mfd);
- }
-}
-
-void mdp_dma2_update_sub(struct msm_fb_data_type *mfd)
-#else
-void mdp_dma2_update(struct msm_fb_data_type *mfd)
-#endif
-{
- down(&mfd->dma->mutex);
- if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
- down(&mfd->sem);
- mfd->ibuf_flushed = TRUE;
- mdp_dma2_update_lcd(mfd);
-
- mdp_enable_irq(MDP_DMA2_TERM);
- mfd->dma->busy = TRUE;
- INIT_COMPLETION(mfd->dma->comp);
-
- /* schedule DMA to start */
- mdp_dma_schedule(mfd, MDP_DMA2_TERM);
- up(&mfd->sem);
-
- /* wait until DMA finishes the current job */
- wait_for_completion_killable(&mfd->dma->comp);
- mdp_disable_irq(MDP_DMA2_TERM);
-
- /* signal if pan function is waiting for the update completion */
- if (mfd->pan_waiting) {
- mfd->pan_waiting = FALSE;
- complete(&mfd->pan_comp);
- }
- }
- up(&mfd->dma->mutex);
-}
-
-void mdp_lcd_update_workqueue_handler(struct work_struct *work)
-{
- struct msm_fb_data_type *mfd = NULL;
-
- mfd = container_of(work, struct msm_fb_data_type, dma_update_worker);
- if (mfd)
- mfd->dma_fnc(mfd);
-}
-
-void mdp_set_dma_pan_info(struct fb_info *info, struct mdp_dirty_region *dirty,
- boolean sync)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- MDPIBUF *iBuf;
- int bpp = info->var.bits_per_pixel / 8;
-
- down(&mfd->sem);
- iBuf = &mfd->ibuf;
- iBuf->buf = (uint8 *) info->fix.smem_start;
- iBuf->buf += info->var.xoffset * bpp +
- info->var.yoffset * info->fix.line_length;
-
- iBuf->ibuf_width = info->var.xres_virtual;
- iBuf->bpp = bpp;
-
- iBuf->vsync_enable = sync;
-
- if (dirty) {
- /*
- * ToDo: dirty region check inside var.xoffset+xres
- * <-> var.yoffset+yres
- */
- iBuf->dma_x = dirty->xoffset % info->var.xres;
- iBuf->dma_y = dirty->yoffset % info->var.yres;
- iBuf->dma_w = dirty->width;
- iBuf->dma_h = dirty->height;
- } else {
- iBuf->dma_x = 0;
- iBuf->dma_y = 0;
- iBuf->dma_w = info->var.xres;
- iBuf->dma_h = info->var.yres;
- }
- mfd->ibuf_flushed = FALSE;
- up(&mfd->sem);
-}
-
-void mdp_set_offset_info(struct fb_info *info, uint32 addr, uint32 sync)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- MDPIBUF *iBuf;
-
- int bpp = info->var.bits_per_pixel / 8;
-
- down(&mfd->sem);
- iBuf = &mfd->ibuf;
- iBuf->ibuf_width = info->var.xres_virtual;
- iBuf->bpp = bpp;
- iBuf->vsync_enable = sync;
- iBuf->dma_x = 0;
- iBuf->dma_y = 0;
- iBuf->dma_w = info->var.xres;
- iBuf->dma_h = info->var.yres;
- iBuf->buf = (uint8 *) addr;
-
- mfd->ibuf_flushed = FALSE;
- up(&mfd->sem);
-}
-
-void mdp_dma_pan_update(struct fb_info *info)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- MDPIBUF *iBuf;
-
- iBuf = &mfd->ibuf;
-
- if (mfd->sw_currently_refreshing) {
- /* we need to wait for the pending update */
- mfd->pan_waiting = TRUE;
- if (!mfd->ibuf_flushed) {
- wait_for_completion_killable(&mfd->pan_comp);
- }
- /* waiting for this update to complete */
- mfd->pan_waiting = TRUE;
- wait_for_completion_killable(&mfd->pan_comp);
- } else
- mfd->dma_fnc(mfd);
-}
-
-void mdp_refresh_screen(unsigned long data)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
-
- if ((mfd->sw_currently_refreshing) && (mfd->sw_refreshing_enable)) {
- init_timer(&mfd->refresh_timer);
- mfd->refresh_timer.function = mdp_refresh_screen;
- mfd->refresh_timer.data = data;
-
- if (mfd->dma->busy)
- /* come back in 1 msec */
- mfd->refresh_timer.expires = jiffies + (HZ / 1000);
- else
- mfd->refresh_timer.expires =
- jiffies + mfd->refresh_timer_duration;
-
- add_timer(&mfd->refresh_timer);
-
- if (!mfd->dma->busy) {
- if (!queue_work(mdp_dma_wq, &mfd->dma_update_worker)) {
- MSM_FB_DEBUG("mdp_dma: can't queue_work! -> \
- MDP/MDDI/LCD clock speed needs to be increased\n");
- }
- }
- } else {
- if (!mfd->hw_refresh)
- complete(&mfd->refresher_comp);
- }
-}
diff --git a/drivers/staging/msm/mdp_dma_lcdc.c b/drivers/staging/msm/mdp_dma_lcdc.c
deleted file mode 100644
index b57fa1a0ceb..00000000000
--- a/drivers/staging/msm/mdp_dma_lcdc.c
+++ /dev/null
@@ -1,379 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
-#include <linux/delay.h>
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/spinlock.h>
-
-#include <linux/fb.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-#include "mdp4.h"
-
-#ifdef CONFIG_FB_MSM_MDP40
-#define LCDC_BASE 0xC0000
-#define DTV_BASE 0xD0000
-#define DMA_E_BASE 0xB0000
-#else
-#define LCDC_BASE 0xE0000
-#endif
-
-#define DMA_P_BASE 0x90000
-
-extern spinlock_t mdp_spin_lock;
-#ifndef CONFIG_FB_MSM_MDP40
-extern uint32 mdp_intr_mask;
-#endif
-
-int first_pixel_start_x;
-int first_pixel_start_y;
-
-int mdp_lcdc_on(struct platform_device *pdev)
-{
- int lcdc_width;
- int lcdc_height;
- int lcdc_bpp;
- int lcdc_border_clr;
- int lcdc_underflow_clr;
- int lcdc_hsync_skew;
-
- int hsync_period;
- int hsync_ctrl;
- int vsync_period;
- int display_hctl;
- int display_v_start;
- int display_v_end;
- int active_hctl;
- int active_h_start;
- int active_h_end;
- int active_v_start;
- int active_v_end;
- int ctrl_polarity;
- int h_back_porch;
- int h_front_porch;
- int v_back_porch;
- int v_front_porch;
- int hsync_pulse_width;
- int vsync_pulse_width;
- int hsync_polarity;
- int vsync_polarity;
- int data_en_polarity;
- int hsync_start_x;
- int hsync_end_x;
- uint8 *buf;
- int bpp;
- uint32 dma2_cfg_reg;
- struct fb_info *fbi;
- struct fb_var_screeninfo *var;
- struct msm_fb_data_type *mfd;
- uint32 dma_base;
- uint32 timer_base = LCDC_BASE;
- uint32 block = MDP_DMA2_BLOCK;
- int ret;
-
- mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
-
- if (!mfd)
- return -ENODEV;
-
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- fbi = mfd->fbi;
- var = &fbi->var;
-
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- bpp = fbi->var.bits_per_pixel / 8;
- buf = (uint8 *) fbi->fix.smem_start;
- buf += fbi->var.xoffset * bpp + fbi->var.yoffset * fbi->fix.line_length;
-
- dma2_cfg_reg = DMA_PACK_ALIGN_LSB | DMA_DITHER_EN | DMA_OUT_SEL_LCDC;
-
- if (mfd->fb_imgType == MDP_BGR_565)
- dma2_cfg_reg |= DMA_PACK_PATTERN_BGR;
- else
- dma2_cfg_reg |= DMA_PACK_PATTERN_RGB;
-
- if (bpp == 2)
- dma2_cfg_reg |= DMA_IBUF_FORMAT_RGB565;
- else if (bpp == 3)
- dma2_cfg_reg |= DMA_IBUF_FORMAT_RGB888;
- else
- dma2_cfg_reg |= DMA_IBUF_FORMAT_xRGB8888_OR_ARGB8888;
-
- switch (mfd->panel_info.bpp) {
- case 24:
- dma2_cfg_reg |= DMA_DSTC0G_8BITS |
- DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
- break;
-
- case 18:
- dma2_cfg_reg |= DMA_DSTC0G_6BITS |
- DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
- break;
-
- case 16:
- dma2_cfg_reg |= DMA_DSTC0G_6BITS |
- DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
- break;
-
- default:
- printk(KERN_ERR "mdp lcdc can't support format %d bpp!\n",
- mfd->panel_info.bpp);
- return -ENODEV;
- }
-
- /* DMA register config */
-
- dma_base = DMA_P_BASE;
-
-#ifdef CONFIG_FB_MSM_MDP40
- if (mfd->panel.type == HDMI_PANEL)
- dma_base = DMA_E_BASE;
-#endif
-
- /* starting address */
- MDP_OUTP(MDP_BASE + dma_base + 0x8, (uint32) buf);
- /* active window width and height */
- MDP_OUTP(MDP_BASE + dma_base + 0x4, ((fbi->var.yres) << 16) |
- (fbi->var.xres));
- /* buffer ystride */
- MDP_OUTP(MDP_BASE + dma_base + 0xc, fbi->fix.line_length);
- /* x/y coordinate = always 0 for lcdc */
- MDP_OUTP(MDP_BASE + dma_base + 0x10, 0);
- /* dma config */
- MDP_OUTP(MDP_BASE + dma_base, dma2_cfg_reg);
-
- /*
- * LCDC timing setting
- */
- h_back_porch = var->left_margin;
- h_front_porch = var->right_margin;
- v_back_porch = var->upper_margin;
- v_front_porch = var->lower_margin;
- hsync_pulse_width = var->hsync_len;
- vsync_pulse_width = var->vsync_len;
- lcdc_border_clr = mfd->panel_info.lcdc.border_clr;
- lcdc_underflow_clr = mfd->panel_info.lcdc.underflow_clr;
- lcdc_hsync_skew = mfd->panel_info.lcdc.hsync_skew;
-
- lcdc_width = mfd->panel_info.xres;
- lcdc_height = mfd->panel_info.yres;
- lcdc_bpp = mfd->panel_info.bpp;
-
- hsync_period =
- hsync_pulse_width + h_back_porch + lcdc_width + h_front_porch;
- hsync_ctrl = (hsync_period << 16) | hsync_pulse_width;
- hsync_start_x = hsync_pulse_width + h_back_porch;
- hsync_end_x = hsync_period - h_front_porch - 1;
- display_hctl = (hsync_end_x << 16) | hsync_start_x;
-
- vsync_period =
- (vsync_pulse_width + v_back_porch + lcdc_height +
- v_front_porch) * hsync_period;
- display_v_start =
- (vsync_pulse_width + v_back_porch) * hsync_period + lcdc_hsync_skew;
- display_v_end =
- vsync_period - (v_front_porch * hsync_period) + lcdc_hsync_skew - 1;
-
- if (lcdc_width != var->xres) {
- active_h_start = hsync_start_x + first_pixel_start_x;
- active_h_end = active_h_start + var->xres - 1;
- active_hctl =
- ACTIVE_START_X_EN | (active_h_end << 16) | active_h_start;
- } else {
- active_hctl = 0;
- }
-
- if (lcdc_height != var->yres) {
- active_v_start =
- display_v_start + first_pixel_start_y * hsync_period;
- active_v_end = active_v_start + (var->yres) * hsync_period - 1;
- active_v_start |= ACTIVE_START_Y_EN;
- } else {
- active_v_start = 0;
- active_v_end = 0;
- }
-
-
-#ifdef CONFIG_FB_MSM_MDP40
- if (mfd->panel.type == HDMI_PANEL) {
- block = MDP_DMA_E_BLOCK;
- timer_base = DTV_BASE;
- hsync_polarity = 0;
- vsync_polarity = 0;
- } else {
- hsync_polarity = 1;
- vsync_polarity = 1;
- }
-
- lcdc_underflow_clr |= 0x80000000; /* enable recovery */
-#else
- hsync_polarity = 0;
- vsync_polarity = 0;
-#endif
- data_en_polarity = 0;
-
- ctrl_polarity =
- (data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
-
- MDP_OUTP(MDP_BASE + timer_base + 0x4, hsync_ctrl);
- MDP_OUTP(MDP_BASE + timer_base + 0x8, vsync_period);
- MDP_OUTP(MDP_BASE + timer_base + 0xc, vsync_pulse_width * hsync_period);
- if (timer_base == LCDC_BASE) {
- MDP_OUTP(MDP_BASE + timer_base + 0x10, display_hctl);
- MDP_OUTP(MDP_BASE + timer_base + 0x14, display_v_start);
- MDP_OUTP(MDP_BASE + timer_base + 0x18, display_v_end);
- MDP_OUTP(MDP_BASE + timer_base + 0x28, lcdc_border_clr);
- MDP_OUTP(MDP_BASE + timer_base + 0x2c, lcdc_underflow_clr);
- MDP_OUTP(MDP_BASE + timer_base + 0x30, lcdc_hsync_skew);
- MDP_OUTP(MDP_BASE + timer_base + 0x38, ctrl_polarity);
- MDP_OUTP(MDP_BASE + timer_base + 0x1c, active_hctl);
- MDP_OUTP(MDP_BASE + timer_base + 0x20, active_v_start);
- MDP_OUTP(MDP_BASE + timer_base + 0x24, active_v_end);
- } else {
- MDP_OUTP(MDP_BASE + timer_base + 0x18, display_hctl);
- MDP_OUTP(MDP_BASE + timer_base + 0x1c, display_v_start);
- MDP_OUTP(MDP_BASE + timer_base + 0x20, display_v_end);
- MDP_OUTP(MDP_BASE + timer_base + 0x40, lcdc_border_clr);
- MDP_OUTP(MDP_BASE + timer_base + 0x44, lcdc_underflow_clr);
- MDP_OUTP(MDP_BASE + timer_base + 0x48, lcdc_hsync_skew);
- MDP_OUTP(MDP_BASE + timer_base + 0x50, ctrl_polarity);
- MDP_OUTP(MDP_BASE + timer_base + 0x2c, active_hctl);
- MDP_OUTP(MDP_BASE + timer_base + 0x30, active_v_start);
- MDP_OUTP(MDP_BASE + timer_base + 0x38, active_v_end);
- }
-
- ret = panel_next_on(pdev);
- if (ret == 0) {
- /* enable LCDC block */
- MDP_OUTP(MDP_BASE + timer_base, 1);
- mdp_pipe_ctrl(block, MDP_BLOCK_POWER_ON, FALSE);
- }
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-
- return ret;
-}
-
-int mdp_lcdc_off(struct platform_device *pdev)
-{
- int ret = 0;
- struct msm_fb_data_type *mfd;
- uint32 timer_base = LCDC_BASE;
- uint32 block = MDP_DMA2_BLOCK;
-
- mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
-
-#ifdef CONFIG_FB_MSM_MDP40
- if (mfd->panel.type == HDMI_PANEL) {
- block = MDP_DMA_E_BLOCK;
- timer_base = DTV_BASE;
- }
-#endif
-
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- MDP_OUTP(MDP_BASE + timer_base, 0);
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- mdp_pipe_ctrl(block, MDP_BLOCK_POWER_OFF, FALSE);
-
- ret = panel_next_off(pdev);
-
- /* delay to make sure the last frame finishes */
- mdelay(100);
-
- return ret;
-}
-
-void mdp_lcdc_update(struct msm_fb_data_type *mfd)
-{
- struct fb_info *fbi = mfd->fbi;
- uint8 *buf;
- int bpp;
- unsigned long flag;
- uint32 dma_base;
- int irq_block = MDP_DMA2_TERM;
-#ifdef CONFIG_FB_MSM_MDP40
- int intr = INTR_DMA_P_DONE;
-#endif
-
- if (!mfd->panel_power_on)
- return;
-
- /* no need to power on cmd block since it's lcdc mode */
-
- if (!mfd->ibuf.visible_swapped) {
- bpp = fbi->var.bits_per_pixel / 8;
- buf = (uint8 *) fbi->fix.smem_start;
- buf += fbi->var.xoffset * bpp +
- fbi->var.yoffset * fbi->fix.line_length;
- } else {
- /* we've done something to update the pointer. */
- bpp = mfd->ibuf.bpp;
- buf = mfd->ibuf.buf;
- }
-
- dma_base = DMA_P_BASE;
-
-#ifdef CONFIG_FB_MSM_MDP40
- if (mfd->panel.type == HDMI_PANEL) {
- intr = INTR_DMA_E_DONE;
- irq_block = MDP_DMA_E_TERM;
- dma_base = DMA_E_BASE;
- }
-#endif
-
- /* starting address */
- MDP_OUTP(MDP_BASE + dma_base + 0x8, (uint32) buf);
-
- /* enable LCDC irq */
- spin_lock_irqsave(&mdp_spin_lock, flag);
- mdp_enable_irq(irq_block);
- INIT_COMPLETION(mfd->dma->comp);
- mfd->dma->waiting = TRUE;
-#ifdef CONFIG_FB_MSM_MDP40
- outp32(MDP_INTR_CLEAR, intr);
- mdp_intr_mask |= intr;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-#else
- outp32(MDP_INTR_CLEAR, LCDC_FRAME_START);
- mdp_intr_mask |= LCDC_FRAME_START;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-#endif
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- if (mfd->ibuf.vsync_enable)
- wait_for_completion_killable(&mfd->dma->comp);
- mdp_disable_irq(irq_block);
-}
diff --git a/drivers/staging/msm/mdp_dma_s.c b/drivers/staging/msm/mdp_dma_s.c
deleted file mode 100644
index 0c34a1010f1..00000000000
--- a/drivers/staging/msm/mdp_dma_s.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
-
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/spinlock.h>
-
-#include <linux/fb.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-
-static void mdp_dma_s_update_lcd(struct msm_fb_data_type *mfd)
-{
- MDPIBUF *iBuf = &mfd->ibuf;
- int mddi_dest = FALSE;
- uint32 outBpp = iBuf->bpp;
- uint32 dma_s_cfg_reg;
- uint8 *src;
- struct msm_fb_panel_data *pdata =
- (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
-
- dma_s_cfg_reg = DMA_PACK_TIGHT | DMA_PACK_ALIGN_LSB |
- DMA_OUT_SEL_AHB | DMA_IBUF_NONCONTIGUOUS;
-
- if (mfd->fb_imgType == MDP_BGR_565)
- dma_s_cfg_reg |= DMA_PACK_PATTERN_BGR;
- else
- dma_s_cfg_reg |= DMA_PACK_PATTERN_RGB;
-
- if (outBpp == 4)
- dma_s_cfg_reg |= DMA_IBUF_C3ALPHA_EN;
-
- if (outBpp == 2)
- dma_s_cfg_reg |= DMA_IBUF_FORMAT_RGB565;
-
- if (mfd->panel_info.pdest != DISPLAY_2) {
- printk(KERN_ERR "error: non-secondary type through dma_s!\n");
- return;
- }
-
- if (mfd->panel_info.type == MDDI_PANEL) {
- dma_s_cfg_reg |= DMA_OUT_SEL_MDDI;
- mddi_dest = TRUE;
- } else {
- dma_s_cfg_reg |= DMA_AHBM_LCD_SEL_SECONDARY;
- outp32(MDP_EBI2_LCD1, mfd->data_port_phys);
- }
-
- dma_s_cfg_reg |= DMA_DITHER_EN;
-
- src = (uint8 *) iBuf->buf;
- /* starting input address */
- src += (iBuf->dma_x + iBuf->dma_y * iBuf->ibuf_width) * outBpp;
-
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- /* PIXELSIZE */
- MDP_OUTP(MDP_BASE + 0xa0004, (iBuf->dma_h << 16 | iBuf->dma_w));
- MDP_OUTP(MDP_BASE + 0xa0008, src); /* ibuf address */
- MDP_OUTP(MDP_BASE + 0xa000c, iBuf->ibuf_width * outBpp);/* ystride */
-
- if (mfd->panel_info.bpp == 18) {
- dma_s_cfg_reg |= DMA_DSTC0G_6BITS | /* 666 18BPP */
- DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
- } else {
- dma_s_cfg_reg |= DMA_DSTC0G_6BITS | /* 565 16BPP */
- DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
- }
-
- if (mddi_dest) {
- MDP_OUTP(MDP_BASE + 0xa0010, (iBuf->dma_y << 16) | iBuf->dma_x);
- MDP_OUTP(MDP_BASE + 0x00090, 1);
- MDP_OUTP(MDP_BASE + 0x00094,
- (MDDI_VDO_PACKET_DESC << 16) |
- mfd->panel_info.mddi.vdopkt);
- } else {
- /* setting LCDC write window */
- pdata->set_rect(iBuf->dma_x, iBuf->dma_y, iBuf->dma_w,
- iBuf->dma_h);
- }
-
- MDP_OUTP(MDP_BASE + 0xa0000, dma_s_cfg_reg);
-
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
-}
-
-void mdp_dma_s_update(struct msm_fb_data_type *mfd)
-{
- down(&mfd->dma->mutex);
- if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
- down(&mfd->sem);
- mdp_enable_irq(MDP_DMA_S_TERM);
- mfd->dma->busy = TRUE;
- INIT_COMPLETION(mfd->dma->comp);
- mfd->ibuf_flushed = TRUE;
- mdp_dma_s_update_lcd(mfd);
- up(&mfd->sem);
-
- /* wait until DMA finishes the current job */
- wait_for_completion_killable(&mfd->dma->comp);
- mdp_disable_irq(MDP_DMA_S_TERM);
-
- /* signal if pan function is waiting for the update completion */
- if (mfd->pan_waiting) {
- mfd->pan_waiting = FALSE;
- complete(&mfd->pan_comp);
- }
- }
- up(&mfd->dma->mutex);
-}
diff --git a/drivers/staging/msm/mdp_dma_tv.c b/drivers/staging/msm/mdp_dma_tv.c
deleted file mode 100644
index 70989fb32c1..00000000000
--- a/drivers/staging/msm/mdp_dma_tv.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
-#include <linux/delay.h>
-
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/spinlock.h>
-
-#include <linux/fb.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-
-extern spinlock_t mdp_spin_lock;
-extern uint32 mdp_intr_mask;
-
-int mdp_dma3_on(struct platform_device *pdev)
-{
- struct msm_fb_data_type *mfd;
- struct fb_info *fbi;
- uint8 *buf;
- int bpp;
- int ret = 0;
-
- mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
-
- if (!mfd)
- return -ENODEV;
-
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- fbi = mfd->fbi;
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- bpp = fbi->var.bits_per_pixel / 8;
- buf = (uint8 *) fbi->fix.smem_start;
- buf += fbi->var.xoffset * bpp +
- fbi->var.yoffset * fbi->fix.line_length;
-
- /* starting address[31..8] of Video frame buffer is CS0 */
- MDP_OUTP(MDP_BASE + 0xC0008, (uint32) buf >> 3);
-
- mdp_pipe_ctrl(MDP_DMA3_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- MDP_OUTP(MDP_BASE + 0xC0004, 0x4c60674); /* flicker filter enabled */
- MDP_OUTP(MDP_BASE + 0xC0010, 0x20);	/* sobel threshold */
-
- MDP_OUTP(MDP_BASE + 0xC0018, 0xeb0010); /* Y Max, Y min */
- MDP_OUTP(MDP_BASE + 0xC001C, 0xf00010); /* Cb Max, Cb min */
- MDP_OUTP(MDP_BASE + 0xC0020, 0xf00010); /* Cb Max, Cb min */
-
- MDP_OUTP(MDP_BASE + 0xC000C, 0x67686970); /* add a few chars for CC */
- MDP_OUTP(MDP_BASE + 0xC0000, 0x1); /* MDP tv out enable */
-
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-
- ret = panel_next_on(pdev);
-
- return ret;
-}
-
-int mdp_dma3_off(struct platform_device *pdev)
-{
- int ret = 0;
-
- ret = panel_next_off(pdev);
- if (ret)
- return ret;
-
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- MDP_OUTP(MDP_BASE + 0xC0000, 0x0);
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-
- mdp_pipe_ctrl(MDP_DMA3_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-
- /* delay to make sure the last frame finishes */
- mdelay(100);
-
- return ret;
-}
-
-void mdp_dma3_update(struct msm_fb_data_type *mfd)
-{
- struct fb_info *fbi = mfd->fbi;
- uint8 *buf;
- int bpp;
- unsigned long flag;
-
- if (!mfd->panel_power_on)
- return;
-
- /* no need to power on cmd block since dma3 is running */
- bpp = fbi->var.bits_per_pixel / 8;
- buf = (uint8 *) fbi->fix.smem_start;
- buf += fbi->var.xoffset * bpp +
- fbi->var.yoffset * fbi->fix.line_length;
- MDP_OUTP(MDP_BASE + 0xC0008, (uint32) buf >> 3);
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- mdp_enable_irq(MDP_DMA3_TERM);
- INIT_COMPLETION(mfd->dma->comp);
- mfd->dma->waiting = TRUE;
-
- outp32(MDP_INTR_CLEAR, TV_OUT_DMA3_START);
- mdp_intr_mask |= TV_OUT_DMA3_START;
- outp32(MDP_INTR_ENABLE, mdp_intr_mask);
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- wait_for_completion_killable(&mfd->dma->comp);
- mdp_disable_irq(MDP_DMA3_TERM);
-}
diff --git a/drivers/staging/msm/mdp_hw_init.c b/drivers/staging/msm/mdp_hw_init.c
deleted file mode 100644
index 807362ac592..00000000000
--- a/drivers/staging/msm/mdp_hw_init.c
+++ /dev/null
@@ -1,720 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "mdp.h"
-
-/* mdp primary csc limit vector */
-uint32 mdp_plv[] = { 0x10, 0xeb, 0x10, 0xf0 };
-
-/* Color Coefficient matrix for YUV -> RGB */
-struct mdp_ccs mdp_ccs_yuv2rgb = {
- MDP_CCS_YUV2RGB,
- {
- 0x254,
- 0x000,
- 0x331,
- 0x254,
- 0xff38,
- 0xfe61,
- 0x254,
- 0x409,
- 0x000,
- },
- {
-#ifdef CONFIG_FB_MSM_MDP31
- 0x1f0,
- 0x180,
- 0x180
-#else
- 0x10,
- 0x80,
- 0x80
-#endif
- }
-};
-
-/* Color Coefficient matrix for RGB -> YUV */
-struct mdp_ccs mdp_ccs_rgb2yuv = {
- MDP_CCS_RGB2YUV,
- {
- 0x83,
- 0x102,
- 0x32,
- 0xffb5,
- 0xff6c,
- 0xe1,
- 0xe1,
- 0xff45,
- 0xffdc,
- },
-#ifdef CONFIG_FB_MSM_MDP31
- {
- 0x10,
- 0x80,
- 0x80
- }
-#endif
-};
-
-static void mdp_load_lut_param(void)
-{
- outpdw(MDP_BASE + 0x40800, 0x0);
- outpdw(MDP_BASE + 0x40804, 0x151515);
- outpdw(MDP_BASE + 0x40808, 0x1d1d1d);
- outpdw(MDP_BASE + 0x4080c, 0x232323);
- outpdw(MDP_BASE + 0x40810, 0x272727);
- outpdw(MDP_BASE + 0x40814, 0x2b2b2b);
- outpdw(MDP_BASE + 0x40818, 0x2f2f2f);
- outpdw(MDP_BASE + 0x4081c, 0x333333);
- outpdw(MDP_BASE + 0x40820, 0x363636);
- outpdw(MDP_BASE + 0x40824, 0x393939);
- outpdw(MDP_BASE + 0x40828, 0x3b3b3b);
- outpdw(MDP_BASE + 0x4082c, 0x3e3e3e);
- outpdw(MDP_BASE + 0x40830, 0x404040);
- outpdw(MDP_BASE + 0x40834, 0x434343);
- outpdw(MDP_BASE + 0x40838, 0x454545);
- outpdw(MDP_BASE + 0x4083c, 0x474747);
- outpdw(MDP_BASE + 0x40840, 0x494949);
- outpdw(MDP_BASE + 0x40844, 0x4b4b4b);
- outpdw(MDP_BASE + 0x40848, 0x4d4d4d);
- outpdw(MDP_BASE + 0x4084c, 0x4f4f4f);
- outpdw(MDP_BASE + 0x40850, 0x515151);
- outpdw(MDP_BASE + 0x40854, 0x535353);
- outpdw(MDP_BASE + 0x40858, 0x555555);
- outpdw(MDP_BASE + 0x4085c, 0x565656);
- outpdw(MDP_BASE + 0x40860, 0x585858);
- outpdw(MDP_BASE + 0x40864, 0x5a5a5a);
- outpdw(MDP_BASE + 0x40868, 0x5b5b5b);
- outpdw(MDP_BASE + 0x4086c, 0x5d5d5d);
- outpdw(MDP_BASE + 0x40870, 0x5e5e5e);
- outpdw(MDP_BASE + 0x40874, 0x606060);
- outpdw(MDP_BASE + 0x40878, 0x616161);
- outpdw(MDP_BASE + 0x4087c, 0x636363);
- outpdw(MDP_BASE + 0x40880, 0x646464);
- outpdw(MDP_BASE + 0x40884, 0x666666);
- outpdw(MDP_BASE + 0x40888, 0x676767);
- outpdw(MDP_BASE + 0x4088c, 0x686868);
- outpdw(MDP_BASE + 0x40890, 0x6a6a6a);
- outpdw(MDP_BASE + 0x40894, 0x6b6b6b);
- outpdw(MDP_BASE + 0x40898, 0x6c6c6c);
- outpdw(MDP_BASE + 0x4089c, 0x6e6e6e);
- outpdw(MDP_BASE + 0x408a0, 0x6f6f6f);
- outpdw(MDP_BASE + 0x408a4, 0x707070);
- outpdw(MDP_BASE + 0x408a8, 0x717171);
- outpdw(MDP_BASE + 0x408ac, 0x727272);
- outpdw(MDP_BASE + 0x408b0, 0x747474);
- outpdw(MDP_BASE + 0x408b4, 0x757575);
- outpdw(MDP_BASE + 0x408b8, 0x767676);
- outpdw(MDP_BASE + 0x408bc, 0x777777);
- outpdw(MDP_BASE + 0x408c0, 0x787878);
- outpdw(MDP_BASE + 0x408c4, 0x797979);
- outpdw(MDP_BASE + 0x408c8, 0x7a7a7a);
- outpdw(MDP_BASE + 0x408cc, 0x7c7c7c);
- outpdw(MDP_BASE + 0x408d0, 0x7d7d7d);
- outpdw(MDP_BASE + 0x408d4, 0x7e7e7e);
- outpdw(MDP_BASE + 0x408d8, 0x7f7f7f);
- outpdw(MDP_BASE + 0x408dc, 0x808080);
- outpdw(MDP_BASE + 0x408e0, 0x818181);
- outpdw(MDP_BASE + 0x408e4, 0x828282);
- outpdw(MDP_BASE + 0x408e8, 0x838383);
- outpdw(MDP_BASE + 0x408ec, 0x848484);
- outpdw(MDP_BASE + 0x408f0, 0x858585);
- outpdw(MDP_BASE + 0x408f4, 0x868686);
- outpdw(MDP_BASE + 0x408f8, 0x878787);
- outpdw(MDP_BASE + 0x408fc, 0x888888);
- outpdw(MDP_BASE + 0x40900, 0x898989);
- outpdw(MDP_BASE + 0x40904, 0x8a8a8a);
- outpdw(MDP_BASE + 0x40908, 0x8b8b8b);
- outpdw(MDP_BASE + 0x4090c, 0x8c8c8c);
- outpdw(MDP_BASE + 0x40910, 0x8d8d8d);
- outpdw(MDP_BASE + 0x40914, 0x8e8e8e);
- outpdw(MDP_BASE + 0x40918, 0x8f8f8f);
- outpdw(MDP_BASE + 0x4091c, 0x8f8f8f);
- outpdw(MDP_BASE + 0x40920, 0x909090);
- outpdw(MDP_BASE + 0x40924, 0x919191);
- outpdw(MDP_BASE + 0x40928, 0x929292);
- outpdw(MDP_BASE + 0x4092c, 0x939393);
- outpdw(MDP_BASE + 0x40930, 0x949494);
- outpdw(MDP_BASE + 0x40934, 0x959595);
- outpdw(MDP_BASE + 0x40938, 0x969696);
- outpdw(MDP_BASE + 0x4093c, 0x969696);
- outpdw(MDP_BASE + 0x40940, 0x979797);
- outpdw(MDP_BASE + 0x40944, 0x989898);
- outpdw(MDP_BASE + 0x40948, 0x999999);
- outpdw(MDP_BASE + 0x4094c, 0x9a9a9a);
- outpdw(MDP_BASE + 0x40950, 0x9b9b9b);
- outpdw(MDP_BASE + 0x40954, 0x9c9c9c);
- outpdw(MDP_BASE + 0x40958, 0x9c9c9c);
- outpdw(MDP_BASE + 0x4095c, 0x9d9d9d);
- outpdw(MDP_BASE + 0x40960, 0x9e9e9e);
- outpdw(MDP_BASE + 0x40964, 0x9f9f9f);
- outpdw(MDP_BASE + 0x40968, 0xa0a0a0);
- outpdw(MDP_BASE + 0x4096c, 0xa0a0a0);
- outpdw(MDP_BASE + 0x40970, 0xa1a1a1);
- outpdw(MDP_BASE + 0x40974, 0xa2a2a2);
- outpdw(MDP_BASE + 0x40978, 0xa3a3a3);
- outpdw(MDP_BASE + 0x4097c, 0xa4a4a4);
- outpdw(MDP_BASE + 0x40980, 0xa4a4a4);
- outpdw(MDP_BASE + 0x40984, 0xa5a5a5);
- outpdw(MDP_BASE + 0x40988, 0xa6a6a6);
- outpdw(MDP_BASE + 0x4098c, 0xa7a7a7);
- outpdw(MDP_BASE + 0x40990, 0xa7a7a7);
- outpdw(MDP_BASE + 0x40994, 0xa8a8a8);
- outpdw(MDP_BASE + 0x40998, 0xa9a9a9);
- outpdw(MDP_BASE + 0x4099c, 0xaaaaaa);
- outpdw(MDP_BASE + 0x409a0, 0xaaaaaa);
- outpdw(MDP_BASE + 0x409a4, 0xababab);
- outpdw(MDP_BASE + 0x409a8, 0xacacac);
- outpdw(MDP_BASE + 0x409ac, 0xadadad);
- outpdw(MDP_BASE + 0x409b0, 0xadadad);
- outpdw(MDP_BASE + 0x409b4, 0xaeaeae);
- outpdw(MDP_BASE + 0x409b8, 0xafafaf);
- outpdw(MDP_BASE + 0x409bc, 0xafafaf);
- outpdw(MDP_BASE + 0x409c0, 0xb0b0b0);
- outpdw(MDP_BASE + 0x409c4, 0xb1b1b1);
- outpdw(MDP_BASE + 0x409c8, 0xb2b2b2);
- outpdw(MDP_BASE + 0x409cc, 0xb2b2b2);
- outpdw(MDP_BASE + 0x409d0, 0xb3b3b3);
- outpdw(MDP_BASE + 0x409d4, 0xb4b4b4);
- outpdw(MDP_BASE + 0x409d8, 0xb4b4b4);
- outpdw(MDP_BASE + 0x409dc, 0xb5b5b5);
- outpdw(MDP_BASE + 0x409e0, 0xb6b6b6);
- outpdw(MDP_BASE + 0x409e4, 0xb6b6b6);
- outpdw(MDP_BASE + 0x409e8, 0xb7b7b7);
- outpdw(MDP_BASE + 0x409ec, 0xb8b8b8);
- outpdw(MDP_BASE + 0x409f0, 0xb8b8b8);
- outpdw(MDP_BASE + 0x409f4, 0xb9b9b9);
- outpdw(MDP_BASE + 0x409f8, 0xbababa);
- outpdw(MDP_BASE + 0x409fc, 0xbababa);
- outpdw(MDP_BASE + 0x40a00, 0xbbbbbb);
- outpdw(MDP_BASE + 0x40a04, 0xbcbcbc);
- outpdw(MDP_BASE + 0x40a08, 0xbcbcbc);
- outpdw(MDP_BASE + 0x40a0c, 0xbdbdbd);
- outpdw(MDP_BASE + 0x40a10, 0xbebebe);
- outpdw(MDP_BASE + 0x40a14, 0xbebebe);
- outpdw(MDP_BASE + 0x40a18, 0xbfbfbf);
- outpdw(MDP_BASE + 0x40a1c, 0xc0c0c0);
- outpdw(MDP_BASE + 0x40a20, 0xc0c0c0);
- outpdw(MDP_BASE + 0x40a24, 0xc1c1c1);
- outpdw(MDP_BASE + 0x40a28, 0xc1c1c1);
- outpdw(MDP_BASE + 0x40a2c, 0xc2c2c2);
- outpdw(MDP_BASE + 0x40a30, 0xc3c3c3);
- outpdw(MDP_BASE + 0x40a34, 0xc3c3c3);
- outpdw(MDP_BASE + 0x40a38, 0xc4c4c4);
- outpdw(MDP_BASE + 0x40a3c, 0xc5c5c5);
- outpdw(MDP_BASE + 0x40a40, 0xc5c5c5);
- outpdw(MDP_BASE + 0x40a44, 0xc6c6c6);
- outpdw(MDP_BASE + 0x40a48, 0xc6c6c6);
- outpdw(MDP_BASE + 0x40a4c, 0xc7c7c7);
- outpdw(MDP_BASE + 0x40a50, 0xc8c8c8);
- outpdw(MDP_BASE + 0x40a54, 0xc8c8c8);
- outpdw(MDP_BASE + 0x40a58, 0xc9c9c9);
- outpdw(MDP_BASE + 0x40a5c, 0xc9c9c9);
- outpdw(MDP_BASE + 0x40a60, 0xcacaca);
- outpdw(MDP_BASE + 0x40a64, 0xcbcbcb);
- outpdw(MDP_BASE + 0x40a68, 0xcbcbcb);
- outpdw(MDP_BASE + 0x40a6c, 0xcccccc);
- outpdw(MDP_BASE + 0x40a70, 0xcccccc);
- outpdw(MDP_BASE + 0x40a74, 0xcdcdcd);
- outpdw(MDP_BASE + 0x40a78, 0xcecece);
- outpdw(MDP_BASE + 0x40a7c, 0xcecece);
- outpdw(MDP_BASE + 0x40a80, 0xcfcfcf);
- outpdw(MDP_BASE + 0x40a84, 0xcfcfcf);
- outpdw(MDP_BASE + 0x40a88, 0xd0d0d0);
- outpdw(MDP_BASE + 0x40a8c, 0xd0d0d0);
- outpdw(MDP_BASE + 0x40a90, 0xd1d1d1);
- outpdw(MDP_BASE + 0x40a94, 0xd2d2d2);
- outpdw(MDP_BASE + 0x40a98, 0xd2d2d2);
- outpdw(MDP_BASE + 0x40a9c, 0xd3d3d3);
- outpdw(MDP_BASE + 0x40aa0, 0xd3d3d3);
- outpdw(MDP_BASE + 0x40aa4, 0xd4d4d4);
- outpdw(MDP_BASE + 0x40aa8, 0xd4d4d4);
- outpdw(MDP_BASE + 0x40aac, 0xd5d5d5);
- outpdw(MDP_BASE + 0x40ab0, 0xd6d6d6);
- outpdw(MDP_BASE + 0x40ab4, 0xd6d6d6);
- outpdw(MDP_BASE + 0x40ab8, 0xd7d7d7);
- outpdw(MDP_BASE + 0x40abc, 0xd7d7d7);
- outpdw(MDP_BASE + 0x40ac0, 0xd8d8d8);
- outpdw(MDP_BASE + 0x40ac4, 0xd8d8d8);
- outpdw(MDP_BASE + 0x40ac8, 0xd9d9d9);
- outpdw(MDP_BASE + 0x40acc, 0xd9d9d9);
- outpdw(MDP_BASE + 0x40ad0, 0xdadada);
- outpdw(MDP_BASE + 0x40ad4, 0xdbdbdb);
- outpdw(MDP_BASE + 0x40ad8, 0xdbdbdb);
- outpdw(MDP_BASE + 0x40adc, 0xdcdcdc);
- outpdw(MDP_BASE + 0x40ae0, 0xdcdcdc);
- outpdw(MDP_BASE + 0x40ae4, 0xdddddd);
- outpdw(MDP_BASE + 0x40ae8, 0xdddddd);
- outpdw(MDP_BASE + 0x40aec, 0xdedede);
- outpdw(MDP_BASE + 0x40af0, 0xdedede);
- outpdw(MDP_BASE + 0x40af4, 0xdfdfdf);
- outpdw(MDP_BASE + 0x40af8, 0xdfdfdf);
- outpdw(MDP_BASE + 0x40afc, 0xe0e0e0);
- outpdw(MDP_BASE + 0x40b00, 0xe0e0e0);
- outpdw(MDP_BASE + 0x40b04, 0xe1e1e1);
- outpdw(MDP_BASE + 0x40b08, 0xe1e1e1);
- outpdw(MDP_BASE + 0x40b0c, 0xe2e2e2);
- outpdw(MDP_BASE + 0x40b10, 0xe3e3e3);
- outpdw(MDP_BASE + 0x40b14, 0xe3e3e3);
- outpdw(MDP_BASE + 0x40b18, 0xe4e4e4);
- outpdw(MDP_BASE + 0x40b1c, 0xe4e4e4);
- outpdw(MDP_BASE + 0x40b20, 0xe5e5e5);
- outpdw(MDP_BASE + 0x40b24, 0xe5e5e5);
- outpdw(MDP_BASE + 0x40b28, 0xe6e6e6);
- outpdw(MDP_BASE + 0x40b2c, 0xe6e6e6);
- outpdw(MDP_BASE + 0x40b30, 0xe7e7e7);
- outpdw(MDP_BASE + 0x40b34, 0xe7e7e7);
- outpdw(MDP_BASE + 0x40b38, 0xe8e8e8);
- outpdw(MDP_BASE + 0x40b3c, 0xe8e8e8);
- outpdw(MDP_BASE + 0x40b40, 0xe9e9e9);
- outpdw(MDP_BASE + 0x40b44, 0xe9e9e9);
- outpdw(MDP_BASE + 0x40b48, 0xeaeaea);
- outpdw(MDP_BASE + 0x40b4c, 0xeaeaea);
- outpdw(MDP_BASE + 0x40b50, 0xebebeb);
- outpdw(MDP_BASE + 0x40b54, 0xebebeb);
- outpdw(MDP_BASE + 0x40b58, 0xececec);
- outpdw(MDP_BASE + 0x40b5c, 0xececec);
- outpdw(MDP_BASE + 0x40b60, 0xededed);
- outpdw(MDP_BASE + 0x40b64, 0xededed);
- outpdw(MDP_BASE + 0x40b68, 0xeeeeee);
- outpdw(MDP_BASE + 0x40b6c, 0xeeeeee);
- outpdw(MDP_BASE + 0x40b70, 0xefefef);
- outpdw(MDP_BASE + 0x40b74, 0xefefef);
- outpdw(MDP_BASE + 0x40b78, 0xf0f0f0);
- outpdw(MDP_BASE + 0x40b7c, 0xf0f0f0);
- outpdw(MDP_BASE + 0x40b80, 0xf1f1f1);
- outpdw(MDP_BASE + 0x40b84, 0xf1f1f1);
- outpdw(MDP_BASE + 0x40b88, 0xf2f2f2);
- outpdw(MDP_BASE + 0x40b8c, 0xf2f2f2);
- outpdw(MDP_BASE + 0x40b90, 0xf2f2f2);
- outpdw(MDP_BASE + 0x40b94, 0xf3f3f3);
- outpdw(MDP_BASE + 0x40b98, 0xf3f3f3);
- outpdw(MDP_BASE + 0x40b9c, 0xf4f4f4);
- outpdw(MDP_BASE + 0x40ba0, 0xf4f4f4);
- outpdw(MDP_BASE + 0x40ba4, 0xf5f5f5);
- outpdw(MDP_BASE + 0x40ba8, 0xf5f5f5);
- outpdw(MDP_BASE + 0x40bac, 0xf6f6f6);
- outpdw(MDP_BASE + 0x40bb0, 0xf6f6f6);
- outpdw(MDP_BASE + 0x40bb4, 0xf7f7f7);
- outpdw(MDP_BASE + 0x40bb8, 0xf7f7f7);
- outpdw(MDP_BASE + 0x40bbc, 0xf8f8f8);
- outpdw(MDP_BASE + 0x40bc0, 0xf8f8f8);
- outpdw(MDP_BASE + 0x40bc4, 0xf9f9f9);
- outpdw(MDP_BASE + 0x40bc8, 0xf9f9f9);
- outpdw(MDP_BASE + 0x40bcc, 0xfafafa);
- outpdw(MDP_BASE + 0x40bd0, 0xfafafa);
- outpdw(MDP_BASE + 0x40bd4, 0xfafafa);
- outpdw(MDP_BASE + 0x40bd8, 0xfbfbfb);
- outpdw(MDP_BASE + 0x40bdc, 0xfbfbfb);
- outpdw(MDP_BASE + 0x40be0, 0xfcfcfc);
- outpdw(MDP_BASE + 0x40be4, 0xfcfcfc);
- outpdw(MDP_BASE + 0x40be8, 0xfdfdfd);
- outpdw(MDP_BASE + 0x40bec, 0xfdfdfd);
- outpdw(MDP_BASE + 0x40bf0, 0xfefefe);
- outpdw(MDP_BASE + 0x40bf4, 0xfefefe);
- outpdw(MDP_BASE + 0x40bf8, 0xffffff);
- outpdw(MDP_BASE + 0x40bfc, 0xffffff);
- outpdw(MDP_BASE + 0x40c00, 0x0);
- outpdw(MDP_BASE + 0x40c04, 0x0);
- outpdw(MDP_BASE + 0x40c08, 0x0);
- outpdw(MDP_BASE + 0x40c0c, 0x0);
- outpdw(MDP_BASE + 0x40c10, 0x0);
- outpdw(MDP_BASE + 0x40c14, 0x0);
- outpdw(MDP_BASE + 0x40c18, 0x0);
- outpdw(MDP_BASE + 0x40c1c, 0x0);
- outpdw(MDP_BASE + 0x40c20, 0x0);
- outpdw(MDP_BASE + 0x40c24, 0x0);
- outpdw(MDP_BASE + 0x40c28, 0x0);
- outpdw(MDP_BASE + 0x40c2c, 0x0);
- outpdw(MDP_BASE + 0x40c30, 0x0);
- outpdw(MDP_BASE + 0x40c34, 0x0);
- outpdw(MDP_BASE + 0x40c38, 0x0);
- outpdw(MDP_BASE + 0x40c3c, 0x0);
- outpdw(MDP_BASE + 0x40c40, 0x10101);
- outpdw(MDP_BASE + 0x40c44, 0x10101);
- outpdw(MDP_BASE + 0x40c48, 0x10101);
- outpdw(MDP_BASE + 0x40c4c, 0x10101);
- outpdw(MDP_BASE + 0x40c50, 0x10101);
- outpdw(MDP_BASE + 0x40c54, 0x10101);
- outpdw(MDP_BASE + 0x40c58, 0x10101);
- outpdw(MDP_BASE + 0x40c5c, 0x10101);
- outpdw(MDP_BASE + 0x40c60, 0x10101);
- outpdw(MDP_BASE + 0x40c64, 0x10101);
- outpdw(MDP_BASE + 0x40c68, 0x20202);
- outpdw(MDP_BASE + 0x40c6c, 0x20202);
- outpdw(MDP_BASE + 0x40c70, 0x20202);
- outpdw(MDP_BASE + 0x40c74, 0x20202);
- outpdw(MDP_BASE + 0x40c78, 0x20202);
- outpdw(MDP_BASE + 0x40c7c, 0x20202);
- outpdw(MDP_BASE + 0x40c80, 0x30303);
- outpdw(MDP_BASE + 0x40c84, 0x30303);
- outpdw(MDP_BASE + 0x40c88, 0x30303);
- outpdw(MDP_BASE + 0x40c8c, 0x30303);
- outpdw(MDP_BASE + 0x40c90, 0x30303);
- outpdw(MDP_BASE + 0x40c94, 0x40404);
- outpdw(MDP_BASE + 0x40c98, 0x40404);
- outpdw(MDP_BASE + 0x40c9c, 0x40404);
- outpdw(MDP_BASE + 0x40ca0, 0x40404);
- outpdw(MDP_BASE + 0x40ca4, 0x40404);
- outpdw(MDP_BASE + 0x40ca8, 0x50505);
- outpdw(MDP_BASE + 0x40cac, 0x50505);
- outpdw(MDP_BASE + 0x40cb0, 0x50505);
- outpdw(MDP_BASE + 0x40cb4, 0x50505);
- outpdw(MDP_BASE + 0x40cb8, 0x60606);
- outpdw(MDP_BASE + 0x40cbc, 0x60606);
- outpdw(MDP_BASE + 0x40cc0, 0x60606);
- outpdw(MDP_BASE + 0x40cc4, 0x70707);
- outpdw(MDP_BASE + 0x40cc8, 0x70707);
- outpdw(MDP_BASE + 0x40ccc, 0x70707);
- outpdw(MDP_BASE + 0x40cd0, 0x70707);
- outpdw(MDP_BASE + 0x40cd4, 0x80808);
- outpdw(MDP_BASE + 0x40cd8, 0x80808);
- outpdw(MDP_BASE + 0x40cdc, 0x80808);
- outpdw(MDP_BASE + 0x40ce0, 0x90909);
- outpdw(MDP_BASE + 0x40ce4, 0x90909);
- outpdw(MDP_BASE + 0x40ce8, 0xa0a0a);
- outpdw(MDP_BASE + 0x40cec, 0xa0a0a);
- outpdw(MDP_BASE + 0x40cf0, 0xa0a0a);
- outpdw(MDP_BASE + 0x40cf4, 0xb0b0b);
- outpdw(MDP_BASE + 0x40cf8, 0xb0b0b);
- outpdw(MDP_BASE + 0x40cfc, 0xb0b0b);
- outpdw(MDP_BASE + 0x40d00, 0xc0c0c);
- outpdw(MDP_BASE + 0x40d04, 0xc0c0c);
- outpdw(MDP_BASE + 0x40d08, 0xd0d0d);
- outpdw(MDP_BASE + 0x40d0c, 0xd0d0d);
- outpdw(MDP_BASE + 0x40d10, 0xe0e0e);
- outpdw(MDP_BASE + 0x40d14, 0xe0e0e);
- outpdw(MDP_BASE + 0x40d18, 0xe0e0e);
- outpdw(MDP_BASE + 0x40d1c, 0xf0f0f);
- outpdw(MDP_BASE + 0x40d20, 0xf0f0f);
- outpdw(MDP_BASE + 0x40d24, 0x101010);
- outpdw(MDP_BASE + 0x40d28, 0x101010);
- outpdw(MDP_BASE + 0x40d2c, 0x111111);
- outpdw(MDP_BASE + 0x40d30, 0x111111);
- outpdw(MDP_BASE + 0x40d34, 0x121212);
- outpdw(MDP_BASE + 0x40d38, 0x121212);
- outpdw(MDP_BASE + 0x40d3c, 0x131313);
- outpdw(MDP_BASE + 0x40d40, 0x131313);
- outpdw(MDP_BASE + 0x40d44, 0x141414);
- outpdw(MDP_BASE + 0x40d48, 0x151515);
- outpdw(MDP_BASE + 0x40d4c, 0x151515);
- outpdw(MDP_BASE + 0x40d50, 0x161616);
- outpdw(MDP_BASE + 0x40d54, 0x161616);
- outpdw(MDP_BASE + 0x40d58, 0x171717);
- outpdw(MDP_BASE + 0x40d5c, 0x171717);
- outpdw(MDP_BASE + 0x40d60, 0x181818);
- outpdw(MDP_BASE + 0x40d64, 0x191919);
- outpdw(MDP_BASE + 0x40d68, 0x191919);
- outpdw(MDP_BASE + 0x40d6c, 0x1a1a1a);
- outpdw(MDP_BASE + 0x40d70, 0x1b1b1b);
- outpdw(MDP_BASE + 0x40d74, 0x1b1b1b);
- outpdw(MDP_BASE + 0x40d78, 0x1c1c1c);
- outpdw(MDP_BASE + 0x40d7c, 0x1c1c1c);
- outpdw(MDP_BASE + 0x40d80, 0x1d1d1d);
- outpdw(MDP_BASE + 0x40d84, 0x1e1e1e);
- outpdw(MDP_BASE + 0x40d88, 0x1f1f1f);
- outpdw(MDP_BASE + 0x40d8c, 0x1f1f1f);
- outpdw(MDP_BASE + 0x40d90, 0x202020);
- outpdw(MDP_BASE + 0x40d94, 0x212121);
- outpdw(MDP_BASE + 0x40d98, 0x212121);
- outpdw(MDP_BASE + 0x40d9c, 0x222222);
- outpdw(MDP_BASE + 0x40da0, 0x232323);
- outpdw(MDP_BASE + 0x40da4, 0x242424);
- outpdw(MDP_BASE + 0x40da8, 0x242424);
- outpdw(MDP_BASE + 0x40dac, 0x252525);
- outpdw(MDP_BASE + 0x40db0, 0x262626);
- outpdw(MDP_BASE + 0x40db4, 0x272727);
- outpdw(MDP_BASE + 0x40db8, 0x272727);
- outpdw(MDP_BASE + 0x40dbc, 0x282828);
- outpdw(MDP_BASE + 0x40dc0, 0x292929);
- outpdw(MDP_BASE + 0x40dc4, 0x2a2a2a);
- outpdw(MDP_BASE + 0x40dc8, 0x2b2b2b);
- outpdw(MDP_BASE + 0x40dcc, 0x2c2c2c);
- outpdw(MDP_BASE + 0x40dd0, 0x2c2c2c);
- outpdw(MDP_BASE + 0x40dd4, 0x2d2d2d);
- outpdw(MDP_BASE + 0x40dd8, 0x2e2e2e);
- outpdw(MDP_BASE + 0x40ddc, 0x2f2f2f);
- outpdw(MDP_BASE + 0x40de0, 0x303030);
- outpdw(MDP_BASE + 0x40de4, 0x313131);
- outpdw(MDP_BASE + 0x40de8, 0x323232);
- outpdw(MDP_BASE + 0x40dec, 0x333333);
- outpdw(MDP_BASE + 0x40df0, 0x333333);
- outpdw(MDP_BASE + 0x40df4, 0x343434);
- outpdw(MDP_BASE + 0x40df8, 0x353535);
- outpdw(MDP_BASE + 0x40dfc, 0x363636);
- outpdw(MDP_BASE + 0x40e00, 0x373737);
- outpdw(MDP_BASE + 0x40e04, 0x383838);
- outpdw(MDP_BASE + 0x40e08, 0x393939);
- outpdw(MDP_BASE + 0x40e0c, 0x3a3a3a);
- outpdw(MDP_BASE + 0x40e10, 0x3b3b3b);
- outpdw(MDP_BASE + 0x40e14, 0x3c3c3c);
- outpdw(MDP_BASE + 0x40e18, 0x3d3d3d);
- outpdw(MDP_BASE + 0x40e1c, 0x3e3e3e);
- outpdw(MDP_BASE + 0x40e20, 0x3f3f3f);
- outpdw(MDP_BASE + 0x40e24, 0x404040);
- outpdw(MDP_BASE + 0x40e28, 0x414141);
- outpdw(MDP_BASE + 0x40e2c, 0x424242);
- outpdw(MDP_BASE + 0x40e30, 0x434343);
- outpdw(MDP_BASE + 0x40e34, 0x444444);
- outpdw(MDP_BASE + 0x40e38, 0x464646);
- outpdw(MDP_BASE + 0x40e3c, 0x474747);
- outpdw(MDP_BASE + 0x40e40, 0x484848);
- outpdw(MDP_BASE + 0x40e44, 0x494949);
- outpdw(MDP_BASE + 0x40e48, 0x4a4a4a);
- outpdw(MDP_BASE + 0x40e4c, 0x4b4b4b);
- outpdw(MDP_BASE + 0x40e50, 0x4c4c4c);
- outpdw(MDP_BASE + 0x40e54, 0x4d4d4d);
- outpdw(MDP_BASE + 0x40e58, 0x4f4f4f);
- outpdw(MDP_BASE + 0x40e5c, 0x505050);
- outpdw(MDP_BASE + 0x40e60, 0x515151);
- outpdw(MDP_BASE + 0x40e64, 0x525252);
- outpdw(MDP_BASE + 0x40e68, 0x535353);
- outpdw(MDP_BASE + 0x40e6c, 0x545454);
- outpdw(MDP_BASE + 0x40e70, 0x565656);
- outpdw(MDP_BASE + 0x40e74, 0x575757);
- outpdw(MDP_BASE + 0x40e78, 0x585858);
- outpdw(MDP_BASE + 0x40e7c, 0x595959);
- outpdw(MDP_BASE + 0x40e80, 0x5b5b5b);
- outpdw(MDP_BASE + 0x40e84, 0x5c5c5c);
- outpdw(MDP_BASE + 0x40e88, 0x5d5d5d);
- outpdw(MDP_BASE + 0x40e8c, 0x5e5e5e);
- outpdw(MDP_BASE + 0x40e90, 0x606060);
- outpdw(MDP_BASE + 0x40e94, 0x616161);
- outpdw(MDP_BASE + 0x40e98, 0x626262);
- outpdw(MDP_BASE + 0x40e9c, 0x646464);
- outpdw(MDP_BASE + 0x40ea0, 0x656565);
- outpdw(MDP_BASE + 0x40ea4, 0x666666);
- outpdw(MDP_BASE + 0x40ea8, 0x686868);
- outpdw(MDP_BASE + 0x40eac, 0x696969);
- outpdw(MDP_BASE + 0x40eb0, 0x6a6a6a);
- outpdw(MDP_BASE + 0x40eb4, 0x6c6c6c);
- outpdw(MDP_BASE + 0x40eb8, 0x6d6d6d);
- outpdw(MDP_BASE + 0x40ebc, 0x6f6f6f);
- outpdw(MDP_BASE + 0x40ec0, 0x707070);
- outpdw(MDP_BASE + 0x40ec4, 0x717171);
- outpdw(MDP_BASE + 0x40ec8, 0x737373);
- outpdw(MDP_BASE + 0x40ecc, 0x747474);
- outpdw(MDP_BASE + 0x40ed0, 0x767676);
- outpdw(MDP_BASE + 0x40ed4, 0x777777);
- outpdw(MDP_BASE + 0x40ed8, 0x797979);
- outpdw(MDP_BASE + 0x40edc, 0x7a7a7a);
- outpdw(MDP_BASE + 0x40ee0, 0x7c7c7c);
- outpdw(MDP_BASE + 0x40ee4, 0x7d7d7d);
- outpdw(MDP_BASE + 0x40ee8, 0x7f7f7f);
- outpdw(MDP_BASE + 0x40eec, 0x808080);
- outpdw(MDP_BASE + 0x40ef0, 0x828282);
- outpdw(MDP_BASE + 0x40ef4, 0x838383);
- outpdw(MDP_BASE + 0x40ef8, 0x858585);
- outpdw(MDP_BASE + 0x40efc, 0x868686);
- outpdw(MDP_BASE + 0x40f00, 0x888888);
- outpdw(MDP_BASE + 0x40f04, 0x898989);
- outpdw(MDP_BASE + 0x40f08, 0x8b8b8b);
- outpdw(MDP_BASE + 0x40f0c, 0x8d8d8d);
- outpdw(MDP_BASE + 0x40f10, 0x8e8e8e);
- outpdw(MDP_BASE + 0x40f14, 0x909090);
- outpdw(MDP_BASE + 0x40f18, 0x919191);
- outpdw(MDP_BASE + 0x40f1c, 0x939393);
- outpdw(MDP_BASE + 0x40f20, 0x959595);
- outpdw(MDP_BASE + 0x40f24, 0x969696);
- outpdw(MDP_BASE + 0x40f28, 0x989898);
- outpdw(MDP_BASE + 0x40f2c, 0x9a9a9a);
- outpdw(MDP_BASE + 0x40f30, 0x9b9b9b);
- outpdw(MDP_BASE + 0x40f34, 0x9d9d9d);
- outpdw(MDP_BASE + 0x40f38, 0x9f9f9f);
- outpdw(MDP_BASE + 0x40f3c, 0xa1a1a1);
- outpdw(MDP_BASE + 0x40f40, 0xa2a2a2);
- outpdw(MDP_BASE + 0x40f44, 0xa4a4a4);
- outpdw(MDP_BASE + 0x40f48, 0xa6a6a6);
- outpdw(MDP_BASE + 0x40f4c, 0xa7a7a7);
- outpdw(MDP_BASE + 0x40f50, 0xa9a9a9);
- outpdw(MDP_BASE + 0x40f54, 0xababab);
- outpdw(MDP_BASE + 0x40f58, 0xadadad);
- outpdw(MDP_BASE + 0x40f5c, 0xafafaf);
- outpdw(MDP_BASE + 0x40f60, 0xb0b0b0);
- outpdw(MDP_BASE + 0x40f64, 0xb2b2b2);
- outpdw(MDP_BASE + 0x40f68, 0xb4b4b4);
- outpdw(MDP_BASE + 0x40f6c, 0xb6b6b6);
- outpdw(MDP_BASE + 0x40f70, 0xb8b8b8);
- outpdw(MDP_BASE + 0x40f74, 0xbababa);
- outpdw(MDP_BASE + 0x40f78, 0xbbbbbb);
- outpdw(MDP_BASE + 0x40f7c, 0xbdbdbd);
- outpdw(MDP_BASE + 0x40f80, 0xbfbfbf);
- outpdw(MDP_BASE + 0x40f84, 0xc1c1c1);
- outpdw(MDP_BASE + 0x40f88, 0xc3c3c3);
- outpdw(MDP_BASE + 0x40f8c, 0xc5c5c5);
- outpdw(MDP_BASE + 0x40f90, 0xc7c7c7);
- outpdw(MDP_BASE + 0x40f94, 0xc9c9c9);
- outpdw(MDP_BASE + 0x40f98, 0xcbcbcb);
- outpdw(MDP_BASE + 0x40f9c, 0xcdcdcd);
- outpdw(MDP_BASE + 0x40fa0, 0xcfcfcf);
- outpdw(MDP_BASE + 0x40fa4, 0xd1d1d1);
- outpdw(MDP_BASE + 0x40fa8, 0xd3d3d3);
- outpdw(MDP_BASE + 0x40fac, 0xd5d5d5);
- outpdw(MDP_BASE + 0x40fb0, 0xd7d7d7);
- outpdw(MDP_BASE + 0x40fb4, 0xd9d9d9);
- outpdw(MDP_BASE + 0x40fb8, 0xdbdbdb);
- outpdw(MDP_BASE + 0x40fbc, 0xdddddd);
- outpdw(MDP_BASE + 0x40fc0, 0xdfdfdf);
- outpdw(MDP_BASE + 0x40fc4, 0xe1e1e1);
- outpdw(MDP_BASE + 0x40fc8, 0xe3e3e3);
- outpdw(MDP_BASE + 0x40fcc, 0xe5e5e5);
- outpdw(MDP_BASE + 0x40fd0, 0xe7e7e7);
- outpdw(MDP_BASE + 0x40fd4, 0xe9e9e9);
- outpdw(MDP_BASE + 0x40fd8, 0xebebeb);
- outpdw(MDP_BASE + 0x40fdc, 0xeeeeee);
- outpdw(MDP_BASE + 0x40fe0, 0xf0f0f0);
- outpdw(MDP_BASE + 0x40fe4, 0xf2f2f2);
- outpdw(MDP_BASE + 0x40fe8, 0xf4f4f4);
- outpdw(MDP_BASE + 0x40fec, 0xf6f6f6);
- outpdw(MDP_BASE + 0x40ff0, 0xf8f8f8);
- outpdw(MDP_BASE + 0x40ff4, 0xfbfbfb);
- outpdw(MDP_BASE + 0x40ff8, 0xfdfdfd);
- outpdw(MDP_BASE + 0x40ffc, 0xffffff);
-}
-
-#define IRQ_EN_1__MDP_IRQ___M 0x00000800
-
-void mdp_hw_init(void)
-{
- int i;
-
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- /* debug interface write access */
- outpdw(MDP_BASE + 0x60, 1);
-
- outp32(MDP_INTR_ENABLE, MDP_ANY_INTR_MASK);
- outp32(MDP_EBI2_PORTMAP_MODE, 0x3);
- outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8, 0x0);
- outpdw(MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc, 0x0);
- outpdw(MDP_BASE + 0x60, 0x1);
- mdp_load_lut_param();
-
- /*
- * clear up unused fg/main registers
- */
- /* comp.plane 2&3 ystride */
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0120, 0x0);
- /* unpacked pattern */
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x012c, 0x0);
- /* unpacked pattern */
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0130, 0x0);
- /* unpacked pattern */
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0134, 0x0);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0158, 0x0);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x15c, 0x0);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0160, 0x0);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0170, 0x0);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0174, 0x0);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x017c, 0x0);
-
- /* comp.plane 2 */
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0114, 0x0);
- /* comp.plane 3 */
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0118, 0x0);
-
- /* clear up unused bg registers */
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8, 0);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0, 0);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc, 0);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0, 0);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4, 0);
-
-#ifndef CONFIG_FB_MSM_MDP22
- MDP_OUTP(MDP_BASE + 0xE0000, 0);
- MDP_OUTP(MDP_BASE + 0x100, 0xffffffff);
- MDP_OUTP(MDP_BASE + 0x90070, 0);
- MDP_OUTP(MDP_BASE + 0x94010, 1);
- MDP_OUTP(MDP_BASE + 0x9401c, 2);
-#endif
-
- /*
- * limit vector
- * pre gets applied before color matrix conversion
- * post is after ccs
- */
- writel(mdp_plv[0], MDP_CSC_PRE_LV1n(0));
- writel(mdp_plv[1], MDP_CSC_PRE_LV1n(1));
- writel(mdp_plv[2], MDP_CSC_PRE_LV1n(2));
- writel(mdp_plv[3], MDP_CSC_PRE_LV1n(3));
-
-#ifdef CONFIG_FB_MSM_MDP31
- writel(mdp_plv[2], MDP_CSC_PRE_LV1n(4));
- writel(mdp_plv[3], MDP_CSC_PRE_LV1n(5));
-
- writel(0, MDP_CSC_POST_LV1n(0));
- writel(0xff, MDP_CSC_POST_LV1n(1));
- writel(0, MDP_CSC_POST_LV1n(2));
- writel(0xff, MDP_CSC_POST_LV1n(3));
- writel(0, MDP_CSC_POST_LV1n(4));
- writel(0xff, MDP_CSC_POST_LV1n(5));
-
- writel(0, MDP_CSC_PRE_LV2n(0));
- writel(0xff, MDP_CSC_PRE_LV2n(1));
- writel(0, MDP_CSC_PRE_LV2n(2));
- writel(0xff, MDP_CSC_PRE_LV2n(3));
- writel(0, MDP_CSC_PRE_LV2n(4));
- writel(0xff, MDP_CSC_PRE_LV2n(5));
-
- writel(mdp_plv[0], MDP_CSC_POST_LV2n(0));
- writel(mdp_plv[1], MDP_CSC_POST_LV2n(1));
- writel(mdp_plv[2], MDP_CSC_POST_LV2n(2));
- writel(mdp_plv[3], MDP_CSC_POST_LV2n(3));
- writel(mdp_plv[2], MDP_CSC_POST_LV2n(4));
- writel(mdp_plv[3], MDP_CSC_POST_LV2n(5));
-#endif
-
- /* primary forward matrix */
- for (i = 0; i < MDP_CCS_SIZE; i++)
- writel(mdp_ccs_rgb2yuv.ccs[i], MDP_CSC_PFMVn(i));
-
-#ifdef CONFIG_FB_MSM_MDP31
- for (i = 0; i < MDP_BV_SIZE; i++)
- writel(mdp_ccs_rgb2yuv.bv[i], MDP_CSC_POST_BV2n(i));
-
- writel(0, MDP_CSC_PRE_BV2n(0));
- writel(0, MDP_CSC_PRE_BV2n(1));
- writel(0, MDP_CSC_PRE_BV2n(2));
-#endif
- /* primary reverse matrix */
- for (i = 0; i < MDP_CCS_SIZE; i++)
- writel(mdp_ccs_yuv2rgb.ccs[i], MDP_CSC_PRMVn(i));
-
- for (i = 0; i < MDP_BV_SIZE; i++)
- writel(mdp_ccs_yuv2rgb.bv[i], MDP_CSC_PRE_BV1n(i));
-
-#ifdef CONFIG_FB_MSM_MDP31
- writel(0, MDP_CSC_POST_BV1n(0));
- writel(0, MDP_CSC_POST_BV1n(1));
- writel(0, MDP_CSC_POST_BV1n(2));
-
- outpdw(MDP_BASE + 0x30010, 0x03e0);
- outpdw(MDP_BASE + 0x30014, 0x0360);
- outpdw(MDP_BASE + 0x30018, 0x0120);
- outpdw(MDP_BASE + 0x3001c, 0x0140);
-#endif
- mdp_init_scale_table();
-
-#ifndef CONFIG_FB_MSM_MDP31
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0104,
- ((16 << 6) << 16) | (16) << 6);
-#endif
-
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-}
\ No newline at end of file
diff --git a/drivers/staging/msm/mdp_ppp.c b/drivers/staging/msm/mdp_ppp.c
deleted file mode 100644
index c35a6aebca1..00000000000
--- a/drivers/staging/msm/mdp_ppp.c
+++ /dev/null
@@ -1,1502 +0,0 @@
-/* drivers/video/msm/src/drv/mdp/mdp_ppp.c
- *
- * Copyright (C) 2007 Google Incorporated
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/fb.h>
-#include <msm_mdp.h>
-#include <linux/file.h>
-#include <linux/major.h>
-
-#include "linux/proc_fs.h"
-
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-
-#define MDP_IS_IMGTYPE_BAD(x) (((x) >= MDP_IMGTYPE_LIMIT) && \
- (((x) < MDP_IMGTYPE2_START) || \
- ((x) >= MDP_IMGTYPE_LIMIT2)))
-
-static uint32_t bytes_per_pixel[] = {
- [MDP_RGB_565] = 2,
- [MDP_RGB_888] = 3,
- [MDP_XRGB_8888] = 4,
- [MDP_ARGB_8888] = 4,
- [MDP_RGBA_8888] = 4,
- [MDP_BGRA_8888] = 4,
- [MDP_Y_CBCR_H2V1] = 1,
- [MDP_Y_CBCR_H2V2] = 1,
- [MDP_Y_CRCB_H2V1] = 1,
- [MDP_Y_CRCB_H2V2] = 1,
- [MDP_YCRYCB_H2V1] = 2,
- [MDP_BGR_565] = 2
-};
-
-extern uint32 mdp_plv[];
-extern struct semaphore mdp_ppp_mutex;
-
-uint32_t mdp_get_bytes_per_pixel(uint32_t format)
-{
- uint32_t bpp = 0;
- if (format < ARRAY_SIZE(bytes_per_pixel))
- bpp = bytes_per_pixel[format];
-
- BUG_ON(!bpp);
- return bpp;
-}
-
-static uint32 mdp_conv_matx_rgb2yuv(uint32 input_pixel,
- uint16 *matrix_and_bias_vector,
- uint32 *clamp_vector,
- uint32 *look_up_table)
-{
- uint8 input_C2, input_C0, input_C1;
- uint32 output;
- int32 comp_C2, comp_C1, comp_C0, temp;
- int32 temp1, temp2, temp3;
- int32 matrix[9];
- int32 bias_vector[3];
- int32 Y_low_limit, Y_high_limit, C_low_limit, C_high_limit;
- int32 i;
- uint32 _is_lookup_table_enabled;
-
- input_C2 = (input_pixel >> 16) & 0xFF;
- input_C1 = (input_pixel >> 8) & 0xFF;
- input_C0 = (input_pixel >> 0) & 0xFF;
-
- comp_C0 = input_C0;
- comp_C1 = input_C1;
- comp_C2 = input_C2;
-
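- /* each matrix coefficient is a signed 12-bit value packed in a uint16; sign-extend it to 32 bits */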
- for (i = 0; i < 9; i++)
- matrix[i] =
- ((int32) (((int32) matrix_and_bias_vector[i]) << 20)) >> 20;
-
- bias_vector[0] = (int32) (matrix_and_bias_vector[9] & 0xFF);
- bias_vector[1] = (int32) (matrix_and_bias_vector[10] & 0xFF);
- bias_vector[2] = (int32) (matrix_and_bias_vector[11] & 0xFF);
-
- Y_low_limit = (int32) clamp_vector[0];
- Y_high_limit = (int32) clamp_vector[1];
- C_low_limit = (int32) clamp_vector[2];
- C_high_limit = (int32) clamp_vector[3];
-
- if (look_up_table == 0) /* check for NULL pointer */
- _is_lookup_table_enabled = 0;
- else
- _is_lookup_table_enabled = 1;
-
- if (_is_lookup_table_enabled == 1) {
- comp_C2 = (look_up_table[comp_C2] >> 16) & 0xFF;
- comp_C1 = (look_up_table[comp_C1] >> 8) & 0xFF;
- comp_C0 = (look_up_table[comp_C0] >> 0) & 0xFF;
- }
- /*
- * Color Conversion
- * reorder input colors
- */
- temp = comp_C2;
- comp_C2 = comp_C1;
- comp_C1 = comp_C0;
- comp_C0 = temp;
-
- /* matrix multiplication */
- temp1 = comp_C0 * matrix[0] + comp_C1 * matrix[1] + comp_C2 * matrix[2];
- temp2 = comp_C0 * matrix[3] + comp_C1 * matrix[4] + comp_C2 * matrix[5];
- temp3 = comp_C0 * matrix[6] + comp_C1 * matrix[7] + comp_C2 * matrix[8];
-
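- /* add 0x100 (one half in the Q9 result) so the shift below rounds to nearest */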
- comp_C0 = temp1 + 0x100;
- comp_C1 = temp2 + 0x100;
- comp_C2 = temp3 + 0x100;
-
- /* take integer part */
- comp_C0 >>= 9;
- comp_C1 >>= 9;
- comp_C2 >>= 9;
-
- /* post bias (+) */
- comp_C0 += bias_vector[0];
- comp_C1 += bias_vector[1];
- comp_C2 += bias_vector[2];
-
- /* limit pixel to 8-bit */
- if (comp_C0 < 0)
- comp_C0 = 0;
-
- if (comp_C0 > 255)
- comp_C0 = 255;
-
- if (comp_C1 < 0)
- comp_C1 = 0;
-
- if (comp_C1 > 255)
- comp_C1 = 255;
-
- if (comp_C2 < 0)
- comp_C2 = 0;
-
- if (comp_C2 > 255)
- comp_C2 = 255;
-
- /* clamp */
- if (comp_C0 < Y_low_limit)
- comp_C0 = Y_low_limit;
-
- if (comp_C0 > Y_high_limit)
- comp_C0 = Y_high_limit;
-
- if (comp_C1 < C_low_limit)
- comp_C1 = C_low_limit;
-
- if (comp_C1 > C_high_limit)
- comp_C1 = C_high_limit;
-
- if (comp_C2 < C_low_limit)
- comp_C2 = C_low_limit;
-
- if (comp_C2 > C_high_limit)
- comp_C2 = C_high_limit;
-
- output = (comp_C2 << 16) | (comp_C1 << 8) | comp_C0;
- return output;
-}
-
-uint32 mdp_conv_matx_yuv2rgb(uint32 input_pixel,
- uint16 *matrix_and_bias_vector,
- uint32 *clamp_vector, uint32 *look_up_table)
-{
- uint8 input_C2, input_C0, input_C1;
- uint32 output;
- int32 comp_C2, comp_C1, comp_C0, temp;
- int32 temp1, temp2, temp3;
- int32 matrix[9];
- int32 bias_vector[3];
- int32 Y_low_limit, Y_high_limit, C_low_limit, C_high_limit;
- int32 i;
- uint32 _is_lookup_table_enabled;
-
- input_C2 = (input_pixel >> 16) & 0xFF;
- input_C1 = (input_pixel >> 8) & 0xFF;
- input_C0 = (input_pixel >> 0) & 0xFF;
-
- comp_C0 = input_C0;
- comp_C1 = input_C1;
- comp_C2 = input_C2;
-
- for (i = 0; i < 9; i++)
- matrix[i] =
- ((int32) (((int32) matrix_and_bias_vector[i]) << 20)) >> 20;
-
- bias_vector[0] = (int32) (matrix_and_bias_vector[9] & 0xFF);
- bias_vector[1] = (int32) (matrix_and_bias_vector[10] & 0xFF);
- bias_vector[2] = (int32) (matrix_and_bias_vector[11] & 0xFF);
-
- Y_low_limit = (int32) clamp_vector[0];
- Y_high_limit = (int32) clamp_vector[1];
- C_low_limit = (int32) clamp_vector[2];
- C_high_limit = (int32) clamp_vector[3];
-
- if (look_up_table == 0) /* check for NULL pointer */
- _is_lookup_table_enabled = 0;
- else
- _is_lookup_table_enabled = 1;
-
- /* clamp */
- if (comp_C0 < Y_low_limit)
- comp_C0 = Y_low_limit;
-
- if (comp_C0 > Y_high_limit)
- comp_C0 = Y_high_limit;
-
- if (comp_C1 < C_low_limit)
- comp_C1 = C_low_limit;
-
- if (comp_C1 > C_high_limit)
- comp_C1 = C_high_limit;
-
- if (comp_C2 < C_low_limit)
- comp_C2 = C_low_limit;
-
- if (comp_C2 > C_high_limit)
- comp_C2 = C_high_limit;
-
- /*
- * Color Conversion
- * pre bias (-)
- */
- comp_C0 -= bias_vector[0];
- comp_C1 -= bias_vector[1];
- comp_C2 -= bias_vector[2];
-
- /* matrix multiplication */
- temp1 = comp_C0 * matrix[0] + comp_C1 * matrix[1] + comp_C2 * matrix[2];
- temp2 = comp_C0 * matrix[3] + comp_C1 * matrix[4] + comp_C2 * matrix[5];
- temp3 = comp_C0 * matrix[6] + comp_C1 * matrix[7] + comp_C2 * matrix[8];
-
- comp_C0 = temp1 + 0x100;
- comp_C1 = temp2 + 0x100;
- comp_C2 = temp3 + 0x100;
-
- /* take integer part */
- comp_C0 >>= 9;
- comp_C1 >>= 9;
- comp_C2 >>= 9;
-
- /* reorder output colors */
- temp = comp_C0;
- comp_C0 = comp_C1;
- comp_C1 = comp_C2;
- comp_C2 = temp;
-
- /* limit pixel to 8-bit */
- if (comp_C0 < 0)
- comp_C0 = 0;
-
- if (comp_C0 > 255)
- comp_C0 = 255;
-
- if (comp_C1 < 0)
- comp_C1 = 0;
-
- if (comp_C1 > 255)
- comp_C1 = 255;
-
- if (comp_C2 < 0)
- comp_C2 = 0;
-
- if (comp_C2 > 255)
- comp_C2 = 255;
-
- /* Look-up table */
- if (_is_lookup_table_enabled == 1) {
- comp_C2 = (look_up_table[comp_C2] >> 16) & 0xFF;
- comp_C1 = (look_up_table[comp_C1] >> 8) & 0xFF;
- comp_C0 = (look_up_table[comp_C0] >> 0) & 0xFF;
- }
-
- output = (comp_C2 << 16) | (comp_C1 << 8) | comp_C0;
- return output;
-}
-
-static uint32 mdp_calc_tpval(MDPIMG *mdpImg)
-{
- uint32 tpVal;
- uint8 plane_tp;
-
- tpVal = 0;
- if ((mdpImg->imgType == MDP_RGB_565)
- || (mdpImg->imgType == MDP_BGR_565)) {
- /*
- * transparent color conversion into 24 bpp
- *
- * C2R_8BIT
- * left-shift each field and replicate its uppermost bits into the low bits to expand it to 8 bits
- */
- plane_tp = (uint8) ((mdpImg->tpVal & 0xF800) >> 11);
- tpVal |= ((plane_tp << 3) | ((plane_tp & 0x1C) >> 2)) << 16;
-
- /* C1B_8BIT */
- plane_tp = (uint8) (mdpImg->tpVal & 0x1F);
- tpVal |= ((plane_tp << 3) | ((plane_tp & 0x1C) >> 2)) << 8;
-
- /* C0G_8BIT */
- plane_tp = (uint8) ((mdpImg->tpVal & 0x7E0) >> 5);
- tpVal |= ((plane_tp << 2) | ((plane_tp & 0x30) >> 4));
- } else {
- /* 24bit RGB to RBG conversion */
-
- tpVal = (mdpImg->tpVal & 0xFF00) >> 8;
- tpVal |= (mdpImg->tpVal & 0xFF) << 8;
- tpVal |= (mdpImg->tpVal & 0xFF0000);
- }
-
- return tpVal;
-}
-
-static uint8 *mdp_get_chroma_addr(MDPIBUF *iBuf)
-{
- uint8 *dest1;
-
- dest1 = NULL;
- switch (iBuf->ibuf_type) {
- case MDP_Y_CBCR_H2V2:
- case MDP_Y_CRCB_H2V2:
- case MDP_Y_CBCR_H2V1:
- case MDP_Y_CRCB_H2V1:
- dest1 = (uint8 *) iBuf->buf;
- dest1 += iBuf->ibuf_width * iBuf->ibuf_height * iBuf->bpp;
- break;
-
- default:
- break;
- }
-
- return dest1;
-}
-
-static void mdp_ppp_setbg(MDPIBUF *iBuf)
-{
- uint8 *bg0_addr;
- uint8 *bg1_addr;
- uint32 bg0_ystride, bg1_ystride;
- uint32 ppp_src_cfg_reg, unpack_pattern;
- int v_slice, h_slice;
-
- v_slice = h_slice = 1;
- bg0_addr = (uint8 *) iBuf->buf;
- bg1_addr = mdp_get_chroma_addr(iBuf);
-
- bg0_ystride = iBuf->ibuf_width * iBuf->bpp;
- bg1_ystride = iBuf->ibuf_width * iBuf->bpp;
-
- switch (iBuf->ibuf_type) {
- case MDP_BGR_565:
- case MDP_RGB_565:
- /* 565 = 2bytes
- * RGB = 3Components
- * RGB interleaved
- */
- ppp_src_cfg_reg = PPP_SRC_C2R_5BITS | PPP_SRC_C0G_6BITS |
- PPP_SRC_C1B_5BITS | PPP_SRC_BPP_INTERLVD_2BYTES |
- PPP_SRC_INTERLVD_3COMPONENTS | PPP_SRC_UNPACK_TIGHT |
- PPP_SRC_UNPACK_ALIGN_LSB |
- PPP_SRC_FETCH_PLANES_INTERLVD;
-
- if (iBuf->ibuf_type == MDP_RGB_565)
- unpack_pattern =
- MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8);
- else
- unpack_pattern =
- MDP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8);
- break;
-
- case MDP_RGB_888:
- /*
- * 888 = 3bytes
- * RGB = 3Components
- * RGB interleaved
- */
- ppp_src_cfg_reg = PPP_SRC_C2R_8BITS | PPP_SRC_C0G_8BITS |
- PPP_SRC_C1B_8BITS | PPP_SRC_BPP_INTERLVD_3BYTES |
- PPP_SRC_INTERLVD_3COMPONENTS | PPP_SRC_UNPACK_TIGHT |
- PPP_SRC_UNPACK_ALIGN_LSB | PPP_SRC_FETCH_PLANES_INTERLVD;
-
- unpack_pattern =
- MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8);
- break;
-
- case MDP_BGRA_8888:
- case MDP_RGBA_8888:
- case MDP_ARGB_8888:
- case MDP_XRGB_8888:
- /*
- * 8888 = 4bytes
- * ARGB = 4Components
- * ARGB interleaved
- */
- ppp_src_cfg_reg = PPP_SRC_C2R_8BITS | PPP_SRC_C0G_8BITS |
- PPP_SRC_C1B_8BITS | PPP_SRC_C3A_8BITS | PPP_SRC_C3_ALPHA_EN |
- PPP_SRC_BPP_INTERLVD_4BYTES | PPP_SRC_INTERLVD_4COMPONENTS |
- PPP_SRC_UNPACK_TIGHT | PPP_SRC_UNPACK_ALIGN_LSB |
- PPP_SRC_FETCH_PLANES_INTERLVD;
-
- if (iBuf->ibuf_type == MDP_BGRA_8888)
- unpack_pattern =
- MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
- 8);
- else if (iBuf->ibuf_type == MDP_RGBA_8888)
- unpack_pattern =
- MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G, CLR_R,
- 8);
- else
- unpack_pattern =
- MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
- 8);
- break;
-
- case MDP_Y_CBCR_H2V2:
- case MDP_Y_CRCB_H2V2:
- ppp_src_cfg_reg = PPP_SRC_C2R_8BITS |
- PPP_SRC_C0G_8BITS |
- PPP_SRC_C1B_8BITS |
- PPP_SRC_C3A_8BITS |
- PPP_SRC_BPP_INTERLVD_2BYTES |
- PPP_SRC_INTERLVD_2COMPONENTS |
- PPP_SRC_UNPACK_TIGHT |
- PPP_SRC_UNPACK_ALIGN_LSB | PPP_SRC_FETCH_PLANES_PSEUDOPLNR;
-
- if (iBuf->ibuf_type == MDP_Y_CBCR_H2V2)
- unpack_pattern =
- MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8);
- else
- unpack_pattern =
- MDP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8);
- v_slice = h_slice = 2;
- break;
-
- case MDP_YCRYCB_H2V1:
- ppp_src_cfg_reg = PPP_SRC_C2R_8BITS |
- PPP_SRC_C0G_8BITS |
- PPP_SRC_C1B_8BITS |
- PPP_SRC_C3A_8BITS |
- PPP_SRC_BPP_INTERLVD_2BYTES |
- PPP_SRC_INTERLVD_4COMPONENTS |
- PPP_SRC_UNPACK_TIGHT | PPP_SRC_UNPACK_ALIGN_LSB;
-
- unpack_pattern =
- MDP_GET_PACK_PATTERN(CLR_Y, CLR_CR, CLR_Y, CLR_CB, 8);
- h_slice = 2;
- break;
-
- case MDP_Y_CBCR_H2V1:
- case MDP_Y_CRCB_H2V1:
- ppp_src_cfg_reg = PPP_SRC_C2R_8BITS |
- PPP_SRC_C0G_8BITS |
- PPP_SRC_C1B_8BITS |
- PPP_SRC_C3A_8BITS |
- PPP_SRC_BPP_INTERLVD_2BYTES |
- PPP_SRC_INTERLVD_2COMPONENTS |
- PPP_SRC_UNPACK_TIGHT |
- PPP_SRC_UNPACK_ALIGN_LSB | PPP_SRC_FETCH_PLANES_PSEUDOPLNR;
-
- if (iBuf->ibuf_type == MDP_Y_CBCR_H2V1)
- unpack_pattern =
- MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8);
- else
- unpack_pattern =
- MDP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8);
- h_slice = 2;
- break;
-
- default:
- return;
- }
-
- /* starting input address adjustment */
- mdp_adjust_start_addr(&bg0_addr, &bg1_addr, v_slice, h_slice,
- iBuf->roi.lcd_x, iBuf->roi.lcd_y,
- iBuf->ibuf_width, iBuf->ibuf_height, iBuf->bpp,
- iBuf, 1);
-
- /*
- * 0x01c0: background plane 0 addr
- * 0x01c4: background plane 1 addr
- * 0x01c8: background plane 2 addr
- * 0x01cc: bg y stride for plane 0 and 1
- * 0x01d0: bg y stride for plane 2
- * 0x01d4: bg src PPP config
- * 0x01d8: unpack pattern
- */
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01c0, bg0_addr);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01c4, bg1_addr);
-
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01cc,
- (bg1_ystride << 16) | bg0_ystride);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01d4, ppp_src_cfg_reg);
-
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01d8, unpack_pattern);
-}
-
-#define IS_PSEUDOPLNR(img) ((img == MDP_Y_CRCB_H2V2) | \
- (img == MDP_Y_CBCR_H2V2) | \
- (img == MDP_Y_CRCB_H2V1) | \
- (img == MDP_Y_CBCR_H2V1))
-
-#define IMG_LEN(rect_h, w, rect_w, bpp) (((rect_h) * w) * bpp)
-
-#define Y_TO_CRCB_RATIO(format) \
- ((format == MDP_Y_CBCR_H2V2 || format == MDP_Y_CRCB_H2V2) ? 2 :\
- (format == MDP_Y_CBCR_H2V1 || format == MDP_Y_CRCB_H2V1) ? 1 : 1)
-
-static void get_len(struct mdp_img *img, struct mdp_rect *rect, uint32_t bpp,
- uint32_t *len0, uint32_t *len1)
-{
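- /* len0: size of the luma/packed plane; len1: size of the chroma plane for pseudo-planar formats, 0 otherwise */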
- *len0 = IMG_LEN(rect->h, img->width, rect->w, bpp);
- if (IS_PSEUDOPLNR(img->format))
- *len1 = *len0/Y_TO_CRCB_RATIO(img->format);
- else
- *len1 = 0;
-}
-
-static void flush_imgs(struct mdp_blit_req *req, int src_bpp, int dst_bpp,
- struct file *p_src_file, struct file *p_dst_file)
-{
-#ifdef CONFIG_ANDROID_PMEM
- uint32_t src0_len, src1_len, dst0_len, dst1_len;
-
- /* flush src images to memory before dma to mdp */
- get_len(&req->src, &req->src_rect, src_bpp,
- &src0_len, &src1_len);
-
- flush_pmem_file(p_src_file,
- req->src.offset, src0_len);
-
- if (IS_PSEUDOPLNR(req->src.format))
- flush_pmem_file(p_src_file,
- req->src.offset + src0_len, src1_len);
-
- get_len(&req->dst, &req->dst_rect, dst_bpp, &dst0_len, &dst1_len);
- flush_pmem_file(p_dst_file, req->dst.offset, dst0_len);
-
- if (IS_PSEUDOPLNR(req->dst.format))
- flush_pmem_file(p_dst_file,
- req->dst.offset + dst0_len, dst1_len);
-#endif
-}
-
-static void mdp_start_ppp(struct msm_fb_data_type *mfd, MDPIBUF *iBuf,
-struct mdp_blit_req *req, struct file *p_src_file, struct file *p_dst_file)
-{
- uint8 *src0, *src1;
- uint8 *dest0, *dest1;
- uint16 inpBpp;
- uint32 dest0_ystride;
- uint32 src_width;
- uint32 src_height;
- uint32 src0_ystride;
- uint32 dst_roi_width;
- uint32 dst_roi_height;
- uint32 ppp_src_cfg_reg, ppp_operation_reg, ppp_dst_cfg_reg;
- uint32 alpha, tpVal;
- uint32 packPattern;
- uint32 dst_packPattern;
- boolean inputRGB, outputRGB, pseudoplanr_output;
- int sv_slice, sh_slice;
- int dv_slice, dh_slice;
- boolean perPixelAlpha = FALSE;
- boolean ppp_lookUp_enable = FALSE;
-
- sv_slice = sh_slice = dv_slice = dh_slice = 1;
- alpha = tpVal = 0;
- src_width = iBuf->mdpImg.width;
- src_height = iBuf->roi.y + iBuf->roi.height;
- src1 = NULL;
- dest1 = NULL;
-
- inputRGB = outputRGB = TRUE;
- pseudoplanr_output = FALSE;
- ppp_operation_reg = 0;
- ppp_dst_cfg_reg = 0;
- ppp_src_cfg_reg = 0;
-
- /* Wait for the pipe to clear */
- do { } while (mdp_ppp_pipe_wait() <= 0);
-
- /*
- * destination config
- */
- switch (iBuf->ibuf_type) {
- case MDP_RGB_888:
- dst_packPattern =
- MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8);
- ppp_dst_cfg_reg =
- PPP_DST_C0G_8BIT | PPP_DST_C1B_8BIT | PPP_DST_C2R_8BIT |
- PPP_DST_PACKET_CNT_INTERLVD_3ELEM | PPP_DST_PACK_TIGHT |
- PPP_DST_PACK_ALIGN_LSB | PPP_DST_OUT_SEL_AXI |
- PPP_DST_BPP_3BYTES | PPP_DST_PLANE_INTERLVD;
- break;
-
- case MDP_XRGB_8888:
- case MDP_ARGB_8888:
- case MDP_RGBA_8888:
- if (iBuf->ibuf_type == MDP_BGRA_8888)
- dst_packPattern =
- MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
- 8);
- else if (iBuf->ibuf_type == MDP_RGBA_8888)
- dst_packPattern =
- MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G, CLR_R,
- 8);
- else
- dst_packPattern =
- MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
- 8);
-
- ppp_dst_cfg_reg = PPP_DST_C0G_8BIT |
- PPP_DST_C1B_8BIT |
- PPP_DST_C2R_8BIT |
- PPP_DST_C3A_8BIT |
- PPP_DST_C3ALPHA_EN |
- PPP_DST_PACKET_CNT_INTERLVD_4ELEM |
- PPP_DST_PACK_TIGHT |
- PPP_DST_PACK_ALIGN_LSB |
- PPP_DST_OUT_SEL_AXI |
- PPP_DST_BPP_4BYTES | PPP_DST_PLANE_INTERLVD;
- break;
-
- case MDP_Y_CBCR_H2V2:
- case MDP_Y_CRCB_H2V2:
- if (iBuf->ibuf_type == MDP_Y_CBCR_H2V2)
- dst_packPattern =
- MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8);
- else
- dst_packPattern =
- MDP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8);
-
- ppp_dst_cfg_reg = PPP_DST_C2R_8BIT |
- PPP_DST_C0G_8BIT |
- PPP_DST_C1B_8BIT |
- PPP_DST_C3A_8BIT |
- PPP_DST_PACKET_CNT_INTERLVD_2ELEM |
- PPP_DST_PACK_TIGHT |
- PPP_DST_PACK_ALIGN_LSB |
- PPP_DST_OUT_SEL_AXI | PPP_DST_BPP_2BYTES;
-
- ppp_operation_reg |= PPP_OP_DST_CHROMA_420;
- outputRGB = FALSE;
- pseudoplanr_output = TRUE;
- /*
- * vertically (y direction) and horizontally (x direction)
- * sample reduction by 2
- */
-
- /*
- * H2V2(YUV420) Cosite
- *
- * Y Y Y Y
- * CbCr CbCr
- * Y Y Y Y
- * Y Y Y Y
- * CbCr CbCr
- * Y Y Y Y
- */
- dv_slice = dh_slice = 2;
-
- /* (x,y) and (width,height) must be even numbers */
- iBuf->roi.lcd_x = (iBuf->roi.lcd_x / 2) * 2;
- iBuf->roi.dst_width = (iBuf->roi.dst_width / 2) * 2;
- iBuf->roi.x = (iBuf->roi.x / 2) * 2;
- iBuf->roi.width = (iBuf->roi.width / 2) * 2;
-
- iBuf->roi.lcd_y = (iBuf->roi.lcd_y / 2) * 2;
- iBuf->roi.dst_height = (iBuf->roi.dst_height / 2) * 2;
- iBuf->roi.y = (iBuf->roi.y / 2) * 2;
- iBuf->roi.height = (iBuf->roi.height / 2) * 2;
- break;
-
- case MDP_YCRYCB_H2V1:
- dst_packPattern =
- MDP_GET_PACK_PATTERN(CLR_Y, CLR_CR, CLR_Y, CLR_CB, 8);
- ppp_dst_cfg_reg =
- PPP_DST_C2R_8BIT | PPP_DST_C0G_8BIT | PPP_DST_C1B_8BIT |
- PPP_DST_C3A_8BIT | PPP_DST_PACKET_CNT_INTERLVD_4ELEM |
- PPP_DST_PACK_TIGHT | PPP_DST_PACK_ALIGN_LSB |
- PPP_DST_OUT_SEL_AXI | PPP_DST_BPP_2BYTES |
- PPP_DST_PLANE_INTERLVD;
-
- ppp_operation_reg |= PPP_OP_DST_CHROMA_H2V1;
- outputRGB = FALSE;
- /*
- * horizontally (x direction) sample reduction by 2
- *
- * H2V1(YUV422) Cosite
- *
- * YCbCr Y YCbCr Y
- * YCbCr Y YCbCr Y
- * YCbCr Y YCbCr Y
- * YCbCr Y YCbCr Y
- */
- dh_slice = 2;
-
- /*
- * if it's TV-Out/MDP_YCRYCB_H2V1, let's go through the
- * preloaded gamma setting of 2.2 when the content is
- * non-linear
- * ppp_lookUp_enable = TRUE;
- */
-
- /* x and width must be even number */
- iBuf->roi.lcd_x = (iBuf->roi.lcd_x / 2) * 2;
- iBuf->roi.dst_width = (iBuf->roi.dst_width / 2) * 2;
- iBuf->roi.x = (iBuf->roi.x / 2) * 2;
- iBuf->roi.width = (iBuf->roi.width / 2) * 2;
- break;
-
- case MDP_Y_CBCR_H2V1:
- case MDP_Y_CRCB_H2V1:
- if (iBuf->ibuf_type == MDP_Y_CBCR_H2V1)
- dst_packPattern =
- MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8);
- else
- dst_packPattern =
- MDP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8);
-
- ppp_dst_cfg_reg = PPP_DST_C2R_8BIT |
- PPP_DST_C0G_8BIT |
- PPP_DST_C1B_8BIT |
- PPP_DST_C3A_8BIT |
- PPP_DST_PACKET_CNT_INTERLVD_2ELEM |
- PPP_DST_PACK_TIGHT |
- PPP_DST_PACK_ALIGN_LSB |
- PPP_DST_OUT_SEL_AXI | PPP_DST_BPP_2BYTES;
-
- ppp_operation_reg |= PPP_OP_DST_CHROMA_H2V1;
- outputRGB = FALSE;
- pseudoplanr_output = TRUE;
- /* horizontally (x direction) sample reduction by 2 */
- dh_slice = 2;
-
- /* x and width must be even number */
- iBuf->roi.lcd_x = (iBuf->roi.lcd_x / 2) * 2;
- iBuf->roi.dst_width = (iBuf->roi.dst_width / 2) * 2;
- iBuf->roi.x = (iBuf->roi.x / 2) * 2;
- iBuf->roi.width = (iBuf->roi.width / 2) * 2;
- break;
-
- case MDP_BGR_565:
- case MDP_RGB_565:
- default:
- if (iBuf->ibuf_type == MDP_RGB_565)
- dst_packPattern =
- MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8);
- else
- dst_packPattern =
- MDP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8);
-
- ppp_dst_cfg_reg = PPP_DST_C0G_6BIT |
- PPP_DST_C1B_5BIT |
- PPP_DST_C2R_5BIT |
- PPP_DST_PACKET_CNT_INTERLVD_3ELEM |
- PPP_DST_PACK_TIGHT |
- PPP_DST_PACK_ALIGN_LSB |
- PPP_DST_OUT_SEL_AXI |
- PPP_DST_BPP_2BYTES | PPP_DST_PLANE_INTERLVD;
- break;
- }
-
- /* source config */
- switch (iBuf->mdpImg.imgType) {
- case MDP_RGB_888:
- inpBpp = 3;
- /*
- * 888 = 3bytes
- * RGB = 3Components
- * RGB interleaved
- */
- ppp_src_cfg_reg = PPP_SRC_C2R_8BITS | PPP_SRC_C0G_8BITS |
- PPP_SRC_C1B_8BITS | PPP_SRC_BPP_INTERLVD_3BYTES |
- PPP_SRC_INTERLVD_3COMPONENTS | PPP_SRC_UNPACK_TIGHT |
- PPP_SRC_UNPACK_ALIGN_LSB |
- PPP_SRC_FETCH_PLANES_INTERLVD;
-
- packPattern = MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8);
-
- ppp_operation_reg |= PPP_OP_COLOR_SPACE_RGB |
- PPP_OP_SRC_CHROMA_RGB | PPP_OP_DST_CHROMA_RGB;
- break;
-
- case MDP_BGRA_8888:
- case MDP_RGBA_8888:
- case MDP_ARGB_8888:
- perPixelAlpha = TRUE;
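- /* fall through: MDP_XRGB_8888 shares this source config but has no per-pixel alpha */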
- case MDP_XRGB_8888:
- inpBpp = 4;
- /*
- * 8888 = 4bytes
- * ARGB = 4Components
- * ARGB interleaved
- */
- ppp_src_cfg_reg = PPP_SRC_C2R_8BITS | PPP_SRC_C0G_8BITS |
- PPP_SRC_C1B_8BITS | PPP_SRC_C3A_8BITS |
- PPP_SRC_C3_ALPHA_EN | PPP_SRC_BPP_INTERLVD_4BYTES |
- PPP_SRC_INTERLVD_4COMPONENTS | PPP_SRC_UNPACK_TIGHT |
- PPP_SRC_UNPACK_ALIGN_LSB |
- PPP_SRC_FETCH_PLANES_INTERLVD;
-
- if (iBuf->mdpImg.imgType == MDP_BGRA_8888)
- packPattern =
- MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
- 8);
- else if (iBuf->mdpImg.imgType == MDP_RGBA_8888)
- packPattern =
- MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G, CLR_R,
- 8);
- else
- packPattern =
- MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B,
- 8);
-
- ppp_operation_reg |= PPP_OP_COLOR_SPACE_RGB |
- PPP_OP_SRC_CHROMA_RGB | PPP_OP_DST_CHROMA_RGB;
- break;
-
- case MDP_Y_CBCR_H2V2:
- case MDP_Y_CRCB_H2V2:
- inpBpp = 1;
- src1 = (uint8 *) iBuf->mdpImg.cbcr_addr;
-
- /*
- * CbCr = 2bytes
- * CbCr = 2Components
- * Y+CbCr
- */
- ppp_src_cfg_reg = PPP_SRC_C2R_8BITS | PPP_SRC_C0G_8BITS |
- PPP_SRC_C1B_8BITS | PPP_SRC_BPP_INTERLVD_2BYTES |
- PPP_SRC_INTERLVD_2COMPONENTS | PPP_SRC_UNPACK_TIGHT |
- PPP_SRC_UNPACK_ALIGN_LSB |
- PPP_SRC_FETCH_PLANES_PSEUDOPLNR;
-
- if (iBuf->mdpImg.imgType == MDP_Y_CRCB_H2V2)
- packPattern =
- MDP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8);
- else
- packPattern =
- MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8);
-
- ppp_operation_reg |= PPP_OP_COLOR_SPACE_YCBCR |
- PPP_OP_SRC_CHROMA_420 |
- PPP_OP_SRC_CHROMA_COSITE |
- PPP_OP_DST_CHROMA_RGB | PPP_OP_DST_CHROMA_COSITE;
-
- inputRGB = FALSE;
- sh_slice = sv_slice = 2;
- break;
-
- case MDP_YCRYCB_H2V1:
- inpBpp = 2;
- ppp_src_cfg_reg = PPP_SRC_C2R_8BITS |
- PPP_SRC_C0G_8BITS |
- PPP_SRC_C1B_8BITS |
- PPP_SRC_C3A_8BITS |
- PPP_SRC_BPP_INTERLVD_2BYTES |
- PPP_SRC_INTERLVD_4COMPONENTS |
- PPP_SRC_UNPACK_TIGHT | PPP_SRC_UNPACK_ALIGN_LSB;
-
- packPattern =
- MDP_GET_PACK_PATTERN(CLR_Y, CLR_CR, CLR_Y, CLR_CB, 8);
-
- ppp_operation_reg |= PPP_OP_SRC_CHROMA_H2V1 |
- PPP_OP_SRC_CHROMA_COSITE | PPP_OP_DST_CHROMA_COSITE;
-
- /*
- * if it's TV-Out/MDP_YCRYCB_H2V1, let's go through the
- * preloaded inverse gamma setting of 2.2 since they're
- * symmetric when the content is non-linear
- * ppp_lookUp_enable = TRUE;
- */
-
- /* x and width must be even number */
- iBuf->roi.lcd_x = (iBuf->roi.lcd_x / 2) * 2;
- iBuf->roi.dst_width = (iBuf->roi.dst_width / 2) * 2;
- iBuf->roi.x = (iBuf->roi.x / 2) * 2;
- iBuf->roi.width = (iBuf->roi.width / 2) * 2;
-
- inputRGB = FALSE;
- sh_slice = 2;
- break;
-
- case MDP_Y_CBCR_H2V1:
- case MDP_Y_CRCB_H2V1:
- inpBpp = 1;
- src1 = (uint8 *) iBuf->mdpImg.cbcr_addr;
-
- ppp_src_cfg_reg = PPP_SRC_C2R_8BITS |
- PPP_SRC_C0G_8BITS |
- PPP_SRC_C1B_8BITS |
- PPP_SRC_C3A_8BITS |
- PPP_SRC_BPP_INTERLVD_2BYTES |
- PPP_SRC_INTERLVD_2COMPONENTS |
- PPP_SRC_UNPACK_TIGHT |
- PPP_SRC_UNPACK_ALIGN_LSB | PPP_SRC_FETCH_PLANES_PSEUDOPLNR;
-
- if (iBuf->mdpImg.imgType == MDP_Y_CBCR_H2V1)
- packPattern =
- MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8);
- else
- packPattern =
- MDP_GET_PACK_PATTERN(0, 0, CLR_CR, CLR_CB, 8);
-
- ppp_operation_reg |= PPP_OP_SRC_CHROMA_H2V1 |
- PPP_OP_SRC_CHROMA_COSITE | PPP_OP_DST_CHROMA_COSITE;
- inputRGB = FALSE;
- sh_slice = 2;
- break;
-
- case MDP_BGR_565:
- case MDP_RGB_565:
- default:
- inpBpp = 2;
- /*
- * 565 = 2bytes
- * RGB = 3Components
- * RGB interleaved
- */
- ppp_src_cfg_reg = PPP_SRC_C2R_5BITS | PPP_SRC_C0G_6BITS |
- PPP_SRC_C1B_5BITS | PPP_SRC_BPP_INTERLVD_2BYTES |
- PPP_SRC_INTERLVD_3COMPONENTS | PPP_SRC_UNPACK_TIGHT |
- PPP_SRC_UNPACK_ALIGN_LSB |
- PPP_SRC_FETCH_PLANES_INTERLVD;
-
- if (iBuf->mdpImg.imgType == MDP_RGB_565)
- packPattern =
- MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8);
- else
- packPattern =
- MDP_GET_PACK_PATTERN(0, CLR_B, CLR_G, CLR_R, 8);
-
- ppp_operation_reg |= PPP_OP_COLOR_SPACE_RGB |
- PPP_OP_SRC_CHROMA_RGB | PPP_OP_DST_CHROMA_RGB;
- break;
-
- }
-
- if (pseudoplanr_output)
- ppp_dst_cfg_reg |= PPP_DST_PLANE_PSEUDOPLN;
-
- /* YCbCr to RGB color conversion flag */
- if ((!inputRGB) && (outputRGB)) {
- ppp_operation_reg |= PPP_OP_CONVERT_YCBCR2RGB |
- PPP_OP_CONVERT_ON;
-
- /*
- * primary/secondary is a somewhat misleading term, but
- * in mdp2.2/3.0 we only use primary matrix (forward/rev)
- * in mdp3.1 we use set1(prim) and set2(secd)
- */
-#ifdef CONFIG_FB_MSM_MDP31
- ppp_operation_reg |= PPP_OP_CONVERT_MATRIX_SECONDARY |
- PPP_OP_DST_RGB;
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0240, 0);
-#endif
-
- if (ppp_lookUp_enable) {
- ppp_operation_reg |= PPP_OP_LUT_C0_ON |
- PPP_OP_LUT_C1_ON | PPP_OP_LUT_C2_ON;
- }
- }
- /* RGB to YCbCr color conversion flag */
- if ((inputRGB) && (!outputRGB)) {
- ppp_operation_reg |= PPP_OP_CONVERT_RGB2YCBCR |
- PPP_OP_CONVERT_ON;
-
-#ifdef CONFIG_FB_MSM_MDP31
- ppp_operation_reg |= PPP_OP_CONVERT_MATRIX_PRIMARY |
- PPP_OP_DST_YCBCR;
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0240, 0x1e);
-#endif
-
- if (ppp_lookUp_enable) {
- ppp_operation_reg |= PPP_OP_LUT_C0_ON |
- PPP_OP_LUT_C1_ON | PPP_OP_LUT_C2_ON;
- }
- }
- /* YCbCr to YCbCr color conversion flag */
- if ((!inputRGB) && (!outputRGB)) {
- if ((ppp_lookUp_enable) &&
- (iBuf->mdpImg.imgType != iBuf->ibuf_type)) {
- ppp_operation_reg |= PPP_OP_LUT_C0_ON;
- }
- }
-
- ppp_src_cfg_reg |= (iBuf->roi.x % 2) ? PPP_SRC_BPP_ROI_ODD_X : 0;
- ppp_src_cfg_reg |= (iBuf->roi.y % 2) ? PPP_SRC_BPP_ROI_ODD_Y : 0;
-
- if (req->flags & MDP_DEINTERLACE)
- ppp_operation_reg |= PPP_OP_DEINT_EN;
-
- /* Dither at DMA side only since iBuf format is RGB888 */
- if (iBuf->mdpImg.mdpOp & MDPOP_DITHER)
- ppp_operation_reg |= PPP_OP_DITHER_EN;
-
- if (iBuf->mdpImg.mdpOp & MDPOP_ROTATION) {
- ppp_operation_reg |= PPP_OP_ROT_ON;
-
- if (iBuf->mdpImg.mdpOp & MDPOP_ROT90) {
- ppp_operation_reg |= PPP_OP_ROT_90;
- }
- if (iBuf->mdpImg.mdpOp & MDPOP_LR) {
- ppp_operation_reg |= PPP_OP_FLIP_LR;
- }
- if (iBuf->mdpImg.mdpOp & MDPOP_UD) {
- ppp_operation_reg |= PPP_OP_FLIP_UD;
- }
- }
-
- src0_ystride = src_width * inpBpp;
- dest0_ystride = iBuf->ibuf_width * iBuf->bpp;
-
- /* no rotation adjustment is needed here since these are the real X/Y dimensions. */
- dst_roi_width = iBuf->roi.dst_width;
- dst_roi_height = iBuf->roi.dst_height;
-
- src0 = (uint8 *) iBuf->mdpImg.bmy_addr;
- dest0 = (uint8 *) iBuf->buf;
-
- /* Jumping from Y-Plane to Chroma Plane */
- dest1 = mdp_get_chroma_addr(iBuf);
-
- /* first pixel addr calculation */
- mdp_adjust_start_addr(&src0, &src1, sv_slice, sh_slice, iBuf->roi.x,
- iBuf->roi.y, src_width, src_height, inpBpp, iBuf,
- 0);
- mdp_adjust_start_addr(&dest0, &dest1, dv_slice, dh_slice,
- iBuf->roi.lcd_x, iBuf->roi.lcd_y,
- iBuf->ibuf_width, iBuf->ibuf_height, iBuf->bpp,
- iBuf, 2);
-
- /* set scale operation */
- mdp_set_scale(iBuf, dst_roi_width, dst_roi_height,
- inputRGB, outputRGB, &ppp_operation_reg);
-
- /*
- * setting background source for blending
- */
- mdp_set_blend_attr(iBuf, &alpha, &tpVal, perPixelAlpha,
- &ppp_operation_reg);
-
- if (ppp_operation_reg & PPP_OP_BLEND_ON) {
- mdp_ppp_setbg(iBuf);
-
- if (iBuf->ibuf_type == MDP_YCRYCB_H2V1) {
- ppp_operation_reg |= PPP_OP_BG_CHROMA_H2V1;
-
- if (iBuf->mdpImg.mdpOp & MDPOP_TRANSP) {
- tpVal = mdp_conv_matx_rgb2yuv(tpVal,
- (uint16 *) &
- mdp_ccs_rgb2yuv,
- &mdp_plv[0], NULL);
- }
- }
- }
-
- /*
- * 0x0004: enable dbg bus
- * 0x0100: "don't care" edge condition until scaling is on
- * 0x0104: xrc tile x&y size u7.6 format = 7bit.6bit
- * 0x0108: src pixel size
- * 0x010c: component plane 0 starting address
- * 0x011c: component plane 0 ystride
- * 0x0124: PPP source config register
- * 0x0128: unpacked pattern from lsb to msb (e.g. RGB->BGR)
- */
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0108, (iBuf->roi.height << 16 |
- iBuf->roi.width));
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x010c, src0); /* comp.plane 0 */
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0110, src1); /* comp.plane 1 */
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x011c,
- (src0_ystride << 16 | src0_ystride));
-
- /* setup for rgb 565 */
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0124, ppp_src_cfg_reg);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0128, packPattern);
- /*
- * 0x0138: PPP destination operation register
- * 0x014c: constant_alpha|transparent_color
- * 0x0150: PPP destination config register
- * 0x0154: PPP packing pattern
- */
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0138, ppp_operation_reg);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x014c, alpha << 24 | tpVal);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0150, ppp_dst_cfg_reg);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0154, dst_packPattern);
-
- /*
- * 0x0164: ROI height and width
- * 0x0168: Component Plane 0 starting addr
- * 0x016c: Component Plane 1 starting addr
- * 0x0178: Component Plane 1/0 y stride
- */
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0164,
- (dst_roi_height << 16 | dst_roi_width));
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0168, dest0);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x016c, dest1);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0178,
- (dest0_ystride << 16 | dest0_ystride));
-
- flush_imgs(req, inpBpp, iBuf->bpp, p_src_file, p_dst_file);
-#ifdef CONFIG_MDP_PPP_ASYNC_OP
- mdp_ppp_process_curr_djob();
-#else
- mdp_pipe_kickoff(MDP_PPP_TERM, mfd);
-#endif
-}
-
-static int mdp_ppp_verify_req(struct mdp_blit_req *req)
-{
- u32 src_width, src_height, dst_width, dst_height;
-
- if (req == NULL)
- return -1;
-
- if (MDP_IS_IMGTYPE_BAD(req->src.format) ||
- MDP_IS_IMGTYPE_BAD(req->dst.format))
- return -1;
-
- if ((req->src.width == 0) || (req->src.height == 0) ||
- (req->src_rect.w == 0) || (req->src_rect.h == 0) ||
- (req->dst.width == 0) || (req->dst.height == 0) ||
- (req->dst_rect.w == 0) || (req->dst_rect.h == 0))
-
- return -1;
-
- if (((req->src_rect.x + req->src_rect.w) > req->src.width) ||
- ((req->src_rect.y + req->src_rect.h) > req->src.height))
- return -1;
-
- if (((req->dst_rect.x + req->dst_rect.w) > req->dst.width) ||
- ((req->dst_rect.y + req->dst_rect.h) > req->dst.height))
- return -1;
-
- /*
- * scaling range check
- */
- src_width = req->src_rect.w;
- src_height = req->src_rect.h;
-
- if (req->flags & MDP_ROT_90) {
- dst_width = req->dst_rect.h;
- dst_height = req->dst_rect.w;
- } else {
- dst_width = req->dst_rect.w;
- dst_height = req->dst_rect.h;
- }
-
- switch (req->dst.format) {
- case MDP_Y_CRCB_H2V2:
- case MDP_Y_CBCR_H2V2:
- src_width = (src_width / 2) * 2;
- src_height = (src_height / 2) * 2;
- dst_width = (dst_width / 2) * 2;
- dst_height = (dst_height / 2) * 2;
- break;
-
- case MDP_Y_CRCB_H2V1:
- case MDP_Y_CBCR_H2V1:
- case MDP_YCRYCB_H2V1:
- src_width = (src_width / 2) * 2;
- dst_width = (dst_width / 2) * 2;
- break;
-
- default:
- break;
- }
-
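- /* reject the blit if the required scale ratio falls outside the MDP min/max scale factors */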
- if (((MDP_SCALE_Q_FACTOR * dst_width) / src_width >
- MDP_MAX_X_SCALE_FACTOR)
- || ((MDP_SCALE_Q_FACTOR * dst_width) / src_width <
- MDP_MIN_X_SCALE_FACTOR))
- return -1;
-
- if (((MDP_SCALE_Q_FACTOR * dst_height) / src_height >
- MDP_MAX_Y_SCALE_FACTOR)
- || ((MDP_SCALE_Q_FACTOR * dst_height) / src_height <
- MDP_MIN_Y_SCALE_FACTOR))
- return -1;
-
- return 0;
-}
-
-/**
- * get_gem_img() - retrieve drm obj's start address and size
- * @img: contains drm file descriptor and gem handle
- * @start: returns the starting address of the drm obj's allocated memory
- * @len: returns the size of the drm obj's allocated memory
- *
- **/
-int get_gem_img(struct mdp_img *img, unsigned long *start, unsigned long *len)
-{
- panic("waaaaaaaah");
- //return kgsl_gem_obj_addr(img->memory_id, (int)img->priv, start, len);
-}
-
-int get_img(struct mdp_img *img, struct fb_info *info, unsigned long *start,
- unsigned long *len, struct file **pp_file)
-{
- int put_needed, ret = 0;
- struct file *file;
- unsigned long vstart;
-#ifdef CONFIG_ANDROID_PMEM
- if (!get_pmem_file(img->memory_id, start, &vstart, len, pp_file))
- return 0;
-#endif
- file = fget_light(img->memory_id, &put_needed);
- if (file == NULL)
- return -1;
-
- if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
- *start = info->fix.smem_start;
- *len = info->fix.smem_len;
- *pp_file = file;
- } else {
- ret = -1;
- fput_light(file, put_needed);
- }
- return ret;
-}
-
-int mdp_ppp_blit(struct fb_info *info, struct mdp_blit_req *req,
- struct file **pp_src_file, struct file **pp_dst_file)
-{
- unsigned long src_start, dst_start;
- unsigned long src_len = 0;
- unsigned long dst_len = 0;
- MDPIBUF iBuf;
- u32 dst_width, dst_height;
- struct file *p_src_file = 0 , *p_dst_file = 0;
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-
- if (req->dst.format == MDP_FB_FORMAT)
- req->dst.format = mfd->fb_imgType;
- if (req->src.format == MDP_FB_FORMAT)
- req->src.format = mfd->fb_imgType;
-
- if (req->flags & MDP_BLIT_SRC_GEM) {
- if (get_gem_img(&req->src, &src_start, &src_len) < 0)
- return -1;
- } else {
- get_img(&req->src, info, &src_start, &src_len, &p_src_file);
- }
- if (src_len == 0) {
- printk(KERN_ERR "mdp_ppp: could not retrieve image from "
- "memory\n");
- return -1;
- }
-
- if (req->flags & MDP_BLIT_DST_GEM) {
- if (get_gem_img(&req->dst, &dst_start, &dst_len) < 0)
- return -1;
- } else {
- get_img(&req->dst, info, &dst_start, &dst_len, &p_dst_file);
- }
- if (dst_len == 0) {
- printk(KERN_ERR "mdp_ppp: could not retrieve image from "
- "memory\n");
- return -1;
- }
- *pp_src_file = p_src_file;
- *pp_dst_file = p_dst_file;
- if (mdp_ppp_verify_req(req)) {
- printk(KERN_ERR "mdp_ppp: invalid image!\n");
- return -1;
- }
-
- iBuf.ibuf_width = req->dst.width;
- iBuf.ibuf_height = req->dst.height;
- iBuf.bpp = bytes_per_pixel[req->dst.format];
-
- iBuf.ibuf_type = req->dst.format;
- iBuf.buf = (uint8 *) dst_start;
- iBuf.buf += req->dst.offset;
-
- iBuf.roi.lcd_x = req->dst_rect.x;
- iBuf.roi.lcd_y = req->dst_rect.y;
- iBuf.roi.dst_width = req->dst_rect.w;
- iBuf.roi.dst_height = req->dst_rect.h;
-
- iBuf.roi.x = req->src_rect.x;
- iBuf.roi.width = req->src_rect.w;
- iBuf.roi.y = req->src_rect.y;
- iBuf.roi.height = req->src_rect.h;
-
- iBuf.mdpImg.width = req->src.width;
- iBuf.mdpImg.imgType = req->src.format;
-
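- /* the chroma (CbCr) plane is assumed to start immediately after the full-resolution Y plane */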
- iBuf.mdpImg.bmy_addr = (uint32 *) (src_start + req->src.offset);
- iBuf.mdpImg.cbcr_addr =
- (uint32 *) ((uint32) iBuf.mdpImg.bmy_addr +
- req->src.width * req->src.height);
-
- iBuf.mdpImg.mdpOp = MDPOP_NOP;
-
- /* blending check */
- if (req->transp_mask != MDP_TRANSP_NOP) {
- iBuf.mdpImg.mdpOp |= MDPOP_TRANSP;
- iBuf.mdpImg.tpVal = req->transp_mask;
- iBuf.mdpImg.tpVal = mdp_calc_tpval(&iBuf.mdpImg);
- }
-
- req->alpha &= 0xff;
- if (req->alpha < MDP_ALPHA_NOP) {
- iBuf.mdpImg.mdpOp |= MDPOP_ALPHAB;
- iBuf.mdpImg.alpha = req->alpha;
- }
-
- /* rotation check */
- if (req->flags & MDP_FLIP_LR)
- iBuf.mdpImg.mdpOp |= MDPOP_LR;
- if (req->flags & MDP_FLIP_UD)
- iBuf.mdpImg.mdpOp |= MDPOP_UD;
- if (req->flags & MDP_ROT_90)
- iBuf.mdpImg.mdpOp |= MDPOP_ROT90;
- if (req->flags & MDP_DITHER)
- iBuf.mdpImg.mdpOp |= MDPOP_DITHER;
-
- if (req->flags & MDP_BLEND_FG_PREMULT) {
-#ifdef CONFIG_FB_MSM_MDP31
- iBuf.mdpImg.mdpOp |= MDPOP_FG_PM_ALPHA;
-#else
- return -EINVAL;
-#endif
- }
-
- if (req->flags & MDP_DEINTERLACE) {
-#ifdef CONFIG_FB_MSM_MDP31
- if ((req->src.format != MDP_Y_CBCR_H2V2) &&
- (req->src.format != MDP_Y_CRCB_H2V2))
-#endif
- return -EINVAL;
- }
-
- /* scale check */
- if (req->flags & MDP_ROT_90) {
- dst_width = req->dst_rect.h;
- dst_height = req->dst_rect.w;
- } else {
- dst_width = req->dst_rect.w;
- dst_height = req->dst_rect.h;
- }
-
- if ((iBuf.roi.width != dst_width) || (iBuf.roi.height != dst_height))
- iBuf.mdpImg.mdpOp |= MDPOP_ASCALE;
-
- if (req->flags & MDP_BLUR) {
-#ifdef CONFIG_FB_MSM_MDP31
- if (req->flags & MDP_SHARPENING)
- printk(KERN_WARNING
- "mdp: MDP_SHARPENING is set with MDP_BLUR!\n");
- req->flags |= MDP_SHARPENING;
- req->sharpening_strength = -127;
-#else
- iBuf.mdpImg.mdpOp |= MDPOP_ASCALE | MDPOP_BLUR;
-
-#endif
- }
-
- if (req->flags & MDP_SHARPENING) {
-#ifdef CONFIG_FB_MSM_MDP31
- if ((req->sharpening_strength > 127) ||
- (req->sharpening_strength < -127)) {
- printk(KERN_ERR
- "%s: sharpening strength out of range\n",
- __func__);
- return -EINVAL;
- }
-
- iBuf.mdpImg.mdpOp |= MDPOP_ASCALE | MDPOP_SHARPENING;
- iBuf.mdpImg.sp_value = req->sharpening_strength & 0xff;
-#else
- return -EINVAL;
-#endif
- }
-
- down(&mdp_ppp_mutex);
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
-#ifdef CONFIG_FB_MSM_MDP31
- mdp_start_ppp(mfd, &iBuf, req, p_src_file, p_dst_file);
-#else
- /* bg tile fetching HW workaround */
- if (((iBuf.mdpImg.mdpOp & (MDPOP_TRANSP | MDPOP_ALPHAB)) ||
- (req->src.format == MDP_ARGB_8888) ||
- (req->src.format == MDP_BGRA_8888) ||
- (req->src.format == MDP_RGBA_8888)) &&
- (iBuf.mdpImg.mdpOp & MDPOP_ROT90) && (req->dst_rect.w <= 16)) {
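- /* work around the bg tile fetch limit by splitting the blit into 16-pixel-tall destination strips */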
- int dst_h, src_w, i;
-
- src_w = req->src_rect.w;
- dst_h = iBuf.roi.dst_height;
-
- for (i = 0; i < (req->dst_rect.h / 16); i++) {
- /* this tile size */
- iBuf.roi.dst_height = 16;
- iBuf.roi.width =
- (16 * req->src_rect.w) / req->dst_rect.h;
-
- /* if it's out of scale range... */
- if (((MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
- iBuf.roi.width) > MDP_MAX_X_SCALE_FACTOR)
- iBuf.roi.width =
- (MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
- MDP_MAX_X_SCALE_FACTOR;
- else if (((MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
- iBuf.roi.width) < MDP_MIN_X_SCALE_FACTOR)
- iBuf.roi.width =
- (MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
- MDP_MIN_X_SCALE_FACTOR;
-
- mdp_start_ppp(mfd, &iBuf, req, p_src_file, p_dst_file);
-
- /* next tile location */
- iBuf.roi.lcd_y += 16;
- iBuf.roi.x += iBuf.roi.width;
-
- /* this is for a remainder update */
- dst_h -= 16;
- src_w -= iBuf.roi.width;
- }
-
- if ((dst_h < 0) || (src_w < 0))
- printk
- ("msm_fb: mdp_blt_ex() unexpected result! line:%d\n",
- __LINE__);
-
- /* remainder update */
- if ((dst_h > 0) && (src_w > 0)) {
- u32 tmp_v;
-
- iBuf.roi.dst_height = dst_h;
- iBuf.roi.width = src_w;
-
- if (((MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
- iBuf.roi.width) > MDP_MAX_X_SCALE_FACTOR) {
- tmp_v =
- (MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
- MDP_MAX_X_SCALE_FACTOR +
- ((MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) %
- MDP_MAX_X_SCALE_FACTOR ? 1 : 0);
-
- /* move x location as roi width gets bigger */
- iBuf.roi.x -= tmp_v - iBuf.roi.width;
- iBuf.roi.width = tmp_v;
- } else
- if (((MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
- iBuf.roi.width) < MDP_MIN_X_SCALE_FACTOR) {
- tmp_v =
- (MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) /
- MDP_MIN_X_SCALE_FACTOR +
- ((MDP_SCALE_Q_FACTOR * iBuf.roi.dst_height) %
- MDP_MIN_X_SCALE_FACTOR ? 1 : 0);
-
- /*
- * we don't move x location for continuity of
- * source image
- */
- iBuf.roi.width = tmp_v;
- }
-
- mdp_start_ppp(mfd, &iBuf, req, p_src_file, p_dst_file);
- }
- } else {
- mdp_start_ppp(mfd, &iBuf, req, p_src_file, p_dst_file);
- }
-#endif
-
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- up(&mdp_ppp_mutex);
-
- return 0;
-}
diff --git a/drivers/staging/msm/mdp_ppp_dq.c b/drivers/staging/msm/mdp_ppp_dq.c
deleted file mode 100644
index 3a687c7a569..00000000000
--- a/drivers/staging/msm/mdp_ppp_dq.c
+++ /dev/null
@@ -1,347 +0,0 @@
-/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include "mdp.h"
-
-static boolean mdp_ppp_intr_flag = FALSE;
-static boolean mdp_ppp_busy_flag = FALSE;
-
-/* Queue to keep track of the completed jobs for cleaning */
-static LIST_HEAD(mdp_ppp_djob_clnrq);
-static DEFINE_SPINLOCK(mdp_ppp_djob_clnrq_lock);
-
-/* Worker to cleanup Display Jobs */
-static struct workqueue_struct *mdp_ppp_djob_clnr;
-
-/* Display Queue (DQ) for MDP PPP Block */
-static LIST_HEAD(mdp_ppp_dq);
-static DEFINE_SPINLOCK(mdp_ppp_dq_lock);
-
-/* Current Display Job for MDP PPP */
-static struct mdp_ppp_djob *curr_djob;
-
- /* Track ret code for the last operation */
-static int mdp_ppp_ret_code;
-
-inline int mdp_ppp_get_ret_code(void)
-{
- return mdp_ppp_ret_code;
-}
-
-/* Push <Reg, Val> pair into DQ (if available) to later
- * program the MDP PPP Block */
-inline void mdp_ppp_outdw(uint32_t addr, uint32_t data)
-{
- if (curr_djob) {
-
- /* get the last node of the list. */
- struct mdp_ppp_roi_cmd_set *node =
- list_entry(curr_djob->roi_cmd_list.prev,
- struct mdp_ppp_roi_cmd_set, node);
-
- /* If a node is already full, create a new one and add it to
- * the list (roi_cmd_list).
- */
- if (node->ncmds == MDP_PPP_ROI_NODE_SIZE) {
- node = kmalloc(sizeof(struct mdp_ppp_roi_cmd_set),
- GFP_KERNEL);
- if (!node) {
- printk(KERN_ERR
- "MDP_PPP: not enough memory.\n");
- mdp_ppp_ret_code = -EINVAL;
- return;
- }
-
- /* no ROI commands initially */
- node->ncmds = 0;
-
- /* add one node to roi_cmd_list. */
- list_add_tail(&node->node, &curr_djob->roi_cmd_list);
- }
-
- /* register ROI commands */
- node->cmd[node->ncmds].reg = addr;
- node->cmd[node->ncmds].val = data;
- node->ncmds++;
- } else
- /* program MDP PPP block now */
- outpdw((addr), (data));
-}
-
-/* Initialize DQ */
-inline void mdp_ppp_dq_init(void)
-{
- mdp_ppp_djob_clnr = create_singlethread_workqueue("MDPDJobClnrThrd");
-}
-
-/* Release resources of a job (DJob). */
-static void mdp_ppp_del_djob(struct mdp_ppp_djob *job)
-{
- struct mdp_ppp_roi_cmd_set *node, *tmp;
-
- /* release mem */
- mdp_ppp_put_img(job->p_src_file, job->p_dst_file);
-
- /* release roi_cmd_list */
- list_for_each_entry_safe(node, tmp, &job->roi_cmd_list, node) {
- list_del(&node->node);
- kfree(node);
- }
-
- /* release job struct */
- kfree(job);
-}
-
-/* Worker thread to reclaim resources once a display job is done */
-static void mdp_ppp_djob_cleaner(struct work_struct *work)
-{
- struct mdp_ppp_djob *job;
-
- MDP_PPP_DEBUG_MSG("mdp ppp display job cleaner started \n");
-
- /* cleanup display job */
- job = container_of(work, struct mdp_ppp_djob, cleaner.work);
- if (likely(work && job))
- mdp_ppp_del_djob(job);
-}
-
-/* Create a new Display Job (DJob) */
-inline struct mdp_ppp_djob *mdp_ppp_new_djob(void)
-{
- struct mdp_ppp_djob *job;
- struct mdp_ppp_roi_cmd_set *node;
-
- /* create a new djob */
- job = kmalloc(sizeof(struct mdp_ppp_djob), GFP_KERNEL);
- if (!job)
- return NULL;
-
- /* add the first node to curr_djob->roi_cmd_list */
- node = kmalloc(sizeof(struct mdp_ppp_roi_cmd_set), GFP_KERNEL);
- if (!node) {
- kfree(job);
- return NULL;
- }
-
- /* make this the current djob container to keep track of the curr djob;
- * it is not used in the async path, i.e. no synchronization is needed
- *
- * Should not contain any references from the past djob
- */
- BUG_ON(curr_djob);
- curr_djob = job;
- INIT_LIST_HEAD(&curr_djob->roi_cmd_list);
-
- /* no ROI commands initially */
- node->ncmds = 0;
- INIT_LIST_HEAD(&node->node);
- list_add_tail(&node->node, &curr_djob->roi_cmd_list);
-
- /* register this djob with the djob cleaner
- * initializes 'work' data struct
- */
- INIT_DELAYED_WORK(&curr_djob->cleaner, mdp_ppp_djob_cleaner);
- INIT_LIST_HEAD(&curr_djob->entry);
-
- curr_djob->p_src_file = 0;
- curr_djob->p_dst_file = 0;
-
- return job;
-}
-
-/* Undo the effect of mdp_ppp_new_djob() */
-inline void mdp_ppp_clear_curr_djob(void)
-{
- if (likely(curr_djob)) {
- mdp_ppp_del_djob(curr_djob);
- curr_djob = NULL;
- }
-}
-
-/* Cleanup dirty djobs */
-static void mdp_ppp_flush_dirty_djobs(void *cond)
-{
- unsigned long flags;
- struct mdp_ppp_djob *job;
-
- /* Flush the jobs from the djob clnr queue */
- while (cond && test_bit(0, (unsigned long *)cond)) {
-
- /* Until we are done with the cleanup queue */
- spin_lock_irqsave(&mdp_ppp_djob_clnrq_lock, flags);
- if (list_empty(&mdp_ppp_djob_clnrq)) {
- spin_unlock_irqrestore(&mdp_ppp_djob_clnrq_lock, flags);
- break;
- }
-
- MDP_PPP_DEBUG_MSG("flushing djobs ... loop \n");
-
- /* Retrieve the job that needs to be cleaned */
- job = list_entry(mdp_ppp_djob_clnrq.next,
- struct mdp_ppp_djob, entry);
- list_del_init(&job->entry);
- spin_unlock_irqrestore(&mdp_ppp_djob_clnrq_lock, flags);
-
- /* Keep mem state coherent */
- msm_fb_ensure_mem_coherency_after_dma(job->info, &job->req, 1);
-
- /* Schedule jobs for cleanup
- * A separate worker thread does this */
- queue_delayed_work(mdp_ppp_djob_clnr, &job->cleaner,
- mdp_timer_duration);
- }
-}
-
-/* If MDP PPP engine is busy, wait until it is available again */
-void mdp_ppp_wait(void)
-{
- unsigned long flags;
- int cond = 1;
-
- /* keep flushing dirty djobs as long as MDP PPP engine is busy */
- mdp_ppp_flush_dirty_djobs(&mdp_ppp_busy_flag);
-
- /* block if MDP PPP engine is still busy */
- spin_lock_irqsave(&mdp_ppp_dq_lock, flags);
- if (test_bit(0, (unsigned long *)&mdp_ppp_busy_flag)) {
-
- /* prepare for the wakeup event */
- test_and_set_bit(0, (unsigned long *)&mdp_ppp_waiting);
- INIT_COMPLETION(mdp_ppp_comp);
- spin_unlock_irqrestore(&mdp_ppp_dq_lock, flags);
-
- /* block uninterruptibly until available */
- MDP_PPP_DEBUG_MSG("waiting for mdp... \n");
- wait_for_completion_killable(&mdp_ppp_comp);
-
- /* if MDP PPP engine is still free,
- * disable INT_MDP if enabled
- */
- spin_lock_irqsave(&mdp_ppp_dq_lock, flags);
- if (!test_bit(0, (unsigned long *)&mdp_ppp_busy_flag) &&
- test_and_clear_bit(0, (unsigned long *)&mdp_ppp_intr_flag))
- mdp_disable_irq(MDP_PPP_TERM);
- }
- spin_unlock_irqrestore(&mdp_ppp_dq_lock, flags);
-
- /* flush remaining dirty djobs, if any */
- mdp_ppp_flush_dirty_djobs(&cond);
-}
-
-/* Program MDP PPP block to process this ROI */
-static void mdp_ppp_process_roi(struct list_head *roi_cmd_list)
-{
-
- /* program PPP engine with registered ROI commands */
- struct mdp_ppp_roi_cmd_set *node;
- list_for_each_entry(node, roi_cmd_list, node) {
- int i = 0;
- for (; i < node->ncmds; i++) {
- MDP_PPP_DEBUG_MSG("%d: reg: 0x%x val: 0x%x \n",
- i, node->cmd[i].reg, node->cmd[i].val);
- outpdw(node->cmd[i].reg, node->cmd[i].val);
- }
- }
-
- /* kickoff MDP PPP engine */
- MDP_PPP_DEBUG_MSG("kicking off mdp \n");
- outpdw(MDP_BASE + 0x30, 0x1000);
-}
-
-/* Submit this display job to MDP PPP engine */
-static void mdp_ppp_dispatch_djob(struct mdp_ppp_djob *job)
-{
- /* enable INT_MDP if disabled */
- if (!test_and_set_bit(0, (unsigned long *)&mdp_ppp_intr_flag))
- mdp_enable_irq(MDP_PPP_TERM);
-
- /* turn on PPP and CMD blocks */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- /* process this ROI */
- mdp_ppp_process_roi(&job->roi_cmd_list);
-}
-
-/* Enqueue this display job to be cleaned up later in "mdp_ppp_djob_done" */
-static inline void mdp_ppp_enqueue_djob(struct mdp_ppp_djob *job)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&mdp_ppp_dq_lock, flags);
- list_add_tail(&job->entry, &mdp_ppp_dq);
- spin_unlock_irqrestore(&mdp_ppp_dq_lock, flags);
-}
-
-/* Enqueue the display job for later cleanup first, then dispatch it
- * immediately if the MDP PPP engine is free */
-void mdp_ppp_process_curr_djob(void)
-{
- /* enqueue djob */
- mdp_ppp_enqueue_djob(curr_djob);
-
- /* dispatch now if MDP PPP engine is free */
- if (!test_and_set_bit(0, (unsigned long *)&mdp_ppp_busy_flag))
- mdp_ppp_dispatch_djob(curr_djob);
-
- /* done with the current djob */
- curr_djob = NULL;
-}
-
-/* Called from mdp_isr: clean up the finished job and start the next one
- * if available; otherwise mark the MDP PPP engine free */
-void mdp_ppp_djob_done(void)
-{
- struct mdp_ppp_djob *curr, *next;
- unsigned long flags;
-
- /* dequeue current */
- spin_lock_irqsave(&mdp_ppp_dq_lock, flags);
- curr = list_entry(mdp_ppp_dq.next, struct mdp_ppp_djob, entry);
- list_del_init(&curr->entry);
- spin_unlock_irqrestore(&mdp_ppp_dq_lock, flags);
-
- /* cleanup current - enqueue in the djob clnr queue */
- spin_lock_irqsave(&mdp_ppp_djob_clnrq_lock, flags);
- list_add_tail(&curr->entry, &mdp_ppp_djob_clnrq);
- spin_unlock_irqrestore(&mdp_ppp_djob_clnrq_lock, flags);
-
- /* grab next pending */
- spin_lock_irqsave(&mdp_ppp_dq_lock, flags);
- if (!list_empty(&mdp_ppp_dq)) {
- next = list_entry(mdp_ppp_dq.next, struct mdp_ppp_djob,
- entry);
- spin_unlock_irqrestore(&mdp_ppp_dq_lock, flags);
-
- /* process next in the queue */
- mdp_ppp_process_roi(&next->roi_cmd_list);
- } else {
- /* no pending display job */
- spin_unlock_irqrestore(&mdp_ppp_dq_lock, flags);
-
- /* turn off PPP and CMD blocks - "in_isr" is TRUE */
- mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
-
- /* notify if waiting */
- if (test_and_clear_bit(0, (unsigned long *)&mdp_ppp_waiting))
- complete(&mdp_ppp_comp);
-
- /* set free */
- test_and_clear_bit(0, (unsigned long *)&mdp_ppp_busy_flag);
- }
-}
diff --git a/drivers/staging/msm/mdp_ppp_dq.h b/drivers/staging/msm/mdp_ppp_dq.h
deleted file mode 100644
index 759abc20e9f..00000000000
--- a/drivers/staging/msm/mdp_ppp_dq.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef MDP_PPP_DQ_H
-#define MDP_PPP_DQ_H
-
-#include "msm_fb_def.h"
-
-#define MDP_PPP_DEBUG_MSG MSM_FB_DEBUG
-
-/* The maximum number of <Reg,Val> pairs in an mdp_ppp_roi_cmd_set structure (a
- * node)
- */
-#define MDP_PPP_ROI_NODE_SIZE 32
-
-/* ROI config command (<Reg,Val> pair) for MDP PPP block */
-struct mdp_ppp_roi_cmd {
- uint32_t reg;
- uint32_t val;
-};
-
-/* ROI config commands for MDP PPP block are stored in a list of
- * mdp_ppp_roi_cmd_set structures (nodes).
- */
-struct mdp_ppp_roi_cmd_set {
- struct list_head node;
- uint32_t ncmds; /* number of commands in this set (node). */
- struct mdp_ppp_roi_cmd cmd[MDP_PPP_ROI_NODE_SIZE];
-};
-
-/* MDP PPP Display Job (DJob) */
-struct mdp_ppp_djob {
- struct list_head entry;
- /* One ROI per MDP PPP DJob */
- struct list_head roi_cmd_list;
- struct mdp_blit_req req;
- struct fb_info *info;
- struct delayed_work cleaner;
- struct file *p_src_file, *p_dst_file;
-};
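
For context, each djob carries its ROI programming as a list of fixed-size mdp_ppp_roi_cmd_set nodes of MDP_PPP_ROI_NODE_SIZE <reg,val> pairs. A hypothetical helper that appends one pair, spilling into a freshly allocated node when the current one is full, might look like the sketch below (the helper name and the GFP_KERNEL choice are assumptions):

	#include <linux/list.h>
	#include <linux/slab.h>

	static int example_append_roi_cmd(struct mdp_ppp_djob *job,
					  uint32_t reg, uint32_t val)
	{
		struct mdp_ppp_roi_cmd_set *node;

		/* The newest node sits at the tail of the list. */
		node = list_entry(job->roi_cmd_list.prev,
				  struct mdp_ppp_roi_cmd_set, node);
		if (node->ncmds == MDP_PPP_ROI_NODE_SIZE) {
			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node)
				return -ENOMEM;
			list_add_tail(&node->node, &job->roi_cmd_list);
		}
		node->cmd[node->ncmds].reg = reg;
		node->cmd[node->ncmds].val = val;
		node->ncmds++;
		return 0;
	}
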
-
-extern struct completion mdp_ppp_comp;
-extern boolean mdp_ppp_waiting;
-extern unsigned long mdp_timer_duration;
-
-unsigned int mdp_ppp_async_op_get(void);
-void mdp_ppp_async_op_set(unsigned int flag);
-void msm_fb_ensure_mem_coherency_after_dma(struct fb_info *info,
- struct mdp_blit_req *req_list, int req_list_count);
-void mdp_ppp_put_img(struct file *p_src_file, struct file *p_dst_file);
-void mdp_ppp_dq_init(void);
-void mdp_ppp_outdw(uint32_t addr, uint32_t data);
-struct mdp_ppp_djob *mdp_ppp_new_djob(void);
-void mdp_ppp_clear_curr_djob(void);
-void mdp_ppp_process_curr_djob(void);
-int mdp_ppp_get_ret_code(void);
-void mdp_ppp_djob_done(void);
-void mdp_ppp_wait(void);
-
-#endif /* MDP_PPP_DQ_H */
diff --git a/drivers/staging/msm/mdp_ppp_v20.c b/drivers/staging/msm/mdp_ppp_v20.c
deleted file mode 100644
index b5b7271921e..00000000000
--- a/drivers/staging/msm/mdp_ppp_v20.c
+++ /dev/null
@@ -1,2486 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/fb.h>
-#include "linux/proc_fs.h"
-
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <asm/div64.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-
-static MDP_SCALE_MODE mdp_curr_up_scale_xy;
-static MDP_SCALE_MODE mdp_curr_down_scale_x;
-static MDP_SCALE_MODE mdp_curr_down_scale_y;
-
-static long long mdp_do_div(long long num, long long den)
-{
- do_div(num, den);
- return num;
-}
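
do_div() is the kernel's 64-by-32 division helper: it divides its 64-bit first argument in place (leaving the quotient there) and evaluates to the remainder, which is why the wrapper above returns 'num' rather than the macro's result. A small illustrative use, with made-up names:

	#include <linux/types.h>
	#include <asm/div64.h>

	static u64 example_bytes_to_blocks(u64 bytes, u32 block_size)
	{
		u32 rem;

		rem = do_div(bytes, block_size);  /* 'bytes' now holds the quotient */
		if (rem)
			bytes++;		  /* round up to a whole block */
		return bytes;
	}
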
-
-struct mdp_table_entry mdp_gaussian_blur_table[] = {
- /* max variance */
- { 0x5fffc, 0x20000080 },
- { 0x50280, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50284, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50288, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x5028c, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50290, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50294, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50298, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x5029c, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502a0, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502a4, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502a8, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502ac, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502b0, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502b4, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502b8, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502bc, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502c0, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502c4, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502c8, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502cc, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502d0, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502d4, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502d8, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502dc, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502e0, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502e4, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502e8, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502ec, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502f0, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502f4, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502f8, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x502fc, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50300, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50304, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50308, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x5030c, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50310, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50314, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50318, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x5031c, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50320, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50324, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50328, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x5032c, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50330, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50334, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50338, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x5033c, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50340, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50344, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50348, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x5034c, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50350, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50354, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50358, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x5035c, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50360, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50364, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50368, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x5036c, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50370, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50374, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x50378, 0x20000080 },
- { 0x5fffc, 0x20000080 },
- { 0x5037c, 0x20000080 },
-};
-
-static void load_scale_table(
- struct mdp_table_entry *table, int len)
-{
- int i;
- for (i = 0; i < len; i++)
- MDP_OUTP(MDP_BASE + table[i].reg, table[i].val);
-}
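
load_scale_table() is the table-driven counterpart of the long MDP_OUTP runs that follow: it replays an array of <reg,val> offset pairs against MDP_BASE. A hypothetical two-entry table (values copied from the first writes of the primary upscale loader below) would be fed through it like this:

	static struct mdp_table_entry example_upscale_head[] = {
		{ 0x5fffc, 0x0 },
		{ 0x50200, 0x7fc00000 },
	};

	static void example_load(void)
	{
		load_scale_table(example_upscale_head,
				 ARRAY_SIZE(example_upscale_head));
	}
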
-
-static void mdp_load_pr_upscale_table(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50200, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50204, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50208, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5020c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50210, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50214, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50218, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5021c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50220, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50224, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50228, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5022c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50230, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50234, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50238, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5023c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50240, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50244, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50248, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5024c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50250, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50254, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50258, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5025c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50260, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50264, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50268, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5026c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50270, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50274, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50278, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5027c, 0x0);
-}
-
-static void mdp_load_pr_downscale_table_x_point2TOpoint4(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50280, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50284, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50288, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5028c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50290, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50294, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50298, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5029c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502a0, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502a4, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502a8, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502ac, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502b0, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502b4, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502b8, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502bc, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502c0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502c4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502c8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502cc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502d0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502d4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502d8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502dc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502e0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502e4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502e8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502ec, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502f0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502f4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502f8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502fc, 0x0);
-}
-
-static void mdp_load_pr_downscale_table_y_point2TOpoint4(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50300, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50304, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50308, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5030c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50310, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50314, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50318, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5031c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50320, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50324, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50328, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5032c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50330, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50334, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50338, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5033c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50340, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50344, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50348, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5034c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50350, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50354, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50358, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5035c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50360, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50364, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50368, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5036c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50370, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50374, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50378, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5037c, 0x0);
-}
-
-static void mdp_load_pr_downscale_table_x_point4TOpoint6(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50280, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50284, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50288, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5028c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50290, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50294, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50298, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5029c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502a0, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502a4, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502a8, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502ac, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502b0, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502b4, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502b8, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502bc, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502c0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502c4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502c8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502cc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502d0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502d4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502d8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502dc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502e0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502e4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502e8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502ec, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502f0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502f4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502f8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502fc, 0x0);
-}
-
-static void mdp_load_pr_downscale_table_y_point4TOpoint6(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50300, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50304, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50308, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5030c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50310, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50314, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50318, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5031c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50320, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50324, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50328, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5032c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50330, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50334, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50338, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5033c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50340, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50344, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50348, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5034c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50350, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50354, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50358, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5035c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50360, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50364, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50368, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5036c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50370, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50374, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50378, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5037c, 0x0);
-}
-
-static void mdp_load_pr_downscale_table_x_point6TOpoint8(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50280, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50284, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50288, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5028c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50290, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50294, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50298, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5029c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502a0, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502a4, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502a8, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502ac, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502b0, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502b4, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502b8, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502bc, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502c0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502c4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502c8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502cc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502d0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502d4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502d8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502dc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502e0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502e4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502e8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502ec, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502f0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502f4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502f8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502fc, 0x0);
-}
-
-static void mdp_load_pr_downscale_table_y_point6TOpoint8(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50300, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50304, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50308, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5030c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50310, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50314, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50318, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5031c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50320, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50324, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50328, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5032c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50330, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50334, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50338, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5033c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50340, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50344, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50348, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5034c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50350, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50354, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50358, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5035c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50360, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50364, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50368, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5036c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50370, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50374, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50378, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5037c, 0x0);
-}
-
-static void mdp_load_pr_downscale_table_x_point8TO1(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50280, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50284, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50288, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5028c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50290, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50294, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50298, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5029c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502a0, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502a4, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502a8, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502ac, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502b0, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502b4, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502b8, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x502bc, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502c0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502c4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502c8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502cc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502d0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502d4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502d8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502dc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502e0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502e4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502e8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502ec, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502f0, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502f4, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502f8, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x502fc, 0x0);
-}
-
-static void mdp_load_pr_downscale_table_y_point8TO1(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50300, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50304, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50308, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5030c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50310, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50314, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50318, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5031c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50320, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50324, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50328, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5032c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50330, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50334, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50338, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x5033c, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50340, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50344, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50348, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5034c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50350, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50354, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50358, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5035c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50360, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50364, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50368, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5036c, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50370, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50374, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x50378, 0x0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ff);
- MDP_OUTP(MDP_BASE + 0x5037c, 0x0);
-}
-
-static void mdp_load_bc_upscale_table(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50200, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xff80000d);
- MDP_OUTP(MDP_BASE + 0x50204, 0x7ec003f9);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfec0001c);
- MDP_OUTP(MDP_BASE + 0x50208, 0x7d4003f3);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe40002b);
- MDP_OUTP(MDP_BASE + 0x5020c, 0x7b8003ed);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfd80003c);
- MDP_OUTP(MDP_BASE + 0x50210, 0x794003e8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfcc0004d);
- MDP_OUTP(MDP_BASE + 0x50214, 0x76c003e4);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfc40005f);
- MDP_OUTP(MDP_BASE + 0x50218, 0x73c003e0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfb800071);
- MDP_OUTP(MDP_BASE + 0x5021c, 0x708003de);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfac00085);
- MDP_OUTP(MDP_BASE + 0x50220, 0x6d0003db);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfa000098);
- MDP_OUTP(MDP_BASE + 0x50224, 0x698003d9);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf98000ac);
- MDP_OUTP(MDP_BASE + 0x50228, 0x654003d8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf8c000c1);
- MDP_OUTP(MDP_BASE + 0x5022c, 0x610003d7);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf84000d5);
- MDP_OUTP(MDP_BASE + 0x50230, 0x5c8003d7);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf7c000e9);
- MDP_OUTP(MDP_BASE + 0x50234, 0x580003d7);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf74000fd);
- MDP_OUTP(MDP_BASE + 0x50238, 0x534003d8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6c00112);
- MDP_OUTP(MDP_BASE + 0x5023c, 0x4e8003d8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6800126);
- MDP_OUTP(MDP_BASE + 0x50240, 0x494003da);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf600013a);
- MDP_OUTP(MDP_BASE + 0x50244, 0x448003db);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf600014d);
- MDP_OUTP(MDP_BASE + 0x50248, 0x3f4003dd);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00160);
- MDP_OUTP(MDP_BASE + 0x5024c, 0x3a4003df);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00172);
- MDP_OUTP(MDP_BASE + 0x50250, 0x354003e1);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00184);
- MDP_OUTP(MDP_BASE + 0x50254, 0x304003e3);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6000195);
- MDP_OUTP(MDP_BASE + 0x50258, 0x2b0003e6);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf64001a6);
- MDP_OUTP(MDP_BASE + 0x5025c, 0x260003e8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6c001b4);
- MDP_OUTP(MDP_BASE + 0x50260, 0x214003eb);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf78001c2);
- MDP_OUTP(MDP_BASE + 0x50264, 0x1c4003ee);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf80001cf);
- MDP_OUTP(MDP_BASE + 0x50268, 0x17c003f1);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf90001db);
- MDP_OUTP(MDP_BASE + 0x5026c, 0x134003f3);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfa0001e5);
- MDP_OUTP(MDP_BASE + 0x50270, 0xf0003f6);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfb4001ee);
- MDP_OUTP(MDP_BASE + 0x50274, 0xac003f9);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfcc001f5);
- MDP_OUTP(MDP_BASE + 0x50278, 0x70003fb);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe4001fb);
- MDP_OUTP(MDP_BASE + 0x5027c, 0x34003fe);
-}
-
-static void mdp_load_bc_downscale_table_x_point2TOpoint4(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ac00084);
- MDP_OUTP(MDP_BASE + 0x50280, 0x23400083);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b000084);
- MDP_OUTP(MDP_BASE + 0x50284, 0x23000083);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b400084);
- MDP_OUTP(MDP_BASE + 0x50288, 0x23000082);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b400085);
- MDP_OUTP(MDP_BASE + 0x5028c, 0x23000081);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b800085);
- MDP_OUTP(MDP_BASE + 0x50290, 0x23000080);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1bc00086);
- MDP_OUTP(MDP_BASE + 0x50294, 0x22c0007f);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1c000086);
- MDP_OUTP(MDP_BASE + 0x50298, 0x2280007f);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1c400086);
- MDP_OUTP(MDP_BASE + 0x5029c, 0x2280007e);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1c800086);
- MDP_OUTP(MDP_BASE + 0x502a0, 0x2280007d);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1cc00086);
- MDP_OUTP(MDP_BASE + 0x502a4, 0x2240007d);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1cc00087);
- MDP_OUTP(MDP_BASE + 0x502a8, 0x2240007c);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d000087);
- MDP_OUTP(MDP_BASE + 0x502ac, 0x2240007b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d400087);
- MDP_OUTP(MDP_BASE + 0x502b0, 0x2200007b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d400088);
- MDP_OUTP(MDP_BASE + 0x502b4, 0x22400079);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d800088);
- MDP_OUTP(MDP_BASE + 0x502b8, 0x22400078);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1dc00088);
- MDP_OUTP(MDP_BASE + 0x502bc, 0x22400077);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1dc00089);
- MDP_OUTP(MDP_BASE + 0x502c0, 0x22000077);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1e000089);
- MDP_OUTP(MDP_BASE + 0x502c4, 0x22000076);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1e400089);
- MDP_OUTP(MDP_BASE + 0x502c8, 0x22000075);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ec00088);
- MDP_OUTP(MDP_BASE + 0x502cc, 0x21c00075);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ec00089);
- MDP_OUTP(MDP_BASE + 0x502d0, 0x21c00074);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f000089);
- MDP_OUTP(MDP_BASE + 0x502d4, 0x21c00073);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f400089);
- MDP_OUTP(MDP_BASE + 0x502d8, 0x21800073);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f40008a);
- MDP_OUTP(MDP_BASE + 0x502dc, 0x21800072);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f80008a);
- MDP_OUTP(MDP_BASE + 0x502e0, 0x21800071);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1fc0008a);
- MDP_OUTP(MDP_BASE + 0x502e4, 0x21800070);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1fc0008b);
- MDP_OUTP(MDP_BASE + 0x502e8, 0x2180006f);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x2000008c);
- MDP_OUTP(MDP_BASE + 0x502ec, 0x2140006e);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x2040008c);
- MDP_OUTP(MDP_BASE + 0x502f0, 0x2140006d);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x2080008c);
- MDP_OUTP(MDP_BASE + 0x502f4, 0x2100006d);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x20c0008c);
- MDP_OUTP(MDP_BASE + 0x502f8, 0x2100006c);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x20c0008d);
- MDP_OUTP(MDP_BASE + 0x502fc, 0x2100006b);
-}
-
-static void mdp_load_bc_downscale_table_y_point2TOpoint4(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ac00084);
- MDP_OUTP(MDP_BASE + 0x50300, 0x23400083);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b000084);
- MDP_OUTP(MDP_BASE + 0x50304, 0x23000083);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b400084);
- MDP_OUTP(MDP_BASE + 0x50308, 0x23000082);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b400085);
- MDP_OUTP(MDP_BASE + 0x5030c, 0x23000081);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1b800085);
- MDP_OUTP(MDP_BASE + 0x50310, 0x23000080);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1bc00086);
- MDP_OUTP(MDP_BASE + 0x50314, 0x22c0007f);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1c000086);
- MDP_OUTP(MDP_BASE + 0x50318, 0x2280007f);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1c400086);
- MDP_OUTP(MDP_BASE + 0x5031c, 0x2280007e);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1c800086);
- MDP_OUTP(MDP_BASE + 0x50320, 0x2280007d);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1cc00086);
- MDP_OUTP(MDP_BASE + 0x50324, 0x2240007d);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1cc00087);
- MDP_OUTP(MDP_BASE + 0x50328, 0x2240007c);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d000087);
- MDP_OUTP(MDP_BASE + 0x5032c, 0x2240007b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d400087);
- MDP_OUTP(MDP_BASE + 0x50330, 0x2200007b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d400088);
- MDP_OUTP(MDP_BASE + 0x50334, 0x22400079);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1d800088);
- MDP_OUTP(MDP_BASE + 0x50338, 0x22400078);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1dc00088);
- MDP_OUTP(MDP_BASE + 0x5033c, 0x22400077);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1dc00089);
- MDP_OUTP(MDP_BASE + 0x50340, 0x22000077);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1e000089);
- MDP_OUTP(MDP_BASE + 0x50344, 0x22000076);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1e400089);
- MDP_OUTP(MDP_BASE + 0x50348, 0x22000075);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ec00088);
- MDP_OUTP(MDP_BASE + 0x5034c, 0x21c00075);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ec00089);
- MDP_OUTP(MDP_BASE + 0x50350, 0x21c00074);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f000089);
- MDP_OUTP(MDP_BASE + 0x50354, 0x21c00073);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f400089);
- MDP_OUTP(MDP_BASE + 0x50358, 0x21800073);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f40008a);
- MDP_OUTP(MDP_BASE + 0x5035c, 0x21800072);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1f80008a);
- MDP_OUTP(MDP_BASE + 0x50360, 0x21800071);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1fc0008a);
- MDP_OUTP(MDP_BASE + 0x50364, 0x21800070);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1fc0008b);
- MDP_OUTP(MDP_BASE + 0x50368, 0x2180006f);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x2000008c);
- MDP_OUTP(MDP_BASE + 0x5036c, 0x2140006e);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x2040008c);
- MDP_OUTP(MDP_BASE + 0x50370, 0x2140006d);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x2080008c);
- MDP_OUTP(MDP_BASE + 0x50374, 0x2100006d);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x20c0008c);
- MDP_OUTP(MDP_BASE + 0x50378, 0x2100006c);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x20c0008d);
- MDP_OUTP(MDP_BASE + 0x5037c, 0x2100006b);
-}
-
-static void mdp_load_bc_downscale_table_x_point4TOpoint6(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x740008c);
- MDP_OUTP(MDP_BASE + 0x50280, 0x33800088);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x800008e);
- MDP_OUTP(MDP_BASE + 0x50284, 0x33400084);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x8400092);
- MDP_OUTP(MDP_BASE + 0x50288, 0x33000080);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x9000094);
- MDP_OUTP(MDP_BASE + 0x5028c, 0x3300007b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x9c00098);
- MDP_OUTP(MDP_BASE + 0x50290, 0x32400077);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xa40009b);
- MDP_OUTP(MDP_BASE + 0x50294, 0x32000073);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xb00009d);
- MDP_OUTP(MDP_BASE + 0x50298, 0x31c0006f);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xbc000a0);
- MDP_OUTP(MDP_BASE + 0x5029c, 0x3140006b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xc8000a2);
- MDP_OUTP(MDP_BASE + 0x502a0, 0x31000067);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xd8000a5);
- MDP_OUTP(MDP_BASE + 0x502a4, 0x30800062);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xe4000a8);
- MDP_OUTP(MDP_BASE + 0x502a8, 0x2fc0005f);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xec000aa);
- MDP_OUTP(MDP_BASE + 0x502ac, 0x2fc0005b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf8000ad);
- MDP_OUTP(MDP_BASE + 0x502b0, 0x2f400057);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x108000b0);
- MDP_OUTP(MDP_BASE + 0x502b4, 0x2e400054);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x114000b2);
- MDP_OUTP(MDP_BASE + 0x502b8, 0x2e000050);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x124000b4);
- MDP_OUTP(MDP_BASE + 0x502bc, 0x2d80004c);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x130000b6);
- MDP_OUTP(MDP_BASE + 0x502c0, 0x2d000049);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x140000b8);
- MDP_OUTP(MDP_BASE + 0x502c4, 0x2c800045);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x150000b9);
- MDP_OUTP(MDP_BASE + 0x502c8, 0x2c000042);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x15c000bd);
- MDP_OUTP(MDP_BASE + 0x502cc, 0x2b40003e);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x16c000bf);
- MDP_OUTP(MDP_BASE + 0x502d0, 0x2a80003b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x17c000bf);
- MDP_OUTP(MDP_BASE + 0x502d4, 0x2a000039);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x188000c2);
- MDP_OUTP(MDP_BASE + 0x502d8, 0x29400036);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x19c000c4);
- MDP_OUTP(MDP_BASE + 0x502dc, 0x28800032);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ac000c5);
- MDP_OUTP(MDP_BASE + 0x502e0, 0x2800002f);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1bc000c7);
- MDP_OUTP(MDP_BASE + 0x502e4, 0x2740002c);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1cc000c8);
- MDP_OUTP(MDP_BASE + 0x502e8, 0x26c00029);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1dc000c9);
- MDP_OUTP(MDP_BASE + 0x502ec, 0x26000027);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ec000cc);
- MDP_OUTP(MDP_BASE + 0x502f0, 0x25000024);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x200000cc);
- MDP_OUTP(MDP_BASE + 0x502f4, 0x24800021);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x210000cd);
- MDP_OUTP(MDP_BASE + 0x502f8, 0x23800020);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x220000ce);
- MDP_OUTP(MDP_BASE + 0x502fc, 0x2300001d);
-}
-
-static void mdp_load_bc_downscale_table_y_point4TOpoint6(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x740008c);
- MDP_OUTP(MDP_BASE + 0x50300, 0x33800088);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x800008e);
- MDP_OUTP(MDP_BASE + 0x50304, 0x33400084);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x8400092);
- MDP_OUTP(MDP_BASE + 0x50308, 0x33000080);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x9000094);
- MDP_OUTP(MDP_BASE + 0x5030c, 0x3300007b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x9c00098);
- MDP_OUTP(MDP_BASE + 0x50310, 0x32400077);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xa40009b);
- MDP_OUTP(MDP_BASE + 0x50314, 0x32000073);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xb00009d);
- MDP_OUTP(MDP_BASE + 0x50318, 0x31c0006f);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xbc000a0);
- MDP_OUTP(MDP_BASE + 0x5031c, 0x3140006b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xc8000a2);
- MDP_OUTP(MDP_BASE + 0x50320, 0x31000067);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xd8000a5);
- MDP_OUTP(MDP_BASE + 0x50324, 0x30800062);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xe4000a8);
- MDP_OUTP(MDP_BASE + 0x50328, 0x2fc0005f);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xec000aa);
- MDP_OUTP(MDP_BASE + 0x5032c, 0x2fc0005b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf8000ad);
- MDP_OUTP(MDP_BASE + 0x50330, 0x2f400057);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x108000b0);
- MDP_OUTP(MDP_BASE + 0x50334, 0x2e400054);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x114000b2);
- MDP_OUTP(MDP_BASE + 0x50338, 0x2e000050);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x124000b4);
- MDP_OUTP(MDP_BASE + 0x5033c, 0x2d80004c);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x130000b6);
- MDP_OUTP(MDP_BASE + 0x50340, 0x2d000049);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x140000b8);
- MDP_OUTP(MDP_BASE + 0x50344, 0x2c800045);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x150000b9);
- MDP_OUTP(MDP_BASE + 0x50348, 0x2c000042);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x15c000bd);
- MDP_OUTP(MDP_BASE + 0x5034c, 0x2b40003e);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x16c000bf);
- MDP_OUTP(MDP_BASE + 0x50350, 0x2a80003b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x17c000bf);
- MDP_OUTP(MDP_BASE + 0x50354, 0x2a000039);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x188000c2);
- MDP_OUTP(MDP_BASE + 0x50358, 0x29400036);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x19c000c4);
- MDP_OUTP(MDP_BASE + 0x5035c, 0x28800032);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ac000c5);
- MDP_OUTP(MDP_BASE + 0x50360, 0x2800002f);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1bc000c7);
- MDP_OUTP(MDP_BASE + 0x50364, 0x2740002c);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1cc000c8);
- MDP_OUTP(MDP_BASE + 0x50368, 0x26c00029);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1dc000c9);
- MDP_OUTP(MDP_BASE + 0x5036c, 0x26000027);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1ec000cc);
- MDP_OUTP(MDP_BASE + 0x50370, 0x25000024);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x200000cc);
- MDP_OUTP(MDP_BASE + 0x50374, 0x24800021);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x210000cd);
- MDP_OUTP(MDP_BASE + 0x50378, 0x23800020);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x220000ce);
- MDP_OUTP(MDP_BASE + 0x5037c, 0x2300001d);
-}
-
-static void mdp_load_bc_downscale_table_x_point6TOpoint8(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000070);
- MDP_OUTP(MDP_BASE + 0x50280, 0x4bc00068);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000078);
- MDP_OUTP(MDP_BASE + 0x50284, 0x4bc00060);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000080);
- MDP_OUTP(MDP_BASE + 0x50288, 0x4b800059);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000089);
- MDP_OUTP(MDP_BASE + 0x5028c, 0x4b000052);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe400091);
- MDP_OUTP(MDP_BASE + 0x50290, 0x4a80004b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe40009a);
- MDP_OUTP(MDP_BASE + 0x50294, 0x4a000044);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe8000a3);
- MDP_OUTP(MDP_BASE + 0x50298, 0x4940003d);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfec000ac);
- MDP_OUTP(MDP_BASE + 0x5029c, 0x48400037);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xff0000b4);
- MDP_OUTP(MDP_BASE + 0x502a0, 0x47800031);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xff8000bd);
- MDP_OUTP(MDP_BASE + 0x502a4, 0x4640002b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xc5);
- MDP_OUTP(MDP_BASE + 0x502a8, 0x45000026);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x8000ce);
- MDP_OUTP(MDP_BASE + 0x502ac, 0x43800021);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x10000d6);
- MDP_OUTP(MDP_BASE + 0x502b0, 0x4240001c);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x18000df);
- MDP_OUTP(MDP_BASE + 0x502b4, 0x40800018);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x24000e6);
- MDP_OUTP(MDP_BASE + 0x502b8, 0x3f000014);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x30000ee);
- MDP_OUTP(MDP_BASE + 0x502bc, 0x3d400010);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x40000f5);
- MDP_OUTP(MDP_BASE + 0x502c0, 0x3b80000c);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x50000fc);
- MDP_OUTP(MDP_BASE + 0x502c4, 0x39800009);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x6000102);
- MDP_OUTP(MDP_BASE + 0x502c8, 0x37c00006);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x7000109);
- MDP_OUTP(MDP_BASE + 0x502cc, 0x35800004);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x840010e);
- MDP_OUTP(MDP_BASE + 0x502d0, 0x33800002);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x9800114);
- MDP_OUTP(MDP_BASE + 0x502d4, 0x31400000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xac00119);
- MDP_OUTP(MDP_BASE + 0x502d8, 0x2f4003fe);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xc40011e);
- MDP_OUTP(MDP_BASE + 0x502dc, 0x2d0003fc);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xdc00121);
- MDP_OUTP(MDP_BASE + 0x502e0, 0x2b0003fb);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf400125);
- MDP_OUTP(MDP_BASE + 0x502e4, 0x28c003fa);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x11000128);
- MDP_OUTP(MDP_BASE + 0x502e8, 0x268003f9);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x12c0012a);
- MDP_OUTP(MDP_BASE + 0x502ec, 0x244003f9);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1480012c);
- MDP_OUTP(MDP_BASE + 0x502f0, 0x224003f8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1640012e);
- MDP_OUTP(MDP_BASE + 0x502f4, 0x200003f8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1800012f);
- MDP_OUTP(MDP_BASE + 0x502f8, 0x1e0003f8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1a00012f);
- MDP_OUTP(MDP_BASE + 0x502fc, 0x1c0003f8);
-}
-
-static void mdp_load_bc_downscale_table_y_point6TOpoint8(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000070);
- MDP_OUTP(MDP_BASE + 0x50300, 0x4bc00068);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000078);
- MDP_OUTP(MDP_BASE + 0x50304, 0x4bc00060);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000080);
- MDP_OUTP(MDP_BASE + 0x50308, 0x4b800059);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe000089);
- MDP_OUTP(MDP_BASE + 0x5030c, 0x4b000052);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe400091);
- MDP_OUTP(MDP_BASE + 0x50310, 0x4a80004b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe40009a);
- MDP_OUTP(MDP_BASE + 0x50314, 0x4a000044);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe8000a3);
- MDP_OUTP(MDP_BASE + 0x50318, 0x4940003d);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfec000ac);
- MDP_OUTP(MDP_BASE + 0x5031c, 0x48400037);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xff0000b4);
- MDP_OUTP(MDP_BASE + 0x50320, 0x47800031);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xff8000bd);
- MDP_OUTP(MDP_BASE + 0x50324, 0x4640002b);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xc5);
- MDP_OUTP(MDP_BASE + 0x50328, 0x45000026);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x8000ce);
- MDP_OUTP(MDP_BASE + 0x5032c, 0x43800021);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x10000d6);
- MDP_OUTP(MDP_BASE + 0x50330, 0x4240001c);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x18000df);
- MDP_OUTP(MDP_BASE + 0x50334, 0x40800018);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x24000e6);
- MDP_OUTP(MDP_BASE + 0x50338, 0x3f000014);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x30000ee);
- MDP_OUTP(MDP_BASE + 0x5033c, 0x3d400010);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x40000f5);
- MDP_OUTP(MDP_BASE + 0x50340, 0x3b80000c);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x50000fc);
- MDP_OUTP(MDP_BASE + 0x50344, 0x39800009);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x6000102);
- MDP_OUTP(MDP_BASE + 0x50348, 0x37c00006);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x7000109);
- MDP_OUTP(MDP_BASE + 0x5034c, 0x35800004);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x840010e);
- MDP_OUTP(MDP_BASE + 0x50350, 0x33800002);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x9800114);
- MDP_OUTP(MDP_BASE + 0x50354, 0x31400000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xac00119);
- MDP_OUTP(MDP_BASE + 0x50358, 0x2f4003fe);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xc40011e);
- MDP_OUTP(MDP_BASE + 0x5035c, 0x2d0003fc);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xdc00121);
- MDP_OUTP(MDP_BASE + 0x50360, 0x2b0003fb);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf400125);
- MDP_OUTP(MDP_BASE + 0x50364, 0x28c003fa);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x11000128);
- MDP_OUTP(MDP_BASE + 0x50368, 0x268003f9);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x12c0012a);
- MDP_OUTP(MDP_BASE + 0x5036c, 0x244003f9);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1480012c);
- MDP_OUTP(MDP_BASE + 0x50370, 0x224003f8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1640012e);
- MDP_OUTP(MDP_BASE + 0x50374, 0x200003f8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1800012f);
- MDP_OUTP(MDP_BASE + 0x50378, 0x1e0003f8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x1a00012f);
- MDP_OUTP(MDP_BASE + 0x5037c, 0x1c0003f8);
-}
-
-static void mdp_load_bc_downscale_table_x_point8TO1(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50280, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xff80000d);
- MDP_OUTP(MDP_BASE + 0x50284, 0x7ec003f9);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfec0001c);
- MDP_OUTP(MDP_BASE + 0x50288, 0x7d4003f3);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe40002b);
- MDP_OUTP(MDP_BASE + 0x5028c, 0x7b8003ed);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfd80003c);
- MDP_OUTP(MDP_BASE + 0x50290, 0x794003e8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfcc0004d);
- MDP_OUTP(MDP_BASE + 0x50294, 0x76c003e4);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfc40005f);
- MDP_OUTP(MDP_BASE + 0x50298, 0x73c003e0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfb800071);
- MDP_OUTP(MDP_BASE + 0x5029c, 0x708003de);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfac00085);
- MDP_OUTP(MDP_BASE + 0x502a0, 0x6d0003db);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfa000098);
- MDP_OUTP(MDP_BASE + 0x502a4, 0x698003d9);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf98000ac);
- MDP_OUTP(MDP_BASE + 0x502a8, 0x654003d8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf8c000c1);
- MDP_OUTP(MDP_BASE + 0x502ac, 0x610003d7);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf84000d5);
- MDP_OUTP(MDP_BASE + 0x502b0, 0x5c8003d7);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf7c000e9);
- MDP_OUTP(MDP_BASE + 0x502b4, 0x580003d7);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf74000fd);
- MDP_OUTP(MDP_BASE + 0x502b8, 0x534003d8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6c00112);
- MDP_OUTP(MDP_BASE + 0x502bc, 0x4e8003d8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6800126);
- MDP_OUTP(MDP_BASE + 0x502c0, 0x494003da);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf600013a);
- MDP_OUTP(MDP_BASE + 0x502c4, 0x448003db);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf600014d);
- MDP_OUTP(MDP_BASE + 0x502c8, 0x3f4003dd);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00160);
- MDP_OUTP(MDP_BASE + 0x502cc, 0x3a4003df);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00172);
- MDP_OUTP(MDP_BASE + 0x502d0, 0x354003e1);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00184);
- MDP_OUTP(MDP_BASE + 0x502d4, 0x304003e3);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6000195);
- MDP_OUTP(MDP_BASE + 0x502d8, 0x2b0003e6);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf64001a6);
- MDP_OUTP(MDP_BASE + 0x502dc, 0x260003e8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6c001b4);
- MDP_OUTP(MDP_BASE + 0x502e0, 0x214003eb);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf78001c2);
- MDP_OUTP(MDP_BASE + 0x502e4, 0x1c4003ee);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf80001cf);
- MDP_OUTP(MDP_BASE + 0x502e8, 0x17c003f1);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf90001db);
- MDP_OUTP(MDP_BASE + 0x502ec, 0x134003f3);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfa0001e5);
- MDP_OUTP(MDP_BASE + 0x502f0, 0xf0003f6);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfb4001ee);
- MDP_OUTP(MDP_BASE + 0x502f4, 0xac003f9);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfcc001f5);
- MDP_OUTP(MDP_BASE + 0x502f8, 0x70003fb);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe4001fb);
- MDP_OUTP(MDP_BASE + 0x502fc, 0x34003fe);
-}
-
-static void mdp_load_bc_downscale_table_y_point8TO1(void)
-{
- MDP_OUTP(MDP_BASE + 0x5fffc, 0x0);
- MDP_OUTP(MDP_BASE + 0x50300, 0x7fc00000);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xff80000d);
- MDP_OUTP(MDP_BASE + 0x50304, 0x7ec003f9);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfec0001c);
- MDP_OUTP(MDP_BASE + 0x50308, 0x7d4003f3);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe40002b);
- MDP_OUTP(MDP_BASE + 0x5030c, 0x7b8003ed);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfd80003c);
- MDP_OUTP(MDP_BASE + 0x50310, 0x794003e8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfcc0004d);
- MDP_OUTP(MDP_BASE + 0x50314, 0x76c003e4);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfc40005f);
- MDP_OUTP(MDP_BASE + 0x50318, 0x73c003e0);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfb800071);
- MDP_OUTP(MDP_BASE + 0x5031c, 0x708003de);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfac00085);
- MDP_OUTP(MDP_BASE + 0x50320, 0x6d0003db);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfa000098);
- MDP_OUTP(MDP_BASE + 0x50324, 0x698003d9);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf98000ac);
- MDP_OUTP(MDP_BASE + 0x50328, 0x654003d8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf8c000c1);
- MDP_OUTP(MDP_BASE + 0x5032c, 0x610003d7);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf84000d5);
- MDP_OUTP(MDP_BASE + 0x50330, 0x5c8003d7);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf7c000e9);
- MDP_OUTP(MDP_BASE + 0x50334, 0x580003d7);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf74000fd);
- MDP_OUTP(MDP_BASE + 0x50338, 0x534003d8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6c00112);
- MDP_OUTP(MDP_BASE + 0x5033c, 0x4e8003d8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6800126);
- MDP_OUTP(MDP_BASE + 0x50340, 0x494003da);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf600013a);
- MDP_OUTP(MDP_BASE + 0x50344, 0x448003db);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf600014d);
- MDP_OUTP(MDP_BASE + 0x50348, 0x3f4003dd);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00160);
- MDP_OUTP(MDP_BASE + 0x5034c, 0x3a4003df);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00172);
- MDP_OUTP(MDP_BASE + 0x50350, 0x354003e1);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf5c00184);
- MDP_OUTP(MDP_BASE + 0x50354, 0x304003e3);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6000195);
- MDP_OUTP(MDP_BASE + 0x50358, 0x2b0003e6);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf64001a6);
- MDP_OUTP(MDP_BASE + 0x5035c, 0x260003e8);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf6c001b4);
- MDP_OUTP(MDP_BASE + 0x50360, 0x214003eb);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf78001c2);
- MDP_OUTP(MDP_BASE + 0x50364, 0x1c4003ee);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf80001cf);
- MDP_OUTP(MDP_BASE + 0x50368, 0x17c003f1);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xf90001db);
- MDP_OUTP(MDP_BASE + 0x5036c, 0x134003f3);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfa0001e5);
- MDP_OUTP(MDP_BASE + 0x50370, 0xf0003f6);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfb4001ee);
- MDP_OUTP(MDP_BASE + 0x50374, 0xac003f9);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfcc001f5);
- MDP_OUTP(MDP_BASE + 0x50378, 0x70003fb);
- MDP_OUTP(MDP_BASE + 0x5fffc, 0xfe4001fb);
- MDP_OUTP(MDP_BASE + 0x5037c, 0x34003fe);
-}
-
-static int mdp_get_edge_cond(MDPIBUF *iBuf, uint32 *dup, uint32 *dup2)
-{
- uint32 reg;
- uint32 dst_roi_width; /* Dimensions of DST ROI. */
- uint32 dst_roi_height; /* Used to calculate scaling ratios. */
-
- /*
- * positions of the luma pixels (relative to the image) required for
- * scaling the ROI
- */
- int32 luma_interp_point_left = 0; /* left-most luma pixel needed */
- int32 luma_interp_point_right = 0; /* right-most luma pixel needed */
- int32 luma_interp_point_top = 0; /* top-most luma pixel needed */
- int32 luma_interp_point_bottom = 0; /* bottom-most luma pixel needed */
-
- /*
- * positions of the chroma pixels (relative to the image) required for
- * interpolating a chroma value at all required luma positions
- */
- /* left-most chroma pixel needed */
- int32 chroma_interp_point_left = 0;
- /* right-most chroma pixel needed */
- int32 chroma_interp_point_right = 0;
- /* top-most chroma pixel needed */
- int32 chroma_interp_point_top = 0;
- /* bottom-most chroma pixel needed */
- int32 chroma_interp_point_bottom = 0;
-
- /*
- * a rectangular region within the chroma plane of the "image".
- * Chroma pixels falling inside this rectangle belong to the ROI
- */
- int32 chroma_bound_left = 0;
- int32 chroma_bound_right = 0;
- int32 chroma_bound_top = 0;
- int32 chroma_bound_bottom = 0;
-
- /*
- * number of chroma pixels to replicate on the left, right,
- * top and bottom edge of the ROI.
- */
- int32 chroma_repeat_left = 0;
- int32 chroma_repeat_right = 0;
- int32 chroma_repeat_top = 0;
- int32 chroma_repeat_bottom = 0;
-
- /*
- * number of luma pixels to replicate on the left, right,
- * top and bottom edge of the ROI.
- */
- int32 luma_repeat_left = 0;
- int32 luma_repeat_right = 0;
- int32 luma_repeat_top = 0;
- int32 luma_repeat_bottom = 0;
-
- boolean chroma_edge_enable;
-
- uint32 _is_scale_enabled = 0;
- uint32 _is_yuv_offsite_vertical = 0;
-
- /* fg edge duplicate */
- reg = 0x0;
-
- if (iBuf->mdpImg.mdpOp & MDPOP_ASCALE) { /* if scaling enabled */
-
- _is_scale_enabled = 1;
-
- /*
- * if rotation mode involves a 90 deg rotation, flip
- * dst_roi_width with dst_roi_height.
- * Scaling ratios is based on source ROI dimensions, and
- * dst ROI dimensions before rotation.
- */
- if (iBuf->mdpImg.mdpOp & MDPOP_ROT90) {
- dst_roi_width = iBuf->roi.dst_height;
- dst_roi_height = iBuf->roi.dst_width;
- } else {
- dst_roi_width = iBuf->roi.dst_width;
- dst_roi_height = iBuf->roi.dst_height;
- }
-
- /*
- * Find out the luma pixels needed for scaling in the
- * x direction (LEFT and RIGHT). Locations of pixels are
- * relative to the ROI. Upper-left corner of ROI corresponds
- * to coordinates (0,0). Also set the number of luma pixel
- * to repeat.
- */
- if (iBuf->roi.width > 3 * dst_roi_width) {
- /* scale factor < 1/3 */
- luma_interp_point_left = 0;
- luma_interp_point_right = (iBuf->roi.width - 1);
- luma_repeat_left = 0;
- luma_repeat_right = 0;
- } else if (iBuf->roi.width == 3 * dst_roi_width) {
- /* scale factor == 1/3 */
- luma_interp_point_left = 0;
- luma_interp_point_right = (iBuf->roi.width - 1) + 1;
- luma_repeat_left = 0;
- luma_repeat_right = 1;
- } else if ((iBuf->roi.width > dst_roi_width) &&
- (iBuf->roi.width < 3 * dst_roi_width)) {
- /* 1/3 < scale factor < 1 */
- luma_interp_point_left = -1;
- luma_interp_point_right = (iBuf->roi.width - 1) + 1;
- luma_repeat_left = 1;
- luma_repeat_right = 1;
- }
-
- else if (iBuf->roi.width == dst_roi_width) {
- /* scale factor == 1 */
- luma_interp_point_left = -1;
- luma_interp_point_right = (iBuf->roi.width - 1) + 2;
- luma_repeat_left = 1;
- luma_repeat_right = 2;
- } else { /* (iBuf->roi.width < dst_roi_width) */
- /* scale factor > 1 */
- luma_interp_point_left = -2;
- luma_interp_point_right = (iBuf->roi.width - 1) + 2;
- luma_repeat_left = 2;
- luma_repeat_right = 2;
- }
-
- /*
- * Find out the number of pixels needed for scaling in the
- * y direction (TOP and BOTTOM). Locations of pixels are
- * relative to the ROI. Upper-left corner of ROI corresponds
- * to coordinates (0,0). Also set the number of luma pixel
- * to repeat.
- */
- if (iBuf->roi.height > 3 * dst_roi_height) {
- /* scale factor < 1/3 */
- luma_interp_point_top = 0;
- luma_interp_point_bottom = (iBuf->roi.height - 1);
- luma_repeat_top = 0;
- luma_repeat_bottom = 0;
- } else if (iBuf->roi.height == 3 * dst_roi_height) {
- /* scale factor == 1/3 */
- luma_interp_point_top = 0;
- luma_interp_point_bottom = (iBuf->roi.height - 1) + 1;
- luma_repeat_top = 0;
- luma_repeat_bottom = 1;
- } else if ((iBuf->roi.height > dst_roi_height) &&
- (iBuf->roi.height < 3 * dst_roi_height)) {
- /* 1/3 < scale factor < 1 */
- luma_interp_point_top = -1;
- luma_interp_point_bottom = (iBuf->roi.height - 1) + 1;
- luma_repeat_top = 1;
- luma_repeat_bottom = 1;
- } else if (iBuf->roi.height == dst_roi_height) {
- /* scale factor == 1 */
- luma_interp_point_top = -1;
- luma_interp_point_bottom = (iBuf->roi.height - 1) + 2;
- luma_repeat_top = 1;
- luma_repeat_bottom = 2;
- } else { /* (iBuf->roi.height < dst_roi_height) */
- /* scale factor > 1 */
- luma_interp_point_top = -2;
- luma_interp_point_bottom = (iBuf->roi.height - 1) + 2;
- luma_repeat_top = 2;
- luma_repeat_bottom = 2;
- }
- } /* if (iBuf->scale.scale_flag) */
- else { /* scaling disabled */
- /*
- * Since no scaling is needed, Tile Fetch does not require any
- * more luma pixels than what the ROI contains.
- */
- luma_interp_point_left = (int32) 0;
- luma_interp_point_right = (int32) (iBuf->roi.width - 1);
- luma_interp_point_top = (int32) 0;
- luma_interp_point_bottom = (int32) (iBuf->roi.height - 1);
-
- luma_repeat_left = 0;
- luma_repeat_right = 0;
- luma_repeat_top = 0;
- luma_repeat_bottom = 0;
- }
-
- /* After adding the ROI offsets, we have locations of
- * luma_interp_points relative to the image.
- */
- luma_interp_point_left += (int32) (iBuf->roi.x);
- luma_interp_point_right += (int32) (iBuf->roi.x);
- luma_interp_point_top += (int32) (iBuf->roi.y);
- luma_interp_point_bottom += (int32) (iBuf->roi.y);
-
- /*
- * After adding the ROI offsets, we have locations of
- * chroma_interp_points relative to the image.
- */
- chroma_interp_point_left = luma_interp_point_left;
- chroma_interp_point_right = luma_interp_point_right;
- chroma_interp_point_top = luma_interp_point_top;
- chroma_interp_point_bottom = luma_interp_point_bottom;
-
- chroma_edge_enable = TRUE;
- /* find out which chroma pixels are needed for chroma upsampling. */
- switch (iBuf->mdpImg.imgType) {
- /*
- * cosite in horizontal axis
- * fully sampled in vertical axis
- */
- case MDP_Y_CBCR_H2V1:
- case MDP_Y_CRCB_H2V1:
- case MDP_YCRYCB_H2V1:
- /* floor( luma_interp_point_left / 2 ); */
- chroma_interp_point_left = luma_interp_point_left >> 1;
- /* floor( ( luma_interp_point_right + 1 ) / 2 ); */
- chroma_interp_point_right = (luma_interp_point_right + 1) >> 1;
-
- chroma_interp_point_top = luma_interp_point_top;
- chroma_interp_point_bottom = luma_interp_point_bottom;
- break;
-
- /*
- * cosite in horizontal axis
- * offsite in vertical axis
- */
- case MDP_Y_CBCR_H2V2:
- case MDP_Y_CRCB_H2V2:
- /* floor( luma_interp_point_left / 2) */
- chroma_interp_point_left = luma_interp_point_left >> 1;
-
- /* floor( ( luma_interp_point_right + 1 )/ 2 ) */
- chroma_interp_point_right = (luma_interp_point_right + 1) >> 1;
-
- /* floor( (luma_interp_point_top - 1 ) / 2 ) */
- chroma_interp_point_top = (luma_interp_point_top - 1) >> 1;
-
- /* floor( ( luma_interp_point_bottom + 1 ) / 2 ) */
- chroma_interp_point_bottom =
- (luma_interp_point_bottom + 1) >> 1;
-
- _is_yuv_offsite_vertical = 1;
- break;
-
- default:
- chroma_edge_enable = FALSE;
- chroma_interp_point_left = luma_interp_point_left;
- chroma_interp_point_right = luma_interp_point_right;
- chroma_interp_point_top = luma_interp_point_top;
- chroma_interp_point_bottom = luma_interp_point_bottom;
-
- break;
- }
-
- /* only if the image type is in YUV domain, we calculate chroma edge */
- if (chroma_edge_enable) {
- /* Define which chroma pixels belong to the ROI */
- switch (iBuf->mdpImg.imgType) {
- /*
- * Cosite in horizontal direction, and fully sampled
- * in vertical direction.
- */
- case MDP_Y_CBCR_H2V1:
- case MDP_Y_CRCB_H2V1:
- case MDP_YCRYCB_H2V1:
- /*
- * width of chroma ROI is 1/2 of size of luma ROI
- * height of chroma ROI same as size of luma ROI
- */
- chroma_bound_left = iBuf->roi.x / 2;
-
- /* there are half as many chroma pixels as luma pixels */
- chroma_bound_right =
- (iBuf->roi.width + iBuf->roi.x - 1) / 2;
- chroma_bound_top = iBuf->roi.y;
- chroma_bound_bottom =
- (iBuf->roi.height + iBuf->roi.y - 1);
- break;
-
- case MDP_Y_CBCR_H2V2:
- case MDP_Y_CRCB_H2V2:
- /*
- * cosite in horizontal dir, and offsite in vertical dir
- * width of chroma ROI is 1/2 of size of luma ROI
- * height of chroma ROI is 1/2 of size of luma ROI
- */
-
- chroma_bound_left = iBuf->roi.x / 2;
- chroma_bound_right =
- (iBuf->roi.width + iBuf->roi.x - 1) / 2;
- chroma_bound_top = iBuf->roi.y / 2;
- chroma_bound_bottom =
- (iBuf->roi.height + iBuf->roi.y - 1) / 2;
- break;
-
- default:
- /*
- * If no valid chroma sub-sampling format specified,
- * assume 4:4:4 ( i.e. fully sampled). Set ROI
- * boundaries for chroma same as ROI boundaries for
- * luma.
- */
- chroma_bound_left = iBuf->roi.x;
- chroma_bound_right = iBuf->roi.width + iBuf->roi.x - 1;
- chroma_bound_top = iBuf->roi.y;
- chroma_bound_bottom =
- (iBuf->roi.height + iBuf->roi.y - 1);
- break;
- }
-
- /*
- * Knowing which chroma pixels are needed, and which chroma
- * pixels belong to the ROI (i.e. available for fetching ),
- * calculate how many chroma pixels Tile Fetch needs to
- * duplicate. If any required chroma pixels falls outside
- * of the ROI, Tile Fetch must obtain them by replicating
- * pixels.
- */
- if (chroma_bound_left > chroma_interp_point_left)
- chroma_repeat_left =
- chroma_bound_left - chroma_interp_point_left;
- else
- chroma_repeat_left = 0;
-
- if (chroma_interp_point_right > chroma_bound_right)
- chroma_repeat_right =
- chroma_interp_point_right - chroma_bound_right;
- else
- chroma_repeat_right = 0;
-
- if (chroma_bound_top > chroma_interp_point_top)
- chroma_repeat_top =
- chroma_bound_top - chroma_interp_point_top;
- else
- chroma_repeat_top = 0;
-
- if (chroma_interp_point_bottom > chroma_bound_bottom)
- chroma_repeat_bottom =
- chroma_interp_point_bottom - chroma_bound_bottom;
- else
- chroma_repeat_bottom = 0;
-
- if (_is_scale_enabled && (iBuf->roi.height == 1)
- && _is_yuv_offsite_vertical) {
- chroma_repeat_bottom = 3;
- chroma_repeat_top = 0;
- }
- }
- /* make sure chroma repeats are non-negative */
- if ((chroma_repeat_left < 0) || (chroma_repeat_right < 0) ||
- (chroma_repeat_top < 0) || (chroma_repeat_bottom < 0))
- return -1;
-
- /* make sure chroma repeats are no larger than 3 pixels */
- if ((chroma_repeat_left > 3) || (chroma_repeat_right > 3) ||
- (chroma_repeat_top > 3) || (chroma_repeat_bottom > 3))
- return -1;
-
- /* make sure luma repeats are non-negative */
- if ((luma_repeat_left < 0) || (luma_repeat_right < 0) ||
- (luma_repeat_top < 0) || (luma_repeat_bottom < 0))
- return -1;
-
- /* make sure luma repeats are no larger than 3 pixels */
- if ((luma_repeat_left > 3) || (luma_repeat_right > 3) ||
- (luma_repeat_top > 3) || (luma_repeat_bottom > 3))
- return -1;
-
- /* write chroma_repeat_left to register */
- reg |= (chroma_repeat_left & 3) << MDP_LEFT_CHROMA;
-
- /* write chroma_repeat_right to register */
- reg |= (chroma_repeat_right & 3) << MDP_RIGHT_CHROMA;
-
- /* write chroma_repeat_top to register */
- reg |= (chroma_repeat_top & 3) << MDP_TOP_CHROMA;
-
- /* write chroma_repeat_bottom to register */
- reg |= (chroma_repeat_bottom & 3) << MDP_BOTTOM_CHROMA;
-
- /* write luma_repeat_left to register */
- reg |= (luma_repeat_left & 3) << MDP_LEFT_LUMA;
-
- /* write luma_repeat_right to register */
- reg |= (luma_repeat_right & 3) << MDP_RIGHT_LUMA;
-
- /* write luma_repeat_top to register */
- reg |= (luma_repeat_top & 3) << MDP_TOP_LUMA;
-
- /* write luma_repeat_bottom to register */
- reg |= (luma_repeat_bottom & 3) << MDP_BOTTOM_LUMA;
-
- /* done with reg */
- *dup = reg;
-
- /* bg edge duplicate */
- reg = 0x0;
-
- switch (iBuf->ibuf_type) {
- case MDP_Y_CBCR_H2V2:
- case MDP_Y_CRCB_H2V2:
- /*
- * Edge condition for MDP_Y_CRCB/CBCR_H2V2 cosite only.
- * For 420 cosite, 1 chroma replicated on all sides except
- * left, so reg 101b8 should be 0x0209. For 420 offsite,
- * 1 chroma replicated all sides.
- */
- if (iBuf->roi.lcd_y == 0) {
- reg |= BIT(MDP_TOP_CHROMA);
- }
-
- if ((iBuf->roi.lcd_y + iBuf->roi.dst_height) ==
- iBuf->ibuf_height) {
- reg |= BIT(MDP_BOTTOM_CHROMA);
- }
-
- if (((iBuf->roi.lcd_x + iBuf->roi.dst_width) ==
- iBuf->ibuf_width) && ((iBuf->roi.dst_width % 2) == 0)) {
- reg |= BIT(MDP_RIGHT_CHROMA);
- }
-
- break;
-
- case MDP_Y_CBCR_H2V1:
- case MDP_Y_CRCB_H2V1:
- case MDP_YCRYCB_H2V1:
- if (((iBuf->roi.lcd_x + iBuf->roi.dst_width) ==
- iBuf->ibuf_width) && ((iBuf->roi.dst_width % 2) == 0)) {
- reg |= BIT(MDP_RIGHT_CHROMA);
- }
- break;
- default:
- break;
- }
-
- *dup2 = reg;
-
- return 0;
-}
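The ladder in the width branch above maps the source/destination ROI ratio to an interpolation window and an edge-repeat count. A minimal standalone sketch of just the left-edge case (plain C, not the driver code; the helper name is made up):

#include <stdint.h>

/* Illustrative helper mirroring the band selection above: given the
 * source and destination ROI widths, report how many left-edge luma
 * pixels Tile Fetch would have to replicate (0..2). */
static int32_t luma_left_repeat(uint32_t src_w, uint32_t dst_w)
{
    if (src_w > 3 * dst_w)          /* scale factor < 1/3 */
        return 0;
    else if (src_w == 3 * dst_w)    /* scale factor == 1/3 */
        return 0;
    else if (src_w > dst_w)         /* 1/3 < scale factor < 1 */
        return 1;
    else if (src_w == dst_w)        /* scale factor == 1 */
        return 1;
    else                            /* scale factor > 1 (upscale) */
        return 2;
}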
-
-#define ADJUST_IP /* for 1/3 scale factor fix */
-
-static int mdp_calc_scale_params(
-/* ROI origin coordinate for the dimension */
- uint32 org,
-/* src ROI dimension */
- uint32 dim_in,
-/* scaled ROI dimension*/
- uint32 dim_out,
-/* is this ROI width dimension? */
- boolean is_W,
-/* initial phase location address */
- int32 *phase_init_ptr,
-/* phase increment location address */
- uint32 *phase_step_ptr,
-/* ROI start over-fetch location address */
- uint32 *num_repl_beg_ptr,
-/* ROI end over-fetch location address */
- uint32 *num_repl_end_ptr)
-{
- boolean rpa_on = FALSE;
- int init_phase = 0;
- uint32 beg_of = 0;
- uint32 end_of = 0;
- uint64 numer = 0;
- uint64 denom = 0;
- /*uint64 inverter = 1; */
- int64 point5 = 1;
- int64 one = 1;
- int64 k1, k2, k3, k4; /* linear equation coefficients */
- uint64 int_mask;
- uint64 fract_mask;
- uint64 Os;
- int64 Osprime;
- int64 Od;
- int64 Odprime;
- int64 Oreq;
- uint64 Es;
- uint64 Ed;
- uint64 Ereq;
-#ifdef ADJUST_IP
- int64 IP64;
- int64 delta;
-#endif
- uint32 mult;
-
- /*
- * The phase accumulator should really be rational for all cases in a
- * general purpose polyphase scaler for a tiled architecture with
- * non-zero origin capability because there is no way to represent
- * certain scale factors in fixed point regardless of precision.
- * The error incurred in attempting to use fixed point is most
- * egregious for SF where 1/SF is an integral multiple of 1/3.
- *
- * However, since the MDP2 has already been committed to HW, we
- * only use the rational phase accumulator (RPA) when 1/SF is an
- * integral multiple of 1/3. This will help minimize regressions in
- * matching the HW to the C-Sim.
- */
- /*
- * Set the RPA flag for this dimension.
- *
- * In order for 1/SF (dim_in/dim_out) to be an integral multiple of
- * 1/3, dim_out must be an integral multiple of 3.
- */
- if (!(dim_out % 3)) {
- mult = dim_out / 3;
- rpa_on = (!(dim_in % mult));
- }
-
- numer = dim_out;
- denom = dim_in;
-
- /*
- * convert to U30.34 before division
- *
- * The K vectors carry 4 extra bits of precision
- * and are rounded.
- *
- * We initially go 5 bits over then round by adding
- * 1 and right shifting by 1
- * so final result is U31.33
- */
- numer <<= PQF_PLUS_5;
-
- /* now calculate the scale factor (aka k3) */
- k3 = ((mdp_do_div(numer, denom) + 1) >> 1);
-
- /* check scale factor for legal range [0.25 - 4.0] */
- if (((k3 >> 4) < (1LL << PQF_MINUS_2)) ||
- ((k3 >> 4) > (1LL << PQF_PLUS_2))) {
- return -1;
- }
-
- /* calculate inverse scale factor (aka k1) for phase init */
- numer = dim_in;
- denom = dim_out;
- numer <<= PQF_PLUS_5;
- k1 = ((mdp_do_div(numer, denom) + 1) >> 1);
-
- /*
- * calculate initial phase and ROI overfetch
- */
- /* convert point5 & one to S39.24 (will always be positive) */
- point5 <<= (PQF_PLUS_4 - 1);
- one <<= PQF_PLUS_4;
- k2 = ((k1 - one) >> 1);
- init_phase = (int)(k2 >> 4);
- k4 = ((k3 - one) >> 1);
- if (k3 == one) {
- /* the simple case; SF = 1.0 */
- beg_of = 1;
- end_of = 2;
- } else {
- /* calculate the masks */
- fract_mask = one - 1;
- int_mask = ~fract_mask;
-
- if (!rpa_on) {
- /*
- * FIXED POINT IMPLEMENTATION
- */
- if (!org) {
- /* A fairly simple case; ROI origin = 0 */
- if (k1 < one) {
- /* upscaling */
- beg_of = end_of = 2;
- }
- /* 0.33 <= SF < 1.0 */
- else if (k1 < (3LL << PQF_PLUS_4))
- beg_of = end_of = 1;
- /* 0.33 == SF */
- else if (k1 == (3LL << PQF_PLUS_4)) {
- beg_of = 0;
- end_of = 1;
- }
- /* 0.25 <= SF < 0.33 */
- else
- beg_of = end_of = 0;
- } else {
- /*
- * The complicated case; ROI origin != 0
- * init_phase needs to be adjusted
- * OF is also position dependent
- */
-
- /* map (org - .5) into destination space */
- Os = ((uint64) org << 1) - 1;
- Od = ((k3 * Os) >> 1) + k4;
-
- /* take the ceiling */
- Odprime = (Od & int_mask);
- if (Odprime != Od)
- Odprime += one;
-
- /* now map that back to source space */
- Osprime = (k1 * (Odprime >> PQF_PLUS_4)) + k2;
-
- /* then floor & decrement to calculate the required
- starting coordinate */
- Oreq = (Osprime & int_mask) - one;
-
- /* calculate end coord in destination space then map to
- source space */
- Ed = Odprime +
- ((uint64) dim_out << PQF_PLUS_4) - one;
- Es = (k1 * (Ed >> PQF_PLUS_4)) + k2;
-
- /* now floor & increment by 2 to calculate the required
- ending coordinate */
- Ereq = (Es & int_mask) + (one << 1);
-
- /* calculate initial phase */
-#ifdef ADJUST_IP
-
- IP64 = Osprime - Oreq;
- delta = ((int64) (org) << PQF_PLUS_4) - Oreq;
- IP64 -= delta;
-
- /* limit to valid range before the left shift */
- delta = (IP64 & (1LL << 63)) ? 4 : -4;
- delta <<= PQF_PLUS_4;
- while (abs((int)(IP64 >> PQF_PLUS_4)) > 4)
- IP64 += delta;
-
- /* right shift to account for extra bits of precision */
- init_phase = (int)(IP64 >> 4);
-
-#else /* ADJUST_IP */
-
- /* just calculate the real initial phase */
- init_phase = (int)((Osprime - Oreq) >> 4);
-
-#endif /* ADJUST_IP */
-
- /* calculate the overfetch */
- beg_of = org - (uint32) (Oreq >> PQF_PLUS_4);
- end_of =
- (uint32) (Ereq >> PQF_PLUS_4) - (org +
- dim_in -
- 1);
- }
- } else {
- /*
- * RPA IMPLEMENTATION
- *
- * init_phase needs to be calculated in all RPA_on cases
- * because it's a numerator, not a fixed point value.
- */
-
- /* map (org - .5) into destination space */
- Os = ((uint64) org << PQF_PLUS_4) - point5;
- Od = mdp_do_div((dim_out * (Os + point5)),
- dim_in) - point5;
-
- /* take the ceiling */
- Odprime = (Od & int_mask);
- if (Odprime != Od)
- Odprime += one;
-
- /* now map that back to source space */
- Osprime =
- mdp_do_div((dim_in * (Odprime + point5)),
- dim_out) - point5;
-
- /* then floor & decrement to calculate the required
- starting coordinate */
- Oreq = (Osprime & int_mask) - one;
-
- /* calculate end coord in destination space then map to
- source space */
- Ed = Odprime + ((uint64) dim_out << PQF_PLUS_4) - one;
- Es = mdp_do_div((dim_in * (Ed + point5)),
- dim_out) - point5;
-
- /* now floor & increment by 2 to calculate the required
- ending coordinate */
- Ereq = (Es & int_mask) + (one << 1);
-
- /* calculate initial phase */
-
-#ifdef ADJUST_IP
-
- IP64 = Osprime - Oreq;
- delta = ((int64) (org) << PQF_PLUS_4) - Oreq;
- IP64 -= delta;
-
- /* limit to valid range before the left shift */
- delta = (IP64 & (1LL << 63)) ? 4 : -4;
- delta <<= PQF_PLUS_4;
- while (abs((int)(IP64 >> PQF_PLUS_4)) > 4)
- IP64 += delta;
-
- /* right shift to account for extra bits of precision */
- init_phase = (int)(IP64 >> 4);
-
-#else /* ADJUST_IP */
-
- /* just calculate the real initial phase */
- init_phase = (int)((Osprime - Oreq) >> 4);
-
-#endif /* ADJUST_IP */
-
- /* calculate the overfetch */
- beg_of = org - (uint32) (Oreq >> PQF_PLUS_4);
- end_of =
- (uint32) (Ereq >> PQF_PLUS_4) - (org + dim_in - 1);
- }
- }
-
- /* return the scale parameters */
- *phase_init_ptr = init_phase;
- *phase_step_ptr = (uint32) (k1 >> 4);
- *num_repl_beg_ptr = beg_of;
- *num_repl_end_ptr = end_of;
-
- return 0;
-}
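The function above turns dim_out/dim_in into a rounded fixed-point ratio (k3) and rejects anything outside 0.25x-4.0x. A host-side sketch of just that check, assuming the 29-bit phase format of the 3.1 scaler plus the four extra precision bits the comments mention; the PQF_* values here are assumptions, not taken from the driver headers:

#include <stdint.h>

#define PQF          29          /* assumed base: 29 fractional phase bits */
#define PQF_MINUS_2  (PQF - 2)
#define PQF_PLUS_2   (PQF + 2)
#define PQF_PLUS_5   (PQF + 5)

/* Re-derive k3 = dim_out/dim_in as a rounded fixed-point ratio and apply
 * the same 0.25x..4.0x legality test; plain 64-bit division stands in
 * for mdp_do_div().  Caller must pass dim_in > 0. */
static int scale_factor_ok(uint32_t dim_in, uint32_t dim_out)
{
    uint64_t numer = (uint64_t)dim_out << PQF_PLUS_5;
    int64_t k3 = (int64_t)((numer / dim_in + 1) >> 1);  /* PQF+4 frac bits */

    return ((k3 >> 4) >= (1LL << PQF_MINUS_2)) &&
           ((k3 >> 4) <= (1LL << PQF_PLUS_2));
}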
-
-static uint8 *mdp_adjust_rot_addr(MDPIBUF *iBuf, uint8 *addr, uint32 uv)
-{
- uint32 dest_ystride = iBuf->ibuf_width * iBuf->bpp;
- uint32 h_slice = 1;
-
- if (uv && ((iBuf->ibuf_type == MDP_Y_CBCR_H2V2) ||
- (iBuf->ibuf_type == MDP_Y_CRCB_H2V2)))
- h_slice = 2;
-
- if (MDP_CHKBIT(iBuf->mdpImg.mdpOp, MDPOP_ROT90) ^
- MDP_CHKBIT(iBuf->mdpImg.mdpOp, MDPOP_LR)) {
- addr =
- addr + (iBuf->roi.dst_width -
- MIN(16, iBuf->roi.dst_width)) * iBuf->bpp;
- }
- if (MDP_CHKBIT(iBuf->mdpImg.mdpOp, MDPOP_UD)) {
- addr =
- addr + ((iBuf->roi.dst_height -
- MIN(16, iBuf->roi.dst_height))/h_slice) * dest_ystride;
- }
-
- return addr;
-}
-
-void mdp_set_scale(MDPIBUF *iBuf,
- uint32 dst_roi_width,
- uint32 dst_roi_height,
- boolean inputRGB, boolean outputRGB, uint32 *pppop_reg_ptr)
-{
- uint32 dst_roi_width_scale;
- uint32 dst_roi_height_scale;
- boolean use_pr;
- uint32 phasex_step = 0;
- uint32 phasey_step = 0;
- int32 phasex_init = 0;
- int32 phasey_init = 0;
- uint32 lines_dup = 0;
- uint32 lines_dup_bg = 0;
- uint32 dummy;
- uint32 mdp_blur = 0;
-
- if (iBuf->mdpImg.mdpOp & MDPOP_ASCALE) {
- if (iBuf->mdpImg.mdpOp & MDPOP_ROT90) {
- dst_roi_width_scale = dst_roi_height;
- dst_roi_height_scale = dst_roi_width;
- } else {
- dst_roi_width_scale = dst_roi_width;
- dst_roi_height_scale = dst_roi_height;
- }
-
- mdp_blur = iBuf->mdpImg.mdpOp & MDPOP_BLUR;
-
- if ((dst_roi_width_scale != iBuf->roi.width) ||
- (dst_roi_height_scale != iBuf->roi.height) ||
- mdp_blur) {
- *pppop_reg_ptr |=
- (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON);
-
- /* let's use SHIM logic to calculate the partial ROI scaling */
-#if 0
- phasex_step =
- (uint32) mdp_do_div(0x20000000 * iBuf->roi.width,
- dst_roi_width_scale);
- phasey_step =
- (uint32) mdp_do_div(0x20000000 * iBuf->roi.height,
- dst_roi_height_scale);
-
-/*
- phasex_step= ((long long) iBuf->roi.width * 0x20000000)/dst_roi_width_scale;
- phasey_step= ((long long)iBuf->roi.height * 0x20000000)/dst_roi_height_scale;
-*/
-
- phasex_init =
- (((long long)phasex_step - 0x20000000) >> 1);
- phasey_init =
- (((long long)phasey_step - 0x20000000) >> 1);
-
-#else
- mdp_calc_scale_params(iBuf->roi.x, iBuf->roi.width,
- dst_roi_width_scale, 1,
- &phasex_init, &phasex_step,
- &dummy, &dummy);
- mdp_calc_scale_params(iBuf->roi.y, iBuf->roi.height,
- dst_roi_height_scale, 0,
- &phasey_init, &phasey_step,
- &dummy, &dummy);
-#endif
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x013c,
- phasex_init);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0140,
- phasey_init);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0144,
- phasex_step);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0148,
- phasey_step);
-
- use_pr = (inputRGB) && (outputRGB);
-
- if ((dst_roi_width_scale > iBuf->roi.width) ||
- (dst_roi_height_scale > iBuf->roi.height)) {
- if ((use_pr)
- && (mdp_curr_up_scale_xy !=
- MDP_PR_SCALE_UP)) {
- mdp_load_pr_upscale_table();
- mdp_curr_up_scale_xy = MDP_PR_SCALE_UP;
- } else if ((!use_pr)
- && (mdp_curr_up_scale_xy !=
- MDP_BC_SCALE_UP)) {
- mdp_load_bc_upscale_table();
- mdp_curr_up_scale_xy = MDP_BC_SCALE_UP;
- }
- }
-
- if (mdp_blur) {
- load_scale_table(mdp_gaussian_blur_table,
- ARRAY_SIZE(mdp_gaussian_blur_table));
- mdp_curr_down_scale_x = MDP_SCALE_BLUR;
- mdp_curr_down_scale_y = MDP_SCALE_BLUR;
- }
-
- /* 0.2 < x <= 1 scaling factor */
- if ((dst_roi_width_scale <= iBuf->roi.width) &&
- !mdp_blur) {
- if (((dst_roi_width_scale * 10) /
- iBuf->roi.width) > 8) {
- if ((use_pr)
- && (mdp_curr_down_scale_x !=
- MDP_PR_SCALE_POINT8_1)) {
- mdp_load_pr_downscale_table_x_point8TO1
- ();
- mdp_curr_down_scale_x =
- MDP_PR_SCALE_POINT8_1;
- } else if ((!use_pr)
- && (mdp_curr_down_scale_x !=
- MDP_BC_SCALE_POINT8_1)) {
- mdp_load_bc_downscale_table_x_point8TO1
- ();
- mdp_curr_down_scale_x =
- MDP_BC_SCALE_POINT8_1;
- }
- } else
- if (((dst_roi_width_scale * 10) /
- iBuf->roi.width) > 6) {
- if ((use_pr)
- && (mdp_curr_down_scale_x !=
- MDP_PR_SCALE_POINT6_POINT8)) {
- mdp_load_pr_downscale_table_x_point6TOpoint8
- ();
- mdp_curr_down_scale_x =
- MDP_PR_SCALE_POINT6_POINT8;
- } else if ((!use_pr)
- && (mdp_curr_down_scale_x !=
- MDP_BC_SCALE_POINT6_POINT8))
- {
- mdp_load_bc_downscale_table_x_point6TOpoint8
- ();
- mdp_curr_down_scale_x =
- MDP_BC_SCALE_POINT6_POINT8;
- }
- } else
- if (((dst_roi_width_scale * 10) /
- iBuf->roi.width) > 4) {
- if ((use_pr)
- && (mdp_curr_down_scale_x !=
- MDP_PR_SCALE_POINT4_POINT6)) {
- mdp_load_pr_downscale_table_x_point4TOpoint6
- ();
- mdp_curr_down_scale_x =
- MDP_PR_SCALE_POINT4_POINT6;
- } else if ((!use_pr)
- && (mdp_curr_down_scale_x !=
- MDP_BC_SCALE_POINT4_POINT6))
- {
- mdp_load_bc_downscale_table_x_point4TOpoint6
- ();
- mdp_curr_down_scale_x =
- MDP_BC_SCALE_POINT4_POINT6;
- }
- } else {
- if ((use_pr)
- && (mdp_curr_down_scale_x !=
- MDP_PR_SCALE_POINT2_POINT4)) {
- mdp_load_pr_downscale_table_x_point2TOpoint4
- ();
- mdp_curr_down_scale_x =
- MDP_PR_SCALE_POINT2_POINT4;
- } else if ((!use_pr)
- && (mdp_curr_down_scale_x !=
- MDP_BC_SCALE_POINT2_POINT4))
- {
- mdp_load_bc_downscale_table_x_point2TOpoint4
- ();
- mdp_curr_down_scale_x =
- MDP_BC_SCALE_POINT2_POINT4;
- }
- }
- }
- /* 0.2 < y <= 1 scaling factor */
- if ((dst_roi_height_scale <= iBuf->roi.height) &&
- !mdp_blur) {
- if (((dst_roi_height_scale * 10) /
- iBuf->roi.height) > 8) {
- if ((use_pr)
- && (mdp_curr_down_scale_y !=
- MDP_PR_SCALE_POINT8_1)) {
- mdp_load_pr_downscale_table_y_point8TO1
- ();
- mdp_curr_down_scale_y =
- MDP_PR_SCALE_POINT8_1;
- } else if ((!use_pr)
- && (mdp_curr_down_scale_y !=
- MDP_BC_SCALE_POINT8_1)) {
- mdp_load_bc_downscale_table_y_point8TO1
- ();
- mdp_curr_down_scale_y =
- MDP_BC_SCALE_POINT8_1;
- }
- } else
- if (((dst_roi_height_scale * 10) /
- iBuf->roi.height) > 6) {
- if ((use_pr)
- && (mdp_curr_down_scale_y !=
- MDP_PR_SCALE_POINT6_POINT8)) {
- mdp_load_pr_downscale_table_y_point6TOpoint8
- ();
- mdp_curr_down_scale_y =
- MDP_PR_SCALE_POINT6_POINT8;
- } else if ((!use_pr)
- && (mdp_curr_down_scale_y !=
- MDP_BC_SCALE_POINT6_POINT8))
- {
- mdp_load_bc_downscale_table_y_point6TOpoint8
- ();
- mdp_curr_down_scale_y =
- MDP_BC_SCALE_POINT6_POINT8;
- }
- } else
- if (((dst_roi_height_scale * 10) /
- iBuf->roi.height) > 4) {
- if ((use_pr)
- && (mdp_curr_down_scale_y !=
- MDP_PR_SCALE_POINT4_POINT6)) {
- mdp_load_pr_downscale_table_y_point4TOpoint6
- ();
- mdp_curr_down_scale_y =
- MDP_PR_SCALE_POINT4_POINT6;
- } else if ((!use_pr)
- && (mdp_curr_down_scale_y !=
- MDP_BC_SCALE_POINT4_POINT6))
- {
- mdp_load_bc_downscale_table_y_point4TOpoint6
- ();
- mdp_curr_down_scale_y =
- MDP_BC_SCALE_POINT4_POINT6;
- }
- } else {
- if ((use_pr)
- && (mdp_curr_down_scale_y !=
- MDP_PR_SCALE_POINT2_POINT4)) {
- mdp_load_pr_downscale_table_y_point2TOpoint4
- ();
- mdp_curr_down_scale_y =
- MDP_PR_SCALE_POINT2_POINT4;
- } else if ((!use_pr)
- && (mdp_curr_down_scale_y !=
- MDP_BC_SCALE_POINT2_POINT4))
- {
- mdp_load_bc_downscale_table_y_point2TOpoint4
- ();
- mdp_curr_down_scale_y =
- MDP_BC_SCALE_POINT2_POINT4;
- }
- }
- }
- } else {
- iBuf->mdpImg.mdpOp &= ~(MDPOP_ASCALE);
- }
- }
- /* setting edge condition here after scaling check */
- if (mdp_get_edge_cond(iBuf, &lines_dup, &lines_dup_bg))
- printk(KERN_ERR "msm_fb: mdp_get_edge_cond() error!\n");
-
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01b8, lines_dup);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01bc, lines_dup_bg);
-}
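When downscaling (and not blurring), mdp_set_scale() picks one of four coefficient tables per axis from the integer ratio (dst * 10) / src, comparing against 8, 6 and 4. The selection reduced to a pure function (the enum names are invented for this sketch):

#include <stdint.h>

enum downscale_band {
    BAND_0P8_TO_1,      /* ratio  > 0.8 */
    BAND_0P6_TO_0P8,    /* 0.6 < ratio <= 0.8 */
    BAND_0P4_TO_0P6,    /* 0.4 < ratio <= 0.6 */
    BAND_0P2_TO_0P4     /* ratio <= 0.4 */
};

/* Same integer arithmetic as the driver's ladder: (dst * 10) / src
 * compared against 8, 6 and 4.  src must be non-zero. */
static enum downscale_band pick_downscale_band(uint32_t dst, uint32_t src)
{
    uint32_t r10 = (dst * 10) / src;

    if (r10 > 8)
        return BAND_0P8_TO_1;
    else if (r10 > 6)
        return BAND_0P6_TO_0P8;
    else if (r10 > 4)
        return BAND_0P4_TO_0P6;
    else
        return BAND_0P2_TO_0P4;
}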
-
-void mdp_init_scale_table(void)
-{
- mdp_curr_up_scale_xy = MDP_INIT_SCALE;
- mdp_curr_down_scale_x = MDP_INIT_SCALE;
- mdp_curr_down_scale_y = MDP_INIT_SCALE;
-}
-
-void mdp_adjust_start_addr(uint8 **src0,
- uint8 **src1,
- int v_slice,
- int h_slice,
- int x,
- int y,
- uint32 width,
- uint32 height, int bpp, MDPIBUF *iBuf, int layer)
-{
- *src0 += (x + y * width) * bpp;
-
- /* if it's dest/bg buffer, we need to adjust it for rotation */
- if (layer != 0)
- *src0 = mdp_adjust_rot_addr(iBuf, *src0, 0);
-
- if (*src1) {
- /*
- * MDP_Y_CBCR_H2V2/MDP_Y_CRCB_H2V2 cosite for now;
- * for offsite we would need to shift the x direction the same way as y
- */
- *src1 +=
- ((x / h_slice) * h_slice +
- ((y == 0) ? 0 : ((y + 1) / v_slice - 1) * width)) * bpp;
-
- /* if it's dest/bg buffer, we need to adjust it for rotation */
- if (layer != 0)
- *src1 = mdp_adjust_rot_addr(iBuf, *src1, 1);
- }
-}
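For H2V2 (4:2:0) buffers the chroma plane has half the rows, so the second pointer advances by half as many lines and x is snapped to the 2-pixel slice. A standalone sketch of that offset math (the names and size_t outputs are mine, not the driver's):

#include <stddef.h>
#include <stdint.h>

/* Byte offsets into the luma and chroma planes for an H2V2 frame,
 * mirroring the *src0/*src1 adjustments above (h_slice = v_slice = 2). */
static void h2v2_plane_offsets(uint32_t x, uint32_t y, uint32_t width,
                               int bpp, size_t *luma, size_t *chroma)
{
    const uint32_t h_slice = 2, v_slice = 2;

    *luma = (size_t)(x + y * width) * bpp;
    *chroma = (size_t)((x / h_slice) * h_slice +
              ((y == 0) ? 0 : ((y + 1) / v_slice - 1) * width)) * bpp;
}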
-
-void mdp_set_blend_attr(MDPIBUF *iBuf,
- uint32 *alpha,
- uint32 *tpVal,
- uint32 perPixelAlpha, uint32 *pppop_reg_ptr)
-{
- if (perPixelAlpha) {
- *pppop_reg_ptr |= PPP_OP_ROT_ON |
- PPP_OP_BLEND_ON | PPP_OP_BLEND_SRCPIXEL_ALPHA;
- } else {
- if ((iBuf->mdpImg.mdpOp & MDPOP_ALPHAB)
- && (iBuf->mdpImg.alpha == 0xff)) {
- iBuf->mdpImg.mdpOp &= ~(MDPOP_ALPHAB);
- }
-
- if ((iBuf->mdpImg.mdpOp & MDPOP_ALPHAB)
- && (iBuf->mdpImg.mdpOp & MDPOP_TRANSP)) {
- *pppop_reg_ptr |=
- PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
- PPP_OP_BLEND_CONSTANT_ALPHA |
- PPP_OP_BLEND_ALPHA_BLEND_NORMAL |
- PPP_BLEND_CALPHA_TRNASP;
-
- *alpha = iBuf->mdpImg.alpha;
- *tpVal = iBuf->mdpImg.tpVal;
- } else {
- if (iBuf->mdpImg.mdpOp & MDPOP_TRANSP) {
- *pppop_reg_ptr |= PPP_OP_ROT_ON |
- PPP_OP_BLEND_ON |
- PPP_OP_BLEND_SRCPIXEL_TRANSP;
- *tpVal = iBuf->mdpImg.tpVal;
- } else if (iBuf->mdpImg.mdpOp & MDPOP_ALPHAB) {
- *pppop_reg_ptr |= PPP_OP_ROT_ON |
- PPP_OP_BLEND_ON |
- PPP_OP_BLEND_ALPHA_BLEND_NORMAL |
- PPP_OP_BLEND_CONSTANT_ALPHA;
- *alpha = iBuf->mdpImg.alpha;
- }
- }
- }
-}
diff --git a/drivers/staging/msm/mdp_ppp_v31.c b/drivers/staging/msm/mdp_ppp_v31.c
deleted file mode 100644
index 76495dbe4e6..00000000000
--- a/drivers/staging/msm/mdp_ppp_v31.c
+++ /dev/null
@@ -1,828 +0,0 @@
-/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/fb.h>
-#include "linux/proc_fs.h"
-
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <asm/div64.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-
-#define MDP_SCALE_COEFF_NUM 32
-#define MDP_SCALE_0P2_TO_0P4_INDEX 0
-#define MDP_SCALE_0P4_TO_0P6_INDEX 32
-#define MDP_SCALE_0P6_TO_0P8_INDEX 64
-#define MDP_SCALE_0P8_TO_8P0_INDEX 96
-#define MDP_SCALE_COEFF_MASK 0x3ff
-
-#define MDP_SCALE_PR 0
-#define MDP_SCALE_FIR 1
-
-static uint32 mdp_scale_0p8_to_8p0_mode;
-static uint32 mdp_scale_0p6_to_0p8_mode;
-static uint32 mdp_scale_0p4_to_0p6_mode;
-static uint32 mdp_scale_0p2_to_0p4_mode;
-
-/* -------- All scaling range, "pixel repeat" -------- */
-static int16 mdp_scale_pixel_repeat_C0[MDP_SCALE_COEFF_NUM] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-static int16 mdp_scale_pixel_repeat_C1[MDP_SCALE_COEFF_NUM] = {
- 511, 511, 511, 511, 511, 511, 511, 511,
- 511, 511, 511, 511, 511, 511, 511, 511,
- 511, 511, 511, 511, 511, 511, 511, 511,
- 511, 511, 511, 511, 511, 511, 511, 511
-};
-
-static int16 mdp_scale_pixel_repeat_C2[MDP_SCALE_COEFF_NUM] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-static int16 mdp_scale_pixel_repeat_C3[MDP_SCALE_COEFF_NUM] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-/* --------------------------- FIR ------------------------------------- */
-/* -------- Downscale, ranging from 0.8x to 8.0x of original size -------- */
-
-static int16 mdp_scale_0p8_to_8p0_C0[MDP_SCALE_COEFF_NUM] = {
- 0, -7, -13, -19, -24, -28, -32, -34, -37, -39,
- -40, -41, -41, -41, -40, -40, -38, -37, -35, -33,
- -31, -29, -26, -24, -21, -18, -15, -13, -10, -7,
- -5, -2
-};
-
-static int16 mdp_scale_0p8_to_8p0_C1[MDP_SCALE_COEFF_NUM] = {
- 511, 507, 501, 494, 485, 475, 463, 450, 436, 422,
- 405, 388, 370, 352, 333, 314, 293, 274, 253, 233,
- 213, 193, 172, 152, 133, 113, 95, 77, 60, 43,
- 28, 13
-};
-
-static int16 mdp_scale_0p8_to_8p0_C2[MDP_SCALE_COEFF_NUM] = {
- 0, 13, 28, 43, 60, 77, 95, 113, 133, 152,
- 172, 193, 213, 233, 253, 274, 294, 314, 333, 352,
- 370, 388, 405, 422, 436, 450, 463, 475, 485, 494,
- 501, 507,
-};
-
-static int16 mdp_scale_0p8_to_8p0_C3[MDP_SCALE_COEFF_NUM] = {
- 0, -2, -5, -7, -10, -13, -15, -18, -21, -24,
- -26, -29, -31, -33, -35, -37, -38, -40, -40, -41,
- -41, -41, -40, -39, -37, -34, -32, -28, -24, -19,
- -13, -7
-};
-
-/* -------- Downscale, ranging from 0.6x to 0.8x of original size -------- */
-
-static int16 mdp_scale_0p6_to_0p8_C0[MDP_SCALE_COEFF_NUM] = {
- 104, 96, 89, 82, 75, 68, 61, 55, 49, 43,
- 38, 33, 28, 24, 20, 16, 12, 9, 6, 4,
- 2, 0, -2, -4, -5, -6, -7, -7, -8, -8,
- -8, -8
-};
-
-static int16 mdp_scale_0p6_to_0p8_C1[MDP_SCALE_COEFF_NUM] = {
- 303, 303, 302, 300, 298, 296, 293, 289, 286, 281,
- 276, 270, 265, 258, 252, 245, 238, 230, 223, 214,
- 206, 197, 189, 180, 172, 163, 154, 145, 137, 128,
- 120, 112
-};
-
-static int16 mdp_scale_0p6_to_0p8_C2[MDP_SCALE_COEFF_NUM] = {
- 112, 120, 128, 137, 145, 154, 163, 172, 180, 189,
- 197, 206, 214, 223, 230, 238, 245, 252, 258, 265,
- 270, 276, 281, 286, 289, 293, 296, 298, 300, 302,
- 303, 303
-};
-
-static int16 mdp_scale_0p6_to_0p8_C3[MDP_SCALE_COEFF_NUM] = {
- -8, -8, -8, -8, -7, -7, -6, -5, -4, -2,
- 0, 2, 4, 6, 9, 12, 16, 20, 24, 28,
- 33, 38, 43, 49, 55, 61, 68, 75, 82, 89,
- 96, 104
-};
-
-/* -------- Downscale, ranging from 0.4x to 0.6x of original size -------- */
-
-static int16 mdp_scale_0p4_to_0p6_C0[MDP_SCALE_COEFF_NUM] = {
- 136, 132, 128, 123, 119, 115, 111, 107, 103, 98,
- 95, 91, 87, 84, 80, 76, 73, 69, 66, 62,
- 59, 57, 54, 50, 47, 44, 41, 39, 36, 33,
- 32, 29
-};
-
-static int16 mdp_scale_0p4_to_0p6_C1[MDP_SCALE_COEFF_NUM] = {
- 206, 205, 204, 204, 201, 200, 199, 197, 196, 194,
- 191, 191, 189, 185, 184, 182, 180, 178, 176, 173,
- 170, 168, 165, 162, 160, 157, 155, 152, 148, 146,
- 142, 140
-};
-
-static int16 mdp_scale_0p4_to_0p6_C2[MDP_SCALE_COEFF_NUM] = {
- 140, 142, 146, 148, 152, 155, 157, 160, 162, 165,
- 168, 170, 173, 176, 178, 180, 182, 184, 185, 189,
- 191, 191, 194, 196, 197, 199, 200, 201, 204, 204,
- 205, 206
-};
-
-static int16 mdp_scale_0p4_to_0p6_C3[MDP_SCALE_COEFF_NUM] = {
- 29, 32, 33, 36, 39, 41, 44, 47, 50, 54,
- 57, 59, 62, 66, 69, 73, 76, 80, 84, 87,
- 91, 95, 98, 103, 107, 111, 115, 119, 123, 128,
- 132, 136
-};
-
-/* -------- Downscale, ranging from 0.2x to 0.4x of original size -------- */
-
-static int16 mdp_scale_0p2_to_0p4_C0[MDP_SCALE_COEFF_NUM] = {
- 131, 131, 130, 129, 128, 127, 127, 126, 125, 125,
- 124, 123, 123, 121, 120, 119, 119, 118, 117, 117,
- 116, 115, 115, 114, 113, 112, 111, 110, 109, 109,
- 108, 107
-};
-
-static int16 mdp_scale_0p2_to_0p4_C1[MDP_SCALE_COEFF_NUM] = {
- 141, 140, 140, 140, 140, 139, 138, 138, 138, 137,
- 137, 137, 136, 137, 137, 137, 136, 136, 136, 135,
- 135, 135, 134, 134, 134, 134, 134, 133, 133, 132,
- 132, 132
-};
-
-static int16 mdp_scale_0p2_to_0p4_C2[MDP_SCALE_COEFF_NUM] = {
- 132, 132, 132, 133, 133, 134, 134, 134, 134, 134,
- 135, 135, 135, 136, 136, 136, 137, 137, 137, 136,
- 137, 137, 137, 138, 138, 138, 139, 140, 140, 140,
- 140, 141
-};
-
-static int16 mdp_scale_0p2_to_0p4_C3[MDP_SCALE_COEFF_NUM] = {
- 107, 108, 109, 109, 110, 111, 112, 113, 114, 115,
- 115, 116, 117, 117, 118, 119, 119, 120, 121, 123,
- 123, 124, 125, 125, 126, 127, 127, 128, 129, 130,
- 131, 131
-};
-
-static void mdp_update_scale_table(int index, int16 *c0, int16 *c1,
- int16 *c2, int16 *c3)
-{
- int i, val;
-
- for (i = 0; i < MDP_SCALE_COEFF_NUM; i++) {
- val =
- ((MDP_SCALE_COEFF_MASK & c1[i]) << 16) |
- (MDP_SCALE_COEFF_MASK & c0[i]);
- MDP_OUTP(MDP_PPP_SCALE_COEFF_LSBn(index), val);
- val =
- ((MDP_SCALE_COEFF_MASK & c3[i]) << 16) |
- (MDP_SCALE_COEFF_MASK & c2[i]);
- MDP_OUTP(MDP_PPP_SCALE_COEFF_MSBn(index), val);
- index++;
- }
-}
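Each 32-tap coefficient set is written as pairs of 10-bit two's-complement fields: C1:C0 into the LSB register and C3:C2 into the MSB register. The packing alone, as plain bit math (function name is illustrative):

#include <stdint.h>

#define COEFF_MASK 0x3ff   /* 10-bit field, same mask as above */

/* Pack one FIR tap the way mdp_update_scale_table() does before writing
 * the LSB/MSB coefficient registers. */
static void pack_scale_tap(int16_t c0, int16_t c1, int16_t c2, int16_t c3,
                           uint32_t *lsb, uint32_t *msb)
{
    *lsb = ((uint32_t)(c1 & COEFF_MASK) << 16) | (uint32_t)(c0 & COEFF_MASK);
    *msb = ((uint32_t)(c3 & COEFF_MASK) << 16) | (uint32_t)(c2 & COEFF_MASK);
}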
-
-void mdp_init_scale_table(void)
-{
- mdp_scale_0p2_to_0p4_mode = MDP_SCALE_FIR;
- mdp_update_scale_table(MDP_SCALE_0P2_TO_0P4_INDEX,
- mdp_scale_0p2_to_0p4_C0,
- mdp_scale_0p2_to_0p4_C1,
- mdp_scale_0p2_to_0p4_C2,
- mdp_scale_0p2_to_0p4_C3);
-
- mdp_scale_0p4_to_0p6_mode = MDP_SCALE_FIR;
- mdp_update_scale_table(MDP_SCALE_0P4_TO_0P6_INDEX,
- mdp_scale_0p4_to_0p6_C0,
- mdp_scale_0p4_to_0p6_C1,
- mdp_scale_0p4_to_0p6_C2,
- mdp_scale_0p4_to_0p6_C3);
-
- mdp_scale_0p6_to_0p8_mode = MDP_SCALE_FIR;
- mdp_update_scale_table(MDP_SCALE_0P6_TO_0P8_INDEX,
- mdp_scale_0p6_to_0p8_C0,
- mdp_scale_0p6_to_0p8_C1,
- mdp_scale_0p6_to_0p8_C2,
- mdp_scale_0p6_to_0p8_C3);
-
- mdp_scale_0p8_to_8p0_mode = MDP_SCALE_FIR;
- mdp_update_scale_table(MDP_SCALE_0P8_TO_8P0_INDEX,
- mdp_scale_0p8_to_8p0_C0,
- mdp_scale_0p8_to_8p0_C1,
- mdp_scale_0p8_to_8p0_C2,
- mdp_scale_0p8_to_8p0_C3);
-}
-
-static long long mdp_do_div(long long num, long long den)
-{
- do_div(num, den);
- return num;
-}
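The kernel's do_div() divides its 64-bit first argument in place (its divisor is 32-bit) and the wrapper just hands back the quotient. A userspace stand-in under those assumptions:

#include <stdint.h>

/* Host-side equivalent of the wrapper above: plain 64-bit division,
 * returning the quotient.  Assumes den != 0 and, like the kernel macro,
 * that the divisor fits in 32 bits. */
static int64_t host_do_div(int64_t num, int64_t den)
{
    return num / den;
}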
-
-#define SCALER_PHASE_BITS 29
-#define HAL_MDP_PHASE_STEP_2P50 0x50000000
-#define HAL_MDP_PHASE_STEP_1P66 0x35555555
-#define HAL_MDP_PHASE_STEP_1P25 0x28000000
-
-struct phase_val {
- int phase_init_x;
- int phase_init_y;
- int phase_step_x;
- int phase_step_y;
-};
-
-static void mdp_calc_scaleInitPhase_3p1(uint32 in_w,
- uint32 in_h,
- uint32 out_w,
- uint32 out_h,
- boolean is_rotate,
- boolean is_pp_x,
- boolean is_pp_y, struct phase_val *pval)
-{
- uint64 dst_ROI_width;
- uint64 dst_ROI_height;
- uint64 src_ROI_width;
- uint64 src_ROI_height;
-
- /*
- * phase_step_x, phase_step_y, phase_init_x and phase_init_y
- * are represented in fixed-point, unsigned 3.29 format
- */
- uint32 phase_step_x = 0;
- uint32 phase_step_y = 0;
- uint32 phase_init_x = 0;
- uint32 phase_init_y = 0;
- uint32 yscale_filter_sel, xscale_filter_sel;
- uint32 scale_unit_sel_x, scale_unit_sel_y;
-
- uint64 numerator, denominator;
- uint64 temp_dim;
-
- src_ROI_width = in_w;
- src_ROI_height = in_h;
- dst_ROI_width = out_w;
- dst_ROI_height = out_h;
-
- /* if there is a 90 degree rotation */
- if (is_rotate) {
- /* decide whether to use FIR or M/N for scaling */
-
- /* if down-scaling by a factor smaller than 1/4 */
- if (src_ROI_width > (4 * dst_ROI_height))
- scale_unit_sel_x = 1; /* use M/N scalar */
- else
- scale_unit_sel_x = 0; /* use FIR scalar */
-
- /* if down-scaling by a factor smaller than 1/4 */
- if (src_ROI_height > (4 * dst_ROI_width))
- scale_unit_sel_y = 1; /* use M/N scalar */
- else
- scale_unit_sel_y = 0; /* use FIR scalar */
- } else {
- /* decide whether to use FIR or M/N for scaling */
-
- if (src_ROI_width > (4 * dst_ROI_width))
- scale_unit_sel_x = 1; /* use M/N scalar */
- else
- scale_unit_sel_x = 0; /* use FIR scalar */
-
- if (src_ROI_height > (4 * dst_ROI_height))
- scale_unit_sel_y = 1; /* use M/N scalar */
- else
- scale_unit_sel_y = 0; /* use FIR scalar */
-
- }
-
- /* if there is a 90 degree rotation */
- if (is_rotate) {
- /* swap the width and height of dst ROI */
- temp_dim = dst_ROI_width;
- dst_ROI_width = dst_ROI_height;
- dst_ROI_height = temp_dim;
- }
-
- /* calculate phase step for the x direction */
-
- /* if destination is only 1 pixel wide, the value of phase_step_x
- is unimportant. Assigning phase_step_x to src ROI width
- as an arbitrary value. */
- if (dst_ROI_width == 1)
- phase_step_x = (uint32) ((src_ROI_width) << SCALER_PHASE_BITS);
-
- /* if using FIR scalar */
- else if (scale_unit_sel_x == 0) {
-
- /* Calculate the quotient ( src_ROI_width - 1 ) / ( dst_ROI_width - 1)
- with u3.29 precision. Quotient is rounded up to the larger
- 29th decimal point. */
- numerator = (src_ROI_width - 1) << SCALER_PHASE_BITS;
- denominator = (dst_ROI_width - 1); /* never equals to 0 because of the "( dst_ROI_width == 1 ) case" */
- phase_step_x = (uint32) mdp_do_div((numerator + denominator - 1), denominator); /* divide and round up to the larger 29th decimal point. */
-
- }
-
- /* if M/N scalar */
- else if (scale_unit_sel_x == 1) {
- /* Calculate the quotient ( src_ROI_width ) / ( dst_ROI_width)
- with u3.29 precision. Quotient is rounded down to the
- smaller 29th decimal point. */
- numerator = (src_ROI_width) << SCALER_PHASE_BITS;
- denominator = (dst_ROI_width);
- phase_step_x = (uint32) mdp_do_div(numerator, denominator);
- }
- /* calculate phase step for the y direction */
-
- /* if destination is only 1 pixel tall, the value of
- phase_step_y is unimportant. Assigning phase_step_y
- to src ROI height as an arbitrary value. */
- if (dst_ROI_height == 1)
- phase_step_y = (uint32) ((src_ROI_height) << SCALER_PHASE_BITS);
-
- /* if FIR scalar */
- else if (scale_unit_sel_y == 0) {
- /* Calculate the quotient ( src_ROI_height - 1 ) / ( dst_ROI_height - 1)
- with u3.29 precision. Quotient is rounded up to the larger
- 29th decimal point. */
- numerator = (src_ROI_height - 1) << SCALER_PHASE_BITS;
- denominator = (dst_ROI_height - 1); /* never equals to 0 because of the "( dst_ROI_height == 1 )" case */
- phase_step_y = (uint32) mdp_do_div((numerator + denominator - 1), denominator); /* Quotient is rounded up to the larger 29th decimal point. */
-
- }
-
- /* if M/N scalar */
- else if (scale_unit_sel_y == 1) {
- /* Calculate the quotient ( src_ROI_height ) / ( dst_ROI_height)
- with u3.29 precision. Quotient is rounded down to the smaller
- 29th decimal point. */
- numerator = (src_ROI_height) << SCALER_PHASE_BITS;
- denominator = (dst_ROI_height);
- phase_step_y = (uint32) mdp_do_div(numerator, denominator);
- }
-
- /* decide which set of FIR coefficients to use */
- if (phase_step_x > HAL_MDP_PHASE_STEP_2P50)
- xscale_filter_sel = 0;
- else if (phase_step_x > HAL_MDP_PHASE_STEP_1P66)
- xscale_filter_sel = 1;
- else if (phase_step_x > HAL_MDP_PHASE_STEP_1P25)
- xscale_filter_sel = 2;
- else
- xscale_filter_sel = 3;
-
- if (phase_step_y > HAL_MDP_PHASE_STEP_2P50)
- yscale_filter_sel = 0;
- else if (phase_step_y > HAL_MDP_PHASE_STEP_1P66)
- yscale_filter_sel = 1;
- else if (phase_step_y > HAL_MDP_PHASE_STEP_1P25)
- yscale_filter_sel = 2;
- else
- yscale_filter_sel = 3;
-
- /* calculate phase init for the x direction */
-
- /* if using FIR scalar */
- if (scale_unit_sel_x == 0) {
- if (dst_ROI_width == 1)
- phase_init_x =
- (uint32) ((src_ROI_width - 1) << SCALER_PHASE_BITS);
- else
- phase_init_x = 0;
-
- }
- /* M over N scalar */
- else if (scale_unit_sel_x == 1)
- phase_init_x = 0;
-
- /* calculate phase init for the y direction
- if using FIR scalar */
- if (scale_unit_sel_y == 0) {
- if (dst_ROI_height == 1)
- phase_init_y =
- (uint32) ((src_ROI_height -
- 1) << SCALER_PHASE_BITS);
- else
- phase_init_y = 0;
-
- }
- /* M over N scalar */
- else if (scale_unit_sel_y == 1)
- phase_init_y = 0;
-
- /* write registers */
- pval->phase_step_x = (uint32) phase_step_x;
- pval->phase_step_y = (uint32) phase_step_y;
- pval->phase_init_x = (uint32) phase_init_x;
- pval->phase_init_y = (uint32) phase_init_y;
-
- return;
-}
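The phase steps above are u3.29 ratios: the FIR path rounds (src - 1) / (dst - 1) up, while the M/N path rounds src / dst down. The FIR case as a standalone function (plain division replaces mdp_do_div()):

#include <stdint.h>

#define PHASE_BITS 29   /* u3.29, matching SCALER_PHASE_BITS above */

/* Ceiling of (src - 1) / (dst - 1) in u3.29, as the FIR branch computes.
 * Caller guarantees dst > 1 (the dst == 1 case is special-cased above). */
static uint32_t fir_phase_step(uint64_t src, uint64_t dst)
{
    uint64_t numerator   = (src - 1) << PHASE_BITS;
    uint64_t denominator = dst - 1;

    return (uint32_t)((numerator + denominator - 1) / denominator);
}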
-
-void mdp_set_scale(MDPIBUF *iBuf,
- uint32 dst_roi_width,
- uint32 dst_roi_height,
- boolean inputRGB, boolean outputRGB, uint32 *pppop_reg_ptr)
-{
- uint32 dst_roi_width_scale;
- uint32 dst_roi_height_scale;
- struct phase_val pval;
- boolean use_pr;
- uint32 ppp_scale_config = 0;
-
- if (!inputRGB)
- ppp_scale_config |= BIT(6);
-
- if (iBuf->mdpImg.mdpOp & MDPOP_ASCALE) {
- if (iBuf->mdpImg.mdpOp & MDPOP_ROT90) {
- dst_roi_width_scale = dst_roi_height;
- dst_roi_height_scale = dst_roi_width;
- } else {
- dst_roi_width_scale = dst_roi_width;
- dst_roi_height_scale = dst_roi_height;
- }
-
- if ((dst_roi_width_scale != iBuf->roi.width) ||
- (dst_roi_height_scale != iBuf->roi.height) ||
- (iBuf->mdpImg.mdpOp & MDPOP_SHARPENING)) {
- *pppop_reg_ptr |=
- (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON);
-
- mdp_calc_scaleInitPhase_3p1(iBuf->roi.width,
- iBuf->roi.height,
- dst_roi_width,
- dst_roi_height,
- iBuf->mdpImg.
- mdpOp & MDPOP_ROT90, 1, 1,
- &pval);
-
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x013c,
- pval.phase_init_x);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0140,
- pval.phase_init_y);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0144,
- pval.phase_step_x);
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0148,
- pval.phase_step_y);
-
- use_pr = (inputRGB) && (outputRGB);
-
- /* x-direction */
- if ((dst_roi_width_scale == iBuf->roi.width) &&
- !(iBuf->mdpImg.mdpOp & MDPOP_SHARPENING)) {
- *pppop_reg_ptr &= ~PPP_OP_SCALE_X_ON;
- } else
- if (((dst_roi_width_scale * 10) / iBuf->roi.width) >
- 8) {
- if ((use_pr)
- && (mdp_scale_0p8_to_8p0_mode !=
- MDP_SCALE_PR)) {
- mdp_scale_0p8_to_8p0_mode =
- MDP_SCALE_PR;
- mdp_update_scale_table
- (MDP_SCALE_0P8_TO_8P0_INDEX,
- mdp_scale_pixel_repeat_C0,
- mdp_scale_pixel_repeat_C1,
- mdp_scale_pixel_repeat_C2,
- mdp_scale_pixel_repeat_C3);
- } else if ((!use_pr)
- && (mdp_scale_0p8_to_8p0_mode !=
- MDP_SCALE_FIR)) {
- mdp_scale_0p8_to_8p0_mode =
- MDP_SCALE_FIR;
- mdp_update_scale_table
- (MDP_SCALE_0P8_TO_8P0_INDEX,
- mdp_scale_0p8_to_8p0_C0,
- mdp_scale_0p8_to_8p0_C1,
- mdp_scale_0p8_to_8p0_C2,
- mdp_scale_0p8_to_8p0_C3);
- }
- ppp_scale_config |= (SCALE_U1_SET << 2);
- } else
- if (((dst_roi_width_scale * 10) / iBuf->roi.width) >
- 6) {
- if ((use_pr)
- && (mdp_scale_0p6_to_0p8_mode !=
- MDP_SCALE_PR)) {
- mdp_scale_0p6_to_0p8_mode =
- MDP_SCALE_PR;
- mdp_update_scale_table
- (MDP_SCALE_0P6_TO_0P8_INDEX,
- mdp_scale_pixel_repeat_C0,
- mdp_scale_pixel_repeat_C1,
- mdp_scale_pixel_repeat_C2,
- mdp_scale_pixel_repeat_C3);
- } else if ((!use_pr)
- && (mdp_scale_0p6_to_0p8_mode !=
- MDP_SCALE_FIR)) {
- mdp_scale_0p6_to_0p8_mode =
- MDP_SCALE_FIR;
- mdp_update_scale_table
- (MDP_SCALE_0P6_TO_0P8_INDEX,
- mdp_scale_0p6_to_0p8_C0,
- mdp_scale_0p6_to_0p8_C1,
- mdp_scale_0p6_to_0p8_C2,
- mdp_scale_0p6_to_0p8_C3);
- }
- ppp_scale_config |= (SCALE_D2_SET << 2);
- } else
- if (((dst_roi_width_scale * 10) / iBuf->roi.width) >
- 4) {
- if ((use_pr)
- && (mdp_scale_0p4_to_0p6_mode !=
- MDP_SCALE_PR)) {
- mdp_scale_0p4_to_0p6_mode =
- MDP_SCALE_PR;
- mdp_update_scale_table
- (MDP_SCALE_0P4_TO_0P6_INDEX,
- mdp_scale_pixel_repeat_C0,
- mdp_scale_pixel_repeat_C1,
- mdp_scale_pixel_repeat_C2,
- mdp_scale_pixel_repeat_C3);
- } else if ((!use_pr)
- && (mdp_scale_0p4_to_0p6_mode !=
- MDP_SCALE_FIR)) {
- mdp_scale_0p4_to_0p6_mode =
- MDP_SCALE_FIR;
- mdp_update_scale_table
- (MDP_SCALE_0P4_TO_0P6_INDEX,
- mdp_scale_0p4_to_0p6_C0,
- mdp_scale_0p4_to_0p6_C1,
- mdp_scale_0p4_to_0p6_C2,
- mdp_scale_0p4_to_0p6_C3);
- }
- ppp_scale_config |= (SCALE_D1_SET << 2);
- } else
- if (((dst_roi_width_scale * 4) / iBuf->roi.width) >=
- 1) {
- if ((use_pr)
- && (mdp_scale_0p2_to_0p4_mode !=
- MDP_SCALE_PR)) {
- mdp_scale_0p2_to_0p4_mode =
- MDP_SCALE_PR;
- mdp_update_scale_table
- (MDP_SCALE_0P2_TO_0P4_INDEX,
- mdp_scale_pixel_repeat_C0,
- mdp_scale_pixel_repeat_C1,
- mdp_scale_pixel_repeat_C2,
- mdp_scale_pixel_repeat_C3);
- } else if ((!use_pr)
- && (mdp_scale_0p2_to_0p4_mode !=
- MDP_SCALE_FIR)) {
- mdp_scale_0p2_to_0p4_mode =
- MDP_SCALE_FIR;
- mdp_update_scale_table
- (MDP_SCALE_0P2_TO_0P4_INDEX,
- mdp_scale_0p2_to_0p4_C0,
- mdp_scale_0p2_to_0p4_C1,
- mdp_scale_0p2_to_0p4_C2,
- mdp_scale_0p2_to_0p4_C3);
- }
- ppp_scale_config |= (SCALE_D0_SET << 2);
- } else
- ppp_scale_config |= BIT(0);
-
- /* y-direction */
- if ((dst_roi_height_scale == iBuf->roi.height) &&
- !(iBuf->mdpImg.mdpOp & MDPOP_SHARPENING)) {
- *pppop_reg_ptr &= ~PPP_OP_SCALE_Y_ON;
- } else if (((dst_roi_height_scale * 10) /
- iBuf->roi.height) > 8) {
- if ((use_pr)
- && (mdp_scale_0p8_to_8p0_mode !=
- MDP_SCALE_PR)) {
- mdp_scale_0p8_to_8p0_mode =
- MDP_SCALE_PR;
- mdp_update_scale_table
- (MDP_SCALE_0P8_TO_8P0_INDEX,
- mdp_scale_pixel_repeat_C0,
- mdp_scale_pixel_repeat_C1,
- mdp_scale_pixel_repeat_C2,
- mdp_scale_pixel_repeat_C3);
- } else if ((!use_pr)
- && (mdp_scale_0p8_to_8p0_mode !=
- MDP_SCALE_FIR)) {
- mdp_scale_0p8_to_8p0_mode =
- MDP_SCALE_FIR;
- mdp_update_scale_table
- (MDP_SCALE_0P8_TO_8P0_INDEX,
- mdp_scale_0p8_to_8p0_C0,
- mdp_scale_0p8_to_8p0_C1,
- mdp_scale_0p8_to_8p0_C2,
- mdp_scale_0p8_to_8p0_C3);
- }
- ppp_scale_config |= (SCALE_U1_SET << 4);
- } else
- if (((dst_roi_height_scale * 10) /
- iBuf->roi.height) > 6) {
- if ((use_pr)
- && (mdp_scale_0p6_to_0p8_mode !=
- MDP_SCALE_PR)) {
- mdp_scale_0p6_to_0p8_mode =
- MDP_SCALE_PR;
- mdp_update_scale_table
- (MDP_SCALE_0P6_TO_0P8_INDEX,
- mdp_scale_pixel_repeat_C0,
- mdp_scale_pixel_repeat_C1,
- mdp_scale_pixel_repeat_C2,
- mdp_scale_pixel_repeat_C3);
- } else if ((!use_pr)
- && (mdp_scale_0p6_to_0p8_mode !=
- MDP_SCALE_FIR)) {
- mdp_scale_0p6_to_0p8_mode =
- MDP_SCALE_FIR;
- mdp_update_scale_table
- (MDP_SCALE_0P6_TO_0P8_INDEX,
- mdp_scale_0p6_to_0p8_C0,
- mdp_scale_0p6_to_0p8_C1,
- mdp_scale_0p6_to_0p8_C2,
- mdp_scale_0p6_to_0p8_C3);
- }
- ppp_scale_config |= (SCALE_D2_SET << 4);
- } else
- if (((dst_roi_height_scale * 10) /
- iBuf->roi.height) > 4) {
- if ((use_pr)
- && (mdp_scale_0p4_to_0p6_mode !=
- MDP_SCALE_PR)) {
- mdp_scale_0p4_to_0p6_mode =
- MDP_SCALE_PR;
- mdp_update_scale_table
- (MDP_SCALE_0P4_TO_0P6_INDEX,
- mdp_scale_pixel_repeat_C0,
- mdp_scale_pixel_repeat_C1,
- mdp_scale_pixel_repeat_C2,
- mdp_scale_pixel_repeat_C3);
- } else if ((!use_pr)
- && (mdp_scale_0p4_to_0p6_mode !=
- MDP_SCALE_FIR)) {
- mdp_scale_0p4_to_0p6_mode =
- MDP_SCALE_FIR;
- mdp_update_scale_table
- (MDP_SCALE_0P4_TO_0P6_INDEX,
- mdp_scale_0p4_to_0p6_C0,
- mdp_scale_0p4_to_0p6_C1,
- mdp_scale_0p4_to_0p6_C2,
- mdp_scale_0p4_to_0p6_C3);
- }
- ppp_scale_config |= (SCALE_D1_SET << 4);
- } else
- if (((dst_roi_height_scale * 4) /
- iBuf->roi.height) >= 1) {
- if ((use_pr)
- && (mdp_scale_0p2_to_0p4_mode !=
- MDP_SCALE_PR)) {
- mdp_scale_0p2_to_0p4_mode =
- MDP_SCALE_PR;
- mdp_update_scale_table
- (MDP_SCALE_0P2_TO_0P4_INDEX,
- mdp_scale_pixel_repeat_C0,
- mdp_scale_pixel_repeat_C1,
- mdp_scale_pixel_repeat_C2,
- mdp_scale_pixel_repeat_C3);
- } else if ((!use_pr)
- && (mdp_scale_0p2_to_0p4_mode !=
- MDP_SCALE_FIR)) {
- mdp_scale_0p2_to_0p4_mode =
- MDP_SCALE_FIR;
- mdp_update_scale_table
- (MDP_SCALE_0P2_TO_0P4_INDEX,
- mdp_scale_0p2_to_0p4_C0,
- mdp_scale_0p2_to_0p4_C1,
- mdp_scale_0p2_to_0p4_C2,
- mdp_scale_0p2_to_0p4_C3);
- }
- ppp_scale_config |= (SCALE_D0_SET << 4);
- } else
- ppp_scale_config |= BIT(1);
-
- if (iBuf->mdpImg.mdpOp & MDPOP_SHARPENING) {
- ppp_scale_config |= BIT(7);
- MDP_OUTP(MDP_BASE + 0x50020,
- iBuf->mdpImg.sp_value);
- }
-
- MDP_OUTP(MDP_BASE + 0x10230, ppp_scale_config);
- } else {
- iBuf->mdpImg.mdpOp &= ~(MDPOP_ASCALE);
- }
- }
-}
-
-void mdp_adjust_start_addr(uint8 **src0,
- uint8 **src1,
- int v_slice,
- int h_slice,
- int x,
- int y,
- uint32 width,
- uint32 height, int bpp, MDPIBUF *iBuf, int layer)
-{
- switch (layer) {
- case 0:
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0200, (y << 16) | (x));
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0208,
- (height << 16) | (width));
- break;
-
- case 1:
- /* MDP 3.1 HW bug workaround */
- if (iBuf->ibuf_type == MDP_YCRYCB_H2V1) {
- *src0 += (x + y * width) * bpp;
- x = y = 0;
- width = iBuf->roi.dst_width;
- height = iBuf->roi.dst_height;
- }
-
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x0204, (y << 16) | (x));
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x020c,
- (height << 16) | (width));
- break;
-
- case 2:
- MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x019c, (y << 16) | (x));
- break;
- }
-}
-
-void mdp_set_blend_attr(MDPIBUF *iBuf,
- uint32 *alpha,
- uint32 *tpVal,
- uint32 perPixelAlpha, uint32 *pppop_reg_ptr)
-{
- int bg_alpha;
-
- *alpha = iBuf->mdpImg.alpha;
- *tpVal = iBuf->mdpImg.tpVal;
-
- if (iBuf->mdpImg.mdpOp & MDPOP_FG_PM_ALPHA) {
- *pppop_reg_ptr |= PPP_OP_ROT_ON |
- PPP_OP_BLEND_ON | PPP_OP_BLEND_CONSTANT_ALPHA;
-
- bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL |
- PPP_BLEND_BG_ALPHA_REVERSE;
-
- if (perPixelAlpha)
- bg_alpha |= PPP_BLEND_BG_SRCPIXEL_ALPHA;
- else
- bg_alpha |= PPP_BLEND_BG_CONSTANT_ALPHA;
-
- outpdw(MDP_BASE + 0x70010, bg_alpha);
-
- if (iBuf->mdpImg.mdpOp & MDPOP_TRANSP)
- *pppop_reg_ptr |= PPP_BLEND_CALPHA_TRNASP;
- } else if (perPixelAlpha) {
- *pppop_reg_ptr |= PPP_OP_ROT_ON |
- PPP_OP_BLEND_ON | PPP_OP_BLEND_SRCPIXEL_ALPHA;
- } else {
- if ((iBuf->mdpImg.mdpOp & MDPOP_ALPHAB)
- && (iBuf->mdpImg.alpha == 0xff)) {
- iBuf->mdpImg.mdpOp &= ~(MDPOP_ALPHAB);
- }
-
- if ((iBuf->mdpImg.mdpOp & MDPOP_ALPHAB)
- || (iBuf->mdpImg.mdpOp & MDPOP_TRANSP)) {
- *pppop_reg_ptr |=
- PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
- PPP_OP_BLEND_CONSTANT_ALPHA |
- PPP_OP_BLEND_ALPHA_BLEND_NORMAL;
- }
-
- if (iBuf->mdpImg.mdpOp & MDPOP_TRANSP)
- *pppop_reg_ptr |= PPP_BLEND_CALPHA_TRNASP;
- }
-}
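The blend selection above resolves in a fixed order: premultiplied foreground alpha first, then per-pixel alpha, then constant alpha and/or transparent color, with constant alpha dropped when it is fully opaque. The decision alone, as a pure function with invented flag names standing in for the MDPOP_* bits:

#include <stdint.h>

enum blend_kind {
    BLEND_FG_PREMUL,
    BLEND_PER_PIXEL,
    BLEND_CONST_OR_TRANSP,
    BLEND_NONE
};

/* Mirrors the if/else order of mdp_set_blend_attr(); the integer flags
 * are illustrative booleans, not the driver's operation bits. */
static enum blend_kind pick_blend(int fg_premul, int per_pixel_alpha,
                                  int alpha_blend, int transp, uint8_t alpha)
{
    if (fg_premul)
        return BLEND_FG_PREMUL;
    if (per_pixel_alpha)
        return BLEND_PER_PIXEL;
    if (alpha_blend && alpha == 0xff)
        alpha_blend = 0;              /* fully opaque constant alpha: drop it */
    if (alpha_blend || transp)
        return BLEND_CONST_OR_TRANSP;
    return BLEND_NONE;
}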
diff --git a/drivers/staging/msm/mdp_vsync.c b/drivers/staging/msm/mdp_vsync.c
deleted file mode 100644
index bbd45604435..00000000000
--- a/drivers/staging/msm/mdp_vsync.c
+++ /dev/null
@@ -1,389 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/hrtimer.h>
-#include <linux/vmalloc.h>
-#include <linux/clk.h>
-
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/uaccess.h>
-#include <mach/gpio.h>
-
-#include "mdp.h"
-#include "msm_fb.h"
-#include "mddihost.h"
-
-#ifdef CONFIG_FB_MSM_MDP40
-#define MDP_SYNC_CFG_0 0x100
-#define MDP_SYNC_STATUS_0 0x10c
-#define MDP_PRIM_VSYNC_OUT_CTRL 0x118
-#define MDP_PRIM_VSYNC_INIT_VAL 0x128
-#else
-#define MDP_SYNC_CFG_0 0x300
-#define MDP_SYNC_STATUS_0 0x30c
-#define MDP_PRIM_VSYNC_OUT_CTRL 0x318
-#define MDP_PRIM_VSYNC_INIT_VAL 0x328
-#endif
-
-extern mddi_lcd_type mddi_lcd_idx;
-extern spinlock_t mdp_spin_lock;
-extern struct workqueue_struct *mdp_vsync_wq;
-extern int lcdc_mode;
-extern int vsync_mode;
-
-#ifdef MDP_HW_VSYNC
-int vsync_above_th = 4;
-int vsync_start_th = 1;
-int vsync_load_cnt;
-
-struct clk *mdp_vsync_clk;
-
-void mdp_hw_vsync_clk_enable(struct msm_fb_data_type *mfd)
-{
- if (mfd->use_mdp_vsync)
- clk_enable(mdp_vsync_clk);
-}
-
-void mdp_hw_vsync_clk_disable(struct msm_fb_data_type *mfd)
-{
- if (mfd->use_mdp_vsync)
- clk_disable(mdp_vsync_clk);
-}
-#endif
-
-static void mdp_set_vsync(unsigned long data)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
- struct msm_fb_panel_data *pdata = NULL;
-
- pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
-
- if ((pdata) && (pdata->set_vsync_notifier == NULL))
- return;
-
- init_timer(&mfd->vsync_resync_timer);
- mfd->vsync_resync_timer.function = mdp_set_vsync;
- mfd->vsync_resync_timer.data = data;
- mfd->vsync_resync_timer.expires =
- jiffies + mfd->panel_info.lcd.vsync_notifier_period;
- add_timer(&mfd->vsync_resync_timer);
-
- if ((mfd->panel_info.lcd.vsync_enable) && (mfd->panel_power_on)
- && (!mfd->vsync_handler_pending)) {
- mfd->vsync_handler_pending = TRUE;
- if (!queue_work(mdp_vsync_wq, &mfd->vsync_resync_worker)) {
- MSM_FB_INFO
- ("mdp_set_vsync: can't queue_work! -> needs to increase vsync_resync_timer_duration\n");
- }
- } else {
- MSM_FB_DEBUG
- ("mdp_set_vsync failed! EN:%d PWR:%d PENDING:%d\n",
- mfd->panel_info.lcd.vsync_enable, mfd->panel_power_on,
- mfd->vsync_handler_pending);
- }
-}
-
-static void mdp_vsync_handler(void *data)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
-
- if (mfd->use_mdp_vsync) {
-#ifdef MDP_HW_VSYNC
- if (mfd->panel_power_on)
- MDP_OUTP(MDP_BASE + MDP_SYNC_STATUS_0, vsync_load_cnt);
-
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
-#endif
- } else {
- mfd->last_vsync_timetick = ktime_get_real();
- }
-
- mfd->vsync_handler_pending = FALSE;
-}
-
-irqreturn_t mdp_hw_vsync_handler_proxy(int irq, void *data)
-{
- /*
- * ToDo: tried enabling/disabling GPIO MDP HW VSYNC interrupt
- * but getting inaccurate timing in mdp_vsync_handler()
- * disable_irq(MDP_HW_VSYNC_IRQ);
- */
- mdp_vsync_handler(data);
-
- return IRQ_HANDLED;
-}
-
-#ifdef MDP_HW_VSYNC
-static void mdp_set_sync_cfg_0(struct msm_fb_data_type *mfd, int vsync_cnt)
-{
- unsigned long cfg;
-
- cfg = mfd->total_lcd_lines - 1;
- cfg <<= MDP_SYNCFG_HGT_LOC;
- if (mfd->panel_info.lcd.hw_vsync_mode)
- cfg |= MDP_SYNCFG_VSYNC_EXT_EN;
- cfg |= (MDP_SYNCFG_VSYNC_INT_EN | vsync_cnt);
-
- MDP_OUTP(MDP_BASE + MDP_SYNC_CFG_0, cfg);
-}
-#endif
-
-void mdp_config_vsync(struct msm_fb_data_type *mfd)
-{
-
- /* vsync on primary lcd only for now */
- if ((mfd->dest != DISPLAY_LCD) || (mfd->panel_info.pdest != DISPLAY_1)
- || (!vsync_mode)) {
- goto err_handle;
- }
-
- if (mfd->panel_info.lcd.vsync_enable) {
- mfd->total_porch_lines = mfd->panel_info.lcd.v_back_porch +
- mfd->panel_info.lcd.v_front_porch +
- mfd->panel_info.lcd.v_pulse_width;
- mfd->total_lcd_lines =
- mfd->panel_info.yres + mfd->total_porch_lines;
- mfd->lcd_ref_usec_time =
- 100000000 / mfd->panel_info.lcd.refx100;
- mfd->vsync_handler_pending = FALSE;
- mfd->last_vsync_timetick.tv.sec = 0;
- mfd->last_vsync_timetick.tv.nsec = 0;
-
-#ifdef MDP_HW_VSYNC
- if (mdp_vsync_clk == NULL)
- mdp_vsync_clk = clk_get(NULL, "mdp_vsync_clk");
-
- if (IS_ERR(mdp_vsync_clk)) {
- printk(KERN_ERR "error: can't get mdp_vsync_clk!\n");
- mfd->use_mdp_vsync = 0;
- } else
- mfd->use_mdp_vsync = 1;
-
- if (mfd->use_mdp_vsync) {
- uint32 vsync_cnt_cfg, vsync_cnt_cfg_dem;
- uint32 mdp_vsync_clk_speed_hz;
-
- mdp_vsync_clk_speed_hz = clk_get_rate(mdp_vsync_clk);
-
- if (mdp_vsync_clk_speed_hz == 0) {
- mfd->use_mdp_vsync = 0;
- } else {
- /*
- * Do this calculation in 2 steps for
- * rounding uint32 properly.
- */
- vsync_cnt_cfg_dem =
- (mfd->panel_info.lcd.refx100 *
- mfd->total_lcd_lines) / 100;
- vsync_cnt_cfg =
- (mdp_vsync_clk_speed_hz) /
- vsync_cnt_cfg_dem;
-
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON,
- FALSE);
- mdp_hw_vsync_clk_enable(mfd);
-
- mdp_set_sync_cfg_0(mfd, vsync_cnt_cfg);
-
- /*
- * load the last line + 1 to be in the
- * safety zone
- */
- vsync_load_cnt = mfd->panel_info.yres;
-
- /* line counter init value at the next pulse */
- MDP_OUTP(MDP_BASE + MDP_PRIM_VSYNC_INIT_VAL,
- vsync_load_cnt);
-
- /*
- * external vsync source pulse width and
- * polarity flip
- */
- MDP_OUTP(MDP_BASE + MDP_PRIM_VSYNC_OUT_CTRL,
- BIT(30) | BIT(0));
-
-
- /* threshold */
- MDP_OUTP(MDP_BASE + 0x200,
- (vsync_above_th << 16) |
- (vsync_start_th));
-
- mdp_hw_vsync_clk_disable(mfd);
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK,
- MDP_BLOCK_POWER_OFF, FALSE);
- }
- }
-#else
- mfd->use_mdp_vsync = 0;
- hrtimer_init(&mfd->dma_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- mfd->dma_hrtimer.function = mdp_dma2_vsync_hrtimer_handler;
- mfd->vsync_width_boundary = vmalloc(mfd->panel_info.xres * 4);
-#endif
-
- mfd->channel_irq = 0;
- if (mfd->panel_info.lcd.hw_vsync_mode) {
- u32 vsync_gpio = mfd->vsync_gpio;
- u32 ret;
-
- if (vsync_gpio == -1) {
- MSM_FB_INFO("vsync_gpio not defined!\n");
- goto err_handle;
- }
-
- ret = gpio_tlmm_config(GPIO_CFG
- (vsync_gpio,
- (mfd->use_mdp_vsync) ? 1 : 0,
- GPIO_INPUT,
- GPIO_PULL_DOWN,
- GPIO_2MA),
- GPIO_ENABLE);
- if (ret)
- goto err_handle;
-
- if (!mfd->use_mdp_vsync) {
- mfd->channel_irq = MSM_GPIO_TO_INT(vsync_gpio);
- if (request_irq
- (mfd->channel_irq,
- &mdp_hw_vsync_handler_proxy,
- IRQF_TRIGGER_FALLING, "VSYNC_GPIO",
- (void *)mfd)) {
- MSM_FB_INFO
- ("irq=%d failed! vsync_gpio=%d\n",
- mfd->channel_irq,
- vsync_gpio);
- goto err_handle;
- }
- }
- }
-
- mdp_set_vsync((unsigned long)mfd);
- }
-
- return;
-
-err_handle:
- if (mfd->vsync_width_boundary)
- vfree(mfd->vsync_width_boundary);
- mfd->panel_info.lcd.vsync_enable = FALSE;
- printk(KERN_ERR "%s: failed!\n", __func__);
-}
-
-void mdp_vsync_resync_workqueue_handler(struct work_struct *work)
-{
- struct msm_fb_data_type *mfd = NULL;
- int vsync_fnc_enabled = FALSE;
- struct msm_fb_panel_data *pdata = NULL;
-
- mfd = container_of(work, struct msm_fb_data_type, vsync_resync_worker);
-
- if (mfd) {
- if (mfd->panel_power_on) {
- pdata =
- (struct msm_fb_panel_data *)mfd->pdev->dev.
- platform_data;
-
- /*
- * we need to turn on MDP power if it uses MDP vsync
- * HW block in SW mode
- */
- if ((!mfd->panel_info.lcd.hw_vsync_mode) &&
- (mfd->use_mdp_vsync) &&
- (pdata) && (pdata->set_vsync_notifier != NULL)) {
- /*
- * enable pwr here since we can't enable it in
- * vsync callback in isr mode
- */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON,
- FALSE);
- }
-
- if (pdata->set_vsync_notifier != NULL) {
- vsync_fnc_enabled = TRUE;
- pdata->set_vsync_notifier(mdp_vsync_handler,
- (void *)mfd);
- }
- }
- }
-
- if ((mfd) && (!vsync_fnc_enabled))
- mfd->vsync_handler_pending = FALSE;
-}
-
-boolean mdp_hw_vsync_set_handler(msm_fb_vsync_handler_type handler, void *data)
-{
- /*
- * ToDo: tried enabling/disabling GPIO MDP HW VSYNC interrupt
- * but getting inaccurate timing in mdp_vsync_handler()
- * enable_irq(MDP_HW_VSYNC_IRQ);
- */
-
- return TRUE;
-}
-
-uint32 mdp_get_lcd_line_counter(struct msm_fb_data_type *mfd)
-{
- uint32 elapsed_usec_time;
- uint32 lcd_line;
- ktime_t last_vsync_timetick_local;
- ktime_t curr_time;
- unsigned long flag;
-
- if ((!mfd->panel_info.lcd.vsync_enable) || (!vsync_mode))
- return 0;
-
- spin_lock_irqsave(&mdp_spin_lock, flag);
- last_vsync_timetick_local = mfd->last_vsync_timetick;
- spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
- curr_time = ktime_get_real();
- elapsed_usec_time =
- ((curr_time.tv.sec - last_vsync_timetick_local.tv.sec) * 1000000) +
- ((curr_time.tv.nsec - last_vsync_timetick_local.tv.nsec) / 1000);
-
- elapsed_usec_time = elapsed_usec_time % mfd->lcd_ref_usec_time;
-
-	/* lcd line calculation, referenced to line counter = 0 */
- lcd_line =
- (elapsed_usec_time * mfd->total_lcd_lines) / mfd->lcd_ref_usec_time;
-
-	/* lcd line adjustment, referenced to the actual line counter at vsync */
- lcd_line =
- (mfd->total_lcd_lines - mfd->panel_info.lcd.v_back_porch +
- lcd_line) % (mfd->total_lcd_lines + 1);
-
- if (lcd_line > mfd->total_lcd_lines) {
- MSM_FB_INFO
- ("mdp_get_lcd_line_counter: mdp_lcd_rd_cnt >= mfd->total_lcd_lines error!\n");
- }
-
- return lcd_line;
-}
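
mdp_get_lcd_line_counter() and mdp_config_vsync() above turn the panel's refresh rate into hardware counter values; the sync counter in particular is derived from the vsync clock in two steps, as the in-code comment explains, so the uint32 arithmetic rounds sensibly. A worked sketch with hypothetical panel numbers (60 Hz refresh, 506 total LCD lines, 24.576 MHz vsync clock); none of these values come from the driver itself.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical inputs; the driver reads these from panel_info
	 * and clk_get_rate(mdp_vsync_clk). */
	uint32_t refx100 = 6000;		/* refresh rate * 100 */
	uint32_t total_lcd_lines = 506;		/* yres + porches + pulse */
	uint32_t vsync_clk_hz = 24576000;

	/* Two-step form used in mdp_config_vsync(): lines per second
	 * first, then vsync-clock ticks per line. */
	uint32_t lines_per_sec = (refx100 * total_lcd_lines) / 100;
	uint32_t vsync_cnt_cfg = vsync_clk_hz / lines_per_sec;

	printf("lines/s = %u, sync counter = %u\n",
	       lines_per_sec, vsync_cnt_cfg);
	return 0;
}

With these inputs the counter comes out to 809, which is the value mdp_set_sync_cfg_0() would fold into MDP_SYNC_CFG_0.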
diff --git a/drivers/staging/msm/memory.c b/drivers/staging/msm/memory.c
deleted file mode 100644
index cc80fdf17d6..00000000000
--- a/drivers/staging/msm/memory.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/* arch/arm/mach-msm/memory.c
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/mm.h>
-#include <linux/mm_types.h>
-#include <linux/bootmem.h>
-#include <linux/module.h>
-#include <asm/pgtable.h>
-#include <asm/io.h>
-#include <asm/mach/map.h>
-#include "memory_ll.h"
-#include <asm/cacheflush.h>
-#if defined(CONFIG_MSM_NPA_REMOTE)
-#include "npa_remote.h"
-#include <linux/completion.h>
-#include <linux/err.h>
-#endif
-
-int arch_io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn, unsigned long size, pgprot_t prot)
-{
- unsigned long pfn_addr = pfn << PAGE_SHIFT;
-/*
- if ((pfn_addr >= 0x88000000) && (pfn_addr < 0xD0000000)) {
- prot = pgprot_device(prot);
- printk("remapping device %lx\n", prot);
- }
-*/
- panic("Memory remap PFN stuff not done\n");
- return remap_pfn_range(vma, addr, pfn, size, prot);
-}
-
-void *zero_page_strongly_ordered;
-
-static void map_zero_page_strongly_ordered(void)
-{
- if (zero_page_strongly_ordered)
- return;
-/*
- zero_page_strongly_ordered =
- ioremap_strongly_ordered(page_to_pfn(empty_zero_page)
- << PAGE_SHIFT, PAGE_SIZE);
-*/
- panic("Strongly ordered memory functions not implemented\n");
-}
-
-void write_to_strongly_ordered_memory(void)
-{
- map_zero_page_strongly_ordered();
- *(int *)zero_page_strongly_ordered = 0;
-}
-EXPORT_SYMBOL(write_to_strongly_ordered_memory);
-
-void flush_axi_bus_buffer(void)
-{
- __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
- : : "r" (0) : "memory");
- write_to_strongly_ordered_memory();
-}
-
-#define CACHE_LINE_SIZE 32
-
-/* These cache related routines make the assumption that the associated
- * physical memory is contiguous. They will operate on all (L1
- * and L2 if present) caches.
- */
-void clean_and_invalidate_caches(unsigned long vstart,
- unsigned long length, unsigned long pstart)
-{
- unsigned long vaddr;
-
- for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
- asm ("mcr p15, 0, %0, c7, c14, 1" : : "r" (vaddr));
-#ifdef CONFIG_OUTER_CACHE
- outer_flush_range(pstart, pstart + length);
-#endif
- asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
- asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
-
- flush_axi_bus_buffer();
-}
-
-void clean_caches(unsigned long vstart,
- unsigned long length, unsigned long pstart)
-{
- unsigned long vaddr;
-
- for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
- asm ("mcr p15, 0, %0, c7, c10, 1" : : "r" (vaddr));
-#ifdef CONFIG_OUTER_CACHE
- outer_clean_range(pstart, pstart + length);
-#endif
- asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
- asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
-
- flush_axi_bus_buffer();
-}
-
-void invalidate_caches(unsigned long vstart,
- unsigned long length, unsigned long pstart)
-{
- unsigned long vaddr;
-
- for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
- asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (vaddr));
-#ifdef CONFIG_OUTER_CACHE
- outer_inv_range(pstart, pstart + length);
-#endif
- asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
- asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
-
- flush_axi_bus_buffer();
-}
-
-void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
-{
- void *unused_addr = NULL;
- unsigned long addr, tmp_size, unused_size;
-
- /* Allocate maximum size needed, see where it ends up.
- * Then free it -- in this path there are no other allocators
- * so we can depend on getting the same address back
- * when we allocate a smaller piece that is aligned
- * at the end (if necessary) and the piece we really want,
- * then free the unused first piece.
- */
-
- tmp_size = size + alignment - PAGE_SIZE;
- addr = (unsigned long)alloc_bootmem(tmp_size);
- free_bootmem(__pa(addr), tmp_size);
-
- unused_size = alignment - (addr % alignment);
- if (unused_size)
- unused_addr = alloc_bootmem(unused_size);
-
- addr = (unsigned long)alloc_bootmem(size);
- if (unused_size)
- free_bootmem(__pa(unused_addr), unused_size);
-
- return (void *)addr;
-}
-
-#if defined(CONFIG_MSM_NPA_REMOTE)
-struct npa_client *npa_memory_client;
-#endif
-
-static int change_memory_power_state(unsigned long start_pfn,
- unsigned long nr_pages, int state)
-{
-#if defined(CONFIG_MSM_NPA_REMOTE)
- static atomic_t node_created_flag = ATOMIC_INIT(1);
-#else
- unsigned long start;
- unsigned long size;
- unsigned long virtual;
-#endif
- int rc = 0;
-
-#if defined(CONFIG_MSM_NPA_REMOTE)
- if (atomic_dec_and_test(&node_created_flag)) {
- /* Create NPA 'required' client. */
- npa_memory_client = npa_create_sync_client(NPA_MEMORY_NODE_NAME,
- "memory node", NPA_CLIENT_REQUIRED);
- if (IS_ERR(npa_memory_client)) {
- rc = PTR_ERR(npa_memory_client);
- return rc;
- }
- }
-
- rc = npa_issue_required_request(npa_memory_client, state);
-#else
- if (state == MEMORY_DEEP_POWERDOWN) {
- /* simulate turning off memory by writing bit pattern into it */
- start = start_pfn << PAGE_SHIFT;
- size = nr_pages << PAGE_SHIFT;
- virtual = __phys_to_virt(start);
- memset((void *)virtual, 0x27, size);
- }
-#endif
- return rc;
-}
-
-int platform_physical_remove_pages(unsigned long start_pfn,
- unsigned long nr_pages)
-{
- return change_memory_power_state(start_pfn, nr_pages,
- MEMORY_DEEP_POWERDOWN);
-}
-
-int platform_physical_add_pages(unsigned long start_pfn,
- unsigned long nr_pages)
-{
- return change_memory_power_state(start_pfn, nr_pages, MEMORY_ACTIVE);
-}
-
-int platform_physical_low_power_pages(unsigned long start_pfn,
- unsigned long nr_pages)
-{
- return change_memory_power_state(start_pfn, nr_pages,
- MEMORY_SELF_REFRESH);
-}
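
The alignment trick in alloc_bootmem_aligned() above reduces to simple address arithmetic: probe where the allocator would place a maximal request, burn the gap up to the next aligned boundary, then take the real allocation. A minimal sketch of just that arithmetic, with hypothetical addresses.

#include <stdio.h>

int main(void)
{
	/* Hypothetical probe result from the first alloc_bootmem(). */
	unsigned long probe_addr = 0x00A13000;
	unsigned long alignment  = 0x00100000;	/* want 1 MiB alignment */

	/* Padding burned so the real allocation starts aligned,
	 * matching unused_size in alloc_bootmem_aligned(). */
	unsigned long pad = alignment - (probe_addr % alignment);
	unsigned long aligned_addr = probe_addr + pad;

	printf("pad = 0x%lx, aligned allocation starts at 0x%lx\n",
	       pad, aligned_addr);
	return 0;
}

As the comment in the function notes, this only works because no other bootmem allocations can slip in between the probe and the final request.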
diff --git a/drivers/staging/msm/memory_ll.h b/drivers/staging/msm/memory_ll.h
deleted file mode 100644
index 18a239a89a7..00000000000
--- a/drivers/staging/msm/memory_ll.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#ifndef __ASM_ARCH_MEMORY_LL_H
-#define __ASM_ARCH_MEMORY_LL_H
-
-#define MAX_PHYSMEM_BITS 32
-#define SECTION_SIZE_BITS 25
-
-#define HAS_ARCH_IO_REMAP_PFN_RANGE
-
-#ifndef __ASSEMBLY__
-void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment);
-void clean_and_invalidate_caches(unsigned long, unsigned long, unsigned long);
-void clean_caches(unsigned long, unsigned long, unsigned long);
-void invalidate_caches(unsigned long, unsigned long, unsigned long);
-int platform_physical_remove_pages(unsigned long, unsigned long);
-int platform_physical_add_pages(unsigned long, unsigned long);
-int platform_physical_low_power_pages(unsigned long, unsigned long);
-
-#ifdef CONFIG_ARCH_MSM_ARM11
-void write_to_strongly_ordered_memory(void);
-
-#include <asm/mach-types.h>
-
-#define arch_barrier_extra() do \
- { if (machine_is_msm7x27_surf() || machine_is_msm7x27_ffa()) \
- write_to_strongly_ordered_memory(); \
- } while (0)
-#endif
-
-#ifdef CONFIG_CACHE_L2X0
-extern void l2x0_cache_sync(void);
-#define finish_arch_switch(prev) do { l2x0_cache_sync(); } while (0)
-#endif
-
-#endif
-
-#ifdef CONFIG_ARCH_MSM_SCORPION
-#define arch_has_speculative_dfetch() 1
-#endif
-
-#endif
-
-/* these correspond to values known by the modem */
-#define MEMORY_DEEP_POWERDOWN 0
-#define MEMORY_SELF_REFRESH 1
-#define MEMORY_ACTIVE 2
-
-#define NPA_MEMORY_NODE_NAME "/mem/ebi1/cs1"
diff --git a/drivers/staging/msm/msm_fb.c b/drivers/staging/msm/msm_fb.c
deleted file mode 100644
index e7ef836eb8d..00000000000
--- a/drivers/staging/msm/msm_fb.c
+++ /dev/null
@@ -1,2354 +0,0 @@
-/*
- *
- * Core MSM framebuffer driver.
- *
- * Copyright (C) 2007 Google Incorporated
- * Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/fb.h>
-#include "msm_mdp.h"
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <mach/board.h>
-#include <linux/uaccess.h>
-
-#include <linux/workqueue.h>
-#include <linux/string.h>
-#include <linux/version.h>
-#include <linux/proc_fs.h>
-#include <linux/vmalloc.h>
-#include <linux/debugfs.h>
-#include <linux/console.h>
-#include <linux/leds.h>
-#include <asm/dma-mapping.h>
-
-
-#define MSM_FB_C
-#include "msm_fb.h"
-#include "mddihosti.h"
-#include "tvenc.h"
-#include "mdp.h"
-#include "mdp4.h"
-
-#ifdef CONFIG_FB_MSM_LOGO
-#define INIT_IMAGE_FILE "/logo.rle"
-extern int load_565rle_image(char *filename);
-#endif
-
-
-#define pgprot_noncached(prot) \
- __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
-#define pgprot_writecombine(prot) \
- __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
-#define pgprot_device(prot) \
- __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_DEV_NONSHARED)
-#define pgprot_writethroughcache(prot) \
- __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_WRITETHROUGH)
-#define pgprot_writebackcache(prot) \
- __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_WRITEBACK)
-#define pgprot_writebackwacache(prot) \
- __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_WRITEALLOC)
-
-static unsigned char *fbram;
-static unsigned char *fbram_phys;
-static int fbram_size;
-
-static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
-static int pdev_list_cnt;
-
-int vsync_mode = 1;
-
-#define MAX_FBI_LIST 32
-static struct fb_info *fbi_list[MAX_FBI_LIST];
-static int fbi_list_index;
-
-static struct msm_fb_data_type *mfd_list[MAX_FBI_LIST];
-static int mfd_list_index;
-
-static u32 msm_fb_pseudo_palette[16] = {
- 0x00000000, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
-};
-
-u32 msm_fb_debug_enabled;
-/* Setting msm_fb_msg_level to 8 prints out ALL messages */
-u32 msm_fb_msg_level = 7;
-
-/* Setting mddi_msg_level to 8 prints out ALL messages */
-u32 mddi_msg_level = 5;
-
-extern int32 mdp_block_power_cnt[MDP_MAX_BLOCK];
-extern unsigned long mdp_timer_duration;
-
-static int msm_fb_register(struct msm_fb_data_type *mfd);
-static int msm_fb_open(struct fb_info *info, int user);
-static int msm_fb_release(struct fb_info *info, int user);
-static int msm_fb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info);
-static int msm_fb_stop_sw_refresher(struct msm_fb_data_type *mfd);
-int msm_fb_resume_sw_refresher(struct msm_fb_data_type *mfd);
-static int msm_fb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info);
-static int msm_fb_set_par(struct fb_info *info);
-static int msm_fb_blank_sub(int blank_mode, struct fb_info *info,
- boolean op_enable);
-static int msm_fb_suspend_sub(struct msm_fb_data_type *mfd);
-static int msm_fb_resume_sub(struct msm_fb_data_type *mfd);
-static int msm_fb_ioctl(struct fb_info *info, unsigned int cmd,
- unsigned long arg);
-static int msm_fb_mmap(struct fb_info *info, struct vm_area_struct * vma);
-
-#ifdef MSM_FB_ENABLE_DBGFS
-
-#define MSM_FB_MAX_DBGFS 1024
-#define MAX_BACKLIGHT_BRIGHTNESS 255
-
-int msm_fb_debugfs_file_index;
-struct dentry *msm_fb_debugfs_root;
-struct dentry *msm_fb_debugfs_file[MSM_FB_MAX_DBGFS];
-
-struct dentry *msm_fb_get_debugfs_root(void)
-{
- if (msm_fb_debugfs_root == NULL)
- msm_fb_debugfs_root = debugfs_create_dir("msm_fb", NULL);
-
- return msm_fb_debugfs_root;
-}
-
-void msm_fb_debugfs_file_create(struct dentry *root, const char *name,
- u32 *var)
-{
- if (msm_fb_debugfs_file_index >= MSM_FB_MAX_DBGFS)
- return;
-
- msm_fb_debugfs_file[msm_fb_debugfs_file_index++] =
- debugfs_create_u32(name, S_IRUGO | S_IWUSR, root, var);
-}
-#endif
-
-int msm_fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-
- if (!mfd->cursor_update)
- return -ENODEV;
-
- return mfd->cursor_update(info, cursor);
-}
-
-static int msm_fb_resource_initialized;
-
-#ifndef CONFIG_FB_BACKLIGHT
-static int lcd_backlight_registered;
-
-static void msm_fb_set_bl_brightness(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- struct msm_fb_data_type *mfd = dev_get_drvdata(led_cdev->dev->parent);
- int bl_lvl;
-
- if (value > MAX_BACKLIGHT_BRIGHTNESS)
- value = MAX_BACKLIGHT_BRIGHTNESS;
-
- /* This maps android backlight level 0 to 255 into
- driver backlight level 0 to bl_max with rounding */
- bl_lvl = (2 * value * mfd->panel_info.bl_max + MAX_BACKLIGHT_BRIGHTNESS)
- /(2 * MAX_BACKLIGHT_BRIGHTNESS);
-
- if (!bl_lvl && value)
- bl_lvl = 1;
-
- msm_fb_set_backlight(mfd, bl_lvl, 1);
-}
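
The brightness mapping above scales the 0..255 LED-class value onto the panel's 0..bl_max range with round-to-nearest integer arithmetic; doubling both terms and adding MAX_BACKLIGHT_BRIGHTNESS is the usual add-half-the-divisor trick. A quick check with hypothetical values (value = 128, bl_max = 32).

#include <stdio.h>

#define MAX_BACKLIGHT_BRIGHTNESS 255

int main(void)
{
	unsigned int value = 128;	/* hypothetical android level */
	unsigned int bl_max = 32;	/* hypothetical panel maximum */

	/* Same round-to-nearest scaling as msm_fb_set_bl_brightness(). */
	unsigned int bl_lvl = (2 * value * bl_max + MAX_BACKLIGHT_BRIGHTNESS)
			      / (2 * MAX_BACKLIGHT_BRIGHTNESS);

	printf("android %u/255 -> driver %u/%u\n", value, bl_lvl, bl_max);
	return 0;
}

This prints 16 for the sample inputs; the clamp to 1 in the function above then keeps any nonzero request from rounding down to a fully-off backlight.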
-
-static struct led_classdev backlight_led = {
- .name = "lcd-backlight",
- .brightness = MAX_BACKLIGHT_BRIGHTNESS,
- .brightness_set = msm_fb_set_bl_brightness,
-};
-#endif
-
-static struct msm_fb_platform_data *msm_fb_pdata;
-
-int msm_fb_detect_client(const char *name)
-{
- int ret = -EPERM;
-#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
- u32 id;
-#endif
-
- if (msm_fb_pdata && msm_fb_pdata->detect_client) {
- ret = msm_fb_pdata->detect_client(name);
-
- /* if it's non mddi panel, we need to pre-scan
- mddi client to see if we can disable mddi host */
-
-#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
- if (!ret && msm_fb_pdata->mddi_prescan)
- id = mddi_get_client_id();
-#endif
- }
-
- return ret;
-}
-
-static int msm_fb_probe(struct platform_device *pdev)
-{
- struct msm_fb_data_type *mfd;
- int rc;
-
- MSM_FB_DEBUG("msm_fb_probe\n");
-
- if ((pdev->id == 0) && (pdev->num_resources > 0)) {
- msm_fb_pdata = pdev->dev.platform_data;
- fbram_size =
- pdev->resource[0].end - pdev->resource[0].start + 1;
- fbram_phys = (char *)pdev->resource[0].start;
- fbram = ioremap((unsigned long)fbram_phys, fbram_size);
-
- if (!fbram) {
- printk(KERN_ERR "fbram ioremap failed!\n");
- return -ENOMEM;
- }
- MSM_FB_INFO("msm_fb_probe: phy_Addr = 0x%x virt = 0x%x\n",
- (int)fbram_phys, (int)fbram);
-
- msm_fb_resource_initialized = 1;
- return 0;
- }
-
- if (!msm_fb_resource_initialized)
- return -EPERM;
-
- mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
-
- if (!mfd)
- return -ENODEV;
-
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
- return -ENOMEM;
-
- mfd->panel_info.frame_count = 0;
- mfd->bl_level = mfd->panel_info.bl_max;
-
- if (mfd->panel_info.type == LCDC_PANEL)
- mfd->allow_set_offset =
- msm_fb_pdata->allow_set_offset != NULL ?
- msm_fb_pdata->allow_set_offset() : 0;
- else
- mfd->allow_set_offset = 0;
-
- rc = msm_fb_register(mfd);
- if (rc)
- return rc;
-
-#ifdef CONFIG_FB_BACKLIGHT
- msm_fb_config_backlight(mfd);
-#else
- /* android supports only one lcd-backlight/lcd for now */
- if (!lcd_backlight_registered) {
- if (led_classdev_register(&pdev->dev, &backlight_led))
- printk(KERN_ERR "led_classdev_register failed\n");
- else
- lcd_backlight_registered = 1;
- }
-#endif
-
- pdev_list[pdev_list_cnt++] = pdev;
- return 0;
-}
-
-static int msm_fb_remove(struct platform_device *pdev)
-{
- struct msm_fb_data_type *mfd;
-
- MSM_FB_DEBUG("msm_fb_remove\n");
-
- mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
-
- if (!mfd)
- return -ENODEV;
-
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- if (msm_fb_suspend_sub(mfd))
- printk(KERN_ERR "msm_fb_remove: can't stop the device %d\n", mfd->index);
-
- if (mfd->channel_irq != 0)
- free_irq(mfd->channel_irq, (void *)mfd);
-
- if (mfd->vsync_width_boundary)
- vfree(mfd->vsync_width_boundary);
-
- if (mfd->vsync_resync_timer.function)
- del_timer(&mfd->vsync_resync_timer);
-
- if (mfd->refresh_timer.function)
- del_timer(&mfd->refresh_timer);
-
- if (mfd->dma_hrtimer.function)
- hrtimer_cancel(&mfd->dma_hrtimer);
-
- /* remove /dev/fb* */
- unregister_framebuffer(mfd->fbi);
-
-#ifdef CONFIG_FB_BACKLIGHT
- /* remove /sys/class/backlight */
- backlight_device_unregister(mfd->fbi->bl_dev);
-#else
- if (lcd_backlight_registered) {
- lcd_backlight_registered = 0;
- led_classdev_unregister(&backlight_led);
- }
-#endif
-
-#ifdef MSM_FB_ENABLE_DBGFS
- if (mfd->sub_dir)
- debugfs_remove(mfd->sub_dir);
-#endif
-
- return 0;
-}
-
-#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
-static int msm_fb_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct msm_fb_data_type *mfd;
- int ret = 0;
-
- MSM_FB_DEBUG("msm_fb_suspend\n");
-
- mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
-
- if ((!mfd) || (mfd->key != MFD_KEY))
- return 0;
-
- console_lock();
- fb_set_suspend(mfd->fbi, 1);
-
- ret = msm_fb_suspend_sub(mfd);
- if (ret != 0) {
- printk(KERN_ERR "msm_fb: failed to suspend! %d\n", ret);
- fb_set_suspend(mfd->fbi, 0);
- } else {
- pdev->dev.power.power_state = state;
- }
-
- console_unlock();
- return ret;
-}
-#else
-#define msm_fb_suspend NULL
-#endif
-
-static int msm_fb_suspend_sub(struct msm_fb_data_type *mfd)
-{
- int ret = 0;
-
- if ((!mfd) || (mfd->key != MFD_KEY))
- return 0;
-
- /*
- * suspend this channel
- */
- mfd->suspend.sw_refreshing_enable = mfd->sw_refreshing_enable;
- mfd->suspend.op_enable = mfd->op_enable;
- mfd->suspend.panel_power_on = mfd->panel_power_on;
-
- if (mfd->op_enable) {
- ret =
- msm_fb_blank_sub(FB_BLANK_POWERDOWN, mfd->fbi,
- mfd->suspend.op_enable);
- if (ret) {
- MSM_FB_INFO
- ("msm_fb_suspend: can't turn off display!\n");
- return ret;
- }
- mfd->op_enable = FALSE;
- }
- /*
- * try to power down
- */
- mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-
- /*
- * detach display channel irq if there's any
- * or wait until vsync-resync completes
- */
- if ((mfd->dest == DISPLAY_LCD)) {
- if (mfd->panel_info.lcd.vsync_enable) {
- if (mfd->panel_info.lcd.hw_vsync_mode) {
- if (mfd->channel_irq != 0)
- disable_irq(mfd->channel_irq);
- } else {
- volatile boolean vh_pending;
- do {
- vh_pending = mfd->vsync_handler_pending;
- } while (vh_pending);
- }
- }
- }
-
- return 0;
-}
-
-#if defined(CONFIG_PM) && !defined(CONFIG_HAS_EARLYSUSPEND)
-static int msm_fb_resume(struct platform_device *pdev)
-{
-	/* This resume function is called with interrupts enabled. */
- int ret = 0;
- struct msm_fb_data_type *mfd;
-
- MSM_FB_DEBUG("msm_fb_resume\n");
-
- mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
-
- if ((!mfd) || (mfd->key != MFD_KEY))
- return 0;
-
- console_lock();
- ret = msm_fb_resume_sub(mfd);
- pdev->dev.power.power_state = PMSG_ON;
- fb_set_suspend(mfd->fbi, 1);
- console_unlock();
-
- return ret;
-}
-#else
-#define msm_fb_resume NULL
-#endif
-
-static int msm_fb_resume_sub(struct msm_fb_data_type *mfd)
-{
- int ret = 0;
-
- if ((!mfd) || (mfd->key != MFD_KEY))
- return 0;
-
- /* attach display channel irq if there's any */
- if (mfd->channel_irq != 0)
- enable_irq(mfd->channel_irq);
-
- /* resume state var recover */
- mfd->sw_refreshing_enable = mfd->suspend.sw_refreshing_enable;
- mfd->op_enable = mfd->suspend.op_enable;
-
- if (mfd->suspend.panel_power_on) {
- ret =
- msm_fb_blank_sub(FB_BLANK_UNBLANK, mfd->fbi,
- mfd->op_enable);
- if (ret)
- MSM_FB_INFO("msm_fb_resume: can't turn on display!\n");
- }
-
- return ret;
-}
-
-static struct platform_driver msm_fb_driver = {
- .probe = msm_fb_probe,
- .remove = msm_fb_remove,
-#ifndef CONFIG_HAS_EARLYSUSPEND
- .suspend = msm_fb_suspend,
- .resume = msm_fb_resume,
-#endif
- .shutdown = NULL,
- .driver = {
- /* Driver name must match the device name added in platform.c. */
- .name = "msm_fb",
- },
-};
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
-static void msmfb_early_suspend(struct early_suspend *h)
-{
- struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
- early_suspend);
- msm_fb_suspend_sub(mfd);
-}
-
-static void msmfb_early_resume(struct early_suspend *h)
-{
- struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
- early_suspend);
- msm_fb_resume_sub(mfd);
-}
-#endif
-
-void msm_fb_set_backlight(struct msm_fb_data_type *mfd, __u32 bkl_lvl, u32 save)
-{
- struct msm_fb_panel_data *pdata;
-
- pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
-
- if ((pdata) && (pdata->set_backlight)) {
- down(&mfd->sem);
- if ((bkl_lvl != mfd->bl_level) || (!save)) {
- u32 old_lvl;
-
- old_lvl = mfd->bl_level;
- mfd->bl_level = bkl_lvl;
- pdata->set_backlight(mfd);
-
- if (!save)
- mfd->bl_level = old_lvl;
- }
- up(&mfd->sem);
- }
-}
-
-static int msm_fb_blank_sub(int blank_mode, struct fb_info *info,
- boolean op_enable)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- struct msm_fb_panel_data *pdata = NULL;
- int ret = 0;
-
- if (!op_enable)
- return -EPERM;
-
- pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
- if ((!pdata) || (!pdata->on) || (!pdata->off)) {
- printk(KERN_ERR "msm_fb_blank_sub: no panel operation detected!\n");
- return -ENODEV;
- }
-
- switch (blank_mode) {
- case FB_BLANK_UNBLANK:
- if (!mfd->panel_power_on) {
- mdelay(100);
- ret = pdata->on(mfd->pdev);
- if (ret == 0) {
- mfd->panel_power_on = TRUE;
-
- msm_fb_set_backlight(mfd,
- mfd->bl_level, 0);
-
-/* ToDo: possible conflict with android which doesn't expect sw refresher */
-/*
- if (!mfd->hw_refresh)
- {
- if ((ret = msm_fb_resume_sw_refresher(mfd)) != 0)
- {
- MSM_FB_INFO("msm_fb_blank_sub: msm_fb_resume_sw_refresher failed = %d!\n",ret);
- }
- }
-*/
- }
- }
- break;
-
- case FB_BLANK_VSYNC_SUSPEND:
- case FB_BLANK_HSYNC_SUSPEND:
- case FB_BLANK_NORMAL:
- case FB_BLANK_POWERDOWN:
- default:
- if (mfd->panel_power_on) {
- int curr_pwr_state;
-
- mfd->op_enable = FALSE;
- curr_pwr_state = mfd->panel_power_on;
- mfd->panel_power_on = FALSE;
-
- mdelay(100);
- ret = pdata->off(mfd->pdev);
- if (ret)
- mfd->panel_power_on = curr_pwr_state;
-
- msm_fb_set_backlight(mfd, 0, 0);
- mfd->op_enable = TRUE;
- }
- break;
- }
-
- return ret;
-}
-
-static void msm_fb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-
- cfb_fillrect(info, rect);
- if (!mfd->hw_refresh && (info->var.yoffset == 0) &&
- !mfd->sw_currently_refreshing) {
- struct fb_var_screeninfo var;
-
- var = info->var;
- var.reserved[0] = 0x54445055;
- var.reserved[1] = (rect->dy << 16) | (rect->dx);
- var.reserved[2] = ((rect->dy + rect->height) << 16) |
- (rect->dx + rect->width);
-
- msm_fb_pan_display(&var, info);
- }
-}
-
-static void msm_fb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-
- cfb_copyarea(info, area);
- if (!mfd->hw_refresh && (info->var.yoffset == 0) &&
- !mfd->sw_currently_refreshing) {
- struct fb_var_screeninfo var;
-
- var = info->var;
- var.reserved[0] = 0x54445055;
- var.reserved[1] = (area->dy << 16) | (area->dx);
- var.reserved[2] = ((area->dy + area->height) << 16) |
- (area->dx + area->width);
-
- msm_fb_pan_display(&var, info);
- }
-}
-
-static void msm_fb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-
- cfb_imageblit(info, image);
- if (!mfd->hw_refresh && (info->var.yoffset == 0) &&
- !mfd->sw_currently_refreshing) {
- struct fb_var_screeninfo var;
-
- var = info->var;
- var.reserved[0] = 0x54445055;
- var.reserved[1] = (image->dy << 16) | (image->dx);
- var.reserved[2] = ((image->dy + image->height) << 16) |
- (image->dx + image->width);
-
- msm_fb_pan_display(&var, info);
- }
-}
-
-static int msm_fb_blank(int blank_mode, struct fb_info *info)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- return msm_fb_blank_sub(blank_mode, info, mfd->op_enable);
-}
-
-static int msm_fb_set_lut(struct fb_cmap *cmap, struct fb_info *info)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-
- if (!mfd->lut_update)
- return -ENODEV;
-
- mfd->lut_update(info, cmap);
- return 0;
-}
-
-/*
- * Custom framebuffer mmap() function for the MSM driver.
- * Differs from the standard mmap() implementation by allowing
- * customized page protection.
- */
-static int msm_fb_mmap(struct fb_info *info, struct vm_area_struct * vma)
-{
- /* Get frame buffer memory range. */
- unsigned long start = info->fix.smem_start;
- u32 len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len);
- unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- if (off >= len) {
- /* memory mapped io */
- off -= len;
- if (info->var.accel_flags) {
- mutex_unlock(&info->lock);
- return -EINVAL;
- }
- start = info->fix.mmio_start;
- len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len);
- }
-
- /* Set VM flags. */
- start &= PAGE_MASK;
- if ((vma->vm_end - vma->vm_start + off) > len)
- return -EINVAL;
- off += start;
- vma->vm_pgoff = off >> PAGE_SHIFT;
- /* This is an IO map - tell maydump to skip this VMA */
- vma->vm_flags |= VM_IO | VM_RESERVED;
-
- /* Set VM page protection */
- if (mfd->mdp_fb_page_protection == MDP_FB_PAGE_PROTECTION_WRITECOMBINE)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- else if (mfd->mdp_fb_page_protection ==
- MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE)
- vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);
- else if (mfd->mdp_fb_page_protection ==
- MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE)
- vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot);
- else if (mfd->mdp_fb_page_protection ==
- MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE)
- vma->vm_page_prot = pgprot_writebackwacache(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- /* Remap the frame buffer I/O range */
- if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return 0;
-}
-
-static struct fb_ops msm_fb_ops = {
- .owner = THIS_MODULE,
- .fb_open = msm_fb_open,
- .fb_release = msm_fb_release,
- .fb_read = NULL,
- .fb_write = NULL,
- .fb_cursor = NULL,
- .fb_check_var = msm_fb_check_var, /* vinfo check */
- .fb_set_par = msm_fb_set_par, /* set the video mode according to info->var */
- .fb_setcolreg = NULL, /* set color register */
- .fb_blank = msm_fb_blank, /* blank display */
- .fb_pan_display = msm_fb_pan_display, /* pan display */
- .fb_fillrect = msm_fb_fillrect, /* Draws a rectangle */
-	.fb_copyarea = msm_fb_copyarea,	/* Copy data from one area to another */
-	.fb_imageblit = msm_fb_imageblit,	/* Draws an image to the display */
- .fb_rotate = NULL,
- .fb_sync = NULL, /* wait for blit idle, optional */
- .fb_ioctl = msm_fb_ioctl, /* perform fb specific ioctl (optional) */
- .fb_mmap = msm_fb_mmap,
-};
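
The fb_ops table above is what userspace ultimately reaches through the standard fbdev interface, with msm_fb_mmap() choosing the cache attributes of the mapping. A minimal client-side sketch using only generic fbdev calls; nothing MSM-specific is assumed.

#include <fcntl.h>
#include <linux/fb.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	struct fb_var_screeninfo var;
	struct fb_fix_screeninfo fix;
	void *fb;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, FBIOGET_VSCREENINFO, &var) ||
	    ioctl(fd, FBIOGET_FSCREENINFO, &fix)) {
		close(fd);
		return 1;
	}

	/* Map the whole framebuffer; the driver's msm_fb_mmap() decides
	 * the page protection (write-combine, write-back, ...). */
	fb = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
		  MAP_SHARED, fd, 0);
	if (fb == MAP_FAILED) {
		close(fd);
		return 1;
	}

	printf("%ux%u, %u bpp, line length %u\n",
	       var.xres, var.yres, var.bits_per_pixel, fix.line_length);

	memset(fb, 0, fix.line_length);	/* clear the first scanline */

	munmap(fb, fix.smem_len);
	close(fd);
	return 0;
}

Which protection the mapping actually gets is governed by mfd->mdp_fb_page_protection in msm_fb_mmap() above, not by anything the client passes in.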
-
-static int msm_fb_register(struct msm_fb_data_type *mfd)
-{
- int ret = -ENODEV;
- int bpp;
- struct msm_panel_info *panel_info = &mfd->panel_info;
- struct fb_info *fbi = mfd->fbi;
- struct fb_fix_screeninfo *fix;
- struct fb_var_screeninfo *var;
- int *id;
- int fbram_offset;
-
- /*
- * fb info initialization
- */
- fix = &fbi->fix;
- var = &fbi->var;
-
- fix->type_aux = 0; /* if type == FB_TYPE_INTERLEAVED_PLANES */
- fix->visual = FB_VISUAL_TRUECOLOR; /* True Color */
- fix->ywrapstep = 0; /* No support */
- fix->mmio_start = 0; /* No MMIO Address */
- fix->mmio_len = 0; /* No MMIO Address */
-	fix->accel = FB_ACCEL_NONE;	/* FB_ACCEL_MSM needs to be added in fb.h */
-
- var->xoffset = 0, /* Offset from virtual to visible */
- var->yoffset = 0, /* resolution */
- var->grayscale = 0, /* No graylevels */
- var->nonstd = 0, /* standard pixel format */
- var->activate = FB_ACTIVATE_VBL, /* activate it at vsync */
- var->height = -1, /* height of picture in mm */
- var->width = -1, /* width of picture in mm */
- var->accel_flags = 0, /* acceleration flags */
- var->sync = 0, /* see FB_SYNC_* */
- var->rotate = 0, /* angle we rotate counter clockwise */
- mfd->op_enable = FALSE;
-
- switch (mfd->fb_imgType) {
- case MDP_RGB_565:
- fix->type = FB_TYPE_PACKED_PIXELS;
- fix->xpanstep = 1;
- fix->ypanstep = 1;
- var->vmode = FB_VMODE_NONINTERLACED;
- var->blue.offset = 0;
- var->green.offset = 5;
- var->red.offset = 11;
- var->blue.length = 5;
- var->green.length = 6;
- var->red.length = 5;
- var->blue.msb_right = 0;
- var->green.msb_right = 0;
- var->red.msb_right = 0;
- var->transp.offset = 0;
- var->transp.length = 0;
- bpp = 2;
- break;
-
- case MDP_RGB_888:
- fix->type = FB_TYPE_PACKED_PIXELS;
- fix->xpanstep = 1;
- fix->ypanstep = 1;
- var->vmode = FB_VMODE_NONINTERLACED;
- var->blue.offset = 0;
- var->green.offset = 8;
- var->red.offset = 16;
- var->blue.length = 8;
- var->green.length = 8;
- var->red.length = 8;
- var->blue.msb_right = 0;
- var->green.msb_right = 0;
- var->red.msb_right = 0;
- var->transp.offset = 0;
- var->transp.length = 0;
- bpp = 3;
- break;
-
- case MDP_ARGB_8888:
- fix->type = FB_TYPE_PACKED_PIXELS;
- fix->xpanstep = 1;
- fix->ypanstep = 1;
- var->vmode = FB_VMODE_NONINTERLACED;
- var->blue.offset = 0;
- var->green.offset = 8;
- var->red.offset = 16;
- var->blue.length = 8;
- var->green.length = 8;
- var->red.length = 8;
- var->blue.msb_right = 0;
- var->green.msb_right = 0;
- var->red.msb_right = 0;
- var->transp.offset = 24;
- var->transp.length = 8;
- bpp = 3;
- break;
-
- case MDP_YCRYCB_H2V1:
- /* ToDo: need to check TV-Out YUV422i framebuffer format */
- /* we might need to create new type define */
- fix->type = FB_TYPE_INTERLEAVED_PLANES;
- fix->xpanstep = 2;
- fix->ypanstep = 1;
- var->vmode = FB_VMODE_NONINTERLACED;
-
- /* how about R/G/B offset? */
- var->blue.offset = 0;
- var->green.offset = 5;
- var->red.offset = 11;
- var->blue.length = 5;
- var->green.length = 6;
- var->red.length = 5;
- var->blue.msb_right = 0;
- var->green.msb_right = 0;
- var->red.msb_right = 0;
- var->transp.offset = 0;
- var->transp.length = 0;
- bpp = 2;
- break;
-
- default:
- MSM_FB_ERR("msm_fb_init: fb %d unknown image type!\n",
- mfd->index);
- return ret;
- }
-
- /* The adreno GPU hardware requires that the pitch be aligned to
- 32 pixels for color buffers, so for the cases where the GPU
- is writing directly to fb0, the framebuffer pitch
- also needs to be 32 pixel aligned */
-
- if (mfd->index == 0)
- fix->line_length = ALIGN(panel_info->xres * bpp, 32);
- else
- fix->line_length = panel_info->xres * bpp;
-
- fix->smem_len = fix->line_length * panel_info->yres * mfd->fb_page;
-
- mfd->var_xres = panel_info->xres;
- mfd->var_yres = panel_info->yres;
-
- var->pixclock = mfd->panel_info.clk_rate;
- mfd->var_pixclock = var->pixclock;
-
- var->xres = panel_info->xres;
- var->yres = panel_info->yres;
- var->xres_virtual = panel_info->xres;
- var->yres_virtual = panel_info->yres * mfd->fb_page;
- var->bits_per_pixel = bpp * 8, /* FrameBuffer color depth */
- /*
- * id field for fb app
- */
- id = (int *)&mfd->panel;
-
-#if defined(CONFIG_FB_MSM_MDP22)
- snprintf(fix->id, sizeof(fix->id), "msmfb22_%x", (__u32) *id);
-#elif defined(CONFIG_FB_MSM_MDP30)
- snprintf(fix->id, sizeof(fix->id), "msmfb30_%x", (__u32) *id);
-#elif defined(CONFIG_FB_MSM_MDP31)
- snprintf(fix->id, sizeof(fix->id), "msmfb31_%x", (__u32) *id);
-#elif defined(CONFIG_FB_MSM_MDP40)
- snprintf(fix->id, sizeof(fix->id), "msmfb40_%x", (__u32) *id);
-#else
- error CONFIG_FB_MSM_MDP undefined !
-#endif
- fbi->fbops = &msm_fb_ops;
- fbi->flags = FBINFO_FLAG_DEFAULT;
- fbi->pseudo_palette = msm_fb_pseudo_palette;
-
- mfd->ref_cnt = 0;
- mfd->sw_currently_refreshing = FALSE;
- mfd->sw_refreshing_enable = TRUE;
- mfd->panel_power_on = FALSE;
-
- mfd->pan_waiting = FALSE;
- init_completion(&mfd->pan_comp);
- init_completion(&mfd->refresher_comp);
- sema_init(&mfd->sem, 1);
-
- fbram_offset = PAGE_ALIGN((int)fbram)-(int)fbram;
- fbram += fbram_offset;
- fbram_phys += fbram_offset;
- fbram_size -= fbram_offset;
-
- if (fbram_size < fix->smem_len) {
- printk(KERN_ERR "error: no more framebuffer memory!\n");
- return -ENOMEM;
- }
-
- fbi->screen_base = fbram;
- fbi->fix.smem_start = (unsigned long)fbram_phys;
-
- memset(fbi->screen_base, 0x0, fix->smem_len);
-
- mfd->op_enable = TRUE;
- mfd->panel_power_on = FALSE;
-
- /* cursor memory allocation */
- if (mfd->cursor_update) {
- mfd->cursor_buf = dma_alloc_coherent(NULL,
- MDP_CURSOR_SIZE,
- (dma_addr_t *) &mfd->cursor_buf_phys,
- GFP_KERNEL);
- if (!mfd->cursor_buf)
- mfd->cursor_update = 0;
- }
-
- if (mfd->lut_update) {
- ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
- if (ret)
- printk(KERN_ERR "%s: fb_alloc_cmap() failed!\n",
- __func__);
- }
-
- if (register_framebuffer(fbi) < 0) {
- if (mfd->lut_update)
- fb_dealloc_cmap(&fbi->cmap);
-
- if (mfd->cursor_buf)
- dma_free_coherent(NULL,
- MDP_CURSOR_SIZE,
- mfd->cursor_buf,
- (dma_addr_t) mfd->cursor_buf_phys);
-
- mfd->op_enable = FALSE;
- return -EPERM;
- }
-
- fbram += fix->smem_len;
- fbram_phys += fix->smem_len;
- fbram_size -= fix->smem_len;
-
- MSM_FB_INFO
- ("FrameBuffer[%d] %dx%d size=%d bytes is registered successfully!\n",
- mfd->index, fbi->var.xres, fbi->var.yres, fbi->fix.smem_len);
-
-#ifdef CONFIG_FB_MSM_LOGO
- if (!load_565rle_image(INIT_IMAGE_FILE)) ; /* Flip buffer */
-#endif
- ret = 0;
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
- mfd->early_suspend.suspend = msmfb_early_suspend;
- mfd->early_suspend.resume = msmfb_early_resume;
- mfd->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 2;
- register_early_suspend(&mfd->early_suspend);
-#endif
-
-#ifdef MSM_FB_ENABLE_DBGFS
- {
- struct dentry *root;
- struct dentry *sub_dir;
- char sub_name[2];
-
- root = msm_fb_get_debugfs_root();
- if (root != NULL) {
- sub_name[0] = (char)(mfd->index + 0x30);
- sub_name[1] = '\0';
- sub_dir = debugfs_create_dir(sub_name, root);
- } else {
- sub_dir = NULL;
- }
-
- mfd->sub_dir = sub_dir;
-
- if (sub_dir) {
- msm_fb_debugfs_file_create(sub_dir, "op_enable",
- (u32 *) &mfd->op_enable);
- msm_fb_debugfs_file_create(sub_dir, "panel_power_on",
- (u32 *) &mfd->
- panel_power_on);
- msm_fb_debugfs_file_create(sub_dir, "ref_cnt",
- (u32 *) &mfd->ref_cnt);
- msm_fb_debugfs_file_create(sub_dir, "fb_imgType",
- (u32 *) &mfd->fb_imgType);
- msm_fb_debugfs_file_create(sub_dir,
- "sw_currently_refreshing",
- (u32 *) &mfd->
- sw_currently_refreshing);
- msm_fb_debugfs_file_create(sub_dir,
- "sw_refreshing_enable",
- (u32 *) &mfd->
- sw_refreshing_enable);
-
- msm_fb_debugfs_file_create(sub_dir, "xres",
- (u32 *) &mfd->panel_info.
- xres);
- msm_fb_debugfs_file_create(sub_dir, "yres",
- (u32 *) &mfd->panel_info.
- yres);
- msm_fb_debugfs_file_create(sub_dir, "bpp",
- (u32 *) &mfd->panel_info.
- bpp);
- msm_fb_debugfs_file_create(sub_dir, "type",
- (u32 *) &mfd->panel_info.
- type);
- msm_fb_debugfs_file_create(sub_dir, "wait_cycle",
- (u32 *) &mfd->panel_info.
- wait_cycle);
- msm_fb_debugfs_file_create(sub_dir, "pdest",
- (u32 *) &mfd->panel_info.
- pdest);
- msm_fb_debugfs_file_create(sub_dir, "backbuff",
- (u32 *) &mfd->panel_info.
- fb_num);
- msm_fb_debugfs_file_create(sub_dir, "clk_rate",
- (u32 *) &mfd->panel_info.
- clk_rate);
- msm_fb_debugfs_file_create(sub_dir, "frame_count",
- (u32 *) &mfd->panel_info.
- frame_count);
-
-
- switch (mfd->dest) {
- case DISPLAY_LCD:
- msm_fb_debugfs_file_create(sub_dir,
- "vsync_enable",
- (u32 *)&mfd->panel_info.lcd.vsync_enable);
- msm_fb_debugfs_file_create(sub_dir,
- "refx100",
- (u32 *) &mfd->panel_info.lcd. refx100);
- msm_fb_debugfs_file_create(sub_dir,
- "v_back_porch",
- (u32 *) &mfd->panel_info.lcd.v_back_porch);
- msm_fb_debugfs_file_create(sub_dir,
- "v_front_porch",
- (u32 *) &mfd->panel_info.lcd.v_front_porch);
- msm_fb_debugfs_file_create(sub_dir,
- "v_pulse_width",
- (u32 *) &mfd->panel_info.lcd.v_pulse_width);
- msm_fb_debugfs_file_create(sub_dir,
- "hw_vsync_mode",
- (u32 *) &mfd->panel_info.lcd.hw_vsync_mode);
- msm_fb_debugfs_file_create(sub_dir,
- "vsync_notifier_period", (u32 *)
- &mfd->panel_info.lcd.vsync_notifier_period);
- break;
-
- case DISPLAY_LCDC:
- msm_fb_debugfs_file_create(sub_dir,
- "h_back_porch",
- (u32 *) &mfd->panel_info.lcdc.h_back_porch);
- msm_fb_debugfs_file_create(sub_dir,
- "h_front_porch",
- (u32 *) &mfd->panel_info.lcdc.h_front_porch);
- msm_fb_debugfs_file_create(sub_dir,
- "h_pulse_width",
- (u32 *) &mfd->panel_info.lcdc.h_pulse_width);
- msm_fb_debugfs_file_create(sub_dir,
- "v_back_porch",
- (u32 *) &mfd->panel_info.lcdc.v_back_porch);
- msm_fb_debugfs_file_create(sub_dir,
- "v_front_porch",
- (u32 *) &mfd->panel_info.lcdc.v_front_porch);
- msm_fb_debugfs_file_create(sub_dir,
- "v_pulse_width",
- (u32 *) &mfd->panel_info.lcdc.v_pulse_width);
- msm_fb_debugfs_file_create(sub_dir,
- "border_clr",
- (u32 *) &mfd->panel_info.lcdc.border_clr);
- msm_fb_debugfs_file_create(sub_dir,
- "underflow_clr",
- (u32 *) &mfd->panel_info.lcdc.underflow_clr);
- msm_fb_debugfs_file_create(sub_dir,
- "hsync_skew",
- (u32 *) &mfd->panel_info.lcdc.hsync_skew);
- break;
-
- default:
- break;
- }
- }
- }
-#endif /* MSM_FB_ENABLE_DBGFS */
-
- return ret;
-}
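
msm_fb_register() above pads the pitch of fb0 so the GPU can render into it directly; as written, ALIGN() rounds the byte pitch up to the next multiple of 32. A sketch of the effect for a hypothetical 540-pixel-wide RGB888 panel (ALIGN_UP below does the same rounding as the kernel's ALIGN() for power-of-two alignments).

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int xres = 540;	/* hypothetical panel width */
	unsigned int bpp = 3;		/* RGB888 */

	unsigned int unaligned = xres * bpp;			/* 1620 */
	unsigned int fb0_pitch = ALIGN_UP(unaligned, 32);	/* 1632 */

	printf("unaligned pitch %u -> fb0 pitch %u bytes\n",
	       unaligned, fb0_pitch);
	return 0;
}

Since smem_len is pitch * yres * fb_page, the extra padding also grows the reserved framebuffer memory accordingly.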
-
-static int msm_fb_open(struct fb_info *info, int user)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-
- if (!mfd->ref_cnt) {
- mdp_set_dma_pan_info(info, NULL, TRUE);
-
- if (msm_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable)) {
- printk(KERN_ERR "msm_fb_open: can't turn on display!\n");
- return -1;
- }
- }
-
- mfd->ref_cnt++;
- return 0;
-}
-
-static int msm_fb_release(struct fb_info *info, int user)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- int ret = 0;
-
- if (!mfd->ref_cnt) {
- MSM_FB_INFO("msm_fb_release: try to close unopened fb %d!\n",
- mfd->index);
- return -EINVAL;
- }
-
- mfd->ref_cnt--;
-
- if (!mfd->ref_cnt) {
- if ((ret =
- msm_fb_blank_sub(FB_BLANK_POWERDOWN, info,
- mfd->op_enable)) != 0) {
- printk(KERN_ERR "msm_fb_release: can't turn off display!\n");
- return ret;
- }
- }
-
- return ret;
-}
-
-DEFINE_SEMAPHORE(msm_fb_pan_sem);
-
-static int msm_fb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- struct mdp_dirty_region dirty;
- struct mdp_dirty_region *dirtyPtr = NULL;
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-
- if ((!mfd->op_enable) || (!mfd->panel_power_on))
- return -EPERM;
-
- if (var->xoffset > (info->var.xres_virtual - info->var.xres))
- return -EINVAL;
-
- if (var->yoffset > (info->var.yres_virtual - info->var.yres))
- return -EINVAL;
-
- if (info->fix.xpanstep)
- info->var.xoffset =
- (var->xoffset / info->fix.xpanstep) * info->fix.xpanstep;
-
- if (info->fix.ypanstep)
- info->var.yoffset =
- (var->yoffset / info->fix.ypanstep) * info->fix.ypanstep;
-
- /* "UPDT" */
- if (var->reserved[0] == 0x54445055) {
- dirty.xoffset = var->reserved[1] & 0xffff;
- dirty.yoffset = (var->reserved[1] >> 16) & 0xffff;
-
- if ((var->reserved[2] & 0xffff) <= dirty.xoffset)
- return -EINVAL;
- if (((var->reserved[2] >> 16) & 0xffff) <= dirty.yoffset)
- return -EINVAL;
-
- dirty.width = (var->reserved[2] & 0xffff) - dirty.xoffset;
- dirty.height =
- ((var->reserved[2] >> 16) & 0xffff) - dirty.yoffset;
- info->var.yoffset = var->yoffset;
-
- if (dirty.xoffset < 0)
- return -EINVAL;
-
- if (dirty.yoffset < 0)
- return -EINVAL;
-
- if ((dirty.xoffset + dirty.width) > info->var.xres)
- return -EINVAL;
-
- if ((dirty.yoffset + dirty.height) > info->var.yres)
- return -EINVAL;
-
- if ((dirty.width <= 0) || (dirty.height <= 0))
- return -EINVAL;
-
- dirtyPtr = &dirty;
- }
-
- /* Flip */
- /* A constant value is used to indicate that we should change the DMA
- output buffer instead of just panning */
-
- if (var->reserved[0] == 0x466c6970) {
- unsigned long length, address;
- struct file *p_src_file;
- struct mdp_img imgdata;
- int bpp;
-
- if (mfd->allow_set_offset) {
- imgdata.memory_id = var->reserved[1];
- imgdata.priv = var->reserved[2];
-
- /* If there is no memory ID then we want to reset back
- to the original fb visibility */
- if (var->reserved[1]) {
- if (var->reserved[4] == MDP_BLIT_SRC_GEM) {
- panic("waaaaaaaaaaaaaah");
- if ( /*get_gem_img(&imgdata,
- (unsigned long *) &address,
- &length)*/ -1 < 0) {
- return -1;
- }
- } else {
- /*get_img(&imgdata, info, &address,
- &length, &p_src_file);*/
- panic("waaaaaah");
- }
- mfd->ibuf.visible_swapped = TRUE;
- } else {
- /* Flip back to the original address
- adjusted for xoffset and yoffset */
-
- bpp = info->var.bits_per_pixel / 8;
- address = (unsigned long) info->fix.smem_start;
- address += info->var.xoffset * bpp +
- info->var.yoffset * info->fix.line_length;
-
- mfd->ibuf.visible_swapped = FALSE;
- }
-
- mdp_set_offset_info(info, address,
- (var->activate == FB_ACTIVATE_VBL));
-
- mfd->dma_fnc(mfd);
- return 0;
- } else
- return -EINVAL;
- }
-
- down(&msm_fb_pan_sem);
- mdp_set_dma_pan_info(info, dirtyPtr,
- (var->activate == FB_ACTIVATE_VBL));
- mdp_dma_pan_update(info);
- up(&msm_fb_pan_sem);
-
- ++mfd->panel_info.frame_count;
- return 0;
-}
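
msm_fb_pan_display() above overloads fb_var_screeninfo.reserved[] as a small side channel: reserved[0] == 0x54445055 ("UPDT") marks a dirty-rectangle update, with reserved[1] packing the top-left corner and reserved[2] the exclusive bottom-right corner, each as (y << 16) | x. A hedged client-side sketch of issuing such an update through the standard FBIOPAN_DISPLAY ioctl; the rectangle coordinates are made up for illustration.

#include <fcntl.h>
#include <linux/fb.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Pack a corner the way msm_fb_pan_display() unpacks it. */
static __u32 pack_xy(__u32 x, __u32 y)
{
	return (y << 16) | x;
}

int main(void)
{
	struct fb_var_screeninfo var;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, FBIOGET_VSCREENINFO, &var)) {
		close(fd);
		return 1;
	}

	/* Hypothetical 64x32 dirty rectangle at (16, 8). */
	var.reserved[0] = 0x54445055;			/* "UPDT" */
	var.reserved[1] = pack_xy(16, 8);		/* top-left */
	var.reserved[2] = pack_xy(16 + 64, 8 + 32);	/* bottom-right */
	var.yoffset = 0;

	if (ioctl(fd, FBIOPAN_DISPLAY, &var)) {
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}

The driver rejects rectangles that spill past xres/yres, so a real client would clamp against the geometry returned by FBIOGET_VSCREENINFO first.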
-
-static int msm_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-
- if (var->rotate != FB_ROTATE_UR)
- return -EINVAL;
- if (var->grayscale != info->var.grayscale)
- return -EINVAL;
-
- switch (var->bits_per_pixel) {
- case 16:
- if ((var->green.offset != 5) ||
- !((var->blue.offset == 11)
- || (var->blue.offset == 0)) ||
- !((var->red.offset == 11)
- || (var->red.offset == 0)) ||
- (var->blue.length != 5) ||
- (var->green.length != 6) ||
- (var->red.length != 5) ||
- (var->blue.msb_right != 0) ||
- (var->green.msb_right != 0) ||
- (var->red.msb_right != 0) ||
- (var->transp.offset != 0) ||
- (var->transp.length != 0))
- return -EINVAL;
- break;
-
- case 24:
- if ((var->blue.offset != 0) ||
- (var->green.offset != 8) ||
- (var->red.offset != 16) ||
- (var->blue.length != 8) ||
- (var->green.length != 8) ||
- (var->red.length != 8) ||
- (var->blue.msb_right != 0) ||
- (var->green.msb_right != 0) ||
- (var->red.msb_right != 0) ||
- !(((var->transp.offset == 0) &&
- (var->transp.length == 0)) ||
- ((var->transp.offset == 24) &&
- (var->transp.length == 8))))
- return -EINVAL;
- break;
-
- default:
- return -EINVAL;
- }
-
- if ((var->xres_virtual <= 0) || (var->yres_virtual <= 0))
- return -EINVAL;
-
- if (info->fix.smem_len <
- (var->xres_virtual*var->yres_virtual*(var->bits_per_pixel/8)))
- return -EINVAL;
-
- if ((var->xres == 0) || (var->yres == 0))
- return -EINVAL;
-
- if ((var->xres > mfd->panel_info.xres) ||
- (var->yres > mfd->panel_info.yres))
- return -EINVAL;
-
- if (var->xoffset > (var->xres_virtual - var->xres))
- return -EINVAL;
-
- if (var->yoffset > (var->yres_virtual - var->yres))
- return -EINVAL;
-
- return 0;
-}
-
-static int msm_fb_set_par(struct fb_info *info)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- struct fb_var_screeninfo *var = &info->var;
- int old_imgType;
- int blank = 0;
-
- old_imgType = mfd->fb_imgType;
- switch (var->bits_per_pixel) {
- case 16:
- if (var->red.offset == 0)
- mfd->fb_imgType = MDP_BGR_565;
- else
- mfd->fb_imgType = MDP_RGB_565;
- break;
-
- case 24:
- if ((var->transp.offset == 0) && (var->transp.length == 0))
- mfd->fb_imgType = MDP_RGB_888;
- else if ((var->transp.offset == 24) &&
- (var->transp.length == 8)) {
- mfd->fb_imgType = MDP_ARGB_8888;
- info->var.bits_per_pixel = 32;
- }
- break;
-
- default:
- return -EINVAL;
- }
-
- if ((mfd->var_pixclock != var->pixclock) ||
- (mfd->hw_refresh && ((mfd->fb_imgType != old_imgType) ||
- (mfd->var_pixclock != var->pixclock) ||
- (mfd->var_xres != var->xres) ||
- (mfd->var_yres != var->yres)))) {
- mfd->var_xres = var->xres;
- mfd->var_yres = var->yres;
- mfd->var_pixclock = var->pixclock;
- blank = 1;
- }
-
- if (blank) {
- msm_fb_blank_sub(FB_BLANK_POWERDOWN, info, mfd->op_enable);
- msm_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable);
- }
-
- return 0;
-}
-
-static int msm_fb_stop_sw_refresher(struct msm_fb_data_type *mfd)
-{
- if (mfd->hw_refresh)
- return -EPERM;
-
- if (mfd->sw_currently_refreshing) {
- down(&mfd->sem);
- mfd->sw_currently_refreshing = FALSE;
- up(&mfd->sem);
-
- /* wait until the refresher finishes the last job */
- wait_for_completion_killable(&mfd->refresher_comp);
- }
-
- return 0;
-}
-
-int msm_fb_resume_sw_refresher(struct msm_fb_data_type *mfd)
-{
- boolean do_refresh;
-
- if (mfd->hw_refresh)
- return -EPERM;
-
- down(&mfd->sem);
- if ((!mfd->sw_currently_refreshing) && (mfd->sw_refreshing_enable)) {
- do_refresh = TRUE;
- mfd->sw_currently_refreshing = TRUE;
- } else {
- do_refresh = FALSE;
- }
- up(&mfd->sem);
-
- if (do_refresh)
- mdp_refresh_screen((unsigned long)mfd);
-
- return 0;
-}
-
-void mdp_ppp_put_img(struct file *p_src_file, struct file *p_dst_file)
-{
-#ifdef CONFIG_ANDROID_PMEM
- if (p_src_file)
- put_pmem_file(p_src_file);
- if (p_dst_file)
- put_pmem_file(p_dst_file);
-#endif
-}
-
-int mdp_blit(struct fb_info *info, struct mdp_blit_req *req)
-{
- int ret;
- struct file *p_src_file = 0, *p_dst_file = 0;
- if (unlikely(req->src_rect.h == 0 || req->src_rect.w == 0)) {
-		printk(KERN_ERR "mdp_ppp: src img of zero size!\n");
- return -EINVAL;
- }
- if (unlikely(req->dst_rect.h == 0 || req->dst_rect.w == 0))
- return 0;
-
- ret = mdp_ppp_blit(info, req, &p_src_file, &p_dst_file);
- mdp_ppp_put_img(p_src_file, p_dst_file);
- return ret;
-}
-
-typedef void (*msm_dma_barrier_function_pointer) (void *, size_t);
-
-static inline void msm_fb_dma_barrier_for_rect(struct fb_info *info,
- struct mdp_img *img, struct mdp_rect *rect,
- msm_dma_barrier_function_pointer dma_barrier_fp
- )
-{
- /*
- * Compute the start and end addresses of the rectangles.
- * NOTE: As currently implemented, the data between
- * the end of one row and the start of the next is
- * included in the address range rather than
- * doing multiple calls for each row.
- */
-
- char * const pmem_start = info->screen_base;
-/* int bytes_per_pixel = mdp_get_bytes_per_pixel(img->format);
- unsigned long start = (unsigned long)pmem_start + img->offset +
- (img->width * rect->y + rect->x) * bytes_per_pixel;
- size_t size = ((rect->h - 1) * img->width + rect->w) * bytes_per_pixel;
- (*dma_barrier_fp) ((void *) start, size);
-*/
- panic("waaaaah");
-}
-
-static inline void msm_dma_nc_pre(void)
-{
- dmb();
-}
-static inline void msm_dma_wt_pre(void)
-{
- dmb();
-}
-static inline void msm_dma_todevice_wb_pre(void *start, size_t size)
-{
- #warning this
-// dma_cache_pre_ops(start, size, DMA_TO_DEVICE);
-}
-
-static inline void msm_dma_fromdevice_wb_pre(void *start, size_t size)
-{
- #warning this
-// dma_cache_pre_ops(start, size, DMA_FROM_DEVICE);
-}
-
-static inline void msm_dma_nc_post(void)
-{
- dmb();
-}
-
-static inline void msm_dma_fromdevice_wt_post(void *start, size_t size)
-{
- #warning this
-// dma_cache_post_ops(start, size, DMA_FROM_DEVICE);
-}
-
-static inline void msm_dma_todevice_wb_post(void *start, size_t size)
-{
- #warning this
-// dma_cache_post_ops(start, size, DMA_TO_DEVICE);
-}
-
-static inline void msm_dma_fromdevice_wb_post(void *start, size_t size)
-{
- #warning this
-// dma_cache_post_ops(start, size, DMA_FROM_DEVICE);
-}
-
-/*
- * Do the write barriers required to guarantee data is committed to RAM
- * (from CPU cache or internal buffers) before a DMA operation starts.
- * NOTE: As currently implemented, the data between
- * the end of one row and the start of the next is
- * included in the address range rather than
- * doing multiple calls for each row.
-*/
-static void msm_fb_ensure_memory_coherency_before_dma(struct fb_info *info,
- struct mdp_blit_req *req_list,
- int req_list_count)
-{
-#ifdef CONFIG_ARCH_QSD8X50
- int i;
-
- /*
- * Normally, do the requested barriers for each address
- * range that corresponds to a rectangle.
- *
- * But if at least one write barrier is requested for data
- * going to or from the device but no address range is
- * needed for that barrier, then do the barrier, but do it
- * only once, no matter how many requests there are.
- */
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- switch (mfd->mdp_fb_page_protection) {
- default:
- case MDP_FB_PAGE_PROTECTION_NONCACHED:
- case MDP_FB_PAGE_PROTECTION_WRITECOMBINE:
- /*
- * The following barrier is only done at most once,
- * since further calls would be redundant.
- */
- for (i = 0; i < req_list_count; i++) {
- if (!(req_list[i].flags
- & MDP_NO_DMA_BARRIER_START)) {
- msm_dma_nc_pre();
- break;
- }
- }
- break;
-
- case MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE:
- /*
- * The following barrier is only done at most once,
- * since further calls would be redundant.
- */
- for (i = 0; i < req_list_count; i++) {
- if (!(req_list[i].flags
- & MDP_NO_DMA_BARRIER_START)) {
- msm_dma_wt_pre();
- break;
- }
- }
- break;
-
- case MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE:
- case MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE:
- for (i = 0; i < req_list_count; i++) {
- if (!(req_list[i].flags &
- MDP_NO_DMA_BARRIER_START)) {
-
- msm_fb_dma_barrier_for_rect(info,
- &(req_list[i].src),
- &(req_list[i].src_rect),
- msm_dma_todevice_wb_pre
- );
-
- msm_fb_dma_barrier_for_rect(info,
- &(req_list[i].dst),
- &(req_list[i].dst_rect),
- msm_dma_todevice_wb_pre
- );
- }
- }
- break;
- }
-#else
- dmb();
-#endif
-}
-
-
-/*
- * Do the write barriers required to guarantee data will be re-read from RAM by
- * the CPU after a DMA operation ends.
- * NOTE: As currently implemented, the data between
- * the end of one row and the start of the next is
- * included in the address range rather than
- * doing multiple calls for each row.
-*/
-static void msm_fb_ensure_memory_coherency_after_dma(struct fb_info *info,
- struct mdp_blit_req *req_list,
- int req_list_count)
-{
-#ifdef CONFIG_ARCH_QSD8X50
- int i;
-
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- switch (mfd->mdp_fb_page_protection) {
- default:
- case MDP_FB_PAGE_PROTECTION_NONCACHED:
- case MDP_FB_PAGE_PROTECTION_WRITECOMBINE:
- /*
- * The following barrier is only done at most once,
- * since further calls would be redundant.
- */
- for (i = 0; i < req_list_count; i++) {
- if (!(req_list[i].flags
- & MDP_NO_DMA_BARRIER_END)) {
- msm_dma_nc_post();
- break;
- }
- }
- break;
-
- case MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE:
- for (i = 0; i < req_list_count; i++) {
- if (!(req_list[i].flags &
- MDP_NO_DMA_BARRIER_END)) {
-
- msm_fb_dma_barrier_for_rect(info,
- &(req_list[i].dst),
- &(req_list[i].dst_rect),
- msm_dma_fromdevice_wt_post
- );
- }
- }
- break;
- case MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE:
- case MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE:
- for (i = 0; i < req_list_count; i++) {
- if (!(req_list[i].flags &
- MDP_NO_DMA_BARRIER_END)) {
-
- msm_fb_dma_barrier_for_rect(info,
- &(req_list[i].dst),
- &(req_list[i].dst_rect),
- msm_dma_fromdevice_wb_post
- );
- }
- }
- break;
- }
-#else
- dmb();
-#endif
-}
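/*
 * A minimal sketch of the same pre/post-DMA ordering expressed with the
 * generic streaming-DMA API instead of the hand-rolled, page-protection-aware
 * barriers above.  Assumptions for illustration only: "dev" is the MDP
 * device and "buf"/"len" describe the blit source in kernel virtual memory;
 * this is not the original driver's code path.
 */
#include <linux/dma-mapping.h>

static int sketch_blit_with_dma_api(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, bus))
		return -ENOMEM;

	/* map == "barrier before DMA": CPU caches are cleaned for the device */
	/* ... kick the MDP blit against "bus" here ... */

	/* unmap == "barrier after DMA": the CPU view is made coherent again */
	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
	return 0;
}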
-
-#ifdef CONFIG_MDP_PPP_ASYNC_OP
-void msm_fb_ensure_mem_coherency_after_dma(struct fb_info *info,
- struct mdp_blit_req *req_list, int req_list_count)
-{
- BUG_ON(!info);
-
- /*
- * Ensure that CPU cache and other internal CPU state is
- * updated to reflect any change in memory modified by MDP blit
- * DMA.
- */
- msm_fb_ensure_memory_coherency_after_dma(info,
- req_list, req_list_count);
-}
-
-static int msmfb_async_blit(struct fb_info *info, void __user *p)
-{
- /*
- * CAUTION: The names of the struct types intentionally *DON'T* match
- * the names of the variables declared -- they appear to be swapped.
- * Read the code carefully and you should see that the variable names
- * make sense.
- */
- const int MAX_LIST_WINDOW = 16;
- struct mdp_blit_req req_list[MAX_LIST_WINDOW];
- struct mdp_blit_req_list req_list_header;
-
- int count, i, req_list_count;
-
- /* Get the count size for the total BLIT request. */
- if (copy_from_user(&req_list_header, p, sizeof(req_list_header)))
- return -EFAULT;
- p += sizeof(req_list_header);
- count = req_list_header.count;
- while (count > 0) {
- /*
- * Access the requests through a narrow window to decrease copy
- * overhead and make larger requests accessible to the
- * coherency management code.
- * NOTE: The window size is intended to be larger than the
- * typical request size, but not require more than 2
- * kbytes of stack storage.
- */
- req_list_count = count;
- if (req_list_count > MAX_LIST_WINDOW)
- req_list_count = MAX_LIST_WINDOW;
- if (copy_from_user(&req_list, p,
- sizeof(struct mdp_blit_req)*req_list_count))
- return -EFAULT;
-
- /*
- * Ensure that any data CPU may have previously written to
- * internal state (but not yet committed to memory) is
- * guaranteed to be committed to memory now.
- */
- msm_fb_ensure_memory_coherency_before_dma(info,
- req_list, req_list_count);
-
- /*
- * Do the blit DMA, if required -- returning early only if
- * there is a failure.
- */
- for (i = 0; i < req_list_count; i++) {
- if (!(req_list[i].flags & MDP_NO_BLIT)) {
- int ret = 0;
- struct mdp_ppp_djob *job = NULL;
-
- if (unlikely(req_list[i].src_rect.h == 0 ||
- req_list[i].src_rect.w == 0)) {
-					MSM_FB_ERR("mdp_ppp: "
-						"src img of zero size!\n");
- return -EINVAL;
- }
-
- if (unlikely(req_list[i].dst_rect.h == 0 ||
- req_list[i].dst_rect.w == 0))
- continue;
-
- /* create a new display job */
- job = mdp_ppp_new_djob();
- if (unlikely(!job))
- return -ENOMEM;
-
- job->info = info;
- memcpy(&job->req, &req_list[i],
- sizeof(struct mdp_blit_req));
-
- /* Do the actual blit. */
- ret = mdp_ppp_blit(info, &job->req,
- &job->p_src_file, &job->p_dst_file);
-
- /*
- * Note that early returns don't guarantee
- * memory coherency.
- */
- if (ret || mdp_ppp_get_ret_code()) {
- mdp_ppp_clear_curr_djob();
- return ret;
- }
- }
- }
-
- /* Go to next window of requests. */
- count -= req_list_count;
- p += sizeof(struct mdp_blit_req)*req_list_count;
- }
- return 0;
-}
-#else
-
-/*
- * NOTE: The userspace issues blit operations in a sequence, the sequence
- * start with a operation marked START and ends in an operation marked
- * END. It is guaranteed by the userspace that all the blit operations
- * between START and END are only within the regions of areas designated
- * by the START and END operations and that the userspace doesn't modify
- * those areas. Hence it would be enough to perform barrier/cache operations
- * only on the START and END operations.
- */
-static int msmfb_blit(struct fb_info *info, void __user *p)
-{
- /*
- * CAUTION: The names of the struct types intentionally *DON'T* match
- * the names of the variables declared -- they appear to be swapped.
- * Read the code carefully and you should see that the variable names
- * make sense.
- */
- const int MAX_LIST_WINDOW = 16;
- struct mdp_blit_req req_list[MAX_LIST_WINDOW];
- struct mdp_blit_req_list req_list_header;
-
- int count, i, req_list_count;
-
- /* Get the count size for the total BLIT request. */
- if (copy_from_user(&req_list_header, p, sizeof(req_list_header)))
- return -EFAULT;
- p += sizeof(req_list_header);
- count = req_list_header.count;
- while (count > 0) {
- /*
- * Access the requests through a narrow window to decrease copy
- * overhead and make larger requests accessible to the
- * coherency management code.
- * NOTE: The window size is intended to be larger than the
- * typical request size, but not require more than 2
- * kbytes of stack storage.
- */
- req_list_count = count;
- if (req_list_count > MAX_LIST_WINDOW)
- req_list_count = MAX_LIST_WINDOW;
- if (copy_from_user(&req_list, p,
- sizeof(struct mdp_blit_req)*req_list_count))
- return -EFAULT;
-
- /*
- * Ensure that any data CPU may have previously written to
- * internal state (but not yet committed to memory) is
- * guaranteed to be committed to memory now.
- */
- msm_fb_ensure_memory_coherency_before_dma(info,
- req_list, req_list_count);
-
- /*
- * Do the blit DMA, if required -- returning early only if
- * there is a failure.
- */
- for (i = 0; i < req_list_count; i++) {
- if (!(req_list[i].flags & MDP_NO_BLIT)) {
- /* Do the actual blit. */
- int ret = mdp_blit(info, &(req_list[i]));
-
- /*
- * Note that early returns don't guarantee
- * memory coherency.
- */
- if (ret)
- return ret;
- }
- }
-
- /*
- * Ensure that CPU cache and other internal CPU state is
- * updated to reflect any change in memory modified by MDP blit
- * DMA.
- */
- msm_fb_ensure_memory_coherency_after_dma(info,
- req_list,
- req_list_count);
-
- /* Go to next window of requests. */
- count -= req_list_count;
- p += sizeof(struct mdp_blit_req)*req_list_count;
- }
- return 0;
-}
-#endif
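/*
 * Userspace view of the MSMFB_BLIT path above: the ioctl argument is a
 * struct mdp_blit_req_list header followed immediately by "count" packed
 * struct mdp_blit_req entries, which the driver then consumes in windows of
 * at most 16.  A minimal sketch; the already-open "fb_fd" descriptor and the
 * visibility of msm_mdp.h to userspace are assumptions, only the buffer
 * layout comes from the driver code above.
 */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "msm_mdp.h"	/* MSMFB_BLIT, struct mdp_blit_req{,_list} */

static int submit_one_blit(int fb_fd, const struct mdp_blit_req *req)
{
	size_t sz = sizeof(struct mdp_blit_req_list) + sizeof(*req);
	struct mdp_blit_req_list *list = calloc(1, sz);
	int ret;

	if (!list)
		return -1;
	list->count = 1;			/* one request in this batch */
	memcpy(&list->req[0], req, sizeof(*req));
	ret = ioctl(fb_fd, MSMFB_BLIT, list);
	free(list);
	return ret;
}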
-
-#ifdef CONFIG_FB_MSM_OVERLAY
-static int msmfb_overlay_get(struct fb_info *info, void __user *p)
-{
- struct mdp_overlay req;
- int ret;
-
- if (copy_from_user(&req, p, sizeof(req)))
- return -EFAULT;
-
- ret = mdp4_overlay_get(info, &req);
- if (ret) {
- printk(KERN_ERR "%s: ioctl failed \n",
- __func__);
- return ret;
- }
- if (copy_to_user(p, &req, sizeof(req))) {
- printk(KERN_ERR "%s: copy2user failed \n",
- __func__);
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int msmfb_overlay_set(struct fb_info *info, void __user *p)
-{
- struct mdp_overlay req;
- int ret;
-
- if (copy_from_user(&req, p, sizeof(req)))
- return -EFAULT;
-
- ret = mdp4_overlay_set(info, &req);
- if (ret) {
- printk(KERN_ERR "%s:ioctl failed \n",
- __func__);
- return ret;
- }
-
- if (copy_to_user(p, &req, sizeof(req))) {
- printk(KERN_ERR "%s: copy2user failed \n",
- __func__);
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int msmfb_overlay_unset(struct fb_info *info, unsigned long *argp)
-{
- int ret, ndx;
-
- ret = copy_from_user(&ndx, argp, sizeof(ndx));
- if (ret) {
- printk(KERN_ERR "%s:msmfb_overlay_unset ioctl failed \n",
- __func__);
- return ret;
- }
-
- return mdp4_overlay_unset(info, ndx);
-}
-
-static int msmfb_overlay_play(struct fb_info *info, unsigned long *argp)
-{
- int ret;
- struct msmfb_overlay_data req;
- struct file *p_src_file = 0;
-
- ret = copy_from_user(&req, argp, sizeof(req));
- if (ret) {
- printk(KERN_ERR "%s:msmfb_overlay_play ioctl failed \n",
- __func__);
- return ret;
- }
-
- ret = mdp4_overlay_play(info, &req, &p_src_file);
-
- if (p_src_file)
- put_pmem_file(p_src_file);
-
- return ret;
-}
-
-#endif
-
-DEFINE_SEMAPHORE(msm_fb_ioctl_ppp_sem);
-DEFINE_MUTEX(msm_fb_ioctl_lut_sem);
-DEFINE_MUTEX(msm_fb_ioctl_hist_sem);
-
-/* Set color conversion matrix from user space */
-
-#ifndef CONFIG_FB_MSM_MDP40
-static void msmfb_set_color_conv(struct mdp_ccs *p)
-{
- int i;
-
- if (p->direction == MDP_CCS_RGB2YUV) {
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- /* RGB->YUV primary forward matrix */
- for (i = 0; i < MDP_CCS_SIZE; i++)
- writel(p->ccs[i], MDP_CSC_PFMVn(i));
-
- #ifdef CONFIG_FB_MSM_MDP31
- for (i = 0; i < MDP_BV_SIZE; i++)
- writel(p->bv[i], MDP_CSC_POST_BV2n(i));
- #endif
-
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- } else {
- /* MDP cmd block enable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
- /* YUV->RGB primary reverse matrix */
- for (i = 0; i < MDP_CCS_SIZE; i++)
- writel(p->ccs[i], MDP_CSC_PRMVn(i));
- for (i = 0; i < MDP_BV_SIZE; i++)
- writel(p->bv[i], MDP_CSC_PRE_BV1n(i));
-
- /* MDP cmd block disable */
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
- }
-}
-#endif
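/*
 * What the CCS registers programmed above encode: a 3x3 colour-space matrix
 * plus a 1x3 bias vector, i.e. out = M * in + bv per pixel.  The sketch below
 * shows that arithmetic in plain integer C; the row-major layout of ccs[9]
 * and the omission of the hardware's fixed-point scaling are assumptions for
 * illustration, while struct mdp_ccs itself comes from msm_mdp.h in this
 * series.
 */
static void sketch_apply_ccs(const struct mdp_ccs *p,
			     const int in[3], int out[3])
{
	int i;

	for (i = 0; i < 3; i++)
		out[i] = p->ccs[3 * i + 0] * in[0] +
			 p->ccs[3 * i + 1] * in[1] +
			 p->ccs[3 * i + 2] * in[2] +
			 p->bv[i];
}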
-
-
-static int msm_fb_ioctl(struct fb_info *info, unsigned int cmd,
- unsigned long arg)
-{
- struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
- void __user *argp = (void __user *)arg;
- struct fb_cursor cursor;
- struct fb_cmap cmap;
- struct mdp_histogram hist;
-#ifndef CONFIG_FB_MSM_MDP40
- struct mdp_ccs ccs_matrix;
-#endif
- struct mdp_page_protection fb_page_protection;
- int ret = 0;
-
- if (!mfd->op_enable)
- return -EPERM;
-
- switch (cmd) {
-#ifdef CONFIG_FB_MSM_OVERLAY
- case MSMFB_OVERLAY_GET:
- down(&msm_fb_ioctl_ppp_sem);
- ret = msmfb_overlay_get(info, argp);
- up(&msm_fb_ioctl_ppp_sem);
- break;
- case MSMFB_OVERLAY_SET:
- down(&msm_fb_ioctl_ppp_sem);
- ret = msmfb_overlay_set(info, argp);
- up(&msm_fb_ioctl_ppp_sem);
- break;
- case MSMFB_OVERLAY_UNSET:
- down(&msm_fb_ioctl_ppp_sem);
- ret = msmfb_overlay_unset(info, argp);
- up(&msm_fb_ioctl_ppp_sem);
- break;
- case MSMFB_OVERLAY_PLAY:
- down(&msm_fb_ioctl_ppp_sem);
- ret = msmfb_overlay_play(info, argp);
- up(&msm_fb_ioctl_ppp_sem);
- break;
-#endif
- case MSMFB_BLIT:
- down(&msm_fb_ioctl_ppp_sem);
-#ifdef CONFIG_MDP_PPP_ASYNC_OP
- ret = msmfb_async_blit(info, argp);
- mdp_ppp_wait(); /* Wait for all blits to be finished. */
-#else
- ret = msmfb_blit(info, argp);
-#endif
- up(&msm_fb_ioctl_ppp_sem);
-
- break;
-
- /* Ioctl for setting ccs matrix from user space */
- case MSMFB_SET_CCS_MATRIX:
-#ifndef CONFIG_FB_MSM_MDP40
- ret = copy_from_user(&ccs_matrix, argp, sizeof(ccs_matrix));
- if (ret) {
- printk(KERN_ERR
- "%s:MSMFB_SET_CCS_MATRIX ioctl failed \n",
- __func__);
- return ret;
- }
-
- down(&msm_fb_ioctl_ppp_sem);
- if (ccs_matrix.direction == MDP_CCS_RGB2YUV)
- mdp_ccs_rgb2yuv = ccs_matrix;
- else
- mdp_ccs_yuv2rgb = ccs_matrix;
-
- msmfb_set_color_conv(&ccs_matrix) ;
- up(&msm_fb_ioctl_ppp_sem);
-#else
- ret = -EINVAL;
-#endif
-
- break;
-
- /* Ioctl for getting ccs matrix to user space */
- case MSMFB_GET_CCS_MATRIX:
-#ifndef CONFIG_FB_MSM_MDP40
- ret = copy_from_user(&ccs_matrix, argp, sizeof(ccs_matrix)) ;
- if (ret) {
- printk(KERN_ERR
- "%s:MSMFB_GET_CCS_MATRIX ioctl failed \n",
- __func__);
- return ret;
- }
-
- down(&msm_fb_ioctl_ppp_sem);
- if (ccs_matrix.direction == MDP_CCS_RGB2YUV)
- ccs_matrix = mdp_ccs_rgb2yuv;
- else
- ccs_matrix = mdp_ccs_yuv2rgb;
-
- ret = copy_to_user(argp, &ccs_matrix, sizeof(ccs_matrix));
-
- if (ret) {
- printk(KERN_ERR
- "%s:MSMFB_GET_CCS_MATRIX ioctl failed \n",
- __func__);
- return ret ;
- }
- up(&msm_fb_ioctl_ppp_sem);
-#else
- ret = -EINVAL;
-#endif
-
- break;
-
-#ifdef CONFIG_MDP_PPP_ASYNC_OP
- case MSMFB_ASYNC_BLIT:
- down(&msm_fb_ioctl_ppp_sem);
- ret = msmfb_async_blit(info, argp);
- up(&msm_fb_ioctl_ppp_sem);
- break;
-
- case MSMFB_BLIT_FLUSH:
- down(&msm_fb_ioctl_ppp_sem);
- mdp_ppp_wait();
- up(&msm_fb_ioctl_ppp_sem);
- break;
-#endif
-
- case MSMFB_GRP_DISP:
-#ifdef CONFIG_FB_MSM_MDP22
- {
- unsigned long grp_id;
-
- ret = copy_from_user(&grp_id, argp, sizeof(grp_id));
- if (ret)
- return ret;
-
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
- writel(grp_id, MDP_FULL_BYPASS_WORD43);
- mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF,
- FALSE);
- break;
- }
-#else
- return -EFAULT;
-#endif
- case MSMFB_SUSPEND_SW_REFRESHER:
- if (!mfd->panel_power_on)
- return -EPERM;
-
- mfd->sw_refreshing_enable = FALSE;
- ret = msm_fb_stop_sw_refresher(mfd);
- break;
-
- case MSMFB_RESUME_SW_REFRESHER:
- if (!mfd->panel_power_on)
- return -EPERM;
-
- mfd->sw_refreshing_enable = TRUE;
- ret = msm_fb_resume_sw_refresher(mfd);
- break;
-
- case MSMFB_CURSOR:
- ret = copy_from_user(&cursor, argp, sizeof(cursor));
- if (ret)
- return ret;
-
- ret = msm_fb_cursor(info, &cursor);
- break;
-
- case MSMFB_SET_LUT:
- ret = copy_from_user(&cmap, argp, sizeof(cmap));
- if (ret)
- return ret;
-
- mutex_lock(&msm_fb_ioctl_lut_sem);
- ret = msm_fb_set_lut(&cmap, info);
- mutex_unlock(&msm_fb_ioctl_lut_sem);
- break;
-
- case MSMFB_HISTOGRAM:
- if (!mfd->do_histogram)
- return -ENODEV;
-
- ret = copy_from_user(&hist, argp, sizeof(hist));
- if (ret)
- return ret;
-
- mutex_lock(&msm_fb_ioctl_hist_sem);
- ret = mfd->do_histogram(info, &hist);
- mutex_unlock(&msm_fb_ioctl_hist_sem);
- break;
-
- case MSMFB_GET_PAGE_PROTECTION:
- fb_page_protection.page_protection
- = mfd->mdp_fb_page_protection;
- ret = copy_to_user(argp, &fb_page_protection,
- sizeof(fb_page_protection));
- if (ret)
- return ret;
- break;
-
- case MSMFB_SET_PAGE_PROTECTION:
-#ifdef CONFIG_ARCH_QSD8X50
- ret = copy_from_user(&fb_page_protection, argp,
- sizeof(fb_page_protection));
- if (ret)
- return ret;
-
- /* Validate the proposed page protection settings. */
- switch (fb_page_protection.page_protection) {
- case MDP_FB_PAGE_PROTECTION_NONCACHED:
- case MDP_FB_PAGE_PROTECTION_WRITECOMBINE:
- case MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE:
- /* Write-back cache (read allocate) */
- case MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE:
- /* Write-back cache (write allocate) */
- case MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE:
- mfd->mdp_fb_page_protection =
- fb_page_protection.page_protection;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-#else
- /*
- * Don't allow caching until 7k DMA cache operations are
- * available.
- */
- ret = -EINVAL;
-#endif
- break;
-
- default:
- MSM_FB_INFO("MDP: unknown ioctl (cmd=%d) received!\n", cmd);
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
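/*
 * Userspace side of the MSMFB_SET_PAGE_PROTECTION case handled above: pick
 * one of the MDP_FB_PAGE_PROTECTION_* values and pass it in a
 * struct mdp_page_protection.  The driver only honours this ioctl on
 * QSD8X50 builds; elsewhere it returns -EINVAL.  Minimal sketch; "fb_fd" is
 * an assumed, already-open framebuffer descriptor.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include "msm_mdp.h"

static int request_writecombine(int fb_fd)
{
	struct mdp_page_protection prot = {
		.page_protection = MDP_FB_PAGE_PROTECTION_WRITECOMBINE,
	};

	if (ioctl(fb_fd, MSMFB_SET_PAGE_PROTECTION, &prot) < 0) {
		perror("MSMFB_SET_PAGE_PROTECTION");
		return -1;
	}
	return 0;
}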
-
-static int msm_fb_register_driver(void)
-{
- return platform_driver_register(&msm_fb_driver);
-}
-
-void msm_fb_add_device(struct platform_device *pdev)
-{
- struct msm_fb_panel_data *pdata;
- struct platform_device *this_dev = NULL;
- struct fb_info *fbi;
- struct msm_fb_data_type *mfd = NULL;
- u32 type, id, fb_num;
-
- if (!pdev)
- return;
- id = pdev->id;
-
- pdata = pdev->dev.platform_data;
- if (!pdata)
- return;
- type = pdata->panel_info.type;
- fb_num = pdata->panel_info.fb_num;
-
- if (fb_num <= 0)
- return;
-
- if (fbi_list_index >= MAX_FBI_LIST) {
- printk(KERN_ERR "msm_fb: no more framebuffer info list!\n");
- return;
- }
- /*
- * alloc panel device data
- */
- this_dev = msm_fb_device_alloc(pdata, type, id);
-
- if (!this_dev) {
- printk(KERN_ERR
- "%s: msm_fb_device_alloc failed!\n", __func__);
- return;
- }
-
- /*
- * alloc framebuffer info + par data
- */
- fbi = framebuffer_alloc(sizeof(struct msm_fb_data_type), NULL);
- if (fbi == NULL) {
- platform_device_put(this_dev);
-		printk(KERN_ERR "msm_fb: can't allocate framebuffer info data!\n");
- return;
- }
-
- mfd = (struct msm_fb_data_type *)fbi->par;
- mfd->key = MFD_KEY;
- mfd->fbi = fbi;
- mfd->panel.type = type;
- mfd->panel.id = id;
- mfd->fb_page = fb_num;
- mfd->index = fbi_list_index;
- mfd->mdp_fb_page_protection = MDP_FB_PAGE_PROTECTION_WRITECOMBINE;
-
- /* link to the latest pdev */
- mfd->pdev = this_dev;
-
- mfd_list[mfd_list_index++] = mfd;
- fbi_list[fbi_list_index++] = fbi;
-
- /*
- * set driver data
- */
- platform_set_drvdata(this_dev, mfd);
-
- if (platform_device_add(this_dev)) {
- printk(KERN_ERR "msm_fb: platform_device_add failed!\n");
- platform_device_put(this_dev);
- framebuffer_release(fbi);
- fbi_list_index--;
- return;
- }
-}
-EXPORT_SYMBOL(msm_fb_add_device);
-
-int __init msm_fb_init(void)
-{
- int rc = -ENODEV;
-
- if (msm_fb_register_driver())
- return rc;
-
-#ifdef MSM_FB_ENABLE_DBGFS
- {
- struct dentry *root;
-
- if ((root = msm_fb_get_debugfs_root()) != NULL) {
- msm_fb_debugfs_file_create(root,
- "msm_fb_msg_printing_level",
- (u32 *) &msm_fb_msg_level);
- msm_fb_debugfs_file_create(root,
- "mddi_msg_printing_level",
- (u32 *) &mddi_msg_level);
- msm_fb_debugfs_file_create(root, "msm_fb_debug_enabled",
- (u32 *) &msm_fb_debug_enabled);
- }
- }
-#endif
-
- return 0;
-}
-
-module_init(msm_fb_init);
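/*
 * The debugfs hooks registered in msm_fb_init() above go through the
 * driver's private msm_fb_debugfs_file_create() helper.  For comparison,
 * the message-level tunable could be exposed with the stock debugfs API
 * roughly as below; the "msm_fb" directory name is an assumption and this
 * is a sketch, not the original helper.
 */
#include <linux/debugfs.h>
#include "msm_fb_def.h"		/* extern u32 msm_fb_msg_level */

static struct dentry *msm_fb_dbg_root;

static void msm_fb_debugfs_sketch(void)
{
	msm_fb_dbg_root = debugfs_create_dir("msm_fb", NULL);
	/* 0644: root-writable tunable, mirrors msm_fb_msg_printing_level */
	debugfs_create_u32("msm_fb_msg_printing_level", 0644,
			   msm_fb_dbg_root, &msm_fb_msg_level);
}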
diff --git a/drivers/staging/msm/msm_fb.h b/drivers/staging/msm/msm_fb.h
deleted file mode 100644
index 4bca6d243f1..00000000000
--- a/drivers/staging/msm/msm_fb.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef MSM_FB_H
-#define MSM_FB_H
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include "linux/proc_fs.h"
-
-#include <mach/hardware.h>
-#include <linux/io.h>
-#include <mach/board.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <mach/memory.h>
-#include <linux/semaphore.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/hrtimer.h>
-
-#include <linux/fb.h>
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
-#include <linux/earlysuspend.h>
-#endif
-
-#include "msm_fb_panel.h"
-#include "mdp.h"
-
-#define MSM_FB_DEFAULT_PAGE_SIZE 2
-#define MFD_KEY 0x11161126
-#define MSM_FB_MAX_DEV_LIST 32
-
-struct disp_info_type_suspend {
- boolean op_enable;
- boolean sw_refreshing_enable;
- boolean panel_power_on;
-};
-
-struct msm_fb_data_type {
- __u32 key;
- __u32 index;
- __u32 ref_cnt;
- __u32 fb_page;
-
- panel_id_type panel;
- struct msm_panel_info panel_info;
-
- DISP_TARGET dest;
- struct fb_info *fbi;
-
- boolean op_enable;
- uint32 fb_imgType;
- boolean sw_currently_refreshing;
- boolean sw_refreshing_enable;
- boolean hw_refresh;
-
- MDPIBUF ibuf;
- boolean ibuf_flushed;
- struct timer_list refresh_timer;
- struct completion refresher_comp;
-
- boolean pan_waiting;
- struct completion pan_comp;
-
- /* vsync */
- boolean use_mdp_vsync;
- __u32 vsync_gpio;
- __u32 total_lcd_lines;
- __u32 total_porch_lines;
- __u32 lcd_ref_usec_time;
- __u32 refresh_timer_duration;
-
- struct hrtimer dma_hrtimer;
-
- boolean panel_power_on;
- struct work_struct dma_update_worker;
- struct semaphore sem;
-
- struct timer_list vsync_resync_timer;
- boolean vsync_handler_pending;
- struct work_struct vsync_resync_worker;
-
- ktime_t last_vsync_timetick;
-
- __u32 *vsync_width_boundary;
-
- unsigned int pmem_id;
- struct disp_info_type_suspend suspend;
-
- __u32 channel_irq;
-
- struct mdp_dma_data *dma;
- void (*dma_fnc) (struct msm_fb_data_type *mfd);
- int (*cursor_update) (struct fb_info *info,
- struct fb_cursor *cursor);
- int (*lut_update) (struct fb_info *info,
- struct fb_cmap *cmap);
- int (*do_histogram) (struct fb_info *info,
- struct mdp_histogram *hist);
- void *cursor_buf;
- void *cursor_buf_phys;
-
- void *cmd_port;
- void *data_port;
- void *data_port_phys;
-
- __u32 bl_level;
-
- struct platform_device *pdev;
-
- __u32 var_xres;
- __u32 var_yres;
- __u32 var_pixclock;
-
-#ifdef MSM_FB_ENABLE_DBGFS
- struct dentry *sub_dir;
-#endif
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
- struct early_suspend early_suspend;
- struct early_suspend mddi_early_suspend;
- struct early_suspend mddi_ext_early_suspend;
-#endif
- u32 mdp_fb_page_protection;
- int allow_set_offset;
-};
-
-struct dentry *msm_fb_get_debugfs_root(void);
-void msm_fb_debugfs_file_create(struct dentry *root, const char *name,
- u32 *var);
-void msm_fb_set_backlight(struct msm_fb_data_type *mfd, __u32 bkl_lvl,
- u32 save);
-
-void msm_fb_add_device(struct platform_device *pdev);
-
-int msm_fb_detect_client(const char *name);
-
-#ifdef CONFIG_FB_BACKLIGHT
-void msm_fb_config_backlight(struct msm_fb_data_type *mfd);
-#endif
-
-#endif /* MSM_FB_H */
diff --git a/drivers/staging/msm/msm_fb_bl.c b/drivers/staging/msm/msm_fb_bl.c
deleted file mode 100644
index 2a8077511fc..00000000000
--- a/drivers/staging/msm/msm_fb_bl.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include <linux/string.h>
-#include <linux/version.h>
-#include <linux/backlight.h>
-
-#include "msm_fb.h"
-
-static int msm_fb_bl_get_brightness(struct backlight_device *pbd)
-{
- return pbd->props.brightness;
-}
-
-static int msm_fb_bl_update_status(struct backlight_device *pbd)
-{
- struct msm_fb_data_type *mfd = bl_get_data(pbd);
- __u32 bl_lvl;
-
- bl_lvl = pbd->props.brightness;
- bl_lvl = mfd->fbi->bl_curve[bl_lvl];
- msm_fb_set_backlight(mfd, bl_lvl, 1);
- return 0;
-}
-
-static const struct backlight_ops msm_fb_bl_ops = {
- .get_brightness = msm_fb_bl_get_brightness,
- .update_status = msm_fb_bl_update_status,
-};
-
-void msm_fb_config_backlight(struct msm_fb_data_type *mfd)
-{
- struct msm_fb_panel_data *pdata;
- struct backlight_device *pbd;
- struct fb_info *fbi;
- char name[16];
-
- fbi = mfd->fbi;
- pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
-
- if ((pdata) && (pdata->set_backlight)) {
- snprintf(name, sizeof(name), "msmfb_bl%d", mfd->index);
- pbd =
- backlight_device_register(name, fbi->dev, mfd,
- &msm_fb_bl_ops);
- if (!IS_ERR(pbd)) {
- fbi->bl_dev = pbd;
- fb_bl_default_curve(fbi,
- 0,
- mfd->panel_info.bl_min,
- mfd->panel_info.bl_max);
- pbd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
- pbd->props.brightness = FB_BACKLIGHT_LEVELS - 1;
- backlight_update_status(pbd);
- } else {
- fbi->bl_dev = NULL;
- printk(KERN_ERR "msm_fb: backlight_device_register failed!\n");
- }
- }
-}
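/*
 * How msm_fb_bl_update_status() above turns a backlight "brightness" into a
 * panel level: the index goes through fbi->bl_curve[], which
 * fb_bl_default_curve() filled with values between the panel's bl_min and
 * bl_max.  A bounds-checked version of that lookup, as a sketch only
 * (assumes CONFIG_FB_BACKLIGHT, which this file already requires):
 */
#include <linux/fb.h>

static u32 sketch_brightness_to_level(struct fb_info *fbi, int brightness)
{
	if (brightness < 0)
		brightness = 0;
	if (brightness >= FB_BACKLIGHT_LEVELS)
		brightness = FB_BACKLIGHT_LEVELS - 1;
	return fbi->bl_curve[brightness];
}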
diff --git a/drivers/staging/msm/msm_fb_def.h b/drivers/staging/msm/msm_fb_def.h
deleted file mode 100644
index bc7f2562cc0..00000000000
--- a/drivers/staging/msm/msm_fb_def.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef MSM_FB_DEF_H
-#define MSM_FB_DEF_H
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/fb.h>
-#include "msm_mdp.h"
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/uaccess.h>
-#include <linux/workqueue.h>
-#include <linux/string.h>
-#include <linux/version.h>
-#include <linux/proc_fs.h>
-#include <linux/vmalloc.h>
-#include <linux/debugfs.h>
-#include <linux/console.h>
-
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/interrupt.h>
-#include <mach/hardware.h>
-#include <linux/io.h>
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/platform_device.h>
-
-typedef s64 int64;
-typedef s32 int32;
-typedef s16 int16;
-typedef s8 int8;
-
-typedef u64 uint64;
-typedef u32 uint32;
-typedef u16 uint16;
-typedef u8 uint8;
-
-typedef s32 int4;
-typedef s16 int2;
-typedef s8 int1;
-
-typedef u32 uint4;
-typedef u16 uint2;
-typedef u8 uint1;
-
-typedef u32 dword;
-typedef u16 word;
-typedef u8 byte;
-
-typedef unsigned int boolean;
-
-#ifndef TRUE
-#define TRUE 1
-#endif
-
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-#define MSM_FB_ENABLE_DBGFS
-#define FEATURE_MDDI
-
-#define outp32(addr, val) writel(val, addr)
-#define outp16(addr, val) writew(val, addr)
-#define outp8(addr, val) writeb(val, addr)
-#define outp(addr, val) outp32(addr, val)
-
-#ifndef MAX
-#define MAX( x, y ) (((x) > (y)) ? (x) : (y))
-#endif
-
-#ifndef MIN
-#define MIN( x, y ) (((x) < (y)) ? (x) : (y))
-#endif
-
-/*--------------------------------------------------------------------------*/
-
-#define inp32(addr) readl(addr)
-#define inp16(addr) readw(addr)
-#define inp8(addr) readb(addr)
-#define inp(addr) inp32(addr)
-
-#define inpw(port) readw(port)
-#define outpw(port, val) writew(val, port)
-#define inpdw(port) readl(port)
-#define outpdw(port, val) writel(val, port)
-
-
-#define clk_busy_wait(x) msleep_interruptible((x)/1000)
-
-#define memory_barrier()
-
-#define assert(expr) \
- if(!(expr)) { \
- printk(KERN_ERR "msm_fb: assertion failed! %s,%s,%s,line=%d\n",\
- #expr, __FILE__, __func__, __LINE__); \
- }
-
-#define ASSERT(x) assert(x)
-
-#define DISP_EBI2_LOCAL_DEFINE
-#ifdef DISP_EBI2_LOCAL_DEFINE
-#define LCD_PRIM_BASE_PHYS 0x98000000
-#define LCD_SECD_BASE_PHYS 0x9c000000
-#define EBI2_PRIM_LCD_RS_PIN 0x20000
-#define EBI2_SECD_LCD_RS_PIN 0x20000
-
-#define EBI2_PRIM_LCD_CLR 0xC0
-#define EBI2_PRIM_LCD_SEL 0x40
-
-#define EBI2_SECD_LCD_CLR 0x300
-#define EBI2_SECD_LCD_SEL 0x100
-#endif
-
-extern u32 msm_fb_msg_level;
-
-/*
- * Message printing priorities:
- * LEVEL 0 KERN_EMERG (highest priority)
- * LEVEL 1 KERN_ALERT
- * LEVEL 2 KERN_CRIT
- * LEVEL 3 KERN_ERR
- * LEVEL 4 KERN_WARNING
- * LEVEL 5 KERN_NOTICE
- * LEVEL 6 KERN_INFO
- * LEVEL 7 KERN_DEBUG (Lowest priority)
- */
-#define MSM_FB_EMERG(msg, ...) \
- if (msm_fb_msg_level > 0) \
- printk(KERN_EMERG msg, ## __VA_ARGS__);
-#define MSM_FB_ALERT(msg, ...) \
- if (msm_fb_msg_level > 1) \
- printk(KERN_ALERT msg, ## __VA_ARGS__);
-#define MSM_FB_CRIT(msg, ...) \
- if (msm_fb_msg_level > 2) \
- printk(KERN_CRIT msg, ## __VA_ARGS__);
-#define MSM_FB_ERR(msg, ...) \
- if (msm_fb_msg_level > 3) \
- printk(KERN_ERR msg, ## __VA_ARGS__);
-#define MSM_FB_WARNING(msg, ...) \
- if (msm_fb_msg_level > 4) \
- printk(KERN_WARNING msg, ## __VA_ARGS__);
-#define MSM_FB_NOTICE(msg, ...) \
- if (msm_fb_msg_level > 5) \
- printk(KERN_NOTICE msg, ## __VA_ARGS__);
-#define MSM_FB_INFO(msg, ...) \
- if (msm_fb_msg_level > 6) \
- printk(KERN_INFO msg, ## __VA_ARGS__);
-#define MSM_FB_DEBUG(msg, ...) \
- if (msm_fb_msg_level > 7) \
- printk(KERN_DEBUG msg, ## __VA_ARGS__);
-
-#ifdef MSM_FB_C
-unsigned char *msm_mdp_base;
-unsigned char *msm_pmdh_base;
-unsigned char *msm_emdh_base;
-#else
-extern unsigned char *msm_mdp_base;
-extern unsigned char *msm_pmdh_base;
-extern unsigned char *msm_emdh_base;
-#endif
-
-#endif /* MSM_FB_DEF_H */
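/*
 * Usage note for the MSM_FB_* macros above: each one prints only when
 * msm_fb_msg_level is strictly greater than the level's index, so a value
 * of 8 enables everything including MSM_FB_DEBUG().  A small sketch:
 */
#include "msm_fb_def.h"

static void sketch_msm_fb_logging(u32 xres, u32 yres)
{
	msm_fb_msg_level = 8;	/* > 7: even MSM_FB_DEBUG() is emitted */
	MSM_FB_INFO("panel %ux%u attached\n", xres, yres);
	MSM_FB_DEBUG("refresh timer armed\n");
}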
diff --git a/drivers/staging/msm/msm_fb_panel.c b/drivers/staging/msm/msm_fb_panel.c
deleted file mode 100644
index b17a239a1bc..00000000000
--- a/drivers/staging/msm/msm_fb_panel.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/uaccess.h>
-#include <linux/workqueue.h>
-#include <linux/string.h>
-#include <linux/version.h>
-#include <linux/proc_fs.h>
-#include <linux/vmalloc.h>
-#include <linux/debugfs.h>
-
-#include "msm_fb_panel.h"
-
-int panel_next_on(struct platform_device *pdev)
-{
- int ret = 0;
- struct msm_fb_panel_data *pdata;
- struct msm_fb_panel_data *next_pdata;
- struct platform_device *next_pdev;
-
- pdata = (struct msm_fb_panel_data *)pdev->dev.platform_data;
-
- if (pdata) {
- next_pdev = pdata->next;
- if (next_pdev) {
- next_pdata =
- (struct msm_fb_panel_data *)next_pdev->dev.
- platform_data;
- if ((next_pdata) && (next_pdata->on))
- ret = next_pdata->on(next_pdev);
- }
- }
-
- return ret;
-}
-
-int panel_next_off(struct platform_device *pdev)
-{
- int ret = 0;
- struct msm_fb_panel_data *pdata;
- struct msm_fb_panel_data *next_pdata;
- struct platform_device *next_pdev;
-
- pdata = (struct msm_fb_panel_data *)pdev->dev.platform_data;
-
- if (pdata) {
- next_pdev = pdata->next;
- if (next_pdev) {
- next_pdata =
- (struct msm_fb_panel_data *)next_pdev->dev.
- platform_data;
-			if ((next_pdata) && (next_pdata->off))
- ret = next_pdata->off(next_pdev);
- }
- }
-
- return ret;
-}
-
-struct platform_device *msm_fb_device_alloc(struct msm_fb_panel_data *pdata,
- u32 type, u32 id)
-{
- struct platform_device *this_dev = NULL;
- char dev_name[16];
-
- switch (type) {
- case EBI2_PANEL:
- snprintf(dev_name, sizeof(dev_name), "ebi2_lcd");
- break;
-
- case MDDI_PANEL:
- snprintf(dev_name, sizeof(dev_name), "mddi");
- break;
-
- case EXT_MDDI_PANEL:
- snprintf(dev_name, sizeof(dev_name), "mddi_ext");
- break;
-
- case TV_PANEL:
- snprintf(dev_name, sizeof(dev_name), "tvenc");
- break;
-
- case HDMI_PANEL:
- case LCDC_PANEL:
- snprintf(dev_name, sizeof(dev_name), "lcdc");
- break;
-
- default:
- return NULL;
- }
-
- if (pdata != NULL)
- pdata->next = NULL;
- else
- return NULL;
-
- this_dev =
- platform_device_alloc(dev_name, ((u32) type << 16) | (u32) id);
-
- if (this_dev) {
- if (platform_device_add_data
- (this_dev, pdata, sizeof(struct msm_fb_panel_data))) {
- printk
- ("msm_fb_device_alloc: platform_device_add_data failed!\n");
- platform_device_put(this_dev);
- return NULL;
- }
- }
-
- return this_dev;
-}
diff --git a/drivers/staging/msm/msm_fb_panel.h b/drivers/staging/msm/msm_fb_panel.h
deleted file mode 100644
index 6375976f09d..00000000000
--- a/drivers/staging/msm/msm_fb_panel.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef MSM_FB_PANEL_H
-#define MSM_FB_PANEL_H
-
-#include "msm_fb_def.h"
-
-struct msm_fb_data_type;
-
-typedef void (*msm_fb_vsync_handler_type) (void *arg);
-
-/* panel id type */
-typedef struct panel_id_s {
- uint16 id;
- uint16 type;
-} panel_id_type;
-
-/* panel type list */
-#define NO_PANEL 0xffff /* No Panel */
-#define MDDI_PANEL 1 /* MDDI */
-#define EBI2_PANEL 2 /* EBI2 */
-#define LCDC_PANEL 3 /* internal LCDC type */
-#define EXT_MDDI_PANEL 4 /* Ext.MDDI */
-#define TV_PANEL 5 /* TV */
-#define HDMI_PANEL 6 /* HDMI TV */
-
-/* panel class */
-typedef enum {
- DISPLAY_LCD = 0, /* lcd = ebi2/mddi */
- DISPLAY_LCDC, /* lcdc */
- DISPLAY_TV, /* TV Out */
- DISPLAY_EXT_MDDI, /* External MDDI */
-} DISP_TARGET;
-
-/* panel device location */
-typedef enum {
- DISPLAY_1 = 0, /* attached as first device */
- DISPLAY_2, /* attached on second device */
- MAX_PHYS_TARGET_NUM,
-} DISP_TARGET_PHYS;
-
-/* panel info type */
-struct lcd_panel_info {
- __u32 vsync_enable;
- __u32 refx100;
- __u32 v_back_porch;
- __u32 v_front_porch;
- __u32 v_pulse_width;
- __u32 hw_vsync_mode;
- __u32 vsync_notifier_period;
-};
-
-struct lcdc_panel_info {
- __u32 h_back_porch;
- __u32 h_front_porch;
- __u32 h_pulse_width;
- __u32 v_back_porch;
- __u32 v_front_porch;
- __u32 v_pulse_width;
- __u32 border_clr;
- __u32 underflow_clr;
- __u32 hsync_skew;
-};
-
-struct mddi_panel_info {
- __u32 vdopkt;
-};
-
-struct msm_panel_info {
- __u32 xres;
- __u32 yres;
- __u32 bpp;
- __u32 type;
- __u32 wait_cycle;
- DISP_TARGET_PHYS pdest;
- __u32 bl_max;
- __u32 bl_min;
- __u32 fb_num;
- __u32 clk_rate;
- __u32 clk_min;
- __u32 clk_max;
- __u32 frame_count;
-
- union {
- struct mddi_panel_info mddi;
- };
-
- union {
- struct lcd_panel_info lcd;
- struct lcdc_panel_info lcdc;
- };
-};
-
-struct msm_fb_panel_data {
- struct msm_panel_info panel_info;
- void (*set_rect) (int x, int y, int xres, int yres);
- void (*set_vsync_notifier) (msm_fb_vsync_handler_type, void *arg);
- void (*set_backlight) (struct msm_fb_data_type *);
-
- /* function entry chain */
- int (*on) (struct platform_device *pdev);
- int (*off) (struct platform_device *pdev);
- struct platform_device *next;
-};
-
-/*===========================================================================
- FUNCTIONS PROTOTYPES
-============================================================================*/
-struct platform_device *msm_fb_device_alloc(struct msm_fb_panel_data *pdata,
- u32 type, u32 id);
-int panel_next_on(struct platform_device *pdev);
-int panel_next_off(struct platform_device *pdev);
-
-int lcdc_device_register(struct msm_panel_info *pinfo);
-
-int mddi_toshiba_device_register(struct msm_panel_info *pinfo,
- u32 channel, u32 panel);
-
-#endif /* MSM_FB_PANEL_H */
diff --git a/drivers/staging/msm/msm_mdp.h b/drivers/staging/msm/msm_mdp.h
deleted file mode 100644
index 2d5323f5b62..00000000000
--- a/drivers/staging/msm/msm_mdp.h
+++ /dev/null
@@ -1,245 +0,0 @@
-/* include/linux/msm_mdp.h
- *
- * Copyright (C) 2007 Google Incorporated
- * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#ifndef _MSM_MDP_H_
-#define _MSM_MDP_H_
-
-#include <linux/types.h>
-#include <linux/fb.h>
-
-#define MSMFB_IOCTL_MAGIC 'm'
-#define MSMFB_GRP_DISP _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int)
-#define MSMFB_BLIT _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int)
-#define MSMFB_SUSPEND_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 128, unsigned int)
-#define MSMFB_RESUME_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 129, unsigned int)
-#define MSMFB_CURSOR _IOW(MSMFB_IOCTL_MAGIC, 130, struct fb_cursor)
-#define MSMFB_SET_LUT _IOW(MSMFB_IOCTL_MAGIC, 131, struct fb_cmap)
-#define MSMFB_HISTOGRAM _IOWR(MSMFB_IOCTL_MAGIC, 132, struct mdp_histogram)
-/* new ioctls's for set/get ccs matrix */
-#define MSMFB_GET_CCS_MATRIX _IOWR(MSMFB_IOCTL_MAGIC, 133, struct mdp_ccs)
-#define MSMFB_SET_CCS_MATRIX _IOW(MSMFB_IOCTL_MAGIC, 134, struct mdp_ccs)
-#define MSMFB_OVERLAY_SET _IOWR(MSMFB_IOCTL_MAGIC, 135, \
- struct mdp_overlay)
-#define MSMFB_OVERLAY_UNSET _IOW(MSMFB_IOCTL_MAGIC, 136, unsigned int)
-#define MSMFB_OVERLAY_PLAY _IOW(MSMFB_IOCTL_MAGIC, 137, \
- struct msmfb_overlay_data)
-#define MSMFB_GET_PAGE_PROTECTION _IOR(MSMFB_IOCTL_MAGIC, 138, \
- struct mdp_page_protection)
-#define MSMFB_SET_PAGE_PROTECTION _IOW(MSMFB_IOCTL_MAGIC, 139, \
- struct mdp_page_protection)
-#define MSMFB_OVERLAY_GET _IOR(MSMFB_IOCTL_MAGIC, 140, \
- struct mdp_overlay)
-
-/* new ioctls for async MDP ops */
-#define MSMFB_ASYNC_BLIT _IOW(MSMFB_IOCTL_MAGIC, 141, unsigned int)
-#define MSMFB_BLIT_FLUSH _IOR(MSMFB_IOCTL_MAGIC, 142, unsigned int)
-
-#define MDP_IMGTYPE2_START 0x10000
-
-enum {
-	MDP_RGB_565,	/* RGB 565 planar */
-	MDP_XRGB_8888,	/* RGB 888 padded */
-	MDP_Y_CBCR_H2V2,	/* Y and CbCr, pseudo planar w/ Cb is in MSB */
-	MDP_ARGB_8888,	/* ARGB 888 */
-	MDP_RGB_888,	/* RGB 888 planar */
-	MDP_Y_CRCB_H2V2,	/* Y and CrCb, pseudo planar w/ Cr is in MSB */
-	MDP_YCRYCB_H2V1,	/* YCrYCb interleave */
-	MDP_Y_CRCB_H2V1,	/* Y and CrCb, pseudo planar w/ Cr is in MSB */
-	MDP_Y_CBCR_H2V1,	/* Y and CbCr, pseudo planar w/ Cb is in MSB */
-	MDP_RGBA_8888,	/* ARGB 888 */
-	MDP_BGRA_8888,	/* ABGR 888 */
-	MDP_Y_CRCB_H2V2_TILE,	/* Y and CrCb, pseudo planar tile */
-	MDP_Y_CBCR_H2V2_TILE,	/* Y and CbCr, pseudo planar tile */
-	MDP_IMGTYPE_LIMIT,
-	MDP_BGR_565 = MDP_IMGTYPE2_START,      /* BGR 565 planar */
- MDP_FB_FORMAT, /* framebuffer format */
- MDP_IMGTYPE_LIMIT2 /* Non valid image type after this enum */
-};
-
-enum {
- PMEM_IMG,
- FB_IMG,
-};
-
-/* mdp_blit_req flag values */
-#define MDP_ROT_NOP 0
-#define MDP_FLIP_LR 0x1
-#define MDP_FLIP_UD 0x2
-#define MDP_ROT_90 0x4
-#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR)
-#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR)
-#define MDP_DITHER 0x8
-#define MDP_BLUR 0x10
-#define MDP_BLEND_FG_PREMULT 0x20000
-
-#define MDP_DEINTERLACE 0x80000000
-#define MDP_SHARPENING 0x40000000
-
-#define MDP_NO_DMA_BARRIER_START 0x20000000
-#define MDP_NO_DMA_BARRIER_END 0x10000000
-#define MDP_NO_BLIT 0x08000000
-#define MDP_BLIT_WITH_DMA_BARRIERS 0x000
-#define MDP_BLIT_WITH_NO_DMA_BARRIERS \
- (MDP_NO_DMA_BARRIER_START | MDP_NO_DMA_BARRIER_END)
-#define MDP_TRANSP_NOP 0xffffffff
-#define MDP_ALPHA_NOP 0xff
-
-#define MDP_BLIT_SRC_GEM 0x02000000 /* set for GEM, clear for PMEM */
-#define MDP_BLIT_DST_GEM 0x01000000 /* set for GEM, clear for PMEM */
-
-#define MDP_FB_PAGE_PROTECTION_NONCACHED (0)
-#define MDP_FB_PAGE_PROTECTION_WRITECOMBINE (1)
-#define MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE (2)
-#define MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE (3)
-#define MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE (4)
-/* Sentinel: Don't use! */
-#define MDP_FB_PAGE_PROTECTION_INVALID (5)
-/* Count of the number of MDP_FB_PAGE_PROTECTION_... values. */
-#define MDP_NUM_FB_PAGE_PROTECTION_VALUES (5)
-
-struct mdp_rect {
- uint32_t x;
- uint32_t y;
- uint32_t w;
- uint32_t h;
-};
-
-struct mdp_img {
- uint32_t width;
- uint32_t height;
- uint32_t format;
- uint32_t offset;
- int memory_id; /* the file descriptor */
- uint32_t priv;
-};
-
-/*
- * {3x3} + {3} ccs matrix
- */
-
-#define MDP_CCS_RGB2YUV 0
-#define MDP_CCS_YUV2RGB 1
-
-#define MDP_CCS_SIZE 9
-#define MDP_BV_SIZE 3
-
-struct mdp_ccs {
- int direction; /* MDP_CCS_RGB2YUV or YUV2RGB */
- uint16_t ccs[MDP_CCS_SIZE]; /* 3x3 color coefficients */
- uint16_t bv[MDP_BV_SIZE]; /* 1x3 bias vector */
-};
-
-/* The version of the mdp_blit_req structure so that
- * user applications can selectively decide which functionality
- * to include
- */
-
-#define MDP_BLIT_REQ_VERSION 2
-
-struct mdp_blit_req {
- struct mdp_img src;
- struct mdp_img dst;
- struct mdp_rect src_rect;
- struct mdp_rect dst_rect;
- uint32_t alpha;
- uint32_t transp_mask;
- uint32_t flags;
- int sharpening_strength; /* -127 <--> 127, default 64 */
-};
-
-struct mdp_blit_req_list {
- uint32_t count;
- struct mdp_blit_req req[];
-};
-
-struct msmfb_data {
- uint32_t offset;
- int memory_id;
- int id;
-};
-
-#define MSMFB_NEW_REQUEST -1
-
-struct msmfb_overlay_data {
- uint32_t id;
- struct msmfb_data data;
-};
-
-struct msmfb_img {
- uint32_t width;
- uint32_t height;
- uint32_t format;
-};
-
-struct mdp_overlay {
- struct msmfb_img src;
- struct mdp_rect src_rect;
- struct mdp_rect dst_rect;
- uint32_t z_order; /* stage number */
- uint32_t is_fg; /* control alpha & transp */
- uint32_t alpha;
- uint32_t transp_mask;
- uint32_t flags;
- uint32_t id;
- uint32_t user_data[8];
-};
-
-struct mdp_histogram {
- uint32_t frame_cnt;
- uint32_t bin_cnt;
- uint32_t *r;
- uint32_t *g;
- uint32_t *b;
-};
-
-struct mdp_page_protection {
- uint32_t page_protection;
-};
-
-
-struct msm_panel_common_pdata {
- int gpio;
- int (*backlight_level)(int level, int max, int min);
- int (*pmic_backlight)(int level);
- int (*panel_num)(void);
- void (*panel_config_gpio)(int);
- int *gpio_num;
-};
-
-struct lcdc_platform_data {
- int (*lcdc_gpio_config)(int on);
- void (*lcdc_power_save)(int);
-};
-
-struct tvenc_platform_data {
- int (*pm_vid_en)(int on);
-};
-
-struct mddi_platform_data {
- void (*mddi_power_save)(int on);
- int (*mddi_sel_clk)(u32 *clk_rate);
-};
-
-struct msm_fb_platform_data {
- int (*detect_client)(const char *name);
- int mddi_prescan;
- int (*allow_set_offset)(void);
-};
-
-struct msm_hdmi_platform_data {
- int irq;
- int (*cable_detect)(int insert);
-};
-
-#endif /*_MSM_MDP_H_*/
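/*
 * Putting the overlay ioctls above together from userspace: set up a pipe
 * with MSMFB_OVERLAY_SET (id = MSMFB_NEW_REQUEST asks the driver to allocate
 * one and return its id in the struct), then queue a buffer with
 * MSMFB_OVERLAY_PLAY.  Requires the MDP4 overlay path (CONFIG_FB_MSM_OVERLAY).
 * A sketch only: "fb_fd", the pmem descriptor and the 320x240 geometry are
 * illustrative assumptions, the structures come from this header.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "msm_mdp.h"

static int show_overlay(int fb_fd, int pmem_fd, uint32_t offset)
{
	struct mdp_overlay ov;
	struct msmfb_overlay_data od;

	memset(&ov, 0, sizeof(ov));
	ov.id = MSMFB_NEW_REQUEST;	/* ask the driver to allocate a pipe */
	ov.src.width = 320;
	ov.src.height = 240;
	ov.src.format = MDP_RGB_565;
	ov.src_rect = (struct mdp_rect){ 0, 0, 320, 240 };
	ov.dst_rect = (struct mdp_rect){ 0, 0, 320, 240 };
	ov.z_order = 0;
	ov.alpha = MDP_ALPHA_NOP;
	ov.transp_mask = MDP_TRANSP_NOP;

	if (ioctl(fb_fd, MSMFB_OVERLAY_SET, &ov) < 0)	/* fills in ov.id */
		return -1;

	memset(&od, 0, sizeof(od));
	od.id = ov.id;
	od.data.memory_id = pmem_fd;	/* pmem file descriptor backing the frame */
	od.data.offset = offset;
	return ioctl(fb_fd, MSMFB_OVERLAY_PLAY, &od);
}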
diff --git a/drivers/staging/msm/staging-devices.c b/drivers/staging/msm/staging-devices.c
deleted file mode 100644
index d6cd919469d..00000000000
--- a/drivers/staging/msm/staging-devices.c
+++ /dev/null
@@ -1,312 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/platform_device.h>
-#include <linux/bootmem.h>
-#include <linux/delay.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/io.h>
-#include <asm/setup.h>
-
-#include <mach/board.h>
-#include <mach/irqs.h>
-#include <mach/sirc.h>
-#include <mach/gpio.h>
-
-#include "msm_mdp.h"
-#include "memory_ll.h"
-//#include "android_pmem.h"
-
-#ifdef CONFIG_MSM_SOC_REV_A
-#define MSM_SMI_BASE 0xE0000000
-#else
-#define MSM_SMI_BASE 0x00000000
-#endif
-
-
-#define TOUCHPAD_SUSPEND 34
-#define TOUCHPAD_IRQ 38
-
-#define MSM_PMEM_MDP_SIZE 0x1591000
-
-#ifdef CONFIG_MSM_SOC_REV_A
-#define SMEM_SPINLOCK_I2C "D:I2C02000021"
-#else
-#define SMEM_SPINLOCK_I2C "S:6"
-#endif
-
-#define MSM_PMEM_ADSP_SIZE 0x1C00000
-
-#define MSM_FB_SIZE 0x500000
-#define MSM_FB_SIZE_ST15 0x800000
-#define MSM_AUDIO_SIZE 0x80000
-#define MSM_GPU_PHYS_SIZE SZ_2M
-
-#ifdef CONFIG_MSM_SOC_REV_A
-#define MSM_SMI_BASE 0xE0000000
-#else
-#define MSM_SMI_BASE 0x00000000
-#endif
-
-#define MSM_SHARED_RAM_PHYS (MSM_SMI_BASE + 0x00100000)
-
-#define MSM_PMEM_SMI_BASE (MSM_SMI_BASE + 0x02B00000)
-#define MSM_PMEM_SMI_SIZE 0x01500000
-
-#define MSM_FB_BASE MSM_PMEM_SMI_BASE
-#define MSM_GPU_PHYS_BASE (MSM_FB_BASE + MSM_FB_SIZE)
-#define MSM_PMEM_SMIPOOL_BASE (MSM_GPU_PHYS_BASE + MSM_GPU_PHYS_SIZE)
-#define MSM_PMEM_SMIPOOL_SIZE (MSM_PMEM_SMI_SIZE - MSM_FB_SIZE \
- - MSM_GPU_PHYS_SIZE)
-
-#if defined(CONFIG_FB_MSM_MDP40)
-#define MDP_BASE 0xA3F00000
-#define PMDH_BASE 0xAD600000
-#define EMDH_BASE 0xAD700000
-#define TVENC_BASE 0xAD400000
-#else
-#define MDP_BASE 0xAA200000
-#define PMDH_BASE 0xAA600000
-#define EMDH_BASE 0xAA700000
-#define TVENC_BASE 0xAA400000
-#endif
-
-#define PMEM_KERNEL_EBI1_SIZE (CONFIG_PMEM_KERNEL_SIZE * 1024 * 1024)
-
-static struct resource msm_fb_resources[] = {
- {
- .flags = IORESOURCE_DMA,
- }
-};
-
-static struct resource msm_mdp_resources[] = {
- {
- .name = "mdp",
- .start = MDP_BASE,
- .end = MDP_BASE + 0x000F0000 - 1,
- .flags = IORESOURCE_MEM,
- }
-};
-
-static struct platform_device msm_mdp_device = {
- .name = "mdp",
- .id = 0,
- .num_resources = ARRAY_SIZE(msm_mdp_resources),
- .resource = msm_mdp_resources,
-};
-
-static struct platform_device msm_lcdc_device = {
- .name = "lcdc",
- .id = 0,
-};
-
-static int msm_fb_detect_panel(const char *name)
-{
- int ret = -EPERM;
-
- if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa()) {
- if (!strncmp(name, "mddi_toshiba_wvga_pt", 20))
- ret = 0;
- else
- ret = -ENODEV;
- } else if ((machine_is_qsd8x50_surf() || machine_is_qsd8x50a_surf())
- && !strcmp(name, "lcdc_external"))
- ret = 0;
- else if (machine_is_qsd8x50a_st1_5()) {
- if (!strcmp(name, "lcdc_st15") ||
- !strcmp(name, "hdmi_sii9022"))
- ret = 0;
- else
- ret = -ENODEV;
- }
-
- return ret;
-}
-
-/* Only allow a small subset of machines to set the offset via
- FB PAN_DISPLAY */
-
-static int msm_fb_allow_set_offset(void)
-{
- return (machine_is_qsd8x50_st1() ||
- machine_is_qsd8x50a_st1_5()) ? 1 : 0;
-}
-
-
-static struct msm_fb_platform_data msm_fb_pdata = {
- .detect_client = msm_fb_detect_panel,
- .allow_set_offset = msm_fb_allow_set_offset,
-};
-
-static struct platform_device msm_fb_device = {
- .name = "msm_fb",
- .id = 0,
- .num_resources = ARRAY_SIZE(msm_fb_resources),
- .resource = msm_fb_resources,
- .dev = {
- .platform_data = &msm_fb_pdata,
- }
-};
-
-static void __init qsd8x50_allocate_memory_regions(void)
-{
- void *addr;
- unsigned long size;
- if (machine_is_qsd8x50a_st1_5())
- size = MSM_FB_SIZE_ST15;
- else
- size = MSM_FB_SIZE;
-
- addr = alloc_bootmem(size); // (void *)MSM_FB_BASE;
- if (!addr)
- printk("Failed to allocate bootmem for framebuffer\n");
-
-
- msm_fb_resources[0].start = __pa(addr);
- msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1;
- pr_info("using %lu bytes of SMI at %lx physical for fb\n",
- size, (unsigned long)addr);
-}
-
-static int msm_fb_lcdc_gpio_config(int on)
-{
-// return 0;
- if (machine_is_qsd8x50_st1()) {
- if (on) {
- gpio_set_value(32, 1);
- mdelay(100);
- gpio_set_value(20, 1);
- gpio_set_value(17, 1);
- gpio_set_value(19, 1);
- } else {
- gpio_set_value(17, 0);
- gpio_set_value(19, 0);
- gpio_set_value(20, 0);
- mdelay(100);
- gpio_set_value(32, 0);
- }
- } else if (machine_is_qsd8x50a_st1_5()) {
- if (on) {
- gpio_set_value(17, 1);
- gpio_set_value(19, 1);
- gpio_set_value(20, 1);
- gpio_set_value(22, 0);
- gpio_set_value(32, 1);
- gpio_set_value(155, 1);
- //st15_hdmi_power(1);
- gpio_set_value(22, 1);
-
- } else {
- gpio_set_value(17, 0);
- gpio_set_value(19, 0);
- gpio_set_value(22, 0);
- gpio_set_value(32, 0);
- gpio_set_value(155, 0);
- // st15_hdmi_power(0);
- }
- }
- return 0;
-}
-
-
-static struct lcdc_platform_data lcdc_pdata = {
- .lcdc_gpio_config = msm_fb_lcdc_gpio_config,
-};
-
-static struct msm_gpio msm_fb_st15_gpio_config_data[] = {
- { GPIO_CFG(17, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "lcdc_en0" },
- { GPIO_CFG(19, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "dat_pwr_sv" },
- { GPIO_CFG(20, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "lvds_pwr_dn" },
- { GPIO_CFG(22, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "lcdc_en1" },
- { GPIO_CFG(32, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "lcdc_en2" },
- { GPIO_CFG(103, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), "hdmi_irq" },
- { GPIO_CFG(155, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), "hdmi_3v3" },
-};
-
-static struct msm_panel_common_pdata mdp_pdata = {
- .gpio = 98,
-};
-
-static struct platform_device *devices[] __initdata = {
- &msm_fb_device,
-};
-
-
-static void __init msm_register_device(struct platform_device *pdev, void *data)
-{
- int ret;
-
- pdev->dev.platform_data = data;
-
- ret = platform_device_register(pdev);
- if (ret)
- dev_err(&pdev->dev,
- "%s: platform_device_register() failed = %d\n",
- __func__, ret);
-}
-
-void __init msm_fb_register_device(char *name, void *data)
-{
- if (!strncmp(name, "mdp", 3))
- msm_register_device(&msm_mdp_device, data);
-/*
- else if (!strncmp(name, "pmdh", 4))
- msm_register_device(&msm_mddi_device, data);
- else if (!strncmp(name, "emdh", 4))
- msm_register_device(&msm_mddi_ext_device, data);
- else if (!strncmp(name, "ebi2", 4))
- msm_register_device(&msm_ebi2_lcd_device, data);
- else if (!strncmp(name, "tvenc", 5))
- msm_register_device(&msm_tvenc_device, data);
- else */
-
- if (!strncmp(name, "lcdc", 4))
- msm_register_device(&msm_lcdc_device, data);
- /*else
- printk(KERN_ERR "%s: unknown device! %s\n", __func__, name);
-*/
-}
-
-static void __init msm_fb_add_devices(void)
-{
- int rc;
- msm_fb_register_device("mdp", &mdp_pdata);
-// msm_fb_register_device("pmdh", &mddi_pdata);
-// msm_fb_register_device("emdh", &mddi_pdata);
-// msm_fb_register_device("tvenc", 0);
-
- if (machine_is_qsd8x50a_st1_5()) {
-/* rc = st15_hdmi_vreg_init();
- if (rc)
- return;
-*/
- rc = msm_gpios_request_enable(
- msm_fb_st15_gpio_config_data,
- ARRAY_SIZE(msm_fb_st15_gpio_config_data));
- if (rc) {
- printk(KERN_ERR "%s: unable to init lcdc gpios\n",
- __func__);
- return;
- }
- msm_fb_register_device("lcdc", &lcdc_pdata);
- } else
- msm_fb_register_device("lcdc", 0);
-}
-
-int __init staging_init_pmem(void)
-{
- qsd8x50_allocate_memory_regions();
- return 0;
-}
-
-int __init staging_init_devices(void)
-{
- platform_add_devices(devices, ARRAY_SIZE(devices));
- msm_fb_add_devices();
- return 0;
-}
-
-arch_initcall(staging_init_pmem);
-arch_initcall(staging_init_devices);
diff --git a/drivers/staging/msm/tv_ntsc.c b/drivers/staging/msm/tv_ntsc.c
deleted file mode 100644
index 5eb67611661..00000000000
--- a/drivers/staging/msm/tv_ntsc.c
+++ /dev/null
@@ -1,163 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/uaccess.h>
-#include <linux/clk.h>
-
-#include "msm_fb.h"
-#include "tvenc.h"
-
-#define NTSC_TV_DIMENSION_WIDTH 720
-#define NTSC_TV_DIMENSION_HEIGHT 480
-
-static int ntsc_off(struct platform_device *pdev);
-static int ntsc_on(struct platform_device *pdev);
-
-static int ntsc_on(struct platform_device *pdev)
-{
- uint32 reg = 0;
- int ret = 0;
- struct msm_fb_data_type *mfd;
-
- mfd = platform_get_drvdata(pdev);
-
- if (!mfd)
- return -ENODEV;
-
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- TV_OUT(TV_ENC_CTL, 0); /* disable TV encoder */
-
- if (mfd->panel.id == NTSC_M) {
- /* Cr gain 11, Cb gain C6, y_gain 97 */
- TV_OUT(TV_GAIN, 0x0081B697);
- } else {
- /* Cr gain 11, Cb gain C6, y_gain 97 */
- TV_OUT(TV_GAIN, 0x008bc4a3);
- reg |= TVENC_CTL_NTSCJ_MODE;
- }
-
- TV_OUT(TV_CGMS, 0x0);
- /* NTSC Timing */
- TV_OUT(TV_SYNC_1, 0x0020009e);
- TV_OUT(TV_SYNC_2, 0x011306B4);
- TV_OUT(TV_SYNC_3, 0x0006000C);
- TV_OUT(TV_SYNC_4, 0x0028020D);
- TV_OUT(TV_SYNC_5, 0x005E02FB);
- TV_OUT(TV_SYNC_6, 0x0006000C);
- TV_OUT(TV_SYNC_7, 0x00000012);
- TV_OUT(TV_BURST_V1, 0x0013020D);
- TV_OUT(TV_BURST_V2, 0x0014020C);
- TV_OUT(TV_BURST_V3, 0x0013020D);
- TV_OUT(TV_BURST_V4, 0x0014020C);
- TV_OUT(TV_BURST_H, 0x00AE00F2);
- TV_OUT(TV_SOL_REQ_ODD, 0x00280208);
- TV_OUT(TV_SOL_REQ_EVEN, 0x00290209);
-
- reg |= TVENC_CTL_TV_MODE_NTSC_M_PAL60;
-
- reg |= TVENC_CTL_Y_FILTER_EN |
- TVENC_CTL_CR_FILTER_EN |
- TVENC_CTL_CB_FILTER_EN | TVENC_CTL_SINX_FILTER_EN;
-#ifdef CONFIG_FB_MSM_TVOUT_SVIDEO
- reg |= TVENC_CTL_S_VIDEO_EN;
-#endif
-
- TV_OUT(TV_LEVEL, 0x00000000); /* DC offset to 0. */
- TV_OUT(TV_OFFSET, 0x008080f0);
-
-#ifdef CONFIG_FB_MSM_MDP31
- TV_OUT(TV_DAC_INTF, 0x29);
-#endif
- TV_OUT(TV_ENC_CTL, reg);
-
- reg |= TVENC_CTL_ENC_EN;
- TV_OUT(TV_ENC_CTL, reg);
-
- return ret;
-}
-
-static int ntsc_off(struct platform_device *pdev)
-{
- TV_OUT(TV_ENC_CTL, 0); /* disable TV encoder */
- return 0;
-}
-
-static int __init ntsc_probe(struct platform_device *pdev)
-{
- msm_fb_add_device(pdev);
-
- return 0;
-}
-
-static struct platform_driver this_driver = {
- .probe = ntsc_probe,
- .driver = {
- .name = "tv_ntsc",
- },
-};
-
-static struct msm_fb_panel_data ntsc_panel_data = {
- .panel_info.xres = NTSC_TV_DIMENSION_WIDTH,
- .panel_info.yres = NTSC_TV_DIMENSION_HEIGHT,
- .panel_info.type = TV_PANEL,
- .panel_info.pdest = DISPLAY_1,
- .panel_info.wait_cycle = 0,
- .panel_info.bpp = 16,
- .panel_info.fb_num = 2,
- .on = ntsc_on,
- .off = ntsc_off,
-};
-
-static struct platform_device this_device = {
- .name = "tv_ntsc",
- .id = 0,
- .dev = {
- .platform_data = &ntsc_panel_data,
- }
-};
-
-static int __init ntsc_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&this_driver);
- if (!ret) {
- ret = platform_device_register(&this_device);
- if (ret)
- platform_driver_unregister(&this_driver);
- }
-
- return ret;
-}
-
-module_init(ntsc_init);
\ No newline at end of file
diff --git a/drivers/staging/msm/tv_pal.c b/drivers/staging/msm/tv_pal.c
deleted file mode 100644
index 204da514660..00000000000
--- a/drivers/staging/msm/tv_pal.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/uaccess.h>
-#include <linux/clk.h>
-
-#include "msm_fb.h"
-#include "tvenc.h"
-
-#ifdef CONFIG_FB_MSM_TVOUT_PAL_M
-#define PAL_TV_DIMENSION_WIDTH 720
-#define PAL_TV_DIMENSION_HEIGHT 480
-#else
-#define PAL_TV_DIMENSION_WIDTH 720
-#define PAL_TV_DIMENSION_HEIGHT 576
-#endif
-
-static int pal_on(struct platform_device *pdev)
-{
- uint32 reg = 0;
- int ret = 0;
- struct msm_fb_data_type *mfd;
-
- mfd = platform_get_drvdata(pdev);
-
- if (!mfd)
- return -ENODEV;
-
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- TV_OUT(TV_ENC_CTL, 0); /* disable TV encoder */
-
- switch (mfd->panel.id) {
- case PAL_BDGHIN:
- /* Cr gain 11, Cb gain C6, y_gain 97 */
- TV_OUT(TV_GAIN, 0x0088c1a0);
- TV_OUT(TV_CGMS, 0x00012345);
- TV_OUT(TV_TEST_MUX, 0x0);
- /* PAL Timing */
- TV_OUT(TV_SYNC_1, 0x00180097);
- TV_OUT(TV_SYNC_2, 0x011f06c0);
- TV_OUT(TV_SYNC_3, 0x0005000a);
- TV_OUT(TV_SYNC_4, 0x00320271);
- TV_OUT(TV_SYNC_5, 0x005602f9);
- TV_OUT(TV_SYNC_6, 0x0005000a);
- TV_OUT(TV_SYNC_7, 0x0000000f);
- TV_OUT(TV_BURST_V1, 0x0012026e);
- TV_OUT(TV_BURST_V2, 0x0011026d);
- TV_OUT(TV_BURST_V3, 0x00100270);
- TV_OUT(TV_BURST_V4, 0x0013026f);
- TV_OUT(TV_BURST_H, 0x00af00ea);
- TV_OUT(TV_SOL_REQ_ODD, 0x0030026e);
- TV_OUT(TV_SOL_REQ_EVEN, 0x0031026f);
-
- reg |= TVENC_CTL_TV_MODE_PAL_BDGHIN;
- break;
- case PAL_M:
- /* Cr gain 11, Cb gain C6, y_gain 97 */
- TV_OUT(TV_GAIN, 0x0081b697);
- TV_OUT(TV_CGMS, 0x000af317);
- TV_OUT(TV_TEST_MUX, 0x000001c3);
- TV_OUT(TV_TEST_MODE, 0x00000002);
- /* PAL Timing */
- TV_OUT(TV_SYNC_1, 0x0020009e);
- TV_OUT(TV_SYNC_2, 0x011306b4);
- TV_OUT(TV_SYNC_3, 0x0006000c);
- TV_OUT(TV_SYNC_4, 0x0028020D);
- TV_OUT(TV_SYNC_5, 0x005e02fb);
- TV_OUT(TV_SYNC_6, 0x0006000c);
- TV_OUT(TV_SYNC_7, 0x00000012);
- TV_OUT(TV_BURST_V1, 0x0012020b);
- TV_OUT(TV_BURST_V2, 0x0016020c);
- TV_OUT(TV_BURST_V3, 0x00150209);
- TV_OUT(TV_BURST_V4, 0x0013020c);
- TV_OUT(TV_BURST_H, 0x00bf010b);
- TV_OUT(TV_SOL_REQ_ODD, 0x00280208);
- TV_OUT(TV_SOL_REQ_EVEN, 0x00290209);
-
- reg |= TVENC_CTL_TV_MODE_PAL_M;
- break;
- case PAL_N:
- /* Cr gain 11, Cb gain C6, y_gain 97 */
- TV_OUT(TV_GAIN, 0x0081b697);
- TV_OUT(TV_CGMS, 0x000af317);
- TV_OUT(TV_TEST_MUX, 0x000001c3);
- TV_OUT(TV_TEST_MODE, 0x00000002);
- /* PAL Timing */
- TV_OUT(TV_SYNC_1, 0x00180097);
- TV_OUT(TV_SYNC_2, 0x12006c0);
- TV_OUT(TV_SYNC_3, 0x0005000a);
- TV_OUT(TV_SYNC_4, 0x00320271);
- TV_OUT(TV_SYNC_5, 0x005602f9);
- TV_OUT(TV_SYNC_6, 0x0005000a);
- TV_OUT(TV_SYNC_7, 0x0000000f);
- TV_OUT(TV_BURST_V1, 0x0012026e);
- TV_OUT(TV_BURST_V2, 0x0011026d);
- TV_OUT(TV_BURST_V3, 0x00100270);
- TV_OUT(TV_BURST_V4, 0x0013026f);
- TV_OUT(TV_BURST_H, 0x00af00fa);
- TV_OUT(TV_SOL_REQ_ODD, 0x0030026e);
- TV_OUT(TV_SOL_REQ_EVEN, 0x0031026f);
-
- reg |= TVENC_CTL_TV_MODE_PAL_N;
- break;
-
- default:
- return -ENODEV;
- }
-
- reg |= TVENC_CTL_Y_FILTER_EN |
- TVENC_CTL_CR_FILTER_EN |
- TVENC_CTL_CB_FILTER_EN | TVENC_CTL_SINX_FILTER_EN;
-#ifdef CONFIG_FB_MSM_TVOUT_SVIDEO
- reg |= TVENC_CTL_S_VIDEO_EN;
-#endif
-
- TV_OUT(TV_LEVEL, 0x00000000); /* DC offset to 0. */
- TV_OUT(TV_OFFSET, 0x008080f0);
-
-#ifdef CONFIG_FB_MSM_MDP31
- TV_OUT(TV_DAC_INTF, 0x29);
-#endif
- TV_OUT(TV_ENC_CTL, reg);
-
- reg |= TVENC_CTL_ENC_EN;
- TV_OUT(TV_ENC_CTL, reg);
-
- return ret;
-}
-
-static int pal_off(struct platform_device *pdev)
-{
- TV_OUT(TV_ENC_CTL, 0); /* disable TV encoder */
- return 0;
-}
-
-static int __init pal_probe(struct platform_device *pdev)
-{
- msm_fb_add_device(pdev);
-
- return 0;
-}
-
-static struct platform_driver this_driver = {
- .probe = pal_probe,
- .driver = {
- .name = "tv_pal",
- },
-};
-
-static struct msm_fb_panel_data pal_panel_data = {
- .panel_info.xres = PAL_TV_DIMENSION_WIDTH,
- .panel_info.yres = PAL_TV_DIMENSION_HEIGHT,
- .panel_info.type = TV_PANEL,
- .panel_info.pdest = DISPLAY_1,
- .panel_info.wait_cycle = 0,
- .panel_info.bpp = 16,
- .panel_info.fb_num = 2,
- .on = pal_on,
- .off = pal_off,
-};
-
-static struct platform_device this_device = {
- .name = "tv_pal",
- .id = 0,
- .dev = {
- .platform_data = &pal_panel_data,
- }
-};
-
-static int __init pal_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&this_driver);
- if (!ret) {
- ret = platform_device_register(&this_device);
- if (ret)
- platform_driver_unregister(&this_driver);
- }
-
- return ret;
-}
-
-module_init(pal_init);
diff --git a/drivers/staging/msm/tvenc.c b/drivers/staging/msm/tvenc.c
deleted file mode 100644
index 4fbb77b253d..00000000000
--- a/drivers/staging/msm/tvenc.c
+++ /dev/null
@@ -1,296 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-#include <linux/semaphore.h>
-#include <linux/uaccess.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/pm_qos_params.h>
-
-#define TVENC_C
-#include "tvenc.h"
-#include "msm_fb.h"
-
-static int tvenc_probe(struct platform_device *pdev);
-static int tvenc_remove(struct platform_device *pdev);
-
-static int tvenc_off(struct platform_device *pdev);
-static int tvenc_on(struct platform_device *pdev);
-
-static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
-static int pdev_list_cnt;
-
-static struct clk *tvenc_clk;
-static struct clk *tvdac_clk;
-
-static struct platform_driver tvenc_driver = {
- .probe = tvenc_probe,
- .remove = tvenc_remove,
- .suspend = NULL,
-// .suspend_late = NULL,
-// .resume_early = NULL,
- .resume = NULL,
- .shutdown = NULL,
- .driver = {
- .name = "tvenc",
- },
-};
-
-static struct tvenc_platform_data *tvenc_pdata;
-
-static int tvenc_off(struct platform_device *pdev)
-{
- int ret = 0;
-
- ret = panel_next_off(pdev);
-
- clk_disable(tvenc_clk);
- clk_disable(tvdac_clk);
-
- if (tvenc_pdata && tvenc_pdata->pm_vid_en)
- ret = tvenc_pdata->pm_vid_en(0);
-
- //pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ , "tvenc",
- // PM_QOS_DEFAULT_VALUE);
-
- if (ret)
- printk(KERN_ERR "%s: pm_vid_en(off) failed! %d\n",
- __func__, ret);
-
- return ret;
-}
-
-static int tvenc_on(struct platform_device *pdev)
-{
- int ret = 0;
-
-// pm_qos_update_requirement(PM_QOS_SYSTEM_BUS_FREQ , "tvenc",
-// 128000);
- if (tvenc_pdata && tvenc_pdata->pm_vid_en)
- ret = tvenc_pdata->pm_vid_en(1);
-
- if (ret) {
- printk(KERN_ERR "%s: pm_vid_en(on) failed! %d\n",
- __func__, ret);
- return ret;
- }
-
- clk_enable(tvenc_clk);
- clk_enable(tvdac_clk);
-
- ret = panel_next_on(pdev);
-
- return ret;
-}
-
-void tvenc_gen_test_pattern(struct msm_fb_data_type *mfd)
-{
- uint32 reg = 0, i;
-
- reg = readl(MSM_TV_ENC_CTL);
- reg |= TVENC_CTL_TEST_PATT_EN;
-
- for (i = 0; i < 3; i++) {
- TV_OUT(TV_ENC_CTL, 0); /* disable TV encoder */
-
- switch (i) {
- /*
- * TV Encoder - Color Bar Test Pattern
- */
- case 0:
- reg |= TVENC_CTL_TPG_CLRBAR;
- break;
- /*
- * TV Encoder - Red Frame Test Pattern
- */
- case 1:
- reg |= TVENC_CTL_TPG_REDCLR;
- break;
- /*
- * TV Encoder - Modulated Ramp Test Pattern
- */
- default:
- reg |= TVENC_CTL_TPG_MODRAMP;
- break;
- }
-
- TV_OUT(TV_ENC_CTL, reg);
- mdelay(5000);
-
- switch (i) {
- /*
- * TV Encoder - Color Bar Test Pattern
- */
- case 0:
- reg &= ~TVENC_CTL_TPG_CLRBAR;
- break;
- /*
- * TV Encoder - Red Frame Test Pattern
- */
- case 1:
- reg &= ~TVENC_CTL_TPG_REDCLR;
- break;
- /*
- * TV Encoder - Modulated Ramp Test Pattern
- */
- default:
- reg &= ~TVENC_CTL_TPG_MODRAMP;
- break;
- }
- }
-}
-
-static int tvenc_resource_initialized;
-
-static int tvenc_probe(struct platform_device *pdev)
-{
- struct msm_fb_data_type *mfd;
- struct platform_device *mdp_dev = NULL;
- struct msm_fb_panel_data *pdata = NULL;
- int rc;
-
- if (pdev->id == 0) {
- tvenc_base = ioremap(pdev->resource[0].start,
- pdev->resource[0].end -
- pdev->resource[0].start + 1);
- if (!tvenc_base) {
- printk(KERN_ERR
- "tvenc_base ioremap failed!\n");
- return -ENOMEM;
- }
- tvenc_pdata = pdev->dev.platform_data;
- tvenc_resource_initialized = 1;
- return 0;
- }
-
- if (!tvenc_resource_initialized)
- return -EPERM;
-
- mfd = platform_get_drvdata(pdev);
-
- if (!mfd)
- return -ENODEV;
-
- if (mfd->key != MFD_KEY)
- return -EINVAL;
-
- if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
- return -ENOMEM;
-
- if (tvenc_base == NULL)
- return -ENOMEM;
-
- mdp_dev = platform_device_alloc("mdp", pdev->id);
- if (!mdp_dev)
- return -ENOMEM;
-
- /*
- * link to the latest pdev
- */
- mfd->pdev = mdp_dev;
- mfd->dest = DISPLAY_TV;
-
- /*
- * alloc panel device data
- */
- if (platform_device_add_data
- (mdp_dev, pdev->dev.platform_data,
- sizeof(struct msm_fb_panel_data))) {
- printk(KERN_ERR "tvenc_probe: platform_device_add_data failed!\n");
- platform_device_put(mdp_dev);
- return -ENOMEM;
- }
- /*
- * data chain
- */
- pdata = mdp_dev->dev.platform_data;
- pdata->on = tvenc_on;
- pdata->off = tvenc_off;
- pdata->next = pdev;
-
- /*
- * get/set panel specific fb info
- */
- mfd->panel_info = pdata->panel_info;
- mfd->fb_imgType = MDP_YCRYCB_H2V1;
-
- /*
- * set driver data
- */
- platform_set_drvdata(mdp_dev, mfd);
-
- /*
- * register in mdp driver
- */
- rc = platform_device_add(mdp_dev);
- if (rc)
- goto tvenc_probe_err;
-
- pdev_list[pdev_list_cnt++] = pdev;
- return 0;
-
-tvenc_probe_err:
- platform_device_put(mdp_dev);
- return rc;
-}
-
-static int tvenc_remove(struct platform_device *pdev)
-{
-// pm_qos_remove_requirement(PM_QOS_SYSTEM_BUS_FREQ , "tvenc");
- return 0;
-}
-
-static int tvenc_register_driver(void)
-{
- return platform_driver_register(&tvenc_driver);
-}
-
-static int __init tvenc_driver_init(void)
-{
- tvenc_clk = clk_get(NULL, "tv_enc_clk");
- tvdac_clk = clk_get(NULL, "tv_dac_clk");
-
- if (IS_ERR(tvenc_clk)) {
- printk(KERN_ERR "error: can't get tvenc_clk!\n");
- return PTR_ERR(tvenc_clk);
- }
-
- if (IS_ERR(tvdac_clk)) {
- printk(KERN_ERR "error: can't get tvdac_clk!\n");
- clk_put(tvenc_clk);
- return PTR_ERR(tvdac_clk);
- }
-
-// pm_qos_add_requirement(PM_QOS_SYSTEM_BUS_FREQ , "tvenc",
-// PM_QOS_DEFAULT_VALUE);
- return tvenc_register_driver();
-}
-
-module_init(tvenc_driver_init);
diff --git a/drivers/staging/msm/tvenc.h b/drivers/staging/msm/tvenc.h
deleted file mode 100644
index 6bb375d7a5a..00000000000
--- a/drivers/staging/msm/tvenc.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef TVENC_H
-#define TVENC_H
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/fb.h>
-
-#include <mach/hardware.h>
-#include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
-
-#include "msm_fb_panel.h"
-
-#define NTSC_M 0 /* North America, Korea */
-#define NTSC_J 1 /* Japan */
-#define PAL_BDGHIN 2 /* Non-argentina PAL-N */
-#define PAL_M 3 /* PAL-M */
-#define PAL_N 4 /* Argentina PAL-N */
-
-/* 3.57954545 Mhz */
-#define TVENC_CTL_TV_MODE_NTSC_M_PAL60 0
-/* 3.57961149 Mhz */
-#define TVENC_CTL_TV_MODE_PAL_M BIT(0)
-/*non-Argintina = 4.3361875 Mhz */
-#define TVENC_CTL_TV_MODE_PAL_BDGHIN BIT(1)
-/*Argentina = 3.582055625 Mhz */
-#define TVENC_CTL_TV_MODE_PAL_N (BIT(1)|BIT(0))
-
-#define TVENC_CTL_ENC_EN BIT(2)
-#define TVENC_CTL_CC_EN BIT(3)
-#define TVENC_CTL_CGMS_EN BIT(4)
-#define TVENC_CTL_MACRO_EN BIT(5)
-#define TVENC_CTL_Y_FILTER_W_NOTCH BIT(6)
-#define TVENC_CTL_Y_FILTER_WO_NOTCH 0
-#define TVENC_CTL_Y_FILTER_EN BIT(7)
-#define TVENC_CTL_CR_FILTER_EN BIT(8)
-#define TVENC_CTL_CB_FILTER_EN BIT(9)
-#define TVENC_CTL_SINX_FILTER_EN BIT(10)
-#define TVENC_CTL_TEST_PATT_EN BIT(11)
-#define TVENC_CTL_OUTPUT_INV BIT(12)
-#define TVENC_CTL_PAL60_MODE BIT(13)
-#define TVENC_CTL_NTSCJ_MODE BIT(14)
-#define TVENC_CTL_TPG_CLRBAR 0
-#define TVENC_CTL_TPG_MODRAMP BIT(15)
-#define TVENC_CTL_TPG_REDCLR BIT(16)
-#define TVENC_CTL_S_VIDEO_EN BIT(19)
-
-#ifdef TVENC_C
-void *tvenc_base;
-#else
-extern void *tvenc_base;
-#endif
-
-#define TV_OUT(reg, v) writel(v, tvenc_base + MSM_##reg)
-
-#define MSM_TV_ENC_CTL 0x00
-#define MSM_TV_LEVEL 0x04
-#define MSM_TV_GAIN 0x08
-#define MSM_TV_OFFSET 0x0c
-#define MSM_TV_CGMS 0x10
-#define MSM_TV_SYNC_1 0x14
-#define MSM_TV_SYNC_2 0x18
-#define MSM_TV_SYNC_3 0x1c
-#define MSM_TV_SYNC_4 0x20
-#define MSM_TV_SYNC_5 0x24
-#define MSM_TV_SYNC_6 0x28
-#define MSM_TV_SYNC_7 0x2c
-#define MSM_TV_BURST_V1 0x30
-#define MSM_TV_BURST_V2 0x34
-#define MSM_TV_BURST_V3 0x38
-#define MSM_TV_BURST_V4 0x3c
-#define MSM_TV_BURST_H 0x40
-#define MSM_TV_SOL_REQ_ODD 0x44
-#define MSM_TV_SOL_REQ_EVEN 0x48
-#define MSM_TV_DAC_CTL 0x4c
-#define MSM_TV_TEST_MUX 0x50
-#define MSM_TV_TEST_MODE 0x54
-#define MSM_TV_TEST_MISR_RESET 0x58
-#define MSM_TV_TEST_EXPORT_MISR 0x5c
-#define MSM_TV_TEST_MISR_CURR_VAL 0x60
-#define MSM_TV_TEST_SOF_CFG 0x64
-#define MSM_TV_DAC_INTF 0x100
-
-#endif /* TVENC_H */
diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO
index 77b47f763f2..649d6b70dea 100644
--- a/drivers/staging/nvec/TODO
+++ b/drivers/staging/nvec/TODO
@@ -4,5 +4,7 @@ ToDo list (incomplete, unordered)
- add compile as module support
- move nvec devices to mfd cells?
- adjust to kernel style
-
-
+ - fix clk usage
+ should not be using clk_get_sys(), but clk_get(&pdev->dev, conn)
+ where conn is either NULL if the device only has one clock, or
+ the device specific name if it has multiple clocks.
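As an illustration of the clk usage the TODO hunk above asks for, here is a minimal sketch (not part of this commit); the nvec_probe() body and the "div-clk" connection id are placeholders for illustration, not the driver's actual code.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Minimal sketch of the preferred lookup: clk_get() against the device
 * rather than clk_get_sys(); names below are illustrative only. */
static int nvec_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* Device with a single clock: pass NULL as the connection id. */
	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);

	/*
	 * If the device had several clocks, the device-specific name would
	 * select one, e.g. clk_get(&pdev->dev, "div-clk").
	 */
	return 0;
}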
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 1a94364c48b..72258e8c64c 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -21,7 +21,6 @@
#include <linux/semaphore.h>
#include <linux/list.h>
#include <linux/notifier.h>
-#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include "nvec.h"
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 8a11ffcd7de..f18e3e14041 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/ratelimit.h>
#include <net/dst.h>
@@ -129,22 +130,22 @@ static void cvm_oct_adjust_link(struct net_device *dev)
if (priv->last_link) {
netif_carrier_on(dev);
if (priv->queue != -1)
- DEBUGPRINT("%s: %u Mbps %s duplex, "
- "port %2d, queue %2d\n",
- dev->name, priv->phydev->speed,
- priv->phydev->duplex ?
- "Full" : "Half",
- priv->port, priv->queue);
+ printk_ratelimited("%s: %u Mbps %s duplex, "
+ "port %2d, queue %2d\n",
+ dev->name, priv->phydev->speed,
+ priv->phydev->duplex ?
+ "Full" : "Half",
+ priv->port, priv->queue);
else
- DEBUGPRINT("%s: %u Mbps %s duplex, "
- "port %2d, POW\n",
- dev->name, priv->phydev->speed,
- priv->phydev->duplex ?
- "Full" : "Half",
- priv->port);
+ printk_ratelimited("%s: %u Mbps %s duplex, "
+ "port %2d, POW\n",
+ dev->name, priv->phydev->speed,
+ priv->phydev->duplex ?
+ "Full" : "Half",
+ priv->port);
} else {
netif_carrier_off(dev);
- DEBUGPRINT("%s: Link down\n", dev->name);
+ printk_ratelimited("%s: Link down\n", dev->name);
}
}
}
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index a0d4d4b98bd..9c0d2936e48 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
+#include <linux/ratelimit.h>
#include <net/dst.h>
#include <asm/octeon/octeon.h>
@@ -116,9 +117,9 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
(index, interface),
gmxx_rxx_int_reg.u64);
- DEBUGPRINT("%s: Using 10Mbps with software "
- "preamble removal\n",
- dev->name);
+ printk_ratelimited("%s: Using 10Mbps with software "
+ "preamble removal\n",
+ dev->name);
}
}
@@ -174,23 +175,23 @@ static void cvm_oct_rgmii_poll(struct net_device *dev)
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
if (priv->queue != -1)
- DEBUGPRINT("%s: %u Mbps %s duplex, "
- "port %2d, queue %2d\n",
- dev->name, link_info.s.speed,
- (link_info.s.full_duplex) ?
- "Full" : "Half",
- priv->port, priv->queue);
+ printk_ratelimited("%s: %u Mbps %s duplex, "
+ "port %2d, queue %2d\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ?
+ "Full" : "Half",
+ priv->port, priv->queue);
else
- DEBUGPRINT("%s: %u Mbps %s duplex, "
- "port %2d, POW\n",
- dev->name, link_info.s.speed,
- (link_info.s.full_duplex) ?
- "Full" : "Half",
- priv->port);
+ printk_ratelimited("%s: %u Mbps %s duplex, "
+ "port %2d, POW\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ?
+ "Full" : "Half",
+ priv->port);
} else {
if (netif_carrier_ok(dev))
netif_carrier_off(dev);
- DEBUGPRINT("%s: Link down\n", dev->name);
+ printk_ratelimited("%s: Link down\n", dev->name);
}
}
}
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index cb38f9eb2cc..1a7c19ae766 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -34,6 +34,7 @@
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
+#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
@@ -41,7 +42,7 @@
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/octeon/octeon.h>
@@ -186,13 +187,13 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
if (*ptr == 0xd5) {
/*
- DEBUGPRINT("Port %d received 0xd5 preamble\n", work->ipprt);
+ printk_ratelimited("Port %d received 0xd5 preamble\n", work->ipprt);
*/
work->packet_ptr.s.addr += i + 1;
work->len -= i + 5;
} else if ((*ptr & 0xf) == 0xd) {
/*
- DEBUGPRINT("Port %d received 0x?d preamble\n", work->ipprt);
+ printk_ratelimited("Port %d received 0x?d preamble\n", work->ipprt);
*/
work->packet_ptr.s.addr += i;
work->len -= i + 4;
@@ -203,9 +204,9 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
ptr++;
}
} else {
- DEBUGPRINT("Port %d unknown preamble, packet "
- "dropped\n",
- work->ipprt);
+ printk_ratelimited("Port %d unknown preamble, packet "
+ "dropped\n",
+ work->ipprt);
/*
cvmx_helper_dump_packet(work);
*/
@@ -214,8 +215,8 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
}
}
} else {
- DEBUGPRINT("Port %d receive error code %d, packet dropped\n",
- work->ipprt, work->word2.snoip.err_code);
+ printk_ratelimited("Port %d receive error code %d, packet dropped\n",
+ work->ipprt, work->word2.snoip.err_code);
cvm_oct_free_work(work);
return 1;
}
@@ -334,8 +335,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
*/
skb = dev_alloc_skb(work->len);
if (!skb) {
- DEBUGPRINT("Port %d failed to allocate skbuff, packet dropped\n",
- work->ipprt);
+ printk_ratelimited("Port %d failed to allocate "
+ "skbuff, packet dropped\n",
+ work->ipprt);
cvm_oct_free_work(work);
continue;
}
@@ -429,7 +431,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
} else {
/* Drop any packet received for a device that isn't up */
/*
- DEBUGPRINT("%s: Device not up, packet dropped\n",
+ printk_ratelimited("%s: Device not up, packet dropped\n",
dev->name);
*/
#ifdef CONFIG_64BIT
@@ -444,7 +446,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
* Drop any packet received for a device that
* doesn't exist.
*/
- DEBUGPRINT("Port %d not controlled by Linux, packet dropped\n",
+ printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
work->ipprt);
dev_kfree_skb_irq(skb);
}
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 2d8589eb461..5e148b512c9 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -26,6 +26,7 @@
**********************************************************************/
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/ratelimit.h>
#include <net/dst.h>
#include <asm/octeon/octeon.h>
@@ -90,20 +91,21 @@ static void cvm_oct_sgmii_poll(struct net_device *dev)
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
if (priv->queue != -1)
- DEBUGPRINT
+ printk_ratelimited
("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
dev->name, link_info.s.speed,
(link_info.s.full_duplex) ? "Full" : "Half",
priv->port, priv->queue);
else
- DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n",
- dev->name, link_info.s.speed,
- (link_info.s.full_duplex) ? "Full" : "Half",
- priv->port);
+ printk_ratelimited
+ ("%s: %u Mbps %s duplex, port %2d, POW\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ? "Full" : "Half",
+ priv->port);
} else {
if (netif_carrier_ok(dev))
netif_carrier_off(dev);
- DEBUGPRINT("%s: Link down\n", dev->name);
+ printk_ratelimited("%s: Link down\n", dev->name);
}
}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index afc2b734d55..b445cd63f90 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -30,6 +30,7 @@
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
+#include <linux/ratelimit.h>
#include <linux/string.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
@@ -37,7 +38,7 @@
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/octeon/octeon.h>
@@ -446,7 +447,7 @@ dont_put_skbuff_in_hw:
priv->queue + qos,
pko_command, hw_buffer,
CVMX_PKO_LOCK_NONE))) {
- DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
+ printk_ratelimited("%s: Failed to send the packet\n", dev->name);
queue_type = QUEUE_DROP;
}
skip_xmit:
@@ -525,8 +526,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
/* Get a work queue entry */
cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
if (unlikely(work == NULL)) {
- DEBUGPRINT("%s: Failed to allocate a work queue entry\n",
- dev->name);
+ printk_ratelimited("%s: Failed to allocate a work "
+ "queue entry\n", dev->name);
priv->stats.tx_dropped++;
dev_kfree_skb(skb);
return 0;
@@ -535,8 +536,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
/* Get a packet buffer */
packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
if (unlikely(packet_buffer == NULL)) {
- DEBUGPRINT("%s: Failed to allocate a packet buffer\n",
- dev->name);
+ printk_ratelimited("%s: Failed to allocate a packet buffer\n",
+ dev->name);
cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
priv->stats.tx_dropped++;
dev_kfree_skb(skb);
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index c745a72a059..144fb99bf50 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -25,10 +25,6 @@
* Contact Cavium Networks for more information
*********************************************************************/
-#define DEBUGPRINT(format, ...) do { if (printk_ratelimit()) \
- printk(format, ##__VA_ARGS__); \
- } while (0)
-
/**
* cvm_oct_get_buffer_ptr - convert packet data address to pointer
* @packet_ptr: Packet data hardware address
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
index 3fca1cc31ed..861a4b3fe85 100644
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -26,6 +26,7 @@
**********************************************************************/
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/ratelimit.h>
#include <net/dst.h>
#include <asm/octeon/octeon.h>
@@ -89,20 +90,21 @@ static void cvm_oct_xaui_poll(struct net_device *dev)
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
if (priv->queue != -1)
- DEBUGPRINT
- ("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
- dev->name, link_info.s.speed,
- (link_info.s.full_duplex) ? "Full" : "Half",
- priv->port, priv->queue);
+ printk_ratelimited
+ ("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ? "Full" : "Half",
+ priv->port, priv->queue);
else
- DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n",
- dev->name, link_info.s.speed,
- (link_info.s.full_duplex) ? "Full" : "Half",
- priv->port);
+ printk_ratelimited
+ ("%s: %u Mbps %s duplex, port %2d, POW\n",
+ dev->name, link_info.s.speed,
+ (link_info.s.full_duplex) ? "Full" : "Half",
+ priv->port);
} else {
if (netif_carrier_ok(dev))
netif_carrier_off(dev);
- DEBUGPRINT("%s: Link down\n", dev->name);
+ printk_ratelimited("%s: Link down\n", dev->name);
}
}
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 6885f9a4660..b303b7e42b6 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -51,7 +51,6 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/parport.h>
-#include <linux/version.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
diff --git a/drivers/staging/pohmelfs/crypto.c b/drivers/staging/pohmelfs/crypto.c
index 5cca24fcf6c..ad92771dce5 100644
--- a/drivers/staging/pohmelfs/crypto.c
+++ b/drivers/staging/pohmelfs/crypto.c
@@ -17,6 +17,7 @@
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "netfs.h"
diff --git a/drivers/staging/pohmelfs/dir.c b/drivers/staging/pohmelfs/dir.c
index 9732a9666cc..7598e77672a 100644
--- a/drivers/staging/pohmelfs/dir.c
+++ b/drivers/staging/pohmelfs/dir.c
@@ -512,7 +512,7 @@ struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct
int err, lock_type = POHMELFS_READ_LOCK, need_lock = 1;
struct qstr str = dentry->d_name;
- if ((nd->intent.open.flags & O_ACCMODE) > 1)
+ if ((nd->intent.open.flags & O_ACCMODE) != O_RDONLY)
lock_type = POHMELFS_WRITE_LOCK;
if (test_bit(NETFS_INODE_OWNED, &parent->state)) {
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index c0f0ac7c1cd..f3c6060c96b 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -887,11 +887,16 @@ static struct inode *pohmelfs_alloc_inode(struct super_block *sb)
/*
* We want fsync() to work on POHMELFS.
*/
-static int pohmelfs_fsync(struct file *file, int datasync)
+static int pohmelfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct inode *inode = file->f_mapping->host;
-
- return sync_inode_metadata(inode, 1);
+ int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ if (!err) {
+ mutex_lock(&inode->i_mutex);
+ err = sync_inode_metadata(inode, 1);
+ mutex_unlock(&inode->i_mutex);
+ }
+ return err;
}
ssize_t pohmelfs_write(struct file *file, const char __user *buf,
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index 16aa6a8952f..e79a7e21297 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -25,13 +25,13 @@
#define IEEE80211_H
#include <linux/if_ether.h> /* ETH_ALEN */
#include <linux/kernel.h> /* ARRAY_SIZE */
-#include <linux/version.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/wireless.h>
#include <linux/ieee80211.h>
+#include <linux/interrupt.h>
#define KEY_TYPE_NA 0x0
#define KEY_TYPE_WEP40 0x1
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
index c8dbcb92591..b3882ae9d97 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
@@ -12,7 +12,6 @@
*/
//#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
index 731d2686411..6aaaa2fd57f 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
@@ -10,7 +10,6 @@
*/
//#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
index ee71ee90fd8..da24e430ca1 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
@@ -10,7 +10,6 @@
*/
//#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
index f790cd65f10..58f3eeb2143 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
@@ -10,7 +10,6 @@
*/
//#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c
index 9d58a429c56..9422573bfea 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c
@@ -46,7 +46,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index 736a1404f28..38e67f0bf62 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -19,7 +19,7 @@
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/version.h>
+#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include "dot11d.h"
@@ -821,7 +821,7 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_IBSS);
if(ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
- cpu_to_le16((beacon_buf->capability |= WLAN_CAPABILITY_SHORT_SLOT));
+ beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
crypt = ieee->crypt[ieee->tx_keyidx];
@@ -2568,11 +2568,8 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->beacon_timer.data = (unsigned long) ieee;
ieee->beacon_timer.function = ieee80211_send_beacon_cb;
-#ifdef PF_SYNCTHREAD
- ieee->wq = create_workqueue(DRV_NAME,0);
-#else
ieee->wq = create_workqueue(DRV_NAME);
-#endif
+
INIT_DELAYED_WORK(&ieee->start_ibss_wq,(void*) ieee80211_start_ibss_wq);
INIT_WORK(&ieee->associate_complete_wq,(void*) ieee80211_associate_complete_wq);
INIT_WORK(&ieee->associate_procedure_wq,(void*) ieee80211_associate_procedure_wq);
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
index 6cb31e1760a..552115cd760 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
@@ -445,7 +445,7 @@ int ieee80211_rtl_xmit(struct sk_buff *skb,
(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
bytes_per_frag -= IEEE80211_FCS_LEN;
- /* Each fragment may need to have room for encryptiong pre/postfix */
+ /* Each fragment may need to have room for encryption pre/postfix */
if (encrypt)
bytes_per_frag -= crypt->ops->extra_prefix_len +
crypt->ops->extra_postfix_len;
diff --git a/drivers/staging/rtl8187se/r8180.h b/drivers/staging/rtl8187se/r8180.h
index d15bdf64efd..a2c46ae4a40 100644
--- a/drivers/staging/rtl8187se/r8180.h
+++ b/drivers/staging/rtl8187se/r8180.h
@@ -18,6 +18,7 @@
#ifndef R8180H
#define R8180H
+#include <linux/interrupt.h>
#define RTL8180_MODULE_NAME "r8180"
#define DMESG(x,a...) printk(KERN_INFO RTL8180_MODULE_NAME ": " x "\n", ## a)
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index 2155a771c33..4c6651aac30 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/eeprom_93cx6.h>
+#include <linux/interrupt.h>
#include "r8180_hw.h"
#include "r8180.h"
@@ -306,7 +307,7 @@ static int proc_get_stats_tx(char *page, char **start,
void rtl8180_proc_module_init(void)
{
DMESG("Initializing proc filesystem");
- rtl8180_proc = create_proc_entry(RTL8180_MODULE_NAME, S_IFDIR, init_net.proc_net);
+ rtl8180_proc = proc_mkdir(RTL8180_MODULE_NAME, init_net.proc_net);
}
void rtl8180_proc_module_remove(void)
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index 106ebcfa7d7..8e644614f21 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -4,30 +4,30 @@
#ifdef ENABLE_DOT11D
#include "ieee80211.h"
-typedef struct _CHNL_TXPOWER_TRIPLE {
+struct _CHNL_TXPOWER_TRIPLE {
u8 FirstChnl;
u8 NumChnls;
u8 MaxTxPowerInDbm;
-} CHNL_TXPOWER_TRIPLE, *PCHNL_TXPOWER_TRIPLE;
+};
-typedef enum _DOT11D_STATE {
+enum _DOT11D_STATE {
DOT11D_STATE_NONE = 0,
DOT11D_STATE_LEARNED,
DOT11D_STATE_DONE,
-} DOT11D_STATE;
+};
/**
* struct _RT_DOT11D_INFO
* @CountryIeLen: value greater than 0 if @CountryIeBuf contains
- * valid country information element.
+ * valid country information element.
* @chanell_map: holds channel values
* 0 - invalid,
* 1 - valid (active scan),
- * 2 - valid (passive scan)
+ * 2 - valid (passive scan)
* @CountryIeSrcAddr - Source AP of the country IE
*/
-typedef struct _RT_DOT11D_INFO {
+struct _RT_DOT11D_INFO {
bool bEnabled;
u16 CountryIeLen;
@@ -39,7 +39,7 @@ typedef struct _RT_DOT11D_INFO {
u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
DOT11D_STATE State;
-} RT_DOT11D_INFO, *PRT_DOT11D_INFO;
+};
static inline void cpMacAddr(unsigned char *des, unsigned char *src)
{
@@ -49,7 +49,7 @@ static inline void cpMacAddr(unsigned char *des, unsigned char *src)
#define GET_DOT11D_INFO(__pIeeeDev) ((PRT_DOT11D_INFO) \
((__pIeeeDev)->pDot11dInfo))
-#define IS_DOT11D_ENABLE(__pIeeeDev) GET_DOT11D_INFO(__pIeeeDev)->bEnabled
+#define IS_DOT11D_ENABLE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->bEnabled)
#define IS_COUNTRY_IE_VALID(__pIeeeDev) \
(GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen > 0)
@@ -66,9 +66,10 @@ static inline void cpMacAddr(unsigned char *des, unsigned char *src)
(__Ie).Octet, (__Ie).Length)))
#define CIE_WATCHDOG_TH 1
-#define GET_CIE_WATCHDOG(__pIeeeDev) GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog
+#define GET_CIE_WATCHDOG(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)-> \
+ CountryIeWatchdog)
#define RESET_CIE_WATCHDOG(__pIeeeDev) GET_CIE_WATCHDOG(__pIeeeDev) = 0
-#define UPDATE_CIE_WATCHDOG(__pIeeeDev) ++GET_CIE_WATCHDOG(__pIeeeDev)
+#define UPDATE_CIE_WATCHDOG(__pIeeeDev) (++GET_CIE_WATCHDOG(__pIeeeDev))
#define IS_DOT11D_STATE_DONE(__pIeeeDev) \
(GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211.h b/drivers/staging/rtl8192e/ieee80211/ieee80211.h
index dbe21ab0dbf..6d7963e5b6a 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211.h
@@ -25,12 +25,12 @@
#define IEEE80211_H
#include <linux/if_ether.h> /* ETH_ALEN */
#include <linux/kernel.h> /* ARRAY_SIZE */
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
+#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/wireless.h>
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c
index ae503791890..61fd4ced452 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c
@@ -12,7 +12,6 @@
*/
//#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c
index 9b8533f2fcb..48267a058d1 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c
@@ -10,7 +10,6 @@
*/
//#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c
index b32b7e67f68..ed623a911e4 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_tkip.c
@@ -10,7 +10,6 @@
*/
//#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c
index e6264727d94..55043913afc 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_wep.c
@@ -9,7 +9,6 @@
* more details.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
index 663b0b8e109..37a65ff4b12 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c
@@ -45,7 +45,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
@@ -311,7 +310,7 @@ int __init ieee80211_rtl_init(void)
}
ieee80211_debug_level = debug;
- ieee80211_proc = create_proc_entry(DRV_NAME, S_IFDIR, init_net.proc_net);
+ ieee80211_proc = proc_mkdir(DRV_NAME, init_net.proc_net);
if (ieee80211_proc == NULL) {
IEEE80211_ERROR("Unable to create " DRV_NAME
" proc directory\n");
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
index ed5a3802309..022086d2a3f 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_rx.c
@@ -36,7 +36,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
index 7d4cba3a7c1..60e9a09d933 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c
@@ -19,7 +19,6 @@
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <asm/uaccess.h>
#ifdef ENABLE_DOT11D
#include "dot11d.h"
@@ -778,7 +777,7 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE); //add short preamble here
if(ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
- cpu_to_le16((beacon_buf->capability |= WLAN_CAPABILITY_SHORT_SLOT));
+ beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
crypt = ieee->crypt[ieee->tx_keyidx];
if (encrypt)
diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
index 995346def24..424dd48da66 100644
--- a/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_tx.c
@@ -46,7 +46,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
@@ -762,7 +761,7 @@ int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
bytes_per_frag -= IEEE80211_FCS_LEN;
- /* Each fragment may need to have room for encryptiong pre/postfix */
+ /* Each fragment may need to have room for encryption pre/postfix */
if (encrypt)
bytes_per_frag -= crypt->ops->extra_prefix_len +
crypt->ops->extra_postfix_len;
diff --git a/drivers/staging/rtl8192e/r8192E.h b/drivers/staging/rtl8192e/r8192E.h
index 0229031d88d..137f66b034b 100644
--- a/drivers/staging/rtl8192e/r8192E.h
+++ b/drivers/staging/rtl8192e/r8192E.h
@@ -35,7 +35,7 @@
#include <linux/proc_fs.h> // Necessary because we use the proc fs
#include <linux/if_arp.h>
#include <linux/random.h>
-#include <linux/version.h>
+#include <linux/interrupt.h>
#include <asm/io.h>
#include "ieee80211/rtl819x_HT.h"
#include "ieee80211/ieee80211.h"
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index 58d800f1b5e..94d9c8d5d09 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -27,6 +27,8 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/hardirq.h>
#include <asm/uaccess.h>
#include "r8192E_hw.h"
#include "r8192E.h"
@@ -506,7 +508,7 @@ static int proc_get_stats_rx(char *page, char **start,
static void rtl8192_proc_module_init(void)
{
RT_TRACE(COMP_INIT, "Initializing proc filesystem\n");
- rtl8192_proc=create_proc_entry(RTL819xE_MODULE_NAME, S_IFDIR, init_net.proc_net);
+ rtl8192_proc = proc_mkdir(RTL819xE_MODULE_NAME, init_net.proc_net);
}
@@ -538,9 +540,7 @@ static void rtl8192_proc_init_one(struct r8192_priv *priv)
struct net_device *dev = priv->ieee80211->dev;
struct proc_dir_entry *e;
- priv->dir_dev = create_proc_entry(dev->name,
- S_IFDIR | S_IRUGO | S_IXUGO,
- rtl8192_proc);
+ priv->dir_dev = proc_mkdir(dev->name, rtl8192_proc);
if (!priv->dir_dev) {
RT_TRACE(COMP_ERR, "Unable to initialize /proc/net/rtl8192/%s\n",
dev->name);
@@ -4532,6 +4532,7 @@ static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
u8 unit = 0;
int ret = -ENODEV;
unsigned long pmem_start, pmem_len, pmem_flags;
+ u8 revisionid;
RT_TRACE(COMP_INIT,"Configuring chip resources\n");
@@ -4592,6 +4593,11 @@ static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
pci_write_config_byte(pdev, 0x41, 0x00);
+ pci_read_config_byte(pdev, 0x08, &revisionid);
+ /* If the revisionid is 0x10, the device uses rtl8192se. */
+ if (pdev->device == 0x8192 && revisionid == 0x10)
+ goto fail1;
+
pci_read_config_byte(pdev, 0x05, &unit);
pci_write_config_byte(pdev, 0x05, unit & (~0x04));
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index e716f7b1144..463cc261890 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -25,12 +25,12 @@
#define IEEE80211_H
#include <linux/if_ether.h> /* ETH_ALEN */
#include <linux/kernel.h> /* ARRAY_SIZE */
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
+#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/wireless.h>
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
index 8707eba4f90..a464d111d73 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
@@ -12,7 +12,6 @@
*/
//#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
index 4b078e53638..fec0176888e 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
@@ -10,7 +10,6 @@
*/
//#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index a98584c845b..555eb8038e9 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -10,7 +10,6 @@
*/
//#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index 96c2c9d67fd..3801f125f8f 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -10,7 +10,6 @@
*/
//#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index fe978f359f9..e3d47bcf4ca 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -46,7 +46,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
@@ -283,7 +282,7 @@ int __init ieee80211_debug_init(void)
ieee80211_debug_level = debug;
- ieee80211_proc = create_proc_entry(DRV_NAME, S_IFDIR, init_net.proc_net);
+ ieee80211_proc = proc_mkdir(DRV_NAME, init_net.proc_net);
if (ieee80211_proc == NULL) {
IEEE80211_ERROR("Unable to create " DRV_NAME
" proc directory\n");
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index a414303aef5..c9bdc7f6bdc 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -37,7 +37,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 4ec0a6520dd..b00eb0e65f3 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -19,7 +19,6 @@
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <asm/uaccess.h>
#include "dot11d.h"
@@ -777,7 +776,7 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE); //add short preamble here
if(ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
- cpu_to_le16((beacon_buf->capability |= WLAN_CAPABILITY_SHORT_SLOT));
+ beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
crypt = ieee->crypt[ieee->tx_keyidx];
if (encrypt)
@@ -2726,11 +2725,7 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
ieee->beacon_timer.data = (unsigned long) ieee;
ieee->beacon_timer.function = ieee80211_send_beacon_cb;
-#ifdef PF_SYNCTHREAD
- ieee->wq = create_workqueue(DRV_NAME,0);
-#else
ieee->wq = create_workqueue(DRV_NAME);
-#endif
INIT_DELAYED_WORK(&ieee->start_ibss_wq,ieee80211_start_ibss_wq);
INIT_WORK(&ieee->associate_complete_wq, ieee80211_associate_complete_wq);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index ec7845ecdb7..59c45a510ef 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -723,7 +723,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
(CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
bytes_per_frag -= IEEE80211_FCS_LEN;
- /* Each fragment may need to have room for encryptiong pre/postfix */
+ /* Each fragment may need to have room for encryption pre/postfix */
if (encrypt)
bytes_per_frag -= crypt->ops->extra_prefix_len +
crypt->ops->extra_postfix_len;
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index 0205079b13e..9b81f26d40f 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -37,7 +37,6 @@
#include <linux/proc_fs.h> // Necessary because we use the proc fs
#include <linux/if_arp.h>
#include <linux/random.h>
-#include <linux/version.h>
#include <asm/io.h>
#include "ieee80211/ieee80211.h"
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index e81b8ab6aa9..ee86fe8509e 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -671,7 +671,7 @@ static int proc_get_stats_rx(char *page, char **start,
void rtl8192_proc_module_init(void)
{
RT_TRACE(COMP_INIT, "Initializing proc filesystem");
- rtl8192_proc=create_proc_entry(RTL819xU_MODULE_NAME, S_IFDIR, init_net.proc_net);
+ rtl8192_proc = proc_mkdir(RTL819xU_MODULE_NAME, init_net.proc_net);
}
@@ -706,9 +706,7 @@ void rtl8192_proc_init_one(struct net_device *dev)
{
struct proc_dir_entry *e;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
- priv->dir_dev = create_proc_entry(dev->name,
- S_IFDIR | S_IRUGO | S_IXUGO,
- rtl8192_proc);
+ priv->dir_dev = proc_mkdir(dev->name, rtl8192_proc);
if (!priv->dir_dev) {
RT_TRACE(COMP_ERR, "Unable to initialize /proc/net/rtl8192/%s\n",
dev->name);
@@ -2852,11 +2850,7 @@ static void rtl8192_init_priv_task(struct net_device* dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
-#ifdef PF_SYNCTHREAD
- priv->priv_wq = create_workqueue(DRV_NAME,0);
-#else
priv->priv_wq = create_workqueue(DRV_NAME);
-#endif
INIT_WORK(&priv->reset_wq, rtl8192_restart);
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index 6766f468639..4bb5fffca5b 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -399,10 +399,7 @@ download_firmware_fail:
}
-
-
-
-
-
-
+MODULE_FIRMWARE("RTL8192U/boot.img");
+MODULE_FIRMWARE("RTL8192U/main.img");
+MODULE_FIRMWARE("RTL8192U/data.img");
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index 3bb66dc2eb2..4f380a64aa8 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -29,7 +29,6 @@ struct qos_priv {
#include "rtl871x_ht.h"
#include "rtl871x_cmd.h"
-#include "wlan_bssdef.h"
#include "rtl871x_xmit.h"
#include "rtl871x_recv.h"
#include "rtl871x_security.h"
diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
index 1f4d147c4b6..d62c6ac0955 100644
--- a/drivers/staging/rtl8712/ieee80211.h
+++ b/drivers/staging/rtl8712/ieee80211.h
@@ -4,6 +4,7 @@
#include "osdep_service.h"
#include "drv_types.h"
#include "wifi.h"
+#include <linux/compiler.h>
#include <linux/wireless.h>
#define MGMT_QUEUE_NUM 5
@@ -123,7 +124,7 @@ struct ieee80211_hdr {
u8 addr3[ETH_ALEN];
u16 seq_ctl;
u8 addr4[ETH_ALEN];
-} __attribute__ ((packed));
+} __packed;
struct ieee80211_hdr_3addr {
u16 frame_ctl;
@@ -132,7 +133,7 @@ struct ieee80211_hdr_3addr {
u8 addr2[ETH_ALEN];
u8 addr3[ETH_ALEN];
u16 seq_ctl;
-} __attribute__ ((packed));
+} __packed;
struct ieee80211_hdr_qos {
@@ -144,7 +145,7 @@ struct ieee80211_hdr_qos {
u16 seq_ctl;
u8 addr4[ETH_ALEN];
u16 qc;
-} __attribute__ ((packed));
+} __packed;
struct ieee80211_hdr_3addr_qos {
u16 frame_ctl;
@@ -154,7 +155,7 @@ struct ieee80211_hdr_3addr_qos {
u8 addr3[ETH_ALEN];
u16 seq_ctl;
u16 qc;
-} __attribute__ ((packed));
+} __packed;
struct eapol {
u8 snap[6];
@@ -162,7 +163,7 @@ struct eapol {
u8 version;
u8 type;
u16 length;
-} __attribute__ ((packed));
+} __packed;
enum eap_type {
@@ -260,7 +261,7 @@ struct ieee80211_snap_hdr {
u8 ssap; /* always 0xAA */
u8 ctrl; /* always 0x03 */
u8 oui[P80211_OUI_LEN]; /* organizational universal id */
-} __attribute__ ((packed));
+} __packed;
#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
@@ -510,7 +511,7 @@ struct ieee80211_security {
u8 keys[WEP_KEYS][WEP_KEY_LEN];
u8 level;
u16 flags;
-} __attribute__ ((packed));
+} __packed;
/*
@@ -555,13 +556,13 @@ struct ieee80211_header_data {
struct ieee80211_info_element_hdr {
u8 id;
u8 len;
-} __attribute__ ((packed));
+} __packed;
struct ieee80211_info_element {
u8 id;
u8 len;
u8 data[0];
-} __attribute__ ((packed));
+} __packed;
/*
* These are the data types that can make up management packets
@@ -574,7 +575,7 @@ struct ieee80211_info_element {
u16 listen_interval;
struct {
u16 association_id:14, reserved:2;
- } __attribute__ ((packed));
+ } __packed;
u32 time_stamp[2];
u16 reason;
u16 status;
@@ -588,7 +589,7 @@ struct ieee80211_authentication {
u16 algorithm;
u16 transaction;
u16 status;
-} __attribute__ ((packed));
+} __packed;
struct ieee80211_probe_response {
struct ieee80211_header_data header;
@@ -596,25 +597,25 @@ struct ieee80211_probe_response {
u16 beacon_interval;
u16 capability;
struct ieee80211_info_element info_element;
-} __attribute__ ((packed));
+} __packed;
struct ieee80211_probe_request {
struct ieee80211_header_data header;
-} __attribute__ ((packed));
+} __packed;
struct ieee80211_assoc_request_frame {
struct ieee80211_hdr_3addr header;
u16 capability;
u16 listen_interval;
struct ieee80211_info_element_hdr info_element;
-} __attribute__ ((packed));
+} __packed;
struct ieee80211_assoc_response_frame {
struct ieee80211_hdr_3addr header;
u16 capability;
u16 status;
u16 aid;
-} __attribute__ ((packed));
+} __packed;
struct ieee80211_txb {
u8 nr_frags;
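The hunks above replace the open-coded __attribute__ ((packed)) with the kernel's __packed shorthand from <linux/compiler.h>, which is why the include is added at the top of the header. A small sketch, not taken from this patch, showing that the two spellings produce the same unpadded layout:

#include <linux/compiler.h>
#include <linux/types.h>

/* Both structs have no padding: sizeof() is 7 on every architecture. */
struct hdr_old {
	u8  id;
	u16 len;
	u32 seq;
} __attribute__ ((packed));

struct hdr_new {
	u8  id;
	u16 len;
	u32 seq;
} __packed;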
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index 3d3f73c5cd5..c683d7609e6 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -5,9 +5,7 @@
#define _FAIL 0
#include "basic_types.h"
-#include <linux/version.h>
#include <linux/spinlock.h>
-
#include <linux/semaphore.h>
#include <linux/sem.h>
#include <linux/netdevice.h>
@@ -22,7 +20,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kref.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
@@ -30,7 +27,7 @@
#include <linux/circ_buf.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/wireless.h>
#include <linux/rtnetlink.h>
#include "ethernet.h"
@@ -236,7 +233,5 @@ static inline u32 _RND512(u32 sz)
return ((sz >> 9) + ((sz & 511) ? 1 : 0)) << 9;
}
-#define STRUCT_PACKED __attribute__ ((packed))
-
#endif
diff --git a/drivers/staging/rtl8712/rtl8712_hal.h b/drivers/staging/rtl8712/rtl8712_hal.h
index 66baa87cd4b..c696dd8a2ea 100644
--- a/drivers/staging/rtl8712/rtl8712_hal.h
+++ b/drivers/staging/rtl8712/rtl8712_hal.h
@@ -113,7 +113,7 @@ struct fw_hdr {/*8-byte alinment required*/
struct fw_priv fwpriv;
};
-struct hal_priv{
+struct hal_priv {
/*Endpoint handles*/
struct net_device *pipehdls_r8712[10];
u8 (*hal_bus_init)(struct _adapter *adapter);
diff --git a/drivers/staging/rtl8712/wifi.h b/drivers/staging/rtl8712/wifi.h
index 6032cdc6539..427ab7e2705 100644
--- a/drivers/staging/rtl8712/wifi.h
+++ b/drivers/staging/rtl8712/wifi.h
@@ -2,6 +2,7 @@
#define _WIFI_H_
#include "rtl871x_byteorder.h"
+#include <linux/compiler.h>
#ifdef BIT
#undef BIT
@@ -523,7 +524,7 @@ struct ieee80211_bar {
unsigned char ta[6];
unsigned short control;
unsigned short start_seq_num;
-} __attribute__((packed));
+} __packed;
/* 802.11 BAR control masks */
#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
@@ -544,7 +545,7 @@ struct ieee80211_ht_cap {
unsigned short extended_ht_cap_info;
unsigned int tx_BF_cap_info;
unsigned char antenna_selection_info;
-} __attribute__ ((packed));
+} __packed;
/**
* struct ieee80211_ht_cap - HT additional information
@@ -558,7 +559,7 @@ struct ieee80211_ht_addt_info {
unsigned short operation_mode;
unsigned short stbc_param;
unsigned char basic_set[16];
-} __attribute__ ((packed));
+} __packed;
/* 802.11n HT capabilities masks */
#define IEEE80211_HT_CAP_SUP_WIDTH 0x0002
diff --git a/drivers/staging/rts_pstor/ms.c b/drivers/staging/rts_pstor/ms.c
index 2e8258754c9..66341dff8c9 100644
--- a/drivers/staging/rts_pstor/ms.c
+++ b/drivers/staging/rts_pstor/ms.c
@@ -2064,11 +2064,10 @@ static int ms_init_l2p_tbl(struct rtsx_chip *chip)
RTSX_DEBUGP("ms_card->segment_cnt = %d\n", ms_card->segment_cnt);
size = ms_card->segment_cnt * sizeof(struct zone_entry);
- ms_card->segment = (struct zone_entry *)vmalloc(size);
+ ms_card->segment = vzalloc(size);
if (ms_card->segment == NULL) {
TRACE_RET(chip, STATUS_FAIL);
}
- memset(ms_card->segment, 0, size);
retval = ms_read_page(chip, ms_card->boot_block, 1);
if (retval != STATUS_SUCCESS) {
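The ms.c change above folds vmalloc() followed by memset(..., 0, size) into a single vzalloc() call, which returns already-zeroed memory and needs no cast from void *. A hedged sketch of the pattern in isolation; the structure and function names are invented for illustration.

#include <linux/vmalloc.h>
#include <linux/types.h>

struct example_entry {
	u32 lba;
	u16 flags;
};

static struct example_entry *alloc_table(unsigned int nr)
{
	/* Before: tbl = (struct example_entry *)vmalloc(size); memset(tbl, 0, size); */
	struct example_entry *tbl = vzalloc(nr * sizeof(*tbl));

	if (!tbl)
		return NULL;	/* caller handles the allocation failure */
	return tbl;		/* already zero-filled */
}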
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 5ff59f27d10..16c73fbff51 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -66,12 +66,6 @@ static int msi_en;
module_param(msi_en, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msi_en, "enable msi");
-/* These are used to make sure the module doesn't unload before all the
- * threads have exited.
- */
-static atomic_t total_threads = ATOMIC_INIT(0);
-static DECLARE_COMPLETION(threads_gone);
-
static irqreturn_t rtsx_interrupt(int irq, void *dev_id);
/***********************************************************************
@@ -192,7 +186,7 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
/* enqueue the command and wake up the control thread */
srb->scsi_done = done;
chip->srb = srb;
- up(&(dev->sema));
+ complete(&dev->cmnd_ready);
return 0;
}
@@ -475,7 +469,7 @@ static int rtsx_control_thread(void *__dev)
current->flags |= PF_NOFREEZE;
for (;;) {
- if (down_interruptible(&dev->sema))
+ if (wait_for_completion_interruptible(&dev->cmnd_ready))
break;
/* lock the device pointers */
@@ -557,8 +551,6 @@ SkipForAbort:
mutex_unlock(&dev->dev_mutex);
} /* for (;;) */
- scsi_host_put(host);
-
/* notify the exit routine that we're actually exiting now
*
* complete()/wait_for_completion() is similar to up()/down(),
@@ -573,7 +565,7 @@ SkipForAbort:
* This is important in preemption kernels, which transfer the flow
* of execution immediately upon a complete().
*/
- complete_and_exit(&threads_gone, 0);
+ complete_and_exit(&dev->control_exit, 0);
}
@@ -581,7 +573,6 @@ static int rtsx_polling_thread(void *__dev)
{
struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
struct rtsx_chip *chip = dev->chip;
- struct Scsi_Host *host = rtsx_to_host(dev);
struct sd_info *sd_card = &(chip->sd_card);
struct xd_info *xd_card = &(chip->xd_card);
struct ms_info *ms_card = &(chip->ms_card);
@@ -621,8 +612,7 @@ static int rtsx_polling_thread(void *__dev)
mutex_unlock(&dev->dev_mutex);
}
- scsi_host_put(host);
- complete_and_exit(&threads_gone, 0);
+ complete_and_exit(&dev->polling_exit, 0);
}
/*
@@ -699,29 +689,38 @@ static void rtsx_release_resources(struct rtsx_dev *dev)
{
printk(KERN_INFO "-- %s\n", __func__);
+ /* Tell the control thread to exit. The SCSI host must
+ * already have been removed so it won't try to queue
+ * any more commands.
+ */
+ printk(KERN_INFO "-- sending exit command to thread\n");
+ complete(&dev->cmnd_ready);
+ if (dev->ctl_thread)
+ wait_for_completion(&dev->control_exit);
+ if (dev->polling_thread)
+ wait_for_completion(&dev->polling_exit);
+
+ wait_timeout(200);
+
if (dev->rtsx_resv_buf) {
- dma_free_coherent(&(dev->pci->dev), HOST_CMDS_BUF_LEN,
+ dma_free_coherent(&(dev->pci->dev), RTSX_RESV_BUF_LEN,
dev->rtsx_resv_buf, dev->rtsx_resv_buf_addr);
dev->chip->host_cmds_ptr = NULL;
dev->chip->host_sg_tbl_ptr = NULL;
}
- pci_disable_device(dev->pci);
- pci_release_regions(dev->pci);
-
- if (dev->irq > 0) {
+ if (dev->irq > 0)
free_irq(dev->irq, (void *)dev);
- }
- if (dev->chip->msi_en) {
+ if (dev->chip->msi_en)
pci_disable_msi(dev->pci);
- }
+ if (dev->remap_addr)
+ iounmap(dev->remap_addr);
- /* Tell the control thread to exit. The SCSI host must
- * already have been removed so it won't try to queue
- * any more commands.
- */
- printk(KERN_INFO "-- sending exit command to thread\n");
- up(&dev->sema);
+ pci_disable_device(dev->pci);
+ pci_release_regions(dev->pci);
+
+ rtsx_release_chip(dev->chip);
+ kfree(dev->chip);
}
/* First stage of disconnect processing: stop all commands and remove
@@ -739,6 +738,7 @@ static void quiesce_and_remove_host(struct rtsx_dev *dev)
scsi_unlock(host);
mutex_unlock(&dev->dev_mutex);
wake_up(&dev->delay_wait);
+ wait_for_completion(&dev->scanning_done);
/* Wait some time to let other threads exist */
wait_timeout(100);
@@ -793,8 +793,7 @@ static int rtsx_scan_thread(void *__dev)
/* Should we unbind if no devices were detected? */
}
- scsi_host_put(rtsx_to_host(dev));
- complete_and_exit(&threads_gone, 0);
+ complete_and_exit(&dev->scanning_done, 0);
}
static void rtsx_init_options(struct rtsx_chip *chip)
@@ -941,8 +940,11 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
spin_lock_init(&dev->reg_lock);
mutex_init(&(dev->dev_mutex));
- sema_init(&(dev->sema), 0);
+ init_completion(&dev->cmnd_ready);
+ init_completion(&dev->control_exit);
+ init_completion(&dev->polling_exit);
init_completion(&(dev->notify));
+ init_completion(&dev->scanning_done);
init_waitqueue_head(&dev->delay_wait);
dev->pci = pci;
@@ -992,28 +994,22 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
pci_set_master(pci);
synchronize_irq(dev->irq);
- err = scsi_add_host(host, &pci->dev);
- if (err) {
- printk(KERN_ERR "Unable to add the scsi host\n");
- goto errout;
- }
-
rtsx_init_chip(dev->chip);
/* Start up our control thread */
- th = kthread_create(rtsx_control_thread, dev, CR_DRIVER_NAME);
+ th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
if (IS_ERR(th)) {
printk(KERN_ERR "Unable to start control thread\n");
err = PTR_ERR(th);
goto errout;
}
+ dev->ctl_thread = th;
- /* Take a reference to the host for the control thread and
- * count it among all the threads we have launched. Then
- * start it up. */
- scsi_host_get(rtsx_to_host(dev));
- atomic_inc(&total_threads);
- wake_up_process(th);
+ err = scsi_add_host(host, &pci->dev);
+ if (err) {
+ printk(KERN_ERR "Unable to add the scsi host\n");
+ goto errout;
+ }
/* Start up the thread for delayed SCSI-device scanning */
th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
@@ -1024,28 +1020,17 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
goto errout;
}
- /* Take a reference to the host for the scanning thread and
- * count it among all the threads we have launched. Then
- * start it up. */
- scsi_host_get(rtsx_to_host(dev));
- atomic_inc(&total_threads);
wake_up_process(th);
/* Start up the thread for polling thread */
- th = kthread_create(rtsx_polling_thread, dev, "rtsx-polling");
+ th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling");
if (IS_ERR(th)) {
printk(KERN_ERR "Unable to start the device-polling thread\n");
quiesce_and_remove_host(dev);
err = PTR_ERR(th);
goto errout;
}
-
- /* Take a reference to the host for the polling thread and
- * count it among all the threads we have launched. Then
- * start it up. */
- scsi_host_get(rtsx_to_host(dev));
- atomic_inc(&total_threads);
- wake_up_process(th);
+ dev->polling_thread = th;
pci_set_drvdata(pci, dev);
@@ -1108,16 +1093,6 @@ static void __exit rtsx_exit(void)
pci_unregister_driver(&driver);
- /* Don't return until all of our control and scanning threads
- * have exited. Since each thread signals threads_gone as its
- * last act, we have to call wait_for_completion the right number
- * of times.
- */
- while (atomic_read(&total_threads) > 0) {
- wait_for_completion(&threads_gone);
- atomic_dec(&total_threads);
- }
-
printk(KERN_INFO "%s module exit\n", CR_DRIVER_NAME);
}
diff --git a/drivers/staging/rts_pstor/rtsx.h b/drivers/staging/rts_pstor/rtsx.h
index 6afb6358e77..86e47c2e3e3 100644
--- a/drivers/staging/rts_pstor/rtsx.h
+++ b/drivers/staging/rts_pstor/rtsx.h
@@ -29,7 +29,6 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -113,9 +112,16 @@ struct rtsx_dev {
/* locks */
spinlock_t reg_lock;
+ struct task_struct *ctl_thread; /* the control thread */
+ struct task_struct *polling_thread; /* the polling thread */
+
/* mutual exclusion and synchronization structures */
- struct semaphore sema; /* to sleep thread on */
+ struct completion cmnd_ready; /* to sleep thread on */
+ struct completion control_exit; /* control thread exit */
+ struct completion polling_exit; /* polling thread exit */
struct completion notify; /* thread begin/end */
+ struct completion scanning_done; /* wait for scan thread */
+
wait_queue_head_t delay_wait; /* wait during scan, reset */
struct mutex dev_mutex;
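The rtsx changes above replace the single semaphore/threads_gone pair with per-purpose completions (cmnd_ready, control_exit, polling_exit, scanning_done) and start the workers with kthread_run() instead of kthread_create() plus wake_up_process(). A reduced sketch of that wake-up and shutdown handshake, using hypothetical names and a simplified stop flag in place of the driver's own state checks.

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/err.h>

struct worker_ctx {
	struct completion cmnd_ready;	/* signalled when work is queued */
	struct completion exited;	/* signalled by the thread on exit */
	struct task_struct *task;
	bool stop;
};

static int worker_fn(void *arg)
{
	struct worker_ctx *ctx = arg;

	for (;;) {
		if (wait_for_completion_interruptible(&ctx->cmnd_ready))
			break;
		if (ctx->stop)
			break;
		/* ... handle one queued command ... */
	}
	complete_and_exit(&ctx->exited, 0);
}

static int worker_start(struct worker_ctx *ctx)
{
	init_completion(&ctx->cmnd_ready);
	init_completion(&ctx->exited);
	ctx->stop = false;

	/* kthread_run() creates and wakes the thread in one step. */
	ctx->task = kthread_run(worker_fn, ctx, "example-worker");
	return IS_ERR(ctx->task) ? PTR_ERR(ctx->task) : 0;
}

static void worker_stop(struct worker_ctx *ctx)
{
	ctx->stop = true;
	complete(&ctx->cmnd_ready);		/* wake the thread up */
	wait_for_completion(&ctx->exited);	/* wait until it has exited */
}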
diff --git a/drivers/staging/rts_pstor/rtsx_chip.c b/drivers/staging/rts_pstor/rtsx_chip.c
index 4e60780ea80..5452069fbe0 100644
--- a/drivers/staging/rts_pstor/rtsx_chip.c
+++ b/drivers/staging/rts_pstor/rtsx_chip.c
@@ -1596,18 +1596,16 @@ int rtsx_write_cfg_seq(struct rtsx_chip *chip, u8 func, u16 addr, u8 *buf, int l
}
RTSX_DEBUGP("dw_len = %d\n", dw_len);
- data = (u32 *)vmalloc(dw_len * 4);
+ data = vzalloc(dw_len * 4);
if (!data) {
TRACE_RET(chip, STATUS_NOMEM);
}
- memset(data, 0, dw_len * 4);
- mask = (u32 *)vmalloc(dw_len * 4);
+ mask = vzalloc(dw_len * 4);
if (!mask) {
vfree(data);
TRACE_RET(chip, STATUS_NOMEM);
}
- memset(mask, 0, dw_len * 4);
j = 0;
for (i = 0; i < len; i++) {
diff --git a/drivers/staging/rts_pstor/sd.c b/drivers/staging/rts_pstor/sd.c
index cdae497d546..8db14ddbeb7 100644
--- a/drivers/staging/rts_pstor/sd.c
+++ b/drivers/staging/rts_pstor/sd.c
@@ -2661,7 +2661,7 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
retval = sd_send_cmd_get_rsp(chip, BUSTEST_W, 0, SD_RSP_TYPE_R1, NULL, 0);
if (retval != STATUS_SUCCESS) {
- TRACE_RET(chip, STATUS_FAIL);
+ TRACE_RET(chip, SWITCH_FAIL);
}
if (width == MMC_8BIT_BUS) {
@@ -2678,7 +2678,9 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
}
if (!CHECK_PID(chip, 0x5209)) {
- RTSX_WRITE_REG(chip, REG_SD_CFG3, 0x02, 0x02);
+ retval = rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0x02);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, SWITCH_ERR);
}
retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3,
@@ -2690,17 +2692,19 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
rtsx_read_register(chip, REG_SD_STAT2, &val2);
rtsx_clear_sd_error(chip);
if ((val1 & 0xE0) || val2) {
- TRACE_RET(chip, STATUS_FAIL);
+ TRACE_RET(chip, SWITCH_ERR);
}
} else {
rtsx_clear_sd_error(chip);
rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
- TRACE_RET(chip, STATUS_FAIL);
+ TRACE_RET(chip, SWITCH_ERR);
}
}
if (!CHECK_PID(chip, 0x5209)) {
- RTSX_WRITE_REG(chip, REG_SD_CFG3, 0x02, 0);
+ retval = rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, SWITCH_ERR);
}
RTSX_DEBUGP("SD/MMC CMD %d\n", BUSTEST_R);
@@ -2733,7 +2737,7 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
retval = rtsx_send_cmd(chip, SD_CARD, 100);
if (retval < 0) {
rtsx_clear_sd_error(chip);
- TRACE_RET(chip, STATUS_FAIL);
+ TRACE_RET(chip, SWITCH_ERR);
}
ptr = rtsx_get_cmd_data(chip) + 1;
@@ -2751,7 +2755,7 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
}
retval = sd_send_cmd_get_rsp(chip, SWITCH, arg, SD_RSP_TYPE_R1b, rsp, 5);
if ((retval == STATUS_SUCCESS) && !(rsp[4] & MMC_SWITCH_ERR)) {
- return STATUS_SUCCESS;
+ return SWITCH_SUCCESS;
}
}
} else {
@@ -2767,12 +2771,12 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
}
retval = sd_send_cmd_get_rsp(chip, SWITCH, arg, SD_RSP_TYPE_R1b, rsp, 5);
if ((retval == STATUS_SUCCESS) && !(rsp[4] & MMC_SWITCH_ERR)) {
- return STATUS_SUCCESS;
+ return SWITCH_SUCCESS;
}
}
}
- TRACE_RET(chip, STATUS_FAIL);
+ TRACE_RET(chip, SWITCH_FAIL);
}
@@ -2880,21 +2884,30 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, int switch_ddr)
TRACE_RET(chip, STATUS_FAIL);
}
- if (mmc_test_switch_bus(chip, MMC_8BIT_BUS) == STATUS_SUCCESS) {
+ /* Test Bus Procedure */
+ retval = mmc_test_switch_bus(chip, MMC_8BIT_BUS);
+ if (retval == SWITCH_SUCCESS) {
SET_MMC_8BIT(sd_card);
chip->card_bus_width[chip->card2lun[SD_CARD]] = 8;
#ifdef SUPPORT_SD_LOCK
sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
#endif
- } else if (mmc_test_switch_bus(chip, MMC_4BIT_BUS) == STATUS_SUCCESS) {
- SET_MMC_4BIT(sd_card);
- chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
+ } else if (retval == SWITCH_FAIL) {
+ retval = mmc_test_switch_bus(chip, MMC_4BIT_BUS);
+ if (retval == SWITCH_SUCCESS) {
+ SET_MMC_4BIT(sd_card);
+ chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
#ifdef SUPPORT_SD_LOCK
- sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
+ sd_card->sd_lock_status &= ~SD_LOCK_1BIT_MODE;
#endif
+ } else if (retval == SWITCH_FAIL) {
+ CLR_MMC_8BIT(sd_card);
+ CLR_MMC_4BIT(sd_card);
+ } else {
+ TRACE_RET(chip, STATUS_FAIL);
+ }
} else {
- CLR_MMC_8BIT(sd_card);
- CLR_MMC_4BIT(sd_card);
+ TRACE_RET(chip, STATUS_FAIL);
}
return STATUS_SUCCESS;
@@ -2915,8 +2928,7 @@ static int reset_mmc(struct rtsx_chip *chip)
goto MMC_UNLOCK_ENTRY;
#endif
-DDR_TUNING_FAIL:
-
+Switch_Fail:
retval = sd_prepare_reset(chip);
if (retval != STATUS_SUCCESS) {
TRACE_RET(chip, retval);
@@ -3017,7 +3029,15 @@ MMC_UNLOCK_ENTRY:
if (!sd_card->mmc_dont_switch_bus) {
if (spec_ver == 4) {
- (void)mmc_switch_timing_bus(chip, switch_ddr);
+ /* MMC 4.x Cards */
+ retval = mmc_switch_timing_bus(chip, switch_ddr);
+ if (retval != STATUS_SUCCESS) {
+ retval = sd_init_power(chip);
+ if (retval != STATUS_SUCCESS)
+ TRACE_RET(chip, STATUS_FAIL);
+ sd_card->mmc_dont_switch_bus = 1;
+ TRACE_GOTO(chip, Switch_Fail);
+ }
}
if (CHK_MMC_SECTOR_MODE(sd_card) && (sd_card->capacity == 0)) {
@@ -3037,7 +3057,7 @@ MMC_UNLOCK_ENTRY:
TRACE_RET(chip, STATUS_FAIL);
}
switch_ddr = 0;
- goto DDR_TUNING_FAIL;
+ TRACE_GOTO(chip, Switch_Fail);
}
retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
@@ -3049,7 +3069,7 @@ MMC_UNLOCK_ENTRY:
TRACE_RET(chip, STATUS_FAIL);
}
switch_ddr = 0;
- goto DDR_TUNING_FAIL;
+ TRACE_GOTO(chip, Switch_Fail);
}
}
}
@@ -3727,7 +3747,7 @@ RTY_SEND_CMD:
if ((ptr[3] & 0x1E) != 0x04) {
TRACE_RET(chip, STATUS_FAIL);
}
- } else if (rsp_type == SD_RSP_TYPE_R2) {
+ } else if (rsp_type == SD_RSP_TYPE_R0) {
if ((ptr[3] & 0x1E) != 0x03) {
TRACE_RET(chip, STATUS_FAIL);
}
diff --git a/drivers/staging/rts_pstor/sd.h b/drivers/staging/rts_pstor/sd.h
index d62e690e963..1df1aa75e93 100644
--- a/drivers/staging/rts_pstor/sd.h
+++ b/drivers/staging/rts_pstor/sd.h
@@ -38,6 +38,11 @@
#define SD_RSP_TIMEOUT 0x04
#define SD_IO_ERR 0x02
+/* Return code for MMC switch bus */
+#define SWITCH_SUCCESS 0
+#define SWITCH_ERR 1
+#define SWITCH_FAIL 2
+
/* MMC/SD Command Index */
/* Basic command (class 0) */
#define GO_IDLE_STATE 0
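The new SWITCH_SUCCESS/SWITCH_ERR/SWITCH_FAIL codes let mmc_test_switch_bus() distinguish "this bus width is not supported, try a narrower one" (SWITCH_FAIL) from a hard controller error (SWITCH_ERR), which the caller above maps to STATUS_FAIL. A schematic, hypothetical caller illustrating that three-way decision; it is not the driver's actual code path.

#include <linux/errno.h>

/* Return codes mirrored from the hunk above. */
#define SWITCH_SUCCESS	0
#define SWITCH_ERR	1
#define SWITCH_FAIL	2

/* Hypothetical caller: try the 8-bit bus first, fall back to 4-bit. */
static int pick_bus_width(int (*test)(int width))
{
	int ret = test(8);

	if (ret == SWITCH_SUCCESS)
		return 8;
	if (ret == SWITCH_ERR)
		return -EIO;		/* hard error: give up */

	ret = test(4);			/* SWITCH_FAIL: try a narrower bus */
	if (ret == SWITCH_SUCCESS)
		return 4;
	if (ret == SWITCH_ERR)
		return -EIO;

	return 1;			/* neither width worked: stay at 1 bit */
}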
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
index 52342c17ead..bf7286e01a3 100644
--- a/drivers/staging/sep/sep_driver.c
+++ b/drivers/staging/sep/sep_driver.c
@@ -50,7 +50,6 @@
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
-#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/rar_register.h>
@@ -201,7 +200,7 @@ static int sep_singleton_release(struct inode *inode, struct file *filp)
}
/**
- * sep_request_daemonopen - request daemon open method
+ * sep_request_daemon_open - request daemon open method
* @inode: inode of SEP device
* @filp: file handle to SEP device
*
@@ -1102,9 +1101,9 @@ static int sep_lock_user_pages(struct sep_device *sep,
"lli_array[%x].bus_address is "
"%08lx, lli_array[%x].block_size is %x\n",
num_pages - 1,
- (unsigned long)lli_array[num_pages -1].bus_address,
+ (unsigned long)lli_array[num_pages - 1].bus_address,
num_pages - 1,
- lli_array[num_pages -1].block_size);
+ lli_array[num_pages - 1].block_size);
}
/* Set output params according to the in_out flag */
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
index 1033425c9c3..d6bfd245522 100644
--- a/drivers/staging/sep/sep_driver_config.h
+++ b/drivers/staging/sep/sep_driver_config.h
@@ -180,7 +180,7 @@ held by the process (struct file) */
/* offset of the caller id area */
#define SEP_CALLER_ID_OFFSET_BYTES \
(SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES + \
- SEP_DRIVER_SYSTEM_RAR_MEMORY_SIZE_IN_BYTES)
+ SEP_DRIVER_SYSTEM_RAR_MEMORY_SIZE_IN_BYTES)
/* offset of the DCB area */
#define SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES \
diff --git a/drivers/staging/solo6x10/core.c b/drivers/staging/solo6x10/core.c
index 76779949f14..f974f6412ad 100644
--- a/drivers/staging/solo6x10/core.c
+++ b/drivers/staging/solo6x10/core.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/slab.h>
#include <linux/videodev2.h>
#include "solo6x10.h"
#include "tw28.h"
diff --git a/drivers/staging/solo6x10/enc.c b/drivers/staging/solo6x10/enc.c
index 285f7f35006..de502599bb1 100644
--- a/drivers/staging/solo6x10/enc.c
+++ b/drivers/staging/solo6x10/enc.c
@@ -18,6 +18,7 @@
*/
#include <linux/kernel.h>
+#include <linux/slab.h>
#include "solo6x10.h"
#include "osd-font.h"
diff --git a/drivers/staging/solo6x10/g723.c b/drivers/staging/solo6x10/g723.c
index bd8eb92c94b..59274bfca95 100644
--- a/drivers/staging/solo6x10/g723.c
+++ b/drivers/staging/solo6x10/g723.c
@@ -21,6 +21,7 @@
#include <linux/mempool.h>
#include <linux/poll.h>
#include <linux/kthread.h>
+#include <linux/slab.h>
#include <linux/freezer.h>
#include <sound/core.h>
#include <sound/initval.h>
diff --git a/drivers/staging/solo6x10/p2m.c b/drivers/staging/solo6x10/p2m.c
index 5717eabb04a..56210f0fc5e 100644
--- a/drivers/staging/solo6x10/p2m.c
+++ b/drivers/staging/solo6x10/p2m.c
@@ -18,6 +18,7 @@
*/
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/scatterlist.h>
#include "solo6x10.h"
diff --git a/drivers/staging/solo6x10/solo6x10.h b/drivers/staging/solo6x10/solo6x10.h
index fd59b093dd4..abee7213202 100644
--- a/drivers/staging/solo6x10/solo6x10.h
+++ b/drivers/staging/solo6x10/solo6x10.h
@@ -28,8 +28,9 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/delay.h>
+#include <linux/slab.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/videodev2.h>
#include <media/v4l2-dev.h>
#include <media/videobuf-core.h>
diff --git a/drivers/staging/speakup/devsynth.c b/drivers/staging/speakup/devsynth.c
index 39dc586fc8b..940769ef883 100644
--- a/drivers/staging/speakup/devsynth.c
+++ b/drivers/staging/speakup/devsynth.c
@@ -18,13 +18,14 @@ static ssize_t speakup_file_write(struct file *fp, const char *buffer,
{
size_t count = nbytes;
const char *ptr = buffer;
- int bytes;
+ size_t bytes;
unsigned long flags;
u_char buf[256];
+
if (synth == NULL)
return -ENODEV;
while (count > 0) {
- bytes = min_t(size_t, count, sizeof(buf));
+ bytes = min(count, sizeof(buf));
if (copy_from_user(buf, ptr, bytes))
return -EFAULT;
count -= bytes;
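Making bytes a size_t lets plain min() replace min_t(size_t, ...): min() refuses to compile when its operands have mismatched types, so the cast-free form keeps that type check. A one-function illustration, not taken from the patch.

#include <linux/kernel.h>	/* min(), min_t() */

/* Chunk size when copying 'count' user bytes through a 256-byte bounce buffer. */
static size_t chunk_len(size_t count)
{
	/* Both operands are size_t, so plain min() type-checks cleanly; an
	 * int count would have needed min_t(size_t, count, 256) instead. */
	return min(count, (size_t)256);
}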
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 42fcf7e9cb6..8be56045897 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -23,7 +23,6 @@
*/
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/vt.h>
#include <linux/tty.h>
#include <linux/mm.h> /* __get_free_page() and friends */
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index 46edabe2d32..412b87947f6 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -1,6 +1,5 @@
#ifndef _SPEAKUP_H
#define _SPEAKUP_H
-#include <linux/version.h>
#include "spk_types.h"
#include "i18n.h"
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index a2c3dc4098b..42cdafeea35 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -273,15 +273,8 @@ static ssize_t softsynth_write(struct file *fp, const char *buf, size_t count,
{
unsigned long supplied_index = 0;
int converted;
- char indbuf[5];
- if (count >= sizeof(indbuf))
- return -EINVAL;
- if (copy_from_user(indbuf, buf, count))
- return -EFAULT;
- indbuf[count] = '\0';
-
- converted = strict_strtoul(indbuf, 0, &supplied_index);
+ converted = kstrtoul_from_user(buf, count, 0, &supplied_index);
if (converted < 0)
return converted;
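kstrtoul_from_user() collapses the old copy_from_user() plus NUL-terminate plus strict_strtoul() sequence into one call that copies, bounds-checks, and parses in a single step. A minimal sketch of the same pattern in a write() handler; the function name is hypothetical.

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

/* Hypothetical write handler that parses one unsigned integer from userspace. */
static ssize_t example_write(struct file *fp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	unsigned long val;
	int ret;

	/* Copies at most count bytes, NUL-terminates, and parses in base 0. */
	ret = kstrtoul_from_user(buf, count, 0, &val);
	if (ret)
		return ret;	/* -EFAULT, -EINVAL, or -ERANGE */

	/* ... use val ... */
	return count;
}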
diff --git a/drivers/staging/spectra/lld_nand.c b/drivers/staging/spectra/lld_nand.c
index 0be7adc96b8..60a14ff26c7 100644
--- a/drivers/staging/spectra/lld_nand.c
+++ b/drivers/staging/spectra/lld_nand.c
@@ -2397,6 +2397,12 @@ static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct mrst_nand_info *pndev = &info;
u32 int_mask;
+ ret = pci_enable_device(dev);
+ if (ret) {
+ printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
+ return ret;
+ }
+
nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
__FILE__, __LINE__, __func__);
@@ -2404,7 +2410,7 @@ static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
GLOB_HWCTL_REG_SIZE);
if (!FlashReg) {
printk(KERN_ERR "Spectra: ioremap_nocache failed!");
- return -ENOMEM;
+ goto failed_disable;
}
nand_dbg_print(NAND_DBG_WARN,
"Spectra: Remapped reg base address: "
@@ -2416,7 +2422,7 @@ static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!FlashMem) {
printk(KERN_ERR "Spectra: ioremap_nocache failed!");
iounmap(FlashReg);
- return -ENOMEM;
+ goto failed_disable;
}
nand_dbg_print(NAND_DBG_WARN,
"Spectra: Remapped flash base address: "
@@ -2479,11 +2485,6 @@ static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
iowrite32(1, FlashReg + ECC_ENABLE);
enable_ecc = 1;
- ret = pci_enable_device(dev);
- if (ret) {
- printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
- goto failed_req_csr;
- }
pci_set_master(dev);
pndev->dev = dev;
@@ -2558,9 +2559,10 @@ static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
failed_remap_csr:
pci_release_regions(dev);
failed_req_csr:
- pci_disable_device(dev);
iounmap(FlashMem);
iounmap(FlashReg);
+failed_disable:
+ pci_disable_device(dev);
return ret;
}
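The Spectra change moves pci_enable_device() to the top of probe and adds a failed_disable: label so every later failure path unwinds it. A generic sketch of that goto-based unwind order under the usual "acquire in order, release in reverse" rule; the device, BAR index, and function names are invented.

#include <linux/pci.h>
#include <linux/io.h>
#include <linux/errno.h>

static int example_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pci_enable_device(dev);		/* acquire first ... */
	if (ret)
		return ret;

	regs = ioremap_nocache(pci_resource_start(dev, 0),
			       pci_resource_len(dev, 0));
	if (!regs) {
		ret = -ENOMEM;
		goto failed_disable;		/* ... release in reverse order */
	}

	pci_set_master(dev);
	/* ... rest of probe; regs would be stored in driver data ... */
	return 0;

failed_disable:
	pci_disable_device(dev);
	return ret;
}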
diff --git a/drivers/staging/ste_rmi4/Makefile b/drivers/staging/ste_rmi4/Makefile
index 6cce2ed187e..176f4690057 100644
--- a/drivers/staging/ste_rmi4/Makefile
+++ b/drivers/staging/ste_rmi4/Makefile
@@ -2,3 +2,4 @@
# Makefile for the RMI4 touchscreen driver.
#
obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += synaptics_i2c_rmi4.o
+obj-$(CONFIG_MACH_U8500) += board-mop500-u8500uib-rmi4.o
diff --git a/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c b/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c
new file mode 100644
index 00000000000..a272e488e5b
--- /dev/null
+++ b/drivers/staging/ste_rmi4/board-mop500-u8500uib-rmi4.c
@@ -0,0 +1,32 @@
+/*
+ * Some platform data for the RMI4 touchscreen that will override the __weak
+ * platform data in the Ux500 machine if this driver is activated.
+ */
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <mach/gpio.h>
+#include <mach/irqs.h>
+#include "synaptics_i2c_rmi4.h"
+
+/*
+ * Synaptics RMI4 touchscreen interface on the U8500 UIB
+ */
+
+/*
+ * Descriptor structure.
+ * Describes the number of i2c devices on the bus that speak RMI.
+ */
+static struct synaptics_rmi4_platform_data rmi4_i2c_dev_platformdata = {
+ .irq_number = NOMADIK_GPIO_TO_IRQ(84),
+ .irq_type = (IRQF_TRIGGER_FALLING | IRQF_SHARED),
+ .x_flip = false,
+ .y_flip = true,
+};
+
+struct i2c_board_info __initdata mop500_i2c3_devices_u8500[] = {
+ {
+ I2C_BOARD_INFO("synaptics_rmi4_i2c", 0x4B),
+ .platform_data = &rmi4_i2c_dev_platformdata,
+ },
+};
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index 3e68d58fdff..36f4cb77567 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -926,17 +926,19 @@ static int __devinit synaptics_rmi4_probe
goto err_input;
}
- if (platformdata->regulator_en) {
- rmi4_data->regulator = regulator_get(&client->dev, "vdd");
- if (IS_ERR(rmi4_data->regulator)) {
- dev_err(&client->dev, "%s:get regulator failed\n",
- __func__);
- retval = PTR_ERR(rmi4_data->regulator);
- goto err_regulator;
- }
- regulator_enable(rmi4_data->regulator);
+ rmi4_data->regulator = regulator_get(&client->dev, "vdd");
+ if (IS_ERR(rmi4_data->regulator)) {
+ dev_err(&client->dev, "%s:get regulator failed\n",
+ __func__);
+ retval = PTR_ERR(rmi4_data->regulator);
+ goto err_get_regulator;
+ }
+ retval = regulator_enable(rmi4_data->regulator);
+ if (retval < 0) {
+ dev_err(&client->dev, "%s:regulator enable failed\n",
+ __func__);
+ goto err_regulator_enable;
}
-
init_waitqueue_head(&rmi4_data->wait);
/*
* Copy i2c_client pointer into RTID's i2c_client pointer for
@@ -1011,11 +1013,10 @@ static int __devinit synaptics_rmi4_probe
err_free_irq:
free_irq(platformdata->irq_number, rmi4_data);
err_query_dev:
- if (platformdata->regulator_en) {
- regulator_disable(rmi4_data->regulator);
- regulator_put(rmi4_data->regulator);
- }
-err_regulator:
+ regulator_disable(rmi4_data->regulator);
+err_regulator_enable:
+ regulator_put(rmi4_data->regulator);
+err_get_regulator:
input_free_device(rmi4_data->input_dev);
rmi4_data->input_dev = NULL;
err_input:
@@ -1039,10 +1040,8 @@ static int __devexit synaptics_rmi4_remove(struct i2c_client *client)
wake_up(&rmi4_data->wait);
free_irq(pdata->irq_number, rmi4_data);
input_unregister_device(rmi4_data->input_dev);
- if (pdata->regulator_en) {
- regulator_disable(rmi4_data->regulator);
- regulator_put(rmi4_data->regulator);
- }
+ regulator_disable(rmi4_data->regulator);
+ regulator_put(rmi4_data->regulator);
kfree(rmi4_data);
return 0;
@@ -1080,8 +1079,7 @@ static int synaptics_rmi4_suspend(struct device *dev)
if (retval < 0)
return retval;
- if (pdata->regulator_en)
- regulator_disable(rmi4_data->regulator);
+ regulator_disable(rmi4_data->regulator);
return 0;
}
@@ -1099,8 +1097,7 @@ static int synaptics_rmi4_resume(struct device *dev)
struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
- if (pdata->regulator_en)
- regulator_enable(rmi4_data->regulator);
+ regulator_enable(rmi4_data->regulator);
enable_irq(pdata->irq_number);
rmi4_data->touch_stopped = false;
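With regulator_en removed, the probe always takes the regulator path and now checks regulator_enable()'s return value, unwinding through two distinct labels (err_regulator_enable before err_get_regulator). A condensed sketch of that get/enable and disable/put ordering with hypothetical names.

#include <linux/regulator/consumer.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_power_on(struct device *dev, struct regulator **out)
{
	struct regulator *vdd;
	int ret;

	vdd = regulator_get(dev, "vdd");
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	ret = regulator_enable(vdd);
	if (ret < 0) {
		regulator_put(vdd);	/* undo only what succeeded */
		return ret;
	}

	*out = vdd;
	return 0;
}

static void example_power_off(struct regulator *vdd)
{
	regulator_disable(vdd);
	regulator_put(vdd);
}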
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
index 3686a2ff596..384436ef806 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
@@ -34,7 +34,6 @@
* @irq_type: irq type
* @x flip: x flip flag
* @y flip: y flip flag
- * @regulator_en: regulator enable flag
*
* This structure gives platform data for rmi4.
*/
@@ -43,7 +42,6 @@ struct synaptics_rmi4_platform_data {
int irq_type;
bool x_flip;
bool y_flip;
- bool regulator_en;
};
#endif
diff --git a/drivers/staging/tidspbridge/include/dspbridge/host_os.h b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
index b1b8acb5d3c..a2f31c69d12 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/host_os.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/host_os.h
@@ -18,13 +18,12 @@
#define _HOST_OS_H_
#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/syscalls.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/stddef.h>
diff --git a/drivers/staging/tm6000/tm6000-alsa.c b/drivers/staging/tm6000/tm6000-alsa.c
index 2b96047c298..bd5fa89af07 100644
--- a/drivers/staging/tm6000/tm6000-alsa.c
+++ b/drivers/staging/tm6000/tm6000-alsa.c
@@ -18,7 +18,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <asm/delay.h>
+#include <linux/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -84,7 +84,6 @@ static int _tm6000_start_audio_dma(struct snd_tm6000_card *chip)
tm6000_set_audio_bitrate(core, 48000);
-
return 0;
}
@@ -123,6 +122,7 @@ static int dsp_buffer_alloc(struct snd_pcm_substream *substream, int size)
if (substream->runtime->dma_area) {
if (substream->runtime->dma_bytes > size)
return 0;
+
dsp_buffer_free(substream);
}
@@ -152,9 +152,9 @@ static struct snd_pcm_hardware snd_tm6000_digital_hw = {
SNDRV_PCM_INFO_MMAP_VALID,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rates = SNDRV_PCM_RATE_CONTINUOUS,
- .rate_min = 48000,
- .rate_max = 48000,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 48000,
+ .rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.period_bytes_min = 64,
@@ -254,9 +254,7 @@ static int tm6000_fillbuf(struct tm6000_core *core, char *buf, int size)
memcpy(runtime->dma_area + buf_pos * stride, buf,
length * stride);
-#ifndef NO_PCM_LOCK
- snd_pcm_stream_lock(substream);
-#endif
+ snd_pcm_stream_lock(substream);
chip->buf_pos += length;
if (chip->buf_pos >= runtime->buffer_size)
@@ -268,9 +266,7 @@ static int tm6000_fillbuf(struct tm6000_core *core, char *buf, int size)
period_elapsed = 1;
}
-#ifndef NO_PCM_LOCK
- snd_pcm_stream_unlock(substream);
-#endif
+ snd_pcm_stream_unlock(substream);
if (period_elapsed)
snd_pcm_period_elapsed(substream);
@@ -461,7 +457,7 @@ int tm6000_audio_init(struct tm6000_core *dev)
if (rc < 0)
goto error_chip;
- dprintk(1,"Registered audio driver for %s\n", card->longname);
+ dprintk(1, "Registered audio driver for %s\n", card->longname);
return 0;
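Dropping the NO_PCM_LOCK conditionals makes the stream lock unconditional around the buffer-position update, which is what snd_pcm_period_elapsed() expects. A trimmed sketch of that locking pattern; the field handling and period accounting are simplified and the names are illustrative.

#include <sound/core.h>
#include <sound/pcm.h>

/* Copy accounting for one chunk pushed into the ALSA ring buffer. */
static void example_push_audio(struct snd_pcm_substream *substream,
			       snd_pcm_uframes_t *buf_pos, unsigned int frames)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int period_elapsed = 0;

	snd_pcm_stream_lock(substream);	/* protects the position bookkeeping */
	*buf_pos += frames;
	if (*buf_pos >= runtime->buffer_size) {
		*buf_pos -= runtime->buffer_size;
		period_elapsed = 1;	/* simplified: one period per buffer wrap */
	}
	snd_pcm_stream_unlock(substream);

	if (period_elapsed)
		snd_pcm_period_elapsed(substream);
}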
diff --git a/drivers/staging/tm6000/tm6000-cards.c b/drivers/staging/tm6000/tm6000-cards.c
index a69c82e1199..9227db5d895 100644
--- a/drivers/staging/tm6000/tm6000-cards.c
+++ b/drivers/staging/tm6000/tm6000-cards.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/usb.h>
-#include <linux/version.h>
#include <linux/slab.h>
#include <media/v4l2-common.h>
#include <media/tuner.h>
@@ -994,11 +993,7 @@ static int fill_board_specific_data(struct tm6000_core *dev)
if (rc < 0)
return rc;
- rc = v4l2_device_register(&dev->udev->dev, &dev->v4l2_dev);
- if (rc < 0)
- return rc;
-
- return rc;
+ return v4l2_device_register(&dev->udev->dev, &dev->v4l2_dev);
}
diff --git a/drivers/staging/tm6000/tm6000-dvb.c b/drivers/staging/tm6000/tm6000-dvb.c
index ff04c89e45a..0e0dfce0582 100644
--- a/drivers/staging/tm6000/tm6000-dvb.c
+++ b/drivers/staging/tm6000/tm6000-dvb.c
@@ -98,7 +98,7 @@ static void tm6000_urb_received(struct urb *urb)
if (dev->dvb->streams > 0) {
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0) {
- printk(KERN_ERR "tm6000: error %s\n", __FUNCTION__);
+ printk(KERN_ERR "tm6000: error %s\n", __func__);
kfree(urb->transfer_buffer);
usb_free_urb(urb);
}
@@ -111,7 +111,7 @@ int tm6000_start_stream(struct tm6000_core *dev)
unsigned int pipe, size;
struct tm6000_dvb *dvb = dev->dvb;
- printk(KERN_INFO "tm6000: got start stream request %s\n", __FUNCTION__);
+ printk(KERN_INFO "tm6000: got start stream request %s\n", __func__);
if (dev->mode != TM6000_MODE_DIGITAL) {
tm6000_init_digital_mode(dev);
@@ -145,7 +145,7 @@ int tm6000_start_stream(struct tm6000_core *dev)
ret = usb_clear_halt(dev->udev, pipe);
if (ret < 0) {
printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n",
- ret, __FUNCTION__);
+ ret, __func__);
return ret;
} else
printk(KERN_ERR "tm6000: pipe resetted\n");
@@ -185,7 +185,7 @@ int tm6000_start_feed(struct dvb_demux_feed *feed)
struct dvb_demux *demux = feed->demux;
struct tm6000_core *dev = demux->priv;
struct tm6000_dvb *dvb = dev->dvb;
- printk(KERN_INFO "tm6000: got start feed request %s\n", __FUNCTION__);
+ printk(KERN_INFO "tm6000: got start feed request %s\n", __func__);
mutex_lock(&dvb->mutex);
if (dvb->streams == 0) {
@@ -205,7 +205,7 @@ int tm6000_stop_feed(struct dvb_demux_feed *feed)
struct tm6000_core *dev = demux->priv;
struct tm6000_dvb *dvb = dev->dvb;
- printk(KERN_INFO "tm6000: got stop feed request %s\n", __FUNCTION__);
+ printk(KERN_INFO "tm6000: got stop feed request %s\n", __func__);
mutex_lock(&dvb->mutex);
diff --git a/drivers/staging/tm6000/tm6000-i2c.c b/drivers/staging/tm6000/tm6000-i2c.c
index 8828c120b5c..5a651ea5f60 100644
--- a/drivers/staging/tm6000/tm6000-i2c.c
+++ b/drivers/staging/tm6000/tm6000-i2c.c
@@ -40,7 +40,7 @@ MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
#define i2c_dprintk(lvl, fmt, args...) if (i2c_debug >= lvl) do { \
printk(KERN_DEBUG "%s at %s: " fmt, \
- dev->name, __FUNCTION__ , ##args); } while (0)
+ dev->name, __func__, ##args); } while (0)
static int tm6000_i2c_send_regs(struct tm6000_core *dev, unsigned char addr,
__u8 reg, char *buf, int len)
diff --git a/drivers/staging/tm6000/tm6000-input.c b/drivers/staging/tm6000/tm6000-input.c
index dae2f1fdcc5..70a2c5f557c 100644
--- a/drivers/staging/tm6000/tm6000-input.c
+++ b/drivers/staging/tm6000/tm6000-input.c
@@ -449,9 +449,8 @@ int tm6000_ir_fini(struct tm6000_core *dev)
rc_unregister_device(ir->rc);
- if (ir->int_urb) {
+ if (ir->int_urb)
tm6000_ir_int_stop(dev);
- }
kfree(ir);
dev->ir = NULL;
diff --git a/drivers/staging/tm6000/tm6000-video.c b/drivers/staging/tm6000/tm6000-video.c
index 4264064a727..8d8b939915d 100644
--- a/drivers/staging/tm6000/tm6000-video.c
+++ b/drivers/staging/tm6000/tm6000-video.c
@@ -30,7 +30,6 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/random.h>
-#include <linux/version.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>
@@ -180,9 +179,6 @@ static inline void get_next_buf(struct tm6000_dmaqueue *dma_q,
*buf = list_entry(dma_q->active.next,
struct tm6000_buffer, vb.queue);
- if (!buf)
- return;
-
/* Cleans up buffer - Useful for testing for frame/URB loss */
outp = videobuf_to_vmalloc(&(*buf)->vb);
@@ -777,7 +773,8 @@ buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
}
if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- if (0 != (rc = videobuf_iolock(vq, &buf->vb, NULL)))
+ rc = videobuf_iolock(vq, &buf->vb, NULL);
+ if (rc != 0)
goto fail;
urb_init = 1;
}
@@ -1048,12 +1045,12 @@ static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
if (!res_get(dev, fh, false))
return -EBUSY;
- return (videobuf_streamon(&fh->vb_vidq));
+ return videobuf_streamon(&fh->vb_vidq);
}
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct tm6000_fh *fh=priv;
+ struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1062,15 +1059,15 @@ static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
return -EINVAL;
videobuf_streamoff(&fh->vb_vidq);
- res_free(dev,fh);
+ res_free(dev, fh);
- return (0);
+ return 0;
}
-static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *norm)
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *norm)
{
- int rc=0;
- struct tm6000_fh *fh=priv;
+ int rc = 0;
+ struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
dev->norm = *norm;
@@ -1079,7 +1076,7 @@ static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *norm)
fh->width = dev->width;
fh->height = dev->height;
- if (rc<0)
+ if (rc < 0)
return rc;
v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
@@ -1087,7 +1084,7 @@ static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *norm)
return 0;
}
-static const char *iname [] = {
+static const char *iname[] = {
[TM6000_INPUT_TV] = "Television",
[TM6000_INPUT_COMPOSITE1] = "Composite 1",
[TM6000_INPUT_COMPOSITE2] = "Composite 2",
@@ -1394,10 +1391,10 @@ static int radio_g_input(struct file *filp, void *priv, unsigned int *i)
struct tm6000_fh *fh = priv;
struct tm6000_core *dev = fh->dev;
- if (dev->input !=5)
+ if (dev->input != 5)
return -EINVAL;
- *i = dev->input -5;
+ *i = dev->input - 5;
return 0;
}
@@ -1508,18 +1505,18 @@ static int tm6000_open(struct file *file)
fh->fmt = format_by_fourcc(dev->fourcc);
- tm6000_get_std_res (dev);
+ tm6000_get_std_res(dev);
fh->width = dev->width;
fh->height = dev->height;
dprintk(dev, V4L2_DEBUG_OPEN, "Open: fh=0x%08lx, dev=0x%08lx, "
"dev->vidq=0x%08lx\n",
- (unsigned long)fh,(unsigned long)dev,(unsigned long)&dev->vidq);
+ (unsigned long)fh, (unsigned long)dev, (unsigned long)&dev->vidq);
dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
- "queued=%d\n",list_empty(&dev->vidq.queued));
+ "queued=%d\n", list_empty(&dev->vidq.queued));
dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
- "active=%d\n",list_empty(&dev->vidq.active));
+ "active=%d\n", list_empty(&dev->vidq.active));
/* initialize hardware on analog mode */
rc = tm6000_init_analog_mode(dev);
@@ -1557,7 +1554,7 @@ tm6000_read(struct file *file, char __user *data, size_t count, loff_t *pos)
{
struct tm6000_fh *fh = file->private_data;
- if (fh->type==V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
if (!res_get(fh->dev, fh, true))
return -EBUSY;
@@ -1583,7 +1580,7 @@ tm6000_poll(struct file *file, struct poll_table_struct *wait)
/* streaming capture */
if (list_empty(&fh->vb_vidq.stream))
return POLLERR;
- buf = list_entry(fh->vb_vidq.stream.next,struct tm6000_buffer,vb.stream);
+ buf = list_entry(fh->vb_vidq.stream.next, struct tm6000_buffer, vb.stream);
} else {
/* read() capture */
return videobuf_poll_stream(file, &fh->vb_vidq,
@@ -1699,7 +1696,7 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
struct video_device tm6000_radio_template = {
.name = "tm6000",
.fops = &radio_fops,
- .ioctl_ops = &radio_ioctl_ops,
+ .ioctl_ops = &radio_ioctl_ops,
};
/* -----------------------------------------------------------------
diff --git a/drivers/staging/tm6000/tm6000.h b/drivers/staging/tm6000/tm6000.h
index ae6369b9a90..c56da628dbe 100644
--- a/drivers/staging/tm6000/tm6000.h
+++ b/drivers/staging/tm6000/tm6000.h
@@ -30,8 +30,7 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <media/v4l2-device.h>
-
-
+#include <linux/version.h>
#include <linux/dvb/frontend.h>
#include "dvb_demux.h"
#include "dvb_frontend.h"
diff --git a/drivers/staging/tty/Kconfig b/drivers/staging/tty/Kconfig
deleted file mode 100644
index 77103a07abb..00000000000
--- a/drivers/staging/tty/Kconfig
+++ /dev/null
@@ -1,87 +0,0 @@
-config STALLION
- tristate "Stallion EasyIO or EC8/32 support"
- depends on STALDRV && (ISA || EISA || PCI)
- help
- If you have an EasyIO or EasyConnection 8/32 multiport Stallion
- card, then this is for you; say Y. Make sure to read
- <file:Documentation/serial/stallion.txt>.
-
- To compile this driver as a module, choose M here: the
- module will be called stallion.
-
-config ISTALLION
- tristate "Stallion EC8/64, ONboard, Brumby support"
- depends on STALDRV && (ISA || EISA || PCI)
- help
- If you have an EasyConnection 8/64, ONboard, Brumby or Stallion
- serial multiport card, say Y here. Make sure to read
- <file:Documentation/serial/stallion.txt>.
-
- To compile this driver as a module, choose M here: the
- module will be called istallion.
-
-config DIGIEPCA
- tristate "Digiboard Intelligent Async Support"
- depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
- ---help---
- This is a driver for Digi International's Xx, Xeve, and Xem series
- of cards which provide multiple serial ports. You would need
- something like this to connect more than two modems to your Linux
- box, for instance in order to become a dial-in server. This driver
- supports the original PC (ISA) boards as well as PCI, and EISA. If
- you have a card like this, say Y here and read the file
- <file:Documentation/serial/digiepca.txt>.
-
- To compile this driver as a module, choose M here: the
- module will be called epca.
-
-config RISCOM8
- tristate "SDL RISCom/8 card support"
- depends on SERIAL_NONSTANDARD
- help
- This is a driver for the SDL Communications RISCom/8 multiport card,
- which gives you many serial ports. You would need something like
- this to connect more than two modems to your Linux box, for instance
- in order to become a dial-in server. If you have a card like that,
- say Y here and read the file <file:Documentation/serial/riscom8.txt>.
-
- Also it's possible to say M here and compile this driver as kernel
- loadable module; the module will be called riscom8.
-
-config SPECIALIX
- tristate "Specialix IO8+ card support"
- depends on SERIAL_NONSTANDARD
- help
- This is a driver for the Specialix IO8+ multiport card (both the
- ISA and the PCI version) which gives you many serial ports. You
- would need something like this to connect more than two modems to
- your Linux box, for instance in order to become a dial-in server.
-
- If you have a card like that, say Y here and read the file
- <file:Documentation/serial/specialix.txt>. Also it's possible to say
- M here and compile this driver as kernel loadable module which will be
- called specialix.
-
-config COMPUTONE
- tristate "Computone IntelliPort Plus serial support"
- depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
- ---help---
- This driver supports the entire family of Intelliport II/Plus
- controllers with the exception of the MicroChannel controllers and
- products previous to the Intelliport II. These are multiport cards,
- which give you many serial ports. You would need something like this
- to connect more than two modems to your Linux box, for instance in
- order to become a dial-in server. If you have a card like that, say
- Y here and read <file:Documentation/serial/computone.txt>.
-
- To compile this driver as module, choose M here: the
- module will be called ip2.
-
-config SERIAL167
- bool "CD2401 support for MVME166/7 serial ports"
- depends on MVME16x
- help
- This is the driver for the serial ports on the Motorola MVME166,
- 167, and 172 boards. Everyone using one of these boards should say
- Y here.
-
diff --git a/drivers/staging/tty/Makefile b/drivers/staging/tty/Makefile
deleted file mode 100644
index ac57c105611..00000000000
--- a/drivers/staging/tty/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-obj-$(CONFIG_STALLION) += stallion.o
-obj-$(CONFIG_ISTALLION) += istallion.o
-obj-$(CONFIG_DIGIEPCA) += epca.o
-obj-$(CONFIG_SERIAL167) += serial167.o
-obj-$(CONFIG_SPECIALIX) += specialix.o
-obj-$(CONFIG_RISCOM8) += riscom8.o
-obj-$(CONFIG_COMPUTONE) += ip2/
diff --git a/drivers/staging/tty/TODO b/drivers/staging/tty/TODO
deleted file mode 100644
index 88756453ac6..00000000000
--- a/drivers/staging/tty/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
-These are a few tty/serial drivers that either do not build,
-or work if they do build, or if they seem to work, are for obsolete
-hardware, or are full of unfixable races and no one uses them anymore.
-
-If no one steps up to adopt any of these drivers, they will be removed
-in the 2.6.41 release.
diff --git a/drivers/staging/tty/cd1865.h b/drivers/staging/tty/cd1865.h
deleted file mode 100644
index 8c2ad654b79..00000000000
--- a/drivers/staging/tty/cd1865.h
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * linux/drivers/char/cd1865.h -- Definitions relating to the CD1865
- * for the Specialix IO8+ multiport serial driver.
- *
- * Copyright (C) 1997 Roger Wolff (R.E.Wolff@BitWizard.nl)
- * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com)
- *
- * Specialix pays for the development and support of this driver.
- * Please DO contact io8-linux@specialix.co.uk if you require
- * support.
- *
- * This driver was developed in the BitWizard linux device
- * driver service. If you require a linux device driver for your
- * product, please contact devices@BitWizard.nl for a quote.
- *
- */
-
-/*
- * Definitions for Driving CD180/CD1864/CD1865 based eightport serial cards.
- */
-
-
-/* Values of choice for Interrupt ACKs */
-/* These values are "obligatory" if you use the register based
- * interrupt acknowledgements. See page 99-101 of V2.0 of the CD1865
- * databook */
-#define SX_ACK_MINT 0x75 /* goes to PILR1 */
-#define SX_ACK_TINT 0x76 /* goes to PILR2 */
-#define SX_ACK_RINT 0x77 /* goes to PILR3 */
-
-/* Chip ID (is used when chips ar daisy chained.) */
-#define SX_ID 0x10
-
-/* Definitions for Cirrus Logic CL-CD186x 8-port async mux chip */
-
-#define CD186x_NCH 8 /* Total number of channels */
-#define CD186x_TPC 16 /* Ticks per character */
-#define CD186x_NFIFO 8 /* TX FIFO size */
-
-
-/* Global registers */
-
-#define CD186x_GIVR 0x40 /* Global Interrupt Vector Register */
-#define CD186x_GICR 0x41 /* Global Interrupting Channel Register */
-#define CD186x_PILR1 0x61 /* Priority Interrupt Level Register 1 */
-#define CD186x_PILR2 0x62 /* Priority Interrupt Level Register 2 */
-#define CD186x_PILR3 0x63 /* Priority Interrupt Level Register 3 */
-#define CD186x_CAR 0x64 /* Channel Access Register */
-#define CD186x_SRSR 0x65 /* Channel Access Register */
-#define CD186x_GFRCR 0x6b /* Global Firmware Revision Code Register */
-#define CD186x_PPRH 0x70 /* Prescaler Period Register High */
-#define CD186x_PPRL 0x71 /* Prescaler Period Register Low */
-#define CD186x_RDR 0x78 /* Receiver Data Register */
-#define CD186x_RCSR 0x7a /* Receiver Character Status Register */
-#define CD186x_TDR 0x7b /* Transmit Data Register */
-#define CD186x_EOIR 0x7f /* End of Interrupt Register */
-#define CD186x_MRAR 0x75 /* Modem Request Acknowledge register */
-#define CD186x_TRAR 0x76 /* Transmit Request Acknowledge register */
-#define CD186x_RRAR 0x77 /* Receive Request Acknowledge register */
-#define CD186x_SRCR 0x66 /* Service Request Configuration register */
-
-/* Channel Registers */
-
-#define CD186x_CCR 0x01 /* Channel Command Register */
-#define CD186x_IER 0x02 /* Interrupt Enable Register */
-#define CD186x_COR1 0x03 /* Channel Option Register 1 */
-#define CD186x_COR2 0x04 /* Channel Option Register 2 */
-#define CD186x_COR3 0x05 /* Channel Option Register 3 */
-#define CD186x_CCSR 0x06 /* Channel Control Status Register */
-#define CD186x_RDCR 0x07 /* Receive Data Count Register */
-#define CD186x_SCHR1 0x09 /* Special Character Register 1 */
-#define CD186x_SCHR2 0x0a /* Special Character Register 2 */
-#define CD186x_SCHR3 0x0b /* Special Character Register 3 */
-#define CD186x_SCHR4 0x0c /* Special Character Register 4 */
-#define CD186x_MCOR1 0x10 /* Modem Change Option 1 Register */
-#define CD186x_MCOR2 0x11 /* Modem Change Option 2 Register */
-#define CD186x_MCR 0x12 /* Modem Change Register */
-#define CD186x_RTPR 0x18 /* Receive Timeout Period Register */
-#define CD186x_MSVR 0x28 /* Modem Signal Value Register */
-#define CD186x_MSVRTS 0x29 /* Modem Signal Value Register */
-#define CD186x_MSVDTR 0x2a /* Modem Signal Value Register */
-#define CD186x_RBPRH 0x31 /* Receive Baud Rate Period Register High */
-#define CD186x_RBPRL 0x32 /* Receive Baud Rate Period Register Low */
-#define CD186x_TBPRH 0x39 /* Transmit Baud Rate Period Register High */
-#define CD186x_TBPRL 0x3a /* Transmit Baud Rate Period Register Low */
-
-
-/* Global Interrupt Vector Register (R/W) */
-
-#define GIVR_ITMASK 0x07 /* Interrupt type mask */
-#define GIVR_IT_MODEM 0x01 /* Modem Signal Change Interrupt */
-#define GIVR_IT_TX 0x02 /* Transmit Data Interrupt */
-#define GIVR_IT_RCV 0x03 /* Receive Good Data Interrupt */
-#define GIVR_IT_REXC 0x07 /* Receive Exception Interrupt */
-
-
-/* Global Interrupt Channel Register (R/W) */
-
-#define GICR_CHAN 0x1c /* Channel Number Mask */
-#define GICR_CHAN_OFF 2 /* Channel Number shift */
-
-
-/* Channel Address Register (R/W) */
-
-#define CAR_CHAN 0x07 /* Channel Number Mask */
-#define CAR_A7 0x08 /* A7 Address Extension (unused) */
-
-
-/* Receive Character Status Register (R/O) */
-
-#define RCSR_TOUT 0x80 /* Rx Timeout */
-#define RCSR_SCDET 0x70 /* Special Character Detected Mask */
-#define RCSR_NO_SC 0x00 /* No Special Characters Detected */
-#define RCSR_SC_1 0x10 /* Special Char 1 (or 1 & 3) Detected */
-#define RCSR_SC_2 0x20 /* Special Char 2 (or 2 & 4) Detected */
-#define RCSR_SC_3 0x30 /* Special Char 3 Detected */
-#define RCSR_SC_4 0x40 /* Special Char 4 Detected */
-#define RCSR_BREAK 0x08 /* Break has been detected */
-#define RCSR_PE 0x04 /* Parity Error */
-#define RCSR_FE 0x02 /* Frame Error */
-#define RCSR_OE 0x01 /* Overrun Error */
-
-
-/* Channel Command Register (R/W) (commands in groups can be OR-ed) */
-
-#define CCR_HARDRESET 0x81 /* Reset the chip */
-
-#define CCR_SOFTRESET 0x80 /* Soft Channel Reset */
-
-#define CCR_CORCHG1 0x42 /* Channel Option Register 1 Changed */
-#define CCR_CORCHG2 0x44 /* Channel Option Register 2 Changed */
-#define CCR_CORCHG3 0x48 /* Channel Option Register 3 Changed */
-
-#define CCR_SSCH1 0x21 /* Send Special Character 1 */
-
-#define CCR_SSCH2 0x22 /* Send Special Character 2 */
-
-#define CCR_SSCH3 0x23 /* Send Special Character 3 */
-
-#define CCR_SSCH4 0x24 /* Send Special Character 4 */
-
-#define CCR_TXEN 0x18 /* Enable Transmitter */
-#define CCR_RXEN 0x12 /* Enable Receiver */
-
-#define CCR_TXDIS 0x14 /* Disable Transmitter */
-#define CCR_RXDIS 0x11 /* Disable Receiver */
-
-
-/* Interrupt Enable Register (R/W) */
-
-#define IER_DSR 0x80 /* Enable interrupt on DSR change */
-#define IER_CD 0x40 /* Enable interrupt on CD change */
-#define IER_CTS 0x20 /* Enable interrupt on CTS change */
-#define IER_RXD 0x10 /* Enable interrupt on Receive Data */
-#define IER_RXSC 0x08 /* Enable interrupt on Receive Spec. Char */
-#define IER_TXRDY 0x04 /* Enable interrupt on TX FIFO empty */
-#define IER_TXEMPTY 0x02 /* Enable interrupt on TX completely empty */
-#define IER_RET 0x01 /* Enable interrupt on RX Exc. Timeout */
-
-
-/* Channel Option Register 1 (R/W) */
-
-#define COR1_ODDP 0x80 /* Odd Parity */
-#define COR1_PARMODE 0x60 /* Parity Mode mask */
-#define COR1_NOPAR 0x00 /* No Parity */
-#define COR1_FORCEPAR 0x20 /* Force Parity */
-#define COR1_NORMPAR 0x40 /* Normal Parity */
-#define COR1_IGNORE 0x10 /* Ignore Parity on RX */
-#define COR1_STOPBITS 0x0c /* Number of Stop Bits */
-#define COR1_1SB 0x00 /* 1 Stop Bit */
-#define COR1_15SB 0x04 /* 1.5 Stop Bits */
-#define COR1_2SB 0x08 /* 2 Stop Bits */
-#define COR1_CHARLEN 0x03 /* Character Length */
-#define COR1_5BITS 0x00 /* 5 bits */
-#define COR1_6BITS 0x01 /* 6 bits */
-#define COR1_7BITS 0x02 /* 7 bits */
-#define COR1_8BITS 0x03 /* 8 bits */
-
-
-/* Channel Option Register 2 (R/W) */
-
-#define COR2_IXM 0x80 /* Implied XON mode */
-#define COR2_TXIBE 0x40 /* Enable In-Band (XON/XOFF) Flow Control */
-#define COR2_ETC 0x20 /* Embedded Tx Commands Enable */
-#define COR2_LLM 0x10 /* Local Loopback Mode */
-#define COR2_RLM 0x08 /* Remote Loopback Mode */
-#define COR2_RTSAO 0x04 /* RTS Automatic Output Enable */
-#define COR2_CTSAE 0x02 /* CTS Automatic Enable */
-#define COR2_DSRAE 0x01 /* DSR Automatic Enable */
-
-
-/* Channel Option Register 3 (R/W) */
-
-#define COR3_XONCH 0x80 /* XON is a pair of characters (1 & 3) */
-#define COR3_XOFFCH 0x40 /* XOFF is a pair of characters (2 & 4) */
-#define COR3_FCT 0x20 /* Flow-Control Transparency Mode */
-#define COR3_SCDE 0x10 /* Special Character Detection Enable */
-#define COR3_RXTH 0x0f /* RX FIFO Threshold value (1-8) */
-
-
-/* Channel Control Status Register (R/O) */
-
-#define CCSR_RXEN 0x80 /* Receiver Enabled */
-#define CCSR_RXFLOFF 0x40 /* Receive Flow Off (XOFF was sent) */
-#define CCSR_RXFLON 0x20 /* Receive Flow On (XON was sent) */
-#define CCSR_TXEN 0x08 /* Transmitter Enabled */
-#define CCSR_TXFLOFF 0x04 /* Transmit Flow Off (got XOFF) */
-#define CCSR_TXFLON 0x02 /* Transmit Flow On (got XON) */
-
-
-/* Modem Change Option Register 1 (R/W) */
-
-#define MCOR1_DSRZD 0x80 /* Detect 0->1 transition of DSR */
-#define MCOR1_CDZD 0x40 /* Detect 0->1 transition of CD */
-#define MCOR1_CTSZD 0x20 /* Detect 0->1 transition of CTS */
-#define MCOR1_DTRTH 0x0f /* Auto DTR flow control Threshold (1-8) */
-#define MCOR1_NODTRFC 0x0 /* Automatic DTR flow control disabled */
-
-
-/* Modem Change Option Register 2 (R/W) */
-
-#define MCOR2_DSROD 0x80 /* Detect 1->0 transition of DSR */
-#define MCOR2_CDOD 0x40 /* Detect 1->0 transition of CD */
-#define MCOR2_CTSOD 0x20 /* Detect 1->0 transition of CTS */
-
-/* Modem Change Register (R/W) */
-
-#define MCR_DSRCHG 0x80 /* DSR Changed */
-#define MCR_CDCHG 0x40 /* CD Changed */
-#define MCR_CTSCHG 0x20 /* CTS Changed */
-
-
-/* Modem Signal Value Register (R/W) */
-
-#define MSVR_DSR 0x80 /* Current state of DSR input */
-#define MSVR_CD 0x40 /* Current state of CD input */
-#define MSVR_CTS 0x20 /* Current state of CTS input */
-#define MSVR_DTR 0x02 /* Current state of DTR output */
-#define MSVR_RTS 0x01 /* Current state of RTS output */
-
-
-/* Escape characters */
-
-#define CD186x_C_ESC 0x00 /* Escape character */
-#define CD186x_C_SBRK 0x81 /* Start sending BREAK */
-#define CD186x_C_DELAY 0x82 /* Delay output */
-#define CD186x_C_EBRK 0x83 /* Stop sending BREAK */
-
-#define SRSR_RREQint 0x10 /* This chip wants "rec" serviced */
-#define SRSR_TREQint 0x04 /* This chip wants "transmit" serviced */
-#define SRSR_MREQint 0x01 /* This chip wants "mdm change" serviced */
-
-
-
-#define SRCR_PKGTYPE 0x80
-#define SRCR_REGACKEN 0x40
-#define SRCR_DAISYEN 0x20
-#define SRCR_GLOBPRI 0x10
-#define SRCR_UNFAIR 0x08
-#define SRCR_AUTOPRI 0x02
-#define SRCR_PRISEL 0x01
-
-
diff --git a/drivers/staging/tty/digi1.h b/drivers/staging/tty/digi1.h
deleted file mode 100644
index 94d4eab5d3c..00000000000
--- a/drivers/staging/tty/digi1.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* Definitions for DigiBoard ditty(1) command. */
-
-#if !defined(TIOCMODG)
-#define TIOCMODG (('d'<<8) | 250) /* get modem ctrl state */
-#define TIOCMODS (('d'<<8) | 251) /* set modem ctrl state */
-#endif
-
-#if !defined(TIOCMSET)
-#define TIOCMSET (('d'<<8) | 252) /* set modem ctrl state */
-#define TIOCMGET (('d'<<8) | 253) /* set modem ctrl state */
-#endif
-
-#if !defined(TIOCMBIC)
-#define TIOCMBIC (('d'<<8) | 254) /* set modem ctrl state */
-#define TIOCMBIS (('d'<<8) | 255) /* set modem ctrl state */
-#endif
-
-#if !defined(TIOCSDTR)
-#define TIOCSDTR (('e'<<8) | 0) /* set DTR */
-#define TIOCCDTR (('e'<<8) | 1) /* clear DTR */
-#endif
-
-/************************************************************************
- * Ioctl command arguments for DIGI parameters.
- ************************************************************************/
-#define DIGI_GETA (('e'<<8) | 94) /* Read params */
-
-#define DIGI_SETA (('e'<<8) | 95) /* Set params */
-#define DIGI_SETAW (('e'<<8) | 96) /* Drain & set params */
-#define DIGI_SETAF (('e'<<8) | 97) /* Drain, flush & set params */
-
-#define DIGI_GETFLOW (('e'<<8) | 99) /* Get startc/stopc flow */
- /* control characters */
-#define DIGI_SETFLOW (('e'<<8) | 100) /* Set startc/stopc flow */
- /* control characters */
-#define DIGI_GETAFLOW (('e'<<8) | 101) /* Get Aux. startc/stopc */
- /* flow control chars */
-#define DIGI_SETAFLOW (('e'<<8) | 102) /* Set Aux. startc/stopc */
- /* flow control chars */
-
-#define DIGI_GETINFO (('e'<<8) | 103) /* Fill in digi_info */
-#define DIGI_POLLER (('e'<<8) | 104) /* Turn on/off poller */
-#define DIGI_INIT (('e'<<8) | 105) /* Allow things to run. */
-
-struct digiflow_struct
-{
- unsigned char startc; /* flow cntl start char */
- unsigned char stopc; /* flow cntl stop char */
-};
-
-typedef struct digiflow_struct digiflow_t;
-
-
-/************************************************************************
- * Values for digi_flags
- ************************************************************************/
-#define DIGI_IXON 0x0001 /* Handle IXON in the FEP */
-#define DIGI_FAST 0x0002 /* Fast baud rates */
-#define RTSPACE 0x0004 /* RTS input flow control */
-#define CTSPACE 0x0008 /* CTS output flow control */
-#define DSRPACE 0x0010 /* DSR output flow control */
-#define DCDPACE 0x0020 /* DCD output flow control */
-#define DTRPACE 0x0040 /* DTR input flow control */
-#define DIGI_FORCEDCD 0x0100 /* Force carrier */
-#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
-#define DIGI_AIXON 0x0400 /* Aux flow control in fep */
-
-
-/************************************************************************
- * Values for digiDload
- ************************************************************************/
-#define NORMAL 0
-#define PCI_CTL 1
-
-#define SIZE8 0
-#define SIZE16 1
-#define SIZE32 2
-
-/************************************************************************
- * Structure used with ioctl commands for DIGI parameters.
- ************************************************************************/
-struct digi_struct
-{
- unsigned short digi_flags; /* Flags (see above) */
-};
-
-typedef struct digi_struct digi_t;
-
-struct digi_info
-{
- unsigned long board; /* Which board is this ? */
- unsigned char status; /* Alive or dead */
- unsigned char type; /* see epca.h */
- unsigned char subtype; /* For future XEM, XR, etc ... */
- unsigned short numports; /* Number of ports configured */
- unsigned char *port; /* I/O Address */
- unsigned char *membase; /* DPR Address */
- unsigned char *version; /* For future ... */
- unsigned short windowData; /* For future ... */
-} ;
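The ioctl numbers above use the old (letter << 8) | index encoding, and DIGI_GETA/DIGI_SETAW move a digi_t in and out of the driver. A hedged userspace sketch of how a ditty-style tool would flip a flag (device path and helper name are illustrative):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	/* digi1.h definitions (digi_t, DIGI_GETA, DIGI_SETAW, DIGI_ALTPIN) assumed */

	static int set_altpin(const char *dev)
	{
		digi_t d;
		int fd = open(dev, O_RDWR | O_NONBLOCK);

		if (fd < 0)
			return -1;
		if (ioctl(fd, DIGI_GETA, &d) == 0) {
			d.digi_flags |= DIGI_ALTPIN;	/* alternate RJ-45 pinout */
			ioctl(fd, DIGI_SETAW, &d);	/* drain output, then apply */
		}
		close(fd);
		return 0;
	}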
diff --git a/drivers/staging/tty/digiFep1.h b/drivers/staging/tty/digiFep1.h
deleted file mode 100644
index 3c1f1922c79..00000000000
--- a/drivers/staging/tty/digiFep1.h
+++ /dev/null
@@ -1,136 +0,0 @@
-
-#define CSTART 0x400L
-#define CMAX 0x800L
-#define ISTART 0x800L
-#define IMAX 0xC00L
-#define CIN 0xD10L
-#define GLOBAL 0xD10L
-#define EIN 0xD18L
-#define FEPSTAT 0xD20L
-#define CHANSTRUCT 0x1000L
-#define RXTXBUF 0x4000L
-
-
-struct global_data
-{
- u16 cin;
- u16 cout;
- u16 cstart;
- u16 cmax;
- u16 ein;
- u16 eout;
- u16 istart;
- u16 imax;
-};
-
-
-struct board_chan
-{
- u32 filler1;
- u32 filler2;
- u16 tseg;
- u16 tin;
- u16 tout;
- u16 tmax;
-
- u16 rseg;
- u16 rin;
- u16 rout;
- u16 rmax;
-
- u16 tlow;
- u16 rlow;
- u16 rhigh;
- u16 incr;
-
- u16 etime;
- u16 edelay;
- unchar *dev;
-
- u16 iflag;
- u16 oflag;
- u16 cflag;
- u16 gmask;
-
- u16 col;
- u16 delay;
- u16 imask;
- u16 tflush;
-
- u32 filler3;
- u32 filler4;
- u32 filler5;
- u32 filler6;
-
- u8 num;
- u8 ract;
- u8 bstat;
- u8 tbusy;
- u8 iempty;
- u8 ilow;
- u8 idata;
- u8 eflag;
-
- u8 tflag;
- u8 rflag;
- u8 xmask;
- u8 xval;
- u8 mstat;
- u8 mchange;
- u8 mint;
- u8 lstat;
-
- u8 mtran;
- u8 orun;
- u8 startca;
- u8 stopca;
- u8 startc;
- u8 stopc;
- u8 vnext;
- u8 hflow;
-
- u8 fillc;
- u8 ochar;
- u8 omask;
-
- u8 filler7;
- u8 filler8[28];
-};
-
-
-#define SRXLWATER 0xE0
-#define SRXHWATER 0xE1
-#define STOUT 0xE2
-#define PAUSETX 0xE3
-#define RESUMETX 0xE4
-#define SAUXONOFFC 0xE6
-#define SENDBREAK 0xE8
-#define SETMODEM 0xE9
-#define SETIFLAGS 0xEA
-#define SONOFFC 0xEB
-#define STXLWATER 0xEC
-#define PAUSERX 0xEE
-#define RESUMERX 0xEF
-#define SETBUFFER 0xF2
-#define SETCOOKED 0xF3
-#define SETHFLOW 0xF4
-#define SETCTRLFLAGS 0xF5
-#define SETVNEXT 0xF6
-
-
-
-#define BREAK_IND 0x01
-#define LOWTX_IND 0x02
-#define EMPTYTX_IND 0x04
-#define DATA_IND 0x08
-#define MODEMCHG_IND 0x20
-
-#define FEP_HUPCL 0002000
-#if 0
-#define RTS 0x02
-#define CD 0x08
-#define DSR 0x10
-#define CTS 0x20
-#define RI 0x40
-#define DTR 0x80
-#endif
diff --git a/drivers/staging/tty/digiPCI.h b/drivers/staging/tty/digiPCI.h
deleted file mode 100644
index 6ca7819e506..00000000000
--- a/drivers/staging/tty/digiPCI.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*************************************************************************
- * Defines and structure definitions for PCI BIOS Interface
- *************************************************************************/
-#define PCIMAX 32 /* maximum number of PCI boards */
-
-
-#define PCI_VENDOR_DIGI 0x114F
-#define PCI_DEVICE_EPC 0x0002
-#define PCI_DEVICE_RIGHTSWITCH 0x0003 /* For testing */
-#define PCI_DEVICE_XEM 0x0004
-#define PCI_DEVICE_XR 0x0005
-#define PCI_DEVICE_CX 0x0006
-#define PCI_DEVICE_XRJ 0x0009 /* Jupiter boards with */
-#define PCI_DEVICE_EPCJ 0x000a /* PLX 9060 chip for PCI */
-
-
-/*
- * On the PCI boards, there is no IO space allocated
- * The I/O registers will be in the first 3 bytes of the
- * upper 2MB of the 4MB memory space. The board memory
- * will be mapped into the low 2MB of the 4MB memory space
- */
-
-/* Potential location of PCI Bios from E0000 to FFFFF*/
-#define PCI_BIOS_SIZE 0x00020000
-
-/* Size of Memory and I/O for PCI (4MB) */
-#define PCI_RAM_SIZE 0x00400000
-
-/* Size of Memory (2MB) */
-#define PCI_MEM_SIZE 0x00200000
-
-/* Offset of I/0 in Memory (2MB) */
-#define PCI_IO_OFFSET 0x00200000
-
-#define MEMOUTB(basemem, pnum, setmemval) *(caddr_t)((basemem) + ( PCI_IO_OFFSET | pnum << 4 | pnum )) = (setmemval)
-#define MEMINB(basemem, pnum) *(caddr_t)((basemem) + (PCI_IO_OFFSET | pnum << 4 | pnum )) /* for PCI I/O */
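As a worked example of the addressing these macros encode (values are illustrative): for port number 2, PCI_IO_OFFSET | 2 << 4 | 2 is 0x00200022, so the register is accessed at basemem + 0x00200022, i.e. inside the upper 2MB I/O window described above. The same offset as a sketch helper:

	static inline unsigned long digi_pci_io_offset(unsigned int pnum)
	{
		/* << binds tighter than |, so this matches the macros above */
		return PCI_IO_OFFSET | (pnum << 4) | pnum;
	}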
-
-
-
-
-
diff --git a/drivers/staging/tty/epca.c b/drivers/staging/tty/epca.c
deleted file mode 100644
index 7f1369e5b41..00000000000
--- a/drivers/staging/tty/epca.c
+++ /dev/null
@@ -1,2784 +0,0 @@
-/*
- Copyright (C) 1996 Digi International.
-
- For technical support please email digiLinux@dgii.com or
- call Digi tech support at (612) 912-3456
-
- ** This driver is no longer supported by Digi **
-
- Much of this design and code came from epca.c which was
- copyright (C) 1994, 1995 Troy De Jongh, and subsequently
- modified by David Nugent, Christoph Lameter, Mike McLagan.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-/* See README.epca for change history --DAT*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/serial.h>
-#include <linux/delay.h>
-#include <linux/ctype.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include "digiPCI.h"
-
-
-#include "digi1.h"
-#include "digiFep1.h"
-#include "epca.h"
-#include "epcaconfig.h"
-
-#define VERSION "1.3.0.1-LK2.6"
-
-/* This major needs to be submitted to Linux to join the majors list */
-#define DIGIINFOMAJOR 35 /* For Digi specific ioctl */
-
-
-#define MAXCARDS 7
-#define epcaassert(x, msg) if (!(x)) epca_error(__LINE__, msg)
-
-#define PFX "epca: "
-
-static int nbdevs, num_cards, liloconfig;
-static int digi_poller_inhibited = 1 ;
-
-static int setup_error_code;
-static int invalid_lilo_config;
-
-/*
- * The ISA boards do window flipping into the same spaces so it's only sane with
- * a single lock. It's still pretty efficient. This lock guards the hardware
- * and the tty_port lock guards the kernel side stuff like use counts. Take
- * this lock inside the port lock if you must take both.
- */
-static DEFINE_SPINLOCK(epca_lock);
-
-/* MAXBOARDS is typically 12, but ISA and EISA cards are restricted
- to 7 below. */
-static struct board_info boards[MAXBOARDS];
-
-static struct tty_driver *pc_driver;
-static struct tty_driver *pc_info;
-
-/* ------------------ Begin Digi specific structures -------------------- */
-
-/*
- * digi_channels represents an array of structures that keep track of each
- * channel of the Digi product. Information such as transmit and receive
- * pointers, termio data, and signal definitions (DTR, CTS, etc ...) are stored
- * here. This structure is NOT used to overlay the card's physical channel
- * structure.
- */
-static struct channel digi_channels[MAX_ALLOC];
-
-/*
- * card_ptr is an array used to hold the address of the first channel structure
- * of each card. This array will hold the addresses of various channels located
- * in digi_channels.
- */
-static struct channel *card_ptr[MAXCARDS];
-
-static struct timer_list epca_timer;
-
-/*
- * Begin generic memory functions. These functions will be aliases for (point
- * at) more specific functions, dependent on the board being configured.
- */
-static void memwinon(struct board_info *b, unsigned int win);
-static void memwinoff(struct board_info *b, unsigned int win);
-static void globalwinon(struct channel *ch);
-static void rxwinon(struct channel *ch);
-static void txwinon(struct channel *ch);
-static void memoff(struct channel *ch);
-static void assertgwinon(struct channel *ch);
-static void assertmemoff(struct channel *ch);
-
-/* ---- Begin more 'specific' memory functions for cx_like products --- */
-
-static void pcxem_memwinon(struct board_info *b, unsigned int win);
-static void pcxem_memwinoff(struct board_info *b, unsigned int win);
-static void pcxem_globalwinon(struct channel *ch);
-static void pcxem_rxwinon(struct channel *ch);
-static void pcxem_txwinon(struct channel *ch);
-static void pcxem_memoff(struct channel *ch);
-
-/* ------ Begin more 'specific' memory functions for the pcxe ------- */
-
-static void pcxe_memwinon(struct board_info *b, unsigned int win);
-static void pcxe_memwinoff(struct board_info *b, unsigned int win);
-static void pcxe_globalwinon(struct channel *ch);
-static void pcxe_rxwinon(struct channel *ch);
-static void pcxe_txwinon(struct channel *ch);
-static void pcxe_memoff(struct channel *ch);
-
-/* ---- Begin more 'specific' memory functions for the pc64xe and pcxi ---- */
-/* Note : pc64xe and pcxi share the same windowing routines */
-
-static void pcxi_memwinon(struct board_info *b, unsigned int win);
-static void pcxi_memwinoff(struct board_info *b, unsigned int win);
-static void pcxi_globalwinon(struct channel *ch);
-static void pcxi_rxwinon(struct channel *ch);
-static void pcxi_txwinon(struct channel *ch);
-static void pcxi_memoff(struct channel *ch);
-
-/* - Begin 'specific' do nothing memory functions needed for some cards - */
-
-static void dummy_memwinon(struct board_info *b, unsigned int win);
-static void dummy_memwinoff(struct board_info *b, unsigned int win);
-static void dummy_globalwinon(struct channel *ch);
-static void dummy_rxwinon(struct channel *ch);
-static void dummy_txwinon(struct channel *ch);
-static void dummy_memoff(struct channel *ch);
-static void dummy_assertgwinon(struct channel *ch);
-static void dummy_assertmemoff(struct channel *ch);
-
-static struct channel *verifyChannel(struct tty_struct *);
-static void pc_sched_event(struct channel *, int);
-static void epca_error(int, char *);
-static void pc_close(struct tty_struct *, struct file *);
-static void shutdown(struct channel *, struct tty_struct *tty);
-static void pc_hangup(struct tty_struct *);
-static int pc_write_room(struct tty_struct *);
-static int pc_chars_in_buffer(struct tty_struct *);
-static void pc_flush_buffer(struct tty_struct *);
-static void pc_flush_chars(struct tty_struct *);
-static int pc_open(struct tty_struct *, struct file *);
-static void post_fep_init(unsigned int crd);
-static void epcapoll(unsigned long);
-static void doevent(int);
-static void fepcmd(struct channel *, int, int, int, int, int);
-static unsigned termios2digi_h(struct channel *ch, unsigned);
-static unsigned termios2digi_i(struct channel *ch, unsigned);
-static unsigned termios2digi_c(struct channel *ch, unsigned);
-static void epcaparam(struct tty_struct *, struct channel *);
-static void receive_data(struct channel *, struct tty_struct *tty);
-static int pc_ioctl(struct tty_struct *,
- unsigned int, unsigned long);
-static int info_ioctl(struct tty_struct *,
- unsigned int, unsigned long);
-static void pc_set_termios(struct tty_struct *, struct ktermios *);
-static void do_softint(struct work_struct *work);
-static void pc_stop(struct tty_struct *);
-static void pc_start(struct tty_struct *);
-static void pc_throttle(struct tty_struct *tty);
-static void pc_unthrottle(struct tty_struct *tty);
-static int pc_send_break(struct tty_struct *tty, int msec);
-static void setup_empty_event(struct tty_struct *tty, struct channel *ch);
-
-static int pc_write(struct tty_struct *, const unsigned char *, int);
-static int pc_init(void);
-static int init_PCI(void);
-
-/*
- * Table of functions for each board to handle memory. Maintaining parallelism
- * is a *very* good idea here. The idea is for the runtime code to blindly call
- * these functions, not knowing/caring about the underlying hardware. This
- * stuff should contain no conditionals; if more functionality is needed a
- * different entry should be established. These calls are the interface calls
- * and are the only functions that should be accessed. Anyone caught making
- * direct calls deserves what they get.
- */
-static void memwinon(struct board_info *b, unsigned int win)
-{
- b->memwinon(b, win);
-}
-
-static void memwinoff(struct board_info *b, unsigned int win)
-{
- b->memwinoff(b, win);
-}
-
-static void globalwinon(struct channel *ch)
-{
- ch->board->globalwinon(ch);
-}
-
-static void rxwinon(struct channel *ch)
-{
- ch->board->rxwinon(ch);
-}
-
-static void txwinon(struct channel *ch)
-{
- ch->board->txwinon(ch);
-}
-
-static void memoff(struct channel *ch)
-{
- ch->board->memoff(ch);
-}
-static void assertgwinon(struct channel *ch)
-{
- ch->board->assertgwinon(ch);
-}
-
-static void assertmemoff(struct channel *ch)
-{
- ch->board->assertmemoff(ch);
-}
-
-/* PCXEM windowing is the same as that used in the PCXR and CX series cards. */
-static void pcxem_memwinon(struct board_info *b, unsigned int win)
-{
- outb_p(FEPWIN | win, b->port + 1);
-}
-
-static void pcxem_memwinoff(struct board_info *b, unsigned int win)
-{
- outb_p(0, b->port + 1);
-}
-
-static void pcxem_globalwinon(struct channel *ch)
-{
- outb_p(FEPWIN, (int)ch->board->port + 1);
-}
-
-static void pcxem_rxwinon(struct channel *ch)
-{
- outb_p(ch->rxwin, (int)ch->board->port + 1);
-}
-
-static void pcxem_txwinon(struct channel *ch)
-{
- outb_p(ch->txwin, (int)ch->board->port + 1);
-}
-
-static void pcxem_memoff(struct channel *ch)
-{
- outb_p(0, (int)ch->board->port + 1);
-}
-
-/* ----------------- Begin pcxe memory window stuff ------------------ */
-static void pcxe_memwinon(struct board_info *b, unsigned int win)
-{
- outb_p(FEPWIN | win, b->port + 1);
-}
-
-static void pcxe_memwinoff(struct board_info *b, unsigned int win)
-{
- outb_p(inb(b->port) & ~FEPMEM, b->port + 1);
- outb_p(0, b->port + 1);
-}
-
-static void pcxe_globalwinon(struct channel *ch)
-{
- outb_p(FEPWIN, (int)ch->board->port + 1);
-}
-
-static void pcxe_rxwinon(struct channel *ch)
-{
- outb_p(ch->rxwin, (int)ch->board->port + 1);
-}
-
-static void pcxe_txwinon(struct channel *ch)
-{
- outb_p(ch->txwin, (int)ch->board->port + 1);
-}
-
-static void pcxe_memoff(struct channel *ch)
-{
- outb_p(0, (int)ch->board->port);
- outb_p(0, (int)ch->board->port + 1);
-}
-
-/* ------------- Begin pc64xe and pcxi memory window stuff -------------- */
-static void pcxi_memwinon(struct board_info *b, unsigned int win)
-{
- outb_p(inb(b->port) | FEPMEM, b->port);
-}
-
-static void pcxi_memwinoff(struct board_info *b, unsigned int win)
-{
- outb_p(inb(b->port) & ~FEPMEM, b->port);
-}
-
-static void pcxi_globalwinon(struct channel *ch)
-{
- outb_p(FEPMEM, ch->board->port);
-}
-
-static void pcxi_rxwinon(struct channel *ch)
-{
- outb_p(FEPMEM, ch->board->port);
-}
-
-static void pcxi_txwinon(struct channel *ch)
-{
- outb_p(FEPMEM, ch->board->port);
-}
-
-static void pcxi_memoff(struct channel *ch)
-{
- outb_p(0, ch->board->port);
-}
-
-static void pcxi_assertgwinon(struct channel *ch)
-{
- epcaassert(inb(ch->board->port) & FEPMEM, "Global memory off");
-}
-
-static void pcxi_assertmemoff(struct channel *ch)
-{
- epcaassert(!(inb(ch->board->port) & FEPMEM), "Memory on");
-}
-
-/*
- * Not all of the cards need specific memory windowing routines. Some cards
- * (Such as PCI) need no windowing routines at all. We provide these
- * do-nothing routines so that the same code base can be used. The driver will
- * ALWAYS call a windowing routine if it thinks it needs to; regardless of the
- * card. However, dependent on the card the routine may or may not do anything.
- */
-static void dummy_memwinon(struct board_info *b, unsigned int win)
-{
-}
-
-static void dummy_memwinoff(struct board_info *b, unsigned int win)
-{
-}
-
-static void dummy_globalwinon(struct channel *ch)
-{
-}
-
-static void dummy_rxwinon(struct channel *ch)
-{
-}
-
-static void dummy_txwinon(struct channel *ch)
-{
-}
-
-static void dummy_memoff(struct channel *ch)
-{
-}
-
-static void dummy_assertgwinon(struct channel *ch)
-{
-}
-
-static void dummy_assertmemoff(struct channel *ch)
-{
-}
-
-static struct channel *verifyChannel(struct tty_struct *tty)
-{
- /*
- * This routine basically provides a sanity check. It ensures that the
- * channel returned is within the proper range of addresses as well as
- * properly initialized. If some bogus info gets passed in
- * through tty->driver_data this should catch it.
- */
- if (tty) {
- struct channel *ch = tty->driver_data;
- if (ch >= &digi_channels[0] && ch < &digi_channels[nbdevs]) {
- if (ch->magic == EPCA_MAGIC)
- return ch;
- }
- }
- return NULL;
-}
-
-static void pc_sched_event(struct channel *ch, int event)
-{
- /*
- * We call this to schedule interrupt processing on some event. The
- * kernel sees our request and calls the related routine in OUR driver.
- */
- ch->event |= 1 << event;
- schedule_work(&ch->tqueue);
-}
-
-static void epca_error(int line, char *msg)
-{
- printk(KERN_ERR "epca_error (Digi): line = %d %s\n", line, msg);
-}
-
-static void pc_close(struct tty_struct *tty, struct file *filp)
-{
- struct channel *ch;
- struct tty_port *port;
- /*
- * verifyChannel returns the channel from the tty struct if it is
- * valid. This serves as a sanity check.
- */
- ch = verifyChannel(tty);
- if (ch == NULL)
- return;
- port = &ch->port;
-
- if (tty_port_close_start(port, tty, filp) == 0)
- return;
-
- pc_flush_buffer(tty);
- shutdown(ch, tty);
-
- tty_port_close_end(port, tty);
- ch->event = 0; /* FIXME: review ch->event locking */
- tty_port_tty_set(port, NULL);
-}
-
-static void shutdown(struct channel *ch, struct tty_struct *tty)
-{
- unsigned long flags;
- struct board_chan __iomem *bc;
- struct tty_port *port = &ch->port;
-
- if (!(port->flags & ASYNC_INITIALIZED))
- return;
-
- spin_lock_irqsave(&epca_lock, flags);
-
- globalwinon(ch);
- bc = ch->brdchan;
-
- /*
- * In order for an event to be generated on the receipt of data the
- * idata flag must be set. Since we are shutting down, this is not
- * necessary; clear this flag.
- */
- if (bc)
- writeb(0, &bc->idata);
-
- /* If we're a modem control device and HUPCL is on, drop RTS & DTR. */
- if (tty->termios->c_cflag & HUPCL) {
- ch->omodem &= ~(ch->m_rts | ch->m_dtr);
- fepcmd(ch, SETMODEM, 0, ch->m_dtr | ch->m_rts, 10, 1);
- }
- memoff(ch);
-
- /*
- * The channel has officially been closed. The next time it is opened it
- * will have to be reinitialized. Set a flag to indicate this.
- */
- /* Prevent future Digi programmed interrupts from coming active */
- port->flags &= ~ASYNC_INITIALIZED;
- spin_unlock_irqrestore(&epca_lock, flags);
-}
-
-static void pc_hangup(struct tty_struct *tty)
-{
- struct channel *ch;
-
- /*
- * verifyChannel returns the channel from the tty struct if it is
- * valid. This serves as a sanity check.
- */
- ch = verifyChannel(tty);
- if (ch != NULL) {
- pc_flush_buffer(tty);
- tty_ldisc_flush(tty);
- shutdown(ch, tty);
-
- ch->event = 0; /* FIXME: review locking of ch->event */
- tty_port_hangup(&ch->port);
- }
-}
-
-static int pc_write(struct tty_struct *tty,
- const unsigned char *buf, int bytesAvailable)
-{
- unsigned int head, tail;
- int dataLen;
- int size;
- int amountCopied;
- struct channel *ch;
- unsigned long flags;
- int remain;
- struct board_chan __iomem *bc;
-
- /*
- * pc_write is primarily called directly by the kernel routine
- * tty_write (Though it can also be called by put_char) found in
- * tty_io.c. pc_write is passed a line discipline buffer where the data
- * to be written out is stored. The line discipline implementation
- * itself is done at the kernel level and is not brought into the
- * driver.
- */
-
- /*
- * verifyChannel returns the channel from the tty struct if it is
- * valid. This serves as a sanity check.
- */
- ch = verifyChannel(tty);
- if (ch == NULL)
- return 0;
-
- /* Make a pointer to the channel data structure found on the board. */
- bc = ch->brdchan;
- size = ch->txbufsize;
- amountCopied = 0;
-
- spin_lock_irqsave(&epca_lock, flags);
- globalwinon(ch);
-
- head = readw(&bc->tin) & (size - 1);
- tail = readw(&bc->tout);
-
- if (tail != readw(&bc->tout))
- tail = readw(&bc->tout);
- tail &= (size - 1);
-
- if (head >= tail) {
- /* head has not wrapped */
- /*
- * remain (much like dataLen above) represents the total amount
- * of space available on the card for data. Here dataLen
- * represents the space existing between the head pointer and
- * the end of buffer. This is important because a memcpy cannot
- * be told to automatically wrap around when it hits the buffer
- * end.
- */
- dataLen = size - head;
- remain = size - (head - tail) - 1;
- } else {
- /* head has wrapped around */
- remain = tail - head - 1;
- dataLen = remain;
- }
- /*
- * Check the space on the card. If we have more data than space, reduce
- * the amount of data to fit the space.
- */
- bytesAvailable = min(remain, bytesAvailable);
- txwinon(ch);
- while (bytesAvailable > 0) {
- /* there is data to copy onto card */
-
- /*
- * If head is not wrapped, the below will make sure the first
- * data copy fills to the end of card buffer.
- */
- dataLen = min(bytesAvailable, dataLen);
- memcpy_toio(ch->txptr + head, buf, dataLen);
- buf += dataLen;
- head += dataLen;
- amountCopied += dataLen;
- bytesAvailable -= dataLen;
-
- if (head >= size) {
- head = 0;
- dataLen = tail;
- }
- }
- ch->statusflags |= TXBUSY;
- globalwinon(ch);
- writew(head, &bc->tin);
-
- if ((ch->statusflags & LOWWAIT) == 0) {
- ch->statusflags |= LOWWAIT;
- writeb(1, &bc->ilow);
- }
- memoff(ch);
- spin_unlock_irqrestore(&epca_lock, flags);
- return amountCopied;
-}
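Because the FEP transmit buffer sizes are powers of two (the code above masks with size - 1), the two-branch free-space computation in pc_write() reduces to a single masked expression; a minimal sketch, names chosen here for illustration:

	static inline unsigned int tx_space_free(unsigned int head, unsigned int tail,
						 unsigned int size)
	{
		/* bytes free between head and tail, always leaving one slot unused */
		return (tail - head - 1) & (size - 1);
	}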
-
-static int pc_write_room(struct tty_struct *tty)
-{
- int remain = 0;
- struct channel *ch;
- unsigned long flags;
- unsigned int head, tail;
- struct board_chan __iomem *bc;
- /*
- * verifyChannel returns the channel from the tty struct if it is
- * valid. This serves as a sanity check.
- */
- ch = verifyChannel(tty);
- if (ch != NULL) {
- spin_lock_irqsave(&epca_lock, flags);
- globalwinon(ch);
-
- bc = ch->brdchan;
- head = readw(&bc->tin) & (ch->txbufsize - 1);
- tail = readw(&bc->tout);
-
- if (tail != readw(&bc->tout))
- tail = readw(&bc->tout);
- /* Wrap tail if necessary */
- tail &= (ch->txbufsize - 1);
- remain = tail - head - 1;
- if (remain < 0)
- remain += ch->txbufsize;
-
- if (remain && (ch->statusflags & LOWWAIT) == 0) {
- ch->statusflags |= LOWWAIT;
- writeb(1, &bc->ilow);
- }
- memoff(ch);
- spin_unlock_irqrestore(&epca_lock, flags);
- }
- /* Return how much room is left on card */
- return remain;
-}
-
-static int pc_chars_in_buffer(struct tty_struct *tty)
-{
- int chars;
- unsigned int ctail, head, tail;
- int remain;
- unsigned long flags;
- struct channel *ch;
- struct board_chan __iomem *bc;
- /*
- * verifyChannel returns the channel from the tty struct if it is
- * valid. This serves as a sanity check.
- */
- ch = verifyChannel(tty);
- if (ch == NULL)
- return 0;
-
- spin_lock_irqsave(&epca_lock, flags);
- globalwinon(ch);
-
- bc = ch->brdchan;
- tail = readw(&bc->tout);
- head = readw(&bc->tin);
- ctail = readw(&ch->mailbox->cout);
-
- if (tail == head && readw(&ch->mailbox->cin) == ctail &&
- readb(&bc->tbusy) == 0)
- chars = 0;
- else { /* Begin if some space on the card has been used */
- head = readw(&bc->tin) & (ch->txbufsize - 1);
- tail &= (ch->txbufsize - 1);
- /*
- * The logic here is basically the opposite of pc_write_room
- * above: here we are finding the number of bytes in the
- * buffer that are filled, not the number of bytes empty.
- */
- remain = tail - head - 1;
- if (remain < 0)
- remain += ch->txbufsize;
- chars = (int)(ch->txbufsize - remain);
- /*
- * Make it possible to wakeup anything waiting for output in
- * tty_ioctl.c, etc.
- *
- * If not already set, set up an event to indicate when the
- * transmit buffer empties.
- */
- if (!(ch->statusflags & EMPTYWAIT))
- setup_empty_event(tty, ch);
- } /* End if some space on the card has been used */
- memoff(ch);
- spin_unlock_irqrestore(&epca_lock, flags);
- /* Return number of characters residing on card. */
- return chars;
-}
-
-static void pc_flush_buffer(struct tty_struct *tty)
-{
- unsigned int tail;
- unsigned long flags;
- struct channel *ch;
- struct board_chan __iomem *bc;
- /*
- * verifyChannel returns the channel from the tty struct if it is
- * valid. This serves as a sanity check.
- */
- ch = verifyChannel(tty);
- if (ch == NULL)
- return;
-
- spin_lock_irqsave(&epca_lock, flags);
- globalwinon(ch);
- bc = ch->brdchan;
- tail = readw(&bc->tout);
- /* Have FEP move tout pointer; effectively flushing transmit buffer */
- fepcmd(ch, STOUT, (unsigned) tail, 0, 0, 0);
- memoff(ch);
- spin_unlock_irqrestore(&epca_lock, flags);
- tty_wakeup(tty);
-}
-
-static void pc_flush_chars(struct tty_struct *tty)
-{
- struct channel *ch;
- /*
- * verifyChannel returns the channel from the tty struct if it is
- * valid. This serves as a sanity check.
- */
- ch = verifyChannel(tty);
- if (ch != NULL) {
- unsigned long flags;
- spin_lock_irqsave(&epca_lock, flags);
- /*
- * If not already set and the transmitter is busy, set up an
- * event to indicate when the transmit empties.
- */
- if ((ch->statusflags & TXBUSY) &&
- !(ch->statusflags & EMPTYWAIT))
- setup_empty_event(tty, ch);
- spin_unlock_irqrestore(&epca_lock, flags);
- }
-}
-
-static int epca_carrier_raised(struct tty_port *port)
-{
- struct channel *ch = container_of(port, struct channel, port);
- if (ch->imodem & ch->dcd)
- return 1;
- return 0;
-}
-
-static void epca_dtr_rts(struct tty_port *port, int onoff)
-{
-}
-
-static int pc_open(struct tty_struct *tty, struct file *filp)
-{
- struct channel *ch;
- struct tty_port *port;
- unsigned long flags;
- int line, retval, boardnum;
- struct board_chan __iomem *bc;
- unsigned int head;
-
- line = tty->index;
- if (line < 0 || line >= nbdevs)
- return -ENODEV;
-
- ch = &digi_channels[line];
- port = &ch->port;
- boardnum = ch->boardnum;
-
- /* Check status of board configured in system. */
-
- /*
- * I check to see if the epca_setup routine detected a user error. It
- * might be better to put this in pc_init, but for the moment it goes
- * here.
- */
- if (invalid_lilo_config) {
- if (setup_error_code & INVALID_BOARD_TYPE)
- printk(KERN_ERR "epca: pc_open: Invalid board type specified in kernel options.\n");
- if (setup_error_code & INVALID_NUM_PORTS)
- printk(KERN_ERR "epca: pc_open: Invalid number of ports specified in kernel options.\n");
- if (setup_error_code & INVALID_MEM_BASE)
- printk(KERN_ERR "epca: pc_open: Invalid board memory address specified in kernel options.\n");
- if (setup_error_code & INVALID_PORT_BASE)
- printk(KERN_ERR "epca; pc_open: Invalid board port address specified in kernel options.\n");
- if (setup_error_code & INVALID_BOARD_STATUS)
- printk(KERN_ERR "epca: pc_open: Invalid board status specified in kernel options.\n");
- if (setup_error_code & INVALID_ALTPIN)
- printk(KERN_ERR "epca: pc_open: Invalid board altpin specified in kernel options;\n");
- tty->driver_data = NULL; /* Mark this device as 'down' */
- return -ENODEV;
- }
- if (boardnum >= num_cards || boards[boardnum].status == DISABLED) {
- tty->driver_data = NULL; /* Mark this device as 'down' */
- return(-ENODEV);
- }
-
- bc = ch->brdchan;
- if (bc == NULL) {
- tty->driver_data = NULL;
- return -ENODEV;
- }
-
- spin_lock_irqsave(&port->lock, flags);
- /*
- * Every time a channel is opened, increment a counter. This is
- * necessary because we do not wish to flush and shut down the channel
- * until the last app holding the channel open closes it.
- */
- port->count++;
- /*
- * Set a kernel structure's pointer to our local channel structure. This
- * way we can get to it when passed only a tty struct.
- */
- tty->driver_data = ch;
- port->tty = tty;
- /*
- * If this is the first time the channel has been opened, initialize
- * the tty->termios struct; otherwise let pc_close handle it.
- */
- spin_lock(&epca_lock);
- globalwinon(ch);
- ch->statusflags = 0;
-
- /* Save boards current modem status */
- ch->imodem = readb(&bc->mstat);
-
- /*
- * Set receive head and tail ptrs to each other. This indicates no data
- * available to read.
- */
- head = readw(&bc->rin);
- writew(head, &bc->rout);
-
- /* Set the channels associated tty structure */
-
- /*
- * The below routine generally sets up parity, baud, flow control
- * issues, etc.... It affects both control flags and input flags.
- */
- epcaparam(tty, ch);
- memoff(ch);
- spin_unlock(&epca_lock);
- port->flags |= ASYNC_INITIALIZED;
- spin_unlock_irqrestore(&port->lock, flags);
-
- retval = tty_port_block_til_ready(port, tty, filp);
- if (retval)
- return retval;
- /*
- * Set this again in case a hangup set it to zero while this open() was
- * waiting for the line...
- */
- spin_lock_irqsave(&port->lock, flags);
- port->tty = tty;
- spin_lock(&epca_lock);
- globalwinon(ch);
- /* Enable Digi Data events */
- writeb(1, &bc->idata);
- memoff(ch);
- spin_unlock(&epca_lock);
- spin_unlock_irqrestore(&port->lock, flags);
- return 0;
-}
-
-static int __init epca_module_init(void)
-{
- return pc_init();
-}
-module_init(epca_module_init);
-
-static struct pci_driver epca_driver;
-
-static void __exit epca_module_exit(void)
-{
- int count, crd;
- struct board_info *bd;
- struct channel *ch;
-
- del_timer_sync(&epca_timer);
-
- if (tty_unregister_driver(pc_driver) ||
- tty_unregister_driver(pc_info)) {
- printk(KERN_WARNING "epca: cleanup_module failed to un-register tty driver\n");
- return;
- }
- put_tty_driver(pc_driver);
- put_tty_driver(pc_info);
-
- for (crd = 0; crd < num_cards; crd++) {
- bd = &boards[crd];
- if (!bd) { /* sanity check */
- printk(KERN_ERR "<Error> - Digi : cleanup_module failed\n");
- return;
- }
- ch = card_ptr[crd];
- for (count = 0; count < bd->numports; count++, ch++) {
- struct tty_struct *tty = tty_port_tty_get(&ch->port);
- if (tty) {
- tty_hangup(tty);
- tty_kref_put(tty);
- }
- }
- }
- pci_unregister_driver(&epca_driver);
-}
-module_exit(epca_module_exit);
-
-static const struct tty_operations pc_ops = {
- .open = pc_open,
- .close = pc_close,
- .write = pc_write,
- .write_room = pc_write_room,
- .flush_buffer = pc_flush_buffer,
- .chars_in_buffer = pc_chars_in_buffer,
- .flush_chars = pc_flush_chars,
- .ioctl = pc_ioctl,
- .set_termios = pc_set_termios,
- .stop = pc_stop,
- .start = pc_start,
- .throttle = pc_throttle,
- .unthrottle = pc_unthrottle,
- .hangup = pc_hangup,
- .break_ctl = pc_send_break
-};
-
-static const struct tty_port_operations epca_port_ops = {
- .carrier_raised = epca_carrier_raised,
- .dtr_rts = epca_dtr_rts,
-};
-
-static int info_open(struct tty_struct *tty, struct file *filp)
-{
- return 0;
-}
-
-static const struct tty_operations info_ops = {
- .open = info_open,
- .ioctl = info_ioctl,
-};
-
-static int __init pc_init(void)
-{
- int crd;
- struct board_info *bd;
- unsigned char board_id = 0;
- int err = -ENOMEM;
-
- int pci_boards_found, pci_count;
-
- pci_count = 0;
-
- pc_driver = alloc_tty_driver(MAX_ALLOC);
- if (!pc_driver)
- goto out1;
-
- pc_info = alloc_tty_driver(MAX_ALLOC);
- if (!pc_info)
- goto out2;
-
- /*
- * If epca_setup has not been run by LILO, set num_cards to defaults;
- * copy the board structure defined by digiConfig into the driver's board
- * structure. Note : If LILO has run epca_setup then epca_setup will
- * handle defining num_cards as well as copying the data into the board
- * structure.
- */
- if (!liloconfig) {
- /* driver has been configured via epcaconfig */
- nbdevs = NBDEVS;
- num_cards = NUMCARDS;
- memcpy(&boards, &static_boards,
- sizeof(struct board_info) * NUMCARDS);
- }
-
- /*
- * Note : If lilo was used to configure the driver and the ignore
- * epcaconfig option was chosen (digiepca=2) then nbdevs and num_cards
- * will equal 0 at this point. This is okay; PCI cards will still be
- * picked up if detected.
- */
-
- /*
- * Set up interrupt, we will worry about memory allocation in
- * post_fep_init.
- */
- printk(KERN_INFO "DIGI epca driver version %s loaded.\n", VERSION);
-
- /*
- * NOTE : This code assumes that the number of ports found in the
- * boards array is correct. This could be wrong if the card in question
- * is PCI (And therefore has no ports entry in the boards structure.)
- * The rest of the information will be valid for PCI because the
- * beginning of pc_init scans for PCI and determines i/o and base
- * memory addresses. I am not sure if it is possible to read the number
- * of ports supported by the card prior to it being booted (Since that
- * is the state it is in when pc_init is run). Because it is not
- * possible to query the number of supported ports until after the card
- * has booted; we are required to calculate the card_ptrs as the card
- * is initialized (Inside post_fep_init). The negative thing about this
- * approach is that digiDload's call to GET_INFO will have a bad port
- * value. (Since this is called prior to post_fep_init.)
- */
- pci_boards_found = 0;
- if (num_cards < MAXBOARDS)
- pci_boards_found += init_PCI();
- num_cards += pci_boards_found;
-
- pc_driver->owner = THIS_MODULE;
- pc_driver->name = "ttyD";
- pc_driver->major = DIGI_MAJOR;
- pc_driver->minor_start = 0;
- pc_driver->type = TTY_DRIVER_TYPE_SERIAL;
- pc_driver->subtype = SERIAL_TYPE_NORMAL;
- pc_driver->init_termios = tty_std_termios;
- pc_driver->init_termios.c_iflag = 0;
- pc_driver->init_termios.c_oflag = 0;
- pc_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL;
- pc_driver->init_termios.c_lflag = 0;
- pc_driver->init_termios.c_ispeed = 9600;
- pc_driver->init_termios.c_ospeed = 9600;
- pc_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_HARDWARE_BREAK;
- tty_set_operations(pc_driver, &pc_ops);
-
- pc_info->owner = THIS_MODULE;
- pc_info->name = "digi_ctl";
- pc_info->major = DIGIINFOMAJOR;
- pc_info->minor_start = 0;
- pc_info->type = TTY_DRIVER_TYPE_SERIAL;
- pc_info->subtype = SERIAL_TYPE_INFO;
- pc_info->init_termios = tty_std_termios;
- pc_info->init_termios.c_iflag = 0;
- pc_info->init_termios.c_oflag = 0;
- pc_info->init_termios.c_lflag = 0;
- pc_info->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
- pc_info->init_termios.c_ispeed = 9600;
- pc_info->init_termios.c_ospeed = 9600;
- pc_info->flags = TTY_DRIVER_REAL_RAW;
- tty_set_operations(pc_info, &info_ops);
-
-
- for (crd = 0; crd < num_cards; crd++) {
- /*
- * This is where the appropriate memory handlers for the
- * hardware are set. Everything at runtime blindly jumps through
- * these vectors.
- */
-
- /* defined in epcaconfig.h */
- bd = &boards[crd];
-
- switch (bd->type) {
- case PCXEM:
- case EISAXEM:
- bd->memwinon = pcxem_memwinon;
- bd->memwinoff = pcxem_memwinoff;
- bd->globalwinon = pcxem_globalwinon;
- bd->txwinon = pcxem_txwinon;
- bd->rxwinon = pcxem_rxwinon;
- bd->memoff = pcxem_memoff;
- bd->assertgwinon = dummy_assertgwinon;
- bd->assertmemoff = dummy_assertmemoff;
- break;
-
- case PCIXEM:
- case PCIXRJ:
- case PCIXR:
- bd->memwinon = dummy_memwinon;
- bd->memwinoff = dummy_memwinoff;
- bd->globalwinon = dummy_globalwinon;
- bd->txwinon = dummy_txwinon;
- bd->rxwinon = dummy_rxwinon;
- bd->memoff = dummy_memoff;
- bd->assertgwinon = dummy_assertgwinon;
- bd->assertmemoff = dummy_assertmemoff;
- break;
-
- case PCXE:
- case PCXEVE:
- bd->memwinon = pcxe_memwinon;
- bd->memwinoff = pcxe_memwinoff;
- bd->globalwinon = pcxe_globalwinon;
- bd->txwinon = pcxe_txwinon;
- bd->rxwinon = pcxe_rxwinon;
- bd->memoff = pcxe_memoff;
- bd->assertgwinon = dummy_assertgwinon;
- bd->assertmemoff = dummy_assertmemoff;
- break;
-
- case PCXI:
- case PC64XE:
- bd->memwinon = pcxi_memwinon;
- bd->memwinoff = pcxi_memwinoff;
- bd->globalwinon = pcxi_globalwinon;
- bd->txwinon = pcxi_txwinon;
- bd->rxwinon = pcxi_rxwinon;
- bd->memoff = pcxi_memoff;
- bd->assertgwinon = pcxi_assertgwinon;
- bd->assertmemoff = pcxi_assertmemoff;
- break;
-
- default:
- break;
- }
-
- /*
- * Some cards need a memory segment to be defined for use in
- * transmit and receive windowing operations. These boards are
- * listed in the below switch. In the case of the XI the amount
- * of memory on the board is variable so the memory_seg is also
- * variable. This code determines what the segment should be.
- */
- switch (bd->type) {
- case PCXE:
- case PCXEVE:
- case PC64XE:
- bd->memory_seg = 0xf000;
- break;
-
- case PCXI:
- board_id = inb((int)bd->port);
- if ((board_id & 0x1) == 0x1) {
- /* it's an XI card */
- /* Is it a 64K board */
- if ((board_id & 0x30) == 0)
- bd->memory_seg = 0xf000;
-
- /* Is it a 128K board */
- if ((board_id & 0x30) == 0x10)
- bd->memory_seg = 0xe000;
-
- /* Is it a 256K board */
- if ((board_id & 0x30) == 0x20)
- bd->memory_seg = 0xc000;
-
- /* Is it a 512K board */
- if ((board_id & 0x30) == 0x30)
- bd->memory_seg = 0x8000;
- } else
- printk(KERN_ERR "epca: Board at 0x%x doesn't appear to be an XI\n", (int)bd->port);
- break;
- }
- }
-
- err = tty_register_driver(pc_driver);
- if (err) {
- printk(KERN_ERR "Couldn't register Digi PC/ driver");
- goto out3;
- }
-
- err = tty_register_driver(pc_info);
- if (err) {
- printk(KERN_ERR "Couldn't register Digi PC/ info ");
- goto out4;
- }
-
- /* Start up the poller to check for events on all enabled boards */
- init_timer(&epca_timer);
- epca_timer.function = epcapoll;
- mod_timer(&epca_timer, jiffies + HZ/25);
- return 0;
-
-out4:
- tty_unregister_driver(pc_driver);
-out3:
- put_tty_driver(pc_info);
-out2:
- put_tty_driver(pc_driver);
-out1:
- return err;
-}
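The PC/XI probe above packs the board's memory size into bits 0x30 of the ID register; restated as a small helper (illustrative only, the driver does this inline):

	static unsigned short xi_memory_seg(unsigned char board_id)
	{
		switch (board_id & 0x30) {
		case 0x00: return 0xf000;	/*  64K board */
		case 0x10: return 0xe000;	/* 128K board */
		case 0x20: return 0xc000;	/* 256K board */
		default:   return 0x8000;	/* 512K board (0x30) */
		}
	}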
-
-static void post_fep_init(unsigned int crd)
-{
- int i;
- void __iomem *memaddr;
- struct global_data __iomem *gd;
- struct board_info *bd;
- struct board_chan __iomem *bc;
- struct channel *ch;
- int shrinkmem = 0, lowwater;
-
- /*
- * This call is made by the user via the ioctl call DIGI_INIT. It is
- * responsible for setting up all the card specific stuff.
- */
- bd = &boards[crd];
-
- /*
- * If this is a PCI board, get the port info. Remember PCI cards do not
- * have entries into the epcaconfig.h file, so we can't get the number
- * of ports from it. Unfortunately, this means that anyone doing a
- * DIGI_GETINFO before the board has booted will get an invalid number
- * of ports returned (It should return 0). Calls to DIGI_GETINFO after
- * DIGI_INIT has been called will return the proper values.
- */
- if (bd->type >= PCIXEM) { /* Begin get PCI number of ports */
- /*
- * Below we use XEMPORTS as a memory offset regardless of which
- * PCI card it is. This is because all of the supported PCI
- * cards have the same memory offset for the channel data. This
- * will have to be changed if we ever develop a PCI/XE card.
- * NOTE : The FEP manual states that the port offset is 0xC22
- * as opposed to 0xC02. This is only true for PC/XE, and PC/XI
- * cards; not for the XEM, or CX series. On the PCI cards the
- * number of ports is determined by reading an ID PROM located
- * in the box attached to the card. The card can then use the
- * id as an index to determine the number of ports available.
- * (FYI - The id should be located at 0x1ac (And may use up to
- * 4 bytes if the box in question is a XEM or CX)).
- */
- /* PCI cards are already remapped at this point ISA are not */
- bd->numports = readw(bd->re_map_membase + XEMPORTS);
- epcaassert(bd->numports <= 64, "PCI returned a invalid number of ports");
- nbdevs += (bd->numports);
- } else {
- /* Fix up the mappings for ISA/EISA etc */
- /* FIXME: 64K - can we be smarter ? */
- bd->re_map_membase = ioremap_nocache(bd->membase, 0x10000);
- }
-
- if (crd != 0)
- card_ptr[crd] = card_ptr[crd-1] + boards[crd-1].numports;
- else
- card_ptr[crd] = &digi_channels[crd]; /* <- For card 0 only */
-
- ch = card_ptr[crd];
- epcaassert(ch <= &digi_channels[nbdevs - 1], "ch out of range");
-
- memaddr = bd->re_map_membase;
-
- /*
- * The below assignment will set bc to point at the BEGINNING of the
- * card's channel structures. For 1 card there will be between 8 and 64
- * of these structures.
- */
- bc = memaddr + CHANSTRUCT;
-
- /*
- * The below assignment will set gd to point at the BEGINNING of global
- * memory address 0xc00. The first data in that global memory actually
- * starts at address 0xc1a. The command in pointer begins at 0xd10.
- */
- gd = memaddr + GLOBAL;
-
- /*
- * XEPORTS (address 0xc22) points at the number of channels the card
- * supports. (For 64XE, XI, XEM, and XR use 0xc02)
- */
- if ((bd->type == PCXEVE || bd->type == PCXE) &&
- (readw(memaddr + XEPORTS) < 3))
- shrinkmem = 1;
- if (bd->type < PCIXEM)
- if (!request_region((int)bd->port, 4, board_desc[bd->type]))
- return;
- memwinon(bd, 0);
-
- /*
- * Remember ch is the main driver's channel structure, while bc is the
- * card's channel structure.
- */
- for (i = 0; i < bd->numports; i++, ch++, bc++) {
- unsigned long flags;
- u16 tseg, rseg;
-
- tty_port_init(&ch->port);
- ch->port.ops = &epca_port_ops;
- ch->brdchan = bc;
- ch->mailbox = gd;
- INIT_WORK(&ch->tqueue, do_softint);
- ch->board = &boards[crd];
-
- spin_lock_irqsave(&epca_lock, flags);
- switch (bd->type) {
- /*
- * Since some of the boards use different bitmaps for
- * their control signals we cannot hard code these
- * values and retain portability. We virtualize this
- * data here.
- */
- case EISAXEM:
- case PCXEM:
- case PCIXEM:
- case PCIXRJ:
- case PCIXR:
- ch->m_rts = 0x02;
- ch->m_dcd = 0x80;
- ch->m_dsr = 0x20;
- ch->m_cts = 0x10;
- ch->m_ri = 0x40;
- ch->m_dtr = 0x01;
- break;
-
- case PCXE:
- case PCXEVE:
- case PCXI:
- case PC64XE:
- ch->m_rts = 0x02;
- ch->m_dcd = 0x08;
- ch->m_dsr = 0x10;
- ch->m_cts = 0x20;
- ch->m_ri = 0x40;
- ch->m_dtr = 0x80;
- break;
- }
-
- if (boards[crd].altpin) {
- ch->dsr = ch->m_dcd;
- ch->dcd = ch->m_dsr;
- ch->digiext.digi_flags |= DIGI_ALTPIN;
- } else {
- ch->dcd = ch->m_dcd;
- ch->dsr = ch->m_dsr;
- }
-
- ch->boardnum = crd;
- ch->channelnum = i;
- ch->magic = EPCA_MAGIC;
- tty_port_tty_set(&ch->port, NULL);
-
- if (shrinkmem) {
- fepcmd(ch, SETBUFFER, 32, 0, 0, 0);
- shrinkmem = 0;
- }
-
- tseg = readw(&bc->tseg);
- rseg = readw(&bc->rseg);
-
- switch (bd->type) {
- case PCIXEM:
- case PCIXRJ:
- case PCIXR:
- /* Cover all the 2MEG cards */
- ch->txptr = memaddr + ((tseg << 4) & 0x1fffff);
- ch->rxptr = memaddr + ((rseg << 4) & 0x1fffff);
- ch->txwin = FEPWIN | (tseg >> 11);
- ch->rxwin = FEPWIN | (rseg >> 11);
- break;
-
- case PCXEM:
- case EISAXEM:
- /* Cover all the 32K windowed cards */
- /* Mask equal to window size - 1 */
- ch->txptr = memaddr + ((tseg << 4) & 0x7fff);
- ch->rxptr = memaddr + ((rseg << 4) & 0x7fff);
- ch->txwin = FEPWIN | (tseg >> 11);
- ch->rxwin = FEPWIN | (rseg >> 11);
- break;
-
- case PCXEVE:
- case PCXE:
- ch->txptr = memaddr + (((tseg - bd->memory_seg) << 4)
- & 0x1fff);
- ch->txwin = FEPWIN | ((tseg - bd->memory_seg) >> 9);
- ch->rxptr = memaddr + (((rseg - bd->memory_seg) << 4)
- & 0x1fff);
- ch->rxwin = FEPWIN | ((rseg - bd->memory_seg) >> 9);
- break;
-
- case PCXI:
- case PC64XE:
- ch->txptr = memaddr + ((tseg - bd->memory_seg) << 4);
- ch->rxptr = memaddr + ((rseg - bd->memory_seg) << 4);
- ch->txwin = ch->rxwin = 0;
- break;
- }
-
- ch->txbufhead = 0;
- ch->txbufsize = readw(&bc->tmax) + 1;
-
- ch->rxbufhead = 0;
- ch->rxbufsize = readw(&bc->rmax) + 1;
-
- lowwater = ch->txbufsize >= 2000 ? 1024 : (ch->txbufsize / 2);
-
- /* Set transmitter low water mark */
- fepcmd(ch, STXLWATER, lowwater, 0, 10, 0);
-
- /* Set receiver low water mark */
- fepcmd(ch, SRXLWATER, (ch->rxbufsize / 4), 0, 10, 0);
-
- /* Set receiver high water mark */
- fepcmd(ch, SRXHWATER, (3 * ch->rxbufsize / 4), 0, 10, 0);
-
- writew(100, &bc->edelay);
- writeb(1, &bc->idata);
-
- ch->startc = readb(&bc->startc);
- ch->stopc = readb(&bc->stopc);
- ch->startca = readb(&bc->startca);
- ch->stopca = readb(&bc->stopca);
-
- ch->fepcflag = 0;
- ch->fepiflag = 0;
- ch->fepoflag = 0;
- ch->fepstartc = 0;
- ch->fepstopc = 0;
- ch->fepstartca = 0;
- ch->fepstopca = 0;
-
- ch->port.close_delay = 50;
-
- spin_unlock_irqrestore(&epca_lock, flags);
- }
-
- printk(KERN_INFO
- "Digi PC/Xx Driver V%s: %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n",
- VERSION, board_desc[bd->type], (long)bd->port,
- (long)bd->membase, bd->numports);
- memwinoff(bd, 0);
-}
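The window arithmetic in post_fep_init() follows from tseg/rseg being 16-byte paragraph numbers: the byte offset is seg << 4, each window on the 32K-windowed boards is 1 << 15 bytes, so the window index is (seg << 4) >> 15 = seg >> 11 and the in-window offset is (seg << 4) & 0x7fff. A hedged sketch of that mapping (helper names are illustrative):

	static inline unsigned int fep_window(u16 seg)
	{
		return FEPWIN | (seg >> 11);		/* which 32K window */
	}

	static inline unsigned int fep_window_offset(u16 seg)
	{
		return (seg << 4) & 0x7fff;		/* offset inside that window */
	}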
-
-static void epcapoll(unsigned long ignored)
-{
- unsigned long flags;
- int crd;
- unsigned int head, tail;
- struct channel *ch;
- struct board_info *bd;
-
- /*
- * This routine is called upon every timer interrupt. Even though the
- * Digi series cards are capable of generating interrupts, this method
- * of non-looping polling is more efficient. This routine checks for
- * card-generated events (Such as receive data or transmit buffer
- * empty) and acts on those events.
- */
- for (crd = 0; crd < num_cards; crd++) {
- bd = &boards[crd];
- ch = card_ptr[crd];
-
- if ((bd->status == DISABLED) || digi_poller_inhibited)
- continue;
-
- /*
- * assertmemoff is not needed here; indeed it is an empty
- * subroutine. It is being kept because future boards may need
- * this as well as some legacy boards.
- */
- spin_lock_irqsave(&epca_lock, flags);
-
- assertmemoff(ch);
-
- globalwinon(ch);
-
- /*
- * In this case head and tail actually refer to the event queue
- * not the transmit or receive queue.
- */
- head = readw(&ch->mailbox->ein);
- tail = readw(&ch->mailbox->eout);
-
- /* If head isn't equal to tail we have an event */
- if (head != tail)
- doevent(crd);
- memoff(ch);
-
- spin_unlock_irqrestore(&epca_lock, flags);
- } /* End for each card */
- mod_timer(&epca_timer, jiffies + (HZ / 25));
-}
-
-static void doevent(int crd)
-{
- void __iomem *eventbuf;
- struct channel *ch, *chan0;
- static struct tty_struct *tty;
- struct board_info *bd;
- struct board_chan __iomem *bc;
- unsigned int tail, head;
- int event, channel;
- int mstat, lstat;
-
- /*
- * This subroutine is called by epcapoll when an event is detected
- * in the event queue. This routine responds to those events.
- */
- bd = &boards[crd];
-
- chan0 = card_ptr[crd];
- epcaassert(chan0 <= &digi_channels[nbdevs - 1], "ch out of range");
- assertgwinon(chan0);
- while ((tail = readw(&chan0->mailbox->eout)) !=
- (head = readw(&chan0->mailbox->ein))) {
- /* Begin while something in event queue */
- assertgwinon(chan0);
- eventbuf = bd->re_map_membase + tail + ISTART;
- /* Get the channel the event occurred on */
- channel = readb(eventbuf);
- /* Get the actual event code that occurred */
- event = readb(eventbuf + 1);
- /*
- * The two assignments below get the current modem status
- * (mstat) and the previous modem status (lstat). These are
- * useful because an event could signal a change in modem
- * signals itself.
- */
- mstat = readb(eventbuf + 2);
- lstat = readb(eventbuf + 3);
-
- ch = chan0 + channel;
- if ((unsigned)channel >= bd->numports || !ch) {
- if (channel >= bd->numports)
- ch = chan0;
- bc = ch->brdchan;
- goto next;
- }
-
- bc = ch->brdchan;
- if (bc == NULL)
- goto next;
-
- tty = tty_port_tty_get(&ch->port);
- if (event & DATA_IND) { /* Begin DATA_IND */
- receive_data(ch, tty);
- assertgwinon(ch);
- } /* End DATA_IND */
- /* else *//* Fix for DCD transition missed bug */
- if (event & MODEMCHG_IND) {
- /* A modem signal change has been indicated */
- ch->imodem = mstat;
- if (test_bit(ASYNCB_CHECK_CD, &ch->port.flags)) {
- /* We are now receiving dcd */
- if (mstat & ch->dcd)
- wake_up_interruptible(&ch->port.open_wait);
- else /* No dcd; hangup */
- pc_sched_event(ch, EPCA_EVENT_HANGUP);
- }
- }
- if (tty) {
- if (event & BREAK_IND) {
- /* A break has been indicated */
- tty_insert_flip_char(tty, 0, TTY_BREAK);
- tty_schedule_flip(tty);
- } else if (event & LOWTX_IND) {
- if (ch->statusflags & LOWWAIT) {
- ch->statusflags &= ~LOWWAIT;
- tty_wakeup(tty);
- }
- } else if (event & EMPTYTX_IND) {
- /* This event is generated by
- setup_empty_event */
- ch->statusflags &= ~TXBUSY;
- if (ch->statusflags & EMPTYWAIT) {
- ch->statusflags &= ~EMPTYWAIT;
- tty_wakeup(tty);
- }
- }
- tty_kref_put(tty);
- }
-next:
- globalwinon(ch);
- BUG_ON(!bc);
- writew(1, &bc->idata);
- writew((tail + 4) & (IMAX - ISTART - 4), &chan0->mailbox->eout);
- globalwinon(chan0);
- } /* End while something in event queue */
-}
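Each entry doevent() pulls off the event queue is four bytes wide, which is why the tail advances by 4 and wraps with the IMAX - ISTART - 4 mask (a 1K ring). The layout implied by the readb() offsets above, as a sketch (the struct name is illustrative):

	struct fep_event {
		u8 channel;	/* eventbuf + 0: channel number on the board */
		u8 event;	/* eventbuf + 1: DATA_IND, BREAK_IND, ... */
		u8 mstat;	/* eventbuf + 2: current modem signals */
		u8 lstat;	/* eventbuf + 3: previous modem signals */
	};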
-
-static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
- int byte2, int ncmds, int bytecmd)
-{
- unchar __iomem *memaddr;
- unsigned int head, cmdTail, cmdStart, cmdMax;
- long count;
- int n;
-
- /* This is the routine in which commands may be passed to the card. */
-
- if (ch->board->status == DISABLED)
- return;
- assertgwinon(ch);
- /* Remember head (As well as max) is just an offset not a base addr */
- head = readw(&ch->mailbox->cin);
- /* cmdStart is a base address */
- cmdStart = readw(&ch->mailbox->cstart);
- /*
- * We do the addition below because we do not want a max pointer
- * relative to cmdStart. We want a max pointer that points at the
- * physical end of the command queue.
- */
- cmdMax = (cmdStart + 4 + readw(&ch->mailbox->cmax));
- memaddr = ch->board->re_map_membase;
-
- if (head >= (cmdMax - cmdStart) || (head & 03)) {
- printk(KERN_ERR "line %d: Out of range, cmd = %x, head = %x\n",
- __LINE__, cmd, head);
- printk(KERN_ERR "line %d: Out of range, cmdMax = %x, cmdStart = %x\n",
- __LINE__, cmdMax, cmdStart);
- return;
- }
- if (bytecmd) {
- writeb(cmd, memaddr + head + cmdStart + 0);
- writeb(ch->channelnum, memaddr + head + cmdStart + 1);
- /* Below word_or_byte is bits to set */
- writeb(word_or_byte, memaddr + head + cmdStart + 2);
- /* Below byte2 is bits to reset */
- writeb(byte2, memaddr + head + cmdStart + 3);
- } else {
- writeb(cmd, memaddr + head + cmdStart + 0);
- writeb(ch->channelnum, memaddr + head + cmdStart + 1);
- writeb(word_or_byte, memaddr + head + cmdStart + 2);
- }
- head = (head + 4) & (cmdMax - cmdStart - 4);
- writew(head, &ch->mailbox->cin);
- count = FEPTIMEOUT;
-
- for (;;) {
- count--;
- if (count == 0) {
- printk(KERN_ERR "<Error> - Fep not responding in fepcmd()\n");
- return;
- }
- head = readw(&ch->mailbox->cin);
- cmdTail = readw(&ch->mailbox->cout);
- n = (head - cmdTail) & (cmdMax - cmdStart - 4);
- /*
- * Basically this will break when the FEP acknowledges the
- * command by incrementing cmdTail (Making it equal to head).
- */
- if (n <= ncmds * (sizeof(short) * 4))
- break;
- }
-}
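fepcmd() fills the same kind of four-byte ring slot, as the byte offsets above show; a sketch of one command slot (field names are illustrative, not taken from the FEP manual):

	struct fep_cmd {
		u8 cmd;		/* e.g. SETMODEM, STXLWATER, SRXHWATER */
		u8 channel;	/* channel number on this board */
		u8 arg1;	/* byte form: bits to set; otherwise the value */
		u8 arg2;	/* byte form: bits to reset */
	};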
-
-/*
- * Digi products use fields in their channels structures that are very similar
- * to the c_cflag and c_iflag fields typically found in UNIX termios
- * structures. The below three routines allow mappings between these hardware
- * "flags" and their respective Linux flags.
- */
-static unsigned termios2digi_h(struct channel *ch, unsigned cflag)
-{
- unsigned res = 0;
-
- if (cflag & CRTSCTS) {
- ch->digiext.digi_flags |= (RTSPACE | CTSPACE);
- res |= ((ch->m_cts) | (ch->m_rts));
- }
-
- if (ch->digiext.digi_flags & RTSPACE)
- res |= ch->m_rts;
-
- if (ch->digiext.digi_flags & DTRPACE)
- res |= ch->m_dtr;
-
- if (ch->digiext.digi_flags & CTSPACE)
- res |= ch->m_cts;
-
- if (ch->digiext.digi_flags & DSRPACE)
- res |= ch->dsr;
-
- if (ch->digiext.digi_flags & DCDPACE)
- res |= ch->dcd;
-
- if (res & (ch->m_rts))
- ch->digiext.digi_flags |= RTSPACE;
-
- if (res & (ch->m_cts))
- ch->digiext.digi_flags |= CTSPACE;
-
- return res;
-}
-
-static unsigned termios2digi_i(struct channel *ch, unsigned iflag)
-{
- unsigned res = iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
- INPCK | ISTRIP | IXON | IXANY | IXOFF);
- if (ch->digiext.digi_flags & DIGI_AIXON)
- res |= IAIXON;
- return res;
-}
-
-static unsigned termios2digi_c(struct channel *ch, unsigned cflag)
-{
- unsigned res = 0;
- if (cflag & CBAUDEX) {
- ch->digiext.digi_flags |= DIGI_FAST;
- /*
- * HUPCL bit is used by FEP to indicate fast baud table is to
- * be used.
- */
- res |= FEP_HUPCL;
- } else
- ch->digiext.digi_flags &= ~DIGI_FAST;
- /*
- * CBAUD has bit position 0x1000 set these days to indicate Linux
- * baud rate remap. Digi hardware can't handle the bit assignment.
- * (We use a different bit assignment for high speed.) Clear this
- * bit out.
- */
- res |= cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | CSTOPB | CSIZE);
- /*
- * This gets a little confusing. The Digi cards have their own
- * representation of c_cflags controlling baud rate. For the most part
- * this is identical to the Linux implementation. However; Digi
- * supports one rate (76800) that Linux doesn't. This means that the
- * c_cflag entry that would normally mean 76800 for Digi actually means
- * 115200 under Linux. Without the below mapping, a stty 115200 would
- * only drive the board at 76800. Since the rate 230400 is also found
- * after 76800, the same problem afflicts us when we choose a rate of
- * 230400. Without the below modification stty 230400 would actually
- * give us 115200.
- *
- * There are two additional differences. The Linux value for CLOCAL
- * (0x800; 0004000) has no meaning to the Digi hardware. Also in later
- * releases of Linux; the CBAUD define has CBAUDEX (0x1000; 0010000)
- * ored into it (CBAUD = 0x100f as opposed to 0xf). CBAUDEX should be
- * checked for a screened out prior to termios2digi_c returning. Since
- * CLOCAL isn't used by the board this can be ignored as long as the
- * returned value is used only by Digi hardware.
- */
- if (cflag & CBAUDEX) {
- /*
- * The below code is trying to guarantee that only baud rates
- * 115200 and 230400 are remapped. We use exclusive or because
- * the various baud rates share common bit positions and
- * therefore can't be tested for easily.
- */
- if ((!((cflag & 0x7) ^ (B115200 & ~CBAUDEX))) ||
- (!((cflag & 0x7) ^ (B230400 & ~CBAUDEX))))
- res += 1;
- }
- return res;
-}
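Spelled out, the XOR tests above are plain equality checks on the fast-table index, bumping it past the slot the FEP uses for 76800; an equivalent restatement (helper name is illustrative):

	static unsigned int remap_fast_rate(unsigned int cflag, unsigned int res)
	{
		unsigned int idx = cflag & 0x7;	/* low bits select the fast-table entry */

		if (idx == (B115200 & ~CBAUDEX) || idx == (B230400 & ~CBAUDEX))
			res += 1;		/* skip the 76800 slot */
		return res;
	}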
-
-/* Caller must hold the locks */
-static void epcaparam(struct tty_struct *tty, struct channel *ch)
-{
- unsigned int cmdHead;
- struct ktermios *ts;
- struct board_chan __iomem *bc;
- unsigned mval, hflow, cflag, iflag;
-
- bc = ch->brdchan;
- epcaassert(bc != NULL, "bc out of range");
-
- assertgwinon(ch);
- ts = tty->termios;
- if ((ts->c_cflag & CBAUD) == 0) { /* Begin CBAUD detected */
- cmdHead = readw(&bc->rin);
- writew(cmdHead, &bc->rout);
- cmdHead = readw(&bc->tin);
- /* Changing baud in mid-stream transmission can be wonderful */
- /*
- * Flush current transmit buffer by setting cmdTail pointer
- * (tout) to cmdHead pointer (tin). Hopefully the transmit
- * buffer is empty.
- */
- fepcmd(ch, STOUT, (unsigned) cmdHead, 0, 0, 0);
- mval = 0;
- } else { /* Begin baud rate not B0 */
- /*
- * The c_cflags have changed, but the baud rate is not being dropped
- * to B0. Propagate the change to the card.
- */
- cflag = termios2digi_c(ch, ts->c_cflag);
- if (cflag != ch->fepcflag) {
- ch->fepcflag = cflag;
- /* Set baud rate, char size, stop bits, parity */
- fepcmd(ch, SETCTRLFLAGS, (unsigned) cflag, 0, 0, 0);
- }
- /*
- * If the user has not forced CLOCAL and if the device is not a
- * CALLOUT device (Which is always CLOCAL) we set flags such
- * that the driver will wait on carrier detect.
- */
- if (ts->c_cflag & CLOCAL)
- clear_bit(ASYNCB_CHECK_CD, &ch->port.flags);
- else
- set_bit(ASYNCB_CHECK_CD, &ch->port.flags);
- mval = ch->m_dtr | ch->m_rts;
- } /* End baud rate not B0 */
- iflag = termios2digi_i(ch, ts->c_iflag);
- /* Check input mode flags */
- if (iflag != ch->fepiflag) {
- ch->fepiflag = iflag;
- /*
- * Command sets channels iflag structure on the board. Such
- * things as input soft flow control, handling of parity
- * errors, and break handling are all set here.
- *
- * break handling, parity handling, input stripping,
- * flow control chars
- */
- fepcmd(ch, SETIFLAGS, (unsigned int) ch->fepiflag, 0, 0, 0);
- }
- /*
- * Set the board mint value for this channel. This will cause hardware
- * events to be generated each time the DCD signal (Described in mint)
- * changes.
- */
- writeb(ch->dcd, &bc->mint);
- if ((ts->c_cflag & CLOCAL) || (ch->digiext.digi_flags & DIGI_FORCEDCD))
- if (ch->digiext.digi_flags & DIGI_FORCEDCD)
- writeb(0, &bc->mint);
- ch->imodem = readb(&bc->mstat);
- hflow = termios2digi_h(ch, ts->c_cflag);
- if (hflow != ch->hflow) {
- ch->hflow = hflow;
- /*
- * Hard flow control has been selected but the board is not
- * using it. Activate hard flow control now.
- */
- fepcmd(ch, SETHFLOW, hflow, 0xff, 0, 1);
- }
- mval ^= ch->modemfake & (mval ^ ch->modem);
-
- if (ch->omodem ^ mval) {
- ch->omodem = mval;
- /*
- * The below command sets the DTR and RTS mstat structure. If
- * hard flow control is NOT active these changes will drive the
- * output of the actual DTR and RTS lines. If hard flow control
- * is active, the changes will be saved in the mstat structure
- * and only asserted when hard flow control is turned off.
- */
-
- /* First reset DTR & RTS; then set them */
- fepcmd(ch, SETMODEM, 0, ((ch->m_dtr)|(ch->m_rts)), 0, 1);
- fepcmd(ch, SETMODEM, mval, 0, 0, 1);
- }
- if (ch->startc != ch->fepstartc || ch->stopc != ch->fepstopc) {
- ch->fepstartc = ch->startc;
- ch->fepstopc = ch->stopc;
- /*
- * The XON / XOFF characters have changed; propagate these
- * changes to the card.
- */
- fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1);
- }
- if (ch->startca != ch->fepstartca || ch->stopca != ch->fepstopca) {
- ch->fepstartca = ch->startca;
- ch->fepstopca = ch->stopca;
- /*
- * Similar to the above, this time the auxiliary XON / XOFF
- * characters have changed; propagate these changes to the card.
- */
- fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1);
- }
-}
-
-/* Caller holds lock */
-static void receive_data(struct channel *ch, struct tty_struct *tty)
-{
- unchar *rptr;
- struct ktermios *ts = NULL;
- struct board_chan __iomem *bc;
- int dataToRead, wrapgap, bytesAvailable;
- unsigned int tail, head;
- unsigned int wrapmask;
-
- /*
- * This routine is called by doint when a receive data event has taken
- * place.
- */
- globalwinon(ch);
- if (ch->statusflags & RXSTOPPED)
- return;
- if (tty)
- ts = tty->termios;
- bc = ch->brdchan;
- BUG_ON(!bc);
- wrapmask = ch->rxbufsize - 1;
-
- /*
- * Get the head and tail pointers to the receiver queue. Wrap the head
- * pointer if it has reached the end of the buffer.
- */
- head = readw(&bc->rin);
- head &= wrapmask;
- tail = readw(&bc->rout) & wrapmask;
-
- bytesAvailable = (head - tail) & wrapmask;
- if (bytesAvailable == 0)
- return;
-
- /* If CREAD bit is off or device not open, set RX tail to head */
- if (!tty || !ts || !(ts->c_cflag & CREAD)) {
- writew(head, &bc->rout);
- return;
- }
-
- if (tty_buffer_request_room(tty, bytesAvailable + 1) == 0)
- return;
-
- if (readb(&bc->orun)) {
- writeb(0, &bc->orun);
- printk(KERN_WARNING "epca; overrun! DigiBoard device %s\n",
- tty->name);
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- }
- rxwinon(ch);
- while (bytesAvailable > 0) {
- /* Begin while there is data on the card */
- wrapgap = (head >= tail) ? head - tail : ch->rxbufsize - tail;
- /*
- * Even if head has wrapped around only report the amount of
- * data to be equal to the size - tail. Remember memcpy can't
- * automatically wrap around the receive buffer.
- */
- dataToRead = (wrapgap < bytesAvailable) ? wrapgap
- : bytesAvailable;
- /* Make sure we don't overflow the buffer */
- dataToRead = tty_prepare_flip_string(tty, &rptr, dataToRead);
- if (dataToRead == 0)
- break;
- /*
- * Move data read from our card into the line discipline's
- * buffer for translation if necessary.
- */
- memcpy_fromio(rptr, ch->rxptr + tail, dataToRead);
- tail = (tail + dataToRead) & wrapmask;
- bytesAvailable -= dataToRead;
- } /* End while there is data on the card */
- globalwinon(ch);
- writew(tail, &bc->rout);
- /* Must be called with global data */
- tty_schedule_flip(tty);
-}
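The head/tail arithmetic above is the standard power-of-two ring-buffer pattern: (size - 1) acts as a wrap mask and the copy is split at the end of the buffer because memcpy cannot wrap by itself. A minimal stand-alone sketch of the same idea, with hypothetical names:

	#include <string.h>

	/* Drain a ring buffer whose size is a power of two, so (size - 1)
	 * works as a wrap mask. Returns the number of bytes copied to 'out'. */
	static unsigned int ring_drain(const unsigned char *ring, unsigned int size,
				       unsigned int head, unsigned int *tail,
				       unsigned char *out)
	{
		unsigned int mask = size - 1;
		unsigned int avail = (head - *tail) & mask;
		unsigned int copied = 0;

		while (avail > 0) {
			/* Largest contiguous run before the end of the buffer */
			unsigned int chunk = (head >= *tail) ? head - *tail
							     : size - *tail;

			if (chunk > avail)
				chunk = avail;
			memcpy(out + copied, ring + *tail, chunk);
			*tail = (*tail + chunk) & mask;
			copied += chunk;
			avail -= chunk;
		}
		return copied;
	}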
-
-static int info_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case DIGI_GETINFO:
- {
- struct digi_info di;
- int brd;
-
- if (get_user(brd, (unsigned int __user *)arg))
- return -EFAULT;
- if (brd < 0 || brd >= num_cards || num_cards == 0)
- return -ENODEV;
-
- memset(&di, 0, sizeof(di));
-
- di.board = brd;
- di.status = boards[brd].status;
- di.type = boards[brd].type;
- di.numports = boards[brd].numports;
- /* Legacy fixups - just move along, nothing to see here */
- di.port = (unsigned char *)boards[brd].port;
- di.membase = (unsigned char *)boards[brd].membase;
-
- if (copy_to_user((void __user *)arg, &di, sizeof(di)))
- return -EFAULT;
- break;
-
- }
-
- case DIGI_POLLER:
- {
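- /*
- * Note: '>>' binds more tightly than '&', so the line below
- * evaluates as arg & (0xff000000 >> 16), i.e. arg & 0xff0000.
- */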
- int brd = arg & 0xff000000 >> 16;
- unsigned char state = arg & 0xff;
-
- if (brd < 0 || brd >= num_cards) {
- printk(KERN_ERR "epca: DIGI POLLER : brd not valid!\n");
- return -ENODEV;
- }
- digi_poller_inhibited = state;
- break;
- }
-
- case DIGI_INIT:
- {
- /*
- * This call is made by the apps to complete the
- * initialization of the board(s). This routine is
- * responsible for setting the card to its initial
- * state and setting the driver's control fields to
- * the suitable settings for the card in question.
- */
- int crd;
- for (crd = 0; crd < num_cards; crd++)
- post_fep_init(crd);
- break;
- }
- default:
- return -ENOTTY;
- }
- return 0;
-}
-
-static int pc_tiocmget(struct tty_struct *tty)
-{
- struct channel *ch = tty->driver_data;
- struct board_chan __iomem *bc;
- unsigned int mstat, mflag = 0;
- unsigned long flags;
-
- if (ch)
- bc = ch->brdchan;
- else
- return -EINVAL;
-
- spin_lock_irqsave(&epca_lock, flags);
- globalwinon(ch);
- mstat = readb(&bc->mstat);
- memoff(ch);
- spin_unlock_irqrestore(&epca_lock, flags);
-
- if (mstat & ch->m_dtr)
- mflag |= TIOCM_DTR;
- if (mstat & ch->m_rts)
- mflag |= TIOCM_RTS;
- if (mstat & ch->m_cts)
- mflag |= TIOCM_CTS;
- if (mstat & ch->dsr)
- mflag |= TIOCM_DSR;
- if (mstat & ch->m_ri)
- mflag |= TIOCM_RI;
- if (mstat & ch->dcd)
- mflag |= TIOCM_CD;
- return mflag;
-}
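The TIOCM_* bits assembled above are what a caller sees through the standard modem-control ioctls; a minimal user-space sketch (the device node name is hypothetical):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int status;
		int fd = open("/dev/ttyD000", O_RDONLY | O_NOCTTY);

		if (fd < 0 || ioctl(fd, TIOCMGET, &status) < 0) {
			perror("tiocmget sketch");
			return 1;
		}
		printf("DTR=%d RTS=%d CTS=%d DSR=%d CD=%d RI=%d\n",
		       !!(status & TIOCM_DTR), !!(status & TIOCM_RTS),
		       !!(status & TIOCM_CTS), !!(status & TIOCM_DSR),
		       !!(status & TIOCM_CD),  !!(status & TIOCM_RI));
		close(fd);
		return 0;
	}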
-
-static int pc_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct channel *ch = tty->driver_data;
- unsigned long flags;
-
- if (!ch)
- return -EINVAL;
-
- spin_lock_irqsave(&epca_lock, flags);
- /*
- * The modemfake handling here is probably broken: it doesn't correctly
- * reflect the behaviour desired by the TIOCM* ioctls.
- */
- if (set & TIOCM_RTS) {
- ch->modemfake |= ch->m_rts;
- ch->modem |= ch->m_rts;
- }
- if (set & TIOCM_DTR) {
- ch->modemfake |= ch->m_dtr;
- ch->modem |= ch->m_dtr;
- }
- if (clear & TIOCM_RTS) {
- ch->modemfake |= ch->m_rts;
- ch->modem &= ~ch->m_rts;
- }
- if (clear & TIOCM_DTR) {
- ch->modemfake |= ch->m_dtr;
- ch->modem &= ~ch->m_dtr;
- }
- globalwinon(ch);
- /*
- * The below routine generally sets up parity, baud, flow control
- * issues, etc. It affects both control flags and input flags.
- */
- epcaparam(tty, ch);
- memoff(ch);
- spin_unlock_irqrestore(&epca_lock, flags);
- return 0;
-}
-
-static int pc_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- digiflow_t dflow;
- unsigned long flags;
- unsigned int mflag, mstat;
- unsigned char startc, stopc;
- struct board_chan __iomem *bc;
- struct channel *ch = tty->driver_data;
- void __user *argp = (void __user *)arg;
-
- if (ch)
- bc = ch->brdchan;
- else
- return -EINVAL;
- switch (cmd) {
- case TIOCMODG:
- mflag = pc_tiocmget(tty);
- if (put_user(mflag, (unsigned long __user *)argp))
- return -EFAULT;
- break;
- case TIOCMODS:
- if (get_user(mstat, (unsigned __user *)argp))
- return -EFAULT;
- return pc_tiocmset(tty, mstat, ~mstat);
- case TIOCSDTR:
- spin_lock_irqsave(&epca_lock, flags);
- ch->omodem |= ch->m_dtr;
- globalwinon(ch);
- fepcmd(ch, SETMODEM, ch->m_dtr, 0, 10, 1);
- memoff(ch);
- spin_unlock_irqrestore(&epca_lock, flags);
- break;
-
- case TIOCCDTR:
- spin_lock_irqsave(&epca_lock, flags);
- ch->omodem &= ~ch->m_dtr;
- globalwinon(ch);
- fepcmd(ch, SETMODEM, 0, ch->m_dtr, 10, 1);
- memoff(ch);
- spin_unlock_irqrestore(&epca_lock, flags);
- break;
- case DIGI_GETA:
- if (copy_to_user(argp, &ch->digiext, sizeof(digi_t)))
- return -EFAULT;
- break;
- case DIGI_SETAW:
- case DIGI_SETAF:
- if (cmd == DIGI_SETAW) {
- /* Setup an event to indicate when the transmit
- buffer empties */
- spin_lock_irqsave(&epca_lock, flags);
- setup_empty_event(tty, ch);
- spin_unlock_irqrestore(&epca_lock, flags);
- tty_wait_until_sent(tty, 0);
- } else {
- /* ldisc lock already held in ioctl */
- if (tty->ldisc->ops->flush_buffer)
- tty->ldisc->ops->flush_buffer(tty);
- }
- /* Fall Thru */
- case DIGI_SETA:
- if (copy_from_user(&ch->digiext, argp, sizeof(digi_t)))
- return -EFAULT;
-
- if (ch->digiext.digi_flags & DIGI_ALTPIN) {
- ch->dcd = ch->m_dsr;
- ch->dsr = ch->m_dcd;
- } else {
- ch->dcd = ch->m_dcd;
- ch->dsr = ch->m_dsr;
- }
-
- spin_lock_irqsave(&epca_lock, flags);
- globalwinon(ch);
-
- /*
- * The below routine generally sets up parity, baud, flow
- * control issues, etc. It affects both control flags and
- * input flags.
- */
- epcaparam(tty, ch);
- memoff(ch);
- spin_unlock_irqrestore(&epca_lock, flags);
- break;
-
- case DIGI_GETFLOW:
- case DIGI_GETAFLOW:
- spin_lock_irqsave(&epca_lock, flags);
- globalwinon(ch);
- if (cmd == DIGI_GETFLOW) {
- dflow.startc = readb(&bc->startc);
- dflow.stopc = readb(&bc->stopc);
- } else {
- dflow.startc = readb(&bc->startca);
- dflow.stopc = readb(&bc->stopca);
- }
- memoff(ch);
- spin_unlock_irqrestore(&epca_lock, flags);
-
- if (copy_to_user(argp, &dflow, sizeof(dflow)))
- return -EFAULT;
- break;
-
- case DIGI_SETAFLOW:
- case DIGI_SETFLOW:
- if (cmd == DIGI_SETFLOW) {
- startc = ch->startc;
- stopc = ch->stopc;
- } else {
- startc = ch->startca;
- stopc = ch->stopca;
- }
-
- if (copy_from_user(&dflow, argp, sizeof(dflow)))
- return -EFAULT;
-
- if (dflow.startc != startc || dflow.stopc != stopc) {
- /* Begin if setflow toggled */
- spin_lock_irqsave(&epca_lock, flags);
- globalwinon(ch);
-
- if (cmd == DIGI_SETFLOW) {
- ch->fepstartc = ch->startc = dflow.startc;
- ch->fepstopc = ch->stopc = dflow.stopc;
- fepcmd(ch, SONOFFC, ch->fepstartc,
- ch->fepstopc, 0, 1);
- } else {
- ch->fepstartca = ch->startca = dflow.startc;
- ch->fepstopca = ch->stopca = dflow.stopc;
- fepcmd(ch, SAUXONOFFC, ch->fepstartca,
- ch->fepstopca, 0, 1);
- }
-
- if (ch->statusflags & TXSTOPPED)
- pc_start(tty);
-
- memoff(ch);
- spin_unlock_irqrestore(&epca_lock, flags);
- } /* End if setflow toggled */
- break;
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-static void pc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
-{
- struct channel *ch;
- unsigned long flags;
- /*
- * verifyChannel returns the channel from the tty struct if it is
- * valid. This serves as a sanity check.
- */
- ch = verifyChannel(tty);
-
- if (ch != NULL) { /* Begin if channel valid */
- spin_lock_irqsave(&epca_lock, flags);
- globalwinon(ch);
- epcaparam(tty, ch);
- memoff(ch);
- spin_unlock_irqrestore(&epca_lock, flags);
-
- if ((old_termios->c_cflag & CRTSCTS) &&
- ((tty->termios->c_cflag & CRTSCTS) == 0))
- tty->hw_stopped = 0;
-
- if (!(old_termios->c_cflag & CLOCAL) &&
- (tty->termios->c_cflag & CLOCAL))
- wake_up_interruptible(&ch->port.open_wait);
-
- } /* End if channel valid */
-}
-
-static void do_softint(struct work_struct *work)
-{
- struct channel *ch = container_of(work, struct channel, tqueue);
- /* Called in response to a modem change event */
- if (ch && ch->magic == EPCA_MAGIC) {
- struct tty_struct *tty = tty_port_tty_get(&ch->port);
-
- if (tty && tty->driver_data) {
- if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) {
- tty_hangup(tty);
- wake_up_interruptible(&ch->port.open_wait);
- clear_bit(ASYNCB_NORMAL_ACTIVE,
- &ch->port.flags);
- }
- }
- tty_kref_put(tty);
- }
-}
-
-/*
- * pc_stop and pc_start provide software flow control for the tty layer and
- * for the pc_ioctl routine.
- */
-static void pc_stop(struct tty_struct *tty)
-{
- struct channel *ch;
- unsigned long flags;
- /*
- * verifyChannel returns the channel from the tty struct if it is
- * valid. This serves as a sanity check.
- */
- ch = verifyChannel(tty);
- if (ch != NULL) {
- spin_lock_irqsave(&epca_lock, flags);
- if ((ch->statusflags & TXSTOPPED) == 0) {
- /* Begin if transmit stop requested */
- globalwinon(ch);
- /* STOP transmitting now !! */
- fepcmd(ch, PAUSETX, 0, 0, 0, 0);
- ch->statusflags |= TXSTOPPED;
- memoff(ch);
- } /* End if transmit stop requested */
- spin_unlock_irqrestore(&epca_lock, flags);
- }
-}
-
-static void pc_start(struct tty_struct *tty)
-{
- struct channel *ch;
- /*
- * verifyChannel returns the channel from the tty struct if it is
- * valid. This serves as a sanity check.
- */
- ch = verifyChannel(tty);
- if (ch != NULL) {
- unsigned long flags;
- spin_lock_irqsave(&epca_lock, flags);
- /* Just in case output was resumed because of a change
- in Digi-flow */
- if (ch->statusflags & TXSTOPPED) {
- /* Begin transmit resume requested */
- struct board_chan __iomem *bc;
- globalwinon(ch);
- bc = ch->brdchan;
- if (ch->statusflags & LOWWAIT)
- writeb(1, &bc->ilow);
- /* Okay, you can start transmitting again... */
- fepcmd(ch, RESUMETX, 0, 0, 0, 0);
- ch->statusflags &= ~TXSTOPPED;
- memoff(ch);
- } /* End transmit resume requested */
- spin_unlock_irqrestore(&epca_lock, flags);
- }
-}
-
-/*
- * The below routines pc_throttle and pc_unthrottle are used to slow (and
- * resume) the receipt of data into the kernel's receive buffers. The exact
- * occurrence of this depends on the size of the kernel's receive buffer and
- * what the 'watermarks' are set to for that buffer. See the n_tty.c file for
- * more details.
- */
-static void pc_throttle(struct tty_struct *tty)
-{
- struct channel *ch;
- unsigned long flags;
- /*
- * verifyChannel returns the channel from the tty struct if it is
- * valid. This serves as a sanity check.
- */
- ch = verifyChannel(tty);
- if (ch != NULL) {
- spin_lock_irqsave(&epca_lock, flags);
- if ((ch->statusflags & RXSTOPPED) == 0) {
- globalwinon(ch);
- fepcmd(ch, PAUSERX, 0, 0, 0, 0);
- ch->statusflags |= RXSTOPPED;
- memoff(ch);
- }
- spin_unlock_irqrestore(&epca_lock, flags);
- }
-}
-
-static void pc_unthrottle(struct tty_struct *tty)
-{
- struct channel *ch;
- unsigned long flags;
- /*
- * verifyChannel returns the channel from the tty struct if it is
- * valid. This serves as a sanity check.
- */
- ch = verifyChannel(tty);
- if (ch != NULL) {
- /* Just in case output was resumed because of a change
- in Digi-flow */
- spin_lock_irqsave(&epca_lock, flags);
- if (ch->statusflags & RXSTOPPED) {
- globalwinon(ch);
- fepcmd(ch, RESUMERX, 0, 0, 0, 0);
- ch->statusflags &= ~RXSTOPPED;
- memoff(ch);
- }
- spin_unlock_irqrestore(&epca_lock, flags);
- }
-}
-
-static int pc_send_break(struct tty_struct *tty, int msec)
-{
- struct channel *ch = tty->driver_data;
- unsigned long flags;
-
- if (msec == -1)
- msec = 0xFFFF;
- else if (msec > 0xFFFE)
- msec = 0xFFFE;
- else if (msec < 1)
- msec = 1;
-
- spin_lock_irqsave(&epca_lock, flags);
- globalwinon(ch);
- /*
- * Maybe I should send an infinite break here, schedule() for msec
- * amount of time, and then stop the break. This way, the user can't
- * screw up the FEP by causing digi_send_break() to be called (i.e. via
- * an ioctl()) more than once in msec amount of time.
- * Try this for now...
- */
- fepcmd(ch, SENDBREAK, msec, 0, 10, 0);
- memoff(ch);
- spin_unlock_irqrestore(&epca_lock, flags);
- return 0;
-}
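From user space this path is normally reached through the POSIX break interface; a minimal sketch (device node hypothetical; duration handling is driver-specific, as the clamping above shows):

	#include <fcntl.h>
	#include <termios.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/ttyD000", O_RDWR | O_NOCTTY);

		if (fd < 0)
			return 1;
		/* A zero duration lets the tty layer choose its default
		 * break length; the driver above bounds whatever it gets. */
		tcsendbreak(fd, 0);
		close(fd);
		return 0;
	}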
-
-/* Caller MUST hold the lock */
-static void setup_empty_event(struct tty_struct *tty, struct channel *ch)
-{
- struct board_chan __iomem *bc = ch->brdchan;
-
- globalwinon(ch);
- ch->statusflags |= EMPTYWAIT;
- /*
- * Setting the iempty flag requests an event to be generated when the
- * transmit buffer is empty (if there is no BREAK in progress).
- */
- writeb(1, &bc->iempty);
- memoff(ch);
-}
-
-#ifndef MODULE
-static void __init epca_setup(char *str, int *ints)
-{
- struct board_info board;
- int index, loop, last;
- char *temp, *t2;
- unsigned len;
-
- /*
- * If this routine looks a little strange it is because it is only
- * called if a LILO append command is given to boot the kernel with
- * parameters. In this way, we can provide the user a method of
- * changing his board configuration without rebuilding the kernel.
- */
- if (!liloconfig)
- liloconfig = 1;
-
- memset(&board, 0, sizeof(board));
-
- /* Assume the data is int first; we can change it later */
- /* Array position 0 of ints holds the number of integer args */
- for (last = 0, index = 1; index <= ints[0]; index++)
- switch (index) { /* Begin parse switch */
- case 1:
- board.status = ints[index];
- /*
- * We check for 2 (as opposed to 1) because 2 is a flag
- * instructing the driver to ignore epcaconfig as well
- * as the LILO command line.
- */
- if (board.status == 2) {
- /* Begin ignore epcaconfig as well as lilo cmd line */
- nbdevs = 0;
- num_cards = 0;
- return;
- } /* End ignore epcaconfig as well as lilo cmd line */
-
- if (board.status > 2) {
- printk(KERN_ERR "epca_setup: Invalid board status 0x%x\n",
- board.status);
- invalid_lilo_config = 1;
- setup_error_code |= INVALID_BOARD_STATUS;
- return;
- }
- last = index;
- break;
- case 2:
- board.type = ints[index];
- if (board.type >= PCIXEM) {
- printk(KERN_ERR "epca_setup: Invalid board type 0x%x\n", board.type);
- invalid_lilo_config = 1;
- setup_error_code |= INVALID_BOARD_TYPE;
- return;
- }
- last = index;
- break;
- case 3:
- board.altpin = ints[index];
- if (board.altpin > 1) {
- printk(KERN_ERR "epca_setup: Invalid board altpin 0x%x\n", board.altpin);
- invalid_lilo_config = 1;
- setup_error_code |= INVALID_ALTPIN;
- return;
- }
- last = index;
- break;
-
- case 4:
- board.numports = ints[index];
- if (board.numports < 2 || board.numports > 256) {
- printk(KERN_ERR "epca_setup: Invalid board numports 0x%x\n", board.numports);
- invalid_lilo_config = 1;
- setup_error_code |= INVALID_NUM_PORTS;
- return;
- }
- nbdevs += board.numports;
- last = index;
- break;
-
- case 5:
- board.port = ints[index];
- if (ints[index] <= 0) {
- printk(KERN_ERR "epca_setup: Invalid io port 0x%x\n", (unsigned int)board.port);
- invalid_lilo_config = 1;
- setup_error_code |= INVALID_PORT_BASE;
- return;
- }
- last = index;
- break;
-
- case 6:
- board.membase = ints[index];
- if (ints[index] <= 0) {
- printk(KERN_ERR "epca_setup: Invalid memory base 0x%x\n",
- (unsigned int)board.membase);
- invalid_lilo_config = 1;
- setup_error_code |= INVALID_MEM_BASE;
- return;
- }
- last = index;
- break;
-
- default:
- printk(KERN_ERR "<Error> - epca_setup: Too many integer parms\n");
- return;
-
- } /* End parse switch */
-
- while (str && *str) { /* Begin while there is a string arg */
- /* find the next comma or terminator */
- temp = str;
- /* While string is not null, and a comma hasn't been found */
- while (*temp && (*temp != ','))
- temp++;
- if (!*temp)
- temp = NULL;
- else
- *temp++ = 0;
- /* Set index to the number of args + 1 */
- index = last + 1;
-
- switch (index) {
- case 1:
- len = strlen(str);
- if (strncmp("Disable", str, len) == 0)
- board.status = 0;
- else if (strncmp("Enable", str, len) == 0)
- board.status = 1;
- else {
- printk(KERN_ERR "epca_setup: Invalid status %s\n", str);
- invalid_lilo_config = 1;
- setup_error_code |= INVALID_BOARD_STATUS;
- return;
- }
- last = index;
- break;
-
- case 2:
- for (loop = 0; loop < EPCA_NUM_TYPES; loop++)
- if (strcmp(board_desc[loop], str) == 0)
- break;
- /*
- * If the loop index incremented above refers to a
- * legitimate board type, set it here.
- */
- if (loop < EPCA_NUM_TYPES)
- board.type = loop;
- else {
- printk(KERN_ERR "epca_setup: Invalid board type: %s\n", str);
- invalid_lilo_config = 1;
- setup_error_code |= INVALID_BOARD_TYPE;
- return;
- }
- last = index;
- break;
-
- case 3:
- len = strlen(str);
- if (strncmp("Disable", str, len) == 0)
- board.altpin = 0;
- else if (strncmp("Enable", str, len) == 0)
- board.altpin = 1;
- else {
- printk(KERN_ERR "epca_setup: Invalid altpin %s\n", str);
- invalid_lilo_config = 1;
- setup_error_code |= INVALID_ALTPIN;
- return;
- }
- last = index;
- break;
-
- case 4:
- t2 = str;
- while (isdigit(*t2))
- t2++;
-
- if (*t2) {
- printk(KERN_ERR "epca_setup: Invalid port count %s\n", str);
- invalid_lilo_config = 1;
- setup_error_code |= INVALID_NUM_PORTS;
- return;
- }
-
- /*
- * There is not a man page for simple_strtoul but the
- * code can be found in vsprintf.c. The first argument
- * is the string to translate (To an unsigned long
- * obviously), the second argument can be the address
- * of any character variable or a NULL. If a variable
- * is given, the end pointer of the string will be
- * stored in that variable; if a NULL is given the end
- * pointer will not be returned. The last argument is
- * the base to use. If a 0 is indicated, the routine
- * will attempt to determine the proper base by looking
- * at the value's prefix (a '0' for octal, a '0x' for
- * hex, etc.). If a nonzero base is given, it will be
- * used as the base.
- */
- board.numports = simple_strtoul(str, NULL, 0);
- nbdevs += board.numports;
- last = index;
- break;
-
- case 5:
- t2 = str;
- while (isxdigit(*t2))
- t2++;
-
- if (*t2) {
- printk(KERN_ERR "epca_setup: Invalid i/o address %s\n", str);
- invalid_lilo_config = 1;
- setup_error_code |= INVALID_PORT_BASE;
- return;
- }
-
- board.port = simple_strtoul(str, NULL, 16);
- last = index;
- break;
-
- case 6:
- t2 = str;
- while (isxdigit(*t2))
- t2++;
-
- if (*t2) {
- printk(KERN_ERR "epca_setup: Invalid memory base %s\n", str);
- invalid_lilo_config = 1;
- setup_error_code |= INVALID_MEM_BASE;
- return;
- }
- board.membase = simple_strtoul(str, NULL, 16);
- last = index;
- break;
- default:
- printk(KERN_ERR "epca: Too many string parms\n");
- return;
- }
- str = temp;
- } /* End while there is a string arg */
-
- if (last < 6) {
- printk(KERN_ERR "epca: Insufficient parms specified\n");
- return;
- }
-
- /* I should REALLY validate the stuff here */
- /* Copies our local copy of board into boards */
- memcpy((void *)&boards[num_cards], (void *)&board, sizeof(board));
- /* Does this get called once per lilo arg, or what? */
- printk(KERN_INFO "PC/Xx: Added board %i, %s %i ports at 0x%4.4X base 0x%6.6X\n",
- num_cards, board_desc[board.type],
- board.numports, (int)board.port, (unsigned int) board.membase);
- num_cards++;
-}
-
-static int __init epca_real_setup(char *str)
-{
- int ints[11];
-
- epca_setup(get_options(str, 11, ints), ints);
- return 1;
-}
-
-__setup("digiepca", epca_real_setup);
-#endif
-
-enum epic_board_types {
- brd_xr = 0,
- brd_xem,
- brd_cx,
- brd_xrj,
-};
-
-/* indexed directly by epic_board_types enum */
-static struct {
- unsigned char board_type;
- unsigned bar_idx; /* PCI base address region */
-} epca_info_tbl[] = {
- { PCIXR, 0, },
- { PCIXEM, 0, },
- { PCICX, 0, },
- { PCIXRJ, 2, },
-};
-
-static int __devinit epca_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- static int board_num = -1;
- int board_idx, info_idx = ent->driver_data;
- unsigned long addr;
-
- if (pci_enable_device(pdev))
- return -EIO;
-
- board_num++;
- board_idx = board_num + num_cards;
- if (board_idx >= MAXBOARDS)
- goto err_out;
-
- addr = pci_resource_start(pdev, epca_info_tbl[info_idx].bar_idx);
- if (!addr) {
- printk(KERN_ERR PFX "PCI region #%d not available (size 0)\n",
- epca_info_tbl[info_idx].bar_idx);
- goto err_out;
- }
-
- boards[board_idx].status = ENABLED;
- boards[board_idx].type = epca_info_tbl[info_idx].board_type;
- boards[board_idx].numports = 0x0;
- boards[board_idx].port = addr + PCI_IO_OFFSET;
- boards[board_idx].membase = addr;
-
- if (!request_mem_region(addr + PCI_IO_OFFSET, 0x200000, "epca")) {
- printk(KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n",
- 0x200000, addr + PCI_IO_OFFSET);
- goto err_out;
- }
-
- boards[board_idx].re_map_port = ioremap_nocache(addr + PCI_IO_OFFSET,
- 0x200000);
- if (!boards[board_idx].re_map_port) {
- printk(KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n",
- 0x200000, addr + PCI_IO_OFFSET);
- goto err_out_free_pciio;
- }
-
- if (!request_mem_region(addr, 0x200000, "epca")) {
- printk(KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n",
- 0x200000, addr);
- goto err_out_free_iounmap;
- }
-
- boards[board_idx].re_map_membase = ioremap_nocache(addr, 0x200000);
- if (!boards[board_idx].re_map_membase) {
- printk(KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n",
- 0x200000, addr);
- goto err_out_free_memregion;
- }
-
- /*
- * I don't know what the below does, but the hardware guys say it's
- * required on everything except PLX (in this case XRJ).
- */
- if (info_idx != brd_xrj) {
- pci_write_config_byte(pdev, 0x40, 0);
- pci_write_config_byte(pdev, 0x46, 0);
- }
-
- return 0;
-
-err_out_free_memregion:
- release_mem_region(addr, 0x200000);
-err_out_free_iounmap:
- iounmap(boards[board_idx].re_map_port);
-err_out_free_pciio:
- release_mem_region(addr + PCI_IO_OFFSET, 0x200000);
-err_out:
- return -ENODEV;
-}
-
-
-static struct pci_device_id epca_pci_tbl[] = {
- { PCI_VENDOR_DIGI, PCI_DEVICE_XR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, brd_xr },
- { PCI_VENDOR_DIGI, PCI_DEVICE_XEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, brd_xem },
- { PCI_VENDOR_DIGI, PCI_DEVICE_CX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, brd_cx },
- { PCI_VENDOR_DIGI, PCI_DEVICE_XRJ, PCI_ANY_ID, PCI_ANY_ID, 0, 0, brd_xrj },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, epca_pci_tbl);
-
-static int __init init_PCI(void)
-{
- memset(&epca_driver, 0, sizeof(epca_driver));
- epca_driver.name = "epca";
- epca_driver.id_table = epca_pci_tbl;
- epca_driver.probe = epca_init_one;
-
- return pci_register_driver(&epca_driver);
-}
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/tty/epca.h b/drivers/staging/tty/epca.h
deleted file mode 100644
index d414bf2dbf7..00000000000
--- a/drivers/staging/tty/epca.h
+++ /dev/null
@@ -1,158 +0,0 @@
-#define XEMPORTS 0xC02
-#define XEPORTS 0xC22
-
-#define MAX_ALLOC 0x100
-
-#define MAXBOARDS 12
-#define FEPCODESEG 0x0200L
-#define FEPCODE 0x2000L
-#define BIOSCODE 0xf800L
-
-#define MISCGLOBAL 0x0C00L
-#define NPORT 0x0C22L
-#define MBOX 0x0C40L
-#define PORTBASE 0x0C90L
-
-/* Begin code defines used for epca_setup */
-
-#define INVALID_BOARD_TYPE 0x1
-#define INVALID_NUM_PORTS 0x2
-#define INVALID_MEM_BASE 0x4
-#define INVALID_PORT_BASE 0x8
-#define INVALID_BOARD_STATUS 0x10
-#define INVALID_ALTPIN 0x20
-
-/* End code defines used for epca_setup */
-
-
-#define FEPCLR 0x00
-#define FEPMEM 0x02
-#define FEPRST 0x04
-#define FEPINT 0x08
-#define FEPMASK 0x0e
-#define FEPWIN 0x80
-
-#define PCXE 0
-#define PCXEVE 1
-#define PCXEM 2
-#define EISAXEM 3
-#define PC64XE 4
-#define PCXI 5
-#define PCIXEM 7
-#define PCICX 8
-#define PCIXR 9
-#define PCIXRJ 10
-#define EPCA_NUM_TYPES 6
-
-
-static char *board_desc[] =
-{
- "PC/Xe",
- "PC/Xeve",
- "PC/Xem",
- "EISA/Xem",
- "PC/64Xe",
- "PC/Xi",
- "unknown",
- "PCI/Xem",
- "PCI/CX",
- "PCI/Xr",
- "PCI/Xrj",
-};
-
-#define STARTC 021
-#define STOPC 023
-#define IAIXON 0x2000
-
-
-#define TXSTOPPED 0x1
-#define LOWWAIT 0x2
-#define EMPTYWAIT 0x4
-#define RXSTOPPED 0x8
-#define TXBUSY 0x10
-
-#define DISABLED 0
-#define ENABLED 1
-#define OFF 0
-#define ON 1
-
-#define FEPTIMEOUT 200000
-#define SERIAL_TYPE_INFO 3
-#define EPCA_EVENT_HANGUP 1
-#define EPCA_MAGIC 0x5c6df104L
-
-struct channel
-{
- long magic;
- struct tty_port port;
- unsigned char boardnum;
- unsigned char channelnum;
- unsigned char omodem; /* FEP output modem status */
- unsigned char imodem; /* FEP input modem status */
- unsigned char modemfake; /* Modem values to be forced */
- unsigned char modem; /* Force values */
- unsigned char hflow;
- unsigned char dsr;
- unsigned char dcd;
- unsigned char m_rts; /* The bits used in whatever FEP */
- unsigned char m_dcd; /* is indigenous to this board to */
- unsigned char m_dsr; /* represent each of the physical */
- unsigned char m_cts; /* handshake lines */
- unsigned char m_ri;
- unsigned char m_dtr;
- unsigned char stopc;
- unsigned char startc;
- unsigned char stopca;
- unsigned char startca;
- unsigned char fepstopc;
- unsigned char fepstartc;
- unsigned char fepstopca;
- unsigned char fepstartca;
- unsigned char txwin;
- unsigned char rxwin;
- unsigned short fepiflag;
- unsigned short fepcflag;
- unsigned short fepoflag;
- unsigned short txbufhead;
- unsigned short txbufsize;
- unsigned short rxbufhead;
- unsigned short rxbufsize;
- int close_delay;
- unsigned long event;
- uint dev;
- unsigned long statusflags;
- unsigned long c_iflag;
- unsigned long c_cflag;
- unsigned long c_lflag;
- unsigned long c_oflag;
- unsigned char __iomem *txptr;
- unsigned char __iomem *rxptr;
- struct board_info *board;
- struct board_chan __iomem *brdchan;
- struct digi_struct digiext;
- struct work_struct tqueue;
- struct global_data __iomem *mailbox;
-};
-
-struct board_info
-{
- unsigned char status;
- unsigned char type;
- unsigned char altpin;
- unsigned short numports;
- unsigned long port;
- unsigned long membase;
- void __iomem *re_map_port;
- void __iomem *re_map_membase;
- unsigned long memory_seg;
- void (*memwinon)(struct board_info *, unsigned int);
- void (*memwinoff)(struct board_info *, unsigned int);
- void (*globalwinon)(struct channel *);
- void (*txwinon)(struct channel *);
- void (*rxwinon)(struct channel *);
- void (*memoff)(struct channel *);
- void (*assertgwinon)(struct channel *);
- void (*assertmemoff)(struct channel *);
- unsigned char poller_inhibited;
-};
-
diff --git a/drivers/staging/tty/epcaconfig.h b/drivers/staging/tty/epcaconfig.h
deleted file mode 100644
index 55dec067078..00000000000
--- a/drivers/staging/tty/epcaconfig.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#define NUMCARDS 0
-#define NBDEVS 0
-
-struct board_info static_boards[NUMCARDS]={
-};
-
-/* DO NOT HAND EDIT THIS FILE! */
diff --git a/drivers/staging/tty/ip2/Makefile b/drivers/staging/tty/ip2/Makefile
deleted file mode 100644
index 7b78e0dfc5b..00000000000
--- a/drivers/staging/tty/ip2/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for the Computone IntelliPort Plus Driver
-#
-
-obj-$(CONFIG_COMPUTONE) += ip2.o
-
-ip2-y := ip2main.o
-
diff --git a/drivers/staging/tty/ip2/i2cmd.c b/drivers/staging/tty/ip2/i2cmd.c
deleted file mode 100644
index e7af647800b..00000000000
--- a/drivers/staging/tty/ip2/i2cmd.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/*******************************************************************************
-*
-* (c) 1998 by Computone Corporation
-*
-********************************************************************************
-*
-*
-* PACKAGE: Linux tty Device Driver for IntelliPort family of multiport
-* serial I/O controllers.
-*
-* DESCRIPTION: Definition table for In-line and Bypass commands. Applicable
-* only when the standard loadware is active. (This is included
-* source code, not a separate compilation module.)
-*
-*******************************************************************************/
-
-//------------------------------------------------------------------------------
-//
-// Revision History:
-//
-// 10 October 1991 MAG First Draft
-// 7 November 1991 MAG Reflects additional commands.
-// 24 February 1992 MAG Additional commands for 1.4.x loadware
-// 11 March 1992 MAG Additional commands
-// 30 March 1992 MAG Additional command: CMD_DSS_NOW
-// 18 May 1992 MAG Discovered commands 39 & 40 must be at the end of a
-// packet: affects implementation.
-//------------------------------------------------------------------------------
-
-//************
-//* Includes *
-//************
-
-#include "i2cmd.h" /* To get some bit-defines */
-
-//------------------------------------------------------------------------------
-// Here is the table of global arrays which represent each type of command
-// supported in the IntelliPort standard loadware. See also i2cmd.h
-// for a more complete explanation of what is going on.
-//------------------------------------------------------------------------------
-
-// Here are the various globals: note that the names are not used except through
-// the macros defined in i2cmd.h. Also note that although they are character
-// arrays here (for extendability) they are cast to structure pointers in the
-// i2cmd.h macros. See i2cmd.h for flags definitions.
-
-// Length Flags Command
-static UCHAR ct02[] = { 1, BTH, 0x02 }; // DTR UP
-static UCHAR ct03[] = { 1, BTH, 0x03 }; // DTR DN
-static UCHAR ct04[] = { 1, BTH, 0x04 }; // RTS UP
-static UCHAR ct05[] = { 1, BTH, 0x05 }; // RTS DN
-static UCHAR ct06[] = { 1, BYP, 0x06 }; // START FL
-static UCHAR ct07[] = { 2, BTH, 0x07,0 }; // BAUD
-static UCHAR ct08[] = { 2, BTH, 0x08,0 }; // BITS
-static UCHAR ct09[] = { 2, BTH, 0x09,0 }; // STOP
-static UCHAR ct10[] = { 2, BTH, 0x0A,0 }; // PARITY
-static UCHAR ct11[] = { 2, BTH, 0x0B,0 }; // XON
-static UCHAR ct12[] = { 2, BTH, 0x0C,0 }; // XOFF
-static UCHAR ct13[] = { 1, BTH, 0x0D }; // STOP FL
-static UCHAR ct14[] = { 1, BYP|VIP, 0x0E }; // ACK HOTK
-//static UCHAR ct15[]={ 2, BTH|VIP, 0x0F,0 }; // IRQ SET
-static UCHAR ct16[] = { 2, INL, 0x10,0 }; // IXONOPTS
-static UCHAR ct17[] = { 2, INL, 0x11,0 }; // OXONOPTS
-static UCHAR ct18[] = { 1, INL, 0x12 }; // CTSENAB
-static UCHAR ct19[] = { 1, BTH, 0x13 }; // CTSDSAB
-static UCHAR ct20[] = { 1, INL, 0x14 }; // DCDENAB
-static UCHAR ct21[] = { 1, BTH, 0x15 }; // DCDDSAB
-static UCHAR ct22[] = { 1, BTH, 0x16 }; // DSRENAB
-static UCHAR ct23[] = { 1, BTH, 0x17 }; // DSRDSAB
-static UCHAR ct24[] = { 1, BTH, 0x18 }; // RIENAB
-static UCHAR ct25[] = { 1, BTH, 0x19 }; // RIDSAB
-static UCHAR ct26[] = { 2, BTH, 0x1A,0 }; // BRKENAB
-static UCHAR ct27[] = { 1, BTH, 0x1B }; // BRKDSAB
-//static UCHAR ct28[]={ 2, BTH, 0x1C,0 }; // MAXBLOKSIZE
-//static UCHAR ct29[]={ 2, 0, 0x1D,0 }; // reserved
-static UCHAR ct30[] = { 1, INL, 0x1E }; // CTSFLOWENAB
-static UCHAR ct31[] = { 1, INL, 0x1F }; // CTSFLOWDSAB
-static UCHAR ct32[] = { 1, INL, 0x20 }; // RTSFLOWENAB
-static UCHAR ct33[] = { 1, INL, 0x21 }; // RTSFLOWDSAB
-static UCHAR ct34[] = { 2, BTH, 0x22,0 }; // ISTRIPMODE
-static UCHAR ct35[] = { 2, BTH|END, 0x23,0 }; // SENDBREAK
-static UCHAR ct36[] = { 2, BTH, 0x24,0 }; // SETERRMODE
-//static UCHAR ct36a[]={ 3, INL, 0x24,0,0 }; // SET_REPLACE
-
-// The following is listed for completeness, but should never be sent directly
-// by user-level code. It is sent only by library routines in response to data
-// movement.
-//static UCHAR ct37[]={ 5, BYP|VIP, 0x25,0,0,0,0 }; // FLOW PACKET
-
-// Back to normal
-//static UCHAR ct38[] = {11, BTH|VAR, 0x26,0,0,0,0,0,0,0,0,0,0 }; // DEF KEY SEQ
-//static UCHAR ct39[]={ 3, BTH|END, 0x27,0,0 }; // OPOSTON
-//static UCHAR ct40[]={ 1, BTH|END, 0x28 }; // OPOSTOFF
-static UCHAR ct41[] = { 1, BYP, 0x29 }; // RESUME
-//static UCHAR ct42[]={ 2, BTH, 0x2A,0 }; // TXBAUD
-//static UCHAR ct43[]={ 2, BTH, 0x2B,0 }; // RXBAUD
-//static UCHAR ct44[]={ 2, BTH, 0x2C,0 }; // MS PING
-//static UCHAR ct45[]={ 1, BTH, 0x2D }; // HOTENAB
-//static UCHAR ct46[]={ 1, BTH, 0x2E }; // HOTDSAB
-//static UCHAR ct47[]={ 7, BTH, 0x2F,0,0,0,0,0,0 }; // UNIX FLAGS
-//static UCHAR ct48[]={ 1, BTH, 0x30 }; // DSRFLOWENAB
-//static UCHAR ct49[]={ 1, BTH, 0x31 }; // DSRFLOWDSAB
-//static UCHAR ct50[]={ 1, BTH, 0x32 }; // DTRFLOWENAB
-//static UCHAR ct51[]={ 1, BTH, 0x33 }; // DTRFLOWDSAB
-//static UCHAR ct52[]={ 1, BTH, 0x34 }; // BAUDTABRESET
-//static UCHAR ct53[] = { 3, BTH, 0x35,0,0 }; // BAUDREMAP
-static UCHAR ct54[] = { 3, BTH, 0x36,0,0 }; // CUSTOMBAUD1
-static UCHAR ct55[] = { 3, BTH, 0x37,0,0 }; // CUSTOMBAUD2
-static UCHAR ct56[] = { 2, BTH|END, 0x38,0 }; // PAUSE
-static UCHAR ct57[] = { 1, BYP, 0x39 }; // SUSPEND
-static UCHAR ct58[] = { 1, BYP, 0x3A }; // UNSUSPEND
-static UCHAR ct59[] = { 2, BTH, 0x3B,0 }; // PARITYCHK
-static UCHAR ct60[] = { 1, INL|VIP, 0x3C }; // BOOKMARKREQ
-//static UCHAR ct61[]={ 2, BTH, 0x3D,0 }; // INTERNALLOOP
-//static UCHAR ct62[]={ 2, BTH, 0x3E,0 }; // HOTKTIMEOUT
-static UCHAR ct63[] = { 2, INL, 0x3F,0 }; // SETTXON
-static UCHAR ct64[] = { 2, INL, 0x40,0 }; // SETTXOFF
-//static UCHAR ct65[]={ 2, BTH, 0x41,0 }; // SETAUTORTS
-//static UCHAR ct66[]={ 2, BTH, 0x42,0 }; // SETHIGHWAT
-//static UCHAR ct67[]={ 2, BYP, 0x43,0 }; // STARTSELFL
-//static UCHAR ct68[]={ 2, INL, 0x44,0 }; // ENDSELFL
-//static UCHAR ct69[]={ 1, BYP, 0x45 }; // HWFLOW_OFF
-//static UCHAR ct70[]={ 1, BTH, 0x46 }; // ODSRFL_ENAB
-//static UCHAR ct71[]={ 1, BTH, 0x47 }; // ODSRFL_DSAB
-//static UCHAR ct72[]={ 1, BTH, 0x48 }; // ODCDFL_ENAB
-//static UCHAR ct73[]={ 1, BTH, 0x49 }; // ODCDFL_DSAB
-//static UCHAR ct74[]={ 2, BTH, 0x4A,0 }; // LOADLEVEL
-//static UCHAR ct75[]={ 2, BTH, 0x4B,0 }; // STATDATA
-//static UCHAR ct76[]={ 1, BYP, 0x4C }; // BREAK_ON
-//static UCHAR ct77[]={ 1, BYP, 0x4D }; // BREAK_OFF
-//static UCHAR ct78[]={ 1, BYP, 0x4E }; // GETFC
-static UCHAR ct79[] = { 2, BYP, 0x4F,0 }; // XMIT_NOW
-//static UCHAR ct80[]={ 4, BTH, 0x50,0,0,0 }; // DIVISOR_LATCH
-//static UCHAR ct81[]={ 1, BYP, 0x51 }; // GET_STATUS
-//static UCHAR ct82[]={ 1, BYP, 0x52 }; // GET_TXCNT
-//static UCHAR ct83[]={ 1, BYP, 0x53 }; // GET_RXCNT
-//static UCHAR ct84[]={ 1, BYP, 0x54 }; // GET_BOXIDS
-//static UCHAR ct85[]={10, BYP, 0x55,0,0,0,0,0,0,0,0,0 }; // ENAB_MULT
-//static UCHAR ct86[]={ 2, BTH, 0x56,0 }; // RCV_ENABLE
-static UCHAR ct87[] = { 1, BYP, 0x57 }; // HW_TEST
-//static UCHAR ct88[]={ 3, BTH, 0x58,0,0 }; // RCV_THRESHOLD
-//static UCHAR ct90[]={ 3, BYP, 0x5A,0,0 }; // Set SILO
-//static UCHAR ct91[]={ 2, BYP, 0x5B,0 }; // timed break
-
-// Some composite commands as well
-//static UCHAR cc01[]={ 2, BTH, 0x02,0x04 }; // DTR & RTS UP
-//static UCHAR cc02[]={ 2, BTH, 0x03,0x05 }; // DTR & RTS DN
-
-//********
-//* Code *
-//********
-
-//******************************************************************************
-// Function: i2cmdUnixFlags(iflag, cflag, lflag)
-// Parameters: Unix tty flags
-//
-// Returns: Pointer to command structure
-//
-// Description:
-//
-// This routine sets the parameters of command 47 and returns a pointer to the
-// appropriate structure.
-//******************************************************************************
-#if 0
-cmdSyntaxPtr
-i2cmdUnixFlags(unsigned short iflag,unsigned short cflag,unsigned short lflag)
-{
- cmdSyntaxPtr pCM = (cmdSyntaxPtr) ct47;
-
- pCM->cmd[1] = (unsigned char) iflag;
- pCM->cmd[2] = (unsigned char) (iflag >> 8);
- pCM->cmd[3] = (unsigned char) cflag;
- pCM->cmd[4] = (unsigned char) (cflag >> 8);
- pCM->cmd[5] = (unsigned char) lflag;
- pCM->cmd[6] = (unsigned char) (lflag >> 8);
- return pCM;
-}
-#endif /* 0 */
-
-//******************************************************************************
-// Function: i2cmdBaudDef(which, rate)
-// Parameters: ?
-//
-// Returns: Pointer to command structure
-//
-// Description:
-//
-// This routine sets the parameters of commands 54 or 55 (according to the
-// argument which), and returns a pointer to the appropriate structure.
-//******************************************************************************
-static cmdSyntaxPtr
-i2cmdBaudDef(int which, unsigned short rate)
-{
- cmdSyntaxPtr pCM;
-
- switch(which)
- {
- case 1:
- pCM = (cmdSyntaxPtr) ct54;
- break;
- default:
- case 2:
- pCM = (cmdSyntaxPtr) ct55;
- break;
- }
- pCM->cmd[1] = (unsigned char) rate;
- pCM->cmd[2] = (unsigned char) (rate >> 8);
- return pCM;
-}
-
diff --git a/drivers/staging/tty/ip2/i2cmd.h b/drivers/staging/tty/ip2/i2cmd.h
deleted file mode 100644
index 29277ec6b8e..00000000000
--- a/drivers/staging/tty/ip2/i2cmd.h
+++ /dev/null
@@ -1,630 +0,0 @@
-/*******************************************************************************
-*
-* (c) 1999 by Computone Corporation
-*
-********************************************************************************
-*
-*
-* PACKAGE: Linux tty Device Driver for IntelliPort II family of multiport
-* serial I/O controllers.
-*
-* DESCRIPTION: Definitions and support for In-line and Bypass commands.
-* Applicable only when the standard loadware is active.
-*
-*******************************************************************************/
-//------------------------------------------------------------------------------
-// Revision History:
-//
-// 10 October 1991 MAG First Draft
-// 7 November 1991 MAG Reflects some new commands
-// 20 February 1992 MAG CMD_HOTACK corrected: no argument.
-// 24 February 1992 MAG Support added for new commands for 1.4.x loadware.
-// 11 March 1992 MAG Additional commands.
-// 16 March 1992 MAG Additional commands.
-// 30 March 1992 MAG Additional command: CMD_DSS_NOW
-// 18 May 1992 MAG Changed CMD_OPOST
-//
-//------------------------------------------------------------------------------
-#ifndef I2CMD_H // To prevent multiple includes
-#define I2CMD_H 1
-
-#include "ip2types.h"
-
-// This module is designed to provide a uniform method of sending commands to
-// the board through command packets. The difficulty is, some commands take
-// parameters, others do not. Furthermore, it is often useful to send several
-// commands to the same channel as part of the same packet. (See also i2pack.h.)
-//
-// This module is designed so that the caller should not be responsible for
-// remembering the exact syntax of each command, or at least so that the
-// compiler could check things somewhat. I'll explain as we go...
-//
-// First, a structure which can embody the syntax of each type of command.
-//
-typedef struct _cmdSyntax
-{
- UCHAR length; // Number of bytes in the command
- UCHAR flags; // Information about the command (see below)
-
- // The command and its parameters, which may be of arbitrary length. Don't
- // worry yet how the parameters will be initialized; macros later take care
- // of it. Also, don't worry about the arbitrary length issue; this structure
- // is never used to allocate space (see i2cmd.c).
- UCHAR cmd[2];
-} cmdSyntax, *cmdSyntaxPtr;
-
-// Bit assignments for flags
-
-#define INL 1 // Set if suitable for inline commands
-#define BYP 2 // Set if suitable for bypass commands
-#define BTH (INL|BYP) // suitable for either!
-#define END 4 // Set if this must be the last command in a block
-#define VIP 8 // Set if this command is special in some way and really
- // should only be sent from the library-level and not
- // directly from user-level
-#define VAR 0x10 // This command is of variable length!
-
-// Declarations for the global arrays used to bear the commands and their
-// arguments.
-//
-// Note: Since these are globals and the arguments might change, it is important
-// that the library routine COPY these into buffers from whence they would be
-// sent, rather than merely storing the pointers. In multi-threaded
-// environments, it is important that the copy be made before any context switch
-// is allowed. Also, for parameterized commands, DO NOT ISSUE THE SAME COMMAND
-// MORE THAN ONCE WITH THE SAME PARAMETERS in the same call.
-//
-static UCHAR ct02[];
-static UCHAR ct03[];
-static UCHAR ct04[];
-static UCHAR ct05[];
-static UCHAR ct06[];
-static UCHAR ct07[];
-static UCHAR ct08[];
-static UCHAR ct09[];
-static UCHAR ct10[];
-static UCHAR ct11[];
-static UCHAR ct12[];
-static UCHAR ct13[];
-static UCHAR ct14[];
-static UCHAR ct15[];
-static UCHAR ct16[];
-static UCHAR ct17[];
-static UCHAR ct18[];
-static UCHAR ct19[];
-static UCHAR ct20[];
-static UCHAR ct21[];
-static UCHAR ct22[];
-static UCHAR ct23[];
-static UCHAR ct24[];
-static UCHAR ct25[];
-static UCHAR ct26[];
-static UCHAR ct27[];
-static UCHAR ct28[];
-static UCHAR ct29[];
-static UCHAR ct30[];
-static UCHAR ct31[];
-static UCHAR ct32[];
-static UCHAR ct33[];
-static UCHAR ct34[];
-static UCHAR ct35[];
-static UCHAR ct36[];
-static UCHAR ct36a[];
-static UCHAR ct41[];
-static UCHAR ct42[];
-static UCHAR ct43[];
-static UCHAR ct44[];
-static UCHAR ct45[];
-static UCHAR ct46[];
-static UCHAR ct48[];
-static UCHAR ct49[];
-static UCHAR ct50[];
-static UCHAR ct51[];
-static UCHAR ct52[];
-static UCHAR ct56[];
-static UCHAR ct57[];
-static UCHAR ct58[];
-static UCHAR ct59[];
-static UCHAR ct60[];
-static UCHAR ct61[];
-static UCHAR ct62[];
-static UCHAR ct63[];
-static UCHAR ct64[];
-static UCHAR ct65[];
-static UCHAR ct66[];
-static UCHAR ct67[];
-static UCHAR ct68[];
-static UCHAR ct69[];
-static UCHAR ct70[];
-static UCHAR ct71[];
-static UCHAR ct72[];
-static UCHAR ct73[];
-static UCHAR ct74[];
-static UCHAR ct75[];
-static UCHAR ct76[];
-static UCHAR ct77[];
-static UCHAR ct78[];
-static UCHAR ct79[];
-static UCHAR ct80[];
-static UCHAR ct81[];
-static UCHAR ct82[];
-static UCHAR ct83[];
-static UCHAR ct84[];
-static UCHAR ct85[];
-static UCHAR ct86[];
-static UCHAR ct87[];
-static UCHAR ct88[];
-static UCHAR ct89[];
-static UCHAR ct90[];
-static UCHAR ct91[];
-static UCHAR cc01[];
-static UCHAR cc02[];
-
-// Now, refer to i2cmd.c, and see the character arrays defined there. They are
-// cast here to cmdSyntaxPtr.
-//
-// There are library functions for issuing bypass or inline commands. These
-// functions take one or more arguments of the type cmdSyntaxPtr. The routine
-// then can figure out how long each command is supposed to be and easily add it
-// to the list.
-//
-// For ease of use, we define manifests which return pointers to appropriate
-// cmdSyntaxPtr things. But some commands also take arguments. If a single
-// argument is used, we define a macro which performs the single assignment and
-// (through the expedient of a comma expression) references the appropriate
-// pointer. For commands requiring several arguments, we actually define a
-// function to perform the assignments.
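To illustrate the comma-expression macros defined below, here is a hypothetical packing helper that consumes them the way the text describes, copying each command by its own length byte. The real library entry point is i2QueueCommands() (mentioned further down); its exact signature is not shown in this header, so the helper and its usage are only a sketch.

	#include <stdarg.h>
	#include <string.h>

	/* Hypothetical: append 'ncmds' commands to a packet buffer, using the
	 * length byte carried by each cmdSyntax. Returns bytes written. */
	static int pack_cmds(unsigned char *buf, int bufsize, int ncmds, ...)
	{
		va_list ap;
		int used = 0;

		va_start(ap, ncmds);
		while (ncmds--) {
			cmdSyntaxPtr pc = va_arg(ap, cmdSyntaxPtr);

			if (used + pc->length > bufsize)
				break;
			/* The opcode plus any parameters the macro filled in */
			memcpy(buf + used, pc->cmd, pc->length);
			used += pc->length;
		}
		va_end(ap);
		return used;
	}

	/* e.g.: pack_cmds(buf, sizeof(buf), 2,
	 *                 CMD_SETBAUD(CBR_9600), CMD_SETBITS(CSZ_8)); */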
-
-#define CMD_DTRUP (cmdSyntaxPtr)(ct02) // Raise DTR
-#define CMD_DTRDN (cmdSyntaxPtr)(ct03) // Lower DTR
-#define CMD_RTSUP (cmdSyntaxPtr)(ct04) // Raise RTS
-#define CMD_RTSDN (cmdSyntaxPtr)(ct05) // Lower RTS
-#define CMD_STARTFL (cmdSyntaxPtr)(ct06) // Start Flushing Data
-
-#define CMD_DTRRTS_UP (cmdSyntaxPtr)(cc01) // Raise DTR and RTS
-#define CMD_DTRRTS_DN (cmdSyntaxPtr)(cc02) // Lower DTR and RTS
-
-// Set Baud Rate for transmit and receive
-#define CMD_SETBAUD(arg) \
- (((cmdSyntaxPtr)(ct07))->cmd[1] = (arg),(cmdSyntaxPtr)(ct07))
-
-#define CBR_50 1
-#define CBR_75 2
-#define CBR_110 3
-#define CBR_134 4
-#define CBR_150 5
-#define CBR_200 6
-#define CBR_300 7
-#define CBR_600 8
-#define CBR_1200 9
-#define CBR_1800 10
-#define CBR_2400 11
-#define CBR_4800 12
-#define CBR_9600 13
-#define CBR_19200 14
-#define CBR_38400 15
-#define CBR_2000 16
-#define CBR_3600 17
-#define CBR_7200 18
-#define CBR_56000 19
-#define CBR_57600 20
-#define CBR_64000 21
-#define CBR_76800 22
-#define CBR_115200 23
-#define CBR_C1 24 // Custom baud rate 1
-#define CBR_C2 25 // Custom baud rate 2
-#define CBR_153600 26
-#define CBR_230400 27
-#define CBR_307200 28
-#define CBR_460800 29
-#define CBR_921600 30
-
-// Set Character size
-//
-#define CMD_SETBITS(arg) \
- (((cmdSyntaxPtr)(ct08))->cmd[1] = (arg),(cmdSyntaxPtr)(ct08))
-
-#define CSZ_5 0
-#define CSZ_6 1
-#define CSZ_7 2
-#define CSZ_8 3
-
-// Set number of stop bits
-//
-#define CMD_SETSTOP(arg) \
- (((cmdSyntaxPtr)(ct09))->cmd[1] = (arg),(cmdSyntaxPtr)(ct09))
-
-#define CST_1 0
-#define CST_15 1 // 1.5 stop bits
-#define CST_2 2
-
-// Set parity option
-//
-#define CMD_SETPAR(arg) \
- (((cmdSyntaxPtr)(ct10))->cmd[1] = (arg),(cmdSyntaxPtr)(ct10))
-
-#define CSP_NP 0 // no parity
-#define CSP_OD 1 // odd parity
-#define CSP_EV 2 // Even parity
-#define CSP_SP 3 // Space parity
-#define CSP_MK 4 // Mark parity
-
-// Define xon char for transmitter flow control
-//
-#define CMD_DEF_IXON(arg) \
- (((cmdSyntaxPtr)(ct11))->cmd[1] = (arg),(cmdSyntaxPtr)(ct11))
-
-// Define xoff char for transmitter flow control
-//
-#define CMD_DEF_IXOFF(arg) \
- (((cmdSyntaxPtr)(ct12))->cmd[1] = (arg),(cmdSyntaxPtr)(ct12))
-
-#define CMD_STOPFL (cmdSyntaxPtr)(ct13) // Stop Flushing data
-
-// Acknowledge receipt of hotkey signal
-//
-#define CMD_HOTACK (cmdSyntaxPtr)(ct14)
-
-// Define irq level to use. Should actually be sent by library-level code, not
-// directly from user...
-//
-#define CMDVALUE_IRQ 15 // For library use at initialization. Until this command
- // is sent, board processing doesn't really start.
-#define CMD_SET_IRQ(arg) \
- (((cmdSyntaxPtr)(ct15))->cmd[1] = (arg),(cmdSyntaxPtr)(ct15))
-
-#define CIR_POLL 0 // No IRQ - Poll
-#define CIR_3 3 // IRQ 3
-#define CIR_4 4 // IRQ 4
-#define CIR_5 5 // IRQ 5
-#define CIR_7 7 // IRQ 7
-#define CIR_10 10 // IRQ 10
-#define CIR_11 11 // IRQ 11
-#define CIR_12 12 // IRQ 12
-#define CIR_15 15 // IRQ 15
-
-// Select transmit flow xon/xoff options
-//
-#define CMD_IXON_OPT(arg) \
- (((cmdSyntaxPtr)(ct16))->cmd[1] = (arg),(cmdSyntaxPtr)(ct16))
-
-#define CIX_NONE 0 // Incoming Xon/Xoff characters not special
-#define CIX_XON 1 // Xoff disable, Xon enable
-#define CIX_XANY 2 // Xoff disable, any key enable
-
-// Select receive flow xon/xoff options
-//
-#define CMD_OXON_OPT(arg) \
- (((cmdSyntaxPtr)(ct17))->cmd[1] = (arg),(cmdSyntaxPtr)(ct17))
-
-#define COX_NONE 0 // Don't send Xon/Xoff
-#define COX_XON 1 // Send xon/xoff to start/stop incoming data
-
-
-#define CMD_CTS_REP (cmdSyntaxPtr)(ct18) // Enable CTS reporting
-#define CMD_CTS_NREP (cmdSyntaxPtr)(ct19) // Disable CTS reporting
-
-#define CMD_DCD_REP (cmdSyntaxPtr)(ct20) // Enable DCD reporting
-#define CMD_DCD_NREP (cmdSyntaxPtr)(ct21) // Disable DCD reporting
-
-#define CMD_DSR_REP (cmdSyntaxPtr)(ct22) // Enable DSR reporting
-#define CMD_DSR_NREP (cmdSyntaxPtr)(ct23) // Disable DSR reporting
-
-#define CMD_RI_REP (cmdSyntaxPtr)(ct24) // Enable RI reporting
-#define CMD_RI_NREP (cmdSyntaxPtr)(ct25) // Disable RI reporting
-
-// Enable break reporting and select style
-//
-#define CMD_BRK_REP(arg) \
- (((cmdSyntaxPtr)(ct26))->cmd[1] = (arg),(cmdSyntaxPtr)(ct26))
-
-#define CBK_STAT 0x00 // Report breaks as a status (exception,irq)
-#define CBK_NULL 0x01 // Report breaks as a good null
-#define CBK_STAT_SEQ 0x02 // Report breaks as a status AND as in-band character
- // sequence FFh, 01h, 10h
-#define CBK_SEQ 0x03 // Report breaks as the in-band
- //sequence FFh, 01h, 10h ONLY.
-#define CBK_FLSH 0x04 // if this bit set also flush input data
-#define CBK_POSIX 0x08 // if this bit set report as FF,0,0 sequence
-#define CBK_SINGLE 0x10 // if this bit set with CBK_SEQ or CBK_STAT_SEQ
- //then reports single null instead of triple
-
-#define CMD_BRK_NREP (cmdSyntaxPtr)(ct27) // Disable break reporting
-
-// Specify maximum block size for received data
-//
-#define CMD_MAX_BLOCK(arg) \
- (((cmdSyntaxPtr)(ct28))->cmd[1] = (arg),(cmdSyntaxPtr)(ct28))
-
-// -- COMMAND 29 is reserved --
-
-#define CMD_CTSFL_ENAB (cmdSyntaxPtr)(ct30) // Enable CTS flow control
-#define CMD_CTSFL_DSAB (cmdSyntaxPtr)(ct31) // Disable CTS flow control
-#define CMD_RTSFL_ENAB (cmdSyntaxPtr)(ct32) // Enable RTS flow control
-#define CMD_RTSFL_DSAB (cmdSyntaxPtr)(ct33) // Disable RTS flow control
-
-// Specify istrip option
-//
-#define CMD_ISTRIP_OPT(arg) \
- (((cmdSyntaxPtr)(ct34))->cmd[1] = (arg),(cmdSyntaxPtr)(ct34))
-
-#define CIS_NOSTRIP 0 // Strip characters to character size
-#define CIS_STRIP 1 // Strip any 8-bit characters to 7 bits
-
-// Send a break of arg milliseconds
-//
-#define CMD_SEND_BRK(arg) \
- (((cmdSyntaxPtr)(ct35))->cmd[1] = (arg),(cmdSyntaxPtr)(ct35))
-
-// Set error reporting mode
-//
-#define CMD_SET_ERROR(arg) \
- (((cmdSyntaxPtr)(ct36))->cmd[1] = (arg),(cmdSyntaxPtr)(ct36))
-
-#define CSE_ESTAT 0 // Report error in a status packet
-#define CSE_NOREP 1 // Treat character as though it were good
-#define CSE_DROP 2 // Discard the character
-#define CSE_NULL 3 // Replace with a null
-#define CSE_MARK 4 // Replace with a 3-character sequence (as Unix)
-
-#define CSE_REPLACE 0x8 // Replace the errored character with the
- // replacement character defined here
-
-#define CSE_STAT_REPLACE 0x18 // Replace the errored character with the
- // replacement character defined here AND
- // report the error as a status packet (as in
- // CSE_ESTAT).
-
-
-// COMMAND 37, to send flow control packets, is handled only by low-level
-// library code in response to data movement and shouldn't ever be sent by the
-// user code. See i2pack.h and the body of i2lib.c for details.
-
-// Enable on-board post-processing, using options given in oflag argument.
-// Formerly, this command was automatically preceded by a CMD_OPOST_OFF command
-// because the loadware does not permit sending back-to-back CMD_OPOST_ON
-// commands without an intervening CMD_OPOST_OFF. BUT, WE LEARN 18 MAY 92, that
-// CMD_OPOST_ON and CMD_OPOST_OFF must each be at the end of a packet (or in a
-// solo packet). This means the caller must specify separately CMD_OPOST_OFF,
-// CMD_OPOST_ON(parm) when he calls i2QueueCommands(). That function will ensure
-// each gets a separate packet. Extra CMD_OPOST_OFF's are always ok.
-//
-#define CMD_OPOST_ON(oflag) \
- (*(USHORT *)&(((cmdSyntaxPtr)(ct39))->cmd[1]) = (oflag), \
- (cmdSyntaxPtr)(ct39))
-
-#define CMD_OPOST_OFF (cmdSyntaxPtr)(ct40) // Disable on-board post-proc
-
-#define CMD_RESUME (cmdSyntaxPtr)(ct41) // Resume: behave as though an XON
- // were received;
-
-// Set Transmit baud rate (see command 7 for arguments)
-//
-#define CMD_SETBAUD_TX(arg) \
- (((cmdSyntaxPtr)(ct42))->cmd[1] = (arg),(cmdSyntaxPtr)(ct42))
-
-// Set Receive baud rate (see command 7 for arguments)
-//
-#define CMD_SETBAUD_RX(arg) \
- (((cmdSyntaxPtr)(ct43))->cmd[1] = (arg),(cmdSyntaxPtr)(ct43))
-
-// Request interrupt from board each arg milliseconds. Interrupt will specify
-// "received data", even though there may be no data present. If arg == 0,
-// disables any such interrupts.
-//
-#define CMD_PING_REQ(arg) \
- (((cmdSyntaxPtr)(ct44))->cmd[1] = (arg),(cmdSyntaxPtr)(ct44))
-
-#define CMD_HOT_ENAB (cmdSyntaxPtr)(ct45) // Enable Hot-key checking
-#define CMD_HOT_DSAB (cmdSyntaxPtr)(ct46) // Disable Hot-key checking
-
-#if 0
-// COMMAND 47: Send Protocol info via Unix flags:
-// iflag = Unix tty t_iflag
-// cflag = Unix tty t_cflag
-// lflag = Unix tty t_lflag
-// See System V Unix/Xenix documentation for the meanings of the bit fields
-// within these flags
-//
-#define CMD_UNIX_FLAGS(iflag,cflag,lflag) i2cmdUnixFlags(iflag,cflag,lflag)
-#endif /* 0 */
-
-#define CMD_DSRFL_ENAB (cmdSyntaxPtr)(ct48) // Enable DSR receiver ctrl
-#define CMD_DSRFL_DSAB (cmdSyntaxPtr)(ct49) // Disable DSR receiver ctrl
-#define CMD_DTRFL_ENAB (cmdSyntaxPtr)(ct50) // Enable DTR flow control
-#define CMD_DTRFL_DSAB (cmdSyntaxPtr)(ct51) // Disable DTR flow control
-#define CMD_BAUD_RESET (cmdSyntaxPtr)(ct52) // Reset baudrate table
-
-// COMMAND 54: Define custom rate #1
-// rate = (short) 1/10 of the desired baud rate
-//
-#define CMD_BAUD_DEF1(rate) i2cmdBaudDef(1,rate)
-
-// COMMAND 55: Define custom rate #2
-// rate = (short) 1/10 of the desired baud rate
-//
-#define CMD_BAUD_DEF2(rate) i2cmdBaudDef(2,rate)
-
-// Pause arg hundredths of seconds. (Note, this is NOT milliseconds.)
-//
-#define CMD_PAUSE(arg) \
- (((cmdSyntaxPtr)(ct56))->cmd[1] = (arg),(cmdSyntaxPtr)(ct56))
-
-#define CMD_SUSPEND (cmdSyntaxPtr)(ct57) // Suspend output
-#define CMD_UNSUSPEND (cmdSyntaxPtr)(ct58) // Un-Suspend output
-
-// Set parity-checking options
-//
-#define CMD_PARCHK(arg) \
- (((cmdSyntaxPtr)(ct59))->cmd[1] = (arg),(cmdSyntaxPtr)(ct59))
-
-#define CPK_ENAB 0 // Enable parity checking on input
-#define CPK_DSAB 1 // Disable parity checking on input
-
-#define CMD_BMARK_REQ (cmdSyntaxPtr)(ct60) // Bookmark request
-
-
-// Enable/Disable internal loopback mode
-//
-#define CMD_INLOOP(arg) \
- (((cmdSyntaxPtr)(ct61))->cmd[1] = (arg),(cmdSyntaxPtr)(ct61))
-
-#define CIN_DISABLE 0 // Normal operation (default)
-#define CIN_ENABLE 1 // Internal (local) loopback
-#define CIN_REMOTE 2 // Remote loopback
-
-// Specify timeout for hotkeys: Delay will be (arg x 10) milliseconds, arg == 0
-// --> no timeout: wait forever.
-//
-#define CMD_HOT_TIME(arg) \
- (((cmdSyntaxPtr)(ct62))->cmd[1] = (arg),(cmdSyntaxPtr)(ct62))
-
-
-// Define (outgoing) xon for receive flow control
-//
-#define CMD_DEF_OXON(arg) \
- (((cmdSyntaxPtr)(ct63))->cmd[1] = (arg),(cmdSyntaxPtr)(ct63))
-
-// Define (outgoing) xoff for receiver flow control
-//
-#define CMD_DEF_OXOFF(arg) \
- (((cmdSyntaxPtr)(ct64))->cmd[1] = (arg),(cmdSyntaxPtr)(ct64))
-
-// Enable/Disable RTS on transmit (1/2 duplex-style)
-//
-#define CMD_RTS_XMIT(arg) \
- (((cmdSyntaxPtr)(ct65))->cmd[1] = (arg),(cmdSyntaxPtr)(ct65))
-
-#define CHD_DISABLE 0
-#define CHD_ENABLE 1
-
-// Set high-water-mark level (debugging use only)
-//
-#define CMD_SETHIGHWAT(arg) \
- (((cmdSyntaxPtr)(ct66))->cmd[1] = (arg),(cmdSyntaxPtr)(ct66))
-
-// Start flushing tagged data (tag = 0-14)
-//
-#define CMD_START_SELFL(tag) \
- (((cmdSyntaxPtr)(ct67))->cmd[1] = (tag),(cmdSyntaxPtr)(ct67))
-
-// End flushing tagged data (tag = 0-14)
-//
-#define CMD_END_SELFL(tag) \
- (((cmdSyntaxPtr)(ct68))->cmd[1] = (tag),(cmdSyntaxPtr)(ct68))
-
-#define CMD_HWFLOW_OFF (cmdSyntaxPtr)(ct69) // Disable HW TX flow control
-#define CMD_ODSRFL_ENAB (cmdSyntaxPtr)(ct70) // Enable DSR output f/c
-#define CMD_ODSRFL_DSAB (cmdSyntaxPtr)(ct71) // Disable DSR output f/c
-#define CMD_ODCDFL_ENAB (cmdSyntaxPtr)(ct72) // Enable DCD output f/c
-#define CMD_ODCDFL_DSAB (cmdSyntaxPtr)(ct73) // Disable DCD output f/c
-
-// Set transmit interrupt load level. Count should be an even value 2-12
-//
-#define CMD_LOADLEVEL(count) \
- (((cmdSyntaxPtr)(ct74))->cmd[1] = (count),(cmdSyntaxPtr)(ct74))
-
-// If reporting DSS changes, map to character sequence FFh, 2, MSR
-//
-#define CMD_STATDATA(arg) \
- (((cmdSyntaxPtr)(ct75))->cmd[1] = (arg),(cmdSyntaxPtr)(ct75))
-
-#define CSTD_DISABLE 0	// Report DSS changes as status packets only (default)
-#define CSTD_ENABLE  1	// Report DSS changes as in-band data sequence as well
-			// as by status packet.
-
-#define CMD_BREAK_ON (cmdSyntaxPtr)(ct76)// Set break and stop xmit
-#define CMD_BREAK_OFF (cmdSyntaxPtr)(ct77)// End break and restart xmit
-#define CMD_GETFC (cmdSyntaxPtr)(ct78)// Request for flow control packet
- // from board.
-
-// Transmit this character immediately
-//
-#define CMD_XMIT_NOW(ch) \
- (((cmdSyntaxPtr)(ct79))->cmd[1] = (ch),(cmdSyntaxPtr)(ct79))
-
-// Set baud rate via "divisor latch"
-//
-#define CMD_DIVISOR_LATCH(which,value) \
- (((cmdSyntaxPtr)(ct80))->cmd[1] = (which), \
- *(USHORT *)(((cmdSyntaxPtr)(ct80))->cmd[2]) = (value), \
- (cmdSyntaxPtr)(ct80))
-
-#define CDL_RX 1 // Set receiver rate
-#define CDL_TX 2 // Set transmit rate
- // (CDL_TX | CDL_RX) Set both rates
-
-// Request for special diagnostic status pkt from the board.
-//
-#define CMD_GET_STATUS (cmdSyntaxPtr)(ct81)
-
-// Request time-stamped transmit character count packet.
-//
-#define CMD_GET_TXCNT (cmdSyntaxPtr)(ct82)
-
-// Request time-stamped receive character count packet.
-//
-#define CMD_GET_RXCNT (cmdSyntaxPtr)(ct83)
-
-// Request for box/board I.D. packet.
-#define CMD_GET_BOXIDS (cmdSyntaxPtr)(ct84)
-
-// Enable or disable multiple channels according to bit-mapped ushorts box 1-4
-//
-#define CMD_ENAB_MULT(enable, box1, box2, box3, box4) \
-	(((cmdSyntaxPtr)(ct85))->cmd[1] = (enable), \
- *(USHORT *)(((cmdSyntaxPtr)(ct85))->cmd[2]) = (box1), \
- *(USHORT *)(((cmdSyntaxPtr)(ct85))->cmd[4]) = (box2), \
- *(USHORT *)(((cmdSyntaxPtr)(ct85))->cmd[6]) = (box3), \
- *(USHORT *)(((cmdSyntaxPtr)(ct85))->cmd[8]) = (box4), \
- (cmdSyntaxPtr)(ct85))
-
-#define CEM_DISABLE 0
-#define CEM_ENABLE 1
-
-// Enable or disable receiver or receiver interrupts (default both enabled)
-//
-#define CMD_RCV_ENABLE(ch) \
- (((cmdSyntaxPtr)(ct86))->cmd[1] = (ch),(cmdSyntaxPtr)(ct86))
-
-#define CRE_OFF 0 // Disable the receiver
-#define CRE_ON 1 // Enable the receiver
-#define CRE_INTOFF 2 // Disable receiver interrupts (to loadware)
-#define CRE_INTON 3 // Enable receiver interrupts (to loadware)
-
-// Starts up a hardware test process, which runs transparently, and sends a
-// STAT_HWFAIL packet in case a hardware failure is detected.
-//
-#define CMD_HW_TEST (cmdSyntaxPtr)(ct87)
-
-// Change receiver threshold and timeout value:
-// Defaults: timeout = 20mS
-// threshold count = 8 when DTRflow not in use,
-// threshold count = 5 when DTRflow in use.
-//
-#define CMD_RCV_THRESHOLD(count,ms) \
- (((cmdSyntaxPtr)(ct88))->cmd[1] = (count), \
- ((cmdSyntaxPtr)(ct88))->cmd[2] = (ms), \
- (cmdSyntaxPtr)(ct88))
-
-// Makes the loadware report DSS signals for this channel immediately.
-//
-#define CMD_DSS_NOW (cmdSyntaxPtr)(ct89)
-
-// Set the receive silo parameters
-// timeout is ms idle wait until delivery (~VTIME)
-// threshold is max characters cause interrupt (~VMIN)
-//
-#define CMD_SET_SILO(timeout,threshold) \
- (((cmdSyntaxPtr)(ct90))->cmd[1] = (timeout), \
- ((cmdSyntaxPtr)(ct90))->cmd[2] = (threshold), \
- (cmdSyntaxPtr)(ct90))
-
-// Set timed break in deciseconds (1/10 s)
-//
-#define CMD_LBREAK(ds) \
-	(((cmdSyntaxPtr)(ct91))->cmd[1] = (ds),(cmdSyntaxPtr)(ct91))
-
-
-
-#endif // I2CMD_H
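The packet rule for CMD_OPOST_OFF/CMD_OPOST_ON described above is easiest to see from the caller's side. A minimal sketch follows; the i2QueueCommands() signature (type, channel, timeout, command count, command list), the PTYPE_INLINE constant and the i2ChanStrPtr type live elsewhere in the driver and are assumed here for illustration — only the CMD_* macros come from this header.

/* Hedged sketch: queue OPOST off, then OPOST on, as two separate commands
 * so the library can place each at the end of its own packet. */
static void example_set_opost(i2ChanStrPtr pCh, unsigned short oflag)
{
	i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_OPOST_OFF);
	i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_OPOST_ON(oflag));
}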
diff --git a/drivers/staging/tty/ip2/i2ellis.c b/drivers/staging/tty/ip2/i2ellis.c
deleted file mode 100644
index 29db44de399..00000000000
--- a/drivers/staging/tty/ip2/i2ellis.c
+++ /dev/null
@@ -1,1403 +0,0 @@
-/*******************************************************************************
-*
-* (c) 1998 by Computone Corporation
-*
-********************************************************************************
-*
-*
-* PACKAGE: Linux tty Device Driver for IntelliPort family of multiport
-* serial I/O controllers.
-*
-* DESCRIPTION: Low-level interface code for the device driver
-* (This is included source code, not a separate compilation
-* module.)
-*
-*******************************************************************************/
-//---------------------------------------------
-// Function declarations private to this module
-//---------------------------------------------
-// Functions called only indirectly through i2eBordStr entries.
-
-static int iiWriteBuf16(i2eBordStrPtr, unsigned char *, int);
-static int iiWriteBuf8(i2eBordStrPtr, unsigned char *, int);
-static int iiReadBuf16(i2eBordStrPtr, unsigned char *, int);
-static int iiReadBuf8(i2eBordStrPtr, unsigned char *, int);
-
-static unsigned short iiReadWord16(i2eBordStrPtr);
-static unsigned short iiReadWord8(i2eBordStrPtr);
-static void iiWriteWord16(i2eBordStrPtr, unsigned short);
-static void iiWriteWord8(i2eBordStrPtr, unsigned short);
-
-static int iiWaitForTxEmptyII(i2eBordStrPtr, int);
-static int iiWaitForTxEmptyIIEX(i2eBordStrPtr, int);
-static int iiTxMailEmptyII(i2eBordStrPtr);
-static int iiTxMailEmptyIIEX(i2eBordStrPtr);
-static int iiTrySendMailII(i2eBordStrPtr, unsigned char);
-static int iiTrySendMailIIEX(i2eBordStrPtr, unsigned char);
-
-static unsigned short iiGetMailII(i2eBordStrPtr);
-static unsigned short iiGetMailIIEX(i2eBordStrPtr);
-
-static void iiEnableMailIrqII(i2eBordStrPtr);
-static void iiEnableMailIrqIIEX(i2eBordStrPtr);
-static void iiWriteMaskII(i2eBordStrPtr, unsigned char);
-static void iiWriteMaskIIEX(i2eBordStrPtr, unsigned char);
-
-static void ii2Nop(void);
-
-//***************
-//* Static Data *
-//***************
-
-static int ii2Safe; // Safe I/O address for delay routine
-
-static int iiDelayed; // Set when the iiResetDelay function is
- // called. Cleared when ANY board is reset.
-static DEFINE_RWLOCK(Dl_spinlock);
-
-//********
-//* Code *
-//********
-
-//=======================================================
-// Initialization Routines
-//
-// iiSetAddress
-// iiReset
-// iiResetDelay
-// iiInitialize
-//=======================================================
-
-//******************************************************************************
-// Function: iiSetAddress(pB, address, delay)
-// Parameters: pB - pointer to the board structure
-// address - the purported I/O address of the board
-// delay - pointer to the 1-ms delay function to use
-// in this and any future operations to this board
-//
-// Returns: True if everything appears copacetic.
-// False if there is any error: the pB->i2eError field has the error
-//
-// Description:
-//
-// This routine (roughly) checks the address for validity, marks the structure
-// valid (i2eValid), and sets the state to II_STATE_COLD, which means that we
-// haven't even sent a reset yet.
-//
-//******************************************************************************
-static int
-iiSetAddress( i2eBordStrPtr pB, int address, delayFunc_t delay )
-{
- // Should any failure occur before init is finished...
- pB->i2eValid = I2E_INCOMPLETE;
-
- // Cannot check upper limit except extremely: Might be microchannel
- // Address must be on an 8-byte boundary
-
- if ((unsigned int)address <= 0x100
- || (unsigned int)address >= 0xfff8
- || (address & 0x7)
- )
- {
- I2_COMPLETE(pB, I2EE_BADADDR);
- }
-
- // Initialize accelerators
- pB->i2eBase = address;
- pB->i2eData = address + FIFO_DATA;
- pB->i2eStatus = address + FIFO_STATUS;
- pB->i2ePointer = address + FIFO_PTR;
- pB->i2eXMail = address + FIFO_MAIL;
- pB->i2eXMask = address + FIFO_MASK;
-
- // Initialize i/o address for ii2DelayIO
- ii2Safe = address + FIFO_NOP;
-
- // Initialize the delay routine
- pB->i2eDelay = ((delay != (delayFunc_t)NULL) ? delay : (delayFunc_t)ii2Nop);
-
- pB->i2eValid = I2E_MAGIC;
- pB->i2eState = II_STATE_COLD;
-
- I2_COMPLETE(pB, I2EE_GOOD);
-}
-
-//******************************************************************************
-// Function: iiReset(pB)
-// Parameters: pB - pointer to the board structure
-//
-// Returns: True if everything appears copacetic.
-// False if there is any error: the pB->i2eError field has the error
-//
-// Description:
-//
-// Attempts to reset the board (see also i2hw.h). Normally, we would use this to
-// reset a board immediately after iiSetAddress(), but it is valid to reset a
-// board from any state, say, in order to change or re-load loadware. (Under
-// such circumstances, there is no reason to re-run iiSetAddress(), which is why
-// it is a separate routine and not included in this one.)
-//
-//******************************************************************************
-static int
-iiReset(i2eBordStrPtr pB)
-{
- // Magic number should be set, else even the address is suspect
- if (pB->i2eValid != I2E_MAGIC)
- {
- I2_COMPLETE(pB, I2EE_BADMAGIC);
- }
-
- outb(0, pB->i2eBase + FIFO_RESET); /* Any data will do */
- iiDelay(pB, 50); // Pause between resets
- outb(0, pB->i2eBase + FIFO_RESET); /* Second reset */
-
- // We must wait before even attempting to read anything from the FIFO: the
- // board's P.O.S.T may actually attempt to read and write its end of the
- // FIFO in order to check flags, loop back (where supported), etc. On
- // completion of this testing it would reset the FIFO, and on completion
-	// of all P.O.S.T., write the message. We must not mistake data which
- // might have been sent for testing as part of the reset message. To
- // better utilize time, say, when resetting several boards, we allow the
- // delay to be performed externally; in this way the caller can reset
- // several boards, delay a single time, then call the initialization
- // routine for all.
-
- pB->i2eState = II_STATE_RESET;
-
- iiDelayed = 0; // i.e., the delay routine hasn't been called since the most
- // recent reset.
-
- // Ensure anything which would have been of use to standard loadware is
-	// blanked out, since the board has now forgotten everything!
-
- pB->i2eUsingIrq = I2_IRQ_UNDEFINED; /* to not use an interrupt so far */
- pB->i2eWaitingForEmptyFifo = 0;
- pB->i2eOutMailWaiting = 0;
- pB->i2eChannelPtr = NULL;
- pB->i2eChannelCnt = 0;
-
- pB->i2eLeadoffWord[0] = 0;
- pB->i2eFifoInInts = 0;
- pB->i2eFifoOutInts = 0;
- pB->i2eFatalTrap = NULL;
- pB->i2eFatal = 0;
-
- I2_COMPLETE(pB, I2EE_GOOD);
-}
-
-//******************************************************************************
-// Function: iiResetDelay(pB)
-// Parameters: pB - pointer to the board structure
-//
-// Returns: True if everything appears copacetic.
-// False if there is any error: the pB->i2eError field has the error
-//
-// Description:
-//
-// Using the delay defined in board structure, waits two seconds (for board to
-// reset).
-//
-//******************************************************************************
-static int
-iiResetDelay(i2eBordStrPtr pB)
-{
- if (pB->i2eValid != I2E_MAGIC) {
- I2_COMPLETE(pB, I2EE_BADMAGIC);
- }
- if (pB->i2eState != II_STATE_RESET) {
- I2_COMPLETE(pB, I2EE_BADSTATE);
- }
- iiDelay(pB,2000); /* Now we wait for two seconds. */
- iiDelayed = 1; /* Delay has been called: ok to initialize */
- I2_COMPLETE(pB, I2EE_GOOD);
-}
-
-//******************************************************************************
-// Function: iiInitialize(pB)
-// Parameters: pB - pointer to the board structure
-//
-// Returns: True if everything appears copacetic.
-// False if there is any error: the pB->i2eError field has the error
-//
-// Description:
-//
-// Attempts to read the Power-on reset message. Initializes any remaining fields
-// in the pB structure.
-//
-// This should be called as the third step of a process beginning with
-// iiReset(), then iiResetDelay(). This routine checks to see that the structure
-// is "valid" and in the reset state, also confirms that the delay routine has
-// been called since the latest reset (to any board! overly strong!).
-//
-//******************************************************************************
-static int
-iiInitialize(i2eBordStrPtr pB)
-{
- int itemp;
- unsigned char c;
- unsigned short utemp;
- unsigned int ilimit;
-
- if (pB->i2eValid != I2E_MAGIC)
- {
- I2_COMPLETE(pB, I2EE_BADMAGIC);
- }
-
- if (pB->i2eState != II_STATE_RESET || !iiDelayed)
- {
- I2_COMPLETE(pB, I2EE_BADSTATE);
- }
-
- // In case there is a failure short of our completely reading the power-up
- // message.
- pB->i2eValid = I2E_INCOMPLETE;
-
-
- // Now attempt to read the message.
-
- for (itemp = 0; itemp < sizeof(porStr); itemp++)
- {
- // We expect the entire message is ready.
- if (!I2_HAS_INPUT(pB)) {
- pB->i2ePomSize = itemp;
- I2_COMPLETE(pB, I2EE_PORM_SHORT);
- }
-
- pB->i2ePom.c[itemp] = c = inb(pB->i2eData);
-
- // We check the magic numbers as soon as they are supposed to be read
- // (rather than after) to minimize effect of reading something we
- // already suspect can't be "us".
- if ( (itemp == POR_1_INDEX && c != POR_MAGIC_1) ||
- (itemp == POR_2_INDEX && c != POR_MAGIC_2))
- {
- pB->i2ePomSize = itemp+1;
- I2_COMPLETE(pB, I2EE_BADMAGIC);
- }
- }
-
- pB->i2ePomSize = itemp;
-
- // Ensure that this was all the data...
- if (I2_HAS_INPUT(pB))
- I2_COMPLETE(pB, I2EE_PORM_LONG);
-
- // For now, we'll fail to initialize if P.O.S.T reports bad chip mapper:
- // Implying we will not be able to download any code either: That's ok: the
- // condition is pretty explicit.
- if (pB->i2ePom.e.porDiag1 & POR_BAD_MAPPER)
- {
- I2_COMPLETE(pB, I2EE_POSTERR);
- }
-
- // Determine anything which must be done differently depending on the family
- // of boards!
- switch (pB->i2ePom.e.porID & POR_ID_FAMILY)
- {
- case POR_ID_FII: // IntelliPort-II
-
- pB->i2eFifoStyle = FIFO_II;
- pB->i2eFifoSize = 512; // 512 bytes, always
- pB->i2eDataWidth16 = false;
-
- pB->i2eMaxIrq = 15; // Because board cannot tell us it is in an 8-bit
- // slot, we do allow it to be done (documentation!)
-
- pB->i2eGoodMap[1] =
- pB->i2eGoodMap[2] =
- pB->i2eGoodMap[3] =
- pB->i2eChannelMap[1] =
- pB->i2eChannelMap[2] =
- pB->i2eChannelMap[3] = 0;
-
- switch (pB->i2ePom.e.porID & POR_ID_SIZE)
- {
- case POR_ID_II_4:
- pB->i2eGoodMap[0] =
- pB->i2eChannelMap[0] = 0x0f; // four-port
-
- // Since porPorts1 is based on the Hardware ID register, the numbers
- // should always be consistent for IntelliPort-II. Ditto below...
- if (pB->i2ePom.e.porPorts1 != 4)
- {
- I2_COMPLETE(pB, I2EE_INCONSIST);
- }
- break;
-
- case POR_ID_II_8:
- case POR_ID_II_8R:
- pB->i2eGoodMap[0] =
- pB->i2eChannelMap[0] = 0xff; // Eight port
- if (pB->i2ePom.e.porPorts1 != 8)
- {
- I2_COMPLETE(pB, I2EE_INCONSIST);
- }
- break;
-
- case POR_ID_II_6:
- pB->i2eGoodMap[0] =
- pB->i2eChannelMap[0] = 0x3f; // Six Port
- if (pB->i2ePom.e.porPorts1 != 6)
- {
- I2_COMPLETE(pB, I2EE_INCONSIST);
- }
- break;
- }
-
-		// Fix up the "good" channel list based on any errors reported.
- if (pB->i2ePom.e.porDiag1 & POR_BAD_UART1)
- {
- pB->i2eGoodMap[0] &= ~0x0f;
- }
-
- if (pB->i2ePom.e.porDiag1 & POR_BAD_UART2)
- {
- pB->i2eGoodMap[0] &= ~0xf0;
- }
-
- break; // POR_ID_FII case
-
- case POR_ID_FIIEX: // IntelliPort-IIEX
-
- pB->i2eFifoStyle = FIFO_IIEX;
-
- itemp = pB->i2ePom.e.porFifoSize;
-
- // Implicit assumption that fifo would not grow beyond 32k,
- // nor would ever be less than 256.
-
- if (itemp < 8 || itemp > 15)
- {
- I2_COMPLETE(pB, I2EE_INCONSIST);
- }
- pB->i2eFifoSize = (1 << itemp);
-
- // These are based on what P.O.S.T thinks should be there, based on
- // box ID registers
- ilimit = pB->i2ePom.e.porNumBoxes;
- if (ilimit > ABS_MAX_BOXES)
- {
- ilimit = ABS_MAX_BOXES;
- }
-
- // For as many boxes as EXIST, gives the type of box.
- // Added 8/6/93: check for the ISA-4 (asic) which looks like an
- // expandable but for whom "8 or 16?" is not the right question.
-
- utemp = pB->i2ePom.e.porFlags;
- if (utemp & POR_CEX4)
- {
- pB->i2eChannelMap[0] = 0x000f;
- } else {
- utemp &= POR_BOXES;
- for (itemp = 0; itemp < ilimit; itemp++)
- {
- pB->i2eChannelMap[itemp] =
- ((utemp & POR_BOX_16) ? 0xffff : 0x00ff);
- utemp >>= 1;
- }
- }
-
- // These are based on what P.O.S.T actually found.
-
- utemp = (pB->i2ePom.e.porPorts2 << 8) + pB->i2ePom.e.porPorts1;
-
- for (itemp = 0; itemp < ilimit; itemp++)
- {
- pB->i2eGoodMap[itemp] = 0;
- if (utemp & 1) pB->i2eGoodMap[itemp] |= 0x000f;
- if (utemp & 2) pB->i2eGoodMap[itemp] |= 0x00f0;
- if (utemp & 4) pB->i2eGoodMap[itemp] |= 0x0f00;
- if (utemp & 8) pB->i2eGoodMap[itemp] |= 0xf000;
- utemp >>= 4;
- }
-
- // Now determine whether we should transfer in 8 or 16-bit mode.
- switch (pB->i2ePom.e.porBus & (POR_BUS_SLOT16 | POR_BUS_DIP16) )
- {
- case POR_BUS_SLOT16 | POR_BUS_DIP16:
- pB->i2eDataWidth16 = true;
- pB->i2eMaxIrq = 15;
- break;
-
- case POR_BUS_SLOT16:
- pB->i2eDataWidth16 = false;
- pB->i2eMaxIrq = 15;
- break;
-
- case 0:
- case POR_BUS_DIP16: // In an 8-bit slot, DIP switch don't care.
- default:
- pB->i2eDataWidth16 = false;
- pB->i2eMaxIrq = 7;
- break;
- }
- break; // POR_ID_FIIEX case
-
- default: // Unknown type of board
- I2_COMPLETE(pB, I2EE_BAD_FAMILY);
- break;
- } // End the switch based on family
-
- // Temporarily, claim there is no room in the outbound fifo.
- // We will maintain this whenever we check for an empty outbound FIFO.
- pB->i2eFifoRemains = 0;
-
- // Now, based on the bus type, should we expect to be able to re-configure
- // interrupts (say, for testing purposes).
- switch (pB->i2ePom.e.porBus & POR_BUS_TYPE)
- {
- case POR_BUS_T_ISA:
- case POR_BUS_T_UNK: // If the type of bus is undeclared, assume ok.
- case POR_BUS_T_MCA:
- case POR_BUS_T_EISA:
- break;
- default:
- I2_COMPLETE(pB, I2EE_BADBUS);
- }
-
- if (pB->i2eDataWidth16)
- {
- pB->i2eWriteBuf = iiWriteBuf16;
- pB->i2eReadBuf = iiReadBuf16;
- pB->i2eWriteWord = iiWriteWord16;
- pB->i2eReadWord = iiReadWord16;
- } else {
- pB->i2eWriteBuf = iiWriteBuf8;
- pB->i2eReadBuf = iiReadBuf8;
- pB->i2eWriteWord = iiWriteWord8;
- pB->i2eReadWord = iiReadWord8;
- }
-
- switch(pB->i2eFifoStyle)
- {
- case FIFO_II:
- pB->i2eWaitForTxEmpty = iiWaitForTxEmptyII;
- pB->i2eTxMailEmpty = iiTxMailEmptyII;
- pB->i2eTrySendMail = iiTrySendMailII;
- pB->i2eGetMail = iiGetMailII;
- pB->i2eEnableMailIrq = iiEnableMailIrqII;
- pB->i2eWriteMask = iiWriteMaskII;
-
- break;
-
- case FIFO_IIEX:
- pB->i2eWaitForTxEmpty = iiWaitForTxEmptyIIEX;
- pB->i2eTxMailEmpty = iiTxMailEmptyIIEX;
- pB->i2eTrySendMail = iiTrySendMailIIEX;
- pB->i2eGetMail = iiGetMailIIEX;
- pB->i2eEnableMailIrq = iiEnableMailIrqIIEX;
- pB->i2eWriteMask = iiWriteMaskIIEX;
-
- break;
-
- default:
- I2_COMPLETE(pB, I2EE_INCONSIST);
- }
-
- // Initialize state information.
- pB->i2eState = II_STATE_READY; // Ready to load loadware.
-
- // Some Final cleanup:
- // For some boards, the bootstrap firmware may perform some sort of test
- // resulting in a stray character pending in the incoming mailbox. If one is
- // there, it should be read and discarded, especially since for the standard
- // firmware, it's the mailbox that interrupts the host.
-
- pB->i2eStartMail = iiGetMail(pB);
-
- // Throw it away and clear the mailbox structure element
- pB->i2eStartMail = NO_MAIL_HERE;
-
-	// Everything is ok now; return with good status.
-
- pB->i2eValid = I2E_MAGIC;
- I2_COMPLETE(pB, I2EE_GOOD);
-}
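The header comments above spell out the intended bring-up order: iiSetAddress(), iiReset(), iiResetDelay(), then iiInitialize(). A minimal caller sketch, assuming ii2DelayTimer (below) is an acceptable delayFunc_t; each routine returns true/false and leaves the reason in pB->i2eError, as documented.

/* Hypothetical bring-up sketch using the routines in this file. */
static int example_bringup(i2eBordStrPtr pB, int io_address)
{
	if (!iiSetAddress(pB, io_address, ii2DelayTimer))
		return 0;			/* bad address: see pB->i2eError */
	if (!iiReset(pB))			/* issue the double reset pulse */
		return 0;
	if (!iiResetDelay(pB))			/* ~2 s wait for P.O.S.T. */
		return 0;
	if (!iiInitialize(pB))			/* read the power-on message */
		return 0;
	return pB->i2eState == II_STATE_READY;	/* ready to accept loadware */
}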
-
-//******************************************************************************
-// Function: ii2DelayTimer(mseconds)
-// Parameters: mseconds - number of milliseconds to delay
-//
-// Returns: Nothing
-//
-// Description:
-//
-// This routine delays for approximately mseconds milliseconds and is intended
-// to be called indirectly through i2Delay field in i2eBordStr. It uses the
-// Linux timer_list mechanism.
-//
-// The Linux timers use a unit called "jiffies" which are 10mS in the Intel
-// architecture. This function rounds the delay period up to the next "jiffy".
-// In the Alpha architecture the "jiffy" is 1mS, but this driver is not intended
-// for Alpha platforms at this time.
-//
-//******************************************************************************
-static void
-ii2DelayTimer(unsigned int mseconds)
-{
- msleep_interruptible(mseconds);
-}
-
-#if 0
-//static void ii2DelayIO(unsigned int);
-//******************************************************************************
-// !!! Not Used. This is DOS-era crap; some of you young folks may be interested
-// in how things were done in the stone age of calculating machines !!!
-// Function: ii2DelayIO(mseconds)
-// Parameters: mseconds - number of milliseconds to delay
-//
-// Returns: Nothing
-//
-// Description:
-//
-// This routine delays for approximately mseconds milliseconds and is intended
-// to be called indirectly through i2Delay field in i2eBordStr. It is intended
-// for use where a clock-based function is impossible: for example, DOS drivers.
-//
-// This function uses the IN instruction to place bounds on the timing and
-// assumes that ii2Safe has been set. This is because I/O instructions are not
-// subject to caching and will therefore take a certain minimum time. To ensure
-// the delay is at least long enough on fast machines, it is based on some
-// fastest-case calculations. On slower machines this may cause VERY long
-// delays. (3 x fastest case). In the fastest case, everything is cached except
-// the I/O instruction itself.
-//
-// Timing calculations:
-// The fastest bus speed for I/O operations is likely to be 10 MHz. The I/O
-// operation in question is a byte operation to an odd address. For 8-bit
-// operations, the architecture generally enforces two wait states. At 10 MHz, a
-// single cycle time is 100nS. A read operation at two wait states takes 6
-// cycles for a total time of 600nS. Therefore approximately 1666 iterations
-// would be required to generate a single millisecond delay. The worst
-// (reasonable) case would be an 8MHz system with no caching. In this case, the
-// I/O instruction would take 125nS x 6 cycles = 750 nS. More importantly, code
-// fetch of other instructions in the loop would take time (zero wait states,
-// however) and would be hard to estimate. This is minimized by using in-line
-// assembler for the inner loop of IN instructions. This consists of just a
-// few bytes. So we'll guess about four code fetches per loop. Each code fetch
-// should take four cycles, so we have 125nS * 8 = 1000nS. Worst case then is
-// that what should have taken 1 mS takes instead 1666 * (1750) = 2.9 mS.
-//
-// So much for theoretical timings: results using 1666 value on some actual
-// machines:
-// IBM 286 6MHz 3.15 mS
-// Zenith 386 33MHz 2.45 mS
-// (brandX) 386 33MHz 1.90 mS (has cache)
-// (brandY) 486 33MHz 2.35 mS
-// NCR 486 ?? 1.65 mS (microchannel)
-//
-// For most machines, it is probably safe to scale this number back (remember,
-// for robust operation use an actual timed delay if possible), so we are using
-// a value of 1190. This yields 1.17 mS for the fastest machine in our sample,
-// 1.75 mS for typical 386 machines, and 2.25 mS the absolute slowest machine.
-//
-// 1/29/93:
-// The above timings are too slow. Actual cycle times might be faster. ISA cycle
-// times could approach 500 nS, and ...
-// The IBM model 77 being microchannel has no wait states for 8-bit reads and
-// seems to be accessing the I/O at 440 nS per access (from start of one to
-// start of next). This would imply we need 1000/.440 = 2272 iterations to
-// guarantee we are fast enough. In actual testing, we see that 2 * 1190 are in
-// fact enough. For diagnostics, we keep the level at 1190, but developers note
-// this needs tuning.
-//
-// Safe assumption: 2270 i/o reads = 1 millisecond
-//
-//******************************************************************************
-
-
-static int ii2DelValue = 1190;  // See timing calculations above
- // 1666 for fastest theoretical machine
- // 1190 safe for most fast 386 machines
- // 1000 for fastest machine tested here
- // 540 (sic) for AT286/6Mhz
-static void
-ii2DelayIO(unsigned int mseconds)
-{
- if (!ii2Safe)
- return; /* Do nothing if this variable uninitialized */
-
- while(mseconds--) {
- int i = ii2DelValue;
- while ( i-- ) {
- inb(ii2Safe);
- }
- }
-}
-#endif
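As a sanity check on the fastest-case arithmetic in the commentary above (all figures are the comment's own assumptions, not new measurements):

/* 10 MHz bus -> 100 ns per cycle; an 8-bit read with two wait states takes
 * 6 cycles = 600 ns, so roughly 1000000 / 600 ~= 1666 reads per millisecond. */
enum { EXAMPLE_FASTEST_READS_PER_MS = 1000000 / 600 };	/* == 1666 */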
-
-//******************************************************************************
-// Function: ii2Nop()
-// Parameters: None
-//
-// Returns: Nothing
-//
-// Description:
-//
-// iiSetAddress will set i2eDelay to this if the delay parameter is NULL. This
-// saves checking for a NULL pointer at every call.
-//******************************************************************************
-static void
-ii2Nop(void)
-{
- return; // no mystery here
-}
-
-//=======================================================
-// Routines which are available in 8/16-bit versions, or
-// in different fifo styles. These are ALL called
-// indirectly through the board structure.
-//=======================================================
-
-//******************************************************************************
-// Function: iiWriteBuf16(pB, address, count)
-// Parameters: pB - pointer to board structure
-// address - address of data to write
-// count - number of data bytes to write
-//
-// Returns: True if everything appears copacetic.
-// False if there is any error: the pB->i2eError field has the error
-//
-// Description:
-//
-// Writes 'count' bytes from 'address' to the data fifo specified by the board
-// structure pointer pB. Should count happen to be odd, an extra pad byte is
-// sent (identity unknown...). Uses 16-bit (word) operations. Is called
-// indirectly through pB->i2eWriteBuf.
-//
-//******************************************************************************
-static int
-iiWriteBuf16(i2eBordStrPtr pB, unsigned char *address, int count)
-{
- // Rudimentary sanity checking here.
- if (pB->i2eValid != I2E_MAGIC)
- I2_COMPLETE(pB, I2EE_INVALID);
-
- I2_OUTSW(pB->i2eData, address, count);
-
- I2_COMPLETE(pB, I2EE_GOOD);
-}
-
-//******************************************************************************
-// Function: iiWriteBuf8(pB, address, count)
-// Parameters: pB - pointer to board structure
-// address - address of data to write
-// count - number of data bytes to write
-//
-// Returns: True if everything appears copacetic.
-// False if there is any error: the pB->i2eError field has the error
-//
-// Description:
-//
-// Writes 'count' bytes from 'address' to the data fifo specified by the board
-// structure pointer pB. Should count happen to be odd, an extra pad byte is
-// sent (identity unknown...). This is to be consistent with the 16-bit version.
-// Uses 8-bit (byte) operations. Is called indirectly through pB->i2eWriteBuf.
-//
-//******************************************************************************
-static int
-iiWriteBuf8(i2eBordStrPtr pB, unsigned char *address, int count)
-{
- /* Rudimentary sanity checking here */
- if (pB->i2eValid != I2E_MAGIC)
- I2_COMPLETE(pB, I2EE_INVALID);
-
- I2_OUTSB(pB->i2eData, address, count);
-
- I2_COMPLETE(pB, I2EE_GOOD);
-}
-
-//******************************************************************************
-// Function: iiReadBuf16(pB, address, count)
-// Parameters: pB - pointer to board structure
-// address - address to put data read
-// count - number of data bytes to read
-//
-// Returns: True if everything appears copacetic.
-// False if there is any error: the pB->i2eError field has the error
-//
-// Description:
-//
-// Reads 'count' bytes into 'address' from the data fifo specified by the board
-// structure pointer pB. Should count happen to be odd, an extra pad byte is
-// received (identity unknown...). Uses 16-bit (word) operations. Is called
-// indirectly through pB->i2eReadBuf.
-//
-//******************************************************************************
-static int
-iiReadBuf16(i2eBordStrPtr pB, unsigned char *address, int count)
-{
- // Rudimentary sanity checking here.
- if (pB->i2eValid != I2E_MAGIC)
- I2_COMPLETE(pB, I2EE_INVALID);
-
- I2_INSW(pB->i2eData, address, count);
-
- I2_COMPLETE(pB, I2EE_GOOD);
-}
-
-//******************************************************************************
-// Function: iiReadBuf8(pB, address, count)
-// Parameters: pB - pointer to board structure
-// address - address to put data read
-// count - number of data bytes to read
-//
-// Returns: True if everything appears copacetic.
-// False if there is any error: the pB->i2eError field has the error
-//
-// Description:
-//
-// Reads 'count' bytes into 'address' from the data fifo specified by the board
-// structure pointer pB. Should count happen to be odd, an extra pad byte is
-// received (identity unknown...). This is to match the 16-bit behaviour. Uses
-// 8-bit (byte) operations. Is called indirectly through pB->i2eReadBuf.
-//
-//******************************************************************************
-static int
-iiReadBuf8(i2eBordStrPtr pB, unsigned char *address, int count)
-{
- // Rudimentary sanity checking here.
- if (pB->i2eValid != I2E_MAGIC)
- I2_COMPLETE(pB, I2EE_INVALID);
-
- I2_INSB(pB->i2eData, address, count);
-
- I2_COMPLETE(pB, I2EE_GOOD);
-}
-
-//******************************************************************************
-// Function: iiReadWord16(pB)
-// Parameters: pB - pointer to board structure
-//
-// Returns: True if everything appears copacetic.
-// False if there is any error: the pB->i2eError field has the error
-//
-// Description:
-//
-// Returns the word read from the data fifo specified by the board-structure
-// pointer pB. Uses a 16-bit operation. Is called indirectly through
-// pB->i2eReadWord.
-//
-//******************************************************************************
-static unsigned short
-iiReadWord16(i2eBordStrPtr pB)
-{
- return inw(pB->i2eData);
-}
-
-//******************************************************************************
-// Function: iiReadWord8(pB)
-// Parameters: pB - pointer to board structure
-//
-// Returns: True if everything appears copacetic.
-// False if there is any error: the pB->i2eError field has the error
-//
-// Description:
-//
-// Returns the word read from the data fifo specified by the board-structure
-// pointer pB. Uses two 8-bit operations. Bytes are assumed to be LSB first. Is
-// called indirectly through pB->i2eReadWord.
-//
-//******************************************************************************
-static unsigned short
-iiReadWord8(i2eBordStrPtr pB)
-{
- unsigned short urs;
-
- urs = inb(pB->i2eData);
-
- return (inb(pB->i2eData) << 8) | urs;
-}
-
-//******************************************************************************
-// Function: iiWriteWord16(pB, value)
-// Parameters: pB - pointer to board structure
-// value - data to write
-//
-// Returns: True if everything appears copacetic.
-// False if there is any error: the pB->i2eError field has the error
-//
-// Description:
-//
-// Writes the word 'value' to the data fifo specified by the board-structure
-// pointer pB. Uses 16-bit operation. Is called indirectly through
-// pB->i2eWriteWord.
-//
-//******************************************************************************
-static void
-iiWriteWord16(i2eBordStrPtr pB, unsigned short value)
-{
- outw((int)value, pB->i2eData);
-}
-
-//******************************************************************************
-// Function: iiWriteWord8(pB, value)
-// Parameters: pB - pointer to board structure
-// value - data to write
-//
-// Returns: True if everything appears copacetic.
-// False if there is any error: the pB->i2eError field has the error
-//
-// Description:
-//
-// Writes the word 'value' to the data fifo specified by the board-structure
-// pointer pB. Uses two 8-bit operations (writes LSB first). Is called
-// indirectly through pB->i2eWriteWord.
-//
-//******************************************************************************
-static void
-iiWriteWord8(i2eBordStrPtr pB, unsigned short value)
-{
- outb((char)value, pB->i2eData);
- outb((char)(value >> 8), pB->i2eData);
-}
-
-//******************************************************************************
-// Function: iiWaitForTxEmptyII(pB, mSdelay)
-// Parameters: pB - pointer to board structure
-// mSdelay - period to wait before returning
-//
-// Returns: True if the FIFO is empty.
-//		False if it is not empty in the required time: the pB->i2eError
-// field has the error.
-//
-// Description:
-//
-// Waits up to "mSdelay" milliseconds for the outgoing FIFO to become empty; if
-// not empty by the required time, returns false and error in pB->i2eError,
-// otherwise returns true.
-//
-// mSdelay == 0 is taken to mean must be empty on the first test.
-//
-// This version operates on IntelliPort-II - style FIFO's
-//
-// Note this routine is organized so that if the status is ok there is no delay
-// at all, either before or after the test. It is called indirectly through
-// pB->i2eWaitForTxEmpty.
-//
-//******************************************************************************
-static int
-iiWaitForTxEmptyII(i2eBordStrPtr pB, int mSdelay)
-{
- unsigned long flags;
- int itemp;
-
- for (;;)
- {
- // This routine hinges on being able to see the "other" status register
- // (as seen by the local processor). His incoming fifo is our outgoing
- // FIFO.
- //
- // By the nature of this routine, you would be using this as part of a
- // larger atomic context: i.e., you would use this routine to ensure the
- // fifo empty, then act on this information. Between these two halves,
- // you will generally not want to service interrupts or in any way
- // disrupt the assumptions implicit in the larger context.
- //
- // Even worse, however, this routine "shifts" the status register to
- // point to the local status register which is not the usual situation.
- // Therefore for extra safety, we force the critical section to be
- // completely atomic, and pick up after ourselves before allowing any
- // interrupts of any kind.
-
-
- write_lock_irqsave(&Dl_spinlock, flags);
- outb(SEL_COMMAND, pB->i2ePointer);
- outb(SEL_CMD_SH, pB->i2ePointer);
-
- itemp = inb(pB->i2eStatus);
-
- outb(SEL_COMMAND, pB->i2ePointer);
- outb(SEL_CMD_UNSH, pB->i2ePointer);
-
- if (itemp & ST_IN_EMPTY)
- {
- I2_UPDATE_FIFO_ROOM(pB);
- write_unlock_irqrestore(&Dl_spinlock, flags);
- I2_COMPLETE(pB, I2EE_GOOD);
- }
-
- write_unlock_irqrestore(&Dl_spinlock, flags);
-
- if (mSdelay-- == 0)
- break;
-
- iiDelay(pB, 1); /* 1 mS granularity on checking condition */
- }
- I2_COMPLETE(pB, I2EE_TXE_TIME);
-}
-
-//******************************************************************************
-// Function: iiWaitForTxEmptyIIEX(pB, mSdelay)
-// Parameters: pB - pointer to board structure
-// mSdelay - period to wait before returning
-//
-// Returns: True if the FIFO is empty.
-//		False if it is not empty in the required time: the pB->i2eError
-// field has the error.
-//
-// Description:
-//
-// Waits up to "mSdelay" milliseconds for the outgoing FIFO to become empty; if
-// not empty by the required time, returns false and error in pB->i2eError,
-// otherwise returns true.
-//
-// mSdelay == 0 is taken to mean must be empty on the first test.
-//
-// This version operates on IntelliPort-IIEX - style FIFO's
-//
-// Note this routine is organized so that if the status is ok there is no delay
-// at all, either before or after the test. It is called indirectly through
-// pB->i2eWaitForTxEmpty.
-//
-//******************************************************************************
-static int
-iiWaitForTxEmptyIIEX(i2eBordStrPtr pB, int mSdelay)
-{
- unsigned long flags;
-
- for (;;)
- {
- // By the nature of this routine, you would be using this as part of a
- // larger atomic context: i.e., you would use this routine to ensure the
- // fifo empty, then act on this information. Between these two halves,
- // you will generally not want to service interrupts or in any way
- // disrupt the assumptions implicit in the larger context.
-
- write_lock_irqsave(&Dl_spinlock, flags);
-
- if (inb(pB->i2eStatus) & STE_OUT_MT) {
- I2_UPDATE_FIFO_ROOM(pB);
- write_unlock_irqrestore(&Dl_spinlock, flags);
- I2_COMPLETE(pB, I2EE_GOOD);
- }
- write_unlock_irqrestore(&Dl_spinlock, flags);
-
- if (mSdelay-- == 0)
- break;
-
- iiDelay(pB, 1); // 1 mS granularity on checking condition
- }
- I2_COMPLETE(pB, I2EE_TXE_TIME);
-}
-
-//******************************************************************************
-// Function: iiTxMailEmptyII(pB)
-// Parameters: pB - pointer to board structure
-//
-// Returns: True if the transmit mailbox is empty.
-//		False if it is not empty.
-//
-// Description:
-//
-// Returns true or false according to whether the transmit mailbox is empty (and
-// therefore able to accept more mail)
-//
-// This version operates on IntelliPort-II - style FIFO's
-//
-//******************************************************************************
-static int
-iiTxMailEmptyII(i2eBordStrPtr pB)
-{
- int port = pB->i2ePointer;
- outb(SEL_OUTMAIL, port);
- return inb(port) == 0;
-}
-
-//******************************************************************************
-// Function: iiTxMailEmptyIIEX(pB)
-// Parameters: pB - pointer to board structure
-//
-// Returns: True if the transmit mailbox is empty.
-//		False if it is not empty.
-//
-// Description:
-//
-// Returns true or false according to whether the transmit mailbox is empty (and
-// therefore able to accept more mail)
-//
-// This version operates on IntelliPort-IIEX - style FIFO's
-//
-//******************************************************************************
-static int
-iiTxMailEmptyIIEX(i2eBordStrPtr pB)
-{
- return !(inb(pB->i2eStatus) & STE_OUT_MAIL);
-}
-
-//******************************************************************************
-// Function: iiTrySendMailII(pB,mail)
-// Parameters: pB - pointer to board structure
-// mail - value to write to mailbox
-//
-// Returns: True if the transmit mailbox is empty, and mail is sent.
-//		False if it is not empty.
-//
-// Description:
-//
-// If outgoing mailbox is empty, sends mail and returns true. If outgoing
-// mailbox is not empty, returns false.
-//
-// This version operates on IntelliPort-II - style FIFO's
-//
-//******************************************************************************
-static int
-iiTrySendMailII(i2eBordStrPtr pB, unsigned char mail)
-{
- int port = pB->i2ePointer;
-
- outb(SEL_OUTMAIL, port);
- if (inb(port) == 0) {
- outb(SEL_OUTMAIL, port);
- outb(mail, port);
- return 1;
- }
- return 0;
-}
-
-//******************************************************************************
-// Function: iiTrySendMailIIEX(pB,mail)
-// Parameters: pB - pointer to board structure
-// mail - value to write to mailbox
-//
-// Returns: True if the transmit mailbox is empty, and mail is sent.
-//		False if it is not empty.
-//
-// Description:
-//
-// If outgoing mailbox is empty, sends mail and returns true. If outgoing
-// mailbox is not empty, returns false.
-//
-// This version operates on IntelliPort-IIEX - style FIFO's
-//
-//******************************************************************************
-static int
-iiTrySendMailIIEX(i2eBordStrPtr pB, unsigned char mail)
-{
- if (inb(pB->i2eStatus) & STE_OUT_MAIL)
- return 0;
- outb(mail, pB->i2eXMail);
- return 1;
-}
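Callers normally reach these routines through the indirect entries bound in iiInitialize(). A hypothetical retry wrapper is sketched below; the 1 mS polling granularity mirrors the wait loops above, and the helper itself is not part of this file.

static int example_send_mail(i2eBordStrPtr pB, unsigned char mail, int ms)
{
	/* Retry the mailbox write for up to 'ms' milliseconds. */
	while (!pB->i2eTrySendMail(pB, mail)) {
		if (ms-- == 0)
			return 0;	/* outgoing mailbox still full */
		iiDelay(pB, 1);
	}
	return 1;
}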
-
-//******************************************************************************
-// Function: iiGetMailII(pB,mail)
-// Parameters: pB - pointer to board structure
-//
-// Returns: Mailbox data or NO_MAIL_HERE.
-//
-// Description:
-//
-// If no mail available, returns NO_MAIL_HERE otherwise returns the data from
-// the mailbox, which is guaranteed != NO_MAIL_HERE.
-//
-// This version operates on IntelliPort-II - style FIFO's
-//
-//******************************************************************************
-static unsigned short
-iiGetMailII(i2eBordStrPtr pB)
-{
- if (I2_HAS_MAIL(pB)) {
- outb(SEL_INMAIL, pB->i2ePointer);
- return inb(pB->i2ePointer);
- } else {
- return NO_MAIL_HERE;
- }
-}
-
-//******************************************************************************
-// Function: iiGetMailIIEX(pB,mail)
-// Parameters: pB - pointer to board structure
-//
-// Returns: Mailbox data or NO_MAIL_HERE.
-//
-// Description:
-//
-// If no mail available, returns NO_MAIL_HERE otherwise returns the data from
-// the mailbox, which is guaranteed != NO_MAIL_HERE.
-//
-// This version operates on IntelliPort-IIEX - style FIFO's
-//
-//******************************************************************************
-static unsigned short
-iiGetMailIIEX(i2eBordStrPtr pB)
-{
- if (I2_HAS_MAIL(pB))
- return inb(pB->i2eXMail);
- else
- return NO_MAIL_HERE;
-}
-
-//******************************************************************************
-// Function: iiEnableMailIrqII(pB)
-// Parameters: pB - pointer to board structure
-//
-// Returns: Nothing
-//
-// Description:
-//
-// Enables board to interrupt host (only) by writing to host's in-bound mailbox.
-//
-// This version operates on IntelliPort-II - style FIFO's
-//
-//******************************************************************************
-static void
-iiEnableMailIrqII(i2eBordStrPtr pB)
-{
- outb(SEL_MASK, pB->i2ePointer);
- outb(ST_IN_MAIL, pB->i2ePointer);
-}
-
-//******************************************************************************
-// Function: iiEnableMailIrqIIEX(pB)
-// Parameters: pB - pointer to board structure
-//
-// Returns: Nothing
-//
-// Description:
-//
-// Enables board to interrupt host (only) by writing to host's in-bound mailbox.
-//
-// This version operates on IntelliPort-IIEX - style FIFO's
-//
-//******************************************************************************
-static void
-iiEnableMailIrqIIEX(i2eBordStrPtr pB)
-{
- outb(MX_IN_MAIL, pB->i2eXMask);
-}
-
-//******************************************************************************
-// Function: iiWriteMaskII(pB)
-// Parameters: pB - pointer to board structure
-//
-// Returns: Nothing
-//
-// Description:
-//
-// Writes arbitrary value to the mask register.
-//
-// This version operates on IntelliPort-II - style FIFO's
-//
-//******************************************************************************
-static void
-iiWriteMaskII(i2eBordStrPtr pB, unsigned char value)
-{
- outb(SEL_MASK, pB->i2ePointer);
- outb(value, pB->i2ePointer);
-}
-
-//******************************************************************************
-// Function: iiWriteMaskIIEX(pB)
-// Parameters: pB - pointer to board structure
-//
-// Returns: Nothing
-//
-// Description:
-//
-// Writes arbitrary value to the mask register.
-//
-// This version operates on IntelliPort-IIEX - style FIFO's
-//
-//******************************************************************************
-static void
-iiWriteMaskIIEX(i2eBordStrPtr pB, unsigned char value)
-{
- outb(value, pB->i2eXMask);
-}
-
-//******************************************************************************
-// Function: iiDownloadBlock(pB, pSource, isStandard)
-// Parameters: pB - pointer to board structure
-// pSource - loadware block to download
-// isStandard - True if "standard" loadware, else false.
-//
-// Returns: Success or Failure
-//
-// Description:
-//
-// Downloads a single block (at pSource) to the board referenced by pB. Caller
-// sets isStandard to true/false according to whether the "standard" loadware is
-// what's being loaded. The normal process, then, is to perform an iiInitialize
-// to the board, then perform some number of iiDownloadBlocks using the returned
-// state to determine when download is complete.
-//
-// Possible return values: (see I2ELLIS.H)
-// II_DOWN_BADVALID
-// II_DOWN_BADFILE
-// II_DOWN_CONTINUING
-// II_DOWN_GOOD
-// II_DOWN_BAD
-// II_DOWN_BADSTATE
-// II_DOWN_TIMEOUT
-//
-// Uses the i2eState and i2eToLoad fields (initialized at iiInitialize) to
-// determine whether this is the first block, whether to check for magic
-// numbers, how many blocks there are to go...
-//
-//******************************************************************************
-static int
-iiDownloadBlock ( i2eBordStrPtr pB, loadHdrStrPtr pSource, int isStandard)
-{
- int itemp;
- int loadedFirst;
-
- if (pB->i2eValid != I2E_MAGIC) return II_DOWN_BADVALID;
-
- switch(pB->i2eState)
- {
- case II_STATE_READY:
-
- // Loading the first block after reset. Must check the magic number of the
- // loadfile, store the number of blocks we expect to load.
- if (pSource->e.loadMagic != MAGIC_LOADFILE)
- {
- return II_DOWN_BADFILE;
- }
-
- // Next we store the total number of blocks to load, including this one.
- pB->i2eToLoad = 1 + pSource->e.loadBlocksMore;
-
- // Set the state, store the version numbers. ('Cause this may have come
- // from a file - we might want to report these versions and revisions in
-		// case of an error!)
- pB->i2eState = II_STATE_LOADING;
- pB->i2eLVersion = pSource->e.loadVersion;
- pB->i2eLRevision = pSource->e.loadRevision;
- pB->i2eLSub = pSource->e.loadSubRevision;
-
- // The time and date of compilation is also available but don't bother
- // storing it for normal purposes.
- loadedFirst = 1;
- break;
-
- case II_STATE_LOADING:
- loadedFirst = 0;
- break;
-
- default:
- return II_DOWN_BADSTATE;
- }
-
- // Now we must be in the II_STATE_LOADING state, and we assume i2eToLoad
- // must be positive still, because otherwise we would have cleaned up last
- // time and set the state to II_STATE_LOADED.
- if (!iiWaitForTxEmpty(pB, MAX_DLOAD_READ_TIME)) {
- return II_DOWN_TIMEOUT;
- }
-
- if (!iiWriteBuf(pB, pSource->c, LOADWARE_BLOCK_SIZE)) {
- return II_DOWN_BADVALID;
- }
-
- // If we just loaded the first block, wait for the fifo to empty an extra
- // long time to allow for any special startup code in the firmware, like
- // sending status messages to the LCD's.
-
- if (loadedFirst) {
- if (!iiWaitForTxEmpty(pB, MAX_DLOAD_START_TIME)) {
- return II_DOWN_TIMEOUT;
- }
- }
-
- // Determine whether this was our last block!
- if (--(pB->i2eToLoad)) {
- return II_DOWN_CONTINUING; // more to come...
- }
-
- // It WAS our last block: Clean up operations...
- // ...Wait for last buffer to drain from the board...
- if (!iiWaitForTxEmpty(pB, MAX_DLOAD_READ_TIME)) {
- return II_DOWN_TIMEOUT;
- }
- // If there were only a single block written, this would come back
- // immediately and be harmless, though not strictly necessary.
- itemp = MAX_DLOAD_ACK_TIME/10;
- while (--itemp) {
- if (I2_HAS_INPUT(pB)) {
- switch (inb(pB->i2eData)) {
- case LOADWARE_OK:
- pB->i2eState =
- isStandard ? II_STATE_STDLOADED :II_STATE_LOADED;
-
- // Some revisions of the bootstrap firmware (e.g. ISA-8 1.0.2)
-				// will, if there is a debug port attached, require some
-				// time to send information to the debug port now. It will do
-				// this before executing any of the code we just downloaded.
- // It may take up to 700 milliseconds.
- if (pB->i2ePom.e.porDiag2 & POR_DEBUG_PORT) {
- iiDelay(pB, 700);
- }
-
- return II_DOWN_GOOD;
-
- case LOADWARE_BAD:
- default:
- return II_DOWN_BAD;
- }
- }
-
- iiDelay(pB, 10); // 10 mS granularity on checking condition
- }
-
- // Drop-through --> timed out waiting for firmware confirmation
-
- pB->i2eState = II_STATE_BADLOAD;
- return II_DOWN_TIMEOUT;
-}
-
-//******************************************************************************
-// Function: iiDownloadAll(pB, pSource, isStandard, size)
-// Parameters: pB - pointer to board structure
-// pSource - loadware block to download
-// isStandard - True if "standard" loadware, else false.
-// size - size of data to download (in bytes)
-//
-// Returns: Success or Failure
-//
-// Description:
-//
-// Given a pointer to a board structure, a pointer to the beginning of some
-// loadware, whether it is considered the "standard" loadware, and the size of
-// the array in bytes, this loads the entire array to the board as loadware.
-//
-// Assumes the board has been freshly reset and the power-up reset message read.
-// (i.e., in II_STATE_READY). Complains if state is bad, or if there seems to be
-// too much or too little data to load, or if iiDownloadBlock complains.
-//******************************************************************************
-static int
-iiDownloadAll(i2eBordStrPtr pB, loadHdrStrPtr pSource, int isStandard, int size)
-{
- int status;
-
- // We know (from context) board should be ready for the first block of
- // download. Complain if not.
- if (pB->i2eState != II_STATE_READY) return II_DOWN_BADSTATE;
-
- while (size > 0) {
- size -= LOADWARE_BLOCK_SIZE; // How much data should there be left to
- // load after the following operation ?
-
- // Note we just bump pSource by "one", because its size is actually that
- // of an entire block, same as LOADWARE_BLOCK_SIZE.
- status = iiDownloadBlock(pB, pSource++, isStandard);
-
- switch(status)
- {
- case II_DOWN_GOOD:
- return ( (size > 0) ? II_DOWN_OVER : II_DOWN_GOOD);
-
- case II_DOWN_CONTINUING:
- break;
-
- default:
- return status;
- }
- }
-
- // We shouldn't drop out: it means "while" caught us with nothing left to
- // download, yet the previous DownloadBlock did not return complete. Ergo,
- // not enough data to match the size byte in the header.
- return II_DOWN_UNDER;
-}
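A sketch of how a caller might hand a complete loadware image to iiDownloadAll() after the bring-up sequence; the firmware pointer and byte count are placeholders, and only iiDownloadAll() and the II_DOWN_* codes above are taken from this file.

static int example_load_standard(i2eBordStrPtr pB, loadHdrStrPtr fw, int fw_bytes)
{
	int rc = iiDownloadAll(pB, fw, 1 /* standard loadware */, fw_bytes);

	return rc == II_DOWN_GOOD;	/* any other II_DOWN_* code is a failure */
}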
diff --git a/drivers/staging/tty/ip2/i2ellis.h b/drivers/staging/tty/ip2/i2ellis.h
deleted file mode 100644
index fb6df245601..00000000000
--- a/drivers/staging/tty/ip2/i2ellis.h
+++ /dev/null
@@ -1,566 +0,0 @@
-/*******************************************************************************
-*
-* (c) 1999 by Computone Corporation
-*
-********************************************************************************
-*
-*
-* PACKAGE: Linux tty Device Driver for IntelliPort II family of multiport
-* serial I/O controllers.
-*
-* DESCRIPTION: Mainline code for the device driver
-*
-*******************************************************************************/
-//------------------------------------------------------------------------------
-// i2ellis.h
-//
-// IntelliPort-II and IntelliPort-IIEX
-//
-// Extremely
-// Low
-// Level
-// Interface
-// Services
-//
-// Structure Definitions and declarations for "ELLIS" service routines found in
-// i2ellis.c
-//
-// These routines are based on properties of the IntelliPort-II and -IIEX
-// hardware and bootstrap firmware, and are not sensitive to particular
-// conventions of any particular loadware.
-//
-// Unlike i2hw.h, which provides IRONCLAD hardware definitions, the material
-// here and in i2ellis.c is intended to provide a useful, but not required,
-// layer of insulation from the hardware specifics.
-//------------------------------------------------------------------------------
-#ifndef I2ELLIS_H /* To prevent multiple includes */
-#define I2ELLIS_H 1
-//------------------------------------------------
-// Revision History:
-//
-// 30 September 1991 MAG First Draft Started
-// 12 October 1991 ...continued...
-//
-// 20 December 1996 AKM Linux version
-//-------------------------------------------------
-
-//----------------------
-// Mandatory Includes:
-//----------------------
-#include "ip2types.h"
-#include "i2hw.h" // The hardware definitions
-
-//------------------------------------------
-// STAT_BOXIDS packets
-//------------------------------------------
-#define MAX_BOX 4
-
-typedef struct _bidStat
-{
- unsigned char bid_value[MAX_BOX];
-} bidStat, *bidStatPtr;
-
-// This packet is sent in response to a CMD_GET_BOXIDS bypass command. For -IIEX
-// boards, reports the hardware-specific "asynchronous resource register" on
-// each expansion box. Boxes not present report 0xff. For -II boards, the first
-// element contains 0x80 for 8-port, 0x40 for 4-port boards.
-
-// Box IDs aka ARR or Async Resource Register (more than you want to know)
-// 7 6 5 4 3 2 1 0
-// F F N N L S S S
-// =============================
-// F F - Product Family Designator
-// =====+++++++++++++++++++++++++++++++
-// 0 0 - Intelliport II EX / ISA-8
-// 1 0 - IntelliServer
-// 0 1 - SAC - Port Device (Intelliport III ??? )
-// =====+++++++++++++++++++++++++++++++++++++++
-// N N - Number of Ports
-// 0 0 - 8 (eight)
-// 0 1 - 4 (four)
-// 1 0 - 12 (twelve)
-// 1 1 - 16 (sixteen)
-// =++++++++++++++++++++++++++++++++++
-// L - LCD Display Module Present
-// 0 - No
-// 1 - LCD module present
-// =========+++++++++++++++++++++++++++++++++++++
-// S S S - Async Signals Supported Designator
-// 0 0 0 - 8dss, Mod DCE DB25 Female
-// 0 0 1 - 6dss, RJ-45
-// 0 1 0 - RS-232/422 dss, DB25 Female
-// 0 1 1 - RS-232/422 dss, separate 232/422 DB25 Female
-// 1 0 0 - 6dss, 921.6 I/F with ST654's
-// 1 0 1 - RS-423/232 8dss, RJ-45 10Pin
-// 1 1 0 - 6dss, Mod DCE DB25 Female
-// 1 1 1 - NO BOX PRESENT
-
-#define FF(c) ((c & 0xC0) >> 6)
-#define NN(c) ((c & 0x30) >> 4)
-#define L(c) ((c & 0x08) >> 3)
-#define SSS(c) (c & 0x07)
-
-#define BID_HAS_654(x) (SSS(x) == 0x04)
-#define BID_NO_BOX 0xff /* no box */
-#define BID_8PORT 0x80 /* IP2-8 port */
-#define BID_4PORT 0x81 /* IP2-4 port */
-#define BID_EXP_MASK 0x30 /* IP2-EX */
-#define BID_EXP_8PORT 0x00 /* 8, */
-#define BID_EXP_4PORT 0x10 /* 4, */
-#define BID_EXP_UNDEF 0x20 /* UNDEF, */
-#define BID_EXP_16PORT 0x30 /* 16, */
-#define BID_LCD_CTRL 0x08 /* LCD Controller */
-#define BID_LCD_NONE 0x00 /* - no controller present */
-#define BID_LCD_PRES 0x08 /* - controller present */
-#define BID_CON_MASK 0x07 /* - connector pinouts */
-#define BID_CON_DB25 0x00 /* - DB-25 F */
-#define BID_CON_RJ45 0x01 /* - rj45 */
-
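A small decoding sketch for the ARR bit layout documented above; purely illustrative, using only the field macros and BID_NO_BOX defined here.

static int example_bid_ports(unsigned char arr)
{
	if (arr == BID_NO_BOX)
		return 0;
	switch (NN(arr)) {		/* NN field: number of ports */
	case 0:  return 8;
	case 1:  return 4;
	case 2:  return 12;
	default: return 16;
	}
}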
-//------------------------------------------------------------------------------
-// i2eBordStr
-//
-// This structure contains all the information the ELLIS routines require in
-// dealing with a particular board.
-//------------------------------------------------------------------------------
-// There are some queues here which are guaranteed to never contain the entry
-// for a single channel twice. So they must be slightly larger to allow
-// unambiguous full/empty management
-//
-#define CH_QUEUE_SIZE	(ABS_MOST_PORTS+2)
-
-typedef struct _i2eBordStr
-{
- porStr i2ePom; // Structure containing the power-on message.
-
- unsigned short i2ePomSize;
-					// The number of bytes actually read; if
-					// different from sizeof i2ePom, this
-					// indicates an error!
-
- unsigned short i2eStartMail;
- // Contains whatever inbound mailbox data
- // present at startup. NO_MAIL_HERE indicates
- // nothing was present. No special
- // significance as of this writing, but may be
- // useful for diagnostic reasons.
-
- unsigned short i2eValid;
- // Indicates validity of the structure; if
- // i2eValid == I2E_MAGIC, then we can trust
- // the other fields. Some (especially
- // initialization) functions are good about
- // checking for validity. Many functions do
- // not, it being assumed that the larger
- // context assures we are using a valid
- // i2eBordStrPtr.
-
- unsigned short i2eError;
- // Used for returning an error condition from
- // several functions which use i2eBordStrPtr
- // as an argument.
-
- // Accelerators to characterize separate features of a board, derived from a
- // number of sources.
-
- unsigned short i2eFifoSize;
- // Always, the size of the FIFO. For
- // IntelliPort-II, always the same, for -IIEX
- // taken from the Power-On reset message.
-
- volatile
- unsigned short i2eFifoRemains;
- // Used during normal operation to indicate a
- // lower bound on the amount of data which
- // might be in the outbound fifo.
-
- unsigned char i2eFifoStyle;
- // Accelerator which tells which style (-II or
- // -IIEX) FIFO we are using.
-
- unsigned char i2eDataWidth16;
- // Accelerator which tells whether we should
- // do 8 or 16-bit data transfers.
-
- unsigned char i2eMaxIrq;
- // The highest allowable IRQ, based on the
- // slot size.
-
- // Accelerators for various addresses on the board
- int i2eBase; // I/O Address of the Board
- int i2eData; // From here data transfers happen
- int i2eStatus; // From here status reads happen
- int i2ePointer; // (IntelliPort-II: pointer/commands)
- int i2eXMail; // (IntelliPort-IIEX: mailboxes)
- int i2eXMask; // (IntelliPort-IIEX: mask write)
-
- //-------------------------------------------------------
- // Information presented in a common format across boards
- // For each box, bit map of the channels present. Box closest to
- // the host is box 0. LSB is channel 0. IntelliPort-II (non-expandable)
- // is taken to be box 0. These are derived from product i.d. registers.
-
- unsigned short i2eChannelMap[ABS_MAX_BOXES];
-
- // Same as above, except each is derived from firmware attempting to detect
- // the uart presence (by reading a valid GFRCR register). If bits are set in
- // i2eChannelMap and not in i2eGoodMap, there is a potential problem.
-
- unsigned short i2eGoodMap[ABS_MAX_BOXES];
-
- // ---------------------------
- // For indirect function calls
-
- // Routine to cause an N-millisecond delay: Patched by the ii2Initialize
- // function.
-
- void (*i2eDelay)(unsigned int);
-
- // Routine to write N bytes to the board through the FIFO. Returns true if
- // all copacetic, otherwise returns false and error is in i2eError field.
- // IF COUNT IS ODD, ROUNDS UP TO THE NEXT EVEN NUMBER.
-
- int (*i2eWriteBuf)(struct _i2eBordStr *, unsigned char *, int);
-
- // Routine to read N bytes from the board through the FIFO. Returns true if
- // copacetic, otherwise returns false and error in i2eError.
- // IF COUNT IS ODD, ROUNDS UP TO THE NEXT EVEN NUMBER.
-
- int (*i2eReadBuf)(struct _i2eBordStr *, unsigned char *, int);
-
- // Returns a word from FIFO. Will use 2 byte operations if needed.
-
- unsigned short (*i2eReadWord)(struct _i2eBordStr *);
-
- // Writes a word to FIFO. Will use 2 byte operations if needed.
-
- void (*i2eWriteWord)(struct _i2eBordStr *, unsigned short);
-
- // Waits specified time for the Transmit FIFO to go empty. Returns true if
- // ok, otherwise returns false and error in i2eError.
-
- int (*i2eWaitForTxEmpty)(struct _i2eBordStr *, int);
-
- // Returns true or false according to whether the outgoing mailbox is empty.
-
- int (*i2eTxMailEmpty)(struct _i2eBordStr *);
-
- // Checks whether outgoing mailbox is empty. If so, sends mail and returns
- // true. Otherwise returns false.
-
- int (*i2eTrySendMail)(struct _i2eBordStr *, unsigned char);
-
- // If no mail available, returns NO_MAIL_HERE, else returns the value in the
- // mailbox (guaranteed can't be NO_MAIL_HERE).
-
- unsigned short (*i2eGetMail)(struct _i2eBordStr *);
-
- // Enables the board to interrupt the host when it writes to the mailbox.
- // Irqs will not occur, however, until the loadware separately enables
- // interrupt generation to the host. The standard loadware does this in
- // response to a command packet sent by the host. (Also, disables
- // any other potential interrupt sources from the board -- other than the
- // inbound mailbox).
-
- void (*i2eEnableMailIrq)(struct _i2eBordStr *);
-
- // Writes an arbitrary value to the mask register.
-
- void (*i2eWriteMask)(struct _i2eBordStr *, unsigned char);
-
-
- // State information
-
- // During downloading, indicates the number of blocks remaining to download
- // to the board.
-
- short i2eToLoad;
-
- // State of board (see manifests below) (e.g., whether in reset condition,
- // whether standard loadware is installed, etc.)
-
- unsigned char i2eState;
-
- // These three fields are only valid when there is loadware running on the
- // board. (i2eState == II_STATE_LOADED or i2eState == II_STATE_STDLOADED )
-
- unsigned char i2eLVersion; // Loadware version
- unsigned char i2eLRevision; // Loadware revision
- unsigned char i2eLSub; // Loadware subrevision
-
- // Flags which only have meaning in the context of the standard loadware.
- // Somewhat violates the layering concept, but there is so little additional
- // needed at the board level (while much additional at the channel level),
- // that this beats maintaining two different per-board structures.
-
- // Indicates which IRQ the board has been initialized (from software) to use
- // For MicroChannel boards, any value different from IRQ_UNDEFINED means
- // that the software command has been sent to enable interrupts (or specify
- // they are disabled). Special value: IRQ_UNDEFINED indicates that the
- // software command to select the interrupt has not yet been sent, therefore
- // (since the standard loadware insists that it be sent before any other
- // packets are sent) no other packets should be sent yet.
-
- unsigned short i2eUsingIrq;
-
- // This is set when we hit the MB_OUT_STUFFED mailbox, which prevents us
- // putting more in the mailbox until an appropriate mailbox message is
- // received.
-
- unsigned char i2eWaitingForEmptyFifo;
-
- // Any mailbox bits waiting to be sent to the board are OR'ed in here.
-
- unsigned char i2eOutMailWaiting;
-
- // The head of any incoming packet is read into here, is then examined and
- // we dispatch accordingly.
-
- unsigned short i2eLeadoffWord[1];
-
- // Running counter of interrupts where the mailbox indicated incoming data.
-
- unsigned short i2eFifoInInts;
-
- // Running counter of interrupts where the mailbox indicated outgoing data
- // had been stripped.
-
- unsigned short i2eFifoOutInts;
-
- // If not void, gives the address of a routine to call if fatal board error
- // is found (only applies to standard l/w).
-
- void (*i2eFatalTrap)(struct _i2eBordStr *);
-
- // Will point to an array of some sort of channel structures (whose format
- // is unknown at this level, being a function of what loadware is
- // installed and the code configuration (max sizes of buffers, etc.)).
-
- void *i2eChannelPtr;
-
- // Set indicates that the board has gone fatal.
-
- unsigned short i2eFatal;
-
- // The number of elements pointed to by i2eChannelPtr.
-
- unsigned short i2eChannelCnt;
-
- // Ring-buffers of channel structures whose channels have particular needs.
-
- rwlock_t Fbuf_spinlock;
- volatile
- unsigned short i2Fbuf_strip; // Strip index
- volatile
- unsigned short i2Fbuf_stuff; // Stuff index
- void *i2Fbuf[CH_QUEUE_SIZE]; // An array of channel pointers
- // of channels who need to send
- // flow control packets.
- rwlock_t Dbuf_spinlock;
- volatile
- unsigned short i2Dbuf_strip; // Strip index
- volatile
- unsigned short i2Dbuf_stuff; // Stuff index
- void *i2Dbuf[CH_QUEUE_SIZE]; // An array of channel pointers
- // of channels who need to send
- // data or in-line command packets.
- rwlock_t Bbuf_spinlock;
- volatile
- unsigned short i2Bbuf_strip; // Strip index
- volatile
- unsigned short i2Bbuf_stuff; // Stuff index
- void *i2Bbuf[CH_QUEUE_SIZE]; // An array of channel pointers
- // of channels who need to send
- // bypass command packets.
-
- /*
- * A set of flags to indicate that certain events have occurred on at least
- * one of the ports on this board. We use this to decide whether to spin
- * through the channels looking for breaks, etc.
- */
- int got_input;
- int status_change;
- bidStat channelBtypes;
-
- /*
- * Debugging counters, etc.
- */
- unsigned long debugFlowQueued;
- unsigned long debugInlineQueued;
- unsigned long debugDataQueued;
- unsigned long debugBypassQueued;
- unsigned long debugFlowCount;
- unsigned long debugInlineCount;
- unsigned long debugBypassCount;
-
- rwlock_t read_fifo_spinlock;
- rwlock_t write_fifo_spinlock;
-
-// For queuing interrupt bottom half handlers. /\/\|=mhw=|\/\/
- struct work_struct tqueue_interrupt;
-
- struct timer_list SendPendingTimer; // Used by iiSendPending
- unsigned int SendPendingRetry;
-} i2eBordStr, *i2eBordStrPtr;
-
-//-------------------------------------------------------------------
-// Macro Definitions for the indirect calls defined in the i2eBordStr
-//-------------------------------------------------------------------
-//
-#define iiDelay(a,b) (*(a)->i2eDelay)(b)
-#define iiWriteBuf(a,b,c) (*(a)->i2eWriteBuf)(a,b,c)
-#define iiReadBuf(a,b,c) (*(a)->i2eReadBuf)(a,b,c)
-
-#define iiWriteWord(a,b) (*(a)->i2eWriteWord)(a,b)
-#define iiReadWord(a) (*(a)->i2eReadWord)(a)
-
-#define iiWaitForTxEmpty(a,b) (*(a)->i2eWaitForTxEmpty)(a,b)
-
-#define iiTxMailEmpty(a) (*(a)->i2eTxMailEmpty)(a)
-#define iiTrySendMail(a,b) (*(a)->i2eTrySendMail)(a,b)
-
-#define iiGetMail(a) (*(a)->i2eGetMail)(a)
-#define iiEnableMailIrq(a) (*(a)->i2eEnableMailIrq)(a)
-#define iiDisableMailIrq(a) (*(a)->i2eWriteMask)(a,0)
-#define iiWriteMask(a,b) (*(a)->i2eWriteMask)(a,b)
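For illustration only (not part of the driver), a sketch of how these wrappers might be combined to push a single byte into the outbound mailbox, retrying briefly; the helper name and the retry budget are arbitrary assumptions.

/* Sketch: hand one mailbox byte to the board, polling briefly. Assumes the
 * board structure has been fully initialized so the indirect calls are valid.
 */
static int ii_example_send_mail(i2eBordStrPtr pB, unsigned char mail)
{
	int tries;

	for (tries = 0; tries < 10; tries++) {
		if (iiTrySendMail(pB, mail))	/* mailbox was empty: sent */
			return 1;
		iiDelay(pB, 1);			/* ~1 ms, then retry */
	}
	return 0;	/* board never drained its outbound mailbox */
}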
-
-//-------------------------------------------
-// Manifests for i2eBordStr:
-//-------------------------------------------
-
-typedef void (*delayFunc_t)(unsigned int);
-
-// i2eValid
-//
-#define I2E_MAGIC 0x4251 // Structure is valid.
-#define I2E_INCOMPLETE 0x1122 // Structure failed during init.
-
-
-// i2eError
-//
-#define I2EE_GOOD 0 // Operation successful
-#define I2EE_BADADDR 1 // Address out of range
-#define I2EE_BADSTATE 2 // Attempt to perform a function when the board
- // structure was in the incorrect state
-#define I2EE_BADMAGIC 3 // Bad magic number from Power On test (i2ePomSize
- // reflects what was read)
-#define I2EE_PORM_SHORT 4 // Power On message too short
-#define I2EE_PORM_LONG 5 // Power On message too long
-#define I2EE_BAD_FAMILY 6 // Un-supported board family type
-#define I2EE_INCONSIST 7 // Firmware reports something impossible,
- // e.g. unexpected number of ports... Almost no
- // excuse other than bad FIFO...
-#define I2EE_POSTERR 8 // Power-On self test reported a bad error
-#define I2EE_BADBUS 9 // Unknown Bus type declared in message
-#define I2EE_TXE_TIME 10 // Timed out waiting for TX Fifo to empty
-#define I2EE_INVALID 11 // i2eValid field does not indicate a valid and
- // complete board structure (for functions which
- // require this be so.)
-#define I2EE_BAD_PORT 12 // Discrepancy between channels actually found and
- // what the product is supposed to have. Check
- // i2eGoodMap vs i2eChannelMap for details.
-#define I2EE_BAD_IRQ 13 // Someone specified an unsupported IRQ
-#define I2EE_NOCHANNELS 14 // No channel structures have been defined (for
- // functions requiring this).
-
-// i2eFifoStyle
-//
-#define FIFO_II 0 /* IntelliPort-II style: see also i2hw.h */
-#define FIFO_IIEX 1 /* IntelliPort-IIEX style */
-
-// i2eGetMail
-//
-#define NO_MAIL_HERE 0x1111 // Since mail is unsigned char, cannot possibly
- // promote to 0x1111.
-// i2eState
-//
-#define II_STATE_COLD 0 // Addresses have been defined, but board not even
- // reset yet.
-#define II_STATE_RESET 1 // Board, if it exists, has just been reset
-#define II_STATE_READY 2 // Board ready for its first block
-#define II_STATE_LOADING 3 // Board continuing load
-#define II_STATE_LOADED 4 // Board has finished load: status ok
-#define II_STATE_BADLOAD 5 // Board has finished load: failed!
-#define II_STATE_STDLOADED 6 // Board has finished load: standard firmware
-
-// i2eUsingIrq
-//
-#define I2_IRQ_UNDEFINED 0x1352 /* No valid irq (or polling = 0) can
- * ever promote to this! */
-//------------------------------------------
-// Handy Macros for i2ellis.c and others
-// Note these are common to -II and -IIEX
-//------------------------------------------
-
-// Given a pointer to the board structure, does the input FIFO have any data or
-// not?
-//
-#define I2_HAS_INPUT(pB) !(inb(pB->i2eStatus) & ST_IN_EMPTY)
-
-// Given a pointer to the board structure, is there anything in the incoming
-// mailbox?
-//
-#define I2_HAS_MAIL(pB) (inb(pB->i2eStatus) & ST_IN_MAIL)
-
-#define I2_UPDATE_FIFO_ROOM(pB) ((pB)->i2eFifoRemains = (pB)->i2eFifoSize)
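A hedged sketch of how the two status macros might be polled together; the helper, its millisecond budget, and the use of iiDelay() for pacing are illustrative assumptions, not driver code.

/* Sketch: wait until the board has either inbound FIFO data or inbound mail,
 * or the (arbitrary) millisecond budget runs out.
 */
static int ii_example_wait_for_activity(i2eBordStrPtr pB, int timeout_ms)
{
	while (timeout_ms-- > 0) {
		if (I2_HAS_INPUT(pB) || I2_HAS_MAIL(pB))
			return 1;
		iiDelay(pB, 1);
	}
	return 0;
}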
-
-//------------------------------------------
-// Function Declarations for i2ellis.c
-//------------------------------------------
-//
-// Functions called directly
-//
-// Initialization of a board & structure is in four (five!) parts:
-//
-// 1) iiSetAddress() - Define the board address & delay function for a board.
-// 2) iiReset() - Reset the board (provided it exists)
-// -- Note you may do this to several boards --
-// 3) iiResetDelay() - Delay for 2 seconds (once for all boards)
-// 4) iiInitialize() - Attempt to read Power-up message; further initialize
-// accelerators
-//
-// Then you may use iiDownloadAll() or iiDownloadFile() (in i2file.c) to write
-// loadware. To change loadware, you must begin again with step 2, resetting
-// the board again (step 1 not needed).
-
-static int iiSetAddress(i2eBordStrPtr, int, delayFunc_t );
-static int iiReset(i2eBordStrPtr);
-static int iiResetDelay(i2eBordStrPtr);
-static int iiInitialize(i2eBordStrPtr);
-
-// Routine to validate that all channels expected are there.
-//
-extern int iiValidateChannels(i2eBordStrPtr);
-
-// Routine used to download a block of loadware.
-//
-static int iiDownloadBlock(i2eBordStrPtr, loadHdrStrPtr, int);
-
-// Return values given by iiDownloadBlock, iiDownloadAll, iiDownloadFile:
-//
-#define II_DOWN_BADVALID 0 // board structure is invalid
-#define II_DOWN_CONTINUING 1 // So far, so good, firmware expects more
-#define II_DOWN_GOOD 2 // Download complete, CRC good
-#define II_DOWN_BAD 3 // Download complete, but CRC bad
-#define II_DOWN_BADFILE 4 // Bad magic number in loadware file
-#define II_DOWN_BADSTATE 5 // Board is in an inappropriate state for
- // downloading loadware. (see i2eState)
-#define II_DOWN_TIMEOUT 6 // Timeout waiting for firmware
-#define II_DOWN_OVER 7 // Too much data
-#define II_DOWN_UNDER 8 // Not enough data
-#define II_DOWN_NOFILE 9 // Loadware file not found
-
-// Routine to download an entire loadware module: Return values are a subset of
-// iiDownloadBlock's, excluding, of course, II_DOWN_CONTINUING
-//
-static int iiDownloadAll(i2eBordStrPtr, loadHdrStrPtr, int, int);
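The sequence above can be summarized in a short sketch (not part of the original driver). The use of msleep() as the delay callback and the interpretation of iiDownloadAll()'s last two arguments (standard-loadware flag, image size in bytes) are assumptions based on the prototypes; only the ordering is taken from the comment above.

/* Sketch of the bring-up order: address, reset, delay, initialize, download.
 * Assumes <linux/delay.h> for msleep(), which matches delayFunc_t.
 */
static int ii_example_bringup(i2eBordStrPtr pB, int io_base,
			      loadHdrStrPtr fw, int fw_size)
{
	if (!iiSetAddress(pB, io_base, msleep))		/* step 1 */
		return -1;
	if (!iiReset(pB))				/* step 2 */
		return -1;
	iiResetDelay(pB);				/* step 3: ~2 s */
	if (!iiInitialize(pB))				/* step 4 */
		return -1;
	if (iiDownloadAll(pB, fw, 1, fw_size) != II_DOWN_GOOD)	/* loadware */
		return -1;
	return 0;
}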
-
-// Many functions defined here return True if good, False otherwise, with an
-// error code in i2eError field. Here is a handy macro for setting the error
-// code and returning.
-//
-#define I2_COMPLETE(pB,code) do { \
- pB->i2eError = code; \
- return (code == I2EE_GOOD);\
- } while (0)
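A small sketch of the convention just described; the helper below is hypothetical and exists only to show how I2_COMPLETE() records i2eError and yields the boolean return value.

/* Sketch: typical use of the error-return convention. */
static int ii_example_check_board(i2eBordStrPtr pB)
{
	if (pB->i2eValid != I2E_MAGIC)
		I2_COMPLETE(pB, I2EE_INVALID);

	I2_COMPLETE(pB, I2EE_GOOD);
}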
-
-#endif // I2ELLIS_H
diff --git a/drivers/staging/tty/ip2/i2hw.h b/drivers/staging/tty/ip2/i2hw.h
deleted file mode 100644
index 8df2f487217..00000000000
--- a/drivers/staging/tty/ip2/i2hw.h
+++ /dev/null
@@ -1,652 +0,0 @@
-/*******************************************************************************
-*
-* (c) 1999 by Computone Corporation
-*
-********************************************************************************
-*
-*
-* PACKAGE: Linux tty Device Driver for IntelliPort II family of multiport
-* serial I/O controllers.
-*
-* DESCRIPTION: Definitions limited to properties of the hardware or the
-* bootstrap firmware. As such, they are applicable regardless of
-* operating system or loadware (standard or diagnostic).
-*
-*******************************************************************************/
-#ifndef I2HW_H
-#define I2HW_H 1
-//------------------------------------------------------------------------------
-// Revision History:
-//
-// 23 September 1991 MAG First Draft Started...through...
-// 11 October 1991 ... Continuing development...
-// 6 August 1993 Added support for ISA-4 (asic) which is architected
-// as an ISA-CEX with a single 4-port box.
-//
-// 20 December 1996 AKM Version for Linux
-//
-//------------------------------------------------------------------------------
-/*------------------------------------------------------------------------------
-
-HARDWARE DESCRIPTION:
-
-Introduction:
-
-The IntelliPort-II and IntelliPort-IIEX products occupy a block of eight (8)
-addresses in the host's I/O space.
-
-Some addresses are used to transfer data to/from the board, some to transfer
-so-called "mailbox" messages, and some to read bit-mapped status information.
-While all the products in the line are functionally similar, some use a 16-bit
-data path to transfer data while others use an 8-bit path. Also, the use of
-command /status/mailbox registers differs slightly between the II and IIEX
-branches of the family.
-
-The host determines what type of board it is dealing with by reading a string of
-sixteen characters from the board. These characters are always placed in the
-fifo by the board's local processor whenever the board is reset (either from
-power-on or under software control) and are known as the "Power-on Reset
-Message." In order that this message can be read from either type of board, the
-hardware registers used in reading this message are the same. Once this message
-has been read by the host, then it has the information required to operate.
-
-General Differences between boards:
-
-The greatest structural difference is between the -II and -IIEX families of
-product. The -II boards use the Am4701 dual 512x8 bidirectional fifo to support
-the data path, mailbox registers, and status registers. This chip contains some
-features which are not used in the IntelliPort-II products; a description of
-these is omitted here. Because of these many features, it contains many
-registers, too many to access directly within a small address space. They are
-accessed by first writing a value to a "pointer" register. This value selects
-the register to be accessed. The next read or write to that address accesses
-the selected register rather than the pointer register.
-
-The -IIEX boards use a proprietary design similar to the Am4701 in function. But
-because of a simpler, more streamlined design it doesn't require so many
-registers. This means they can be accessed directly in single operations rather
-than through a pointer register.
-
-Besides these differences, there are differences in whether 8-bit or 16-bit
-transfers are used to move data to the board.
-
-The -II boards are capable only of 8-bit data transfers, while the -IIEX boards
-may be configured for either 8-bit or 16-bit data transfers. If the on-board DIP
-switch #8 is ON, and the card has been installed in a 16-bit slot, 16-bit
-transfers are supported (and will be expected by the standard loadware). The
-on-board firmware can determine the position of the switch, and whether the
-board is installed in a 16-bit slot; it supplies this information to the host as
-part of the power-up reset message.
-
-The configuration switch (#8) and slot selection do not directly configure the
-hardware. It is up to the on-board loadware and host-based drivers to act
-according to the selected options. That is, loadware and drivers could be
-written to perform 8-bit transfers regardless of the state of the DIP switch or
-slot (and in a diagnostic environment might well do so). Likewise, 16-bit
-transfers could be performed as long as the card is in a 16-bit slot.
-
-Note the slot selection and DIP switch selection are provided separately: a
-board running in 8-bit mode in a 16-bit slot has a greater range of possible
-interrupts to choose from; information of potential use to the host.
-
-All 8-bit data transfers are done in the same way, regardless of whether on a
--II board or a -IIEX board.
-
-The host must consider two things then: 1) whether a -II or -IIEX product is
-being used, and 2) whether an 8-bit or 16-bit data path is used.
-
-A further difference is that -II boards always have a 512-byte fifo operating in
-each direction. -IIEX boards may use fifos of varying size; this size is
-reported as part of the power-up message.
-
-I/O Map Of IntelliPort-II and IntelliPort-IIEX boards:
-(Relative to the chosen base address)
-
-Addr R/W IntelliPort-II IntelliPort-IIEX
----- --- -------------- ----------------
-0 R/W Data Port (byte) Data Port (byte or word)
-1 R/W (Not used) (MSB of word-wide data written to Data Port)
-2 R Status Register Status Register
-2 W Pointer Register Interrupt Mask Register
-3 R/W (Not used) Mailbox Registers (6 bits: 11111100)
-4,5 -- Reserved for future products
-6 -- Reserved for future products
-7 R Guaranteed to have no effect
-7 W Hardware reset of board.
-
-
-Rules:
-All data transfers are performed using the even i/o address. If byte-wide data
-transfers are being used, do INB/OUTB operations on the data port. If word-wide
-transfers are used, do INW/OUTW operations. In some circumstances (such as
-reading the power-up message) you will do INB from the data port, but in this
-case the MSB of each word read is lost. When accessing all other unreserved
-registers, use byte operations only.
-------------------------------------------------------------------------------*/
-
-//------------------------------------------------
-// Mandatory Includes:
-//------------------------------------------------
-//
-#include "ip2types.h"
-
-//-------------------------------------------------------------------------
-// Manifests for the I/O map:
-//-------------------------------------------------------------------------
-// R/W: Data port (byte) for IntelliPort-II,
-// R/W: Data port (byte or word) for IntelliPort-IIEX
-// Incoming or outgoing data passes through a FIFO, the status of which is
-// available in some of the bits in FIFO_STATUS. This (bidirectional) FIFO is
-// the primary means of transferring data, commands, flow-control, and status
-// information between the host and board.
-//
-#define FIFO_DATA 0
-
-// Another way of passing information between the board and the host is
-// through "mailboxes". Unlike a FIFO, a mailbox holds only a single byte of
-// data. Writing data to the mailbox causes a status bit to be set, and
-// potentially interrupting the intended receiver. The sender has some way to
-// determine whether the data has been read yet; as soon as it has, it may send
-// more. The mailboxes are handled differently on -II and -IIEX products, as
-// suggested below.
-//------------------------------------------------------------------------------
-// Read: Status Register for IntelliPort-II or -IIEX
-// The presence of any bit set here will cause an interrupt to the host,
-// provided the corresponding bit has been unmasked in the interrupt mask
-// register. Furthermore, interrupts to the host are disabled globally until the
-// loadware selects the irq line to use. With the exception of STN_MR, the bits
-// remain set so long as the associated condition is true.
-//
-#define FIFO_STATUS 2
-
-// Bit map of status bits which are identical for -II and -IIEX
-//
-#define ST_OUT_FULL 0x40 // Outbound FIFO full
-#define ST_IN_EMPTY 0x20 // Inbound FIFO empty
-#define ST_IN_MAIL 0x04 // Inbound Mailbox full
-
-// The following exists only on the Intelliport-IIEX, and indicates that the
-// board has not read the last outgoing mailbox data yet. In the IntelliPort-II,
-// the outgoing mailbox may be read back: a zero indicates the board has read
-// the data.
-//
-#define STE_OUT_MAIL 0x80 // Outbound mailbox full (!)
-
-// The following bits are defined differently for -II and -IIEX boards. Code
-// which relies on these bits will need to be functionally different for the two
-// types of boards and should be generally avoided because of the additional
-// complexity this creates:
-
-// Bit map of status bits only on -II
-
-// Fifo has been RESET (cleared when the status register is read). Note that
-// this condition cannot be masked and would always interrupt the host, except
-// that the hardware reset also disables interrupts globally from the board
-// until re-enabled by loadware. This could also arise from the
-// Am4701-supported command to reset the chip, but this command is generally not
-// used here.
-//
-#define STN_MR 0x80
-
-// See the AMD Am4701 data sheet for details on the following four bits. They
-// are not presently used by Computone drivers.
-//
-#define STN_OUT_AF 0x10 // Outbound FIFO almost full (programmable)
-#define STN_IN_AE 0x08 // Inbound FIFO almost empty (programmable)
-#define STN_BD 0x02 // Inbound byte detected
-#define STN_PE 0x01 // Parity/Framing condition detected
-
-// Bit-map of status bits only on -IIEX
-//
-#define STE_OUT_HF 0x10 // Outbound FIFO half full
-#define STE_IN_HF 0x08 // Inbound FIFO half full
-#define STE_IN_FULL 0x02 // Inbound FIFO full
-#define STE_OUT_MT 0x01 // Outbound FIFO empty
-
-//------------------------------------------------------------------------------
-
-// Intelliport-II -- Write Only: the pointer register.
-// Values are written to this register to select the Am4701 internal register to
-// be accessed on the next operation.
-//
-#define FIFO_PTR 0x02
-
-// Values for the pointer register
-//
-#define SEL_COMMAND 0x1 // Selects the Am4701 command register
-
-// Some possible commands:
-//
-#define SEL_CMD_MR 0x80 // Am4701 command to reset the chip
-#define SEL_CMD_SH 0x40 // Am4701 command to map the "other" port into the
- // status register.
-#define SEL_CMD_UNSH 0 // Am4701 command to "unshift": port maps into its
- // own status register.
-#define SEL_MASK 0x2 // Selects the Am4701 interrupt mask register. The
- // interrupt mask register is bit-mapped to match
- // the status register (FIFO_STATUS) except for
- // STN_MR. (See above.)
-#define SEL_BYTE_DET 0x3 // Selects the Am4701 byte-detect register. (Not
- // normally used except in diagnostics.)
-#define SEL_OUTMAIL 0x4 // Selects the outbound mailbox (R/W). Reading back
- // a value of zero indicates that the mailbox has
- // been read by the board and is available for more
- // data. Writing to the mailbox optionally
- // interrupts the board, depending on the loadware's
- // setting of its interrupt mask register.
-#define SEL_AEAF 0x5 // Selects AE/AF threshold register.
-#define SEL_INMAIL 0x6 // Selects the inbound mailbox (Read)
-
-//------------------------------------------------------------------------------
-// IntelliPort-IIEX -- Write Only: interrupt mask (and misc flags) register:
-// Unlike IntelliPort-II, bit assignments do NOT match those of the status
-// register.
-//
-#define FIFO_MASK 0x2
-
-// Mailbox readback select:
-// If set, reads to FIFO_MAIL will read the OUTBOUND mailbox (host to board). If
-// clear (default on reset) reads to FIFO_MAIL will read the INBOUND mailbox.
-// This is the normal situation. The clearing of a mailbox is determined on
-// -IIEX boards by waiting for the STE_OUT_MAIL bit to clear. Readback
-// capability is provided for diagnostic purposes only.
-//
-#define MX_OUTMAIL_RSEL 0x80
-
-#define MX_IN_MAIL 0x40 // Enables interrupts when incoming mailbox goes
- // full (ST_IN_MAIL set).
-#define MX_IN_FULL 0x20 // Enables interrupts when incoming FIFO goes full
- // (STE_IN_FULL).
-#define MX_IN_MT 0x08 // Enables interrupts when incoming FIFO goes empty
- // (ST_IN_EMPTY).
-#define MX_OUT_FULL 0x04 // Enables interrupts when outgoing FIFO goes full
- // (ST_OUT_FULL).
-#define MX_OUT_MT 0x01 // Enables interrupts when outgoing FIFO goes empty
- // (STE_OUT_MT).
-
-// Any remaining bits are reserved, and should be written to ZERO for
-// compatibility with future Computone products.
-
-//------------------------------------------------------------------------------
-// IntelliPort-IIEX: -- These are only 6-bit mailboxes !!! -- 11111100 (low two
-// bits always read back 0).
-// Read: One of the mailboxes, usually Inbound.
-// Inbound Mailbox (MX_OUTMAIL_RSEL = 0)
-// Outbound Mailbox (MX_OUTMAIL_RSEL = 1)
-// Write: Outbound Mailbox
-// For the IntelliPort-II boards, the outbound mailbox is read back to determine
-// whether the board has read the data (0 --> data has been read). For the
-// IntelliPort-IIEX, this is done by reading a status register. To determine
-// whether mailbox is available for more outbound data, use the STE_OUT_MAIL bit
-// in FIFO_STATUS. Moreover, although the Outbound Mailbox can be read back by
-// setting MX_OUTMAIL_RSEL, it is NOT cleared when the board reads it, as is the
-// case with the -II boards. For this reason, FIFO_MAIL is normally used to read
-// the inbound FIFO, and MX_OUTMAIL_RSEL kept clear. (See above for
-// MX_OUTMAIL_RSEL description.)
-//
-#define FIFO_MAIL 0x3
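For illustration (not part of this header), a sketch of fetching one inbound mailbox byte on a -IIEX board using the status bit described above; the helper and its bare I/O-base parameter are assumptions for the example.

/* Sketch: read the inbound mailbox if the status register says it is full. */
static int iiex_example_get_mail(int base, unsigned char *mail)
{
	if (!(inb(base + FIFO_STATUS) & ST_IN_MAIL))
		return 0;			/* nothing waiting */

	*mail = inb(base + FIFO_MAIL);		/* low two bits read back 0 */
	return 1;
}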
-
-//------------------------------------------------------------------------------
-// WRITE ONLY: Resets the board. (Data doesn't matter).
-//
-#define FIFO_RESET 0x7
-
-//------------------------------------------------------------------------------
-// READ ONLY: Will have no effect. (Data is undefined.)
-// Actually, there will be an effect, in that the operation is sure to generate
-// a bus cycle: viz., an I/O byte Read. This fact can be used to enforce short
-// delays when no comparable time constant is available.
-//
-#define FIFO_NOP 0x7
-
-//------------------------------------------------------------------------------
-// RESET & POWER-ON RESET MESSAGE
-/*------------------------------------------------------------------------------
-RESET:
-
-The IntelliPort-II and -IIEX boards are reset in three ways: Power-up, channel
-reset, and via a write to the reset register described above. For products using
-the ISA bus, these three sources of reset are equivalent. For MCA and EISA buses,
-the Power-up and channel reset sources cause additional hardware initialization
-which should only occur at system startup time.
-
-The third type of reset, called a "command reset", is done by writing any data
-to the FIFO_RESET address described above. This resets the on-board processor,
-FIFO, UARTS, and associated hardware.
-
-This passes control of the board to the bootstrap firmware, which performs a
-Power-On Self Test and which detects its current configuration. For example,
--IIEX products determine the size of FIFO which has been installed, and the
-number and type of expansion boxes attached.
-
-This and other information is then written to the FIFO in a 16-byte data block
-to be read by the host. This block is guaranteed to be present within two (2)
-seconds of having received the command reset. The firmware is now ready to
-receive loadware from the host.
-
-It is good practice to perform a command reset to the board explicitly as part
-of your software initialization. This allows your code to properly restart from
-a soft boot. (Many systems do not issue channel reset on soft boot).
-
-Because of a hardware reset problem on some of the Cirrus Logic 1400's which are
-used on the product, it is recommended that you reset the board twice, separated
-by an approximately 50 millisecond delay. (VERY approximately: probably ok to
-be off by a factor of five.) The important point is that the first command reset
-in fact generates a reset pulse on the board. This pulse is guaranteed to last
-less than 10 milliseconds. The additional delay ensures the 1400 has had the
-chance to respond sufficiently to the first reset. Why not a longer delay? Much
-more than 50 milliseconds gets to be noticeable, but the board would still work.
-
-Once all 16 bytes of the Power-on Reset Message have been read, the bootstrap
-firmware is ready to receive loadware.
-
-Note on Power-on Reset Message format:
-The various fields have been designed with future expansion in view.
-Combinations of bitfields and values have been defined which define products
-which may not currently exist. This has been done to allow drivers to anticipate
-the possible introduction of products in a systematic fashion. This is not
-intended to suggest that each potential product is actually under consideration.
-------------------------------------------------------------------------------*/
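As an illustration of the note above (not the driver's actual reset path), a sketch of the double command reset with the suggested spacing; "base" is a placeholder I/O base address and msleep() is assumed from <linux/delay.h>.

/* Sketch: command-reset the board twice, ~50 ms apart, per the note above. */
static void ii_example_command_reset(int base)
{
	outb(0, base + FIFO_RESET);	/* data written is ignored */
	msleep(50);			/* let the first reset pulse settle */
	outb(0, base + FIFO_RESET);
}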
-
-//----------------------------------------
-// Format of Power-on Reset Message
-//----------------------------------------
-
-typedef union _porStr // "por" stands for Power On Reset
-{
- unsigned char c[16]; // array used when considering the message as a
- // string of undifferentiated characters
-
- struct // Elements used when considering values
- {
- // The first two bytes out of the FIFO are two magic numbers. These are
- // intended to establish that there is indeed a member of the
- // IntelliPort-II(EX) family present. The remaining bytes may be
- // expected to be valid. When reading the Power-on Reset message,
- // if the magic numbers do not match it is probably best to stop
- // reading immediately. You are certainly not reading our board (unless
- // hardware is faulty), and may in fact be reading some other piece of
- // hardware.
-
- unsigned char porMagic1; // magic number: first byte == POR_MAGIC_1
- unsigned char porMagic2; // magic number: second byte == POR_MAGIC_2
-
- // The Version, Revision, and Subrevision are stored as absolute numbers
- // and would normally be displayed in the format V.R.S (e.g. 1.0.2)
-
- unsigned char porVersion; // Bootstrap firmware version number
- unsigned char porRevision; // Bootstrap firmware revision number
- unsigned char porSubRev; // Bootstrap firmware sub-revision number
-
- unsigned char porID; // Product ID: Bit-mapped according to
- // conventions described below. Among other
- // things, this allows us to distinguish
- // IntelliPort-II boards from IntelliPort-IIEX
- // boards.
-
- unsigned char porBus; // IntelliPort-II: Unused
- // IntelliPort-IIEX: Bus Information:
- // Bit-mapped below
-
- unsigned char porMemory; // On-board DRAM size: in 32k blocks
-
- // porPorts1 (and porPorts2) are used to determine the ports which are
- // available to the board. For non-expandable product, a single number
- // is sufficient. For expandable product, the board may be connected
- // to as many as four boxes. Each box may be (so far) either a 16-port
- // or an 8-port size. Whenever an 8-port box is used, the remaining 8
- // ports leave gaps between existing channels. For that reason,
- // expandable products must report a MAP of available channels. Since
- // each UART supports four ports, we represent each UART found by a
- // single bit. Using two bytes to supply the mapping information we
- // report the presence or absence of up to 16 UARTS, or 64 ports in
- // steps of 4 ports. For -IIEX products, the ports are numbered
- // starting at the box closest to the controller in the "chain".
-
- // Interpreted Differently for IntelliPort-II and -IIEX.
- // -II: Number of ports (Derived actually from product ID). See
- // Diag1&2 to indicate if uart was actually detected.
- // -IIEX: Bit-map of UARTS found, LSB (see below for MSB of this). This
- // bitmap is based on detecting the uarts themselves;
- // see porFlags for information from the box i.d's.
- unsigned char porPorts1;
-
- unsigned char porDiag1; // Results of on-board P.O.S.T, 1st byte
- unsigned char porDiag2; // Results of on-board P.O.S.T, 2nd byte
- unsigned char porSpeed; // Speed of local CPU: given as MHz x10
- // e.g., 16.0 MHz CPU is reported as 160
- unsigned char porFlags; // Misc information (see manifests below)
- // Bit-mapped: CPU type, UART's present
-
- unsigned char porPorts2; // -II: Undefined
- // -IIEX: Bit-map of UARTS found, MSB (see
- // above for LSB)
-
- // IntelliPort-II: undefined
- // IntelliPort-IIEX: 1 << porFifoSize gives the size, in bytes, of the
- // host interface FIFO, in each direction. When running the -IIEX in
- // 8-bit mode, fifo capacity is halved. The bootstrap firmware will
- // have already accounted for this fact in generating this number.
- unsigned char porFifoSize;
-
- // IntelliPort-II: undefined
- // IntelliPort-IIEX: The number of boxes connected. (Presently 1-4)
- unsigned char porNumBoxes;
- } e;
-} porStr, *porStrPtr;
-
-//--------------------------
-// Values for porStr fields
-//--------------------------
-
-//---------------------
-// porMagic1, porMagic2
-//----------------------
-//
-#define POR_MAGIC_1 0x96 // The only valid value for porMagic1
-#define POR_MAGIC_2 0x35 // The only valid value for porMagic2
-#define POR_1_INDEX 0 // Byte position of POR_MAGIC_1
-#define POR_2_INDEX 1 // Ditto for POR_MAGIC_2
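To illustrate the magic-number check, a sketch (not driver code) of reading the 16-byte message byte-wise as the hardware notes above describe; "base" is a placeholder I/O base address, inb() is assumed available, and a real driver would bound the busy-wait using the two-second guarantee.

/* Sketch: pull the Power-on Reset message out of the FIFO and verify it. */
static int ii_example_read_por(int base, porStr *por)
{
	int i;

	for (i = 0; i < 16; i++) {
		while (inb(base + FIFO_STATUS) & ST_IN_EMPTY)
			;	/* busy-wait; a real driver would time out */
		por->c[i] = inb(base + FIFO_DATA);
	}
	return por->c[POR_1_INDEX] == POR_MAGIC_1 &&
	       por->c[POR_2_INDEX] == POR_MAGIC_2;
}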
-
-//----------------------
-// porID
-//----------------------
-//
-#define POR_ID_FAMILY 0xc0 // These bits indicate the general family of
- // product.
-#define POR_ID_FII 0x00 // Family is "IntelliPort-II"
-#define POR_ID_FIIEX 0x40 // Family is "IntelliPort-IIEX"
-
-// These bits are reserved, presently zero. May be used at a later date to
-// convey other product information.
-//
-#define POR_ID_RESERVED 0x3c
-
-#define POR_ID_SIZE 0x03 // Remaining bits indicate number of ports &
- // Connector information.
-#define POR_ID_II_8 0x00 // For IntelliPort-II, indicates 8-port using
- // standard brick.
-#define POR_ID_II_8R 0x01 // For IntelliPort-II, indicates 8-port using
- // RJ11's (no CTS)
-#define POR_ID_II_6 0x02 // For IntelliPort-II, indicates 6-port using
- // RJ45's
-#define POR_ID_II_4 0x03 // For IntelliPort-II, indicates 4-port using
- // 4xRJ45 connectors
-#define POR_ID_EX 0x00 // For IntelliPort-IIEX, indicates standard
- // expandable controller (other values reserved)
-
-//----------------------
-// porBus
-//----------------------
-
-// IntelliPort-IIEX only: Board is installed in a 16-bit slot
-//
-#define POR_BUS_SLOT16 0x20
-
-// IntelliPort-IIEX only: DIP switch #8 is on, selecting 16-bit host interface
-// operation.
-//
-#define POR_BUS_DIP16 0x10
-
-// Bits 0-2 indicate type of bus: This information is stored in the bootstrap
-// loadware, different loadware being used on different products for different
-// buses. For most situations, the drivers do not need this information; but it
-// is handy in a diagnostic environment. For example, on microchannel boards,
-// you would not want to try to test several interrupts, only the one for which
-// you were configured.
-//
-#define POR_BUS_TYPE 0x07
-
-// Unknown: this product doesn't know what bus it is running in. (e.g. if same
-// bootstrap firmware were wanted for two different buses.)
-//
-#define POR_BUS_T_UNK 0
-
-// Note: existing firmware for ISA-8 and MC-8 currently report the POR_BUS_T_UNK
-// state, since the same bootstrap firmware is used for each.
-
-#define POR_BUS_T_MCA 1 // MCA BUS
-#define POR_BUS_T_EISA 2 // EISA BUS
-#define POR_BUS_T_ISA 3 // ISA BUS
-
-// Values 4-7 Reserved
-
-// Remaining bits are reserved
-
-//----------------------
-// porDiag1
-//----------------------
-
-#define POR_BAD_MAPPER 0x80 // HW failure on P.O.S.T: Chip mapper failed
-
-// These two bits valid only for the IntelliPort-II
-//
-#define POR_BAD_UART1 0x01 // First 1400 bad
-#define POR_BAD_UART2 0x02 // Second 1400 bad
-
-//----------------------
-// porDiag2
-//----------------------
-
-#define POR_DEBUG_PORT 0x80 // debug port was detected by the P.O.S.T
-#define POR_DIAG_OK 0x00 // Indicates passage: Failure codes not yet
- // available.
- // Other bits undefined.
-//----------------------
-// porFlags
-//----------------------
-
-#define POR_CPU 0x03 // These bits indicate supposed CPU type
-#define POR_CPU_8 0x01 // Board uses an 80188 (no such thing yet)
-#define POR_CPU_6 0x02 // Board uses an 80186 (all existing products)
-#define POR_CEX4 0x04 // If set, this is an ISA-CEX/4: An ISA-4 (asic)
- // which is architected like an ISA-CEX connected
- // to a (hitherto impossible) 4-port box.
-#define POR_BOXES 0xf0 // Valid for IntelliPort-IIEX only: Map of Box
- // sizes based on box I.D.
-#define POR_BOX_16 0x10 // Set indicates 16-port, clear 8-port
-
-//-------------------------------------
-// LOADWARE and DOWNLOADING CODE
-//-------------------------------------
-
-/*
-Loadware may be sent to the board in two ways:
-1) It may be read from a (binary image) data file block by block as each block
- is sent to the board. This is only possible when the initialization is
- performed by code which can access your file system. This is most suitable
- for diagnostics and applications which use the interface library directly.
-
-2) It may be hard-coded into your source by including a .h file (typically
- supplied by Computone), which declares a data array and initializes every
- element. This achieves the same result as if an entire loadware file had
- been read into the array.
-
- This requires more data space in your program, but access to the file system
- is not required. This method is more suited to driver code, which typically
- is running at a level too low to access the file system directly.
-
-At present, loadware can only be generated at Computone.
-
-All Loadware begins with a header area which has a particular format. This
-includes a magic number which identifies the file as being (purportedly)
-loadware, CRC (for the loader), and version information.
-*/
-
-
-//-----------------------------------------------------------------------------
-// Format of loadware block
-//
-// This is defined as a union so we can pass a pointer to one of these items
-// and (if it is the first block) pick out the version information, etc.
-//
-// Otherwise, to deal with this as a simple character array
-//------------------------------------------------------------------------------
-
-#define LOADWARE_BLOCK_SIZE 512 // Number of bytes in each block of loadware
-
-typedef union _loadHdrStr
-{
- unsigned char c[LOADWARE_BLOCK_SIZE]; // Valid for every block
-
- struct // These fields are valid for only the first block of loadware.
- {
- unsigned char loadMagic; // Magic number: see below
- unsigned char loadBlocksMore; // How many more blocks?
- unsigned char loadCRC[2]; // Two CRC bytes: used by loader
- unsigned char loadVersion; // Version number
- unsigned char loadRevision; // Revision number
- unsigned char loadSubRevision; // Sub-revision number
- unsigned char loadSpares[9]; // Presently unused
- unsigned char loadDates[32]; // Null-terminated string which can give
- // date and time of compilation
- } e;
-} loadHdrStr, *loadHdrStrPtr;
-
-//------------------------------------
-// Defines for downloading code:
-//------------------------------------
-
-// The loadMagic field in the first block of the loadfile must be this, else the
-// file is not valid.
-//
-#define MAGIC_LOADFILE 0x3c
-
-// How do we know the load was successful? On completion of the load, the
-// bootstrap firmware returns a code to indicate whether it thought the download
-// was valid and intends to execute it. These are the only possible valid codes:
-//
-#define LOADWARE_OK 0xc3 // Download was ok
-#define LOADWARE_BAD 0x5a // Download was bad (CRC error)
-
-// Constants applicable to writing blocks of loadware:
-// The first block of loadware might take 600 mS to load, in extreme cases.
-// (Expandable board: worst case for sending startup messages to the LCD's).
-// The 600mS figure is not really a calculation, but a conservative
-// guess/guarantee. Usually this will be within 100 mS, like subsequent blocks.
-//
-#define MAX_DLOAD_START_TIME 1000 // 1000 mS
-#define MAX_DLOAD_READ_TIME 100 // 100 mS
-
-// Firmware should respond with status (see above) within this long of host
-// having sent the final block.
-//
-#define MAX_DLOAD_ACK_TIME 100 // 100 mS, again!
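Putting the header fields together, a hypothetical first-block sanity check (not part of the driver); a real loader would additionally bound its waits with MAX_DLOAD_START_TIME, MAX_DLOAD_READ_TIME and MAX_DLOAD_ACK_TIME defined above.

/* Sketch: validate the first loadware block and compute the total number of
 * 512-byte blocks expected, per the header fields described above.
 */
static int ii_example_check_loadware(const loadHdrStr *first, int *nblocks)
{
	if (first->e.loadMagic != MAGIC_LOADFILE)
		return 0;			/* not a loadware image */

	*nblocks = first->e.loadBlocksMore + 1;	/* header block + the rest */
	return 1;
}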
-
-//------------------------------------------------------
-// MAXIMUM NUMBER OF PORTS PER BOARD:
-// This is fixed for now (with the expandable), but may
-// be expanding according to even newer products.
-//------------------------------------------------------
-//
-#define ABS_MAX_BOXES 4 // Absolute most boxes per board
-#define ABS_BIGGEST_BOX 16 // Absolute the most ports per box
-#define ABS_MOST_PORTS (ABS_MAX_BOXES * ABS_BIGGEST_BOX)
-
-#define I2_OUTSW(port, addr, count) outsw((port), (addr), (((count)+1)/2))
-#define I2_OUTSB(port, addr, count) outsb((port), (addr), (((count)+1))&-2)
-#define I2_INSW(port, addr, count) insw((port), (addr), (((count)+1)/2))
-#define I2_INSB(port, addr, count) insb((port), (addr), (((count)+1))&-2)
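A brief sketch of the even-count rounding these helpers perform; in practice a driver uses either the word path or the byte path (never both), and "base" and "buf" are placeholders.

/* Sketch: an odd count of 5 becomes 3 word transfers (6 bytes) on the 16-bit
 * path, or 6 byte transfers on the 8-bit path.
 */
static void ii_example_push_buffer(int base, unsigned char *buf)
{
	I2_OUTSW(base + FIFO_DATA, buf, 5);	/* 16-bit data path */
	I2_OUTSB(base + FIFO_DATA, buf, 5);	/* 8-bit data path */
}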
-
-#endif // I2HW_H
-
diff --git a/drivers/staging/tty/ip2/i2lib.c b/drivers/staging/tty/ip2/i2lib.c
deleted file mode 100644
index 13a3caba85f..00000000000
--- a/drivers/staging/tty/ip2/i2lib.c
+++ /dev/null
@@ -1,2214 +0,0 @@
-/*******************************************************************************
-*
-* (c) 1999 by Computone Corporation
-*
-********************************************************************************
-*
-*
-* PACKAGE: Linux tty Device Driver for IntelliPort family of multiport
-* serial I/O controllers.
-*
-* DESCRIPTION: High-level interface code for the device driver. Uses the
-* Extremely Low Level Interface Support (i2ellis.c). Provides an
-* interface to the standard loadware, to support drivers or
-* application code. (This is included source code, not a separate
-* compilation module.)
-*
-*******************************************************************************/
-//------------------------------------------------------------------------------
-// Note on Strategy:
-// Once the board has been initialized, it will interrupt us when:
-// 1) It has something in the fifo for us to read (incoming data, flow control
-// packets, or whatever).
-// 2) It has stripped whatever we have sent last time in the FIFO (and
-// consequently is ready for more).
-//
-// Note also that the buffer sizes declared in i2lib.h are VERY SMALL. This
-// worsens performance considerably, but is done so that a great many channels
-// might use only a little memory.
-//------------------------------------------------------------------------------
-
-//------------------------------------------------------------------------------
-// Revision History:
-//
-// 0.00 - 4/16/91 --- First Draft
-// 0.01 - 4/29/91 --- 1st beta release
-// 0.02 - 6/14/91 --- Changes to allow small model compilation
-// 0.03 - 6/17/91 MAG Break reporting protected from interrupts routines with
-// in-line asm added for moving data to/from ring buffers,
-// replacing a variety of methods used previously.
-// 0.04 - 6/21/91 MAG Initial flow-control packets not queued until
-// i2_enable_interrupts time. Former versions would enqueue
-// them at i2_init_channel time, before we knew how many
-// channels were supposed to exist!
-// 0.05 - 10/12/91 MAG Major changes: works through the ellis.c routines now;
-// supports new 16-bit protocol and expandable boards.
-// - 10/24/91 MAG Most changes in place and stable.
-// 0.06 - 2/20/92 MAG Format of CMD_HOTACK corrected: the command takes no
-// argument.
-// 0.07 -- 3/11/92 MAG Support added to store special packet types at interrupt
-// level (mostly responses to specific commands.)
-// 0.08 -- 3/30/92 MAG Support added for STAT_MODEM packet
-// 0.09 -- 6/24/93 MAG i2Link... needed to update number of boards BEFORE
-// turning on the interrupt.
-// 0.10 -- 6/25/93 MAG To avoid gruesome death from a bad board, we sanity check
-// some incoming.
-//
-// 1.1 - 12/25/96 AKM Linux version.
-// - 10/09/98 DMC Revised Linux version.
-//------------------------------------------------------------------------------
-
-//************
-//* Includes *
-//************
-
-#include <linux/sched.h>
-#include "i2lib.h"
-
-
-//***********************
-//* Function Prototypes *
-//***********************
-static void i2QueueNeeds(i2eBordStrPtr, i2ChanStrPtr, int);
-static i2ChanStrPtr i2DeQueueNeeds(i2eBordStrPtr, int );
-static void i2StripFifo(i2eBordStrPtr);
-static void i2StuffFifoBypass(i2eBordStrPtr);
-static void i2StuffFifoFlow(i2eBordStrPtr);
-static void i2StuffFifoInline(i2eBordStrPtr);
-static int i2RetryFlushOutput(i2ChanStrPtr);
-
-// Not a documented part of the library routines (careful...) but the Diagnostic
-// i2diag.c finds them useful to help the throughput in certain limited
-// single-threaded operations.
-static void iiSendPendingMail(i2eBordStrPtr);
-static void serviceOutgoingFifo(i2eBordStrPtr);
-
-// Functions defined in ip2.c as part of interrupt handling
-static void do_input(struct work_struct *);
-static void do_status(struct work_struct *);
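As an orientation aid only, a sketch of the service order implied by the strategy note above, expressed with the prototypes just declared; the exact ordering and locking in the real interrupt path may differ.

/* Sketch: on an interrupt, drain what the board sent, then refill the
 * outbound FIFO from the per-need queues.
 */
static void i2_example_service_board(i2eBordStrPtr pB)
{
	i2StripFifo(pB);		/* incoming data / flow / status */
	i2StuffFifoBypass(pB);		/* queued bypass commands */
	i2StuffFifoFlow(pB);		/* queued flow-control packets */
	i2StuffFifoInline(pB);		/* queued data / inline commands */
}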
-
-//***************
-//* Debug Data *
-//***************
-#ifdef DEBUG_FIFO
-
-unsigned char DBGBuf[0x4000];
-unsigned short I = 0;
-
-static void
-WriteDBGBuf(char *s, unsigned char *src, unsigned short n )
-{
- unsigned char *p = src;
-
- // XXX: We need a spin lock here if we ever use this again
-
- while (*s) { // copy label
- DBGBuf[I] = *s++;
- I = (I + 1) & 0x3fff;
- }
- while (n--) { // copy data
- DBGBuf[I] = *p++;
- I = (I + 1) & 0x3fff;
- }
-}
-
-static void
-fatality(i2eBordStrPtr pB )
-{
- int i;
-
- for (i=0;i<sizeof(DBGBuf);i++) {
- if ((i%16) == 0)
- printk("\n%4x:",i);
- printk("%02x ",DBGBuf[i]);
- }
- printk("\n");
- for (i=0;i<sizeof(DBGBuf);i++) {
- if ((i%16) == 0)
- printk("\n%4x:",i);
- if (DBGBuf[i] >= ' ' && DBGBuf[i] <= '~') {
- printk(" %c ",DBGBuf[i]);
- } else {
- printk(" . ");
- }
- }
- printk("\n");
- printk("Last index %x\n",I);
-}
-#endif /* DEBUG_FIFO */
-
-//********
-//* Code *
-//********
-
-static inline int
-i2Validate ( i2ChanStrPtr pCh )
-{
- //ip2trace(pCh->port_index, ITRC_VERIFY,ITRC_ENTER,2,pCh->validity,
- // (CHANNEL_MAGIC | CHANNEL_SUPPORT));
- return ((pCh->validity & (CHANNEL_MAGIC_BITS | CHANNEL_SUPPORT))
- == (CHANNEL_MAGIC | CHANNEL_SUPPORT));
-}
-
-static void iiSendPendingMail_t(unsigned long data)
-{
- i2eBordStrPtr pB = (i2eBordStrPtr)data;
-
- iiSendPendingMail(pB);
-}
-
-//******************************************************************************
-// Function: iiSendPendingMail(pB)
-// Parameters: Pointer to a board structure
-// Returns: Nothing
-//
-// Description:
-// If any outgoing mail bits are set and the outgoing mailbox is empty,
-// send the mail and clear the bits.
-//******************************************************************************
-static void
-iiSendPendingMail(i2eBordStrPtr pB)
-{
- if (pB->i2eOutMailWaiting && (!pB->i2eWaitingForEmptyFifo) )
- {
- if (iiTrySendMail(pB, pB->i2eOutMailWaiting))
- {
- /* If we were already waiting for fifo to empty,
- * or just sent MB_OUT_STUFFED, then we are
- * still waiting for it to empty, until we should
- * receive an MB_IN_STRIPPED from the board.
- */
- pB->i2eWaitingForEmptyFifo |=
- (pB->i2eOutMailWaiting & MB_OUT_STUFFED);
- pB->i2eOutMailWaiting = 0;
- pB->SendPendingRetry = 0;
- } else {
-/* The only time we hit this area is when "iiTrySendMail" has
- failed. That only occurs when the outbound mailbox is
- still busy with the last message. We take a short breather
- to let the board catch up with itself and then try again.
- 16 Retries is the limit - then we got a borked board.
- /\/\|=mhw=|\/\/ */
-
- if( ++pB->SendPendingRetry < 16 ) {
- setup_timer(&pB->SendPendingTimer,
- iiSendPendingMail_t, (unsigned long)pB);
- mod_timer(&pB->SendPendingTimer, jiffies + 1);
- } else {
- printk( KERN_ERR "IP2: iiSendPendingMail unable to queue outbound mail\n" );
- }
- }
- }
-}
-
-//******************************************************************************
-// Function: i2InitChannels(pB, nChannels, pCh)
-// Parameters: Pointer to Ellis Board structure
-// Number of channels to initialize
-// Pointer to first element in an array of channel structures
-// Returns: Success or failure
-//
-// Description:
-//
-// This function patches pointers, back-pointers, and initializes all the
-// elements in the channel structure array.
-//
-// This should be run after the board structure is initialized, through having
-// loaded the standard loadware (otherwise it complains).
-//
-// In any case, it must be done before any serious work begins initializing the
-// irq's or sending commands...
-//
-//******************************************************************************
-static int
-i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh)
-{
- int index, stuffIndex;
- i2ChanStrPtr *ppCh;
-
- if (pB->i2eValid != I2E_MAGIC) {
- I2_COMPLETE(pB, I2EE_BADMAGIC);
- }
- if (pB->i2eState != II_STATE_STDLOADED) {
- I2_COMPLETE(pB, I2EE_BADSTATE);
- }
-
- rwlock_init(&pB->read_fifo_spinlock);
- rwlock_init(&pB->write_fifo_spinlock);
- rwlock_init(&pB->Dbuf_spinlock);
- rwlock_init(&pB->Bbuf_spinlock);
- rwlock_init(&pB->Fbuf_spinlock);
-
- // NO LOCK needed yet - this is init
-
- pB->i2eChannelPtr = pCh;
- pB->i2eChannelCnt = nChannels;
-
- pB->i2Fbuf_strip = pB->i2Fbuf_stuff = 0;
- pB->i2Dbuf_strip = pB->i2Dbuf_stuff = 0;
- pB->i2Bbuf_strip = pB->i2Bbuf_stuff = 0;
-
- pB->SendPendingRetry = 0;
-
- memset ( pCh, 0, sizeof (i2ChanStr) * nChannels );
-
- for (index = stuffIndex = 0, ppCh = (i2ChanStrPtr *)(pB->i2Fbuf);
- nChannels && index < ABS_MOST_PORTS;
- index++)
- {
- if ( !(pB->i2eChannelMap[index >> 4] & (1 << (index & 0xf)) ) ) {
- continue;
- }
- rwlock_init(&pCh->Ibuf_spinlock);
- rwlock_init(&pCh->Obuf_spinlock);
- rwlock_init(&pCh->Cbuf_spinlock);
- rwlock_init(&pCh->Pbuf_spinlock);
- // NO LOCK needed yet - this is init
- // Set up validity flag according to support level
- if (pB->i2eGoodMap[index >> 4] & (1 << (index & 0xf)) ) {
- pCh->validity = CHANNEL_MAGIC | CHANNEL_SUPPORT;
- } else {
- pCh->validity = CHANNEL_MAGIC;
- }
- pCh->pMyBord = pB; /* Back-pointer */
-
- // Prepare an outgoing flow-control packet to send as soon as the chance
- // occurs.
- if ( pCh->validity & CHANNEL_SUPPORT ) {
- pCh->infl.hd.i2sChannel = index;
- pCh->infl.hd.i2sCount = 5;
- pCh->infl.hd.i2sType = PTYPE_BYPASS;
- pCh->infl.fcmd = 37;
- pCh->infl.asof = 0;
- pCh->infl.room = IBUF_SIZE - 1;
-
- pCh->whenSendFlow = (IBUF_SIZE/5)*4; // when 80% full
-
- // The following is similar to calling i2QueueNeeds, except that this
- // is done in longhand, since we are setting up initial conditions on
- // many channels at once.
- pCh->channelNeeds = NEED_FLOW; // Since starting from scratch
- pCh->sinceLastFlow = 0; // No bytes received since last flow
- // control packet was queued
- stuffIndex++;
- *ppCh++ = pCh; // List this channel as needing
- // initial flow control packet sent
- }
-
- // Don't allow anything to be sent until the status packets come in from
- // the board.
-
- pCh->outfl.asof = 0;
- pCh->outfl.room = 0;
-
- // Initialize all the ring buffers
-
- pCh->Ibuf_stuff = pCh->Ibuf_strip = 0;
- pCh->Obuf_stuff = pCh->Obuf_strip = 0;
- pCh->Cbuf_stuff = pCh->Cbuf_strip = 0;
-
- memset( &pCh->icount, 0, sizeof (struct async_icount) );
- pCh->hotKeyIn = HOT_CLEAR;
- pCh->channelOptions = 0;
- pCh->bookMarks = 0;
- init_waitqueue_head(&pCh->pBookmarkWait);
-
- init_waitqueue_head(&pCh->open_wait);
- init_waitqueue_head(&pCh->close_wait);
- init_waitqueue_head(&pCh->delta_msr_wait);
-
- // Set base and divisor so default custom rate is 9600
- pCh->BaudBase = 921600; // MAX for ST654, changed after we get
- pCh->BaudDivisor = 96; // the boxids (UART types) later
-
- pCh->dataSetIn = 0;
- pCh->dataSetOut = 0;
-
- pCh->wopen = 0;
- pCh->throttled = 0;
-
- pCh->speed = CBR_9600;
-
- pCh->flags = 0;
-
- pCh->ClosingDelay = 5*HZ/10;
- pCh->ClosingWaitTime = 30*HZ;
-
- // Initialize task queue objects
- INIT_WORK(&pCh->tqueue_input, do_input);
- INIT_WORK(&pCh->tqueue_status, do_status);
-
-#ifdef IP2DEBUG_TRACE
- pCh->trace = ip2trace;
-#endif
-
- ++pCh;
- --nChannels;
- }
- // No need to check for wrap here; this is initialization.
- pB->i2Fbuf_stuff = stuffIndex;
- I2_COMPLETE(pB, I2EE_GOOD);
-
-}
-
-//******************************************************************************
-// Function: i2DeQueueNeeds(pB, type)
-// Parameters: Pointer to a board structure
-// type bit map: may include NEED_INLINE, NEED_BYPASS, or NEED_FLOW
-// Returns:
-// Pointer to a channel structure
-//
-// Description: Returns pointer struct of next channel that needs service of
-// the type specified. Otherwise returns a NULL reference.
-//
-//******************************************************************************
-static i2ChanStrPtr
-i2DeQueueNeeds(i2eBordStrPtr pB, int type)
-{
- unsigned short queueIndex;
- unsigned long flags;
-
- i2ChanStrPtr pCh = NULL;
-
- switch(type) {
-
- case NEED_INLINE:
-
- write_lock_irqsave(&pB->Dbuf_spinlock, flags);
- if ( pB->i2Dbuf_stuff != pB->i2Dbuf_strip)
- {
- queueIndex = pB->i2Dbuf_strip;
- pCh = pB->i2Dbuf[queueIndex];
- queueIndex++;
- if (queueIndex >= CH_QUEUE_SIZE) {
- queueIndex = 0;
- }
- pB->i2Dbuf_strip = queueIndex;
- pCh->channelNeeds &= ~NEED_INLINE;
- }
- write_unlock_irqrestore(&pB->Dbuf_spinlock, flags);
- break;
-
- case NEED_BYPASS:
-
- write_lock_irqsave(&pB->Bbuf_spinlock, flags);
- if (pB->i2Bbuf_stuff != pB->i2Bbuf_strip)
- {
- queueIndex = pB->i2Bbuf_strip;
- pCh = pB->i2Bbuf[queueIndex];
- queueIndex++;
- if (queueIndex >= CH_QUEUE_SIZE) {
- queueIndex = 0;
- }
- pB->i2Bbuf_strip = queueIndex;
- pCh->channelNeeds &= ~NEED_BYPASS;
- }
- write_unlock_irqrestore(&pB->Bbuf_spinlock, flags);
- break;
-
- case NEED_FLOW:
-
- write_lock_irqsave(&pB->Fbuf_spinlock, flags);
- if (pB->i2Fbuf_stuff != pB->i2Fbuf_strip)
- {
- queueIndex = pB->i2Fbuf_strip;
- pCh = pB->i2Fbuf[queueIndex];
- queueIndex++;
- if (queueIndex >= CH_QUEUE_SIZE) {
- queueIndex = 0;
- }
- pB->i2Fbuf_strip = queueIndex;
- pCh->channelNeeds &= ~NEED_FLOW;
- }
- write_unlock_irqrestore(&pB->Fbuf_spinlock, flags);
- break;
- default:
- printk(KERN_ERR "i2DeQueueNeeds called with bad type:%x\n",type);
- break;
- }
- return pCh;
-}
-
-//******************************************************************************
-// Function: i2QueueNeeds(pB, pCh, type)
-// Parameters: Pointer to a board structure
-// Pointer to a channel structure
-// type bit map: may include NEED_INLINE, NEED_BYPASS, or NEED_FLOW
-// Returns: Nothing
-//
-// Description:
-// For each type of need selected, if the given channel is not already in the
-// queue, adds it, and sets the flag indicating it is in the queue.
-//******************************************************************************
-static void
-i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type)
-{
- unsigned short queueIndex;
- unsigned long flags;
-
- // We turn off all the interrupts during this brief process, since the
- // interrupt-level code might want to put things on the queue as well.
-
- switch (type) {
-
- case NEED_INLINE:
-
- write_lock_irqsave(&pB->Dbuf_spinlock, flags);
- if ( !(pCh->channelNeeds & NEED_INLINE) )
- {
- pCh->channelNeeds |= NEED_INLINE;
- queueIndex = pB->i2Dbuf_stuff;
- pB->i2Dbuf[queueIndex++] = pCh;
- if (queueIndex >= CH_QUEUE_SIZE)
- queueIndex = 0;
- pB->i2Dbuf_stuff = queueIndex;
- }
- write_unlock_irqrestore(&pB->Dbuf_spinlock, flags);
- break;
-
- case NEED_BYPASS:
-
- write_lock_irqsave(&pB->Bbuf_spinlock, flags);
- if ((type & NEED_BYPASS) && !(pCh->channelNeeds & NEED_BYPASS))
- {
- pCh->channelNeeds |= NEED_BYPASS;
- queueIndex = pB->i2Bbuf_stuff;
- pB->i2Bbuf[queueIndex++] = pCh;
- if (queueIndex >= CH_QUEUE_SIZE)
- queueIndex = 0;
- pB->i2Bbuf_stuff = queueIndex;
- }
- write_unlock_irqrestore(&pB->Bbuf_spinlock, flags);
- break;
-
- case NEED_FLOW:
-
- write_lock_irqsave(&pB->Fbuf_spinlock, flags);
- if ((type & NEED_FLOW) && !(pCh->channelNeeds & NEED_FLOW))
- {
- pCh->channelNeeds |= NEED_FLOW;
- queueIndex = pB->i2Fbuf_stuff;
- pB->i2Fbuf[queueIndex++] = pCh;
- if (queueIndex >= CH_QUEUE_SIZE)
- queueIndex = 0;
- pB->i2Fbuf_stuff = queueIndex;
- }
- write_unlock_irqrestore(&pB->Fbuf_spinlock, flags);
- break;
-
- case NEED_CREDIT:
- pCh->channelNeeds |= NEED_CREDIT;
- break;
- default:
- printk(KERN_ERR "i2QueueNeeds called with bad type:%x\n",type);
- break;
- }
- return;
-}
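// Illustrative user-space sketch of the queue discipline implemented by
// i2QueueNeeds()/i2DeQueueNeeds() above; the sketch_* names and queue size are
// hypothetical and locking is omitted. Each need has a fixed-size ring of
// channel pointers, and a per-channel flag prevents double-queuing.
#include <stddef.h>

#define SKETCH_QUEUE_SIZE 16		/* stands in for CH_QUEUE_SIZE */

struct sketch_chan {
	unsigned needs;			/* bit map of pending needs */
};

struct sketch_queue {
	struct sketch_chan *slot[SKETCH_QUEUE_SIZE];
	unsigned short stuff;		/* producer index */
	unsigned short strip;		/* consumer index */
};

void sketch_queue_need(struct sketch_queue *q, struct sketch_chan *ch,
		       unsigned need)
{
	if (ch->needs & need)		/* already queued for this need */
		return;
	ch->needs |= need;
	q->slot[q->stuff] = ch;
	if (++q->stuff >= SKETCH_QUEUE_SIZE)
		q->stuff = 0;		/* wrap the producer index */
}

struct sketch_chan *sketch_dequeue_need(struct sketch_queue *q, unsigned need)
{
	struct sketch_chan *ch = NULL;

	if (q->stuff != q->strip) {	/* queue not empty */
		ch = q->slot[q->strip];
		if (++q->strip >= SKETCH_QUEUE_SIZE)
			q->strip = 0;	/* wrap the consumer index */
		ch->needs &= ~need;	/* channel no longer queued */
	}
	return ch;
}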
-
-//******************************************************************************
-// Function: i2QueueCommands(type, pCh, timeout, nCommands, pCs,...)
-// Parameters: type - PTYPE_BYPASS or PTYPE_INLINE
-// pointer to the channel structure
-// maximum period to wait
-// number of commands (n)
-// n commands
-// Returns: Number of commands sent, or -1 for error
-//
-// get board lock before calling
-//
-// Description:
-//  Queues up one or more bypass or inline commands to be sent to the given
-//  channel. The timeout parameter indicates how many HUNDREDTHS OF SECONDS to
-//  wait until there is room: 0 = return immediately if no room, negative =
-//  wait forever, positive = number of 1/100 seconds to wait. Return values:
-// -1 Some kind of nasty error: bad channel structure or invalid arguments.
-// 0 No room to send all the commands
-// (+) Number of commands sent
-//******************************************************************************
-static int
-i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
- cmdSyntaxPtr pCs0,...)
-{
- int totalsize = 0;
- int blocksize;
- int lastended;
- cmdSyntaxPtr *ppCs;
- cmdSyntaxPtr pCs;
- int count;
- int flag;
- i2eBordStrPtr pB;
-
- unsigned short maxBlock;
- unsigned short maxBuff;
- short bufroom;
- unsigned short stuffIndex;
- unsigned char *pBuf;
- unsigned char *pInsert;
- unsigned char *pDest, *pSource;
- unsigned short channel;
- int cnt;
- unsigned long flags = 0;
- rwlock_t *lock_var_p = NULL;
-
- // Make sure the channel exists, otherwise do nothing
- if ( !i2Validate ( pCh ) ) {
- return -1;
- }
-
- ip2trace (CHANN, ITRC_QUEUE, ITRC_ENTER, 0 );
-
- pB = pCh->pMyBord;
-
- // Board must also exist, and THE INTERRUPT COMMAND ALREADY SENT
- if (pB->i2eValid != I2E_MAGIC || pB->i2eUsingIrq == I2_IRQ_UNDEFINED)
- return -2;
- // If the board has gone fatal, return bad, and also hit the trap routine if
- // it exists.
- if (pB->i2eFatal) {
- if ( pB->i2eFatalTrap ) {
- (*(pB)->i2eFatalTrap)(pB);
- }
- return -3;
- }
- // Set up some variables. Which buffers are we using? How big are they?
- switch(type)
- {
- case PTYPE_INLINE:
- flag = INL;
- maxBlock = MAX_OBUF_BLOCK;
- maxBuff = OBUF_SIZE;
- pBuf = pCh->Obuf;
- break;
- case PTYPE_BYPASS:
- flag = BYP;
- maxBlock = MAX_CBUF_BLOCK;
- maxBuff = CBUF_SIZE;
- pBuf = pCh->Cbuf;
- break;
- default:
- return -4;
- }
- // Determine the total size required for all the commands
- totalsize = blocksize = sizeof(i2CmdHeader);
- lastended = 0;
- ppCs = &pCs0;
- for ( count = nCommands; count; count--, ppCs++)
- {
- pCs = *ppCs;
- cnt = pCs->length;
- // Will a new block be needed for this one? Two possible reasons: this
- // command is too big, or the previous command had to be at the end of a
- // packet.
- if ((blocksize + cnt > maxBlock) || lastended) {
- blocksize = sizeof(i2CmdHeader);
- totalsize += sizeof(i2CmdHeader);
- }
- totalsize += cnt;
- blocksize += cnt;
-
- // If this command had to end a block, then we will make sure to
- // account for it should there be any more blocks.
- lastended = pCs->flags & END;
- }
- for (;;) {
- // Make sure any pending flush commands go out before we add more data.
- if ( !( pCh->flush_flags && i2RetryFlushOutput( pCh ) ) ) {
- // How much room (this time through) ?
- switch(type) {
- case PTYPE_INLINE:
- lock_var_p = &pCh->Obuf_spinlock;
- write_lock_irqsave(lock_var_p, flags);
- stuffIndex = pCh->Obuf_stuff;
- bufroom = pCh->Obuf_strip - stuffIndex;
- break;
- case PTYPE_BYPASS:
- lock_var_p = &pCh->Cbuf_spinlock;
- write_lock_irqsave(lock_var_p, flags);
- stuffIndex = pCh->Cbuf_stuff;
- bufroom = pCh->Cbuf_strip - stuffIndex;
- break;
- default:
- return -5;
- }
- if (--bufroom < 0) {
- bufroom += maxBuff;
- }
-
- ip2trace (CHANN, ITRC_QUEUE, 2, 1, bufroom );
-
- // Check for overflow
- if (totalsize <= bufroom) {
- // Normal Expected path - We still hold LOCK
- break; /* from for()- Enough room: goto proceed */
- }
- ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize);
- write_unlock_irqrestore(lock_var_p, flags);
- } else
- ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize);
-
- /* Prepare to wait for buffers to empty */
- serviceOutgoingFifo(pB); // Dump what we got
-
- if (timeout == 0) {
- return 0; // Tired of waiting
- }
- if (timeout > 0)
- timeout--; // So negative values == forever
-
- if (!in_interrupt()) {
- schedule_timeout_interruptible(1); // short nap
- } else {
- // we cannot schedule or sleep in interrupt context
- return 0;
- }
- if (signal_pending(current)) {
- return 0; // Wake up! Time to die!!!
- }
-
- ip2trace (CHANN, ITRC_QUEUE, 4, 0 );
-
- } // end of for(;;)
-
- // At this point we have room and the lock - stick them in.
- channel = pCh->infl.hd.i2sChannel;
- pInsert = &pBuf[stuffIndex]; // Pointer to start of packet
- pDest = CMD_OF(pInsert); // Pointer to start of command
-
- // When we start counting, the block is the size of the header
- for (blocksize = sizeof(i2CmdHeader), count = nCommands,
- lastended = 0, ppCs = &pCs0;
- count;
- count--, ppCs++)
- {
- pCs = *ppCs; // Points to command protocol structure
-
- // If this is a bookmark request command, post the fact that a bookmark
- // request is pending. NOTE THIS TRICK ONLY WORKS BECAUSE CMD_BMARK_REQ
- // has no parameters! The more general solution would be to reference
- // pCs->cmd[0].
- if (pCs == CMD_BMARK_REQ) {
- pCh->bookMarks++;
-
- ip2trace (CHANN, ITRC_DRAIN, 30, 1, pCh->bookMarks );
-
- }
- cnt = pCs->length;
-
- // If this command would put us over the maximum block size or
- // if the last command had to be at the end of a block, we end
- // the existing block here and start a new one.
- if ((blocksize + cnt > maxBlock) || lastended) {
-
- ip2trace (CHANN, ITRC_QUEUE, 5, 0 );
-
- PTYPE_OF(pInsert) = type;
- CHANNEL_OF(pInsert) = channel;
- // count here does not include the header
- CMD_COUNT_OF(pInsert) = blocksize - sizeof(i2CmdHeader);
- stuffIndex += blocksize;
- if(stuffIndex >= maxBuff) {
- stuffIndex = 0;
- pInsert = pBuf;
- }
- pInsert = &pBuf[stuffIndex]; // Pointer to start of next pkt
- pDest = CMD_OF(pInsert);
- blocksize = sizeof(i2CmdHeader);
- }
- // Now we know there is room for this one in the current block
-
- blocksize += cnt; // Total bytes in this command
- pSource = pCs->cmd; // Copy the command into the buffer
- while (cnt--) {
- *pDest++ = *pSource++;
- }
- // If this command had to end a block, then we will make sure to account
- // for it should there be any more blocks.
- lastended = pCs->flags & END;
- } // end for
- // Clean up the final block by writing header, etc
-
- PTYPE_OF(pInsert) = type;
- CHANNEL_OF(pInsert) = channel;
- // count here does not include the header
- CMD_COUNT_OF(pInsert) = blocksize - sizeof(i2CmdHeader);
- stuffIndex += blocksize;
- if(stuffIndex >= maxBuff) {
- stuffIndex = 0;
- pInsert = pBuf;
- }
- // Update the index and post the need for service. When adding these to
- // the queue of channels, we turn off the interrupt while doing so,
- // because at interrupt level we might want to push a channel back to the
- // end of the queue.
- switch(type)
- {
- case PTYPE_INLINE:
- pCh->Obuf_stuff = stuffIndex; // Store buffer pointer
- write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
-
- pB->debugInlineQueued++;
- // Add the channel pointer to list of channels needing service (first
- // come...), if it's not already there.
- i2QueueNeeds(pB, pCh, NEED_INLINE);
- break;
-
- case PTYPE_BYPASS:
- pCh->Cbuf_stuff = stuffIndex; // Store buffer pointer
- write_unlock_irqrestore(&pCh->Cbuf_spinlock, flags);
-
- pB->debugBypassQueued++;
- // Add the channel pointer to list of channels needing service (first
- // come...), if it's not already there.
- i2QueueNeeds(pB, pCh, NEED_BYPASS);
- break;
- }
-
- ip2trace (CHANN, ITRC_QUEUE, ITRC_RETURN, 1, nCommands );
-
- return nCommands; // Good status: number of commands sent
-}
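// A minimal sketch of the timeout convention documented for i2QueueCommands()
// above; the helper name and callbacks are hypothetical, not part of the
// driver. 0 fails immediately when there is no room, a negative timeout waits
// forever, and a positive timeout is decremented once per ~1/100 s retry.
#include <stdbool.h>

bool sketch_wait_for_room(int timeout, bool (*room_available)(void *ctx),
			  void *ctx, void (*nap_10ms)(void))
{
	for (;;) {
		if (room_available(ctx))
			return true;	/* caller may now stuff its commands */
		if (timeout == 0)
			return false;	/* tired of waiting */
		if (timeout > 0)
			timeout--;	/* negative values never hit zero */
		nap_10ms();		/* stands in for schedule_timeout_interruptible(1) */
	}
}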
-
-//******************************************************************************
-// Function: i2GetStatus(pCh,resetBits)
-// Parameters: Pointer to a channel structure
-// Bit map of status bits to clear
-// Returns: Bit map of current status bits
-//
-// Description:
-// Returns the state of data set signals, and whether a break has been received,
-// (see i2lib.h for bit-mapped result). resetBits is a bit-map of any status
-// bits to be cleared: I2_BRK, I2_PAR, I2_FRA, I2_OVR,... These are cleared
-// AFTER the condition is passed. If pCh does not point to a valid channel,
-// returns -1 (which would be impossible otherwise).
-//******************************************************************************
-static int
-i2GetStatus(i2ChanStrPtr pCh, int resetBits)
-{
- unsigned short status;
- i2eBordStrPtr pB;
-
- ip2trace (CHANN, ITRC_STATUS, ITRC_ENTER, 2, pCh->dataSetIn, resetBits );
-
- // Make sure the channel exists, otherwise do nothing */
- if ( !i2Validate ( pCh ) )
- return -1;
-
- pB = pCh->pMyBord;
-
- status = pCh->dataSetIn;
-
- // Clear any specified error bits: but note that only actual error bits can
- // be cleared, regardless of the value passed.
- if (resetBits)
- {
- pCh->dataSetIn &= ~(resetBits & (I2_BRK | I2_PAR | I2_FRA | I2_OVR));
- pCh->dataSetIn &= ~(I2_DDCD | I2_DCTS | I2_DDSR | I2_DRI);
- }
-
- ip2trace (CHANN, ITRC_STATUS, ITRC_RETURN, 1, pCh->dataSetIn );
-
- return status;
-}
-
-//******************************************************************************
-// Function:   i2Input(pCh)
-// Parameters:  Pointer to a channel structure
-// Returns: Number of bytes read, or -1 for error
-//
-// Description:
-//   Strips data from the input buffer and feeds it to the line discipline. If there is a
-// colossal blunder, (invalid structure pointers or the like), returns -1.
-// Otherwise, returns the number of bytes read.
-//******************************************************************************
-static int
-i2Input(i2ChanStrPtr pCh)
-{
- int amountToMove;
- unsigned short stripIndex;
- int count;
- unsigned long flags = 0;
-
- ip2trace (CHANN, ITRC_INPUT, ITRC_ENTER, 0);
-
- // Ensure channel structure seems real
- if ( !i2Validate( pCh ) ) {
- count = -1;
- goto i2Input_exit;
- }
- write_lock_irqsave(&pCh->Ibuf_spinlock, flags);
-
- // initialize some accelerators and private copies
- stripIndex = pCh->Ibuf_strip;
-
- count = pCh->Ibuf_stuff - stripIndex;
-
- // If buffer is empty or requested data count was 0, (trivial case) return
- // without any further thought.
- if ( count == 0 ) {
- write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
- goto i2Input_exit;
- }
- // Adjust for buffer wrap
- if ( count < 0 ) {
- count += IBUF_SIZE;
- }
- // Don't give more than can be taken by the line discipline
- amountToMove = pCh->pTTY->receive_room;
- if (count > amountToMove) {
- count = amountToMove;
- }
- // How much could we copy without a wrap?
- amountToMove = IBUF_SIZE - stripIndex;
-
- if (amountToMove > count) {
- amountToMove = count;
- }
- // Move the first block
- pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY,
- &(pCh->Ibuf[stripIndex]), NULL, amountToMove );
- // If we needed to wrap, do the second data move
- if (count > amountToMove) {
- pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY,
- pCh->Ibuf, NULL, count - amountToMove );
- }
- // Bump and wrap the stripIndex all at once by the amount of data read. This
- // method is good regardless of whether the data was in one or two pieces.
- stripIndex += count;
- if (stripIndex >= IBUF_SIZE) {
- stripIndex -= IBUF_SIZE;
- }
- pCh->Ibuf_strip = stripIndex;
-
- // Update our flow control information and possibly queue ourselves to send
- // it, depending on how much data has been stripped since the last time a
- // packet was sent.
- pCh->infl.asof += count;
-
- if ((pCh->sinceLastFlow += count) >= pCh->whenSendFlow) {
- pCh->sinceLastFlow -= pCh->whenSendFlow;
- write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
- i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW);
- } else {
- write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
- }
-
-i2Input_exit:
-
- ip2trace (CHANN, ITRC_INPUT, ITRC_RETURN, 1, count);
-
- return count;
-}
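// Stand-alone sketch of the wrap-aware strip performed in i2Input() above; the
// consume() callback is hypothetical (the driver hands the bytes to the tty
// line discipline). At most two contiguous copies are ever needed: one up to
// the end of the ring, and one from index 0 for any wrapped remainder.
#include <stddef.h>

void sketch_strip_ring(const unsigned char *ring, size_t ring_size,
		       size_t strip, size_t count,
		       void (*consume)(const unsigned char *buf, size_t len))
{
	size_t first = ring_size - strip;	/* bytes before the wrap point */

	if (first > count)
		first = count;
	consume(&ring[strip], first);		/* first (possibly only) block */
	if (count > first)
		consume(ring, count - first);	/* wrapped remainder */
}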
-
-//******************************************************************************
-// Function: i2InputFlush(pCh)
-// Parameters: Pointer to a channel structure
-// Returns: Number of bytes stripped, or -1 for error
-//
-// Description:
-// Strips any data from the input buffer. If there is a colossal blunder,
-// (invalid structure pointers or the like), returns -1. Otherwise, returns the
-// number of bytes stripped.
-//******************************************************************************
-static int
-i2InputFlush(i2ChanStrPtr pCh)
-{
- int count;
- unsigned long flags;
-
- // Ensure channel structure seems real
- if ( !i2Validate ( pCh ) )
- return -1;
-
- ip2trace (CHANN, ITRC_INPUT, 10, 0);
-
- write_lock_irqsave(&pCh->Ibuf_spinlock, flags);
- count = pCh->Ibuf_stuff - pCh->Ibuf_strip;
-
- // Adjust for buffer wrap
- if (count < 0) {
- count += IBUF_SIZE;
- }
-
- // Expedient way to zero out the buffer
- pCh->Ibuf_strip = pCh->Ibuf_stuff;
-
-
- // Update our flow control information and possibly queue ourselves to send
- // it, depending on how much data has been stripped since the last time a
- // packet was sent.
-
- pCh->infl.asof += count;
-
- if ( (pCh->sinceLastFlow += count) >= pCh->whenSendFlow )
- {
- pCh->sinceLastFlow -= pCh->whenSendFlow;
- write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
- i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW);
- } else {
- write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
- }
-
- ip2trace (CHANN, ITRC_INPUT, 19, 1, count);
-
- return count;
-}
-
-//******************************************************************************
-// Function: i2InputAvailable(pCh)
-// Parameters: Pointer to a channel structure
-// Returns: Number of bytes available, or -1 for error
-//
-// Description:
-// If there is a colossal blunder, (invalid structure pointers or the like),
-//   returns -1. Otherwise, returns the number of bytes available in the buffer.
-//******************************************************************************
-#if 0
-static int
-i2InputAvailable(i2ChanStrPtr pCh)
-{
- int count;
- unsigned long flags;
-
- // Ensure channel structure seems real
- if ( !i2Validate ( pCh ) ) return -1;
-
-
- // initialize some accelerators and private copies
- read_lock_irqsave(&pCh->Ibuf_spinlock, flags);
- count = pCh->Ibuf_stuff - pCh->Ibuf_strip;
- read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
-
- // Adjust for buffer wrap
- if (count < 0)
- {
- count += IBUF_SIZE;
- }
-
- return count;
-}
-#endif
-
-//******************************************************************************
-// Function: i2Output(pCh, pSource, count)
-// Parameters: Pointer to channel structure
-// Pointer to source data
-// Number of bytes to send
-// Returns: Number of bytes sent, or -1 for error
-//
-// Description:
-// Queues the data at pSource to be sent as data packets to the board. If there
-// is a colossal blunder, (invalid structure pointers or the like), returns -1.
-// Otherwise, returns the number of bytes written. What if there is not enough
-// room for all the data? If pCh->channelOptions & CO_NBLOCK_WRITE is set, then
-// we transfer as many characters as we can now, then return. If this bit is
-// clear (default), routine will spin along until all the data is buffered.
-// Should this occur, the routine sleeps briefly between retries so that the
-// caller is not stuck in a loop it cannot break out of.
-//******************************************************************************
-static int
-i2Output(i2ChanStrPtr pCh, const char *pSource, int count)
-{
- i2eBordStrPtr pB;
- unsigned char *pInsert;
- int amountToMove;
- int countOriginal = count;
- unsigned short channel;
- unsigned short stuffIndex;
- unsigned long flags;
-
- int bailout = 10;
-
- ip2trace (CHANN, ITRC_OUTPUT, ITRC_ENTER, 2, count, 0 );
-
- // Ensure channel structure seems real
- if ( !i2Validate ( pCh ) )
- return -1;
-
- // initialize some accelerators and private copies
- pB = pCh->pMyBord;
- channel = pCh->infl.hd.i2sChannel;
-
- // If the board has gone fatal, return bad, and also hit the trap routine if
- // it exists.
- if (pB->i2eFatal) {
- if (pB->i2eFatalTrap) {
- (*(pB)->i2eFatalTrap)(pB);
- }
- return -1;
- }
- // Proceed as though we would do everything
- while ( count > 0 ) {
-
- // How much room in output buffer is there?
- read_lock_irqsave(&pCh->Obuf_spinlock, flags);
- amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1;
- read_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
- if (amountToMove < 0) {
- amountToMove += OBUF_SIZE;
- }
- // Subtract off the headers size and see how much room there is for real
- // data. If this is negative, we will discover later.
- amountToMove -= sizeof (i2DataHeader);
-
- // Don't move more (now) than can go in a single packet
- if ( amountToMove > (int)(MAX_OBUF_BLOCK - sizeof(i2DataHeader)) ) {
- amountToMove = MAX_OBUF_BLOCK - sizeof(i2DataHeader);
- }
- // Don't move more than the count we were given
- if (amountToMove > count) {
- amountToMove = count;
- }
- // Now we know how much we must move: NB because the ring buffers have
- // an overflow area at the end, we needn't worry about wrapping in the
- // middle of a packet.
-
-// Small WINDOW here with no LOCK but I can't call Flush with LOCK
-// We would be flushing (or ending flush) anyway
-
- ip2trace (CHANN, ITRC_OUTPUT, 10, 1, amountToMove );
-
- if ( !(pCh->flush_flags && i2RetryFlushOutput(pCh) )
- && amountToMove > 0 )
- {
- write_lock_irqsave(&pCh->Obuf_spinlock, flags);
- stuffIndex = pCh->Obuf_stuff;
-
- // Had room to move some data: don't know whether the block size,
- // buffer space, or what was the limiting factor...
- pInsert = &(pCh->Obuf[stuffIndex]);
-
- // Set up the header
- CHANNEL_OF(pInsert) = channel;
- PTYPE_OF(pInsert) = PTYPE_DATA;
- TAG_OF(pInsert) = 0;
- ID_OF(pInsert) = ID_ORDINARY_DATA;
- DATA_COUNT_OF(pInsert) = amountToMove;
-
- // Move the data
- memcpy( (char*)(DATA_OF(pInsert)), pSource, amountToMove );
- // Adjust pointers and indices
- pSource += amountToMove;
- pCh->Obuf_char_count += amountToMove;
- stuffIndex += amountToMove + sizeof(i2DataHeader);
- count -= amountToMove;
-
- if (stuffIndex >= OBUF_SIZE) {
- stuffIndex = 0;
- }
- pCh->Obuf_stuff = stuffIndex;
-
- write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
-
- ip2trace (CHANN, ITRC_OUTPUT, 13, 1, stuffIndex );
-
- } else {
-
- // Cannot move data: either we need to stuff a flush
- // command first, or the amount to move is <= 0.
-
- ip2trace(CHANN, ITRC_OUTPUT, 14, 3,
- amountToMove, pB->i2eFifoRemains,
- pB->i2eWaitingForEmptyFifo );
-
- // Put this channel back on the queue; this ultimately
- // gets more data moved or wakes up the write output path.
- i2QueueNeeds(pB, pCh, NEED_INLINE);
-
- if ( pB->i2eWaitingForEmptyFifo ) {
-
- ip2trace (CHANN, ITRC_OUTPUT, 16, 0 );
-
- // or schedule
- if (!in_interrupt()) {
-
- ip2trace (CHANN, ITRC_OUTPUT, 61, 0 );
-
- schedule_timeout_interruptible(2);
- if (signal_pending(current)) {
- break;
- }
- continue;
- } else {
-
- ip2trace (CHANN, ITRC_OUTPUT, 62, 0 );
-
- // let interrupt in = WAS restore_flags()
- // We hold no lock nor is irq off anymore???
-
- break;
- }
- break; // from while(count)
- }
- else if ( pB->i2eFifoRemains < 32 && !pB->i2eTxMailEmpty ( pB ) )
- {
- ip2trace (CHANN, ITRC_OUTPUT, 19, 2,
- pB->i2eFifoRemains,
- pB->i2eTxMailEmpty );
-
- break; // from while(count)
- } else if ( pCh->channelNeeds & NEED_CREDIT ) {
-
- ip2trace (CHANN, ITRC_OUTPUT, 22, 0 );
-
- break; // from while(count)
- } else if ( --bailout) {
-
- // Try to throw more things (maybe not us) in the fifo if we're
- // not already waiting for it.
-
- ip2trace (CHANN, ITRC_OUTPUT, 20, 0 );
-
- serviceOutgoingFifo(pB);
- //break; CONTINUE;
- } else {
- ip2trace (CHANN, ITRC_OUTPUT, 21, 3,
- pB->i2eFifoRemains,
- pB->i2eOutMailWaiting,
- pB->i2eWaitingForEmptyFifo );
-
- break; // from while(count)
- }
- }
- } // End of while(count)
-
- i2QueueNeeds(pB, pCh, NEED_INLINE);
-
- // We drop through either when the count expires, or when there is some
- // count left, but there was a non-blocking write.
- if (countOriginal > count) {
-
- ip2trace (CHANN, ITRC_OUTPUT, 17, 2, countOriginal, count );
-
- serviceOutgoingFifo( pB );
- }
-
- ip2trace (CHANN, ITRC_OUTPUT, ITRC_RETURN, 2, countOriginal, count );
-
- return countOriginal - count;
-}
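// Sketch of the "overflow area" layout i2Output() depends on; the struct and
// sizes below only mirror OBUF_SIZE and MAX_OBUF_BLOCK from i2lib.h and are
// not the driver's types. Because the ring is allocated with one maximum-size
// packet of slack past OBUF_SIZE, a packet can always be written contiguously,
// and the stuff index wraps only between packets.
#include <string.h>

#define SKETCH_OBUF_SIZE  1024
#define SKETCH_MAX_BLOCK  36

struct sketch_obuf {
	unsigned char buf[SKETCH_OBUF_SIZE + SKETCH_MAX_BLOCK + 4];
	unsigned short stuff;
};

/* Caller guarantees len <= SKETCH_MAX_BLOCK, as i2Output() does. */
void sketch_obuf_put(struct sketch_obuf *ob, const void *pkt, size_t len)
{
	memcpy(&ob->buf[ob->stuff], pkt, len);	/* never wraps mid-packet */
	ob->stuff += len;
	if (ob->stuff >= SKETCH_OBUF_SIZE)	/* wrap only between packets */
		ob->stuff = 0;
}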
-
-//******************************************************************************
-// Function: i2FlushOutput(pCh)
-// Parameters: Pointer to a channel structure
-// Returns: Nothing
-//
-// Description:
-// Sends bypass command to start flushing (waiting possibly forever until there
-// is room), then sends inline command to stop flushing output, (again waiting
-// possibly forever).
-//******************************************************************************
-static inline void
-i2FlushOutput(i2ChanStrPtr pCh)
-{
-
- ip2trace (CHANN, ITRC_FLUSH, 1, 1, pCh->flush_flags );
-
- if (pCh->flush_flags)
- return;
-
- if ( 1 != i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_STARTFL) ) {
- pCh->flush_flags = STARTFL_FLAG; // Failed - flag for later
-
- ip2trace (CHANN, ITRC_FLUSH, 2, 0 );
-
- } else if ( 1 != i2QueueCommands(PTYPE_INLINE, pCh, 0, 1, CMD_STOPFL) ) {
- pCh->flush_flags = STOPFL_FLAG; // Failed - flag for later
-
- ip2trace (CHANN, ITRC_FLUSH, 3, 0 );
- }
-}
-
-static int
-i2RetryFlushOutput(i2ChanStrPtr pCh)
-{
- int old_flags = pCh->flush_flags;
-
- ip2trace (CHANN, ITRC_FLUSH, 14, 1, old_flags );
-
- pCh->flush_flags = 0; // Clear flag so we can avoid recursion
- // and queue the commands
-
- if ( old_flags & STARTFL_FLAG ) {
- if ( 1 == i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_STARTFL) ) {
- old_flags = STOPFL_FLAG; //Success - send stop flush
- } else {
- old_flags = STARTFL_FLAG; //Failure - Flag for retry later
- }
-
- ip2trace (CHANN, ITRC_FLUSH, 15, 1, old_flags );
-
- }
- if ( old_flags & STOPFL_FLAG ) {
- if (1 == i2QueueCommands(PTYPE_INLINE, pCh, 0, 1, CMD_STOPFL)) {
- old_flags = 0; // Success - clear flags
- }
-
- ip2trace (CHANN, ITRC_FLUSH, 16, 1, old_flags );
- }
- pCh->flush_flags = old_flags;
-
- ip2trace (CHANN, ITRC_FLUSH, 17, 1, old_flags );
-
- return old_flags;
-}
-
-//******************************************************************************
-// Function: i2DrainOutput(pCh,timeout)
-// Parameters: Pointer to a channel structure
-// Maximum period to wait
-// Returns: Nothing
-//
-// Description:
-// Uses the bookmark request command to ask the board to send a bookmark back as
-// soon as all the data is completely sent.
-//******************************************************************************
-static void
-i2DrainWakeup(unsigned long d)
-{
- i2ChanStrPtr pCh = (i2ChanStrPtr)d;
-
- ip2trace (CHANN, ITRC_DRAIN, 10, 1, pCh->BookmarkTimer.expires );
-
- pCh->BookmarkTimer.expires = 0;
- wake_up_interruptible( &pCh->pBookmarkWait );
-}
-
-static void
-i2DrainOutput(i2ChanStrPtr pCh, int timeout)
-{
- wait_queue_t wait;
- i2eBordStrPtr pB;
-
- ip2trace (CHANN, ITRC_DRAIN, ITRC_ENTER, 1, pCh->BookmarkTimer.expires);
-
- pB = pCh->pMyBord;
- // If the board has gone fatal, return bad,
- // and also hit the trap routine if it exists.
- if (pB->i2eFatal) {
- if (pB->i2eFatalTrap) {
- (*(pB)->i2eFatalTrap)(pB);
- }
- return;
- }
- if ((timeout > 0) && (pCh->BookmarkTimer.expires == 0 )) {
- // One per customer (channel)
- setup_timer(&pCh->BookmarkTimer, i2DrainWakeup,
- (unsigned long)pCh);
-
- ip2trace (CHANN, ITRC_DRAIN, 1, 1, pCh->BookmarkTimer.expires );
-
- mod_timer(&pCh->BookmarkTimer, jiffies + timeout);
- }
-
- i2QueueCommands( PTYPE_INLINE, pCh, -1, 1, CMD_BMARK_REQ );
-
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&(pCh->pBookmarkWait), &wait);
- set_current_state( TASK_INTERRUPTIBLE );
-
- serviceOutgoingFifo( pB );
-
- schedule(); // Now we take our interruptible sleep on
-
- // Clean up the queue
- set_current_state( TASK_RUNNING );
- remove_wait_queue(&(pCh->pBookmarkWait), &wait);
-
- // If expires == 0 the timer already popped, so there is no need to del_timer.
- if ((timeout > 0) && pCh->BookmarkTimer.expires &&
- time_before(jiffies, pCh->BookmarkTimer.expires)) {
- del_timer( &(pCh->BookmarkTimer) );
- pCh->BookmarkTimer.expires = 0;
-
- ip2trace (CHANN, ITRC_DRAIN, 3, 1, pCh->BookmarkTimer.expires );
-
- }
- ip2trace (CHANN, ITRC_DRAIN, ITRC_RETURN, 1, pCh->BookmarkTimer.expires );
- return;
-}
-
-//******************************************************************************
-// Function: i2OutputFree(pCh)
-// Parameters: Pointer to a channel structure
-// Returns: Space in output buffer
-//
-// Description:
-// Returns -1 if very gross error. Otherwise returns the amount of bytes still
-// free in the output buffer.
-//******************************************************************************
-static int
-i2OutputFree(i2ChanStrPtr pCh)
-{
- int amountToMove;
- unsigned long flags;
-
- // Ensure channel structure seems real
- if ( !i2Validate ( pCh ) ) {
- return -1;
- }
- read_lock_irqsave(&pCh->Obuf_spinlock, flags);
- amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1;
- read_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
-
- if (amountToMove < 0) {
- amountToMove += OBUF_SIZE;
- }
- // If this is negative, we will discover later
- amountToMove -= sizeof(i2DataHeader);
-
- return (amountToMove < 0) ? 0 : amountToMove;
-}
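// Worked example (stand-alone, hypothetical numbers) of the free-space formula
// in i2OutputFree() above: room = (strip - stuff - 1) adjusted for wrap, minus
// one data header, clamped at zero.
int sketch_output_free(int strip, int stuff, int buf_size, int header_size)
{
	int room = strip - stuff - 1;

	if (room < 0)
		room += buf_size;		/* adjust for ring wrap */
	room -= header_size;			/* leave space for one header */
	return room < 0 ? 0 : room;
}
/* e.g. strip = 100, stuff = 900 and buf_size = 1024 give 223 bytes before the
 * header is subtracted; the "-1" keeps stuff from ever catching up to strip. */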
-static void
-ip2_owake( PTTY tp)
-{
- i2ChanStrPtr pCh;
-
- if (tp == NULL) return;
-
- pCh = tp->driver_data;
-
- ip2trace (CHANN, ITRC_SICMD, 10, 2, tp->flags,
- (1 << TTY_DO_WRITE_WAKEUP) );
-
- tty_wakeup(tp);
-}
-
-static inline void
-set_baud_params(i2eBordStrPtr pB)
-{
- int i,j;
- i2ChanStrPtr *pCh;
-
- pCh = (i2ChanStrPtr *) pB->i2eChannelPtr;
-
- for (i = 0; i < ABS_MAX_BOXES; i++) {
- if (pB->channelBtypes.bid_value[i]) {
- if (BID_HAS_654(pB->channelBtypes.bid_value[i])) {
- for (j = 0; j < ABS_BIGGEST_BOX; j++) {
- if (pCh[i*16+j] == NULL)
- break;
- (pCh[i*16+j])->BaudBase = 921600; // MAX for ST654
- (pCh[i*16+j])->BaudDivisor = 96;
- }
- } else { // has cirrus cd1400
- for (j = 0; j < ABS_BIGGEST_BOX; j++) {
- if (pCh[i*16+j] == NULL)
- break;
- (pCh[i*16+j])->BaudBase = 115200; // MAX for CD1400
- (pCh[i*16+j])->BaudDivisor = 12;
- }
- }
- }
- }
-}
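// Stand-alone check (not driver code) of the invariant set_baud_params() keeps
// above: whichever UART a box carries, BaudBase / BaudDivisor yields the same
// default custom rate of 9600 baud.
#include <assert.h>

int main(void)
{
	assert(921600 / 96 == 9600);	/* ST654 boxes  */
	assert(115200 / 12 == 9600);	/* CD1400 boxes */
	return 0;
}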
-
-//******************************************************************************
-// Function: i2StripFifo(pB)
-// Parameters: Pointer to a board structure
-// Returns: Nothing
-//
-// Description:
-// Strips all the available data from the incoming FIFO, identifies the type of
-// packet, and either buffers the data or does what needs to be done.
-//
-// Note there is no overflow checking here: if the board sends more data than it
-// ought to, we will not detect it here, but blindly overflow...
-//******************************************************************************
-
-// A buffer for reading in blocks for unknown channels
-static unsigned char junkBuffer[IBUF_SIZE];
-
-// A buffer to read in a status packet. Because of the size of the count field
-// for these things, the maximum packet size must be less than MAX_CMD_PACK_SIZE
-static unsigned char cmdBuffer[MAX_CMD_PACK_SIZE + 4];
-
-// This table changes the bit order from MSR order given by STAT_MODEM packet to
-// status bits used in our library.
-static char xlatDss[16] = {
-0 | 0 | 0 | 0 ,
-0 | 0 | 0 | I2_CTS ,
-0 | 0 | I2_DSR | 0 ,
-0 | 0 | I2_DSR | I2_CTS ,
-0 | I2_RI | 0 | 0 ,
-0 | I2_RI | 0 | I2_CTS ,
-0 | I2_RI | I2_DSR | 0 ,
-0 | I2_RI | I2_DSR | I2_CTS ,
-I2_DCD | 0 | 0 | 0 ,
-I2_DCD | 0 | 0 | I2_CTS ,
-I2_DCD | 0 | I2_DSR | 0 ,
-I2_DCD | 0 | I2_DSR | I2_CTS ,
-I2_DCD | I2_RI | 0 | 0 ,
-I2_DCD | I2_RI | 0 | I2_CTS ,
-I2_DCD | I2_RI | I2_DSR | 0 ,
-I2_DCD | I2_RI | I2_DSR | I2_CTS };
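// Illustrative generator (stand-alone; the SK_* bit values are placeholders,
// not the real I2_* definitions) for the xlatDss[] table above: bit 0 of the
// MSR-ordered nibble selects CTS, bit 1 DSR, bit 2 RI and bit 3 DCD.
#define SK_CTS 0x01
#define SK_DSR 0x02
#define SK_RI  0x04
#define SK_DCD 0x08

unsigned short sketch_xlat_dss(unsigned nibble)
{
	unsigned short out = 0;

	if (nibble & 0x1)
		out |= SK_CTS;
	if (nibble & 0x2)
		out |= SK_DSR;
	if (nibble & 0x4)
		out |= SK_RI;
	if (nibble & 0x8)
		out |= SK_DCD;
	return out;
}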
-
-static inline void
-i2StripFifo(i2eBordStrPtr pB)
-{
- i2ChanStrPtr pCh;
- int channel;
- int count;
- unsigned short stuffIndex;
- int amountToRead;
- unsigned char *pc, *pcLimit;
- unsigned char uc;
- unsigned char dss_change;
- unsigned long bflags,cflags;
-
-// ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_ENTER, 0 );
-
- while (I2_HAS_INPUT(pB)) {
-// ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 2, 0 );
-
- // Process packet from fifo a one atomic unit
- write_lock_irqsave(&pB->read_fifo_spinlock, bflags);
-
- // The first word (or two bytes) will have channel number and type of
- // packet, possibly other information
- pB->i2eLeadoffWord[0] = iiReadWord(pB);
-
- switch(PTYPE_OF(pB->i2eLeadoffWord))
- {
- case PTYPE_DATA:
- pB->got_input = 1;
-
-// ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 3, 0 );
-
- channel = CHANNEL_OF(pB->i2eLeadoffWord); /* Store channel */
- count = iiReadWord(pB); /* Count is in the next word */
-
-// NEW: Check the count for sanity! Should the hardware fail, our death
-// is more pleasant. While an oversize channel is acceptable (just more
-// than the driver supports), an over-length count clearly means we are
-// sick!
- if ( ((unsigned int)count) > IBUF_SIZE ) {
- pB->i2eFatal = 2;
- write_unlock_irqrestore(&pB->read_fifo_spinlock,
- bflags);
- return; /* Bail out ASAP */
- }
- // Channel is illegally big ?
- if ((channel >= pB->i2eChannelCnt) ||
- (NULL==(pCh = ((i2ChanStrPtr*)pB->i2eChannelPtr)[channel])))
- {
- iiReadBuf(pB, junkBuffer, count);
- write_unlock_irqrestore(&pB->read_fifo_spinlock,
- bflags);
- break; /* From switch: ready for next packet */
- }
-
- // Channel should be valid, then
-
- // If this is a hot-key, merely post its receipt for now. These are
- // always supposed to be 1-byte packets, so we won't even check the
- // count. Also we will post an acknowledgement to the board so that
- // more data can be forthcoming. Note that we are not trying to use
- // these sequences in this driver, merely to robustly ignore them.
- if(ID_OF(pB->i2eLeadoffWord) == ID_HOT_KEY)
- {
- pCh->hotKeyIn = iiReadWord(pB) & 0xff;
- write_unlock_irqrestore(&pB->read_fifo_spinlock,
- bflags);
- i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_HOTACK);
- break; /* From the switch: ready for next packet */
- }
-
- // Normal data! We crudely assume there is room for the data in our
- // buffer because the board wouldn't have exceeded his credit limit.
- write_lock_irqsave(&pCh->Ibuf_spinlock, cflags);
- // We have 2 locks now
- stuffIndex = pCh->Ibuf_stuff;
- amountToRead = IBUF_SIZE - stuffIndex;
- if (amountToRead > count)
- amountToRead = count;
-
- // stuffIndex would have been already adjusted so there would
- // always be room for at least one, and count is always at least
- // one.
-
- iiReadBuf(pB, &(pCh->Ibuf[stuffIndex]), amountToRead);
- pCh->icount.rx += amountToRead;
-
- // Update the stuffIndex by the amount of data moved. Note we could
- // never ask for more data than would just fit. However, we might
- // have read in one more byte than we wanted because the read
- // rounds up to even bytes. If this byte is on the end of the
- // packet, and is padding, we ignore it. If the byte is part of
- // the actual data, we need to move it.
-
- stuffIndex += amountToRead;
-
- if (stuffIndex >= IBUF_SIZE) {
- if ((amountToRead & 1) && (count > amountToRead)) {
- pCh->Ibuf[0] = pCh->Ibuf[IBUF_SIZE];
- amountToRead++;
- stuffIndex = 1;
- } else {
- stuffIndex = 0;
- }
- }
-
- // If there is anything left over, read it as well
- if (count > amountToRead) {
- amountToRead = count - amountToRead;
- iiReadBuf(pB, &(pCh->Ibuf[stuffIndex]), amountToRead);
- pCh->icount.rx += amountToRead;
- stuffIndex += amountToRead;
- }
-
- // Update stuff index
- pCh->Ibuf_stuff = stuffIndex;
- write_unlock_irqrestore(&pCh->Ibuf_spinlock, cflags);
- write_unlock_irqrestore(&pB->read_fifo_spinlock,
- bflags);
-
-#ifdef USE_IQ
- schedule_work(&pCh->tqueue_input);
-#else
- do_input(&pCh->tqueue_input);
-#endif
-
- // Note we do not need to maintain any flow-control credits at this
- // time: if we were to increment .asof and decrement .room, there
- // would be no net effect. Instead, when we strip data, we will
- // increment .asof and leave .room unchanged.
-
- break; // From switch: ready for next packet
-
- case PTYPE_STATUS:
- ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 4, 0 );
-
- count = CMD_COUNT_OF(pB->i2eLeadoffWord);
-
- iiReadBuf(pB, cmdBuffer, count);
- // We can release early with buffer grab
- write_unlock_irqrestore(&pB->read_fifo_spinlock,
- bflags);
-
- pc = cmdBuffer;
- pcLimit = &(cmdBuffer[count]);
-
- while (pc < pcLimit) {
- channel = *pc++;
-
- ip2trace (channel, ITRC_SFIFO, 7, 2, channel, *pc );
-
- /* check for valid channel */
- if (channel < pB->i2eChannelCnt
- &&
- (pCh = (((i2ChanStrPtr*)pB->i2eChannelPtr)[channel])) != NULL
- )
- {
- dss_change = 0;
-
- switch (uc = *pc++)
- {
- /* Breaks and modem signals are easy: just update status */
- case STAT_CTS_UP:
- if ( !(pCh->dataSetIn & I2_CTS) )
- {
- pCh->dataSetIn |= I2_DCTS;
- pCh->icount.cts++;
- dss_change = 1;
- }
- pCh->dataSetIn |= I2_CTS;
- break;
-
- case STAT_CTS_DN:
- if ( pCh->dataSetIn & I2_CTS )
- {
- pCh->dataSetIn |= I2_DCTS;
- pCh->icount.cts++;
- dss_change = 1;
- }
- pCh->dataSetIn &= ~I2_CTS;
- break;
-
- case STAT_DCD_UP:
- ip2trace (channel, ITRC_MODEM, 1, 1, pCh->dataSetIn );
-
- if ( !(pCh->dataSetIn & I2_DCD) )
- {
- ip2trace (CHANN, ITRC_MODEM, 2, 0 );
- pCh->dataSetIn |= I2_DDCD;
- pCh->icount.dcd++;
- dss_change = 1;
- }
- pCh->dataSetIn |= I2_DCD;
-
- ip2trace (channel, ITRC_MODEM, 3, 1, pCh->dataSetIn );
- break;
-
- case STAT_DCD_DN:
- ip2trace (channel, ITRC_MODEM, 4, 1, pCh->dataSetIn );
- if ( pCh->dataSetIn & I2_DCD )
- {
- ip2trace (channel, ITRC_MODEM, 5, 0 );
- pCh->dataSetIn |= I2_DDCD;
- pCh->icount.dcd++;
- dss_change = 1;
- }
- pCh->dataSetIn &= ~I2_DCD;
-
- ip2trace (channel, ITRC_MODEM, 6, 1, pCh->dataSetIn );
- break;
-
- case STAT_DSR_UP:
- if ( !(pCh->dataSetIn & I2_DSR) )
- {
- pCh->dataSetIn |= I2_DDSR;
- pCh->icount.dsr++;
- dss_change = 1;
- }
- pCh->dataSetIn |= I2_DSR;
- break;
-
- case STAT_DSR_DN:
- if ( pCh->dataSetIn & I2_DSR )
- {
- pCh->dataSetIn |= I2_DDSR;
- pCh->icount.dsr++;
- dss_change = 1;
- }
- pCh->dataSetIn &= ~I2_DSR;
- break;
-
- case STAT_RI_UP:
- if ( !(pCh->dataSetIn & I2_RI) )
- {
- pCh->dataSetIn |= I2_DRI;
- pCh->icount.rng++;
- dss_change = 1;
- }
- pCh->dataSetIn |= I2_RI ;
- break;
-
- case STAT_RI_DN:
- // to be compat with serial.c
- //if ( pCh->dataSetIn & I2_RI )
- //{
- // pCh->dataSetIn |= I2_DRI;
- // pCh->icount.rng++;
- // dss_change = 1;
- //}
- pCh->dataSetIn &= ~I2_RI ;
- break;
-
- case STAT_BRK_DET:
- pCh->dataSetIn |= I2_BRK;
- pCh->icount.brk++;
- dss_change = 1;
- break;
-
- // Bookmarks? one less request we're waiting for
- case STAT_BMARK:
- pCh->bookMarks--;
- if (pCh->bookMarks <= 0 ) {
- pCh->bookMarks = 0;
- wake_up_interruptible( &pCh->pBookmarkWait );
-
- ip2trace (channel, ITRC_DRAIN, 20, 1, pCh->BookmarkTimer.expires );
- }
- break;
-
- // Flow control packets? Update the new credits, and if
- // someone was waiting for output, queue him up again.
- case STAT_FLOW:
- pCh->outfl.room =
- ((flowStatPtr)pc)->room -
- (pCh->outfl.asof - ((flowStatPtr)pc)->asof);
-
- ip2trace (channel, ITRC_STFLW, 1, 1, pCh->outfl.room );
-
- if (pCh->channelNeeds & NEED_CREDIT)
- {
- ip2trace (channel, ITRC_STFLW, 2, 1, pCh->channelNeeds);
-
- pCh->channelNeeds &= ~NEED_CREDIT;
- i2QueueNeeds(pB, pCh, NEED_INLINE);
- if ( pCh->pTTY )
- ip2_owake(pCh->pTTY);
- }
-
- ip2trace (channel, ITRC_STFLW, 3, 1, pCh->channelNeeds);
-
- pc += sizeof(flowStat);
- break;
-
- /* Special packets: */
- /* Just copy the information into the channel structure */
-
- case STAT_STATUS:
-
- pCh->channelStatus = *((debugStatPtr)pc);
- pc += sizeof(debugStat);
- break;
-
- case STAT_TXCNT:
-
- pCh->channelTcount = *((cntStatPtr)pc);
- pc += sizeof(cntStat);
- break;
-
- case STAT_RXCNT:
-
- pCh->channelRcount = *((cntStatPtr)pc);
- pc += sizeof(cntStat);
- break;
-
- case STAT_BOXIDS:
- pB->channelBtypes = *((bidStatPtr)pc);
- pc += sizeof(bidStat);
- set_baud_params(pB);
- break;
-
- case STAT_HWFAIL:
- i2QueueCommands (PTYPE_INLINE, pCh, 0, 1, CMD_HW_TEST);
- pCh->channelFail = *((failStatPtr)pc);
- pc += sizeof(failStat);
- break;
-
- /* No explicit match? then
- * Might be an error packet...
- */
- default:
- switch (uc & STAT_MOD_ERROR)
- {
- case STAT_ERROR:
- if (uc & STAT_E_PARITY) {
- pCh->dataSetIn |= I2_PAR;
- pCh->icount.parity++;
- }
- if (uc & STAT_E_FRAMING){
- pCh->dataSetIn |= I2_FRA;
- pCh->icount.frame++;
- }
- if (uc & STAT_E_OVERRUN){
- pCh->dataSetIn |= I2_OVR;
- pCh->icount.overrun++;
- }
- break;
-
- case STAT_MODEM:
- // the answer to DSS_NOW request (not change)
- pCh->dataSetIn = (pCh->dataSetIn
- & ~(I2_RI | I2_CTS | I2_DCD | I2_DSR) )
- | xlatDss[uc & 0xf];
- wake_up_interruptible ( &pCh->dss_now_wait );
- default:
- break;
- }
- } /* End of switch on status type */
- if (dss_change) {
-#ifdef USE_IQ
- schedule_work(&pCh->tqueue_status);
-#else
- do_status(&pCh->tqueue_status);
-#endif
- }
- }
- else /* Or else, channel is invalid */
- {
- // Even though the channel is invalid, we must test the
- // status to see how much additional data it has (to be
- // skipped)
- switch (*pc++)
- {
- case STAT_FLOW:
- pc += 4; /* Skip the data */
- break;
-
- default:
- break;
- }
- }
- } // End of while (there is still some status packet left)
- break;
-
- default: // Neither packet? should be impossible
- ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 5, 1,
- PTYPE_OF(pB->i2eLeadoffWord) );
- write_unlock_irqrestore(&pB->read_fifo_spinlock,
- bflags);
-
- break;
- } // End of switch on type of packets
- } /*while(board I2_HAS_INPUT)*/
-
- ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_RETURN, 0 );
-
- // Send acknowledgement to the board even if there was no data!
- pB->i2eOutMailWaiting |= MB_IN_STRIPPED;
- return;
-}
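// Worked example (stand-alone, hypothetical numbers) of the credit update done
// in the STAT_FLOW case above: the board reports its free room as of the byte
// count it had seen, so anything we queued since that snapshot is subtracted.
int sketch_flow_room(int reported_room, int reported_asof, int local_asof)
{
	return reported_room - (local_asof - reported_asof);
}
/* e.g. the board reports room = 700 as of asof = 1000 while we have already
 * queued up to asof = 1040: the usable credit is 700 - 40 = 660 bytes. */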
-
-//******************************************************************************
-// Function: i2Write2Fifo(pB,address,count)
-// Parameters: Pointer to a board structure, source address, byte count
-// Returns: bytes written
-//
-// Description:
-//  Writes count bytes from source to the board's (implied) I/O address.
-//  Adjusts the remaining fifo count, leaving a reserve so bypass commands can
-//  still be sent next time around.
-//******************************************************************************
-static int
-i2Write2Fifo(i2eBordStrPtr pB, unsigned char *source, int count,int reserve)
-{
- int rc = 0;
- unsigned long flags;
- write_lock_irqsave(&pB->write_fifo_spinlock, flags);
- if (!pB->i2eWaitingForEmptyFifo) {
- if (pB->i2eFifoRemains > (count+reserve)) {
- pB->i2eFifoRemains -= count;
- iiWriteBuf(pB, source, count);
- pB->i2eOutMailWaiting |= MB_OUT_STUFFED;
- rc = count;
- }
- }
- write_unlock_irqrestore(&pB->write_fifo_spinlock, flags);
- return rc;
-}
-//******************************************************************************
-// Function: i2StuffFifoBypass(pB)
-// Parameters: Pointer to a board structure
-// Returns: Nothing
-//
-// Description:
-// Stuffs as many bypass commands into the fifo as possible. This is simpler
-// than stuffing data or inline commands to fifo, since we do not have
-// flow-control to deal with.
-//******************************************************************************
-static inline void
-i2StuffFifoBypass(i2eBordStrPtr pB)
-{
- i2ChanStrPtr pCh;
- unsigned char *pRemove;
- unsigned short stripIndex;
- unsigned short packetSize;
- unsigned short paddedSize;
- unsigned short notClogged = 1;
- unsigned long flags;
-
- int bailout = 1000;
-
- // Continue processing so long as there are entries, or there is room in the
- // fifo. Each entry represents a channel with something to do.
- while ( --bailout && notClogged &&
- (NULL != (pCh = i2DeQueueNeeds(pB,NEED_BYPASS))))
- {
- write_lock_irqsave(&pCh->Cbuf_spinlock, flags);
- stripIndex = pCh->Cbuf_strip;
-
- // as long as there are packets for this channel...
-
- while (stripIndex != pCh->Cbuf_stuff) {
- pRemove = &(pCh->Cbuf[stripIndex]);
- packetSize = CMD_COUNT_OF(pRemove) + sizeof(i2CmdHeader);
- paddedSize = roundup(packetSize, 2);
-
- if (paddedSize > 0) {
- if ( 0 == i2Write2Fifo(pB, pRemove, paddedSize,0)) {
- notClogged = 0; /* fifo full */
- i2QueueNeeds(pB, pCh, NEED_BYPASS); // Put back on queue
- break; // Break from the channel
- }
- }
-#ifdef DEBUG_FIFO
-WriteDBGBuf("BYPS", pRemove, paddedSize);
-#endif /* DEBUG_FIFO */
- pB->debugBypassCount++;
-
- pRemove += packetSize;
- stripIndex += packetSize;
- if (stripIndex >= CBUF_SIZE) {
- stripIndex = 0;
- pRemove = pCh->Cbuf;
- }
- }
- // Done with this channel. Move to next, removing this one from
- // the queue of channels if we cleaned it out (i.e., didn't get clogged).
- pCh->Cbuf_strip = stripIndex;
- write_unlock_irqrestore(&pCh->Cbuf_spinlock, flags);
- } // Either clogged or finished all the work
-
-#ifdef IP2DEBUG_TRACE
- if ( !bailout ) {
- ip2trace (ITRC_NO_PORT, ITRC_ERROR, 1, 0 );
- }
-#endif
-}
-
-//******************************************************************************
-// Function: i2StuffFifoFlow(pB)
-// Parameters: Pointer to a board structure
-// Returns: Nothing
-//
-// Description:
-// Stuffs as many flow control packets into the fifo as possible. This is easier
-// even than doing normal bypass commands, because there is always at most one
-// packet, already assembled, for each channel.
-//******************************************************************************
-static inline void
-i2StuffFifoFlow(i2eBordStrPtr pB)
-{
- i2ChanStrPtr pCh;
- unsigned short paddedSize = roundup(sizeof(flowIn), 2);
-
- ip2trace (ITRC_NO_PORT, ITRC_SFLOW, ITRC_ENTER, 2,
- pB->i2eFifoRemains, paddedSize );
-
- // Continue processing so long as there are entries, or there is room in the
- // fifo. Each entry represents a channel with something to do.
- while ( (NULL != (pCh = i2DeQueueNeeds(pB,NEED_FLOW)))) {
- pB->debugFlowCount++;
-
- // NO Chan LOCK needed ???
- if ( 0 == i2Write2Fifo(pB,(unsigned char *)&(pCh->infl),paddedSize,0)) {
- break;
- }
-#ifdef DEBUG_FIFO
- WriteDBGBuf("FLOW",(unsigned char *) &(pCh->infl), paddedSize);
-#endif /* DEBUG_FIFO */
-
- } // Either clogged or finished all the work
-
- ip2trace (ITRC_NO_PORT, ITRC_SFLOW, ITRC_RETURN, 0 );
-}
-
-//******************************************************************************
-// Function: i2StuffFifoInline(pB)
-// Parameters: Pointer to a board structure
-// Returns: Nothing
-//
-// Description:
-// Stuffs as much data and inline commands into the fifo as possible. This is
-// the most complex fifo-stuffing operation, since there is now the channel
-// flow-control issue to deal with.
-//******************************************************************************
-static inline void
-i2StuffFifoInline(i2eBordStrPtr pB)
-{
- i2ChanStrPtr pCh;
- unsigned char *pRemove;
- unsigned short stripIndex;
- unsigned short packetSize;
- unsigned short paddedSize;
- unsigned short notClogged = 1;
- unsigned short flowsize;
- unsigned long flags;
-
- int bailout = 1000;
- int bailout2;
-
- ip2trace (ITRC_NO_PORT, ITRC_SICMD, ITRC_ENTER, 3, pB->i2eFifoRemains,
- pB->i2Dbuf_strip, pB->i2Dbuf_stuff );
-
- // Continue processing so long as there are entries, or there is room in the
- // fifo. Each entry represents a channel with something to do.
- while ( --bailout && notClogged &&
- (NULL != (pCh = i2DeQueueNeeds(pB,NEED_INLINE))) )
- {
- write_lock_irqsave(&pCh->Obuf_spinlock, flags);
- stripIndex = pCh->Obuf_strip;
-
- ip2trace (CHANN, ITRC_SICMD, 3, 2, stripIndex, pCh->Obuf_stuff );
-
- // as long as there are packets for this channel...
- bailout2 = 1000;
- while ( --bailout2 && stripIndex != pCh->Obuf_stuff) {
- pRemove = &(pCh->Obuf[stripIndex]);
-
- // Must determine whether this be a data or command packet to
- // calculate correctly the header size and the amount of
- // flow-control credit this type of packet will use.
- if (PTYPE_OF(pRemove) == PTYPE_DATA) {
- flowsize = DATA_COUNT_OF(pRemove);
- packetSize = flowsize + sizeof(i2DataHeader);
- } else {
- flowsize = CMD_COUNT_OF(pRemove);
- packetSize = flowsize + sizeof(i2CmdHeader);
- }
- flowsize = CREDIT_USAGE(flowsize);
- paddedSize = roundup(packetSize, 2);
-
- ip2trace (CHANN, ITRC_SICMD, 4, 2, pB->i2eFifoRemains, paddedSize );
-
- // If we don't have enough credits from the board to send the data,
- // flag the channel that we are waiting for flow control credit, and
- // break out. This will clean up this channel and remove us from the
- // queue of hot things to do.
-
- ip2trace (CHANN, ITRC_SICMD, 5, 2, pCh->outfl.room, flowsize );
-
- if (pCh->outfl.room <= flowsize) {
- // Do Not have the credits to send this packet.
- i2QueueNeeds(pB, pCh, NEED_CREDIT);
- notClogged = 0;
- break; // So to do next channel
- }
- if ( (paddedSize > 0)
- && ( 0 == i2Write2Fifo(pB, pRemove, paddedSize, 128))) {
- // Do Not have room in fifo to send this packet.
- notClogged = 0;
- i2QueueNeeds(pB, pCh, NEED_INLINE);
- break; // Break from the channel
- }
-#ifdef DEBUG_FIFO
-WriteDBGBuf("DATA", pRemove, paddedSize);
-#endif /* DEBUG_FIFO */
- pB->debugInlineCount++;
-
- pCh->icount.tx += flowsize;
- // Update current credits
- pCh->outfl.room -= flowsize;
- pCh->outfl.asof += flowsize;
- if (PTYPE_OF(pRemove) == PTYPE_DATA) {
- pCh->Obuf_char_count -= DATA_COUNT_OF(pRemove);
- }
- pRemove += packetSize;
- stripIndex += packetSize;
-
- ip2trace (CHANN, ITRC_SICMD, 6, 2, stripIndex, pCh->Obuf_strip);
-
- if (stripIndex >= OBUF_SIZE) {
- stripIndex = 0;
- pRemove = pCh->Obuf;
-
- ip2trace (CHANN, ITRC_SICMD, 7, 1, stripIndex );
-
- }
- } /* while */
- if ( !bailout2 ) {
- ip2trace (CHANN, ITRC_ERROR, 3, 0 );
- }
- // Done with this channel. Move to next, removing this one from the
- // queue of channels if we cleaned it out (i.e., didn't get clogged).
- pCh->Obuf_strip = stripIndex;
- write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
- if ( notClogged )
- {
-
- ip2trace (CHANN, ITRC_SICMD, 8, 0 );
-
- if ( pCh->pTTY ) {
- ip2_owake(pCh->pTTY);
- }
- }
- } // Either clogged or finished all the work
-
- if ( !bailout ) {
- ip2trace (ITRC_NO_PORT, ITRC_ERROR, 4, 0 );
- }
-
- ip2trace (ITRC_NO_PORT, ITRC_SICMD, ITRC_RETURN, 1,pB->i2Dbuf_strip);
-}
-
-//******************************************************************************
-// Function: serviceOutgoingFifo(pB)
-// Parameters: Pointer to a board structure
-// Returns: Nothing
-//
-// Description:
-// Helper routine to put data in the outgoing fifo, if we aren't already waiting
-// for something to be there. If the fifo only has room for very little data,
-// go ahead and hit the board with a mailbox hit immediately. Otherwise, it will
-// have to happen later in the interrupt processing. Since this routine may be
-// called both at interrupt and foreground time, we must turn off interrupts
-// during the entire process.
-//******************************************************************************
-static void
-serviceOutgoingFifo(i2eBordStrPtr pB)
-{
- // If we aren't currently waiting for the board to empty our fifo, service
- // everything that is pending, in priority order (especially, Bypass before
- // Inline).
- if ( ! pB->i2eWaitingForEmptyFifo )
- {
- i2StuffFifoFlow(pB);
- i2StuffFifoBypass(pB);
- i2StuffFifoInline(pB);
-
- iiSendPendingMail(pB);
- }
-}
-
-//******************************************************************************
-// Function: i2ServiceBoard(pB)
-// Parameters: Pointer to a board structure
-// Returns: Nothing
-//
-// Description:
-// Normally this is called from interrupt level, but there is deliberately
-// nothing in here specific to being called from interrupt level. All the
-// hardware-specific, interrupt-specific things happen at the outer levels.
-//
-// For example, a timer interrupt could drive this routine for some sort of
-// polled operation. The only requirement is that the programmer deal with any
-// atomicity/concurrency issues that result.
-//
-// This routine responds to the board's having sent mailbox information to the
-// host (which would normally cause an interrupt). This routine reads the
-// incoming mailbox. If there is no data in it, this board did not create the
-// interrupt and/or has nothing to be done to it. (Except, if we have been
-// waiting to write mailbox data to it, we may do so.)
-//
-// Based on the value in the mailbox, we may take various actions.
-//
-// No checking here of pB validity: after all, it shouldn't have been called by
-// the handler unless pB were on the list.
-//******************************************************************************
-static inline int
-i2ServiceBoard ( i2eBordStrPtr pB )
-{
- unsigned inmail;
- unsigned long flags;
-
-
- /* This should be atomic because of the way we are called... */
- if (NO_MAIL_HERE == ( inmail = pB->i2eStartMail ) ) {
- inmail = iiGetMail(pB);
- }
- pB->i2eStartMail = NO_MAIL_HERE;
-
- ip2trace (ITRC_NO_PORT, ITRC_INTR, 2, 1, inmail );
-
- if (inmail != NO_MAIL_HERE) {
- // If the board has gone fatal, nothing to do but hit a bit that will
- // alert foreground tasks to protest!
- if ( inmail & MB_FATAL_ERROR ) {
- pB->i2eFatal = 1;
- goto exit_i2ServiceBoard;
- }
-
- /* Assuming no fatal condition, we proceed to do work */
- if ( inmail & MB_IN_STUFFED ) {
- pB->i2eFifoInInts++;
- i2StripFifo(pB); /* There might be incoming packets */
- }
-
- if (inmail & MB_OUT_STRIPPED) {
- pB->i2eFifoOutInts++;
- write_lock_irqsave(&pB->write_fifo_spinlock, flags);
- pB->i2eFifoRemains = pB->i2eFifoSize;
- pB->i2eWaitingForEmptyFifo = 0;
- write_unlock_irqrestore(&pB->write_fifo_spinlock,
- flags);
-
- ip2trace (ITRC_NO_PORT, ITRC_INTR, 30, 1, pB->i2eFifoRemains );
-
- }
- serviceOutgoingFifo(pB);
- }
-
- ip2trace (ITRC_NO_PORT, ITRC_INTR, 8, 0 );
-
-exit_i2ServiceBoard:
-
- return 0;
-}
diff --git a/drivers/staging/tty/ip2/i2lib.h b/drivers/staging/tty/ip2/i2lib.h
deleted file mode 100644
index e559e9bac06..00000000000
--- a/drivers/staging/tty/ip2/i2lib.h
+++ /dev/null
@@ -1,351 +0,0 @@
-/*******************************************************************************
-*
-* (c) 1998 by Computone Corporation
-*
-********************************************************************************
-*
-*
-* PACKAGE: Linux tty Device Driver for IntelliPort II family of multiport
-* serial I/O controllers.
-*
-* DESCRIPTION: Header file for high level library functions
-*
-*******************************************************************************/
-#ifndef I2LIB_H
-#define I2LIB_H 1
-//------------------------------------------------------------------------------
-// I2LIB.H
-//
-// IntelliPort-II and IntelliPort-IIEX
-//
-// Defines, structure definitions, and external declarations for i2lib.c
-//------------------------------------------------------------------------------
-//--------------------------------------
-// Mandatory Includes:
-//--------------------------------------
-#include "ip2types.h"
-#include "i2ellis.h"
-#include "i2pack.h"
-#include "i2cmd.h"
-#include <linux/workqueue.h>
-
-//------------------------------------------------------------------------------
-// i2ChanStr -- Channel Structure:
-// Used to track per-channel information for the library routines using standard
-// loadware. Note also, a pointer to an array of these structures is patched
-// into the i2eBordStr (see i2ellis.h)
-//------------------------------------------------------------------------------
-//
-// If we make some limits on the maximum block sizes, we can avoid dealing with
-// buffer wrap. The wrapping of the buffer is based on where the start of the
-// packet is. Then there is always room for the packet contiguously.
-//
-// Maximum total length of an outgoing data or in-line command block. The limit
-// of 36 on data is quite arbitrary and based more on DOS memory limitations
-// than the board interface. However, for commands, the maximum packet length is
-// MAX_CMD_PACK_SIZE, because the field size for the count is only a few bits
-// (see I2PACK.H) in such packets. For data packets, the count field size is not
-// the limiting factor. As of this writing, MAX_OBUF_BLOCK < MAX_CMD_PACK_SIZE,
-// but be careful if wanting to modify either.
-//
-#define MAX_OBUF_BLOCK 36
-
-// Another note on maximum block sizes: we are buffering packets here. Data is
-// put into the buffer (if there is room) regardless of the credits from the
-// board. The board sends new credits whenever it has removed from his buffers a
-// number of characters equal to 80% of total buffer size. (Of course, the total
-// buffer size is what is reported when the very first set of flow control
-// status packets are received from the board. Therefore, to be robust, you must
-// always fill the board to at least 80% of the current credit limit, else you
-// might not give it enough to trigger a new report. These conditions are
-// obtained here so long as the maximum output block size is less than 20% the
-// size of the board's output buffers. This is true at present by "coincidence"
-// or "infernal knowledge": the board's output buffers are at least 700 bytes
-// long (20% = 140 bytes, at least). The 80% figure is "official", so the safest
-// strategy might be to trap the first flow control report and guarantee that
-// the effective maxObufBlock is the minimum of MAX_OBUF_BLOCK and 20% of first
-// reported buffer credit.
-//
-#define MAX_CBUF_BLOCK 6 // Maximum total length of a bypass command block
-
-#define IBUF_SIZE 512 // character capacity of input buffer per channel
-#define OBUF_SIZE 1024 // character capacity of output buffer per channel
-#define CBUF_SIZE 10 // character capacity of output bypass buffer
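// Stand-alone sanity check (not compiled into the driver) of the sizing rule
// discussed above: MAX_OBUF_BLOCK must stay at or below 20% of the smallest
// board output buffer (at least 700 bytes) so that credit reports keep coming.
#include <assert.h>

int main(void)
{
	const int max_obuf_block = 36;	/* MAX_OBUF_BLOCK above */
	const int min_board_obuf = 700;	/* per the comment above */

	assert(max_obuf_block <= min_board_obuf / 5);	/* 36 <= 140 */
	return 0;
}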
-
-typedef struct _i2ChanStr
-{
- // First, back-pointers so that given a pointer to this structure, you can
- // determine the correct board and channel number to reference, (say, when
- // issuing commands, etc. (Note, channel number is in infl.hd.i2sChannel.)
-
- int port_index; // Index of port in channel structure array attached
- // to board structure.
- PTTY pTTY; // Pointer to tty structure for port (OS specific)
- USHORT validity; // Indicates whether the given channel has been
- // initialized, really exists (or is a missing
- // channel, e.g. channel 9 on an 8-port box.)
-
- i2eBordStrPtr pMyBord; // Back-pointer to this channel's board structure
-
- int wopen; // waiting for carrier
-
- int throttled; // Set if upper layer can take no data
-
- int flags; // Defined in tty.h
-
- PWAITQ open_wait; // Pointer for OS sleep function.
- PWAITQ close_wait; // Pointer for OS sleep function.
- PWAITQ delta_msr_wait;// Pointer for OS sleep function.
- PWAITQ dss_now_wait; // Pointer for OS sleep function.
-
- struct timer_list BookmarkTimer; // Used by i2DrainOutput
- wait_queue_head_t pBookmarkWait; // Used by i2DrainOutput
-
- int BaudBase;
- int BaudDivisor;
-
- USHORT ClosingDelay;
- USHORT ClosingWaitTime;
-
- volatile
- flowIn infl; // This structure is initialized as a completely
- // formed flow-control command packet, and as such
- // has the channel number, also the capacity and
- // "as-of" data needed continuously.
-
- USHORT sinceLastFlow; // Counts the number of characters read from input
- // buffers, since the last time flow control info
- // was sent.
-
- USHORT whenSendFlow; // Determines when new flow control is to be sent to
- // the board. Note that unlike earlier manifestations
- // of the driver, these packets can be sent in place.
-
- USHORT channelNeeds; // Bit map of important things which must be done
- // for this channel. (See bits below )
-
- volatile
- flowStat outfl; // Same type of structure is used to hold current
- // flow control information used to control our
- // output. "asof" is kept updated as data is sent,
- // and "room" never goes to zero.
-
- // The incoming ring buffer
- // Unlike the outgoing buffers, this holds raw data, not packets. The two
- // extra bytes are used to hold the byte-padding when there is room for an
- // odd number of bytes before we must wrap.
- //
- UCHAR Ibuf[IBUF_SIZE + 2];
- volatile
- USHORT Ibuf_stuff; // Stuffing index
- volatile
- USHORT Ibuf_strip; // Stripping index
-
- // The outgoing ring-buffer: holds data and command packets. N.B., even
- // though these are in the channel structure, the channel number is also
- // written into each packet, making it easier to send to the fifo when
- // ready. HOWEVER, individual packets here are NOT padded to even length:
- // the routines for writing blocks to the fifo pad to even byte counts.
- // (A free-space sketch follows this structure definition.)
- //
- UCHAR Obuf[OBUF_SIZE+MAX_OBUF_BLOCK+4];
- volatile
- USHORT Obuf_stuff; // Stuffing index
- volatile
- USHORT Obuf_strip; // Stripping index
- int Obuf_char_count;
-
- // The outgoing bypass-command buffer. Unlike earlier manifestations, the
- // flow control packets are sent directly from the structures. As above, the
- // channel number is included in the packet, but they are NOT padded to even
- // size.
- //
- UCHAR Cbuf[CBUF_SIZE+MAX_CBUF_BLOCK+2];
- volatile
- USHORT Cbuf_stuff; // Stuffing index
- volatile
- USHORT Cbuf_strip; // Stripping index
-
- // The temporary buffer for the Linux tty driver PutChar entry.
- //
- UCHAR Pbuf[MAX_OBUF_BLOCK - sizeof (i2DataHeader)];
- volatile
- USHORT Pbuf_stuff; // Stuffing index
-
- // The state of incoming data-set signals
- //
- USHORT dataSetIn; // Bit-mapped according to below. Also indicates
- // whether a break has been detected since last
- // inquiry.
-
- // The state of outgoing data-set signals (as far as we can tell!)
- //
- USHORT dataSetOut; // Bit-mapped according to below.
-
- // Most recent hot-key identifier detected
- //
- USHORT hotKeyIn; // Hot key as sent by the board, HOT_CLEAR indicates
- // no hot key detected since last examined.
-
- // Counter of outstanding requests for bookmarks
- //
- short bookMarks; // Number of outstanding bookmark requests (incremented
- // whenever a bookmark request is queued up, decremented
- // whenever a bookmark is received).
-
- // Misc options
- //
- USHORT channelOptions; // See below
-
- // To store various incoming special packets
- //
- debugStat channelStatus;
- cntStat channelRcount;
- cntStat channelTcount;
- failStat channelFail;
-
- // To store the last values for line characteristics we sent to the board.
- //
- int speed;
-
- int flush_flags;
-
- void (*trace)(unsigned short,unsigned char,unsigned char,unsigned long,...);
-
- /*
- * Kernel counters for the 4 input interrupts
- */
- struct async_icount icount;
-
- /*
- * Task queues for processing input packets from the board.
- */
- struct work_struct tqueue_input;
- struct work_struct tqueue_status;
- struct work_struct tqueue_hangup;
-
- rwlock_t Ibuf_spinlock;
- rwlock_t Obuf_spinlock;
- rwlock_t Cbuf_spinlock;
- rwlock_t Pbuf_spinlock;
-
-} i2ChanStr, *i2ChanStrPtr;
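As a free-space sketch for the stuffing/stripping index scheme used by Ibuf, Obuf and Cbuf above (a simplified illustration, not the driver's exact arithmetic; the real Obuf and Cbuf carry extra slack so a whole packet always fits contiguously and the wrap happens at a packet boundary):

/* Sketch: with a "stuff" index advanced by the producer and a "strip"
 * index advanced by the consumer over a nominal ring of "size" bytes,
 * the amount buffered and the room remaining are as below.  One byte is
 * kept unused so that a full ring is distinguishable from an empty one. */
static unsigned short ring_data(unsigned short stuff, unsigned short strip,
				unsigned short size)
{
	return (unsigned short)((stuff - strip + size) % size);
}

static unsigned short ring_room(unsigned short stuff, unsigned short strip,
				unsigned short size)
{
	return (unsigned short)(size - 1 - ring_data(stuff, strip, size));
}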
-
-//---------------------------------------------------
-// Manifests and bit-maps for elements in i2ChanStr
-//---------------------------------------------------
-//
-// flush flags
-//
-#define STARTFL_FLAG 1
-#define STOPFL_FLAG 2
-
-// validity
-//
-#define CHANNEL_MAGIC_BITS 0xff00
-#define CHANNEL_MAGIC 0x5300 // (validity & CHANNEL_MAGIC_BITS) ==
- // CHANNEL_MAGIC --> structure good
-
-#define CHANNEL_SUPPORT 0x0001 // Indicates channel is supported, exists,
- // and passed P.O.S.T.
-
-// channelNeeds
-//
-#define NEED_FLOW 1 // Indicates flow control has been queued
-#define NEED_INLINE 2 // Indicates inline commands or data queued
-#define NEED_BYPASS 4 // Indicates bypass commands queued
-#define NEED_CREDIT 8 // Indicates would be sending except has not sufficient
- // credit. The data is still in the channel structure,
- // but the channel is not enqueued in the board
- // structure again until there is a credit received from
- // the board.
-
-// dataSetIn (Also the bits for i2GetStatus return value)
-//
-#define I2_DCD 1
-#define I2_CTS 2
-#define I2_DSR 4
-#define I2_RI 8
-
-// dataSetOut (Also the bits for i2GetStatus return value)
-//
-#define I2_DTR 1
-#define I2_RTS 2
-
-// i2GetStatus() can optionally clear these bits
-//
-#define I2_BRK 0x10 // A break was detected
-#define I2_PAR 0x20 // A parity error was received
-#define I2_FRA 0x40 // A framing error was received
-#define I2_OVR 0x80 // An overrun error was received
-
-// i2GetStatus() automatically clears these bits
-//
-#define I2_DDCD 0x100 // DCD changed from its former value
-#define I2_DCTS 0x200 // CTS changed from its former value
-#define I2_DDSR 0x400 // DSR changed from its former value
-#define I2_DRI 0x800 // RI changed from its former value
-
-// hotKeyIn
-//
-#define HOT_CLEAR 0x1322 // Indicates that no hot-key has been detected
-
-// channelOptions
-//
-#define CO_NBLOCK_WRITE 1 // Writes don't block waiting for buffer. (Default
- // is, they do wait.)
-
-// fcmodes
-//
-#define I2_OUTFLOW_CTS 0x0001
-#define I2_INFLOW_RTS 0x0002
-#define I2_INFLOW_DSR 0x0004
-#define I2_INFLOW_DTR 0x0008
-#define I2_OUTFLOW_DSR 0x0010
-#define I2_OUTFLOW_DTR 0x0020
-#define I2_OUTFLOW_XON 0x0040
-#define I2_OUTFLOW_XANY 0x0080
-#define I2_INFLOW_XON 0x0100
-
-#define I2_CRTSCTS (I2_OUTFLOW_CTS|I2_INFLOW_RTS)
-#define I2_IXANY_MODE (I2_OUTFLOW_XON|I2_OUTFLOW_XANY)
-
-//-------------------------------------------
-// Macros used from user level like functions
-//-------------------------------------------
-
-// Macros to set and clear channel options
-//
-#define i2SetOption(pCh, option) pCh->channelOptions |= option
-#define i2ClrOption(pCh, option) pCh->channelOptions &= ~option
-
-// Macro to set fatal-error trap
-//
-#define i2SetFatalTrap(pB, routine) pB->i2eFatalTrap = routine
-
-//--------------------------------------------
-// Declarations and prototypes for i2lib.c
-//--------------------------------------------
-//
-static int i2InitChannels(i2eBordStrPtr, int, i2ChanStrPtr);
-static int i2QueueCommands(int, i2ChanStrPtr, int, int, cmdSyntaxPtr,...);
-static int i2GetStatus(i2ChanStrPtr, int);
-static int i2Input(i2ChanStrPtr);
-static int i2InputFlush(i2ChanStrPtr);
-static int i2Output(i2ChanStrPtr, const char *, int);
-static int i2OutputFree(i2ChanStrPtr);
-static int i2ServiceBoard(i2eBordStrPtr);
-static void i2DrainOutput(i2ChanStrPtr, int);
-
-#ifdef IP2DEBUG_TRACE
-void ip2trace(unsigned short,unsigned char,unsigned char,unsigned long,...);
-#else
-#define ip2trace(a,b,c,d...) do {} while (0)
-#endif
-
-// Argument to i2QueueCommands
-//
-#define C_IN_LINE 1
-#define C_BYPASS 0
-
-#endif // I2LIB_H
diff --git a/drivers/staging/tty/ip2/i2pack.h b/drivers/staging/tty/ip2/i2pack.h
deleted file mode 100644
index 00342a677c9..00000000000
--- a/drivers/staging/tty/ip2/i2pack.h
+++ /dev/null
@@ -1,364 +0,0 @@
-/*******************************************************************************
-*
-* (c) 1998 by Computone Corporation
-*
-********************************************************************************
-*
-*
-* PACKAGE: Linux tty Device Driver for IntelliPort II family of multiport
-* serial I/O controllers.
-*
-* DESCRIPTION: Definitions of the packets used to transfer data and commands
-* Host <--> Board. Information provided here is only applicable
-* when the standard loadware is active.
-*
-*******************************************************************************/
-#ifndef I2PACK_H
-#define I2PACK_H 1
-
-//-----------------------------------------------
-// Revision History:
-//
-// 10 October 1991 MAG First draft
-// 24 February 1992 MAG Additions for 1.4.x loadware
-// 11 March 1992 MAG New status packets
-//
-//-----------------------------------------------
-
-//------------------------------------------------------------------------------
-// Packet Formats:
-//
-// Information passes between the host and board through the FIFO in packets.
-// These have headers which indicate the type of packet. Because the fifo data
-// path may be 16-bits wide, the protocol is constrained such that each packet
-// is always padded to an even byte count. (The lower-level interface routines
-// -- i2ellis.c -- are designed to do this).
-//
-// The sender (be it host or board) must place some number of complete packets
-// in the fifo, then place a message in the mailbox that packets are available.
-// Placing such a message interrupts the "receiver" (be it board or host), who
-// reads the mailbox message and determines that there are incoming packets
-// ready. Since there are no partial packets, and the length of a packet is
-// given in the header, the remainder of the packet can be read without checking
-// for FIFO empty condition. The process is repeated, packet by packet, until
-// the incoming FIFO is empty. Then the receiver uses the outbound mailbox to
-// signal the board that it has read the data. Only then can the sender place
-// additional data in the fifo.
-//------------------------------------------------------------------------------
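A minimal sketch of the receive side of this handshake, using the MB_* mailbox bits defined at the end of this header; the struct and helpers here (pkt_header, read_mailbox, fifo_empty, fifo_read, dispatch_packet, signal_mailbox) are hypothetical placeholders, not part of this driver:

/* Hypothetical interface, for illustration only. */
struct pkt_header {
	unsigned char	channel;
	unsigned char	flags;
	unsigned short	count;		/* length of the body that follows */
};

extern unsigned char read_mailbox(void);
extern void signal_mailbox(unsigned char bits);
extern int fifo_empty(void);
extern void fifo_read(void *dst, unsigned int nbytes);
extern void dispatch_packet(const struct pkt_header *hd,
			    const unsigned char *body);

static unsigned char pkt_body[4096];

/* Drain every complete packet, then tell the sender it may refill. */
static void drain_incoming_fifo(void)
{
	if (!(read_mailbox() & MB_IN_STUFFED))		/* nothing was posted */
		return;

	while (!fifo_empty()) {
		struct pkt_header hd;

		fifo_read(&hd, sizeof(hd));		  /* header carries the length */
		fifo_read(pkt_body, (hd.count + 1) & ~1); /* padded to even */
		dispatch_packet(&hd, pkt_body);		  /* never a partial packet */
	}

	signal_mailbox(MB_IN_STRIPPED);			/* sender may now refill */
}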
-//
-//------------------------------------------------
-// Definition of Packet Header Area
-//------------------------------------------------
-//
-// Caution: these only define header areas. In actual use the data runs off
-// beyond the end of these structures.
-//
-// Since these structures are based on sequences of bytes which go to the board,
-// there cannot be ANY padding between the elements.
-#pragma pack(1)
-
-//----------------------------
-// DATA PACKETS
-//----------------------------
-
-typedef struct _i2DataHeader
-{
- unsigned char i2sChannel; /* The channel number: 0-255 */
-
- // -- Bitfields are allocated LSB first --
-
- // For incoming data, indicates whether this is an ordinary packet or a
- // special one (e.g., hot key hit).
- unsigned i2sId : 2 __attribute__ ((__packed__));
-
- // For tagging data packets. There are flush commands which flush only data
- // packets bearing a particular tag. (used in implementing IntelliView and
- // IntelliPrint). THE TAG VALUE 0xf is RESERVED and must not be used (it has
- // meaning internally to the loadware).
- unsigned i2sTag : 4;
-
- // These two bits determine the type of packet sent/received.
- unsigned i2sType : 2;
-
- // The count of data to follow: does not include the possible additional
- // padding byte. MAXIMUM COUNT: 4094. The top four bits must be 0.
- unsigned short i2sCount;
-
-} i2DataHeader, *i2DataHeaderPtr;
-
-// Structure is immediately followed by the data, proper.
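Since the data-access macros later in this header skip exactly 4 bytes past the header (see DATA_OF), the packed layout above must come out to 1 channel byte, 1 byte of bitfields, and a 2-byte count. A compile-time check along these lines (an illustrative addition, not in the original source) would catch an accidental layout change:

/* Fails to compile if the packed data header is not exactly 4 bytes. */
typedef char i2DataHeader_must_be_4_bytes[(sizeof(i2DataHeader) == 4) ? 1 : -1];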
-
-//----------------------------
-// NON-DATA PACKETS
-//----------------------------
-
-typedef struct _i2CmdHeader
-{
- unsigned char i2sChannel; // The channel number: 0-255 (except where
- // noted - see below)
-
- // Number of bytes of commands, status or whatever to follow
- unsigned i2sCount : 6;
-
- // These two bits determine the type of packet sent/received.
- unsigned i2sType : 2;
-
-} i2CmdHeader, *i2CmdHeaderPtr;
-
-// Structure is immediately followed by the applicable data.
-
-//---------------------------------------
-// Flow Control Packets (Outbound)
-//---------------------------------------
-
-// One type of outbound command packet is so important that the entire structure
-// is explicitly defined here. That is the flow-control packet. This is never
-// sent by user-level code (as would be the commands to raise/lower DTR, for
-// example). These are only sent by the library routines in response to reading
-// incoming data into the buffers.
-//
-// The parameters inside the command block are maintained in place, then the
-// block is sent at the appropriate time.
-
-typedef struct _flowIn
-{
- i2CmdHeader hd; // Channel #, count, type (see above)
- unsigned char fcmd; // The flow control command (37)
- unsigned short asof; // As of byte number "asof" (LSB first!) I have room
- // for "room" bytes
- unsigned short room;
-} flowIn, *flowInPtr;
-
-//----------------------------------------
-// (Incoming) Status Packets
-//----------------------------------------
-
-// Incoming packets which are non-data packets are status packets. In this case,
-// the channel number in the header is unimportant. What follows are one or more
-// sub-packets, the first word of which consists of the channel (first or low
-// byte) and the status indicator (second or high byte), followed by possibly
-// more data.
-
-#define STAT_CTS_UP 0 /* CTS raised (no other bytes) */
-#define STAT_CTS_DN 1 /* CTS dropped (no other bytes) */
-#define STAT_DCD_UP 2 /* DCD raised (no other bytes) */
-#define STAT_DCD_DN 3 /* DCD dropped (no other bytes) */
-#define STAT_DSR_UP 4 /* DSR raised (no other bytes) */
-#define STAT_DSR_DN 5 /* DSR dropped (no other bytes) */
-#define STAT_RI_UP 6 /* RI raised (no other bytes) */
-#define STAT_RI_DN 7 /* RI dropped (no other bytes) */
-#define STAT_BRK_DET 8 /* BRK detect (no other bytes) */
-#define STAT_FLOW 9 /* Flow control (more: see below) */
-#define STAT_BMARK 10 /* Bookmark (no other bytes)
- * Bookmark is sent as a response to
- * a command 60: request for bookmark
- */
-#define STAT_STATUS 11 /* Special packet: see below */
-#define STAT_TXCNT 12 /* Special packet: see below */
-#define STAT_RXCNT 13 /* Special packet: see below */
-#define STAT_BOXIDS 14 /* Special packet: see below */
-#define STAT_HWFAIL 15 /* Special packet: see below */
-
-#define STAT_MOD_ERROR 0xc0
-#define STAT_MODEM 0xc0 /* If status & STAT_MOD_ERROR:
- * == STAT_MODEM, then this is a modem
- * status packet, given in response to a
- * CMD_DSS_NOW command.
- * The low nibble has each data signal:
- */
-#define STAT_MOD_DCD 0x8
-#define STAT_MOD_RI 0x4
-#define STAT_MOD_DSR 0x2
-#define STAT_MOD_CTS 0x1
-
-#define STAT_ERROR 0x80 /* If status & STAT_MOD_ERROR
- * == STAT_ERROR, then there is some
- * sort of error on the channel.
- * The remaining seven bits indicate
- * what sort of error it is.
- */
-/* The low three bits indicate parity, framing, or overrun errors */
-
-#define STAT_E_PARITY 4 /* Parity error */
-#define STAT_E_FRAMING 2 /* Framing error */
-#define STAT_E_OVERRUN 1 /* (uxart) overrun error */
-
-//---------------------------------------
-// STAT_FLOW packets
-//---------------------------------------
-
-typedef struct _flowStat
-{
- unsigned short asof;
- unsigned short room;
-}flowStat, *flowStatPtr;
-
-// flowStat packets are received from the board to regulate the flow of outgoing
-// data. A local copy of this structure is also kept to track the amount of
-// credits used and credits remaining. "room" is the amount of space in the
-// board's buffers, "as of" having received a certain byte number. When sending
-// data to the fifo, you must calculate how much buffer space your packet will
-// use. Add this to the current "asof" and subtract it from the current "room".
-//
-// The calculation for the board's buffer is given by CREDIT_USAGE, where size
-// is the un-rounded count of either data characters or command characters.
-// (Which is to say, the result is the count rounded up to an even number,
-// plus two.)
-
-#define CREDIT_USAGE(size) (((size) + 3) & ~1)
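As a concrete illustration of the bookkeeping just described (a hedged sketch, not the library's actual routine; debit_credits is a hypothetical name):

/* Debit the local credit copy when a packet carrying "size" unrounded
 * payload bytes is about to be written to the FIFO.  Returns 0 if the
 * board has not granted enough room yet; "room" is kept above zero, as
 * the channel structure in i2lib.h expects. */
static int debit_credits(flowStatPtr fl, unsigned short size)
{
	unsigned short cost = CREDIT_USAGE(size);  /* rounded up to even, plus 2 */

	if (cost >= fl->room)
		return 0;		/* wait for the next flowStat first */

	fl->asof += cost;		/* bytes sent, as far as the board is told */
	fl->room -= cost;		/* credit left in the board's buffer */
	return 1;
}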
-
-//---------------------------------------
-// STAT_STATUS packets
-//---------------------------------------
-
-typedef struct _debugStat
-{
- unsigned char d_ccsr;
- unsigned char d_txinh;
- unsigned char d_stat1;
- unsigned char d_stat2;
-} debugStat, *debugStatPtr;
-
-// debugStat packets are sent to the host in response to a CMD_GET_STATUS
-// command. Each byte is bit-mapped as described below:
-
-#define D_CCSR_XON 2 /* Has received XON, ready to transmit */
-#define D_CCSR_XOFF 4 /* Has received XOFF, not transmitting */
-#define D_CCSR_TXENAB 8 /* Transmitter is enabled */
-#define D_CCSR_RXENAB 0x80 /* Receiver is enabled */
-
-#define D_TXINH_BREAK 1 /* We are sending a break */
-#define D_TXINH_EMPTY 2 /* No data to send */
-#define D_TXINH_SUSP 4 /* Output suspended via command 57 */
-#define D_TXINH_CMD 8 /* We are processing an in-line command */
-#define D_TXINH_LCD 0x10 /* LCD diagnostics are running */
-#define D_TXINH_PAUSE 0x20 /* We are processing a PAUSE command */
-#define D_TXINH_DCD 0x40 /* DCD is low, preventing transmission */
-#define D_TXINH_DSR 0x80 /* DSR is low, preventing transmission */
-
-#define D_STAT1_TXEN 1 /* Transmit INTERRUPTS enabled */
-#define D_STAT1_RXEN 2 /* Receiver INTERRUPTS enabled */
-#define D_STAT1_MDEN 4 /* Modem (data set sigs) interrupts enabled */
-#define D_STAT1_RLM 8 /* Remote loopback mode selected */
-#define D_STAT1_LLM 0x10 /* Local internal loopback mode selected */
-#define D_STAT1_CTS 0x20 /* CTS is low, preventing transmission */
-#define D_STAT1_DTR 0x40 /* DTR is low, to stop remote transmission */
-#define D_STAT1_RTS 0x80 /* RTS is low, to stop remote transmission */
-
-#define D_STAT2_TXMT 1 /* Transmit buffers are all empty */
-#define D_STAT2_RXMT 2 /* Receive buffers are all empty */
-#define D_STAT2_RXINH 4 /* Loadware has tried to inhibit remote
- * transmission: dropped DTR, sent XOFF,
- * whatever...
- */
-#define D_STAT2_RXFLO 8 /* Loadware can send no more data to host
- * until it receives a flow-control packet
- */
-//-----------------------------------------
-// STAT_TXCNT and STAT_RXCNT packets
-//----------------------------------------
-
-typedef struct _cntStat
-{
- unsigned short cs_time; // (Assumes host is little-endian!)
- unsigned short cs_count;
-} cntStat, *cntStatPtr;
-
-// These packets are sent in response to a CMD_GET_RXCNT or a CMD_GET_TXCNT
-// bypass command. cs_time is a running 1-millisecond counter which acts as a
-// time stamp. cs_count is a running counter of data sent or received from the
-// uxarts. (Not including data added by the chip itself, as with CRLF
-// processing.)
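For illustration (a hedged sketch, not part of the loadware interface), two successive samples can be turned into an approximate character rate; chars_per_second is a hypothetical helper:

/* Approximate characters per second between two cntStat samples.  The
 * assignments to unsigned short handle a single wrap of the 16-bit
 * running counters. */
static unsigned long chars_per_second(const cntStat *prev, const cntStat *now)
{
	unsigned short dt = now->cs_time - prev->cs_time;	/* milliseconds */
	unsigned short dc = now->cs_count - prev->cs_count;	/* characters */

	return dt ? (1000UL * dc) / dt : 0;
}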
-//------------------------------------------
-// STAT_HWFAIL packets
-//------------------------------------------
-
-typedef struct _failStat
-{
- unsigned char fs_written;
- unsigned char fs_read;
- unsigned short fs_address;
-} failStat, *failStatPtr;
-
-// This packet is sent whenever the on-board diagnostic process detects an
-// error. At startup, this process is dormant. The host can wake it up by
-// issuing the bypass command CMD_HW_TEST. The process runs at low priority and
-// performs continuous hardware verification; writing data to certain on-board
-// registers, reading it back, and comparing. If it detects an error, this
-// packet is sent to the host, and the process goes dormant again until the host
-// sends another CMD_HW_TEST. It then continues with the next register to be
-// tested.
-
-//------------------------------------------------------------------------------
-// Macros to deal with the headers more easily! Note that these are defined so
-// they may be used as "left" as well as "right" expressions.
-//------------------------------------------------------------------------------
-
-// Given a pointer to the packet, reference the channel number
-//
-#define CHANNEL_OF(pP) ((i2DataHeaderPtr)(pP))->i2sChannel
-
-// Given a pointer to the packet, reference the Packet type
-//
-#define PTYPE_OF(pP) ((i2DataHeaderPtr)(pP))->i2sType
-
-// The possible types of packets
-//
-#define PTYPE_DATA 0 /* Host <--> Board */
-#define PTYPE_BYPASS 1 /* Host ---> Board */
-#define PTYPE_INLINE 2 /* Host ---> Board */
-#define PTYPE_STATUS 2 /* Host <--- Board */
-
-// Given a pointer to a Data packet, reference the Tag
-//
-#define TAG_OF(pP) ((i2DataHeaderPtr)(pP))->i2sTag
-
-// Given a pointer to a Data packet, reference the data i.d.
-//
-#define ID_OF(pP) ((i2DataHeaderPtr)(pP))->i2sId
-
-// The possible types of ID's
-//
-#define ID_ORDINARY_DATA 0
-#define ID_HOT_KEY 1
-
-// Given a pointer to a Data packet, reference the count
-//
-#define DATA_COUNT_OF(pP) ((i2DataHeaderPtr)(pP))->i2sCount
-
-// Given a pointer to a Data packet, reference the beginning of data
-//
-#define DATA_OF(pP) &((unsigned char *)(pP))[4] // 4 = size of header
-
-// Given a pointer to a Non-Data packet, reference the count
-//
-#define CMD_COUNT_OF(pP) ((i2CmdHeaderPtr)(pP))->i2sCount
-
-#define MAX_CMD_PACK_SIZE 62 // Maximum size of such a count
-
-// Given a pointer to a Non-Data packet, reference the beginning of data
-//
-#define CMD_OF(pP) &((unsigned char *)(pP))[2] // 2 = size of header
-
-//--------------------------------
-// MailBox Bits:
-//--------------------------------
-
-//--------------------------
-// Outgoing (host to board)
-//--------------------------
-//
-#define MB_OUT_STUFFED 0x80 // Host has placed output in fifo
-#define MB_IN_STRIPPED 0x40 // Host has read in all input from fifo
-
-//--------------------------
-// Incoming (board to host)
-//--------------------------
-//
-#define MB_IN_STUFFED 0x80 // Board has placed input in fifo
-#define MB_OUT_STRIPPED 0x40 // Board has read all output from fifo
-#define MB_FATAL_ERROR 0x20 // Board has encountered a fatal error
-
-#pragma pack() // Reset padding to command-line default
-
-#endif // I2PACK_H
-
diff --git a/drivers/staging/tty/ip2/ip2.h b/drivers/staging/tty/ip2/ip2.h
deleted file mode 100644
index 936ccc53394..00000000000
--- a/drivers/staging/tty/ip2/ip2.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*******************************************************************************
-*
-* (c) 1998 by Computone Corporation
-*
-********************************************************************************
-*
-*
-* PACKAGE: Linux tty Device Driver for IntelliPort II family of multiport
-* serial I/O controllers.
-*
-* DESCRIPTION: Driver constants for configuration and tuning
-*
-* NOTES:
-*
-*******************************************************************************/
-#ifndef IP2_H
-#define IP2_H
-
-#include "ip2types.h"
-#include "i2cmd.h"
-
-/*************/
-/* Constants */
-/*************/
-
-/* Device major numbers - since version 2.0.26. */
-#define IP2_TTY_MAJOR 71
-#define IP2_CALLOUT_MAJOR 72
-#define IP2_IPL_MAJOR 73
-
-/* Board configuration array.
- * This array defines the hardware irq and address for up to IP2_MAX_BOARDS
- * boards (4 supported per ip2_types.h). ISA board addresses and irqs MUST be
- * specified; PCI and EISA boards are probed for and automagically configured
- * iff the addresses are set to 1 and 2 respectively.
- * 0x0100 - 0x03f0 == ISA
- * 1 == PCI
- * 2 == EISA
- * 0 == (skip this board)
- * This array defines the hardware addresses for them. The special PCI and
- * EISA addresses make the driver go sniffing for boards.
-
- * In a multiboard system the position in the array determines which port
- * devices are assigned to each board:
- * board 0 is assigned ttyF0.. to ttyF63,
- * board 1 is assigned ttyF64 to ttyF127,
- * board 2 is assigned ttyF128 to ttyF191,
- * board 3 is assigned ttyF192 to ttyF255.
- *
- * In PCI and EISA bus systems each range is mapped to a card in
- * monotonically increasing slot-number order; ISA position is as specified
- * here.
-
- * If the irqs are ALL set to 0,0,0,0 all boards operate in
- * polled mode. For interrupt operation ISA boards require that the IRQ be
- * specified, while for PCI and EISA boards any nonzero entry
- * will enable interrupts using the BIOS-configured irq for the board.
- * An invalid irq entry will default to polled mode for that card and print
- * a console warning.
-
- * When the driver is loaded as a module these settings can be overridden on
- * the modprobe command line or on an option line in /etc/modprobe.conf.
- * If the driver is built in, the configuration must be
- * set here for ISA cards, and the address set to 1 or 2 for PCI and EISA.
- *
- * Here is an example that shows most if not all possible combinations:
-
- *static ip2config_t ip2config =
- *{
- * {11,1,0,0}, // irqs
- * { // Addresses
- * 0x0308, // Board 0, ttyF0 - ttyF63// ISA card at io=0x308, irq=11
- * 0x0001, // Board 1, ttyF64 - ttyF127//PCI card configured by BIOS
- * 0x0000, // Board 2, ttyF128 - ttyF191// Slot skipped
- * 0x0002 // Board 3, ttyF192 - ttyF255//EISA card configured by BIOS
- * // but polled not irq driven
- * }
- *};
- */
-
- /* this structure is zeroed out because the suggested method is to configure
- * the driver as a module, set up the parameters with an options line in
- * /etc/modprobe.conf and load with modprobe or kmod, the kernel
- * module loader
- */
-
- /* This structure is NOW always initialized when the driver is initialized.
- * Compiled in defaults MUST be added to the io and irq arrays in
- * ip2.c. Those values are configurable from insmod parameters in the
- * case of modules or from command line parameters (ip2=io,irq) when
- * compiled in.
- */
-
-static ip2config_t ip2config =
-{
- {0,0,0,0}, // irqs
- { // Addresses
- /* Do NOT set compile time defaults HERE! Use the arrays in
- ip2.c! These WILL be overwritten! =mhw= */
- 0x0000, // Board 0, ttyF0 - ttyF63
- 0x0000, // Board 1, ttyF64 - ttyF127
- 0x0000, // Board 2, ttyF128 - ttyF191
- 0x0000 // Board 3, ttyF192 - ttyF255
- }
-};
-
-#endif
diff --git a/drivers/staging/tty/ip2/ip2ioctl.h b/drivers/staging/tty/ip2/ip2ioctl.h
deleted file mode 100644
index aa0a9da85e0..00000000000
--- a/drivers/staging/tty/ip2/ip2ioctl.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*******************************************************************************
-*
-* (c) 1998 by Computone Corporation
-*
-********************************************************************************
-*
-*
-* PACKAGE: Linux tty Device Driver for IntelliPort II family of multiport
-* serial I/O controllers.
-*
-* DESCRIPTION: Driver constants for configuration and tuning
-*
-* NOTES:
-*
-*******************************************************************************/
-
-#ifndef IP2IOCTL_H
-#define IP2IOCTL_H
-
-//*************
-//* Constants *
-//*************
-
-// High baud rates (if not defined elsewhere).
-#ifndef B153600
-# define B153600 0010005
-#endif
-#ifndef B307200
-# define B307200 0010006
-#endif
-#ifndef B921600
-# define B921600 0010007
-#endif
-
-#endif
diff --git a/drivers/staging/tty/ip2/ip2main.c b/drivers/staging/tty/ip2/ip2main.c
deleted file mode 100644
index ba074fbb4ce..00000000000
--- a/drivers/staging/tty/ip2/ip2main.c
+++ /dev/null
@@ -1,3234 +0,0 @@
-/*
-*
-* (c) 1999 by Computone Corporation
-*
-********************************************************************************
-*
-* PACKAGE: Linux tty Device Driver for IntelliPort family of multiport
-* serial I/O controllers.
-*
-* DESCRIPTION: Mainline code for the device driver
-*
-*******************************************************************************/
-// ToDo:
-//
-// Fix the immediate DSS_NOW problem.
-// Work over the channel stats return logic in ip2_ipl_ioctl so they
-// make sense for all 256 possible channels and so the user space
-// utilities will compile and work properly.
-//
-// Done:
-//
-// 1.2.14 /\/\|=mhw=|\/\/
-// Added bounds checking to ip2_ipl_ioctl to avoid potential terroristic acts.
-// Changed the definition of ip2trace to be more consistent with kernel style
-// Thanks to Andreas Dilger <adilger@turbolabs.com> for these updates
-//
-// 1.2.13 /\/\|=mhw=|\/\/
-// DEVFS: Renamed ttf/{n} to tts/F{n} and cuf/{n} to cua/F{n} to conform
-// to agreed devfs serial device naming convention.
-//
-// 1.2.12 /\/\|=mhw=|\/\/
-// Cleaned up some remove queue cut and paste errors
-//
-// 1.2.11 /\/\|=mhw=|\/\/
-// Clean up potential NULL pointer dereferences
-// Clean up devfs registration
-// Add kernel command line parsing for io and irq
-// Compile defaults for io and irq are now set in ip2.c not ip2.h!
-// Reworked poll_only hack for explicit parameter setting
-// You must now EXPLICITLY set poll_only = 1 or set all irqs to 0
-// Merged ip2_loadmain and old_ip2_init
-// Converted all instances of interruptible_sleep_on into queue calls
-// Most of these had no race conditions but better to clean up now
-//
-// 1.2.10 /\/\|=mhw=|\/\/
-// Fixed the bottom half interrupt handler and enabled USE_IQI
-// to split the interrupt handler into a formal top-half / bottom-half
-// Fixed timing window on high speed processors that queued messages to
-// the outbound mail fifo faster than the board could handle.
-//
-// 1.2.9
-// Four box EX was barfing on >128k kmalloc, made structure smaller by
-// reducing output buffer size
-//
-// 1.2.8
-// Device file system support (MHW)
-//
-// 1.2.7
-// Fixed
-// Reload of ip2 without unloading ip2main hangs system on cat of /proc/modules
-//
-// 1.2.6
-// Fixes DCD problems
-// DCD was not reported when CLOCAL was set on call to TIOCMGET
-//
-// Enhancements:
-// TIOCMGET requests and waits for status return
-// No DSS interrupts enabled except for DCD when needed
-//
-// For internal use only
-//
-//#define IP2DEBUG_INIT
-//#define IP2DEBUG_OPEN
-//#define IP2DEBUG_WRITE
-//#define IP2DEBUG_READ
-//#define IP2DEBUG_IOCTL
-//#define IP2DEBUG_IPL
-
-//#define IP2DEBUG_TRACE
-//#define DEBUG_FIFO
-
-/************/
-/* Includes */
-/************/
-
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/fcntl.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/major.h>
-#include <linux/wait.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/firmware.h>
-#include <linux/platform_device.h>
-
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/termios.h>
-#include <linux/tty_driver.h>
-#include <linux/serial.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-
-#include <linux/cdk.h>
-#include <linux/comstats.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-
-#include <asm/uaccess.h>
-
-#include "ip2types.h"
-#include "ip2trace.h"
-#include "ip2ioctl.h"
-#include "ip2.h"
-#include "i2ellis.h"
-#include "i2lib.h"
-
-/*****************
- * /proc/ip2mem *
- *****************/
-
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
-static DEFINE_MUTEX(ip2_mutex);
-static const struct file_operations ip2mem_proc_fops;
-static const struct file_operations ip2_proc_fops;
-
-/********************/
-/* Type Definitions */
-/********************/
-
-/*************/
-/* Constants */
-/*************/
-
-/* String constants to identify ourselves */
-static const char pcName[] = "Computone IntelliPort Plus multiport driver";
-static const char pcVersion[] = "1.2.14";
-
-/* String constants for port names */
-static const char pcDriver_name[] = "ip2";
-static const char pcIpl[] = "ip2ipl";
-
-/***********************/
-/* Function Prototypes */
-/***********************/
-
-/* Global module entry functions */
-
-/* Private (static) functions */
-static int ip2_open(PTTY, struct file *);
-static void ip2_close(PTTY, struct file *);
-static int ip2_write(PTTY, const unsigned char *, int);
-static int ip2_putchar(PTTY, unsigned char);
-static void ip2_flush_chars(PTTY);
-static int ip2_write_room(PTTY);
-static int ip2_chars_in_buf(PTTY);
-static void ip2_flush_buffer(PTTY);
-static int ip2_ioctl(PTTY, UINT, ULONG);
-static void ip2_set_termios(PTTY, struct ktermios *);
-static void ip2_set_line_discipline(PTTY);
-static void ip2_throttle(PTTY);
-static void ip2_unthrottle(PTTY);
-static void ip2_stop(PTTY);
-static void ip2_start(PTTY);
-static void ip2_hangup(PTTY);
-static int ip2_tiocmget(struct tty_struct *tty);
-static int ip2_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear);
-static int ip2_get_icount(struct tty_struct *tty,
- struct serial_icounter_struct *icount);
-
-static void set_irq(int, int);
-static void ip2_interrupt_bh(struct work_struct *work);
-static irqreturn_t ip2_interrupt(int irq, void *dev_id);
-static void ip2_poll(unsigned long arg);
-static inline void service_all_boards(void);
-static void do_input(struct work_struct *);
-static void do_status(struct work_struct *);
-
-static void ip2_wait_until_sent(PTTY,int);
-
-static void set_params (i2ChanStrPtr, struct ktermios *);
-static int get_serial_info(i2ChanStrPtr, struct serial_struct __user *);
-static int set_serial_info(i2ChanStrPtr, struct serial_struct __user *);
-
-static ssize_t ip2_ipl_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t ip2_ipl_write(struct file *, const char __user *, size_t, loff_t *);
-static long ip2_ipl_ioctl(struct file *, UINT, ULONG);
-static int ip2_ipl_open(struct inode *, struct file *);
-
-static int DumpTraceBuffer(char __user *, int);
-static int DumpFifoBuffer( char __user *, int);
-
-static void ip2_init_board(int, const struct firmware *);
-static unsigned short find_eisa_board(int);
-static int ip2_setup(char *str);
-
-/***************/
-/* Static Data */
-/***************/
-
-static struct tty_driver *ip2_tty_driver;
-
-/* Here, then, is a table of board pointers which the interrupt routine should
- * scan through to determine which board it must service.
- */
-static unsigned short i2nBoards; // Number of boards here
-
-static i2eBordStrPtr i2BoardPtrTable[IP2_MAX_BOARDS];
-
-static i2ChanStrPtr DevTable[IP2_MAX_PORTS];
-//DevTableMem just used to save addresses for kfree
-static void *DevTableMem[IP2_MAX_BOARDS];
-
-/* This is the driver descriptor for the ip2ipl device, which is used to
- * download the loadware to the boards.
- */
-static const struct file_operations ip2_ipl = {
- .owner = THIS_MODULE,
- .read = ip2_ipl_read,
- .write = ip2_ipl_write,
- .unlocked_ioctl = ip2_ipl_ioctl,
- .open = ip2_ipl_open,
- .llseek = noop_llseek,
-};
-
-static unsigned long irq_counter;
-static unsigned long bh_counter;
-
-// Use immediate queue to service interrupts
-#define USE_IQI
-//#define USE_IQ // PCI&2.2 needs work
-
-/* The timer_list entry for our poll routine. If interrupt operation is not
- * selected, the board is serviced periodically to see if anything needs doing.
- */
-#define POLL_TIMEOUT (jiffies + 1)
-static DEFINE_TIMER(PollTimer, ip2_poll, 0, 0);
-
-#ifdef IP2DEBUG_TRACE
-/* Trace (debug) buffer data */
-#define TRACEMAX 1000
-static unsigned long tracebuf[TRACEMAX];
-static int tracestuff;
-static int tracestrip;
-static int tracewrap;
-#endif
-
-/**********/
-/* Macros */
-/**********/
-
-#ifdef IP2DEBUG_OPEN
-#define DBG_CNT(s) printk(KERN_DEBUG "(%s): [%x] ttyc=%d, modc=%x -> %s\n", \
- tty->name,(pCh->flags), \
- tty->count,/*GET_USE_COUNT(module)*/0,s)
-#else
-#define DBG_CNT(s)
-#endif
-
-/********/
-/* Code */
-/********/
-
-#include "i2ellis.c" /* Extremely low-level interface services */
-#include "i2cmd.c" /* Standard loadware command definitions */
-#include "i2lib.c" /* High level interface services */
-
-/* Configuration area for modprobe */
-
-MODULE_AUTHOR("Doug McNash");
-MODULE_DESCRIPTION("Computone IntelliPort Plus Driver");
-MODULE_LICENSE("GPL");
-
-#define MAX_CMD_STR 50
-
-static int poll_only;
-static char cmd[MAX_CMD_STR];
-
-static int Eisa_irq;
-static int Eisa_slot;
-
-static int iindx;
-static char rirqs[IP2_MAX_BOARDS];
-static int Valid_Irqs[] = { 3, 4, 5, 7, 10, 11, 12, 15, 0};
-
-/* Note: Add compiled in defaults to these arrays, not to the structure
- in ip2.h any longer. That structure WILL get overridden
- by these values, or command line values, or insmod values!!! =mhw=
-*/
-static int io[IP2_MAX_BOARDS];
-static int irq[IP2_MAX_BOARDS] = { -1, -1, -1, -1 };
-
-MODULE_AUTHOR("Doug McNash");
-MODULE_DESCRIPTION("Computone IntelliPort Plus Driver");
-module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(irq, "Interrupts for IntelliPort Cards");
-module_param_array(io, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O ports for IntelliPort Cards");
-module_param(poll_only, bool, 0);
-MODULE_PARM_DESC(poll_only, "Do not use card interrupts");
-module_param_string(ip2, cmd, MAX_CMD_STR, 0);
-MODULE_PARM_DESC(ip2, "Contains module parameter passed with 'ip2='");
-
-/* for sysfs class support */
-static struct class *ip2_class;
-
-/* Some functions to keep track of what irqs we have */
-
-static int __init is_valid_irq(int irq)
-{
- int *i = Valid_Irqs;
-
- while (*i != 0 && *i != irq)
- i++;
-
- return *i;
-}
-
-static void __init mark_requested_irq(char irq)
-{
- rirqs[iindx++] = irq;
-}
-
-static int __exit clear_requested_irq(char irq)
-{
- int i;
- for (i = 0; i < IP2_MAX_BOARDS; ++i) {
- if (rirqs[i] == irq) {
- rirqs[i] = 0;
- return 1;
- }
- }
- return 0;
-}
-
-static int have_requested_irq(char irq)
-{
- /* array init to zeros so 0 irq will not be requested as a side
- * effect */
- int i;
- for (i = 0; i < IP2_MAX_BOARDS; ++i)
- if (rirqs[i] == irq)
- return 1;
- return 0;
-}
-
-/******************************************************************************/
-/* Function: cleanup_module() */
-/* Parameters: None */
-/* Returns: Nothing */
-/* */
-/* Description: */
-/* This is a required entry point for an installable module. It has to return */
-/* the device and the driver to a passive state. It should not be necessary */
-/* to reset the board fully, especially as the loadware is downloaded */
-/* externally rather than in the driver. We just want to disable the board */
-/* and clear the loadware to a reset state. To allow this there has to be a */
-/* way to detect whether the board has the loadware running at init time to */
-/* handle subsequent installations of the driver. All memory allocated by the */
-/* driver should be returned since it may be unloaded from memory. */
-/******************************************************************************/
-static void __exit ip2_cleanup_module(void)
-{
- int err;
- int i;
-
- del_timer_sync(&PollTimer);
-
- /* Reset the boards we have. */
- for (i = 0; i < IP2_MAX_BOARDS; i++)
- if (i2BoardPtrTable[i])
- iiReset(i2BoardPtrTable[i]);
-
- /* The following is done at most once, if any boards were installed. */
- for (i = 0; i < IP2_MAX_BOARDS; i++) {
- if (i2BoardPtrTable[i]) {
- iiResetDelay(i2BoardPtrTable[i]);
- /* free io addresses and Tibet */
- release_region(ip2config.addr[i], 8);
- device_destroy(ip2_class, MKDEV(IP2_IPL_MAJOR, 4 * i));
- device_destroy(ip2_class, MKDEV(IP2_IPL_MAJOR,
- 4 * i + 1));
- }
- /* Disable and remove interrupt handler. */
- if (ip2config.irq[i] > 0 &&
- have_requested_irq(ip2config.irq[i])) {
- free_irq(ip2config.irq[i], (void *)&pcName);
- clear_requested_irq(ip2config.irq[i]);
- }
- }
- class_destroy(ip2_class);
- err = tty_unregister_driver(ip2_tty_driver);
- if (err)
- printk(KERN_ERR "IP2: failed to unregister tty driver (%d)\n",
- err);
- put_tty_driver(ip2_tty_driver);
- unregister_chrdev(IP2_IPL_MAJOR, pcIpl);
- remove_proc_entry("ip2mem", NULL);
-
- /* free memory */
- for (i = 0; i < IP2_MAX_BOARDS; i++) {
- void *pB;
-#ifdef CONFIG_PCI
- if (ip2config.type[i] == PCI && ip2config.pci_dev[i]) {
- pci_disable_device(ip2config.pci_dev[i]);
- pci_dev_put(ip2config.pci_dev[i]);
- ip2config.pci_dev[i] = NULL;
- }
-#endif
- pB = i2BoardPtrTable[i];
- if (pB != NULL) {
- kfree(pB);
- i2BoardPtrTable[i] = NULL;
- }
- if (DevTableMem[i] != NULL) {
- kfree(DevTableMem[i]);
- DevTableMem[i] = NULL;
- }
- }
-}
-module_exit(ip2_cleanup_module);
-
-static const struct tty_operations ip2_ops = {
- .open = ip2_open,
- .close = ip2_close,
- .write = ip2_write,
- .put_char = ip2_putchar,
- .flush_chars = ip2_flush_chars,
- .write_room = ip2_write_room,
- .chars_in_buffer = ip2_chars_in_buf,
- .flush_buffer = ip2_flush_buffer,
- .ioctl = ip2_ioctl,
- .throttle = ip2_throttle,
- .unthrottle = ip2_unthrottle,
- .set_termios = ip2_set_termios,
- .set_ldisc = ip2_set_line_discipline,
- .stop = ip2_stop,
- .start = ip2_start,
- .hangup = ip2_hangup,
- .tiocmget = ip2_tiocmget,
- .tiocmset = ip2_tiocmset,
- .get_icount = ip2_get_icount,
- .proc_fops = &ip2_proc_fops,
-};
-
-/******************************************************************************/
-/* Function: ip2_loadmain() */
-/* Parameters: irq, io from command line of insmod et. al. */
-/* pointer to fip firmware and firmware size for boards */
-/* Returns: Success (0) */
-/* */
-/* Description: */
-/* This was the required entry point for all drivers (now in ip2.c). */
-/* It performs all */
-/* initialisation of the devices and driver structures, and registers itself */
-/* with the relevant kernel modules. */
-/******************************************************************************/
-/* IRQF_DISABLED - if set blocks all interrupts else only this line */
-/* IRQF_SHARED - for shared irq PCI or maybe EISA only */
-/* SA_RANDOM - can be a source for certain random number generators */
-#define IP2_SA_FLAGS 0
-
-
-static const struct firmware *ip2_request_firmware(void)
-{
- struct platform_device *pdev;
- const struct firmware *fw;
-
- pdev = platform_device_register_simple("ip2", 0, NULL, 0);
- if (IS_ERR(pdev)) {
- printk(KERN_ERR "Failed to register platform device for ip2\n");
- return NULL;
- }
- if (request_firmware(&fw, "intelliport2.bin", &pdev->dev)) {
- printk(KERN_ERR "Failed to load firmware 'intelliport2.bin'\n");
- fw = NULL;
- }
- platform_device_unregister(pdev);
- return fw;
-}
-
-/******************************************************************************
- * ip2_setup:
- * str: kernel command line string
- *
- * Can't autoprobe the boards so user must specify configuration on
- * kernel command line. Sane people build it modular but the others
- * come here.
- *
- * Alternating pairs of io,irq for up to 4 boards.
- * ip2=io0,irq0,io1,irq1,io2,irq2,io3,irq3
- *
- * io=0 => No board
- * io=1 => PCI
- * io=2 => EISA
- * else => ISA I/O address
- *
- * irq=0 or invalid for ISA will revert to polling mode
- *
- * Any value = -1, do not overwrite compiled in value.
- *
- ******************************************************************************/
-static int __init ip2_setup(char *str)
-{
- int j, ints[10]; /* 4 boards, 2 parameters + 2 */
- unsigned int i;
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- for (i = 0, j = 1; i < 4; i++) {
- if (j > ints[0])
- break;
- if (ints[j] >= 0)
- io[i] = ints[j];
- j++;
- if (j > ints[0])
- break;
- if (ints[j] >= 0)
- irq[i] = ints[j];
- j++;
- }
- return 1;
-}
-__setup("ip2=", ip2_setup);
-
-static int __init ip2_loadmain(void)
-{
- int i, j, box;
- int err = 0;
- i2eBordStrPtr pB = NULL;
- int rc = -1;
- const struct firmware *fw = NULL;
- char *str;
-
- str = cmd;
-
- if (poll_only) {
- /* Hard lock the interrupts to zero */
- irq[0] = irq[1] = irq[2] = irq[3] = poll_only = 0;
- }
-
- /* Check module parameter with 'ip2=' has been passed or not */
- if (!poll_only && (!strncmp(str, "ip2=", 4)))
- ip2_setup(str);
-
- ip2trace(ITRC_NO_PORT, ITRC_INIT, ITRC_ENTER, 0);
-
- /* process command line arguments to modprobe or
- insmod i.e. iop & irqp */
- /* irqp and iop should ALWAYS be specified now... But we check
- them individually just to be sure, anyway... */
- for (i = 0; i < IP2_MAX_BOARDS; ++i) {
- ip2config.addr[i] = io[i];
- if (irq[i] >= 0)
- ip2config.irq[i] = irq[i];
- else
- ip2config.irq[i] = 0;
- /* This is a little bit of a hack. If poll_only=1 on the command
- line back in ip2.c OR all IRQs on all specified boards are
- explicitly set to 0, then drop to poll-only mode and override
- PCI or EISA interrupts. This supersedes the old hack of
- triggering if all interrupts were zero (like the default).
- Still a hack but less prone to random acts of terrorism.
-
- What we really should do, now that the IRQ default is set
- to -1, is to use 0 as a hard-coded "do not probe".
-
- /\/\|=mhw=|\/\/
- */
- poll_only |= irq[i];
- }
- poll_only = !poll_only;
-
- /* Announce our presence */
- printk(KERN_INFO "%s version %s\n", pcName, pcVersion);
-
- ip2_tty_driver = alloc_tty_driver(IP2_MAX_PORTS);
- if (!ip2_tty_driver)
- return -ENOMEM;
-
- /* Initialise all the boards we can find (up to the maximum). */
- for (i = 0; i < IP2_MAX_BOARDS; ++i) {
- switch (ip2config.addr[i]) {
- case 0: /* skip this slot even if card is present */
- break;
- default: /* ISA */
- /* ISA address must be specified */
- if (ip2config.addr[i] < 0x100 ||
- ip2config.addr[i] > 0x3f8) {
- printk(KERN_ERR "IP2: Bad ISA board %d "
- "address %x\n", i,
- ip2config.addr[i]);
- ip2config.addr[i] = 0;
- break;
- }
- ip2config.type[i] = ISA;
-
- /* Check for valid irq argument, set for polling if
- * invalid */
- if (ip2config.irq[i] &&
- !is_valid_irq(ip2config.irq[i])) {
- printk(KERN_ERR "IP2: Bad IRQ(%d) specified\n",
- ip2config.irq[i]);
- /* 0 is polling and is valid in that sense */
- ip2config.irq[i] = 0;
- }
- break;
- case PCI:
-#ifdef CONFIG_PCI
- {
- struct pci_dev *pdev = NULL;
- u32 addr;
- int status;
-
- pdev = pci_get_device(PCI_VENDOR_ID_COMPUTONE,
- PCI_DEVICE_ID_COMPUTONE_IP2EX, pdev);
- if (pdev == NULL) {
- ip2config.addr[i] = 0;
- printk(KERN_ERR "IP2: PCI board %d not "
- "found\n", i);
- break;
- }
-
- if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev, "can't enable device\n");
- goto out;
- }
- ip2config.type[i] = PCI;
- ip2config.pci_dev[i] = pci_dev_get(pdev);
- status = pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1,
- &addr);
- if (addr & 1)
- ip2config.addr[i] = (USHORT)(addr & 0xfffe);
- else
- dev_err(&pdev->dev, "I/O address error\n");
-
- ip2config.irq[i] = pdev->irq;
-out:
- pci_dev_put(pdev);
- }
-#else
- printk(KERN_ERR "IP2: PCI card specified but PCI "
- "support not enabled.\n");
- printk(KERN_ERR "IP2: Recompile kernel with CONFIG_PCI "
- "defined!\n");
-#endif /* CONFIG_PCI */
- break;
- case EISA:
- ip2config.addr[i] = find_eisa_board(Eisa_slot + 1);
- if (ip2config.addr[i] != 0) {
- /* Eisa_irq set as side effect, boo */
- ip2config.type[i] = EISA;
- }
- ip2config.irq[i] = Eisa_irq;
- break;
- } /* switch */
- } /* for */
-
- for (i = 0; i < IP2_MAX_BOARDS; ++i) {
- if (ip2config.addr[i]) {
- pB = kzalloc(sizeof(i2eBordStr), GFP_KERNEL);
- if (pB) {
- i2BoardPtrTable[i] = pB;
- iiSetAddress(pB, ip2config.addr[i],
- ii2DelayTimer);
- iiReset(pB);
- } else
- printk(KERN_ERR "IP2: board memory allocation "
- "error\n");
- }
- }
- for (i = 0; i < IP2_MAX_BOARDS; ++i) {
- pB = i2BoardPtrTable[i];
- if (pB != NULL) {
- iiResetDelay(pB);
- break;
- }
- }
- for (i = 0; i < IP2_MAX_BOARDS; ++i) {
- /* We don't want to request the firmware unless we have at
- least one board */
- if (i2BoardPtrTable[i] != NULL) {
- if (!fw)
- fw = ip2_request_firmware();
- if (!fw)
- break;
- ip2_init_board(i, fw);
- }
- }
- if (fw)
- release_firmware(fw);
-
- ip2trace(ITRC_NO_PORT, ITRC_INIT, 2, 0);
-
- ip2_tty_driver->owner = THIS_MODULE;
- ip2_tty_driver->name = "ttyF";
- ip2_tty_driver->driver_name = pcDriver_name;
- ip2_tty_driver->major = IP2_TTY_MAJOR;
- ip2_tty_driver->minor_start = 0;
- ip2_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
- ip2_tty_driver->subtype = SERIAL_TYPE_NORMAL;
- ip2_tty_driver->init_termios = tty_std_termios;
- ip2_tty_driver->init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL;
- ip2_tty_driver->flags = TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV;
- tty_set_operations(ip2_tty_driver, &ip2_ops);
-
- ip2trace(ITRC_NO_PORT, ITRC_INIT, 3, 0);
-
- err = tty_register_driver(ip2_tty_driver);
- if (err) {
- printk(KERN_ERR "IP2: failed to register tty driver\n");
- put_tty_driver(ip2_tty_driver);
- return err; /* leaking resources */
- }
-
- err = register_chrdev(IP2_IPL_MAJOR, pcIpl, &ip2_ipl);
- if (err) {
- printk(KERN_ERR "IP2: failed to register IPL device (%d)\n",
- err);
- } else {
- /* create the sysfs class */
- ip2_class = class_create(THIS_MODULE, "ip2");
- if (IS_ERR(ip2_class)) {
- err = PTR_ERR(ip2_class);
- goto out_chrdev;
- }
- }
- /* Register the read_procmem thing */
- if (!proc_create("ip2mem",0,NULL,&ip2mem_proc_fops)) {
- printk(KERN_ERR "IP2: failed to register read_procmem\n");
- return -EIO; /* leaking resources */
- }
-
- ip2trace(ITRC_NO_PORT, ITRC_INIT, 4, 0);
- /* Register the interrupt handler or poll handler, depending upon the
- * specified interrupt.
- */
-
- for (i = 0; i < IP2_MAX_BOARDS; ++i) {
- if (ip2config.addr[i] == 0)
- continue;
-
- pB = i2BoardPtrTable[i];
- if (pB != NULL) {
- device_create(ip2_class, NULL,
- MKDEV(IP2_IPL_MAJOR, 4 * i),
- NULL, "ipl%d", i);
- device_create(ip2_class, NULL,
- MKDEV(IP2_IPL_MAJOR, 4 * i + 1),
- NULL, "stat%d", i);
-
- for (box = 0; box < ABS_MAX_BOXES; box++)
- for (j = 0; j < ABS_BIGGEST_BOX; j++)
- if (pB->i2eChannelMap[box] & (1 << j))
- tty_register_device(
- ip2_tty_driver,
- j + ABS_BIGGEST_BOX *
- (box+i*ABS_MAX_BOXES),
- NULL);
- }
-
- if (poll_only) {
- /* Poll only forces driver to only use polling and
- to ignore the probed PCI or EISA interrupts. */
- ip2config.irq[i] = CIR_POLL;
- }
- if (ip2config.irq[i] == CIR_POLL) {
-retry:
- if (!timer_pending(&PollTimer)) {
- mod_timer(&PollTimer, POLL_TIMEOUT);
- printk(KERN_INFO "IP2: polling\n");
- }
- } else {
- if (have_requested_irq(ip2config.irq[i]))
- continue;
- rc = request_irq(ip2config.irq[i], ip2_interrupt,
- IP2_SA_FLAGS |
- (ip2config.type[i] == PCI ? IRQF_SHARED : 0),
- pcName, i2BoardPtrTable[i]);
- if (rc) {
- printk(KERN_ERR "IP2: request_irq failed: "
- "error %d\n", rc);
- ip2config.irq[i] = CIR_POLL;
- printk(KERN_INFO "IP2: Polling %ld/sec.\n",
- (POLL_TIMEOUT - jiffies));
- goto retry;
- }
- mark_requested_irq(ip2config.irq[i]);
- /* Initialise the interrupt handler bottom half
- * (aka slih). */
- }
- }
-
- for (i = 0; i < IP2_MAX_BOARDS; ++i) {
- if (i2BoardPtrTable[i]) {
- /* set and enable board interrupt */
- set_irq(i, ip2config.irq[i]);
- }
- }
-
- ip2trace(ITRC_NO_PORT, ITRC_INIT, ITRC_RETURN, 0);
-
- return 0;
-
-out_chrdev:
- unregister_chrdev(IP2_IPL_MAJOR, "ip2");
- /* unregister and put tty here */
- return err;
-}
-module_init(ip2_loadmain);
-
-/******************************************************************************/
-/* Function: ip2_init_board() */
-/* Parameters: Index of board in configuration structure */
-/* Returns: Success (0) */
-/* */
-/* Description: */
-/* This function initializes the specified board. The loadware is copied to */
-/* the board, the channel structures are initialized, and the board details */
-/* are reported on the console. */
-/******************************************************************************/
-static void
-ip2_init_board(int boardnum, const struct firmware *fw)
-{
- int i;
- int nports = 0, nboxes = 0;
- i2ChanStrPtr pCh;
- i2eBordStrPtr pB = i2BoardPtrTable[boardnum];
-
- if ( !iiInitialize ( pB ) ) {
- printk ( KERN_ERR "IP2: Failed to initialize board at 0x%x, error %d\n",
- pB->i2eBase, pB->i2eError );
- goto err_initialize;
- }
- printk(KERN_INFO "IP2: Board %d: addr=0x%x irq=%d\n", boardnum + 1,
- ip2config.addr[boardnum], ip2config.irq[boardnum] );
-
- if (!request_region( ip2config.addr[boardnum], 8, pcName )) {
- printk(KERN_ERR "IP2: bad addr=0x%x\n", ip2config.addr[boardnum]);
- goto err_initialize;
- }
-
- if ( iiDownloadAll ( pB, (loadHdrStrPtr)fw->data, 1, fw->size )
- != II_DOWN_GOOD ) {
- printk ( KERN_ERR "IP2: failed to download loadware\n" );
- goto err_release_region;
- } else {
- printk ( KERN_INFO "IP2: fv=%d.%d.%d lv=%d.%d.%d\n",
- pB->i2ePom.e.porVersion,
- pB->i2ePom.e.porRevision,
- pB->i2ePom.e.porSubRev, pB->i2eLVersion,
- pB->i2eLRevision, pB->i2eLSub );
- }
-
- switch ( pB->i2ePom.e.porID & ~POR_ID_RESERVED ) {
-
- default:
- printk( KERN_ERR "IP2: Unknown board type, ID = %x\n",
- pB->i2ePom.e.porID );
- nports = 0;
- goto err_release_region;
- break;
-
- case POR_ID_II_4: /* IntelliPort-II, ISA-4 (4xRJ45) */
- printk ( KERN_INFO "IP2: ISA-4\n" );
- nports = 4;
- break;
-
- case POR_ID_II_8: /* IntelliPort-II, 8-port using standard brick. */
- printk ( KERN_INFO "IP2: ISA-8 std\n" );
- nports = 8;
- break;
-
- case POR_ID_II_8R: /* IntelliPort-II, 8-port using RJ11's (no CTS) */
- printk ( KERN_INFO "IP2: ISA-8 RJ11\n" );
- nports = 8;
- break;
-
- case POR_ID_FIIEX: /* IntelliPort IIEX */
- {
- int portnum = IP2_PORTS_PER_BOARD * boardnum;
- int box;
-
- for( box = 0; box < ABS_MAX_BOXES; ++box ) {
- if ( pB->i2eChannelMap[box] != 0 ) {
- ++nboxes;
- }
- for( i = 0; i < ABS_BIGGEST_BOX; ++i ) {
- if ( pB->i2eChannelMap[box] & 1<< i ) {
- ++nports;
- }
- }
- }
- DevTableMem[boardnum] = pCh =
- kmalloc( sizeof(i2ChanStr) * nports, GFP_KERNEL );
- if ( !pCh ) {
- printk ( KERN_ERR "IP2: (i2_init_channel:) Out of memory.\n");
- goto err_release_region;
- }
- if ( !i2InitChannels( pB, nports, pCh ) ) {
- printk(KERN_ERR "IP2: i2InitChannels failed: %d\n",pB->i2eError);
- kfree ( pCh );
- goto err_release_region;
- }
- pB->i2eChannelPtr = &DevTable[portnum];
- pB->i2eChannelCnt = ABS_MOST_PORTS;
-
- for( box = 0; box < ABS_MAX_BOXES; ++box, portnum += ABS_BIGGEST_BOX ) {
- for( i = 0; i < ABS_BIGGEST_BOX; ++i ) {
- if ( pB->i2eChannelMap[box] & (1 << i) ) {
- DevTable[portnum + i] = pCh;
- pCh->port_index = portnum + i;
- pCh++;
- }
- }
- }
- printk(KERN_INFO "IP2: EX box=%d ports=%d %d bit\n",
- nboxes, nports, pB->i2eDataWidth16 ? 16 : 8 );
- }
- goto ex_exit;
- }
- DevTableMem[boardnum] = pCh =
- kmalloc ( sizeof (i2ChanStr) * nports, GFP_KERNEL );
- if ( !pCh ) {
- printk ( KERN_ERR "IP2: (i2_init_channel:) Out of memory.\n");
- goto err_release_region;
- }
- pB->i2eChannelPtr = pCh;
- pB->i2eChannelCnt = nports;
- if ( !i2InitChannels( pB, nports, pCh ) ) {
- printk(KERN_ERR "IP2: i2InitChannels failed: %d\n",pB->i2eError);
- kfree ( pCh );
- goto err_release_region;
- }
- pB->i2eChannelPtr = &DevTable[IP2_PORTS_PER_BOARD * boardnum];
-
- for( i = 0; i < pB->i2eChannelCnt; ++i ) {
- DevTable[IP2_PORTS_PER_BOARD * boardnum + i] = pCh;
- pCh->port_index = (IP2_PORTS_PER_BOARD * boardnum) + i;
- pCh++;
- }
-ex_exit:
- INIT_WORK(&pB->tqueue_interrupt, ip2_interrupt_bh);
- return;
-
-err_release_region:
- release_region(ip2config.addr[boardnum], 8);
-err_initialize:
- kfree ( pB );
- i2BoardPtrTable[boardnum] = NULL;
- return;
-}
-
-/******************************************************************************/
-/* Function: find_eisa_board ( int start_slot ) */
-/* Parameters: First slot to check */
-/* Returns: Address of EISA IntelliPort II controller */
-/* */
-/* Description: */
-/* This function searches for an EISA IntelliPort controller, starting */
-/* from the specified slot number. If the motherboard is not identified as an */
-/* EISA motherboard, or no valid board ID is selected it returns 0. Otherwise */
-/* it returns the base address of the controller. */
-/******************************************************************************/
-static unsigned short
-find_eisa_board( int start_slot )
-{
- int i, j;
- unsigned int idm = 0;
- unsigned int idp = 0;
- unsigned int base = 0;
- unsigned int value;
- int setup_address;
- int setup_irq;
- int ismine = 0;
-
- /*
- * First a check for an EISA motherboard, which we do by comparing the
- * EISA ID registers for the system board and the first couple of slots.
- * No slot ID should match the system board ID, but on an ISA or PCI
- * machine the odds are that an empty bus will return similar values for
- * each slot.
- */
- i = 0x0c80;
- value = (inb(i) << 24) + (inb(i+1) << 16) + (inb(i+2) << 8) + inb(i+3);
- for( i = 0x1c80; i <= 0x4c80; i += 0x1000 ) {
- j = (inb(i)<<24)+(inb(i+1)<<16)+(inb(i+2)<<8)+inb(i+3);
- if ( value == j )
- return 0;
- }
-
- /*
- * OK, so we are inclined to believe that this is an EISA machine. Find
- * an IntelliPort controller.
- */
- for( i = start_slot; i < 16; i++ ) {
- base = i << 12;
- idm = (inb(base + 0xc80) << 8) | (inb(base + 0xc81) & 0xff);
- idp = (inb(base + 0xc82) << 8) | (inb(base + 0xc83) & 0xff);
- ismine = 0;
- if ( idm == 0x0e8e ) {
- if ( idp == 0x0281 || idp == 0x0218 ) {
- ismine = 1;
- } else if ( idp == 0x0282 || idp == 0x0283 ) {
- ismine = 3; /* Can do edge-trigger */
- }
- if ( ismine ) {
- Eisa_slot = i;
- break;
- }
- }
- }
- if ( !ismine )
- return 0;
-
- /* It's some sort of EISA card, but at what address is it configured? */
-
- setup_address = base + 0xc88;
- value = inb(base + 0xc86);
- setup_irq = (value & 8) ? Valid_Irqs[value & 7] : 0;
-
- if ( (ismine & 2) && !(value & 0x10) ) {
-		ismine = 1;	/* Could be edge-triggered, but is not */
- }
-
- if ( Eisa_irq == 0 ) {
- Eisa_irq = setup_irq;
- } else if ( Eisa_irq != setup_irq ) {
- printk ( KERN_ERR "IP2: EISA irq mismatch between EISA controllers\n" );
- }
-
-#ifdef IP2DEBUG_INIT
-	printk(KERN_DEBUG "Computone EISA board in slot %d, I.D. 0x%x%x, Address 0x%x",
-		base >> 12, idm, idp, setup_address);
- if ( Eisa_irq ) {
- printk(KERN_DEBUG ", Interrupt %d %s\n",
- setup_irq, (ismine & 2) ? "(edge)" : "(level)");
- } else {
- printk(KERN_DEBUG ", (polled)\n");
- }
-#endif
- return setup_address;
-}
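/*
 * Aside (illustrative sketch, not part of the driver): the manufacturer ID
 * tested above (idm == 0x0e8e) is a standard EISA compressed-ASCII vendor
 * code -- three 5-bit letters packed into 16 bits with 'A' == 1.  A minimal
 * decoder looks like this; 0x0e8e decodes to "CTN", which matches the
 * Computone boards this probe is looking for.
 */
static void eisa_id_to_string(unsigned int idm, char out[4])
{
	out[0] = 'A' - 1 + ((idm >> 10) & 0x1f);	/* bits 14..10 */
	out[1] = 'A' - 1 + ((idm >>  5) & 0x1f);	/* bits  9..5  */
	out[2] = 'A' - 1 + ( idm        & 0x1f);	/* bits  4..0  */
	out[3] = '\0';
}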
-
-/******************************************************************************/
-/* Function: set_irq() */
-/* Parameters: index to board in board table */
-/* IRQ to use */
-/* Returns: Success (0) */
-/* */
-/* Description: */
-/******************************************************************************/
-static void
-set_irq( int boardnum, int boardIrq )
-{
- unsigned char tempCommand[16];
- i2eBordStrPtr pB = i2BoardPtrTable[boardnum];
- unsigned long flags;
-
- /*
- * Notify the boards they may generate interrupts. This is done by
- * sending an in-line command to channel 0 on each board. This is why
- * the channels have to be defined already. For each board, if the
- * interrupt has never been defined, we must do so NOW, directly, since
-	 * the board will not send flow control or even raise an interrupt until
-	 * this is done. If polling, we must send 0 as the interrupt parameter.
- */
-
- // We will get an interrupt here at the end of this function
-
- iiDisableMailIrq(pB);
-
- /* We build up the entire packet header. */
- CHANNEL_OF(tempCommand) = 0;
- PTYPE_OF(tempCommand) = PTYPE_INLINE;
- CMD_COUNT_OF(tempCommand) = 2;
- (CMD_OF(tempCommand))[0] = CMDVALUE_IRQ;
- (CMD_OF(tempCommand))[1] = boardIrq;
- /*
- * Write to FIFO; don't bother to adjust fifo capacity for this, since
-	 * the board will respond almost immediately after SendMail is hit.
- */
- write_lock_irqsave(&pB->write_fifo_spinlock, flags);
- iiWriteBuf(pB, tempCommand, 4);
- write_unlock_irqrestore(&pB->write_fifo_spinlock, flags);
- pB->i2eUsingIrq = boardIrq;
- pB->i2eOutMailWaiting |= MB_OUT_STUFFED;
-
- /* Need to update number of boards before you enable mailbox int */
- ++i2nBoards;
-
- CHANNEL_OF(tempCommand) = 0;
- PTYPE_OF(tempCommand) = PTYPE_BYPASS;
- CMD_COUNT_OF(tempCommand) = 6;
- (CMD_OF(tempCommand))[0] = 88; // SILO
- (CMD_OF(tempCommand))[1] = 64; // chars
- (CMD_OF(tempCommand))[2] = 32; // ms
-
- (CMD_OF(tempCommand))[3] = 28; // MAX_BLOCK
- (CMD_OF(tempCommand))[4] = 64; // chars
-
- (CMD_OF(tempCommand))[5] = 87; // HW_TEST
- write_lock_irqsave(&pB->write_fifo_spinlock, flags);
- iiWriteBuf(pB, tempCommand, 8);
- write_unlock_irqrestore(&pB->write_fifo_spinlock, flags);
-
- CHANNEL_OF(tempCommand) = 0;
- PTYPE_OF(tempCommand) = PTYPE_BYPASS;
- CMD_COUNT_OF(tempCommand) = 1;
- (CMD_OF(tempCommand))[0] = 84; /* get BOX_IDS */
- iiWriteBuf(pB, tempCommand, 3);
-
-#ifdef XXX
-	// enable heartbeat for test purposes
- CHANNEL_OF(tempCommand) = 0;
- PTYPE_OF(tempCommand) = PTYPE_BYPASS;
- CMD_COUNT_OF(tempCommand) = 2;
- (CMD_OF(tempCommand))[0] = 44; /* get ping */
- (CMD_OF(tempCommand))[1] = 200; /* 200 ms */
- write_lock_irqsave(&pB->write_fifo_spinlock, flags);
- iiWriteBuf(pB, tempCommand, 4);
- write_unlock_irqrestore(&pB->write_fifo_spinlock, flags);
-#endif
-
- iiEnableMailIrq(pB);
- iiSendPendingMail(pB);
-}
-
-/******************************************************************************/
-/* Interrupt Handler Section */
-/******************************************************************************/
-
-static inline void
-service_all_boards(void)
-{
- int i;
- i2eBordStrPtr pB;
-
- /* Service every board on the list */
- for( i = 0; i < IP2_MAX_BOARDS; ++i ) {
- pB = i2BoardPtrTable[i];
- if ( pB ) {
- i2ServiceBoard( pB );
- }
- }
-}
-
-
-/******************************************************************************/
-/* Function: ip2_interrupt_bh(work) */
-/* Parameters:   work - work_struct embedded in the board structure          */
-/* Returns: Nothing */
-/* */
-/* Description: */
-/* Service the board in a bottom half interrupt handler and then */
-/* reenable the board's interrupts if it has an IRQ number */
-/* */
-/******************************************************************************/
-static void
-ip2_interrupt_bh(struct work_struct *work)
-{
- i2eBordStrPtr pB = container_of(work, i2eBordStr, tqueue_interrupt);
-// pB had better be set or we have a problem!  We can only get
-// here from the IMMEDIATE queue. Here, we process the boards.
-// Checking pB doesn't cost much and it saves us from the sanity checkers.
-
- bh_counter++;
-
- if ( pB ) {
- i2ServiceBoard( pB );
- if( pB->i2eUsingIrq ) {
-// Re-enable his interrupts
- iiEnableMailIrq(pB);
- }
- }
-}
-
-
-/******************************************************************************/
-/* Function: ip2_interrupt(int irq, void *dev_id) */
-/* Parameters: irq - interrupt number */
-/* pointer to optional device ID structure */
-/* Returns:      IRQ_HANDLED                                                 */
-/* */
-/* Description: */
-/* */
-/* Our task here is simply to identify each board which needs servicing. */
-/* If we are queuing then, queue it to be serviced, and disable its irq */
-/* mask otherwise process the board directly. */
-/* */
-/* We could queue by IRQ but that just complicates things on both ends */
-/* with very little gain in performance (how many instructions does */
-/* it take to iterate on the immediate queue). */
-/* */
-/* */
-/******************************************************************************/
-static void
-ip2_irq_work(i2eBordStrPtr pB)
-{
-#ifdef USE_IQI
- if (NO_MAIL_HERE != ( pB->i2eStartMail = iiGetMail(pB))) {
-// Disable his interrupt (will be enabled when serviced)
-// This is mostly to protect from reentrancy.
- iiDisableMailIrq(pB);
-
-// Park the board on the immediate queue for processing.
- schedule_work(&pB->tqueue_interrupt);
-
-// Make sure the immediate queue is flagged to fire.
- }
-#else
-
-// We are using immediate servicing here. This sucks and can
-// cause all sorts of havoc with ppp and others. The failsafe
-// check on iiSendPendingMail could also throw a hairball.
-
- i2ServiceBoard( pB );
-
-#endif /* USE_IQI */
-}
-
-static void
-ip2_polled_interrupt(void)
-{
- int i;
- i2eBordStrPtr pB;
-
- ip2trace(ITRC_NO_PORT, ITRC_INTR, 99, 1, 0);
-
- /* Service just the boards on the list using this irq */
- for( i = 0; i < i2nBoards; ++i ) {
- pB = i2BoardPtrTable[i];
-
-// Only process those boards which match our IRQ.
-//	IRQ = 0 for polled boards; we won't poll "IRQ" boards
-
- if (pB && pB->i2eUsingIrq == 0)
- ip2_irq_work(pB);
- }
-
- ++irq_counter;
-
- ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 );
-}
-
-static irqreturn_t
-ip2_interrupt(int irq, void *dev_id)
-{
- i2eBordStrPtr pB = dev_id;
-
- ip2trace (ITRC_NO_PORT, ITRC_INTR, 99, 1, pB->i2eUsingIrq );
-
- ip2_irq_work(pB);
-
- ++irq_counter;
-
- ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 );
- return IRQ_HANDLED;
-}
-
-/******************************************************************************/
-/* Function: ip2_poll(unsigned long arg) */
-/* Parameters:   arg - unused timer argument                                 */
-/* Returns: Nothing */
-/* */
-/* Description: */
-/* This function calls the library routine i2ServiceBoard for each board in */
-/* the board table. This is used instead of the interrupt routine when polled */
-/* mode is specified. */
-/******************************************************************************/
-static void
-ip2_poll(unsigned long arg)
-{
- ip2trace (ITRC_NO_PORT, ITRC_INTR, 100, 0 );
-
- // Just polled boards, IRQ = 0 will hit all non-interrupt boards.
- // It will NOT poll boards handled by hard interrupts.
- // The issue of queued BH interrupts is handled in ip2_interrupt().
- ip2_polled_interrupt();
-
- mod_timer(&PollTimer, POLL_TIMEOUT);
-
- ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 );
-}
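/*
 * Illustrative sketch (not the driver's own setup code): ip2_poll() above
 * re-arms itself with mod_timer(); PollTimer and POLL_TIMEOUT are defined
 * elsewhere in this file.  The fragment below only shows the classic
 * <linux/timer.h> pattern such a polled mode relies on -- the callback
 * services the hardware and re-arms the timer.  The names and the one-jiffy
 * period are assumptions for illustration.
 */
static struct timer_list example_poll_timer;

static void example_poll(unsigned long arg)
{
	/* service the hardware here (e.g. call the polled service routine) */
	mod_timer(&example_poll_timer, jiffies + 1);	/* re-arm */
}

static void example_poll_start(void)
{
	setup_timer(&example_poll_timer, example_poll, 0UL);
	mod_timer(&example_poll_timer, jiffies + 1);
}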
-
-static void do_input(struct work_struct *work)
-{
- i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_input);
- unsigned long flags;
-
- ip2trace(CHANN, ITRC_INPUT, 21, 0 );
-
- // Data input
- if ( pCh->pTTY != NULL ) {
- read_lock_irqsave(&pCh->Ibuf_spinlock, flags);
- if (!pCh->throttled && (pCh->Ibuf_stuff != pCh->Ibuf_strip)) {
- read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
- i2Input( pCh );
- } else
- read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
- } else {
- ip2trace(CHANN, ITRC_INPUT, 22, 0 );
-
- i2InputFlush( pCh );
- }
-}
-
-// code duplicated from n_tty (ldisc)
-static inline void isig(int sig, struct tty_struct *tty, int flush)
-{
- /* FIXME: This is completely bogus */
- if (tty->pgrp)
- kill_pgrp(tty->pgrp, sig, 1);
- if (flush || !L_NOFLSH(tty)) {
- if ( tty->ldisc->ops->flush_buffer )
- tty->ldisc->ops->flush_buffer(tty);
- i2InputFlush( tty->driver_data );
- }
-}
-
-static void do_status(struct work_struct *work)
-{
- i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_status);
- int status;
-
- status = i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) );
-
- ip2trace (CHANN, ITRC_STATUS, 21, 1, status );
-
- if (pCh->pTTY && (status & (I2_BRK|I2_PAR|I2_FRA|I2_OVR)) ) {
- if ( (status & I2_BRK) ) {
- // code duplicated from n_tty (ldisc)
- if (I_IGNBRK(pCh->pTTY))
- goto skip_this;
- if (I_BRKINT(pCh->pTTY)) {
- isig(SIGINT, pCh->pTTY, 1);
- goto skip_this;
- }
- wake_up_interruptible(&pCh->pTTY->read_wait);
- }
-#ifdef NEVER_HAPPENS_AS_SETUP_XXX
- // and can't work because we don't know the_char
- // as the_char is reported on a separate path
- // The intelligent board does this stuff as setup
- {
- char brkf = TTY_NORMAL;
- unsigned char brkc = '\0';
- unsigned char tmp;
- if ( (status & I2_BRK) ) {
- brkf = TTY_BREAK;
- brkc = '\0';
- }
- else if (status & I2_PAR) {
- brkf = TTY_PARITY;
- brkc = the_char;
- } else if (status & I2_FRA) {
- brkf = TTY_FRAME;
- brkc = the_char;
- } else if (status & I2_OVR) {
- brkf = TTY_OVERRUN;
- brkc = the_char;
- }
- tmp = pCh->pTTY->real_raw;
- pCh->pTTY->real_raw = 0;
-			pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY, &brkc, &brkf, 1 );
- pCh->pTTY->real_raw = tmp;
- }
-#endif /* NEVER_HAPPENS_AS_SETUP_XXX */
- }
-skip_this:
-
- if ( status & (I2_DDCD | I2_DDSR | I2_DCTS | I2_DRI) ) {
- wake_up_interruptible(&pCh->delta_msr_wait);
-
- if ( (pCh->flags & ASYNC_CHECK_CD) && (status & I2_DDCD) ) {
- if ( status & I2_DCD ) {
- if ( pCh->wopen ) {
- wake_up_interruptible ( &pCh->open_wait );
- }
- } else {
- if (pCh->pTTY && (!(pCh->pTTY->termios->c_cflag & CLOCAL)) ) {
- tty_hangup( pCh->pTTY );
- }
- }
- }
- }
-
- ip2trace (CHANN, ITRC_STATUS, 26, 0 );
-}
-
-/******************************************************************************/
-/* Device Open/Close/Ioctl Entry Point Section */
-/******************************************************************************/
-
-/******************************************************************************/
-/* Function: open_sanity_check() */
-/* Parameters: Pointer to tty structure */
-/* Pointer to file structure */
-/* Returns: Success or failure */
-/* */
-/* Description: */
-/* Verifies the structure magic numbers and cross links. */
-/******************************************************************************/
-#ifdef IP2DEBUG_OPEN
-static void
-open_sanity_check( i2ChanStrPtr pCh, i2eBordStrPtr pBrd )
-{
- if ( pBrd->i2eValid != I2E_MAGIC ) {
- printk(KERN_ERR "IP2: invalid board structure\n" );
- } else if ( pBrd != pCh->pMyBord ) {
- printk(KERN_ERR "IP2: board structure pointer mismatch (%p)\n",
- pCh->pMyBord );
- } else if ( pBrd->i2eChannelCnt < pCh->port_index ) {
- printk(KERN_ERR "IP2: bad device index (%d)\n", pCh->port_index );
-	} else if (&((i2ChanStrPtr)pBrd->i2eChannelPtr)[pCh->port_index] != pCh) {
-		printk(KERN_ERR "IP2: channel structure pointer mismatch (%p)\n", pCh );
-	} else {
- printk(KERN_INFO "IP2: all pointers check out!\n" );
- }
-}
-#endif
-
-
-/******************************************************************************/
-/* Function: ip2_open() */
-/* Parameters: Pointer to tty structure */
-/* Pointer to file structure */
-/* Returns: Success or failure */
-/* */
-/* Description: (MANDATORY) */
-/* A successful device open has to run a gauntlet of checks before it */
-/* completes. After some sanity checking and pointer setup, the function */
-/* blocks until all conditions are satisfied. It then initialises the port to */
-/* the default characteristics and returns. */
-/******************************************************************************/
-static int
-ip2_open( PTTY tty, struct file *pFile )
-{
- wait_queue_t wait;
- int rc = 0;
- int do_clocal = 0;
- i2ChanStrPtr pCh = DevTable[tty->index];
-
- ip2trace (tty->index, ITRC_OPEN, ITRC_ENTER, 0 );
-
- if ( pCh == NULL ) {
- return -ENODEV;
- }
- /* Setup pointer links in device and tty structures */
- pCh->pTTY = tty;
- tty->driver_data = pCh;
-
-#ifdef IP2DEBUG_OPEN
-	printk(KERN_DEBUG
-		"IP2:open(tty=%p,pFile=%p):dev=%s,ch=%d,idx=%d\n",
-		tty, pFile, tty->name, pCh->infl.hd.i2sChannel, pCh->port_index);
- open_sanity_check ( pCh, pCh->pMyBord );
-#endif
-
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 3, CMD_DTRUP,CMD_RTSUP,CMD_DCD_REP);
- pCh->dataSetOut |= (I2_DTR | I2_RTS);
- serviceOutgoingFifo( pCh->pMyBord );
-
- /* Block here until the port is ready (per serial and istallion) */
- /*
- * 1. If the port is in the middle of closing wait for the completion
- * and then return the appropriate error.
- */
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&pCh->close_wait, &wait);
- set_current_state( TASK_INTERRUPTIBLE );
-
- if ( tty_hung_up_p(pFile) || ( pCh->flags & ASYNC_CLOSING )) {
- if ( pCh->flags & ASYNC_CLOSING ) {
- tty_unlock();
- schedule();
- tty_lock();
- }
- if ( tty_hung_up_p(pFile) ) {
- set_current_state( TASK_RUNNING );
- remove_wait_queue(&pCh->close_wait, &wait);
- return( pCh->flags & ASYNC_HUP_NOTIFY ) ? -EAGAIN : -ERESTARTSYS;
- }
- }
- set_current_state( TASK_RUNNING );
- remove_wait_queue(&pCh->close_wait, &wait);
-
- /*
- * 3. Handle a non-blocking open of a normal port.
- */
- if ( (pFile->f_flags & O_NONBLOCK) || (tty->flags & (1<<TTY_IO_ERROR) )) {
- pCh->flags |= ASYNC_NORMAL_ACTIVE;
- goto noblock;
- }
- /*
- * 4. Now loop waiting for the port to be free and carrier present
- * (if required).
- */
- if ( tty->termios->c_cflag & CLOCAL )
- do_clocal = 1;
-
-#ifdef IP2DEBUG_OPEN
- printk(KERN_DEBUG "OpenBlock: do_clocal = %d\n", do_clocal);
-#endif
-
- ++pCh->wopen;
-
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&pCh->open_wait, &wait);
-
- for(;;) {
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 2, CMD_DTRUP, CMD_RTSUP);
- pCh->dataSetOut |= (I2_DTR | I2_RTS);
- set_current_state( TASK_INTERRUPTIBLE );
- serviceOutgoingFifo( pCh->pMyBord );
- if ( tty_hung_up_p(pFile) ) {
- set_current_state( TASK_RUNNING );
- remove_wait_queue(&pCh->open_wait, &wait);
- return ( pCh->flags & ASYNC_HUP_NOTIFY ) ? -EBUSY : -ERESTARTSYS;
- }
- if (!(pCh->flags & ASYNC_CLOSING) &&
- (do_clocal || (pCh->dataSetIn & I2_DCD) )) {
- rc = 0;
- break;
- }
-
-#ifdef IP2DEBUG_OPEN
- printk(KERN_DEBUG "ASYNC_CLOSING = %s\n",
- (pCh->flags & ASYNC_CLOSING)?"True":"False");
- printk(KERN_DEBUG "OpenBlock: waiting for CD or signal\n");
-#endif
- ip2trace (CHANN, ITRC_OPEN, 3, 2, 0,
- (pCh->flags & ASYNC_CLOSING) );
- /* check for signal */
- if (signal_pending(current)) {
- rc = (( pCh->flags & ASYNC_HUP_NOTIFY ) ? -EAGAIN : -ERESTARTSYS);
- break;
- }
- tty_unlock();
- schedule();
- tty_lock();
- }
- set_current_state( TASK_RUNNING );
- remove_wait_queue(&pCh->open_wait, &wait);
-
- --pCh->wopen; //why count?
-
- ip2trace (CHANN, ITRC_OPEN, 4, 0 );
-
- if (rc != 0 ) {
- return rc;
- }
- pCh->flags |= ASYNC_NORMAL_ACTIVE;
-
-noblock:
-
- /* first open - Assign termios structure to port */
- if ( tty->count == 1 ) {
- i2QueueCommands(PTYPE_INLINE, pCh, 0, 2, CMD_CTSFL_DSAB, CMD_RTSFL_DSAB);
- /* Now we must send the termios settings to the loadware */
- set_params( pCh, NULL );
- }
-
- /*
- * Now set any i2lib options. These may go away if the i2lib code ends
- * up rolled into the mainline.
- */
- pCh->channelOptions |= CO_NBLOCK_WRITE;
-
-#ifdef IP2DEBUG_OPEN
- printk (KERN_DEBUG "IP2: open completed\n" );
-#endif
- serviceOutgoingFifo( pCh->pMyBord );
-
- ip2trace (CHANN, ITRC_OPEN, ITRC_RETURN, 0 );
-
- return 0;
-}
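/*
 * Userspace sketch (not driver code): the open path above blocks waiting
 * for carrier unless the caller passes O_NONBLOCK or the port already has
 * CLOCAL set.  A common application pattern is to open non-blocking and
 * then drop the flag for normal I/O.  The device name shown is an
 * assumption about how the ip2 tty nodes are named on a given system.
 */
#include <fcntl.h>

int open_port_without_carrier_wait(const char *dev /* e.g. "/dev/ttyF0" */)
{
	int fd = open(dev, O_RDWR | O_NOCTTY | O_NONBLOCK);

	if (fd < 0)
		return -1;
	/* carrier wait was skipped; restore blocking reads and writes */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) & ~O_NONBLOCK);
	return fd;
}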
-
-/******************************************************************************/
-/* Function: ip2_close() */
-/* Parameters: Pointer to tty structure */
-/* Pointer to file structure */
-/* Returns: Nothing */
-/* */
-/* Description: */
-/* */
-/* */
-/******************************************************************************/
-static void
-ip2_close( PTTY tty, struct file *pFile )
-{
- i2ChanStrPtr pCh = tty->driver_data;
-
- if ( !pCh ) {
- return;
- }
-
- ip2trace (CHANN, ITRC_CLOSE, ITRC_ENTER, 0 );
-
-#ifdef IP2DEBUG_OPEN
- printk(KERN_DEBUG "IP2:close %s:\n",tty->name);
-#endif
-
- if ( tty_hung_up_p ( pFile ) ) {
-
- ip2trace (CHANN, ITRC_CLOSE, 2, 1, 2 );
-
- return;
- }
- if ( tty->count > 1 ) { /* not the last close */
-
- ip2trace (CHANN, ITRC_CLOSE, 2, 1, 3 );
-
- return;
- }
- pCh->flags |= ASYNC_CLOSING; // last close actually
-
- tty->closing = 1;
-
- if (pCh->ClosingWaitTime != ASYNC_CLOSING_WAIT_NONE) {
- /*
- * Before we drop DTR, make sure the transmitter has completely drained.
-		 * This uses a timeout, after which the close
- * completes.
- */
- ip2_wait_until_sent(tty, pCh->ClosingWaitTime );
- }
- /*
- * At this point we stop accepting input. Here we flush the channel
- * input buffer which will allow the board to send up more data. Any
- * additional input is tossed at interrupt/poll time.
- */
- i2InputFlush( pCh );
-
- /* disable DSS reporting */
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 4,
- CMD_DCD_NREP, CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP);
- if (tty->termios->c_cflag & HUPCL) {
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 2, CMD_RTSDN, CMD_DTRDN);
- pCh->dataSetOut &= ~(I2_DTR | I2_RTS);
- i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25));
- }
-
- serviceOutgoingFifo ( pCh->pMyBord );
-
- tty_ldisc_flush(tty);
- tty_driver_flush_buffer(tty);
- tty->closing = 0;
-
- pCh->pTTY = NULL;
-
- if (pCh->wopen) {
- if (pCh->ClosingDelay) {
- msleep_interruptible(jiffies_to_msecs(pCh->ClosingDelay));
- }
- wake_up_interruptible(&pCh->open_wait);
- }
-
- pCh->flags &=~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
- wake_up_interruptible(&pCh->close_wait);
-
-#ifdef IP2DEBUG_OPEN
- DBG_CNT("ip2_close: after wakeups--");
-#endif
-
-
- ip2trace (CHANN, ITRC_CLOSE, ITRC_RETURN, 1, 1 );
-
- return;
-}
-
-/******************************************************************************/
-/* Function: ip2_hangup() */
-/* Parameters: Pointer to tty structure */
-/* Returns: Nothing */
-/* */
-/* Description: */
-/* */
-/* */
-/******************************************************************************/
-static void
-ip2_hangup ( PTTY tty )
-{
- i2ChanStrPtr pCh = tty->driver_data;
-
- if( !pCh ) {
- return;
- }
-
- ip2trace (CHANN, ITRC_HANGUP, ITRC_ENTER, 0 );
-
- ip2_flush_buffer(tty);
-
- /* disable DSS reporting */
-
- i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_DCD_NREP);
- i2QueueCommands(PTYPE_INLINE, pCh, 0, 2, CMD_CTSFL_DSAB, CMD_RTSFL_DSAB);
- if ( (tty->termios->c_cflag & HUPCL) ) {
- i2QueueCommands(PTYPE_BYPASS, pCh, 0, 2, CMD_RTSDN, CMD_DTRDN);
- pCh->dataSetOut &= ~(I2_DTR | I2_RTS);
- i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25));
- }
- i2QueueCommands(PTYPE_INLINE, pCh, 1, 3,
- CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP);
- serviceOutgoingFifo ( pCh->pMyBord );
-
- wake_up_interruptible ( &pCh->delta_msr_wait );
-
- pCh->flags &= ~ASYNC_NORMAL_ACTIVE;
- pCh->pTTY = NULL;
- wake_up_interruptible ( &pCh->open_wait );
-
- ip2trace (CHANN, ITRC_HANGUP, ITRC_RETURN, 0 );
-}
-
-/******************************************************************************/
-/******************************************************************************/
-/* Device Output Section */
-/******************************************************************************/
-/******************************************************************************/
-
-/******************************************************************************/
-/* Function: ip2_write() */
-/* Parameters:   Pointer to tty structure                                    */
-/*               Pointer to the data to be written                           */
-/*               Number of bytes to write                                    */
-/* Returns: Number of bytes actually written */
-/* */
-/* Description: (MANDATORY) */
-/* */
-/* */
-/******************************************************************************/
-static int
-ip2_write( PTTY tty, const unsigned char *pData, int count)
-{
- i2ChanStrPtr pCh = tty->driver_data;
- int bytesSent = 0;
- unsigned long flags;
-
- ip2trace (CHANN, ITRC_WRITE, ITRC_ENTER, 2, count, -1 );
-
- /* Flush out any buffered data left over from ip2_putchar() calls. */
- ip2_flush_chars( tty );
-
- /* This is the actual move bit. Make sure it does what we need!!!!! */
- write_lock_irqsave(&pCh->Pbuf_spinlock, flags);
- bytesSent = i2Output( pCh, pData, count);
- write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
-
- ip2trace (CHANN, ITRC_WRITE, ITRC_RETURN, 1, bytesSent );
-
- return bytesSent > 0 ? bytesSent : 0;
-}
-
-/******************************************************************************/
-/* Function: ip2_putchar() */
-/* Parameters: Pointer to tty structure */
-/* Character to write */
-/* Returns:      1 (the character is always buffered)                        */
-/* */
-/* Description: */
-/* */
-/* */
-/******************************************************************************/
-static int
-ip2_putchar( PTTY tty, unsigned char ch )
-{
- i2ChanStrPtr pCh = tty->driver_data;
- unsigned long flags;
-
-// ip2trace (CHANN, ITRC_PUTC, ITRC_ENTER, 1, ch );
-
- write_lock_irqsave(&pCh->Pbuf_spinlock, flags);
- pCh->Pbuf[pCh->Pbuf_stuff++] = ch;
- if ( pCh->Pbuf_stuff == sizeof pCh->Pbuf ) {
- write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
- ip2_flush_chars( tty );
- } else
- write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
- return 1;
-
-// ip2trace (CHANN, ITRC_PUTC, ITRC_RETURN, 1, ch );
-}
-
-/******************************************************************************/
-/* Function: ip2_flush_chars() */
-/* Parameters: Pointer to tty structure */
-/* Returns: Nothing */
-/* */
-/* Description: */
-/* */
-/******************************************************************************/
-static void
-ip2_flush_chars( PTTY tty )
-{
- int strip;
- i2ChanStrPtr pCh = tty->driver_data;
- unsigned long flags;
-
- write_lock_irqsave(&pCh->Pbuf_spinlock, flags);
- if ( pCh->Pbuf_stuff ) {
-
-// ip2trace (CHANN, ITRC_PUTC, 10, 1, strip );
-
- //
- // We may need to restart i2Output if it does not fulfill this request
- //
- strip = i2Output( pCh, pCh->Pbuf, pCh->Pbuf_stuff);
- if ( strip != pCh->Pbuf_stuff ) {
- memmove( pCh->Pbuf, &pCh->Pbuf[strip], pCh->Pbuf_stuff - strip );
- }
- pCh->Pbuf_stuff -= strip;
- }
- write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
-}
-
-/******************************************************************************/
-/* Function: ip2_write_room() */
-/* Parameters: Pointer to tty structure */
-/* Returns: Number of bytes that the driver can accept */
-/* */
-/* Description: */
-/* */
-/******************************************************************************/
-static int
-ip2_write_room ( PTTY tty )
-{
- int bytesFree;
- i2ChanStrPtr pCh = tty->driver_data;
- unsigned long flags;
-
- read_lock_irqsave(&pCh->Pbuf_spinlock, flags);
- bytesFree = i2OutputFree( pCh ) - pCh->Pbuf_stuff;
- read_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
-
- ip2trace (CHANN, ITRC_WRITE, 11, 1, bytesFree );
-
- return ((bytesFree > 0) ? bytesFree : 0);
-}
-
-/******************************************************************************/
-/* Function: ip2_chars_in_buf() */
-/* Parameters: Pointer to tty structure */
-/* Returns: Number of bytes queued for transmission */
-/* */
-/* Description: */
-/* */
-/* */
-/******************************************************************************/
-static int
-ip2_chars_in_buf ( PTTY tty )
-{
- i2ChanStrPtr pCh = tty->driver_data;
- int rc;
- unsigned long flags;
-
- ip2trace (CHANN, ITRC_WRITE, 12, 1, pCh->Obuf_char_count + pCh->Pbuf_stuff );
-
-#ifdef IP2DEBUG_WRITE
- printk (KERN_DEBUG "IP2: chars in buffer = %d (%d,%d)\n",
- pCh->Obuf_char_count + pCh->Pbuf_stuff,
- pCh->Obuf_char_count, pCh->Pbuf_stuff );
-#endif
- read_lock_irqsave(&pCh->Obuf_spinlock, flags);
- rc = pCh->Obuf_char_count;
- read_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
- read_lock_irqsave(&pCh->Pbuf_spinlock, flags);
- rc += pCh->Pbuf_stuff;
- read_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
- return rc;
-}
-
-/******************************************************************************/
-/* Function: ip2_flush_buffer() */
-/* Parameters: Pointer to tty structure */
-/* Returns: Nothing */
-/* */
-/* Description: */
-/* */
-/* */
-/******************************************************************************/
-static void
-ip2_flush_buffer( PTTY tty )
-{
- i2ChanStrPtr pCh = tty->driver_data;
- unsigned long flags;
-
- ip2trace (CHANN, ITRC_FLUSH, ITRC_ENTER, 0 );
-
-#ifdef IP2DEBUG_WRITE
- printk (KERN_DEBUG "IP2: flush buffer\n" );
-#endif
- write_lock_irqsave(&pCh->Pbuf_spinlock, flags);
- pCh->Pbuf_stuff = 0;
- write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
- i2FlushOutput( pCh );
- ip2_owake(tty);
-
- ip2trace (CHANN, ITRC_FLUSH, ITRC_RETURN, 0 );
-
-}
-
-/******************************************************************************/
-/* Function: ip2_wait_until_sent() */
-/* Parameters: Pointer to tty structure */
-/* Timeout for wait. */
-/* Returns: Nothing */
-/* */
-/* Description: */
-/* This function is used in place of the normal tty_wait_until_sent, which */
-/* only waits for the driver buffers to be empty (or rather, those buffers */
-/* reported by chars_in_buffer) which doesn't work for IP2 due to the */
-/* indeterminate number of bytes buffered on the board. */
-/******************************************************************************/
-static void
-ip2_wait_until_sent ( PTTY tty, int timeout )
-{
- int i = jiffies;
- i2ChanStrPtr pCh = tty->driver_data;
-
- tty_wait_until_sent(tty, timeout );
- if ( (i = timeout - (jiffies -i)) > 0)
- i2DrainOutput( pCh, i );
-}
-
-/******************************************************************************/
-/******************************************************************************/
-/* Device Input Section */
-/******************************************************************************/
-/******************************************************************************/
-
-/******************************************************************************/
-/* Function: ip2_throttle() */
-/* Parameters: Pointer to tty structure */
-/* Returns: Nothing */
-/* */
-/* Description: */
-/* */
-/* */
-/******************************************************************************/
-static void
-ip2_throttle ( PTTY tty )
-{
- i2ChanStrPtr pCh = tty->driver_data;
-
-#ifdef IP2DEBUG_READ
- printk (KERN_DEBUG "IP2: throttle\n" );
-#endif
- /*
- * Signal the poll/interrupt handlers not to forward incoming data to
- * the line discipline. This will cause the buffers to fill up in the
- * library and thus cause the library routines to send the flow control
- * stuff.
- */
- pCh->throttled = 1;
-}
-
-/******************************************************************************/
-/* Function: ip2_unthrottle() */
-/* Parameters: Pointer to tty structure */
-/* Returns: Nothing */
-/* */
-/* Description: */
-/* */
-/* */
-/******************************************************************************/
-static void
-ip2_unthrottle ( PTTY tty )
-{
- i2ChanStrPtr pCh = tty->driver_data;
- unsigned long flags;
-
-#ifdef IP2DEBUG_READ
- printk (KERN_DEBUG "IP2: unthrottle\n" );
-#endif
-
- /* Pass incoming data up to the line discipline again. */
- pCh->throttled = 0;
- i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_RESUME);
- serviceOutgoingFifo( pCh->pMyBord );
- read_lock_irqsave(&pCh->Ibuf_spinlock, flags);
- if ( pCh->Ibuf_stuff != pCh->Ibuf_strip ) {
- read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
-#ifdef IP2DEBUG_READ
- printk (KERN_DEBUG "i2Input called from unthrottle\n" );
-#endif
- i2Input( pCh );
- } else
- read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
-}
-
-static void
-ip2_start ( PTTY tty )
-{
- i2ChanStrPtr pCh = DevTable[tty->index];
-
- i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_RESUME);
- i2QueueCommands(PTYPE_BYPASS, pCh, 100, 1, CMD_UNSUSPEND);
- i2QueueCommands(PTYPE_BYPASS, pCh, 100, 1, CMD_RESUME);
-#ifdef IP2DEBUG_WRITE
- printk (KERN_DEBUG "IP2: start tx\n" );
-#endif
-}
-
-static void
-ip2_stop ( PTTY tty )
-{
- i2ChanStrPtr pCh = DevTable[tty->index];
-
- i2QueueCommands(PTYPE_BYPASS, pCh, 100, 1, CMD_SUSPEND);
-#ifdef IP2DEBUG_WRITE
- printk (KERN_DEBUG "IP2: stop tx\n" );
-#endif
-}
-
-/******************************************************************************/
-/* Device Ioctl Section */
-/******************************************************************************/
-
-static int ip2_tiocmget(struct tty_struct *tty)
-{
- i2ChanStrPtr pCh = DevTable[tty->index];
-#ifdef ENABLE_DSSNOW
- wait_queue_t wait;
-#endif
-
- if (pCh == NULL)
- return -ENODEV;
-
-/*
- FIXME - the following code is causing a NULL pointer dereference in
-	2.3.51 in an interrupt handler.  It's supposed to prompt the board
- to return the DSS signal status immediately. Why doesn't it do
- the same thing in 2.2.14?
-*/
-
-/* This thing is still busted in the 1.2.12 driver on 2.4.x
- and even hoses the serial console so the oops can be trapped.
- /\/\|=mhw=|\/\/ */
-
-#ifdef ENABLE_DSSNOW
- i2QueueCommands(PTYPE_BYPASS, pCh, 100, 1, CMD_DSS_NOW);
-
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&pCh->dss_now_wait, &wait);
- set_current_state( TASK_INTERRUPTIBLE );
-
- serviceOutgoingFifo( pCh->pMyBord );
-
- schedule();
-
- set_current_state( TASK_RUNNING );
- remove_wait_queue(&pCh->dss_now_wait, &wait);
-
- if (signal_pending(current)) {
- return -EINTR;
- }
-#endif
- return ((pCh->dataSetOut & I2_RTS) ? TIOCM_RTS : 0)
- | ((pCh->dataSetOut & I2_DTR) ? TIOCM_DTR : 0)
- | ((pCh->dataSetIn & I2_DCD) ? TIOCM_CAR : 0)
- | ((pCh->dataSetIn & I2_RI) ? TIOCM_RNG : 0)
- | ((pCh->dataSetIn & I2_DSR) ? TIOCM_DSR : 0)
- | ((pCh->dataSetIn & I2_CTS) ? TIOCM_CTS : 0);
-}
-
-static int ip2_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- i2ChanStrPtr pCh = DevTable[tty->index];
-
- if (pCh == NULL)
- return -ENODEV;
-
- if (set & TIOCM_RTS) {
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_RTSUP);
- pCh->dataSetOut |= I2_RTS;
- }
- if (set & TIOCM_DTR) {
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_DTRUP);
- pCh->dataSetOut |= I2_DTR;
- }
-
- if (clear & TIOCM_RTS) {
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_RTSDN);
- pCh->dataSetOut &= ~I2_RTS;
- }
- if (clear & TIOCM_DTR) {
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_DTRDN);
- pCh->dataSetOut &= ~I2_DTR;
- }
- serviceOutgoingFifo( pCh->pMyBord );
- return 0;
-}
-
-/******************************************************************************/
-/* Function: ip2_ioctl() */
-/* Parameters: Pointer to tty structure */
-/* Pointer to file structure */
-/* Command */
-/* Argument */
-/* Returns: Success or failure */
-/* */
-/* Description: */
-/* */
-/* */
-/******************************************************************************/
-static int
-ip2_ioctl ( PTTY tty, UINT cmd, ULONG arg )
-{
- wait_queue_t wait;
- i2ChanStrPtr pCh = DevTable[tty->index];
- i2eBordStrPtr pB;
- struct async_icount cprev, cnow; /* kernel counter temps */
- int rc = 0;
- unsigned long flags;
- void __user *argp = (void __user *)arg;
-
- if ( pCh == NULL )
- return -ENODEV;
-
- pB = pCh->pMyBord;
-
- ip2trace (CHANN, ITRC_IOCTL, ITRC_ENTER, 2, cmd, arg );
-
-#ifdef IP2DEBUG_IOCTL
- printk(KERN_DEBUG "IP2: ioctl cmd (%x), arg (%lx)\n", cmd, arg );
-#endif
-
- switch(cmd) {
- case TIOCGSERIAL:
-
- ip2trace (CHANN, ITRC_IOCTL, 2, 1, rc );
-
- rc = get_serial_info(pCh, argp);
- if (rc)
- return rc;
- break;
-
- case TIOCSSERIAL:
-
- ip2trace (CHANN, ITRC_IOCTL, 3, 1, rc );
-
- rc = set_serial_info(pCh, argp);
- if (rc)
- return rc;
- break;
-
- case TCXONC:
- rc = tty_check_change(tty);
- if (rc)
- return rc;
- switch (arg) {
- case TCOOFF:
- //return -ENOIOCTLCMD;
- break;
- case TCOON:
- //return -ENOIOCTLCMD;
- break;
- case TCIOFF:
- if (STOP_CHAR(tty) != __DISABLED_CHAR) {
- i2QueueCommands( PTYPE_BYPASS, pCh, 100, 1,
- CMD_XMIT_NOW(STOP_CHAR(tty)));
- }
- break;
- case TCION:
- if (START_CHAR(tty) != __DISABLED_CHAR) {
- i2QueueCommands( PTYPE_BYPASS, pCh, 100, 1,
- CMD_XMIT_NOW(START_CHAR(tty)));
- }
- break;
- default:
- return -EINVAL;
- }
- return 0;
-
- case TCSBRK: /* SVID version: non-zero arg --> no break */
- rc = tty_check_change(tty);
-
- ip2trace (CHANN, ITRC_IOCTL, 4, 1, rc );
-
- if (!rc) {
- ip2_wait_until_sent(tty,0);
- if (!arg) {
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_SEND_BRK(250));
- serviceOutgoingFifo( pCh->pMyBord );
- }
- }
- break;
-
- case TCSBRKP: /* support for POSIX tcsendbreak() */
- rc = tty_check_change(tty);
-
- ip2trace (CHANN, ITRC_IOCTL, 5, 1, rc );
-
- if (!rc) {
- ip2_wait_until_sent(tty,0);
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1,
- CMD_SEND_BRK(arg ? arg*100 : 250));
- serviceOutgoingFifo ( pCh->pMyBord );
- }
- break;
-
- case TIOCGSOFTCAR:
-
- ip2trace (CHANN, ITRC_IOCTL, 6, 1, rc );
-
- rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
- if (rc)
- return rc;
- break;
-
- case TIOCSSOFTCAR:
-
- ip2trace (CHANN, ITRC_IOCTL, 7, 1, rc );
-
- rc = get_user(arg,(unsigned long __user *) argp);
- if (rc)
- return rc;
- tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL)
- | (arg ? CLOCAL : 0));
-
- break;
-
- /*
- * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change - mask
- * passed in arg for lines of interest (use |'ed TIOCM_RNG/DSR/CD/CTS
- * for masking). Caller should use TIOCGICOUNT to see which one it was
- */
- case TIOCMIWAIT:
- write_lock_irqsave(&pB->read_fifo_spinlock, flags);
- cprev = pCh->icount; /* note the counters on entry */
- write_unlock_irqrestore(&pB->read_fifo_spinlock, flags);
- i2QueueCommands(PTYPE_BYPASS, pCh, 100, 4,
- CMD_DCD_REP, CMD_CTS_REP, CMD_DSR_REP, CMD_RI_REP);
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&pCh->delta_msr_wait, &wait);
- set_current_state( TASK_INTERRUPTIBLE );
-
- serviceOutgoingFifo( pCh->pMyBord );
- for(;;) {
- ip2trace (CHANN, ITRC_IOCTL, 10, 0 );
-
- schedule();
-
- ip2trace (CHANN, ITRC_IOCTL, 11, 0 );
-
- /* see if a signal did it */
- if (signal_pending(current)) {
- rc = -ERESTARTSYS;
- break;
- }
- write_lock_irqsave(&pB->read_fifo_spinlock, flags);
- cnow = pCh->icount; /* atomic copy */
- write_unlock_irqrestore(&pB->read_fifo_spinlock, flags);
- if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
- cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
- rc = -EIO; /* no change => rc */
- break;
- }
- if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
- ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
- ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
- ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts)) ) {
- rc = 0;
- break;
- }
- cprev = cnow;
- }
- set_current_state( TASK_RUNNING );
- remove_wait_queue(&pCh->delta_msr_wait, &wait);
-
- i2QueueCommands(PTYPE_BYPASS, pCh, 100, 3,
- CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP);
- if ( ! (pCh->flags & ASYNC_CHECK_CD)) {
- i2QueueCommands(PTYPE_BYPASS, pCh, 100, 1, CMD_DCD_NREP);
- }
- serviceOutgoingFifo( pCh->pMyBord );
- return rc;
- break;
-
- /*
- * The rest are not supported by this driver. By returning -ENOIOCTLCMD they
- * will be passed to the line discipline for it to handle.
- */
- case TIOCSERCONFIG:
- case TIOCSERGWILD:
- case TIOCSERGETLSR:
- case TIOCSERSWILD:
- case TIOCSERGSTRUCT:
- case TIOCSERGETMULTI:
- case TIOCSERSETMULTI:
-
- default:
- ip2trace (CHANN, ITRC_IOCTL, 12, 0 );
-
- rc = -ENOIOCTLCMD;
- break;
- }
-
- ip2trace (CHANN, ITRC_IOCTL, ITRC_RETURN, 0 );
-
- return rc;
-}
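/*
 * Userspace sketch (not driver code): how an application typically drives
 * the TIOCMIWAIT handling above -- sleep until DCD or CTS changes, then use
 * TIOCGICOUNT (serviced by ip2_get_icount() below) to see which counter
 * moved.  Error handling is trimmed; the port is assumed to be open on fd.
 */
#include <sys/ioctl.h>
#include <stdio.h>
#include <linux/serial.h>	/* struct serial_icounter_struct */

int wait_for_modem_change(int fd)
{
	struct serial_icounter_struct ic;

	/* blocks in the driver's delta_msr_wait queue until a line changes */
	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS) < 0)
		return -1;
	if (ioctl(fd, TIOCGICOUNT, &ic) < 0)
		return -1;
	printf("dcd=%d cts=%d dsr=%d rng=%d\n", ic.dcd, ic.cts, ic.dsr, ic.rng);
	return 0;
}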
-
-static int ip2_get_icount(struct tty_struct *tty,
- struct serial_icounter_struct *icount)
-{
- i2ChanStrPtr pCh = DevTable[tty->index];
- i2eBordStrPtr pB;
- struct async_icount cnow; /* kernel counter temp */
- unsigned long flags;
-
- if ( pCh == NULL )
- return -ENODEV;
-
- pB = pCh->pMyBord;
-
- /*
- * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
- * Return: write counters to the user passed counter struct
- * NB: both 1->0 and 0->1 transitions are counted except for RI where
- * only 0->1 is counted. The controller is quite capable of counting
-	 * both, but this is done to preserve compatibility with the standard
- * serial driver.
- */
-
- write_lock_irqsave(&pB->read_fifo_spinlock, flags);
- cnow = pCh->icount;
- write_unlock_irqrestore(&pB->read_fifo_spinlock, flags);
-
- icount->cts = cnow.cts;
- icount->dsr = cnow.dsr;
- icount->rng = cnow.rng;
- icount->dcd = cnow.dcd;
- icount->rx = cnow.rx;
- icount->tx = cnow.tx;
- icount->frame = cnow.frame;
- icount->overrun = cnow.overrun;
- icount->parity = cnow.parity;
- icount->brk = cnow.brk;
- icount->buf_overrun = cnow.buf_overrun;
- return 0;
-}
-
-/******************************************************************************/
-/* Function:   get_serial_info()                                             */
-/* Parameters: Pointer to channel structure                                  */
-/*             Pointer to user-space serial_struct to fill in                */
-/* Returns:    0 on success, -EFAULT if the copy to user space fails         */
-/* */
-/* Description: */
-/* This is to support the setserial command, and requires processing of the */
-/* standard Linux serial structure. */
-/******************************************************************************/
-static int
-get_serial_info ( i2ChanStrPtr pCh, struct serial_struct __user *retinfo )
-{
- struct serial_struct tmp;
-
- memset ( &tmp, 0, sizeof(tmp) );
- tmp.type = pCh->pMyBord->channelBtypes.bid_value[(pCh->port_index & (IP2_PORTS_PER_BOARD-1))/16];
- if (BID_HAS_654(tmp.type)) {
- tmp.type = PORT_16650;
- } else {
- tmp.type = PORT_CIRRUS;
- }
- tmp.line = pCh->port_index;
- tmp.port = pCh->pMyBord->i2eBase;
- tmp.irq = ip2config.irq[pCh->port_index/64];
- tmp.flags = pCh->flags;
- tmp.baud_base = pCh->BaudBase;
- tmp.close_delay = pCh->ClosingDelay;
- tmp.closing_wait = pCh->ClosingWaitTime;
- tmp.custom_divisor = pCh->BaudDivisor;
-	return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
-}
-
-/******************************************************************************/
-/* Function:   set_serial_info()                                             */
-/* Parameters: Pointer to channel structure                                  */
-/*             Pointer to user-space serial_struct with the new settings     */
-/* Returns:    0 on success or a negative error code                         */
-/* */
-/* Description: */
-/* This function provides support for setserial, which uses the TIOCSSERIAL */
-/* ioctl. Not all setserial parameters are relevant. If the user attempts to */
-/* change the IRQ, address or type of the port the ioctl fails. */
-/******************************************************************************/
-static int
-set_serial_info( i2ChanStrPtr pCh, struct serial_struct __user *new_info )
-{
- struct serial_struct ns;
- int old_flags, old_baud_divisor;
-
- if (copy_from_user(&ns, new_info, sizeof (ns)))
- return -EFAULT;
-
- /*
- * We don't allow setserial to change IRQ, board address, type or baud
-	 * base. Also, the line number as such is meaningless, but we use it as
-	 * our array index, so it is fixed as well.
- */
- if ( (ns.irq != ip2config.irq[pCh->port_index])
- || ((int) ns.port != ((int) (pCh->pMyBord->i2eBase)))
- || (ns.baud_base != pCh->BaudBase)
- || (ns.line != pCh->port_index) ) {
- return -EINVAL;
- }
-
- old_flags = pCh->flags;
- old_baud_divisor = pCh->BaudDivisor;
-
- if ( !capable(CAP_SYS_ADMIN) ) {
- if ( ( ns.close_delay != pCh->ClosingDelay ) ||
- ( (ns.flags & ~ASYNC_USR_MASK) !=
- (pCh->flags & ~ASYNC_USR_MASK) ) ) {
- return -EPERM;
- }
-
- pCh->flags = (pCh->flags & ~ASYNC_USR_MASK) |
- (ns.flags & ASYNC_USR_MASK);
- pCh->BaudDivisor = ns.custom_divisor;
- } else {
- pCh->flags = (pCh->flags & ~ASYNC_FLAGS) |
- (ns.flags & ASYNC_FLAGS);
- pCh->BaudDivisor = ns.custom_divisor;
- pCh->ClosingDelay = ns.close_delay * HZ/100;
- pCh->ClosingWaitTime = ns.closing_wait * HZ/100;
- }
-
- if ( ( (old_flags & ASYNC_SPD_MASK) != (pCh->flags & ASYNC_SPD_MASK) )
- || (old_baud_divisor != pCh->BaudDivisor) ) {
- // Invalidate speed and reset parameters
- set_params( pCh, NULL );
- }
-
- return 0;
-}
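/*
 * Userspace sketch (not driver code): the TIOCSSERIAL path above is what
 * the setserial utility uses.  Selecting ASYNC_SPD_VHI makes a subsequent
 * B38400 request run at 115200, as handled in set_params() further down.
 * The ASYNC_SPD_* flags are taken from <linux/serial.h> as found on the
 * kernels this driver targets; the port is assumed to be open on fd.
 */
#include <sys/ioctl.h>
#include <linux/serial.h>

int select_vhi_speed(int fd)
{
	struct serial_struct ss;

	if (ioctl(fd, TIOCGSERIAL, &ss) < 0)
		return -1;
	ss.flags = (ss.flags & ~ASYNC_SPD_MASK) | ASYNC_SPD_VHI;
	return ioctl(fd, TIOCSSERIAL, &ss);
}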
-
-/******************************************************************************/
-/* Function: ip2_set_termios() */
-/* Parameters: Pointer to tty structure */
-/* Pointer to old termios structure */
-/* Returns: Nothing */
-/* */
-/* Description: */
-/* */
-/* */
-/******************************************************************************/
-static void
-ip2_set_termios( PTTY tty, struct ktermios *old_termios )
-{
- i2ChanStrPtr pCh = (i2ChanStrPtr)tty->driver_data;
-
-#ifdef IP2DEBUG_IOCTL
- printk (KERN_DEBUG "IP2: set termios %p\n", old_termios );
-#endif
-
- set_params( pCh, old_termios );
-}
-
-/******************************************************************************/
-/* Function: ip2_set_line_discipline() */
-/* Parameters: Pointer to tty structure */
-/* Returns: Nothing */
-/* */
-/* Description: Does nothing */
-/* */
-/* */
-/******************************************************************************/
-static void
-ip2_set_line_discipline ( PTTY tty )
-{
-#ifdef IP2DEBUG_IOCTL
- printk (KERN_DEBUG "IP2: set line discipline\n" );
-#endif
-
- ip2trace (((i2ChanStrPtr)tty->driver_data)->port_index, ITRC_IOCTL, 16, 0 );
-
-}
-
-/******************************************************************************/
-/* Function:   set_params()                                                  */
-/* Parameters: Pointer to channel structure                                  */
-/*             Pointer to old termios structure (NULL forces a full update)  */
-/* Returns: Nothing */
-/* */
-/* Description: */
-/* This routine is called to update the channel structure with the new line */
-/* characteristics, and send the appropriate commands to the board when they */
-/* change. */
-/******************************************************************************/
-static void
-set_params( i2ChanStrPtr pCh, struct ktermios *o_tios )
-{
- tcflag_t cflag, iflag, lflag;
- char stop_char, start_char;
- struct ktermios dummy;
-
- lflag = pCh->pTTY->termios->c_lflag;
- cflag = pCh->pTTY->termios->c_cflag;
- iflag = pCh->pTTY->termios->c_iflag;
-
- if (o_tios == NULL) {
- dummy.c_lflag = ~lflag;
- dummy.c_cflag = ~cflag;
- dummy.c_iflag = ~iflag;
- o_tios = &dummy;
- }
-
- {
- switch ( cflag & CBAUD ) {
- case B0:
- i2QueueCommands( PTYPE_BYPASS, pCh, 100, 2, CMD_RTSDN, CMD_DTRDN);
- pCh->dataSetOut &= ~(I2_DTR | I2_RTS);
- i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25));
- pCh->pTTY->termios->c_cflag |= (CBAUD & o_tios->c_cflag);
- goto service_it;
- break;
- case B38400:
- /*
- * This is the speed that is overloaded with all the other high
- * speeds, depending upon the flag settings.
- */
- if ( ( pCh->flags & ASYNC_SPD_MASK ) == ASYNC_SPD_HI ) {
- pCh->speed = CBR_57600;
- } else if ( (pCh->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI ) {
- pCh->speed = CBR_115200;
- } else if ( (pCh->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST ) {
- pCh->speed = CBR_C1;
- } else {
- pCh->speed = CBR_38400;
- }
- break;
- case B50: pCh->speed = CBR_50; break;
- case B75: pCh->speed = CBR_75; break;
- case B110: pCh->speed = CBR_110; break;
- case B134: pCh->speed = CBR_134; break;
- case B150: pCh->speed = CBR_150; break;
- case B200: pCh->speed = CBR_200; break;
- case B300: pCh->speed = CBR_300; break;
- case B600: pCh->speed = CBR_600; break;
- case B1200: pCh->speed = CBR_1200; break;
- case B1800: pCh->speed = CBR_1800; break;
- case B2400: pCh->speed = CBR_2400; break;
- case B4800: pCh->speed = CBR_4800; break;
- case B9600: pCh->speed = CBR_9600; break;
- case B19200: pCh->speed = CBR_19200; break;
- case B57600: pCh->speed = CBR_57600; break;
- case B115200: pCh->speed = CBR_115200; break;
- case B153600: pCh->speed = CBR_153600; break;
- case B230400: pCh->speed = CBR_230400; break;
- case B307200: pCh->speed = CBR_307200; break;
- case B460800: pCh->speed = CBR_460800; break;
- case B921600: pCh->speed = CBR_921600; break;
- default: pCh->speed = CBR_9600; break;
- }
- if ( pCh->speed == CBR_C1 ) {
- // Process the custom speed parameters.
- int bps = pCh->BaudBase / pCh->BaudDivisor;
- if ( bps == 921600 ) {
- pCh->speed = CBR_921600;
- } else {
- bps = bps/10;
- i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_BAUD_DEF1(bps) );
- }
- }
- i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_SETBAUD(pCh->speed));
-
- i2QueueCommands ( PTYPE_INLINE, pCh, 100, 2, CMD_DTRUP, CMD_RTSUP);
- pCh->dataSetOut |= (I2_DTR | I2_RTS);
- }
- if ( (CSTOPB & cflag) ^ (CSTOPB & o_tios->c_cflag))
- {
- i2QueueCommands ( PTYPE_INLINE, pCh, 100, 1,
- CMD_SETSTOP( ( cflag & CSTOPB ) ? CST_2 : CST_1));
- }
- if (((PARENB|PARODD) & cflag) ^ ((PARENB|PARODD) & o_tios->c_cflag))
- {
- i2QueueCommands ( PTYPE_INLINE, pCh, 100, 1,
- CMD_SETPAR(
- (cflag & PARENB ? (cflag & PARODD ? CSP_OD : CSP_EV) : CSP_NP)
- )
- );
- }
- /* byte size and parity */
- if ( (CSIZE & cflag)^(CSIZE & o_tios->c_cflag))
- {
- int datasize;
- switch ( cflag & CSIZE ) {
- case CS5: datasize = CSZ_5; break;
- case CS6: datasize = CSZ_6; break;
- case CS7: datasize = CSZ_7; break;
- case CS8: datasize = CSZ_8; break;
- default: datasize = CSZ_5; break; /* as per serial.c */
- }
- i2QueueCommands ( PTYPE_INLINE, pCh, 100, 1, CMD_SETBITS(datasize) );
- }
- /* Process CTS flow control flag setting */
- if ( (cflag & CRTSCTS) ) {
- i2QueueCommands(PTYPE_INLINE, pCh, 100,
- 2, CMD_CTSFL_ENAB, CMD_RTSFL_ENAB);
- } else {
- i2QueueCommands(PTYPE_INLINE, pCh, 100,
- 2, CMD_CTSFL_DSAB, CMD_RTSFL_DSAB);
- }
- //
- // Process XON/XOFF flow control flags settings
- //
- stop_char = STOP_CHAR(pCh->pTTY);
- start_char = START_CHAR(pCh->pTTY);
-
- //////////// can't be \000
- if (stop_char == __DISABLED_CHAR )
- {
- stop_char = ~__DISABLED_CHAR;
- }
- if (start_char == __DISABLED_CHAR )
- {
- start_char = ~__DISABLED_CHAR;
- }
- /////////////////////////////////
-
- if ( o_tios->c_cc[VSTART] != start_char )
- {
- i2QueueCommands(PTYPE_BYPASS, pCh, 100, 1, CMD_DEF_IXON(start_char));
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_DEF_OXON(start_char));
- }
- if ( o_tios->c_cc[VSTOP] != stop_char )
- {
- i2QueueCommands(PTYPE_BYPASS, pCh, 100, 1, CMD_DEF_IXOFF(stop_char));
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_DEF_OXOFF(stop_char));
- }
- if (stop_char == __DISABLED_CHAR )
- {
- stop_char = ~__DISABLED_CHAR; //TEST123
- goto no_xoff;
- }
- if ((iflag & (IXOFF))^(o_tios->c_iflag & (IXOFF)))
- {
- if ( iflag & IXOFF ) { // Enable XOFF output flow control
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_OXON_OPT(COX_XON));
- } else { // Disable XOFF output flow control
-no_xoff:
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_OXON_OPT(COX_NONE));
- }
- }
- if (start_char == __DISABLED_CHAR )
- {
- goto no_xon;
- }
- if ((iflag & (IXON|IXANY)) ^ (o_tios->c_iflag & (IXON|IXANY)))
- {
- if ( iflag & IXON ) {
- if ( iflag & IXANY ) { // Enable XON/XANY output flow control
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_IXON_OPT(CIX_XANY));
- } else { // Enable XON output flow control
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_IXON_OPT(CIX_XON));
- }
- } else { // Disable XON output flow control
-no_xon:
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_IXON_OPT(CIX_NONE));
- }
- }
- if ( (iflag & ISTRIP) ^ ( o_tios->c_iflag & (ISTRIP)) )
- {
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1,
- CMD_ISTRIP_OPT((iflag & ISTRIP ? 1 : 0)));
- }
- if ( (iflag & INPCK) ^ ( o_tios->c_iflag & (INPCK)) )
- {
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1,
- CMD_PARCHK((iflag & INPCK) ? CPK_ENAB : CPK_DSAB));
- }
-
- if ( (iflag & (IGNBRK|PARMRK|BRKINT|IGNPAR))
- ^ ( o_tios->c_iflag & (IGNBRK|PARMRK|BRKINT|IGNPAR)) )
- {
- char brkrpt = 0;
- char parrpt = 0;
-
- if ( iflag & IGNBRK ) { /* Ignore breaks altogether */
- /* Ignore breaks altogether */
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_BRK_NREP);
- } else {
- if ( iflag & BRKINT ) {
- if ( iflag & PARMRK ) {
-					brkrpt = 0x0a;	// exception and an inline triple
- } else {
- brkrpt = 0x1a; // exception and NULL
- }
- brkrpt |= 0x04; // flush input
- } else {
- if ( iflag & PARMRK ) {
- brkrpt = 0x0b; //POSIX triple \0377 \0 \0
- } else {
- brkrpt = 0x01; // Null only
- }
- }
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_BRK_REP(brkrpt));
- }
-
- if (iflag & IGNPAR) {
-			parrpt = 0x20;
-			/* would be 2, except the Cirrus bug requires 0x20 */
- } else {
- if ( iflag & PARMRK ) {
- /*
- * Replace error characters with 3-byte sequence (\0377,\0,char)
- */
- parrpt = 0x04 ;
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_ISTRIP_OPT((char)0));
- } else {
- parrpt = 0x03;
- }
- }
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_SET_ERROR(parrpt));
- }
- if (cflag & CLOCAL) {
- // Status reporting fails for DCD if this is off
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_DCD_NREP);
- pCh->flags &= ~ASYNC_CHECK_CD;
- } else {
- i2QueueCommands(PTYPE_INLINE, pCh, 100, 1, CMD_DCD_REP);
- pCh->flags |= ASYNC_CHECK_CD;
- }
-
-service_it:
- i2DrainOutput( pCh, 100 );
-}
-
-/******************************************************************************/
-/* IPL Device Section */
-/******************************************************************************/
-
-/******************************************************************************/
-/* Function: ip2_ipl_read() */
-/* Parameters: Pointer to device inode */
-/* Pointer to file structure */
-/* Pointer to data */
-/* Number of bytes to read */
-/* Returns: Success or failure */
-/* */
-/* Description: Ugly */
-/* */
-/* */
-/******************************************************************************/
-
-static
-ssize_t
-ip2_ipl_read(struct file *pFile, char __user *pData, size_t count, loff_t *off )
-{
- unsigned int minor = iminor(pFile->f_path.dentry->d_inode);
- int rc = 0;
-
-#ifdef IP2DEBUG_IPL
- printk (KERN_DEBUG "IP2IPL: read %p, %d bytes\n", pData, count );
-#endif
-
- switch( minor ) {
- case 0: // IPL device
- rc = -EINVAL;
- break;
- case 1: // Status dump
- rc = -EINVAL;
- break;
- case 2: // Ping device
- rc = -EINVAL;
- break;
- case 3: // Trace device
- rc = DumpTraceBuffer ( pData, count );
- break;
- case 4: // Trace device
- rc = DumpFifoBuffer ( pData, count );
- break;
- default:
- rc = -ENODEV;
- break;
- }
- return rc;
-}
-
-static int
-DumpFifoBuffer ( char __user *pData, int count )
-{
-#ifdef DEBUG_FIFO
- int rc;
- rc = copy_to_user(pData, DBGBuf, count);
-
- printk(KERN_DEBUG "Last index %d\n", I );
-
- return count;
-#endif /* DEBUG_FIFO */
- return 0;
-}
-
-static int
-DumpTraceBuffer ( char __user *pData, int count )
-{
-#ifdef IP2DEBUG_TRACE
- int rc;
- int dumpcount;
- int chunk;
- int *pIndex = (int __user *)pData;
-
- if ( count < (sizeof(int) * 6) ) {
- return -EIO;
- }
- rc = put_user(tracewrap, pIndex );
- rc = put_user(TRACEMAX, ++pIndex );
- rc = put_user(tracestrip, ++pIndex );
- rc = put_user(tracestuff, ++pIndex );
- pData += sizeof(int) * 6;
- count -= sizeof(int) * 6;
-
- dumpcount = tracestuff - tracestrip;
- if ( dumpcount < 0 ) {
- dumpcount += TRACEMAX;
- }
- if ( dumpcount > count ) {
- dumpcount = count;
- }
- chunk = TRACEMAX - tracestrip;
- if ( dumpcount > chunk ) {
- rc = copy_to_user(pData, &tracebuf[tracestrip],
- chunk * sizeof(tracebuf[0]) );
- pData += chunk * sizeof(tracebuf[0]);
- tracestrip = 0;
- chunk = dumpcount - chunk;
- } else {
- chunk = dumpcount;
- }
- rc = copy_to_user(pData, &tracebuf[tracestrip],
- chunk * sizeof(tracebuf[0]) );
- tracestrip += chunk;
- tracewrap = 0;
-
- rc = put_user(tracestrip, ++pIndex );
- rc = put_user(tracestuff, ++pIndex );
-
- return dumpcount;
-#else
- return 0;
-#endif
-}
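/*
 * Userspace sketch (not driver code): DumpTraceBuffer() above emits a
 * six-integer header (wrap flag, TRACEMAX, strip/stuff indices before the
 * copy, then strip/stuff after it) followed by the raw trace words, and its
 * return value -- which becomes the read() return value -- is the number of
 * entries dumped rather than a byte count.  The device node name is an
 * assumption about how the IPL trace minor is created.
 */
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>

int show_trace_header(const char *dev /* e.g. "/dev/ip2trace" */)
{
	static unsigned char buf[65536];
	int hdr[6];
	int fd, entries;

	fd = open(dev, O_RDONLY);
	if (fd < 0)
		return -1;
	entries = read(fd, buf, sizeof(buf));	/* entry count, not bytes */
	if (entries >= 0) {
		memcpy(hdr, buf, sizeof(hdr));
		printf("wrap=%d max=%d strip=%d stuff=%d (%d entries)\n",
		       hdr[0], hdr[1], hdr[2], hdr[3], entries);
	}
	close(fd);
	return entries;
}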
-
-/******************************************************************************/
-/* Function: ip2_ipl_write() */
-/* Parameters: */
-/* Pointer to file structure */
-/* Pointer to data */
-/* Number of bytes to write */
-/* Returns: Success or failure */
-/* */
-/* Description: */
-/* */
-/* */
-/******************************************************************************/
-static ssize_t
-ip2_ipl_write(struct file *pFile, const char __user *pData, size_t count, loff_t *off)
-{
-#ifdef IP2DEBUG_IPL
- printk (KERN_DEBUG "IP2IPL: write %p, %d bytes\n", pData, count );
-#endif
- return 0;
-}
-
-/******************************************************************************/
-/* Function: ip2_ipl_ioctl() */
-/* Parameters: Pointer to device inode */
-/* Pointer to file structure */
-/* Command */
-/* Argument */
-/* Returns: Success or failure */
-/* */
-/* Description: */
-/* */
-/* */
-/******************************************************************************/
-static long
-ip2_ipl_ioctl (struct file *pFile, UINT cmd, ULONG arg )
-{
- unsigned int iplminor = iminor(pFile->f_path.dentry->d_inode);
- int rc = 0;
- void __user *argp = (void __user *)arg;
- ULONG __user *pIndex = argp;
- i2eBordStrPtr pB = i2BoardPtrTable[iplminor / 4];
- i2ChanStrPtr pCh;
-
-#ifdef IP2DEBUG_IPL
- printk (KERN_DEBUG "IP2IPL: ioctl cmd %d, arg %ld\n", cmd, arg );
-#endif
-
- mutex_lock(&ip2_mutex);
-
- switch ( iplminor ) {
- case 0: // IPL device
- rc = -EINVAL;
- break;
- case 1: // Status dump
- case 5:
- case 9:
- case 13:
- switch ( cmd ) {
- case 64: /* Driver - ip2stat */
- rc = put_user(-1, pIndex++ );
- rc = put_user(irq_counter, pIndex++ );
- rc = put_user(bh_counter, pIndex++ );
- break;
-
- case 65: /* Board - ip2stat */
- if ( pB ) {
- rc = copy_to_user(argp, pB, sizeof(i2eBordStr));
- rc = put_user(inb(pB->i2eStatus),
- (ULONG __user *)(arg + (ULONG)(&pB->i2eStatus) - (ULONG)pB ) );
- } else {
- rc = -ENODEV;
- }
- break;
-
- default:
- if (cmd < IP2_MAX_PORTS) {
- pCh = DevTable[cmd];
- if ( pCh )
- {
- rc = copy_to_user(argp, pCh, sizeof(i2ChanStr));
- if (rc)
- rc = -EFAULT;
- } else {
- rc = -ENODEV;
- }
- } else {
- rc = -EINVAL;
- }
- }
- break;
-
- case 2: // Ping device
- rc = -EINVAL;
- break;
- case 3: // Trace device
- /*
- * akpm: This used to write a whole bunch of function addresses
- * to userspace, which generated lots of put_user() warnings.
- * I killed it all. Just return "success" and don't do
- * anything.
- */
- if (cmd == 1)
- rc = 0;
- else
- rc = -EINVAL;
- break;
-
- default:
- rc = -ENODEV;
- break;
- }
- mutex_unlock(&ip2_mutex);
- return rc;
-}
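/*
 * Userspace sketch (not driver code): command 64 on a status minor above
 * stores three unsigned longs (a -1 placeholder, irq_counter, bh_counter)
 * at the address passed in arg, which is what an ip2stat-style utility
 * reads.  The device node name is an assumption about how the IPL status
 * minors are created (e.g. by an ip2mkdev script).
 */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int read_driver_counters(const char *dev /* e.g. "/dev/ip2stat0" */)
{
	unsigned long stats[3] = { 0, 0, 0 };
	int fd = open(dev, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, 64, stats) == 0)	/* "Driver - ip2stat" case above */
		printf("irq_counter=%lu bh_counter=%lu\n", stats[1], stats[2]);
	close(fd);
	return 0;
}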
-
-/******************************************************************************/
-/* Function: ip2_ipl_open() */
-/* Parameters: Pointer to device inode */
-/* Pointer to file structure */
-/* Returns: Success or failure */
-/* */
-/* Description: */
-/* */
-/* */
-/******************************************************************************/
-static int
-ip2_ipl_open( struct inode *pInode, struct file *pFile )
-{
-
-#ifdef IP2DEBUG_IPL
- printk (KERN_DEBUG "IP2IPL: open\n" );
-#endif
- return 0;
-}
-
-static int
-proc_ip2mem_show(struct seq_file *m, void *v)
-{
- i2eBordStrPtr pB;
- i2ChanStrPtr pCh;
- PTTY tty;
- int i;
-
-#define FMTLINE "%3d: 0x%08x 0x%08x 0%011o 0%011o\n"
-#define FMTLIN2 " 0x%04x 0x%04x tx flow 0x%x\n"
-#define FMTLIN3 " 0x%04x 0x%04x rc flow\n"
-
- seq_printf(m,"\n");
-
- for( i = 0; i < IP2_MAX_BOARDS; ++i ) {
- pB = i2BoardPtrTable[i];
- if ( pB ) {
- seq_printf(m,"board %d:\n",i);
- seq_printf(m,"\tFifo rem: %d mty: %x outM %x\n",
- pB->i2eFifoRemains,pB->i2eWaitingForEmptyFifo,pB->i2eOutMailWaiting);
- }
- }
-
- seq_printf(m,"#: tty flags, port flags, cflags, iflags\n");
- for (i=0; i < IP2_MAX_PORTS; i++) {
- pCh = DevTable[i];
- if (pCh) {
- tty = pCh->pTTY;
- if (tty && tty->count) {
- seq_printf(m,FMTLINE,i,(int)tty->flags,pCh->flags,
- tty->termios->c_cflag,tty->termios->c_iflag);
-
- seq_printf(m,FMTLIN2,
- pCh->outfl.asof,pCh->outfl.room,pCh->channelNeeds);
- seq_printf(m,FMTLIN3,pCh->infl.asof,pCh->infl.room);
- }
- }
- }
- return 0;
-}
-
-static int proc_ip2mem_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_ip2mem_show, NULL);
-}
-
-static const struct file_operations ip2mem_proc_fops = {
- .owner = THIS_MODULE,
- .open = proc_ip2mem_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-/*
- * This is the handler for /proc/tty/driver/ip2
- *
- * This stretch of code has been largely plagiarized from at least three
- * different sources including ip2mkdev.c and a couple of other drivers.
- * The bugs are all mine. :-) =mhw=
- */
-static int ip2_proc_show(struct seq_file *m, void *v)
-{
- int i, j, box;
- int boxes = 0;
- int ports = 0;
- int tports = 0;
- i2eBordStrPtr pB;
- char *sep;
-
- seq_printf(m, "ip2info: 1.0 driver: %s\n", pcVersion);
- seq_printf(m, "Driver: SMajor=%d CMajor=%d IMajor=%d MaxBoards=%d MaxBoxes=%d MaxPorts=%d\n",
- IP2_TTY_MAJOR, IP2_CALLOUT_MAJOR, IP2_IPL_MAJOR,
- IP2_MAX_BOARDS, ABS_MAX_BOXES, ABS_BIGGEST_BOX);
-
- for( i = 0; i < IP2_MAX_BOARDS; ++i ) {
-		/* This needs to be reset for a board by board count... */
- boxes = 0;
- pB = i2BoardPtrTable[i];
- if( pB ) {
- switch( pB->i2ePom.e.porID & ~POR_ID_RESERVED )
- {
- case POR_ID_FIIEX:
- seq_printf(m, "Board %d: EX ports=", i);
- sep = "";
- for( box = 0; box < ABS_MAX_BOXES; ++box )
- {
- ports = 0;
-
- if( pB->i2eChannelMap[box] != 0 ) ++boxes;
- for( j = 0; j < ABS_BIGGEST_BOX; ++j )
- {
- if( pB->i2eChannelMap[box] & 1<< j ) {
- ++ports;
- }
- }
- seq_printf(m, "%s%d", sep, ports);
- sep = ",";
- tports += ports;
- }
- seq_printf(m, " boxes=%d width=%d", boxes, pB->i2eDataWidth16 ? 16 : 8);
- break;
-
- case POR_ID_II_4:
- seq_printf(m, "Board %d: ISA-4 ports=4 boxes=1", i);
- tports = ports = 4;
- break;
-
- case POR_ID_II_8:
- seq_printf(m, "Board %d: ISA-8-std ports=8 boxes=1", i);
- tports = ports = 8;
- break;
-
- case POR_ID_II_8R:
- seq_printf(m, "Board %d: ISA-8-RJ11 ports=8 boxes=1", i);
- tports = ports = 8;
- break;
-
- default:
- seq_printf(m, "Board %d: unknown", i);
- /* Don't try and probe for minor numbers */
- tports = ports = 0;
- }
-
- } else {
- /* Don't try and probe for minor numbers */
- seq_printf(m, "Board %d: vacant", i);
- tports = ports = 0;
- }
-
- if( tports ) {
- seq_puts(m, " minors=");
- sep = "";
- for ( box = 0; box < ABS_MAX_BOXES; ++box )
- {
- for ( j = 0; j < ABS_BIGGEST_BOX; ++j )
- {
- if ( pB->i2eChannelMap[box] & (1 << j) )
- {
- seq_printf(m, "%s%d", sep,
- j + ABS_BIGGEST_BOX *
- (box+i*ABS_MAX_BOXES));
- sep = ",";
- }
- }
- }
- }
- seq_putc(m, '\n');
- }
- return 0;
-}
-
-static int ip2_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ip2_proc_show, NULL);
-}
-
-static const struct file_operations ip2_proc_fops = {
- .owner = THIS_MODULE,
- .open = ip2_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-/******************************************************************************/
-/* Function: ip2trace() */
-/* Parameters: Value to add to trace buffer */
-/* Returns: Nothing */
-/* */
-/* Description: */
-/* */
-/* */
-/******************************************************************************/
-#ifdef IP2DEBUG_TRACE
-void
-ip2trace (unsigned short pn, unsigned char cat, unsigned char label, unsigned long codes, ...)
-{
- long flags;
- unsigned long *pCode = &codes;
- union ip2breadcrumb bc;
- i2ChanStrPtr pCh;
-
-
- tracebuf[tracestuff++] = jiffies;
- if ( tracestuff == TRACEMAX ) {
- tracestuff = 0;
- }
- if ( tracestuff == tracestrip ) {
- if ( ++tracestrip == TRACEMAX ) {
- tracestrip = 0;
- }
- ++tracewrap;
- }
-
- bc.hdr.port = 0xff & pn;
- bc.hdr.cat = cat;
- bc.hdr.codes = (unsigned char)( codes & 0xff );
- bc.hdr.label = label;
- tracebuf[tracestuff++] = bc.value;
-
- for (;;) {
- if ( tracestuff == TRACEMAX ) {
- tracestuff = 0;
- }
- if ( tracestuff == tracestrip ) {
- if ( ++tracestrip == TRACEMAX ) {
- tracestrip = 0;
- }
- ++tracewrap;
- }
-
- if ( !codes-- )
- break;
-
- tracebuf[tracestuff++] = *++pCode;
- }
-}
-#endif
-
-
-MODULE_LICENSE("GPL");
-
-static struct pci_device_id ip2main_pci_tbl[] __devinitdata __used = {
- { PCI_DEVICE(PCI_VENDOR_ID_COMPUTONE, PCI_DEVICE_ID_COMPUTONE_IP2EX) },
- { }
-};
-
-MODULE_DEVICE_TABLE(pci, ip2main_pci_tbl);
-
-MODULE_FIRMWARE("intelliport2.bin");
diff --git a/drivers/staging/tty/ip2/ip2trace.h b/drivers/staging/tty/ip2/ip2trace.h
deleted file mode 100644
index da20435dc8a..00000000000
--- a/drivers/staging/tty/ip2/ip2trace.h
+++ /dev/null
@@ -1,42 +0,0 @@
-
-//
-union ip2breadcrumb
-{
- struct {
- unsigned char port, cat, codes, label;
- } __attribute__ ((packed)) hdr;
- unsigned long value;
-};
-
-#define ITRC_NO_PORT 0xFF
-#define CHANN (pCh->port_index)
-
-#define ITRC_ERROR '!'
-#define ITRC_INIT 'A'
-#define ITRC_OPEN 'B'
-#define ITRC_CLOSE 'C'
-#define ITRC_DRAIN 'D'
-#define ITRC_IOCTL 'E'
-#define ITRC_FLUSH 'F'
-#define ITRC_STATUS 'G'
-#define ITRC_HANGUP 'H'
-#define ITRC_INTR 'I'
-#define ITRC_SFLOW 'J'
-#define ITRC_SBCMD 'K'
-#define ITRC_SICMD 'L'
-#define ITRC_MODEM 'M'
-#define ITRC_INPUT 'N'
-#define ITRC_OUTPUT 'O'
-#define ITRC_PUTC 'P'
-#define ITRC_QUEUE 'Q'
-#define ITRC_STFLW 'R'
-#define ITRC_SFIFO 'S'
-#define ITRC_VERIFY 'V'
-#define ITRC_WRITE 'W'
-
-#define ITRC_ENTER 0x00
-#define ITRC_RETURN 0xFF
-
-#define ITRC_QUEUE_ROOM 2
-#define ITRC_QUEUE_CMD 6
-
diff --git a/drivers/staging/tty/ip2/ip2types.h b/drivers/staging/tty/ip2/ip2types.h
deleted file mode 100644
index 9d67b260b2f..00000000000
--- a/drivers/staging/tty/ip2/ip2types.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*******************************************************************************
-*
-* (c) 1998 by Computone Corporation
-*
-********************************************************************************
-*
-*
-* PACKAGE: Linux tty Device Driver for IntelliPort II family of multiport
-* serial I/O controllers.
-*
-* DESCRIPTION: Driver constants and type definitions.
-*
-* NOTES:
-*
-*******************************************************************************/
-#ifndef IP2TYPES_H
-#define IP2TYPES_H
-
-//*************
-//* Constants *
-//*************
-
-// Define some limits for this driver. Ports per board is a hardware limitation
-// that will not change. Current hardware limits this to 64 ports per board.
-// Boards per driver is a self-imposed limit.
-//
-#define IP2_MAX_BOARDS 4
-#define IP2_PORTS_PER_BOARD ABS_MOST_PORTS
-#define IP2_MAX_PORTS (IP2_MAX_BOARDS*IP2_PORTS_PER_BOARD)
-
-#define ISA 0
-#define PCI 1
-#define EISA 2
-
-//********************
-//* Type Definitions *
-//********************
-
-typedef struct tty_struct * PTTY;
-typedef wait_queue_head_t PWAITQ;
-
-typedef unsigned char UCHAR;
-typedef unsigned int UINT;
-typedef unsigned short USHORT;
-typedef unsigned long ULONG;
-
-typedef struct
-{
- short irq[IP2_MAX_BOARDS];
- unsigned short addr[IP2_MAX_BOARDS];
- int type[IP2_MAX_BOARDS];
-#ifdef CONFIG_PCI
- struct pci_dev *pci_dev[IP2_MAX_BOARDS];
-#endif
-} ip2config_t;
-
-#endif
diff --git a/drivers/staging/tty/istallion.c b/drivers/staging/tty/istallion.c
deleted file mode 100644
index ca18cbf4e3c..00000000000
--- a/drivers/staging/tty/istallion.c
+++ /dev/null
@@ -1,4507 +0,0 @@
-/*****************************************************************************/
-
-/*
- * istallion.c -- stallion intelligent multiport serial driver.
- *
- * Copyright (C) 1996-1999 Stallion Technologies
- * Copyright (C) 1994-1996 Greg Ungerer.
- *
- * This code is loosely based on the Linux serial driver, written by
- * Linus Torvalds, Theodore T'so and others.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-
-/*****************************************************************************/
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/seq_file.h>
-#include <linux/cdk.h>
-#include <linux/comstats.h>
-#include <linux/istallion.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/wait.h>
-#include <linux/eisa.h>
-#include <linux/ctype.h>
-
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-#include <linux/pci.h>
-
-/*****************************************************************************/
-
-/*
- * Define different board types. Not all of the following board types
- * are supported by this driver. But I will use the standard "assigned"
- * board numbers. Currently supported boards are abbreviated as:
- * ECP = EasyConnection 8/64, ONB = ONboard, BBY = Brumby and
- * STAL = Stallion.
- */
-#define BRD_UNKNOWN 0
-#define BRD_STALLION 1
-#define BRD_BRUMBY4 2
-#define BRD_ONBOARD2 3
-#define BRD_ONBOARD 4
-#define BRD_ONBOARDE 7
-#define BRD_ECP 23
-#define BRD_ECPE 24
-#define BRD_ECPMC 25
-#define BRD_ECPPCI 29
-
-#define BRD_BRUMBY BRD_BRUMBY4
-
-/*
- * Define a configuration structure to hold the board configuration.
- * Need to set this up in the code (for now) with the boards that are
- * to be configured into the system. This is what needs to be modified
- * when adding/removing/modifying boards. Each line entry in the
- * stli_brdconf[] array is a board. Each line contains io/irq/memory
- * ranges for that board (as well as what type of board it is).
- * Some examples:
- * { BRD_ECP, 0x2a0, 0, 0xcc000, 0, 0 },
- * This line will configure an EasyConnection 8/64 at io address 2a0,
- * and shared memory address of cc000. Multiple EasyConnection 8/64
- * boards can share the same shared memory address space. No interrupt
- * is required for this board type.
- * Another example:
- * { BRD_ECPE, 0x5000, 0, 0x80000000, 0, 0 },
- * This line will configure an EasyConnection 8/64 EISA in slot 5 and
- * shared memory address of 0x80000000 (2 GByte). Multiple
- * EasyConnection 8/64 EISA boards can share the same shared memory
- * address space. No interrupt is required for this board type.
- * Another example:
- * { BRD_ONBOARD, 0x240, 0, 0xd0000, 0, 0 },
- * This line will configure an ONboard (ISA type) at io address 240,
- * and shared memory address of d0000. Multiple ONboards can share
- * the same shared memory address space. No interrupt required.
- * Another example:
- * { BRD_BRUMBY4, 0x360, 0, 0xc8000, 0, 0 },
- * This line will configure a Brumby board (any number of ports!) at
- * io address 360 and shared memory address of c8000. All Brumby boards
- * configured into a system must have their own separate io and memory
- * addresses. No interrupt is required.
- * Another example:
- * { BRD_STALLION, 0x330, 0, 0xd0000, 0, 0 },
- * This line will configure an original Stallion board at io address 330
- * and shared memory address d0000 (this would only be valid for a "V4.0"
- * or Rev.O Stallion board). All Stallion boards configured into the
- * system must have their own separate io and memory addresses. No
- * interrupt is required.
- */
-
-struct stlconf {
- int brdtype;
- int ioaddr1;
- int ioaddr2;
- unsigned long memaddr;
- int irq;
- int irqtype;
-};
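
For illustration, the example lines quoted in the comment above translate into a static table of struct stlconf entries like the sketch below; the array name stli_brdconf is taken from that comment, but the concrete initializer is only an assumed example, not a shipped configuration.

static struct stlconf stli_brdconf[] = {
	/* brdtype,	ioaddr1, ioaddr2, memaddr, irq, irqtype	*/
	{ BRD_ECP,	0x2a0,	0,	0xcc000,	0,	0 },	/* ISA EC8/64  */
	{ BRD_ONBOARD,	0x240,	0,	0xd0000,	0,	0 },	/* ISA ONboard */
};
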
-
-static unsigned int stli_nrbrds;
-
-/* stli_lock must NOT be taken holding brd_lock */
-static spinlock_t stli_lock; /* TTY logic lock */
-static spinlock_t brd_lock; /* Board logic lock */
-
-/*
- * There is some experimental EISA board detection code in this driver.
- * By default it is disabled, but for those that want to try it out,
- * then set the define below to be 1.
- */
-#define STLI_EISAPROBE 0
-
-/*****************************************************************************/
-
-/*
- * Define some important driver characteristics. Device major numbers
- * allocated as per Linux Device Registry.
- */
-#ifndef STL_SIOMEMMAJOR
-#define STL_SIOMEMMAJOR 28
-#endif
-#ifndef STL_SERIALMAJOR
-#define STL_SERIALMAJOR 24
-#endif
-#ifndef STL_CALLOUTMAJOR
-#define STL_CALLOUTMAJOR 25
-#endif
-
-/*****************************************************************************/
-
-/*
- * Define our local driver identity first. Set up stuff to deal with
- * all the local structures required by a serial tty driver.
- */
-static char *stli_drvtitle = "Stallion Intelligent Multiport Serial Driver";
-static char *stli_drvname = "istallion";
-static char *stli_drvversion = "5.6.0";
-static char *stli_serialname = "ttyE";
-
-static struct tty_driver *stli_serial;
-static const struct tty_port_operations stli_port_ops;
-
-#define STLI_TXBUFSIZE 4096
-
-/*
- * Use a fast local buffer for cooked characters. Typically a whole
- * bunch of cooked characters come in for a port, 1 at a time. So we
- * save those up into a local buffer, then write out the whole lot
- * with a large memcpy. Just use 1 buffer for all ports, since it
- * is only needed for short periods of time by each port.
- */
-static char *stli_txcookbuf;
-static int stli_txcooksize;
-static int stli_txcookrealsize;
-static struct tty_struct *stli_txcooktty;
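
A minimal standalone sketch of the cook-buffer idea described above: single characters are staged in a local buffer and then pushed out in one bulk copy. The names cook_buf, cook_len, cook_putchar and cook_flush are invented for this sketch and are not part of the driver.

#include <string.h>

#define COOKBUFSIZE	4096

static char cook_buf[COOKBUFSIZE];
static size_t cook_len;

/* Write the whole staged buffer out with one large copy, then reset it. */
static void cook_flush(char *dest)
{
	memcpy(dest, cook_buf, cook_len);
	cook_len = 0;
}

/* Stage one character; flush first if the staging buffer is already full. */
static void cook_putchar(char *dest, char ch)
{
	if (cook_len == COOKBUFSIZE)
		cook_flush(dest);
	cook_buf[cook_len++] = ch;
}
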
-
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially. Basically all it defines is a raw port
- * at 9600 baud, 8 data bits, no parity, 1 stop bit.
- */
-static struct ktermios stli_deftermios = {
- .c_cflag = (B9600 | CS8 | CREAD | HUPCL | CLOCAL),
- .c_cc = INIT_C_CC,
- .c_ispeed = 9600,
- .c_ospeed = 9600,
-};
-
-/*
- * Define global stats structures. Not used often, and can be
- * re-used for each stats call.
- */
-static comstats_t stli_comstats;
-static struct asystats stli_cdkstats;
-
-/*****************************************************************************/
-
-static DEFINE_MUTEX(stli_brdslock);
-static struct stlibrd *stli_brds[STL_MAXBRDS];
-
-static int stli_shared;
-
-/*
- * Per board state flags. Used with the state field of the board struct.
- * Not really much here... All we need to do is keep track of whether
- * the board has been detected, and whether it is actually running a slave
- * or not.
- */
-#define BST_FOUND 0
-#define BST_STARTED 1
-#define BST_PROBED 2
-
-/*
- * Define the set of port state flags. These are marked for internal
- * state purposes only, usually to do with the state of communications
- * with the slave. Most of them need to be updated atomically, so always
- * use the bit setting operations (unless protected by cli/sti).
- */
-#define ST_OPENING 2
-#define ST_CLOSING 3
-#define ST_CMDING 4
-#define ST_TXBUSY 5
-#define ST_RXING 6
-#define ST_DOFLUSHRX 7
-#define ST_DOFLUSHTX 8
-#define ST_DOSIGS 9
-#define ST_RXSTOP 10
-#define ST_GETSIGS 11
-
-/*
- * Define an array of board names as printable strings. Handy for
- * referencing boards when printing trace and stuff.
- */
-static char *stli_brdnames[] = {
- "Unknown",
- "Stallion",
- "Brumby",
- "ONboard-MC",
- "ONboard",
- "Brumby",
- "Brumby",
- "ONboard-EI",
- NULL,
- "ONboard",
- "ONboard-MC",
- "ONboard-MC",
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- "EasyIO",
- "EC8/32-AT",
- "EC8/32-MC",
- "EC8/64-AT",
- "EC8/64-EI",
- "EC8/64-MC",
- "EC8/32-PCI",
- "EC8/64-PCI",
- "EasyIO-PCI",
- "EC/RA-PCI",
-};
-
-/*****************************************************************************/
-
-/*
- * Define some string labels for arguments passed from the module
- * load line. These allow for easy board definitions, and easy
- * modification of the io, memory and irq resources.
- */
-
-static char *board0[8];
-static char *board1[8];
-static char *board2[8];
-static char *board3[8];
-
-static char **stli_brdsp[] = {
- (char **) &board0,
- (char **) &board1,
- (char **) &board2,
- (char **) &board3
-};
-
-/*
- * Define a set of common board names, and types. This is used to
- * parse any module arguments.
- */
-
-static struct stlibrdtype {
- char *name;
- int type;
-} stli_brdstr[] = {
- { "stallion", BRD_STALLION },
- { "1", BRD_STALLION },
- { "brumby", BRD_BRUMBY },
- { "brumby4", BRD_BRUMBY },
- { "brumby/4", BRD_BRUMBY },
- { "brumby-4", BRD_BRUMBY },
- { "brumby8", BRD_BRUMBY },
- { "brumby/8", BRD_BRUMBY },
- { "brumby-8", BRD_BRUMBY },
- { "brumby16", BRD_BRUMBY },
- { "brumby/16", BRD_BRUMBY },
- { "brumby-16", BRD_BRUMBY },
- { "2", BRD_BRUMBY },
- { "onboard2", BRD_ONBOARD2 },
- { "onboard-2", BRD_ONBOARD2 },
- { "onboard/2", BRD_ONBOARD2 },
- { "onboard-mc", BRD_ONBOARD2 },
- { "onboard/mc", BRD_ONBOARD2 },
- { "onboard-mca", BRD_ONBOARD2 },
- { "onboard/mca", BRD_ONBOARD2 },
- { "3", BRD_ONBOARD2 },
- { "onboard", BRD_ONBOARD },
- { "onboardat", BRD_ONBOARD },
- { "4", BRD_ONBOARD },
- { "onboarde", BRD_ONBOARDE },
- { "onboard-e", BRD_ONBOARDE },
- { "onboard/e", BRD_ONBOARDE },
- { "onboard-ei", BRD_ONBOARDE },
- { "onboard/ei", BRD_ONBOARDE },
- { "7", BRD_ONBOARDE },
- { "ecp", BRD_ECP },
- { "ecpat", BRD_ECP },
- { "ec8/64", BRD_ECP },
- { "ec8/64-at", BRD_ECP },
- { "ec8/64-isa", BRD_ECP },
- { "23", BRD_ECP },
- { "ecpe", BRD_ECPE },
- { "ecpei", BRD_ECPE },
- { "ec8/64-e", BRD_ECPE },
- { "ec8/64-ei", BRD_ECPE },
- { "24", BRD_ECPE },
- { "ecpmc", BRD_ECPMC },
- { "ec8/64-mc", BRD_ECPMC },
- { "ec8/64-mca", BRD_ECPMC },
- { "25", BRD_ECPMC },
- { "ecppci", BRD_ECPPCI },
- { "ec/ra", BRD_ECPPCI },
- { "ec/ra-pc", BRD_ECPPCI },
- { "ec/ra-pci", BRD_ECPPCI },
- { "29", BRD_ECPPCI },
-};
-
-/*
- * Define the module arguments.
- */
-MODULE_AUTHOR("Greg Ungerer");
-MODULE_DESCRIPTION("Stallion Intelligent Multiport Serial Driver");
-MODULE_LICENSE("GPL");
-
-
-module_param_array(board0, charp, NULL, 0);
-MODULE_PARM_DESC(board0, "Board 0 config -> name[,ioaddr[,memaddr]");
-module_param_array(board1, charp, NULL, 0);
-MODULE_PARM_DESC(board1, "Board 1 config -> name[,ioaddr[,memaddr]");
-module_param_array(board2, charp, NULL, 0);
-MODULE_PARM_DESC(board2, "Board 2 config -> name[,ioaddr[,memaddr]");
-module_param_array(board3, charp, NULL, 0);
-MODULE_PARM_DESC(board3, "Board 3 config -> name[,ioaddr[,memaddr]");
-
-#if STLI_EISAPROBE != 0
-/*
- * Set up a default memory address table for EISA board probing.
- * The default addresses are all below 1Mbyte, which has to be the
- * case anyway. They should be safe, since we only read values from
- * them, and interrupts are disabled while we do it. If the higher
- * memory support is compiled in then we also try probing around
- * the 1Gb, 2Gb and 3Gb areas as well...
- */
-static unsigned long stli_eisamemprobeaddrs[] = {
- 0xc0000, 0xd0000, 0xe0000, 0xf0000,
- 0x80000000, 0x80010000, 0x80020000, 0x80030000,
- 0x40000000, 0x40010000, 0x40020000, 0x40030000,
- 0xc0000000, 0xc0010000, 0xc0020000, 0xc0030000,
- 0xff000000, 0xff010000, 0xff020000, 0xff030000,
-};
-
-static int stli_eisamempsize = ARRAY_SIZE(stli_eisamemprobeaddrs);
-#endif
-
-/*
- * Define the Stallion PCI vendor and device IDs.
- */
-#ifndef PCI_DEVICE_ID_ECRA
-#define PCI_DEVICE_ID_ECRA 0x0004
-#endif
-
-static struct pci_device_id istallion_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_STALLION, PCI_DEVICE_ID_ECRA), },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, istallion_pci_tbl);
-
-static struct pci_driver stli_pcidriver;
-
-/*****************************************************************************/
-
-/*
- * Hardware configuration info for ECP boards. These defines apply
- * to the directly accessible io ports of the ECP. There is a set of
- * defines for each ECP board type, ISA, EISA, MCA and PCI.
- */
-#define ECP_IOSIZE 4
-
-#define ECP_MEMSIZE (128 * 1024)
-#define ECP_PCIMEMSIZE (256 * 1024)
-
-#define ECP_ATPAGESIZE (4 * 1024)
-#define ECP_MCPAGESIZE (4 * 1024)
-#define ECP_EIPAGESIZE (64 * 1024)
-#define ECP_PCIPAGESIZE (64 * 1024)
-
-#define STL_EISAID 0x8c4e
-
-/*
- * Important defines for the ISA class of ECP board.
- */
-#define ECP_ATIREG 0
-#define ECP_ATCONFR 1
-#define ECP_ATMEMAR 2
-#define ECP_ATMEMPR 3
-#define ECP_ATSTOP 0x1
-#define ECP_ATINTENAB 0x10
-#define ECP_ATENABLE 0x20
-#define ECP_ATDISABLE 0x00
-#define ECP_ATADDRMASK 0x3f000
-#define ECP_ATADDRSHFT 12
-
-/*
- * Important defines for the EISA class of ECP board.
- */
-#define ECP_EIIREG 0
-#define ECP_EIMEMARL 1
-#define ECP_EICONFR 2
-#define ECP_EIMEMARH 3
-#define ECP_EIENABLE 0x1
-#define ECP_EIDISABLE 0x0
-#define ECP_EISTOP 0x4
-#define ECP_EIEDGE 0x00
-#define ECP_EILEVEL 0x80
-#define ECP_EIADDRMASKL 0x00ff0000
-#define ECP_EIADDRSHFTL 16
-#define ECP_EIADDRMASKH 0xff000000
-#define ECP_EIADDRSHFTH 24
-#define ECP_EIBRDENAB 0xc84
-
-#define ECP_EISAID 0x4
-
-/*
- * Important defines for the Micro-channel class of ECP board.
- * (It has a lot in common with the ISA boards.)
- */
-#define ECP_MCIREG 0
-#define ECP_MCCONFR 1
-#define ECP_MCSTOP 0x20
-#define ECP_MCENABLE 0x80
-#define ECP_MCDISABLE 0x00
-
-/*
- * Important defines for the PCI class of ECP board.
- * (It has a lot in common with the other ECP boards.)
- */
-#define ECP_PCIIREG 0
-#define ECP_PCICONFR 1
-#define ECP_PCISTOP 0x01
-
-/*
- * Hardware configuration info for ONboard and Brumby boards. These
- * defines apply to the directly accessible io ports of these boards.
- */
-#define ONB_IOSIZE 16
-#define ONB_MEMSIZE (64 * 1024)
-#define ONB_ATPAGESIZE (64 * 1024)
-#define ONB_MCPAGESIZE (64 * 1024)
-#define ONB_EIMEMSIZE (128 * 1024)
-#define ONB_EIPAGESIZE (64 * 1024)
-
-/*
- * Important defines for the ISA class of ONboard board.
- */
-#define ONB_ATIREG 0
-#define ONB_ATMEMAR 1
-#define ONB_ATCONFR 2
-#define ONB_ATSTOP 0x4
-#define ONB_ATENABLE 0x01
-#define ONB_ATDISABLE 0x00
-#define ONB_ATADDRMASK 0xff0000
-#define ONB_ATADDRSHFT 16
-
-#define ONB_MEMENABLO 0
-#define ONB_MEMENABHI 0x02
-
-/*
- * Important defines for the EISA class of ONboard board.
- */
-#define ONB_EIIREG 0
-#define ONB_EIMEMARL 1
-#define ONB_EICONFR 2
-#define ONB_EIMEMARH 3
-#define ONB_EIENABLE 0x1
-#define ONB_EIDISABLE 0x0
-#define ONB_EISTOP 0x4
-#define ONB_EIEDGE 0x00
-#define ONB_EILEVEL 0x80
-#define ONB_EIADDRMASKL 0x00ff0000
-#define ONB_EIADDRSHFTL 16
-#define ONB_EIADDRMASKH 0xff000000
-#define ONB_EIADDRSHFTH 24
-#define ONB_EIBRDENAB 0xc84
-
-#define ONB_EISAID 0x1
-
-/*
- * Important defines for the Brumby boards. They are pretty simple,
- * there is not much that is programmably configurable.
- */
-#define BBY_IOSIZE 16
-#define BBY_MEMSIZE (64 * 1024)
-#define BBY_PAGESIZE (16 * 1024)
-
-#define BBY_ATIREG 0
-#define BBY_ATCONFR 1
-#define BBY_ATSTOP 0x4
-
-/*
- * Important defines for the Stallion boards. They are pretty simple,
- * there is not much that is programmably configurable.
- */
-#define STAL_IOSIZE 16
-#define STAL_MEMSIZE (64 * 1024)
-#define STAL_PAGESIZE (64 * 1024)
-
-/*
- * Define the set of status register values for EasyConnection panels.
- * The signature will return with the status value for each panel. From
- * this we can determine what is attached to the board - before we have
- * actually downloaded any code to it.
- */
-#define ECH_PNLSTATUS 2
-#define ECH_PNL16PORT 0x20
-#define ECH_PNLIDMASK 0x07
-#define ECH_PNLXPID 0x40
-#define ECH_PNLINTRPEND 0x80
-
-/*
- * Define some macros to do things to the board. Even though these boards
- * are somewhat related, there are often significantly different ways of
- * doing some operation on them (like enable, paging, reset, etc). So each
- * board class has a set of functions which do the commonly required
- * operations. The macros below basically just call these functions,
- * generally checking for a NULL function - which means that the board
- * needs nothing done to it to achieve this operation!
- */
-#define EBRDINIT(brdp) \
- if (brdp->init != NULL) \
- (* brdp->init)(brdp)
-
-#define EBRDENABLE(brdp) \
- if (brdp->enable != NULL) \
- (* brdp->enable)(brdp);
-
-#define EBRDDISABLE(brdp) \
- if (brdp->disable != NULL) \
- (* brdp->disable)(brdp);
-
-#define EBRDINTR(brdp) \
- if (brdp->intr != NULL) \
- (* brdp->intr)(brdp);
-
-#define EBRDRESET(brdp) \
- if (brdp->reset != NULL) \
- (* brdp->reset)(brdp);
-
-#define EBRDGETMEMPTR(brdp,offset) \
- (* brdp->getmemptr)(brdp, offset, __LINE__)
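
As a rough illustration of how these macros are meant to be used (every access to board shared memory is bracketed by an enable/disable pair, with NULL callbacks silently skipped), consider the sketch below; example_board_access() is invented for this note and is not driver code.

static void example_board_access(struct stlibrd *brdp)
{
	void __iomem *mem;

	EBRDENABLE(brdp);			/* page the board's shared memory in */
	mem = EBRDGETMEMPTR(brdp, 0);		/* get a pointer into that window */
	(void)mem;				/* ... access it via readb()/writeb() ... */
	EBRDDISABLE(brdp);			/* page it back out again */
}
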
-
-/*
- * Define the maximal baud rate, and the default baud base for ports.
- */
-#define STL_MAXBAUD 460800
-#define STL_BAUDBASE 115200
-#define STL_CLOSEDELAY (5 * HZ / 10)
-
-/*****************************************************************************/
-
-/*
- * Define macros to extract a brd or port number from a minor number.
- */
-#define MINOR2BRD(min) (((min) & 0xc0) >> 6)
-#define MINOR2PORT(min) ((min) & 0x3f)
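
A worked example of the split performed by these macros, kept as a standalone sketch (minor_split_example() is not part of the driver): the top two bits of the minor number select the board, the low six bits select the port.

static void minor_split_example(void)
{
	unsigned int minor = 0x47;		/* binary 01 000111 */
	unsigned int brd = MINOR2BRD(minor);	/* (0x47 & 0xc0) >> 6 == 1 */
	unsigned int port = MINOR2PORT(minor);	/* 0x47 & 0x3f == 7 */

	(void)brd;
	(void)port;
}
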
-
-/*****************************************************************************/
-
-/*
- * Prototype all functions in this driver!
- */
-
-static int stli_parsebrd(struct stlconf *confp, char **argp);
-static int stli_open(struct tty_struct *tty, struct file *filp);
-static void stli_close(struct tty_struct *tty, struct file *filp);
-static int stli_write(struct tty_struct *tty, const unsigned char *buf, int count);
-static int stli_putchar(struct tty_struct *tty, unsigned char ch);
-static void stli_flushchars(struct tty_struct *tty);
-static int stli_writeroom(struct tty_struct *tty);
-static int stli_charsinbuffer(struct tty_struct *tty);
-static int stli_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
-static void stli_settermios(struct tty_struct *tty, struct ktermios *old);
-static void stli_throttle(struct tty_struct *tty);
-static void stli_unthrottle(struct tty_struct *tty);
-static void stli_stop(struct tty_struct *tty);
-static void stli_start(struct tty_struct *tty);
-static void stli_flushbuffer(struct tty_struct *tty);
-static int stli_breakctl(struct tty_struct *tty, int state);
-static void stli_waituntilsent(struct tty_struct *tty, int timeout);
-static void stli_sendxchar(struct tty_struct *tty, char ch);
-static void stli_hangup(struct tty_struct *tty);
-
-static int stli_brdinit(struct stlibrd *brdp);
-static int stli_startbrd(struct stlibrd *brdp);
-static ssize_t stli_memread(struct file *fp, char __user *buf, size_t count, loff_t *offp);
-static ssize_t stli_memwrite(struct file *fp, const char __user *buf, size_t count, loff_t *offp);
-static long stli_memioctl(struct file *fp, unsigned int cmd, unsigned long arg);
-static void stli_brdpoll(struct stlibrd *brdp, cdkhdr_t __iomem *hdrp);
-static void stli_poll(unsigned long arg);
-static int stli_hostcmd(struct stlibrd *brdp, struct stliport *portp);
-static int stli_initopen(struct tty_struct *tty, struct stlibrd *brdp, struct stliport *portp);
-static int stli_rawopen(struct stlibrd *brdp, struct stliport *portp, unsigned long arg, int wait);
-static int stli_rawclose(struct stlibrd *brdp, struct stliport *portp, unsigned long arg, int wait);
-static int stli_setport(struct tty_struct *tty);
-static int stli_cmdwait(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback);
-static void stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback);
-static void __stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback);
-static void stli_dodelaycmd(struct stliport *portp, cdkctrl_t __iomem *cp);
-static void stli_mkasyport(struct tty_struct *tty, struct stliport *portp, asyport_t *pp, struct ktermios *tiosp);
-static void stli_mkasysigs(asysigs_t *sp, int dtr, int rts);
-static long stli_mktiocm(unsigned long sigvalue);
-static void stli_read(struct stlibrd *brdp, struct stliport *portp);
-static int stli_getserial(struct stliport *portp, struct serial_struct __user *sp);
-static int stli_setserial(struct tty_struct *tty, struct serial_struct __user *sp);
-static int stli_getbrdstats(combrd_t __user *bp);
-static int stli_getportstats(struct tty_struct *tty, struct stliport *portp, comstats_t __user *cp);
-static int stli_portcmdstats(struct tty_struct *tty, struct stliport *portp);
-static int stli_clrportstats(struct stliport *portp, comstats_t __user *cp);
-static int stli_getportstruct(struct stliport __user *arg);
-static int stli_getbrdstruct(struct stlibrd __user *arg);
-static struct stlibrd *stli_allocbrd(void);
-
-static void stli_ecpinit(struct stlibrd *brdp);
-static void stli_ecpenable(struct stlibrd *brdp);
-static void stli_ecpdisable(struct stlibrd *brdp);
-static void __iomem *stli_ecpgetmemptr(struct stlibrd *brdp, unsigned long offset, int line);
-static void stli_ecpreset(struct stlibrd *brdp);
-static void stli_ecpintr(struct stlibrd *brdp);
-static void stli_ecpeiinit(struct stlibrd *brdp);
-static void stli_ecpeienable(struct stlibrd *brdp);
-static void stli_ecpeidisable(struct stlibrd *brdp);
-static void __iomem *stli_ecpeigetmemptr(struct stlibrd *brdp, unsigned long offset, int line);
-static void stli_ecpeireset(struct stlibrd *brdp);
-static void stli_ecpmcenable(struct stlibrd *brdp);
-static void stli_ecpmcdisable(struct stlibrd *brdp);
-static void __iomem *stli_ecpmcgetmemptr(struct stlibrd *brdp, unsigned long offset, int line);
-static void stli_ecpmcreset(struct stlibrd *brdp);
-static void stli_ecppciinit(struct stlibrd *brdp);
-static void __iomem *stli_ecppcigetmemptr(struct stlibrd *brdp, unsigned long offset, int line);
-static void stli_ecppcireset(struct stlibrd *brdp);
-
-static void stli_onbinit(struct stlibrd *brdp);
-static void stli_onbenable(struct stlibrd *brdp);
-static void stli_onbdisable(struct stlibrd *brdp);
-static void __iomem *stli_onbgetmemptr(struct stlibrd *brdp, unsigned long offset, int line);
-static void stli_onbreset(struct stlibrd *brdp);
-static void stli_onbeinit(struct stlibrd *brdp);
-static void stli_onbeenable(struct stlibrd *brdp);
-static void stli_onbedisable(struct stlibrd *brdp);
-static void __iomem *stli_onbegetmemptr(struct stlibrd *brdp, unsigned long offset, int line);
-static void stli_onbereset(struct stlibrd *brdp);
-static void stli_bbyinit(struct stlibrd *brdp);
-static void __iomem *stli_bbygetmemptr(struct stlibrd *brdp, unsigned long offset, int line);
-static void stli_bbyreset(struct stlibrd *brdp);
-static void stli_stalinit(struct stlibrd *brdp);
-static void __iomem *stli_stalgetmemptr(struct stlibrd *brdp, unsigned long offset, int line);
-static void stli_stalreset(struct stlibrd *brdp);
-
-static struct stliport *stli_getport(unsigned int brdnr, unsigned int panelnr, unsigned int portnr);
-
-static int stli_initecp(struct stlibrd *brdp);
-static int stli_initonb(struct stlibrd *brdp);
-#if STLI_EISAPROBE != 0
-static int stli_eisamemprobe(struct stlibrd *brdp);
-#endif
-static int stli_initports(struct stlibrd *brdp);
-
-/*****************************************************************************/
-
-/*
- * Define the driver info for a user level shared memory device. This
- * device will work sort of like the /dev/kmem device - except that it
- * will give access to the shared memory on the Stallion intelligent
- * board. This is also a very useful debugging tool.
- */
-static const struct file_operations stli_fsiomem = {
- .owner = THIS_MODULE,
- .read = stli_memread,
- .write = stli_memwrite,
- .unlocked_ioctl = stli_memioctl,
- .llseek = default_llseek,
-};
-
-/*****************************************************************************/
-
-/*
- * Define a timer_list entry for our poll routine. The slave board
- * is polled every so often to see if anything needs doing. This is
- * much cheaper on host cpu than using interrupts. It turns out to
- * not increase character latency by much either...
- */
-static DEFINE_TIMER(stli_timerlist, stli_poll, 0, 0);
-
-static int stli_timeron;
-
-/*
- * Define the calculation for the timeout routine.
- */
-#define STLI_TIMEOUT (jiffies + 1)
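
For context, a poll timer like this is normally re-armed from its own handler; the fragment below only sketches that timing side (the real stli_poll() also walks and services the boards) and is illustrative rather than a copy of the driver's handler.

static void example_poll(unsigned long arg)
{
	/* ... walk the boards and service any pending work here ... */

	if (stli_timeron)
		mod_timer(&stli_timerlist, STLI_TIMEOUT);	/* fire again next jiffy */
}
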
-
-/*****************************************************************************/
-
-static struct class *istallion_class;
-
-static void stli_cleanup_ports(struct stlibrd *brdp)
-{
- struct stliport *portp;
- unsigned int j;
- struct tty_struct *tty;
-
- for (j = 0; j < STL_MAXPORTS; j++) {
- portp = brdp->ports[j];
- if (portp != NULL) {
- tty = tty_port_tty_get(&portp->port);
- if (tty != NULL) {
- tty_hangup(tty);
- tty_kref_put(tty);
- }
- kfree(portp);
- }
- }
-}
-
-/*****************************************************************************/
-
-/*
- * Parse the supplied argument string, into the board conf struct.
- */
-
-static int stli_parsebrd(struct stlconf *confp, char **argp)
-{
- unsigned int i;
- char *sp;
-
- if (argp[0] == NULL || *argp[0] == 0)
- return 0;
-
- for (sp = argp[0], i = 0; ((*sp != 0) && (i < 25)); sp++, i++)
- *sp = tolower(*sp);
-
- for (i = 0; i < ARRAY_SIZE(stli_brdstr); i++) {
- if (strcmp(stli_brdstr[i].name, argp[0]) == 0)
- break;
- }
- if (i == ARRAY_SIZE(stli_brdstr)) {
- printk(KERN_WARNING "istallion: unknown board name, %s?\n", argp[0]);
- return 0;
- }
-
- confp->brdtype = stli_brdstr[i].type;
- if (argp[1] != NULL && *argp[1] != 0)
- confp->ioaddr1 = simple_strtoul(argp[1], NULL, 0);
- if (argp[2] != NULL && *argp[2] != 0)
- confp->memaddr = simple_strtoul(argp[2], NULL, 0);
- return(1);
-}
-
-/*****************************************************************************/
-
-/*
- * On the first open of the device setup the port hardware, and
- * initialize the per port data structure. Since initializing the port
- * requires several commands to the board we will need to wait for any
- * other open that is already initializing the port.
- *
- * Locking: protected by the port mutex.
- */
-
-static int stli_activate(struct tty_port *port, struct tty_struct *tty)
-{
- struct stliport *portp = container_of(port, struct stliport, port);
- struct stlibrd *brdp = stli_brds[portp->brdnr];
- int rc;
-
- if ((rc = stli_initopen(tty, brdp, portp)) >= 0)
- clear_bit(TTY_IO_ERROR, &tty->flags);
- wake_up_interruptible(&portp->raw_wait);
- return rc;
-}
-
-static int stli_open(struct tty_struct *tty, struct file *filp)
-{
- struct stlibrd *brdp;
- struct stliport *portp;
- unsigned int minordev, brdnr, portnr;
-
- minordev = tty->index;
- brdnr = MINOR2BRD(minordev);
- if (brdnr >= stli_nrbrds)
- return -ENODEV;
- brdp = stli_brds[brdnr];
- if (brdp == NULL)
- return -ENODEV;
- if (!test_bit(BST_STARTED, &brdp->state))
- return -ENODEV;
- portnr = MINOR2PORT(minordev);
- if (portnr > brdp->nrports)
- return -ENODEV;
-
- portp = brdp->ports[portnr];
- if (portp == NULL)
- return -ENODEV;
- if (portp->devnr < 1)
- return -ENODEV;
-
- tty->driver_data = portp;
- return tty_port_open(&portp->port, tty, filp);
-}
-
-
-/*****************************************************************************/
-
-static void stli_shutdown(struct tty_port *port)
-{
- struct stlibrd *brdp;
- unsigned long ftype;
- unsigned long flags;
- struct stliport *portp = container_of(port, struct stliport, port);
-
- if (portp->brdnr >= stli_nrbrds)
- return;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return;
-
- /*
- * May want to wait for data to drain before closing. The BUSY
- * flag keeps track of whether we are still transmitting or not.
- * It is updated by messages from the slave - indicating when all
- * chars really have drained.
- */
-
- if (!test_bit(ST_CLOSING, &portp->state))
- stli_rawclose(brdp, portp, 0, 0);
-
- spin_lock_irqsave(&stli_lock, flags);
- clear_bit(ST_TXBUSY, &portp->state);
- clear_bit(ST_RXSTOP, &portp->state);
- spin_unlock_irqrestore(&stli_lock, flags);
-
- ftype = FLUSHTX | FLUSHRX;
- stli_cmdwait(brdp, portp, A_FLUSH, &ftype, sizeof(u32), 0);
-}
-
-static void stli_close(struct tty_struct *tty, struct file *filp)
-{
- struct stliport *portp = tty->driver_data;
- unsigned long flags;
- if (portp == NULL)
- return;
- spin_lock_irqsave(&stli_lock, flags);
- /* Flush any internal buffering out first */
- if (tty == stli_txcooktty)
- stli_flushchars(tty);
- spin_unlock_irqrestore(&stli_lock, flags);
- tty_port_close(&portp->port, tty, filp);
-}
-
-/*****************************************************************************/
-
-/*
- * Carry out first open operations on a port. This involves a number of
- * commands to be sent to the slave. We need to open the port, set the
- * notification events, set the initial port settings, get and set the
- * initial signal values. We sleep and wait in between each one. But
- * this still all happens pretty quickly.
- */
-
-static int stli_initopen(struct tty_struct *tty,
- struct stlibrd *brdp, struct stliport *portp)
-{
- asynotify_t nt;
- asyport_t aport;
- int rc;
-
- if ((rc = stli_rawopen(brdp, portp, 0, 1)) < 0)
- return rc;
-
- memset(&nt, 0, sizeof(asynotify_t));
- nt.data = (DT_TXLOW | DT_TXEMPTY | DT_RXBUSY | DT_RXBREAK);
- nt.signal = SG_DCD;
- if ((rc = stli_cmdwait(brdp, portp, A_SETNOTIFY, &nt,
- sizeof(asynotify_t), 0)) < 0)
- return rc;
-
- stli_mkasyport(tty, portp, &aport, tty->termios);
- if ((rc = stli_cmdwait(brdp, portp, A_SETPORT, &aport,
- sizeof(asyport_t), 0)) < 0)
- return rc;
-
- set_bit(ST_GETSIGS, &portp->state);
- if ((rc = stli_cmdwait(brdp, portp, A_GETSIGNALS, &portp->asig,
- sizeof(asysigs_t), 1)) < 0)
- return rc;
- if (test_and_clear_bit(ST_GETSIGS, &portp->state))
- portp->sigs = stli_mktiocm(portp->asig.sigvalue);
- stli_mkasysigs(&portp->asig, 1, 1);
- if ((rc = stli_cmdwait(brdp, portp, A_SETSIGNALS, &portp->asig,
- sizeof(asysigs_t), 0)) < 0)
- return rc;
-
- return 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Send an open message to the slave. This will sleep waiting for the
- * acknowledgement, so must have user context. We need to co-ordinate
- * with close events here, since we don't want open and close events
- * to overlap.
- */
-
-static int stli_rawopen(struct stlibrd *brdp, struct stliport *portp, unsigned long arg, int wait)
-{
- cdkhdr_t __iomem *hdrp;
- cdkctrl_t __iomem *cp;
- unsigned char __iomem *bits;
- unsigned long flags;
- int rc;
-
-/*
- * Send a message to the slave to open this port.
- */
-
-/*
- * Slave is already closing this port. This can happen if a hangup
- * occurs on this port. The order of opens and closes may not be
- * preserved across shared memory, so we must wait until the close
- * is complete.
- */
- wait_event_interruptible_tty(portp->raw_wait,
- !test_bit(ST_CLOSING, &portp->state));
- if (signal_pending(current)) {
- return -ERESTARTSYS;
- }
-
-/*
- * Everything is ready now, so write the open message into shared
- * memory. Once the message is in set the service bits to say that
- * this port wants service.
- */
- spin_lock_irqsave(&brd_lock, flags);
- EBRDENABLE(brdp);
- cp = &((cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr))->ctrl;
- writel(arg, &cp->openarg);
- writeb(1, &cp->open);
- hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- bits = ((unsigned char __iomem *) hdrp) + brdp->slaveoffset +
- portp->portidx;
- writeb(readb(bits) | portp->portbit, bits);
- EBRDDISABLE(brdp);
-
- if (wait == 0) {
- spin_unlock_irqrestore(&brd_lock, flags);
- return 0;
- }
-
-/*
- * Slave is in action, so now we must wait for the open acknowledgment
- * to come back.
- */
- rc = 0;
- set_bit(ST_OPENING, &portp->state);
- spin_unlock_irqrestore(&brd_lock, flags);
-
- wait_event_interruptible_tty(portp->raw_wait,
- !test_bit(ST_OPENING, &portp->state));
- if (signal_pending(current))
- rc = -ERESTARTSYS;
-
- if ((rc == 0) && (portp->rc != 0))
- rc = -EIO;
- return rc;
-}
-
-/*****************************************************************************/
-
-/*
- * Send a close message to the slave. Normally this will sleep waiting
- * for the acknowledgement, but if wait parameter is 0 it will not. If
- * wait is true then must have user context (to sleep).
- */
-
-static int stli_rawclose(struct stlibrd *brdp, struct stliport *portp, unsigned long arg, int wait)
-{
- cdkhdr_t __iomem *hdrp;
- cdkctrl_t __iomem *cp;
- unsigned char __iomem *bits;
- unsigned long flags;
- int rc;
-
-/*
- * Slave is already closing this port. This can happen if a hangup
- * occurs on this port.
- */
- if (wait) {
- wait_event_interruptible_tty(portp->raw_wait,
- !test_bit(ST_CLOSING, &portp->state));
- if (signal_pending(current)) {
- return -ERESTARTSYS;
- }
- }
-
-/*
- * Write the close command into shared memory.
- */
- spin_lock_irqsave(&brd_lock, flags);
- EBRDENABLE(brdp);
- cp = &((cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr))->ctrl;
- writel(arg, &cp->closearg);
- writeb(1, &cp->close);
- hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- bits = ((unsigned char __iomem *) hdrp) + brdp->slaveoffset +
- portp->portidx;
- writeb(readb(bits) |portp->portbit, bits);
- EBRDDISABLE(brdp);
-
- set_bit(ST_CLOSING, &portp->state);
- spin_unlock_irqrestore(&brd_lock, flags);
-
- if (wait == 0)
- return 0;
-
-/*
- * Slave is in action, so now we must wait for the close acknowledgment
- * to come back.
- */
- rc = 0;
- wait_event_interruptible_tty(portp->raw_wait,
- !test_bit(ST_CLOSING, &portp->state));
- if (signal_pending(current))
- rc = -ERESTARTSYS;
-
- if ((rc == 0) && (portp->rc != 0))
- rc = -EIO;
- return rc;
-}
-
-/*****************************************************************************/
-
-/*
- * Send a command to the slave and wait for the response. This must
- * have user context (it sleeps). This routine is generic in that it
- * can send any type of command. Its purpose is to wait for that command
- * to complete (as opposed to initiating the command then returning).
- */
-
-static int stli_cmdwait(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback)
-{
- /*
- * no need for wait_event_tty because clearing ST_CMDING cannot block
- * on BTM
- */
- wait_event_interruptible(portp->raw_wait,
- !test_bit(ST_CMDING, &portp->state));
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- stli_sendcmd(brdp, portp, cmd, arg, size, copyback);
-
- wait_event_interruptible(portp->raw_wait,
- !test_bit(ST_CMDING, &portp->state));
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- if (portp->rc != 0)
- return -EIO;
- return 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Send the termios settings for this port to the slave. This sleeps
- * waiting for the command to complete - so must have user context.
- */
-
-static int stli_setport(struct tty_struct *tty)
-{
- struct stliport *portp = tty->driver_data;
- struct stlibrd *brdp;
- asyport_t aport;
-
- if (portp == NULL)
- return -ENODEV;
- if (portp->brdnr >= stli_nrbrds)
- return -ENODEV;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return -ENODEV;
-
- stli_mkasyport(tty, portp, &aport, tty->termios);
- return(stli_cmdwait(brdp, portp, A_SETPORT, &aport, sizeof(asyport_t), 0));
-}
-
-/*****************************************************************************/
-
-static int stli_carrier_raised(struct tty_port *port)
-{
- struct stliport *portp = container_of(port, struct stliport, port);
- return (portp->sigs & TIOCM_CD) ? 1 : 0;
-}
-
-static void stli_dtr_rts(struct tty_port *port, int on)
-{
- struct stliport *portp = container_of(port, struct stliport, port);
- struct stlibrd *brdp = stli_brds[portp->brdnr];
- stli_mkasysigs(&portp->asig, on, on);
- if (stli_cmdwait(brdp, portp, A_SETSIGNALS, &portp->asig,
- sizeof(asysigs_t), 0) < 0)
- printk(KERN_WARNING "istallion: dtr set failed.\n");
-}
-
-
-/*****************************************************************************/
-
-/*
- * Write routine. Take the data and put it in the shared memory ring
- * queue. If the port is not already sending chars then we need to mark
- * the service bits for this port.
- */
-
-static int stli_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
- cdkasy_t __iomem *ap;
- cdkhdr_t __iomem *hdrp;
- unsigned char __iomem *bits;
- unsigned char __iomem *shbuf;
- unsigned char *chbuf;
- struct stliport *portp;
- struct stlibrd *brdp;
- unsigned int len, stlen, head, tail, size;
- unsigned long flags;
-
- if (tty == stli_txcooktty)
- stli_flushchars(tty);
- portp = tty->driver_data;
- if (portp == NULL)
- return 0;
- if (portp->brdnr >= stli_nrbrds)
- return 0;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return 0;
- chbuf = (unsigned char *) buf;
-
-/*
- * All data is now local, shove as much as possible into shared memory.
- */
- spin_lock_irqsave(&brd_lock, flags);
- EBRDENABLE(brdp);
- ap = (cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr);
- head = (unsigned int) readw(&ap->txq.head);
- tail = (unsigned int) readw(&ap->txq.tail);
- if (tail != ((unsigned int) readw(&ap->txq.tail)))
- tail = (unsigned int) readw(&ap->txq.tail);
- size = portp->txsize;
- if (head >= tail) {
- len = size - (head - tail) - 1;
- stlen = size - head;
- } else {
- len = tail - head - 1;
- stlen = len;
- }
-
- len = min(len, (unsigned int)count);
- count = 0;
- shbuf = (char __iomem *) EBRDGETMEMPTR(brdp, portp->txoffset);
-
- while (len > 0) {
- stlen = min(len, stlen);
- memcpy_toio(shbuf + head, chbuf, stlen);
- chbuf += stlen;
- len -= stlen;
- count += stlen;
- head += stlen;
- if (head >= size) {
- head = 0;
- stlen = tail;
- }
- }
-
- ap = (cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr);
- writew(head, &ap->txq.head);
- if (test_bit(ST_TXBUSY, &portp->state)) {
- if (readl(&ap->changed.data) & DT_TXEMPTY)
- writel(readl(&ap->changed.data) & ~DT_TXEMPTY, &ap->changed.data);
- }
- hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- bits = ((unsigned char __iomem *) hdrp) + brdp->slaveoffset +
- portp->portidx;
- writeb(readb(bits) | portp->portbit, bits);
- set_bit(ST_TXBUSY, &portp->state);
- EBRDDISABLE(brdp);
- spin_unlock_irqrestore(&brd_lock, flags);
-
- return(count);
-}
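
The head/tail arithmetic used above for the shared-memory TX ring can be pulled out into a standalone sketch: one slot is always left unused so that head == tail means "empty". txq_free() below is illustrative only; the variable names mirror the driver.

static unsigned int txq_free(unsigned int head, unsigned int tail,
			     unsigned int size, unsigned int *contig)
{
	unsigned int len;

	if (head >= tail) {
		len = size - (head - tail) - 1;	/* total free space */
		*contig = size - head;		/* contiguous run before wrap */
	} else {
		len = tail - head - 1;
		*contig = len;
	}
	return len;
}
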
-
-/*****************************************************************************/
-
-/*
- * Output a single character. We put it into a temporary local buffer
- * (for speed) then write out that buffer when the flushchars routine
- * is called. There is a safety catch here so that if some other port
- * writes chars before the current buffer has been flushed, then we write
- * the old buffer out first, then start on the new port's chars.
- */
-
-static int stli_putchar(struct tty_struct *tty, unsigned char ch)
-{
- if (tty != stli_txcooktty) {
- if (stli_txcooktty != NULL)
- stli_flushchars(stli_txcooktty);
- stli_txcooktty = tty;
- }
-
- stli_txcookbuf[stli_txcooksize++] = ch;
- return 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Transfer characters from the local TX cooking buffer to the board.
- * We sort of ignore the tty that gets passed in here. We rely on the
- * info stored with the TX cook buffer to tell us which port to flush
- * the data on. In any case we clean out the TX cook buffer, for re-use
- * by someone else.
- */
-
-static void stli_flushchars(struct tty_struct *tty)
-{
- cdkhdr_t __iomem *hdrp;
- unsigned char __iomem *bits;
- cdkasy_t __iomem *ap;
- struct tty_struct *cooktty;
- struct stliport *portp;
- struct stlibrd *brdp;
- unsigned int len, stlen, head, tail, size, count, cooksize;
- unsigned char *buf;
- unsigned char __iomem *shbuf;
- unsigned long flags;
-
- cooksize = stli_txcooksize;
- cooktty = stli_txcooktty;
- stli_txcooksize = 0;
- stli_txcookrealsize = 0;
- stli_txcooktty = NULL;
-
- if (cooktty == NULL)
- return;
- if (tty != cooktty)
- tty = cooktty;
- if (cooksize == 0)
- return;
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
- if (portp->brdnr >= stli_nrbrds)
- return;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return;
-
- spin_lock_irqsave(&brd_lock, flags);
- EBRDENABLE(brdp);
-
- ap = (cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr);
- head = (unsigned int) readw(&ap->txq.head);
- tail = (unsigned int) readw(&ap->txq.tail);
- if (tail != ((unsigned int) readw(&ap->txq.tail)))
- tail = (unsigned int) readw(&ap->txq.tail);
- size = portp->txsize;
- if (head >= tail) {
- len = size - (head - tail) - 1;
- stlen = size - head;
- } else {
- len = tail - head - 1;
- stlen = len;
- }
-
- len = min(len, cooksize);
- count = 0;
- shbuf = EBRDGETMEMPTR(brdp, portp->txoffset);
- buf = stli_txcookbuf;
-
- while (len > 0) {
- stlen = min(len, stlen);
- memcpy_toio(shbuf + head, buf, stlen);
- buf += stlen;
- len -= stlen;
- count += stlen;
- head += stlen;
- if (head >= size) {
- head = 0;
- stlen = tail;
- }
- }
-
- ap = (cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr);
- writew(head, &ap->txq.head);
-
- if (test_bit(ST_TXBUSY, &portp->state)) {
- if (readl(&ap->changed.data) & DT_TXEMPTY)
- writel(readl(&ap->changed.data) & ~DT_TXEMPTY, &ap->changed.data);
- }
- hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- bits = ((unsigned char __iomem *) hdrp) + brdp->slaveoffset +
- portp->portidx;
- writeb(readb(bits) | portp->portbit, bits);
- set_bit(ST_TXBUSY, &portp->state);
-
- EBRDDISABLE(brdp);
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-static int stli_writeroom(struct tty_struct *tty)
-{
- cdkasyrq_t __iomem *rp;
- struct stliport *portp;
- struct stlibrd *brdp;
- unsigned int head, tail, len;
- unsigned long flags;
-
- if (tty == stli_txcooktty) {
- if (stli_txcookrealsize != 0) {
- len = stli_txcookrealsize - stli_txcooksize;
- return len;
- }
- }
-
- portp = tty->driver_data;
- if (portp == NULL)
- return 0;
- if (portp->brdnr >= stli_nrbrds)
- return 0;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return 0;
-
- spin_lock_irqsave(&brd_lock, flags);
- EBRDENABLE(brdp);
- rp = &((cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr))->txq;
- head = (unsigned int) readw(&rp->head);
- tail = (unsigned int) readw(&rp->tail);
- if (tail != ((unsigned int) readw(&rp->tail)))
- tail = (unsigned int) readw(&rp->tail);
- len = (head >= tail) ? (portp->txsize - (head - tail)) : (tail - head);
- len--;
- EBRDDISABLE(brdp);
- spin_unlock_irqrestore(&brd_lock, flags);
-
- if (tty == stli_txcooktty) {
- stli_txcookrealsize = len;
- len -= stli_txcooksize;
- }
- return len;
-}
-
-/*****************************************************************************/
-
-/*
- * Return the number of characters in the transmit buffer. Normally we
- * will return the number of chars in the shared memory ring queue.
- * We need to kludge around the case where the shared memory buffer is
- * empty but not all characters have drained yet; in this case just
- * return that there is 1 character in the buffer!
- */
-
-static int stli_charsinbuffer(struct tty_struct *tty)
-{
- cdkasyrq_t __iomem *rp;
- struct stliport *portp;
- struct stlibrd *brdp;
- unsigned int head, tail, len;
- unsigned long flags;
-
- if (tty == stli_txcooktty)
- stli_flushchars(tty);
- portp = tty->driver_data;
- if (portp == NULL)
- return 0;
- if (portp->brdnr >= stli_nrbrds)
- return 0;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return 0;
-
- spin_lock_irqsave(&brd_lock, flags);
- EBRDENABLE(brdp);
- rp = &((cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr))->txq;
- head = (unsigned int) readw(&rp->head);
- tail = (unsigned int) readw(&rp->tail);
- if (tail != ((unsigned int) readw(&rp->tail)))
- tail = (unsigned int) readw(&rp->tail);
- len = (head >= tail) ? (head - tail) : (portp->txsize - (tail - head));
- if ((len == 0) && test_bit(ST_TXBUSY, &portp->state))
- len = 1;
- EBRDDISABLE(brdp);
- spin_unlock_irqrestore(&brd_lock, flags);
-
- return len;
-}
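
As a companion to the sketch after stli_write(), the occupancy calculation plus the drain kludge described above can be written as the following illustrative helper (txq_used() is not part of the driver):

static unsigned int txq_used(unsigned int head, unsigned int tail,
			     unsigned int size, int still_draining)
{
	unsigned int len;

	len = (head >= tail) ? (head - tail) : (size - (tail - head));
	if (len == 0 && still_draining)
		len = 1;	/* queue empty, but chars not yet on the wire */
	return len;
}
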
-
-/*****************************************************************************/
-
-/*
- * Generate the serial struct info.
- */
-
-static int stli_getserial(struct stliport *portp, struct serial_struct __user *sp)
-{
- struct serial_struct sio;
- struct stlibrd *brdp;
-
- memset(&sio, 0, sizeof(struct serial_struct));
- sio.type = PORT_UNKNOWN;
- sio.line = portp->portnr;
- sio.irq = 0;
- sio.flags = portp->port.flags;
- sio.baud_base = portp->baud_base;
- sio.close_delay = portp->port.close_delay;
- sio.closing_wait = portp->closing_wait;
- sio.custom_divisor = portp->custom_divisor;
- sio.xmit_fifo_size = 0;
- sio.hub6 = 0;
-
- brdp = stli_brds[portp->brdnr];
- if (brdp != NULL)
- sio.port = brdp->iobase;
-
- return copy_to_user(sp, &sio, sizeof(struct serial_struct)) ?
- -EFAULT : 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Set port according to the serial struct info.
- * At this point we do not do any auto-configure stuff, so we will
- * just quietly ignore any requests to change irq, etc.
- */
-
-static int stli_setserial(struct tty_struct *tty, struct serial_struct __user *sp)
-{
- struct serial_struct sio;
- int rc;
- struct stliport *portp = tty->driver_data;
-
- if (copy_from_user(&sio, sp, sizeof(struct serial_struct)))
- return -EFAULT;
- if (!capable(CAP_SYS_ADMIN)) {
- if ((sio.baud_base != portp->baud_base) ||
- (sio.close_delay != portp->port.close_delay) ||
- ((sio.flags & ~ASYNC_USR_MASK) !=
- (portp->port.flags & ~ASYNC_USR_MASK)))
- return -EPERM;
- }
-
- portp->port.flags = (portp->port.flags & ~ASYNC_USR_MASK) |
- (sio.flags & ASYNC_USR_MASK);
- portp->baud_base = sio.baud_base;
- portp->port.close_delay = sio.close_delay;
- portp->closing_wait = sio.closing_wait;
- portp->custom_divisor = sio.custom_divisor;
-
- if ((rc = stli_setport(tty)) < 0)
- return rc;
- return 0;
-}
-
-/*****************************************************************************/
-
-static int stli_tiocmget(struct tty_struct *tty)
-{
- struct stliport *portp = tty->driver_data;
- struct stlibrd *brdp;
- int rc;
-
- if (portp == NULL)
- return -ENODEV;
- if (portp->brdnr >= stli_nrbrds)
- return 0;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return 0;
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
-
- if ((rc = stli_cmdwait(brdp, portp, A_GETSIGNALS,
- &portp->asig, sizeof(asysigs_t), 1)) < 0)
- return rc;
-
- return stli_mktiocm(portp->asig.sigvalue);
-}
-
-static int stli_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct stliport *portp = tty->driver_data;
- struct stlibrd *brdp;
- int rts = -1, dtr = -1;
-
- if (portp == NULL)
- return -ENODEV;
- if (portp->brdnr >= stli_nrbrds)
- return 0;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return 0;
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
-
- if (set & TIOCM_RTS)
- rts = 1;
- if (set & TIOCM_DTR)
- dtr = 1;
- if (clear & TIOCM_RTS)
- rts = 0;
- if (clear & TIOCM_DTR)
- dtr = 0;
-
- stli_mkasysigs(&portp->asig, dtr, rts);
-
- return stli_cmdwait(brdp, portp, A_SETSIGNALS, &portp->asig,
- sizeof(asysigs_t), 0);
-}
-
-static int stli_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
-{
- struct stliport *portp;
- struct stlibrd *brdp;
- int rc;
- void __user *argp = (void __user *)arg;
-
- portp = tty->driver_data;
- if (portp == NULL)
- return -ENODEV;
- if (portp->brdnr >= stli_nrbrds)
- return 0;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return 0;
-
- if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
- (cmd != COM_GETPORTSTATS) && (cmd != COM_CLRPORTSTATS)) {
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
- }
-
- rc = 0;
-
- switch (cmd) {
- case TIOCGSERIAL:
- rc = stli_getserial(portp, argp);
- break;
- case TIOCSSERIAL:
- rc = stli_setserial(tty, argp);
- break;
- case STL_GETPFLAG:
- rc = put_user(portp->pflag, (unsigned __user *)argp);
- break;
- case STL_SETPFLAG:
- if ((rc = get_user(portp->pflag, (unsigned __user *)argp)) == 0)
- stli_setport(tty);
- break;
- case COM_GETPORTSTATS:
- rc = stli_getportstats(tty, portp, argp);
- break;
- case COM_CLRPORTSTATS:
- rc = stli_clrportstats(portp, argp);
- break;
- case TIOCSERCONFIG:
- case TIOCSERGWILD:
- case TIOCSERSWILD:
- case TIOCSERGETLSR:
- case TIOCSERGSTRUCT:
- case TIOCSERGETMULTI:
- case TIOCSERSETMULTI:
- default:
- rc = -ENOIOCTLCMD;
- break;
- }
-
- return rc;
-}
-
-/*****************************************************************************/
-
-/*
- * This routine assumes that we have user context and can sleep.
- * Looks like it is true for the current ttys implementation..!!
- */
-
-static void stli_settermios(struct tty_struct *tty, struct ktermios *old)
-{
- struct stliport *portp;
- struct stlibrd *brdp;
- struct ktermios *tiosp;
- asyport_t aport;
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
- if (portp->brdnr >= stli_nrbrds)
- return;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return;
-
- tiosp = tty->termios;
-
- stli_mkasyport(tty, portp, &aport, tiosp);
- stli_cmdwait(brdp, portp, A_SETPORT, &aport, sizeof(asyport_t), 0);
- stli_mkasysigs(&portp->asig, ((tiosp->c_cflag & CBAUD) ? 1 : 0), -1);
- stli_cmdwait(brdp, portp, A_SETSIGNALS, &portp->asig,
- sizeof(asysigs_t), 0);
- if ((old->c_cflag & CRTSCTS) && ((tiosp->c_cflag & CRTSCTS) == 0))
- tty->hw_stopped = 0;
- if (((old->c_cflag & CLOCAL) == 0) && (tiosp->c_cflag & CLOCAL))
- wake_up_interruptible(&portp->port.open_wait);
-}
-
-/*****************************************************************************/
-
-/*
- * Attempt to flow control whoever is sending us data. We won't really
- * do any flow control action here. We can't directly, and even if we
- * wanted to we would have to send a command to the slave. The slave
- * knows how to flow control, and will do so when its buffers reach its
- * internal high water marks. So what we will do is set a local state
- * bit that will stop us sending any RX data up from the poll routine
- * (which is the place where RX data from the slave is handled).
- */
-
-static void stli_throttle(struct tty_struct *tty)
-{
- struct stliport *portp = tty->driver_data;
- if (portp == NULL)
- return;
- set_bit(ST_RXSTOP, &portp->state);
-}
-
-/*****************************************************************************/
-
-/*
- * Unflow control the device sending us data... That means that all
- * we have to do is clear the RXSTOP state bit. The next poll call
- * will then be able to pass the RX data back up.
- */
-
-static void stli_unthrottle(struct tty_struct *tty)
-{
- struct stliport *portp = tty->driver_data;
- if (portp == NULL)
- return;
- clear_bit(ST_RXSTOP, &portp->state);
-}
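
/*
 * A small self-contained sketch of the throttling scheme described
 * above: the receive side only sets or clears a flag, and the poll
 * path skips delivering data while the flag is set. The names below
 * are illustrative, not taken from the driver.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool rx_stopped;

static void example_throttle(void)   { atomic_store(&rx_stopped, true); }
static void example_unthrottle(void) { atomic_store(&rx_stopped, false); }

/* Called from the poll loop: deliver pending bytes unless throttled. */
static void example_poll_rx(const char *pending, int len)
{
	if (atomic_load(&rx_stopped))
		return;		/* leave the data queued for later */
	printf("delivering %d bytes: %.*s\n", len, len, pending);
}

int main(void)
{
	example_poll_rx("hello", 5);	/* delivered */
	example_throttle();
	example_poll_rx("world", 5);	/* held back */
	example_unthrottle();
	example_poll_rx("world", 5);	/* delivered now */
	return 0;
}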
-
-/*****************************************************************************/
-
-/*
- * Stop the transmitter.
- */
-
-static void stli_stop(struct tty_struct *tty)
-{
-}
-
-/*****************************************************************************/
-
-/*
- * Start the transmitter again.
- */
-
-static void stli_start(struct tty_struct *tty)
-{
-}
-
-/*****************************************************************************/
-
-
-/*
- * Hangup this port. This is pretty much like closing the port, only
- * a little more brutal. No waiting for data to drain. Shut down the
- * port and maybe drop signals. This is rather tricky really. We want
- * to close the port as well.
- */
-
-static void stli_hangup(struct tty_struct *tty)
-{
- struct stliport *portp = tty->driver_data;
- tty_port_hangup(&portp->port);
-}
-
-/*****************************************************************************/
-
-/*
- * Flush characters from the lower buffer. We may not have user context
- * so we cannot sleep waiting for it to complete. Also we need to check
- * if there are chars for this port in the TX cook buffer, and flush them
- * as well.
- */
-
-static void stli_flushbuffer(struct tty_struct *tty)
-{
- struct stliport *portp;
- struct stlibrd *brdp;
- unsigned long ftype, flags;
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
- if (portp->brdnr >= stli_nrbrds)
- return;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return;
-
- spin_lock_irqsave(&brd_lock, flags);
- if (tty == stli_txcooktty) {
- stli_txcooktty = NULL;
- stli_txcooksize = 0;
- stli_txcookrealsize = 0;
- }
- if (test_bit(ST_CMDING, &portp->state)) {
- set_bit(ST_DOFLUSHTX, &portp->state);
- } else {
- ftype = FLUSHTX;
- if (test_bit(ST_DOFLUSHRX, &portp->state)) {
- ftype |= FLUSHRX;
- clear_bit(ST_DOFLUSHRX, &portp->state);
- }
- __stli_sendcmd(brdp, portp, A_FLUSH, &ftype, sizeof(u32), 0);
- }
- spin_unlock_irqrestore(&brd_lock, flags);
- tty_wakeup(tty);
-}
-
-/*****************************************************************************/
-
-static int stli_breakctl(struct tty_struct *tty, int state)
-{
- struct stlibrd *brdp;
- struct stliport *portp;
- long arg;
-
- portp = tty->driver_data;
- if (portp == NULL)
- return -EINVAL;
- if (portp->brdnr >= stli_nrbrds)
- return -EINVAL;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return -EINVAL;
-
- arg = (state == -1) ? BREAKON : BREAKOFF;
- stli_cmdwait(brdp, portp, A_BREAK, &arg, sizeof(long), 0);
- return 0;
-}
-
-/*****************************************************************************/
-
-static void stli_waituntilsent(struct tty_struct *tty, int timeout)
-{
- struct stliport *portp;
- unsigned long tend;
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
-
- if (timeout == 0)
- timeout = HZ;
- tend = jiffies + timeout;
-
- while (test_bit(ST_TXBUSY, &portp->state)) {
- if (signal_pending(current))
- break;
- msleep_interruptible(20);
- if (time_after_eq(jiffies, tend))
- break;
- }
-}
-
-/*****************************************************************************/
-
-static void stli_sendxchar(struct tty_struct *tty, char ch)
-{
- struct stlibrd *brdp;
- struct stliport *portp;
- asyctrl_t actrl;
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
- if (portp->brdnr >= stli_nrbrds)
- return;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return;
-
- memset(&actrl, 0, sizeof(asyctrl_t));
- if (ch == STOP_CHAR(tty)) {
- actrl.rxctrl = CT_STOPFLOW;
- } else if (ch == START_CHAR(tty)) {
- actrl.rxctrl = CT_STARTFLOW;
- } else {
- actrl.txctrl = CT_SENDCHR;
- actrl.tximdch = ch;
- }
- stli_cmdwait(brdp, portp, A_PORTCTRL, &actrl, sizeof(asyctrl_t), 0);
-}
-
-static void stli_portinfo(struct seq_file *m, struct stlibrd *brdp, struct stliport *portp, int portnr)
-{
- char *uart;
- int rc;
-
- rc = stli_portcmdstats(NULL, portp);
-
- uart = "UNKNOWN";
- if (test_bit(BST_STARTED, &brdp->state)) {
- switch (stli_comstats.hwid) {
- case 0: uart = "2681"; break;
- case 1: uart = "SC26198"; break;
- default:uart = "CD1400"; break;
- }
- }
- seq_printf(m, "%d: uart:%s ", portnr, uart);
-
- if (test_bit(BST_STARTED, &brdp->state) && rc >= 0) {
- char sep;
-
- seq_printf(m, "tx:%d rx:%d", (int) stli_comstats.txtotal,
- (int) stli_comstats.rxtotal);
-
- if (stli_comstats.rxframing)
- seq_printf(m, " fe:%d",
- (int) stli_comstats.rxframing);
- if (stli_comstats.rxparity)
- seq_printf(m, " pe:%d",
- (int) stli_comstats.rxparity);
- if (stli_comstats.rxbreaks)
- seq_printf(m, " brk:%d",
- (int) stli_comstats.rxbreaks);
- if (stli_comstats.rxoverrun)
- seq_printf(m, " oe:%d",
- (int) stli_comstats.rxoverrun);
-
- sep = ' ';
- if (stli_comstats.signals & TIOCM_RTS) {
- seq_printf(m, "%c%s", sep, "RTS");
- sep = '|';
- }
- if (stli_comstats.signals & TIOCM_CTS) {
- seq_printf(m, "%c%s", sep, "CTS");
- sep = '|';
- }
- if (stli_comstats.signals & TIOCM_DTR) {
- seq_printf(m, "%c%s", sep, "DTR");
- sep = '|';
- }
- if (stli_comstats.signals & TIOCM_CD) {
- seq_printf(m, "%c%s", sep, "DCD");
- sep = '|';
- }
- if (stli_comstats.signals & TIOCM_DSR) {
- seq_printf(m, "%c%s", sep, "DSR");
- sep = '|';
- }
- }
- seq_putc(m, '\n');
-}
-
-/*****************************************************************************/
-
-/*
- * Port info, read from the /proc file system.
- */
-
-static int stli_proc_show(struct seq_file *m, void *v)
-{
- struct stlibrd *brdp;
- struct stliport *portp;
- unsigned int brdnr, portnr, totalport;
-
- totalport = 0;
-
- seq_printf(m, "%s: version %s\n", stli_drvtitle, stli_drvversion);
-
-/*
- * We scan through for each board, panel and port. The offset is
- * calculated on the fly, and irrelevant ports are skipped.
- */
- for (brdnr = 0; (brdnr < stli_nrbrds); brdnr++) {
- brdp = stli_brds[brdnr];
- if (brdp == NULL)
- continue;
- if (brdp->state == 0)
- continue;
-
- totalport = brdnr * STL_MAXPORTS;
- for (portnr = 0; (portnr < brdp->nrports); portnr++,
- totalport++) {
- portp = brdp->ports[portnr];
- if (portp == NULL)
- continue;
- stli_portinfo(m, brdp, portp, totalport);
- }
- }
- return 0;
-}
-
-static int stli_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, stli_proc_show, NULL);
-}
-
-static const struct file_operations stli_proc_fops = {
- .owner = THIS_MODULE,
- .open = stli_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-/*****************************************************************************/
-
-/*
- * Generic send command routine. This will send a message to the slave,
- * of the specified type with the specified argument. Must be very
- * careful of data that will be copied out from shared memory -
- * containing command results. The command completion is all done from
- * a poll routine that does not have user context. Therefore you cannot
- * copy back directly into user space, or to the kernel stack of a
- * process. This routine does not sleep, so can be called from anywhere.
- *
- * The caller must hold the brd_lock (see also stli_sendcmd, the usual
- * entry point).
- */
-
-static void __stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback)
-{
- cdkhdr_t __iomem *hdrp;
- cdkctrl_t __iomem *cp;
- unsigned char __iomem *bits;
-
- if (test_bit(ST_CMDING, &portp->state)) {
- printk(KERN_ERR "istallion: command already busy, cmd=%x!\n",
- (int) cmd);
- return;
- }
-
- EBRDENABLE(brdp);
- cp = &((cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr))->ctrl;
- if (size > 0) {
- memcpy_toio((void __iomem *) &(cp->args[0]), arg, size);
- if (copyback) {
- portp->argp = arg;
- portp->argsize = size;
- }
- }
- writel(0, &cp->status);
- writel(cmd, &cp->cmd);
- hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- bits = ((unsigned char __iomem *) hdrp) + brdp->slaveoffset +
- portp->portidx;
- writeb(readb(bits) | portp->portbit, bits);
- set_bit(ST_CMDING, &portp->state);
- EBRDDISABLE(brdp);
-}
-
-static void stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&brd_lock, flags);
- __stli_sendcmd(brdp, portp, cmd, arg, size, copyback);
- spin_unlock_irqrestore(&brd_lock, flags);
-}
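
/*
 * A simplified model of the mailbox hand-off performed above, using
 * ordinary memory instead of the board's shared-memory window and the
 * __iomem accessors. The structure layout and names are illustrative
 * only; the real control block lives on the slave.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct example_ctrl {
	uint32_t status;	/* cleared before a command is issued */
	uint32_t cmd;		/* non-zero while a command is pending */
	uint8_t  args[64];	/* argument block, copied in first */
};

static void example_sendcmd(struct example_ctrl *cp, uint8_t *service_bits,
			    uint8_t portbit, uint32_t cmd,
			    const void *arg, size_t size)
{
	if (size > sizeof(cp->args))
		size = sizeof(cp->args);
	memcpy(cp->args, arg, size);	/* 1: argument block first */
	cp->status = 0;			/* 2: clear completion status */
	cp->cmd = cmd;			/* 3: post the command code */
	*service_bits |= portbit;	/* 4: ask the slave to look */
}

int main(void)
{
	struct example_ctrl ctrl = { 0 };
	uint8_t bits = 0;
	uint32_t arg = 42;

	example_sendcmd(&ctrl, &bits, 0x01, 7, &arg, sizeof(arg));
	printf("cmd=%u pending, service bits=0x%02x\n",
	       (unsigned int) ctrl.cmd, (unsigned int) bits);
	return 0;
}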
-
-/*****************************************************************************/
-
-/*
- * Read data from shared memory. This assumes that the shared memory
- * is enabled and that interrupts are off. Basically we just empty out
- * the shared memory buffer into the tty buffer. Must be careful to
- * handle the case where we fill up the tty buffer, but still have
- * more chars to unload.
- */
-
-static void stli_read(struct stlibrd *brdp, struct stliport *portp)
-{
- cdkasyrq_t __iomem *rp;
- char __iomem *shbuf;
- struct tty_struct *tty;
- unsigned int head, tail, size;
- unsigned int len, stlen;
-
- if (test_bit(ST_RXSTOP, &portp->state))
- return;
- tty = tty_port_tty_get(&portp->port);
- if (tty == NULL)
- return;
-
- rp = &((cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr))->rxq;
- head = (unsigned int) readw(&rp->head);
- if (head != ((unsigned int) readw(&rp->head)))
- head = (unsigned int) readw(&rp->head);
- tail = (unsigned int) readw(&rp->tail);
- size = portp->rxsize;
- if (head >= tail) {
- len = head - tail;
- stlen = len;
- } else {
- len = size - (tail - head);
- stlen = size - tail;
- }
-
- len = tty_buffer_request_room(tty, len);
-
- shbuf = (char __iomem *) EBRDGETMEMPTR(brdp, portp->rxoffset);
-
- while (len > 0) {
- unsigned char *cptr;
-
- stlen = min(len, stlen);
- tty_prepare_flip_string(tty, &cptr, stlen);
- memcpy_fromio(cptr, shbuf + tail, stlen);
- len -= stlen;
- tail += stlen;
- if (tail >= size) {
- tail = 0;
- stlen = head;
- }
- }
- rp = &((cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr))->rxq;
- writew(tail, &rp->tail);
-
- if (head != tail)
- set_bit(ST_RXING, &portp->state);
-
- tty_schedule_flip(tty);
- tty_kref_put(tty);
-}
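
/*
 * The head/tail arithmetic used above, in isolation: for a circular
 * buffer of 'size' bytes, work out how many bytes are available in
 * total and how many are contiguous from 'tail' before the wrap point.
 * A self-contained sketch with illustrative names.
 */
#include <assert.h>
#include <stdio.h>

static void ring_avail(unsigned int head, unsigned int tail,
		       unsigned int size, unsigned int *total,
		       unsigned int *contig)
{
	if (head >= tail) {
		*total = head - tail;
		*contig = *total;		/* no wrap */
	} else {
		*total = size - (tail - head);	/* wraps past the end */
		*contig = size - tail;		/* run up to the wrap point */
	}
}

int main(void)
{
	unsigned int total, contig;

	ring_avail(10, 4, 16, &total, &contig);
	assert(total == 6 && contig == 6);
	ring_avail(2, 12, 16, &total, &contig);
	assert(total == 6 && contig == 4);
	printf("ring arithmetic ok\n");
	return 0;
}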
-
-/*****************************************************************************/
-
-/*
- * Set up and carry out any delayed commands. There is only a small set
- * of slave commands that can be done "off-level". So it is not too
- * difficult to deal with them here.
- */
-
-static void stli_dodelaycmd(struct stliport *portp, cdkctrl_t __iomem *cp)
-{
- int cmd;
-
- if (test_bit(ST_DOSIGS, &portp->state)) {
- if (test_bit(ST_DOFLUSHTX, &portp->state) &&
- test_bit(ST_DOFLUSHRX, &portp->state))
- cmd = A_SETSIGNALSF;
- else if (test_bit(ST_DOFLUSHTX, &portp->state))
- cmd = A_SETSIGNALSFTX;
- else if (test_bit(ST_DOFLUSHRX, &portp->state))
- cmd = A_SETSIGNALSFRX;
- else
- cmd = A_SETSIGNALS;
- clear_bit(ST_DOFLUSHTX, &portp->state);
- clear_bit(ST_DOFLUSHRX, &portp->state);
- clear_bit(ST_DOSIGS, &portp->state);
- memcpy_toio((void __iomem *) &(cp->args[0]), (void *) &portp->asig,
- sizeof(asysigs_t));
- writel(0, &cp->status);
- writel(cmd, &cp->cmd);
- set_bit(ST_CMDING, &portp->state);
- } else if (test_bit(ST_DOFLUSHTX, &portp->state) ||
- test_bit(ST_DOFLUSHRX, &portp->state)) {
- cmd = ((test_bit(ST_DOFLUSHTX, &portp->state)) ? FLUSHTX : 0);
- cmd |= ((test_bit(ST_DOFLUSHRX, &portp->state)) ? FLUSHRX : 0);
- clear_bit(ST_DOFLUSHTX, &portp->state);
- clear_bit(ST_DOFLUSHRX, &portp->state);
- memcpy_toio((void __iomem *) &(cp->args[0]), (void *) &cmd, sizeof(int));
- writel(0, &cp->status);
- writel(A_FLUSH, &cp->cmd);
- set_bit(ST_CMDING, &portp->state);
- }
-}
-
-/*****************************************************************************/
-
-/*
- * Host command service checking. This handles commands or messages
- * coming from the slave to the host. Must have board shared memory
- * enabled and interrupts off when called. Notice that by servicing the
- * read data last we don't need to change the shared memory pointer
- * during processing (which is a slow IO operation).
- * Return value indicates if this port is still awaiting actions from
- * the slave (like open, command, or even TX data being sent). If 0
- * then port is still busy, otherwise no longer busy.
- */
-
-static int stli_hostcmd(struct stlibrd *brdp, struct stliport *portp)
-{
- cdkasy_t __iomem *ap;
- cdkctrl_t __iomem *cp;
- struct tty_struct *tty;
- asynotify_t nt;
- unsigned long oldsigs;
- int rc, donerx;
-
- ap = (cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr);
- cp = &ap->ctrl;
-
-/*
- * Check if we are waiting for an open completion message.
- */
- if (test_bit(ST_OPENING, &portp->state)) {
- rc = readl(&cp->openarg);
- if (readb(&cp->open) == 0 && rc != 0) {
- if (rc > 0)
- rc--;
- writel(0, &cp->openarg);
- portp->rc = rc;
- clear_bit(ST_OPENING, &portp->state);
- wake_up_interruptible(&portp->raw_wait);
- }
- }
-
-/*
- * Check if we are waiting for a close completion message.
- */
- if (test_bit(ST_CLOSING, &portp->state)) {
- rc = (int) readl(&cp->closearg);
- if (readb(&cp->close) == 0 && rc != 0) {
- if (rc > 0)
- rc--;
- writel(0, &cp->closearg);
- portp->rc = rc;
- clear_bit(ST_CLOSING, &portp->state);
- wake_up_interruptible(&portp->raw_wait);
- }
- }
-
-/*
- * Check if we are waiting for a command completion message. We may
- * need to copy out the command results associated with this command.
- */
- if (test_bit(ST_CMDING, &portp->state)) {
- rc = readl(&cp->status);
- if (readl(&cp->cmd) == 0 && rc != 0) {
- if (rc > 0)
- rc--;
- if (portp->argp != NULL) {
- memcpy_fromio(portp->argp, (void __iomem *) &(cp->args[0]),
- portp->argsize);
- portp->argp = NULL;
- }
- writel(0, &cp->status);
- portp->rc = rc;
- clear_bit(ST_CMDING, &portp->state);
- stli_dodelaycmd(portp, cp);
- wake_up_interruptible(&portp->raw_wait);
- }
- }
-
-/*
- * Check for any notification messages ready. This includes lots of
- * different types of events - RX chars ready, RX break received,
- * TX data low or empty in the slave, modem signals changed state.
- */
- donerx = 0;
-
- if (ap->notify) {
- nt = ap->changed;
- ap->notify = 0;
- tty = tty_port_tty_get(&portp->port);
-
- if (nt.signal & SG_DCD) {
- oldsigs = portp->sigs;
- portp->sigs = stli_mktiocm(nt.sigvalue);
- clear_bit(ST_GETSIGS, &portp->state);
- if ((portp->sigs & TIOCM_CD) &&
- ((oldsigs & TIOCM_CD) == 0))
- wake_up_interruptible(&portp->port.open_wait);
- if ((oldsigs & TIOCM_CD) &&
- ((portp->sigs & TIOCM_CD) == 0)) {
- if (portp->port.flags & ASYNC_CHECK_CD) {
- if (tty)
- tty_hangup(tty);
- }
- }
- }
-
- if (nt.data & DT_TXEMPTY)
- clear_bit(ST_TXBUSY, &portp->state);
- if (nt.data & (DT_TXEMPTY | DT_TXLOW)) {
- if (tty != NULL) {
- tty_wakeup(tty);
- EBRDENABLE(brdp);
- }
- }
-
- if ((nt.data & DT_RXBREAK) && (portp->rxmarkmsk & BRKINT)) {
- if (tty != NULL) {
- tty_insert_flip_char(tty, 0, TTY_BREAK);
- if (portp->port.flags & ASYNC_SAK) {
- do_SAK(tty);
- EBRDENABLE(brdp);
- }
- tty_schedule_flip(tty);
- }
- }
- tty_kref_put(tty);
-
- if (nt.data & DT_RXBUSY) {
- donerx++;
- stli_read(brdp, portp);
- }
- }
-
-/*
- * It might seem odd that we are checking for more RX chars here.
- * But, we need to handle the case where the tty buffer was previously
- * filled, but we had more characters to pass up. The slave will not
- * send any more RX notify messages until the RX buffer has been emptied.
- * But it will leave the service bits on (since the buffer is not empty).
- * So from here we can try to process more RX chars.
- */
- if ((!donerx) && test_bit(ST_RXING, &portp->state)) {
- clear_bit(ST_RXING, &portp->state);
- stli_read(brdp, portp);
- }
-
- return((test_bit(ST_OPENING, &portp->state) ||
- test_bit(ST_CLOSING, &portp->state) ||
- test_bit(ST_CMDING, &portp->state) ||
- test_bit(ST_TXBUSY, &portp->state) ||
- test_bit(ST_RXING, &portp->state)) ? 0 : 1);
-}
-
-/*****************************************************************************/
-
-/*
- * Service all ports on a particular board. Assumes that the boards
- * shared memory is enabled, and that the page pointer is pointed
- * at the cdk header structure.
- */
-
-static void stli_brdpoll(struct stlibrd *brdp, cdkhdr_t __iomem *hdrp)
-{
- struct stliport *portp;
- unsigned char hostbits[(STL_MAXCHANS / 8) + 1];
- unsigned char slavebits[(STL_MAXCHANS / 8) + 1];
- unsigned char __iomem *slavep;
- int bitpos, bitat, bitsize;
- int channr, nrdevs, slavebitchange;
-
- bitsize = brdp->bitsize;
- nrdevs = brdp->nrdevs;
-
-/*
- * Check if slave wants any service. Basically we try to do as
- * little work as possible here. There are 2 levels of service
- * bits. So if there is nothing to do we bail early. We check
- * 8 service bits at a time in the inner loop, so we can bypass
- * the lot if none of them want service.
- */
- memcpy_fromio(&hostbits[0], (((unsigned char __iomem *) hdrp) + brdp->hostoffset),
- bitsize);
-
- memset(&slavebits[0], 0, bitsize);
- slavebitchange = 0;
-
- for (bitpos = 0; (bitpos < bitsize); bitpos++) {
- if (hostbits[bitpos] == 0)
- continue;
- channr = bitpos * 8;
- for (bitat = 0x1; (channr < nrdevs); channr++, bitat <<= 1) {
- if (hostbits[bitpos] & bitat) {
- portp = brdp->ports[(channr - 1)];
- if (stli_hostcmd(brdp, portp)) {
- slavebitchange++;
- slavebits[bitpos] |= bitat;
- }
- }
- }
- }
-
-/*
- * If any of the ports are no longer busy then update them in the
- * slave request bits. We need to do this after, since a host port
- * service may initiate more slave requests.
- */
- if (slavebitchange) {
- hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- slavep = ((unsigned char __iomem *) hdrp) + brdp->slaveoffset;
- for (bitpos = 0; (bitpos < bitsize); bitpos++) {
- if (readb(slavebits + bitpos))
- writeb(readb(slavep + bitpos) & ~slavebits[bitpos], slavebits + bitpos);
- }
- }
-}
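
/*
 * A standalone sketch of the two-level service-bit scan above: skip
 * whole bytes that are zero, then walk the individual bits to recover
 * the device numbers that want attention. Purely illustrative.
 */
#include <stdio.h>

static void scan_service_bits(const unsigned char *bits, int bitsize,
			      int nrdevs)
{
	int bitpos, channr;
	unsigned char bitat;

	for (bitpos = 0; bitpos < bitsize; bitpos++) {
		if (bits[bitpos] == 0)
			continue;		/* nothing in this byte */
		channr = bitpos * 8;
		for (bitat = 0x1; channr < nrdevs; channr++, bitat <<= 1) {
			if (bits[bitpos] & bitat)
				printf("device %d wants service\n", channr);
		}
	}
}

int main(void)
{
	unsigned char bits[2] = { 0x05, 0x80 };	/* devices 0, 2 and 15 */

	scan_service_bits(bits, 2, 16);
	return 0;
}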
-
-/*****************************************************************************/
-
-/*
- * Driver poll routine. This routine polls the boards in use and passes
- * messages back up to host when necessary. This is actually very
- * CPU efficient: since we always have the kernel timer tick anyway, it
- * adds only a few cycles when idle (board service can be determined
- * very easily), and when loaded it generates no interrupts (with their
- * expensive associated context switches).
- */
-
-static void stli_poll(unsigned long arg)
-{
- cdkhdr_t __iomem *hdrp;
- struct stlibrd *brdp;
- unsigned int brdnr;
-
- mod_timer(&stli_timerlist, STLI_TIMEOUT);
-
-/*
- * Check each board and do any servicing required.
- */
- for (brdnr = 0; (brdnr < stli_nrbrds); brdnr++) {
- brdp = stli_brds[brdnr];
- if (brdp == NULL)
- continue;
- if (!test_bit(BST_STARTED, &brdp->state))
- continue;
-
- spin_lock(&brd_lock);
- EBRDENABLE(brdp);
- hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- if (readb(&hdrp->hostreq))
- stli_brdpoll(brdp, hdrp);
- EBRDDISABLE(brdp);
- spin_unlock(&brd_lock);
- }
-}
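
/*
 * A rough userspace analogue of the timer-driven polling above: wake
 * up on a fixed tick, check a per-board "host request" flag, and only
 * service boards that asked for it. Entirely illustrative; the real
 * driver uses a kernel timer and flags in board shared memory.
 */
#include <stdio.h>
#include <time.h>

#define EXAMPLE_NRBRDS	2

struct example_brd {
	int started;
	int hostreq;	/* set by the "slave" when it wants service */
};

static void example_service(int brdnr, struct example_brd *brdp)
{
	printf("servicing board %d\n", brdnr);
	brdp->hostreq = 0;
}

int main(void)
{
	struct example_brd brds[EXAMPLE_NRBRDS] = { { 1, 1 }, { 1, 0 } };
	struct timespec tick = { 0, 10 * 1000 * 1000 };	/* 10ms */
	int i, loops;

	for (loops = 0; loops < 3; loops++) {
		for (i = 0; i < EXAMPLE_NRBRDS; i++) {
			if (!brds[i].started || !brds[i].hostreq)
				continue;
			example_service(i, &brds[i]);
		}
		nanosleep(&tick, NULL);
	}
	return 0;
}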
-
-/*****************************************************************************/
-
-/*
- * Translate the termios settings into the port setting structure of
- * the slave.
- */
-
-static void stli_mkasyport(struct tty_struct *tty, struct stliport *portp,
- asyport_t *pp, struct ktermios *tiosp)
-{
- memset(pp, 0, sizeof(asyport_t));
-
-/*
- * Start off by setting the baud, char size, parity and stop bit info.
- */
- pp->baudout = tty_get_baud_rate(tty);
- if ((tiosp->c_cflag & CBAUD) == B38400) {
- if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
- pp->baudout = 57600;
- else if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
- pp->baudout = 115200;
- else if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
- pp->baudout = 230400;
- else if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
- pp->baudout = 460800;
- else if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)
- pp->baudout = (portp->baud_base / portp->custom_divisor);
- }
- if (pp->baudout > STL_MAXBAUD)
- pp->baudout = STL_MAXBAUD;
- pp->baudin = pp->baudout;
-
- switch (tiosp->c_cflag & CSIZE) {
- case CS5:
- pp->csize = 5;
- break;
- case CS6:
- pp->csize = 6;
- break;
- case CS7:
- pp->csize = 7;
- break;
- default:
- pp->csize = 8;
- break;
- }
-
- if (tiosp->c_cflag & CSTOPB)
- pp->stopbs = PT_STOP2;
- else
- pp->stopbs = PT_STOP1;
-
- if (tiosp->c_cflag & PARENB) {
- if (tiosp->c_cflag & PARODD)
- pp->parity = PT_ODDPARITY;
- else
- pp->parity = PT_EVENPARITY;
- } else {
- pp->parity = PT_NOPARITY;
- }
-
-/*
- * Set up any flow control options enabled.
- */
- if (tiosp->c_iflag & IXON) {
- pp->flow |= F_IXON;
- if (tiosp->c_iflag & IXANY)
- pp->flow |= F_IXANY;
- }
- if (tiosp->c_cflag & CRTSCTS)
- pp->flow |= (F_RTSFLOW | F_CTSFLOW);
-
- pp->startin = tiosp->c_cc[VSTART];
- pp->stopin = tiosp->c_cc[VSTOP];
- pp->startout = tiosp->c_cc[VSTART];
- pp->stopout = tiosp->c_cc[VSTOP];
-
-/*
- * Set up the RX char marking mask with those RX error types we must
- * catch. We can get the slave to help us out a little here: it will
- * ignore parity errors and breaks for us, and mark parity errors in
- * the data stream.
- */
- if (tiosp->c_iflag & IGNPAR)
- pp->iflag |= FI_IGNRXERRS;
- if (tiosp->c_iflag & IGNBRK)
- pp->iflag |= FI_IGNBREAK;
-
- portp->rxmarkmsk = 0;
- if (tiosp->c_iflag & (INPCK | PARMRK))
- pp->iflag |= FI_1MARKRXERRS;
- if (tiosp->c_iflag & BRKINT)
- portp->rxmarkmsk |= BRKINT;
-
-/*
- * Set up clocal processing as required.
- */
- if (tiosp->c_cflag & CLOCAL)
- portp->port.flags &= ~ASYNC_CHECK_CD;
- else
- portp->port.flags |= ASYNC_CHECK_CD;
-
-/*
- * Transfer any persistent flags into the asyport structure.
- */
- pp->pflag = (portp->pflag & 0xffff);
- pp->vmin = (portp->pflag & P_RXIMIN) ? 1 : 0;
- pp->vtime = (portp->pflag & P_RXITIME) ? 1 : 0;
- pp->cc[1] = (portp->pflag & P_RXTHOLD) ? 1 : 0;
-}
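
/*
 * A userspace sketch of the same termios decoding done above: pull the
 * character size, stop bits and parity out of c_cflag with the
 * standard <termios.h> masks. Illustrative only.
 */
#include <stdio.h>
#include <termios.h>

static void describe_cflag(tcflag_t cflag)
{
	int csize;

	switch (cflag & CSIZE) {
	case CS5: csize = 5; break;
	case CS6: csize = 6; break;
	case CS7: csize = 7; break;
	default:  csize = 8; break;
	}
	printf("%d data bits, %s stop bit(s), ", csize,
	       (cflag & CSTOPB) ? "2" : "1");
	if (cflag & PARENB)
		printf("%s parity\n", (cflag & PARODD) ? "odd" : "even");
	else
		printf("no parity\n");
}

int main(void)
{
	describe_cflag(CS8 | CSTOPB | PARENB | PARODD);	/* 8 data, 2 stop, odd */
	describe_cflag(CS7 | PARENB);			/* 7 data, 1 stop, even */
	return 0;
}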
-
-/*****************************************************************************/
-
-/*
- * Construct a slave signals structure for setting the DTR and RTS
- * signals as specified.
- */
-
-static void stli_mkasysigs(asysigs_t *sp, int dtr, int rts)
-{
- memset(sp, 0, sizeof(asysigs_t));
- if (dtr >= 0) {
- sp->signal |= SG_DTR;
- sp->sigvalue |= ((dtr > 0) ? SG_DTR : 0);
- }
- if (rts >= 0) {
- sp->signal |= SG_RTS;
- sp->sigvalue |= ((rts > 0) ? SG_RTS : 0);
- }
-}
-
-/*****************************************************************************/
-
-/*
- * Convert the signals returned from the slave into a local TIOCM type
- * signals value. We keep them locally in TIOCM format.
- */
-
-static long stli_mktiocm(unsigned long sigvalue)
-{
- long tiocm = 0;
- tiocm |= ((sigvalue & SG_DCD) ? TIOCM_CD : 0);
- tiocm |= ((sigvalue & SG_CTS) ? TIOCM_CTS : 0);
- tiocm |= ((sigvalue & SG_RI) ? TIOCM_RI : 0);
- tiocm |= ((sigvalue & SG_DSR) ? TIOCM_DSR : 0);
- tiocm |= ((sigvalue & SG_DTR) ? TIOCM_DTR : 0);
- tiocm |= ((sigvalue & SG_RTS) ? TIOCM_RTS : 0);
- return(tiocm);
-}
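
/*
 * The TIOCM_* encoding produced above is exactly what userspace sees
 * through the TIOCMGET ioctl. A minimal sketch; the device path is an
 * assumption and any serial tty will do.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
	int fd = open("/dev/ttyS0", O_RDONLY | O_NONBLOCK);
	int sigs;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, TIOCMGET, &sigs) == 0)
		printf("DTR=%d RTS=%d CTS=%d DCD=%d DSR=%d RI=%d\n",
		       !!(sigs & TIOCM_DTR), !!(sigs & TIOCM_RTS),
		       !!(sigs & TIOCM_CTS), !!(sigs & TIOCM_CD),
		       !!(sigs & TIOCM_DSR), !!(sigs & TIOCM_RI));
	else
		perror("TIOCMGET");
	close(fd);
	return 0;
}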
-
-/*****************************************************************************/
-
-/*
- * All panels and ports actually attached have been worked out. All
- * we need to do here is set up the appropriate per port data structures.
- */
-
-static int stli_initports(struct stlibrd *brdp)
-{
- struct stliport *portp;
- unsigned int i, panelnr, panelport;
-
- for (i = 0, panelnr = 0, panelport = 0; (i < brdp->nrports); i++) {
- portp = kzalloc(sizeof(struct stliport), GFP_KERNEL);
- if (!portp) {
- printk(KERN_WARNING "istallion: failed to allocate port structure\n");
- continue;
- }
- tty_port_init(&portp->port);
- portp->port.ops = &stli_port_ops;
- portp->magic = STLI_PORTMAGIC;
- portp->portnr = i;
- portp->brdnr = brdp->brdnr;
- portp->panelnr = panelnr;
- portp->baud_base = STL_BAUDBASE;
- portp->port.close_delay = STL_CLOSEDELAY;
- portp->closing_wait = 30 * HZ;
- init_waitqueue_head(&portp->port.open_wait);
- init_waitqueue_head(&portp->port.close_wait);
- init_waitqueue_head(&portp->raw_wait);
- panelport++;
- if (panelport >= brdp->panels[panelnr]) {
- panelport = 0;
- panelnr++;
- }
- brdp->ports[i] = portp;
- }
-
- return 0;
-}
-
-/*****************************************************************************/
-
-/*
- * All the following routines are board specific hardware operations.
- */
-
-static void stli_ecpinit(struct stlibrd *brdp)
-{
- unsigned long memconf;
-
- outb(ECP_ATSTOP, (brdp->iobase + ECP_ATCONFR));
- udelay(10);
- outb(ECP_ATDISABLE, (brdp->iobase + ECP_ATCONFR));
- udelay(100);
-
- memconf = (brdp->memaddr & ECP_ATADDRMASK) >> ECP_ATADDRSHFT;
- outb(memconf, (brdp->iobase + ECP_ATMEMAR));
-}
-
-/*****************************************************************************/
-
-static void stli_ecpenable(struct stlibrd *brdp)
-{
- outb(ECP_ATENABLE, (brdp->iobase + ECP_ATCONFR));
-}
-
-/*****************************************************************************/
-
-static void stli_ecpdisable(struct stlibrd *brdp)
-{
- outb(ECP_ATDISABLE, (brdp->iobase + ECP_ATCONFR));
-}
-
-/*****************************************************************************/
-
-static void __iomem *stli_ecpgetmemptr(struct stlibrd *brdp, unsigned long offset, int line)
-{
- void __iomem *ptr;
- unsigned char val;
-
- if (offset > brdp->memsize) {
- printk(KERN_ERR "istallion: shared memory pointer=%x out of "
- "range at line=%d(%d), brd=%d\n",
- (int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
- val = 0;
- } else {
- ptr = brdp->membase + (offset % ECP_ATPAGESIZE);
- val = (unsigned char) (offset / ECP_ATPAGESIZE);
- }
- outb(val, (brdp->iobase + ECP_ATMEMPR));
- return(ptr);
-}
-
-/*****************************************************************************/
-
-static void stli_ecpreset(struct stlibrd *brdp)
-{
- outb(ECP_ATSTOP, (brdp->iobase + ECP_ATCONFR));
- udelay(10);
- outb(ECP_ATDISABLE, (brdp->iobase + ECP_ATCONFR));
- udelay(500);
-}
-
-/*****************************************************************************/
-
-static void stli_ecpintr(struct stlibrd *brdp)
-{
- outb(0x1, brdp->iobase);
-}
-
-/*****************************************************************************/
-
-/*
- * The following set of functions act on ECP EISA boards.
- */
-
-static void stli_ecpeiinit(struct stlibrd *brdp)
-{
- unsigned long memconf;
-
- outb(0x1, (brdp->iobase + ECP_EIBRDENAB));
- outb(ECP_EISTOP, (brdp->iobase + ECP_EICONFR));
- udelay(10);
- outb(ECP_EIDISABLE, (brdp->iobase + ECP_EICONFR));
- udelay(500);
-
- memconf = (brdp->memaddr & ECP_EIADDRMASKL) >> ECP_EIADDRSHFTL;
- outb(memconf, (brdp->iobase + ECP_EIMEMARL));
- memconf = (brdp->memaddr & ECP_EIADDRMASKH) >> ECP_EIADDRSHFTH;
- outb(memconf, (brdp->iobase + ECP_EIMEMARH));
-}
-
-/*****************************************************************************/
-
-static void stli_ecpeienable(struct stlibrd *brdp)
-{
- outb(ECP_EIENABLE, (brdp->iobase + ECP_EICONFR));
-}
-
-/*****************************************************************************/
-
-static void stli_ecpeidisable(struct stlibrd *brdp)
-{
- outb(ECP_EIDISABLE, (brdp->iobase + ECP_EICONFR));
-}
-
-/*****************************************************************************/
-
-static void __iomem *stli_ecpeigetmemptr(struct stlibrd *brdp, unsigned long offset, int line)
-{
- void __iomem *ptr;
- unsigned char val;
-
- if (offset > brdp->memsize) {
- printk(KERN_ERR "istallion: shared memory pointer=%x out of "
- "range at line=%d(%d), brd=%d\n",
- (int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
- val = 0;
- } else {
- ptr = brdp->membase + (offset % ECP_EIPAGESIZE);
- if (offset < ECP_EIPAGESIZE)
- val = ECP_EIENABLE;
- else
- val = ECP_EIENABLE | 0x40;
- }
- outb(val, (brdp->iobase + ECP_EICONFR));
- return(ptr);
-}
-
-/*****************************************************************************/
-
-static void stli_ecpeireset(struct stlibrd *brdp)
-{
- outb(ECP_EISTOP, (brdp->iobase + ECP_EICONFR));
- udelay(10);
- outb(ECP_EIDISABLE, (brdp->iobase + ECP_EICONFR));
- udelay(500);
-}
-
-/*****************************************************************************/
-
-/*
- * The following set of functions act on ECP MCA boards.
- */
-
-static void stli_ecpmcenable(struct stlibrd *brdp)
-{
- outb(ECP_MCENABLE, (brdp->iobase + ECP_MCCONFR));
-}
-
-/*****************************************************************************/
-
-static void stli_ecpmcdisable(struct stlibrd *brdp)
-{
- outb(ECP_MCDISABLE, (brdp->iobase + ECP_MCCONFR));
-}
-
-/*****************************************************************************/
-
-static void __iomem *stli_ecpmcgetmemptr(struct stlibrd *brdp, unsigned long offset, int line)
-{
- void __iomem *ptr;
- unsigned char val;
-
- if (offset > brdp->memsize) {
- printk(KERN_ERR "istallion: shared memory pointer=%x out of "
- "range at line=%d(%d), brd=%d\n",
- (int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
- val = 0;
- } else {
- ptr = brdp->membase + (offset % ECP_MCPAGESIZE);
- val = ((unsigned char) (offset / ECP_MCPAGESIZE)) | ECP_MCENABLE;
- }
- outb(val, (brdp->iobase + ECP_MCCONFR));
- return(ptr);
-}
-
-/*****************************************************************************/
-
-static void stli_ecpmcreset(struct stlibrd *brdp)
-{
- outb(ECP_MCSTOP, (brdp->iobase + ECP_MCCONFR));
- udelay(10);
- outb(ECP_MCDISABLE, (brdp->iobase + ECP_MCCONFR));
- udelay(500);
-}
-
-/*****************************************************************************/
-
-/*
- * The following set of functions act on ECP PCI boards.
- */
-
-static void stli_ecppciinit(struct stlibrd *brdp)
-{
- outb(ECP_PCISTOP, (brdp->iobase + ECP_PCICONFR));
- udelay(10);
- outb(0, (brdp->iobase + ECP_PCICONFR));
- udelay(500);
-}
-
-/*****************************************************************************/
-
-static void __iomem *stli_ecppcigetmemptr(struct stlibrd *brdp, unsigned long offset, int line)
-{
- void __iomem *ptr;
- unsigned char val;
-
- if (offset > brdp->memsize) {
- printk(KERN_ERR "istallion: shared memory pointer=%x out of "
- "range at line=%d(%d), board=%d\n",
- (int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
- val = 0;
- } else {
- ptr = brdp->membase + (offset % ECP_PCIPAGESIZE);
- val = (offset / ECP_PCIPAGESIZE) << 1;
- }
- outb(val, (brdp->iobase + ECP_PCICONFR));
- return(ptr);
-}
-
-/*****************************************************************************/
-
-static void stli_ecppcireset(struct stlibrd *brdp)
-{
- outb(ECP_PCISTOP, (brdp->iobase + ECP_PCICONFR));
- udelay(10);
- outb(0, (brdp->iobase + ECP_PCICONFR));
- udelay(500);
-}
-
-/*****************************************************************************/
-
-/*
- * The following routines act on ONboards.
- */
-
-static void stli_onbinit(struct stlibrd *brdp)
-{
- unsigned long memconf;
-
- outb(ONB_ATSTOP, (brdp->iobase + ONB_ATCONFR));
- udelay(10);
- outb(ONB_ATDISABLE, (brdp->iobase + ONB_ATCONFR));
- mdelay(1000);
-
- memconf = (brdp->memaddr & ONB_ATADDRMASK) >> ONB_ATADDRSHFT;
- outb(memconf, (brdp->iobase + ONB_ATMEMAR));
- outb(0x1, brdp->iobase);
- mdelay(1);
-}
-
-/*****************************************************************************/
-
-static void stli_onbenable(struct stlibrd *brdp)
-{
- outb((brdp->enabval | ONB_ATENABLE), (brdp->iobase + ONB_ATCONFR));
-}
-
-/*****************************************************************************/
-
-static void stli_onbdisable(struct stlibrd *brdp)
-{
- outb((brdp->enabval | ONB_ATDISABLE), (brdp->iobase + ONB_ATCONFR));
-}
-
-/*****************************************************************************/
-
-static void __iomem *stli_onbgetmemptr(struct stlibrd *brdp, unsigned long offset, int line)
-{
- void __iomem *ptr;
-
- if (offset > brdp->memsize) {
- printk(KERN_ERR "istallion: shared memory pointer=%x out of "
- "range at line=%d(%d), brd=%d\n",
- (int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
- } else {
- ptr = brdp->membase + (offset % ONB_ATPAGESIZE);
- }
- return(ptr);
-}
-
-/*****************************************************************************/
-
-static void stli_onbreset(struct stlibrd *brdp)
-{
- outb(ONB_ATSTOP, (brdp->iobase + ONB_ATCONFR));
- udelay(10);
- outb(ONB_ATDISABLE, (brdp->iobase + ONB_ATCONFR));
- mdelay(1000);
-}
-
-/*****************************************************************************/
-
-/*
- * The following routines act on ONboard EISA.
- */
-
-static void stli_onbeinit(struct stlibrd *brdp)
-{
- unsigned long memconf;
-
- outb(0x1, (brdp->iobase + ONB_EIBRDENAB));
- outb(ONB_EISTOP, (brdp->iobase + ONB_EICONFR));
- udelay(10);
- outb(ONB_EIDISABLE, (brdp->iobase + ONB_EICONFR));
- mdelay(1000);
-
- memconf = (brdp->memaddr & ONB_EIADDRMASKL) >> ONB_EIADDRSHFTL;
- outb(memconf, (brdp->iobase + ONB_EIMEMARL));
- memconf = (brdp->memaddr & ONB_EIADDRMASKH) >> ONB_EIADDRSHFTH;
- outb(memconf, (brdp->iobase + ONB_EIMEMARH));
- outb(0x1, brdp->iobase);
- mdelay(1);
-}
-
-/*****************************************************************************/
-
-static void stli_onbeenable(struct stlibrd *brdp)
-{
- outb(ONB_EIENABLE, (brdp->iobase + ONB_EICONFR));
-}
-
-/*****************************************************************************/
-
-static void stli_onbedisable(struct stlibrd *brdp)
-{
- outb(ONB_EIDISABLE, (brdp->iobase + ONB_EICONFR));
-}
-
-/*****************************************************************************/
-
-static void __iomem *stli_onbegetmemptr(struct stlibrd *brdp, unsigned long offset, int line)
-{
- void __iomem *ptr;
- unsigned char val;
-
- if (offset > brdp->memsize) {
- printk(KERN_ERR "istallion: shared memory pointer=%x out of "
- "range at line=%d(%d), brd=%d\n",
- (int) offset, line, __LINE__, brdp->brdnr);
- ptr = NULL;
- val = 0;
- } else {
- ptr = brdp->membase + (offset % ONB_EIPAGESIZE);
- if (offset < ONB_EIPAGESIZE)
- val = ONB_EIENABLE;
- else
- val = ONB_EIENABLE | 0x40;
- }
- outb(val, (brdp->iobase + ONB_EICONFR));
- return(ptr);
-}
-
-/*****************************************************************************/
-
-static void stli_onbereset(struct stlibrd *brdp)
-{
- outb(ONB_EISTOP, (brdp->iobase + ONB_EICONFR));
- udelay(10);
- outb(ONB_EIDISABLE, (brdp->iobase + ONB_EICONFR));
- mdelay(1000);
-}
-
-/*****************************************************************************/
-
-/*
- * The following routines act on Brumby boards.
- */
-
-static void stli_bbyinit(struct stlibrd *brdp)
-{
- outb(BBY_ATSTOP, (brdp->iobase + BBY_ATCONFR));
- udelay(10);
- outb(0, (brdp->iobase + BBY_ATCONFR));
- mdelay(1000);
- outb(0x1, brdp->iobase);
- mdelay(1);
-}
-
-/*****************************************************************************/
-
-static void __iomem *stli_bbygetmemptr(struct stlibrd *brdp, unsigned long offset, int line)
-{
- void __iomem *ptr;
- unsigned char val;
-
- BUG_ON(offset > brdp->memsize);
-
- ptr = brdp->membase + (offset % BBY_PAGESIZE);
- val = (unsigned char) (offset / BBY_PAGESIZE);
- outb(val, (brdp->iobase + BBY_ATCONFR));
- return(ptr);
-}
-
-/*****************************************************************************/
-
-static void stli_bbyreset(struct stlibrd *brdp)
-{
- outb(BBY_ATSTOP, (brdp->iobase + BBY_ATCONFR));
- udelay(10);
- outb(0, (brdp->iobase + BBY_ATCONFR));
- mdelay(1000);
-}
-
-/*****************************************************************************/
-
-/*
- * The following routines act on original old Stallion boards.
- */
-
-static void stli_stalinit(struct stlibrd *brdp)
-{
- outb(0x1, brdp->iobase);
- mdelay(1000);
-}
-
-/*****************************************************************************/
-
-static void __iomem *stli_stalgetmemptr(struct stlibrd *brdp, unsigned long offset, int line)
-{
- BUG_ON(offset > brdp->memsize);
- return brdp->membase + (offset % STAL_PAGESIZE);
-}
-
-/*****************************************************************************/
-
-static void stli_stalreset(struct stlibrd *brdp)
-{
- u32 __iomem *vecp;
-
- vecp = (u32 __iomem *) (brdp->membase + 0x30);
- writel(0xffff0000, vecp);
- outb(0, brdp->iobase);
- mdelay(1000);
-}
-
-/*****************************************************************************/
-
-/*
- * Try to find an ECP board and initialize it. This handles only ECP
- * board types.
- */
-
-static int stli_initecp(struct stlibrd *brdp)
-{
- cdkecpsig_t sig;
- cdkecpsig_t __iomem *sigsp;
- unsigned int status, nxtid;
- char *name;
- int retval, panelnr, nrports;
-
- if ((brdp->iobase == 0) || (brdp->memaddr == 0)) {
- retval = -ENODEV;
- goto err;
- }
-
- brdp->iosize = ECP_IOSIZE;
-
- if (!request_region(brdp->iobase, brdp->iosize, "istallion")) {
- retval = -EIO;
- goto err;
- }
-
-/*
- * Based on the specific board type, set up the common vars to access
- * and enable shared memory. Set all board specific information now
- * as well.
- */
- switch (brdp->brdtype) {
- case BRD_ECP:
- brdp->memsize = ECP_MEMSIZE;
- brdp->pagesize = ECP_ATPAGESIZE;
- brdp->init = stli_ecpinit;
- brdp->enable = stli_ecpenable;
- brdp->reenable = stli_ecpenable;
- brdp->disable = stli_ecpdisable;
- brdp->getmemptr = stli_ecpgetmemptr;
- brdp->intr = stli_ecpintr;
- brdp->reset = stli_ecpreset;
- name = "serial(EC8/64)";
- break;
-
- case BRD_ECPE:
- brdp->memsize = ECP_MEMSIZE;
- brdp->pagesize = ECP_EIPAGESIZE;
- brdp->init = stli_ecpeiinit;
- brdp->enable = stli_ecpeienable;
- brdp->reenable = stli_ecpeienable;
- brdp->disable = stli_ecpeidisable;
- brdp->getmemptr = stli_ecpeigetmemptr;
- brdp->intr = stli_ecpintr;
- brdp->reset = stli_ecpeireset;
- name = "serial(EC8/64-EI)";
- break;
-
- case BRD_ECPMC:
- brdp->memsize = ECP_MEMSIZE;
- brdp->pagesize = ECP_MCPAGESIZE;
- brdp->init = NULL;
- brdp->enable = stli_ecpmcenable;
- brdp->reenable = stli_ecpmcenable;
- brdp->disable = stli_ecpmcdisable;
- brdp->getmemptr = stli_ecpmcgetmemptr;
- brdp->intr = stli_ecpintr;
- brdp->reset = stli_ecpmcreset;
- name = "serial(EC8/64-MCA)";
- break;
-
- case BRD_ECPPCI:
- brdp->memsize = ECP_PCIMEMSIZE;
- brdp->pagesize = ECP_PCIPAGESIZE;
- brdp->init = stli_ecppciinit;
- brdp->enable = NULL;
- brdp->reenable = NULL;
- brdp->disable = NULL;
- brdp->getmemptr = stli_ecppcigetmemptr;
- brdp->intr = stli_ecpintr;
- brdp->reset = stli_ecppcireset;
- name = "serial(EC/RA-PCI)";
- break;
-
- default:
- retval = -EINVAL;
- goto err_reg;
- }
-
-/*
- * The per-board operations structure is all set up, so now let's go
- * and get the board operational. Firstly initialize board configuration
- * registers. Set the memory mapping info so we can get at the board's
- * shared memory.
- */
- EBRDINIT(brdp);
-
- brdp->membase = ioremap_nocache(brdp->memaddr, brdp->memsize);
- if (brdp->membase == NULL) {
- retval = -ENOMEM;
- goto err_reg;
- }
-
-/*
- * Now that all specific code is set up, enable the shared memory and
- * look for the signature area that will tell us exactly what board
- * this is, and what is connected to it.
- */
- EBRDENABLE(brdp);
- sigsp = (cdkecpsig_t __iomem *) EBRDGETMEMPTR(brdp, CDK_SIGADDR);
- memcpy_fromio(&sig, sigsp, sizeof(cdkecpsig_t));
- EBRDDISABLE(brdp);
-
- if (sig.magic != cpu_to_le32(ECP_MAGIC)) {
- retval = -ENODEV;
- goto err_unmap;
- }
-
-/*
- * Scan through the signature looking at the panels connected to the
- * board. Calculate the total number of ports as we go.
- */
- for (panelnr = 0, nxtid = 0; (panelnr < STL_MAXPANELS); panelnr++) {
- status = sig.panelid[nxtid];
- if ((status & ECH_PNLIDMASK) != nxtid)
- break;
-
- brdp->panelids[panelnr] = status;
- nrports = (status & ECH_PNL16PORT) ? 16 : 8;
- if ((nrports == 16) && ((status & ECH_PNLXPID) == 0))
- nxtid++;
- brdp->panels[panelnr] = nrports;
- brdp->nrports += nrports;
- nxtid++;
- brdp->nrpanels++;
- }
-
-
- set_bit(BST_FOUND, &brdp->state);
- return 0;
-err_unmap:
- iounmap(brdp->membase);
- brdp->membase = NULL;
-err_reg:
- release_region(brdp->iobase, brdp->iosize);
-err:
- return retval;
-}
-
-/*****************************************************************************/
-
-/*
- * Try to find an ONboard, Brumby or Stallion board and initialize it.
- * This handles only these board types.
- */
-
-static int stli_initonb(struct stlibrd *brdp)
-{
- cdkonbsig_t sig;
- cdkonbsig_t __iomem *sigsp;
- char *name;
- int i, retval;
-
-/*
- * Do a basic sanity check on the IO and memory addresses.
- */
- if (brdp->iobase == 0 || brdp->memaddr == 0) {
- retval = -ENODEV;
- goto err;
- }
-
- brdp->iosize = ONB_IOSIZE;
-
- if (!request_region(brdp->iobase, brdp->iosize, "istallion")) {
- retval = -EIO;
- goto err;
- }
-
-/*
- * Based on the specific board type, set up the common vars to access
- * and enable shared memory. Set all board specific information now
- * as well.
- */
- switch (brdp->brdtype) {
- case BRD_ONBOARD:
- case BRD_ONBOARD2:
- brdp->memsize = ONB_MEMSIZE;
- brdp->pagesize = ONB_ATPAGESIZE;
- brdp->init = stli_onbinit;
- brdp->enable = stli_onbenable;
- brdp->reenable = stli_onbenable;
- brdp->disable = stli_onbdisable;
- brdp->getmemptr = stli_onbgetmemptr;
- brdp->intr = stli_ecpintr;
- brdp->reset = stli_onbreset;
- if (brdp->memaddr > 0x100000)
- brdp->enabval = ONB_MEMENABHI;
- else
- brdp->enabval = ONB_MEMENABLO;
- name = "serial(ONBoard)";
- break;
-
- case BRD_ONBOARDE:
- brdp->memsize = ONB_EIMEMSIZE;
- brdp->pagesize = ONB_EIPAGESIZE;
- brdp->init = stli_onbeinit;
- brdp->enable = stli_onbeenable;
- brdp->reenable = stli_onbeenable;
- brdp->disable = stli_onbedisable;
- brdp->getmemptr = stli_onbegetmemptr;
- brdp->intr = stli_ecpintr;
- brdp->reset = stli_onbereset;
- name = "serial(ONBoard/E)";
- break;
-
- case BRD_BRUMBY4:
- brdp->memsize = BBY_MEMSIZE;
- brdp->pagesize = BBY_PAGESIZE;
- brdp->init = stli_bbyinit;
- brdp->enable = NULL;
- brdp->reenable = NULL;
- brdp->disable = NULL;
- brdp->getmemptr = stli_bbygetmemptr;
- brdp->intr = stli_ecpintr;
- brdp->reset = stli_bbyreset;
- name = "serial(Brumby)";
- break;
-
- case BRD_STALLION:
- brdp->memsize = STAL_MEMSIZE;
- brdp->pagesize = STAL_PAGESIZE;
- brdp->init = stli_stalinit;
- brdp->enable = NULL;
- brdp->reenable = NULL;
- brdp->disable = NULL;
- brdp->getmemptr = stli_stalgetmemptr;
- brdp->intr = stli_ecpintr;
- brdp->reset = stli_stalreset;
- name = "serial(Stallion)";
- break;
-
- default:
- retval = -EINVAL;
- goto err_reg;
- }
-
-/*
- * The per-board operations structure is all set up, so now let's go
- * and get the board operational. Firstly initialize board configuration
- * registers. Set the memory mapping info so we can get at the board's
- * shared memory.
- */
- EBRDINIT(brdp);
-
- brdp->membase = ioremap_nocache(brdp->memaddr, brdp->memsize);
- if (brdp->membase == NULL) {
- retval = -ENOMEM;
- goto err_reg;
- }
-
-/*
- * Now that all specific code is set up, enable the shared memory and
- * look for the signature area that will tell us exactly what board
- * this is, and how many ports.
- */
- EBRDENABLE(brdp);
- sigsp = (cdkonbsig_t __iomem *) EBRDGETMEMPTR(brdp, CDK_SIGADDR);
- memcpy_fromio(&sig, sigsp, sizeof(cdkonbsig_t));
- EBRDDISABLE(brdp);
-
- if (sig.magic0 != cpu_to_le16(ONB_MAGIC0) ||
- sig.magic1 != cpu_to_le16(ONB_MAGIC1) ||
- sig.magic2 != cpu_to_le16(ONB_MAGIC2) ||
- sig.magic3 != cpu_to_le16(ONB_MAGIC3)) {
- retval = -ENODEV;
- goto err_unmap;
- }
-
-/*
- * Scan through the signature alive mask and calculate how many ports
- * there are on this board.
- */
- brdp->nrpanels = 1;
- if (sig.amask1) {
- brdp->nrports = 32;
- } else {
- for (i = 0; (i < 16); i++) {
- if (((sig.amask0 << i) & 0x8000) == 0)
- break;
- }
- brdp->nrports = i;
- }
- brdp->panels[0] = brdp->nrports;
-
-
- set_bit(BST_FOUND, &brdp->state);
- return 0;
-err_unmap:
- iounmap(brdp->membase);
- brdp->membase = NULL;
-err_reg:
- release_region(brdp->iobase, brdp->iosize);
-err:
- return retval;
-}
-
-/*****************************************************************************/
-
-/*
- * Start up a running board. This routine is only called after the
- * code has been downloaded to the board and is operational. It will
- * read in the memory map, and get the show on the road...
- */
-
-static int stli_startbrd(struct stlibrd *brdp)
-{
- cdkhdr_t __iomem *hdrp;
- cdkmem_t __iomem *memp;
- cdkasy_t __iomem *ap;
- unsigned long flags;
- unsigned int portnr, nrdevs, i;
- struct stliport *portp;
- int rc = 0;
- u32 memoff;
-
- spin_lock_irqsave(&brd_lock, flags);
- EBRDENABLE(brdp);
- hdrp = (cdkhdr_t __iomem *) EBRDGETMEMPTR(brdp, CDK_CDKADDR);
- nrdevs = hdrp->nrdevs;
-
-#if 0
- printk("%s(%d): CDK version %d.%d.%d --> "
- "nrdevs=%d memp=%x hostp=%x slavep=%x\n",
- __FILE__, __LINE__, readb(&hdrp->ver_release), readb(&hdrp->ver_modification),
- readb(&hdrp->ver_fix), nrdevs, (int) readl(&hdrp->memp), readl(&hdrp->hostp),
- readl(&hdrp->slavep));
-#endif
-
- if (nrdevs < (brdp->nrports + 1)) {
- printk(KERN_ERR "istallion: slave failed to allocate memory for "
- "all devices, devices=%d\n", nrdevs);
- brdp->nrports = nrdevs - 1;
- }
- brdp->nrdevs = nrdevs;
- brdp->hostoffset = hdrp->hostp - CDK_CDKADDR;
- brdp->slaveoffset = hdrp->slavep - CDK_CDKADDR;
- brdp->bitsize = (nrdevs + 7) / 8;
- memoff = readl(&hdrp->memp);
- if (memoff > brdp->memsize) {
- printk(KERN_ERR "istallion: corrupted shared memory region?\n");
- rc = -EIO;
- goto stli_donestartup;
- }
- memp = (cdkmem_t __iomem *) EBRDGETMEMPTR(brdp, memoff);
- if (readw(&memp->dtype) != TYP_ASYNCTRL) {
- printk(KERN_ERR "istallion: no slave control device found\n");
- goto stli_donestartup;
- }
- memp++;
-
-/*
- * Cycle through memory allocation of each port. We are guaranteed to
- * have all ports inside the first page of slave window, so no need to
- * change pages while reading memory map.
- */
- for (i = 1, portnr = 0; (i < nrdevs); i++, portnr++, memp++) {
- if (readw(&memp->dtype) != TYP_ASYNC)
- break;
- portp = brdp->ports[portnr];
- if (portp == NULL)
- break;
- portp->devnr = i;
- portp->addr = readl(&memp->offset);
- portp->reqbit = (unsigned char) (0x1 << (i * 8 / nrdevs));
- portp->portidx = (unsigned char) (i / 8);
- portp->portbit = (unsigned char) (0x1 << (i % 8));
- }
-
- writeb(0xff, &hdrp->slavereq);
-
-/*
- * For each port setup a local copy of the RX and TX buffer offsets
- * and sizes. We do this separate from the above, because we need to
- * move the shared memory page...
- */
- for (i = 1, portnr = 0; (i < nrdevs); i++, portnr++) {
- portp = brdp->ports[portnr];
- if (portp == NULL)
- break;
- if (portp->addr == 0)
- break;
- ap = (cdkasy_t __iomem *) EBRDGETMEMPTR(brdp, portp->addr);
- if (ap != NULL) {
- portp->rxsize = readw(&ap->rxq.size);
- portp->txsize = readw(&ap->txq.size);
- portp->rxoffset = readl(&ap->rxq.offset);
- portp->txoffset = readl(&ap->txq.offset);
- }
- }
-
-stli_donestartup:
- EBRDDISABLE(brdp);
- spin_unlock_irqrestore(&brd_lock, flags);
-
- if (rc == 0)
- set_bit(BST_STARTED, &brdp->state);
-
- if (! stli_timeron) {
- stli_timeron++;
- mod_timer(&stli_timerlist, STLI_TIMEOUT);
- }
-
- return rc;
-}
-
-/*****************************************************************************/
-
-/*
- * Probe and initialize the specified board.
- */
-
-static int __devinit stli_brdinit(struct stlibrd *brdp)
-{
- int retval;
-
- switch (brdp->brdtype) {
- case BRD_ECP:
- case BRD_ECPE:
- case BRD_ECPMC:
- case BRD_ECPPCI:
- retval = stli_initecp(brdp);
- break;
- case BRD_ONBOARD:
- case BRD_ONBOARDE:
- case BRD_ONBOARD2:
- case BRD_BRUMBY4:
- case BRD_STALLION:
- retval = stli_initonb(brdp);
- break;
- default:
- printk(KERN_ERR "istallion: board=%d is unknown board "
- "type=%d\n", brdp->brdnr, brdp->brdtype);
- retval = -ENODEV;
- }
-
- if (retval)
- return retval;
-
- stli_initports(brdp);
- printk(KERN_INFO "istallion: %s found, board=%d io=%x mem=%x "
- "nrpanels=%d nrports=%d\n", stli_brdnames[brdp->brdtype],
- brdp->brdnr, brdp->iobase, (int) brdp->memaddr,
- brdp->nrpanels, brdp->nrports);
- return 0;
-}
-
-#if STLI_EISAPROBE != 0
-/*****************************************************************************/
-
-/*
- * Probe around trying to find where the EISA board's shared memory
- * might be. This is a bit of a hack, but it is the best we can do.
- */
-
-static int stli_eisamemprobe(struct stlibrd *brdp)
-{
- cdkecpsig_t ecpsig, __iomem *ecpsigp;
- cdkonbsig_t onbsig, __iomem *onbsigp;
- int i, foundit;
-
-/*
- * First up we reset the board, to get it into a known state. There
- * are only 2 board types here we need to worry about. Don't use the
- * standard board init routine here; it programs up the shared
- * memory address, and we don't know that yet...
- */
- if (brdp->brdtype == BRD_ECPE) {
- outb(0x1, (brdp->iobase + ECP_EIBRDENAB));
- outb(ECP_EISTOP, (brdp->iobase + ECP_EICONFR));
- udelay(10);
- outb(ECP_EIDISABLE, (brdp->iobase + ECP_EICONFR));
- udelay(500);
- stli_ecpeienable(brdp);
- } else if (brdp->brdtype == BRD_ONBOARDE) {
- outb(0x1, (brdp->iobase + ONB_EIBRDENAB));
- outb(ONB_EISTOP, (brdp->iobase + ONB_EICONFR));
- udelay(10);
- outb(ONB_EIDISABLE, (brdp->iobase + ONB_EICONFR));
- mdelay(100);
- outb(0x1, brdp->iobase);
- mdelay(1);
- stli_onbeenable(brdp);
- } else {
- return -ENODEV;
- }
-
- foundit = 0;
- brdp->memsize = ECP_MEMSIZE;
-
-/*
- * Board shared memory is enabled, so now we have a poke around and
- * see if we can find it.
- */
- for (i = 0; (i < stli_eisamempsize); i++) {
- brdp->memaddr = stli_eisamemprobeaddrs[i];
- brdp->membase = ioremap_nocache(brdp->memaddr, brdp->memsize);
- if (brdp->membase == NULL)
- continue;
-
- if (brdp->brdtype == BRD_ECPE) {
- ecpsigp = stli_ecpeigetmemptr(brdp,
- CDK_SIGADDR, __LINE__);
- memcpy_fromio(&ecpsig, ecpsigp, sizeof(cdkecpsig_t));
- if (ecpsig.magic == cpu_to_le32(ECP_MAGIC))
- foundit = 1;
- } else {
- onbsigp = (cdkonbsig_t __iomem *) stli_onbegetmemptr(brdp,
- CDK_SIGADDR, __LINE__);
- memcpy_fromio(&onbsig, onbsigp, sizeof(cdkonbsig_t));
- if ((onbsig.magic0 == cpu_to_le16(ONB_MAGIC0)) &&
- (onbsig.magic1 == cpu_to_le16(ONB_MAGIC1)) &&
- (onbsig.magic2 == cpu_to_le16(ONB_MAGIC2)) &&
- (onbsig.magic3 == cpu_to_le16(ONB_MAGIC3)))
- foundit = 1;
- }
-
- iounmap(brdp->membase);
- if (foundit)
- break;
- }
-
-/*
- * Regardless of whether we found the shared memory or not we must
- * disable the region. After that return success or failure.
- */
- if (brdp->brdtype == BRD_ECPE)
- stli_ecpeidisable(brdp);
- else
- stli_onbedisable(brdp);
-
- if (! foundit) {
- brdp->memaddr = 0;
- brdp->membase = NULL;
- printk(KERN_ERR "istallion: failed to probe shared memory "
- "region for %s in EISA slot=%d\n",
- stli_brdnames[brdp->brdtype], (brdp->iobase >> 12));
- return -ENODEV;
- }
- return 0;
-}
-#endif
-
-static int stli_getbrdnr(void)
-{
- unsigned int i;
-
- for (i = 0; i < STL_MAXBRDS; i++) {
- if (!stli_brds[i]) {
- if (i >= stli_nrbrds)
- stli_nrbrds = i + 1;
- return i;
- }
- }
- return -1;
-}
-
-#if STLI_EISAPROBE != 0
-/*****************************************************************************/
-
-/*
- * Probe around and try to find any EISA boards in system. The biggest
- * problem here is finding out what memory address is associated with
- * an EISA board after it is found. The registers of the ECPE and
- * ONboardE are not readable - so we can't read them from there. We
- * don't have access to the EISA CMOS (or EISA BIOS) so we don't
- * actually have any way to find out the real value. The best we can
- * do is go probing around in the usual places hoping we can find it.
- */
-
-static int __init stli_findeisabrds(void)
-{
- struct stlibrd *brdp;
- unsigned int iobase, eid, i;
- int brdnr, found = 0;
-
-/*
- * Firstly check if this is an EISA system. If this is not an EISA system then
- * don't bother going any further!
- */
-	if (!EISA_bus)
- return 0;
-
-/*
- * Looks like an EISA system, so go searching for EISA boards.
- */
- for (iobase = 0x1000; (iobase <= 0xc000); iobase += 0x1000) {
- outb(0xff, (iobase + 0xc80));
- eid = inb(iobase + 0xc80);
- eid |= inb(iobase + 0xc81) << 8;
- if (eid != STL_EISAID)
- continue;
-
-/*
- * We have found a board. Need to check if this board was
- * statically configured already (just in case!).
- */
- for (i = 0; (i < STL_MAXBRDS); i++) {
- brdp = stli_brds[i];
- if (brdp == NULL)
- continue;
- if (brdp->iobase == iobase)
- break;
- }
- if (i < STL_MAXBRDS)
- continue;
-
-/*
- * We have found a Stallion board and it is not configured already.
- * Allocate a board structure and initialize it.
- */
- if ((brdp = stli_allocbrd()) == NULL)
- return found ? : -ENOMEM;
- brdnr = stli_getbrdnr();
- if (brdnr < 0)
- return found ? : -ENOMEM;
- brdp->brdnr = (unsigned int)brdnr;
- eid = inb(iobase + 0xc82);
- if (eid == ECP_EISAID)
- brdp->brdtype = BRD_ECPE;
- else if (eid == ONB_EISAID)
- brdp->brdtype = BRD_ONBOARDE;
- else
- brdp->brdtype = BRD_UNKNOWN;
- brdp->iobase = iobase;
- outb(0x1, (iobase + 0xc84));
- if (stli_eisamemprobe(brdp))
- outb(0, (iobase + 0xc84));
- if (stli_brdinit(brdp) < 0) {
- kfree(brdp);
- continue;
- }
-
- stli_brds[brdp->brdnr] = brdp;
- found++;
-
- for (i = 0; i < brdp->nrports; i++)
- tty_register_device(stli_serial,
- brdp->brdnr * STL_MAXPORTS + i, NULL);
- }
-
- return found;
-}
-#else
-static inline int stli_findeisabrds(void) { return 0; }
-#endif
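
/*
 * A hedged userspace sketch of the slot-ID probe described above: EISA
 * boards expose a product ID at slot I/O base + 0xc80. This needs root
 * and x86 port I/O (ioperm/inb/outb from <sys/io.h>), and the ID value
 * checked here is only a placeholder, not the real Stallion EISA ID.
 */
#include <stdio.h>
#include <sys/io.h>

#define EXAMPLE_EISAID	0x1234	/* placeholder product ID */

int main(void)
{
	unsigned int iobase, eid;

	if (ioperm(0x1000, 0xc000, 1) != 0) {
		perror("ioperm");
		return 1;
	}
	for (iobase = 0x1000; iobase <= 0xc000; iobase += 0x1000) {
		outb(0xff, iobase + 0xc80);	/* select the ID register */
		eid = inb(iobase + 0xc80);
		eid |= inb(iobase + 0xc81) << 8;
		if (eid == EXAMPLE_EISAID)
			printf("candidate board at slot base 0x%x\n", iobase);
	}
	return 0;
}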
-
-/*****************************************************************************/
-
-/*
- * Find the next available board number that is free.
- */
-
-/*****************************************************************************/
-
-/*
- * We have a Stallion board. Allocate a board structure and
- * initialize it. Read its IO and MEMORY resources from PCI
- * configuration space.
- */
-
-static int __devinit stli_pciprobe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct stlibrd *brdp;
- unsigned int i;
- int brdnr, retval = -EIO;
-
- retval = pci_enable_device(pdev);
- if (retval)
- goto err;
- brdp = stli_allocbrd();
- if (brdp == NULL) {
- retval = -ENOMEM;
- goto err;
- }
- mutex_lock(&stli_brdslock);
- brdnr = stli_getbrdnr();
- if (brdnr < 0) {
- printk(KERN_INFO "istallion: too many boards found, "
- "maximum supported %d\n", STL_MAXBRDS);
- mutex_unlock(&stli_brdslock);
- retval = -EIO;
- goto err_fr;
- }
- brdp->brdnr = (unsigned int)brdnr;
- stli_brds[brdp->brdnr] = brdp;
- mutex_unlock(&stli_brdslock);
- brdp->brdtype = BRD_ECPPCI;
-/*
- * We have all resources from the board, so lets setup the actual
- * board structure now.
- */
- brdp->iobase = pci_resource_start(pdev, 3);
- brdp->memaddr = pci_resource_start(pdev, 2);
- retval = stli_brdinit(brdp);
- if (retval)
- goto err_null;
-
- set_bit(BST_PROBED, &brdp->state);
- pci_set_drvdata(pdev, brdp);
-
- EBRDENABLE(brdp);
- brdp->enable = NULL;
- brdp->disable = NULL;
-
- for (i = 0; i < brdp->nrports; i++)
- tty_register_device(stli_serial, brdp->brdnr * STL_MAXPORTS + i,
- &pdev->dev);
-
- return 0;
-err_null:
- stli_brds[brdp->brdnr] = NULL;
-err_fr:
- kfree(brdp);
-err:
- return retval;
-}
-
-static void __devexit stli_pciremove(struct pci_dev *pdev)
-{
- struct stlibrd *brdp = pci_get_drvdata(pdev);
-
- stli_cleanup_ports(brdp);
-
- iounmap(brdp->membase);
- if (brdp->iosize > 0)
- release_region(brdp->iobase, brdp->iosize);
-
- stli_brds[brdp->brdnr] = NULL;
- kfree(brdp);
-}
-
-static struct pci_driver stli_pcidriver = {
- .name = "istallion",
- .id_table = istallion_pci_tbl,
- .probe = stli_pciprobe,
- .remove = __devexit_p(stli_pciremove)
-};
-/*****************************************************************************/
-
-/*
- * Allocate a new board structure. Fill out the basic info in it.
- */
-
-static struct stlibrd *stli_allocbrd(void)
-{
- struct stlibrd *brdp;
-
- brdp = kzalloc(sizeof(struct stlibrd), GFP_KERNEL);
- if (!brdp) {
- printk(KERN_ERR "istallion: failed to allocate memory "
- "(size=%Zd)\n", sizeof(struct stlibrd));
- return NULL;
- }
- brdp->magic = STLI_BOARDMAGIC;
- return brdp;
-}
-
-/*****************************************************************************/
-
-/*
- * Scan through all the boards in the configuration and see what we
- * can find.
- */
-
-static int __init stli_initbrds(void)
-{
- struct stlibrd *brdp, *nxtbrdp;
- struct stlconf conf;
- unsigned int i, j, found = 0;
- int retval;
-
- for (stli_nrbrds = 0; stli_nrbrds < ARRAY_SIZE(stli_brdsp);
- stli_nrbrds++) {
- memset(&conf, 0, sizeof(conf));
- if (stli_parsebrd(&conf, stli_brdsp[stli_nrbrds]) == 0)
- continue;
- if ((brdp = stli_allocbrd()) == NULL)
- continue;
- brdp->brdnr = stli_nrbrds;
- brdp->brdtype = conf.brdtype;
- brdp->iobase = conf.ioaddr1;
- brdp->memaddr = conf.memaddr;
- if (stli_brdinit(brdp) < 0) {
- kfree(brdp);
- continue;
- }
- stli_brds[brdp->brdnr] = brdp;
- found++;
-
- for (i = 0; i < brdp->nrports; i++)
- tty_register_device(stli_serial,
- brdp->brdnr * STL_MAXPORTS + i, NULL);
- }
-
- retval = stli_findeisabrds();
- if (retval > 0)
- found += retval;
-
-/*
- * All found boards are initialized. Now for a little optimization, if
- * no boards are sharing the "shared memory" regions then we can just
- * leave them all enabled. This is in fact the usual case (a sketch of
- * a symmetric overlap test follows this function).
- */
- stli_shared = 0;
- if (stli_nrbrds > 1) {
- for (i = 0; (i < stli_nrbrds); i++) {
- brdp = stli_brds[i];
- if (brdp == NULL)
- continue;
- for (j = i + 1; (j < stli_nrbrds); j++) {
- nxtbrdp = stli_brds[j];
- if (nxtbrdp == NULL)
- continue;
- if ((brdp->membase >= nxtbrdp->membase) &&
- (brdp->membase <= (nxtbrdp->membase +
- nxtbrdp->memsize - 1))) {
- stli_shared++;
- break;
- }
- }
- }
- }
-
- if (stli_shared == 0) {
- for (i = 0; (i < stli_nrbrds); i++) {
- brdp = stli_brds[i];
- if (brdp == NULL)
- continue;
- if (test_bit(BST_FOUND, &brdp->state)) {
- EBRDENABLE(brdp);
- brdp->enable = NULL;
- brdp->disable = NULL;
- }
- }
- }
-
- retval = pci_register_driver(&stli_pcidriver);
- if (retval && found == 0) {
-		printk(KERN_ERR "istallion: no ISA or EISA boards found and the "
-			"PCI driver could not be registered!\n");
- goto err;
- }
-
- return 0;
-err:
- return retval;
-}
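The overlap scan in stli_initbrds() flags two boards as sharing memory only when the first board's mapped base falls inside the second board's window, which covers the common back-to-back configurations. For reference, a fully symmetric interval-overlap test looks like the sketch below; it is an illustrative helper with hypothetical names, not code taken from the driver.

/*
 * Illustrative only: two windows [a, a + alen) and [b, b + blen)
 * overlap exactly when each one starts before the other one ends.
 */
static int windows_overlap(unsigned long a, unsigned long alen,
                           unsigned long b, unsigned long blen)
{
        return a < b + blen && b < a + alen;
}

Used in place of the one-sided membase check, such a test reports the overlap no matter which board's window starts first.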
-
-/*****************************************************************************/
-
-/*
- * Code to handle a "staliomem" read operation. This device is the
- * contents of the board shared memory. It is used for downloading
- * the slave image (and debugging :-)
- */
-
-static ssize_t stli_memread(struct file *fp, char __user *buf, size_t count, loff_t *offp)
-{
- unsigned long flags;
- void __iomem *memptr;
- struct stlibrd *brdp;
- unsigned int brdnr;
- int size, n;
- void *p;
- loff_t off = *offp;
-
- brdnr = iminor(fp->f_path.dentry->d_inode);
- if (brdnr >= stli_nrbrds)
- return -ENODEV;
- brdp = stli_brds[brdnr];
- if (brdp == NULL)
- return -ENODEV;
- if (brdp->state == 0)
- return -ENODEV;
- if (off >= brdp->memsize || off + count < off)
- return 0;
-
- size = min(count, (size_t)(brdp->memsize - off));
-
- /*
- * Copy the data a page at a time
- */
-
- p = (void *)__get_free_page(GFP_KERNEL);
- if(p == NULL)
- return -ENOMEM;
-
- while (size > 0) {
- spin_lock_irqsave(&brd_lock, flags);
- EBRDENABLE(brdp);
- memptr = EBRDGETMEMPTR(brdp, off);
- n = min(size, (int)(brdp->pagesize - (((unsigned long) off) % brdp->pagesize)));
- n = min(n, (int)PAGE_SIZE);
- memcpy_fromio(p, memptr, n);
- EBRDDISABLE(brdp);
- spin_unlock_irqrestore(&brd_lock, flags);
- if (copy_to_user(buf, p, n)) {
- count = -EFAULT;
- goto out;
- }
- off += n;
- buf += n;
- size -= n;
- }
-out:
- *offp = off;
- free_page((unsigned long)p);
- return count;
-}
-
-/*****************************************************************************/
-
-/*
- * Code to handle a "staliomem" write operation. This device is the
- * contents of the board shared memory. It is used for downloading
- * the slave image (and debugging :-)
- *
- * FIXME: copy under lock
- */
-
-static ssize_t stli_memwrite(struct file *fp, const char __user *buf, size_t count, loff_t *offp)
-{
- unsigned long flags;
- void __iomem *memptr;
- struct stlibrd *brdp;
- char __user *chbuf;
- unsigned int brdnr;
- int size, n;
- void *p;
- loff_t off = *offp;
-
- brdnr = iminor(fp->f_path.dentry->d_inode);
-
- if (brdnr >= stli_nrbrds)
- return -ENODEV;
- brdp = stli_brds[brdnr];
- if (brdp == NULL)
- return -ENODEV;
- if (brdp->state == 0)
- return -ENODEV;
- if (off >= brdp->memsize || off + count < off)
- return 0;
-
- chbuf = (char __user *) buf;
- size = min(count, (size_t)(brdp->memsize - off));
-
- /*
- * Copy the data a page at a time
- */
-
- p = (void *)__get_free_page(GFP_KERNEL);
- if(p == NULL)
- return -ENOMEM;
-
- while (size > 0) {
- n = min(size, (int)(brdp->pagesize - (((unsigned long) off) % brdp->pagesize)));
- n = min(n, (int)PAGE_SIZE);
- if (copy_from_user(p, chbuf, n)) {
- if (count == 0)
- count = -EFAULT;
- goto out;
- }
- spin_lock_irqsave(&brd_lock, flags);
- EBRDENABLE(brdp);
- memptr = EBRDGETMEMPTR(brdp, off);
- memcpy_toio(memptr, p, n);
- EBRDDISABLE(brdp);
- spin_unlock_irqrestore(&brd_lock, flags);
- off += n;
- chbuf += n;
- size -= n;
- }
-out:
- free_page((unsigned long) p);
- *offp = off;
- return count;
-}
-
-/*****************************************************************************/
-
-/*
- * Return the board stats structure to user app.
- */
-
-static int stli_getbrdstats(combrd_t __user *bp)
-{
- struct stlibrd *brdp;
- unsigned int i;
- combrd_t stli_brdstats;
-
- if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
- return -EFAULT;
- if (stli_brdstats.brd >= STL_MAXBRDS)
- return -ENODEV;
- brdp = stli_brds[stli_brdstats.brd];
- if (brdp == NULL)
- return -ENODEV;
-
- memset(&stli_brdstats, 0, sizeof(combrd_t));
-
- stli_brdstats.brd = brdp->brdnr;
- stli_brdstats.type = brdp->brdtype;
- stli_brdstats.hwid = 0;
- stli_brdstats.state = brdp->state;
- stli_brdstats.ioaddr = brdp->iobase;
- stli_brdstats.memaddr = brdp->memaddr;
- stli_brdstats.nrpanels = brdp->nrpanels;
- stli_brdstats.nrports = brdp->nrports;
- for (i = 0; (i < brdp->nrpanels); i++) {
- stli_brdstats.panels[i].panel = i;
- stli_brdstats.panels[i].hwid = brdp->panelids[i];
- stli_brdstats.panels[i].nrports = brdp->panels[i];
- }
-
- if (copy_to_user(bp, &stli_brdstats, sizeof(combrd_t)))
- return -EFAULT;
- return 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Resolve the referenced port number into a port struct pointer.
- */
-
-static struct stliport *stli_getport(unsigned int brdnr, unsigned int panelnr,
- unsigned int portnr)
-{
- struct stlibrd *brdp;
- unsigned int i;
-
- if (brdnr >= STL_MAXBRDS)
- return NULL;
- brdp = stli_brds[brdnr];
- if (brdp == NULL)
- return NULL;
- for (i = 0; (i < panelnr); i++)
- portnr += brdp->panels[i];
- if (portnr >= brdp->nrports)
- return NULL;
- return brdp->ports[portnr];
-}
-
-/*****************************************************************************/
-
-/*
- * Gather the stats for the given port into the driver's comstats
- * structure, querying the slave for its counters if the board has
- * been started.
- */
-
-static int stli_portcmdstats(struct tty_struct *tty, struct stliport *portp)
-{
- unsigned long flags;
- struct stlibrd *brdp;
- int rc;
-
- memset(&stli_comstats, 0, sizeof(comstats_t));
-
- if (portp == NULL)
- return -ENODEV;
- brdp = stli_brds[portp->brdnr];
- if (brdp == NULL)
- return -ENODEV;
-
- mutex_lock(&portp->port.mutex);
- if (test_bit(BST_STARTED, &brdp->state)) {
- if ((rc = stli_cmdwait(brdp, portp, A_GETSTATS,
- &stli_cdkstats, sizeof(asystats_t), 1)) < 0) {
- mutex_unlock(&portp->port.mutex);
- return rc;
- }
- } else {
- memset(&stli_cdkstats, 0, sizeof(asystats_t));
- }
-
- stli_comstats.brd = portp->brdnr;
- stli_comstats.panel = portp->panelnr;
- stli_comstats.port = portp->portnr;
- stli_comstats.state = portp->state;
- stli_comstats.flags = portp->port.flags;
-
- spin_lock_irqsave(&brd_lock, flags);
- if (tty != NULL) {
- if (portp->port.tty == tty) {
- stli_comstats.ttystate = tty->flags;
- stli_comstats.rxbuffered = -1;
- if (tty->termios != NULL) {
- stli_comstats.cflags = tty->termios->c_cflag;
- stli_comstats.iflags = tty->termios->c_iflag;
- stli_comstats.oflags = tty->termios->c_oflag;
- stli_comstats.lflags = tty->termios->c_lflag;
- }
- }
- }
- spin_unlock_irqrestore(&brd_lock, flags);
-
- stli_comstats.txtotal = stli_cdkstats.txchars;
- stli_comstats.rxtotal = stli_cdkstats.rxchars + stli_cdkstats.ringover;
- stli_comstats.txbuffered = stli_cdkstats.txringq;
- stli_comstats.rxbuffered += stli_cdkstats.rxringq;
- stli_comstats.rxoverrun = stli_cdkstats.overruns;
- stli_comstats.rxparity = stli_cdkstats.parity;
- stli_comstats.rxframing = stli_cdkstats.framing;
- stli_comstats.rxlost = stli_cdkstats.ringover;
- stli_comstats.rxbreaks = stli_cdkstats.rxbreaks;
- stli_comstats.txbreaks = stli_cdkstats.txbreaks;
- stli_comstats.txxon = stli_cdkstats.txstart;
- stli_comstats.txxoff = stli_cdkstats.txstop;
- stli_comstats.rxxon = stli_cdkstats.rxstart;
- stli_comstats.rxxoff = stli_cdkstats.rxstop;
- stli_comstats.rxrtsoff = stli_cdkstats.rtscnt / 2;
- stli_comstats.rxrtson = stli_cdkstats.rtscnt - stli_comstats.rxrtsoff;
- stli_comstats.modem = stli_cdkstats.dcdcnt;
- stli_comstats.hwid = stli_cdkstats.hwid;
- stli_comstats.signals = stli_mktiocm(stli_cdkstats.signals);
- mutex_unlock(&portp->port.mutex);
-
- return 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Return the port stats structure to the user app. A NULL port struct
- * pointer passed in means that we need to find out from the app
- * which port to get stats for (used through the board control device).
- */
-
-static int stli_getportstats(struct tty_struct *tty, struct stliport *portp,
- comstats_t __user *cp)
-{
- struct stlibrd *brdp;
- int rc;
-
- if (!portp) {
- if (copy_from_user(&stli_comstats, cp, sizeof(comstats_t)))
- return -EFAULT;
- portp = stli_getport(stli_comstats.brd, stli_comstats.panel,
- stli_comstats.port);
- if (!portp)
- return -ENODEV;
- }
-
- brdp = stli_brds[portp->brdnr];
- if (!brdp)
- return -ENODEV;
-
- if ((rc = stli_portcmdstats(tty, portp)) < 0)
- return rc;
-
- return copy_to_user(cp, &stli_comstats, sizeof(comstats_t)) ?
- -EFAULT : 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Clear the port stats structure. We also return it zeroed out...
- */
-
-static int stli_clrportstats(struct stliport *portp, comstats_t __user *cp)
-{
- struct stlibrd *brdp;
- int rc;
-
- if (!portp) {
- if (copy_from_user(&stli_comstats, cp, sizeof(comstats_t)))
- return -EFAULT;
- portp = stli_getport(stli_comstats.brd, stli_comstats.panel,
- stli_comstats.port);
- if (!portp)
- return -ENODEV;
- }
-
- brdp = stli_brds[portp->brdnr];
- if (!brdp)
- return -ENODEV;
-
- mutex_lock(&portp->port.mutex);
-
- if (test_bit(BST_STARTED, &brdp->state)) {
- if ((rc = stli_cmdwait(brdp, portp, A_CLEARSTATS, NULL, 0, 0)) < 0) {
- mutex_unlock(&portp->port.mutex);
- return rc;
- }
- }
-
- memset(&stli_comstats, 0, sizeof(comstats_t));
- stli_comstats.brd = portp->brdnr;
- stli_comstats.panel = portp->panelnr;
- stli_comstats.port = portp->portnr;
- mutex_unlock(&portp->port.mutex);
-
- if (copy_to_user(cp, &stli_comstats, sizeof(comstats_t)))
- return -EFAULT;
- return 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Return the entire driver port structure to a user app.
- */
-
-static int stli_getportstruct(struct stliport __user *arg)
-{
- struct stliport stli_dummyport;
- struct stliport *portp;
-
- if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
- return -EFAULT;
- portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
- stli_dummyport.portnr);
- if (!portp)
- return -ENODEV;
- if (copy_to_user(arg, portp, sizeof(struct stliport)))
- return -EFAULT;
- return 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Return the entire driver board structure to a user app.
- */
-
-static int stli_getbrdstruct(struct stlibrd __user *arg)
-{
- struct stlibrd stli_dummybrd;
- struct stlibrd *brdp;
-
- if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
- return -EFAULT;
- if (stli_dummybrd.brdnr >= STL_MAXBRDS)
- return -ENODEV;
- brdp = stli_brds[stli_dummybrd.brdnr];
- if (!brdp)
- return -ENODEV;
- if (copy_to_user(arg, brdp, sizeof(struct stlibrd)))
- return -EFAULT;
- return 0;
-}
-
-/*****************************************************************************/
-
-/*
- * The "staliomem" device is also required to do some special operations on
- * the board. We need to be able to send an interrupt to the board,
- * reset it, and start/stop it.
- */
-
-static long stli_memioctl(struct file *fp, unsigned int cmd, unsigned long arg)
-{
- struct stlibrd *brdp;
- int brdnr, rc, done;
- void __user *argp = (void __user *)arg;
-
-/*
- * First up handle the board independent ioctls.
- */
- done = 0;
- rc = 0;
-
- switch (cmd) {
- case COM_GETPORTSTATS:
- rc = stli_getportstats(NULL, NULL, argp);
- done++;
- break;
- case COM_CLRPORTSTATS:
- rc = stli_clrportstats(NULL, argp);
- done++;
- break;
- case COM_GETBRDSTATS:
- rc = stli_getbrdstats(argp);
- done++;
- break;
- case COM_READPORT:
- rc = stli_getportstruct(argp);
- done++;
- break;
- case COM_READBOARD:
- rc = stli_getbrdstruct(argp);
- done++;
- break;
- }
- if (done)
- return rc;
-
-/*
- * Now handle the board specific ioctls. These all depend on the
- * minor number of the device they were called from.
- */
- brdnr = iminor(fp->f_dentry->d_inode);
- if (brdnr >= STL_MAXBRDS)
- return -ENODEV;
- brdp = stli_brds[brdnr];
- if (!brdp)
- return -ENODEV;
- if (brdp->state == 0)
- return -ENODEV;
-
- switch (cmd) {
- case STL_BINTR:
- EBRDINTR(brdp);
- break;
- case STL_BSTART:
- rc = stli_startbrd(brdp);
- break;
- case STL_BSTOP:
- clear_bit(BST_STARTED, &brdp->state);
- break;
- case STL_BRESET:
- clear_bit(BST_STARTED, &brdp->state);
- EBRDRESET(brdp);
- if (stli_shared == 0) {
- if (brdp->reenable != NULL)
- (* brdp->reenable)(brdp);
- }
- break;
- default:
- rc = -ENOIOCTLCMD;
- break;
- }
- return rc;
-}
-
-static const struct tty_operations stli_ops = {
- .open = stli_open,
- .close = stli_close,
- .write = stli_write,
- .put_char = stli_putchar,
- .flush_chars = stli_flushchars,
- .write_room = stli_writeroom,
- .chars_in_buffer = stli_charsinbuffer,
- .ioctl = stli_ioctl,
- .set_termios = stli_settermios,
- .throttle = stli_throttle,
- .unthrottle = stli_unthrottle,
- .stop = stli_stop,
- .start = stli_start,
- .hangup = stli_hangup,
- .flush_buffer = stli_flushbuffer,
- .break_ctl = stli_breakctl,
- .wait_until_sent = stli_waituntilsent,
- .send_xchar = stli_sendxchar,
- .tiocmget = stli_tiocmget,
- .tiocmset = stli_tiocmset,
- .proc_fops = &stli_proc_fops,
-};
-
-static const struct tty_port_operations stli_port_ops = {
- .carrier_raised = stli_carrier_raised,
- .dtr_rts = stli_dtr_rts,
- .activate = stli_activate,
- .shutdown = stli_shutdown,
-};
-
-/*****************************************************************************/
-/*
- * Loadable module initialization stuff.
- */
-
-static void istallion_cleanup_isa(void)
-{
- struct stlibrd *brdp;
- unsigned int j;
-
- for (j = 0; (j < stli_nrbrds); j++) {
- if ((brdp = stli_brds[j]) == NULL ||
- test_bit(BST_PROBED, &brdp->state))
- continue;
-
- stli_cleanup_ports(brdp);
-
- iounmap(brdp->membase);
- if (brdp->iosize > 0)
- release_region(brdp->iobase, brdp->iosize);
- kfree(brdp);
- stli_brds[j] = NULL;
- }
-}
-
-static int __init istallion_module_init(void)
-{
- unsigned int i;
- int retval;
-
- printk(KERN_INFO "%s: version %s\n", stli_drvtitle, stli_drvversion);
-
- spin_lock_init(&stli_lock);
- spin_lock_init(&brd_lock);
-
- stli_txcookbuf = kmalloc(STLI_TXBUFSIZE, GFP_KERNEL);
- if (!stli_txcookbuf) {
- printk(KERN_ERR "istallion: failed to allocate memory "
- "(size=%d)\n", STLI_TXBUFSIZE);
- retval = -ENOMEM;
- goto err;
- }
-
- stli_serial = alloc_tty_driver(STL_MAXBRDS * STL_MAXPORTS);
- if (!stli_serial) {
- retval = -ENOMEM;
- goto err_free;
- }
-
- stli_serial->owner = THIS_MODULE;
- stli_serial->driver_name = stli_drvname;
- stli_serial->name = stli_serialname;
- stli_serial->major = STL_SERIALMAJOR;
- stli_serial->minor_start = 0;
- stli_serial->type = TTY_DRIVER_TYPE_SERIAL;
- stli_serial->subtype = SERIAL_TYPE_NORMAL;
- stli_serial->init_termios = stli_deftermios;
- stli_serial->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
- tty_set_operations(stli_serial, &stli_ops);
-
- retval = tty_register_driver(stli_serial);
- if (retval) {
- printk(KERN_ERR "istallion: failed to register serial driver\n");
- goto err_ttyput;
- }
-
- retval = stli_initbrds();
- if (retval)
- goto err_ttyunr;
-
-/*
- * Set up a character driver for the shared memory region. We need this
- * to download the slave code image. It is also a useful debugging tool.
- */
- retval = register_chrdev(STL_SIOMEMMAJOR, "staliomem", &stli_fsiomem);
- if (retval) {
- printk(KERN_ERR "istallion: failed to register serial memory "
- "device\n");
- goto err_deinit;
- }
-
- istallion_class = class_create(THIS_MODULE, "staliomem");
- for (i = 0; i < 4; i++)
- device_create(istallion_class, NULL, MKDEV(STL_SIOMEMMAJOR, i),
- NULL, "staliomem%d", i);
-
- return 0;
-err_deinit:
- pci_unregister_driver(&stli_pcidriver);
- istallion_cleanup_isa();
-err_ttyunr:
- tty_unregister_driver(stli_serial);
-err_ttyput:
- put_tty_driver(stli_serial);
-err_free:
- kfree(stli_txcookbuf);
-err:
- return retval;
-}
-
-/*****************************************************************************/
-
-static void __exit istallion_module_exit(void)
-{
- unsigned int j;
-
- printk(KERN_INFO "Unloading %s: version %s\n", stli_drvtitle,
- stli_drvversion);
-
- if (stli_timeron) {
- stli_timeron = 0;
- del_timer_sync(&stli_timerlist);
- }
-
- unregister_chrdev(STL_SIOMEMMAJOR, "staliomem");
-
- for (j = 0; j < 4; j++)
- device_destroy(istallion_class, MKDEV(STL_SIOMEMMAJOR, j));
- class_destroy(istallion_class);
-
- pci_unregister_driver(&stli_pcidriver);
- istallion_cleanup_isa();
-
- tty_unregister_driver(stli_serial);
- put_tty_driver(stli_serial);
-
- kfree(stli_txcookbuf);
-}
-
-module_init(istallion_module_init);
-module_exit(istallion_module_exit);
diff --git a/drivers/staging/tty/riscom8.c b/drivers/staging/tty/riscom8.c
deleted file mode 100644
index 602643a40b4..00000000000
--- a/drivers/staging/tty/riscom8.c
+++ /dev/null
@@ -1,1560 +0,0 @@
-/*
- * linux/drivers/char/riscom.c -- RISCom/8 multiport serial driver.
- *
- * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com)
- *
- * This code is loosely based on the Linux serial driver, written by
- * Linus Torvalds, Theodore T'so and others. The RISCom/8 card
- * programming info was obtained from various drivers for other OSes
- * (FreeBSD, ISC, etc), but no source code from those drivers was
- * directly included in this driver.
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Revision 1.1
- *
- * ChangeLog:
- * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 27-Jun-2001
- * - get rid of check_region and several cleanups
- */
-
-#include <linux/module.h>
-
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/tty.h>
-#include <linux/mm.h>
-#include <linux/serial.h>
-#include <linux/fcntl.h>
-#include <linux/major.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/tty_flip.h>
-#include <linux/spinlock.h>
-#include <linux/device.h>
-
-#include <linux/uaccess.h>
-
-#include "riscom8.h"
-#include "riscom8_reg.h"
-
-/* Am I paranoid or not ? ;-) */
-#define RISCOM_PARANOIA_CHECK
-
-/*
- * Crazy InteliCom/8 boards sometimes have swapped CTS & DSR signals.
- * You can slightly speed up things by #undefing the following option,
- * if you are REALLY sure that your board is a correct one.
- */
-
-#define RISCOM_BRAIN_DAMAGED_CTS
-
-/*
- * The following defines are mostly for testing purposes. But if you need
- * some nice reporting in your syslog, you can also define them.
- */
-#undef RC_REPORT_FIFO
-#undef RC_REPORT_OVERRUN
-
-
-#define RISCOM_LEGAL_FLAGS \
- (ASYNC_HUP_NOTIFY | ASYNC_SAK | ASYNC_SPLIT_TERMIOS | \
- ASYNC_SPD_HI | ASYNC_SPEED_VHI | ASYNC_SESSION_LOCKOUT | \
- ASYNC_PGRP_LOCKOUT | ASYNC_CALLOUT_NOHUP)
-
-static struct tty_driver *riscom_driver;
-
-static DEFINE_SPINLOCK(riscom_lock);
-
-static struct riscom_board rc_board[RC_NBOARD] = {
- {
- .base = RC_IOBASE1,
- },
- {
- .base = RC_IOBASE2,
- },
- {
- .base = RC_IOBASE3,
- },
- {
- .base = RC_IOBASE4,
- },
-};
-
-static struct riscom_port rc_port[RC_NBOARD * RC_NPORT];
-
-/* RISCom/8 I/O ports addresses (without address translation) */
-static unsigned short rc_ioport[] = {
-#if 1
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x09, 0x0a, 0x0b, 0x0c,
-#else
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x09, 0x0a, 0x0b, 0x0c, 0x10,
- 0x11, 0x12, 0x18, 0x28, 0x31, 0x32, 0x39, 0x3a, 0x40, 0x41, 0x61, 0x62,
- 0x63, 0x64, 0x6b, 0x70, 0x71, 0x78, 0x7a, 0x7b, 0x7f, 0x100, 0x101
-#endif
-};
-#define RC_NIOPORT ARRAY_SIZE(rc_ioport)
-
-
-static int rc_paranoia_check(struct riscom_port const *port,
- char *name, const char *routine)
-{
-#ifdef RISCOM_PARANOIA_CHECK
- static const char badmagic[] = KERN_INFO
- "rc: Warning: bad riscom port magic number for device %s in %s\n";
- static const char badinfo[] = KERN_INFO
- "rc: Warning: null riscom port for device %s in %s\n";
-
- if (!port) {
- printk(badinfo, name, routine);
- return 1;
- }
- if (port->magic != RISCOM8_MAGIC) {
- printk(badmagic, name, routine);
- return 1;
- }
-#endif
- return 0;
-}
-
-/*
- *
- * Service functions for the RISCom/8 driver.
- *
- */
-
-/* Get board number from pointer */
-static inline int board_No(struct riscom_board const *bp)
-{
- return bp - rc_board;
-}
-
-/* Get port number from pointer */
-static inline int port_No(struct riscom_port const *port)
-{
- return RC_PORT(port - rc_port);
-}
-
-/* Get pointer to board from pointer to port */
-static inline struct riscom_board *port_Board(struct riscom_port const *port)
-{
- return &rc_board[RC_BOARD(port - rc_port)];
-}
-
-/* Input Byte from CL CD180 register */
-static inline unsigned char rc_in(struct riscom_board const *bp,
- unsigned short reg)
-{
- return inb(bp->base + RC_TO_ISA(reg));
-}
-
-/* Output Byte to CL CD180 register */
-static inline void rc_out(struct riscom_board const *bp, unsigned short reg,
- unsigned char val)
-{
- outb(val, bp->base + RC_TO_ISA(reg));
-}
-
-/* Wait for Channel Command Register ready */
-static void rc_wait_CCR(struct riscom_board const *bp)
-{
- unsigned long delay;
-
-	/* FIXME: need something more descriptive than 100000 :) */
- for (delay = 100000; delay; delay--)
- if (!rc_in(bp, CD180_CCR))
- return;
-
- printk(KERN_INFO "rc%d: Timeout waiting for CCR.\n", board_No(bp));
-}
-
-/*
- * RISCom/8 probe functions.
- */
-
-static int rc_request_io_range(struct riscom_board * const bp)
-{
- int i;
-
- for (i = 0; i < RC_NIOPORT; i++)
- if (!request_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1,
- "RISCom/8")) {
- goto out_release;
- }
- return 0;
-out_release:
- printk(KERN_INFO "rc%d: Skipping probe at 0x%03x. IO address in use.\n",
- board_No(bp), bp->base);
- while (--i >= 0)
- release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1);
- return 1;
-}
-
-static void rc_release_io_range(struct riscom_board * const bp)
-{
- int i;
-
- for (i = 0; i < RC_NIOPORT; i++)
- release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1);
-}
-
-/* Reset and setup CD180 chip */
-static void __init rc_init_CD180(struct riscom_board const *bp)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&riscom_lock, flags);
-
- rc_out(bp, RC_CTOUT, 0); /* Clear timeout */
- rc_wait_CCR(bp); /* Wait for CCR ready */
- rc_out(bp, CD180_CCR, CCR_HARDRESET); /* Reset CD180 chip */
- spin_unlock_irqrestore(&riscom_lock, flags);
- msleep(50); /* Delay 0.05 sec */
- spin_lock_irqsave(&riscom_lock, flags);
- rc_out(bp, CD180_GIVR, RC_ID); /* Set ID for this chip */
- rc_out(bp, CD180_GICR, 0); /* Clear all bits */
- rc_out(bp, CD180_PILR1, RC_ACK_MINT); /* Prio for modem intr */
- rc_out(bp, CD180_PILR2, RC_ACK_TINT); /* Prio for tx intr */
- rc_out(bp, CD180_PILR3, RC_ACK_RINT); /* Prio for rx intr */
-
- /* Setting up prescaler. We need 4 ticks per 1 ms */
- rc_out(bp, CD180_PPRH, (RC_OSCFREQ/(1000000/RISCOM_TPS)) >> 8);
- rc_out(bp, CD180_PPRL, (RC_OSCFREQ/(1000000/RISCOM_TPS)) & 0xff);
-
- spin_unlock_irqrestore(&riscom_lock, flags);
-}
-
-/* Main probing routine, also sets irq. */
-static int __init rc_probe(struct riscom_board *bp)
-{
- unsigned char val1, val2;
- int irqs = 0;
- int retries;
-
- bp->irq = 0;
-
- if (rc_request_io_range(bp))
- return 1;
-
- /* Are the I/O ports here ? */
- rc_out(bp, CD180_PPRL, 0x5a);
- outb(0xff, 0x80);
- val1 = rc_in(bp, CD180_PPRL);
- rc_out(bp, CD180_PPRL, 0xa5);
- outb(0x00, 0x80);
- val2 = rc_in(bp, CD180_PPRL);
-
- if ((val1 != 0x5a) || (val2 != 0xa5)) {
- printk(KERN_ERR "rc%d: RISCom/8 Board at 0x%03x not found.\n",
- board_No(bp), bp->base);
- goto out_release;
- }
-
- /* It's time to find IRQ for this board */
- for (retries = 0; retries < 5 && irqs <= 0; retries++) {
- irqs = probe_irq_on();
- rc_init_CD180(bp); /* Reset CD180 chip */
- rc_out(bp, CD180_CAR, 2); /* Select port 2 */
- rc_wait_CCR(bp);
- rc_out(bp, CD180_CCR, CCR_TXEN); /* Enable transmitter */
- rc_out(bp, CD180_IER, IER_TXRDY);/* Enable tx empty intr */
- msleep(50);
- irqs = probe_irq_off(irqs);
- val1 = rc_in(bp, RC_BSR); /* Get Board Status reg */
- val2 = rc_in(bp, RC_ACK_TINT); /* ACK interrupt */
- rc_init_CD180(bp); /* Reset CD180 again */
-
- if ((val1 & RC_BSR_TINT) || (val2 != (RC_ID | GIVR_IT_TX))) {
- printk(KERN_ERR "rc%d: RISCom/8 Board at 0x%03x not "
- "found.\n", board_No(bp), bp->base);
- goto out_release;
- }
- }
-
- if (irqs <= 0) {
- printk(KERN_ERR "rc%d: Can't find IRQ for RISCom/8 board "
- "at 0x%03x.\n", board_No(bp), bp->base);
- goto out_release;
- }
- bp->irq = irqs;
- bp->flags |= RC_BOARD_PRESENT;
-
- printk(KERN_INFO "rc%d: RISCom/8 Rev. %c board detected at "
- "0x%03x, IRQ %d.\n",
- board_No(bp),
- (rc_in(bp, CD180_GFRCR) & 0x0f) + 'A', /* Board revision */
- bp->base, bp->irq);
-
- return 0;
-out_release:
- rc_release_io_range(bp);
- return 1;
-}
-
-/*
- *
- * Interrupt processing routines.
- *
- */
-
-static struct riscom_port *rc_get_port(struct riscom_board const *bp,
- unsigned char const *what)
-{
- unsigned char channel;
- struct riscom_port *port;
-
- channel = rc_in(bp, CD180_GICR) >> GICR_CHAN_OFF;
- if (channel < CD180_NCH) {
- port = &rc_port[board_No(bp) * RC_NPORT + channel];
- if (port->port.flags & ASYNC_INITIALIZED)
- return port;
- }
- printk(KERN_ERR "rc%d: %s interrupt from invalid port %d\n",
- board_No(bp), what, channel);
- return NULL;
-}
-
-static void rc_receive_exc(struct riscom_board const *bp)
-{
- struct riscom_port *port;
- struct tty_struct *tty;
- unsigned char status;
- unsigned char ch, flag;
-
- port = rc_get_port(bp, "Receive");
- if (port == NULL)
- return;
-
- tty = tty_port_tty_get(&port->port);
-
-#ifdef RC_REPORT_OVERRUN
- status = rc_in(bp, CD180_RCSR);
- if (status & RCSR_OE)
- port->overrun++;
- status &= port->mark_mask;
-#else
- status = rc_in(bp, CD180_RCSR) & port->mark_mask;
-#endif
- ch = rc_in(bp, CD180_RDR);
- if (!status)
- goto out;
- if (status & RCSR_TOUT) {
- printk(KERN_WARNING "rc%d: port %d: Receiver timeout. "
- "Hardware problems ?\n",
- board_No(bp), port_No(port));
- goto out;
-
- } else if (status & RCSR_BREAK) {
- printk(KERN_INFO "rc%d: port %d: Handling break...\n",
- board_No(bp), port_No(port));
- flag = TTY_BREAK;
- if (tty && (port->port.flags & ASYNC_SAK))
- do_SAK(tty);
-
- } else if (status & RCSR_PE)
- flag = TTY_PARITY;
-
- else if (status & RCSR_FE)
- flag = TTY_FRAME;
-
- else if (status & RCSR_OE)
- flag = TTY_OVERRUN;
- else
- flag = TTY_NORMAL;
-
- if (tty) {
- tty_insert_flip_char(tty, ch, flag);
- tty_flip_buffer_push(tty);
- }
-out:
- tty_kref_put(tty);
-}
-
-static void rc_receive(struct riscom_board const *bp)
-{
- struct riscom_port *port;
- struct tty_struct *tty;
- unsigned char count;
-
- port = rc_get_port(bp, "Receive");
- if (port == NULL)
- return;
-
- tty = tty_port_tty_get(&port->port);
-
- count = rc_in(bp, CD180_RDCR);
-
-#ifdef RC_REPORT_FIFO
- port->hits[count > 8 ? 9 : count]++;
-#endif
-
- while (count--) {
- u8 ch = rc_in(bp, CD180_RDR);
- if (tty)
- tty_insert_flip_char(tty, ch, TTY_NORMAL);
- }
- if (tty) {
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
-}
-
-static void rc_transmit(struct riscom_board const *bp)
-{
- struct riscom_port *port;
- struct tty_struct *tty;
- unsigned char count;
-
- port = rc_get_port(bp, "Transmit");
- if (port == NULL)
- return;
-
- tty = tty_port_tty_get(&port->port);
-
- if (port->IER & IER_TXEMPTY) {
- /* FIFO drained */
- rc_out(bp, CD180_CAR, port_No(port));
- port->IER &= ~IER_TXEMPTY;
- rc_out(bp, CD180_IER, port->IER);
- goto out;
- }
-
- if ((port->xmit_cnt <= 0 && !port->break_length)
- || (tty && (tty->stopped || tty->hw_stopped))) {
- rc_out(bp, CD180_CAR, port_No(port));
- port->IER &= ~IER_TXRDY;
- rc_out(bp, CD180_IER, port->IER);
- goto out;
- }
-
- if (port->break_length) {
- if (port->break_length > 0) {
- if (port->COR2 & COR2_ETC) {
- rc_out(bp, CD180_TDR, CD180_C_ESC);
- rc_out(bp, CD180_TDR, CD180_C_SBRK);
- port->COR2 &= ~COR2_ETC;
- }
- count = min_t(int, port->break_length, 0xff);
- rc_out(bp, CD180_TDR, CD180_C_ESC);
- rc_out(bp, CD180_TDR, CD180_C_DELAY);
- rc_out(bp, CD180_TDR, count);
- port->break_length -= count;
- if (port->break_length == 0)
- port->break_length--;
- } else {
- rc_out(bp, CD180_TDR, CD180_C_ESC);
- rc_out(bp, CD180_TDR, CD180_C_EBRK);
- rc_out(bp, CD180_COR2, port->COR2);
- rc_wait_CCR(bp);
- rc_out(bp, CD180_CCR, CCR_CORCHG2);
- port->break_length = 0;
- }
- goto out;
- }
-
- count = CD180_NFIFO;
- do {
- rc_out(bp, CD180_TDR, port->port.xmit_buf[port->xmit_tail++]);
- port->xmit_tail = port->xmit_tail & (SERIAL_XMIT_SIZE-1);
- if (--port->xmit_cnt <= 0)
- break;
- } while (--count > 0);
-
- if (port->xmit_cnt <= 0) {
- rc_out(bp, CD180_CAR, port_No(port));
- port->IER &= ~IER_TXRDY;
- rc_out(bp, CD180_IER, port->IER);
- }
- if (tty && port->xmit_cnt <= port->wakeup_chars)
- tty_wakeup(tty);
-out:
- tty_kref_put(tty);
-}
-
-static void rc_check_modem(struct riscom_board const *bp)
-{
- struct riscom_port *port;
- struct tty_struct *tty;
- unsigned char mcr;
-
- port = rc_get_port(bp, "Modem");
- if (port == NULL)
- return;
-
- tty = tty_port_tty_get(&port->port);
-
- mcr = rc_in(bp, CD180_MCR);
- if (mcr & MCR_CDCHG) {
- if (rc_in(bp, CD180_MSVR) & MSVR_CD)
- wake_up_interruptible(&port->port.open_wait);
- else if (tty)
- tty_hangup(tty);
- }
-
-#ifdef RISCOM_BRAIN_DAMAGED_CTS
- if (mcr & MCR_CTSCHG) {
- if (rc_in(bp, CD180_MSVR) & MSVR_CTS) {
- port->IER |= IER_TXRDY;
- if (tty) {
- tty->hw_stopped = 0;
- if (port->xmit_cnt <= port->wakeup_chars)
- tty_wakeup(tty);
- }
- } else {
- if (tty)
- tty->hw_stopped = 1;
- port->IER &= ~IER_TXRDY;
- }
- rc_out(bp, CD180_IER, port->IER);
- }
- if (mcr & MCR_DSRCHG) {
- if (rc_in(bp, CD180_MSVR) & MSVR_DSR) {
- port->IER |= IER_TXRDY;
- if (tty) {
- tty->hw_stopped = 0;
- if (port->xmit_cnt <= port->wakeup_chars)
- tty_wakeup(tty);
- }
- } else {
- if (tty)
- tty->hw_stopped = 1;
- port->IER &= ~IER_TXRDY;
- }
- rc_out(bp, CD180_IER, port->IER);
- }
-#endif /* RISCOM_BRAIN_DAMAGED_CTS */
-
- /* Clear change bits */
- rc_out(bp, CD180_MCR, 0);
- tty_kref_put(tty);
-}
-
-/* The main interrupt processing routine */
-static irqreturn_t rc_interrupt(int dummy, void *dev_id)
-{
- unsigned char status;
- unsigned char ack;
- struct riscom_board *bp = dev_id;
- unsigned long loop = 0;
- int handled = 0;
-
- if (!(bp->flags & RC_BOARD_ACTIVE))
- return IRQ_NONE;
-
- while ((++loop < 16) && ((status = ~(rc_in(bp, RC_BSR))) &
- (RC_BSR_TOUT | RC_BSR_TINT |
- RC_BSR_MINT | RC_BSR_RINT))) {
- handled = 1;
- if (status & RC_BSR_TOUT)
- printk(KERN_WARNING "rc%d: Got timeout. Hardware "
- "error?\n", board_No(bp));
- else if (status & RC_BSR_RINT) {
- ack = rc_in(bp, RC_ACK_RINT);
- if (ack == (RC_ID | GIVR_IT_RCV))
- rc_receive(bp);
- else if (ack == (RC_ID | GIVR_IT_REXC))
- rc_receive_exc(bp);
- else
- printk(KERN_WARNING "rc%d: Bad receive ack "
- "0x%02x.\n",
- board_No(bp), ack);
- } else if (status & RC_BSR_TINT) {
- ack = rc_in(bp, RC_ACK_TINT);
- if (ack == (RC_ID | GIVR_IT_TX))
- rc_transmit(bp);
- else
- printk(KERN_WARNING "rc%d: Bad transmit ack "
- "0x%02x.\n",
- board_No(bp), ack);
- } else /* if (status & RC_BSR_MINT) */ {
- ack = rc_in(bp, RC_ACK_MINT);
- if (ack == (RC_ID | GIVR_IT_MODEM))
- rc_check_modem(bp);
- else
- printk(KERN_WARNING "rc%d: Bad modem ack "
- "0x%02x.\n",
- board_No(bp), ack);
- }
- rc_out(bp, CD180_EOIR, 0); /* Mark end of interrupt */
- rc_out(bp, RC_CTOUT, 0); /* Clear timeout flag */
- }
- return IRQ_RETVAL(handled);
-}
-
-/*
- * Routines for open & close processing.
- */
-
-/* Called with disabled interrupts */
-static int rc_setup_board(struct riscom_board *bp)
-{
- int error;
-
- if (bp->flags & RC_BOARD_ACTIVE)
- return 0;
-
- error = request_irq(bp->irq, rc_interrupt, IRQF_DISABLED,
- "RISCom/8", bp);
- if (error)
- return error;
-
- rc_out(bp, RC_CTOUT, 0); /* Just in case */
- bp->DTR = ~0;
- rc_out(bp, RC_DTR, bp->DTR); /* Drop DTR on all ports */
-
- bp->flags |= RC_BOARD_ACTIVE;
-
- return 0;
-}
-
-/* Called with disabled interrupts */
-static void rc_shutdown_board(struct riscom_board *bp)
-{
- if (!(bp->flags & RC_BOARD_ACTIVE))
- return;
-
- bp->flags &= ~RC_BOARD_ACTIVE;
-
- free_irq(bp->irq, NULL);
-
- bp->DTR = ~0;
- rc_out(bp, RC_DTR, bp->DTR); /* Drop DTR on all ports */
-
-}
-
-/*
- * Setting up port characteristics.
- * Must be called with disabled interrupts
- */
-static void rc_change_speed(struct tty_struct *tty, struct riscom_board *bp,
- struct riscom_port *port)
-{
- unsigned long baud;
- long tmp;
- unsigned char cor1 = 0, cor3 = 0;
- unsigned char mcor1 = 0, mcor2 = 0;
-
- port->IER = 0;
- port->COR2 = 0;
- port->MSVR = MSVR_RTS;
-
- baud = tty_get_baud_rate(tty);
-
- /* Select port on the board */
- rc_out(bp, CD180_CAR, port_No(port));
-
- if (!baud) {
- /* Drop DTR & exit */
- bp->DTR |= (1u << port_No(port));
- rc_out(bp, RC_DTR, bp->DTR);
- return;
- } else {
- /* Set DTR on */
- bp->DTR &= ~(1u << port_No(port));
- rc_out(bp, RC_DTR, bp->DTR);
- }
-
- /*
-	 * Now we must calculate some speed dependent things
-	 * (a worked sketch of this arithmetic follows the function).
- */
-
- /* Set baud rate for port */
- tmp = (((RC_OSCFREQ + baud/2) / baud +
- CD180_TPC/2) / CD180_TPC);
-
- rc_out(bp, CD180_RBPRH, (tmp >> 8) & 0xff);
- rc_out(bp, CD180_TBPRH, (tmp >> 8) & 0xff);
- rc_out(bp, CD180_RBPRL, tmp & 0xff);
- rc_out(bp, CD180_TBPRL, tmp & 0xff);
-
- baud = (baud + 5) / 10; /* Estimated CPS */
-
-	/* Two timer ticks seem enough to wake up something like the SLIP driver */
- tmp = ((baud + HZ/2) / HZ) * 2 - CD180_NFIFO;
- port->wakeup_chars = (tmp < 0) ? 0 : ((tmp >= SERIAL_XMIT_SIZE) ?
- SERIAL_XMIT_SIZE - 1 : tmp);
-
- /* Receiver timeout will be transmission time for 1.5 chars */
- tmp = (RISCOM_TPS + RISCOM_TPS/2 + baud/2) / baud;
- tmp = (tmp > 0xff) ? 0xff : tmp;
- rc_out(bp, CD180_RTPR, tmp);
-
- switch (C_CSIZE(tty)) {
- case CS5:
- cor1 |= COR1_5BITS;
- break;
- case CS6:
- cor1 |= COR1_6BITS;
- break;
- case CS7:
- cor1 |= COR1_7BITS;
- break;
- case CS8:
- cor1 |= COR1_8BITS;
- break;
- }
- if (C_CSTOPB(tty))
- cor1 |= COR1_2SB;
-
- cor1 |= COR1_IGNORE;
- if (C_PARENB(tty)) {
- cor1 |= COR1_NORMPAR;
- if (C_PARODD(tty))
- cor1 |= COR1_ODDP;
- if (I_INPCK(tty))
- cor1 &= ~COR1_IGNORE;
- }
- /* Set marking of some errors */
- port->mark_mask = RCSR_OE | RCSR_TOUT;
- if (I_INPCK(tty))
- port->mark_mask |= RCSR_FE | RCSR_PE;
- if (I_BRKINT(tty) || I_PARMRK(tty))
- port->mark_mask |= RCSR_BREAK;
- if (I_IGNPAR(tty))
- port->mark_mask &= ~(RCSR_FE | RCSR_PE);
- if (I_IGNBRK(tty)) {
- port->mark_mask &= ~RCSR_BREAK;
- if (I_IGNPAR(tty))
- /* Real raw mode. Ignore all */
- port->mark_mask &= ~RCSR_OE;
- }
- /* Enable Hardware Flow Control */
- if (C_CRTSCTS(tty)) {
-#ifdef RISCOM_BRAIN_DAMAGED_CTS
- port->IER |= IER_DSR | IER_CTS;
- mcor1 |= MCOR1_DSRZD | MCOR1_CTSZD;
- mcor2 |= MCOR2_DSROD | MCOR2_CTSOD;
- tty->hw_stopped = !(rc_in(bp, CD180_MSVR) &
- (MSVR_CTS|MSVR_DSR));
-#else
- port->COR2 |= COR2_CTSAE;
-#endif
- }
- /* Enable Software Flow Control. FIXME: I'm not sure about this */
-	/* Some people reported that it works, but I still have doubts. */
- if (I_IXON(tty)) {
- port->COR2 |= COR2_TXIBE;
- cor3 |= (COR3_FCT | COR3_SCDE);
- if (I_IXANY(tty))
- port->COR2 |= COR2_IXM;
- rc_out(bp, CD180_SCHR1, START_CHAR(tty));
- rc_out(bp, CD180_SCHR2, STOP_CHAR(tty));
- rc_out(bp, CD180_SCHR3, START_CHAR(tty));
- rc_out(bp, CD180_SCHR4, STOP_CHAR(tty));
- }
- if (!C_CLOCAL(tty)) {
- /* Enable CD check */
- port->IER |= IER_CD;
- mcor1 |= MCOR1_CDZD;
- mcor2 |= MCOR2_CDOD;
- }
-
- if (C_CREAD(tty))
- /* Enable receiver */
- port->IER |= IER_RXD;
-
- /* Set input FIFO size (1-8 bytes) */
- cor3 |= RISCOM_RXFIFO;
- /* Setting up CD180 channel registers */
- rc_out(bp, CD180_COR1, cor1);
- rc_out(bp, CD180_COR2, port->COR2);
- rc_out(bp, CD180_COR3, cor3);
- /* Make CD180 know about registers change */
- rc_wait_CCR(bp);
- rc_out(bp, CD180_CCR, CCR_CORCHG1 | CCR_CORCHG2 | CCR_CORCHG3);
- /* Setting up modem option registers */
- rc_out(bp, CD180_MCOR1, mcor1);
- rc_out(bp, CD180_MCOR2, mcor2);
- /* Enable CD180 transmitter & receiver */
- rc_wait_CCR(bp);
- rc_out(bp, CD180_CCR, CCR_TXEN | CCR_RXEN);
- /* Enable interrupts */
- rc_out(bp, CD180_IER, port->IER);
- /* And finally set RTS on */
- rc_out(bp, CD180_MSVR, port->MSVR);
-}
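rc_change_speed() above packs its timing calculations into a few dense lines: a bit-rate divisor that is rounded at each division stage, and a receiver timeout of roughly 1.5 character times. The standalone sketch below mirrors the same rounded integer arithmetic; the oscillator frequency (9830400 Hz) and the CD180 ticks-per-character value (16) are assumed example figures standing in for RC_OSCFREQ and CD180_TPC from riscom8_reg.h, while 4000 is RISCOM_TPS as defined in riscom8.h.

/*
 * Illustrative only: mirrors the rounded divisions in rc_change_speed().
 * EX_OSCFREQ and EX_TPC are assumed example values; the driver takes the
 * real ones from riscom8_reg.h.
 */
#include <stdio.h>

#define EX_OSCFREQ	9830400UL	/* assumed CD180 oscillator, Hz */
#define EX_TPC		16UL		/* assumed CD180 ticks per character clock */
#define EX_TPS		4000UL		/* RISCOM_TPS from riscom8.h */

/* Bit-rate divisor written to {R,T}BPRH/{R,T}BPRL */
static unsigned long cd180_divisor(unsigned long baud)
{
	return ((EX_OSCFREQ + baud / 2) / baud + EX_TPC / 2) / EX_TPC;
}

/* Receiver timeout (RTPR): transmission time of ~1.5 characters, in ticks */
static unsigned long cd180_rx_timeout(unsigned long cps)
{
	unsigned long t = (EX_TPS + EX_TPS / 2 + cps / 2) / cps;

	return t > 0xff ? 0xff : t;
}

int main(void)
{
	/*
	 * 9600 bps: divisor 64 (9830400 / (16 * 64) == 9600 exactly),
	 * roughly 960 chars/s, so the 1.5-character timeout is 6 ticks
	 * (1.5 ms at 4000 ticks/s).
	 */
	printf("divisor=%lu timeout=%lu\n",
	       cd180_divisor(9600), cd180_rx_timeout(960));
	return 0;
}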
-
-/* Must be called with interrupts enabled */
-static int rc_activate_port(struct tty_port *port, struct tty_struct *tty)
-{
- struct riscom_port *rp = container_of(port, struct riscom_port, port);
- struct riscom_board *bp = port_Board(rp);
- unsigned long flags;
-
- if (tty_port_alloc_xmit_buf(port) < 0)
- return -ENOMEM;
-
- spin_lock_irqsave(&riscom_lock, flags);
-
- clear_bit(TTY_IO_ERROR, &tty->flags);
- bp->count++;
- rp->xmit_cnt = rp->xmit_head = rp->xmit_tail = 0;
- rc_change_speed(tty, bp, rp);
- spin_unlock_irqrestore(&riscom_lock, flags);
- return 0;
-}
-
-/* Must be called with interrupts disabled */
-static void rc_shutdown_port(struct tty_struct *tty,
- struct riscom_board *bp, struct riscom_port *port)
-{
-#ifdef RC_REPORT_OVERRUN
- printk(KERN_INFO "rc%d: port %d: Total %ld overruns were detected.\n",
- board_No(bp), port_No(port), port->overrun);
-#endif
-#ifdef RC_REPORT_FIFO
- {
- int i;
-
- printk(KERN_INFO "rc%d: port %d: FIFO hits [ ",
- board_No(bp), port_No(port));
- for (i = 0; i < 10; i++)
- printk("%ld ", port->hits[i]);
- printk("].\n");
- }
-#endif
- tty_port_free_xmit_buf(&port->port);
-
- /* Select port */
- rc_out(bp, CD180_CAR, port_No(port));
- /* Reset port */
- rc_wait_CCR(bp);
- rc_out(bp, CD180_CCR, CCR_SOFTRESET);
- /* Disable all interrupts from this port */
- port->IER = 0;
- rc_out(bp, CD180_IER, port->IER);
-
- set_bit(TTY_IO_ERROR, &tty->flags);
-
- if (--bp->count < 0) {
- printk(KERN_INFO "rc%d: rc_shutdown_port: "
- "bad board count: %d\n",
- board_No(bp), bp->count);
- bp->count = 0;
- }
- /*
- * If this is the last opened port on the board
- * shutdown whole board
- */
- if (!bp->count)
- rc_shutdown_board(bp);
-}
-
-static int carrier_raised(struct tty_port *port)
-{
- struct riscom_port *p = container_of(port, struct riscom_port, port);
- struct riscom_board *bp = port_Board(p);
- unsigned long flags;
- int CD;
-
- spin_lock_irqsave(&riscom_lock, flags);
- rc_out(bp, CD180_CAR, port_No(p));
- CD = rc_in(bp, CD180_MSVR) & MSVR_CD;
- rc_out(bp, CD180_MSVR, MSVR_RTS);
- bp->DTR &= ~(1u << port_No(p));
- rc_out(bp, RC_DTR, bp->DTR);
- spin_unlock_irqrestore(&riscom_lock, flags);
- return CD;
-}
-
-static void dtr_rts(struct tty_port *port, int onoff)
-{
- struct riscom_port *p = container_of(port, struct riscom_port, port);
- struct riscom_board *bp = port_Board(p);
- unsigned long flags;
-
- spin_lock_irqsave(&riscom_lock, flags);
- bp->DTR &= ~(1u << port_No(p));
- if (onoff == 0)
- bp->DTR |= (1u << port_No(p));
- rc_out(bp, RC_DTR, bp->DTR);
- spin_unlock_irqrestore(&riscom_lock, flags);
-}
-
-static int rc_open(struct tty_struct *tty, struct file *filp)
-{
- int board;
- int error;
- struct riscom_port *port;
- struct riscom_board *bp;
-
- board = RC_BOARD(tty->index);
- if (board >= RC_NBOARD || !(rc_board[board].flags & RC_BOARD_PRESENT))
- return -ENODEV;
-
- bp = &rc_board[board];
- port = rc_port + board * RC_NPORT + RC_PORT(tty->index);
- if (rc_paranoia_check(port, tty->name, "rc_open"))
- return -ENODEV;
-
- error = rc_setup_board(bp);
- if (error)
- return error;
-
- tty->driver_data = port;
- return tty_port_open(&port->port, tty, filp);
-}
-
-static void rc_flush_buffer(struct tty_struct *tty)
-{
- struct riscom_port *port = tty->driver_data;
- unsigned long flags;
-
- if (rc_paranoia_check(port, tty->name, "rc_flush_buffer"))
- return;
-
- spin_lock_irqsave(&riscom_lock, flags);
- port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
- spin_unlock_irqrestore(&riscom_lock, flags);
-
- tty_wakeup(tty);
-}
-
-static void rc_close_port(struct tty_port *port)
-{
- unsigned long flags;
- struct riscom_port *rp = container_of(port, struct riscom_port, port);
- struct riscom_board *bp = port_Board(rp);
- unsigned long timeout;
-
- /*
- * At this point we stop accepting input. To do this, we
- * disable the receive line status interrupts, and tell the
- * interrupt driver to stop checking the data ready bit in the
- * line status register.
- */
-
- spin_lock_irqsave(&riscom_lock, flags);
- rp->IER &= ~IER_RXD;
-
- rp->IER &= ~IER_TXRDY;
- rp->IER |= IER_TXEMPTY;
- rc_out(bp, CD180_CAR, port_No(rp));
- rc_out(bp, CD180_IER, rp->IER);
- /*
- * Before we drop DTR, make sure the UART transmitter
- * has completely drained; this is especially
- * important if there is a transmit FIFO!
- */
- timeout = jiffies + HZ;
- while (rp->IER & IER_TXEMPTY) {
- spin_unlock_irqrestore(&riscom_lock, flags);
- msleep_interruptible(jiffies_to_msecs(rp->timeout));
- spin_lock_irqsave(&riscom_lock, flags);
- if (time_after(jiffies, timeout))
- break;
- }
- rc_shutdown_port(port->tty, bp, rp);
- spin_unlock_irqrestore(&riscom_lock, flags);
-}
-
-static void rc_close(struct tty_struct *tty, struct file *filp)
-{
- struct riscom_port *port = tty->driver_data;
-
- if (!port || rc_paranoia_check(port, tty->name, "close"))
- return;
- tty_port_close(&port->port, tty, filp);
-}
-
-static int rc_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
-{
- struct riscom_port *port = tty->driver_data;
- struct riscom_board *bp;
- int c, total = 0;
- unsigned long flags;
-
- if (rc_paranoia_check(port, tty->name, "rc_write"))
- return 0;
-
- bp = port_Board(port);
-
- while (1) {
- spin_lock_irqsave(&riscom_lock, flags);
-
- c = min_t(int, count, min(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - port->xmit_head));
- if (c <= 0)
- break; /* lock continues to be held */
-
- memcpy(port->port.xmit_buf + port->xmit_head, buf, c);
- port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
- port->xmit_cnt += c;
-
- spin_unlock_irqrestore(&riscom_lock, flags);
-
- buf += c;
- count -= c;
- total += c;
- }
-
- if (port->xmit_cnt && !tty->stopped && !tty->hw_stopped &&
- !(port->IER & IER_TXRDY)) {
- port->IER |= IER_TXRDY;
- rc_out(bp, CD180_CAR, port_No(port));
- rc_out(bp, CD180_IER, port->IER);
- }
-
- spin_unlock_irqrestore(&riscom_lock, flags);
-
- return total;
-}
-
-static int rc_put_char(struct tty_struct *tty, unsigned char ch)
-{
- struct riscom_port *port = tty->driver_data;
- unsigned long flags;
- int ret = 0;
-
- if (rc_paranoia_check(port, tty->name, "rc_put_char"))
- return 0;
-
- spin_lock_irqsave(&riscom_lock, flags);
-
- if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1)
- goto out;
-
- port->port.xmit_buf[port->xmit_head++] = ch;
- port->xmit_head &= SERIAL_XMIT_SIZE - 1;
- port->xmit_cnt++;
- ret = 1;
-
-out:
- spin_unlock_irqrestore(&riscom_lock, flags);
- return ret;
-}
-
-static void rc_flush_chars(struct tty_struct *tty)
-{
- struct riscom_port *port = tty->driver_data;
- unsigned long flags;
-
- if (rc_paranoia_check(port, tty->name, "rc_flush_chars"))
- return;
-
- if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped)
- return;
-
- spin_lock_irqsave(&riscom_lock, flags);
-
- port->IER |= IER_TXRDY;
- rc_out(port_Board(port), CD180_CAR, port_No(port));
- rc_out(port_Board(port), CD180_IER, port->IER);
-
- spin_unlock_irqrestore(&riscom_lock, flags);
-}
-
-static int rc_write_room(struct tty_struct *tty)
-{
- struct riscom_port *port = tty->driver_data;
- int ret;
-
- if (rc_paranoia_check(port, tty->name, "rc_write_room"))
- return 0;
-
- ret = SERIAL_XMIT_SIZE - port->xmit_cnt - 1;
- if (ret < 0)
- ret = 0;
- return ret;
-}
-
-static int rc_chars_in_buffer(struct tty_struct *tty)
-{
- struct riscom_port *port = tty->driver_data;
-
- if (rc_paranoia_check(port, tty->name, "rc_chars_in_buffer"))
- return 0;
-
- return port->xmit_cnt;
-}
-
-static int rc_tiocmget(struct tty_struct *tty)
-{
- struct riscom_port *port = tty->driver_data;
- struct riscom_board *bp;
- unsigned char status;
- unsigned int result;
- unsigned long flags;
-
- if (rc_paranoia_check(port, tty->name, __func__))
- return -ENODEV;
-
- bp = port_Board(port);
-
- spin_lock_irqsave(&riscom_lock, flags);
-
- rc_out(bp, CD180_CAR, port_No(port));
- status = rc_in(bp, CD180_MSVR);
- result = rc_in(bp, RC_RI) & (1u << port_No(port)) ? 0 : TIOCM_RNG;
-
- spin_unlock_irqrestore(&riscom_lock, flags);
-
- result |= ((status & MSVR_RTS) ? TIOCM_RTS : 0)
- | ((status & MSVR_DTR) ? TIOCM_DTR : 0)
- | ((status & MSVR_CD) ? TIOCM_CAR : 0)
- | ((status & MSVR_DSR) ? TIOCM_DSR : 0)
- | ((status & MSVR_CTS) ? TIOCM_CTS : 0);
- return result;
-}
-
-static int rc_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct riscom_port *port = tty->driver_data;
- unsigned long flags;
- struct riscom_board *bp;
-
- if (rc_paranoia_check(port, tty->name, __func__))
- return -ENODEV;
-
- bp = port_Board(port);
-
- spin_lock_irqsave(&riscom_lock, flags);
-
- if (set & TIOCM_RTS)
- port->MSVR |= MSVR_RTS;
- if (set & TIOCM_DTR)
- bp->DTR &= ~(1u << port_No(port));
-
- if (clear & TIOCM_RTS)
- port->MSVR &= ~MSVR_RTS;
- if (clear & TIOCM_DTR)
- bp->DTR |= (1u << port_No(port));
-
- rc_out(bp, CD180_CAR, port_No(port));
- rc_out(bp, CD180_MSVR, port->MSVR);
- rc_out(bp, RC_DTR, bp->DTR);
-
- spin_unlock_irqrestore(&riscom_lock, flags);
-
- return 0;
-}
-
-static int rc_send_break(struct tty_struct *tty, int length)
-{
- struct riscom_port *port = tty->driver_data;
- struct riscom_board *bp = port_Board(port);
- unsigned long flags;
-
- if (length == 0 || length == -1)
- return -EOPNOTSUPP;
-
- spin_lock_irqsave(&riscom_lock, flags);
-
- port->break_length = RISCOM_TPS / HZ * length;
- port->COR2 |= COR2_ETC;
- port->IER |= IER_TXRDY;
- rc_out(bp, CD180_CAR, port_No(port));
- rc_out(bp, CD180_COR2, port->COR2);
- rc_out(bp, CD180_IER, port->IER);
- rc_wait_CCR(bp);
- rc_out(bp, CD180_CCR, CCR_CORCHG2);
- rc_wait_CCR(bp);
-
- spin_unlock_irqrestore(&riscom_lock, flags);
- return 0;
-}
-
-static int rc_set_serial_info(struct tty_struct *tty, struct riscom_port *port,
- struct serial_struct __user *newinfo)
-{
- struct serial_struct tmp;
- struct riscom_board *bp = port_Board(port);
- int change_speed;
-
- if (copy_from_user(&tmp, newinfo, sizeof(tmp)))
- return -EFAULT;
-
- mutex_lock(&port->port.mutex);
- change_speed = ((port->port.flags & ASYNC_SPD_MASK) !=
- (tmp.flags & ASYNC_SPD_MASK));
-
- if (!capable(CAP_SYS_ADMIN)) {
- if ((tmp.close_delay != port->port.close_delay) ||
- (tmp.closing_wait != port->port.closing_wait) ||
- ((tmp.flags & ~ASYNC_USR_MASK) !=
- (port->port.flags & ~ASYNC_USR_MASK))) {
- mutex_unlock(&port->port.mutex);
- return -EPERM;
- }
- port->port.flags = ((port->port.flags & ~ASYNC_USR_MASK) |
- (tmp.flags & ASYNC_USR_MASK));
- } else {
- port->port.flags = ((port->port.flags & ~ASYNC_FLAGS) |
- (tmp.flags & ASYNC_FLAGS));
- port->port.close_delay = tmp.close_delay;
- port->port.closing_wait = tmp.closing_wait;
- }
- if (change_speed) {
- unsigned long flags;
-
- spin_lock_irqsave(&riscom_lock, flags);
- rc_change_speed(tty, bp, port);
- spin_unlock_irqrestore(&riscom_lock, flags);
- }
- mutex_unlock(&port->port.mutex);
- return 0;
-}
-
-static int rc_get_serial_info(struct riscom_port *port,
- struct serial_struct __user *retinfo)
-{
- struct serial_struct tmp;
- struct riscom_board *bp = port_Board(port);
-
- memset(&tmp, 0, sizeof(tmp));
- tmp.type = PORT_CIRRUS;
- tmp.line = port - rc_port;
-
- mutex_lock(&port->port.mutex);
- tmp.port = bp->base;
- tmp.irq = bp->irq;
- tmp.flags = port->port.flags;
- tmp.baud_base = (RC_OSCFREQ + CD180_TPC/2) / CD180_TPC;
- tmp.close_delay = port->port.close_delay * HZ/100;
- tmp.closing_wait = port->port.closing_wait * HZ/100;
- mutex_unlock(&port->port.mutex);
- tmp.xmit_fifo_size = CD180_NFIFO;
- return copy_to_user(retinfo, &tmp, sizeof(tmp)) ? -EFAULT : 0;
-}
-
-static int rc_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct riscom_port *port = tty->driver_data;
- void __user *argp = (void __user *)arg;
- int retval;
-
- if (rc_paranoia_check(port, tty->name, "rc_ioctl"))
- return -ENODEV;
-
- switch (cmd) {
- case TIOCGSERIAL:
- retval = rc_get_serial_info(port, argp);
- break;
- case TIOCSSERIAL:
- retval = rc_set_serial_info(tty, port, argp);
- break;
- default:
- retval = -ENOIOCTLCMD;
- }
- return retval;
-}
-
-static void rc_throttle(struct tty_struct *tty)
-{
- struct riscom_port *port = tty->driver_data;
- struct riscom_board *bp;
- unsigned long flags;
-
- if (rc_paranoia_check(port, tty->name, "rc_throttle"))
- return;
- bp = port_Board(port);
-
- spin_lock_irqsave(&riscom_lock, flags);
- port->MSVR &= ~MSVR_RTS;
- rc_out(bp, CD180_CAR, port_No(port));
- if (I_IXOFF(tty)) {
- rc_wait_CCR(bp);
- rc_out(bp, CD180_CCR, CCR_SSCH2);
- rc_wait_CCR(bp);
- }
- rc_out(bp, CD180_MSVR, port->MSVR);
- spin_unlock_irqrestore(&riscom_lock, flags);
-}
-
-static void rc_unthrottle(struct tty_struct *tty)
-{
- struct riscom_port *port = tty->driver_data;
- struct riscom_board *bp;
- unsigned long flags;
-
- if (rc_paranoia_check(port, tty->name, "rc_unthrottle"))
- return;
- bp = port_Board(port);
-
- spin_lock_irqsave(&riscom_lock, flags);
- port->MSVR |= MSVR_RTS;
- rc_out(bp, CD180_CAR, port_No(port));
- if (I_IXOFF(tty)) {
- rc_wait_CCR(bp);
- rc_out(bp, CD180_CCR, CCR_SSCH1);
- rc_wait_CCR(bp);
- }
- rc_out(bp, CD180_MSVR, port->MSVR);
- spin_unlock_irqrestore(&riscom_lock, flags);
-}
-
-static void rc_stop(struct tty_struct *tty)
-{
- struct riscom_port *port = tty->driver_data;
- struct riscom_board *bp;
- unsigned long flags;
-
- if (rc_paranoia_check(port, tty->name, "rc_stop"))
- return;
-
- bp = port_Board(port);
-
- spin_lock_irqsave(&riscom_lock, flags);
- port->IER &= ~IER_TXRDY;
- rc_out(bp, CD180_CAR, port_No(port));
- rc_out(bp, CD180_IER, port->IER);
- spin_unlock_irqrestore(&riscom_lock, flags);
-}
-
-static void rc_start(struct tty_struct *tty)
-{
- struct riscom_port *port = tty->driver_data;
- struct riscom_board *bp;
- unsigned long flags;
-
- if (rc_paranoia_check(port, tty->name, "rc_start"))
- return;
-
- bp = port_Board(port);
-
- spin_lock_irqsave(&riscom_lock, flags);
-
- if (port->xmit_cnt && port->port.xmit_buf && !(port->IER & IER_TXRDY)) {
- port->IER |= IER_TXRDY;
- rc_out(bp, CD180_CAR, port_No(port));
- rc_out(bp, CD180_IER, port->IER);
- }
- spin_unlock_irqrestore(&riscom_lock, flags);
-}
-
-static void rc_hangup(struct tty_struct *tty)
-{
- struct riscom_port *port = tty->driver_data;
-
- if (rc_paranoia_check(port, tty->name, "rc_hangup"))
- return;
-
- tty_port_hangup(&port->port);
-}
-
-static void rc_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
-{
- struct riscom_port *port = tty->driver_data;
- unsigned long flags;
-
- if (rc_paranoia_check(port, tty->name, "rc_set_termios"))
- return;
-
- spin_lock_irqsave(&riscom_lock, flags);
- rc_change_speed(tty, port_Board(port), port);
- spin_unlock_irqrestore(&riscom_lock, flags);
-
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios->c_cflag & CRTSCTS)) {
- tty->hw_stopped = 0;
- rc_start(tty);
- }
-}
-
-static const struct tty_operations riscom_ops = {
- .open = rc_open,
- .close = rc_close,
- .write = rc_write,
- .put_char = rc_put_char,
- .flush_chars = rc_flush_chars,
- .write_room = rc_write_room,
- .chars_in_buffer = rc_chars_in_buffer,
- .flush_buffer = rc_flush_buffer,
- .ioctl = rc_ioctl,
- .throttle = rc_throttle,
- .unthrottle = rc_unthrottle,
- .set_termios = rc_set_termios,
- .stop = rc_stop,
- .start = rc_start,
- .hangup = rc_hangup,
- .tiocmget = rc_tiocmget,
- .tiocmset = rc_tiocmset,
- .break_ctl = rc_send_break,
-};
-
-static const struct tty_port_operations riscom_port_ops = {
- .carrier_raised = carrier_raised,
- .dtr_rts = dtr_rts,
- .shutdown = rc_close_port,
- .activate = rc_activate_port,
-};
-
-
-static int __init rc_init_drivers(void)
-{
- int error;
- int i;
-
- riscom_driver = alloc_tty_driver(RC_NBOARD * RC_NPORT);
- if (!riscom_driver)
- return -ENOMEM;
-
- riscom_driver->owner = THIS_MODULE;
- riscom_driver->name = "ttyL";
- riscom_driver->major = RISCOM8_NORMAL_MAJOR;
- riscom_driver->type = TTY_DRIVER_TYPE_SERIAL;
- riscom_driver->subtype = SERIAL_TYPE_NORMAL;
- riscom_driver->init_termios = tty_std_termios;
- riscom_driver->init_termios.c_cflag =
- B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- riscom_driver->init_termios.c_ispeed = 9600;
- riscom_driver->init_termios.c_ospeed = 9600;
- riscom_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_HARDWARE_BREAK;
- tty_set_operations(riscom_driver, &riscom_ops);
- error = tty_register_driver(riscom_driver);
- if (error != 0) {
- put_tty_driver(riscom_driver);
- printk(KERN_ERR "rc: Couldn't register RISCom/8 driver, "
- "error = %d\n", error);
- return 1;
- }
- memset(rc_port, 0, sizeof(rc_port));
- for (i = 0; i < RC_NPORT * RC_NBOARD; i++) {
- tty_port_init(&rc_port[i].port);
- rc_port[i].port.ops = &riscom_port_ops;
- rc_port[i].magic = RISCOM8_MAGIC;
- }
- return 0;
-}
-
-static void rc_release_drivers(void)
-{
- tty_unregister_driver(riscom_driver);
- put_tty_driver(riscom_driver);
-}
-
-#ifndef MODULE
-/*
- * Called at boot time.
- *
- * You can specify the I/O base for up to RC_NBOARD cards,
- * using the line "riscom8=0xiobase1,0xiobase2,.." at the LILO prompt.
- * Note that there will be no probing at default
- * addresses in this case.
- *
- */
-static int __init riscom8_setup(char *str)
-{
- int ints[RC_NBOARD];
- int i;
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- for (i = 0; i < RC_NBOARD; i++) {
- if (i < ints[0])
- rc_board[i].base = ints[i+1];
- else
- rc_board[i].base = 0;
- }
- return 1;
-}
-
-__setup("riscom8=", riscom8_setup);
-#endif
-
-static char banner[] __initdata =
- KERN_INFO "rc: SDL RISCom/8 card driver v1.1, (c) D.Gorodchanin "
- "1994-1996.\n";
-static char no_boards_msg[] __initdata =
- KERN_INFO "rc: No RISCom/8 boards detected.\n";
-
-/*
- * This routine must be called by kernel at boot time
- */
-static int __init riscom8_init(void)
-{
- int i;
- int found = 0;
-
- printk(banner);
-
- if (rc_init_drivers())
- return -EIO;
-
- for (i = 0; i < RC_NBOARD; i++)
- if (rc_board[i].base && !rc_probe(&rc_board[i]))
- found++;
- if (!found) {
- rc_release_drivers();
- printk(no_boards_msg);
- return -EIO;
- }
- return 0;
-}
-
-#ifdef MODULE
-static int iobase;
-static int iobase1;
-static int iobase2;
-static int iobase3;
-module_param(iobase, int, 0);
-module_param(iobase1, int, 0);
-module_param(iobase2, int, 0);
-module_param(iobase3, int, 0);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CHARDEV_MAJOR(RISCOM8_NORMAL_MAJOR);
-#endif /* MODULE */
-
-/*
- * You can configure up to 4 boards (the current value of RC_NBOARD)
- * by specifying "iobase=0xXXX iobase1=0xXXX ..." as insmod parameters.
- *
- */
-static int __init riscom8_init_module(void)
-{
-#ifdef MODULE
- int i;
-
- if (iobase || iobase1 || iobase2 || iobase3) {
- for (i = 0; i < RC_NBOARD; i++)
- rc_board[i].base = 0;
- }
-
- if (iobase)
- rc_board[0].base = iobase;
- if (iobase1)
- rc_board[1].base = iobase1;
- if (iobase2)
- rc_board[2].base = iobase2;
- if (iobase3)
- rc_board[3].base = iobase3;
-#endif /* MODULE */
-
- return riscom8_init();
-}
-
-static void __exit riscom8_exit_module(void)
-{
- int i;
-
- rc_release_drivers();
- for (i = 0; i < RC_NBOARD; i++)
- if (rc_board[i].flags & RC_BOARD_PRESENT)
- rc_release_io_range(&rc_board[i]);
-
-}
-
-module_init(riscom8_init_module);
-module_exit(riscom8_exit_module);
diff --git a/drivers/staging/tty/riscom8.h b/drivers/staging/tty/riscom8.h
deleted file mode 100644
index c9876b3f971..00000000000
--- a/drivers/staging/tty/riscom8.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * linux/drivers/char/riscom8.h -- RISCom/8 multiport serial driver.
- *
- * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com)
- *
- * This code is loosely based on the Linux serial driver, written by
- * Linus Torvalds, Theodore T'so and others. The RISCom/8 card
- * programming info was obtained from various drivers for other OSes
- * (FreeBSD, ISC, etc), but no source code from those drivers were
- * directly included in this driver.
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __LINUX_RISCOM8_H
-#define __LINUX_RISCOM8_H
-
-#include <linux/serial.h>
-
-#ifdef __KERNEL__
-
-#define RC_NBOARD 4
-/* NOTE: RISCom decoder recognizes 16 addresses... */
-#define RC_NPORT 8
-#define RC_BOARD(line) (((line) >> 3) & 0x07)
-#define RC_PORT(line) ((line) & (RC_NPORT - 1))
-
-/* Ticks per sec. Used for setting receiver timeout and break length */
-#define RISCOM_TPS 4000
-
-/* Yeah, after heavy testing I decided it must be 6.
- * Sure, You can change it if needed.
- */
-#define RISCOM_RXFIFO 6 /* Max. receiver FIFO size (1-8) */
-
-#define RISCOM8_MAGIC 0x0907
-
-#define RC_IOBASE1 0x220
-#define RC_IOBASE2 0x240
-#define RC_IOBASE3 0x250
-#define RC_IOBASE4 0x260
-
-struct riscom_board {
- unsigned long flags;
- unsigned short base;
- unsigned char irq;
- signed char count;
- unsigned char DTR;
-};
-
-#define RC_BOARD_PRESENT 0x00000001
-#define RC_BOARD_ACTIVE 0x00000002
-
-struct riscom_port {
- int magic;
- struct tty_port port;
- int baud_base;
- int timeout;
- int custom_divisor;
- int xmit_head;
- int xmit_tail;
- int xmit_cnt;
- short wakeup_chars;
- short break_length;
- unsigned char mark_mask;
- unsigned char IER;
- unsigned char MSVR;
- unsigned char COR2;
-#ifdef RC_REPORT_OVERRUN
- unsigned long overrun;
-#endif
-#ifdef RC_REPORT_FIFO
- unsigned long hits[10];
-#endif
-};
-
-#endif /* __KERNEL__ */
-#endif /* __LINUX_RISCOM8_H */
diff --git a/drivers/staging/tty/riscom8_reg.h b/drivers/staging/tty/riscom8_reg.h
deleted file mode 100644
index a32475ed0d1..00000000000
--- a/drivers/staging/tty/riscom8_reg.h
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * linux/drivers/char/riscom8_reg.h -- RISCom/8 multiport serial driver.
- */
-
-/*
- * Definitions for RISCom/8 Async Mux card by SDL Communications, Inc.
- */
-
-/*
- * Address mapping between Cirrus Logic CD180 chip internal registers
- * and ISA port addresses:
- *
- * CL-CD180 A6 A5 A4 A3 A2 A1 A0
- * ISA A15 A14 A13 A12 A11 A10 A9 A8 A7 A6 A5 A4 A3 A2 A1 A0
- */
-#define RC_TO_ISA(r) ((((r)&0x07)<<1) | (((r)&~0x07)<<7))
-
-
-/* RISCom/8 On-Board Registers (assuming address translation) */
-
-#define RC_RI 0x100 /* Ring Indicator Register (R/O) */
-#define RC_DTR 0x100 /* DTR Register (W/O) */
-#define RC_BSR 0x101 /* Board Status Register (R/O) */
-#define RC_CTOUT 0x101 /* Clear Timeout (W/O) */
-
-
-/* Board Status Register */
-
-#define RC_BSR_TOUT 0x08 /* Hardware Timeout */
-#define RC_BSR_RINT 0x04 /* Receiver Interrupt */
-#define RC_BSR_TINT 0x02 /* Transmitter Interrupt */
-#define RC_BSR_MINT 0x01 /* Modem Ctl Interrupt */
-
-
-/* On-board oscillator frequency (in Hz) */
-#define RC_OSCFREQ 9830400
-
-/* Values of choice for Interrupt ACKs */
-#define RC_ACK_MINT 0x81 /* goes to PILR1 */
-#define RC_ACK_RINT 0x82 /* goes to PILR3 */
-#define RC_ACK_TINT 0x84 /* goes to PILR2 */
-
-/* Chip ID (sorry, only one chip now) */
-#define RC_ID 0x10
-
-/* Definitions for Cirrus Logic CL-CD180 8-port async mux chip */
-
-#define CD180_NCH 8 /* Total number of channels */
-#define CD180_TPC 16 /* Ticks per character */
-#define CD180_NFIFO 8 /* TX FIFO size */
-
-
-/* Global registers */
-
-#define CD180_GIVR 0x40 /* Global Interrupt Vector Register */
-#define CD180_GICR 0x41 /* Global Interrupting Channel Register */
-#define CD180_PILR1 0x61 /* Priority Interrupt Level Register 1 */
-#define CD180_PILR2 0x62 /* Priority Interrupt Level Register 2 */
-#define CD180_PILR3 0x63 /* Priority Interrupt Level Register 3 */
-#define CD180_CAR 0x64 /* Channel Access Register */
-#define CD180_GFRCR 0x6b /* Global Firmware Revision Code Register */
-#define CD180_PPRH 0x70 /* Prescaler Period Register High */
-#define CD180_PPRL 0x71 /* Prescaler Period Register Low */
-#define CD180_RDR 0x78 /* Receiver Data Register */
-#define CD180_RCSR 0x7a /* Receiver Character Status Register */
-#define CD180_TDR 0x7b /* Transmit Data Register */
-#define CD180_EOIR 0x7f /* End of Interrupt Register */
-
-
-/* Channel Registers */
-
-#define CD180_CCR 0x01 /* Channel Command Register */
-#define CD180_IER 0x02 /* Interrupt Enable Register */
-#define CD180_COR1 0x03 /* Channel Option Register 1 */
-#define CD180_COR2 0x04 /* Channel Option Register 2 */
-#define CD180_COR3 0x05 /* Channel Option Register 3 */
-#define CD180_CCSR 0x06 /* Channel Control Status Register */
-#define CD180_RDCR 0x07 /* Receive Data Count Register */
-#define CD180_SCHR1 0x09 /* Special Character Register 1 */
-#define CD180_SCHR2 0x0a /* Special Character Register 2 */
-#define CD180_SCHR3 0x0b /* Special Character Register 3 */
-#define CD180_SCHR4 0x0c /* Special Character Register 4 */
-#define CD180_MCOR1 0x10 /* Modem Change Option 1 Register */
-#define CD180_MCOR2 0x11 /* Modem Change Option 2 Register */
-#define CD180_MCR 0x12 /* Modem Change Register */
-#define CD180_RTPR 0x18 /* Receive Timeout Period Register */
-#define CD180_MSVR 0x28 /* Modem Signal Value Register */
-#define CD180_RBPRH 0x31 /* Receive Baud Rate Period Register High */
-#define CD180_RBPRL 0x32 /* Receive Baud Rate Period Register Low */
-#define CD180_TBPRH 0x39 /* Transmit Baud Rate Period Register High */
-#define CD180_TBPRL 0x3a /* Transmit Baud Rate Period Register Low */
-
-
-/* Global Interrupt Vector Register (R/W) */
-
-#define GIVR_ITMASK 0x07 /* Interrupt type mask */
-#define GIVR_IT_MODEM 0x01 /* Modem Signal Change Interrupt */
-#define GIVR_IT_TX 0x02 /* Transmit Data Interrupt */
-#define GIVR_IT_RCV 0x03 /* Receive Good Data Interrupt */
-#define GIVR_IT_REXC 0x07 /* Receive Exception Interrupt */
-
-
-/* Global Interrupt Channel Register (R/W) */
-
-#define GICR_CHAN 0x1c /* Channel Number Mask */
-#define GICR_CHAN_OFF 2 /* Channel Number Offset */
-
-
-/* Channel Address Register (R/W) */
-
-#define CAR_CHAN 0x07 /* Channel Number Mask */
-#define CAR_A7 0x08 /* A7 Address Extension (unused) */
-
-
-/* Receive Character Status Register (R/O) */
-
-#define RCSR_TOUT 0x80 /* Rx Timeout */
-#define RCSR_SCDET 0x70 /* Special Character Detected Mask */
-#define RCSR_NO_SC 0x00 /* No Special Characters Detected */
-#define RCSR_SC_1 0x10 /* Special Char 1 (or 1 & 3) Detected */
-#define RCSR_SC_2 0x20 /* Special Char 2 (or 2 & 4) Detected */
-#define RCSR_SC_3 0x30 /* Special Char 3 Detected */
-#define RCSR_SC_4 0x40 /* Special Char 4 Detected */
-#define RCSR_BREAK 0x08 /* Break has been detected */
-#define RCSR_PE 0x04 /* Parity Error */
-#define RCSR_FE 0x02 /* Frame Error */
-#define RCSR_OE 0x01 /* Overrun Error */
-
-
-/* Channel Command Register (R/W) (commands in groups can be OR-ed) */
-
-#define CCR_HARDRESET 0x81 /* Reset the chip */
-
-#define CCR_SOFTRESET 0x80 /* Soft Channel Reset */
-
-#define CCR_CORCHG1 0x42 /* Channel Option Register 1 Changed */
-#define CCR_CORCHG2 0x44 /* Channel Option Register 2 Changed */
-#define CCR_CORCHG3 0x48 /* Channel Option Register 3 Changed */
-
-#define CCR_SSCH1 0x21 /* Send Special Character 1 */
-
-#define CCR_SSCH2 0x22 /* Send Special Character 2 */
-
-#define CCR_SSCH3 0x23 /* Send Special Character 3 */
-
-#define CCR_SSCH4 0x24 /* Send Special Character 4 */
-
-#define CCR_TXEN 0x18 /* Enable Transmitter */
-#define CCR_RXEN 0x12 /* Enable Receiver */
-
-#define CCR_TXDIS 0x14 /* Disable Transmitter */
-#define CCR_RXDIS 0x11 /* Disable Receiver */
-
-
-/* Interrupt Enable Register (R/W) */
-
-#define IER_DSR 0x80 /* Enable interrupt on DSR change */
-#define IER_CD 0x40 /* Enable interrupt on CD change */
-#define IER_CTS 0x20 /* Enable interrupt on CTS change */
-#define IER_RXD 0x10 /* Enable interrupt on Receive Data */
-#define IER_RXSC 0x08 /* Enable interrupt on Receive Spec. Char */
-#define IER_TXRDY 0x04 /* Enable interrupt on TX FIFO empty */
-#define IER_TXEMPTY 0x02 /* Enable interrupt on TX completely empty */
-#define IER_RET 0x01 /* Enable interrupt on RX Exc. Timeout */
-
-
-/* Channel Option Register 1 (R/W) */
-
-#define COR1_ODDP 0x80 /* Odd Parity */
-#define COR1_PARMODE 0x60 /* Parity Mode mask */
-#define COR1_NOPAR 0x00 /* No Parity */
-#define COR1_FORCEPAR 0x20 /* Force Parity */
-#define COR1_NORMPAR 0x40 /* Normal Parity */
-#define COR1_IGNORE 0x10 /* Ignore Parity on RX */
-#define COR1_STOPBITS 0x0c /* Number of Stop Bits */
-#define COR1_1SB 0x00 /* 1 Stop Bit */
-#define COR1_15SB 0x04 /* 1.5 Stop Bits */
-#define COR1_2SB 0x08 /* 2 Stop Bits */
-#define COR1_CHARLEN 0x03 /* Character Length */
-#define COR1_5BITS 0x00 /* 5 bits */
-#define COR1_6BITS 0x01 /* 6 bits */
-#define COR1_7BITS 0x02 /* 7 bits */
-#define COR1_8BITS 0x03 /* 8 bits */
-
-
-/* Channel Option Register 2 (R/W) */
-
-#define COR2_IXM 0x80 /* Implied XON mode */
-#define COR2_TXIBE 0x40 /* Enable In-Band (XON/XOFF) Flow Control */
-#define COR2_ETC 0x20 /* Embedded Tx Commands Enable */
-#define COR2_LLM 0x10 /* Local Loopback Mode */
-#define COR2_RLM 0x08 /* Remote Loopback Mode */
-#define COR2_RTSAO 0x04 /* RTS Automatic Output Enable */
-#define COR2_CTSAE 0x02 /* CTS Automatic Enable */
-#define COR2_DSRAE 0x01 /* DSR Automatic Enable */
-
-
-/* Channel Option Register 3 (R/W) */
-
-#define COR3_XONCH 0x80 /* XON is a pair of characters (1 & 3) */
-#define COR3_XOFFCH 0x40 /* XOFF is a pair of characters (2 & 4) */
-#define COR3_FCT 0x20 /* Flow-Control Transparency Mode */
-#define COR3_SCDE 0x10 /* Special Character Detection Enable */
-#define COR3_RXTH 0x0f /* RX FIFO Threshold value (1-8) */
-
-
-/* Channel Control Status Register (R/O) */
-
-#define CCSR_RXEN 0x80 /* Receiver Enabled */
-#define CCSR_RXFLOFF 0x40 /* Receive Flow Off (XOFF was sent) */
-#define CCSR_RXFLON 0x20 /* Receive Flow On (XON was sent) */
-#define CCSR_TXEN 0x08 /* Transmitter Enabled */
-#define CCSR_TXFLOFF 0x04 /* Transmit Flow Off (got XOFF) */
-#define CCSR_TXFLON 0x02 /* Transmit Flow On (got XON) */
-
-
-/* Modem Change Option Register 1 (R/W) */
-
-#define MCOR1_DSRZD 0x80 /* Detect 0->1 transition of DSR */
-#define MCOR1_CDZD 0x40 /* Detect 0->1 transition of CD */
-#define MCOR1_CTSZD 0x20 /* Detect 0->1 transition of CTS */
-#define MCOR1_DTRTH 0x0f /* Auto DTR flow control Threshold (1-8) */
-#define MCOR1_NODTRFC 0x0 /* Automatic DTR flow control disabled */
-
-
-/* Modem Change Option Register 2 (R/W) */
-
-#define MCOR2_DSROD 0x80 /* Detect 1->0 transition of DSR */
-#define MCOR2_CDOD 0x40 /* Detect 1->0 transition of CD */
-#define MCOR2_CTSOD 0x20 /* Detect 1->0 transition of CTS */
-
-
-/* Modem Change Register (R/W) */
-
-#define MCR_DSRCHG 0x80 /* DSR Changed */
-#define MCR_CDCHG 0x40 /* CD Changed */
-#define MCR_CTSCHG 0x20 /* CTS Changed */
-
-
-/* Modem Signal Value Register (R/W) */
-
-#define MSVR_DSR 0x80 /* Current state of DSR input */
-#define MSVR_CD 0x40 /* Current state of CD input */
-#define MSVR_CTS 0x20 /* Current state of CTS input */
-#define MSVR_DTR 0x02 /* Current state of DTR output */
-#define MSVR_RTS 0x01 /* Current state of RTS output */
-
-
-/* Escape characters */
-
-#define CD180_C_ESC 0x00 /* Escape character */
-#define CD180_C_SBRK 0x81 /* Start sending BREAK */
-#define CD180_C_DELAY 0x82 /* Delay output */
-#define CD180_C_EBRK 0x83 /* Stop sending BREAK */
diff --git a/drivers/staging/tty/serial167.c b/drivers/staging/tty/serial167.c
deleted file mode 100644
index 674af693397..00000000000
--- a/drivers/staging/tty/serial167.c
+++ /dev/null
@@ -1,2489 +0,0 @@
-/*
- * linux/drivers/char/serial167.c
- *
- * Driver for MVME166/7 board serial ports, which are via a CD2401.
- * Based very much on cyclades.c.
- *
- * MVME166/7 work by Richard Hirst [richard@sleepie.demon.co.uk]
- *
- * ==============================================================
- *
- * static char rcsid[] =
- * "$Revision: 1.36.1.4 $$Date: 1995/03/29 06:14:14 $";
- *
- * linux/kernel/cyclades.c
- *
- * Maintained by Marcio Saito (cyclades@netcom.com) and
- * Randolph Bentson (bentson@grieg.seaslug.org)
- *
- * Much of the design and some of the code came from serial.c
- * which was copyright (C) 1991, 1992 Linus Torvalds. It was
- * extensively rewritten by Theodore Ts'o, 8/16/92 -- 9/14/92,
- * and then fixed as suggested by Michael K. Johnson 12/12/92.
- *
- * This version does not support shared irq's.
- *
- * $Log: cyclades.c,v $
- * Revision 1.36.1.4 1995/03/29 06:14:14 bentson
- * disambiguate between Cyclom-16Y and Cyclom-32Ye;
- *
- * Changes:
- *
- * 200 lines of changes record removed - RGH 11-10-95, starting work on
- * converting this to drive serial ports on mvme166 (cd2401).
- *
- * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 2000/08/25
- * - get rid of verify_area
- * - use get_user to access memory from userspace in set_threshold,
- * set_default_threshold and set_timeout
- * - don't use the panic function in serial167_init
- * - do resource release on failure on serial167_init
- * - include missing restore_flags in mvme167_serial_console_setup
- *
- * Kars de Jong <jongk@linux-m68k.org> - 2004/09/06
- * - replace bottom half handler with task queue handler
- */
-
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/tty.h>
-#include <linux/interrupt.h>
-#include <linux/serial.h>
-#include <linux/serialP.h>
-#include <linux/string.h>
-#include <linux/fcntl.h>
-#include <linux/ptrace.h>
-#include <linux/serial167.h>
-#include <linux/delay.h>
-#include <linux/major.h>
-#include <linux/mm.h>
-#include <linux/console.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/tty_flip.h>
-#include <linux/gfp.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/mvme16xhw.h>
-#include <asm/bootinfo.h>
-#include <asm/setup.h>
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <asm/uaccess.h>
-#include <linux/init.h>
-
-#define SERIAL_PARANOIA_CHECK
-#undef SERIAL_DEBUG_OPEN
-#undef SERIAL_DEBUG_THROTTLE
-#undef SERIAL_DEBUG_OTHER
-#undef SERIAL_DEBUG_IO
-#undef SERIAL_DEBUG_COUNT
-#undef SERIAL_DEBUG_DTR
-#undef CYCLOM_16Y_HACK
-#define CYCLOM_ENABLE_MONITORING
-
-#define WAKEUP_CHARS 256
-
-#define STD_COM_FLAGS (0)
-
-static struct tty_driver *cy_serial_driver;
-extern int serial_console;
-static struct cyclades_port *serial_console_info = NULL;
-static unsigned int serial_console_cflag = 0;
-u_char initial_console_speed;
-
-/* Base address of cd2401 chip on mvme166/7 */
-
-#define BASE_ADDR (0xfff45000)
-#define pcc2chip ((volatile u_char *)0xfff42000)
-#define PccSCCMICR 0x1d
-#define PccSCCTICR 0x1e
-#define PccSCCRICR 0x1f
-#define PccTPIACKR 0x25
-#define PccRPIACKR 0x27
-#define PccIMLR 0x3f
-
-/* This is the per-port data structure */
-struct cyclades_port cy_port[] = {
- /* CARD# */
- {-1}, /* ttyS0 */
- {-1}, /* ttyS1 */
- {-1}, /* ttyS2 */
- {-1}, /* ttyS3 */
-};
-
-#define NR_PORTS ARRAY_SIZE(cy_port)
-
-/*
- * This is used to look up the divisor speeds and the timeouts
- * We're normally limited to 15 distinct baud rates. The extra
- * are accessed via settings in info->flags.
- * 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
- * 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
- * HI VHI
- */
-static int baud_table[] = {
- 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200,
- 1800, 2400, 4800, 9600, 19200, 38400, 57600, 76800, 115200, 150000,
- 0
-};
-
-#if 0
-static char baud_co[] = { /* 25 MHz clock option table */
- /* value => 00 01 02 03 04 */
- /* divide by 8 32 128 512 2048 */
- 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x02,
- 0x02, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-static char baud_bpr[] = { /* 25 MHz baud rate period table */
- 0x00, 0xf5, 0xa3, 0x6f, 0x5c, 0x51, 0xf5, 0xa3, 0x51, 0xa3,
- 0x6d, 0x51, 0xa3, 0x51, 0xa3, 0x51, 0x36, 0x29, 0x1b, 0x15
-};
-#endif
-
-/* I think 166 brd clocks 2401 at 20MHz.... */
-
-/* These values are written directly to tcor, and >> 5 for writing to rcor */
-static u_char baud_co[] = { /* 20 MHz clock option table */
- 0x00, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x60, 0x60, 0x40,
- 0x40, 0x40, 0x20, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
-};
-
-/* These values written directly to tbpr/rbpr */
-static u_char baud_bpr[] = { /* 20 MHz baud rate period table */
- 0x00, 0xc0, 0x80, 0x58, 0x6c, 0x40, 0xc0, 0x81, 0x40, 0x81,
- 0x57, 0x40, 0x81, 0x40, 0x81, 0x40, 0x2b, 0x20, 0x15, 0x10
-};
-
-static u_char baud_cor4[] = { /* receive threshold */
- 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
- 0x0a, 0x0a, 0x0a, 0x09, 0x09, 0x08, 0x08, 0x08, 0x08, 0x07
-};
-
-static void shutdown(struct cyclades_port *);
-static int startup(struct cyclades_port *);
-static void cy_throttle(struct tty_struct *);
-static void cy_unthrottle(struct tty_struct *);
-static void config_setup(struct cyclades_port *);
-#ifdef CYCLOM_SHOW_STATUS
-static void show_status(int);
-#endif
-
-/*
- * I have my own version of udelay(), as it is needed when initialising
- * the chip, before the delay loop has been calibrated. Should probably
- * reference one of the vmechip2 or pccchip2 counter for an accurate
- * delay, but this wild guess will do for now.
- */
-
-void my_udelay(long us)
-{
- u_char x;
- volatile u_char *p = &x;
- int i;
-
- while (us--)
- for (i = 100; i; i--)
- x |= *p;
-}
-
-static inline int serial_paranoia_check(struct cyclades_port *info, char *name,
- const char *routine)
-{
-#ifdef SERIAL_PARANOIA_CHECK
- if (!info) {
- printk("Warning: null cyclades_port for (%s) in %s\n", name,
- routine);
- return 1;
- }
-
- if (info < &cy_port[0] || info >= &cy_port[NR_PORTS]) {
- printk("Warning: cyclades_port out of range for (%s) in %s\n",
- name, routine);
- return 1;
- }
-
- if (info->magic != CYCLADES_MAGIC) {
- printk("Warning: bad magic number for serial struct (%s) in "
- "%s\n", name, routine);
- return 1;
- }
-#endif
- return 0;
-} /* serial_paranoia_check */
-
-#if 0
-/* The following diagnostic routines allow the driver to spew
- information on the screen, even (especially!) during interrupts.
- */
-void SP(char *data)
-{
- unsigned long flags;
- local_irq_save(flags);
- printk(KERN_EMERG "%s", data);
- local_irq_restore(flags);
-}
-
-char scrn[2];
-void CP(char data)
-{
- unsigned long flags;
- local_irq_save(flags);
- scrn[0] = data;
- printk(KERN_EMERG "%c", scrn);
- local_irq_restore(flags);
-} /* CP */
-
-void CP1(int data)
-{
- (data < 10) ? CP(data + '0') : CP(data + 'A' - 10);
-} /* CP1 */
-void CP2(int data)
-{
- CP1((data >> 4) & 0x0f);
- CP1(data & 0x0f);
-} /* CP2 */
-void CP4(int data)
-{
- CP2((data >> 8) & 0xff);
- CP2(data & 0xff);
-} /* CP4 */
-void CP8(long data)
-{
- CP4((data >> 16) & 0xffff);
- CP4(data & 0xffff);
-} /* CP8 */
-#endif
-
-/* This routine waits up to 1000 micro-seconds for the previous
- command to the Cirrus chip to complete and then issues the
- new command. An error is returned if the previous command
- didn't finish within the time limit.
- */
-u_short write_cy_cmd(volatile u_char * base_addr, u_char cmd)
-{
- unsigned long flags;
- volatile int i;
-
- local_irq_save(flags);
- /* Check to see that the previous command has completed */
- for (i = 0; i < 100; i++) {
- if (base_addr[CyCCR] == 0) {
- break;
- }
- my_udelay(10L);
- }
- /* if the CCR never cleared, the previous command
- didn't finish within the "reasonable time" */
- if (i == 10) {
- local_irq_restore(flags);
- return (-1);
- }
-
- /* Issue the new command */
- base_addr[CyCCR] = cmd;
- local_irq_restore(flags);
- return (0);
-} /* write_cy_cmd */
-
-/* cy_start and cy_stop provide software output flow control as a
- function of XON/XOFF, software CTS, and other such stuff. */
-
-static void cy_stop(struct tty_struct *tty)
-{
- struct cyclades_port *info = tty->driver_data;
- volatile unsigned char *base_addr = (unsigned char *)BASE_ADDR;
- int channel;
- unsigned long flags;
-
-#ifdef SERIAL_DEBUG_OTHER
- printk("cy_stop %s\n", tty->name); /* */
-#endif
-
- if (serial_paranoia_check(info, tty->name, "cy_stop"))
- return;
-
- channel = info->line;
-
- local_irq_save(flags);
- base_addr[CyCAR] = (u_char) (channel); /* index channel */
- base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
- local_irq_restore(flags);
-} /* cy_stop */
-
-static void cy_start(struct tty_struct *tty)
-{
- struct cyclades_port *info = tty->driver_data;
- volatile unsigned char *base_addr = (unsigned char *)BASE_ADDR;
- int channel;
- unsigned long flags;
-
-#ifdef SERIAL_DEBUG_OTHER
- printk("cy_start %s\n", tty->name); /* */
-#endif
-
- if (serial_paranoia_check(info, tty->name, "cy_start"))
- return;
-
- channel = info->line;
-
- local_irq_save(flags);
- base_addr[CyCAR] = (u_char) (channel);
- base_addr[CyIER] |= CyTxMpty;
- local_irq_restore(flags);
-} /* cy_start */
-
-/* The real interrupt service routines are called
- whenever the card wants its hand held--chars
- received, out buffer empty, modem change, etc.
- */
-static irqreturn_t cd2401_rxerr_interrupt(int irq, void *dev_id)
-{
- struct tty_struct *tty;
- struct cyclades_port *info;
- volatile unsigned char *base_addr = (unsigned char *)BASE_ADDR;
- unsigned char err, rfoc;
- int channel;
- char data;
-
- /* determine the channel and change to that context */
- channel = (u_short) (base_addr[CyLICR] >> 2);
- info = &cy_port[channel];
- info->last_active = jiffies;
-
- if ((err = base_addr[CyRISR]) & CyTIMEOUT) {
- /* This is a receive timeout interrupt, ignore it */
- base_addr[CyREOIR] = CyNOTRANS;
- return IRQ_HANDLED;
- }
-
- /* Read a byte of data if there is any - assume the error
- * is associated with this character */
-
- if ((rfoc = base_addr[CyRFOC]) != 0)
- data = base_addr[CyRDR];
- else
- data = 0;
-
- /* if there is nowhere to put the data, discard it */
- if (info->tty == 0) {
- base_addr[CyREOIR] = rfoc ? 0 : CyNOTRANS;
- return IRQ_HANDLED;
- } else { /* there is an open port for this data */
- tty = info->tty;
- if (err & info->ignore_status_mask) {
- base_addr[CyREOIR] = rfoc ? 0 : CyNOTRANS;
- return IRQ_HANDLED;
- }
- if (tty_buffer_request_room(tty, 1) != 0) {
- if (err & info->read_status_mask) {
- if (err & CyBREAK) {
- tty_insert_flip_char(tty, data,
- TTY_BREAK);
- if (info->flags & ASYNC_SAK) {
- do_SAK(tty);
- }
- } else if (err & CyFRAME) {
- tty_insert_flip_char(tty, data,
- TTY_FRAME);
- } else if (err & CyPARITY) {
- tty_insert_flip_char(tty, data,
- TTY_PARITY);
- } else if (err & CyOVERRUN) {
- tty_insert_flip_char(tty, 0,
- TTY_OVERRUN);
- /*
- If the flip buffer itself is
- overflowing, we still lose
- the next incoming character.
- */
- if (tty_buffer_request_room(tty, 1) !=
- 0) {
- tty_insert_flip_char(tty, data,
- TTY_FRAME);
- }
- /* These two conditions may imply */
- /* a normal read should be done. */
- /* else if(data & CyTIMEOUT) */
- /* else if(data & CySPECHAR) */
- } else {
- tty_insert_flip_char(tty, 0,
- TTY_NORMAL);
- }
- } else {
- tty_insert_flip_char(tty, data, TTY_NORMAL);
- }
- } else {
- /* there was a software buffer overrun
- and nothing could be done about it!!! */
- }
- }
- tty_schedule_flip(tty);
- /* end of service */
- base_addr[CyREOIR] = rfoc ? 0 : CyNOTRANS;
- return IRQ_HANDLED;
-} /* cy_rxerr_interrupt */
-
-static irqreturn_t cd2401_modem_interrupt(int irq, void *dev_id)
-{
- struct cyclades_port *info;
- volatile unsigned char *base_addr = (unsigned char *)BASE_ADDR;
- int channel;
- int mdm_change;
- int mdm_status;
-
- /* determine the channel and change to that context */
- channel = (u_short) (base_addr[CyLICR] >> 2);
- info = &cy_port[channel];
- info->last_active = jiffies;
-
- mdm_change = base_addr[CyMISR];
- mdm_status = base_addr[CyMSVR1];
-
- if (info->tty == 0) { /* nowhere to put the data, ignore it */
- ;
- } else {
- if ((mdm_change & CyDCD)
- && (info->flags & ASYNC_CHECK_CD)) {
- if (mdm_status & CyDCD) {
-/* CP('!'); */
- wake_up_interruptible(&info->open_wait);
- } else {
-/* CP('@'); */
- tty_hangup(info->tty);
- wake_up_interruptible(&info->open_wait);
- info->flags &= ~ASYNC_NORMAL_ACTIVE;
- }
- }
- if ((mdm_change & CyCTS)
- && (info->flags & ASYNC_CTS_FLOW)) {
- if (info->tty->stopped) {
- if (mdm_status & CyCTS) {
- /* !!! cy_start isn't used because... */
- info->tty->stopped = 0;
- base_addr[CyIER] |= CyTxMpty;
- tty_wakeup(info->tty);
- }
- } else {
- if (!(mdm_status & CyCTS)) {
- /* !!! cy_stop isn't used because... */
- info->tty->stopped = 1;
- base_addr[CyIER] &=
- ~(CyTxMpty | CyTxRdy);
- }
- }
- }
- if (mdm_status & CyDSR) {
- }
- }
- base_addr[CyMEOIR] = 0;
- return IRQ_HANDLED;
-} /* cy_modem_interrupt */
-
-static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id)
-{
- struct cyclades_port *info;
- volatile unsigned char *base_addr = (unsigned char *)BASE_ADDR;
- int channel;
- int char_count, saved_cnt;
- int outch;
-
- /* determine the channel and change to that context */
- channel = (u_short) (base_addr[CyLICR] >> 2);
-
- /* validate the port number (as configured and open) */
- if ((channel < 0) || (NR_PORTS <= channel)) {
- base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
- base_addr[CyTEOIR] = CyNOTRANS;
- return IRQ_HANDLED;
- }
- info = &cy_port[channel];
- info->last_active = jiffies;
- if (info->tty == 0) {
- base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
- base_addr[CyTEOIR] = CyNOTRANS;
- return IRQ_HANDLED;
- }
-
- /* load the on-chip space available for outbound data */
- saved_cnt = char_count = base_addr[CyTFTC];
-
- if (info->x_char) { /* send special char */
- outch = info->x_char;
- base_addr[CyTDR] = outch;
- char_count--;
- info->x_char = 0;
- }
-
- if (info->x_break) {
- /* The Cirrus chip requires the "Embedded Transmit
- Commands" of start break, delay, and end break
- sequences to be sent. The duration of the
- break is given in TICs, which runs at HZ
- (typically 100) and the PPR runs at 200 Hz,
- so the delay is duration * 200/HZ, and thus a
- break can run from 1/100 sec to about 5/4 sec.
- Need to check these values - RGH 141095.
- */
- base_addr[CyTDR] = 0; /* start break */
- base_addr[CyTDR] = 0x81;
- base_addr[CyTDR] = 0; /* delay a bit */
- base_addr[CyTDR] = 0x82;
- base_addr[CyTDR] = info->x_break * 200 / HZ;
- base_addr[CyTDR] = 0; /* terminate break */
- base_addr[CyTDR] = 0x83;
- char_count -= 7;
- info->x_break = 0;
- }
-
- while (char_count > 0) {
- if (!info->xmit_cnt) {
- base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
- break;
- }
- if (info->xmit_buf == 0) {
- base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
- break;
- }
- if (info->tty->stopped || info->tty->hw_stopped) {
- base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
- break;
- }
- /* Because the Embedded Transmit Commands have been
- enabled, we must check to see if the escape
- character, NULL, is being sent. If it is, we
- must ensure that there is room for it to be
- doubled in the output stream. Therefore we
- no longer advance the pointer when the character
- is fetched, but rather wait until after the check
- for a NULL output character. (This is necessary
- because there may not be room for the two chars
- needed to send a NULL.
- */
- outch = info->xmit_buf[info->xmit_tail];
- if (outch) {
- info->xmit_cnt--;
- info->xmit_tail = (info->xmit_tail + 1)
- & (PAGE_SIZE - 1);
- base_addr[CyTDR] = outch;
- char_count--;
- } else {
- if (char_count > 1) {
- info->xmit_cnt--;
- info->xmit_tail = (info->xmit_tail + 1)
- & (PAGE_SIZE - 1);
- base_addr[CyTDR] = outch;
- base_addr[CyTDR] = 0;
- char_count--;
- char_count--;
- } else {
- break;
- }
- }
- }
-
- if (info->xmit_cnt < WAKEUP_CHARS)
- tty_wakeup(info->tty);
-
- base_addr[CyTEOIR] = (char_count != saved_cnt) ? 0 : CyNOTRANS;
- return IRQ_HANDLED;
-} /* cy_tx_interrupt */
-
-static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id)
-{
- struct tty_struct *tty;
- struct cyclades_port *info;
- volatile unsigned char *base_addr = (unsigned char *)BASE_ADDR;
- int channel;
- char data;
- int char_count;
- int save_cnt;
-
- /* determine the channel and change to that context */
- channel = (u_short) (base_addr[CyLICR] >> 2);
- info = &cy_port[channel];
- info->last_active = jiffies;
- save_cnt = char_count = base_addr[CyRFOC];
-
- /* if there is nowhere to put the data, discard it */
- if (info->tty == 0) {
- while (char_count--) {
- data = base_addr[CyRDR];
- }
- } else { /* there is an open port for this data */
- tty = info->tty;
- /* load # characters available from the chip */
-
-#ifdef CYCLOM_ENABLE_MONITORING
- ++info->mon.int_count;
- info->mon.char_count += char_count;
- if (char_count > info->mon.char_max)
- info->mon.char_max = char_count;
- info->mon.char_last = char_count;
-#endif
- while (char_count--) {
- data = base_addr[CyRDR];
- tty_insert_flip_char(tty, data, TTY_NORMAL);
-#ifdef CYCLOM_16Y_HACK
- udelay(10L);
-#endif
- }
- tty_schedule_flip(tty);
- }
- /* end of service */
- base_addr[CyREOIR] = save_cnt ? 0 : CyNOTRANS;
- return IRQ_HANDLED;
-} /* cy_rx_interrupt */
-
-/* This is called whenever a port becomes active;
- interrupts are enabled and DTR & RTS are turned on.
- */
-static int startup(struct cyclades_port *info)
-{
- unsigned long flags;
- volatile unsigned char *base_addr = (unsigned char *)BASE_ADDR;
- int channel;
-
- if (info->flags & ASYNC_INITIALIZED) {
- return 0;
- }
-
- if (!info->type) {
- if (info->tty) {
- set_bit(TTY_IO_ERROR, &info->tty->flags);
- }
- return 0;
- }
- if (!info->xmit_buf) {
- info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
- if (!info->xmit_buf) {
- return -ENOMEM;
- }
- }
-
- config_setup(info);
-
- channel = info->line;
-
-#ifdef SERIAL_DEBUG_OPEN
- printk("startup channel %d\n", channel);
-#endif
-
- local_irq_save(flags);
- base_addr[CyCAR] = (u_char) channel;
- write_cy_cmd(base_addr, CyENB_RCVR | CyENB_XMTR);
-
- base_addr[CyCAR] = (u_char) channel; /* !!! Is this needed? */
- base_addr[CyMSVR1] = CyRTS;
-/* CP('S');CP('1'); */
- base_addr[CyMSVR2] = CyDTR;
-
-#ifdef SERIAL_DEBUG_DTR
- printk("cyc: %d: raising DTR\n", __LINE__);
- printk(" status: 0x%x, 0x%x\n", base_addr[CyMSVR1],
- base_addr[CyMSVR2]);
-#endif
-
- base_addr[CyIER] |= CyRxData;
- info->flags |= ASYNC_INITIALIZED;
-
- if (info->tty) {
- clear_bit(TTY_IO_ERROR, &info->tty->flags);
- }
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-
- local_irq_restore(flags);
-
-#ifdef SERIAL_DEBUG_OPEN
- printk(" done\n");
-#endif
- return 0;
-} /* startup */
-
-void start_xmit(struct cyclades_port *info)
-{
- unsigned long flags;
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- int channel;
-
- channel = info->line;
- local_irq_save(flags);
- base_addr[CyCAR] = channel;
- base_addr[CyIER] |= CyTxMpty;
- local_irq_restore(flags);
-} /* start_xmit */
-
-/*
- * This routine shuts down a serial port; interrupts are disabled,
- * and DTR is dropped if the hangup on close termio flag is on.
- */
-static void shutdown(struct cyclades_port *info)
-{
- unsigned long flags;
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- int channel;
-
- if (!(info->flags & ASYNC_INITIALIZED)) {
-/* CP('$'); */
- return;
- }
-
- channel = info->line;
-
-#ifdef SERIAL_DEBUG_OPEN
- printk("shutdown channel %d\n", channel);
-#endif
-
- /* !!! REALLY MUST WAIT FOR LAST CHARACTER TO BE
- SENT BEFORE DROPPING THE LINE !!! (Perhaps
- set some flag that is read when XMTY happens.)
- Other choices are to delay some fixed interval
- or schedule some later processing.
- */
- local_irq_save(flags);
- if (info->xmit_buf) {
- free_page((unsigned long)info->xmit_buf);
- info->xmit_buf = NULL;
- }
-
- base_addr[CyCAR] = (u_char) channel;
- if (!info->tty || (info->tty->termios->c_cflag & HUPCL)) {
- base_addr[CyMSVR1] = 0;
-/* CP('C');CP('1'); */
- base_addr[CyMSVR2] = 0;
-#ifdef SERIAL_DEBUG_DTR
- printk("cyc: %d: dropping DTR\n", __LINE__);
- printk(" status: 0x%x, 0x%x\n", base_addr[CyMSVR1],
- base_addr[CyMSVR2]);
-#endif
- }
- write_cy_cmd(base_addr, CyDIS_RCVR);
- /* it may be appropriate to clear _XMIT at
- some later date (after testing)!!! */
-
- if (info->tty) {
- set_bit(TTY_IO_ERROR, &info->tty->flags);
- }
- info->flags &= ~ASYNC_INITIALIZED;
- local_irq_restore(flags);
-
-#ifdef SERIAL_DEBUG_OPEN
- printk(" done\n");
-#endif
-} /* shutdown */
-
-/*
- * This routine finds or computes the various line characteristics.
- */
-static void config_setup(struct cyclades_port *info)
-{
- unsigned long flags;
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- int channel;
- unsigned cflag;
- int i;
- unsigned char ti, need_init_chan = 0;
-
- if (!info->tty || !info->tty->termios) {
- return;
- }
- if (info->line == -1) {
- return;
- }
- cflag = info->tty->termios->c_cflag;
-
- /* baud rate */
- i = cflag & CBAUD;
-#ifdef CBAUDEX
-/* Starting with kernel 1.1.65, there is direct support for
- higher baud rates. The following code supports those
- changes. The conditional aspect allows this driver to be
- used for earlier as well as later kernel versions. (The
- mapping is slightly different from serial.c because there
- is still the possibility of supporting 75 kbit/sec with
- the Cyclades board.)
- */
- if (i & CBAUDEX) {
- if (i == B57600)
- i = 16;
- else if (i == B115200)
- i = 18;
-#ifdef B78600
- else if (i == B78600)
- i = 17;
-#endif
- else
- info->tty->termios->c_cflag &= ~CBAUDEX;
- }
-#endif
- if (i == 15) {
- if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
- i += 1;
- if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
- i += 3;
- }
- /* Don't ever change the speed of the console port. It will
- * run at the speed specified in bootinfo, or at 19.2K */
- /* Actually, it should run at whatever speed 166Bug was using */
- /* Note info->timeout isn't used at present */
- if (info != serial_console_info) {
- info->tbpr = baud_bpr[i]; /* Tx BPR */
- info->tco = baud_co[i]; /* Tx CO */
- info->rbpr = baud_bpr[i]; /* Rx BPR */
- info->rco = baud_co[i] >> 5; /* Rx CO */
- if (baud_table[i] == 134) {
- info->timeout =
- (info->xmit_fifo_size * HZ * 30 / 269) + 2;
- /* get it right for 134.5 baud */
- } else if (baud_table[i]) {
- info->timeout =
- (info->xmit_fifo_size * HZ * 15 / baud_table[i]) +
- 2;
- /* this needs to be propagated into the card info */
- } else {
- info->timeout = 0;
- }
- }
- /* By tradition (is it a standard?) a baud rate of zero
- implies the line should be/has been closed. A bit
- later in this routine such a test is performed. */
-
- /* byte size and parity */
- info->cor7 = 0;
- info->cor6 = 0;
- info->cor5 = 0;
- info->cor4 = (info->default_threshold ? info->default_threshold : baud_cor4[i]); /* receive threshold */
- /* Following two lines added 101295, RGH. */
- /* It is obviously wrong to access CyCORx, and not info->corx here,
- * try and remember to fix it later! */
- channel = info->line;
- base_addr[CyCAR] = (u_char) channel;
- if (C_CLOCAL(info->tty)) {
- if (base_addr[CyIER] & CyMdmCh)
- base_addr[CyIER] &= ~CyMdmCh; /* without modem intr */
- /* ignore 1->0 modem transitions */
- if (base_addr[CyCOR4] & (CyDSR | CyCTS | CyDCD))
- base_addr[CyCOR4] &= ~(CyDSR | CyCTS | CyDCD);
- /* ignore 0->1 modem transitions */
- if (base_addr[CyCOR5] & (CyDSR | CyCTS | CyDCD))
- base_addr[CyCOR5] &= ~(CyDSR | CyCTS | CyDCD);
- } else {
- if ((base_addr[CyIER] & CyMdmCh) != CyMdmCh)
- base_addr[CyIER] |= CyMdmCh; /* with modem intr */
- /* act on 1->0 modem transitions */
- if ((base_addr[CyCOR4] & (CyDSR | CyCTS | CyDCD)) !=
- (CyDSR | CyCTS | CyDCD))
- base_addr[CyCOR4] |= CyDSR | CyCTS | CyDCD;
- /* act on 0->1 modem transitions */
- if ((base_addr[CyCOR5] & (CyDSR | CyCTS | CyDCD)) !=
- (CyDSR | CyCTS | CyDCD))
- base_addr[CyCOR5] |= CyDSR | CyCTS | CyDCD;
- }
- info->cor3 = (cflag & CSTOPB) ? Cy_2_STOP : Cy_1_STOP;
- info->cor2 = CyETC;
- switch (cflag & CSIZE) {
- case CS5:
- info->cor1 = Cy_5_BITS;
- break;
- case CS6:
- info->cor1 = Cy_6_BITS;
- break;
- case CS7:
- info->cor1 = Cy_7_BITS;
- break;
- case CS8:
- info->cor1 = Cy_8_BITS;
- break;
- }
- if (cflag & PARENB) {
- if (cflag & PARODD) {
- info->cor1 |= CyPARITY_O;
- } else {
- info->cor1 |= CyPARITY_E;
- }
- } else {
- info->cor1 |= CyPARITY_NONE;
- }
-
- /* CTS flow control flag */
-#if 0
- /* Don't complcate matters for now! RGH 141095 */
- if (cflag & CRTSCTS) {
- info->flags |= ASYNC_CTS_FLOW;
- info->cor2 |= CyCtsAE;
- } else {
- info->flags &= ~ASYNC_CTS_FLOW;
- info->cor2 &= ~CyCtsAE;
- }
-#endif
- if (cflag & CLOCAL)
- info->flags &= ~ASYNC_CHECK_CD;
- else
- info->flags |= ASYNC_CHECK_CD;
-
- /***********************************************
- The hardware option, CyRtsAO, presents RTS when
- the chip has characters to send. Since most modems
- use RTS as reverse (inbound) flow control, this
- option is not used. If inbound flow control is
- necessary, DTR can be programmed to provide the
- appropriate signals for use with a non-standard
- cable. Contact Marcio Saito for details.
- ***********************************************/
-
- channel = info->line;
-
- local_irq_save(flags);
- base_addr[CyCAR] = (u_char) channel;
-
- /* CyCMR set once only in mvme167_init_serial() */
- if (base_addr[CyLICR] != channel << 2)
- base_addr[CyLICR] = channel << 2;
- if (base_addr[CyLIVR] != 0x5c)
- base_addr[CyLIVR] = 0x5c;
-
- /* tx and rx baud rate */
-
- if (base_addr[CyCOR1] != info->cor1)
- need_init_chan = 1;
- if (base_addr[CyTCOR] != info->tco)
- base_addr[CyTCOR] = info->tco;
- if (base_addr[CyTBPR] != info->tbpr)
- base_addr[CyTBPR] = info->tbpr;
- if (base_addr[CyRCOR] != info->rco)
- base_addr[CyRCOR] = info->rco;
- if (base_addr[CyRBPR] != info->rbpr)
- base_addr[CyRBPR] = info->rbpr;
-
- /* set line characteristics according configuration */
-
- if (base_addr[CySCHR1] != START_CHAR(info->tty))
- base_addr[CySCHR1] = START_CHAR(info->tty);
- if (base_addr[CySCHR2] != STOP_CHAR(info->tty))
- base_addr[CySCHR2] = STOP_CHAR(info->tty);
- if (base_addr[CySCRL] != START_CHAR(info->tty))
- base_addr[CySCRL] = START_CHAR(info->tty);
- if (base_addr[CySCRH] != START_CHAR(info->tty))
- base_addr[CySCRH] = START_CHAR(info->tty);
- if (base_addr[CyCOR1] != info->cor1)
- base_addr[CyCOR1] = info->cor1;
- if (base_addr[CyCOR2] != info->cor2)
- base_addr[CyCOR2] = info->cor2;
- if (base_addr[CyCOR3] != info->cor3)
- base_addr[CyCOR3] = info->cor3;
- if (base_addr[CyCOR4] != info->cor4)
- base_addr[CyCOR4] = info->cor4;
- if (base_addr[CyCOR5] != info->cor5)
- base_addr[CyCOR5] = info->cor5;
- if (base_addr[CyCOR6] != info->cor6)
- base_addr[CyCOR6] = info->cor6;
- if (base_addr[CyCOR7] != info->cor7)
- base_addr[CyCOR7] = info->cor7;
-
- if (need_init_chan)
- write_cy_cmd(base_addr, CyINIT_CHAN);
-
- base_addr[CyCAR] = (u_char) channel; /* !!! Is this needed? */
-
- /* 2ms default rx timeout */
- ti = info->default_timeout ? info->default_timeout : 0x02;
- if (base_addr[CyRTPRL] != ti)
- base_addr[CyRTPRL] = ti;
- if (base_addr[CyRTPRH] != 0)
- base_addr[CyRTPRH] = 0;
-
- /* Set up RTS here also ????? RGH 141095 */
- if (i == 0) { /* baud rate is zero, turn off line */
- if ((base_addr[CyMSVR2] & CyDTR) == CyDTR)
- base_addr[CyMSVR2] = 0;
-#ifdef SERIAL_DEBUG_DTR
- printk("cyc: %d: dropping DTR\n", __LINE__);
- printk(" status: 0x%x, 0x%x\n", base_addr[CyMSVR1],
- base_addr[CyMSVR2]);
-#endif
- } else {
- if ((base_addr[CyMSVR2] & CyDTR) != CyDTR)
- base_addr[CyMSVR2] = CyDTR;
-#ifdef SERIAL_DEBUG_DTR
- printk("cyc: %d: raising DTR\n", __LINE__);
- printk(" status: 0x%x, 0x%x\n", base_addr[CyMSVR1],
- base_addr[CyMSVR2]);
-#endif
- }
-
- if (info->tty) {
- clear_bit(TTY_IO_ERROR, &info->tty->flags);
- }
-
- local_irq_restore(flags);
-
-} /* config_setup */
-
-static int cy_put_char(struct tty_struct *tty, unsigned char ch)
-{
- struct cyclades_port *info = tty->driver_data;
- unsigned long flags;
-
-#ifdef SERIAL_DEBUG_IO
- printk("cy_put_char %s(0x%02x)\n", tty->name, ch);
-#endif
-
- if (serial_paranoia_check(info, tty->name, "cy_put_char"))
- return 0;
-
- if (!info->xmit_buf)
- return 0;
-
- local_irq_save(flags);
- if (info->xmit_cnt >= PAGE_SIZE - 1) {
- local_irq_restore(flags);
- return 0;
- }
-
- info->xmit_buf[info->xmit_head++] = ch;
- info->xmit_head &= PAGE_SIZE - 1;
- info->xmit_cnt++;
- local_irq_restore(flags);
- return 1;
-} /* cy_put_char */
-
-static void cy_flush_chars(struct tty_struct *tty)
-{
- struct cyclades_port *info = tty->driver_data;
- unsigned long flags;
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- int channel;
-
-#ifdef SERIAL_DEBUG_IO
- printk("cy_flush_chars %s\n", tty->name); /* */
-#endif
-
- if (serial_paranoia_check(info, tty->name, "cy_flush_chars"))
- return;
-
- if (info->xmit_cnt <= 0 || tty->stopped
- || tty->hw_stopped || !info->xmit_buf)
- return;
-
- channel = info->line;
-
- local_irq_save(flags);
- base_addr[CyCAR] = channel;
- base_addr[CyIER] |= CyTxMpty;
- local_irq_restore(flags);
-} /* cy_flush_chars */
-
-/* This routine gets called when tty_write has put something into
- the write_queue. If the port is not already transmitting stuff,
- start it off by enabling interrupts. The interrupt service
- routine will then ensure that the characters are sent. If the
- port is already active, there is no need to kick it.
- */
-static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
- struct cyclades_port *info = tty->driver_data;
- unsigned long flags;
- int c, total = 0;
-
-#ifdef SERIAL_DEBUG_IO
- printk("cy_write %s\n", tty->name); /* */
-#endif
-
- if (serial_paranoia_check(info, tty->name, "cy_write")) {
- return 0;
- }
-
- if (!info->xmit_buf) {
- return 0;
- }
-
- while (1) {
- local_irq_save(flags);
- c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - info->xmit_head));
- if (c <= 0) {
- local_irq_restore(flags);
- break;
- }
-
- memcpy(info->xmit_buf + info->xmit_head, buf, c);
- info->xmit_head =
- (info->xmit_head + c) & (SERIAL_XMIT_SIZE - 1);
- info->xmit_cnt += c;
- local_irq_restore(flags);
-
- buf += c;
- count -= c;
- total += c;
- }
-
- if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
- start_xmit(info);
- }
- return total;
-} /* cy_write */
-
-static int cy_write_room(struct tty_struct *tty)
-{
- struct cyclades_port *info = tty->driver_data;
- int ret;
-
-#ifdef SERIAL_DEBUG_IO
- printk("cy_write_room %s\n", tty->name); /* */
-#endif
-
- if (serial_paranoia_check(info, tty->name, "cy_write_room"))
- return 0;
- ret = PAGE_SIZE - info->xmit_cnt - 1;
- if (ret < 0)
- ret = 0;
- return ret;
-} /* cy_write_room */
-
-static int cy_chars_in_buffer(struct tty_struct *tty)
-{
- struct cyclades_port *info = tty->driver_data;
-
-#ifdef SERIAL_DEBUG_IO
- printk("cy_chars_in_buffer %s %d\n", tty->name, info->xmit_cnt); /* */
-#endif
-
- if (serial_paranoia_check(info, tty->name, "cy_chars_in_buffer"))
- return 0;
-
- return info->xmit_cnt;
-} /* cy_chars_in_buffer */
-
-static void cy_flush_buffer(struct tty_struct *tty)
-{
- struct cyclades_port *info = tty->driver_data;
- unsigned long flags;
-
-#ifdef SERIAL_DEBUG_IO
- printk("cy_flush_buffer %s\n", tty->name); /* */
-#endif
-
- if (serial_paranoia_check(info, tty->name, "cy_flush_buffer"))
- return;
- local_irq_save(flags);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
- local_irq_restore(flags);
- tty_wakeup(tty);
-} /* cy_flush_buffer */
-
-/* This routine is called by the upper-layer tty layer to signal
- that incoming characters should be throttled or that the
- throttle should be released.
- */
-static void cy_throttle(struct tty_struct *tty)
-{
- struct cyclades_port *info = tty->driver_data;
- unsigned long flags;
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- int channel;
-
-#ifdef SERIAL_DEBUG_THROTTLE
- char buf[64];
-
- printk("throttle %s: %d....\n", tty_name(tty, buf),
- tty->ldisc.chars_in_buffer(tty));
- printk("cy_throttle %s\n", tty->name);
-#endif
-
- if (serial_paranoia_check(info, tty->name, "cy_nthrottle")) {
- return;
- }
-
- if (I_IXOFF(tty)) {
- info->x_char = STOP_CHAR(tty);
- /* Should use the "Send Special Character" feature!!! */
- }
-
- channel = info->line;
-
- local_irq_save(flags);
- base_addr[CyCAR] = (u_char) channel;
- base_addr[CyMSVR1] = 0;
- local_irq_restore(flags);
-} /* cy_throttle */
-
-static void cy_unthrottle(struct tty_struct *tty)
-{
- struct cyclades_port *info = tty->driver_data;
- unsigned long flags;
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- int channel;
-
-#ifdef SERIAL_DEBUG_THROTTLE
- char buf[64];
-
- printk("throttle %s: %d....\n", tty_name(tty, buf),
- tty->ldisc.chars_in_buffer(tty));
- printk("cy_unthrottle %s\n", tty->name);
-#endif
-
- if (serial_paranoia_check(info, tty->name, "cy_nthrottle")) {
- return;
- }
-
- if (I_IXOFF(tty)) {
- info->x_char = START_CHAR(tty);
- /* Should use the "Send Special Character" feature!!! */
- }
-
- channel = info->line;
-
- local_irq_save(flags);
- base_addr[CyCAR] = (u_char) channel;
- base_addr[CyMSVR1] = CyRTS;
- local_irq_restore(flags);
-} /* cy_unthrottle */
-
-static int
-get_serial_info(struct cyclades_port *info,
- struct serial_struct __user * retinfo)
-{
- struct serial_struct tmp;
-
-/* CP('g'); */
- if (!retinfo)
- return -EFAULT;
- memset(&tmp, 0, sizeof(tmp));
- tmp.type = info->type;
- tmp.line = info->line;
- tmp.port = info->line;
- tmp.irq = 0;
- tmp.flags = info->flags;
- tmp.baud_base = 0; /*!!! */
- tmp.close_delay = info->close_delay;
- tmp.custom_divisor = 0; /*!!! */
- tmp.hub6 = 0; /*!!! */
- return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
-} /* get_serial_info */
-
-static int
-set_serial_info(struct cyclades_port *info,
- struct serial_struct __user * new_info)
-{
- struct serial_struct new_serial;
- struct cyclades_port old_info;
-
-/* CP('s'); */
- if (!new_info)
- return -EFAULT;
- if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
- return -EFAULT;
- old_info = *info;
-
- if (!capable(CAP_SYS_ADMIN)) {
- if ((new_serial.close_delay != info->close_delay) ||
- ((new_serial.flags & ASYNC_FLAGS & ~ASYNC_USR_MASK) !=
- (info->flags & ASYNC_FLAGS & ~ASYNC_USR_MASK)))
- return -EPERM;
- info->flags = ((info->flags & ~ASYNC_USR_MASK) |
- (new_serial.flags & ASYNC_USR_MASK));
- goto check_and_exit;
- }
-
- /*
- * OK, past this point, all the error checking has been done.
- * At this point, we start making changes.....
- */
-
- info->flags = ((info->flags & ~ASYNC_FLAGS) |
- (new_serial.flags & ASYNC_FLAGS));
- info->close_delay = new_serial.close_delay;
-
-check_and_exit:
- if (info->flags & ASYNC_INITIALIZED) {
- config_setup(info);
- return 0;
- }
- return startup(info);
-} /* set_serial_info */
-
-static int cy_tiocmget(struct tty_struct *tty)
-{
- struct cyclades_port *info = tty->driver_data;
- int channel;
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- unsigned long flags;
- unsigned char status;
-
- channel = info->line;
-
- local_irq_save(flags);
- base_addr[CyCAR] = (u_char) channel;
- status = base_addr[CyMSVR1] | base_addr[CyMSVR2];
- local_irq_restore(flags);
-
- return ((status & CyRTS) ? TIOCM_RTS : 0)
- | ((status & CyDTR) ? TIOCM_DTR : 0)
- | ((status & CyDCD) ? TIOCM_CAR : 0)
- | ((status & CyDSR) ? TIOCM_DSR : 0)
- | ((status & CyCTS) ? TIOCM_CTS : 0);
-} /* cy_tiocmget */
-
-static int
-cy_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
-{
- struct cyclades_port *info = tty->driver_data;
- int channel;
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- unsigned long flags;
-
- channel = info->line;
-
- if (set & TIOCM_RTS) {
- local_irq_save(flags);
- base_addr[CyCAR] = (u_char) channel;
- base_addr[CyMSVR1] = CyRTS;
- local_irq_restore(flags);
- }
- if (set & TIOCM_DTR) {
- local_irq_save(flags);
- base_addr[CyCAR] = (u_char) channel;
-/* CP('S');CP('2'); */
- base_addr[CyMSVR2] = CyDTR;
-#ifdef SERIAL_DEBUG_DTR
- printk("cyc: %d: raising DTR\n", __LINE__);
- printk(" status: 0x%x, 0x%x\n", base_addr[CyMSVR1],
- base_addr[CyMSVR2]);
-#endif
- local_irq_restore(flags);
- }
-
- if (clear & TIOCM_RTS) {
- local_irq_save(flags);
- base_addr[CyCAR] = (u_char) channel;
- base_addr[CyMSVR1] = 0;
- local_irq_restore(flags);
- }
- if (clear & TIOCM_DTR) {
- local_irq_save(flags);
- base_addr[CyCAR] = (u_char) channel;
-/* CP('C');CP('2'); */
- base_addr[CyMSVR2] = 0;
-#ifdef SERIAL_DEBUG_DTR
- printk("cyc: %d: dropping DTR\n", __LINE__);
- printk(" status: 0x%x, 0x%x\n", base_addr[CyMSVR1],
- base_addr[CyMSVR2]);
-#endif
- local_irq_restore(flags);
- }
-
- return 0;
-} /* set_modem_info */
-
-static void send_break(struct cyclades_port *info, int duration)
-{ /* Let the transmit ISR take care of this (since it
- requires stuffing characters into the output stream).
- */
- info->x_break = duration;
- if (!info->xmit_cnt) {
- start_xmit(info);
- }
-} /* send_break */
-
-static int
-get_mon_info(struct cyclades_port *info, struct cyclades_monitor __user * mon)
-{
-
- if (copy_to_user(mon, &info->mon, sizeof(struct cyclades_monitor)))
- return -EFAULT;
- info->mon.int_count = 0;
- info->mon.char_count = 0;
- info->mon.char_max = 0;
- info->mon.char_last = 0;
- return 0;
-}
-
-static int set_threshold(struct cyclades_port *info, unsigned long __user * arg)
-{
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- unsigned long value;
- int channel;
-
- if (get_user(value, arg))
- return -EFAULT;
-
- channel = info->line;
- info->cor4 &= ~CyREC_FIFO;
- info->cor4 |= value & CyREC_FIFO;
- base_addr[CyCOR4] = info->cor4;
- return 0;
-}
-
-static int
-get_threshold(struct cyclades_port *info, unsigned long __user * value)
-{
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- int channel;
- unsigned long tmp;
-
- channel = info->line;
-
- tmp = base_addr[CyCOR4] & CyREC_FIFO;
- return put_user(tmp, value);
-}
-
-static int
-set_default_threshold(struct cyclades_port *info, unsigned long __user * arg)
-{
- unsigned long value;
-
- if (get_user(value, arg))
- return -EFAULT;
-
- info->default_threshold = value & 0x0f;
- return 0;
-}
-
-static int
-get_default_threshold(struct cyclades_port *info, unsigned long __user * value)
-{
- return put_user(info->default_threshold, value);
-}
-
-static int set_timeout(struct cyclades_port *info, unsigned long __user * arg)
-{
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- int channel;
- unsigned long value;
-
- if (get_user(value, arg))
- return -EFAULT;
-
- channel = info->line;
-
- base_addr[CyRTPRL] = value & 0xff;
- base_addr[CyRTPRH] = (value >> 8) & 0xff;
- return 0;
-}
-
-static int get_timeout(struct cyclades_port *info, unsigned long __user * value)
-{
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- int channel;
- unsigned long tmp;
-
- channel = info->line;
-
- tmp = base_addr[CyRTPRL];
- return put_user(tmp, value);
-}
-
-static int set_default_timeout(struct cyclades_port *info, unsigned long value)
-{
- info->default_timeout = value & 0xff;
- return 0;
-}
-
-static int
-get_default_timeout(struct cyclades_port *info, unsigned long __user * value)
-{
- return put_user(info->default_timeout, value);
-}
-
-static int
-cy_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct cyclades_port *info = tty->driver_data;
- int ret_val = 0;
- void __user *argp = (void __user *)arg;
-
-#ifdef SERIAL_DEBUG_OTHER
- printk("cy_ioctl %s, cmd = %x arg = %lx\n", tty->name, cmd, arg); /* */
-#endif
-
- tty_lock();
-
- switch (cmd) {
- case CYGETMON:
- ret_val = get_mon_info(info, argp);
- break;
- case CYGETTHRESH:
- ret_val = get_threshold(info, argp);
- break;
- case CYSETTHRESH:
- ret_val = set_threshold(info, argp);
- break;
- case CYGETDEFTHRESH:
- ret_val = get_default_threshold(info, argp);
- break;
- case CYSETDEFTHRESH:
- ret_val = set_default_threshold(info, argp);
- break;
- case CYGETTIMEOUT:
- ret_val = get_timeout(info, argp);
- break;
- case CYSETTIMEOUT:
- ret_val = set_timeout(info, argp);
- break;
- case CYGETDEFTIMEOUT:
- ret_val = get_default_timeout(info, argp);
- break;
- case CYSETDEFTIMEOUT:
- ret_val = set_default_timeout(info, (unsigned long)arg);
- break;
- case TCSBRK: /* SVID version: non-zero arg --> no break */
- ret_val = tty_check_change(tty);
- if (ret_val)
- break;
- tty_wait_until_sent(tty, 0);
- if (!arg)
- send_break(info, HZ / 4); /* 1/4 second */
- break;
- case TCSBRKP: /* support for POSIX tcsendbreak() */
- ret_val = tty_check_change(tty);
- if (ret_val)
- break;
- tty_wait_until_sent(tty, 0);
- send_break(info, arg ? arg * (HZ / 10) : HZ / 4);
- break;
-
-/* The following commands are incompletely implemented!!! */
- case TIOCGSERIAL:
- ret_val = get_serial_info(info, argp);
- break;
- case TIOCSSERIAL:
- ret_val = set_serial_info(info, argp);
- break;
- default:
- ret_val = -ENOIOCTLCMD;
- }
- tty_unlock();
-
-#ifdef SERIAL_DEBUG_OTHER
- printk("cy_ioctl done\n");
-#endif
-
- return ret_val;
-} /* cy_ioctl */
-
-static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
-{
- struct cyclades_port *info = tty->driver_data;
-
-#ifdef SERIAL_DEBUG_OTHER
- printk("cy_set_termios %s\n", tty->name);
-#endif
-
- if (tty->termios->c_cflag == old_termios->c_cflag)
- return;
- config_setup(info);
-
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios->c_cflag & CRTSCTS)) {
- tty->stopped = 0;
- cy_start(tty);
- }
-#ifdef tytso_patch_94Nov25_1726
- if (!(old_termios->c_cflag & CLOCAL) &&
- (tty->termios->c_cflag & CLOCAL))
- wake_up_interruptible(&info->open_wait);
-#endif
-} /* cy_set_termios */
-
-static void cy_close(struct tty_struct *tty, struct file *filp)
-{
- struct cyclades_port *info = tty->driver_data;
-
-/* CP('C'); */
-#ifdef SERIAL_DEBUG_OTHER
- printk("cy_close %s\n", tty->name);
-#endif
-
- if (!info || serial_paranoia_check(info, tty->name, "cy_close")) {
- return;
- }
-#ifdef SERIAL_DEBUG_OPEN
- printk("cy_close %s, count = %d\n", tty->name, info->count);
-#endif
-
- if ((tty->count == 1) && (info->count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. Info->count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- printk("cy_close: bad serial port count; tty->count is 1, "
- "info->count is %d\n", info->count);
- info->count = 1;
- }
-#ifdef SERIAL_DEBUG_COUNT
- printk("cyc: %d: decrementing count to %d\n", __LINE__,
- info->count - 1);
-#endif
- if (--info->count < 0) {
- printk("cy_close: bad serial port count for ttys%d: %d\n",
- info->line, info->count);
-#ifdef SERIAL_DEBUG_COUNT
- printk("cyc: %d: setting count to 0\n", __LINE__);
-#endif
- info->count = 0;
- }
- if (info->count)
- return;
- info->flags |= ASYNC_CLOSING;
- if (info->flags & ASYNC_INITIALIZED)
- tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */
- shutdown(info);
- cy_flush_buffer(tty);
- tty_ldisc_flush(tty);
- info->tty = NULL;
- if (info->blocked_open) {
- if (info->close_delay) {
- msleep_interruptible(jiffies_to_msecs
- (info->close_delay));
- }
- wake_up_interruptible(&info->open_wait);
- }
- info->flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING);
- wake_up_interruptible(&info->close_wait);
-
-#ifdef SERIAL_DEBUG_OTHER
- printk("cy_close done\n");
-#endif
-} /* cy_close */
-
-/*
- * cy_hangup() --- called by tty_hangup() when a hangup is signaled.
- */
-void cy_hangup(struct tty_struct *tty)
-{
- struct cyclades_port *info = tty->driver_data;
-
-#ifdef SERIAL_DEBUG_OTHER
- printk("cy_hangup %s\n", tty->name); /* */
-#endif
-
- if (serial_paranoia_check(info, tty->name, "cy_hangup"))
- return;
-
- shutdown(info);
-#if 0
- info->event = 0;
- info->count = 0;
-#ifdef SERIAL_DEBUG_COUNT
- printk("cyc: %d: setting count to 0\n", __LINE__);
-#endif
- info->tty = 0;
-#endif
- info->flags &= ~ASYNC_NORMAL_ACTIVE;
- wake_up_interruptible(&info->open_wait);
-} /* cy_hangup */
-
-/*
- * ------------------------------------------------------------
- * cy_open() and friends
- * ------------------------------------------------------------
- */
-
-static int
-block_til_ready(struct tty_struct *tty, struct file *filp,
- struct cyclades_port *info)
-{
- DECLARE_WAITQUEUE(wait, current);
- unsigned long flags;
- int channel;
- int retval;
- volatile u_char *base_addr = (u_char *) BASE_ADDR;
-
- /*
- * If the device is in the middle of being closed, then block
- * until it's done, and then try again.
- */
- if (info->flags & ASYNC_CLOSING) {
- interruptible_sleep_on(&info->close_wait);
- if (info->flags & ASYNC_HUP_NOTIFY) {
- return -EAGAIN;
- } else {
- return -ERESTARTSYS;
- }
- }
-
- /*
- * If non-blocking mode is set, then make the check up front
- * and then exit.
- */
- if (filp->f_flags & O_NONBLOCK) {
- info->flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
- }
-
- /*
- * Block waiting for the carrier detect and the line to become
- * free (i.e., not in use by the callout). While we are in
- * this loop, info->count is dropped by one, so that
- * cy_close() knows when to free things. We restore it upon
- * exit, either normal or abnormal.
- */
- retval = 0;
- add_wait_queue(&info->open_wait, &wait);
-#ifdef SERIAL_DEBUG_OPEN
- printk("block_til_ready before block: %s, count = %d\n",
- tty->name, info->count);
- /**/
-#endif
- info->count--;
-#ifdef SERIAL_DEBUG_COUNT
- printk("cyc: %d: decrementing count to %d\n", __LINE__, info->count);
-#endif
- info->blocked_open++;
-
- channel = info->line;
-
- while (1) {
- local_irq_save(flags);
- base_addr[CyCAR] = (u_char) channel;
- base_addr[CyMSVR1] = CyRTS;
-/* CP('S');CP('4'); */
- base_addr[CyMSVR2] = CyDTR;
-#ifdef SERIAL_DEBUG_DTR
- printk("cyc: %d: raising DTR\n", __LINE__);
- printk(" status: 0x%x, 0x%x\n", base_addr[CyMSVR1],
- base_addr[CyMSVR2]);
-#endif
- local_irq_restore(flags);
- set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp)
- || !(info->flags & ASYNC_INITIALIZED)) {
- if (info->flags & ASYNC_HUP_NOTIFY) {
- retval = -EAGAIN;
- } else {
- retval = -ERESTARTSYS;
- }
- break;
- }
- local_irq_save(flags);
- base_addr[CyCAR] = (u_char) channel;
-/* CP('L');CP1(1 && C_CLOCAL(tty)); CP1(1 && (base_addr[CyMSVR1] & CyDCD) ); */
- if (!(info->flags & ASYNC_CLOSING)
- && (C_CLOCAL(tty)
- || (base_addr[CyMSVR1] & CyDCD))) {
- local_irq_restore(flags);
- break;
- }
- local_irq_restore(flags);
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
-#ifdef SERIAL_DEBUG_OPEN
- printk("block_til_ready blocking: %s, count = %d\n",
- tty->name, info->count);
- /**/
-#endif
- tty_unlock();
- schedule();
- tty_lock();
- }
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&info->open_wait, &wait);
- if (!tty_hung_up_p(filp)) {
- info->count++;
-#ifdef SERIAL_DEBUG_COUNT
- printk("cyc: %d: incrementing count to %d\n", __LINE__,
- info->count);
-#endif
- }
- info->blocked_open--;
-#ifdef SERIAL_DEBUG_OPEN
- printk("block_til_ready after blocking: %s, count = %d\n",
- tty->name, info->count);
- /**/
-#endif
- if (retval)
- return retval;
- info->flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
-} /* block_til_ready */
-
-/*
- * This routine is called whenever a serial port is opened. It
- * performs the serial-specific initialization for the tty structure.
- */
-int cy_open(struct tty_struct *tty, struct file *filp)
-{
- struct cyclades_port *info;
- int retval, line;
-
-/* CP('O'); */
- line = tty->index;
- if ((line < 0) || (NR_PORTS <= line)) {
- return -ENODEV;
- }
- info = &cy_port[line];
- if (info->line < 0) {
- return -ENODEV;
- }
-#ifdef SERIAL_DEBUG_OTHER
- printk("cy_open %s\n", tty->name); /* */
-#endif
- if (serial_paranoia_check(info, tty->name, "cy_open")) {
- return -ENODEV;
- }
-#ifdef SERIAL_DEBUG_OPEN
- printk("cy_open %s, count = %d\n", tty->name, info->count);
- /**/
-#endif
- info->count++;
-#ifdef SERIAL_DEBUG_COUNT
- printk("cyc: %d: incrementing count to %d\n", __LINE__, info->count);
-#endif
- tty->driver_data = info;
- info->tty = tty;
-
- /*
- * Start up serial port
- */
- retval = startup(info);
- if (retval) {
- return retval;
- }
-
- retval = block_til_ready(tty, filp, info);
- if (retval) {
-#ifdef SERIAL_DEBUG_OPEN
- printk("cy_open returning after block_til_ready with %d\n",
- retval);
-#endif
- return retval;
- }
-#ifdef SERIAL_DEBUG_OPEN
- printk("cy_open done\n");
- /**/
-#endif
- return 0;
-} /* cy_open */
-
-/*
- * ---------------------------------------------------------------------
- * serial167_init() and friends
- *
- * serial167_init() is called at boot-time to initialize the serial driver.
- * ---------------------------------------------------------------------
- */
-
-/*
- * This routine prints out the appropriate serial driver version
- * number, and identifies which options were configured into this
- * driver.
- */
-static void show_version(void)
-{
- printk("MVME166/167 cd2401 driver\n");
-} /* show_version */
-
-/* initialize chips on card -- return number of valid
- chips (which is number of ports/4) */
-
-/*
- * This initialises the hardware to a reasonable state. It should
- * probe the chip first so as to copy 166-Bug setup as a default for
- * port 0. It initialises CMR to CyASYNC; that is never done again, so
- * as to limit the number of CyINIT_CHAN commands in normal running.
- *
- * ... I wonder what I should do if this fails ...
- */
-
-void mvme167_serial_console_setup(int cflag)
-{
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- int ch;
- u_char spd;
- u_char rcor, rbpr, badspeed = 0;
- unsigned long flags;
-
- local_irq_save(flags);
-
- /*
- * First probe channel zero of the chip, to see what speed has
- * been selected.
- */
-
- base_addr[CyCAR] = 0;
-
- rcor = base_addr[CyRCOR] << 5;
- rbpr = base_addr[CyRBPR];
-
- for (spd = 0; spd < sizeof(baud_bpr); spd++)
- if (rbpr == baud_bpr[spd] && rcor == baud_co[spd])
- break;
- if (spd >= sizeof(baud_bpr)) {
- spd = 14; /* 19200 */
- badspeed = 1; /* Failed to identify speed */
- }
- initial_console_speed = spd;
-
- /* OK, we have chosen a speed, now reset and reinitialise */
-
- my_udelay(20000L); /* Allow time for any active o/p to complete */
- if (base_addr[CyCCR] != 0x00) {
- local_irq_restore(flags);
- /* printk(" chip is never idle (CCR != 0)\n"); */
- return;
- }
-
- base_addr[CyCCR] = CyCHIP_RESET; /* Reset the chip */
- my_udelay(1000L);
-
- if (base_addr[CyGFRCR] == 0x00) {
- local_irq_restore(flags);
- /* printk(" chip is not responding (GFRCR stayed 0)\n"); */
- return;
- }
-
- /*
-	 * System clock is 20 MHz, divided by 2048, so divide by 10 for a 1.0 ms
- * tick
- */
-
- base_addr[CyTPR] = 10;
-
- base_addr[CyPILR1] = 0x01; /* Interrupt level for modem change */
- base_addr[CyPILR2] = 0x02; /* Interrupt level for tx ints */
- base_addr[CyPILR3] = 0x03; /* Interrupt level for rx ints */
-
- /*
- * Attempt to set up all channels to something reasonable, and
- * bang out a INIT_CHAN command. We should then be able to limit
- * the amount of fiddling we have to do in normal running.
- */
-
- for (ch = 3; ch >= 0; ch--) {
- base_addr[CyCAR] = (u_char) ch;
- base_addr[CyIER] = 0;
- base_addr[CyCMR] = CyASYNC;
- base_addr[CyLICR] = (u_char) ch << 2;
- base_addr[CyLIVR] = 0x5c;
- base_addr[CyTCOR] = baud_co[spd];
- base_addr[CyTBPR] = baud_bpr[spd];
- base_addr[CyRCOR] = baud_co[spd] >> 5;
- base_addr[CyRBPR] = baud_bpr[spd];
- base_addr[CySCHR1] = 'Q' & 0x1f;
- base_addr[CySCHR2] = 'X' & 0x1f;
- base_addr[CySCRL] = 0;
- base_addr[CySCRH] = 0;
- base_addr[CyCOR1] = Cy_8_BITS | CyPARITY_NONE;
- base_addr[CyCOR2] = 0;
- base_addr[CyCOR3] = Cy_1_STOP;
- base_addr[CyCOR4] = baud_cor4[spd];
- base_addr[CyCOR5] = 0;
- base_addr[CyCOR6] = 0;
- base_addr[CyCOR7] = 0;
- base_addr[CyRTPRL] = 2;
- base_addr[CyRTPRH] = 0;
- base_addr[CyMSVR1] = 0;
- base_addr[CyMSVR2] = 0;
- write_cy_cmd(base_addr, CyINIT_CHAN | CyDIS_RCVR | CyDIS_XMTR);
- }
-
- /*
- * Now do specials for channel zero....
- */
-
- base_addr[CyMSVR1] = CyRTS;
- base_addr[CyMSVR2] = CyDTR;
- base_addr[CyIER] = CyRxData;
- write_cy_cmd(base_addr, CyENB_RCVR | CyENB_XMTR);
-
- local_irq_restore(flags);
-
- my_udelay(20000L); /* Let it all settle down */
-
- printk("CD2401 initialised, chip is rev 0x%02x\n", base_addr[CyGFRCR]);
- if (badspeed)
- printk
- (" WARNING: Failed to identify line speed, rcor=%02x,rbpr=%02x\n",
- rcor >> 5, rbpr);
-} /* serial_console_init */
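
The console setup above identifies the boot-ROM speed by reading back the RCOR/RBPR pair that 166-Bug left programmed on channel 0 and scanning the driver's baud tables for a match. A minimal stand-alone sketch of that lookup follows; the two small tables are made-up placeholders for the real baud_co[]/baud_bpr[] arrays, which are not shown in this hunk.

#include <stdio.h>

/* Hypothetical stand-ins for the driver's baud_co[]/baud_bpr[] tables;
 * the real tables are indexed by the termios speed code. */
static const unsigned char demo_baud_co[]  = { 0x03, 0x02, 0x01, 0x00 };
static const unsigned char demo_baud_bpr[] = { 0x81, 0xc1, 0xc1, 0x61 };

/* Return the table index whose (co, bpr) pair matches what the boot ROM
 * programmed, or -1 if the speed cannot be identified (the driver then
 * falls back to a default such as 19200). */
static int identify_speed(unsigned char rcor, unsigned char rbpr)
{
	unsigned int spd;

	for (spd = 0; spd < sizeof(demo_baud_bpr); spd++)
		if (rbpr == demo_baud_bpr[spd] && rcor == demo_baud_co[spd])
			return spd;
	return -1;
}

int main(void)
{
	/* Pretend the ROM left RCOR=0x01, RBPR=0xc1 on channel 0. */
	printf("speed index = %d\n", identify_speed(0x01, 0xc1));
	return 0;
}
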
-
-static const struct tty_operations cy_ops = {
- .open = cy_open,
- .close = cy_close,
- .write = cy_write,
- .put_char = cy_put_char,
- .flush_chars = cy_flush_chars,
- .write_room = cy_write_room,
- .chars_in_buffer = cy_chars_in_buffer,
- .flush_buffer = cy_flush_buffer,
- .ioctl = cy_ioctl,
- .throttle = cy_throttle,
- .unthrottle = cy_unthrottle,
- .set_termios = cy_set_termios,
- .stop = cy_stop,
- .start = cy_start,
- .hangup = cy_hangup,
- .tiocmget = cy_tiocmget,
- .tiocmset = cy_tiocmset,
-};
-
-/* The serial driver boot-time initialization code!
- Hardware I/O ports are mapped to character special devices on a
- first found, first allocated manner. That is, this code searches
- for Cyclom cards in the system. As each is found, it is probed
- to discover how many chips (and thus how many ports) are present.
- These ports are mapped to the tty ports 64 and upward in monotonic
- fashion. If an 8-port card is replaced with a 16-port card, the
- port mapping on a following card will shift.
-
- This approach is different from what is used in the other serial
- device driver because the Cyclom is more properly a multiplexer,
- not just an aggregation of serial ports on one card.
-
- If there are more cards with more ports than have been statically
- allocated above, a warning is printed and the extra ports are ignored.
- */
-static int __init serial167_init(void)
-{
- struct cyclades_port *info;
- int ret = 0;
- int good_ports = 0;
- int port_num = 0;
- int index;
- int DefSpeed;
-#ifdef notyet
- struct sigaction sa;
-#endif
-
- if (!(mvme16x_config & MVME16x_CONFIG_GOT_CD2401))
- return 0;
-
- cy_serial_driver = alloc_tty_driver(NR_PORTS);
- if (!cy_serial_driver)
- return -ENOMEM;
-
-#if 0
- scrn[1] = '\0';
-#endif
-
- show_version();
-
- /* Has "console=0,9600n8" been used in bootinfo to change speed? */
- if (serial_console_cflag)
- DefSpeed = serial_console_cflag & 0017;
- else {
- DefSpeed = initial_console_speed;
- serial_console_info = &cy_port[0];
- serial_console_cflag = DefSpeed | CS8;
-#if 0
- serial_console = 64; /*callout_driver.minor_start */
-#endif
- }
-
- /* Initialize the tty_driver structure */
-
- cy_serial_driver->owner = THIS_MODULE;
- cy_serial_driver->name = "ttyS";
- cy_serial_driver->major = TTY_MAJOR;
- cy_serial_driver->minor_start = 64;
- cy_serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
- cy_serial_driver->subtype = SERIAL_TYPE_NORMAL;
- cy_serial_driver->init_termios = tty_std_termios;
- cy_serial_driver->init_termios.c_cflag =
- B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- cy_serial_driver->flags = TTY_DRIVER_REAL_RAW;
- tty_set_operations(cy_serial_driver, &cy_ops);
-
- ret = tty_register_driver(cy_serial_driver);
- if (ret) {
- printk(KERN_ERR "Couldn't register MVME166/7 serial driver\n");
- put_tty_driver(cy_serial_driver);
- return ret;
- }
-
- port_num = 0;
- info = cy_port;
- for (index = 0; index < 1; index++) {
-
- good_ports = 4;
-
- if (port_num < NR_PORTS) {
- while (good_ports-- && port_num < NR_PORTS) {
- /*** initialize port ***/
- info->magic = CYCLADES_MAGIC;
- info->type = PORT_CIRRUS;
- info->card = index;
- info->line = port_num;
- info->flags = STD_COM_FLAGS;
- info->tty = NULL;
- info->xmit_fifo_size = 12;
- info->cor1 = CyPARITY_NONE | Cy_8_BITS;
- info->cor2 = CyETC;
- info->cor3 = Cy_1_STOP;
- info->cor4 = 0x08; /* _very_ small receive threshold */
- info->cor5 = 0;
- info->cor6 = 0;
- info->cor7 = 0;
- info->tbpr = baud_bpr[DefSpeed]; /* Tx BPR */
- info->tco = baud_co[DefSpeed]; /* Tx CO */
- info->rbpr = baud_bpr[DefSpeed]; /* Rx BPR */
- info->rco = baud_co[DefSpeed] >> 5; /* Rx CO */
- info->close_delay = 0;
- info->x_char = 0;
- info->count = 0;
-#ifdef SERIAL_DEBUG_COUNT
- printk("cyc: %d: setting count to 0\n",
- __LINE__);
-#endif
- info->blocked_open = 0;
- info->default_threshold = 0;
- info->default_timeout = 0;
- init_waitqueue_head(&info->open_wait);
- init_waitqueue_head(&info->close_wait);
- /* info->session */
- /* info->pgrp */
-/*** !!!!!!!! this may expose new bugs !!!!!!!!! *********/
- info->read_status_mask =
- CyTIMEOUT | CySPECHAR | CyBREAK | CyPARITY |
- CyFRAME | CyOVERRUN;
- /* info->timeout */
-
- printk("ttyS%d ", info->line);
- port_num++;
- info++;
- if (!(port_num & 7)) {
- printk("\n ");
- }
- }
- }
- printk("\n");
- }
- while (port_num < NR_PORTS) {
- info->line = -1;
- port_num++;
- info++;
- }
-
- ret = request_irq(MVME167_IRQ_SER_ERR, cd2401_rxerr_interrupt, 0,
- "cd2401_errors", cd2401_rxerr_interrupt);
- if (ret) {
-		printk(KERN_ERR "Couldn't get cd2401_errors IRQ\n");
- goto cleanup_serial_driver;
- }
-
- ret = request_irq(MVME167_IRQ_SER_MODEM, cd2401_modem_interrupt, 0,
- "cd2401_modem", cd2401_modem_interrupt);
- if (ret) {
-		printk(KERN_ERR "Couldn't get cd2401_modem IRQ\n");
- goto cleanup_irq_cd2401_errors;
- }
-
- ret = request_irq(MVME167_IRQ_SER_TX, cd2401_tx_interrupt, 0,
- "cd2401_txints", cd2401_tx_interrupt);
- if (ret) {
-		printk(KERN_ERR "Couldn't get cd2401_txints IRQ\n");
- goto cleanup_irq_cd2401_modem;
- }
-
- ret = request_irq(MVME167_IRQ_SER_RX, cd2401_rx_interrupt, 0,
- "cd2401_rxints", cd2401_rx_interrupt);
- if (ret) {
-		printk(KERN_ERR "Couldn't get cd2401_rxints IRQ\n");
- goto cleanup_irq_cd2401_txints;
- }
-
- /* Now we have registered the interrupt handlers, allow the interrupts */
-
- pcc2chip[PccSCCMICR] = 0x15; /* Serial ints are level 5 */
- pcc2chip[PccSCCTICR] = 0x15;
- pcc2chip[PccSCCRICR] = 0x15;
-
- pcc2chip[PccIMLR] = 3; /* Allow PCC2 ints above 3!? */
-
- return 0;
-cleanup_irq_cd2401_txints:
- free_irq(MVME167_IRQ_SER_TX, cd2401_tx_interrupt);
-cleanup_irq_cd2401_modem:
- free_irq(MVME167_IRQ_SER_MODEM, cd2401_modem_interrupt);
-cleanup_irq_cd2401_errors:
- free_irq(MVME167_IRQ_SER_ERR, cd2401_rxerr_interrupt);
-cleanup_serial_driver:
- if (tty_unregister_driver(cy_serial_driver))
- printk(KERN_ERR
- "Couldn't unregister MVME166/7 serial driver\n");
- put_tty_driver(cy_serial_driver);
- return ret;
-} /* serial167_init */
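
serial167_init acquires its four interrupt lines one after another and, if any request_irq fails, unwinds the ones already taken in reverse order through a chain of goto labels. The sketch below is not driver code; it just isolates that staged-cleanup idiom with placeholder acquire/release helpers.

#include <stdio.h>

/* Placeholder helpers standing in for request_irq()/free_irq();
 * acquire() fails when fail_at matches the resource id. */
static int acquire(int id, int fail_at) { return id == fail_at ? -1 : 0; }
static void release(int id) { printf("released %d\n", id); }

static int init_with_unwind(int fail_at)
{
	int ret;

	ret = acquire(1, fail_at);
	if (ret)
		return ret;
	ret = acquire(2, fail_at);
	if (ret)
		goto cleanup_1;
	ret = acquire(3, fail_at);
	if (ret)
		goto cleanup_2;
	return 0;

cleanup_2:
	release(2);	/* undo in reverse order of acquisition */
cleanup_1:
	release(1);
	return ret;
}

int main(void)
{
	/* Simulate the third acquisition failing: resources 2 and 1 are freed. */
	printf("ret = %d\n", init_with_unwind(3));
	return 0;
}
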
-
-module_init(serial167_init);
-
-#ifdef CYCLOM_SHOW_STATUS
-static void show_status(int line_num)
-{
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- int channel;
- struct cyclades_port *info;
- unsigned long flags;
-
- info = &cy_port[line_num];
- channel = info->line;
- printk(" channel %d\n", channel);
- /**/ printk(" cy_port\n");
- printk(" card line flags = %d %d %x\n",
- info->card, info->line, info->flags);
- printk
- (" *tty read_status_mask timeout xmit_fifo_size = %lx %x %x %x\n",
- (long)info->tty, info->read_status_mask, info->timeout,
- info->xmit_fifo_size);
- printk(" cor1,cor2,cor3,cor4,cor5,cor6,cor7 = %x %x %x %x %x %x %x\n",
- info->cor1, info->cor2, info->cor3, info->cor4, info->cor5,
- info->cor6, info->cor7);
- printk(" tbpr,tco,rbpr,rco = %d %d %d %d\n", info->tbpr, info->tco,
- info->rbpr, info->rco);
- printk(" close_delay event count = %d %d %d\n", info->close_delay,
- info->event, info->count);
- printk(" x_char blocked_open = %x %x\n", info->x_char,
- info->blocked_open);
-	printk(" open_wait = %p\n", &info->open_wait);
-
- local_irq_save(flags);
-
-/* Global Registers */
-
- printk(" CyGFRCR %x\n", base_addr[CyGFRCR]);
- printk(" CyCAR %x\n", base_addr[CyCAR]);
- printk(" CyRISR %x\n", base_addr[CyRISR]);
- printk(" CyTISR %x\n", base_addr[CyTISR]);
- printk(" CyMISR %x\n", base_addr[CyMISR]);
- printk(" CyRIR %x\n", base_addr[CyRIR]);
- printk(" CyTIR %x\n", base_addr[CyTIR]);
- printk(" CyMIR %x\n", base_addr[CyMIR]);
- printk(" CyTPR %x\n", base_addr[CyTPR]);
-
- base_addr[CyCAR] = (u_char) channel;
-
-/* Virtual Registers */
-
-#if 0
- printk(" CyRIVR %x\n", base_addr[CyRIVR]);
- printk(" CyTIVR %x\n", base_addr[CyTIVR]);
- printk(" CyMIVR %x\n", base_addr[CyMIVR]);
- printk(" CyMISR %x\n", base_addr[CyMISR]);
-#endif
-
-/* Channel Registers */
-
- printk(" CyCCR %x\n", base_addr[CyCCR]);
- printk(" CyIER %x\n", base_addr[CyIER]);
- printk(" CyCOR1 %x\n", base_addr[CyCOR1]);
- printk(" CyCOR2 %x\n", base_addr[CyCOR2]);
- printk(" CyCOR3 %x\n", base_addr[CyCOR3]);
- printk(" CyCOR4 %x\n", base_addr[CyCOR4]);
- printk(" CyCOR5 %x\n", base_addr[CyCOR5]);
-#if 0
- printk(" CyCCSR %x\n", base_addr[CyCCSR]);
- printk(" CyRDCR %x\n", base_addr[CyRDCR]);
-#endif
- printk(" CySCHR1 %x\n", base_addr[CySCHR1]);
- printk(" CySCHR2 %x\n", base_addr[CySCHR2]);
-#if 0
- printk(" CySCHR3 %x\n", base_addr[CySCHR3]);
- printk(" CySCHR4 %x\n", base_addr[CySCHR4]);
- printk(" CySCRL %x\n", base_addr[CySCRL]);
- printk(" CySCRH %x\n", base_addr[CySCRH]);
- printk(" CyLNC %x\n", base_addr[CyLNC]);
- printk(" CyMCOR1 %x\n", base_addr[CyMCOR1]);
- printk(" CyMCOR2 %x\n", base_addr[CyMCOR2]);
-#endif
- printk(" CyRTPRL %x\n", base_addr[CyRTPRL]);
- printk(" CyRTPRH %x\n", base_addr[CyRTPRH]);
- printk(" CyMSVR1 %x\n", base_addr[CyMSVR1]);
- printk(" CyMSVR2 %x\n", base_addr[CyMSVR2]);
- printk(" CyRBPR %x\n", base_addr[CyRBPR]);
- printk(" CyRCOR %x\n", base_addr[CyRCOR]);
- printk(" CyTBPR %x\n", base_addr[CyTBPR]);
- printk(" CyTCOR %x\n", base_addr[CyTCOR]);
-
- local_irq_restore(flags);
-} /* show_status */
-#endif
-
-#if 0
-/* Dummy routine in mvme16x/config.c for now */
-
-/* Serial console setup. Called from linux/init/main.c */
-
-void console_setup(char *str, int *ints)
-{
- char *s;
- int baud, bits, parity;
- int cflag = 0;
-
- /* Sanity check. */
- if (ints[0] > 3 || ints[1] > 3)
- return;
-
- /* Get baud, bits and parity */
- baud = 2400;
- bits = 8;
- parity = 'n';
- if (ints[2])
- baud = ints[2];
- if ((s = strchr(str, ','))) {
- do {
- s++;
- } while (*s >= '0' && *s <= '9');
- if (*s)
- parity = *s++;
- if (*s)
- bits = *s - '0';
- }
-
- /* Now construct a cflag setting. */
- switch (baud) {
- case 1200:
- cflag |= B1200;
- break;
- case 9600:
- cflag |= B9600;
- break;
- case 19200:
- cflag |= B19200;
- break;
- case 38400:
- cflag |= B38400;
- break;
- case 2400:
- default:
- cflag |= B2400;
- break;
- }
- switch (bits) {
- case 7:
- cflag |= CS7;
- break;
- default:
- case 8:
- cflag |= CS8;
- break;
- }
- switch (parity) {
- case 'o':
- case 'O':
- cflag |= PARODD;
- break;
- case 'e':
- case 'E':
- cflag |= PARENB;
- break;
- }
-
- serial_console_info = &cy_port[ints[1]];
- serial_console_cflag = cflag;
- serial_console = ints[1] + 64; /*callout_driver.minor_start */
-}
-#endif
-
-/*
- * The following is probably out of date for 2.1.x serial console stuff.
- *
- * The console is registered early on from arch/m68k/kernel/setup.c, and
- * it therefore relies on the chip being setup correctly by 166-Bug. This
- * seems reasonable, as the serial port has been used to invoke the system
- * boot. It also means that this function must not rely on any data
- * initialisation performed by serial167_init() etc.
- *
- * Of course, once the console has been registered, we had better ensure
- * that serial167_init() doesn't leave the chip non-functional.
- *
- * The console must be locked when we get here.
- */
-
-void serial167_console_write(struct console *co, const char *str,
- unsigned count)
-{
- volatile unsigned char *base_addr = (u_char *) BASE_ADDR;
- unsigned long flags;
- volatile u_char sink;
- u_char ier;
- int port;
- u_char do_lf = 0;
- int i = 0;
-
- local_irq_save(flags);
-
- /* Ensure transmitter is enabled! */
-
- port = 0;
- base_addr[CyCAR] = (u_char) port;
- while (base_addr[CyCCR])
- ;
- base_addr[CyCCR] = CyENB_XMTR;
-
- ier = base_addr[CyIER];
- base_addr[CyIER] = CyTxMpty;
-
- while (1) {
- if (pcc2chip[PccSCCTICR] & 0x20) {
- /* We have a Tx int. Acknowledge it */
- sink = pcc2chip[PccTPIACKR];
- if ((base_addr[CyLICR] >> 2) == port) {
- if (i == count) {
- /* Last char of string is now output */
- base_addr[CyTEOIR] = CyNOTRANS;
- break;
- }
- if (do_lf) {
- base_addr[CyTDR] = '\n';
- str++;
- i++;
- do_lf = 0;
- } else if (*str == '\n') {
- base_addr[CyTDR] = '\r';
- do_lf = 1;
- } else {
- base_addr[CyTDR] = *str++;
- i++;
- }
- base_addr[CyTEOIR] = 0;
- } else
- base_addr[CyTEOIR] = CyNOTRANS;
- }
- }
-
- base_addr[CyIER] = ier;
-
- local_irq_restore(flags);
-}
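
serial167_console_write expands every '\n' into a CR/LF pair by holding the linefeed back for one transmit interrupt (the do_lf flag). Detached from the CD2401 register programming, the same newline expansion can be sketched as a plain buffer transform; the function name and buffer sizes here are illustrative only.

#include <stdio.h>
#include <string.h>

/* Expand '\n' to "\r\n" while copying at most dst_len - 1 bytes, the way
 * the polled console write emits a CR before each LF. Returns bytes written. */
static size_t expand_newlines(char *dst, size_t dst_len,
			      const char *src, size_t count)
{
	size_t i, out = 0;

	for (i = 0; i < count && out + 2 < dst_len; i++) {
		if (src[i] == '\n')
			dst[out++] = '\r';	/* CR first, then the LF below */
		dst[out++] = src[i];
	}
	dst[out] = '\0';
	return out;
}

int main(void)
{
	char buf[64];
	const char *msg = "line one\nline two\n";

	expand_newlines(buf, sizeof(buf), msg, strlen(msg));
	printf("%s", buf);
	return 0;
}
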
-
-static struct tty_driver *serial167_console_device(struct console *c,
- int *index)
-{
- *index = c->index;
- return cy_serial_driver;
-}
-
-static struct console sercons = {
- .name = "ttyS",
- .write = serial167_console_write,
- .device = serial167_console_device,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-static int __init serial167_console_init(void)
-{
- if (vme_brdtype == VME_TYPE_MVME166 ||
- vme_brdtype == VME_TYPE_MVME167 ||
- vme_brdtype == VME_TYPE_MVME177) {
- mvme167_serial_console_setup(0);
- register_console(&sercons);
- }
- return 0;
-}
-
-console_initcall(serial167_console_init);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/tty/specialix.c b/drivers/staging/tty/specialix.c
deleted file mode 100644
index 5c3598ec745..00000000000
--- a/drivers/staging/tty/specialix.c
+++ /dev/null
@@ -1,2368 +0,0 @@
-/*
- * specialix.c -- specialix IO8+ multiport serial driver.
- *
- * Copyright (C) 1997 Roger Wolff (R.E.Wolff@BitWizard.nl)
- * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com)
- *
- * Specialix pays for the development and support of this driver.
- * Please DO contact io8-linux@specialix.co.uk if you require
- * support. But please read the documentation (specialix.txt)
- * first.
- *
- * This driver was developed in the BitWizard linux device
- * driver service. If you require a linux device driver for your
- * product, please contact devices@BitWizard.nl for a quote.
- *
- * This code is firmly based on the riscom/8 serial driver,
- * written by Dmitry Gorodchanin. The specialix IO8+ card
- * programming information was obtained from the CL-CD1865 Data
- * Book, and Specialix document number 6200059: IO8+ Hardware
- * Functional Specification.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
- * Revision history:
- *
- * Revision 1.0: April 1st 1997.
- * Initial release for alpha testing.
- * Revision 1.1: April 14th 1997.
- *		Incorporated Richard Hudson's suggestions,
- * removed some debugging printk's.
- * Revision 1.2: April 15th 1997.
- * Ported to 2.1.x kernels.
- * Revision 1.3: April 17th 1997
- * Backported to 2.0. (Compatibility macros).
- * Revision 1.4: April 18th 1997
- * Fixed DTR/RTS bug that caused the card to indicate
- * "don't send data" to a modem after the password prompt.
- * Fixed bug for premature (fake) interrupts.
- * Revision 1.5: April 19th 1997
- * fixed a minor typo in the header file, cleanup a little.
- * performance warnings are now MAXed at once per minute.
- * Revision 1.6: May 23 1997
- * Changed the specialix=... format to include interrupt.
- * Revision 1.7: May 27 1997
- * Made many more debug printk's a compile time option.
- * Revision 1.8: Jul 1 1997
- * port to linux-2.1.43 kernel.
- * Revision 1.9: Oct 9 1998
- * Added stuff for the IO8+/PCI version.
- * Revision 1.10: Oct 22 1999 / Jan 21 2000.
- * Added stuff for setserial.
- * Nicolas Mailhot (Nicolas.Mailhot@email.enst.fr)
- *
- */
-
-#define VERSION "1.11"
-
-
-/*
- * There is a bunch of documentation about the card, jumpers, config
- * settings, restrictions, cables, device names and numbers in
- * Documentation/serial/specialix.txt
- */
-
-#include <linux/module.h>
-
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/mm.h>
-#include <linux/serial.h>
-#include <linux/fcntl.h>
-#include <linux/major.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/uaccess.h>
-#include <linux/gfp.h>
-
-#include "specialix_io8.h"
-#include "cd1865.h"
-
-
-/*
- This driver can spew a whole lot of debugging output at you. If you
- need maximum performance, you should disable the DEBUG define. To
- aid in debugging in the field, I'm leaving the compile-time debug
-   features enabled, and disabling them at run time. That allows me to
- instruct people with problems to enable debugging without requiring
- them to recompile...
-*/
-#define DEBUG
-
-static int sx_debug;
-static int sx_rxfifo = SPECIALIX_RXFIFO;
-static int sx_rtscts;
-
-#ifdef DEBUG
-#define dprintk(f, str...) if (sx_debug & f) printk(str)
-#else
-#define dprintk(f, str...) /* nothing */
-#endif
-
-#define SX_DEBUG_FLOW 0x0001
-#define SX_DEBUG_DATA 0x0002
-#define SX_DEBUG_PROBE 0x0004
-#define SX_DEBUG_CHAN 0x0008
-#define SX_DEBUG_INIT 0x0010
-#define SX_DEBUG_RX 0x0020
-#define SX_DEBUG_TX 0x0040
-#define SX_DEBUG_IRQ 0x0080
-#define SX_DEBUG_OPEN 0x0100
-#define SX_DEBUG_TERMIOS 0x0200
-#define SX_DEBUG_SIGNALS 0x0400
-#define SX_DEBUG_FIFO 0x0800
-
-
-#define func_enter() dprintk(SX_DEBUG_FLOW, "io8: enter %s\n", __func__)
-#define func_exit() dprintk(SX_DEBUG_FLOW, "io8: exit %s\n", __func__)
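
The driver's debug output is compiled in but gated at run time: dprintk(flag, ...) only prints when the corresponding SX_DEBUG_* bit is set in sx_debug. A self-contained illustration of that pattern (all names here are local to the example, not the driver's) is:

#include <stdio.h>

#define DBG_FLOW 0x0001
#define DBG_RX   0x0020

static int debug_mask;	/* runtime switch, playing the role of sx_debug */

/* Print only when the caller's category bit is enabled. */
#define dbg(f, ...) do { if (debug_mask & (f)) printf(__VA_ARGS__); } while (0)

int main(void)
{
	debug_mask = DBG_RX;		/* enable only receive-path messages */

	dbg(DBG_FLOW, "enter main\n");	/* suppressed */
	dbg(DBG_RX, "got %d bytes\n", 16);	/* printed */
	return 0;
}
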
-
-
-/* Configurable options: */
-
-/* Am I paranoid or not ? ;-) */
-#define SPECIALIX_PARANOIA_CHECK
-
-/*
- * The following defines are mostly for testing purposes. But if you need
- * some nice reporting in your syslog, you can define them also.
- */
-#undef SX_REPORT_FIFO
-#undef SX_REPORT_OVERRUN
-
-
-
-
-#define SPECIALIX_LEGAL_FLAGS \
- (ASYNC_HUP_NOTIFY | ASYNC_SAK | ASYNC_SPLIT_TERMIOS | \
- ASYNC_SPD_HI | ASYNC_SPEED_VHI | ASYNC_SESSION_LOCKOUT | \
- ASYNC_PGRP_LOCKOUT | ASYNC_CALLOUT_NOHUP)
-
-static struct tty_driver *specialix_driver;
-
-static struct specialix_board sx_board[SX_NBOARD] = {
- { 0, SX_IOBASE1, 9, },
- { 0, SX_IOBASE2, 11, },
- { 0, SX_IOBASE3, 12, },
- { 0, SX_IOBASE4, 15, },
-};
-
-static struct specialix_port sx_port[SX_NBOARD * SX_NPORT];
-
-
-static int sx_paranoia_check(struct specialix_port const *port,
- char *name, const char *routine)
-{
-#ifdef SPECIALIX_PARANOIA_CHECK
- static const char *badmagic = KERN_ERR
- "sx: Warning: bad specialix port magic number for device %s in %s\n";
- static const char *badinfo = KERN_ERR
- "sx: Warning: null specialix port for device %s in %s\n";
-
- if (!port) {
- printk(badinfo, name, routine);
- return 1;
- }
- if (port->magic != SPECIALIX_MAGIC) {
- printk(badmagic, name, routine);
- return 1;
- }
-#endif
- return 0;
-}
-
-
-/*
- *
- * Service functions for specialix IO8+ driver.
- *
- */
-
-/* Get board number from pointer */
-static inline int board_No(struct specialix_board *bp)
-{
- return bp - sx_board;
-}
-
-
-/* Get port number from pointer */
-static inline int port_No(struct specialix_port const *port)
-{
- return SX_PORT(port - sx_port);
-}
-
-
-/* Get pointer to board from pointer to port */
-static inline struct specialix_board *port_Board(
- struct specialix_port const *port)
-{
- return &sx_board[SX_BOARD(port - sx_port)];
-}
-
-
-/* Input Byte from CL CD186x register */
-static inline unsigned char sx_in(struct specialix_board *bp,
- unsigned short reg)
-{
- bp->reg = reg | 0x80;
- outb(reg | 0x80, bp->base + SX_ADDR_REG);
- return inb(bp->base + SX_DATA_REG);
-}
-
-
-/* Output Byte to CL CD186x register */
-static inline void sx_out(struct specialix_board *bp, unsigned short reg,
- unsigned char val)
-{
- bp->reg = reg | 0x80;
- outb(reg | 0x80, bp->base + SX_ADDR_REG);
- outb(val, bp->base + SX_DATA_REG);
-}
-
-
-/* Input Byte from CL CD186x register */
-static inline unsigned char sx_in_off(struct specialix_board *bp,
- unsigned short reg)
-{
- bp->reg = reg;
- outb(reg, bp->base + SX_ADDR_REG);
- return inb(bp->base + SX_DATA_REG);
-}
-
-
-/* Output Byte to CL CD186x register */
-static inline void sx_out_off(struct specialix_board *bp,
- unsigned short reg, unsigned char val)
-{
- bp->reg = reg;
- outb(reg, bp->base + SX_ADDR_REG);
- outb(val, bp->base + SX_DATA_REG);
-}
-
-
-/* Wait for Channel Command Register ready */
-static void sx_wait_CCR(struct specialix_board *bp)
-{
- unsigned long delay, flags;
- unsigned char ccr;
-
- for (delay = SX_CCR_TIMEOUT; delay; delay--) {
- spin_lock_irqsave(&bp->lock, flags);
- ccr = sx_in(bp, CD186x_CCR);
- spin_unlock_irqrestore(&bp->lock, flags);
- if (!ccr)
- return;
- udelay(1);
- }
-
- printk(KERN_ERR "sx%d: Timeout waiting for CCR.\n", board_No(bp));
-}
-
-
-/* Wait for Channel Command Register ready */
-static void sx_wait_CCR_off(struct specialix_board *bp)
-{
- unsigned long delay;
- unsigned char crr;
- unsigned long flags;
-
- for (delay = SX_CCR_TIMEOUT; delay; delay--) {
- spin_lock_irqsave(&bp->lock, flags);
- crr = sx_in_off(bp, CD186x_CCR);
- spin_unlock_irqrestore(&bp->lock, flags);
- if (!crr)
- return;
- udelay(1);
- }
-
- printk(KERN_ERR "sx%d: Timeout waiting for CCR.\n", board_No(bp));
-}
-
-
-/*
- * specialix IO8+ IO range functions.
- */
-
-static int sx_request_io_range(struct specialix_board *bp)
-{
- return request_region(bp->base,
- bp->flags & SX_BOARD_IS_PCI ? SX_PCI_IO_SPACE : SX_IO_SPACE,
- "specialix IO8+") == NULL;
-}
-
-
-static void sx_release_io_range(struct specialix_board *bp)
-{
- release_region(bp->base, bp->flags & SX_BOARD_IS_PCI ?
- SX_PCI_IO_SPACE : SX_IO_SPACE);
-}
-
-
-/* Set the IRQ using the RTS lines that run to the PAL on the board.... */
-static int sx_set_irq(struct specialix_board *bp)
-{
- int virq;
- int i;
- unsigned long flags;
-
- if (bp->flags & SX_BOARD_IS_PCI)
- return 1;
- switch (bp->irq) {
- /* In the same order as in the docs... */
- case 15:
- virq = 0;
- break;
- case 12:
- virq = 1;
- break;
- case 11:
- virq = 2;
- break;
- case 9:
- virq = 3;
- break;
-	default:
-		printk(KERN_ERR "Specialix: cannot set irq to %d.\n", bp->irq);
- return 0;
- }
- spin_lock_irqsave(&bp->lock, flags);
- for (i = 0; i < 2; i++) {
- sx_out(bp, CD186x_CAR, i);
- sx_out(bp, CD186x_MSVRTS, ((virq >> i) & 0x1)? MSVR_RTS:0);
- }
- spin_unlock_irqrestore(&bp->lock, flags);
- return 1;
-}
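
On the ISA card the chosen IRQ is not written to a register directly; the two-bit code for it (15 -> 0, 12 -> 1, 11 -> 2, 9 -> 3) is latched onto the RTS outputs of channels 0 and 1, which feed the PAL on the board. The sketch below only reproduces that mapping and bit split, mirroring the switch in sx_set_irq above.

#include <stdio.h>

/* Map a supported ISA IRQ to the 2-bit code used by sx_set_irq(),
 * in the same order as the table in the routine above. */
static int irq_to_virq(int irq)
{
	switch (irq) {
	case 15: return 0;
	case 12: return 1;
	case 11: return 2;
	case 9:  return 3;
	default: return -1;
	}
}

int main(void)
{
	int irq = 11;
	int virq = irq_to_virq(irq);
	int i;

	/* Channel i's RTS line carries bit i of the code. */
	for (i = 0; i < 2; i++)
		printf("channel %d RTS = %d\n", i, (virq >> i) & 1);
	return 0;
}
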
-
-
-/* Reset and setup CD186x chip */
-static int sx_init_CD186x(struct specialix_board *bp)
-{
- unsigned long flags;
- int scaler;
- int rv = 1;
-
- func_enter();
- sx_wait_CCR_off(bp); /* Wait for CCR ready */
- spin_lock_irqsave(&bp->lock, flags);
- sx_out_off(bp, CD186x_CCR, CCR_HARDRESET); /* Reset CD186x chip */
- spin_unlock_irqrestore(&bp->lock, flags);
- msleep(50); /* Delay 0.05 sec */
- spin_lock_irqsave(&bp->lock, flags);
- sx_out_off(bp, CD186x_GIVR, SX_ID); /* Set ID for this chip */
- sx_out_off(bp, CD186x_GICR, 0); /* Clear all bits */
- sx_out_off(bp, CD186x_PILR1, SX_ACK_MINT); /* Prio for modem intr */
- sx_out_off(bp, CD186x_PILR2, SX_ACK_TINT); /* Prio for transmitter intr */
- sx_out_off(bp, CD186x_PILR3, SX_ACK_RINT); /* Prio for receiver intr */
- /* Set RegAckEn */
- sx_out_off(bp, CD186x_SRCR, sx_in(bp, CD186x_SRCR) | SRCR_REGACKEN);
-
- /* Setting up prescaler. We need 4 ticks per 1 ms */
- scaler = SX_OSCFREQ/SPECIALIX_TPS;
-
- sx_out_off(bp, CD186x_PPRH, scaler >> 8);
- sx_out_off(bp, CD186x_PPRL, scaler & 0xff);
- spin_unlock_irqrestore(&bp->lock, flags);
-
- if (!sx_set_irq(bp)) {
- /* Figure out how to pass this along... */
- printk(KERN_ERR "Cannot set irq to %d.\n", bp->irq);
- rv = 0;
- }
-
- func_exit();
- return rv;
-}
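
sx_init_CD186x programs the prescaler so the chip ticks SPECIALIX_TPS times per second ("4 ticks per 1 ms"): the oscillator frequency is divided by the tick rate and the quotient split across PPRH/PPRL. A worked version of that arithmetic is below; the 25 MHz oscillator and 4000 ticks-per-second figures are assumptions for illustration, not values taken from the driver headers.

#include <stdio.h>

int main(void)
{
	/* Assumed figures, purely illustrative: */
	const long osc_hz = 25000000L;	/* board oscillator, stands in for SX_OSCFREQ */
	const long tps    = 4000L;	/* ticks per second, stands in for SPECIALIX_TPS */

	long scaler = osc_hz / tps;	/* value loaded into the prescaler */

	/* The 16-bit result is split across the high/low prescaler registers. */
	printf("scaler = %ld (PPRH=0x%02lx, PPRL=0x%02lx)\n",
	       scaler, (scaler >> 8) & 0xff, scaler & 0xff);
	return 0;
}
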
-
-
-static int read_cross_byte(struct specialix_board *bp, int reg, int bit)
-{
- int i;
- int t;
- unsigned long flags;
-
- spin_lock_irqsave(&bp->lock, flags);
- for (i = 0, t = 0; i < 8; i++) {
- sx_out_off(bp, CD186x_CAR, i);
- if (sx_in_off(bp, reg) & bit)
- t |= 1 << i;
- }
- spin_unlock_irqrestore(&bp->lock, flags);
-
- return t;
-}
-
-
-/* Main probing routine, also sets irq. */
-static int sx_probe(struct specialix_board *bp)
-{
- unsigned char val1, val2;
- int rev;
- int chip;
-
- func_enter();
-
- if (sx_request_io_range(bp)) {
- func_exit();
- return 1;
- }
-
- /* Are the I/O ports here ? */
- sx_out_off(bp, CD186x_PPRL, 0x5a);
- udelay(1);
- val1 = sx_in_off(bp, CD186x_PPRL);
-
- sx_out_off(bp, CD186x_PPRL, 0xa5);
- udelay(1);
- val2 = sx_in_off(bp, CD186x_PPRL);
-
-
- if (val1 != 0x5a || val2 != 0xa5) {
- printk(KERN_INFO
- "sx%d: specialix IO8+ Board at 0x%03x not found.\n",
- board_No(bp), bp->base);
- sx_release_io_range(bp);
- func_exit();
- return 1;
- }
-
- /* Check the DSR lines that Specialix uses as board
- identification */
- val1 = read_cross_byte(bp, CD186x_MSVR, MSVR_DSR);
- val2 = read_cross_byte(bp, CD186x_MSVR, MSVR_RTS);
- dprintk(SX_DEBUG_INIT,
- "sx%d: DSR lines are: %02x, rts lines are: %02x\n",
- board_No(bp), val1, val2);
-
- /* They managed to switch the bit order between the docs and
- the IO8+ card. The new PCI card now conforms to old docs.
- They changed the PCI docs to reflect the situation on the
- old card. */
-	val2 = (bp->flags & SX_BOARD_IS_PCI) ? 0x4d : 0xb2;
- if (val1 != val2) {
- printk(KERN_INFO
- "sx%d: specialix IO8+ ID %02x at 0x%03x not found (%02x).\n",
- board_No(bp), val2, bp->base, val1);
- sx_release_io_range(bp);
- func_exit();
- return 1;
- }
-
-
- /* Reset CD186x again */
- if (!sx_init_CD186x(bp)) {
- sx_release_io_range(bp);
- func_exit();
- return 1;
- }
-
- sx_request_io_range(bp);
- bp->flags |= SX_BOARD_PRESENT;
-
- /* Chip revcode pkgtype
- GFRCR SRCR bit 7
- CD180 rev B 0x81 0
- CD180 rev C 0x82 0
- CD1864 rev A 0x82 1
- CD1865 rev A 0x83 1 -- Do not use!!! Does not work.
- CD1865 rev B 0x84 1
- -- Thanks to Gwen Wang, Cirrus Logic.
- */
-
- switch (sx_in_off(bp, CD186x_GFRCR)) {
- case 0x82:
- chip = 1864;
- rev = 'A';
- break;
- case 0x83:
- chip = 1865;
- rev = 'A';
- break;
- case 0x84:
- chip = 1865;
- rev = 'B';
- break;
- case 0x85:
- chip = 1865;
- rev = 'C';
- break; /* Does not exist at this time */
- default:
- chip = -1;
- rev = 'x';
- }
-
- dprintk(SX_DEBUG_INIT, " GFCR = 0x%02x\n", sx_in_off(bp, CD186x_GFRCR));
-
- printk(KERN_INFO
- "sx%d: specialix IO8+ board detected at 0x%03x, IRQ %d, CD%d Rev. %c.\n",
- board_No(bp), bp->base, bp->irq, chip, rev);
-
- func_exit();
- return 0;
-}
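
The board-identification step compares the DSR cross-byte against 0xb2 for the ISA card and 0x4d for the PCI card; as the comment notes, the two families present the same ID with the bit order reversed, and indeed 0x4d is the bitwise mirror of 0xb2. A quick check of that relationship, using only the two constants from the probe routine:

#include <stdio.h>

/* Reverse the bit order of an 8-bit value. */
static unsigned char reverse8(unsigned char v)
{
	unsigned char r = 0;
	int i;

	for (i = 0; i < 8; i++)
		if (v & (1u << i))
			r |= 1u << (7 - i);
	return r;
}

int main(void)
{
	/* 0x4d = 01001101b reversed is 10110010b = 0xb2, the ISA-card ID. */
	printf("reverse(0x4d) = 0x%02x\n", reverse8(0x4d));
	return 0;
}
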
-
-/*
- *
- * Interrupt processing routines.
- * */
-
-static struct specialix_port *sx_get_port(struct specialix_board *bp,
- unsigned char const *what)
-{
- unsigned char channel;
- struct specialix_port *port = NULL;
-
- channel = sx_in(bp, CD186x_GICR) >> GICR_CHAN_OFF;
- dprintk(SX_DEBUG_CHAN, "channel: %d\n", channel);
- if (channel < CD186x_NCH) {
- port = &sx_port[board_No(bp) * SX_NPORT + channel];
- dprintk(SX_DEBUG_CHAN, "port: %d %p flags: 0x%lx\n",
- board_No(bp) * SX_NPORT + channel, port,
- port->port.flags & ASYNC_INITIALIZED);
-
- if (port->port.flags & ASYNC_INITIALIZED) {
- dprintk(SX_DEBUG_CHAN, "port: %d %p\n", channel, port);
- func_exit();
- return port;
- }
- }
- printk(KERN_INFO "sx%d: %s interrupt from invalid port %d\n",
- board_No(bp), what, channel);
- return NULL;
-}
-
-
-static void sx_receive_exc(struct specialix_board *bp)
-{
- struct specialix_port *port;
- struct tty_struct *tty;
- unsigned char status;
- unsigned char ch, flag;
-
- func_enter();
-
- port = sx_get_port(bp, "Receive");
- if (!port) {
- dprintk(SX_DEBUG_RX, "Hmm, couldn't find port.\n");
- func_exit();
- return;
- }
- tty = port->port.tty;
-
- status = sx_in(bp, CD186x_RCSR);
-
- dprintk(SX_DEBUG_RX, "status: 0x%x\n", status);
- if (status & RCSR_OE) {
- port->overrun++;
- dprintk(SX_DEBUG_FIFO,
- "sx%d: port %d: Overrun. Total %ld overruns.\n",
- board_No(bp), port_No(port), port->overrun);
- }
- status &= port->mark_mask;
-
- /* This flip buffer check needs to be below the reading of the
- status register to reset the chip's IRQ.... */
- if (tty_buffer_request_room(tty, 1) == 0) {
- dprintk(SX_DEBUG_FIFO,
- "sx%d: port %d: Working around flip buffer overflow.\n",
- board_No(bp), port_No(port));
- func_exit();
- return;
- }
-
- ch = sx_in(bp, CD186x_RDR);
- if (!status) {
- func_exit();
- return;
- }
- if (status & RCSR_TOUT) {
- printk(KERN_INFO
- "sx%d: port %d: Receiver timeout. Hardware problems ?\n",
- board_No(bp), port_No(port));
- func_exit();
- return;
-
- } else if (status & RCSR_BREAK) {
- dprintk(SX_DEBUG_RX, "sx%d: port %d: Handling break...\n",
- board_No(bp), port_No(port));
- flag = TTY_BREAK;
- if (port->port.flags & ASYNC_SAK)
- do_SAK(tty);
-
- } else if (status & RCSR_PE)
- flag = TTY_PARITY;
-
- else if (status & RCSR_FE)
- flag = TTY_FRAME;
-
- else if (status & RCSR_OE)
- flag = TTY_OVERRUN;
-
- else
- flag = TTY_NORMAL;
-
- if (tty_insert_flip_char(tty, ch, flag))
- tty_flip_buffer_push(tty);
- func_exit();
-}
-
-
-static void sx_receive(struct specialix_board *bp)
-{
- struct specialix_port *port;
- struct tty_struct *tty;
- unsigned char count;
-
- func_enter();
-
- port = sx_get_port(bp, "Receive");
- if (port == NULL) {
- dprintk(SX_DEBUG_RX, "Hmm, couldn't find port.\n");
- func_exit();
- return;
- }
- tty = port->port.tty;
-
- count = sx_in(bp, CD186x_RDCR);
- dprintk(SX_DEBUG_RX, "port: %p: count: %d\n", port, count);
- port->hits[count > 8 ? 9 : count]++;
-
- while (count--)
- tty_insert_flip_char(tty, sx_in(bp, CD186x_RDR), TTY_NORMAL);
- tty_flip_buffer_push(tty);
- func_exit();
-}
-
-
-static void sx_transmit(struct specialix_board *bp)
-{
- struct specialix_port *port;
- struct tty_struct *tty;
- unsigned char count;
-
- func_enter();
- port = sx_get_port(bp, "Transmit");
- if (port == NULL) {
- func_exit();
- return;
- }
- dprintk(SX_DEBUG_TX, "port: %p\n", port);
- tty = port->port.tty;
-
- if (port->IER & IER_TXEMPTY) {
- /* FIFO drained */
- sx_out(bp, CD186x_CAR, port_No(port));
- port->IER &= ~IER_TXEMPTY;
- sx_out(bp, CD186x_IER, port->IER);
- func_exit();
- return;
- }
-
- if ((port->xmit_cnt <= 0 && !port->break_length)
- || tty->stopped || tty->hw_stopped) {
- sx_out(bp, CD186x_CAR, port_No(port));
- port->IER &= ~IER_TXRDY;
- sx_out(bp, CD186x_IER, port->IER);
- func_exit();
- return;
- }
-
- if (port->break_length) {
- if (port->break_length > 0) {
- if (port->COR2 & COR2_ETC) {
- sx_out(bp, CD186x_TDR, CD186x_C_ESC);
- sx_out(bp, CD186x_TDR, CD186x_C_SBRK);
- port->COR2 &= ~COR2_ETC;
- }
- count = min_t(int, port->break_length, 0xff);
- sx_out(bp, CD186x_TDR, CD186x_C_ESC);
- sx_out(bp, CD186x_TDR, CD186x_C_DELAY);
- sx_out(bp, CD186x_TDR, count);
- port->break_length -= count;
- if (port->break_length == 0)
- port->break_length--;
- } else {
- sx_out(bp, CD186x_TDR, CD186x_C_ESC);
- sx_out(bp, CD186x_TDR, CD186x_C_EBRK);
- sx_out(bp, CD186x_COR2, port->COR2);
- sx_wait_CCR(bp);
- sx_out(bp, CD186x_CCR, CCR_CORCHG2);
- port->break_length = 0;
- }
-
- func_exit();
- return;
- }
-
- count = CD186x_NFIFO;
- do {
- sx_out(bp, CD186x_TDR, port->xmit_buf[port->xmit_tail++]);
- port->xmit_tail = port->xmit_tail & (SERIAL_XMIT_SIZE-1);
- if (--port->xmit_cnt <= 0)
- break;
- } while (--count > 0);
-
- if (port->xmit_cnt <= 0) {
- sx_out(bp, CD186x_CAR, port_No(port));
- port->IER &= ~IER_TXRDY;
- sx_out(bp, CD186x_IER, port->IER);
- }
- if (port->xmit_cnt <= port->wakeup_chars)
- tty_wakeup(tty);
-
- func_exit();
-}
-
-
-static void sx_check_modem(struct specialix_board *bp)
-{
- struct specialix_port *port;
- struct tty_struct *tty;
- unsigned char mcr;
- int msvr_cd;
-
- dprintk(SX_DEBUG_SIGNALS, "Modem intr. ");
- port = sx_get_port(bp, "Modem");
- if (port == NULL)
- return;
-
- tty = port->port.tty;
-
- mcr = sx_in(bp, CD186x_MCR);
-
- if ((mcr & MCR_CDCHG)) {
- dprintk(SX_DEBUG_SIGNALS, "CD just changed... ");
- msvr_cd = sx_in(bp, CD186x_MSVR) & MSVR_CD;
- if (msvr_cd) {
- dprintk(SX_DEBUG_SIGNALS, "Waking up guys in open.\n");
- wake_up_interruptible(&port->port.open_wait);
- } else {
- dprintk(SX_DEBUG_SIGNALS, "Sending HUP.\n");
- tty_hangup(tty);
- }
- }
-
-#ifdef SPECIALIX_BRAIN_DAMAGED_CTS
- if (mcr & MCR_CTSCHG) {
- if (sx_in(bp, CD186x_MSVR) & MSVR_CTS) {
- tty->hw_stopped = 0;
- port->IER |= IER_TXRDY;
- if (port->xmit_cnt <= port->wakeup_chars)
- tty_wakeup(tty);
- } else {
- tty->hw_stopped = 1;
- port->IER &= ~IER_TXRDY;
- }
- sx_out(bp, CD186x_IER, port->IER);
- }
- if (mcr & MCR_DSSXHG) {
- if (sx_in(bp, CD186x_MSVR) & MSVR_DSR) {
- tty->hw_stopped = 0;
- port->IER |= IER_TXRDY;
- if (port->xmit_cnt <= port->wakeup_chars)
- tty_wakeup(tty);
- } else {
- tty->hw_stopped = 1;
- port->IER &= ~IER_TXRDY;
- }
- sx_out(bp, CD186x_IER, port->IER);
- }
-#endif /* SPECIALIX_BRAIN_DAMAGED_CTS */
-
- /* Clear change bits */
- sx_out(bp, CD186x_MCR, 0);
-}
-
-
-/* The main interrupt processing routine */
-static irqreturn_t sx_interrupt(int dummy, void *dev_id)
-{
- unsigned char status;
- unsigned char ack;
- struct specialix_board *bp = dev_id;
- unsigned long loop = 0;
- int saved_reg;
- unsigned long flags;
-
- func_enter();
-
- spin_lock_irqsave(&bp->lock, flags);
-
- dprintk(SX_DEBUG_FLOW, "enter %s port %d room: %ld\n", __func__,
- port_No(sx_get_port(bp, "INT")),
-		SERIAL_XMIT_SIZE - sx_get_port(bp, "INT")->xmit_cnt - 1);
- if (!(bp->flags & SX_BOARD_ACTIVE)) {
- dprintk(SX_DEBUG_IRQ, "sx: False interrupt. irq %d.\n",
- bp->irq);
- spin_unlock_irqrestore(&bp->lock, flags);
- func_exit();
- return IRQ_NONE;
- }
-
- saved_reg = bp->reg;
-
- while (++loop < 16) {
- status = sx_in(bp, CD186x_SRSR) &
- (SRSR_RREQint | SRSR_TREQint | SRSR_MREQint);
- if (status == 0)
- break;
- if (status & SRSR_RREQint) {
- ack = sx_in(bp, CD186x_RRAR);
-
- if (ack == (SX_ID | GIVR_IT_RCV))
- sx_receive(bp);
- else if (ack == (SX_ID | GIVR_IT_REXC))
- sx_receive_exc(bp);
- else
- printk(KERN_ERR
- "sx%d: status: 0x%x Bad receive ack 0x%02x.\n",
- board_No(bp), status, ack);
-
- } else if (status & SRSR_TREQint) {
- ack = sx_in(bp, CD186x_TRAR);
-
- if (ack == (SX_ID | GIVR_IT_TX))
- sx_transmit(bp);
- else
- printk(KERN_ERR "sx%d: status: 0x%x Bad transmit ack 0x%02x. port: %d\n",
- board_No(bp), status, ack,
- port_No(sx_get_port(bp, "Int")));
- } else if (status & SRSR_MREQint) {
- ack = sx_in(bp, CD186x_MRAR);
-
- if (ack == (SX_ID | GIVR_IT_MODEM))
- sx_check_modem(bp);
- else
- printk(KERN_ERR
- "sx%d: status: 0x%x Bad modem ack 0x%02x.\n",
- board_No(bp), status, ack);
-
- }
-
- sx_out(bp, CD186x_EOIR, 0); /* Mark end of interrupt */
- }
- bp->reg = saved_reg;
- outb(bp->reg, bp->base + SX_ADDR_REG);
- spin_unlock_irqrestore(&bp->lock, flags);
- func_exit();
- return IRQ_HANDLED;
-}
-
-
-/*
- * Routines for open & close processing.
- */
-
-static void turn_ints_off(struct specialix_board *bp)
-{
- unsigned long flags;
-
- func_enter();
- spin_lock_irqsave(&bp->lock, flags);
- (void) sx_in_off(bp, 0); /* Turn off interrupts. */
- spin_unlock_irqrestore(&bp->lock, flags);
-
- func_exit();
-}
-
-static void turn_ints_on(struct specialix_board *bp)
-{
- unsigned long flags;
-
- func_enter();
-
- spin_lock_irqsave(&bp->lock, flags);
- (void) sx_in(bp, 0); /* Turn ON interrupts. */
- spin_unlock_irqrestore(&bp->lock, flags);
-
- func_exit();
-}
-
-
-/* Called with disabled interrupts */
-static int sx_setup_board(struct specialix_board *bp)
-{
- int error;
-
- if (bp->flags & SX_BOARD_ACTIVE)
- return 0;
-
- if (bp->flags & SX_BOARD_IS_PCI)
- error = request_irq(bp->irq, sx_interrupt,
- IRQF_DISABLED | IRQF_SHARED, "specialix IO8+", bp);
- else
- error = request_irq(bp->irq, sx_interrupt,
- IRQF_DISABLED, "specialix IO8+", bp);
-
- if (error)
- return error;
-
- turn_ints_on(bp);
- bp->flags |= SX_BOARD_ACTIVE;
-
- return 0;
-}
-
-
-/* Called with disabled interrupts */
-static void sx_shutdown_board(struct specialix_board *bp)
-{
- func_enter();
-
- if (!(bp->flags & SX_BOARD_ACTIVE)) {
- func_exit();
- return;
- }
-
- bp->flags &= ~SX_BOARD_ACTIVE;
-
- dprintk(SX_DEBUG_IRQ, "Freeing IRQ%d for board %d.\n",
- bp->irq, board_No(bp));
- free_irq(bp->irq, bp);
- turn_ints_off(bp);
- func_exit();
-}
-
-static unsigned int sx_crtscts(struct tty_struct *tty)
-{
- if (sx_rtscts)
- return C_CRTSCTS(tty);
- return 1;
-}
-
-/*
- * Setting up port characteristics.
- * Must be called with disabled interrupts
- */
-static void sx_change_speed(struct specialix_board *bp,
- struct specialix_port *port)
-{
- struct tty_struct *tty;
- unsigned long baud;
- long tmp;
- unsigned char cor1 = 0, cor3 = 0;
- unsigned char mcor1 = 0, mcor2 = 0;
- static unsigned long again;
- unsigned long flags;
-
- func_enter();
-
- tty = port->port.tty;
- if (!tty || !tty->termios) {
- func_exit();
- return;
- }
-
- port->IER = 0;
- port->COR2 = 0;
- /* Select port on the board */
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_CAR, port_No(port));
-
- /* The Specialix board doesn't implement the RTS lines.
- They are used to set the IRQ level. Don't touch them. */
- if (sx_crtscts(tty))
- port->MSVR = MSVR_DTR | (sx_in(bp, CD186x_MSVR) & MSVR_RTS);
- else
- port->MSVR = (sx_in(bp, CD186x_MSVR) & MSVR_RTS);
- spin_unlock_irqrestore(&bp->lock, flags);
- dprintk(SX_DEBUG_TERMIOS, "sx: got MSVR=%02x.\n", port->MSVR);
- baud = tty_get_baud_rate(tty);
-
- if (baud == 38400) {
- if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
- baud = 57600;
- if ((port->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
- baud = 115200;
- }
-
- if (!baud) {
- /* Drop DTR & exit */
- dprintk(SX_DEBUG_TERMIOS, "Dropping DTR... Hmm....\n");
- if (!sx_crtscts(tty)) {
- port->MSVR &= ~MSVR_DTR;
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_MSVR, port->MSVR);
- spin_unlock_irqrestore(&bp->lock, flags);
- } else
- dprintk(SX_DEBUG_TERMIOS, "Can't drop DTR: no DTR.\n");
- return;
- } else {
- /* Set DTR on */
- if (!sx_crtscts(tty))
- port->MSVR |= MSVR_DTR;
- }
-
- /*
-	 * Now we must calculate some speed-dependent things
- */
-
- /* Set baud rate for port */
- tmp = port->custom_divisor ;
- if (tmp)
- printk(KERN_INFO
- "sx%d: Using custom baud rate divisor %ld. \n"
- "This is an untested option, please be careful.\n",
- port_No(port), tmp);
- else
- tmp = (((SX_OSCFREQ + baud/2) / baud + CD186x_TPC/2) /
- CD186x_TPC);
-
- if (tmp < 0x10 && time_before(again, jiffies)) {
- again = jiffies + HZ * 60;
- /* Page 48 of version 2.0 of the CL-CD1865 databook */
- if (tmp >= 12) {
- printk(KERN_INFO "sx%d: Baud rate divisor is %ld. \n"
- "Performance degradation is possible.\n"
- "Read specialix.txt for more info.\n",
- port_No(port), tmp);
- } else {
- printk(KERN_INFO "sx%d: Baud rate divisor is %ld. \n"
- "Warning: overstressing Cirrus chip. This might not work.\n"
- "Read specialix.txt for more info.\n", port_No(port), tmp);
- }
- }
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_RBPRH, (tmp >> 8) & 0xff);
- sx_out(bp, CD186x_TBPRH, (tmp >> 8) & 0xff);
- sx_out(bp, CD186x_RBPRL, tmp & 0xff);
- sx_out(bp, CD186x_TBPRL, tmp & 0xff);
- spin_unlock_irqrestore(&bp->lock, flags);
- if (port->custom_divisor)
- baud = (SX_OSCFREQ + port->custom_divisor/2) /
- port->custom_divisor;
- baud = (baud + 5) / 10; /* Estimated CPS */
-
-	/* Two timer ticks seem enough to wake up something like the SLIP driver */
- tmp = ((baud + HZ/2) / HZ) * 2 - CD186x_NFIFO;
- port->wakeup_chars = (tmp < 0) ? 0 : ((tmp >= SERIAL_XMIT_SIZE) ?
- SERIAL_XMIT_SIZE - 1 : tmp);
-
- /* Receiver timeout will be transmission time for 1.5 chars */
- tmp = (SPECIALIX_TPS + SPECIALIX_TPS/2 + baud/2) / baud;
- tmp = (tmp > 0xff) ? 0xff : tmp;
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_RTPR, tmp);
- spin_unlock_irqrestore(&bp->lock, flags);
- switch (C_CSIZE(tty)) {
- case CS5:
- cor1 |= COR1_5BITS;
- break;
- case CS6:
- cor1 |= COR1_6BITS;
- break;
- case CS7:
- cor1 |= COR1_7BITS;
- break;
- case CS8:
- cor1 |= COR1_8BITS;
- break;
- }
-
- if (C_CSTOPB(tty))
- cor1 |= COR1_2SB;
-
- cor1 |= COR1_IGNORE;
- if (C_PARENB(tty)) {
- cor1 |= COR1_NORMPAR;
- if (C_PARODD(tty))
- cor1 |= COR1_ODDP;
- if (I_INPCK(tty))
- cor1 &= ~COR1_IGNORE;
- }
- /* Set marking of some errors */
- port->mark_mask = RCSR_OE | RCSR_TOUT;
- if (I_INPCK(tty))
- port->mark_mask |= RCSR_FE | RCSR_PE;
- if (I_BRKINT(tty) || I_PARMRK(tty))
- port->mark_mask |= RCSR_BREAK;
- if (I_IGNPAR(tty))
- port->mark_mask &= ~(RCSR_FE | RCSR_PE);
- if (I_IGNBRK(tty)) {
- port->mark_mask &= ~RCSR_BREAK;
- if (I_IGNPAR(tty))
- /* Real raw mode. Ignore all */
- port->mark_mask &= ~RCSR_OE;
- }
- /* Enable Hardware Flow Control */
- if (C_CRTSCTS(tty)) {
-#ifdef SPECIALIX_BRAIN_DAMAGED_CTS
- port->IER |= IER_DSR | IER_CTS;
- mcor1 |= MCOR1_DSRZD | MCOR1_CTSZD;
- mcor2 |= MCOR2_DSROD | MCOR2_CTSOD;
- spin_lock_irqsave(&bp->lock, flags);
- tty->hw_stopped = !(sx_in(bp, CD186x_MSVR) &
- (MSVR_CTS|MSVR_DSR));
- spin_unlock_irqrestore(&bp->lock, flags);
-#else
- port->COR2 |= COR2_CTSAE;
-#endif
- }
- /* Enable Software Flow Control. FIXME: I'm not sure about this */
- /* Some people reported that it works, but I still doubt it */
- if (I_IXON(tty)) {
- port->COR2 |= COR2_TXIBE;
- cor3 |= (COR3_FCT | COR3_SCDE);
- if (I_IXANY(tty))
- port->COR2 |= COR2_IXM;
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_SCHR1, START_CHAR(tty));
- sx_out(bp, CD186x_SCHR2, STOP_CHAR(tty));
- sx_out(bp, CD186x_SCHR3, START_CHAR(tty));
- sx_out(bp, CD186x_SCHR4, STOP_CHAR(tty));
- spin_unlock_irqrestore(&bp->lock, flags);
- }
- if (!C_CLOCAL(tty)) {
- /* Enable CD check */
- port->IER |= IER_CD;
- mcor1 |= MCOR1_CDZD;
- mcor2 |= MCOR2_CDOD;
- }
-
- if (C_CREAD(tty))
- /* Enable receiver */
- port->IER |= IER_RXD;
-
- /* Set input FIFO size (1-8 bytes) */
- cor3 |= sx_rxfifo;
- /* Setting up CD186x channel registers */
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_COR1, cor1);
- sx_out(bp, CD186x_COR2, port->COR2);
- sx_out(bp, CD186x_COR3, cor3);
- spin_unlock_irqrestore(&bp->lock, flags);
- /* Make CD186x know about registers change */
- sx_wait_CCR(bp);
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_CCR, CCR_CORCHG1 | CCR_CORCHG2 | CCR_CORCHG3);
- /* Setting up modem option registers */
- dprintk(SX_DEBUG_TERMIOS, "Mcor1 = %02x, mcor2 = %02x.\n",
- mcor1, mcor2);
- sx_out(bp, CD186x_MCOR1, mcor1);
- sx_out(bp, CD186x_MCOR2, mcor2);
- spin_unlock_irqrestore(&bp->lock, flags);
- /* Enable CD186x transmitter & receiver */
- sx_wait_CCR(bp);
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_CCR, CCR_TXEN | CCR_RXEN);
- /* Enable interrupts */
- sx_out(bp, CD186x_IER, port->IER);
- /* And finally set the modem lines... */
- sx_out(bp, CD186x_MSVR, port->MSVR);
- spin_unlock_irqrestore(&bp->lock, flags);
-
- func_exit();
-}
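
sx_change_speed derives the baud-rate period register with rounding at each stage, tmp = ((SX_OSCFREQ + baud/2)/baud + CD186x_TPC/2)/CD186x_TPC, then estimates characters per second (about ten bit times per character) and sizes wakeup_chars as two timer ticks' worth of characters less the FIFO depth. The sketch below reproduces that arithmetic with assumed constants (25 MHz oscillator, 16 timer periods per character clock, HZ of 100, 8-byte FIFO); only the formula structure is taken from the routine above.

#include <stdio.h>

int main(void)
{
	/* Assumed constants for illustration only: */
	const long osc   = 25000000L;	/* stands in for SX_OSCFREQ */
	const long tpc   = 16L;		/* stands in for CD186x_TPC */
	const long hz    = 100L;	/* stands in for HZ */
	const long nfifo = 8L;		/* stands in for CD186x_NFIFO */
	long baud = 9600;
	long divisor, cps, wakeup;

	/* Rounded two-stage division, as in sx_change_speed(). */
	divisor = ((osc + baud / 2) / baud + tpc / 2) / tpc;

	/* Estimated characters per second: ~10 bit times per character. */
	cps = (baud + 5) / 10;

	/* Characters that fit in two timer ticks, less the FIFO depth. */
	wakeup = ((cps + hz / 2) / hz) * 2 - nfifo;
	if (wakeup < 0)
		wakeup = 0;

	printf("divisor=%ld cps=%ld wakeup_chars=%ld\n", divisor, cps, wakeup);
	return 0;
}
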
-
-
-/* Must be called with interrupts enabled */
-static int sx_setup_port(struct specialix_board *bp,
- struct specialix_port *port)
-{
- unsigned long flags;
-
- func_enter();
-
- if (port->port.flags & ASYNC_INITIALIZED) {
- func_exit();
- return 0;
- }
-
- if (!port->xmit_buf) {
- /* We may sleep in get_zeroed_page() */
- unsigned long tmp;
-
- tmp = get_zeroed_page(GFP_KERNEL);
- if (tmp == 0L) {
- func_exit();
- return -ENOMEM;
- }
-
- if (port->xmit_buf) {
- free_page(tmp);
- func_exit();
- return -ERESTARTSYS;
- }
- port->xmit_buf = (unsigned char *) tmp;
- }
-
- spin_lock_irqsave(&port->lock, flags);
-
- if (port->port.tty)
- clear_bit(TTY_IO_ERROR, &port->port.tty->flags);
-
- port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
- sx_change_speed(bp, port);
- port->port.flags |= ASYNC_INITIALIZED;
-
- spin_unlock_irqrestore(&port->lock, flags);
-
-
- func_exit();
- return 0;
-}
-
-
-/* Must be called with interrupts disabled */
-static void sx_shutdown_port(struct specialix_board *bp,
- struct specialix_port *port)
-{
- struct tty_struct *tty;
- int i;
- unsigned long flags;
-
- func_enter();
-
- if (!(port->port.flags & ASYNC_INITIALIZED)) {
- func_exit();
- return;
- }
-
- if (sx_debug & SX_DEBUG_FIFO) {
- dprintk(SX_DEBUG_FIFO,
- "sx%d: port %d: %ld overruns, FIFO hits [ ",
- board_No(bp), port_No(port), port->overrun);
- for (i = 0; i < 10; i++)
- dprintk(SX_DEBUG_FIFO, "%ld ", port->hits[i]);
- dprintk(SX_DEBUG_FIFO, "].\n");
- }
-
- if (port->xmit_buf) {
- free_page((unsigned long) port->xmit_buf);
- port->xmit_buf = NULL;
- }
-
- /* Select port */
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_CAR, port_No(port));
-
- tty = port->port.tty;
- if (tty == NULL || C_HUPCL(tty)) {
- /* Drop DTR */
- sx_out(bp, CD186x_MSVDTR, 0);
- }
- spin_unlock_irqrestore(&bp->lock, flags);
- /* Reset port */
- sx_wait_CCR(bp);
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_CCR, CCR_SOFTRESET);
- /* Disable all interrupts from this port */
- port->IER = 0;
- sx_out(bp, CD186x_IER, port->IER);
- spin_unlock_irqrestore(&bp->lock, flags);
- if (tty)
- set_bit(TTY_IO_ERROR, &tty->flags);
- port->port.flags &= ~ASYNC_INITIALIZED;
-
- if (!bp->count)
- sx_shutdown_board(bp);
- func_exit();
-}
-
-
-static int block_til_ready(struct tty_struct *tty, struct file *filp,
- struct specialix_port *port)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct specialix_board *bp = port_Board(port);
- int retval;
- int do_clocal = 0;
- int CD;
- unsigned long flags;
-
- func_enter();
-
- /*
- * If the device is in the middle of being closed, then block
- * until it's done, and then try again.
- */
- if (tty_hung_up_p(filp) || port->port.flags & ASYNC_CLOSING) {
- interruptible_sleep_on(&port->port.close_wait);
- if (port->port.flags & ASYNC_HUP_NOTIFY) {
- func_exit();
- return -EAGAIN;
- } else {
- func_exit();
- return -ERESTARTSYS;
- }
- }
-
- /*
- * If non-blocking mode is set, or the port is not enabled,
- * then make the check up front and then exit.
- */
- if ((filp->f_flags & O_NONBLOCK) ||
- (tty->flags & (1 << TTY_IO_ERROR))) {
- port->port.flags |= ASYNC_NORMAL_ACTIVE;
- func_exit();
- return 0;
- }
-
- if (C_CLOCAL(tty))
- do_clocal = 1;
-
- /*
- * Block waiting for the carrier detect and the line to become
- * free (i.e., not in use by the callout). While we are in
- * this loop, info->count is dropped by one, so that
- * rs_close() knows when to free things. We restore it upon
- * exit, either normal or abnormal.
- */
- retval = 0;
- add_wait_queue(&port->port.open_wait, &wait);
- spin_lock_irqsave(&port->lock, flags);
- if (!tty_hung_up_p(filp))
- port->port.count--;
- spin_unlock_irqrestore(&port->lock, flags);
- port->port.blocked_open++;
- while (1) {
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_CAR, port_No(port));
- CD = sx_in(bp, CD186x_MSVR) & MSVR_CD;
- if (sx_crtscts(tty)) {
- /* Activate RTS */
- port->MSVR |= MSVR_DTR; /* WTF? */
- sx_out(bp, CD186x_MSVR, port->MSVR);
- } else {
- /* Activate DTR */
- port->MSVR |= MSVR_DTR;
- sx_out(bp, CD186x_MSVR, port->MSVR);
- }
- spin_unlock_irqrestore(&bp->lock, flags);
- set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) ||
- !(port->port.flags & ASYNC_INITIALIZED)) {
- if (port->port.flags & ASYNC_HUP_NOTIFY)
- retval = -EAGAIN;
- else
- retval = -ERESTARTSYS;
- break;
- }
- if (!(port->port.flags & ASYNC_CLOSING) &&
- (do_clocal || CD))
- break;
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
- tty_unlock();
- schedule();
- tty_lock();
- }
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&port->port.open_wait, &wait);
- spin_lock_irqsave(&port->lock, flags);
- if (!tty_hung_up_p(filp))
- port->port.count++;
- port->port.blocked_open--;
- spin_unlock_irqrestore(&port->lock, flags);
- if (retval) {
- func_exit();
- return retval;
- }
-
- port->port.flags |= ASYNC_NORMAL_ACTIVE;
- func_exit();
- return 0;
-}
-
-
-static int sx_open(struct tty_struct *tty, struct file *filp)
-{
- int board;
- int error;
- struct specialix_port *port;
- struct specialix_board *bp;
- int i;
- unsigned long flags;
-
- func_enter();
-
- board = SX_BOARD(tty->index);
-
- if (board >= SX_NBOARD || !(sx_board[board].flags & SX_BOARD_PRESENT)) {
- func_exit();
- return -ENODEV;
- }
-
- bp = &sx_board[board];
- port = sx_port + board * SX_NPORT + SX_PORT(tty->index);
- port->overrun = 0;
- for (i = 0; i < 10; i++)
- port->hits[i] = 0;
-
- dprintk(SX_DEBUG_OPEN,
- "Board = %d, bp = %p, port = %p, portno = %d.\n",
- board, bp, port, SX_PORT(tty->index));
-
- if (sx_paranoia_check(port, tty->name, "sx_open")) {
- func_exit();
- return -ENODEV;
- }
-
- error = sx_setup_board(bp);
- if (error) {
- func_exit();
- return error;
- }
-
- spin_lock_irqsave(&bp->lock, flags);
- port->port.count++;
- bp->count++;
- tty->driver_data = port;
- port->port.tty = tty;
- spin_unlock_irqrestore(&bp->lock, flags);
-
- error = sx_setup_port(bp, port);
- if (error) {
- func_exit();
- return error;
- }
-
- error = block_til_ready(tty, filp, port);
- if (error) {
- func_exit();
- return error;
- }
-
- func_exit();
- return 0;
-}
-
-static void sx_flush_buffer(struct tty_struct *tty)
-{
- struct specialix_port *port = tty->driver_data;
- unsigned long flags;
- struct specialix_board *bp;
-
- func_enter();
-
- if (sx_paranoia_check(port, tty->name, "sx_flush_buffer")) {
- func_exit();
- return;
- }
-
- bp = port_Board(port);
- spin_lock_irqsave(&port->lock, flags);
- port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
- spin_unlock_irqrestore(&port->lock, flags);
- tty_wakeup(tty);
-
- func_exit();
-}
-
-static void sx_close(struct tty_struct *tty, struct file *filp)
-{
- struct specialix_port *port = tty->driver_data;
- struct specialix_board *bp;
- unsigned long flags;
- unsigned long timeout;
-
- func_enter();
- if (!port || sx_paranoia_check(port, tty->name, "close")) {
- func_exit();
- return;
- }
- spin_lock_irqsave(&port->lock, flags);
-
- if (tty_hung_up_p(filp)) {
- spin_unlock_irqrestore(&port->lock, flags);
- func_exit();
- return;
- }
-
- bp = port_Board(port);
- if (tty->count == 1 && port->port.count != 1) {
- printk(KERN_ERR "sx%d: sx_close: bad port count;"
- " tty->count is 1, port count is %d\n",
- board_No(bp), port->port.count);
- port->port.count = 1;
- }
-
- if (port->port.count > 1) {
- port->port.count--;
- bp->count--;
-
- spin_unlock_irqrestore(&port->lock, flags);
-
- func_exit();
- return;
- }
- port->port.flags |= ASYNC_CLOSING;
- /*
- * Now we wait for the transmit buffer to clear; and we notify
- * the line discipline to only process XON/XOFF characters.
- */
- tty->closing = 1;
- spin_unlock_irqrestore(&port->lock, flags);
- dprintk(SX_DEBUG_OPEN, "Closing\n");
- if (port->port.closing_wait != ASYNC_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, port->port.closing_wait);
- /*
- * At this point we stop accepting input. To do this, we
- * disable the receive line status interrupts, and tell the
- * interrupt driver to stop checking the data ready bit in the
- * line status register.
- */
- dprintk(SX_DEBUG_OPEN, "Closed\n");
- port->IER &= ~IER_RXD;
- if (port->port.flags & ASYNC_INITIALIZED) {
- port->IER &= ~IER_TXRDY;
- port->IER |= IER_TXEMPTY;
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_CAR, port_No(port));
- sx_out(bp, CD186x_IER, port->IER);
- spin_unlock_irqrestore(&bp->lock, flags);
- /*
- * Before we drop DTR, make sure the UART transmitter
- * has completely drained; this is especially
- * important if there is a transmit FIFO!
- */
- timeout = jiffies+HZ;
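- /* Wait (up to about a second) for IER_TXEMPTY to be cleared once the transmitter has drained */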
- while (port->IER & IER_TXEMPTY) {
- set_current_state(TASK_INTERRUPTIBLE);
- msleep_interruptible(jiffies_to_msecs(port->timeout));
- if (time_after(jiffies, timeout)) {
- printk(KERN_INFO "Timeout waiting for close\n");
- break;
- }
- }
-
- }
-
- if (--bp->count < 0) {
- printk(KERN_ERR
- "sx%d: sx_shutdown_port: bad board count: %d port: %d\n",
- board_No(bp), bp->count, tty->index);
- bp->count = 0;
- }
- if (--port->port.count < 0) {
- printk(KERN_ERR
- "sx%d: sx_close: bad port count for tty%d: %d\n",
- board_No(bp), port_No(port), port->port.count);
- port->port.count = 0;
- }
-
- sx_shutdown_port(bp, port);
- sx_flush_buffer(tty);
- tty_ldisc_flush(tty);
- spin_lock_irqsave(&port->lock, flags);
- tty->closing = 0;
- port->port.tty = NULL;
- spin_unlock_irqrestore(&port->lock, flags);
- if (port->port.blocked_open) {
- if (port->port.close_delay)
- msleep_interruptible(
- jiffies_to_msecs(port->port.close_delay));
- wake_up_interruptible(&port->port.open_wait);
- }
- port->port.flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
- wake_up_interruptible(&port->port.close_wait);
-
- func_exit();
-}
-
-
-static int sx_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
-{
- struct specialix_port *port = tty->driver_data;
- struct specialix_board *bp;
- int c, total = 0;
- unsigned long flags;
-
- func_enter();
- if (sx_paranoia_check(port, tty->name, "sx_write")) {
- func_exit();
- return 0;
- }
-
- bp = port_Board(port);
-
- if (!port->xmit_buf) {
- func_exit();
- return 0;
- }
-
- while (1) {
- spin_lock_irqsave(&port->lock, flags);
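- /* Copy at most what fits, limited to the contiguous space before the ring buffer wraps */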
- c = min_t(int, count, min(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - port->xmit_head));
- if (c <= 0) {
- spin_unlock_irqrestore(&port->lock, flags);
- break;
- }
- memcpy(port->xmit_buf + port->xmit_head, buf, c);
- port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
- port->xmit_cnt += c;
- spin_unlock_irqrestore(&port->lock, flags);
-
- buf += c;
- count -= c;
- total += c;
- }
-
- spin_lock_irqsave(&bp->lock, flags);
- if (port->xmit_cnt && !tty->stopped && !tty->hw_stopped &&
- !(port->IER & IER_TXRDY)) {
- port->IER |= IER_TXRDY;
- sx_out(bp, CD186x_CAR, port_No(port));
- sx_out(bp, CD186x_IER, port->IER);
- }
- spin_unlock_irqrestore(&bp->lock, flags);
- func_exit();
-
- return total;
-}
-
-
-static int sx_put_char(struct tty_struct *tty, unsigned char ch)
-{
- struct specialix_port *port = tty->driver_data;
- unsigned long flags;
- struct specialix_board *bp;
-
- func_enter();
-
- if (sx_paranoia_check(port, tty->name, "sx_put_char")) {
- func_exit();
- return 0;
- }
- dprintk(SX_DEBUG_TX, "check tty: %p %p\n", tty, port->xmit_buf);
- if (!port->xmit_buf) {
- func_exit();
- return 0;
- }
- bp = port_Board(port);
- spin_lock_irqsave(&port->lock, flags);
-
- dprintk(SX_DEBUG_TX, "xmit_cnt: %d xmit_buf: %p\n",
- port->xmit_cnt, port->xmit_buf);
- if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1 || !port->xmit_buf) {
- spin_unlock_irqrestore(&port->lock, flags);
- dprintk(SX_DEBUG_TX, "Exit size\n");
- func_exit();
- return 0;
- }
- dprintk(SX_DEBUG_TX, "Handle xmit: %p %p\n", port, port->xmit_buf);
- port->xmit_buf[port->xmit_head++] = ch;
- port->xmit_head &= SERIAL_XMIT_SIZE - 1;
- port->xmit_cnt++;
- spin_unlock_irqrestore(&port->lock, flags);
-
- func_exit();
- return 1;
-}
-
-
-static void sx_flush_chars(struct tty_struct *tty)
-{
- struct specialix_port *port = tty->driver_data;
- unsigned long flags;
- struct specialix_board *bp = port_Board(port);
-
- func_enter();
-
- if (sx_paranoia_check(port, tty->name, "sx_flush_chars")) {
- func_exit();
- return;
- }
- if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
- !port->xmit_buf) {
- func_exit();
- return;
- }
- spin_lock_irqsave(&bp->lock, flags);
- port->IER |= IER_TXRDY;
- sx_out(port_Board(port), CD186x_CAR, port_No(port));
- sx_out(port_Board(port), CD186x_IER, port->IER);
- spin_unlock_irqrestore(&bp->lock, flags);
-
- func_exit();
-}
-
-
-static int sx_write_room(struct tty_struct *tty)
-{
- struct specialix_port *port = tty->driver_data;
- int ret;
-
- func_enter();
-
- if (sx_paranoia_check(port, tty->name, "sx_write_room")) {
- func_exit();
- return 0;
- }
-
- ret = SERIAL_XMIT_SIZE - port->xmit_cnt - 1;
- if (ret < 0)
- ret = 0;
-
- func_exit();
- return ret;
-}
-
-
-static int sx_chars_in_buffer(struct tty_struct *tty)
-{
- struct specialix_port *port = tty->driver_data;
-
- func_enter();
-
- if (sx_paranoia_check(port, tty->name, "sx_chars_in_buffer")) {
- func_exit();
- return 0;
- }
- func_exit();
- return port->xmit_cnt;
-}
-
-static int sx_tiocmget(struct tty_struct *tty)
-{
- struct specialix_port *port = tty->driver_data;
- struct specialix_board *bp;
- unsigned char status;
- unsigned int result;
- unsigned long flags;
-
- func_enter();
-
- if (sx_paranoia_check(port, tty->name, __func__)) {
- func_exit();
- return -ENODEV;
- }
-
- bp = port_Board(port);
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_CAR, port_No(port));
- status = sx_in(bp, CD186x_MSVR);
- spin_unlock_irqrestore(&bp->lock, flags);
- dprintk(SX_DEBUG_INIT, "Got msvr[%d] = %02x, car = %d.\n",
- port_No(port), status, sx_in(bp, CD186x_CAR));
- dprintk(SX_DEBUG_INIT, "sx_port = %p, port = %p\n", sx_port, port);
- if (sx_crtscts(port->port.tty)) {
- result = TIOCM_DTR | TIOCM_DSR
- | ((status & MSVR_DTR) ? TIOCM_RTS : 0)
- | ((status & MSVR_CD) ? TIOCM_CAR : 0)
- | ((status & MSVR_CTS) ? TIOCM_CTS : 0);
- } else {
- result = TIOCM_RTS | TIOCM_DSR
- | ((status & MSVR_DTR) ? TIOCM_DTR : 0)
- | ((status & MSVR_CD) ? TIOCM_CAR : 0)
- | ((status & MSVR_CTS) ? TIOCM_CTS : 0);
- }
-
- func_exit();
-
- return result;
-}
-
-
-static int sx_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct specialix_port *port = tty->driver_data;
- unsigned long flags;
- struct specialix_board *bp;
-
- func_enter();
-
- if (sx_paranoia_check(port, tty->name, __func__)) {
- func_exit();
- return -ENODEV;
- }
-
- bp = port_Board(port);
-
- spin_lock_irqsave(&port->lock, flags);
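- /* Drive the single DTR/RTS output from TIOCM_RTS when CRTSCTS is in effect, from TIOCM_DTR otherwise */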
- if (sx_crtscts(port->port.tty)) {
- if (set & TIOCM_RTS)
- port->MSVR |= MSVR_DTR;
- } else {
- if (set & TIOCM_DTR)
- port->MSVR |= MSVR_DTR;
- }
- if (sx_crtscts(port->port.tty)) {
- if (clear & TIOCM_RTS)
- port->MSVR &= ~MSVR_DTR;
- } else {
- if (clear & TIOCM_DTR)
- port->MSVR &= ~MSVR_DTR;
- }
- spin_lock(&bp->lock);
- sx_out(bp, CD186x_CAR, port_No(port));
- sx_out(bp, CD186x_MSVR, port->MSVR);
- spin_unlock(&bp->lock);
- spin_unlock_irqrestore(&port->lock, flags);
- func_exit();
- return 0;
-}
-
-
-static int sx_send_break(struct tty_struct *tty, int length)
-{
- struct specialix_port *port = tty->driver_data;
- struct specialix_board *bp = port_Board(port);
- unsigned long flags;
-
- func_enter();
- if (length == 0 || length == -1)
- return -EOPNOTSUPP;
-
- spin_lock_irqsave(&port->lock, flags);
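- /* Record the requested break length and kick the transmitter so the break gets sent */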
- port->break_length = SPECIALIX_TPS / HZ * length;
- port->COR2 |= COR2_ETC;
- port->IER |= IER_TXRDY;
- spin_lock(&bp->lock);
- sx_out(bp, CD186x_CAR, port_No(port));
- sx_out(bp, CD186x_COR2, port->COR2);
- sx_out(bp, CD186x_IER, port->IER);
- spin_unlock(&bp->lock);
- spin_unlock_irqrestore(&port->lock, flags);
- sx_wait_CCR(bp);
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_CCR, CCR_CORCHG2);
- spin_unlock_irqrestore(&bp->lock, flags);
- sx_wait_CCR(bp);
-
- func_exit();
- return 0;
-}
-
-
-static int sx_set_serial_info(struct specialix_port *port,
- struct serial_struct __user *newinfo)
-{
- struct serial_struct tmp;
- struct specialix_board *bp = port_Board(port);
- int change_speed;
-
- func_enter();
-
- if (copy_from_user(&tmp, newinfo, sizeof(tmp))) {
- func_exit();
- return -EFAULT;
- }
-
- mutex_lock(&port->port.mutex);
- change_speed = ((port->port.flags & ASYNC_SPD_MASK) !=
- (tmp.flags & ASYNC_SPD_MASK));
- change_speed |= (tmp.custom_divisor != port->custom_divisor);
-
- if (!capable(CAP_SYS_ADMIN)) {
- if ((tmp.close_delay != port->port.close_delay) ||
- (tmp.closing_wait != port->port.closing_wait) ||
- ((tmp.flags & ~ASYNC_USR_MASK) !=
- (port->port.flags & ~ASYNC_USR_MASK))) {
- func_exit();
- mutex_unlock(&port->port.mutex);
- return -EPERM;
- }
- port->port.flags = ((port->port.flags & ~ASYNC_USR_MASK) |
- (tmp.flags & ASYNC_USR_MASK));
- port->custom_divisor = tmp.custom_divisor;
- } else {
- port->port.flags = ((port->port.flags & ~ASYNC_FLAGS) |
- (tmp.flags & ASYNC_FLAGS));
- port->port.close_delay = tmp.close_delay;
- port->port.closing_wait = tmp.closing_wait;
- port->custom_divisor = tmp.custom_divisor;
- }
- if (change_speed)
- sx_change_speed(bp, port);
-
- func_exit();
- mutex_unlock(&port->port.mutex);
- return 0;
-}
-
-
-static int sx_get_serial_info(struct specialix_port *port,
- struct serial_struct __user *retinfo)
-{
- struct serial_struct tmp;
- struct specialix_board *bp = port_Board(port);
-
- func_enter();
-
- memset(&tmp, 0, sizeof(tmp));
- mutex_lock(&port->port.mutex);
- tmp.type = PORT_CIRRUS;
- tmp.line = port - sx_port;
- tmp.port = bp->base;
- tmp.irq = bp->irq;
- tmp.flags = port->port.flags;
- tmp.baud_base = (SX_OSCFREQ + CD186x_TPC/2) / CD186x_TPC;
- tmp.close_delay = port->port.close_delay * HZ/100;
- tmp.closing_wait = port->port.closing_wait * HZ/100;
- tmp.custom_divisor = port->custom_divisor;
- tmp.xmit_fifo_size = CD186x_NFIFO;
- mutex_unlock(&port->port.mutex);
- if (copy_to_user(retinfo, &tmp, sizeof(tmp))) {
- func_exit();
- return -EFAULT;
- }
-
- func_exit();
- return 0;
-}
-
-
-static int sx_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct specialix_port *port = tty->driver_data;
- void __user *argp = (void __user *)arg;
-
- func_enter();
-
- if (sx_paranoia_check(port, tty->name, "sx_ioctl")) {
- func_exit();
- return -ENODEV;
- }
-
- switch (cmd) {
- case TIOCGSERIAL:
- func_exit();
- return sx_get_serial_info(port, argp);
- case TIOCSSERIAL:
- func_exit();
- return sx_set_serial_info(port, argp);
- default:
- func_exit();
- return -ENOIOCTLCMD;
- }
- func_exit();
- return 0;
-}
-
-
-static void sx_throttle(struct tty_struct *tty)
-{
- struct specialix_port *port = tty->driver_data;
- struct specialix_board *bp;
- unsigned long flags;
-
- func_enter();
-
- if (sx_paranoia_check(port, tty->name, "sx_throttle")) {
- func_exit();
- return;
- }
-
- bp = port_Board(port);
-
- /* Use DTR instead of RTS ! */
- if (sx_crtscts(tty))
- port->MSVR &= ~MSVR_DTR;
- else {
- /* Auch!!! I think the system shouldn't call this then. */
- /* Or maybe we're supposed (allowed?) to do our side of hw
- handshake anyway, even when hardware handshake is off.
- When you see this in your logs, please report.... */
- printk(KERN_ERR
- "sx%d: Need to throttle, but can't (hardware hs is off)\n",
- port_No(port));
- }
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_CAR, port_No(port));
- spin_unlock_irqrestore(&bp->lock, flags);
- if (I_IXOFF(tty)) {
- sx_wait_CCR(bp);
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_CCR, CCR_SSCH2);
- spin_unlock_irqrestore(&bp->lock, flags);
- sx_wait_CCR(bp);
- }
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_MSVR, port->MSVR);
- spin_unlock_irqrestore(&bp->lock, flags);
-
- func_exit();
-}
-
-
-static void sx_unthrottle(struct tty_struct *tty)
-{
- struct specialix_port *port = tty->driver_data;
- struct specialix_board *bp;
- unsigned long flags;
-
- func_enter();
-
- if (sx_paranoia_check(port, tty->name, "sx_unthrottle")) {
- func_exit();
- return;
- }
-
- bp = port_Board(port);
-
- spin_lock_irqsave(&port->lock, flags);
- /* XXXX Use DTR INSTEAD???? */
- if (sx_crtscts(tty))
- port->MSVR |= MSVR_DTR;
- /* Else clause: see remark in "sx_throttle"... */
- spin_lock(&bp->lock);
- sx_out(bp, CD186x_CAR, port_No(port));
- spin_unlock(&bp->lock);
- if (I_IXOFF(tty)) {
- spin_unlock_irqrestore(&port->lock, flags);
- sx_wait_CCR(bp);
- spin_lock_irqsave(&bp->lock, flags);
- sx_out(bp, CD186x_CCR, CCR_SSCH1);
- spin_unlock_irqrestore(&bp->lock, flags);
- sx_wait_CCR(bp);
- spin_lock_irqsave(&port->lock, flags);
- }
- spin_lock(&bp->lock);
- sx_out(bp, CD186x_MSVR, port->MSVR);
- spin_unlock(&bp->lock);
- spin_unlock_irqrestore(&port->lock, flags);
-
- func_exit();
-}
-
-
-static void sx_stop(struct tty_struct *tty)
-{
- struct specialix_port *port = tty->driver_data;
- struct specialix_board *bp;
- unsigned long flags;
-
- func_enter();
-
- if (sx_paranoia_check(port, tty->name, "sx_stop")) {
- func_exit();
- return;
- }
-
- bp = port_Board(port);
-
- spin_lock_irqsave(&port->lock, flags);
- port->IER &= ~IER_TXRDY;
- spin_lock(&bp->lock);
- sx_out(bp, CD186x_CAR, port_No(port));
- sx_out(bp, CD186x_IER, port->IER);
- spin_unlock(&bp->lock);
- spin_unlock_irqrestore(&port->lock, flags);
-
- func_exit();
-}
-
-
-static void sx_start(struct tty_struct *tty)
-{
- struct specialix_port *port = tty->driver_data;
- struct specialix_board *bp;
- unsigned long flags;
-
- func_enter();
-
- if (sx_paranoia_check(port, tty->name, "sx_start")) {
- func_exit();
- return;
- }
-
- bp = port_Board(port);
-
- spin_lock_irqsave(&port->lock, flags);
- if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) {
- port->IER |= IER_TXRDY;
- spin_lock(&bp->lock);
- sx_out(bp, CD186x_CAR, port_No(port));
- sx_out(bp, CD186x_IER, port->IER);
- spin_unlock(&bp->lock);
- }
- spin_unlock_irqrestore(&port->lock, flags);
-
- func_exit();
-}
-
-static void sx_hangup(struct tty_struct *tty)
-{
- struct specialix_port *port = tty->driver_data;
- struct specialix_board *bp;
- unsigned long flags;
-
- func_enter();
-
- if (sx_paranoia_check(port, tty->name, "sx_hangup")) {
- func_exit();
- return;
- }
-
- bp = port_Board(port);
-
- sx_shutdown_port(bp, port);
- spin_lock_irqsave(&port->lock, flags);
- bp->count -= port->port.count;
- if (bp->count < 0) {
- printk(KERN_ERR
- "sx%d: sx_hangup: bad board count: %d port: %d\n",
- board_No(bp), bp->count, tty->index);
- bp->count = 0;
- }
- port->port.count = 0;
- port->port.flags &= ~ASYNC_NORMAL_ACTIVE;
- port->port.tty = NULL;
- spin_unlock_irqrestore(&port->lock, flags);
- wake_up_interruptible(&port->port.open_wait);
-
- func_exit();
-}
-
-
-static void sx_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
-{
- struct specialix_port *port = tty->driver_data;
- unsigned long flags;
- struct specialix_board *bp;
-
- if (sx_paranoia_check(port, tty->name, "sx_set_termios"))
- return;
-
- bp = port_Board(port);
- spin_lock_irqsave(&port->lock, flags);
- sx_change_speed(port_Board(port), port);
- spin_unlock_irqrestore(&port->lock, flags);
-
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios->c_cflag & CRTSCTS)) {
- tty->hw_stopped = 0;
- sx_start(tty);
- }
-}
-
-static const struct tty_operations sx_ops = {
- .open = sx_open,
- .close = sx_close,
- .write = sx_write,
- .put_char = sx_put_char,
- .flush_chars = sx_flush_chars,
- .write_room = sx_write_room,
- .chars_in_buffer = sx_chars_in_buffer,
- .flush_buffer = sx_flush_buffer,
- .ioctl = sx_ioctl,
- .throttle = sx_throttle,
- .unthrottle = sx_unthrottle,
- .set_termios = sx_set_termios,
- .stop = sx_stop,
- .start = sx_start,
- .hangup = sx_hangup,
- .tiocmget = sx_tiocmget,
- .tiocmset = sx_tiocmset,
- .break_ctl = sx_send_break,
-};
-
-static int sx_init_drivers(void)
-{
- int error;
- int i;
-
- func_enter();
-
- specialix_driver = alloc_tty_driver(SX_NBOARD * SX_NPORT);
- if (!specialix_driver) {
- printk(KERN_ERR "sx: Couldn't allocate tty_driver.\n");
- func_exit();
- return 1;
- }
-
- specialix_driver->owner = THIS_MODULE;
- specialix_driver->name = "ttyW";
- specialix_driver->major = SPECIALIX_NORMAL_MAJOR;
- specialix_driver->type = TTY_DRIVER_TYPE_SERIAL;
- specialix_driver->subtype = SERIAL_TYPE_NORMAL;
- specialix_driver->init_termios = tty_std_termios;
- specialix_driver->init_termios.c_cflag =
- B9600 | CS8 | CREAD | HUPCL | CLOCAL;
- specialix_driver->init_termios.c_ispeed = 9600;
- specialix_driver->init_termios.c_ospeed = 9600;
- specialix_driver->flags = TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_HARDWARE_BREAK;
- tty_set_operations(specialix_driver, &sx_ops);
-
- error = tty_register_driver(specialix_driver);
- if (error) {
- put_tty_driver(specialix_driver);
- printk(KERN_ERR
- "sx: Couldn't register specialix IO8+ driver, error = %d\n",
- error);
- func_exit();
- return 1;
- }
- memset(sx_port, 0, sizeof(sx_port));
- for (i = 0; i < SX_NPORT * SX_NBOARD; i++) {
- sx_port[i].magic = SPECIALIX_MAGIC;
- tty_port_init(&sx_port[i].port);
- spin_lock_init(&sx_port[i].lock);
- }
-
- func_exit();
- return 0;
-}
-
-static void sx_release_drivers(void)
-{
- func_enter();
-
- tty_unregister_driver(specialix_driver);
- put_tty_driver(specialix_driver);
- func_exit();
-}
-
-/*
- * This routine must be called by the kernel at boot time

- */
-static int __init specialix_init(void)
-{
- int i;
- int found = 0;
-
- func_enter();
-
- printk(KERN_INFO "sx: Specialix IO8+ driver v" VERSION ", (c) R.E.Wolff 1997/1998.\n");
- printk(KERN_INFO "sx: derived from work (c) D.Gorodchanin 1994-1996.\n");
- if (sx_rtscts)
- printk(KERN_INFO
- "sx: DTR/RTS pin is RTS when CRTSCTS is on.\n");
- else
- printk(KERN_INFO "sx: DTR/RTS pin is always RTS.\n");
-
- for (i = 0; i < SX_NBOARD; i++)
- spin_lock_init(&sx_board[i].lock);
-
- if (sx_init_drivers()) {
- func_exit();
- return -EIO;
- }
-
- for (i = 0; i < SX_NBOARD; i++)
- if (sx_board[i].base && !sx_probe(&sx_board[i]))
- found++;
-
-#ifdef CONFIG_PCI
- {
- struct pci_dev *pdev = NULL;
-
- i = 0;
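- /* Scan for PCI IO8+ cards to fill any board slots still unused after the ISA probe */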
- while (i < SX_NBOARD) {
- if (sx_board[i].flags & SX_BOARD_PRESENT) {
- i++;
- continue;
- }
- pdev = pci_get_device(PCI_VENDOR_ID_SPECIALIX,
- PCI_DEVICE_ID_SPECIALIX_IO8, pdev);
- if (!pdev)
- break;
-
- if (pci_enable_device(pdev))
- continue;
-
- sx_board[i].irq = pdev->irq;
-
- sx_board[i].base = pci_resource_start(pdev, 2);
-
- sx_board[i].flags |= SX_BOARD_IS_PCI;
- if (!sx_probe(&sx_board[i]))
- found++;
- }
- /* May exit pci_get sequence early with lots of boards */
- if (pdev != NULL)
- pci_dev_put(pdev);
- }
-#endif
-
- if (!found) {
- sx_release_drivers();
- printk(KERN_INFO "sx: No specialix IO8+ boards detected.\n");
- func_exit();
- return -EIO;
- }
-
- func_exit();
- return 0;
-}
-
-static int iobase[SX_NBOARD] = {0,};
-static int irq[SX_NBOARD] = {0,};
-
-module_param_array(iobase, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param(sx_debug, int, 0);
-module_param(sx_rtscts, int, 0);
-module_param(sx_rxfifo, int, 0);
-
-/*
- * You can set up to 4 boards by specifying
- * "iobase=0xXXX,0xXXX ..." as an insmod parameter.
- * In that case you should also specify the IRQs: "irq=....,...".
- *
- * More than 4 boards in one computer is not possible, as the card can
- * only use 4 different interrupts.
- *
- */
-static int __init specialix_init_module(void)
-{
- int i;
-
- func_enter();
-
- if (iobase[0] || iobase[1] || iobase[2] || iobase[3]) {
- for (i = 0; i < SX_NBOARD; i++) {
- sx_board[i].base = iobase[i];
- sx_board[i].irq = irq[i];
- sx_board[i].count = 0;
- }
- }
-
- func_exit();
-
- return specialix_init();
-}
-
-static void __exit specialix_exit_module(void)
-{
- int i;
-
- func_enter();
-
- sx_release_drivers();
- for (i = 0; i < SX_NBOARD; i++)
- if (sx_board[i].flags & SX_BOARD_PRESENT)
- sx_release_io_range(&sx_board[i]);
- func_exit();
-}
-
-static struct pci_device_id specialx_pci_tbl[] __devinitdata __used = {
- { PCI_DEVICE(PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_IO8) },
- { }
-};
-MODULE_DEVICE_TABLE(pci, specialx_pci_tbl);
-
-module_init(specialix_init_module);
-module_exit(specialix_exit_module);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CHARDEV_MAJOR(SPECIALIX_NORMAL_MAJOR);
diff --git a/drivers/staging/tty/specialix_io8.h b/drivers/staging/tty/specialix_io8.h
deleted file mode 100644
index 1215d7e2cb3..00000000000
--- a/drivers/staging/tty/specialix_io8.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * linux/drivers/char/specialix_io8.h --
- * Specialix IO8+ multiport serial driver.
- *
- * Copyright (C) 1997 Roger Wolff (R.E.Wolff@BitWizard.nl)
- * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com)
- *
- *
- * Specialix pays for the development and support of this driver.
- * Please DO contact io8-linux@specialix.co.uk if you require
- * support.
- *
- * This driver was developed in the BitWizard linux device
- * driver service. If you require a linux device driver for your
- * product, please contact devices@BitWizard.nl for a quote.
- *
- * This code is firmly based on the riscom/8 serial driver,
- * written by Dmitry Gorodchanin. The specialix IO8+ card
- * programming information was obtained from the CL-CD1865 Data
- * Book, and Specialix document number 6200059: IO8+ Hardware
- * Functional Specification.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- * */
-
-#ifndef __LINUX_SPECIALIX_H
-#define __LINUX_SPECIALIX_H
-
-#include <linux/serial.h>
-
-#ifdef __KERNEL__
-
-/* You can have max 4 ISA cards in one PC, and I recommend not much
-more than a few PCI versions of the card. */
-
-#define SX_NBOARD 8
-
-/* NOTE: Specialix decoder recognizes 4 addresses, but only two are used.... */
-#define SX_IO_SPACE 4
-/* The PCI version decodes 8 addresses, but still only 2 are used. */
-#define SX_PCI_IO_SPACE 8
-
-/* eight ports per board. */
-#define SX_NPORT 8
-#define SX_BOARD(line) ((line) / SX_NPORT)
-#define SX_PORT(line) ((line) & (SX_NPORT - 1))
-
-
-#define SX_DATA_REG 0 /* Base+0 : Data register */
-#define SX_ADDR_REG 1 /* base+1 : Address register. */
-
-#define MHz *1000000 /* I'm ashamed of myself. */
-
-/* On-board oscillator frequency */
-#define SX_OSCFREQ (25 MHz/2)
-/* There is a 25MHz crystal on the board, but the chip is in /2 mode */
-
-
-/* Ticks per sec. Used for setting receiver timeout and break length */
-#define SPECIALIX_TPS 4000
-
-/* Yeah, after heavy testing I decided it must be 6.
- * Sure, you can change it if needed.
- */
-#define SPECIALIX_RXFIFO 6 /* Max. receiver FIFO size (1-8) */
-
-#define SPECIALIX_MAGIC 0x0907
-
-#define SX_CCR_TIMEOUT 10000 /* CCR timeout. You may need to wait up to
- 10 milliseconds before the internal
- processor is available again after
- you give it a command */
-
-#define SX_IOBASE1 0x100
-#define SX_IOBASE2 0x180
-#define SX_IOBASE3 0x250
-#define SX_IOBASE4 0x260
-
-struct specialix_board {
- unsigned long flags;
- unsigned short base;
- unsigned char irq;
- //signed char count;
- int count;
- unsigned char DTR;
- int reg;
- spinlock_t lock;
-};
-
-#define SX_BOARD_PRESENT 0x00000001
-#define SX_BOARD_ACTIVE 0x00000002
-#define SX_BOARD_IS_PCI 0x00000004
-
-
-struct specialix_port {
- int magic;
- struct tty_port port;
- int baud_base;
- int flags;
- int timeout;
- unsigned char * xmit_buf;
- int custom_divisor;
- int xmit_head;
- int xmit_tail;
- int xmit_cnt;
- short wakeup_chars;
- short break_length;
- unsigned char mark_mask;
- unsigned char IER;
- unsigned char MSVR;
- unsigned char COR2;
- unsigned long overrun;
- unsigned long hits[10];
- spinlock_t lock;
-};
-
-#endif /* __KERNEL__ */
-#endif /* __LINUX_SPECIALIX_H */
-
-
-
-
-
-
-
-
-
diff --git a/drivers/staging/tty/stallion.c b/drivers/staging/tty/stallion.c
deleted file mode 100644
index 4fff5cd3b16..00000000000
--- a/drivers/staging/tty/stallion.c
+++ /dev/null
@@ -1,4651 +0,0 @@
-/*****************************************************************************/
-
-/*
- * stallion.c -- stallion multiport serial driver.
- *
- * Copyright (C) 1996-1999 Stallion Technologies
- * Copyright (C) 1994-1996 Greg Ungerer.
- *
- * This code is loosely based on the Linux serial driver, written by
- * Linus Torvalds, Theodore T'so and others.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-/*****************************************************************************/
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/seq_file.h>
-#include <linux/cd1400.h>
-#include <linux/sc26198.h>
-#include <linux/comstats.h>
-#include <linux/stallion.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/ctype.h>
-
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-#include <linux/pci.h>
-
-/*****************************************************************************/
-
-/*
- * Define different board types. Use the standard Stallion "assigned"
- * board numbers. Boards supported in this driver are abbreviated as
- * EIO = EasyIO and ECH = EasyConnection 8/32.
- */
-#define BRD_EASYIO 20
-#define BRD_ECH 21
-#define BRD_ECHMC 22
-#define BRD_ECHPCI 26
-#define BRD_ECH64PCI 27
-#define BRD_EASYIOPCI 28
-
-struct stlconf {
- unsigned int brdtype;
- int ioaddr1;
- int ioaddr2;
- unsigned long memaddr;
- int irq;
- int irqtype;
-};
-
-static unsigned int stl_nrbrds;
-
-/*****************************************************************************/
-
-/*
- * Define some important driver characteristics. Device major numbers
- * allocated as per Linux Device Registry.
- */
-#ifndef STL_SIOMEMMAJOR
-#define STL_SIOMEMMAJOR 28
-#endif
-#ifndef STL_SERIALMAJOR
-#define STL_SERIALMAJOR 24
-#endif
-#ifndef STL_CALLOUTMAJOR
-#define STL_CALLOUTMAJOR 25
-#endif
-
-/*
- * Set the TX buffer size. Bigger is better, but we don't want
- * to chew too much memory with buffers!
- */
-#define STL_TXBUFLOW 512
-#define STL_TXBUFSIZE 4096
-
-/*****************************************************************************/
-
-/*
- * Define our local driver identity first. Set up stuff to deal with
- * all the local structures required by a serial tty driver.
- */
-static char *stl_drvtitle = "Stallion Multiport Serial Driver";
-static char *stl_drvname = "stallion";
-static char *stl_drvversion = "5.6.0";
-
-static struct tty_driver *stl_serial;
-
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially. Basically all it defines is a raw port
- * at 9600, 8 data bits, 1 stop bit.
- */
-static struct ktermios stl_deftermios = {
- .c_cflag = (B9600 | CS8 | CREAD | HUPCL | CLOCAL),
- .c_cc = INIT_C_CC,
- .c_ispeed = 9600,
- .c_ospeed = 9600,
-};
-
-/*
- * Define global place to put buffer overflow characters.
- */
-static char stl_unwanted[SC26198_RXFIFOSIZE];
-
-/*****************************************************************************/
-
-static DEFINE_MUTEX(stl_brdslock);
-static struct stlbrd *stl_brds[STL_MAXBRDS];
-
-static const struct tty_port_operations stl_port_ops;
-
-/*
- * Per board state flags. Used with the state field of the board struct.
- * Not really much here!
- */
-#define BRD_FOUND 0x1
-#define STL_PROBED 0x2
-
-
-/*
- * Define the port structure istate flags. This set of flags is
- * modified at interrupt time - so setting and resetting them needs
- * to be atomic. Use the bit clear/setting routines for this.
- */
-#define ASYI_TXBUSY 1
-#define ASYI_TXLOW 2
-#define ASYI_TXFLOWED 3
-
-/*
- * Define an array of board names as printable strings. Handy for
- * referencing boards when printing trace and stuff.
- */
-static char *stl_brdnames[] = {
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- "EasyIO",
- "EC8/32-AT",
- "EC8/32-MC",
- NULL,
- NULL,
- NULL,
- "EC8/32-PCI",
- "EC8/64-PCI",
- "EasyIO-PCI",
-};
-
-/*****************************************************************************/
-
-/*
- * Define some string labels for arguments passed from the module
- * load line. These allow for easy board definitions, and easy
- * modification of the io, memory and irq resources.
- */
-static unsigned int stl_nargs;
-static char *board0[4];
-static char *board1[4];
-static char *board2[4];
-static char *board3[4];
-
-static char **stl_brdsp[] = {
- (char **) &board0,
- (char **) &board1,
- (char **) &board2,
- (char **) &board3
-};
-
-/*
- * Define a set of common board names, and types. This is used to
- * parse any module arguments.
- */
-
-static struct {
- char *name;
- int type;
-} stl_brdstr[] = {
- { "easyio", BRD_EASYIO },
- { "eio", BRD_EASYIO },
- { "20", BRD_EASYIO },
- { "ec8/32", BRD_ECH },
- { "ec8/32-at", BRD_ECH },
- { "ec8/32-isa", BRD_ECH },
- { "ech", BRD_ECH },
- { "echat", BRD_ECH },
- { "21", BRD_ECH },
- { "ec8/32-mc", BRD_ECHMC },
- { "ec8/32-mca", BRD_ECHMC },
- { "echmc", BRD_ECHMC },
- { "echmca", BRD_ECHMC },
- { "22", BRD_ECHMC },
- { "ec8/32-pc", BRD_ECHPCI },
- { "ec8/32-pci", BRD_ECHPCI },
- { "26", BRD_ECHPCI },
- { "ec8/64-pc", BRD_ECH64PCI },
- { "ec8/64-pci", BRD_ECH64PCI },
- { "ech-pci", BRD_ECH64PCI },
- { "echpci", BRD_ECH64PCI },
- { "echpc", BRD_ECH64PCI },
- { "27", BRD_ECH64PCI },
- { "easyio-pc", BRD_EASYIOPCI },
- { "easyio-pci", BRD_EASYIOPCI },
- { "eio-pci", BRD_EASYIOPCI },
- { "eiopci", BRD_EASYIOPCI },
- { "28", BRD_EASYIOPCI },
-};
-
-/*
- * Define the module arguments.
- */
-
-module_param_array(board0, charp, &stl_nargs, 0);
-MODULE_PARM_DESC(board0, "Board 0 config -> name[,ioaddr[,ioaddr2][,irq]]");
-module_param_array(board1, charp, &stl_nargs, 0);
-MODULE_PARM_DESC(board1, "Board 1 config -> name[,ioaddr[,ioaddr2][,irq]]");
-module_param_array(board2, charp, &stl_nargs, 0);
-MODULE_PARM_DESC(board2, "Board 2 config -> name[,ioaddr[,ioaddr2][,irq]]");
-module_param_array(board3, charp, &stl_nargs, 0);
-MODULE_PARM_DESC(board3, "Board 3 config -> name[,ioaddr[,ioaddr2][,irq]]");
-
-/*****************************************************************************/
-
-/*
- * Hardware ID bits for the EasyIO and ECH boards. These defines apply
- * to the directly accessible io ports of these boards (not the uarts -
- * they are in cd1400.h and sc26198.h).
- */
-#define EIO_8PORTRS 0x04
-#define EIO_4PORTRS 0x05
-#define EIO_8PORTDI 0x00
-#define EIO_8PORTM 0x06
-#define EIO_MK3 0x03
-#define EIO_IDBITMASK 0x07
-
-#define EIO_BRDMASK 0xf0
-#define ID_BRD4 0x10
-#define ID_BRD8 0x20
-#define ID_BRD16 0x30
-
-#define EIO_INTRPEND 0x08
-#define EIO_INTEDGE 0x00
-#define EIO_INTLEVEL 0x08
-#define EIO_0WS 0x10
-
-#define ECH_ID 0xa0
-#define ECH_IDBITMASK 0xe0
-#define ECH_BRDENABLE 0x08
-#define ECH_BRDDISABLE 0x00
-#define ECH_INTENABLE 0x01
-#define ECH_INTDISABLE 0x00
-#define ECH_INTLEVEL 0x02
-#define ECH_INTEDGE 0x00
-#define ECH_INTRPEND 0x01
-#define ECH_BRDRESET 0x01
-
-#define ECHMC_INTENABLE 0x01
-#define ECHMC_BRDRESET 0x02
-
-#define ECH_PNLSTATUS 2
-#define ECH_PNL16PORT 0x20
-#define ECH_PNLIDMASK 0x07
-#define ECH_PNLXPID 0x40
-#define ECH_PNLINTRPEND 0x80
-
-#define ECH_ADDR2MASK 0x1e0
-
-/*
- * Define the vector mapping bits for the programmable interrupt board
- * hardware. These bits encode the interrupt for the board to use - it
- * is software selectable (except the EIO-8M).
- */
-static unsigned char stl_vecmap[] = {
- 0xff, 0xff, 0xff, 0x04, 0x06, 0x05, 0xff, 0x07,
- 0xff, 0xff, 0x00, 0x02, 0x01, 0xff, 0xff, 0x03
-};
-
-/*
- * Lock ordering is that you may not take stallion_lock holding
- * brd_lock.
- */
-
-static spinlock_t brd_lock; /* Guard the board mapping */
-static spinlock_t stallion_lock; /* Guard the tty driver */
-
-/*
- * Set up enable and disable macros for the ECH boards. They require
- * the secondary io address space to be activated and deactivated.
- * This way all ECH boards can share their secondary io region.
- * If this is an ECH-PCI board then we also need to set the page pointer
- * to point to the correct page.
- */
-#define BRDENABLE(brdnr,pagenr) \
- if (stl_brds[(brdnr)]->brdtype == BRD_ECH) \
- outb((stl_brds[(brdnr)]->ioctrlval | ECH_BRDENABLE), \
- stl_brds[(brdnr)]->ioctrl); \
- else if (stl_brds[(brdnr)]->brdtype == BRD_ECHPCI) \
- outb((pagenr), stl_brds[(brdnr)]->ioctrl);
-
-#define BRDDISABLE(brdnr) \
- if (stl_brds[(brdnr)]->brdtype == BRD_ECH) \
- outb((stl_brds[(brdnr)]->ioctrlval | ECH_BRDDISABLE), \
- stl_brds[(brdnr)]->ioctrl);
-
-#define STL_CD1400MAXBAUD 230400
-#define STL_SC26198MAXBAUD 460800
-
-#define STL_BAUDBASE 115200
-#define STL_CLOSEDELAY (5 * HZ / 10)
-
-/*****************************************************************************/
-
-/*
- * Define the Stallion PCI vendor and device IDs.
- */
-#ifndef PCI_VENDOR_ID_STALLION
-#define PCI_VENDOR_ID_STALLION 0x124d
-#endif
-#ifndef PCI_DEVICE_ID_ECHPCI832
-#define PCI_DEVICE_ID_ECHPCI832 0x0000
-#endif
-#ifndef PCI_DEVICE_ID_ECHPCI864
-#define PCI_DEVICE_ID_ECHPCI864 0x0002
-#endif
-#ifndef PCI_DEVICE_ID_EIOPCI
-#define PCI_DEVICE_ID_EIOPCI 0x0003
-#endif
-
-/*
- * Define the PCI device ID table covering all Stallion PCI boards.
- */
-
-static struct pci_device_id stl_pcibrds[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_STALLION, PCI_DEVICE_ID_ECHPCI864),
- .driver_data = BRD_ECH64PCI },
- { PCI_DEVICE(PCI_VENDOR_ID_STALLION, PCI_DEVICE_ID_EIOPCI),
- .driver_data = BRD_EASYIOPCI },
- { PCI_DEVICE(PCI_VENDOR_ID_STALLION, PCI_DEVICE_ID_ECHPCI832),
- .driver_data = BRD_ECHPCI },
- { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87410),
- .driver_data = BRD_ECHPCI },
- { }
-};
-MODULE_DEVICE_TABLE(pci, stl_pcibrds);
-
-/*****************************************************************************/
-
-/*
- * Define macros to extract a brd/port number from a minor number.
- */
-#define MINOR2BRD(min) (((min) & 0xc0) >> 6)
-#define MINOR2PORT(min) ((min) & 0x3f)
-
-/*
- * Define a baud rate table that converts termios baud rate selector
- * into the actual baud rate value. All baud rate calculations are
- * based on the actual baud rate required.
- */
-static unsigned int stl_baudrates[] = {
- 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
- 9600, 19200, 38400, 57600, 115200, 230400, 460800, 921600
-};
-
-/*****************************************************************************/
-
-/*
- * Declare all those functions in this driver!
- */
-
-static long stl_memioctl(struct file *fp, unsigned int cmd, unsigned long arg);
-static int stl_brdinit(struct stlbrd *brdp);
-static int stl_getportstats(struct tty_struct *tty, struct stlport *portp, comstats_t __user *cp);
-static int stl_clrportstats(struct stlport *portp, comstats_t __user *cp);
-
-/*
- * CD1400 uart specific handling functions.
- */
-static void stl_cd1400setreg(struct stlport *portp, int regnr, int value);
-static int stl_cd1400getreg(struct stlport *portp, int regnr);
-static int stl_cd1400updatereg(struct stlport *portp, int regnr, int value);
-static int stl_cd1400panelinit(struct stlbrd *brdp, struct stlpanel *panelp);
-static void stl_cd1400portinit(struct stlbrd *brdp, struct stlpanel *panelp, struct stlport *portp);
-static void stl_cd1400setport(struct stlport *portp, struct ktermios *tiosp);
-static int stl_cd1400getsignals(struct stlport *portp);
-static void stl_cd1400setsignals(struct stlport *portp, int dtr, int rts);
-static void stl_cd1400ccrwait(struct stlport *portp);
-static void stl_cd1400enablerxtx(struct stlport *portp, int rx, int tx);
-static void stl_cd1400startrxtx(struct stlport *portp, int rx, int tx);
-static void stl_cd1400disableintrs(struct stlport *portp);
-static void stl_cd1400sendbreak(struct stlport *portp, int len);
-static void stl_cd1400flowctrl(struct stlport *portp, int state);
-static void stl_cd1400sendflow(struct stlport *portp, int state);
-static void stl_cd1400flush(struct stlport *portp);
-static int stl_cd1400datastate(struct stlport *portp);
-static void stl_cd1400eiointr(struct stlpanel *panelp, unsigned int iobase);
-static void stl_cd1400echintr(struct stlpanel *panelp, unsigned int iobase);
-static void stl_cd1400txisr(struct stlpanel *panelp, int ioaddr);
-static void stl_cd1400rxisr(struct stlpanel *panelp, int ioaddr);
-static void stl_cd1400mdmisr(struct stlpanel *panelp, int ioaddr);
-
-static inline int stl_cd1400breakisr(struct stlport *portp, int ioaddr);
-
-/*
- * SC26198 uart specific handling functions.
- */
-static void stl_sc26198setreg(struct stlport *portp, int regnr, int value);
-static int stl_sc26198getreg(struct stlport *portp, int regnr);
-static int stl_sc26198updatereg(struct stlport *portp, int regnr, int value);
-static int stl_sc26198getglobreg(struct stlport *portp, int regnr);
-static int stl_sc26198panelinit(struct stlbrd *brdp, struct stlpanel *panelp);
-static void stl_sc26198portinit(struct stlbrd *brdp, struct stlpanel *panelp, struct stlport *portp);
-static void stl_sc26198setport(struct stlport *portp, struct ktermios *tiosp);
-static int stl_sc26198getsignals(struct stlport *portp);
-static void stl_sc26198setsignals(struct stlport *portp, int dtr, int rts);
-static void stl_sc26198enablerxtx(struct stlport *portp, int rx, int tx);
-static void stl_sc26198startrxtx(struct stlport *portp, int rx, int tx);
-static void stl_sc26198disableintrs(struct stlport *portp);
-static void stl_sc26198sendbreak(struct stlport *portp, int len);
-static void stl_sc26198flowctrl(struct stlport *portp, int state);
-static void stl_sc26198sendflow(struct stlport *portp, int state);
-static void stl_sc26198flush(struct stlport *portp);
-static int stl_sc26198datastate(struct stlport *portp);
-static void stl_sc26198wait(struct stlport *portp);
-static void stl_sc26198txunflow(struct stlport *portp, struct tty_struct *tty);
-static void stl_sc26198intr(struct stlpanel *panelp, unsigned int iobase);
-static void stl_sc26198txisr(struct stlport *port);
-static void stl_sc26198rxisr(struct stlport *port, unsigned int iack);
-static void stl_sc26198rxbadch(struct stlport *portp, unsigned char status, char ch);
-static void stl_sc26198rxbadchars(struct stlport *portp);
-static void stl_sc26198otherisr(struct stlport *port, unsigned int iack);
-
-/*****************************************************************************/
-
-/*
- * Generic UART support structure.
- */
-typedef struct uart {
- int (*panelinit)(struct stlbrd *brdp, struct stlpanel *panelp);
- void (*portinit)(struct stlbrd *brdp, struct stlpanel *panelp, struct stlport *portp);
- void (*setport)(struct stlport *portp, struct ktermios *tiosp);
- int (*getsignals)(struct stlport *portp);
- void (*setsignals)(struct stlport *portp, int dtr, int rts);
- void (*enablerxtx)(struct stlport *portp, int rx, int tx);
- void (*startrxtx)(struct stlport *portp, int rx, int tx);
- void (*disableintrs)(struct stlport *portp);
- void (*sendbreak)(struct stlport *portp, int len);
- void (*flowctrl)(struct stlport *portp, int state);
- void (*sendflow)(struct stlport *portp, int state);
- void (*flush)(struct stlport *portp);
- int (*datastate)(struct stlport *portp);
- void (*intr)(struct stlpanel *panelp, unsigned int iobase);
-} uart_t;
-
-/*
- * Define some macros to make calling these functions nice and clean.
- */
-#define stl_panelinit (* ((uart_t *) panelp->uartp)->panelinit)
-#define stl_portinit (* ((uart_t *) portp->uartp)->portinit)
-#define stl_setport (* ((uart_t *) portp->uartp)->setport)
-#define stl_getsignals (* ((uart_t *) portp->uartp)->getsignals)
-#define stl_setsignals (* ((uart_t *) portp->uartp)->setsignals)
-#define stl_enablerxtx (* ((uart_t *) portp->uartp)->enablerxtx)
-#define stl_startrxtx (* ((uart_t *) portp->uartp)->startrxtx)
-#define stl_disableintrs (* ((uart_t *) portp->uartp)->disableintrs)
-#define stl_sendbreak (* ((uart_t *) portp->uartp)->sendbreak)
-#define stl_flowctrl (* ((uart_t *) portp->uartp)->flowctrl)
-#define stl_sendflow (* ((uart_t *) portp->uartp)->sendflow)
-#define stl_flush (* ((uart_t *) portp->uartp)->flush)
-#define stl_datastate (* ((uart_t *) portp->uartp)->datastate)
-
-/*****************************************************************************/
-
-/*
- * CD1400 UART specific data initialization.
- */
-static uart_t stl_cd1400uart = {
- stl_cd1400panelinit,
- stl_cd1400portinit,
- stl_cd1400setport,
- stl_cd1400getsignals,
- stl_cd1400setsignals,
- stl_cd1400enablerxtx,
- stl_cd1400startrxtx,
- stl_cd1400disableintrs,
- stl_cd1400sendbreak,
- stl_cd1400flowctrl,
- stl_cd1400sendflow,
- stl_cd1400flush,
- stl_cd1400datastate,
- stl_cd1400eiointr
-};
-
-/*
- * Define the offsets within the register bank of a cd1400 based panel.
- * These io address offsets are common to the EasyIO board as well.
- */
-#define EREG_ADDR 0
-#define EREG_DATA 4
-#define EREG_RXACK 5
-#define EREG_TXACK 6
-#define EREG_MDACK 7
-
-#define EREG_BANKSIZE 8
-
-#define CD1400_CLK 25000000
-#define CD1400_CLK8M 20000000
-
-/*
- * Define the cd1400 baud rate clocks. These are used when calculating
- * what clock and divisor to use for the required baud rate. Also
- * define the maximum baud rate allowed, and the default base baud.
- */
-static int stl_cd1400clkdivs[] = {
- CD1400_CLK0, CD1400_CLK1, CD1400_CLK2, CD1400_CLK3, CD1400_CLK4
-};
-
-/*****************************************************************************/
-
-/*
- * SC26198 UART specific data initialization.
- */
-static uart_t stl_sc26198uart = {
- stl_sc26198panelinit,
- stl_sc26198portinit,
- stl_sc26198setport,
- stl_sc26198getsignals,
- stl_sc26198setsignals,
- stl_sc26198enablerxtx,
- stl_sc26198startrxtx,
- stl_sc26198disableintrs,
- stl_sc26198sendbreak,
- stl_sc26198flowctrl,
- stl_sc26198sendflow,
- stl_sc26198flush,
- stl_sc26198datastate,
- stl_sc26198intr
-};
-
-/*
- * Define the offsets within the register bank of a sc26198 based panel.
- */
-#define XP_DATA 0
-#define XP_ADDR 1
-#define XP_MODID 2
-#define XP_STATUS 2
-#define XP_IACK 3
-
-#define XP_BANKSIZE 4
-
-/*
- * Define the sc26198 baud rate table. Offsets within the table
- * represent the actual baud rate selector of sc26198 registers.
- */
-static unsigned int sc26198_baudtable[] = {
- 50, 75, 150, 200, 300, 450, 600, 900, 1200, 1800, 2400, 3600,
- 4800, 7200, 9600, 14400, 19200, 28800, 38400, 57600, 115200,
- 230400, 460800, 921600
-};
-
-#define SC26198_NRBAUDS ARRAY_SIZE(sc26198_baudtable)
-
-/*****************************************************************************/
-
-/*
- * Define the driver info for a user level control device. Used mainly
- * to get at port stats without going through the port device itself.
- */
-static const struct file_operations stl_fsiomem = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = stl_memioctl,
- .llseek = noop_llseek,
-};
-
-static struct class *stallion_class;
-
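-/*
- * Re-sample the modem signals and handle a carrier-detect transition:
- * wake up any blocked opens when carrier appears, and hang the port up
- * when carrier is lost (if CD checking is enabled).
- */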
-static void stl_cd_change(struct stlport *portp)
-{
- unsigned int oldsigs = portp->sigs;
- struct tty_struct *tty = tty_port_tty_get(&portp->port);
-
- if (!tty)
- return;
-
- portp->sigs = stl_getsignals(portp);
-
- if ((portp->sigs & TIOCM_CD) && ((oldsigs & TIOCM_CD) == 0))
- wake_up_interruptible(&portp->port.open_wait);
-
- if ((oldsigs & TIOCM_CD) && ((portp->sigs & TIOCM_CD) == 0))
- if (portp->port.flags & ASYNC_CHECK_CD)
- tty_hangup(tty);
- tty_kref_put(tty);
-}
-
-/*
- * Check for any arguments passed in on the module load command line.
- */
-
-/*****************************************************************************/
-
-/*
- * Parse the supplied argument string into the board conf struct.
- */
-
-static int __init stl_parsebrd(struct stlconf *confp, char **argp)
-{
- char *sp;
- unsigned int i;
-
- pr_debug("stl_parsebrd(confp=%p,argp=%p)\n", confp, argp);
-
- if ((argp[0] == NULL) || (*argp[0] == 0))
- return 0;
-
- for (sp = argp[0], i = 0; (*sp != 0) && (i < 25); sp++, i++)
- *sp = tolower(*sp);
-
- for (i = 0; i < ARRAY_SIZE(stl_brdstr); i++)
- if (strcmp(stl_brdstr[i].name, argp[0]) == 0)
- break;
-
- if (i == ARRAY_SIZE(stl_brdstr)) {
- printk("STALLION: unknown board name, %s?\n", argp[0]);
- return 0;
- }
-
- confp->brdtype = stl_brdstr[i].type;
-
- i = 1;
- if ((argp[i] != NULL) && (*argp[i] != 0))
- confp->ioaddr1 = simple_strtoul(argp[i], NULL, 0);
- i++;
- if (confp->brdtype == BRD_ECH) {
- if ((argp[i] != NULL) && (*argp[i] != 0))
- confp->ioaddr2 = simple_strtoul(argp[i], NULL, 0);
- i++;
- }
- if ((argp[i] != NULL) && (*argp[i] != 0))
- confp->irq = simple_strtoul(argp[i], NULL, 0);
- return 1;
-}
-
-/*****************************************************************************/
-
-/*
- * Allocate a new board structure. Fill out the basic info in it.
- */
-
-static struct stlbrd *stl_allocbrd(void)
-{
- struct stlbrd *brdp;
-
- brdp = kzalloc(sizeof(struct stlbrd), GFP_KERNEL);
- if (!brdp) {
- printk("STALLION: failed to allocate memory (size=%Zd)\n",
- sizeof(struct stlbrd));
- return NULL;
- }
-
- brdp->magic = STL_BOARDMAGIC;
- return brdp;
-}
-
-/*****************************************************************************/
-
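-/*
- * Bring a port up: allocate its TX ring buffer, program the UART from
- * the termios settings, raise the modem signals and start the receiver.
- */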
-static int stl_activate(struct tty_port *port, struct tty_struct *tty)
-{
- struct stlport *portp = container_of(port, struct stlport, port);
- if (!portp->tx.buf) {
- portp->tx.buf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL);
- if (!portp->tx.buf)
- return -ENOMEM;
- portp->tx.head = portp->tx.buf;
- portp->tx.tail = portp->tx.buf;
- }
- stl_setport(portp, tty->termios);
- portp->sigs = stl_getsignals(portp);
- stl_setsignals(portp, 1, 1);
- stl_enablerxtx(portp, 1, 1);
- stl_startrxtx(portp, 1, 0);
- return 0;
-}
-
-static int stl_open(struct tty_struct *tty, struct file *filp)
-{
- struct stlport *portp;
- struct stlbrd *brdp;
- unsigned int minordev, brdnr, panelnr;
- int portnr;
-
- pr_debug("stl_open(tty=%p,filp=%p): device=%s\n", tty, filp, tty->name);
-
- minordev = tty->index;
- brdnr = MINOR2BRD(minordev);
- if (brdnr >= stl_nrbrds)
- return -ENODEV;
- brdp = stl_brds[brdnr];
- if (brdp == NULL)
- return -ENODEV;
-
- minordev = MINOR2PORT(minordev);
- for (portnr = -1, panelnr = 0; panelnr < STL_MAXPANELS; panelnr++) {
- if (brdp->panels[panelnr] == NULL)
- break;
- if (minordev < brdp->panels[panelnr]->nrports) {
- portnr = minordev;
- break;
- }
- minordev -= brdp->panels[panelnr]->nrports;
- }
- if (portnr < 0)
- return -ENODEV;
-
- portp = brdp->panels[panelnr]->ports[portnr];
- if (portp == NULL)
- return -ENODEV;
-
- tty->driver_data = portp;
- return tty_port_open(&portp->port, tty, filp);
-
-}
-
-/*****************************************************************************/
-
-static int stl_carrier_raised(struct tty_port *port)
-{
- struct stlport *portp = container_of(port, struct stlport, port);
- return (portp->sigs & TIOCM_CD) ? 1 : 0;
-}
-
-static void stl_dtr_rts(struct tty_port *port, int on)
-{
- struct stlport *portp = container_of(port, struct stlport, port);
- /* Takes brd_lock internally */
- stl_setsignals(portp, on, on);
-}
-
-/*****************************************************************************/
-
-static void stl_flushbuffer(struct tty_struct *tty)
-{
- struct stlport *portp;
-
- pr_debug("stl_flushbuffer(tty=%p)\n", tty);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
-
- stl_flush(portp);
- tty_wakeup(tty);
-}
-
-/*****************************************************************************/
-
-static void stl_waituntilsent(struct tty_struct *tty, int timeout)
-{
- struct stlport *portp;
- unsigned long tend;
-
- pr_debug("stl_waituntilsent(tty=%p,timeout=%d)\n", tty, timeout);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
-
- if (timeout == 0)
- timeout = HZ;
- tend = jiffies + timeout;
-
- while (stl_datastate(portp)) {
- if (signal_pending(current))
- break;
- msleep_interruptible(20);
- if (time_after_eq(jiffies, tend))
- break;
- }
-}
-
-/*****************************************************************************/
-
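-/*
- * Shut a port down: disable interrupts, stop the receiver and
- * transmitter, flush the hardware and free the TX ring buffer.
- */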
-static void stl_shutdown(struct tty_port *port)
-{
- struct stlport *portp = container_of(port, struct stlport, port);
- stl_disableintrs(portp);
- stl_enablerxtx(portp, 0, 0);
- stl_flush(portp);
- portp->istate = 0;
- if (portp->tx.buf != NULL) {
- kfree(portp->tx.buf);
- portp->tx.buf = NULL;
- portp->tx.head = NULL;
- portp->tx.tail = NULL;
- }
-}
-
-static void stl_close(struct tty_struct *tty, struct file *filp)
-{
- struct stlport*portp;
- pr_debug("stl_close(tty=%p,filp=%p)\n", tty, filp);
-
- portp = tty->driver_data;
- if(portp == NULL)
- return;
- tty_port_close(&portp->port, tty, filp);
-}
-
-/*****************************************************************************/
-
-/*
- * Write routine. Take data and stuff it into the TX ring queue.
- * If transmit interrupts are not running then start them.
- */
-
-static int stl_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
- struct stlport *portp;
- unsigned int len, stlen;
- unsigned char *chbuf;
- char *head, *tail;
-
- pr_debug("stl_write(tty=%p,buf=%p,count=%d)\n", tty, buf, count);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return 0;
- if (portp->tx.buf == NULL)
- return 0;
-
-/*
- * If copying direct from user space we must cater for page faults,
- * causing us to "sleep" here for a while. To handle this, copy in all
- * the data we need now into a local buffer, and once we have it all
- * copy it into the TX buffer.
- */
- chbuf = (unsigned char *) buf;
-
- head = portp->tx.head;
- tail = portp->tx.tail;
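- /* len: total free space (one byte kept spare); stlen: contiguous space up to the buffer end */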
- if (head >= tail) {
- len = STL_TXBUFSIZE - (head - tail) - 1;
- stlen = STL_TXBUFSIZE - (head - portp->tx.buf);
- } else {
- len = tail - head - 1;
- stlen = len;
- }
-
- len = min(len, (unsigned int)count);
- count = 0;
- while (len > 0) {
- stlen = min(len, stlen);
- memcpy(head, chbuf, stlen);
- len -= stlen;
- chbuf += stlen;
- count += stlen;
- head += stlen;
- if (head >= (portp->tx.buf + STL_TXBUFSIZE)) {
- head = portp->tx.buf;
- stlen = tail - head;
- }
- }
- portp->tx.head = head;
-
- clear_bit(ASYI_TXLOW, &portp->istate);
- stl_startrxtx(portp, -1, 1);
-
- return count;
-}
-
-/*****************************************************************************/
-
-static int stl_putchar(struct tty_struct *tty, unsigned char ch)
-{
- struct stlport *portp;
- unsigned int len;
- char *head, *tail;
-
- pr_debug("stl_putchar(tty=%p,ch=%x)\n", tty, ch);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return -EINVAL;
- if (portp->tx.buf == NULL)
- return -EINVAL;
-
- head = portp->tx.head;
- tail = portp->tx.tail;
-
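- /* Work out the free space, always keeping one byte spare */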
- len = (head >= tail) ? (STL_TXBUFSIZE - (head - tail)) : (tail - head);
- len--;
-
- if (len > 0) {
- *head++ = ch;
- if (head >= (portp->tx.buf + STL_TXBUFSIZE))
- head = portp->tx.buf;
- }
- portp->tx.head = head;
- return 0;
-}
-
-/*****************************************************************************/
-
-/*
- * If there are any characters in the buffer then make sure that TX
- * interrupts are on and get'em out. Normally used after the putchar
- * routine has been called.
- */
-
-static void stl_flushchars(struct tty_struct *tty)
-{
- struct stlport *portp;
-
- pr_debug("stl_flushchars(tty=%p)\n", tty);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
- if (portp->tx.buf == NULL)
- return;
-
- stl_startrxtx(portp, -1, 1);
-}
-
-/*****************************************************************************/
-
-static int stl_writeroom(struct tty_struct *tty)
-{
- struct stlport *portp;
- char *head, *tail;
-
- pr_debug("stl_writeroom(tty=%p)\n", tty);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return 0;
- if (portp->tx.buf == NULL)
- return 0;
-
- head = portp->tx.head;
- tail = portp->tx.tail;
- return (head >= tail) ? (STL_TXBUFSIZE - (head - tail) - 1) : (tail - head - 1);
-}
-
-/*****************************************************************************/
-
-/*
- * Return number of chars in the TX buffer. Normally we would just
- * calculate the number of chars in the buffer and return that, but if
- * the buffer is empty and TX interrupts are still on then we return
- * that the buffer still has 1 char in it. This way whoever called us
- * will not think that ALL chars have drained - since the UART still
- * must have some chars in it (we are busy after all).
- */
-
-static int stl_charsinbuffer(struct tty_struct *tty)
-{
- struct stlport *portp;
- unsigned int size;
- char *head, *tail;
-
- pr_debug("stl_charsinbuffer(tty=%p)\n", tty);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return 0;
- if (portp->tx.buf == NULL)
- return 0;
-
- head = portp->tx.head;
- tail = portp->tx.tail;
- size = (head >= tail) ? (head - tail) : (STL_TXBUFSIZE - (tail - head));
- if ((size == 0) && test_bit(ASYI_TXBUSY, &portp->istate))
- size = 1;
- return size;
-}
-
-/*****************************************************************************/
-
-/*
- * Generate the serial struct info.
- */
-
-static int stl_getserial(struct stlport *portp, struct serial_struct __user *sp)
-{
- struct serial_struct sio;
- struct stlbrd *brdp;
-
- pr_debug("stl_getserial(portp=%p,sp=%p)\n", portp, sp);
-
- memset(&sio, 0, sizeof(struct serial_struct));
-
- mutex_lock(&portp->port.mutex);
- sio.line = portp->portnr;
- sio.port = portp->ioaddr;
- sio.flags = portp->port.flags;
- sio.baud_base = portp->baud_base;
- sio.close_delay = portp->close_delay;
- sio.closing_wait = portp->closing_wait;
- sio.custom_divisor = portp->custom_divisor;
- sio.hub6 = 0;
- if (portp->uartp == &stl_cd1400uart) {
- sio.type = PORT_CIRRUS;
- sio.xmit_fifo_size = CD1400_TXFIFOSIZE;
- } else {
- sio.type = PORT_UNKNOWN;
- sio.xmit_fifo_size = SC26198_TXFIFOSIZE;
- }
-
- brdp = stl_brds[portp->brdnr];
- if (brdp != NULL)
- sio.irq = brdp->irq;
- mutex_unlock(&portp->port.mutex);
-
- return copy_to_user(sp, &sio, sizeof(struct serial_struct)) ? -EFAULT : 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Set port according to the serial struct info.
- * At this point we do not do any auto-configure stuff, so we will
- * just quietly ignore any requests to change irq, etc.
- */
-
-static int stl_setserial(struct tty_struct *tty, struct serial_struct __user *sp)
-{
- struct stlport * portp = tty->driver_data;
- struct serial_struct sio;
-
- pr_debug("stl_setserial(portp=%p,sp=%p)\n", portp, sp);
-
- if (copy_from_user(&sio, sp, sizeof(struct serial_struct)))
- return -EFAULT;
- mutex_lock(&portp->port.mutex);
- if (!capable(CAP_SYS_ADMIN)) {
- if ((sio.baud_base != portp->baud_base) ||
- (sio.close_delay != portp->close_delay) ||
- ((sio.flags & ~ASYNC_USR_MASK) !=
- (portp->port.flags & ~ASYNC_USR_MASK))) {
- mutex_unlock(&portp->port.mutex);
- return -EPERM;
- }
- }
-
- portp->port.flags = (portp->port.flags & ~ASYNC_USR_MASK) |
- (sio.flags & ASYNC_USR_MASK);
- portp->baud_base = sio.baud_base;
- portp->close_delay = sio.close_delay;
- portp->closing_wait = sio.closing_wait;
- portp->custom_divisor = sio.custom_divisor;
- mutex_unlock(&portp->port.mutex);
- stl_setport(portp, tty->termios);
- return 0;
-}
-
-/*****************************************************************************/
-
-static int stl_tiocmget(struct tty_struct *tty)
-{
- struct stlport *portp;
-
- portp = tty->driver_data;
- if (portp == NULL)
- return -ENODEV;
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
-
- return stl_getsignals(portp);
-}
-
-static int stl_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct stlport *portp;
- int rts = -1, dtr = -1;
-
- portp = tty->driver_data;
- if (portp == NULL)
- return -ENODEV;
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
-
- if (set & TIOCM_RTS)
- rts = 1;
- if (set & TIOCM_DTR)
- dtr = 1;
- if (clear & TIOCM_RTS)
- rts = 0;
- if (clear & TIOCM_DTR)
- dtr = 0;
-
- stl_setsignals(portp, dtr, rts);
- return 0;
-}
-
-static int stl_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
-{
- struct stlport *portp;
- int rc;
- void __user *argp = (void __user *)arg;
-
- pr_debug("stl_ioctl(tty=%p,cmd=%x,arg=%lx)\n", tty, cmd, arg);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return -ENODEV;
-
- if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
- (cmd != COM_GETPORTSTATS) && (cmd != COM_CLRPORTSTATS))
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
-
- rc = 0;
-
- switch (cmd) {
- case TIOCGSERIAL:
- rc = stl_getserial(portp, argp);
- break;
- case TIOCSSERIAL:
- rc = stl_setserial(tty, argp);
- break;
- case COM_GETPORTSTATS:
- rc = stl_getportstats(tty, portp, argp);
- break;
- case COM_CLRPORTSTATS:
- rc = stl_clrportstats(portp, argp);
- break;
- case TIOCSERCONFIG:
- case TIOCSERGWILD:
- case TIOCSERSWILD:
- case TIOCSERGETLSR:
- case TIOCSERGSTRUCT:
- case TIOCSERGETMULTI:
- case TIOCSERSETMULTI:
- default:
- rc = -ENOIOCTLCMD;
- break;
- }
- return rc;
-}
-
-/*****************************************************************************/
-
-/*
- * Start the transmitter again. Just turn TX interrupts back on.
- */
-
-static void stl_start(struct tty_struct *tty)
-{
- struct stlport *portp;
-
- pr_debug("stl_start(tty=%p)\n", tty);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
- stl_startrxtx(portp, -1, 1);
-}
-
-/*****************************************************************************/
-
-static void stl_settermios(struct tty_struct *tty, struct ktermios *old)
-{
- struct stlport *portp;
- struct ktermios *tiosp;
-
- pr_debug("stl_settermios(tty=%p,old=%p)\n", tty, old);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
-
- tiosp = tty->termios;
- if ((tiosp->c_cflag == old->c_cflag) &&
- (tiosp->c_iflag == old->c_iflag))
- return;
-
- stl_setport(portp, tiosp);
- stl_setsignals(portp, ((tiosp->c_cflag & (CBAUD & ~CBAUDEX)) ? 1 : 0),
- -1);
- if ((old->c_cflag & CRTSCTS) && ((tiosp->c_cflag & CRTSCTS) == 0)) {
- tty->hw_stopped = 0;
- stl_start(tty);
- }
- if (((old->c_cflag & CLOCAL) == 0) && (tiosp->c_cflag & CLOCAL))
- wake_up_interruptible(&portp->port.open_wait);
-}
-
-/*****************************************************************************/
-
-/*
- * Attempt to flow control whoever is sending us data. Based on the
- * termios settings use software and/or hardware flow control.
- */
-
-static void stl_throttle(struct tty_struct *tty)
-{
- struct stlport *portp;
-
- pr_debug("stl_throttle(tty=%p)\n", tty);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
- stl_flowctrl(portp, 0);
-}
-
-/*****************************************************************************/
-
-/*
- * Unflow control the device sending us data...
- */
-
-static void stl_unthrottle(struct tty_struct *tty)
-{
- struct stlport *portp;
-
- pr_debug("stl_unthrottle(tty=%p)\n", tty);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
- stl_flowctrl(portp, 1);
-}
-
-/*****************************************************************************/
-
-/*
- * Stop the transmitter. Basically to do this we will just turn TX
- * interrupts off.
- */
-
-static void stl_stop(struct tty_struct *tty)
-{
- struct stlport *portp;
-
- pr_debug("stl_stop(tty=%p)\n", tty);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
- stl_startrxtx(portp, -1, 0);
-}
-
-/*****************************************************************************/
-
-/*
- * Hang up this port. This is pretty much like closing the port, only
- * a little more brutal. No waiting for data to drain. Shut down the
- * port and maybe drop signals.
- */
-
-static void stl_hangup(struct tty_struct *tty)
-{
- struct stlport *portp = tty->driver_data;
- pr_debug("stl_hangup(tty=%p)\n", tty);
-
- if (portp == NULL)
- return;
- tty_port_hangup(&portp->port);
-}
-
-/*****************************************************************************/
-
-static int stl_breakctl(struct tty_struct *tty, int state)
-{
- struct stlport *portp;
-
- pr_debug("stl_breakctl(tty=%p,state=%d)\n", tty, state);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return -EINVAL;
-
- stl_sendbreak(portp, ((state == -1) ? 1 : 2));
- return 0;
-}
-
-/*****************************************************************************/
-
-static void stl_sendxchar(struct tty_struct *tty, char ch)
-{
- struct stlport *portp;
-
- pr_debug("stl_sendxchar(tty=%p,ch=%x)\n", tty, ch);
-
- portp = tty->driver_data;
- if (portp == NULL)
- return;
-
- if (ch == STOP_CHAR(tty))
- stl_sendflow(portp, 0);
- else if (ch == START_CHAR(tty))
- stl_sendflow(portp, 1);
- else
- stl_putchar(tty, ch);
-}
-
-static void stl_portinfo(struct seq_file *m, struct stlport *portp, int portnr)
-{
- int sigs;
- char sep;
-
- seq_printf(m, "%d: uart:%s tx:%d rx:%d",
- portnr, (portp->hwid == 1) ? "SC26198" : "CD1400",
- (int) portp->stats.txtotal, (int) portp->stats.rxtotal);
-
- if (portp->stats.rxframing)
- seq_printf(m, " fe:%d", (int) portp->stats.rxframing);
- if (portp->stats.rxparity)
- seq_printf(m, " pe:%d", (int) portp->stats.rxparity);
- if (portp->stats.rxbreaks)
- seq_printf(m, " brk:%d", (int) portp->stats.rxbreaks);
- if (portp->stats.rxoverrun)
- seq_printf(m, " oe:%d", (int) portp->stats.rxoverrun);
-
- sigs = stl_getsignals(portp);
- sep = ' ';
- if (sigs & TIOCM_RTS) {
- seq_printf(m, "%c%s", sep, "RTS");
- sep = '|';
- }
- if (sigs & TIOCM_CTS) {
- seq_printf(m, "%c%s", sep, "CTS");
- sep = '|';
- }
- if (sigs & TIOCM_DTR) {
- seq_printf(m, "%c%s", sep, "DTR");
- sep = '|';
- }
- if (sigs & TIOCM_CD) {
- seq_printf(m, "%c%s", sep, "DCD");
- sep = '|';
- }
- if (sigs & TIOCM_DSR) {
- seq_printf(m, "%c%s", sep, "DSR");
- sep = '|';
- }
- seq_putc(m, '\n');
-}
-
-/*****************************************************************************/
-
-/*
- * Port info, read from the /proc file system.
- */
-
-static int stl_proc_show(struct seq_file *m, void *v)
-{
- struct stlbrd *brdp;
- struct stlpanel *panelp;
- struct stlport *portp;
- unsigned int brdnr, panelnr, portnr;
- int totalport;
-
- totalport = 0;
-
- seq_printf(m, "%s: version %s\n", stl_drvtitle, stl_drvversion);
-
-/*
- * We scan through each board, panel and port. The offset is
- * calculated on the fly, and irrelevant ports are skipped.
- */
- for (brdnr = 0; brdnr < stl_nrbrds; brdnr++) {
- brdp = stl_brds[brdnr];
- if (brdp == NULL)
- continue;
- if (brdp->state == 0)
- continue;
-
- totalport = brdnr * STL_MAXPORTS;
- for (panelnr = 0; panelnr < brdp->nrpanels; panelnr++) {
- panelp = brdp->panels[panelnr];
- if (panelp == NULL)
- continue;
-
- for (portnr = 0; portnr < panelp->nrports; portnr++,
- totalport++) {
- portp = panelp->ports[portnr];
- if (portp == NULL)
- continue;
- stl_portinfo(m, portp, totalport);
- }
- }
- }
- return 0;
-}
-
-static int stl_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, stl_proc_show, NULL);
-}
-
-static const struct file_operations stl_proc_fops = {
- .owner = THIS_MODULE,
- .open = stl_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-/*****************************************************************************/
-
-/*
- * All board interrupts are vectored through here first. This code then
- * calls off to the appropriate board interrupt handlers.
- */
-
-static irqreturn_t stl_intr(int irq, void *dev_id)
-{
- struct stlbrd *brdp = dev_id;
-
- pr_debug("stl_intr(brdp=%p,irq=%d)\n", brdp, brdp->irq);
-
- return IRQ_RETVAL((* brdp->isr)(brdp));
-}
-
-/*****************************************************************************/
-
-/*
- * Interrupt service routine for EasyIO board types.
- */
-
-static int stl_eiointr(struct stlbrd *brdp)
-{
- struct stlpanel *panelp;
- unsigned int iobase;
- int handled = 0;
-
- spin_lock(&brd_lock);
- panelp = brdp->panels[0];
- iobase = panelp->iobase;
- while (inb(brdp->iostatus) & EIO_INTRPEND) {
- handled = 1;
- (* panelp->isr)(panelp, iobase);
- }
- spin_unlock(&brd_lock);
- return handled;
-}
-
-/*****************************************************************************/
-
-/*
- * Interrupt service routine for ECH-AT board types.
- */
-
-static int stl_echatintr(struct stlbrd *brdp)
-{
- struct stlpanel *panelp;
- unsigned int ioaddr, bnknr;
- int handled = 0;
-
- outb((brdp->ioctrlval | ECH_BRDENABLE), brdp->ioctrl);
-
- while (inb(brdp->iostatus) & ECH_INTRPEND) {
- handled = 1;
- for (bnknr = 0; bnknr < brdp->nrbnks; bnknr++) {
- ioaddr = brdp->bnkstataddr[bnknr];
- if (inb(ioaddr) & ECH_PNLINTRPEND) {
- panelp = brdp->bnk2panel[bnknr];
- (* panelp->isr)(panelp, (ioaddr & 0xfffc));
- }
- }
- }
-
- outb((brdp->ioctrlval | ECH_BRDDISABLE), brdp->ioctrl);
-
- return handled;
-}
-
-/*****************************************************************************/
-
-/*
- * Interrupt service routine for ECH-MCA board types.
- */
-
-static int stl_echmcaintr(struct stlbrd *brdp)
-{
- struct stlpanel *panelp;
- unsigned int ioaddr, bnknr;
- int handled = 0;
-
- while (inb(brdp->iostatus) & ECH_INTRPEND) {
- handled = 1;
- for (bnknr = 0; bnknr < brdp->nrbnks; bnknr++) {
- ioaddr = brdp->bnkstataddr[bnknr];
- if (inb(ioaddr) & ECH_PNLINTRPEND) {
- panelp = brdp->bnk2panel[bnknr];
- (* panelp->isr)(panelp, (ioaddr & 0xfffc));
- }
- }
- }
- return handled;
-}
-
-/*****************************************************************************/
-
-/*
- * Interrupt service routine for ECH-PCI board types.
- */
-
-static int stl_echpciintr(struct stlbrd *brdp)
-{
- struct stlpanel *panelp;
- unsigned int ioaddr, bnknr, recheck;
- int handled = 0;
-
- while (1) {
- recheck = 0;
- for (bnknr = 0; bnknr < brdp->nrbnks; bnknr++) {
- outb(brdp->bnkpageaddr[bnknr], brdp->ioctrl);
- ioaddr = brdp->bnkstataddr[bnknr];
- if (inb(ioaddr) & ECH_PNLINTRPEND) {
- panelp = brdp->bnk2panel[bnknr];
- (* panelp->isr)(panelp, (ioaddr & 0xfffc));
- recheck++;
- handled = 1;
- }
- }
- if (! recheck)
- break;
- }
- return handled;
-}
-
-/*****************************************************************************/
-
-/*
- * Interrupt service routine for ECH-8/64-PCI board types.
- */
-
-static int stl_echpci64intr(struct stlbrd *brdp)
-{
- struct stlpanel *panelp;
- unsigned int ioaddr, bnknr;
- int handled = 0;
-
- while (inb(brdp->ioctrl) & 0x1) {
- handled = 1;
- for (bnknr = 0; bnknr < brdp->nrbnks; bnknr++) {
- ioaddr = brdp->bnkstataddr[bnknr];
- if (inb(ioaddr) & ECH_PNLINTRPEND) {
- panelp = brdp->bnk2panel[bnknr];
- (* panelp->isr)(panelp, (ioaddr & 0xfffc));
- }
- }
- }
-
- return handled;
-}
-
-/*****************************************************************************/
-
-/*
- * Initialize all the ports on a panel.
- */
-
-static int __devinit stl_initports(struct stlbrd *brdp, struct stlpanel *panelp)
-{
- struct stlport *portp;
- unsigned int i;
- int chipmask;
-
- pr_debug("stl_initports(brdp=%p,panelp=%p)\n", brdp, panelp);
-
- chipmask = stl_panelinit(brdp, panelp);
-
-/*
- * All UARTs are initialized (if found!). Now go through and set up
- * each port's data structures.
- */
- for (i = 0; i < panelp->nrports; i++) {
- portp = kzalloc(sizeof(struct stlport), GFP_KERNEL);
- if (!portp) {
- printk("STALLION: failed to allocate memory "
- "(size=%Zd)\n", sizeof(struct stlport));
- break;
- }
- tty_port_init(&portp->port);
- portp->port.ops = &stl_port_ops;
- portp->magic = STL_PORTMAGIC;
- portp->portnr = i;
- portp->brdnr = panelp->brdnr;
- portp->panelnr = panelp->panelnr;
- portp->uartp = panelp->uartp;
- portp->clk = brdp->clk;
- portp->baud_base = STL_BAUDBASE;
- portp->close_delay = STL_CLOSEDELAY;
- portp->closing_wait = 30 * HZ;
- init_waitqueue_head(&portp->port.open_wait);
- init_waitqueue_head(&portp->port.close_wait);
- portp->stats.brd = portp->brdnr;
- portp->stats.panel = portp->panelnr;
- portp->stats.port = portp->portnr;
- panelp->ports[i] = portp;
- stl_portinit(brdp, panelp, portp);
- }
-
- return 0;
-}
-
-static void stl_cleanup_panels(struct stlbrd *brdp)
-{
- struct stlpanel *panelp;
- struct stlport *portp;
- unsigned int j, k;
- struct tty_struct *tty;
-
- for (j = 0; j < STL_MAXPANELS; j++) {
- panelp = brdp->panels[j];
- if (panelp == NULL)
- continue;
- for (k = 0; k < STL_PORTSPERPANEL; k++) {
- portp = panelp->ports[k];
- if (portp == NULL)
- continue;
- tty = tty_port_tty_get(&portp->port);
- if (tty != NULL) {
- stl_hangup(tty);
- tty_kref_put(tty);
- }
- kfree(portp->tx.buf);
- kfree(portp);
- }
- kfree(panelp);
- }
-}
-
-/*****************************************************************************/
-
-/*
- * Try to find and initialize an EasyIO board.
- */
-
-static int __devinit stl_initeio(struct stlbrd *brdp)
-{
- struct stlpanel *panelp;
- unsigned int status;
- char *name;
- int retval;
-
- pr_debug("stl_initeio(brdp=%p)\n", brdp);
-
- brdp->ioctrl = brdp->ioaddr1 + 1;
- brdp->iostatus = brdp->ioaddr1 + 2;
-
- status = inb(brdp->iostatus);
- if ((status & EIO_IDBITMASK) == EIO_MK3)
- brdp->ioctrl++;
-
-/*
- * Handle board-specific stuff now. The real difference is PCI
- * or not PCI.
- */
- if (brdp->brdtype == BRD_EASYIOPCI) {
- brdp->iosize1 = 0x80;
- brdp->iosize2 = 0x80;
- name = "serial(EIO-PCI)";
- outb(0x41, (brdp->ioaddr2 + 0x4c));
- } else {
- brdp->iosize1 = 8;
- name = "serial(EIO)";
- if ((brdp->irq < 0) || (brdp->irq > 15) ||
- (stl_vecmap[brdp->irq] == (unsigned char) 0xff)) {
- printk("STALLION: invalid irq=%d for brd=%d\n",
- brdp->irq, brdp->brdnr);
- retval = -EINVAL;
- goto err;
- }
- outb((stl_vecmap[brdp->irq] | EIO_0WS |
- ((brdp->irqtype) ? EIO_INTLEVEL : EIO_INTEDGE)),
- brdp->ioctrl);
- }
-
- retval = -EBUSY;
- if (!request_region(brdp->ioaddr1, brdp->iosize1, name)) {
- printk(KERN_WARNING "STALLION: Warning, board %d I/O address "
- "%x conflicts with another device\n", brdp->brdnr,
- brdp->ioaddr1);
- goto err;
- }
-
- if (brdp->iosize2 > 0)
- if (!request_region(brdp->ioaddr2, brdp->iosize2, name)) {
- printk(KERN_WARNING "STALLION: Warning, board %d I/O "
- "address %x conflicts with another device\n",
- brdp->brdnr, brdp->ioaddr2);
- printk(KERN_WARNING "STALLION: Warning, also "
- "releasing board %d I/O address %x \n",
- brdp->brdnr, brdp->ioaddr1);
- goto err_rel1;
- }
-
-/*
- * Everything looks OK, so let's go ahead and probe for the hardware.
- */
- brdp->clk = CD1400_CLK;
- brdp->isr = stl_eiointr;
-
- retval = -ENODEV;
- switch (status & EIO_IDBITMASK) {
- case EIO_8PORTM:
- brdp->clk = CD1400_CLK8M;
- /* fall thru */
- case EIO_8PORTRS:
- case EIO_8PORTDI:
- brdp->nrports = 8;
- break;
- case EIO_4PORTRS:
- brdp->nrports = 4;
- break;
- case EIO_MK3:
- switch (status & EIO_BRDMASK) {
- case ID_BRD4:
- brdp->nrports = 4;
- break;
- case ID_BRD8:
- brdp->nrports = 8;
- break;
- case ID_BRD16:
- brdp->nrports = 16;
- break;
- default:
- goto err_rel2;
- }
- break;
- default:
- goto err_rel2;
- }
-
-/*
- * We have verified that the board is actually present, so now we
- * can complete the setup.
- */
-
- panelp = kzalloc(sizeof(struct stlpanel), GFP_KERNEL);
- if (!panelp) {
- printk(KERN_WARNING "STALLION: failed to allocate memory "
- "(size=%Zd)\n", sizeof(struct stlpanel));
- retval = -ENOMEM;
- goto err_rel2;
- }
-
- panelp->magic = STL_PANELMAGIC;
- panelp->brdnr = brdp->brdnr;
- panelp->panelnr = 0;
- panelp->nrports = brdp->nrports;
- panelp->iobase = brdp->ioaddr1;
- panelp->hwid = status;
- if ((status & EIO_IDBITMASK) == EIO_MK3) {
- panelp->uartp = &stl_sc26198uart;
- panelp->isr = stl_sc26198intr;
- } else {
- panelp->uartp = &stl_cd1400uart;
- panelp->isr = stl_cd1400eiointr;
- }
-
- brdp->panels[0] = panelp;
- brdp->nrpanels = 1;
- brdp->state |= BRD_FOUND;
- brdp->hwid = status;
- if (request_irq(brdp->irq, stl_intr, IRQF_SHARED, name, brdp) != 0) {
- printk("STALLION: failed to register interrupt "
- "routine for %s irq=%d\n", name, brdp->irq);
- retval = -ENODEV;
- goto err_fr;
- }
-
- return 0;
-err_fr:
- stl_cleanup_panels(brdp);
-err_rel2:
- if (brdp->iosize2 > 0)
- release_region(brdp->ioaddr2, brdp->iosize2);
-err_rel1:
- release_region(brdp->ioaddr1, brdp->iosize1);
-err:
- return retval;
-}
-
-/*****************************************************************************/
-
-/*
- * Try to find an ECH board and initialize it. This code is capable of
- * dealing with all types of ECH board.
- */
-
-static int __devinit stl_initech(struct stlbrd *brdp)
-{
- struct stlpanel *panelp;
- unsigned int status, nxtid, ioaddr, conflict, panelnr, banknr, i;
- int retval;
- char *name;
-
- pr_debug("stl_initech(brdp=%p)\n", brdp);
-
- status = 0;
- conflict = 0;
-
-/*
- * Set up the initial board register contents. This varies a bit
- * between the different board types, so we need to handle each
- * separately. Also check that the supplied IRQ is good.
- */
- switch (brdp->brdtype) {
-
- case BRD_ECH:
- brdp->isr = stl_echatintr;
- brdp->ioctrl = brdp->ioaddr1 + 1;
- brdp->iostatus = brdp->ioaddr1 + 1;
- status = inb(brdp->iostatus);
- if ((status & ECH_IDBITMASK) != ECH_ID) {
- retval = -ENODEV;
- goto err;
- }
- if ((brdp->irq < 0) || (brdp->irq > 15) ||
- (stl_vecmap[brdp->irq] == (unsigned char) 0xff)) {
- printk("STALLION: invalid irq=%d for brd=%d\n",
- brdp->irq, brdp->brdnr);
- retval = -EINVAL;
- goto err;
- }
- status = ((brdp->ioaddr2 & ECH_ADDR2MASK) >> 1);
- status |= (stl_vecmap[brdp->irq] << 1);
- outb((status | ECH_BRDRESET), brdp->ioaddr1);
- brdp->ioctrlval = ECH_INTENABLE |
- ((brdp->irqtype) ? ECH_INTLEVEL : ECH_INTEDGE);
- for (i = 0; i < 10; i++)
- outb((brdp->ioctrlval | ECH_BRDENABLE), brdp->ioctrl);
- brdp->iosize1 = 2;
- brdp->iosize2 = 32;
- name = "serial(EC8/32)";
- outb(status, brdp->ioaddr1);
- break;
-
- case BRD_ECHMC:
- brdp->isr = stl_echmcaintr;
- brdp->ioctrl = brdp->ioaddr1 + 0x20;
- brdp->iostatus = brdp->ioctrl;
- status = inb(brdp->iostatus);
- if ((status & ECH_IDBITMASK) != ECH_ID) {
- retval = -ENODEV;
- goto err;
- }
- if ((brdp->irq < 0) || (brdp->irq > 15) ||
- (stl_vecmap[brdp->irq] == (unsigned char) 0xff)) {
- printk("STALLION: invalid irq=%d for brd=%d\n",
- brdp->irq, brdp->brdnr);
- retval = -EINVAL;
- goto err;
- }
- outb(ECHMC_BRDRESET, brdp->ioctrl);
- outb(ECHMC_INTENABLE, brdp->ioctrl);
- brdp->iosize1 = 64;
- name = "serial(EC8/32-MC)";
- break;
-
- case BRD_ECHPCI:
- brdp->isr = stl_echpciintr;
- brdp->ioctrl = brdp->ioaddr1 + 2;
- brdp->iosize1 = 4;
- brdp->iosize2 = 8;
- name = "serial(EC8/32-PCI)";
- break;
-
- case BRD_ECH64PCI:
- brdp->isr = stl_echpci64intr;
- brdp->ioctrl = brdp->ioaddr2 + 0x40;
- outb(0x43, (brdp->ioaddr1 + 0x4c));
- brdp->iosize1 = 0x80;
- brdp->iosize2 = 0x80;
- name = "serial(EC8/64-PCI)";
- break;
-
- default:
- printk("STALLION: unknown board type=%d\n", brdp->brdtype);
- retval = -EINVAL;
- goto err;
- }
-
-/*
- * Check the board for possible I/O address conflicts and return a
- * failure status if a conflict is found.
- */
- retval = -EBUSY;
- if (!request_region(brdp->ioaddr1, brdp->iosize1, name)) {
- printk(KERN_WARNING "STALLION: Warning, board %d I/O address "
- "%x conflicts with another device\n", brdp->brdnr,
- brdp->ioaddr1);
- goto err;
- }
-
- if (brdp->iosize2 > 0)
- if (!request_region(brdp->ioaddr2, brdp->iosize2, name)) {
- printk(KERN_WARNING "STALLION: Warning, board %d I/O "
- "address %x conflicts with another device\n",
- brdp->brdnr, brdp->ioaddr2);
- printk(KERN_WARNING "STALLION: Warning, also "
- "releasing board %d I/O address %x \n",
- brdp->brdnr, brdp->ioaddr1);
- goto err_rel1;
- }
-
-/*
- * Scan through the secondary I/O address space looking for panels.
- * As we find'em, allocate and initialize panel structures for each.
- */
- brdp->clk = CD1400_CLK;
- brdp->hwid = status;
-
- ioaddr = brdp->ioaddr2;
- banknr = 0;
- panelnr = 0;
- nxtid = 0;
-
- for (i = 0; i < STL_MAXPANELS; i++) {
- if (brdp->brdtype == BRD_ECHPCI) {
- outb(nxtid, brdp->ioctrl);
- ioaddr = brdp->ioaddr2;
- }
- status = inb(ioaddr + ECH_PNLSTATUS);
- if ((status & ECH_PNLIDMASK) != nxtid)
- break;
- panelp = kzalloc(sizeof(struct stlpanel), GFP_KERNEL);
- if (!panelp) {
- printk("STALLION: failed to allocate memory "
- "(size=%Zd)\n", sizeof(struct stlpanel));
- retval = -ENOMEM;
- goto err_fr;
- }
- panelp->magic = STL_PANELMAGIC;
- panelp->brdnr = brdp->brdnr;
- panelp->panelnr = panelnr;
- panelp->iobase = ioaddr;
- panelp->pagenr = nxtid;
- panelp->hwid = status;
- brdp->bnk2panel[banknr] = panelp;
- brdp->bnkpageaddr[banknr] = nxtid;
- brdp->bnkstataddr[banknr++] = ioaddr + ECH_PNLSTATUS;
-
- if (status & ECH_PNLXPID) {
- panelp->uartp = &stl_sc26198uart;
- panelp->isr = stl_sc26198intr;
- if (status & ECH_PNL16PORT) {
- panelp->nrports = 16;
- brdp->bnk2panel[banknr] = panelp;
- brdp->bnkpageaddr[banknr] = nxtid;
- brdp->bnkstataddr[banknr++] = ioaddr + 4 +
- ECH_PNLSTATUS;
- } else
- panelp->nrports = 8;
- } else {
- panelp->uartp = &stl_cd1400uart;
- panelp->isr = stl_cd1400echintr;
- if (status & ECH_PNL16PORT) {
- panelp->nrports = 16;
- panelp->ackmask = 0x80;
- if (brdp->brdtype != BRD_ECHPCI)
- ioaddr += EREG_BANKSIZE;
- brdp->bnk2panel[banknr] = panelp;
- brdp->bnkpageaddr[banknr] = ++nxtid;
- brdp->bnkstataddr[banknr++] = ioaddr +
- ECH_PNLSTATUS;
- } else {
- panelp->nrports = 8;
- panelp->ackmask = 0xc0;
- }
- }
-
- nxtid++;
- ioaddr += EREG_BANKSIZE;
- brdp->nrports += panelp->nrports;
- brdp->panels[panelnr++] = panelp;
- if ((brdp->brdtype != BRD_ECHPCI) &&
- (ioaddr >= (brdp->ioaddr2 + brdp->iosize2))) {
- retval = -EINVAL;
- goto err_fr;
- }
- }
-
- brdp->nrpanels = panelnr;
- brdp->nrbnks = banknr;
- if (brdp->brdtype == BRD_ECH)
- outb((brdp->ioctrlval | ECH_BRDDISABLE), brdp->ioctrl);
-
- brdp->state |= BRD_FOUND;
- if (request_irq(brdp->irq, stl_intr, IRQF_SHARED, name, brdp) != 0) {
- printk("STALLION: failed to register interrupt "
- "routine for %s irq=%d\n", name, brdp->irq);
- retval = -ENODEV;
- goto err_fr;
- }
-
- return 0;
-err_fr:
- stl_cleanup_panels(brdp);
- if (brdp->iosize2 > 0)
- release_region(brdp->ioaddr2, brdp->iosize2);
-err_rel1:
- release_region(brdp->ioaddr1, brdp->iosize1);
-err:
- return retval;
-}
-
-/*****************************************************************************/
-
-/*
- * Initialize and configure the specified board.
- * Scan through all the boards in the configuration and see what we
- * can find. Handle EIO and the ECH boards a little differently here
- * since the initial search and setup is very different.
- */
-
-static int __devinit stl_brdinit(struct stlbrd *brdp)
-{
- int i, retval;
-
- pr_debug("stl_brdinit(brdp=%p)\n", brdp);
-
- switch (brdp->brdtype) {
- case BRD_EASYIO:
- case BRD_EASYIOPCI:
- retval = stl_initeio(brdp);
- if (retval)
- goto err;
- break;
- case BRD_ECH:
- case BRD_ECHMC:
- case BRD_ECHPCI:
- case BRD_ECH64PCI:
- retval = stl_initech(brdp);
- if (retval)
- goto err;
- break;
- default:
- printk("STALLION: board=%d is unknown board type=%d\n",
- brdp->brdnr, brdp->brdtype);
- retval = -ENODEV;
- goto err;
- }
-
- if ((brdp->state & BRD_FOUND) == 0) {
- printk("STALLION: %s board not found, board=%d io=%x irq=%d\n",
- stl_brdnames[brdp->brdtype], brdp->brdnr,
- brdp->ioaddr1, brdp->irq);
- goto err_free;
- }
-
- for (i = 0; i < STL_MAXPANELS; i++)
- if (brdp->panels[i] != NULL)
- stl_initports(brdp, brdp->panels[i]);
-
- printk("STALLION: %s found, board=%d io=%x irq=%d "
- "nrpanels=%d nrports=%d\n", stl_brdnames[brdp->brdtype],
- brdp->brdnr, brdp->ioaddr1, brdp->irq, brdp->nrpanels,
- brdp->nrports);
-
- return 0;
-err_free:
- free_irq(brdp->irq, brdp);
-
- stl_cleanup_panels(brdp);
-
- release_region(brdp->ioaddr1, brdp->iosize1);
- if (brdp->iosize2 > 0)
- release_region(brdp->ioaddr2, brdp->iosize2);
-err:
- return retval;
-}
-
-/*****************************************************************************/
-
-/*
- * Find the next board number that is free.
- */
-
-static int __devinit stl_getbrdnr(void)
-{
- unsigned int i;
-
- for (i = 0; i < STL_MAXBRDS; i++)
- if (stl_brds[i] == NULL) {
- if (i >= stl_nrbrds)
- stl_nrbrds = i + 1;
- return i;
- }
-
- return -1;
-}
-
-/*****************************************************************************/
-/*
- * We have a Stallion board. Allocate a board structure and
- * initialize it. Read its IO and IRQ resources from PCI
- * configuration space.
- */
-
-static int __devinit stl_pciprobe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct stlbrd *brdp;
- unsigned int i, brdtype = ent->driver_data;
- int brdnr, retval = -ENODEV;
-
- if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE)
- goto err;
-
- retval = pci_enable_device(pdev);
- if (retval)
- goto err;
- brdp = stl_allocbrd();
- if (brdp == NULL) {
- retval = -ENOMEM;
- goto err;
- }
- mutex_lock(&stl_brdslock);
- brdnr = stl_getbrdnr();
- if (brdnr < 0) {
- dev_err(&pdev->dev, "too many boards found, "
- "maximum supported %d\n", STL_MAXBRDS);
- mutex_unlock(&stl_brdslock);
- retval = -ENODEV;
- goto err_fr;
- }
- brdp->brdnr = (unsigned int)brdnr;
- stl_brds[brdp->brdnr] = brdp;
- mutex_unlock(&stl_brdslock);
-
- brdp->brdtype = brdtype;
- brdp->state |= STL_PROBED;
-
-/*
- * We have all resources from the board, so let's setup the actual
- * board structure now.
- */
- switch (brdtype) {
- case BRD_ECHPCI:
- brdp->ioaddr2 = pci_resource_start(pdev, 0);
- brdp->ioaddr1 = pci_resource_start(pdev, 1);
- break;
- case BRD_ECH64PCI:
- brdp->ioaddr2 = pci_resource_start(pdev, 2);
- brdp->ioaddr1 = pci_resource_start(pdev, 1);
- break;
- case BRD_EASYIOPCI:
- brdp->ioaddr1 = pci_resource_start(pdev, 2);
- brdp->ioaddr2 = pci_resource_start(pdev, 1);
- break;
- default:
- dev_err(&pdev->dev, "unknown PCI board type=%u\n", brdtype);
- break;
- }
-
- brdp->irq = pdev->irq;
- retval = stl_brdinit(brdp);
- if (retval)
- goto err_null;
-
- pci_set_drvdata(pdev, brdp);
-
- for (i = 0; i < brdp->nrports; i++)
- tty_register_device(stl_serial,
- brdp->brdnr * STL_MAXPORTS + i, &pdev->dev);
-
- return 0;
-err_null:
- stl_brds[brdp->brdnr] = NULL;
-err_fr:
- kfree(brdp);
-err:
- return retval;
-}
-
-static void __devexit stl_pciremove(struct pci_dev *pdev)
-{
- struct stlbrd *brdp = pci_get_drvdata(pdev);
- unsigned int i;
-
- free_irq(brdp->irq, brdp);
-
- stl_cleanup_panels(brdp);
-
- release_region(brdp->ioaddr1, brdp->iosize1);
- if (brdp->iosize2 > 0)
- release_region(brdp->ioaddr2, brdp->iosize2);
-
- for (i = 0; i < brdp->nrports; i++)
- tty_unregister_device(stl_serial,
- brdp->brdnr * STL_MAXPORTS + i);
-
- stl_brds[brdp->brdnr] = NULL;
- kfree(brdp);
-}
-
-static struct pci_driver stl_pcidriver = {
- .name = "stallion",
- .id_table = stl_pcibrds,
- .probe = stl_pciprobe,
- .remove = __devexit_p(stl_pciremove)
-};
-
-/*****************************************************************************/
-
-/*
- * Return the board stats structure to user app.
- */
-
-static int stl_getbrdstats(combrd_t __user *bp)
-{
- combrd_t stl_brdstats;
- struct stlbrd *brdp;
- struct stlpanel *panelp;
- unsigned int i;
-
- if (copy_from_user(&stl_brdstats, bp, sizeof(combrd_t)))
- return -EFAULT;
- if (stl_brdstats.brd >= STL_MAXBRDS)
- return -ENODEV;
- brdp = stl_brds[stl_brdstats.brd];
- if (brdp == NULL)
- return -ENODEV;
-
- memset(&stl_brdstats, 0, sizeof(combrd_t));
- stl_brdstats.brd = brdp->brdnr;
- stl_brdstats.type = brdp->brdtype;
- stl_brdstats.hwid = brdp->hwid;
- stl_brdstats.state = brdp->state;
- stl_brdstats.ioaddr = brdp->ioaddr1;
- stl_brdstats.ioaddr2 = brdp->ioaddr2;
- stl_brdstats.irq = brdp->irq;
- stl_brdstats.nrpanels = brdp->nrpanels;
- stl_brdstats.nrports = brdp->nrports;
- for (i = 0; i < brdp->nrpanels; i++) {
- panelp = brdp->panels[i];
- stl_brdstats.panels[i].panel = i;
- stl_brdstats.panels[i].hwid = panelp->hwid;
- stl_brdstats.panels[i].nrports = panelp->nrports;
- }
-
- return copy_to_user(bp, &stl_brdstats, sizeof(combrd_t)) ? -EFAULT : 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Resolve the referenced port number into a port struct pointer.
- */
-
-static struct stlport *stl_getport(int brdnr, int panelnr, int portnr)
-{
- struct stlbrd *brdp;
- struct stlpanel *panelp;
-
- if (brdnr < 0 || brdnr >= STL_MAXBRDS)
- return NULL;
- brdp = stl_brds[brdnr];
- if (brdp == NULL)
- return NULL;
- if (panelnr < 0 || (unsigned int)panelnr >= brdp->nrpanels)
- return NULL;
- panelp = brdp->panels[panelnr];
- if (panelp == NULL)
- return NULL;
- if (portnr < 0 || (unsigned int)portnr >= panelp->nrports)
- return NULL;
- return panelp->ports[portnr];
-}
-
-/*****************************************************************************/
-
-/*
- * Return the port stats structure to user app. A NULL port struct
- * pointer passed in means that we need to find out from the app
- * what port to get stats for (used through board control device).
- */
-
-static int stl_getportstats(struct tty_struct *tty, struct stlport *portp, comstats_t __user *cp)
-{
- comstats_t stl_comstats;
- unsigned char *head, *tail;
- unsigned long flags;
-
- if (!portp) {
- if (copy_from_user(&stl_comstats, cp, sizeof(comstats_t)))
- return -EFAULT;
- portp = stl_getport(stl_comstats.brd, stl_comstats.panel,
- stl_comstats.port);
- if (portp == NULL)
- return -ENODEV;
- }
-
- mutex_lock(&portp->port.mutex);
- portp->stats.state = portp->istate;
- portp->stats.flags = portp->port.flags;
- portp->stats.hwid = portp->hwid;
-
- portp->stats.ttystate = 0;
- portp->stats.cflags = 0;
- portp->stats.iflags = 0;
- portp->stats.oflags = 0;
- portp->stats.lflags = 0;
- portp->stats.rxbuffered = 0;
-
- spin_lock_irqsave(&stallion_lock, flags);
- if (tty != NULL && portp->port.tty == tty) {
- portp->stats.ttystate = tty->flags;
- /* No longer available as a statistic */
- portp->stats.rxbuffered = 1; /*tty->flip.count; */
- if (tty->termios != NULL) {
- portp->stats.cflags = tty->termios->c_cflag;
- portp->stats.iflags = tty->termios->c_iflag;
- portp->stats.oflags = tty->termios->c_oflag;
- portp->stats.lflags = tty->termios->c_lflag;
- }
- }
- spin_unlock_irqrestore(&stallion_lock, flags);
-
- head = portp->tx.head;
- tail = portp->tx.tail;
- portp->stats.txbuffered = (head >= tail) ? (head - tail) :
- (STL_TXBUFSIZE - (tail - head));
-
- portp->stats.signals = (unsigned long) stl_getsignals(portp);
- mutex_unlock(&portp->port.mutex);
-
- return copy_to_user(cp, &portp->stats,
- sizeof(comstats_t)) ? -EFAULT : 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Clear the port stats structure. We also return it zeroed out...
- */
-
-static int stl_clrportstats(struct stlport *portp, comstats_t __user *cp)
-{
- comstats_t stl_comstats;
-
- if (!portp) {
- if (copy_from_user(&stl_comstats, cp, sizeof(comstats_t)))
- return -EFAULT;
- portp = stl_getport(stl_comstats.brd, stl_comstats.panel,
- stl_comstats.port);
- if (portp == NULL)
- return -ENODEV;
- }
-
- mutex_lock(&portp->port.mutex);
- memset(&portp->stats, 0, sizeof(comstats_t));
- portp->stats.brd = portp->brdnr;
- portp->stats.panel = portp->panelnr;
- portp->stats.port = portp->portnr;
- mutex_unlock(&portp->port.mutex);
- return copy_to_user(cp, &portp->stats,
- sizeof(comstats_t)) ? -EFAULT : 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Return the entire driver ports structure to a user app.
- */
-
-static int stl_getportstruct(struct stlport __user *arg)
-{
- struct stlport stl_dummyport;
- struct stlport *portp;
-
- if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
- return -EFAULT;
- portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
- stl_dummyport.portnr);
- if (!portp)
- return -ENODEV;
- return copy_to_user(arg, portp, sizeof(struct stlport)) ? -EFAULT : 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Return the entire driver board structure to a user app.
- */
-
-static int stl_getbrdstruct(struct stlbrd __user *arg)
-{
- struct stlbrd stl_dummybrd;
- struct stlbrd *brdp;
-
- if (copy_from_user(&stl_dummybrd, arg, sizeof(struct stlbrd)))
- return -EFAULT;
- if (stl_dummybrd.brdnr >= STL_MAXBRDS)
- return -ENODEV;
- brdp = stl_brds[stl_dummybrd.brdnr];
- if (!brdp)
- return -ENODEV;
- return copy_to_user(arg, brdp, sizeof(struct stlbrd)) ? -EFAULT : 0;
-}
-
-/*****************************************************************************/
-
-/*
- * The "staliomem" device is also required to do some special operations
- * on the board and/or ports. In this driver it is mostly used for stats
- * collection.
- */
-
-static long stl_memioctl(struct file *fp, unsigned int cmd, unsigned long arg)
-{
- int brdnr, rc;
- void __user *argp = (void __user *)arg;
-
- pr_debug("stl_memioctl(fp=%p,cmd=%x,arg=%lx)\n", fp, cmd,arg);
-
- brdnr = iminor(fp->f_dentry->d_inode);
- if (brdnr >= STL_MAXBRDS)
- return -ENODEV;
- rc = 0;
-
- switch (cmd) {
- case COM_GETPORTSTATS:
- rc = stl_getportstats(NULL, NULL, argp);
- break;
- case COM_CLRPORTSTATS:
- rc = stl_clrportstats(NULL, argp);
- break;
- case COM_GETBRDSTATS:
- rc = stl_getbrdstats(argp);
- break;
- case COM_READPORT:
- rc = stl_getportstruct(argp);
- break;
- case COM_READBOARD:
- rc = stl_getbrdstruct(argp);
- break;
- default:
- rc = -ENOIOCTLCMD;
- break;
- }
- return rc;
-}
-
-static const struct tty_operations stl_ops = {
- .open = stl_open,
- .close = stl_close,
- .write = stl_write,
- .put_char = stl_putchar,
- .flush_chars = stl_flushchars,
- .write_room = stl_writeroom,
- .chars_in_buffer = stl_charsinbuffer,
- .ioctl = stl_ioctl,
- .set_termios = stl_settermios,
- .throttle = stl_throttle,
- .unthrottle = stl_unthrottle,
- .stop = stl_stop,
- .start = stl_start,
- .hangup = stl_hangup,
- .flush_buffer = stl_flushbuffer,
- .break_ctl = stl_breakctl,
- .wait_until_sent = stl_waituntilsent,
- .send_xchar = stl_sendxchar,
- .tiocmget = stl_tiocmget,
- .tiocmset = stl_tiocmset,
- .proc_fops = &stl_proc_fops,
-};
-
-static const struct tty_port_operations stl_port_ops = {
- .carrier_raised = stl_carrier_raised,
- .dtr_rts = stl_dtr_rts,
- .activate = stl_activate,
- .shutdown = stl_shutdown,
-};
-
-/*****************************************************************************/
-/* CD1400 HARDWARE FUNCTIONS */
-/*****************************************************************************/
-
-/*
- * These functions get/set/update the registers of the cd1400 UARTs.
- * Access to the cd1400 registers is via an address/data io port pair.
- * (Maybe should make this inline...)
- */
-
-static int stl_cd1400getreg(struct stlport *portp, int regnr)
-{
- outb((regnr + portp->uartaddr), portp->ioaddr);
- return inb(portp->ioaddr + EREG_DATA);
-}
-
-static void stl_cd1400setreg(struct stlport *portp, int regnr, int value)
-{
- outb(regnr + portp->uartaddr, portp->ioaddr);
- outb(value, portp->ioaddr + EREG_DATA);
-}
-
-static int stl_cd1400updatereg(struct stlport *portp, int regnr, int value)
-{
- outb(regnr + portp->uartaddr, portp->ioaddr);
- if (inb(portp->ioaddr + EREG_DATA) != value) {
- outb(value, portp->ioaddr + EREG_DATA);
- return 1;
- }
- return 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Initialize the UARTs in a panel. We don't care what sort of board
- * these ports are on, since the port I/O registers are almost
- * identical when dealing with ports.
- */
-
-static int stl_cd1400panelinit(struct stlbrd *brdp, struct stlpanel *panelp)
-{
- unsigned int gfrcr;
- int chipmask, i, j;
- int nrchips, uartaddr, ioaddr;
- unsigned long flags;
-
- pr_debug("stl_panelinit(brdp=%p,panelp=%p)\n", brdp, panelp);
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(panelp->brdnr, panelp->pagenr);
-
-/*
- * Check that each chip is present and started up OK.
- */
- chipmask = 0;
- nrchips = panelp->nrports / CD1400_PORTS;
- for (i = 0; i < nrchips; i++) {
- if (brdp->brdtype == BRD_ECHPCI) {
- outb((panelp->pagenr + (i >> 1)), brdp->ioctrl);
- ioaddr = panelp->iobase;
- } else
- ioaddr = panelp->iobase + (EREG_BANKSIZE * (i >> 1));
- uartaddr = (i & 0x01) ? 0x080 : 0;
- outb((GFRCR + uartaddr), ioaddr);
- outb(0, (ioaddr + EREG_DATA));
- outb((CCR + uartaddr), ioaddr);
- outb(CCR_RESETFULL, (ioaddr + EREG_DATA));
- outb(CCR_RESETFULL, (ioaddr + EREG_DATA));
- outb((GFRCR + uartaddr), ioaddr);
- for (j = 0; j < CCR_MAXWAIT; j++)
- if ((gfrcr = inb(ioaddr + EREG_DATA)) != 0)
- break;
-
- if ((j >= CCR_MAXWAIT) || (gfrcr < 0x40) || (gfrcr > 0x60)) {
- printk("STALLION: cd1400 not responding, "
- "brd=%d panel=%d chip=%d\n",
- panelp->brdnr, panelp->panelnr, i);
- continue;
- }
- chipmask |= (0x1 << i);
- outb((PPR + uartaddr), ioaddr);
- outb(PPR_SCALAR, (ioaddr + EREG_DATA));
- }
-
- BRDDISABLE(panelp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
- return chipmask;
-}
-
-/*****************************************************************************/
-
-/*
- * Initialize hardware specific port registers.
- */
-
-static void stl_cd1400portinit(struct stlbrd *brdp, struct stlpanel *panelp, struct stlport *portp)
-{
- unsigned long flags;
- pr_debug("stl_cd1400portinit(brdp=%p,panelp=%p,portp=%p)\n", brdp,
- panelp, portp);
-
- if ((brdp == NULL) || (panelp == NULL) ||
- (portp == NULL))
- return;
-
- spin_lock_irqsave(&brd_lock, flags);
- portp->ioaddr = panelp->iobase + (((brdp->brdtype == BRD_ECHPCI) ||
- (portp->portnr < 8)) ? 0 : EREG_BANKSIZE);
- portp->uartaddr = (portp->portnr & 0x04) << 5;
- portp->pagenr = panelp->pagenr + (portp->portnr >> 3);
-
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
- stl_cd1400setreg(portp, LIVR, (portp->portnr << 3));
- portp->hwid = stl_cd1400getreg(portp, GFRCR);
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-/*
- * Wait for the command register to be ready. We will poll this,
- * since it won't usually take too long to be ready.
- */
-
-static void stl_cd1400ccrwait(struct stlport *portp)
-{
- int i;
-
- for (i = 0; i < CCR_MAXWAIT; i++)
- if (stl_cd1400getreg(portp, CCR) == 0)
- return;
-
- printk("STALLION: cd1400 not responding, port=%d panel=%d brd=%d\n",
- portp->portnr, portp->panelnr, portp->brdnr);
-}
-
-/*****************************************************************************/
-
-/*
- * Set up the cd1400 registers for a port based on the termios port
- * settings.
- */
-
-static void stl_cd1400setport(struct stlport *portp, struct ktermios *tiosp)
-{
- struct stlbrd *brdp;
- unsigned long flags;
- unsigned int clkdiv, baudrate;
- unsigned char cor1, cor2, cor3;
- unsigned char cor4, cor5, ccr;
- unsigned char srer, sreron, sreroff;
- unsigned char mcor1, mcor2, rtpr;
- unsigned char clk, div;
-
- cor1 = 0;
- cor2 = 0;
- cor3 = 0;
- cor4 = 0;
- cor5 = 0;
- ccr = 0;
- rtpr = 0;
- clk = 0;
- div = 0;
- mcor1 = 0;
- mcor2 = 0;
- sreron = 0;
- sreroff = 0;
-
- brdp = stl_brds[portp->brdnr];
- if (brdp == NULL)
- return;
-
-/*
- * Set up the RX char ignore mask with those RX error types we
- * can ignore. We can get the cd1400 to help us out a little here;
- * it will ignore parity errors and breaks for us.
- */
- portp->rxignoremsk = 0;
- if (tiosp->c_iflag & IGNPAR) {
- portp->rxignoremsk |= (ST_PARITY | ST_FRAMING | ST_OVERRUN);
- cor1 |= COR1_PARIGNORE;
- }
- if (tiosp->c_iflag & IGNBRK) {
- portp->rxignoremsk |= ST_BREAK;
- cor4 |= COR4_IGNBRK;
- }
-
- portp->rxmarkmsk = ST_OVERRUN;
- if (tiosp->c_iflag & (INPCK | PARMRK))
- portp->rxmarkmsk |= (ST_PARITY | ST_FRAMING);
- if (tiosp->c_iflag & BRKINT)
- portp->rxmarkmsk |= ST_BREAK;
-
-/*
- * Go through the char size, parity and stop bits and set all the
- * option registers appropriately.
- */
- switch (tiosp->c_cflag & CSIZE) {
- case CS5:
- cor1 |= COR1_CHL5;
- break;
- case CS6:
- cor1 |= COR1_CHL6;
- break;
- case CS7:
- cor1 |= COR1_CHL7;
- break;
- default:
- cor1 |= COR1_CHL8;
- break;
- }
-
- if (tiosp->c_cflag & CSTOPB)
- cor1 |= COR1_STOP2;
- else
- cor1 |= COR1_STOP1;
-
- if (tiosp->c_cflag & PARENB) {
- if (tiosp->c_cflag & PARODD)
- cor1 |= (COR1_PARENB | COR1_PARODD);
- else
- cor1 |= (COR1_PARENB | COR1_PAREVEN);
- } else {
- cor1 |= COR1_PARNONE;
- }
-
-/*
- * Set the RX FIFO threshold at 6 chars. This gives a bit of breathing
- * space for hardware flow control and the like. This should be set to
- * VMIN. Also here we will set the RX data timeout to 10ms - this should
- * really be based on VTIME.
- */
- cor3 |= FIFO_RXTHRESHOLD;
- rtpr = 2;
-
-/*
- * Calculate the baud rate timers. For now we will just assume that
- * the input and output baud are the same. Could have used a baud
- * table here, but this way we can generate virtually any baud rate
- * we like!
- */
- baudrate = tiosp->c_cflag & CBAUD;
- if (baudrate & CBAUDEX) {
- baudrate &= ~CBAUDEX;
- if ((baudrate < 1) || (baudrate > 4))
- tiosp->c_cflag &= ~CBAUDEX;
- else
- baudrate += 15;
- }
- baudrate = stl_baudrates[baudrate];
- if ((tiosp->c_cflag & CBAUD) == B38400) {
- if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
- baudrate = 57600;
- else if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
- baudrate = 115200;
- else if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
- baudrate = 230400;
- else if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
- baudrate = 460800;
- else if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)
- baudrate = (portp->baud_base / portp->custom_divisor);
- }
- if (baudrate > STL_CD1400MAXBAUD)
- baudrate = STL_CD1400MAXBAUD;
-
- if (baudrate > 0) {
- for (clk = 0; clk < CD1400_NUMCLKS; clk++) {
- clkdiv = (portp->clk / stl_cd1400clkdivs[clk]) / baudrate;
- if (clkdiv < 0x100)
- break;
- }
- div = (unsigned char) clkdiv;
- }
-
-/*
- * Check what form of modem signaling is required and set it up.
- */
- if ((tiosp->c_cflag & CLOCAL) == 0) {
- mcor1 |= MCOR1_DCD;
- mcor2 |= MCOR2_DCD;
- sreron |= SRER_MODEM;
- portp->port.flags |= ASYNC_CHECK_CD;
- } else
- portp->port.flags &= ~ASYNC_CHECK_CD;
-
-/*
- * Set up cd1400 enhanced modes if we can. In particular we want to
- * handle as much of the flow control as possible automatically. As
- * well as saving a few CPU cycles, it will also greatly improve flow
- * control reliability.
- */
- if (tiosp->c_iflag & IXON) {
- cor2 |= COR2_TXIBE;
- cor3 |= COR3_SCD12;
- if (tiosp->c_iflag & IXANY)
- cor2 |= COR2_IXM;
- }
-
- if (tiosp->c_cflag & CRTSCTS) {
- cor2 |= COR2_CTSAE;
- mcor1 |= FIFO_RTSTHRESHOLD;
- }
-
-/*
- * All cd1400 register values are calculated, so go through and set
- * them all up.
- */
-
- pr_debug("SETPORT: portnr=%d panelnr=%d brdnr=%d\n",
- portp->portnr, portp->panelnr, portp->brdnr);
- pr_debug(" cor1=%x cor2=%x cor3=%x cor4=%x cor5=%x\n",
- cor1, cor2, cor3, cor4, cor5);
- pr_debug(" mcor1=%x mcor2=%x rtpr=%x sreron=%x sreroff=%x\n",
- mcor1, mcor2, rtpr, sreron, sreroff);
- pr_debug(" tcor=%x tbpr=%x rcor=%x rbpr=%x\n", clk, div, clk, div);
- pr_debug(" schr1=%x schr2=%x schr3=%x schr4=%x\n",
- tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP],
- tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP]);
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_cd1400setreg(portp, CAR, (portp->portnr & 0x3));
- srer = stl_cd1400getreg(portp, SRER);
- stl_cd1400setreg(portp, SRER, 0);
- if (stl_cd1400updatereg(portp, COR1, cor1))
- ccr = 1;
- if (stl_cd1400updatereg(portp, COR2, cor2))
- ccr = 1;
- if (stl_cd1400updatereg(portp, COR3, cor3))
- ccr = 1;
- if (ccr) {
- stl_cd1400ccrwait(portp);
- stl_cd1400setreg(portp, CCR, CCR_CORCHANGE);
- }
- stl_cd1400setreg(portp, COR4, cor4);
- stl_cd1400setreg(portp, COR5, cor5);
- stl_cd1400setreg(portp, MCOR1, mcor1);
- stl_cd1400setreg(portp, MCOR2, mcor2);
- if (baudrate > 0) {
- stl_cd1400setreg(portp, TCOR, clk);
- stl_cd1400setreg(portp, TBPR, div);
- stl_cd1400setreg(portp, RCOR, clk);
- stl_cd1400setreg(portp, RBPR, div);
- }
- stl_cd1400setreg(portp, SCHR1, tiosp->c_cc[VSTART]);
- stl_cd1400setreg(portp, SCHR2, tiosp->c_cc[VSTOP]);
- stl_cd1400setreg(portp, SCHR3, tiosp->c_cc[VSTART]);
- stl_cd1400setreg(portp, SCHR4, tiosp->c_cc[VSTOP]);
- stl_cd1400setreg(portp, RTPR, rtpr);
- mcor1 = stl_cd1400getreg(portp, MSVR1);
- if (mcor1 & MSVR1_DCD)
- portp->sigs |= TIOCM_CD;
- else
- portp->sigs &= ~TIOCM_CD;
- stl_cd1400setreg(portp, SRER, ((srer & ~sreroff) | sreron));
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-/*
- * Set the state of the DTR and RTS signals.
- */
-
-static void stl_cd1400setsignals(struct stlport *portp, int dtr, int rts)
-{
- unsigned char msvr1, msvr2;
- unsigned long flags;
-
- pr_debug("stl_cd1400setsignals(portp=%p,dtr=%d,rts=%d)\n",
- portp, dtr, rts);
-
- msvr1 = 0;
- msvr2 = 0;
- if (dtr > 0)
- msvr1 = MSVR1_DTR;
- if (rts > 0)
- msvr2 = MSVR2_RTS;
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
- if (rts >= 0)
- stl_cd1400setreg(portp, MSVR2, msvr2);
- if (dtr >= 0)
- stl_cd1400setreg(portp, MSVR1, msvr1);
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-/*
- * Return the state of the signals.
- */
-
-static int stl_cd1400getsignals(struct stlport *portp)
-{
- unsigned char msvr1, msvr2;
- unsigned long flags;
- int sigs;
-
- pr_debug("stl_cd1400getsignals(portp=%p)\n", portp);
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
- msvr1 = stl_cd1400getreg(portp, MSVR1);
- msvr2 = stl_cd1400getreg(portp, MSVR2);
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
-
- sigs = 0;
- sigs |= (msvr1 & MSVR1_DCD) ? TIOCM_CD : 0;
- sigs |= (msvr1 & MSVR1_CTS) ? TIOCM_CTS : 0;
- sigs |= (msvr1 & MSVR1_DTR) ? TIOCM_DTR : 0;
- sigs |= (msvr2 & MSVR2_RTS) ? TIOCM_RTS : 0;
-#if 0
- sigs |= (msvr1 & MSVR1_RI) ? TIOCM_RI : 0;
- sigs |= (msvr1 & MSVR1_DSR) ? TIOCM_DSR : 0;
-#else
- sigs |= TIOCM_DSR;
-#endif
- return sigs;
-}
-
-/*****************************************************************************/
-
-/*
- * Enable/Disable the Transmitter and/or Receiver.
- */
-
-static void stl_cd1400enablerxtx(struct stlport *portp, int rx, int tx)
-{
- unsigned char ccr;
- unsigned long flags;
-
- pr_debug("stl_cd1400enablerxtx(portp=%p,rx=%d,tx=%d)\n", portp, rx, tx);
-
- ccr = 0;
-
- if (tx == 0)
- ccr |= CCR_TXDISABLE;
- else if (tx > 0)
- ccr |= CCR_TXENABLE;
- if (rx == 0)
- ccr |= CCR_RXDISABLE;
- else if (rx > 0)
- ccr |= CCR_RXENABLE;
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
- stl_cd1400ccrwait(portp);
- stl_cd1400setreg(portp, CCR, ccr);
- stl_cd1400ccrwait(portp);
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-/*
- * Start/stop the Transmitter and/or Receiver.
- */
-
-static void stl_cd1400startrxtx(struct stlport *portp, int rx, int tx)
-{
- unsigned char sreron, sreroff;
- unsigned long flags;
-
- pr_debug("stl_cd1400startrxtx(portp=%p,rx=%d,tx=%d)\n", portp, rx, tx);
-
- sreron = 0;
- sreroff = 0;
- if (tx == 0)
- sreroff |= (SRER_TXDATA | SRER_TXEMPTY);
- else if (tx == 1)
- sreron |= SRER_TXDATA;
- else if (tx >= 2)
- sreron |= SRER_TXEMPTY;
- if (rx == 0)
- sreroff |= SRER_RXDATA;
- else if (rx > 0)
- sreron |= SRER_RXDATA;
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
- stl_cd1400setreg(portp, SRER,
- ((stl_cd1400getreg(portp, SRER) & ~sreroff) | sreron));
- BRDDISABLE(portp->brdnr);
- if (tx > 0)
- set_bit(ASYI_TXBUSY, &portp->istate);
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-/*
- * Disable all interrupts from this port.
- */
-
-static void stl_cd1400disableintrs(struct stlport *portp)
-{
- unsigned long flags;
-
- pr_debug("stl_cd1400disableintrs(portp=%p)\n", portp);
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
- stl_cd1400setreg(portp, SRER, 0);
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-static void stl_cd1400sendbreak(struct stlport *portp, int len)
-{
- unsigned long flags;
-
- pr_debug("stl_cd1400sendbreak(portp=%p,len=%d)\n", portp, len);
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
- stl_cd1400setreg(portp, SRER,
- ((stl_cd1400getreg(portp, SRER) & ~SRER_TXDATA) |
- SRER_TXEMPTY));
- BRDDISABLE(portp->brdnr);
- portp->brklen = len;
- if (len == 1)
- portp->stats.txbreaks++;
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-/*
- * Take flow control actions...
- */
-
-static void stl_cd1400flowctrl(struct stlport *portp, int state)
-{
- struct tty_struct *tty;
- unsigned long flags;
-
- pr_debug("stl_cd1400flowctrl(portp=%p,state=%x)\n", portp, state);
-
- if (portp == NULL)
- return;
- tty = tty_port_tty_get(&portp->port);
- if (tty == NULL)
- return;
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
-
- if (state) {
- if (tty->termios->c_iflag & IXOFF) {
- stl_cd1400ccrwait(portp);
- stl_cd1400setreg(portp, CCR, CCR_SENDSCHR1);
- portp->stats.rxxon++;
- stl_cd1400ccrwait(portp);
- }
-/*
- * Question: should we return RTS to what it was before? It may
- * have been set by an ioctl... Suppose not, since if you have
- * hardware flow control set then it is pretty silly to go and
- * set the RTS line by hand.
- */
- if (tty->termios->c_cflag & CRTSCTS) {
- stl_cd1400setreg(portp, MCOR1,
- (stl_cd1400getreg(portp, MCOR1) |
- FIFO_RTSTHRESHOLD));
- stl_cd1400setreg(portp, MSVR2, MSVR2_RTS);
- portp->stats.rxrtson++;
- }
- } else {
- if (tty->termios->c_iflag & IXOFF) {
- stl_cd1400ccrwait(portp);
- stl_cd1400setreg(portp, CCR, CCR_SENDSCHR2);
- portp->stats.rxxoff++;
- stl_cd1400ccrwait(portp);
- }
- if (tty->termios->c_cflag & CRTSCTS) {
- stl_cd1400setreg(portp, MCOR1,
- (stl_cd1400getreg(portp, MCOR1) & 0xf0));
- stl_cd1400setreg(portp, MSVR2, 0);
- portp->stats.rxrtsoff++;
- }
- }
-
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
- tty_kref_put(tty);
-}
-
-/*****************************************************************************/
-
-/*
- * Send a flow control character...
- */
-
-static void stl_cd1400sendflow(struct stlport *portp, int state)
-{
- struct tty_struct *tty;
- unsigned long flags;
-
- pr_debug("stl_cd1400sendflow(portp=%p,state=%x)\n", portp, state);
-
- if (portp == NULL)
- return;
- tty = tty_port_tty_get(&portp->port);
- if (tty == NULL)
- return;
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
- if (state) {
- stl_cd1400ccrwait(portp);
- stl_cd1400setreg(portp, CCR, CCR_SENDSCHR1);
- portp->stats.rxxon++;
- stl_cd1400ccrwait(portp);
- } else {
- stl_cd1400ccrwait(portp);
- stl_cd1400setreg(portp, CCR, CCR_SENDSCHR2);
- portp->stats.rxxoff++;
- stl_cd1400ccrwait(portp);
- }
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
- tty_kref_put(tty);
-}
-
-/*****************************************************************************/
-
-static void stl_cd1400flush(struct stlport *portp)
-{
- unsigned long flags;
-
- pr_debug("stl_cd1400flush(portp=%p)\n", portp);
-
- if (portp == NULL)
- return;
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
- stl_cd1400ccrwait(portp);
- stl_cd1400setreg(portp, CCR, CCR_TXFLUSHFIFO);
- stl_cd1400ccrwait(portp);
- portp->tx.tail = portp->tx.head;
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-/*
- * Return the current state of data flow on this port. This is only
- * really interesting when determining if data has fully completed
- * transmission or not... This is easy for the cd1400; it accurately
- * maintains the busy port flag.
- */
-
-static int stl_cd1400datastate(struct stlport *portp)
-{
- pr_debug("stl_cd1400datastate(portp=%p)\n", portp);
-
- if (portp == NULL)
- return 0;
-
- return test_bit(ASYI_TXBUSY, &portp->istate) ? 1 : 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Interrupt service routine for cd1400 EasyIO boards.
- */
-
-static void stl_cd1400eiointr(struct stlpanel *panelp, unsigned int iobase)
-{
- unsigned char svrtype;
-
- pr_debug("stl_cd1400eiointr(panelp=%p,iobase=%x)\n", panelp, iobase);
-
- spin_lock(&brd_lock);
- outb(SVRR, iobase);
- svrtype = inb(iobase + EREG_DATA);
- if (panelp->nrports > 4) {
- outb((SVRR + 0x80), iobase);
- svrtype |= inb(iobase + EREG_DATA);
- }
-
- if (svrtype & SVRR_RX)
- stl_cd1400rxisr(panelp, iobase);
- else if (svrtype & SVRR_TX)
- stl_cd1400txisr(panelp, iobase);
- else if (svrtype & SVRR_MDM)
- stl_cd1400mdmisr(panelp, iobase);
-
- spin_unlock(&brd_lock);
-}
-
-/*****************************************************************************/
-
-/*
- * Interrupt service routine for cd1400 panels.
- */
-
-static void stl_cd1400echintr(struct stlpanel *panelp, unsigned int iobase)
-{
- unsigned char svrtype;
-
- pr_debug("stl_cd1400echintr(panelp=%p,iobase=%x)\n", panelp, iobase);
-
- outb(SVRR, iobase);
- svrtype = inb(iobase + EREG_DATA);
- outb((SVRR + 0x80), iobase);
- svrtype |= inb(iobase + EREG_DATA);
- if (svrtype & SVRR_RX)
- stl_cd1400rxisr(panelp, iobase);
- else if (svrtype & SVRR_TX)
- stl_cd1400txisr(panelp, iobase);
- else if (svrtype & SVRR_MDM)
- stl_cd1400mdmisr(panelp, iobase);
-}
-
-
-/*****************************************************************************/
-
-/*
- * Unfortunately we need to handle breaks in the TX data stream, since
- * this is the only way to generate them on the cd1400.
- */
-
-static int stl_cd1400breakisr(struct stlport *portp, int ioaddr)
-{
- if (portp->brklen == 1) {
- outb((COR2 + portp->uartaddr), ioaddr);
- outb((inb(ioaddr + EREG_DATA) | COR2_ETC),
- (ioaddr + EREG_DATA));
- outb((TDR + portp->uartaddr), ioaddr);
- outb(ETC_CMD, (ioaddr + EREG_DATA));
- outb(ETC_STARTBREAK, (ioaddr + EREG_DATA));
- outb((SRER + portp->uartaddr), ioaddr);
- outb((inb(ioaddr + EREG_DATA) & ~(SRER_TXDATA | SRER_TXEMPTY)),
- (ioaddr + EREG_DATA));
- return 1;
- } else if (portp->brklen > 1) {
- outb((TDR + portp->uartaddr), ioaddr);
- outb(ETC_CMD, (ioaddr + EREG_DATA));
- outb(ETC_STOPBREAK, (ioaddr + EREG_DATA));
- portp->brklen = -1;
- return 1;
- } else {
- outb((COR2 + portp->uartaddr), ioaddr);
- outb((inb(ioaddr + EREG_DATA) & ~COR2_ETC),
- (ioaddr + EREG_DATA));
- portp->brklen = 0;
- }
- return 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Transmit interrupt handler. This has gotta be fast! Handling TX
- * chars is pretty simple, stuff as many as possible from the TX buffer
- * into the cd1400 FIFO. Must also handle TX breaks here, since they
- * are embedded as commands in the data stream. Oh no, had to use a goto!
- * This could be optimized more, will do when I get time...
- * In practice it is possible that interrupts are enabled but that the
- * port has been hung up. Need to handle not having any TX buffer here,
- * this is done by using the side effect that head and tail will also
- * be NULL if the buffer has been freed.
- */
-
-static void stl_cd1400txisr(struct stlpanel *panelp, int ioaddr)
-{
- struct stlport *portp;
- int len, stlen;
- char *head, *tail;
- unsigned char ioack, srer;
- struct tty_struct *tty;
-
- pr_debug("stl_cd1400txisr(panelp=%p,ioaddr=%x)\n", panelp, ioaddr);
-
- ioack = inb(ioaddr + EREG_TXACK);
- if (((ioack & panelp->ackmask) != 0) ||
- ((ioack & ACK_TYPMASK) != ACK_TYPTX)) {
- printk("STALLION: bad TX interrupt ack value=%x\n", ioack);
- return;
- }
- portp = panelp->ports[(ioack >> 3)];
-
-/*
- * Unfortunately we need to handle breaks in the data stream, since
- * this is the only way to generate them on the cd1400. Do it now if
- * a break is to be sent.
- */
- if (portp->brklen != 0)
- if (stl_cd1400breakisr(portp, ioaddr))
- goto stl_txalldone;
-
- head = portp->tx.head;
- tail = portp->tx.tail;
- len = (head >= tail) ? (head - tail) : (STL_TXBUFSIZE - (tail - head));
- if ((len == 0) || ((len < STL_TXBUFLOW) &&
- (test_bit(ASYI_TXLOW, &portp->istate) == 0))) {
- set_bit(ASYI_TXLOW, &portp->istate);
- tty = tty_port_tty_get(&portp->port);
- if (tty) {
- tty_wakeup(tty);
- tty_kref_put(tty);
- }
- }
-
- if (len == 0) {
- outb((SRER + portp->uartaddr), ioaddr);
- srer = inb(ioaddr + EREG_DATA);
- if (srer & SRER_TXDATA) {
- srer = (srer & ~SRER_TXDATA) | SRER_TXEMPTY;
- } else {
- srer &= ~(SRER_TXDATA | SRER_TXEMPTY);
- clear_bit(ASYI_TXBUSY, &portp->istate);
- }
- outb(srer, (ioaddr + EREG_DATA));
- } else {
- len = min(len, CD1400_TXFIFOSIZE);
- portp->stats.txtotal += len;
- stlen = min_t(unsigned int, len,
- (portp->tx.buf + STL_TXBUFSIZE) - tail);
- outb((TDR + portp->uartaddr), ioaddr);
- outsb((ioaddr + EREG_DATA), tail, stlen);
- len -= stlen;
- tail += stlen;
- if (tail >= (portp->tx.buf + STL_TXBUFSIZE))
- tail = portp->tx.buf;
- if (len > 0) {
- outsb((ioaddr + EREG_DATA), tail, len);
- tail += len;
- }
- portp->tx.tail = tail;
- }
-
-stl_txalldone:
- outb((EOSRR + portp->uartaddr), ioaddr);
- outb(0, (ioaddr + EREG_DATA));
-}
-
-/*****************************************************************************/
-
-/*
- * Receive character interrupt handler. Determine if we have good chars
- * or bad chars and then process appropriately. Good chars are easy:
- * just shove the lot into the RX buffer and set all status bytes to 0.
- * If a bad RX char then process as required. This routine needs to be
- * fast! In practice it is possible that we get an interrupt on a port
- * that is closed. This can happen on hangups - since they completely
- * shut down a port not in user context. Need to handle this case.
- */
-
-static void stl_cd1400rxisr(struct stlpanel *panelp, int ioaddr)
-{
- struct stlport *portp;
- struct tty_struct *tty;
- unsigned int ioack, len, buflen;
- unsigned char status;
- char ch;
-
- pr_debug("stl_cd1400rxisr(panelp=%p,ioaddr=%x)\n", panelp, ioaddr);
-
- ioack = inb(ioaddr + EREG_RXACK);
- if ((ioack & panelp->ackmask) != 0) {
- printk("STALLION: bad RX interrupt ack value=%x\n", ioack);
- return;
- }
- portp = panelp->ports[(ioack >> 3)];
- tty = tty_port_tty_get(&portp->port);
-
- if ((ioack & ACK_TYPMASK) == ACK_TYPRXGOOD) {
- outb((RDCR + portp->uartaddr), ioaddr);
- len = inb(ioaddr + EREG_DATA);
- if (tty == NULL || (buflen = tty_buffer_request_room(tty, len)) == 0) {
- len = min_t(unsigned int, len, sizeof(stl_unwanted));
- outb((RDSR + portp->uartaddr), ioaddr);
- insb((ioaddr + EREG_DATA), &stl_unwanted[0], len);
- portp->stats.rxlost += len;
- portp->stats.rxtotal += len;
- } else {
- len = min(len, buflen);
- if (len > 0) {
- unsigned char *ptr;
- outb((RDSR + portp->uartaddr), ioaddr);
- tty_prepare_flip_string(tty, &ptr, len);
- insb((ioaddr + EREG_DATA), ptr, len);
- tty_schedule_flip(tty);
- portp->stats.rxtotal += len;
- }
- }
- } else if ((ioack & ACK_TYPMASK) == ACK_TYPRXBAD) {
- outb((RDSR + portp->uartaddr), ioaddr);
- status = inb(ioaddr + EREG_DATA);
- ch = inb(ioaddr + EREG_DATA);
- if (status & ST_PARITY)
- portp->stats.rxparity++;
- if (status & ST_FRAMING)
- portp->stats.rxframing++;
- if (status & ST_OVERRUN)
- portp->stats.rxoverrun++;
- if (status & ST_BREAK)
- portp->stats.rxbreaks++;
- if (status & ST_SCHARMASK) {
- if ((status & ST_SCHARMASK) == ST_SCHAR1)
- portp->stats.txxon++;
- if ((status & ST_SCHARMASK) == ST_SCHAR2)
- portp->stats.txxoff++;
- goto stl_rxalldone;
- }
- if (tty != NULL && (portp->rxignoremsk & status) == 0) {
- if (portp->rxmarkmsk & status) {
- if (status & ST_BREAK) {
- status = TTY_BREAK;
- if (portp->port.flags & ASYNC_SAK) {
- do_SAK(tty);
- BRDENABLE(portp->brdnr, portp->pagenr);
- }
- } else if (status & ST_PARITY)
- status = TTY_PARITY;
- else if (status & ST_FRAMING)
- status = TTY_FRAME;
- else if(status & ST_OVERRUN)
- status = TTY_OVERRUN;
- else
- status = 0;
- } else
- status = 0;
- tty_insert_flip_char(tty, ch, status);
- tty_schedule_flip(tty);
- }
- } else {
- printk("STALLION: bad RX interrupt ack value=%x\n", ioack);
- tty_kref_put(tty);
- return;
- }
-
-stl_rxalldone:
- tty_kref_put(tty);
- outb((EOSRR + portp->uartaddr), ioaddr);
- outb(0, (ioaddr + EREG_DATA));
-}
-
-/*****************************************************************************/
-
-/*
- * Modem interrupt handler. This is called when the modem signal line
- * (DCD) has changed state. Leave most of the work to the off-level
- * processing routine.
- */
-
-static void stl_cd1400mdmisr(struct stlpanel *panelp, int ioaddr)
-{
- struct stlport *portp;
- unsigned int ioack;
- unsigned char misr;
-
- pr_debug("stl_cd1400mdmisr(panelp=%p)\n", panelp);
-
- ioack = inb(ioaddr + EREG_MDACK);
- if (((ioack & panelp->ackmask) != 0) ||
- ((ioack & ACK_TYPMASK) != ACK_TYPMDM)) {
- printk("STALLION: bad MODEM interrupt ack value=%x\n", ioack);
- return;
- }
- portp = panelp->ports[(ioack >> 3)];
-
- outb((MISR + portp->uartaddr), ioaddr);
- misr = inb(ioaddr + EREG_DATA);
- if (misr & MISR_DCD) {
- stl_cd_change(portp);
- portp->stats.modem++;
- }
-
- outb((EOSRR + portp->uartaddr), ioaddr);
- outb(0, (ioaddr + EREG_DATA));
-}
-
-/*****************************************************************************/
-/* SC26198 HARDWARE FUNCTIONS */
-/*****************************************************************************/
-
-/*
- * These functions get/set/update the registers of the sc26198 UARTs.
- * Access to the sc26198 registers is via an address/data io port pair.
- * (Maybe should make this inline...)
- */
-
-static int stl_sc26198getreg(struct stlport *portp, int regnr)
-{
- outb((regnr | portp->uartaddr), (portp->ioaddr + XP_ADDR));
- return inb(portp->ioaddr + XP_DATA);
-}
-
-static void stl_sc26198setreg(struct stlport *portp, int regnr, int value)
-{
- outb((regnr | portp->uartaddr), (portp->ioaddr + XP_ADDR));
- outb(value, (portp->ioaddr + XP_DATA));
-}
-
-static int stl_sc26198updatereg(struct stlport *portp, int regnr, int value)
-{
- outb((regnr | portp->uartaddr), (portp->ioaddr + XP_ADDR));
- if (inb(portp->ioaddr + XP_DATA) != value) {
- outb(value, (portp->ioaddr + XP_DATA));
- return 1;
- }
- return 0;
-}
-
-/*****************************************************************************/
-
-/*
- * Functions to get and set the sc26198 global registers.
- */
-
-static int stl_sc26198getglobreg(struct stlport *portp, int regnr)
-{
- outb(regnr, (portp->ioaddr + XP_ADDR));
- return inb(portp->ioaddr + XP_DATA);
-}
-
-#if 0
-static void stl_sc26198setglobreg(struct stlport *portp, int regnr, int value)
-{
- outb(regnr, (portp->ioaddr + XP_ADDR));
- outb(value, (portp->ioaddr + XP_DATA));
-}
-#endif
-
-/*****************************************************************************/
-
-/*
- * Initialize the UARTs in a panel. We don't care what sort of board
- * these ports are on - since the port io registers are almost
- * identical when dealing with ports.
- */
-
-static int stl_sc26198panelinit(struct stlbrd *brdp, struct stlpanel *panelp)
-{
- int chipmask, i;
- int nrchips, ioaddr;
-
- pr_debug("stl_sc26198panelinit(brdp=%p,panelp=%p)\n", brdp, panelp);
-
- BRDENABLE(panelp->brdnr, panelp->pagenr);
-
-/*
- * Check that each chip is present and started up OK.
- */
- chipmask = 0;
- nrchips = (panelp->nrports + 4) / SC26198_PORTS;
- if (brdp->brdtype == BRD_ECHPCI)
- outb(panelp->pagenr, brdp->ioctrl);
-
- for (i = 0; i < nrchips; i++) {
- ioaddr = panelp->iobase + (i * 4);
- outb(SCCR, (ioaddr + XP_ADDR));
- outb(CR_RESETALL, (ioaddr + XP_DATA));
- outb(TSTR, (ioaddr + XP_ADDR));
- if (inb(ioaddr + XP_DATA) != 0) {
- printk("STALLION: sc26198 not responding, "
- "brd=%d panel=%d chip=%d\n",
- panelp->brdnr, panelp->panelnr, i);
- continue;
- }
- chipmask |= (0x1 << i);
- outb(GCCR, (ioaddr + XP_ADDR));
- outb(GCCR_IVRTYPCHANACK, (ioaddr + XP_DATA));
- outb(WDTRCR, (ioaddr + XP_ADDR));
- outb(0xff, (ioaddr + XP_DATA));
- }
-
- BRDDISABLE(panelp->brdnr);
- return chipmask;
-}
-
-/*****************************************************************************/
-
-/*
- * Initialize hardware specific port registers.
- */
-
-static void stl_sc26198portinit(struct stlbrd *brdp, struct stlpanel *panelp, struct stlport *portp)
-{
- pr_debug("stl_sc26198portinit(brdp=%p,panelp=%p,portp=%p)\n", brdp,
- panelp, portp);
-
- if ((brdp == NULL) || (panelp == NULL) ||
- (portp == NULL))
- return;
-
- portp->ioaddr = panelp->iobase + ((portp->portnr < 8) ? 0 : 4);
- portp->uartaddr = (portp->portnr & 0x07) << 4;
- portp->pagenr = panelp->pagenr;
- portp->hwid = 0x1;
-
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_sc26198setreg(portp, IOPCR, IOPCR_SETSIGS);
- BRDDISABLE(portp->brdnr);
-}
-
-/*****************************************************************************/
-
-/*
- * Set up the sc26198 registers for a port based on the termios port
- * settings.
- */
-
-static void stl_sc26198setport(struct stlport *portp, struct ktermios *tiosp)
-{
- struct stlbrd *brdp;
- unsigned long flags;
- unsigned int baudrate;
- unsigned char mr0, mr1, mr2, clk;
- unsigned char imron, imroff, iopr, ipr;
-
- mr0 = 0;
- mr1 = 0;
- mr2 = 0;
- clk = 0;
- iopr = 0;
- imron = 0;
- imroff = 0;
-
- brdp = stl_brds[portp->brdnr];
- if (brdp == NULL)
- return;
-
-/*
- * Set up the RX char ignore mask with those RX error types we
- * can ignore.
- */
- portp->rxignoremsk = 0;
- if (tiosp->c_iflag & IGNPAR)
- portp->rxignoremsk |= (SR_RXPARITY | SR_RXFRAMING |
- SR_RXOVERRUN);
- if (tiosp->c_iflag & IGNBRK)
- portp->rxignoremsk |= SR_RXBREAK;
-
- portp->rxmarkmsk = SR_RXOVERRUN;
- if (tiosp->c_iflag & (INPCK | PARMRK))
- portp->rxmarkmsk |= (SR_RXPARITY | SR_RXFRAMING);
- if (tiosp->c_iflag & BRKINT)
- portp->rxmarkmsk |= SR_RXBREAK;
-
-/*
- * Go through the char size, parity and stop bits and set all the
- * option registers appropriately.
- */
- switch (tiosp->c_cflag & CSIZE) {
- case CS5:
- mr1 |= MR1_CS5;
- break;
- case CS6:
- mr1 |= MR1_CS6;
- break;
- case CS7:
- mr1 |= MR1_CS7;
- break;
- default:
- mr1 |= MR1_CS8;
- break;
- }
-
- if (tiosp->c_cflag & CSTOPB)
- mr2 |= MR2_STOP2;
- else
- mr2 |= MR2_STOP1;
-
- if (tiosp->c_cflag & PARENB) {
- if (tiosp->c_cflag & PARODD)
- mr1 |= (MR1_PARENB | MR1_PARODD);
- else
- mr1 |= (MR1_PARENB | MR1_PAREVEN);
- } else
- mr1 |= MR1_PARNONE;
-
- mr1 |= MR1_ERRBLOCK;
-
-/*
- * Set the RX FIFO threshold at 8 chars. This gives a bit of breathing
- * space for hardware flow control and the like. This should be set to
- * VMIN.
- */
- mr2 |= MR2_RXFIFOHALF;
-
-/*
- * Calculate the baud rate timers. For now we will just assume that
- * the input and output baud are the same. The sc26198 has a fixed
- * baud rate table, so only discrete baud rates are possible.
- */
- baudrate = tiosp->c_cflag & CBAUD;
- if (baudrate & CBAUDEX) {
- baudrate &= ~CBAUDEX;
- if ((baudrate < 1) || (baudrate > 4))
- tiosp->c_cflag &= ~CBAUDEX;
- else
- baudrate += 15;
- }
- baudrate = stl_baudrates[baudrate];
- if ((tiosp->c_cflag & CBAUD) == B38400) {
- if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
- baudrate = 57600;
- else if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
- baudrate = 115200;
- else if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
- baudrate = 230400;
- else if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
- baudrate = 460800;
- else if ((portp->port.flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)
- baudrate = (portp->baud_base / portp->custom_divisor);
- }
- if (baudrate > STL_SC26198MAXBAUD)
- baudrate = STL_SC26198MAXBAUD;
-
- if (baudrate > 0)
- for (clk = 0; clk < SC26198_NRBAUDS; clk++)
- if (baudrate <= sc26198_baudtable[clk])
- break;
-
-/*
- * Check what form of modem signaling is required and set it up.
- */
- if (tiosp->c_cflag & CLOCAL) {
- portp->port.flags &= ~ASYNC_CHECK_CD;
- } else {
- iopr |= IOPR_DCDCOS;
- imron |= IR_IOPORT;
- portp->port.flags |= ASYNC_CHECK_CD;
- }
-
-/*
- * Setup sc26198 enhanced modes if we can. In particular we want to
- * handle as much of the flow control as possible automatically. As
- * well as saving a few CPU cycles it will also greatly improve flow
- * control reliability.
- */
- if (tiosp->c_iflag & IXON) {
- mr0 |= MR0_SWFTX | MR0_SWFT;
- imron |= IR_XONXOFF;
- } else
- imroff |= IR_XONXOFF;
-
- if (tiosp->c_iflag & IXOFF)
- mr0 |= MR0_SWFRX;
-
- if (tiosp->c_cflag & CRTSCTS) {
- mr2 |= MR2_AUTOCTS;
- mr1 |= MR1_AUTORTS;
- }
-
-/*
- * All sc26198 register values calculated so go through and set
- * them all up.
- */
-
- pr_debug("SETPORT: portnr=%d panelnr=%d brdnr=%d\n",
- portp->portnr, portp->panelnr, portp->brdnr);
- pr_debug(" mr0=%x mr1=%x mr2=%x clk=%x\n", mr0, mr1, mr2, clk);
- pr_debug(" iopr=%x imron=%x imroff=%x\n", iopr, imron, imroff);
- pr_debug(" schr1=%x schr2=%x schr3=%x schr4=%x\n",
- tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP],
- tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP]);
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_sc26198setreg(portp, IMR, 0);
- stl_sc26198updatereg(portp, MR0, mr0);
- stl_sc26198updatereg(portp, MR1, mr1);
- stl_sc26198setreg(portp, SCCR, CR_RXERRBLOCK);
- stl_sc26198updatereg(portp, MR2, mr2);
- stl_sc26198updatereg(portp, IOPIOR,
- ((stl_sc26198getreg(portp, IOPIOR) & ~IPR_CHANGEMASK) | iopr));
-
- if (baudrate > 0) {
- stl_sc26198setreg(portp, TXCSR, clk);
- stl_sc26198setreg(portp, RXCSR, clk);
- }
-
- stl_sc26198setreg(portp, XONCR, tiosp->c_cc[VSTART]);
- stl_sc26198setreg(portp, XOFFCR, tiosp->c_cc[VSTOP]);
-
- ipr = stl_sc26198getreg(portp, IPR);
- if (ipr & IPR_DCD)
- portp->sigs &= ~TIOCM_CD;
- else
- portp->sigs |= TIOCM_CD;
-
- portp->imr = (portp->imr & ~imroff) | imron;
- stl_sc26198setreg(portp, IMR, portp->imr);
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-/*
- * Set the state of the DTR and RTS signals.
- */
-
-static void stl_sc26198setsignals(struct stlport *portp, int dtr, int rts)
-{
- unsigned char iopioron, iopioroff;
- unsigned long flags;
-
- pr_debug("stl_sc26198setsignals(portp=%p,dtr=%d,rts=%d)\n", portp,
- dtr, rts);
-
- iopioron = 0;
- iopioroff = 0;
- if (dtr == 0)
- iopioroff |= IPR_DTR;
- else if (dtr > 0)
- iopioron |= IPR_DTR;
- if (rts == 0)
- iopioroff |= IPR_RTS;
- else if (rts > 0)
- iopioron |= IPR_RTS;
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_sc26198setreg(portp, IOPIOR,
- ((stl_sc26198getreg(portp, IOPIOR) & ~iopioroff) | iopioron));
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-/*
- * Return the state of the signals.
- */
-
-static int stl_sc26198getsignals(struct stlport *portp)
-{
- unsigned char ipr;
- unsigned long flags;
- int sigs;
-
- pr_debug("stl_sc26198getsignals(portp=%p)\n", portp);
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- ipr = stl_sc26198getreg(portp, IPR);
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
-
- sigs = 0;
- sigs |= (ipr & IPR_DCD) ? 0 : TIOCM_CD;
- sigs |= (ipr & IPR_CTS) ? 0 : TIOCM_CTS;
- sigs |= (ipr & IPR_DTR) ? 0: TIOCM_DTR;
- sigs |= (ipr & IPR_RTS) ? 0: TIOCM_RTS;
- sigs |= TIOCM_DSR;
- return sigs;
-}
-
-/*****************************************************************************/
-
-/*
- * Enable/Disable the Transmitter and/or Receiver.
- */
-
-static void stl_sc26198enablerxtx(struct stlport *portp, int rx, int tx)
-{
- unsigned char ccr;
- unsigned long flags;
-
- pr_debug("stl_sc26198enablerxtx(portp=%p,rx=%d,tx=%d)\n", portp, rx,tx);
-
- ccr = portp->crenable;
- if (tx == 0)
- ccr &= ~CR_TXENABLE;
- else if (tx > 0)
- ccr |= CR_TXENABLE;
- if (rx == 0)
- ccr &= ~CR_RXENABLE;
- else if (rx > 0)
- ccr |= CR_RXENABLE;
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_sc26198setreg(portp, SCCR, ccr);
- BRDDISABLE(portp->brdnr);
- portp->crenable = ccr;
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-/*
- * Start/stop the Transmitter and/or Receiver.
- */
-
-static void stl_sc26198startrxtx(struct stlport *portp, int rx, int tx)
-{
- unsigned char imr;
- unsigned long flags;
-
- pr_debug("stl_sc26198startrxtx(portp=%p,rx=%d,tx=%d)\n", portp, rx, tx);
-
- imr = portp->imr;
- if (tx == 0)
- imr &= ~IR_TXRDY;
- else if (tx == 1)
- imr |= IR_TXRDY;
- if (rx == 0)
- imr &= ~(IR_RXRDY | IR_RXBREAK | IR_RXWATCHDOG);
- else if (rx > 0)
- imr |= IR_RXRDY | IR_RXBREAK | IR_RXWATCHDOG;
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_sc26198setreg(portp, IMR, imr);
- BRDDISABLE(portp->brdnr);
- portp->imr = imr;
- if (tx > 0)
- set_bit(ASYI_TXBUSY, &portp->istate);
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-/*
- * Disable all interrupts from this port.
- */
-
-static void stl_sc26198disableintrs(struct stlport *portp)
-{
- unsigned long flags;
-
- pr_debug("stl_sc26198disableintrs(portp=%p)\n", portp);
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- portp->imr = 0;
- stl_sc26198setreg(portp, IMR, 0);
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-static void stl_sc26198sendbreak(struct stlport *portp, int len)
-{
- unsigned long flags;
-
- pr_debug("stl_sc26198sendbreak(portp=%p,len=%d)\n", portp, len);
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- if (len == 1) {
- stl_sc26198setreg(portp, SCCR, CR_TXSTARTBREAK);
- portp->stats.txbreaks++;
- } else
- stl_sc26198setreg(portp, SCCR, CR_TXSTOPBREAK);
-
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-/*
- * Take flow control actions...
- */
-
-static void stl_sc26198flowctrl(struct stlport *portp, int state)
-{
- struct tty_struct *tty;
- unsigned long flags;
- unsigned char mr0;
-
- pr_debug("stl_sc26198flowctrl(portp=%p,state=%x)\n", portp, state);
-
- if (portp == NULL)
- return;
- tty = tty_port_tty_get(&portp->port);
- if (tty == NULL)
- return;
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
-
- if (state) {
- if (tty->termios->c_iflag & IXOFF) {
- mr0 = stl_sc26198getreg(portp, MR0);
- stl_sc26198setreg(portp, MR0, (mr0 & ~MR0_SWFRXTX));
- stl_sc26198setreg(portp, SCCR, CR_TXSENDXON);
- mr0 |= MR0_SWFRX;
- portp->stats.rxxon++;
- stl_sc26198wait(portp);
- stl_sc26198setreg(portp, MR0, mr0);
- }
-/*
- * Question: should we return RTS to what it was before? It may
- * have been set by an ioctl... Suppose not, since if you have
- * hardware flow control set then it is pretty silly to go and
- * set the RTS line by hand.
- */
- if (tty->termios->c_cflag & CRTSCTS) {
- stl_sc26198setreg(portp, MR1,
- (stl_sc26198getreg(portp, MR1) | MR1_AUTORTS));
- stl_sc26198setreg(portp, IOPIOR,
- (stl_sc26198getreg(portp, IOPIOR) | IOPR_RTS));
- portp->stats.rxrtson++;
- }
- } else {
- if (tty->termios->c_iflag & IXOFF) {
- mr0 = stl_sc26198getreg(portp, MR0);
- stl_sc26198setreg(portp, MR0, (mr0 & ~MR0_SWFRXTX));
- stl_sc26198setreg(portp, SCCR, CR_TXSENDXOFF);
- mr0 &= ~MR0_SWFRX;
- portp->stats.rxxoff++;
- stl_sc26198wait(portp);
- stl_sc26198setreg(portp, MR0, mr0);
- }
- if (tty->termios->c_cflag & CRTSCTS) {
- stl_sc26198setreg(portp, MR1,
- (stl_sc26198getreg(portp, MR1) & ~MR1_AUTORTS));
- stl_sc26198setreg(portp, IOPIOR,
- (stl_sc26198getreg(portp, IOPIOR) & ~IOPR_RTS));
- portp->stats.rxrtsoff++;
- }
- }
-
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
- tty_kref_put(tty);
-}
-
-/*****************************************************************************/
-
-/*
- * Send a flow control character.
- */
-
-static void stl_sc26198sendflow(struct stlport *portp, int state)
-{
- struct tty_struct *tty;
- unsigned long flags;
- unsigned char mr0;
-
- pr_debug("stl_sc26198sendflow(portp=%p,state=%x)\n", portp, state);
-
- if (portp == NULL)
- return;
- tty = tty_port_tty_get(&portp->port);
- if (tty == NULL)
- return;
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- if (state) {
- mr0 = stl_sc26198getreg(portp, MR0);
- stl_sc26198setreg(portp, MR0, (mr0 & ~MR0_SWFRXTX));
- stl_sc26198setreg(portp, SCCR, CR_TXSENDXON);
- mr0 |= MR0_SWFRX;
- portp->stats.rxxon++;
- stl_sc26198wait(portp);
- stl_sc26198setreg(portp, MR0, mr0);
- } else {
- mr0 = stl_sc26198getreg(portp, MR0);
- stl_sc26198setreg(portp, MR0, (mr0 & ~MR0_SWFRXTX));
- stl_sc26198setreg(portp, SCCR, CR_TXSENDXOFF);
- mr0 &= ~MR0_SWFRX;
- portp->stats.rxxoff++;
- stl_sc26198wait(portp);
- stl_sc26198setreg(portp, MR0, mr0);
- }
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
- tty_kref_put(tty);
-}
-
-/*****************************************************************************/
-
-static void stl_sc26198flush(struct stlport *portp)
-{
- unsigned long flags;
-
- pr_debug("stl_sc26198flush(portp=%p)\n", portp);
-
- if (portp == NULL)
- return;
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- stl_sc26198setreg(portp, SCCR, CR_TXRESET);
- stl_sc26198setreg(portp, SCCR, portp->crenable);
- BRDDISABLE(portp->brdnr);
- portp->tx.tail = portp->tx.head;
- spin_unlock_irqrestore(&brd_lock, flags);
-}
-
-/*****************************************************************************/
-
-/*
- * Return the current state of data flow on this port. This is only
- * really interesting when determining if data has fully completed
- * transmission or not... The sc26198 interrupt scheme cannot
- * determine when all data has actually drained, so we need to
- * check the port status register to be sure.
- */
-
-static int stl_sc26198datastate(struct stlport *portp)
-{
- unsigned long flags;
- unsigned char sr;
-
- pr_debug("stl_sc26198datastate(portp=%p)\n", portp);
-
- if (portp == NULL)
- return 0;
- if (test_bit(ASYI_TXBUSY, &portp->istate))
- return 1;
-
- spin_lock_irqsave(&brd_lock, flags);
- BRDENABLE(portp->brdnr, portp->pagenr);
- sr = stl_sc26198getreg(portp, SR);
- BRDDISABLE(portp->brdnr);
- spin_unlock_irqrestore(&brd_lock, flags);
-
- return (sr & SR_TXEMPTY) ? 0 : 1;
-}
-
-/*****************************************************************************/
-
-/*
- * Delay for a small amount of time, to give the sc26198 a chance
- * to process a command...
- */
-
-static void stl_sc26198wait(struct stlport *portp)
-{
- int i;
-
- pr_debug("stl_sc26198wait(portp=%p)\n", portp);
-
- if (portp == NULL)
- return;
-
- for (i = 0; i < 20; i++)
- stl_sc26198getglobreg(portp, TSTR);
-}
-
-/*****************************************************************************/
-
-/*
- * If we are TX flow controlled and in IXANY mode then we may
- * need to unflow control here. We gotta do this because of the
- * automatic flow control modes of the sc26198.
- */
-
-static void stl_sc26198txunflow(struct stlport *portp, struct tty_struct *tty)
-{
- unsigned char mr0;
-
- mr0 = stl_sc26198getreg(portp, MR0);
- stl_sc26198setreg(portp, MR0, (mr0 & ~MR0_SWFRXTX));
- stl_sc26198setreg(portp, SCCR, CR_HOSTXON);
- stl_sc26198wait(portp);
- stl_sc26198setreg(portp, MR0, mr0);
- clear_bit(ASYI_TXFLOWED, &portp->istate);
-}
-
-/*****************************************************************************/
-
-/*
- * Interrupt service routine for sc26198 panels.
- */
-
-static void stl_sc26198intr(struct stlpanel *panelp, unsigned int iobase)
-{
- struct stlport *portp;
- unsigned int iack;
-
- spin_lock(&brd_lock);
-
-/*
- * Work around bug in sc26198 chip... Cannot have A6 address
- * line of UART high, else iack will be returned as 0.
- */
- outb(0, (iobase + 1));
-
- iack = inb(iobase + XP_IACK);
- portp = panelp->ports[(iack & IVR_CHANMASK) + ((iobase & 0x4) << 1)];
-
- if (iack & IVR_RXDATA)
- stl_sc26198rxisr(portp, iack);
- else if (iack & IVR_TXDATA)
- stl_sc26198txisr(portp);
- else
- stl_sc26198otherisr(portp, iack);
-
- spin_unlock(&brd_lock);
-}
-
-/*****************************************************************************/
-
-/*
- * Transmit interrupt handler. This has gotta be fast! Handling TX
- * chars is pretty simple, stuff as many as possible from the TX buffer
- * into the sc26198 FIFO.
- * In practice it is possible that interrupts are enabled but that the
- * port has been hung up. Need to handle not having any TX buffer here,
- * this is done by using the side effect that head and tail will also
- * be NULL if the buffer has been freed.
- */
-
-static void stl_sc26198txisr(struct stlport *portp)
-{
- struct tty_struct *tty;
- unsigned int ioaddr;
- unsigned char mr0;
- int len, stlen;
- char *head, *tail;
-
- pr_debug("stl_sc26198txisr(portp=%p)\n", portp);
-
- ioaddr = portp->ioaddr;
- head = portp->tx.head;
- tail = portp->tx.tail;
- len = (head >= tail) ? (head - tail) : (STL_TXBUFSIZE - (tail - head));
- if ((len == 0) || ((len < STL_TXBUFLOW) &&
- (test_bit(ASYI_TXLOW, &portp->istate) == 0))) {
- set_bit(ASYI_TXLOW, &portp->istate);
- tty = tty_port_tty_get(&portp->port);
- if (tty) {
- tty_wakeup(tty);
- tty_kref_put(tty);
- }
- }
-
- if (len == 0) {
- outb((MR0 | portp->uartaddr), (ioaddr + XP_ADDR));
- mr0 = inb(ioaddr + XP_DATA);
- if ((mr0 & MR0_TXMASK) == MR0_TXEMPTY) {
- portp->imr &= ~IR_TXRDY;
- outb((IMR | portp->uartaddr), (ioaddr + XP_ADDR));
- outb(portp->imr, (ioaddr + XP_DATA));
- clear_bit(ASYI_TXBUSY, &portp->istate);
- } else {
- mr0 |= ((mr0 & ~MR0_TXMASK) | MR0_TXEMPTY);
- outb(mr0, (ioaddr + XP_DATA));
- }
- } else {
- len = min(len, SC26198_TXFIFOSIZE);
- portp->stats.txtotal += len;
- stlen = min_t(unsigned int, len,
- (portp->tx.buf + STL_TXBUFSIZE) - tail);
- outb(GTXFIFO, (ioaddr + XP_ADDR));
- outsb((ioaddr + XP_DATA), tail, stlen);
- len -= stlen;
- tail += stlen;
- if (tail >= (portp->tx.buf + STL_TXBUFSIZE))
- tail = portp->tx.buf;
- if (len > 0) {
- outsb((ioaddr + XP_DATA), tail, len);
- tail += len;
- }
- portp->tx.tail = tail;
- }
-}
-
-/*****************************************************************************/
-
-/*
- * Receive character interrupt handler. Determine if we have good chars
- * or bad chars and then process appropriately. Good chars are easy:
- * just shove the lot into the RX buffer and set all status bytes to 0.
- * If a bad RX char then process as required. This routine needs to be
- * fast! In practice it is possible that we get an interrupt on a port
- * that is closed. This can happen on hangups - since they completely
- * shut down a port not in user context. Need to handle this case.
- */
-
-static void stl_sc26198rxisr(struct stlport *portp, unsigned int iack)
-{
- struct tty_struct *tty;
- unsigned int len, buflen, ioaddr;
-
- pr_debug("stl_sc26198rxisr(portp=%p,iack=%x)\n", portp, iack);
-
- tty = tty_port_tty_get(&portp->port);
- ioaddr = portp->ioaddr;
- outb(GIBCR, (ioaddr + XP_ADDR));
- len = inb(ioaddr + XP_DATA) + 1;
-
- if ((iack & IVR_TYPEMASK) == IVR_RXDATA) {
- if (tty == NULL || (buflen = tty_buffer_request_room(tty, len)) == 0) {
- len = min_t(unsigned int, len, sizeof(stl_unwanted));
- outb(GRXFIFO, (ioaddr + XP_ADDR));
- insb((ioaddr + XP_DATA), &stl_unwanted[0], len);
- portp->stats.rxlost += len;
- portp->stats.rxtotal += len;
- } else {
- len = min(len, buflen);
- if (len > 0) {
- unsigned char *ptr;
- outb(GRXFIFO, (ioaddr + XP_ADDR));
- tty_prepare_flip_string(tty, &ptr, len);
- insb((ioaddr + XP_DATA), ptr, len);
- tty_schedule_flip(tty);
- portp->stats.rxtotal += len;
- }
- }
- } else {
- stl_sc26198rxbadchars(portp);
- }
-
-/*
- * If we are TX flow controlled and in IXANY mode then we may need
- * to unflow control here. We gotta do this because of the automatic
- * flow control modes of the sc26198.
- */
- if (test_bit(ASYI_TXFLOWED, &portp->istate)) {
- if ((tty != NULL) &&
- (tty->termios != NULL) &&
- (tty->termios->c_iflag & IXANY)) {
- stl_sc26198txunflow(portp, tty);
- }
- }
- tty_kref_put(tty);
-}
-
-/*****************************************************************************/
-
-/*
- * Process an RX bad character.
- */
-
-static void stl_sc26198rxbadch(struct stlport *portp, unsigned char status, char ch)
-{
- struct tty_struct *tty;
- unsigned int ioaddr;
-
- tty = tty_port_tty_get(&portp->port);
- ioaddr = portp->ioaddr;
-
- if (status & SR_RXPARITY)
- portp->stats.rxparity++;
- if (status & SR_RXFRAMING)
- portp->stats.rxframing++;
- if (status & SR_RXOVERRUN)
- portp->stats.rxoverrun++;
- if (status & SR_RXBREAK)
- portp->stats.rxbreaks++;
-
- if ((tty != NULL) &&
- ((portp->rxignoremsk & status) == 0)) {
- if (portp->rxmarkmsk & status) {
- if (status & SR_RXBREAK) {
- status = TTY_BREAK;
- if (portp->port.flags & ASYNC_SAK) {
- do_SAK(tty);
- BRDENABLE(portp->brdnr, portp->pagenr);
- }
- } else if (status & SR_RXPARITY)
- status = TTY_PARITY;
- else if (status & SR_RXFRAMING)
- status = TTY_FRAME;
- else if(status & SR_RXOVERRUN)
- status = TTY_OVERRUN;
- else
- status = 0;
- } else
- status = 0;
-
- tty_insert_flip_char(tty, ch, status);
- tty_schedule_flip(tty);
-
- if (status == 0)
- portp->stats.rxtotal++;
- }
- tty_kref_put(tty);
-}
-
-/*****************************************************************************/
-
-/*
- * Process all characters in the RX FIFO of the UART. Check all char
- * status bytes as well, and process as required. We need to check
- * all bytes in the FIFO, in case some more enter the FIFO while we
- * are here. To get the exact character error type we need to switch
- * into CHAR error mode (that is why we need to make sure we empty
- * the FIFO).
- */
-
-static void stl_sc26198rxbadchars(struct stlport *portp)
-{
- unsigned char status, mr1;
- char ch;
-
-/*
- * To get the precise error type for each character we must switch
- * back into CHAR error mode.
- */
- mr1 = stl_sc26198getreg(portp, MR1);
- stl_sc26198setreg(portp, MR1, (mr1 & ~MR1_ERRBLOCK));
-
- while ((status = stl_sc26198getreg(portp, SR)) & SR_RXRDY) {
- stl_sc26198setreg(portp, SCCR, CR_CLEARRXERR);
- ch = stl_sc26198getreg(portp, RXFIFO);
- stl_sc26198rxbadch(portp, status, ch);
- }
-
-/*
- * To get correct interrupt class we must switch back into BLOCK
- * error mode.
- */
- stl_sc26198setreg(portp, MR1, mr1);
-}
-
-/*****************************************************************************/
-
-/*
- * Other interrupt handler. This includes modem signals, flow
- * control actions, etc. Most stuff is left to off-level interrupt
- * processing time.
- */
-
-static void stl_sc26198otherisr(struct stlport *portp, unsigned int iack)
-{
- unsigned char cir, ipr, xisr;
-
- pr_debug("stl_sc26198otherisr(portp=%p,iack=%x)\n", portp, iack);
-
- cir = stl_sc26198getglobreg(portp, CIR);
-
- switch (cir & CIR_SUBTYPEMASK) {
- case CIR_SUBCOS:
- ipr = stl_sc26198getreg(portp, IPR);
- if (ipr & IPR_DCDCHANGE) {
- stl_cd_change(portp);
- portp->stats.modem++;
- }
- break;
- case CIR_SUBXONXOFF:
- xisr = stl_sc26198getreg(portp, XISR);
- if (xisr & XISR_RXXONGOT) {
- set_bit(ASYI_TXFLOWED, &portp->istate);
- portp->stats.txxoff++;
- }
- if (xisr & XISR_RXXOFFGOT) {
- clear_bit(ASYI_TXFLOWED, &portp->istate);
- portp->stats.txxon++;
- }
- break;
- case CIR_SUBBREAK:
- stl_sc26198setreg(portp, SCCR, CR_BREAKRESET);
- stl_sc26198rxbadchars(portp);
- break;
- default:
- break;
- }
-}
-
-static void stl_free_isabrds(void)
-{
- struct stlbrd *brdp;
- unsigned int i;
-
- for (i = 0; i < stl_nrbrds; i++) {
- if ((brdp = stl_brds[i]) == NULL || (brdp->state & STL_PROBED))
- continue;
-
- free_irq(brdp->irq, brdp);
-
- stl_cleanup_panels(brdp);
-
- release_region(brdp->ioaddr1, brdp->iosize1);
- if (brdp->iosize2 > 0)
- release_region(brdp->ioaddr2, brdp->iosize2);
-
- kfree(brdp);
- stl_brds[i] = NULL;
- }
-}
-
-/*
- * Loadable module initialization stuff.
- */
-static int __init stallion_module_init(void)
-{
- struct stlbrd *brdp;
- struct stlconf conf;
- unsigned int i, j;
- int retval;
-
- printk(KERN_INFO "%s: version %s\n", stl_drvtitle, stl_drvversion);
-
- spin_lock_init(&stallion_lock);
- spin_lock_init(&brd_lock);
-
- stl_serial = alloc_tty_driver(STL_MAXBRDS * STL_MAXPORTS);
- if (!stl_serial) {
- retval = -ENOMEM;
- goto err;
- }
-
- stl_serial->owner = THIS_MODULE;
- stl_serial->driver_name = stl_drvname;
- stl_serial->name = "ttyE";
- stl_serial->major = STL_SERIALMAJOR;
- stl_serial->minor_start = 0;
- stl_serial->type = TTY_DRIVER_TYPE_SERIAL;
- stl_serial->subtype = SERIAL_TYPE_NORMAL;
- stl_serial->init_termios = stl_deftermios;
- stl_serial->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
- tty_set_operations(stl_serial, &stl_ops);
-
- retval = tty_register_driver(stl_serial);
- if (retval) {
- printk("STALLION: failed to register serial driver\n");
- goto err_frtty;
- }
-
-/*
- * Find any dynamically supported boards. That is via module load
- * line options.
- */
- for (i = stl_nrbrds; i < stl_nargs; i++) {
- memset(&conf, 0, sizeof(conf));
- if (stl_parsebrd(&conf, stl_brdsp[i]) == 0)
- continue;
- if ((brdp = stl_allocbrd()) == NULL)
- continue;
- brdp->brdnr = i;
- brdp->brdtype = conf.brdtype;
- brdp->ioaddr1 = conf.ioaddr1;
- brdp->ioaddr2 = conf.ioaddr2;
- brdp->irq = conf.irq;
- brdp->irqtype = conf.irqtype;
- stl_brds[brdp->brdnr] = brdp;
- if (stl_brdinit(brdp)) {
- stl_brds[brdp->brdnr] = NULL;
- kfree(brdp);
- } else {
- for (j = 0; j < brdp->nrports; j++)
- tty_register_device(stl_serial,
- brdp->brdnr * STL_MAXPORTS + j, NULL);
- stl_nrbrds = i + 1;
- }
- }
-
- /* this has to be _after_ isa finding because of locking */
- retval = pci_register_driver(&stl_pcidriver);
- if (retval && stl_nrbrds == 0) {
- printk(KERN_ERR "STALLION: can't register pci driver\n");
- goto err_unrtty;
- }
-
-/*
- * Set up a character driver for per board stuff. This is mainly used
- * to do stats ioctls on the ports.
- */
- if (register_chrdev(STL_SIOMEMMAJOR, "staliomem", &stl_fsiomem))
- printk("STALLION: failed to register serial board device\n");
-
- stallion_class = class_create(THIS_MODULE, "staliomem");
- if (IS_ERR(stallion_class))
- printk("STALLION: failed to create class\n");
- for (i = 0; i < 4; i++)
- device_create(stallion_class, NULL, MKDEV(STL_SIOMEMMAJOR, i),
- NULL, "staliomem%d", i);
-
- return 0;
-err_unrtty:
- tty_unregister_driver(stl_serial);
-err_frtty:
- put_tty_driver(stl_serial);
-err:
- return retval;
-}
-
-static void __exit stallion_module_exit(void)
-{
- struct stlbrd *brdp;
- unsigned int i, j;
-
- pr_debug("cleanup_module()\n");
-
- printk(KERN_INFO "Unloading %s: version %s\n", stl_drvtitle,
- stl_drvversion);
-
-/*
- * Free up all allocated resources used by the ports. This includes
- * memory and interrupts. As part of this process we will also do
- * a hangup on every open port - to try to flush out any processes
- * hanging onto ports.
- */
- for (i = 0; i < stl_nrbrds; i++) {
- if ((brdp = stl_brds[i]) == NULL || (brdp->state & STL_PROBED))
- continue;
- for (j = 0; j < brdp->nrports; j++)
- tty_unregister_device(stl_serial,
- brdp->brdnr * STL_MAXPORTS + j);
- }
-
- for (i = 0; i < 4; i++)
- device_destroy(stallion_class, MKDEV(STL_SIOMEMMAJOR, i));
- unregister_chrdev(STL_SIOMEMMAJOR, "staliomem");
- class_destroy(stallion_class);
-
- pci_unregister_driver(&stl_pcidriver);
-
- stl_free_isabrds();
-
- tty_unregister_driver(stl_serial);
- put_tty_driver(stl_serial);
-}
-
-module_init(stallion_module_init);
-module_exit(stallion_module_exit);
-
-MODULE_AUTHOR("Greg Ungerer");
-MODULE_DESCRIPTION("Stallion Multiport Serial Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/usbip/README b/drivers/staging/usbip/README
index c11be573548..41a2cf2e77a 100644
--- a/drivers/staging/usbip/README
+++ b/drivers/staging/usbip/README
@@ -2,5 +2,6 @@ TODO:
- more discussion about the protocol
- testing
- review of the userspace interface
+ - document the protocol
Please send patches for this code to Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/staging/usbip/stub.h b/drivers/staging/usbip/stub.h
index 6592aa2ad15..132adc57ebc 100644
--- a/drivers/staging/usbip/stub.h
+++ b/drivers/staging/usbip/stub.h
@@ -76,7 +76,9 @@ struct stub_unlink {
__u32 status;
};
-#define BUSID_SIZE 20
+/* same as SYSFS_BUS_ID_SIZE */
+#define BUSID_SIZE 32
+
struct bus_id_priv {
char name[BUSID_SIZE];
char status;
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 8cbea42b69b..fce22f2bd8b 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -23,16 +23,10 @@
#include "usbip_common.h"
#include "stub.h"
-static int stub_probe(struct usb_interface *interface,
- const struct usb_device_id *id);
-static void stub_disconnect(struct usb_interface *interface);
-static int stub_pre_reset(struct usb_interface *interface);
-static int stub_post_reset(struct usb_interface *interface);
-
/*
* Define device IDs here if you want to explicitly limit exportable devices.
- * In the most cases, wild card matching will be ok because driver binding can
- * be changed dynamically by a userland program.
+ * In most cases, wildcard matching will be okay because driver binding can be
+ * changed dynamically by a userland program.
*/
static struct usb_device_id stub_table[] = {
#if 0
@@ -56,18 +50,9 @@ static struct usb_device_id stub_table[] = {
};
MODULE_DEVICE_TABLE(usb, stub_table);
-struct usb_driver stub_driver = {
- .name = "usbip",
- .probe = stub_probe,
- .disconnect = stub_disconnect,
- .id_table = stub_table,
- .pre_reset = stub_pre_reset,
- .post_reset = stub_post_reset,
-};
-
/*
- * usbip_status shows status of usbip as long as this driver is bound to the
- * target device.
+ * usbip_status shows the status of usbip-host as long as this driver is bound
+ * to the target device.
*/
static ssize_t show_status(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -211,10 +196,11 @@ static void stub_shutdown_connection(struct usbip_device *ud)
if (ud->tcp_tx && !task_is_dead(ud->tcp_tx))
kthread_stop(ud->tcp_tx);
- /* 2. close the socket */
/*
- * tcp_socket is freed after threads are killed.
- * So usbip_xmit do not touch NULL socket.
+ * 2. close the socket
+ *
+ * tcp_socket is freed after threads are killed so that usbip_xmit does
+ * not touch NULL socket.
*/
if (ud->tcp_socket) {
sock_release(ud->tcp_socket);
@@ -234,8 +220,8 @@ static void stub_shutdown_connection(struct usbip_device *ud)
list_del(&unlink->list);
kfree(unlink);
}
- list_for_each_entry_safe(unlink, tmp,
- &sdev->unlink_free, list) {
+ list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free,
+ list) {
list_del(&unlink->list);
kfree(unlink);
}
@@ -262,22 +248,17 @@ static void stub_device_reset(struct usbip_device *ud)
/* try to reset the device */
ret = usb_reset_device(udev);
-
usb_unlock_device(udev);
spin_lock(&ud->lock);
if (ret) {
dev_err(&udev->dev, "device reset\n");
ud->status = SDEV_ST_ERROR;
-
} else {
dev_info(&udev->dev, "device reset\n");
ud->status = SDEV_ST_AVAILABLE;
-
}
spin_unlock(&ud->lock);
-
- return;
}
static void stub_device_unusable(struct usbip_device *ud)
@@ -379,7 +360,7 @@ static int stub_probe(struct usb_interface *interface,
/* check we should claim or not by busid_table */
busid_priv = get_busid_priv(udev_busid);
- if (!busid_priv || (busid_priv->status == STUB_BUSID_REMOV) ||
+ if (!busid_priv || (busid_priv->status == STUB_BUSID_REMOV) ||
(busid_priv->status == STUB_BUSID_OTHER)) {
dev_info(&interface->dev, "%s is not in match_busid table... "
"skip!\n", udev_busid);
@@ -424,7 +405,6 @@ static int stub_probe(struct usb_interface *interface,
udev_busid);
usb_set_intfdata(interface, NULL);
busid_priv->interf_count--;
-
return err;
}
@@ -432,7 +412,7 @@ static int stub_probe(struct usb_interface *interface,
return 0;
}
- /* ok. this is my device. */
+ /* ok, this is my device */
sdev = stub_device_alloc(udev, interface);
if (!sdev)
return -ENOMEM;
@@ -447,7 +427,6 @@ static int stub_probe(struct usb_interface *interface,
/* set private data to usb_interface */
usb_set_intfdata(interface, sdev);
busid_priv->interf_count++;
-
busid_priv->sdev = sdev;
err = stub_add_files(&interface->dev);
@@ -457,7 +436,6 @@ static int stub_probe(struct usb_interface *interface,
usb_put_intf(interface);
busid_priv->interf_count = 0;
-
busid_priv->sdev = NULL;
stub_device_free(sdev);
return err;
@@ -562,3 +540,12 @@ int stub_post_reset(struct usb_interface *interface)
dev_dbg(&interface->dev, "post_reset\n");
return 0;
}
+
+struct usb_driver stub_driver = {
+ .name = "usbip-host",
+ .probe = stub_probe,
+ .disconnect = stub_disconnect,
+ .id_table = stub_table,
+ .pre_reset = stub_pre_reset,
+ .post_reset = stub_post_reset,
+ };
diff --git a/drivers/staging/usbip/stub_main.c b/drivers/staging/usbip/stub_main.c
index e9085d66394..a34249a9cb6 100644
--- a/drivers/staging/usbip/stub_main.c
+++ b/drivers/staging/usbip/stub_main.c
@@ -25,9 +25,7 @@
#define DRIVER_AUTHOR "Takahiro Hirofuchi"
#define DRIVER_DESC "USB/IP Host Driver"
-/* stub_priv is allocated from stub_priv_cache */
struct kmem_cache *stub_priv_cache;
-
/*
* busid_tables defines matching busids that usbip can grab. A user can change
* dynamically what device is locally used and what device is exported to a
@@ -37,70 +35,60 @@ struct kmem_cache *stub_priv_cache;
static struct bus_id_priv busid_table[MAX_BUSID];
static spinlock_t busid_table_lock;
-int match_busid(const char *busid)
+static void init_busid_table(void)
{
int i;
- spin_lock(&busid_table_lock);
-
+ memset(busid_table, 0, sizeof(busid_table));
for (i = 0; i < MAX_BUSID; i++)
- if (busid_table[i].name[0])
- if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
- /* already registerd */
- spin_unlock(&busid_table_lock);
- return 0;
- }
-
- spin_unlock(&busid_table_lock);
+ busid_table[i].status = STUB_BUSID_OTHER;
- return 1;
+ spin_lock_init(&busid_table_lock);
}
-struct bus_id_priv *get_busid_priv(const char *busid)
+/*
+ * Find the index of the busid by name.
+ * Must be called with busid_table_lock held.
+ */
+static int get_busid_idx(const char *busid)
{
int i;
-
- spin_lock(&busid_table_lock);
+ int idx = -1;
for (i = 0; i < MAX_BUSID; i++)
if (busid_table[i].name[0])
if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
- /* already registerd */
- spin_unlock(&busid_table_lock);
- return &(busid_table[i]);
+ idx = i;
+ break;
}
-
- spin_unlock(&busid_table_lock);
-
- return NULL;
+ return idx;
}
-static ssize_t show_match_busid(struct device_driver *drv, char *buf)
+struct bus_id_priv *get_busid_priv(const char *busid)
{
- int i;
- char *out = buf;
+ int idx;
+ struct bus_id_priv *bid = NULL;
spin_lock(&busid_table_lock);
-
- for (i = 0; i < MAX_BUSID; i++)
- if (busid_table[i].name[0])
- out += sprintf(out, "%s ", busid_table[i].name);
-
+ idx = get_busid_idx(busid);
+ if (idx >= 0)
+ bid = &(busid_table[idx]);
spin_unlock(&busid_table_lock);
- out += sprintf(out, "\n");
-
- return out - buf;
+ return bid;
}
static int add_match_busid(char *busid)
{
int i;
-
- if (!match_busid(busid))
- return 0;
+ int ret = -1;
spin_lock(&busid_table_lock);
+ /* already registered? */
+ if (get_busid_idx(busid) >= 0) {
+ ret = 0;
+ goto out;
+ }
for (i = 0; i < MAX_BUSID; i++)
if (!busid_table[i].name[0]) {
@@ -108,52 +96,55 @@ static int add_match_busid(char *busid)
if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
(busid_table[i].status != STUB_BUSID_REMOV))
busid_table[i].status = STUB_BUSID_ADDED;
- spin_unlock(&busid_table_lock);
- return 0;
+ ret = 0;
+ break;
}
+out:
spin_unlock(&busid_table_lock);
- return -1;
+ return ret;
}
int del_match_busid(char *busid)
{
- int i;
+ int idx;
+ int ret = -1;
spin_lock(&busid_table_lock);
+ idx = get_busid_idx(busid);
+ if (idx < 0)
+ goto out;
- for (i = 0; i < MAX_BUSID; i++)
- if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
- /* found */
- if (busid_table[i].status == STUB_BUSID_OTHER)
- memset(busid_table[i].name, 0, BUSID_SIZE);
- if ((busid_table[i].status != STUB_BUSID_OTHER) &&
- (busid_table[i].status != STUB_BUSID_ADDED)) {
- busid_table[i].status = STUB_BUSID_REMOV;
- }
- spin_unlock(&busid_table_lock);
- return 0;
- }
+ /* found */
+ ret = 0;
+
+ if (busid_table[idx].status == STUB_BUSID_OTHER)
+ memset(busid_table[idx].name, 0, BUSID_SIZE);
+
+ if ((busid_table[idx].status != STUB_BUSID_OTHER) &&
+ (busid_table[idx].status != STUB_BUSID_ADDED))
+ busid_table[idx].status = STUB_BUSID_REMOV;
+out:
spin_unlock(&busid_table_lock);
- return -1;
+ return ret;
}
-static void init_busid_table(void)
+static ssize_t show_match_busid(struct device_driver *drv, char *buf)
{
int i;
+ char *out = buf;
- for (i = 0; i < MAX_BUSID; i++) {
- memset(busid_table[i].name, 0, BUSID_SIZE);
- busid_table[i].status = STUB_BUSID_OTHER;
- busid_table[i].interf_count = 0;
- busid_table[i].sdev = NULL;
- busid_table[i].shutdown_busid = 0;
- }
+ spin_lock(&busid_table_lock);
+ for (i = 0; i < MAX_BUSID; i++)
+ if (busid_table[i].name[0])
+ out += sprintf(out, "%s ", busid_table[i].name);
+ spin_unlock(&busid_table_lock);
+ out += sprintf(out, "\n");
- spin_lock_init(&busid_table_lock);
+ return out - buf;
}
static ssize_t store_match_busid(struct device_driver *dev, const char *buf,
@@ -175,23 +166,24 @@ static ssize_t store_match_busid(struct device_driver *dev, const char *buf,
strncpy(busid, buf + 4, BUSID_SIZE);
if (!strncmp(buf, "add ", 4)) {
- if (add_match_busid(busid) < 0)
+ if (add_match_busid(busid) < 0) {
return -ENOMEM;
- else {
+ } else {
pr_debug("add busid %s\n", busid);
return count;
}
} else if (!strncmp(buf, "del ", 4)) {
- if (del_match_busid(busid) < 0)
+ if (del_match_busid(busid) < 0) {
return -ENODEV;
- else {
+ } else {
pr_debug("del busid %s\n", busid);
return count;
}
- } else
+ } else {
return -EINVAL;
+ }
}
-static DRIVER_ATTR(match_busid, S_IRUSR|S_IWUSR, show_match_busid,
+static DRIVER_ATTR(match_busid, S_IRUSR | S_IWUSR, show_match_busid,
store_match_busid);
static struct stub_priv *stub_priv_pop_from_listhead(struct list_head *listhead)
@@ -214,36 +206,30 @@ static struct stub_priv *stub_priv_pop(struct stub_device *sdev)
spin_lock_irqsave(&sdev->priv_lock, flags);
priv = stub_priv_pop_from_listhead(&sdev->priv_init);
- if (priv) {
- spin_unlock_irqrestore(&sdev->priv_lock, flags);
- return priv;
- }
+ if (priv)
+ goto done;
priv = stub_priv_pop_from_listhead(&sdev->priv_tx);
- if (priv) {
- spin_unlock_irqrestore(&sdev->priv_lock, flags);
- return priv;
- }
+ if (priv)
+ goto done;
priv = stub_priv_pop_from_listhead(&sdev->priv_free);
- if (priv) {
- spin_unlock_irqrestore(&sdev->priv_lock, flags);
- return priv;
- }
+done:
spin_unlock_irqrestore(&sdev->priv_lock, flags);
- return NULL;
+
+ return priv;
}
void stub_device_cleanup_urbs(struct stub_device *sdev)
{
struct stub_priv *priv;
+ struct urb *urb;
dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev);
while ((priv = stub_priv_pop(sdev))) {
- struct urb *urb = priv->urb;
-
+ urb = priv->urb;
dev_dbg(&sdev->udev->dev, "free urb %p\n", urb);
usb_kill_urb(urb);
@@ -251,51 +237,46 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
kfree(urb->transfer_buffer);
kfree(urb->setup_packet);
-
usb_free_urb(urb);
}
}
-static int __init usb_stub_init(void)
+static int __init usbip_host_init(void)
{
int ret;
- stub_priv_cache = kmem_cache_create("stub_priv",
- sizeof(struct stub_priv), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ stub_priv_cache = KMEM_CACHE(stub_priv, SLAB_HWCACHE_ALIGN);
if (!stub_priv_cache) {
- pr_err("create stub_priv_cache error\n");
+ pr_err("kmem_cache_create failed\n");
return -ENOMEM;
}
ret = usb_register(&stub_driver);
- if (ret) {
+ if (ret < 0) {
pr_err("usb_register failed %d\n", ret);
- goto error_usb_register;
+ goto err_usb_register;
}
- pr_info(DRIVER_DESC " " USBIP_VERSION "\n");
-
- init_busid_table();
-
ret = driver_create_file(&stub_driver.drvwrap.driver,
&driver_attr_match_busid);
-
- if (ret) {
- pr_err("create driver sysfs\n");
- goto error_create_file;
+ if (ret < 0) {
+ pr_err("driver_create_file failed\n");
+ goto err_create_file;
}
+ init_busid_table();
+ pr_info(DRIVER_DESC " v" USBIP_VERSION "\n");
return ret;
-error_create_file:
+
+err_create_file:
usb_deregister(&stub_driver);
-error_usb_register:
+err_usb_register:
kmem_cache_destroy(stub_priv_cache);
return ret;
}
-static void __exit usb_stub_exit(void)
+static void __exit usbip_host_exit(void)
{
driver_remove_file(&stub_driver.drvwrap.driver,
&driver_attr_match_busid);
@@ -309,8 +290,8 @@ static void __exit usb_stub_exit(void)
kmem_cache_destroy(stub_priv_cache);
}
-module_init(usb_stub_init);
-module_exit(usb_stub_exit);
+module_init(usbip_host_init);
+module_exit(usbip_host_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index bc57844600b..538fb9ee341 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -306,18 +306,18 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
{
struct usbip_device *ud = &sdev->ud;
+ int valid = 0;
if (pdu->base.devid == sdev->devid) {
spin_lock(&ud->lock);
if (ud->status == SDEV_ST_USED) {
/* A request is valid. */
- spin_unlock(&ud->lock);
- return 1;
+ valid = 1;
}
spin_unlock(&ud->lock);
}
- return 0;
+ return valid;
}
static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
diff --git a/drivers/staging/usbip/stub_tx.c b/drivers/staging/usbip/stub_tx.c
index fda2bc95e85..023fda305be 100644
--- a/drivers/staging/usbip/stub_tx.c
+++ b/drivers/staging/usbip/stub_tx.c
@@ -97,13 +97,12 @@ void stub_complete(struct urb *urb)
/* link a urb to the queue of tx. */
spin_lock_irqsave(&sdev->priv_lock, flags);
-
if (priv->unlinking) {
stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
stub_free_priv_and_urb(priv);
- } else
+ } else {
list_move_tail(&priv->list, &sdev->priv_tx);
-
+ }
spin_unlock_irqrestore(&sdev->priv_lock, flags);
/* wake up tx_thread */
@@ -113,10 +112,10 @@ void stub_complete(struct urb *urb)
static inline void setup_base_pdu(struct usbip_header_basic *base,
__u32 command, __u32 seqnum)
{
- base->command = command;
- base->seqnum = seqnum;
- base->devid = 0;
- base->ep = 0;
+ base->command = command;
+ base->seqnum = seqnum;
+ base->devid = 0;
+ base->ep = 0;
base->direction = 0;
}
@@ -232,7 +231,7 @@ static int stub_send_ret_submit(struct stub_device *sdev)
if (txsize != sizeof(pdu_header) + urb->actual_length) {
dev_err(&sdev->interface->dev,
"actual length of urb %d does not "
- "match iso packet sizes %lu\n",
+ "match iso packet sizes %zu\n",
urb->actual_length,
txsize-sizeof(pdu_header));
kfree(iov);
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 433a3b6207d..f4b53d103c5 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -63,9 +63,9 @@ static void usbip_dump_buffer(char *buff, int bufflen)
static void usbip_dump_pipe(unsigned int p)
{
unsigned char type = usb_pipetype(p);
- unsigned char ep = usb_pipeendpoint(p);
- unsigned char dev = usb_pipedevice(p);
- unsigned char dir = usb_pipein(p);
+ unsigned char ep = usb_pipeendpoint(p);
+ unsigned char dev = usb_pipedevice(p);
+ unsigned char dir = usb_pipein(p);
pr_debug("dev(%d) ep(%d) [%s] ", dev, ep, dir ? "IN" : "OUT");
@@ -204,7 +204,7 @@ static void usbip_dump_usb_ctrlrequest(struct usb_ctrlrequest *cmd)
pr_debug("CLEAR_FEAT\n");
break;
case USB_REQ_SET_FEATURE:
- pr_debug("SET_FEAT \n");
+ pr_debug("SET_FEAT\n");
break;
case USB_REQ_SET_ADDRESS:
pr_debug("SET_ADDRRS\n");
@@ -231,14 +231,14 @@ static void usbip_dump_usb_ctrlrequest(struct usb_ctrlrequest *cmd)
pr_debug("SYNC_FRAME\n");
break;
default:
- pr_debug("REQ(%02X) \n", cmd->bRequest);
+ pr_debug("REQ(%02X)\n", cmd->bRequest);
break;
}
usbip_dump_request_type(cmd->bRequestType);
} else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
- pr_debug("CLASS \n");
+ pr_debug("CLASS\n");
} else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
- pr_debug("VENDOR \n");
+ pr_debug("VENDOR\n");
} else if ((cmd->bRequestType & USB_TYPE_MASK) == USB_TYPE_RESERVED) {
pr_debug("RESERVED\n");
}
@@ -334,8 +334,8 @@ void usbip_dump_header(struct usbip_header *pdu)
EXPORT_SYMBOL_GPL(usbip_dump_header);
/* Send/receive messages over TCP/IP. I refer drivers/block/nbd.c */
-int usbip_xmit(int send, struct socket *sock, char *buf,
- int size, int msg_flags)
+int usbip_xmit(int send, struct socket *sock, char *buf, int size,
+ int msg_flags)
{
int result;
struct msghdr msg;
@@ -627,9 +627,8 @@ void usbip_header_correct_endian(struct usbip_header *pdu, int send)
}
EXPORT_SYMBOL_GPL(usbip_header_correct_endian);
-static void usbip_iso_pakcet_correct_endian(
- struct usbip_iso_packet_descriptor *iso,
- int send)
+static void usbip_iso_packet_correct_endian(
+ struct usbip_iso_packet_descriptor *iso, int send)
{
/* does not need all members. but copy all simply. */
if (send) {
@@ -678,7 +677,7 @@ void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen)
iso = buff + (i * sizeof(*iso));
usbip_pack_iso(iso, &urb->iso_frame_desc[i], 1);
- usbip_iso_pakcet_correct_endian(iso, 1);
+ usbip_iso_packet_correct_endian(iso, 1);
}
*bufflen = size;
@@ -729,7 +728,7 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
for (i = 0; i < np; i++) {
iso = buff + (i * sizeof(*iso));
- usbip_iso_pakcet_correct_endian(iso, 0);
+ usbip_iso_packet_correct_endian(iso, 0);
usbip_pack_iso(iso, &urb->iso_frame_desc[i], 0);
total_length += urb->iso_frame_desc[i].actual_length;
}
@@ -839,19 +838,19 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
}
EXPORT_SYMBOL_GPL(usbip_recv_xbuff);
-static int __init usbip_common_init(void)
+static int __init usbip_core_init(void)
{
pr_info(DRIVER_DESC " v" USBIP_VERSION "\n");
return 0;
}
-static void __exit usbip_common_exit(void)
+static void __exit usbip_core_exit(void)
{
return;
}
-module_init(usbip_common_init);
-module_exit(usbip_common_exit);
+module_init(usbip_core_init);
+module_exit(usbip_core_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index 4a641c552b7..074ac4267d3 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -65,7 +65,7 @@ enum {
#define usbip_dbg_flag_vhci_tx (usbip_debug_flag & usbip_debug_vhci_tx)
#define usbip_dbg_flag_stub_rx (usbip_debug_flag & usbip_debug_stub_rx)
#define usbip_dbg_flag_stub_tx (usbip_debug_flag & usbip_debug_stub_tx)
-#define usbip_dbg_flag_vhci_sysfs (usbip_debug_flag & usbip_debug_vhci_sysfs)
+#define usbip_dbg_flag_vhci_sysfs (usbip_debug_flag & usbip_debug_vhci_sysfs)
extern unsigned long usbip_debug_flag;
extern struct device_attribute dev_attr_usbip_debug;
@@ -104,111 +104,110 @@ extern struct device_attribute dev_attr_usbip_debug;
usbip_dbg_with_flag(usbip_debug_stub_tx, fmt , ##args)
/*
- * USB/IP request headers.
- * Currently, we define 4 request types:
+ * USB/IP request headers
*
- * - CMD_SUBMIT transfers a USB request, corresponding to usb_submit_urb().
- * (client to server)
- * - RET_RETURN transfers the result of CMD_SUBMIT.
- * (server to client)
- * - CMD_UNLINK transfers an unlink request of a pending USB request.
+ * Each request is transferred across the network to its counterpart, which
+ * facilitates the normal USB communication. The values contained in the headers
+ * are basically the same as in a URB. Currently, four request types are
+ * defined:
+ *
+ * - USBIP_CMD_SUBMIT: a USB request block, corresponds to usb_submit_urb()
* (client to server)
- * - RET_UNLINK transfers the result of CMD_UNLINK.
+ *
+ * - USBIP_RET_SUBMIT: the result of USBIP_CMD_SUBMIT
* (server to client)
*
- * Note: The below request formats are based on the USB subsystem of Linux. Its
- * details will be defined when other implementations come.
+ * - USBIP_CMD_UNLINK: an unlink request of a pending USBIP_CMD_SUBMIT,
+ * corresponds to usb_unlink_urb()
+ * (client to server)
*
+ * - USBIP_RET_UNLINK: the result of USBIP_CMD_UNLINK
+ * (server to client)
*
*/
+#define USBIP_CMD_SUBMIT 0x0001
+#define USBIP_RET_SUBMIT 0x0002
+#define USBIP_CMD_UNLINK 0x0003
+#define USBIP_RET_UNLINK 0x0004
-/*
- * A basic header followed by other additional headers.
+#define USBIP_DIR_IN 0x00
+#define USBIP_DIR_OUT 0x01
+
+/**
+ * struct usbip_header_basic - data pertinent to every request
+ * @command: the usbip request type
+ * @seqnum: sequential number that identifies requests; incremented per
+ * connection
+ * @devid: specifies a remote USB device uniquely instead of busnum and devnum;
+ * in the stub driver, this value is ((busnum << 16) | devnum)
+ * @direction: direction of the transfer
+ * @ep: endpoint number
*/
struct usbip_header_basic {
-#define USBIP_CMD_SUBMIT 0x0001
-#define USBIP_CMD_UNLINK 0x0002
-#define USBIP_RET_SUBMIT 0x0003
-#define USBIP_RET_UNLINK 0x0004
__u32 command;
-
- /* sequential number which identifies requests.
- * incremented per connections */
__u32 seqnum;
-
- /* devid is used to specify a remote USB device uniquely instead
- * of busnum and devnum in Linux. In the case of Linux stub_driver,
- * this value is ((busnum << 16) | devnum) */
__u32 devid;
-
-#define USBIP_DIR_OUT 0
-#define USBIP_DIR_IN 1
__u32 direction;
- __u32 ep; /* endpoint number */
+ __u32 ep;
} __packed;
-/*
- * An additional header for a CMD_SUBMIT packet.
+/**
+ * struct usbip_header_cmd_submit - USBIP_CMD_SUBMIT packet header
+ * @transfer_flags: URB flags
+ * @transfer_buffer_length: the data size for (in) or (out) transfer
+ * @start_frame: initial frame for isochronous or interrupt transfers
+ * @number_of_packets: number of isochronous packets
+ * @interval: maximum time for the request on the server-side host controller
+ * @setup: setup data for a control request
*/
struct usbip_header_cmd_submit {
- /* these values are basically the same as in a URB. */
-
- /* the same in a URB. */
__u32 transfer_flags;
-
- /* set the following data size (out),
- * or expected reading data size (in) */
__s32 transfer_buffer_length;
/* it is difficult for usbip to sync frames (reserved only?) */
__s32 start_frame;
-
- /* the number of iso descriptors that follows this header */
__s32 number_of_packets;
-
- /* the maximum time within which this request works in a host
- * controller of a server side */
__s32 interval;
- /* set setup packet data for a CTRL request */
unsigned char setup[8];
} __packed;
-/*
- * An additional header for a RET_SUBMIT packet.
+/**
+ * struct usbip_header_ret_submit - USBIP_RET_SUBMIT packet header
+ * @status: return status of a non-iso request
+ * @actual_length: number of bytes transferred
+ * @start_frame: initial frame for isochronous or interrupt transfers
+ * @number_of_packets: number of isochronous packets
+ * @error_count: number of errors for isochronous transfers
*/
struct usbip_header_ret_submit {
__s32 status;
- __s32 actual_length; /* returned data length */
- __s32 start_frame; /* ISO and INT */
- __s32 number_of_packets; /* ISO only */
- __s32 error_count; /* ISO only */
+ __s32 actual_length;
+ __s32 start_frame;
+ __s32 number_of_packets;
+ __s32 error_count;
} __packed;
-/*
- * An additional header for a CMD_UNLINK packet.
+/**
+ * struct usbip_header_cmd_unlink - USBIP_CMD_UNLINK packet header
+ * @seqnum: the URB seqnum to unlink
*/
struct usbip_header_cmd_unlink {
- __u32 seqnum; /* URB's seqnum that will be unlinked */
+ __u32 seqnum;
} __packed;
-/*
- * An additional header for a RET_UNLINK packet.
+/**
+ * struct usbip_header_ret_unlink - USBIP_RET_UNLINK packet header
+ * @status: return status of the request
*/
struct usbip_header_ret_unlink {
__s32 status;
} __packed;
-/* the same as usb_iso_packet_descriptor but packed for pdu */
-struct usbip_iso_packet_descriptor {
- __u32 offset;
- __u32 length; /* expected length */
- __u32 actual_length;
- __u32 status;
-} __packed;
-
-/*
- * All usbip packets use a common header to keep code simple.
+/**
+ * struct usbip_header - common header for all usbip packets
+ * @base: the basic header
+ * @u: packet type dependent header
*/
struct usbip_header {
struct usbip_header_basic base;
@@ -221,40 +220,15 @@ struct usbip_header {
} u;
} __packed;
-int usbip_xmit(int, struct socket *, char *, int, int);
-int usbip_sendmsg(struct socket *, struct msghdr *, int);
-
-static inline int interface_to_busnum(struct usb_interface *interface)
-{
- struct usb_device *udev = interface_to_usbdev(interface);
- return udev->bus->busnum;
-}
-
-static inline int interface_to_devnum(struct usb_interface *interface)
-{
- struct usb_device *udev = interface_to_usbdev(interface);
- return udev->devnum;
-}
-
-static inline int interface_to_infnum(struct usb_interface *interface)
-{
- return interface->cur_altsetting->desc.bInterfaceNumber;
-}
-
-#if 0
-int setnodelay(struct socket *);
-int setquickack(struct socket *);
-int setkeepalive(struct socket *socket);
-void setreuse(struct socket *);
-#endif
-
-struct socket *sockfd_to_socket(unsigned int);
-int set_sockaddr(struct socket *socket, struct sockaddr_storage *ss);
-
-void usbip_dump_urb(struct urb *purb);
-void usbip_dump_header(struct usbip_header *pdu);
-
-struct usbip_device;
+/*
+ * This is the same as usb_iso_packet_descriptor but packed for pdu.
+ */
+struct usbip_iso_packet_descriptor {
+ __u32 offset;
+ __u32 length; /* expected length */
+ __u32 actual_length;
+ __u32 status;
+} __packed;
enum usbip_side {
USBIP_VHCI,
@@ -277,20 +251,7 @@ enum usbip_status {
VDEV_ST_ERROR
};
-/* a common structure for stub_device and vhci_device */
-struct usbip_device {
- enum usbip_side side;
- enum usbip_status status;
-
- /* lock for status */
- spinlock_t lock;
-
- struct socket *tcp_socket;
-
- struct task_struct *tcp_rx;
- struct task_struct *tcp_tx;
-
- /* event handler */
+/* event handler */
#define USBIP_EH_SHUTDOWN (1 << 0)
#define USBIP_EH_BYE (1 << 1)
#define USBIP_EH_RESET (1 << 2)
@@ -307,6 +268,19 @@ struct usbip_device {
#define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
+/* a common structure for stub_device and vhci_device */
+struct usbip_device {
+ enum usbip_side side;
+ enum usbip_status status;
+
+ /* lock for status */
+ spinlock_t lock;
+
+ struct socket *tcp_socket;
+
+ struct task_struct *tcp_rx;
+ struct task_struct *tcp_tx;
+
unsigned long event;
struct task_struct *eh;
wait_queue_head_t eh_waitq;
@@ -318,17 +292,32 @@ struct usbip_device {
} eh_ops;
};
+#if 0
+int usbip_sendmsg(struct socket *, struct msghdr *, int);
+int set_sockaddr(struct socket *socket, struct sockaddr_storage *ss);
+int setnodelay(struct socket *);
+int setquickack(struct socket *);
+int setkeepalive(struct socket *socket);
+void setreuse(struct socket *);
+#endif
+
+/* usbip_common.c */
+void usbip_dump_urb(struct urb *purb);
+void usbip_dump_header(struct usbip_header *pdu);
+
+int usbip_xmit(int send, struct socket *sock, char *buf, int size,
+ int msg_flags);
+struct socket *sockfd_to_socket(unsigned int sockfd);
+
void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
int pack);
-
void usbip_header_correct_endian(struct usbip_header *pdu, int send);
-/* some members of urb must be substituted before. */
-int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb);
+
+void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen);
/* some members of urb must be substituted before. */
int usbip_recv_iso(struct usbip_device *ud, struct urb *urb);
-/* some members of urb must be substituted before. */
int usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
-void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen);
+int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb);
/* usbip_event.c */
int usbip_start_eh(struct usbip_device *ud);
@@ -336,4 +325,21 @@ void usbip_stop_eh(struct usbip_device *ud);
void usbip_event_add(struct usbip_device *ud, unsigned long event);
int usbip_event_happened(struct usbip_device *ud);
+static inline int interface_to_busnum(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ return udev->bus->busnum;
+}
+
+static inline int interface_to_devnum(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ return udev->devnum;
+}
+
+static inline int interface_to_infnum(struct usb_interface *interface)
+{
+ return interface->cur_altsetting->desc.bInterfaceNumber;
+}
+
#endif /* __USBIP_COMMON_H */
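As a companion to the kernel-doc added above, a small illustrative snippet (not from the patch) showing how the documented fields of struct usbip_header_basic line up with the new request-type and direction constants; example_fill_basic() is a hypothetical helper:

static void example_fill_basic(struct usbip_header *pdu, __u32 seqnum,
			       __u32 busnum, __u32 devnum, __u32 ep)
{
	pdu->base.command   = USBIP_CMD_SUBMIT;
	pdu->base.seqnum    = seqnum;
	/* stub-driver convention documented above: (busnum << 16) | devnum */
	pdu->base.devid     = (busnum << 16) | devnum;
	pdu->base.direction = USBIP_DIR_OUT;
	pdu->base.ep        = ep;
	/* call usbip_header_correct_endian(pdu, 1) before sending */
}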
diff --git a/drivers/staging/usbip/userspace/AUTHORS b/drivers/staging/usbip/userspace/AUTHORS
index 2f73e65d509..a27ea8d03ae 100644
--- a/drivers/staging/usbip/userspace/AUTHORS
+++ b/drivers/staging/usbip/userspace/AUTHORS
@@ -1,2 +1,3 @@
Takahiro Hirofuchi
Robert Leibl
+matt mooney <mfm@muteddisk.com>
diff --git a/drivers/staging/usbip/userspace/Makefile.am b/drivers/staging/usbip/userspace/Makefile.am
index 83f51b8df89..9ab19499fe0 100644
--- a/drivers/staging/usbip/userspace/Makefile.am
+++ b/drivers/staging/usbip/userspace/Makefile.am
@@ -1,11 +1,6 @@
SUBDIRS := libsrc src
-includedir := @includedir@/usbip
+includedir = @includedir@/usbip
include_HEADERS := $(addprefix libsrc/, \
- usbip.h usbip_common.h vhci_driver.h stub_driver.h)
+ usbip_common.h vhci_driver.h usbip_host_driver.h)
dist_man_MANS := $(addprefix doc/, usbip.8 usbipd.8 usbip_bind_driver.8)
-
-if INSTALL_USBIDS
-pkgdata_DATA := usb.ids
-EXTRA_DIST := $(pkgdata_DATA)
-endif
diff --git a/drivers/staging/usbip/userspace/README b/drivers/staging/usbip/userspace/README
index 2ee84b9e7e0..63cd1071905 100644
--- a/drivers/staging/usbip/userspace/README
+++ b/drivers/staging/usbip/userspace/README
@@ -1,19 +1,19 @@
-# vim:tw=78:ts=4:expandtab:ai:sw=4
#
# README for usbip-utils
#
-# Copyright (C) 2005-2008 Takahiro Hirofuchi
+# Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+# 2005-2008 Takahiro Hirofuchi
[Requirements]
- USB/IP device drivers
- Its source code is included under $(top)/drivers/.
+ Found in the staging directory of the Linux kernel.
- sysfsutils >= 2.0.0
- sysfsutils library
+ sysfsutils library
- libwrap0-dev
- tcp wrapper library
+ tcp wrapper library
- gcc >= 4.0
@@ -21,195 +21,181 @@
- libtool, automake >= 1.9, autoconf >= 2.5.0, pkg-config
+
[Install]
- 0. Skip here if you see a configure script.
- $ ./autogen.sh
+ 0. Generate configuration scripts.
+ $ ./autogen.sh
+
+ 1. Compile & install the userspace utilities.
+ $ ./configure [--with-tcp-wrappers=no] [--with-usbids-dir=<dir>]
+ $ make install
- 1. Compile & install.
- $ ./configure
- $ make install
+ 2. Compile & install USB/IP drivers.
- 2. Compile & install USB/IP drivers if not yet.
[Usage]
- server:# (Attach your USB device physically.)
+ server:# (Physically attach your USB device.)
server:# insmod usbip-core.ko
server:# insmod usbip-host.ko
- - It was formerly named as stub.ko.
server:# usbipd -D
- - Start usbip daemon.
+ - Start usbip daemon.
- server:# usbip_bind_driver --list
- - List driver assignments for usb devices.
-
- server:# usbip_bind_driver --usbip 1-2
- - Bind usbip-host.ko to the device of busid 1-2.
- - A usb device 1-2 is now exportable to other hosts!
- - Use 'usbip_bind_driver --other 1-2' when you want to shutdown exporting
- and use the device locally.
+ server:# usbip list -l
+ - List driver assignments for USB devices.
+ server:# usbip bind --busid 1-2
+ - Bind usbip-host.ko to the device with busid 1-2.
+ - The USB device 1-2 is now exportable to other hosts!
+ - Use `usbip unbind --busid 1-2' to stop exporting the device.
client:# insmod usbip-core.ko
client:# insmod vhci-hcd.ko
- - It was formerly named as vhci.ko.
- client:# usbip --list server
- - List exportable usb devices on the server.
-
- client:# usbip --attach server 1-2
- - Connect the remote USB device.
-
- client:# usbip --port
- - Show virtual port status.
+ client:# usbip list --remote <host>
+ - List exported USB devices on the <host>.
+
+ client:# usbip attach --host <host> --busid 1-2
+ - Connect the remote USB device.
+
+ client:# usbip port
+ - Show virtual port status.
+
+ client:# usbip detach --port <port>
+ - Detach the USB device.
+
+
+[Example]
+---------------------------
+ SERVER SIDE
+---------------------------
+Physically attach your USB devices to this host.
- client:# usbip --detach 0
- - Detach the usb device.
-
-
-[Output Example]
---------------------------------------------------------------------------------------------------------
-- SERVER SIDE (physically attach your USB devices to this host) ----------------------------------------
---------------------------------------------------------------------------------------------------------
-trois:# insmod (somewhere)/usbip-core.ko
-trois:# insmod (somewhere)/usbip-host.ko
-trois:# usbipd -D
+ trois:# insmod path/to/usbip-core.ko
+ trois:# insmod path/to/usbip-host.ko
+ trois:# usbipd -D
+
+In another terminal, let's look up what USB devices are physically
+attached to this host.
---------------------------------------------------------------------------------------------------------
-In another terminal, let's look up what usb devices are physically attached to
-this host. We can see a usb storage device of busid 3-3.2 is now bound to
-usb-storage driver. To export this device, we first mark the device as
-"exportable"; the device is bound to usbip driver. Please remember you can not
-export a usb hub.
-
- trois:# usbip_bind_driver --list
- List USB devices
- - busid 3-3.2 (04bb:0206)
- 3-3.2:1.0 -> usb-storage
-
- - busid 3-3.1 (08bb:2702)
- 3-3.1:1.0 -> snd-usb-audio
- 3-3.1:1.1 -> snd-usb-audio
-
- - busid 3-3 (0409:0058)
- 3-3:1.0 -> hub
-
- - busid 3-2 (0711:0902)
- 3-2:1.0 -> none
+ trois:# usbip list -l
+ Local USB devices
+ =================
+ - busid 1-1 (05a9:a511)
+ 1-1:1.0 -> ov511
- - busid 1-1 (05a9:a511)
- 1-1:1.0 -> ov511
+ - busid 3-2 (0711:0902)
+ 3-2:1.0 -> none
- - busid 4-1 (046d:08b2)
- 4-1:1.0 -> none
- 4-1:1.1 -> none
- 4-1:1.2 -> none
+ - busid 3-3.1 (08bb:2702)
+ 3-3.1:1.0 -> snd-usb-audio
+ 3-3.1:1.1 -> snd-usb-audio
- - busid 5-2 (058f:9254)
- 5-2:1.0 -> hub
+ - busid 3-3.2 (04bb:0206)
+ 3-3.2:1.0 -> usb-storage
---------------------------------------------------------------------------------------------------------
-Mark the device of busid 3-3.2 as exportable.
+ - busid 3-3 (0409:0058)
+ 3-3:1.0 -> hub
- trois:# usbip_bind_driver --usbip 3-3.2
- ** (process:24621): DEBUG: 3-3.2:1.0 -> none
- ** (process:24621): DEBUG: write "add 3-3.2" to /sys/bus/usb/drivers/usbip/match_busid
- ** Message: bind 3-3.2 to usbip, complete!
+ - busid 4-1 (046d:08b2)
+ 4-1:1.0 -> none
+ 4-1:1.1 -> none
+ 4-1:1.2 -> none
- trois:# usbip_bind_driver --list
- List USB devices
- - busid 3-3.2 (04bb:0206)
- 3-3.2:1.0 -> usbip
- (snip)
+ - busid 5-2 (058f:9254)
+ 5-2:1.0 -> hub
-Iterate the above operation for other devices if you like.
+A USB storage device of busid 3-3.2 is now bound to the usb-storage
+driver. To export this device, we first mark the device as
+"exportable"; the device is bound to the usbip-host driver. Please
+remember you cannot export a USB hub.
+Mark the device of busid 3-3.2 as exportable:
---------------------------------------------------------------------------------------------------------
-- CLIENT SIDE ------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------
-First, let's list available remote devices which are marked as exportable in
-the server host.
+ trois:# usbip --debug bind --busid 3-3.2
+ ...
+ usbip debug: usbip_bind.c:162:[unbind_other] 3-3.2:1.0 -> usb-storage
+ ...
+ bind device on busid 3-3.2: complete
- deux:# insmod (somewhere)/usbip-core.ko
- deux:# insmod (somewhere)/vhci_hcd.ko
+ trois:# usbip list -l
+ Local USB devices
+ =================
+ ...
- deux:# usbip --list 10.0.0.3
- - 10.0.0.3
- 1-1: Prolific Technology, Inc. : unknown product (067b:3507)
- : /sys/devices/pci0000:00/0000:00:1f.2/usb1/1-1
- : (Defined at Interface level) / unknown subclass / unknown protocol (00/00/00)
- : 0 - Mass Storage / SCSI / Bulk (Zip) (08/06/50)
+ - busid 3-3.2 (04bb:0206)
+ 3-3.2:1.0 -> usbip-host
+ ...
- 1-2.2.1: Apple Computer, Inc. : unknown product (05ac:0203)
- : /sys/devices/pci0000:00/0000:00:1f.2/usb1/1-2/1-2.2/1-2.2.1
- : (Defined at Interface level) / unknown subclass / unknown protocol (00/00/00)
- : 0 - Human Interface Devices / Boot Interface Subclass / Keyboard (03/01/01)
+---------------------------
+ CLIENT SIDE
+---------------------------
+First, let's list available remote devices that are marked as
+exportable on the host.
- 1-2.2.3: OmniVision Technologies, Inc. : OV511+ WebCam (05a9:a511)
- : /sys/devices/pci0000:00/0000:00:1f.2/usb1/1-2/1-2.2/1-2.2.3
- : (Defined at Interface level) / unknown subclass / unknown protocol (00/00/00)
- : 0 - Vendor Specific Class / unknown subclass / unknown protocol (ff/00/00)
+ deux:# insmod path/to/usbip-core.ko
+ deux:# insmod path/to/vhci-hcd.ko
- 3-1: Logitech, Inc. : QuickCam Pro 4000 (046d:08b2)
- : /sys/devices/pci0000:00/0000:00:1e.0/0000:02:0a.0/usb3/3-1
- : (Defined at Interface level) / unknown subclass / unknown protocol (00/00/00)
- : 0 - Data / unknown subclass / unknown protocol (0a/ff/00)
- : 1 - Audio / Control Device / unknown protocol (01/01/00)
- : 2 - Audio / Streaming / unknown protocol (01/02/00)
-
- 4-1: Logitech, Inc. : QuickCam Express (046d:0870)
- : /sys/devices/pci0000:00/0000:00:1e.0/0000:02:0a.1/usb4/4-1
- : Vendor Specific Class / Vendor Specific Subclass / Vendor Specific Protocol (ff/ff/ff)
- : 0 - Vendor Specific Class / Vendor Specific Subclass / Vendor Specific Protocol (ff/ff/ff)
-
- 4-2: Texas Instruments Japan : unknown product (08bb:2702)
- : /sys/devices/pci0000:00/0000:00:1e.0/0000:02:0a.1/usb4/4-2
- : (Defined at Interface level) / unknown subclass / unknown protocol (00/00/00)
- : 0 - Audio / Control Device / unknown protocol (01/01/00)
- : 1 - Audio / Streaming / unknown protocol (01/02/00)
-
---------------------------------------------------------------------------------------------------------
-Attach a remote usb device!
-
- deux:# usbip --attach 10.0.0.3 1-1
- port 0 attached
-
---------------------------------------------------------------------------------------------------------
-Show what devices are attached to this client.
-
- deux:# usbip --port
- Port 00: <Port in Use> at Full Speed(12Mbps)
- Prolific Technology, Inc. : unknown product (067b:3507)
- 6-1 -> usbip://10.0.0.3:3240/1-1 (remote bus/dev 001/004)
- 6-1:1.0 used by usb-storage
- /sys/class/scsi_device/0:0:0:0/device
- /sys/class/scsi_host/host0/device
- /sys/block/sda/device
+ deux:# usbip list --remote 10.0.0.3
+ Exportable USB devices
+ ======================
+ - 10.0.0.3
+ 1-1: Prolific Technology, Inc. : unknown product (067b:3507)
+ : /sys/devices/pci0000:00/0000:00:1f.2/usb1/1-1
+ : (Defined at Interface level) / unknown subclass / unknown protocol (00/00/00)
+ : 0 - Mass Storage / SCSI / Bulk (Zip) (08/06/50)
---------------------------------------------------------------------------------------------------------
-Detach the imported device.
+ 1-2.2.1: Apple Computer, Inc. : unknown product (05ac:0203)
+ : /sys/devices/pci0000:00/0000:00:1f.2/usb1/1-2/1-2.2/1-2.2.1
+ : (Defined at Interface level) / unknown subclass / unknown protocol (00/00/00)
+ : 0 - Human Interface Devices / Boot Interface Subclass / Keyboard (03/01/01)
- deux:# usbip --detach 0
- port 0 detached
+ 1-2.2.3: OmniVision Technologies, Inc. : OV511+ WebCam (05a9:a511)
+ : /sys/devices/pci0000:00/0000:00:1f.2/usb1/1-2/1-2.2/1-2.2.3
+ : (Defined at Interface level) / unknown subclass / unknown protocol (00/00/00)
+ : 0 - Vendor Specific Class / unknown subclass / unknown protocol (ff/00/00)
---------------------------------------------------------------------------------------------------------
-
-
-[Check List]
- - See Debug Tips in the project wiki.
- - http://usbip.wiki.sourceforge.net/how-to-debug-usbip
+ 3-1: Logitech, Inc. : QuickCam Pro 4000 (046d:08b2)
+ : /sys/devices/pci0000:00/0000:00:1e.0/0000:02:0a.0/usb3/3-1
+ : (Defined at Interface level) / unknown subclass / unknown protocol (00/00/00)
+ : 0 - Data / unknown subclass / unknown protocol (0a/ff/00)
+ : 1 - Audio / Control Device / unknown protocol (01/01/00)
+ : 2 - Audio / Streaming / unknown protocol (01/02/00)
+
+Attach a remote USB device:
+
+ deux:# usbip attach --host 10.0.0.3 --busid 1-1
+ port 0 attached
+
+Show the devices attached to this client:
+
+ deux:# usbip port
+ Port 00: <Port in Use> at Full Speed(12Mbps)
+ Prolific Technology, Inc. : unknown product (067b:3507)
+ 6-1 -> usbip://10.0.0.3:3240/1-1 (remote bus/dev 001/004)
+ 6-1:1.0 used by usb-storage
+ /sys/class/scsi_device/0:0:0:0/device
+ /sys/class/scsi_host/host0/device
+ /sys/block/sda/device
+
+Detach the imported device:
+
+ deux:# usbip detach --port 0
+ port 0 detached
+
+
+[Checklist]
+ - See 'Debug Tips' on the project wiki.
+ - http://usbip.wiki.sourceforge.net/how-to-debug-usbip
- usbip-host.ko must be bound to the target device.
- - See /proc/bus/usb/devices and find "Driver=..." lines of the device.
+ - See /proc/bus/usb/devices and find "Driver=..." lines of the device.
- Shutdown firewall.
- - usbip now uses TCP port 3240.
+ - usbip now uses TCP port 3240.
- Disable SELinux.
- - If possible, compile your kernel with CONFIG_USB_DEBUG flag and try
- again.
- - Check your kernel and daemon messages.
- ex. /var/log/{messages, kern.log, daemon.log, syslog}
+ - If possible, compile your kernel with CONFIG_USB_DEBUG flag and try again.
+ - Check the kernel and daemon messages.
[Contact]
- Mailing List: usbip-devel _at_ lists.sourceforge.net
+ Mailing List: linux-usb@vger.kernel.org
diff --git a/drivers/staging/usbip/userspace/cleanup.sh b/drivers/staging/usbip/userspace/cleanup.sh
index da2f89bd17c..955c3ccb729 100755
--- a/drivers/staging/usbip/userspace/cleanup.sh
+++ b/drivers/staging/usbip/userspace/cleanup.sh
@@ -1,10 +1,12 @@
-#!/bin/sh -x
-
+#!/bin/sh
if [ -r Makefile ]; then
make distclean
fi
-FILES="configure cscope.out Makefile.in depcomp compile config.guess config.sub config.h.in~ config.log config.status ltmain.sh libtool config.h.in autom4te.cache missing aclocal.m4 install-sh cmd/Makefile.in lib/Makefile.in Makefile lib/Makefile cmd/Makefile"
+FILES="aclocal.m4 autom4te.cache compile config.guess config.h.in config.log \
+ config.status config.sub configure cscope.out depcomp install-sh \
+ libsrc/Makefile libsrc/Makefile.in libtool ltmain.sh Makefile \
+ Makefile.in missing src/Makefile src/Makefile.in"
-rm -Rf $FILES
+rm -vRf $FILES
diff --git a/drivers/staging/usbip/userspace/configure.ac b/drivers/staging/usbip/userspace/configure.ac
index e3afa159116..bf5cf49cb55 100644
--- a/drivers/staging/usbip/userspace/configure.ac
+++ b/drivers/staging/usbip/userspace/configure.ac
@@ -1,8 +1,8 @@
dnl Process this file with autoconf to produce a configure script.
AC_PREREQ(2.59)
-AC_INIT([usbip], [0.1.8], [usbip-devel@lists.sourceforge.net])
-AC_DEFINE([USBIP_VERSION], [0x000106], [numeric version number])
+AC_INIT([usbip-utils], [1.1.1], [linux-usb@vger.kernel.org])
+AC_DEFINE([USBIP_VERSION], [0x00000111], [binary-coded decimal version number])
CURRENT=0
REVISION=1
@@ -29,7 +29,7 @@ AC_PROG_MAKE_SET
AC_HEADER_DIRENT
AC_HEADER_STDC
AC_CHECK_HEADERS([arpa/inet.h fcntl.h netdb.h netinet/in.h stdint.h stdlib.h dnl
- string.h strings.h sys/socket.h syslog.h unistd.h])
+ string.h sys/socket.h syslog.h unistd.h])
# Checks for typedefs, structures, and compiler characteristics.
AC_TYPE_INT32_T
@@ -41,7 +41,7 @@ AC_TYPE_UINT8_T
# Checks for library functions.
AC_FUNC_REALLOC
-AC_CHECK_FUNCS([bzero memset mkdir regcomp socket strchr strerror strstr dnl
+AC_CHECK_FUNCS([memset mkdir regcomp socket strchr strerror strstr dnl
strtoul])
AC_CHECK_HEADER([sysfs/libsysfs.h],
@@ -85,26 +85,12 @@ AC_ARG_WITH([tcp-wrappers],
[AC_MSG_RESULT([no]); LIBS="$saved_LIBS"])])
# Sets directory containing usb.ids.
-USBIDS_DIR='${datadir}/usbip'
AC_ARG_WITH([usbids-dir],
[AS_HELP_STRING([--with-usbids-dir=DIR],
- [where usb.ids is found (default ${datadir}/usbip)])],
- [USBIDS_DIR=$withval])
+ [where usb.ids is found (default /usr/share/hwdata/)])],
+ [USBIDS_DIR=$withval], [USBIDS_DIR="/usr/share/hwdata/"])
AC_SUBST([USBIDS_DIR])
-dnl FIXME: when disabled, empty directry is created
-usbids=install
-AC_ARG_ENABLE([usbids-install],
- [AS_HELP_STRING([--enable-usbids-install],
- [install usb.ids (default)])],
- [AS_CASE([$enableval],
- [yes], [usbids=install],
- [no], [usbids=notinstall],
- [AC_MSG_ERROR(
- [bad value ${enableval} for --enable-usbids-install])]
- )])
-AM_CONDITIONAL([INSTALL_USBIDS], [test x$usbids = xinstall])
-
GLIB2_REQUIRED=2.6.0
PKG_CHECK_MODULES([PACKAGE], [glib-2.0 >= $GLIB2_REQUIRED])
AC_SUBST([PACKAGE_CFLAGS])
diff --git a/drivers/staging/usbip/userspace/libsrc/Makefile.am b/drivers/staging/usbip/userspace/libsrc/Makefile.am
index 77ecf6b844b..4921189e026 100644
--- a/drivers/staging/usbip/userspace/libsrc/Makefile.am
+++ b/drivers/staging/usbip/userspace/libsrc/Makefile.am
@@ -1,7 +1,7 @@
-libusbip_la_CPPFLAGS := -DUSBIDS_FILE='"@USBIDS_DIR@/usb.ids"'
-libusbip_la_CFLAGS := @EXTRA_CFLAGS@
-libusbip_la_LDFLAGS := -version-info @LIBUSBIP_VERSION@
+libusbip_la_CPPFLAGS = -DUSBIDS_FILE='"@USBIDS_DIR@/usb.ids"'
+libusbip_la_CFLAGS = @EXTRA_CFLAGS@
+libusbip_la_LDFLAGS = -version-info @LIBUSBIP_VERSION@
lib_LTLIBRARIES := libusbip.la
-libusbip_la_SOURCES := names.c names.h stub_driver.c stub_driver.h usbip.h \
+libusbip_la_SOURCES := names.c names.h usbip_host_driver.c usbip_host_driver.h \
usbip_common.c usbip_common.h vhci_driver.c vhci_driver.h
diff --git a/drivers/staging/usbip/userspace/libsrc/stub_driver.c b/drivers/staging/usbip/userspace/libsrc/stub_driver.c
deleted file mode 100644
index cc3364345f5..00000000000
--- a/drivers/staging/usbip/userspace/libsrc/stub_driver.c
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- * Copyright (C) 2005-2007 Takahiro Hirofuchi
- */
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include "usbip.h"
-
-/* kernel module name */
-static const char *usbip_stub_driver_name = "usbip-host";
-
-
-struct usbip_stub_driver *stub_driver;
-
-static struct sysfs_driver *open_sysfs_stub_driver(void)
-{
- int ret;
-
- char sysfs_mntpath[SYSFS_PATH_MAX];
- char stub_driver_path[SYSFS_PATH_MAX];
- struct sysfs_driver *stub_driver;
-
-
- ret = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
- if (ret < 0) {
- err("sysfs must be mounted");
- return NULL;
- }
-
- snprintf(stub_driver_path, SYSFS_PATH_MAX, "%s/%s/usb/%s/%s",
- sysfs_mntpath, SYSFS_BUS_NAME, SYSFS_DRIVERS_NAME,
- usbip_stub_driver_name);
-
- stub_driver = sysfs_open_driver_path(stub_driver_path);
- if (!stub_driver) {
- err("usbip-core.ko and usbip-host.ko must be loaded");
- return NULL;
- }
-
- return stub_driver;
-}
-
-
-#define SYSFS_OPEN_RETRIES 100
-
-/* only the first interface value is true! */
-static int32_t read_attr_usbip_status(struct usb_device *udev)
-{
- char attrpath[SYSFS_PATH_MAX];
- struct sysfs_attribute *attr;
- int value = 0;
- int ret;
- struct stat s;
- int retries = SYSFS_OPEN_RETRIES;
-
- /* This access is racy!
- *
- * Just after detach, our driver removes the sysfs
- * files and recreates them.
- *
- * We may try and fail to open the usbip_status of
- * an exported device in the (short) window where
- * it has been removed and not yet recreated.
- *
- * This is a bug in the interface. Nothing we can do
- * except work around it here by polling for the sysfs
- * usbip_status to reappear.
- */
-
- snprintf(attrpath, SYSFS_PATH_MAX, "%s/%s:%d.%d/usbip_status",
- udev->path, udev->busid,
- udev->bConfigurationValue,
- 0);
-
- while (retries > 0) {
- if (stat(attrpath, &s) == 0)
- break;
-
- if (errno != ENOENT) {
- err("error stat'ing %s", attrpath);
- return -1;
- }
-
- usleep(10000); /* 10ms */
- retries--;
- }
-
- if (retries == 0)
- err("usbip_status not ready after %d retries",
- SYSFS_OPEN_RETRIES);
- else if (retries < SYSFS_OPEN_RETRIES)
- info("warning: usbip_status ready after %d retries",
- SYSFS_OPEN_RETRIES - retries);
-
- attr = sysfs_open_attribute(attrpath);
- if (!attr) {
- err("open %s", attrpath);
- return -1;
- }
-
- ret = sysfs_read_attribute(attr);
- if (ret) {
- err("read %s", attrpath);
- sysfs_close_attribute(attr);
- return -1;
- }
-
- value = atoi(attr->value);
-
- sysfs_close_attribute(attr);
-
- return value;
-}
-
-
-static void usbip_exported_device_delete(void *dev)
-{
- struct usbip_exported_device *edev =
- (struct usbip_exported_device *) dev;
-
- sysfs_close_device(edev->sudev);
- free(dev);
-}
-
-
-static struct usbip_exported_device *usbip_exported_device_new(char *sdevpath)
-{
- struct usbip_exported_device *edev = NULL;
-
- edev = (struct usbip_exported_device *) calloc(1, sizeof(*edev));
- if (!edev) {
- err("alloc device");
- return NULL;
- }
-
- edev->sudev = sysfs_open_device_path(sdevpath);
- if (!edev->sudev) {
- err("open %s", sdevpath);
- goto err;
- }
-
- read_usb_device(edev->sudev, &edev->udev);
-
- edev->status = read_attr_usbip_status(&edev->udev);
- if (edev->status < 0)
- goto err;
-
- /* reallocate buffer to include usb interface data */
- size_t size = sizeof(*edev) + edev->udev.bNumInterfaces * sizeof(struct usb_interface);
- edev = (struct usbip_exported_device *) realloc(edev, size);
- if (!edev) {
- err("alloc device");
- goto err;
- }
-
- for (int i=0; i < edev->udev.bNumInterfaces; i++)
- read_usb_interface(&edev->udev, i, &edev->uinf[i]);
-
- return edev;
-
-err:
- if (edev && edev->sudev)
- sysfs_close_device(edev->sudev);
- if (edev)
- free(edev);
- return NULL;
-}
-
-
-static int check_new(struct dlist *dlist, struct sysfs_device *target)
-{
- struct sysfs_device *dev;
-
- dlist_for_each_data(dlist, dev, struct sysfs_device) {
- if (!strncmp(dev->bus_id, target->bus_id, SYSFS_BUS_ID_SIZE))
- /* found. not new */
- return 0;
- }
-
- return 1;
-}
-
-static void delete_nothing(void *dev __attribute__((unused)))
-{
- /* do not delete anything. but, its container will be deleted. */
-}
-
-static int refresh_exported_devices(void)
-{
- struct sysfs_device *suinf; /* sysfs_device of usb_interface */
- struct dlist *suinf_list;
-
- struct sysfs_device *sudev; /* sysfs_device of usb_device */
- struct dlist *sudev_list;
-
-
- sudev_list = dlist_new_with_delete(sizeof(struct sysfs_device), delete_nothing);
-
- suinf_list = sysfs_get_driver_devices(stub_driver->sysfs_driver);
- if (!suinf_list) {
- printf("Bind usbip-host.ko to a usb device to be exportable!\n");
- goto bye;
- }
-
- /* collect unique USB devices (not interfaces) */
- dlist_for_each_data(suinf_list, suinf, struct sysfs_device) {
-
- /* get usb device of this usb interface */
- sudev = sysfs_get_device_parent(suinf);
- if (!sudev) {
- err("get parent dev of %s", suinf->name);
- continue;
- }
-
- if (check_new(sudev_list, sudev)) {
- dlist_unshift(sudev_list, sudev);
- }
- }
-
- dlist_for_each_data(sudev_list, sudev, struct sysfs_device) {
- struct usbip_exported_device *edev;
-
- edev = usbip_exported_device_new(sudev->path);
- if (!edev) {
- err("usbip_exported_device new");
- continue;
- }
-
- dlist_unshift(stub_driver->edev_list, (void *) edev);
- stub_driver->ndevs++;
- }
-
-
- dlist_destroy(sudev_list);
-
-bye:
-
- return 0;
-}
-
-int usbip_stub_refresh_device_list(void)
-{
- int ret;
-
- if (stub_driver->edev_list)
- dlist_destroy(stub_driver->edev_list);
-
- stub_driver->ndevs = 0;
-
- stub_driver->edev_list = dlist_new_with_delete(sizeof(struct usbip_exported_device),
- usbip_exported_device_delete);
- if (!stub_driver->edev_list) {
- err("alloc dlist");
- return -1;
- }
-
- ret = refresh_exported_devices();
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-int usbip_stub_driver_open(void)
-{
- int ret;
-
-
- stub_driver = (struct usbip_stub_driver *) calloc(1, sizeof(*stub_driver));
- if (!stub_driver) {
- err("alloc stub_driver");
- return -1;
- }
-
- stub_driver->ndevs = 0;
-
- stub_driver->edev_list = dlist_new_with_delete(sizeof(struct usbip_exported_device),
- usbip_exported_device_delete);
- if (!stub_driver->edev_list) {
- err("alloc dlist");
- goto err;
- }
-
- stub_driver->sysfs_driver = open_sysfs_stub_driver();
- if (!stub_driver->sysfs_driver)
- goto err;
-
- ret = refresh_exported_devices();
- if (ret < 0)
- goto err;
-
- return 0;
-
-
-err:
- if (stub_driver->sysfs_driver)
- sysfs_close_driver(stub_driver->sysfs_driver);
- if (stub_driver->edev_list)
- dlist_destroy(stub_driver->edev_list);
- free(stub_driver);
-
- stub_driver = NULL;
- return -1;
-}
-
-
-void usbip_stub_driver_close(void)
-{
- if (!stub_driver)
- return;
-
- if (stub_driver->edev_list)
- dlist_destroy(stub_driver->edev_list);
- if (stub_driver->sysfs_driver)
- sysfs_close_driver(stub_driver->sysfs_driver);
- free(stub_driver);
-
- stub_driver = NULL;
-}
-
-int usbip_stub_export_device(struct usbip_exported_device *edev, int sockfd)
-{
- char attrpath[SYSFS_PATH_MAX];
- struct sysfs_attribute *attr;
- char sockfd_buff[30];
- int ret;
-
-
- if (edev->status != SDEV_ST_AVAILABLE) {
- info("device not available, %s", edev->udev.busid);
- switch( edev->status ) {
- case SDEV_ST_ERROR:
- info(" status SDEV_ST_ERROR");
- break;
- case SDEV_ST_USED:
- info(" status SDEV_ST_USED");
- break;
- default:
- info(" status unknown: 0x%x", edev->status);
- }
- return -1;
- }
-
- /* only the first interface is true */
- snprintf(attrpath, sizeof(attrpath), "%s/%s:%d.%d/%s",
- edev->udev.path,
- edev->udev.busid,
- edev->udev.bConfigurationValue, 0,
- "usbip_sockfd");
-
- attr = sysfs_open_attribute(attrpath);
- if (!attr) {
- err("open %s", attrpath);
- return -1;
- }
-
- snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
-
- dbg("write: %s", sockfd_buff);
-
- ret = sysfs_write_attribute(attr, sockfd_buff, strlen(sockfd_buff));
- if (ret < 0) {
- err("write sockfd %s to %s", sockfd_buff, attrpath);
- goto err_write_sockfd;
- }
-
- info("connect %s", edev->udev.busid);
-
-err_write_sockfd:
- sysfs_close_attribute(attr);
-
- return ret;
-}
-
-struct usbip_exported_device *usbip_stub_get_device(int num)
-{
- struct usbip_exported_device *edev;
- struct dlist *dlist = stub_driver->edev_list;
- int count = 0;
-
- dlist_for_each_data(dlist, edev, struct usbip_exported_device) {
- if (num == count)
- return edev;
- else
- count++ ;
- }
-
- return NULL;
-}
diff --git a/drivers/staging/usbip/userspace/libsrc/stub_driver.h b/drivers/staging/usbip/userspace/libsrc/stub_driver.h
deleted file mode 100644
index 3107d18de65..00000000000
--- a/drivers/staging/usbip/userspace/libsrc/stub_driver.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2005-2007 Takahiro Hirofuchi
- */
-
-#ifndef _USBIP_STUB_DRIVER_H
-#define _USBIP_STUB_DRIVER_H
-
-#include "usbip.h"
-
-
-struct usbip_stub_driver {
- int ndevs;
- struct sysfs_driver *sysfs_driver;
-
- struct dlist *edev_list; /* list of exported device */
-};
-
-struct usbip_exported_device {
- struct sysfs_device *sudev;
-
- int32_t status;
- struct usb_device udev;
- struct usb_interface uinf[];
-};
-
-
-extern struct usbip_stub_driver *stub_driver;
-
-int usbip_stub_driver_open(void);
-void usbip_stub_driver_close(void);
-
-int usbip_stub_refresh_device_list(void);
-int usbip_stub_export_device(struct usbip_exported_device *edev, int sockfd);
-
-struct usbip_exported_device *usbip_stub_get_device(int num);
-#endif
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip.h b/drivers/staging/usbip/userspace/libsrc/usbip.h
deleted file mode 100644
index 7cb8e6fef35..00000000000
--- a/drivers/staging/usbip/userspace/libsrc/usbip.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (C) 2005-2007 Takahiro Hirofuchi
- */
-
-#ifndef _USBIP_H
-#define _USBIP_H
-
-#ifdef HAVE_CONFIG_H
-#include "../config.h"
-#endif
-
-#include "usbip_common.h"
-#include "stub_driver.h"
-#include "vhci_driver.h"
-#ifdef DMALLOC
-#include <dmalloc.h>
-#endif
-
-#endif
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_common.c b/drivers/staging/usbip/userspace/libsrc/usbip_common.c
index a128a924b27..154b4b1103e 100644
--- a/drivers/staging/usbip/userspace/libsrc/usbip_common.c
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_common.c
@@ -2,9 +2,12 @@
* Copyright (C) 2005-2007 Takahiro Hirofuchi
*/
-#include "usbip.h"
+#include "usbip_common.h"
#include "names.h"
+#undef PROGNAME
+#define PROGNAME "libusbip"
+
int usbip_use_syslog = 0;
int usbip_use_stderr = 0;
int usbip_use_debug = 0;
@@ -64,7 +67,7 @@ const char *usbip_speed_string(int num)
#define DBG_UINF_INTEGER(name)\
dbg("%-20s = %x", to_string(name), (int) uinf->name)
-void dump_usb_interface(struct usb_interface *uinf)
+void dump_usb_interface(struct usbip_usb_interface *uinf)
{
char buff[100];
usbip_names_get_class(buff, sizeof(buff),
@@ -74,7 +77,7 @@ void dump_usb_interface(struct usb_interface *uinf)
dbg("%-20s = %s", "Interface(C/SC/P)", buff);
}
-void dump_usb_device(struct usb_device *udev)
+void dump_usb_device(struct usbip_usb_device *udev)
{
char buff[100];
@@ -117,19 +120,19 @@ int read_attr_value(struct sysfs_device *dev, const char *name, const char *form
attr = sysfs_open_attribute(attrpath);
if (!attr) {
- err("open attr %s", attrpath);
+ dbg("sysfs_open_attribute failed: %s", attrpath);
return 0;
}
ret = sysfs_read_attribute(attr);
if (ret < 0) {
- err("read attr");
+ dbg("sysfs_read_attribute failed");
goto err;
}
ret = sscanf(attr->value, format, &num);
if (ret < 1) {
- err("sscanf");
+ dbg("sscanf failed");
goto err;
}
@@ -151,19 +154,19 @@ int read_attr_speed(struct sysfs_device *dev)
attr = sysfs_open_attribute(attrpath);
if (!attr) {
- err("open attr");
+ dbg("sysfs_open_attribute failed: %s", attrpath);
return 0;
}
ret = sysfs_read_attribute(attr);
if (ret < 0) {
- err("read attr");
+ dbg("sysfs_read_attribute failed");
goto err;
}
ret = sscanf(attr->value, "%s\n", speed);
if (ret < 1) {
- err("sscanf");
+ dbg("sscanf failed");
goto err;
}
err:
@@ -181,7 +184,7 @@ err:
do { (object)->name = (type) read_attr_value(dev, to_string(name), format); } while (0)
-int read_usb_device(struct sysfs_device *sdev, struct usb_device *udev)
+int read_usb_device(struct sysfs_device *sdev, struct usbip_usb_device *udev)
{
uint32_t busnum, devnum;
@@ -209,7 +212,8 @@ int read_usb_device(struct sysfs_device *sdev, struct usb_device *udev)
return 0;
}
-int read_usb_interface(struct usb_device *udev, int i, struct usb_interface *uinf)
+int read_usb_interface(struct usbip_usb_device *udev, int i,
+ struct usbip_usb_interface *uinf)
{
char busid[SYSFS_BUS_ID_SIZE];
struct sysfs_device *sif;
@@ -218,7 +222,7 @@ int read_usb_interface(struct usb_device *udev, int i, struct usb_interface *uin
sif = sysfs_open_device("usb", busid);
if (!sif) {
- err("open sif of %s", busid);
+ dbg("sysfs_open_device(\"usb\", \"%s\") failed", busid);
return -1;
}
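A short userspace sketch (assumed, not in the patch) of how the renamed helpers are meant to be used together with libsysfs; example_dump_device() is a hypothetical caller and error handling is trimmed:

#include <sysfs/libsysfs.h>

#include "usbip_common.h"

static int example_dump_device(char *busid)
{
	struct sysfs_device *sdev;
	struct usbip_usb_device udev;

	sdev = sysfs_open_device("usb", busid);
	if (!sdev)
		return -1;

	read_usb_device(sdev, &udev);
	dump_usb_device(&udev);

	sysfs_close_device(sdev);
	return 0;
}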
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_common.h b/drivers/staging/usbip/userspace/libsrc/usbip_common.h
index c254b5481f7..eedefbd12ea 100644
--- a/drivers/staging/usbip/userspace/libsrc/usbip_common.h
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_common.h
@@ -2,21 +2,18 @@
* Copyright (C) 2005-2007 Takahiro Hirofuchi
*/
-#ifndef _USBIP_COMMON_H
-#define _USBIP_COMMON_H
+#ifndef __USBIP_COMMON_H
+#define __USBIP_COMMON_H
+
+#include <sysfs/libsysfs.h>
-#include <unistd.h>
#include <stdint.h>
-#include <syslog.h>
-#include <errno.h>
#include <stdio.h>
-#include <string.h>
#include <stdlib.h>
-#include <strings.h>
+#include <string.h>
-#include <sysfs/libsysfs.h>
-#include <netdb.h>
-#include <sys/socket.h>
+#include <syslog.h>
+#include <unistd.h>
#ifndef USBIDS_FILE
#define USBIDS_FILE "/usr/share/hwdata/usb.ids"
@@ -26,7 +23,59 @@
#define VHCI_STATE_PATH "/var/run/vhci_hcd"
#endif
-//#include <linux/usb_ch9.h>
+/* kernel module names */
+#define USBIP_CORE_MOD_NAME "usbip-core"
+#define USBIP_HOST_DRV_NAME "usbip-host"
+#define USBIP_VHCI_DRV_NAME "vhci_hcd"
+
+extern int usbip_use_syslog;
+extern int usbip_use_stderr;
+extern int usbip_use_debug;
+
+#define PROGNAME "usbip"
+
+#define pr_fmt(fmt) "%s: %s: " fmt "\n", PROGNAME
+#define dbg_fmt(fmt) pr_fmt("%s:%d:[%s] " fmt), "debug", \
+ __FILE__, __LINE__, __FUNCTION__
+
+#define err(fmt, args...) \
+ do { \
+ if (usbip_use_syslog) { \
+ syslog(LOG_ERR, pr_fmt(fmt), "error", ##args); \
+ } \
+ if (usbip_use_stderr) { \
+ fprintf(stderr, pr_fmt(fmt), "error", ##args); \
+ } \
+ } while (0)
+
+#define info(fmt, args...) \
+ do { \
+ if (usbip_use_syslog) { \
+ syslog(LOG_INFO, pr_fmt(fmt), "info", ##args); \
+ } \
+ if (usbip_use_stderr) { \
+ fprintf(stderr, pr_fmt(fmt), "info", ##args); \
+ } \
+ } while (0)
+
+#define dbg(fmt, args...) \
+ do { \
+ if (usbip_use_debug) { \
+ if (usbip_use_syslog) { \
+ syslog(LOG_DEBUG, dbg_fmt(fmt), ##args); \
+ } \
+ if (usbip_use_stderr) { \
+ fprintf(stderr, dbg_fmt(fmt), ##args); \
+ } \
+ } \
+ } while (0)
+
+#define BUG() \
+ do { \
+ err("sorry, it's a bug!"); \
+ abort(); \
+ } while (0)
+
enum usb_device_speed {
USB_SPEED_UNKNOWN = 0, /* enumerating */
USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */
@@ -51,66 +100,14 @@ enum usbip_device_status{
VDEV_ST_ERROR
};
-extern int usbip_use_syslog;
-extern int usbip_use_stderr;
-extern int usbip_use_debug ;
-
-#define err(fmt, args...) do { \
- if (usbip_use_syslog) { \
- syslog(LOG_ERR, "usbip err: %13s:%4d (%-12s) " fmt "\n", \
- __FILE__, __LINE__, __FUNCTION__, ##args); \
- } \
- if (usbip_use_stderr) { \
- fprintf(stderr, "usbip err: %13s:%4d (%-12s) " fmt "\n", \
- __FILE__, __LINE__, __FUNCTION__, ##args); \
- } \
-} while (0)
-
-#define notice(fmt, args...) do { \
- if (usbip_use_syslog) { \
- syslog(LOG_DEBUG, "usbip: " fmt, ##args); \
- } \
- if (usbip_use_stderr) { \
- fprintf(stderr, "usbip: " fmt "\n", ##args); \
- } \
-} while (0)
-
-#define info(fmt, args...) do { \
- if (usbip_use_syslog) { \
- syslog(LOG_DEBUG, fmt, ##args); \
- } \
- if (usbip_use_stderr) { \
- fprintf(stderr, fmt "\n", ##args); \
- } \
-} while (0)
-
-#define dbg(fmt, args...) do { \
- if (usbip_use_debug) { \
- if (usbip_use_syslog) { \
- syslog(LOG_DEBUG, "usbip dbg: %13s:%4d (%-12s) " fmt, \
- __FILE__, __LINE__, __FUNCTION__, ##args); \
- } \
- if (usbip_use_stderr) { \
- fprintf(stderr, "usbip dbg: %13s:%4d (%-12s) " fmt "\n", \
- __FILE__, __LINE__, __FUNCTION__, ##args); \
- } \
- } \
-} while (0)
-
-
-#define BUG() do { err("sorry, it's a bug"); abort(); } while (0)
-
-
-struct usb_interface {
+struct usbip_usb_interface {
uint8_t bInterfaceClass;
uint8_t bInterfaceSubClass;
uint8_t bInterfaceProtocol;
uint8_t padding; /* alignment */
} __attribute__((packed));
-
-
-struct usb_device {
+struct usbip_usb_device {
char path[SYSFS_PATH_MAX];
char busid[SYSFS_BUS_ID_SIZE];
@@ -132,11 +129,12 @@ struct usb_device {
#define to_string(s) #s
-void dump_usb_interface(struct usb_interface *);
-void dump_usb_device(struct usb_device *);
-int read_usb_device(struct sysfs_device *sdev, struct usb_device *udev);
+void dump_usb_interface(struct usbip_usb_interface *);
+void dump_usb_device(struct usbip_usb_device *);
+int read_usb_device(struct sysfs_device *sdev, struct usbip_usb_device *udev);
int read_attr_value(struct sysfs_device *dev, const char *name, const char *format);
-int read_usb_interface(struct usb_device *udev, int i, struct usb_interface *uinf);
+int read_usb_interface(struct usbip_usb_device *udev, int i,
+ struct usbip_usb_interface *uinf);
const char *usbip_speed_string(int num);
const char *usbip_status_string(int32_t status);
@@ -146,4 +144,4 @@ void usbip_names_free(void);
void usbip_names_get_product(char *buff, size_t size, uint16_t vendor, uint16_t product);
void usbip_names_get_class(char *buff, size_t size, uint8_t class, uint8_t subclass, uint8_t protocol);
-#endif
+#endif /* __USBIP_COMMON_H */
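To illustrate the reworked logging macros (a hedged sketch, not part of the patch): output is routed to syslog and/or stderr by the usbip_use_* globals, and dbg() is additionally gated on usbip_use_debug.

#include "usbip_common.h"

static void example_logging(void)
{
	usbip_use_stderr = 1;	/* print to stderr */
	usbip_use_debug  = 1;	/* enable dbg() output */

	info("prefixed with \"%s: info:\"", PROGNAME);
	dbg("adds file, line and function to the prefix");
	err("formatted like printf: %d", 42);
}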
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c
new file mode 100644
index 00000000000..71a449cf50d
--- /dev/null
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <errno.h>
+#include <unistd.h>
+
+#include "usbip_common.h"
+#include "usbip_host_driver.h"
+
+#undef PROGNAME
+#define PROGNAME "libusbip"
+
+struct usbip_host_driver *host_driver;
+
+#define SYSFS_OPEN_RETRIES 100
+
+/* only the first interface value is true! */
+static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
+{
+ char attrpath[SYSFS_PATH_MAX];
+ struct sysfs_attribute *attr;
+ int value = 0;
+ int rc;
+ struct stat s;
+ int retries = SYSFS_OPEN_RETRIES;
+
+ /* This access is racy!
+ *
+ * Just after detach, our driver removes the sysfs
+ * files and recreates them.
+ *
+ * We may try and fail to open the usbip_status of
+ * an exported device in the (short) window where
+ * it has been removed and not yet recreated.
+ *
+ * This is a bug in the interface. Nothing we can do
+ * except work around it here by polling for the sysfs
+ * usbip_status to reappear.
+ */
+
+ snprintf(attrpath, SYSFS_PATH_MAX, "%s/%s:%d.%d/usbip_status",
+ udev->path, udev->busid, udev->bConfigurationValue, 0);
+
+ while (retries > 0) {
+ if (stat(attrpath, &s) == 0)
+ break;
+
+ if (errno != ENOENT) {
+ dbg("stat failed: %s", attrpath);
+ return -1;
+ }
+
+ usleep(10000); /* 10ms */
+ retries--;
+ }
+
+ if (retries == 0)
+ dbg("usbip_status not ready after %d retries",
+ SYSFS_OPEN_RETRIES);
+ else if (retries < SYSFS_OPEN_RETRIES)
+ dbg("warning: usbip_status ready after %d retries",
+ SYSFS_OPEN_RETRIES - retries);
+
+ attr = sysfs_open_attribute(attrpath);
+ if (!attr) {
+ dbg("sysfs_open_attribute failed: %s", attrpath);
+ return -1;
+ }
+
+ rc = sysfs_read_attribute(attr);
+ if (rc) {
+ dbg("sysfs_read_attribute failed: %s", attrpath);
+ sysfs_close_attribute(attr);
+ return -1;
+ }
+
+ value = atoi(attr->value);
+
+ sysfs_close_attribute(attr);
+
+ return value;
+}
+
+static struct usbip_exported_device *usbip_exported_device_new(char *sdevpath)
+{
+ struct usbip_exported_device *edev = NULL;
+ size_t size;
+ int i;
+
+ edev = calloc(1, sizeof(*edev));
+ if (!edev) {
+ dbg("calloc failed");
+ return NULL;
+ }
+
+ edev->sudev = sysfs_open_device_path(sdevpath);
+ if (!edev->sudev) {
+ dbg("sysfs_open_device_path failed: %s", sdevpath);
+ goto err;
+ }
+
+ read_usb_device(edev->sudev, &edev->udev);
+
+ edev->status = read_attr_usbip_status(&edev->udev);
+ if (edev->status < 0)
+ goto err;
+
+ /* reallocate buffer to include usb interface data */
+ size = sizeof(*edev) + edev->udev.bNumInterfaces *
+ sizeof(struct usbip_usb_interface);
+
+ edev = realloc(edev, size);
+ if (!edev) {
+ dbg("realloc failed");
+ goto err;
+ }
+
+ for (i = 0; i < edev->udev.bNumInterfaces; i++)
+ read_usb_interface(&edev->udev, i, &edev->uinf[i]);
+
+ return edev;
+err:
+ if (edev && edev->sudev)
+ sysfs_close_device(edev->sudev);
+ if (edev)
+ free(edev);
+
+ return NULL;
+}
+
+static int check_new(struct dlist *dlist, struct sysfs_device *target)
+{
+ struct sysfs_device *dev;
+
+ dlist_for_each_data(dlist, dev, struct sysfs_device) {
+ if (!strncmp(dev->bus_id, target->bus_id, SYSFS_BUS_ID_SIZE))
+ /* device found and is not new */
+ return 0;
+ }
+ return 1;
+}
+
+static void delete_nothing(void *unused_data)
+{
+ /*
+ * NOTE: Do not delete anything, but the container will be deleted.
+ */
+ (void) unused_data;
+}
+
+static int refresh_exported_devices(void)
+{
+ /* sysfs_device of usb_interface */
+ struct sysfs_device *suintf;
+ struct dlist *suintf_list;
+ /* sysfs_device of usb_device */
+ struct sysfs_device *sudev;
+ struct dlist *sudev_list;
+ struct usbip_exported_device *edev;
+
+ sudev_list = dlist_new_with_delete(sizeof(struct sysfs_device),
+ delete_nothing);
+
+ suintf_list = sysfs_get_driver_devices(host_driver->sysfs_driver);
+ if (!suintf_list) {
+ /*
+ * Not an error condition. There are simply no devices bound to
+ * the driver yet.
+ */
+ dbg("bind " USBIP_HOST_DRV_NAME ".ko to a usb device to be "
+ "exportable!");
+ return 0;
+ }
+
+ /* collect unique USB devices (not interfaces) */
+ dlist_for_each_data(suintf_list, suintf, struct sysfs_device) {
+ /* get usb device of this usb interface */
+ sudev = sysfs_get_device_parent(suintf);
+ if (!sudev) {
+ dbg("sysfs_get_device_parent failed: %s", suintf->name);
+ continue;
+ }
+
+ if (check_new(sudev_list, sudev)) {
+ /* insert item at head of list */
+ dlist_unshift(sudev_list, sudev);
+ }
+ }
+
+ dlist_for_each_data(sudev_list, sudev, struct sysfs_device) {
+ edev = usbip_exported_device_new(sudev->path);
+ if (!edev) {
+ dbg("usbip_exported_device_new failed");
+ continue;
+ }
+
+ dlist_unshift(host_driver->edev_list, edev);
+ host_driver->ndevs++;
+ }
+
+ dlist_destroy(sudev_list);
+
+ return 0;
+}
+
+static struct sysfs_driver *open_sysfs_host_driver(void)
+{
+ char bus_type[] = "usb";
+ char sysfs_mntpath[SYSFS_PATH_MAX];
+ char host_drv_path[SYSFS_PATH_MAX];
+ struct sysfs_driver *host_drv;
+ int rc;
+
+ rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
+ if (rc < 0) {
+ dbg("sysfs_get_mnt_path failed");
+ return NULL;
+ }
+
+ snprintf(host_drv_path, SYSFS_PATH_MAX, "%s/%s/%s/%s/%s",
+ sysfs_mntpath, SYSFS_BUS_NAME, bus_type, SYSFS_DRIVERS_NAME,
+ USBIP_HOST_DRV_NAME);
+
+ host_drv = sysfs_open_driver_path(host_drv_path);
+ if (!host_drv) {
+ dbg("sysfs_open_driver_path failed");
+ return NULL;
+ }
+
+ return host_drv;
+}
+
+static void usbip_exported_device_delete(void *dev)
+{
+ struct usbip_exported_device *edev = dev;
+ sysfs_close_device(edev->sudev);
+ free(dev);
+}
+
+int usbip_host_driver_open(void)
+{
+ int rc;
+
+ host_driver = calloc(1, sizeof(*host_driver));
+ if (!host_driver) {
+ dbg("calloc failed");
+ return -1;
+ }
+
+ host_driver->ndevs = 0;
+ host_driver->edev_list =
+ dlist_new_with_delete(sizeof(struct usbip_exported_device),
+ usbip_exported_device_delete);
+ if (!host_driver->edev_list) {
+ dbg("dlist_new_with_delete failed");
+ goto err_free_host_driver;
+ }
+
+ host_driver->sysfs_driver = open_sysfs_host_driver();
+ if (!host_driver->sysfs_driver)
+ goto err_destroy_edev_list;
+
+ rc = refresh_exported_devices();
+ if (rc < 0)
+ goto err_close_sysfs_driver;
+
+ return 0;
+
+err_close_sysfs_driver:
+ sysfs_close_driver(host_driver->sysfs_driver);
+err_destroy_edev_list:
+ dlist_destroy(host_driver->edev_list);
+err_free_host_driver:
+ free(host_driver);
+ host_driver = NULL;
+
+ return -1;
+}
+
+void usbip_host_driver_close(void)
+{
+ if (!host_driver)
+ return;
+
+ if (host_driver->edev_list)
+ dlist_destroy(host_driver->edev_list);
+ if (host_driver->sysfs_driver)
+ sysfs_close_driver(host_driver->sysfs_driver);
+
+ free(host_driver);
+ host_driver = NULL;
+}
+
+int usbip_host_refresh_device_list(void)
+{
+ int rc;
+
+ if (host_driver->edev_list)
+ dlist_destroy(host_driver->edev_list);
+
+ host_driver->ndevs = 0;
+ host_driver->edev_list =
+ dlist_new_with_delete(sizeof(struct usbip_exported_device),
+ usbip_exported_device_delete);
+ if (!host_driver->edev_list) {
+ dbg("dlist_new_with_delete failed");
+ return -1;
+ }
+
+ rc = refresh_exported_devices();
+ if (rc < 0)
+ return -1;
+
+ return 0;
+}
+
+int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
+{
+ char attr_name[] = "usbip_sockfd";
+ char attr_path[SYSFS_PATH_MAX];
+ struct sysfs_attribute *attr;
+ char sockfd_buff[30];
+ int ret;
+
+ if (edev->status != SDEV_ST_AVAILABLE) {
+ dbg("device not available: %s", edev->udev.busid);
+ switch (edev->status) {
+ case SDEV_ST_ERROR:
+ dbg("status SDEV_ST_ERROR");
+ break;
+ case SDEV_ST_USED:
+ dbg("status SDEV_ST_USED");
+ break;
+ default:
+ dbg("status unknown: 0x%x", edev->status);
+ }
+ return -1;
+ }
+
+ /* only the first interface is true */
+ snprintf(attr_path, sizeof(attr_path), "%s/%s:%d.%d/%s",
+ edev->udev.path, edev->udev.busid,
+ edev->udev.bConfigurationValue, 0, attr_name);
+
+ attr = sysfs_open_attribute(attr_path);
+ if (!attr) {
+ dbg("sysfs_open_attribute failed: %s", attr_path);
+ return -1;
+ }
+
+ snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
+ dbg("write: %s", sockfd_buff);
+
+ ret = sysfs_write_attribute(attr, sockfd_buff, strlen(sockfd_buff));
+ if (ret < 0) {
+ dbg("sysfs_write_attribute failed: sockfd %s to %s",
+ sockfd_buff, attr_path);
+ goto err_write_sockfd;
+ }
+
+ dbg("connect: %s", edev->udev.busid);
+
+err_write_sockfd:
+ sysfs_close_attribute(attr);
+
+ return ret;
+}
+
+struct usbip_exported_device *usbip_host_get_device(int num)
+{
+ struct usbip_exported_device *edev;
+ struct dlist *dlist = host_driver->edev_list;
+ int cnt = 0;
+
+ dlist_for_each_data(dlist, edev, struct usbip_exported_device) {
+ if (num == cnt)
+ return edev;
+ else
+ cnt++;
+ }
+
+ return NULL;
+}
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h
new file mode 100644
index 00000000000..34fd14cbfc4
--- /dev/null
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __USBIP_HOST_DRIVER_H
+#define __USBIP_HOST_DRIVER_H
+
+#include <stdint.h>
+#include "usbip_common.h"
+
+struct usbip_host_driver {
+ int ndevs;
+ struct sysfs_driver *sysfs_driver;
+ /* list of exported device */
+ struct dlist *edev_list;
+};
+
+struct usbip_exported_device {
+ struct sysfs_device *sudev;
+ int32_t status;
+ struct usbip_usb_device udev;
+ struct usbip_usb_interface uinf[];
+};
+
+extern struct usbip_host_driver *host_driver;
+
+int usbip_host_driver_open(void);
+void usbip_host_driver_close(void);
+
+int usbip_host_refresh_device_list(void);
+int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd);
+struct usbip_exported_device *usbip_host_get_device(int num);
+
+#endif /* __USBIP_HOST_DRIVER_H */
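A sketch of the call sequence the new host-driver API implies (how usbipd would presumably drive it); example_export_first_device() and its sockfd argument are assumptions for illustration:

#include "usbip_host_driver.h"

static int example_export_first_device(int sockfd)
{
	struct usbip_exported_device *edev;
	int rc = -1;

	if (usbip_host_driver_open() < 0)
		return -1;

	if (usbip_host_refresh_device_list() == 0 &&
	    host_driver->ndevs > 0) {
		edev = usbip_host_get_device(0);
		if (edev)
			rc = usbip_host_export_device(edev, sockfd);
	}

	usbip_host_driver_close();
	return rc;
}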
diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
index db43f8d2eb8..abbc285f433 100644
--- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
+++ b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
@@ -2,11 +2,11 @@
* Copyright (C) 2005-2007 Takahiro Hirofuchi
*/
+#include "usbip_common.h"
+#include "vhci_driver.h"
-#include "usbip.h"
-
-
-static const char vhci_driver_name[] = "vhci_hcd";
+#undef PROGNAME
+#define PROGNAME "libusbip"
struct usbip_vhci_driver *vhci_driver;
@@ -16,17 +16,19 @@ static struct usbip_imported_device *imported_device_init(struct usbip_imported_
sudev = sysfs_open_device("usb", busid);
if (!sudev) {
- err("sysfs_open_device %s", busid);
+ dbg("sysfs_open_device failed: %s", busid);
goto err;
}
read_usb_device(sudev, &idev->udev);
sysfs_close_device(sudev);
/* add class devices of this imported device */
- struct class_device *cdev;
- dlist_for_each_data(vhci_driver->cdev_list, cdev, struct class_device) {
- if (!strncmp(cdev->devpath, idev->udev.path, strlen(idev->udev.path))) {
- struct class_device *new_cdev;
+ struct usbip_class_device *cdev;
+ dlist_for_each_data(vhci_driver->cdev_list, cdev,
+ struct usbip_class_device) {
+ if (!strncmp(cdev->dev_path, idev->udev.path,
+ strlen(idev->udev.path))) {
+ struct usbip_class_device *new_cdev;
/* alloc and copy because dlist is linked from only one list */
new_cdev = calloc(1, sizeof(*new_cdev));
@@ -53,7 +55,7 @@ static int parse_status(char *value)
for (int i = 0; i < vhci_driver->nports; i++)
- bzero(&vhci_driver->idev[i], sizeof(struct usbip_imported_device));
+ memset(&vhci_driver->idev[i], 0, sizeof(vhci_driver->idev[i]));
/* skip a header line */
@@ -69,7 +71,7 @@ static int parse_status(char *value)
&devid, &socket, lbusid);
if (ret < 5) {
- err("scanf %d", ret);
+ dbg("sscanf failed: %d", ret);
BUG();
}
@@ -90,16 +92,16 @@ static int parse_status(char *value)
idev->busnum = (devid >> 16);
idev->devnum = (devid & 0x0000ffff);
- idev->cdev_list = dlist_new(sizeof(struct class_device));
+ idev->cdev_list = dlist_new(sizeof(struct usbip_class_device));
if (!idev->cdev_list) {
- err("init new device");
+ dbg("dlist_new failed");
return -1;
}
if (idev->status != VDEV_ST_NULL && idev->status != VDEV_ST_NOTASSIGNED) {
idev = imported_device_init(idev, lbusid);
if (!idev) {
- err("init new device");
+ dbg("imported_device_init failed");
return -1;
}
}
@@ -118,29 +120,29 @@ static int parse_status(char *value)
static int check_usbip_device(struct sysfs_class_device *cdev)
{
- char clspath[SYSFS_PATH_MAX]; /* /sys/class/video4linux/video0/device */
- char devpath[SYSFS_PATH_MAX]; /* /sys/devices/platform/vhci_hcd/usb6/6-1:1.1 */
-
+ char class_path[SYSFS_PATH_MAX]; /* /sys/class/video4linux/video0/device */
+ char dev_path[SYSFS_PATH_MAX]; /* /sys/devices/platform/vhci_hcd/usb6/6-1:1.1 */
int ret;
+ struct usbip_class_device *usbip_cdev;
- snprintf(clspath, sizeof(clspath), "%s/device", cdev->path);
+ snprintf(class_path, sizeof(class_path), "%s/device", cdev->path);
- ret = sysfs_get_link(clspath, devpath, SYSFS_PATH_MAX);
- if (!ret) {
- if (!strncmp(devpath, vhci_driver->hc_device->path,
- strlen(vhci_driver->hc_device->path))) {
+ ret = sysfs_get_link(class_path, dev_path, sizeof(dev_path));
+ if (ret == 0) {
+ if (!strncmp(dev_path, vhci_driver->hc_device->path,
+ strlen(vhci_driver->hc_device->path))) {
/* found usbip device */
- struct class_device *cdev;
-
- cdev = calloc(1, sizeof(*cdev));
+ usbip_cdev = calloc(1, sizeof(*usbip_cdev));
- if (!cdev) {
+ if (!usbip_cdev) {
- err("calloc cdev");
+ dbg("calloc failed");
return -1;
}
- dlist_unshift(vhci_driver->cdev_list, (void*) cdev);
- strncpy(cdev->clspath, clspath, sizeof(cdev->clspath));
- strncpy(cdev->devpath, devpath, sizeof(cdev->clspath));
- dbg(" found %s %s", clspath, devpath);
+ dlist_unshift(vhci_driver->cdev_list, usbip_cdev);
+ strncpy(usbip_cdev->class_path, class_path,
+ sizeof(usbip_cdev->class_path));
+ strncpy(usbip_cdev->dev_path, dev_path,
+ sizeof(usbip_cdev->dev_path));
+ dbg("found: %s %s", class_path, dev_path);
}
}
@@ -157,11 +159,11 @@ static int search_class_for_usbip_device(char *cname)
class = sysfs_open_class(cname);
if (!class) {
- err("open class");
+ dbg("sysfs_open_class failed");
return -1;
}
- dbg("class %s", class->name);
+ dbg("class: %s", class->name);
cdev_list = sysfs_get_class_devices(class);
if (!cdev_list)
@@ -169,7 +171,7 @@ static int search_class_for_usbip_device(char *cname)
goto out;
dlist_for_each_data(cdev_list, cdev, struct sysfs_class_device) {
- dbg(" cdev %s", cdev->name);
+ dbg("cdev: %s", cdev->name);
ret = check_usbip_device(cdev);
if (ret < 0)
goto out;
@@ -187,11 +189,22 @@ static int refresh_class_device_list(void)
int ret;
struct dlist *cname_list;
char *cname;
+ char sysfs_mntpath[SYSFS_PATH_MAX];
+ char class_path[SYSFS_PATH_MAX];
+
+ ret = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
+ if (ret < 0) {
+ dbg("sysfs_get_mnt_path failed");
+ return -1;
+ }
+
+ snprintf(class_path, sizeof(class_path), "%s/%s", sysfs_mntpath,
+ SYSFS_CLASS_NAME);
/* search under /sys/class */
- cname_list = sysfs_open_directory_list("/sys/class");
+ cname_list = sysfs_open_directory_list(class_path);
if (!cname_list) {
- err("open class directory");
+ dbg("sysfs_open_directory failed");
return -1;
}
@@ -221,45 +234,42 @@ static int refresh_imported_device_list(void)
attr_status = sysfs_get_device_attr(vhci_driver->hc_device, "status");
if (!attr_status) {
- err("get attr %s of %s", "status", vhci_driver->hc_device->name);
+ dbg("sysfs_get_device_attr(\"status\") failed: %s",
+ vhci_driver->hc_device->name);
return -1;
}
- dbg("name %s, path %s, len %d, method %d\n", attr_status->name,
- attr_status->path, attr_status->len, attr_status->method);
-
- dbg("%s", attr_status->value);
+ dbg("name: %s path: %s len: %d method: %d value: %s",
+ attr_status->name, attr_status->path, attr_status->len,
+ attr_status->method, attr_status->value);
return parse_status(attr_status->value);
}
static int get_nports(void)
{
+ char *c;
int nports = 0;
struct sysfs_attribute *attr_status;
attr_status = sysfs_get_device_attr(vhci_driver->hc_device, "status");
if (!attr_status) {
- err("get attr %s of %s", "status", vhci_driver->hc_device->name);
+ dbg("sysfs_get_device_attr(\"status\") failed: %s",
+ vhci_driver->hc_device->name);
return -1;
}
- dbg("name %s, path %s, len %d, method %d\n", attr_status->name,
- attr_status->path, attr_status->len, attr_status->method);
-
- dbg("%s", attr_status->value);
+ dbg("name: %s path: %s len: %d method: %d value: %s",
+ attr_status->name, attr_status->path, attr_status->len,
+ attr_status->method, attr_status->value);
- {
- char *c;
-
- /* skip a header line */
- c = strchr(attr_status->value, '\n') + 1;
+ /* skip a header line */
+ c = strchr(attr_status->value, '\n') + 1;
- while (*c != '\0') {
- /* go to the next line */
- c = strchr(c, '\n') + 1;
- nports += 1;
- }
+ while (*c != '\0') {
+ /* go to the next line */
+ c = strchr(c, '\n') + 1;
+ nports += 1;
}
return nports;
@@ -275,20 +285,21 @@ static int get_hc_busid(char *sysfs_mntpath, char *hc_busid)
int found = 0;
- snprintf(sdriver_path, SYSFS_PATH_MAX, "%s/%s/platform/%s/%s",
- sysfs_mntpath, SYSFS_BUS_NAME, SYSFS_DRIVERS_NAME,
- vhci_driver_name);
+ snprintf(sdriver_path, SYSFS_PATH_MAX, "%s/%s/%s/%s/%s", sysfs_mntpath,
+ SYSFS_BUS_NAME, USBIP_VHCI_BUS_TYPE, SYSFS_DRIVERS_NAME,
+ USBIP_VHCI_DRV_NAME);
sdriver = sysfs_open_driver_path(sdriver_path);
if (!sdriver) {
- info("%s is not found", sdriver_path);
- info("load usbip-core.ko and vhci-hcd.ko !");
+ dbg("sysfs_open_driver_path failed: %s", sdriver_path);
+ dbg("make sure " USBIP_CORE_MOD_NAME ".ko and "
+ USBIP_VHCI_DRV_NAME ".ko are loaded!");
return -1;
}
hc_devs = sysfs_get_driver_devices(sdriver);
if (!hc_devs) {
- err("get hc list");
+ dbg("sysfs_get_driver failed");
goto err;
}
@@ -304,7 +315,7 @@ err:
if (found)
return 0;
- err("not found usbip hc");
+ dbg("%s not found", hc_busid);
return -1;
}
@@ -318,13 +329,13 @@ int usbip_vhci_driver_open(void)
vhci_driver = (struct usbip_vhci_driver *) calloc(1, sizeof(*vhci_driver));
if (!vhci_driver) {
- err("alloc vhci_driver");
+ dbg("calloc failed");
return -1;
}
ret = sysfs_get_mnt_path(vhci_driver->sysfs_mntpath, SYSFS_PATH_MAX);
if (ret < 0) {
- err("sysfs must be mounted");
+ dbg("sysfs_get_mnt_path failed");
goto err;
}
@@ -333,17 +344,18 @@ int usbip_vhci_driver_open(void)
goto err;
/* will be freed in usbip_driver_close() */
- vhci_driver->hc_device = sysfs_open_device("platform", hc_busid);
+ vhci_driver->hc_device = sysfs_open_device(USBIP_VHCI_BUS_TYPE,
+ hc_busid);
if (!vhci_driver->hc_device) {
- err("get sysfs vhci_driver");
+ dbg("sysfs_open_device failed");
goto err;
}
vhci_driver->nports = get_nports();
- info("%d ports available\n", vhci_driver->nports);
+ dbg("available ports: %d", vhci_driver->nports);
- vhci_driver->cdev_list = dlist_new(sizeof(struct class_device));
+ vhci_driver->cdev_list = dlist_new(sizeof(struct usbip_class_device));
if (!vhci_driver->cdev_list)
goto err;
@@ -402,7 +414,7 @@ int usbip_vhci_refresh_device_list(void)
dlist_destroy(vhci_driver->idev[i].cdev_list);
}
- vhci_driver->cdev_list = dlist_new(sizeof(struct class_device));
+ vhci_driver->cdev_list = dlist_new(sizeof(struct usbip_class_device));
if (!vhci_driver->cdev_list)
goto err;
@@ -422,7 +434,7 @@ err:
dlist_destroy(vhci_driver->idev[i].cdev_list);
}
- err("refresh device list");
+ dbg("failed to refresh device list");
return -1;
}
@@ -445,7 +457,8 @@ int usbip_vhci_attach_device2(uint8_t port, int sockfd, uint32_t devid,
attr_attach = sysfs_get_device_attr(vhci_driver->hc_device, "attach");
if (!attr_attach) {
- err("get attach");
+ dbg("sysfs_get_device_attr(\"attach\") failed: %s",
+ vhci_driver->hc_device->name);
return -1;
}
@@ -455,11 +468,11 @@ int usbip_vhci_attach_device2(uint8_t port, int sockfd, uint32_t devid,
ret = sysfs_write_attribute(attr_attach, buff, strlen(buff));
if (ret < 0) {
- err("write to attach failed");
+ dbg("sysfs_write_attribute failed");
return -1;
}
- info("port %d attached", port);
+ dbg("attached port: %d", port);
return 0;
}
@@ -486,21 +499,21 @@ int usbip_vhci_detach_device(uint8_t port)
attr_detach = sysfs_get_device_attr(vhci_driver->hc_device, "detach");
if (!attr_detach) {
- err("get detach");
+ dbg("sysfs_get_device_attr(\"detach\") failed: %s",
+ vhci_driver->hc_device->name);
return -1;
}
snprintf(buff, sizeof(buff), "%u", port);
- dbg("writing to detach");
dbg("writing: %s", buff);
ret = sysfs_write_attribute(attr_detach, buff, strlen(buff));
if (ret < 0) {
- err("write to detach failed");
+ dbg("sysfs_write_attribute failed");
return -1;
}
- info("port %d detached", port);
+ dbg("detached port: %d", port);
return 0;
}
diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.h b/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
index cad8ad7586d..89949aa7c31 100644
--- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
+++ b/drivers/staging/usbip/userspace/libsrc/vhci_driver.h
@@ -2,18 +2,20 @@
* Copyright (C) 2005-2007 Takahiro Hirofuchi
*/
-#ifndef _VHCI_DRIVER_H
-#define _VHCI_DRIVER_H
-
-#include "usbip.h"
+#ifndef __VHCI_DRIVER_H
+#define __VHCI_DRIVER_H
+#include <sysfs/libsysfs.h>
+#include <stdint.h>
+#include "usbip_common.h"
+#define USBIP_VHCI_BUS_TYPE "platform"
#define MAXNPORT 128
-struct class_device {
- char clspath[SYSFS_PATH_MAX];
- char devpath[SYSFS_PATH_MAX];
+struct usbip_class_device {
+ char class_path[SYSFS_PATH_MAX];
+ char dev_path[SYSFS_PATH_MAX];
};
struct usbip_imported_device {
@@ -25,16 +27,19 @@ struct usbip_imported_device {
uint8_t busnum;
uint8_t devnum;
-
- struct dlist *cdev_list; /* list of class device */
- struct usb_device udev;
+ /* usbip_class_device list */
+ struct dlist *cdev_list;
+ struct usbip_usb_device udev;
};
struct usbip_vhci_driver {
char sysfs_mntpath[SYSFS_PATH_MAX];
- struct sysfs_device *hc_device; /* /sys/devices/platform/vhci_hcd */
- struct dlist *cdev_list; /* list of class device */
+ /* /sys/devices/platform/vhci_hcd */
+ struct sysfs_device *hc_device;
+
+ /* usbip_class_device list */
+ struct dlist *cdev_list;
int nports;
struct usbip_imported_device idev[MAXNPORT];
@@ -58,4 +63,5 @@ int usbip_vhci_attach_device(uint8_t port, int sockfd, uint8_t busnum,
uint8_t devnum, uint32_t speed);
int usbip_vhci_detach_device(uint8_t port);
-#endif
+
+#endif /* __VHCI_DRIVER_H */
diff --git a/drivers/staging/usbip/userspace/src/Makefile.am b/drivers/staging/usbip/userspace/src/Makefile.am
index 05a7aa50d42..3f09f6ad39f 100644
--- a/drivers/staging/usbip/userspace/src/Makefile.am
+++ b/drivers/staging/usbip/userspace/src/Makefile.am
@@ -1,10 +1,11 @@
-AM_CPPFLAGS := -I$(top_srcdir)/libsrc -DUSBIDS_FILE='"@USBIDS_DIR@/usb.ids"'
-AM_CFLAGS := @EXTRA_CFLAGS@ @PACKAGE_CFLAGS@
-LDADD := $(top_srcdir)/libsrc/libusbip.la @PACKAGE_LIBS@
+AM_CPPFLAGS = -I$(top_srcdir)/libsrc -DUSBIDS_FILE='"@USBIDS_DIR@/usb.ids"'
+AM_CFLAGS = @EXTRA_CFLAGS@ @PACKAGE_CFLAGS@
+LDADD = $(top_builddir)/libsrc/libusbip.la @PACKAGE_LIBS@
-sbin_PROGRAMS := usbip usbipd usbip_bind_driver
+sbin_PROGRAMS := usbip usbipd
-usbip_SOURCES := usbip.c usbip_network.c usbip_network.h
-usbipd_SOURCES := usbipd.c usbip_network.c usbip_network.h
-usbip_bind_driver_SOURCES := bind-driver.c utils.c utils.h \
- usbip_network.h usbip_network.c
+usbip_SOURCES := usbip.c utils.c usbip_network.c \
+ usbip_attach.c usbip_detach.c usbip_list.c \
+ usbip_bind.c usbip_unbind.c
+
+usbipd_SOURCES := usbipd.c usbip_network.c
diff --git a/drivers/staging/usbip/userspace/src/bind-driver.c b/drivers/staging/usbip/userspace/src/bind-driver.c
deleted file mode 100644
index 201ffbbee54..00000000000
--- a/drivers/staging/usbip/userspace/src/bind-driver.c
+++ /dev/null
@@ -1,643 +0,0 @@
-/*
- *
- * Copyright (C) 2005-2007 Takahiro Hirofuchi
- */
-
-#include "utils.h"
-
-#define _GNU_SOURCE
-#include <getopt.h>
-#include <glib.h>
-
-
-
-static const struct option longopts[] = {
- {"usbip", required_argument, NULL, 'u'},
- {"other", required_argument, NULL, 'o'},
- {"list", no_argument, NULL, 'l'},
- {"list2", no_argument, NULL, 'L'},
- {"help", no_argument, NULL, 'h'},
-#if 0
- {"allusbip", no_argument, NULL, 'a'},
- {"export-to", required_argument, NULL, 'e'},
- {"unexport", required_argument, NULL, 'x'},
- {"busid", required_argument, NULL, 'b'},
-#endif
-
- {NULL, 0, NULL, 0}
-};
-
-static const char match_busid_path[] = "/sys/bus/usb/drivers/usbip/match_busid";
-
-
-static void show_help(void)
-{
- printf("Usage: usbip_bind_driver [OPTION]\n");
- printf("Change driver binding for USB/IP.\n");
- printf(" --usbip busid make a device exportable\n");
- printf(" --other busid use a device by a local driver\n");
- printf(" --list print usb devices and their drivers\n");
- printf(" --list2 print usb devices and their drivers in parseable mode\n");
-#if 0
- printf(" --allusbip make all devices exportable\n");
- printf(" --export-to host export the device to 'host'\n");
- printf(" --unexport host unexport a device previously exported to 'host'\n");
- printf(" --busid busid the busid used for --export-to\n");
-#endif
-}
-
-static int modify_match_busid(char *busid, int add)
-{
- int fd;
- int ret;
- char buff[BUS_ID_SIZE + 4];
-
- /* BUS_IS_SIZE includes NULL termination? */
- if (strnlen(busid, BUS_ID_SIZE) > BUS_ID_SIZE - 1) {
- g_warning("too long busid");
- return -1;
- }
-
- fd = open(match_busid_path, O_WRONLY);
- if (fd < 0)
- return -1;
-
- if (add)
- snprintf(buff, BUS_ID_SIZE + 4, "add %s", busid);
- else
- snprintf(buff, BUS_ID_SIZE + 4, "del %s", busid);
-
- g_debug("write \"%s\" to %s", buff, match_busid_path);
-
- ret = write(fd, buff, sizeof(buff));
- if (ret < 0) {
- close(fd);
- return -1;
- }
-
- close(fd);
-
- return 0;
-}
-
-static const char unbind_path_format[] = "/sys/bus/usb/devices/%s/driver/unbind";
-
-/* buggy driver may cause dead lock */
-static int unbind_interface_busid(char *busid)
-{
- char unbind_path[PATH_MAX];
- int fd;
- int ret;
-
- snprintf(unbind_path, sizeof(unbind_path), unbind_path_format, busid);
-
- fd = open(unbind_path, O_WRONLY);
- if (fd < 0) {
- g_warning("opening unbind_path failed: %d", fd);
- return -1;
- }
-
- ret = write(fd, busid, strnlen(busid, BUS_ID_SIZE));
- if (ret < 0) {
- g_warning("write to unbind_path failed: %d", ret);
- close(fd);
- return -1;
- }
-
- close(fd);
-
- return 0;
-}
-
-static int unbind_interface(char *busid, int configvalue, int interface)
-{
- char inf_busid[BUS_ID_SIZE];
- g_debug("unbinding interface");
-
- snprintf(inf_busid, BUS_ID_SIZE, "%s:%d.%d", busid, configvalue, interface);
-
- return unbind_interface_busid(inf_busid);
-}
-
-
-static const char bind_path_format[] = "/sys/bus/usb/drivers/%s/bind";
-
-static int bind_interface_busid(char *busid, char *driver)
-{
- char bind_path[PATH_MAX];
- int fd;
- int ret;
-
- snprintf(bind_path, sizeof(bind_path), bind_path_format, driver);
-
- fd = open(bind_path, O_WRONLY);
- if (fd < 0)
- return -1;
-
- ret = write(fd, busid, strnlen(busid, BUS_ID_SIZE));
- if (ret < 0) {
- close(fd);
- return -1;
- }
-
- close(fd);
-
- return 0;
-}
-
-static int bind_interface(char *busid, int configvalue, int interface, char *driver)
-{
- char inf_busid[BUS_ID_SIZE];
-
- snprintf(inf_busid, BUS_ID_SIZE, "%s:%d.%d", busid, configvalue, interface);
-
- return bind_interface_busid(inf_busid, driver);
-}
-
-static int unbind(char *busid)
-{
- int configvalue = 0;
- int ninterface = 0;
- int devclass = 0;
- int i;
- int failed = 0;
-
- configvalue = read_bConfigurationValue(busid);
- ninterface = read_bNumInterfaces(busid);
- devclass = read_bDeviceClass(busid);
-
- if (configvalue < 0 || ninterface < 0 || devclass < 0) {
- g_warning("read config and ninf value, removed?");
- return -1;
- }
-
- if (devclass == 0x09) {
- g_message("skip unbinding of hub");
- return -1;
- }
-
- for (i = 0; i < ninterface; i++) {
- char driver[PATH_MAX];
- int ret;
-
- bzero(&driver, sizeof(driver));
-
- getdriver(busid, configvalue, i, driver, PATH_MAX-1);
-
- g_debug(" %s:%d.%d -> %s ", busid, configvalue, i, driver);
-
- if (!strncmp("none", driver, PATH_MAX))
- continue; /* unbound interface */
-
-#if 0
- if (!strncmp("usbip", driver, PATH_MAX))
- continue; /* already bound to usbip */
-#endif
-
- /* unbinding */
- ret = unbind_interface(busid, configvalue, i);
- if (ret < 0) {
- g_warning("unbind driver at %s:%d.%d failed",
- busid, configvalue, i);
- failed = 1;
- }
- }
-
- if (failed)
- return -1;
- else
- return 0;
-}
-
-/* call at unbound state */
-static int bind_to_usbip(char *busid)
-{
- int configvalue = 0;
- int ninterface = 0;
- int i;
- int failed = 0;
-
- configvalue = read_bConfigurationValue(busid);
- ninterface = read_bNumInterfaces(busid);
-
- if (configvalue < 0 || ninterface < 0) {
- g_warning("read config and ninf value, removed?");
- return -1;
- }
-
- for (i = 0; i < ninterface; i++) {
- int ret;
-
- ret = bind_interface(busid, configvalue, i, "usbip");
- if (ret < 0) {
- g_warning("bind usbip at %s:%d.%d, failed",
- busid, configvalue, i);
- failed = 1;
- /* need to contine binding at other interfaces */
- }
- }
-
- if (failed)
- return -1;
- else
- return 0;
-}
-
-
-static int use_device_by_usbip(char *busid)
-{
- int ret;
-
- ret = unbind(busid);
- if (ret < 0) {
- g_warning("unbind drivers of %s, failed", busid);
- return -1;
- }
-
- ret = modify_match_busid(busid, 1);
- if (ret < 0) {
- g_warning("add %s to match_busid, failed", busid);
- return -1;
- }
-
- ret = bind_to_usbip(busid);
- if (ret < 0) {
- g_warning("bind usbip to %s, failed", busid);
- modify_match_busid(busid, 0);
- return -1;
- }
-
- g_message("bind %s to usbip, complete!", busid);
-
- return 0;
-}
-
-
-
-static int use_device_by_other(char *busid)
-{
- int ret;
- int config;
-
- /* read and write the same config value to kick probing */
- config = read_bConfigurationValue(busid);
- if (config < 0) {
- g_warning("read bConfigurationValue of %s, failed", busid);
- return -1;
- }
-
- ret = modify_match_busid(busid, 0);
- if (ret < 0) {
- g_warning("del %s to match_busid, failed", busid);
- return -1;
- }
-
- ret = write_bConfigurationValue(busid, config);
- if (ret < 0) {
- g_warning("read bConfigurationValue of %s, failed", busid);
- return -1;
- }
-
- g_message("bind %s to other drivers than usbip, complete!", busid);
-
- return 0;
-}
-
-
-#include <sys/types.h>
-#include <regex.h>
-
-#include <errno.h>
-#include <string.h>
-#include <stdio.h>
-
-
-
-static int is_usb_device(char *busid)
-{
- int ret;
-
- regex_t regex;
- regmatch_t pmatch[1];
-
- ret = regcomp(&regex, "^[0-9]+-[0-9]+(\\.[0-9]+)*$", REG_NOSUB|REG_EXTENDED);
- if (ret < 0)
- g_error("regcomp: %s\n", strerror(errno));
-
- ret = regexec(&regex, busid, 0, pmatch, 0);
- if (ret)
- return 0; /* not matched */
-
- return 1;
-}
-
-
-#include <dirent.h>
-static int show_devices(void)
-{
- DIR *dir;
-
- dir = opendir("/sys/bus/usb/devices/");
- if (!dir)
- g_error("opendir: %s", strerror(errno));
-
- printf("List USB devices\n");
- for (;;) {
- struct dirent *dirent;
- char *busid;
-
- dirent = readdir(dir);
- if (!dirent)
- break;
-
- busid = dirent->d_name;
-
- if (is_usb_device(busid)) {
- char name[100] = {'\0'};
- char driver[100] = {'\0'};
- int conf, ninf = 0;
- int i;
-
- conf = read_bConfigurationValue(busid);
- ninf = read_bNumInterfaces(busid);
-
- getdevicename(busid, name, sizeof(name));
-
- printf(" - busid %s (%s)\n", busid, name);
-
- for (i = 0; i < ninf; i++) {
- getdriver(busid, conf, i, driver, sizeof(driver));
- printf(" %s:%d.%d -> %s\n", busid, conf, i, driver);
- }
- printf("\n");
- }
- }
-
- closedir(dir);
-
- return 0;
-}
-
-static int show_devices2(void)
-{
- DIR *dir;
-
- dir = opendir("/sys/bus/usb/devices/");
- if (!dir)
- g_error("opendir: %s", strerror(errno));
-
- for (;;) {
- struct dirent *dirent;
- char *busid;
-
- dirent = readdir(dir);
- if (!dirent)
- break;
-
- busid = dirent->d_name;
-
- if (is_usb_device(busid)) {
- char name[100] = {'\0'};
- char driver[100] = {'\0'};
- int conf, ninf = 0;
- int i;
-
- conf = read_bConfigurationValue(busid);
- ninf = read_bNumInterfaces(busid);
-
- getdevicename(busid, name, sizeof(name));
-
- printf("busid=%s#usbid=%s#", busid, name);
-
- for (i = 0; i < ninf; i++) {
- getdriver(busid, conf, i, driver, sizeof(driver));
- printf("%s:%d.%d=%s#", busid, conf, i, driver);
- }
- printf("\n");
- }
- }
-
- closedir(dir);
-
- return 0;
-}
-
-
-#if 0
-static int export_to(char *host, char *busid) {
-
- int ret;
-
- if( host == NULL ) {
- printf( "no host given\n\n");
- show_help();
- return -1;
- }
- if( busid == NULL ) {
- /* XXX print device list and ask for busnumber, if none is
- * given */
- printf( "no busid given, use --busid switch\n\n");
- show_help();
- return -1;
- }
-
-
- ret = use_device_by_usbip(busid);
- if( ret != 0 ) {
- printf( "could not bind driver to usbip\n");
- return -1;
- }
-
- printf( "DEBUG: exporting device '%s' to '%s'\n", busid, host );
- ret = export_busid_to_host(host, busid); /* usbip_export.[ch] */
- if( ret != 0 ) {
- printf( "could not export device to host\n" );
- printf( " host: %s, device: %s\n", host, busid );
- use_device_by_other(busid);
- return -1;
- }
-
- return 0;
-}
-
-static int unexport_from(char *host, char *busid) {
-
- int ret;
-
- if (!host || !busid)
- g_error("no host or no busid\n");
-
- g_message("unexport_from: host: '%s', busid: '%s'", host, busid);
-
- ret = unexport_busid_from_host(host, busid); /* usbip_export.[ch] */
- if( ret != 0 ) {
- err( "could not unexport device from host\n" );
- err( " host: %s, device: %s\n", host, busid );
- }
-
- ret = use_device_by_other(busid);
- if (ret < 0)
- g_error("could not unbind device from usbip\n");
-
- return 0;
-}
-
-
-static int allusbip(void)
-{
- DIR *dir;
-
- dir = opendir("/sys/bus/usb/devices/");
- if (!dir)
- g_error("opendir: %s", strerror(errno));
-
- for (;;) {
- struct dirent *dirent;
- char *busid;
-
- dirent = readdir(dir);
- if (!dirent)
- break;
-
- busid = dirent->d_name;
-
- if (!is_usb_device(busid))
- continue;
-
- {
- char name[PATH_MAX];
- int conf, ninf = 0;
- int i;
- int be_local = 0;
-
- conf = read_bConfigurationValue(busid);
- ninf = read_bNumInterfaces(busid);
-
- getdevicename(busid, name, sizeof(name));
-
- for (i = 0; i < ninf; i++) {
- char driver[PATH_MAX];
-
- getdriver(busid, conf, i, driver, sizeof(driver));
-#if 0
- if (strncmp(driver, "usbhid", 6) == 0 || strncmp(driver, "usb-storage", 11) == 0) {
- be_local = 1;
- break;
- }
-#endif
- }
-
- if (be_local == 0)
- use_device_by_usbip(busid);
- }
- }
-
- closedir(dir);
-
- return 0;
-}
-#endif
-
-int main(int argc, char **argv)
-{
- char *busid = NULL;
- char *remote_host __attribute__((unused)) = NULL;
-
- enum {
- cmd_unknown = 0,
- cmd_use_by_usbip,
- cmd_use_by_other,
- cmd_list,
- cmd_list2,
- cmd_allusbip,
- cmd_export_to,
- cmd_unexport,
- cmd_help,
- } cmd = cmd_unknown;
-
- if (geteuid() != 0)
- g_warning("running non-root?");
-
- for (;;) {
- int c;
- int index = 0;
-
- c = getopt_long(argc, argv, "u:o:hlLae:x:b:", longopts, &index);
- if (c == -1)
- break;
-
- switch (c) {
- case 'u':
- cmd = cmd_use_by_usbip;
- busid = optarg;
- break;
- case 'o' :
- cmd = cmd_use_by_other;
- busid = optarg;
- break;
- case 'l' :
- cmd = cmd_list;
- break;
- case 'L' :
- cmd = cmd_list2;
- break;
- case 'a' :
- cmd = cmd_allusbip;
- break;
- case 'b':
- busid = optarg;
- break;
- case 'e':
- cmd = cmd_export_to;
- remote_host = optarg;
- break;
- case 'x':
- cmd = cmd_unexport;
- remote_host = optarg;
- break;
- case 'h': /* fallthrough */
- case '?':
- cmd = cmd_help;
- break;
- default:
- g_error("getopt");
- }
-
- //if (cmd)
- // break;
- }
-
- switch (cmd) {
- case cmd_use_by_usbip:
- use_device_by_usbip(busid);
- break;
- case cmd_use_by_other:
- use_device_by_other(busid);
- break;
- case cmd_list:
- show_devices();
- break;
- case cmd_list2:
- show_devices2();
- break;
-#if 0
- case cmd_allusbip:
- allusbip();
- break;
- case cmd_export_to:
- export_to(remote_host, busid);
- break;
- case cmd_unexport:
- unexport_from(remote_host, busid);
- break;
-#endif
- case cmd_help: /* fallthrough */
- case cmd_unknown:
- show_help();
- break;
- default:
- g_error("NOT REACHED");
- }
-
- return 0;
-}
diff --git a/drivers/staging/usbip/userspace/src/usbip.c b/drivers/staging/usbip/userspace/src/usbip.c
index 01a562866b5..fff4b768e70 100644
--- a/drivers/staging/usbip/userspace/src/usbip.c
+++ b/drivers/staging/usbip/userspace/src/usbip.c
@@ -1,723 +1,190 @@
/*
+ * command structure borrowed from udev
+ * (git://git.kernel.org/pub/scm/linux/hotplug/udev.git)
*
- * Copyright (C) 2005-2007 Takahiro Hirofuchi
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifdef HAVE_CONFIG_H
-#include "../config.h"
-#endif
-
-#include "usbip.h"
-#include "usbip_network.h"
-#include <ctype.h>
-#include <sys/types.h>
-#include <sys/stat.h>
+#include <stdio.h>
#include <stdlib.h>
-#include <fcntl.h>
-#include <glib.h>
-
-static const char version[] = PACKAGE_STRING;
-
-
-/* /sys/devices/platform/vhci_hcd/usb6/6-1/6-1:1.1 -> 1 */
-static int get_interface_number(char *path)
-{
- char *c;
-
- c = strstr(path, vhci_driver->hc_device->bus_id);
- if (!c)
- return -1; /* hc exist? */
- c++;
- /* -> usb6/6-1/6-1:1.1 */
-
- c = strchr(c, '/');
- if (!c)
- return -1; /* hc exist? */
- c++;
- /* -> 6-1/6-1:1.1 */
-
- c = strchr(c, '/');
- if (!c)
- return -1; /* no interface path */
- c++;
- /* -> 6-1:1.1 */
-
- c = strchr(c, ':');
- if (!c)
- return -1; /* no configuration? */
- c++;
- /* -> 1.1 */
-
- c = strchr(c, '.');
- if (!c)
- return -1; /* no interface? */
- c++;
- /* -> 1 */
-
-
- return atoi(c);
-}
-
-
-static struct sysfs_device *open_usb_interface(struct usb_device *udev, int i)
-{
- struct sysfs_device *suinf;
- char busid[SYSFS_BUS_ID_SIZE];
-
- snprintf(busid, SYSFS_BUS_ID_SIZE, "%s:%d.%d",
- udev->busid, udev->bConfigurationValue, i);
-
- suinf = sysfs_open_device("usb", busid);
- if (!suinf)
- err("sysfs_open_device %s", busid);
-
- return suinf;
-}
-
-
-#define MAX_BUFF 100
-static int record_connection(char *host, char *port, char *busid, int rhport)
-{
- int fd;
- char path[PATH_MAX+1];
- char buff[MAX_BUFF+1];
- int ret;
-
- mkdir(VHCI_STATE_PATH, 0700);
- snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", rhport);
+#include <getopt.h>
+#include <syslog.h>
- fd = open(path, O_WRONLY|O_CREAT|O_TRUNC, S_IRWXU);
- if (fd < 0)
- return -1;
+#include "usbip_common.h"
+#include "usbip.h"
- snprintf(buff, MAX_BUFF, "%s %s %s\n",
- host, port, busid);
+static int usbip_help(int argc, char *argv[]);
+static int usbip_version(int argc, char *argv[]);
- ret = write(fd, buff, strlen(buff));
- if (ret != (ssize_t) strlen(buff)) {
- close(fd);
- return -1;
- }
+static const char usbip_version_string[] = PACKAGE_STRING;
- close(fd);
+static const char usbip_usage_string[] =
+ "usbip [--debug] [--log] [version]\n"
+ " [help] <command> <args>\n";
- return 0;
-}
-
-static int read_record(int rhport, char *host, char *port, char *busid)
+static void usbip_usage(void)
{
- FILE *file;
- char path[PATH_MAX+1];
-
- snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", rhport);
-
- file = fopen(path, "r");
- if (!file) {
- err("fopen");
- return -1;
- }
-
- if (fscanf(file, "%s %s %s\n", host, port, busid) != 3) {
- err("fscanf");
- fclose(file);
- return -1;
- }
-
- fclose(file);
-
- return 0;
+ printf("usage: %s", usbip_usage_string);
}
+struct command {
+ const char *name;
+ int (*fn)(int argc, char *argv[]);
+ const char *help;
+ void (*usage)(void);
+};
-int usbip_vhci_imported_device_dump(struct usbip_imported_device *idev)
-{
- char product_name[100];
- char host[NI_MAXHOST] = "unknown host";
- char serv[NI_MAXSERV] = "unknown port";
- char remote_busid[SYSFS_BUS_ID_SIZE];
- int ret;
-
- if (idev->status == VDEV_ST_NULL || idev->status == VDEV_ST_NOTASSIGNED) {
- info("Port %02d: <%s>", idev->port, usbip_status_string(idev->status));
- return 0;
- }
-
- ret = read_record(idev->port, host, serv, remote_busid);
- if (ret) {
- err("read_record");
- return -1;
- }
-
- info("Port %02d: <%s> at %s", idev->port,
- usbip_status_string(idev->status), usbip_speed_string(idev->udev.speed));
-
- usbip_names_get_product(product_name, sizeof(product_name),
- idev->udev.idVendor, idev->udev.idProduct);
-
- info(" %s", product_name);
-
- info("%10s -> usbip://%s:%s/%s (remote devid %08x (bus/dev %03d/%03d))",
- idev->udev.busid, host, serv, remote_busid,
- idev->devid,
- idev->busnum, idev->devnum);
-
- for (int i=0; i < idev->udev.bNumInterfaces; i++) {
- /* show interface information */
- struct sysfs_device *suinf;
-
- suinf = open_usb_interface(&idev->udev, i);
- if (!suinf)
- continue;
-
- info(" %6s used by %-17s", suinf->bus_id, suinf->driver_name);
- sysfs_close_device(suinf);
-
- /* show class device information */
- struct class_device *cdev;
-
- dlist_for_each_data(idev->cdev_list, cdev, struct class_device) {
- int ifnum = get_interface_number(cdev->devpath);
- if (ifnum == i) {
- info(" %s", cdev->clspath);
- }
- }
- }
-
- return 0;
-}
-
-
-
+static const struct command cmds[] = {
+ {
+ .name = "help",
+ .fn = usbip_help,
+ .help = NULL,
+ .usage = NULL
+ },
+ {
+ .name = "version",
+ .fn = usbip_version,
+ .help = NULL,
+ .usage = NULL
+ },
+ {
+ .name = "attach",
+ .fn = usbip_attach,
+ .help = "Attach a remote USB device",
+ .usage = usbip_attach_usage
+ },
+ {
+ .name = "detach",
+ .fn = usbip_detach,
+ .help = "Detach a remote USB device",
+ .usage = usbip_detach_usage
+ },
+ {
+ .name = "list",
+ .fn = usbip_list,
+ .help = "List exportable or local USB devices",
+ .usage = usbip_list_usage
+ },
+ {
+ .name = "bind",
+ .fn = usbip_bind,
+ .help = "Bind device to " USBIP_HOST_DRV_NAME ".ko",
+ .usage = usbip_bind_usage
+ },
+ {
+ .name = "unbind",
+ .fn = usbip_unbind,
+ .help = "Unbind device from " USBIP_HOST_DRV_NAME ".ko",
+ .usage = usbip_unbind_usage
+ },
+ { NULL, NULL, NULL, NULL }
+};
-static int query_exported_devices(int sockfd)
+static int usbip_help(int argc, char *argv[])
{
- int ret;
- struct op_devlist_reply rep;
- uint16_t code = OP_REP_DEVLIST;
-
- bzero(&rep, sizeof(rep));
-
- ret = usbip_send_op_common(sockfd, OP_REQ_DEVLIST, 0);
- if (ret < 0) {
- err("send op_common");
- return -1;
- }
-
- ret = usbip_recv_op_common(sockfd, &code);
- if (ret < 0) {
- err("recv op_common");
- return -1;
- }
-
- ret = usbip_recv(sockfd, (void *) &rep, sizeof(rep));
- if (ret < 0) {
- err("recv op_devlist");
- return -1;
- }
-
- PACK_OP_DEVLIST_REPLY(0, &rep);
- dbg("exportable %d devices", rep.ndev);
-
- for (unsigned int i=0; i < rep.ndev; i++) {
- char product_name[100];
- char class_name[100];
- struct usb_device udev;
-
- bzero(&udev, sizeof(udev));
-
- ret = usbip_recv(sockfd, (void *) &udev, sizeof(udev));
- if (ret < 0) {
- err("recv usb_device[%d]", i);
- return -1;
- }
- pack_usb_device(0, &udev);
-
- usbip_names_get_product(product_name, sizeof(product_name),
- udev.idVendor, udev.idProduct);
- usbip_names_get_class(class_name, sizeof(class_name), udev.bDeviceClass,
- udev.bDeviceSubClass, udev.bDeviceProtocol);
-
- info("%8s: %s", udev.busid, product_name);
- info("%8s: %s", " ", udev.path);
- info("%8s: %s", " ", class_name);
-
- for (int j=0; j < udev.bNumInterfaces; j++) {
- struct usb_interface uinf;
+ const struct command *cmd;
+ int i;
+ int ret = 0;
- ret = usbip_recv(sockfd, (void *) &uinf, sizeof(uinf));
- if (ret < 0) {
- err("recv usb_interface[%d]", j);
- return -1;
+ if (argc > 1 && argv++) {
+ for (i = 0; cmds[i].name != NULL; i++)
+ if (!strcmp(cmds[i].name, argv[0]) && cmds[i].usage) {
+ cmds[i].usage();
+ goto done;
}
-
- pack_usb_interface(0, &uinf);
- usbip_names_get_class(class_name, sizeof(class_name), uinf.bInterfaceClass,
- uinf.bInterfaceSubClass, uinf.bInterfaceProtocol);
-
- info("%8s: %2d - %s", " ", j, class_name);
- }
-
- info(" ");
- }
-
- return rep.ndev;
-}
-
-static int import_device(int sockfd, struct usb_device *udev)
-{
- int ret;
- int port;
-
- ret = usbip_vhci_driver_open();
- if (ret < 0) {
- err("open vhci_driver");
- return -1;
- }
-
- port = usbip_vhci_get_free_port();
- if (port < 0) {
- err("no free port");
- usbip_vhci_driver_close();
- return -1;
- }
-
- ret = usbip_vhci_attach_device(port, sockfd, udev->busnum,
- udev->devnum, udev->speed);
- if (ret < 0) {
- err("import device");
- usbip_vhci_driver_close();
- return -1;
+ ret = -1;
}
- usbip_vhci_driver_close();
-
- return port;
-}
-
-
-static int query_import_device(int sockfd, char *busid)
-{
- int ret;
- struct op_import_request request;
- struct op_import_reply reply;
- uint16_t code = OP_REP_IMPORT;
-
- bzero(&request, sizeof(request));
- bzero(&reply, sizeof(reply));
-
-
- /* send a request */
- ret = usbip_send_op_common(sockfd, OP_REQ_IMPORT, 0);
- if (ret < 0) {
- err("send op_common");
- return -1;
- }
-
- strncpy(request.busid, busid, SYSFS_BUS_ID_SIZE-1);
-
- PACK_OP_IMPORT_REQUEST(0, &request);
-
- ret = usbip_send(sockfd, (void *) &request, sizeof(request));
- if (ret < 0) {
- err("send op_import_request");
- return -1;
- }
-
-
- /* recieve a reply */
- ret = usbip_recv_op_common(sockfd, &code);
- if (ret < 0) {
- err("recv op_common");
- return -1;
- }
-
- ret = usbip_recv(sockfd, (void *) &reply, sizeof(reply));
- if (ret < 0) {
- err("recv op_import_reply");
- return -1;
- }
-
- PACK_OP_IMPORT_REPLY(0, &reply);
-
-
- /* check the reply */
- if (strncmp(reply.udev.busid, busid, SYSFS_BUS_ID_SIZE)) {
- err("recv different busid %s", reply.udev.busid);
- return -1;
- }
-
-
- /* import a device */
- return import_device(sockfd, &reply.udev);
-}
-
-static int attach_device(char *host, char *busid)
-{
- int sockfd;
- int ret;
- int rhport;
-
- sockfd = tcp_connect(host, USBIP_PORT_STRING);
- if (sockfd < 0) {
- err("tcp connect");
- return -1;
- }
-
- rhport = query_import_device(sockfd, busid);
- if (rhport < 0) {
- err("query");
- return -1;
- }
-
- close(sockfd);
-
- ret = record_connection(host, USBIP_PORT_STRING,
- busid, rhport);
- if (ret < 0) {
- err("record connection");
- return -1;
- }
-
- return 0;
-}
-
-static int detach_port(char *port)
-{
- int ret;
- uint8_t portnum;
-
- for (unsigned int i=0; i < strlen(port); i++)
- if (!isdigit(port[i])) {
- err("invalid port %s", port);
- return -1;
- }
-
- /* check max port */
-
- portnum = atoi(port);
-
- ret = usbip_vhci_driver_open();
- if (ret < 0) {
- err("open vhci_driver");
- return -1;
- }
-
- ret = usbip_vhci_detach_device(portnum);
- if (ret < 0)
- return -1;
-
- usbip_vhci_driver_close();
-
+ usbip_usage();
+ printf("\n");
+ for (cmd = cmds; cmd->name != NULL; cmd++)
+ if (cmd->help != NULL)
+ printf(" %-10s %s\n", cmd->name, cmd->help);
+ printf("\n");
+done:
return ret;
}
-static int show_exported_devices(char *host)
-{
- int ret;
- int sockfd;
-
- sockfd = tcp_connect(host, USBIP_PORT_STRING);
- if (sockfd < 0) {
- err("- %s failed", host);
- return -1;
- }
-
- info("- %s", host);
-
- ret = query_exported_devices(sockfd);
- if (ret < 0) {
- err("query");
- return -1;
- }
-
- close(sockfd);
- return 0;
-}
-
-static int attach_exported_devices(char *host, int sockfd)
-{
- int ret;
- struct op_devlist_reply rep;
- uint16_t code = OP_REP_DEVLIST;
-
- bzero(&rep, sizeof(rep));
-
- ret = usbip_send_op_common(sockfd, OP_REQ_DEVLIST, 0);
- if(ret < 0) {
- err("send op_common");
- return -1;
- }
-
- ret = usbip_recv_op_common(sockfd, &code);
- if(ret < 0) {
- err("recv op_common");
- return -1;
- }
-
- ret = usbip_recv(sockfd, (void *) &rep, sizeof(rep));
- if(ret < 0) {
- err("recv op_devlist");
- return -1;
- }
-
- PACK_OP_DEVLIST_REPLY(0, &rep);
- dbg("exportable %d devices", rep.ndev);
-
- for(unsigned int i=0; i < rep.ndev; i++) {
- char product_name[100];
- char class_name[100];
- struct usb_device udev;
-
- bzero(&udev, sizeof(udev));
-
- ret = usbip_recv(sockfd, (void *) &udev, sizeof(udev));
- if(ret < 0) {
- err("recv usb_device[%d]", i);
- return -1;
- }
- pack_usb_device(0, &udev);
-
- usbip_names_get_product(product_name, sizeof(product_name),
- udev.idVendor, udev.idProduct);
- usbip_names_get_class(class_name, sizeof(class_name), udev.bDeviceClass,
- udev.bDeviceSubClass, udev.bDeviceProtocol);
-
- dbg("Attaching usb port %s from host %s on usbip, with deviceid: %s", udev.busid, host, product_name);
-
- for (int j=0; j < udev.bNumInterfaces; j++) {
- struct usb_interface uinf;
-
- ret = usbip_recv(sockfd, (void *) &uinf, sizeof(uinf));
- if (ret < 0) {
- err("recv usb_interface[%d]", j);
- return -1;
- }
-
- pack_usb_interface(0, &uinf);
- usbip_names_get_class(class_name, sizeof(class_name), uinf.bInterfaceClass,
- uinf.bInterfaceSubClass, uinf.bInterfaceProtocol);
-
- dbg("interface %2d - %s", j, class_name);
- }
-
- attach_device(host, udev.busid);
- }
-
- return rep.ndev;
-}
-
-static int attach_devices_all(char *host)
+static int usbip_version(int argc, char *argv[])
{
- int ret;
- int sockfd;
+ (void) argc;
+ (void) argv;
- sockfd = tcp_connect(host, USBIP_PORT_STRING);
- if(sockfd < 0) {
- err("- %s failed", host);
- return -1;
- }
-
- info("- %s", host);
-
- ret = attach_exported_devices(host, sockfd);
- if(ret < 0) {
- err("query");
- return -1;
- }
-
- close(sockfd);
+ printf(PROGNAME " (%s)\n", usbip_version_string);
return 0;
}
-
-const char help_message[] = "\
-Usage: usbip [options] \n\
- -a, --attach [host] [bus_id] \n\
- Attach a remote USB device. \n\
- \n\
- -x, --attachall [host] \n\
- Attach all remote USB devices on the specific host. \n\
- \n\
- -d, --detach [ports] \n\
- Detach an imported USB device. \n\
- \n\
- -l, --list [hosts] \n\
- List exported USB devices. \n\
- \n\
- -p, --port \n\
- List virtual USB port status. \n\
- \n\
- -D, --debug \n\
- Print debugging information. \n\
- \n\
- -v, --version \n\
- Show version. \n\
- \n\
- -h, --help \n\
- Print this help. \n";
-
-static void show_help(void)
-{
- printf("%s", help_message);
-}
-
-static int show_port_status(void)
+static int run_command(const struct command *cmd, int argc, char *argv[])
{
- int ret;
- struct usbip_imported_device *idev;
-
- ret = usbip_vhci_driver_open();
- if (ret < 0)
- return ret;
-
- for (int i = 0; i < vhci_driver->nports; i++) {
- idev = &vhci_driver->idev[i];
-
- if (usbip_vhci_imported_device_dump(idev) < 0)
- ret = -1;
- }
-
- usbip_vhci_driver_close();
-
- return ret;
+ dbg("running command: `%s'", cmd->name);
+ return cmd->fn(argc, argv);
}
-#define _GNU_SOURCE
-#include <getopt.h>
-static const struct option longopts[] = {
- {"attach", no_argument, NULL, 'a'},
- {"attachall", no_argument, NULL, 'x'},
- {"detach", no_argument, NULL, 'd'},
- {"port", no_argument, NULL, 'p'},
- {"list", no_argument, NULL, 'l'},
- {"version", no_argument, NULL, 'v'},
- {"help", no_argument, NULL, 'h'},
- {"debug", no_argument, NULL, 'D'},
- {"syslog", no_argument, NULL, 'S'},
- {NULL, 0, NULL, 0}
-};
-
int main(int argc, char *argv[])
{
- int ret;
+ static const struct option opts[] = {
+ { "debug", no_argument, NULL, 'd' },
+ { "log", no_argument, NULL, 'l' },
+ { NULL, 0, NULL, 0 }
+ };
- enum {
- cmd_attach = 1,
- cmd_attachall,
- cmd_detach,
- cmd_port,
- cmd_list,
- cmd_help,
- cmd_version
- } cmd = 0;
+ char *cmd;
+ int opt;
+ int i, rc = -1;
usbip_use_stderr = 1;
-
- if (geteuid() != 0)
- g_warning("running non-root?");
-
- ret = usbip_names_init(USBIDS_FILE);
- if (ret)
- notice("failed to open %s", USBIDS_FILE);
-
+ opterr = 0;
for (;;) {
- int c;
- int index = 0;
-
- c = getopt_long(argc, argv, "adplvhDSx", longopts, &index);
+ opt = getopt_long(argc, argv, "+d", opts, NULL);
- if (c == -1)
+ if (opt == -1)
break;
- switch(c) {
- case 'a':
- if (!cmd)
- cmd = cmd_attach;
- else
- cmd = cmd_help;
- break;
- case 'd':
- if (!cmd)
- cmd = cmd_detach;
- else
- cmd = cmd_help;
- break;
- case 'p':
- if (!cmd)
- cmd = cmd_port;
- else cmd = cmd_help;
- break;
- case 'l':
- if (!cmd)
- cmd = cmd_list;
- else
- cmd = cmd_help;
- break;
- case 'v':
- if (!cmd)
- cmd = cmd_version;
- else
- cmd = cmd_help;
- break;
- case 'x':
- if(!cmd)
- cmd = cmd_attachall;
- else
- cmd = cmd_help;
- break;
- case 'h':
- cmd = cmd_help;
- break;
- case 'D':
- usbip_use_debug = 1;
- break;
- case 'S':
- usbip_use_syslog = 1;
- break;
- case '?':
- break;
-
- default:
- err("getopt");
- }
- }
-
- ret = 0;
- switch(cmd) {
- case cmd_attach:
- if (optind == argc - 2)
- ret = attach_device(argv[optind], argv[optind+1]);
- else
- show_help();
- break;
- case cmd_detach:
- while (optind < argc)
- ret = detach_port(argv[optind++]);
- break;
- case cmd_port:
- ret = show_port_status();
- break;
- case cmd_list:
- while (optind < argc)
- ret = show_exported_devices(argv[optind++]);
+ switch (opt) {
+ case 'd':
+ usbip_use_debug = 1;
break;
- case cmd_attachall:
- while(optind < argc)
- ret = attach_devices_all(argv[optind++]);
- break;
- case cmd_version:
- printf("%s\n", version);
- break;
- case cmd_help:
- show_help();
+ case 'l':
+ usbip_use_syslog = 1;
+ openlog("", LOG_PID, LOG_USER);
break;
+ case '?':
+ printf("usbip: invalid option\n");
default:
- show_help();
+ usbip_usage();
+ goto out;
+ }
}
+ cmd = argv[optind];
+ if (cmd) {
+ for (i = 0; cmds[i].name != NULL; i++)
+ if (!strcmp(cmds[i].name, cmd)) {
+ argc -= optind;
+ argv += optind;
+ optind = 0;
+ rc = run_command(&cmds[i], argc, argv);
+ goto out;
+ }
+ }
- usbip_names_free();
-
- exit((ret == 0) ? EXIT_SUCCESS : EXIT_FAILURE);
+ /* invalid command */
+ usbip_help(0, NULL);
+out:
+ return (rc > -1 ? EXIT_SUCCESS : EXIT_FAILURE);
}
diff --git a/drivers/staging/usbip/userspace/src/usbip.h b/drivers/staging/usbip/userspace/src/usbip.h
new file mode 100644
index 00000000000..14d4a475b68
--- /dev/null
+++ b/drivers/staging/usbip/userspace/src/usbip.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __USBIP_H
+#define __USBIP_H
+
+#ifdef HAVE_CONFIG_H
+#include "../config.h"
+#endif
+
+/* usbip commands */
+int usbip_attach(int argc, char *argv[]);
+int usbip_detach(int argc, char *argv[]);
+int usbip_list(int argc, char *argv[]);
+int usbip_bind(int argc, char *argv[]);
+int usbip_unbind(int argc, char *argv[]);
+
+void usbip_attach_usage(void);
+void usbip_detach_usage(void);
+void usbip_list_usage(void);
+void usbip_bind_usage(void);
+void usbip_unbind_usage(void);
+
+#endif /* __USBIP_H */
diff --git a/drivers/staging/usbip/userspace/src/usbip_attach.c b/drivers/staging/usbip/userspace/src/usbip_attach.c
new file mode 100644
index 00000000000..b7885a20275
--- /dev/null
+++ b/drivers/staging/usbip/userspace/src/usbip_attach.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <sys/stat.h>
+#include <sysfs/libsysfs.h>
+
+#include <limits.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <fcntl.h>
+#include <getopt.h>
+#include <unistd.h>
+
+#include "vhci_driver.h"
+#include "usbip_common.h"
+#include "usbip_network.h"
+#include "usbip.h"
+
+static const char usbip_attach_usage_string[] =
+ "usbip attach <args>\n"
+ " -h, --host=<host> The machine with exported USB devices\n"
+ " -b, --busid=<busid> Busid of the device on <host>\n";
+
+void usbip_attach_usage(void)
+{
+ printf("usage: %s", usbip_attach_usage_string);
+}
+
+#define MAX_BUFF 100
+static int record_connection(char *host, char *port, char *busid, int rhport)
+{
+ int fd;
+ char path[PATH_MAX+1];
+ char buff[MAX_BUFF+1];
+ int ret;
+
+ mkdir(VHCI_STATE_PATH, 0700);
+
+ snprintf(path, PATH_MAX, VHCI_STATE_PATH"/port%d", rhport);
+
+ fd = open(path, O_WRONLY|O_CREAT|O_TRUNC, S_IRWXU);
+ if (fd < 0)
+ return -1;
+
+ snprintf(buff, MAX_BUFF, "%s %s %s\n",
+ host, port, busid);
+
+ ret = write(fd, buff, strlen(buff));
+ if (ret != (ssize_t) strlen(buff)) {
+ close(fd);
+ return -1;
+ }
+
+ close(fd);
+
+ return 0;
+}
+
+static int import_device(int sockfd, struct usbip_usb_device *udev)
+{
+ int rc;
+ int port;
+
+ rc = usbip_vhci_driver_open();
+ if (rc < 0) {
+ err("open vhci_driver");
+ return -1;
+ }
+
+ port = usbip_vhci_get_free_port();
+ if (port < 0) {
+ err("no free port");
+ usbip_vhci_driver_close();
+ return -1;
+ }
+
+ rc = usbip_vhci_attach_device(port, sockfd, udev->busnum,
+ udev->devnum, udev->speed);
+ if (rc < 0) {
+ err("import device");
+ usbip_vhci_driver_close();
+ return -1;
+ }
+
+ usbip_vhci_driver_close();
+
+ return port;
+}
+
+static int query_import_device(int sockfd, char *busid)
+{
+ int rc;
+ struct op_import_request request;
+ struct op_import_reply reply;
+ uint16_t code = OP_REP_IMPORT;
+
+ memset(&request, 0, sizeof(request));
+ memset(&reply, 0, sizeof(reply));
+
+ /* send a request */
+ rc = usbip_net_send_op_common(sockfd, OP_REQ_IMPORT, 0);
+ if (rc < 0) {
+ err("send op_common");
+ return -1;
+ }
+
+ strncpy(request.busid, busid, SYSFS_BUS_ID_SIZE-1);
+
+ PACK_OP_IMPORT_REQUEST(0, &request);
+
+ rc = usbip_net_send(sockfd, (void *) &request, sizeof(request));
+ if (rc < 0) {
+ err("send op_import_request");
+ return -1;
+ }
+
+ /* receive a reply */
+ rc = usbip_net_recv_op_common(sockfd, &code);
+ if (rc < 0) {
+ err("recv op_common");
+ return -1;
+ }
+
+ rc = usbip_net_recv(sockfd, (void *) &reply, sizeof(reply));
+ if (rc < 0) {
+ err("recv op_import_reply");
+ return -1;
+ }
+
+ PACK_OP_IMPORT_REPLY(0, &reply);
+
+ /* check the reply */
+ if (strncmp(reply.udev.busid, busid, SYSFS_BUS_ID_SIZE)) {
+ err("recv different busid %s", reply.udev.busid);
+ return -1;
+ }
+
+ /* import a device */
+ return import_device(sockfd, &reply.udev);
+}
+
+static int attach_device(char *host, char *busid)
+{
+ int sockfd;
+ int rc;
+ int rhport;
+
+ sockfd = usbip_net_tcp_connect(host, USBIP_PORT_STRING);
+ if (sockfd < 0) {
+ err("tcp connect");
+ return -1;
+ }
+
+ rhport = query_import_device(sockfd, busid);
+ if (rhport < 0) {
+ err("query");
+ return -1;
+ }
+
+ close(sockfd);
+
+ rc = record_connection(host, USBIP_PORT_STRING, busid, rhport);
+ if (rc < 0) {
+ err("record connection");
+ return -1;
+ }
+
+ return 0;
+}
+
+int usbip_attach(int argc, char *argv[])
+{
+ static const struct option opts[] = {
+ { "host", required_argument, NULL, 'h' },
+ { "busid", required_argument, NULL, 'b' },
+ { NULL, 0, NULL, 0 }
+ };
+ char *host = NULL;
+ char *busid = NULL;
+ int opt;
+ int ret = -1;
+
+ for (;;) {
+ opt = getopt_long(argc, argv, "h:b:", opts, NULL);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'h':
+ host = optarg;
+ break;
+ case 'b':
+ busid = optarg;
+ break;
+ default:
+ goto err_out;
+ }
+ }
+
+ if (!host || !busid)
+ goto err_out;
+
+ ret = attach_device(host, busid);
+ goto out;
+
+err_out:
+ usbip_attach_usage();
+out:
+ return ret;
+}
diff --git a/drivers/staging/usbip/userspace/src/usbip_bind.c b/drivers/staging/usbip/userspace/src/usbip_bind.c
new file mode 100644
index 00000000000..9ecaf6e574d
--- /dev/null
+++ b/drivers/staging/usbip/userspace/src/usbip_bind.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <sysfs/libsysfs.h>
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <getopt.h>
+
+#include "usbip_common.h"
+#include "utils.h"
+#include "usbip.h"
+
+enum unbind_status {
+ UNBIND_ST_OK,
+ UNBIND_ST_USBIP_HOST,
+ UNBIND_ST_FAILED
+};
+
+static const char usbip_bind_usage_string[] =
+ "usbip bind <args>\n"
+ " -b, --busid=<busid> Bind " USBIP_HOST_DRV_NAME ".ko to device "
+ "on <busid>\n";
+
+void usbip_bind_usage(void)
+{
+ printf("usage: %s", usbip_bind_usage_string);
+}
+
+/* call at unbound state */
+static int bind_usbip(char *busid)
+{
+ char bus_type[] = "usb";
+ char attr_name[] = "bind";
+ char sysfs_mntpath[SYSFS_PATH_MAX];
+ char bind_attr_path[SYSFS_PATH_MAX];
+ char intf_busid[SYSFS_BUS_ID_SIZE];
+ struct sysfs_device *busid_dev;
+ struct sysfs_attribute *bind_attr;
+ struct sysfs_attribute *bConfValue;
+ struct sysfs_attribute *bNumIntfs;
+ int i, failed = 0;
+ int rc, ret = -1;
+
+ rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
+ if (rc < 0) {
+ err("sysfs must be mounted: %s", strerror(errno));
+ return -1;
+ }
+
+ snprintf(bind_attr_path, sizeof(bind_attr_path), "%s/%s/%s/%s/%s/%s",
+ sysfs_mntpath, SYSFS_BUS_NAME, bus_type, SYSFS_DRIVERS_NAME,
+ USBIP_HOST_DRV_NAME, attr_name);
+
+ bind_attr = sysfs_open_attribute(bind_attr_path);
+ if (!bind_attr) {
+ dbg("problem getting bind attribute: %s", strerror(errno));
+ return -1;
+ }
+
+ busid_dev = sysfs_open_device(bus_type, busid);
+ if (!busid_dev) {
+ dbg("sysfs_open_device %s failed: %s", busid, strerror(errno));
+ goto err_close_bind_attr;
+ }
+
+ bConfValue = sysfs_get_device_attr(busid_dev, "bConfigurationValue");
+ bNumIntfs = sysfs_get_device_attr(busid_dev, "bNumInterfaces");
+
+ if (!bConfValue || !bNumIntfs) {
+ dbg("problem getting device attributes: %s",
+ strerror(errno));
+ goto err_close_busid_dev;
+ }
+
+ for (i = 0; i < atoi(bNumIntfs->value); i++) {
+ snprintf(intf_busid, SYSFS_BUS_ID_SIZE, "%s:%.1s.%d", busid,
+ bConfValue->value, i);
+
+ rc = sysfs_write_attribute(bind_attr, intf_busid,
+ SYSFS_BUS_ID_SIZE);
+ if (rc < 0) {
+ dbg("bind driver at %s failed", intf_busid);
+ failed = 1;
+ }
+ }
+
+ if (!failed)
+ ret = 0;
+
+err_close_busid_dev:
+ sysfs_close_device(busid_dev);
+err_close_bind_attr:
+ sysfs_close_attribute(bind_attr);
+
+ return ret;
+}
+
+/* buggy driver may cause dead lock */
+static int unbind_other(char *busid)
+{
+ char bus_type[] = "usb";
+ char intf_busid[SYSFS_BUS_ID_SIZE];
+ struct sysfs_device *busid_dev;
+ struct sysfs_device *intf_dev;
+ struct sysfs_driver *intf_drv;
+ struct sysfs_attribute *unbind_attr;
+ struct sysfs_attribute *bConfValue;
+ struct sysfs_attribute *bDevClass;
+ struct sysfs_attribute *bNumIntfs;
+ int i, rc;
+ enum unbind_status status = UNBIND_ST_OK;
+
+ busid_dev = sysfs_open_device(bus_type, busid);
+ if (!busid_dev) {
+ dbg("sysfs_open_device %s failed: %s", busid, strerror(errno));
+ return -1;
+ }
+
+ bConfValue = sysfs_get_device_attr(busid_dev, "bConfigurationValue");
+ bDevClass = sysfs_get_device_attr(busid_dev, "bDeviceClass");
+ bNumIntfs = sysfs_get_device_attr(busid_dev, "bNumInterfaces");
+ if (!bConfValue || !bDevClass || !bNumIntfs) {
+ dbg("problem getting device attributes: %s",
+ strerror(errno));
+ goto err_close_busid_dev;
+ }
+
+ if (!strncmp(bDevClass->value, "09", bDevClass->len)) {
+ dbg("skip unbinding of hub");
+ goto err_close_busid_dev;
+ }
+
+ for (i = 0; i < atoi(bNumIntfs->value); i++) {
+ snprintf(intf_busid, SYSFS_BUS_ID_SIZE, "%s:%.1s.%d", busid,
+ bConfValue->value, i);
+ intf_dev = sysfs_open_device(bus_type, intf_busid);
+ if (!intf_dev) {
+ dbg("could not open interface device: %s",
+ strerror(errno));
+ goto err_close_busid_dev;
+ }
+
+ dbg("%s -> %s", intf_dev->name, intf_dev->driver_name);
+
+ if (!strncmp("unknown", intf_dev->driver_name, SYSFS_NAME_LEN))
+ /* unbound interface */
+ continue;
+
+ if (!strncmp(USBIP_HOST_DRV_NAME, intf_dev->driver_name,
+ SYSFS_NAME_LEN)) {
+ /* already bound to usbip-host */
+ status = UNBIND_ST_USBIP_HOST;
+ continue;
+ }
+
+ /* unbinding */
+ intf_drv = sysfs_open_driver(bus_type, intf_dev->driver_name);
+ if (!intf_drv) {
+ dbg("could not open interface driver on %s: %s",
+ intf_dev->name, strerror(errno));
+ goto err_close_intf_dev;
+ }
+
+ unbind_attr = sysfs_get_driver_attr(intf_drv, "unbind");
+ if (!unbind_attr) {
+ dbg("problem getting interface driver attribute: %s",
+ strerror(errno));
+ goto err_close_intf_drv;
+ }
+
+ rc = sysfs_write_attribute(unbind_attr, intf_dev->bus_id,
+ SYSFS_BUS_ID_SIZE);
+ if (rc < 0) {
+ /* NOTE: why keep unbinding other interfaces? */
+ dbg("unbind driver at %s failed", intf_dev->bus_id);
+ status = UNBIND_ST_FAILED;
+ }
+
+ sysfs_close_driver(intf_drv);
+ sysfs_close_device(intf_dev);
+ }
+
+ goto out;
+
+err_close_intf_drv:
+ sysfs_close_driver(intf_drv);
+err_close_intf_dev:
+ sysfs_close_device(intf_dev);
+err_close_busid_dev:
+ status = UNBIND_ST_FAILED;
+out:
+ sysfs_close_device(busid_dev);
+
+ return status;
+}
+
+static int bind_device(char *busid)
+{
+ int rc;
+
+ rc = unbind_other(busid);
+ if (rc == UNBIND_ST_FAILED) {
+ err("could not unbind driver from device on busid %s", busid);
+ return -1;
+ } else if (rc == UNBIND_ST_USBIP_HOST) {
+ err("device on busid %s is already bound to %s", busid,
+ USBIP_HOST_DRV_NAME);
+ return -1;
+ }
+
+ rc = modify_match_busid(busid, 1);
+ if (rc < 0) {
+ err("unable to bind device on %s", busid);
+ return -1;
+ }
+
+ rc = bind_usbip(busid);
+ if (rc < 0) {
+ err("could not bind device to %s", USBIP_HOST_DRV_NAME);
+ modify_match_busid(busid, 0);
+ return -1;
+ }
+
+ printf("bind device on busid %s: complete\n", busid);
+
+ return 0;
+}
+
+int usbip_bind(int argc, char *argv[])
+{
+ static const struct option opts[] = {
+ { "busid", required_argument, NULL, 'b' },
+ { NULL, 0, NULL, 0 }
+ };
+
+ int opt;
+ int ret = -1;
+
+ for (;;) {
+ opt = getopt_long(argc, argv, "b:", opts, NULL);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'b':
+ ret = bind_device(optarg);
+ goto out;
+ default:
+ goto err_out;
+ }
+ }
+
+err_out:
+ usbip_bind_usage();
+out:
+ return ret;
+}
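
The interface IDs built by the snprintf() loops above follow the usual sysfs naming of <busid>:<configuration>.<interface>, the same form used later in unbind_other() and list_devices(). A minimal illustration of that format string, with example values only, is:

#include <stdio.h>

int main(void)
{
	char intf_busid[32];

	/* device "1-2", configuration "1", interface 0 -> "1-2:1.0"
	 * (the busid and configuration value here are examples only) */
	snprintf(intf_busid, sizeof(intf_busid), "%s:%.1s.%d", "1-2", "1", 0);
	printf("%s\n", intf_busid);

	return 0;
}
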
diff --git a/drivers/staging/usbip/userspace/src/usbip_detach.c b/drivers/staging/usbip/userspace/src/usbip_detach.c
new file mode 100644
index 00000000000..89bf3c195c2
--- /dev/null
+++ b/drivers/staging/usbip/userspace/src/usbip_detach.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <sysfs/libsysfs.h>
+
+#include <ctype.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <getopt.h>
+#include <unistd.h>
+
+#include "vhci_driver.h"
+#include "usbip_common.h"
+#include "usbip_network.h"
+#include "usbip.h"
+
+static const char usbip_detach_usage_string[] =
+ "usbip detach <args>\n"
+ " -p, --port=<port> " USBIP_VHCI_DRV_NAME
+ " port the device is on\n";
+
+void usbip_detach_usage(void)
+{
+ printf("usage: %s", usbip_detach_usage_string);
+}
+
+static int detach_port(char *port)
+{
+ int ret;
+ uint8_t portnum;
+
+ for (unsigned int i = 0; i < strlen(port); i++)
+ if (!isdigit(port[i])) {
+ err("invalid port %s", port);
+ return -1;
+ }
+
+ /* check max port */
+
+ portnum = atoi(port);
+
+ ret = usbip_vhci_driver_open();
+ if (ret < 0) {
+ err("open vhci_driver");
+ return -1;
+ }
+
+ ret = usbip_vhci_detach_device(portnum);
+ if (ret < 0)
+ return -1;
+
+ usbip_vhci_driver_close();
+
+ return ret;
+}
+
+int usbip_detach(int argc, char *argv[])
+{
+ static const struct option opts[] = {
+ { "port", required_argument, NULL, 'p' },
+ { NULL, 0, NULL, 0 }
+ };
+ int opt;
+ int ret = -1;
+
+ for (;;) {
+ opt = getopt_long(argc, argv, "p:", opts, NULL);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'p':
+ ret = detach_port(optarg);
+ goto out;
+ default:
+ goto err_out;
+ }
+ }
+
+err_out:
+ usbip_detach_usage();
+out:
+ return ret;
+}
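
The "/* check max port */" comment in detach_port() above is left as a placeholder; the port string is only checked to be numeric before atoi(). A minimal sketch of stricter parsing, assuming a hypothetical VHCI_NPORTS limit, could look like this:

#include <errno.h>
#include <stdlib.h>

#define VHCI_NPORTS 8	/* assumed limit; the real value depends on vhci_hcd */

/* return the parsed port number, or -1 if the string is not a valid port */
static int parse_vhci_port(const char *port)
{
	char *end;
	long val;

	errno = 0;
	val = strtol(port, &end, 10);
	if (errno || end == port || *end != '\0')
		return -1;
	if (val < 0 || val >= VHCI_NPORTS)
		return -1;

	return (int)val;
}
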
diff --git a/drivers/staging/usbip/userspace/src/usbip_list.c b/drivers/staging/usbip/userspace/src/usbip_list.c
new file mode 100644
index 00000000000..ed30d910e03
--- /dev/null
+++ b/drivers/staging/usbip/userspace/src/usbip_list.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <sys/types.h>
+#include <sysfs/libsysfs.h>
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <getopt.h>
+#include <netdb.h>
+#include <unistd.h>
+
+#include "usbip_common.h"
+#include "usbip_network.h"
+#include "usbip.h"
+
+static const char usbip_list_usage_string[] =
+ "usbip list [-p|--parsable] <args>\n"
+ " -p, --parsable Parsable list format\n"
+ " -r, --remote=<host> List the exportable USB devices on <host>\n"
+ " -l, --local List the local USB devices\n";
+
+void usbip_list_usage(void)
+{
+ printf("usage: %s", usbip_list_usage_string);
+}
+
+static int get_exported_devices(char *host, int sockfd)
+{
+ char product_name[100];
+ char class_name[100];
+ struct op_devlist_reply reply;
+ uint16_t code = OP_REP_DEVLIST;
+ struct usbip_usb_device udev;
+ struct usbip_usb_interface uintf;
+ unsigned int i;
+ int j, rc;
+
+ rc = usbip_net_send_op_common(sockfd, OP_REQ_DEVLIST, 0);
+ if (rc < 0) {
+ dbg("usbip_net_send_op_common failed");
+ return -1;
+ }
+
+ rc = usbip_net_recv_op_common(sockfd, &code);
+ if (rc < 0) {
+ dbg("usbip_net_recv_op_common failed");
+ return -1;
+ }
+
+ memset(&reply, 0, sizeof(reply));
+ rc = usbip_net_recv(sockfd, &reply, sizeof(reply));
+ if (rc < 0) {
+ dbg("usbip_net_recv_op_devlist failed");
+ return -1;
+ }
+ PACK_OP_DEVLIST_REPLY(0, &reply);
+ dbg("exportable devices: %d\n", reply.ndev);
+
+ if (reply.ndev == 0) {
+ info("no exportable devices found on %s", host);
+ return 0;
+ }
+
+ printf("Exportable USB devices\n");
+ printf("======================\n");
+ printf(" - %s\n", host);
+
+ for (i = 0; i < reply.ndev; i++) {
+ memset(&udev, 0, sizeof(udev));
+ rc = usbip_net_recv(sockfd, &udev, sizeof(udev));
+ if (rc < 0) {
+ dbg("usbip_net_recv failed: usbip_usb_device[%d]", i);
+ return -1;
+ }
+ usbip_net_pack_usb_device(0, &udev);
+
+ usbip_names_get_product(product_name, sizeof(product_name),
+ udev.idVendor, udev.idProduct);
+ usbip_names_get_class(class_name, sizeof(class_name),
+ udev.bDeviceClass, udev.bDeviceSubClass,
+ udev.bDeviceProtocol);
+ printf("%11s: %s\n", udev.busid, product_name);
+ printf("%11s: %s\n", "", udev.path);
+ printf("%11s: %s\n", "", class_name);
+
+ for (j = 0; j < udev.bNumInterfaces; j++) {
+ rc = usbip_net_recv(sockfd, &uintf, sizeof(uintf));
+ if (rc < 0) {
+ dbg("usbip_net_recv failed: usbip_usb_intf[%d]",
+ j);
+
+ return -1;
+ }
+ usbip_net_pack_usb_interface(0, &uintf);
+
+ usbip_names_get_class(class_name, sizeof(class_name),
+ uintf.bInterfaceClass,
+ uintf.bInterfaceSubClass,
+ uintf.bInterfaceProtocol);
+ printf("%11s: %2d - %s\n", "", j, class_name);
+ }
+ printf("\n");
+ }
+
+ return 0;
+}
+
+static int list_exported_devices(char *host)
+{
+ int rc;
+ int sockfd;
+
+ sockfd = usbip_net_tcp_connect(host, USBIP_PORT_STRING);
+ if (sockfd < 0) {
+ err("could not connect to %s:%s: %s", host,
+ USBIP_PORT_STRING, gai_strerror(sockfd));
+ return -1;
+ }
+ dbg("connected to %s:%s", host, USBIP_PORT_STRING);
+
+ rc = get_exported_devices(host, sockfd);
+ if (rc < 0) {
+ err("failed to get device list from %s", host);
+ close(sockfd);
+ return -1;
+ }
+
+ close(sockfd);
+
+ return 0;
+}
+
+static void print_device(char *busid, char *vendor, char *product,
+ bool parsable)
+{
+ if (parsable)
+ printf("busid=%s#usbid=%.4s:%.4s#", busid, vendor, product);
+ else
+ printf(" - busid %s (%.4s:%.4s)\n", busid, vendor, product);
+}
+
+static void print_interface(char *busid, char *driver, bool parsable)
+{
+ if (parsable)
+ printf("%s=%s#", busid, driver);
+ else
+ printf("%9s%s -> %s\n", "", busid, driver);
+}
+
+static int is_device(void *x)
+{
+ struct sysfs_attribute *devpath;
+ struct sysfs_device *dev = x;
+ int ret = 0;
+
+ devpath = sysfs_get_device_attr(dev, "devpath");
+ if (devpath && *devpath->value != '0')
+ ret = 1;
+
+ return ret;
+}
+
+static int devcmp(void *a, void *b)
+{
+ return strcmp(a, b);
+}
+
+static int list_devices(bool parsable)
+{
+ char bus_type[] = "usb";
+ char busid[SYSFS_BUS_ID_SIZE];
+ struct sysfs_bus *ubus;
+ struct sysfs_device *dev;
+ struct sysfs_device *intf;
+ struct sysfs_attribute *idVendor;
+ struct sysfs_attribute *idProduct;
+ struct sysfs_attribute *bConfValue;
+ struct sysfs_attribute *bNumIntfs;
+ struct dlist *devlist;
+ int i;
+ int ret = -1;
+
+ ubus = sysfs_open_bus(bus_type);
+ if (!ubus) {
+ err("could not open %s bus: %s", bus_type, strerror(errno));
+ return -1;
+ }
+
+ devlist = sysfs_get_bus_devices(ubus);
+ if (!devlist) {
+ err("could not get %s bus devices: %s", bus_type,
+ strerror(errno));
+ goto err_out;
+ }
+
+ /* remove interfaces and root hubs from device list */
+ dlist_filter_sort(devlist, is_device, devcmp);
+
+ if (!parsable) {
+ printf("Local USB devices\n");
+ printf("=================\n");
+ }
+ dlist_for_each_data(devlist, dev, struct sysfs_device) {
+ idVendor = sysfs_get_device_attr(dev, "idVendor");
+ idProduct = sysfs_get_device_attr(dev, "idProduct");
+ bConfValue = sysfs_get_device_attr(dev, "bConfigurationValue");
+ bNumIntfs = sysfs_get_device_attr(dev, "bNumInterfaces");
+ if (!idVendor || !idProduct || !bConfValue || !bNumIntfs) {
+ err("problem getting device attributes: %s",
+ strerror(errno));
+ goto err_out;
+ }
+
+ print_device(dev->bus_id, idVendor->value, idProduct->value,
+ parsable);
+
+ for (i = 0; i < atoi(bNumIntfs->value); i++) {
+ snprintf(busid, sizeof(busid), "%s:%.1s.%d",
+ dev->bus_id, bConfValue->value, i);
+ intf = sysfs_open_device(bus_type, busid);
+ if (!intf) {
+ err("could not open device interface: %s",
+ strerror(errno));
+ goto err_out;
+ }
+ print_interface(busid, intf->driver_name, parsable);
+ sysfs_close_device(intf);
+ }
+ printf("\n");
+ }
+
+ ret = 0;
+
+err_out:
+ sysfs_close_bus(ubus);
+
+ return ret;
+}
+
+int usbip_list(int argc, char *argv[])
+{
+ static const struct option opts[] = {
+ { "parsable", no_argument, NULL, 'p' },
+ { "remote", required_argument, NULL, 'r' },
+ { "local", no_argument, NULL, 'l' },
+ { NULL, 0, NULL, 0 }
+ };
+
+ bool parsable = false;
+ int opt;
+ int ret = -1;
+
+ if (usbip_names_init(USBIDS_FILE))
+ err("failed to open %s", USBIDS_FILE);
+
+ for (;;) {
+ opt = getopt_long(argc, argv, "pr:l", opts, NULL);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'p':
+ parsable = true;
+ break;
+ case 'r':
+ ret = list_exported_devices(optarg);
+ goto out;
+ case 'l':
+ ret = list_devices(parsable);
+ goto out;
+ default:
+ goto err_out;
+ }
+ }
+
+err_out:
+ usbip_list_usage();
+out:
+ usbip_names_free();
+
+ return ret;
+}
diff --git a/drivers/staging/usbip/userspace/src/usbip_network.c b/drivers/staging/usbip/userspace/src/usbip_network.c
index 01be3c7211d..1a84dd37e12 100644
--- a/drivers/staging/usbip/userspace/src/usbip_network.c
+++ b/drivers/staging/usbip/userspace/src/usbip_network.c
@@ -1,11 +1,34 @@
/*
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
*
- * Copyright (C) 2005-2007 Takahiro Hirofuchi
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <sys/socket.h>
+
+#include <string.h>
+
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <netinet/tcp.h>
+#include <unistd.h>
+
+#include "usbip_common.h"
#include "usbip_network.h"
-void pack_uint32_t(int pack, uint32_t *num)
+void usbip_net_pack_uint32_t(int pack, uint32_t *num)
{
uint32_t i;
@@ -17,7 +40,7 @@ void pack_uint32_t(int pack, uint32_t *num)
*num = i;
}
-void pack_uint16_t(int pack, uint16_t *num)
+void usbip_net_pack_uint16_t(int pack, uint16_t *num)
{
uint16_t i;
@@ -29,34 +52,34 @@ void pack_uint16_t(int pack, uint16_t *num)
*num = i;
}
-void pack_usb_device(int pack, struct usb_device *udev)
+void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev)
{
- pack_uint32_t(pack, &udev->busnum);
- pack_uint32_t(pack, &udev->devnum);
- pack_uint32_t(pack, &udev->speed );
+ usbip_net_pack_uint32_t(pack, &udev->busnum);
+ usbip_net_pack_uint32_t(pack, &udev->devnum);
+ usbip_net_pack_uint32_t(pack, &udev->speed);
- pack_uint16_t(pack, &udev->idVendor );
- pack_uint16_t(pack, &udev->idProduct);
- pack_uint16_t(pack, &udev->bcdDevice);
+ usbip_net_pack_uint16_t(pack, &udev->idVendor);
+ usbip_net_pack_uint16_t(pack, &udev->idProduct);
+ usbip_net_pack_uint16_t(pack, &udev->bcdDevice);
}
-void pack_usb_interface(int pack __attribute__((unused)),
- struct usb_interface *udev __attribute__((unused)))
+void usbip_net_pack_usb_interface(int pack __attribute__((unused)),
+ struct usbip_usb_interface *udev
+ __attribute__((unused)))
{
/* uint8_t members need nothing */
}
-
-static ssize_t usbip_xmit(int sockfd, void *buff, size_t bufflen, int sending)
+static ssize_t usbip_net_xmit(int sockfd, void *buff, size_t bufflen,
+ int sending)
{
+ ssize_t nbytes;
ssize_t total = 0;
if (!bufflen)
return 0;
do {
- ssize_t nbytes;
-
if (sending)
nbytes = send(sockfd, buff, bufflen, 0);
else
@@ -65,80 +88,81 @@ static ssize_t usbip_xmit(int sockfd, void *buff, size_t bufflen, int sending)
if (nbytes <= 0)
return -1;
- buff = (void *) ((intptr_t) buff + nbytes);
+ buff = (void *)((intptr_t) buff + nbytes);
bufflen -= nbytes;
total += nbytes;
} while (bufflen > 0);
-
return total;
}
-ssize_t usbip_recv(int sockfd, void *buff, size_t bufflen)
+ssize_t usbip_net_recv(int sockfd, void *buff, size_t bufflen)
{
- return usbip_xmit(sockfd, buff, bufflen, 0);
+ return usbip_net_xmit(sockfd, buff, bufflen, 0);
}
-ssize_t usbip_send(int sockfd, void *buff, size_t bufflen)
+ssize_t usbip_net_send(int sockfd, void *buff, size_t bufflen)
{
- return usbip_xmit(sockfd, buff, bufflen, 1);
+ return usbip_net_xmit(sockfd, buff, bufflen, 1);
}
-int usbip_send_op_common(int sockfd, uint32_t code, uint32_t status)
+int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status)
{
- int ret;
struct op_common op_common;
+ int rc;
- bzero(&op_common, sizeof(op_common));
+ memset(&op_common, 0, sizeof(op_common));
- op_common.version = USBIP_VERSION;
- op_common.code = code;
- op_common.status = status;
+ op_common.version = USBIP_VERSION;
+ op_common.code = code;
+ op_common.status = status;
PACK_OP_COMMON(1, &op_common);
- ret = usbip_send(sockfd, (void *) &op_common, sizeof(op_common));
- if (ret < 0) {
- err("send op_common");
+ rc = usbip_net_send(sockfd, &op_common, sizeof(op_common));
+ if (rc < 0) {
+ dbg("usbip_net_send failed: %d", rc);
return -1;
}
return 0;
}
-int usbip_recv_op_common(int sockfd, uint16_t *code)
+int usbip_net_recv_op_common(int sockfd, uint16_t *code)
{
- int ret;
struct op_common op_common;
+ int rc;
- bzero(&op_common, sizeof(op_common));
+ memset(&op_common, 0, sizeof(op_common));
- ret = usbip_recv(sockfd, (void *) &op_common, sizeof(op_common));
- if (ret < 0) {
- err("recv op_common, %d", ret);
+ rc = usbip_net_recv(sockfd, &op_common, sizeof(op_common));
+ if (rc < 0) {
+ dbg("usbip_net_recv failed: %d", rc);
goto err;
}
PACK_OP_COMMON(0, &op_common);
if (op_common.version != USBIP_VERSION) {
- err("version mismatch, %d %d", op_common.version, USBIP_VERSION);
+ dbg("version mismatch: %d %d", op_common.version,
+ USBIP_VERSION);
goto err;
}
- switch(*code) {
- case OP_UNSPEC:
- break;
- default:
- if (op_common.code != *code) {
- info("unexpected pdu %d for %d", op_common.code, *code);
- goto err;
- }
+ switch (*code) {
+ case OP_UNSPEC:
+ break;
+ default:
+ if (op_common.code != *code) {
+ dbg("unexpected pdu %#0x for %#0x", op_common.code,
+ *code);
+ goto err;
+ }
}
if (op_common.status != ST_OK) {
- info("request failed at peer, %d", op_common.status);
+ dbg("request failed at peer: %d", op_common.status);
goto err;
}
@@ -149,103 +173,85 @@ err:
return -1;
}
-
-int usbip_set_reuseaddr(int sockfd)
+int usbip_net_set_reuseaddr(int sockfd)
{
const int val = 1;
int ret;
ret = setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
if (ret < 0)
- err("setsockopt SO_REUSEADDR");
+ dbg("setsockopt: SO_REUSEADDR");
return ret;
}
-int usbip_set_nodelay(int sockfd)
+int usbip_net_set_nodelay(int sockfd)
{
const int val = 1;
int ret;
ret = setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val));
if (ret < 0)
- err("setsockopt TCP_NODELAY");
+ dbg("setsockopt: TCP_NODELAY");
return ret;
}
-int usbip_set_keepalive(int sockfd)
+int usbip_net_set_keepalive(int sockfd)
{
const int val = 1;
int ret;
ret = setsockopt(sockfd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val));
if (ret < 0)
- err("setsockopt SO_KEEPALIVE");
+ dbg("setsockopt: SO_KEEPALIVE");
return ret;
}
-/* IPv6 Ready */
/*
- * moved here from vhci_attach.c
+ * IPv6 Ready
*/
-int tcp_connect(char *hostname, char *service)
+int usbip_net_tcp_connect(char *hostname, char *service)
{
- struct addrinfo hints, *res, *res0;
+ struct addrinfo hints, *res, *rp;
int sockfd;
- int err;
-
+ int ret;
memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
/* get all possible addresses */
- err = getaddrinfo(hostname, service, &hints, &res0);
- if (err) {
- err("%s %s: %s", hostname, service, gai_strerror(err));
- return -1;
+ ret = getaddrinfo(hostname, service, &hints, &res);
+ if (ret < 0) {
+ dbg("getaddrinfo: %s service %s: %s", hostname, service,
+ gai_strerror(ret));
+ return ret;
}
- /* try all the addresses */
- for (res = res0; res; res = res->ai_next) {
- char hbuf[NI_MAXHOST], sbuf[NI_MAXSERV];
-
- err = getnameinfo(res->ai_addr, res->ai_addrlen,
- hbuf, sizeof(hbuf), sbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV);
- if (err) {
- err("%s %s: %s", hostname, service, gai_strerror(err));
- continue;
- }
-
- dbg("trying %s port %s\n", hbuf, sbuf);
-
- sockfd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
- if (sockfd < 0) {
- err("socket");
+ /* try the addresses */
+ for (rp = res; rp; rp = rp->ai_next) {
+ sockfd = socket(rp->ai_family, rp->ai_socktype,
+ rp->ai_protocol);
+ if (sockfd < 0)
continue;
- }
/* should set TCP_NODELAY for usbip */
- usbip_set_nodelay(sockfd);
- /* TODO: write code for heatbeat */
- usbip_set_keepalive(sockfd);
+ usbip_net_set_nodelay(sockfd);
+ /* TODO: write code for heartbeat */
+ usbip_net_set_keepalive(sockfd);
- err = connect(sockfd, res->ai_addr, res->ai_addrlen);
- if (err < 0) {
- close(sockfd);
- continue;
- }
+ if (connect(sockfd, rp->ai_addr, rp->ai_addrlen) == 0)
+ break;
- /* connected */
- dbg("connected to %s:%s", hbuf, sbuf);
- freeaddrinfo(res0);
- return sockfd;
+ close(sockfd);
}
+ if (!rp) {
+ freeaddrinfo(res);
+ return EAI_SYSTEM;
+ }
- dbg("%s:%s, %s", hostname, service, "no destination to connect to");
- freeaddrinfo(res0);
+ freeaddrinfo(res);
- return -1;
+ return sockfd;
}
diff --git a/drivers/staging/usbip/userspace/src/usbip_network.h b/drivers/staging/usbip/userspace/src/usbip_network.h
index 1225466e1c5..2d1e070fb7b 100644
--- a/drivers/staging/usbip/userspace/src/usbip_network.h
+++ b/drivers/staging/usbip/userspace/src/usbip_network.h
@@ -2,19 +2,20 @@
* Copyright (C) 2005-2007 Takahiro Hirofuchi
*/
-#ifndef _USBIP_NETWORK_H
-#define _USBIP_NETWORK_H
+#ifndef __USBIP_NETWORK_H
+#define __USBIP_NETWORK_H
-#include "usbip.h"
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/tcp.h>
+#ifdef HAVE_CONFIG_H
+#include "../config.h"
+#endif
+#include <sys/types.h>
+#include <sysfs/libsysfs.h>
-/* -------------------------------------------------- */
-/* Define Protocol Format */
-/* -------------------------------------------------- */
+#include <stdint.h>
+#define USBIP_PORT 3240
+#define USBIP_PORT_STRING "3240"
/* ---------------------------------------------------------------------- */
/* Common header for all the kinds of PDUs. */
@@ -33,12 +34,11 @@ struct op_common {
} __attribute__((packed));
#define PACK_OP_COMMON(pack, op_common) do {\
- pack_uint16_t(pack, &(op_common)->version);\
- pack_uint16_t(pack, &(op_common)->code );\
- pack_uint32_t(pack, &(op_common)->status );\
+ usbip_net_pack_uint16_t(pack, &(op_common)->version);\
+ usbip_net_pack_uint16_t(pack, &(op_common)->code );\
+ usbip_net_pack_uint32_t(pack, &(op_common)->status );\
} while (0)
-
/* ---------------------------------------------------------------------- */
/* Dummy Code */
#define OP_UNSPEC 0x00
@@ -56,11 +56,10 @@ struct op_devinfo_request {
} __attribute__((packed));
struct op_devinfo_reply {
- struct usb_device udev;
- struct usb_interface uinf[];
+ struct usbip_usb_device udev;
+ struct usbip_usb_interface uinf[];
} __attribute__((packed));
-
/* ---------------------------------------------------------------------- */
/* Import a remote USB device. */
#define OP_IMPORT 0x03
@@ -72,19 +71,17 @@ struct op_import_request {
} __attribute__((packed));
struct op_import_reply {
- struct usb_device udev;
-// struct usb_interface uinf[];
+ struct usbip_usb_device udev;
+// struct usbip_usb_interface uinf[];
} __attribute__((packed));
#define PACK_OP_IMPORT_REQUEST(pack, request) do {\
} while (0)
#define PACK_OP_IMPORT_REPLY(pack, reply) do {\
- pack_usb_device(pack, &(reply)->udev);\
+ usbip_net_pack_usb_device(pack, &(reply)->udev);\
} while (0)
-
-
/* ---------------------------------------------------------------------- */
/* Export a USB device to a remote host. */
#define OP_EXPORT 0x06
@@ -92,7 +89,7 @@ struct op_import_reply {
#define OP_REP_EXPORT (OP_REPLY | OP_EXPORT)
struct op_export_request {
- struct usb_device udev;
+ struct usbip_usb_device udev;
} __attribute__((packed));
struct op_export_reply {
@@ -101,7 +98,7 @@ struct op_export_reply {
#define PACK_OP_EXPORT_REQUEST(pack, request) do {\
- pack_usb_device(pack, &(request)->udev);\
+ usbip_net_pack_usb_device(pack, &(request)->udev);\
} while (0)
#define PACK_OP_EXPORT_REPLY(pack, reply) do {\
@@ -114,7 +111,7 @@ struct op_export_reply {
#define OP_REP_UNEXPORT (OP_REPLY | OP_UNEXPORT)
struct op_unexport_request {
- struct usb_device udev;
+ struct usbip_usb_device udev;
} __attribute__((packed));
struct op_unexport_reply {
@@ -122,14 +119,12 @@ struct op_unexport_reply {
} __attribute__((packed));
#define PACK_OP_UNEXPORT_REQUEST(pack, request) do {\
- pack_usb_device(pack, &(request)->udev);\
+ usbip_net_pack_usb_device(pack, &(request)->udev);\
} while (0)
#define PACK_OP_UNEXPORT_REPLY(pack, reply) do {\
} while (0)
-
-
/* ---------------------------------------------------------------------- */
/* Negotiate IPSec encryption key. (still not used) */
#define OP_CRYPKEY 0x04
@@ -161,38 +156,29 @@ struct op_devlist_reply {
} __attribute__((packed));
struct op_devlist_reply_extra {
- struct usb_device udev;
- struct usb_interface uinf[];
+ struct usbip_usb_device udev;
+ struct usbip_usb_interface uinf[];
} __attribute__((packed));
#define PACK_OP_DEVLIST_REQUEST(pack, request) do {\
} while (0)
#define PACK_OP_DEVLIST_REPLY(pack, reply) do {\
- pack_uint32_t(pack, &(reply)->ndev);\
+ usbip_net_pack_uint32_t(pack, &(reply)->ndev);\
} while (0)
-
-/* -------------------------------------------------- */
-/* Declare Prototype Function */
-/* -------------------------------------------------- */
-
-void pack_uint32_t(int pack, uint32_t *num);
-void pack_uint16_t(int pack, uint16_t *num);
-void pack_usb_device(int pack, struct usb_device *udev);
-void pack_usb_interface(int pack, struct usb_interface *uinf);
-
-ssize_t usbip_recv(int sockfd, void *buff, size_t bufflen);
-ssize_t usbip_send(int sockfd, void *buff, size_t bufflen);
-int usbip_send_op_common(int sockfd, uint32_t code, uint32_t status);
-int usbip_recv_op_common(int sockfd, uint16_t *code);
-int usbip_set_reuseaddr(int sockfd);
-int usbip_set_nodelay(int sockfd);
-int usbip_set_keepalive(int sockfd);
-
-int tcp_connect(char *hostname, char *service);
-
-#define USBIP_PORT 3240
-#define USBIP_PORT_STRING "3240"
-
-#endif
+void usbip_net_pack_uint32_t(int pack, uint32_t *num);
+void usbip_net_pack_uint16_t(int pack, uint16_t *num);
+void usbip_net_pack_usb_device(int pack, struct usbip_usb_device *udev);
+void usbip_net_pack_usb_interface(int pack, struct usbip_usb_interface *uinf);
+
+ssize_t usbip_net_recv(int sockfd, void *buff, size_t bufflen);
+ssize_t usbip_net_send(int sockfd, void *buff, size_t bufflen);
+int usbip_net_send_op_common(int sockfd, uint32_t code, uint32_t status);
+int usbip_net_recv_op_common(int sockfd, uint16_t *code);
+int usbip_net_set_reuseaddr(int sockfd);
+int usbip_net_set_nodelay(int sockfd);
+int usbip_net_set_keepalive(int sockfd);
+int usbip_net_tcp_connect(char *hostname, char *port);
+
+#endif /* __USBIP_NETWORK_H */
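
The bodies of the byte-order helpers declared above are mostly elided by the diff context in usbip_network.c; a minimal sketch consistent with the visible fragments, assuming the pack flag selects host-to-network versus network-to-host conversion, is:

#include <stdint.h>
#include <arpa/inet.h>

/* pack != 0: convert to network byte order before sending;
 * pack == 0: convert a received value back to host byte order.
 * (sketch only; the names below are illustrative, not the patch's) */
static void net_pack_u32(int pack, uint32_t *num)
{
	*num = pack ? htonl(*num) : ntohl(*num);
}

static void net_pack_u16(int pack, uint16_t *num)
{
	*num = pack ? htons(*num) : ntohs(*num);
}
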
diff --git a/drivers/staging/usbip/userspace/src/usbip_unbind.c b/drivers/staging/usbip/userspace/src/usbip_unbind.c
new file mode 100644
index 00000000000..d5a9ab6af2a
--- /dev/null
+++ b/drivers/staging/usbip/userspace/src/usbip_unbind.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <sysfs/libsysfs.h>
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <getopt.h>
+
+#include "usbip_common.h"
+#include "utils.h"
+#include "usbip.h"
+
+static const char usbip_unbind_usage_string[] =
+ "usbip unbind <args>\n"
+ " -b, --busid=<busid> Unbind " USBIP_HOST_DRV_NAME ".ko from "
+ "device on <busid>\n";
+
+void usbip_unbind_usage(void)
+{
+ printf("usage: %s", usbip_unbind_usage_string);
+}
+
+static int unbind_device(char *busid)
+{
+ char bus_type[] = "usb";
+ struct sysfs_driver *usbip_host_drv;
+ struct sysfs_device *dev;
+ struct dlist *devlist;
+ int verified = 0;
+ int rc, ret = -1;
+
+ char attr_name[] = "bConfigurationValue";
+ char sysfs_mntpath[SYSFS_PATH_MAX];
+ char busid_attr_path[SYSFS_PATH_MAX];
+ struct sysfs_attribute *busid_attr;
+ char *val = NULL;
+ int len;
+
+ /* verify the busid device is using usbip-host */
+ usbip_host_drv = sysfs_open_driver(bus_type, USBIP_HOST_DRV_NAME);
+ if (!usbip_host_drv) {
+ err("could not open %s driver: %s", USBIP_HOST_DRV_NAME,
+ strerror(errno));
+ return -1;
+ }
+
+ devlist = sysfs_get_driver_devices(usbip_host_drv);
+ if (!devlist) {
+ err("%s is not in use by any devices", USBIP_HOST_DRV_NAME);
+ goto err_close_usbip_host_drv;
+ }
+
+ dlist_for_each_data(devlist, dev, struct sysfs_device) {
+ if (!strncmp(busid, dev->name, strlen(busid)) &&
+ !strncmp(dev->driver_name, USBIP_HOST_DRV_NAME,
+ strlen(USBIP_HOST_DRV_NAME))) {
+ verified = 1;
+ break;
+ }
+ }
+
+ if (!verified) {
+ err("device on busid %s is not using %s", busid,
+ USBIP_HOST_DRV_NAME);
+ goto err_close_usbip_host_drv;
+ }
+
+ /*
+ * NOTE: A read and a write of an attribute value of the device that
+ * busid refers to must be done to start probing. That way a rebind of
+ * the default driver for the device occurs.
+ *
+ * This seems very hackish and adds a lot of pointless code. I think it
+ * should be done in the kernel by the driver after del_match_busid is
+ * finished!
+ */
+
+ rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
+ if (rc < 0) {
+ err("sysfs must be mounted: %s", strerror(errno));
+ return -1;
+ }
+
+ snprintf(busid_attr_path, sizeof(busid_attr_path), "%s/%s/%s/%s/%s/%s",
+ sysfs_mntpath, SYSFS_BUS_NAME, bus_type, SYSFS_DEVICES_NAME,
+ busid, attr_name);
+
+ /* read a device attribute */
+ busid_attr = sysfs_open_attribute(busid_attr_path);
+ if (!busid_attr) {
+ err("could not open %s/%s: %s", busid, attr_name,
+ strerror(errno));
+ return -1;
+ }
+
+ if (sysfs_read_attribute(busid_attr) < 0) {
+ err("problem reading attribute: %s", strerror(errno));
+ goto err_out;
+ }
+
+ len = busid_attr->len;
+ val = malloc(len);
+ memcpy(val, busid_attr->value, len);
+ sysfs_close_attribute(busid_attr);
+
+ /* notify driver of unbind */
+ rc = modify_match_busid(busid, 0);
+ if (rc < 0) {
+ err("unable to unbind device on %s", busid);
+ goto err_out;
+ }
+
+ /* write the device attribute */
+ busid_attr = sysfs_open_attribute(busid_attr_path);
+ if (!busid_attr) {
+ err("could not open %s/%s: %s", busid, attr_name,
+ strerror(errno));
+ return -1;
+ }
+
+ rc = sysfs_write_attribute(busid_attr, val, len);
+ if (rc < 0) {
+ err("problem writing attribute: %s", strerror(errno));
+ goto err_out;
+ }
+ sysfs_close_attribute(busid_attr);
+
+ ret = 0;
+ printf("unbind device on busid %s: complete\n", busid);
+
+err_out:
+ free(val);
+err_close_usbip_host_drv:
+ sysfs_close_driver(usbip_host_drv);
+
+ return ret;
+}
+
+int usbip_unbind(int argc, char *argv[])
+{
+ static const struct option opts[] = {
+ { "busid", required_argument, NULL, 'b' },
+ { NULL, 0, NULL, 0 }
+ };
+
+ int opt;
+ int ret = -1;
+
+ for (;;) {
+ opt = getopt_long(argc, argv, "b:", opts, NULL);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'b':
+ ret = unbind_device(optarg);
+ goto out;
+ default:
+ goto err_out;
+ }
+ }
+
+err_out:
+ usbip_unbind_usage();
+out:
+ return ret;
+}
diff --git a/drivers/staging/usbip/userspace/src/usbipd.c b/drivers/staging/usbip/userspace/src/usbipd.c
index ec9faac5ff8..8668a8092d4 100644
--- a/drivers/staging/usbip/userspace/src/usbipd.c
+++ b/drivers/staging/usbip/userspace/src/usbipd.c
@@ -1,15 +1,29 @@
/*
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
*
- * Copyright (C) 2005-2007 Takahiro Hirofuchi
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef HAVE_CONFIG_H
#include "../config.h"
#endif
+#include <errno.h>
#include <unistd.h>
#include <netdb.h>
-#include <strings.h>
+#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -23,70 +37,160 @@
#define _GNU_SOURCE
#include <getopt.h>
+#include <glib.h>
#include <signal.h>
-#include "usbip.h"
+#include "usbip_host_driver.h"
+#include "usbip_common.h"
#include "usbip_network.h"
-#include <glib.h>
+#undef PROGNAME
+#define PROGNAME "usbipd"
+#define MAXSOCKFD 20
-static const char version[] = PACKAGE_STRING;
+GMainLoop *main_loop;
+static const char usbip_version_string[] = PACKAGE_STRING;
+
+static const char usbipd_help_string[] =
+ "usage: usbipd [options] \n"
+ " -D, --daemon \n"
+ " Run as a daemon process. \n"
+ " \n"
+ " -d, --debug \n"
+ " Print debugging information. \n"
+ " \n"
+ " -h, --help \n"
+ " Print this help. \n"
+ " \n"
+ " -v, --version \n"
+ " Show version. \n";
+
+static void usbipd_help(void)
+{
+ printf("%s\n", usbipd_help_string);
+}
-static int send_reply_devlist(int sockfd)
+static int recv_request_import(int sockfd)
{
- int ret;
+ struct op_import_request req;
+ struct op_common reply;
struct usbip_exported_device *edev;
- struct op_devlist_reply reply;
+ struct usbip_usb_device pdu_udev;
+ int found = 0;
+ int error = 0;
+ int rc;
+ memset(&req, 0, sizeof(req));
+ memset(&reply, 0, sizeof(reply));
- reply.ndev = 0;
+ rc = usbip_net_recv(sockfd, &req, sizeof(req));
+ if (rc < 0) {
+ dbg("usbip_net_recv failed: import request");
+ return -1;
+ }
+ PACK_OP_IMPORT_REQUEST(0, &req);
- /* how many devices are exported ? */
- dlist_for_each_data(stub_driver->edev_list, edev, struct usbip_exported_device) {
- reply.ndev += 1;
+ dlist_for_each_data(host_driver->edev_list, edev,
+ struct usbip_exported_device) {
+ if (!strncmp(req.busid, edev->udev.busid, SYSFS_BUS_ID_SIZE)) {
+ info("found requested device: %s", req.busid);
+ found = 1;
+ break;
+ }
}
- dbg("%d devices are exported", reply.ndev);
+ if (found) {
+ /* should set TCP_NODELAY for usbip */
+ usbip_net_set_nodelay(sockfd);
- ret = usbip_send_op_common(sockfd, OP_REP_DEVLIST, ST_OK);
- if (ret < 0) {
- err("send op_common");
- return ret;
+ /* export device needs a TCP/IP socket descriptor */
+ rc = usbip_host_export_device(edev, sockfd);
+ if (rc < 0)
+ error = 1;
+ } else {
+ info("requested device not found: %s", req.busid);
+ error = 1;
}
- PACK_OP_DEVLIST_REPLY(1, &reply);
+ rc = usbip_net_send_op_common(sockfd, OP_REP_IMPORT,
+ (!error ? ST_OK : ST_NA));
+ if (rc < 0) {
+ dbg("usbip_net_send_op_common failed: %#0x", OP_REP_IMPORT);
+ return -1;
+ }
- ret = usbip_send(sockfd, (void *) &reply, sizeof(reply));
- if (ret < 0) {
- err("send op_devlist_reply");
- return ret;
+ if (error) {
+ dbg("import request busid %s: failed", req.busid);
+ return -1;
+ }
+
+ memcpy(&pdu_udev, &edev->udev, sizeof(pdu_udev));
+ usbip_net_pack_usb_device(1, &pdu_udev);
+
+ rc = usbip_net_send(sockfd, &pdu_udev, sizeof(pdu_udev));
+ if (rc < 0) {
+ dbg("usbip_net_send failed: devinfo");
+ return -1;
}
- dlist_for_each_data(stub_driver->edev_list, edev, struct usbip_exported_device) {
- struct usb_device pdu_udev;
+ dbg("import request busid %s: complete", req.busid);
+
+ return 0;
+}
+
+static int send_reply_devlist(int connfd)
+{
+ struct usbip_exported_device *edev;
+ struct usbip_usb_device pdu_udev;
+ struct usbip_usb_interface pdu_uinf;
+ struct op_devlist_reply reply;
+ int i;
+ int rc;
+ reply.ndev = 0;
+ /* number of exported devices */
+ dlist_for_each_data(host_driver->edev_list, edev,
+ struct usbip_exported_device) {
+ reply.ndev += 1;
+ }
+ info("exportable devices: %d", reply.ndev);
+
+ rc = usbip_net_send_op_common(connfd, OP_REP_DEVLIST, ST_OK);
+ if (rc < 0) {
+ dbg("usbip_net_send_op_common failed: %#0x", OP_REP_DEVLIST);
+ return -1;
+ }
+ PACK_OP_DEVLIST_REPLY(1, &reply);
+
+ rc = usbip_net_send(connfd, &reply, sizeof(reply));
+ if (rc < 0) {
+ dbg("usbip_net_send failed: %#0x", OP_REP_DEVLIST);
+ return -1;
+ }
+
+ dlist_for_each_data(host_driver->edev_list, edev,
+ struct usbip_exported_device) {
dump_usb_device(&edev->udev);
memcpy(&pdu_udev, &edev->udev, sizeof(pdu_udev));
- pack_usb_device(1, &pdu_udev);
+ usbip_net_pack_usb_device(1, &pdu_udev);
- ret = usbip_send(sockfd, (void *) &pdu_udev, sizeof(pdu_udev));
- if (ret < 0) {
- err("send pdu_udev");
- return ret;
+ rc = usbip_net_send(connfd, &pdu_udev, sizeof(pdu_udev));
+ if (rc < 0) {
+ dbg("usbip_net_send failed: pdu_udev");
+ return -1;
}
- for (int i=0; i < edev->udev.bNumInterfaces; i++) {
- struct usb_interface pdu_uinf;
-
+ for (i = 0; i < edev->udev.bNumInterfaces; i++) {
dump_usb_interface(&edev->uinf[i]);
memcpy(&pdu_uinf, &edev->uinf[i], sizeof(pdu_uinf));
- pack_usb_interface(1, &pdu_uinf);
+ usbip_net_pack_usb_interface(1, &pdu_uinf);
- ret = usbip_send(sockfd, (void *) &pdu_uinf, sizeof(pdu_uinf));
- if (ret < 0) {
- err("send pdu_uinf");
- return ret;
+ rc = usbip_net_send(connfd, &pdu_uinf,
+ sizeof(pdu_uinf));
+ if (rc < 0) {
+ dbg("usbip_net_send failed: pdu_uinf");
+ return -1;
}
}
}
@@ -94,283 +198,227 @@ static int send_reply_devlist(int sockfd)
return 0;
}
-
-static int recv_request_devlist(int sockfd)
+static int recv_request_devlist(int connfd)
{
- int ret;
struct op_devlist_request req;
+ int rc;
- bzero(&req, sizeof(req));
+ memset(&req, 0, sizeof(req));
- ret = usbip_recv(sockfd, (void *) &req, sizeof(req));
- if (ret < 0) {
- err("recv devlist request");
+ rc = usbip_net_recv(connfd, &req, sizeof(req));
+ if (rc < 0) {
+ dbg("usbip_net_recv failed: devlist request");
return -1;
}
- ret = send_reply_devlist(sockfd);
- if (ret < 0) {
- err("send devlist reply");
+ rc = send_reply_devlist(connfd);
+ if (rc < 0) {
+ dbg("send_reply_devlist failed");
return -1;
}
return 0;
}
-
-static int recv_request_import(int sockfd)
+static int recv_pdu(int connfd)
{
+ uint16_t code = OP_UNSPEC;
int ret;
- struct op_import_request req;
- struct op_common reply;
- struct usbip_exported_device *edev;
- int found = 0;
- int error = 0;
- bzero(&req, sizeof(req));
- bzero(&reply, sizeof(reply));
-
- ret = usbip_recv(sockfd, (void *) &req, sizeof(req));
+ ret = usbip_net_recv_op_common(connfd, &code);
if (ret < 0) {
- err("recv import request");
+ dbg("could not receive opcode: %#0x", code);
return -1;
}
- PACK_OP_IMPORT_REQUEST(0, &req);
-
- dlist_for_each_data(stub_driver->edev_list, edev, struct usbip_exported_device) {
- if (!strncmp(req.busid, edev->udev.busid, SYSFS_BUS_ID_SIZE)) {
- dbg("found requested device %s", req.busid);
- found = 1;
- break;
- }
+ ret = usbip_host_refresh_device_list();
+ if (ret < 0) {
+ dbg("could not refresh device list: %d", ret);
+ return -1;
}
- if (found) {
- /* should set TCP_NODELAY for usbip */
- usbip_set_nodelay(sockfd);
-
- /* export_device needs a TCP/IP socket descriptor */
- ret = usbip_stub_export_device(edev, sockfd);
- if (ret < 0)
- error = 1;
- } else {
- info("not found requested device %s", req.busid);
- error = 1;
+ info("received request: %#0x(%d)", code, connfd);
+ switch (code) {
+ case OP_REQ_DEVLIST:
+ ret = recv_request_devlist(connfd);
+ break;
+ case OP_REQ_IMPORT:
+ ret = recv_request_import(connfd);
+ break;
+ case OP_REQ_DEVINFO:
+ case OP_REQ_CRYPKEY:
+ default:
+ err("received an unknown opcode: %#0x", code);
+ ret = -1;
}
+ if (ret == 0)
+ info("request %#0x(%d): complete", code, connfd);
+ else
+ info("request %#0x(%d): failed", code, connfd);
- ret = usbip_send_op_common(sockfd, OP_REP_IMPORT, (!error ? ST_OK : ST_NA));
- if (ret < 0) {
- err("send import reply");
- return -1;
- }
-
- if (!error) {
- struct usb_device pdu_udev;
+ return ret;
+}
- memcpy(&pdu_udev, &edev->udev, sizeof(pdu_udev));
- pack_usb_device(1, &pdu_udev);
+#ifdef HAVE_LIBWRAP
+static int tcpd_auth(int connfd)
+{
+ struct request_info request;
+ int rc;
- ret = usbip_send(sockfd, (void *) &pdu_udev, sizeof(pdu_udev));
- if (ret < 0) {
- err("send devinfo");
- return -1;
- }
- }
+ request_init(&request, RQ_DAEMON, PROGNAME, RQ_FILE, connfd, 0);
+ fromhost(&request);
+ rc = hosts_access(&request);
+ if (rc == 0)
+ return -1;
return 0;
}
+#endif
-
-
-static int recv_pdu(int sockfd)
+static int do_accept(int listenfd)
{
- int ret;
- uint16_t code = OP_UNSPEC;
+ int connfd;
+ struct sockaddr_storage ss;
+ socklen_t len = sizeof(ss);
+ char host[NI_MAXHOST], port[NI_MAXSERV];
+ int rc;
+ memset(&ss, 0, sizeof(ss));
- ret = usbip_recv_op_common(sockfd, &code);
- if (ret < 0) {
- err("recv op_common, %d", ret);
- return ret;
+ connfd = accept(listenfd, (struct sockaddr *) &ss, &len);
+ if (connfd < 0) {
+ err("failed to accept connection");
+ return -1;
}
+ rc = getnameinfo((struct sockaddr *) &ss, len, host, sizeof(host),
+ port, sizeof(port), NI_NUMERICHOST | NI_NUMERICSERV);
+ if (rc)
+ err("getnameinfo: %s", gai_strerror(rc));
- ret = usbip_stub_refresh_device_list();
- if (ret < 0)
+#ifdef HAVE_LIBWRAP
+ rc = tcpd_auth(connfd);
+ if (rc < 0) {
+ info("denied access from %s", host);
+ close(connfd);
return -1;
+ }
+#endif
+ info("connection from %s:%s", host, port);
- switch(code) {
- case OP_REQ_DEVLIST:
- ret = recv_request_devlist(sockfd);
- break;
+ return connfd;
+}
- case OP_REQ_IMPORT:
- ret = recv_request_import(sockfd);
- break;
+gboolean process_request(GIOChannel *gio, GIOCondition condition,
+ gpointer unused_data)
+{
+ int listenfd;
+ int connfd;
- case OP_REQ_DEVINFO:
- case OP_REQ_CRYPKEY:
+ (void) unused_data;
- default:
- err("unknown op_code, %d", code);
- ret = -1;
+ if (condition & (G_IO_ERR | G_IO_HUP | G_IO_NVAL)) {
+ err("unknown condition");
+ BUG();
}
+ if (condition & G_IO_IN) {
+ listenfd = g_io_channel_unix_get_fd(gio);
+ connfd = do_accept(listenfd);
+ if (connfd < 0)
+ return TRUE;
- return ret;
-}
-
-
+ recv_pdu(connfd);
+ close(connfd);
+ }
+ return TRUE;
+}
static void log_addrinfo(struct addrinfo *ai)
{
- int ret;
char hbuf[NI_MAXHOST];
char sbuf[NI_MAXSERV];
+ int rc;
- ret = getnameinfo(ai->ai_addr, ai->ai_addrlen, hbuf, sizeof(hbuf),
- sbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV);
- if (ret)
- err("getnameinfo, %s", gai_strerror(ret));
-
- info("listen at [%s]:%s", hbuf, sbuf);
-}
-
-static struct addrinfo *my_getaddrinfo(char *host, int ai_family)
-{
- int ret;
- struct addrinfo hints, *ai_head;
-
- bzero(&hints, sizeof(hints));
-
- hints.ai_family = ai_family;
- hints.ai_socktype = SOCK_STREAM;
- hints.ai_flags = AI_PASSIVE;
+ rc = getnameinfo(ai->ai_addr, ai->ai_addrlen, hbuf, sizeof(hbuf),
+ sbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV);
+ if (rc)
+ err("getnameinfo: %s", gai_strerror(rc));
- ret = getaddrinfo(host, USBIP_PORT_STRING, &hints, &ai_head);
- if (ret) {
- err("%s: %s", USBIP_PORT_STRING, gai_strerror(ret));
- return NULL;
- }
-
- return ai_head;
+ info("listening on %s:%s", hbuf, sbuf);
}
-#define MAXSOCK 20
-static int listen_all_addrinfo(struct addrinfo *ai_head, int lsock[])
+static int listen_all_addrinfo(struct addrinfo *ai_head, int sockfdlist[])
{
struct addrinfo *ai;
- int n = 0; /* number of sockets */
+ int ret, nsockfd = 0;
- for (ai = ai_head; ai && n < MAXSOCK; ai = ai->ai_next) {
- int ret;
-
- lsock[n] = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
- if (lsock[n] < 0)
+ for (ai = ai_head; ai && nsockfd < MAXSOCKFD; ai = ai->ai_next) {
+ sockfdlist[nsockfd] = socket(ai->ai_family, ai->ai_socktype,
+ ai->ai_protocol);
+ if (sockfdlist[nsockfd] < 0)
continue;
- usbip_set_reuseaddr(lsock[n]);
- usbip_set_nodelay(lsock[n]);
+ usbip_net_set_reuseaddr(sockfdlist[nsockfd]);
+ usbip_net_set_nodelay(sockfdlist[nsockfd]);
- if (lsock[n] >= FD_SETSIZE) {
- close(lsock[n]);
- lsock[n] = -1;
+ if (sockfdlist[nsockfd] >= FD_SETSIZE) {
+ close(sockfdlist[nsockfd]);
+ sockfdlist[nsockfd] = -1;
continue;
}
- ret = bind(lsock[n], ai->ai_addr, ai->ai_addrlen);
+ ret = bind(sockfdlist[nsockfd], ai->ai_addr, ai->ai_addrlen);
if (ret < 0) {
- close(lsock[n]);
- lsock[n] = -1;
+ close(sockfdlist[nsockfd]);
+ sockfdlist[nsockfd] = -1;
continue;
}
- ret = listen(lsock[n], SOMAXCONN);
+ ret = listen(sockfdlist[nsockfd], SOMAXCONN);
if (ret < 0) {
- close(lsock[n]);
- lsock[n] = -1;
+ close(sockfdlist[nsockfd]);
+ sockfdlist[nsockfd] = -1;
continue;
}
log_addrinfo(ai);
-
- /* next if succeed */
- n++;
+ nsockfd++;
}
- if (n == 0) {
- err("no socket to listen to");
+ if (nsockfd == 0)
return -1;
- }
- dbg("listen %d address%s", n, (n==1)?"":"es");
+ dbg("listening on %d address%s", nsockfd, (nsockfd == 1) ? "" : "es");
- return n;
+ return nsockfd;
}
-#ifdef HAVE_LIBWRAP
-static int tcpd_auth(int csock)
+static struct addrinfo *do_getaddrinfo(char *host, int ai_family)
{
- int ret;
- struct request_info request;
-
- request_init(&request, RQ_DAEMON, "usbipd", RQ_FILE, csock, 0);
-
- fromhost(&request);
-
- ret = hosts_access(&request);
- if (!ret)
- return -1;
-
- return 0;
-}
-#endif
-
-static int my_accept(int lsock)
-{
- int csock;
- struct sockaddr_storage ss;
- socklen_t len = sizeof(ss);
- char host[NI_MAXHOST], port[NI_MAXSERV];
- int ret;
-
- bzero(&ss, sizeof(ss));
-
- csock = accept(lsock, (struct sockaddr *) &ss, &len);
- if (csock < 0) {
- err("accept");
- return -1;
- }
+ struct addrinfo hints, *ai_head;
+ int rc;
- ret = getnameinfo((struct sockaddr *) &ss, len,
- host, sizeof(host), port, sizeof(port),
- (NI_NUMERICHOST | NI_NUMERICSERV));
- if (ret)
- err("getnameinfo, %s", gai_strerror(ret));
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = ai_family;
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_flags = AI_PASSIVE;
-#ifdef HAVE_LIBWRAP
- ret = tcpd_auth(csock);
- if (ret < 0) {
- info("deny access from %s", host);
- close(csock);
- return -1;
+ rc = getaddrinfo(host, USBIP_PORT_STRING, &hints, &ai_head);
+ if (rc) {
+ err("failed to get a network address %s: %s", USBIP_PORT_STRING,
+ gai_strerror(rc));
+ return NULL;
}
-#endif
- info("connected from %s:%s", host, port);
-
- return csock;
+ return ai_head;
}
-
-GMainLoop *main_loop;
-
static void signal_handler(int i)
{
- dbg("signal catched, code %d", i);
+ dbg("received signal: code %d", i);
if (main_loop)
g_main_loop_quit(main_loop);
@@ -380,191 +428,140 @@ static void set_signal(void)
{
struct sigaction act;
- bzero(&act, sizeof(act));
+ memset(&act, 0, sizeof(act));
act.sa_handler = signal_handler;
sigemptyset(&act.sa_mask);
sigaction(SIGTERM, &act, NULL);
sigaction(SIGINT, &act, NULL);
}
-
-gboolean process_comming_request(GIOChannel *gio, GIOCondition condition,
- gpointer data __attribute__((unused)))
-{
- int ret;
-
- if (condition & (G_IO_ERR | G_IO_HUP | G_IO_NVAL))
- g_error("unknown condition");
-
-
- if (condition & G_IO_IN) {
- int lsock;
- int csock;
-
- lsock = g_io_channel_unix_get_fd(gio);
-
- csock = my_accept(lsock);
- if (csock < 0)
- return TRUE;
-
- ret = recv_pdu(csock);
- if (ret < 0)
- err("process recieved pdu");
-
- close(csock);
- }
-
- return TRUE;
-}
-
-
-static void do_standalone_mode(gboolean daemonize)
+static int do_standalone_mode(gboolean daemonize)
{
- int ret;
- int lsock[MAXSOCK];
struct addrinfo *ai_head;
- int n;
-
+ int sockfdlist[MAXSOCKFD];
+ int nsockfd;
+ int i;
+ if (usbip_names_init(USBIDS_FILE))
+ err("failed to open %s", USBIDS_FILE);
- ret = usbip_names_init(USBIDS_FILE);
- if (ret)
- err("open usb.ids");
-
- ret = usbip_stub_driver_open();
- if (ret < 0)
- g_error("driver open failed");
+ if (usbip_host_driver_open()) {
+ err("please load " USBIP_CORE_MOD_NAME ".ko and "
+ USBIP_HOST_DRV_NAME ".ko!");
+ return -1;
+ }
if (daemonize) {
- if (daemon(0,0) < 0)
- g_error("daemonizing failed: %s", g_strerror(errno));
+ if (daemon(0,0) < 0) {
+ err("daemonizing failed: %s", strerror(errno));
+ return -1;
+ }
usbip_use_syslog = 1;
}
-
set_signal();
- ai_head = my_getaddrinfo(NULL, PF_UNSPEC);
+ ai_head = do_getaddrinfo(NULL, PF_UNSPEC);
if (!ai_head)
- return;
+ return -1;
- n = listen_all_addrinfo(ai_head, lsock);
- if (n <= 0)
- g_error("no socket to listen to");
+ info("starting " PROGNAME " (%s)", usbip_version_string);
- for (int i = 0; i < n; i++) {
+ nsockfd = listen_all_addrinfo(ai_head, sockfdlist);
+ if (nsockfd <= 0) {
+ err("failed to open a listening socket");
+ return -1;
+ }
+
+ for (i = 0; i < nsockfd; i++) {
GIOChannel *gio;
- gio = g_io_channel_unix_new(lsock[i]);
+ gio = g_io_channel_unix_new(sockfdlist[i]);
g_io_add_watch(gio, (G_IO_IN | G_IO_ERR | G_IO_HUP | G_IO_NVAL),
- process_comming_request, NULL);
+ process_request, NULL);
}
-
- info("usbipd start (%s)", version);
-
-
main_loop = g_main_loop_new(FALSE, FALSE);
g_main_loop_run(main_loop);
- info("shutdown");
+ info("shutting down " PROGNAME);
freeaddrinfo(ai_head);
+ usbip_host_driver_close();
usbip_names_free();
- usbip_stub_driver_close();
-
- return;
-}
-
-static const char help_message[] = "\
-Usage: usbipd [options] \n\
- -D, --daemon \n\
- Run as a daemon process. \n\
- \n\
- -d, --debug \n\
- Print debugging information. \n\
- \n\
- -v, --version \n\
- Show version. \n\
- \n\
- -h, --help \n\
- Print this help. \n";
-
-static void show_help(void)
-{
- printf("%s", help_message);
+ return 0;
}
-static const struct option longopts[] = {
- {"daemon", no_argument, NULL, 'D'},
- {"debug", no_argument, NULL, 'd'},
- {"version", no_argument, NULL, 'v'},
- {"help", no_argument, NULL, 'h'},
- {NULL, 0, NULL, 0}
-};
-
int main(int argc, char *argv[])
{
- gboolean daemonize = FALSE;
+ static const struct option longopts[] = {
+ { "daemon", no_argument, NULL, 'D' },
+ { "debug", no_argument, NULL, 'd' },
+ { "help", no_argument, NULL, 'h' },
+ { "version", no_argument, NULL, 'v' },
+ { NULL, 0, NULL, 0 }
+ };
enum {
cmd_standalone_mode = 1,
cmd_help,
cmd_version
- } cmd = cmd_standalone_mode;
+ } cmd;
+ gboolean daemonize = FALSE;
+ int opt, rc = -1;
usbip_use_stderr = 1;
usbip_use_syslog = 0;
if (geteuid() != 0)
- g_warning("running non-root?");
+ err("not running as root?");
+ cmd = cmd_standalone_mode;
for (;;) {
- int c;
- int index = 0;
-
- c = getopt_long(argc, argv, "vhdD", longopts, &index);
+ opt = getopt_long(argc, argv, "Ddhv", longopts, NULL);
- if (c == -1)
+ if (opt == -1)
break;
- switch (c) {
- case 'd':
- usbip_use_debug = 1;
- continue;
- case 'v':
- cmd = cmd_version;
- break;
- case 'h':
- cmd = cmd_help;
- break;
- case 'D':
- daemonize = TRUE;
- break;
- case '?':
- show_help();
- exit(EXIT_FAILURE);
- default:
- err("getopt");
- }
- }
-
- switch (cmd) {
- case cmd_standalone_mode:
- do_standalone_mode(daemonize);
+ switch (opt) {
+ case 'D':
+ daemonize = TRUE;
+ break;
+ case 'd':
+ usbip_use_debug = 1;
break;
- case cmd_version:
- printf("%s\n", version);
+ case 'h':
+ cmd = cmd_help;
break;
- case cmd_help:
- show_help();
+ case 'v':
+ cmd = cmd_version;
break;
+ case '?':
+ usbipd_help();
default:
- info("unknown cmd");
- show_help();
+ goto err_out;
+ }
}
- return 0;
+ switch (cmd) {
+ case cmd_standalone_mode:
+ rc = do_standalone_mode(daemonize);
+ break;
+ case cmd_version:
+ printf(PROGNAME " (%s)\n", usbip_version_string);
+ rc = 0;
+ break;
+ case cmd_help:
+ usbipd_help();
+ rc = 0;
+ break;
+ default:
+ usbipd_help();
+ goto err_out;
+ }
+
+err_out:
+ return (rc > -1 ? EXIT_SUCCESS : EXIT_FAILURE);
}
diff --git a/drivers/staging/usbip/userspace/src/utils.c b/drivers/staging/usbip/userspace/src/utils.c
index 8f441089b64..2d4966e6289 100644
--- a/drivers/staging/usbip/userspace/src/utils.c
+++ b/drivers/staging/usbip/userspace/src/utils.c
@@ -1,255 +1,76 @@
/*
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
*
- * Copyright (C) 2005-2007 Takahiro Hirofuchi
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "utils.h"
-
-int read_integer(char *path)
-{
- char buff[100];
- int fd;
- int ret = 0;
-
- bzero(buff, sizeof(buff));
-
- fd = open(path, O_RDONLY);
- if (fd < 0)
- return -1;
-
- ret = read(fd, buff, sizeof(buff));
- if (ret < 0) {
- close(fd);
- return -1;
- }
-
- sscanf(buff, "%d", &ret);
-
- close(fd);
-
- return ret;
-}
-
-int read_string(char *path, char *string, size_t len)
-{
- int fd;
- int ret = 0;
- char *p;
-
- bzero(string, len);
-
- fd = open(path, O_RDONLY);
- if (fd < 0) {
- string = NULL;
- return -1;
- }
-
- ret = read(fd, string, len-1);
- if (ret < 0) {
- string = NULL;
- close(fd);
- return -1;
- }
-
- p = strchr(string, '\n');
- *p = '\0';
+#include <sysfs/libsysfs.h>
- close(fd);
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
- return 0;
-}
+#include "usbip_common.h"
+#include "utils.h"
-int write_integer(char *path, int value)
+int modify_match_busid(char *busid, int add)
{
- int fd;
- int ret;
- char buff[100];
-
- snprintf(buff, sizeof(buff), "%d", value);
-
- fd = open(path, O_WRONLY);
- if (fd < 0)
- return -1;
-
- ret = write(fd, buff, strlen(buff));
- if (ret < 0) {
- close(fd);
+ char bus_type[] = "usb";
+ char attr_name[] = "match_busid";
+ char buff[SYSFS_BUS_ID_SIZE + 4];
+ char sysfs_mntpath[SYSFS_PATH_MAX];
+ char match_busid_attr_path[SYSFS_PATH_MAX];
+ struct sysfs_attribute *match_busid_attr;
+ int rc, ret = 0;
+
+ if (strnlen(busid, SYSFS_BUS_ID_SIZE) > SYSFS_BUS_ID_SIZE - 1) {
+ dbg("busid is too long");
return -1;
}
- close(fd);
-
- return 0;
-}
-
-int read_bConfigurationValue(char *busid)
-{
- char path[PATH_MAX];
-
- snprintf(path, PATH_MAX, "/sys/bus/usb/devices/%s/bConfigurationValue", busid);
-
- return read_integer(path);
-}
-
-int write_bConfigurationValue(char *busid, int config)
-{
- char path[PATH_MAX];
-
- snprintf(path, PATH_MAX, "/sys/bus/usb/devices/%s/bConfigurationValue", busid);
-
- return write_integer(path, config);
-}
-
-int read_bNumInterfaces(char *busid)
-{
- char path[PATH_MAX];
-
- snprintf(path, PATH_MAX, "/sys/bus/usb/devices/%s/bNumInterfaces", busid);
-
- return read_integer(path);
-}
-
-int read_bDeviceClass(char *busid)
-{
- char path[PATH_MAX];
-
- snprintf(path, PATH_MAX, "/sys/bus/usb/devices/%s/bDeviceClass", busid);
-
- return read_integer(path);
-}
-
-int getdriver(char *busid, int conf, int infnum, char *driver, size_t len)
-{
- char path[PATH_MAX];
- char linkto[PATH_MAX];
- int ret;
-
- snprintf(path, PATH_MAX, "/sys/bus/usb/devices/%s:%d.%d/driver", busid, conf, infnum);
-
- /* readlink does not add NULL */
- bzero(linkto, sizeof(linkto));
- ret = readlink(path, linkto, sizeof(linkto)-1);
- if (ret < 0) {
- strncpy(driver, "none", len);
+ rc = sysfs_get_mnt_path(sysfs_mntpath, SYSFS_PATH_MAX);
+ if (rc < 0) {
+ err("sysfs must be mounted: %s", strerror(errno));
return -1;
- } else {
- strncpy(driver, basename(linkto), len);
- return 0;
}
-}
-
-int getdevicename(char *busid, char *name, size_t len)
-{
- char path[PATH_MAX];
- char idProduct[10], idVendor[10];
- snprintf(path, PATH_MAX, "/sys/bus/usb/devices/%s/idVendor", busid);
- read_string(path, idVendor, sizeof(idVendor));
+ snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
+ "%s/%s/%s/%s/%s/%s", sysfs_mntpath, SYSFS_BUS_NAME, bus_type,
+ SYSFS_DRIVERS_NAME, USBIP_HOST_DRV_NAME, attr_name);
- snprintf(path, PATH_MAX, "/sys/bus/usb/devices/%s/idProduct", busid);
- read_string(path, idProduct, sizeof(idProduct));
-
- if (!idVendor[0] || !idProduct[0])
+ match_busid_attr = sysfs_open_attribute(match_busid_attr_path);
+ if (!match_busid_attr) {
+ dbg("problem getting match_busid attribute: %s",
+ strerror(errno));
return -1;
-
- snprintf(name, len, "%s:%s", idVendor, idProduct);
-
- return 0;
-}
-
-#define MAXLINE 100
-
-/* if this cannot read a whole line, return -1 */
-int readline(int sockfd, char *buff, int bufflen)
-{
- int ret;
- char c;
- int index = 0;
-
-
- while (index < bufflen) {
- ret = read(sockfd, &c, sizeof(c));
- if (ret < 0 && errno == EINTR)
- continue;
- if (ret <= 0) {
- return -1;
- }
-
- buff[index] = c;
-
- if ( index > 0 && buff[index-1] == '\r' && buff[index] == '\n') {
- /* end of line */
- buff[index-1] = '\0'; /* get rid of delimitor */
- return index;
- } else
- index++;
}
- return -1;
-}
-
-#if 0
-int writeline(int sockfd, char *str, int strlen)
-{
- int ret;
- int index = 0;
- int len;
- char buff[MAXLINE];
-
- if (strlen + 3 > MAXLINE)
- return -1;
-
- strncpy(buff, str, strlen);
- buff[strlen+1] = '\r';
- buff[strlen+2] = '\n';
- buff[strlen+3] = '\0';
+ if (add)
+ snprintf(buff, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
+ else
+ snprintf(buff, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
- len = strlen + 3;
+ dbg("write \"%s\" to %s", buff, match_busid_attr->path);
- while (len > 0) {
- ret = write(sockfd, buff+index, len);
- if (ret <= 0) {
- return -1;
- }
-
- len -= ret;
- index += ret;
+ rc = sysfs_write_attribute(match_busid_attr, buff, sizeof(buff));
+ if (rc < 0) {
+ dbg("failed to write match_busid: %s", strerror(errno));
+ ret = -1;
}
- return index;
-}
-#endif
+ sysfs_close_attribute(match_busid_attr);
-int writeline(int sockfd, char *str, int strlen)
-{
- int ret;
- int index = 0;
- int len;
- char buff[MAXLINE];
-
- len = strnlen(str, strlen);
-
- if (strlen + 2 > MAXLINE)
- return -1;
-
- memcpy(buff, str, strlen);
- buff[strlen] = '\r';
- buff[strlen+1] = '\n'; /* strlen+1 <= MAXLINE-1 */
-
- len = strlen + 2;
-
- while (len > 0) {
- ret = write(sockfd, buff+index, len);
- if (ret < 0 && errno == EINTR)
- continue;
- if (ret <= 0) {
- return -1;
- }
-
- len -= ret;
- index += ret;
- }
-
- return index;
+ return ret;
}
-
diff --git a/drivers/staging/usbip/userspace/src/utils.h b/drivers/staging/usbip/userspace/src/utils.h
index 6c29ae94521..fdcb14dc0fb 100644
--- a/drivers/staging/usbip/userspace/src/utils.h
+++ b/drivers/staging/usbip/userspace/src/utils.h
@@ -1,38 +1,24 @@
+/*
+ * Copyright (C) 2011 matt mooney <mfm@muteddisk.com>
+ * 2005-2007 Takahiro Hirofuchi
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
-#ifdef HAVE_CONFIG_H
-#include "../config.h"
-#endif
+#ifndef __UTILS_H
+#define __UTILS_H
-#define _GNU_SOURCE
-#include <string.h>
-#include <sys/un.h>
-#include <sys/types.h>
-#include <sys/socket.h>
+int modify_match_busid(char *busid, int add);
-#include <sysfs/libsysfs.h>
-#include <glib.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <stdlib.h>
-#include <time.h>
-#include <errno.h>
-
-
-
-/* Be sync to kernel header */
-#define BUS_ID_SIZE 20
-
-int read_string(char *path, char *, size_t len);
-int read_integer(char *path);
-int getdevicename(char *busid, char *name, size_t len);
-int getdriver(char *busid, int conf, int infnum, char *driver, size_t len);
-int read_bNumInterfaces(char *busid);
-int read_bConfigurationValue(char *busid);
-int write_integer(char *path, int value);
-int write_bConfigurationValue(char *busid, int config);
-int read_bDeviceClass(char *busid);
-int readline(int sockfd, char *str, int strlen);
-int writeline(int sockfd, char *buff, int bufflen);
+#endif /* __UTILS_H */
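Note: after this change utils.h exports a single helper, modify_match_busid(char *busid, int add). A hypothetical caller, shown purely as a usage sketch and not part of the patch, could drive it like this:

/* usage sketch only -- bind_or_unbind is a made-up example program */
#include <stdio.h>
#include <string.h>

#include "utils.h"

int main(int argc, char *argv[])
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <add|del> <busid>\n", argv[0]);
		return 1;
	}
	/* non-zero second argument writes "add <busid>", zero writes "del <busid>" */
	return modify_match_busid(argv[2], !strcmp(argv[1], "add")) ? 1 : 0;
}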
diff --git a/drivers/staging/usbip/userspace/usb.ids b/drivers/staging/usbip/userspace/usb.ids
deleted file mode 100644
index b1f87449bdd..00000000000
--- a/drivers/staging/usbip/userspace/usb.ids
+++ /dev/null
@@ -1,13209 +0,0 @@
-#
-# List of USB ID's
-#
-# Maintained by Stephen J. Gowdy <gowdy@slac.stanford.edu>
-# If you have any new entries, send them to the maintainer.
-# Send entries as patches (diff -u old new).
-# The latest version can be obtained from
-# http://www.linux-usb.org/usb.ids
-#
-# $Id: usb.ids,v 1.346 2008/04/23 13:51:46 gowdy Exp $
-#
-
-# Vendors, devices and interfaces. Please keep sorted.
-
-# Syntax:
-# vendor vendor_name
-# device device_name <-- single tab
-# interface interface_name <-- two tabs
-
-0001 Fry's Electronics
-0002 Ingram
-0003 Club Mac
-0004 Nebraska Furniture Mart
-0145 Unknown
- 0112 Card Reader
-0204 Chipsbank Microelectronics Co., Ltd
- 6025 CBM2080 Flash drive controller
- 6026 CBM1180 Flash drive controller
-02ad HUMAX Co., Ltd.
- 138c PVR Mass Storage
-0386 LTS
- 0001 PSX for USB Converter
-03e8 EndPoints, Inc.
- 0004 SE401 WebCam
- 0008 101 Ethernet [klsi]
- 0015 USB ATAPI Enclosure
- 2123 SiPix StyleCam Deluxe
- 8004 Aox 99001
-03e9 Thesys Microelectronics
-03ea Data Broadcasting Corp.
-03eb Atmel Corp.
- 2002 Mass Storage Device
- 2015 at90usbkey sample firmware (HID keyboard)
- 2018 at90usbkey sample firmware (CDC ACM)
- 2019 stk525 sample firmware (microphone)
- 201c at90usbkey sample firmware (HID mouse)
- 201d at90usbkey sample firmware (HID generic)
- 2022 at90usbkey sample firmware (composite device)
- 2103 JTAG ICE mkII
- 2104 AVR ISP mkII
- 2107 AVR Dragon
- 2ffb at90usb AVR DFU bootloader
- 2ffd at89c5130/c5131 DFU bootloader
- 2fff at89c5132/c51snd1c DFU bootloader
- 3301 at43301 4-port Hub
- 3312 4-port Hub
- 5601 at76c510 Prism-II 802.11b Access Point
- 5603 Cisco 7920 WiFi IP Phone
- 6124 at91sam SAMBA bootloader
- 7603 at76c503a D-Link DWL-120 802.11b Adapter
- 7604 FastVNET
- 7605 at76c503a 802.11b Adapter
- 7606 at76c505 802.11b Adapter
- 7611 at76c510 rfmd2948 802.11b Access Point
- 7613 WL-1130 USB
- 7614 AT76c505a Wireless Adapter
-03ec Iwatsu America, Inc.
-03ed Mitel Corp.
-03ee Mitsumi
- 0000 CD-R/RW Drive
- 2501 eHome Infrared Receiver
- 2502 eHome Infrared Receiver
- 5609 Japanese Keyboard
- 641f WIF-0402C Bluetooth Adapter
- 6438 Bluetooth Device
- 6440 WML-C52APR Bluetooth Adapter
- 6901 SmartDisk FDD
- 6902 Floppy Disk Drive
- 7500 CD-R/RW
- ffff Dongle with BlueCore in DFU mode
-03f0 Hewlett-Packard
- 0004 DeskJet 895c
- 0011 OfficeJet G55
- 0012 DeskJet 1125C Printer Port
- 0024 KU-0316 Keyboard
- 0101 ScanJet 4100c
- 0102 PhotoSmart S20
- 0104 DeskJet 880c/970c
- 0105 ScanJet 4200c
- 0107 CD-Writer Plus
- 010c Multimedia Keyboard Hub
- 0111 G55xi Printer/Scanner/Copier
- 0117 LaserJet 3200
- 011c hn210w 802.11b Adapter
- 011d Integrated Bluetooth Module
- 0121 HP49g+ Calculator
- 0122 HID Internet Keyboard
- 0201 ScanJet 6200c
- 0202 PhotoSmart S20
- 0204 DeskJet 815c
- 0205 ScanJet 3300c
- 0207 CD-Writer Plus 8200e
- 020c Multimedia Keyboard
- 0211 OfficeJet G85
- 0212 DeskJet 1220C
- 0217 LaserJet 2200
- 0218 APOLLO P2500/2600
- 2624 Pole Display (HP522 2 x 20 Line Display)
- 0304 DeskJet 810c/812c
- 0305 ScanJet 4300c
- 0307 CD-Writer+ CD-4e
- 0311 OfficeJet G85xi
- 0312 Color Inkjet CP1700
- 0314 designjet 30/130 series
- 0317 LaserJet 1200
- 0401 ScanJet 5200c
- 0404 DeskJet 830c/832c
- 0405 ScanJet 3400cse
- 0411 OfficeJet G95
- 0412 Printing Support
- 0417 LaserJet 1200 series
- 0504 DeskJet 885c
- 0505 ScanJet 2100c
- 0507 DVD+RW
- 050c 5219 Wireless Keyboard
- 0511 OfficeJet K60
- 0512 DeckJet 450
- 0517 LaserJet 1000
- 051d integrated module with Bluetooth wireless technology.
- 0601 ScanJet 6300c
- 0604 DeskJet 840c
- 0605 ScanJet 2200c
- 0611 OfficeJet K60xi
- 0612 business inkjet 3000
- 0624 Bluetooth Dongle
- 0701 ScanJet 5300c/5370c
- 0704 DeskJet 825c
- 0705 ScanJet 4400c
- 0711 OfficeJet K80
- 0712 DeskJet 1180c
- 0714 Printing Support
- 0801 ScanJet 7400c
- 0804 DeskJet 816c
- 0805 HP4470C
- 0811 OfficeJet K80xi
- 0817 LaserJet 3300
- 0901 ScanJet 2300c
- 0904 DeskJet 845c
- 0912 Printing Support
- 0917 LaserJet 3330
- 0924 Modular Smartcard Keyboard
- 0a01 ScanJet 2400c
- 0a17 color LaserJet 3700
- 0b01 Scanjet 82x0C
- 0b17 Laserjet 2300d
- 0c17 LaserJet 1010
- 0c24 Bluetooth Dongle
- 0d12 Officejet 9100 series
- 0d17 LaserJet 1012
- 0e17 LaserJet 1015
- 0f11 OfficeJet V40
- 0f12 Printing Support
- 0f17 LaserJet 1150
- 1001 Photo Scanner 1000
- 1002 photosmart 140 series
- 1004 DeskJet 970c/970cse
- 1005 ScanJet 5400c
- 1011 OfficeJet V40xi
- 1016 Jornada 548 / iPAQ HW6515 Pocket PC
- 1017 LaserJet 1300
- 1024 Smart Card Keyboard
- 1102 photosmart 240 series
- 1104 DeskJet 959c
- 1105 ScanJet 5470c
- 1111 officejet v60
- 1116 Jornada 568 Pocket PC
- 1117 LaserJet 1300n
- 1151 PSC-750xi Printer/Scanner/Copier
- 1202 Photosmart 320 Series
- 1204 DeskJet 930c
- 1205 ScanJet 4500C/5550C
- 1211 officejet v60xi
- 1217 LaserJet 2300L
- 1302 Photosmart 370 Series
- 1305 ScanJet 4570c
- 1311 OfficeJet V30
- 1312 Deskjet 460
- 1317 LaserJet 1005
- 1405 Scanjet 3670
- 1411 PSC 750
- 1424 f2105 Monitor Hub
- 1502 Photosmart 420 Series
- 1504 DeskJet 920c
- 1511 PSC 750xi
- 1512 Printing Support
- 1517 color LaserJet 3500
- 1524 Smart Card Keyboard - KR
- 1602 Photosmart 330 Series
- 1604 DeskJet 940c
- 1605 ScanJet 5530C Photosmart
- 1611 psc 780
- 1617 LaserJet 3015
- 161d Wireless Rechargeable Optical Mouse (HID)
- 1624 Smart Card Keyboard - JP
- 1702 Photosmart 380 Series
- 1704 deskjet 948C
- 1705 ScanJet 5590
- 1711 psc 780xi
- 1712 Printing Support
- 1717 LaserJet 3020
- 171d Wireless (Bluetooth + WLAN) Interface [Integrated Module]
- 1801 Inkjet P-2000U
- 1802 Photosmart 470 Series
- 1804 deskjet 916C
- 1805 ScanJet 7650
- 1811 PSC 720
- 1817 LaserJet 3030
- 181d integrated module with Bluetooth 2.0 wireless technology.
- 1902 Photosmart A430 series
- 1904 DeskJet 3820
- 1911 OfficeJet V45
- 1917 LaserJet 3380
- 1a02 Photosmart A510 series
- 1a11 officejet 5100 series
- 1a17 color LaserJet 4650
- 1b02 Photosmart A610 series
- 1b04 deskjet 3810
- 1b05 ScanJet 4850C/4890C
- 1c02 Photosmart A710 series
- 1c17 Color LaserJet 2550l
- 1d02 Photosmart A310 series
- 1d17 LaserJet 1320
- 1e02 Photosmart A320 Printer series
- 1e11 PSC-950
- 1e17 LaserJet 1160 series
- 1f02 Photosmart A440 Printer series
- 1f11 PSC 920
- 1f12 Officejet Pro K5300
- 1f17 color LaserJet 5550
- 2001 Floppy
- 2002 Hub
- 2004 DeskJet 640c
- 2005 ScanJet 3570c
- 2012 Officejet Pro K5400
- 2102 photosmart 7345
- 2104 DeskJet 630c
- 2112 Officejet Pro L7500
- 2202 photosmart 7600 series
- 2205 ScanJet 3500c
- 2212 Officejet Pro L7600
- 2217 color LaserJet 9500 MFP
- 2302 photosmart 7600 series
- 2304 DeskJet 656c
- 2305 ScanJet 3970c
- 2311 officejet d series
- 2312 Officejet Pro L7700
- 2317 LaserJet 4350
- 2402 photosmart 7700 series
- 2405 ScanJet 4070 Photosmart
- 2417 LaserJet 4250
- 2424 LP1965 19" Monitor Hub
- 2502 photosmart 7700 series
- 2505 ScanJet 3770
- 2512 Officejet Pro L7300
- 2517 LaserJet 2410
- 2524 LP3065 30" Monitor Hub
- 2602 Photosmart A520 series
- 2605 ScanJet 3800c
- 2611 officejet 7100 series
- 2617 Color LaserJet 2820 Series
- 2702 Photosmart A620 series
- 2704 Deskjet 915
- 2717 Color LaserJet 2830
- 2811 PSC-2100
- 2817 Color LaserJet 2840
- 2902 Photosmart A820 series
- 2911 PSC 2200
- 2917 LaserJet 2420
- 2a11 PSC 2150 series
- 2a17 LaserJet 2430
- 2b11 PSC 2170 series
- 2b17 LaserJet 1020
- 2c17 Printing Support
- 2d11 OfficeJet 6110
- 2d17 Printing Support
- 2e11 PSC 1000
- 2e17 Printing Support
- 2f11 PSC 1200
- 2f17 EWS 2605dn
- 3002 photosmart P1000
- 3004 deskjet 980c
- 3005 ScanJet 4670v
- 3011 PSC 1100 series
- 3017 Printing Support
- 3102 PhotoSmart P1100 Printer w/ Card Reader
- 3104 DeskJet 960c
- 3111 officejet 4100 series
- 3117 EWS 2605dtn
- 3202 photosmart 1215
- 3211 officejet 4105 series
- 3217 LaserJet 3050
- 3302 photosmart 1218
- 3304 DeskJet 990c
- 3317 LaserJet 3052
- 3402 photosmart 1115
- 3404 DeskJet 6122
- 3417 LaserJet 3055
- 3502 photosmart 230
- 3504 DeskJet 6127c
- 3511 PSC 2300
- 3517 LaserJet 3390
- 3602 photosmart 1315
- 3611 PSC 2410 Photosmart
- 3617 EWS 2605
- 3711 PSC 2500
- 3717 EWS UPD
- 3802 photosmart 100
- 3817 LaserJet P2015 Series
- 3902 photosmart 130
- 3a02 photosmart 7150
- 3a11 OfficeJet 5500 series
- 3a17 Printing Support
- 3b02 photosmart 7150~
- 3b11 PSC 1300 series
- 3b17 LaserJet M1005 MFP
- 3c02 PhotoSmart 7350
- 3c11 PSC 1358
- 3c17 EWS UPD
- 3d02 photosmart 7350~
- 3d11 OfficeJet 4215
- 3e02 photosmart 7550
- 3f02 photosmart 7550~
- 3f11 PSC-1315/PSC-1317
- 4002 PhotoSmart 720 / PhotoSmart 935 (storage)
- 4004 cp1160
- 4102 PhotoSmart 618
- 4105 ScanJet 4370
- 4111 Officejet 7200 series
- 4117 Printing Support
- 4202 PhotoSmart 812
- 4205 Scanjet G3010
- 4211 Officejet 7300 series
- 4217 EWS CM1015
- 4302 PhotoSmart 850 (ptp)
- 4311 Officejet 7400 series
- 4317 Color LaserJet CM1017
- 4402 PhotoSmart 935 (ptp)
- 4417 EWS UPD
- 4502 PhotoSmart 945 (PTP mode)
- 4505 ScanJet G4010
- 4511 Photosmart 2600
- 4517 EWS UPD
- 4605 ScanJet G4050
- 4611 Photosmart 2700
- 4811 PSC 1600
- 4911 PSC 2350
- 4b11 Officejet 6200
- 4c11 PSC 1500 series
- 4c17 EWS UPD
- 4d11 PSC 1400
- 4d17 EWS UPD
- 4e11 Photosmart 2570 series
- 4f11 Officejet 5600 (USBHUB)
- 5004 DeskJet 995c
- 5011 Photosmart 3100 Series
- 5017 EWS UPD
- 5111 Photosmart 3200 Series
- 5211 Photosmart 3300 Series
- 5311 Officejet 6300
- 5411 Officejet 4300
- 5511 Deskjet F300 series
- 5611 PhotoSmart C3180
- 5617 LaserJet M1120 MFP
- 5711 Photosmart C4100 series
- 5717 LaserJet M1120n MFP
- 5811 Photosmart C5100 series
- 5817 LaserJet M1319f MFP
- 5911 PhotoSmart C6180
- 5a11 Photosmart C7100 series
- 5b11 Officejet J2100 Series
- 5c11 Photosmart C4200 Printer series
- 5d11 Photosmart C5200 series
- 5e11 Photosmart D7400 series
- 6004 DeskJet 5550
- 6102 Hewlett Packard Digital Camera
- 6104 DeskJet 5650c
- 6117 color LaserJet 3550
- 6202 PhotoSmart 215
- 6204 DeskJet 5150c
- 6217 Color LaserJet 4700
- 6302 PhotoSmart 318/612
- 6317 Color LaserJet 4730mfp
- 6402 PhotoSmart 715 (ptp)
- 6411 Photosmart C8100 series
- 6417 LaserJet 5200
- 6502 PhotoSmart 120 (ptp)
- 6511 Photosmart C7200 series
- 6602 PhotoSmart 320
- 6611 Photosmart C4380 series
- 6617 LaserJet 5200L
- 6702 PhotoSmart 720 (ptp)
- 6717 Color LaserJet 3000
- 6802 PhotoSmart 620 (ptp)
- 6811 Photosmart D5300 series
- 6817 Color LaserJet 3800
- 6911 Photosmart D7200 series
- 6917 Color LaserJet 3600
- 6a02 PhotoSmart 735 (ptp)
- 6a11 Photosmart C6200 series
- 6a17 LaserJet 4240
- 6b02 PhotoSmart R707 (PTP mode)
- 6c17 Color LaserJet 4610
- 6f17 Color LaserJet CP6015 series
- 7004 DeskJet 3320c
- 7102 PhotoSmart 635 (PTP mode)
- 7104 DeskJet 3420c
- 7117 CM8060 Color MFP with Edgeline Technology
- 7202 PhotoSmart 43x (ptp)
- 7204 DeskJet 36xx
- 7217 LaserJet M5035 MFP
- 7302 PhotoSmart M307 (PTP mode)
- 7304 DeskJet 35xx
- 7317 LaserJet P3005
- 7404 Printing Support
- 7417 LaserJet M4345 MFP
- 7504 Printing Support
- 7517 LaserJet M3035 MFP
- 7604 Deskjet 3940
- 7617 LaserJet P3004
- 7702 PhotoSmart R817 (PTP mode)
- 7704 Deskjet D4100
- 7717 CM8050 Color MFP with Edgeline Technology
- 7804 Deskjet D1360
- 7817 Color LaserJet CP3505
- 7917 LaserJet M5025 MFP
- 7a02 PhotoSmart M415 (PTP mode)
- 7a17 LaserJet M3027 MFP
- 7b02 PhotoSmart M23 (PTP mode)
- 7b17 Color LaserJet CP4005
- 7c17 Color LaserJet CM6040 Series
- 7d04 Deskjet F2100 Printer series
- 7d17 Color LaserJet CM4730 MFP
- 7e04 Deskjet F4100 Printer series
- 8017 LaserJet P4515
- 8104 Printing Support
- 8117 LaserJet P4015
- 811c Ethernet HN210E
- 8204 Printing Support
- 8217 LaserJet P4014
- 8317 LaserJet M9050 MFP
- 8404 Deskjet 6800 Series
- 8417 LaserJet M9040 MFP
- 8504 Deskjet 6600 Series
- 8604 Deskjet 5440
- 8704 deskjet 5900 series
- 8804 Deskjet 6980 Series
- 8904 Deskjet 6940 Series
- 9002 Photosmart M437
- 9102 Photosmart M537
- 9302 Photosmart R930 series
- 9402 Photosmart R837
- 9502 Photosmart R840 series
- 9602 Photosmart M730 series
- 9702 Photosmart R740 series
- 9802 Photosmart Mz60 series
- 9902 Photosmart M630 series
- 9a02 Photosmart E330 series
- 9b02 Photosmart M540 series
- 9c02 Photosmart M440 series
- a004 DeskJet 5850c
- b002 photosmart 7200 series
- b102 photosmart 7200 series
- b202 photosmart 7600 series
- b302 photosmart 7600 series
- b402 photosmart 7700 series
- b502 photosmart 7700 series
- b602 photosmart 7900 series
- b702 photosmart 7900 series
- b802 Photosmart 7400 Series
- b902 Photosmart 7800 Series
- ba02 Photosmart 8100 Series
- bb02 Photosmart 8400 Series
- bc02 Photosmart 8700 Series
- bd02 Photosmart Pro B9100 series
- bef4 NEC Picty760
- c002 Photosmart 7800 Series
- c102 Photosmart 8000 Series
- c202 Photosmart 8200 Series
- c302 Deskjet D2300
- c402 Photosmart D5100 series
- c502 Photosmart D6100 series
- c602 Photosmart D7100 series
- c702 Photosmart D7300 series
- c802 Photosmart D5060 Printer
- d104 Bluetooth Dongle
- efbe NEC Picty900
- f0be NEC Picty920
- f1be NEC Picty800
-03f1 Genoa Technology
-03f2 Oak Technology, Inc.
-03f3 Adaptec, Inc.
- 0020 AWN-8020 WLAN
- 0080 AVC-1100 Audio Capture
- 0083 AVC-2200 Device
- 0087 AVC-2210 Loader
- 0088 AVC-2210 Device
- 008b AVC-2310 Loader
- 008c AVC-2310 Device
- 0094 eHome Infrared Receiver
- 009b AVC-1410 GameBridge TV NTSC
- 2000 USBXchange
- 2001 USBXchange Adapter
- 2002 USB2-Xchange
- 2003 USB2-Xchange Adapter
- adcc Composite Device Support
-03f4 Diebold, Inc.
-03f5 Siemens Electromechanical
-03f8 Epson Imaging Technology Center
-03f9 KeyTronic Corp.
- 0100 Keyboard
- 0101 Keyboard
- 0102 Keyboard Mouse
-03fb OPTi, Inc.
-03fc Elitegroup Computer Systems
-03fd Xilinx, Inc.
-03fe Farallon Comunications
-0400 National Semiconductor Corp.
- 0807 Bluetooth Dongle
- 080a Bluetooth Device
- 1000 Mustek BearPaw 1200 Scanner
- 1001 Mustek BearPaw 2400 Scanner
- 1237 Hub
- a000 Smart Display Reference Device
- c35b Printing Support
-0401 National Registry, Inc.
-0402 ALi Corp.
- 5462 M5462 IDE Controller
- 5602 Video Camera Controller
- 5603 USB 2.0 Q-tec Webcam 300
- 5621 USB 2.0 Storage Device
- 5623 VistaScan Astra 3600
- 5627 Welland ME-740PS USB2 3.5" Power Saving Enclosure
- 5632 USB 2.0 Host-to-Host Link
- 5635 USB 2.0 Flash Card Reader
- 5636 USB 2.0 Storage Device
- 5637 M5637 IDE Controller
-0403 Future Technology Devices International, Ltd
- 0000 H4SMK 7 Port Hub
- 0232 Serial Converter
- 6001 FT232 USB-Serial (UART) IC
- 6007 Serial Converter
- 6008 Serial Converter
- 6009 Serial Converter
- 6010 FT2232C Dual USB-UART/FIFO IC
- 8040 4 Port Hub
- 8070 7 Port Hub
- 8370 7 Port Hub
- 8371 PS/2 Keyboard And Mouse
- 8372 FT8U100AX Serial Port
- c630 lcd2usb interface
- c7d0 RR-CirKits LocoBuffer-USB
- cc48 product FTDI TACTRIX_OPENPORT_13M 0xcc48 OpenPort 1.3 Mitsubishi
- cc49 product FTDI TACTRIX_OPENPORT_13S 0xcc49 OpenPort 1.3 Subaru
- cc4a product FTDI TACTRIX_OPENPORT_13U 0xcc4a OpenPort 1.3 Universal
- d010 SCS PTC-IIusb
- d011 SCS Position-Tracker/TNC
- d012 SCS DRAGON 1
- d013 SCS DRAGON 1
- d6f8 UNI Black BOX
- e700 Elster Unicom III Optical Probe
- e888 Expert ISDN Control USB
- e889 USB-RS232 OptoBridge
- e88a Expert mouseCLOCK USB II
- e88b Precision Clock MSF USB
- e88c Expert mouseCLOCK USB II HBG
- ea90 Eclo 1-Wire Adapter
- f208 Papenmeier Braille-Display
- f680 Suunto Sports Instrument
- f918 Ant8 Logic Probe
- fa00 Matrix Orbital USB Serial
- fa01 Matrix Orbital MX2 or MX3
- fa02 Matrix Orbital MX4 or MX5
- fa03 Matrix Orbital VK/LK202 Family
- fa04 Matrix Orbital VK/LK204 Family
- fc08 Crystalfontz CFA-632 USB LCD
- fc09 Crystalfontz CFA-634 USB LCD
- fc0b Crystalfontz CFA-633 USB LCD
- fc0c Crystalfontz CFA-631 USB LCD
- fc0d Crystalfontz CFA-635 USB LCD
- fc82 SEMC DSS-20 SyncStation
- fd48 ShipModul MiniPlex-4xUSB NMEA Multiplexer
- ff08 ToolHouse LoopBack Adapter
- ff18 Logbook Bus
- ff19 Logbook Bus
- ff1a Logbook Bus
- ff1b Logbook Bus
- ff1c Logbook Bus
- ff1d Logbook Bus
- ff1e Logbook Bus
- ff1f Logbook Bus
-0404 NCR Corp.
- 0202 78XX Scanner
- 0203 78XX Scanner - Embedded System
- 0310 K590 Printer, Self-Service
- 0311 7167 Printer, Receipt/Slip
- 0312 7197 Printer Receipt
- 0320 5932-USB Keyboard
- 0321 5953-USB Dynakey
- 0322 5932-USB Enhanced Keyboard
- 0323 5932-USB Enhanced Keyboard, Flash-Recovery/Download
- 0324 5953-USB Enhanced Dynakey
- 0325 5953-USB Enhanced Dynakey Flash-Recovery/Download
- 0328 K016: USB-MSR ISO 3-track MSR: POS Standard (See HID pages)
- 0329 K018: USB-MSR JIS 2-Track MSR: POS Standard
- 032a K016: USB-MSR ISO 3-Track MSR: HID Keyboard Mode
- 032b K016/K018: USB-MSR Flash-Recovery/Download
-0405 Synopsys, Inc.
-0406 Fujitsu-ICL Computers
-0407 Fujitsu Personal Systems, Inc.
-0408 Quanta Computer, Inc.
-0409 NEC Corp.
- 0011 PC98 Series Layout Keyboard Mouse
- 0012 ATerm IT75DSU ISDN TA
- 0014 Japanese Keyboard
- 0019 109 Japanese Keyboard with Bus-Powered Hub
- 001a PC98 Series Layout Keyboard with Bus-Powered Hub
- 0025 Mini Keyboard with Bus-Powered Hub
- 0027 MultiSync Monitor
- 002c Clik!-USB Drive
- 0034 109 Japanese Keyboard with One-touch start buttons
- 003f Wireless Keyboard with One-touch start buttons
- 0040 Floppy
- 004e SuperScript 1400 Series
- 004f Wireless Keyboard with One-touch start buttons
- 0058 HighSpeed Hub
- 0059 HighSpeed Hub
- 005a HighSpeed Hub
- 006a Conceptronic USB Harddisk Box
- 0081 SuperScript 1400 Series
- 0082 SuperScript 1400 Series
- 0094 Japanese Keyboard with One-touch start buttons
- 0095 Japanese Keyboard
- 00a9 AtermIT21L 128K Support Standard
- 00aa AtermITX72 128K Support Standard
- 00ab AtermITX62 128K Support Standard
- 00ac AtermIT42 128K Support Standard
- 00ae INSMATEV70G-MAX Standard
- 00af AtermITX70 128K Support Standard
- 00b0 AtermITX80 128K Support Standard
- 00b2 AtermITX80D 128K Support Standard
- 00c0 Wireless Remocon
- 00f7 Smart Display PK-SD10
- 011d e228 Mobile Phone
- 0203 HID Audio Controls
- 55aa Hub
- 55ab Hub [iMac/iTouch kbd]
- 8010 Intellibase Hub
- 8011 Intellibase Hub
- efbe P!cty 900 [HP DJ]
- f0be P!cty 920 [HP DJ 812c]
-040a Kodak Co.
- 0001 DVC-323
- 0002 DVC-325
- 0100 DC-220
- 0110 DC-260
- 0111 DC-265
- 0112 DC-290
- 0120 DC-240
- 0121 DC-240 (PTP firmware)
- 0130 DC-280
- 0131 DC-5000
- 0132 DC-3400
- 0140 DC-4800
- 0160 DC4800
- 0170 DX3900
- 0200 Digital Camera
- 0300 EZ-200
- 0400 MC3
- 0402 Digital Camera
- 0403 Z7590
- 0500 DX3500
- 0510 DX3600
- 0525 DX3215
- 0530 DX3700
- 0535 EasyShare CX4230 Camera
- 0540 LS420
- 0550 DX4900
- 0555 DX4330
- 0560 CX4200
- 0565 CX4210
- 0566 CX4300
- 0567 LS753
- 0568 LS443
- 0569 LS663
- 0570 DX6340
- 0571 CX6330
- 0572 DX6440
- 0573 CX6230
- 0574 CX6200
- 0575 DX6490
- 0576 DX4530
- 0577 DX7630
- 0578 CX7300/CX7310
- 0579 CX7220
- 057a CX7330
- 057b CX7430
- 057c CX7530
- 057d DX7440
- 057e C300
- 057f DX7590
- 0580 Z730
- 0581 Digital Camera
- 0582 Digital Camera
- 0583 Digital Camera
- 0584 CX6445
- 0585 Digital Camera
- 0586 CX7525
- 0587 Digital Camera
- 0588 Digital Camera
- 0589 EasyShare C360
- 058a C310
- 058b Digital Camera
- 058c C330
- 058d C340
- 058e V530
- 058f V550
- 0590 Digital Camera
- 0591 Digital Camera
- 0592 Digital Camera
- 0593 Digital Camera
- 0594 Digital Camera
- 0595 Digital Camera
- 0596 Digital Camera
- 0597 Digital Camera
- 0598 Digital Camera
- 0599 Digital Camera
- 059a Digital Camera
- 059b Digital Camera
- 059c Digital Camera
- 059d Digital Camera
- 059e Digital Camera
- 059f Digital Camera
- 05a0 Digital Camera
- 05a1 Digital Camera
- 05a2 Digital Camera
- 05a3 Digital Camera
- 05a4 Digital Camera
- 05a5 Digital Camera
- 05a6 Digital Camera
- 05a7 Digital Camera
- 05a8 Digital Camera
- 05a9 Digital Camera
- 05aa Digital Camera
- 05ab Digital Camera
- 05ac Digital Camera
- 05ad Digital Camera
- 05ae Digital Camera
- 05af Digital Camera
- 05b0 Digital Camera
- 05b1 Digital Camera
- 05b2 Digital Camera
- 05b3 EasyShare Z710 Camera
- 05b4 Digital Camera
- 05b5 Digital Camera
- 05b6 Digital Camera
- 05b7 Digital Camera
- 05b8 Digital Camera
- 05b9 Digital Camera
- 05ba Digital Camera
- 05bb Digital Camera
- 05bc Digital Camera
- 05bd Digital Camera
- 05be Digital Camera
- 05bf Digital Camera
- 05c0 Digital Camera
- 05c1 Digital Camera
- 05c2 Digital Camera
- 05c3 Digital Camera
- 05c4 Digital Camera
- 05c5 Digital Camera
- 4000 InkJet Color Printer
- 410d EasyShare G600 Printer Dock
- 5010 Wireless Adapter
- 5012 DBT-220 Bluetooth Adapter
- 6001 i30
- 6002 i40
- 6003 i50
- 6004 i60
- 6005 i80
-040b Weltrend Semiconductor
- 6510 Weltrend Bar Code Reader
- 6520 XBOX Xploder
-040c VTech Computers, Ltd
-040d VIA Technologies, Inc.
- 3184 VNT VT6656 USB-802.11 Wireless LAN Adapter
- 6205 USB 2.0 Card Reader
-040e MCCI
-040f Echo Speech Corp.
-0411 MelCo., Inc.
- 0001 LUA-TX Ethernet [pegasus]
- 0005 LUA-TX Ethernet
- 0006 WLI-USB-L11 Wireless LAN Adapter
- 0009 LUA2-TX Ethernet
- 000b WLI-USB-L11G-WR Wireless LAN Adapter
- 000d WLI-USB-L11G Wireless LAN Adapter
- 0012 LUA-KTX Ethernet
- 0013 USB2-IDE Adapter
- 0016 WLI-USB-S11 802.11b Adapter
- 0018 USB2-IDE Adapter
- 001c USB-IDE Bridge: DUB-PxxG
- 0027 WLI-USB-KS11G 802.11b Adapter
- 003d LUA-U2-KTX Ethernet
- 0044 WLI-USB-KB11 Wireless LAN Adapter
- 004d WLI-USB-B11 Wireless LAN Adapter
- 0050 WLI2-USB2-G54 Wireless LAN Adapter
- 005e WLI-U2-KG54-YB WLAN
- 0065 Python2 WDM Encoder
- 0066 WLI-U2-KG54 WLAN
- 0067 WLI-U2-KG54-AI WLAN
- 008b Nintendo Wi-Fi
- 0091 WLI-U2-KAMG54 Wireless LAN Adapter
- 0092 WLI-U2-KAMG54 Bootloader
- 0097 WLI-U2-KG54-BB
- 00a9 WLI-U2-AMG54HP Wireless LAN Adapter
- 00aa WLI-U2-AMG54HP Bootloader
- 00b3 PC-OP-RS1 RemoteStation
- 00ca 802.11n Network Adapter
- 00cb WLI-U2-G300N 802.11n Adapter
- 00d8 WLI-U2-SG54HP
- 00d9 WLI-U2-G54HP
- 00da WLI-U2-KG54L
-0412 Award Software International
-0413 Leadtek Research, Inc.
- 1310 WinFast TV - NTSC + FM
- 1311 WinFast TV - NTSC + MTS + FM
- 1312 WinFast TV - PAL BG + FM
- 1313 WinFast TV - PAL BG+TXT + FM
- 1314 WinFast TV Audio - PHP PAL I
- 1315 WinFast TV Audio - PHP PAL I+TXT
- 1316 WinFast TV Audio - PHP PAL DK
- 1317 WinFast TV Audio - PHP PAL DK+TXT
- 1318 WinFast TV - PAL I/DK + FM
- 1319 WinFast TV - PAL N + FM
- 131a WinFast TV Audio - PHP SECAM LL
- 131b WinFast TV Audio - PHP SECAM LL+TXT
- 131c WinFast TV Audio - PHP SECAM DK
- 131d WinFast TV - SECAM DK + TXT + FM
- 131e WinFast TV - NTSC Japan + FM
- 1320 WinFast TV - NTSC
- 1321 WinFast TV - NTSC + MTS
- 1322 WinFast TV - PAL BG
- 1323 WinFast TV - PAL BG+TXT
- 1324 WinFast TV Audio - PHP PAL I
- 1325 WinFast TV Audio - PHP PAL I+TXT
- 1326 WinFast TV Audio - PHP PAL DK
- 1327 WinFast TV Audio - PHP PAL DK+TXT
- 1328 WinFast TV - PAL I/DK
- 1329 WinFast TV - PAL N
- 132a WinFast TV Audio - PHP SECAM LL
- 132b WinFast TV Audio - PHP SECAM LL+TXT
- 132c WinFast TV Audio - PHP SECAM DK
- 132d WinFast TV - SECAM DK + TXT
- 132e WinFast TV - NTSC Japan
- 6023 EMP Audio Device
- 6024 WinFast PalmTop/Novo TV Video
- 6025 WinFast DTV Dongle (cold state)
- 6026 WinFast DTV Dongle (warm state)
- 6125 WinFast DTV Dongle
- 6126 WinFast DTV Dongle BDA Driver
- 6f00 WinFast DTV Dongle (STK7700P based)
-0414 Giga-Byte Technology Co., Ltd
-0416 Winbond Electronics Corp.
- 0035 W89C35 802.11bg WLAN Adapter
- 0101 Hub
- 0961 AVL Flash Card Reader
- 3810 Smart Card Controller
- 3811 Generic Controller - Single interface
- 3812 Smart Card Controller_2Interface
- 3813 Panel Display
- 5518 4-Port Hub
- 551a PC Sync Keypad
- 551b PC Async Keypad
- 551c Sync Tenkey
- 551d Async Tenkey
- 551e Keyboard
- 551f Keyboard w/ Sys and Media
- 5521 Keyboard
- 6481 16-bit Scanner
- 7721 Memory Stick Reader/Writer
- 7722 Memory Stick Reader/Writer
- 7723 SD Card Reader
-0417 Symbios Logic
-0418 AST Research
-0419 Samsung Info. Systems America, Inc.
- 0001 IrDA Remote Controller
- 3001 Xerox P1202 Laser Printer
- 3003 Olivetti PG L12L
- 3201 Docuprint P8ex
- 3404 SCX-5x12 Series
- 3406 MFP 830 Series
- 3407 ML-912
- 3601 InkJet Color Printer
- 3602 InkJet Color Printer
- 4602 Remote NDIS Network Device
- 8001 Hub
- 8002 SyncMaster 757DFX HID Device
-041a Phoenix Technologies, Ltd
-041b d'TV
-041d S3, Inc.
-041e Creative Technology, Ltd
- 1002 Nomad II
- 1003 Blaster GamePad Cobra
- 1050 GamePad Cobra
- 3000 SoundBlaster Extigy
- 3002 SB External Composite Device
- 3010 SoundBlaster MP3+
- 3014 SB External Composite Device
- 3015 Sound Blaster Digital Music LX
- 3020 SoundBlaster Audigy 2 NX
- 3030 SB External Composite Device
- 3040 SoundBlaster Live! 24-bit External SB0490
- 3060 Sound Blaster Audigy 2 ZS External
- 3061 SoundBlaster Audigy 2 ZS Video Editor
- 3090 Sound Blaster Digital Music SX
- 3f02 E-Mu 0202
- 3f04 E-Mu 0404
- 4003 VideoBlaster WebCam Go Plus [W9967CF]
- 4004 Nomad II MG
- 4005 WebCam Blaster Go ES
- 4007 Go Mini
- 400a PC-Cam 300
- 400b PC-Cam 600
- 400c WebCam 5 [pwc]
- 400d WebCam PD1001
- 400f PC-CAM 550 (Composite)
- 4011 WebCam PRO eX
- 4012 PC-CAM350
- 4013 PC-Cam 750
- 4015 CardCam Value
- 4016 CardCam
- 4017 WebCam Mobile
- 4018 WebCam Vista
- 4019 Audio Device
- 401c WebCam NX [PD1110]
- 401d WebCam NX Ultra
- 401e WebCam NX Pro
- 401f Webcam Notebook
- 4020 WebCam NX
- 4021 WebCam NX Ultra
- 4022 WebCam NX Pro
- 4028 Vista Plus cam [VF0090]
- 402f DC-CAM 3000Z
- 4034 WebCam Instant
- 4035 WebCam Instant
- 4036 Webcam Live!/Live! Pro
- 4037 WebCam Live!
- 4038 ORITE CCD Webcam(PC370R)
- 4039 WebCam Live! Effects
- 403a WebCam NX Pro 2
- 403c WebCam Live! Ultra
- 403d WebCam Notebook Ultra
- 403e WebCam Vista Plus
- 4041 WebCam Live! Motion
- 4045 Live! Cam Voice
- 4049 Live! Cam Voice
- 4051 Live! Cam Notebook Pro
- 4052 Live! Cam Vista IM
- 4053 Live! Cam Video IM
- 4054 Live! Cam Video IM
- 4055 Live! Cam Video IM Pro
- 4056 Live! Cam Video IM Pro
- 4057 Live! Cam Optia
- 4058 Live! Cam Optia AF
- 4068 WebCam Live! Notebook
- 4100 Nomad Jukebox 2
- 4101 Nomad Jukebox 3
- 4102 NOMAD MuVo^2
- 4106 Nomad MuVo
- 4107 NOMAD MuVo
- 4108 Nomad Jukebox Zen
- 4109 Nomad Jukebox Zen NX
- 410b Nomad Jukebox Zen USB 2.0
- 410c Nomad MuVo NX
- 410f NOMAD MuVo^2 (Flash)
- 4110 Nomad Jukebox Zen Xtra
- 4111 Dell Digital Jukebox
- 4116 MuVo^2
- 4117 Nomad MuVo TX
- 411b Zen Touch
- 411c Nomad MuVo USB 2.0
- 411d Zen
- 411e Zen Micro
- 4123 Zen Portable Media Center
- 4124 MuVo^2 FM (uHDD)
- 4126 Dell DJ (2nd gen)
- 4127 Dell DJ
- 4128 NOMAD Jukebox Zen Xtra (mtp)
- 412b MuVo N200 with FM radio
- 4130 Zen Micro (mtp)
- 4131 Zen Touch (mtp)
- 4133 Mass Storage Device
- 4134 Zen Neeon
- 4136 Zen Sleek
- 4137 Zen Sleek (mtp)
- 4139 Zen Nano Plus
- 413c Zen MicroPhoto
- 4151 Zen Vision:M (mtp)
- 4155 Zen Stone plus
- 500f Broadband Blaster 8012U-V
- 5015 TECOM Bluetooth Device
- ffff WebCam Live! Ultra
-041f LCS Telegraphics
-0420 Chips and Technologies
- 1307 Celly SIM Card Reader
-0421 Nokia Mobile Phones
- 0018 6288 GSM Smartphone
- 0019 6288 GSM Smartphone (imaging mode)
- 001a 6288 GSM Smartphone (file transfer mode)
- 0024 5610 XpressMusic (Storage mode)
- 0025 5610 XpressMusic (PC-Suite mode)
- 0028 5610 XpressMusic (Imaging mode)
- 0096 N810 Internet Tablet
- 0103 ADL Flashing Engine AVALON Parent
- 0104 ADL Re-Flashing Engine Parent
- 0105 E-61 (Firmware update mode)
- 0106 ROM Parent
- 0400 7600 Phone Parent
- 0401 6650 GSM Phone
- 0402 6255 Phone Parent
- 0404 5510
- 0405 9500 GSM Communicator
- 0407 Music Player HDR-1(tm)
- 040b N-Gage GSM Phone
- 040d 6620 Phone Parent
- 040e 6651 Phone Parent
- 040f 6230 GSM Phone
- 0410 6630 Imaging Smartphone
- 0411 7610 Phone Parent
- 0413 6260 Phone Parent
- 0414 7370
- 0415 9300 GSM Smartphone
- 0416 6170 Phone Parent
- 0417 7270 Phone Parent
- 0418 E-70 (PC-Suite mode)
- 0419 E-60 (PC-Suite mode)
- 041a 9500 GSM Communicator (RNDIS)
- 041b 9300 GSM Smartphone (RNDIS)
- 041c 7710 Phone Parent
- 041d 6670 Phone Parent
- 041e 6680
- 041f 6235 Phone Parent
- 0421 3230 Phone Parent
- 0422 6681 Phone Parent
- 0423 6682 Phone Parent
- 0428 6230i Modem
- 0429 6230i MultiMedia Card
- 0431 770 Internet Tablet
- 0432 N90 Phone Parent
- 0435 E-70 (IP Passthrough/RNDIS mode)
- 0436 E-60 (IP Passthrough/RNDIS mode)
- 0437 6265 Phone Parent
- 043a N70 USB Phone Parent
- 043b 3155 Phone Parent
- 043c 6155 Phone Parent
- 043d 6270 Phone Parent
- 0443 N70 Phone Parent
- 044c NM850iG Phone Parent
- 044d E-61 (PC Suite mode)
- 044e E-61 (Data Exchange mode)
- 044f E-61 (IP Passthrough/RNDIS mode)
- 0453 9300 Phone Parent
- 0456 6111 Phone Parent
- 045a 6280 Phone Parent
- 045d 6282 Phone Parent
- 046e 6110 Navigator
- 0485 MTP Device
- 04c3 N800 Internet Tablet
- 04ce E90 Communicator (PC-Suite mode)
- 04cf E90 Communicator (Storage mode)
- 04f9 6300 (PC-Suite mode)
- 0600 Digital Pen SU-1B
- 0800 Connectivity Cable DKU-5
- 0801 Data Cable DKU-6
- 0802 CA-42 Phone Parent
-0422 ADI Systems, Inc.
-0423 Computer Access Technology Corp.
- 000a NetMate Ethernet
- 000c NetMate2 Ethernet
- 000d USB Chief Analyzer
- 0100 Generic Universal Protocol Analyzer
- 0101 UPA USBTracer
- 0200 Generic 10K Universal Protocol Analyzer
- 020a PETracer ML
- 0300 Generic Universal Protocol Analyzer
- 0301 2500H Tracer Trainer
- 030a PETracer x1
- 1237 Andromeda Hub
-0424 Standard Microsystems Corp.
- 0001 Integrated Hub
- 0acd Sitecom Internal Multi Memory reader/writer MD-005
- 0fdc Floppy
- 10cd Sitecom Internal Multi Memory reader/writer MD-005
- 2020 USB Hub
- 20cd Sitecom Internal Multi Memory reader/writer MD-005
- 20fc 6-in-1 Card Reader
- 2228 9-in-2 Card Reader
- 223a 8-in-1 Card Reader
- 2503 USB 2.0 Hub
- 2504 USB 2.0 Hub
- 2524 USB MultiSwitch Hub
-0425 Motorola Semiconductors HK, Ltd
- 0101 G-Tech Wireless Mouse & Keyboard
-0426 Integrated Device Technology, Inc.
- 0426 WDM Driver
-0427 Motorola Electronics Taiwan, Ltd
-0428 Advanced Gravis Computer Tech, Ltd
- 4001 GamePad Pro
-0429 Cirrus Logic
-042a Ericsson Austrian, AG
-042b Intel Corp.
- 9316 8x931Hx Customer Hub
-042c Innovative Semiconductors, Inc.
-042d Micronics
-042e Acer, Inc.
- 0380 MP3 Player
-042f Molex, Inc.
-0430 Sun Microsystems, Inc.
- 0002 109 Keyboard
- 0005 Type 6 Keyboard
- 000a 109 Japanese Keyboard
- 000b 109 Japanese Keyboard
- 0082 109 Japanese Keyboard
- 0083 109 Japanese Keyboard
- 0100 3-button Mouse
- 36ba Bus Powered Hub
-0431 Itac Systems, Inc.
- 0100 Mouse-Trak 3-button Track Ball
-0432 Unisys Corp.
-0433 Alps Electric, Inc.
- 1101 IBM Game Controller
- abab Keyboard
-0434 Samsung Info. Systems America, Inc.
-0435 Hyundai Electronics America
-0436 Taugagreining HF
- 0005 CameraMate (DPCM_USB)
-0437 Framatome Connectors USA
-0438 Advanced Micro Devices, Inc.
-0439 Voice Technologies Group
-043d Lexmark International, Inc.
- 0001 Laser Printer
- 0002 Optra E310 Printer
- 0003 Laser Printer
- 0004 Laser Printer
- 0005 Laser Printer
- 0006 Laser Printer
- 0007 Laser Printer
- 0008 Inkjet Color Printer
- 0009 Optra S2450 Printer
- 000a Laser Printer
- 000b Inkjet Color Printer
- 000c Optra E312 Printer
- 000d Laser Printer
- 000e Laser Printer
- 000f Laser Printer
- 0010 Laser Printer
- 0011 Laser Printer
- 0012 Inkjet Color Printer
- 0013 Inkjet Color Printer
- 0014 InkJet Color Printer
- 0015 InkJet Color Printer
- 0016 Z12 Color Jetprinter
- 0017 Z32 printer
- 0018 Z52 Printer
- 0019 Forms Printer
- 001a Z65 Printer
- 001b InkJet Photo Printer
- 001c Kodak Personal Picture Maker 200 Printer
- 001d InkJet Color Printer
- 001e InkJet Photo Printer
- 001f Kodak Personal Picture Maker 200 Card Reader
- 0020 Z51 Printer
- 0021 Z33 Printer
- 0022 InkJet Color Printer
- 0023 Laser Printer
- 0024 Laser Printer
- 0025 InkJet Color Printer
- 0026 InkJet Color Printer
- 0027 InkJet Color Printer
- 0028 InkJet Color Printer
- 0029 Scan Print Copy
- 002a Scan Print Copy
- 002b Scan Print Copy
- 002c Scan Print Copy
- 002d X70/X73 Scan/Print/Copy
- 002e Scan Print Copy
- 002f Scan Print Copy
- 0030 Scan Print Copy
- 0031 Scan Print Copy
- 0032 Scan Print Copy
- 0033 Scan Print Copy
- 0034 Scan Print Copy
- 0035 Scan Print Copy
- 0036 Scan Print Copy
- 0037 Scan Print Copy
- 0038 Scan Print Copy
- 0039 Scan Print Copy
- 003a Scan Print Copy
- 003b Scan Print Copy
- 003c Scan Print Copy
- 003d X83 Scan/Print/Copy
- 003e Scan Print Copy
- 003f Scan Print Copy
- 0040 Scan Print Copy
- 0041 Scan Print Copy
- 0042 Scan Print Copy
- 0043 Scan Print Copy
- 0044 Scan Print Copy
- 0045 Scan Print Copy
- 0046 Scan Print Copy
- 0047 Scan Print Copy
- 0048 Scan Print Copy
- 0049 Scan Print Copy
- 004a Scan Print Copy
- 004b Scan Print Copy
- 004c Scan Print Copy
- 004d Laser Printer
- 004e Laser Printer
- 004f InkJet Color Printer
- 0050 InkJet Color Printer
- 0051 Laser Printer
- 0052 Laser Printer
- 0053 InkJet Color Printer
- 0054 InkJet Color Printer
- 0057 Z35 Printer
- 0058 Laser Printer
- 005a X63
- 005c InkJet Color Printer
- 0060 X74/X75 Scanner
- 0061 X74 Hub
- 0065 X5130
- 0069 X74/X75 Printer
- 006d X125
- 0072 X6170 Printer
- 0073 InkJet Color Printer
- 0078 InkJet Color Printer
- 0079 InkJet Color Printer
- 007a Generic Hub
- 007b InkJet Color Printer
- 007c Lexmark X1110/X1130/X1140/X1150/X1170/X1180/X1185
- 007d Photo 3150
- 008a 4200 Series
- 008b InkJet Color Printer
- 008c to CF/SM/SD/MS Card Reader
- 008e InkJet Color Printer
- 008f X422
- 0093 X5250
- 0095 E220 Printer
- 0096 2200 Series
- 0097 P6250
- 0098 7100 Series
- 009e P910 Series Human Interface Device
- 009f InkJet Color Printer
- 00a9 IBM Infoprint 1410 MFP
- 00ab InkJet Color Printer
- 00b2 3300 Series
- 00b8 7300 Series
- 00b9 8300 Series
- 00ba InkJet Color Printer
- 00bb 2300 Series
- 00bd Printing Support
- 00be Printing Support
- 00bf Printing Support
- 00c0 6300 Series
- 00c1 4300 Series
- 00c7 Printing Support
- 00c8 Printing Support
- 00c9 Printing Support
- 00cb Printing Support
- 00d0 9300 Series
- 00d3 X340 Scanner
- 00d4 X342n Scanner
- 00d5 Printing Support
- 00d6 X340 Scanner
- 00e8 X642e
- 00e9 2400 Series
- 00f6 3400 Series
- 00f7 InkJet Color Printer
- 00ff InkJet Color Printer
- 010b 2500 Series
- 010d 3500-4500 Series
- 010f 6500 Series
- 4303 Xerox WorkCentre Pro 412
-043e LG Electronics USA, Inc.
- 42bd Flatron 795FT Plus Monitor
- 4a4d Flatron 915FT Plus Monitor
- 7001 MF-PD100 Soul Digital MP3 Player
- 7013 MP3 Player
- 8484 LPC-U30 Webcam II
- 8585 LPC-UC35 Webcam
- 8888 Electronics VCS Camera II(LPC-U20)
- 9800 Remote Control Receiver_iMON
- 9803 eHome Infrared Receiver
- 9804 DMB Receiver Control
- 9c01 LGE Sync
-043f RadiSys Corp.
-0440 Eizo Nanao Corp.
-0441 Winbond Systems Lab.
- 1456 Hub
-0442 Ericsson, Inc.
- abba Bluetooth Device
-0443 Gateway, Inc.
- 000e Multimedia Keyboard
- 002e Millennium Keyboard
-0445 Lucent Technologies, Inc.
-0446 NMB Technologies Corp.
- 6781 Keyboard with PS/2 Mouse Port
- 6782 Keyboard
-0447 Momentum Microsystems
-044a Shamrock Tech. Co., Ltd
-044b WSI
-044c CCL/ITRI
-044d Siemens Nixdorf AG
-044e Alps Electric Co., Ltd
- 1104 Japanese Keyboard
- 2002 MD-5500 Printer
- 2014 Bluetooth Device
- 3001 UGTZ4 Bluetooth
- 3002 Bluetooth Device
- 3003 Bluetooth Device
- 3004 Bluetooth Adapter
- 3005 Integrated Bluetooth Device
- 3006 Bluetooth Adapter
- 3007 GlidePoint PS/2 TouchPad
- 300c Bluetooth Controller (ALPS/UGPZ6)
- 300d Bluetooth Controller (ALPS/UGPZ6)
- ffff Compaq Bluetooth Multiport Module
-044f ThrustMaster, Inc.
- 0400 HOTAS Cougar
- a003 Rage 3D Game Pad
- a01b PK-GP301 Driving Wheel
- a0a0 Top Gun Joystick
- a0a1 Top Gun Joystick (rev2)
- a0a3 Fusion Digital GamePad
- a201 PK-GP201 PlayStick
- b203 360 Modena Pro Wheel
- b300 Firestorm Dual Power
- b304 Firestorm Dual Power
- b307 vibrating Upad
- b603 force feedback Wheel
- b605 force feedback Racing Wheel
- b700 Tacticalboard
-0450 DFI, Inc.
-0451 Texas Instruments, Inc.
- 1234 Bluetooth Device
- 1428 Hub
- 1446 TUSB2040/2070 Hub
- 2036 TUSB2036 Hub
- 2046 TUSB2046 Hub
- 2077 TUSB2077 Hub
- 3410 TUSB3410 Microcontroller
- 3f02 SMC WSKP100 Wi-Fi Phone
- 5409 Frontier Labs NEX IA+ Digital Audio Player
- 6000 AU5 ADSL Modem (pre-reenum)
- 6001 AU5 ADSL Modem
- 6060 RNDIS/BeWAN ADSL2+
- 6070 RNDIS/BeWAN ADSL2+
- 625f Trekstor USB-Stick 12 CS-D 12 GB
- dbc0 Device Bay Controller
- e001 GraphLink
- e004 TI-89 Titanium Calculator
- e008 TI-84 Plus Silver Calculator
- f430 MSP-FET430UIF JTAG Tool
- ffff Bluetooth Device
-0452 Mitsubishi Electronics America, Inc.
- 0021 HID Monitor Controls
- 0050 Diamond Pro 900u CRT Monitor
- 0051 Integrated Hub
-0453 CMD Technology
- 6781 NMB Keyboard
- 6783 Chicony Composite Keyboard
-0454 Vobis Microcomputer AG
-0455 Telematics International, Inc.
-0456 Analog Devices, Inc.
-0457 Silicon Integrated Systems Corp.
- 0150 Super Talent 1GB Flash Drive
- 0151 Super Flash 1GB / GXT 64MB Flash Drive
- 0162 SiS162 usb Wireless LAN Adapter
- 0163 802.11 Wireless LAN Adapter
- 5401 Wireless Adapter RO80211GS-USB
-0458 KYE Systems Corp. (Mouse Systems)
- 0001 Mouse
- 0002 Genius NetMouse Pro
- 0003 Genius NetScroll+
- 0006 Easy Mouse+ USB(USB\Vid_0458&Pid;_0006) Mouse
- 000b NetMouse Wheel(P+U)
- 000c TACOMA Fingerprint V1.06.01
- 000e VideoCAM Web
- 0013 TACOMA Fingerprint Mouse V1.06.01
- 001a Genius WebScroll+
- 0036 Pocket Mouse LE
- 004c Slimstar Pro Keyboard
- 0056 Ergo 300 Mouse
- 0057 Enhanced Gaming Device
- 0059 Enhanced Laser Device
- 005a Enhanced Device
- 005b Enhanced Device
- 005c Enhanced Laser Gaming Device
- 005d Enhanced Device
- 0061 Bluetooth Dongle
- 0083 Bluetooth Dongle
- 0100 EasyPen Tablet
- 0101 CueCat
- 1001 Joystick
- 1002 Game Pad
- 1003 Genius VideoCam
- 1004 Flight2000 F-23 Joystick
- 100a Aashima Technology Trust Sight Fighter Vibration Feedback Joystick
- 2001 ColorPage-Vivid Pro Scanner
- 2004 ColorPage-HR6 V1 Scanner
- 2005 ColorPage-HR6/Vivid3
- 2007 ColorPage-HR6 V2 Scanner
- 2008 ColorPage-HR6 V2 Scanner
- 2009 ColorPage-HR6A Scanner
- 2011 ColorPage-Vivid3x Scanner
- 2012 Plustek Scanner
- 2013 ColorPage-HR7 Scanner
- 2014 ColorPage-Vivid4
- 2015 ColorPage-HR7LE Scanner
- 2016 ColorPage-HR6X Scanner
- 2017 ColorPage-Vivid3xe
- 2018 ColorPage-HR7X
- 2019 ColorPage-HR6X Slim
- 201a ColorPage-Vivid4xe
- 201b ColorPage-Vivid4x
- 201c ColorPage-HR8
- 201d ColorPage-Vivid 1200 X
- 201e ColorPage-Slim 1200
- 201f ColorPage-Vivid 1200 XE
- 2020 ColorPage-Slim 1200 USB2
- 2021 ColorPage-SF600
- 301d Genius MaxFire MiniPad
- 6001 GF3000F Ethernet Adapter
- 7004 VideoCAM Express
- 7007 VideoCAM Web
- 7009 G-Shot G312 Still Camera Device
- 700c VideoCAM Web V3
- 700d G-Shot G511 Composite Device
- 700f VideoCAM Web V4
- 7012 WebCAM USB2.0
- 7014 VideoCAM Live V3
- 701c G-Shot G512 Still Camera
- 7020 Sim 321C
-0459 Adobe Systems, Inc.
-045a SONICblue, Inc.
- 07da Supra Express 56K modem
- 0b4a SupraMax 2890 56K Modem [Lucent Atlas]
- 0b68 SupraMax 56K Modem
- 5001 Rio 600 MP3 Player
- 5002 Rio 800 MP3 Player
- 5003 Nike Psa/Play MP3 Player
- 5005 Rio S10 MP3 Player
- 5006 Rio S50 MP3 Player
- 5007 Rio S35 MP3 Player
- 5008 Rio 900 MP3 Player
- 5009 Rio S30 MP3 Player
- 500d Fuse MP3 Player
- 500e Chiba MP3 Player
- 500f Cali MP3 Player
- 5010 Rio S11 MP3 Player
- 501c Virgin MPF-1000
- 501d Rio Fuse
- 501e Rio Chiba
- 501f Rio Cali
- 503f Cali256 MP3 Player
- 5202 Rio Riot MP3 Player
- 5210 Rio Karma Music Player
- 5220 Rio Nitrus MP3 Player
- 5221 Rio Eigen
-045b Hitachi, Ltd
-045d Nortel Networks, Ltd
-045e Microsoft Corp.
- 0007 SideWinder Game Pad
- 0008 SideWinder Precision Pro
- 0009 IntelliMouse
- 000b Natural Keyboard Elite
- 000e SideWinder® Freestyle Pro
- 0014 Digital Sound System 80
- 001a SideWinder Precision Racing Wheel
- 001b SideWinder Force Feedback 2 Joystick
- 001c Internet Keyboard Pro
- 001d Natural Keyboard Pro
- 001e IntelliMouse Explorer
- 0023 Trackball Optical
- 0024 Trackball Explorer
- 0025 IntelliEye Mouse
- 0026 SideWinder GamePad Pro
- 0027 SideWinder PnP GamePad
- 0028 SideWinder Dual Strike
- 0029 IntelliMouse Optical
- 002b Internet Keyboard Pro
- 002d Internet Keyboard
- 002f Integrated Hub
- 0033 Sidewinder Strategic Commander
- 0034 SideWinder Force Feedback Wheel
- 0038 SideWinder Precision 2
- 0039 IntelliMouse Optical
- 003b SideWinder Game Voice
- 003c SideWinder Joystick
- 0040 Wheel Mouse Optical
- 0047 IntelliMouse Explorer 3.0
- 0048 Office Keyboard 1.0A
- 0053 Optical Mouse
- 0059 Wireless IntelliMouse Explorer
- 005c Office Keyboard (106/109)
- 005f Wireless MultiMedia Keyboard
- 0061 Wireless MultiMedia Keyboard (106/109)
- 0063 Wireless Natural MultiMedia Keyboard
- 0065 Wireless Natural MultiMedia Keyboard (106/109)
- 006a Wireless Optical Mouse (IntelliPoint)
- 006d eHome Remote Control Keyboard keys
- 006e MN510 802.11b Adapter
- 006f Smart Display Reference Device
- 0070 Wireless MultiMedia Keyboard
- 0071 Wireless MultiMedia Keyboard (106/109)
- 0072 Wireless Natural MultiMedia Keyboard
- 0073 Wireless Natural MultiMedia Keyboard (106/109)
- 007a 10/100 USB NIC
- 007d Notebook Optical Mouse
- 007e Wireless Transceiver for Bluetooth
- 0080 Digital Media Pro Keyboard
- 0083 Basic Optical Mouse
- 0084 Basic Optical Mouse
- 008a Wireless Keyboard and Mouse
- 008b Dual Receiver Wireless Mouse (IntelliPoint)
- 008c Wireless Intellimouse Explorer 2.0
- 0095 IntelliMouse Explorer 4.0 (IntelliPoint)
- 009c Wireless Transceiver for Bluetooth 2.0
- 00a0 eHome Infrared Receiver
- 00b0 Digital Media Pro Keyboard
- 00b9 Wireless Optical Mouse 3.0
- 00bb Fingerprint Reader
- 00bc Fingerprint Reader
- 00bd Fingerprint Reader
- 00c2 Wireless Adapter MN-710
- 00c9 MTP Device
- 00ce Generic PPC Flash device
- 00d1 Optical Mouse with Tilt Wheel
- 00da eHome Infrared Receiver
- 00db Natural Ergonomic Keyboard 4000 V1.0
- 00e1 Wireless Laser Mouse 6000 Reciever
- 00f4 LifeCam VX-6000.
- 00f5 LifeCam VX-3000.
- 00f7 LifeCam VX-1000.
- 00f8 LifeCam NX-6000.
- 0202 Xbox Controller
- 0280 XBox Device
- 0284 Xbox DVD Playback Kit
- 0285 Xbox Controller S
- 0288 Xbox Controller S Hub
- 0289 Xbox Controller S
- 028b Xbox360 DVD Emulator
- 028d Xbox360 Memory Unit 64MB
- 028e Xbox360 Controller
- 028f Xbox360 Wireless Controller
- 0290 Xbox360 Performance Pipe (PIX)
- 0292 Xbox360 Wireless Networking Adapter
- 029c Xbox360 HD-DVD Drive
- 029d Xbox360 HD-DVD Drive
- 029e Xbox360 HD-DVD Memory Unit
- 0400 Windows Powered Pocket PC 2002
- 0401 Windows Powered Pocket PC 2002
- 0402 Windows Powered Pocket PC 2002
- 0403 Windows Powered Pocket PC 2002
- 0404 Windows Powered Pocket PC 2002
- 0405 Windows Powered Pocket PC 2002
- 0406 Windows Powered Pocket PC 2002
- 0407 Windows Powered Pocket PC 2002
- 0408 Windows Powered Pocket PC 2002
- 0409 Windows Powered Pocket PC 2002
- 040a Windows Powered Pocket PC 2002
- 040b Windows Powered Pocket PC 2002
- 040c Windows Powered Pocket PC 2002
- 040d Windows Powered Pocket PC 2002
- 040e Windows Powered Pocket PC 2002
- 040f Windows Powered Pocket PC 2002
- 0410 Windows Powered Pocket PC 2002
- 0411 Windows Powered Pocket PC 2002
- 0412 Windows Powered Pocket PC 2002
- 0413 Windows Powered Pocket PC 2002
- 0414 Windows Powered Pocket PC 2002
- 0415 Windows Powered Pocket PC 2002
- 0416 Windows Powered Pocket PC 2002
- 0417 Windows Powered Pocket PC 2002
- 0432 Windows Powered Pocket PC 2003
- 0433 Windows Powered Pocket PC 2003
- 0434 Windows Powered Pocket PC 2003
- 0435 Windows Powered Pocket PC 2003
- 0436 Windows Powered Pocket PC 2003
- 0437 Windows Powered Pocket PC 2003
- 0438 Windows Powered Pocket PC 2003
- 0439 Windows Powered Pocket PC 2003
- 043a Windows Powered Pocket PC 2003
- 043b Windows Powered Pocket PC 2003
- 043c Windows Powered Pocket PC 2003
- 043d Becker Traffic Assist Highspeed 7934
- 043e Windows Powered Pocket PC 2003
- 043f Windows Powered Pocket PC 2003
- 0440 Windows Powered Pocket PC 2003
- 0441 Windows Powered Pocket PC 2003
- 0442 Windows Powered Pocket PC 2003
- 0443 Windows Powered Pocket PC 2003
- 0444 Windows Powered Pocket PC 2003
- 0445 Windows Powered Pocket PC 2003
- 0446 Windows Powered Pocket PC 2003
- 0447 Windows Powered Pocket PC 2003
- 0448 Windows Powered Pocket PC 2003
- 0449 Windows Powered Pocket PC 2003
- 044a Windows Powered Pocket PC 2003
- 044b Windows Powered Pocket PC 2003
- 044c Windows Powered Pocket PC 2003
- 044d Windows Powered Pocket PC 2003
- 044e Windows Powered Pocket PC 2003
- 044f Windows Powered Pocket PC 2003
- 0450 Windows Powered Pocket PC 2003
- 0451 Windows Powered Pocket PC 2003
- 0452 Windows Powered Pocket PC 2003
- 0453 Windows Powered Pocket PC 2003
- 0454 Windows Powered Pocket PC 2003
- 0455 Windows Powered Pocket PC 2003
- 0456 Windows Powered Pocket PC 2003
- 0457 Windows Powered Pocket PC 2003
- 0458 Windows Powered Pocket PC 2003
- 0459 Windows Powered Pocket PC 2003
- 045a Windows Powered Pocket PC 2003
- 045b Windows Powered Pocket PC 2003
- 045c Windows Powered Pocket PC 2003
- 045d Windows Powered Pocket PC 2003
- 045e Windows Powered Pocket PC 2003
- 045f Windows Powered Pocket PC 2003
- 0460 Windows Powered Pocket PC 2003
- 0461 Windows Powered Pocket PC 2003
- 0462 Windows Powered Pocket PC 2003
- 0463 Windows Powered Pocket PC 2003
- 0464 Windows Powered Pocket PC 2003
- 0465 Windows Powered Pocket PC 2003
- 0466 Windows Powered Pocket PC 2003
- 0467 Windows Powered Pocket PC 2003
- 0468 Windows Powered Pocket PC 2003
- 0469 Windows Powered Pocket PC 2003
- 046a Windows Powered Pocket PC 2003
- 046b Windows Powered Pocket PC 2003
- 046c Windows Powered Pocket PC 2003
- 046d Windows Powered Pocket PC 2003
- 046e Windows Powered Pocket PC 2003
- 046f Windows Powered Pocket PC 2003
- 0470 Windows Powered Pocket PC 2003
- 0471 Windows Powered Pocket PC 2003
- 0472 Windows Powered Pocket PC 2003
- 0473 Windows Powered Pocket PC 2003
- 0474 Windows Powered Pocket PC 2003
- 0475 Windows Powered Pocket PC 2003
- 0476 Windows Powered Pocket PC 2003
- 0477 Windows Powered Pocket PC 2003
- 0478 Windows Powered Pocket PC 2003
- 0479 Windows Powered Pocket PC 2003
- 047a Windows Powered Pocket PC 2003
- 047b Windows Powered Pocket PC 2003
- 04c8 Windows Powered Smartphone 2002
- 04c9 Windows Powered Smartphone 2002
- 04ca Windows Powered Smartphone 2002
- 04cb Windows Powered Smartphone 2002
- 04cc Windows Powered Smartphone 2002
- 04cd Windows Powered Smartphone 2002
- 04ce Windows Powered Smartphone 2002
- 04d7 Windows Powered Smartphone 2003
- 04d8 Windows Powered Smartphone 2003
- 04d9 Windows Powered Smartphone 2003
- 04da Windows Powered Smartphone 2003
- 04db Windows Powered Smartphone 2003
- 04dc Windows Powered Smartphone 2003
- 04dd Windows Powered Smartphone 2003
- 04de Windows Powered Smartphone 2003
- 04df Windows Powered Smartphone 2003
- 04e0 Windows Powered Smartphone 2003
- 04e1 Windows Powered Smartphone 2003
- 04e2 Windows Powered Smartphone 2003
- 04e3 Windows Powered Smartphone 2003
- 04e4 Windows Powered Smartphone 2003
- 04e5 Windows Powered Smartphone 2003
- 04e6 Windows Powered Smartphone 2003
- 04e7 Windows Powered Smartphone 2003
- 04e8 Windows Powered Smartphone 2003
- 04e9 Windows Powered Smartphone 2003
- 04ea Windows Powered Smartphone 2003
- 0708 Transceiver v 3.0 for Bluetooth
- 070a Charon Bluetooth Dongle (DFU)
- 930a ISOUSB.SYS Intel 82930 Isochronous IO Test Board
- fff8 Keyboard
-0460 Ace Cad Enterprise Co., Ltd
-0461 Primax Electronics, Ltd
- 0300 G2-300 Scanner
- 0301 G2E-300 Scanner
- 0302 G2-300 #2 Scanner
- 0303 G2E-300 #2 Scanner
- 0340 Colorado 9600 Scanner
- 0341 Colorado 600u Scanner
- 0345 Visioneer 6200 Scanner
- 0346 Memorex Maxx 6136u Scanner
- 0347 Primascan Colorado 2600u/Visioneer 4400 Scanner
- 0360 Colorado 19200 Scanner
- 0361 Colorado 1200u Scanner
- 0363 VistaScan Astra 3600(ENG)
- 0364 LG Electronics Scanworks 600U Scanner
- 0365 VistaScan Astra 3600(ENG)
- 0366 6400
- 0367 VistaScan Astra 3600(ENG)
- 0371 Visioneer Onetouch 8920 Scanner
- 0374 UMAX Astra 2500
- 0375 VistaScan Astra 3600(ENG)
- 0377 Medion MD 5345 Scanner
- 0378 VistaScan Astra 3600(ENG)
- 037b Medion MD 6190 Scanner
- 037c VistaScan Astra 3600(ENG)
- 0380 G2-600 Scanner
- 0381 ReadyScan 636i Scanner
- 0382 G2-600 #2 Scanner
- 0383 G2E-600 Scanner
- 038a UMAX Astra 3000/3600
- 038b Xerox 2400 Onetouch
- 038c UMAX Astra 4100
- 0392 Medion/Lifetec/Tevion/Cytron MD 6190
- 03a8 9420M
- 0813 IBM UltraPort Camera
- 0815 Micro Innovations WebCam
- 0819 Fujifilm IX-30 Camera [webcam mode]
- 081a Fujifilm IX-30 Camera [storage mode]
- 081c Elitegroup ECS-C11 Camera
- 081d Elitegroup ECS-C11 Storage
- 0a00 Web Cam 320
- 4d01 Comfort Keyboard
- 4d02 Mouse-in-a-Box
- 4d03 Kensington Mouse-in-a-box
- 4d04 Mouse
- 4d06 Balless Mouse (HID)
- 4d2a PoPo Elixir Mouse (HID)
- 4d2b Wireless Laser Mini Mouse (HID)
- 4d2c PoPo Mini Pointer Mouse (HID)
- 4d2e Optical Mobile Mouse (HID)
-0463 MGE UPS Systems
- 0001 UPS
- ffff UPS
-0464 AMP/Tycoelectronics Corp.
-0467 AT&T Paradyne
-0468 Wieson Technologies Co., Ltd
-046a Cherry GmbH
- 0001 My3000 Keyboard
- 0003 My3000 Hub
- 0004 CyBoard Keyboard
- 0005 XX33 SmartCard Reader Keyboard
- 0010 SmartBoard XX44
- 0023 Cymotion Master Linux Keyboard
- 002d SmartTerminal XX44
- 003e SmartTerminal ST-2xxx
-046b American Megatrends, Inc.
- 0001 Keyboard
- 0101 PS/2 Keyboard, Mouse & Joystick Ports
- 0301 USB 1.0 Hub
- 0500 Serial & Parallel Ports
-046c Toshiba Corp., Digital Media Equipment
-046d Logitech, Inc.
- 0082 Acer Aspire 5672 Webcam
- 0200 WingMan Extreme Joystick
- 0203 M2452 Keyboard
- 0301 M4848 Mouse
- 0401 HP PageScan
- 0402 NEC PageScan
- 040f Logitech/Storm PageScan
- 0430 Mic (Cordless)
- 0801 QuickCam Home
- 0810 QuickCam Pro
- 0820 QuickCam VC
- 0830 QuickClip
- 0840 QuickCam Express
- 0850 QuickCam Web
- 0870 QuickCam Express
- 0890 QuickCam Traveler
- 0892 OrbiCam
- 0894 CrystalCam
- 0895 QuickCam for Dell Notebooks
- 0896 OrbiCam
- 0897 QuickCam for Dell Notebooks
- 0899 QuickCam for Dell Notebooks
- 08a0 QuickCam IM
- 08a1 QuickCam IM with sound
- 08a2 Labtec WebCam Pro
- 08a3 QuickCam QuickCam Chat
- 08a6 QuickCam IM
- 08a7 QuickCam Image
- 08a9 Notebook Deluxe
- 08aa Labtec Notebooks
- 08ac QuickCam Cool
- 08ad QuickCam Communicate STX
- 08ae Quickcam for Notebooks
- 08af QuickCam Easy/Cool
- 08b0 QuickCam 3000 Pro [pwc]
- 08b1 QuickCam Notebook Pro
- 08b2 QuickCam Pro 4000
- 08b3 QuickCam Zoom
- 08b4 QuickCam Zoom
- 08b5 QuickCam Sphere
- 08b9 QuickCam IM
- 08bd Microphone (Pro 4000)
- 08c0 QuickCam Pro 3000
- 08c1 QuickCam Fusion
- 08c2 QuickCam PTZ
- 08c3 Camera (Notebooks Pro)
- 08c5 QuickCam Pro 5000
- 08c6 QuickCam for DELL Notebooks
- 08c9 QuickCam Ultra Vision
- 08ca Mic (Fusion)
- 08cb Mic (Notebooks Pro)
- 08cc Mic (PTZ)
- 08ce QuickCam Pro 5000
- 08cf QuickCam UpdateMe
- 08d0 QuickCam Express
- 08d7 QuickCam Communicate STX
- 08d8 QuickCam for Notebook Deluxe
- 08d9 QuickCam IM/Connect
- 08da QuickCam Messanger
- 08dd QuickCam for Notebooks
- 08e0 QuickCam Express
- 08e1 Labtec WebCam
- 08f0 QuickCam Messenger
- 08f1 QuickCam Express
- 08f2 Microphone (Messenger)
- 08f3 QuickCam Express
- 08f4 Labtec WebCam
- 08f5 QuickCam Messenger Communicate
- 08f6 Quickcam Messenger Plus
- 0900 ClickSmart 310
- 0901 ClickSmart 510
- 0903 ClickSmart 820
- 0905 ClickSmart 820
- 0910 QuickCam Cordless
- 0920 QuickCam Express
- 0921 Labtec WebCam
- 0922 QuickCam Live
- 0928 Quickcam Express
- 0929 Labtec WebCam Pro
- 092a QuickCam for Notebooks
- 092b Labtec WebCam Plus
- 092c QuickCam Chat
- 092d QuickCam Express / Go
- 092e QuickCam Chat
- 092f QuickCam Express Plus
- 0950 Pocket Camera
- 0960 ClickSmart 420
- 0970 Pocket750
- 0990 QuickCam Pro 9000
- 0991 QuickCam Pro for Notebooks
- 0992 QuickCam Communicate Deluxe
- 0994 QuickCam Orbit/Sphere AF
- 09b0 OrbiCam
- 09c0 QuickCam for Dell Notebooks Mic
- 09c1 QuickCam Deluxe for Notebooks
- 0a01 USB Headset
- 0a02 Premium Stereo USB Headset 350
- 0a03 Logitech USB Microphone
- 0a04 V20 portable speakers (USB powered)
- 0b02 BT Mini-Receiver (HID proxy mode)
- 8801 Video Camera
- b305 BT Mini-Receiver
- bfe4 Premium Optical Wheel Mouse
- c000 N43 [Pilot Mouse]
- c001 N48/M-BB48 [FirstMouse Plus]
- c002 M-BA47 [MouseMan Plus]
- c003 MouseMan
- c004 WingMan Gaming Mouse
- c005 WingMan Gaming Wheel Mouse
- c00b MouseMan Wheel
- c00c Optical Wheel Mouse
- c00d MouseMan Wheel+
- c00e M-BJ58/M-BJ69 Optical Wheel Mouse
- c00f MouseMan Traveler/Mobile
- c011 Optical MouseMan
- c012 Mouseman Dual Optical
- c014 Corded Workstation Mouse
- c015 Corded Workstation Mouse
- c016 M-UV69a/HP M-UV96 Optical Wheel Mouse
- c018 Optical Wheel Mouse
- c019 Optical Tilt Wheel Mouse
- c01a M-BQ85 Optical Wheel Mouse
- c01b MX310 Optical Mouse
- c01c Optical Mouse
- c01d MX510 Optical Mouse
- c01e MX518 Optical Mouse
- c024 MX300 Optical Mouse
- c025 MX500 Optical Mouse
- c030 iFeel Mouse
- c031 iFeel Mouse+
- c032 MouseMan iFeel
- c033 iFeel MouseMan+
- c034 MouseMan Optical
- c035 Mouse
- c036 Mouse
- c037 Mouse
- c038 Mouse
- c03d M-BT69a Pilot Optical Mouse
- c03e Premium Optical Wheel Mouse
- c03f UltraX Optical Mouse
- c040 Corded Tilt-Wheel Mouse
- c043 MX320 Laser Mouse
- c044 LX3 Optical Mouse
- c045 Optical Mouse
- c046 RX1000 Laser Mouse
- c047 Laser Mouse
- c049 G5 Laser Mouse
- c050 RX 250 Optical Mouse
- c051 G3 (MX518) Optical Mouse
- c053 Laser Mouse
- c101 UltraX Media Remote
- c201 WingMan Extreme Joystick with Throttle
- c202 WingMan Formula
- c207 WingMan Extreme Digital 3D
- c208 WingMan Gamepad Extreme
- c209 WingMan Gamepad
- c20a WingMan RumblePad
- c20b WingMan Action Pad
- c20c WingMan Precision
- c20d WingMan Attack 2
- c20e WingMan Formula GP
- c211 iTouch Cordless Reciever
- c212 WingMan Extreme Digital 3D
- c213 J-UH16 (Freedom 2.4 Cordless Joystick)
- c214 ATK3 (Attack III Joystick)
- c215 Extreme 3D Pro
- c216 Dual Action Gamepad
- c218 Logitech RumblePad 2 USB
- c219 Cordless RumblePad 2
- c21a Precision Gamepad
- c221 G15 Keyboard / Keyboard
- c222 G15 Keyboard / LCD
- c223 G15 Keyboard / USB Hub
- c281 WingMan Force
- c283 WingMan Force 3D
- c285 WingMan Strike Force 3D
- c286 Force 3D Pro
- c291 WingMan Formula Force
- c293 WingMan Formula Force GP
- c294 Driving Force
- c295 Momo Force Steering Wheel
- c298 Driving Force Pro
- c2a0 Wingman Force Feedback Mouse
- c2a1 WingMan Force Feedback Mouse
- c301 iTouch Keyboard
- c302 iTouch Pro Keyboard
- c303 iTouch Keyboard
- c305 Internet Keyboard
- c307 Internet Keyboard
- c308 Internet Navigator Keyboard
- c309 Internet Keyboard
- c30a iTouch Composite
- c30c Internet Keys (X)
- c30d Internet Keys
- c30e UltraX Keys (X)
- c30f Logicool HID-Compliant Keyboard (106 key)
- c315 Classic New Touch Keyboard
- c316 HID-Compliant Keyboard
- c401 TrackMan Marble Wheel
- c402 Marble Mouse (2-button)
- c403 Turbo TrackMan Marble FX
- c404 TrackMan Wheel
- c408 Marble Mouse (4-button)
- c501 Cordless Mouse Receiver
- c502 Cordless Mouse & iTouch Keys
- c503 Cordless Mouse+Keyboard Receiver
- c504 Cordless Mouse+Keyboard Receiver
- c505 Cordless Mouse+Keyboard Receiver
- c506 MX-700 Cordless Mouse Receiver
- c508 Cordless Trackball
- c509 Cordless Keyboard
- c50a Cordless Mouse
- c50b Cordless Desktop Optical
- c50d Cordless Mouse
- c50e MX-1000 Cordless Mouse Receiver
- c510 Cordless Mouse
- c512 LX-700 Cordless Desktop Receiver
- c513 MX3000 Cordless Desktop Receiver
- c514 Cordless Mouse
- c517 LX710 Cordless Desktop Laser
- c518 MX610 Laser Cordless Mouse
- c51a MX Revolution/G7 Cordless Mouse
- c521 MX620 Laser Cordless Mouse
- c625 3Dconnexion Space Pilot 3D Mouse
- c626 3DConnexion Space Navigator 3D Mouse
- c627 3DConnexion Space Explorer 3D Mouse
- c702 Cordless Presenter
- c703 Elite Keyboard Y-RP20 + Mouse MX900 (Bluetooth)
- c707 Bluetooth wireless hub
- c708 Bluetooth wireless hub
- c709 BT Mini-Receiver (HCI mode)
- c70a MX5000 Cordless Desktop
- c70b BT Mini-Receiver (HID proxy mode)
- c70c BT Mini-Receiver (HID proxy mode)
- c70d Bluetooth wireless hub
- c70e MX1000 Bluetooth Laser Mouse
- c70f Bluetooth wireless hub
- c712 Bluetooth wireless hub
- c715 Bluetooth wireless hub
- c71a Bluetooth wireless hub
- c71d Bluetooth wireless hub
- c720 Bluetooth wireless hub
- ca03 MOMO Racing
- ca04 Formula Vibration Feedback Wheel
- d001 QuickCam Pro
-046e Behavior Tech. Computer Corp.
- 0100 Keyboard
- 3001 Mass Storage Device
- 3002 Mass Storage Device
- 3003 Mass Storage Device
- 3005 Mass Storage Device
- 3008 Mass Storage Device
- 5250 KeyMaestro Multimedia Keyboard
- 5273 KeyMaestro Multimedia Keyboard
- 5308 KeyMaestro Keyboard
- 5408 KeyMaestro Multimedia Keyboard/Hub
- 5720 Smart Card Reader
- 6782 BTC 7932 mouse+keyboard
-046f Crystal Semiconductor
-0471 Philips
- 0101 DSS350 Digital Speaker System
- 0104 DSS330 Digital Speaker System [uda1321]
- 0105 UDA1321
- 0160 MP3 Player
- 0161 MP3 Player
- 0201 Hub
- 0222 Creative Nomad Jukebox
- 0302 PCA645VC WebCam [pwc]
- 0303 PCA646VC WebCam [pwc]
- 0304 Askey VC010 WebCam [pwc]
- 0307 PCVC675K WebCam [pwc]
- 0308 PCVC680K WebCam [pwc]
- 030b PC VGA Camera (Vesta Fun)
- 030c PCVC690K WebCam [pwc]
- 0310 PCVC730K WebCam [pwc]
- 0311 PCVC740K ToUcam Pro [pwc]
- 0312 PCVC750K WebCam [pwc]
- 0314 DMVC 1000K
- 0316 DMVC 2000K Video Capture
- 0321 FunCam
- 0325 SPC 200NC PC Camera
- 0326 SPC 300NC PC Camera
- 0327 WebCam SPC 6000 NC (WebCam w/ mic)
- 0329 ORITE CCD Webcam(PC370R)
- 0401 Semiconductors CICT Keyboard
- 0402 PS/2 Mouse on Semiconductors CICT Keyboard
- 0406 15 inch Detachable Monitor
- 0407 10 inch Mobile Monitor
- 0471 Digital Speaker System
- 0601 OVU1020 IR Dongle (Kbd+Mouse)
- 0602 ATI Remote Wonder II Input Device
- 0603 ATI Remote Wonder II Controller
- 0608 eHome Infrared Receiver
- 060a TSU9600 Remote Control
- 060e RF Dongle
- 0619 TSU9400 Remote Control
- 0700 Semiconductors CICT Hub
- 0701 150P1 TFT Display
- 0809 AVNET Bluetooth Device
- 0811 JR24 CDRW
- 0815 eHome Infrared Receiver
- 1120 Creative Rhomba MP3 player
- 1125 Nike psa[128max Player
- 1137 HDD065 MP3 player
- 1201 Arima Bluetooth Device
- 1230 Wireless Adapter 11g
- 1232 SNU6500 Wireless Adapter
- 1233 Wireless Adapter Bootloader Download
- 1236 SNU5600
- 1237 TalkTalk SNU5630NS/05 Wireless Adapter
- 1552 ISP 1581 Hi-Speed USB MPEG2 Encoder Reference Kit
- 1801 Diva MP3 player
- 200a Wireless Network Adapter
- 200f 802.11n Wireless Adapter
- 485d Senselock SenseIV v2.x
-0472 Chicony Electronics Co., Ltd
- 0065 PFU-65 Keyboard
-0473 Sanyo Information Business Co., Ltd
-0474 Sanyo Electric Co., Ltd
- 0110 Digital Voice Recorder R200
- 0217 Xacti J2
- 022f C5 Digital Media Camera (mass storage mode)
- 0230 C5 Digital Media Camera (PictBridge mode)
- 0231 C5 Digital Media Camera (PC control mode)
- 0401 Optical Drive
- 0701 SCP-4900 Cellphone
- 071f USB Com Port Enumerator
-0475 Relisys/Teco Information System
- 0100 NEC Petiscan
- 0103 Eclipse 1200U/Episode
- 0210 Scorpio Ultra 3
-0476 AESP
-0477 Seagate Technology, Inc.
-0478 Connectix Corp.
- 0001 QuickCam
- 0002 QuickClip
- 0003 QuickCam Pro
-0479 Advanced Peripheral Laboratories
-047a Semtech Corp.
- 0004 ScreenCoder UR7HCTS2-USB
-047b Silitek Corp.
- 0001 Keyboard
- 0002 Keyboard and Mouse
- 00f9 SK-1789u Keyboard
- 0101 BlueTooth Keyboard and Mouse
- 020b SK-3105 SmartCard Reader
- 050e Internet Compact Keyboard
- 1000 Trust Office Scan USB 19200
- 1002 HP ScanJet 4300c Parallel Port
-047c Dell Computer Corp.
-047d Kensington
- 1001 Mouse*in*a*Box
- 1002 Expert Mouse Pro
- 1003 Orbit TrackBall
- 1004 MouseWorks
- 1005 TurboBall
- 1006 TurboRing
- 1009 Orbit TrackBall for Mac
- 1012 PocketMouse
- 1013 Mouse*in*a*Box Optical Pro
- 1014 Expert Mouse Pro Wireless
- 1015 Expert Mouse
- 1016 ADB/USB Orbit
- 1018 Studio Mouse
- 101d Mouse*in*a*Box Optical Pro
- 101e Studio Mouse Wireless
- 101f PocketMouse Pro
- 1020 Expert Mouse Trackball
- 1021 Expert Mouse Wireless
- 1022 Orbit Optical
- 1023 Pocket Mouse Pro Wireless
- 1024 PocketMouse
- 1025 Mouse*in*a*Box Optical Elite Wireless
- 1026 Pocket Mouse Pro
- 1027 StudioMouse
- 1028 StudioMouse Wireless
- 1029 Mouse*in*a*Box Optical Elite
- 102a Mouse*in*a*Box Optical
- 102b PocketMouse
- 102c Iridio
- 102d Pilot Optical
- 102e Pilot Optical Pro
- 102f Pilot Optical Pro Wireless
- 104a PilotMouse Mini Retractable
- 105d PocketMouse Bluetooth
- 105e Bluetooth EDR Dongle
- 1061 PocketMouse Grip
- 1062 PocketMouse Max
- 1063 PocketMouse Max Wireless
- 1064 PocketMouse 2.0 Wireless
- 1065 PocketMouse 2.0
- 1066 PocketMouse Max Glow
- 1067 ValueMouse
- 1068 ValueOpt White
- 1069 ValueOpt Black
- 106a PilotMouse Laser Wireless Mini
- 106b PilotMouse Laser - 3 Button
- 106c PilotMouse Laser - Gaming
- 106d PilotMouse Laser - Wired
- 106e PilotMouse Micro Laser
- 1070 ValueOpt Travel
- 1071 ValueOpt RF TX
- 1072 PocketMouse Colour
- 1073 PilotMouse Laser - 6 Button
- 1074 PilotMouse Laser Wireless Mini
- 1075 SlimBlade Presenter Media Mouse
- 1076 SlimBlade Media Mouse
- 1077 SlimBlade Presenter Mouse
- 1152 Bluetooth EDR Dongle
- 2002 Optical Elite Wireless
- 2010 Wireless Presentation Remote
- 2021 PilotBoard Wireless
- 2030 PilotBoard Wireless
- 2034 SlimBlade Media Notebook Set
- 4003 Gravis Xterminator Digital Gamepad
- 4005 Gravis Eliminator GamePad Pro
- 4006 Gravis Eliminator AfterShock
- 4007 Gravis Xterminator Force
- 4008 Gravis Destroyer TiltPad
- 5001 Cabo I Camera
- 5002 VideoCam CABO II
- 5003 VideoCam
-047e Agere Systems, Inc. (Lucent)
- 0300 ORiNOCO Card
- 1001 USS720 Parallel Port
- 2892 Systems Soft Modem
- bad1 Lucent 56k Modem
- f101 Atlas Modem
-047f Plantronics, Inc.
- 0101 Bulk Driver
- 0301 Bulk Driver
- 0ca1 USB DSP v4 Audio Interface
-0480 Toshiba America Info. Systems, Inc.
- 0001 InTouch Module
- 0004 InTouch Module
- 0011 InTouch Module
- 0014 InTouch Module
-0481 Zenith Data Systems
-0482 Kyocera Corp.
- 000e FS-1020D Printer
- 0100 Finecam S3x
- 0101 Finecam S4
- 0103 Finecam S5
- 0105 Finecam L3
- 0106 Finecam
- 0107 Digital Camera Device
- 0108 Digital Camera Device
- 0203 AH-K3001V
- 0204 iBurst Terminal
-0483 SGS Thomson Microelectronics
- 0137 BeWAN ADSL USB ST (blue or green)
- 1307 Cytronix 6in1 card reader
- 163d Cool Icam Digi-MP3
- 2015 TouchChip® Fingerprint Reader
- 2016 Fingerprint Reader
- 2017 Biometric Smart Card Reader
- 2018 BioSimKey
- 2302 Portable Flash Device (PFD)
- 4810 ISDN adapter
- 481d BT Digital Access adapter
- 5000 ST Micro Bluetooth Device
- 5001 ST Micro Bluetooth Device
- 7270 ST Micro Serial Bridge
- 7554 56k SoftModem
- ff10 Swann ST56 Modem
-0484 Specialix
-0485 Nokia Monitors
-0486 ASUS Computers, Inc.
-0487 Stewart Connector
-0488 Cirque Corp.
-0489 Foxconn / Hon Hai
- 0502 SmartMedia Card Reader Firmware Loader
- 0503 SmartMedia Card Reader
-048a S-MOS Systems, Inc.
-048c Alps Electric Ireland, Ltd
-048d Integrated Technology Express, Inc.
-048f Eicon Tech.
-0490 United Microelectronics Corp.
-0491 Capetronic
- 0003 Taxan Monitor Control
-0492 Samsung SemiConductor, Inc.
-0493 MAG Technology Co., Ltd
-0495 ESS Technology, Inc.
-0496 Micron Electronics
-0497 Smile International
-0498 Capetronic (Kaohsiung) Corp.
-0499 Yamaha Corp.
- 1000 UX256 MIDI I/F
- 1001 MU1000
- 1002 MU2000
- 1003 MU500
- 1004 UW500
- 1005 MOTIF6
- 1006 MOTIF7
- 1007 MOTIF8
- 1008 UX96 MIDI I/F
- 1009 UX16 MIDI I/F
- 100a EOS BX
- 100c UC-MX
- 100d UC-KX
- 100e S08
- 100f CLP-150
- 1010 CLP-170
- 1011 P-250
- 1012 TYROS
- 1013 PF-500
- 1014 S90
- 1015 MOTIF-R
- 1016 MDP-5
- 1017 CVP-204
- 1018 CVP-206
- 1019 CVP-208
- 101a CVP-210
- 101b PSR-1100
- 101c PSR-2100
- 101d CLP-175
- 101e PSR-K1
- 101f EZ-J24
- 1020 EZ-250i
- 1021 MOTIF ES 6
- 1022 MOTIF ES 7
- 1023 MOTIF ES 8
- 1024 CVP-301
- 1025 CVP-303
- 1026 CVP-305
- 1027 CVP-307
- 1028 CVP-309
- 1029 CVP-309GP
- 102a PSR-1500
- 102b PSR-3000
- 102e ELS-01/01C
- 1030 PSR-295/293
- 1031 DGX-205/203
- 1032 DGX-305
- 1033 DGX-505
- 2000 DGP-7
- 2001 DGP-5
- 3001 YST-MS55D USB Speaker
- 4000 NetVolante RTA54i Broadband&ISDN Router
- 4001 NetVolante RTW65b Broadband Wireless Router
- 4002 NetVolante RTW65i Broadband&ISDN Wireless Router
- 4004 NetVolante RTA55i Broadband VoIP Router
- 5000 CS1D
- 5001 DSP1D
- 5002 DME32
- 5003 DM2000
- 5004 02R96
- 5005 ACU16-C
- 5006 NHB32-C
- 5007 DM1000
- 5008 01V96
- 5009 SPX2000
- 500a PM5D
- 500b DME64N
- 500c DME24N
- 6001 CRW2200UX Lightspeed 2 External CD-RW Drive
- 7000 DTX
- 7010 UB99
-049a Gandalf Technologies, Ltd
-049b Curtis Computer Products
-049c Acer Advanced Labs, Inc.
- 0002 Keyboard (???)
-049d VLSI Technology
-049f Compaq Computer Corp.
- 0002 InkJet Color Printer
- 0003 iPAQ PocketPC
- 000e Internet Keyboard
- 0012 InkJet Color Printer
- 0018 PA-1/PA-2 MP3 Player
- 0019 InkJet Color Printer
- 001a S4 100 Scanner
- 001e IJ650 Inkjet Printer
- 001f WL215 Adapter
- 0021 S200 Scanner
- 0027 Bluetooth Multiport Module by Compaq
- 002a 1400P Inkjet Printer
- 002b A3000
- 002c Lexmark X125
- 0032 802.11b Adapter [ipaq h5400]
- 0033 802.11b Adapter [orinoco]
- 0036 Bluetooth Multiport Module
- 0051 KU-0133 Easy Access Internet Keyboard
- 0076 Wireless LAN MultiPort W200
- 0080 GPRS Multiport
- 0086 Bluetooth Device
- 504a Personal Jukebox PJB100
- 505a Linux-USB "CDC Subset" Device, or Itsy (experimental)
- 8511 iPAQ Networking 10/100 Ethernet [pegasus2]
-04a0 Digital Equipment Corp.
-04a1 SystemSoft Corp.
- fff0 Telex Composite Device
-04a2 FirePower Systems
-04a3 Trident Microsystems, Inc.
-04a4 Hitachi, Ltd
- 0004 DVD-CAM DZ-MV100A Camcorder
- 001e DVDCAM USB HS Interface
-04a5 Acer Peripherals Inc. (now BenQ Corp.)
- 0001 Keyboard
- 0002 API Ergo K/B
- 0003 API Generic K/B Mouse
- 12a6 AcerScan C310U
- 1a20 Prisa 310U
- 1a2a Prisa 620U
- 2022 Prisa 320U/340U
- 2040 Prisa 620UT
- 205e ScanPrisa 640BU
- 2060 Prisa 620U+/640U
- 207e Prisa 640BU
- 209e ScanPrisa 640BT
- 20ae S2W 3000U
- 20b0 S2W 3300U/4300U
- 20be Prisa 640BT
- 20c0 Prisa 1240UT
- 20de S2W 4300U+
- 20f8 Benq 5000
- 20fc Benq 5000
- 20fe S2W 5300U
- 2137 Benq 5150/5250
- 2202 Benq 7400UT
- 3003 Benq WebCam
- 3008 Benq 1500
- 300a Benq 3410
- 300c Benq 1016
- 3019 Benq DC C40
- 4000 P30 Composite Device
- 6001 Mass Storage Device
- 6002 Mass Storage Device
- 6003 ATA/ATAPI Adapter
- 6004 Mass Storage Device
- 6005 Mass Storage Device
- 6006 Mass Storage Device
- 6007 Mass Storage Device
- 6008 Mass Storage Device
- 6009 Mass Storage Device
- 600a Mass Storage Device
- 600b Mass Storage Device
- 600c Mass Storage Device
- 600d Mass Storage Device
- 600e Mass Storage Device
- 600f Mass Storage Device
- 6010 Mass Storage Device
- 6011 Mass Storage Device
- 6012 Mass Storage Device
- 6013 Mass Storage Device
- 6014 Mass Storage Device
- 6015 Mass Storage Device
- 6125 MP3 Player
- 6180 MP3 Player
- 6200 MP3 Player
- 7500 Hi-Speed Mass Storage Device
- 9000 AWL300 Wireless Adapter
- 9001 AWL400 Wireless Adapter
- 9213 Kbd Hub
-04a6 Nokia Display Products
- 00b9 Audio
- 0180 Hub Type P
- 0181 HID Monitor Controls
-04a7 Visioneer
- 0100 StrobePro
- 0101 Strobe Pro Scanner (1.01)
- 0102 StrobePro Scanner
- 0211 OneTouch 7600 Scanner
- 0221 OneTouch 5300 Scanner
- 0223 OneTouch 8200
- 0224 OneTouch 4800 USB/Microtek Scanport 3000
- 0225 VistaScan Astra 3600(ENG)
- 0226 OneTouch 5300 USB
- 0229 OneTouch 7100
- 022a OneTouch 6600
- 022c OneTouch 9000/9020
- 0231 6100 Scanner
- 0311 6200 EPP/USB Scanner
- 0321 OneTouch 8100 EPP/USB Scanner
- 0331 OneTouch 8600 EPP/USB Scanner
- 0341 6400
- 0361 VistaScan Astra 3600(ENG)
- 0362 OneTouch 9320
- 0371 OneTouch 8700/8920
- 0380 OneTouch 7700
- 0382 Photo Port 7700
- 0390 9650
- 03a0 Xerox 4800 One Touch
- 0410 OneTouch Pro 8800/8820
- 0421 9450 USB
- 0423 9750 Scanner
- 0424 Strobe XP 450
- 0425 Strobe XP 100
- 0426 Strobe XP 200
- 0427 Strobe XP 100
- 0444 OneTouch 7300
- 0445 CardReader 100
- 0446 Xerox DocuMate 510
- 0447 XEROX DocuMate 520
- 0448 XEROX DocuMate 250
- 0449 Xerox DocuMate 252
- 044a Xerox 6400
- 044c Xerox DocuMate 262
- 0474 Strobe XP 300
- 0475 Xerox DocuMate 272
- 0478 Strobe XP 220
- 0479 Strobe XP 470
- 047a 9450
- 047b 9650
- 047d 9420
- 0480 9520
- 048f Strobe XP 470
- 0491 Strobe XP 450
- 0493 9750
- 0494 Strobe XP 120
- 0497 Patriot 430
- 0498 Patriot 680
- 0499 Patriot 780
- 049b Strobe XP 100
- 04a0 7400
-04a8 Multivideo Labs, Inc.
- 0101 Hub
- 0303 Peripheral Switch
- 0404 Peripheral Switch
-04a9 Canon, Inc.
- 1005 BJ Printer Hub
- 1035 PD Printer Storage
- 1050 BJC-8200
- 1051 BJC-3000 Color Printer
- 1052 BJC-6100
- 1053 BJC-6200
- 1054 BJC-6500
- 1055 BJC-85
- 1056 BJC-2110 Color Printer
- 1057 LR1
- 105a BJC-55
- 105b S600 Printer
- 105c S400
- 105d S450 Printer
- 105e S800
- 1062 S500 Printer
- 1063 S4500
- 1064 S300 Printer
- 1065 S100
- 1066 S630
- 1067 S900
- 1068 S9000
- 1069 S820
- 106a S200 Printer
- 106b S520 Printer
- 106d S750 Printer
- 106e S820D
- 1070 S530D
- 1072 I850 Printer
- 1073 I550 Printer
- 1074 S330 Printer
- 1076 i70
- 1077 i950
- 107a S830D
- 107b i320
- 107c i470D
- 107d i9100
- 107e i450
- 107f i860
- 1082 i350
- 1084 i250
- 1085 i255
- 1086 i560
- 1088 i965
- 108a i455
- 108b i900D
- 108c i475D
- 108d PIXMA iP2000
- 108f i80
- 1090 i9900 Photo Printer
- 1091 PIXMA iP1500
- 1093 PIXMA iP4000
- 1094 PIXMA iP3000x Printer
- 1095 PIXMA iP6000D
- 1097 PIXMA iP5000
- 1098 PIXMA iP1000
- 1099 PIXMA iP8500
- 109c PIXMA iP4000R
- 109d iP90
- 10a0 PIXMA iP1600 Printer
- 10a2 iP4200
- 10a4 iP5200R
- 10a5 iP5200
- 10a7 iP6210D
- 10a8 iP6220D
- 10a9 iP6600D
- 10b6 PIXMA iP4300 Printer
- 1404 W6400PG
- 1405 W8400PG
- 150f BIJ2350 PCL
- 1510 BIJ1350 PCL
- 1512 BIJ1350D PCL
- 1601 DR-2080C Scanner
- 1607 DR-6080 Scanner
- 1700 PIXMA MP110 Scanner
- 1701 PIXMA MP130 Scanner
- 1702 MP410 Composite
- 1703 MP430 Composite
- 1704 MP330 Composite
- 1706 PIXMA MP750 Scanner
- 1707 PIXMA MP780 Scanner
- 1708 PIXMA MP760 Scanner
- 1709 PIXMA MP150 Scanner
- 170a PIXMA MP170 Scanner
- 170b PIXMA MP450 Scanner
- 170c PIXMA MP500 Scanner
- 170d PIXMA MP800 Scanner
- 170e MP800R
- 1710 MP950
- 1712 MP530
- 1713 PIXMA MP830 Scanner
- 1714 MP160
- 1715 MP180 Storage
- 1716 MP460 Composite
- 1717 MP510
- 1718 MP600 Storage
- 171a MP810 Storage
- 171b MP960
- 1721 MP210 ser
- 1723 MP470 ser
- 1725 MP610 ser
- 1726 MP970 ser
- 1727 MX300 ser
- 1728 MX310 ser
- 1729 MX700 ser
- 172b MP140 ser
- 2200 CanoScan LiDE 25
- 2201 CanoScan FB320U
- 2202 CanoScan FB620U
- 2204 CanoScan FB630U
- 2205 CanoScan FB1210U
- 2206 CanoScan N650U/N656U
- 2207 CanoScan 1220U
- 2208 CanoScan D660U
- 220a CanoScan D2400UF
- 220b CanoScan D646U
- 220c CanoScan D1250U2
- 220d CanoScan N670U/N676U/LiDE 20
- 220e CanoScan N1240U/LiDE 30
- 220f CanoScan 8000F
- 2210 CanoScan 9900F
- 2212 CanoScan 5000F
- 2213 CanoScan LiDE 50/LiDE 35/LiDE 40
- 2214 CanoScan LiDE 80
- 2215 CanoScan 3000/3000F/3000ex
- 2216 CanoScan 3200F
- 2217 CanoScan 5200F
- 2219 CanoScan 9950F
- 221b CanoScan 4200F
- 221c CanoScan LiDE 60
- 221e CanoScan 8400F
- 221f CanoScan LiDE 500F
- 2220 CanoScan LIDE 25
- 2225 CanoScan LiDE 70
- 2228 CanoScan 4400F
- 2602 MultiPASS C555
- 2603 MultiPASS C755
- 260a CAPT Printer
- 260e LBP-2000
- 2610 MPC600F
- 2611 SmartBase MPC400
- 2612 MultiPASS C855
- 2617 CAPT Printer
- 261a iR1600
- 261b iR1610
- 261c iC2300
- 261f MPC200 Printer
- 2621 iR2000
- 2622 iR2010
- 2623 FAX-B180C
- 2629 FAXPHONE L75
- 262b LaserShot LBP-1120 Printer
- 262d iR C3200
- 262f MultiPASS MP730
- 2630 MultiPASS MP700
- 2631 LASER CLASS 700
- 2632 FAX-L2000
- 2635 MPC190
- 2637 iR C6800
- 2638 iR C3100
- 263c Smartbase MP360
- 263d MP370
- 263e MP390 FAX
- 263f MP375
- 2646 MF5530 Scanner Device V1.9.1
- 2647 MF5550 Composite
- 264e MF5630
- 264f MF5650 (FAX)
- 2650 iR 6800C EUR
- 2651 iR 3100C EUR
- 2655 FP-L170/MF350/L380/L398
- 2659 MF8100
- 265b CAPT Printer
- 265c iR C3220
- 265d MF5730
- 265e MF5750
- 265f MF5770
- 2660 MF3110
- 2663 iR3570/iR4570
- 2664 iR2270/iR2870
- 2665 iR C2620
- 2666 iR C5800
- 2667 iR85PLUS
- 2669 iR105PLUS
- 266a CAPT Device
- 266b iR8070
- 266c iR9070
- 266d iR 5800C EUR
- 266e CAPT Device
- 266f iR2230
- 2670 iR3530
- 2671 iR5570/iR6570
- 2672 iR C3170
- 2673 iR 3170C EUR
- 2674 L120
- 2675 iR2830
- 2676 CAPT Device
- 2677 iR C2570
- 2678 iR 2570C EUR
- 2679 CAPT Device
- 267a iR2016
- 267b iR2020
- 267d MF7100 Series
- 2684 MF3200 Series
- 2687 iR4530
- 2688 LBP3460
- 268c iR C6870
- 268d iR 6870C EUR
- 268e iR C5870
- 268f iR 5870C EUR
- 2691 iR7105
- 26a3 MF4100 Series
- 26b5 MF4200 Series
- 3041 PowerShot S10
- 3042 CanoScan FS4000US Film Scanner
- 3043 PowerShot S20
- 3044 EOS D30
- 3045 PowerShot S100
- 3046 IXY Digital
- 3047 Digital IXUS
- 3048 PowerShot G1
- 3049 PowerShot Pro90 IS
- 304a CP-10
- 304b IXY Digital 300
- 304c PowerShot S300
- 304d Digital IXUS 300
- 304e PowerShot A20
- 304f PowerShot A10
- 3050 PowerShot unknown 1
- 3051 PowerShot S110
- 3052 Digital IXUS V
- 3055 PowerShot G2
- 3056 PowerShot S40
- 3057 PowerShot S30
- 3058 PowerShot A40
- 3059 PowerShot A30
- 305b ZR45MC Digital Camcorder
- 305c PowerShot unknown 2
- 3060 EOS D60
- 3061 PowerShot A100
- 3062 PowerShot A200
- 3063 CP-100
- 3065 PowerShot S200
- 3066 Digital IXUS 330
- 3067 MV550i Digital Video Camera
- 3069 PowerShot G3
- 306a Digital unknown 3
- 306b MVX2i Digital Video Camera
- 306c PowerShot S45
- 306d PowerShot S45 PtP Mode
- 306e PowerShot G3 (normal mode)
- 306f PowerShot G3 (ptp)
- 3070 PowerShot S230
- 3071 PowerShot S230 (ptp)
- 3072 PowerShot SD100 / Digital IXUS II (ptp)
- 3073 PowerShot A70 (ptp)
- 3074 PowerShot A60 (ptp)
- 3075 IXUS 400 Camera
- 3076 PowerShot A300
- 3077 PowerShot S50
- 3078 ZR70MC Digital Camcorder
- 307a MV650i (normal mode)
- 307b MV630i Digital Video Camera
- 307c MV630i (normal mode)
- 307d CP-300
- 307f Optura 20
- 3080 MVX150i (normal mode) / Optura 20 (normal mode)
- 3081 Optura 10
- 3082 MVX100i / Optura 10
- 3083 EOS 10D
- 3084 EOS 300D / EOS Digital Rebel
- 3085 PowerShot G5
- 3087 Elura 50 (PTP mode)
- 3088 Elura 50 (normal mode)
- 308d MVX3i
- 308e FV M1 (normal mode) / MVX 3i (normal mode) / Optura Xi (normal mode)
- 3093 Optura 300
- 3096 IXY DV M2 (normal mode) / MVX 10i (normal mode)
- 3099 EOS 300D (ptp)
- 309a PowerShot A80
- 309b Digital IXUS (ptp)
- 309c PowerShot S1 IS
- 309d Camera
- 309f Camera
- 30a0 Camera
- 30a1 Camera
- 30a2 Camera
- 30a8 Elura 60E/Optura 40 (ptp)
- 30a9 MVX25i (normal mode) / Optura 40 (normal mode)
- 30b1 PowerShot S70 (normal mode) / PowerShot S70 (PTP mode)
- 30b2 PowerShot S60 (normal mode) / PowerShot S60 (PTP mode)
- 30b3 PowerShot G6 (normal mode) / PowerShot G6 (PTP mode)
- 30b4 PowerShot S500
- 30b5 PowerShot A75
- 30b6 Digital IXUS II2 / Digital IXUS II2 (PTP mode) / PowerShot SD110 (PTP mode) / PowerShot SD110 Digital ELPH
- 30b7 PowerShot A400 / PowerShot A400 (PTP mode)
- 30b8 PowerShot A310 / PowerShot A310 (PTP mode)
- 30b9 PowerShot A85
- 30ba PowerShot S410 Digital Elph
- 30bb PowerShot A95
- 30bd CP-220
- 30be CP-330
- 30bf Digital IXUS 40
- 30c0 Digital IXUS 30 (PTP mode) / PowerShot SD200 (PTP mode)
- 30c1 Digital IXUS 50 (normal mode) / IXY Digital 55 (normal mode) / PowerShot A520 (PTP mode) / PowerShot SD400 (normal mode)
- 30c2 PowerShot A510 (normal mode) / PowerShot A510 (PTP mode)
- 30c4 Digital IXUS i5 (normal mode) / IXY Digital L2 (normal mode) / PowerShot SD20 (normal mode)
- 30ea EOS 1D Mark II (PTP mode)
- 30eb EOS 20D
- 30ec EOS 20D (ptp)
- 30ee EOS 350D
- 30ef EOS 350D (ptp)
- 30f0 PowerShot S2 IS (PTP mode)
- 30f2 Digital IXUS 700 (normal mode) / Digital IXUS 700 (PTP mode) / IXY Digital 600 (normal mode) / PowerShot SD500 (normal mode) / PowerShot SD500 (PTP mode)
- 30f6 SELPHY CP400
- 30f8 PowerShot A430
- 30f9 PowerShot A410 (PTP mode)
- 30fc PowerShot A620 (PTP mode)
- 30fd PowerShot A610 (normal mode)/PowerShot A610 (PTP mode)
- 30ff Digital IXUS 55 (PTP mode)/PowerShot SD450 (PTP mode)
- 310b SELPHY CP600
- 310e Digital IXUS 50 (PTP mode)
- 3116 Digital IXUS 750 (PTP mode)
- 3117 PowerShot A700
- 3138 PowerShot A710 IS
- 315a PowerShot G9
- 3176 PowerShot A590
- 31ff Digital IXUS 55
-04aa DaeWoo Telecom, Ltd
-04ab Chromatic Research
-04ac Micro Audiometrics Corp.
-04ad Dooin Electronics
- 2501 Bluetooth Device
-04af Winnov L.P.
-04b0 Nikon Corp.
- 0102 Coolpix 990
- 0103 Coolpix 880
- 0104 Coolpix 995
- 0106 Coolpix 775
- 0107 Coolpix 5000
- 0108 Coolpix 2500
- 0109 Coolpix 2500 (ptp)
- 010a Coolpix 4500
- 010b Coolpix 4500 (ptp)
- 010d Coolpix 5700 (ptp)
- 010e Coolpix 4300 (storage)
- 010f Coolpix 4300 (ptp)
- 0110 Coolpix 3500 (Sierra Mode)
- 0111 Coolpix 3500 (ptp)
- 0112 Coolpix 885 (ptp)
- 0113 Coolpix 5000 (ptp)
- 0114 Coolpix 3100 (storage)
- 0115 Coolpix 3100 (ptp)
- 0117 Coolpix 2100 (ptp)
- 0119 Coolpix 5400 (ptp)
- 011d Coolpix 3700 (ptp)
- 0121 Coolpix 3200 (ptp)
- 0122 Coolpix 2200 (ptp)
- 0126 Coolpix 8800
- 0129 Coolpix 4800 (ptp)
- 012c Coolpix 4100 (storage)
- 012d Coolpix 4100 (ptp)
- 012e Coolpix 5600 (ptp)
- 0130 Coolpix 4600 (ptp)
- 0135 Coolpix 5900 (ptp)
- 0136 Coolpix 7900 (storage)
- 0137 Coolpix 7900 (ptp)
- 0141 Coolpix P2 (storage)
- 0142 Coolpix P2 (ptp)
- 0163 Coolpix P5100 (ptp)
- 0169 Coolpix P50 (ptp)
- 0202 Coolpix SQ (ptp)
- 0203 Coolpix 4200 (mass storage mode)
- 0204 Coolpix 4200 (ptp)
- 0205 Coolpix 5200 (storage)
- 0206 Coolpix 5200 (ptp)
- 0301 Coolpix 2000 (storage)
- 0302 Coolpix 2000 (ptp)
- 0402 DSC D100 (ptp)
- 0403 D2H (mass storage mode)
- 0404 D2H SLR (ptp)
- 0405 D70 (mass storage mode)
- 0406 DSC D70 (ptp)
- 0408 D2X SLR (ptp)
- 0409 D50 digital camera
- 040a D50 (ptp)
- 040c D2Hs
- 040e DSC D70s (ptp)
- 0413 D40 (mass storage mode)
- 4000 Coolscan LS 40 ED
- 4001 LS 50 ED/Coolscan V ED
- 4002 Super Coolscan LS-5000 ED
-04b1 Pan International
-04b3 IBM Corp.
- 3003 Rapid Access III Keyboard
- 3004 Media Access Pro Keyboard
- 300a Rapid Access IIIe Keyboard
- 3016 UltraNav Keyboard Hub
- 3018 UltraNav Keyboard
- 301b SK-8815 Keyboard
- 301c Enhanced Performance Keyboard
- 3020 Enhanced Performance Keyboard
- 3100 NetVista Mouse
- 3103 ScrollPoint Pro Mouse
- 3104 ScrollPoint Wireless Mouse
- 3105 ScrollPoint Optical (HID)
- 3107 ThinkPad 800dpi Optical Travel Mouse
- 3108 800dpi Optical Mouse w/ Scroll Point
- 3109 Optical ScrollPoint Pro Mouse
- 310b Red Wheel Mouse
- 4427 Portable CD ROM
- 4482 Serial Converter
- 4525 Double sided CRT
- 4550 NVRAM (128 KB)
- 4554 Cash Drawer
- 4580 Hub w/ NVRAM
- 4581 4800-2xx Hub w/ Cash Drawer
- 4604 Keyboard w/ Card Reader
- 4671 4820 LCD w/ MSR/KB
-04b4 Cypress Semiconductor Corp.
- 0000 Dacal DC-101 CD Library
- 0001 Mouse
- 0002 CY7C63x0x Thermometer
- 0101 Keyboard/Hub
- 0102 Keyboard with APM
- 0130 MyIRC Remote Receiver
- 0bad MetaGeek Wi-Spy
- 1002 CY7C63001 R100 FM Radio
- 1006 Human Interface Device
- 4381 SCAPS USC-1 Scanner Controller
- 4611 Storage Adapter FX2 (CY)
- 4616 Flash Disk (TPP)
- 5500 HID->COM RS232 Adapter
- 6370 ViewMate Desktop Mouse CC2201
- 6560 CY7C65640 USB-2.0 "TetraHub"
- 6830 CY7C68300A EZ-USB AT2 USB 2.0 to ATA/ATAPI
- 6831 Storage Adapter ISD-300LP (CY)
- 7417 Wireless PC Lock
- 8613 CY7C68013 EZ-USB FX2 USB 2.0 Development Kit
- 8614 DTV-DVB UDST7020BDA DVB-S Box(DVBS for MCE2005)
- cc04 Centor USB RACIA-ALVAR USB PORT
- cc06 Centor-P RACIA-ALVAR USB PORT
- d5d5 CY7C63x0x Zoltrix Z-Boxer GamePad
- f000 CY30700 Licorice evaluation board
-04b5 ROHM LSI Systems USA, LLC
-04b6 Hint Corp.
-04b7 Compal Electronics, Inc.
-04b8 Seiko Epson Corp.
- 0001 Stylus Color 740 / Photo 750
- 0002 ISD Smart Cable for Mac
- 0003 ISD Smart Cable
- 0004 Printer
- 0005 Stylus D88+
- 0006 Printer
- 0007 Printer
- 0101 Perfection 636
- 0102 GT-2200
- 0103 Perfection 610
- 0104 Perfection 1200
- 0105 StylusScan 2000
- 0106 Stylus Scan 2500
- 0107 Expression 1600U
- 0109 Expression 1640 XL
- 010a Perfection 1640SU
- 010b Perfection 1240
- 010c Perfection 640
- 010e Perfection 1680
- 010f Perfection 1250
- 0110 Perfection 1650
- 0112 Perfection 2450
- 0114 Perfection 660
- 0116 Perfection 3170 (GT-9400)
- 0118 Perfection 4180 (GT-F600)
- 0119 Perfection 4490 Photo
- 011a 1000 ICS
- 011b Perfection 2400 Photo
- 011c Perfection 3200
- 011d Perfection 1260 Photo
- 011e Perfection 1660 Photo
- 011f Perfection 1670
- 0120 Perfection 1270 scanner
- 0121 Perfection 2480 Photo
- 0122 Perfection 3590 scanner
- 0126 GT-15000 (ES-7000)
- 0128 Perfection 4870 (GT-X700)
- 0129 Expression 10000XL (ES-10000G)
- 012a Perfection 4990 Photo scanner
- 012b GT-2500 (ES-H300)
- 012c Perfection V350 (GT-F700)
- 012d Perfection V10/V100 (GT-S600/F650)
- 012f Perfection V350 (GT-F700)
- 0202 Receipt Printer M129C
- 0401 CP 800 Digital Camera
- 0402 PhotoPC 850z
- 0403 PhotoPC 3000z
- 0509 JVC PIX-MC10
- 0601 Stylus Photo 875DC Card Reader
- 0602 Stylus Photo 895 Card Reader
- 0801 Stylus CX5200/CX5400/CX6600
- 0802 Stylus CX3200
- 0803 Printer (Composite Device)
- 0804 Storage Device
- 0805 Stylus CX6400
- 0806 Stylus Photo RX600/610
- 0807 Stylus Photo RX500/510
- 0808 Stylus CX5200
- 0809 Storage Device
- 080a Storage Device
- 080c ME100
- 080d Stylus CX4500/4600
- 080e CX-3500/3600/3650 MFP
- 080f Stylus Photo RX425 scanner
- 0810 Stylus Photo RX700 (PM-A900)
- 0811 Stylus Photo RX620 all-in-one
- 0812 MFP Composite Device
- 0813 Stylus CX6500/6600
- 0814 (PM-A700)
- 0815 AcuLaser CX11 (LP-A500)
- 0816 Printer (Composite Device)
- 0817 (LP-M5500)
- 0818 Stylus CX3700/CX3800/DX3800
- 0819 Stylus CX4700/CX4800/DX4800 (PX-A750)
- 081a Stylus Photo RX520/RX530 (PM-A750)
- 081b MFP Composite Device
- 081c Stylus Photo RX640/RX650 (PM-A890)
- 081d (PM-A950)
- 081e MFP Composite Device
- 081f Stylus CX7700/7800
- 0820 CX4200 MP scanner
- 0821 MFP Composite Device
- 0822 Storage Device
- 0823 MFP Composite Device
- 0824 Storage Device
- 0825 MFP Composite Device
- 0826 Storage Device
- 0827 Stylus Photo RX560/580/590 (PM-A820)
- 0828 (PM-A970)
- 0829 (PM-T990)
- 082a (PM-A920)
- 082b Stylus DX5050
- 082c Storage Device
- 082d Storage Device
- 082e DX-60x0 MFP scanner
- 082f Stylus DX4050
- 0830 Stylus CX2800/CX2900/ME200
- 0831 MFP Composite Device
- 0832 MFP Composite Device
- 0833 (LP-M5600)
- 0834 MFP Composite Device
- 0835 AcuLaser CX21
- 0836 MFP Composite Device
- 0837 MFP Composite Device
- 0838 CX7300/CX7400/DX7400
- 0839 CX8300/CX8400/DX8400
- 083a CX9300F/CX9400Fax/DX9400F
- 083b MFP Composite Device
- 083c MFP Composite Device
- 083d MFP Composite Device
- 083e MFP Composite Device
- 083f Stylus DX4450
-04b9 Rainbow Technologies, Inc.
- 0300 SafeNet USB SuperPro/UltraPro
- 1000 iKey 1000 Token
- 1001 iKey 1200 Token
- 1002 iKey Token
- 1003 iKey Token
- 1004 iKey Token
- 1005 iKey Token
- 1006 iKey Token
- 1200 iKey 2000 Token
- 1201 iKey Token
- 1202 iKey 2032 Token
- 1203 iKey Token
- 1204 iKey Token
- 1205 iKey Token
- 1206 iKey Token
- 1300 iKey 3000 Token
- 1301 iKey 3000
- 1302 iKey Token
- 1303 iKey Token
- 1304 iKey Token
- 1305 iKey Token
- 1306 iKey Token
-04ba Toucan Systems, Ltd
-04bb I-O Data Device, Inc.
- 0101 USB2-IDE/ATAPI Bridge Adapter
- 0201 USB2-IDE/ATAPI Bridge Adapter
- 0204 DVD Multi-plus unit iU-CD2
- 0206 DVD Multi-plus unit DVR-UEH8
- 0301 Storage Device
- 0314 USB-SSMRW SD-card
- 0319 USB2-IDE/ATAPI Bridge Adapter
- 031a USB2-IDE/ATAPI Bridge Adapter
- 031b USB2-IDE/ATAPI Bridge Adapter
- 031e USB-SDRW SD-card
- 0502 Nogatech Live! (BT)
- 0901 USB ETT
- 0904 ET/TX Ethernet [pegasus]
- 0913 ET/TX-S Ethernet [pegasus2]
- 0919 USB WN-B11
- 0922 IOData AirPort WN-B11/USBS 802.11b
- 0930 ETG-US2
- 0937 WN-WAG/USL Wireless LAN Adapter
- 0938 WN-G54/USL Wireless LAN Adapter
- 0a03 Serial USB-RSAQ1
- 0a07 USB2-iCN Adapter
- 0a08 USB2-iCN Adapter
- 0c01 FM-10 Pro Disk
-04bd Toshiba Electronics Taiwan Corp.
-04be Telia Research AB
-04bf TDK Corp.
- 0100 MediaReader CF
- 0115 USB-PDC Adapter UPA9664
- 0116 USB-cdmaOne Adapter UCA1464
- 0117 USB-PHS Adapter UHA6400
- 0118 USB-PHS Adapter UPA6400
- 0135 MediaReader Dual
- 0202 73S1121F Smart Card Reader-
- 0309 Bluetooth USB dongle
- 030a IBM Bluetooth Ultraport Module
- 030b Bluetooth Device
- 030c Ultraport Bluetooth Device
- 0310 Integrated Bluetooth
- 0311 Integrated Bluetooth Device
- 0317 Bluetooth UltraPort Module from IBM
- 0318 IBM Integrated Bluetooth
- 0319 Bluetooth Adapter
- 0320 Bluetooth Adapter
- 0321 Bluetooth Device
- 0a28 INDI AV-IN Device
-04c1 U.S. Robotics (3Com)
- 0020 56K Voice Pro
- 0022 56K Voice Pro
- 007e ISDN TA
- 0082 OfficeConnect Analog Modem
- 008f Pro ISDN TA
- 0097 OfficeConnect Analog
- 009d HomeConnect WebCam [vicam]
- 00a9 ISDN Pro TA-U
- 00b9 HomeConnect IDSL Modem
- 3021 56k Voice FaxModem Pro
-04c2 Methode Electronics Far East PTE, Ltd
-04c3 Maxi Switch, Inc.
- 1102 Mouse
- 2102 Mouse
-04c4 Lockheed Martin Energy Research
-04c5 Fujitsu, Ltd
- 1029 fi-4010c Scanner
- 1033 fi-4110CU
- 1041 fi-4120c Scanner
- 1042 fi-4220c Scanner
- 105b AH-F401U Air H device
- 1096 fi-5110EOX
- 1097 fi-5110C
- 10ae fi-4120C2
- 10af fi-4220C2
- 10e0 fi-5120c Scanner
- 10e1 fi-5220C
- 10e7 fi-5900C
- 10fe S500
-04c6 Toshiba America Electronic Components
-04c7 Micro Macro Technologies
-04c8 Konica Corp.
- 0720 Digital Color Camera
- 0721 e-miniD Camera
- 0722 e-mini
- 0723 KD-200Z Camera
- 0726 KD-310Z Camera
- 0728 Revio C2 Mass Storage Device
- 0729 Revio C2 Digital Camera
- 072c Revio KD20M
- 072d Revio KD410Z
-04ca Lite-On Technology Corp.
- 1766 HID Monitor Controls
- 9304 Hub
-04cb Fuji Photo Film Co., Ltd
- 0100 FinePix 30i/40i/50i, A101/201, 1300/2200, 1400/2400/2600/2800/4500/4700/4800/4900/6800/6900 Zoom
- 0103 FinePix NX-500/NX-700 printer
- 0104 FinePix A101, 2600/2800/4800/6800 Zoom (PC CAM)
- 0108 FinePix F601 Zoom (DSC)
- 0109 FinePix F601 Zoom (PC CAM)
- 010a FinePix S602 (Pro) Zoom (DSC)
- 010b FinePix S602 (Pro) Zoom (PC CAM)
- 010d FinePix Digital Camera 020531
- 010e FinePix F402 Zoom (DSC)
- 010f FinePix F402 Zoom (PC CAM)
- 0110 FinePix M603 Zoom (DSC)
- 0111 FinePix M603 Zoom (PC CAM)
- 0112 FinePix A202, A200 Zoom (DSC)
- 0113 FinePix A202, A200 Zoom (PC CAM)
- 0114 FinePix F401 Zoom (DSC)
- 0115 FinePix F401 Zoom (PC CAM)
- 0116 FinePix A203 Zoom (DSC)
- 0117 FinePix A203 Zoom (PC CAM)
- 0118 FinePix A303 Zoom (DSC)
- 0119 FinePix A303 Zoom (PC CAM)
- 011a FinePix S304/3800 Zoom (DSC)
- 011b FinePix S304/3800 Zoom (PC CAM)
- 011c FinePix A204/2650 Zoom (DSC)
- 011d FinePix A204/2650 Zoom (PC CAM)
- 0120 FinePix F700 Zoom (DSC)
- 0121 FinePix F700 Zoom (PC CAM)
- 0122 FinePix F410 Zoom (DSC)
- 0123 FinePix F410 Zoom (PC CAM)
- 0124 FinePix A310 Zoom (DSC)
- 0125 FinePix A310 Zoom (PC CAM)
- 0126 FinePix A210 Zoom (DSC)
- 0127 FinePix A210 Zoom (PC CAM)
- 0128 FinePix A205(S) Zoom (DSC)
- 0129 FinePix A205(S) Zoom (PC CAM)
- 012a FinePix F610 Zoom (DSC)
- 012b FinePix Digital Camera 030513
- 012c FinePix S7000 Zoom (DSC)
- 012d FinePix S7000 Zoom (PC CAM)
- 012f FinePix Digital Camera 030731
- 0130 FinePix S5000 Zoom (DSC)
- 0131 FinePix S5000 Zoom (PC CAM)
- 013b FinePix Digital Camera 030722
- 013c FinePix S3000 Zoom (DSC)
- 013d FinePix S3000 Zoom (PC CAM)
- 013e FinePix F420 Zoom (DSC)
- 013f FinePix F420 Zoom (PC CAM)
- 0142 FinePix S7000 Zoom (PTP)
- 0148 FinePix A330 Zoom (DSC)
- 0149 FinePix A330 Zoom (UVC)
- 014a FinePix A330 Zoom (PTP)
- 014b FinePix A340 Zoom (DSC)
- 0159 FinePix F710 Zoom (DSC)
- 0165 FinePix S3500 Zoom (DSC)
- 0168 FinePix E500 Zoom (DSC)
- 0169 FinePix E500 Zoom (UVC)
- 016b FinePix E510 Zoom (DSC)
- 016c FinePix E510 Zoom (PC CAM)
- 016e FinePix S5500 Zoom (DSC)
- 016f FinePix S5500 Zoom (UVC)
- 0171 FinePix E550 Zoom (DSC)
- 0172 FinePix E550 Zoom (PTP)
- 0177 FinePix F10 (DSC)
- 0179 Finepix F10 (PTP)
- 0186 FinePix S5200/S5600 Zoom (DSC)
- 0188 FinePix S5200/S5600 Zoom (PTP)
- 018e FinePix S9500 Zoom (DSC)
- 018f FinePix S9500 Zoom (PTP)
- 0192 FinePix E900 Zoom (DSC)
- 0193 FinePix E900 Zoom (PTP)
- 019b FinePix F30 (PTP)
- 01bf FinePix F6000fd/S6500fd Zoom (PTP)
- 01c0 FinePix F20 (PTP)
- 01c1 FinePix F31fd (PTP)
- 01c4 FinePix S5700 Zoom (PTP)
- 01c5 FinePix F40fd (PTP)
- 01c6 FinePix A820 Zoom (PTP)
- 01d2 FinePix A800 Zoom (PTP)
- 01d5 FinePix F47 (PTP)
-04cc Philips Semiconductors
- 1122 Hub
- 1521 USB 2.0 Hub
- 8116 Camera
-04cd Tatung Co. Of America
-04ce ScanLogic Corp.
- 0002 SL11R-IDE IDE Bridge
- 0100 USB2PRN Printer Class
- 0300 Phantom 336CX - C3 scanner
- 04ce SL11DEMO, VID: 0x4ce, PID: 0x4ce
- 07d1 SL11R, VID: 0x4ce, PID: 0x07D1
-04cf Myson Century, Inc.
- 0800 MTP800 Mass Storage Device
- 8810 CS8810 Mass Storage Device
- 8811 CS8811 Mass Storage Device
- 8813 CS8813 Mass Storage Device
- 8818 USB2.0 to ATAPI Bridge Controller
-04d0 Digi International
-04d1 ITT Canon
-04d2 Altec Lansing Technologies
- 0070 ADA70 Speakers
- 0305 Non-Compliant Audio Device
- 0311 ADA-310 Speakers
- 2060 Claritel-i750 - vp
- ff05 ADA-305 Speakers
- ff47 Lansing HID Audio Controls
- ff49 Lansing HID Audio Controls
-04d3 VidUS, Inc.
-04d4 LSI Logic, Inc.
-04d5 Forte Technologies, Inc.
-04d6 Mentor Graphics
-04d7 Oki Semiconductor
- 1be4 Bluetooth Device
-04d8 Microchip Technology, Inc.
- 0002 USB-LCD 2x20
- 8000 In-Circuit Debugger
- 8001 ICD2 in-circuit debugger
-04d9 Holtek Semiconductor, Inc.
- 1203 MC Industries Keyboard
-04da Panasonic (Matsushita)
- 0901 LS-120 Camera
- 0b01 CD-R/RW Drive
- 0b03 SuperDisk 240MB
- 0d01 CD-R Drive KXL-840AN
- 0d09 CD-R Drive KXL-RW32AN
- 0d0a CD-R Drive KXL-CB20AN
- 0d0d CDRCB03
- 0d0e DVD-ROM & CD-R/RW
- 0f40 Printer
- 1500 MFSUSB Driver
- 1b00 MultiMediaCard
- 2121 EB-VS6
- 2317 DVC USB-SERIAL Driver for WinXP
- 2319 NV-GS15 (webcam mode)
- 231d DVC Web Camera Device
- 231e DVC DV Stream Device
- 2372 Lumix DMC-FZ10 Camera
- 2374 DMC-FZ20
-04db Hypertec Pty, Ltd
-04dc Huan Hsin Holdings, Ltd
-04dd Sharp Corp.
- 13a6 MFC2000
- 6006 AL-1216
- 6007 AL-1045
- 6008 AL-1255
- 6009 AL-1530CS
- 600a AL-1540CS
- 600b AL-1456
- 600c AL-1555
- 600d AL-1225
- 600e AL-1551CS
- 600f AR-122E
- 6010 AR-152E
- 6011 AR-157E
- 6012 SN-1045
- 6013 SN-1255
- 6014 SN-1456
- 6015 SN-1555
- 6016 AR-153E
- 6017 AR-122E N
- 6018 AR-153E N
- 6019 AR-152E N
- 601a AR-157E N
- 601b AL-1217
- 601c AL-1226
- 601d AR-123E
- 7002 DVC Ver.1.0
- 7004 VE-CG40U Digital Still Camera
- 7005 VE-CG30 Digital Still Camera
- 7007 VL-Z7S Digital Camcorder
- 8004 Zaurus SL-5000D/SL-5500 PDA
- 8005 Zaurus A-300
- 8006 Zaurus SL-B500/SL-5600 PDA
- 8007 Zaurus C-700 PDA
- 9014 IM-DR80 Portable NetMD Player
- 9031 Zaurus C-750/C-760/C-860/SL-C3000 PDA
- 9032 Zaurus SL-6000
- 903a GSM GPRS
- 9050 Zaurus C-860 PDA
- 9056 Viewcam Z
- 9073 AM-900
- 9074 GSM GPRS
- 90a9 Sharp Composite
- 90d0 USB-to-Serial Comm. Port
- 90f2 Sharp 3G GSM USB Control
- 9120 WS004SH
- 9122 WS007SH
- 9123 W-ZERO3 ES Smartphone
- 91a3 922SH Internet Machine
-04de MindShare, Inc.
-04df Interlink Electronics
-04e1 Iiyama North America, Inc.
- 0201 Monitor Hub
-04e2 Exar Corp.
-04e3 Zilog, Inc.
-04e4 ACC Microelectronics
-04e5 Promise Technology
-04e6 SCM Microsystems, Inc.
- 0001 E-USB ATA Bridge
- 0002 eUSCSI SCSI Bridge
- 0003 eUSB SmartMedia Card Reader
- 0005 eUSB SmartMedia/CompactFlash Card Reader
- 0006 eUSB SmartMedia Card Reader
- 0007 Hifd
- 0009 eUSB ATA/ATAPI Adapter
- 000a eUSB CompactFlash Adapter
- 000b eUSCSI Bridge
- 000c eUSCSI Bridge
- 000d Dazzle MS
- 0012 Dazzle SD/MMC
- 0101 eUSB ATA Bridge
- 0311 Dazzle DM-CF
- 0312 Dazzle DM-SD/MMC
- 0313 Dazzle SM
- 0314 Dazzle MS
- 0322 e-Film Reader-5
- 0325 eUSB ORCA Quad Reader
- 0327 Digital Media Reader
- 03fe DMHS2 DFU Adapter
- 0406 eUSB SmartDM Reader
- 04e6 eUSB DFU Adapter
- 04e7 STCII DFU Adapter
- 04e8 eUSBDM DFU Adapter
- 04e9 DM-E DFU Adapter
- 0500 Veridicom 5thSense Fingerprint Sensor and eUSB SmartCard
- 0701 DCS200 Loader Device
- 0702 DVD Creation Station 200
- 0703 DVC100 Loader Device
- 0704 Digital Video Creator 100
- 1001 SCR300 Smart Card Reader
- 1010 USBAT-2 CompactFlash Card Reader
- 1014 e-Film Reader-3
- 1020 USBAT ATA/ATAPI Adapter
- 2007 RSA SecurID ComboReader
- 2009 Citibank Smart Card Reader
- 200a Reflex v.2 Smart Card Reader
- 200d STR391 Reader
- 5111 SCR331-DI SmartCard Reader
- 5113 SCR333 SmartCard Reader
- 5114 SCR331-DI SmartCard Reader
- 5115 SCR335 SmartCard Reader
- 5116 SCR331-LC1 SmartCard Reader
- 5117 SCR3320 - Smart Card Reader
- 5118 Expresscard SIM Card Reader
- 5119 SCR3340 - ExpressCard54 Smart Card Reader
- 511b SmartCard Reader
- 511d SCR3311 Smart Card Reader
- 5120 SCR331-DI SmartCard Reader
- 5121 SDI010 Smart Card Reader
- 5151 SCR338 Keyboard Smart Card Reader
- 5410 SCR35xx Smart Card Reader
- e000 SCRx31 Reader
- e001 SCR331 SmartCard Reader
- e003 SPR532 PinPad SmartCard Reader
-04e7 Elo TouchSystems
- 0001 TouchScreen
- 0002 Touchmonitor Interface 2600 Rev 2
- 0004 4000U CarrollTouch® Touchmonitor Interface
- 0007 2500U IntelliTouch® Touchmonitor Interface
- 0008 3000U AccuTouch® Touchmonitor Interface
- 0009 4000U CarrollTouch® Touchmonitor Interface
- 0020 Touchscreen Interface (2700)
- 0021 Touchmonitor Interface
- 0030 4500U CarrollTouch® Touchmonitor Interface
- 0032 Touchmonitor Interface
- 0033 Touchmonitor Interface
- 0041 5010 Surface Capacitive Touchmonitor Interface
- 0042 Touchmonitor Interface
- 0050 2216 AccuTouch® Touchmonitor Interface
- 0071 Touchmonitor Interface
- 0072 Touchmonitor Interface
- 0081 Touchmonitor Interface
- 0082 Touchmonitor Interface
- 00ff Touchmonitor Interface
-04e8 Samsung Electronics Co., Ltd
- 0110 Connect3D Flash Drive
- 0111 Connect3D Flash Drive
- 1003 MP3 Player and Recorder
- 1006 SDC-200Z
- 3004 ML-4600
- 3005 Docuprint P1210
- 3008 ML-6060 laser printer
- 300c ML-1210 Printer
- 300e Laser Printer
- 3104 ML-3550N
- 3226 Laser Printer
- 3228 Laser Printer
- 322a Laser Printer
- 322c Laser Printer
- 3230 ML-1440
- 3232 Laser Printer
- 3236 ML-1450
- 3238 ML-1430
- 323a ML-1710 Printer
- 323b Phaser 3130
- 323c Laser Printer
- 323d Phaser 3120
- 323e Laser Printer
- 3240 Laser Printer
- 3242 Laser Printer
- 3248 Color Laser Printer
- 324a Laser Printer
- 324c ML-1740 Printer
- 324d Phaser 3121
- 325f Phaser 3425 Laser Printer
- 3260 CLP-510 Color Laser Printer
- 3268 ML-1610 Mono Laser Printer
- 326c ML-2010P Mono Laser Printer
- 3409 SCX-4216F Scanner
- 340c SCX-5x15 Series
- 340d SCX-6x20 Series
- 340e MFP 560 Series
- 340f Printing Support
- 3412 SCX-4x20 Series
- 3413 SCX-4100 Scanner
- 3415 Composite Device
- 3419 Composite Device
- 341a Printing Support
- 341b SCX-4200 Series
- 341c Composite Device
- 341d Composite Device
- 341f Composite Device
- 3420 Composite Device
- 3605 InkJet Color Printer
- 3606 InkJet Color Printer
- 3609 InkJet Color Printer
- 3902 InkJet Color Printer
- 3903 Xerox WorkCentre XK50cx
- 390f InkJet Color Printer
- 3911 SCX-1020 Series
- 5000 YP-MF Series
- 5001 YP-100
- 5002 YP-30
- 5003 YP-700
- 5004 YP-30
- 5005 YP-300
- 5006 YP-750
- 500d MP3 Player
- 5010 MP3 Player
- 5011 YP-780
- 5013 YP-60
- 5015 yepp upgrade
- 501b MP3 Player
- 503b YP-U1 MP3 Player
- 5050 YP-U2 MP3 Player
- 507d YP-U3 MP3 Player
- 508b YP-S5 MP3 Player
- 5a00 YP-NEU
- 5a01 YP-NDU
- 5a03 Yepp MP3 Player
- 5a04 YP-800
- 5a08 YP-90
- 5a0f MTP Device
- 5b01 Memory Stick Reader/Writer
- 5b02 Memory Stick Reader/Writer
- 5b03 Memory Stick Reader/Writer
- 5b04 Memory Stick Reader/Writer
- 5b05 Memory Stick Reader/Writer
- 5b11 SEW-2001u Card
- 5f00 NEXiO Sync
- 5f01 NEXiO Sync
- 5f02 NEXiO Sync
- 5f03 NEXiO Sync
- 5f04 NEXiO Sync
- 6601 Z100 Mobile Phone
- 6611 MITs Sync
- 6613 MITs Sync
- 6615 MITs Sync
- 6617 MITs Sync
- 6619 MITs Sync
- 661b MITs Sync
- 661e Handheld
- 6620 Handheld
- 6622 Handheld
- 6624 Handheld
- 662e MITs Sync
- 6630 MITs Sync
- 6632 MITs Sync
- 663f SGH-E720/SGH-E840
- 6640 USB Modem Enumerator
- 7011 SEW-2003U Card
- 7021 Bluetooth Device
- 7061 eHome Infrared Receiver
- 7081 Human Interface Device
- 8001 Handheld
- e020 SERI E02 SCOM 6200 UMTS Phone
- e021 SERI E02 SCOM 6200 Virtual UARTs
- e022 SERI E02 SCOM 6200 Flash Load Disk
- ff30 SG_iMON
-04e9 PC-Tel, Inc.
-04ea Brooktree Corp.
-04eb Northstar Systems, Inc.
-04ec Tokyo Electron Device, Ltd
-04ed Annabooks
-04ef Pacific Electronic International, Inc.
-04f0 Daewoo Electronics Co., Ltd
-04f1 Victor Company of Japan, Ltd
- 0001 GC-QX3 Digital Still Camera
- 0004 GR-DVL815U Digital Video Camera
- 0006 DV Camera Storage
- 0008 GZ-MG30AA/MC500E Digital Video Camera
- 0009 GR-DX25EK Digital Video Camera
- 000a GR-D72 Digital Video Camera
- 3008 MP-PRX1 Ethernet
-04f2 Chicony Electronics Co., Ltd
- 0001 KU-8933 Keyboard
- 0002 NT68P81 Keyboard
- 0110 KU-2971 Keyboard
- 0111 KU-9908 Keyboard
- 0112 KU-8933 Keyboard with PS/2 Mouse port
- 0116 KU-2971 German Keyboard
- 0403 KU-0420 keyboard
- a001 E-Video DC-100 Camera
- a120 ORITE CCD Webcam(PC370R)
- a121 ORITE CCD Webcam(PC370R)
- a122 ORITE CCD Webcam(PC370R)
- a123 ORITE CCD Webcam(PC370R)
- a124 ORITE CCD Webcam(PC370R)
- a133 Gateway Webcam
- a204 DSC WIA Device (1300)
- a208 DSC WIA Device (2320)
- a209 Labtec DC-2320
- a20a DSC WIA Device (3310)
- a20c DSC WIA Device (3320)
- a210 Audio Device
- b009 Integrated Camera
- b010 Integrated Camera
- b012 1.3 MPixel UVC webcam
- b018 Video Device
- b022 Camera
- b025 Camera
- b027 Gateway Webcam
- b028 VGA UVC WebCam
-04f3 Elan Microelectronics Corp.
- 0210 AM-400 Hama Optical Mouse
-04f4 Harting Elektronik, Inc.
-04f5 Fujitsu-ICL Systems, Inc.
-04f6 Norand Corp.
-04f7 Newnex Technology Corp.
-04f8 FuturePlus Systems
-04f9 Brother Industries, Ltd
- 0002 HL-1050 Laser Printer
- 0005 Printer
- 0006 HL-1240 Laser Printer
- 0007 HL-1250 Laser Printer
- 0008 HL-1270 Laser Printer
- 0009 Printer
- 000a P2500 Series
- 000b Printer
- 000c Printer
- 000d HL-1440 Laser Printer
- 000e HL-1450 series
- 000f HL-1470N series
- 0010 Printer
- 0011 Printer
- 0012 Printer
- 0013 Printer
- 0014 Printer
- 0015 Printer
- 0016 Printer
- 0017 Printer
- 0018 Printer
- 001c Printer
- 001e Printer
- 0020 HL-5130 series
- 0021 HL-5140 series
- 0022 HL-5150D series
- 0023 HL-5170DN series
- 0024 Printer
- 0025 Printer
- 0027 HL-2030 Laser Printer
- 0028 Printer
- 0029 Printer
- 002a Printer
- 002b Printer
- 002c Printer
- 002d Printer
- 0100 MFC8600/9650 Series
- 0101 MFC9600/9870 Series
- 0102 MFC9750/1200 Series
- 0104 MFC-8300J
- 0105 MFC-9600J
- 0106 MFC-7300C
- 0107 MFC-7400C
- 0108 MFC-9200C
- 0109 MFC-830
- 010a MFC-840
- 010b MFC-860
- 010c MFC-7400J
- 010d MFC-9200J
- 010e MFC3100C Scanner
- 010f MFC 5100C
- 0110 MFC4800 Scanner
- 0111 MFC 6800
- 0112 DCP1000 Port(FaxModem)
- 0113 MFC-8500
- 0114 MFC9700 Port(FaxModem)
- 0115 MFC9800 Scanner
- 0116 DCP1400 Scanner
- 0119 MFC-9660
- 011b MFC-9880
- 011c MFC-9760
- 011d MFC-9070
- 011e MFC-9180
- 011f MFC-9160
- 0120 MFC580 Port(FaxModem)
- 0121 MFC-590
- 0122 MFC-5100J
- 0129 Imagistics 2500 (MFC-8640D clone)
- 012f FAX-4750e
- 0132 MFC-5200C RemovableDisk
- 0135 MFC-100 Scanner
- 0136 MFC-150CL Scanner
- 013c MFC-890 Port
- 013d MFC-5200J Printer
- 013e MFC-4420C RemovableDisk
- 013f MFC-4820C RemovableDisk
- 0140 DCP-8020
- 0141 DCP-8025D
- 0142 MFC-8420
- 0143 MFC-8820D
- 0144 DCP-4020C RemovableDisk
- 0146 MFC-3220C
- 0147 FAX-1820C Printer
- 0148 MFC-3320CN Printer
- 0149 FAX-1920CN Printer
- 014a MFC-3420C
- 014b MFC-3820CN
- 014d FAX-1815C Printer
- 014e MFC-8820J
- 0150 MFC-8220 Port(FaxModem)
- 0151 MFC-8210J
- 0157 MFC-3420J Printer
- 0158 MFC-3820JN Port(FaxModem)
- 015d MFC Composite Device
- 015e DCP-8045D
- 015f MFC-8440
- 0160 MFC-8840D
- 0161 MFC-210C
- 0162 MFC-420CN Remote Setup Port
- 0163 MFC-410CN RemovableDisk
- 0165 MFC-620CN
- 0166 MFC-610CLN RemovableDisk
- 0168 MFC-620CLN
- 0169 DCP-110C RemovableDisk
- 016b DCP-310CN RemovableDisk
- 016c FAX-2440C Printer
- 016d MFC-5440CN
- 016e MFC-5840CN Remote Setup Port
- 0170 FAX-1840C Printer
- 0171 FAX-1835C Printer
- 0172 FAX-1940CN Printer
- 0173 MFC-3240C Remote Setup Port
- 0174 MFC-3340CN RemovableDisk
- 017b Imagistics sx2100
- 0180 MFC-7420
- 0181 MFC-7820N Port(FaxModem)
- 0182 Composite Device
- 0183 DCP-7020
- 0184 DCP-7025 Printer
- 0185 MFC-7220 Printer
- 0186 Composite Device
- 0187 FAX-2820 Printer
- 0188 FAX-2920 Printer
- 018a MFC-9420CN
- 018c DCP-115C
- 018d DCP-116C
- 018e DCP-117C
- 018f DCP-118C
- 0190 DCP-120C
- 0191 DCP-315CN
- 0192 DCP-340CW
- 0193 MFC-215C
- 0194 MFC-425CN
- 0195 MFC-820CW Remote Setup Port
- 0196 MFC-820CN Remote Setup Port
- 0197 MFC-640CW
- 019a MFC-840CLN Remote Setup Port
- 01a2 MFC-8640D
- 01a3 Composite Device
- 01a4 DCP-8065DN Printer
- 01a5 MFC-8460N Port(FaxModem)
- 01a6 MFC-8860DN Port(FaxModem)
- 01a7 MFC-8870DW Printer
- 01a8 DCP-130C
- 01a9 DCP-330C
- 01aa DCP-540CN
- 01ab MFC-240C
- 01ae DCP-750CW RemovableDisk
- 01af MFC-440CN
- 01b0 MFC-660CN
- 01b1 MFC-665CW Remote Setup Port
- 01b2 MFC-845CW Remote Setup Port
- 01b4 MFC-460CN Remote Setup Port
- 01b5 MFC-630CD
- 01b6 MFC-850CDN
- 01b7 MFC-5460CN Remote Setup Port
- 01b8 MFC-5860CN
- 01ba MFC-3360C
- 01bd MFC-8660DN
- 01be DCP-750CN RemovableDisk
- 01bf MFC-860CDN Remote Setup Port
- 01c0 DCP-128C
- 01c1 DCP-129C
- 01c2 DCP-131C
- 01c3 DCP-329C
- 01c4 DCP-331C
- 01c5 MFC-239C
- 01ca MFC-9440CN Remote Setup Port
- 01ce DCP-135C
- 01cf DCP-150C
- 01d0 DCP-350C
- 01d1 DCP-560CN
- 01d4 MFC-230C
- 01d5 MFC-235C
- 01d6 MFC-260C
- 01df DCP-155C
- 01e0 MFC-265C
- 01e1 DCP-153C
- 01e2 DCP-157C
- 01e3 DCP-353C
- 01e4 DCP-357C
- 1000 Printer
- 1002 Printer
- 2002 PTUSB Printing
- 2004 PT-2300/2310 P-touch Label Printer
- 2015 QL-500 P-touch label printer
- 2100 Card Reader Writer
-04fa Dallas Semiconductor
- 2490 DS1490F 2-in-1 Fob, 1-Wire adapter
- 4201 DS4201 Audio DAC
-04fb Biostar Microtech International Corp.
-04fc Sunplus Technology Co., Ltd
- 0003 CM1092 Optical Scroller Mouse
- 0013 ViewMate Desktop Mouse CC2201
- 0015 ViewMate Desktop Mouse CC2201
- 0232 Fingerprint
- 0561 Flexcam 100
- 1533 Mass Storage
- 504a SPCA504a Digital Camera
- 504b Aiptek, 1.3 mega PocketCam
- 5330 Digitrex 2110
- 5331 Vivitar Vivicam 10
- 5720 Card Reader Driver
- 7333 Finet Technology Palmpix DC-85
- 757a Aiptek, MP315 MP3 Player
- ffff PureDigital Ritz Disposable
-04fd Soliton Systems, K.K.
- 0003 Smart Card Reader II
-04fe PFU, Ltd
-04ff E-CMOS Corp.
-0500 Siam United Hi-Tech
- 0001 DART Keyboard Mouse
- 0002 DART-2 Keyboard
-0501 Fujikura DDK, Ltd
-0502 Acer, Inc.
- 0001 Handheld
- 0736 Handheld
- 15b1 PDA n311
- 1631 c10 Series
- 1632 c20 Series
- 16e1 n10 Handheld Sync
- 16e2 n20 Pocket PC Sync
- 16e3 n30 Handheld Sync
- d001 Divio NW801/DVC-V6+ Digital Camera
-0503 Hitachi America, Ltd
-0504 Hayes Microcomputer Products
-0506 3Com Corp.
- 009d HomeConnect Camera
- 00a0 3CREB96 Bluetooth Adapter
- 00a1 Bluetooth Device
- 00a2 Bluetooth Device
- 00df 3Com Home Connect lite
- 0100 HomeConnect ADSL Modem Driver
- 03e8 3C19250 Ethernet [klsi]
- 0a01 3CRSHEW696 Wireless Adapter
- 0a11 3CRWE254G72 802.11g Adapter
- 11f8 HomeConnect 3C460
- 2922 HomeConnect Cable Modem External with
- 3021 U.S. Robotics 56000 Voice FaxModem Pro
- 4601 3C460B 10/100 Ethernet Adapter
- f002 3CP4218 ADSL Modem (pre-init)
- f003 3CP4218 ADSL Modem
- f100 3CP4218 ADSL Modem (pre-init)
-0507 Hosiden Corp.
- 0011 Konami ParaParaParadise Controller
-0508 Clarion Co., Ltd
-0509 Aztech Systems, Ltd
- 0801 ADSL Modem
- 0802 ADSL Modem (RFC1483)
- 0806 DSL Modem
- 080f Binatone ADSL500 Modem Network Interface
- 0812 Pirelli ADSL Modem Network Interface
-050a Cinch Connectors
-050b Cable System International
-050c InnoMedia, Inc.
-050d Belkin Components
- 0004 Direct Connect
- 0012 F8T012 Bluetooth Adapter
- 0013 F8T013 Bluetooth Adapter
- 0050 F5D6050 802.11b Wireless Adapter
- 0081 F8T001v2 Bluetooth
- 0083 Bluetooth Device
- 0084 F8T003v2 Bluetooth
- 0102 Flip KVM
- 0103 F5U103 Serial Adapter [etek]
- 0106 VideoBus II Adapter, Video
- 0108 F1DE108B KVM
- 0109 F5U109/F5U409 PDA Adapter
- 0115 SCSI Adapter
- 0119 F5U120-PC Dual PS/2 Ports
- 0121 F5D5050 100Mbps Ethernet
- 0122 Ethernet Adapter
- 0131 Bluetooth Device with trace filter
- 0201 Peripheral Switch
- 0208 USBView II Video Adapter [nt1004]
- 0210 F5U228 Hi-Speed USB 2.0 DVD Creator
- 0211 F5U211 USB 2.0 15-in-1 Media Reader & Writer
- 0224 F5U224 USB 2.0 4-Port Hub
- 0234 F5U234 USB 2.0 4-Port Hub
- 0237 F5U237 USB 2.0 7-Port Hub
- 0240 F5U240 USB 2.0 CF Card Reader
- 0257 F5U257 Serial
- 0409 F5U409 Serial
- 0551 F6C550-AVR UPS
- 0802 Nostromo n40 Gamepad
- 0803 Nostromo 1745 GamePad
- 0805 Nostromo N50 GamePad
- 0815 Nostromo n52 HID SpeedPad Mouse Wheel
- 0826 ErgoFit Wireless Optical Mouse (HID)
- 0980 HID UPS Battery
- 1202 F5U120-PC Parallel Printer Port
- 1203 F5U120-PC Serial Port
- 258a F5U258 Host to Host cable
- 3101 F1DF102U/F1DG102U Flip Hub
- 3201 F1DF102U/F1DG102U Flip KVM
- 4050 ZD1211B
- 5055 F5D5055
- 6051 11Mbps Wireless Network Adapter
- 7050 F5D7050 ver 1000 WiFi
- 7051 F5D7051 54g USB Network Adapter
- 705a F5D7050A Wireless Adapter
- 705b Wireless G Adapter
- 705c F5D7050 v4000 Wireless Adapter
- 905b F5D9050 ver 3 Wireless Adapter
- 905c Wireless G Plus MIMO Network Adapter
-050e Neon Technology, Inc.
-050f KC Technology, Inc.
- 0001 Hub
- 0003 KC82C160S Hub
- 0180 KC-180 IrDA Dongle
- 0190 KC2190 USB Host-to-Host cable
-0510 Sejin Electron, Inc.
- 0001 Keyboard
- 1000 Keyboard with PS/2 Mouse Port
- e001 Mouse
-0511 N'Able (DataBook) Technologies, Inc.
-0512 Hualon Microelectronics Corp.
-0513 digital-X, Inc.
-0514 FCI Electronics
-0515 ACTC
-0516 Longwell Electronics
-0517 Butterfly Communications
-0518 EzKEY Corp.
- 0001 USB to PS2 Adaptor v1.09
- 0002 EZ-9900C Keyboard
-0519 Star Micronics Co., Ltd
- c002 Xlive Bluetooth XBM-100S MP3 Player
-051a WYSE Technology
- a005 Smart Display Version 9973
-051b Silicon Graphics
-051c Shuttle, Inc.
- c001 eHome Infrared Receiver
- c002 eHome Infrared Receiver
-051d American Power Conversion
- 0001 UPS
- 0002 Uninterruptible Power Supply
- 0003 UPS
-051e Scientific Atlanta, Inc.
-051f IO Systems (Elite Electronics), Inc.
-0520 Taiwan Semiconductor Manufacturing Co.
-0521 Airborn Connectors
-0522 Advanced Connectek, Inc.
-0523 ATEN GmbH
-0524 Sola Electronics
-0525 Netchip Technology, Inc.
- 100d RFMD Bluetooth Device
- 1080 NET1080 USB-USB Bridge
- a140 USB Clik! 40
- a141 (OME) PocketZip 40 MP3 Player Driver
- a220 GVC Bluetooth Wireless Adapter
- a4a0 Linux-USB "Gadget Zero"
- a4a1 Linux-USB Ethernet Gadget
- a4a2 Linux-USB Ethernet/RNDIS Gadget
- a4a3 Linux-USB user-mode isochronous source/sink
- a4a4 Linux-USB user-mode bulk source/sink
- a4a5 Linux-USB File Storage Gadget
- a4a6 Linux-USB Serial Gadget
- a4a7 Linux-USB Serial Gadget (CDC ACM mode)
- a4a8 Linux-USB Printer Gadget
-0526 Temic MHS S.A.
-0527 ALTRA
-0528 ATI Technologies, Inc.
- 7561 TV Wonder
- 7562 TV Wonder, Edition (FN5)
- 7563 TV Wonder, Edition (FI)
- 7564 TV Wonder, Edition (FQ)
- 7565 TV Wonder, Edition (NTSC+)
- 7566 TV Wonder, Edition (FN5)
- 7567 TV Wonder, Edition (FI)
- 7568 TV Wonder, Edition (FQ)
- 7569 Live! Pro (A)
- 756a Live! Pro Audio (O)
-0529 Aladdin Knowledge Systems
- 0001 HASP v0.06
- 030b eToken R1 v3.1.3.x
- 0313 eToken R1 v3.2.3.x
- 031b eToken R1 v3.3.3.x
- 0323 eToken R1 v3.4.3.x
- 0412 eToken R2 v2.2.4.x
- 041a eToken R2 v2.2.4.x
- 0422 eToken R2 v2.4.4.x
- 042a eToken R2 v2.5.4.x
- 050c eToken Pro v4.1.5.x
- 0514 eToken Pro v4.2.5.4
- 0600 eToken Pro 64k (4.2)
-052a Crescent Heart Software
-052b Tekom Technologies, Inc.
- 0102 Ca508A HP1020 Camera v.1.3.1.6
- 0801 Yakumo MegaImage 37
- 1512 Yakumo MegaImage IV
- 1513 Aosta CX100 WebCam
- 1514 Aosta CX100 WebCam Storage
- 1905 Yakumo MegaImage 47
- 1911 Yakumo MegaImage 47 SL
- 2202 WDM Still Image Capture
- 2203 Sound Vision Stream Driver
- 3a06 DigiLife DDV-5120A
- d001 P35U Camera Capture
-052c Canon Information Systems, Inc.
-052d Avid Electronics Corp.
-052e Standard Microsystems Corp.
-052f Unicore Software, Inc.
-0530 American Microsystems, Inc.
-0531 Wacom Technology Corp.
-0532 Systech Corp.
-0533 Alcatel Mobile Phones
-0534 Motorola, Inc.
-0535 LIH TZU Electric Co., Ltd
-0536 Hand Held Products (Welch Allyn, Inc.)
- 01a0 PDT
-0537 Inventec Corp.
-0538 Caldera International, Inc. (SCO)
-0539 Shyh Shiun Terminals Co., Ltd
-053a Preh Werke GmbH & Co. KG
-053b Global Village Communication
-053c Institute of Microelectronic & Mechatronic Systems
-053d Silicon Architect
-053e Mobility Electronics
-053f Synopsys, Inc.
-0540 UniAccess AB
- 0101 Panache Surf ISDN TA
-0541 Sirf Technology, Inc.
-0543 ViewSonic Corp.
- 00fe G773 Monitor Hub
- 00ff P815 Monitor Hub
- 0bf2 airpanel V150 Wireless Smart Display
- 0bf3 airpanel V110 Wireless Smart Display
- 0ed9 Color Pocket PC V35
- 0f01 airsync Wi-Fi Wireless Adapter
- 1527 Color Pocket PC V36
- 1529 Color Pocket PC V37
- 152b Color Pocket PC V38
- 152e Pocket PC
- 1921 Communicator Pocket PC
- 1922 Smartphone
- 1923 Pocket PC V30
- 1a11 Wireless 802.11g Adapter
- 1e60 TA310 - ATSC/NTSC/PAL Driver(PCM4)
- 4153 ViewSonic G773 Control (?)
-0544 Cristie Electronics, Ltd
-0545 Xirlink, Inc.
- 7333 Trution Web Camera
- 8002 IBM NetCamera
- 8009 Veo PC Camera
- 800c Veo StingRay
- 800d Veo PC Camera
- 8080 IBM C-It WebCam
- 808a Veo PC Camera
- 808b Veo PC Camera
- 808d Veo PC Camera
- 810a Veo Advanced Connect WebCam
- 810b Veo PC Camera
- 810c Veo PC Camera
- 8135 Veo Mobile/Advanced Web Camera
- 813a Veo PC Camera
- 813b Veo PC Camera
- 813c Veo Mobile/Advanced Web Camera
- 8333 Veo Stingray/Connect Web Camera
- 888c eVision 123 digital camera
- 888d eVision 123 digital camera
-0546 Polaroid Corp.
- 0daf PDC 2300Z
- 1bed PDC 1320 Camera
- 3097 PDC 310
- 3187 Digital Cam
- dccf Sound Vision Stream Driver
-0547 Anchor Chips, Inc.
- 0001 ICSI Bluetooth Device
- 1002 Python2 WDM Encoder
- 2131 AN2131 EZUSB Microcontroller
- 2235 AN2235 EZUSB-FX Microcontroller
- 2710 EZ-Link Loader (EZLNKLDR.SYS)
- 2720 AN2720 USB-USB Bridge
- 2727 Xircom PGUNET USB-USB Bridge
- 2750 EZ-Link (EZLNKUSB.SYS)
- 2810 Cypress USB ATAPI Bridge
- 7777 Bluetooth Device
- 9999 AN2131 uninitialized (?)
-0548 Tyan Computer Corp.
- 1005 EZ Cart II GameBoy Flash Programmer
-0549 Pixera Corp.
-054a Fujitsu Microelectronics, Inc.
-054b New Media Corp.
-054c Sony Corp.
- 0001 HUB
- 0002 Standard HUB
- 0010 DSC-S30/S70/S75/F505V/F505/FD92/W1 Cybershot/Mavica Digital Camera
- 0014 Nogatech USBVision (SY)
- 0022 Storage Adapter V2 (TPP)
- 0023 CD Writer
- 0024 Mavica CD-1000 Camera
- 0025 NW-MS7 Walkman MemoryStick Reader
- 002b Portable USB Harddrive V2
- 002c USB Floppy Disk Drive
- 002d MSAC-US1 MemoryStick Reader
- 002e Sony HandyCam MemoryStick Reader
- 0030 Storage Adapter V2 (TPP)
- 0032 MemoryStick MSC-U01 Reader
- 0035 Network Walkman (E)
- 0036 Net MD
- 0037 MG Memory Stick Reader/Writer
- 0038 Clie PEG-S300/D PalmOS PDA
- 0039 Network Walkman (MS)
- 003c VAIO-MX LCD Control
- 0045 Digital Imaging Video
- 0046 Network Walkman
- 004a Memory Stick Hi-Fi System
- 004b Memory Stick Reader/Writer
- 004e DSC-xxx (ptp)
- 0056 MG Memory Stick Reader/Writer
- 0058 Clie PEG-N7x0C PalmOS PDA Mass Storage
- 0066 Clie PEG-N7x0C/PEG-T425 PalmOS PDA Serial
- 0069 Memorystick MSC-U03 Reader
- 006d Clie PEG-T425 PDA Mass Storage
- 006f Network Walkman (EV)
- 0073 Storage CRX1750U
- 0075 Net MD
- 0076 Storage Adapter ACR-U20
- 007c Net MD
- 007f IC Recorder (MS)
- 0080 Net MD
- 0081 Net MD
- 0084 Net MD
- 0085 Net MD
- 0086 Net MD
- 008b Micro Vault 64M Mass Storage
- 0095 Sony Clie s360
- 0099 Clie NR70 PDA Mass Storage
- 009a Clie NR70 PDA Serial
- 00ab Visual Communication Camera (PCGA-UVC10)
- 00af DPP-EX Series Digital Photo Printer
- 00bf IC Recorder (S)
- 00c0 Handycam DCR-30
- 00c6 Net MD
- 00c7 Net MD
- 00c8 MZ-N710 Minidisc Walkman
- 00c9 Net MD
- 00ca MZ-DN430 Minidisc Walkman
- 00cb MSAC-US20 Memory Stick Reader
- 00da Sony Clie nx60
- 00e8 Network Walkman (MS)
- 00e9 Handheld
- 00eb Net MD
- 0101 Net MD
- 0103 IC Recorder (ST)
- 0105 Micro Vault Hub
- 0107 VCC-U01 Visual Communication Camera
- 0110 Digital Imaging Video
- 0113 Net MD
- 0116 IC Recorder (P)
- 0144 Clie PEG-TH55 PDA
- 0147 Visual Communication Camera (PCGA-UVC11)
- 014c Aiwa AM-NX9 Net MD Music Recorder MDLP
- 014d Memory Stick Reader/Writer
- 0154 Eyetoy Audio Device
- 015f IC Recorder (BM)
- 0169 Clie PEG-TJ35 PDA Serial
- 016a Clie PEG-TJ35 PDA Mass Storage
- 016b Mobile HDD
- 016d IC Recorder (SX)
- 016e DPP-EX50 Digital Photo Printer
- 0171 Fingerprint Sensor 3500
- 017e Net MD
- 017f Hi-MD WALKMAN
- 0180 Net MD
- 0181 Hi-MD WALKMAN
- 0182 Net MD
- 0183 Hi-MD WALKMAN
- 0184 Net MD
- 0185 Hi-MD WALKMAN
- 0186 Net MD
- 0187 Hi-MD WALKMAN
- 0188 Net MD
- 018a Net MD
- 018b Hi-MD SOUND GATE
- 019e Micro Vault 1.0G Mass Storage
- 01ad ATRAC HDD PA
- 01bd MRW62E Multi-Card Reader/Writer
- 01c3 NW-E55 Network Walkman
- 01c6 MEMORY P-AUDIO
- 01c7 Printing Support
- 01d0 DVD+RW External Drive DRU-700A
- 01d5 IC RECORDER
- 01de VRD-VC10 [Video Capture]
- 01e9 Net MD
- 01ea Hi-MD WALKMAN
- 01ee IC RECORDER
- 01fa Sony IC Recorder (P)
- 01fb NW-E405 Network Walkman
- 020f Device
- 0210 ATRAC HDD PA
- 0219 Net MD
- 021a Hi-MD WALKMAN
- 021b Net MD
- 021c Hi-MD WALKMAN
- 021d Net MD
- 0227 Printing Support
- 022c Net MD
- 022d Hi-MD AUDIO
- 0233 ATRAC HDD PA
- 0236 Mobile HDD
- 023b DVD+RW External Drive DRU-800UL
- 023c Net MD
- 023d Hi-MD WALKMAN
- 0243 MicroVault Flash Drive
- 0257 IFU-WLM2 USB Wireless LAN Module (Wireless Mode)
- 0258 IFU-WLM2 USB Wireless LAN Module (Memory Mode)
- 0259 IC RECORDER
- 0267 Tachikoma Device
- 0268 Batoh Device
- 0269 HDD WALKMAN
- 026a HDD WALKMAN
- 0271 IC Recorder (P)
- 027c NETWORK WALKMAN
- 027e SONY Communicator
- 027f IC RECORDER
- 0286 Net MD
- 0287 Hi-MD WALKMAN
- 029b PRS-500 eBook reader
- 02ae PlayStation 3 Memory Card Adaptor
- 02af Handycam DCR-DVD306E
- 02c4 Device
- 02d2 PSP
-054d Try Corp.
-054e Proside Corp.
-054f WYSE Technology Taiwan
-0550 Fuji Xerox Co., Ltd
- 0002 InkJet Color Printer
- 0004 InkJet Color Printer
- 0005 InkJet Color Printer
-0551 CompuTrend Systems, Inc.
-0552 Philips Monitors
-0553 STMicroelectronics Imaging Division (VLSI Vision)
- 0001 TerraCAM
- 0002 CPiA WebCam
- 0100 STV0672 Camera
- 0140 Video Camera
- 0150 CDE CAM 100
- 0151 Digital Blue QX5 Microscope
- 0200 Dual-mode Camera0
- 0201 Dual-mode Camera1
- 0202 Aiptek PenCam 1
- 0674 Multi-mode Camera
- 0679 NMS Video Camera (Webcam)
- 1002 Che-ez! Splash
-0554 Dictaphone Corp.
-0555 ANAM S&T Co., Ltd
-0556 Asahi Kasei Microsystems Co., Ltd
- 0001 AK5370 I/F A/D Converter
-0557 ATEN International Co., Ltd
- 2001 UC-1284 Printer Port
- 2002 10Mbps Ethernet [klsi]
- 2004 UC-100KM PS/2 Mouse and Keyboard adapter
- 2006 UC-1284B Printer Port
- 2007 UC-110T 100Mbps Ethernet [pegasus]
- 2008 UC-232A Serial Port [pl2303]
- 2009 UC-210T Ethernet
- 2202 CS124U Miniview II KVM Switch
- 2600 IDE Bridge
- 4000 DSB-650 10Mbps Ethernet [klsi]
- 7000 Hub
-0558 Truevision, Inc.
-0559 Cadence Design Systems, Inc.
-055a Kenwood USA
-055b KnowledgeTek, Inc.
-055c Proton Electronic Ind.
-055d Samsung Electro-Mechanics Co.
- 0001 Keyboard
- 0bb1 Bluetooth Device
- 1030 Optical Wheel Mouse (OMS3CB/OMGB30)
- 1031 Optical Wheel Mouse (OMA3CB/OMGI30)
- 1040 Mouse HID Device
- 1050 E-Mail Optical Wheel Mouse (OMS3CE)
- 1080 Optical Wheel Mouse (OMS3CH)
- 2020 Floppy Disk Drive
- 6780 Keyboard V1
- 6781 Keyboard Mouse
- 8001 E.M. Hub
- 9000 AnyCam [pwc]
- 9001 MPC-C30 AnyCam Premium for Notebooks [pwc]
- a010 WLAN Adapter(SWL-2300)
- a011 Boot Device
- a012 WLAN Adapter(SWL-2300)
- a013 WLAN Adapter(SWL-2350)
- a230 Boot Device
- b000 11Mbps WLAN Mini Adapter
- b230 Netopia 802.11b WLAN Adapter
- b231 LG Wireless LAN 11b Adapter
-055e CTX Opto-Electronics Corp.
-055f Mustek Systems, Inc.
- 0001 ScanExpress 1200 CU
- 0002 ScanExpress 600 CU
- 0003 ScanExpress 1200 USB
- 0006 ScanExpress 1200 UB
- 0007 ScanExpress 1200 USB Plus
- 0008 ScanExpress 1200 CU Plus
- 0010 BearPaw 1200F
- 0210 ScanExpress A3 USB
- 0218 BearPaw 2400 TA
- 0219 BearPaw 2400 TA Plus
- 021a BearPaw 2448 TA Plus
- 021c BearPaw 1200 CU Plus
- 021d BearPaw 2400 CU Plus
- 021e BearPaw 1200 TA/CS
- 021f SNAPSCAN e22
- 0400 BearPaw 2400 TA Pro
- 0401 P 3600 A3 Pro
- 0408 BearPaw 2448 CU Pro
- 0873 ScanExpress 600 USB
- 1000 BearPaw 4800 TA Pro
- a350 gSmart 350
- a800 MDC 800 Camera
- b500 MDC 3000 Camera
- c005 PC CAM 300A
- c200 gSmart 300
- c220 gSmart mini
- c360 Mustek DV 4000
- c420 gSmart mini 2
- c440 Mustek DV 3000
- c520 gSmart mini 3
- c530 Mustek Gsmart LCD 2
- c631 MDC-4000
- c650 Mustek MDC5500Z
- d001 WCam 300
- d003 PC CAM 300A
- d004 PC CAM 300A
-0560 Interface Corp.
-0561 Oasis Design, Inc.
-0562 Telex Communications, Inc.
- 0001 Enhanced Microphone
- 0002 Telex Microphone
-0563 Immersion Corp.
-0564 Chinon Industries, Inc.
-0565 Peracom Networks, Inc.
- 0001 Serial Port [etek]
- 0002 Enet Ethernet [klsi]
- 0003 @Home Networks Ethernet [klsi]
- 0005 Enet2 Ethernet [klsi]
- 0041 Peracom Remote NDIS Ethernet Adapter
-0566 Monterey International Corp.
- 0110 ViewMate Desktop Mouse CC2201
- 1001 ViewMate Desktop Mouse CC2201
- 1002 ViewMate Desktop Mouse CC2201
- 1003 ViewMate Desktop Mouse CC2201
- 1004 ViewMate Desktop Mouse CC2201
- 1005 ViewMate Desktop Mouse CC2201
- 1006 ViewMate Desktop Mouse CC2201
- 1007 ViewMate Desktop Mouse CC2201
- 2800 MIC K/B
- 2801 MIC K/B Mouse
- 2802 Kbd Hub
-0567 Xyratex International, Ltd
-0568 Quartz Ingenierie
-0569 SegaSoft
-056a Wacom Co., Ltd
- 0000 PenPartner
- 0001 PenPartner 4x5
- 0002 PenPartner 6x8
- 0010 Graphire
- 0011 Graphire 2
- 0013 Graphire 3 4x5
- 0020 Intuos 4x5
- 0021 Intuos 6x8
- 0022 Intuos 9x12
- 0023 Intuos 12x12
- 0024 Intuos 12x18
- 0030 PL400
- 0031 PL500
- 0032 PL600
- 0034 PL550
- 0035 PL800
- 0041 Intuos2 4x5
- 0042 Intuos 2 6x8
- 0043 Intuos 2
- 0044 Intuos2 12x12
- 0045 Intuos2 12x18
- 0400 PenPartner 4x5
- 4850 PenPartner 6x8
-056b Decicon, Inc.
-056c eTEK Labs
- 0006 KwikLink Host-Host Connector
- 8007 Kwik232 Serial Port
- 8100 KwikLink Host-Host Connector
- 8101 KwikLink USB-USB Bridge
-056d EIZO Corp.
- 0000 Hub
- 0001 Monitor
- 0002 HID Monitor Controls
- 0003 Device Bay Controller
-056e Elecom Co., Ltd
- 0002 29UO Mouse
- 200c LD-USB/TX
- 4002 Laneed 100Mbps Ethernet LD-USB/TX [pegasus]
- 4005 LD-USBL/TX
- 400b LD-USB/TX
- 4010 LD-USB20
- 5003 UC-SGT
- 5004 UC-SGT
- abc1 LD-USB/TX
-056f Korea Data Systems Co., Ltd
- cd00 CDM-751 CD organizer
-0570 Epson America
-0571 Interex, Inc.
- 0002 echoFX InterView Lite
-0572 Conexant Systems (Rockwell), Inc.
- 0001 Ezcam II WebCam
- 0002 Ezcam II WebCam
- 0040 Wondereye CP-115 WebCam
- 0041 WebCam Notebook
- 0042 WebCam Notebook
- 1232 V.90 modem
- 1234 Typhoon Redfun Modem V90 56k
- 1252 HCF V90 Data Fax Voice Modem
- 1253 Zoom V.92 Faxmodem
- 1300 SoftK56 Data Fax Voice CARP
- 1301 Modem Enumerator
- 2000 SoftGate 802.11 Adapter
- 2002 SoftGate 802.11 Adapter
- 8390 WinFast PalmTop/Novo TV Video
- 8392 WinFast PalmTop/Novo TV Video
- cafe AccessRunner ADSL Modem
- cb00 E-Tech ADSL Modem v2
- cb01 GeekADSL Promax Q31 ADSL Modem
- cb06 StarModem Network Interface
-0573 Zoran Co. Personal Media Division (Nogatech)
- 0003 USBGear USBG-V1
- 0400 D-Link V100
- 0600 Dazzle USBVision (1006)
- 1300 leadtek USBVision (1006)
- 2000 X10 va10a Wireless Camera
- 2001 Dazzle EmMe (2001)
- 2101 Zoran Co. PMD (Nogatech) AV-grabber Manhattan
- 2d00 Osprey 50
- 2d01 Hauppauge USB-Live Model 600
- 3000 Dazzle MicroCam (NTSC)
- 3001 Dazzle MicroCam (PAL)
- 4000 Nogatech TV! (NTSC)
- 4001 Nogatech TV! (PAL)
- 4002 Nogatech TV! (PAL-I-)
- 4003 Nogatech TV! (MF-)
- 4008 Nogatech TV! (NTSC) (T)
- 4009 Nogatech TV! (PAL) (T)
- 4010 Nogatech TV! (NTSC) (A)
- 4100 USB-TV FM (NTSC)
- 4110 PNY USB-TV (NTSC) FM
- 4400 Nogatech TV! Pro (NTSC)
- 4401 Nogatech TV! Pro (PAL)
- 4450 PixelView PlayTv-USB PRO (PAL) FM
- 4451 Nogatech TV! Pro (PAL+)
- 4452 Nogatech TV! Pro (PAL-I+)
- 4500 Nogatech TV! Pro (NTSC)
- 4501 Nogatech TV! Pro (PAL)
- 4550 ZTV ZT-721 2.4GHz USB A/V Receiver
- 4551 Dazzle TV! Pro Audio (P+)
- 4d00 Hauppauge WinTV-USB USA
- 4d01 Hauppauge WinTV-USB
- 4d02 Hauppauge WinTV-USB UK
- 4d03 Hauppauge WinTV-USB France
- 4d04 Hauppauge WinTV (PAL D/K)
- 4d10 Hauppauge WinTV-USB with FM USA radio
- 4d11 Hauppauge WinTV-USB (PAL) with FM radio
- 4d12 Hauppauge WinTV-USB UK with FM Radio
- 4d14 Hauppauge WinTV (PAL D/K FM)
- 4d20 Hauppauge WinTV-USB II (PAL) with FM radio
- 4d21 Hauppauge WinTV-USB II (PAL)
- 4d22 Hauppauge WinTV-USB II (PAL) Model 566
- 4d23 Hauppauge WinTV-USB France 4D23
- 4d24 Hauppauge WinTV Pro (PAL D/K)
- 4d25 Hauppauge WinTV-USB Model 40209 rev B234
- 4d26 Hauppauge WinTV-USB Model 40209 rev B243
- 4d27 Hauppauge WinTV-USB Model 40204 Rev B281
- 4d28 Hauppauge WinTV-USB Model 40204 rev B283
- 4d29 Hauppauge WinTV-USB Model 40205 rev B298
- 4d2a Hauppauge WinTV-USB Model 602 Rev B285
- 4d2b Hauppauge WinTV-USB Model 602 Rev B282
- 4d2c Hauppauge WinTV Pro (PAL/SECAM)
- 4d30 Hauppauge WinTV-USB FM Model 40211 Rev B123
- 4d31 Hauppauge WinTV-USB III (PAL) with FM radio Model 568
- 4d32 Hauppauge WinTV-USB III (PAL) FM Model 573
- 4d34 Hauppauge WinTV Pro (PAL D/K FM)
- 4d35 Hauppauge WinTV-USB III (PAL) FM Model 597
- 4d36 Hauppauge WinTV Pro (PAL B/G FM)
- 4d37 Hauppauge WinTV-USB Model 40219 rev E189
- 4d38 Hauppauge WinTV Pro (NTSC FM)
-0574 City University of Hong Kong
-0575 Philips Creative Display Solutions
-0576 BAFO/Quality Computer Accessories
-0577 ELSA
-0578 Intrinsix Corp.
-0579 GVC Corp.
-057a Samsung Electronics America
-057b Y-E Data, Inc.
- 0000 FlashBuster-U Floppy
- 0001 Tri-Media Reader Floppy
- 0006 Tri-Media Reader Card Reader
- 0010 Memory Stick Reader Writer
- 0020 HEXA Media Drive 6-in-1 Card Reader Writer
- 0030 Memory Card Viewer (TV)
-057c AVM GmbH
- 0b00 ISDN-Controller B1 Family
- 0c00 ISDN-Controller FRITZ!Card
- 1000 ISDN-Controller FRITZ!Card v2.0
- 1900 ISDN-Controller FRITZ!Card v2.1
- 2000 ISDN-Connector FRITZ!X
- 2200 BlueFRITZ!
- 2300 Teledat X130 DSL
- 2800 ISDN-Connector TA
- 3200 Teledat X130 DSL
- 3500 FRITZ!Card DSL SL
- 3701 FRITZ!Box SL
- 3702 FRITZ!Box
- 3800 BlueFRITZ! Bluetooth Stick
- 3a00 FRITZ!Box Fon
- 3c00 FRITZ!Box WLAN
- 3d00 Fritz!Box
- 3e01 FRITZ!Box (Annex A)
- 4001 FRITZ!Box Fon (Annex A)
- 4101 FRITZ!Box WLAN (Annex A)
- 4201 FRITZ!Box Fon WLAN (Annex A)
- 4601 Eumex 5520PC (WinXP/2000)
- 4602 Eumex 400 (WinXP/2000)
- 4701 AVM FRITZ!Box Fon ata
- 5401 Eumex 300 IP
- 5601 AVM FRITZ!WLAN Stick
- 6201 WLAN USB v1.1
- 62ff WLAN USB v1.1 [no firmware]
-057d Shark Multimedia, Inc.
-057e Nintendo Co., Ltd
- 0306 Wii Remote Controller RVL-003
-057f QuickShot, Ltd
- 6238 USB StrikePad
-0580 Denron, Inc.
-0581 Racal Data Group
-0582 Roland Corp.
- 0000 UA-100
- 0002 UM-4/MPU-64 MIDI Interface
- 0003 SoundCanvas SC-8850
- 0004 U-8
- 0005 Edirol UM-2 MIDI Adapter
- 0007 SoundCanvas SC-8820
- 0008 PC-300
- 0009 Edirol UM-1SX MIDI Adapter
- 000b SK-500
- 000c SC-D70
- 0010 EDIROL UA-5
- 0011 Edirol UA-5 Sound Capture
- 0012 XV-5050
- 0013 XV-5050
- 0014 EDIROL UM-880 MIDI I/F (native)
- 0015 EDIROL UM-880 MIDI I/F (generic)
- 0016 EDIROL SD-90
- 0017 EDIROL SD-90
- 001b MMP-2
- 001c MMP-2
- 001d V-SYNTH
- 001e V-SYNTH
- 0023 EDIROL UM-550
- 0024 EDIROL UM-550
- 0025 EDIROL UA-20
- 0026 EDIROL UA-20
- 0027 EDIROL SD-20
- 0028 EDIROL SD-20
- 0029 EDIROL SD-80
- 002a EDIROL SD-80
- 002b EDIROL UA-700
- 002c EDIROL UA-700
- 002d XV-2020 Synthesizer
- 002e XV-2020 Synthesizer
- 002f VariOS
- 0030 VariOS
- 0033 EDIROL PCR
- 0034 EDIROL PCR
- 0037 Digital Piano
- 0038 Digital Piano
- 003b BOSS GS-10
- 003c BOSS GS-10
- 0040 GI-20
- 0041 GI-20
- 0042 RS-70
- 0043 RS-70
- 0044 EDIROL UA-1000
- 0047 EDIROL UR-80 WAVE
- 0048 EDIROL UR-80 MIDI
- 0049 EDIROL UR-80 WAVE
- 004a EDIROL UR-80 MIDI
- 004b EDIROL M-100FX
- 004c EDIROL PCR-A WAVE
- 004d EDIROL PCR-A MIDI
- 004e EDIROL PCR-A WAVE
- 004f EDIROL PCR-A MIDI
- 0050 EDIROL UA-3FX
- 0052 EDIROL UM-1SX
- 0054 Digital Piano
- 0060 EXR Series
- 0064 EDIROL PCR-1 WAVE
- 0065 EDIROL PCR-1 MIDI
- 0066 EDIROL PCR-1 WAVE
- 0067 EDIROL PCR-1 MIDI
- 006a SP-606
- 006b SP-606
- 006d FANTOM-X
- 006e FANTOM-X
- 0073 EDIROL UA-25
- 0074 EDIROL UA-25
- 0075 BOSS DR-880
- 0076 BOSS DR-880
- 007a RD
- 007b RD
- 007d EDIROL UA-101
- 0080 G-70
- 0081 G-70
- 008b EDIROL PC-50
- 008c EDIROL PC-50
- 008d EDIROL UA-101 USB1
- 0092 EDIROL PC-80 WAVE
- 0093 EDIROL PC-80 MIDI
- 0096 EDIROL UA-1EX
- 009a EDIROL UM-3EX
- 009d EDIROL UM-1
- 00a2 Digital Piano
- 00a3 EDIROL UA-4FX
- 00a6 Juno-G
- 00ad SH-201
- 00c4 EDIROL M-16DX
-0583 Padix Co., Ltd (Rockfire)
- 2030 RM-203 USB Nest [mode 1]
- 2031 RM-203 USB Nest [mode 2]
- 2032 RM-203 USB Nest [mode 3]
- 2033 RM-203 USB Nest [mode 4]
- 2050 PX-205 PSX Bridge
- 3050 QF-305u Gamepad
- 688f QF-688uv Windstorm Pro Joystick
- 7070 QF-707u Bazooka Joystick
-0584 RATOC System, Inc.
- 0008 Fujifilm MemoryCard ReaderWriter
- b000 REX-USB60
-0585 FlashPoint Technology, Inc.
- 0001 Digital Camera
- 0002 Digital Camera
- 0003 Digital Camera
- 0004 Digital Camera
- 0005 Digital Camera
- 0006 Digital Camera
- 0007 Digital Camera
- 0008 Digital Camera
- 0009 Digital Camera
- 000a Digital Camera
- 000b Digital Camera
- 000c Digital Camera
- 000d Digital Camera
- 000e Digital Camera
- 000f Digital Camera
-0586 ZyXEL Communications Corp.
- 1000 Omni NET Modem / ISDN TA
- 1500 Omni 56K Plus
- 2011 Scorpion-980N keyboard
- 3304 LAN Modem
- 330a ADSL Modem Interface
- 330e USB Broadband ADSL Modem Rev 1.10
- 3400 ZyAIR B-220 IEEE 802.11b Adapter
- 3401 ZyAIR G-220
- 3402 (ZD1211) IEEE 802.11b+g Adapter
- 3407 G-200 v2
- 3409 AG-225H
- 340a M-202
- 340f G-220 v2
- 3410 Wi-Fi Wireless LAN Adapter
- 3412 Wi-Fi Wireless LAN Adapter
- 3413 AG-225H v2 802.11a/g Wi-Fi Finder & Adapter
- 3415 G-210H 802.11g Wireless Adapter
-0587 America Kotobuki Electronics Industries, Inc.
-0588 Sapien Design
-0589 Victron
-058a Nohau Corp.
-058b Infineon Technologies
-058c In Focus Systems
- 0007 Flash
- 0008 LP130
- 000a LP530
- 0010 Projector
- 0011 Projector
- 0012 Projector
- 0013 Projector
- 0014 Projector
- 0015 Projector
- 0016 Projector
- 0017 Projector
- 0018 Projector
- 0019 Projector
- 001a Projector
- 001b Projector
- 001c Projector
- 001d Projector
- 001e Projector
- 001f Projector
-058d Micrel Semiconductor
-058e Tripath Technology, Inc.
-058f Alcor Micro Corp.
- 2412 SCard R/W CSR-145
- 2802 Monterey Keyboard
- 5492 Hub
- 6232 Hi-Speed 16-in-1 Flash Card Reader/Writer
- 6360 Multimedia Card Reader
- 6361 Multimedia Card Reader
- 6362 Hi-Speed 21-in-1 Flash Card Reader/Writer (Internal/External)
- 6377 Multimedia Card Reader
- 6386 Memory Card
- 6387 Transcend JetFlash Flash Drive
- 6390 USB 2.0-IDE bridge
- 9213 MacAlly Kbd Hub
- 9215 AU9814 Hub
- 9254 Hub
- 9310 Mass Storage (UID4/5A & UID7A)
- 9320 Micro Storage Driver for Win98
- 9321 Micro Storage Driver for Win98
- 9330 SD Reader
- 9331 Micro Storage Driver for Win98
- 9340 Delkin eFilm Reader-32
- 9350 Delkin eFilm Reader-32
- 9360 8-in-1 Media Card Reader
- 9361 Multimedia Card Reader
- 9368 Multimedia Card Reader
- 9380 Flash drive
- 9382 Acer/Sweex Flash drive
- 9410 Keyboard
- 9472 Keyboard Hub
- 9510 ChunghwaTL USB02 Smartcard Reader
- 9520 EMV Certified Smart Card Reader
- 9720 USB-Serial Adapter
-0590 Omron Corp.
- 0004 Cable Modem
- 000b MR56SVS
- 0028 HJ-720IT Pedometer
-0591 Questra Consulting
-0592 Powerware Corp.
- 0002 UPS (X-Slot)
-0593 Incite
-0594 Princeton Graphic Systems
-0595 Zoran Microelectronics, Ltd
- 1001 Digitrex DSC-1300/DSC-2100 (mass storage mode)
- 4343 Digital Camera EX-20 DSC
-0596 MicroTouch Systems, Inc.
- 0001 Touchscreen
- 0002 Touch Screen Controller
-0597 Trisignal Communications
-0598 Niigata Canotec Co., Inc.
-0599 Brilliance Semiconductor, Inc.
-059a Spectrum Signal Processing, Inc.
-059b Iomega Corp.
- 0001 Zip 100 (Type 1)
- 000b Zip 100 (Type 2)
- 0021 Win98 Disk Controller
- 0030 Zip 250 (Ver 1)
- 0031 Zip 100 (Type 3)
- 0032 Zip 250 (Ver 2)
- 0034 Zip 100 Driver
- 0037 Zip 750 MB
- 0040 SCSI Bridge
- 0042 Rev 70 GB
- 0050 Zip CD 650 Writer
- 0053 CDRW55292EXT CD-RW External Drive
- 0057 Mass Storage Device
- 005d Mass Storage Device
- 005f Mass Storage Device
- 0060 PCMCIA PocketZip Dock
- 0061 Varo PocketZip 40 MP3 Player
- 006d HipZip MP3 Player
- 007c Ultra Max USB/1394
- 00db FotoShow Zip 250 Driver
- 0150 Mass Storage Device
- 015d Super DVD Writer
- 0173 Hi-Speed USB-to-IDE Bridge Controller
- 0174 Hi-Speed USB-to-IDE Bridge Controller
- 0176 Hi-Speed USB-to-IDE Bridge Controller
- 0177 Hi-Speed USB-to-IDE Bridge Controller
- 0178 Hi-Speed USB-to-IDE Bridge Controller
- 0179 Hi-Speed USB-to-IDE Bridge Controller
- 017a HDD
- 017b HDD/1394A
- 017c HDD/1394B
- 0251 Optical
- 0252 Optical
- 1052 DVD+RW External Drive
-059c A-Trend Technology Co., Ltd
-059d Advanced Input Devices
-059e Intelligent Instrumentation
-059f LaCie, Ltd
- 0201 StudioDrive USB2
- 0202 StudioDrive USB2
- 0203 StudioDrive USB2
- 0211 PocketDrive
- 0212 PocketDrive
- 0213 PocketDrive USB2
- 0323 LaCie d2 Drive USB2
- 0641 Mobile Hard Drive
- 1010 Desktop Hard Drive
- a601 HardDrive
- a602 CD R/W
-05a0 Vetronix Corp.
-05a1 USC Corp.
-05a2 Fuji Film Microdevices Co., Ltd
-05a3 ARC International
-05a4 Ortek Technology, Inc.
- 9720 Keyboard Mouse
- 9722 Keyboard
- 9731 MCK-600W/MCK-800USB Keyboard
-05a5 Sampo Technology Corp.
-05a6 Cisco Systems, Inc.
- 0001 CVA124 Cable Voice Adapter (WDM)
- 0002 CVA122 Cable Voice Adapter (WDM)
- 0003 CVA124E Cable Voice Adapter (WDM)
- 0004 CVA122E Cable Voice Adapter (WDM)
-05a7 Bose Corp.
-05a8 Spacetec IMC Corp.
-05a9 OmniVision Technologies, Inc.
- 0511 OV511 WebCam
- 0518 OV518 WebCam
- 0519 OV519 Microphone
- 1550 VEHO Filmscanner
- 2800 SuperCAM
- 4519 Webcam Classic
- 8519 OV519 WebCam
- a511 OV511+ WebCam
- a518 D-Link DSB-C310 WebCam
-05aa Utilux South China, Ltd
-05ab In-System Design
- 0002 Parallel Port
- 0030 Storage Adapter V2 (TPP)
- 0031 ATA Bridge
- 0060 USB 2.0 ATA Bridge
- 0061 Storage Adapter V3 (TPP-I)
- 0101 Storage Adapter (TPP)
- 0130 Compact Flash and Microdrive Reader (TPP)
- 0200 USS725 ATA Bridge
- 0201 Storage Adapter (TPP)
- 0202 ATA Bridge
- 0300 Portable Hard Drive (TPP)
- 0301 Portable Hard Drive V2
- 0350 Portable Hard Drive (TPP)
- 0351 Portable Hard Drive V2
- 081a ATA Bridge
- 0cda ATA Bridge for CD-R/RW
- 1001 BAYI Printer Class Support
- 5700 Storage Adapter V2 (TPP)
- 5701 USB Storage Adapter V2
- 5901 Smart Board (TPP)
- 5a01 ATI Storage Adapter (TPP)
- 5d01 DataBook Adapter (TPP)
-05ac Apple, Inc.
- 0201 USB Keyboard [Alps or Logitech, M2452]
- 0202 Keyboard [ALPS]
- 0205 Extended Keyboard [Mitsumi]
- 0206 Extended Keyboard [Mitsumi]
- 020b Pro Keyboard [Mitsumi, A1048/US layout]
- 020c Extended Keyboard [Mitsumi]
- 020d Pro Keyboard [Mitsumi, A1048/JIS layout]
- 020e Internal Keyboard/Trackpad
- 020f Internal Keyboard/Trackpad
- 021b Internal Keyboard/Trackpad
- 0220 Aluminum Keyboard
- 0221 Keyboard (Aluminium) (ISO)
- 0229 Internal Keyboard/Trackpad (MacBook Pro) (ANSI)
- 022a Internal Keyboard/Trackpad (MacBook Pro) (ISO)
- 022b Internal Keyboard/Trackpad (MacBook Pro) (JIS)
- 0301 USB Mouse [Mitsumi, M4848]
- 0302 Optical Mouse [Fujitsu]
- 0304 Optical USB Mouse [Mitsumi]
- 0306 Optical USB Mouse [Fujitsu]
- 1000 Bluetooth HCI MacBookPro (HID mode)
- 1001 Keyboard Hub [ALPS]
- 1002 Extended Keyboard Hub [Mitsumi]
- 1003 Hub in Pro Keyboard [Mitsumi, A1048]
- 1006 Hub in Aluminum Keyboard
- 1101 Speakers
- 1201 3G iPod
- 1202 iPod 2G
- 1203 iPod 4.Gen Grayscale 40G
- 1204 iPod [Photo]
- 1205 iPod Mini 1.Gen/2.Gen
- 1206 iPod '06'
- 1207 iPod '07'
- 1208 iPod '08'
- 1209 iPod Video
- 120a iPod Nano
- 1260 iPod Nano 2.Gen
- 1261 iPod Classic
- 1300 iPod Shuffle
- 1301 iPod Shuffle 2.Gen
- 8202 HCF V.90 Data/Fax Modem
- 8203 Bluetooth HCI
- 8204 Bluetooth HCI [Bluetooth 2.0 + EDR, built-in]
- 8205 Bluetooth HCI MacBookPro
- 8206 Bluetooth USB Host Controller
- 8240 IR Receiver [built-in]
- 8300 Built-in iSight (no firmware loaded)
- 8501 Built-in iSight [Micron]
- 912f Hub in 30" Cinema Display
- 9221 30" Cinema Display
- ffff Bluetooth in DFU mode - Driver
-05ad Y.C. Cable U.S.A., Inc.
-05ae Synopsys, Inc.
-05af Jing-Mold Enterprise Co., Ltd
- 0821 IDE to
- 9167 KB 9151B - 678
- 9267 KB 9251B - 678 Mouse
-05b0 Fountain Technologies, Inc.
-05b1 First International Computer, Inc.
- 1389 Bluetooth Wireless Adapter
-05b4 LG Semicon Co., Ltd
- 4857 M-Any DAH-210
- 6001 Digisette DUO-MP3 AR-100
-05b5 Dialogic Corp.
-05b6 Proxima Corp.
-05b7 Medianix Semiconductor, Inc.
-05b8 Agiler, Inc.
- 3002 Scroll Mouse
-05b9 Philips Research Laboratories
-05ba DigitalPersona, Inc.
-05bb Grey Cell Systems
-05bc 3G Green Green Globe Co., Ltd
- 0004 Trackball
-05bd RAFI GmbH & Co. KG
-05be Tyco Electronics (Raychem)
-05bf S & S Research
-05c0 Keil Software
-05c1 Kawasaki Microelectronics, Inc.
-05c2 Media Phonics (Suisse) S.A.
-05c5 Digi International, Inc.
- 0002 AccelePort USB 2
- 0004 AccelePort USB 4
- 0008 AccelePort USB 8
-05c6 Qualcomm, Inc.
- 3100 CDMA Wireless Modem/Phone
- 3196 CDMA Wireless Modem
- 3197 CDMA Wireless Modem/Phone
-05c7 Qtronix Corp.
- 0113 PC Line Mouse
- 1001 Lynx Mouse
- 2001 Keyboard
- 2011 Scorpius Keyboard
- 6001 Ten-Keypad
-05c8 Cheng Uei Precision Industry Co., Ltd (Foxlink)
-05c9 Semtech Corp.
-05ca Ricoh Co., Ltd
- 0101 RDC-5300 Camera
- 0325 Caplio GX (ptp)
- 032d Caplio GX 8 (ptp)
- 032f Caplio R3 (ptp)
- 03a1 IS200e
- 0403 Printing Support
- 0405 Type 101
- 0406 Type 102
- 1830 Visual Communication Camera VGP-VCC2
- 1835 Visual Communication Camera VGP-VCC5
- 1870 Webcam 1000
- 2201 RDC-7 Camera
- 2202 Caplio RR30
- 2203 Caplio 300G
- 2204 Caplio G3
- 2205 Caplio RR30 / Medion MD 6126 Camera
- 2206 Konica DG-3Z
- 2207 Caplio Pro G3
- 2208 Caplio G4
- 2209 Caplio 400G wide
- 220a KONICA MINOLTA DG-4Wide
- 220b Caplio RX
- 220c Caplio GX
- 220d Caplio R1/RZ1
- 220e Sea & Sea 5000G
- 220f Rollei dr5 / Rollei dr5 (PTP mode)
- 2211 Caplio R1S
- 2212 Caplio R1v Camera
- 2213 Caplio R2
- 2214 Caplio GX 8
- 2215 DSC 725
- 2216 Caplio R3
- 2222 RDC-i500
-05cb PowerVision Technologies, Inc.
- 1483 PV8630 interface (scanners, webcams)
-05cc ELSA AG
- 2100 MicroLink ISDN Office
- 2219 MicroLink ISDN
- 2265 MicroLink 56k
- 2267 MicroLink 56k (V.250)
- 2280 MicroLink 56k Fun
- 3000 MicroLink USB2Ethernet [pegasus]
- 3100 AirLancer USB-11
- 3363 MicroLink ADSL Fun
-05cd Silicom, Ltd
-05ce sci-worx GmbH
-05cf Sung Forn Co., Ltd
-05d0 GE Medical Systems Lunar
-05d1 Brainboxes, Ltd
- 0003 Bluetooth Adapter BL-554
-05d2 Wave Systems Corp.
-05d3 Tohoku Ricoh Co., Ltd
-05d5 Super Gate Technology Co., Ltd
-05d6 Philips Semiconductors, CICT
-05d7 Thomas & Betts Corp.
- 0099 10Mbps Ethernet [klsi]
-05d8 Ultima Electronics Corp.
- 4001 Artec Ultima 2000
- 4002 Artec Ultima 2000 (GT6801 based)/Lifetec LT9385/ScanMagic 1200 UB Plus Scanner
- 4003 Artec E+ 48U
- 4004 Artec E+ Pro
- 4005 MEM48U
- 4006 TRUST EASY WEBSCAN 19200
- 4007 TRUST 240H EASY WEBSCAN GOLD
- 4008 Trust Easy Webscan 19200
- 4009 Umax Astraslim
- 4013 IT Scan 1200
- 8105 Artec T1 USB TVBOX (cold)
- 8106 Artec T1 USB TVBOX (warm)
- 8107 Artec T1 USB TVBOX with AN2235 (cold)
- 8108 Artec T1 USB TVBOX with AN2235 (warm)
- 8109 Artec T1 USB2.0 TVBOX (cold)
-05d9 Axiohm Transaction Solutions
- a225 A225 Printer
- a758 A758 Printer
- a794 A794 Printer
-05da Microtek International, Inc.
- 0091 ScanMaker X6u
- 0093 ScanMaker V6USL
- 0094 Phantom 336CX/C3
- 0099 ScanMaker X6/X6U
- 009a Phantom C6
- 00a0 Phantom 336CX/C3 (#2)
- 00a3 ScanMaker V6USL
- 00ac ScanMaker V6UL
- 00b6 ScanMaker V6UPL
- 00ef ScanMaker V6UPL
- 1006 Jenoptik JD350 entrance
- 1011 NHJ Che-ez! Kiss Digital Camera
- 1018 Digital Dream Enigma 1.3
- 1020 Digital Dream l'espion xtra
- 1025 Take-it Still Camera Device
- 1026 Take-it
- 1043 Take-It 1300 DSC Bulk Driver
- 1045 Take-it D1
- 1047 Take-it Camera Composite Device
- 1048 Take-it Q3
- 1049 3M Still Camera Device
- 1051 Camcorder Series
- 1052 Mass Storage Device
- 1053 Take-it DV Composite Device
- 1054 Mass Storage Device
- 1055 Digital Camera Series(536)
- 1056 Mass Storage Device
- 1057 Take-it DSC Camera Device(536)
- 1058 Mass Storage Device
- 1059 Camcorder DSC Series
- 1060 Microtek Take-it MV500
- 2007 ArtixScan DI 1210
- 200c 1394_USB2 Scanner
- 200e ArtixScan DI 810
- 2017 UF ICE Scanner
- 201c 4800 Scanner
- 201d ArtixScan DI 1610
- 201f 4800 Scanner-ICE
- 202e ArtixScan DI 2020
- 208b ScanMaker 6800
- 208f ArtixScan DI 2010
- 209e ScanMaker 4700LP
- 20a7 ScanMaker 5600
- 20b0 ScanMaker X12USL
- 20b1 ScanMaker 8700
- 20b4 ScanMaker 4700
- 20bd ScanMaker 5700
- 20c9 ScanMaker 6700
- 20d2 Microtek ArtixScan 1800f
- 20d6 PS4000
- 20de ScanMaker 9800XL
- 20e0 ScanMaker 9700XL
- 20ed ScanMaker 4700
- 20ee Microtek ScanMaker X12USL
- 3008 Scanner
- 300a 4800 ICE Scanner
- 300b 4800 Scanner
- 300f MiniScan C5
- 3020 4800dpi Scanner
- 3021 1200dpi Scanner
- 3022 Scanner 4800dpi
- 3023 USB1200II Scanner
- 30c1 USB600 Scanner
- 30ce ScanMaker 3800
- 30cf ScanMaker 4800
- 30d4 USB1200 Scanner
- 30d8 Scanner
- 30d9 USB2400 Scanner
- 30e4 ScanMaker 4100
- 30e5 USB3200 Scanner
- 30e6 ScanMaker i320
- 40b3 ScanMaker 3600
- 40b8 ScanMaker 3700
- 40c7 ScanMaker 4600
- 40ca ScanMaker 3600
- 40cb ScanMaker 3700
- 40dd ScanMaker 3750i
- 40ff ScanMaker 3600
- 5003 Goya
- 5013 3200 Scanner
- 80a3 ScanMaker V6USL (#2)
- 80ac ScanMaker V6UL/SpicyU
-05db Sun Corp. (Suntac?)
- 0003 SUNTAC U-Cable type D2
- 0005 SUNTAC U-Cable type P1
- 0009 SUNTAC Slipper U
- 000a SUNTAC Ir-Trinity
- 000b SUNTAC U-Cable type A3
- 0011 SUNTAC U-Cable type A4
-05dc Lexar Media, Inc.
- 0001 jumpSHOT CompactFlash Reader
- 0002 JumpShot
- 0003 JumpShot
- 0080 Jumpdrive Secure 64MB
- 0081 RBC Compact Flash Drive
- 00a7 JumpDrive Impact
- 0100 JumpDrive PRO
- 0200 JumpDrive 2.0 Pro
- 0300 Jumpdrive Geysr
- 0301 JumpDrive Classic
- 0302 JD Micro
- 0303 JD Micro Pro
- 0304 JD Secure II
- 0310 JumpDrive
- 0311 JumpDrive Classic
- 0312 JD Micro
- 0313 JD Micro Pro
- 0320 JumpDrive
- 0321 JD Micro
- 0322 JD Micro Pro
- 0323 UFC
- 0330 JumpDrive Expression
- 0340 JumpDrive TAD
- 0350 Express Card
- 0400 UFDC
- 0401 UFDC
- 0403 Locked B Device
- 0405 Locked C Device
- 0407 Locked D Device
- 0409 Locked E Device
- 040b Locked F Device
- 040d Locked G Device
- 040f Locked H Device
- 0410 JumpDrive
- 0411 JumpDrive
- 0413 Locked J Device
- 0415 Locked K Device
- 0417 Locked L Device
- 0419 Locked M Device
- 041b Locked N Device
- 041d Locked O Device
- 041f Locked P Device
- 0420 JumpDrive
- 0421 JumpDrive
- 0423 Locked R Device
- 0425 Locked S Device
- 0427 Locked T Device
- 0429 Locked U Device
- 042b Locked V Device
- 042d Locked W Device
- 042f Locked X Device
- 0431 Locked Y Device
- 0433 Locked Z Device
- 4d02 MP3 Player
- 4d12 MP3 Player
- a300 JumpDrive2
- a400 JumpDrive(TM) Pro 40-501
- a410 JumpDrive 128MB/256MB
- a411 JumpDrive Traveler
- a420 JumpDrive Pro
- a421 JumpDrive Pro II
- a422 JumpDrive Micro Pro
- a430 JumpDrive Secure
- a431 JumpDrive Secure II
- a432 JumpDrive Classic
- a440 JumpDrive Lightning
- a450 JumpDrive TouchGuard
- a460 JD Mercury
- a501 JumpDrive Classic
- a510 JumpDrive Sport
- a530 JumpDrive Expression
- a531 JumpDrive Secure II
- a560 JumpDrive FireFly
- a701 JumpDrive FireFly
- b002 USB CF Reader
- b018 Multi-Card Reader
-05dd Delta Electronics, Inc.
- ff31 AWU-120
- ff32 FriendlyNET AeroLAN AL2011
- ff35 PCW 100 - Wireless 802.11b Adapter
- ff91 2Wire PC Port Phoneline 10Mbps Adapter
-05df Silicon Vision, Inc.
-05e0 Symbol Technologies
- 0700 Bar Code Scanner (CS1504)
- 0800 Spectrum24 Wireless LAN Adapter
- 1200 DS6608 Bar Code Scanner
- 1900 SNAPI Imaging Device
- 2000 MC3090 Rugged Mobile Computer
- 200d MC70 Rugged Mobile Computer
-05e1 Syntek Semiconductor Co., Ltd
- 0500 DC-112X
- 0501 WebCam, Chipset DC-1125 similar to 174f:a311 - Asus F2F, F2J, F3J, F3T, G1, Z53JA
- 0890 STK011 Camera
- 0892 STK013 Camera
- 0895 STK016 Camera
- 0896 STK017 Camera
-05e2 ElecVision, Inc.
-05e3 Genesys Logic, Inc.
- 000a Keyboard with PS/2 Port
- 000b Mouse
- 0100 Nintendo Game Boy Advance SP
- 0120 Pacific Image Electronics PrimeFilm 1800u slide/negative scanner
- 0131 CF/SM Reader/Writer
- 0142 Multiple Slides Scanner-3600
- 0143 Multiple Frames Film Scanner-36series
- 0180 Plustek Scanner
- 0182 Wize Media 1000
- 0189 ScanJet 4600 series
- 018a Xerox 6400
- 0300 GLUSB98PT Parallel Port
- 0301 USB2LPT Cable Release2
- 0406 Hub
- 0501 GL620USB Host-Host interface
- 0502 GL620USB GeneLink USB-USB Bridge
- 0504 HID Keyboard Filter
- 0604 USB 1.1 Hub
- 0605 USB 2.0 Hub [ednet]
- 0606 USB 2.0 Hub / D-Link DUB-H4 USB 2.0 Hub
- 0608 USB-2.0 4-Port HUB
- 0660 USB 2.0 Hub
- 0700 SIIG US2256 CompactFlash Card Reader
- 0701 USB 2.0 IDE Adapter
- 0702 USB 2.0 IDE Adapter
- 0703 Card Reader
- 0704 Card Reader
- 0705 Card Reader
- 0706 Card Reader
- 0707 Card Reader
- 0708 Card Reader
- 0709 Card Reader
- 070a Pen Flash
- 070b DMHS1B Rev 3 DFU Adapter
- 070e X-PRO CR20xA USB 2.0 Internal Card Reader
- 070f Pen Flash
- 0710 USB 2.0 33-in-1 Card Reader
- 0711 Card Reader
- 0712 Delkin Mass Storage Device
- 0715 USB 2.0 microSD Reader
- 0760 USB 2.0 Card Reader/Writer
- 0761 Genesys Mass Storage Device
- 0780 USBFS DFU Adapter
- 07a0 Pen Flash
- 0927 Card Reader
- 1205 Afilias Optical Mouse H3003
- a700 Pen Flash
- f102 VX7012 TV Box
- f103 VX7012 TV Box
- f104 VX7012 TV Box
- fd21 3M TL20 Temperature Logger
- fe00 Razer Mouse
-05e4 Red Wing Corp.
-05e5 Fuji Electric Co., Ltd
-05e6 Keithley Instruments
-05e8 ICC, Inc.
-05e9 Kawasaki LSI
- 0008 KL5KUSB101B Ethernet [klsi]
- 0009 Sony 10Mbps Ethernet [pegasus]
- 000c USB-to-RS-232
- 000d USB-to-RS-232
- 0014 RS-232 J104
- 0040 Ethernet Adapter
- 2008 Ethernet Adapter
-05eb FFC, Ltd
-05ec COM21, Inc.
-05ee Cytechinfo Inc.
-05ef AVB, Inc. [anko?]
- 020a Top Shot Pegasus Joystick
- 8884 Mag Turbo Force Wheel
- 8888 Top Shot Force Feedback Racing Wheel
-05f0 Canopus Co., Ltd
- 0101 DA-Port DAC
-05f1 Compass Communications
-05f2 Dexin Corp., Ltd
- 0010 AQ Mouse
-05f3 PI Engineering, Inc.
- 0007 Kinesis Advantage PRO MPC/USB Keyboard
- 0081 Kinesis Integrated Hub
- 020b PS2 Adapter
- 0232 X-Keys Switch Interface, Programming Mode
- 0261 X-Keys Switch Interface, SPLAT Mode
- 0264 X-Keys Switch Interface, Composite Mode
-05f5 Unixtar Technology, Inc.
-05f6 AOC International
-05f7 RFC Distribution(s) PTE, Ltd
-05f9 PSC Scanning, Inc.
-05fa Siemens Telecommunications Systems, Ltd
- 3301 Keyboard with PS/2 Mouse Port
- 3302 Keyboard
- 3303 Keyboard with PS/2 Mouse Port
-05fc Harman Multimedia
- 7849 Harman/Kardon SoundSticks
-05fd InterAct, Inc.
- 0239 SV-239 HammerHead Digital
- 0251 Raider Pro
- 0253 ProPad 8 Digital
- 0286 SV-286 Cyclone Digital
- 262a 3dfx HammerHead FX
- 262f HammerHead Fx
- daae Game Shark
-05fe Chic Technology Corp.
- 0001 Mouse
- 0003 Cypress USB Mouse
- 0005 Viewmaster 4D Browser Mouse
- 0007 Twinhead Mouse
- 0009 Inland Pro 4500/5000 Mouse
- 0011 Browser Mouse
- 1010 Optical Wireless
-05ff LeCroy Corp.
-0600 Barco Display Systems
-0601 Jazz Hipster Corp.
- 0003 Internet Security Co., Ltd. SecureKey
-0602 Vista Imaging, Inc.
- 1001 ViCam WebCam
-0603 Novatek Microelectronics Corp.
- 00f1 Keyboard
- 6871 Mouse
-0604 Jean Co., Ltd
-0605 Anchor C&C Co., Ltd
-0606 Royal Information Electronics Co., Ltd
-0607 Bridge Information Co., Ltd
-0608 Genrad Ads
-0609 SMK Manufacturing, Inc.
- 031d eHome Infrared Receiver
- 0322 eHome Infrared Receiver
- ff12 SMK Bluetooth Device
-060a Worthington Data Solutions, Inc.
-060b Solid Year
- 0001 MacAlly Keyboard
- 1006 Japanese Keyboard - 260U
- 2101 Keyboard
- 5811 ACK-571U Wireless Keyboard
- 5903 Japanese Keyboard - 595U
- 6001 SolidTek USB 2p HUB
- 6002 SolidTek USB Keyboard
- 6003 Japanese Keyboard - 600HM
- a001 Maxwell Compact PC PM3
-060c EEH Datalink GmbH
-060d Auctor Corp.
-060e Transmonde Technologies, Inc.
-060f Joinsoon Electronics Mfg. Co., Ltd
-0610 Costar Electronics, Inc.
-0611 Totoku Electric Co., Ltd
-0613 TransAct Technologies, Inc.
-0614 Bio-Rad Laboratories
-0615 Quabbin Wire & Cable Co., Inc.
-0616 Future Techno Designs PVT, Ltd
-0617 Swiss Federal Institute of Technology
-0618 MacAlly
- 0101 Mouse
-0619 Seiko Instruments, Inc.
- 0101 SLP-100 Driver
- 0102 SLP-200 Driver
- 0103 SLP-100N Driver
- 0104 SLP-200N Driver
- 0105 SLP-240 Driver
-061a Veridicom International, Inc.
- 0110 5thSense Fingerprint Sensor
- 0200 FPS200 Fingerprint Sensor
- 8200 VKI-A Fingerprint Sensor/Flash Storage (dumb)
- 9200 VKI-B Fingerprint Sensor/Flash Storage (smart)
-061b Promptus Communications, Inc.
-061c Act Labs, Ltd
-061d Quatech, Inc.
-061e Nissei Electric Co.
- 0001 nissei 128DE-USB -
- 0010 nissei 128DE-PNA -
-0620 Alaris, Inc.
- 0004 QuickVideo weeCam
- 0007 QuickVideo weeCam
- 000a QuickVideo weeCam
- 000b QuickVideo weeCam
-0621 ODU-Steckverbindungssysteme GmbH & Co. KG
-0622 Iotech, Inc.
-0623 Littelfuse, Inc.
-0624 Avocent Corp.
-0625 TiMedia Technology Co., Ltd
-0626 Nippon Systems Development Co., Ltd
-0627 Adomax Technology Co., Ltd
-0628 Tasking Software, Inc.
-0629 Zida Technologies, Ltd
-062a Creative Labs
- 0000 Optical mouse
- 0001 Notebook Optical Mouse
- 0201 Defender Office Keyboard (K7310) S Zodiak KM-9010
- 9003 VoIP Conference Hub (A16GH)
- 9004 USR9602 USB Internet Mini Phone
-062b Greatlink Electronics Taiwan, Ltd
-062c Institute for Information Industry
-062d Taiwan Tai-Hao Enterprises Co., Ltd
-062e Mainsuper Enterprises Co., Ltd
-062f Sin Sheng Terminal & Machine, Inc.
-0631 JUJO Electronics Corp.
-0633 Cyrix Corp.
-0634 Micron Technology, Inc.
-0635 Methode Electronics, Inc.
-0636 Sierra Imaging, Inc.
- 0003 Vivicam 35Xx
-0638 Avision, Inc.
- 0268 iVina 1200U Scanner
- 026a Minolta Dimage Scan Dual II
- 0a10 iVina FB1600/UMAX Astra 4500
- 0a13 AV600U
- 0a16 SC-215
- 0a30 UMAX Astra 6700 Scanner
- 0a41 Avision AM3000/MF3000 Series
- 0f01 fi-4010CU
- 4004 Minolta Dimage Scan Elite II
-0639 Chrontel, Inc.
-063a Techwin Corp.
-063b Taugagreining HF
-063c Yamaichi Electronics Co., Ltd (Sakura)
-063d Fong Kai Industrial Co., Ltd
-063e RealMedia Technology, Inc.
-063f New Technology Cable, Ltd
-0640 Hitex Development Tools
-0641 Woods Industries, Inc.
-0642 VIA Medical Corp.
-0644 TEAC Corp.
- 0000 Floppy
- 1000 CD-ROM Drive
- 800d TASCAM Portastudio DP-01FX
- d001 CD-R/RW Unit
- d002 CD-R/RW Unit
- d010 CD-RW/DVD Unit
-0645 Who? Vision Systems, Inc.
-0646 UMAX
-0647 Acton Research Corp.
- 0100 ARC SpectraPro UV/VIS/IR Monochromator/Spectrograph
- 0101 ARC AM-VM Mono Airpath/Vacuum Monochromator/Spectrograph
- 0102 ARC Inspectrum Mono
- 0103 ARC Filterwheel
- 03e9 Inspectrum 128x1024 F VIS Spectrograph
- 03ea Inspectrum 256x1024 F VIS Spectrograph
- 03eb Inspectrum 128x1024 B VIS Spectrograph
- 03ec Inspectrum 256x1024 B VIS Spectrograph
-0648 Inside Out Networks
-0649 Weli Science Co., Ltd
-064b White Mountain DSP, Inc.
-064c Ji-Haw Industrial Co., Ltd
-064d TriTech Microelectronics, Ltd
-064e Suyin Corp.
-064f WIBU-Systems AG
- 0bd7 BOX/U
- 0bd8 BOX/RU
-0650 Dynapro Systems
-0651 Likom Technology Sdn. Bhd.
-0652 Stargate Solutions, Inc.
-0653 CNF, Inc.
-0654 Granite Microsystems, Inc.
- 0005 Device Bay Controller
- 0006 Hub
- 0007 Device Bay Controller
- 0016 Hub
-0655 Space Shuttle Hi-Tech Co., Ltd
-0656 Glory Mark Electronic, Ltd
-0657 Tekcon Electronics Corp.
-0658 Sigma Designs, Inc.
-0659 Aethra
-065a Optoelectronics Co., Ltd
- 0001 Barcode scanner
-065b Tracewell Systems
-065e Silicon Graphics
-065f Good Way Technology Co., Ltd & GWC technology Inc.
-0660 TSAY-E (BVI) International, Inc.
-0661 Hamamatsu Photonics K.K.
-0662 Kansai Electric Co., Ltd
-0663 Topmax Electronic Co., Ltd
- 0103 CobraPad
-0667 Aiwa Co., Ltd
- 0fa1 TD-U8000 Tape Drive
-0668 WordWand
-0669 Océ Printing Systems GmbH
-066a Total Technologies, Ltd
-066b Linksys, Inc.
- 0105 SCM eUSB SmartMedia Card Reader
- 010a Melco MCR-U2 SmartMedia / CompactFlash Reader
- 200c USB10TX
- 2202 USB10TX Ethernet [pegasus]
- 2203 USB100TX Ethernet [pegasus]
- 2204 USB100TX HomePNA Ethernet [pegasus]
- 2206 USB Ethernet [pegasus]
- 2207 HomeLink Phoneline 10M Network Adapter
- 2211 WUSB11 802.11b Adapter
- 2212 WUSB11v2.5 802.11b Adapter
- 2213 WUSB12v1.1 802.11b Adapter
- 2219 Instant Wireless Network Adapter
- 400b USB10TX
-066d Entrega, Inc.
-066e Acer Semiconductor America, Inc.
-066f SigmaTel, Inc.
- 003b MP3 Player
- 003e MP3 Player
- 003f MP3 Player
- 0040 MP3 Player
- 0041 MP3 Player
- 0042 MP3 Player
- 0043 MP3 Player
- 004b A-Max PA11 MP3 Player
- 3400 STMP3400 D-Major MP3 Player
- 3410 STMP3410 D-Major MP3 Player
- 3500 Player Recovery Device
- 4200 STIr4200 IrDA Bridge
- 4210 STIr4210 IrDA Bridge
- 8000 MSCN MP3 Player
- 8001 SigmaTel MSCN Audio Player
- 8004 MSCNMMC MP3 Player
- 8008 i-Bead 100 MP3 Player
- 8020 MP3 Player
- 8034 MP3 Player
- 8036 MP3 Player
- 8038 MP3 Player
- 8056 MP3 Player
- 8060 MP3 Player
- 8066 MP3 Player
- 807e MP3 Player
- 8092 MP3 Player
- 8096 MP3 Player
- 809a MP3 Player
- 80aa MP3 Player
- 80ac MP3 Player
- 80b8 MP3 Player
- 80ba MP3 Player
- 80bc MP3 Player
- 80bf MP3 Player
- 80c5 MP3 Player
- 80c8 MP3 Player
- 80ca MP3 Player
- 80cc MP3 Player
- 8104 MP3 Player
- 8106 MP3 Player
- 8108 MP3 Player
- 810a MP3 Player
- 810c MP3 Player
- 8122 MP3 Player
- 8124 MP3 Player
- 8126 MP3 Player
- 8128 MP3 Player
- 8134 MP3 Player
- 8136 MP3 Player
- 8138 MP3 Player
- 813a MP3 Player
- 813e MP3 Player
- 8140 MP3 Player
- 8142 MP3 Player
- 8144 MP3 Player
- 8146 MP3 Player
- 8148 MP3 Player
- 814c MP3 Player
- 8201 MP3 Player
- 8202 Jens of Sweden / I-BEAD 150M/150H MP3 player
- 8203 MP3 Player
- 8204 MP3 Player
- 8205 MP3 Player
- 8206 Digital MP3 Music Player
- 8207 MP3 Player
- 8208 MP3 Player
- 8209 MP3 Player
- 820a MP3 Player
- 820b MP3 Player
- 820c MP3 Player
- 820d MP3 Player
- 820e MP3 Player
- 820f MP3 Player
- 8210 MP3 Player
- 8211 MP3 Player
- 8212 MP3 Player
- 8213 MP3 Player
- 8214 MP3 Player
- 8215 MP3 Player
- 8216 MP3 Player
- 8217 MP3 Player
- 8218 MP3 Player
- 8219 MP3 Player
- 821a MP3 Player
- 821b MP3 Player
- 821c MP3 Player
- 821d MP3 Player
- 821e MP3 Player
- 821f MP3 Player
- 8220 MP3 Player
- 8221 MP3 Player
- 8222 MP3 Player
- 8223 MP3 Player
- 8224 MP3 Player
- 8225 MP3 Player
- 8226 MP3 Player
- 8227 MP3 Player
- 8228 MP3 Player
- 8229 MP3 Player
- 8230 MP3 Player
- 9000 MP3 Player
- 9001 MP3 Player
- 9002 MP3 Player
-0672 Labtec, Inc.
- 1041 LCS1040 Speaker System
- 5000 SpaceBall 4000 FLX
-0673 HCL
- 5000 Keyboard
-0674 Key Mouse Electronic Enterprise Co., Ltd
-0675 Draytech
- 0110 Vigor 128 ISDN TA
- 0550 Vigor550
-0676 Teles AG
-0677 Aiwa Co., Ltd
- 07d5 TM-ED1285(USB)
- 0fa1 TD-U8000 Tape Drive
-0678 ACard Technology Corp.
-067b Prolific Technology, Inc.
- 0000 PL2301 USB-USB Bridge
- 0001 PL2302 USB-USB Bridge
- 04bb PL2303 Serial (IODATA USB-RSAQ2)
- 0610 Onext EG210U MODEM
- 0611 AlDiga AL-11U Quad-band GSM/GPRS/EDGE modem
- 2303 PL2303 Serial Port
- 2305 PL2305 Parallel Port
- 2307 PL2307 USB-ATAPI4 Bridge
- 2313 FITEL PHS U Cable Adaptor
- 2315 Flash Disk Embedded Hub
- 2316 Flash Disk Security Device
- 2317 Mass Storage Device
- 2501 PL2501 USB-USB Bridge (USB 2.0)
- 2507 PL2507 Hi-speed USB to IDE bridge controller
- 2515 Flash Disk Embedded Hub
- 2517 Flash Disk Mass Storage Device
- 25a1 PL25A1 Host-Host Bridge
- 3400 Hi-Speed Flash Disk with TruePrint AES3400
- 3500 Hi-Speed Flash Disk with TruePrint AES3500
- 3507 PL3507 ATAPI6 Bridge
- aaa0 Prolific Pharos
- aaa2 PL2303 Serial Adapter (IODATA USB-RSAQ3)
-067c Efficient Networks, Inc.
- 1001 Siemens SpeedStream 100MBps Ethernet
- 1022 Siemens SpeedStream 1022 802.11b Adapter
- 1023 SpeedStream Wireless
- 4020 SpeedStream 4020 ATM/ADSL Installer
- 4031 Efficient ADSL Modem
- 4032 SpeedStream 4031 ATM/ADSL Installer
- 4033 SpeedStream 4031 ATM/ADSL Installer
- 4060 Alcatel Speedstream 4060 ADSL Modem
- 4062 Efficient Networks 4060 Loader
- 5667 Efficient Networks Virtual Bus for ADSL Modem
- c031 SpeedStream 4031 ATM/ADSL Installer
- c032 SpeedStream 4031 ATM/ADSL Installer
- c033 SpeedStream 4031 ATM/ADSL Installer
- c060 SpeedStream 4060 Miniport ATM/ADSL Adapter
- d667 Efficient Networks Virtual Bus for ADSL Modem
- e240 Speedstream Ethernet Adapter E240
- e540 Speedstream Ethernet Adapter E240
-067d Hohner Corp.
-067e Intermec
- 1001 Mobile Computer
-067f Virata, Ltd
- 4552 DSL-200 ADSL Modem
- 6542 DSL Modem
- 6549 DSL Modem
- 7541 DSL Modem
-0680 Realtek Semiconductor Corp., CPP Div. (Avance Logic)
- 0002 Arowana Optical Wheel Mouse MSOP-01
-0681 Siemens Information and Communication Products
- 0001 Dect Base
- 0002 Gigaset 3075 Passive ISDN
- 0005 ID-Mouse with Fingerprint Reader
- 0012 I-Gate 802.11b Adapter
- 001b WLL013
- 0022 Gigaset SX353 ISDN
- 002b A-100-I ADSL Modem
- 002e ADSL Router_S-141
- 0034 GSM module MC35/ES75 USB Modem
- 3c06 54g USB Network Adapter
-0682 Victor Company of Japan, Ltd
-0684 Actiontec Electronics, Inc.
-0686 Minolta Co., Ltd
- 2001 PagePro 4110W
- 3001 PagePro 4100
- 3006 PagePro 1250W
- 302e Develop D 1650iD PCL
- 3034 Develop D 2050iD PCL
- 4001 Dimage 2300
- 4003 Dimage 2330 Zoom Camera
- 4004 Scan Elite II
- 4005 Minolta DiMAGE E201 Mass Storage Device
- 4006 Dimage 7 Camera
- 4007 Dimage S304 Camera
- 4008 Dimage 5 Camera
- 4009 Dimage X Camera
- 400a Dimage S404 Camera
- 400b Dimage 7i Camera
- 400c Dimage F100 Camera
- 400d Scan Dual III
- 400e Dimage 5400
- 400f Dimage 7Hi Camera
- 4010 Dimage Xi Camera
- 4011 Dimage F300 Camera
- 4012 Dimage F200 Camera
- 4014 Dimage S414 Camera
- 4015 Dimage XT Camera [storage]
- 4016 Dimage XT Camera [remote mode]
- 4017 Dimage E223
- 4018 Dimage Z1 Camera
- 401a Dimage A1 Camera
- 401c Dimage X20 Camera
- 401e Dimage E323 Camera
-068a Pertech, Inc.
-068b Potrans International, Inc.
-068e CH Products, Inc.
- 00e2 HFX OEM Joystick
- 00f1 Pro Throttle
- 00f2 Flight Sim Pedals
- 00f3 Fighterstick
- 00ff Flight Sim Yoke
- 0500 GameStick 3D
- 0501 CH Pro Pedals
- 0504 F-16 Combat Stick
-0690 Golden Bridge Electech, Inc.
-0693 Hagiwara Sys-Com Co., Ltd
- 0002 FlashGate SmartMedia Card Reader
- 0003 FlashGate CompactFlash Card Reader
- 0005 FlashGate
- 0006 SM PCCard R/W and SPD
- 0007 FlashGate ME (Authenticated)
- 000a SDCard/MMC Reader/Writer
-0694 Lego Group
- 0001 Mindstorms Tower
-0698 Chuntex (CTX)
- 1786 1300ex Monitor
- 9999 VLxxxx Monitor+Hub
-0699 Tektronix, Inc.
-069a Askey Computer Corp.
- 0001 VC010 WebCam [pwc]
- 0303 Cable Modem
- 0311 ADSL Router Remote NDIS Device
- 0318 Remote NDIS Device
- 0319 220V Remote NDIS Device
- 0320 IEEE 802.11b Wireless LAN Card
- 0321 Dynalink WLL013 / Compex WLU11A 802.11b Adapter
- 0402 Scientific Atlanta WebSTAR 100 & 200 series Cable Modem
- 0811 BT Virtual Bus for Helium
- 0821 BT Voyager 1010 802.11b Adapter
- 4402 Scientific Atlanta WebSTAR 2000 series Cable Modem
- 4403 Scientific Atlanta WebSTAR 300 series Cable Modem
- 4501 Scientific-Atlanta WebSTAR 2000 series Cable Modem
-069b Thomson, Inc.
- 0704 DCM245 Cable Modem
- 070c MP3 Player
- 070d MP3 Player
- 070e MP3 Player
- 070f RCA Lyra RD1071 MP3 Player
- 2220 RCA Kazoo RD1000 MP3 Player
- 300a RCA Lyra MP3 Player
- 3012 MP3 Player
- 3013 MP3 Player
- 5557 RCA CDS6300
-069d Hughes Network Systems (HNS)
- 0001 Satellite Receiver Device
- 0002 Satellite Device
-069e Marx
- 0005 CryptoBox v1.2
-069f Allied Data Technologies BV
- 0010 Tornado Speakerphone FaxModem 56.0
- 0011 Tornado Speakerphone FaxModem 56.0
- 1000 ADT VvBus for CopperJet
-06a2 Topro Technology, Inc.
-06a3 Saitek PLC
- 0006 Cyborg Gold Joystick
- 0109 P880 Pad
- 0160 ST290 Pro
- 0200 Xbox Adrenalin Hub
- 0241 Xbox Adrenalin Gamepad
- 0255 X52 Flight Controller
- 040b P990 Dual Analog Pad
- 040c P2900 Wireless Pad
- 0422 ST90 Joystick
- 0460 ST290 Pro Flight Stick
- 0463 ST290
- 0464 Cyborg Evo
- 0471 Cyborg Graphite Stick
- 0501 R100 Sports Wheel
- 0502 ST200 Stick
- 0506 R220 Digital Wheel
- 051e Cyborg Digital II Stick
- 052d P750 Gamepad
- 053c X45 Flight Controller
- 053f X36F Flightstick
- 056c P2000 Tilt Pad
- 056f P2000 Tilt Pad
- 05d2 PC Dash 2
- 075c X52 Flight Controller
- 0805 R440 Force Wheel
- 1003 GM2 Action Pad
- 1009 Action Pad
- 100a SP550 Pad and Joystick Combo
- 100b SP550 Pad
- 1509 P3000 Wireless Pad
- 1589 P3000 Wireless Pad
- 2541 X45 Flight Controller
- 3509 P3000 RF GamePad
- 353e Cyborg Evo Wireless
- 3589 P3000 Wireless Pad
- 35be Cyborg Evo
- 5509 P3000 Wireless Pad
- 8000 Gamers' Keyboard
- 801e Cyborg 3D Digital Stick II
- 8021 Eclipse II Keyboard
- 802d P750 Pad
- 803f X36 Flight Controller
- 806f P2000 Tilt Pad
- 80c0 Pro Gamer Command Unit
- a502 Gaming Mouse
- ff04 R440 Force Wheel
- ff0c Cyborg Force Rumble Pad
- ff0d P2600 Rumble Force Pad
- ff12 Cyborg 3D Force Stick
- ff17 ST 330 Rumble Force Stick
- ff52 Cyborg 3D Rumble Force Joystick
- ffb5 Cyborg Evo Force Joystick
-06a4 Xiamen Doowell Electron Co., Ltd
-06a5 Divio
- 0000 Typhoon Webcam 100k [nw8000]
- d001 ProLink DS3303u WebCam
- d800 Chicony TwinkleCam
- d820 Wize Media 1000
-06a7 MicroStore, Inc.
-06a8 Topaz Systems, Inc.
- 0042 SignatureGem 1X5 Pad
- 0043 SignatureGem 1X5-HID Pad
-06a9 Westell
- 0005 WireSpeed Dual Connect Modem
- 0006 WireSpeed Dual Connect Modem
- 000a WireSpeed Dual Connect Modem
- 000b WireSpeed Dual Connect Modem
- 000e 802.11g Adapter
-06aa Sysgration, Ltd
-06ac Fujitsu Laboratories of America, Inc.
-06ad Greatland Electronics Taiwan, Ltd
-06ae Professional Multimedia Testing Centre
-06af Harting, Inc. of North America
-06b8 Pixela Corp.
-06b9 Alcatel Telecom
- 0121 SpeedTouch 121g Wireless Dongle
- 2001 SPEED TOUCH Card
- 4061 SpeedTouch ISDN or ADSL Modem
- a5a5 DynaMiTe Modem
-06ba Smooth Cord & Connector Co., Ltd
-06bb EDA, Inc.
-06bc Oki Data Corp.
-06bd AGFA-Gevaert NV
- 0001 SnapScan 1212U
- 0002 SnapScan 1236U
- 0100 SnapScan Touch
- 0101 SNAPSCAN ELITE
- 0200 ScanMaker 8700
- 02bf DUOSCAN f40
- 0400 CL30
- 0401 Mass Storage
- 0403 ePhoto CL18 Camera
- 0404 ePhoto CL20 Camera
- 2061 SnapScan 1212U (?)
- 208d Snapscan e40
- 208f SnapScan e50
- 2091 SnapScan e20
- 2093 SnapScan e10
- 2095 SnapScan e25
- 2097 SnapScan e26
- 20fd SnapScan e52
- 20ff SnapScan e42
-06be AME Optimedia Technology Co., Ltd
- 1005 Dazzle DPVM! (1005)
- d001 P35U Camera Capture
-06bf Leoco Corp.
-06c2 Phidgets Inc. (formerly GLAB)
- 0030 PhidgetRFID
- 0038 4-Motor PhidgetServo v3.0
- 0039 1-Motor PhidgetServo v3.0
- 003a 8-Motor PhidgetAdvancedServo
- 0040 PhidgetInterface Kit 0-0-4
- 0044 PhidgetInterface Kit 0-16-16
- 0045 PhidgetInterface Kit 8-8-8
- 0048 PhidgetStepper (Under Development)
- 0049 PhidgetTextLED Ver 1.0
- 004a PhidgetLED Ver 1.0
- 004b PhidgetEncoder Ver 1.0
- 0051 PhidgetInterface Kit 0-5-7 (Custom)
- 0052 PhidgetTextLCD
- 0053 PhidgetInterfaceKit 0-8-8
- 0058 PhidgetMotorControl Ver 1.0
- 0070 PhidgetTemperatureSensor Ver 1.0
- 0071 PhidgetAccelerometer Ver 1.0
- 0072 PhidgetWeightSensor Ver 1.0
- 0073 PhidgetHumiditySensor
- 0074 PhidgetPHSensor
- 0075 PhidgetGyroscope
-06c4 Bizlink International Corp.
-06c5 Hagenuk, GmbH
-06c6 Infowave Software, Inc.
-06c8 SIIG, Inc.
-06c9 Taxan (Europe), Ltd
- 0005 Monitor Control
- 0007 Monitor Control
- 0009 Monitor Control
-06ca Newer Technology, Inc.
-06cb Synaptics, Inc.
- 0001 HID Device
- 0002 HID Device
- 0003 HID Device
- 0005 Touchpad/FPS
- 0006 HID Device
- 0007 HID Device
- 0008 HID Device
- 0009 Composite TouchPad and TrackPoint
- 000e HID Device
- 0010 Composite Human Interface Device
- 0013 Human Interface Device
-06cc Terayon Communication Systems
- 0101 Cable Modem
- 0102 Cable Modem
- 0103 Cable Modem
- 0104 Cable Modem
- 0304 Cable Modem
-06cd Keyspan
- 0101 USA-28 PDA [no firmware]
- 0102 USA-28X PDA [no firmware]
- 0103 USA-19 PDA [no firmware]
- 0104 PDA [prerenum]
- 0105 USA-18X PDA [no firmware]
- 0106 USA-19W PDA [no firmware]
- 0107 USA-19 PDA
- 0108 USA-19W PDA
- 0109 USA-49W serial adapter [no firmware]
- 010a USA-49W serial adapter
- 010b USA-19Qi serial adapter [no firmware]
- 010c USA-19Qi serial adapter
- 010d USA-19Q serial Adapter (no firmware)
- 010e USA-19Q serial Adapter
- 010f USA-28 PDA
- 0110 USA-28Xb PDA
- 0111 USA-18 serial Adapter
- 0112 USA-18X PDA
- 0113 USA-28Xb PDA [no firmware]
- 0114 USA-28Xa PDA [no firmware]
- 0115 USA-28Xa PDA
- 0116 USA-18XA serial Adapter (no firmware)
- 0117 USA-18XA serial Adapter
- 0118 USA-19QW PDA [no firmware]
- 0119 USA-19QW PDA
- 011a USA-49Wlc serial adapter [no firmware]
- 011b MPR Serial Preloader (MPRQI)
- 011c MPR Serial (MPRQI)
- 011d MPR Serial Preloader (MPRQ)
- 011e MPR Serial (MPRQ)
- 0121 USA-19hs serial adapter
- 012a USA-49Wlc serial adapter
- 0201 Digital Media Remote
- 0202 UIA-11 remote control
-06cf SpheronVR AG
- 1010 PanoCam 10
- 1012 PanoCam 12/12X
-06d0 LapLink, Inc.
- 0622 LapLink Gold USB-USB Bridge [net1080]
-06d1 Daewoo Electronics Co., Ltd
-06d3 Mitsubishi Electric Corp.
- 0380 CP8000D Port
- 0381 CP770D Port
- 0385 CP900D Port
- 0387 CP980D Port
- 038b CP3020D Port
- 038c CP900DW(ID) Port
- 0393 CP9500D/DW Port
- 0394 CP9000D/DW Port
- 03a1 CP9550D/DW Port
-06d4 Cisco Systems
-06d5 Toshiba
- 4000 Japanese Keyboard
-06d6 Aashima Technology B.V.
- 002d Trust PowerC@m 350FT
- 002e Trust PowerC@m 350FS
- 0030 Trust 710 LCD POWERC@M ZOOM - MSD
- 0031 Trust 710 LCD POWERC@M ZOOM
- 003a Trust PowerC@m 770Z
- 003c Trust 910z PowerC@m
- 003f Trust 735S POWERC@M ZOOM, WDM DSC Bulk Driver
- 0050 Trust 738AV LCD PV Digital Camera
- 0062 TRUST 782AV LCD P. V. Video Capture
- 0066 TRUST Digital PCTV and Movie Editor
- 006b TRUST AUDIO VIDEO EDITOR
-06d7 Network Computing Devices (NCD)
-06d8 Technical Marketing Research, Inc.
-06da Phoenixtec Power Co., Ltd
- 0002 UPS
-06db Paradyne
-06dc Foxlink Image Technology Co., Ltd
- 0012 Scan 1200c Scanner
- 0014 Prolink Winscan Pro 2448U
-06de Heisei Electronics Co., Ltd
-06e0 Multi-Tech Systems, Inc.
- f101 MT5634ZBA-USB MultiModemUSB (old firmware)
- f103 MT5634MU MultiMobileUSB
- f104 MT5634ZBA-USB MultiModemUSB (new firmware)
- f107 MT5634ZBA-USB-V92 MultiModemUSB
-06e1 ADS Technologies, Inc.
- 0008 UBS-10BT Ethernet [klsi]
- 0009 UBS-10BT Ethernet
- 0833 Mass Storage Device
- a160 Instant Video-To-Go RDX-160 (no firmware)
- a161 Instant Video-To-Go RDX-160
- a190 Instant VCD Capture
- a191 Instant VideoXpress
- a337 Mini DigitalTV
- a701 DVD Xpress
- b337 Mini DigitalTV
- b701 DVD Xpress B
-06e4 Alcatel Microelectronics
-06e6 Tiger Jet Network, Inc.
- 0200 Internet Phone
- 0201 Internet Phone
- 0202 Composite Device
- 0203 Internet Phone
- 0210 Composite Device
- 0211 Internet Phone
- 0212 Internet Phone
- 031c Internet Phone
- 031d Internet Phone
- 031e Internet Phone
- 3200 Composite Device
- 3201 Internet Phone
- 3202 Composite Device
- 3203 Composite Device
- 7200 Composite Device
- 7210 Composite Device
- 7250 Composite Device
- 825c Internet Phone
- 831c Internet Phone
- 831d Composite Device
- 831e Composite Device
- b200 Composite Device
- b201 Composite Device
- b202 Internet Phone
- b210 Internet Phone
- b211 Composite Device
- b212 Composite Device
- b250 Composite Device
- b251 Internet Phone
- b252 Internet Phone
- c200 Internet Phone
- c201 Internet Phone
- c202 Composite Device
- c203 Internet Phone
- c210 Personal PhoneGateway
- c211 Personal PhoneGateway
- c212 Personal PhoneGateway
- c213 PPG Device
- c25c Composite Device
- c290 PPG Device
- c291 PPG Device
- c292 PPG Device
- c293 Personal PhoneGateway
- c31c Composite Device
- c39c Personal PhoneGateway
- c39d PPG Device
- c39e PPG Device
- c39f PPG Device
- c700 Internet Phone
- c701 Internet Phone
- c702 Composite Device
- c703 Internet Phone
- c710 VoIP Combo Device
- c711 VoIP Combo
- c712 VoIP Combo Device
- c713 VoIP Combo Device
- cf00 Composite Device
- cf01 Internet Phone
- cf02 Internet Phone
- cf03 Composite Device
- d210 Personal PhoneGateway
- d211 PPG Device
- d212 PPG Device
- d213 Personal PhoneGateway
- d700 Composite Device
- d701 Composite Device
- d702 Internet Phone
- d703 Composite Device
- d710 VoIP Combo
- d711 VoIP Combo Device
- d712 VoIP Combo
- d713 VoIP Combo
- df00 Composite Device
- df01 Composite Device
- df02 Internet Phone
- df03 Internet Phone
- f200 Internet Phone
- f201 Internet Phone
- f202 Composite Device
- f203 Composite Device
- f210 Internet Phone
- f250 Composite Device
- f252 Internet Phone
- f310 Internet Phone
- f350 Composite Device
-06ea Sirius Technologies
- 0001 NetCom Roadster II 56k
- 0002 Roadster II 56k
-06eb PC Expert Tech. Co., Ltd
-06ef I.A.C. Geometrische Ingenieurs B.V.
-06f0 T.N.C Industrial Co., Ltd
- de01 DualCam Video Camera
- de02 DualCam Still Camera
-06f1 Opcode Systems, Inc.
- a011 SonicPort
- a021 SonicPort Optical
-06f2 Emine Technology Co.
- 0011 KVM Switch Keyboard
-06f6 Wintrend Technology Co., Ltd
-06f7 Wailly Technology Ltd
- 0003 USB->Din 4 Adaptor
-06f8 Guillemot Corp.
- a300 Dual Analog Leader GamePad
- b000 Hercules DJ Console
- c000 Hercules Muse Pocket
- d002 Hercules DJ Console
- e000 HWGUSB2-54 WLAN
- e010 HWGUSB2-54-LB
- e020 HWGUSB2-54V2-AP
-06fa HSD S.r.L
-06fc Motorola Semiconductor Products Sector
-06fd Boston Acoustics
- 0101 Audio Device
- 0102 Audio Device
- 0201 2-piece Audio Device
-06fe Gallant Computer, Inc.
-0701 Supercomal Wire & Cable SDN. BHD.
-0703 Bvtech Industry, Inc.
-0705 NKK Corp.
-0706 Ariel Corp.
-0707 Standard Microsystems Corp.
- 0100 2202 Ethernet [klsi]
- 0200 2202 Ethernet [pegasus]
- 0201 EZ Connect USB Ethernet
- ee04 SMCWUSB32 802.11b Wireless LAN Card
- ee06 EZ-Connect 802.11g Adapter
- ee13 EZ-Connect 802.11g Adapter
-0708 Putercom Co., Ltd
- 047e USB-1284 BRIDGE
-0709 Silicon Systems, Ltd (SSL)
-070a Oki Electric Industry Co., Ltd
- 4002 Bluetooth Device
- 4003 Bluetooth Device
-070d Comoss Electronic Co., Ltd
-070e Excel Cell Electronic Co., Ltd
-0710 Connect Tech, Inc.
- 0001 WhiteHeat (fake ID)
- 8001 WhiteHeat
-0711 Magic Control Technology Corp.
- 0100 Hub
- 0180 IRXpress Infrared Device
- 0181 IRXpress Infrared Device
- 0200 BAY-3U1S1P Serial Port
- 0210 MCT1S Serial Port
- 0230 MCT-232 Serial Port
- 0231 PS/2 Mouse Port
- 0232 Serial On Port
- 0240 PS/2 to USB Converter
- 0300 BAY-3U1S1P Parallel Port
- 0302 Parallel Port
- 0900 SVGA Adapter
-0713 Interval Research Corp.
-0714 NewMotion, Inc.
- 0003 ADB to USB convertor
-0717 ZNK Corp.
-0718 Imation Corp.
- 0002 SuperDisk 120MB
- 0003 SuperDisk 120MB (Authenticated)
- 0060 Flash Drive
- 0061 Flash Drive
- 0062 Flash Drive
- 0063 Swivel Flash Drive
- 0064 Flash Drive
- 0065 Flash Drive
- 0066 Flash Drive
- 0067 Flash Drive
- 0068 Flash Drive
- 0084 USB Flash Drive Mini
-0719 Tremon Enterprises Co., Ltd
-071b Domain Technologies, Inc.
- 0002 DTI-56362-USB Digital Interface Unit
- 0101 Audio4-USB DSP Data Acquisition Unit
- 0201 Audio4-5410 DSP Data Acquisition Unit
- 0301 SB-USB JTAG Emulator
-071c Xionics Document Technologies, Inc.
-071d Eicon Networks Corp.
- 1000 Diva ISDN TA
- 1003 Diva
- 2000 Teledat Surf
-071e Ariston Technologies
-0723 Centillium Communications Corp.
- 0002 Palladia 300/400 Adsl Modem
-0726 Vanguard International Semiconductor-America
-0729 Amitm
- 1000 USC-1000 Serial Port
-072e Sunix Co., Ltd
-072f Advanced Card Systems, Ltd
- 0001 AC1030-based SmartCard Reader
- 0008 ACR 80 Smart Card Reader
- 1000 PLDT Drive
- 1001 PLDT Drive
- 8002 AET63 BioTRUSTKey
- 8003 ACR120
- 8103 ACR120
- 9000 ACR38 AC1038-based Smart Card Reader
- 90cc ACR38 SmartCard Reader
- 90cf ACR38 SAM Smart Card Reader
- 90d0 PertoSmart EMV - Card Reader
-0731 Susteen, Inc.
- 0528 SonyEricsson DCU-11 Cable
-0732 Goldfull Electronics & Telecommunications Corp.
-0733 ViewQuest Technologies, Inc.
- 0101 Digital Video Camera
- 0110 VQ110
- 0401 CS330 WebCam
- 0402 M-318B WebCam
- 0430 Intel Pro Share WebCam
- 0630 VQ630 Dual Mode Digital Camera(Bulk)
- 0631 Hercules Dualpix
- 0780 Smart Cam Deluxe(composite)
- 1310 Epsilon 1.3/Jenoptik JD C1.3/UMAX AstraPix 470
- 1311 Digital Dream Epsilon 1.3
- 2211 Jenoptik
-0734 Lasat Communications A/S
- 0001 560V Modem
- 0002 Lasat 560V Modem
- 043a DVS Audio
-0735 Asuscom Network
- 2100 ISDN Adapter
- 2101 ISDN Adapter
- 6694 ISDN Adapter
- c541 ISDN TA 280
-0736 Lorom Industrial Co., Ltd
-0738 Mad Catz, Inc.
- 4507 XBox Device
- 4516 XBox Device
- 4520 XBox Device
- 4526 XBox Device
- 4536 XBox Device
- 4540 XBox Device
- 4556 XBox Device
- 4566 XBox Device
- 4576 XBox Device
- 4586 XBox Device
- 4588 XBox Device
-073a Chaplet Systems, Inc.
-073b Suncom Technologies
-073c Industrial Electronic Engineers, Inc.
- 0305 Pole Display (PC305-3415 2 x 20 Line Display)
- 0322 Pole Display (PC322-3415 2 x 20 Line Display)
- 0324 Pole Display (LB324-USB 4 x 20 Line Display)
- 0330 Pole Display (P330-3415 2 x 20 Line Display)
- 0450 Pole Display (L450-USB Graphic Line Display)
- 0505 Pole Display (SPC505-3415 2 x 20 Line Display)
- 0522 Pole Display (SPC522-3415 2 x 20 Line Display)
- 0624 Pole Display (SP324-3415 2 x 20 Line Display)
-073d Eutron S.p.a.
- 0005 Crypto Token
- 0007 CryptoIdentity CCID
- 0025 SmartKey 3
- 0c00 Pocket Reader
- 0d00 StarSign Bio Token 3.0 EU
-073e NEC, Inc.
- 0301 Game Pad
-0745 Syntech Information Co., Ltd
-0746 Onkyo Corp.
- 5500 SE-U55 Audio Device
-0747 Labway Corp.
-0748 Strong Man Enterprise Co., Ltd
-0749 EVer Electronics Corp.
-074a Ming Fortune Industry Co., Ltd
-074b Polestar Tech. Corp.
-074c C-C-C Group PLC
-074d Micronas GmbH
- 3553 Composite USB-Device
- 3554 Composite USB-Device
- 3556 Composite USB-Device
-074e Digital Stream Corp.
- 0001 PS/2 Adapter
- 0002 PS/2 Adapter
-0755 Aureal Semiconductor
-0757 Network Technologies, Inc.
-075b Sophisticated Circuits, Inc.
- 0001 Kick-off! Watchdog
-0763 Midiman
- 0115 KeyRig 25
- 0117 Trigger Finger
- 0119 MidAir
- 0150 M-Audio Uno
- 0160 M-Audio 1x1
- 0192 M-Audio Keystation 88es
- 0193 ProKeys 88
- 0194 ProKeys 88sx
- 0195 Oxygen 8 v2
- 0196 Oxygen 49
- 0197 Oxygen 61
- 0198 Axiom 25
- 0199 Axiom 49
- 019a Axiom 61
- 019b KeyRig 49
- 019c KeyStudio
- 1001 MidiSport 2x2
- 1002 MidiSport 2x2
- 1003 MidiSport 2x2
- 1010 MidiSport 1x1
- 1011 MidiSport 1x1
- 1014 M-Audio Keystation Loader
- 1015 M-Audio Keystation
- 1020 Midisport 4x4
- 1021 MidiSport 4x4
- 1030 Midisport 8x8
- 1031 MidiSport 8x8/s Loader
- 1033 MidiSport 8x8/s
- 1040 M-Audio MidiSport 2x4 Loader
- 1041 M-Audio MidiSport 2x4
- 2001 M Audio Quattro
- 2002 M Audio Duo
- 2003 M Audio AudioPhile
- 2004 M-Audio MobilePre
- 2006 M-Audio Transit
- 2007 M-Audio Sonica Theater
- 2008 M-Audio Ozone
- 200d M-Audio OmniStudio
- 200f M-Audio MobilePre
- 2010 M-Audio Fast Track
- 2013 M-Audio JamLab
- 2015 M-Audio RunTime DFU
- 2016 M-Audio RunTime DFU
- 2019 M-Audio Ozone Academic
- 201a M-Audio Micro
- 201b M-Audio RunTime DFU
- 201d M-Audio Producer
- 2080 M-Audio RunTime DFU
- 2081 M-Audio RunTime DFU
- 2803 M-Audio Audiophile DFU
- 2804 M-Audio MobilePre DFU
- 2806 M-Audio Transit DFU
- 2815 M-Audio DFU
- 2816 M-Audio DFU
- 281b M-Audio DFU
- 2880 M-Audio DFU
- 2881 M-Audio DFU
-0764 Cyber Power System, Inc.
- 0005 Cyber Power UPS
- 0501 CP1500 AVR UPS
-0765 X-Rite, Inc.
-0766 Jess-Link Products Co., Ltd
-0767 Tokheim Corp.
-0768 Camtel Technology Corp.
- 0006 Camtel Technology USB TV Genie Pro FM Model TVB330
- 0023 eHome Infrared Receiver
-0769 Surecom Technology Corp.
- 11f2 EP-9001-g 802.11g 54M WLAN Adapter
- 11f3 RT2570
- 11f7 802.11g 54M WLAN Adapter
- 31f3 RT2573
-076a Smart Technology Enablers, Inc.
-076b OmniKey AG
- 0596 CardMan 2020
- 1021 CardMan 1021
- 1221 CardMan 1221
- 1784 CardMan 6020
- 3021 CardMan 3121
- 3610 CardMan 3620
- 3621 CardMan 3621
- 3821 CardMan 3821
- 4321 CardMan 4321
- 5121 CardMan 5121
- 5125 CardMan 5125
- 6622 CardMan 6121
- a011 CCID Smart Card Reader Keyboard
- a021 CCID Smart Card Reader
- a022 CardMan Smart@Link
- c000 CardMan 3x21 CS
- c001 CardMan 5121 CS
-076c Partner Tech
-076d Denso Corp.
-076e Kuan Tech Enterprise Co., Ltd
-076f Jhen Vei Electronic Co., Ltd
-0770 Welch Allyn, Inc - Medical Division
-0774 AmTRAN Technology Co., Ltd
-0775 Longshine Electronics Corp.
-0776 Inalways Corp.
-0777 Comda Enterprise Corp.
-0778 Volex, Inc.
-0779 Fairchild Semiconductor
-077a Sankyo Seiki Mfg. Co., Ltd
-077b Linksys
- 08be BEFCMU10 v4 Cable Modem
- 2219 WUSB11 V2.6 802.11b Adapter
- 2226 USB200M 100baseTX Adapter
-077c Forward Electronics Co., Ltd
- 0005 NEC Keyboard
-077d Griffin Technology
- 0223 IMic Audio In/Out
- 0405 iMate, ADB Adapter
- 0410 PowerMate
- 041a PowerWave
- 07af iMic
- 627a Radio SHARK
-077f Well Excellent & Most Corp.
-0781 SanDisk Corp.
- 0001 SDDR-05a ImageMate CompactFlash Reader
- 0002 SDDR-31 ImageMate II CompactFlash Reader
- 0005 SDDR-05b (CF II) ImageMate CompactFlash Reader
- 0100 ImageMate SDDR-12
- 0200 SDDR-09 (SSFDC) ImageMate SmartMedia Reader [eusb]
- 0400 SecureMate SD/MMC Reader
- 0621 SDDR-86 Imagemate 6-in-1 Reader
- 0720 Sansa C200 series in recovery mode
- 0729 Sansa E200 series in recovery mode
- 0810 SDDR-75 ImageMate CF-SM Reader
- 0830 ImageMate CF/MMC/SD Reader
- 1234 Cruzer Mini Flash Drive
- 5150 SDCZ2 Cruzer Mini Flash Drive (thin)
- 5151 Cruzer Micro 256/512MB Flash Drive
- 5153 Cruzer USB-Flash-Drive
- 5406 Cruzer Micro 1/4GB Flash Drive
- 5408 Cruzer Titanium U3
- 6100 Ultra II SD Plus 2GB
- 7100 Cruzer Mini
- 7101 Pen Flash
- 7102 Cruzer Mini
- 7103 Cruzer Mini
- 7104 Cruzer Micro Mini 256MB Flash Drive
- 7105 Cruzer Mini
- 7106 Cruzer Mini
- 7112 Cruzer Micro 128MB Flash Drive
- 7113 Cruzer Micro 256MB Flash Drive
- 7114 Cruzer Mini
- 7115 Cruzer Mini
- 7420 Sansa E200 series (mtp)
- 7421 Sansa E200 series
- 7432 Sansa Clip (mtp)
- 7433 Sansa Clip (msc)
- 7450 Sansa C250
- 7451 Sansa C240
- 7480 Sansa Connect
- 7481 Sansa Connect (in recovery mode)
- 8181 Pen Flash
- 8183 Hi-Speed Mass Storage Device
- 8185 SDCZ2 Cruzer Mini Flash Drive (older, thick)
- 8888 Card Reader
- 8889 SDDR-88 Imagemate 8-in-1 Reader
- 8919 Card Reader
- 8989 ImageMate 12-in-1 Reader
- 9191 ImageMate CF
- 9219 Card Reader
- 9292 ImageMate CF Reader/Writer
- 9393 ImageMate SD-MMC
- 9595 ImageMate xD-SM
- 9797 ImageMate MS-PRO
- 9919 Card Reader
- 9999 SDDR-99 5-in-1 Reader
- a7e8 SDDR-113 MicroMate SDHC Reader
- b2b3 SDDR-103 MobileMate SD+ Reader
-0782 Trackerball
-0783 C3PO
- 0003 LTC31 SmartCard Reader
-0784 Vivitar, Inc.
- 0100 Vivicam 2655
- 1310 Vivicam 3305
- 1688 Vivicam 3665
- 1689 Gateway DC-M42/Labtec DC-505/Vivitar Vivicam 3705
- 2620 AOL Photocam Plus
- 2888 Polaroid DC700
- 3330 Nytec ND-3200 Camera
- 4300 Traveler D1
- 5260 Werlisa Sport PX 100 / JVC GC-A33 Camera
- 5300 Pretec dc530
-0785 NTT-ME
- 0001 MN128mini-V ISDN TA
- 0003 MN128mini-J ISDN TA
-0789 Logitec Corp.
- 0026 LHD Device
- 0033 DVD Multi-plus unit LDR-H443SU2
- 0063 LDR Device
- 0064 LDR-R Device
- 00b3 DVD Multi-plus unit LDR-H443U2
- 010c Realtek RTL8187 Wireless 802.11g 54Mbps Network Adapter
-078b Happ Controls, Inc.
- 0010 Driving UGCI
- 0020 Flying UGCI
- 0030 Fighting UGCI
-078c GTCO/CalComp
- 0400 Digitizer (Whiteboard)
-078e Brincom, Inc.
-0790 Pro-Image Manufacturing Co., Ltd
-0791 Copartner Wire and Cable Mfg. Corp.
-0792 Axis Communications AB
-0793 Wha Yu Industrial Co., Ltd
-0794 ABL Electronics Corp.
-0795 RealChip, Inc.
-0796 Certicom Corp.
-0797 Grandtech Semiconductor Corp.
- 6801 Flatbed Scanner
- 6802 InkJet Color Printer
- 8001 SmartCam
- 801a Typhoon StyloCam
- 801c Meade Binoculars/Camera
- 8901 ScanHex SX-35a
- 8909 ScanHex SX-35b
- 8911 ScanHex SX-35c
-0798 Optelec
- 0001 Braille Voyager
-079b Sagem
- 0027 USB-Serial Controller
- 004a XG-760A
- 004b Wi-Fi 11g adapter
- 0056 Agfa AP1100 Photo Printer
- 0062 XG-76NA
-079d Alfadata Computer Corp.
- 0201 GamePort Adapter
-07a1 Digicom S.p.A.
- d952 Palladio USB V.92 Modem
-07a2 National Technical Systems
-07a3 Onnto Corp.
-07a4 Be, Inc.
-07a6 ADMtek, Inc.
- 07c2 AN986A Ethernet
- 0986 AN986 Pegasus Ethernet
- 8266 Infineon WildCard-USB Wireless LAN Adapter
- 8511 ADM8511 Pegasus II Ethernet
- 8513 AN8513 Ethernet
- 8515 AN8515 Ethernet
-07aa Corega K.K.
- 0001 Ether USB-T Ethernet [klsi]
- 0004 FEther USB-TX Ethernet [pegasus]
- 000c WirelessLAN USB-11
- 000d FEther USB-TXS
- 0012 Stick-11 802.11b Adapter
- 0017 FEther USB2-TX
- 001a ULUSB-11 Key
- 002f CG-WLUSB2GNL
- 7613 Stick-11 V2 802.11b Adapter
- 9601 FEther USB-TXC
-07ab Freecom Technologies
- fc01 IDE bridge
- fc02 Cable II USB-2
- fc03 USB2-IDE IDE bridge
- fcf8 Freecom Classic SL Network Drive
-07af Microtech
- 0004 SCSI-DB25 SCSI Bridge [shuttle]
- 0005 SCSI-HD50 SCSI Bridge [shuttle]
- 0006 CameraMate SmartMedia and CompactFlash Card Reader [eusb/shuttle]
- fc01 Freecom USB-IDE
-07b0 Trust Technologies
- 0001 ISDN TA
- 0002 ISDN TA128 Plus
- 0003 ISDN TA128 Deluxe
- 0005 ISDN TA128 SE
- 0006 ISDN TA128 CE
- 0007 ISDN TA
- 0008 ISDN TA
-07b1 IMP, Inc.
-07b2 Motorola BCS, Inc.
- 0100 SURFboard Voice over IP Cable Modem
- 0900 SURFboard Gateway
- 0950 SURFboard SBG950 Gateway
- 1000 SURFboard SBG1000 Gateway
- 4100 SurfBoard SB4100 Cable Modem
- 4200 SurfBoard SB4200 Cable Modem
- 4210 SurfBoard 4210 Cable Modem
- 4220 SURFboard SB4220 Cable Modem
- 4500 CG4500 Communications Gateway
- 450b CG4501 Communications Gateway
- 450e CG4500E Communications Gateway
- 5100 SurfBoard SB5100 Cable Modem
- 5101 SurfBoard SB5101 Cable Modem
- 5120 SurfBoard SB5120 Cable Modem (RNDIS)
- 7030 Wireless Adapter WU830G
-07b3 Plustek, Inc.
- 0001 OpticPro 1212U Scanner
- 0003 Scanner
- 0010 OpticPro U12 Scanner
- 0011 OpticPro U24 Scanner
- 0013 OpticPro UT12 Scanner
- 0014 Scanner
- 0015 OpticPro U24 Scanner
- 0017 OpticPro UT12/16/24 Scanner
- 0204 Scanner
- 0400 OpticPro 1248U Scanner
- 0401 OpticPro 1248U Scanner #2
- 0403 OpticPro U16B Scanner
- 0404 Scanner
- 0405 A8 Namecard-s Controller
- 0406 A8 Namecard-D Controller
- 0410 Scanner
- 0412 Scanner
- 0800 OpticPro ST48 Scanner
- 0c03 OpticPro ST64+ Scanner
-07b4 Olympus Optical Co., Ltd
- 0100 Camedia C-2100/C-3000 Ultra Zoom Camera
- 0102 Camedia E-10/C-220/C-50 Camera
- 0105 Camedia C-310Z/C-700/C-750UZ/C-755/C-765UZ/C-3040/C-4000/C-5050Z/D-560/C-3020Z Zoom Camera
- 0109 C-370Z/D-535Z/X-450
- 0112 MAUSB-100 xD Card Reader
- 0113 Mju 500
- 0114 C-350Z Camera
- 0118 Mju Mini Digital/Mju Digital 500 Camera
- 0184 P-S100 port
- 0203 Digital Voice Recorder DW-90
- 0206 Digital Voice Recorder DS-330
- 0207 Digital Voice Recorder & Camera W-10
- 0209 Digital Voice Recorder DM-20
- 020d Digital Voice Recorder VN-240PC
-07b5 Mega World International, Ltd
- 0017 Joystick
- 0213 Thrustmaster Firestorm Digital 3 Gamepad
- 9902 GamePad
-07b6 Marubun Corp.
-07b7 TIME Interconnect, Ltd
-07b8 D-Link Corp.
- 110c XX1
- 1201 IEEE 802.11b Adapter
- 200c XX2
- 2573 Wireless LAN Card
- 4000 DU-E10 Ethernet [klsi]
- 4002 DU-E100 Ethernet [pegasus]
- 4003 1/10/100 Ethernet Adapter
- 4004 XX4
- 4007 XX5
- 400b XX6
- 400c XX7
- 401a RTL8151
- 4102 USB 1.1 10/100M Fast Ethernet Adapter
- 4104 XX9
- 420a UF200 Ethernet
- 6001 WL54
- a001 Wireless Network Adapter
- abc1 DU-E10 Ethernet [pegasus]
- b000 BWU613
- b02a AboCom Bluetooth Device
- b02b Bluetooth dongle
- b02c BCM92045DG-Flash with trace filter
- b02d BCM92045DG-Flash with trace filter
- b02e BCM92045DG-Flash with trace filter
- b030 BCM92045DG-Flash with trace filter
- b031 BCM92045DG-Flash with trace filter
- b032 BCM92045DG-Flash with trace filter
- b033 BCM92045DG-Flash with trace filter
- b21a 802.11g Wireless Adapter
- b21b HWU54DM
- b21c RT2573
- b21d RT2573
- b21e RT2573
- b21f WUG2700
- d011 MP3 Player
- e001 Mass Storage Device
- e002 Mass Storage Device
- e003 Mass Storage Device
- e004 Mass Storage Device
- e005 Mass Storage Device
- e006 Mass Storage Device
- e007 Mass Storage Device
- e008 Mass Storage Device
- e009 Mass Storage Device
- e00a Mass Storage Device
- e4f0 Card Reader Driver
- f101 DSB-560 Modem [atlas]
-07bc Canon Computer Systems, Inc.
-07bd Webgear, Inc.
-07be Veridicom
-07c0 Code Mercenaries Hard- und Software GmbH
- 1121 The Claw
- 1500 IO-Warrior 40
- 1501 IO-Warrior 24
- 1502 IO-Warrior 48
- 1503 IO-Warrior 28
-07c1 Keisokugiken
- 0068 HKS-0200 USBDAQ
-07c4 Datafab Systems, Inc.
- 0102 USB to LS120
- 0103 USB to IDE
- 1234 USB to ATAPI
- a000 CompactFlash Card Reader
- a001 CompactFlash & SmartMedia Card Reader [eusb]
- a002 Disk Drive
- a003 Datafab-based Reader
- a004 USB to MMC Class Drive
- a005 CompactFlash & SmartMedia Card Reader
- a006 SmartMedia Card Reader
- a007 Memory Stick Class Drive
- a103 MDSM-B reader
- a107 USB to Memory Stick (LC1) Drive
- a109 LC1 CompactFlash & SmartMedia Card Reader
- a10b USB to CF+MS(LC1)
- a200 DF-UT-06 Hama MMC/SD Reader
- a400 CompactFlash & Microdrive Reader
- a600 Card Reader
- ad01 Mass Storage Device
- ae01 Mass Storage Device
- af01 Mass Storage Device
- b000 USB to CF(LC1)
- b001 USB to CF+PCMCIA
- b004 MMC/SD Reader
- b006 USB to PCMCIA
- b00a USB to CF+SD Drive(LC1)
- b00b USB to Memory Stick(LC1)
-07c5 APG Cash Drawer
-07c6 ShareWave, Inc.
-07c7 Powertech Industrial Co., Ltd
-07c8 B.U.G., Inc.
- 0202 MN128-SOHO PAL
-07c9 Allied Telesyn International
- b100 AT-USB100
-07ca AVerMedia Technologies, Inc.
- 0002 AVerTV PVR USB/EZMaker Pro Device
- 0026 AVerTV
- 1228 MPEG-2 Capture Device (M038)
- e880 MPEG-2 Capture Device (E880)
- e882 MPEG-2 Capture Device (E882)
-07cb Kingmax Technology, Inc.
-07cc Carry Computer Eng., Co., Ltd
- 0000 CF Card Reader
- 0001 Reader (UICSE)
- 0002 Reader (UIS)
- 0003 SM Card Reader
- 0004 SM/CF/PCMCIA Card Reader
- 0005 Reader (UISA2SE)
- 0006 SM/CF/PCMCIA Card Reader
- 0007 Reader (UISA6SE)
- 000c SM/CF Card Reader
- 000d SM/CF Card Reader
- 000e Reader (UISDA)
- 000f Reader (UICLIK)
- 0010 Reader (UISMA)
- 0012 Reader (UISC6SE-FLASH)
- 0014 Litronic Fortezza Reader
- 0030 Mass Storage (UISDMC12S)
- 0040 Mass Storage (UISDMC13S)
- 0100 Reader (UID)
- 0101 Reader (UIM)
- 0102 Reader (UISDMA)
- 0103 Reader (UISDMC)
- 0104 Reader (UISDM)
- 0200 6-in-1 Card Reader
- 0201 Mass Storage (UISDMC1S & UISDMC3S)
- 0202 Mass Storage (UISDMC5S)
- 0203 Mass Storage (UISMC5S)
- 0204 Mass Storage (UIM4/5S & UIM7S)
- 0205 Mass Storage (UIS4/5S & UIS7S)
- 0206 Mass Storage (UISDMC10S & UISDMC11S)
- 0207 Mass Storage (UPIDMA)
- 0208 Mass Storage (UCFC II)
- 0210 Mass Storage (UPIXXA)
- 0213 Mass Storage (UPIDA)
- 0214 Mass Storage (UPIMA)
- 0215 Mass Storage (UPISA)
- 0217 Mass Storage (UPISDMA)
- 0223 Mass Storage (UCIDA)
- 0224 Mass Storage (UCIMA)
- 0225 Mass Storage (UIS7S)
- 0227 Mass Storage (UCIDMA)
- 0234 Mass Storage (UIM7S)
- 0235 Mass Storage (UIS4S-S)
- 0237 Velper (UISDMC4S)
- 0300 6-in-1 Card Reader
- 0301 6-in-1 Card Reader
- 0303 Mass Storage (UID10W)
- 0304 Mass Storage (UIM10W)
- 0305 Mass Storage (UIS10W)
- 0308 Mass Storage (UIC10W)
- 0309 Mass Storage (UISC3W)
- 0310 Mass Storage (UISDMA2W)
- 0311 Mass Storage (UISDMC14W)
- 0320 Mass Storage (UISDMC4W)
- 0321 Mass Storage (UISDMC37W)
- 0330 WINTERREADER Reader
- 0350 9-in-1 Card Reader
- 0500 Mass Storage
- 0501 Mass Storage
-07cd Elektor
- 0001 USBuart Serial Port
-07cf Casio Computer Co., Ltd
- 1001 QV-8000SX/5700/3000EX Digicam; Exilim EX-M20
- 1003 Exilim EX-S500
- 1004 Exilim EX-Z120
- 1011 USB-CASIO PC CAMERA
- 2002 E-125 Cassiopeia Pocket PC
- 3801 WMP-1 MP3-Watch
- 4001 Label Printer KL-P1000
- 4007 CW50 Device
- 4104 Cw75 Device
- 4107 CW-L300 Device
- 4500 LV-20 Digital Camera
- 6801 PL-40R
- 6802 MIDI Keyboard
-07d0 Dazzle
- 0001 Digital Video Creator I
- 0002 Global Village VideoFX Grabber
- 0003 Fusion Model DVC-50 Rev 1 (NTSC)
- 0004 DVC-800 (PAL) Grabber
- 0005 Fusion Video and Audio Ports
- 0006 DVC 150 Loader Device
- 0007 DVC 150
- 0327 Fusion Digital Media Reader
- 1001 DM-FLEX DFU Adapter
- 1002 DMHS2 DFU Adapter
- 1102 CF Reader/Writer
- 1103 SD Reader/Writer
- 1104 SM Reader/Writer
- 1105 MS Reader/Writer
- 1106 xD/SM Reader/Writer
- 1202 MultiSlot Reader/Writer
- 2000 FX2 DFU Adapter
- 2001 eUSB CompactFlash Reader
- 4100 Kingsun SF-620 Infrared Adapter
- 4959 Kingsun KS-959 Infrared Adapter
-07d1 D-Link System
- 13ec VvBus for Helium 2xx
- 13ed VvBus for Helium 2xx
- 13f1 DSL-302G Modem
- 13f2 DSL-502G Router
- 3a07 WUA-2340 Adapter
- 3a08 predator Bootloader Download
- 3a0d DWA-120 Wireless 108G Adapter
- 3b01 AirPlus G DWL-G122 Wireless Adapter
- 3b10 RangeBooster N Adapter
- 3b11 Wireless N Adapter DWA-130
- 3c03 DWL-G122 802.11g Adapter [ralink rt73]
- 3c04 WUA-1340
- 3c05 EH103 Wireless G Adapter
- 3c07 Wireless G DWA-110 Adapter
- 3c09 DWA-140 802.11n Adapter [ralink rt2870]
- 5100 Remote NDIS Device
- f101 DBT-122 Bluetooth
- fc01 DBT-120 Bluetooth Adapter
-07d2 Aptio Products, Inc.
-07d3 Cyberdata Corp.
-07d7 GCC Technologies, Inc.
-07da Arasan Chip Systems
-07de Diamond Multimedia
- 2820 VC500 Video Capture Dongle
-07df David Electronics Co., Ltd
-07e1 Ambient Technologies, Inc.
- 5201 V.90 Modem
-07e2 Elmeg GmbH & Co., Ltd
-07e3 Planex Communications, Inc.
-07e4 Movado Enterprise Co., Ltd
- 0967 SCard R/W CSR-145
- 0968 SCard R/W CSR-145
-07e5 QPS, Inc.
- 05c2 IDE-to-USB2.0 PCA
- 5c01 Que! CDRW
-07e6 Allied Cable Corp.
-07e7 Mirvo Toys, Inc.
-07e8 Labsystems
-07ea Iwatsu Electric Co., Ltd
-07eb Double-H Technology Co., Ltd
-07ec Taiyo Electric Wire & Cable Co., Ltd
-07ee Torex Retail (formerly Logware)
- 0002 Cash Drawer I/F
-07ef STSN
- 0001 Internet Access Device
-07f6 Circuit Assembly Corp.
-07f7 Century Corp.
- 0005 ScanLogic/Century Corporation uATA
- 011e Century USB Disk Enclosure
-07f9 Dotop Technology, Inc.
-07fa Draytek
- 0778 miniVigor 128 ISDN TA
- 1012 BeWAN ADSL USB ST (grey)
- a904 BeWAN ADSL
- a905 BeWAN ADSL ST
-07fd Mark of the Unicorn
- 0000 FastLane MIDI Interface
- 0001 FastLane Quad MIDI Interface
- 0002 MOTU Audio for 64 bit
-0801 Mag-Tek
- 0002 Mini Swipe Reader
-0802 Mako Technologies, LLC
-0803 Zoom Telephonics, Inc.
- 1300 V92 Faxmodem
- 4310 Wireless-G
- 5241 Cable Modem
- 5551 DSL Modem
- 9700 2986L FaxModem
- 9800 Cable Modem
- a312 Wireless-G
-0809 Genicom Technology, Inc.
-080a Evermuch Technology Co., Ltd
-080c Datalogic S.p.A.
- 0300 Gryphon D120 Barcode Scanner
- 0400 Gryphon D120 Barcode Scanner
- 0500 Gryphon D120 Barcode Scanner
- 0600 Gryphon M100 Barcode Scanner
-080d Teco Image Systems Co., Ltd
- 0102 Hercules Scan@home 48
- 0104 3.2Slim
- 0110 UMAX AstraSlim 1200 Scanner
-0810 Personal Communication Systems, Inc.
-0813 Mattel, Inc.
- 0001 Intel Play QX3 Microscope
- 0002 Dual Mode Camera Plus
-081a MG Logic
- 1000 Duo Pen Tablet
-081b Indigita Corp.
- 0600 Storage Adapter
- 0601 Storage Adapter
-081c Mipsys
-081e AlphaSmart, Inc.
- df00 Handheld
-0822 Reudo Corp.
- 2001 IRXpress Infrared Device
-0825 GC Protronics
-0826 Data Transit
-0827 BroadLogic, Inc.
-0828 Sato Corp.
-0829 DirecTV Broadband, Inc. (Telocity)
-082d Handspring
- 0100 Visor
- 0200 Treo
- 0300 Treo 600
- 0400 Handheld
- 0500 Handheld
- 0600 Handheld
-0830 Palm, Inc.
- 0001 m500
- 0002 m505
- 0003 m515
- 0004 Handheld
- 0005 Handheld
- 0006 Handheld
- 0010 Handheld
- 0011 Handheld
- 0012 Handheld
- 0013 Handheld
- 0014 Handheld
- 0020 i705
- 0021 Handheld
- 0022 Handheld
- 0023 Handheld
- 0024 Handheld
- 0030 Handheld
- 0031 Tungsten W
- 0032 Handheld
- 0033 Handheld
- 0034 Handheld
- 0040 m125
- 0041 Handheld
- 0042 Handheld
- 0043 Handheld
- 0044 Handheld
- 0050 m130
- 0051 Handheld
- 0052 Handheld
- 0053 Handheld
- 0054 Handheld
- 0060 Tungsten C/E/T/T2/T3 / Zire 71
- 0061 Lifedrive / Treo 650/680 / Tungsten E2/T5/TX / Zire 21/31/72 / Z22
- 0062 Handheld
- 0063 Handheld
- 0064 Handheld
- 0070 Zire
- 0071 Handheld
- 0072 Handheld
- 0080 Serial Adapter [for Palm III]
- 0081 Handheld
- 0082 Handheld
-0832 Kouwell Electronics Corp.
- 5850 Cable
-0833 Sourcenext Corp.
- 012e KeikaiDenwa 8 with charger
- 039f KeikaiDenwa 8
-0835 Action Star Enterprise Co., Ltd
-0839 Samsung Techwin Co., Ltd
- 0005 Digimax Camera
- 0008 Digimax 230 Camera
- 0009 Digimax 340
- 000a Digimax 410
- 000e Digimax 360
- 0010 Digimax 300
- 1003 Digimax 210SE
- 1005 Digimax 220
- 1009 Digimax V4
- 1012 6500 Document Camera
- 1058 S730 Camera
- 1542 Digimax 50 Duo
- 3000 Digimax 35 MP3
-083a Accton Technology Corp.
- 1046 10/100 Ethernet [pegasus]
- 1060 HomeLine Adapter
- 1f4d SMC8013WG Broadband Remote NDIS Device
- 3046 10/100 Series Adapter
- 3060 1/10/100 Adapter
- 3501 2664W
- 3502 WN3501D Wireless Adapter
- 3503 T-Sinus 111 Wireless Adapter
- 4501 T-Sinus 154data
- 4505 SMCWUSB-G
- 5046 SpeedStream 10/100 Ethernet [pegasus]
- 5501 Wireless Adapter 11g
- 6500 Cable Modem
- 6618 802.11n Wireless Adapter
- 7522 802.11N Wireless Adapter
- a618 SMC EZ Connect N Draft 11n Wireless Adapter
- b004 CPWUE001 USB/Ethernet Adapter
- b522 EZ Connect N Draft 11n Wireless USB2.0 Adapter
- bb01 BlueExpert Bluetooth Device
- c003 802.11b Wireless Adapter
- c501 Zoom Wireless-G
- c561 802.11a/g Wireless Adapter
- e501 ZD1211B
- f501 802.11g Wireless Adapter
- f502 802.11g Wireless Adapter
-083f Global Village
- b100 TelePort V.90 Fax/Modem
-0840 Argosy Research, Inc.
- 0060 Storage Adapter Bridge Module
-0841 Rioport.com, Inc.
- 0001 Rio 500
-0844 Welland Industrial Co., Ltd
-0846 NetGear, Inc.
- 1001 EA101 Ethernet [klsi]
- 1002 Ethernet
- 1020 Ethernet 10/100, USB1.1
- 1040 USB 2.0 Ethernet
- 4110 MA111 WiFi (v1)
- 4200 WG121 WiFi (v1)
- 4210 WG121 WiFi (v2)
- 4220 WG111 WiFi (v1)
- 4230 MA111 WiFi (v2)
- 4240 WG111 WiFi (v2)
- 4260 WG111v3 802.11g Adapter [realtek RTL8187B]
- 4300 WG111U
- 4301 WG111U (no firmware)
- 6a00 WG111 WiFi (v2)
- 7100 WN121T Wireless Adapter
- 9000 RangeMax NEXT Wireless-N Adapter WN111
- a001 PA101 Phoneline10X Adapter
-084d Minton Optic Industry Co., Inc.
- 0001 Jenoptik JD800i
- 0003 S-Cam F5 Digital Camera
- 0011 Argus DC3500 Digital Camera
- 0014 Praktica DC 32
- 0019 Praktica DPix3000
- 0025 Praktica DC 60
- 1001 ScanHex SX-35d
-084e KB Gear
- 0001 KBGear JamCam
- 1002 Pablo Tablet
-084f Empeg
- 0001 Empeg-Car Mark I/II Player
-0850 Fast Point Technologies, Inc.
-0851 Macronix International Co., Ltd
- 1542 SiPix Blink
- 1543 Maxell WS30 Slim Digital Camera
- a168 MXIC
-0852 CSEM
-0853 Topre Corporation
- 0100 HHKB Professional
-0854 ActiveWire, Inc.
- 0100 I/O Board
- 0101 I/O Board, rev1
-0856 B&B Electronics
- ac01 uLinks USOTL4 RS422/485 Adapter
-0858 Hitachi Maxell, Ltd
- 3102 Bluetooth Device
- ffff Maxell module with BlueCore in DFU mode
-0859 Minolta Systems Laboratory, Inc.
-085a Xircom
- 0001 Portstation Dual Serial Port
- 0003 Portstation Parallel Port
- 0008 Ethernet
- 0009 Ethernet
- 000b Portstation Dual PS/2 Port
- 0021 1 port to Serial Converter
- 0022 Parallel Port
- 0023 2 port to Serial Converter
- 0024 Parallel Port
- 0027 1 port to Serial Converter
- 0028 PortGear to SCSI Converter
- 0032 PortStation SCSI Module
- 003c Bluetooth Adapter
- 0299 Colorvision, Inc. Monitor Spyder
- 8021 1 port to Serial
- 8023 2 port to Serial
- 8027 PGSDB9 Serial Port
-085c ColorVision, Inc.
- 0200 Monitor Spyder
-0862 Teletrol Systems, Inc.
-0863 Filanet Corp.
-0864 NetGear, Inc.
- 4100 MA101 802.11b Adapter
- 4102 MA101 802.11b Adapter
-0867 Data Translation, Inc.
- 9812 ECON Data acquisition unit
- 9816 DT9816 ECON data acquisition module
- 9836 DT9836 data acquisition card
-086a Emagic Soft- und Hardware GmbH
- 0001 Unitor8
- 0002 AMT8
- 0003 MT4
-086c DeTeWe - Deutsche Telephonwerke AG & Co.
- 1001 Eumex 504PC ISDN TA
- 1002 Eumex 504PC (FlashLoad)
- 1003 TA33 ISDN TA
- 1004 TA33 (FlashLoad)
- 1005 Eumex 604PC HomeNet
- 1006 Eumex 604PC HomeNet (FlashLoad)
- 1007 Eumex 704PC DSL
- 1008 Eumex 704PC DSL (FlashLoad)
- 1009 Eumex 724PC DSL
- 100a Eumex 724PC DSL (FlashLoad)
- 100b OpenCom 30
- 100c OpenCom 30 (FlashLoad)
- 100d BeeTel Home 100
- 100e BeeTel Home 100 (FlashLoad)
- 1011 USB2DECT
- 1012 USB2DECT (FlashLoad)
- 1013 Eumex 704PC LAN
- 1014 Eumex 704PC LAN (FlashLoad)
- 1021 OpenCom 40
- 1022 OpenCom 40 (FlashLoad)
- 1023 OpenCom 45
- 1024 OpenCom 45 (FlashLoad)
- 1025 Sinus 61 data
- 1029 dect BOX
- 102c Eumex 604PC HomeNet [FlashLoad]
- 1030 Eumex 704PC DSL [FlashLoad]
- 1032 OpenCom 40 [FlashLoad]
- 1033 OpenCom 30 plus
- 1034 OpenCom 30 plus (FlashLoad)
- 1055 Eumex 220 ISDN TA
- 2000 OpenCom 1000
-086e System TALKS, Inc.
- 1920 SGC-X2UL
-086f MEC IMEX, Inc.
-0870 Metricom
- 0001 Ricochet GS
-0871 SanDisk, Inc.
- 0001 SDDR-01 Compact Flash Reader
- 0002 SDDR-31 Compact Flash Reader
- 0005 SDDR-05 Compact Flash Reader
-0873 Xpeed, Inc.
-0874 A-Tec Subsystem, Inc.
-0879 Comtrol Corp.
-087c Adesso/Kbtek America, Inc.
-087d Jaton Corp.
- 5704 Ethernet
-087e Fujitsu Computer Products of America
-087f Virtual IP Group, Inc.
-0880 APT Technologies, Inc.
-0883 Recording Industry Association of America (RIAA)
-0885 Boca Research, Inc.
-0886 XAC Automation Corp.
- 0630 Intel PC Camera CS630
-0887 Hannstar Electronics Corp.
-088b MassWorks, Inc.
- 4944 MassWorks ID-75 TouchScreen
-0892 DioGraphy, Inc.
- 0101 Smartdio Reader/Writer
-089c United Technologies Research Cntr.
-089d Icron Technologies Corp.
-089e NST Co., Ltd
-089f Primex Aerospace Co.
-08a5 e9, Inc.
-08a8 Andrea Electronics
-08ae Macally (Mace Group, Inc.)
-08b4 Sorenson Vision, Inc.
-08b8 J. Gordon Electronic Design, Inc.
- 01f4 USBSIMM1
-08b9 RadioShack Corp. (Tandy)
-08bb Texas Instruments Japan
- 2702 Speakers
- 2900 PCM2900 Audio Codec
- 2904 PCM2904 Audio Codec
-08bd Citizen Watch Co., Ltd
- 1100 X1-USB Floppy
-08c3 Precise Biometrics
- 0001 100 SC
- 0002 100 A
- 0003 100 SC BioKeyboard
- 0006 100 A BioKeyboard
- 0100 100 MC ISP
- 0101 100 MC FingerPrint and SmartCard Reader
- 0300 100 AX
- 0400 100 SC
- 0401 150 MC
- 0402 200 MC FingerPrint and SmartCard Reader
- 0404 100 SC Upgrade
- 0405 150 MC Upgrade
- 0406 100 MC Upgrade
-08c4 Proxim, Inc.
- 02f2 Farallon Home Phoneline Adapter
-08c7 Key Nice Enterprise Co., Ltd
-08c8 2Wire, Inc.
-08c9 Nippon Telegraph and Telephone Corp.
-08ca Aiptek International, Inc.
- 0010 Tablet
- 0020 APT-6000U Tablet
- 0021 APT-2 Tablet
- 0022 Tablet
- 0023 Tablet
- 0024 Tablet
- 0100 Pen Drive
- 0102 DualCam
- 0103 Pocket DV Digital Camera
- 0104 Pocket DVII
- 0105 Mega DV(Disk)
- 0106 Pocket DV3100+
- 0107 Pocket DV 3100
- 0109 Nisis DV4 Digital Camera
- 010a Trust 738AV LCD PV Mass Storage
- 0111 PenCam VGA Plus
- 2008 Mini PenCam 2
- 2010 Pocket CAM 3 Mega (webcam)
- 2011 Pocket CAM 3 Mega (storage)
- 2018 Pencam SD 2
- 2024 Pocket DV3500
- 2042 DV 5100M Composite Device
- 2043 DV 5100M(Disk)
-08cd Jue Hsun Ind. Corp.
-08ce Long Well Electronics Corp.
-08cf Productivity Enhancement Products
-08d1 smartBridges, Inc.
- 0001 smartNIC Ethernet [catc]
- 0003 smartNIC 2 PnP Ethernet
-08d3 Virtual Ink
-08d4 Fujitsu Siemens Computers
- 0009 SCR SmartCard Reader
-08d9 Increment P Corp.
-08dd Billionton Systems, Inc.
- 0112 Wireless LAN Adapter
- 0113 Wireless LAN Adapter
- 0986 USB-100N Ethernet [pegasus]
- 0987 USBLP-100 HomePNA Ethernet [pegasus]
- 0988 USBEL-100 Ethernet [pegasus]
- 1986 10/100 LAN Adapter
- 2103 DVB-T TV-Tuner Card-R
- 8511 USBE-100 Ethernet [pegasus2]
- 90ff USB2AR Ethernet
-08de ???
- 7a01 802.11b Adapter
-08df Spyrus, Inc.
- 0001 Rosetta Token V1
- 0002 Rosetta Token V2
- 0003 Rosetta Token V3
- 0a00 Lynks Interface
-08e3 Olitec, Inc.
- 0002 USB-RS232 Bridge
- 0100 Interface ADSL
- 0101 Interface ADSL
- 0102 ADSL
- 0301 RNIS
-08e4 Pioneer Corp.
-08e5 Litronic
-08e6 Gemplus
- 0001 GemPC-Touch 430
- 0430 GemPC430 SmartCard Reader
- 0432 GemPC432 SmartCard Reader
- 0435 GemPC435 SmartCard Reader
- 0437 GemPC433 SL SmartCard Reader
- 1359 UA SECURE STORAGE TOKEN
- 2202 Gem e-Seal Pro Token
- 3437 GemPC Twin SmartCard Reader
- 3438 GemPC Key SmartCard Reader
- 3478 PinPad Smart Card Reader
- 4433 GemPC433-Swap
- 5501 GemProx-PU Contactless Smart Card Reader
- ace0 UA HYBRID TOKEN
-08e7 Pan-International Wire & Cable
-08e8 Integrated Memory Logic
-08e9 Extended Systems, Inc.
- 0100 XTNDAccess IrDA Dongle
-08ea Ericsson, Inc., Blue Ridge Labs
- 00c9 ADSL Modem HM120dp Loader
- 00ca ADSL WAN Modem HM120dp
- 00ce HM230d Virtual Bus for Helium
- abba USB Driver for Bluetooth Wireless Technology
- abbb Bluetooth Device in DFU State
-08ec M-Systems Flash Disk Pioneers
- 0001 TravelDrive 2C
- 0002 TravelDrive 2C
- 0005 TravelDrive 2C
- 0008 TravelDrive 2C
- 0010 DiskOnKey
- 0011 DiskOnKey
- 0012 TravelDrive 2C
- 0014 TravelDrive 2C
- 0015 Kingston DataTraveler ELITE
- 0016 Kingston DataTraveler U3
- 0020 TravelDrive
- 0021 TravelDrive
- 0022 TravelDrive
- 0023 TravelDrive
- 0024 TravelDrive
- 0025 TravelDrive
- 0026 TravelDrive
- 0027 TravelDrive
- 0028 TravelDrive
- 0029 TravelDrive
- 0030 TravelDrive
- 0822 TravelDrive 2C
- 0832 Hi-Speed Mass Storage Device
- 0998 Kingston Data Traveler2.0 Disk Driver
- 0999 Kingston Data Traveler2.0 Disk Driver
- 1000 TravelDrive 2C
- 2000 TravelDrive 2C
- 2038 TravelDrive
- 2039 TravelDrive
- 204a TravelDrive
- 204b TravelDrive
-08ee CCSI/Hesso
-08f0 Corex Technologies
-08f1 CTI Electronics Corp.
-08f5 SysTec Co., Ltd
-08f6 Logic 3 International, Ltd
-08f7 Vernier
- 0001 LabPro
- 0002 EasyTemp
-08f8 Keen Top International Enterprise Co., Ltd
-08f9 Wipro Technologies
-08fa Caere
-08fb Socket Communications
-08fc Sicon Cable Technology Co., Ltd
-08fd Digianswer A/S
- 0001 Bluetooth Device
-08ff AuthenTec, Inc.
- 1600 AES1600
- 1610 AES1600
- 2500 AES2501
- 2501 AES2501
- 2502 AES2501
- 2503 AES2501
- 2504 AES2501
- 2505 AES2501
- 2506 AES2501
- 2507 AES2501
- 2508 AES2501
- 2509 AES2501
- 250a AES2501
- 250b AES2501
- 250c AES2501
- 250d AES2501
- 250e AES2501
- 250f AES2501
- 2510 AES2510
- 2580 AES2501 Fingerprint Sensor
- 2588 AES2501
- 2589 AES2501
- 258a AES2501
- 258b AES2501
- 258c AES2501
- 258d AES2501
- 258e AES2501
- 258f AES2501
- 3400 AES3400 TruePrint Sensor
- 3401 AES3400 Sensor
- 3402 AES3400 Sensor
- 3403 AES3400 Sensor
- 3404 AES3400 TruePrint Sensor
- 3405 AES3400 TruePrint Sensor
- 3406 AES3400 TruePrint Sensor
- 3407 AES3400 TruePrint Sensor
- 4902 BioMV with TruePrint AES3500
- 4903 BioMV with TruePrint AES3400
- 5500 AES4000
- 5501 AES4000 TruePrint Sensor
- 5503 AES4000 TruePrint Sensor
- 5505 AES4000 TruePrint Sensor
- 5507 AES4000 TruePrint Sensor
- 55ff AES4000 TruePrint Sensor
- 5700 AES3500 Fingerprint Reader
- 5701 AES3500 TruePrint Sensor
- 5702 AES3500 TruePrint Sensor
- 5703 AES3500 TruePrint Sensor
- 5704 AES3500-BZ TruePrint Sensor
- 5705 AES3500-BZ TruePrint Sensor
- 5706 AES3500-BZ TruePrint Sensor
- 5707 AES3500-BZ TruePrint Sensor
- 5710 AES3500 TruePrint Sensor
- 5711 AES3500 TruePrint Sensor
- 5712 AES3500 TruePrint Sensor
- 5713 AES3500 TruePrint Sensor
- 5714 AES3500-BZ TruePrint Sensor
- 5715 AES3500-BZ TruePrint Sensor
- 5716 AES3500-BZ TruePrint Sensor
- 5717 AES3500-BZ TruePrint Sensor
- 5730 AES3500 TruePrint Sensor
- 5731 AES3500 TruePrint Sensor
- 5732 AES3500 TruePrint Sensor
- 5733 AES3500 TruePrint Sensor
- 5734 AES3500-BZ TruePrint Sensor
- 5735 AES3500-BZ TruePrint Sensor
- 5736 AES3500-BZ TruePrint Sensor
- 5737 AES3500-BZ TruePrint Sensor
- afe3 FingerLoc Sensor Module (Anchor)
- afe4 FingerLoc Sensor Module (Anchor)
- afe5 FingerLoc Sensor Module (Anchor)
- afe6 FingerLoc Sensor Module (Anchor)
- fffd AES2510 Sensor (USB Emulator)
- ffff Sensor (Emulator)
-0900 Pinnacle Systems, Inc.
-0901 VST Technologies
- 0001 Hard Drive Adapter (TPP)
- 0002 SigmaDrive Adapter (TPP)
-0906 Faraday Technology Corp.
-0909 Audio-Technica Corp.
-090a Trumpion Microelectronics, Inc.
- 1001 T33520 USB Flash Card Controller
- 1100 Comotron C3310 MP3 player
- 1200 MP3 player
- 1540 Digitex Container Flash Disk
-090b Neurosmith
-090c Feiya Technology Corp.
- 1000 Memory Bar
- 1132 5-in-1 Card Reader
-090d Multiport Computer Vertriebs GmbH
-090e Shining Technology, Inc.
-090f Fujitsu Devices, Inc.
-0910 Alation Systems, Inc.
-0911 Philips Speech Processing
- 2512 SpeechMike Pro
-0912 Voquette, Inc.
-0915 GlobeSpan, Inc.
- 0001 DSL Modem
- 0002 ADSL ATM Modem
- 0005 LAN Modem
- 2000 802.11 Adapter
- 2002 802.11 Adapter
- 8000 ADSL LAN Modem
- 8005 DSL-302G Modem
- 8101 ADSL WAN Modem
- 8102 DSL-200 ADSL Modem
- 8103 DSL-200 ADSL Modem
- 8104 DSL-200 Modem
- 8400 DSL Modem
- 8401 DSL Modem
- 8402 DSL Modem
- 8500 DSL Modem
- 8501 DSL Modem
-0917 SmartDisk Corp.
- 0001 eFilm Reader-11 SM/CF
- 0002 eFilm Reader-11 SM
- 0003 eFilm Reader-11 CF
- 0200 FireFly
- 0201 FireLite
- 0202 STORAGE ADAPTER (FirePower)
- 0204 FlashTrax Storage
- 0205 STORAGE ADAPTER (CrossFire)
- 0206 FireFly 20G HDD
- 0207 FireLite
- 020f STORAGE ADAPTER (FireLite)
- da01 eFilm Reader-11 Test
- ffff eFilm Reader-11 (Class/PDR)
-0919 Tiger Electronics
- 0100 Fast Flicks Digital Camera
-091e Garmin International
- 0003 GPSmap (various models)
- 0004 Garmin iQue 3600
- 0200 Data Card Programmer (install)
- 1200 Data Card Programmer
-0920 Echelon Co.
- 7500 Network Interface
-0921 GoHubs, Inc.
- 1001 GoCOM232 Serial
-0922 Dymo-CoStar Corp.
- 0007 LabelWriter 330
- 0009 LabelWriter 310
-0923 IC Media Corp.
- 010f SIIG MobileCam
-0924 Xerox
- 23dd DocuPrint M760 (X760_USB)
- 3d5b Phaser 6115MFP TWAIN Scanner
- 420f WorkCentre PE220 Series
- 421f M20 Scanner
- 423b Printing Support
- ffef WorkCenter M15
- fffb DocuPrint M750 (X750_USB)
-0925 Lakeview Research
- 8101 Phidgets, Inc., 1-Motor PhidgetServo v2.0
- 8104 Phidgets, Inc., 4-Motor PhidgetServo v2.0
- 8800 WiseGroup Ltd, MP-8800 Quad Joypad
- 8866 WiseGroup Ltd, MP-8866 Dual Joypad
-0927 Summus, Ltd
-0928 Oxford Semiconductor, Ltd
-0929 American Biometric Co.
-092a Toshiba Information & Industrial Sys. And Services
-092b Sena Technologies, Inc.
-092f Northern Embedded Science/CAVNEX
- 0004 JTAG-4
- 0005 JTAG-5
-0930 Toshiba Corp.
- 0009 Gigabeat F/X (HDD audio player)
- 000c Gigabeat F (mtp)
- 0010 Gigabeat S (mtp)
- 0301 PCX1100U Cable Modem (WDM)
- 0302 PCX2000 Cable Modem (WDM)
- 0305 Cable Modem PCX3000
- 0307 Cable Modem PCX2500
- 0308 PCX2200 Cable Modem (WDM)
- 0309 PCX5000 Cable Modem (WDM)
- 030b Cable Modem PCX2600
- 0501 Bluetooth Controller
- 0502 Integrated Bluetooth
- 0503 Bluetooth Controller
- 0505 Integrated Bluetooth
- 0506 Integrated Bluetooth
- 0507 Bluetooth Adapter
- 0508 Integrated Bluetooth HCI
- 0509 BT EDR Dongle
- 0706 PocketPC e740
- 0707 Pocket PC e330 Series
- 0708 Pocket PC e350 Series
- 0709 Pocket PC e750 Series
- 070a Pocket PC e400 Series
- 070b Pocket PC e800 Series
- 1300 Wireless Broadband (CDMA EV-DO) SM-Bus Minicard Status Port
- 1301 Wireless Broadband (CDMA EV-DO) Minicard Status Port
- 1302 Wireless Broadband (3G HSDPA) SM-Bus Minicard Status Port
- 1303 Wireless Broadband (3G HSDPA) Minicard Status Port
- 1308 Broadband (3G HSDPA) SM-Bus Minicard Diagnostics Port
- 642f TravelDrive
- 6506 TravelDrive 2C
- 6507 TravelDrive 2C
- 6508 TravelDrive 2C
- 6509 TravelDrive 2C
- 6510 TravelDrive 2C
- 6517 TravelDrive 2C
- 6518 TravelDrive 2C
- 6519 Kingston DataTraveler 2.0 USB Stick
- 651a TravelDrive 2C
- 651b TravelDrive 2C
- 651c TravelDrive 2C
- 651d TravelDrive 2C
- 651e TravelDrive 2C
- 651f TravelDrive 2C
- 6520 TravelDrive 2C
- 6521 TravelDrive 2C
- 6522 TravelDrive 2C
- 6523 TravelDrive
- 6524 TravelDrive
- 6525 TravelDrive
- 6526 TravelDrive
- 6527 TravelDrive
- 6528 TravelDrive
- 6529 TravelDrive
- 652a TravelDrive
- 652b TravelDrive
- 652c TravelDrive
- 652d TravelDrive
- 652f TravelDrive
- 6530 TravelDrive
- 6531 TravelDrive
- 6532 256M USB Stick
- 6533 512M USB Stick
- 6534 TravelDrive
- 653c Kingston DataTraveler 2.0 USB Stick (512M)
- 653d Kingston DataTraveler 2.0 USB Stick (1GB)
- 653e USB Flash Memory
- 6540 TransMemory USB Flash Memory
-0931 Harmonic Data Systems, Ltd
-0932 Crescentec Corp.
- 0300 VideoAdvantage
- 0302 Syntek DC-112X
- 0320 VideoAdvantage
- 1100 Video Enhancement Device
- 1112 Veo Web Camera
- a311 Video Enhancement Device
-0933 Quantum Corp.
-0934 Netcom Systems
-0939 Lumberg, Inc.
-093a Pixart Imaging, Inc.
- 0007 CMOS 100K-R Rev. 1.90
- 010e Digital camera, CD302N/Elta Medi@ digi-cam/HE-501A
- 010f Argus DC-1610/DC-1620/Emprex PCD3600/Philips P44417B keychain camera/Precision Mini, Model HA513A/Vivitar Vivicam 55
- 2460 Q-TEC WEBCAM 100
- 2468 Cammaestro 2.5DU/X-EYE/Orite SC-120/ICGear TravelCam/Easy Snap Snake Eye WebCam
- 2470 SoC PC-Camera
- 2471 SoC PC-Camera
- 2500 USB Optical Mouse
- 2600 Typhoon Easycam USB 330K (newer)/Typhoon Easycam USB 2.0 VGA 1.3M/Sansun SN-508
- 2601 SPC 610NC Laptop Camera
-093b Plextor Corp.
- 0010 Storage Adapter
- 0011 PlexWriter 40/12/40U
- 0042 PX-712UF DVD RW
- a002 ConvertX M402U XLOADER
- a003 ConvertX AV100U A/V Capture Audio
- a004 ConvertX TV402U XLOADER
- a005 KWorld EMP Audio Device
- a102 ConvertX M402U A/V Capture
- a104 ConvertX PX-TV402U/NA
-093c Intrepid Control Systems, Inc.
- 0601 ValueCAN
- 0701 NeoVI Blue vehicle bus interface
-093d InnoSync, Inc.
-093e J.S.T. Mfg. Co., Ltd
-093f Olympia Telecom Vertriebs GmbH
-0940 Japan Storage Battery Co., Ltd
-0941 Photobit Corp.
-0942 i2Go.com, LLC
-0943 HCL Technologies India Private, Ltd
-0944 KORG, Inc.
-0945 Pasco Scientific
-0948 Kronauer music in digital
- 0301 USB Pro (24/48)
- 0302 USB Pro (24/96 playback)
- 0303 USB Pro (24/96 record)
- 0304 USB Pro (16/48)
- 1105 USB One
-094b Linkup Systems Corp.
-094d Cable Television Laboratories
-094f Yano
- 0101 U640MO-03
- 05fc METALWEAR-HDD
-0951 Kingston Technology
- 0008 Ethernet
- 000a KNU101TX 100baseTX Ethernet
- 1600 Data Traveler II Pen Drive
- 1601 Data Traveler II+ Pen Drive
- 1602 Data Traveler Mini
- 1603 Data Traveler 1GB/2GB Pen Drive
-0954 RPM Systems Corp.
-0955 NVidia Corp.
-0956 BSquare Corp.
-0957 Agilent Technologies, Inc.
- 0200 E-Video DC-350 Camera
- 0202 E-Video DC-350 Camera
-0958 CompuLink Research, Inc.
-0959 Cologne Chip AG
- 2bd0 Intelligent ISDN (Ver. 3.60.04)
-095a Portsmith
- 3003 Express Ethernet
-095b Medialogic Corp.
-095c K-Tec Electronics
-095d Polycom, Inc.
- 0001 Polycom ViaVideo
-0967 Acer (??)
- 0204 WarpLink 802.11b Adapter
-0968 Catalyst Enterprises, Inc.
-096e Feitian Technologies, Inc.
- 0802 ePass2000 (G&D STARCOS SPK 2.4)
-0971 Gretag-Macbeth AG
-0973 Schlumberger
- 0001 e-gate Smart Card
-0974 Datagraphix, a business unit of Anacomp
-0975 OL'E Communications, Inc.
-0976 Adirondack Wire & Cable
-0977 Lightsurf Technologies
-0978 Beckhoff GmbH
-0979 Jeilin Technology Corp., Ltd
- 0224 JL2005A Toy Camera
- 0226 JL2005A Toy Camera
-097a Minds At Work LLC
- 0001 Digital Wallet
-097b Knudsen Engineering, Ltd
-097c Marunix Co., Ltd
-097d Rosun Technologies, Inc.
-097f Barun Electronics Co., Ltd
-0981 Oak Technology, Ltd
-0984 Apricorn
- 0200 Hard Drive Storage (TPP)
-0985 cab Produkttechnik GmbH & Co KG
- 00a3 A3/200 or A3/300 Label Printer
-0986 Matsushita Electric Works, Ltd.
-098c Vitana Corp.
-098d INDesign
-098e Integrated Intellectual Property, Inc.
-098f Kenwood TMI Corp.
-0993 Gemstar eBook Group, Ltd
- 0001 REB1100 eBook Reader
- 0002 eBook
-0996 Integrated Telecom Express, Inc.
-099a Zippy Technology Corp.
- 610c EL-610 Super Mini Electroluminescent Keyboard
-09a3 PairGain Technologies
-09a4 Contech Research, Inc.
-09a5 VCON Telecommunications
-09a6 Poinchips
- 8001 Mass Storage Device
-09a7 Data Transmission Network Corp.
-09a8 Lin Shiung Enterprise Co., Ltd
-09a9 Smart Card Technologies Co., Ltd
-09aa Intersil Corp.
- 1000 Prism GT 802.11b/g Adapter
- 3642 Prism 2.x 802.11b Adapter
-09ab Japan Cash Machine Co., Ltd.
-09ae Tripp Lite
-09b2 Franklin Electronic Publishers, Inc.
- 0001 eBookman Palm Computer
-09b3 Altius Solutions, Inc.
-09b4 MDS Telephone Systems
-09b5 Celltrix Technology Co., Ltd
-09bc Grundig
- 0002 MPaxx MP150 MP3 Player
-09be MySmart.Com
- 0001 MySmartPad
-09bf Auerswald GmbH & Co. KG
- 00c0 COMpact 2104 ISDN PBX
- 00db COMpact 4410/2206 ISDN
- 00f1 COMfort System Telephones
-09c1 Arris Interactive LLC
- 1337 TOUCHSTONE DEVICE
-09c2 Nisca Corp.
-09c3 ActivCard, Inc.
- 0007 Reader V2
- 0008 SmartCard Reader
-09c4 ACTiSYS Corp.
- 0011 ACT-IR2000U IrDA Dongle
-09c5 Memory Corp.
-09cc Workbit Corp.
- 0404 BAFO USB-ATA/ATAPI Bridge Controller
-09cd Psion Dacom Home Networks, Ltd
-09ce City Electronics, Ltd
-09cf Electronics Testing Center, Taiwan
-09d1 NeoMagic, Inc.
-09d2 Vreelin Engineering, Inc.
-09d3 Com One
- 0001 ISDN TA
-09d7 Novatel Wireless
- 0100 NovAtel FlexPack GPS receiver
-09d9 KRF Tech, Ltd
-09da A4 Tech Co., Ltd
- 0006 Optical Mouse WOP-35 / Trust 450L Optical Mouse
- 000a Port Mouse
- 0018 Trust Human Interface Device
- 001a Wireless Mouse & RXM-15 Receiver
- 002a Wireless Optical Mouse NB-30
-09db Measurement Computing Corp.
- 0075 MiniLab 1008
- 0076 PMD-1024
- 007a PMD-1208LS
- 0081 USB-1616FS
- 0088 USB-1616FS internal hub
-09dc Aimex Corp.
-09dd Fellowes, Inc.
-09df Addonics Technologies Corp.
-09e1 Intellon Corp.
- 5121 MicroLink dLAN
-09e5 Jo-Dan International, Inc.
-09e6 Silutia, Inc.
-09e7 Real 3D, Inc.
-09e8 AKAI Professional M.I. Corp.
-09e9 Chen-Source, Inc.
-09eb IM Networks, Inc.
- 4331 iRhythm Tuner Remote
-09ef Xitel
- 0101 MD-Port DG2 MiniDisc Interface
-09f5 AresCom
- 0168 Network Adapter
- 0188 LAN Adapter
- 0850 Adapter
-09f6 RocketChips, Inc.
-09f7 Edu-Science (H.K.), Ltd
-09f8 SoftConnex Technologies, Inc.
-09f9 Bay Associates
-09fa Mtek Vision
-09fb Altera
-09ff Gain Technology Corp.
-0a00 Liquid Audio
-0a01 ViA, Inc.
-0a07 Ontrak Control Systems Inc.
- 0064 ADU100 Data Acquisition Interface
- 00c8 ADU200 Relay I/O Interface
- 00d0 ADU208 Data Acquisition Interface
-0a0b Cybex Computer Products Co.
-0a11 Xentec, Inc.
-0a12 Cambridge Silicon Radio, Ltd
- 0001 Bluetooth Dongle (HCI mode)
- 0002 Frontline Test Equipment Bluetooth Device
- 0003 Nanosira
- 0004 Nanosira WHQL Reference Radio
- 0005 Nanosira-Multimedia
- 0006 Nanosira-Multimedia WHQL Reference Radio
- 0007 Nanosira3-ROM
- 0008 Nanosira3-ROM
- 0009 Nanosira4-EDR WHQL Reference Radio
- 000a Nanosira4-EDR-ROM
- 000b Nanosira5-ROM
- 0043 Bluetooth Device
- 0100 Casira with BlueCore2-External Module
- 0101 Casira with BlueCore2-Flash Module
- 0102 Casira with BlueCore3-Multimedia Module
- 0103 Casira with BlueCore3-Flash Module
- 0104 Casira with BlueCore4-External Module
- 0105 Casira with BlueCore4-Multimedia Module
- 1000 Bluetooth Dongle (HID proxy mode)
- 1010 Bluetooth Device
- 1011 Bluetooth Device
- 1012 Bluetooth Device
- ffff USB Bluetooth Device in DFU State
-0a13 Telebyte, Inc.
-0a14 Spacelabs Medical, Inc.
-0a15 Scalar Corp.
-0a16 Trek Technology (S) PTE, Ltd
- 1111 ThumbDrive
- 8888 IBM USB Memory Key
- 9988 Trek2000 TD-G2
-0a17 Pentax Corp.
- 0004 Pentax Optio 330
- 0006 Pentax Optio S
- 0007 Pentax Optio 550
- 0009 Pentax Optio 33WR
- 000a Pentax Optio 555
- 000c Pentax Optio 43WR (mass storage mode)
- 000d Pentax Optio 43WR
- 0015 Pentax Optio S40/S5i
- 003b Pentax Optio 50 (mass storage mode)
- 003d Pentax Optio S55
- 0043 Pentax *ist DL
- 0047 Pentax Optio S60
- 0052 Optio 60 Digital Camera
- 006e Pentax K10D
- 0070 Pentax K100D
- 1001 EI2000 Camera powered by Digita!
-0a18 Heidelberger Druckmaschinen AG
-0a19 Hua Geng Technologies, Inc.
-0a21 Medtronic Physio Control Corp.
-0a22 Century Semiconductor USA, Inc.
-0a2c AK-Modul-Bus Computer GmbH
- 0008 GPIO Ports
-0a34 TG3 Electronics, Inc.
- 0110 Deck 82-key backlit keyboard
-0a39 Gilat Satellite Networks, Ltd
-0a3a PentaMedia Co., Ltd
- 0163 KN-W510U 1.0 Wireless LAN Adapter
-0a3c NTT DoCoMo, Inc.
-0a3d Varo Vision
-0a3f Swissonic AG
-0a43 Boca Systems, Inc.
-0a46 Davicom Semiconductor, Inc.
- 0268 ST268
- 9601 DM9601 To Fast Ethernet Adapter
-0a47 Hirose Electric
-0a48 I/O Interconnect
- 3233 Multimedia Card Reader
- 3239 Multimedia Card Reader
- 3258 Dane Elec zMate SD Reader
- 3259 Dane Elec zMate CF Reader
- 5000 MediaGear xD-SM
- 500a Mass Storage Device
- 500f Mass Storage Device
- 5010 Mass Storage Device
- 5011 Mass Storage Device
- 5014 Mass Storage Device
- 5020 Mass Storage Device
- 5021 Mass Storage Device
- 5022 Mass Storage Device
- 5023 Mass Storage Device
- 5024 Mass Storage Device
- 5025 Mass Storage Device
-0a4b Fujitsu Media Devices, Ltd
-0a4c Computex Co., Ltd
-0a4d Evolution Electronics, Ltd
- 0064 MK-225 Driver
- 0065 MK-225C Driver
- 0066 MK-225C Driver
- 0067 MK-425C Driver
- 0078 MK-37 Driver
- 0079 MK-37C Driver
- 007a MK-37C Driver
- 008c TerraTec MIDI MASTER
- 008d MK-249C Driver
- 008e MK-249C MIDI Keyboard
- 008f MK-449C Driver
- 0090 Keystation 49e Driver
- 0091 Keystation 61es Driver
- 00a0 MK-361 Driver
- 00a1 MK-361C Driver
- 00a2 MK-361C Driver
- 00a3 MK-461C MIDI Keyboard
- 00b5 Keystation Pro 88 Driver
- 00d2 E-Keys Driver
- 00f0 UC-16 Driver
- 00f1 X-Session Driver
- 00f5 UC-33e MIDI Controller
-0a4e Steinberg Soft-und Hardware GmbH
-0a4f Litton Systems, Inc.
-0a50 Mimaki Engineering Co., Ltd
-0a51 Sony Electronics, Inc.
-0a52 Jebsee Electronics Co., Ltd
-0a53 Portable Peripheral Co., Ltd
- 1000 Scanner
- 2000 Q-Scan A6 Scanner
- 2001 Q-Scan A6 Scanner
- 2013 Media Drive A6 Scanner
- 2014 Media Drive A6 Scanner
- 2015 BizCardReader 600C
- 2016 BizCardReader 600C
- 202a Scanshell-CSSN
- 3000 Q-Scan A8 Scanner
- 3002 Q-Scan A8 Reader
- 3015 BizCardReader 300G
- 5001 BizCardReader 900C
-0a5a Electronics For Imaging, Inc.
-0a5b EAsics NV
-0a5c Broadcom Corp.
- 0201 iLine10(tm) Network Adapter
- 2000 Bluetooth Device
- 2009 Bluetooth Controller
- 200a Bluetooth dongle
- 200f Bluetooth Controller
- 201d Bluetooth Device
- 201e IBM Integrated Bluetooth IV
- 2020 Bluetooth Dongle
- 2033 BCM2033 Bluetooth
- 2035 BCM2035 Bluetooth
- 2038 Blutonium Device
- 2039 Bluetooth Device
- 2045 Bluetooth Controller
- 2046 Bluetooth Device
- 2047 Bluetooth Device
- 205e Bluetooth Device
- 2100 Bluetooth 2.0+eDR dongle
- 2101 A-Link BlueUsbA2 Bluetooth
- 2102 ANYCOM Blue USB-200/250
- 2110 Bluetooth Controller
- 2111 ANYCOM Blue USB-UHE 200/250
- 2120 2045 Bluetooth 2.0 USB-UHE Device with trace filter
- 2121 BCM2210 Bluetooth
- 2122 Bluetooth 2.0+EDR dongle
- 2130 2045 Bluetooth 2.0 USB-UHE Device with trace filter
- 2131 2045 Bluetooth 2.0 Device with trace filter
- 6300 Pirelli Remote NDIS Device
-0a5d Diatrend Corp.
-0a5f Zebra
- 0009 LP2844 Printer
- 930a Printer
-0a62 MPMan
- 0010 MPMan MP-F40 MP3 Player
-0a66 ClearCube Technology
-0a67 Medeli Electronics Co., Ltd
-0a68 Comaide Corp.
-0a69 Chroma ate, Inc.
-0a6b Green House Co., Ltd
- 0001 Compact Flash R/W with MP3 player
-0a6c Integrated Circuit Systems, Inc.
-0a6d UPS Manufacturing
-0a6e Benwin
-0a6f Core Technology, Inc.
- 0400 Xanboo
-0a70 International Game Technology
-0a72 Sanwa Denshi
-0a7d NSTL, Inc.
-0a7e Octagon Systems Corp.
-0a80 Rexon Technology Corp., Ltd
-0a81 Chesen Electronics Corp.
- 0101 Keyboard
- 0103 Keyboard
- 0203 Mouse
- 0205 PS/2 Keyboard+Mouse Adapter
-0a82 Syscan
- 4600 TravelScan 460/464
-0a83 NextComm, Inc.
-0a84 Maui Innovative Peripherals
-0a85 Idexx Labs
-0a86 NITGen Co., Ltd
-0a8d Picturetel
-0a8e Japan Aviation Electronics Industry, Ltd
- 2011 Filter Driver For JAE XMC R/W
-0a90 Candy Technology Co., Ltd
-0a91 Globlink Technology, Inc.
- 3801 Targus PAKP003 Mouse
-0a92 EGO SYStems, Inc.
- 0011 SYS WaveTerminal U2A
- 0021 GIGAPort
- 0031 GIGAPortAG
- 0053 AudioTrak Optoplay
- 0061 Waveterminal U24
- 0071 MAYA EX7
- 0091 Maya 44
- 00b1 MAYA EX5
- 1000 MIDI Mate
- 1010 RoMI/O
- 1020 M4U
- 1030 M8U
- 1090 KeyControl49
- 10a0 KeyControl25
-0a93 C Technologies AB
- 0002 C-Pen 10
- 0005 MyPen Light
- 000d Input Pen
- 0010 C-Pen 20
-0a94 Intersense
-0aa3 Lava Computer Mfg., Inc.
-0aa4 Develco Elektronik
-0aa5 First International Digital
- 0002 irock! 500 Series
- 0801 MP3 Player
-0aa6 Perception Digital, Ltd
- 0101 Hercules Jukebox
- 1501 Store 'n' Go HD Drive
-0aa7 Wincor Nixdorf International GmbH
- 0100 POS Keyboard, TA58P-USB
- 0101 POS Keyboard, TA85P-USB
- 0102 POS Keyboard, TA59-USB
- 0103 POS Keyboard, TA60-USB
- 0104 SNIkey Keyboard, SNIKey-KB-USB
- 0200 Operator Display, BA63-USB
- 0201 Operator Display, BA66-USB
- 0202 Operator Display & Scanner, XiCheck-BA63
- 0203 Operator Display & Scanner, XiCheck-BA66
- 0204 Graphics Operator Display, BA63GV
- 0300 POS Printer (printer class mode), TH210
- 0301 POS Printer (native mode), TH210
- 0302 POS Printer (printer class mode), TH220
- 0303 POS Printer (native mode), TH220
- 0304 POS Printer, TH230
- 0305 Lottery Printer, XiPrintPlus
- 0306 POS Printer (printer class mode), TH320
- 0307 POS Printer (native mode), TH320
- 0308 POS Printer (printer class mode), TH420
- 0309 POS Printer (native mode), TH420
- 030a POS Printer, TH200B
- 0400 Lottery Scanner, Xiscan S
- 0401 Lottery Scanner, Xiscan 3
- 4304 Banking Printer TP07
-0aa8 TriGem Computer, Inc.
- 0060 TG 11Mbps WLAN Mini Adapter
- 1001 DreamComboM4100
- 3002 InkJet Color Printer
- 8001 TG_iMON
- 8002 TG_KLOSS
- a001 TG_X2
- a002 TGVFD_KLOSS
- ffda iMON_VFD
-0aa9 Baromtec Co.
- f01b Medion MD 6242 MP3 Player
-0aaa Japan CBM Corp.
-0aab Vision Shape Europe SA
-0aac iCompression, Inc.
-0aad Rohde & Schwarz GmbH & Co. KG
-0aae NEC infrontia Corp. (Nitsuko)
-0aaf Digitalway Co., Ltd
-0ab0 Arrow Strong Electronics Co., Ltd
-0aba Ellisys
- 8001 USB Tracker 110 Protocol Analyzer
-0abe Stereo-Link
- 0101 SL1200 DAC
-0ac3 Sanyo Semiconductor Company Micro
-0ac4 Leco Corp.
-0ac5 I & C Corp.
-0ac6 Singing Electrons, Inc.
-0ac7 Panwest Corp.
-0ac8 Z-Star Microelectronics Corp.
- 0301 Web Camera
- 0302 ZC0302 WebCam
- 0321 USB 2.0 Webcam
- 0323 Luxya WC-1200 USB 2.0 Webcam
- 301b ZC0301 WebCam
- 303b ZC0303 WebCam
- 305b ZC0305 WebCam
- 307b USB 1.1 WebCam
- c002 Visual Communication Camera VGP-VCC1
-0ac9 Micro Solutions, Inc.
- 0000 Backpack CD-ReWriter
- 0001 BACKPACK 2 Cable
- 0010 BACKPACK
- 0011 Backpack 40GB Hard Drive
- 0110 BACKPACK
- 0111 BackPack
- 1234 BACKPACK
-0aca OPEN Networks Ltd
- 1060 OPEN NT1 Plus II
-0acc Koga Electronics Co.
-0acd ID Tech
- 0401 ID TECH Spectrum III Hybrid Smartcard Reader
-0ace ZyDAS
- 1201 802.11b WiFi
- 1211 802.11b/g USB2 WiFi
- 1215 WLA-54L WiFi
- 1608 OMNI FAXMODEM 56K UNO (ZyXEL)
-0acf Intoto, Inc.
-0ad0 Intellix Corp.
-0ad1 Remotec Technology, Ltd
-0ad2 Service & Quality Technology Co., Ltd
-0ae3 Allion Computer, Inc.
-0ae4 Taito Corp.
-0ae7 Neodym Systems, Inc.
-0ae8 System Support Co., Ltd
-0ae9 North Shore Circuit Design L.L.P.
-0aea SciEssence, LLC
-0aeb TTP Communications, Ltd
-0aec Neodio Technologies Corp.
- 2101 SmartMedia Card Reader
- 2102 CompactFlash Card Reader
- 2103 MMC/SD Card Reader
- 2104 MemoryStick Card Reader
- 2201 SmartMedia+CompactFlash Card Reader
- 2202 SmartMedia+MMC/SD Card Reader
- 2203 SmartMedia+MemoryStick Card Reader
- 2204 CompactFlash+MMC/SD Card Reader
- 2205 CompactFlash+MemoryStick Card Reader
- 2206 MMC/SD+MemoryStick Card Reader
- 2301 SmartMedia+CompactFlash+MMC/SD Card Reader
- 2302 SmartMedia+CompactFlash+MemoryStick Card Reader
- 2303 SmartMedia+MMC/SD+MemoryStick Card Reader
- 2304 CompactFlash+MMC/SD+MemoryStick Card Reader
- 3016 MMC/SD+Memory Stick Card Reader
- 3050 ND3050 8-in-1 Card Reader
- 3060 1.1 FS Card Reader
- 3101 MMC/SD Card Reader
- 3102 MemoryStick Card Reader
- 3201 MMC/SD+MemoryStick Card Reader
- 3216 HS Card Reader
- 3260 7-in-1 Card Reader
- 5010 ND5010 Card Reader
-0af0 Option
- 5000 UMTS Card
- 6000 GlobeTrotter 3G datacard
- 6300 GT 3G Quad UMTS/GPRS Card
- 6600 GlobeTrotter 3G+ datacard
-0af6 Silver I Co., Ltd
-0af7 B2C2, Inc.
- 0101 Digital TV USB Receiver (DVB-S/T/C / ATSC)
-0af9 Hama, Inc.
- 0010 USB SightCam 100
- 0011 Micro Innovations IC50C WebCam
-0afc Zaptronix Ltd
-0afd Tateno Dennou, Inc.
-0afe Cummins Engine Co.
-0aff Jump Zone Network Products, Inc.
-0b00 INGENICO
-0b05 ASUSTek Computer, Inc.
- 1101 Mass Storage (UISDMC4S)
- 1706 WL-167G 802.11g Adapter [ralink]
- 1707 WL-167g Wireless Adapter
- 1708 Mass Storage Device
- 170b Mass Storage Device
- 170c WL-159g
- 170d 802.11b/g Wireless Network Adapter
- 1712 BT-183 Bluetooth 2.0+EDR adapter
- 1715 2045 Bluetooth 2.0 Device with trace filter
- 1716 Bluetooth Device
- 171b A9T wireless
- 171c 802.11b/g Wireless Network Adapter
- 1723 WL-167G v2 802.11g Adapter [ralink]
- 1724 RT2573
- 1726 Laptop OLED Display
- 172a ASUS 802.11n Network Adapter
- 172b 802.11n Network Adapter
- 1731 ASUS 802.11n Network Adapter
- 1732 802.11n Network Adapter
- 173c BT-183 Bluetooth 2.0
- 1742 802.11n Network Adapter
- 6101 Cable Modem
- 620a Remote NDIS Device
-0b0c Todos Data System AB
- 0009 Todos Argos Mini II Smart Card Reader
-0b0e GN Netcom
-0b0f AVID Technology
-0b10 Pcally
-0b11 I Tech Solutions Co., Ltd
-0b1e Electronic Warfare Assoc., Inc. (EWA)
-0b1f Insyde Software Corp.
-0b20 TransDimension, Inc.
-0b21 Yokogawa Electric Corp.
-0b22 Japan System Development Co., Ltd
-0b23 Pan-Asia Electronics Co., Ltd
-0b24 Link Evolution Corp.
-0b27 Ritek Corp.
-0b28 Kenwood Corp.
-0b2c Village Center, Inc.
-0b30 PNY Technologies, Inc.
- 0006 SM Media-Shuttle Card Reader
-0b33 Contour Design, Inc.
- 0020 ShuttleXpress
-0b37 Hitachi ULSI Systems Co., Ltd
-0b39 Omnidirectional Control Technology, Inc.
- 0109 USB TO Ethernet
- 0421 Serial
- 0801 USB-Parallel Bridge
- 0901 OCT To Fast Ethernet Converter
- 0c03 LAN DOCK Serial Converter
-0b3a IPaxess
-0b3b Tekram Technology Co., Ltd
- 0163 TL-WN320G 1.0 WLAN Adapter
- 1601 Allnet 0193 802.11b Adapter
- 1602 ZyXEL ZyAIR B200 802.11b Adapter
- 1612 AIR.Mate 2@net 802.11b Adapter
- 1613 802.11b Wireless LAN Adapter
- 1620 Allnet USB 2.0 Wireless Network Adapter
- 1630 QuickWLAN
- 5630 ZD1211
- 6630 ZD1211
-0b3c Olivetti Techcenter
- a010 Simple_Way Printer/Scanner/Copier
-0b3e Kikusui Electronics Corp.
-0b41 Hal Corp.
- 0011 Crossam2+USB IR commander
-0b43 Play.com, Inc.
- 0003 PS2 Controller Converter
-0b47 Sportbug.com, Inc.
-0b48 TechnoTrend AG
- 1003 Technotrend/Hauppauge USB-Nova
- 1004 TT-PCline
- 1005 Technotrend/Hauppauge USB-Nova
- 1006 Technotrend/Hauppauge DEC3000-s
- 1007 TT-micro plus Device
- 1008 Technotrend/Hauppauge DEC2000-t
- 1009 Technotrend/Hauppauge DEC2540-t
-0b49 ASCII Corp.
- 064f Trance Vibrator
-0b4b Pine Corp. Ltd.
- 0100 D'music MP3 Player
-0b4d Graphtec America, Inc.
- 110a Graphtec CC200-20
-0b4e Musical Electronics, Ltd
- 6500 MP3 Player
- 8028 MP3 Player
- 8920 MP3 Player
-0b50 Dumpries Co., Ltd
-0b51 Comfort Keyboard Co.
- 0020 Comfort Keyboard
-0b52 Colorado MicroDisplay, Inc.
-0b54 Sinbon Electronics Co., Ltd
-0b56 TYI Systems, Ltd
-0b57 Beijing HanwangTechnology Co., Ltd
-0b59 Lake Communications, Ltd
-0b5a Corel Corp.
-0b5f Green Electronics Co., Ltd
-0b60 Nsine, Ltd
-0b61 NEC Viewtechnology, Ltd
-0b62 Orange Micro, Inc.
- 000b Bluetooth Device
- 0059 iBOT2 WebCam
-0b63 ADLink Technology, Inc.
-0b64 Wonderful Wire Cable Co., Ltd
-0b65 Expert Magnetics Corp.
-0b69 CacheVision
-0b6a Maxim Integrated Products
-0b6f Nagano Japan Radio Co., Ltd
-0b70 PortalPlayer, Inc.
- 00ba iRiver H10 20GB
-0b71 SHIN-EI Sangyo Co., Ltd
-0b72 Embedded Wireless Technology Co., Ltd
-0b73 Computone Corp.
-0b75 Roland DG Corp.
-0b79 Sunrise Telecom, Inc.
-0b7a Zeevo, Inc.
- 07d0 Bluetooth Dongle
-0b7b Taiko Denki Co., Ltd
-0b7c ITRAN Communications, Ltd
-0b7d Astrodesign, Inc.
-0b84 Rextron Technology, Inc.
-0b85 Elkat Electronics, Sdn., Bhd.
-0b86 Exputer Systems, Inc.
- 5100 XMC5100 Zippy Drive
- 5110 XMC5110 Flash Drive
- 5200 XMC5200 Zippy Drive
- 5201 XMC5200 Zippy Drive
- 5202 XMC5200 Zippy Drive
- 5280 XMC5280 Storage Drive
- fff0 ISP5200 Debugger
-0b87 Plus-One I & T, Inc.
-0b88 Sigma Koki Co., Ltd, Technology Center
-0b89 Advanced Digital Broadcast, Ltd
-0b95 ASIX Electronics Corp.
- 1720 10/100 Ethernet
- 1780 AX88178
- 7720 AX88772
-0b96 Sewon Telecom
-0b97 O2 Micro, Inc.
- 7732 Smart Card Reader
- 7761 Oz776 1.1 Hub
- 7762 Oz776 SmartCard Reader
- 7772 OZ776 CCID Smartcard Reader
-0b98 Playmates Toys, Inc.
-0b99 Audio International, Inc.
-0b9b Dipl.-Ing. Stefan Kunde
- 4012 Reflex RC-controller Interface
-0b9d Softprotec Co.
-0b9f Chippo Technologies
-0baf U.S. Robotics
- 00e5 USR6000
- 00eb USR1120 802.11b Adapter
- 00ec 56K Faxmodem
- 00f1 SureConnect ADSL ATM Adapter
- 00f2 SureConnect ADSL Loader
- 00f5 SureConnect ADSL ATM Adapter
- 00f6 SureConnect ADSL Loader
- 00f7 SureConnect ADSL ATM Adapter
- 00f8 SureConnect ADSL Loader
- 00f9 SureConnect ADSL ATM Adapter
- 00fa SureConnect ADSL Loader
- 00fb SureConnect ADSL Ethernet/USB Router
- 0118 U5 802.11g Adapter
- 011b Wireless MAXg Adapter
- 0121 USR5423 WLAN
- 6112 FaxModem Model 5633
-0bb0 Concord Camera Corp.
- 0100 Sound Vision Stream
- 5007 3340z/Rollei DC3100
-0bb1 Infinilink Corp.
-0bb2 Ambit Microsystems Corp.
- 0302 WLAN
- 6098 USB Cable Modem
-0bb3 Ofuji Technology
-0bb4 High Tech Computer Corp.
- 00ce mmO2 XDA GSM/GPRS Pocket PC
- 00cf SPV C500 Smart Phone
- 0a01 PocketPC Sync
- 0a02 Himalaya GSM/GPRS Pocket PC
- 0a03 PocketPC Sync
- 0a04 PocketPC Sync
- 0a05 PocketPC Sync
- 0a06 PocketPC Sync
- 0a07 Magician PocketPC SmartPhone / O2 XDA
- 0a08 PocketPC Sync
- 0a09 PocketPC Sync
- 0a0a PocketPC Sync
- 0a0b PocketPC Sync
- 0a0c PocketPC Sync
- 0a0d PocketPC Sync
- 0a0e PocketPC Sync
- 0a0f PocketPC Sync
- 0a10 PocketPC Sync
- 0a11 PocketPC Sync
- 0a12 PocketPC Sync
- 0a13 PocketPC Sync
- 0a14 PocketPC Sync
- 0a15 PocketPC Sync
- 0a16 PocketPC Sync
- 0a17 PocketPC Sync
- 0a18 PocketPC Sync
- 0a19 PocketPC Sync
- 0a1a PocketPC Sync
- 0a1b PocketPC Sync
- 0a1c PocketPC Sync
- 0a1d PocketPC Sync
- 0a1e PocketPC Sync
- 0a1f PocketPC Sync
- 0a20 PocketPC Sync
- 0a21 PocketPC Sync
- 0a22 PocketPC Sync
- 0a23 PocketPC Sync
- 0a24 PocketPC Sync
- 0a25 PocketPC Sync
- 0a26 PocketPC Sync
- 0a27 PocketPC Sync
- 0a28 PocketPC Sync
- 0a29 PocketPC Sync
- 0a2a PocketPC Sync
- 0a2b PocketPC Sync
- 0a2c PocketPC Sync
- 0a2d PocketPC Sync
- 0a2e PocketPC Sync
- 0a2f PocketPC Sync
- 0a30 PocketPC Sync
- 0a31 PocketPC Sync
- 0a32 PocketPC Sync
- 0a33 PocketPC Sync
- 0a34 PocketPC Sync
- 0a35 PocketPC Sync
- 0a36 PocketPC Sync
- 0a37 PocketPC Sync
- 0a38 PocketPC Sync
- 0a39 PocketPC Sync
- 0a3a PocketPC Sync
- 0a3b PocketPC Sync
- 0a3c PocketPC Sync
- 0a3d PocketPC Sync
- 0a3e PocketPC Sync
- 0a3f PocketPC Sync
- 0a40 PocketPC Sync
- 0a41 PocketPC Sync
- 0a42 PocketPC Sync
- 0a43 PocketPC Sync
- 0a44 PocketPC Sync
- 0a45 PocketPC Sync
- 0a46 PocketPC Sync
- 0a47 PocketPC Sync
- 0a48 PocketPC Sync
- 0a49 PocketPC Sync
- 0a4a PocketPC Sync
- 0a4b PocketPC Sync
- 0a4c PocketPC Sync
- 0a4d PocketPC Sync
- 0a4e PocketPC Sync
- 0a4f PocketPC Sync
- 0a50 HTC SmartPhone Sync
- 0a51 SPV C400 / T-Mobile SDA GSM/GPRS Pocket PC
- 0a52 SmartPhone Sync
- 0a53 SmartPhone Sync
- 0a54 SmartPhone Sync
- 0a55 SmartPhone Sync
- 0a56 SmartPhone Sync
- 0a57 SmartPhone Sync
- 0a58 SmartPhone Sync
- 0a59 SmartPhone Sync
- 0a5a SmartPhone Sync
- 0a5b SmartPhone Sync
- 0a5c SmartPhone Sync
- 0a5d SmartPhone Sync
- 0a5e SmartPhone Sync
- 0a5f SmartPhone Sync
- 0a60 SmartPhone Sync
- 0a61 SmartPhone Sync
- 0a62 SmartPhone Sync
- 0a63 SmartPhone Sync
- 0a64 SmartPhone Sync
- 0a65 SmartPhone Sync
- 0a66 SmartPhone Sync
- 0a67 SmartPhone Sync
- 0a68 SmartPhone Sync
- 0a69 SmartPhone Sync
- 0a6a SmartPhone Sync
- 0a6b SmartPhone Sync
- 0a6c SmartPhone Sync
- 0a6d SmartPhone Sync
- 0a6e SmartPhone Sync
- 0a6f SmartPhone Sync
- 0a70 SmartPhone Sync
- 0a71 SmartPhone Sync
- 0a72 SmartPhone Sync
- 0a73 SmartPhone Sync
- 0a74 SmartPhone Sync
- 0a75 SmartPhone Sync
- 0a76 SmartPhone Sync
- 0a77 SmartPhone Sync
- 0a78 SmartPhone Sync
- 0a79 SmartPhone Sync
- 0a7a SmartPhone Sync
- 0a7b SmartPhone Sync
- 0a7c SmartPhone Sync
- 0a7d SmartPhone Sync
- 0a7e SmartPhone Sync
- 0a7f SmartPhone Sync
- 0a80 SmartPhone Sync
- 0a81 SmartPhone Sync
- 0a82 SmartPhone Sync
- 0a83 SmartPhone Sync
- 0a84 SmartPhone Sync
- 0a85 SmartPhone Sync
- 0a86 SmartPhone Sync
- 0a87 SmartPhone Sync
- 0a88 SmartPhone Sync
- 0a89 SmartPhone Sync
- 0a8a SmartPhone Sync
- 0a8b SmartPhone Sync
- 0a8c SmartPhone Sync
- 0a8d SmartPhone Sync
- 0a8e SmartPhone Sync
- 0a8f SmartPhone Sync
- 0a90 SmartPhone Sync
- 0a91 SmartPhone Sync
- 0a92 SmartPhone Sync
- 0a93 SmartPhone Sync
- 0a94 SmartPhone Sync
- 0a95 SmartPhone Sync
- 0a96 SmartPhone Sync
- 0a97 SmartPhone Sync
- 0a98 SmartPhone Sync
- 0a99 SmartPhone Sync
- 0a9a SmartPhone Sync
- 0a9b SmartPhone Sync
- 0a9c SmartPhone Sync
- 0a9d SmartPhone Sync
- 0a9e SmartPhone Sync
- 0a9f SmartPhone Sync
- 0b04 Hermes / TyTN / T-Mobile MDA Vario II / O2 Xda Trion
- 0b06 Athena / Advantage x7500 / Dopod U1000 / T-Mobile AMEO
- 0b0c Elf / Touch / P3450 / T-Mobile MDA Touch / O2 Xda Nova / Dopod S1
- 0bce Vario MDA
-0bb5 Murata Manufacturing Co., Ltd
-0bb6 Network Alchemy
-0bb7 Joytech Computer Co., Ltd
-0bb8 Hitachi Semiconductor and Devices Sales Co., Ltd
-0bb9 Eiger M&C Co., Ltd
-0bba ZAccess Systems
-0bbb General Meters Corp.
-0bbc Assistive Technology, Inc.
-0bbd System Connection, Inc.
-0bc0 Knilink Technology, Inc.
-0bc1 Fuw Yng Electronics Co., Ltd
-0bc2 Seagate RSS LLC
- 2000 Storage Adapter V3 (TPP)
-0bc3 IPWireless, Inc.
-0bc4 Microcube Corp.
-0bc5 JCN Co., Ltd
-0bc6 ExWAY, Inc.
-0bc7 X10 Wireless Technology, Inc.
- 0001 ActiveHome (ACPI-compliant)
- 0002 Firecracker Interface (ACPI-compliant)
- 0003 VGA Video Sender (ACPI-compliant)
- 0004 X10 Receiver
- 0005 Wireless Transceiver (ACPI-compliant)
- 0006 Wireless Transceiver (ACPI-compliant)
- 0007 Wireless Transceiver (ACPI-compliant)
- 0008 Wireless Transceiver (ACPI-compliant)
- 0009 Wireless Transceiver (ACPI-compliant)
- 000a Wireless Transceiver (ACPI-compliant)
- 000b Transceiver (ACPI-compliant)
- 000c Transceiver (ACPI-compliant)
- 000d Transceiver (ACPI-compliant)
- 000e Transceiver (ACPI-compliant)
- 000f Transceiver (ACPI-compliant)
-0bc8 Telmax Communications
-0bc9 ECI Telecom, Ltd
-0bca Startek Engineering, Inc.
-0bcb Perfect Technic Enterprise Co., Ltd
-0bd7 Andrew Pargeter & Associates
- a021 Amptek DP4 multichannel signal analyzer
-0bda Realtek Semiconductor Corp.
- 0103 USB 2.0 Card Reader
- 0104 Mass Storage Device
- 0106 Mass Storage Device
- 0107 Mass Storage Device
- 0108 Mass Storage Device
- 0111 Card Reader
- 0113 Mass Storage Device
- 0115 Mass Storage Device
- 0116 Mass Storage Device
- 0117 Mass Storage Device
- 0118 Mass Storage Device
- 0151 Mass Storage Device
- 0152 Mass Storage Device
- 0153 Mass Storage Device
- 0156 Mass Storage Device
- 0157 Mass Storage Device
- 0158 Mass Storage Device
- 0161 Mass Storage Device
- 0168 Mass Storage Device
- 0169 Mass Storage Device
- 0171 Mass Storage Device
- 0176 Mass Storage Device
- 0178 Mass Storage Device
- 2831 2831U Device
- 8150 RTL8150 Fast Ethernet Adapter
- 8151 RTL8151 Adapter
- 8187 RTL8187 Wireless Adapter
- 8189 RTL8187B Wireless 802.11g 54Mbps Network Adapter
- 8197 RTL8187B Wireless Adapter
-0bdb Ericsson Business Mobile Networks BV
- 1000 Bluetooth Device
- 1002 Bluetooth Device 1.2
-0bdc Y Media Corp.
-0bdd Orange PCS
-0be2 Kanda Tsushin Kogyo Co., Ltd
-0be3 TOYO Corp.
-0be4 Elka International, Ltd
-0be5 DOME imaging systems, Inc.
-0be6 Dong Guan Humen Wonderful Wire Cable Factory
-0bee LTK Industries, Ltd
-0bef Way2Call Communications
-0bf0 Pace Micro Technology PLC
-0bf1 Intracom S.A.
- 0001 netMod Driver Ver 2.4.17 (CAPI)
- 0002 netMod Driver Ver 2.4 (CAPI)
- 0003 netMod Driver Ver 2.4 (CAPI)
-0bf2 Konexx
-0bf6 Addonics Technologies, Inc.
- 0103 Storage Device
- 1234 Storage Device
- a000 Cable 205 (TPP)
- a001 Cable 205
- a002 IDE Bridge
-0bf7 Sunny Giken, Inc.
-0bf8 Fujitsu Siemens Computers
- 1001 Fujitsu Pocket Loox 600 PDA
-0c04 MOTO Development Group, Inc.
-0c05 Appian Graphics
-0c06 Hasbro Games, Inc.
-0c07 Infinite Data Storage, Ltd
-0c08 Agate
- 0378 Q 16MB Storage Device
-0c09 Comjet Information System
- a5a5 Litto Version USB2.0
-0c0a Highpoint Technologies, Inc.
-0c0b Dura Micro, Inc. (Acomdata)
- 27cb 6-in-1 Flash Reader and Writer
- 27d7 Multi Memory reader/writer MD-005
- 27da Multi Memory reader/writer MD-005
- 27dc Multi Memory reader/writer MD-005
- 27e7 3,5'' HDD case MD-231
- 27ee 3,5'' HDD case MD-231
- 2814 3,5'' HDD case MD-231
- 2815 3,5'' HDD case MD-231
- 281d 3,5'' HDD case MD-231
- a109 CF/SM Reader and Writer
- a10c SD/MS Reader and Writer
- b001 USB 2.0 Mass Storage IDE adapter
- b004 MMC/SD Reader and Writer
-0c12 Zeroplus
- 0005 PSX Vibration Feedback Converter
- 8809 Red Octane Ignition Xbox DDR Pad
-0c15 Iris Graphics
-0c16 Gyration, Inc.
- 0080 eHome Infrared Receiver
- 0081 eHome Infrared Receiver
-0c17 Cyberboard A/S
-0c18 SynerTek Korea, Inc.
-0c19 cyberPIXIE, Inc.
-0c1a Silicon Motion, Inc.
-0c1b MIPS Technologies
-0c1c Hang Zhou Silan Electronics Co., Ltd
-0c22 Tally Printer Corp.
-0c23 Lernout + Hauspie
-0c24 Taiyo Yuden
- 0001 Bluetooth Adaptor
- 0002 Bluetooth Device2
- 0005 Bluetooth Device(BC04-External)
- 000b Bluetooth Device(BC04-External)
- 000c Bluetooth Adaptor
- 000e Bluetooth Device(BC04-External)
- 000f Bluetooth Driver (V2.0+EDR)
- 0010 Bluetooth Device(BC04-External)
- 0012 Bluetooth Device(BC04-External)
- 0018 Bluetooth Device(BC04-External)
- 0019 Bluetooth Device
- 0c24 Bluetooth Device(SAMPLE)
- ffff Bluetooth module with BlueCore in DFU mode
-0c25 Sampo Corp.
- 0310 Scream Cam
-0c27 RFIDeas, Inc
- 3bfa pcProx Card Reader
-0c2e Metro
- 0200 Metrologic Scanner
-0c35 Eagletron, Inc.
-0c36 E Ink Corp.
-0c37 e.Digital
-0c38 Der An Electric Wire & Cable Co., Ltd
-0c39 IFR
-0c3a Furui Precise Component (Kunshan) Co., Ltd
-0c3b Komatsu, Ltd
-0c3c Radius Co., Ltd
-0c3d Innocom, Inc.
-0c3e Nextcell, Inc.
-0c44 Motorola iDEN
- 0021 iDEN P2k0 Device
- 0022 iDEN P2k1 Device
- 03a2 iDEN Smartphone
-0c45 Microdia
- 1020 Mass Storage Reader
- 1028 Mass Storage Reader
- 1030 Mass Storage Reader
- 1031 Sonix Mass Storage Device
- 1032 Mass Storage Reader
- 1033 Sonix Mass Storage Device
- 1034 Mass Storage Reader
- 1035 Mass Storage Reader
- 1036 Mass Storage Reader
- 1037 Sonix Mass Storage Device
- 1050 CF Card Reader
- 1058 HDD Reader
- 1060 iFlash SM-Direct Card Reader
- 1061 Mass Storage Reader
- 1062 Mass Storage Reader
- 1063 Sonix Mass Storage Device
- 1064 Mass Storage Reader
- 1065 Mass Storage Reader
- 1066 Mass Storage Reader
- 1067 Mass Storage Reader
- 1158 A56AK
- 184c VoIP Phone
- 6001 Genius VideoCAM NB
- 6005 Sweex Mini WebCam
- 6007 VideoCAM Eye
- 6009 VideoCAM ExpressII
- 600d TwinkleCam USB camera
- 6011 PC Camera (SN9C102)
- 6019 PC Camera (SN9C102)
- 6024 VideoCAM ExpressII
- 6025 VideoCAM ExpressII
- 6028 Typhoon Easycam USB 330K (older)
- 6029 Triplex i-mini PC Camera
- 602a Meade ETX-105EC Camera
- 602b VideoCAM NB 300
- 602c Clas Ohlson TWC-30XOP WebCam
- 602d VideoCAM ExpressII
- 602e VideoCAM Messenger
- 6030 VideoCAM ExpressII
- 603f VideoCAM ExpressII
- 6040 CCD PC Camera (PC390A)
- 606a CCD PC Camera (PC390A)
- 607a CCD PC Camera (PC390A)
- 607b Win2 PC Camera
- 607c CCD PC Camera (PC390A)
- 607e CCD PC Camera (PC390A)
- 6080 Audio (Microphone)
- 6082 VideoCAM Look
- 6083 VideoCAM Look
- 608c VideoCAM Look
- 608e VideoCAM Look
- 608f VideoCAM Look
- 60a8 VideoCAM Look
- 60aa VideoCAM Look
- 60ab PC Camera
- 60af VideoCAM Look
- 60b0 Genius VideoCam Look
- 60c0 PC Camera with Mic (SN9C105)
- 60c8 Win2 PC Camera
- 60cc Composite Device
- 60ec Composite Device
- 60ef Win2 PC Camera
- 60fa PC Camera with Mic (SN9C105)
- 60fb Composite Device
- 60fc PC Camera with Mic (SN9C105)
- 60fe Audio (Microphone)
- 6108 Win2 PC Camera
- 6122 PC Camera (SN9C110)
- 6123 PC Camera (SN9C110)
- 612a PC Camera (SN9C110)
- 612c PC Camera (SN9C110)
- 612e PC Camera (SN9C110)
- 612f PC Camera (SN9C110)
- 6130 PC Camera (SN9C120)
- 6138 Win2 PC Camera
- 613a PC Camera (SN9C120)
- 613b Win2 PC Camera
- 613c PC Camera (SN9C120)
- 613e PC Camera (SN9C120)
- 6240 PC Camera (SN9C201)
- 6242 PC Camera (SN9C201)
- 6243 PC Camera (SN9C201)
- 6248 PC Camera (SN9C201)
- 624b PC Camera (SN9C201)
- 624c PC Camera (SN9C201)
- 624e PC Camera (SN9C201)
- 624f PC Camera (SN9C201)
- 6260 PC Camera (SN9C201)
- 6270 U-CAM PC Camera NE878
- 627a PC Camera (SN9C201)
- 627b PC Camera (SN9C201)
- 627c PC Camera (SN9C201)
- 627f PC Camera (SN9C201)
- 6280 Composite Device
- 6282 Audio (Microphone)
- 6283 Audio (Microphone)
- 6288 Audio (Microphone)
- 628a Composite Device
- 628b PC Camera (SN9C202)
- 628c PC Camera (SN9C202)
- 628e Composite Device
- 628f Composite Device
- 62a0 Audio (Microphone)
- 62b0 Audio (Microphone)
- 62ba PC Camera (SN9C202)
- 62bb PC Camera (SN9C202)
- 62bc Composite Device
- 62c0 Pavilion Webcam
- 8000 DC31VC
- 8006 Dual Mode Camera (8006 VGA)
- 800a Vivitar Vivicam3350B
-0c46 WaveRider Communications, Inc.
-0c4b Reiner SCT Kartensysteme GmbH
- 0100 cyberJack e-com/pinpad
- 0300 cyberJack pinpad(a)
-0c52 Sealevel Systems, Inc.
- 2101 Serial Converter
-0c53 ViewPLUS, Inc.
-0c54 Glory, Ltd
-0c55 Spectrum Digital, Inc.
- 0510 Spectrum Digital XDS510 JTAG Debugger
- 0540 SPI540
- 5416 TMS320C5416 DSK
- 6416 TMS320C6416 DDB
-0c56 Billion Bright, Ltd
-0c57 Imaginative Design Operation Co., Ltd
-0c58 Vidar Systems Corp.
-0c59 Dong Guan Shinko Wire Co., Ltd
-0c5a TRS International Mfg., Inc.
-0c5e Xytronix Research & Design
-0c62 Chant Sincere Co., Ltd
-0c63 Toko, Inc.
-0c64 Signality System Engineering Co., Ltd
-0c65 Eminence Enterprise Co., Ltd
-0c66 Rexon Electronics Corp.
-0c67 Concept Telecom, Ltd
-0c70 MCT Elektronikladen
- 0000 USB08 Development board
-0c74 Optronic Laboratories Inc.
- 0002 OL 700-30 Goniometer
-0c76 JMTek, LLC.
- 0001 Mass Storage Controller
- 0002 Mass Storage Controller
- 0003 USBdisk
- 0004 Mass Storage Controller
- 0005 Transcend USB Flash disk
- 0006 Transcend JetFlash
- 0007 Mass Storage Device
-0c77 Sipix Group, Ltd
- 1001 SiPix Web2
- 1002 SiPix SC2100
- 1010 SiPix Snap
- 1011 SiPix Blink 2
- 1015 SiPix CAMeleon
-0c78 Detto Corp.
-0c79 NuConnex Technologies Pte., Ltd
-0c7a Wing-Span Enterprise Co., Ltd
-0c86 NDA Technologies, Inc.
-0c88 Kyocera Wireless Corp.
- 0021 Handheld
- 17da Qualcomm Kyocera CDMA Technologies MSM
-0c89 Honda Tsushin Kogyo Co., Ltd
-0c8a Pathway Connectivity, Inc.
-0c8b Wavefly Corp.
-0c8c Coactive Networks
-0c8d Tempo
-0c8e Cesscom Co., Ltd
- 6000 Luxian Series
-0c8f Applied Microsystems
-0c98 Berkshire Products, Inc.
- 1140 USB PC Watchdog
-0c99 Innochips Co., Ltd
-0c9a Hanwool Robotics Corp.
-0c9b Jobin Yvon, Inc.
-0c9d SemTek
- 0170 3873 Manual Insert card reader
-0ca2 Zyfer
-0ca3 Sega Corp.
-0ca4 ST&T Instrument Corp.
-0ca5 BAE Systems Canada, Inc.
-0ca6 Castles Technology Co., Ltd
- 0010 EZUSB PC/SC Smart Card Reader
- 0050 EZ220PU Reader Controller
- 1077 Bludrive Family Smart Card Reader
- 107e Reader Controller
- 2010 myPad110 PC/SC Smart Card Reader
-0ca7 Information Systems Laboratories
-0cad Motorola CGISS
- 9001 PowerPad Pocket PC Device
-0cae Ascom Business Systems, Ltd
-0caf Buslink
- 2507 Hi-Speed USB-to-IDE Bridge Controller
- 2515 Flash Disk Embedded Hub
- 2516 Flash Disk Security Device
- 2517 Flash Disk Mass Storage Device
- 25c7 Hi-Speed USB-to-IDE Bridge Controller
- 3a00 Hard Drive
- 3a20 Mass Storage Device
- 3acd Mass Storage Device
-0cb0 Flying Pig Systems
-0cb1 Innovonics, Inc.
-0cb6 Celestix Networks, Pte., Ltd
-0cb7 Singatron Enterprise Co., Ltd
-0cb8 Opticis Co., Ltd
-0cba Trust Electronic (Shanghai) Co., Ltd
-0cbb Shanghai Darong Electronics Co., Ltd
-0cbc Palmax Technology Co., Ltd
- 0101 Pocket PC P6C
- 0201 Personal Digital Assistant
- 0301 Personal Digital Assistant P6M+
- 0401 Pocket PC
-0cbd Pentel Co., Ltd (Electronics Equipment Div.)
-0cbe Keryx Technologies, Inc.
-0cbf Union Genius Computer Co., Ltd
-0cc0 Kuon Yi Industrial Corp.
-0cc1 Given Imaging, Ltd
-0cc2 Timex Corp.
-0cc3 Rimage Corp.
-0cc4 emsys GmbH
-0cc5 Sendo
-0cc6 Intermagic Corp.
-0cc7 Kontron Medical AG
-0cc8 Technotools Corp.
-0cc9 BroadMAX Technologies, Inc.
-0cca Amphenol
-0ccb SKNet Co., Ltd
-0ccc Domex Technology Corp.
-0ccd TerraTec Electronic GmbH
- 0012 PHASE 26
- 0013 PHASE 26
- 0014 PHASE 26
- 0015 Flash Update for TerraTec PHASE 26
- 0021 Cameo Grabster 200
- 0023 Mystify Claw
- 0028 Aureon 5.1 MkII
- 0032 MIDI HUBBLE
- 0035 Miditech Play'n Roll
- 0036 Cinergy 250 Audio
- 0037 Cinergy 250 Audio
- 0038 Cinergy T^2 DVB-T Receiver
- 0039 Grabster AV 400
- 003b Cinergy 400
- 003c Grabster AV 250
- 0042 Cinergy Hybrid T XS
- 0043 Cinergy T XS
- 004e Cinergy T XS
- 004f Cinergy Analog XS
- 005c Cinergy T²
- 0069 Cinergy T XE DVB-T Receiver
-0cd4 Bang Olufsen
- 0101 BeolinkPC2
-0cd7 NewChip S.r.l.
-0cd8 JS Digitech, Inc.
- 2007 Smart Card Reader/JSTU-9700
-0cd9 Hitachi Shin Din Cable, Ltd
-0cde Z-Com
- 0001 M4Y-750
- 0002 XI-725/726 Prism2.5 802.11b Adapter
- 0003 Sagem 802.11b Dongle
- 0004 Sagem 802.11b Dongle
- 0005 XI-735 Prism3 802.11b Adapter
- 0006 Medion 40900 802.11b Adapter
- 0008 Sitecom Wireless Network Adapter 100G+ WL-125
- 0009 (ZD1211)IEEE 802.11b+g Adapter
- 0011 ZD1211
- 0012 AR5523
- 0013 AR5523 driver (no firmware)
- 0014 NB 802.11g Wireless LAN Adapter(3887A)
- 0015 Zoom Wireless-G
- 0016 NB 802.11g Wireless LAN Adapter(3887A)
- 0018 NB 802.11a/b/g Wireless LAN Adapter(3887A)
- 001a ZD1211B
- 001c 802.11b/g Wireless Network Adapter
- 0020 Wi-Fi Wireless LAN Adapter
- 0022 802.11b/g/n Wireless Network Adapter
-0ce9 pico Technology
- 1001 PicoScope3204
-0cf1 e-Conn Electronic Co., Ltd
-0cf2 ENE Technology, Inc.
-0cf3 Atheros Communications, Inc.
- 0001 AR5523
- 0002 AR5523 (no firmware)
- 0003 AR5523
- 0004 AR5523 (no firmware)
- 0005 AR5523
- 0006 AR5523 (no firmware)
-0cf4 Fomtex Corp.
-0cf5 Cellink Co., Ltd
-0cf6 Compucable Corp.
-0cf7 ishoni Networks
-0cf8 Clarisys, Inc.
- 0750 Claritel-i750 - vp
-0cf9 Central System Research Co., Ltd
-0cfa Inviso, Inc.
-0cfc Minolta-QMS, Inc.
-0cff SAFA MEDIA Co., Ltd.
- 0320 SR-380N
-0d06 telos EDV Systementwicklung GmbH
-0d08 UTStarcom
- 0602 DV007 [serial]
- 0603 DV007 [storage]
-0d0b Contemporary Controls
-0d0c Astron Electronics Co., Ltd
-0d0d MKNet Corp.
-0d0e Hybrid Networks, Inc.
-0d0f Feng Shin Cable Co., Ltd
-0d10 Elastic Networks
- 0001 StormPort (WDM)
-0d11 Maspro Denkoh Corp.
-0d12 Hansol Electronics, Inc.
-0d13 BMF Corp.
-0d14 Array Comm, Inc.
-0d15 OnStream b.v.
-0d16 Hi-Touch Imaging Technologies Co., Ltd
- 0001 PhotoShuttle
- 0002 Photo Printer 730 series
- 0004 Photo Printer 63xPL/PS
- 0100 Photo Printer 63xPL/PS
- 0102 Photo Printer 64xPS
- 0103 Photo Printer 730 series
- 0104 Photo Printer 63xPL/PS
- 0105 Photo Printer 64xPS
- 0200 Photo Printer 64xDL
-0d17 NALTEC, Inc.
-0d18 coaXmedia
-0d19 Hank Connection Industrial Co., Ltd
-0d32 Leo Hui Electric Wire & Cable Co., Ltd
-0d33 AirSpeak, Inc.
-0d34 Rearden Steel Technologies
-0d35 Dah Kun Co., Ltd
-0d3a Posiflex Technologies, Inc.
-0d3c Sri Cable Technology, Ltd
-0d3d Tangtop Technology Co., Ltd
- 0001 HID Keyboard
-0d3e Fitcom, inc.
-0d3f MTS Systems Corp.
-0d40 Ascor, Inc.
-0d41 Ta Yun Terminals Industrial Co., Ltd
-0d42 Full Der Co., Ltd
-0d46 Kobil Systems GmbH
- 2012 KAAN Standard Plus (Smartcard reader)
- 3003 mIDentity Light / KAAN SIM III
- 4000 mIDentity (mass storage)
- 4001 mIDentity Basic/Classic (composite device)
- 4081 mIDentity Basic/Classic (installationless)
-0d49 Maxtor
- 3000 Drive
- 3010 3000LE Drive
- 3100 Hi-Speed USB-IDE Bridge Controller
- 5000 5000XT Drive
- 5010 5000LE Drive
- 5020 Mobile Hard Disk Drive
- 7000 OneTouch
- 7010 OneTouch
-0d4a NF Corp.
-0d4b Grape Systems, Inc.
-0d4c Tedas AG
-0d4d Coherent, Inc.
-0d4e Agere Systems Netherland BV
- 047a WLAN Card
- 1000 Wireless Card Model 0801
- 1001 Wireless Card Model 0802
-0d4f EADS Airbus France
-0d50 Cleware GmbH
- 0011 USB-Temp2 Thermometer
-0d51 Volex (Asia) Pte., Ltd
-0d53 HMI Co., Ltd
-0d54 Holon Corp.
-0d55 ASKA Technologies, Inc.
-0d56 AVLAB Technology, Inc.
-0d57 Solomon Microtech, Ltd
-0d5c Belkin
- a002 F5D6050 802.11b Adapter
-0d5e Myacom, Ltd
- 2346 BT Digital Access adapter
-0d5f CSI, Inc.
-0d60 IVL Technologies, Ltd
-0d61 Meilu Electronics (Shenzhen) Co., Ltd
-0d62 Darfon Electronics Corp.
- 0003 Smartcard Reader
- 0004 Filter Driver
- 0306 M530 Mouse
- 0800 Magic Wheel
- 2021 AM805 Keyboard
- 2026 TECOM Bluetooth Device
- a100 Benq Mouse
-0d63 Fritz Gegauf AG
-0d64 DXG Technology Corp.
- 0105 Dual Mode Digital Camera 1.3M
- 0107 Horus MT-409 Camera
- 0108 Dual Mode Digital Camera
- 0202 Dual Mode Video Camera Device
- 0303 DXG-305V Camera
- 1001 SiPix Stylecam/UMAX AstraPix 320s
- 1002 Fashion Cam 01 Dual-Mode DSC (Video Camera)
- 1003 Fashion Cam Dual-Mode DSC (Controller)
- 1021 D-Link DSC 350F
- 1208 Dual Mode Still Camera Device
- 2208 Mass Storage
- 3105 Dual Mode Digital Camera Disk
- 3108 Digicam Mass Storage Device
-0d65 KMJP Co., Ltd
-0d66 TMT
-0d67 Advanet, Inc.
-0d68 Super Link Electronics Co., Ltd
-0d69 NSI
-0d6a Megapower International Corp.
-0d6b And-Or Logic
-0d70 Try Computer Co., Ltd
-0d71 Hirakawa Hewtech Corp.
-0d72 Winmate Communication, Inc.
-0d73 Hit's Communications, Inc.
-0d76 MFP Korea, Inc.
-0d77 Power Sentry/Newpoint
-0d78 Japan Distributor Corp.
-0d7a MARX Datentechnik GmbH
-0d7b Wellco Technology Co., Ltd
-0d7c Taiwan Line Tek Electronic Co., Ltd
-0d7d Phison Electronics Corp.
- 0100 PS1001/1011/1006/1026 Flash Disk
- 0110 Gigabyte FlexDrive
- 0120 Disk Pro 64MB
- 0124 GIGABYTE Disk
- 0240 I/O-Magic/Transcend 6-in-1 Card Reader
- 110e NEC uPD720121/130 USB-ATA/ATAPI Bridge
- 1240 Apacer 6-in-1 Card Reader 2.0
- 1270 Wolverine SixPac 6000
- 1300 Flash Disk
- 1320 PS2031 Flash Disk
- 1400 Attache 256MB USB 2.0 Flash Drive
- 1420 PS2044 Pen Drive
- 1470 Vosonic X's-Drive II+ VP2160
- 1900 USB Thumb Drive
-0d7e American Computer & Digital Components
- 2507 Hi-Speed USB-to-IDE Bridge Controller
- 2517 Hi-Speed Mass Storage Device
- 25c7 Hi-Speed USB-to-IDE Bridge Controller
-0d7f Essential Reality LLC
-0d80 H.R. Silvine Electronics, Inc.
-0d81 TechnoVision
-0d83 Think Outside, Inc.
-0d89 Oz Software
-0d8a King Jim Co., Ltd
- 0101 TEPRA PRO
-0d8b Ascom Telecommunications, Ltd
-0d8c C-Media Electronics, Inc.
- 0001 Audio Device
- 0002 Composite Device
- 0003 Sound Device
- 0006 Storm HP-USB500 5.1 Headset
- 000c Audio Adapter
- 000d Composite Device
- 000e Audio Adapter (Planet UP-100, Genius G-Talk)
- 0102 CM106 Like Sound Device
- 0103 Turtle Beach Audio Advantage Micro
- 0201 CM6501
- 5000 Mass Storage Controller
- 5200 Mass Storage Controller(0D8C,5200)
- b213 USB Phone CM109 (aka CT2000,VPT1000)
-0d8d Promotion & Display Technology, Ltd
- 0234 V-234 Composite Device
- 0550 V-550 Composite Device
- 0551 V-551 Composite Device
- 0552 V-552 Composite Device
- 0651 V-651 Composite Device
- 0652 V-652 Composite Device
- 0653 V-653 Composite Device
- 0654 V-654 Composite Device
- 0655 V-655 Composite Device
- 0656 V-656 Composite Device
- 0657 V-657 Composite Device
- 0658 V-658 Composite Device
- 0659 V-659 Composite Device
- 0660 V-660 Composite Device
- 0661 V-661 Composite Device
- 0662 V-662 Composite Device
- 0850 V-850 Composite Device
- 0851 V-851 Composite Device
- 0852 V-852 Composite Device
- 0901 V-901 Composite Device
- 0902 V-902 Composite Device
- 0903 V-903 Composite Device
- 4754 Voyager DMP Composite Device
- bb00 Bloomberg Composite Device
- bb01 Bloomberg Composite Device
- bb02 Bloomberg Composite Device
- bb03 Bloomberg Composite Device
- bb04 Bloomberg Composite Device
- bb05 Bloomberg Composite Device
- fffe Global Tuner Composite Device
- ffff Voyager DMP Composite Device
-0d8e Global Sun Technology, Inc.
- 0163 802.11g 54 Mbps Wireless Dongle
- 1621 802.11b Wireless Adapter
- 3762 802.11g Wireless Mini adapter
- 3763 802.11g Wireless dongle
- 7100 802.11b Adapter
- 7110 WL-210
- 7801 AR5523
- 7802 AR5523 (no firmware)
- 7811 AR5523
- 7812 AR5523 (no firmware)
- 7a01 PRISM25 802.11b Adapter
-0d8f Pitney Bowes
-0d90 Sure-Fire Electrical Corp.
-0d96 Skanhex Technology, Inc.
- 0000 Jenoptik JD350 video
- 3300 SX330z Camera
- 4100 SX410z Camera
- 4102 MD 9700 Camera
- 4104 Jenoptik JD-4100z3s
- 410a Medion 9801/Novatech SX-410z
- 5200 SX-520z Camera
-0d97 Santa Barbara Instrument Group
- 0001 SBIG Astronomy Camera (without firmware)
- 0101 SBIG Astronomy Camera (with firmware)
-0d98 Mars Semiconductor Corp.
- 0300 Avaya Wireless Card
-0d99 Trazer Technologies, Inc.
-0d9a RTX Telecom AS
- 0001 Bluetooth Device
-0d9b Tat Shing Electrical Co.
-0d9c Chee Chen Hi-Technology Co., Ltd
-0d9d Sanwa Supply, Inc.
-0d9e Avaya
- 0300 Wireless Card
-0d9f Powercom Co., Ltd
-0da0 Danger Research
-0da1 Suzhou Peter's Precise Industrial Co., Ltd
-0da2 Land Instruments International, Ltd
-0da3 Nippon Electro-Sensory Devices Corp.
-0da4 Polar Electro OY
- 0001 Interface
-0da7 IOGear, Inc.
-0da8 softDSP Co., Ltd
- 0001 SDS 200A Oscilloscope
-0dab Cubig Group
- 0100 DVR/CVR-M140 MP3 Player
-0dad Westover Scientific
-0db0 Micro Star International
- 1020 PC2PC WLAN Card
- 1967 Bluetooth Dongle
- 4011 Medion Flash XL V2.0 Card Reader
- 4600 802.11b/g Turbo Wireless Adapter
- 5501 Mass Storage Device
- 5502 Mass Storage Device
- 5513 MP3 Player
- 5515 MP3 Player
- 5516 MP3 Player
- 6823 UB11B/MS-6823 802.11b Wi-Fi adapter
- 6826 IEEE 802.11g Wireless Network Adapter
- 6855 Bluetooth Device
- 6861 MSI-6861 802.11g WiFi adapter
- 6865 RT2570
- 6869 RT2570
- 6874 RT2573
- 6877 RT2573
- 6881 Bluetooth Class I EDR Device
- 688a Bluetooth Class I EDR Device
- 6970 Bluetooth adapter
- 697a Bluetooth Dongle
- 6982 Medion Flash XL Card Reader
- a861 RT2573
- a874 RT2573
- a970 Bluetooth dongle
- a97a Bluetooth EDR Device
- b970 Bluetooth EDR Device
- b97a Bluetooth EDR Device
-0db1 Wen Te Electronics Co., Ltd
-0db2 Shian Hwi Plug Parts, Plastic Factory
-0db3 Tekram Technology Co., Ltd
-0db4 Chung Fu Chen Yeh Enterprise Corp.
-0db7 ELCON Systemtechnik
- 0002 Goldpfeil P-LAN
-0dbe Jiuh Shiuh Precision Industry Co., Ltd
-0dbf Quik Tech Solutions
- 0002 SmartDongle Security Key
- 0200 HDD Storage Solution
- 021b USB-2.0 IDE Adapter
- 0300 Storage Adapter
- 0333 Storage Adapter
- 0707 ZIV Drive
-0dc0 Great Notions
-0dc1 Tamagawa Seiki Co., Ltd
-0dc3 Athena Smartcard Solutions, Inc.
- 0801 ASEDrive III
- 0802 ASEDrive IIIe
- 1104 ASEDrive IIIe KB
- 1701 ASEKey
- 1702 ASEKey
-0dc4 Macpower Peripherals, Ltd
- 0040 Mass Storage Device
- 0041 Mass Storage Device
- 0042 Mass Storage Device
- 0101 Hi-Speed Mass Storage Device
-0dc5 SDK Co., Ltd
-0dc6 Precision Squared Technology Corp.
-0dc7 First Cable Line, Inc.
-0dcd NetworkFab Corp.
- 0001 Remote Interface Adapter
- 0002 High Bandwidth Codec
-0dd0 Access Solutions
- 1002 Triple Talk Speech Synthesizer
-0dd1 Contek Electronics Co., Ltd
-0dd2 Power Quotient International Co., Ltd
- 0003 Mass Storage (P)
-0dd3 MediaQ
-0dd4 Custom Engineering SPA
-0dd5 California Micro Devices
-0dd7 Kocom Co., Ltd
-0dd8 Netac Technology Co., Ltd
- 1060 USB-CF-Card
- e007 OnlyDisk U222 Pendrive
-0dd9 HighSpeed Surfing
-0dda Integrated Circuit Solution, Inc.
- 0001 Multi-Card Reader 6in1
- 0002 Multi-Card Reader 7in1
- 0003 Flash Disk
- 0005 Internal Multi-Card Reader 6in1
- 0008 SD single card reader
- 0009 MS single card reader
- 000a MS+SD Dual Card Reader
- 000b SM single card reader
- 0101 All-In-One Card Reader
- 0102 All-In-One Card Reader
- 0301 MP3 Player
- 0302 Multi-Card MP3 Player
- 1001 Multi-Flash Disk
- 2001 Multi-Card Reader
- 2002 Q018 default PID
- 2003 Multi-Card Reader
- 2005 Datalux DLX-1611 16in1 Card Reader
- 2006 All-In-One Card Reader
- 2007 USB to ATAPI bridge
- 2008 All-In-One Card Reader
- 2013 SD/MS Combo Card Reader
- 2014 SD/MS Single Card Reader
- 2023 card reader SD/MS DEMO board with ICSI brand name (MaskROM version)
- 2024 card reader SD/MS DEMO board with Generic brand name (MaskROM version)
- 2026 USB2.0 Card Reader
- 2027 USB 2.0 Card Reader
- 2315 UFD MP3 player (model 2)
- 2318 UFD MP3 player (model 1)
- 2321 UFD MP3 player
-0ddb Tamarack, Inc.
-0ddd Datelink Technology Co., Ltd
-0dde Ubicom, Inc.
-0de0 BD Consumer Healthcare
-0dea UTECH Electronic (D.G.) Co., Ltd.
-0ded Novasonics
-0dee Lifetime Memory Products
- 4010 Storage Adapter
-0def Full Rise Electronic Co., Ltd
-0df6 Sitecom Europe B.V.
- 0001 C-Media VOIP Device
- 0004 Bluetooth 2.0 Adapter 100m
- 0007 Bluetooth 2.0 Adapter 10m
- 000b Bluetooth 2.0 USB Adapter DFU
- 000d WL-168 Wireless Network Adapter 54g
- 0017 WL-182
- 0019 Bluetooth 2.0 adapter 10m CN-512v2 001
- 001a Bluetooth 2.0 adapter 100m CN-521v2 001
- 061c LN-028
- 21f4 44 St Bluetooth Device
- 2200 Sitecom Bluetooth 2.0 class 2 dongle CN-512
- 2208 Sitecom Bluetooth 2.0 class 2 dongle CN-520
- 2209 Sitecom Bluetooth 2.0 class 1 dongle CN-521
- 9071 zd1211 802.11g Adapter
- 9075 ZD1211B
- 90ac WL-172
- 9712 WL-113 rev 2
-0df7 Mobile Action Technology, Inc.
- 0620 MA-620 Infrared Adapter
- 0700 MA-700 Bluetooth Adapter
- 0720 MA-720 Bluetooth Adapter
- 0722 Bluetooth Dongle
- 0800 Data Cable
- 0820 Data Cable
- 1800 Generic Card Reader
- 1802 Card Reader
-0dfa Toyo Communication Equipment Co., Ltd
-0dfc GeneralTouch Technology Co., Ltd
- 0001 Touchscreen
-0e03 Nippon Systemware Co., Ltd
-0e08 Winbest Technology Co., Ltd
-0e0c Gesytec
- 0101 LonUSB LonTalk Network Adapter
-0e16 JMTek, LLC
-0e17 Walex Electronic, Ltd
-0e1b Crewave
-0e21 Cowon Systems, Inc.
- 0300 iAudio CW200
- 0400 MP3 Player
- 0510 iAudio X5
- 0513 iAudio X5, side USB port
- 0520 iAudio M5
- 0700 iAudio U3
-0e22 Symbian Ltd.
-0e23 Liou Yuane Enterprise Co., Ltd
-0e25 VinChip Systems, Inc.
-0e26 J-Phone East Co., Ltd
-0e30 HeartMath LLC
-0e34 Micro Computer Control Corp.
-0e35 3Pea Technologies, Inc.
-0e36 TiePie engineering
- 0008 Handyscope HS3
- 0009 Handyscope HS3 (br)
- 000a Handyscope HS4
- 000b Handyscope HS4 (br)
- 000e Handyscope HS4 Diff
- 000f Handyscope HS4 Diff (br)
- 0010 Handyscope HS2
- 0018 Handyprobe HP2
- 0042 TiePieSCOPE HS801
- 00fd USB To Parallel adapter
- 00fe USB To Parallel adapter
-0e38 Stratitec, Inc.
-0e39 Smart Modular Technologies, Inc.
- 0137 Bluetooth Device
-0e3a Neostar Technology Co., Ltd
- 1100 CW-1100 Wireless Network Adapter
-0e3b Mansella, Ltd
-0e41 Line6, Inc.
- 4250 BassPODxt
- 4252 BassPODxt Pro
- 4642 BassPODxt Live
- 4650 PODxt Live
- 4750 GuitarPort
- 5044 PODxt
- 5050 PODxt Pro
- 534d SeaMonkey
-0e44 Sun-Riseful Technology Co., Ltd.
-0e48 Julia Corp., Ltd
- 0100 CardPro SmartCard Reader
-0e4a Shenzhen Bao Hing Electric Wire & Cable Mfr. Co.
-0e4c Radica Games, Ltd
-0e55 Speed Dragon Multimedia, Ltd
- 110b MS3303H USB-to-Serial Bridge
-0e56 Kingston Technology Company, Inc.
- 6021 K-PEX 100
-0e5a Active Co., Ltd
-0e5b Union Power Information Industrial Co., Ltd
-0e5c Bitland Information Technology Co., Ltd
- 6118 LCD Device
- 6119 remote receive and control device
- 6441 C-Media Sound Device
-0e5d Neltron Industrial Co., Ltd
-0e66 Hawking
- 400b UF100 10/100 Network Adapter
- 400c UF100 Ethernet [pegasus2]
-0e67 Fossil, Inc.
- 0002 Wrist PDA
-0e6a Megawin Technology Co., Ltd
-0e70 Tokyo Electronic Industry Co., Ltd
-0e72 Hsi-Chin Electronics Co., Ltd
-0e75 TVS Electronics, Ltd
-0e79 Archos, Inc.
- 1106 Pocket Media Assistant - PMA400
-0e7b On-Tech Industry Co., Ltd
-0e7e Gmate, Inc.
- 0001 Yopy 3000 PDA
- 1001 YP3X00 PDA
-0e82 Ching Tai Electric Wire & Cable Co., Ltd
-0e83 Shin An Wire & Cable Co.
-0e8c Well Force Electronic Co., Ltd
-0e8d MediaTek Inc.
-0e8f GreenAsia Inc.
- 0012 Joystick
-0e90 WiebeTech, LLC
- 0100 Storage Adapter V1
-0e91 VTech Engineering Canada, Ltd
-0e92 C's Glory Enterprise Co., Ltd
-0e93 eM Technics Co., Ltd
-0e95 Future Technology Co., Ltd
-0e96 Aplux Communications, Ltd
- c001 TRUST 380 USB2 SPACEC@M
-0e97 Fingerworks, Inc.
-0e98 Advanced Analogic Technologies, Inc.
-0e99 Parallel Dice Co., Ltd
-0e9a TA HSING Industries, Ltd
-0e9b ADTEC Corp.
-0e9c Streamzap, Inc.
- 0000 Streamzap Remote Control
-0e9f Tamura Corp.
-0ea0 Ours Technology, Inc.
- 2126 7-in-1 Card Reader
- 2168 Transcend JetFlash 2.0 / Astone USB Drive
- 6803 OTI-6803 Flash Disk
- 6808 OTI-6808 Flash Disk
- 6828 OTI-6828 Flash Disk
-0ea6 Nihon Computer Co., Ltd
-0ea7 MSL Enterprises Corp.
-0ea8 CenDyne, Inc.
-0ead Humax Co., Ltd
-0eb0 NovaTech
- 9020 NovaTech NV-902W
- 9021 RT2573
-0eb1 WIS Technologies, Inc.
- 6666 WinFast WalkieTV TV Loader
- 6668 WinFast WalkieTV TV Loader
- 7007 WinFast WalkieTV WDM Capture
-0eb2 Y-S Electronic Co., Ltd
-0eb3 Saint Technology Corp.
-0eb7 Endor AG
-0ebe VWeb Corp.
-0ebf Omega Technology of Taiwan, Inc.
-0ec0 LHI Technology (China) Co., Ltd
-0ec1 Abit Computer Corp.
-0ec2 Sweetray Industrial, Ltd
-0ec3 Axell Co., Ltd
-0ec4 Ballracing Developments, Ltd
-0ec5 GT Information System Co., Ltd
-0ec6 InnoVISION Multimedia, Ltd
-0ec7 Theta Link Corp.
- 1008 So., Show 301 Digital Camera
-0ecd Lite-On IT Corp.
- 1400 CD/RW 40X
-0ece TaiSol Electronics Co., Ltd
-0ecf Phogenix Imaging, LLC
-0ed1 WinMaxGroup
- 6660 USB Flash Disk 64M-C
- 6680 USB Flash Disk 64M-B
- 7634 MP3 Player
-0ed2 Kyoto Micro Computer Co., Ltd
-0ed3 Wing-Tech Enterprise Co., Ltd
-0ed5 Fiberbyte
- e000 USB-inSync Device
- f000 Fiberbyte USB-inSync Device
- f201 Fiberbyte USB-inSync DAQ-2500X
-0eda Noritake Itron Corp.
-0edf e-MDT Co., Ltd
- 2060 FID irock! 100 Series
-0ee0 Shima Seiki Mfg., Ltd
-0ee1 Sarotech Co., Ltd
-0ee2 AMI Semiconductor, Inc.
-0ee3 ComTrue Technology Corp.
- 1000 Image Tank 1.5
-0ee4 Sunrich Technology, Ltd
-0eee Digital Stream Technology, Inc.
- 8810 Mass Storage Drive
-0eef D-WAV Scientific Co., Ltd
- 0001 eGalax TouchScreen
- 0002 Touchscreen Controller(Professional)
-0ef0 Hitachi Cable, Ltd
-0ef1 Aichi Micro Intelligent Corp.
-0ef2 I/O Magic Corp.
-0ef3 Lynn Products, Inc.
-0ef4 DSI Datotech
-0ef5 PointChips
- 2202 Flash Disk
- 2366 Flash Disk
-0ef6 Yield Microelectronics Corp.
-0ef7 SM Tech Co., Ltd (Tulip)
-0efd Oasis Semiconductor
-0efe Wem Technology, Inc.
-0f06 Visual Frontier Enterprise Co., Ltd
-0f08 CSL Wire & Plug (Shen Zhen) Co.
-0f0c CAS Corp.
-0f0d Hori Co., Ltd
-0f0e Energy Full Corp.
-0f12 Mars Engineering Corp.
-0f13 Acetek Technology Co., Ltd
-0f19 Oracom Co., Ltd
-0f1b Onset Computer Corp.
-0f1c Funai Electric Co., Ltd
-0f1d Iwill Corp.
-0f21 IOI Technology Corp.
-0f22 Senior Industries, Inc.
-0f23 Leader Tech Manufacturer Co., Ltd
-0f24 Flex-P Industries, Snd., Bhd.
-0f2d ViPower, Inc.
-0f2e Geniality Maple Technology Co., Ltd
-0f2f Priva Design Services
-0f30 Jess Technology Co., Ltd
- 001c PS3 Guitar Controller Dongle
- 0110 10-Button Joypad
-0f31 Chrysalis Development
-0f32 YFC-BonEagle Electric Co., Ltd
-0f37 Kokuyo Co., Ltd
-0f38 Nien-Yi Industrial Corp.
-0f3d Airprime, Incorporated
- 0112 CDMA 1xEVDO PC Card, PC 5220
-0f41 RDC Semiconductor Co., Ltd
-0f42 Nital Consulting Services, Inc.
-0f44 Polhemus
- ef11 Patriot (firmware not loaded)
- ef12 Patriot
- ff11 Liberty (firmware not loaded)
- ff12 Liberty
-0f4b St. John Technology Co., Ltd
-0f4c WorldWide Cable Opto Corp.
-0f4d Microtune, Inc.
- 1000 Bluetooth Dongle
-0f4e Freedom Scientific
-0f52 Wing Key Electrical Co., Ltd
-0f53 Dongguan White Horse Cable Factory, Ltd
-0f54 Kawai Musical Instruments Mfg. Co., Ltd
-0f55 AmbiCom, Inc.
-0f5c Prairiecomm, Inc.
-0f5d NewAge International, LLC
- 9455 Compact Drive
-0f5f Key Technology Corp.
-0f60 NTK, Ltd
-0f61 Varian, Inc.
-0f62 Acrox Technologies Co., Ltd
- 1001 Targus Mini Trackball Optical Mouse
-0f68 Kobe Steel, Ltd
-0f69 Dionex Corp.
-0f6a Vibren Technologies, Inc.
-0f6e INTELLIGENT SYSTEMS
- 0100 GameBoy Color Emulator
- 0201 GameBoy Advance Flash Gang Writer
- 0202 GameBoy Advance Capture
- 0300 Gamecube DOL Viewer
- 0400 NDS Emulator
- 0401 NDS UIC
- 0402 NDS Writer
- 0403 NDS Capture
- 0404 NDS Emulator (Lite)
-0f73 DFI
-0f7c DQ Technology, Inc.
-0f7d NetBotz, Inc.
-0f7e Fluke Corp.
-0f88 VTech Holdings, Ltd
- 3012 RT2570
- 3014 ZD1211B
-0f8b Yazaki Corp.
-0f8c Young Generation International Corp.
-0f8d Uniwill Computer Corp.
-0f8e Kingnet Technology Co., Ltd
-0f8f Soma Networks
-0f97 CviLux Corp.
-0f98 CyberBank Corp.
-0f9c Hyun Won, Inc.
- 0301 M-Any Premium DAH-610 MP3/WMA Player
- 0332 mobiBLU DAH-1200 MP3/Ogg Player
-0f9e Lucent Technologies
-0fa3 Starconn Electronic Co., Ltd
-0fa4 ATL Technology
-0fa5 Sotec Co., Ltd
-0fa7 Epox Computer Co., Ltd
-0fa8 Logic Controls, Inc.
-0faf Winpoint Electronic Corp.
-0fb0 Haurtian Wire & Cable Co., Ltd
-0fb1 Inclose Design, Inc.
-0fb2 Juan-Chern Industrial Co., Ltd
-0fb8 Wistron Corp.
- 0002 eHome Infrared Receiver
-0fb9 AACom Corp.
-0fba San Shing Electronics Co., Ltd
-0fbb Bitwise Systems, Inc.
-0fc1 Mitac International Corp.
-0fc2 Plug and Jack Industrial, Inc.
-0fc5 Delcom Engineering
- 1222 I/O Development Board
-0fc6 Dataplus Supplies, Inc.
-0fca Research In Motion, Ltd.
- 0001 Blackberry Handheld
-0fce Sony Ericsson Mobile Communications AB
- 1010 WMC Modem
- d008 V800-Vodafone 802SE WMC Modem
- d016 K750i Phone
- d017 K608i Phone
- d019 VDC EGPRS Modem
- d025 520 WMC Data Modem
- d038 W850i Phone
- d041 K510i Phone
- d042 W810i Phone
- d046 K610i Phone
-0fcf Dynastream Innovations, Inc.
-0fd0 Tulip Computers B.V.
-0fd1 Giant Electronics Ltd.
-0fd4 Tenovis GmbH & Co., KG
-0fd5 Direct Access Technology, Inc.
-0fdc Micro Plus
-0fe4 IN-Tech Electronics, Ltd
-0fe5 Greenconn (U.S.A.), Inc.
-0fe9 DVICO
- db00 FusionHDTV DVB-T (MT352+LgZ201) (uninitialized)
- db01 FusionHDTV DVB-T (MT352+LgZ201) (initialized)
- db10 FusionHDTV DVB-T (MT352+Thomson7579) (uninitialized)
- db11 FusionHDTV DVB-T (MT352+Thomson7579) (initialized)
-0fea United Computer Accessories
-0feb CRS Electronic Co., Ltd
-0fec UMC Electronics Co., Ltd
-0fed Access Co., Ltd
-0fee Xsido Corp.
-0fef MJ Research, Inc.
-0ff6 Core Valley Co., Ltd
-0ff7 CHI SHING Computer Accessories Co., Ltd
-0fff Aopen, Inc.
-1000 Speed Tech Corp.
-1001 Ritronics Components (S) Pte., Ltd
-1003 Sigma Corp.
- 0100 Sigma SD10
-1004 LG Electronics, Inc.
- 1fae U8120 3G Cellphone
- 6000 VX4400/VX6000 Cellphone
- 6005 T5100
- 6800 CDMA Modem
- 7000 LG LDP-7024D(LD)USB
-1005 Apacer Technology, Inc.
- 1001 MP3 Player
- 1004 MP3 Player
- 1006 MP3 Player
- b113 Handy Steno 2.0/HT203
- b223 CD-RW + 6 in 1 Card Reader Digital Storage / Converter
-1006 iRiver, Ltd.
- 3001 iHP-100
- 3002 iHP-120/140 MP3 Player
- 3003 H320/H340
- 3004 H340 (mtp)
-1009 Emuzed, Inc.
- 000e eHome Infrared Receiver
- 0013 Angel MPEG Device
- 0015 Lumanate Wave PAL SECAM DVBT Device
- 0016 Lumanate Wave NTSC/ATSC Combo Device
-100a AV Chaseway, Ltd
- 2402 MP3 Player
- 2404 MP3 Player
- 2405 MP3 Player
- 2406 MP3 Player
- a0c0 MP3 Player
-100b Chou Chin Industrial Co., Ltd
-100d Netopia, Inc.
- 3342 Cayman 3352 DSL Modem
- 3382 3380 Series Network Interface
- cb01 Cayman 3341 Ethernet DSL Router
-1010 Fukuda Denshi Co., Ltd
-1011 Mobile Media Tech.
- 0001 AccFast Mp3
-1012 SDKM Fibres, Wires & Cables Berhad
-1013 TST-Touchless Sensor Technology AG
-1014 Densitron Technologies PLC
-1015 Softronics Pty., Ltd
-1016 Xiamen Hung's Enterprise Co., Ltd
-1017 Speedy Industrial Supplies, Pte., Ltd
-1019 Elitegroup Computer Systems (ECS)
- 0c55 USB Flash Reader, Desknote UCR-61S2B
-1020 Labtec
- 000a Wireless Optical Mouse
-1022 Shinko Shoji Co., Ltd
-1025 Hyper-Paltek
- 005e USB DVB-T device
- 005f USB DVB-T device
- 0300 MP3 Player
- 0350 MP3 Player
-1026 Newly Corp.
-1027 Time Domain
-1028 Inovys Corp.
-1029 Atlantic Coast Telesys
-102a Ramos Technology Co., Ltd
-102b Infotronic America, Inc.
-102c Etoms Electronics Corp.
- 6251 Q-Cam
-102d Winic Corp.
-1031 Comax Technology, Inc.
-1032 C-One Technology Corp.
-1033 Nucam Corp.
- 0068 3,5'' HDD case MD-231
-1038 Ideazon, Inc.
- 0100 Zboard
-1039 devolo AG
- 2140 dsl+ 1100 duo
-103d Stanton
- 0100 ScratchAmp
- 0101 ScratchAmp
-1043 iCreate Technologies Corp.
- 160f Wireless Network Adapter
- 4901 AV-836 Video Capture Device
- 8006 Flash Disk 32-256 MB
-1044 Chu Yuen Enterprise Co., Ltd
- 7001 U7000 TV tuner device
- 8001 GN-54G
- 8002 GN-BR402W
- 8003 GN-WLBM101
- 8004 GN-WLBZ101 802.11b Adapter
- 8005 GN-WLBZ201 802.11b Adapter
- 8006 GN-WBZB-M 802.11b Adapter
- 8007 GN-WBKG
- 8008 GN-WB01GS
- 800a GN-WI05GS
- 800b GN-WB30N 802.11n WLAN Card
-1046 Winbond Electronics Corp. [hex]
- 8901 Bluetooth Device
- 9967 W9967CF/W9968CF WebCam IC
-1048 Targus Group International
-104c AMCO TEC International, Inc.
-1053 Immanuel Electronics Co., Ltd
-1054 BMS International Beheer N.V.
- 5004 DSL 7420 Loader
- 5005 DSL 7420 LAN Modem
-1055 Complex Micro Interconnection Co., Ltd
-1056 Hsin Chen Ent Co., Ltd
-1057 ON Semiconductor
-1058 Western Digital Technologies, Inc.
- 0200 Firewire USB Combo
- 0400 External HDD
- 0500 hub
- 0702 Passport External HDD
- 0901 MyBook External HDD
- 1001 External Hard Disk
-1059 Giesecke & Devrient GmbH
- 000b StarSign Bio Token 3.0
-105c Hong Ji Electric Wire & Cable (Dongguan) Co., Ltd
-105d Delkin Devices, Inc.
-105e Valence Semiconductor Design, Ltd
-105f Chin Shong Enterprise Co., Ltd
-1060 Easthome Industrial Co., Ltd
-1063 Motorola Electronics Taiwan, Ltd [hex]
- 1555 MC141555 Hub
- 4100 SB4100 USB Cable Modem
-1065 CCYU Technology
- 0020 USB-DVR2 Dev Board
- 2136 EasyDisk ED1064
-106a Loyal Legend, Ltd
-106c Curitel Communications, Inc.
- 1101 CDMA 2000 1xRTT USB modem (HX-550C)
- 1102 Packet Service
- 1103 Packet Service Diagnostic Serial Port (WDM)
- 1104 Packet Service Diagnostic Serial Port (WDM)
- 1105 Composite Device
- 1106 Packet Service Diagnostic Serial Port (WDM)
- 1301 Composite Device
- 1302 Packet Service Diagnostic Serial Port (WDM)
- 1303 Packet Service
- 1304 Packet Service
- 1401 Composite Device
- 1402 Packet Service
- 1403 Packet Service Diagnostic Serial Port (WDM)
- 1501 Packet Service
- 1502 Packet Service Diagnostic Serial Port (WDM)
- 1503 Packet Service
- 1601 Packet Service
- 1602 Packet Service Diagnostic Serial Port (WDM)
- 1603 Packet Service
- 2101 AudioVox 8900 Cell Phone
- 2102 Packet Service
- 2103 Packet Service Diagnostic Serial Port (WDM)
- 2301 Packet Service
- 2302 Packet Service Diagnostic Serial Port (WDM)
- 2303 Packet Service
- 2401 Packet Service Diagnostic Serial Port (WDM)
- 2402 Packet Service
- 2403 Packet Service Diagnostic Serial Port (WDM)
- 2501 Packet Service
- 2502 Packet Service Diagnostic Serial Port (WDM)
- 2503 Packet Service
- 2601 Packet Service
- 2602 Packet Service Diagnostic Serial Port (WDM)
- 2603 Packet Service
- 3701 Broadband Wireless modem
- 3702 Pantech PX-500
- 3eb4 Packet Service Diagnostic Serial Port (WDM)
- 4101 Packet Service Diagnostic Serial Port (WDM)
- 4102 Packet Service
- 4301 Composite Device
- 4302 Packet Service Diagnostic Serial Port (WDM)
- 4401 Composite Device
- 4402 Packet Service
- 4501 Packet Service
- 4502 Packet Service Diagnostic Serial Port (WDM)
- 4601 Composite Device
- 4602 Packet Service Diagnostic Serial Port (WDM)
- 5101 Packet Service
- 5102 Packet Service Diagnostic Serial Port (WDM)
- 5301 Packet Service Diagnostic Serial Port (WDM)
- 5302 Packet Service
- 5401 Packet Service
- 5402 Packet Service Diagnostic Serial Port (WDM)
- 5501 Packet Service Diagnostic Serial Port (WDM)
- 5502 Packet Service
- 5601 Packet Service Diagnostic Serial Port (WDM)
- 5602 Packet Service
- 7101 Composite Device
- 7102 Packet Service
- a000 Packet Service
- a001 Packet Service Diagnostic Serial Port (WDM)
- c100 Packet Service
- c200 Packet Service
- c500 Packet Service Diagnostic Serial Port (WDM)
- e200 Packet Service
-106d San Chieh Manufacturing, Ltd
-106e ConectL
-106f Money Controls
-1076 GCT Semiconductor, Inc.
- 0031 Bluetooth Device
- 0032 Bluetooth Device
-107d Arlec Australia, Ltd
-107e Midoriya Electric Co., Ltd
-107f KidzMouse, Inc.
-1082 Shin-Etsukaken Co., Ltd
-1083 Canon Electronics, Inc.
-1084 Pantech Co., Ltd
-108a Chloride Power Protection
-108b Grand-tek Technology Co., Ltd
-108c Robert Bosch GmbH
-108e Lotes Co., Ltd.
-1099 Surface Optics Corp.
-109a DATASOFT Systems GmbH
-109f eSOL Co., Ltd
- 3163 Trigem Mobile SmartDisplay84
- 3164 Trigem Mobile SmartDisplay121
-10a0 Hirotech, Inc.
-10a3 Mitsubishi Materials Corp.
-10a9 SK Teletech Co., Ltd
-10aa Cables To Go
-10ab USI Co., Ltd
- 1002 Bluetooth Device
- 1003 BC02-EXT in DFU
- 1005 Bluetooth Adptr
- 1006 BC04-EXT in DFU
- 10c5 Sony-Ericsson / Samsung DataCable
-10ac Honeywell, Inc.
-10ae Princeton Technology Corp.
-10af Liebert Corp.
- 0000 UPS
- 0001 PowerSure PSA UPS
- 0002 PowerSure PST UPS
- 0003 PowerSure PSP UPS
- 0004 PowerSure PSI UPS
- 0005 UPStation GXT 2U UPS
- 0006 UPStation GXT UPS
- 0007 Nfinity Power Systems UPS
- 0008 PowerSure Interactive UPS
-10b5 Comodo (PLX?)
- 9060 Test Board
-10b8 DiBcom
- 0bb8 DiBcom USB DVB-T reference design (MOD300) (cold)
- 0bb9 DiBcom USB DVB-T reference design (MOD300) (warm)
- 0bc6 DiBcom USB2.0 DVB-T reference design (MOD3000P) (cold)
- 0bc7 DiBcom USB2.0 DVB-T reference design (MOD3000P) (warm)
-10bb TM Technology, Inc.
-10bc Dinging Technology Co., Ltd
-10bd TMT Technology, Inc.
- 1427 Ethernet
-10bf SmartHome
- 0001 SmartHome PowerLinc
-10c4 Cygnal Integrated Products, Inc.
- 0002 F32x USBXpress Device
- 80a9 CP210x to UART Bridge Controller
- 80ca ATM2400 Sensor Device
- ea60 CP210x Composite Device
-10c5 Sanei Electric, Inc.
-10c6 Intec, Inc.
-10cb Eratech
-10cc GBM Connector Co., Ltd
- 1101 MP3 Player
-10cd Kycon, Inc.
-10ce Silicon Labs
- ea6a MobiData EDGE USB Modem
-10cf Velleman Components, Inc.
- 5500 8055 Experiment Interface Board (address=0)
- 5501 8055 Experiment Interface Board (address=1)
- 5502 8055 Experiment Interface Board (address=2)
- 5503 8055 Experiment Interface Board (address=3)
-10d1 Hottinger Baldwin Measurement
- 0101 USB-Module for Spider8, CP32
- 0202 CP22 - Communication Processor
- 0301 CP42 - Communication Processor
-10d4 Man Boon Manufactory, Ltd
-10d5 Uni Class Technology Co., Ltd
-10d6 Actions Semiconductor Co., Ltd
- 1000 MP3 Player
- 1100 MPMan MP-Ki 128 MP3 Player/Recorder
- 1101 D-Wave 2GB MP4 Player
- 8888 ADFU Device
- ff51 ADFU Device
-10de Authenex, Inc.
-10df In-Win Development, Inc.
-10e0 Post-Op Video, Inc.
-10e1 CablePlus, Ltd
-10e2 Nada Electronics, Ltd
-10ec Vast Technologies, Inc.
-10f5 Turtle Beach
- 0200 Audio Advantage Roadie
-10fb Pictos Technologies, Inc.
-10fd Anubis Electronics, Ltd
- 804d Typhoon Webshot II Webcam [zc0301]
- 8050 FlyCAM-USB 300 XP2
- de00 WinFast WalkieTV WDM Capture Driver.
-1100 VirTouch, Ltd
- 0001 VTPlayer VTP-1 Braille Mouse
-1101 EasyPass Industrial Co., Ltd
- 0001 FSK Electronics Super GSM Reader
-1108 Brightcom Technologies, Ltd
-1110 Analog Devices Canada, Ltd (Allied Telesyn)
- 5c01 Huawei MT-882 Remote NDIS Network Device
- 6489 ADSL ETH/USB RTR
- 9000 ADSL LAN Adapter
- 9001 ADSL Loader
- 900f AT-AR215 DSL Modem
- 9010 AT-AR215 DSL Modem
- 9021 ADSL WAN Adapter
- 9022 ADSL Loader
- 9023 ADSL WAN Adapter
- 9024 ADSL Loader
- 9031 ADSL LAN Adapter
- 9032 ADSL Loader
-1111 Pandora International Ltd.
- 8888 Evolution Device
-1112 YM ELECTRIC CO., Ltd
-1113 Medion AG
-111e VSO Electric Co., Ltd
-112e Master Hill Electric Wire and Cable Co., Ltd
-112f Cellon International, Inc.
-1130 Tenx Technology, Inc.
- f211 USB audio headset
-1131 Integrated System Solution Corp.
- 1001 KY-BT100 Bluetooth Adapter
- 1002 Bluetooth Device
- 1003 Bluetooth Device
- 1004 Bluetooth Device
-1132 Toshiba Corp., Digital Media Equipment [hex]
- 4331 PDR-M4/M5/M70 Digital Camera
- 4332 PDR-M60 Digital Camera
- 4333 PDR-M2300/PDR-M700
- 4334 PDR-M65
- 4335 PDR-M61
- 4337 PDR-M11
- 4338 PDR-M25
-113c Arin Tech Co., Ltd
-113d Mapower Electronics Co., Ltd
-1141 V One Multimedia, Pte., Ltd
-1142 CyberScan Technologies, Inc.
-1145 Japan Radio Company
- 0001 AirH PHONE AH-J3001V/J3002V
-1146 Shimane SANYO Electric Co., Ltd.
-1147 Ever Great Electric Wire and Cable Co., Ltd
-114b Sphairon Access Systems GmbH
- 0110 Turbolink UB801R WLAN USB Adapter
-114c Tinius Olsen Testing Machine Co., Inc.
-114d Alpha Imaging Technology Corp.
-115b Salix Technology Co., Ltd.
-1162 Secugen Corp.
-1163 DeLorme Publishing, Inc.
- 0100 Earthmate GPS
-1164 YUAN High-Tech Development Co., Ltd
- 0300 ELSAVISION 460D
- 0601 Analog TV Tuner
- 0900 TigerBird BMP837 USB2.0 WDM Encoder
- 0bc7 Digital TV Tuner
-1165 Telson Electronics Co., Ltd
-1166 Bantam Interactive Technologies
-1167 Salient Systems Corp.
-1168 BizConn International Corp.
-116e Gigastorage Corp.
-116f Silicon 10 Technology Corp.
-1175 Shengyih Steel Mold Co., Ltd
-117d Santa Electronic, Inc.
-117e JNC, Inc.
-1182 Venture Corp., Ltd
-1183 Compaq Computer Corp. [hex] (Digital Dream ??)
- 0001 DigitalDream l'espion XS
- 19c7 ISDN TA
- 4008 56k FaxModem
- 504a PJB-100 Personal Jukebox
-1184 Kyocera Elco Corp.
-1188 Bloomberg L.P.
-1189 Acer Communications & Multimedia
- 0893 EP-1427X-2 Ethernet Adapter
-118f You Yang Technology Co., Ltd
-1190 Tripace
-1191 Loyalty Founder Enterprise Co., Ltd
-1196 Yankee Robotics, LLC
- 0010 Trifid Camera without code
- 0011 Trifid Camera
-1197 Technoimagia Co., Ltd
-1198 StarShine Technology Corp.
-1199 Sierra Wireless, Inc.
- 0019 AC595U
- 0021 AC597E
- 0110 Composite Device
- 0112 CDMA 1xEVDO PC Card, AirCard 580
- 0120 AC595U
- 0218 MC5720 Wireless Modem
- 6467 MP Series Network Adapter
- 6468 MP Series Network Adapter
- 6469 MP Series Network Adapter
- 6802 MC8755 Device
- 6803 MC8765 Device
- 6804 MC8755 Device
- 6805 MC8765 Device
- 6812 MC8775 Device
- 6820 AC875 Device
- 6832 MC8780 Device
- 6833 MC8781 Device
- 683a MC8785 Device
- 6850 AirCard 880 Device
- 6851 AirCard 881 Device
- 6852 AirCard 880E Device
- 6853 AirCard 881E Device
- 6854 AirCard 885 Device
- 6870 MC8780 Device
- 6871 MC8781 Device
-119a ZHAN QI Technology Co., Ltd
-119b ruwido austria GmbH
- 0400 Infrared Keyboard V2.01
-11a0 Chipcon AS
- eb11 CC2400EB 2.0 ZigBee Sniffer
-11a3 Technovas Co., Ltd
- 8031 MP3 Player
- 8032 MP3 Player
-11aa GlobalMedia Group, LLC
- 1518 iREZ K2
-11ab Exito Electronics Co., Ltd
-11b0 ATECH FLASH TECHNOLOGY
-11db Topfield Co., Ltd.
- 1000 PVR
- 1100 PVR
-11e6 K.I. Technology Co. Ltd.
-11f5 Siemens AG (?)
- 0001 SX1
- 0003 Mobile phone USB cable
- 0004 X75
-11f6 Prolific
- 2001 Willcom WSIM
-11f7 Alcatel (?)
- 02df TD10 Mobile phone USB cable
-1209 InterBiometrics
- 1001 USB Hub
- 1002 USB Relais
- 1003 IBSecureCam-P
- 1004 IBSecureCam-O
- 1005 IBSecureCam-N
-120e Hudson Soft Co., Ltd
-121e Jungsoft Co., Ltd
- 3403 Muzio JM250 Audio Player
-1223 SKYCABLE ENTERPRISE. CO., LTD.
-1230 Chipidea-Microelectronica, S.A.
-1235 Novation EMS
- 0001 ReMOTE Audio/XStation
- 0002 Speedio
- 4661 ReMOTE25
-1241 Belkin
- 1111 Mouse
- 1122 Typhoon Stream Optical Mouse USB+PS/2
- 1155 PS2/USB Browser Combo Mouse
- 1166 MI-2150 Trust Mouse
- 1177 F8E842-DL Mouse
- 1503 Keyboard
-124a AirVast
- 4017 PC-Chips 802.11b Adapter
-124b Nyko (Honey Bee)
- 4d01 Airflo EX Joystick
-125f A-DATA Technology Co., Ltd.
-1264 Covidien Energy-based Devices
-1267 Logic3 / SpectraVideo plc
- 0103 G-720 Keyboard
- 0201 A4Tech SWOP-3 Mouse
- a001 JP260 PC Game Pad
- c002 Wireless Optical Mouse
-126c Aristocrat Technologies
-126d Bel Stewart
-126e Strobe Data, Inc.
-126f TwinMOS
- 1325 Mobile Disk
- 2168 Mobile Disk III
- a006 G240
-1275 Xaxero Marine Software Engineering, Ltd.
- 0002 WeatherFax 2000 Demodulator
- 0080 SkyEye Weather Satellite Receiver
-1286 Marvell Semiconductor, Inc.
- 8001 BLOB boot loader firmware
-1292 Innomedia
- 0258 Creative Labs VoIP Blaster
-1293 Belkin Components [hex]
- 0002 F5U002 Parallel Port [uss720]
- 2101 104-key keyboard
-1294 RISO KAGAKU CORP.
-129b CyberTAN Technology
- 1666 TG54USB
-12a7 Trendchip Technologies Corp.
-12ab Honey Bee Electronic International Ltd.
-12ba Licensed by Sony Computer Entertainment America
- 0200 Harmonix Guitar for PlayStation(R)3
- 0210 Harmonix Drum Kit for PlayStation(R)3
-12d1 Huawei Technologies Co., Ltd.
- 1001 E620 USB Modem
- 1003 E220 HSDPA Modem / E270 HSDPA/HSUPA Modem
-12d2 LINE TECH INDUSTRIAL CO., LTD.
-12d7 BETTER WIRE FACTORY CO., LTD.
-12ef Tapwave, Inc.
- 0100 Tapwave Handheld [Tapwave Zodiac]
-12f5 Dynamic System Electronics Corp.
-12f7 Memorex Products, Inc.
- 1a00 TD Classic 003B
- 1e23 TravelDrive 2007 Flash Drive
-12fd AIN Comm. Technology Co., Ltd
- 1001 AWU2000b 802.11b Stick
-1307 Transcend Information, Inc.
- 0163 512MB USB Flash Drive
- 1169 TS2GJF210 JetFlash 210 2GB
-1310 Roper
- 0001 Class 1 Bluetooth Dongle
-1312 ICS Electronics
-131d Natural Point
- 0155 TrackIR 3 Pro Head Tracker
-132b Konica Minolta
- 0000 Dimage A2 Camera
- 0001 Minolta DiMAGE A2 (ptp)
- 0003 Dimage Xg Camera
- 0006 Dimage Z2 Camera
- 0007 Minolta DiMAGE Z2 (PictBridge mode)
- 0008 Dimage X21 Camera
- 000a Dimage Scan Dual IV
- 000b Dimage Z10 Camera
- 000d Dimage X50 Camera [storage?]
- 000f Dimage X50 Camera [p2p?]
- 0010 Dimage G600 Camera
- 0012 Dimage Scan Elite5400 2
- 0013 Dimage X31 Camera
- 0015 Dimage G530 Camera
- 0017 Dimage Z3 Camera
- 0018 Minolta DiMAGE Z3 (PictBridge mode)
- 0019 Dimage A200 Camera
- 0021 Dimage Z5 Camera
- 0022 Minolta DiMAGE Z5 (PictBridge mode)
-1342 Mobility
- 0200 EasiDock 200 Hub
- 0201 EasiDock 200 Keyboard and Mouse Port
- 0202 EasiDock 200 Serial Port
- 0203 EasiDock 200 Printer Port
- 0204 Ethernet
- 0304 EasiDock Ethernet
-1348 Katsuragawa Electric Co., Ltd.
-134e Digby's Bitpile, Inc. DBA D Bit
-136b STEC
-1370 Swissbit
- 6828 Victorinox Flash Drive
-1371 Dick Smith Electronics
- 9022 RT2573
- 9032 C-Net CWD-854 rev F
-1376 Vimtron Electronics Co., Ltd.
-1385 Netgear, Inc
- 4250 WG111T
- 4251 WG111T (no firmware)
- 5f00 WPN111 RangeMax(TM) Wireless USB 2.0 Adapter
- 5f01 WPN111 (no firmware)
-138e Jungo LTD
- 9000 Raisonance S.A. STM32 ARM evaluation board
-1390 TOMTOM B.V.
-1395 Sennheiser Communications
- 3556 USB Headset
-1398 Q-tec
- 2103 USB 2.0 Storage Device
-13ad Baltech
- 9999 Card reader
-13b0 PerkinElmer Optoelectronics
- 000a Alesis Photon X25 MIDI Controller
-13b1 Linksys
- 000b WUSB11 v4.0 802.11b Adapter
- 000d WUSB54G Wireless Adapter
- 0011 WUSB54GP v4.0 802.11g Adapter
- 0018 USB200M 10/100 Ethernet Adapter
- 001a HU200TS Wireless Adapter
- 0020 WUSB54GC 802.11g Adapter [ralink rt73]
- 0023 WUSB54GR
- 0024 WUSBF54G v1.1 802.11g Adapter w/ Wi-Fi Finder
-13b3 Nippon Dics Co., Ltd.
-13be Ricoh Printing Systems, Ltd.
-13ca JyeTai Precision Industrial Co., Ltd.
-13cf Wisair Ltd.
-13d1 A-Max Technology Macao Commercial Offshore Co. Ltd.
-13d2 Shark Multimedia
- 0400 Pocket Ethernet [klsi]
-13d3 IMC Networks
- 3201 VisionDTV USB-Ter/HAMA USB DVB-T device cold
- 3202 VisionDTV USB-Ter/HAMA USB DVB-T device warm
- 3203 DTV-DVB UDST7020BDA DVB-S Box(DVBS for MCE2005)
- 3204 DTV-DVB UDST7020BDA DVB-S Box(DVBS for MCE2005)
- 3205 DNTV Live! Tiny USB2 BDA (No Remote)
- 3206 DNTV Live! Tiny USB2 BDA (No Remote)
- 3207 DTV-DVB UDST7020BDA DVB-S Box(DVBS for MCE2005)
- 3208 DTV-DVB UDST7020BDA DVB-S Box(DVBS for MCE2005)
- 3209 DTV-DVB UDST7022BDA DVB-S Box(Without HID)
- 3211 DTV-DVB Hybrid Analog/Capture / Pinnacle PCTV 310e
- 3212 DTV-DVB UDTT704C - DVBT/NTSC/PAL Driver(PCM4)
- 3213 DTV-DVB UDTT704D - DVBT/NTSC/PAL Driver (PCM4)
- 3214 DTV-DVB UDTT704F -(MiniCard) DVBT/NTSC/PAL Driver(Without HID)
- 3215 DTV-DVB UDAT7240 - ATSC/NTSC/PAL Driver(PCM4)
- 3216 DTV-DVB UDTT 7047-USB 2.0 DVB-T Driver
- 3217 Digital-TV Receiver.
- 3219 DTV-DVB UDTT7049 - DVB-T Driver(Without HID)
- 3220 DTV-DVB UDTT 7047M-USB 2.0 DVB-T Driver
- 3223 DNTV Live! Tiny USB2 BDA (No Remote)
- 3224 DNTV Live! Tiny USB2 BDA (No Remote)
- 3226 DigitalNow TinyTwin DVB-T Receiver
- 3236 DTV-DVB UDTT 7047A-USB 2.0 DVB-T Driver
- 3237 DTV-DVB UDTT 704J - dual DVB-T Driver
- 3239 DTV-DVB UDTT704D - DVBT/NTSC/PAL Driver(Without HID)
- 3240 DTV-DVB UDXTTM6010 - A/D Driver(Without HID)
- 3241 DTV-DVB UDXTTM6010 - A/D Driver(Without HID)
- 3242 DTV-DVB UDAT7240LP - ATSC/NTSC/PAL Driver(Without HID)
- 3243 DTV-DVB UDXTTM6010 - A/D Driver(Without HID)
- 3244 DTV-DVB UDTT 7047Z-USB 2.0 DVB-T Driver
- 3247 802.11 n/g/b Wireless LAN Adapter
- 7020 DTV-DVB UDST7020BDA DVB-S Box(DVBS for MCE2005)
- 7022 DTV-DVB UDST7022BDA DVB-S Box(Without HID)
-13dc ALEREON, INC.
-13dd i.Tech Dynamic Limited
-13e1 Kaibo Wire & Cable (Shenzhen) Co., Ltd.
-13e5 Rane
- 0001 SL-1
-13e6 TechnoScope Co., Ltd.
-13fd Initio Corporation
-13fe Kingston Technology Company Inc.
- 1a00 512MB/1GB Flash Drive
- 1a23 512MB Flash Drive
- 1d00 DataTraveler 2.0 1GB/4GB Flash Drive / Patriot Xporter 4GB Flash Drive
- 1f00 DataTraveler 2.0 4GB Flash Drive
-1400 Axxion Group Corp.
-1402 Bowe Bell & Howell
-1403 Sitronix
- 0001 Digital Photo Frame
-140e Telechips, Inc.
-1410 Novatel Wireless
- 1110 Merlin S620
- 1120 Merlin EX720
- 1130 Merlin S720
- 1400 Merlin U740
- 2110 Ovation U720/MCD3000
- 4100 U727
-1415 Nam Tai E&E Products Ltd. or OmniVision Technologies, Inc.
- 0000 Sony SingStar USBMIC
- 2000 Sony Playstation Eye
-1419 ABILITY ENTERPRISE CO., LTD.
-1429 Vega Technologies Industrial (Austria) Co.
-1430 RedOctane
-1431 Pertech Resources, Inc.
-1435 Wistron NeWeb
- 0711 UR055G
- 0826 AR5523
- 0827 AR5523 (no firmware)
- 0828 AR5523
- 0829 AR5523 (no firmware)
-1436 Denali Software, Inc.
-143c Altek Corporation
-1453 Radio Shack
- 4026 26-183 Serial Cable
-1456 Extending Wire & Cable Co., Ltd.
-1457 First International Computer, Inc.
- 5117 OpenMoko Neo1973 kernel usbnet (g_ether, CDC Ethernet) mode
- 5118 OpenMoko Neo1973 Debug board (V2+)
- 5119 OpenMoko Neo1973 u-boot cdc_acm serial port
- 5120 OpenMoko Neo1973 u-boot usbtty generic serial
- 5121 OpenMoko Neo1973 kernel mass storage (g_storage) mode
- 5122 OpenMoko Neo1973 kernel cdc_ether USB network
- 5123 OpenMoko Neo1973 internal USB CSR4 module
- 5124 OpenMoko Neo1973 Bluetooth Device ID service
-1461 Staccato Communications
-1462 Micro Star International
- 5512 MegaStick-1 Flash Stick
-1472 Huawei-3Com
- 0009 Aolynk WUB320g
-147a Formosa Industrial Computing, Inc.
- e015 eHome Infrared Receiver
- e016 eHome Infrared Receiver
-147f Hama GmbH & Co., KG
-1484 Elsa AG [hex]
- 1746 Ecomo 19H99 Monitor
- 7616 Elsa Hub
-1485 Silicom
- 0001 U2E
- 0002 Psion Gold Port Ethernet
-1487 DSP Group, Ltd.
-148e EVATRONIX SA
-148f Ralink Technology, Corp.
- 1706 RT2500USB Wireless Adapter
- 2570 802.11g WiFi
- 2573 RT2501USB Wireless Adapter
- 2671 RT2601USB Wireless Adapter
- 9020 RT2500USB Wireless Adapter
- 9021 RT2501USB Wireless Adapter
-1497 Panstrong Company Ltd.
-149a Imagination Technologies
- 2107 DBX1 DSP core
-14aa AVerMedia (again) or C&E
- 0001 Avermedia AverTV DVBT USB1.1 (cold)
- 0002 Avermedia AverTV DVBT USB1.1 (warm)
- 0201 AVermedia/Yakumo/Hama/Typhoon DVB-T USB2.0 (cold)
- 0221 AVermedia DVBT Tuner Dongle
- 0301 AVermedia/Yakumo/Hama/Typhoon DVB-T USB2.0 (warm)
-14ad CTK Corporation
-14ae Printronix Inc.
-14af ATP Electronics Inc.
-14b0 StarTech.com Ltd.
-14b2 Atheros Communications Inc
- 3a93 USB WLAN Device
- 3c02 C54RU WLAN
- 3c22 C54RU
-14c0 Rockwell Automation, Inc.
-14c2 Gemlight Computer, Ltd
- 0250 Storage Adapter V2
- 0350 Storage Adapter V2
-14cd Super Top
- 6600 USB 2.0 IDE DEVICE
-14d8 JAMER INDUSTRIES CO., LTD.
-14dd Raritan Computer, Inc.
-14e5 SAIN Information & Communications Co., Ltd.
-14ea Planex Communications
- ab10 GW-US54GZ
- ab11 GU-1000T
- ab13 GW-US54Mini
-14ed Shure Inc.
-1500 Ellisys
-1501 Pine-Tum Enterprise Co., Ltd.
-1513 Hypercom
-1516 CompUSA
- 8628 128M Pen Drive
-1518 Cheshire Engineering Corp.
- 0001 HDReye High Dynamic Range Camera
- 0002 HDReye (before firmware loads)
-1520 Bitwire Corp.
-1524 ENE Technology Inc
- 6680 UTS 6680
-1527 Silicon Portals
- 0200 YAP Phone (no firmware)
- 0201 YAP Phone
-1529 UBIQUAM Co., Ltd.
- 3100 CDMA 1xRTT USB Modem (U-100/105/200/300/520)
-152d JMicron Technology Corp. / JMicron USA Technology Corp.
- 2338 JM20337 Hi-Speed USB to SATA & PATA Combo Bridge
-152e LG (HLDS)
- e001 GSA-5120D DVD-RW
-1532 Razer USA, Ltd
- 0001 RZ01-020300 Optical Mouse [Diamondback]
- 0003 Krait Mouse
- 0007 DeathAdder Mouse
- 0102 Tarantula Keyboard
-1546 U-Blox AG
-154b PNY
- 0010 USB 2.0 Flash Drive
-154d ConnectCounty Holdings Berhad
-154e D&M Holdings, Inc. (Denon/Marantz)
- 3000 Marantz RC9001 Remote Control
-1554 Prolink Microsystems Corp.
-1557 OQO
- 0002 model 01 WiFi interface
- 0003 model 01 Bluetooth interface
- 7720 model 01+ Ethernet
- 8150 model 01 Ethernet interface
-1568 Sunf Pu Technology Co., Ltd
-156f Quantum Corporation
-1570 ALLTOP TECHNOLOGY CO., LTD.
-157b Ketron SRL
-157e TRENDnet
- 3006 TEW-444UB EU
- 3007 TEW-444UB EU (no firmware)
- 300a TEW-429UB 802.11g Adapter with HotSpot Detector
- 300b TEW-429UB
- 300d TEW-429UB C1
- 3204 ALL0298 v2
- 3205 AR5523
- 3206 AR5523 (no firmware)
-1582 Fiberline
- 6003 WL-430U
-1587 SMA Technologie AG
-158d Oakley Inc.
-1598 Kunshan Guoji Electronics Co., Ltd.
-15a2 Freescale Semiconductor, Inc.
-15a8 Teams Power Limited
-15aa Gearway Electronics (Dong Guan) Co., Ltd.
-15ba Olimex Ltd.
- 0003 OpenOCD JTAG
- 0004 OpenOCD JTAG TINY
-15c2 SoundGraph Inc.
- ffdc iMON PAD Remote Controller
-15c6 Laboratoires MXM
- 1000 DigistimSP (cold)
- 1001 DigistimSP (warm)
- 1002 DigimapSP USB (cold)
- 1003 DigimapSP USB (warm)
-15c9 D-Box Technologies
-15ca Textech International Ltd.
- 00c3 Mini Optical Mouse
-15d5 Coulomb Electronics Ltd.
-15dc Hynix Semiconductor Inc.
-15e0 Seong Ji Industrial Co., Ltd.
-15e1 RSA
- 2007 RSA SecurID (R) Authenticator
-15e8 SohoWare
- 9100 NUB100 Ethernet [pegasus]
- 9110 10/100 USB Ethernet
-15e9 Pacific Digital Corp.
- 04ce MemoryFrame MF-570
- 1968 MemoryFrame MF-570
- 1969 Digital Frame
-15ec Belcarra Technologies Corp.
-15f4 HanfTek
- 0001 HanfTek UMT-010 USB2.0 DVB-T (cold)
- 0025 HanfTek UMT-010 USB2.0 DVB-T (warm)
-1604 Tascam
- 8000 US-428 Audio/Midi Controller (without fw)
- 8001 US-428 Audio/Midi Controller
- 8004 US-224 Audio/Midi Controller (without fw)
- 8005 US-224 Audio/Midi Controller
- 8006 US-122 Audio/Midi Interface (without fw)
- 8007 US-122 Audio/Midi Interface
-1606 Umax [hex]
- 0002 Astra 1236U Scanner
- 0010 Astra 1220U
- 0030 Astra 2000U
- 0050 Scanner
- 0060 Astra 3400U
- 0130 Astra 2100U
- 0160 Astra 5400U
- 0230 Astra 2200/2200SU
- 0350 Astra 4800/4850 Scanner
- 1030 Astra 4000U
- 1220 Genesys Logic Scanner Controller NT5.0
- 2010 AstraCam Digital Camera
- 2020 AstraCam 1000
- 2030 AstraCam 1800 Digital Camera
-1608 Inside Out Networks [hex]
- 0001 EdgePort/4 Serial Port
- 0002 Edgeport/8
- 0003 Rapidport/4
- 0004 Edgeport/4
- 0005 Edgeport/2
- 0006 Edgeport/4i
- 0007 Edgeport/2i
- 0008 Edgeport/8
- 000c Edgeport/421
- 000d Edgeport/21
- 000e Edgeport/4
- 000f Edgeport/8
- 0010 Edgeport/2
- 0011 Edgeport/4
- 0012 Edgeport/416
- 0014 Edgeport/8i
- 0018 Edgeport/412
- 0019 Edgeport/412
- 001a Edgeport/2+2i
- 0101 Edgeport/4
- 0105 Edgeport/2
- 0106 Edgeport/4i
- 0107 Edgeport/2i
- 010c Edgeport/421
- 010d Edgeport/21
- 0110 Edgeport/2
- 0111 Edgeport/4
- 0112 Edgeport/416
- 0114 Edgeport/8i
- 0201 Edgeport/4
- 0203 Rapidport/4
- 0204 Edgeport/4
- 0205 Edgeport/2
- 0206 Edgeport/4i
- 0207 Edgeport/2i
- 020c Edgeport/421
- 020d Edgeport/21
- 020e Edgeport/4
- 020f Edgeport/8
- 0210 Edgeport/2
- 0211 Edgeport/4
- 0212 Edgeport/416
- 0214 Edgeport/8i
- 0215 Edgeport/1
- 0216 EPOS/44
- 0217 Edgeport/42
- 021a Edgeport/2+2i
- 021b Edgeport/2c
- 021c Edgeport/221c
- 021d Edgeport/22c
- 021e Edgeport/21c
- 021f Edgeport/62
- 0240 Edgeport/1
- 0241 Edgeport/1i
- 0242 Edgeport/4s
- 0243 Edgeport/8s
- 0244 Edgeport/8
- 0245 Edgeport/22c
- 0301 Watchport/P
- 0302 Watchport/M
- 0303 Watchport/W
- 0304 Watchport/T
- 0305 Watchport/H
- 0306 Watchport/E
- 0307 Watchport/L
- 0308 Watchport/R
- 0309 Watchport/A
- 030a Watchport/D
- 030b Watchport/D
- 030c Power Management Port
- 030e Power Management Port
- 030f Watchport/G
- 0310 Watchport/Tc
- 0311 Watchport/Hc
- 1403 MultiTech Systems MT4X56 Modem
- 1a17 Agilent Technologies (E6473)
-1619 L & K Precision Technology Co., Ltd.
-1621 Wionics Research
-1628 Stonestreet One, Inc.
-162a Airgo Networks Inc.
-162f WiQuest Communications, Inc.
-1631 Good Way Technology
- 6200 GWUSB2E
- c019 RT2573
-1645 Entrega [hex]
- 0001 1S Serial Port
- 0002 2S Serial Port
- 0003 1S25 Serial Port
- 0004 4S Serial Port
- 0005 E45 Ethernet [klsi]
- 0006 Parallel Port
- 0007 U1-SC25 SCSI
- 0008 Ethernet
- 0016 Bi-directional to Parallel Printer Converter
- 0080 1 port to Serial Converter
- 0081 1 port to Serial Converter
- 0093 1S9 Serial Port
- 8000 EZ-USB
- 8001 1 port to Serial
- 8002 2x Serial Port
- 8003 1 port to Serial
- 8004 2U4S serial/usb hub
- 8005 Ethernet
- 8080 1 port to Serial
- 8081 1 port to Serial
- 8093 PortGear Serial Port
-164a ChipX
-1657 Struck Innovative Systeme GmbH
- 3150 SIS3150 USB2.0 to VME interface
-1660 Creatix Polymedia GmbH
-1668 Actiontec Electronics, Inc. [hex]
- 0009 Gateway
- 0333 Modem
- 0358 InternetPhoneWizard
- 0405 Gateway
- 0408 Prism2.5 802.11b Adapter
- 0413 Gateway
- 0421 Prism2.5 802.11b Adapter
- 0441 IBM Integrated Bluetooth II
- 0500 BTM200B BlueTooth Adapter
- 1050 802.11g Wireless Mini adapter
- 1441 IBM Integrated Bluetooth II
- 2441 BMDC-2 IBM Bluetooth III w.56k
- 3441 IBM Integrated Bluetooth III
- 6010 Gateway
- 6097 802.11b Wireless Adapter
- 6106 ROPEX FreeLan 802.11b
- 7605 UAT1 Wireless Ethernet Adapter
-1669 PiKRON Ltd. [hex]
- 1001 uLan2USB Converter - PS1 protocol
-1679 Total Phase
- 2001 Beagle USB 12 Protocol Analyzer
-1682 Maxwise Production Enterprise Ltd.
-1684 Godspeed Computer Corp.
-1686 ZOOM Corporation
- 0045 H4 Digital Recorder
-1687 Kingmax Digital Inc.
-168c Atheros Communications
- 0001 AR5523
- 0002 AR5523 (no firmware)
-1690 Askey Computer Corp. [hex]
- 0101 Creative Modem Blaster DE5670
- 0102 CDC Modem Board
- 0103 Askey 1456 VQE-R3 Modem [conexant]
- 0104 HCF V90 Data Fax RTAD Modem
- 0107 HCF V.90 Data,Fax,RTAD Modem
- 0109 Askey MagicXpress V.90 Pocket Modem [conexant]
- 0203 Voyager ADSL Modem Loader
- 0204 Voyager ADSL Modem
- 0205 DSL Modem
- 0206 GlobeSpan ADSL WAN Modem
- 0208 DSL Modem
- 0209 Voyager 100 ADSL Modem
- 0211 Globespan Virata ADSL LAN Modem
- 0212 DSL Modem
- 0213 HM121d DSL Modem
- 0214 HM121d DSL Modem
- 0215 Voyager 105 ADSL Modem
- 0701 WLAN
- 0710 SMCWUSBT-G
- 0711 SMCWUSBT-G (no firmware)
- 0712 AR5523
- 0713 AR5523 (no firmware)
- 0715 Voyager 1055 Laptop Adapter
- 0722 RT2573
- 0726 Wi-Fi Wireless LAN Adapter
- 0901 Voyager 205 ADSL Router
-1696 Hitachi Video and Information System, Inc.
-1697 VTec Test, Inc.
-16a5 Shenzhen Zhengerya Cable Co., Ltd.
-16ab Global Sun Technology
- 7801 AR5523
- 7802 AR5523 (no firmware)
- 7811 AR5523
- 7812 AR5523 (no firmware)
-16ac Dongguan ChingLung Wire & Cable Co., Ltd.
-16c0 VOTI
- 03e8 free for internal lab use 1000
- 03e9 free for internal lab use 1001
- 03ea free for internal lab use 1002
- 03eb free for internal lab use 1003
- 03ec free for internal lab use 1004
- 03ed free for internal lab use 1005
- 03ee free for internal lab use 1006
- 03ef free for internal lab use 1007
- 03f0 free for internal lab use 1008
- 03f1 free for internal lab use 1009
- 076b OpenPCD 13.56MHz RFID Reader
- 076c OpenPICC 13.56MHz RFID Simulator (native)
- 08ac OpenBeacon USB stick
-16cc silex technology, Inc.
-16d3 Frontline Test Equipment, Inc.
-16d5 AnyDATA Corporation
- 6501 CDMA 2000 1xRTT/EV-DO USB Modem
-16d8 CMOTECH Co., Ltd.
- 5141 CMOTECH CDMA Technologies USB modem
- 5543 CDMA 2000 1xRTT/1xEVDO USB modem
- 6280 CMOTECH CDMA Technologies USB modem
-16df King Billion Electronics Co., Ltd.
-16f5 Futurelogic Inc.
-1706 BlueView Technologies, Inc.
-1707 ARTIMI
-170b Swissonic
- 0011 MIDI-USB 1x1
-170d Avnera
-1733 Cellink Technology Co., Ltd
- 0101 RF Wireless Optical Mouse OP-701
-1736 CANON IMAGING SYSTEM TECHNOLOGIES INC.
-1737 Linksys
- 0039 USB1000
-1740 Senao
- 2000 NUB-8301
-1743 General Atomics
-174c ASMedia Technology Inc.
-174f Syntek
- 5a35 1.3MPixel Web Cam - Asus G1s
- 6a31 Web Cam - Asus A8J, F3S, F5R, VX2S, V1S
- 6a33 Web Cam - Asus F3SA, F9J, F9S
- 6a51 2.0MPixel Web Cam - Asus Z96J, Z96S, S96S
- 6a54 Web Cam
- 6d51 2.0Mpixel Web Cam - Eurocom D900C
- 8a12 0.3MPixel Web Cam - Packard Bell MX37-T-003
- a311 1.3MPixel Web Cam - Asus A3A, A6J, A6K, A6M, A6R, A6T, A6V, A7T, A7sv, A7U
- a312 1.3MPixel Web Cam
- a821 Web Cam - Packard Bell BU45, PB Easynote MX66-208W
- aa11 Web Cam
-1759 LucidPort Technology, Inc.
-1772 System Level Solutions, Inc.
-1781 Multiple Vendors
- 083e MetaGeek Wi-Spy
- 0938 Iguanaworks USB IR Transceiver
-1782 Spreadtrum Communications Inc.
-1784 TopSeed Technology Corp.
-1788 ShenZhen Litkconn Technology Co., Ltd.
-1796 Printrex, Inc.
-1797 JALCO CO., LTD.
-17a5 Advanced Connection Technology Inc.
-17a7 MICOMSOFT CO., LTD.
-17b3 Grey Innovation
- 0004 Linux-USB Midi Gadget
-17c3 Singim International Corp.
-17cc Native Instruments
- 0815 Audio Kontrol 1
- 1940 RigKontrol3
- 1969 RigKontrol2
- 1978 Audio 8 DJ
- 4711 Kore Controller
- 4712 Kore Controller 2
-17cf Hip Hing Cable & Plug Mfy. Ltd.
-17d0 Sanford L.P.
-17d3 Korea Techtron Co., Ltd.
-17e9 Newnham Research
- 0051 USB VGA Adaptor
-17eb Cornice, Inc.
-17ef Lenovo
- 3815 ChipsBnk 2GB USB Stick
-17f5 K.K. Rocky
-17f6 Unicomp, Inc
-1822 Twinhan
- 3201 VisionDTV USB-Ter/HAMA USB DVB-T device cold
- 3202 VisionDTV USB-Ter/HAMA USB DVB-T device warm
-1831 Gwo Jinn Industries Co., Ltd.
-1832 Huizhou Shenghua Industrial Co., Ltd.
-1854 Memory Devices Ltd.
-185b Compro
- d000 Compro Videomate DVB-U2000 - DVB-T USB cold
- d001 Compro Videomate DVB-U2000 - DVB-T USB warm
-1861 Tech Technology Industrial Company
-1862 Teridian Semiconductor Corp.
-1871 Aveo Technology Corp.
-1894 Topseed
- 5632 Atek Tote Remote
- 5641 TSAM-004 Presentation Remote
-1897 Evertop Wire Cable Co.
-18b6 Mikkon Technology Limited
-18b7 Zotek Electronic Co., Ltd.
-18c5 AMIT
- 0002 CG-WLUSB2GO
-18d5 Starline International Group Limited
-18d9 Kaba
- 01xy LEGIC advant desktop reader
-18e3 Fitipower Integrated Technology Inc
-18e8 Qcom
- 6196 RT2573
- 6229 RT2573
-18ea Matrox Graphics, Inc.
- 0002 DualHead2Go [Analog Edition]
- 0004 TripleHead2Go [Digital Edition]
-18fd FineArch Inc.
-190d Motorola GSG
-1914 Alco Digital Devices Limited
-1915 Linksys
- 2233 WUSB11 v2.8 802.11b Adapter
- 2234 WUSB54G 802.11g Adapter
-192f Avago Technologies, Pte.
-1930 Shenzhen Xianhe Technology Co., Ltd.
-1931 Ningbo Broad Telecommunication Co., Ltd.
-1949 Lab126
-1951 Hyperstone AG
-1953 Ironkey Inc.
-1954 Radiient Technologies
-195d Itron Technology iONE
- 7002 Libra-Q11 IR remote
- 7006 Libra-Q26 / 1.0 Remote
- 7777 Scorpius wireless keyboard
-1967 CASIO HITACHI Mobile Communications Co., Ltd.
-196b Wispro Technology Inc.
-1970 Dane-Elec Corp. USA
-1975 Dongguan Guneetal Wire & Cable Co., Ltd.
-1976 Chipsbrand Microelectronics (HK) Co., Ltd.
-1977 T-Logic
- 0111 TL203 MP3 Player and Voice Recorder
-1989 Nuconn Technology Corp.
-198f Beceem Communications Inc.
-1990 Acron Precision Industrial Co., Ltd.
-1995 Trillium Technology Pty. Ltd.
- 3202 REC-ADPT-USB (recorder)
- 3203 REC-A-ADPT-USB (recorder)
-199e The Imaging Source Europe GmbH
-199f Benica Corporation
-19a8 Biforst Technology Inc.
-19af S Life
- 6611 Celestia VoIP Phone
-19b5 B & W Group
-19b6 Infotech Logistic, LLC
-19ca Mindtribe
- 0001 Sandio 3D HID Mouse
-19cf Parrot SA
-19e1 WeiDuan Electronic Accessory (S.Z.) Co., Ltd.
-19e8 Industrial Technology Research Institute
-19ef Pak Heng Technology (Shenzhen) Co., Ltd.
-19ff Best Buy
- 0201 Rocketfish Wireless 2.4G Laser Mouse
-1a08 Bellwood International, Inc.
-1a0a USB-IF non-workshop
- badd USB OTG Compliance test device
-1a12 KES Co., Ltd.
-1a25 Amphenol East Asia Ltd.
-1a2a Seagate Branded Solutions
-1a36 Biwin Technology Ltd.
-1a40 TERMINUS TECHNOLOGY INC.
-1a41 Action Electronics Co., Ltd.
-1a4a Silicon Image
-1a4b SafeBoot International B.V.
-1a61 Abbott Diabetes Care
-1a6a Spansion Inc.
-1a6d SamYoung Electronics Co., Ltd
-1a6e Global Unichip Corp.
-1a6f Sagem Orga GmbH
-1a79 Bayer Health Care LLC
-1a7b Lumberg Connect GmbH & Co. KG
-1a89 Dynalith Systems Co., Ltd.
-1a8b SGS Taiwan Ltd.
-1a98 Leica Camera AG
-1aa4 Data Drive Thru, Inc.
-1aa5 UBeacon Technologies, Inc.
-1aa6 eFortune Technology Corp.
-1acb Salcomp Plc
-1ad1 Desay Wire Co., Ltd.
-1ae4 ic-design Reinhard Gottinger GmbH
-1aed High Top Precision Electronic Co., Ltd.
-1aef Conntech Electronic (Suzhou) Corporation
-1b04 Meilhaus Electronic GmBH
- 0630 ME-630
- 0940 ME-94
- 0950 ME-95
- 0960 ME-96
- 1000 ME-1000
- 100a ME-1000
- 100b ME-1000
- 1400 ME-1400
- 140a ME-1400A
- 140b ME-1400B
- 140c ME-1400C
- 140d ME-1400D
- 140e ME-1400E
- 14ea ME-1400EA
- 14eb ME-1400EB
- 1604 ME-1600/4U
- 1608 ME-1600/8U
- 160c ME-1600/12U
- 160f ME-1600/16U
- 168f ME-1600/16U8I
- 4610 ME-4610
- 4650 ME-4650
- 4660 ME-4660
- 4661 ME-4660I
- 4662 ME-4660
- 4663 ME-4660I
- 4670 ME-4670
- 4671 ME-4670I
- 4672 ME-4670S
- 4673 ME-4670IS
- 4680 ME-4680
- 4681 ME-4680I
- 4682 ME-4680S
- 4683 ME-4680IS
- 6004 ME-6000/4
- 6008 ME-6000/8
- 600f ME-6000/16
- 6014 ME-6000I/4
- 6018 ME-6000I/8
- 601f ME-6000I/16
- 6034 ME-6000ISLE/4
- 6038 ME-6000ISLE/8
- 603f ME-6000ISLE/16
- 6044 ME-6000/4/DIO
- 6048 ME-6000/8/DIO
- 604f ME-6000/16/DIO
- 6054 ME-6000I/4/DIO
- 6058 ME-6000I/8/DIO
- 605f ME-6000I/16/DIO
- 6074 ME-6000ISLE/4/DIO
- 6078 ME-6000ISLE/8/DIO
- 607f ME-6000ISLE/16/DIO
- 6104 ME-6100/4
- 6108 ME-6100/8
- 610f ME-6100/16
- 6114 ME-6100I/4
- 6118 ME-6100I/8
- 611f ME-6100I/16
- 6134 ME-6100ISLE/4
- 6138 ME-6100ISLE/8
- 613f ME-6100ISLE/16
- 6144 ME-6100/4/DIO
- 6148 ME-6100/8/DIO
- 614f ME-6100/16/DIO
- 6154 ME-6100I/4/DIO
- 6158 ME-6100I/8/DIO
- 615f ME-6100I/16/DIO
- 6174 ME-6100ISLE/4/DIO
- 6178 ME-6100ISLE/8/DIO
- 617f ME-6100ISLE/16/DIO
- 6259 ME-6200I/9/DIO
- 6359 ME-6300I/9/DIO
- 810a ME-8100A
- 810b ME-8100B
- 820a ME-8200A
- 820b ME-8200B
-1b20 MStar Semiconductor, Inc.
-1b22 WiLinx Corp.
-1b26 Cellex Power Products, Inc.
-1b27 Current Electronics Inc.
-1b28 NAVIsis Inc.
-1b32 Ugobe Life Forms, Inc.
-1b36 ViXS Systems, Inc.
-1b3f Generalplus Technology Inc.
-1b47 Energizer Holdings, Inc.
- 0001 CHUSB Duo Charger (NiMH AA/AAA USB smart charger)
-1b48 Plastron Precision Co., Ltd.
-1b59 K.S. Terminals Inc.
-1b5a Chao Zhou Kai Yuan Electric Co., Ltd.
-1b65 The Hong Kong Standards and Testing Centre Ltd.
-1b72 ATERGI TECHNOLOGY CO., LTD.
-1b76 Legend Silicon Corp.
-1b86 Dongguan Guanshang Electronics Co., Ltd.
-1b88 ShenMing Electron (Dong Guan) Co., Ltd.
-1b8c Altium Limited
-1b8d e-MOVE Technology Co., Ltd.
-1b8e Amlogic, Inc.
-1b8f MA LABS, Inc.
-1b98 YMax Communications Corp.
-1b99 Shenzhen Yuanchuan Electronic
-1ba1 JINQ CHERN ENTERPRISE CO., LTD.
-1ba2 Lite Metals & Plastic (Shenzhen) Co., Ltd.
-1ba4 Ember Corporation
- 0001 InSight USB Link
-1ba8 China Telecommunication Technology Labs
-1bad Harmonix Music
- 0002 Harmonix Guitar for Xbox 360
- 0003 Harmonix Drum Kit for Xbox 360
-1bbb T & A Mobile Phones
-1bc4 Ford Motor Co.
-1bc5 AVIXE Technology (China) Ltd.
-1bce Contac Cable Industrial Limited
-1bcf Sunplus Innovation Technology Inc.
-1bd0 Hangzhou Riyue Electronic Co., Ltd.
-1bde P-TWO INDUSTRIES, INC.
-1bef Shenzhen Tongyuan Network-Communication Cables Co., Ltd
-1bf0 RealVision Inc.
-1bf5 Extranet Systems Inc.
-1bf6 Orient Semiconductor Electronics, Ltd.
-1bfd TouchPack
- 1688 Resistive Touch Screen
-1c02 Kreton Corporation
-1c04 QNAP System Inc.
-1c0d Relm Wireless
-1c10 Lanterra Industrial Co., Ltd.
-1c13 ALECTRONIC LIMITED
-1c1a Datel Electronics Ltd.
-1c1b Volkswagen of America, Inc.
-1c1f Goldvish S.A.
-1c20 Fuji Electric Device Technology Co., Ltd.
-1c21 ADDMM LLC
-1c22 ZHONGSHAN CHIANG YU ELECTRIC CO., LTD.
-1c26 Shanghai Haiying Electronics Co., Ltd.
-1c27 HuiYang D & S Cable Co., Ltd.
-1c31 LS Cable Ltd.
-1c37 Authorizer Technologies, Inc.
-1c3d NONIN MEDICAL INC.
-1c3e Wep Peripherals
-1c49 Cherng Weei Technology Corp.
-1c6b Philips & Lite-ON Digital Solutions Corporation
-1c6c Skydigital Inc.
-1c77 Kaetat Industrial Co., Ltd.
-1c78 Datascope Corp.
-1c79 Unigen Corporation
-1c7a LighTuning Technology Inc.
-1c7b LUXSHARE PRECISION INDUSTRY (SHENZHEN) CO., LTD.
-1c87 2N TELEKOMUNIKACE a.s.
-1c88 Somagic, Inc.
-1c89 HONGKONG WEIDIDA ELECTRON LIMITED
-1c8e ASTRON INTERNATIONAL CORP.
-1c98 ALPINE ELECTRONICS, INC.
-1ca0 ACCARIO Inc.
-1cb3 Aces Electronic Co., Ltd.
-1cb4 OPEX CORPORATION
-1cbe Luminary Micro Inc.
-1cbf FORTAT SKYMARK INDUSTRIAL COMPANY
-1cc0 PlantSense
-1cca NextWave Broadband Inc.
-1ccd Bodatong Technology (Shenzhen) Co., Ltd.
-1cd4 adp corporation
-1cd5 Firecomms Ltd.
-1cd6 Antonio Precise Products Manufactory Ltd.
-1cde Telecommunications Technology Association (TTA)
-1cdf WonTen Technology Co., Ltd.
-1ce0 EDIMAX TECHNOLOGY CO., LTD.
-1ce1 Amphenol KAE
-1cfc ANDES TECHNOLOGY CORPORATION
-1cfd Flextronics Digital Design Japan, LTD.
-1d08 NINGBO HENTEK DRAGON ELECTRONICS CO., LTD.
-1d09 TechFaith Wireless Technology Limited
-1d0a Johnson Controls, Inc. The Automotive Business Unit
-1d0b HAN HUA CABLE & WIRE TECHNOLOGY (J.X.) CO., LTD.
-1d14 ALPHA-SAT TECHNOLOGY LIMITED
-1d1f Diostech Co., Ltd.
-1d20 SAMTACK INC.
-1d50 OpenMoko, Inc.
-1d5b Smartronix, Inc.
-1d6b Linux Foundation
- 0001 1.1 root hub
- 0002 2.0 root hub
- 0003 3.0 root hub
-1ebb NuCORE Technology, Inc.
-2001 D-Link Corp. [hex]
- 0001 DWL-120 WIRELESS ADAPTER
- 0201 DHN-120 10Mb Home Phoneline Adapter
- 1a00 10/100 Ethernet
- 200c 10/100 Ethernet
- 3200 DWL-120 802.11b (Atmel RFMD503A) [usbvnetr]
- 3500 Elitegroup Computer Systems WLAN card WL-162
- 3700 DWL-122 802.11b
- 3701 DWL-G120 Spinnaker 802.11b
- 3702 DWL-120 rev F
- 3703 DWL-122 802.11b
- 3704 DWL-G122 802.11g rev. A2
- 3705 AirPlus G DWL-G120 Wireless Adapter(rev.C)
- 3761 IEEE 802.11g USB2.0 Wireless Network Adapter-PN
- 3a00 DWL-AG132
- 3a01 DWL-AG132 (no firmware)
- 3a02 DWL-G132
- 3a03 DWL-G132 (no firmware)
- 3a04 DWL-AG122
- 3a05 DWL-AG122 (no firmware)
- 3a80 AirPlus Xtreme G DWL-G132 Wireless Adapter
- 3a81 predator Bootloader Download
- 3a82 AirPremier AG DWL-AG132 Wireless Adapter
- 3a83 predator Bootloader Download
- 3b00 AirPlus DWL-120+ Wireless Adapter
- 3b01 WLAN Boot Device
- 3c00 DWL-G122 802.11g rev. B1 [ralink]
- 3c01 AirPlus AG DWL-AG122 Wireless Adapter
- 3c02 AirPlus G DWL-G122 Wireless Adapter
- 3c05 DUB-E100 Fast Ethernet [asix]
- 4000 DSB-650C Ethernet [klsi]
- 4001 DSB-650TX Ethernet [pegasus]
- 4002 DSB-650TX Ethernet [pegasus]
- 4003 DSB-650TX-PNA Ethernet [pegasus]
- 400b 10/100 Ethernet
- 4102 10/100 Ethernet
- 5100 DSL-200 ADSL ATM Modem
- 5102 DSL-200 ADSL Loader
- 5b00 Remote NDIS Network Device
- 9414 Cable Modem
- 9b00 Broadband Cable Modem Remote NDIS Device
- abc1 DSB-650 Ethernet [pegasus]
- f013 DLink 7 port USB2.0 Hub
- f10d Accent Communications Modem
- f110 DUB-AV300 A/V Capture
- f111 DBT-122 Bluetooth adapter
- f112 DUB-T210 Audio Device
- f116 Formosa 2
- f117 Formosa 3
- f118 Formosa 4
-2019 PLANEX
- 3220 GW-US11S WLAN
- 5303 GW-US54GXS
- ab01 GW-US54HP
- ab50 GW-US54Mini2
- c002 GW-US54SG
- c007 GW-US54GZL
- ed02 GW-USMM
-2040 Hauppauge
- 6502 WinTV HVR-900
- 6503 WinTV HVR-930
- 7050 Nova-T Stick
- 9300 WinTV NOVA-T USB2 (cold)
- 9301 WinTV NOVA-T USB2 (warm)
-2101 ActionStar
- 0201 SIIG 4-to-2 Printer Switch
-2162 Creative (?)
- 2031 Network Blaster Wireless Adapter
- 500c DE5771 Modem Blaster
- 8001 Broadxent BritePort DSL Bridge 8010U
-2222 MacAlly
- 0004 iWebKey Keyboard
- 4050 AirStick joystick
-2233 RadioShack Corporation
- 6323 USB Electronic Scale
-22b8 Motorola PCS
- 0001 Wally 2.2 chipset
- 0002 Wally 2.4 chipset
- 0005 V.60c/V.60i GSM Phone
- 0850 Bluetooth Device
- 1001 Patriot 1.0 (GSM) chipset
- 1002 Patriot 2.0 chipset
- 1005 T280e GSM/GPRS Phone
- 1101 Patriot 1.0 (TDMA) chipset
- 1801 Rainbow chipset flash
- 2035 Bluetooth Device
- 2805 GSM Modem
- 2821 T720 GSM Phone
- 2822 V.120e GSM Phone
- 2823 Flash Interface
- 2a01 MSM6050 chipset
- 2a02 CDMA modem
- 2a03 MSM6050 chipset flash
- 2a21 V710 GSM Phone (P2K)
- 2a22 V710 GSM Phone (AT)
- 2a23 MSM6100 chipset flash
- 2a41 MSM6300 chipset
- 2a42 Usb Modem
- 2a43 MSM6300 chipset flash
- 2a61 E815 GSM Phone (P2K)
- 2a62 E815 GSM Phone (AT)
- 2a63 MSM6500 chipset flash
- 2a81 MSM6025 chipset
- 2a83 MSM6025 chipset flash
- 2ac1 MSM6100 chipset
- 2ac3 MSM6100 chipset flash
- 3001 A835/E1000 GSM Phone (P2K)
- 3002 A835/E1000 GSM Phone (AT)
- 3801 C350L/C450 (P2K)
- 3802 C330/C350L/C450/EZX GSM Phone (AT)
- 3803 Neptune LT chipset flash
- 4001 OMAP 1.0 chipset
- 4002 A920/A925 UMTS Phone
- 4003 OMAP 1.0 chipset flash
- 4008 OMAP 1.0 chipset RDL
- 4204 MPx200 Smartphone
- 4214 MPc GSM
- 4224 MPx220 Smartphone
- 4234 MPc CDMA
- 4244 MPx100 Smartphone
- 4801 Neptune LTS chipset
- 4803 Neptune LTS chipset flash
- 4810 Triplet GSM Phone (storage)
- 4901 Triplet GSM Phone (P2K)
- 4902 Triplet GSM Phone (AT)
- 4903 Neptune LTE chipset flash
- 4a01 Neptune LTX chipset
- 4a03 Neptune LTX chipset flash
- 4a32 L6-imode Phone
- 5801 Neptune ULS chipset
- 5803 Neptune ULS chipset flash
- 5901 Neptune VLT chipset
- 5903 Neptune VLT chipset flash
- 6001 Dalhart EZX
- 6003 Dalhart flash
- 6004 EZX GSM Phone (CDC Net)
- 6008 Dalhart RDL
- 6009 EZX GSM Phone (P2K)
- 600a Dalhart EZX config 17
- 600b Dalhart EZX config 18
- 600c EZX GSM Phone (USBLAN)
- 6021 JUIX chipset
- 6023 JUIX chipset flash
- 6026 Flash RAM Downloader/miniOS
- 6027 USBLAN
- 604c EZX GSM Phone (Storage)
- 6101 Talon integrated chipset
- 6401 Argon chipset
- 6403 Argon chipset flash
- 6415 ROKR Z6 (MTP mode)
- 6604 Washington CDMA Phone
- 6631 CDC Modem
-22b9 eTurboTouch Technology, Inc.
-22ba Technology Innovation Holdings, Ltd
-2304 Pinnacle Systems, Inc. [hex]
- 0109 Studio PCTV USB (SECAM)
- 0110 Studio PCTV USB (PAL)
- 0111 Miro PCTV USB
- 0112 Studio PCTV USB (NTSC) with FM radio
- 0201 Systems MovieBox Device
- 0204 MovieBox USB_B
- 0205 DVC 150B
- 0206 Systems MovieBox Deluxe Device
- 0207 Dazzle DVC90 Video Device
- 0208 Studio PCTV USB2
- 020e PCTV 200e
- 020f PCTV 400e BDA Device
- 0210 Studio PCTV USB (PAL) with FM radio
- 0212 Studio PCTV USB (NTSC)
- 0213 500-USB Device
- 0214 Studio PCTV USB (PAL) with FM radio
- 0216 PCTV 60e
- 0219 PCTV 260e
- 021a Dazzle DVC100 Audio Device
- 021b Dazzle DVC130/DVC170
- 021d Dazzle DVC130
- 021e Dazzle DVC170
- 021f PCTV Sat HDTV Pro BDA Device
- 0222 PCTV Sat Pro BDA Device
- 0223 DazzleTV Sat BDA Device
- 0226 PCTV 330e
- 0227 PCTV for Mac, HD Stick
- 0228 PCTV DVB-T Flash Stick
- 022a PCTV 160e
- 022b PCTV 71e
- 0232 PCTV 170e
- 0300 Studio Linx Video input cable (NTSC)
- 0301 Studio Linx Video input cable (PAL)
- 0302 Dazzle DVC120
- 0419 PCTV Bungee USB (PAL) with FM radio
- 061d PCTV Deluxe (NTSC) Device
- 061e PCTV Deluxe (PAL) Device
-2318 Shining Technologies, Inc. [hex]
- 0011 CitiDISK Jr. IDE Enclosure
-2375 Digit@lway, Inc.
- 0001 Digital Audio Player
-2406 SANHO Digital Electronics Co., Ltd.
- 6688 PD7X Portable Storage
-2478 Tripp-Lite
- 2008 U209-000-R Serial Port
-2632 TwinMOS
- 3209 7-in-1 Card Reader
-2650 Electronics For Imaging, Inc. [hex]
-2730 Citizen
- 200f CT-S310 Label printer
-2735 DigitalWay
- 0003 MPIO 1.5GB Hard Disc Drive
-2770 NHJ, Ltd
- 0a01 ScanJet 4600 series
- 905c Che-Ez Snap SNAP-U/Digigr8/Soundstar TDC-35
- 9060 A130
- 9120 Che-ez! Snap / iClick Tiny VGA Digital Camera
- 9130 TCG 501
- 913c Argus DC-1730
- 9150 Mini Cam
- 9153 iClick 5X
- 915d Cyberpix S-210S / Little Tikes My Real Digital Camera
- 930b CCD Webcam(PC370R)
- 930c CCD Webcam(PC370R)
-2899 Toptronic Industrial Co., Ltd
-2c02 Planex Communications
- 14ea GW-US11H WLAN
-2fb2 Fujitsu, Ltd
-3125 Eagletron
- 0001 TrackerPod Camera Stand
-3176 Whanam Electronics Co., Ltd
-3275 VidzMedia Pte Ltd
- 4fb1 MonsterTV P2H
-3334 AEI
- 1701 Fast Ethernet
-3340 Yakumo
- 043a Mio A701 DigiWalker PPCPhone
- 0e3a Pocket PC 300 GPS SL / Typhoon MyGuide 3500
- a0a3 deltaX 5 BT (D) PDA
-3504 Micro Star
- f110 Security Key
-3538 Power Quotient International Co., Ltd
- 0001 Travel Flash
- 0015 Mass Storage Device
- 0022 Hi-Speed Mass Storage Device
- 0042 Cool Drive U339 Flash Disk
-3579 DIVA
- 6901 Media Reader
-3636 InVibro
-3838 WEM
- 0001 5-in-1 Card Reader
-3923 National Instruments Corp.
- 12c0 DAQPad-6020E
- 12d0 DAQPad-6507
- 12e0 NI 4350
- 12f0 NI 5102
- 1750 DAQPad-6508
- 17b0 USB-ISA-Bridge
- 1820 DAQPad-6020E (68 pin I/O)
- 1830 DAQPad-6020E (BNC)
- 1f00 DAQPad-6024E
- 1f10 DAQPad-6024E
- 1f20 DAQPad-6025E
- 1f30 DAQPad-6025E
- 1f40 DAQPad-6036E
- 1f50 DAQPad-6036E
- 2f80 DAQPad-6052E
- 2f90 DAQPad-6052E
- 703c USB-485 RS485 Cable
- 7254 NI MIO (data acquisition card) firmware updater
- 729e USB-6251 (OEM) data acquisition card
-40bb I-O Data
- 0a09 USB2.0-SCSI Bridge USB2-SC
-4101 i-rocks
- 1301 IR-2510 usb phone
-4102 iRiver, Ltd.
- 1001 iFP-100 series mp3 player
- 1003 iFP-300 series mp3 player
- 1005 iFP-500 series mp3 player
- 1007 iFP-700 series mp3/ogg vorbis player
- 1008 iFP-800 series mp3/ogg vorbis player
- 100a iFP-1000 series mp3/ogg vorbis player
- 1014 T20 series mp3/ogg vorbis player (ums firmware)
- 1101 iFP-100 series mp3 player (ums firmware)
- 1103 iFP-300 series mp3 player (ums firmware)
- 1105 iFP-500 series mp3 player (ums firmware)
- 1113 T10 (alternate)
- 1117 T10
- 1119 T30 series mp3/ogg/wma player
- 2002 H10 6GB
- 2101 H10 20GB (mtp)
- 2102 H10 5GB (mtp)
- 2105 H10 5/6GB (mtp)
-413c Dell Computer Corp.
- 0058 Port Replicator
- 1001 Keyboard Hub
- 1002 Keyboard Hub
- 2001 Keyboard HID Support
- 2002 SK-8125 Keyboard
- 2003 Keyboard
- 2005 RT7D50 Keyboard
- 2100 SK-3106 Keyboard
- 2101 SmartCard Reader Keyboard
- 2500 DRAC4 Remote Access Card
- 3010 Optical Wheel Mouse
- 3200 Mouse
- 4001 Axim X5
- 4002 Axim X3
- 4003 Axim X30
- 4004 Axim Sync
- 4005 Axim Sync
- 4006 Axim Sync
- 4007 Axim Sync
- 4008 Axim Sync
- 4009 Axim Sync
- 4011 Axim X51v
- 5103 AIO Printer A940
- 5105 AIO Printer A920
- 5107 AIO Printer A960
- 5109 Photo AIO Printer 922
- 5110 Photo AIO Printer 962
- 5111 Photo AIO Printer 942
- 5112 Photo AIO Printer 924
- 5113 Photo AIO Printer 944
- 5114 Photo AIO Printer 964
- 5115 Photo AIO Printer 926
- 5116 AIO Printer 946
- 5117 Photo AIO Printer 966
- 5118 AIO 810
- 5124 Laser MFP 1815
- 5128 Photo AIO 928
- 5200 Laser Printer
- 5202 Printing Support
- 5203 Printing Support
- 5210 Printing Support
- 5211 Printing Support
- 5220 Laser MFP 1600n
- 5225 Printing Support
- 5226 Printing Support
- 5300 Laser Printer
- 5400 Laser Printer
- 5401 Laser Printer
- 5601 Laser Printer 3100cn
- 5602 Laser Printer 3000cn
- 5631 Laser Printer 5100cn
- 5905 Printing Support
- 8000 BC02 Bluetooth USB Adapter
- 8010 TrueMobile Bluetooth Module in
- 8100 TrueMobile 1180 802.11b Adapter
- 8102 TrueMobile 1300 USB2.0 WLAN Card
- 8103 Wireless 350 Bluetooth
- 8104 Wireless 1450 Dual-band (802.11a/b/g) USB2.0 Adapter
- 8105 U2 in HID - Driver
- 8106 Wireless 350 Bluetooth Internal Card in
- 8110 Wireless 3xx Bluetooth Internal Card
- 8111 Wireless 3xx Bluetooth Internal Card in
- 8114 Wireless 5700 Mobile Broadband (CDMA EV-DO) Minicard Modem
- 8115 Wireless 5500 Mobile Broadband (3G HSDPA) Minicard Modem
- 8116 Wireless 5505 Mobile Broadband (3G HSDPA) Minicard Modem
- 8117 Wireless 5700 Mobile Broadband (CDMA EV-DO) Expresscard Modem
- 8118 Wireless 5510 Mobile Broadband (3G HSDPA) Expresscard Status Port
- 8120 Bluetooth adapter
- 8121 Eastfold in HID
- 8122 Eastfold in DFU
- 8123 eHome Infrared Receiver
- 8124 eHome Infrared Receiver
- 8126 Wireless 355 Bluetooth
- 8127 Wireless 355 Module with Bluetooth 2.0 + EDR Technology.
- 8128 Wireless 5700-Sprint Mobile Broadband (CDMA EV-DO) Mini-Card Status Port
- 8129 Wireless 5700-Telus Mobile Broadband (CDMA EV-DO) Mini-Card Status Port
- 8131 Wireless 360 Bluetooth 2.0 + EDR module.
- 8133 Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port
- 8134 Wireless 5720 Sprint Mobile Broadband (EVDO Rev-A) Minicard Status Port
- 8135 Wireless 5720 TELUS Mobile Broadband (EVDO Rev-A) Minicard Diagnostics Port
- 8136 Wireless 5520 Cingular Mobile Broadband (3G HSDPA) Minicard Diagnostics Port
- 8137 Wireless 5520 Voda L Mobile Broadband (3G HSDPA) Minicard Status Port
- 8138 Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard EAP-SIM Port
- 8140 Wireless 360 Bluetooth
- 8142 Mobile 360 in DFU
- 8501 Bluetooth Adapter
- a001 Hub
- a005 Internal 2.0 Hub
- a700 Hub (in 1905FP LCD Monitor)
-4146 USBest Technology
- 9281 Iomega Micro Mini 128MB Flash Drive
- ba01 Intuix Flash Drive
-4242 USB Design by Example
- 4201 Buttons and Lights HID device
- 4220 Echo 1 Camera
-4348 WinChipHead
- 5523 USB->RS 232 adapter with Prolific PL 2303 chipset
- 5537 13.56Mhz RFID Card Reader and Writer
- 5584 CH34x printer adapter cable
-4572 Shuttle, Inc.
- 4572 Shuttle PN31 Remote
-4586 Panram
- 1026 Crystal Bar Flash Drive
-4670 EMS Production
- 9394 Game Cube USB Memory Adaptor 64M
-4752 Miditech
- 0011 Midistart-2
-4766 Aceeca
- 0001 MEZ1000 RDA
-4855 Memorex
- 7288 Ultra Traveldrive 160G 2.5" HDD
-5032 Grandtec
- 0bb8 Grandtec USB1.1 DVB-T (cold)
- 0bb9 Grandtec USB1.1 DVB-T (warm)
- 0fa0 Grandtec USB1.1 DVB-T (cold)
- 0fa1 Grandtec USB1.1 DVB-T (warm)
-5041 Linksys (?)
- 2234 WUSB54G 802.11g Adapter
-5173 Sweex
- 1809 ZD1211
-5345 Owon
- 1234 PDS6062T Oscilloscope
-544d Transmeta Corp.
-5543 UC-Logic Technology Corp.
- 0002 SuperPen WP3325U Tablet
- 0003 Genius MousePen 4x3 Tablet/Aquila L1 Tablet
- 0004 Genius MousePen 5x4 Tablet
- 0005 Genius MousePen 8x6 Tablet
- 0041 Genius PenSketch 6x8 Tablet
- 0042 Genius PenSketch 12x9 Tablet
-55aa OnSpec Electronic, Inc.
- 0015 Hard Drive
- 0102 SuperDisk
- 0103 IDE Hard Drive
- 0201 DDI to Reader-19
- 1234 ATAPI Bridge
- a103 Sandisk SDDR-55 SmartMedia Card Reader
- b000 USB to CompactFlash Card Reader
- b004 OnSpec MMC/SD Reader/Writer
- b00b USB to Memory Stick Card Reader
- b00c USB to SmartMedia Card Reader
- b012 Mitsumi FA402M 8-in-2 Card Reader
- b200 Compact Flash Reader
- b204 MMC/ SD Reader
- b207 Memory Stick Reader
-5986 Acer, Inc
- 0102 Crystal Eye webcam
-5a57 Zinwell
- 0260 RT2570
-6189 Sitecom
- 182d USB 2.0 Ethernet
- 2068 USB to serial cable (v2)
-6253 TwinHan Technology Co., Ltd
- 0100 IR receiver for remote control
-636c CoreLogic, Inc.
-6547 Arkmicro Technologies Inc.
- 0232 ARK3116 Serial
-6666 Prototype product Vendor ID
- 0667 WiseGroup Smart Joy PSX, PS-PC Smart JoyPad
- 2667 JCOP BlueZ Smartcard reader
- 8804 WiseGroup SuperJoy Box 5
-6891 3Com
- a727 3CRUSB10075
-6993 Freshtel
- b001 FT-102 VoIP USB Phone
-6a75 Shanghai Jujo Electronics Co., Ltd
-7104 CME (Central Music Co.)
- 2202 UF5/UF6/UF7/UF8 MIDI Master Keyboard
-8086 Intel Corp.
- 0001 AnyPoint (TM) Home Network 1.6 Mbps Wireless Adapter
- 0100 Personal Audio Player 3000
- 0101 Personal Audio Player 3000
- 0110 Easy PC Camera
- 0120 PC Camera CS120
- 0200 AnyPoint(TM) Wireless II Network 11Mbps Adapter
- 0431 Intel Pro Video PC Camera
- 0510 Digital Movie Creator
- 0630 Pocket PC Camera
- 0780 CS780 Microphone Input
- 07d3 BLOB boot loader firmware
- 0dad Cherry MiniatureCard Keyboard
- 1010 AnyPoint(TM) Home Network 10 Mbps Phoneline Adapter
- 110a Bluetooth Controller from (Ericsson P4A)
- 110b Bluetooth Controller from (Intel/CSR)
- 1110 PRO/Wireless LAN Module
- 1111 PRO/Wireless 2011B 802.11b Adapter
- 1134 Hollister Mobile Monitor
- 1234 Prototype Reader/Writer
- 3100 PRO/DSL 3220 Modem - WAN
- 3101 PRO/DSL 3220 Modem
- 3240 AnyPoint® 3240 Modem - WAN
- 3241 AnyPoint® 3240 Modem
- 8602 Miniature Card Slot
- 9303 Intel 8x930Hx Hub
- 9890 82930 Test Board
- beef SCM Miniature Card Reader/Writer
- c013 Wireless HID Station
- f001 XScale PXA27x Bulverde flash
-8341 EGO Systems, Inc.
- 2000 Flashdisk
-9016 Sitecom
- 182d WL-022
-9710 MosChip Semiconductor
- 7703 MCS7703 Serial Port Adapter
- 7705 Printer cable
- 7715 Printer cable
- 7780 MS7780 4Mbps Fast IRDA Adapter
- 7830 MCS7830 Ethernet
-a727 3Com
- 6893 AR5523
- 6895 AR5523
- 6897 AR5523
-c251 Keil Software, Inc.
- 2710 ULink
-eb1a eMPIA Technology, Inc.
- 17de KWorld V-Stream XPERT DTV - DVB-T USB cold
- 17df KWorld V-Stream XPERT DTV - DVB-T USB warm
- 2710 SilverCrest WebCam
- 2750 ECS Elitegroup G220 integrated webcam
- 2800 Terratec Cinergy 200
- 2801 GrabBeeX+ Video Encoder
-f003 Hewlett Packard
- 6002 PhotoSmart C500
-
-# List of known device classes, subclasses and protocols
-
-# Syntax:
-# C class class_name
-# subclass subclass_name <-- single tab
-# protocol protocol_name <-- two tabs
-
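The class records below follow the same layered layout as the vendor/device records above: a line beginning with "C" opens a class, a single-tab line under it names a subclass, and a two-tab line under that names a protocol. The following Python sketch shows one way a consumer of this file might walk that structure; the parse_classes name, the default path, and the nested-dict layout are illustrative assumptions, not part of the database itself.

# Minimal sketch, not the canonical parser: read the "C class" section of a
# usb.ids-style file into nested dicts:
#   {class_id: (class_name, {subclass_id: (subclass_name, {protocol_id: protocol_name})})}
def parse_classes(path="/usr/share/hwdata/usb.ids"):
    classes = {}
    cur_class = cur_sub = None
    with open(path, encoding="utf-8", errors="replace") as fh:
        for raw in fh:
            line = raw.rstrip("\n")
            if not line.strip() or line.startswith("#"):
                continue                               # skip blank lines and comments
            if line.startswith("C "):                  # e.g. "C 03 Human Interface Device"
                _, cid, name = line.split(None, 2)
                cur_class = classes.setdefault(cid, (name, {}))
                cur_sub = None
            elif line.startswith("\t\t"):              # protocol line (two tabs)
                if cur_sub is not None:
                    pid, pname = line.strip().split(None, 1)
                    cur_sub[1][pid] = pname
            elif line.startswith("\t"):                # subclass line (one tab)
                if cur_class is not None:
                    sid, sname = line.strip().split(None, 1)
                    cur_sub = cur_class[1].setdefault(sid, (sname, {}))
            else:                                      # any other top-level record (vendor, AT, HUT, ...)
                cur_class = cur_sub = None
    return classes
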
-C 00 (Defined at Interface level)
-C 01 Audio
- 01 Control Device
- 02 Streaming
- 03 MIDI Streaming
-C 02 Communications
- 01 Direct Line
- 02 Abstract (modem)
- 00 None
- 01 AT-commands (v.25ter)
- 02 AT-commands (PCCA101)
- 03 AT-commands (PCCA101 + wakeup)
- 04 AT-commands (GSM)
- 05 AT-commands (3G)
- 06 AT-commands (CDMA)
- fe Defined by command set descriptor
- ff Vendor Specific (MSFT RNDIS?)
- 03 Telephone
- 04 Multi-Channel
- 05 CAPI Control
- 06 Ethernet Networking
- 07 ATM Networking
- 08 Wireless Handset Control
- 09 Device Management
- 0a Mobile Direct Line
- 0b OBEX
- 0c Ethernet Emulation
- 07 Ethernet Emulation (EEM)
-C 03 Human Interface Device
- 00 No Subclass
- 00 None
- 01 Keyboard
- 02 Mouse
- 01 Boot Interface Subclass
- 00 None
- 01 Keyboard
- 02 Mouse
-C 05 Physical Interface Device
-C 06 Imaging
- 01 Still Image Capture
- 01 Picture Transfer Protocol (PIMA 15470)
-C 07 Printer
- 01 Printer
- 00 Reserved/Undefined
- 01 Unidirectional
- 02 Bidirectional
- 03 IEEE 1284.4 compatible bidirectional
- ff Vendor Specific
-C 08 Mass Storage
- 01 RBC (typically Flash)
- 00 Control/Bulk/Interrupt
- 01 Control/Bulk
- 50 Bulk (Zip)
- 02 SFF-8020i, MMC-2 (ATAPI)
- 03 QIC-157
- 04 Floppy (UFI)
- 00 Control/Bulk/Interrupt
- 01 Control/Bulk
- 50 Bulk (Zip)
- 05 SFF-8070i
- 06 SCSI
- 00 Control/Bulk/Interrupt
- 01 Control/Bulk
- 50 Bulk (Zip)
-C 09 Hub
- 00 Unused
- 00 Full speed (or root) hub
- 01 Single TT
- 02 TT per port
-C 0a CDC Data
- 00 Unused
- 30 I.430 ISDN BRI
- 31 HDLC
- 32 Transparent
- 50 Q.921M
- 51 Q.921
- 52 Q.921TM
- 90 V.42bis
- 91 Q.932 EuroISDN
- 92 V.120 V.24 rate ISDN
- 93 CAPI 2.0
- fd Host Based Driver
- fe CDC PUF
- ff Vendor specific
-C 0b Chip/SmartCard
-C 0d Content Security
-C 0e Video
- 00 Undefined
- 01 Video Control
- 02 Video Streaming
- 03 Video Interface Collection
-C dc Diagnostic
- 01 Reprogrammable Diagnostics
- 01 USB2 Compliance
-C e0 Wireless
- 01 Radio Frequency
- 01 Bluetooth
- 02 Ultra WideBand Radio Control
- 03 RNDIS
- 02 Wireless USB Wire Adapter
- 01 Host Wire Adapter Control/Data Streaming
- 02 Device Wire Adapter Control/Data Streaming
- 03 Device Wire Adapter Isochronous Streaming
-C ef Miscellaneous Device
- 01 ?
- 01 Microsoft ActiveSync
- 02 Palm Sync
- 02 ?
- 01 Interface Association
- 02 Wire Adapter Multifunction Peripheral
- 03 ?
- 01 Cable Based Association
-C fe Application Specific Interface
- 01 Device Firmware Update
- 02 IRDA Bridge
- 03 Test and Measurement
- 01 TMC
- 02 USB488
-C ff Vendor Specific Class
- ff Vendor Specific Subclass
- ff Vendor Specific Protocol
-
-# List of Audio Class Terminal Types
-
-# Syntax:
-# AT terminal_type terminal_type_name
-
-AT 0100 USB Undefined
-AT 0101 USB Streaming
-AT 01ff USB Vendor Specific
-AT 0200 Input Undefined
-AT 0201 Microphone
-AT 0202 Desktop Microphone
-AT 0203 Personal Microphone
-AT 0204 Omni-directional Microphone
-AT 0205 Microphone Array
-AT 0206 Processing Microphone Array
-AT 0300 Output Undefined
-AT 0301 Speaker
-AT 0302 Headphones
-AT 0303 Head Mounted Display Audio
-AT 0304 Desktop Speaker
-AT 0305 Room Speaker
-AT 0306 Communication Speaker
-AT 0307 Low Frequency Effects Speaker
-AT 0400 Bidirectional Undefined
-AT 0401 Handset
-AT 0402 Headset
-AT 0403 Speakerphone, no echo reduction
-AT 0404 Echo-suppressing speakerphone
-AT 0405 Echo-canceling speakerphone
-AT 0500 Telephony Undefined
-AT 0501 Phone line
-AT 0502 Telephone
-AT 0503 Down Line Phone
-AT 0600 External Undefined
-AT 0601 Analog Connector
-AT 0602 Digital Audio Interface
-AT 0603 Line Connector
-AT 0604 Legacy Audio Connector
-AT 0605 SPDIF interface
-AT 0606 1394 DA stream
-AT 0607 1394 DV stream soundtrack
-AT 0700 Embedded Undefined
-AT 0701 Level Calibration Noise Source
-AT 0702 Equalization Noise
-AT 0703 CD Player
-AT 0704 DAT
-AT 0705 DCC
-AT 0706 MiniDisc
-AT 0707 Analog Tape
-AT 0708 Phonograph
-AT 0709 VCR Audio
-AT 070a Video Disc Audio
-AT 070b DVD Audio
-AT 070c TV Tuner Audio
-AT 070d Satellite Receiver Audio
-AT 070e Cable Tuner Audio
-AT 070f DSS Audio
-AT 0710 Radio Receiver
-AT 0711 Radio Transmitter
-AT 0712 Multitrack Recorder
-AT 0713 Synthesizer
-
-# List of HID Descriptor Types
-
-# Syntax:
-# HID descriptor_type descriptor_type_name
-
-HID 21 HID
-HID 22 Report
-HID 23 Physical
-
-# List of HID Descriptor Item Types
-# Note: the two least-significant bits of each item code encode the length of the data that follows
-
-# Syntax:
-# R item_type item_type_name
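
To make the note above concrete: in a HID report descriptor each short item starts with a prefix byte whose upper six bits identify the item (the codes listed below are those prefixes with the two size bits cleared) and whose two low bits give the number of data bytes that follow (a size code of 3 means 4 bytes). The Python sketch below is illustrative only; the decode_item helper and its cut-down lookup table are assumptions for the example, not something defined by this file.

# Illustrative only: decode a HID short-item prefix byte. The keys mirror the
# "R" codes listed here (item code with the two size bits cleared); the table
# is a small excerpt, not the full set.
ITEM_NAMES = {
    0x80: "Input", 0x90: "Output", 0xb0: "Feature",
    0xa0: "Collection", 0xc0: "End Collection",
    0x04: "Usage Page", 0x74: "Report Size", 0x84: "Report ID",
    0x94: "Report Count", 0x08: "Usage",
}
DATA_BYTES = {0: 0, 1: 1, 2: 2, 3: 4}    # size code 3 means 4 data bytes

def decode_item(prefix: int):
    """Return (item name, number of data bytes that follow the prefix)."""
    return ITEM_NAMES.get(prefix & 0xfc, "Unknown"), DATA_BYTES[prefix & 0x03]

# 0x85 0x01 in a report descriptor is "Report ID 1": item code 0x84 plus one data byte.
assert decode_item(0x85) == ("Report ID", 1)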
-
-# Main Items
-R 80 Input
-R 90 Output
-R b0 Feature
-R a0 Collection
-R c0 End Collection
-
-# Global Items
-R 04 Usage Page
-R 14 Logical Minimum
-R 24 Logical Maximum
-R 34 Physical Minimum
-R 44 Physical Maximum
-R 54 Unit Exponent
-R 64 Unit
-R 74 Report Size
-R 84 Report ID
-R 94 Report Count
-R a4 Push
-R b4 Pop
-
-# Local Items
-R 08 Usage
-R 18 Usage Minimum
-R 28 Usage Maximum
-R 38 Designator Index
-R 48 Designator Minimum
-R 58 Designator Maximum
-R 78 String Index
-R 88 String Minimum
-R 98 String Maximum
-R a8 Delimiter
-
-# List of Physical Descriptor Bias Types
-
-# Syntax:
-# BIAS item_type item_type_name
-
-BIAS 0 Not Applicable
-BIAS 1 Right Hand
-BIAS 2 Left Hand
-BIAS 3 Both Hands
-BIAS 4 Either Hand
-
-# List of Physical Descriptor Item Types
-
-# Syntax:
-# PHY item_type item_type_name
-
-PHY 00 None
-PHY 01 Hand
-PHY 02 Eyeball
-PHY 03 Eyebrow
-PHY 04 Eyelid
-PHY 05 Ear
-PHY 06 Nose
-PHY 07 Mouth
-PHY 08 Upper Lip
-PHY 09 Lower Lip
-PHY 0a Jaw
-PHY 0b Neck
-PHY 0c Upper Arm
-PHY 0d Elbow
-PHY 0e Forearm
-PHY 0f Wrist
-PHY 10 Palm
-PHY 11 Thumb
-PHY 12 Index Finger
-PHY 13 Middle Finger
-PHY 14 Ring Finger
-PHY 15 Little Finger
-PHY 16 Head
-PHY 17 Shoulder
-PHY 18 Hip
-PHY 19 Waist
-PHY 1a Thigh
-PHY 1b Knee
-PHY 1c Calf
-PHY 1d Ankle
-PHY 1e Foot
-PHY 1f Heel
-PHY 20 Ball of Foot
-PHY 21 Big Toe
-PHY 22 Second Toe
-PHY 23 Third Toe
-PHY 24 Fourth Toe
-PHY 25 Fifth Toe
-PHY 26 Brow
-PHY 27 Cheek
-
-# List of HID Usages
-
-# Syntax:
-# HUT hid_usage_page hid_usage_page_name
-# hid_usage hid_usage_name
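
Usages are a two-level lookup: the page code on a "HUT" line combined with the usage code on the indented line beneath it. Below is a minimal, illustrative helper, assuming the list has already been parsed into a nested dict keyed by integers (for example by adapting the class-section sketch above to "HUT" records); the hut argument and usage_name helper are assumptions for the example.

# Illustrative helper: resolve a (usage page, usage) pair against this table,
# where hut = {page: (page_name, {usage: usage_name})} with integer keys.
def usage_name(hut, page, usage):
    page_name, table = hut.get(page, ("Unknown page", {}))
    return f"{page_name} / {table.get(usage, 'Unknown usage')}"

# Examples drawn from the entries below:
#   usage_name(hut, 0x01, 0x30)  ->  "Generic Desktop Controls / Direction-X"
#   usage_name(hut, 0x07, 0x04)  ->  "Keyboard / A"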
-
-HUT 00 Undefined
-HUT 01 Generic Desktop Controls
- 000 Undefined
- 001 Pointer
- 002 Mouse
- 004 Joystick
- 005 Gamepad
- 006 Keyboard
- 007 Keypad
- 008 Multi-Axis Controller
- 030 Direction-X
- 031 Direction-Y
- 032 Direction-Z
- 033 Rotate-X
- 034 Rotate-Y
- 035 Rotate-Z
- 036 Slider
- 037 Dial
- 038 Wheel
- 039 Hat Switch
- 03a Counted Buffer
- 03b Byte Count
- 03c Motion Wakeup
- 03d Start
- 03e Select
- 040 Vector-X
- 041 Vector-Y
- 042 Vector-Z
- 043 Vector-X relative Body
- 044 Vector-Y relative Body
- 045 Vector-Z relative Body
- 046 Vector
- 080 System Control
- 081 System Power Down
- 082 System Sleep
- 083 System Wake Up
- 084 System Context Menu
- 085 System Main Menu
- 086 System App Menu
- 087 System Menu Help
- 088 System Menu Exit
- 089 System Menu Select
- 08a System Menu Right
- 08b System Menu Left
- 08c System Menu Up
- 08d System Menu Down
- 090 Direction Pad Up
- 091 Direction Pad Down
- 092 Direction Pad Right
- 093 Direction Pad Left
-HUT 02 Simulation Controls
- 000 Undefined
- 001 Flight Simulation Device
- 002 Automobile Simulation Device
- 003 Tank Simulation Device
- 004 Spaceship Simulation Device
- 005 Submarine Simulation Device
- 006 Sailing Simulation Device
- 007 Motorcycle Simulation Device
- 008 Sports Simulation Device
- 009 Airplane Simulation Device
- 00a Helicopter Simulation Device
- 00b Magic Carpet Simulation Device
- 00c Bicycle Simulation Device
- 020 Flight Control Stick
- 021 Flight Stick
- 022 Cyclic Control
- 023 Cyclic Trim
- 024 Flight Yoke
- 025 Track Control
- 0b0 Aileron
- 0b1 Aileron Trim
- 0b2 Anti-Torque Control
- 0b3 Autopilot Enable
- 0b4 Chaff Release
- 0b5 Collective Control
- 0b6 Dive Brake
- 0b7 Electronic Countermeasures
- 0b8 Elevator
- 0b9 Elevator Trim
- 0ba Rudder
- 0bb Throttle
- 0bc Flight Communications
- 0bd Flare Release
- 0be Landing Gear
- 0bf Toe Brake
- 0c0 Trigger
- 0c1 Weapon Arm
- 0c2 Weapons Select
- 0c3 Wing Flaps
- 0c4 Accelerator
- 0c5 Brake
- 0c6 Clutch
- 0c7 Shifter
- 0c8 Steering
- 0c9 Turret Direction
- 0ca Barrel Elevation
- 0cb Drive Plane
- 0cc Ballast
- 0cd Bicycle Crank
- 0ce Handle Bars
- 0cf Front Brake
- 0d0 Rear Brake
-HUT 03 VR Controls
- 000 Unidentified
- 001 Belt
- 002 Body Suit
- 003 Flexor
- 004 Glove
- 005 Head Tracker
- 006 Head Mounted Display
- 007 Hand Tracker
- 008 Oculometer
- 009 Vest
- 00a Animatronic Device
- 020 Stereo Enable
- 021 Display Enable
-HUT 04 Sport Controls
- 000 Unidentified
- 001 Baseball Bat
- 002 Golf Club
- 003 Rowing Machine
- 004 Treadmill
- 030 Oar
- 031 Slope
- 032 Rate
- 033 Stick Speed
- 034 Stick Face Angle
- 035 Stick Heel/Toe
- 036 Stick Follow Through
- 037 Stick Tempo
- 038 Stick Type
- 039 Stick Height
- 050 Putter
- 051 1 Iron
- 052 2 Iron
- 053 3 Iron
- 054 4 Iron
- 055 5 Iron
- 056 6 Iron
- 057 7 Iron
- 058 8 Iron
- 059 9 Iron
- 05a 10 Iron
- 05b 11 Iron
- 05c Sand Wedge
- 05d Loft Wedge
- 05e Power Wedge
- 05f 1 Wood
- 060 3 Wood
- 061 5 Wood
- 062 7 Wood
- 063 9 Wood
-HUT 05 Game Controls
- 000 Undefined
- 001 3D Game Controller
- 002 Pinball Device
- 003 Gun Device
- 020 Point Of View
- 021 Turn Right/Left
- 022 Pitch Right/Left
- 023 Roll Forward/Backward
- 024 Move Right/Left
- 025 Move Forward/Backward
- 026 Move Up/Down
- 027 Lean Right/Left
- 028 Lean Forward/Backward
- 029 Height of POV
- 02a Flipper
- 02b Secondary Flipper
- 02c Bump
- 02d New Game
- 02e Shoot Ball
- 02f Player
- 030 Gun Bolt
- 031 Gun Clip
- 032 Gun Selector
- 033 Gun Single Shot
- 034 Gun Burst
- 035 Gun Automatic
- 036 Gun Safety
- 037 Gamepad Fire/Jump
- 038 Gamepad Fun
- 039 Gamepad Trigger
-HUT 07 Keyboard
- 000 No Event
- 001 Keyboard ErrorRollOver
- 002 Keyboard POSTfail
- 003 Keyboard Error Undefined
- 004 A
- 005 B
- 006 C
- 007 D
- 008 E
- 009 F
- 00a G
- 00b H
- 00c I
- 00d J
- 00e K
- 00f L
- 010 M
- 011 N
- 012 O
- 013 P
- 014 Q
- 015 R
- 016 S
- 017 T
- 018 U
- 019 V
- 01a W
- 01b X
- 01c Y
- 01d Z
- 01e 1 and ! (One and Exclamation)
- 01f 2 and @ (2 and at)
- 020 3 and # (3 and Hash)
- 021 4 and $ (4 and Dollar Sign)
- 022 5 and % (5 and Percent Sign)
- 023 6 and ^ (6 and circumflex)
- 024 7 and & (Seven and Ampersand)
- 025 8 and * (Eight and asterisk)
- 026 9 and ( (Nine and Parenthesis Left)
- 027 0 and ) (Zero and Parenthesis Right)
- 028 Return (Enter)
- 029 Escape
- 02a Delete (Backspace)
- 02b Tab
- 02c Space Bar
- 02d - and _ (Minus and underscore)
- 02e = and + (Equal and Plus)
- 02f [ and { (Bracket and Braces Left)
- 030 ] and } (Bracket and Braces Right)
- 031 \ and | (Backslash and Bar)
- 032 # and ~ (Hash and Tilde, Non-US Keyboard near right shift)
- 033 ; and : (Semicolon and Colon)
- 034 ´ and " (Accent Acute and Double Quotes)
- 035 ` and ~ (Accent Grave and Tilde)
- 036 , and < (Comma and Less)
- 037 . and > (Period and Greater)
- 038 / and ? (Slash and Question Mark)
- 039 Caps Lock
- 03a F1
- 03b F2
- 03c F3
- 03d F4
- 03e F5
- 03f F6
- 040 F7
- 041 F8
- 042 F9
- 043 F10
- 044 F11
- 045 F12
- 046 Print Screen
- 047 Scroll Lock
- 048 Pause
- 049 Insert
- 04a Home
- 04b Page Up
- 04c Delete Forward (without Changing Position)
- 04d End
- 04e Page Down
- 04f Right Arrow
- 050 Left Arrow
- 051 Down Arrow
- 052 Up Arrow
- 053 Num Lock and Clear
- 054 Keypad / (Division Sign)
- 055 Keypad * (Multiplication Sign)
- 056 Keypad - (Subtraction Sign)
- 057 Keypad + (Addition Sign)
- 058 Keypad Enter
- 059 Keypad 1 and END
- 05a Keypad 2 and Down Arrow
- 05b Keypad 3 and Page Down
- 05c Keypad 4 and Left Arrow
- 05d Keypad 5 (Tactile Raised)
- 05e Keypad 6 and Right Arrow
- 05f Keypad 7 and Home
- 060 Keypad 8 and Up Arrow
- 061 Keypad 9 and Page Up
- 062 Keypad 0 and Insert
- 063 Keypad . (decimal delimiter) and Delete
- 064 \ and | (Backslash and Bar, UK and Non-US Keyboard near left shift)
- 065 Keyboard Application (Windows Key for Win95 or Compose)
- 066 Power (not a key)
- 067 Keypad = (Equal Sign)
- 068 F13
- 069 F14
- 06a F15
- 06b F16
- 06c F17
- 06d F18
- 06e F19
- 06f F20
- 070 F21
- 071 F22
- 072 F23
- 073 F24
- 074 Execute
- 075 Help
- 076 Menu
- 077 Select
- 078 Stop
- 079 Again
- 07a Undo
- 07b Cut
- 07c Copy
- 07d Paste
- 07e Find
- 07f Mute
- 080 Volume Up
- 081 Volume Down
- 082 Locking Caps Lock
- 083 Locking Num Lock
- 084 Locking Scroll Lock
- 085 Keypad Comma
- 086 Keypad Equal Sign (AS/400)
- 087 International 1 (PC98)
- 088 International 2 (PC98)
- 089 International 3 (PC98)
- 08a International 4 (PC98)
- 08b International 5 (PC98)
- 08c International 6 (PC98)
- 08d International 7 (Toggle Single/Double Byte Mode)
- 08e International 8
- 08f International 9
- 090 LANG 1 (Hangul/English Toggle, Korea)
- 091 LANG 2 (Hanja Conversion, Korea)
- 092 LANG 3 (Katakana, Japan)
- 093 LANG 4 (Hiragana, Japan)
- 094 LANG 5 (Zenkaku/Hankaku, Japan)
- 095 LANG 6
- 096 LANG 7
- 097 LANG 8
- 098 LANG 9
- 099 Alternate Erase
- 09a SysReq/Attention
- 09b Cancel
- 09c Clear
- 09d Prior
- 09e Return
- 09f Separator
- 0a0 Out
- 0a1 Open
- 0a2 Clear/Again
- 0a3 CrSel/Props
- 0a4 ExSel
- 0e0 Control Left
- 0e1 Shift Left
- 0e2 Alt Left
- 0e3 GUI Left
- 0e4 Control Right
- 0e5 Shift Right
- 0e6 Alt Right
- 0e7 GUI Right
-HUT 08 LEDs
- 000 Undefined
- 001 NumLock
- 002 CapsLock
- 003 Scroll Lock
- 004 Compose
- 005 Kana
- 006 Power
- 007 Shift
- 008 Do not disturb
- 009 Mute
- 00a Tone Enable
- 00b High Cut Filter
- 00c Low Cut Filter
- 00d Equalizer Enable
- 00e Sound Field ON
- 00f Surround On
- 010 Repeat
- 011 Stereo
- 012 Sampling Rate Detect
- 013 Spinning
- 014 CAV
- 015 CLV
- 016 Recording Format Detect
- 017 Off-Hook
- 018 Ring
- 019 Message Waiting
- 01a Data Mode
- 01b Battery Operation
- 01c Battery OK
- 01d Battery Low
- 01e Speaker
- 01f Head Set
- 020 Hold
- 021 Microphone
- 022 Coverage
- 023 Night Mode
- 024 Send Calls
- 025 Call Pickup
- 026 Conference
- 027 Stand-by
- 028 Camera On
- 029 Camera Off
- 02a On-Line
- 02b Off-Line
- 02c Busy
- 02d Ready
- 02e Paper-Out
- 02f Paper-Jam
- 030 Remote
- 031 Forward
- 032 Reverse
- 033 Stop
- 034 Rewind
- 035 Fast Forward
- 036 Play
- 037 Pause
- 038 Record
- 039 Error
- 03a Usage Selected Indicator
- 03b Usage In Use Indicator
- 03c Usage Multi Indicator
- 03d Indicator On
- 03e Indicator Flash
- 03f Indicator Slow Blink
- 040 Indicator Fast Blink
- 041 Indicator Off
- 042 Flash On Time
- 043 Slow Blink On Time
- 044 Slow Blink Off Time
- 045 Fast Blink On Time
- 046 Fast Blink Off Time
- 047 Usage Color Indicator
- 048 Indicator Red
- 049 Indicator Green
- 04a Indicator Amber
- 04b Generic Indicator
- 04c System Suspend
- 04d External Power Connected
-HUT 09 Buttons
- 000 No Button Pressed
- 001 Button 1 (Primary)
- 002 Button 2 (Secondary)
- 003 Button 3 (Tertiary)
- 004 Button 4
- 005 Button 5
-HUT 0a Ordinal
- 001 Instance 1
- 002 Instance 2
- 003 Instance 3
-HUT 0b Telephony
- 000 Unassigned
- 001 Phone
- 002 Answering Machine
- 003 Message Controls
- 004 Handset
- 005 Headset
- 006 Telephony Key Pad
- 007 Programmable Button
- 020 Hook Switch
- 021 Flash
- 022 Feature
- 023 Hold
- 024 Redial
- 025 Transfer
- 026 Drop
- 027 Park
- 028 Forward Calls
- 029 Alternate Function
- 02a Line
- 02b Speaker Phone
- 02c Conference
- 02d Ring Enable
- 02e Ring Select
- 02f Phone Mute
- 030 Caller ID
- 050 Speed Dial
- 051 Store Number
- 052 Recall Number
- 053 Phone Directory
- 070 Voice Mail
- 071 Screen Calls
- 072 Do Not Disturb
- 073 Message
- 074 Answer On/Off
- 090 Inside Dial Tone
- 091 Outside Dial Tone
- 092 Inside Ring Tone
- 093 Outside Ring Tone
- 094 Priority Ring Tone
- 095 Inside Ringback
- 096 Priority Ringback
- 097 Line Busy Tone
- 098 Recorder Tone
- 099 Call Waiting Tone
- 09a Confirmation Tone 1
- 09b Confirmation Tone 2
- 09c Tones Off
- 09d Outside Ringback
- 0b0 Key 1
- 0b1 Key 2
- 0b3 Key 3
- 0b4 Key 4
- 0b5 Key 5
- 0b6 Key 6
- 0b7 Key 7
- 0b8 Key 8
- 0b9 Key 9
- 0ba Key Star
- 0bb Key Pound
- 0bc Key A
- 0bd Key B
- 0be Key C
- 0bf Key D
-HUT 0c Consumer
- 000 Unassigned
- 001 Consumer Control
- 002 Numeric Key Pad
- 003 Programmable Buttons
- 020 +10
- 021 +100
- 022 AM/PM
- 030 Power
- 031 Reset
- 032 Sleep
- 033 Sleep After
- 034 Sleep Mode
- 035 Illumination
- 036 Function Buttons
- 040 Menu
- 041 Menu Pick
- 042 Menu Up
- 043 Menu Down
- 044 Menu Left
- 045 Menu Right
- 046 Menu Escape
- 047 Menu Value Increase
- 048 Menu Value Decrease
- 060 Data on Screen
- 061 Closed Caption
- 062 Closed Caption Select
- 063 VCR/TV
- 064 Broadcast Mode
- 065 Snapshot
- 066 Still
- 080 Selection
- 081 Assign Selection
- 082 Mode Step
- 083 Recall Last
- 084 Enter Channel
- 085 Order Movie
- 086 Channel
- 087 Media Selection
- 088 Media Select Computer
- 089 Media Select TV
- 08a Media Select WWW
- 08b Media Select DVD
- 08c Media Select Telephone
- 08d Media Select Program Guide
- 08e Media Select Video Phone
- 08f Media Select Games
- 090 Media Select Messages
- 091 Media Select CD
- 092 Media Select VCR
- 093 Media Select Tuner
- 094 Quit
- 095 Help
- 096 Media Select Tape
- 097 Media Select Cable
- 098 Media Select Satellite
- 099 Media Select Security
- 09a Media Select Home
- 09b Media Select Call
- 09c Channel Increment
- 09d Channel Decrement
- 09e Media Select SAP
- 0a0 VCR Plus
- 0a1 Once
- 0a2 Daily
- 0a3 Weekly
- 0a4 Monthly
- 0b0 Play
- 0b1 Pause
- 0b2 Record
- 0b3 Fast Forward
- 0b4 Rewind
- 0b5 Scan Next Track
- 0b6 Scan Previous Track
- 0b7 Stop
- 0b8 Eject
- 0b9 Random Play
- 0ba Select Disc
- 0bb Enter Disc
- 0bc Repeat
- 0bd Tracking
- 0be Track Normal
- 0bf Slow Tracking
- 0c0 Frame Forward
- 0c1 Frame Back
- 0c2 Mark
- 0c3 Clear Mark
- 0c4 Repeat from Mark
- 0c5 Return to Mark
- 0c6 Search Mark Forward
- 0c7 Search Mark Backward
- 0c8 Counter Reset
- 0c9 Show Counter
- 0ca Tracking Increment
- 0cb Tracking Decrement
- 0cc Stop/Eject
- 0cd Play/Pause
- 0ce Play/Skip
- 0e0 Volume
- 0e1 Balance
- 0e2 Mute
- 0e3 Bass
- 0e4 Treble
- 0e5 Bass Boost
- 0e6 Surround Mode
- 0e7 Loudness
- 0e8 MPX
- 0e9 Volume Increment
- 0ea Volume Decrement
- 0f0 Speed Select
- 0f1 Playback Speed
- 0f2 Standard Play
- 0f3 Long Play
- 0f4 Extended Play
- 0f5 Slow
- 100 Fan Enable
- 101 Fan Speed
- 102 Light Enable
- 103 Light Illumination Level
- 104 Climate Control Enable
- 105 Room Temperature
- 106 Security Enable
- 107 Fire Alarm
- 108 Police Alarm
- 150 Balance Right
- 151 Balance Left
- 152 Bass Increment
- 153 Bass Decrement
- 154 Treble Increment
- 155 Treble Decrement
- 160 Speaker System
- 161 Channel Left
- 162 Channel Right
- 163 Channel Center
- 164 Channel Front
- 165 Channel Center Front
- 166 Channel Side
- 167 Channel Surround
- 168 Channel Low Frequency Enhancement
- 169 Channel Top
- 16a Channel Unknown
- 170 Sub-Channel
- 171 Sub-Channel Increment
- 172 Sub-Channel Decrement
- 173 Alternative Audio Increment
- 174 Alternative Audio Decrement
- 180 Application Launch Buttons
- 181 AL Launch Button Configuration Tool
- 182 AL Launch Button Configuration
- 183 AL Consumer Control Configuration
- 184 AL Word Processor
- 185 AL Text Editor
- 186 AL Spreadsheet
- 187 AL Graphics Editor
- 188 AL Presentation App
- 189 AL Database App
- 18a AL Email Reader
- 18b AL Newsreader
- 18c AL Voicemail
- 18d AL Contacts/Address Book
- 18e AL Calendar/Schedule
- 18f AL Task/Project Manager
- 190 AL Log/Journal/Timecard
- 191 AL Checkbook/Finance
- 192 AL Calculator
- 193 AL A/V Capture/Playback
- 194 AL Local Machine Browser
- 195 AL LAN/WAN Browser
- 196 AL Internet Browser
- 197 AL Remote Networking/ISP Connect
- 198 AL Network Conference
- 199 AL Network Chat
- 19a AL Telephony/Dialer
- 19b AL Logon
- 19c AL Logoff
- 19d AL Logon/Logoff
- 19e AL Terminal Lock/Screensaver
- 19f AL Control Panel
- 1a0 AL Command Line Processor/Run
- 1a1 AL Process/Task Manager
- 1a2 AL Select Task/Application
- 1a3 AL Next Task/Application
- 1a4 AL Previous Task/Application
- 1a5 AL Preemptive Halt Task/Application
- 200 Generic GUI Application Controls
- 201 AC New
- 202 AC Open
- 203 AC Close
- 204 AC Exit
- 205 AC Maximize
- 206 AC Minimize
- 207 AC Save
- 208 AC Print
- 209 AC Properties
- 21a AC Undo
- 21b AC Copy
- 21c AC Cut
- 21d AC Paste
- 21e AC Select All
- 21f AC Find
- 220 AC Find and Replace
- 221 AC Search
- 222 AC Go To
- 223 AC Home
- 224 AC Back
- 225 AC Forward
- 226 AC Stop
- 227 AC Refresh
- 228 AC Previous Link
- 229 AC Next Link
- 22b AC History
- 22c AC Subscriptions
- 22d AC Zoom In
- 22e AC Zoom Out
- 22f AC Zoom
- 230 AC Full Screen View
- 231 AC Normal View
- 232 AC View Toggle
- 233 AC Scroll Up
- 234 AC Scroll Down
- 235 AC Scroll
- 236 AC Pan Left
- 237 AC Pan Right
- 238 AC Pan
- 239 AC New Window
- 23a AC Tile Horizontally
- 23b AC Tile Vertically
- 23c AC Format
-HUT 0d Digitizer
- 000 Undefined
- 001 Digitizer
- 002 Pen
- 003 Light Pen
- 004 Touch Screen
- 005 Touch Pad
- 006 White Board
- 007 Coordinate Measuring Machine
- 008 3D Digitizer
- 009 Stereo Plotter
- 00a Articulated Arm
- 00b Armature
- 00c Multiple Point Digitizer
- 00d Free Space Wand
- 020 Stylus
- 021 Puck
- 022 Finger
- 030 Tip Pressure
- 031 Barrel Pressure
- 032 In Range
- 033 Touch
- 034 Untouch
- 035 Tap
- 036 Quality
- 037 Data Valid
- 038 Transducer Index
- 039 Tablet Function Keys
- 03a Program Change Keys
- 03b Battery Strength
- 03c Invert
- 03d X Tilt
- 03e Y Tilt
- 03f Azimuth
- 040 Altitude
- 041 Twist
- 042 Tip Switch
- 043 Secondary Tip Switch
- 044 Barrel Switch
- 045 Eraser
- 046 Tablet Pick
-HUT 0f PID Page
- 000 Undefined
- 001 Physical Interface Device
- 020 Normal
- 021 Set Effect Report
- 022 Effect Block Index
- 023 Parameter Block Offset
- 024 ROM Flag
- 025 Effect Type
- 026 ET Constant Force
- 027 ET Ramp
- 028 ET Custom Force Data
- 030 ET Square
- 031 ET Sine
- 032 ET Triangle
- 033 ET Sawtooth Up
- 034 ET Sawtooth Down
- 040 ET Spring
- 041 ET Damper
- 042 ET Inertia
- 043 ET Friction
- 050 Duration
- 051 Sample Period
- 052 Gain
- 053 Trigger Button
- 054 Trigger Repeat Interval
- 055 Axes Enable
- 056 Direction Enable
- 057 Direction
- 058 Type Specific Block Offset
- 059 Block Type
- 05A Set Envelope Report
- 05B Attack Level
- 05C Attack Time
- 05D Fade Level
- 05E Fade Time
- 05F Set Condition Report
- 060 CP Offset
- 061 Positive Coefficient
- 062 Negative Coefficient
- 063 Positive Saturation
- 064 Negative Saturation
- 065 Dead Band
- 066 Download Force Sample
- 067 Isoch Custom Force Enable
- 068 Custom Force Data Report
- 069 Custom Force Data
- 06A Custom Force Vendor Defined Data
- 06B Set Custom Force Report
- 06C Custom Force Data Offset
- 06D Sample Count
- 06E Set Periodic Report
- 06F Offset
- 070 Magnitude
- 071 Phase
- 072 Period
- 073 Set Constant Force Report
- 074 Set Ramp Force Report
- 075 Ramp Start
- 076 Ramp End
- 077 Effect Operation Report
- 078 Effect Operation
- 079 Op Effect Start
- 07A Op Effect Start Solo
- 07B Op Effect Stop
- 07C Loop Count
- 07D Device Gain Report
- 07E Device Gain
- 07F PID Pool Report
- 080 RAM Pool Size
- 081 ROM Pool Size
- 082 ROM Effect Block Count
- 083 Simultaneous Effects Max
- 084 Pool Alignment
- 085 PID Pool Move Report
- 086 Move Source
- 087 Move Destination
- 088 Move Length
- 089 PID Block Load Report
- 08B Block Load Status
- 08C Block Load Success
- 08D Block Load Full
- 08E Block Load Error
- 08F Block Handle
- 090 PID Block Free Report
- 091 Type Specific Block Handle
- 092 PID State Report
- 094 Effect Playing
- 095 PID Device Control Report
- 096 PID Device Control
- 097 DC Enable Actuators
- 098 DC Disable Actuators
- 099 DC Stop All Effects
- 09A DC Device Reset
- 09B DC Device Pause
- 09C DC Device Continue
- 09F Device Paused
- 0A0 Actuators Enabled
- 0A4 Safety Switch
- 0A5 Actuator Override Switch
- 0A6 Actuator Power
- 0A7 Start Delay
- 0A8 Parameter Block Size
- 0A9 Device Managed Pool
- 0AA Shared Parameter Blocks
- 0AB Create New Effect Report
- 0AC RAM Pool Available
-HUT 10 Unicode
-HUT 14 Alphanumeric Display
- 000 Undefined
- 001 Alphanumeric Display
- 020 Display Attributes Report
- 021 ASCII Character Set
- 022 Data Read Back
- 023 Font Read Back
- 024 Display Control Report
- 025 Clear Display
- 026 Display Enable
- 027 Screen Saver Delay
- 028 Screen Saver Enable
- 029 Vertical Scroll
- 02a Horizontal Scroll
- 02b Character Report
- 02c Display Data
- 02d Display Status
- 02e Stat Not Ready
- 02f Stat Ready
- 030 Err Not a loadable Character
- 031 Err Font Data Cannot Be Read
- 032 Cursor Position Report
- 033 Row
- 034 Column
- 035 Rows
- 036 Columns
- 037 Cursor Pixel Positioning
- 038 Cursor Mode
- 039 Cursor Enable
- 03a Cursor Blink
- 03b Font Report
- 03c Font Data
- 03d Character Width
- 03e Character Height
- 03f Character Spacing Horizontal
- 040 Character Spacing Vertical
- 041 Unicode Character Set
-HUT 80 USB Monitor
- 001 Monitor Control
- 002 EDID Information
- 003 VDIF Information
- 004 VESA Version
-HUT 81 USB Monitor Enumerated Values
-HUT 82 Monitor VESA Virtual Controls
- 001 Degauss
- 010 Brightness
- 012 Contrast
- 016 Red Video Gain
- 018 Green Video Gain
- 01a Blue Video Gain
- 01c Focus
- 020 Horizontal Position
- 022 Horizontal Size
- 024 Horizontal Pincushion
- 026 Horizontal Pincushion Balance
- 028 Horizontal Misconvergence
- 02a Horizontal Linearity
- 02c Horizontal Linearity Balance
- 030 Vertical Position
- 032 Vertical Size
- 034 Vertical Pincushion
- 036 Vertical Pincushion Balance
- 038 Vertical Misconvergence
- 03a Vertical Linearity
- 03c Vertical Linearity Balance
- 040 Parallelogram Balance (Key Distortion)
- 042 Trapezoidal Distortion (Key)
- 044 Tilt (Rotation)
- 046 Top Corner Distortion Control
- 048 Top Corner Distortion Balance
- 04a Bottom Corner Distortion Control
- 04c Bottom Corner Distortion Balance
- 056 Horizontal Moire
- 058 Vertical Moire
- 05e Input Level Select
- 060 Input Source Select
- 06c Red Video Black Level
- 06e Green Video Black Level
- 070 Blue Video Black Level
- 0a2 Auto Size Center
- 0a4 Polarity Horizontal Sychronization
- 0a6 Polarity Vertical Synchronization
- 0aa Screen Orientation
- 0ac Horizontal Frequency in Hz
- 0ae Vertical Frequency in 0.1 Hz
- 0b0 Settings
- 0ca On Screen Display (OSD)
- 0d4 Stereo Mode
-HUT 84 Power Device Page
- 000 Undefined
- 001 iName
- 002 Present Status
- 003 Changed Status
- 004 UPS
- 005 Power Supply
- 010 Battery System
- 011 Battery System ID
- 012 Battery
- 013 Battery ID
- 014 Charger
- 015 Charger ID
- 016 Power Converter
- 017 Power Converter ID
- 018 Outlet System
- 019 Outlet System ID
- 01a Input
- 01b Input ID
- 01c Output
- 01d Output ID
- 01e Flow
- 01f Flow ID
- 020 Outlet
- 021 Outlet ID
- 022 Gang
- 023 Gang ID
- 024 Power Summary
- 025 Power Summary ID
- 030 Voltage
- 031 Current
- 032 Frequency
- 033 Apparent Power
- 034 Active Power
- 035 Percent Load
- 036 Temperature
- 037 Humidity
- 038 Bad Count
- 040 Config Voltage
- 041 Config Current
- 042 Config Frequency
- 043 Config Apparent Power
- 044 Config Active Power
- 045 Config Percent Load
- 046 Config Temperature
- 047 Config Humidity
- 050 Switch On Control
- 051 Switch Off Control
- 052 Toggle Control
- 053 Low Voltage Transfer
- 054 High Voltage Transfer
- 055 Delay Before Reboot
- 056 Delay Before Startup
- 057 Delay Before Shutdown
- 058 Test
- 059 Module Reset
- 05a Audible Alarm Control
- 060 Present
- 061 Good
- 062 Internal Failure
- 063 Voltage out of range
- 064 Frequency out of range
- 065 Overload
- 066 Over Charged
- 067 Over Temperature
- 068 Shutdown Requested
- 069 Shutdown Imminent
- 06a Reserved
- 06b Switch On/Off
- 06c Switchable
- 06d Used
- 06e Boost
- 06f Buck
- 070 Initialized
- 071 Tested
- 072 Awaiting Power
- 073 Communication Lost
- 0fd iManufacturer
- 0fe iProduct
- 0ff iSerialNumber
-HUT 85 Battery System Page
- 000 Undefined
- 001 SMB Battery Mode
- 002 SMB Battery Status
- 003 SMB Alarm Warning
- 004 SMB Charger Mode
- 005 SMB Charger Status
- 006 SMB Charger Spec Info
- 007 SMB Selector State
- 008 SMB Selector Presets
- 009 SMB Selector Info
- 010 Optional Mfg. Function 1
- 011 Optional Mfg. Function 2
- 012 Optional Mfg. Function 3
- 013 Optional Mfg. Function 4
- 014 Optional Mfg. Function 5
- 015 Connection to SMBus
- 016 Output Connection
- 017 Charger Connection
- 018 Battery Insertion
- 019 Use Next
- 01a OK to use
- 01b Battery Supported
- 01c SelectorRevision
- 01d Charging Indicator
- 028 Manufacturer Access
- 029 Remaining Capacity Limit
- 02a Remaining Time Limit
- 02b At Rate
- 02c Capacity Mode
- 02d Broadcast To Charger
- 02e Primary Battery
- 02f Charge Controller
- 040 Terminate Charge
- 041 Terminate Discharge
- 042 Below Remaining Capacity Limit
- 043 Remaining Time Limit Expired
- 044 Charging
- 045 Discharging
- 046 Fully Charged
- 047 Fully Discharged
- 048 Conditioning Flag
- 049 At Rate OK
- 04a SMB Error Code
- 04b Need Replacement
- 060 At Rate Time To Full
- 061 At Rate Time To Empty
- 062 Average Current
- 063 Max Error
- 064 Relative State Of Charge
- 065 Absolute State Of Charge
- 066 Remaining Capacity
- 067 Full Charge Capacity
- 068 Run Time To Empty
- 069 Average Time To Empty
- 06a Average Time To Full
- 06b Cycle Count
- 080 Batt. Pack Model Level
- 081 Internal Charge Controller
- 082 Primary Battery Support
- 083 Design Capacity
- 084 Specification Info
- 085 Manufacturer Date
- 086 Serial Number
- 087 iManufacturerName
- 088 iDeviceName
- 089 iDeviceChemistry
- 08a Manufacturer Data
- 08b Rechargeable
- 08c Warning Capacity Limit
- 08d Capacity Granularity 1
- 08e Capacity Granularity 2
- 08f iOEMInformation
- 0c0 Inhibit Charge
- 0c1 Enable Polling
- 0c2 Reset To Zero
- 0d0 AC Present
- 0d1 Battery Present
- 0d2 Power Fail
- 0d3 Alarm Inhibited
- 0d4 Thermistor Under Range
- 0d5 Thermistor Hot
- 0d6 Thermistor Cold
- 0d7 Thermistor Over Range
- 0d8 Voltage Out Of Range
- 0d9 Current Out Of Range
- 0da Current Not Regulated
- 0db Voltage Not Regulated
- 0dc Master Mode
- 0f0 Charger Selector Support
- 0f1 Charger Spec
- 0f2 Level 2
- 0f3 Level 3
-HUT 86 Power Pages
-HUT 87 Power Pages
-HUT 8c Bar Code Scanner Page (POS)
-HUT 8d Scale Page (POS)
-HUT 90 Camera Control Page
-HUT 91 Arcade Control Page
-HUT f0 Cash Device
- 0f1 Cash Drawer
- 0f2 Cash Drawer Number
- 0f3 Cash Drawer Set
- 0f4 Cash Drawer Status
-HUT ff Vendor Specific
-
-# List of Languages
-
-# Syntax:
-# L language_id language_name
-# dialect_id dialect_name
-
-L 0001 Arabic
- 01 Saudi Arabia
- 02 Iraq
- 03 Egypt
- 04 Libya
- 05 Algeria
- 06 Morocco
- 07 Tunesia
- 08 Oman
- 09 Yemen
- 0a Syria
- 0b Jordan
- 0c Lebanon
- 0d Kuwait
- 0e U.A.E
- 0f Bahrain
- 10 Qatar
-L 0002 Bulgarian
-L 0003 Catalan
-L 0004 Chinese
- 01 Traditional
- 02 Simplified
- 03 Hongkong SAR, PRC
- 04 Singapore
- 05 Macau SAR
-L 0005 Czech
-L 0006 Danish
-L 0007 German
- 01 German
- 02 Swiss
- 03 Austrian
- 04 Luxembourg
- 05 Liechtenstein
-L 0008 Greek
-L 0009 English
- 01 US
- 02 UK
- 03 Australian
- 04 Canadian
- 05 New Zealand
- 06 Ireland
- 07 South Africa
- 08 Jamaica
- 09 Carribean
- 0a Belize
- 0b Trinidad
- 0c Zimbabwe
- 0d Philippines
-L 000a Spanish
- 01 Castilian
- 02 Mexican
- 03 Modern
- 04 Guatemala
- 05 Costa Rica
- 06 Panama
- 07 Dominican Republic
- 08 Venzuela
- 09 Colombia
- 0a Peru
- 0b Argentina
- 0c Ecuador
- 0d Chile
- 0e Uruguay
- 0f Paraguay
- 10 Bolivia
- 11 El Salvador
- 12 Honduras
- 13 Nicaragua
- 14 Puerto Rico
-L 000b Finnish
-L 000c French
- 01 French
- 02 Belgian
- 03 Canadian
- 04 Swiss
- 05 Luxembourg
- 06 Monaco
-L 000d Hebrew
-L 000e Hungarian
-L 000f Idelandic
-L 0010 Italian
- 01 Italian
- 02 Swiss
-L 0011 Japanese
-L 0012 Korean
- 01 Korean
-L 0013 Dutch
- 01 Dutch
- 02 Belgian
-L 0014 Norwegian
- 01 Bokmal
- 02 Nynorsk
-L 0015 Polish
-L 0016 Portuguese
- 01 Portuguese
- 02 Brazilian
-L 0017 forgotten
-L 0018 Romanian
-L 0019 Russian
-L 001a Serbian
- 01 Croatian
- 02 Latin
- 03 Cyrillic
-L 001b Slovak
-L 001c Albanian
-L 001d Swedish
- 01 Swedish
- 02 Finland
-L 001e Thai
-L 001f Turkish
-L 0020 Urdu
- 01 Pakistan
- 02 India
-L 0021 Indonesian
-L 0022 Ukrainian
-L 0023 Belarusian
-L 0024 Slovenian
-L 0025 Estonian
-L 0026 Latvian
-L 0027 Lithuanian
- 01 Lithuanian
-L 0028 forgotten
-L 0029 Farsi
-L 002a Vietnamese
-L 002b Armenian
-L 002c Azeri
- 01 Cyrillic
- 02 Latin
-L 002d Basque
-L 002e forgotten
-L 002f Macedonian
-L 0036 Afrikaans
-L 0037 Georgian
-L 0038 Faeroese
-L 0039 Hindi
-L 003e Malay
- 01 Malaysia
- 02 Brunei Darassalam
-L 003f Kazak
-L 0041 Awahili
-L 0043 Uzbek
- 01 Latin
- 02 Cyrillic
-L 0044 Tatar
-L 0045 Bengali
-L 0046 Punjabi
-L 0047 Gujarati
-L 0048 Oriya
-L 0049 Tamil
-L 004a Telugu
-L 004b Kannada
-L 004c Malayalam
-L 004d Assamese
-L 004e Marathi
-L 004f Sanskrit
-L 0057 Konkani
-L 0058 Manipuri
-L 0059 Sindhi
-L 0060 Kashmiri
- 02 India
-L 0061 Nepali
- 02 India
-
-# HID Descriptor bCountryCode
-# HID Specification 1.11 (2001-06-27) page 23
-#
-# Syntax:
-# HCC country_code keymap_type
-
-HCC 00 Not supported
-HCC 01 Arabic
-HCC 02 Belgian
-HCC 03 Canadian-Bilingual
-HCC 04 Canadian-French
-HCC 05 Czech Republic
-HCC 06 Danish
-HCC 07 Finnish
-HCC 08 French
-HCC 09 German
-HCC 10 Greek
-HCC 11 Hebrew
-HCC 12 Hungary
-HCC 13 International (ISO)
-HCC 14 Italian
-HCC 15 Japan (Katakana)
-HCC 16 Korean
-HCC 17 Latin American
-HCC 18 Netherlands/Dutch
-HCC 19 Norwegian
-HCC 20 Persian (Farsi)
-HCC 21 Poland
-HCC 22 Portuguese
-HCC 23 Russia
-HCC 24 Slovakia
-HCC 25 Spanish
-HCC 26 Swedish
-HCC 27 Swiss/French
-HCC 28 Swiss/German
-HCC 29 Switzerland
-HCC 30 Taiwan
-HCC 31 Turkish-Q
-HCC 32 UK
-HCC 33 US
-HCC 34 Yugoslavia
-HCC 35 Turkish-F
-
-# List of Video Class Terminal Types
-
-# Syntax:
-# VT terminal_type terminal_type_name
-
-VT 0100 USB Vendor Specific
-VT 0101 USB Streaming
-VT 0200 Input Vendor Specific
-VT 0201 Camera Sensor
-VT 0202 Sequential Media
-VT 0300 Output Vendor Specific
-VT 0301 Generic Display
-VT 0302 Sequential Media
-VT 0400 External Vendor Specific
-VT 0401 Composite Video
-VT 0402 S-Video
-VT 0403 Component Video
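
The tables removed above follow the plain-text record format documented in the file's own syntax comments: a record-type prefix (L, HCC, VT), a hexadecimal identifier, a human-readable name, and indented sub-entries for dialects. Purely as an illustration — not part of this patch, and with hypothetical function and buffer names — a consumer of the language list could tokenize it roughly like this:

#include <stdio.h>

/* Hypothetical sketch: read "L 0009 English" language records and their
 * indented " 01 US" dialect sub-entries from a list in the format above. */
static void parse_language_lines(FILE *f)
{
	char line[256], name[200];
	unsigned int id;

	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "L %x %199[^\n]", &id, name) == 2)
			printf("language 0x%04x: %s\n", id, name);
		else if (sscanf(line, " %x %199[^\n]", &id, name) == 2)
			printf("  dialect 0x%02x: %s\n", id, name);
		/* comment lines starting with '#' match neither pattern */
	}
}
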
diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
index d5bc8e7e3d7..71a586e00fd 100644
--- a/drivers/staging/usbip/vhci.h
+++ b/drivers/staging/usbip/vhci.h
@@ -6,15 +6,6 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * This is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
- * USA.
*/
#include <linux/device.h>
@@ -105,7 +96,7 @@ struct vhci_hcd {
};
extern struct vhci_hcd *the_controller;
-extern struct attribute_group dev_attr_group;
+extern const struct attribute_group dev_attr_group;
#define hardware (&the_controller->pdev.dev)
/* vhci_hcd.c */
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index a76e8fa69b6..2ee97e2095b 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -92,14 +92,28 @@ static const char * const bit_desc[] = {
"R31", /*31*/
};
-static void dump_port_status(u32 status)
+static void dump_port_status_diff(u32 prev_status, u32 new_status)
{
int i = 0;
-
- pr_debug("status %08x:", status);
- for (i = 0; i < 32; i++) {
- if (status & (1 << i))
- pr_debug(" %s", bit_desc[i]);
+ u32 bit = 1;
+
+ pr_debug("status prev -> new: %08x -> %08x\n", prev_status, new_status);
+ while (bit) {
+ u32 prev = prev_status & bit;
+ u32 new = new_status & bit;
+ char change;
+
+ if (!prev && new)
+ change = '+';
+ else if (prev && !new)
+ change = '-';
+ else
+ change = ' ';
+
+ if (prev || new)
+ pr_debug(" %c%s\n", change, bit_desc[i]);
+ bit <<= 1;
+ i++;
}
pr_debug("\n");
}
@@ -273,9 +287,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* store old status and compare now and old later */
if (usbip_dbg_flag_vhci_rh) {
- int i = 0;
- for (i = 0; i < VHCI_NPORTS; i++)
- prev_port_status[i] = dum->port_status[i];
+ memcpy(prev_port_status, dum->port_status,
+ sizeof(prev_port_status));
}
switch (typeReq) {
@@ -344,9 +357,9 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* */
if (dum->resuming && time_after(jiffies, dum->re_timeout)) {
dum->port_status[rhport] |=
- (1 << USB_PORT_FEAT_C_SUSPEND);
+ (1 << USB_PORT_FEAT_C_SUSPEND);
dum->port_status[rhport] &=
- ~(1 << USB_PORT_FEAT_SUSPEND);
+ ~(1 << USB_PORT_FEAT_SUSPEND);
dum->resuming = 0;
dum->re_timeout = 0;
/* if (dum->driver && dum->driver->resume) {
@@ -464,8 +477,11 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (usbip_dbg_flag_vhci_rh) {
pr_debug("port %d\n", rhport);
- dump_port_status(prev_port_status[rhport]);
- dump_port_status(dum->port_status[rhport]);
+ /* Only dump valid port status */
+ if (rhport >= 0) {
+ dump_port_status_diff(prev_port_status[rhport],
+ dum->port_status[rhport]);
+ }
}
usbip_dbg_vhci_rh(" bye\n");
@@ -639,9 +655,7 @@ no_need_xmit:
usb_hcd_unlink_urb_from_ep(hcd, urb);
no_need_unlink:
spin_unlock_irqrestore(&the_controller->lock, flags);
-
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
-
return ret;
}
@@ -846,9 +860,9 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
}
/* kill threads related to this sdev, if v.c. exists */
- if (vdev->ud.tcp_rx)
+ if (vdev->ud.tcp_rx && !task_is_dead(vdev->ud.tcp_rx))
kthread_stop(vdev->ud.tcp_rx);
- if (vdev->ud.tcp_tx)
+ if (vdev->ud.tcp_tx && !task_is_dead(vdev->ud.tcp_tx))
kthread_stop(vdev->ud.tcp_tx);
pr_info("stop threads\n");
@@ -1033,9 +1047,8 @@ static int vhci_bus_resume(struct usb_hcd *hcd)
hcd->state = HC_STATE_RUNNING;
}
spin_unlock_irq(&vhci->lock);
- return rc;
- return 0;
+ return rc;
}
#else
@@ -1212,7 +1225,7 @@ static struct platform_device the_pdev = {
},
};
-static int __init vhci_init(void)
+static int __init vhci_hcd_init(void)
{
int ret;
@@ -1236,14 +1249,14 @@ err_driver_register:
return ret;
}
-static void __exit vhci_cleanup(void)
+static void __exit vhci_hcd_exit(void)
{
platform_device_unregister(&the_pdev);
platform_driver_unregister(&vhci_driver);
}
-module_init(vhci_init);
-module_exit(vhci_cleanup);
+module_init(vhci_hcd_init);
+module_exit(vhci_hcd_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
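
The dump_port_status_diff() helper introduced above walks a 32-bit status word one bit at a time and prints every bit that is set in either value, prefixed with '+' when it was newly set, '-' when it was cleared, and a space when it is unchanged. A standalone sketch of the same idea, using placeholder bit names rather than the driver's bit_desc[] table:

#include <stdio.h>

/* Placeholder names for illustration; the real code indexes bit_desc[]. */
static const char *bit_name(int i)
{
	static const char * const names[] = {
		"CONNECTION", "ENABLE", "SUSPEND", "OVER_CURRENT",
	};
	return i < 4 ? names[i] : "R?";
}

static void diff_bits(unsigned int prev, unsigned int cur)
{
	unsigned int bit;
	int i;

	printf("status prev -> new: %08x -> %08x\n", prev, cur);
	for (i = 0, bit = 1; bit; bit <<= 1, i++) {
		if (!((prev | cur) & bit))
			continue;	/* clear in both: nothing to report */
		printf(" %c%s\n",
		       (cur & bit) ? ((prev & bit) ? ' ' : '+') : '-',
		       bit_name(i));
	}
}

int main(void)
{
	diff_bits(0x00000004, 0x00000003);	/* +CONNECTION, +ENABLE, -SUSPEND */
	return 0;
}

Printing only the set or changed bits keeps the debug log readable when the hub status is polled repeatedly, which is the point of replacing the old dump_port_status().
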
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index e42ce9dab7a..09c44abb89e 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -179,8 +179,6 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
}
kfree(unlink);
-
- return;
}
static int vhci_priv_tx_empty(struct vhci_device *vdev)
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index d9736f9c402..0cd039bb5fd 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -135,7 +135,7 @@ static DEVICE_ATTR(detach, S_IWUSR, NULL, store_detach);
static int valid_args(__u32 rhport, enum usb_device_speed speed)
{
/* check rhport */
- if ((rhport < 0) || (rhport >= VHCI_NPORTS)) {
+ if (rhport >= VHCI_NPORTS) {
pr_err("port %u\n", rhport);
return -EINVAL;
}
@@ -192,7 +192,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
/* check sockfd */
socket = sockfd_to_socket(sockfd);
if (!socket)
- return -EINVAL;
+ return -EINVAL;
/* now need lock until setting vdev status as used */
@@ -239,6 +239,6 @@ static struct attribute *dev_attrs[] = {
NULL,
};
-struct attribute_group dev_attr_group = {
+const struct attribute_group dev_attr_group = {
.attrs = dev_attrs,
};
diff --git a/drivers/staging/vme/boards/vme_vmivme7805.c b/drivers/staging/vme/boards/vme_vmivme7805.c
index 80eaa0c4fe1..8e05bb4e135 100644
--- a/drivers/staging/vme/boards/vme_vmivme7805.c
+++ b/drivers/staging/vme/boards/vme_vmivme7805.c
@@ -27,9 +27,9 @@ static void __exit vmic_exit(void);
/** Base address to access FPGA register */
static void *vmic_base;
-static char driver_name[] = "vmivme_7805";
+static const char driver_name[] = "vmivme_7805";
-static struct pci_device_id vmic_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(vmic_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VMIC, PCI_DEVICE_ID_VTIMR) },
{ },
};
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c
index a4007287ef4..5122c13a956 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.c
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.c
@@ -42,7 +42,7 @@ static void __exit ca91cx42_exit(void);
/* Module parameters */
static int geoid;
-static char driver_name[] = "vme_ca91cx42";
+static const char driver_name[] = "vme_ca91cx42";
static DEFINE_PCI_DEVICE_TABLE(ca91cx42_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
@@ -190,7 +190,7 @@ static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);
/* Clear serviced interrupts */
- iowrite32(stat, bridge->base + LINT_STAT);
+ iowrite32(serviced, bridge->base + LINT_STAT);
return IRQ_HANDLED;
}
@@ -256,6 +256,18 @@ static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
free_irq(pdev->irq, pdev);
}
+static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
+{
+ u32 tmp;
+
+ tmp = ioread32(bridge->base + LINT_STAT);
+
+ if (tmp & (1 << level))
+ return 0;
+ else
+ return 1;
+}
+
/*
* Set up an VME interrupt
*/
@@ -311,7 +323,8 @@ static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
iowrite32(tmp, bridge->base + VINT_EN);
/* Wait for IACK */
- wait_event_interruptible(bridge->iack_queue, 0);
+ wait_event_interruptible(bridge->iack_queue,
+ ca91cx42_iack_received(bridge, level));
/* Return interrupt to low state */
tmp = ioread32(bridge->base + VINT_EN);
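
The ca91cx42 change above replaces a wait on a constant-false condition (which could only ever be ended by a signal) with ca91cx42_iack_received(), a helper that re-reads LINT_STAT so the sleeper actually wakes once the interrupt has been acknowledged. The general waitqueue pattern it relies on — sleep on a condition, have the interrupt path update the state and call wake_up() — looks roughly like the sketch below; the names are hypothetical, not the bridge driver's own symbols:

#include <linux/types.h>
#include <linux/wait.h>

/* Hypothetical illustration of the wait_event/wake_up pairing. */
static DECLARE_WAIT_QUEUE_HEAD(ack_queue);
static bool ack_pending;

static bool ack_received(void)
{
	/* Re-evaluated by wait_event_interruptible() after every wake-up. */
	return !ack_pending;
}

static int wait_for_ack(void)
{
	ack_pending = true;
	/* ... start the operation that will raise the interrupt ... */
	return wait_event_interruptible(ack_queue, ack_received());
}

static void ack_irq_handler(void)
{
	ack_pending = false;	/* update the state first ...          */
	wake_up(&ack_queue);	/* ... then let the sleeper re-check it */
}
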
diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c
index 106aa9daff4..9c539513c74 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.c
+++ b/drivers/staging/vme/bridges/vme_tsi148.c
@@ -44,7 +44,7 @@ static void __exit tsi148_exit(void);
static int err_chk;
static int geoid;
-static char driver_name[] = "vme_tsi148";
+static const char driver_name[] = "vme_tsi148";
static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index a571173249c..91d2cc7bb4c 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -41,7 +41,7 @@
#include "vme_user.h"
static DEFINE_MUTEX(vme_user_mutex);
-static char driver_name[] = "vme_user";
+static const char driver_name[] = "vme_user";
static int bus[USER_BUS_MAX];
static unsigned int bus_num;
@@ -91,7 +91,7 @@ static unsigned int bus_num;
/*
* Structure to handle image related parameters.
*/
-typedef struct {
+struct image_desc {
void *kern_buf; /* Buffer address in kernel space */
dma_addr_t pci_buf; /* Buffer address in PCI address space */
unsigned long long size_buf; /* Buffer size */
@@ -99,10 +99,10 @@ typedef struct {
struct device *device; /* Sysfs device */
struct vme_resource *resource; /* VME resource */
int users; /* Number of current users */
-} image_desc_t;
-static image_desc_t image[VME_DEVS];
+};
+static struct image_desc image[VME_DEVS];
-typedef struct {
+struct driver_stats {
unsigned long reads;
unsigned long writes;
unsigned long ioctls;
@@ -111,8 +111,8 @@ typedef struct {
unsigned long dmaErrors;
unsigned long timeouts;
unsigned long external;
-} driver_stats_t;
-static driver_stats_t statistics;
+};
+static struct driver_stats statistics;
static struct cdev *vme_user_cdev; /* Character device */
static struct class *vme_user_sysfs_class; /* Sysfs class */
@@ -138,7 +138,7 @@ static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long);
static int __devinit vme_user_probe(struct device *, int, int);
static int __devexit vme_user_remove(struct device *, int, int);
-static struct file_operations vme_user_fops = {
+static const struct file_operations vme_user_fops = {
.open = vme_user_open,
.release = vme_user_release,
.read = vme_user_read,
@@ -168,8 +168,8 @@ static int vme_user_open(struct inode *inode, struct file *file)
unsigned int minor = MINOR(inode->i_rdev);
down(&image[minor].sem);
- /* Only allow device to be opened if a resource is allocated */
- if (image[minor].resource == NULL) {
+ /* Allow device to be opened if a resource is needed and allocated. */
+ if (minor < CONTROL_MINOR && image[minor].resource == NULL) {
printk(KERN_ERR "No resources allocated for device\n");
err = -EINVAL;
goto err_res;
@@ -321,6 +321,9 @@ static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
size_t image_size;
size_t okcount;
+ if (minor == CONTROL_MINOR)
+ return 0;
+
down(&image[minor].sem);
/* XXX Do we *really* want this helper - we can use vme_*_get ? */
@@ -365,6 +368,9 @@ static ssize_t vme_user_write(struct file *file, const char __user *buf,
size_t image_size;
size_t okcount;
+ if (minor == CONTROL_MINOR)
+ return 0;
+
down(&image[minor].sem);
image_size = vme_get_size(image[minor].resource);
@@ -406,6 +412,9 @@ static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
size_t image_size;
+ if (minor == CONTROL_MINOR)
+ return -EINVAL;
+
down(&image[minor].sem);
image_size = vme_get_size(image[minor].resource);
@@ -452,6 +461,7 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
{
struct vme_master master;
struct vme_slave slave;
+ struct vme_irq_id irq_req;
unsigned long copied;
unsigned int minor = MINOR(inode->i_rdev);
int retval;
@@ -462,6 +472,21 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
switch (type[minor]) {
case CONTROL_MINOR:
+ switch (cmd) {
+ case VME_IRQ_GEN:
+ copied = copy_from_user(&irq_req, (char *)arg,
+ sizeof(struct vme_irq_id));
+ if (copied != 0) {
+ printk(KERN_WARNING "Partial copy from userspace\n");
+ return -EFAULT;
+ }
+
+ retval = vme_irq_generate(vme_user_bridge,
+ irq_req.level,
+ irq_req.statid);
+
+ return retval;
+ }
break;
case MASTER_MINOR:
switch (cmd) {
@@ -773,6 +798,7 @@ static int __devinit vme_user_probe(struct device *dev, int cur_bus,
/* Add sysfs Entries */
for (i = 0; i < VME_DEVS; i++) {
+ int num;
switch (type[i]) {
case MASTER_MINOR:
sprintf(name, "bus/vme/m%%d");
@@ -789,10 +815,9 @@ static int __devinit vme_user_probe(struct device *dev, int cur_bus,
break;
}
- image[i].device =
- device_create(vme_user_sysfs_class, NULL,
- MKDEV(VME_MAJOR, i), NULL, name,
- (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i);
+ num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
+ image[i].device = device_create(vme_user_sysfs_class, NULL,
+ MKDEV(VME_MAJOR, i), NULL, name, num);
if (IS_ERR(image[i].device)) {
printk(KERN_INFO "%s: Error creating sysfs device\n",
driver_name);
diff --git a/drivers/staging/vme/devices/vme_user.h b/drivers/staging/vme/devices/vme_user.h
index ede77d7e766..24bf4e54013 100644
--- a/drivers/staging/vme/devices/vme_user.h
+++ b/drivers/staging/vme/devices/vme_user.h
@@ -43,10 +43,16 @@ struct vme_slave {
#endif
};
+struct vme_irq_id {
+ __u8 level;
+ __u8 statid;
+};
+
#define VME_GET_SLAVE _IOR(VME_IOC_MAGIC, 1, struct vme_slave)
#define VME_SET_SLAVE _IOW(VME_IOC_MAGIC, 2, struct vme_slave)
#define VME_GET_MASTER _IOR(VME_IOC_MAGIC, 3, struct vme_master)
#define VME_SET_MASTER _IOW(VME_IOC_MAGIC, 4, struct vme_master)
+#define VME_IRQ_GEN _IOW(VME_IOC_MAGIC, 5, struct vme_irq_id)
#endif /* _VME_USER_H_ */
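
Together with the CONTROL_MINOR branch added to vme_user_ioctl() above, the new VME_IRQ_GEN ioctl and struct vme_irq_id let user space ask the bridge to generate a VME interrupt with a chosen level and status/ID. A minimal user-space sketch follows; the device node path is an assumption about how the control minor ends up named under /dev (it is not defined in this patch), and the level/statid values are arbitrary examples:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "vme_user.h"		/* struct vme_irq_id, VME_IRQ_GEN */

int main(void)
{
	struct vme_irq_id irq = {
		.level  = 3,	/* VME interrupt level */
		.statid = 0xaa,	/* status/ID presented during IACK */
	};
	int fd = open("/dev/bus/vme/ctl", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, VME_IRQ_GEN, &irq) < 0)
		perror("VME_IRQ_GEN");
	close(fd);
	return 0;
}
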
diff --git a/drivers/staging/vme/vme.h b/drivers/staging/vme/vme.h
index 48768ca97e1..4155d8c2a53 100644
--- a/drivers/staging/vme/vme.h
+++ b/drivers/staging/vme/vme.h
@@ -98,7 +98,7 @@ struct vme_device_id {
struct vme_driver {
struct list_head node;
- char *name;
+ const char *name;
const struct vme_device_id *bind_table;
int (*probe) (struct device *, int, int);
int (*remove) (struct device *, int, int);
diff --git a/drivers/staging/westbridge/Kconfig b/drivers/staging/westbridge/Kconfig
deleted file mode 100644
index 2b1c2ae557b..00000000000
--- a/drivers/staging/westbridge/Kconfig
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# West Bridge configuration
-#
-
-menuconfig WESTBRIDGE
- tristate "West Bridge support"
- depends on WESTBRIDGE_HAL_SELECTED
- help
- This selects West Bridge Peripheral controller support.
-
- If you want West Bridge support, you should say Y here.
-
-menuconfig WESTBRIDGE_ASTORIA
- bool "West Bridge Astoria support"
- depends on WESTBRIDGE != n && WESTBRIDGE_HAL_SELECTED
- help
- This option enables support for West Bridge Astoria
-
-if WESTBRIDGE_ASTORIA
-source "drivers/staging/westbridge/astoria/Kconfig"
-endif #WESTBRIDGE_ASTORIA
-
-config WESTBRIDGE_HAL_SELECTED
- boolean
-
-choice
- prompt "West Bridge HAL"
- help
- West Bridge HAL/processor interface to be used
-
-#
-# HAL Layers
-#
-
-config MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL
- bool "WESTBRIDGE OMAP3430 Astoria PNAND HAL"
- depends on ARCH_OMAP3
- select WESTBRIDGE_HAL_SELECTED
- help
- Include the OMAP3430 HAL for PNAND interface
-
-config MACH_NO_WESTBRIDGE
- bool "no West Bridge HAL selected"
- help
- Do not include any HAL layer(de-activates West Bridge option)
-endchoice
-
-config WESTBRIDGE_DEBUG
- bool "West Bridge debugging"
- depends on WESTBRIDGE != n
- help
- This is an option for use by developers; most people should
- say N here. This enables WESTBRIDGE core and driver debugging.
diff --git a/drivers/staging/westbridge/TODO b/drivers/staging/westbridge/TODO
deleted file mode 100644
index 6ca80581bbe..00000000000
--- a/drivers/staging/westbridge/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
-TODO:
-- checkpatch.pl fixes
-- determine where to put the hal and common api code
-- modify the driver directory structure in an intuitive way
-
-Please send any patches to Greg Kroah-Hartman <gregkh@suse.de>
-and David Cross <david.cross@cypress.com>.
diff --git a/drivers/staging/westbridge/astoria/Kconfig b/drivers/staging/westbridge/astoria/Kconfig
deleted file mode 100644
index 1ce388acbfe..00000000000
--- a/drivers/staging/westbridge/astoria/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# West Bridge configuration
-#
-source "drivers/staging/westbridge/astoria/device/Kconfig"
-
-source "drivers/staging/westbridge/astoria/block/Kconfig"
-
-source "drivers/staging/westbridge/astoria/gadget/Kconfig"
-
diff --git a/drivers/staging/westbridge/astoria/Makefile b/drivers/staging/westbridge/astoria/Makefile
deleted file mode 100644
index 907bdb25804..00000000000
--- a/drivers/staging/westbridge/astoria/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for the kernel westbridge device drivers.
-#
-
-ifneq ($(CONFIG_WESTBRIDGE_DEBUG),y)
- EXTRA_CFLAGS += -WESTBRIDGE_NDEBUG
-endif
-
-obj-$(CONFIG_WESTBRIDGE) += device/
-obj-$(CONFIG_WESTBRIDGE) += block/
-obj-$(CONFIG_WESTBRIDGE) += gadget/
\ No newline at end of file
diff --git a/drivers/staging/westbridge/astoria/api/Makefile b/drivers/staging/westbridge/astoria/api/Makefile
deleted file mode 100644
index 1c94bc7bb31..00000000000
--- a/drivers/staging/westbridge/astoria/api/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Makefile for the kernel westbridge core.
-#
-
-ifeq ($(CONFIG_WESTBRIDGE_DEBUG),n)
- EXTRA_CFLAGS += -NDEBUG
-endif
-
-obj-$(CONFIG_WESTBRIDGE_DEVICE_DRIVER) += cyasapi.o
-cyasapi-y := src/cyasdma.o src/cyasintr.o src/cyaslep2pep.o \
- src/cyaslowlevel.o src/cyasmisc.o src/cyasmtp.o \
- src/cyasstorage.o src/cyasusb.o
-
-
diff --git a/drivers/staging/westbridge/astoria/api/src/cyasdma.c b/drivers/staging/westbridge/astoria/api/src/cyasdma.c
deleted file mode 100644
index c461d4f60bf..00000000000
--- a/drivers/staging/westbridge/astoria/api/src/cyasdma.c
+++ /dev/null
@@ -1,1107 +0,0 @@
-/* Cypress West Bridge API source file (cyasdma.c)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#include "../../include/linux/westbridge/cyashal.h"
-#include "../../include/linux/westbridge/cyasdma.h"
-#include "../../include/linux/westbridge/cyaslowlevel.h"
-#include "../../include/linux/westbridge/cyaserr.h"
-#include "../../include/linux/westbridge/cyasregs.h"
-
-/*
- * Add the DMA queue entry to the free list to be re-used later
- */
-static void
-cy_as_dma_add_request_to_free_queue(cy_as_device *dev_p,
- cy_as_dma_queue_entry *req_p)
-{
- uint32_t imask;
- imask = cy_as_hal_disable_interrupts();
-
- req_p->next_p = dev_p->dma_freelist_p;
- dev_p->dma_freelist_p = req_p;
-
- cy_as_hal_enable_interrupts(imask);
-}
-
-/*
- * Get a DMA queue entry from the free list.
- */
-static cy_as_dma_queue_entry *
-cy_as_dma_get_dma_queue_entry(cy_as_device *dev_p)
-{
- cy_as_dma_queue_entry *req_p;
- uint32_t imask;
-
- cy_as_hal_assert(dev_p->dma_freelist_p != 0);
-
- imask = cy_as_hal_disable_interrupts();
- req_p = dev_p->dma_freelist_p;
- dev_p->dma_freelist_p = req_p->next_p;
- cy_as_hal_enable_interrupts(imask);
-
- return req_p;
-}
-
-/*
- * Set the maximum size that the West Bridge hardware
- * can handle in a single DMA operation. This size
- * may change for the P <-> U endpoints as a function
- * of the endpoint type and whether we are running
- * at full speed or high speed.
- */
-cy_as_return_status_t
-cy_as_dma_set_max_dma_size(cy_as_device *dev_p,
- cy_as_end_point_number_t ep, uint32_t size)
-{
- /* In MTP mode, EP2 is allowed to have all max sizes. */
- if ((!dev_p->is_mtp_firmware) || (ep != 0x02)) {
- if (size < 64 || size > 1024)
- return CY_AS_ERROR_INVALID_SIZE;
- }
-
- CY_AS_NUM_EP(dev_p, ep)->maxhwdata = (uint16_t)size;
- return CY_AS_ERROR_SUCCESS;
-}
-
-/*
- * The callback for requests sent to West Bridge
- * to relay endpoint data. Endpoint data for EP0
- * and EP1 are sent using mailbox requests. This
- * is the callback that is called when a response
- * to a mailbox request to send data is received.
- */
-static void
-cy_as_dma_request_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *resp_p,
- cy_as_return_status_t ret)
-{
- uint16_t v;
- uint16_t datacnt;
- cy_as_end_point_number_t ep;
-
- (void)context;
-
- cy_as_log_debug_message(5, "cy_as_dma_request_callback called");
-
- /*
- * extract the return code from the firmware
- */
- if (ret == CY_AS_ERROR_SUCCESS) {
- if (cy_as_ll_request_response__get_code(resp_p) !=
- CY_RESP_SUCCESS_FAILURE)
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- else
- ret = cy_as_ll_request_response__get_word(resp_p, 0);
- }
-
- /*
- * extract the endpoint number and the transferred byte count
- * from the request.
- */
- v = cy_as_ll_request_response__get_word(req_p, 0);
- ep = (cy_as_end_point_number_t)((v >> 13) & 0x01);
-
- if (ret == CY_AS_ERROR_SUCCESS) {
- /*
- * if the firmware returns success,
- * all of the data requested was
- * transferred. there are no partial
- * transfers.
- */
- datacnt = v & 0x3FF;
- } else {
- /*
- * if the firmware returned an error, no data was transferred.
- */
- datacnt = 0;
- }
-
- /*
- * queue the request and response data structures for use with the
- * next EP0 or EP1 request.
- */
- if (ep == 0) {
- dev_p->usb_ep0_dma_req = req_p;
- dev_p->usb_ep0_dma_resp = resp_p;
- } else {
- dev_p->usb_ep1_dma_req = req_p;
- dev_p->usb_ep1_dma_resp = resp_p;
- }
-
- /*
- * call the DMA complete function so we can
- * signal that this portion of the transfer
- * has completed. if the low level request
- * was canceled, we do not need to signal
- * the completed function as the only way a
- * cancel can happen is via the DMA cancel
- * function.
- */
- if (ret != CY_AS_ERROR_CANCELED)
- cy_as_dma_completed_callback(dev_p->tag, ep, datacnt, ret);
-}
-
-/*
- * Set the DRQ mask register for the given endpoint number. If state is
- * CyTrue, the DRQ interrupt for the given endpoint is enabled, otherwise
- * it is disabled.
- */
-static void
-cy_as_dma_set_drq(cy_as_device *dev_p,
- cy_as_end_point_number_t ep, cy_bool state)
-{
- uint16_t mask;
- uint16_t v;
- uint32_t intval;
-
- /*
- * there are not DRQ register bits for EP0 and EP1
- */
- if (ep == 0 || ep == 1)
- return;
-
- /*
- * disable interrupts while we do this to be sure the state of the
- * DRQ mask register is always well defined.
- */
- intval = cy_as_hal_disable_interrupts();
-
- /*
- * set the DRQ bit to the given state for the ep given
- */
- mask = (1 << ep);
- v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_DRQ_MASK);
-
- if (state)
- v |= mask;
- else
- v &= ~mask;
-
- cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_DRQ_MASK, v);
- cy_as_hal_enable_interrupts(intval);
-}
-
-/*
-* Send the next DMA request for the endpoint given
-*/
-static void
-cy_as_dma_send_next_dma_request(cy_as_device *dev_p, cy_as_dma_end_point *ep_p)
-{
- uint32_t datacnt;
- void *buf_p;
- cy_as_dma_queue_entry *dma_p;
-
- cy_as_log_debug_message(6, "cy_as_dma_send_next_dma_request called");
-
- /* If the queue is empty, nothing to do */
- dma_p = ep_p->queue_p;
- if (dma_p == 0) {
- /*
- * there are no pending DMA requests
- * for this endpoint. disable the DRQ
- * mask bits to insure no interrupts
- * will be triggered by this endpoint
- * until someone is interested in the data.
- */
- cy_as_dma_set_drq(dev_p, ep_p->ep, cy_false);
- return;
- }
-
- cy_as_dma_end_point_set_running(ep_p);
-
- /*
- * get the number of words that still
- * need to be xferred in this request.
- */
- datacnt = dma_p->size - dma_p->offset;
- cy_as_hal_assert(datacnt >= 0);
-
- /*
- * the HAL layer should never limit the size
- * of the transfer to something less than the
- * maxhwdata otherwise, the data will be sent
- * in packets that are not correct in size.
- */
- cy_as_hal_assert(ep_p->maxhaldata == CY_AS_DMA_MAX_SIZE_HW_SIZE
- || ep_p->maxhaldata >= ep_p->maxhwdata);
-
- /*
- * update the number of words that need to be xferred yet
- * based on the limits of the HAL layer.
- */
- if (ep_p->maxhaldata == CY_AS_DMA_MAX_SIZE_HW_SIZE) {
- if (datacnt > ep_p->maxhwdata)
- datacnt = ep_p->maxhwdata;
- } else {
- if (datacnt > ep_p->maxhaldata)
- datacnt = ep_p->maxhaldata;
- }
-
- /*
- * find a pointer to the data that needs to be transferred
- */
- buf_p = (((char *)dma_p->buf_p) + dma_p->offset);
-
- /*
- * mark a request in transit
- */
- cy_as_dma_end_point_set_in_transit(ep_p);
-
- if (ep_p->ep == 0 || ep_p->ep == 1) {
- /*
- * if this is a WRITE request on EP0 and EP1
- * we write the data via an EP_DATA request
- * to west bridge via the mailbox registers.
- * if this is a READ request, we do nothing
- * and the data will arrive via an EP_DATA
- * request from west bridge. in the request
- * handler for the USB context we will pass
- * the data back into the DMA module.
- */
- if (dma_p->readreq == cy_false) {
- uint16_t v;
- uint16_t len;
- cy_as_ll_request_response *resp_p;
- cy_as_ll_request_response *req_p;
- cy_as_return_status_t ret;
-
- len = (uint16_t)(datacnt / 2);
- if (datacnt % 2)
- len++;
-
- len++;
-
- if (ep_p->ep == 0) {
- req_p = dev_p->usb_ep0_dma_req;
- resp_p = dev_p->usb_ep0_dma_resp;
- dev_p->usb_ep0_dma_req = 0;
- dev_p->usb_ep0_dma_resp = 0;
- } else {
- req_p = dev_p->usb_ep1_dma_req;
- resp_p = dev_p->usb_ep1_dma_resp;
- dev_p->usb_ep1_dma_req = 0;
- dev_p->usb_ep1_dma_resp = 0;
- }
-
- cy_as_hal_assert(req_p != 0);
- cy_as_hal_assert(resp_p != 0);
- cy_as_hal_assert(len <= 64);
-
- cy_as_ll_init_request(req_p, CY_RQT_USB_EP_DATA,
- CY_RQT_USB_RQT_CONTEXT, len);
-
- v = (uint16_t)(datacnt | (ep_p->ep << 13) | (1 << 14));
- if (dma_p->offset == 0)
- v |= (1 << 12);/* Set the first packet bit */
- if (dma_p->offset + datacnt == dma_p->size)
- v |= (1 << 11);/* Set the last packet bit */
-
- cy_as_ll_request_response__set_word(req_p, 0, v);
- cy_as_ll_request_response__pack(req_p,
- 1, datacnt, buf_p);
-
- cy_as_ll_init_response(resp_p, 1);
-
- ret = cy_as_ll_send_request(dev_p, req_p, resp_p,
- cy_false, cy_as_dma_request_callback);
- if (ret == CY_AS_ERROR_SUCCESS)
- cy_as_log_debug_message(5,
- "+++ send EP 0/1 data via mailbox registers");
- else
- cy_as_log_debug_message(5,
- "+++ error sending EP 0/1 data via mailbox "
- "registers - CY_AS_ERROR_TIMEOUT");
-
- if (ret != CY_AS_ERROR_SUCCESS)
- cy_as_dma_completed_callback(dev_p->tag,
- ep_p->ep, 0, ret);
- }
- } else {
- /*
- * this is a DMA request on an endpoint that is accessible
- * via the P port. ask the HAL DMA capabilities to
- * perform this. the amount of data sent is limited by the
- * HAL max size as well as what we need to send. if the
- * ep_p->maxhaldata is set to a value larger than the
- * endpoint buffer size, then we will pass more than a
- * single buffer worth of data to the HAL layer and expect
- * the HAL layer to divide the data into packets. the last
- * parameter here (ep_p->maxhwdata) gives the packet size for
- * the data so the HAL layer knows what the packet size should
- * be.
- */
- if (cy_as_dma_end_point_is_direction_in(ep_p))
- cy_as_hal_dma_setup_write(dev_p->tag,
- ep_p->ep, buf_p, datacnt, ep_p->maxhwdata);
- else
- cy_as_hal_dma_setup_read(dev_p->tag,
- ep_p->ep, buf_p, datacnt, ep_p->maxhwdata);
-
- /*
- * the DRQ interrupt for this endpoint should be enabled
- * so that the data transfer progresses at interrupt time.
- */
- cy_as_dma_set_drq(dev_p, ep_p->ep, cy_true);
- }
-}
-
-/*
- * This function is called when the HAL layer has
- * completed the last requested DMA operation.
- * This function sends/receives the next batch of
- * data associated with the current DMA request,
- * or it is is complete, moves to the next DMA request.
- */
-void
-cy_as_dma_completed_callback(cy_as_hal_device_tag tag,
- cy_as_end_point_number_t ep, uint32_t cnt, cy_as_return_status_t status)
-{
- uint32_t mask;
- cy_as_dma_queue_entry *req_p;
- cy_as_dma_end_point *ep_p;
- cy_as_device *dev_p = cy_as_device_find_from_tag(tag);
-
- /* Make sure the HAL layer gave us good parameters */
- cy_as_hal_assert(dev_p != 0);
- cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
- cy_as_hal_assert(ep < 16);
-
-
- /* Get the endpoint ptr */
- ep_p = CY_AS_NUM_EP(dev_p, ep);
- cy_as_hal_assert(ep_p->queue_p != 0);
-
- /* Get a pointer to the current entry in the queue */
- mask = cy_as_hal_disable_interrupts();
- req_p = ep_p->queue_p;
-
- /* Update the offset to reflect the data actually received or sent */
- req_p->offset += cnt;
-
- /*
- * if we are still sending/receiving the current packet,
- * send/receive the next chunk basically we keep going
- * if we have not sent/received enough data, and we are
- * not doing a packet operation, and the last packet
- * sent or received was a full sized packet. in other
- * words, when we are NOT doing a packet operation, a
- * less than full size packet (a short packet) will
- * terminate the operation.
- *
- * note: if this is EP1 request and the request has
- * timed out, it means the buffer is not free.
- * we have to resend the data.
- *
- * note: for the MTP data transfers, the DMA transfer
- * for the next packet can only be started asynchronously,
- * after a firmware event notifies that the device is ready.
- */
- if (((req_p->offset != req_p->size) && (req_p->packet == cy_false) &&
- ((cnt == ep_p->maxhaldata) || ((cnt == ep_p->maxhwdata) &&
- ((ep != CY_AS_MTP_READ_ENDPOINT) ||
- (cnt == dev_p->usb_max_tx_size)))))
- || ((ep == 1) && (status == CY_AS_ERROR_TIMEOUT))) {
- cy_as_hal_enable_interrupts(mask);
-
- /*
- * and send the request again to send the next block of
- * data. special handling for MTP transfers on E_ps 2
- * and 6. the send_next_request will be processed based
- * on the event sent by the firmware.
- */
- if ((ep == CY_AS_MTP_WRITE_ENDPOINT) || (
- (ep == CY_AS_MTP_READ_ENDPOINT) &&
- (!cy_as_dma_end_point_is_direction_in(ep_p))))
- cy_as_dma_end_point_set_stopped(ep_p);
- else
- cy_as_dma_send_next_dma_request(dev_p, ep_p);
- } else {
- /*
- * we get here if ...
- * we have sent or received all of the data
- * or
- * we are doing a packet operation
- * or
- * we receive a short packet
- */
-
- /*
- * remove this entry from the DMA queue for this endpoint.
- */
- cy_as_dma_end_point_clear_in_transit(ep_p);
- ep_p->queue_p = req_p->next_p;
- if (ep_p->last_p == req_p) {
- /*
- * we have removed the last packet from the DMA queue,
- * disable the interrupt associated with this interrupt.
- */
- ep_p->last_p = 0;
- cy_as_hal_enable_interrupts(mask);
- cy_as_dma_set_drq(dev_p, ep, cy_false);
- } else
- cy_as_hal_enable_interrupts(mask);
-
- if (req_p->cb) {
- /*
- * if the request has a callback associated with it,
- * call the callback to tell the interested party that
- * this DMA request has completed.
- *
- * note, we set the in_callback bit to insure that we
- * cannot recursively call an API function that is
- * synchronous only from a callback.
- */
- cy_as_device_set_in_callback(dev_p);
- (*req_p->cb)(dev_p, ep, req_p->buf_p,
- req_p->offset, status);
- cy_as_device_clear_in_callback(dev_p);
- }
-
- /*
- * we are done with this request, put it on the freelist to be
- * reused at a later time.
- */
- cy_as_dma_add_request_to_free_queue(dev_p, req_p);
-
- if (ep_p->queue_p == 0) {
- /*
- * if the endpoint is out of DMA entries, set the
- * endpoint as stopped.
- */
- cy_as_dma_end_point_set_stopped(ep_p);
-
- /*
- * the DMA queue is empty, wake any task waiting on
- * the QUEUE to drain.
- */
- if (cy_as_dma_end_point_is_sleeping(ep_p)) {
- cy_as_dma_end_point_set_wake_state(ep_p);
- cy_as_hal_wake(&ep_p->channel);
- }
- } else {
- /*
- * if the queued operation is a MTP transfer,
- * wait until firmware event before sending
- * down the next DMA request.
- */
- if ((ep == CY_AS_MTP_WRITE_ENDPOINT) ||
- ((ep == CY_AS_MTP_READ_ENDPOINT) &&
- (!cy_as_dma_end_point_is_direction_in(ep_p))) ||
- ((ep == dev_p->storage_read_endpoint) &&
- (!cy_as_device_is_p2s_dma_start_recvd(dev_p)))
- || ((ep == dev_p->storage_write_endpoint) &&
- (!cy_as_device_is_p2s_dma_start_recvd(dev_p))))
- cy_as_dma_end_point_set_stopped(ep_p);
- else
- cy_as_dma_send_next_dma_request(dev_p, ep_p);
- }
- }
-}
-
-/*
-* This function is used to kick start DMA on a given
-* channel. If DMA is already running on the given
-* endpoint, nothing happens. If DMA is not running,
-* the first entry is pulled from the DMA queue and
-* sent/recevied to/from the West Bridge device.
-*/
-cy_as_return_status_t
-cy_as_dma_kick_start(cy_as_device *dev_p, cy_as_end_point_number_t ep)
-{
- cy_as_dma_end_point *ep_p;
- cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
-
- ep_p = CY_AS_NUM_EP(dev_p, ep);
-
- /* We are already running */
- if (cy_as_dma_end_point_is_running(ep_p))
- return CY_AS_ERROR_SUCCESS;
-
- cy_as_dma_send_next_dma_request(dev_p, ep_p);
- return CY_AS_ERROR_SUCCESS;
-}
-
-/*
- * This function stops the given endpoint. Stopping and endpoint cancels
- * any pending DMA operations and frees all resources associated with the
- * given endpoint.
- */
-static cy_as_return_status_t
-cy_as_dma_stop_end_point(cy_as_device *dev_p, cy_as_end_point_number_t ep)
-{
- cy_as_return_status_t ret;
- cy_as_dma_end_point *ep_p = CY_AS_NUM_EP(dev_p, ep);
-
- /*
- * cancel any pending DMA requests associated with this endpoint. this
- * cancels any DMA requests at the HAL layer as well as dequeues any
- * request that is currently pending.
- */
- ret = cy_as_dma_cancel(dev_p, ep, CY_AS_ERROR_CANCELED);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- /*
- * destroy the sleep channel
- */
- if (!cy_as_hal_destroy_sleep_channel(&ep_p->channel)
- && ret == CY_AS_ERROR_SUCCESS)
- ret = CY_AS_ERROR_DESTROY_SLEEP_CHANNEL_FAILED;
-
- /*
- * free the memory associated with this endpoint
- */
- cy_as_hal_free(ep_p);
-
- /*
- * set the data structure ptr to something sane since the
- * previous pointer is now free.
- */
- dev_p->endp[ep] = 0;
-
- return ret;
-}
-
-/*
- * This method stops the USB stack. This is an internal function that does
- * all of the work of destroying the USB stack without the protections that
- * we provide to the API (i.e. stopping at stack that is not running).
- */
-static cy_as_return_status_t
-cy_as_dma_stop_internal(cy_as_device *dev_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_return_status_t lret;
- cy_as_end_point_number_t i;
-
- /*
- * stop all of the endpoints. this cancels all DMA requests, and
- * frees all resources associated with each endpoint.
- */
- for (i = 0; i < sizeof(dev_p->endp)/(sizeof(dev_p->endp[0])); i++) {
- lret = cy_as_dma_stop_end_point(dev_p, i);
- if (lret != CY_AS_ERROR_SUCCESS && ret == CY_AS_ERROR_SUCCESS)
- ret = lret;
- }
-
- /*
- * now, free the list of DMA requests structures that we use to manage
- * DMA requests.
- */
- while (dev_p->dma_freelist_p) {
- cy_as_dma_queue_entry *req_p;
- uint32_t imask = cy_as_hal_disable_interrupts();
-
- req_p = dev_p->dma_freelist_p;
- dev_p->dma_freelist_p = req_p->next_p;
-
- cy_as_hal_enable_interrupts(imask);
-
- cy_as_hal_free(req_p);
- }
-
- cy_as_ll_destroy_request(dev_p, dev_p->usb_ep0_dma_req);
- cy_as_ll_destroy_request(dev_p, dev_p->usb_ep1_dma_req);
- cy_as_ll_destroy_response(dev_p, dev_p->usb_ep0_dma_resp);
- cy_as_ll_destroy_response(dev_p, dev_p->usb_ep1_dma_resp);
-
- return ret;
-}
-
-
-/*
- * CyAsDmaStop()
- *
- * This function shuts down the DMA module. All resources
- * associated with the DMA module will be freed. This
- * routine is the API stop function. It insures that we
- * are stopping a stack that is actually running and then
- * calls the internal function to do the work.
- */
-cy_as_return_status_t
-cy_as_dma_stop(cy_as_device *dev_p)
-{
- cy_as_return_status_t ret;
-
- ret = cy_as_dma_stop_internal(dev_p);
- cy_as_device_set_dma_stopped(dev_p);
-
- return ret;
-}
-
-/*
- * CyAsDmaStart()
- *
- * This function initializes the DMA module to insure it is up and running.
- */
-cy_as_return_status_t
-cy_as_dma_start(cy_as_device *dev_p)
-{
- cy_as_end_point_number_t i;
- uint16_t cnt;
-
- if (cy_as_device_is_dma_running(dev_p))
- return CY_AS_ERROR_ALREADY_RUNNING;
-
- /*
- * pre-allocate DMA queue structures to be used in the interrupt context
- */
- for (cnt = 0; cnt < 32; cnt++) {
- cy_as_dma_queue_entry *entry_p = (cy_as_dma_queue_entry *)
- cy_as_hal_alloc(sizeof(cy_as_dma_queue_entry));
- if (entry_p == 0) {
- cy_as_dma_stop_internal(dev_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
- cy_as_dma_add_request_to_free_queue(dev_p, entry_p);
- }
-
- /*
- * pre-allocate the DMA requests for sending EP0
- * and EP1 data to west bridge
- */
- dev_p->usb_ep0_dma_req = cy_as_ll_create_request(dev_p,
- CY_RQT_USB_EP_DATA, CY_RQT_USB_RQT_CONTEXT, 64);
- dev_p->usb_ep1_dma_req = cy_as_ll_create_request(dev_p,
- CY_RQT_USB_EP_DATA, CY_RQT_USB_RQT_CONTEXT, 64);
-
- if (dev_p->usb_ep0_dma_req == 0 || dev_p->usb_ep1_dma_req == 0) {
- cy_as_dma_stop_internal(dev_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
- dev_p->usb_ep0_dma_req_save = dev_p->usb_ep0_dma_req;
-
- dev_p->usb_ep0_dma_resp = cy_as_ll_create_response(dev_p, 1);
- dev_p->usb_ep1_dma_resp = cy_as_ll_create_response(dev_p, 1);
- if (dev_p->usb_ep0_dma_resp == 0 || dev_p->usb_ep1_dma_resp == 0) {
- cy_as_dma_stop_internal(dev_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
- dev_p->usb_ep0_dma_resp_save = dev_p->usb_ep0_dma_resp;
-
- /*
- * set the dev_p->endp to all zeros to insure cleanup is possible if
- * an error occurs during initialization.
- */
- cy_as_hal_mem_set(dev_p->endp, 0, sizeof(dev_p->endp));
-
- /*
- * now, iterate through each of the endpoints and initialize each
- * one.
- */
- for (i = 0; i < sizeof(dev_p->endp)/sizeof(dev_p->endp[0]); i++) {
- dev_p->endp[i] = (cy_as_dma_end_point *)
- cy_as_hal_alloc(sizeof(cy_as_dma_end_point));
- if (dev_p->endp[i] == 0) {
- cy_as_dma_stop_internal(dev_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
- cy_as_hal_mem_set(dev_p->endp[i], 0,
- sizeof(cy_as_dma_end_point));
-
- dev_p->endp[i]->ep = i;
- dev_p->endp[i]->queue_p = 0;
- dev_p->endp[i]->last_p = 0;
-
- cy_as_dma_set_drq(dev_p, i, cy_false);
-
- if (!cy_as_hal_create_sleep_channel(&dev_p->endp[i]->channel))
- return CY_AS_ERROR_CREATE_SLEEP_CHANNEL_FAILED;
- }
-
- /*
- * tell the HAL layer who to call when the
- * HAL layer completes a DMA request
- */
- cy_as_hal_dma_register_callback(dev_p->tag,
- cy_as_dma_completed_callback);
-
- /*
- * mark DMA as up and running on this device
- */
- cy_as_device_set_dma_running(dev_p);
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-/*
-* Wait for all entries in the DMA queue associated
-* the given endpoint to be drained. This function
-* will not return until all the DMA data has been
-* transferred.
-*/
-cy_as_return_status_t
-cy_as_dma_drain_queue(cy_as_device *dev_p,
- cy_as_end_point_number_t ep, cy_bool kickstart)
-{
- cy_as_dma_end_point *ep_p;
- int loopcount = 1000;
- uint32_t mask;
-
- /*
- * make sure the endpoint is valid
- */
- if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0]))
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
- /* Get the endpoint pointer based on the endpoint number */
- ep_p = CY_AS_NUM_EP(dev_p, ep);
-
- /*
- * if the endpoint is empty of traffic, we return
- * with success immediately
- */
- mask = cy_as_hal_disable_interrupts();
- if (ep_p->queue_p == 0) {
- cy_as_hal_enable_interrupts(mask);
- return CY_AS_ERROR_SUCCESS;
- } else {
- /*
- * add 10 seconds to the time out value for each 64 KB segment
- * of data to be transferred.
- */
- if (ep_p->queue_p->size > 0x10000)
- loopcount += ((ep_p->queue_p->size / 0x10000) * 1000);
- }
- cy_as_hal_enable_interrupts(mask);
-
- /* If we are already sleeping on this endpoint, it is an error */
- if (cy_as_dma_end_point_is_sleeping(ep_p))
- return CY_AS_ERROR_NESTED_SLEEP;
-
- /*
- * we disable the endpoint while the queue drains to
- * prevent any additional requests from being queued while we are waiting
- */
- cy_as_dma_enable_end_point(dev_p, ep,
- cy_false, cy_as_direction_dont_change);
-
- if (kickstart) {
- /*
- * now, kick start the DMA if necessary
- */
- cy_as_dma_kick_start(dev_p, ep);
- }
-
- /*
- * check one last time before we begin sleeping to see if the
- * queue is drained.
- */
- if (ep_p->queue_p == 0) {
- cy_as_dma_enable_end_point(dev_p, ep, cy_true,
- cy_as_direction_dont_change);
- return CY_AS_ERROR_SUCCESS;
- }
-
- while (loopcount-- > 0) {
- /*
- * sleep for 10 ms maximum (per loop) while
- * waiting for the transfer to complete.
- */
- cy_as_dma_end_point_set_sleep_state(ep_p);
- cy_as_hal_sleep_on(&ep_p->channel, 10);
-
- /* If we timed out, the sleep bit will still be set */
- cy_as_dma_end_point_set_wake_state(ep_p);
-
- /* Check the queue to see if is drained */
- if (ep_p->queue_p == 0) {
- /*
- * clear the endpoint running and in transit flags
- * for the endpoint, now that its DMA queue is empty.
- */
- cy_as_dma_end_point_clear_in_transit(ep_p);
- cy_as_dma_end_point_set_stopped(ep_p);
-
- cy_as_dma_enable_end_point(dev_p, ep,
- cy_true, cy_as_direction_dont_change);
- return CY_AS_ERROR_SUCCESS;
- }
- }
-
- /*
- * the DMA operation that has timed out can be cancelled, so that later
- * operations on this queue can proceed.
- */
- cy_as_dma_cancel(dev_p, ep, CY_AS_ERROR_TIMEOUT);
- cy_as_dma_enable_end_point(dev_p, ep,
- cy_true, cy_as_direction_dont_change);
- return CY_AS_ERROR_TIMEOUT;
-}
-
-/*
-* This function queues a write request in the DMA queue
-* for a given endpoint. The direction of the
-* entry will be inferred from the endpoint direction.
-*/
-cy_as_return_status_t
-cy_as_dma_queue_request(cy_as_device *dev_p,
- cy_as_end_point_number_t ep, void *mem_p,
- uint32_t size, cy_bool pkt, cy_bool readreq, cy_as_dma_callback cb)
-{
- uint32_t mask;
- cy_as_dma_queue_entry *entry_p;
- cy_as_dma_end_point *ep_p;
-
- /*
- * make sure the endpoint is valid
- */
- if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0]))
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
- /* Get the endpoint pointer based on the endpoint number */
- ep_p = CY_AS_NUM_EP(dev_p, ep);
-
- if (!cy_as_dma_end_point_is_enabled(ep_p))
- return CY_AS_ERROR_ENDPOINT_DISABLED;
-
- entry_p = cy_as_dma_get_dma_queue_entry(dev_p);
-
- entry_p->buf_p = mem_p;
- entry_p->cb = cb;
- entry_p->size = size;
- entry_p->offset = 0;
- entry_p->packet = pkt;
- entry_p->readreq = readreq;
-
- mask = cy_as_hal_disable_interrupts();
- entry_p->next_p = 0;
- if (ep_p->last_p)
- ep_p->last_p->next_p = entry_p;
- ep_p->last_p = entry_p;
- if (ep_p->queue_p == 0)
- ep_p->queue_p = entry_p;
- cy_as_hal_enable_interrupts(mask);
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-/*
-* This function enables or disables and endpoint for DMA
-* queueing. If an endpoint is disabled, any queue requests
-* continue to be processed, but no new requests can be queued.
-*/
-cy_as_return_status_t
-cy_as_dma_enable_end_point(cy_as_device *dev_p,
- cy_as_end_point_number_t ep, cy_bool enable, cy_as_dma_direction dir)
-{
- cy_as_dma_end_point *ep_p;
-
- /*
- * make sure the endpoint is valid
- */
- if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0]))
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
- /* Get the endpoint pointer based on the endpoint number */
- ep_p = CY_AS_NUM_EP(dev_p, ep);
-
- if (dir == cy_as_direction_out)
- cy_as_dma_end_point_set_direction_out(ep_p);
- else if (dir == cy_as_direction_in)
- cy_as_dma_end_point_set_direction_in(ep_p);
-
- /*
- * get the maximum size of data buffer the HAL
- * layer can accept. this is used when the DMA
- * module is sending DMA requests to the HAL.
- * the DMA module will never send down a request
- * that is greater than this value.
- *
- * for EP0 and EP1, we can send no more than 64
- * bytes of data at one time as this is the maximum
- * size of a packet that can be sent via these
- * endpoints.
- */
- if (ep == 0 || ep == 1)
- ep_p->maxhaldata = 64;
- else
- ep_p->maxhaldata = cy_as_hal_dma_max_request_size(
- dev_p->tag, ep);
-
- if (enable)
- cy_as_dma_end_point_enable(ep_p);
- else
- cy_as_dma_end_point_disable(ep_p);
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-/*
- * This function cancels any DMA operations pending with the HAL layer as well
- * as any DMA operation queued on the endpoint.
- */
-cy_as_return_status_t
-cy_as_dma_cancel(
- cy_as_device *dev_p,
- cy_as_end_point_number_t ep,
- cy_as_return_status_t err)
-{
- uint32_t mask;
- cy_as_dma_end_point *ep_p;
- cy_as_dma_queue_entry *entry_p;
- cy_bool epstate;
-
- /*
- * make sure the endpoint is valid
- */
- if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0]))
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
- /* Get the endpoint pointer based on the endpoint number */
- ep_p = CY_AS_NUM_EP(dev_p, ep);
-
- if (ep_p) {
- /* Remember the state of the endpoint */
- epstate = cy_as_dma_end_point_is_enabled(ep_p);
-
- /*
- * disable the endpoint so no more DMA packets can be
- * queued.
- */
- cy_as_dma_enable_end_point(dev_p, ep,
- cy_false, cy_as_direction_dont_change);
-
- /*
- * don't allow any interrupts from this endpoint
- * while we get the most current request off of
- * the queue.
- */
- cy_as_dma_set_drq(dev_p, ep, cy_false);
-
- /*
- * cancel any pending request queued in the HAL layer
- */
- if (cy_as_dma_end_point_in_transit(ep_p))
- cy_as_hal_dma_cancel_request(dev_p->tag, ep_p->ep);
-
- /*
- * shutdown the DMA for this endpoint so no
- * more data is transferred
- */
- cy_as_dma_end_point_set_stopped(ep_p);
-
- /*
- * mark the endpoint as not in transit, because we are
- * going to consume any queued requests
- */
- cy_as_dma_end_point_clear_in_transit(ep_p);
-
- /*
- * now, remove each entry in the queue and call the
- * associated callback stating that the request was
- * canceled.
- */
- ep_p->last_p = 0;
- while (ep_p->queue_p != 0) {
- /* Disable interrupts to manipulate the queue */
- mask = cy_as_hal_disable_interrupts();
-
- /* Remove an entry from the queue */
- entry_p = ep_p->queue_p;
- ep_p->queue_p = entry_p->next_p;
-
- /* Ok, the queue has been updated, we can
- * turn interrupts back on */
- cy_as_hal_enable_interrupts(mask);
-
- /* Call the callback indicating we have
- * canceled the DMA */
- if (entry_p->cb)
- entry_p->cb(dev_p, ep,
- entry_p->buf_p, entry_p->size, err);
-
- cy_as_dma_add_request_to_free_queue(dev_p, entry_p);
- }
-
- if (ep == 0 || ep == 1) {
- /*
- * if this endpoint is zero or one, we need to
- * clear the queue of any pending CY_RQT_USB_EP_DATA
- * requests as these are pending requests to send
- * data to the west bridge device.
- */
- cy_as_ll_remove_ep_data_requests(dev_p, ep);
- }
-
- if (epstate) {
- /*
- * the endpoint started out enabled, so we
- * re-enable the endpoint here.
- */
- cy_as_dma_enable_end_point(dev_p, ep,
- cy_true, cy_as_direction_dont_change);
- }
- }
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-cy_as_return_status_t
-cy_as_dma_received_data(cy_as_device *dev_p,
- cy_as_end_point_number_t ep, uint32_t dsize, void *data)
-{
- cy_as_dma_queue_entry *dma_p;
- uint8_t *src_p, *dest_p;
- cy_as_dma_end_point *ep_p;
- uint32_t xfersize;
-
- /*
- * make sure the endpoint is valid
- */
- if (ep != 0 && ep != 1)
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
- /* Get the endpoint pointer based on the endpoint number */
- ep_p = CY_AS_NUM_EP(dev_p, ep);
- dma_p = ep_p->queue_p;
- if (dma_p == 0)
- return CY_AS_ERROR_SUCCESS;
-
- /*
- * if the data received exceeds the size of the DMA buffer,
- * clip the data to the size of the buffer. this can lead
- * to losing some data, but is not different than doing
- * non-packet reads on the other endpoints.
- */
- if (dsize > dma_p->size - dma_p->offset)
- dsize = dma_p->size - dma_p->offset;
-
- /*
- * copy the data from the request packet to the DMA buffer
- * for the endpoint
- */
- src_p = (uint8_t *)data;
- dest_p = ((uint8_t *)(dma_p->buf_p)) + dma_p->offset;
- xfersize = dsize;
- while (xfersize-- > 0)
- *dest_p++ = *src_p++;
-
- /* Signal the DMA module that we have
- * received data for this EP request */
- cy_as_dma_completed_callback(dev_p->tag,
- ep, dsize, CY_AS_ERROR_SUCCESS);
-
- return CY_AS_ERROR_SUCCESS;
-}
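
A minimal standalone sketch of the clip-and-copy step performed by cy_as_dma_received_data() above: data that exceeds the remaining room in the DMA buffer is silently truncated before being copied in. The function and parameter names here are illustrative placeholders, not part of the West Bridge API.

#include <stdint.h>
#include <string.h>

/* Copy at most the space left in the buffer, dropping any excess,
 * and return the number of bytes actually stored. */
static uint32_t clip_and_copy(uint8_t *buf, uint32_t buf_size,
			      uint32_t offset, const void *data,
			      uint32_t dsize)
{
	uint32_t space = buf_size - offset;

	if (dsize > space)
		dsize = space;	/* clip, as the driver does for EP0/EP1 */

	memcpy(buf + offset, data, dsize);
	return dsize;
}
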
diff --git a/drivers/staging/westbridge/astoria/api/src/cyasintr.c b/drivers/staging/westbridge/astoria/api/src/cyasintr.c
deleted file mode 100644
index b60f69ce598..00000000000
--- a/drivers/staging/westbridge/astoria/api/src/cyasintr.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/* Cypress West Bridge API source file (cyasintr.c)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#include "../../include/linux/westbridge/cyashal.h"
-#include "../../include/linux/westbridge/cyasdevice.h"
-#include "../../include/linux/westbridge/cyasregs.h"
-#include "../../include/linux/westbridge/cyaserr.h"
-
-extern void cy_as_mail_box_interrupt_handler(cy_as_device *);
-
-void
-cy_as_mcu_interrupt_handler(cy_as_device *dev_p)
-{
- /* Read and clear the interrupt. */
- uint16_t v;
-
- v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_MCU_STAT);
- v = v;
-}
-
-void
-cy_as_power_management_interrupt_handler(cy_as_device *dev_p)
-{
- uint16_t v;
-
- v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_PWR_MAGT_STAT);
- v = v;
-}
-
-void
-cy_as_pll_lock_loss_interrupt_handler(cy_as_device *dev_p)
-{
- uint16_t v;
-
- v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_PLL_LOCK_LOSS_STAT);
- v = v;
-}
-
-uint32_t cy_as_intr_start(cy_as_device *dev_p, cy_bool dmaintr)
-{
- uint16_t v;
-
- cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
-
- if (cy_as_device_is_intr_running(dev_p) != 0)
- return CY_AS_ERROR_ALREADY_RUNNING;
-
- v = CY_AS_MEM_P0_INT_MASK_REG_MMCUINT |
- CY_AS_MEM_P0_INT_MASK_REG_MMBINT |
- CY_AS_MEM_P0_INT_MASK_REG_MPMINT;
-
- if (dmaintr)
- v |= CY_AS_MEM_P0_INT_MASK_REG_MDRQINT;
-
- /* Enable the interrupts of interest */
- cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_INT_MASK_REG, v);
-
- /* Mark the interrupt module as initialized */
- cy_as_device_set_intr_running(dev_p);
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-uint32_t cy_as_intr_stop(cy_as_device *dev_p)
-{
- cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
-
- if (cy_as_device_is_intr_running(dev_p) == 0)
- return CY_AS_ERROR_NOT_RUNNING;
-
- cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_INT_MASK_REG, 0);
- cy_as_device_set_intr_stopped(dev_p);
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-void cy_as_intr_service_interrupt(cy_as_hal_device_tag tag)
-{
- uint16_t v;
- cy_as_device *dev_p;
-
- dev_p = cy_as_device_find_from_tag(tag);
-
- /*
- * only power management interrupts can occur before the
- * antioch API setup is complete. if this is a PM interrupt
- * handle it here; otherwise output a warning message.
- */
- if (dev_p == 0) {
- v = cy_as_hal_read_register(tag, CY_AS_MEM_P0_INTR_REG);
- if (v == CY_AS_MEM_P0_INTR_REG_PMINT) {
- /* Read the PWR_MAGT_STAT register
- * to clear this interrupt. */
- v = cy_as_hal_read_register(tag,
- CY_AS_MEM_PWR_MAGT_STAT);
- } else
- cy_as_hal_print_message("stray antioch "
- "interrupt detected"
- ", tag not associated "
- "with any created device.");
- return;
- }
-
- /* Make sure we got a valid object from CyAsDeviceFindFromTag */
- cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
-
- v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_INTR_REG);
-
- if (v & CY_AS_MEM_P0_INTR_REG_MCUINT)
- cy_as_mcu_interrupt_handler(dev_p);
-
- if (v & CY_AS_MEM_P0_INTR_REG_PMINT)
- cy_as_power_management_interrupt_handler(dev_p);
-
- if (v & CY_AS_MEM_P0_INTR_REG_PLLLOCKINT)
- cy_as_pll_lock_loss_interrupt_handler(dev_p);
-
- /* If the interrupt module is not running, no mailbox
- * interrupts are expected from the west bridge. */
- if (cy_as_device_is_intr_running(dev_p) == 0)
- return;
-
- if (v & CY_AS_MEM_P0_INTR_REG_MBINT)
- cy_as_mail_box_interrupt_handler(dev_p);
-}
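
cy_as_intr_service_interrupt() above follows a common dispatch pattern: read one interrupt status register and call a handler for each bit that is set, with the mailbox handler gated on the interrupt module being started. A generic sketch of that pattern, assuming placeholder bit masks, types and handlers rather than the real West Bridge register definitions:

#include <stdint.h>
#include <stdbool.h>

#define INT_MCU		(1u << 0)	/* placeholder bit positions */
#define INT_PM		(1u << 1)
#define INT_PLL		(1u << 2)
#define INT_MAILBOX	(1u << 3)

struct dev;
extern uint16_t read_intr_status(struct dev *d);
extern bool intr_module_running(const struct dev *d);
extern void handle_mcu(struct dev *d);
extern void handle_pm(struct dev *d);
extern void handle_pll(struct dev *d);
extern void handle_mailbox(struct dev *d);

static void service_interrupt(struct dev *d)
{
	uint16_t v = read_intr_status(d);

	if (v & INT_MCU)
		handle_mcu(d);
	if (v & INT_PM)
		handle_pm(d);
	if (v & INT_PLL)
		handle_pll(d);

	/* mailbox traffic is only expected once the module is started */
	if ((v & INT_MAILBOX) && intr_module_running(d))
		handle_mailbox(d);
}
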
diff --git a/drivers/staging/westbridge/astoria/api/src/cyaslep2pep.c b/drivers/staging/westbridge/astoria/api/src/cyaslep2pep.c
deleted file mode 100644
index 76821e51b81..00000000000
--- a/drivers/staging/westbridge/astoria/api/src/cyaslep2pep.c
+++ /dev/null
@@ -1,358 +0,0 @@
-/* Cypress West Bridge API source file (cyaslep2pep.c)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#include "../../include/linux/westbridge/cyashal.h"
-#include "../../include/linux/westbridge/cyasusb.h"
-#include "../../include/linux/westbridge/cyaserr.h"
-#include "../../include/linux/westbridge/cyaslowlevel.h"
-#include "../../include/linux/westbridge/cyasdma.h"
-
-typedef enum cy_as_physical_endpoint_state {
- cy_as_e_p_free,
- cy_as_e_p_in,
- cy_as_e_p_out,
- cy_as_e_p_iso_in,
- cy_as_e_p_iso_out
-} cy_as_physical_endpoint_state;
-
-
-/*
-* This map is used to map an index between 1 and 10
-* to a logical endpoint number. This is used to map
-* LEP register indexes into actual EP numbers.
-*/
-static cy_as_end_point_number_t end_point_map[] = {
- 3, 5, 7, 9, 10, 11, 12, 13, 14, 15 };
-
-#define CY_AS_EPCFG_1024 (1 << 3)
-#define CY_AS_EPCFG_DBL (0x02)
-#define CY_AS_EPCFG_TRIPLE (0x03)
-#define CY_AS_EPCFG_QUAD (0x00)
-
-/*
- * NB: This table contains the register values for PEP1
- * and PEP3. PEP2 and PEP4 only have a bit to change the
- * direction of the PEP and therefore are not represented
- * in this table.
- */
-static uint8_t pep_register_values[12][4] = {
- /* Bit 1:0 buffering, 0 = quad, 2 = double, 3 = triple */
- /* Bit 3 size, 0 = 512, 1 = 1024 */
- {
- CY_AS_EPCFG_DBL,
- CY_AS_EPCFG_DBL,
- },/* Config 1 - PEP1 (2 * 512), PEP2 (2 * 512),
- * PEP3 (2 * 512), PEP4 (2 * 512) */
- {
- CY_AS_EPCFG_DBL,
- CY_AS_EPCFG_QUAD,
- }, /* Config 2 - PEP1 (2 * 512), PEP2 (2 * 512),
- * PEP3 (4 * 512), PEP4 (N/A) */
- {
- CY_AS_EPCFG_DBL,
- CY_AS_EPCFG_DBL | CY_AS_EPCFG_1024,
- },/* Config 3 - PEP1 (2 * 512), PEP2 (2 * 512),
- * PEP3 (2 * 1024), PEP4(N/A) */
- {
- CY_AS_EPCFG_QUAD,
- CY_AS_EPCFG_DBL,
- },/* Config 4 - PEP1 (4 * 512), PEP2 (N/A),
- * PEP3 (2 * 512), PEP4 (2 * 512) */
- {
- CY_AS_EPCFG_QUAD,
- CY_AS_EPCFG_QUAD,
- },/* Config 5 - PEP1 (4 * 512), PEP2 (N/A),
- * PEP3 (4 * 512), PEP4 (N/A) */
- {
- CY_AS_EPCFG_QUAD,
- CY_AS_EPCFG_1024 | CY_AS_EPCFG_DBL,
- },/* Config 6 - PEP1 (4 * 512), PEP2 (N/A),
- * PEP3 (2 * 1024), PEP4 (N/A) */
- {
- CY_AS_EPCFG_1024 | CY_AS_EPCFG_DBL,
- CY_AS_EPCFG_DBL,
- },/* Config 7 - PEP1 (2 * 1024), PEP2 (N/A),
- * PEP3 (2 * 512), PEP4 (2 * 512) */
- {
- CY_AS_EPCFG_1024 | CY_AS_EPCFG_DBL,
- CY_AS_EPCFG_QUAD,
- },/* Config 8 - PEP1 (2 * 1024), PEP2 (N/A),
- * PEP3 (4 * 512), PEP4 (N/A) */
- {
- CY_AS_EPCFG_1024 | CY_AS_EPCFG_DBL,
- CY_AS_EPCFG_1024 | CY_AS_EPCFG_DBL,
- },/* Config 9 - PEP1 (2 * 1024), PEP2 (N/A),
- * PEP3 (2 * 1024), PEP4 (N/A)*/
- {
- CY_AS_EPCFG_TRIPLE,
- CY_AS_EPCFG_TRIPLE,
- },/* Config 10 - PEP1 (3 * 512), PEP2 (N/A),
- * PEP3 (3 * 512), PEP4 (2 * 512)*/
- {
- CY_AS_EPCFG_TRIPLE | CY_AS_EPCFG_1024,
- CY_AS_EPCFG_DBL,
- },/* Config 11 - PEP1 (3 * 1024), PEP2 (N/A),
- * PEP3 (N/A), PEP4 (2 * 512) */
- {
- CY_AS_EPCFG_QUAD | CY_AS_EPCFG_1024,
- CY_AS_EPCFG_DBL,
- },/* Config 12 - PEP1 (4 * 1024), PEP2 (N/A),
- * PEP3 (N/A), PEP4 (N/A) */
-};
-
-static cy_as_return_status_t
-find_endpoint_directions(cy_as_device *dev_p,
- cy_as_physical_endpoint_state epstate[4])
-{
- int i;
- cy_as_physical_endpoint_state desired;
-
- /*
- * note, there is no error checking here because
- * ISO error checking happens when the API is called.
- */
- for (i = 0; i < 10; i++) {
- int epno = end_point_map[i];
- if (dev_p->usb_config[epno].enabled) {
- int pep = dev_p->usb_config[epno].physical;
- if (dev_p->usb_config[epno].type == cy_as_usb_iso) {
- /*
- * marking this as an ISO endpoint removes the
- * physical EP from consideration when
- * mapping the remaining EPs.
- */
- if (dev_p->usb_config[epno].dir == cy_as_usb_in)
- desired = cy_as_e_p_iso_in;
- else
- desired = cy_as_e_p_iso_out;
- } else {
- if (dev_p->usb_config[epno].dir == cy_as_usb_in)
- desired = cy_as_e_p_in;
- else
- desired = cy_as_e_p_out;
- }
-
- /*
- * NB: Note the API calls ensure that an ISO endpoint
- * has a physical and logical EP number that are the
- * same, therefore this condition is not enforced here.
- */
- if (epstate[pep - 1] !=
- cy_as_e_p_free && epstate[pep - 1] != desired)
- return CY_AS_ERROR_INVALID_CONFIGURATION;
-
- epstate[pep - 1] = desired;
- }
- }
-
- /*
- * create the EP1 config values directly.
- * both EP1OUT and EP1IN are invalid by default.
- */
- dev_p->usb_ep1cfg[0] = 0;
- dev_p->usb_ep1cfg[1] = 0;
- if (dev_p->usb_config[1].enabled) {
- if ((dev_p->usb_config[1].dir == cy_as_usb_out) ||
- (dev_p->usb_config[1].dir == cy_as_usb_in_out)) {
- /* Set the valid bit and type field. */
- dev_p->usb_ep1cfg[0] = (1 << 7);
- if (dev_p->usb_config[1].type == cy_as_usb_bulk)
- dev_p->usb_ep1cfg[0] |= (2 << 4);
- else
- dev_p->usb_ep1cfg[0] |= (3 << 4);
- }
-
- if ((dev_p->usb_config[1].dir == cy_as_usb_in) ||
- (dev_p->usb_config[1].dir == cy_as_usb_in_out)) {
- /* Set the valid bit and type field. */
- dev_p->usb_ep1cfg[1] = (1 << 7);
- if (dev_p->usb_config[1].type == cy_as_usb_bulk)
- dev_p->usb_ep1cfg[1] |= (2 << 4);
- else
- dev_p->usb_ep1cfg[1] |= (3 << 4);
- }
- }
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-static void
-create_register_settings(cy_as_device *dev_p,
- cy_as_physical_endpoint_state epstate[4])
-{
- int i;
- uint8_t v;
-
- for (i = 0; i < 4; i++) {
- if (i == 0) {
- /* Start with the values that specify size */
- dev_p->usb_pepcfg[i] =
- pep_register_values
- [dev_p->usb_phy_config - 1][0];
- } else if (i == 2) {
- /* Start with the values that specify size */
- dev_p->usb_pepcfg[i] =
- pep_register_values
- [dev_p->usb_phy_config - 1][1];
- } else
- dev_p->usb_pepcfg[i] = 0;
-
- /* Adjust direction if it is in */
- if (epstate[i] == cy_as_e_p_iso_in ||
- epstate[i] == cy_as_e_p_in)
- dev_p->usb_pepcfg[i] |= (1 << 6);
- }
-
- /* Configure the logical EP registers */
- for (i = 0; i < 10; i++) {
- int val;
- int epnum = end_point_map[i];
-
- v = 0x10; /* PEP 1, Bulk Endpoint, EP not valid */
- if (dev_p->usb_config[epnum].enabled) {
- v |= (1 << 7); /* Enabled */
-
- val = dev_p->usb_config[epnum].physical - 1;
- cy_as_hal_assert(val >= 0 && val <= 3);
- v |= (val << 5);
-
- switch (dev_p->usb_config[epnum].type) {
- case cy_as_usb_bulk:
- val = 2;
- break;
- case cy_as_usb_int:
- val = 3;
- break;
- case cy_as_usb_iso:
- val = 1;
- break;
- default:
- cy_as_hal_assert(cy_false);
- break;
- }
- v |= (val << 3);
- }
-
- dev_p->usb_lepcfg[i] = v;
- }
-}
-
-
-cy_as_return_status_t
-cy_as_usb_map_logical2_physical(cy_as_device *dev_p)
-{
- cy_as_return_status_t ret;
-
- /* Physical EPs 3 5 7 9 respectively in the array */
- cy_as_physical_endpoint_state epstate[4] = {
- cy_as_e_p_free, cy_as_e_p_free,
- cy_as_e_p_free, cy_as_e_p_free };
-
- /* Find the direction for the endpoints */
- ret = find_endpoint_directions(dev_p, epstate);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- /*
- * now create the register settings based on the given
- * assignment of logical EPs to physical endpoints.
- */
- create_register_settings(dev_p, epstate);
-
- return ret;
-}
-
-static uint16_t
-get_max_dma_size(cy_as_device *dev_p, cy_as_end_point_number_t ep)
-{
- uint16_t size = dev_p->usb_config[ep].size;
-
- if (size == 0) {
- switch (dev_p->usb_config[ep].type) {
- case cy_as_usb_control:
- size = 64;
- break;
-
- case cy_as_usb_bulk:
- size = cy_as_device_is_usb_high_speed(dev_p) ?
- 512 : 64;
- break;
-
- case cy_as_usb_int:
- size = cy_as_device_is_usb_high_speed(dev_p) ?
- 1024 : 64;
- break;
-
- case cy_as_usb_iso:
- size = cy_as_device_is_usb_high_speed(dev_p) ?
- 1024 : 1023;
- break;
- }
- }
-
- return size;
-}
-
-cy_as_return_status_t
-cy_as_usb_set_dma_sizes(cy_as_device *dev_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint32_t i;
-
- for (i = 0; i < 10; i++) {
- cy_as_usb_end_point_config *config_p =
- &dev_p->usb_config[end_point_map[i]];
- if (config_p->enabled) {
- ret = cy_as_dma_set_max_dma_size(dev_p,
- end_point_map[i],
- get_max_dma_size(dev_p, end_point_map[i]));
- if (ret != CY_AS_ERROR_SUCCESS)
- break;
- }
- }
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_usb_setup_dma(cy_as_device *dev_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint32_t i;
-
- for (i = 0; i < 10; i++) {
- cy_as_usb_end_point_config *config_p =
- &dev_p->usb_config[end_point_map[i]];
- if (config_p->enabled) {
- /* Map the endpoint direction to the DMA direction */
- cy_as_dma_direction dir = cy_as_direction_out;
- if (config_p->dir == cy_as_usb_in)
- dir = cy_as_direction_in;
-
- ret = cy_as_dma_enable_end_point(dev_p,
- end_point_map[i], cy_true, dir);
- if (ret != CY_AS_ERROR_SUCCESS)
- break;
- }
- }
-
- return ret;
-}
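
get_max_dma_size() above supplies the USB-defined default maximum packet sizes whenever the endpoint configuration leaves the size at 0. A table-style sketch of those defaults, using an illustrative enum and helper rather than the driver's own types; the values are the ones in the switch above:

#include <stdint.h>
#include <stdbool.h>

enum ep_type { EP_CONTROL, EP_BULK, EP_INT, EP_ISO };

/* Default max packet size, full speed vs. high speed. */
static uint16_t default_max_packet(enum ep_type type, bool high_speed)
{
	switch (type) {
	case EP_CONTROL:
		return 64;
	case EP_BULK:
		return high_speed ? 512 : 64;
	case EP_INT:
		return high_speed ? 1024 : 64;
	case EP_ISO:
		return high_speed ? 1024 : 1023;
	}
	return 64;
}
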
diff --git a/drivers/staging/westbridge/astoria/api/src/cyaslowlevel.c b/drivers/staging/westbridge/astoria/api/src/cyaslowlevel.c
deleted file mode 100644
index 96a86d08830..00000000000
--- a/drivers/staging/westbridge/astoria/api/src/cyaslowlevel.c
+++ /dev/null
@@ -1,1264 +0,0 @@
-/* Cypress West Bridge API source file (cyaslowlevel.c)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#include "../../include/linux/westbridge/cyashal.h"
-#include "../../include/linux/westbridge/cyascast.h"
-#include "../../include/linux/westbridge/cyasdevice.h"
-#include "../../include/linux/westbridge/cyaslowlevel.h"
-#include "../../include/linux/westbridge/cyasintr.h"
-#include "../../include/linux/westbridge/cyaserr.h"
-#include "../../include/linux/westbridge/cyasregs.h"
-
-static const uint32_t cy_as_low_level_timeout_count = 65536 * 4;
-
-/* Forward declaration */
-static cy_as_return_status_t cy_as_send_one(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p);
-
-/*
-* This array holds the size of the largest request we will ever receive from
-* the West Bridge device per context. The size is in 16 bit words. Note a
-* size of 0xffff indicates that there will be no requests on this context
-* from West Bridge.
-*/
-static uint16_t max_request_length[CY_RQT_CONTEXT_COUNT] = {
- 8, /* CY_RQT_GENERAL_RQT_CONTEXT - CY_RQT_INITIALIZATION_COMPLETE */
- 8, /* CY_RQT_RESOURCE_RQT_CONTEXT - none */
- 8, /* CY_RQT_STORAGE_RQT_CONTEXT - CY_RQT_MEDIA_CHANGED */
- 128, /* CY_RQT_USB_RQT_CONTEXT - CY_RQT_USB_EVENT */
- 8 /* CY_RQT_TUR_RQT_CONTEXT - CY_RQT_TURBO_CMD_FROM_HOST */
-};
-
-/*
-* For the given context, this function removes the request node at the head
-* of the queue from the context. This is called after all processing has
-* occurred on the given request and response and we are ready to remove this
-* entry from the queue.
-*/
-static void
-cy_as_ll_remove_request_queue_head(cy_as_device *dev_p, cy_as_context *ctxt_p)
-{
- uint32_t mask, state;
- cy_as_ll_request_list_node *node_p;
-
- (void)dev_p;
- cy_as_hal_assert(ctxt_p->request_queue_p != 0);
-
- mask = cy_as_hal_disable_interrupts();
- node_p = ctxt_p->request_queue_p;
- ctxt_p->request_queue_p = node_p->next;
- cy_as_hal_enable_interrupts(mask);
-
- node_p->callback = 0;
- node_p->rqt = 0;
- node_p->resp = 0;
-
- /*
- * note that the caller allocates and destroys the request and
- * response. generally the destroy happens in the callback for
- * async requests and after the wait returns for sync. the
- * request and response may not actually be destroyed but may be
- * managed in other ways as well. it is the responsibility of
- * the caller to deal with these in any case. the caller can do
- * this in the request/response callback function.
- */
- state = cy_as_hal_disable_interrupts();
- cy_as_hal_c_b_free(node_p);
- cy_as_hal_enable_interrupts(state);
-}
-
-/*
-* For the context given, this function sends the next request to
-* West Bridge via the mailbox register, if the next request is
-* ready to be sent and has not already been sent.
-*/
-static void
-cy_as_ll_send_next_request(cy_as_device *dev_p, cy_as_context *ctxt_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- /*
- * ret == ret is equivalent to while (1) but eliminates compiler
- * warnings for some compilers.
- */
- while (ret == ret) {
- cy_as_ll_request_list_node *node_p = ctxt_p->request_queue_p;
- if (node_p == 0)
- break;
-
- if (cy_as_request_get_node_state(node_p) !=
- CY_AS_REQUEST_LIST_STATE_QUEUED)
- break;
-
- cy_as_request_set_node_state(node_p,
- CY_AS_REQUEST_LIST_STATE_WAITING);
- ret = cy_as_send_one(dev_p, node_p->rqt);
- if (ret == CY_AS_ERROR_SUCCESS)
- break;
-
- /*
- * if an error occurs in sending the request, tell the requester
- * about the error and remove the request from the queue.
- */
- cy_as_request_set_node_state(node_p,
- CY_AS_REQUEST_LIST_STATE_RECEIVED);
- node_p->callback(dev_p, ctxt_p->number,
- node_p->rqt, node_p->resp, ret);
- cy_as_ll_remove_request_queue_head(dev_p, ctxt_p);
-
- /*
- * this falls through to the while loop to send the next request
- * since the previous request did not get sent.
- */
- }
-}
-
-/*
-* This method removes an entry from the request queue of a given context.
-* The entry is removed only if it is not in transit.
-*/
-cy_as_remove_request_result_t
-cy_as_ll_remove_request(cy_as_device *dev_p, cy_as_context *ctxt_p,
- cy_as_ll_request_response *req_p, cy_bool force)
-{
- uint32_t imask;
- cy_as_ll_request_list_node *node_p;
- cy_as_ll_request_list_node *tmp_p;
- uint32_t state;
-
- imask = cy_as_hal_disable_interrupts();
- if (ctxt_p->request_queue_p != 0 &&
- ctxt_p->request_queue_p->rqt == req_p) {
- node_p = ctxt_p->request_queue_p;
- if ((cy_as_request_get_node_state(node_p) ==
- CY_AS_REQUEST_LIST_STATE_WAITING) && (!force)) {
- cy_as_hal_enable_interrupts(imask);
- return cy_as_remove_request_in_transit;
- }
-
- ctxt_p->request_queue_p = node_p->next;
- } else {
- tmp_p = ctxt_p->request_queue_p;
- while (tmp_p != 0 && tmp_p->next != 0 &&
- tmp_p->next->rqt != req_p)
- tmp_p = tmp_p->next;
-
- if (tmp_p == 0 || tmp_p->next == 0) {
- cy_as_hal_enable_interrupts(imask);
- return cy_as_remove_request_not_found;
- }
-
- node_p = tmp_p->next;
- tmp_p->next = node_p->next;
- }
-
- if (node_p->callback)
- node_p->callback(dev_p, ctxt_p->number, node_p->rqt,
- node_p->resp, CY_AS_ERROR_CANCELED);
-
- state = cy_as_hal_disable_interrupts();
- cy_as_hal_c_b_free(node_p);
- cy_as_hal_enable_interrupts(state);
-
- cy_as_hal_enable_interrupts(imask);
- return cy_as_remove_request_sucessful;
-}
-
-void
-cy_as_ll_remove_all_requests(cy_as_device *dev_p, cy_as_context *ctxt_p)
-{
- cy_as_ll_request_list_node *node = ctxt_p->request_queue_p;
-
- while (node) {
- if (cy_as_request_get_node_state(ctxt_p->request_queue_p) !=
- CY_AS_REQUEST_LIST_STATE_RECEIVED)
- cy_as_ll_remove_request(dev_p, ctxt_p,
- node->rqt, cy_true);
- node = node->next;
- }
-}
-
-static cy_bool
-cy_as_ll_is_in_queue(cy_as_context *ctxt_p, cy_as_ll_request_response *req_p)
-{
- uint32_t mask;
- cy_as_ll_request_list_node *node_p;
-
- mask = cy_as_hal_disable_interrupts();
- node_p = ctxt_p->request_queue_p;
- while (node_p) {
- if (node_p->rqt == req_p) {
- cy_as_hal_enable_interrupts(mask);
- return cy_true;
- }
- node_p = node_p->next;
- }
- cy_as_hal_enable_interrupts(mask);
- return cy_false;
-}
-
-/*
-* This is the handler for mailbox data when we are trying to send data
-* to the West Bridge firmware. The firmware may be trying to send us
-* data and we need to queue this data to allow the firmware to move
-* forward and be in a state to receive our request. Here we just queue
-* the data and it is processed at a later time by the mailbox interrupt
-* handler.
-*/
-void
-cy_as_ll_queue_mailbox_data(cy_as_device *dev_p)
-{
- cy_as_context *ctxt_p;
- uint8_t context;
- uint16_t data[4];
- int32_t i;
-
- /* Read the data from mailbox 0 to determine what to do with the data */
- for (i = 3; i >= 0; i--)
- data[i] = cy_as_hal_read_register(dev_p->tag,
- cy_cast_int2U_int16(CY_AS_MEM_P0_MAILBOX0 + i));
-
- context = cy_as_mbox_get_context(data[0]);
- if (context >= CY_RQT_CONTEXT_COUNT) {
- cy_as_hal_print_message("mailbox request/response received "
- "with invalid context value (%d)\n", context);
- return;
- }
-
- ctxt_p = dev_p->context[context];
-
- /*
- * if we have queued too much data, drop future data.
- */
- cy_as_hal_assert(ctxt_p->queue_index * sizeof(uint16_t) +
- sizeof(data) <= sizeof(ctxt_p->data_queue));
-
- for (i = 0; i < 4; i++)
- ctxt_p->data_queue[ctxt_p->queue_index++] = data[i];
-
- cy_as_hal_assert((ctxt_p->queue_index % 4) == 0);
- dev_p->ll_queued_data = cy_true;
-}
-
-void
-cy_as_mail_box_process_data(cy_as_device *dev_p, uint16_t *data)
-{
- cy_as_context *ctxt_p;
- uint8_t context;
- uint16_t *len_p;
- cy_as_ll_request_response *rec_p;
- uint8_t st;
- uint16_t src, dest;
-
- context = cy_as_mbox_get_context(data[0]);
- if (context >= CY_RQT_CONTEXT_COUNT) {
- cy_as_hal_print_message("mailbox request/response received "
- "with invalid context value (%d)\n", context);
- return;
- }
-
- ctxt_p = dev_p->context[context];
-
- if (cy_as_mbox_is_request(data[0])) {
- cy_as_hal_assert(ctxt_p->req_p != 0);
- rec_p = ctxt_p->req_p;
- len_p = &ctxt_p->request_length;
-
- } else {
- if (ctxt_p->request_queue_p == 0 ||
- cy_as_request_get_node_state(ctxt_p->request_queue_p)
- != CY_AS_REQUEST_LIST_STATE_WAITING) {
- cy_as_hal_print_message("mailbox response received on "
- "context that was not expecting a response\n");
- cy_as_hal_print_message(" context: %d\n", context);
- cy_as_hal_print_message(" contents: 0x%04x 0x%04x "
- "0x%04x 0x%04x\n",
- data[0], data[1], data[2], data[3]);
- if (ctxt_p->request_queue_p != 0)
- cy_as_hal_print_message(" state: 0x%02x\n",
- ctxt_p->request_queue_p->state);
- return;
- }
-
- /* Make sure the request has an associated response */
- cy_as_hal_assert(ctxt_p->request_queue_p->resp != 0);
-
- rec_p = ctxt_p->request_queue_p->resp;
- len_p = &ctxt_p->request_queue_p->length;
- }
-
- if (rec_p->stored == 0) {
- /*
- * this is the first cycle of the response
- */
- cy_as_ll_request_response__set_code(rec_p,
- cy_as_mbox_get_code(data[0]));
- cy_as_ll_request_response__set_context(rec_p, context);
-
- if (cy_as_mbox_is_last(data[0])) {
- /* This is a single cycle response */
- *len_p = rec_p->length;
- st = 1;
- } else {
- /* Ensure that enough memory has been
- * reserved for the response. */
- cy_as_hal_assert(rec_p->length >= data[1]);
- *len_p = (data[1] < rec_p->length) ?
- data[1] : rec_p->length;
- st = 2;
- }
- } else
- st = 1;
-
- /* Transfer the data from the mailboxes to the response */
- while (rec_p->stored < *len_p && st < 4)
- rec_p->data[rec_p->stored++] = data[st++];
-
- if (cy_as_mbox_is_last(data[0])) {
- /* NB: The call-back that is made below can cause the
- * addition of more data in this queue, thus causing
- * a recursive overflow of the queue. this is prevented
- * by removing the request entry that is currently
- * being passed up from the data queue. if this is done,
- * the queue only needs to be as long as two request
- * entries from west bridge.
- */
- if ((ctxt_p->rqt_index > 0) &&
- (ctxt_p->rqt_index <= ctxt_p->queue_index)) {
- dest = 0;
- src = ctxt_p->rqt_index;
-
- while (src < ctxt_p->queue_index)
- ctxt_p->data_queue[dest++] =
- ctxt_p->data_queue[src++];
-
- ctxt_p->rqt_index = 0;
- ctxt_p->queue_index = dest;
- cy_as_hal_assert((ctxt_p->queue_index % 4) == 0);
- }
-
- if (ctxt_p->request_queue_p != 0 && rec_p ==
- ctxt_p->request_queue_p->resp) {
- /*
- * if this is the last cycle of the response, call the
- * callback and reset for the next response.
- */
- cy_as_ll_request_response *resp_p =
- ctxt_p->request_queue_p->resp;
- resp_p->length = ctxt_p->request_queue_p->length;
- cy_as_request_set_node_state(ctxt_p->request_queue_p,
- CY_AS_REQUEST_LIST_STATE_RECEIVED);
-
- cy_as_device_set_in_callback(dev_p);
- ctxt_p->request_queue_p->callback(dev_p, context,
- ctxt_p->request_queue_p->rqt,
- resp_p, CY_AS_ERROR_SUCCESS);
-
- cy_as_device_clear_in_callback(dev_p);
-
- cy_as_ll_remove_request_queue_head(dev_p, ctxt_p);
- cy_as_ll_send_next_request(dev_p, ctxt_p);
- } else {
- /* Send the request to the appropriate
- * module to handle */
- cy_as_ll_request_response *request_p = ctxt_p->req_p;
- ctxt_p->req_p = 0;
- if (ctxt_p->request_callback) {
- cy_as_device_set_in_callback(dev_p);
- ctxt_p->request_callback(dev_p, context,
- request_p, 0, CY_AS_ERROR_SUCCESS);
- cy_as_device_clear_in_callback(dev_p);
- }
- cy_as_ll_init_request(request_p, 0,
- context, request_p->length);
- ctxt_p->req_p = request_p;
- }
- }
-}
-
-/*
-* This is the handler for processing queued mailbox data
-*/
-void
-cy_as_mail_box_queued_data_handler(cy_as_device *dev_p)
-{
- uint16_t i;
-
- /*
- * if more data gets queued in between our entering this call
- * and the end of the iteration on all contexts; we should
- * continue processing the queued data.
- */
- while (dev_p->ll_queued_data) {
- dev_p->ll_queued_data = cy_false;
- for (i = 0; i < CY_RQT_CONTEXT_COUNT; i++) {
- uint16_t offset;
- cy_as_context *ctxt_p = dev_p->context[i];
- cy_as_hal_assert((ctxt_p->queue_index % 4) == 0);
-
- offset = 0;
- while (offset < ctxt_p->queue_index) {
- ctxt_p->rqt_index = offset + 4;
- cy_as_mail_box_process_data(dev_p,
- ctxt_p->data_queue + offset);
- offset = ctxt_p->rqt_index;
- }
- ctxt_p->queue_index = 0;
- }
- }
-}
-
-/*
-* This is the handler for the mailbox interrupt. This function reads
-* data from the mailbox registers until a complete request or response
-* is received. When a complete request is received, the callback
-* associated with requests on that context is called. When a complete
-* response is received, the callback associated with the request that
-* generated the response is called.
-*/
-void
-cy_as_mail_box_interrupt_handler(cy_as_device *dev_p)
-{
- cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
-
- /*
- * queue the mailbox data to preserve
- * order for later processing.
- */
- cy_as_ll_queue_mailbox_data(dev_p);
-
- /*
- * process what was queued and anything that may be pending
- */
- cy_as_mail_box_queued_data_handler(dev_p);
-}
-
-cy_as_return_status_t
-cy_as_ll_start(cy_as_device *dev_p)
-{
- uint16_t i;
-
- if (cy_as_device_is_low_level_running(dev_p))
- return CY_AS_ERROR_ALREADY_RUNNING;
-
- dev_p->ll_sending_rqt = cy_false;
- dev_p->ll_abort_curr_rqt = cy_false;
-
- for (i = 0; i < CY_RQT_CONTEXT_COUNT; i++) {
- dev_p->context[i] = (cy_as_context *)
- cy_as_hal_alloc(sizeof(cy_as_context));
- if (dev_p->context[i] == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- dev_p->context[i]->number = (uint8_t)i;
- dev_p->context[i]->request_callback = 0;
- dev_p->context[i]->request_queue_p = 0;
- dev_p->context[i]->last_node_p = 0;
- dev_p->context[i]->req_p = cy_as_ll_create_request(dev_p,
- 0, (uint8_t)i, max_request_length[i]);
- dev_p->context[i]->queue_index = 0;
-
- if (!cy_as_hal_create_sleep_channel
- (&dev_p->context[i]->channel))
- return CY_AS_ERROR_CREATE_SLEEP_CHANNEL_FAILED;
- }
-
- cy_as_device_set_low_level_running(dev_p);
- return CY_AS_ERROR_SUCCESS;
-}
-
-/*
-* Shutdown the low level communications module. This operation will
-* also cancel any queued low level requests.
-*/
-cy_as_return_status_t
-cy_as_ll_stop(cy_as_device *dev_p)
-{
- uint8_t i;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_context *ctxt_p;
- uint32_t mask;
-
- for (i = 0; i < CY_RQT_CONTEXT_COUNT; i++) {
- ctxt_p = dev_p->context[i];
- if (!cy_as_hal_destroy_sleep_channel(&ctxt_p->channel))
- return CY_AS_ERROR_DESTROY_SLEEP_CHANNEL_FAILED;
-
- /*
- * now, free any queued requests and associated responses
- */
- while (ctxt_p->request_queue_p) {
- uint32_t state;
- cy_as_ll_request_list_node *node_p =
- ctxt_p->request_queue_p;
-
- /* Mark this pair as in a cancel operation */
- cy_as_request_set_node_state(node_p,
- CY_AS_REQUEST_LIST_STATE_CANCELING);
-
- /* Tell the caller that we are canceling this request */
- /* NB: The callback is responsible for destroying the
- * request and the response. we cannot count on the
- * contents of these two after calling the callback.
- */
- node_p->callback(dev_p, i, node_p->rqt,
- node_p->resp, CY_AS_ERROR_CANCELED);
-
- /* Remove the pair from the queue */
- mask = cy_as_hal_disable_interrupts();
- ctxt_p->request_queue_p = node_p->next;
- cy_as_hal_enable_interrupts(mask);
-
- /* Free the list node */
- state = cy_as_hal_disable_interrupts();
- cy_as_hal_c_b_free(node_p);
- cy_as_hal_enable_interrupts(state);
- }
-
- cy_as_ll_destroy_request(dev_p, dev_p->context[i]->req_p);
- cy_as_hal_free(dev_p->context[i]);
- dev_p->context[i] = 0;
-
- }
- cy_as_device_set_low_level_stopped(dev_p);
-
- return ret;
-}
-
-void
-cy_as_ll_init_request(cy_as_ll_request_response *req_p,
- uint16_t code, uint16_t context, uint16_t length)
-{
- uint16_t totallen = sizeof(cy_as_ll_request_response) +
- (length - 1) * sizeof(uint16_t);
-
- cy_as_hal_mem_set(req_p, 0, totallen);
- req_p->length = length;
- cy_as_ll_request_response__set_code(req_p, code);
- cy_as_ll_request_response__set_context(req_p, context);
- cy_as_ll_request_response__set_request(req_p);
-}
-
-/*
-* Create a new request.
-*/
-cy_as_ll_request_response *
-cy_as_ll_create_request(cy_as_device *dev_p, uint16_t code,
- uint8_t context, uint16_t length)
-{
- cy_as_ll_request_response *req_p;
- uint32_t state;
- uint16_t totallen = sizeof(cy_as_ll_request_response) +
- (length - 1) * sizeof(uint16_t);
-
- (void)dev_p;
-
- state = cy_as_hal_disable_interrupts();
- req_p = cy_as_hal_c_b_alloc(totallen);
- cy_as_hal_enable_interrupts(state);
- if (req_p)
- cy_as_ll_init_request(req_p, code, context, length);
-
- return req_p;
-}
-
-/*
-* Destroy a request.
-*/
-void
-cy_as_ll_destroy_request(cy_as_device *dev_p, cy_as_ll_request_response *req_p)
-{
- uint32_t state;
- (void)dev_p;
- (void)req_p;
-
- state = cy_as_hal_disable_interrupts();
- cy_as_hal_c_b_free(req_p);
- cy_as_hal_enable_interrupts(state);
-
-}
-
-void
-cy_as_ll_init_response(cy_as_ll_request_response *req_p, uint16_t length)
-{
- uint16_t totallen = sizeof(cy_as_ll_request_response) +
- (length - 1) * sizeof(uint16_t);
-
- cy_as_hal_mem_set(req_p, 0, totallen);
- req_p->length = length;
- cy_as_ll_request_response__set_response(req_p);
-}
-
-/*
-* Create a new response
-*/
-cy_as_ll_request_response *
-cy_as_ll_create_response(cy_as_device *dev_p, uint16_t length)
-{
- cy_as_ll_request_response *req_p;
- uint32_t state;
- uint16_t totallen = sizeof(cy_as_ll_request_response) +
- (length - 1) * sizeof(uint16_t);
-
- (void)dev_p;
-
- state = cy_as_hal_disable_interrupts();
- req_p = cy_as_hal_c_b_alloc(totallen);
- cy_as_hal_enable_interrupts(state);
- if (req_p)
- cy_as_ll_init_response(req_p, length);
-
- return req_p;
-}
-
-/*
-* Destroy the new response
-*/
-void
-cy_as_ll_destroy_response(cy_as_device *dev_p, cy_as_ll_request_response *req_p)
-{
- uint32_t state;
- (void)dev_p;
- (void)req_p;
-
- state = cy_as_hal_disable_interrupts();
- cy_as_hal_c_b_free(req_p);
- cy_as_hal_enable_interrupts(state);
-}
-
-static uint16_t
-cy_as_read_intr_status(
- cy_as_device *dev_p)
-{
- uint32_t mask;
- cy_bool bloop = cy_true;
- uint16_t v = 0, last = 0xffff;
-
- /*
- * before determining if the mailboxes are ready for more data,
- * we first check the mailbox interrupt to see if we need to
- * receive data. this prevents a dead-lock condition that can
- * occur when both sides are trying to receive data.
- */
- while (last == last) {
- /*
- * disable interrupts to be sure we don't process the mailbox
- * here and have the interrupt routine try to read this data
- * as well.
- */
- mask = cy_as_hal_disable_interrupts();
-
- /*
- * see if there is data to be read.
- */
- v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_INTR_REG);
- if ((v & CY_AS_MEM_P0_INTR_REG_MBINT) == 0) {
- cy_as_hal_enable_interrupts(mask);
- break;
- }
-
- /*
- * queue the mailbox data for later processing.
- * this allows the firmware to move forward and
- * service the request from the P port.
- */
- cy_as_ll_queue_mailbox_data(dev_p);
-
- /*
- * enable interrupts again to service mailbox
- * interrupts appropriately
- */
- cy_as_hal_enable_interrupts(mask);
- }
-
- /*
- * now, all data is received
- */
- last = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_MCU_MB_STAT) & CY_AS_MEM_P0_MCU_MBNOTRD;
- while (bloop) {
- v = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_MCU_MB_STAT) & CY_AS_MEM_P0_MCU_MBNOTRD;
- if (v == last)
- break;
-
- last = v;
- }
-
- return v;
-}
-
-/*
-* Send a single request or response using the mail box register.
-* This function does not deal with the internal queues at all,
-* but only sends the request or response across to the firmware
-*/
-static cy_as_return_status_t
-cy_as_send_one(
- cy_as_device *dev_p,
- cy_as_ll_request_response *req_p)
-{
- int i;
- uint16_t mb0, v;
- int32_t loopcount;
- uint32_t int_stat;
-
-#ifdef _DEBUG
- if (cy_as_ll_request_response__is_request(req_p)) {
- switch (cy_as_ll_request_response__get_context(req_p)) {
- case CY_RQT_GENERAL_RQT_CONTEXT:
- cy_as_hal_assert(req_p->length * 2 + 2 <
- CY_CTX_GEN_MAX_DATA_SIZE);
- break;
-
- case CY_RQT_RESOURCE_RQT_CONTEXT:
- cy_as_hal_assert(req_p->length * 2 + 2 <
- CY_CTX_RES_MAX_DATA_SIZE);
- break;
-
- case CY_RQT_STORAGE_RQT_CONTEXT:
- cy_as_hal_assert(req_p->length * 2 + 2 <
- CY_CTX_STR_MAX_DATA_SIZE);
- break;
-
- case CY_RQT_USB_RQT_CONTEXT:
- cy_as_hal_assert(req_p->length * 2 + 2 <
- CY_CTX_USB_MAX_DATA_SIZE);
- break;
- }
- }
-#endif
-
- /* Write the request to the mail box registers */
- if (req_p->length > 3) {
- uint16_t length = req_p->length;
- int which = 0;
- int st = 1;
-
- dev_p->ll_sending_rqt = cy_true;
- while (which < length) {
- loopcount = cy_as_low_level_timeout_count;
- do {
- v = cy_as_read_intr_status(dev_p);
-
- } while (v && loopcount-- > 0);
-
- if (v) {
- cy_as_hal_print_message(
- ">>>>>> LOW LEVEL TIMEOUT "
- "%x %x %x %x\n",
- cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_MCU_MAILBOX0),
- cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_MCU_MAILBOX1),
- cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_MCU_MAILBOX2),
- cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_MCU_MAILBOX3));
- return CY_AS_ERROR_TIMEOUT;
- }
-
- if (dev_p->ll_abort_curr_rqt) {
- dev_p->ll_sending_rqt = cy_false;
- dev_p->ll_abort_curr_rqt = cy_false;
- return CY_AS_ERROR_CANCELED;
- }
-
- int_stat = cy_as_hal_disable_interrupts();
-
- /*
- * check again whether the mailbox is free.
- * it is possible that an ISR came in and
- * wrote into the mailboxes since we last
- * checked the status.
- */
- v = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_MCU_MB_STAT) &
- CY_AS_MEM_P0_MCU_MBNOTRD;
- if (v) {
- /* Go back to the original check since
- * the mailbox is not free. */
- cy_as_hal_enable_interrupts(int_stat);
- continue;
- }
-
- if (which == 0) {
- cy_as_hal_write_register(dev_p->tag,
- CY_AS_MEM_MCU_MAILBOX1, length);
- st = 2;
- } else {
- st = 1;
- }
-
- while ((which < length) && (st < 4)) {
- cy_as_hal_write_register(dev_p->tag,
- cy_cast_int2U_int16
- (CY_AS_MEM_MCU_MAILBOX0 + st),
- req_p->data[which++]);
- st++;
- }
-
- mb0 = req_p->box0;
- if (which == length) {
- dev_p->ll_sending_rqt = cy_false;
- mb0 |= CY_AS_REQUEST_RESPONSE_LAST_MASK;
- }
-
- if (dev_p->ll_abort_curr_rqt) {
- dev_p->ll_sending_rqt = cy_false;
- dev_p->ll_abort_curr_rqt = cy_false;
- cy_as_hal_enable_interrupts(int_stat);
- return CY_AS_ERROR_CANCELED;
- }
-
- cy_as_hal_write_register(dev_p->tag,
- CY_AS_MEM_MCU_MAILBOX0, mb0);
-
- /* Wait for the MBOX interrupt to be high */
- cy_as_hal_sleep150();
- cy_as_hal_enable_interrupts(int_stat);
- }
- } else {
-check_mailbox_availability:
- /*
- * wait for the mailbox registers to become available. this
- * should be a very quick wait as the firmware is designed
- * to accept requests at interrupt time and queue them for
- * future processing.
- */
- loopcount = cy_as_low_level_timeout_count;
- do {
- v = cy_as_read_intr_status(dev_p);
-
- } while (v && loopcount-- > 0);
-
- if (v) {
- cy_as_hal_print_message(
- ">>>>>> LOW LEVEL TIMEOUT %x %x %x %x\n",
- cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_MCU_MAILBOX0),
- cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_MCU_MAILBOX1),
- cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_MCU_MAILBOX2),
- cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_MCU_MAILBOX3));
- return CY_AS_ERROR_TIMEOUT;
- }
-
- int_stat = cy_as_hal_disable_interrupts();
-
- /*
- * check again whether the mailbox is free. it is
- * possible that an ISR came in and wrote into the
- * mailboxes since we last checked the status.
- */
- v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MB_STAT) &
- CY_AS_MEM_P0_MCU_MBNOTRD;
- if (v) {
- /* Go back to the original check
- * since the mailbox is not free. */
- cy_as_hal_enable_interrupts(int_stat);
- goto check_mailbox_availability;
- }
-
- /* Write the data associated with the request
- * into the mbox registers 1 - 3 */
- v = 0;
- for (i = req_p->length - 1; i >= 0; i--)
- cy_as_hal_write_register(dev_p->tag,
- cy_cast_int2U_int16(CY_AS_MEM_MCU_MAILBOX1 + i),
- req_p->data[i]);
-
- /* Write the mbox register 0 to trigger the interrupt */
- cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_MCU_MAILBOX0,
- req_p->box0 | CY_AS_REQUEST_RESPONSE_LAST_MASK);
-
- cy_as_hal_sleep150();
- cy_as_hal_enable_interrupts(int_stat);
- }
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-/*
-* This function queues a single request to be sent to the firmware.
-*/
-extern cy_as_return_status_t
-cy_as_ll_send_request(
- cy_as_device *dev_p,
- /* The request to send */
- cy_as_ll_request_response *req,
- /* Storage for a reply, must be sure
- * it is of sufficient size */
- cy_as_ll_request_response *resp,
- /* If true, this is a synchronous request */
- cy_bool sync,
- /* Callback to call when reply is received */
- cy_as_response_callback cb
-)
-{
- cy_as_context *ctxt_p;
- uint16_t box0 = req->box0;
- uint8_t context;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_ll_request_list_node *node_p;
- uint32_t mask, state;
-
- cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
-
- context = cy_as_mbox_get_context(box0);
- cy_as_hal_assert(context < CY_RQT_CONTEXT_COUNT);
- ctxt_p = dev_p->context[context];
-
- /* Allocate the list node */
- state = cy_as_hal_disable_interrupts();
- node_p = cy_as_hal_c_b_alloc(sizeof(cy_as_ll_request_list_node));
- cy_as_hal_enable_interrupts(state);
-
- if (node_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- /* Initialize the list node */
- node_p->callback = cb;
- node_p->length = 0;
- node_p->next = 0;
- node_p->resp = resp;
- node_p->rqt = req;
- node_p->state = CY_AS_REQUEST_LIST_STATE_QUEUED;
- if (sync)
- cy_as_request_node_set_sync(node_p);
-
- /* Put the request into the queue */
- mask = cy_as_hal_disable_interrupts();
- if (ctxt_p->request_queue_p == 0) {
- /* Empty queue */
- ctxt_p->request_queue_p = node_p;
- ctxt_p->last_node_p = node_p;
- } else {
- ctxt_p->last_node_p->next = node_p;
- ctxt_p->last_node_p = node_p;
- }
- cy_as_hal_enable_interrupts(mask);
- cy_as_ll_send_next_request(dev_p, ctxt_p);
-
- if (!cy_as_device_is_in_callback(dev_p)) {
- mask = cy_as_hal_disable_interrupts();
- cy_as_mail_box_queued_data_handler(dev_p);
- cy_as_hal_enable_interrupts(mask);
- }
-
- return ret;
-}
-
-static void
-cy_as_ll_send_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t ret)
-{
- (void)rqt;
- (void)resp;
- (void)ret;
-
-
- cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
-
- /*
- * store the status to return to the caller
- */
- dev_p->ll_error = ret;
-
- /*
- * now wake the caller
- */
- cy_as_hal_wake(&dev_p->context[context]->channel);
-}
-
-cy_as_return_status_t
-cy_as_ll_send_request_wait_reply(
- cy_as_device *dev_p,
- /* The request to send */
- cy_as_ll_request_response *req,
- /* Storage for a reply, must be
- * sure it is of sufficient size */
- cy_as_ll_request_response *resp
- )
-{
- cy_as_return_status_t ret;
- uint8_t context;
- /* Larger 8 sec time-out to handle the init
- * delay for slower storage devices in USB FS. */
- uint32_t loopcount = 800;
- cy_as_context *ctxt_p;
-
- /* Get the context for the request */
- context = cy_as_ll_request_response__get_context(req);
- cy_as_hal_assert(context < CY_RQT_CONTEXT_COUNT);
- ctxt_p = dev_p->context[context];
-
- ret = cy_as_ll_send_request(dev_p, req, resp,
- cy_true, cy_as_ll_send_callback);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- while (loopcount-- > 0) {
- /*
- * sleep while we wait on the response. receiving the reply will
- * wake this thread. we will wait, at most 8 seconds (10 ms * 800
- * tries) before we time out. note if the reply arrives, we will
- * not sleep the entire 10 ms, just until the reply arrives.
- */
- cy_as_hal_sleep_on(&ctxt_p->channel, 10);
-
- /*
- * if the request has left the queue, it means the request has
- * been sent and the reply has been received. this means we can
- * return to the caller and be sure the reply has been received.
- */
- if (!cy_as_ll_is_in_queue(ctxt_p, req))
- return dev_p->ll_error;
- }
-
- /* Remove the QueueListNode for this request. */
- cy_as_ll_remove_request(dev_p, ctxt_p, req, cy_true);
-
- return CY_AS_ERROR_TIMEOUT;
-}
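
cy_as_ll_send_request_wait_reply() above turns the asynchronous send into a synchronous one by sleeping in 10 ms slices until either the request leaves the context queue or roughly 8 seconds (800 tries) elapse. A generic sketch of that poll-with-timeout pattern; the channel and sleep primitives stand in for the HAL calls and are not the real API:

#include <stdbool.h>

struct channel;
/* Sleeps up to slice_ms, returning early if the channel is woken. */
extern void sleep_on(struct channel *ch, unsigned int slice_ms);
/* True while the request is still queued (no reply yet). */
extern bool request_still_queued(void);

static int wait_for_reply(struct channel *ch, unsigned int tries,
			  unsigned int slice_ms)
{
	while (tries-- > 0) {
		sleep_on(ch, slice_ms);
		if (!request_still_queued())
			return 0;	/* reply received */
	}
	return -1;			/* timed out */
}
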
-
-cy_as_return_status_t
-cy_as_ll_register_request_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_response_callback cb)
-{
- cy_as_context *ctxt_p;
- cy_as_hal_assert(context < CY_RQT_CONTEXT_COUNT);
- ctxt_p = dev_p->context[context];
-
- ctxt_p->request_callback = cb;
- return CY_AS_ERROR_SUCCESS;
-}
-
-void
-cy_as_ll_request_response__pack(
- cy_as_ll_request_response *req_p,
- uint32_t offset,
- uint32_t length,
- void *data_p)
-{
- uint16_t dt;
- uint8_t *dp = (uint8_t *)data_p;
-
- while (length > 1) {
- dt = ((*dp++) << 8);
- dt |= (*dp++);
- cy_as_ll_request_response__set_word(req_p, offset, dt);
- offset++;
- length -= 2;
- }
-
- if (length == 1) {
- dt = (*dp << 8);
- cy_as_ll_request_response__set_word(req_p, offset, dt);
- }
-}
-
-void
-cy_as_ll_request_response__unpack(
- cy_as_ll_request_response *req_p,
- uint32_t offset,
- uint32_t length,
- void *data_p)
-{
- uint8_t *dp = (uint8_t *)data_p;
-
- while (length-- > 0) {
- uint16_t val = cy_as_ll_request_response__get_word
- (req_p, offset++);
- *dp++ = (uint8_t)((val >> 8) & 0xff);
-
- if (length) {
- length--;
- *dp++ = (uint8_t)(val & 0xff);
- }
- }
-}
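
The pack/unpack helpers above place payload bytes big-endian within each 16-bit mailbox word, with an odd trailing byte occupying the high half of the last word. A standalone sketch of the same byte order over plain arrays, without the request/response accessor macros; the function names are illustrative:

#include <stdint.h>

/* Pack bytes into 16-bit words, high byte first. */
static void pack_words(uint16_t *words, const uint8_t *bytes, uint32_t len)
{
	uint32_t w = 0;

	while (len > 1) {
		words[w++] = (uint16_t)((bytes[0] << 8) | bytes[1]);
		bytes += 2;
		len -= 2;
	}
	if (len == 1)
		words[w] = (uint16_t)(bytes[0] << 8);
}

/* Reverse of the above: recover len bytes from the word stream. */
static void unpack_words(const uint16_t *words, uint8_t *bytes, uint32_t len)
{
	uint32_t w = 0;

	while (len-- > 0) {
		uint16_t val = words[w++];

		*bytes++ = (uint8_t)(val >> 8);
		if (len) {
			len--;
			*bytes++ = (uint8_t)(val & 0xff);
		}
	}
}
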
-
-extern cy_as_return_status_t
-cy_as_ll_send_status_response(
- cy_as_device *dev_p,
- uint8_t context,
- uint16_t code,
- uint8_t clear_storage)
-{
- cy_as_return_status_t ret;
- cy_as_ll_request_response resp;
- cy_as_ll_request_response *resp_p = &resp;
-
- cy_as_hal_mem_set(resp_p, 0, sizeof(resp));
- resp_p->length = 1;
- cy_as_ll_request_response__set_response(resp_p);
- cy_as_ll_request_response__set_context(resp_p, context);
-
- if (clear_storage)
- cy_as_ll_request_response__set_clear_storage_flag(resp_p);
-
- cy_as_ll_request_response__set_code(resp_p, CY_RESP_SUCCESS_FAILURE);
- cy_as_ll_request_response__set_word(resp_p, 0, code);
-
- ret = cy_as_send_one(dev_p, resp_p);
-
- return ret;
-}
-
-extern cy_as_return_status_t
-cy_as_ll_send_data_response(
- cy_as_device *dev_p,
- uint8_t context,
- uint16_t code,
- uint16_t length,
- void *data)
-{
- cy_as_ll_request_response *resp_p;
- uint16_t wlen;
- uint8_t respbuf[256];
-
- if (length > 192)
- return CY_AS_ERROR_INVALID_SIZE;
-
- /* Word length for bytes */
- wlen = length / 2;
-
- /* If byte length odd, add one more */
- if (length % 2)
- wlen++;
-
- /* One word for the length field */
- wlen++;
-
- resp_p = (cy_as_ll_request_response *)respbuf;
- cy_as_hal_mem_set(resp_p, 0, sizeof(respbuf));
- resp_p->length = wlen;
- cy_as_ll_request_response__set_context(resp_p, context);
- cy_as_ll_request_response__set_code(resp_p, code);
-
- cy_as_ll_request_response__set_word(resp_p, 0, length);
- cy_as_ll_request_response__pack(resp_p, 1, length, data);
-
- return cy_as_send_one(dev_p, resp_p);
-}
-
-static cy_bool
-cy_as_ll_is_e_p_transfer_related_request(cy_as_ll_request_response *rqt_p,
- cy_as_end_point_number_t ep)
-{
- uint16_t v;
- uint8_t type = cy_as_ll_request_response__get_code(rqt_p);
-
- if (cy_as_ll_request_response__get_context(rqt_p) !=
- CY_RQT_USB_RQT_CONTEXT)
- return cy_false;
-
- /*
- * when cancelling outstanding EP0 data transfers, any pending
- * setup ACK requests also need to be cancelled.
- */
- if ((ep == 0) && (type == CY_RQT_ACK_SETUP_PACKET))
- return cy_true;
-
- if (type != CY_RQT_USB_EP_DATA)
- return cy_false;
-
- v = cy_as_ll_request_response__get_word(rqt_p, 0);
- if ((cy_as_end_point_number_t)((v >> 13) & 1) != ep)
- return cy_false;
-
- return cy_true;
-}
-
-cy_as_return_status_t
-cy_as_ll_remove_ep_data_requests(cy_as_device *dev_p,
- cy_as_end_point_number_t ep)
-{
- cy_as_context *ctxt_p;
- cy_as_ll_request_list_node *node_p;
- uint32_t imask;
-
- /*
- * first, remove any queued requests
- */
- ctxt_p = dev_p->context[CY_RQT_USB_RQT_CONTEXT];
- if (ctxt_p) {
- for (node_p = ctxt_p->request_queue_p; node_p;
- node_p = node_p->next) {
- if (cy_as_ll_is_e_p_transfer_related_request
- (node_p->rqt, ep)) {
- cy_as_ll_remove_request(dev_p, ctxt_p,
- node_p->rqt, cy_false);
- break;
- }
- }
-
- /*
- * now, deal with any request that may be in transit
- */
- imask = cy_as_hal_disable_interrupts();
-
- if (ctxt_p->request_queue_p != 0 &&
- cy_as_ll_is_e_p_transfer_related_request
- (ctxt_p->request_queue_p->rqt, ep) &&
- cy_as_request_get_node_state(ctxt_p->request_queue_p) ==
- CY_AS_REQUEST_LIST_STATE_WAITING) {
- cy_as_hal_print_message("need to remove an in-transit "
- "request to antioch\n");
-
- /*
- * if the request has not been fully sent to west bridge
- * yet, abort sending. otherwise, terminate the request
- * with a CANCELED status. firmware will already have
- * terminated this transfer.
- */
- if (dev_p->ll_sending_rqt)
- dev_p->ll_abort_curr_rqt = cy_true;
- else {
- uint32_t state;
-
- node_p = ctxt_p->request_queue_p;
- if (node_p->callback)
- node_p->callback(dev_p, ctxt_p->number,
- node_p->rqt, node_p->resp,
- CY_AS_ERROR_CANCELED);
-
- ctxt_p->request_queue_p = node_p->next;
- state = cy_as_hal_disable_interrupts();
- cy_as_hal_c_b_free(node_p);
- cy_as_hal_enable_interrupts(state);
- }
- }
-
- cy_as_hal_enable_interrupts(imask);
- }
-
- return CY_AS_ERROR_SUCCESS;
-}
diff --git a/drivers/staging/westbridge/astoria/api/src/cyasmisc.c b/drivers/staging/westbridge/astoria/api/src/cyasmisc.c
deleted file mode 100644
index 4564fc11df2..00000000000
--- a/drivers/staging/westbridge/astoria/api/src/cyasmisc.c
+++ /dev/null
@@ -1,3488 +0,0 @@
-/* Cypress West Bridge API source file (cyasmisc.c)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#include "../../include/linux/westbridge/cyashal.h"
-#include "../../include/linux/westbridge/cyasmisc.h"
-#include "../../include/linux/westbridge/cyasdma.h"
-#include "../../include/linux/westbridge/cyasintr.h"
-#include "../../include/linux/westbridge/cyaserr.h"
-#include "../../include/linux/westbridge/cyasregs.h"
-#include "../../include/linux/westbridge/cyaslowlevel.h"
-#include "../../include/linux/westbridge/cyasprotocol.h"
-
-/*
-* The device list, the only global in the API
-*/
-static cy_as_device *g_device_list;
-
-/*
- * The current debug level
- */
-static uint8_t debug_level;
-
-/*
- * This function sets the debug level for the API
- *
- */
-void
-cy_as_misc_set_log_level(uint8_t level)
-{
- debug_level = level;
-}
-
-#ifdef CY_AS_LOG_SUPPORT
-
-/*
- * This function is a low level logger for the API.
- */
-void
-cy_as_log_debug_message(int level, const char *str)
-{
- if (level <= debug_level)
- cy_as_hal_print_message("log %d: %s\n", level, str);
-}
-
-#endif
-
-#define cy_as_check_device_ready(dev_p) \
-{\
- if (!(dev_p) || ((dev_p)->sig != \
- CY_AS_DEVICE_HANDLE_SIGNATURE)) \
- return CY_AS_ERROR_INVALID_HANDLE; \
-\
- if (!cy_as_device_is_configured(dev_p)) \
- return CY_AS_ERROR_NOT_CONFIGURED; \
-\
- if (!cy_as_device_is_firmware_loaded(dev_p))\
- return CY_AS_ERROR_NO_FIRMWARE; \
-}
-
-/* Find an West Bridge device based on a TAG */
-cy_as_device *
-cy_as_device_find_from_tag(cy_as_hal_device_tag tag)
-{
- cy_as_device *dev_p;
-
- for (dev_p = g_device_list; dev_p != 0; dev_p = dev_p->next_p) {
- if (dev_p->tag == tag)
- return dev_p;
- }
-
- return 0;
-}
-
-/* Map a pre-V1.2 media type to the V1.2+ bus number */
-static void
-cy_as_bus_from_media_type(cy_as_media_type type,
- cy_as_bus_number_t *bus)
-{
- if (type == cy_as_media_nand)
- *bus = 0;
- else
- *bus = 1;
-}
-
-static cy_as_return_status_t
-my_handle_response_no_data(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE)
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- else
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
-
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-/*
-* Create a new West Bridge device
-*/
-cy_as_return_status_t
-cy_as_misc_create_device(cy_as_device_handle *handle_p,
- cy_as_hal_device_tag tag)
-{
- cy_as_device *dev_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- cy_as_log_debug_message(6, "cy_as_misc_create_device called");
-
- dev_p = (cy_as_device *)cy_as_hal_alloc(sizeof(cy_as_device));
- if (dev_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
- cy_as_hal_mem_set(dev_p, 0, sizeof(cy_as_device));
-
- /*
- * dynamically allocating this buffer to ensure that it is
- * word aligned.
- */
- dev_p->usb_ep_data = (uint8_t *)cy_as_hal_alloc(64 * sizeof(uint8_t));
- if (dev_p->usb_ep_data == 0) {
- cy_as_hal_free(dev_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- dev_p->sig = CY_AS_DEVICE_HANDLE_SIGNATURE;
- dev_p->tag = tag;
- dev_p->usb_max_tx_size = 0x40;
-
- dev_p->storage_write_endpoint = CY_AS_P2S_WRITE_ENDPOINT;
- dev_p->storage_read_endpoint = CY_AS_P2S_READ_ENDPOINT;
-
- dev_p->func_cbs_misc = cy_as_create_c_b_queue(CYAS_FUNC_CB);
- if (dev_p->func_cbs_misc == 0)
- goto destroy;
-
- dev_p->func_cbs_res = cy_as_create_c_b_queue(CYAS_FUNC_CB);
- if (dev_p->func_cbs_res == 0)
- goto destroy;
-
- dev_p->func_cbs_stor = cy_as_create_c_b_queue(CYAS_FUNC_CB);
- if (dev_p->func_cbs_stor == 0)
- goto destroy;
-
- dev_p->func_cbs_usb = cy_as_create_c_b_queue(CYAS_FUNC_CB);
- if (dev_p->func_cbs_usb == 0)
- goto destroy;
-
- dev_p->func_cbs_mtp = cy_as_create_c_b_queue(CYAS_FUNC_CB);
- if (dev_p->func_cbs_mtp == 0)
- goto destroy;
-
- /*
- * allocate memory for the DMA module here. it is then marked idle, and
- * will be activated when cy_as_misc_configure_device is called.
- */
- ret = cy_as_dma_start(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- cy_as_device_set_dma_stopped(dev_p);
-
- /*
- * allocate memory for the low level module here. this module is also
- * activated only when cy_as_misc_configure_device is called.
- */
- ret = cy_as_ll_start(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- cy_as_device_set_low_level_stopped(dev_p);
-
- dev_p->next_p = g_device_list;
- g_device_list = dev_p;
-
- *handle_p = dev_p;
- cy_as_hal_init_dev_registers(tag, cy_false);
- return CY_AS_ERROR_SUCCESS;
-
-destroy:
- /* Free any queues that were successfully allocated. */
- if (dev_p->func_cbs_misc)
- cy_as_destroy_c_b_queue(dev_p->func_cbs_misc);
-
- if (dev_p->func_cbs_res)
- cy_as_destroy_c_b_queue(dev_p->func_cbs_res);
-
- if (dev_p->func_cbs_stor)
- cy_as_destroy_c_b_queue(dev_p->func_cbs_stor);
-
- if (dev_p->func_cbs_usb)
- cy_as_destroy_c_b_queue(dev_p->func_cbs_usb);
-
- if (dev_p->func_cbs_mtp)
- cy_as_destroy_c_b_queue(dev_p->func_cbs_mtp);
-
- cy_as_hal_free(dev_p->usb_ep_data);
- cy_as_hal_free(dev_p);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
- else
- return CY_AS_ERROR_OUT_OF_MEMORY;
-}
-
-/*
-* Destroy an existing West Bridge device
-*/
-cy_as_return_status_t
-cy_as_misc_destroy_device(cy_as_device_handle handle)
-{
- cy_as_return_status_t ret;
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_misc_destroy_device called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- /*
- * if the USB stack is still running,
- * it must be stopped first
- */
- if (dev_p->usb_count > 0)
- return CY_AS_ERROR_STILL_RUNNING;
-
- /*
- * if the STORAGE stack is still running,
- * it must be stopped first
- */
- if (dev_p->storage_count > 0)
- return CY_AS_ERROR_STILL_RUNNING;
-
- if (cy_as_device_is_intr_running(dev_p))
- ret = cy_as_intr_stop(dev_p);
-
- ret = cy_as_ll_stop(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_intr_start(dev_p, dev_p->use_int_drq);
- return ret;
- }
-
- ret = cy_as_dma_stop(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_intr_start(dev_p, dev_p->use_int_drq);
- return ret;
- }
-
- /* Reset the West Bridge device. */
- cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_RST_CTRL_REG,
- CY_AS_MEM_RST_CTRL_REG_HARD);
-
- /*
- * remove the device from the device list
- */
- if (g_device_list == dev_p) {
- g_device_list = dev_p->next_p;
- } else {
- cy_as_device *tmp_p = g_device_list;
- while (tmp_p && tmp_p->next_p != dev_p)
- tmp_p = tmp_p->next_p;
-
- cy_as_hal_assert(tmp_p != 0);
- tmp_p->next_p = dev_p->next_p;
- }
-
- /*
- * reset the signature so this will not be detected
- * as a valid handle
- */
- dev_p->sig = 0;
-
- cy_as_destroy_c_b_queue(dev_p->func_cbs_misc);
- cy_as_destroy_c_b_queue(dev_p->func_cbs_res);
- cy_as_destroy_c_b_queue(dev_p->func_cbs_stor);
- cy_as_destroy_c_b_queue(dev_p->func_cbs_usb);
- cy_as_destroy_c_b_queue(dev_p->func_cbs_mtp);
-
- /*
- * free the memory associated with the device
- */
- cy_as_hal_free(dev_p->usb_ep_data);
- cy_as_hal_free(dev_p);
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-/*
-* Determine the endian mode for the processor we are
-* running on, then set the endian mode register
-*/
-static void
-cy_as_setup_endian_mode(cy_as_device *dev_p)
-{
- /*
-	 * In general, we always set west bridge into the little
- * endian mode. this causes the data on bit 0 internally
- * to come out on data line 0 externally and it is generally
- * what we want regardless of the endian mode of the
- * processor. this capability in west bridge should be
- * labeled as a "SWAP" capability and can be used to swap the
- * bytes of data in and out of west bridge. this is
- * useful if there is DMA hardware that requires this for some
- * reason I cannot imagine at this time. basically if the
- * wires are connected correctly, we should never need to
- * change the endian-ness of west bridge.
- */
- cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_ENDIAN,
- CY_AS_LITTLE_ENDIAN);
-}
-
-/*
-* Query the West Bridge device and determine if we are in standby mode
-*/
-cy_as_return_status_t
-cy_as_misc_in_standby(cy_as_device_handle handle, cy_bool *standby)
-{
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_misc_in_standby called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
-	if (cy_as_device_is_pin_standby(dev_p) ||
-		cy_as_device_is_register_standby(dev_p))
-		*standby = cy_true;
-	else
-		*standby = cy_false;
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-static void
-cy_as_misc_func_callback(cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t ret);
-
-
-static void
-my_misc_callback(cy_as_device *dev_p, uint8_t context,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *resp_p,
- cy_as_return_status_t ret)
-{
- (void)resp_p;
- (void)context;
- (void)ret;
-
- switch (cy_as_ll_request_response__get_code(req_p)) {
- case CY_RQT_INITIALIZATION_COMPLETE:
- {
- uint16_t v;
-
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_GENERAL_RQT_CONTEXT,
- CY_AS_ERROR_SUCCESS, 0);
- cy_as_device_set_firmware_loaded(dev_p);
-
- if (cy_as_device_is_waking(dev_p)) {
- /*
- * this is a callback from a
- * cy_as_misc_leave_standby()
- * request. in this case we call
- * the standby callback and clear
- * the waking state.
- */
- if (dev_p->misc_event_cb)
- dev_p->misc_event_cb(
- (cy_as_device_handle)dev_p,
- cy_as_event_misc_awake, 0);
- cy_as_device_clear_waking(dev_p);
- } else {
- v = cy_as_ll_request_response__get_word
- (req_p, 3);
-
- /*
- * store the media supported on
- * each of the device buses.
- */
- dev_p->media_supported[0] =
- (uint8_t)(v & 0xFF);
- dev_p->media_supported[1] =
- (uint8_t)((v >> 8) & 0xFF);
-
- v = cy_as_ll_request_response__get_word
- (req_p, 4);
-
- dev_p->is_mtp_firmware =
- (cy_bool)((v >> 8) & 0xFF);
-
- if (dev_p->misc_event_cb)
- dev_p->misc_event_cb(
- (cy_as_device_handle)dev_p,
- cy_as_event_misc_initialized, 0);
- }
-
- v = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_P0_VM_SET);
-
- if (v & CY_AS_MEM_P0_VM_SET_CFGMODE)
- cy_as_hal_print_message(
- "initialization message "
- "received, but config bit "
- "still set\n");
-
- v = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_RST_CTRL_REG);
- if ((v & CY_AS_MEM_RST_RSTCMPT) == 0)
- cy_as_hal_print_message(
- "initialization message "
- "received, but reset complete "
- "bit still not set\n");
- }
- break;
-
- case CY_RQT_OUT_OF_SUSPEND:
- cy_as_ll_send_status_response(dev_p, CY_RQT_GENERAL_RQT_CONTEXT,
- CY_AS_ERROR_SUCCESS, 0);
- cy_as_device_clear_suspend_mode(dev_p);
-
- /*
- * if the wakeup was caused by an async cy_as_misc_leave_suspend
- * call, we have to call the corresponding callback.
- */
- if (dev_p->func_cbs_misc->count > 0) {
- cy_as_func_c_b_node *node = (cy_as_func_c_b_node *)
- dev_p->func_cbs_misc->head_p;
- cy_as_hal_assert(node);
-
- if (cy_as_funct_c_b_type_get_type(node->data_type) ==
- CY_FUNCT_CB_MISC_LEAVESUSPEND) {
- cy_as_hal_assert(node->cb_p != 0);
-
- node->cb_p((cy_as_device_handle)dev_p,
- CY_AS_ERROR_SUCCESS, node->client_data,
- CY_FUNCT_CB_MISC_LEAVESUSPEND, 0);
- cy_as_remove_c_b_node(dev_p->func_cbs_misc);
- }
- }
-
- if (dev_p->misc_event_cb)
- dev_p->misc_event_cb((cy_as_device_handle)dev_p,
- cy_as_event_misc_wakeup, 0);
- break;
-
- case CY_RQT_DEBUG_MESSAGE:
- if ((req_p->data[0] == 0) && (req_p->data[1] == 0) &&
- (req_p->data[2] == 0)) {
- if (dev_p->misc_event_cb)
- dev_p->misc_event_cb((cy_as_device_handle)dev_p,
- cy_as_event_misc_heart_beat, 0);
- } else {
- cy_as_hal_print_message(
- "**** debug message: %02x "
- "%02x %02x %02x %02x %02x\n",
- req_p->data[0] & 0xff,
- (req_p->data[0] >> 8) & 0xff,
- req_p->data[1] & 0xff,
- (req_p->data[1] >> 8) & 0xff,
- req_p->data[2] & 0xff,
- (req_p->data[2] >> 8) & 0xff);
- }
- break;
-
- case CY_RQT_WB_DEVICE_MISMATCH:
- {
- if (dev_p->misc_event_cb)
- dev_p->misc_event_cb((cy_as_device_handle)dev_p,
- cy_as_event_misc_device_mismatch, 0);
- }
- break;
-
- case CY_RQT_BOOTLOAD_NO_FIRMWARE:
- {
- /* TODO Handle case when firmware is
- * not found during bootloading. */
- cy_as_hal_print_message("no firmware image found "
- "during bootload. device not started\n");
- }
- break;
-
- default:
- cy_as_hal_assert(0);
- }
-}
-
-static cy_bool
-is_valid_silicon_id(uint16_t v)
-{
- cy_bool idok = cy_false;
-
- /*
- * remove the revision number from the ID value
- */
- v = v & CY_AS_MEM_CM_WB_CFG_ID_HDID_MASK;
-
- /*
- * if this is west bridge, then we are OK.
- */
- if (v == CY_AS_MEM_CM_WB_CFG_ID_HDID_ANTIOCH_VALUE ||
- v == CY_AS_MEM_CM_WB_CFG_ID_HDID_ASTORIA_FPGA_VALUE ||
- v == CY_AS_MEM_CM_WB_CFG_ID_HDID_ASTORIA_VALUE)
- idok = cy_true;
-
- return idok;
-}
-
-/*
-* Configure the West Bridge device hardware
-*/
-cy_as_return_status_t
-cy_as_misc_configure_device(cy_as_device_handle handle,
- cy_as_device_config *config_p)
-{
- cy_as_return_status_t ret;
- cy_bool standby;
- cy_as_device *dev_p;
- uint16_t v;
- uint16_t fw_present;
- cy_as_log_debug_message(6, "cy_as_misc_configure_device called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
-	/* Set up the endian mode (always little endian) */
- cy_as_setup_endian_mode(dev_p);
-
- /* Now, confirm that we can talk to the West Bridge device */
- dev_p->silicon_id = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_CM_WB_CFG_ID);
- fw_present = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_RST_CTRL_REG);
- if (!(fw_present & CY_AS_MEM_RST_RSTCMPT)) {
- if (!is_valid_silicon_id(dev_p->silicon_id))
- return CY_AS_ERROR_NO_ANTIOCH;
- }
- /* Check for standby mode */
- ret = cy_as_misc_in_standby(handle, &standby);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-	if (standby)
-		return CY_AS_ERROR_IN_STANDBY;
-
- /* Setup P-port interface mode (CRAM / SRAM). */
-	if (cy_as_device_is_astoria_dev(dev_p)) {
-		if (config_p->srammode)
-			v = CY_AS_MEM_P0_VM_SET_VMTYPE_SRAM;
-		else
-			v = CY_AS_MEM_P0_VM_SET_VMTYPE_RAM;
-	} else {
-		v = CY_AS_MEM_P0_VM_SET_VMTYPE_RAM;
-	}
-
- /* Setup synchronous versus asynchronous mode */
- if (config_p->sync)
- v |= CY_AS_MEM_P0_VM_SET_IFMODE;
- if (config_p->dackmode == cy_as_device_dack_ack)
- v |= CY_AS_MEM_P0_VM_SET_DACKEOB;
- if (config_p->drqpol)
- v |= CY_AS_MEM_P0_VM_SET_DRQPOL;
- if (config_p->dackpol)
- v |= CY_AS_MEM_P0_VM_SET_DACKPOL;
- cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_VM_SET, v);
-
- if (config_p->crystal)
- cy_as_device_set_crystal(dev_p);
- else
- cy_as_device_set_external_clock(dev_p);
-
- /* Register a callback to handle MISC requests from the firmware */
- cy_as_ll_register_request_callback(dev_p,
- CY_RQT_GENERAL_RQT_CONTEXT, my_misc_callback);
-
- /* Now mark the DMA and low level modules as active. */
- cy_as_device_set_dma_running(dev_p);
- cy_as_device_set_low_level_running(dev_p);
-
- /* Now, initialize the interrupt module */
- dev_p->use_int_drq = config_p->dmaintr;
- ret = cy_as_intr_start(dev_p, config_p->dmaintr);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- /* Mark the interface as initialized */
- cy_as_device_set_configured(dev_p);
-
- return CY_AS_ERROR_SUCCESS;
-}
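For reference, a minimal sketch of how a caller might drive this configuration path, assuming a handle obtained from the create routine at the top of this file (presumably cy_as_misc_create_device). example_configure is a hypothetical name; the cy_as_device_config fields used (sync, dackmode, crystal, dmaintr) are the ones tested above, and any other fields are left zeroed.

/* Hypothetical caller: configure a freshly created handle (sketch). */
static cy_as_return_status_t
example_configure(cy_as_device_handle h)
{
	cy_as_device_config cfg = {0};
	cy_as_return_status_t ret;

	cfg.sync = cy_true;	/* selects the synchronous interface bit above */
	cfg.dackmode = cy_as_device_dack_ack;
	cfg.crystal = cy_true;	/* takes the cy_as_device_set_crystal() path */
	cfg.dmaintr = cy_true;	/* passed to cy_as_intr_start() above */

	ret = cy_as_misc_configure_device(h, &cfg);
	if (ret != CY_AS_ERROR_SUCCESS)
		cy_as_hal_print_message("configure failed: %d\n", ret);

	return ret;
}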
-
-static void
-my_dma_callback(cy_as_device *dev_p,
- cy_as_end_point_number_t ep,
- void *mem_p,
- uint32_t size,
- cy_as_return_status_t ret
- )
-{
- cy_as_dma_end_point *ep_p;
-
- (void)size;
-
- /* Get the endpoint pointer based on the endpoint number */
- ep_p = CY_AS_NUM_EP(dev_p, ep);
-
-	/* Check the queue to see if it is drained */
- if (ep_p->queue_p == 0) {
- cy_as_func_c_b_node *node =
- (cy_as_func_c_b_node *)dev_p->func_cbs_misc->head_p;
-
- cy_as_hal_assert(node);
-
- if (ret == CY_AS_ERROR_SUCCESS) {
- /*
- * disable endpoint 2. the storage module
- * will enable this EP if necessary.
- */
- cy_as_dma_enable_end_point(dev_p,
- CY_AS_FIRMWARE_ENDPOINT,
- cy_false, cy_as_direction_in);
-
- /*
- * clear the reset register. this releases the
- * antioch micro-controller from reset and begins
- * running the code at address zero.
- */
- cy_as_hal_write_register(dev_p->tag,
- CY_AS_MEM_RST_CTRL_REG, 0x00);
- }
-
- /* Call the user Callback */
- node->cb_p((cy_as_device_handle)dev_p, ret, node->client_data,
- node->data_type, node->data);
- cy_as_remove_c_b_node(dev_p->func_cbs_misc);
- } else {
- /* This is the header data that was allocated in the
- * download firmware function, and can be safely freed
- * here. */
- uint32_t state = cy_as_hal_disable_interrupts();
- cy_as_hal_c_b_free(mem_p);
- cy_as_hal_enable_interrupts(state);
- }
-}
-
-cy_as_return_status_t
-cy_as_misc_download_firmware(cy_as_device_handle handle,
- const void *mem_p,
- uint16_t size,
- cy_as_function_callback cb,
- uint32_t client)
-{
- uint8_t *header;
- cy_as_return_status_t ret;
- cy_bool standby;
- cy_as_device *dev_p;
- cy_as_dma_callback dmacb = 0;
- uint32_t state;
-
- cy_as_log_debug_message(6, "cy_as_misc_download_firmware called");
-
- /* Make sure we have a valid device */
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- /*
- * if the device has not been initialized, we cannot download firmware
- * to the device.
- */
- if (!cy_as_device_is_configured(dev_p))
- return CY_AS_ERROR_NOT_CONFIGURED;
-
- /*
- * make sure west bridge is not in standby
- */
- ret = cy_as_misc_in_standby(dev_p, &standby);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (standby)
- return CY_AS_ERROR_IN_STANDBY;
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- /*
- * make sure we are in configuration mode
- */
- if ((cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_VM_SET) &
- CY_AS_MEM_P0_VM_SET_CFGMODE) == 0)
- return CY_AS_ERROR_NOT_IN_CONFIG_MODE;
-
- /* Maximum firmware size is 24k */
- if (size > CY_AS_MAXIMUM_FIRMWARE_SIZE)
- return CY_AS_ERROR_INVALID_SIZE;
-
- /* Make sure the size is an even number of bytes as well */
- if (size & 0x01)
- return CY_AS_ERROR_ALIGNMENT_ERROR;
-
- /*
- * write the two word header that gives the base address and
- * size of the firmware image to download
- */
- state = cy_as_hal_disable_interrupts();
- header = (uint8_t *)cy_as_hal_c_b_alloc(4);
- cy_as_hal_enable_interrupts(state);
- if (header == NULL)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- header[0] = 0x00;
- header[1] = 0x00;
- header[2] = (uint8_t)(size & 0xff);
- header[3] = (uint8_t)((size >> 8) & 0xff);
-
- /* Enable the firmware endpoint */
- ret = cy_as_dma_enable_end_point(dev_p, CY_AS_FIRMWARE_ENDPOINT,
- cy_true, cy_as_direction_in);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- /*
-	 * setup DMA for 64 byte packets. this is the requirement
-	 * for downloading firmware to west bridge.
- */
- cy_as_dma_set_max_dma_size(dev_p, CY_AS_FIRMWARE_ENDPOINT, 64);
-
- if (cb)
- dmacb = my_dma_callback;
-
- ret = cy_as_dma_queue_request(dev_p, CY_AS_FIRMWARE_ENDPOINT, header,
- 4, cy_false, cy_false, dmacb);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- /*
- * write the firmware image to the west bridge device
- */
- ret = cy_as_dma_queue_request(dev_p, CY_AS_FIRMWARE_ENDPOINT,
- (void *)mem_p, size, cy_false, cy_false, dmacb);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cb) {
- cy_as_func_c_b_node *cbnode = cy_as_create_func_c_b_node_data(
- cb, client, CY_FUNCT_CB_MISC_DOWNLOADFIRMWARE, 0);
-
- if (cbnode == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
- else
- cy_as_insert_c_b_node(dev_p->func_cbs_misc, cbnode);
-
- ret = cy_as_dma_kick_start(dev_p, CY_AS_FIRMWARE_ENDPOINT);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
- } else {
- ret = cy_as_dma_drain_queue(dev_p,
- CY_AS_FIRMWARE_ENDPOINT, cy_true);
-
- /* Free the header memory that was allocated earlier. */
- cy_as_hal_c_b_free(header);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- /*
- * disable EP 2. the storage module will
- * enable this EP if necessary.
- */
- cy_as_dma_enable_end_point(dev_p, CY_AS_FIRMWARE_ENDPOINT,
- cy_false, cy_as_direction_in);
-
- /*
- * clear the reset register. this releases the west bridge
- * micro-controller from reset and begins running the code at
- * address zero.
- */
- cy_as_hal_write_register(dev_p->tag,
- CY_AS_MEM_RST_CTRL_REG, 0x00);
- }
-
- /*
- * the firmware is not marked as loaded until the firmware
- * initializes west bridge and a request is sent from west bridge
- * to the P port processor indicating that west bridge is ready.
- */
- return CY_AS_ERROR_SUCCESS;
-}
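A hedged sketch of the blocking (cb == 0) use of the download path above. example_download, fw_image and fw_size are placeholder names; the size constraints and the initialization handshake are the ones already visible in this file.

/* Hypothetical caller: blocking firmware download, no callback. */
static cy_as_return_status_t
example_download(cy_as_device_handle h, const void *fw_image,
	uint16_t fw_size)
{
	cy_as_return_status_t ret;

	/* size must be even and <= CY_AS_MAXIMUM_FIRMWARE_SIZE (checked above) */
	ret = cy_as_misc_download_firmware(h, fw_image, fw_size, 0, 0);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	/*
	 * the device only becomes usable once the firmware sends
	 * CY_RQT_INITIALIZATION_COMPLETE, which my_misc_callback above
	 * reports as cy_as_event_misc_initialized.
	 */
	return CY_AS_ERROR_SUCCESS;
}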
-
-
-static cy_as_return_status_t
-my_handle_response_get_firmware_version(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_get_firmware_version_data *data_p)
-{
-
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint16_t val;
-
- if (cy_as_ll_request_response__get_code(reply_p)
- != CY_RESP_FIRMWARE_VERSION) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- data_p->major = cy_as_ll_request_response__get_word(reply_p, 0);
- data_p->minor = cy_as_ll_request_response__get_word(reply_p, 1);
- data_p->build = cy_as_ll_request_response__get_word(reply_p, 2);
- val = cy_as_ll_request_response__get_word(reply_p, 3);
- data_p->media_type = (uint8_t)(((val >> 8) & 0xFF) | (val & 0xFF));
- val = cy_as_ll_request_response__get_word(reply_p, 4);
- data_p->is_debug_mode = (cy_bool)(val & 0xFF);
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_misc_get_firmware_version(cy_as_device_handle handle,
- cy_as_get_firmware_version_data *data,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_bool standby;
- cy_as_ll_request_response *req_p, *reply_p;
-
- cy_as_device *dev_p;
-
- (void)client;
-
- cy_as_log_debug_message(6, "cy_as_misc_get_firmware_version called");
-
- /* Make sure we have a valid device */
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- /*
- * make sure antioch is not in standby
- */
- ret = cy_as_misc_in_standby(dev_p, &standby);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
- if (standby)
- return CY_AS_ERROR_IN_STANDBY;
-
- /* Make sure the Antioch is not in suspend mode. */
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_GET_FIRMWARE_VERSION,
- CY_RQT_GENERAL_RQT_CONTEXT, 0);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
-	/*
-	 * Reserve space for the reply; the reply data
-	 * will not exceed five words
-	 */
- reply_p = cy_as_ll_create_response(dev_p, 5);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* Request and response are freed in
- * MyHandleResponseGetFirmwareVersion. */
- ret = my_handle_response_get_firmware_version(dev_p,
- req_p, reply_p, data);
- return ret;
- } else {
-
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MISC_GETFIRMWAREVERSION, data,
- dev_p->func_cbs_misc, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_misc_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed
- * as part of the MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_misc_get_firmware_version);
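A short sketch of the synchronous query, using only the response fields decoded in my_handle_response_get_firmware_version above (major, minor, build, is_debug_mode). example_print_fw_version is a hypothetical helper.

/* Hypothetical caller: print the firmware version via the blocking path. */
static void
example_print_fw_version(cy_as_device_handle h)
{
	cy_as_get_firmware_version_data ver;

	if (cy_as_misc_get_firmware_version(h, &ver, 0, 0) ==
		CY_AS_ERROR_SUCCESS)
		cy_as_hal_print_message("firmware %d.%d build %d%s\n",
			ver.major, ver.minor, ver.build,
			ver.is_debug_mode ? " (debug)" : "");
}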
-
-static cy_as_return_status_t
-my_handle_response_read_m_c_u_register(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- uint8_t *data_p)
-{
-
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (cy_as_ll_request_response__get_code(reply_p)
- != CY_RESP_MCU_REGISTER_DATA) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- *data_p = (uint8_t)
- (cy_as_ll_request_response__get_word(reply_p, 0));
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-static cy_as_return_status_t
-my_handle_response_get_gpio_value(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- uint8_t *data_p)
-{
-
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (cy_as_ll_request_response__get_code(reply_p)
- != CY_RESP_GPIO_STATE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- } else
- *data_p = (uint8_t)
- (cy_as_ll_request_response__get_word(reply_p, 0));
-
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-
-cy_as_return_status_t cy_as_misc_set_sd_power_polarity(
- cy_as_device_handle handle,
- cy_as_misc_signal_polarity polarity,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (!cy_as_device_is_configured(dev_p))
- return CY_AS_ERROR_NOT_CONFIGURED;
-
- if (!cy_as_device_is_firmware_loaded(dev_p))
- return CY_AS_ERROR_NO_FIRMWARE;
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_SDPOLARITY,
- CY_RQT_GENERAL_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- (uint16_t)polarity);
-
- /*
- * Reserve space for the reply, the reply data will
- * not exceed one word
- */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return (my_handle_response_no_data(dev_p, req_p, reply_p));
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MISC_SETSDPOLARITY, 0, dev_p->func_cbs_misc,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_misc_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed
- * as part of the FuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
- return ret;
-}
-
-
-cy_as_return_status_t
-cy_as_misc_read_m_c_u_register(cy_as_device_handle handle,
- uint16_t address,
- uint8_t *value,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_ll_request_response *req_p, *reply_p;
-
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_misc_read_m_c_u_register called");
-
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- /* Check whether the firmware supports this command. */
- if (cy_as_device_is_nand_storage_supported(dev_p))
- return CY_AS_ERROR_NOT_SUPPORTED;
-
- /* Make sure the Antioch is not in suspend mode. */
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_READ_MCU_REGISTER,
- CY_RQT_GENERAL_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0, (uint16_t)address);
-
- /* Reserve space for the reply, the reply
- * data will not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_MCU_REGISTER_DATA) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- *value = (uint8_t)(cy_as_ll_request_response__get_word
- (reply_p, 0));
- } else {
-
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MISC_READMCUREGISTER, value,
- dev_p->func_cbs_misc, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_misc_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed
- * as part of the MiscFuncCallback */
- return ret;
- }
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_misc_read_m_c_u_register);
-
-cy_as_return_status_t
-cy_as_misc_write_m_c_u_register(cy_as_device_handle handle,
- uint16_t address,
- uint8_t mask,
- uint8_t value,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_misc_write_m_c_u_register called");
-
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- /* Check whether the firmware supports this command. */
- if (cy_as_device_is_nand_storage_supported(dev_p))
- return CY_AS_ERROR_NOT_SUPPORTED;
-
- /* Make sure the Antioch is not in suspend mode. */
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_WRITE_MCU_REGISTER,
- CY_RQT_GENERAL_RQT_CONTEXT, 2);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0, (uint16_t)address);
- cy_as_ll_request_response__set_word(req_p, 1,
- (uint16_t)((mask << 8) | value));
-
- /*
- * Reserve space for the reply, the reply data
- * will not exceed one word
- */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MISC_WRITEMCUREGISTER, 0,
- dev_p->func_cbs_misc, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_misc_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /*
- * The request and response are freed as part of the
- * MiscFuncCallback
- */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
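The register address goes in word 0 and the mask/value pair are packed into word 1 as ((mask << 8) | value). Below is a hedged sketch of a helper built on the blocking path; example_set_mcu_bits is hypothetical, and the assumption that the firmware only modifies the bits selected by the mask is inferred from the packing, not confirmed here.

/* Hypothetical helper: set selected bits in an MCU register. */
static cy_as_return_status_t
example_set_mcu_bits(cy_as_device_handle h, uint16_t address, uint8_t bits)
{
	/*
	 * assumption: the firmware applies value only to the bits
	 * present in the mask, so mask == value sets those bits.
	 */
	return cy_as_misc_write_m_c_u_register(h, address,
		bits /* mask */, bits /* value */, 0, 0);
}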
-
-cy_as_return_status_t
-my_handle_response_reset(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_reset_type type)
-{
- uint16_t v;
-
- (void)req_p;
- (void)reply_p;
-
- /*
- * if the device is in suspend mode, it needs to be woken up
- * so that the write to the reset control register succeeds.
- * we need not however wait for the wake up procedure to be
- * complete.
- */
- if (cy_as_device_is_in_suspend_mode(dev_p)) {
- v = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_CM_WB_CFG_ID);
- cy_as_hal_sleep(1);
- }
-
- if (type == cy_as_reset_hard) {
- cy_as_misc_cancel_ex_requests(dev_p);
- cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_RST_CTRL_REG,
- CY_AS_MEM_RST_CTRL_REG_HARD);
- cy_as_device_set_unconfigured(dev_p);
- cy_as_device_set_firmware_not_loaded(dev_p);
- cy_as_device_set_dma_stopped(dev_p);
- cy_as_device_set_low_level_stopped(dev_p);
- cy_as_device_set_intr_stopped(dev_p);
- cy_as_device_clear_suspend_mode(dev_p);
- cy_as_usb_cleanup(dev_p);
- cy_as_storage_cleanup(dev_p);
-
- /*
- * wait for a small amount of time to
- * allow reset to be complete.
- */
- cy_as_hal_sleep(100);
- }
-
- cy_as_device_clear_reset_pending(dev_p);
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-cy_as_return_status_t
-cy_as_misc_reset(cy_as_device_handle handle,
- cy_as_reset_type type,
- cy_bool flush,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p;
- cy_as_end_point_number_t i;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- (void)client;
- (void)cb;
-
-	cy_as_log_debug_message(6, "cy_as_misc_reset called");
-
- /* Make sure the device is ready for the command. */
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
-	/*
-	 * soft reset is not supported until the open issues in the
-	 * firmware around this operation are resolved.
-	 */
- if (type == cy_as_reset_soft)
- return CY_AS_ERROR_NOT_YET_SUPPORTED;
-
- cy_as_device_set_reset_pending(dev_p);
-
- if (flush) {
- /* Unable to DrainQueues in polling mode */
- if ((dev_p->storage_cb || dev_p->storage_cb_ms) &&
- cy_as_hal_is_polling())
- return CY_AS_ERROR_ASYNC_PENDING;
-
- /*
- * shutdown the endpoints so no more traffic can be queued
- */
- for (i = 0; i < 15; i++)
- cy_as_dma_enable_end_point(dev_p, i, cy_false,
- cy_as_direction_dont_change);
-
- /*
- * if we are in normal mode, drain all traffic across all
- * endpoints to be sure all traffic is flushed. if the
- * device is suspended, data will not be coming in on any
- * endpoint and all outstanding DMA operations can be
- * cancelled.
- */
- if (cy_as_device_is_in_suspend_mode(dev_p)) {
- for (i = 0; i < 15; i++)
- cy_as_dma_cancel(dev_p, i,
- CY_AS_ERROR_CANCELED);
- } else {
- for (i = 0; i < 15; i++) {
- if ((i == CY_AS_P2S_WRITE_ENDPOINT) ||
- (i == CY_AS_P2S_READ_ENDPOINT))
- cy_as_dma_drain_queue(dev_p, i,
- cy_false);
- else
- cy_as_dma_drain_queue(dev_p, i,
- cy_true);
- }
- }
- } else {
- /* No flush was requested, so cancel any outstanding DMAs
- * so the user callbacks are called as needed
- */
- if (cy_as_device_is_storage_async_pending(dev_p)) {
- for (i = 0; i < 15; i++)
- cy_as_dma_cancel(dev_p, i,
- CY_AS_ERROR_CANCELED);
- }
- }
-
- ret = my_handle_response_reset(dev_p, 0, 0, type);
-
- if (cb)
- /* Even though no mailbox communication was needed,
- * issue the callback so the user does not need to
- * special case their code. */
- cb((cy_as_device_handle)dev_p, ret, client,
- CY_FUNCT_CB_MISC_RESET, 0);
-
- /*
- * initialize any registers that may have been
- * changed when the device was reset.
- */
- cy_as_hal_init_dev_registers(dev_p->tag, cy_false);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_misc_reset);
-
-static cy_as_return_status_t
-get_unallocated_resource(cy_as_device *dev_p, cy_as_resource_type resource)
-{
- uint8_t shift = 0;
- uint16_t v;
- cy_as_return_status_t ret = CY_AS_ERROR_NOT_ACQUIRED;
-
- switch (resource) {
- case cy_as_bus_u_s_b:
- shift = 4;
- break;
- case cy_as_bus_1:
- shift = 0;
- break;
- case cy_as_bus_0:
- shift = 2;
- break;
- default:
- cy_as_hal_assert(cy_false);
- break;
- }
-
- /* Get the semaphore value for this resource */
- v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_RSE_ALLOCATE);
- v = (v >> shift) & 0x03;
-
- if (v == 0x03) {
- ret = CY_AS_ERROR_RESOURCE_ALREADY_OWNED;
- } else if ((v & 0x01) == 0) {
- /* The resource is not owned by anyone, we can try to get it */
- cy_as_hal_write_register(dev_p->tag,
- CY_AS_MEM_P0_RSE_MASK, (0x03 << shift));
- v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_RSE_MASK);
- cy_as_hal_write_register(dev_p->tag,
- CY_AS_MEM_P0_RSE_ALLOCATE, (0x01 << shift));
- v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_RSE_MASK);
-
- v = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_P0_RSE_ALLOCATE);
- v = (v >> shift) & 0x03;
- if (v == 0x03)
- ret = CY_AS_ERROR_SUCCESS;
- }
-
- return ret;
-}
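The shifts above imply a two-bit ownership field per resource in CY_AS_MEM_P0_RSE_ALLOCATE (bits [1:0] for bus 1, [3:2] for bus 0, [5:4] for USB). A small sketch that only reads that field back; example_resource_state is a hypothetical helper and the bit-layout reading is an inference from the switch above.

/* Hypothetical helper: read the 2-bit ownership field for a resource. */
static uint16_t
example_resource_state(cy_as_device *dev_p, cy_as_resource_type resource)
{
	uint8_t shift = (resource == cy_as_bus_u_s_b) ? 4 :
			(resource == cy_as_bus_0) ? 2 : 0;

	return (cy_as_hal_read_register(dev_p->tag,
		CY_AS_MEM_P0_RSE_ALLOCATE) >> shift) & 0x03;
}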
-
-static cy_as_return_status_t
-my_handle_response_acquire_resource(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_resource_type *resource)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- if (ret == CY_AS_ERROR_SUCCESS) {
- ret = get_unallocated_resource(dev_p, *resource);
- if (ret != CY_AS_ERROR_NOT_ACQUIRED)
- ret = CY_AS_ERROR_SUCCESS;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_misc_acquire_resource(cy_as_device_handle handle,
- cy_as_resource_type *resource,
- cy_bool force,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_return_status_t ret;
-
- cy_as_device *dev_p;
-
- (void)client;
-
- cy_as_log_debug_message(6, "cy_as_misc_acquire_resource called");
-
- if (*resource != cy_as_bus_u_s_b && *resource !=
- cy_as_bus_0 && *resource != cy_as_bus_1)
- return CY_AS_ERROR_INVALID_RESOURCE;
-
-
- /* Make sure the device is ready to accept the command. */
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
-
- ret = get_unallocated_resource(dev_p, *resource);
-
- /*
- * make sure that the callback is called if the resource is
- * successfully acquired at this point.
- */
- if ((ret == CY_AS_ERROR_SUCCESS) && (cb != 0))
- cb(handle, ret, client,
- CY_FUNCT_CB_MISC_ACQUIRERESOURCE, resource);
-
- if (ret != CY_AS_ERROR_NOT_ACQUIRED)
- return ret;
-
- if (!force)
- return CY_AS_ERROR_NOT_ACQUIRED;
-
- /* Create the request to acquire the resource */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_ACQUIRE_RESOURCE,
- CY_RQT_RESOURCE_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0, (uint16_t)(*resource));
-
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MISC_ACQUIRERESOURCE, resource,
- dev_p->func_cbs_res, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_misc_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed
- * as part of the MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- if (ret == CY_AS_ERROR_SUCCESS) {
- ret = get_unallocated_resource(dev_p, *resource);
- if (ret != CY_AS_ERROR_NOT_ACQUIRED)
- ret = CY_AS_ERROR_SUCCESS;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_misc_acquire_resource);
-
-cy_as_return_status_t
-cy_as_misc_release_resource(cy_as_device_handle handle,
- cy_as_resource_type resource)
-{
- uint8_t shift = 0;
- uint16_t v;
-
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_misc_release_resource called");
-
- /* Make sure the device is ready for the command. */
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- if (resource != cy_as_bus_u_s_b && resource !=
- cy_as_bus_0 && resource != cy_as_bus_1)
- return CY_AS_ERROR_INVALID_RESOURCE;
-
- switch (resource) {
- case cy_as_bus_u_s_b:
- shift = 4;
- break;
- case cy_as_bus_1:
- shift = 0;
- break;
- case cy_as_bus_0:
- shift = 2;
- break;
- default:
- cy_as_hal_assert(cy_false);
- break;
- }
-
- /* Get the semaphore value for this resource */
- v = (cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_P0_RSE_ALLOCATE) >> shift) & 0x03;
- if (v == 0 || v == 1 || v == 2)
- return CY_AS_ERROR_RESOURCE_NOT_OWNED;
-
- cy_as_hal_write_register(dev_p->tag,
- CY_AS_MEM_P0_RSE_MASK, (0x03 << shift));
- cy_as_hal_write_register(dev_p->tag,
- CY_AS_MEM_P0_RSE_ALLOCATE, (0x02 << shift));
- cy_as_hal_write_register(dev_p->tag,
- CY_AS_MEM_P0_RSE_MASK, 0);
-
- return CY_AS_ERROR_SUCCESS;
-}
-EXPORT_SYMBOL(cy_as_misc_release_resource);
-
-cy_as_return_status_t
-cy_as_misc_set_trace_level(cy_as_device_handle handle,
- uint8_t level,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint32_t unit,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_return_status_t ret;
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_misc_set_trace_level called");
-
- /* Make sure the device is ready for the command. */
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- if (bus < 0 || bus >= CY_AS_MAX_BUSES)
- return CY_AS_ERROR_NO_SUCH_BUS;
-
- if (device >= CY_AS_MAX_STORAGE_DEVICES)
- return CY_AS_ERROR_NO_SUCH_DEVICE;
-
- if (unit > 255)
- return CY_AS_ERROR_NO_SUCH_UNIT;
-
- if (level >= CYAS_FW_TRACE_MAX_LEVEL)
- return CY_AS_ERROR_INVALID_TRACE_LEVEL;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_SET_TRACE_LEVEL,
- CY_RQT_GENERAL_RQT_CONTEXT, 2);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- (uint16_t)level);
- cy_as_ll_request_response__set_word(req_p, 1,
- (uint16_t)((bus << 12) | (device << 8) | (unit)));
-
-	/*
-	 * Reserve space for the reply; the reply data will not
-	 * exceed two words
-	 */
- reply_p = cy_as_ll_create_response(dev_p, 2);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_NOT_SUPPORTED;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- } else {
-
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MISC_SETTRACELEVEL, 0, dev_p->func_cbs_misc,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_misc_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as part of the
- * MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_misc_heart_beat_control(cy_as_device_handle handle,
- cy_bool enable,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_return_status_t ret;
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_misc_heart_beat_control called");
-
- /* Make sure the device is ready for the command. */
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_CONTROL_ANTIOCH_HEARTBEAT,
- CY_RQT_GENERAL_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0, (uint16_t)enable);
-
- /* Reserve space for the reply, the reply
- * data will not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- } else {
-
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MISC_HEARTBEATCONTROL, 0,
- dev_p->func_cbs_misc, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_misc_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as part of the
- * MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_misc_heart_beat_control);
-
-static cy_as_return_status_t
-my_set_sd_clock_freq(
- cy_as_device *dev_p,
- uint8_t card_type,
- uint8_t setting,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_ll_request_response *req_p, *reply_p;
-
- if (cy_as_device_is_in_callback(dev_p) && (cb == 0))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_SET_SD_CLOCK_FREQ,
- CY_RQT_GENERAL_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- (uint16_t)((card_type << 8) | setting));
-
- /* Reserve space for the reply, which will not exceed one word. */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MISC_SETSDFREQ, 0, dev_p->func_cbs_misc,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_misc_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as part of the
- * MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_misc_set_low_speed_sd_freq(
- cy_as_device_handle handle,
- cy_as_low_speed_sd_freq setting,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_misc_set_low_speed_sd_freq called");
-
- /* Make sure the device is ready for the command. */
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- if ((setting != CY_AS_SD_DEFAULT_FREQ) &&
- (setting != CY_AS_SD_RATED_FREQ))
- return CY_AS_ERROR_INVALID_PARAMETER;
-
- return my_set_sd_clock_freq(dev_p, 0, (uint8_t)setting, cb, client);
-}
-EXPORT_SYMBOL(cy_as_misc_set_low_speed_sd_freq);
-
-cy_as_return_status_t
-cy_as_misc_set_high_speed_sd_freq(
- cy_as_device_handle handle,
- cy_as_high_speed_sd_freq setting,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_misc_set_high_speed_sd_freq called");
-
- /* Make sure the device is ready for the command. */
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- if ((setting != CY_AS_HS_SD_FREQ_24) &&
- (setting != CY_AS_HS_SD_FREQ_48))
- return CY_AS_ERROR_INVALID_PARAMETER;
-
- return my_set_sd_clock_freq(dev_p, 1, (uint8_t)setting, cb, client);
-}
-EXPORT_SYMBOL(cy_as_misc_set_high_speed_sd_freq);
-
-cy_as_return_status_t
-cy_as_misc_get_gpio_value(cy_as_device_handle handle,
- cy_as_misc_gpio pin,
- uint8_t *value,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_device *dev_p;
- uint16_t v;
-
- cy_as_log_debug_message(6, "cy_as_misc_get_gpio_value called");
-
- /* Make sure the device is ready for the command. */
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- /* If the pin specified is UVALID, there is no need
- * for firmware to be loaded. */
- if (pin == cy_as_misc_gpio_U_valid) {
- v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_PMU_UPDATE);
- *value = (uint8_t)(v & CY_AS_MEM_PMU_UPDATE_UVALID);
-
- if (cb != 0)
- cb(dev_p, ret, client,
- CY_FUNCT_CB_MISC_GETGPIOVALUE, value);
-
- return ret;
- }
-
- /* Check whether the firmware supports this command. */
- if (cy_as_device_is_nand_storage_supported(dev_p))
- return CY_AS_ERROR_NOT_SUPPORTED;
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- /* Make sure the pin selected is valid */
- if ((pin != cy_as_misc_gpio_1) && (pin != cy_as_misc_gpio_0))
- return CY_AS_ERROR_INVALID_PARAMETER;
-
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_GET_GPIO_STATE,
- CY_RQT_GENERAL_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0, ((uint8_t)pin << 8));
-
- /* Reserve space for the reply, which will not exceed one word. */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_GPIO_STATE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- *value = (uint8_t)
- cy_as_ll_request_response__get_word(reply_p, 0);
- } else {
-
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MISC_GETGPIOVALUE, value,
- dev_p->func_cbs_misc, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_misc_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as part of the
- * MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_misc_get_gpio_value);
-
-cy_as_return_status_t
-cy_as_misc_set_gpio_value(cy_as_device_handle handle,
- cy_as_misc_gpio pin,
- uint8_t value,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_device *dev_p;
- uint16_t v;
-
- cy_as_log_debug_message(6, "cy_as_misc_set_gpio_value called");
-
- /* Make sure the device is ready for the command. */
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- /* If the pin specified is UVALID, there is
- * no need for firmware to be loaded. */
- if (pin == cy_as_misc_gpio_U_valid) {
- v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_PMU_UPDATE);
- if (value)
- cy_as_hal_write_register(dev_p->tag,
- CY_AS_MEM_PMU_UPDATE,
- (v | CY_AS_MEM_PMU_UPDATE_UVALID));
- else
- cy_as_hal_write_register(dev_p->tag,
- CY_AS_MEM_PMU_UPDATE,
- (v & ~CY_AS_MEM_PMU_UPDATE_UVALID));
-
- if (cb != 0)
- cb(dev_p, ret, client,
- CY_FUNCT_CB_MISC_SETGPIOVALUE, 0);
- return ret;
- }
-
- /* Check whether the firmware supports this command. */
- if (cy_as_device_is_nand_storage_supported(dev_p))
- return CY_AS_ERROR_NOT_SUPPORTED;
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- /* Make sure the pin selected is valid */
- if ((pin < cy_as_misc_gpio_0) || (pin > cy_as_misc_gpio_U_valid))
- return CY_AS_ERROR_INVALID_PARAMETER;
-
- /* Create and initialize the low level request to the firmware. */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_SET_GPIO_STATE,
- CY_RQT_GENERAL_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- v = (uint16_t)(((uint8_t)pin << 8) | (value > 0));
- cy_as_ll_request_response__set_word(req_p, 0, v);
-
- /* Reserve space for the reply, which will not exceed one word. */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- } else {
-
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MISC_SETGPIOVALUE, 0,
- dev_p->func_cbs_misc, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_misc_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as part of the
- * MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_misc_set_gpio_value);
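A minimal sketch tying the two blocking GPIO calls together; example_toggle_gpio_0 is a hypothetical helper, and only the cy_as_misc_gpio_0 pin and the 0/1 value convention visible above are assumed.

/* Hypothetical caller: read GPIO 0 and write back its complement. */
static void
example_toggle_gpio_0(cy_as_device_handle h)
{
	uint8_t v = 0;

	if (cy_as_misc_get_gpio_value(h, cy_as_misc_gpio_0,
		&v, 0, 0) == CY_AS_ERROR_SUCCESS)
		cy_as_misc_set_gpio_value(h, cy_as_misc_gpio_0,
			(uint8_t)!v, 0, 0);
}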
-
-static cy_as_return_status_t
-my_enter_standby(cy_as_device *dev_p, cy_bool pin)
-{
- cy_as_misc_cancel_ex_requests(dev_p);
-
- /* Save the current values in the critical P-port
- * registers, where necessary. */
- cy_as_hal_read_regs_before_standby(dev_p->tag);
-
- if (pin) {
- if (cy_as_hal_set_wakeup_pin(dev_p->tag, cy_false))
- cy_as_device_set_pin_standby(dev_p);
- else
- return CY_AS_ERROR_SETTING_WAKEUP_PIN;
- } else {
- /*
- * put antioch in the standby mode
- */
- cy_as_hal_write_register(dev_p->tag,
- CY_AS_MEM_PWR_MAGT_STAT, 0x02);
- cy_as_device_set_register_standby(dev_p);
- }
-
- /*
- * when the antioch comes out of standby, we have to wait until
- * the firmware initialization completes before sending other
- * requests down.
- */
- cy_as_device_set_firmware_not_loaded(dev_p);
-
- /*
- * keep west bridge interrupt disabled until the device is being woken
- * up from standby.
- */
- dev_p->stby_int_mask = cy_as_hal_disable_interrupts();
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-static cy_as_return_status_t
-my_handle_response_enter_standby(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_bool pin)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- ret = my_enter_standby(dev_p, pin);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_misc_enter_standby(cy_as_device_handle handle,
- cy_bool pin,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_ll_request_response *req_p, *reply_p;
- cy_bool standby;
-
- cy_as_log_debug_message(6, "cy_as_misc_enter_standby called");
-
- /* Make sure we have a valid device */
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- /*
- * if we already are in standby, do not do it again and let the
- * user know via the error return.
- */
- ret = cy_as_misc_in_standby(handle, &standby);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (standby == cy_true)
- return CY_AS_ERROR_ALREADY_STANDBY;
-
- /*
- * if the user wants to transition from suspend mode to standby mode,
- * the device needs to be woken up so that it can complete all pending
- * operations.
- */
- if (cy_as_device_is_in_suspend_mode(dev_p))
- cy_as_misc_leave_suspend(dev_p, 0, 0);
-
- if (dev_p->usb_count) {
- /*
- * we do not allow west bridge to go into standby mode when the
- * USB stack is initialized. you must stop the USB stack in
- * order to enter standby mode.
- */
- return CY_AS_ERROR_USB_RUNNING;
- }
-
- /*
- * if the storage stack is not running, the device can directly be
- * put into sleep mode. otherwise, the firmware needs to be signaled
- * to prepare for going into sleep mode.
- */
- if (dev_p->storage_count) {
- /*
- * if there are async storage operations pending,
- * make one attempt to complete them.
- */
- if (cy_as_device_is_storage_async_pending(dev_p)) {
- /* DrainQueue will not work in polling mode */
- if (cy_as_hal_is_polling())
- return CY_AS_ERROR_ASYNC_PENDING;
-
- cy_as_dma_drain_queue(dev_p,
- CY_AS_P2S_READ_ENDPOINT, cy_false);
- cy_as_dma_drain_queue(dev_p,
- CY_AS_P2S_WRITE_ENDPOINT, cy_false);
-
- /*
- * if more storage operations were queued
- * at this stage, return an error.
- */
- if (cy_as_device_is_storage_async_pending(dev_p))
- return CY_AS_ERROR_ASYNC_PENDING;
- }
-
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_PREPARE_FOR_STANDBY,
- CY_RQT_GENERAL_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (!cb) {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed
- * in the HandleResponse */
- return my_handle_response_enter_standby(dev_p,
- req_p, reply_p, pin);
-
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MISC_ENTERSTANDBY, (void *)pin,
- dev_p->func_cbs_misc, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_misc_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed
- * as part of the MiscFuncCallback */
- return ret;
- }
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
- } else {
- ret = my_enter_standby(dev_p, pin);
- if (cb)
- /* Even though no mailbox communication was
- * needed, issue the callback so the user
- * does not need to special case their code. */
- cb((cy_as_device_handle)dev_p, ret, client,
- CY_FUNCT_CB_MISC_ENTERSTANDBY, 0);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_misc_enter_standby);
-
-cy_as_return_status_t
-cy_as_misc_enter_standby_e_x_u(cy_as_device_handle handle,
- cy_bool pin,
- cy_bool uvalid_special,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p;
-
- dev_p = (cy_as_device *)handle;
- if (uvalid_special)
- cy_as_hal_write_register(dev_p->tag, 0xc5, 0x4);
-
- return cy_as_misc_enter_standby(handle, pin, cb, client);
-}
-
-cy_as_return_status_t
-cy_as_misc_leave_standby(cy_as_device_handle handle,
- cy_as_resource_type resource)
-{
- cy_as_device *dev_p;
- uint16_t v;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint32_t count = 8;
- uint8_t retry = 1;
-
- cy_as_log_debug_message(6, "cy_as_misc_leave_standby called");
- (void)resource;
-
- /* Make sure we have a valid device */
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (cy_as_device_is_register_standby(dev_p)) {
- /*
- * set a flag to indicate that the west bridge is waking
- * up from standby.
- */
- cy_as_device_set_waking(dev_p);
-
- /*
- * the initial read will not succeed, but will just wake
- * the west bridge device from standby. successive reads
- * should succeed and in that way we know west bridge is awake.
- */
- v = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_CM_WB_CFG_ID);
-
- do {
- /*
-			 * we have initiated the operation to leave standby; now
-			 * we need to wait at least N ms before trying to access
-			 * the west bridge device to ensure the PLLs have locked
- * and we can talk to the device.
- */
- if (cy_as_device_is_crystal(dev_p))
- cy_as_hal_sleep(
- CY_AS_LEAVE_STANDBY_DELAY_CRYSTAL);
- else
- cy_as_hal_sleep(
- CY_AS_LEAVE_STANDBY_DELAY_CLOCK);
- v = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_CM_WB_CFG_ID);
-
- /*
- * if the P-SPI interface mode is in use, there may be a
- * need to re-synchronise the serial clock used for
- * astoria access.
- */
- if (!is_valid_silicon_id(v)) {
- if (cy_as_hal_sync_device_clocks(dev_p->tag) !=
- cy_true) {
- cy_as_hal_enable_interrupts(
- dev_p->stby_int_mask);
- return CY_AS_ERROR_TIMEOUT;
- }
- }
- } while (!is_valid_silicon_id(v) && count-- > 0);
-
-		/*
-		 * if the ID register still could not be read back,
-		 * return a timeout
-		 */
-		if (!is_valid_silicon_id(v)) {
-			cy_as_hal_enable_interrupts(
-				dev_p->stby_int_mask);
-			return CY_AS_ERROR_TIMEOUT;
-		}
-
- /*
- * the standby flag is cleared here, after the action to
- * exit standby has been taken. the wait for firmware
-		 * initialization is ensured by marking the firmware as
- * not loaded until the init event is received.
- */
- cy_as_device_clear_register_standby(dev_p);
-
- /*
- * initialize any registers that may have been changed
- * while the device was in standby mode.
- */
- cy_as_hal_init_dev_registers(dev_p->tag, cy_true);
- } else if (cy_as_device_is_pin_standby(dev_p)) {
- /*
- * set a flag to indicate that the west bridge is waking
- * up from standby.
- */
- cy_as_device_set_waking(dev_p);
-
-try_wakeup_again:
- /*
-		 * try to set the wakeup pin; if this fails in the HAL
-		 * layer, return this failure to the user.
- */
- if (!cy_as_hal_set_wakeup_pin(dev_p->tag, cy_true)) {
- cy_as_hal_enable_interrupts(dev_p->stby_int_mask);
- return CY_AS_ERROR_SETTING_WAKEUP_PIN;
- }
-
- /*
-		 * we have initiated the operation to leave standby; now
-		 * we need to wait at least N ms before trying to access
-		 * the west bridge device to ensure the PLLs have locked
- * and we can talk to the device.
- */
- if (cy_as_device_is_crystal(dev_p))
- cy_as_hal_sleep(CY_AS_LEAVE_STANDBY_DELAY_CRYSTAL);
- else
- cy_as_hal_sleep(CY_AS_LEAVE_STANDBY_DELAY_CLOCK);
-
- /*
- * initialize any registers that may have been changed
- * while the device was in standby mode.
- */
- cy_as_hal_init_dev_registers(dev_p->tag, cy_true);
-
- /*
- * the standby flag is cleared here, after the action to
- * exit standby has been taken. the wait for firmware
-		 * initialization is ensured by marking the firmware as
- * not loaded until the init event is received.
- */
- cy_as_device_clear_pin_standby(dev_p);
- } else {
- return CY_AS_ERROR_NOT_IN_STANDBY;
- }
-
- /*
- * the west bridge interrupt can be enabled now.
- */
- cy_as_hal_enable_interrupts(dev_p->stby_int_mask);
-
- /*
-	 * release the west bridge micro-controller from reset,
- * so that firmware initialization can complete. the attempt
- * to release antioch reset is made up to 8 times.
- */
- v = 0x03;
- count = 0x08;
- while ((v & 0x03) && (count)) {
- cy_as_hal_write_register(dev_p->tag,
- CY_AS_MEM_RST_CTRL_REG, 0x00);
- v = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_RST_CTRL_REG);
- count--;
- }
-
- if (v & 0x03) {
- cy_as_hal_print_message("failed to clear antioch reset\n");
- return CY_AS_ERROR_TIMEOUT;
- }
-
- /*
- * if the wake-up pin is being used, wait here to make
- * sure that the wake-up event is received within a
- * reasonable delay. otherwise, toggle the wake-up pin
- * again in an attempt to start the firmware properly.
- */
- if (retry) {
- count = 10;
- while (count) {
- /* If the wake-up event has been received,
- * we can return. */
- if (cy_as_device_is_firmware_loaded(dev_p))
- break;
- /* If we are in polling mode, the interrupt may
- * not have been serviced as yet. read the
- * interrupt status register. if a pending mailbox
- * interrupt is seen, we can assume that the
- * wake-up event will be received soon. */
- v = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_P0_INTR_REG);
- if (v & CY_AS_MEM_P0_INTR_REG_MBINT)
- break;
-
- cy_as_hal_sleep(10);
- count--;
- }
-
- if (!count) {
- retry = 0;
- dev_p->stby_int_mask = cy_as_hal_disable_interrupts();
- cy_as_hal_set_wakeup_pin(dev_p->tag, cy_false);
- cy_as_hal_sleep(10);
- goto try_wakeup_again;
- }
- }
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_misc_leave_standby);
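A hedged sketch of a full standby round trip using the blocking entry path (register-based standby, pin == cy_false). example_standby_cycle is hypothetical; readiness after wake-up is signalled by the cy_as_event_misc_awake event handled in my_misc_callback above.

/* Hypothetical caller: register-based standby entry and exit. */
static cy_as_return_status_t
example_standby_cycle(cy_as_device_handle h)
{
	cy_as_return_status_t ret;

	ret = cy_as_misc_enter_standby(h, cy_false, 0, 0);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	/* ... host sleeps; West Bridge sits in standby here ... */

	return cy_as_misc_leave_standby(h, cy_as_bus_u_s_b);
}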
-
-cy_as_return_status_t
-cy_as_misc_register_callback(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The function to call */
- cy_as_misc_event_callback callback
- )
-{
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_misc_register_callback called");
-
- /* Make sure we have a valid device */
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- dev_p->misc_event_cb = callback;
- return CY_AS_ERROR_SUCCESS;
-}
-
-cy_as_return_status_t
-cy_as_misc_storage_changed(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_bool standby;
- cy_as_ll_request_response *req_p, *reply_p;
-
- cy_as_log_debug_message(6, "cy_as_misc_storage_changed called");
-
- /* Make sure the device is ready for the command. */
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- /*
- * make sure antioch is not in standby
- */
- ret = cy_as_misc_in_standby(dev_p, &standby);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (standby)
- return CY_AS_ERROR_IN_STANDBY;
-
- /*
- * make sure westbridge is not in suspend mode.
- */
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_STORAGE_MEDIA_CHANGED,
- CY_RQT_GENERAL_RQT_CONTEXT, 0);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- /* Reserve space for the reply, the reply data will
- * not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- } else {
-
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MISC_STORAGECHANGED, 0,
- dev_p->func_cbs_misc, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_misc_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as part of the
- * MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_misc_storage_changed);
-
-cy_as_return_status_t
-cy_as_misc_enter_suspend(
- cy_as_device_handle handle,
- cy_bool usb_wakeup_en,
- cy_bool gpio_wakeup_en,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_bool standby;
- cy_as_ll_request_response *req_p, *reply_p;
- uint16_t value;
- uint32_t int_state;
-
- cy_as_log_debug_message(6, "cy_as_misc_enter_suspend called");
-
- /*
- * basic sanity checks to ensure that the device is initialised.
- */
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- /*
- * make sure west bridge is not already in standby
- */
- cy_as_misc_in_standby(dev_p, &standby);
- if (standby)
- return CY_AS_ERROR_IN_STANDBY;
-
- /*
- * make sure that the device is not already in suspend mode.
- */
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- /*
- * make sure there is no active USB connection.
- */
- if ((cy_as_device_is_usb_connected(dev_p)) && (dev_p->usb_last_event
- != cy_as_event_usb_suspend))
- return CY_AS_ERROR_USB_CONNECTED;
-
- /*
- * make sure that there are no async requests at this point in time.
- */
- int_state = cy_as_hal_disable_interrupts();
- if ((dev_p->func_cbs_misc->count) || (dev_p->func_cbs_res->count) ||
- (dev_p->func_cbs_stor->count) || (dev_p->func_cbs_usb->count)) {
- cy_as_hal_enable_interrupts(int_state);
- return CY_AS_ERROR_ASYNC_PENDING;
- }
- cy_as_hal_enable_interrupts(int_state);
-
- /* Create the request to send to the Antioch device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_ENTER_SUSPEND_MODE,
- CY_RQT_GENERAL_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- /* Reserve space for the reply, the reply data will not
- * exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- /* Wakeup control flags. */
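-	/*
-	 * judging from the values used below: bit 0 requests the
-	 * suspend entry itself, bit 2 enables wakeup on USB activity
-	 * and bit 1 enables wakeup on GPIO activity.
-	 */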
- value = 0x0001;
- if (usb_wakeup_en)
- value |= 0x04;
- if (gpio_wakeup_en)
- value |= 0x02;
- cy_as_ll_request_response__set_word(req_p, 0, value);
-
- if (cb != 0) {
-
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MISC_ENTERSUSPEND,
- 0, dev_p->func_cbs_misc, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p,
- cy_as_misc_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return CY_AS_ERROR_SUCCESS;
- } else {
-		ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
-		if (ret != CY_AS_ERROR_SUCCESS)
-			goto destroy;
-
-		if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE)
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- else
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- }
-
-destroy:
- if (ret == CY_AS_ERROR_SUCCESS)
- cy_as_device_set_suspend_mode(dev_p);
-
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_misc_enter_suspend);
-
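-/*
- * cy_as_misc_leave_suspend() is the counterpart of
- * cy_as_misc_enter_suspend() above. a minimal, illustrative call
- * sequence (not taken from this driver; error handling omitted and
- * "h" is a hypothetical, already initialised device handle):
- *
- *	ret = cy_as_misc_enter_suspend(h, cy_true, cy_false, 0, 0);
- *	...
- *	ret = cy_as_misc_leave_suspend(h, 0, 0);
- */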
-cy_as_return_status_t
-cy_as_misc_leave_suspend(
- cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p;
- uint16_t v, count;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- cy_as_log_debug_message(6, "cy_as_misc_leave_suspend called");
-
- /* Make sure we have a valid device */
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- /* Make sure we are in suspend mode. */
- if (cy_as_device_is_in_suspend_mode(dev_p)) {
- if (cb) {
- cy_as_func_c_b_node *cbnode =
- cy_as_create_func_c_b_node_data(cb, client,
- CY_FUNCT_CB_MISC_LEAVESUSPEND, 0);
- if (cbnode == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_insert_c_b_node(dev_p->func_cbs_misc, cbnode);
- }
-
- /*
- * do a read from the ID register so that the CE assertion
-		 * will wake west bridge. the read is repeated until it
-		 * comes back with valid data.
- */
- count = 8;
-
- v = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_CM_WB_CFG_ID);
-
- while (!is_valid_silicon_id(v) && count-- > 0) {
- cy_as_hal_sleep(CY_AS_LEAVE_STANDBY_DELAY_CLOCK);
- v = cy_as_hal_read_register(dev_p->tag,
- CY_AS_MEM_CM_WB_CFG_ID);
- }
-
- /*
- * if we tried to read the register and could not,
- * return a timeout
- */
-		if (!is_valid_silicon_id(v))
- return CY_AS_ERROR_TIMEOUT;
- } else
- return CY_AS_ERROR_NOT_IN_SUSPEND;
-
- if (cb == 0) {
- /*
- * wait until the in suspend mode flag is cleared.
- */
- count = 20;
- while ((cy_as_device_is_in_suspend_mode(dev_p))
- && (count--)) {
- cy_as_hal_sleep(CY_AS_LEAVE_STANDBY_DELAY_CLOCK);
- }
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- ret = CY_AS_ERROR_TIMEOUT;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_misc_leave_suspend);
-
-cy_as_return_status_t
-cy_as_misc_reserve_l_n_a_boot_area(cy_as_device_handle handle,
- uint8_t numzones,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_bool standby;
- cy_as_ll_request_response *req_p, *reply_p;
-
- cy_as_device *dev_p;
-
- (void)client;
-
-	cy_as_log_debug_message(6,
-		"cy_as_misc_reserve_l_n_a_boot_area called");
-
- /* Make sure we have a valid device */
- dev_p = (cy_as_device *)handle;
- cy_as_check_device_ready(dev_p);
-
- /*
- * make sure antioch is not in standby
- */
- ret = cy_as_misc_in_standby(dev_p, &standby);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
- if (standby)
- return CY_AS_ERROR_IN_STANDBY;
-
- /* Make sure the Antioch is not in suspend mode. */
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_RESERVE_LNA_BOOT_AREA,
- CY_RQT_GENERAL_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
- cy_as_ll_request_response__set_word(req_p,
- 0, (uint16_t)numzones);
-
- /* Reserve space for the reply, the reply data will not
- * exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- } else {
-
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MISC_RESERVELNABOOTAREA,
- 0, dev_p->func_cbs_misc, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_misc_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as part of the
- * MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_func_c_b_node*
-cy_as_create_func_c_b_node_data(cy_as_function_callback cb,
- uint32_t client,
- cy_as_funct_c_b_type type,
- void *data)
-{
- uint32_t state = cy_as_hal_disable_interrupts();
- cy_as_func_c_b_node *node = cy_as_hal_c_b_alloc(
- sizeof(cy_as_func_c_b_node));
- cy_as_hal_enable_interrupts(state);
- if (node != 0) {
- node->node_type = CYAS_FUNC_CB;
- node->cb_p = cb;
- node->client_data = client;
- node->data_type = type;
- if (data != 0)
- node->data_type |= CY_FUNCT_CB_DATA;
- else
- node->data_type |= CY_FUNCT_CB_NODATA;
- node->data = data;
- node->next_p = 0;
- }
- return node;
-}
-
-cy_as_func_c_b_node*
-cy_as_create_func_c_b_node(cy_as_function_callback cb,
- uint32_t client)
-{
- return cy_as_create_func_c_b_node_data(cb, client,
- CY_FUNCT_CB_NODATA, 0);
-}
-
-void
-cy_as_destroy_func_c_b_node(cy_as_func_c_b_node *node)
-{
- uint32_t state;
-
- node->node_type = CYAS_INVALID;
- state = cy_as_hal_disable_interrupts();
- cy_as_hal_c_b_free(node);
- cy_as_hal_enable_interrupts(state);
-}
-
-cy_as_usb_func_c_b_node*
-cy_as_create_usb_func_c_b_node(
- cy_as_usb_function_callback cb, uint32_t client)
-{
- uint32_t state = cy_as_hal_disable_interrupts();
- cy_as_usb_func_c_b_node *node = cy_as_hal_c_b_alloc(
- sizeof(cy_as_usb_func_c_b_node));
- cy_as_hal_enable_interrupts(state);
- if (node != 0) {
- node->type = CYAS_USB_FUNC_CB;
- node->cb_p = cb;
- node->client_data = client;
- node->next_p = 0;
- }
- return node;
-}
-
-void
-cy_as_destroy_usb_func_c_b_node(cy_as_usb_func_c_b_node *node)
-{
- uint32_t state;
-
- node->type = CYAS_INVALID;
- state = cy_as_hal_disable_interrupts();
- cy_as_hal_c_b_free(node);
- cy_as_hal_enable_interrupts(state);
-}
-
-cy_as_usb_io_c_b_node*
-cy_as_create_usb_io_c_b_node(cy_as_usb_io_callback cb)
-{
- uint32_t state = cy_as_hal_disable_interrupts();
- cy_as_usb_io_c_b_node *node = cy_as_hal_c_b_alloc(
- sizeof(cy_as_usb_io_c_b_node));
- cy_as_hal_enable_interrupts(state);
- if (node != 0) {
- node->type = CYAS_USB_IO_CB;
- node->cb_p = cb;
- node->next_p = 0;
- }
- return node;
-}
-
-void
-cy_as_destroy_usb_io_c_b_node(cy_as_usb_io_c_b_node *node)
-{
- uint32_t state;
-
- node->type = CYAS_INVALID;
-
- state = cy_as_hal_disable_interrupts();
- cy_as_hal_c_b_free(node);
- cy_as_hal_enable_interrupts(state);
-}
-
-cy_as_storage_io_c_b_node*
-cy_as_create_storage_io_c_b_node(cy_as_storage_callback cb,
- cy_as_media_type media, uint32_t device_index,
- uint32_t unit, uint32_t block_addr, cy_as_oper_type oper,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p)
-{
- uint32_t state = cy_as_hal_disable_interrupts();
- cy_as_storage_io_c_b_node *node = cy_as_hal_c_b_alloc(
- sizeof(cy_as_storage_io_c_b_node));
- cy_as_hal_enable_interrupts(state);
- if (node != 0) {
- node->type = CYAS_STORAGE_IO_CB;
- node->cb_p = cb;
- node->media = media;
- node->device_index = device_index;
- node->unit = unit;
- node->block_addr = block_addr;
- node->oper = oper;
- node->req_p = req_p;
- node->reply_p = reply_p;
- node->next_p = 0;
- }
- return node;
-}
-
-void
-cy_as_destroy_storage_io_c_b_node(cy_as_storage_io_c_b_node *node)
-{
- uint32_t state;
- node->type = CYAS_INVALID;
- state = cy_as_hal_disable_interrupts();
- cy_as_hal_c_b_free(node);
- cy_as_hal_enable_interrupts(state);
-}
-
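-/*
- * the callback queues used below are simple singly linked FIFO
- * lists protected by disabling interrupts: cy_as_insert_c_b_node()
- * appends a node at the tail and cy_as_remove_c_b_node() frees the
- * node at the head once its callback has completed.
- */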
-cy_as_c_b_queue *
-cy_as_create_c_b_queue(cy_as_c_b_node_type type)
-{
- uint32_t state = cy_as_hal_disable_interrupts();
- cy_as_c_b_queue *queue = cy_as_hal_c_b_alloc(
- sizeof(cy_as_c_b_queue));
- cy_as_hal_enable_interrupts(state);
- if (queue) {
- queue->type = type;
- queue->head_p = 0;
- queue->tail_p = 0;
- queue->count = 0;
- }
-
- return queue;
-}
-
-void
-cy_as_destroy_c_b_queue(cy_as_c_b_queue *queue)
-{
- uint32_t state;
- queue->type = CYAS_INVALID;
- queue->head_p = 0;
- queue->tail_p = 0;
- queue->count = 0;
- state = cy_as_hal_disable_interrupts();
- cy_as_hal_c_b_free(queue);
- cy_as_hal_enable_interrupts(state);
-}
-
-/* Inserts a CyAsCBNode into the queue; the
- * node type must match the queue type */
-void
-cy_as_insert_c_b_node(cy_as_c_b_queue *queue_p, void*cbnode)
-{
- uint32_t int_state;
-
- int_state = cy_as_hal_disable_interrupts();
-
- cy_as_hal_assert(queue_p != 0);
-
- switch (queue_p->type) {
- case CYAS_USB_FUNC_CB:
- {
- cy_as_usb_func_c_b_node *node =
- (cy_as_usb_func_c_b_node *)cbnode;
- cy_as_usb_func_c_b_node *tail =
- (cy_as_usb_func_c_b_node *)queue_p->tail_p;
-
- cy_as_hal_assert(node->type == CYAS_USB_FUNC_CB);
- cy_as_hal_assert(tail == 0 ||
- tail->type == CYAS_USB_FUNC_CB);
- if (queue_p->head_p == 0)
- queue_p->head_p = node;
- else
- tail->next_p = node;
-
- queue_p->tail_p = node;
- }
- break;
-
- case CYAS_USB_IO_CB:
- {
- cy_as_usb_io_c_b_node *node =
- (cy_as_usb_io_c_b_node *)cbnode;
- cy_as_usb_io_c_b_node *tail =
- (cy_as_usb_io_c_b_node *)queue_p->tail_p;
-
- cy_as_hal_assert(node->type == CYAS_USB_IO_CB);
- cy_as_hal_assert(tail == 0 ||
- tail->type == CYAS_USB_IO_CB);
- if (queue_p->head_p == 0)
- queue_p->head_p = node;
- else
- tail->next_p = node;
-
- queue_p->tail_p = node;
- }
- break;
-
- case CYAS_STORAGE_IO_CB:
- {
- cy_as_storage_io_c_b_node *node =
- (cy_as_storage_io_c_b_node *)cbnode;
- cy_as_storage_io_c_b_node *tail =
- (cy_as_storage_io_c_b_node *)queue_p->tail_p;
-
- cy_as_hal_assert(node->type == CYAS_STORAGE_IO_CB);
- cy_as_hal_assert(tail == 0 ||
- tail->type == CYAS_STORAGE_IO_CB);
- if (queue_p->head_p == 0)
- queue_p->head_p = node;
- else
- tail->next_p = node;
-
- queue_p->tail_p = node;
- }
- break;
-
- case CYAS_FUNC_CB:
- {
- cy_as_func_c_b_node *node =
- (cy_as_func_c_b_node *)cbnode;
- cy_as_func_c_b_node *tail =
- (cy_as_func_c_b_node *)queue_p->tail_p;
-
- cy_as_hal_assert(node->node_type == CYAS_FUNC_CB);
- cy_as_hal_assert(tail == 0 ||
- tail->node_type == CYAS_FUNC_CB);
- if (queue_p->head_p == 0)
- queue_p->head_p = node;
- else
- tail->next_p = node;
-
- queue_p->tail_p = node;
- }
- break;
-
- default:
- cy_as_hal_assert(cy_false);
- break;
- }
-
- queue_p->count++;
-
- cy_as_hal_enable_interrupts(int_state);
-}
-
-/* Removes the tail node from the queue and frees it */
-void
-cy_as_remove_c_b_tail_node(cy_as_c_b_queue *queue_p)
-{
- uint32_t int_state;
-
- int_state = cy_as_hal_disable_interrupts();
-
- if (queue_p->count > 0) {
- /*
- * the worst case length of the queue should be
- * under 10 elements, and the average case should
- * be just 1 element. so, we just employ a linear
- * search to find the node to be freed.
- */
- switch (queue_p->type) {
- case CYAS_FUNC_CB:
- {
- cy_as_func_c_b_node *node =
- (cy_as_func_c_b_node *)
- queue_p->head_p;
- cy_as_func_c_b_node *tail =
- (cy_as_func_c_b_node *)
- queue_p->tail_p;
- if (node != tail) {
- while (node->next_p != tail)
- node = node->next_p;
- node->next_p = 0;
- queue_p->tail_p = node;
- }
- cy_as_destroy_func_c_b_node(tail);
- }
- break;
-
- case CYAS_USB_FUNC_CB:
- {
- cy_as_usb_func_c_b_node *node =
- (cy_as_usb_func_c_b_node *)
- queue_p->head_p;
- cy_as_usb_func_c_b_node *tail =
- (cy_as_usb_func_c_b_node *)
- queue_p->tail_p;
- if (node != tail) {
- while (node->next_p != tail)
- node = node->next_p;
- node->next_p = 0;
- queue_p->tail_p = node;
- }
-
- cy_as_destroy_usb_func_c_b_node(tail);
- }
- break;
-
- case CYAS_USB_IO_CB:
- {
- cy_as_usb_io_c_b_node *node =
- (cy_as_usb_io_c_b_node *)
- queue_p->head_p;
- cy_as_usb_io_c_b_node *tail =
- (cy_as_usb_io_c_b_node *)
- queue_p->tail_p;
- if (node != tail) {
- while (node->next_p != tail)
- node = node->next_p;
- node->next_p = 0;
- queue_p->tail_p = node;
- }
- cy_as_destroy_usb_io_c_b_node(tail);
- }
- break;
-
- case CYAS_STORAGE_IO_CB:
- {
- cy_as_storage_io_c_b_node *node =
- (cy_as_storage_io_c_b_node *)
- queue_p->head_p;
- cy_as_storage_io_c_b_node *tail =
- (cy_as_storage_io_c_b_node *)
- queue_p->tail_p;
- if (node != tail) {
- while (node->next_p != tail)
- node = node->next_p;
- node->next_p = 0;
- queue_p->tail_p = node;
- }
- cy_as_destroy_storage_io_c_b_node(tail);
- }
- break;
-
- default:
- cy_as_hal_assert(cy_false);
- }
-
- queue_p->count--;
- if (queue_p->count == 0) {
- queue_p->head_p = 0;
- queue_p->tail_p = 0;
- }
- }
-
- cy_as_hal_enable_interrupts(int_state);
-}
-
-/* Removes the first CyAsCBNode from the queue and frees it */
-void
-cy_as_remove_c_b_node(cy_as_c_b_queue *queue_p)
-{
- uint32_t int_state;
-
- int_state = cy_as_hal_disable_interrupts();
-
- cy_as_hal_assert(queue_p->count >= 0);
- if (queue_p->count > 0) {
- if (queue_p->type == CYAS_USB_FUNC_CB) {
- cy_as_usb_func_c_b_node *node =
- (cy_as_usb_func_c_b_node *)
- queue_p->head_p;
- queue_p->head_p = node->next_p;
- cy_as_destroy_usb_func_c_b_node(node);
- } else if (queue_p->type == CYAS_USB_IO_CB) {
- cy_as_usb_io_c_b_node *node =
- (cy_as_usb_io_c_b_node *)
- queue_p->head_p;
- queue_p->head_p = node->next_p;
- cy_as_destroy_usb_io_c_b_node(node);
- } else if (queue_p->type == CYAS_STORAGE_IO_CB) {
- cy_as_storage_io_c_b_node *node =
- (cy_as_storage_io_c_b_node *)
- queue_p->head_p;
- queue_p->head_p = node->next_p;
- cy_as_destroy_storage_io_c_b_node(node);
- } else if (queue_p->type == CYAS_FUNC_CB) {
- cy_as_func_c_b_node *node =
- (cy_as_func_c_b_node *)
- queue_p->head_p;
- queue_p->head_p = node->next_p;
- cy_as_destroy_func_c_b_node(node);
- } else {
- cy_as_hal_assert(cy_false);
- }
-
- queue_p->count--;
- if (queue_p->count == 0) {
- queue_p->head_p = 0;
- queue_p->tail_p = 0;
- }
- }
-
- cy_as_hal_enable_interrupts(int_state);
-}
-
-void my_print_func_c_b_node(cy_as_func_c_b_node *node)
-{
- cy_as_funct_c_b_type type =
- cy_as_funct_c_b_type_get_type(node->data_type);
- cy_as_hal_print_message("[cd:%2u dt:%2u cb:0x%08x "
- "d:0x%08x nt:%1i]", node->client_data, type,
- (uint32_t)node->cb_p, (uint32_t)node->data,
- node->node_type);
-}
-
-void my_print_c_b_queue(cy_as_c_b_queue *queue_p)
-{
- uint32_t i = 0;
-
- cy_as_hal_print_message("| count: %u type: ", queue_p->count);
-
- if (queue_p->type == CYAS_USB_FUNC_CB) {
- cy_as_hal_print_message("USB_FUNC_CB\n");
- } else if (queue_p->type == CYAS_USB_IO_CB) {
- cy_as_hal_print_message("USB_IO_CB\n");
- } else if (queue_p->type == CYAS_STORAGE_IO_CB) {
- cy_as_hal_print_message("STORAGE_IO_CB\n");
- } else if (queue_p->type == CYAS_FUNC_CB) {
- cy_as_func_c_b_node *node = queue_p->head_p;
- cy_as_hal_print_message("FUNC_CB\n");
- if (queue_p->count > 0) {
- cy_as_hal_print_message("| head->");
-
- for (i = 0; i < queue_p->count; i++) {
- if (node) {
- cy_as_hal_print_message("->");
- my_print_func_c_b_node(node);
- node = node->next_p;
- } else
- cy_as_hal_print_message("->[NULL]\n");
- }
-
- cy_as_hal_print_message("\n| tail->");
- my_print_func_c_b_node(queue_p->tail_p);
- cy_as_hal_print_message("\n");
- }
- } else {
- cy_as_hal_print_message("INVALID\n");
- }
-
- cy_as_hal_print_message("|----------\n");
-}
-
-
-/* Removes and frees all pending callbacks */
-void
-cy_as_clear_c_b_queue(cy_as_c_b_queue *queue_p)
-{
- uint32_t int_state = cy_as_hal_disable_interrupts();
-
- while (queue_p->count != 0)
- cy_as_remove_c_b_node(queue_p);
-
- cy_as_hal_enable_interrupts(int_state);
-}
-
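-/*
- * common helper for issuing an asynchronous request: a callback
- * node describing the caller's completion function is appended to
- * the given queue, the request is tagged with the request type
- * flag and handed to the low level layer; if the send fails, the
- * node that was just queued (the tail) is removed again.
- */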
-cy_as_return_status_t
-cy_as_misc_send_request(cy_as_device *dev_p,
- cy_as_function_callback cb,
- uint32_t client,
- cy_as_funct_c_b_type type,
- void *data,
- cy_as_c_b_queue *queue,
- uint16_t req_type,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_response_callback rcb)
-{
-
- cy_as_func_c_b_node *cbnode = cy_as_create_func_c_b_node_data(cb,
- client, type, data);
- cy_as_return_status_t ret;
-
- if (cbnode == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
- else
- cy_as_insert_c_b_node(queue, cbnode);
-
- req_p->flags |= req_type;
-
- ret = cy_as_ll_send_request(dev_p, req_p, reply_p, cy_false, rcb);
- if (ret != CY_AS_ERROR_SUCCESS)
- cy_as_remove_c_b_tail_node(queue);
-
- return ret;
-}
-
-void
-cy_as_misc_cancel_ex_requests(cy_as_device *dev_p)
-{
- int i;
- for (i = 0; i < CY_RQT_CONTEXT_COUNT; i++)
- cy_as_ll_remove_all_requests(dev_p, dev_p->context[i]);
-}
-
-
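-/*
- * completion handler for asynchronous misc requests. the head node
- * of the callback queue for the request context carries the user
- * callback and any output buffer; the firmware response is decoded
- * according to the request code, the user callback is invoked and
- * the node is removed from the queue.
- */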
-static void
-cy_as_misc_func_callback(cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t stat)
-{
- cy_as_func_c_b_node *node = NULL;
- cy_as_return_status_t ret;
-
- cy_bool ex_request = (rqt->flags & CY_AS_REQUEST_RESPONSE_EX)
- == CY_AS_REQUEST_RESPONSE_EX;
- cy_bool ms_request = (rqt->flags & CY_AS_REQUEST_RESPONSE_MS)
- == CY_AS_REQUEST_RESPONSE_MS;
- uint8_t code;
- uint32_t type;
- uint8_t cntxt;
-
- cy_as_hal_assert(ex_request || ms_request);
- (void) ex_request;
- (void) ms_request;
- (void)context;
-
- cntxt = cy_as_ll_request_response__get_context(rqt);
- code = cy_as_ll_request_response__get_code(rqt);
-
- switch (cntxt) {
- case CY_RQT_GENERAL_RQT_CONTEXT:
- cy_as_hal_assert(dev_p->func_cbs_misc->count != 0);
- cy_as_hal_assert(dev_p->func_cbs_misc->type == CYAS_FUNC_CB);
- node = (cy_as_func_c_b_node *)dev_p->func_cbs_misc->head_p;
- type = cy_as_funct_c_b_type_get_type(node->data_type);
-
- switch (code) {
- case CY_RQT_GET_FIRMWARE_VERSION:
- cy_as_hal_assert(node->data != 0);
- cy_as_hal_assert(type ==
- CY_FUNCT_CB_MISC_GETFIRMWAREVERSION);
- ret = my_handle_response_get_firmware_version(dev_p,
- rqt, resp,
- (cy_as_get_firmware_version_data *)node->data);
- break;
- case CY_RQT_READ_MCU_REGISTER:
- cy_as_hal_assert(node->data != 0);
- cy_as_hal_assert(type ==
- CY_FUNCT_CB_MISC_READMCUREGISTER);
- ret = my_handle_response_read_m_c_u_register(dev_p, rqt,
- resp, (uint8_t *)node->data);
- break;
- case CY_RQT_GET_GPIO_STATE:
- cy_as_hal_assert(node->data != 0);
- cy_as_hal_assert(type ==
- CY_FUNCT_CB_MISC_GETGPIOVALUE);
- ret = my_handle_response_get_gpio_value(dev_p, rqt,
- resp, (uint8_t *)node->data);
- break;
- case CY_RQT_SET_SD_CLOCK_FREQ:
- cy_as_hal_assert(type == CY_FUNCT_CB_MISC_SETSDFREQ);
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- case CY_RQT_CONTROL_ANTIOCH_HEARTBEAT:
- cy_as_hal_assert(type ==
- CY_FUNCT_CB_MISC_HEARTBEATCONTROL);
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- case CY_RQT_WRITE_MCU_REGISTER:
- cy_as_hal_assert(type ==
- CY_FUNCT_CB_MISC_WRITEMCUREGISTER);
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- case CY_RQT_STORAGE_MEDIA_CHANGED:
- cy_as_hal_assert(type ==
- CY_FUNCT_CB_MISC_STORAGECHANGED);
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- case CY_RQT_SET_GPIO_STATE:
- cy_as_hal_assert(type ==
- CY_FUNCT_CB_MISC_SETGPIOVALUE);
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- case CY_RQT_SET_TRACE_LEVEL:
- cy_as_hal_assert(type ==
- CY_FUNCT_CB_MISC_SETTRACELEVEL);
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- if (ret == CY_AS_ERROR_INVALID_RESPONSE)
- ret = CY_AS_ERROR_NOT_SUPPORTED;
- break;
- case CY_RQT_PREPARE_FOR_STANDBY:
- cy_as_hal_assert(type ==
- CY_FUNCT_CB_MISC_ENTERSTANDBY);
- ret = my_handle_response_enter_standby(dev_p, rqt, resp,
- (cy_bool)node->data);
- break;
- case CY_RQT_ENTER_SUSPEND_MODE:
- cy_as_hal_assert(type ==
- CY_FUNCT_CB_MISC_ENTERSUSPEND);
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- if (ret == CY_AS_ERROR_SUCCESS)
- cy_as_device_set_suspend_mode(dev_p);
-
- break;
- case CY_RQT_RESERVE_LNA_BOOT_AREA:
- cy_as_hal_assert(type ==
- CY_FUNCT_CB_MISC_RESERVELNABOOTAREA);
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- case CY_RQT_SDPOLARITY:
- cy_as_hal_assert(type ==
- CY_FUNCT_CB_MISC_SETSDPOLARITY);
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- default:
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- cy_as_hal_assert(cy_false);
- break;
- }
- break;
-
- case CY_RQT_RESOURCE_RQT_CONTEXT:
- cy_as_hal_assert(dev_p->func_cbs_res->count != 0);
- cy_as_hal_assert(dev_p->func_cbs_res->type == CYAS_FUNC_CB);
- node = (cy_as_func_c_b_node *)dev_p->func_cbs_res->head_p;
- type = cy_as_funct_c_b_type_get_type(node->data_type);
-
- switch (code) {
- case CY_RQT_ACQUIRE_RESOURCE:
- /* The node->data field is actually an enum value
- * which could be 0, thus no assert is done */
- cy_as_hal_assert(type ==
- CY_FUNCT_CB_MISC_ACQUIRERESOURCE);
- ret = my_handle_response_acquire_resource(dev_p, rqt,
- resp, (cy_as_resource_type *)node->data);
- break;
- default:
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- cy_as_hal_assert(cy_false);
- break;
- }
- break;
-
- default:
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- cy_as_hal_assert(cy_false);
- break;
- }
-
- /*
- * if the low level layer returns a direct error, use the
- * corresponding error code. if not, use the error code
- * based on the response from firmware.
- */
- if (stat == CY_AS_ERROR_SUCCESS)
- stat = ret;
-
- /* Call the user Callback */
- node->cb_p((cy_as_device_handle)dev_p, stat, node->client_data,
- node->data_type, node->data);
- if (cntxt == CY_RQT_GENERAL_RQT_CONTEXT)
- cy_as_remove_c_b_node(dev_p->func_cbs_misc);
- else
- cy_as_remove_c_b_node(dev_p->func_cbs_res);
-
-}
-
-
-
-/*[]*/
diff --git a/drivers/staging/westbridge/astoria/api/src/cyasmtp.c b/drivers/staging/westbridge/astoria/api/src/cyasmtp.c
deleted file mode 100644
index 8598364f7ab..00000000000
--- a/drivers/staging/westbridge/astoria/api/src/cyasmtp.c
+++ /dev/null
@@ -1,1136 +0,0 @@
-/* Cypress West Bridge API source file (cyasmtp.c)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#include "../../include/linux/westbridge/cyashal.h"
-#include "../../include/linux/westbridge/cyasmtp.h"
-#include "../../include/linux/westbridge/cyaserr.h"
-#include "../../include/linux/westbridge/cyasdma.h"
-#include "../../include/linux/westbridge/cyaslowlevel.h"
-
-static void
-cy_as_mtp_func_callback(cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t stat);
-
-static cy_as_return_status_t
-is_mtp_active(cy_as_device *dev_p)
-{
- if (!cy_as_device_is_configured(dev_p))
- return CY_AS_ERROR_NOT_CONFIGURED;
-
- if (!cy_as_device_is_firmware_loaded(dev_p))
- return CY_AS_ERROR_NO_FIRMWARE;
-
- if (dev_p->mtp_count == 0)
- return CY_AS_ERROR_NOT_RUNNING;
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-static void
-my_mtp_request_callback(cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *resp_p,
- cy_as_return_status_t ret)
-{
- uint16_t val, ev, status;
- uint16_t mtp_datalen = 0;
- uint32_t bytecount_l, bytecount_h;
- cy_as_mtp_send_object_complete_data send_obj_data;
- cy_as_mtp_get_object_complete_data get_obj_data;
- cy_as_dma_end_point *ep_p;
-
- uint8_t code = cy_as_ll_request_response__get_code(req_p);
-
- (void)resp_p;
- (void)context;
- (void)ret;
-
- switch (code) {
- case CY_RQT_MTP_EVENT:
- val = cy_as_ll_request_response__get_word(req_p, 0);
- /* MSB indicates status of read/write */
- status = (val >> 8) & 0xFF;
- /* event type */
- ev = val & 0xFF;
- switch (ev) {
- case 0: /* SendObject Complete */
- {
- bytecount_l =
- cy_as_ll_request_response__get_word
- (req_p, 1);
- bytecount_h =
- cy_as_ll_request_response__get_word
- (req_p, 2);
- send_obj_data.byte_count =
- (bytecount_h << 16) | bytecount_l;
-
- send_obj_data.status = status;
-
-			/* reuse the byte count locals for the transaction id */
- bytecount_l =
- cy_as_ll_request_response__get_word
- (req_p, 3);
- bytecount_h =
- cy_as_ll_request_response__get_word
- (req_p, 4);
- send_obj_data.transaction_id =
- (bytecount_h << 16) | bytecount_l;
-
- dev_p->mtp_turbo_active = cy_false;
-
- if (dev_p->mtp_event_cb)
- dev_p->mtp_event_cb(
- (cy_as_device_handle) dev_p,
- cy_as_mtp_send_object_complete,
- &send_obj_data);
- }
- break;
-
- case 1: /* GetObject Complete */
- {
- bytecount_l =
- cy_as_ll_request_response__get_word
- (req_p, 1);
- bytecount_h =
- cy_as_ll_request_response__get_word
- (req_p, 2);
-
- get_obj_data.byte_count =
- (bytecount_h << 16) | bytecount_l;
-
- get_obj_data.status = status;
-
- dev_p->mtp_turbo_active = cy_false;
-
- if (dev_p->mtp_event_cb)
- dev_p->mtp_event_cb(
- (cy_as_device_handle) dev_p,
- cy_as_mtp_get_object_complete,
- &get_obj_data);
- }
- break;
-
- case 2: /* BlockTable Needed */
- {
- if (dev_p->mtp_event_cb)
- dev_p->mtp_event_cb(
- (cy_as_device_handle) dev_p,
- cy_as_mtp_block_table_needed, 0);
- }
- break;
- default:
- cy_as_hal_print_message("invalid event type\n");
- cy_as_ll_send_data_response(dev_p,
- CY_RQT_TUR_RQT_CONTEXT,
- CY_RESP_MTP_INVALID_EVENT,
- sizeof(ev), &ev);
- break;
- }
- break;
-
- case CY_RQT_TURBO_CMD_FROM_HOST:
- {
- mtp_datalen =
- cy_as_ll_request_response__get_word(req_p, 1);
-
- /* Get the endpoint pointer based on
- * the endpoint number */
- ep_p = CY_AS_NUM_EP(dev_p, CY_AS_MTP_READ_ENDPOINT);
-
- /* The event should arrive only after the DMA operation
- * has been queued. */
- cy_as_hal_assert(ep_p->queue_p != 0);
-
- /* Put the len in ep data information in
- * dmaqueue and kick start the queue */
- cy_as_hal_assert(ep_p->queue_p->size >= mtp_datalen);
-
- if (mtp_datalen == 0) {
- cy_as_dma_completed_callback(dev_p->tag,
- CY_AS_MTP_READ_ENDPOINT, 0,
- CY_AS_ERROR_SUCCESS);
- } else {
- ep_p->maxhwdata = mtp_datalen;
-
- /*
- * make sure that the DMA status for this
- * EP is not running, so that the call to
- * cy_as_dma_kick_start gets this transfer
- * going. note: in MTP mode, we never leave
- * a DMA transfer of greater than one packet
- * running. so, it is okay to override the
- * status here and start the next packet
- * transfer.
- */
- cy_as_dma_end_point_set_stopped(ep_p);
-
- /* Kick start the queue if it is not running */
- cy_as_dma_kick_start(dev_p,
- CY_AS_MTP_READ_ENDPOINT);
- }
- }
- break;
-
- case CY_RQT_TURBO_START_WRITE_DMA:
- {
- /*
- * now that the firmware is ready to receive the
- * next packet of data, start the corresponding
- * DMA transfer. first, ensure that a DMA
- * operation is still pending in the queue for the
- * write endpoint.
- */
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_TUR_RQT_CONTEXT,
- CY_AS_ERROR_SUCCESS, 0);
-
- ep_p = CY_AS_NUM_EP(dev_p, CY_AS_MTP_WRITE_ENDPOINT);
- cy_as_hal_assert(ep_p->queue_p != 0);
-
- cy_as_dma_end_point_set_stopped(ep_p);
- cy_as_dma_kick_start(dev_p, CY_AS_MTP_WRITE_ENDPOINT);
- }
- break;
-
- default:
- cy_as_hal_print_message("invalid request received "
- "on TUR context\n");
- val = req_p->box0;
- cy_as_ll_send_data_response(dev_p, CY_RQT_TUR_RQT_CONTEXT,
- CY_RESP_INVALID_REQUEST, sizeof(val), &val);
- break;
- }
-}
-
-static cy_as_return_status_t
-my_handle_response_no_data(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-static cy_as_return_status_t
-my_handle_response_mtp_start(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_return_status_t ret)
-{
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- dev_p->mtp_count++;
-
- cy_as_dma_enable_end_point(dev_p, CY_AS_MTP_READ_ENDPOINT,
- cy_true, cy_as_direction_out);
- dev_p->usb_config[CY_AS_MTP_READ_ENDPOINT].enabled = cy_true;
- dev_p->usb_config[CY_AS_MTP_READ_ENDPOINT].dir = cy_as_usb_out;
- dev_p->usb_config[CY_AS_MTP_READ_ENDPOINT].type = cy_as_usb_bulk;
-
- cy_as_dma_enable_end_point(dev_p, CY_AS_MTP_WRITE_ENDPOINT,
- cy_true, cy_as_direction_in);
- dev_p->usb_config[CY_AS_MTP_WRITE_ENDPOINT].enabled = cy_true;
- dev_p->usb_config[CY_AS_MTP_WRITE_ENDPOINT].dir = cy_as_usb_in;
- dev_p->usb_config[CY_AS_MTP_WRITE_ENDPOINT].type = cy_as_usb_bulk;
-
- /* Packet size is 512 bytes */
- cy_as_dma_set_max_dma_size(dev_p, 0x02, 0x0200);
-	/* Packet size is 64 bytes until a switch to high speed happens. */
- cy_as_dma_set_max_dma_size(dev_p, 0x06, 0x40);
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- cy_as_ll_register_request_callback(dev_p,
- CY_RQT_TUR_RQT_CONTEXT, 0);
-
- cy_as_device_clear_m_s_s_pending(dev_p);
-
- return ret;
-}
-
-
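-/*
- * illustrative usage (not taken from this driver): the storage and
- * USB stacks must already be running before MTP can be started,
- * e.g.
- *
- *	ret = cy_as_mtp_start(h, my_mtp_event_cb, 0, 0);
- *
- * where "h" is the device handle and "my_mtp_event_cb" is a
- * hypothetical cy_as_mtp_event_callback provided by the caller.
- */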
-cy_as_return_status_t
-cy_as_mtp_start(cy_as_device_handle handle,
- cy_as_mtp_event_callback event_c_b,
- cy_as_function_callback cb,
- uint32_t client
- )
-{
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_device *dev_p;
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (!cy_as_device_is_configured(dev_p))
- return CY_AS_ERROR_NOT_CONFIGURED;
-
- if (!cy_as_device_is_firmware_loaded(dev_p))
- return CY_AS_ERROR_NO_FIRMWARE;
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- if (cy_as_device_is_m_s_s_pending(dev_p))
- return CY_AS_ERROR_STARTSTOP_PENDING;
-
- if (dev_p->storage_count == 0)
- return CY_AS_ERROR_NOT_RUNNING;
-
- if (dev_p->usb_count == 0)
- return CY_AS_ERROR_NOT_RUNNING;
-
- if (dev_p->is_mtp_firmware == 0)
- return CY_AS_ERROR_NOT_SUPPORTED;
-
- cy_as_device_set_m_s_s_pending(dev_p);
-
- if (dev_p->mtp_count == 0) {
-
- dev_p->mtp_event_cb = event_c_b;
- /*
- * we register here because the start request may cause
- * events to occur before the response to the start request.
- */
- cy_as_ll_register_request_callback(dev_p,
- CY_RQT_TUR_RQT_CONTEXT, my_mtp_request_callback);
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_START_MTP, CY_RQT_TUR_RQT_CONTEXT, 0);
- if (req_p == 0) {
- cy_as_device_clear_m_s_s_pending(dev_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- /* Reserve space for the reply, the reply data will
- * not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_device_clear_m_s_s_pending(dev_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_mtp_start(dev_p, req_p,
- reply_p, ret);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MTP_START, 0, dev_p->func_cbs_mtp,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_mtp_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
- } else {
- dev_p->mtp_count++;
- if (cb)
- cb(handle, ret, client, CY_FUNCT_CB_MTP_START, 0);
- }
-
- cy_as_device_clear_m_s_s_pending(dev_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_mtp_start);
-
-static cy_as_return_status_t
-my_handle_response_mtp_stop(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_return_status_t ret)
-{
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /*
-	 * we successfully shut down the stack, so decrement
- * to make the count zero.
- */
- dev_p->mtp_count--;
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- cy_as_ll_register_request_callback(dev_p,
- CY_RQT_TUR_RQT_CONTEXT, 0);
-
- cy_as_device_clear_m_s_s_pending(dev_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_mtp_stop(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client
- )
-{
- cy_as_ll_request_response *req_p = 0, *reply_p = 0;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_mtp_stop called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_mtp_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- if (cy_as_device_is_m_s_s_pending(dev_p))
- return CY_AS_ERROR_STARTSTOP_PENDING;
-
- cy_as_device_set_m_s_s_pending(dev_p);
-
- if (dev_p->mtp_count == 1) {
- /* Create the request to send to the West
- * Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_STOP_MTP,
- CY_RQT_TUR_RQT_CONTEXT, 0);
- if (req_p == 0) {
- ret = CY_AS_ERROR_OUT_OF_MEMORY;
- goto destroy;
- }
-
- /* Reserve space for the reply, the reply data will
- * not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- ret = CY_AS_ERROR_OUT_OF_MEMORY;
- goto destroy;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_mtp_stop(dev_p, req_p,
- reply_p, ret);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MTP_STOP, 0, dev_p->func_cbs_mtp,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_mtp_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
- } else if (dev_p->mtp_count > 1) {
-
- dev_p->mtp_count--;
-
- if (cb)
- cb(handle, ret, client, CY_FUNCT_CB_MTP_STOP, 0);
- }
-
- cy_as_device_clear_m_s_s_pending(dev_p);
-
- return ret;
-}
-
-static void
-mtp_write_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t ret)
-{
- cy_as_hal_assert(context == CY_RQT_TUR_RQT_CONTEXT);
-
- if (ret == CY_AS_ERROR_SUCCESS) {
- if (cy_as_ll_request_response__get_code(resp) !=
- CY_RESP_SUCCESS_FAILURE)
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- else
- ret = cy_as_ll_request_response__get_word(resp, 0);
- }
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- /* Firmware failed the request. Cancel the DMA transfer. */
- cy_as_dma_cancel(dev_p, 0x04, CY_AS_ERROR_CANCELED);
- cy_as_device_clear_storage_async_pending(dev_p);
- }
-
- cy_as_ll_destroy_response(dev_p, resp);
- cy_as_ll_destroy_request(dev_p, rqt);
-}
-
-static void
-async_write_request_callback(cy_as_device *dev_p,
- cy_as_end_point_number_t ep, void *buf_p, uint32_t size,
- cy_as_return_status_t err)
-{
- cy_as_device_handle h;
- cy_as_function_callback cb;
-
- (void)size;
- (void)buf_p;
- (void)ep;
-
-
- cy_as_log_debug_message(6, "async_write_request_callback called");
-
- h = (cy_as_device_handle)dev_p;
-
- cb = dev_p->mtp_cb;
- dev_p->mtp_cb = 0;
-
- cy_as_device_clear_storage_async_pending(dev_p);
-
- if (cb)
- cb(h, err, dev_p->mtp_client, dev_p->mtp_op, 0);
-
-}
-
-static void
-sync_mtp_callback(cy_as_device *dev_p, cy_as_end_point_number_t ep,
- void *buf_p, uint32_t size, cy_as_return_status_t err)
-{
- (void)ep;
- (void)buf_p;
- (void)size;
-
- dev_p->mtp_error = err;
-}
-
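-/*
- * common worker for cy_as_mtp_init_send_object(),
- * cy_as_mtp_init_get_object() and cy_as_mtp_send_block_table(): it
- * queues a DMA write of the block table on endpoint 4 and sends
- * the matching request (byte count, plus the transaction id for
- * get-object) to the firmware, either synchronously or through
- * mtp_write_callback()/async_write_request_callback().
- */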
-static cy_as_return_status_t
-cy_as_mtp_operation(cy_as_device *dev_p,
- cy_as_mtp_block_table *blk_table,
- uint32_t num_bytes,
- uint32_t transaction_id,
- cy_as_function_callback cb,
- uint32_t client,
- uint8_t rqttype
- )
-{
- cy_as_ll_request_response *req_p = 0, *reply_p = 0;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint32_t mask = 0;
- cy_as_funct_c_b_type mtp_cb_op = 0;
- uint16_t size = 2;
-
- if (dev_p->mtp_count == 0)
- return CY_AS_ERROR_NOT_RUNNING;
-
- if (rqttype == CY_RQT_INIT_SEND_OBJECT) {
- mtp_cb_op = CY_FUNCT_CB_MTP_INIT_SEND_OBJECT;
- dev_p->mtp_turbo_active = cy_true;
- } else if (rqttype == CY_RQT_INIT_GET_OBJECT) {
- mtp_cb_op = CY_FUNCT_CB_MTP_INIT_GET_OBJECT;
- dev_p->mtp_turbo_active = cy_true;
- } else
- mtp_cb_op = CY_FUNCT_CB_MTP_SEND_BLOCK_TABLE;
-
- ret = is_mtp_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (CY_RQT_INIT_GET_OBJECT == rqttype)
- size = 4;
-
- /* Create the request to send to the West
- * Bridge device */
- req_p = cy_as_ll_create_request(dev_p, rqttype,
- CY_RQT_TUR_RQT_CONTEXT, size);
- if (req_p == 0) {
- ret = CY_AS_ERROR_OUT_OF_MEMORY;
- goto destroy;
- }
-
- /* Reserve space for the reply, the reply data will
- * not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- ret = CY_AS_ERROR_OUT_OF_MEMORY;
- goto destroy;
- }
-
- cy_as_ll_request_response__set_word(req_p, 0,
- (uint16_t)(num_bytes & 0xFFFF));
- cy_as_ll_request_response__set_word(req_p, 1,
- (uint16_t)((num_bytes >> 16) & 0xFFFF));
-
- /* If it is GET_OBJECT, send transaction id as well*/
- if (CY_RQT_INIT_GET_OBJECT == rqttype) {
- cy_as_ll_request_response__set_word(req_p, 2,
- (uint16_t)(transaction_id & 0xFFFF));
- cy_as_ll_request_response__set_word(req_p, 3,
- (uint16_t)((transaction_id >> 16) & 0xFFFF));
- }
-
- if (cb == 0) {
- /* Queue the DMA request for block table write */
- ret = cy_as_dma_queue_request(dev_p, 4, blk_table,
- sizeof(cy_as_mtp_block_table), cy_false,
- cy_false, sync_mtp_callback);
-
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_dma_cancel(dev_p, 4, CY_AS_ERROR_CANCELED);
- cy_as_device_clear_storage_async_pending(dev_p);
-
- goto destroy;
- }
-
- ret = cy_as_dma_drain_queue(dev_p, 4, cy_true);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- ret = dev_p->mtp_error;
- goto destroy;
- } else {
-#if 0
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MTP_INIT_SEND_OBJECT,
- 0, dev_p->func_cbs_mtp, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_mtp_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-#endif
-
-		/* Protect against interrupt driven code: since we are
-		 * using storage EP4, check whether any storage
-		 * activity is pending. */
- mask = cy_as_hal_disable_interrupts();
- if ((cy_as_device_is_storage_async_pending(dev_p)) ||
- (dev_p->storage_wait)) {
- cy_as_hal_enable_interrupts(mask);
- return CY_AS_ERROR_ASYNC_PENDING;
- }
- cy_as_device_set_storage_async_pending(dev_p);
- cy_as_hal_enable_interrupts(mask);
-
- dev_p->mtp_cb = cb;
- dev_p->mtp_client = client;
- dev_p->mtp_op = mtp_cb_op;
-
- ret = cy_as_ll_send_request(dev_p, req_p, reply_p,
- cy_false, mtp_write_callback);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- ret = cy_as_dma_queue_request(dev_p, 4, blk_table,
- sizeof(cy_as_mtp_block_table), cy_false, cy_false,
- async_write_request_callback);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- /* Kick start the queue if it is not running */
- cy_as_dma_kick_start(dev_p, 4);
-
- return CY_AS_ERROR_SUCCESS;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_mtp_init_send_object(cy_as_device_handle handle,
- cy_as_mtp_block_table *blk_table,
- uint32_t num_bytes,
- cy_as_function_callback cb,
- uint32_t client
- )
-{
- cy_as_device *dev_p;
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- return cy_as_mtp_operation(dev_p, blk_table, num_bytes, 0, cb,
- client, CY_RQT_INIT_SEND_OBJECT);
-
-}
-EXPORT_SYMBOL(cy_as_mtp_init_send_object);
-
-cy_as_return_status_t
-cy_as_mtp_init_get_object(cy_as_device_handle handle,
- cy_as_mtp_block_table *blk_table,
- uint32_t num_bytes,
- uint32_t transaction_id,
- cy_as_function_callback cb,
- uint32_t client
- )
-{
- cy_as_device *dev_p;
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- return cy_as_mtp_operation(dev_p, blk_table, num_bytes,
- transaction_id, cb, client, CY_RQT_INIT_GET_OBJECT);
-
-}
-EXPORT_SYMBOL(cy_as_mtp_init_get_object);
-
-static cy_as_return_status_t
-my_handle_response_cancel_send_object(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_return_status_t ret)
-{
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_mtp_cancel_send_object(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client
- )
-{
- cy_as_ll_request_response *req_p = 0, *reply_p = 0;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_device *dev_p;
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (dev_p->mtp_count == 0)
- return CY_AS_ERROR_NOT_RUNNING;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_CANCEL_SEND_OBJECT, CY_RQT_TUR_RQT_CONTEXT, 0);
- if (req_p == 0) {
- ret = CY_AS_ERROR_OUT_OF_MEMORY;
- goto destroy;
- }
-
- /* Reserve space for the reply, the reply data will
- * not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- ret = CY_AS_ERROR_OUT_OF_MEMORY;
- goto destroy;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_cancel_send_object(dev_p,
- req_p, reply_p, ret);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MTP_CANCEL_SEND_OBJECT, 0,
- dev_p->func_cbs_mtp, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_mtp_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_mtp_cancel_send_object);
-
-static cy_as_return_status_t
-my_handle_response_cancel_get_object(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_return_status_t ret)
-{
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_mtp_cancel_get_object(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client
- )
-{
- cy_as_ll_request_response *req_p = 0, *reply_p = 0;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_device *dev_p;
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (dev_p->mtp_count == 0)
- return CY_AS_ERROR_NOT_RUNNING;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_CANCEL_GET_OBJECT,
- CY_RQT_TUR_RQT_CONTEXT, 0);
- if (req_p == 0) {
- ret = CY_AS_ERROR_OUT_OF_MEMORY;
- goto destroy;
- }
-
- /* Reserve space for the reply, the reply data will
- * not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- ret = CY_AS_ERROR_OUT_OF_MEMORY;
- goto destroy;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_cancel_get_object(dev_p,
- req_p, reply_p, ret);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MTP_CANCEL_GET_OBJECT, 0,
- dev_p->func_cbs_mtp, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_mtp_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_mtp_cancel_get_object);
-
-cy_as_return_status_t
-cy_as_mtp_send_block_table(cy_as_device_handle handle,
- cy_as_mtp_block_table *blk_table,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p;
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- return cy_as_mtp_operation(dev_p, blk_table, 0, 0, cb,
- client, CY_RQT_SEND_BLOCK_TABLE);
-}
-
-static void
-cy_as_mtp_func_callback(cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t stat)
-{
- cy_as_func_c_b_node* node = (cy_as_func_c_b_node *)
- dev_p->func_cbs_mtp->head_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint8_t code;
- cy_bool delay_callback = cy_false;
-
- cy_as_hal_assert(dev_p->func_cbs_mtp->count != 0);
- cy_as_hal_assert(dev_p->func_cbs_mtp->type == CYAS_FUNC_CB);
-
- (void)context;
-
-	/* The handlers are responsible for deleting the
- * rqt and resp when they are finished
- */
- code = cy_as_ll_request_response__get_code(rqt);
- switch (code) {
- case CY_RQT_START_MTP:
- ret = my_handle_response_mtp_start(dev_p, rqt,
- resp, stat);
- break;
- case CY_RQT_STOP_MTP:
- ret = my_handle_response_mtp_stop(dev_p, rqt,
- resp, stat);
- break;
-#if 0
- case CY_RQT_INIT_SEND_OBJECT:
- ret = my_handle_response_init_send_object(dev_p,
- rqt, resp, stat, cy_true);
- delay_callback = cy_true;
- break;
-#endif
- case CY_RQT_CANCEL_SEND_OBJECT:
- ret = my_handle_response_cancel_send_object(dev_p,
- rqt, resp, stat);
- break;
-#if 0
- case CY_RQT_INIT_GET_OBJECT:
- ret = my_handle_response_init_get_object(dev_p,
- rqt, resp, stat, cy_true);
- delay_callback = cy_true;
- break;
-#endif
- case CY_RQT_CANCEL_GET_OBJECT:
- ret = my_handle_response_cancel_get_object(dev_p,
- rqt, resp, stat);
- break;
-#if 0
- case CY_RQT_SEND_BLOCK_TABLE:
- ret = my_handle_response_send_block_table(dev_p, rqt,
- resp, stat, cy_true);
- delay_callback = cy_true;
- break;
-#endif
- case CY_RQT_ENABLE_USB_PATH:
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- if (ret == CY_AS_ERROR_SUCCESS)
- dev_p->is_storage_only_mode = cy_false;
- break;
- default:
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- cy_as_hal_assert(cy_false);
- break;
- }
-
- /*
- * if the low level layer returns a direct error, use the
- * corresponding error code. if not, use the error code
- * based on the response from firmware.
- */
- if (stat == CY_AS_ERROR_SUCCESS)
- stat = ret;
-
- if (!delay_callback) {
- node->cb_p((cy_as_device_handle)dev_p, stat, node->client_data,
- node->data_type, node->data);
- cy_as_remove_c_b_node(dev_p->func_cbs_mtp);
- }
-}
-
-cy_as_return_status_t
-cy_as_mtp_storage_only_start(cy_as_device_handle handle)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (!cy_as_device_is_configured(dev_p))
- return CY_AS_ERROR_NOT_CONFIGURED;
-
- if (!cy_as_device_is_firmware_loaded(dev_p))
- return CY_AS_ERROR_NO_FIRMWARE;
-
- if (dev_p->storage_count == 0)
- return CY_AS_ERROR_NOT_RUNNING;
-
- dev_p->is_storage_only_mode = cy_true;
- return CY_AS_ERROR_SUCCESS;
-}
-EXPORT_SYMBOL(cy_as_mtp_storage_only_start);
-
-cy_as_return_status_t
-cy_as_mtp_storage_only_stop(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (!cy_as_device_is_configured(dev_p))
- return CY_AS_ERROR_NOT_CONFIGURED;
-
- if (!cy_as_device_is_firmware_loaded(dev_p))
- return CY_AS_ERROR_NO_FIRMWARE;
-
- if (dev_p->storage_count == 0)
- return CY_AS_ERROR_NOT_RUNNING;
-
- if (dev_p->is_storage_only_mode == cy_false)
- return CY_AS_ERROR_SUCCESS;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_ENABLE_USB_PATH, CY_RQT_TUR_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- ret = my_handle_response_no_data(dev_p, req_p,
- reply_p);
- if (ret == CY_AS_ERROR_SUCCESS)
- dev_p->is_storage_only_mode = cy_false;
- return ret;
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_MTP_STOP_STORAGE_ONLY, 0,
- dev_p->func_cbs_mtp, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_mtp_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_mtp_storage_only_stop);
-
diff --git a/drivers/staging/westbridge/astoria/api/src/cyasstorage.c b/drivers/staging/westbridge/astoria/api/src/cyasstorage.c
deleted file mode 100644
index 7abd6a35e82..00000000000
--- a/drivers/staging/westbridge/astoria/api/src/cyasstorage.c
+++ /dev/null
@@ -1,4125 +0,0 @@
-/* Cypress West Bridge API source file (cyasstorage.c)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-/*
-* Storage Design
-*
-* The storage module is fairly straightforward once the
-* DMA and LOWLEVEL modules have been designed. The
-* storage module simply takes requests from the user, queues
-* the associated DMA requests for action, and then sends
-* the low level requests to the West Bridge firmware.
-*
-*/
-
-#include "../../include/linux/westbridge/cyashal.h"
-#include "../../include/linux/westbridge/cyasstorage.h"
-#include "../../include/linux/westbridge/cyaserr.h"
-#include "../../include/linux/westbridge/cyasdevice.h"
-#include "../../include/linux/westbridge/cyaslowlevel.h"
-#include "../../include/linux/westbridge/cyasdma.h"
-#include "../../include/linux/westbridge/cyasregs.h"
-
-/* Map a pre-V1.2 media type to the V1.2+ bus number */
-cy_as_return_status_t
-cy_an_map_bus_from_media_type(cy_as_device *dev_p,
- cy_as_media_type type, cy_as_bus_number_t *bus)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint8_t code = (uint8_t)(1 << type);
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (!cy_as_device_is_configured(dev_p))
- return CY_AS_ERROR_NOT_CONFIGURED;
-
- if (!cy_as_device_is_firmware_loaded(dev_p))
- return CY_AS_ERROR_NO_FIRMWARE;
-
-
- if (dev_p->media_supported[0] & code) {
- if (dev_p->media_supported[1] & code) {
- /*
- * this media type could be supported on multiple
- * buses. so, report an address resolution error.
- */
- ret = CY_AS_ERROR_ADDRESS_RESOLUTION_ERROR;
- } else
- *bus = 0;
- } else {
- if (dev_p->media_supported[1] & code)
- *bus = 1;
- else
- ret = CY_AS_ERROR_NO_SUCH_MEDIA;
- }
-
- return ret;
-}
-
-static uint16_t
-create_address(cy_as_bus_number_t bus, uint32_t device, uint8_t unit)
-{
- cy_as_hal_assert(bus >= 0 && bus < CY_AS_MAX_BUSES);
- cy_as_hal_assert(device < 16);
-
- return (uint16_t)(((uint8_t)bus << 12) | (device << 8) | unit);
-}
-
-cy_as_media_type
-cy_as_storage_get_media_from_address(uint16_t v)
-{
- cy_as_media_type media = cy_as_media_max_media_value;
-
- switch (v & 0xFF) {
- case 0x00:
- break;
- case 0x01:
- media = cy_as_media_nand;
- break;
- case 0x02:
- media = cy_as_media_sd_flash;
- break;
- case 0x04:
- media = cy_as_media_mmc_flash;
- break;
- case 0x08:
- media = cy_as_media_ce_ata;
- break;
- case 0x10:
- media = cy_as_media_sdio;
- break;
- default:
- cy_as_hal_assert(0);
- break;
- }
-
- return media;
-}
-
-cy_as_bus_number_t
-cy_as_storage_get_bus_from_address(uint16_t v)
-{
- cy_as_bus_number_t bus = (cy_as_bus_number_t)((v >> 12) & 0x0f);
- cy_as_hal_assert(bus >= 0 && bus < CY_AS_MAX_BUSES);
- return bus;
-}
-
-uint32_t
-cy_as_storage_get_device_from_address(uint16_t v)
-{
- return (uint32_t)((v >> 8) & 0x0f);
-}
-
-static uint8_t
-get_unit_from_address(uint16_t v)
-{
- return (uint8_t)(v & 0xff);
-}
-
-static cy_as_return_status_t
-cy_as_map_bad_addr(uint16_t val)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_INVALID_RESPONSE;
-
- switch (val) {
- case 0:
- ret = CY_AS_ERROR_NO_SUCH_BUS;
- break;
- case 1:
- ret = CY_AS_ERROR_NO_SUCH_DEVICE;
- break;
- case 2:
- ret = CY_AS_ERROR_NO_SUCH_UNIT;
- break;
- case 3:
- ret = CY_AS_ERROR_INVALID_BLOCK;
- break;
- }
-
- return ret;
-}
-
-static void
-my_storage_request_callback(cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *resp_p,
- cy_as_return_status_t ret)
-{
- uint16_t val;
- uint16_t addr;
- cy_as_bus_number_t bus;
- uint32_t device;
- cy_as_device_handle h = (cy_as_device_handle)dev_p;
- cy_as_dma_end_point *ep_p = NULL;
-
- (void)resp_p;
- (void)context;
- (void)ret;
-
- switch (cy_as_ll_request_response__get_code(req_p)) {
- case CY_RQT_MEDIA_CHANGED:
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_STORAGE_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 0);
-
- /* Media has either been inserted or removed */
- addr = cy_as_ll_request_response__get_word(req_p, 0);
-
- bus = cy_as_storage_get_bus_from_address(addr);
- device = cy_as_storage_get_device_from_address(addr);
-
- /* Clear the entry for this device to force re-query later */
- cy_as_hal_mem_set(&(dev_p->storage_device_info[bus][device]), 0,
- sizeof(dev_p->storage_device_info[bus][device]));
-
- val = cy_as_ll_request_response__get_word(req_p, 1);
- if (dev_p->storage_event_cb_ms) {
- if (val == 1)
- dev_p->storage_event_cb_ms(h, bus,
- device, cy_as_storage_removed, 0);
- else
- dev_p->storage_event_cb_ms(h, bus,
- device, cy_as_storage_inserted, 0);
- } else if (dev_p->storage_event_cb) {
- if (val == 1)
- dev_p->storage_event_cb(h, bus,
- cy_as_storage_removed, 0);
- else
- dev_p->storage_event_cb(h, bus,
- cy_as_storage_inserted, 0);
- }
-
- break;
-
- case CY_RQT_ANTIOCH_CLAIM:
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_STORAGE_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 0);
- if (dev_p->storage_event_cb || dev_p->storage_event_cb_ms) {
- val = cy_as_ll_request_response__get_word(req_p, 0);
- if (dev_p->storage_event_cb_ms) {
- if (val & 0x0100)
- dev_p->storage_event_cb_ms(h, 0, 0,
- cy_as_storage_antioch, 0);
- if (val & 0x0200)
- dev_p->storage_event_cb_ms(h, 1, 0,
- cy_as_storage_antioch, 0);
- } else {
- if (val & 0x01)
- dev_p->storage_event_cb(h,
- cy_as_media_nand,
- cy_as_storage_antioch, 0);
- if (val & 0x02)
- dev_p->storage_event_cb(h,
- cy_as_media_sd_flash,
- cy_as_storage_antioch, 0);
- if (val & 0x04)
- dev_p->storage_event_cb(h,
- cy_as_media_mmc_flash,
- cy_as_storage_antioch, 0);
- if (val & 0x08)
- dev_p->storage_event_cb(h,
- cy_as_media_ce_ata,
- cy_as_storage_antioch, 0);
- }
- }
- break;
-
- case CY_RQT_ANTIOCH_RELEASE:
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_STORAGE_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 0);
- val = cy_as_ll_request_response__get_word(req_p, 0);
- if (dev_p->storage_event_cb_ms) {
- if (val & 0x0100)
- dev_p->storage_event_cb_ms(h, 0, 0,
- cy_as_storage_processor, 0);
- if (val & 0x0200)
- dev_p->storage_event_cb_ms(h, 1, 0,
- cy_as_storage_processor, 0);
- } else if (dev_p->storage_event_cb) {
- if (val & 0x01)
- dev_p->storage_event_cb(h,
- cy_as_media_nand,
- cy_as_storage_processor, 0);
- if (val & 0x02)
- dev_p->storage_event_cb(h,
- cy_as_media_sd_flash,
- cy_as_storage_processor, 0);
- if (val & 0x04)
- dev_p->storage_event_cb(h,
- cy_as_media_mmc_flash,
- cy_as_storage_processor, 0);
- if (val & 0x08)
- dev_p->storage_event_cb(h,
- cy_as_media_ce_ata,
- cy_as_storage_processor, 0);
- }
- break;
-
-
- case CY_RQT_SDIO_INTR:
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_STORAGE_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 0);
- val = cy_as_ll_request_response__get_word(req_p, 0);
- if (dev_p->storage_event_cb_ms) {
- if (val & 0x0100)
- dev_p->storage_event_cb_ms(h, 1, 0,
- cy_as_sdio_interrupt, 0);
- else
- dev_p->storage_event_cb_ms(h, 0, 0,
- cy_as_sdio_interrupt, 0);
-
- } else if (dev_p->storage_event_cb) {
- dev_p->storage_event_cb(h,
- cy_as_media_sdio, cy_as_sdio_interrupt, 0);
- }
- break;
-
- case CY_RQT_P2S_DMA_START:
- /* Do the DMA setup for the waiting operation. */
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_STORAGE_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 0);
- cy_as_device_set_p2s_dma_start_recvd(dev_p);
- if (dev_p->storage_oper == cy_as_op_read) {
- ep_p = CY_AS_NUM_EP(dev_p, CY_AS_P2S_READ_ENDPOINT);
- cy_as_dma_end_point_set_stopped(ep_p);
- cy_as_dma_kick_start(dev_p, CY_AS_P2S_READ_ENDPOINT);
- } else {
- ep_p = CY_AS_NUM_EP(dev_p, CY_AS_P2S_WRITE_ENDPOINT);
- cy_as_dma_end_point_set_stopped(ep_p);
- cy_as_dma_kick_start(dev_p, CY_AS_P2S_WRITE_ENDPOINT);
- }
- break;
-
- default:
- cy_as_hal_print_message("invalid request received "
- "on storage context\n");
- val = req_p->box0;
- cy_as_ll_send_data_response(dev_p, CY_RQT_STORAGE_RQT_CONTEXT,
- CY_RESP_INVALID_REQUEST, sizeof(val), &val);
- break;
- }
-}
-
-static cy_as_return_status_t
-is_storage_active(cy_as_device *dev_p)
-{
- if (!cy_as_device_is_configured(dev_p))
- return CY_AS_ERROR_NOT_CONFIGURED;
-
- if (!cy_as_device_is_firmware_loaded(dev_p))
- return CY_AS_ERROR_NO_FIRMWARE;
-
- if (dev_p->storage_count == 0)
- return CY_AS_ERROR_NOT_RUNNING;
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-static void
-cy_as_storage_func_callback(cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t ret);
-
-static cy_as_return_status_t
-my_handle_response_no_data(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-static cy_as_return_status_t
-my_handle_response_storage_start(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_return_status_t ret)
-{
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- if (dev_p->storage_count > 0 && ret ==
- CY_AS_ERROR_ALREADY_RUNNING)
- ret = CY_AS_ERROR_SUCCESS;
-
- ret = cy_as_dma_enable_end_point(dev_p,
- CY_AS_P2S_WRITE_ENDPOINT, cy_true, cy_as_direction_in);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- ret = cy_as_dma_set_max_dma_size(dev_p,
- CY_AS_P2S_WRITE_ENDPOINT, CY_AS_STORAGE_EP_SIZE);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- ret = cy_as_dma_enable_end_point(dev_p,
- CY_AS_P2S_READ_ENDPOINT, cy_true, cy_as_direction_out);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- ret = cy_as_dma_set_max_dma_size(dev_p,
- CY_AS_P2S_READ_ENDPOINT, CY_AS_STORAGE_EP_SIZE);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- cy_as_ll_register_request_callback(dev_p,
- CY_RQT_STORAGE_RQT_CONTEXT, my_storage_request_callback);
-
- /* Create the request/response used for storage reads and writes. */
- dev_p->storage_rw_req_p = cy_as_ll_create_request(dev_p,
- 0, CY_RQT_STORAGE_RQT_CONTEXT, 5);
- if (dev_p->storage_rw_req_p == 0) {
- ret = CY_AS_ERROR_OUT_OF_MEMORY;
- goto destroy;
- }
-
- dev_p->storage_rw_resp_p = cy_as_ll_create_response(dev_p, 5);
- if (dev_p->storage_rw_resp_p == 0) {
- cy_as_ll_destroy_request(dev_p, dev_p->storage_rw_req_p);
- ret = CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- /* Increment the storage count only if
- * the above functionality succeeds. */
- if (ret == CY_AS_ERROR_SUCCESS) {
- if (dev_p->storage_count == 0) {
- cy_as_hal_mem_set(dev_p->storage_device_info,
- 0, sizeof(dev_p->storage_device_info));
- dev_p->is_storage_only_mode = cy_false;
- }
-
- dev_p->storage_count++;
- }
-
- cy_as_device_clear_s_s_s_pending(dev_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_storage_start(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (!cy_as_device_is_configured(dev_p))
- return CY_AS_ERROR_NOT_CONFIGURED;
-
- if (!cy_as_device_is_firmware_loaded(dev_p))
- return CY_AS_ERROR_NO_FIRMWARE;
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- if (cy_as_device_is_s_s_s_pending(dev_p))
- return CY_AS_ERROR_STARTSTOP_PENDING;
-
- cy_as_device_set_s_s_s_pending(dev_p);
-
- if (dev_p->storage_count == 0) {
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_START_STORAGE, CY_RQT_STORAGE_RQT_CONTEXT, 1);
- if (req_p == 0) {
- cy_as_device_clear_s_s_s_pending(dev_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- /* Reserve space for the reply, the reply data
- * will not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_device_clear_s_s_s_pending(dev_p);
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_storage_start(dev_p,
- req_p, reply_p, ret);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_STOR_START, 0, dev_p->func_cbs_stor,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_storage_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as
- * part of the FuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
- } else {
- dev_p->storage_count++;
- if (cb)
- cb(handle, ret, client, CY_FUNCT_CB_STOR_START, 0);
- }
-
- cy_as_device_clear_s_s_s_pending(dev_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_storage_start);
-
-static cy_as_return_status_t
-my_handle_response_storage_stop(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_return_status_t ret)
-{
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- if (ret == CY_AS_ERROR_SUCCESS) {
- cy_as_ll_destroy_request(dev_p, dev_p->storage_rw_req_p);
- cy_as_ll_destroy_response(dev_p, dev_p->storage_rw_resp_p);
- dev_p->storage_count--;
- }
-
- cy_as_device_clear_s_s_s_pending(dev_p);
-
- return ret;
-}
-cy_as_return_status_t
-cy_as_storage_stop(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_storage_async_pending(dev_p))
- return CY_AS_ERROR_ASYNC_PENDING;
-
- if (cy_as_device_is_s_s_s_pending(dev_p))
- return CY_AS_ERROR_STARTSTOP_PENDING;
-
- cy_as_device_set_s_s_s_pending(dev_p);
-
- if (dev_p->storage_count == 1) {
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_STOP_STORAGE, CY_RQT_STORAGE_RQT_CONTEXT, 0);
- if (req_p == 0) {
- cy_as_device_clear_s_s_s_pending(dev_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- /* Reserve space for the reply, the reply data
- * will not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_device_clear_s_s_s_pending(dev_p);
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_storage_stop(dev_p,
- req_p, reply_p, ret);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_STOR_STOP, 0, dev_p->func_cbs_stor,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_storage_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed
- * as part of the MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
- } else if (dev_p->storage_count > 1) {
- dev_p->storage_count--;
- if (cb)
- cb(handle, ret, client, CY_FUNCT_CB_STOR_STOP, 0);
- }
-
- cy_as_device_clear_s_s_s_pending(dev_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_storage_stop);
-
-cy_as_return_status_t
-cy_as_storage_register_callback(cy_as_device_handle handle,
- cy_as_storage_event_callback callback)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (!cy_as_device_is_configured(dev_p))
- return CY_AS_ERROR_NOT_CONFIGURED;
-
- if (!cy_as_device_is_firmware_loaded(dev_p))
- return CY_AS_ERROR_NO_FIRMWARE;
-
- if (dev_p->storage_count == 0)
- return CY_AS_ERROR_NOT_RUNNING;
-
- dev_p->storage_event_cb = NULL;
- dev_p->storage_event_cb_ms = callback;
-
- return CY_AS_ERROR_SUCCESS;
-}
-EXPORT_SYMBOL(cy_as_storage_register_callback);
-
-
-static cy_as_return_status_t
-my_handle_response_storage_claim(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (cy_as_ll_request_response__get_code(reply_p) ==
- CY_RESP_NO_SUCH_ADDRESS) {
- ret = cy_as_map_bad_addr(
- cy_as_ll_request_response__get_word(reply_p, 3));
- goto destroy;
- }
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_MEDIA_CLAIMED_RELEASED) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- /* The response must be about the address I am
- * trying to claim or the firmware is broken */
- if ((cy_as_storage_get_bus_from_address(
- cy_as_ll_request_response__get_word(req_p, 0)) !=
- cy_as_storage_get_bus_from_address(
- cy_as_ll_request_response__get_word(reply_p, 0))) ||
- (cy_as_storage_get_device_from_address(
- cy_as_ll_request_response__get_word(req_p, 0)) !=
- cy_as_storage_get_device_from_address(
- cy_as_ll_request_response__get_word(reply_p, 0)))) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- if (cy_as_ll_request_response__get_word(reply_p, 1) != 1)
- ret = CY_AS_ERROR_NOT_ACQUIRED;
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-static cy_as_return_status_t
-my_storage_claim(cy_as_device *dev_p,
- void *data,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint16_t req_flags,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (dev_p->mtp_count > 0)
- return CY_AS_ERROR_NOT_VALID_IN_MTP;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_CLAIM_STORAGE, CY_RQT_STORAGE_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p,
- 0, create_address(bus, device, 0));
-
- /* Reserve space for the reply, the reply data will
- * not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 4);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_storage_claim(dev_p, req_p, reply_p);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_STOR_CLAIM, data, dev_p->func_cbs_stor,
- req_flags, req_p, reply_p,
- cy_as_storage_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as part of
- * the MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_storage_claim(cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- if (bus < 0 || bus >= CY_AS_MAX_BUSES)
- return CY_AS_ERROR_NO_SUCH_BUS;
-
- return my_storage_claim(dev_p, NULL, bus, device,
- CY_AS_REQUEST_RESPONSE_MS, cb, client);
-}
-EXPORT_SYMBOL(cy_as_storage_claim);
-
-static cy_as_return_status_t
-my_handle_response_storage_release(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (cy_as_ll_request_response__get_code(reply_p) ==
- CY_RESP_NO_SUCH_ADDRESS) {
- ret = cy_as_map_bad_addr(
- cy_as_ll_request_response__get_word(reply_p, 3));
- goto destroy;
- }
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_MEDIA_CLAIMED_RELEASED) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- /* The response must be about the address I am
- * trying to release or the firmware is broken */
- if ((cy_as_storage_get_bus_from_address(
- cy_as_ll_request_response__get_word(req_p, 0)) !=
- cy_as_storage_get_bus_from_address(
- cy_as_ll_request_response__get_word(reply_p, 0))) ||
- (cy_as_storage_get_device_from_address(
- cy_as_ll_request_response__get_word(req_p, 0)) !=
- cy_as_storage_get_device_from_address(
- cy_as_ll_request_response__get_word(reply_p, 0)))) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
-
- if (cy_as_ll_request_response__get_word(reply_p, 1) != 0)
- ret = CY_AS_ERROR_NOT_RELEASED;
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-static cy_as_return_status_t
-my_storage_release(cy_as_device *dev_p,
- void *data,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint16_t req_flags,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (dev_p->mtp_count > 0)
- return CY_AS_ERROR_NOT_VALID_IN_MTP;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_RELEASE_STORAGE,
- CY_RQT_STORAGE_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(
- req_p, 0, create_address(bus, device, 0));
-
- /* Reserve space for the reply, the reply
- * data will not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 4);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_storage_release(
- dev_p, req_p, reply_p);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_STOR_RELEASE, data, dev_p->func_cbs_stor,
- req_flags, req_p, reply_p,
- cy_as_storage_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as
- * part of the MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_storage_release(cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- if (bus < 0 || bus >= CY_AS_MAX_BUSES)
- return CY_AS_ERROR_NO_SUCH_BUS;
-
- return my_storage_release(dev_p, NULL, bus, device,
- CY_AS_REQUEST_RESPONSE_MS, cb, client);
-}
-EXPORT_SYMBOL(cy_as_storage_release);
-
-static cy_as_return_status_t
-my_handle_response_storage_query_bus(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- uint32_t *count)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint8_t code = cy_as_ll_request_response__get_code(reply_p);
- uint16_t v;
-
- if (code == CY_RESP_NO_SUCH_ADDRESS) {
- ret = CY_AS_ERROR_NO_SUCH_BUS;
- goto destroy;
- }
-
- if (code != CY_RESP_BUS_DESCRIPTOR) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- /*
- * verify that the response corresponds to the bus that was queried.
- */
- if (cy_as_storage_get_bus_from_address(
- cy_as_ll_request_response__get_word(req_p, 0)) !=
- cy_as_storage_get_bus_from_address(
- cy_as_ll_request_response__get_word(reply_p, 0))) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- v = cy_as_ll_request_response__get_word(reply_p, 1);
- if (req_p->flags & CY_AS_REQUEST_RESPONSE_MS) {
- /*
- * this request is only for the count of devices
- * on the bus. there is no need to check the media type.
- */
- if (v)
- *count = 1;
- else
- *count = 0;
- } else {
- /*
- * this request is for the count of devices of a
- * particular type. we need to check whether the media
- * type found matches the queried type.
- */
- cy_as_media_type queried = (cy_as_media_type)
- cy_as_ll_request_response__get_word(req_p, 1);
- cy_as_media_type found =
- cy_as_storage_get_media_from_address(v);
-
- if (queried == found)
- *count = 1;
- else
- *count = 0;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-my_storage_query_bus(cy_as_device *dev_p,
- cy_as_bus_number_t bus,
- cy_as_media_type type,
- uint16_t req_flags,
- uint32_t *count,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret;
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_funct_c_b_type cb_type = CY_FUNCT_CB_STOR_QUERYBUS;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- /* Create the request to send to the Antioch device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_QUERY_BUS, CY_RQT_STORAGE_RQT_CONTEXT, 2);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p,
- 0, create_address(bus, 0, 0));
- cy_as_ll_request_response__set_word(req_p, 1, (uint16_t)type);
-
- /* Reserve space for the reply, the reply data
- * will not exceed two words. */
- reply_p = cy_as_ll_create_response(dev_p, 2);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- req_p->flags |= req_flags;
- return my_handle_response_storage_query_bus(dev_p,
- req_p, reply_p, count);
- } else {
- if (req_flags == CY_AS_REQUEST_RESPONSE_EX)
- cb_type = CY_FUNCT_CB_STOR_QUERYMEDIA;
-
- ret = cy_as_misc_send_request(dev_p, cb, client, cb_type,
- count, dev_p->func_cbs_stor, req_flags,
- req_p, reply_p, cy_as_storage_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as part of
- * the MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_storage_query_bus(cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t *count,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- return my_storage_query_bus(dev_p, bus, cy_as_media_max_media_value,
- CY_AS_REQUEST_RESPONSE_MS, count, cb, client);
-}
-EXPORT_SYMBOL(cy_as_storage_query_bus);
-
-cy_as_return_status_t
-cy_as_storage_query_media(cy_as_device_handle handle,
- cy_as_media_type type,
- uint32_t *count,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_bus_number_t bus;
-
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- ret = cy_an_map_bus_from_media_type(dev_p, type, &bus);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- return my_storage_query_bus(dev_p, bus, type, CY_AS_REQUEST_RESPONSE_EX,
- count, cb, client);
-}
-EXPORT_SYMBOL(cy_as_storage_query_media);
-
-static cy_as_return_status_t
-my_handle_response_storage_query_device(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- void *data_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint16_t v;
- cy_as_bus_number_t bus;
- cy_as_media_type type;
- uint32_t device;
- cy_bool removable;
- cy_bool writeable;
- cy_bool locked;
- uint16_t block_size;
- uint32_t number_units;
- uint32_t number_eus;
-
- if (cy_as_ll_request_response__get_code(reply_p)
- == CY_RESP_NO_SUCH_ADDRESS) {
- ret = cy_as_map_bad_addr(
- cy_as_ll_request_response__get_word(reply_p, 3));
- goto destroy;
- }
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_DEVICE_DESCRIPTOR) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- /* Unpack the response */
- v = cy_as_ll_request_response__get_word(reply_p, 0);
- type = cy_as_storage_get_media_from_address(v);
- bus = cy_as_storage_get_bus_from_address(v);
- device = cy_as_storage_get_device_from_address(v);
-
- block_size = cy_as_ll_request_response__get_word(reply_p, 1);
-
- v = cy_as_ll_request_response__get_word(reply_p, 2);
- removable = (v & 0x8000) ? cy_true : cy_false;
- writeable = (v & 0x0100) ? cy_true : cy_false;
- locked = (v & 0x0200) ? cy_true : cy_false;
- number_units = (v & 0xff);
-
- number_eus = (cy_as_ll_request_response__get_word(reply_p, 3) << 16)
- | cy_as_ll_request_response__get_word(reply_p, 4);
-
- /* Store the results based on the version of the originating function */
- if (req_p->flags & CY_AS_REQUEST_RESPONSE_MS) {
- cy_as_storage_query_device_data *store_p =
- (cy_as_storage_query_device_data *)data_p;
-
- /* Make sure the response is about the address we asked
- * about - if not, firmware error */
- if ((bus != store_p->bus) || (device != store_p->device)) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- store_p->desc_p.type = type;
- store_p->desc_p.removable = removable;
- store_p->desc_p.writeable = writeable;
- store_p->desc_p.block_size = block_size;
- store_p->desc_p.number_units = number_units;
- store_p->desc_p.locked = locked;
- store_p->desc_p.erase_unit_size = number_eus;
- dev_p->storage_device_info[bus][device] = store_p->desc_p;
- } else {
- cy_as_storage_query_device_data_dep *store_p =
- (cy_as_storage_query_device_data_dep *)data_p;
-
- /* Make sure the response is about the address we asked
- * about - if not, firmware error */
- if ((type != store_p->type) || (device != store_p->device)) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- store_p->desc_p.type = type;
- store_p->desc_p.removable = removable;
- store_p->desc_p.writeable = writeable;
- store_p->desc_p.block_size = block_size;
- store_p->desc_p.number_units = number_units;
- store_p->desc_p.locked = locked;
- store_p->desc_p.erase_unit_size = number_eus;
- dev_p->storage_device_info[bus][device] = store_p->desc_p;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-static cy_as_return_status_t
-my_storage_query_device(cy_as_device *dev_p,
- void *data_p,
- uint16_t req_flags,
- cy_as_bus_number_t bus,
- uint32_t device,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- /* Create the request to send to the Antioch device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_QUERY_DEVICE, CY_RQT_STORAGE_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, device, 0));
-
- /* Reserve space for the reply, the reply data
- * will not exceed five words. */
- reply_p = cy_as_ll_create_response(dev_p, 5);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- req_p->flags |= req_flags;
- return my_handle_response_storage_query_device(dev_p,
- req_p, reply_p, data_p);
- } else {
-
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_STOR_QUERYDEVICE, data_p,
- dev_p->func_cbs_stor, req_flags, req_p,
- reply_p, cy_as_storage_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as part of the
- * MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_storage_query_device(cy_as_device_handle handle,
- cy_as_storage_query_device_data *data_p,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
- return my_storage_query_device(dev_p, data_p,
- CY_AS_REQUEST_RESPONSE_MS, data_p->bus,
- data_p->device, cb, client);
-}
-EXPORT_SYMBOL(cy_as_storage_query_device);
-
-static cy_as_return_status_t
-my_handle_response_storage_query_unit(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- void *data_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_bus_number_t bus;
- uint32_t device;
- uint32_t unit;
- cy_as_media_type type;
- uint16_t block_size;
- uint32_t start_block;
- uint32_t unit_size;
- uint16_t v;
-
- if (cy_as_ll_request_response__get_code(reply_p) ==
- CY_RESP_NO_SUCH_ADDRESS) {
- ret = cy_as_map_bad_addr(
- cy_as_ll_request_response__get_word(reply_p, 3));
- goto destroy;
- }
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_UNIT_DESCRIPTOR) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- /* Unpack the response */
- v = cy_as_ll_request_response__get_word(reply_p, 0);
- bus = cy_as_storage_get_bus_from_address(v);
- device = cy_as_storage_get_device_from_address(v);
- unit = get_unit_from_address(v);
-
- type = cy_as_storage_get_media_from_address(
- cy_as_ll_request_response__get_word(reply_p, 1));
-
- block_size = cy_as_ll_request_response__get_word(reply_p, 2);
- start_block = cy_as_ll_request_response__get_word(reply_p, 3)
- | (cy_as_ll_request_response__get_word(reply_p, 4) << 16);
- unit_size = cy_as_ll_request_response__get_word(reply_p, 5)
- | (cy_as_ll_request_response__get_word(reply_p, 6) << 16);
-
- /* Store the results based on the version of
- * the originating function */
- if (req_p->flags & CY_AS_REQUEST_RESPONSE_MS) {
- cy_as_storage_query_unit_data *store_p =
- (cy_as_storage_query_unit_data *)data_p;
-
- /* Make sure the response is about the address we
- * asked about - if not, firmware error */
- if (bus != store_p->bus || device != store_p->device ||
- unit != store_p->unit) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- store_p->desc_p.type = type;
- store_p->desc_p.block_size = block_size;
- store_p->desc_p.start_block = start_block;
- store_p->desc_p.unit_size = unit_size;
- } else {
- cy_as_storage_query_unit_data_dep *store_p =
- (cy_as_storage_query_unit_data_dep *)data_p;
-
- /* Make sure the response is about the media type we asked
- * about - if not, firmware error */
- if ((type != store_p->type) || (device != store_p->device) ||
- (unit != store_p->unit)) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- store_p->desc_p.type = type;
- store_p->desc_p.block_size = block_size;
- store_p->desc_p.start_block = start_block;
- store_p->desc_p.unit_size = unit_size;
- }
-
- dev_p->storage_device_info[bus][device].type = type;
- dev_p->storage_device_info[bus][device].block_size = block_size;
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-static cy_as_return_status_t
-my_storage_query_unit(cy_as_device *dev_p,
- void *data_p,
- uint16_t req_flags,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint32_t unit,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_QUERY_UNIT, CY_RQT_STORAGE_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- if (device > 255)
- return CY_AS_ERROR_NO_SUCH_DEVICE;
-
- if (unit > 255)
- return CY_AS_ERROR_NO_SUCH_UNIT;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, device, (uint8_t)unit));
-
- /* Reserve space for the reply, the reply data
- * will be seven words long. */
- reply_p = cy_as_ll_create_response(dev_p, 7);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- req_p->flags |= req_flags;
- return my_handle_response_storage_query_unit(dev_p,
- req_p, reply_p, data_p);
- } else {
-
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_STOR_QUERYUNIT, data_p,
- dev_p->func_cbs_stor, req_flags, req_p, reply_p,
- cy_as_storage_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed
- * as part of the MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_storage_query_unit(cy_as_device_handle handle,
- cy_as_storage_query_unit_data *data_p,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
- return my_storage_query_unit(dev_p, data_p, CY_AS_REQUEST_RESPONSE_MS,
- data_p->bus, data_p->device, data_p->unit, cb, client);
-}
-EXPORT_SYMBOL(cy_as_storage_query_unit);
-
-static cy_as_return_status_t
-cy_as_get_block_size(cy_as_device *dev_p,
- cy_as_bus_number_t bus,
- uint32_t device,
- cy_as_function_callback cb)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_QUERY_DEVICE,
- CY_RQT_STORAGE_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, device, 0));
-
- reply_p = cy_as_ll_create_response(dev_p, 4);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p)
- == CY_RESP_NO_SUCH_ADDRESS) {
- ret = CY_AS_ERROR_NO_SUCH_BUS;
- goto destroy;
- }
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_DEVICE_DESCRIPTOR) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- /* Make sure the response is about the address we asked
- * about - if not, firmware error */
- if ((cy_as_storage_get_bus_from_address
- (cy_as_ll_request_response__get_word(reply_p, 0))
- != bus) || (cy_as_storage_get_device_from_address
- (cy_as_ll_request_response__get_word(reply_p, 0))
- != device)) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
-
- dev_p->storage_device_info[bus][device].block_size =
- cy_as_ll_request_response__get_word(reply_p, 1);
- } else
- ret = CY_AS_ERROR_INVALID_REQUEST;
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-my_storage_device_control(
- cy_as_device *dev_p,
- cy_as_bus_number_t bus,
- uint32_t device,
- cy_bool card_detect_en,
- cy_bool write_prot_en,
- cy_as_storage_card_detect config_detect,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret;
- cy_bool use_gpio = cy_false;
-
- (void)device;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (!cy_as_device_is_configured(dev_p))
- return CY_AS_ERROR_NOT_CONFIGURED;
-
- if (!cy_as_device_is_firmware_loaded(dev_p))
- return CY_AS_ERROR_NO_FIRMWARE;
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- if (bus < 0 || bus >= CY_AS_MAX_BUSES)
- return CY_AS_ERROR_NO_SUCH_BUS;
-
- if (device >= CY_AS_MAX_STORAGE_DEVICES)
- return CY_AS_ERROR_NO_SUCH_DEVICE;
-
- /* If SD is not supported on the specified bus,
- * then return ERROR */
- if ((dev_p->media_supported[bus] == 0) ||
- (dev_p->media_supported[bus] & (1<<cy_as_media_nand)))
- return CY_AS_ERROR_NOT_SUPPORTED;
-
- if (config_detect == cy_as_storage_detect_GPIO)
- use_gpio = cy_true;
- else if (config_detect == cy_as_storage_detect_SDAT_3)
- use_gpio = cy_false;
- else
- return CY_AS_ERROR_INVALID_PARAMETER;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_SD_INTERFACE_CONTROL, CY_RQT_STORAGE_RQT_CONTEXT, 2);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p,
- 0, create_address(bus, device, 0));
- cy_as_ll_request_response__set_word(req_p,
- 1, (((uint16_t)card_detect_en << 8) |
- ((uint16_t)use_gpio << 1) | (uint16_t)write_prot_en));
-
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- } else {
-
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_STOR_DEVICECONTROL,
- 0, dev_p->func_cbs_stor, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_storage_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as part of the
- * MiscFuncCallback */
- return ret;
- }
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_storage_device_control(cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- cy_bool card_detect_en,
- cy_bool write_prot_en,
- cy_as_storage_card_detect config_detect,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- return my_storage_device_control(dev_p, bus, device, card_detect_en,
- write_prot_en, config_detect, cb, client);
-}
-EXPORT_SYMBOL(cy_as_storage_device_control);
-
-static void
-cy_as_async_storage_callback(cy_as_device *dev_p,
- cy_as_end_point_number_t ep, void *buf_p, uint32_t size,
- cy_as_return_status_t ret)
-{
- cy_as_storage_callback_dep cb;
- cy_as_storage_callback cb_ms;
-
- (void)size;
- (void)buf_p;
- (void)ep;
-
- cy_as_device_clear_storage_async_pending(dev_p);
-
- /*
- * if the LL request callback has already been called,
- * the user callback has to be called from here.
- */
- if (!dev_p->storage_wait) {
- cy_as_hal_assert(dev_p->storage_cb != NULL ||
- dev_p->storage_cb_ms != NULL);
- cb = dev_p->storage_cb;
- cb_ms = dev_p->storage_cb_ms;
-
- dev_p->storage_cb = 0;
- dev_p->storage_cb_ms = 0;
-
- if (ret == CY_AS_ERROR_SUCCESS)
- ret = dev_p->storage_error;
-
- if (cb_ms) {
- cb_ms((cy_as_device_handle)dev_p,
- dev_p->storage_bus_index,
- dev_p->storage_device_index,
- dev_p->storage_unit,
- dev_p->storage_block_addr,
- dev_p->storage_oper, ret);
- } else {
- cb((cy_as_device_handle)dev_p,
- dev_p->storage_device_info
- [dev_p->storage_bus_index]
- [dev_p->storage_device_index].type,
- dev_p->storage_device_index,
- dev_p->storage_unit,
- dev_p->storage_block_addr,
- dev_p->storage_oper, ret);
- }
- } else
- dev_p->storage_error = ret;
-}
-
-static void
-cy_as_async_storage_reply_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t ret)
-{
- cy_as_storage_callback_dep cb;
- cy_as_storage_callback cb_ms;
- uint8_t reqtype;
- (void)rqt;
- (void)context;
-
- reqtype = cy_as_ll_request_response__get_code(rqt);
-
- if (ret == CY_AS_ERROR_SUCCESS) {
- if (cy_as_ll_request_response__get_code(resp) ==
- CY_RESP_ANTIOCH_DEFERRED_ERROR) {
- ret = cy_as_ll_request_response__get_word
- (resp, 0) & 0x00FF;
- } else if (cy_as_ll_request_response__get_code(resp) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- }
- }
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- if (reqtype == CY_RQT_READ_BLOCK)
- cy_as_dma_cancel(dev_p,
- dev_p->storage_read_endpoint, ret);
- else
- cy_as_dma_cancel(dev_p,
- dev_p->storage_write_endpoint, ret);
- }
-
- dev_p->storage_wait = cy_false;
-
- /*
- * if the DMA callback has already been called, the
- * user callback has to be called from here.
- */
- if (!cy_as_device_is_storage_async_pending(dev_p)) {
- cy_as_hal_assert(dev_p->storage_cb != NULL ||
- dev_p->storage_cb_ms != NULL);
- cb = dev_p->storage_cb;
- cb_ms = dev_p->storage_cb_ms;
-
- dev_p->storage_cb = 0;
- dev_p->storage_cb_ms = 0;
-
- if (ret == CY_AS_ERROR_SUCCESS)
- ret = dev_p->storage_error;
-
- if (cb_ms) {
- cb_ms((cy_as_device_handle)dev_p,
- dev_p->storage_bus_index,
- dev_p->storage_device_index,
- dev_p->storage_unit,
- dev_p->storage_block_addr,
- dev_p->storage_oper, ret);
- } else {
- cb((cy_as_device_handle)dev_p,
- dev_p->storage_device_info
- [dev_p->storage_bus_index]
- [dev_p->storage_device_index].type,
- dev_p->storage_device_index,
- dev_p->storage_unit,
- dev_p->storage_block_addr,
- dev_p->storage_oper, ret);
- }
- } else
- dev_p->storage_error = ret;
-}
-
-static cy_as_return_status_t
-cy_as_storage_async_oper(cy_as_device *dev_p, cy_as_end_point_number_t ep,
- uint8_t reqtype, uint16_t req_flags, cy_as_bus_number_t bus,
- uint32_t device, uint32_t unit, uint32_t block, void *data_p,
- uint16_t num_blocks, cy_as_storage_callback_dep callback,
- cy_as_storage_callback callback_ms)
-{
- uint32_t mask;
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (bus < 0 || bus >= CY_AS_MAX_BUSES)
- return CY_AS_ERROR_NO_SUCH_BUS;
-
- if (device >= CY_AS_MAX_STORAGE_DEVICES)
- return CY_AS_ERROR_NO_SUCH_DEVICE;
-
- if (unit > 255)
- return CY_AS_ERROR_NO_SUCH_UNIT;
-
- /* We are supposed to return success if the number of
- * blocks is zero
- */
- if (num_blocks == 0) {
- if (callback_ms)
- callback_ms((cy_as_device_handle)dev_p,
- bus, device, unit, block,
- ((reqtype == CY_RQT_WRITE_BLOCK)
- ? cy_as_op_write : cy_as_op_read),
- CY_AS_ERROR_SUCCESS);
- else
- callback((cy_as_device_handle)dev_p,
- dev_p->storage_device_info[bus][device].type,
- device, unit, block,
- ((reqtype == CY_RQT_WRITE_BLOCK) ?
- cy_as_op_write : cy_as_op_read),
- CY_AS_ERROR_SUCCESS);
-
- return CY_AS_ERROR_SUCCESS;
- }
-
- if (dev_p->storage_device_info[bus][device].block_size == 0)
- return CY_AS_ERROR_QUERY_DEVICE_NEEDED;
-
- /*
- * since async operations can be triggered by interrupt
- * code, we must ensure that we do not get multiple
- * async operations going at one time and protect this
- * test and set operation from interrupts. also need to
- * check for pending async MTP writes
- */
- mask = cy_as_hal_disable_interrupts();
- if ((cy_as_device_is_storage_async_pending(dev_p)) ||
- (dev_p->storage_wait) ||
- (cy_as_device_is_usb_async_pending(dev_p, 6))) {
- cy_as_hal_enable_interrupts(mask);
- return CY_AS_ERROR_ASYNC_PENDING;
- }
-
- cy_as_device_set_storage_async_pending(dev_p);
- cy_as_device_clear_p2s_dma_start_recvd(dev_p);
- cy_as_hal_enable_interrupts(mask);
-
- /*
- * storage information about the currently outstanding request
- */
- dev_p->storage_cb = callback;
- dev_p->storage_cb_ms = callback_ms;
- dev_p->storage_bus_index = bus;
- dev_p->storage_device_index = device;
- dev_p->storage_unit = unit;
- dev_p->storage_block_addr = block;
-
- /* Initialise the request to send to the West Bridge. */
- req_p = dev_p->storage_rw_req_p;
- cy_as_ll_init_request(req_p, reqtype, CY_RQT_STORAGE_RQT_CONTEXT, 5);
-
- /* Initialise the space for reply from the West Bridge. */
- reply_p = dev_p->storage_rw_resp_p;
- cy_as_ll_init_response(reply_p, 5);
-
- /* Remember which version of the API originated the request */
- req_p->flags |= req_flags;
-
- /* Setup the DMA request and adjust the storage
- * operation if we are reading */
- if (reqtype == CY_RQT_READ_BLOCK) {
- ret = cy_as_dma_queue_request(dev_p, ep, data_p,
- dev_p->storage_device_info[bus][device].block_size
- * num_blocks, cy_false, cy_true,
- cy_as_async_storage_callback);
- dev_p->storage_oper = cy_as_op_read;
- } else if (reqtype == CY_RQT_WRITE_BLOCK) {
- ret = cy_as_dma_queue_request(dev_p, ep, data_p,
- dev_p->storage_device_info[bus][device].block_size *
- num_blocks, cy_false, cy_false,
- cy_as_async_storage_callback);
- dev_p->storage_oper = cy_as_op_write;
- }
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_device_clear_storage_async_pending(dev_p);
- return ret;
- }
-
- cy_as_ll_request_response__set_word(req_p,
- 0, create_address(bus, (uint8_t)device, (uint8_t)unit));
- cy_as_ll_request_response__set_word(req_p,
- 1, (uint16_t)((block >> 16) & 0xffff));
- cy_as_ll_request_response__set_word(req_p,
- 2, (uint16_t)(block & 0xffff));
- cy_as_ll_request_response__set_word(req_p,
- 3, (uint16_t)((num_blocks >> 8) & 0x00ff));
- cy_as_ll_request_response__set_word(req_p,
- 4, (uint16_t)((num_blocks << 8) & 0xff00));
-
- /* Set the burst mode flag. */
- if (dev_p->is_storage_only_mode)
- req_p->data[4] |= 0x0001;
-
- /* Send the request and wait for completion
- * of storage request */
- dev_p->storage_wait = cy_true;
- ret = cy_as_ll_send_request(dev_p, req_p, reply_p,
- cy_true, cy_as_async_storage_reply_callback);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_dma_cancel(dev_p, ep, CY_AS_ERROR_CANCELED);
- cy_as_device_clear_storage_async_pending(dev_p);
- }
-
- return ret;
-}
-
-static void
-cy_as_sync_storage_callback(cy_as_device *dev_p,
- cy_as_end_point_number_t ep, void *buf_p,
- uint32_t size, cy_as_return_status_t err)
-{
- (void)ep;
- (void)buf_p;
- (void)size;
-
- dev_p->storage_error = err;
-}
-
-static void
-cy_as_sync_storage_reply_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t ret)
-{
- uint8_t reqtype;
- (void)rqt;
-
- reqtype = cy_as_ll_request_response__get_code(rqt);
-
- if (cy_as_ll_request_response__get_code(resp) ==
- CY_RESP_ANTIOCH_DEFERRED_ERROR) {
- ret = cy_as_ll_request_response__get_word(resp, 0) & 0x00FF;
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- if (reqtype == CY_RQT_READ_BLOCK)
- cy_as_dma_cancel(dev_p,
- dev_p->storage_read_endpoint, ret);
- else
- cy_as_dma_cancel(dev_p,
- dev_p->storage_write_endpoint, ret);
- }
- } else if (cy_as_ll_request_response__get_code(resp) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- }
-
- dev_p->storage_wait = cy_false;
- dev_p->storage_error = ret;
-
- /* Wake any threads/processes that are waiting on
- * the read/write completion. */
- cy_as_hal_wake(&dev_p->context[context]->channel);
-}
-
-static cy_as_return_status_t
-cy_as_storage_sync_oper(cy_as_device *dev_p,
- cy_as_end_point_number_t ep, uint8_t reqtype,
- cy_as_bus_number_t bus, uint32_t device,
- uint32_t unit, uint32_t block, void *data_p,
- uint16_t num_blocks)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_context *ctxt_p;
- uint32_t loopcount = 200;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (bus < 0 || bus >= CY_AS_MAX_BUSES)
- return CY_AS_ERROR_NO_SUCH_BUS;
-
- if (device >= CY_AS_MAX_STORAGE_DEVICES)
- return CY_AS_ERROR_NO_SUCH_DEVICE;
-
- if (unit > 255)
- return CY_AS_ERROR_NO_SUCH_UNIT;
-
- if ((cy_as_device_is_storage_async_pending(dev_p)) ||
- (dev_p->storage_wait))
- return CY_AS_ERROR_ASYNC_PENDING;
-
- /* Also need to check for pending Async MTP writes */
- if (cy_as_device_is_usb_async_pending(dev_p, 6))
- return CY_AS_ERROR_ASYNC_PENDING;
-
- /* We are supposed to return success if the number of
- * blocks is zero
- */
- if (num_blocks == 0)
- return CY_AS_ERROR_SUCCESS;
-
- if (dev_p->storage_device_info[bus][device].block_size == 0) {
- /*
- * normally, a given device has been queried via
- * the query device call before a read request is issued.
- * therefore, this path is not normally taken.
- */
- ret = cy_as_get_block_size(dev_p, bus, device, 0);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
- }
-
- /* Initialise the request to send to the West Bridge. */
- req_p = dev_p->storage_rw_req_p;
- cy_as_ll_init_request(req_p, reqtype,
- CY_RQT_STORAGE_RQT_CONTEXT, 5);
-
- /* Initialise the space for reply from
- * the West Bridge. */
- reply_p = dev_p->storage_rw_resp_p;
- cy_as_ll_init_response(reply_p, 5);
- cy_as_device_clear_p2s_dma_start_recvd(dev_p);
-
- /* Setup the DMA request */
- if (reqtype == CY_RQT_READ_BLOCK) {
- ret = cy_as_dma_queue_request(dev_p, ep, data_p,
- dev_p->storage_device_info[bus][device].block_size *
- num_blocks, cy_false,
- cy_true, cy_as_sync_storage_callback);
- dev_p->storage_oper = cy_as_op_read;
- } else if (reqtype == CY_RQT_WRITE_BLOCK) {
- ret = cy_as_dma_queue_request(dev_p, ep, data_p,
- dev_p->storage_device_info[bus][device].block_size *
- num_blocks, cy_false, cy_false,
- cy_as_sync_storage_callback);
- dev_p->storage_oper = cy_as_op_write;
- }
-
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, (uint8_t)device, (uint8_t)unit));
- cy_as_ll_request_response__set_word(req_p, 1,
- (uint16_t)((block >> 16) & 0xffff));
- cy_as_ll_request_response__set_word(req_p, 2,
- (uint16_t)(block & 0xffff));
- cy_as_ll_request_response__set_word(req_p, 3,
- (uint16_t)((num_blocks >> 8) & 0x00ff));
- cy_as_ll_request_response__set_word(req_p, 4,
- (uint16_t)((num_blocks << 8) & 0xff00));
-
- /* Set the burst mode flag. */
- if (dev_p->is_storage_only_mode)
- req_p->data[4] |= 0x0001;
-
- /* Send the request and wait for
- * completion of storage request */
- dev_p->storage_wait = cy_true;
- ret = cy_as_ll_send_request(dev_p, req_p, reply_p, cy_true,
- cy_as_sync_storage_reply_callback);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_dma_cancel(dev_p, ep, CY_AS_ERROR_CANCELED);
- } else {
- /* Setup the DMA request */
- ctxt_p = dev_p->context[CY_RQT_STORAGE_RQT_CONTEXT];
- ret = cy_as_dma_drain_queue(dev_p, ep, cy_false);
-
- while (loopcount-- > 0) {
- if (dev_p->storage_wait == cy_false)
- break;
- cy_as_hal_sleep_on(&ctxt_p->channel, 10);
- }
-
- if (dev_p->storage_wait == cy_true) {
- dev_p->storage_wait = cy_false;
- cy_as_ll_remove_request(dev_p, ctxt_p, req_p, cy_true);
- ret = CY_AS_ERROR_TIMEOUT;
- }
-
- if (ret == CY_AS_ERROR_SUCCESS)
- ret = dev_p->storage_error;
- }
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_storage_read(cy_as_device_handle handle,
- cy_as_bus_number_t bus, uint32_t device,
- uint32_t unit, uint32_t block,
- void *data_p, uint16_t num_blocks)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- return cy_as_storage_sync_oper(dev_p, dev_p->storage_read_endpoint,
- CY_RQT_READ_BLOCK, bus, device,
- unit, block, data_p, num_blocks);
-}
-EXPORT_SYMBOL(cy_as_storage_read);
-
-cy_as_return_status_t
-cy_as_storage_write(cy_as_device_handle handle,
- cy_as_bus_number_t bus, uint32_t device,
- uint32_t unit, uint32_t block, void *data_p,
- uint16_t num_blocks)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (dev_p->mtp_turbo_active)
- return CY_AS_ERROR_NOT_VALID_DURING_MTP;
-
- return cy_as_storage_sync_oper(dev_p,
- dev_p->storage_write_endpoint,
- CY_RQT_WRITE_BLOCK, bus, device,
- unit, block, data_p, num_blocks);
-}
-EXPORT_SYMBOL(cy_as_storage_write);
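For reference, a hedged usage sketch of the two blocking wrappers above. The handle, bus/device/unit numbers, block address and buffer are hypothetical, and it assumes the storage stack has already been started and the target device queried (so its block size is known).

/*
 * Illustrative only: copy one block to the next block on the same
 * unit using the blocking read/write wrappers above.
 */
static cy_as_return_status_t demo_copy_block(cy_as_device_handle h,
	uint32_t block, uint8_t *buf)
{
	cy_as_return_status_t ret;

	/* Read one block from bus 0, device 0, unit 0. */
	ret = cy_as_storage_read(h, 0, 0, 0, block, buf, 1);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	/* Write it back to the following block on the same unit. */
	return cy_as_storage_write(h, 0, 0, 0, block + 1, buf, 1);
}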
-
-cy_as_return_status_t
-cy_as_storage_read_async(cy_as_device_handle handle,
- cy_as_bus_number_t bus, uint32_t device, uint32_t unit,
- uint32_t block, void *data_p, uint16_t num_blocks,
- cy_as_storage_callback callback)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (callback == 0)
- return CY_AS_ERROR_NULL_CALLBACK;
-
- return cy_as_storage_async_oper(dev_p,
- dev_p->storage_read_endpoint, CY_RQT_READ_BLOCK,
- CY_AS_REQUEST_RESPONSE_MS, bus, device, unit,
- block, data_p, num_blocks, NULL, callback);
-}
-EXPORT_SYMBOL(cy_as_storage_read_async);
-
-cy_as_return_status_t
-cy_as_storage_write_async(cy_as_device_handle handle,
- cy_as_bus_number_t bus, uint32_t device, uint32_t unit,
- uint32_t block, void *data_p, uint16_t num_blocks,
- cy_as_storage_callback callback)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (callback == 0)
- return CY_AS_ERROR_NULL_CALLBACK;
-
- if (dev_p->mtp_turbo_active)
- return CY_AS_ERROR_NOT_VALID_DURING_MTP;
-
- return cy_as_storage_async_oper(dev_p,
- dev_p->storage_write_endpoint, CY_RQT_WRITE_BLOCK,
- CY_AS_REQUEST_RESPONSE_MS, bus, device, unit, block,
- data_p, num_blocks, NULL, callback);
-}
-EXPORT_SYMBOL(cy_as_storage_write_async);
-
-static void
-my_storage_cancel_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t stat)
-{
- (void)context;
- (void)stat;
-
- /* Nothing to do here, except free up the
- * request and response structures. */
- cy_as_ll_destroy_response(dev_p, resp);
- cy_as_ll_destroy_request(dev_p, rqt);
-}
-
-
-cy_as_return_status_t
-cy_as_storage_cancel_async(cy_as_device_handle handle)
-{
- cy_as_return_status_t ret;
- cy_as_ll_request_response *req_p , *reply_p;
-
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (!cy_as_device_is_storage_async_pending(dev_p))
- return CY_AS_ERROR_ASYNC_NOT_PENDING;
-
- /*
-	 * Create and send a mailbox request asking the firmware to
-	 * abort processing of the current P2S operation. The rest of
-	 * the cancel processing is driven through the callbacks for
-	 * the read/write call.
- */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_ABORT_P2S_XFER,
- CY_RQT_GENERAL_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- ret = cy_as_ll_send_request(dev_p, req_p,
- reply_p, cy_false, my_storage_cancel_callback);
- if (ret) {
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
- }
-
- return CY_AS_ERROR_SUCCESS;
-}
-EXPORT_SYMBOL(cy_as_storage_cancel_async);
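A sketch of the asynchronous path and its cancellation, for orientation. The callback prototype is inferred from the way storage_cb_ms is invoked later in this file, so treat it as an assumption; the addressing values, buffer and helper names are made up.

/* Hypothetical completion callback; a cancelled transfer still
 * completes here, with a non-success status. */
static void demo_storage_done(cy_as_device_handle h,
	cy_as_bus_number_t bus, uint32_t device, uint32_t unit,
	uint32_t block, cy_as_oper_type op, cy_as_return_status_t status)
{
	(void)h; (void)bus; (void)device; (void)unit;
	(void)block; (void)op; (void)status;
}

static cy_as_return_status_t demo_read_then_cancel(cy_as_device_handle h,
	uint8_t *buf)
{
	cy_as_return_status_t ret;

	ret = cy_as_storage_read_async(h, 0, 0, 0, 100, buf, 1,
		demo_storage_done);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	/* Ask the firmware to abort the pending P2S transfer; the
	 * completion is still reported through demo_storage_done(). */
	return cy_as_storage_cancel_async(h);
}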
-
-/*
- * This function does all the API side clean-up associated with
- * CyAsStorageStop, without any communication with the firmware.
- */
-void cy_as_storage_cleanup(cy_as_device *dev_p)
-{
- if (dev_p->storage_count) {
- cy_as_ll_destroy_request(dev_p, dev_p->storage_rw_req_p);
- cy_as_ll_destroy_response(dev_p, dev_p->storage_rw_resp_p);
- dev_p->storage_count = 0;
- cy_as_device_clear_scsi_messages(dev_p);
- cy_as_hal_mem_set(dev_p->storage_device_info,
- 0, sizeof(dev_p->storage_device_info));
-
- cy_as_device_clear_storage_async_pending(dev_p);
- dev_p->storage_cb = 0;
- dev_p->storage_cb_ms = 0;
- dev_p->storage_wait = cy_false;
- }
-}
-
-static cy_as_return_status_t
-my_handle_response_sd_reg_read(
- cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_storage_sd_reg_read_data *info)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint8_t resp_type, i;
- uint16_t resp_len;
- uint8_t length = info->length;
- uint8_t *data_p = info->buf_p;
-
- resp_type = cy_as_ll_request_response__get_code(reply_p);
- if (resp_type == CY_RESP_SD_REGISTER_DATA) {
- uint16_t *resp_p = reply_p->data + 1;
- uint16_t temp;
-
- resp_len = cy_as_ll_request_response__get_word(reply_p, 0);
- cy_as_hal_assert(resp_len >= length);
-
- /*
-		 * Copy the values into the output buffer after doing the
-		 * necessary bit shifting. The shifting is required because
-		 * the data comes out of the West Bridge with a 6-bit offset.
- */
- i = 0;
- while (length) {
- temp = ((resp_p[i] << 6) | (resp_p[i + 1] >> 10));
- i++;
-
- *data_p++ = (uint8_t)(temp >> 8);
- length--;
-
- if (length) {
- *data_p++ = (uint8_t)(temp & 0xFF);
- length--;
- }
- }
- } else {
- if (resp_type == CY_RESP_SUCCESS_FAILURE)
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- else
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- }
-
- cy_as_ll_destroy_response(dev_p, reply_p);
- cy_as_ll_destroy_request(dev_p, req_p);
-
- return ret;
-}
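Because the shifting loop above is easy to misread, here is a standalone, host-side sketch that mirrors it on made-up response words so the 6-bit offset handling can be checked in isolation; the function name and sample values are illustrative, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Rebuild register bytes from 16-bit response words that carry the
 * data with a 6-bit offset, mirroring the loop above. */
static void unpack_6bit_offset(const uint16_t *words, uint8_t *out,
	uint8_t length)
{
	uint8_t i = 0;

	while (length) {
		uint16_t temp = (uint16_t)((words[i] << 6) |
					(words[i + 1] >> 10));
		i++;

		*out++ = (uint8_t)(temp >> 8);
		length--;

		if (length) {
			*out++ = (uint8_t)(temp & 0xFF);
			length--;
		}
	}
}

int main(void)
{
	/* Three 16-bit words are enough to recover four bytes. */
	const uint16_t words[3] = { 0x0123, 0x4567, 0x89ab };
	uint8_t reg[4];
	int i;

	unpack_6bit_offset(words, reg, sizeof(reg));
	for (i = 0; i < 4; i++)
		printf("%02x ", reg[i]);
	printf("\n");
	return 0;
}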
-
-cy_as_return_status_t
-cy_as_storage_sd_register_read(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint8_t device,
- cy_as_sd_card_reg_type reg_type,
- cy_as_storage_sd_reg_read_data *data_p,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint8_t length;
-
- /*
- * sanity checks required before sending the request to the
- * firmware.
- */
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (device >= CY_AS_MAX_STORAGE_DEVICES)
- return CY_AS_ERROR_NO_SUCH_DEVICE;
-
- if (reg_type > cy_as_sd_reg_CSD)
- return CY_AS_ERROR_INVALID_PARAMETER;
-
-	/* If SD/MMC media is not supported on the
-	 * addressed bus, return an error. */
- if ((dev_p->media_supported[bus] & (1 << cy_as_media_sd_flash)) == 0)
- return CY_AS_ERROR_INVALID_PARAMETER;
-
- /*
-	 * Find the amount of data to be returned; this is the minimum
-	 * of the actual register length and the length requested.
- */
- switch (reg_type) {
- case cy_as_sd_reg_OCR:
- length = CY_AS_SD_REG_OCR_LENGTH;
- break;
- case cy_as_sd_reg_CID:
- length = CY_AS_SD_REG_CID_LENGTH;
- break;
- case cy_as_sd_reg_CSD:
- length = CY_AS_SD_REG_CSD_LENGTH;
- break;
-
- default:
- length = 0;
- cy_as_hal_assert(0);
- }
-
- if (length < data_p->length)
- data_p->length = length;
- length = data_p->length;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_SD_REGISTER_READ,
- CY_RQT_STORAGE_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- (create_address(bus, device, 0) | (uint16_t)reg_type));
-
- reply_p = cy_as_ll_create_response(dev_p,
- CY_AS_SD_REG_MAX_RESP_LENGTH);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_sd_reg_read(dev_p,
- req_p, reply_p, data_p);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_STOR_SDREGISTERREAD, data_p,
- dev_p->func_cbs_stor, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_storage_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as part of the
- * MiscFuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_storage_sd_register_read);
-
-cy_as_return_status_t
-cy_as_storage_create_p_partition(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
-	/* Size of the P-port-only partition, in blocks */
- uint32_t size,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- /* Partitions cannot be created or deleted while
- * the USB stack is active. */
- if (dev_p->usb_count)
- return CY_AS_ERROR_USB_RUNNING;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_PARTITION_STORAGE,
- CY_RQT_STORAGE_RQT_CONTEXT, 3);
-
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
-	/* Reserve space for the reply; the reply
-	 * data will not exceed one word. */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, (uint8_t)device, 0x00));
- cy_as_ll_request_response__set_word(req_p, 1,
- (uint16_t)((size >> 16) & 0xffff));
- cy_as_ll_request_response__set_word(req_p, 2,
- (uint16_t)(size & 0xffff));
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_no_data(dev_p, req_p, reply_p);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_STOR_PARTITION, 0, dev_p->func_cbs_stor,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_storage_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as part of the
- * FuncCallback */
- return ret;
-
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_storage_create_p_partition);
-
-cy_as_return_status_t
-cy_as_storage_remove_p_partition(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- /* Partitions cannot be created or deleted while
- * the USB stack is active. */
- if (dev_p->usb_count)
- return CY_AS_ERROR_USB_RUNNING;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_PARTITION_ERASE,
- CY_RQT_STORAGE_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
-	/* Reserve space for the reply; the reply
-	 * data will not exceed one word. */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- cy_as_ll_request_response__set_word(req_p,
- 0, create_address(bus, (uint8_t)device, 0x00));
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_no_data(dev_p, req_p, reply_p);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_NODATA, 0, dev_p->func_cbs_stor,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_storage_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed
- * as part of the FuncCallback */
- return ret;
-
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_storage_remove_p_partition);
-
-static cy_as_return_status_t
-my_handle_response_get_transfer_amount(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_m_s_c_progress_data *data)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint8_t code = cy_as_ll_request_response__get_code(reply_p);
- uint16_t v1, v2;
-
- if (code != CY_RESP_TRANSFER_COUNT) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- v1 = cy_as_ll_request_response__get_word(reply_p, 0);
- v2 = cy_as_ll_request_response__get_word(reply_p, 1);
- data->wr_count = (uint32_t)((v1 << 16) | v2);
-
- v1 = cy_as_ll_request_response__get_word(reply_p, 2);
- v2 = cy_as_ll_request_response__get_word(reply_p, 3);
- data->rd_count = (uint32_t)((v1 << 16) | v2);
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_storage_get_transfer_amount(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- cy_as_m_s_c_progress_data *data_p,
- cy_as_function_callback cb,
- uint32_t client
- )
-{
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- /* Check if the firmware image supports this feature. */
- if ((dev_p->media_supported[0]) && (dev_p->media_supported[0]
- == (1 << cy_as_media_nand)))
- return CY_AS_ERROR_NOT_SUPPORTED;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_GET_TRANSFER_AMOUNT,
- CY_RQT_STORAGE_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
-	/* Reserve space for the reply; the reply data
-	 * will not exceed four words. */
- reply_p = cy_as_ll_create_response(dev_p, 4);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, (uint8_t)device, 0x00));
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_get_transfer_amount(dev_p,
- req_p, reply_p, data_p);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_STOR_GETTRANSFERAMOUNT, (void *)data_p,
- dev_p->func_cbs_stor, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_storage_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed as part of the
- * FuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-
-}
-EXPORT_SYMBOL(cy_as_storage_get_transfer_amount);
-
-cy_as_return_status_t
-cy_as_storage_erase(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint32_t erase_unit,
- uint16_t num_erase_units,
- cy_as_function_callback cb,
- uint32_t client
- )
-{
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_storage_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (bus < 0 || bus >= CY_AS_MAX_BUSES)
- return CY_AS_ERROR_NO_SUCH_BUS;
-
- if (device >= CY_AS_MAX_STORAGE_DEVICES)
- return CY_AS_ERROR_NO_SUCH_DEVICE;
-
- if (dev_p->storage_device_info[bus][device].block_size == 0)
- return CY_AS_ERROR_QUERY_DEVICE_NEEDED;
-
- /* If SD is not supported on the specified bus, then return ERROR */
- if (dev_p->storage_device_info[bus][device].type !=
- cy_as_media_sd_flash)
- return CY_AS_ERROR_NOT_SUPPORTED;
-
- if (num_erase_units == 0)
- return CY_AS_ERROR_SUCCESS;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_ERASE,
- CY_RQT_STORAGE_RQT_CONTEXT, 5);
-
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
-	/* Reserve space for the reply; the reply
-	 * data will not exceed four words. */
- reply_p = cy_as_ll_create_response(dev_p, 4);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, (uint8_t)device, 0x00));
- cy_as_ll_request_response__set_word(req_p, 1,
- (uint16_t)((erase_unit >> 16) & 0xffff));
- cy_as_ll_request_response__set_word(req_p, 2,
- (uint16_t)(erase_unit & 0xffff));
- cy_as_ll_request_response__set_word(req_p, 3,
- (uint16_t)((num_erase_units >> 8) & 0x00ff));
- cy_as_ll_request_response__set_word(req_p, 4,
- (uint16_t)((num_erase_units << 8) & 0xff00));
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- ret = my_handle_response_no_data(dev_p, req_p, reply_p);
-
-		/* If the error is "invalid response", this (very likely)
-		 * means that we are not using the SD-only firmware module,
-		 * which is the only one supporting storage_erase. In this
-		 * case force a "not supported" error code. */
- if (ret == CY_AS_ERROR_INVALID_RESPONSE)
- ret = CY_AS_ERROR_NOT_SUPPORTED;
-
- return ret;
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_STOR_ERASE, 0, dev_p->func_cbs_stor,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_storage_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* The request and response are freed
- * as part of the FuncCallback */
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_storage_erase);
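The read, write and erase requests above all split their arguments across mailbox words in the same slightly surprising way; the standalone helper below (hypothetical name, arbitrary values) restates that layout so it can be seen at a glance.

#include <stdint.h>

/*
 * Mirror of how the requests above pack a 32-bit block (or erase-unit)
 * address and a 16-bit count into mailbox words 1..4: the address goes
 * high half first, while the count's MSB lands in the low byte of word
 * 3 and its LSB in the high byte of word 4.
 */
static void pack_block_args(uint32_t block, uint16_t count, uint16_t w[4])
{
	w[0] = (uint16_t)((block >> 16) & 0xffff);
	w[1] = (uint16_t)(block & 0xffff);
	w[2] = (uint16_t)((count >> 8) & 0x00ff);
	w[3] = (uint16_t)((count << 8) & 0xff00);
}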
-
-static void
-cy_as_storage_func_callback(cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t stat)
-{
- cy_as_func_c_b_node *node = (cy_as_func_c_b_node *)
- dev_p->func_cbs_stor->head_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- cy_bool ex_request = (rqt->flags & CY_AS_REQUEST_RESPONSE_EX)
- == CY_AS_REQUEST_RESPONSE_EX;
- cy_bool ms_request = (rqt->flags & CY_AS_REQUEST_RESPONSE_MS)
- == CY_AS_REQUEST_RESPONSE_MS;
- uint8_t code;
- uint8_t cntxt;
-
- cy_as_hal_assert(ex_request || ms_request);
- cy_as_hal_assert(dev_p->func_cbs_stor->count != 0);
- cy_as_hal_assert(dev_p->func_cbs_stor->type == CYAS_FUNC_CB);
- (void) ex_request;
- (void) ms_request;
-
- (void)context;
-
- cntxt = cy_as_ll_request_response__get_context(rqt);
- cy_as_hal_assert(cntxt == CY_RQT_STORAGE_RQT_CONTEXT);
-
- code = cy_as_ll_request_response__get_code(rqt);
- switch (code) {
- case CY_RQT_START_STORAGE:
- ret = my_handle_response_storage_start(dev_p, rqt, resp, stat);
- break;
- case CY_RQT_STOP_STORAGE:
- ret = my_handle_response_storage_stop(dev_p, rqt, resp, stat);
- break;
- case CY_RQT_CLAIM_STORAGE:
- ret = my_handle_response_storage_claim(dev_p, rqt, resp);
- break;
- case CY_RQT_RELEASE_STORAGE:
- ret = my_handle_response_storage_release(dev_p, rqt, resp);
- break;
- case CY_RQT_QUERY_MEDIA:
- cy_as_hal_assert(cy_false);/* Not used any more. */
- break;
- case CY_RQT_QUERY_BUS:
- cy_as_hal_assert(node->data != 0);
- ret = my_handle_response_storage_query_bus(dev_p,
- rqt, resp, (uint32_t *)node->data);
- break;
- case CY_RQT_QUERY_DEVICE:
- cy_as_hal_assert(node->data != 0);
- ret = my_handle_response_storage_query_device(dev_p,
- rqt, resp, node->data);
- break;
- case CY_RQT_QUERY_UNIT:
- cy_as_hal_assert(node->data != 0);
- ret = my_handle_response_storage_query_unit(dev_p,
- rqt, resp, node->data);
- break;
- case CY_RQT_SD_INTERFACE_CONTROL:
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- case CY_RQT_SD_REGISTER_READ:
- cy_as_hal_assert(node->data != 0);
- ret = my_handle_response_sd_reg_read(dev_p, rqt, resp,
- (cy_as_storage_sd_reg_read_data *)node->data);
- break;
- case CY_RQT_PARTITION_STORAGE:
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- case CY_RQT_PARTITION_ERASE:
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- case CY_RQT_GET_TRANSFER_AMOUNT:
- cy_as_hal_assert(node->data != 0);
- ret = my_handle_response_get_transfer_amount(dev_p,
- rqt, resp, (cy_as_m_s_c_progress_data *)node->data);
- break;
- case CY_RQT_ERASE:
- ret = my_handle_response_no_data(dev_p, rqt, resp);
-
-		/* If the error is "invalid response", this (very likely)
-		 * means that we are not using the SD-only firmware module,
-		 * which is the only one supporting storage_erase. In this
-		 * case force a "not supported" error code. */
- if (ret == CY_AS_ERROR_INVALID_RESPONSE)
- ret = CY_AS_ERROR_NOT_SUPPORTED;
-
- break;
-
- default:
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- cy_as_hal_assert(cy_false);
- break;
- }
-
- /*
- * if the low level layer returns a direct error, use the
- * corresponding error code. if not, use the error code
- * based on the response from firmware.
- */
- if (stat == CY_AS_ERROR_SUCCESS)
- stat = ret;
-
- /* Call the user callback, if there is one */
- if (node->cb_p)
- node->cb_p((cy_as_device_handle)dev_p, stat,
- node->client_data, node->data_type, node->data);
- cy_as_remove_c_b_node(dev_p->func_cbs_stor);
-}
-
-
-static void
-cy_as_sdio_sync_reply_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t ret)
-{
- (void)rqt;
-
- if ((cy_as_ll_request_response__get_code(resp) ==
- CY_RESP_SDIO_GET_TUPLE) ||
- (cy_as_ll_request_response__get_code(resp) ==
- CY_RESP_SDIO_EXT)) {
- ret = cy_as_ll_request_response__get_word(resp, 0);
- if ((ret & 0x00FF) != CY_AS_ERROR_SUCCESS) {
- if (cy_as_ll_request_response__get_code(rqt) ==
- CY_RQT_SDIO_READ_EXTENDED)
- cy_as_dma_cancel(dev_p,
- dev_p->storage_read_endpoint, ret);
- else
- cy_as_dma_cancel(dev_p,
- dev_p->storage_write_endpoint, ret);
- }
- } else {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- }
-
- dev_p->storage_rw_resp_p = resp;
- dev_p->storage_wait = cy_false;
- if (((ret & 0x00FF) == CY_AS_ERROR_IO_ABORTED) || ((ret & 0x00FF)
- == CY_AS_ERROR_IO_SUSPENDED))
- dev_p->storage_error = (ret & 0x00FF);
- else
- dev_p->storage_error = (ret & 0x00FF) ?
- CY_AS_ERROR_INVALID_RESPONSE : CY_AS_ERROR_SUCCESS;
-
- /* Wake any threads/processes that are waiting on
- * the read/write completion. */
- cy_as_hal_wake(&dev_p->context[context]->channel);
-}
-
-cy_as_return_status_t
-cy_as_sdio_device_check(
- cy_as_device *dev_p,
- cy_as_bus_number_t bus,
- uint32_t device)
-{
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (bus < 0 || bus >= CY_AS_MAX_BUSES)
- return CY_AS_ERROR_NO_SUCH_BUS;
-
- if (device >= CY_AS_MAX_STORAGE_DEVICES)
- return CY_AS_ERROR_NO_SUCH_DEVICE;
-
- if (!cy_as_device_is_astoria_dev(dev_p))
- return CY_AS_ERROR_NOT_SUPPORTED;
-
- return (is_storage_active(dev_p));
-}
-
-cy_as_return_status_t
-cy_as_sdio_direct_io(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no,
- uint32_t address,
- uint8_t misc_buf,
- uint16_t argument,
- uint8_t is_write,
- uint8_t *data_p)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint16_t resp_data;
-
- /*
- * sanity checks required before sending the request to the
- * firmware.
- */
- cy_as_device *dev_p = (cy_as_device *)handle;
- ret = cy_as_sdio_device_check(dev_p, bus, device);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
-
- if (!(cy_as_sdio_check_function_initialized(handle,
- bus, n_function_no)))
- return CY_AS_ERROR_INVALID_FUNCTION;
- if (cy_as_sdio_check_function_suspended(handle, bus, n_function_no))
- return CY_AS_ERROR_FUNCTION_SUSPENDED;
-
- req_p = cy_as_ll_create_request(dev_p, (is_write == cy_true) ?
- CY_RQT_SDIO_WRITE_DIRECT : CY_RQT_SDIO_READ_DIRECT,
- CY_RQT_STORAGE_RQT_CONTEXT, 3);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
-	/* Set up the request. */
-
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, (uint8_t)device, n_function_no));
- /* D1 */
- if (is_write == cy_true) {
- cy_as_ll_request_response__set_word(req_p, 1,
- ((argument<<8) | 0x0080 | (n_function_no<<4) |
- ((misc_buf&CY_SDIO_RAW)<<3) |
- ((misc_buf&CY_SDIO_REARM_INT)>>5) |
- (uint16_t)(address>>15)));
- } else {
- cy_as_ll_request_response__set_word(req_p, 1,
- (n_function_no<<4) | ((misc_buf&CY_SDIO_REARM_INT)>>5) |
- (uint16_t)(address>>15));
- }
- /* D2 */
- cy_as_ll_request_response__set_word(req_p, 2,
- ((uint16_t)((address&0x00007fff)<<1)));
-
-	/* Create the response. */
- reply_p = cy_as_ll_create_response(dev_p, 2);
-
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
-	/* Send the request. */
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
-	/* Check the reply type. */
- if (cy_as_ll_request_response__get_code(reply_p) ==
- CY_RESP_SDIO_DIRECT) {
- resp_data = cy_as_ll_request_response__get_word(reply_p, 0);
- if (resp_data >> 8)
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- else if (data_p != 0)
- *(uint8_t *)(data_p) = (uint8_t)(resp_data&0x00ff);
- } else {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- }
-
-destroy:
- if (req_p != 0)
- cy_as_ll_destroy_request(dev_p, req_p);
- if (reply_p != 0)
- cy_as_ll_destroy_response(dev_p, reply_p);
- return ret;
-}
-
-
-cy_as_return_status_t
-cy_as_sdio_direct_read(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no,
- uint32_t address,
- uint8_t misc_buf,
- uint8_t *data_p)
-{
- return cy_as_sdio_direct_io(handle, bus, device, n_function_no,
- address, misc_buf, 0x00, cy_false, data_p);
-}
-EXPORT_SYMBOL(cy_as_sdio_direct_read);
-
-cy_as_return_status_t
-cy_as_sdio_direct_write(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no,
- uint32_t address,
- uint8_t misc_buf,
- uint16_t argument,
- uint8_t *data_p)
-{
- return cy_as_sdio_direct_io(handle, bus, device, n_function_no,
- address, misc_buf, argument, cy_true, data_p);
-}
-EXPORT_SYMBOL(cy_as_sdio_direct_write);
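A hypothetical caller of the two CMD52 wrappers above; the register address, function number and value are examples only, and the function is assumed to have been initialised already.

/*
 * Illustrative only: read one byte from a register of function 1 and
 * write the same value back through the direct (CMD52) wrappers.
 */
static cy_as_return_status_t demo_direct_access(cy_as_device_handle h)
{
	cy_as_return_status_t ret;
	uint8_t val = 0;

	ret = cy_as_sdio_direct_read(h, 0, 0, 1, 0x06, 0, &val);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	return cy_as_sdio_direct_write(h, 0, 0, 1, 0x06, 0, val, 0);
}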
-
-/* CMD53 extended IO */
-cy_as_return_status_t
-cy_as_sdio_extended_i_o(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no,
- uint32_t address,
- uint8_t misc_buf,
- uint16_t argument,
- uint8_t is_write,
- uint8_t *data_p ,
- uint8_t is_resume)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint8_t resp_type;
- uint8_t reqtype;
- uint16_t resp_data;
- cy_as_context *ctxt_p;
- uint32_t dmasize, loopcount = 200;
- cy_as_end_point_number_t ep;
-
- cy_as_device *dev_p = (cy_as_device *)handle;
- ret = cy_as_sdio_device_check(dev_p, bus, device);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (!(cy_as_sdio_check_function_initialized(handle,
- bus, n_function_no)))
- return CY_AS_ERROR_INVALID_FUNCTION;
- if (cy_as_sdio_check_function_suspended(handle, bus, n_function_no))
- return CY_AS_ERROR_FUNCTION_SUSPENDED;
-
-
- if ((cy_as_device_is_storage_async_pending(dev_p)) ||
- (dev_p->storage_wait))
- return CY_AS_ERROR_ASYNC_PENDING;
-
-	/* A request for zero bytes/blocks is returned as success. */
- if (argument == 0)
- return CY_AS_ERROR_SUCCESS;
-
- /* Initialise the request to send to the West Bridge device. */
- if (is_write == cy_true) {
- reqtype = CY_RQT_SDIO_WRITE_EXTENDED;
- ep = dev_p->storage_write_endpoint;
- } else {
- reqtype = CY_RQT_SDIO_READ_EXTENDED;
- ep = dev_p->storage_read_endpoint;
- }
-
- req_p = dev_p->storage_rw_req_p;
- cy_as_ll_init_request(req_p, reqtype, CY_RQT_STORAGE_RQT_CONTEXT, 3);
-
- /* Initialise the space for reply from the Antioch. */
- reply_p = dev_p->storage_rw_resp_p;
- cy_as_ll_init_response(reply_p, 2);
-
- /* Setup the DMA request */
- if (!(misc_buf&CY_SDIO_BLOCKMODE)) {
- if (argument >
- dev_p->sdiocard[bus].
- function[n_function_no-1].blocksize)
- return CY_AS_ERROR_INVALID_BLOCKSIZE;
-
- } else {
- if (argument > 511)
- return CY_AS_ERROR_INVALID_BLOCKSIZE;
- }
-
- if (argument == 512)
- argument = 0;
-
- dmasize = ((misc_buf&CY_SDIO_BLOCKMODE) != 0) ?
- dev_p->sdiocard[bus].function[n_function_no-1].blocksize
- * argument : argument;
-
- ret = cy_as_dma_queue_request(dev_p, ep, (void *)(data_p),
- dmasize, cy_false, (is_write & cy_true) ? cy_false :
- cy_true, cy_as_sync_storage_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, (uint8_t)device,
- n_function_no | ((is_resume) ? 0x80 : 0x00)));
- cy_as_ll_request_response__set_word(req_p, 1,
- ((uint16_t)n_function_no)<<12|
- ((uint16_t)(misc_buf & (CY_SDIO_BLOCKMODE|CY_SDIO_OP_INCR)))
- << 9 | (uint16_t)(address >> 7) |
- ((is_write == cy_true) ? 0x8000 : 0x0000));
- cy_as_ll_request_response__set_word(req_p, 2,
- ((uint16_t)(address&0x0000ffff) << 9) | argument);
-
-
- /* Send the request and wait for completion of storage request */
- dev_p->storage_wait = cy_true;
- ret = cy_as_ll_send_request(dev_p, req_p, reply_p,
- cy_true, cy_as_sdio_sync_reply_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_dma_cancel(dev_p, ep, CY_AS_ERROR_CANCELED);
- } else {
- /* Setup the DMA request */
- ctxt_p = dev_p->context[CY_RQT_STORAGE_RQT_CONTEXT];
- ret = cy_as_dma_drain_queue(dev_p, ep, cy_true);
-
- while (loopcount-- > 0) {
- if (dev_p->storage_wait == cy_false)
- break;
- cy_as_hal_sleep_on(&ctxt_p->channel, 10);
- }
- if (dev_p->storage_wait == cy_true) {
- dev_p->storage_wait = cy_false;
- cy_as_ll_remove_request(dev_p, ctxt_p, req_p, cy_true);
- dev_p->storage_error = CY_AS_ERROR_TIMEOUT;
- }
-
- ret = dev_p->storage_error;
-
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- resp_type = cy_as_ll_request_response__get_code(
- dev_p->storage_rw_resp_p);
- if (resp_type == CY_RESP_SDIO_EXT) {
- resp_data = cy_as_ll_request_response__get_word
- (reply_p, 0)&0x00ff;
- if (resp_data)
- ret = CY_AS_ERROR_INVALID_REQUEST;
-
- } else {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- }
- }
- return ret;
-
-}
-
-static void
-cy_as_sdio_async_reply_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t ret)
-{
- cy_as_storage_callback cb_ms;
- uint8_t reqtype;
- uint32_t pendingblocks;
- (void)rqt;
- (void)context;
-
- pendingblocks = 0;
- reqtype = cy_as_ll_request_response__get_code(rqt);
- if (ret == CY_AS_ERROR_SUCCESS) {
- if ((cy_as_ll_request_response__get_code(resp) ==
- CY_RESP_SUCCESS_FAILURE) ||
- (cy_as_ll_request_response__get_code(resp) ==
- CY_RESP_SDIO_EXT)) {
- ret = cy_as_ll_request_response__get_word(resp, 0);
- ret &= 0x00FF;
- } else {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- }
- }
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- if (reqtype == CY_RQT_SDIO_READ_EXTENDED)
- cy_as_dma_cancel(dev_p,
- dev_p->storage_read_endpoint, ret);
- else
- cy_as_dma_cancel(dev_p,
- dev_p->storage_write_endpoint, ret);
-
- dev_p->storage_error = ret;
- }
-
- dev_p->storage_wait = cy_false;
-
- /*
- * if the DMA callback has already been called,
- * the user callback has to be called from here.
- */
- if (!cy_as_device_is_storage_async_pending(dev_p)) {
- cy_as_hal_assert(dev_p->storage_cb_ms != NULL);
- cb_ms = dev_p->storage_cb_ms;
-
- dev_p->storage_cb = 0;
- dev_p->storage_cb_ms = 0;
-
- if ((ret == CY_AS_ERROR_SUCCESS) ||
- (ret == CY_AS_ERROR_IO_ABORTED) ||
- (ret == CY_AS_ERROR_IO_SUSPENDED)) {
- ret = dev_p->storage_error;
- pendingblocks = ((uint32_t)
- cy_as_ll_request_response__get_word
- (resp, 1)) << 16;
- } else
- ret = CY_AS_ERROR_INVALID_RESPONSE;
-
- cb_ms((cy_as_device_handle)dev_p, dev_p->storage_bus_index,
- dev_p->storage_device_index,
- (dev_p->storage_unit | pendingblocks),
- dev_p->storage_block_addr, dev_p->storage_oper, ret);
- } else
- dev_p->storage_error = ret;
-}
-
-
-cy_as_return_status_t
-cy_as_sdio_extended_i_o_async(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no,
- uint32_t address,
- uint8_t misc_buf,
- uint16_t argument,
- uint8_t is_write,
- uint8_t *data_p,
- cy_as_storage_callback callback)
-{
-
- uint32_t mask;
- uint32_t dmasize;
- cy_as_ll_request_response *req_p , *reply_p;
- uint8_t reqtype;
- cy_as_end_point_number_t ep;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- ret = cy_as_sdio_device_check(dev_p, bus, device);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (!(cy_as_sdio_check_function_initialized(handle,
- bus, n_function_no)))
- return CY_AS_ERROR_INVALID_FUNCTION;
- if (cy_as_sdio_check_function_suspended(handle, bus, n_function_no))
- return CY_AS_ERROR_FUNCTION_SUSPENDED;
-
- if (callback == 0)
- return CY_AS_ERROR_NULL_CALLBACK;
-
- /* We are supposed to return success if the number of
- * blocks is zero
- */
- if (((misc_buf&CY_SDIO_BLOCKMODE) != 0) && (argument == 0)) {
- callback(handle, bus, device, n_function_no, address,
- ((is_write) ? cy_as_op_write : cy_as_op_read),
- CY_AS_ERROR_SUCCESS);
- return CY_AS_ERROR_SUCCESS;
- }
-
-
- /*
-	 * Since async operations can be triggered by interrupt code,
-	 * we must ensure that we do not get multiple async operations
-	 * going at one time, and protect this test-and-set operation
-	 * from interrupts.
- */
- mask = cy_as_hal_disable_interrupts();
- if ((cy_as_device_is_storage_async_pending(dev_p)) ||
- (dev_p->storage_wait)) {
- cy_as_hal_enable_interrupts(mask);
- return CY_AS_ERROR_ASYNC_PENDING;
- }
- cy_as_device_set_storage_async_pending(dev_p);
- cy_as_hal_enable_interrupts(mask);
-
-
- /*
-	 * Store information about the currently
-	 * outstanding request.
- */
- dev_p->storage_cb_ms = callback;
- dev_p->storage_bus_index = bus;
- dev_p->storage_device_index = device;
- dev_p->storage_unit = n_function_no;
- dev_p->storage_block_addr = address;
-
- if (is_write == cy_true) {
- reqtype = CY_RQT_SDIO_WRITE_EXTENDED;
- ep = dev_p->storage_write_endpoint;
- } else {
- reqtype = CY_RQT_SDIO_READ_EXTENDED;
- ep = dev_p->storage_read_endpoint;
- }
-
- /* Initialise the request to send to the West Bridge. */
- req_p = dev_p->storage_rw_req_p;
- cy_as_ll_init_request(req_p, reqtype,
- CY_RQT_STORAGE_RQT_CONTEXT, 3);
-
- /* Initialise the space for reply from the West Bridge. */
- reply_p = dev_p->storage_rw_resp_p;
- cy_as_ll_init_response(reply_p, 2);
-
- if (!(misc_buf&CY_SDIO_BLOCKMODE)) {
- if (argument >
- dev_p->sdiocard[bus].function[n_function_no-1].blocksize)
- return CY_AS_ERROR_INVALID_BLOCKSIZE;
-
- } else {
- if (argument > 511)
- return CY_AS_ERROR_INVALID_BLOCKSIZE;
- }
-
- if (argument == 512)
- argument = 0;
- dmasize = ((misc_buf&CY_SDIO_BLOCKMODE) != 0) ?
- dev_p->sdiocard[bus].function[n_function_no-1].blocksize *
- argument : argument;
-
- /* Setup the DMA request and adjust the storage
- * operation if we are reading */
- if (reqtype == CY_RQT_SDIO_READ_EXTENDED) {
- ret = cy_as_dma_queue_request(dev_p, ep,
- (void *)data_p, dmasize , cy_false, cy_true,
- cy_as_async_storage_callback);
- dev_p->storage_oper = cy_as_op_read;
- } else if (reqtype == CY_RQT_SDIO_WRITE_EXTENDED) {
- ret = cy_as_dma_queue_request(dev_p, ep, (void *)data_p,
- dmasize, cy_false, cy_false, cy_as_async_storage_callback);
- dev_p->storage_oper = cy_as_op_write;
- }
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_device_clear_storage_async_pending(dev_p);
- return ret;
- }
-
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, (uint8_t)device, n_function_no));
- cy_as_ll_request_response__set_word(req_p, 1,
- ((uint16_t)n_function_no) << 12 |
- ((uint16_t)(misc_buf & (CY_SDIO_BLOCKMODE | CY_SDIO_OP_INCR)))
- << 9 | (uint16_t)(address>>7) |
- ((is_write == cy_true) ? 0x8000 : 0x0000));
- cy_as_ll_request_response__set_word(req_p, 2,
- ((uint16_t)(address&0x0000ffff) << 9) | argument);
-
-
- /* Send the request and wait for completion of storage request */
- dev_p->storage_wait = cy_true;
- ret = cy_as_ll_send_request(dev_p, req_p, reply_p, cy_true,
- cy_as_sdio_async_reply_callback);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_dma_cancel(dev_p, ep, CY_AS_ERROR_CANCELED);
- cy_as_device_clear_storage_async_pending(dev_p);
- } else {
- cy_as_dma_kick_start(dev_p, ep);
- }
-
- return ret;
-}
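The interrupt-masked test-and-set at the top of the async path above is the driver's guard on its single outstanding-async slot; the fragment below isolates that pattern using the HAL and flag helpers already seen in this file. It is a sketch, not a drop-in replacement.

static cy_as_return_status_t demo_claim_async_slot(cy_as_device *dev_p)
{
	uint32_t mask;

	/* Disable interrupts around the test-and-set so an ISR cannot
	 * start a second async operation in between. */
	mask = cy_as_hal_disable_interrupts();
	if (cy_as_device_is_storage_async_pending(dev_p) ||
			dev_p->storage_wait) {
		cy_as_hal_enable_interrupts(mask);
		return CY_AS_ERROR_ASYNC_PENDING;
	}
	cy_as_device_set_storage_async_pending(dev_p);
	cy_as_hal_enable_interrupts(mask);

	return CY_AS_ERROR_SUCCESS;
}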
-
-/* CMD53 Extended Read */
-cy_as_return_status_t
-cy_as_sdio_extended_read(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no,
- uint32_t address,
- uint8_t misc_buf,
- uint16_t argument,
- uint8_t *data_p,
- cy_as_sdio_callback callback)
-{
- if (callback == 0)
- return cy_as_sdio_extended_i_o(handle, bus, device,
- n_function_no, address, misc_buf, argument,
- cy_false, data_p, 0);
-
- return cy_as_sdio_extended_i_o_async(handle, bus, device,
- n_function_no, address, misc_buf, argument, cy_false,
- data_p, callback);
-}
-EXPORT_SYMBOL(cy_as_sdio_extended_read);
-
-/* CMD53 Extended Write */
-cy_as_return_status_t
-cy_as_sdio_extended_write(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no,
- uint32_t address,
- uint8_t misc_buf,
- uint16_t argument,
- uint8_t *data_p,
- cy_as_sdio_callback callback)
-{
- if (callback == 0)
- return cy_as_sdio_extended_i_o(handle, bus, device,
- n_function_no, address, misc_buf, argument, cy_true,
- data_p, 0);
-
- return cy_as_sdio_extended_i_o_async(handle, bus, device,
- n_function_no, address, misc_buf, argument, cy_true,
- data_p, callback);
-}
-EXPORT_SYMBOL(cy_as_sdio_extended_write);
-
-/* Read the CIS tuple for the given function and tuple ID. */
-cy_as_return_status_t
-cy_as_sdio_get_c_i_s_info(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no,
- uint16_t tuple_id,
- uint8_t *data_p)
-{
-
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint16_t resp_data;
- cy_as_context *ctxt_p;
- uint32_t loopcount = 200;
-
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- ret = cy_as_sdio_device_check(dev_p, bus, device);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (!(cy_as_sdio_check_function_initialized(handle, bus, 0)))
- return CY_AS_ERROR_INVALID_FUNCTION;
-
- if ((cy_as_device_is_storage_async_pending(dev_p)) ||
- (dev_p->storage_wait))
- return CY_AS_ERROR_ASYNC_PENDING;
-
-
- /* Initialise the request to send to the Antioch. */
- req_p = dev_p->storage_rw_req_p;
- cy_as_ll_init_request(req_p, CY_RQT_SDIO_GET_TUPLE,
- CY_RQT_STORAGE_RQT_CONTEXT, 2);
-
- /* Initialise the space for reply from the Antioch. */
- reply_p = dev_p->storage_rw_resp_p;
- cy_as_ll_init_response(reply_p, 3);
-
- /* Setup the DMA request */
- ret = cy_as_dma_queue_request(dev_p, dev_p->storage_read_endpoint,
- data_p+1, 255, cy_false, cy_true, cy_as_sync_storage_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, (uint8_t)device, n_function_no));
-
- /* Set tuple id to fetch. */
- cy_as_ll_request_response__set_word(req_p, 1, tuple_id<<8);
-
- /* Send the request and wait for completion of storage request */
- dev_p->storage_wait = cy_true;
- ret = cy_as_ll_send_request(dev_p, req_p, reply_p, cy_true,
- cy_as_sdio_sync_reply_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_dma_cancel(dev_p,
- dev_p->storage_read_endpoint, CY_AS_ERROR_CANCELED);
- } else {
- /* Setup the DMA request */
- ctxt_p = dev_p->context[CY_RQT_STORAGE_RQT_CONTEXT];
- ret = cy_as_dma_drain_queue(dev_p,
- dev_p->storage_read_endpoint, cy_true);
-
- while (loopcount-- > 0) {
- if (dev_p->storage_wait == cy_false)
- break;
- cy_as_hal_sleep_on(&ctxt_p->channel, 10);
- }
-
- if (dev_p->storage_wait == cy_true) {
- dev_p->storage_wait = cy_false;
- cy_as_ll_remove_request(dev_p, ctxt_p, req_p, cy_true);
- return CY_AS_ERROR_TIMEOUT;
- }
- ret = dev_p->storage_error;
-
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_ll_request_response__get_code
- (dev_p->storage_rw_resp_p) == CY_RESP_SDIO_GET_TUPLE) {
- resp_data = cy_as_ll_request_response__get_word
- (reply_p, 0);
- if (resp_data) {
- ret = CY_AS_ERROR_INVALID_REQUEST;
- } else if (data_p != 0)
- *(uint8_t *)data_p = (uint8_t)
- (cy_as_ll_request_response__get_word
- (reply_p, 0)&0x00ff);
- } else {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- }
- }
- return ret;
-}
-
-/* Query the SDIO card. */
-cy_as_return_status_t
-cy_as_sdio_query_card(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- cy_as_sdio_card *data_p)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- uint8_t resp_type;
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- ret = cy_as_sdio_device_check(dev_p, bus, device);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
-	/* Clear the SDIO card structure in dev_p before repopulating it. */
-
- cy_as_hal_mem_set(&dev_p->sdiocard[bus], 0, sizeof(cy_as_sdio_device));
-
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_SDIO_QUERY_CARD,
- CY_RQT_STORAGE_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, (uint8_t)device, 0));
-
- reply_p = cy_as_ll_create_response(dev_p, 5);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- resp_type = cy_as_ll_request_response__get_code(reply_p);
- if (resp_type == CY_RESP_SDIO_QUERY_CARD) {
- dev_p->sdiocard[bus].card.num_functions =
- (uint8_t)((reply_p->data[0]&0xff00)>>8);
- dev_p->sdiocard[bus].card.memory_present =
- (uint8_t)reply_p->data[0]&0x0001;
- dev_p->sdiocard[bus].card.manufacturer__id =
- reply_p->data[1];
- dev_p->sdiocard[bus].card.manufacturer_info =
- reply_p->data[2];
- dev_p->sdiocard[bus].card.blocksize =
- reply_p->data[3];
- dev_p->sdiocard[bus].card.maxblocksize =
- reply_p->data[3];
- dev_p->sdiocard[bus].card.card_capability =
- (uint8_t)((reply_p->data[4]&0xff00)>>8);
- dev_p->sdiocard[bus].card.sdio_version =
- (uint8_t)(reply_p->data[4]&0x00ff);
- dev_p->sdiocard[bus].function_init_map = 0x01;
- data_p->num_functions =
- dev_p->sdiocard[bus].card.num_functions;
- data_p->memory_present =
- dev_p->sdiocard[bus].card.memory_present;
- data_p->manufacturer__id =
- dev_p->sdiocard[bus].card.manufacturer__id;
- data_p->manufacturer_info =
- dev_p->sdiocard[bus].card.manufacturer_info;
- data_p->blocksize = dev_p->sdiocard[bus].card.blocksize;
- data_p->maxblocksize =
- dev_p->sdiocard[bus].card.maxblocksize;
- data_p->card_capability =
- dev_p->sdiocard[bus].card.card_capability;
- data_p->sdio_version =
- dev_p->sdiocard[bus].card.sdio_version;
- } else {
- if (resp_type == CY_RESP_SUCCESS_FAILURE)
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- else
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- }
-destroy:
- if (req_p != 0)
- cy_as_ll_destroy_request(dev_p, req_p);
- if (reply_p != 0)
- cy_as_ll_destroy_response(dev_p, reply_p);
- return ret;
-}
-EXPORT_SYMBOL(cy_as_sdio_query_card);
-
-/* Reset the SDIO card. */
-cy_as_return_status_t
-cy_as_sdio_reset_card(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device)
-{
-
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint8_t resp_type;
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- ret = cy_as_sdio_device_check(dev_p, bus, device);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (dev_p->sdiocard != 0) {
- dev_p->sdiocard[bus].function_init_map = 0;
- dev_p->sdiocard[bus].function_suspended_map = 0;
- }
-
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_SDIO_RESET_DEV,
- CY_RQT_STORAGE_RQT_CONTEXT, 1);
-
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
-	/* Set up the mailbox request. */
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, (uint8_t)device, 0));
-
- reply_p = cy_as_ll_create_response(dev_p, 2);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- resp_type = cy_as_ll_request_response__get_code(reply_p);
-
- if (resp_type == CY_RESP_SUCCESS_FAILURE) {
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- if (ret == CY_AS_ERROR_SUCCESS)
- ret = cy_as_sdio_query_card(handle, bus, device, 0);
- } else
- ret = CY_AS_ERROR_INVALID_RESPONSE;
-
-destroy:
- if (req_p != 0)
- cy_as_ll_destroy_request(dev_p, req_p);
- if (reply_p != 0)
- cy_as_ll_destroy_response(dev_p, reply_p);
- return ret;
-}
-
-/* Initialise an IO function. */
-cy_as_return_status_t
-cy_as_sdio_init_function(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no,
- uint8_t misc_buf)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint8_t resp_type;
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- ret = cy_as_sdio_device_check(dev_p, bus, device);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (!(cy_as_sdio_check_function_initialized
- (handle, bus, 0)))
- return CY_AS_ERROR_NOT_RUNNING;
-
- if ((cy_as_sdio_check_function_initialized
- (handle, bus, n_function_no))) {
- if (misc_buf&CY_SDIO_FORCE_INIT)
- dev_p->sdiocard[bus].function_init_map &=
- (~(1 << n_function_no));
- else
- return CY_AS_ERROR_ALREADY_RUNNING;
- }
-
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_SDIO_INIT_FUNCTION, CY_RQT_STORAGE_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, (uint8_t)device, n_function_no));
-
- reply_p = cy_as_ll_create_response(dev_p, 5);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- resp_type = cy_as_ll_request_response__get_code(reply_p);
-
- if (resp_type == CY_RESP_SDIO_INIT_FUNCTION) {
- dev_p->sdiocard[bus].function[n_function_no-1].function_code =
- (uint8_t)((reply_p->data[0]&0xff00)>>8);
- dev_p->sdiocard[bus].function[n_function_no-1].
- extended_func_code = (uint8_t)reply_p->data[0]&0x00ff;
- dev_p->sdiocard[bus].function[n_function_no-1].blocksize =
- reply_p->data[1];
- dev_p->sdiocard[bus].function[n_function_no-1].
- maxblocksize = reply_p->data[1];
- dev_p->sdiocard[bus].function[n_function_no-1].card_psn =
- (uint32_t)(reply_p->data[2])<<16;
- dev_p->sdiocard[bus].function[n_function_no-1].card_psn |=
- (uint32_t)(reply_p->data[3]);
- dev_p->sdiocard[bus].function[n_function_no-1].csa_bits =
- (uint8_t)((reply_p->data[4]&0xff00)>>8);
- dev_p->sdiocard[bus].function[n_function_no-1].wakeup_support =
- (uint8_t)(reply_p->data[4]&0x0001);
- dev_p->sdiocard[bus].function_init_map |= (1 << n_function_no);
- cy_as_sdio_clear_function_suspended(handle, bus, n_function_no);
-
- } else {
- if (resp_type == CY_RESP_SUCCESS_FAILURE)
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- else
- ret = CY_AS_ERROR_INVALID_FUNCTION;
- }
-
-destroy:
- if (req_p != 0)
- cy_as_ll_destroy_request(dev_p, req_p);
- if (reply_p != 0)
- cy_as_ll_destroy_response(dev_p, reply_p);
- return ret;
-}
-EXPORT_SYMBOL(cy_as_sdio_init_function);
-
-/* Query an individual IO function. */
-cy_as_return_status_t
-cy_as_sdio_query_function(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no,
- cy_as_sdio_func *data_p)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
- cy_as_return_status_t ret;
-
- ret = cy_as_sdio_device_check(dev_p, bus, device);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (!(cy_as_sdio_check_function_initialized(handle,
- bus, n_function_no)))
- return CY_AS_ERROR_INVALID_FUNCTION;
-
- data_p->blocksize =
- dev_p->sdiocard[bus].function[n_function_no-1].blocksize;
- data_p->card_psn =
- dev_p->sdiocard[bus].function[n_function_no-1].card_psn;
- data_p->csa_bits =
- dev_p->sdiocard[bus].function[n_function_no-1].csa_bits;
- data_p->extended_func_code =
- dev_p->sdiocard[bus].function[n_function_no-1].
- extended_func_code;
- data_p->function_code =
- dev_p->sdiocard[bus].function[n_function_no-1].function_code;
- data_p->maxblocksize =
- dev_p->sdiocard[bus].function[n_function_no-1].maxblocksize;
- data_p->wakeup_support =
- dev_p->sdiocard[bus].function[n_function_no-1].wakeup_support;
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-/* Abort the current extended IO operation. */
-cy_as_return_status_t
-cy_as_sdio_abort_function(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint8_t resp_type;
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- ret = cy_as_sdio_device_check(dev_p, bus, device);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (!(cy_as_sdio_check_function_initialized(handle,
- bus, n_function_no)))
- return CY_AS_ERROR_INVALID_FUNCTION;
-
- if ((cy_as_device_is_storage_async_pending(dev_p)) ||
- (dev_p->storage_wait)) {
- if (!(cy_as_sdio_get_card_capability(handle, bus) &
- CY_SDIO_SDC))
- return CY_AS_ERROR_INVALID_COMMAND;
- }
-
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_SDIO_ABORT_IO,
- CY_RQT_GENERAL_RQT_CONTEXT, 1);
-
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
-	/* Set up the mailbox request. */
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, (uint8_t)device, n_function_no));
-
- reply_p = cy_as_ll_create_response(dev_p, 2);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- resp_type = cy_as_ll_request_response__get_code(reply_p);
-
- if (resp_type == CY_RESP_SUCCESS_FAILURE)
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- else
- ret = CY_AS_ERROR_INVALID_RESPONSE;
-
-
-destroy:
- if (req_p != 0)
- cy_as_ll_destroy_request(dev_p, req_p);
- if (reply_p != 0)
- cy_as_ll_destroy_response(dev_p, reply_p);
- return ret;
-}
-
-/* Suspend IO to the given function. */
-cy_as_return_status_t
-cy_as_sdio_suspend(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- ret = cy_as_sdio_device_check(dev_p, bus, device);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (!(cy_as_sdio_check_function_initialized(handle, bus,
- n_function_no)))
- return CY_AS_ERROR_INVALID_FUNCTION;
- if (!(cy_as_sdio_check_support_bus_suspend(handle, bus)))
- return CY_AS_ERROR_INVALID_FUNCTION;
- if (!(cy_as_sdio_get_card_capability(handle, bus) & CY_SDIO_SDC))
- return CY_AS_ERROR_INVALID_FUNCTION;
- if (cy_as_sdio_check_function_suspended(handle, bus, n_function_no))
- return CY_AS_ERROR_FUNCTION_SUSPENDED;
-
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_SDIO_SUSPEND, CY_RQT_GENERAL_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
-	/* Set up the mailbox request. */
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, (uint8_t)device, n_function_no));
-
- reply_p = cy_as_ll_create_response(dev_p, 2);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
-
- if (ret == CY_AS_ERROR_SUCCESS) {
- ret = cy_as_ll_request_response__get_code(reply_p);
- cy_as_sdio_set_function_suspended(handle, bus, n_function_no);
- }
-
- if (req_p != 0)
- cy_as_ll_destroy_request(dev_p, req_p);
- if (reply_p != 0)
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-/* Resume a suspended function. */
-cy_as_return_status_t
-cy_as_sdio_resume(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no,
- cy_as_oper_type op,
- uint8_t misc_buf,
- uint16_t pendingblockcount,
- uint8_t *data_p
- )
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t resp_data, ret = CY_AS_ERROR_SUCCESS;
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- ret = cy_as_sdio_device_check(dev_p, bus, device);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (!(cy_as_sdio_check_function_initialized
- (handle, bus, n_function_no)))
- return CY_AS_ERROR_INVALID_FUNCTION;
-
-	/* If suspend/resume is not supported, return an error. */
- if (!(cy_as_sdio_check_support_bus_suspend(handle, bus)))
- return CY_AS_ERROR_INVALID_FUNCTION;
-
-	/* If the function is not suspended, return an error. */
- if (!(cy_as_sdio_check_function_suspended
- (handle, bus, n_function_no)))
- return CY_AS_ERROR_INVALID_FUNCTION;
-
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_SDIO_RESUME, CY_RQT_STORAGE_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
-	/* Set up the mailbox request. */
- cy_as_ll_request_response__set_word(req_p, 0,
- create_address(bus, (uint8_t)device, n_function_no));
-
- reply_p = cy_as_ll_create_response(dev_p, 2);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) ==
- CY_RESP_SDIO_RESUME) {
- resp_data = cy_as_ll_request_response__get_word(reply_p, 0);
- if (resp_data & 0x00ff) {
-			/* Send an extended request to resume the transfer. */
- if (op == cy_as_op_read) {
- ret = cy_as_sdio_extended_i_o(handle, bus,
- device, n_function_no, 0, misc_buf,
- pendingblockcount, cy_false, data_p, 1);
- } else {
- ret = cy_as_sdio_extended_i_o(handle, bus,
- device, n_function_no, 0, misc_buf,
- pendingblockcount, cy_true, data_p, 1);
- }
- } else {
- ret = CY_AS_ERROR_SUCCESS;
- }
- } else {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- }
-
-destroy:
- cy_as_sdio_clear_function_suspended(handle, bus, n_function_no);
- if (req_p != 0)
- cy_as_ll_destroy_request(dev_p, req_p);
- if (reply_p != 0)
- cy_as_ll_destroy_response(dev_p, reply_p);
- return ret;
-
-}
-
-/* Set the function block size. The size cannot exceed the
- * maximum block size for the function. */
-cy_as_return_status_t
-cy_as_sdio_set_blocksize(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no,
- uint16_t blocksize)
-{
- cy_as_return_status_t ret;
- cy_as_device *dev_p = (cy_as_device *)handle;
- ret = cy_as_sdio_device_check(dev_p, bus, device);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (!(cy_as_sdio_check_function_initialized
- (handle, bus, n_function_no)))
- return CY_AS_ERROR_INVALID_FUNCTION;
- if (n_function_no == 0) {
- if (blocksize > cy_as_sdio_get_card_max_blocksize(handle, bus))
- return CY_AS_ERROR_INVALID_BLOCKSIZE;
- else if (blocksize == cy_as_sdio_get_card_blocksize
- (handle, bus))
- return CY_AS_ERROR_SUCCESS;
- } else {
- if (blocksize >
- cy_as_sdio_get_function_max_blocksize(handle,
- bus, n_function_no))
- return CY_AS_ERROR_INVALID_BLOCKSIZE;
- else if (blocksize ==
- cy_as_sdio_get_function_blocksize(handle,
- bus, n_function_no))
- return CY_AS_ERROR_SUCCESS;
- }
-
- ret = cy_as_sdio_direct_write(handle, bus, device, 0,
- (uint16_t)(n_function_no << 8) |
- 0x10, 0, blocksize & 0x00ff, 0);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- ret = cy_as_sdio_direct_write(handle, bus, device, 0,
- (uint16_t)(n_function_no << 8) |
- 0x11, 0, (blocksize & 0xff00) >> 8, 0);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (n_function_no == 0)
- cy_as_sdio_set_card_block_size(handle, bus, blocksize);
- else
- cy_as_sdio_set_function_block_size(handle,
- bus, n_function_no, blocksize);
- return ret;
-}
-EXPORT_SYMBOL(cy_as_sdio_set_blocksize);
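For completeness, a hypothetical caller of the block-size helper above; the 64-byte size and function number are examples, and the card and function are assumed to be initialised.

/* Illustrative only: pick a 64-byte IO block size for function 1
 * before issuing block-mode extended (CMD53) transfers. */
static cy_as_return_status_t demo_set_f1_blocksize(cy_as_device_handle h)
{
	return cy_as_sdio_set_blocksize(h, 0, 0, 1, 64);
}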
-
-/* Deinitialize an SDIO function. */
-cy_as_return_status_t
-cy_as_sdio_de_init_function(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- uint8_t n_function_no)
-{
- cy_as_return_status_t ret;
- uint8_t temp;
-
- if (n_function_no == 0)
- return CY_AS_ERROR_INVALID_FUNCTION;
-
- ret = cy_as_sdio_device_check((cy_as_device *)handle, bus, device);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (!(cy_as_sdio_check_function_initialized
- (handle, bus, n_function_no)))
- return CY_AS_ERROR_SUCCESS;
-
- temp = (uint8_t)(((cy_as_device *)handle)->sdiocard[bus].
- function_init_map & (~(1 << n_function_no)));
-
- cy_as_sdio_direct_write(handle, bus, device, 0, 0x02, 0, temp, 0);
-
- ((cy_as_device *)handle)->sdiocard[bus].function_init_map &=
- (~(1 << n_function_no));
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-
-/*[]*/
diff --git a/drivers/staging/westbridge/astoria/api/src/cyasusb.c b/drivers/staging/westbridge/astoria/api/src/cyasusb.c
deleted file mode 100644
index 1b55e611191..00000000000
--- a/drivers/staging/westbridge/astoria/api/src/cyasusb.c
+++ /dev/null
@@ -1,3740 +0,0 @@
-/* Cypress West Bridge API source file (cyasusb.c)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#include "../../include/linux/westbridge/cyashal.h"
-#include "../../include/linux/westbridge/cyasusb.h"
-#include "../../include/linux/westbridge/cyaserr.h"
-#include "../../include/linux/westbridge/cyasdma.h"
-#include "../../include/linux/westbridge/cyaslowlevel.h"
-#include "../../include/linux/westbridge/cyaslep2pep.h"
-#include "../../include/linux/westbridge/cyasregs.h"
-#include "../../include/linux/westbridge/cyasstorage.h"
-
-static cy_as_return_status_t
-cy_as_usb_ack_setup_packet(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
-
-static void
-cy_as_usb_func_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t ret);
-/*
-* Reset the USB EP0 state
-*/
-static void
-cy_as_usb_reset_e_p0_state(cy_as_device *dev_p)
-{
- cy_as_log_debug_message(6, "cy_as_usb_reset_e_p0_state called");
-
- cy_as_device_clear_ack_delayed(dev_p);
- cy_as_device_clear_setup_packet(dev_p);
- if (cy_as_device_is_usb_async_pending(dev_p, 0))
- cy_as_usb_cancel_async((cy_as_device_handle)dev_p, 0);
-
- dev_p->usb_pending_buffer = 0;
-}
-
-/*
-* Check that the USB stack is configured, running and not suspended
-* before allowing a USB request to proceed
-*/
-static cy_as_return_status_t
-is_usb_active(cy_as_device *dev_p)
-{
- if (!cy_as_device_is_configured(dev_p))
- return CY_AS_ERROR_NOT_CONFIGURED;
-
- if (!cy_as_device_is_firmware_loaded(dev_p))
- return CY_AS_ERROR_NO_FIRMWARE;
-
- if (dev_p->usb_count == 0)
- return CY_AS_ERROR_NOT_RUNNING;
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- return CY_AS_ERROR_SUCCESS;
-}
-
-static void
-usb_ack_callback(cy_as_device_handle h,
- cy_as_return_status_t status,
- uint32_t client,
- cy_as_funct_c_b_type type,
- void *data)
-{
- cy_as_device *dev_p = (cy_as_device *)h;
-
- (void)client;
- (void)status;
- (void)data;
-
- cy_as_hal_assert(type == CY_FUNCT_CB_NODATA);
-
- if (dev_p->usb_pending_buffer) {
- cy_as_usb_io_callback cb;
-
- cb = dev_p->usb_cb[0];
- dev_p->usb_cb[0] = 0;
- cy_as_device_clear_usb_async_pending(dev_p, 0);
- if (cb)
- cb(h, 0, dev_p->usb_pending_size,
- dev_p->usb_pending_buffer, dev_p->usb_error);
-
- dev_p->usb_pending_buffer = 0;
- }
-
- cy_as_device_clear_setup_packet(dev_p);
-}
-
-static void
-my_usb_request_callback_usb_event(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p)
-{
- uint16_t ev;
- uint16_t val;
- cy_as_device_handle h = (cy_as_device_handle)dev_p;
-
- ev = cy_as_ll_request_response__get_word(req_p, 0);
- switch (ev) {
- case 0: /* Reserved */
- cy_as_ll_send_status_response(dev_p, CY_RQT_USB_RQT_CONTEXT,
- CY_AS_ERROR_INVALID_REQUEST, 0);
- break;
-
- case 1: /* Reserved */
- cy_as_ll_send_status_response(dev_p, CY_RQT_USB_RQT_CONTEXT,
- CY_AS_ERROR_INVALID_REQUEST, 0);
- break;
-
- case 2: /* USB Suspend */
- dev_p->usb_last_event = cy_as_event_usb_suspend;
- if (dev_p->usb_event_cb_ms)
- dev_p->usb_event_cb_ms(h, cy_as_event_usb_suspend, 0);
- else if (dev_p->usb_event_cb)
- dev_p->usb_event_cb(h, cy_as_event_usb_suspend, 0);
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 0);
- break;
-
- case 3: /* USB Resume */
- dev_p->usb_last_event = cy_as_event_usb_resume;
- if (dev_p->usb_event_cb_ms)
- dev_p->usb_event_cb_ms(h, cy_as_event_usb_resume, 0);
- else if (dev_p->usb_event_cb)
- dev_p->usb_event_cb(h, cy_as_event_usb_resume, 0);
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 0);
- break;
-
- case 4: /* USB Reset */
- /*
- * if we get a USB reset, the USB host did not understand
- * our response or we timed out for some reason. reset
- * our internal state to be ready for another set of
- * enumeration based requests.
- */
- if (cy_as_device_is_ack_delayed(dev_p))
- cy_as_usb_reset_e_p0_state(dev_p);
-
- dev_p->usb_last_event = cy_as_event_usb_reset;
- if (dev_p->usb_event_cb_ms)
- dev_p->usb_event_cb_ms(h, cy_as_event_usb_reset, 0);
- else if (dev_p->usb_event_cb)
- dev_p->usb_event_cb(h, cy_as_event_usb_reset, 0);
-
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 0);
- cy_as_device_clear_usb_high_speed(dev_p);
- cy_as_usb_set_dma_sizes(dev_p);
- dev_p->usb_max_tx_size = 0x40;
- cy_as_dma_set_max_dma_size(dev_p, 0x06, 0x40);
- break;
-
- case 5: /* USB Set Configuration */
- /* The configuration to set */
- val = cy_as_ll_request_response__get_word(req_p, 1);
- dev_p->usb_last_event = cy_as_event_usb_set_config;
- if (dev_p->usb_event_cb_ms)
- dev_p->usb_event_cb_ms(h,
- cy_as_event_usb_set_config, &val);
- else if (dev_p->usb_event_cb)
- dev_p->usb_event_cb(h,
- cy_as_event_usb_set_config, &val);
-
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 0);
- break;
-
- case 6: /* USB Speed change */
- /* Connect speed */
- val = cy_as_ll_request_response__get_word(req_p, 1);
- dev_p->usb_last_event = cy_as_event_usb_speed_change;
- if (dev_p->usb_event_cb_ms)
- dev_p->usb_event_cb_ms(h,
- cy_as_event_usb_speed_change, &val);
- else if (dev_p->usb_event_cb)
- dev_p->usb_event_cb(h,
- cy_as_event_usb_speed_change, &val);
-
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 0);
- cy_as_device_set_usb_high_speed(dev_p);
- cy_as_usb_set_dma_sizes(dev_p);
- dev_p->usb_max_tx_size = 0x200;
- cy_as_dma_set_max_dma_size(dev_p, 0x06, 0x200);
- break;
-
- case 7: /* USB Clear Feature */
- /* EP Number */
- val = cy_as_ll_request_response__get_word(req_p, 1);
- if (dev_p->usb_event_cb_ms)
- dev_p->usb_event_cb_ms(h,
- cy_as_event_usb_clear_feature, &val);
- if (dev_p->usb_event_cb)
- dev_p->usb_event_cb(h,
- cy_as_event_usb_clear_feature, &val);
-
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 0);
- break;
-
- default:
- cy_as_hal_print_message("invalid event type\n");
- cy_as_ll_send_data_response(dev_p, CY_RQT_USB_RQT_CONTEXT,
- CY_RESP_USB_INVALID_EVENT, sizeof(ev), &ev);
- break;
- }
-}
-
-static void
-my_usb_request_callback_usb_data(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p)
-{
- cy_as_end_point_number_t ep;
- uint8_t type;
- uint16_t len;
- uint16_t val;
- cy_as_device_handle h = (cy_as_device_handle)dev_p;
-
- val = cy_as_ll_request_response__get_word(req_p, 0);
- ep = (cy_as_end_point_number_t)((val >> 13) & 0x01);
- len = (val & 0x1ff);
-
- cy_as_hal_assert(len <= 64);
- cy_as_ll_request_response__unpack(req_p,
- 1, len, dev_p->usb_ep_data);
-
- type = (uint8_t)((val >> 14) & 0x03);
- if (type == 0) {
- if (cy_as_device_is_ack_delayed(dev_p)) {
- /*
- * A setup packet has arrived while we are
- * processing a previous setup packet. reset
- * our state with respect to EP0 to be ready
- * to process the new packet.
- */
- cy_as_usb_reset_e_p0_state(dev_p);
- }
-
- if (len != 8)
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT,
- CY_AS_ERROR_INVALID_REQUEST, 0);
- else {
- cy_as_device_clear_ep0_stalled(dev_p);
- cy_as_device_set_setup_packet(dev_p);
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT,
- CY_AS_ERROR_SUCCESS, 0);
-
- if (dev_p->usb_event_cb_ms)
- dev_p->usb_event_cb_ms(h,
- cy_as_event_usb_setup_packet,
- dev_p->usb_ep_data);
- else
- dev_p->usb_event_cb(h,
- cy_as_event_usb_setup_packet,
- dev_p->usb_ep_data);
-
- if ((!cy_as_device_is_ack_delayed(dev_p)) &&
- (!cy_as_device_is_ep0_stalled(dev_p)))
- cy_as_usb_ack_setup_packet(h,
- usb_ack_callback, 0);
- }
- } else if (type == 2) {
- if (len != 0)
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT,
- CY_AS_ERROR_INVALID_REQUEST, 0);
- else {
- if (dev_p->usb_event_cb_ms)
- dev_p->usb_event_cb_ms(h,
- cy_as_event_usb_status_packet, 0);
- else
- dev_p->usb_event_cb(h,
- cy_as_event_usb_status_packet, 0);
-
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT,
- CY_AS_ERROR_SUCCESS, 0);
- }
- } else if (type == 1) {
- /*
- * we need to hand the data associated with these
- * endpoints to the DMA module.
- */
- cy_as_dma_received_data(dev_p, ep, len, dev_p->usb_ep_data);
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 0);
- }
-}
-
-static void
-my_usb_request_callback_inquiry(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p)
-{
- cy_as_usb_inquiry_data_dep cbdata;
- cy_as_usb_inquiry_data cbdata_ms;
- void *data;
- uint16_t val;
- cy_as_device_handle h = (cy_as_device_handle)dev_p;
- uint8_t def_inq_data[64];
- uint8_t evpd;
- uint8_t codepage;
- cy_bool updated;
- uint16_t length;
-
- cy_as_bus_number_t bus;
- uint32_t device;
- cy_as_media_type media;
-
- val = cy_as_ll_request_response__get_word(req_p, 0);
- bus = cy_as_storage_get_bus_from_address(val);
- device = cy_as_storage_get_device_from_address(val);
- media = cy_as_storage_get_media_from_address(val);
-
- val = cy_as_ll_request_response__get_word(req_p, 1);
- evpd = (uint8_t)((val >> 8) & 0x01);
- codepage = (uint8_t)(val & 0xff);
-
- length = cy_as_ll_request_response__get_word(req_p, 2);
- data = (void *)def_inq_data;
-
- updated = cy_false;
-
- if (dev_p->usb_event_cb_ms) {
- cbdata_ms.bus = bus;
- cbdata_ms.device = device;
- cbdata_ms.updated = updated;
- cbdata_ms.evpd = evpd;
- cbdata_ms.codepage = codepage;
- cbdata_ms.length = length;
- cbdata_ms.data = data;
-
- cy_as_hal_assert(cbdata_ms.length <= sizeof(def_inq_data));
- cy_as_ll_request_response__unpack(req_p,
- 3, cbdata_ms.length, cbdata_ms.data);
-
- dev_p->usb_event_cb_ms(h,
- cy_as_event_usb_inquiry_before, &cbdata_ms);
-
- updated = cbdata_ms.updated;
- data = cbdata_ms.data;
- length = cbdata_ms.length;
- } else if (dev_p->usb_event_cb) {
- cbdata.media = media;
- cbdata.updated = updated;
- cbdata.evpd = evpd;
- cbdata.codepage = codepage;
- cbdata.length = length;
- cbdata.data = data;
-
- cy_as_hal_assert(cbdata.length <=
- sizeof(def_inq_data));
- cy_as_ll_request_response__unpack(req_p, 3,
- cbdata.length, cbdata.data);
-
- dev_p->usb_event_cb(h,
- cy_as_event_usb_inquiry_before, &cbdata);
-
- updated = cbdata.updated;
- data = cbdata.data;
- length = cbdata.length;
- }
-
- if (updated && length > 192)
- cy_as_hal_print_message("an inquiry result from a "
- "cy_as_event_usb_inquiry_before event "
- "was greater than 192 bytes.");
-
- /* Now send the reply with the data back
- * to the West Bridge device */
- if (updated && length <= 192) {
- /*
- * the callback function modified the inquiry
- * data, ship the data back to the west bridge firmware.
- */
- cy_as_ll_send_data_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT,
- CY_RESP_INQUIRY_DATA, length, data);
- } else {
- /*
- * the callback did not modify the data, just acknowledge
- * that we processed the request
- */
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 1);
- }
-
- if (dev_p->usb_event_cb_ms)
- dev_p->usb_event_cb_ms(h,
- cy_as_event_usb_inquiry_after, &cbdata_ms);
- else if (dev_p->usb_event_cb)
- dev_p->usb_event_cb(h,
- cy_as_event_usb_inquiry_after, &cbdata);
-}
-
-static void
-my_usb_request_callback_start_stop(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p)
-{
- cy_as_bus_number_t bus;
- cy_as_media_type media;
- uint32_t device;
- uint16_t val;
-
- if (dev_p->usb_event_cb_ms || dev_p->usb_event_cb) {
- cy_bool loej;
- cy_bool start;
- cy_as_device_handle h = (cy_as_device_handle)dev_p;
-
- val = cy_as_ll_request_response__get_word(req_p, 0);
- bus = cy_as_storage_get_bus_from_address(val);
- device = cy_as_storage_get_device_from_address(val);
- media = cy_as_storage_get_media_from_address(val);
-
- val = cy_as_ll_request_response__get_word(req_p, 1);
- loej = (val & 0x02) ? cy_true : cy_false;
- start = (val & 0x01) ? cy_true : cy_false;
-
- if (dev_p->usb_event_cb_ms) {
- cy_as_usb_start_stop_data cbdata_ms;
-
- cbdata_ms.bus = bus;
- cbdata_ms.device = device;
- cbdata_ms.loej = loej;
- cbdata_ms.start = start;
- dev_p->usb_event_cb_ms(h,
- cy_as_event_usb_start_stop, &cbdata_ms);
-
- } else if (dev_p->usb_event_cb) {
- cy_as_usb_start_stop_data_dep cbdata;
-
- cbdata.media = media;
- cbdata.loej = loej;
- cbdata.start = start;
- dev_p->usb_event_cb(h,
- cy_as_event_usb_start_stop, &cbdata);
- }
- }
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 1);
-}
-
-static void
-my_usb_request_callback_uknown_c_b_w(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p)
-{
- uint16_t val;
- cy_as_device_handle h = (cy_as_device_handle)dev_p;
- uint8_t buf[16];
-
- uint8_t response[4];
- uint16_t reqlen;
- void *request;
- uint8_t status;
- uint8_t key;
- uint8_t asc;
- uint8_t ascq;
-
- val = cy_as_ll_request_response__get_word(req_p, 0);
- /* Failed by default */
- status = 1;
- /* Invalid command */
- key = 0x05;
- /* Invalid command */
- asc = 0x20;
- /* Invalid command */
- ascq = 0x00;
- reqlen = cy_as_ll_request_response__get_word(req_p, 1);
- request = buf;
-
- cy_as_hal_assert(reqlen <= sizeof(buf));
- cy_as_ll_request_response__unpack(req_p, 2, reqlen, request);
-
- if (dev_p->usb_event_cb_ms) {
- cy_as_usb_unknown_command_data cbdata_ms;
- cbdata_ms.bus = cy_as_storage_get_bus_from_address(val);
- cbdata_ms.device =
- cy_as_storage_get_device_from_address(val);
- cbdata_ms.reqlen = reqlen;
- cbdata_ms.request = request;
- cbdata_ms.status = status;
- cbdata_ms.key = key;
- cbdata_ms.asc = asc;
- cbdata_ms.ascq = ascq;
-
- dev_p->usb_event_cb_ms(h,
- cy_as_event_usb_unknown_storage, &cbdata_ms);
- status = cbdata_ms.status;
- key = cbdata_ms.key;
- asc = cbdata_ms.asc;
- ascq = cbdata_ms.ascq;
- } else if (dev_p->usb_event_cb) {
- cy_as_usb_unknown_command_data_dep cbdata;
- cbdata.media =
- cy_as_storage_get_media_from_address(val);
- cbdata.reqlen = reqlen;
- cbdata.request = request;
- cbdata.status = status;
- cbdata.key = key;
- cbdata.asc = asc;
- cbdata.ascq = ascq;
-
- dev_p->usb_event_cb(h,
- cy_as_event_usb_unknown_storage, &cbdata);
- status = cbdata.status;
- key = cbdata.key;
- asc = cbdata.asc;
- ascq = cbdata.ascq;
- }
-
- response[0] = status;
- response[1] = key;
- response[2] = asc;
- response[3] = ascq;
- cy_as_ll_send_data_response(dev_p, CY_RQT_USB_RQT_CONTEXT,
- CY_RESP_UNKNOWN_SCSI_COMMAND, sizeof(response), response);
-}
-
-static void
-my_usb_request_callback_m_s_c_progress(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p)
-{
- uint16_t val1, val2;
- cy_as_device_handle h = (cy_as_device_handle)dev_p;
-
- if ((dev_p->usb_event_cb) || (dev_p->usb_event_cb_ms)) {
- cy_as_m_s_c_progress_data cbdata;
-
- val1 = cy_as_ll_request_response__get_word(req_p, 0);
- val2 = cy_as_ll_request_response__get_word(req_p, 1);
- cbdata.wr_count = (uint32_t)((val1 << 16) | val2);
-
- val1 = cy_as_ll_request_response__get_word(req_p, 2);
- val2 = cy_as_ll_request_response__get_word(req_p, 3);
- cbdata.rd_count = (uint32_t)((val1 << 16) | val2);
-
- if (dev_p->usb_event_cb)
- dev_p->usb_event_cb(h,
- cy_as_event_usb_m_s_c_progress, &cbdata);
- else
- dev_p->usb_event_cb_ms(h,
- cy_as_event_usb_m_s_c_progress, &cbdata);
- }
-
- cy_as_ll_send_status_response(dev_p,
- CY_RQT_USB_RQT_CONTEXT, CY_AS_ERROR_SUCCESS, 0);
-}
-
-/*
-* This function processes the requests from the firmware in the
-* West Bridge device that are delivered in the USB context.
-* These are generally EP0 and EP1 related requests or USB events.
-*/
-static void
-my_usb_request_callback(cy_as_device *dev_p, uint8_t context,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *resp_p,
- cy_as_return_status_t ret)
-{
- uint16_t val;
- uint8_t code = cy_as_ll_request_response__get_code(req_p);
-
- (void)resp_p;
- (void)context;
- (void)ret;
-
- switch (code) {
- case CY_RQT_USB_EVENT:
- my_usb_request_callback_usb_event(dev_p, req_p);
- break;
-
- case CY_RQT_USB_EP_DATA:
- dev_p->usb_last_event = cy_as_event_usb_setup_packet;
- my_usb_request_callback_usb_data(dev_p, req_p);
- break;
-
- case CY_RQT_SCSI_INQUIRY_COMMAND:
- dev_p->usb_last_event = cy_as_event_usb_inquiry_after;
- my_usb_request_callback_inquiry(dev_p, req_p);
- break;
-
- case CY_RQT_SCSI_START_STOP_COMMAND:
- dev_p->usb_last_event = cy_as_event_usb_start_stop;
- my_usb_request_callback_start_stop(dev_p, req_p);
- break;
-
- case CY_RQT_SCSI_UNKNOWN_COMMAND:
- dev_p->usb_last_event = cy_as_event_usb_unknown_storage;
- my_usb_request_callback_uknown_c_b_w(dev_p, req_p);
- break;
-
- case CY_RQT_USB_ACTIVITY_UPDATE:
- dev_p->usb_last_event = cy_as_event_usb_m_s_c_progress;
- my_usb_request_callback_m_s_c_progress(dev_p, req_p);
- break;
-
- default:
- cy_as_hal_print_message("invalid request "
- "received on USB context\n");
- val = req_p->box0;
- cy_as_ll_send_data_response(dev_p, CY_RQT_USB_RQT_CONTEXT,
- CY_RESP_INVALID_REQUEST, sizeof(val), &val);
- break;
- }
-}
-
-static cy_as_return_status_t
-my_handle_response_usb_start(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_return_status_t ret)
-{
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /*
- * mark EP 0 and EP1 as 64 byte endpoints
- */
- cy_as_dma_set_max_dma_size(dev_p, 0, 64);
- cy_as_dma_set_max_dma_size(dev_p, 1, 64);
-
- dev_p->usb_count++;
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_destroy_c_b_queue(dev_p->usb_func_cbs);
- cy_as_ll_register_request_callback(dev_p,
- CY_RQT_USB_RQT_CONTEXT, 0);
- }
-
- cy_as_device_clear_u_s_s_pending(dev_p);
-
- return ret;
-
-}
-
-/*
-* This function starts the USB stack. The stack is reference
-* counted so if the stack is already started, this function
-* just increments the count. If the stack has not been started,
-* a start request is sent to the West Bridge device.
-*
-* Note: Starting the USB stack does not cause the USB signals
-* to be connected to the USB pins. To do this and therefore
-* initiate enumeration, CyAsUsbConnect() must be called.
-*/
-cy_as_return_status_t
-cy_as_usb_start(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p, *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_start called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (!cy_as_device_is_configured(dev_p))
- return CY_AS_ERROR_NOT_CONFIGURED;
-
- if (!cy_as_device_is_firmware_loaded(dev_p))
- return CY_AS_ERROR_NO_FIRMWARE;
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- if (cy_as_device_is_u_s_s_pending(dev_p))
- return CY_AS_ERROR_STARTSTOP_PENDING;
-
- cy_as_device_set_u_s_s_pending(dev_p);
-
- if (dev_p->usb_count == 0) {
- /*
- * since we are just starting the stack,
- * mark USB as not connected to the remote host
- */
- cy_as_device_clear_usb_connected(dev_p);
- dev_p->usb_phy_config = 0;
-
- /* Queue for 1.0 Async Requests, kept for
- * backwards compatibility */
- dev_p->usb_func_cbs = cy_as_create_c_b_queue(CYAS_USB_FUNC_CB);
- if (dev_p->usb_func_cbs == 0) {
- cy_as_device_clear_u_s_s_pending(dev_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- /* Reset the EP0 state */
- cy_as_usb_reset_e_p0_state(dev_p);
-
- /*
- * we register here because the start request may cause
- * events to occur before the response to the start request.
- */
- cy_as_ll_register_request_callback(dev_p,
- CY_RQT_USB_RQT_CONTEXT, my_usb_request_callback);
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_START_USB, CY_RQT_USB_RQT_CONTEXT, 0);
- if (req_p == 0) {
- cy_as_destroy_c_b_queue(dev_p->usb_func_cbs);
- dev_p->usb_func_cbs = 0;
- cy_as_device_clear_u_s_s_pending(dev_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- /* Reserve space for the reply, the reply data
- * will not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_destroy_c_b_queue(dev_p->usb_func_cbs);
- dev_p->usb_func_cbs = 0;
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_device_clear_u_s_s_pending(dev_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_usb_start(dev_p,
- req_p, reply_p, ret);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb,
- client, CY_FUNCT_CB_USB_START, 0,
- dev_p->func_cbs_usb,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_usb_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
- } else {
- dev_p->usb_count++;
- if (cb)
- cb(handle, ret, client, CY_FUNCT_CB_USB_START, 0);
- }
-
- cy_as_device_clear_u_s_s_pending(dev_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_usb_start);
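A minimal synchronous usage sketch for the start path above; passing a NULL callback makes the call block until the firmware replies. The handle is assumed to come from the usual device-creation path elsewhere in the API, and the example_* helper is illustrative only:

static cy_as_return_status_t example_start_usb(cy_as_device_handle handle)
{
	cy_as_return_status_t ret;

	/* cb == 0: wait for the firmware reply before returning. */
	ret = cy_as_usb_start(handle, 0, 0);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	/* Starting the stack does not touch the USB pins; connect the
	 * D+/D- signals to begin enumeration. */
	return cy_as_usb_connect(handle, 0, 0);
}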
-
-void
-cy_as_usb_reset(cy_as_device *dev_p)
-{
- int i;
-
- cy_as_device_clear_usb_connected(dev_p);
-
- for (i = 0; i < sizeof(dev_p->usb_config) /
- sizeof(dev_p->usb_config[0]); i++) {
- /*
- * cancel all pending USB read/write operations, as it is
- * possible that the USB stack comes up in a different
- * configuration with a different set of endpoints.
- */
- if (cy_as_device_is_usb_async_pending(dev_p, i))
- cy_as_usb_cancel_async(dev_p,
- (cy_as_end_point_number_t)i);
-
- dev_p->usb_cb[i] = 0;
- dev_p->usb_config[i].enabled = cy_false;
- }
-
- dev_p->usb_phy_config = 0;
-}
-
-/*
- * This function does all the API side clean-up associated
- * with CyAsUsbStop, without any communication with firmware.
- * This needs to be done when the device is being reset while
- * the USB stack is active.
- */
-void
-cy_as_usb_cleanup(cy_as_device *dev_p)
-{
- if (dev_p->usb_count) {
- cy_as_usb_reset_e_p0_state(dev_p);
- cy_as_usb_reset(dev_p);
- cy_as_hal_mem_set(dev_p->usb_config, 0,
- sizeof(dev_p->usb_config));
- cy_as_destroy_c_b_queue(dev_p->usb_func_cbs);
-
- dev_p->usb_count = 0;
- }
-}
-
-static cy_as_return_status_t
-my_handle_response_usb_stop(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_return_status_t ret)
-{
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /*
-	 * we successfully shut down the stack, so clean up
-	 * and reset the reference count to zero.
- */
- cy_as_usb_cleanup(dev_p);
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- cy_as_ll_register_request_callback(dev_p,
- CY_RQT_USB_RQT_CONTEXT, 0);
-
- cy_as_device_clear_u_s_s_pending(dev_p);
-
- return ret;
-}
-
-/*
-* This function stops the USB stack. The USB stack is reference
-* counted, so the reference count is decremented first. If the
-* reference count then reaches zero, a request is sent to the
-* West Bridge device to stop the USB stack on the device.
-*/
-cy_as_return_status_t
-cy_as_usb_stop(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p = 0, *reply_p = 0;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_stop called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_usb_connected(dev_p))
- return CY_AS_ERROR_USB_CONNECTED;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- if (cy_as_device_is_u_s_s_pending(dev_p))
- return CY_AS_ERROR_STARTSTOP_PENDING;
-
- cy_as_device_set_u_s_s_pending(dev_p);
-
- if (dev_p->usb_count == 1) {
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_STOP_USB,
- CY_RQT_USB_RQT_CONTEXT, 0);
- if (req_p == 0) {
- ret = CY_AS_ERROR_OUT_OF_MEMORY;
- goto destroy;
- }
-
- /* Reserve space for the reply, the reply data will not
- * exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- ret = CY_AS_ERROR_OUT_OF_MEMORY;
- goto destroy;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_usb_stop(dev_p,
- req_p, reply_p, ret);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_USB_STOP, 0, dev_p->func_cbs_usb,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_usb_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
- } else if (dev_p->usb_count > 1) {
- /*
- * reset all LE_ps to inactive state, after cleaning
- * up any pending async read/write calls.
- */
- cy_as_usb_reset(dev_p);
- dev_p->usb_count--;
-
- if (cb)
- cb(handle, ret, client, CY_FUNCT_CB_USB_STOP, 0);
- }
-
- cy_as_device_clear_u_s_s_pending(dev_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_usb_stop);
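The corresponding teardown sketch; since cy_as_usb_stop() refuses to run while the device is still connected (CY_AS_ERROR_USB_CONNECTED), the pins are disconnected first. Again a blocking sketch with illustrative error handling, and an illustrative helper name:

static void example_stop_usb(cy_as_device_handle handle)
{
	cy_as_return_status_t ret;

	/* Drop off the bus first; stopping fails while connected. */
	ret = cy_as_usb_disconnect(handle, 0, 0);
	if (ret != CY_AS_ERROR_SUCCESS && ret != CY_AS_ERROR_USB_NOT_CONNECTED)
		cy_as_hal_print_message("usb disconnect failed: %d\n", ret);

	/* Decrements the reference count; the firmware stack is only
	 * stopped when the count reaches zero. */
	ret = cy_as_usb_stop(handle, 0, 0);
	if (ret != CY_AS_ERROR_SUCCESS)
		cy_as_hal_print_message("usb stop failed: %d\n", ret);
}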
-
-/*
-* This function registers a callback to be called when
-* USB events are processed
-*/
-cy_as_return_status_t
-cy_as_usb_register_callback(cy_as_device_handle handle,
- cy_as_usb_event_callback callback)
-{
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_register_callback called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (!cy_as_device_is_configured(dev_p))
- return CY_AS_ERROR_NOT_CONFIGURED;
-
- if (!cy_as_device_is_firmware_loaded(dev_p))
- return CY_AS_ERROR_NO_FIRMWARE;
-
- dev_p->usb_event_cb = NULL;
- dev_p->usb_event_cb_ms = callback;
- return CY_AS_ERROR_SUCCESS;
-}
-EXPORT_SYMBOL(cy_as_usb_register_callback);
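A sketch of registering an event handler. The exact typedef behind cy_as_usb_event_callback is not visible in this file, so the handler signature and the cy_as_usb_event type name below are assumptions inferred from the call sites above (handle, event code, event data); the example_* names are illustrative:

/* Hypothetical handler; the cy_as_usb_event type name is an assumption. */
static void example_usb_event_handler(cy_as_device_handle h,
	cy_as_usb_event ev, void *data)
{
	(void)h;
	(void)data;

	if (ev == cy_as_event_usb_suspend)
		cy_as_hal_print_message("USB suspend event\n");
	else if (ev == cy_as_event_usb_reset)
		cy_as_hal_print_message("USB reset event\n");
}

static cy_as_return_status_t example_register_events(cy_as_device_handle handle)
{
	/* Registration replaces any previously registered handler. */
	return cy_as_usb_register_callback(handle, example_usb_event_handler);
}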
-
-static cy_as_return_status_t
-my_handle_response_no_data(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE)
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- else
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
-
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-static cy_as_return_status_t
-my_handle_response_connect(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_return_status_t ret)
-{
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- if (ret == CY_AS_ERROR_SUCCESS)
- cy_as_device_set_usb_connected(dev_p);
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-
-/*
-* This method asks the West Bridge device to connect the
-* internal USB D+ and D- signals to the USB pins, thus
-* starting the enumeration process if the external pins
-* are connected to a USB host. If the external pins are
-* not connected to a USB host, enumeration will begin as soon
-* as the USB pins are connected to a host.
-*/
-cy_as_return_status_t
-cy_as_usb_connect(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_connect called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_SET_CONNECT_STATE, CY_RQT_USB_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- /* 1 = Connect request */
- cy_as_ll_request_response__set_word(req_p, 0, 1);
-
- /* Reserve space for the reply, the reply
- * data will not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_connect(dev_p, req_p, reply_p, ret);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_USB_CONNECT, 0, dev_p->func_cbs_usb,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_usb_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_usb_connect);
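The connect path can also be driven asynchronously: a cy_as_function_callback is invoked when the firmware reply arrives, with the same completion signature used by usb_ack_callback earlier in this file. A hedged sketch, with illustrative example_* names:

static void example_connect_done(cy_as_device_handle h,
	cy_as_return_status_t status,
	uint32_t client,
	cy_as_funct_c_b_type type,
	void *data)
{
	(void)h;
	(void)client;
	(void)data;

	if (type == CY_FUNCT_CB_USB_CONNECT && status == CY_AS_ERROR_SUCCESS)
		cy_as_hal_print_message("USB connect completed\n");
}

static cy_as_return_status_t example_connect_async(cy_as_device_handle handle)
{
	/* example_connect_done runs when the firmware reply arrives. */
	return cy_as_usb_connect(handle, example_connect_done, 0);
}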
-
-static cy_as_return_status_t
-my_handle_response_disconnect(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_return_status_t ret)
-{
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- if (ret == CY_AS_ERROR_SUCCESS)
- cy_as_device_clear_usb_connected(dev_p);
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-/*
-* This method disconnects the internal USB D+ and D- signals
-* from the D+ and D- pins external to the West Bridge device,
-* effectively disconnecting the West Bridge device from any
-* connected USB host.
-*/
-cy_as_return_status_t
-cy_as_usb_disconnect(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_disconnect called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- if (!cy_as_device_is_usb_connected(dev_p))
- return CY_AS_ERROR_USB_NOT_CONNECTED;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_SET_CONNECT_STATE, CY_RQT_USB_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0, 0);
-
- /* Reserve space for the reply, the reply
- * data will not exceed two bytes */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_disconnect(dev_p,
- req_p, reply_p, ret);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_USB_DISCONNECT, 0, dev_p->func_cbs_usb,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_usb_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_usb_disconnect);
-
-static cy_as_return_status_t
-my_handle_response_set_enum_config(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
-
- if (ret == CY_AS_ERROR_SUCCESS) {
- /*
- * we configured the west bridge device and
- * enumeration is going to happen on the P port
- * processor. now we must enable endpoint zero
- */
- cy_as_usb_end_point_config config;
-
- config.dir = cy_as_usb_in_out;
- config.type = cy_as_usb_control;
- config.enabled = cy_true;
-
- ret = cy_as_usb_set_end_point_config((cy_as_device_handle *)
- dev_p, 0, &config);
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-/*
-* This method sets how the USB is enumerated and should
-* be called before CyAsUsbConnect() is called.
-*/
-static cy_as_return_status_t
-my_usb_set_enum_config(cy_as_device *dev_p,
- uint8_t bus_mask,
- uint8_t media_mask,
- cy_bool use_antioch_enumeration,
- uint8_t mass_storage_interface,
- uint8_t mtp_interface,
- cy_bool mass_storage_callbacks,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- cy_as_log_debug_message(6, "cy_as_usb_set_enum_config called");
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_usb_connected(dev_p))
- return CY_AS_ERROR_USB_CONNECTED;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- /* if we are using MTP firmware: */
- if (dev_p->is_mtp_firmware == 1) {
- /* we cannot enumerate MSC */
- if (mass_storage_interface != 0)
- return CY_AS_ERROR_INVALID_CONFIGURATION;
-
- if (bus_mask == 0) {
- if (mtp_interface != 0)
- return CY_AS_ERROR_INVALID_CONFIGURATION;
- } else if (bus_mask == 2) {
- /* enable EP 1 as it will be used */
- cy_as_dma_enable_end_point(dev_p, 1, cy_true,
- cy_as_direction_in);
- dev_p->usb_config[1].enabled = cy_true;
- dev_p->usb_config[1].dir = cy_as_usb_in;
- dev_p->usb_config[1].type = cy_as_usb_int;
- } else {
- return CY_AS_ERROR_INVALID_CONFIGURATION;
- }
- /* if we are not using MTP firmware, we cannot enumerate MTP */
- } else if (mtp_interface != 0)
- return CY_AS_ERROR_INVALID_CONFIGURATION;
-
- /*
- * if we are not enumerating mass storage, we should
- * not be providing an interface number.
- */
- if (bus_mask == 0 && mass_storage_interface != 0)
- return CY_AS_ERROR_INVALID_CONFIGURATION;
-
- /*
- * if we are going to use mtp_interface, bus mask must be 2.
- */
- if (mtp_interface != 0 && bus_mask != 2)
- return CY_AS_ERROR_INVALID_CONFIGURATION;
-
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_SET_USB_CONFIG, CY_RQT_USB_RQT_CONTEXT, 4);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- /* Marshal the structure */
- cy_as_ll_request_response__set_word(req_p, 0,
- (uint16_t)((media_mask << 8) | bus_mask));
- cy_as_ll_request_response__set_word(req_p, 1,
- (uint16_t)use_antioch_enumeration);
- cy_as_ll_request_response__set_word(req_p, 2,
- dev_p->is_mtp_firmware ? mtp_interface :
- mass_storage_interface);
- cy_as_ll_request_response__set_word(req_p, 3,
- (uint16_t)mass_storage_callbacks);
-
- /* Reserve space for the reply, the reply
- * data will not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
-
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_set_enum_config(dev_p,
- req_p, reply_p);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_USB_SETENUMCONFIG, 0, dev_p->func_cbs_usb,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_usb_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-/*
- * This method sets how the USB is enumerated and should
- * be called before CyAsUsbConnect() is called.
- */
-cy_as_return_status_t
-cy_as_usb_set_enum_config(cy_as_device_handle handle,
- cy_as_usb_enum_control *config_p,
- cy_as_function_callback cb,
- uint32_t client)
-{
-	cy_as_device *dev_p;
- uint8_t bus_mask, media_mask;
- uint32_t bus, device;
- cy_as_return_status_t ret;
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if ((cy_as_device_is_in_callback(dev_p)) && (cb != 0))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
-	/* Since we map the media types to buses (NAND to bus 0 and
-	 * the rest to bus 1), and we only allow enumerating all the
-	 * devices on a bus, we just scan the array for any position
-	 * where a device is enabled and mark that bus to be
-	 * enumerated.
- */
- bus_mask = 0;
- media_mask = 0;
- for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) {
- for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++) {
- if (config_p->devices_to_enumerate[bus][device] ==
- cy_true) {
- bus_mask |= (0x01 << bus);
-				media_mask |= dev_p->media_supported[bus];
- }
- }
- }
-
- return my_usb_set_enum_config(dev_p, bus_mask, media_mask,
- config_p->antioch_enumeration,
- config_p->mass_storage_interface,
- config_p->mtp_interface,
- config_p->mass_storage_callbacks,
- cb,
- client
- );
-}
-EXPORT_SYMBOL(cy_as_usb_set_enum_config);
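A sketch of filling in cy_as_usb_enum_control for a simple mass-storage setup in which every device on bus 0 is enumerated by the West Bridge firmware itself. The field values are illustrative, only fields referenced in this file are used, and the example_* helper name is not part of the API:

static cy_as_return_status_t example_enum_config(cy_as_device_handle handle)
{
	cy_as_usb_enum_control cfg;
	uint32_t bus, device;

	cy_as_hal_mem_set(&cfg, 0, sizeof(cfg));

	/* Enumerate everything on bus 0, nothing on the other buses. */
	for (bus = 0; bus < CY_AS_MAX_BUSES; bus++)
		for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++)
			cfg.devices_to_enumerate[bus][device] =
				(bus == 0) ? cy_true : cy_false;

	cfg.antioch_enumeration = cy_true;	/* firmware handles enumeration */
	cfg.mass_storage_interface = 0;
	cfg.mtp_interface = 0;
	cfg.mass_storage_callbacks = cy_false;

	/* Blocking form; must be called before cy_as_usb_connect(). */
	return cy_as_usb_set_enum_config(handle, &cfg, 0, 0);
}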
-
-static cy_as_return_status_t
-my_handle_response_get_enum_config(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- void *config_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint16_t val;
- uint8_t bus_mask;
- uint32_t bus;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_USB_CONFIG) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- /* Marshal the reply */
- if (req_p->flags & CY_AS_REQUEST_RESPONSE_MS) {
- uint32_t device;
- cy_bool state;
- cy_as_usb_enum_control *ms_config_p =
- (cy_as_usb_enum_control *)config_p;
-
- bus_mask = (uint8_t)
- (cy_as_ll_request_response__get_word
- (reply_p, 0) & 0xFF);
- for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) {
- if (bus_mask & (1 << bus))
- state = cy_true;
- else
- state = cy_false;
-
- for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES;
- device++)
- ms_config_p->devices_to_enumerate[bus][device]
- = state;
- }
-
- ms_config_p->antioch_enumeration =
- (cy_bool)cy_as_ll_request_response__get_word
- (reply_p, 1);
-
- val = cy_as_ll_request_response__get_word(reply_p, 2);
- if (dev_p->is_mtp_firmware) {
- ms_config_p->mass_storage_interface = 0;
- ms_config_p->mtp_interface = (uint8_t)(val & 0xFF);
- } else {
- ms_config_p->mass_storage_interface =
- (uint8_t)(val & 0xFF);
- ms_config_p->mtp_interface = 0;
- }
- ms_config_p->mass_storage_callbacks = (cy_bool)(val >> 8);
-
- /*
- * firmware returns an invalid interface number for mass storage,
- * if mass storage is not enabled. this needs to be converted to
- * zero to match the input configuration.
- */
- if (bus_mask == 0) {
- if (dev_p->is_mtp_firmware)
- ms_config_p->mtp_interface = 0;
- else
- ms_config_p->mass_storage_interface = 0;
- }
- } else {
- cy_as_usb_enum_control_dep *ex_config_p =
- (cy_as_usb_enum_control_dep *)config_p;
-
- ex_config_p->enum_mass_storage = (uint8_t)
- ((cy_as_ll_request_response__get_word
- (reply_p, 0) >> 8) & 0xFF);
- ex_config_p->antioch_enumeration = (cy_bool)
- cy_as_ll_request_response__get_word(reply_p, 1);
-
- val = cy_as_ll_request_response__get_word(reply_p, 2);
- ex_config_p->mass_storage_interface = (uint8_t)(val & 0xFF);
- ex_config_p->mass_storage_callbacks = (cy_bool)(val >> 8);
-
- /*
- * firmware returns an invalid interface number for mass
- * storage, if mass storage is not enabled. this needs to
- * be converted to zero to match the input configuration.
- */
- if (ex_config_p->enum_mass_storage == 0)
- ex_config_p->mass_storage_interface = 0;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-/*
-* This sets up the request for the enumeration configuration
-* information, based on whether the request comes from the old
-* pre-1.2 functions.
-*/
-static cy_as_return_status_t
-my_usb_get_enum_config(cy_as_device_handle handle,
- uint16_t req_flags,
- void *config_p,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_get_enum_config called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_GET_USB_CONFIG, CY_RQT_USB_RQT_CONTEXT, 0);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- /* Reserve space for the reply, the reply data
-	 * will not exceed three words */
- reply_p = cy_as_ll_create_response(dev_p, 3);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- /* we need to know the type of request to
- * know how to manage the data */
- req_p->flags |= req_flags;
- return my_handle_response_get_enum_config(dev_p,
- req_p, reply_p, config_p);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_USB_GETENUMCONFIG, config_p,
- dev_p->func_cbs_usb, req_flags, req_p, reply_p,
- cy_as_usb_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-/*
- * This method returns the enumeration configuration information
- * from the West Bridge device. Generally this is not used by
- * client software; it is provided mostly as a way to read back
- * all of the enumeration state stored on the device for debugging.
- */
-cy_as_return_status_t
-cy_as_usb_get_enum_config(cy_as_device_handle handle,
- cy_as_usb_enum_control *config_p,
- cy_as_function_callback cb,
- uint32_t client)
-{
- return my_usb_get_enum_config(handle,
- CY_AS_REQUEST_RESPONSE_MS, config_p, cb, client);
-}
-EXPORT_SYMBOL(cy_as_usb_get_enum_config);
-
-/*
-* This method sets the USB descriptor for a given entity.
-*/
-cy_as_return_status_t
-cy_as_usb_set_descriptor(cy_as_device_handle handle,
- cy_as_usb_desc_type type,
- uint8_t index,
- void *desc_p,
- uint16_t length,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint16_t pktlen;
-
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_set_descriptor called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- if (length > CY_AS_MAX_USB_DESCRIPTOR_SIZE)
- return CY_AS_ERROR_INVALID_DESCRIPTOR;
-
- pktlen = (uint16_t)length / 2;
- if (length % 2)
- pktlen++;
- pktlen += 2; /* 1 for type, 1 for length */
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_SET_DESCRIPTOR,
- CY_RQT_USB_RQT_CONTEXT, (uint16_t)pktlen);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- (uint16_t)((uint8_t)type | (index << 8)));
- cy_as_ll_request_response__set_word(req_p, 1,
- (uint16_t)length);
- cy_as_ll_request_response__pack(req_p, 2, length, desc_p);
-
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_no_data(dev_p, req_p, reply_p);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_USB_SETDESCRIPTOR, 0, dev_p->func_cbs_usb,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_usb_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_usb_set_descriptor);
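A sketch of pushing a device descriptor to the firmware with the setter above. The descriptor bytes are placeholders, the descriptor-type constant name (cy_as_usb_desc_device) is an assumption since the cy_as_usb_desc_type enum is not defined in this file, and the example_* names are illustrative:

/* Placeholder 18-byte USB device descriptor; values are illustrative only. */
static uint8_t example_device_desc[18] = {
	0x12, 0x01, 0x00, 0x02, 0x00, 0x00, 0x00, 0x40,
	0xb4, 0x04, 0x20, 0x47, 0x00, 0x01, 0x01, 0x02,
	0x03, 0x01
};

static cy_as_return_status_t example_set_device_descriptor(cy_as_device_handle handle)
{
	/* cy_as_usb_desc_device is assumed; index 0, blocking call. */
	return cy_as_usb_set_descriptor(handle, cy_as_usb_desc_device, 0,
		example_device_desc, sizeof(example_device_desc), 0, 0);
}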
-
-/*
- * This method clears all descriptors that were previously
- * stored on the West Bridge through CyAsUsbSetDescriptor calls.
- */
-cy_as_return_status_t
-cy_as_usb_clear_descriptors(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_clear_descriptors called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if ((cy_as_device_is_in_callback(dev_p)) && (cb == 0))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_CLEAR_DESCRIPTORS, CY_RQT_USB_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
-
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_no_data(dev_p, req_p, reply_p);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_USB_CLEARDESCRIPTORS, 0,
- dev_p->func_cbs_usb,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_usb_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_usb_clear_descriptors);
-
-static cy_as_return_status_t
-my_handle_response_get_descriptor(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_get_descriptor_data *data)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint32_t retlen;
-
- if (cy_as_ll_request_response__get_code(reply_p) ==
- CY_RESP_SUCCESS_FAILURE) {
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- goto destroy;
- } else if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_USB_DESCRIPTOR) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- retlen = cy_as_ll_request_response__get_word(reply_p, 0);
- if (retlen > data->length) {
- ret = CY_AS_ERROR_INVALID_SIZE;
- goto destroy;
- }
-
- ret = CY_AS_ERROR_SUCCESS;
- cy_as_ll_request_response__unpack(reply_p, 1,
- retlen, data->desc_p);
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-/*
-* This method retrieves the USB descriptor for a given type.
-*/
-cy_as_return_status_t
-cy_as_usb_get_descriptor(cy_as_device_handle handle,
- cy_as_usb_desc_type type,
- uint8_t index,
- cy_as_get_descriptor_data *data,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret;
- cy_as_ll_request_response *req_p , *reply_p;
-
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_get_descriptor called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_GET_DESCRIPTOR, CY_RQT_USB_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- (uint16_t)((uint8_t)type | (index << 8)));
-
- /* Add one for the length field */
- reply_p = cy_as_ll_create_response(dev_p,
- CY_AS_MAX_USB_DESCRIPTOR_SIZE + 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(
- dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return my_handle_response_get_descriptor(dev_p,
- req_p, reply_p, data);
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_USB_GETDESCRIPTOR, data,
- dev_p->func_cbs_usb,
- CY_AS_REQUEST_RESPONSE_EX, req_p,
- reply_p, cy_as_usb_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_usb_get_descriptor);
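Reading a descriptor back goes through cy_as_get_descriptor_data, which carries the caller's buffer and its size; the response handler above rejects replies larger than data->length. A sketch, with the same caveat that the descriptor-type constant and the example_* helper name are assumptions:

static cy_as_return_status_t example_get_descriptor(cy_as_device_handle handle)
{
	uint8_t buf[CY_AS_MAX_USB_DESCRIPTOR_SIZE];
	cy_as_get_descriptor_data data;

	data.desc_p = buf;
	data.length = sizeof(buf);

	/* Blocking read of the device descriptor (type constant assumed). */
	return cy_as_usb_get_descriptor(handle, cy_as_usb_desc_device, 0,
		&data, 0, 0);
}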
-
-cy_as_return_status_t
-cy_as_usb_set_physical_configuration(cy_as_device_handle handle,
- uint8_t config)
-{
- cy_as_return_status_t ret;
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6,
- "cy_as_usb_set_physical_configuration called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_usb_connected(dev_p))
- return CY_AS_ERROR_USB_CONNECTED;
-
- if (config < 1 || config > 12)
- return CY_AS_ERROR_INVALID_CONFIGURATION;
-
- dev_p->usb_phy_config = config;
-
- return CY_AS_ERROR_SUCCESS;
-}
-EXPORT_SYMBOL(cy_as_usb_set_physical_configuration);
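The physical configuration chosen here (1 to 12) decides which physical endpoints the logical endpoints may map onto, per the validmask table in is_physical_valid() below, and it must be set before the endpoints are configured and committed. A short sketch, with an illustrative helper name:

static cy_as_return_status_t example_pick_phy_config(cy_as_device_handle handle)
{
	/* Configuration 1 makes physical endpoints 1-4 available (illustrative). */
	return cy_as_usb_set_physical_configuration(handle, 1);
}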
-
-static cy_bool
-is_physical_valid(uint8_t config, cy_as_end_point_number_t ep)
-{
- static uint8_t validmask[12] = {
- 0x0f, /* Config 1 - 1, 2, 3, 4 */
- 0x07, /* Config 2 - 1, 2, 3 */
- 0x07, /* Config 3 - 1, 2, 3 */
- 0x0d, /* Config 4 - 1, 3, 4 */
- 0x05, /* Config 5 - 1, 3 */
- 0x05, /* Config 6 - 1, 3 */
- 0x0d, /* Config 7 - 1, 3, 4 */
- 0x05, /* Config 8 - 1, 3 */
- 0x05, /* Config 9 - 1, 3 */
- 0x0d, /* Config 10 - 1, 3, 4 */
- 0x09, /* Config 11 - 1, 4 */
- 0x01 /* Config 12 - 1 */
- };
-
- return (validmask[config - 1] & (1 << (ep - 1))) ? cy_true : cy_false;
-}
-
-/*
-* This method sets the configuration for an endpoint
-*/
-cy_as_return_status_t
-cy_as_usb_set_end_point_config(cy_as_device_handle handle,
- cy_as_end_point_number_t ep, cy_as_usb_end_point_config *config_p)
-{
- cy_as_return_status_t ret;
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_set_end_point_config called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_usb_connected(dev_p))
- return CY_AS_ERROR_USB_CONNECTED;
-
- if (ep >= 16 || ep == 2 || ep == 4 || ep == 6 || ep == 8)
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
- if (ep == 0) {
- /* Endpoint 0 must be 64 byte, dir IN/OUT,
- * and control type */
- if (config_p->dir != cy_as_usb_in_out ||
- config_p->type != cy_as_usb_control)
- return CY_AS_ERROR_INVALID_CONFIGURATION;
- } else if (ep == 1) {
- if ((dev_p->is_mtp_firmware == 1) &&
- (dev_p->usb_config[1].enabled == cy_true)) {
- return CY_AS_ERROR_INVALID_ENDPOINT;
- }
-
- /*
- * EP1 can only be used either as an OUT ep, or as an IN ep.
- */
- if ((config_p->type == cy_as_usb_control) ||
- (config_p->type == cy_as_usb_iso) ||
- (config_p->dir == cy_as_usb_in_out))
- return CY_AS_ERROR_INVALID_CONFIGURATION;
- } else {
- if (config_p->dir == cy_as_usb_in_out ||
- config_p->type == cy_as_usb_control)
- return CY_AS_ERROR_INVALID_CONFIGURATION;
-
- if (!is_physical_valid(dev_p->usb_phy_config,
- config_p->physical))
- return CY_AS_ERROR_INVALID_PHYSICAL_ENDPOINT;
-
- /*
- * ISO endpoints must be on E_ps 3, 5, 7 or 9 as
- * they need to align directly with the underlying
- * physical endpoint.
- */
- if (config_p->type == cy_as_usb_iso) {
- if (ep != 3 && ep != 5 && ep != 7 && ep != 9)
- return CY_AS_ERROR_INVALID_CONFIGURATION;
-
- if (ep == 3 && config_p->physical != 1)
- return CY_AS_ERROR_INVALID_CONFIGURATION;
-
- if (ep == 5 && config_p->physical != 2)
- return CY_AS_ERROR_INVALID_CONFIGURATION;
-
- if (ep == 7 && config_p->physical != 3)
- return CY_AS_ERROR_INVALID_CONFIGURATION;
-
- if (ep == 9 && config_p->physical != 4)
- return CY_AS_ERROR_INVALID_CONFIGURATION;
- }
- }
-
- /* Store the configuration information until a
- * CyAsUsbCommitConfig is done */
- dev_p->usb_config[ep] = *config_p;
-
- /* If the endpoint is enabled, enable DMA associated
- * with the endpoint */
- /*
- * we make some assumptions that we check here. we assume
- * that the direction fields for the DMA module are the same
- * values as the direction values for the USB module.
- */
- cy_as_hal_assert((int)cy_as_usb_in == (int)cy_as_direction_in);
- cy_as_hal_assert((int)cy_as_usb_out == (int)cy_as_direction_out);
- cy_as_hal_assert((int)cy_as_usb_in_out == (int)cy_as_direction_in_out);
-
- return cy_as_dma_enable_end_point(dev_p, ep,
- config_p->enabled, (cy_as_dma_direction)config_p->dir);
-}
-EXPORT_SYMBOL(cy_as_usb_set_end_point_config);
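A sketch of configuring logical endpoint 3 as an ISO IN endpoint, which per the checks above must map onto physical endpoint 1. Only fields referenced in this function are filled in, the settings take effect only after cy_as_usb_commit_config(), and the example_* helper name is illustrative:

static cy_as_return_status_t example_config_ep3(cy_as_device_handle handle)
{
	cy_as_usb_end_point_config epcfg;
	cy_as_return_status_t ret;

	cy_as_hal_mem_set(&epcfg, 0, sizeof(epcfg));
	epcfg.enabled = cy_true;
	epcfg.dir = cy_as_usb_in;
	epcfg.type = cy_as_usb_iso;
	epcfg.physical = 1;	/* ISO on EP3 must use physical endpoint 1 */

	ret = cy_as_usb_set_end_point_config(handle, 3, &epcfg);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	/* Push the stored endpoint configuration to the hardware. */
	return cy_as_usb_commit_config(handle, 0, 0);
}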
-
-cy_as_return_status_t
-cy_as_usb_get_end_point_config(cy_as_device_handle handle,
- cy_as_end_point_number_t ep, cy_as_usb_end_point_config *config_p)
-{
- cy_as_return_status_t ret;
-
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_get_end_point_config called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (ep >= 16 || ep == 2 || ep == 4 || ep == 6 || ep == 8)
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
- *config_p = dev_p->usb_config[ep];
-
- return CY_AS_ERROR_SUCCESS;
-}
-EXPORT_SYMBOL(cy_as_usb_get_end_point_config);
-
-/*
-* Commit the configuration of the various endpoints to the hardware.
-*/
-cy_as_return_status_t
-cy_as_usb_commit_config(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client)
-{
- uint32_t i;
- cy_as_return_status_t ret;
- cy_as_ll_request_response *req_p , *reply_p;
- cy_as_device *dev_p;
- uint16_t data;
-
- cy_as_log_debug_message(6, "cy_as_usb_commit_config called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_usb_connected(dev_p))
- return CY_AS_ERROR_USB_CONNECTED;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
-	/*
-	 * This performs the mapping based on information that was
-	 * previously stored on the device about the various endpoints
-	 * and how they are configured. The output of this mapping is
-	 * the 14 register values contained in usb_lepcfg and
-	 * usb_pepcfg.
-	 */
- ret = cy_as_usb_map_logical2_physical(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- /*
- * now, package the information about the various logical and
- * physical endpoint configuration registers and send it
- * across to the west bridge device.
- */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_SET_USB_CONFIG_REGISTERS, CY_RQT_USB_RQT_CONTEXT, 8);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_hal_print_message("USB configuration: %d\n",
- dev_p->usb_phy_config);
- cy_as_hal_print_message("EP1OUT: 0x%02x EP1IN: 0x%02x\n",
- dev_p->usb_ep1cfg[0], dev_p->usb_ep1cfg[1]);
- cy_as_hal_print_message("PEP registers: 0x%02x 0x%02x 0x%02x 0x%02x\n",
- dev_p->usb_pepcfg[0], dev_p->usb_pepcfg[1],
- dev_p->usb_pepcfg[2], dev_p->usb_pepcfg[3]);
-
- cy_as_hal_print_message("LEP registers: 0x%02x 0x%02x 0x%02x "
- "0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
- dev_p->usb_lepcfg[0], dev_p->usb_lepcfg[1],
- dev_p->usb_lepcfg[2], dev_p->usb_lepcfg[3],
- dev_p->usb_lepcfg[4], dev_p->usb_lepcfg[5],
- dev_p->usb_lepcfg[6], dev_p->usb_lepcfg[7],
- dev_p->usb_lepcfg[8], dev_p->usb_lepcfg[9]);
-
- /* Write the EP1OUTCFG and EP1INCFG data in the first word. */
- data = (uint16_t)((dev_p->usb_ep1cfg[0] << 8) |
- dev_p->usb_ep1cfg[1]);
- cy_as_ll_request_response__set_word(req_p, 0, data);
-
- /* Write the PEP CFG data in the next 2 words */
- for (i = 0; i < 4; i += 2) {
- data = (uint16_t)((dev_p->usb_pepcfg[i] << 8) |
- dev_p->usb_pepcfg[i + 1]);
- cy_as_ll_request_response__set_word(req_p,
- 1 + i / 2, data);
- }
-
- /* Write the LEP CFG data in the next 5 words */
- for (i = 0; i < 10; i += 2) {
- data = (uint16_t)((dev_p->usb_lepcfg[i] << 8) |
- dev_p->usb_lepcfg[i + 1]);
- cy_as_ll_request_response__set_word(req_p,
- 3 + i / 2, data);
- }
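-	/*
-	 * Packing sketch for the two loops above: each pair of 8-bit
-	 * config registers becomes one 16-bit request word, high byte
-	 * first.  For example, pepcfg[0] = 0xA1 and pepcfg[1] = 0x0B
-	 * (illustrative values, not taken from real hardware) pack as
-	 *
-	 *	(0xA1 << 8) | 0x0B == 0xA10B
-	 */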
-
- /* A single status word response type */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- ret = my_handle_response_no_data(dev_p,
- req_p, reply_p);
-
- if (ret == CY_AS_ERROR_SUCCESS)
- ret = cy_as_usb_setup_dma(dev_p);
-
- return ret;
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_USB_COMMITCONFIG, 0, dev_p->func_cbs_usb,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_usb_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_usb_commit_config);
-
-static void
-sync_request_callback(cy_as_device *dev_p,
- cy_as_end_point_number_t ep, void *buf_p,
- uint32_t size, cy_as_return_status_t err)
-{
- (void)ep;
- (void)buf_p;
-
- dev_p->usb_error = err;
- dev_p->usb_actual_cnt = size;
-}
-
-static void
-async_read_request_callback(cy_as_device *dev_p,
- cy_as_end_point_number_t ep, void *buf_p,
- uint32_t size, cy_as_return_status_t err)
-{
- cy_as_device_handle h;
-
- cy_as_log_debug_message(6,
- "async_read_request_callback called");
-
- h = (cy_as_device_handle)dev_p;
-
- if (ep == 0 && cy_as_device_is_ack_delayed(dev_p)) {
- dev_p->usb_pending_buffer = buf_p;
- dev_p->usb_pending_size = size;
- dev_p->usb_error = err;
- cy_as_usb_ack_setup_packet(h, usb_ack_callback, 0);
- } else {
- cy_as_usb_io_callback cb;
-
- cb = dev_p->usb_cb[ep];
- dev_p->usb_cb[ep] = 0;
- cy_as_device_clear_usb_async_pending(dev_p, ep);
- if (cb)
- cb(h, ep, size, buf_p, err);
- }
-}
-
-static void
-async_write_request_callback(cy_as_device *dev_p,
- cy_as_end_point_number_t ep, void *buf_p,
- uint32_t size, cy_as_return_status_t err)
-{
- cy_as_device_handle h;
-
- cy_as_log_debug_message(6,
- "async_write_request_callback called");
-
- h = (cy_as_device_handle)dev_p;
-
- if (ep == 0 && cy_as_device_is_ack_delayed(dev_p)) {
- dev_p->usb_pending_buffer = buf_p;
- dev_p->usb_pending_size = size;
- dev_p->usb_error = err;
-
- /* The west bridge protocol generates ZLPs as required. */
- cy_as_usb_ack_setup_packet(h, usb_ack_callback, 0);
- } else {
- cy_as_usb_io_callback cb;
-
- cb = dev_p->usb_cb[ep];
- dev_p->usb_cb[ep] = 0;
-
- cy_as_device_clear_usb_async_pending(dev_p, ep);
- if (cb)
- cb(h, ep, size, buf_p, err);
- }
-}
-
-static void
-my_turbo_rqt_callback(cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t stat)
-{
- uint8_t code;
-
- (void)context;
- (void)stat;
-
-	/* The handlers are responsible for deleting the rqt and resp when
- * they are finished
- */
- code = cy_as_ll_request_response__get_code(rqt);
- switch (code) {
- case CY_RQT_TURBO_SWITCH_ENDPOINT:
- cy_as_hal_assert(stat == CY_AS_ERROR_SUCCESS);
- cy_as_ll_destroy_request(dev_p, rqt);
- cy_as_ll_destroy_response(dev_p, resp);
- break;
- default:
- cy_as_hal_assert(cy_false);
- break;
- }
-}
-
-/* Send a mailbox request to prepare the endpoint for switching */
-static cy_as_return_status_t
-my_send_turbo_switch(cy_as_device *dev_p, uint32_t size, cy_bool pktread)
-{
- cy_as_return_status_t ret;
- cy_as_ll_request_response *req_p , *reply_p;
-
- /* Create the request to send to the West Bridge device */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_TURBO_SWITCH_ENDPOINT, CY_RQT_TUR_RQT_CONTEXT, 3);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- /* Reserve space for the reply, the reply data will
- * not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- cy_as_ll_request_response__set_word(req_p, 0,
- (uint16_t)pktread);
- cy_as_ll_request_response__set_word(req_p, 1,
- (uint16_t)((size >> 16) & 0xFFFF));
- cy_as_ll_request_response__set_word(req_p, 2,
- (uint16_t)(size & 0xFFFF));
-
- ret = cy_as_ll_send_request(dev_p, req_p,
- reply_p, cy_false, my_turbo_rqt_callback);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_ll_destroy_request(dev_p, req_p);
-		cy_as_ll_destroy_response(dev_p, reply_p);
- return ret;
- }
-
- return CY_AS_ERROR_SUCCESS;
-}
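-
-/*
- * The switch request above carries the 32-bit transfer size in two
- * 16-bit words: word 1 holds the high half and word 2 the low half,
- * so the receiving side can rebuild it as
- *
- *	size = ((uint32_t)word1 << 16) | word2;
- *
- * while word 0 carries the packet-read flag.
- */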
-
-cy_as_return_status_t
-cy_as_usb_read_data(cy_as_device_handle handle,
- cy_as_end_point_number_t ep, cy_bool pktread,
- uint32_t dsize, uint32_t *dataread, void *data)
-{
- cy_as_return_status_t ret;
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_read_data called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- if (ep >= 16 || ep == 4 || ep == 6 || ep == 8)
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
- /* EP2 is available for reading when MTP is active */
- if (dev_p->mtp_count == 0 && ep == CY_AS_MTP_READ_ENDPOINT)
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
-	/* If the endpoint is disabled, we cannot
-	 * read data from the endpoint */
- if (!dev_p->usb_config[ep].enabled)
- return CY_AS_ERROR_ENDPOINT_DISABLED;
-
- if (dev_p->usb_config[ep].dir != cy_as_usb_out)
- return CY_AS_ERROR_USB_BAD_DIRECTION;
-
- ret = cy_as_dma_queue_request(dev_p, ep, data, dsize,
- pktread, cy_true, sync_request_callback);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (ep == CY_AS_MTP_READ_ENDPOINT) {
- ret = my_send_turbo_switch(dev_p, dsize, pktread);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_dma_cancel(dev_p, ep, ret);
- return ret;
- }
-
- ret = cy_as_dma_drain_queue(dev_p, ep, cy_false);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
- } else {
- ret = cy_as_dma_drain_queue(dev_p, ep, cy_true);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
- }
-
- ret = dev_p->usb_error;
- *dataread = dev_p->usb_actual_cnt;
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_usb_read_data);
-
-cy_as_return_status_t
-cy_as_usb_read_data_async(cy_as_device_handle handle,
- cy_as_end_point_number_t ep, cy_bool pktread,
- uint32_t dsize, void *data, cy_as_usb_io_callback cb)
-{
- cy_as_return_status_t ret;
- uint32_t mask;
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_read_data_async called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (ep >= 16 || ep == 4 || ep == 6 || ep == 8)
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
- /* EP2 is available for reading when MTP is active */
- if (dev_p->mtp_count == 0 && ep == CY_AS_MTP_READ_ENDPOINT)
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
-	/* If the endpoint is disabled, we cannot
-	 * read data from the endpoint */
- if (!dev_p->usb_config[ep].enabled)
- return CY_AS_ERROR_ENDPOINT_DISABLED;
-
- if (dev_p->usb_config[ep].dir != cy_as_usb_out &&
- dev_p->usb_config[ep].dir != cy_as_usb_in_out)
- return CY_AS_ERROR_USB_BAD_DIRECTION;
-
-	/*
-	 * Since async operations can be triggered by interrupt
-	 * code, we must ensure that we do not get multiple async
-	 * operations going at one time, and protect this test-and-set
-	 * operation from interrupts.
-	 */
- mask = cy_as_hal_disable_interrupts();
- if (cy_as_device_is_usb_async_pending(dev_p, ep)) {
- cy_as_hal_enable_interrupts(mask);
- return CY_AS_ERROR_ASYNC_PENDING;
- }
- cy_as_device_set_usb_async_pending(dev_p, ep);
-
- /*
- * if this is for EP0, we set this bit to delay the
- * ACK response until after this read has completed.
- */
- if (ep == 0)
- cy_as_device_set_ack_delayed(dev_p);
-
- cy_as_hal_enable_interrupts(mask);
-
- cy_as_hal_assert(dev_p->usb_cb[ep] == 0);
- dev_p->usb_cb[ep] = cb;
-
- ret = cy_as_dma_queue_request(dev_p, ep, data, dsize,
- pktread, cy_true, async_read_request_callback);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (ep == CY_AS_MTP_READ_ENDPOINT) {
- ret = my_send_turbo_switch(dev_p, dsize, pktread);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_dma_cancel(dev_p, ep, ret);
- return ret;
- }
- } else {
- /* Kick start the queue if it is not running */
- cy_as_dma_kick_start(dev_p, ep);
- }
- return ret;
-}
-EXPORT_SYMBOL(cy_as_usb_read_data_async);
-
-cy_as_return_status_t
-cy_as_usb_write_data(cy_as_device_handle handle,
- cy_as_end_point_number_t ep, uint32_t dsize, void *data)
-{
- cy_as_return_status_t ret;
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_write_data called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- if (ep >= 16 || ep == 2 || ep == 4 || ep == 8)
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
- /* EP6 is available for writing when MTP is active */
- if (dev_p->mtp_count == 0 && ep == CY_AS_MTP_WRITE_ENDPOINT)
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
- /* If the endpoint is disabled, we cannot
- * write data to the endpoint */
- if (!dev_p->usb_config[ep].enabled)
- return CY_AS_ERROR_ENDPOINT_DISABLED;
-
- if (dev_p->usb_config[ep].dir != cy_as_usb_in &&
- dev_p->usb_config[ep].dir != cy_as_usb_in_out)
- return CY_AS_ERROR_USB_BAD_DIRECTION;
-
- /* Write on Turbo endpoint */
- if (ep == CY_AS_MTP_WRITE_ENDPOINT) {
- cy_as_ll_request_response *req_p, *reply_p;
-
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_TURBO_SEND_RESP_DATA_TO_HOST,
- CY_RQT_TUR_RQT_CONTEXT, 3);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p,
- 0, 0x0006); /* EP number to use. */
- cy_as_ll_request_response__set_word(req_p,
- 1, (uint16_t)((dsize >> 16) & 0xFFFF));
- cy_as_ll_request_response__set_word(req_p,
- 2, (uint16_t)(dsize & 0xFFFF));
-
- /* Reserve space for the reply, the reply data
- * will not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
-		if (dsize) {
-			ret = cy_as_dma_queue_request(dev_p,
-				ep, data, dsize, cy_false,
-				cy_false, sync_request_callback);
-			if (ret != CY_AS_ERROR_SUCCESS) {
-				/* free the mailbox request and
-				 * response before bailing out */
-				cy_as_ll_destroy_request(dev_p, req_p);
-				cy_as_ll_destroy_response(dev_p, reply_p);
-				return ret;
-			}
-		}
-
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret == CY_AS_ERROR_SUCCESS) {
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE)
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- else
- ret = cy_as_ll_request_response__get_word
- (reply_p, 0);
- }
-
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- if (dsize)
- cy_as_dma_cancel(dev_p, ep, ret);
- return ret;
- }
-
- /* If this is a zero-byte write, firmware will
-		 * handle it. There is no need to do any work here.
- */
- if (!dsize)
- return CY_AS_ERROR_SUCCESS;
- } else {
- ret = cy_as_dma_queue_request(dev_p, ep, data, dsize,
- cy_false, cy_false, sync_request_callback);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
- }
-
- if (ep != CY_AS_MTP_WRITE_ENDPOINT)
- ret = cy_as_dma_drain_queue(dev_p, ep, cy_true);
- else
- ret = cy_as_dma_drain_queue(dev_p, ep, cy_false);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- ret = dev_p->usb_error;
- return ret;
-}
-EXPORT_SYMBOL(cy_as_usb_write_data);
-
-static void
-mtp_write_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t ret)
-{
- cy_as_usb_io_callback cb;
- cy_as_device_handle h = (cy_as_device_handle)dev_p;
-
- cy_as_hal_assert(context == CY_RQT_TUR_RQT_CONTEXT);
-
- if (ret == CY_AS_ERROR_SUCCESS) {
- if (cy_as_ll_request_response__get_code(resp) !=
- CY_RESP_SUCCESS_FAILURE)
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- else
- ret = cy_as_ll_request_response__get_word(resp, 0);
- }
-
- /* If this was a zero byte transfer request, we can
- * call the callback from here. */
- if ((cy_as_ll_request_response__get_word(rqt, 1) == 0) &&
- (cy_as_ll_request_response__get_word(rqt, 2) == 0)) {
- cb = dev_p->usb_cb[CY_AS_MTP_WRITE_ENDPOINT];
- dev_p->usb_cb[CY_AS_MTP_WRITE_ENDPOINT] = 0;
- cy_as_device_clear_usb_async_pending(dev_p,
- CY_AS_MTP_WRITE_ENDPOINT);
- if (cb)
- cb(h, CY_AS_MTP_WRITE_ENDPOINT, 0, 0, ret);
-
- goto destroy;
- }
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- /* Firmware failed the request. Cancel the DMA transfer. */
- cy_as_dma_cancel(dev_p, 0x06, CY_AS_ERROR_CANCELED);
- dev_p->usb_cb[0x06] = 0;
- cy_as_device_clear_usb_async_pending(dev_p, 0x06);
- }
-
-destroy:
- cy_as_ll_destroy_response(dev_p, resp);
- cy_as_ll_destroy_request(dev_p, rqt);
-}
-
-cy_as_return_status_t
-cy_as_usb_write_data_async(cy_as_device_handle handle,
- cy_as_end_point_number_t ep, uint32_t dsize, void *data,
- cy_bool spacket, cy_as_usb_io_callback cb)
-{
- uint32_t mask;
- cy_as_return_status_t ret;
- cy_as_device *dev_p;
-
- cy_as_log_debug_message(6, "cy_as_usb_write_data_async called");
-
- dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (ep >= 16 || ep == 2 || ep == 4 || ep == 8)
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
- /* EP6 is available for writing when MTP is active */
- if (dev_p->mtp_count == 0 && ep == CY_AS_MTP_WRITE_ENDPOINT)
- return CY_AS_ERROR_INVALID_ENDPOINT;
-
- /* If the endpoint is disabled, we cannot
- * write data to the endpoint */
- if (!dev_p->usb_config[ep].enabled)
- return CY_AS_ERROR_ENDPOINT_DISABLED;
-
- if (dev_p->usb_config[ep].dir != cy_as_usb_in &&
- dev_p->usb_config[ep].dir != cy_as_usb_in_out)
- return CY_AS_ERROR_USB_BAD_DIRECTION;
-
-	/*
-	 * Since async operations can be triggered by interrupt
-	 * code, we must ensure that we do not get multiple
-	 * async operations going at one time, and protect this
-	 * test-and-set operation from interrupts.
-	 */
- mask = cy_as_hal_disable_interrupts();
- if (cy_as_device_is_usb_async_pending(dev_p, ep)) {
- cy_as_hal_enable_interrupts(mask);
- return CY_AS_ERROR_ASYNC_PENDING;
- }
-
- cy_as_device_set_usb_async_pending(dev_p, ep);
-
- if (ep == 0)
- cy_as_device_set_ack_delayed(dev_p);
-
- cy_as_hal_enable_interrupts(mask);
-
- cy_as_hal_assert(dev_p->usb_cb[ep] == 0);
- dev_p->usb_cb[ep] = cb;
- dev_p->usb_spacket[ep] = spacket;
-
- /* Write on Turbo endpoint */
- if (ep == CY_AS_MTP_WRITE_ENDPOINT) {
- cy_as_ll_request_response *req_p, *reply_p;
-
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_TURBO_SEND_RESP_DATA_TO_HOST,
- CY_RQT_TUR_RQT_CONTEXT, 3);
-
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- cy_as_ll_request_response__set_word(req_p, 0,
- 0x0006); /* EP number to use. */
- cy_as_ll_request_response__set_word(req_p, 1,
- (uint16_t)((dsize >> 16) & 0xFFFF));
- cy_as_ll_request_response__set_word(req_p, 2,
- (uint16_t)(dsize & 0xFFFF));
-
- /* Reserve space for the reply, the reply data
- * will not exceed one word */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
-		if (dsize) {
-			ret = cy_as_dma_queue_request(dev_p, ep, data,
-				dsize, cy_false, cy_false,
-				async_write_request_callback);
-			if (ret != CY_AS_ERROR_SUCCESS) {
-				/* free the mailbox request and
-				 * response before bailing out */
-				cy_as_ll_destroy_request(dev_p, req_p);
-				cy_as_ll_destroy_response(dev_p, reply_p);
-				return ret;
-			}
-		}
-
- ret = cy_as_ll_send_request(dev_p, req_p, reply_p,
- cy_false, mtp_write_callback);
- if (ret != CY_AS_ERROR_SUCCESS) {
- if (dsize)
- cy_as_dma_cancel(dev_p, ep, ret);
- return ret;
- }
-
- /* Firmware will handle a zero byte transfer
- * without any DMA transfers. */
- if (!dsize)
- return CY_AS_ERROR_SUCCESS;
- } else {
- ret = cy_as_dma_queue_request(dev_p, ep, data, dsize,
- cy_false, cy_false, async_write_request_callback);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
- }
-
- /* Kick start the queue if it is not running */
- if (ep != CY_AS_MTP_WRITE_ENDPOINT)
- cy_as_dma_kick_start(dev_p, ep);
-
- return CY_AS_ERROR_SUCCESS;
-}
-EXPORT_SYMBOL(cy_as_usb_write_data_async);
-
-static void
-my_usb_cancel_async_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t ret)
-{
- uint8_t ep;
- (void)context;
-
- ep = (uint8_t)cy_as_ll_request_response__get_word(rqt, 0);
- if (ret == CY_AS_ERROR_SUCCESS) {
- if (cy_as_ll_request_response__get_code(resp) !=
- CY_RESP_SUCCESS_FAILURE)
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- else
- ret = cy_as_ll_request_response__get_word(resp, 0);
- }
-
- cy_as_ll_destroy_request(dev_p, rqt);
- cy_as_ll_destroy_response(dev_p, resp);
-
- if (ret == CY_AS_ERROR_SUCCESS) {
- cy_as_dma_cancel(dev_p, ep, CY_AS_ERROR_CANCELED);
- dev_p->usb_cb[ep] = 0;
- cy_as_device_clear_usb_async_pending(dev_p, ep);
- }
-}
-
-cy_as_return_status_t
-cy_as_usb_cancel_async(cy_as_device_handle handle,
- cy_as_end_point_number_t ep)
-{
- cy_as_return_status_t ret;
- cy_as_ll_request_response *req_p, *reply_p;
-
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ep &= 0x7F; /* Remove the direction bit. */
- if (!cy_as_device_is_usb_async_pending(dev_p, ep))
- return CY_AS_ERROR_ASYNC_NOT_PENDING;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_in_suspend_mode(dev_p))
- return CY_AS_ERROR_IN_SUSPEND;
-
- if ((ep == CY_AS_MTP_WRITE_ENDPOINT) ||
- (ep == CY_AS_MTP_READ_ENDPOINT)) {
- /* Need firmware support for the cancel operation. */
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_CANCEL_ASYNC_TRANSFER,
- CY_RQT_TUR_RQT_CONTEXT, 1);
-
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- cy_as_ll_request_response__set_word(req_p, 0,
- (uint16_t)ep);
-
- ret = cy_as_ll_send_request(dev_p, req_p, reply_p,
- cy_false, my_usb_cancel_async_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
- return ret;
- }
- } else {
- ret = cy_as_dma_cancel(dev_p, ep, CY_AS_ERROR_CANCELED);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- dev_p->usb_cb[ep] = 0;
- cy_as_device_clear_usb_async_pending(dev_p, ep);
- }
-
- return CY_AS_ERROR_SUCCESS;
-}
-EXPORT_SYMBOL(cy_as_usb_cancel_async);
-
-static void
-cy_as_usb_ack_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t ret)
-{
- cy_as_func_c_b_node *node = (cy_as_func_c_b_node *)
- dev_p->func_cbs_usb->head_p;
-
- (void)context;
-
- if (ret == CY_AS_ERROR_SUCCESS) {
- if (cy_as_ll_request_response__get_code(resp) !=
- CY_RESP_SUCCESS_FAILURE)
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- else
- ret = cy_as_ll_request_response__get_word(resp, 0);
- }
-
- node->cb_p((cy_as_device_handle)dev_p, ret,
- node->client_data, node->data_type, node->data);
- cy_as_remove_c_b_node(dev_p->func_cbs_usb);
-
- cy_as_ll_destroy_request(dev_p, rqt);
- cy_as_ll_destroy_response(dev_p, resp);
- cy_as_device_clear_ack_delayed(dev_p);
-}
-
-static cy_as_return_status_t
-cy_as_usb_ack_setup_packet(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret;
- cy_as_ll_request_response *req_p;
- cy_as_ll_request_response *reply_p;
- cy_as_func_c_b_node *cbnode;
-
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_in_callback(dev_p) && cb == 0)
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- cy_as_hal_assert(cb != 0);
-
- cbnode = cy_as_create_func_c_b_node(cb, client);
- if (cbnode == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- req_p = cy_as_ll_create_request(dev_p, 0,
- CY_RQT_USB_RQT_CONTEXT, 2);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- cy_as_ll_init_request(req_p, CY_RQT_ACK_SETUP_PACKET,
- CY_RQT_USB_RQT_CONTEXT, 1);
- cy_as_ll_init_response(reply_p, 1);
-
- req_p->flags |= CY_AS_REQUEST_RESPONSE_EX;
-
- cy_as_insert_c_b_node(dev_p->func_cbs_usb, cbnode);
-
- ret = cy_as_ll_send_request(dev_p, req_p, reply_p,
- cy_false, cy_as_usb_ack_callback);
-
- return ret;
-}
-
-/*
- * Flush all data in logical EP that is being NAK-ed or
- * Stall-ed, so that this does not continue to block data
- * on other LEPs that use the same physical EP.
- */
-static void
-cy_as_usb_flush_logical_e_p(
- cy_as_device *dev_p,
- uint16_t ep)
-{
- uint16_t addr, val, count;
-
- addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
- val = cy_as_hal_read_register(dev_p->tag, addr);
-
- while (val) {
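-		/* (val & 0xFFF) is treated as a byte count for the EP;
-		 * each register read below drains one 16-bit word,
-		 * hence the rounded-up bytes-to-words conversion. */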
- count = ((val & 0xFFF) + 1) / 2;
- while (count--)
- val = cy_as_hal_read_register(dev_p->tag, ep);
-
- cy_as_hal_write_register(dev_p->tag, addr, 0);
- val = cy_as_hal_read_register(dev_p->tag, addr);
- }
-}
-
-static cy_as_return_status_t
-cy_as_usb_nak_stall_request(cy_as_device_handle handle,
- cy_as_end_point_number_t ep,
- uint16_t request,
- cy_bool state,
- cy_as_usb_function_callback cb,
- cy_as_function_callback fcb,
- uint32_t client)
-{
- cy_as_return_status_t ret;
- cy_as_ll_request_response *req_p , *reply_p;
- uint16_t data;
-
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- if (cb)
- cy_as_hal_assert(fcb == 0);
- if (fcb)
- cy_as_hal_assert(cb == 0);
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_in_callback(dev_p) && cb == 0 && fcb == 0)
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- req_p = cy_as_ll_create_request(dev_p,
- request, CY_RQT_USB_RQT_CONTEXT, 2);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- /* A single status word response type */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- /* Set the endpoint */
- data = (uint8_t)ep;
- cy_as_ll_request_response__set_word(req_p, 0, data);
-
-	/* Set the requested NAK/stall state */
- cy_as_ll_request_response__set_word(req_p, 1, (uint8_t)state);
-
- if (cb || fcb) {
- void *cbnode;
- cy_as_c_b_queue *queue;
- if (cb) {
- cbnode = cy_as_create_usb_func_c_b_node(cb, client);
- queue = dev_p->usb_func_cbs;
- } else {
- cbnode = cy_as_create_func_c_b_node(fcb, client);
- queue = dev_p->func_cbs_usb;
- req_p->flags |= CY_AS_REQUEST_RESPONSE_EX;
- }
-
- if (cbnode == 0) {
- ret = CY_AS_ERROR_OUT_OF_MEMORY;
- goto destroy;
- } else
- cy_as_insert_c_b_node(queue, cbnode);
-
-
- if (cy_as_device_is_setup_packet(dev_p)) {
- /* No Ack is needed on a stall request on EP0 */
- if ((state == cy_true) && (ep == 0)) {
- cy_as_device_set_ep0_stalled(dev_p);
- } else {
- cy_as_device_set_ack_delayed(dev_p);
- req_p->flags |=
- CY_AS_REQUEST_RESPONSE_DELAY_ACK;
- }
- }
-
- ret = cy_as_ll_send_request(dev_p, req_p,
- reply_p, cy_false, cy_as_usb_func_callback);
- if (ret != CY_AS_ERROR_SUCCESS) {
- if (req_p->flags & CY_AS_REQUEST_RESPONSE_DELAY_ACK)
- cy_as_device_rem_ack_delayed(dev_p);
- cy_as_remove_c_b_tail_node(queue);
-
- goto destroy;
- }
- } else {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) !=
- CY_RESP_SUCCESS_FAILURE) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
-
- if ((ret == CY_AS_ERROR_SUCCESS) &&
- (request == CY_RQT_STALL_ENDPOINT)) {
- if ((ep > 1) && (state != 0) &&
- (dev_p->usb_config[ep].dir == cy_as_usb_out))
- cy_as_usb_flush_logical_e_p(dev_p, ep);
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
- }
-
- return ret;
-}
-
-static cy_as_return_status_t
-my_handle_response_get_stall(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_bool *state_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint8_t code = cy_as_ll_request_response__get_code(reply_p);
-
- if (code == CY_RESP_SUCCESS_FAILURE) {
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- goto destroy;
- } else if (code != CY_RESP_ENDPOINT_STALL) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- *state_p = (cy_bool)cy_as_ll_request_response__get_word(reply_p, 0);
- ret = CY_AS_ERROR_SUCCESS;
-
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-static cy_as_return_status_t
-my_handle_response_get_nak(cy_as_device *dev_p,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_bool *state_p)
-{
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
- uint8_t code = cy_as_ll_request_response__get_code(reply_p);
-
- if (code == CY_RESP_SUCCESS_FAILURE) {
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- goto destroy;
- } else if (code != CY_RESP_ENDPOINT_NAK) {
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- goto destroy;
- }
-
- *state_p = (cy_bool)cy_as_ll_request_response__get_word(reply_p, 0);
- ret = CY_AS_ERROR_SUCCESS;
-
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-static cy_as_return_status_t
-cy_as_usb_get_nak_stall(cy_as_device_handle handle,
- cy_as_end_point_number_t ep,
- uint16_t request,
- uint16_t response,
- cy_bool *state_p,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret;
- cy_as_ll_request_response *req_p , *reply_p;
- uint16_t data;
-
- cy_as_device *dev_p = (cy_as_device *)handle;
-
- (void)response;
-
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_in_callback(dev_p) && !cb)
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- req_p = cy_as_ll_create_request(dev_p, request,
- CY_RQT_USB_RQT_CONTEXT, 1);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- /* Set the endpoint */
- data = (uint8_t)ep;
- cy_as_ll_request_response__set_word(req_p, 0, (uint16_t)ep);
-
- /* A single status word response type */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p,
- req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (request == CY_RQT_GET_STALL)
- return my_handle_response_get_stall(dev_p,
- req_p, reply_p, state_p);
- else
- return my_handle_response_get_nak(dev_p,
- req_p, reply_p, state_p);
-
- } else {
- cy_as_funct_c_b_type type;
-
- if (request == CY_RQT_GET_STALL)
- type = CY_FUNCT_CB_USB_GETSTALL;
- else
- type = CY_FUNCT_CB_USB_GETNAK;
-
- ret = cy_as_misc_send_request(dev_p, cb, client, type,
- state_p, dev_p->func_cbs_usb, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_usb_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-
-cy_as_return_status_t
-cy_as_usb_set_nak(cy_as_device_handle handle,
- cy_as_end_point_number_t ep,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- /*
- * we send the firmware the EP# with the appropriate direction
- * bit, regardless of what the user gave us.
- */
- ep &= 0x0f;
- if (dev_p->usb_config[ep].dir == cy_as_usb_in)
- ep |= 0x80;
-
- if (dev_p->mtp_count > 0)
- return CY_AS_ERROR_NOT_VALID_IN_MTP;
-
- return cy_as_usb_nak_stall_request(handle, ep,
- CY_RQT_ENDPOINT_SET_NAK, cy_true, 0, cb, client);
-}
-EXPORT_SYMBOL(cy_as_usb_set_nak);
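-
-/*
- * The NAK and stall wrappers in this file all repeat the same endpoint
- * encoding: the low nibble carries the endpoint number and bit 7 is
- * set for IN endpoints.  A minimal sketch of that encoding as a helper
- * (hypothetical name, not part of the original source):
- */
-static inline cy_as_end_point_number_t
-encode_fw_ep(cy_as_end_point_number_t ep, cy_bool is_in)
-{
-	ep &= 0x0f;		/* endpoint number */
-	if (is_in)
-		ep |= 0x80;	/* USB IN direction bit */
-	return ep;
-}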
-
-cy_as_return_status_t
-cy_as_usb_clear_nak(cy_as_device_handle handle,
- cy_as_end_point_number_t ep,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- /*
- * we send the firmware the EP# with the appropriate
- * direction bit, regardless of what the user gave us.
- */
- ep &= 0x0f;
- if (dev_p->usb_config[ep].dir == cy_as_usb_in)
- ep |= 0x80;
-
- if (dev_p->mtp_count > 0)
- return CY_AS_ERROR_NOT_VALID_IN_MTP;
-
- return cy_as_usb_nak_stall_request(handle, ep,
- CY_RQT_ENDPOINT_SET_NAK, cy_false, 0, cb, client);
-}
-EXPORT_SYMBOL(cy_as_usb_clear_nak);
-
-cy_as_return_status_t
-cy_as_usb_get_nak(cy_as_device_handle handle,
- cy_as_end_point_number_t ep,
- cy_bool *nak_p,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- /*
- * we send the firmware the EP# with the appropriate
- * direction bit, regardless of what the user gave us.
- */
- ep &= 0x0f;
- if (dev_p->usb_config[ep].dir == cy_as_usb_in)
- ep |= 0x80;
-
- if (dev_p->mtp_count > 0)
- return CY_AS_ERROR_NOT_VALID_IN_MTP;
-
- return cy_as_usb_get_nak_stall(handle, ep,
- CY_RQT_GET_ENDPOINT_NAK, CY_RESP_ENDPOINT_NAK,
- nak_p, cb, client);
-}
-EXPORT_SYMBOL(cy_as_usb_get_nak);
-
-cy_as_return_status_t
-cy_as_usb_set_stall(cy_as_device_handle handle,
- cy_as_end_point_number_t ep,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- /*
- * we send the firmware the EP# with the appropriate
- * direction bit, regardless of what the user gave us.
- */
- ep &= 0x0f;
- if (dev_p->usb_config[ep].dir == cy_as_usb_in)
- ep |= 0x80;
-
- if (dev_p->mtp_turbo_active)
- return CY_AS_ERROR_NOT_VALID_DURING_MTP;
-
- return cy_as_usb_nak_stall_request(handle, ep,
- CY_RQT_STALL_ENDPOINT, cy_true, 0, cb, client);
-}
-EXPORT_SYMBOL(cy_as_usb_set_stall);
-
-cy_as_return_status_t
-cy_as_usb_clear_stall(cy_as_device_handle handle,
- cy_as_end_point_number_t ep,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- /*
- * we send the firmware the EP# with the appropriate
- * direction bit, regardless of what the user gave us.
- */
- ep &= 0x0f;
- if (dev_p->usb_config[ep].dir == cy_as_usb_in)
- ep |= 0x80;
-
- if (dev_p->mtp_turbo_active)
- return CY_AS_ERROR_NOT_VALID_DURING_MTP;
-
- return cy_as_usb_nak_stall_request(handle, ep,
- CY_RQT_STALL_ENDPOINT, cy_false, 0, cb, client);
-}
-EXPORT_SYMBOL(cy_as_usb_clear_stall);
-
-cy_as_return_status_t
-cy_as_usb_get_stall(cy_as_device_handle handle,
- cy_as_end_point_number_t ep,
- cy_bool *stall_p,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- /*
- * we send the firmware the EP# with the appropriate
- * direction bit, regardless of what the user gave us.
- */
- ep &= 0x0f;
- if (dev_p->usb_config[ep].dir == cy_as_usb_in)
- ep |= 0x80;
-
- if (dev_p->mtp_turbo_active)
- return CY_AS_ERROR_NOT_VALID_DURING_MTP;
-
- return cy_as_usb_get_nak_stall(handle, ep,
- CY_RQT_GET_STALL, CY_RESP_ENDPOINT_STALL, stall_p, cb, client);
-}
-EXPORT_SYMBOL(cy_as_usb_get_stall);
-
-cy_as_return_status_t
-cy_as_usb_signal_remote_wakeup(cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret;
- cy_as_ll_request_response *req_p , *reply_p;
-
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if (cy_as_device_is_in_callback(dev_p))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- if (dev_p->usb_last_event != cy_as_event_usb_suspend)
- return CY_AS_ERROR_NOT_IN_SUSPEND;
-
- req_p = cy_as_ll_create_request(dev_p,
- CY_RQT_USB_REMOTE_WAKEUP, CY_RQT_USB_RQT_CONTEXT, 0);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- /* A single status word response type */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) ==
- CY_RESP_SUCCESS_FAILURE)
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- else
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_USB_SIGNALREMOTEWAKEUP, 0,
- dev_p->func_cbs_usb,
- CY_AS_REQUEST_RESPONSE_EX, req_p,
- reply_p, cy_as_usb_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_usb_signal_remote_wakeup);
-
-cy_as_return_status_t
-cy_as_usb_set_m_s_report_threshold(cy_as_device_handle handle,
- uint32_t wr_sectors,
- uint32_t rd_sectors,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret;
- cy_as_ll_request_response *req_p , *reply_p;
-
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
- if ((cb == 0) && (cy_as_device_is_in_callback(dev_p)))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- /* Check if the firmware version supports this feature. */
- if ((dev_p->media_supported[0]) && (dev_p->media_supported[0] ==
- (1 << cy_as_media_nand)))
- return CY_AS_ERROR_NOT_SUPPORTED;
-
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_USB_STORAGE_MONITOR,
- CY_RQT_USB_RQT_CONTEXT, 4);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- /* A single status word response type */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
- /* Set the read and write count parameters into
- * the request structure. */
- cy_as_ll_request_response__set_word(req_p, 0,
- (uint16_t)((wr_sectors >> 16) & 0xFFFF));
- cy_as_ll_request_response__set_word(req_p, 1,
- (uint16_t)(wr_sectors & 0xFFFF));
- cy_as_ll_request_response__set_word(req_p, 2,
- (uint16_t)((rd_sectors >> 16) & 0xFFFF));
- cy_as_ll_request_response__set_word(req_p, 3,
- (uint16_t)(rd_sectors & 0xFFFF));
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) ==
- CY_RESP_SUCCESS_FAILURE)
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- else
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_USB_SET_MSREPORT_THRESHOLD, 0,
- dev_p->func_cbs_usb, CY_AS_REQUEST_RESPONSE_EX,
- req_p, reply_p, cy_as_usb_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_usb_set_m_s_report_threshold);
-
-cy_as_return_status_t
-cy_as_usb_select_m_s_partitions(
- cy_as_device_handle handle,
- cy_as_bus_number_t bus,
- uint32_t device,
- cy_as_usb_m_s_type_t type,
- cy_as_function_callback cb,
- uint32_t client)
-{
- cy_as_return_status_t ret;
- cy_as_ll_request_response *req_p , *reply_p;
- uint16_t val;
-
- cy_as_device *dev_p = (cy_as_device *)handle;
- if (!dev_p || (dev_p->sig != CY_AS_DEVICE_HANDLE_SIGNATURE))
- return CY_AS_ERROR_INVALID_HANDLE;
-
- ret = is_usb_active(dev_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- return ret;
-
-	/* This API call has to be made before SetEnumConfig is called. */
- if (dev_p->usb_config[0].enabled)
- return CY_AS_ERROR_INVALID_CALL_SEQUENCE;
-
- if ((cb == 0) && (cy_as_device_is_in_callback(dev_p)))
- return CY_AS_ERROR_INVALID_IN_CALLBACK;
-
- req_p = cy_as_ll_create_request(dev_p, CY_RQT_MS_PARTITION_SELECT,
- CY_RQT_USB_RQT_CONTEXT, 2);
- if (req_p == 0)
- return CY_AS_ERROR_OUT_OF_MEMORY;
-
- /* A single status word response type */
- reply_p = cy_as_ll_create_response(dev_p, 1);
- if (reply_p == 0) {
- cy_as_ll_destroy_request(dev_p, req_p);
- return CY_AS_ERROR_OUT_OF_MEMORY;
- }
-
-	/* Set the bus/device address and the partition selection
-	 * flags in the request structure. */
- cy_as_ll_request_response__set_word(req_p, 0,
- (uint16_t)((bus << 8) | device));
-
- val = 0;
- if ((type == cy_as_usb_m_s_unit0) || (type == cy_as_usb_m_s_both))
- val |= 1;
- if ((type == cy_as_usb_m_s_unit1) || (type == cy_as_usb_m_s_both))
- val |= (1 << 8);
-
- cy_as_ll_request_response__set_word(req_p, 1, val);
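-	/*
-	 * Encoding sketch for the word above: bit 0 selects unit 0 and
-	 * bit 8 selects unit 1, so cy_as_usb_m_s_both yields 0x0101.
-	 */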
-
- if (cb == 0) {
- ret = cy_as_ll_send_request_wait_reply(dev_p, req_p, reply_p);
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
-
- if (cy_as_ll_request_response__get_code(reply_p) ==
- CY_RESP_SUCCESS_FAILURE)
- ret = cy_as_ll_request_response__get_word(reply_p, 0);
- else
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- } else {
- ret = cy_as_misc_send_request(dev_p, cb, client,
- CY_FUNCT_CB_NODATA, 0, dev_p->func_cbs_usb,
- CY_AS_REQUEST_RESPONSE_EX, req_p, reply_p,
- cy_as_usb_func_callback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- goto destroy;
- return ret;
- }
-
-destroy:
- cy_as_ll_destroy_request(dev_p, req_p);
- cy_as_ll_destroy_response(dev_p, reply_p);
-
- return ret;
-}
-EXPORT_SYMBOL(cy_as_usb_select_m_s_partitions);
-
-static void
-cy_as_usb_func_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_ll_request_response *rqt,
- cy_as_ll_request_response *resp,
- cy_as_return_status_t stat)
-{
- cy_as_usb_func_c_b_node* node = (cy_as_usb_func_c_b_node *)
- dev_p->usb_func_cbs->head_p;
- cy_as_func_c_b_node* fnode = (cy_as_func_c_b_node *)
- dev_p->func_cbs_usb->head_p;
- cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
-
- cy_as_device_handle h = (cy_as_device_handle)dev_p;
- cy_bool delayed_ack = (rqt->flags & CY_AS_REQUEST_RESPONSE_DELAY_ACK)
- == CY_AS_REQUEST_RESPONSE_DELAY_ACK;
- cy_bool ex_request = (rqt->flags & CY_AS_REQUEST_RESPONSE_EX)
- == CY_AS_REQUEST_RESPONSE_EX;
- cy_bool ms_request = (rqt->flags & CY_AS_REQUEST_RESPONSE_MS)
- == CY_AS_REQUEST_RESPONSE_MS;
- uint8_t code;
- uint8_t ep, state;
-
- if (!ex_request && !ms_request) {
- cy_as_hal_assert(dev_p->usb_func_cbs->count != 0);
- cy_as_hal_assert(dev_p->usb_func_cbs->type ==
- CYAS_USB_FUNC_CB);
- } else {
- cy_as_hal_assert(dev_p->func_cbs_usb->count != 0);
- cy_as_hal_assert(dev_p->func_cbs_usb->type == CYAS_FUNC_CB);
- }
-
- (void)context;
-
-	/* The handlers are responsible for deleting the rqt and resp when
- * they are finished
- */
- code = cy_as_ll_request_response__get_code(rqt);
- switch (code) {
- case CY_RQT_START_USB:
- ret = my_handle_response_usb_start(dev_p, rqt, resp, stat);
- break;
- case CY_RQT_STOP_USB:
- ret = my_handle_response_usb_stop(dev_p, rqt, resp, stat);
- break;
- case CY_RQT_SET_CONNECT_STATE:
- if (!cy_as_ll_request_response__get_word(rqt, 0))
- ret = my_handle_response_disconnect(
- dev_p, rqt, resp, stat);
- else
- ret = my_handle_response_connect(
- dev_p, rqt, resp, stat);
- break;
- case CY_RQT_GET_CONNECT_STATE:
- break;
- case CY_RQT_SET_USB_CONFIG:
- ret = my_handle_response_set_enum_config(dev_p, rqt, resp);
- break;
- case CY_RQT_GET_USB_CONFIG:
- cy_as_hal_assert(fnode->data != 0);
- ret = my_handle_response_get_enum_config(dev_p,
- rqt, resp, fnode->data);
- break;
- case CY_RQT_STALL_ENDPOINT:
- ep = (uint8_t)cy_as_ll_request_response__get_word(rqt, 0);
- state = (uint8_t)cy_as_ll_request_response__get_word(rqt, 1);
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- if ((ret == CY_AS_ERROR_SUCCESS) && (ep > 1) && (state != 0)
- && (dev_p->usb_config[ep].dir == cy_as_usb_out))
- cy_as_usb_flush_logical_e_p(dev_p, ep);
- break;
- case CY_RQT_GET_STALL:
- cy_as_hal_assert(fnode->data != 0);
- ret = my_handle_response_get_stall(dev_p,
- rqt, resp, (cy_bool *)fnode->data);
- break;
- case CY_RQT_SET_DESCRIPTOR:
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- case CY_RQT_GET_DESCRIPTOR:
- cy_as_hal_assert(fnode->data != 0);
- ret = my_handle_response_get_descriptor(dev_p,
- rqt, resp, (cy_as_get_descriptor_data *)fnode->data);
- break;
- case CY_RQT_SET_USB_CONFIG_REGISTERS:
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- if (ret == CY_AS_ERROR_SUCCESS)
- ret = cy_as_usb_setup_dma(dev_p);
- break;
- case CY_RQT_ENDPOINT_SET_NAK:
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- case CY_RQT_GET_ENDPOINT_NAK:
- cy_as_hal_assert(fnode->data != 0);
- ret = my_handle_response_get_nak(dev_p,
- rqt, resp, (cy_bool *)fnode->data);
- break;
- case CY_RQT_ACK_SETUP_PACKET:
- break;
- case CY_RQT_USB_REMOTE_WAKEUP:
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- case CY_RQT_CLEAR_DESCRIPTORS:
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- case CY_RQT_USB_STORAGE_MONITOR:
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- case CY_RQT_MS_PARTITION_SELECT:
- ret = my_handle_response_no_data(dev_p, rqt, resp);
- break;
- default:
- ret = CY_AS_ERROR_INVALID_RESPONSE;
- cy_as_hal_assert(cy_false);
- break;
- }
-
- /*
- * if the low level layer returns a direct error, use
- * the corresponding error code. if not, use the error
- * code based on the response from firmware.
- */
- if (stat == CY_AS_ERROR_SUCCESS)
- stat = ret;
-
- if (ex_request || ms_request) {
- fnode->cb_p((cy_as_device_handle)dev_p, stat,
- fnode->client_data, fnode->data_type, fnode->data);
- cy_as_remove_c_b_node(dev_p->func_cbs_usb);
- } else {
- node->cb_p((cy_as_device_handle)dev_p, stat,
- node->client_data);
- cy_as_remove_c_b_node(dev_p->usb_func_cbs);
- }
-
- if (delayed_ack) {
- cy_as_hal_assert(cy_as_device_is_ack_delayed(dev_p));
- cy_as_device_rem_ack_delayed(dev_p);
-
- /*
- * send the ACK if required.
- */
- if (!cy_as_device_is_ack_delayed(dev_p))
- cy_as_usb_ack_setup_packet(h,
- usb_ack_callback, 0);
- }
-}
-
-
-/*[]*/
diff --git a/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c b/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c
deleted file mode 100644
index dd4cd412aeb..00000000000
--- a/drivers/staging/westbridge/astoria/arch/arm/mach-omap2/cyashalomap_kernel.c
+++ /dev/null
@@ -1,2441 +0,0 @@
-/* Cypress WestBridge OMAP3430 Kernel Hal source file (cyashalomap_kernel.c)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor,
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifdef CONFIG_MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL
-
-#include <linux/fs.h>
-#include <linux/ioport.h>
-#include <linux/timer.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/scatterlist.h>
-#include <linux/mm.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-/* include paths seem broken; moved here for patch submission
- * #include <mach/mux.h>
- * #include <mach/gpmc.h>
- * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h>
- * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h>
- * #include <mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h>
- * #include <linux/westbridge/cyaserr.h>
- * #include <linux/westbridge/cyasregs.h>
- * #include <linux/westbridge/cyasdma.h>
- * #include <linux/westbridge/cyasintr.h>
- */
-#include <linux/../../arch/arm/plat-omap/include/plat/mux.h>
-#include <linux/../../arch/arm/plat-omap/include/plat/gpmc.h>
-#include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h"
-#include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h"
-#include "../plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h"
-#include "../../../include/linux/westbridge/cyaserr.h"
-#include "../../../include/linux/westbridge/cyasregs.h"
-#include "../../../include/linux/westbridge/cyasdma.h"
-#include "../../../include/linux/westbridge/cyasintr.h"
-
-#define HAL_REV "1.1.0"
-
-/*
- * define to enable the 16-bit pnand interface
- */
-#define PNAND_16BIT_MODE
-
-/*
- * selects one of 3 versions of pnand_lbd_read():
- * PNAND_LBD_READ_NO_PFE - original 8/16 bit code,
- * reads through the GPMC controller registers
- * ENABLE_GPMC_PF_ENGINE - uses GPMC PFE FIFO reads, in 8 bit mode,
- * same speed as the above
- * PFE_LBD_READ_V2 - slightly different, performance same as above
- */
-#define PNAND_LBD_READ_NO_PFE
-/* #define ENABLE_GPMC_PF_ENGINE */
-/* #define PFE_LBD_READ_V2 */
-
-/*
- * west bridge astoria ISR option to limit the number of
- * back-to-back DMA transfers per ISR interrupt
- */
-#define MAX_DRQ_LOOPS_IN_ISR 4
-
-/*
- * debug print enabling flags
- *#define DBGPRN_ENABLED
- *#define DBGPRN_DMA_SETUP_RD
- *#define DBGPRN_DMA_SETUP_WR
- */
-
-
-/*
- * For performance reasons, we handle storage endpoint transfers up to 4 KB
- * within the HAL itself.
- */
-#define CYASSTORAGE_WRITE_EP_NUM (4)
-#define CYASSTORAGE_READ_EP_NUM (8)
-
-/*
- * size of the DMA packet the HAL can accept from the Storage API;
- * the HAL will fragment it into smaller chunks that the P port can accept
- */
-#define CYASSTORAGE_MAX_XFER_SIZE (2*32768)
-
-/*
- * P port MAX DMA packet size according to interface/ep configuration
- */
-#define HAL_DMA_PKT_SZ 512
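-
-/*
- * Fragmentation sketch for the sizes above: a storage request of up
- * to CYASSTORAGE_MAX_XFER_SIZE bytes is consumed in HAL_DMA_PKT_SZ
- * sized chunks on the P port (hypothetical helper, not part of the
- * original source):
- */
-static inline uint16_t next_dma_chunk(uint32_t remaining)
-{
-	return (remaining > HAL_DMA_PKT_SZ) ?
-		HAL_DMA_PKT_SZ : (uint16_t)remaining;
-}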
-
-#define is_storage_e_p(ep) (((ep) == 2) || ((ep) == 4) || \
- ((ep) == 6) || ((ep) == 8))
-
-/*
- * persistent, stores current GPMC interface cfg mode
- */
-static uint8_t pnand_16bit;
-
-/*
- * keep processing new WB DRQ in ISR until all handled (performance feature)
- */
-#define PROCESS_MULTIPLE_DRQ_IN_ISR (1)
-
-
-/*
- * ASTORIA PNAND IF COMMANDS, CASDO - READ, CASDI - WRITE
- */
-#define CASDO 0x05
-#define CASDI 0x85
-#define RDPAGE_B1 0x00
-#define RDPAGE_B2 0x30
-#define PGMPAGE_B1 0x80
-#define PGMPAGE_B2 0x10
-
-/*
- * The type of DMA operation, per endpoint
- */
-typedef enum cy_as_hal_dma_type {
- cy_as_hal_read,
- cy_as_hal_write,
- cy_as_hal_none
-} cy_as_hal_dma_type;
-
-
-/*
- * SG list helpers defined in scatterlist.h
-#define sg_is_chain(sg) ((sg)->page_link & 0x01)
-#define sg_is_last(sg) ((sg)->page_link & 0x02)
-#define sg_chain_ptr(sg) \
- ((struct scatterlist *) ((sg)->page_link & ~0x03))
-*/
-typedef struct cy_as_hal_endpoint_dma {
- cy_bool buffer_valid;
- uint8_t *data_p;
- uint32_t size;
- /*
-	 * sg_list_enabled - if true, r/w DMA transfers use the sg list;
-	 * if false, they use a pointer to a buffer
-	 * sg_p - pointer to the owner's sg list, if there is one
-	 * (like the block driver)
-	 * dma_xfer_sz - size of the next dma xfer on the P port
-	 * seg_xfer_cnt - counts transferred bytes in the current sg_list
-	 * memory segment
-	 * req_xfer_cnt - total number of bytes transferred so far in the
-	 * current request
-	 * req_length - total request length
- */
- bool sg_list_enabled;
- struct scatterlist *sg_p;
- uint16_t dma_xfer_sz;
- uint32_t seg_xfer_cnt;
- uint16_t req_xfer_cnt;
- uint16_t req_length;
- cy_as_hal_dma_type type;
- cy_bool pending;
-} cy_as_hal_endpoint_dma;
-
-/*
- * The list of OMAP devices (should be one)
- */
-static cy_as_omap_dev_kernel *m_omap_list_p;
-
-/*
- * The callback to call after DMA operations are complete
- */
-static cy_as_hal_dma_complete_callback callback;
-
-/*
- * Pending data size for the endpoints
- */
-static cy_as_hal_endpoint_dma end_points[16];
-
-/*
- * Forward declaration
- */
-static void cy_handle_d_r_q_interrupt(cy_as_omap_dev_kernel *dev_p);
-
-static uint16_t intr_sequence_num;
-static uint8_t intr__enable;
-spinlock_t int_lock;
-
-static u32 iomux_vma;
-static u32 csa_phy;
-
-/*
- * gpmc I/O registers VMA
- */
-static u32 gpmc_base;
-
-/*
- * gpmc data VMA associated with CS4 (ASTORIA CS on GPMC)
- */
-static u32 gpmc_data_vma;
-static u32 ndata_reg_vma;
-static u32 ncmd_reg_vma;
-static u32 naddr_reg_vma;
-
-/*
- * fwd declarations
- */
-static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff);
-static void p_nand_lbd_write(u16 col_addr, u32 row_addr, u16 count, void *buff);
-static inline u16 __attribute__((always_inline))
- ast_p_nand_casdo_read(u8 reg_addr8);
-static inline void __attribute__((always_inline))
- ast_p_nand_casdi_write(u8 reg_addr8, u16 data);
-
-/*
- * prints given number of omap registers
- */
-static void cy_as_hal_print_omap_regs(char *name_prefix,
- u8 name_base, u32 virt_base, u16 count)
-{
- u32 reg_val, reg_addr;
- u16 i;
- cy_as_hal_print_message(KERN_INFO "\n");
- for (i = 0; i < count; i++) {
-
- reg_addr = virt_base + (i*4);
- /* use virtual addresses here*/
- reg_val = __raw_readl(reg_addr);
- cy_as_hal_print_message(KERN_INFO "%s_%d[%8.8x]=%8.8x\n",
- name_prefix, name_base+i,
- reg_addr, reg_val);
- }
-}
-
-/*
- * setMUX function for a pad + additional pad flags
- */
-static u16 omap_cfg_reg_L(u32 pad_func_index)
-{
- static u8 sanity_check = 1;
-
- u32 reg_vma;
- u16 cur_val, wr_val, rdback_val;
-
- /*
- * do sanity check on the omap_mux_pin_cfg[] table
- */
- cy_as_hal_print_message(KERN_INFO" OMAP pins user_pad cfg ");
- if (sanity_check) {
- if ((omap_mux_pin_cfg[END_OF_TABLE].name[0] == 'E') &&
- (omap_mux_pin_cfg[END_OF_TABLE].name[1] == 'N') &&
- (omap_mux_pin_cfg[END_OF_TABLE].name[2] == 'D')) {
-
- cy_as_hal_print_message(KERN_INFO
- "table is good.\n");
- } else {
- cy_as_hal_print_message(KERN_WARNING
- "table is bad, fix it");
- }
- /*
- * do it only once
- */
- sanity_check = 0;
- }
-
- /*
- * get virtual address to the PADCNF_REG
- */
- reg_vma = (u32)iomux_vma + omap_mux_pin_cfg[pad_func_index].offset;
-
- /*
- * add additional USER PU/PD/EN flags
- */
- wr_val = omap_mux_pin_cfg[pad_func_index].mux_val;
- cur_val = IORD16(reg_vma);
-
- /*
- * PADCFG regs 16 bit long, packed into 32 bit regs,
- * can also be accessed as u16
- */
- IOWR16(reg_vma, wr_val);
- rdback_val = IORD16(reg_vma);
-
-	/*
-	 * return the value that was written; cur_val holds the
-	 * previous pad setting if a caller needs to compare
-	 */
- return wr_val;
-}
-
-#define BLKSZ_4K 0x1000
-
-/*
- * switch GPMC DATA bus mode
- */
-void cy_as_hal_gpmc_enable_16bit_bus(bool dbus16_enabled)
-{
- uint32_t tmp32;
-
- /*
- * disable gpmc CS4 operation 1st
- */
- tmp32 = gpmc_cs_read_reg(AST_GPMC_CS,
- GPMC_CS_CONFIG7) & ~GPMC_CONFIG7_CSVALID;
- gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7, tmp32);
-
- /*
- * GPMC NAND data bus can be 8 or 16 bit wide
- */
- if (dbus16_enabled) {
- DBGPRN("enabling 16 bit bus\n");
- gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
- (GPMC_CONFIG1_DEVICETYPE(2) |
- GPMC_CONFIG1_WAIT_PIN_SEL(2) |
- GPMC_CONFIG1_DEVICESIZE_16)
- );
- } else {
- DBGPRN(KERN_INFO "enabling 8 bit bus\n");
- gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
- (GPMC_CONFIG1_DEVICETYPE(2) |
- GPMC_CONFIG1_WAIT_PIN_SEL(2))
- );
- }
-
- /*
- * re-enable astoria CS operation on GPMC
- */
- gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
- (tmp32 | GPMC_CONFIG7_CSVALID));
-
- /*
-	 * remember the state
- */
- pnand_16bit = dbus16_enabled;
-}
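-
-/*
- * Usage sketch for the switch above: the HAL can move the GPMC data
- * bus between widths at run time, e.g.
- *
- *	cy_as_hal_gpmc_enable_16bit_bus(cy_true);	16-bit pNAND bus
- *	cy_as_hal_gpmc_enable_16bit_bus(cy_false);	back to 8-bit
- *
- * pnand_16bit then reflects the currently selected mode.
- */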
-
-static int cy_as_hal_gpmc_init(void)
-{
- u32 tmp32;
- int err;
- struct gpmc_timings timings;
-
- gpmc_base = (u32)ioremap_nocache(OMAP34XX_GPMC_BASE, BLKSZ_4K);
- DBGPRN(KERN_INFO "kernel has gpmc_base=%x , val@ the base=%x",
- gpmc_base, __raw_readl(gpmc_base)
- );
-
- /*
-	 * these globals are full VMAs derived from gpmc_base above
- */
- ncmd_reg_vma = GPMC_VMA(AST_GPMC_NAND_CMD);
- naddr_reg_vma = GPMC_VMA(AST_GPMC_NAND_ADDR);
- ndata_reg_vma = GPMC_VMA(AST_GPMC_NAND_DATA);
-
- /*
-	 * request a GPMC chip select for ASTORIA
- */
- if (gpmc_cs_request(AST_GPMC_CS, SZ_16M, (void *)&csa_phy) < 0) {
-		cy_as_hal_print_message(KERN_ERR "error: failed to request "
-			"ncs4 for ASTORIA\n");
- return -1;
- } else {
- DBGPRN(KERN_INFO "got phy_addr:%x for "
- "GPMC CS%d GPMC_CFGREG7[CS4]\n",
- csa_phy, AST_GPMC_CS);
- }
-
- /*
-	 * request a VM region of 4K addr space at the chip select 4 phy
-	 * address. technically we don't need it for NAND devices, but do
-	 * it anyway so that data read/write bus cycles can be triggered
-	 * by reading or writing this mem region
- */
- if (!request_mem_region(csa_phy, BLKSZ_4K, "AST_OMAP_HAL")) {
- err = -EBUSY;
- cy_as_hal_print_message(KERN_ERR "error MEM region "
- "request for phy_addr:%x failed\n",
- csa_phy);
- goto out_free_cs;
- }
-
- /*
- * REMAP mem region associated with our CS
- */
- gpmc_data_vma = (u32)ioremap_nocache(csa_phy, BLKSZ_4K);
- if (!gpmc_data_vma) {
- err = -ENOMEM;
- cy_as_hal_print_message(KERN_ERR "error- ioremap()"
- "for phy_addr:%x failed", csa_phy);
-
- goto out_release_mem_region;
- }
- cy_as_hal_print_message(KERN_INFO "ioremap(%x) returned vma=%x\n",
- csa_phy, gpmc_data_vma);
-
- gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
- (GPMC_CONFIG1_DEVICETYPE(2) |
- GPMC_CONFIG1_WAIT_PIN_SEL(2)));
-
- memset(&timings, 0, sizeof(timings));
-
- /* cs timing */
- timings.cs_on = WB_GPMC_CS_t_o_n;
- timings.cs_wr_off = WB_GPMC_BUSCYC_t;
- timings.cs_rd_off = WB_GPMC_BUSCYC_t;
-
- /* adv timing */
- timings.adv_on = WB_GPMC_ADV_t_o_n;
- timings.adv_rd_off = WB_GPMC_BUSCYC_t;
- timings.adv_wr_off = WB_GPMC_BUSCYC_t;
-
- /* oe timing */
- timings.oe_on = WB_GPMC_OE_t_o_n;
- timings.oe_off = WB_GPMC_OE_t_o_f_f;
- timings.access = WB_GPMC_RD_t_a_c_c;
- timings.rd_cycle = WB_GPMC_BUSCYC_t;
-
- /* we timing */
- timings.we_on = WB_GPMC_WE_t_o_n;
- timings.we_off = WB_GPMC_WE_t_o_f_f;
- timings.wr_access = WB_GPMC_WR_t_a_c_c;
- timings.wr_cycle = WB_GPMC_BUSCYC_t;
-
- timings.page_burst_access = WB_GPMC_BUSCYC_t;
- timings.wr_data_mux_bus = WB_GPMC_BUSCYC_t;
- gpmc_cs_set_timings(AST_GPMC_CS, &timings);
-
- cy_as_hal_print_omap_regs("GPMC_CONFIG", 1,
- GPMC_VMA(GPMC_CFG_REG(1, AST_GPMC_CS)), 7);
-
- /*
- * DISABLE cs4, NOTE GPMC REG7 is already configured
- * at this point by gpmc_cs_request
- */
- tmp32 = gpmc_cs_read_reg(AST_GPMC_CS, GPMC_CS_CONFIG7) &
- ~GPMC_CONFIG7_CSVALID;
- gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7, tmp32);
-
- /*
- * PROGRAM chip select Region, (see OMAP3430 TRM PAGE 1088)
- */
- gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
- (AS_CS_MASK | AS_CS_BADDR));
-
- /*
- * by default configure GPMC into 8 bit mode
- * (to match astoria default mode)
- */
- gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG1,
- (GPMC_CONFIG1_DEVICETYPE(2) |
- GPMC_CONFIG1_WAIT_PIN_SEL(2)));
-
- /*
- * ENABLE astoria cs operation on GPMC
- */
- gpmc_cs_write_reg(AST_GPMC_CS, GPMC_CS_CONFIG7,
- (tmp32 | GPMC_CONFIG7_CSVALID));
-
- /*
- * No method currently exists to write this register through GPMC APIs
- * need to change WAIT2 polarity
- */
- tmp32 = IORD32(GPMC_VMA(GPMC_CONFIG_REG));
- tmp32 = tmp32 | NAND_FORCE_POSTED_WRITE_B | 0x40;
- IOWR32(GPMC_VMA(GPMC_CONFIG_REG), tmp32);
-
- tmp32 = IORD32(GPMC_VMA(GPMC_CONFIG_REG));
- cy_as_hal_print_message("GPMC_CONFIG_REG=0x%x\n", tmp32);
-
- return 0;
-
-out_release_mem_region:
- release_mem_region(csa_phy, BLKSZ_4K);
-
-out_free_cs:
- gpmc_cs_free(AST_GPMC_CS);
-
- return err;
-}
-
-/*
- * west bridge astoria ISR (Interrupt handler)
- */
-static irqreturn_t cy_astoria_int_handler(int irq,
- void *dev_id, struct pt_regs *regs)
-{
- cy_as_omap_dev_kernel *dev_p;
- uint16_t read_val = 0;
- uint16_t mask_val = 0;
-
- /*
- * debug stuff, counts number of loops per one intr trigger
- */
- uint16_t drq_loop_cnt = 0;
- uint8_t irq_pin;
- /*
- * flags to watch
- */
- const uint16_t sentinel = (CY_AS_MEM_P0_INTR_REG_MCUINT |
- CY_AS_MEM_P0_INTR_REG_MBINT |
- CY_AS_MEM_P0_INTR_REG_PMINT |
- CY_AS_MEM_P0_INTR_REG_PLLLOCKINT);
-
- /*
- * sample IRQ pin level (just for statistics)
- */
- irq_pin = __gpio_get_value(AST_INT);
-
- /*
- * this one just for debugging
- */
- intr_sequence_num++;
-
- /*
- * astoria device handle
- */
- dev_p = dev_id;
-
- /*
- * read Astoria intr register
- */
- read_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
- CY_AS_MEM_P0_INTR_REG);
-
- /*
- * save current mask value
- */
- mask_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
- CY_AS_MEM_P0_INT_MASK_REG);
-
- DBGPRN("<1>HAL__intr__enter:_seq:%d, P0_INTR_REG:%x\n",
- intr_sequence_num, read_val);
-
- /*
- * Disable WB interrupt signal generation while we are in ISR
- */
- cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
- CY_AS_MEM_P0_INT_MASK_REG, 0x0000);
-
- /*
- * this is a DRQ Interrupt
- */
- if (read_val & CY_AS_MEM_P0_INTR_REG_DRQINT) {
-
- do {
- /*
- * handle DRQ interrupt
- */
- drq_loop_cnt++;
-
- cy_handle_d_r_q_interrupt(dev_p);
-
- /*
- * spending too much time in ISR may impact
- * average system performance
- */
- if (drq_loop_cnt >= MAX_DRQ_LOOPS_IN_ISR)
- break;
-
- /*
- * Keep processing if there is another DRQ int flag
- */
- } while (cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
- CY_AS_MEM_P0_INTR_REG) &
- CY_AS_MEM_P0_INTR_REG_DRQINT);
- }
-
- if (read_val & sentinel)
- cy_as_intr_service_interrupt((cy_as_hal_device_tag)dev_p);
-
- DBGPRN("<1>_hal:_intr__exit seq:%d, mask=%4.4x,"
- "int_pin:%d DRQ_jobs:%d\n",
- intr_sequence_num,
- mask_val,
- irq_pin,
- drq_loop_cnt);
-
- /*
- * re-enable WB hw interrupts
- */
- cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
- CY_AS_MEM_P0_INT_MASK_REG, mask_val);
-
- return IRQ_HANDLED;
-}
-
-static int cy_as_hal_configure_interrupts(void *dev_p)
-{
- int result;
- int irq_pin = AST_INT;
-
- irq_set_irq_type(OMAP_GPIO_IRQ(irq_pin), IRQ_TYPE_LEVEL_LOW);
-
- /*
- * for shared IRQs we must provide a non-NULL device ptr,
- * otherwise the interrupt won't register
- */
- result = request_irq(OMAP_GPIO_IRQ(irq_pin),
- (irq_handler_t)cy_astoria_int_handler,
- IRQF_SHARED, "AST_INT#", dev_p);
-
- if (result == 0) {
- /*
- * OMAP_GPIO_IRQ(irq_pin) - omap logical IRQ number
- * assigned to this interrupt
- * OMAP_GPIO_BIT(AST_INT, GPIO_IRQENABLE1) - print status
- * of AST_INT GPIO IRQ_ENABLE FLAG
- */
- cy_as_hal_print_message(KERN_INFO"AST_INT omap_pin:"
- "%d assigned IRQ #%d IRQEN1=%d\n",
- irq_pin,
- OMAP_GPIO_IRQ(irq_pin),
- OMAP_GPIO_BIT(AST_INT, GPIO_IRQENABLE1)
- );
- } else {
- cy_as_hal_print_message("cyasomaphal: interrupt "
- "failed to register\n");
- gpio_free(irq_pin);
- cy_as_hal_print_message(KERN_WARNING
- "ASTORIA: can't get assigned IRQ"
- "%i for INT#\n", OMAP_GPIO_IRQ(irq_pin));
- }
-
- return result;
-}
-
-/*
- * initialize OMAP pads/pins to user defined functions
- */
-static void cy_as_hal_init_user_pads(user_pad_cfg_t *pad_cfg_tab)
-{
- /*
- * browse through the table and initialize the pins
- */
- u32 in_level = 0;
- u16 tmp16, mux_val;
-
- while (pad_cfg_tab->name != NULL) {
-
- if (gpio_request(pad_cfg_tab->pin_num, NULL) == 0) {
-
- pad_cfg_tab->valid = 1;
- mux_val = omap_cfg_reg_L(pad_cfg_tab->mux_func);
-
- /*
- * always set drv level before changing out direction
- */
- __gpio_set_value(pad_cfg_tab->pin_num,
- pad_cfg_tab->drv);
-
- /*
- * "0" - OUT, "1", input omap_set_gpio_direction
- * (pad_cfg_tab->pin_num, pad_cfg_tab->dir);
- */
- if (pad_cfg_tab->dir)
- gpio_direction_input(pad_cfg_tab->pin_num);
- else
- gpio_direction_output(pad_cfg_tab->pin_num,
- pad_cfg_tab->drv);
-
- /* sample the pin */
- in_level = __gpio_get_value(pad_cfg_tab->pin_num);
-
- cy_as_hal_print_message(KERN_INFO "configured %s to "
- "OMAP pad_%d, DIR=%d "
- "DOUT=%d, DIN=%d\n",
- pad_cfg_tab->name,
- pad_cfg_tab->pin_num,
- pad_cfg_tab->dir,
- pad_cfg_tab->drv,
- in_level
- );
- } else {
- /*
- * get the pad_mux value to check on the pin_function
- */
- cy_as_hal_print_message(KERN_INFO "couldn't cfg pin %d"
- "for signal %s, its already taken\n",
- pad_cfg_tab->pin_num,
- pad_cfg_tab->name);
- }
-
- tmp16 = *(u16 *)PADCFG_VMA
- (omap_mux_pin_cfg[pad_cfg_tab->mux_func].offset);
-
- cy_as_hal_print_message(KERN_INFO "GPIO_%d(PAD_CFG=%x,OE=%d"
- "DOUT=%d, DIN=%d IRQEN=%d)\n\n",
- pad_cfg_tab->pin_num, tmp16,
- OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_OE),
- OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_DATA_OUT),
- OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_DATA_IN),
- OMAP_GPIO_BIT(pad_cfg_tab->pin_num, GPIO_IRQENABLE1)
- );
-
- /*
- * next pad_cfg descriptor
- */
- pad_cfg_tab++;
- }
-
- cy_as_hal_print_message(KERN_INFO"pads configured\n");
-}
-
-
-/*
- * release gpios taken by the module
- */
-static void cy_as_hal_release_user_pads(user_pad_cfg_t *pad_cfg_tab)
-{
- while (pad_cfg_tab->name != NULL) {
-
- if (pad_cfg_tab->valid) {
- gpio_free(pad_cfg_tab->pin_num);
- pad_cfg_tab->valid = 0;
- cy_as_hal_print_message(KERN_INFO "GPIO_%d "
- "released from %s\n",
- pad_cfg_tab->pin_num,
- pad_cfg_tab->name);
- } else {
- cy_as_hal_print_message(KERN_INFO "no release "
- "for %s, GPIO_%d, wasn't acquired\n",
- pad_cfg_tab->name,
- pad_cfg_tab->pin_num);
- }
- pad_cfg_tab++;
- }
-}
-
-void cy_as_hal_config_c_s_mux(void)
-{
- /*
- * FORCE the GPMC CS4 pin (it is in use by the zoom system)
- */
- omap_cfg_reg_L(T8_OMAP3430_GPMC_n_c_s4);
-}
-EXPORT_SYMBOL(cy_as_hal_config_c_s_mux);
-
-/*
- * inits all omap h/w
- */
-uint32_t cy_as_hal_processor_hw_init(void)
-{
- int i, err;
-
- cy_as_hal_print_message(KERN_INFO "init OMAP3430 hw...\n");
-
- iomux_vma = (u32)ioremap_nocache(
- (u32)CTLPADCONF_BASE_ADDR, CTLPADCONF_SIZE);
- cy_as_hal_print_message(KERN_INFO "PADCONF_VMA=%x val=%x\n",
- iomux_vma, IORD32(iomux_vma));
-
- /*
- * remap gpio banks
- */
- for (i = 0; i < 6; i++) {
- gpio_vma_tab[i].virt_addr = (u32)ioremap_nocache(
- gpio_vma_tab[i].phy_addr,
- gpio_vma_tab[i].size);
-
- cy_as_hal_print_message(KERN_INFO "%s virt_addr=%x\n",
- gpio_vma_tab[i].name,
- (u32)gpio_vma_tab[i].virt_addr);
- }
-
- /*
- * force OMAP_GPIO_126 to released state,
- * will be configured to drive reset
- */
- gpio_free(AST_RESET);
-
- /*
- * same thing with the Astoria CS pin
- */
- gpio_free(AST_CS);
-
- /*
- * initialize all the OMAP pads connected to astoria
- */
- cy_as_hal_init_user_pads(user_pad_cfg);
-
- err = cy_as_hal_gpmc_init();
- if (err < 0)
- cy_as_hal_print_message(KERN_INFO"gpmc init failed:%d", err);
-
- cy_as_hal_config_c_s_mux();
-
- return gpmc_data_vma;
-}
-EXPORT_SYMBOL(cy_as_hal_processor_hw_init);
-
-void cy_as_hal_omap_hardware_deinit(cy_as_omap_dev_kernel *dev_p)
-{
- /*
- * free omap hw resources
- */
- if (gpmc_data_vma != 0)
- iounmap((void *)gpmc_data_vma);
-
- if (csa_phy != 0)
- release_mem_region(csa_phy, BLKSZ_4K);
-
- gpmc_cs_free(AST_GPMC_CS);
-
- free_irq(OMAP_GPIO_IRQ(AST_INT), dev_p);
-
- cy_as_hal_release_user_pads(user_pad_cfg);
-}
-
-/*
- * These are the functions that are not part of the
- * HAL layer, but are required to be called for this HAL
- */
-
-/*
- * Called On AstDevice LKM exit
- */
-int stop_o_m_a_p_kernel(const char *pgm, cy_as_hal_device_tag tag)
-{
- cy_as_omap_dev_kernel *dev_p = (cy_as_omap_dev_kernel *)tag;
-
- /*
- * TODO: Need to disable WB interrupt handler 1st
- */
- if (0 == dev_p)
- return 1;
-
- cy_as_hal_print_message("<1>_stopping OMAP34xx HAL layer object\n");
- if (dev_p->m_sig != CY_AS_OMAP_KERNEL_HAL_SIG) {
- cy_as_hal_print_message("<1>%s: %s: bad HAL tag\n",
- pgm, __func__);
- return 1;
- }
-
- /*
- * disable interrupt
- */
- cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
- CY_AS_MEM_P0_INT_MASK_REG, 0x0000);
-
-#if 0
- if (dev_p->thread_flag == 0) {
- dev_p->thread_flag = 1;
- wait_for_completion(&dev_p->thread_complete);
- cy_as_hal_print_message("cyasomaphal:"
- "done cleaning thread\n");
- cy_as_hal_destroy_sleep_channel(&dev_p->thread_sc);
- }
-#endif
-
- cy_as_hal_omap_hardware_deinit(dev_p);
-
- /*
- * Rearrange the list
- */
- if (m_omap_list_p == dev_p)
- m_omap_list_p = dev_p->m_next_p;
-
- cy_as_hal_free(dev_p);
-
- cy_as_hal_print_message(KERN_INFO"OMAP_kernel_hal stopped\n");
- return 0;
-}
-
-int omap_start_intr(cy_as_hal_device_tag tag)
-{
- cy_as_omap_dev_kernel *dev_p = (cy_as_omap_dev_kernel *)tag;
- int ret = 0;
- const uint16_t mask = CY_AS_MEM_P0_INTR_REG_DRQINT |
- CY_AS_MEM_P0_INTR_REG_MBINT;
-
- /*
- * register for interrupts
- */
- ret = cy_as_hal_configure_interrupts(dev_p);
-
- /*
- * enable only MBox & DRQ interrupts for now
- */
- cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
- CY_AS_MEM_P0_INT_MASK_REG, mask);
-
- return 1;
-}
-
-/*
- * Below are the functions that communicate with the WestBridge device.
- * These are system dependent and must be defined by the HAL layer
- * for a given system.
- */
-
-/*
- * GPMC NAND command+addr write phase
- */
-static inline void nand_cmd_n_addr(u8 cmdb1, u16 col_addr, u32 row_addr)
-{
- /*
- * byte order on the bus <cmd> <CA0,CA1,RA0,RA1, RA2>
- */
- u32 tmpa32 = ((row_addr << 16) | col_addr);
- u8 RA2 = (u8)(row_addr >> 16);
-
- if (!pnand_16bit) {
- /*
- * GPMC PNAND 8bit BUS
- */
- /*
- * CMD1
- */
- IOWR8(ncmd_reg_vma, cmdb1);
-
- /*
- *pnand bus: <CA0,CA1,RA0,RA1>
- */
- IOWR32(naddr_reg_vma, tmpa32);
-
- /*
- * <RA2> , always zero
- */
- IOWR8(naddr_reg_vma, RA2);
-
- } else {
- /*
- * GPMC PNAND 16bit BUS , in 16 bit mode CMD
- * and ADDR sent on [d7..d0]
- */
- uint8_t CA0, CA1, RA0, RA1;
- CA0 = tmpa32 & 0x000000ff;
- CA1 = (tmpa32 >> 8) & 0x000000ff;
- RA0 = (tmpa32 >> 16) & 0x000000ff;
- RA1 = (tmpa32 >> 24) & 0x000000ff;
-
- /*
- * can't use 32 bit writes here; the omap will not serialize
- * them onto the lower half in 16 bit mode
- */
-
- /*
- *pnand bus: <CMD1, CA0,CA1,RA0,RA1, RA2 (always zero)>
- */
- IOWR8(ncmd_reg_vma, cmdb1);
- IOWR8(naddr_reg_vma, CA0);
- IOWR8(naddr_reg_vma, CA1);
- IOWR8(naddr_reg_vma, RA0);
- IOWR8(naddr_reg_vma, RA1);
- IOWR8(naddr_reg_vma, RA2);
- }
-}
-
-/*
- * spin until r/b goes high
- */
-inline int wait_rn_b_high(void)
-{
- u32 w_spins = 0;
-
- /*
- * TODO: note R/b may go low here, need to spin until high
- * while (omap_get_gpio_datain(AST_RnB) == 0) {
- * w_spins++;
- * }
- * if (OMAP_GPIO_BIT(AST_RnB, GPIO_DATA_IN) == 0) {
- *
- * while (OMAP_GPIO_BIT(AST_RnB, GPIO_DATA_IN) == 0) {
- * w_spins++;
- * }
- * printk("<1>RnB=0!:%d\n",w_spins);
- * }
- */
- return w_spins;
-}
-
-#ifdef ENABLE_GPMC_PF_ENGINE
-/* #define PFE_READ_DEBUG
- * PNAND block read with OMAP PFE enabled
- * status: Not tested, NW, broken, etc.
- */
-static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
-{
- uint16_t w32cnt;
- uint32_t *ptr32;
- uint8_t *ptr8;
- uint8_t bytes_in_fifo;
-
- /* debug vars*/
-#ifdef PFE_READ_DEBUG
- uint32_t loop_limit;
- uint16_t bytes_read = 0;
-#endif
-
- /*
- * configure the prefetch engine
- */
- uint32_t tmp32;
- uint32_t pfe_status;
-
- /*
- * DISABLE the prefetch engine 1st, this is
- * in case the engine is already enabled
- */
- IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x0);
- IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG1), GPMC_PREFETCH_CONFIG1_VAL);
- IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG2), count);
-
-#ifdef PFE_READ_DEBUG
- tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_CONFIG1));
- if (tmp32 != GPMC_PREFETCH_CONFIG1_VAL) {
- printk(KERN_INFO "<1> prefetch is CONFIG1 read val:%8.8x, != VAL written:%8.8x\n",
- tmp32, GPMC_PREFETCH_CONFIG1_VAL);
- tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
- printk(KERN_INFO "<1> GPMC_PREFETCH_STATUS : %8.8x\n", tmp32);
- }
-
- /*
- *sanity check 2
- */
- tmp32 = IORD32(GPMC_VMA(GPMC_PREFETCH_CONFIG2));
- if (tmp32 != (count))
- printk(KERN_INFO "<1> GPMC_PREFETCH_CONFIG2 read val:%d, "
- "!= VAL written:%d\n", tmp32, count);
-#endif
-
- /*
- * ISSUE PNAND CMD+ADDR, note gpmc puts 32b words
- * on the bus least sig. byte 1st
- */
- nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
-
- IOWR8(ncmd_reg_vma, RDPAGE_B2);
-
- /*
- * start the prefetch engine
- */
- IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x1);
-
- ptr32 = buff;
-
- while (1) {
- /*
- * GPMC PFE service loop
- */
- do {
- /*
- * spin until PFE fetched some
- * PNAND bus words in the FIFO
- */
- pfe_status = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
- bytes_in_fifo = (pfe_status >> 24) & 0x7f;
- } while (bytes_in_fifo == 0);
-
- /* whole 32 bit words in fifo */
- w32cnt = bytes_in_fifo >> 2;
-
-#if 0
- /*
- * NOTE: FIFO_PTR indicates the number of bytes
- * already received in the FIFO and available to be read
- * by DMA or the MPU, whereas COUNTVAL indicates the number of BUS
- * words yet to be read from the PNAND bus
- */
- printk(KERN_ERR "<1> got PF_STATUS:%8.8x FIFO_PTR:%d, COUNTVAL:%d, w32cnt:%d\n",
- pfe_status, bytes_in_fifo,
- (pfe_status & 0x3fff), w32cnt);
-#endif
-
- while (w32cnt--)
- *ptr32++ = IORD32(gpmc_data_vma);
-
- if ((pfe_status & 0x3fff) == 0) {
- /*
- * PFE access engine done; there may still be data left
- * over in the FIFO, so re-read the FIFO BYTE counter
- * (check for leftovers from the 32 bit read accesses above)
- */
- bytes_in_fifo = (IORD32(
- GPMC_VMA(GPMC_PREFETCH_STATUS)) >> 24) & 0x7f;
-
- /*
- * NOTE: we may still have up to one word left in the fifo;
- * read it out
- */
- ptr8 = ptr32;
- switch (bytes_in_fifo) {
-
- case 0:
- /*
- * nothing to do we already read the
- * FIFO out with 32 bit accesses
- */
- break;
- case 1:
- /*
- * this is only possible
- * for 8 bit pNAND
- */
- *ptr8 = IORD8(gpmc_data_vma);
- break;
-
- case 2:
- /*
- * this one can occur in either modes
- */
- *(uint16_t *)ptr8 = IORD16(gpmc_data_vma);
- break;
-
- case 3:
- /*
- * this is only possible for 8 bit pNAND
- */
- *(uint16_t *)ptr8 = IORD16(gpmc_data_vma);
- ptr8 += 2;
- *ptr8 = IORD8(gpmc_data_vma);
- break;
-
- case 4:
- /*
- * shouldn't happen, but has been seen
- * in 8 bit mode
- */
- *ptr32 = IORD32(gpmc_data_vma);
- break;
-
- default:
- printk(KERN_ERR"<1>_error: PFE FIFO bytes leftover is not read:%d\n",
- bytes_in_fifo);
- break;
- }
- /*
- * read is completed, get out of the while(1) loop
- */
- break;
- }
- }
-}
-#endif
-
-#ifdef PFE_LBD_READ_V2
-/*
- * PFE engine assisted reads in 64 byte blocks
- */
-static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
-{
- uint8_t rd_cnt;
- uint32_t *ptr32;
- uint8_t *ptr8;
- uint16_t reminder;
- uint32_t pfe_status;
-
- /*
- * ISSUE PNAND CMD+ADDR
- * note gpmc puts 32b words on the bus least sig. byte 1st
- */
- nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
- IOWR8(ncmd_reg_vma, RDPAGE_B2);
-
- /*
- * setup PFE block
- * count - number of bytes for the OMAP to access on the pnand bus
- */
-
- IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG1), GPMC_PREFETCH_CONFIG1_VAL);
- IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG2), count);
- IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), 0x1);
-
- ptr32 = buff;
-
- do {
- pfe_status = IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS));
- rd_cnt = pfe_status >> (24+2);
-
- while (rd_cnt--)
- *ptr32++ = IORD32(gpmc_data_vma);
-
- } while (pfe_status & 0x3fff);
-
- /*
- * read out the leftover
- */
- ptr8 = ptr32;
- rd_cnt = (IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS)) >> 24) & 0x7f;
-
- while (rd_cnt--)
- *ptr8++ = IORD8(gpmc_data_vma);
-}
-#endif
-
-#ifdef PNAND_LBD_READ_NO_PFE
-/*
- * Endpoint buffer read w/o OMAP GPMC Prefetch Engine
- * the original working code; works at max speed for 8 bit xfers,
- * while for 16 bit the bus diagram has gaps
- */
-static void p_nand_lbd_read(u16 col_addr, u32 row_addr, u16 count, void *buff)
-{
- uint16_t w32cnt;
- uint32_t *ptr32;
- uint16_t *ptr16;
- uint16_t remainder;
-
- DBGPRN("<1> %s(): NO_PFE\n", __func__);
-
- ptr32 = buff;
- /* number of whole 32 bit words in the transfer */
- w32cnt = count >> 2;
-
- /* remainder, in bytes(0..3) */
- remainder = count & 03;
-
- /*
- * note gpmc puts 32b words on the bus least sig. byte 1st
- */
- nand_cmd_n_addr(RDPAGE_B1, col_addr, row_addr);
- IOWR8(ncmd_reg_vma, RDPAGE_B2);
-
- /*
- * read data by 32 bit chunks
- */
- while (w32cnt--)
- *ptr32++ = IORD32(ndata_reg_vma);
-
- /*
- * now do the remainder(it can be 0, 1, 2 or 3)
- * same code for both 8 & 16 bit bus
- * do 1 or 2 MORE words
- */
- ptr16 = (uint16_t *)ptr32;
-
- switch (remainder) {
- case 1:
- /* fall through: read one 16 bit word -
- * in 8 bit mode we need to read an even number of bytes
- */
- case 2:
- *ptr16 = IORD16(ndata_reg_vma);
- break;
- case 3:
- /*
- * for 3 bytes read 2 16 bit words
- */
- *ptr16++ = IORD16(ndata_reg_vma);
- *ptr16 = IORD16(ndata_reg_vma);
- break;
- default:
- /*
- * remainder is 0
- */
- break;
- }
-}
-#endif
-
-/*
- * uses LBD mode to write N bytes into astoria
- * Status: Working, however there is a 150ns idle
- * time after every 2 (16 bit) or 4 (8 bit) bus cycles
- */
-static void p_nand_lbd_write(u16 col_addr, u32 row_addr, u16 count, void *buff)
-{
- uint16_t w32cnt;
- uint16_t remainder;
- uint8_t *ptr8;
- uint16_t *ptr16;
- uint32_t *ptr32;
-
- remainder = count & 03;
- w32cnt = count >> 2;
- ptr32 = buff;
- ptr8 = buff;
-
- /*
- * send: CMDB1, CA0,CA1,RA0,RA1,RA2
- */
- nand_cmd_n_addr(PGMPAGE_B1, col_addr, row_addr);
-
- /*
- * blast the data out in 32bit chunks
- */
- while (w32cnt--)
- IOWR32(ndata_reg_vma, *ptr32++);
-
- /*
- * do the remainder if there is one;
- * same handling for both 8 & 16 bit pnand mode
- */
- ptr16 = (uint16_t *)ptr32; /* do 1 or 2 words */
-
- switch (remainder) {
- case 1:
- /*
- * fall through: write one 16 bit word
- */
- case 2:
- IOWR16(ndata_reg_vma, *ptr16);
- break;
-
- case 3:
- /*
- * for 3 bytes write 2 16 bit words
- */
- IOWR16(ndata_reg_vma, *ptr16++);
- IOWR16(ndata_reg_vma, *ptr16);
- break;
- default:
- /*
- * remainder is 0
- */
- break;
- }
- /*
- * finally issue a PGM cmd
- */
- IOWR8(ncmd_reg_vma, PGMPAGE_B2);
-}
-
-/*
- * write Astoria register
- */
-static inline void ast_p_nand_casdi_write(u8 reg_addr8, u16 data)
-{
- unsigned long flags;
- u16 addr16;
- /*
- * throw an error if called from multiple threads
- */
- static atomic_t rdreg_usage_cnt = { 0 };
-
- /*
- * disable interrupts
- */
- local_irq_save(flags);
-
- if (atomic_read(&rdreg_usage_cnt) != 0) {
- cy_as_hal_print_message(KERN_ERR "cy_as_omap_hal:"
- "* cy_as_hal_write_register usage:%d\n",
- atomic_read(&rdreg_usage_cnt));
- }
-
- atomic_inc(&rdreg_usage_cnt);
-
- /*
- * 2 flavors of GPMC -> PNAND access
- */
- if (pnand_16bit) {
- /*
- * 16 BIT gpmc NAND mode
- */
-
- /*
- * CMD1, CA1, CA2,
- */
- IOWR8(ncmd_reg_vma, 0x85);
- IOWR8(naddr_reg_vma, reg_addr8);
- IOWR8(naddr_reg_vma, 0x0c);
-
- /*
- * this should be sent on the 16 bit bus
- */
- IOWR16(ndata_reg_vma, data);
- } else {
- /*
- * 8 bit nand mode: the GPMC will automatically
- * serialize 16 bit or 32 bit writes into
- * 8 bit ones onto the lower 8 bits, in LE order
- */
- addr16 = 0x0c00 | reg_addr8;
-
- /*
- * CMD1, CA1, CA2,
- */
- IOWR8(ncmd_reg_vma, 0x85);
- IOWR16(naddr_reg_vma, addr16);
- IOWR16(ndata_reg_vma, data);
- }
-
- /*
- * re-enable interrupts
- */
- atomic_dec(&rdreg_usage_cnt);
- local_irq_restore(flags);
-}
-
-
-/*
- * read astoria register via pNAND interface
- */
-static inline u16 ast_p_nand_casdo_read(u8 reg_addr8)
-{
- u16 data;
- u16 addr16;
- unsigned long flags;
- /*
- * throw an error if called from multiple threads
- */
- static atomic_t wrreg_usage_cnt = { 0 };
-
- /*
- * disable interrupts
- */
- local_irq_save(flags);
-
- if (atomic_read(&wrreg_usage_cnt) != 0) {
- /*
- * if it gets here (from other threads), this function
- * needs spin_lock_irqsave() protection
- */
- cy_as_hal_print_message(KERN_ERR"cy_as_omap_hal: "
- "cy_as_hal_write_register usage:%d\n",
- atomic_read(&wrreg_usage_cnt));
- }
- atomic_inc(&wrreg_usage_cnt);
-
- /*
- * 2 flavors of GPMC -> PNAND access
- */
- if (pnand_16bit) {
- /*
- * 16 BIT gpmc NAND mode
- * CMD1, CA1, CA2,
- */
-
- IOWR8(ncmd_reg_vma, 0x05);
- IOWR8(naddr_reg_vma, reg_addr8);
- IOWR8(naddr_reg_vma, 0x0c);
- IOWR8(ncmd_reg_vma, 0x00E0);
-
- udelay(1);
-
- /*
- * much faster through the gPMC Register space
- */
- data = IORD16(ndata_reg_vma);
- } else {
- /*
- * 8 BIT gpmc NAND mode
- * CMD1, CA1, CA2, CMD2
- */
- addr16 = 0x0c00 | reg_addr8;
- IOWR8(ncmd_reg_vma, 0x05);
- IOWR16(naddr_reg_vma, addr16);
- IOWR8(ncmd_reg_vma, 0xE0);
- udelay(1);
- data = IORD16(ndata_reg_vma);
- }
-
- /*
- * re-enable interrupts
- */
- atomic_dec(&wrreg_usage_cnt);
- local_irq_restore(flags);
-
- return data;
-}
-
-
-/*
- * This function must be defined to write a register within the WestBridge
- * device. The addr value is the address of the register to write with
- * respect to the base address of the WestBridge device.
- */
-void cy_as_hal_write_register(
- cy_as_hal_device_tag tag,
- uint16_t addr, uint16_t data)
-{
- ast_p_nand_casdi_write((u8)addr, data);
-}
-
-/*
- * This function must be defined to read a register from the WestBridge
- * device. The addr value is the address of the register to read with
- * respect to the base address of the WestBridge device.
- */
-uint16_t cy_as_hal_read_register(cy_as_hal_device_tag tag, uint16_t addr)
-{
- uint16_t data = 0;
-
- /*
- * READ ASTORIA REGISTER USING CASDO
- */
- data = ast_p_nand_casdo_read((u8)addr);
-
- return data;
-}
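A hedged illustration of how the two accessors above compose (the helper name below is hypothetical and is not part of the original driver): a read-modify-write of an Astoria register can be built directly on the CASDO read and CASDI write paths.

static void cy_as_hal_update_register(cy_as_hal_device_tag tag,
		uint16_t addr, uint16_t clear_mask, uint16_t set_mask)
{
	/* read the current value over the pNAND interface (CASDO cycle) */
	uint16_t v = cy_as_hal_read_register(tag, addr);

	/* clear the requested bits, set the new ones, write back (CASDI cycle) */
	v = (uint16_t)((v & (uint16_t)~clear_mask) | set_mask);
	cy_as_hal_write_register(tag, addr, v);
}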
-
-/*
- * preps Ep pointers & data counters for next packet
- * (fragment of the request) xfer returns true if
- * there is a next transfer, and false if all bytes in
- * current request have been xfered
- */
-static inline bool prep_for_next_xfer(cy_as_hal_device_tag tag, uint8_t ep)
-{
-
- if (!end_points[ep].sg_list_enabled) {
- /*
- * no further transfers for non storage EPs
- * (like EP2 during firmware download, done
- * in 64 byte chunks)
- */
- if (end_points[ep].req_xfer_cnt >= end_points[ep].req_length) {
- DBGPRN("<1> %s():RQ sz:%d non-_sg EP:%d completed\n",
- __func__, end_points[ep].req_length, ep);
-
- /*
- * no more transfers, we are done with the request
- */
- return false;
- }
-
- /*
- * calculate size of the next DMA xfer, corner
- * case for non-storage EPs where transfer size
- * is not equal to N * HAL_DMA_PKT_SZ
- */
- if ((end_points[ep].req_length - end_points[ep].req_xfer_cnt)
- >= HAL_DMA_PKT_SZ) {
- end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
- } else {
- /*
- * that would be the last chunk less
- * than P-port max size
- */
- end_points[ep].dma_xfer_sz = end_points[ep].req_length -
- end_points[ep].req_xfer_cnt;
- }
-
- return true;
- }
-
- /*
- * for SG_list assisted dma xfers
- * are we done with current SG ?
- */
- if (end_points[ep].seg_xfer_cnt == end_points[ep].sg_p->length) {
- /*
- * was it the Last SG segment on the list ?
- */
- if (sg_is_last(end_points[ep].sg_p)) {
- DBGPRN("<1> %s: EP:%d completed,"
- "%d bytes xfered\n",
- __func__,
- ep,
- end_points[ep].req_xfer_cnt
- );
-
- return false;
- } else {
- /*
- * There are more SG segments in current
- * request's sg list setup new segment
- */
-
- end_points[ep].seg_xfer_cnt = 0;
- end_points[ep].sg_p = sg_next(end_points[ep].sg_p);
- /* set data pointer for next DMA sg transfer*/
- end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
- DBGPRN("<1> %s new SG:_va:%p\n\n",
- __func__, end_points[ep].data_p);
- }
-
- }
-
- /*
- * for sg list xfers it will always be 512 or 1024
- */
- end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
-
- /*
- * next transfer is required
- */
-
- return true;
-}
-
-/*
- * Astoria DMA read request, APP_CPU reads from WB ep buffer
- */
-static void cy_service_e_p_dma_read_request(
- cy_as_omap_dev_kernel *dev_p, uint8_t ep)
-{
- cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p;
- uint16_t v, size;
- void *dptr;
- uint16_t col_addr = 0x0000;
- uint32_t row_addr = CYAS_DEV_CALC_EP_ADDR(ep);
- uint16_t ep_dma_reg = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
-
- /*
- * get the XFER size from the WB EP DMA REGISTER
- */
- v = cy_as_hal_read_register(tag, ep_dma_reg);
-
- /*
- * amount of data in EP buff in bytes
- */
- size = v & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK;
-
- /*
- * memory pointer for this DMA packet xfer (sub_segment)
- */
- dptr = end_points[ep].data_p;
-
- DBGPRN("<1>HAL:_svc_dma_read on EP_%d sz:%d, intr_seq:%d, dptr:%p\n",
- ep,
- size,
- intr_sequence_num,
- dptr
- );
-
- cy_as_hal_assert(size != 0);
-
- if (size) {
- /*
- * the actual WB-->OMAP memory "soft" DMA xfer
- */
- p_nand_lbd_read(col_addr, row_addr, size, dptr);
- }
-
- /*
- * clear DMAVALID bit indicating that the data has been read
- */
- cy_as_hal_write_register(tag, ep_dma_reg, 0);
-
- end_points[ep].seg_xfer_cnt += size;
- end_points[ep].req_xfer_cnt += size;
-
- /*
- * pre-advance data pointer (if it's outside the sg
- * list it will be reset anyway)
- */
- end_points[ep].data_p += size;
-
- if (prep_for_next_xfer(tag, ep)) {
- /*
- * we have more data to read in this request,
- * setup the next dma packet and tell WB how much
- * data we are going to xfer next
- */
- v = end_points[ep].dma_xfer_sz/*HAL_DMA_PKT_SZ*/ |
- CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
- cy_as_hal_write_register(tag, ep_dma_reg, v);
- } else {
- end_points[ep].pending = cy_false;
- end_points[ep].type = cy_as_hal_none;
- end_points[ep].buffer_valid = cy_false;
-
- /*
- * notify the API that we are done with rq on this EP
- */
- if (callback) {
- DBGPRN("<1>trigg rd_dma completion cb: xfer_sz:%d\n",
- end_points[ep].req_xfer_cnt);
- callback(tag, ep,
- end_points[ep].req_xfer_cnt,
- CY_AS_ERROR_SUCCESS);
- }
- }
-}
-
-/*
- * omap_cpu needs to transfer data to ASTORIA EP buffer
- */
-static void cy_service_e_p_dma_write_request(
- cy_as_omap_dev_kernel *dev_p, uint8_t ep)
-{
- uint16_t addr;
- uint16_t v = 0;
- uint32_t size;
- uint16_t col_addr = 0x0000;
- uint32_t row_addr = CYAS_DEV_CALC_EP_ADDR(ep);
- void *dptr;
-
- cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p;
- /*
- * note: size here is the size of the dma transfer; it could be
- * anything > 0 and up to the P_PORT packet size
- */
- size = end_points[ep].dma_xfer_sz;
- dptr = end_points[ep].data_p;
-
- /*
- * perform the soft DMA transfer, soft in this case
- */
- if (size)
- p_nand_lbd_write(col_addr, row_addr, size, dptr);
-
- end_points[ep].seg_xfer_cnt += size;
- end_points[ep].req_xfer_cnt += size;
- /*
- * pre-advance data pointer
- * (if it's outside sg list it will be reset anyway)
- */
- end_points[ep].data_p += size;
-
- /*
- * now clear DMAVAL bit to indicate we are done
- * transferring data and that the data can now be
- * sent via USB to the USB host, sent to storage,
- * or used internally.
- */
-
- addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
- cy_as_hal_write_register(tag, addr, size);
-
- /*
- * finally, tell the USB subsystem that the
- * data is gone and we can accept the
- * next request if one exists.
- */
- if (prep_for_next_xfer(tag, ep)) {
- /*
- * There is more data to go. Re-init the WestBridge DMA side
- */
- v = end_points[ep].dma_xfer_sz |
- CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
- cy_as_hal_write_register(tag, addr, v);
- } else {
-
- end_points[ep].pending = cy_false;
- end_points[ep].type = cy_as_hal_none;
- end_points[ep].buffer_valid = cy_false;
-
- /*
- * notify the API that we are done with rq on this EP
- */
- if (callback) {
- /*
- * this callback will wake up the process that might be
- * sleeping on the EP which data is being transferred
- */
- callback(tag, ep,
- end_points[ep].req_xfer_cnt,
- CY_AS_ERROR_SUCCESS);
- }
- }
-}
-
-/*
- * HANDLE DRQINT from Astoria (called in AS_Intr context
- */
-static void cy_handle_d_r_q_interrupt(cy_as_omap_dev_kernel *dev_p)
-{
- uint16_t v;
- static uint8_t service_ep = 2;
-
- /*
- * We've got DRQ INT, read DRQ STATUS Register */
- v = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
- CY_AS_MEM_P0_DRQ);
-
- if (v == 0) {
-#ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("stray DRQ interrupt detected\n");
-#endif
- return;
- }
-
- /*
- * Now, pick a given DMA request to handle, for now, we just
- * go round robin. Each bit position in the service_mask
- * represents an endpoint from EP2 to EP15. We rotate through
- * each of the endpoints to find one that needs to be serviced.
- */
- while ((v & (1 << service_ep)) == 0) {
-
- if (service_ep == 15)
- service_ep = 2;
- else
- service_ep++;
- }
-
- if (end_points[service_ep].type == cy_as_hal_write) {
- /*
- * handle DMA WRITE REQUEST: app_cpu will
- * write data into astoria EP buffer
- */
- cy_service_e_p_dma_write_request(dev_p, service_ep);
- } else if (end_points[service_ep].type == cy_as_hal_read) {
- /*
- * handle DMA READ REQUEST: cpu will
- * read EP buffer from Astoria
- */
- cy_service_e_p_dma_read_request(dev_p, service_ep);
- }
-#ifndef WESTBRIDGE_NDEBUG
- else
- cy_as_hal_print_message("cyashalomap:interrupt,"
- " w/o pending DMA job,"
- "-check DRQ_MASK logic\n");
-#endif
-
- /*
- * Now bump the EP ahead, so other endpoints get
- * a shot before the one we just serviced
- */
- if (end_points[service_ep].type == cy_as_hal_none) {
- if (service_ep == 15)
- service_ep = 2;
- else
- service_ep++;
- }
-
-}
-
-void cy_as_hal_dma_cancel_request(cy_as_hal_device_tag tag, uint8_t ep)
-{
- DBGPRN("cy_as_hal_dma_cancel_request on ep:%d", ep);
- if (end_points[ep].pending)
- cy_as_hal_write_register(tag,
- CY_AS_MEM_P0_EP2_DMA_REG + ep - 2, 0);
-
- end_points[ep].buffer_valid = cy_false;
- end_points[ep].type = cy_as_hal_none;
-}
-
-/*
- * enables/disables SG list assisted DMA xfers for the given EP
- * sg_list assisted XFERS can use physical addresses of mem pages in case the
- * xfer is performed by a h/w DMA controller rather than the CPU on the P port
- */
-void cy_as_hal_set_ep_dma_mode(uint8_t ep, bool sg_xfer_enabled)
-{
- end_points[ep].sg_list_enabled = sg_xfer_enabled;
- DBGPRN("<1> EP:%d sg_list assisted DMA mode set to = %d\n",
- ep, end_points[ep].sg_list_enabled);
-}
-EXPORT_SYMBOL(cy_as_hal_set_ep_dma_mode);
-
-/*
- * This function must be defined to transfer a block of data to
- * the WestBridge device. This function can use the burst write
- * (DMA) capabilities of WestBridge to do this, or it can just copy
- * the data using writes.
- */
-void cy_as_hal_dma_setup_write(cy_as_hal_device_tag tag,
- uint8_t ep, void *buf,
- uint32_t size, uint16_t maxsize)
-{
- uint32_t addr = 0;
- uint16_t v = 0;
-
- /*
- * Note: "size" is the actual request size
- * "maxsize" - is the P port fragment size
- * No EP0 or EP1 traffic should get here
- */
- cy_as_hal_assert(ep != 0 && ep != 1);
-
- /*
- * If this asserts, we have an ordering problem. Another DMA request
- * is coming down before the previous one has completed.
- */
- cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);
- end_points[ep].buffer_valid = cy_true;
- end_points[ep].type = cy_as_hal_write;
- end_points[ep].pending = cy_true;
-
- /*
- * total length of the request
- */
- end_points[ep].req_length = size;
-
- if (size >= maxsize) {
- /*
- * set xfer size for the very 1st DMA xfer operation to the
- * P port max packet size (typically 512 or 1024)
- */
- end_points[ep].dma_xfer_sz = maxsize;
- } else {
- /*
- * smaller xfers for non-storage EPs
- */
- end_points[ep].dma_xfer_sz = size;
- }
-
- /*
- * check whether the EP transfer mode uses an sg_list rather than a
- * memory buffer; block devices pass it to the HAL, so the HAL can get
- * to the real physical address of each segment and set up the DMA
- * controller hardware (if there is one)
- */
- if (end_points[ep].sg_list_enabled) {
- /*
- * buf - pointer to the SG list
- * data_p - data pointer to the 1st DMA segment
- * seg_xfer_cnt - keeps track of N of bytes sent in current
- * sg_list segment
- * req_xfer_cnt - keeps track of the total N of bytes
- * transferred for the request
- */
- end_points[ep].sg_p = buf;
- end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
- end_points[ep].seg_xfer_cnt = 0;
- end_points[ep].req_xfer_cnt = 0;
-
-#ifdef DBGPRN_DMA_SETUP_WR
- DBGPRN("cyasomaphal:%s: EP:%d, buf:%p, buf_va:%p,"
- "req_sz:%d, maxsz:%d\n",
- __func__,
- ep,
- buf,
- end_points[ep].data_p,
- size,
- maxsize);
-#endif
-
- } else {
- /*
- * setup XFER for non sg_list assisted EPs
- */
-
- #ifdef DBGPRN_DMA_SETUP_WR
- DBGPRN("<1>%s non storage or sz < 512:"
- "EP:%d, sz:%d\n", __func__, ep, size);
- #endif
-
- end_points[ep].sg_p = NULL;
-
- /*
- * must be a VMA of a membuf in kernel space
- */
- end_points[ep].data_p = buf;
-
- /*
- * keeps track of the No of bytes xferred for the request
- */
- end_points[ep].req_xfer_cnt = 0;
- }
-
- /*
- * Tell WB we are ready to send data on the given endpoint
- */
- v = (end_points[ep].dma_xfer_sz & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK)
- | CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
-
- addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
-
- cy_as_hal_write_register(tag, addr, v);
-}
-
-/*
- * This function must be defined to transfer a block of data from
- * the WestBridge device. This function can use the burst read
- * (DMA) capabilities of WestBridge to do this, or it can just
- * copy the data using reads.
- */
-void cy_as_hal_dma_setup_read(cy_as_hal_device_tag tag,
- uint8_t ep, void *buf,
- uint32_t size, uint16_t maxsize)
-{
- uint32_t addr;
- uint16_t v;
-
- /*
- * Note: "size" is the actual request size
- * "maxsize" - is the P port fragment size
- * No EP0 or EP1 traffic should get here
- */
- cy_as_hal_assert(ep != 0 && ep != 1);
-
- /*
- * If this asserts, we have an ordering problem.
- * Another DMA request is coming down before the
- * previous one has completed. We should not get
- * new requests while the current one is still in progress
- */
-
- cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);
-
- end_points[ep].buffer_valid = cy_true;
- end_points[ep].type = cy_as_hal_read;
- end_points[ep].pending = cy_true;
- end_points[ep].req_xfer_cnt = 0;
- end_points[ep].req_length = size;
-
- if (size >= maxsize) {
- /*
- * set xfer size for the very 1st DMA xfer operation to the
- * P port max packet size (typically 512 or 1024)
- */
- end_points[ep].dma_xfer_sz = maxsize;
- } else {
- /*
- * so that we can handle small xfers in the case
- * of non-storage EPs
- */
- end_points[ep].dma_xfer_sz = size;
- }
-
- addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
-
- if (end_points[ep].sg_list_enabled) {
- /*
- * Handle sg-list assisted EPs
- * seg_xfer_cnt - keeps track of N of sent packets
- * buf - pointer to the SG list
- * data_p - data pointer for the 1st DMA segment
- */
- end_points[ep].seg_xfer_cnt = 0;
- end_points[ep].sg_p = buf;
- end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
-
- #ifdef DBGPRN_DMA_SETUP_RD
- DBGPRN("cyasomaphal:DMA_setup_read sg_list EP:%d, "
- "buf:%p, buf_va:%p, req_sz:%d, maxsz:%d\n",
- ep,
- buf,
- end_points[ep].data_p,
- size,
- maxsize);
- #endif
- v = (end_points[ep].dma_xfer_sz &
- CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
- CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
- cy_as_hal_write_register(tag, addr, v);
- } else {
- /*
- * Non sg list EP passed a void *buf rather than a scatterlist *sg
- */
- #ifdef DBGPRN_DMA_SETUP_RD
- DBGPRN("%s:non-sg_list EP:%d,"
- "RQ_sz:%d, maxsz:%d\n",
- __func__, ep, size, maxsize);
- #endif
-
- end_points[ep].sg_p = NULL;
-
- /*
- * must be a VMA of a membuf in kernel space
- */
- end_points[ep].data_p = buf;
-
- /*
- * Program the EP DMA register for Storage endpoints only.
- */
- if (is_storage_e_p(ep)) {
- v = (end_points[ep].dma_xfer_sz &
- CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
- CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL;
- cy_as_hal_write_register(tag, addr, v);
- }
- }
-}
-
-/*
- * This function must be defined to allow the WB API to
- * register a callback function that is called when a
- * DMA transfer is complete.
- */
-void cy_as_hal_dma_register_callback(cy_as_hal_device_tag tag,
- cy_as_hal_dma_complete_callback cb)
-{
- DBGPRN("<1>\n%s: WB API has registered a dma_complete callback:%x\n",
- __func__, (uint32_t)cb);
- callback = cb;
-}
-
-/*
- * This function must be defined to return the maximum size of
- * DMA request that can be handled on the given endpoint. The
- * return value should be the maximum size in bytes that the DMA
- * module can handle.
- */
-uint32_t cy_as_hal_dma_max_request_size(cy_as_hal_device_tag tag,
- cy_as_end_point_number_t ep)
-{
- /*
- * Storage reads and writes are always done in 512 byte blocks.
- * So, we do the count handling within the HAL, and save on
- * some of the data transfer delay.
- */
- if ((ep == CYASSTORAGE_READ_EP_NUM) ||
- (ep == CYASSTORAGE_WRITE_EP_NUM)) {
- /* max DMA request size HAL can handle by itself */
- return CYASSTORAGE_MAX_XFER_SIZE;
- } else {
- /*
- * For the USB - Processor endpoints, the maximum transfer
- * size depends on the speed of USB operation. So, we use
- * the following constant to indicate to the API that
- * splitting of the data into chunks less than or equal to
- * the max transfer size should be handled internally.
- */
-
- /* DEFINED AS 0xffffffff in cyasdma.h */
- return CY_AS_DMA_MAX_SIZE_HW_SIZE;
- }
-}
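As a rough caller-side sketch of the splitting described above (the function name below is illustrative and not from the driver), a request larger than the reported maximum would simply be handed over in max-sized pieces:

static void example_submit_in_chunks(cy_as_hal_device_tag tag,
		cy_as_end_point_number_t ep, uint32_t total)
{
	uint32_t max = cy_as_hal_dma_max_request_size(tag, ep);
	uint32_t done = 0;

	while (done < total) {
		uint32_t chunk = (total - done < max) ? (total - done) : max;

		/* a real caller would issue a DMA setup for "chunk" bytes here */
		done += chunk;
	}
}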
-
-/*
- * This function must be defined to set the state of the WAKEUP pin
- * on the WestBridge device. Generally this is done via a GPIO of
- * some type.
- */
-cy_bool cy_as_hal_set_wakeup_pin(cy_as_hal_device_tag tag, cy_bool state)
-{
- /*
- * Not supported as of now.
- */
- return cy_false;
-}
-
-void cy_as_hal_pll_lock_loss_handler(cy_as_hal_device_tag tag)
-{
- cy_as_hal_print_message("error: astoria PLL lock is lost\n");
- cy_as_hal_print_message("please check the input voltage levels");
- cy_as_hal_print_message("and clock, and restart the system\n");
-}
-
-/*
- * Below are the functions that must be defined to provide the basic
- * operating system services required by the API.
- */
-
-/*
- * This function is required by the API to allocate memory.
- * This function is expected to work exactly like malloc().
- */
-void *cy_as_hal_alloc(uint32_t cnt)
-{
- return kmalloc(cnt, GFP_ATOMIC);
-}
-
-/*
- * This function is required by the API to free memory allocated
- * with CyAsHalAlloc(). This function is expected to work exactly
- * like free().
- */
-void cy_as_hal_free(void *mem_p)
-{
- kfree(mem_p);
-}
-
-/*
- * Allocator that can be used in interrupt context.
- * We have to ensure that the kmalloc call does not
- * sleep in this case.
- */
-void *cy_as_hal_c_b_alloc(uint32_t cnt)
-{
- return kmalloc(cnt, GFP_ATOMIC);
-}
-
-/*
- * This function is required to set a block of memory to a
- * specific value. This function is expected to work exactly
- * like memset()
- */
-void cy_as_hal_mem_set(void *ptr, uint8_t value, uint32_t cnt)
-{
- memset(ptr, value, cnt);
-}
-
-/*
- * This function is expected to create a sleep channel.
- * The data structure that represents the sleep channel
- * (which is Linux "wait_queue_head_t wq" for this particular HAL) is
- * passed as a pointer and allocated by the caller
- * (typically as a local var on the stack). The word "Create" is a
- * misnomer - this func doesn't actually create anything, it only
- * initializes the wait queue; the actual sleeping happens in
- * cy_as_hal_sleep_on()
- */
-cy_bool cy_as_hal_create_sleep_channel(cy_as_hal_sleep_channel *channel)
-{
- init_waitqueue_head(&channel->wq);
- return cy_true;
-}
-
-/*
- * for this particular HAL it doesn't actually destroy anything
- * since no actual sleep object is created in CreateSleepChannel()
- * sleep channel is given by the pointer in the argument.
- */
-cy_bool cy_as_hal_destroy_sleep_channel(cy_as_hal_sleep_channel *channel)
-{
- return cy_true;
-}
-
-/*
- * platform specific wakeable Sleep implementation
- */
-cy_bool cy_as_hal_sleep_on(cy_as_hal_sleep_channel *channel, uint32_t ms)
-{
- wait_event_interruptible_timeout(channel->wq, 0, ((ms * HZ)/1000));
- return cy_true;
-}
-
-/*
- * wakes up the process waiting on the CHANNEL
- */
-cy_bool cy_as_hal_wake(cy_as_hal_sleep_channel *channel)
-{
- wake_up_interruptible_all(&channel->wq);
- return cy_true;
-}
-
-uint32_t cy_as_hal_disable_interrupts()
-{
- if (0 == intr__enable)
- ;
-
- intr__enable++;
- return 0;
-}
-
-void cy_as_hal_enable_interrupts(uint32_t val)
-{
- intr__enable--;
- if (0 == intr__enable)
- ;
-}
-
-/*
- * Sleep at least 150ns, cpu dependent
- */
-void cy_as_hal_sleep150(void)
-{
- uint32_t i, j;
-
- j = 0;
- for (i = 0; i < 1000; i++)
- j += (~i);
-}
-
-void cy_as_hal_sleep(uint32_t ms)
-{
- cy_as_hal_sleep_channel channel;
-
- cy_as_hal_create_sleep_channel(&channel);
- cy_as_hal_sleep_on(&channel, ms);
- cy_as_hal_destroy_sleep_channel(&channel);
-}
-
-cy_bool cy_as_hal_is_polling()
-{
- return cy_false;
-}
-
-void cy_as_hal_c_b_free(void *ptr)
-{
- cy_as_hal_free(ptr);
-}
-
-/*
- * supposed to reinstate the astoria registers
- * that may be clobbered in sleep mode
- */
-void cy_as_hal_init_dev_registers(cy_as_hal_device_tag tag,
- cy_bool is_standby_wakeup)
-{
- /* specific to SPI, no implementation required */
- (void) tag;
- (void) is_standby_wakeup;
-}
-
-void cy_as_hal_read_regs_before_standby(cy_as_hal_device_tag tag)
-{
- /* specific to SPI, no implementation required */
- (void) tag;
-}
-
-cy_bool cy_as_hal_sync_device_clocks(cy_as_hal_device_tag tag)
-{
- /*
- * we are in asynchronous mode. so no need to handle this
- */
- return true;
-}
-
-/*
- * init OMAP h/w resources
- */
-int start_o_m_a_p_kernel(const char *pgm,
- cy_as_hal_device_tag *tag, cy_bool debug)
-{
- cy_as_omap_dev_kernel *dev_p;
- int i;
- u16 data16[4];
- u8 pncfg_reg;
-
- /*
- * No debug mode support through argument as of now
- */
- (void)debug;
-
- DBGPRN(KERN_INFO"starting OMAP34xx HAL...\n");
-
- /*
- * Initialize the HAL level endpoint DMA data.
- */
- for (i = 0; i < sizeof(end_points)/sizeof(end_points[0]); i++) {
- end_points[i].data_p = 0;
- end_points[i].pending = cy_false;
- end_points[i].size = 0;
- end_points[i].type = cy_as_hal_none;
- end_points[i].sg_list_enabled = cy_false;
-
- /*
- * by default the DMA transfers to/from the E_ps don't
- * use an sg_list; this implies that the upper devices like
- * the blockdevice have to enable it for the E_ps in their
- * initialization code
- */
- }
-
- /*
- * allocate memory for OMAP HAL
- */
- dev_p = (cy_as_omap_dev_kernel *)cy_as_hal_alloc(
- sizeof(cy_as_omap_dev_kernel));
- if (dev_p == 0) {
- cy_as_hal_print_message("out of memory allocating OMAP"
- "device structure\n");
- return 0;
- }
-
- dev_p->m_sig = CY_AS_OMAP_KERNEL_HAL_SIG;
-
- /*
- * initialize OMAP hardware and all gpio pins
- */
- dev_p->m_addr_base = (void *)cy_as_hal_processor_hw_init();
-
- /*
- * Now perform a hard reset of the device to have
- * the new settings take effect
- */
- __gpio_set_value(AST_WAKEUP, 1);
-
- /*
- * do Astoria h/w reset
- */
- DBGPRN(KERN_INFO"-_-_pulse -> westbridge RST pin\n");
-
- /*
- * NEGATIVE PULSE on RST pin
- */
- __gpio_set_value(AST_RESET, 0);
- mdelay(1);
- __gpio_set_value(AST_RESET, 1);
- mdelay(50);
-
- /*
- * note AFTER reset PNAND interface is 8 bit mode
- * so if the gpmc is configured in 8 bit mode the upper half will be FF
- */
- pncfg_reg = ast_p_nand_casdo_read(CY_AS_MEM_PNAND_CFG);
-
-#ifdef PNAND_16BIT_MODE
-
- /*
- * switch to 16 bit mode, force NON-LNA LBD mode, 3 RA addr bytes
- */
- ast_p_nand_casdi_write(CY_AS_MEM_PNAND_CFG, 0x0001);
-
- /*
- * now in order to continue to talk to astoria
- * switch the OMAP GPMC into 16 bit mode as well
- */
- cy_as_hal_gpmc_enable_16bit_bus(cy_true);
-#else
- /* Astoria and GPMC are already in 8 bit mode, just initialize PNAND_CFG */
- ast_p_nand_casdi_write(CY_AS_MEM_PNAND_CFG, 0x0000);
-#endif
-
- /*
- * NOTE: if you want to capture bus activity on the LA,
- * don't use printks in between the activities you want to capture.
- * printks may take milliseconds, and the data of interest
- * will fall outside the LA capture window/buffer
- */
- data16[0] = ast_p_nand_casdo_read(CY_AS_MEM_CM_WB_CFG_ID);
- data16[1] = ast_p_nand_casdo_read(CY_AS_MEM_PNAND_CFG);
-
- if (data16[0] != 0xA200) {
- /*
- * astoria device is not found
- */
- printk(KERN_ERR "ERROR: astoria device is not found, CY_AS_MEM_CM_WB_CFG_ID ");
- printk(KERN_ERR "read returned:%4.4X: CY_AS_MEM_PNAND_CFG:%4.4x !\n",
- data16[0], data16[1]);
- goto bus_acc_error;
- }
-
- cy_as_hal_print_message(KERN_INFO" register access CASDO test:"
- "\n CY_AS_MEM_CM_WB_CFG_ID:%4.4x\n"
- "PNAND_CFG after RST:%4.4x\n "
- "CY_AS_MEM_PNAND_CFG"
- "after cfg_wr:%4.4x\n\n",
- data16[0], pncfg_reg, data16[1]);
-
- dev_p->thread_flag = 1;
- spin_lock_init(&int_lock);
- dev_p->m_next_p = m_omap_list_p;
-
- m_omap_list_p = dev_p;
- *tag = dev_p;
-
- cy_as_hal_configure_interrupts((void *)dev_p);
-
- cy_as_hal_print_message(KERN_INFO"OMAP3430__hal started tag:%p"
- ", kernel HZ:%d\n", dev_p, HZ);
-
- /*
- * make processor-to-storage endpoints SG assisted by default
- */
- cy_as_hal_set_ep_dma_mode(4, true);
- cy_as_hal_set_ep_dma_mode(8, true);
-
- return 1;
-
- /*
- * there's been a NAND bus access error or
- * astoria device is not connected
- */
-bus_acc_error:
- /*
- * at this point hal tag hasn't been set yet
- * so the device will not call omap_stop
- */
- cy_as_hal_omap_hardware_deinit(dev_p);
- cy_as_hal_free(dev_p);
- return 0;
-}
-
-#else
-/*
- * Some compilers do not like empty C files, so if the OMAP hal is not being
- * compiled, we compile this single function. We do this so that for a
- * given target HAL there are not multiple sources for the HAL functions.
- */
-void my_o_m_a_p_kernel_hal_dummy_function(void)
-{
-}
-
-#endif
diff --git a/drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/cyashaldef.h b/drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/cyashaldef.h
deleted file mode 100644
index c05e6d6cb70..00000000000
--- a/drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/cyashaldef.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* Cypress West Bridge API header file (cyashaldef.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASHALDEF_H_
-#define _INCLUDED_CYASHALDEF_H_
-
-/* Summary
- * If set to TRUE, the basic numeric types are defined by the
- * West Bridge API code
- *
- * Description
- * The West Bridge API relies on some basic integral types to be
- * defined. These types include uint8_t, int8_t, uint16_t,
- * int16_t, uint32_t, and int32_t. If this macro is defined the
- * West Bridge API will define these types based on some basic
- * assumptions. If this value is set and the West Bridge API is
- * used to set these types, the definition of these types must be
- * examined to ensure that they are appropriate for the given
- * target architecture and compiler.
- *
- * Notes
- * It is preferred that if the basic platform development
- * environment defines these types that the CY_DEFINE_BASIC_TYPES
- * macro be undefined and the appropriate target system header file
- * be added to the file cyashaldef.h.
- */
-
-#include <linux/types.h>
-
-
-#if !defined(__doxygen__)
-typedef int cy_bool;
-#define cy_true (1)
-#define cy_false (0)
-#endif
-
-#endif /* _INCLUDED_CYASHALDEF_H_ */
diff --git a/drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h b/drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h
deleted file mode 100644
index 6426ea61f3d..00000000000
--- a/drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h
+++ /dev/null
@@ -1,319 +0,0 @@
-/* Cypress Antioch HAL for OMAP KERNEL header file (cyashalomapkernel.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-/*
- * This file contains the definition of the hardware abstraction
- * layer on OMAP3430 talking to the West Bridge Astoria device
- */
-
-
-#ifndef _INCLUDED_CYASHALOMAP_KERNEL_H_
-#define _INCLUDED_CYASHALOMAP_KERNEL_H_
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/wait.h>
-#include <linux/string.h>
-/* include does not seem to work
- * moving for patch submission
-#include <mach/gpmc.h>
-*/
-#include <linux/../../arch/arm/plat-omap/include/plat/gpmc.h>
-typedef struct cy_as_hal_sleep_channel_t {
- wait_queue_head_t wq;
-} cy_as_hal_sleep_channel;
-
-/* moved to staging location, eventual location
- * considered is here
-#include <mach/westbridge/cyashaldef.h>
-#include <linux/westbridge/cyastypes.h>
-#include <linux/westbridge/cyas_cplus_start.h>
-*/
-#include "../cyashaldef.h"
-#include "../../../../../../../include/linux/westbridge/cyastypes.h"
-#include "../../../../../../../include/linux/westbridge/cyas_cplus_start.h"
-#include "cyasomapdev_kernel.h"
-
-/*
- * Below are the data structures that must be defined by the HAL layer
- */
-
-/*
- * The HAL layer must define a TAG for identifying a specific Astoria
- * device in the system. In this case the tag is a void * which is
- * really an OMAP device pointer
- */
-typedef void *cy_as_hal_device_tag;
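Because the tag is just an opaque pointer, the OMAP HAL recovers its device structure with a plain cast (as the .c implementation does in stop_o_m_a_p_kernel()); a minimal sketch with a hypothetical helper name:

static inline cy_as_omap_dev_kernel *cy_as_tag_to_omap_dev(
	cy_as_hal_device_tag tag)
{
	/* the tag handed out by start_o_m_a_p_kernel() is really a
	 * cy_as_omap_dev_kernel pointer */
	return (cy_as_omap_dev_kernel *)tag;
}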
-
-
-/* This must be included after the CyAsHalDeviceTag type is defined */
-
-/* moved to staging location, eventual location
- * considered is here
- * #include <linux/westbridge/cyashalcb.h>
-*/
-#include "../../../../../../../include/linux/westbridge/cyashalcb.h"
-/*
- * Below are the functions that communicate with the West Bridge
- * device. These are system dependent and must be defined by
- * the HAL layer for a given system.
- */
-
-/*
- * This function must be defined to write a register within the Antioch
- * device. The addr value is the address of the register to write with
- * respect to the base address of the Antioch device.
- */
-void
-cy_as_hal_write_register(cy_as_hal_device_tag tag,
- uint16_t addr, uint16_t data);
-
-/*
- * This function must be defined to read a register from
- * the west bridge device. The addr value is the address of
- * the register to read with respect to the base address
- * of the west bridge device.
- */
-uint16_t
-cy_as_hal_read_register(cy_as_hal_device_tag tag, uint16_t addr);
-
-/*
- * This function must be defined to transfer a block of data
- * to the west bridge device. This function can use the burst write
- * (DMA) capabilities of Antioch to do this, or it can just copy
- * the data using writes.
- */
-void
-cy_as_hal_dma_setup_write(cy_as_hal_device_tag tag,
- uint8_t ep, void *buf, uint32_t size, uint16_t maxsize);
-
-/*
- * This function must be defined to transfer a block of data
- * from the Antioch device. This function can use the burst
- * read (DMA) capabilities of Antioch to do this, or it can
- * just copy the data using reads.
- */
-void
-cy_as_hal_dma_setup_read(cy_as_hal_device_tag tag, uint8_t ep,
- void *buf, uint32_t size, uint16_t maxsize);
-
-/*
- * This function must be defined to cancel any pending DMA request.
- */
-void
-cy_as_hal_dma_cancel_request(cy_as_hal_device_tag tag, uint8_t ep);
-
-/*
- * This function must be defined to allow the Antioch API to
- * register a callback function that is called when a DMA transfer
- * is complete.
- */
-void
-cy_as_hal_dma_register_callback(cy_as_hal_device_tag tag,
- cy_as_hal_dma_complete_callback cb);
-
-/*
- * This function must be defined to return the maximum size of DMA
- * request that can be handled on the given endpoint. The return
- * value should be the maximum size in bytes that the DMA module can
- * handle.
- */
-uint32_t
-cy_as_hal_dma_max_request_size(cy_as_hal_device_tag tag,
- cy_as_end_point_number_t ep);
-
-/*
- * This function must be defined to set the state of the WAKEUP pin
- * on the Antioch device. Generally this is done via a GPIO of some
- * type.
- */
-cy_bool
-cy_as_hal_set_wakeup_pin(cy_as_hal_device_tag tag, cy_bool state);
-
-/*
- * This function is called when the Antioch PLL loses lock, because
- * of a problem in the supply voltage or the input clock.
- */
-void
-cy_as_hal_pll_lock_loss_handler(cy_as_hal_device_tag tag);
-
-
-/**********************************************************************
- *
- * Below are the functions that must be defined to provide the basic
- * operating system services required by the API.
- *
-***********************************************************************/
-
-/*
- * This function is required by the API to allocate memory. This function
- * is expected to work exactly like malloc().
- */
-void *
-cy_as_hal_alloc(uint32_t cnt);
-
-/*
- * This function is required by the API to free memory allocated with
- * CyAsHalAlloc(). This function is expected to work exactly like free().
- */
-void
-cy_as_hal_free(void *mem_p);
-
-/*
- * This function is required by the API to allocate memory during a
- * callback. This function must be able to provide storage at interrupt
- * time.
- */
-void *
-cy_as_hal_c_b_alloc(uint32_t cnt);
-
-/*
- * This function is required by the API to free memory allocated with
- * CyAsCBHalAlloc().
- */
-void
-cy_as_hal_c_b_free(void *ptr);
-
-/*
- * This function is required to set a block of memory to a specific
- * value. This function is expected to work exactly like memset()
- */
-void
-cy_as_hal_mem_set(void *ptr, uint8_t value, uint32_t cnt);
-
-/*
- * This function is expected to create a sleep channel. The data
- * structure that represents the sleep channel is given by the
- * pointer in the argument.
- */
-cy_bool
-cy_as_hal_create_sleep_channel(cy_as_hal_sleep_channel *channel);
-
-/*
- * This function is expected to destroy a sleep channel. The data
- * structure that represents the sleep channel is given by
- * the pointer in the argument.
- */
-
-
-cy_bool
-cy_as_hal_destroy_sleep_channel(cy_as_hal_sleep_channel *channel);
-
-cy_bool
-cy_as_hal_sleep_on(cy_as_hal_sleep_channel *channel, uint32_t ms);
-
-cy_bool
-cy_as_hal_wake(cy_as_hal_sleep_channel *channel);
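For reference, the sleep-channel primitives declared above are commonly built on a Linux wait queue. The sketch below is one possible shape under that assumption; the example_sleep_channel struct and example_* helpers are hypothetical, since the real cy_as_hal_sleep_channel type is defined in the HAL headers, not here.

#include <linux/wait.h>
#include <linux/jiffies.h>

/* hypothetical stand-in for cy_as_hal_sleep_channel */
struct example_sleep_channel {
	wait_queue_head_t wq;
	int condition;
};

static int example_create_sleep_channel(struct example_sleep_channel *ch)
{
	init_waitqueue_head(&ch->wq);
	ch->condition = 0;
	return 1;
}

/* returns non-zero if woken by example_wake(), zero on timeout */
static int example_sleep_on(struct example_sleep_channel *ch, uint32_t ms)
{
	long ret = wait_event_interruptible_timeout(ch->wq, ch->condition,
						    msecs_to_jiffies(ms));
	ch->condition = 0;
	return ret > 0;
}

static int example_wake(struct example_sleep_channel *ch)
{
	ch->condition = 1;
	wake_up_interruptible(&ch->wq);
	return 1;
}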
-
-uint32_t
-cy_as_hal_disable_interrupts(void);
-
-void
-cy_as_hal_enable_interrupts(uint32_t);
-
-void
-cy_as_hal_sleep150(void);
-
-void
-cy_as_hal_sleep(uint32_t ms);
-
-cy_bool
-cy_as_hal_is_polling(void);
-
-void cy_as_hal_init_dev_registers(cy_as_hal_device_tag tag,
- cy_bool is_standby_wakeup);
-
-/*
- * required only in spi mode
- */
-cy_bool cy_as_hal_sync_device_clocks(cy_as_hal_device_tag tag);
-
-void cy_as_hal_read_regs_before_standby(cy_as_hal_device_tag tag);
-
-
-#ifndef NDEBUG
-#define cy_as_hal_assert(cond) do { if (!(cond)) \
-	printk(KERN_WARNING "assertion failed at %s:%d\n", \
-		__FILE__, __LINE__); } while (0)
-#else
-#define cy_as_hal_assert(cond)
-#endif
-
-#define cy_as_hal_print_message printk
-
-/* removable debug printks */
-#ifndef WESTBRIDGE_NDEBUG
-#define DBG_PRINT_ENABLED
-#endif
-
-/*#define MBOX_ACCESS_DBG_PRINT_ENABLED*/
-
-
-#ifdef DBG_PRINT_ENABLED
- /* Debug printing enabled */
-
- #define DBGPRN(...) printk(__VA_ARGS__)
- #define DBGPRN_FUNC_NAME printk("<1> %x:_func: %s\n", \
- current->pid, __func__)
-
-#else
- /** NO DEBUG PRINTING **/
- #define DBGPRN(...)
- #define DBGPRN_FUNC_NAME
-
-#endif
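A typical use of these debug helpers in a HAL routine would look like the snippet below (illustrative only; example_trace is not part of the driver).

/* illustrative only */
static void example_trace(uint16_t addr, uint16_t val)
{
	DBGPRN_FUNC_NAME;                /* logs "<pid>:_func: example_trace" */
	DBGPRN("<1> reg 0x%04x = 0x%04x\n", addr, val);
	cy_as_hal_assert(val != 0xffff); /* warn if the read looks floating */
}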
-
-/*
-CyAsMiscSetLogLevel(uint8_t level)
-{
- debug_level = level;
-}
-
-#ifdef CY_AS_LOG_SUPPORT
-
-void
-cy_as_log_debug_message(int level, const char *str)
-{
- if (level <= debug_level)
- cy_as_hal_print_message("log %d: %s\n", level, str);
-}
-*/
-
-
-/*
- * print buffer helper
- */
-void cyashal_prn_buf(void *buf, uint16_t offset, int len);
-
-/*
- * These are the functions that are not part of the HAL layer,
- * but are required to be called for this HAL.
- */
-int start_o_m_a_p_kernel(const char *pgm,
- cy_as_hal_device_tag *tag, cy_bool debug);
-int stop_o_m_a_p_kernel(const char *pgm, cy_as_hal_device_tag tag);
-int omap_start_intr(cy_as_hal_device_tag tag);
-void cy_as_hal_set_ep_dma_mode(uint8_t ep, bool sg_xfer_enabled);
-
-/* moved to staging location
-#include <linux/westbridge/cyas_cplus_end.h>
-*/
-#include "../../../../../../../include/linux/westbridge/cyas_cplus_start.h"
-#endif
diff --git a/drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h b/drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h
deleted file mode 100644
index 46f06ee2935..00000000000
--- a/drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasmemmap.h
+++ /dev/null
@@ -1,558 +0,0 @@
-/*
- OMAP3430 ZOOM MDK astoria interface defs(cyasmemmap.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-/* include does not seem to work
- * moving for patch submission
-#include <mach/gpmc.h>
-#include <mach/mux.h>
-*/
-#include <linux/../../arch/arm/plat-omap/include/plat/gpmc.h>
-#include <linux/../../arch/arm/plat-omap/include/plat/mux.h>
-
-#ifndef _INCLUDED_CYASMEMMAP_H_
-#define _INCLUDED_CYASMEMMAP_H_
-
-/* defines copied from OMAP kernel branch */
-
-#define OMAP2_PULL_UP (1 << 4)
-#define OMAP2_PULL_ENA (1 << 3)
-#define OMAP34XX_MUX_MODE0 0
-#define OMAP34XX_MUX_MODE4 4
-#define OMAP3_INPUT_EN (1 << 8)
-#define OMAP34XX_PIN_INPUT_PULLUP (OMAP2_PULL_ENA | OMAP3_INPUT_EN \
- | OMAP2_PULL_UP)
-
-/*
- * for OMAP3430 <-> astoria : ADmux mode, 8 bit data path
- * WB Signal- OMAP3430 signal COMMENTS
- * --------------------------- --------------------
- * CS_L -GPMC_nCS4_GPIO_53 ZOOM I SOM board
- * signal: up_nCS_A_EXT
- * AD[7:0]-upD[7:0] buffered on the
- * transposer board
- * GPMC_ADDR
- * [A8:A1]->upD[7:0]
- * INT# -GPMC_nWP_GPIO_62
- * DACK -N/C not connected
- * WAKEUP-GPIO_167
- * RESET-GPIO_126
- * R/B -GPMC_WAIT2_GPIO_64
- * -------------------------------------------
- * The address range for nCS1B is 0x06000000 - 0x07FF FFFF.
-*/
-
-/*
- *OMAP_ZOOM LEDS
- */
-#define LED_0 156
-#define LED_1 128
-#define LED_2 64
-#define LED_3 60
-
-#define HIGH 1
-#define LOW 0
-
-/*
- *omap GPIO number
- */
-#define AST_WAKEUP 167
-#define AST_RESET 126
-#define AST__rn_b 64
-
-/*
- * NOTE THIS PIN IS USED AS WP for OMAP NAND
- */
-#define AST_INT 62
-
-/*
- * as an I/O, it is actually controlled by GPMC
- */
-#define AST_CS 55
-
-
-/*
- *GPMC prefetch engine
- */
-
-/* register and its bit fields */
-#define GPMC_PREFETCH_CONFIG1 0x01E0
-
- /*32 bytes for 16 bit pnand mode*/
- #define PFE_THRESHOLD 31
-
- /*
- * bit fields
- * PF_ACCESSMODE - 0 - read mode, 1 - write mode
- * PF_DMAMODE - 0 - default only intr line signal will be generated
-	 * PF_SYNCHROMODE - default 0 - engine will start access as soon as
-	 *                  ctrl reg STARTENGINE is set
-	 * PF_WAITPINSEL - for synchro mode, selects which WAIT pin edge
-	 *                  will be monitored
-	 * PF_EN_ENGINE - 1 - ENABLES ENGINE, but it needs to be started
-	 *                  afterwards via ctrl reg bit 0
- * PF_FIFO_THRESHOLD - FIFO threshold in number of BUS(8 or 16) words
- * PF_WEIGHTED_PRIO - NUM of cycles granted to PFE if RND_ROBIN
- * prioritization is enabled
- * PF_ROUND_ROBIN - if enabled, gives priority to other CS, but
- * reserves NUM of cycles for PFE's turn
-	 * PF_ENGIN_CS_SEL - GPMC CS associated with PFE function
- */
- #define PF_ACCESSMODE (0 << 0)
- #define PF_DMAMODE (0 << 2)
- #define PF_SYNCHROMODE (0 << 3)
- #define PF_WAITPINSEL (0x0 << 4)
- #define PF_EN_ENGINE (1 << 7)
- #define PF_FIFO_THRESHOLD (PFE_THRESHOLD << 8)
- #define PF_WEIGHTED_PRIO (0x0 << 16)
- #define PF_ROUND_ROBIN (0 << 23)
- #define PF_ENGIN_CS_SEL (AST_GPMC_CS << 24)
- #define PF_EN_OPTIM_ACC (0 << 27)
- #define PF_CYCLEOPTIM (0x0 << 28)
-
-#define GPMC_PREFETCH_CONFIG1_VAL (PF_ACCESSMODE | \
- PF_DMAMODE | PF_SYNCHROMODE | \
- PF_WAITPINSEL | PF_EN_ENGINE | \
-				PF_FIFO_THRESHOLD | \
- PF_WEIGHTED_PRIO | PF_ROUND_ROBIN | \
- PF_ENGIN_CS_SEL | PF_EN_OPTIM_ACC | \
- PF_CYCLEOPTIM)
-
-/* register and its bit fields */
-#define GPMC_PREFETCH_CONFIG2 0x01E4
- /*
- * bit fields
-	 * 14 bit field; NOTE this count is also
-	 * in number of BUS (8 or 16 bit) words
- */
- #define PF_TRANSFERCOUNT (0x000)
-
-
-/* register and its bit fields */
-#define GPMC_PREFETCH_CONTROL 0x01EC
- /*
-	 * bit fields, ONLY BIT 0 is implemented
-	 * the prefetch/write-posting (PFWE) engine must be programmed with this bit = 0
- */
- #define PFPW_STARTENGINE (1 << 0)
-
-/* register and its bit fields */
-#define GPMC_PREFETCH_STATUS 0x01F0
-
- /* */
- #define PFE_FIFO_THRESHOLD (1 << 16)
-
-/*
- * GPMC posted write/prefetch engine end
- */
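A minimal sketch of how these offsets are typically programmed is shown below; the IOWR32/IORD32 and GPMC_VMA helpers are defined further down in this header, and the exact sequence (CONFIG1, then the transfer count, then STARTENGINE) is an assumption rather than code taken from the original HAL.

/* hypothetical: arm the prefetch engine for one read burst of word_count bus words */
static void example_start_prefetch(u32 word_count)
{
	IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG1), GPMC_PREFETCH_CONFIG1_VAL);
	IOWR32(GPMC_VMA(GPMC_PREFETCH_CONFIG2), word_count);
	IOWR32(GPMC_VMA(GPMC_PREFETCH_CONTROL), PFPW_STARTENGINE);

	/* wait until the FIFO holds at least PFE_THRESHOLD words */
	while (!(IORD32(GPMC_VMA(GPMC_PREFETCH_STATUS)) & PFE_FIFO_THRESHOLD))
		cpu_relax();
}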
-
-
-/*
- * chip select number on GPMC ( 0..7 )
- */
-#define AST_GPMC_CS 4
-
-/*
- * not connected
- */
-#define AST_DACK 00
-
-
-/*
- * Physical address above the NAND flash; the CS we use for mapping
- * into the OMAP3430 RAM space (the 0x0600 0000 region)
- */
-#define CYAS_DEV_BASE_ADDR (0x20000000)
-
-#define CYAS_DEV_MAX_ADDR (0xFF)
-#define CYAS_DEV_ADDR_RANGE (CYAS_DEV_MAX_ADDR << 1)
-
-#ifdef p_s_r_a_m_INTERFACE
- /* in CRAM or PSRAM mode OMAP A1..An wires-> Astoria, there is no A0 line */
- #define CYAS_DEV_CALC_ADDR(cyas_addr) (cyas_addr << 1)
- #define CYAS_DEV_CALC_EP_ADDR(ep) (ep << 1)
-#else
- /*
-	 * For the pNAND interface it depends on the NAND emulation mode
-	 * (SBD/LBD etc); we use NON-LNA_LBD mode, so it goes like this:
-	 * for LBD: <CMD><CA0,CA1,RA0,RA1,RA2> <CMD>,
-	 * where the CA1 address must have bits 2,3 = "11";
-	 * ep is mapped into RA1 bits {4:0}
- */
- #define CYAS_DEV_CALC_ADDR(cyas_addr) (cyas_addr | 0x0c00)
- #define CYAS_DEV_CALC_EP_ADDR(ep) ep
-#endif
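As a simplified illustration of the CRAM/PSRAM branch above (the pNAND branch instead goes through GPMC command/address cycles), a register read reduces to a plain window access. IORD16 and GPMC_CS_VMA are defined just below, and gpmc_data_vma is assumed to be the ioremap()ed chip-select window set up by the HAL .c file; the example_ helper itself is hypothetical.

/* hypothetical: flat-window register read, CRAM/PSRAM mode only */
static u16 example_read_wb_register(u16 reg_addr)
{
	/* CYAS_DEV_CALC_ADDR() shifts left by one because there is no A0 line */
	return IORD16(GPMC_CS_VMA(CYAS_DEV_CALC_ADDR(reg_addr)));
}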
-
-/*
- *OMAP3430 i/o access macros
- */
-#define IORD32(addr) (*(volatile u32 *)(addr))
-#define IOWR32(addr, val) (*(volatile u32 *)(addr) = val)
-
-#define IORD16(addr) (*(volatile u16 *)(addr))
-#define IOWR16(addr, val) (*(volatile u16 *)(addr) = val)
-
-#define IORD8(addr) (*(volatile u8 *)(addr))
-#define IOWR8(addr, val) (*(volatile u8 *)(addr) = val)
-
-/*
- * local defines for accessing OMAP GPIO registers
- */
-#define CTLPADCONF_BASE_ADDR 0x48002000
-#define CTLPADCONF_SIZE 0x1000
-
-#define GPIO1_BASE_ADDR 0x48310000
-#define GPIO2_BASE_ADDR 0x49050000
-#define GPIO3_BASE_ADDR 0x49052000
-#define GPIO4_BASE_ADDR 0x49054000
-#define GPIO5_BASE_ADDR 0x49056000
-#define GPIO6_BASE_ADDR 0x49058000
-#define GPIO_SPACE_SIZE 0x1000
-
-
-/*
- * OMAP3430 GPMC timing for pNAND interface
- */
-#define GPMC_BASE 0x6E000000
-#define GPMC_REGION_SIZE 0x1000
-#define GPMC_CONFIG_REG (0x50)
-
-/*
- * bit 0 in the GPMC_CONFIG_REG
- */
-#define NAND_FORCE_POSTED_WRITE_B 1
-
-/*
- * WAIT2STATUS, must be (1 << 10)
- */
-#define AS_WAIT_PIN_MASK (1 << 10)
-
-
-/*
- * GPMC_CFG_REG(N, CS): GPMC_CONFIG_N register (N in [1..7]) for chip select CS in [0..7]
- */
-#define GPMC_CFG_REG(N, CS) ((0x60 + (4*(N-1))) + (0x30*CS))
-
-/*
- *gpmc nand registers for CS4
- */
-#define AST_GPMC_NAND_CMD (0x7c + (0x30*AST_GPMC_CS))
-#define AST_GPMC_NAND_ADDR (0x80 + (0x30*AST_GPMC_CS))
-#define AST_GPMC_NAND_DATA (0x84 + (0x30*AST_GPMC_CS))
-
-#define GPMC_STAT_REG (0x54)
-#define GPMC_ERR_TYPE (0x48)
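As a concrete example of the macro arithmetic (illustrative only, using the GPMC_VMA helper defined just below): GPMC_CFG_REG(7, AST_GPMC_CS) = 0x60 + 4*6 + 0x30*4 = 0x138, i.e. GPMC_CONFIG7 for chip select 4.

/* hypothetical: read back GPMC_CONFIG7 for the West Bridge chip select */
static u32 example_read_config7(void)
{
	return IORD32(GPMC_VMA(GPMC_CFG_REG(7, AST_GPMC_CS)));
}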
-
-/*
- * we get "gpmc_base" from kernel
- */
-#define GPMC_VMA(offset) (gpmc_base + offset)
-
-/*
- * GPMC CS space VMA start address
- */
-#define GPMC_CS_VMA(offset) (gpmc_data_vma + offset)
-
-/*
- * PAD_CFG mux space VMA
- */
-#define PADCFG_VMA(offset) (iomux_vma + offset)
-
-/*
- * CONFIG1: by default, single access, async r/w; RD_MULTIPLE[30],
- * WR_MULTIPLE[28]; GPMC_FCLK_DIV[1:0]
- */
-#define GPMC_FCLK_DIV ((0) << 0)
-
-/*
- * ADDITIONAL DIVIDER FOR ALL TIMING PARAMS
- */
-#define TIME_GRAN_SCALE ((0) << 4)
-
-/*
- * for use by gpmc_set_timings api, measured in ns, not clocks
- */
-#define WB_GPMC_BUSCYC_t (7 * 6)
-#define WB_GPMC_CS_t_o_n (0)
-#define WB_GPMC_ADV_t_o_n (0)
-#define WB_GPMC_OE_t_o_n (0)
-#define WB_GPMC_OE_t_o_f_f (5 * 6)
-#define WB_GPMC_WE_t_o_n (1 * 6)
-#define WB_GPMC_WE_t_o_f_f (5 * 6)
-#define WB_GPMC_RDS_ADJ (2 * 6)
-#define WB_GPMC_RD_t_a_c_c (WB_GPMC_OE_t_o_f_f + WB_GPMC_RDS_ADJ)
-#define WB_GPMC_WR_t_a_c_c (WB_GPMC_BUSCYC_t)
-
-#define DIR_OUT 0
-#define DIR_INP 1
-#define DRV_HI 1
-#define DRV_LO 0
-
-/*
- * GPMC_CONFIG7[cs] register bit fields
- * AS_CS_MASK - 3 bit mask for A26,A25,A24,
- * AS_CS_BADDR - 6 BIT VALUE A29 ...A24
- * CSVALID_B - CSVALID bit on GPMC_CONFIG7[cs] register
- */
-#define AS_CS_MASK (0X7 << 8)
-#define AS_CS_BADDR 0x02
-#define CSVALID_B (1 << 6)
-
-/*
- * DEFINE OMAP34XX GPIO OFFSETS (should have been defined in kernel /arch);
- * these are offsets from the BASE_ADDRESS of the GPIO BLOCK
- */
-#define GPIO_REVISION 0x000
-#define GPIO_SYSCONFIG 0x010
-#define GPIO_SYSSTATUS1 0x014
-#define GPIO_IRQSTATUS1 0x018
-#define GPIO_IRQENABLE1 0x01C
-#define GPIO_IRQSTATUS2 0x028
-#define GPIO_CTRL 0x030
-#define GPIO_OE 0x034
-#define GPIO_DATA_IN 0x038
-#define GPIO_DATA_OUT 0x03C
-#define GPIO_LEVELDETECT0 0x040
-#define GPIO_LEVELDETECT1 0x044
-#define GPIO_RISINGDETECT 0x048
-#define GPIO_FALLINGDETECT 0x04c
-#define GPIO_CLEAR_DATAOUT 0x090
-#define GPIO_SET_DATAOUT 0x094
-
-typedef struct {
- char *name;
- u32 phy_addr;
- u32 virt_addr;
- u32 size;
-} io2vma_tab_t;
-
-/*
- * GPIO phy to translation VMA table
- */
-static io2vma_tab_t gpio_vma_tab[6] = {
- {"GPIO1_BASE_ADDR", GPIO1_BASE_ADDR , 0 , GPIO_SPACE_SIZE},
- {"GPIO2_BASE_ADDR", GPIO2_BASE_ADDR , 0 , GPIO_SPACE_SIZE},
- {"GPIO3_BASE_ADDR", GPIO3_BASE_ADDR , 0 , GPIO_SPACE_SIZE},
- {"GPIO4_BASE_ADDR", GPIO4_BASE_ADDR , 0 , GPIO_SPACE_SIZE},
- {"GPIO5_BASE_ADDR", GPIO5_BASE_ADDR , 0 , GPIO_SPACE_SIZE},
- {"GPIO6_BASE_ADDR", GPIO6_BASE_ADDR , 0 , GPIO_SPACE_SIZE}
-};
-/*
- * name - USER signal name assigned to the pin ( for printks)
- * mux_func - enum index NAME for the pad_cfg function
- * pin_num - pin_number if mux_func is GPIO, if not a GPIO it is -1
- * mux_ptr - pointer to the corresponding pad_cfg_reg
- * (used for pad release )
- * mux_save - preserve here original PAD_CNF value for this
- * pin (used for pad release)
- * dir - if GPIO: 0 - OUT , 1 - IN
- * dir_save - save original pin direction
- * drv - initial drive level "0" or "1"
- * drv_save - save original pin drive level
- * valid - 1 if successfully configured
-*/
-typedef struct {
- char *name;
- u32 mux_func;
- int pin_num;
- u16 *mux_ptr;
- u16 mux_save;
- u8 dir;
- u8 dir_save;
- u8 drv;
- u8 drv_save;
- u8 valid;
-} user_pad_cfg_t;
-
-/*
- * need to ensure that enums are in sync with the
- * omap_mux_pin_cfg table; these enums designate
- * the functions that OMAP pads can be configured to
- */
-enum {
- B23_OMAP3430_GPIO_167,
- D23_OMAP3430_GPIO_126,
- H1_OMAP3430_GPIO_62,
- H1_OMAP3430_GPMC_n_w_p,
- T8_OMAP3430_GPMC_n_c_s4,
- T8_OMAP3430_GPIO_55,
- R25_OMAP3430_GPIO_156,
- R27_OMAP3430_GPIO_128,
- K8_OMAP3430_GPIO_64,
- K8_GPMC_WAIT2,
- G3_OMAP3430_GPIO_60,
- G3_OMAP3430_n_b_e0_CLE,
- C6_GPMC_WAIT3,
- J1_OMAP3430_GPIO_61,
- C6_OMAP3430_GPIO_65,
-
- END_OF_TABLE
-};
-
-/*
- * number of GPIOS we plan to grab
- */
-#define GPIO_SLOTS 8
-
-/*
- * user_pads_init() reads from (and saves to) this table;
- * used in conjunction with the omap_3430_mux_t table in the .h file,
- * because of the way it's done in the kernel code
- * TODO: implement restore of the original cfg and i/o regs
- */
-
-static user_pad_cfg_t user_pad_cfg[] = {
- /*
- * name,pad_func,pin_num, mux_ptr, mux_sav, dir,
- * dir_sav, drv, drv_save, valid
- */
- {"AST_WAKEUP", B23_OMAP3430_GPIO_167, 167, NULL, 0,
- DIR_OUT, 0, DRV_HI, 0, 0},
- {"AST_RESET", D23_OMAP3430_GPIO_126, 126, NULL, 0,
- DIR_OUT, 0, DRV_HI, 0, 0},
- {"AST__rn_b", K8_GPMC_WAIT2, 64, NULL, 0,
- DIR_INP, 0, 0, 0, 0},
- {"AST_INTR", H1_OMAP3430_GPIO_62, 62, NULL, 0,
- DIR_INP, 0, DRV_HI, 0, 0},
- {"AST_CS", T8_OMAP3430_GPMC_n_c_s4, 55, NULL, 0,
- DIR_OUT, 0, DRV_HI, 0, 0},
- {"LED_0", R25_OMAP3430_GPIO_156, 156, NULL, 0,
- DIR_OUT, 0, DRV_LO, 0, 0},
- {"LED_1", R27_OMAP3430_GPIO_128, 128, NULL, 0,
- DIR_OUT, 0, DRV_LO, 0, 0},
- {"AST_CLE", G3_OMAP3430_n_b_e0_CLE , 60, NULL, 0,
- DIR_OUT, 0, DRV_LO, 0, 0},
- /*
- * Z terminator, must always be present
- * for sanity check, don't remove
- */
- {NULL}
-};
-
-#define GPIO_BANK(pin) (pin >> 5)
-#define REG_WIDTH 32
-#define GPIO_REG_VMA(pin_num, offset) \
- (gpio_vma_tab[GPIO_BANK(pin_num)].virt_addr + offset)
-
-/*
- * OMAP GPIO_REG 32 BIT MASK for a bit or
- * flag in gpio_No[0..191]; apply it to a 32 bit
- * location to set, clear or check the corresponding
- * gpio bit or flag
- */
-#define GPIO_REG_MASK(pin_num) (1 << \
- (pin_num - (GPIO_BANK(pin_num) * REG_WIDTH)))
-
-/*
- * OMAP GPIO registers bitwise access macros
- */
-
-#define OMAP_GPIO_BIT(pin_num, reg) \
- ((*((u32 *)GPIO_REG_VMA(pin_num, reg)) \
- & GPIO_REG_MASK(pin_num)) ? 1 : 0)
-
-#define RD_OMAP_GPIO_BIT(pin_num, reg) OMAP_GPIO_BIT(pin_num, reg)
-
-/*
- *these are superfast set/clr bitbang macros, ~48 ns cycle time
- */
-#define OMAP_SET_GPIO(pin_num) \
- (*(u32 *)GPIO_REG_VMA(pin_num, GPIO_SET_DATAOUT) \
- = GPIO_REG_MASK(pin_num))
-#define OMAP_CLR_GPIO(pin_num) \
- (*(u32 *)GPIO_REG_VMA(pin_num, GPIO_CLEAR_DATAOUT) \
- = GPIO_REG_MASK(pin_num))
-
-#define WR_OMAP_GPIO_BIT(pin_num, v) \
- (v ? (*(u32 *)GPIO_REG_VMA(pin_num, \
- GPIO_SET_DATAOUT) = GPIO_REG_MASK(pin_num)) \
- : (*(u32 *)GPIO_REG_VMA(pin_num, \
- GPIO_CLEAR_DATAOUT) = GPIO_REG_MASK(pin_num)))
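Putting the macros together, toggling one of the ZOOM LEDs defined above might look like this (illustrative only; it assumes the HAL has already ioremap()ed the GPIO banks into gpio_vma_tab and muxed the pad to GPIO mode, and cy_as_hal_sleep() comes from the HAL header in this patch).

/* hypothetical: blink LED_0 (GPIO 156); GPIO_BANK(156) = 4, i.e. the GPIO5 block */
static void example_blink_led0(void)
{
	OMAP_SET_GPIO(LED_0);      /* drive the pin high */
	cy_as_hal_sleep(100);      /* HAL millisecond sleep */
	OMAP_CLR_GPIO(LED_0);      /* drive the pin low */

	/* read the level back through the data-in register */
	if (OMAP_GPIO_BIT(LED_0, GPIO_DATA_IN))
		DBGPRN("<1> LED_0 still reads high\n");
}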
-
-/*
- * Note: this pin cfg mimics a similar implementation
- * in the linux kernel, which unfortunately doesn't allow
- * us to dynamically insert new custom GPIO mux
- * configurations for all REG definitions used in this
- * application. To add a new pad_cfg function, insert
- * a new ENUM and a new pin_cfg entry in the omap_mux_pin_cfg[]
- * table below.
- *
- * offset - note this is a word offset since the
- * SCM regs are 16 bit packed in one 32 bit word
- * mux_val - just enough to describe pins used
- */
-typedef struct {
- char *name;
- u16 offset;
- u16 mux_val;
-} omap_3430_mux_t;
-
-/*
- * "OUTIN" is configuration when DATA reg drives the
- * pin but the level at the pin can be sensed
- */
-#define PAD_AS_OUTIN (OMAP34XX_MUX_MODE4 | \
- OMAP34XX_PIN_OUTPUT | OMAP34XX_PIN_INPUT)
-
-omap_3430_mux_t omap_mux_pin_cfg[] = {
- /*
- * B23_OMAP3430_GPIO_167 - GPIO func to PAD 167 WB wakeup
- * D23_OMAP3430_GPIO_126 - drive GPIO_126 ( AST RESET)
- * H1_OMAP3430_GPIO_62 - need a pullup on this pin
- * H1_OMAP3430_GPMC_n_w_p - GPMC NAND CTRL n_w_p out
-	 * T8_OMAP3430_GPMC_n_c_s4 - T8 is controlled by GPMC NAND ctrl
- * R25_OMAP3430_GPIO_156 - OMAPZOOM drive LED_0
- * R27_OMAP3430_GPIO_128 - OMAPZOOM drive LED_1
- * K8_OMAP3430_GPIO_64 - OMAPZOOM drive LED_2
- * K8_GPMC_WAIT2 - GPMC WAIT2 function on PAD K8
- * G3_OMAP3430_GPIO_60 - OMAPZOOM drive LED_3
- * G3_OMAP3430_n_b_e0_CLE -GPMC NAND ctrl CLE signal
- */
-
- {"B23_OMAP3430_GPIO_167", 0x0130, (OMAP34XX_MUX_MODE4)},
- {"D23_OMAP3430_GPIO_126", 0x0132, (OMAP34XX_MUX_MODE4)},
- {"H1_OMAP3430_GPIO_62", 0x00CA, (OMAP34XX_MUX_MODE4 |
- OMAP3_INPUT_EN | OMAP34XX_PIN_INPUT_PULLUP) },
- {"H1_OMAP3430_GPMC_n_w_p", 0x00CA, (OMAP34XX_MUX_MODE0)},
- {"T8_OMAP3430_GPMC_n_c_s4", 0x00B6, (OMAP34XX_MUX_MODE0) },
- {"T8_OMAP3430_GPIO_55", 0x00B6, (OMAP34XX_MUX_MODE4) },
- {"R25_OMAP3430_GPIO_156", 0x018C, (OMAP34XX_MUX_MODE4) },
- {"R27_OMAP3430_GPIO_128", 0x0154, (OMAP34XX_MUX_MODE4) },
- {"K8_OMAP3430_GPIO_64", 0x00d0, (OMAP34XX_MUX_MODE4) },
- {"K8_GPMC_WAIT2", 0x00d0, (OMAP34XX_MUX_MODE0) },
- {"G3_OMAP3430_GPIO_60", 0x00C6, (OMAP34XX_MUX_MODE4 |
- OMAP3_INPUT_EN)},
- {"G3_OMAP3430_n_b_e0_CLE", 0x00C6, (OMAP34XX_MUX_MODE0)},
- {"C6_GPMC_WAIT3", 0x00d2, (OMAP34XX_MUX_MODE0)},
- {"C6_OMAP3430_GPIO_65", 0x00d2, (OMAP34XX_MUX_MODE4 |
- OMAP3_INPUT_EN)},
- {"J1_OMAP3430_GPIO_61", 0x00C8, (OMAP34XX_MUX_MODE4 |
- OMAP3_INPUT_EN | OMAP34XX_PIN_INPUT_PULLUP)},
- /*
- * don't remove, used for sanity check.
- */
- {"END_OF_TABLE"}
-};
-
-
-#endif /* _INCLUDED_CYASMEMMAP_H_ */
-
-/*[]*/
diff --git a/drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h b/drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h
deleted file mode 100644
index 5a64bb6bb05..00000000000
--- a/drivers/staging/westbridge/astoria/arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyasomapdev_kernel.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* Cypress Antioch OMAP KERNEL file (cyasomapdev_kernel.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor,
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef __CY_AS_OMAP_DEV_KERNEL_H__
-#define __CY_AS_OMAP_DEV_KERNEL_H__
-
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/completion.h>
-
-/* include does not seem to work
- * moving for patch submission
-#include <mach/gpmc.h>
-*/
-#include <linux/../../arch/arm/plat-omap/include/plat/gpmc.h>
-
-/*
- * Constants
- */
-#define CY_AS_OMAP_KERNEL_HAL_SIG (0x1441)
-
-
-/*
- * Data structures
- */
-typedef struct cy_as_omap_dev_kernel {
- /* This is the signature for this data structure */
- unsigned int m_sig;
-
- /* Address base of Antioch Device */
- void *m_addr_base;
-
- /* This is a pointer to the next Antioch device in the system */
- struct cy_as_omap_dev_kernel *m_next_p;
-
- /* This is for thread sync */
- struct completion thread_complete;
-
- /* This is for thread to wait for interrupts */
- cy_as_hal_sleep_channel thread_sc;
-
- /* This is for thread to exit upon StopOmapKernel */
- int thread_flag; /* set 1 to exit */
-
- int dma_ch;
-
- /* This is for dma sync */
- struct completion dma_complete;
-} cy_as_omap_dev_kernel;
-
-#endif
-
-/*[]*/
diff --git a/drivers/staging/westbridge/astoria/block/Kconfig b/drivers/staging/westbridge/astoria/block/Kconfig
deleted file mode 100644
index 851bf96a7b8..00000000000
--- a/drivers/staging/westbridge/astoria/block/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# West Bridge block driver configuration
-#
-
-config WESTBRIDGE_BLOCK_DRIVER
- tristate "West Bridge Block Driver"
- help
- Include the West Bridge based block driver
-
diff --git a/drivers/staging/westbridge/astoria/block/Makefile b/drivers/staging/westbridge/astoria/block/Makefile
deleted file mode 100644
index 4a45dd0861e..00000000000
--- a/drivers/staging/westbridge/astoria/block/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for the kernel westbridge block driver
-#
-
-ifneq ($(CONFIG_WESTBRIDGE_DEBUG),y)
- EXTRA_CFLAGS += -DWESTBRIDGE_NDEBUG
-endif
-
-obj-$(CONFIG_WESTBRIDGE_BLOCK_DRIVER) += cyasblkdev.o
-cyasblkdev-y := cyasblkdev_block.o cyasblkdev_queue.o
-
diff --git a/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c b/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c
deleted file mode 100644
index 87452bde7c9..00000000000
--- a/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c
+++ /dev/null
@@ -1,1631 +0,0 @@
-/* cyasblkdev_block.c - West Bridge Linux Block Driver source file
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-/*
- * Linux block driver implementation for Cypress West Bridge.
- * Based on the mmc block driver implementation by Andrew Christian
- * for the linux 2.6.26 kernel.
- * mmc_block.c, 5/28/2002
- */
-
-/*
- * Block driver for media (i.e., flash cards)
- *
- * Copyright 2002 Hewlett-Packard Company
- *
- * Use consistent with the GNU GPL is permitted,
- * provided that this copyright notice is
- * preserved in its entirety in all copies and derived works.
- *
- * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
- * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
- * FITNESS FOR ANY PARTICULAR PURPOSE.
- *
- * Many thanks to Alessandro Rubini and Jonathan Corbet!
- *
- * Author: Andrew Christian
- * 28 May 2002
- */
-
-#include <linux/moduleparam.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/errno.h>
-#include <linux/hdreg.h>
-#include <linux/kdev_t.h>
-#include <linux/blkdev.h>
-
-#include <asm/system.h>
-#include <linux/uaccess.h>
-
-#include <linux/scatterlist.h>
-#include <linux/time.h>
-#include <linux/signal.h>
-#include <linux/delay.h>
-
-#include "cyasblkdev_queue.h"
-
-#define CYASBLKDEV_SHIFT 0 /* Only a single partition. */
-#define CYASBLKDEV_MAX_REQ_LEN (256)
-#define CYASBLKDEV_NUM_MINORS (256 >> CYASBLKDEV_SHIFT)
-#define CY_AS_TEST_NUM_BLOCKS (64)
-#define CYASBLKDEV_MINOR_0 1
-#define CYASBLKDEV_MINOR_1 2
-#define CYASBLKDEV_MINOR_2 3
-
-static int major;
-module_param(major, int, 0444);
-MODULE_PARM_DESC(major,
- "specify the major device number for cyasblkdev block driver");
-
-/* parameters passed from the user space */
-static bool vfat_search;
-module_param(vfat_search, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(vfat_search,
- "dynamically find the location of the first sector");
-
-static int private_partition_bus = -1;
-module_param(private_partition_bus, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(private_partition_bus,
- "bus number for private partition");
-
-static int private_partition_size = -1;
-module_param(private_partition_size, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(private_partition_size,
- "size of the private partition");
-
-/*
- * There is one cyasblkdev_blk_data per slot.
- */
-struct cyasblkdev_blk_data {
- spinlock_t lock;
- int media_count[2];
- const struct block_device_operations *blkops;
- unsigned int usage;
- unsigned int suspended;
-
-	/* handle to the west bridge device; this handle is typedef'ed as void * */
- cy_as_device_handle dev_handle;
-
- /* our custom structure, in addition to request queue,
- * adds lock & semaphore items*/
- struct cyasblkdev_queue queue;
-
-	/* 16 entries are enough given the max request size:
-	 * 16 * 4K (64K per request) */
- struct scatterlist sg[16];
-
-	/* non-zero enables printk of executed requests */
- unsigned int dbgprn_flags;
-
- /*gen_disk for private, system disk */
- struct gendisk *system_disk;
- cy_as_media_type system_disk_type;
- cy_bool system_disk_read_only;
- cy_bool system_disk_bus_num;
-
- /* sector size for the medium */
- unsigned int system_disk_blk_size;
- unsigned int system_disk_first_sector;
- unsigned int system_disk_unit_no;
-
- /*gen_disk for bus 0 */
- struct gendisk *user_disk_0;
- cy_as_media_type user_disk_0_type;
- cy_bool user_disk_0_read_only;
- cy_bool user_disk_0_bus_num;
-
- /* sector size for the medium */
- unsigned int user_disk_0_blk_size;
- unsigned int user_disk_0_first_sector;
- unsigned int user_disk_0_unit_no;
-
- /*gen_disk for bus 1 */
- struct gendisk *user_disk_1;
- cy_as_media_type user_disk_1_type;
- cy_bool user_disk_1_read_only;
- cy_bool user_disk_1_bus_num;
-
- /* sector size for the medium */
- unsigned int user_disk_1_blk_size;
- unsigned int user_disk_1_first_sector;
- unsigned int user_disk_1_unit_no;
-};
-
-/* pointer to west bridge block data device superstructure */
-static struct cyasblkdev_blk_data *gl_bd;
-
-static DEFINE_SEMAPHORE(open_lock);
-
-/* local forward declarations */
-static cy_as_device_handle *cyas_dev_handle;
-static void cyasblkdev_blk_deinit(struct cyasblkdev_blk_data *bd);
-
-/* change debug print options */
- #define DBGPRN_RD_RQ (1 << 0)
- #define DBGPRN_WR_RQ (1 << 1)
- #define DBGPRN_RQ_END (1 << 2)
-
-int blkdev_ctl_dbgprn(
- int prn_flags
- )
-{
- int cur_options = gl_bd->dbgprn_flags;
-
- DBGPRN_FUNC_NAME;
-
- /* set new debug print options */
- gl_bd->dbgprn_flags = prn_flags;
-
- /* return previous */
- return cur_options;
-}
-EXPORT_SYMBOL(blkdev_ctl_dbgprn);
-
-static struct cyasblkdev_blk_data *cyasblkdev_blk_get(
- struct gendisk *disk
- )
-{
- struct cyasblkdev_blk_data *bd;
-
- DBGPRN_FUNC_NAME;
-
- down(&open_lock);
-
- bd = disk->private_data;
-
- if (bd && (bd->usage == 0))
- bd = NULL;
-
- if (bd) {
- bd->usage++;
-		#ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "cyasblkdev_blk_get: usage = %d\n", bd->usage);
- #endif
- }
- up(&open_lock);
-
- return bd;
-}
-
-static void cyasblkdev_blk_put(
- struct cyasblkdev_blk_data *bd
- )
-{
- DBGPRN_FUNC_NAME;
-
- down(&open_lock);
-
- if (bd) {
- bd->usage--;
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- " cyasblkdev_blk_put , bd->usage= %d\n", bd->usage);
- #endif
-	} else {
-		#ifndef WESTBRIDGE_NDEBUG
-		cy_as_hal_print_message(
-			"cyasblkdev: blk_put(bd) called with bd == NULL!\n");
-		#endif
-		up(&open_lock);
-		return;
-	}
-
- if (bd->usage == 0) {
- put_disk(bd->user_disk_0);
- put_disk(bd->user_disk_1);
- put_disk(bd->system_disk);
- cyasblkdev_cleanup_queue(&bd->queue);
-
- if (CY_AS_ERROR_SUCCESS !=
- cy_as_storage_release(bd->dev_handle, 0, 0, 0, 0)) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "cyasblkdev: cannot release bus 0\n");
- #endif
- }
-
- if (CY_AS_ERROR_SUCCESS !=
- cy_as_storage_release(bd->dev_handle, 1, 0, 0, 0)) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "cyasblkdev: cannot release bus 1\n");
- #endif
- }
-
- if (CY_AS_ERROR_SUCCESS !=
- cy_as_storage_stop(bd->dev_handle, 0, 0)) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "cyasblkdev: cannot stop storage stack\n");
- #endif
- }
-
- #ifdef __CY_ASTORIA_SCM_KERNEL_HAL__
- /* If the SCM Kernel HAL is being used, disable the use
- * of scatter/gather lists at the end of block driver usage.
- */
- cy_as_hal_disable_scatter_list(cyasdevice_gethaltag());
- #endif
-
- /*ptr to global struct cyasblkdev_blk_data */
- gl_bd = NULL;
- kfree(bd);
- }
-
-	#ifndef WESTBRIDGE_NDEBUG
-	/* bd may have been freed above once usage dropped to zero */
-	if (gl_bd)
-		cy_as_hal_print_message(
-			"cyasblkdev (blk_put): usage = %d\n",
-			bd->usage);
-	#endif
- up(&open_lock);
-}
-
-static int cyasblkdev_blk_open(
- struct block_device *bdev,
- fmode_t mode
- )
-{
- struct cyasblkdev_blk_data *bd = cyasblkdev_blk_get(bdev->bd_disk);
- int ret = -ENXIO;
-
- DBGPRN_FUNC_NAME;
-
- if (bd) {
- if (bd->usage == 2)
- check_disk_change(bdev);
-
- ret = 0;
-
- if (bdev->bd_disk == bd->user_disk_0) {
- if ((mode & FMODE_WRITE) && bd->user_disk_0_read_only) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "device marked as readonly "
- "and write requested\n");
- #endif
-
- cyasblkdev_blk_put(bd);
- ret = -EROFS;
- }
- } else if (bdev->bd_disk == bd->user_disk_1) {
- if ((mode & FMODE_WRITE) && bd->user_disk_1_read_only) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "device marked as readonly "
- "and write requested\n");
- #endif
-
- cyasblkdev_blk_put(bd);
- ret = -EROFS;
- }
- } else if (bdev->bd_disk == bd->system_disk) {
- if ((mode & FMODE_WRITE) && bd->system_disk_read_only) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "device marked as readonly "
- "and write requested\n");
- #endif
-
- cyasblkdev_blk_put(bd);
- ret = -EROFS;
- }
- }
- }
-
- return ret;
-}
-
-static int cyasblkdev_blk_release(
- struct gendisk *disk,
- fmode_t mode
- )
-{
- struct cyasblkdev_blk_data *bd = disk->private_data;
-
- DBGPRN_FUNC_NAME;
-
- cyasblkdev_blk_put(bd);
- return 0;
-}
-
-static int cyasblkdev_blk_ioctl(
- struct block_device *bdev,
- fmode_t mode,
- unsigned int cmd,
- unsigned long arg
- )
-{
- DBGPRN_FUNC_NAME;
-
- if (cmd == HDIO_GETGEO) {
- /*for now we only process geometry IOCTL*/
- struct hd_geometry geo;
-
- memset(&geo, 0, sizeof(struct hd_geometry));
-
- geo.cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
- geo.heads = 4;
- geo.sectors = 16;
- geo.start = get_start_sect(bdev);
-
- /* copy to user space */
- return copy_to_user((void __user *)arg, &geo, sizeof(geo))
- ? -EFAULT : 0;
- }
-
- return -ENOTTY;
-}
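From user space, the single ioctl handled above can be exercised with a small test program like the one below (illustrative; the /dev/cyasblkdevblk0 node name is an assumption based on the disk_name format used later in this file).

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/cyasblkdevblk0", O_RDONLY);

	if (fd < 0 || ioctl(fd, HDIO_GETGEO, &geo) < 0)
		return 1;

	printf("heads=%u sectors=%u cylinders=%u start=%lu\n",
	       (unsigned)geo.heads, (unsigned)geo.sectors,
	       (unsigned)geo.cylinders, geo.start);
	return 0;
}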
-
-/* check_events block_device op:
- * this one is called by the kernel to confirm whether the media really
- * changed, as we indicated by issuing the check_disk_change() call */
-unsigned int cyasblkdev_check_events(struct gendisk *gd, unsigned int clearing)
-{
- struct cyasblkdev_blk_data *bd;
-
- #ifndef WESTBRIDGE_NDEBUG
-	cy_as_hal_print_message("cyasblkdev_check_events() is called\n");
- #endif
-
- if (gd)
- bd = gd->private_data;
- else {
- #ifndef WESTBRIDGE_NDEBUG
-		cy_as_hal_print_message(
-			"cyasblkdev_check_events() is called, "
-			"but gd is null\n");
- #endif
- }
-
- /* return media change state - DISK_EVENT_MEDIA_CHANGE yes, 0 no */
- return 0;
-}
-
-/* this one is called by the kernel to give us a chance
- * to prep the new media before it starts rescanning
- * the newly inserted SD media */
-int cyasblkdev_revalidate_disk(struct gendisk *gd)
-{
- /*int (*revalidate_disk) (struct gendisk *); */
-
- #ifndef WESTBRIDGE_NDEBUG
- if (gd)
- cy_as_hal_print_message(
- "cyasblkdev_revalidate_disk() is called, "
- "(gl_bd->usage:%d)\n", gl_bd->usage);
- #endif
-
- /* 0 means ok, kern can go ahead with partition rescan */
- return 0;
-}
-
-
-/*standard block device driver interface */
-static struct block_device_operations cyasblkdev_bdops = {
- .open = cyasblkdev_blk_open,
- .release = cyasblkdev_blk_release,
- .ioctl = cyasblkdev_blk_ioctl,
- /* .getgeo = cyasblkdev_blk_getgeo, */
- /* added to support media removal( real and simulated) media */
- .check_events = cyasblkdev_check_events,
- /* added to support media removal( real and simulated) media */
- .revalidate_disk = cyasblkdev_revalidate_disk,
- .owner = THIS_MODULE,
-};
-
-/* west bridge block device prep request function */
-static int cyasblkdev_blk_prep_rq(
- struct cyasblkdev_queue *bq,
- struct request *req
- )
-{
- struct cyasblkdev_blk_data *bd = bq->data;
- int stat = BLKPREP_OK;
-
- DBGPRN_FUNC_NAME;
-
- /* If we have no device, we haven't finished initialising. */
- if (!bd || !bd->dev_handle) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(KERN_ERR
- "cyasblkdev %s: killing request - no device/host\n",
- req->rq_disk->disk_name);
- #endif
- stat = BLKPREP_KILL;
- }
-
- if (bd->suspended) {
- blk_plug_device(bd->queue.queue);
- stat = BLKPREP_DEFER;
- }
-
- /* Check for excessive requests.*/
- if (blk_rq_pos(req) + blk_rq_sectors(req) > get_capacity(req->rq_disk)) {
- cy_as_hal_print_message("cyasblkdev: bad request address\n");
- stat = BLKPREP_KILL;
- }
-
- return stat;
-}
-
-/*west bridge storage async api on_completed callback */
-static void cyasblkdev_issuecallback(
- /* Handle to the device completing the storage operation */
- cy_as_device_handle handle,
- /* The media type completing the operation */
- cy_as_media_type type,
- /* The device completing the operation */
- uint32_t device,
- /* The unit completing the operation */
- uint32_t unit,
- /* The block number of the completed operation */
- uint32_t block_number,
- /* The type of operation */
- cy_as_oper_type op,
- /* The error status */
- cy_as_return_status_t status
- )
-{
- int retry_cnt = 0;
- DBGPRN_FUNC_NAME;
-
- if (status != CY_AS_ERROR_SUCCESS) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: async r/w: op:%d failed with error %d at address %d\n",
- __func__, op, status, block_number);
- #endif
- }
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s calling blk_end_request from issue_callback "
- "req=0x%x, status=0x%x, nr_sectors=0x%x\n",
- __func__, (unsigned int) gl_bd->queue.req, status,
- (unsigned int) blk_rq_sectors(gl_bd->queue.req));
- #endif
-
- /* note: blk_end_request w/o __ prefix should
- * not require spinlocks on the queue*/
- while (blk_end_request(gl_bd->queue.req,
- status, blk_rq_sectors(gl_bd->queue.req)*512)) {
- retry_cnt++;
- }
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s blkdev_callback: ended rq on %d sectors, "
- "with err:%d, n:%d times\n", __func__,
- (int)blk_rq_sectors(gl_bd->queue.req), status,
- retry_cnt
- );
- #endif
-
- spin_lock_irq(&gl_bd->lock);
-
-	/* fetch the next request from the elevator, if there is one */
- if (!blk_queue_plugged(gl_bd->queue.queue)) {
- /* queue is not plugged */
- gl_bd->queue.req = blk_fetch_request(gl_bd->queue.queue);
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s blkdev_callback: "
- "blk_fetch_request():%p\n",
- __func__, gl_bd->queue.req);
- #endif
- }
-
- if (gl_bd->queue.req) {
- spin_unlock_irq(&gl_bd->lock);
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s blkdev_callback: about to "
- "call issue_fn:%p\n", __func__, gl_bd->queue.req);
- #endif
-
- gl_bd->queue.issue_fn(&gl_bd->queue, gl_bd->queue.req);
- } else {
- spin_unlock_irq(&gl_bd->lock);
- }
-}
-
-/* issue astoria blkdev request (issue_fn) */
-static int cyasblkdev_blk_issue_rq(
- struct cyasblkdev_queue *bq,
- struct request *req
- )
-{
- struct cyasblkdev_blk_data *bd = bq->data;
- int index = 0;
- int ret = CY_AS_ERROR_SUCCESS;
- uint32_t req_sector = 0;
- uint32_t req_nr_sectors = 0;
- int bus_num = 0;
- int lcl_unit_no = 0;
-
- DBGPRN_FUNC_NAME;
-
- /*
- * will construct a scatterlist for the given request;
- * the return value is the number of actually used
- * entries in the resulting list. Then, this scatterlist
- * can be used for the actual DMA prep operation.
- */
- spin_lock_irq(&bd->lock);
- index = blk_rq_map_sg(bq->queue, req, bd->sg);
-
- if (req->rq_disk == bd->user_disk_0) {
- bus_num = bd->user_disk_0_bus_num;
- req_sector = blk_rq_pos(req) + gl_bd->user_disk_0_first_sector;
- req_nr_sectors = blk_rq_sectors(req);
- lcl_unit_no = gl_bd->user_disk_0_unit_no;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: request made to disk 0 "
- "for sector=%d, num_sectors=%d, unit_no=%d\n",
- __func__, req_sector, (int) blk_rq_sectors(req),
- lcl_unit_no);
- #endif
- } else if (req->rq_disk == bd->user_disk_1) {
- bus_num = bd->user_disk_1_bus_num;
- req_sector = blk_rq_pos(req) + gl_bd->user_disk_1_first_sector;
- /*SECT_NUM_TRANSLATE(blk_rq_sectors(req));*/
- req_nr_sectors = blk_rq_sectors(req);
- lcl_unit_no = gl_bd->user_disk_1_unit_no;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: request made to disk 1 for "
- "sector=%d, num_sectors=%d, unit_no=%d\n", __func__,
- req_sector, (int) blk_rq_sectors(req), lcl_unit_no);
- #endif
- } else if (req->rq_disk == bd->system_disk) {
- bus_num = bd->system_disk_bus_num;
- req_sector = blk_rq_pos(req) + gl_bd->system_disk_first_sector;
- req_nr_sectors = blk_rq_sectors(req);
- lcl_unit_no = gl_bd->system_disk_unit_no;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: request made to system disk "
- "for sector=%d, num_sectors=%d, unit_no=%d\n", __func__,
- req_sector, (int) blk_rq_sectors(req), lcl_unit_no);
- #endif
- }
- #ifndef WESTBRIDGE_NDEBUG
- else {
- cy_as_hal_print_message(
- "%s: invalid disk used for request\n", __func__);
- }
- #endif
-
- spin_unlock_irq(&bd->lock);
-
- if (rq_data_dir(req) == READ) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: calling readasync() "
- "req_sector=0x%x, req_nr_sectors=0x%x, bd->sg:%x\n\n",
- __func__, req_sector, req_nr_sectors, (uint32_t)bd->sg);
- #endif
-
- ret = cy_as_storage_read_async(bd->dev_handle, bus_num, 0,
- lcl_unit_no, req_sector, bd->sg, req_nr_sectors,
- (cy_as_storage_callback)cyasblkdev_issuecallback);
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s:readasync() error %d at "
- "address %ld, unit no %d\n", __func__, ret,
- blk_rq_pos(req), lcl_unit_no);
-			cy_as_hal_print_message("%s:ending i/o request "
-				"on req:%x\n", __func__, (uint32_t)req);
- #endif
-
- while (blk_end_request(req,
- (ret == CY_AS_ERROR_SUCCESS),
- req_nr_sectors*512))
- ;
-
- bq->req = NULL;
- }
- } else {
- ret = cy_as_storage_write_async(bd->dev_handle, bus_num, 0,
- lcl_unit_no, req_sector, bd->sg, req_nr_sectors,
- (cy_as_storage_callback)cyasblkdev_issuecallback);
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: write failed with "
- "error %d at address %ld, unit no %d\n",
- __func__, ret, blk_rq_pos(req), lcl_unit_no);
- #endif
-
- /*end IO op on this request(does both
- * end_that_request_... _first & _last) */
- while (blk_end_request(req,
- (ret == CY_AS_ERROR_SUCCESS),
- req_nr_sectors*512))
- ;
-
- bq->req = NULL;
- }
- }
-
- return ret;
-}
-
-static unsigned long
-dev_use[CYASBLKDEV_NUM_MINORS / (8 * sizeof(unsigned long))];
-
-
-/* storage event callback (note: called in astoria isr context) */
-static void cyasblkdev_storage_callback(
- cy_as_device_handle dev_h,
- cy_as_bus_number_t bus,
- uint32_t device,
- cy_as_storage_event evtype,
- void *evdata
- )
-{
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: bus:%d, device:%d, evtype:%d, "
- "evdata:%p\n ", __func__, bus, device, evtype, evdata);
- #endif
-
- switch (evtype) {
- case cy_as_storage_processor:
- break;
-
- case cy_as_storage_removed:
- break;
-
- case cy_as_storage_inserted:
- break;
-
- default:
- break;
- }
-}
-
-#define SECTORS_TO_SCAN 4096
-
-uint32_t cyasblkdev_get_vfat_offset(int bus_num, int unit_no)
-{
- /*
-	 * for sd media, the vfat partition boot record is not always
-	 * located at sector 0; it greatly depends on the system and
-	 * software that was used to format the sd. however, the linux
-	 * fs layer always expects it at sector 0, so this function
-	 * finds the offset and then uses it in all media r/w
- * operations
- */
- int sect_no, stat;
- uint8_t *sect_buf;
- bool br_found = false;
-
- DBGPRN_FUNC_NAME;
-
-	sect_buf = kmalloc(1024, GFP_KERNEL);
-	if (!sect_buf)
-		return 0;
-
-	/* since the HAL layer always uses sg lists instead of plain
-	 * buffers (for hw dmas), we need to initialize the sg list
-	 * for the local buffer */
- sg_init_one(gl_bd->sg, sect_buf, 512);
-
- /*
-	 * Check the MBR partition table first, then try to scan through
-	 * the first SECTORS_TO_SCAN sectors until the BR signature
-	 * (intel JMP instruction code and 0x55AA) is found
- */
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s scanning media for vfat partition...\n", __func__);
- #endif
-
- for (sect_no = 0; sect_no < SECTORS_TO_SCAN; sect_no++) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s before cyasstorageread "
- "gl_bd->sg addr=0x%x\n", __func__,
- (unsigned int) gl_bd->sg);
- #endif
-
- stat = cy_as_storage_read(
- /* Handle to the device of interest */
- gl_bd->dev_handle,
- /* The bus to access */
- bus_num,
- /* The device to access */
- 0,
- /* The unit to access */
- unit_no,
- /* absolute sector number */
- sect_no,
- /* sg structure */
- gl_bd->sg,
- /* The number of blocks to be read */
- 1
- );
-
- /* try only sectors with boot signature */
- if ((sect_buf[510] == 0x55) && (sect_buf[511] == 0xaa)) {
- /* vfat boot record may also be located at
- * sector 0, check it first */
- if (sect_buf[0] == 0xEB) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s vfat partition found "
- "at sector:%d\n",
- __func__, sect_no);
- #endif
-
- br_found = true;
- break;
- }
- }
-
- if (stat != 0) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s sector scan error\n",
- __func__);
- #endif
- break;
- }
- }
-
- kfree(sect_buf);
-
- if (br_found) {
- return sect_no;
- } else {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s vfat partition is not found, using 0 offset\n",
- __func__);
- #endif
- return 0;
- }
-}
-
-cy_as_storage_query_device_data dev_data = {0};
-
-static int cyasblkdev_add_disks(int bus_num,
- struct cyasblkdev_blk_data *bd,
- int total_media_count,
- int devidx)
-{
- int ret = 0;
- uint64_t disk_cap;
- int lcl_unit_no;
- cy_as_storage_query_unit_data unit_data = {0};
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s:query device: "
- "type:%d, removable:%d, writable:%d, "
- "blksize %d, units:%d, locked:%d, "
- "erase_sz:%d\n",
- __func__,
- dev_data.desc_p.type,
- dev_data.desc_p.removable,
- dev_data.desc_p.writeable,
- dev_data.desc_p.block_size,
- dev_data.desc_p.number_units,
- dev_data.desc_p.locked,
- dev_data.desc_p.erase_unit_size
- );
- #endif
-
- /* make sure that device is not locked */
- if (dev_data.desc_p.locked) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: device is locked\n", __func__);
- #endif
- ret = cy_as_storage_release(
- bd->dev_handle, bus_num, 0, 0, 0);
- if (ret != CY_AS_ERROR_SUCCESS) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s cannot release"
- " storage\n", __func__);
- #endif
- goto out;
- }
- goto out;
- }
-
- unit_data.device = 0;
- unit_data.unit = 0;
- unit_data.bus = bus_num;
- ret = cy_as_storage_query_unit(bd->dev_handle,
- &unit_data, 0, 0);
- if (ret != CY_AS_ERROR_SUCCESS) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: cannot query "
- "%d device unit - reason code %d\n",
- __func__, bus_num, ret);
- #endif
- goto out;
- }
-
- if (private_partition_bus == bus_num) {
- if (private_partition_size > 0) {
- ret = cy_as_storage_create_p_partition(
- bd->dev_handle, bus_num, 0,
- private_partition_size, 0, 0);
- if ((ret != CY_AS_ERROR_SUCCESS) &&
- (ret != CY_AS_ERROR_ALREADY_PARTITIONED)) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: cy_as_storage_"
- "create_p_partition after size > 0 check "
- "failed with error code %d\n",
- __func__, ret);
- #endif
-
- disk_cap = (uint64_t)
- (unit_data.desc_p.unit_size);
- lcl_unit_no = 0;
-
- } else if (ret == CY_AS_ERROR_ALREADY_PARTITIONED) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: cy_as_storage_create_p_partition "
- "indicates memory already partitioned\n",
- __func__);
- #endif
-
- /*check to see that partition
- * matches size */
- if (unit_data.desc_p.unit_size !=
- private_partition_size) {
- ret = cy_as_storage_remove_p_partition(
- bd->dev_handle,
- bus_num, 0, 0, 0);
- if (ret == CY_AS_ERROR_SUCCESS) {
- ret = cy_as_storage_create_p_partition(
- bd->dev_handle, bus_num, 0,
- private_partition_size, 0, 0);
- if (ret == CY_AS_ERROR_SUCCESS) {
- unit_data.bus = bus_num;
- unit_data.device = 0;
- unit_data.unit = 1;
- } else {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: cy_as_storage_create_p_partition "
- "after removal unexpectedly failed "
- "with error %d\n", __func__, ret);
- #endif
-
- /* need to requery bus
- * seeing as delete
- * successful and create
- * failed we have changed
- * the disk properties */
- unit_data.bus = bus_num;
- unit_data.device = 0;
- unit_data.unit = 0;
- }
-
- ret = cy_as_storage_query_unit(
- bd->dev_handle,
- &unit_data, 0, 0);
- if (ret != CY_AS_ERROR_SUCCESS) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: cannot query %d "
- "device unit - reason code %d\n",
- __func__, bus_num, ret);
- #endif
- goto out;
- } else {
- disk_cap = (uint64_t)
- (unit_data.desc_p.unit_size);
- lcl_unit_no =
- unit_data.unit;
- }
- } else {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: cy_as_storage_remove_p_partition "
- "failed with error %d\n",
- __func__, ret);
- #endif
-
- unit_data.bus = bus_num;
- unit_data.device = 0;
- unit_data.unit = 1;
-
- ret = cy_as_storage_query_unit(
- bd->dev_handle, &unit_data, 0, 0);
- if (ret != CY_AS_ERROR_SUCCESS) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: cannot query %d "
- "device unit - reason "
- "code %d\n", __func__,
- bus_num, ret);
- #endif
- goto out;
- }
-
- disk_cap = (uint64_t)
- (unit_data.desc_p.unit_size);
- lcl_unit_no =
- unit_data.unit;
- }
- } else {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: partition "
- "exists and sizes equal\n",
- __func__);
- #endif
-
- /*partition already existed,
- * need to query second unit*/
- unit_data.bus = bus_num;
- unit_data.device = 0;
- unit_data.unit = 1;
-
- ret = cy_as_storage_query_unit(
- bd->dev_handle, &unit_data, 0, 0);
- if (ret != CY_AS_ERROR_SUCCESS) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: cannot query %d "
- "device unit "
- "- reason code %d\n",
- __func__, bus_num, ret);
- #endif
- goto out;
- } else {
- disk_cap = (uint64_t)
- (unit_data.desc_p.unit_size);
- lcl_unit_no = unit_data.unit;
- }
- }
- } else {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: cy_as_storage_create_p_partition "
- "created successfully\n", __func__);
- #endif
-
- disk_cap = (uint64_t)
- (unit_data.desc_p.unit_size -
- private_partition_size);
-
- lcl_unit_no = 1;
- }
- }
-	else {
-		#ifndef WESTBRIDGE_NDEBUG
-		cy_as_hal_print_message(
-			"%s: invalid partition_size %d\n", __func__,
-			private_partition_size);
-		#endif
-
-		disk_cap = (uint64_t)
-			(unit_data.desc_p.unit_size);
-		lcl_unit_no = 0;
-	}
- } else {
- disk_cap = (uint64_t)
- (unit_data.desc_p.unit_size);
- lcl_unit_no = 0;
- }
-
- if ((bus_num == 0) ||
- (total_media_count == 1)) {
- sprintf(bd->user_disk_0->disk_name,
- "cyasblkdevblk%d", devidx);
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: disk unit_sz:%lu blk_sz:%d, "
- "start_blk:%lu, capacity:%llu\n",
- __func__, (unsigned long)
- unit_data.desc_p.unit_size,
- unit_data.desc_p.block_size,
- (unsigned long)
- unit_data.desc_p.start_block,
- (uint64_t)disk_cap
- );
- #endif
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: setting gendisk disk "
- "capacity to %d\n", __func__, (int) disk_cap);
- #endif
-
- /* initializing bd->queue */
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: init bd->queue\n",
- __func__);
- #endif
-
- /* this will create a
- * queue kernel thread */
- cyasblkdev_init_queue(
- &bd->queue, &bd->lock);
-
- bd->queue.prep_fn = cyasblkdev_blk_prep_rq;
- bd->queue.issue_fn = cyasblkdev_blk_issue_rq;
- bd->queue.data = bd;
-
- /*blk_size should always
- * be a multiple of 512,
- * set to the max to ensure
- * that all accesses aligned
- * to the greatest multiple,
- * can adjust request to
- * smaller block sizes
- * dynamically*/
-
- bd->user_disk_0_read_only = !dev_data.desc_p.writeable;
- bd->user_disk_0_blk_size = dev_data.desc_p.block_size;
- bd->user_disk_0_type = dev_data.desc_p.type;
- bd->user_disk_0_bus_num = bus_num;
- bd->user_disk_0->major = major;
- bd->user_disk_0->first_minor = devidx << CYASBLKDEV_SHIFT;
- bd->user_disk_0->minors = 8;
- bd->user_disk_0->fops = &cyasblkdev_bdops;
- bd->user_disk_0->events = DISK_EVENT_MEDIA_CHANGE;
- bd->user_disk_0->private_data = bd;
- bd->user_disk_0->queue = bd->queue.queue;
- bd->dbgprn_flags = DBGPRN_RD_RQ;
- bd->user_disk_0_unit_no = lcl_unit_no;
-
- blk_queue_logical_block_size(bd->queue.queue,
- bd->user_disk_0_blk_size);
-
- set_capacity(bd->user_disk_0,
- disk_cap);
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: returned from set_capacity %d\n",
- __func__, (int) disk_cap);
- #endif
-
- /* need to start search from
- * public partition beginning */
- if (vfat_search) {
- bd->user_disk_0_first_sector =
- cyasblkdev_get_vfat_offset(
- bd->user_disk_0_bus_num,
- bd->user_disk_0_unit_no);
- } else {
- bd->user_disk_0_first_sector = 0;
- }
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: set user_disk_0_first "
- "sector to %d\n", __func__,
- bd->user_disk_0_first_sector);
- cy_as_hal_print_message(
- "%s: add_disk: disk->major=0x%x\n",
- __func__,
- bd->user_disk_0->major);
- cy_as_hal_print_message(
- "%s: add_disk: "
- "disk->first_minor=0x%x\n", __func__,
- bd->user_disk_0->first_minor);
- cy_as_hal_print_message(
- "%s: add_disk: "
- "disk->minors=0x%x\n", __func__,
- bd->user_disk_0->minors);
- cy_as_hal_print_message(
- "%s: add_disk: "
- "disk->disk_name=%s\n",
- __func__,
- bd->user_disk_0->disk_name);
- cy_as_hal_print_message(
- "%s: add_disk: "
- "disk->part_tbl=0x%x\n", __func__,
- (unsigned int)
- bd->user_disk_0->part_tbl);
- cy_as_hal_print_message(
- "%s: add_disk: "
- "disk->queue=0x%x\n", __func__,
- (unsigned int)
- bd->user_disk_0->queue);
- cy_as_hal_print_message(
- "%s: add_disk: "
- "disk->flags=0x%x\n",
- __func__, (unsigned int)
- bd->user_disk_0->flags);
- cy_as_hal_print_message(
- "%s: add_disk: "
- "disk->driverfs_dev=0x%x\n",
- __func__, (unsigned int)
- bd->user_disk_0->driverfs_dev);
- cy_as_hal_print_message(
- "%s: add_disk: "
- "disk->slave_dir=0x%x\n",
- __func__, (unsigned int)
- bd->user_disk_0->slave_dir);
- cy_as_hal_print_message(
- "%s: add_disk: "
- "disk->random=0x%x\n",
- __func__, (unsigned int)
- bd->user_disk_0->random);
- cy_as_hal_print_message(
- "%s: add_disk: "
- "disk->node_id=0x%x\n",
- __func__, (unsigned int)
- bd->user_disk_0->node_id);
-
- #endif
-
- add_disk(bd->user_disk_0);
-
- } else if ((bus_num == 1) &&
- (total_media_count == 2)) {
- bd->user_disk_1_read_only = !dev_data.desc_p.writeable;
- bd->user_disk_1_blk_size = dev_data.desc_p.block_size;
- bd->user_disk_1_type = dev_data.desc_p.type;
- bd->user_disk_1_bus_num = bus_num;
- bd->user_disk_1->major = major;
- bd->user_disk_1->first_minor = (devidx + 1) << CYASBLKDEV_SHIFT;
- bd->user_disk_1->minors = 8;
- bd->user_disk_1->fops = &cyasblkdev_bdops;
- bd->user_disk_1->events = DISK_EVENT_MEDIA_CHANGE;
- bd->user_disk_1->private_data = bd;
- bd->user_disk_1->queue = bd->queue.queue;
- bd->dbgprn_flags = DBGPRN_RD_RQ;
- bd->user_disk_1_unit_no = lcl_unit_no;
-
- sprintf(bd->user_disk_1->disk_name,
- "cyasblkdevblk%d", (devidx + 1));
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: disk unit_sz:%lu "
- "blk_sz:%d, "
- "start_blk:%lu, "
- "capacity:%llu\n",
- __func__,
- (unsigned long)
- unit_data.desc_p.unit_size,
- unit_data.desc_p.block_size,
- (unsigned long)
- unit_data.desc_p.start_block,
- (uint64_t)disk_cap
- );
- #endif
-
- /*blk_size should always be a
- * multiple of 512, set to the max
- * to ensure that all accesses
- * aligned to the greatest multiple,
- * can adjust request to smaller
- * block sizes dynamically*/
- if (bd->user_disk_0_blk_size >
- bd->user_disk_1_blk_size) {
- blk_queue_logical_block_size(bd->queue.queue,
- bd->user_disk_0_blk_size);
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: set hard sect_sz:%d\n",
- __func__,
- bd->user_disk_0_blk_size);
- #endif
- } else {
- blk_queue_logical_block_size(bd->queue.queue,
- bd->user_disk_1_blk_size);
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: set hard sect_sz:%d\n",
- __func__,
- bd->user_disk_1_blk_size);
- #endif
- }
-
- set_capacity(bd->user_disk_1, disk_cap);
- if (vfat_search) {
- bd->user_disk_1_first_sector =
- cyasblkdev_get_vfat_offset(
- bd->user_disk_1_bus_num,
- bd->user_disk_1_unit_no);
- } else {
- bd->user_disk_1_first_sector
- = 0;
- }
-
- add_disk(bd->user_disk_1);
- }
-
- if (lcl_unit_no > 0) {
- if (bd->system_disk == NULL) {
- bd->system_disk =
- alloc_disk(8);
-
- if (bd->system_disk == NULL) {
- kfree(bd);
-					/* this function returns int, not a pointer */
-					return -ENOMEM;
- }
- disk_cap = (uint64_t)
- (private_partition_size);
-
- /* set properties of
- * system disk */
- bd->system_disk_read_only = !dev_data.desc_p.writeable;
- bd->system_disk_blk_size = dev_data.desc_p.block_size;
- bd->system_disk_bus_num = bus_num;
- bd->system_disk->major = major;
- bd->system_disk->first_minor =
- (devidx + 2) << CYASBLKDEV_SHIFT;
- bd->system_disk->minors = 8;
- bd->system_disk->fops = &cyasblkdev_bdops;
- bd->system_disk->events = DISK_EVENT_MEDIA_CHANGE;
- bd->system_disk->private_data = bd;
- bd->system_disk->queue = bd->queue.queue;
- /* don't search for vfat
- * with system disk */
- bd->system_disk_first_sector = 0;
- sprintf(
- bd->system_disk->disk_name,
- "cyasblkdevblk%d", (devidx + 2));
-
- set_capacity(bd->system_disk,
- disk_cap);
-
- add_disk(bd->system_disk);
- }
- #ifndef WESTBRIDGE_NDEBUG
- else {
- cy_as_hal_print_message(
- "%s: system disk already allocated %d\n",
- __func__, bus_num);
- }
- #endif
- }
-out:
- return ret;
-}
-
-static struct cyasblkdev_blk_data *cyasblkdev_blk_alloc(void)
-{
- struct cyasblkdev_blk_data *bd;
- int ret = 0;
- cy_as_return_status_t stat = -1;
- int bus_num = 0;
- int total_media_count = 0;
- int devidx = 0;
- DBGPRN_FUNC_NAME;
-
- total_media_count = 0;
- devidx = find_first_zero_bit(dev_use, CYASBLKDEV_NUM_MINORS);
- if (devidx >= CYASBLKDEV_NUM_MINORS)
- return ERR_PTR(-ENOSPC);
-
- __set_bit(devidx, dev_use);
- __set_bit(devidx + 1, dev_use);
-
- bd = kzalloc(sizeof(struct cyasblkdev_blk_data), GFP_KERNEL);
- if (bd) {
- gl_bd = bd;
-
- spin_lock_init(&bd->lock);
- bd->usage = 1;
-
- /* setup the block_dev_ops pointer*/
- bd->blkops = &cyasblkdev_bdops;
-
- /* Get the device handle */
- bd->dev_handle = cyasdevice_getdevhandle();
- if (0 == bd->dev_handle) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: get device failed\n", __func__);
- #endif
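-			/* errno values are kept positive here; they are
-			 * negated via ERR_PTR(-ret) at the error exit */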
- ret = ENODEV;
- goto out;
- }
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s west bridge device handle:%x\n",
- __func__, (uint32_t)bd->dev_handle);
- #endif
-
- /* start the storage api and get a handle to the
- * device we are interested in. */
-
- /* Error code to use if the conditions are not satisfied. */
- ret = ENOMEDIUM;
-
- stat = cy_as_misc_release_resource(bd->dev_handle, cy_as_bus_0);
- if ((stat != CY_AS_ERROR_SUCCESS) &&
- (stat != CY_AS_ERROR_RESOURCE_NOT_OWNED)) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: cannot release "
- "resource bus 0 - reason code %d\n",
- __func__, stat);
- #endif
- }
-
- stat = cy_as_misc_release_resource(bd->dev_handle, cy_as_bus_1);
- if ((stat != CY_AS_ERROR_SUCCESS) &&
- (stat != CY_AS_ERROR_RESOURCE_NOT_OWNED)) {
- #ifndef WESTBRIDGE_NDEBUG
-			cy_as_hal_print_message("%s: cannot release "
-				"resource bus 1 - reason code %d\n",
- __func__, stat);
- #endif
- }
-
- /* start storage stack*/
- stat = cy_as_storage_start(bd->dev_handle, 0, 0x101);
- if (stat != CY_AS_ERROR_SUCCESS) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: cannot start storage "
- "stack - reason code %d\n", __func__, stat);
- #endif
- goto out;
- }
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: storage started:%d ok\n",
- __func__, stat);
- #endif
-
- stat = cy_as_storage_register_callback(bd->dev_handle,
- cyasblkdev_storage_callback);
- if (stat != CY_AS_ERROR_SUCCESS) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: cannot register callback "
- "- reason code %d\n", __func__, stat);
- #endif
- goto out;
- }
-
- for (bus_num = 0; bus_num < 2; bus_num++) {
- stat = cy_as_storage_query_bus(bd->dev_handle,
- bus_num, &bd->media_count[bus_num], 0, 0);
- if (stat == CY_AS_ERROR_SUCCESS) {
- total_media_count = total_media_count +
- bd->media_count[bus_num];
- } else {
- #ifndef WESTBRIDGE_NDEBUG
-				cy_as_hal_print_message("%s: cannot query bus %d, "
- "reason code: %d\n",
- __func__, bus_num, stat);
- #endif
- goto out;
- }
- }
-
- if (total_media_count == 0) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: no storage media was found\n", __func__);
- #endif
- goto out;
- } else if (total_media_count >= 1) {
- if (bd->user_disk_0 == NULL) {
-
- bd->user_disk_0 =
- alloc_disk(8);
- if (bd->user_disk_0 == NULL) {
- kfree(bd);
- bd = ERR_PTR(-ENOMEM);
- return bd;
- }
- }
- #ifndef WESTBRIDGE_NDEBUG
- else {
-				cy_as_hal_print_message("%s: gen_disk "
-					"for disk 0 already allocated, "
-					"physically inconsistent\n", __func__);
- }
- #endif
- }
-
- if (total_media_count == 2) {
- if (bd->user_disk_1 == NULL) {
- bd->user_disk_1 =
- alloc_disk(8);
- if (bd->user_disk_1 == NULL) {
- kfree(bd);
- bd = ERR_PTR(-ENOMEM);
- return bd;
- }
- }
- #ifndef WESTBRIDGE_NDEBUG
- else {
-				cy_as_hal_print_message("%s: gen_disk "
-					"for disk 1 already allocated, "
-					"physically inconsistent\n", __func__);
- }
- #endif
- }
- #ifndef WESTBRIDGE_NDEBUG
- else if (total_media_count > 2) {
-			cy_as_hal_print_message("%s: count corrupted = %d\n",
- __func__, total_media_count);
- }
- #endif
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: %d device(s) found\n",
- __func__, total_media_count);
- #endif
-
- for (bus_num = 0; bus_num <= 1; bus_num++) {
- /*claim storage for cpu */
- stat = cy_as_storage_claim(bd->dev_handle,
- bus_num, 0, 0, 0);
- if (stat != CY_AS_ERROR_SUCCESS) {
- cy_as_hal_print_message("%s: cannot claim "
- "%d bus - reason code %d\n",
- __func__, bus_num, stat);
- goto out;
- }
-
- dev_data.bus = bus_num;
- dev_data.device = 0;
-
- stat = cy_as_storage_query_device(bd->dev_handle,
- &dev_data, 0, 0);
- if (stat == CY_AS_ERROR_SUCCESS) {
- cyasblkdev_add_disks(bus_num, bd,
- total_media_count, devidx);
- } else if (stat == CY_AS_ERROR_NO_SUCH_DEVICE) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: no device on bus %d\n",
- __func__, bus_num);
- #endif
- } else {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
-					"%s: cannot query device on bus %d "
-					"- reason code %d\n",
- __func__, bus_num, stat);
- #endif
- goto out;
- }
- } /* end for (bus_num = 0; bus_num <= 1; bus_num++)*/
-
- return bd;
- }
-out:
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: bd failed to initialize\n", __func__);
- #endif
-
- kfree(bd);
- bd = ERR_PTR(-ret);
- return bd;
-}
-
-
-/*init west bridge block device */
-static int cyasblkdev_blk_initialize(void)
-{
- struct cyasblkdev_blk_data *bd;
- int res;
-
- DBGPRN_FUNC_NAME;
-
- res = register_blkdev(major, "cyasblkdev");
-
- if (res < 0) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(KERN_WARNING
- "%s unable to get major %d for cyasblkdev media: %d\n",
- __func__, major, res);
- #endif
- return res;
- }
-
- if (major == 0)
- major = res;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s cyasblkdev registered with major number: %d\n",
- __func__, major);
- #endif
-
- bd = cyasblkdev_blk_alloc();
- if (IS_ERR(bd))
- return PTR_ERR(bd);
-
- return 0;
-}
-
-/* start block device */
-static int __init cyasblkdev_blk_init(void)
-{
- int res = -ENOMEM;
-
- DBGPRN_FUNC_NAME;
-
- /* get the cyasdev handle for future use*/
- cyas_dev_handle = cyasdevice_getdevhandle();
-
- if (cyasblkdev_blk_initialize() == 0)
- return 0;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("cyasblkdev init error:%d\n", res);
- #endif
- return res;
-}
-
-
-static void cyasblkdev_blk_deinit(struct cyasblkdev_blk_data *bd)
-{
- DBGPRN_FUNC_NAME;
-
- if (bd) {
- int devidx;
-
- if (bd->user_disk_0 != NULL) {
- del_gendisk(bd->user_disk_0);
- devidx = bd->user_disk_0->first_minor
- >> CYASBLKDEV_SHIFT;
- __clear_bit(devidx, dev_use);
- }
-
- if (bd->user_disk_1 != NULL) {
- del_gendisk(bd->user_disk_1);
- devidx = bd->user_disk_1->first_minor
- >> CYASBLKDEV_SHIFT;
- __clear_bit(devidx, dev_use);
- }
-
- if (bd->system_disk != NULL) {
- del_gendisk(bd->system_disk);
- devidx = bd->system_disk->first_minor
- >> CYASBLKDEV_SHIFT;
- __clear_bit(devidx, dev_use);
- }
-
- cyasblkdev_blk_put(bd);
- }
-}
-
-/* block device exit */
-static void __exit cyasblkdev_blk_exit(void)
-{
- DBGPRN_FUNC_NAME;
-
- cyasblkdev_blk_deinit(gl_bd);
- unregister_blkdev(major, "cyasblkdev");
-
-}
-
-module_init(cyasblkdev_blk_init);
-module_exit(cyasblkdev_blk_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("antioch (cyasblkdev) block device driver");
-MODULE_AUTHOR("cypress semiconductor");
-
-/*[]*/
diff --git a/drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c b/drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c
deleted file mode 100644
index d1996a27515..00000000000
--- a/drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c
+++ /dev/null
@@ -1,417 +0,0 @@
-/* cyasblkdev_queue.c - Antioch Linux Block Driver queue source file
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor,
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-/*
- * Request queue handling for Antioch block device driver.
- * Based on the mmc queue handling code by Russell King in the
- * linux 2.6.10 kernel.
- */
-
-/*
- * linux/drivers/mmc/mmc_queue.c
- *
- * Copyright (C) 2003 Russell King, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/blkdev.h>
-
-#include "cyasblkdev_queue.h"
-
-#define CYASBLKDEV_QUEUE_EXIT (1 << 0)
-#define CYASBLKDEV_QUEUE_SUSPENDED (1 << 1)
-#define CY_AS_USE_ASYNC_API
-
-
-
-/* print flags by name */
-const char *rq_flag_bit_names[] = {
- "REQ_RW", /* not set, read. set, write */
- "REQ_FAILFAST", /* no low level driver retries */
- "REQ_SORTED", /* elevator knows about this request */
- "REQ_SOFTBARRIER", /* may not be passed by ioscheduler */
- "REQ_HARDBARRIER", /* may not be passed by drive either */
- "REQ_FUA", /* forced unit access */
- "REQ_NOMERGE", /* don't touch this for merging */
- "REQ_STARTED", /* drive already may have started this one */
- "REQ_DONTPREP", /* don't call prep for this one */
- "REQ_QUEUED", /* uses queueing */
- "REQ_ELVPRIV", /* elevator private data attached */
- "REQ_FAILED", /* set if the request failed */
- "REQ_QUIET", /* don't worry about errors */
- "REQ_PREEMPT", /* set for "ide_preempt" requests */
- "REQ_ORDERED_COLOR",/* is before or after barrier */
- "REQ_RW_SYNC", /* request is sync (O_DIRECT) */
- "REQ_ALLOCED", /* request came from our alloc pool */
- "REQ_RW_META", /* metadata io request */
- "REQ_COPY_USER", /* contains copies of user pages */
- "REQ_NR_BITS", /* stops here */
-};
-
-void verbose_rq_flags(int flags)
-{
- int i;
- uint32_t j;
- j = 1;
-	/* walk only the named bits so we never index past the table */
-	for (i = 0; i < ARRAY_SIZE(rq_flag_bit_names); i++) {
- if (flags & j)
- DBGPRN("<1>%s", rq_flag_bit_names[i]);
- j = j << 1;
- }
-}
-
-
-/*
- * Prepare a -BLK_DEV request. Essentially, this means passing the
- * preparation off to the media driver. The media driver will
- * create request to CyAsDev.
- */
-static int cyasblkdev_prep_request(
- struct request_queue *q, struct request *req)
-{
- DBGPRN_FUNC_NAME;
-
- /* we only like normal block requests.*/
- if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s:%x bad request received\n",
- __func__, current->pid);
- #endif
-
- blk_dump_rq_flags(req, "cyasblkdev bad request");
- return BLKPREP_KILL;
- }
-
- req->cmd_flags |= REQ_DONTPREP;
-
- return BLKPREP_OK;
-}
-
-/* queue worker thread */
-static int cyasblkdev_queue_thread(void *d)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct cyasblkdev_queue *bq = d;
- struct request_queue *q = bq->queue;
- u32 qth_pid;
-
- DBGPRN_FUNC_NAME;
-
- /*
- * set iothread to ensure that we aren't put to sleep by
- * the process freezing. we handle suspension ourselves.
- */
- daemonize("cyasblkdev_queue_thread");
-
-	/* signal to queue_init() so it can continue */
- complete(&bq->thread_complete);
-
- down(&bq->thread_sem);
- add_wait_queue(&bq->thread_wq, &wait);
-
- qth_pid = current->pid;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s:%x started, bq:%p, q:%p\n", __func__, qth_pid, bq, q);
- #endif
-
- do {
- struct request *req = NULL;
-
- /* the thread wants to be woken up by signals as well */
- set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_irq(q->queue_lock);
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
-			"%s: checking whether bq->req is null\n", __func__);
- #endif
-
- if (!bq->req) {
- /* chk if queue is plugged */
- if (!blk_queue_plugged(q)) {
- bq->req = req = blk_fetch_request(q);
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: blk_fetch_request:%x\n",
- __func__, (uint32_t)req);
- #endif
- } else {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: queue plugged, "
- "skip blk_fetch()\n", __func__);
- #endif
- }
- }
- spin_unlock_irq(q->queue_lock);
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: checking if request queue is null\n", __func__);
- #endif
-
- if (!req) {
- if (bq->flags & CYASBLKDEV_QUEUE_EXIT) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s:got QUEUE_EXIT flag\n", __func__);
- #endif
-
- break;
- }
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
-				"%s: request queue is null, going to sleep, "
- "thread_sem->count=%d\n",
- __func__, bq->thread_sem.count);
- if (spin_is_locked(q->queue_lock)) {
- cy_as_hal_print_message("%s: queue_lock "
- "is locked, need to release\n", __func__);
- spin_unlock(q->queue_lock);
-
- if (spin_is_locked(q->queue_lock))
- cy_as_hal_print_message(
- "%s: unlock did not work\n",
- __func__);
- } else {
- cy_as_hal_print_message(
- "%s: checked lock, is not locked\n",
- __func__);
- }
- #endif
-
- up(&bq->thread_sem);
-
-			/* yield to the next ready-to-run process,
-			 * then go back to sleep */
- schedule();
- down(&bq->thread_sem);
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: wake_up,continue\n",
- __func__);
- #endif
- continue;
- }
-
- /* new req received, issue it to the driver */
- set_current_state(TASK_RUNNING);
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: issued a RQ:%x\n",
- __func__, (uint32_t)req);
- #endif
-
- bq->issue_fn(bq, req);
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: bq->issue_fn() returned\n",
- __func__);
- #endif
-
-
- } while (1);
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&bq->thread_wq, &wait);
- up(&bq->thread_sem);
-
- complete_and_exit(&bq->thread_complete, 0);
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: is finished\n", __func__);
- #endif
-
- return 0;
-}
-
-/*
- * Generic request handler. it is called for any queue on a
- * particular host. When the host is not busy, we look for a request
- * on any queue on this host, and attempt to issue it. This may
- * not be the queue we were asked to process.
- */
-static void cyasblkdev_request(struct request_queue *q)
-{
- struct cyasblkdev_queue *bq = q->queuedata;
- DBGPRN_FUNC_NAME;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s new request on cyasblkdev_queue_t bq:=%x\n",
- __func__, (uint32_t)bq);
- #endif
-
- if (!bq->req) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s wake_up(&bq->thread_wq)\n",
- __func__);
- #endif
-
- /* wake up cyasblkdev_queue worker thread*/
- wake_up(&bq->thread_wq);
- } else {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: don't wake Q_thr, bq->req:%x\n",
- __func__, (uint32_t)bq->req);
- #endif
- }
-}
-
-/*
- * cyasblkdev_init_queue - initialise a queue structure.
- * @bq: cyasblkdev queue
- * @lock: queue lock
- *
- * Initialise a cyasblkdev_request queue.
- */
-
-/* MAX NUMBER OF SECTORS PER REQUEST **/
-#define Q_MAX_SECTORS 128
-
-/* MAX NUMBER OF PHYS SEGMENTS (entries in the SG list)*/
-#define Q_MAX_SGS 16
-
-int cyasblkdev_init_queue(struct cyasblkdev_queue *bq, spinlock_t *lock)
-{
- int ret;
-
- DBGPRN_FUNC_NAME;
-
- /* 1st param is a function that wakes up the queue thread */
- bq->queue = blk_init_queue(cyasblkdev_request, lock);
- if (!bq->queue)
- return -ENOMEM;
-
- blk_queue_prep_rq(bq->queue, cyasblkdev_prep_request);
-
- blk_queue_bounce_limit(bq->queue, BLK_BOUNCE_ANY);
- blk_queue_max_hw_sectors(bq->queue, Q_MAX_SECTORS);
-
-	/* as of now, the HAL/driver can merge scattered segments
-	 * and handle them simultaneously, so set the maximum
-	 * number of segments to Q_MAX_SGS */
- /*blk_queue_max_phys_segments(bq->queue, Q_MAX_SGS);
- blk_queue_max_hw_segments(bq->queue, Q_MAX_SGS);*/
- blk_queue_max_segments(bq->queue, Q_MAX_SGS);
-
-	/* should be less than what the HAL can handle */
- blk_queue_max_segment_size(bq->queue, 512*Q_MAX_SECTORS);
-
- bq->queue->queuedata = bq;
- bq->req = NULL;
-
- init_completion(&bq->thread_complete);
- init_waitqueue_head(&bq->thread_wq);
- sema_init(&bq->thread_sem, 1);
-
- ret = kernel_thread(cyasblkdev_queue_thread, bq, CLONE_KERNEL);
- if (ret >= 0) {
- /* wait until the thread is spawned */
- wait_for_completion(&bq->thread_complete);
-
- /* reinitialize the completion */
- init_completion(&bq->thread_complete);
- ret = 0;
- goto out;
- }
-
-out:
- return ret;
-}
-EXPORT_SYMBOL(cyasblkdev_init_queue);
-
-/*called from blk_put() */
-void cyasblkdev_cleanup_queue(struct cyasblkdev_queue *bq)
-{
- DBGPRN_FUNC_NAME;
-
- bq->flags |= CYASBLKDEV_QUEUE_EXIT;
- wake_up(&bq->thread_wq);
- wait_for_completion(&bq->thread_complete);
-
- blk_cleanup_queue(bq->queue);
-}
-EXPORT_SYMBOL(cyasblkdev_cleanup_queue);
-
-
-/**
- * cyasblkdev_queue_suspend - suspend a CyAsBlkDev request queue
- * @bq: CyAsBlkDev queue to suspend
- *
- * Stop the block request queue, and wait for our thread to
- * complete any outstanding requests. This ensures that we
- * won't suspend while a request is being processed.
- */
-void cyasblkdev_queue_suspend(struct cyasblkdev_queue *bq)
-{
- struct request_queue *q = bq->queue;
- unsigned long flags;
-
- DBGPRN_FUNC_NAME;
-
- if (!(bq->flags & CYASBLKDEV_QUEUE_SUSPENDED)) {
- bq->flags |= CYASBLKDEV_QUEUE_SUSPENDED;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- down(&bq->thread_sem);
- }
-}
-EXPORT_SYMBOL(cyasblkdev_queue_suspend);
-
-/**
- * cyasblkdev_queue_resume - resume a previously suspended
- * CyAsBlkDev request queue
- * @bq: CyAsBlkDev queue to resume
- */
-void cyasblkdev_queue_resume(struct cyasblkdev_queue *bq)
-{
- struct request_queue *q = bq->queue;
- unsigned long flags;
-
- DBGPRN_FUNC_NAME;
-
- if (bq->flags & CYASBLKDEV_QUEUE_SUSPENDED) {
- bq->flags &= ~CYASBLKDEV_QUEUE_SUSPENDED;
-
- up(&bq->thread_sem);
-
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
-}
-EXPORT_SYMBOL(cyasblkdev_queue_resume);
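-
-/* A minimal usage sketch (hypothetical caller) of the suspend/resume
- * pair above: suspend stops the request queue and takes thread_sem so
- * that no request is in flight; resume releases the semaphore and
- * restarts the queue:
- *
- *	cyasblkdev_queue_suspend(bq);
- *	... put the device or bus into a low-power state ...
- *	cyasblkdev_queue_resume(bq);
- */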
-
-/*[]*/
diff --git a/drivers/staging/westbridge/astoria/block/cyasblkdev_queue.h b/drivers/staging/westbridge/astoria/block/cyasblkdev_queue.h
deleted file mode 100644
index 51cba6ae671..00000000000
--- a/drivers/staging/westbridge/astoria/block/cyasblkdev_queue.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* cyasblkdev_queue.h - Antioch Linux Block Driver queue header file
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYANBLKDEV_QUEUE_H_
-#define _INCLUDED_CYANBLKDEV_QUEUE_H_
-
-/*
- * may contain various useful macros and debug printks
- */
-
-/* moved to staging location; the eventual intended
- * include locations are:
- * #include <linux/westbridge/cyashal.h>
- * #include <linux/westbridge/cyastoria.h>
- * */
-
-#include "../include/linux/westbridge/cyashal.h"
-#include "../include/linux/westbridge/cyastoria.h"
-
-struct request;
-struct task_struct;
-
-struct cyasblkdev_queue {
- struct completion thread_complete;
- wait_queue_head_t thread_wq;
- struct semaphore thread_sem;
- unsigned int flags;
- struct request *req;
- int (*prep_fn)(struct cyasblkdev_queue *, struct request *);
- int (*issue_fn)(struct cyasblkdev_queue *, struct request *);
- void *data;
- struct request_queue *queue;
-};
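-
-/* prep_fn and issue_fn are expected to be supplied by the block-driver
- * side that owns the queue (cyasblkdev_blk.c); the queue thread calls
- * issue_fn() for each request it fetches from the block layer */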
-
-extern int cyasblkdev_init_queue(struct cyasblkdev_queue *, spinlock_t *);
-extern void cyasblkdev_cleanup_queue(struct cyasblkdev_queue *);
-extern void cyasblkdev_queue_suspend(struct cyasblkdev_queue *);
-extern void cyasblkdev_queue_resume(struct cyasblkdev_queue *);
-
-extern cy_as_device_handle cyasdevice_getdevhandle(void);
-#define MOD_LOGS 1
-void verbose_rq_flags(int flags);
-
-#endif /* _INCLUDED_CYANBLKDEV_QUEUE_H_ */
-
-/*[]*/
diff --git a/drivers/staging/westbridge/astoria/device/Kconfig b/drivers/staging/westbridge/astoria/device/Kconfig
deleted file mode 100644
index cc99658cf3a..00000000000
--- a/drivers/staging/westbridge/astoria/device/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# West Bridge block driver configuration
-#
-
-config WESTBRIDGE_DEVICE_DRIVER
- tristate "West Bridge Device Driver"
- help
- Include the West Bridge based device driver
-
diff --git a/drivers/staging/westbridge/astoria/device/Makefile b/drivers/staging/westbridge/astoria/device/Makefile
deleted file mode 100644
index 7af8b5b0a8f..00000000000
--- a/drivers/staging/westbridge/astoria/device/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Makefile for the kernel westbridge device driver
-#
-
-ifneq ($(CONFIG_WESTBRIDGE_DEBUG),y)
- EXTRA_CFLAGS += -DWESTBRIDGE_NDEBUG
-endif
-
-obj-$(CONFIG_WESTBRIDGE_DEVICE_DRIVER) += cyasdev.o
-
-
-ifeq ($(CONFIG_MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL),y)
-#moved for staging compatibility
-#cyasdev-y := ../../../arch/arm/mach-omap2/cyashalomap_kernel.o cyasdevice.o
-cyasdev-y := ../arch/arm/mach-omap2/cyashalomap_kernel.o cyasdevice.o \
- ../api/src/cyasdma.o ../api/src/cyasintr.o ../api/src/cyaslep2pep.o \
- ../api/src/cyaslowlevel.o ../api/src/cyasmisc.o ../api/src/cyasmtp.o \
- ../api/src/cyasstorage.o ../api/src/cyasusb.o
-
-else
-# should not get here, need to be built with some hal
-cyasdev-y := cyasdevice.o
-endif
diff --git a/drivers/staging/westbridge/astoria/device/cyasdevice.c b/drivers/staging/westbridge/astoria/device/cyasdevice.c
deleted file mode 100644
index 7de35ccffd3..00000000000
--- a/drivers/staging/westbridge/astoria/device/cyasdevice.c
+++ /dev/null
@@ -1,409 +0,0 @@
-/*
-## cyasdevice.c - Linux Antioch device driver file
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/sched.h>
-#include <linux/scatterlist.h>
-#include <linux/err.h>
-#include <linux/firmware.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-/* moved for the staging-location update/patch submission;
- * the eventual intended includes are:
-#include <linux/westbridge/cyastoria.h>
-#include <linux/westbridge/cyashal.h>
-#include <linux/westbridge/cyasregs.h>
-*/
-
-#include "../include/linux/westbridge/cyastoria.h"
-#include "../include/linux/westbridge/cyashal.h"
-#include "../include/linux/westbridge/cyasregs.h"
-
-typedef struct cyasdevice {
- /* Handle to the Antioch device */
- cy_as_device_handle dev_handle;
- /* Handle to the HAL */
- cy_as_hal_device_tag hal_tag;
- spinlock_t common_lock;
- unsigned long flags;
-} cyasdevice;
-
-/* global ptr to astoria device */
-static cyasdevice *cy_as_device_controller;
-int cy_as_device_init_done;
-const char *dev_handle_name = "cy_astoria_dev_handle";
-
-#ifdef CONFIG_MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL
-extern void cy_as_hal_config_c_s_mux(void);
-#endif
-
-static void cyasdevice_deinit(cyasdevice *cy_as_dev)
-{
- cy_as_hal_print_message("<1>_cy_as_device deinitialize called\n");
- if (!cy_as_dev) {
- cy_as_hal_print_message("<1>_cy_as_device_deinit: "
- "device handle %x is invalid\n", (uint32_t)cy_as_dev);
- return;
- }
-
-	/* stop west_bridge */
- if (cy_as_dev->dev_handle) {
- cy_as_hal_print_message("<1>_cy_as_device: "
- "cy_as_misc_destroy_device called\n");
- if (cy_as_misc_destroy_device(cy_as_dev->dev_handle) !=
- CY_AS_ERROR_SUCCESS) {
- cy_as_hal_print_message(
- "<1>_cy_as_device: destroying failed\n");
- }
- }
-
- if (cy_as_dev->hal_tag) {
-
- #ifdef CONFIG_MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL
- if (stop_o_m_a_p_kernel(dev_handle_name,
- cy_as_dev->hal_tag) != 0)
- cy_as_hal_print_message("<1>_cy_as_device: stopping "
- "OMAP kernel HAL failed\n");
-
- #endif
- }
- cy_as_hal_print_message("<1>_cy_as_device:HAL layer stopped\n");
-
- kfree(cy_as_dev);
- cy_as_device_controller = NULL;
- cy_as_hal_print_message("<1>_cy_as_device: deinitialized\n");
-}
-
-/*called from src/cyasmisc.c:MyMiscCallback() as a func
- * pointer [dev_p->misc_event_cb] which was previously
- * registered by CyAsLLRegisterRequestCallback(...,
- * MyMiscCallback); called from CyAsMiscConfigureDevice()
- * which is in turn called from cyasdevice_initialize() in
- * this src
- */
-static void cy_misc_callback(cy_as_device_handle h,
- cy_as_misc_event_type evtype, void *evdata)
-{
- (void)h;
- (void)evdata;
-
- switch (evtype) {
- case cy_as_event_misc_initialized:
- cy_as_hal_print_message("<1>_cy_as_device: "
- "initialization done callback triggered\n");
- cy_as_device_init_done = 1;
- break;
-
- case cy_as_event_misc_awake:
- cy_as_hal_print_message("<1>_cy_as_device: "
- "cy_as_event_misc_awake event callback triggered\n");
- cy_as_device_init_done = 1;
- break;
- default:
- break;
- }
-}
-
-void cy_as_acquire_common_lock()
-{
- spin_lock_irqsave(&cy_as_device_controller->common_lock,
- cy_as_device_controller->flags);
-}
-EXPORT_SYMBOL(cy_as_acquire_common_lock);
-
-void cy_as_release_common_lock()
-{
- spin_unlock_irqrestore(&cy_as_device_controller->common_lock,
- cy_as_device_controller->flags);
-}
-EXPORT_SYMBOL(cy_as_release_common_lock);
-
-/* reset astoria and reinit all regs */
- #define PNAND_REG_CFG_INIT_VAL 0x0000
-void hal_reset(cy_as_hal_device_tag tag)
-{
-	cy_as_hal_print_message("<1> send hard reset: "
- "MEM_RST_CTRL_REG_HARD...\n");
- cy_as_hal_write_register(tag, CY_AS_MEM_RST_CTRL_REG,
- CY_AS_MEM_RST_CTRL_REG_HARD);
- mdelay(60);
-
- cy_as_hal_print_message("<1> after RST: si_rev_REG:%x, "
- "PNANDCFG_reg:%x\n",
- cy_as_hal_read_register(tag, CY_AS_MEM_CM_WB_CFG_ID),
- cy_as_hal_read_register(tag, CY_AS_MEM_PNAND_CFG)
- );
-
- /* set it to LBD */
- cy_as_hal_write_register(tag, CY_AS_MEM_PNAND_CFG,
- PNAND_REG_CFG_INIT_VAL);
-}
-EXPORT_SYMBOL(hal_reset);
-
-
-/* below structures and functions primarily
- * implemented for firmware loading */
-static struct platform_device *westbridge_pd;
-
-static int __devinit wb_probe(struct platform_device *devptr)
-{
- cy_as_hal_print_message("%s called\n", __func__);
- return 0;
-}
-
-static int __devexit wb_remove(struct platform_device *devptr)
-{
- cy_as_hal_print_message("%s called\n", __func__);
- return 0;
-}
-
-static struct platform_driver west_bridge_driver = {
- .probe = wb_probe,
- .remove = __devexit_p(wb_remove),
- .driver = {
- .name = "west_bridge_dev"},
-};
-
-/* west bridge device driver main init */
-static int cyasdevice_initialize(void)
-{
- cyasdevice *cy_as_dev = 0;
- int ret = 0;
- int retval = 0;
- cy_as_device_config config;
- cy_as_hal_sleep_channel channel;
- cy_as_get_firmware_version_data ver_data = {0};
- const char *str = "";
- int spin_lim;
- const struct firmware *fw_entry;
-
- cy_as_device_init_done = 0;
-
- cy_as_misc_set_log_level(8);
-
- cy_as_hal_print_message("<1>_cy_as_device initialize called\n");
-
- if (cy_as_device_controller != 0) {
- cy_as_hal_print_message("<1>_cy_as_device: the device "
-			"has already been initialized, ignoring\n");
- return -EBUSY;
- }
-
- /* cy_as_dev = CyAsHalAlloc (sizeof(cyasdevice), SLAB_KERNEL); */
- cy_as_dev = cy_as_hal_alloc(sizeof(cyasdevice));
- if (cy_as_dev == NULL) {
- cy_as_hal_print_message("<1>_cy_as_device: "
- "memory allocation failed\n");
- return -ENOMEM;
- }
- memset(cy_as_dev, 0, sizeof(cyasdevice));
-
-
- /* Init the HAL & CyAsDeviceHandle */
-
- #ifdef CONFIG_MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL
-	/* start OMAP HAL init instance */
-
- if (!start_o_m_a_p_kernel(dev_handle_name,
- &(cy_as_dev->hal_tag), cy_false)) {
-
- cy_as_hal_print_message(
- "<1>_cy_as_device: start OMAP34xx HAL failed\n");
- goto done;
- }
- #endif
-
- /* Now create the device */
- if (cy_as_misc_create_device(&(cy_as_dev->dev_handle),
- cy_as_dev->hal_tag) != CY_AS_ERROR_SUCCESS) {
-
- cy_as_hal_print_message(
- "<1>_cy_as_device: create device failed\n");
- goto done;
- }
-
- memset(&config, 0, sizeof(config));
- config.dmaintr = cy_true;
-
- ret = cy_as_misc_configure_device(cy_as_dev->dev_handle, &config);
- if (ret != CY_AS_ERROR_SUCCESS) {
-
- cy_as_hal_print_message(
- "<1>_cy_as_device: configure device "
- "failed. reason code: %d\n", ret);
- goto done;
- }
-
- ret = cy_as_misc_register_callback(cy_as_dev->dev_handle,
- cy_misc_callback);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_hal_print_message("<1>_cy_as_device: "
- "cy_as_misc_register_callback failed. "
- "reason code: %d\n", ret);
- goto done;
- }
-
- ret = platform_driver_register(&west_bridge_driver);
- if (unlikely(ret < 0))
- return ret;
- westbridge_pd = platform_device_register_simple(
- "west_bridge_dev", -1, NULL, 0);
-
- if (IS_ERR(westbridge_pd)) {
- platform_driver_unregister(&west_bridge_driver);
- return PTR_ERR(westbridge_pd);
- }
- /* Load the firmware */
- ret = request_firmware(&fw_entry,
- "west bridge fw", &westbridge_pd->dev);
- if (ret) {
- cy_as_hal_print_message("cy_as_device: "
- "request_firmware failed return val = %d\n", ret);
- } else {
- cy_as_hal_print_message("cy_as_device: "
- "got the firmware %d size=0x%x\n", ret, fw_entry->size);
-
- ret = cy_as_misc_download_firmware(
- cy_as_dev->dev_handle,
- fw_entry->data,
- fw_entry->size ,
- 0, 0);
- }
-
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_hal_print_message("<1>_cy_as_device: cannot download "
- "firmware. reason code: %d\n", ret);
- goto done;
- }
-
-	/* spin until the device init is completed;
-	 * 50 iterations of 100 ms each, so the max wait for the
-	 * FW load & init to complete is 5 sec */
- spin_lim = 50;
-
- cy_as_hal_create_sleep_channel(&channel);
- while (!cy_as_device_init_done) {
-
- cy_as_hal_sleep_on(&channel, 100);
-
- if (spin_lim-- <= 0) {
- cy_as_hal_print_message(
- "<1>\n_e_r_r_o_r!: "
- "wait for FW init has timed out !!!");
- break;
- }
- }
- cy_as_hal_destroy_sleep_channel(&channel);
-
- if (spin_lim > 0)
- cy_as_hal_print_message(
- "cy_as_device: astoria firmware is loaded\n");
-
- ret = cy_as_misc_get_firmware_version(cy_as_dev->dev_handle,
- &ver_data, 0, 0);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_hal_print_message("<1>_cy_as_device: cannot get firmware "
- "version. reason code: %d\n", ret);
- goto done;
- }
-
- if ((ver_data.media_type & 0x01) && (ver_data.media_type & 0x06))
- str = "nand and SD/MMC.";
- else if ((ver_data.media_type & 0x01) && (ver_data.media_type & 0x08))
- str = "nand and CEATA.";
- else if (ver_data.media_type & 0x01)
- str = "nand.";
- else if (ver_data.media_type & 0x08)
- str = "CEATA.";
- else
- str = "SD/MMC.";
-
- cy_as_hal_print_message("<1> cy_as_device:_firmware version: %s "
- "major=%d minor=%d build=%d,\n_media types supported:%s\n",
- ((ver_data.is_debug_mode) ? "debug" : "release"),
- ver_data.major, ver_data.minor, ver_data.build, str);
-
- spin_lock_init(&cy_as_dev->common_lock);
-
- /* done now */
- cy_as_device_controller = cy_as_dev;
-
- return 0;
-
-done:
- if (cy_as_dev)
- cyasdevice_deinit(cy_as_dev);
-
- return -EINVAL;
-}
-
-cy_as_device_handle cyasdevice_getdevhandle(void)
-{
- if (cy_as_device_controller) {
- #ifdef CONFIG_MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL
- cy_as_hal_config_c_s_mux();
- #endif
-
- return cy_as_device_controller->dev_handle;
- }
- return NULL;
-}
-EXPORT_SYMBOL(cyasdevice_getdevhandle);
-
-cy_as_hal_device_tag cyasdevice_gethaltag(void)
-{
- if (cy_as_device_controller)
- return (cy_as_hal_device_tag)
- cy_as_device_controller->hal_tag;
-
- return NULL;
-}
-EXPORT_SYMBOL(cyasdevice_gethaltag);
-
-
-/*init Westbridge device driver **/
-static int __init cyasdevice_init(void)
-{
- if (cyasdevice_initialize() != 0)
- return -ENODEV;
-
- return 0;
-}
-
-
-static void __exit cyasdevice_cleanup(void)
-{
-
- cyasdevice_deinit(cy_as_device_controller);
-}
-
-
-MODULE_DESCRIPTION("west bridge device driver");
-MODULE_AUTHOR("cypress semiconductor");
-MODULE_LICENSE("GPL");
-
-module_init(cyasdevice_init);
-module_exit(cyasdevice_cleanup);
-
-/*[]*/
diff --git a/drivers/staging/westbridge/astoria/gadget/Kconfig b/drivers/staging/westbridge/astoria/gadget/Kconfig
deleted file mode 100644
index 6fbdf2277b0..00000000000
--- a/drivers/staging/westbridge/astoria/gadget/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# West Bridge gadget driver configuration
-#
-
-config WESTBRIDGE_GADGET_DRIVER
- tristate "West Bridge Gadget Driver"
- help
- Include the West Bridge based gadget peripheral controller driver
-
diff --git a/drivers/staging/westbridge/astoria/gadget/Makefile b/drivers/staging/westbridge/astoria/gadget/Makefile
deleted file mode 100644
index a5eef7ee60a..00000000000
--- a/drivers/staging/westbridge/astoria/gadget/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for the kernel westbridge hal
-#
-
-ifneq ($(CONFIG_WESTBRIDGE_DEBUG),y)
- EXTRA_CFLAGS += -DWESTBRIDGE_NDEBUG
-endif
-
-obj-$(CONFIG_WESTBRIDGE_GADGET_DRIVER) += cyasgadgetctrl.o
-cyasgadgetctrl-y := cyasgadget.o
-
diff --git a/drivers/staging/westbridge/astoria/gadget/cyasgadget.c b/drivers/staging/westbridge/astoria/gadget/cyasgadget.c
deleted file mode 100644
index be851ca54ce..00000000000
--- a/drivers/staging/westbridge/astoria/gadget/cyasgadget.c
+++ /dev/null
@@ -1,2177 +0,0 @@
-/* cyasgadget.c - Linux USB Gadget driver file for the Cypress West Bridge
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-/*
- * Cypress West Bridge high/full speed usb device controller code
- * Based on the Netchip 2280 device controller by David Brownell
- * in the linux 2.6.10 kernel
- *
- * linux/drivers/usb/gadget/net2280.c
- */
-
-/*
- * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
- * Copyright (C) 2003 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330
- * Boston, MA 02111-1307 USA
- */
-
-#include "cyasgadget.h"
-
-#define CY_AS_DRIVER_DESC "cypress west bridge usb gadget"
-#define CY_AS_DRIVER_VERSION "REV B"
-#define DMA_ADDR_INVALID (~(dma_addr_t)0)
-
-static const char cy_as_driver_name[] = "cy_astoria_gadget";
-static const char cy_as_driver_desc[] = CY_AS_DRIVER_DESC;
-
-static const char cy_as_ep0name[] = "EP0";
-static const char *cy_as_ep_names[] = {
- cy_as_ep0name, "EP1",
- "EP2", "EP3", "EP4", "EP5", "EP6", "EP7", "EP8",
- "EP9", "EP10", "EP11", "EP12", "EP13", "EP14", "EP15"
-};
-
-/* forward declarations */
-static void
-cyas_ep_reset(
- struct cyasgadget_ep *an_ep);
-
-static int
-cyasgadget_fifo_status(
- struct usb_ep *_ep);
-
-static void
-cyasgadget_stallcallback(
- cy_as_device_handle h,
- cy_as_return_status_t status,
- uint32_t tag,
- cy_as_funct_c_b_type cbtype,
- void *cbdata);
-
-/* variables */
-static cyasgadget *cy_as_gadget_controller;
-
-static int append_mtp;
-module_param(append_mtp, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(append_mtp,
- "west bridge to append descriptors for mtp 0=no 1=yes");
-
-static int msc_enum_bus_0;
-module_param(msc_enum_bus_0, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(msc_enum_bus_0,
- "west bridge to enumerate bus 0 as msc 0=no 1=yes");
-
-static int msc_enum_bus_1;
-module_param(msc_enum_bus_1, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(msc_enum_bus_1,
- "west bridge to enumerate bus 1 as msc 0=no 1=yes");
-
-/* all Callbacks are placed in this subsection*/
-static void cy_as_gadget_usb_event_callback(
- cy_as_device_handle h,
- cy_as_usb_event ev,
- void *evdata
- )
-{
- cyasgadget *cy_as_dev;
- #ifndef WESTBRIDGE_NDEBUG
- struct usb_ctrlrequest *ctrlreq;
- #endif
-
- /* cy_as_dev = container_of(h, cyasgadget, dev_handle); */
- cy_as_dev = cy_as_gadget_controller;
- switch (ev) {
- case cy_as_event_usb_suspend:
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "<1>_cy_as_event_usb_suspend received\n");
- #endif
- cy_as_dev->driver->suspend(&cy_as_dev->gadget);
- break;
-
- case cy_as_event_usb_resume:
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "<1>_cy_as_event_usb_resume received\n");
- #endif
- cy_as_dev->driver->resume(&cy_as_dev->gadget);
- break;
-
- case cy_as_event_usb_reset:
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "<1>_cy_as_event_usb_reset received\n");
- #endif
- break;
-
- case cy_as_event_usb_speed_change:
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "<1>_cy_as_event_usb_speed_change received\n");
- #endif
- break;
-
- case cy_as_event_usb_set_config:
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "<1>_cy_as_event_usb_set_config received\n");
- #endif
- break;
-
- case cy_as_event_usb_setup_packet:
- #ifndef WESTBRIDGE_NDEBUG
- ctrlreq = (struct usb_ctrlrequest *)evdata;
-
- cy_as_hal_print_message("<1>_cy_as_event_usb_setup_packet "
-			"received: "
-			"bRequestType=0x%x, "
-			"bRequest=0x%x, "
-			"wValue=0x%x, "
-			"wIndex=0x%x, "
-			"wLength=0x%x\n",
- ctrlreq->bRequestType,
- ctrlreq->bRequest,
- ctrlreq->wValue,
- ctrlreq->wIndex,
- ctrlreq->wLength
- );
- #endif
- cy_as_dev->outsetupreq = 0;
- if ((((uint8_t *)evdata)[0] & USB_DIR_IN) == USB_DIR_OUT)
- cy_as_dev->outsetupreq = 1;
- cy_as_dev->driver->setup(&cy_as_dev->gadget,
- (struct usb_ctrlrequest *)evdata);
- break;
-
- case cy_as_event_usb_status_packet:
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "<1>_cy_as_event_usb_status_packet received\n");
- #endif
- break;
-
- case cy_as_event_usb_inquiry_before:
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "<1>_cy_as_event_usb_inquiry_before received\n");
- #endif
- break;
-
- case cy_as_event_usb_inquiry_after:
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "<1>_cy_as_event_usb_inquiry_after received\n");
- #endif
- break;
-
- case cy_as_event_usb_start_stop:
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "<1>_cy_as_event_usb_start_stop received\n");
- #endif
- break;
-
- default:
- break;
- }
-}
-
-static void cy_as_gadget_mtp_event_callback(
- cy_as_device_handle handle,
- cy_as_mtp_event evtype,
- void *evdata
- )
-{
-
- cyasgadget *dev = cy_as_gadget_controller;
- (void) handle;
-
- switch (evtype) {
- case cy_as_mtp_send_object_complete:
- {
- cy_as_mtp_send_object_complete_data *send_obj_data =
- (cy_as_mtp_send_object_complete_data *) evdata;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "<6>MTP EVENT: send_object_complete\n");
- cy_as_hal_print_message(
- "<6>_bytes sent = %d\n_send status = %d",
- send_obj_data->byte_count,
- send_obj_data->status);
- #endif
-
- dev->tmtp_send_complete_data.byte_count =
- send_obj_data->byte_count;
- dev->tmtp_send_complete_data.status =
- send_obj_data->status;
- dev->tmtp_send_complete_data.transaction_id =
- send_obj_data->transaction_id;
- dev->tmtp_send_complete = cy_true;
- break;
- }
- case cy_as_mtp_get_object_complete:
- {
- cy_as_mtp_get_object_complete_data *get_obj_data =
- (cy_as_mtp_get_object_complete_data *) evdata;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "<6>MTP EVENT: get_object_complete\n");
- cy_as_hal_print_message(
- "<6>_bytes got = %d\n_get status = %d",
- get_obj_data->byte_count, get_obj_data->status);
- #endif
-
- dev->tmtp_get_complete_data.byte_count =
- get_obj_data->byte_count;
- dev->tmtp_get_complete_data.status =
- get_obj_data->status;
- dev->tmtp_get_complete = cy_true;
- break;
- }
- case cy_as_mtp_block_table_needed:
- {
- dev->tmtp_need_new_blk_tbl = cy_true;
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "<6>MTP EVENT: cy_as_mtp_block_table_needed\n");
- #endif
- break;
- }
- default:
- break;
- }
-}
-
-static void
-cyasgadget_setupreadcallback(
- cy_as_device_handle h,
- cy_as_end_point_number_t ep,
- uint32_t count,
- void *buf,
- cy_as_return_status_t status)
-{
- cyasgadget_ep *an_ep;
- cyasgadget_req *an_req;
- cyasgadget *cy_as_dev;
- unsigned stopped;
- unsigned long flags;
- (void)buf;
-
- cy_as_dev = cy_as_gadget_controller;
- if (cy_as_dev->driver == NULL)
- return;
-
- an_ep = &cy_as_dev->an_gadget_ep[ep];
- spin_lock_irqsave(&cy_as_dev->lock, flags);
- stopped = an_ep->stopped;
-
-#ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: ep=%d, count=%d, "
- "status=%d\n", __func__, ep, count, status);
-#endif
-
- an_req = list_entry(an_ep->queue.next,
- cyasgadget_req, queue);
- list_del_init(&an_req->queue);
-
- if (status == CY_AS_ERROR_SUCCESS)
- an_req->req.status = 0;
- else
- an_req->req.status = -status;
- an_req->req.actual = count;
- an_ep->stopped = 1;
-
- spin_unlock_irqrestore(&cy_as_dev->lock, flags);
-
- an_req->req.complete(&an_ep->usb_ep_inst, &an_req->req);
-
- an_ep->stopped = stopped;
-
-}
-/*called when the write of a setup packet has been completed*/
-static void cyasgadget_setupwritecallback(
- cy_as_device_handle h,
- cy_as_end_point_number_t ep,
- uint32_t count,
- void *buf,
- cy_as_return_status_t status
- )
-{
- cyasgadget_ep *an_ep;
- cyasgadget_req *an_req;
- cyasgadget *cy_as_dev;
- unsigned stopped;
- unsigned long flags;
-
- (void)buf;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called status=0x%x\n",
- __func__, status);
- #endif
-
- cy_as_dev = cy_as_gadget_controller;
-
- if (cy_as_dev->driver == NULL)
- return;
-
- an_ep = &cy_as_dev->an_gadget_ep[ep];
-
- spin_lock_irqsave(&cy_as_dev->lock, flags);
-
- stopped = an_ep->stopped;
-
-#ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("setup_write_callback: ep=%d, "
- "count=%d, status=%d\n", ep, count, status);
-#endif
-
- an_req = list_entry(an_ep->queue.next, cyasgadget_req, queue);
- list_del_init(&an_req->queue);
-
- an_req->req.actual = count;
- an_req->req.status = 0;
- an_ep->stopped = 1;
-
- spin_unlock_irqrestore(&cy_as_dev->lock, flags);
-
- an_req->req.complete(&an_ep->usb_ep_inst, &an_req->req);
-
- an_ep->stopped = stopped;
-
-}
-
-/* called when a read operation has completed.*/
-static void cyasgadget_readcallback(
- cy_as_device_handle h,
- cy_as_end_point_number_t ep,
- uint32_t count,
- void *buf,
- cy_as_return_status_t status
- )
-{
- cyasgadget_ep *an_ep;
- cyasgadget_req *an_req;
- cyasgadget *cy_as_dev;
- unsigned stopped;
- cy_as_return_status_t ret;
- unsigned long flags;
-
- (void)h;
- (void)buf;
-
- cy_as_dev = cy_as_gadget_controller;
-
- if (cy_as_dev->driver == NULL)
- return;
-
- an_ep = &cy_as_dev->an_gadget_ep[ep];
- stopped = an_ep->stopped;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: ep=%d, count=%d, status=%d\n",
- __func__, ep, count, status);
- #endif
-
- if (status == CY_AS_ERROR_CANCELED)
- return;
-
- spin_lock_irqsave(&cy_as_dev->lock, flags);
-
- an_req = list_entry(an_ep->queue.next, cyasgadget_req, queue);
- list_del_init(&an_req->queue);
-
- if (status == CY_AS_ERROR_SUCCESS)
- an_req->req.status = 0;
- else
- an_req->req.status = -status;
-
- an_req->complete = 1;
- an_req->req.actual = count;
- an_ep->stopped = 1;
-
- spin_unlock_irqrestore(&cy_as_dev->lock, flags);
- an_req->req.complete(&an_ep->usb_ep_inst, &an_req->req);
-
- an_ep->stopped = stopped;
-
- /* We need to call ReadAsync on this end-point
- * again, so as to not miss any data packets. */
- if (!an_ep->stopped) {
- spin_lock_irqsave(&cy_as_dev->lock, flags);
- an_req = 0;
- if (!list_empty(&an_ep->queue))
- an_req = list_entry(an_ep->queue.next,
- cyasgadget_req, queue);
-
- spin_unlock_irqrestore(&cy_as_dev->lock, flags);
-
- if ((an_req) && (an_req->req.status == -EINPROGRESS)) {
- ret = cy_as_usb_read_data_async(cy_as_dev->dev_handle,
- an_ep->num, cy_false, an_req->req.length,
- an_req->req.buf, cyasgadget_readcallback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_read_data_async failed "
- "with error code %d\n", ret);
- else
- an_req->req.status = -EALREADY;
- }
- }
-}
-
-/* function is called when a usb write operation has completed*/
-static void cyasgadget_writecallback(
- cy_as_device_handle h,
- cy_as_end_point_number_t ep,
- uint32_t count,
- void *buf,
- cy_as_return_status_t status
- )
-{
- cyasgadget_ep *an_ep;
- cyasgadget_req *an_req;
- cyasgadget *cy_as_dev;
- unsigned stopped = 0;
- cy_as_return_status_t ret;
- unsigned long flags;
-
- (void)h;
- (void)buf;
-
- cy_as_dev = cy_as_gadget_controller;
- if (cy_as_dev->driver == NULL)
- return;
-
- an_ep = &cy_as_dev->an_gadget_ep[ep];
-
- if (status == CY_AS_ERROR_CANCELED)
- return;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: ep=%d, count=%d, status=%d\n",
- __func__, ep, count, status);
- #endif
-
- spin_lock_irqsave(&cy_as_dev->lock, flags);
-
- an_req = list_entry(an_ep->queue.next, cyasgadget_req, queue);
- list_del_init(&an_req->queue);
- an_req->req.actual = count;
-
- /* Verify the status value before setting req.status to zero */
- if (status == CY_AS_ERROR_SUCCESS)
- an_req->req.status = 0;
- else
- an_req->req.status = -status;
-
- an_ep->stopped = 1;
-
- spin_unlock_irqrestore(&cy_as_dev->lock, flags);
-
- an_req->req.complete(&an_ep->usb_ep_inst, &an_req->req);
- an_ep->stopped = stopped;
-
- /* We need to call WriteAsync on this end-point again, so as to not
- miss any data packets. */
- if (!an_ep->stopped) {
- spin_lock_irqsave(&cy_as_dev->lock, flags);
- an_req = 0;
- if (!list_empty(&an_ep->queue))
- an_req = list_entry(an_ep->queue.next,
- cyasgadget_req, queue);
- spin_unlock_irqrestore(&cy_as_dev->lock, flags);
-
- if ((an_req) && (an_req->req.status == -EINPROGRESS)) {
- ret = cy_as_usb_write_data_async(cy_as_dev->dev_handle,
- an_ep->num, an_req->req.length, an_req->req.buf,
- cy_false, cyasgadget_writecallback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_write_data_async "
- "failed with error code %d\n", ret);
- else
- an_req->req.status = -EALREADY;
- }
- }
-}
-
-static void cyasgadget_stallcallback(
- cy_as_device_handle h,
- cy_as_return_status_t status,
- uint32_t tag,
- cy_as_funct_c_b_type cbtype,
- void *cbdata
- )
-{
- #ifndef WESTBRIDGE_NDEBUG
- if (status != CY_AS_ERROR_SUCCESS)
- cy_as_hal_print_message("<1>_set/_clear stall "
- "failed with status %d\n", status);
- #endif
-}
-
-
-/*******************************************************************/
-/* All usb_ep_ops (cyasgadget_ep_ops) are placed in this subsection*/
-/*******************************************************************/
-static int cyasgadget_enable(
- struct usb_ep *_ep,
- const struct usb_endpoint_descriptor *desc
- )
-{
- cyasgadget *an_dev;
- cyasgadget_ep *an_ep;
- u32 max, tmp;
- unsigned long flags;
-
- an_ep = container_of(_ep, cyasgadget_ep, usb_ep_inst);
- if (!_ep || !desc || an_ep->desc || _ep->name == cy_as_ep0name
- || desc->bDescriptorType != USB_DT_ENDPOINT)
- return -EINVAL;
-
- an_dev = an_ep->dev;
- if (!an_dev->driver || an_dev->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- max = le16_to_cpu(desc->wMaxPacketSize) & 0x1fff;
-
- spin_lock_irqsave(&an_dev->lock, flags);
- _ep->maxpacket = max & 0x7ff;
- an_ep->desc = desc;
-
- /* ep_reset() has already been called */
- an_ep->stopped = 0;
- an_ep->out_overflow = 0;
-
- if (an_ep->cyepconfig.enabled != cy_true) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_end_point_config EP %s mismatch "
- "on enabled\n", an_ep->usb_ep_inst.name);
- #endif
- spin_unlock_irqrestore(&an_dev->lock, flags);
- return -EINVAL;
- }
-
- tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
- an_ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0;
-
- spin_unlock_irqrestore(&an_dev->lock, flags);
-
- switch (tmp) {
- case USB_ENDPOINT_XFER_ISOC:
- if (an_ep->cyepconfig.type != cy_as_usb_iso) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_end_point_config EP %s mismatch "
- "on type %d %d\n", an_ep->usb_ep_inst.name,
- an_ep->cyepconfig.type, cy_as_usb_iso);
- #endif
- return -EINVAL;
- }
- break;
- case USB_ENDPOINT_XFER_INT:
- if (an_ep->cyepconfig.type != cy_as_usb_int) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_end_point_config EP %s mismatch "
- "on type %d %d\n", an_ep->usb_ep_inst.name,
- an_ep->cyepconfig.type, cy_as_usb_int);
- #endif
- return -EINVAL;
- }
- break;
- default:
- if (an_ep->cyepconfig.type != cy_as_usb_bulk) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_end_point_config EP %s mismatch "
- "on type %d %d\n", an_ep->usb_ep_inst.name,
- an_ep->cyepconfig.type, cy_as_usb_bulk);
- #endif
- return -EINVAL;
- }
- break;
- }
-
- tmp = desc->bEndpointAddress;
- an_ep->is_in = (tmp & USB_DIR_IN) != 0;
-
- if ((an_ep->cyepconfig.dir == cy_as_usb_in) &&
- (!an_ep->is_in)) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_end_point_config EP %s mismatch "
- "on dir %d %d\n", an_ep->usb_ep_inst.name,
- an_ep->cyepconfig.dir, cy_as_usb_in);
- #endif
- return -EINVAL;
- } else if ((an_ep->cyepconfig.dir == cy_as_usb_out) &&
- (an_ep->is_in)) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_end_point_config EP %s mismatch "
- "on dir %d %d\n", an_ep->usb_ep_inst.name,
- an_ep->cyepconfig.dir, cy_as_usb_out);
- #endif
- return -EINVAL;
- }
-
- cy_as_usb_clear_stall(an_dev->dev_handle, an_ep->num,
- cyasgadget_stallcallback, 0);
-
- cy_as_hal_print_message("%s enabled %s (ep%d-%d) max %04x\n",
- __func__, _ep->name, an_ep->num, tmp, max);
-
- return 0;
-}
-
-static int cyasgadget_disable(
- struct usb_ep *_ep
- )
-{
- cyasgadget_ep *an_ep;
- unsigned long flags;
-
- an_ep = container_of(_ep, cyasgadget_ep, usb_ep_inst);
- if (!_ep || !an_ep->desc || _ep->name == cy_as_ep0name)
- return -EINVAL;
-
- spin_lock_irqsave(&an_ep->dev->lock, flags);
- cyas_ep_reset(an_ep);
-
- spin_unlock_irqrestore(&an_ep->dev->lock, flags);
- return 0;
-}
-
-static struct usb_request *cyasgadget_alloc_request(
- struct usb_ep *_ep, gfp_t gfp_flags
- )
-{
- cyasgadget_ep *an_ep;
- cyasgadget_req *an_req;
-
- if (!_ep)
- return NULL;
-
- an_ep = container_of(_ep, cyasgadget_ep, usb_ep_inst);
-
- an_req = kzalloc(sizeof(cyasgadget_req), gfp_flags);
- if (!an_req)
- return NULL;
-
- an_req->req.dma = DMA_ADDR_INVALID;
- INIT_LIST_HEAD(&an_req->queue);
-
- return &an_req->req;
-}
-
-static void cyasgadget_free_request(
- struct usb_ep *_ep,
- struct usb_request *_req
- )
-{
- cyasgadget_req *an_req;
-
- if (!_ep || !_req)
- return;
-
- an_req = container_of(_req, cyasgadget_req, req);
-
- kfree(an_req);
-}
-
-/* Load a packet into the fifo we use for usb IN transfers.
- * works for all endpoints. */
-static int cyasgadget_queue(
- struct usb_ep *_ep,
- struct usb_request *_req,
- gfp_t gfp_flags
- )
-{
- cyasgadget_req *as_req;
- cyasgadget_ep *as_ep;
- cyasgadget *cy_as_dev;
- unsigned long flags;
- cy_as_return_status_t ret = 0;
-
- as_req = container_of(_req, cyasgadget_req, req);
- if (!_req || !_req->complete || !_req->buf
- || !list_empty(&as_req->queue))
- return -EINVAL;
-
- as_ep = container_of(_ep, cyasgadget_ep, usb_ep_inst);
-
- if (!_ep || (!as_ep->desc && (as_ep->num != 0)))
- return -EINVAL;
-
- cy_as_dev = as_ep->dev;
- if (!cy_as_dev->driver ||
- cy_as_dev->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- spin_lock_irqsave(&cy_as_dev->lock, flags);
-
- _req->status = -EINPROGRESS;
- _req->actual = 0;
-
- spin_unlock_irqrestore(&cy_as_dev->lock, flags);
-
- /* Call Async functions */
- if (as_ep->is_in) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_write_data_async being called "
- "on ep %d\n", as_ep->num);
- #endif
-
- ret = cy_as_usb_write_data_async(cy_as_dev->dev_handle,
- as_ep->num, _req->length, _req->buf,
- cy_false, cyasgadget_writecallback);
- if (ret != CY_AS_ERROR_SUCCESS)
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_write_data_async failed with "
- "error code %d\n", ret);
- else
- _req->status = -EALREADY;
- } else if (as_ep->num == 0) {
- /*
- ret = cy_as_usb_write_data_async(cy_as_dev->dev_handle,
- as_ep->num, _req->length, _req->buf, cy_false,
- cyasgadget_setupwritecallback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_write_data_async failed with error "
- "code %d\n", ret);
- */
- if ((cy_as_dev->outsetupreq) && (_req->length)) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_read_data_async "
- "being called on ep %d\n",
- as_ep->num);
- #endif
-
- ret = cy_as_usb_read_data_async (
- cy_as_dev->dev_handle, as_ep->num,
- cy_true, _req->length, _req->buf,
- cyasgadget_setupreadcallback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_read_data_async failed with "
- "error code %d\n", ret);
-
- } else {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_write_data_async "
- "being called on ep %d\n",
- as_ep->num);
- #endif
-
- ret = cy_as_usb_write_data_async(cy_as_dev->dev_handle,
- as_ep->num, _req->length, _req->buf, cy_false,
- cyasgadget_setupwritecallback);
-
- if (ret != CY_AS_ERROR_SUCCESS)
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_write_data_async failed with "
- "error code %d\n", ret);
- }
-
- } else if (list_empty(&as_ep->queue)) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_read_data_async being called since "
- "ep queue empty%d\n", ret);
- #endif
-
- ret = cy_as_usb_read_data_async(cy_as_dev->dev_handle,
- as_ep->num, cy_false, _req->length, _req->buf,
- cyasgadget_readcallback);
- if (ret != CY_AS_ERROR_SUCCESS)
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_read_data_async failed with error "
- "code %d\n", ret);
- else
- _req->status = -EALREADY;
- }
-
- spin_lock_irqsave(&cy_as_dev->lock, flags);
-
- if (as_req)
- list_add_tail(&as_req->queue, &as_ep->queue);
-
- spin_unlock_irqrestore(&cy_as_dev->lock, flags);
-
- return 0;
-}
-
-/* dequeue request */
-static int cyasgadget_dequeue(
- struct usb_ep *_ep,
- struct usb_request *_req
- )
-{
- cyasgadget_ep *an_ep;
- cyasgadget *dev;
- an_ep = container_of(_ep, cyasgadget_ep, usb_ep_inst);
- dev = an_ep->dev;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called\n", __func__);
- #endif
-
- cy_as_usb_cancel_async(dev->dev_handle, an_ep->num);
-
- return 0;
-}
-
-static int cyasgadget_set_halt(
- struct usb_ep *_ep,
- int value
- )
-{
- cyasgadget_ep *an_ep;
- int retval = 0;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called\n", __func__);
- #endif
-
- an_ep = container_of(_ep, cyasgadget_ep, usb_ep_inst);
- if (!_ep || (!an_ep->desc && an_ep->num != 0))
- return -EINVAL;
-
- if (!an_ep->dev->driver || an_ep->dev->gadget.speed ==
- USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- if (an_ep->desc /* not ep0 */ &&
- (an_ep->desc->bmAttributes & 0x03) == USB_ENDPOINT_XFER_ISOC)
- return -EINVAL;
-
- if (!list_empty(&an_ep->queue))
- retval = -EAGAIN;
- else if (an_ep->is_in && value &&
- cyasgadget_fifo_status(_ep) != 0)
- retval = -EAGAIN;
- else {
- if (value) {
- cy_as_usb_set_stall(an_ep->dev->dev_handle,
- an_ep->num, cyasgadget_stallcallback, 0);
- } else {
- cy_as_usb_clear_stall(an_ep->dev->dev_handle,
- an_ep->num, cyasgadget_stallcallback, 0);
- }
- }
-
- return retval;
-}
-
-static int cyasgadget_fifo_status(
- struct usb_ep *_ep
- )
-{
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called\n", __func__);
- #endif
-
- return 0;
-}
-
-static void cyasgadget_fifo_flush(
- struct usb_ep *_ep
- )
-{
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called\n", __func__);
- #endif
-}
-
-static struct usb_ep_ops cyasgadget_ep_ops = {
- .enable = cyasgadget_enable,
- .disable = cyasgadget_disable,
- .alloc_request = cyasgadget_alloc_request,
- .free_request = cyasgadget_free_request,
- .queue = cyasgadget_queue,
- .dequeue = cyasgadget_dequeue,
- .set_halt = cyasgadget_set_halt,
- .fifo_status = cyasgadget_fifo_status,
- .fifo_flush = cyasgadget_fifo_flush,
-};
-
-/*************************************************************/
-/*This subsection contains all usb_gadget_ops cyasgadget_ops */
-/*************************************************************/
-static int cyasgadget_get_frame(
- struct usb_gadget *_gadget
- )
-{
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called\n", __func__);
- #endif
- return 0;
-}
-
-static int cyasgadget_wakeup(
- struct usb_gadget *_gadget
- )
-{
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called\n", __func__);
- #endif
- return 0;
-}
-
-static int cyasgadget_set_selfpowered(
- struct usb_gadget *_gadget,
- int value
- )
-{
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called\n", __func__);
- #endif
- return 0;
-}
-
-static int cyasgadget_pullup(
- struct usb_gadget *_gadget,
- int is_on
- )
-{
- struct cyasgadget *cy_as_dev;
- unsigned long flags;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called\n", __func__);
- #endif
-
- if (!_gadget)
- return -ENODEV;
-
- cy_as_dev = container_of(_gadget, cyasgadget, gadget);
-
- spin_lock_irqsave(&cy_as_dev->lock, flags);
- cy_as_dev->softconnect = (is_on != 0);
- if (is_on)
- cy_as_usb_connect(cy_as_dev->dev_handle, 0, 0);
- else
- cy_as_usb_disconnect(cy_as_dev->dev_handle, 0, 0);
-
- spin_unlock_irqrestore(&cy_as_dev->lock, flags);
-
- return 0;
-}
-
-static int cyasgadget_ioctl(
- struct usb_gadget *_gadget,
- unsigned code,
- unsigned long param
- )
-{
- int err = 0;
- int retval = 0;
- int ret_stat = 0;
- cyasgadget *dev = cy_as_gadget_controller;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called, code=%d, param=%ld\n",
- __func__, code, param);
- #endif
- /*
- * extract the type and number bitfields, and don't decode
- * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
- */
- if (_IOC_TYPE(code) != CYASGADGET_IOC_MAGIC) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s, bad magic number = 0x%x\n",
- __func__, _IOC_TYPE(code));
- #endif
- return -ENOTTY;
- }
-
- if (_IOC_NR(code) > CYASGADGET_IOC_MAXNR) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s, bad ioctl code = 0x%x\n",
- __func__, _IOC_NR(code));
- #endif
- return -ENOTTY;
- }
-
- /*
- * the direction is a bitmask, and VERIFY_WRITE catches R/W
- * transfers. `Type' is user-oriented, while
- * access_ok is kernel-oriented, so the concept of "read" and
- * "write" is reversed
- */
- if (_IOC_DIR(code) & _IOC_READ)
- err = !access_ok(VERIFY_WRITE,
- (void __user *)param, _IOC_SIZE(code));
- else if (_IOC_DIR(code) & _IOC_WRITE)
- err = !access_ok(VERIFY_READ,
- (void __user *)param, _IOC_SIZE(code));
-
- if (err) {
- cy_as_hal_print_message("%s, bad ioctl dir = 0x%x\n",
- __func__, _IOC_DIR(code));
- return -EFAULT;
- }
-
- switch (code) {
- case CYASGADGET_GETMTPSTATUS:
- {
- cy_as_gadget_ioctl_tmtp_status *usr_d =
- (cy_as_gadget_ioctl_tmtp_status *)param;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: got CYASGADGET_GETMTPSTATUS\n",
- __func__);
- #endif
-
- retval = __put_user(dev->tmtp_send_complete,
- (uint32_t __user *)(&(usr_d->tmtp_send_complete)));
- retval = __put_user(dev->tmtp_get_complete,
- (uint32_t __user *)(&(usr_d->tmtp_get_complete)));
- retval = __put_user(dev->tmtp_need_new_blk_tbl,
- (uint32_t __user *)(&(usr_d->tmtp_need_new_blk_tbl)));
-
- if (copy_to_user((&(usr_d->tmtp_send_complete_data)),
- (&(dev->tmtp_send_complete_data)),
- sizeof(cy_as_gadget_ioctl_send_object)))
- return -EFAULT;
-
- if (copy_to_user((&(usr_d->tmtp_get_complete_data)),
- (&(dev->tmtp_get_complete_data)),
- sizeof(cy_as_gadget_ioctl_get_object)))
- return -EFAULT;
- break;
- }
- case CYASGADGET_CLEARTMTPSTATUS:
- {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s got CYASGADGET_CLEARTMTPSTATUS\n",
- __func__);
- #endif
-
- dev->tmtp_send_complete = 0;
- dev->tmtp_get_complete = 0;
- dev->tmtp_need_new_blk_tbl = 0;
-
- break;
- }
- case CYASGADGET_INITSOJ:
- {
- cy_as_gadget_ioctl_i_s_o_j_d k_d;
- cy_as_gadget_ioctl_i_s_o_j_d *usr_d =
- (cy_as_gadget_ioctl_i_s_o_j_d *)param;
- cy_as_mtp_block_table blk_table;
- struct scatterlist sg;
- char *alloc_filename;
- struct file *file_to_allocate;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s got CYASGADGET_INITSOJ\n",
- __func__);
- #endif
-
- memset(&blk_table, 0, sizeof(blk_table));
-
- /* Get user argument structure */
- if (copy_from_user(&k_d, usr_d,
- sizeof(cy_as_gadget_ioctl_i_s_o_j_d)))
- return -EFAULT;
-
-	/* better to use a fixed-size buffer */
- alloc_filename = kmalloc(k_d.name_length + 1, GFP_KERNEL);
- if (alloc_filename == NULL)
- return -ENOMEM;
-
- /* get the filename */
- if (copy_from_user(alloc_filename, k_d.file_name,
- k_d.name_length + 1)) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: CYASGADGET_INITSOJ, "
- "copy file name from user space failed\n",
- __func__);
- #endif
- kfree(alloc_filename);
- return -EFAULT;
- }
-
- file_to_allocate = filp_open(alloc_filename, O_RDWR, 0);
-
- if (!IS_ERR(file_to_allocate)) {
-
- struct address_space *mapping =
- file_to_allocate->f_mapping;
- const struct address_space_operations *a_ops =
- mapping->a_ops;
- struct inode *inode = mapping->host;
- struct inode *alloc_inode =
- file_to_allocate->f_path.dentry->d_inode;
- uint32_t num_clusters = 0;
- struct buffer_head bh;
- struct kstat stat;
- int nr_pages = 0;
- int ret_stat = 0;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: fhandle is OK, "
- "calling vfs_getattr\n", __func__);
- #endif
-
- ret_stat = vfs_getattr(file_to_allocate->f_path.mnt,
- file_to_allocate->f_path.dentry, &stat);
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: returned from "
- "vfs_getattr() stat->blksize=0x%lx\n",
- __func__, stat.blksize);
- #endif
-
- /* TODO: get this from disk properties
- * (from blockdevice)*/
- #define SECTOR_SIZE 512
- if (stat.blksize != 0) {
- num_clusters = (k_d.num_bytes) / SECTOR_SIZE;
-
- if (((k_d.num_bytes) % SECTOR_SIZE) != 0)
- num_clusters++;
- } else {
- goto initsoj_safe_exit;
- }
-
- bh.b_state = 0;
- bh.b_blocknr = 0;
- /* block size is arbitrary , we'll use sector size*/
- bh.b_size = SECTOR_SIZE;
-
-
-
- /* clear dirty pages in page cache
- * (if were any allocated) */
- nr_pages = (k_d.num_bytes) / (PAGE_CACHE_SIZE);
-
- if (((k_d.num_bytes) % (PAGE_CACHE_SIZE)) != 0)
- nr_pages++;
-
- #ifndef WESTBRIDGE_NDEBUG
-		/*check out how many pages were actually allocated */
- if (mapping->nrpages != nr_pages)
- cy_as_hal_print_message("%s mpage_cleardirty "
- "mapping->nrpages %d != num_pages %d\n",
- __func__, (int) mapping->nrpages,
- nr_pages);
-
- cy_as_hal_print_message("%s: calling "
- "mpage_cleardirty() "
- "for %d pages\n", __func__, nr_pages);
- #endif
-
- ret_stat = mpage_cleardirty(mapping, nr_pages);
-
-		/*fill up the block table from the addr mapping */
- if (a_ops->bmap) {
- int8_t blk_table_idx = -1;
- uint32_t file_block_idx = 0;
- uint32_t last_blk_addr_map = 0,
- curr_blk_addr_map = 0;
-
- #ifndef WESTBRIDGE_NDEBUG
- if (alloc_inode->i_bytes == 0)
- cy_as_hal_print_message(
- "%s: alloc_inode->ibytes =0\n",
- __func__);
- #endif
-
- /* iterate through the list of
- * blocks (not clusters)*/
- for (file_block_idx = 0;
- file_block_idx < num_clusters
- /*inode->i_bytes*/; file_block_idx++) {
-
- /* returns starting sector number */
- curr_blk_addr_map =
- a_ops->bmap(mapping,
- file_block_idx);
-
- /*no valid mapping*/
- if (curr_blk_addr_map == 0) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s:hit invalid "
- "mapping\n", __func__);
- #endif
- break;
- } else if (curr_blk_addr_map !=
- (last_blk_addr_map + 1) ||
- (blk_table.num_blocks
- [blk_table_idx] == 65535)) {
-
- /* next table entry */
- blk_table_idx++;
- /* starting sector of a
- * scattered cluster*/
- blk_table.start_blocks
- [blk_table_idx] =
- curr_blk_addr_map;
- /* ++ num of blocks in cur
- * table entry*/
- blk_table.
- num_blocks[blk_table_idx]++;
-
- #ifndef WESTBRIDGE_NDEBUG
- if (file_block_idx != 0)
- cy_as_hal_print_message(
- "<*> next table "
- "entry:%d required\n",
- blk_table_idx);
- #endif
- } else {
- /*add contiguous block*/
- blk_table.num_blocks
- [blk_table_idx]++;
- } /*if (curr_blk_addr_map == 0)*/
-
- last_blk_addr_map = curr_blk_addr_map;
- } /* end for (file_block_idx = 0; file_block_idx
- < inode->i_bytes;) */
-
- #ifndef WESTBRIDGE_NDEBUG
- /*print result for verification*/
- {
- int i;
- cy_as_hal_print_message(
- "%s: print block table "
- "mapping:\n",
- __func__);
- for (i = 0; i <= blk_table_idx; i++) {
- cy_as_hal_print_message(
- "<1> %d 0x%x 0x%x\n", i,
- blk_table.start_blocks[i],
- blk_table.num_blocks[i]);
- }
- }
- #endif
-
- /* copy the block table to user
- * space (for debug purposes) */
- retval = __put_user(
- blk_table.start_blocks[blk_table_idx],
- (uint32_t __user *)
- (&(usr_d->blk_addr_p)));
-
- retval = __put_user(
- blk_table.num_blocks[blk_table_idx],
- (uint32_t __user *)
- (&(usr_d->blk_count_p)));
-
- blk_table_idx++;
- retval = __put_user(blk_table_idx,
- (uint32_t __user *)
- (&(usr_d->item_count)));
-
- } /*end if (a_ops->bmap)*/
-
- filp_close(file_to_allocate, NULL);
-
- dev->tmtp_send_complete = 0;
- dev->tmtp_need_new_blk_tbl = 0;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: calling cy_as_mtp_init_send_object()\n",
- __func__);
- #endif
- sg_init_one(&sg, &blk_table, sizeof(blk_table));
- ret_stat = cy_as_mtp_init_send_object(dev->dev_handle,
- (cy_as_mtp_block_table *)&sg,
- k_d.num_bytes, 0, 0);
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: returned from "
- "cy_as_mtp_init_send_object()\n", __func__);
- #endif
-
- }
- #ifndef WESTBRIDGE_NDEBUG
- else {
- cy_as_hal_print_message(
- "%s: failed to allocate the file %s\n",
- __func__, alloc_filename);
- } /* end if (file_to_allocate)*/
- #endif
- kfree(alloc_filename);
-initsoj_safe_exit:
- ret_stat = 0;
- retval = __put_user(ret_stat,
- (uint32_t __user *)(&(usr_d->ret_val)));
-
- break;
- }
- case CYASGADGET_INITGOJ:
- {
- cy_as_gadget_ioctl_i_g_o_j_d k_d;
- cy_as_gadget_ioctl_i_g_o_j_d *usr_d =
- (cy_as_gadget_ioctl_i_g_o_j_d *)param;
- cy_as_mtp_block_table blk_table;
- struct scatterlist sg;
- char *map_filename;
- struct file *file_to_map;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: got CYASGADGET_INITGOJ\n",
- __func__);
- #endif
-
- memset(&blk_table, 0, sizeof(blk_table));
-
-		/* Get user argument structure */
- if (copy_from_user(&k_d, usr_d,
- sizeof(cy_as_gadget_ioctl_i_g_o_j_d)))
- return -EFAULT;
-
- map_filename = kmalloc(k_d.name_length + 1, GFP_KERNEL);
- if (map_filename == NULL)
- return -ENOMEM;
- if (copy_from_user(map_filename, k_d.file_name,
- k_d.name_length + 1)) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: copy file name from "
- "user space failed\n", __func__);
- #endif
- kfree(map_filename);
- return -EFAULT;
- }
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<*>%s: opening %s for kernel "
- "mode access map\n", __func__, map_filename);
- #endif
- file_to_map = filp_open(map_filename, O_RDWR, 0);
-		if (!IS_ERR(file_to_map)) {
- struct address_space *mapping = file_to_map->f_mapping;
- const struct address_space_operations
- *a_ops = mapping->a_ops;
- struct inode *inode = mapping->host;
-
- int8_t blk_table_idx = -1;
- uint32_t file_block_idx = 0;
- uint32_t last_blk_addr_map = 0, curr_blk_addr_map = 0;
-
- /*verify operation exists*/
- if (a_ops->bmap) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "<*>%s: bmap found, i_bytes=0x%x, "
- "i_size=0x%x, i_blocks=0x%x\n",
- __func__, inode->i_bytes,
- (unsigned int) inode->i_size,
- (unsigned int) inode->i_blocks);
- #endif
-
- k_d.num_bytes = inode->i_size;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "<*>%s: k_d.num_bytes=0x%x\n",
- __func__, k_d.num_bytes);
- #endif
-
- for (file_block_idx = 0;
- file_block_idx < inode->i_size;
- file_block_idx++) {
- curr_blk_addr_map =
- a_ops->bmap(mapping,
- file_block_idx);
-
- if (curr_blk_addr_map == 0) {
- /*no valid mapping*/
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: no valid "
- "mapping\n", __func__);
- #endif
- break;
- } else if (curr_blk_addr_map !=
- (last_blk_addr_map + 1)) {
- /*non-contiguous break*/
- blk_table_idx++;
- blk_table.start_blocks
- [blk_table_idx] =
- curr_blk_addr_map;
- blk_table.num_blocks
- [blk_table_idx]++;
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: found non-"
- "contiguous break",
- __func__);
- #endif
- } else {
- /*add contiguous block*/
- blk_table.num_blocks
- [blk_table_idx]++;
- }
- last_blk_addr_map = curr_blk_addr_map;
- }
-
- /*print result for verification*/
- #ifndef WESTBRIDGE_NDEBUG
- {
- int i = 0;
-
- for (i = 0; i <= blk_table_idx; i++) {
- cy_as_hal_print_message(
- "%s %d 0x%x 0x%x\n",
- __func__, i,
- blk_table.start_blocks[i],
- blk_table.num_blocks[i]);
- }
- }
- #endif
-			} else {
-				#ifndef WESTBRIDGE_NDEBUG
-				cy_as_hal_print_message(
-					"%s: could not find "
-					"a_ops->bmap\n", __func__);
-				#endif
-				filp_close(file_to_map, NULL);
-				kfree(map_filename);
-				return -EFAULT;
-			}
-
- filp_close(file_to_map, NULL);
-
- dev->tmtp_get_complete = 0;
- dev->tmtp_need_new_blk_tbl = 0;
-
- ret_stat = __put_user(
- blk_table.start_blocks[blk_table_idx],
- (uint32_t __user *)(&(usr_d->blk_addr_p)));
-
- ret_stat = __put_user(
- blk_table.num_blocks[blk_table_idx],
- (uint32_t __user *)(&(usr_d->blk_count_p)));
-
- sg_init_one(&sg, &blk_table, sizeof(blk_table));
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: calling cy_as_mtp_init_get_object() "
- "start=0x%x, num =0x%x, tid=0x%x, "
- "num_bytes=0x%x\n",
- __func__,
- blk_table.start_blocks[0],
- blk_table.num_blocks[0],
- k_d.tid,
- k_d.num_bytes);
- #endif
-
- ret_stat = cy_as_mtp_init_get_object(
- dev->dev_handle,
- (cy_as_mtp_block_table *)&sg,
- k_d.num_bytes, k_d.tid, 0, 0);
- if (ret_stat != CY_AS_ERROR_SUCCESS) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: cy_as_mtp_init_get_object "
- "failed ret_stat=0x%x\n",
- __func__, ret_stat);
- #endif
- }
- }
- #ifndef WESTBRIDGE_NDEBUG
- else {
- cy_as_hal_print_message(
- "%s: failed to open file %s\n",
- __func__, map_filename);
- }
- #endif
- kfree(map_filename);
-
- ret_stat = 0;
- retval = __put_user(ret_stat, (uint32_t __user *)
- (&(usr_d->ret_val)));
- break;
- }
- case CYASGADGET_CANCELSOJ:
- {
- cy_as_gadget_ioctl_cancel *usr_d =
- (cy_as_gadget_ioctl_cancel *)param;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message(
- "%s: got CYASGADGET_CANCELSOJ\n",
- __func__);
- #endif
-
- ret_stat = cy_as_mtp_cancel_send_object(dev->dev_handle, 0, 0);
-
- retval = __put_user(ret_stat, (uint32_t __user *)
- (&(usr_d->ret_val)));
- break;
- }
- case CYASGADGET_CANCELGOJ:
- {
- cy_as_gadget_ioctl_cancel *usr_d =
- (cy_as_gadget_ioctl_cancel *)param;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: got CYASGADGET_CANCELGOJ\n",
- __func__);
- #endif
-
- ret_stat = cy_as_mtp_cancel_get_object(dev->dev_handle, 0, 0);
-
- retval = __put_user(ret_stat,
- (uint32_t __user *)(&(usr_d->ret_val)));
- break;
- }
- default:
- {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: unknown ioctl received: %d\n",
- __func__, code);
-
- cy_as_hal_print_message("%s: known codes:\n"
- "CYASGADGET_GETMTPSTATUS=%d\n"
- "CYASGADGET_CLEARTMTPSTATUS=%d\n"
- "CYASGADGET_INITSOJ=%d\n"
- "CYASGADGET_INITGOJ=%d\n"
- "CYASGADGET_CANCELSOJ=%d\n"
- "CYASGADGET_CANCELGOJ=%d\n",
- __func__,
- CYASGADGET_GETMTPSTATUS,
- CYASGADGET_CLEARTMTPSTATUS,
- CYASGADGET_INITSOJ,
- CYASGADGET_INITGOJ,
- CYASGADGET_CANCELSOJ,
- CYASGADGET_CANCELGOJ);
- #endif
- break;
- }
- }
-
- return 0;
-}
-
-static const struct usb_gadget_ops cyasgadget_ops = {
- .get_frame = cyasgadget_get_frame,
- .wakeup = cyasgadget_wakeup,
- .set_selfpowered = cyasgadget_set_selfpowered,
- .pullup = cyasgadget_pullup,
- .ioctl = cyasgadget_ioctl,
-};
-
-
-/* keeping it simple:
- * - one bus driver, initted first;
- * - one function driver, initted second
- *
- * most of the work to support multiple controllers would
- * be to associate this gadget driver with all of them, or
- * perhaps to bind specific drivers to specific devices.
- */
-
-static void cyas_ep_reset(
- cyasgadget_ep *an_ep
- )
-{
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called\n", __func__);
- #endif
-
- an_ep->desc = NULL;
- INIT_LIST_HEAD(&an_ep->queue);
-
- an_ep->stopped = 0;
- an_ep->is_in = 0;
- an_ep->is_iso = 0;
- an_ep->usb_ep_inst.maxpacket = ~0;
- an_ep->usb_ep_inst.ops = &cyasgadget_ep_ops;
-}
-
-static void cyas_usb_reset(
- cyasgadget *cy_as_dev
- )
-{
- cy_as_return_status_t ret;
- cy_as_usb_enum_control config;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_device *dev_p = (cy_as_device *)cy_as_dev->dev_handle;
-
- cy_as_hal_print_message("<1>%s called mtp_firmware=0x%x\n",
- __func__, dev_p->is_mtp_firmware);
- #endif
-
- ret = cy_as_misc_release_resource(cy_as_dev->dev_handle,
- cy_as_bus_u_s_b);
- if (ret != CY_AS_ERROR_SUCCESS && ret !=
- CY_AS_ERROR_RESOURCE_NOT_OWNED) {
- cy_as_hal_print_message("<1>_cy_as_gadget: cannot "
- "release usb resource: failed with error code %d\n",
- ret);
- return;
- }
-
- cy_as_dev->gadget.speed = USB_SPEED_HIGH;
-
- ret = cy_as_usb_start(cy_as_dev->dev_handle, 0, 0);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_start failed with error code %d\n",
- ret);
- return;
- }
- /* P port will do enumeration, not West Bridge */
- config.antioch_enumeration = cy_false;
-	/* devices_to_enumerate indices below are [bus][device]; SD is on bus 1 */
-
- /* TODO: add module param to enumerate mass storage */
- config.mass_storage_interface = 0;
-
- if (append_mtp) {
- ret = cy_as_mtp_start(cy_as_dev->dev_handle,
- cy_as_gadget_mtp_event_callback, 0, 0);
- if (ret == CY_AS_ERROR_SUCCESS) {
- cy_as_hal_print_message("MTP start passed, enumerating "
- "MTP interface\n");
- config.mtp_interface = append_mtp;
- /*Do not enumerate NAND storage*/
- config.devices_to_enumerate[0][0] = cy_false;
-
- /*enumerate SD storage as MTP*/
- config.devices_to_enumerate[1][0] = cy_true;
- }
- } else {
- cy_as_hal_print_message("MTP start not attempted, not "
- "enumerating MTP interface\n");
- config.mtp_interface = 0;
- /* enumerate mass storage based on module parameters */
- config.devices_to_enumerate[0][0] = msc_enum_bus_0;
- config.devices_to_enumerate[1][0] = msc_enum_bus_1;
- }
-
- ret = cy_as_usb_set_enum_config(cy_as_dev->dev_handle,
- &config, 0, 0);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_hal_print_message("<1>_cy_as_gadget: "
- "cy_as_usb_set_enum_config failed with error "
- "code %d\n", ret);
- return;
- }
-
- cy_as_usb_set_physical_configuration(cy_as_dev->dev_handle, 1);
-
-}
-
-static void cyas_usb_reinit(
- cyasgadget *cy_as_dev
- )
-{
- int index = 0;
- cyasgadget_ep *an_ep_p;
- cy_as_return_status_t ret;
- cy_as_device *dev_p = (cy_as_device *)cy_as_dev->dev_handle;
-
- INIT_LIST_HEAD(&cy_as_dev->gadget.ep_list);
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called, is_mtp_firmware = "
- "0x%x\n", __func__, dev_p->is_mtp_firmware);
- #endif
-
- /* Init the end points */
- for (index = 1; index <= 15; index++) {
- an_ep_p = &cy_as_dev->an_gadget_ep[index];
- cyas_ep_reset(an_ep_p);
- an_ep_p->usb_ep_inst.name = cy_as_ep_names[index];
- an_ep_p->dev = cy_as_dev;
- an_ep_p->num = index;
- memset(&an_ep_p->cyepconfig, 0, sizeof(an_ep_p->cyepconfig));
-
- /* EP0, EPs 2,4,6,8 need not be added */
- if ((index <= 8) && (index % 2 == 0) &&
- (!dev_p->is_mtp_firmware)) {
- /* EP0 is 64 and EPs 2,4,6,8 not allowed */
- cy_as_dev->an_gadget_ep[index].fifo_size = 0;
- } else {
- if (index == 1)
- an_ep_p->fifo_size = 64;
- else
- an_ep_p->fifo_size = 512;
- list_add_tail(&an_ep_p->usb_ep_inst.ep_list,
- &cy_as_dev->gadget.ep_list);
- }
- }
-	/* need to set the endpoint config before usb connect; this is not
-	 * quite compatible with the gadget methodology (ep_enable is called
-	 * by the gadget after connect), therefore the config is set during
-	 * initialization and compatibility is verified in ep_enable,
-	 * which raises an error otherwise */
- an_ep_p = &cy_as_dev->an_gadget_ep[3];
- an_ep_p->cyepconfig.enabled = cy_true;
- an_ep_p->cyepconfig.dir = cy_as_usb_out;
- an_ep_p->cyepconfig.type = cy_as_usb_bulk;
- an_ep_p->cyepconfig.size = 0;
- an_ep_p->cyepconfig.physical = 1;
- ret = cy_as_usb_set_end_point_config(an_ep_p->dev->dev_handle,
- 3, &an_ep_p->cyepconfig);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_hal_print_message("cy_as_usb_set_end_point_config "
- "failed with error code %d\n", ret);
- }
-
- cy_as_usb_set_stall(an_ep_p->dev->dev_handle, 3, 0, 0);
-
- an_ep_p = &cy_as_dev->an_gadget_ep[5];
- an_ep_p->cyepconfig.enabled = cy_true;
- an_ep_p->cyepconfig.dir = cy_as_usb_in;
- an_ep_p->cyepconfig.type = cy_as_usb_bulk;
- an_ep_p->cyepconfig.size = 0;
- an_ep_p->cyepconfig.physical = 2;
- ret = cy_as_usb_set_end_point_config(an_ep_p->dev->dev_handle,
- 5, &an_ep_p->cyepconfig);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_hal_print_message("cy_as_usb_set_end_point_config "
- "failed with error code %d\n", ret);
- }
-
- cy_as_usb_set_stall(an_ep_p->dev->dev_handle, 5, 0, 0);
-
- an_ep_p = &cy_as_dev->an_gadget_ep[9];
- an_ep_p->cyepconfig.enabled = cy_true;
- an_ep_p->cyepconfig.dir = cy_as_usb_in;
- an_ep_p->cyepconfig.type = cy_as_usb_bulk;
- an_ep_p->cyepconfig.size = 0;
- an_ep_p->cyepconfig.physical = 4;
- ret = cy_as_usb_set_end_point_config(an_ep_p->dev->dev_handle,
- 9, &an_ep_p->cyepconfig);
- if (ret != CY_AS_ERROR_SUCCESS) {
- cy_as_hal_print_message("cy_as_usb_set_end_point_config "
- "failed with error code %d\n", ret);
- }
-
- cy_as_usb_set_stall(an_ep_p->dev->dev_handle, 9, 0, 0);
-
- if (dev_p->mtp_count != 0) {
- /* these need to be set for compatibility with
- * the gadget_enable logic */
- an_ep_p = &cy_as_dev->an_gadget_ep[2];
- an_ep_p->cyepconfig.enabled = cy_true;
- an_ep_p->cyepconfig.dir = cy_as_usb_out;
- an_ep_p->cyepconfig.type = cy_as_usb_bulk;
- an_ep_p->cyepconfig.size = 0;
- an_ep_p->cyepconfig.physical = 0;
- cy_as_usb_set_stall(an_ep_p->dev->dev_handle, 2, 0, 0);
-
- an_ep_p = &cy_as_dev->an_gadget_ep[6];
- an_ep_p->cyepconfig.enabled = cy_true;
- an_ep_p->cyepconfig.dir = cy_as_usb_in;
- an_ep_p->cyepconfig.type = cy_as_usb_bulk;
- an_ep_p->cyepconfig.size = 0;
- an_ep_p->cyepconfig.physical = 0;
- cy_as_usb_set_stall(an_ep_p->dev->dev_handle, 6, 0, 0);
- }
-
- cyas_ep_reset(&cy_as_dev->an_gadget_ep[0]);
- cy_as_dev->an_gadget_ep[0].usb_ep_inst.name = cy_as_ep_names[0];
- cy_as_dev->an_gadget_ep[0].dev = cy_as_dev;
- cy_as_dev->an_gadget_ep[0].num = 0;
- cy_as_dev->an_gadget_ep[0].fifo_size = 64;
-
- cy_as_dev->an_gadget_ep[0].usb_ep_inst.maxpacket = 64;
- cy_as_dev->gadget.ep0 = &cy_as_dev->an_gadget_ep[0].usb_ep_inst;
- cy_as_dev->an_gadget_ep[0].stopped = 0;
- INIT_LIST_HEAD(&cy_as_dev->gadget.ep0->ep_list);
-}
-
-static void cyas_ep0_start(
- cyasgadget *dev
- )
-{
- cy_as_return_status_t ret;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called\n", __func__);
- #endif
-
- ret = cy_as_usb_register_callback(dev->dev_handle,
- cy_as_gadget_usb_event_callback);
- if (ret != CY_AS_ERROR_SUCCESS) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: cy_as_usb_register_callback "
- "failed with error code %d\n", __func__, ret);
- #endif
- return;
- }
-
- ret = cy_as_usb_commit_config(dev->dev_handle, 0, 0);
- if (ret != CY_AS_ERROR_SUCCESS) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: cy_as_usb_commit_config "
- "failed with error code %d\n", __func__, ret);
- #endif
- return;
- }
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: cy_as_usb_commit_config "
- "message sent\n", __func__);
- #endif
-
- ret = cy_as_usb_connect(dev->dev_handle, 0, 0);
- if (ret != CY_AS_ERROR_SUCCESS) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: cy_as_usb_connect failed "
- "with error code %d\n", __func__, ret);
- #endif
- return;
- }
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s: cy_as_usb_connect message "
- "sent\n", __func__);
- #endif
-}
-
-/*
- * When a driver is successfully registered, it will receive
- * control requests including set_configuration(), which enables
- * non-control requests. Then USB traffic follows until a
- * disconnect is reported. Then a host may connect again, or
- * the driver might get unbound.
- */
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
-{
- cyasgadget *dev = cy_as_gadget_controller;
- int retval;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called driver=0x%x\n",
- __func__, (unsigned int) driver);
- #endif
-
- /* insist on high speed support from the driver, since
- * "must not be used in normal operation"
- */
- if (!driver
- || !bind
- || !driver->unbind
- || !driver->setup)
- return -EINVAL;
-
- if (!dev)
- return -ENODEV;
-
- if (dev->driver)
- return -EBUSY;
-
- /* hook up the driver ... */
- dev->softconnect = 1;
- driver->driver.bus = NULL;
- dev->driver = driver;
- dev->gadget.dev.driver = &driver->driver;
-
-	/* reset the West Bridge USB side and rebuild the endpoint list */
- cyas_usb_reset(dev); /* External usb */
- cyas_usb_reinit(dev); /* Internal */
-
- retval = bind(&dev->gadget);
- if (retval) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("%s bind to driver %s --> %d\n",
- __func__, driver->driver.name, retval);
- #endif
-
- dev->driver = NULL;
- dev->gadget.dev.driver = NULL;
- return retval;
- }
-
- /* ... then enable host detection and ep0; and we're ready
- * for set_configuration as well as eventual disconnect.
- */
- cyas_ep0_start(dev);
-
- return 0;
-}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-
-static void cyasgadget_nuke(
- cyasgadget_ep *an_ep
- )
-{
- cyasgadget *dev = cy_as_gadget_controller;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called\n", __func__);
- #endif
-
- cy_as_usb_cancel_async(dev->dev_handle, an_ep->num);
- an_ep->stopped = 1;
-
- while (!list_empty(&an_ep->queue)) {
- cyasgadget_req *an_req = list_entry
- (an_ep->queue.next, cyasgadget_req, queue);
- list_del_init(&an_req->queue);
- an_req->req.status = -ESHUTDOWN;
- an_req->req.complete(&an_ep->usb_ep_inst, &an_req->req);
- }
-}
-
-static void cyasgadget_stop_activity(
- cyasgadget *dev,
- struct usb_gadget_driver *driver
- )
-{
- int index;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called\n", __func__);
- #endif
-
- /* don't disconnect if it's not connected */
- if (dev->gadget.speed == USB_SPEED_UNKNOWN)
- driver = NULL;
-
- if (spin_is_locked(&dev->lock))
- spin_unlock(&dev->lock);
-
- /* Stop hardware; prevent new request submissions;
- * and kill any outstanding requests.
- */
- cy_as_usb_disconnect(dev->dev_handle, 0, 0);
-
- for (index = 3; index <= 7; index += 2) {
- cyasgadget_ep *an_ep_p = &dev->an_gadget_ep[index];
- cyasgadget_nuke(an_ep_p);
- }
-
- for (index = 9; index <= 15; index++) {
- cyasgadget_ep *an_ep_p = &dev->an_gadget_ep[index];
- cyasgadget_nuke(an_ep_p);
- }
-
- /* report disconnect; the driver is already quiesced */
- if (driver)
- driver->disconnect(&dev->gadget);
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("cy_as_usb_disconnect returned success");
- #endif
-
- /* Stop Usb */
- cy_as_usb_stop(dev->dev_handle, 0, 0);
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("cy_as_usb_stop returned success");
- #endif
-}
-
-int usb_gadget_unregister_driver(
- struct usb_gadget_driver *driver
- )
-{
- cyasgadget *dev = cy_as_gadget_controller;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called\n", __func__);
- #endif
-
- if (!dev)
- return -ENODEV;
-
- if (!driver || driver != dev->driver)
- return -EINVAL;
-
- cyasgadget_stop_activity(dev, driver);
-
- driver->unbind(&dev->gadget);
- dev->gadget.dev.driver = NULL;
- dev->driver = NULL;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("unregistered driver '%s'\n",
- driver->driver.name);
- #endif
-
- return 0;
-}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
-
-static void cyas_gadget_release(
- struct device *_dev
- )
-{
- cyasgadget *dev = dev_get_drvdata(_dev);
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>%s called\n", __func__);
- #endif
-
- kfree(dev);
-}
-
-/* DeInitialize gadget driver */
-static void cyasgadget_deinit(
- cyasgadget *cy_as_dev
- )
-{
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>_cy_as_gadget deinitialize called\n");
- #endif
-
- if (!cy_as_dev) {
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>_cy_as_gadget_deinit: "
- "invalid cyasgadget device\n");
- #endif
- return;
- }
-
- if (cy_as_dev->driver) {
- /* should have been done already by driver model core */
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1> cy_as_gadget: '%s' "
- "is still registered\n",
- cy_as_dev->driver->driver.name);
- #endif
- usb_gadget_unregister_driver(cy_as_dev->driver);
- }
-
- kfree(cy_as_dev);
- cy_as_gadget_controller = NULL;
-}
-
-/* Initialize gadget driver */
-static int cyasgadget_initialize(void)
-{
- cyasgadget *cy_as_dev = 0;
- int retval = 0;
-
- #ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1>_cy_as_gadget [V1.1] initialize called\n");
- #endif
-
- if (cy_as_gadget_controller != 0) {
- cy_as_hal_print_message("<1> cy_as_gadget: the device has "
-			"already been initialized. ignoring\n");
- return -EBUSY;
- }
-
- cy_as_dev = kzalloc(sizeof(cyasgadget), GFP_ATOMIC);
- if (cy_as_dev == NULL) {
- cy_as_hal_print_message("<1> cy_as_gadget: memory "
- "allocation failed\n");
- return -ENOMEM;
- }
-
- spin_lock_init(&cy_as_dev->lock);
- cy_as_dev->gadget.ops = &cyasgadget_ops;
- cy_as_dev->gadget.is_dualspeed = 1;
-
- /* the "gadget" abstracts/virtualizes the controller */
- /*strcpy(cy_as_dev->gadget.dev.bus_id, "cyasgadget");*/
- cy_as_dev->gadget.dev.release = cyas_gadget_release;
- cy_as_dev->gadget.name = cy_as_driver_name;
-
- /* Get the device handle */
- cy_as_dev->dev_handle = cyasdevice_getdevhandle();
- if (0 == cy_as_dev->dev_handle) {
-		#ifndef WESTBRIDGE_NDEBUG
- cy_as_hal_print_message("<1> cy_as_gadget: "
- "no west bridge device\n");
- #endif
- retval = -EFAULT;
- goto done;
- }
-
- /* We are done now */
- cy_as_gadget_controller = cy_as_dev;
- return 0;
-
-/*
- * in case of an error
- */
-done:
- if (cy_as_dev)
- cyasgadget_deinit(cy_as_dev);
-
- return retval;
-}
-
-static int __init cyas_init(void)
-{
- int init_res = 0;
-
- init_res = cyasgadget_initialize();
-
- if (init_res != 0) {
- printk(KERN_WARNING "<1> gadget ctl instance "
- "init error:%d\n", init_res);
- if (init_res > 0) {
- /* force -E/0 linux convention */
- init_res = init_res * -1;
- }
- }
-
- return init_res;
-}
-module_init(cyas_init);
-
-static void __exit cyas_cleanup(void)
-{
- if (cy_as_gadget_controller != NULL)
- cyasgadget_deinit(cy_as_gadget_controller);
-}
-module_exit(cyas_cleanup);
-
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION(CY_AS_DRIVER_DESC);
-MODULE_AUTHOR("cypress semiconductor");
-
-/*[]*/
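For context on the registration path removed above: usb_gadget_probe_driver() refuses to bind unless the function driver supplies a bind callback plus ->unbind and ->setup, then runs cyas_usb_reset()/cyas_usb_reinit() and finally cyas_ep0_start(). A minimal sketch of such a caller against this 2.6.37-era two-argument API is shown below; the "demo" names and empty callbacks are illustrative placeholders, not part of the removed sources.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* Hypothetical minimal function driver; usb_gadget_probe_driver() in the
 * code above rejects registration unless bind, unbind and setup are set. */
static int demo_bind(struct usb_gadget *gadget)
{
	/* allocate per-function state, pick endpoints, etc. */
	return 0;
}

static void demo_unbind(struct usb_gadget *gadget)
{
	/* release whatever demo_bind() allocated */
}

static int demo_setup(struct usb_gadget *gadget,
		      const struct usb_ctrlrequest *ctrl)
{
	/* class/vendor control requests would be handled here */
	return -EOPNOTSUPP;
}

static void demo_disconnect(struct usb_gadget *gadget)
{
}

static struct usb_gadget_driver demo_driver = {
	.speed      = USB_SPEED_HIGH,
	.unbind     = demo_unbind,
	.setup      = demo_setup,
	.disconnect = demo_disconnect,
	.driver     = { .name = "demo_function" },
};

static int __init demo_init(void)
{
	/* binds demo_driver to the West Bridge gadget and starts ep0 */
	return usb_gadget_probe_driver(&demo_driver, demo_bind);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	usb_gadget_unregister_driver(&demo_driver);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");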
diff --git a/drivers/staging/westbridge/astoria/gadget/cyasgadget.h b/drivers/staging/westbridge/astoria/gadget/cyasgadget.h
deleted file mode 100644
index e01cea7eeb7..00000000000
--- a/drivers/staging/westbridge/astoria/gadget/cyasgadget.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/* cyasgadget.h - Linux USB Gadget driver file for the Cypress West Bridge
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-/*
- * Cypress West Bridge high/full speed USB device controller code
- * Based on the Netchip 2280 device controller by David Brownell
- * in the linux 2.6.10 kernel
- *
- * linux/drivers/usb/gadget/net2280.h
- */
-
-/*
- * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
- * Copyright (C) 2003 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _INCLUDED_CYANGADGET_H_
-#define _INCLUDED_CYANGADGET_H_
-
-#include <linux/device.h>
-#include <linux/moduleparam.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-#include <linux/sched.h>
-
-#include "../include/linux/westbridge/cyastoria.h"
-#include "../include/linux/westbridge/cyashal.h"
-#include "../include/linux/westbridge/cyasdevice.h"
-#include "cyasgadget_ioctl.h"
-
-#include <linux/module.h>
-#include <linux/init.h>
-
-/*char driver defines, revisit*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/fs.h> /* everything... */
-#include <linux/errno.h> /* error codes */
-#include <linux/types.h> /* size_t */
-#include <linux/proc_fs.h>
-#include <linux/fcntl.h> /* O_ACCMODE */
-#include <linux/seq_file.h>
-#include <linux/cdev.h>
-#include <linux/scatterlist.h>
-#include <linux/pagemap.h>
-#include <linux/vmalloc.h> /* vmalloc(), vfree */
-#include <linux/msdos_fs.h> /*fat_alloc_cluster*/
-#include <linux/buffer_head.h>
-#include <asm/system.h> /* cli(), *_flags */
-#include <linux/uaccess.h> /* copy_*_user */
-
-extern int mpage_cleardirty(struct address_space *mapping, int num_pages);
-extern int fat_get_block(struct inode *, sector_t , struct buffer_head *, int);
-extern cy_as_device_handle *cyasdevice_getdevhandle(void);
-
-/* Driver data structures and utilities */
-typedef struct cyasgadget_ep {
- struct usb_ep usb_ep_inst;
- struct cyasgadget *dev;
-
- /* analogous to a host-side qh */
- struct list_head queue;
- const struct usb_endpoint_descriptor *desc;
- unsigned num:8,
- fifo_size:12,
- in_fifo_validate:1,
- out_overflow:1,
- stopped:1,
- is_in:1,
- is_iso:1;
- cy_as_usb_end_point_config cyepconfig;
-} cyasgadget_ep;
-
-typedef struct cyasgadget_req {
- struct usb_request req;
- struct list_head queue;
- int ep_num;
- unsigned mapped:1,
- valid:1,
- complete:1,
- ep_stopped:1;
-} cyasgadget_req;
-
-typedef struct cyasgadget {
- /* each device provides one gadget, several endpoints */
- struct usb_gadget gadget;
- spinlock_t lock;
- struct cyasgadget_ep an_gadget_ep[16];
- struct usb_gadget_driver *driver;
- /* Handle to the West Bridge device */
- cy_as_device_handle dev_handle;
- unsigned enabled:1,
- protocol_stall:1,
- softconnect:1,
- outsetupreq:1;
- struct completion thread_complete;
- wait_queue_head_t thread_wq;
- struct semaphore thread_sem;
- struct list_head thread_queue;
-
- cy_bool tmtp_send_complete;
- cy_bool tmtp_get_complete;
- cy_bool tmtp_need_new_blk_tbl;
- /* Data member used to store the SendObjectComplete event data */
- cy_as_mtp_send_object_complete_data tmtp_send_complete_data;
- /* Data member used to store the GetObjectComplete event data */
- cy_as_mtp_get_object_complete_data tmtp_get_complete_data;
-
-} cyasgadget;
-
-static inline void set_halt(cyasgadget_ep *ep)
-{
- return;
-}
-
-static inline void clear_halt(cyasgadget_ep *ep)
-{
- return;
-}
-
-#define xprintk(dev, level, fmt, args...) \
- printk(level "%s %s: " fmt, driver_name, \
- pci_name(dev->pdev), ## args)
-
-#ifdef DEBUG
-#undef DEBUG
-#define DEBUG(dev, fmt, args...) \
- xprintk(dev, KERN_DEBUG, fmt, ## args)
-#else
-#define DEBUG(dev, fmt, args...) \
- do { } while (0)
-#endif /* DEBUG */
-
-#ifdef VERBOSE
-#define VDEBUG DEBUG
-#else
-#define VDEBUG(dev, fmt, args...) \
- do { } while (0)
-#endif /* VERBOSE */
-
-#define ERROR(dev, fmt, args...) \
- xprintk(dev, KERN_ERR, fmt, ## args)
-#define GADG_WARN(dev, fmt, args...) \
- xprintk(dev, KERN_WARNING, fmt, ## args)
-#define INFO(dev, fmt, args...) \
- xprintk(dev, KERN_INFO, fmt, ## args)
-
-/*-------------------------------------------------------------------------*/
-
-static inline void start_out_naking(struct cyasgadget_ep *ep)
-{
- return;
-}
-
-static inline void stop_out_naking(struct cyasgadget_ep *ep)
-{
- return;
-}
-
-#endif /* _INCLUDED_CYANGADGET_H_ */
diff --git a/drivers/staging/westbridge/astoria/gadget/cyasgadget_ioctl.h b/drivers/staging/westbridge/astoria/gadget/cyasgadget_ioctl.h
deleted file mode 100644
index 21dd716f116..00000000000
--- a/drivers/staging/westbridge/astoria/gadget/cyasgadget_ioctl.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* cyasgadget_ioctl.h - Linux USB Gadget driver ioctl file for
- * Cypress West Bridge
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef CYASGADGET_IOCTL_H
-#define CYASGADGET_IOCTL_H
-
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-typedef struct cy_as_gadget_ioctl_send_object {
- uint32_t status;
- uint32_t byte_count;
- uint32_t transaction_id;
-} cy_as_gadget_ioctl_send_object;
-
-typedef struct cy_as_gadget_ioctl_get_object {
- uint32_t status;
- uint32_t byte_count;
-} cy_as_gadget_ioctl_get_object;
-
-
-typedef struct cy_as_gadget_ioctl_tmtp_status {
- cy_bool tmtp_send_complete;
- cy_bool tmtp_get_complete;
- cy_bool tmtp_need_new_blk_tbl;
- cy_as_gadget_ioctl_send_object tmtp_send_complete_data;
- cy_as_gadget_ioctl_get_object tmtp_get_complete_data;
- uint32_t t_usec;
-} cy_as_gadget_ioctl_tmtp_status;
-
-/*Init send object data*/
-typedef struct cy_as_gadget_ioctl_i_s_o_j_d {
- uint32_t *blk_addr_p; /* starting sector */
- uint16_t *blk_count_p; /* num of sectors in the block */
- /* number of entries in the blk table */
- uint32_t item_count;
- uint32_t num_bytes;
-	/* in case more precise timestamping is done in kernel mode */
- uint32_t t_usec;
- uint32_t ret_val;
- char *file_name;
- uint32_t name_length;
-
-} cy_as_gadget_ioctl_i_s_o_j_d;
-
-
-/*Init get object data*/
-typedef struct cy_as_gadget_ioctl_i_g_o_j_d {
- uint32_t *blk_addr_p;
- uint16_t *blk_count_p;
- uint32_t item_count;
- uint32_t num_bytes;
- uint32_t tid;
- uint32_t ret_val;
- char *file_name;
- uint32_t name_length;
-
-} cy_as_gadget_ioctl_i_g_o_j_d;
-
-typedef struct cy_as_gadget_ioctl_cancel {
- uint32_t ret_val;
-} cy_as_gadget_ioctl_cancel;
-
-#define CYASGADGET_IOC_MAGIC 0xEF
-#define CYASGADGET_GETMTPSTATUS \
- _IOW(CYASGADGET_IOC_MAGIC, 0, cy_as_gadget_ioctl_tmtp_status)
-#define CYASGADGET_CLEARTMTPSTATUS \
- _IO(CYASGADGET_IOC_MAGIC, 1)
-#define CYASGADGET_INITSOJ \
- _IOW(CYASGADGET_IOC_MAGIC, 2, cy_as_gadget_ioctl_i_s_o_j_d)
-#define CYASGADGET_INITGOJ \
- _IOW(CYASGADGET_IOC_MAGIC, 3, cy_as_gadget_ioctl_i_g_o_j_d)
-#define CYASGADGET_CANCELSOJ \
- _IOW(CYASGADGET_IOC_MAGIC, 4, cy_as_gadget_ioctl_cancel)
-#define CYASGADGET_CANCELGOJ \
- _IOW(CYASGADGET_IOC_MAGIC, 5, cy_as_gadget_ioctl_cancel)
-#define CYASGADGET_IOC_MAXNR 6
-
-#endif
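The _IOW()/_IO() command codes above pair with the _IOC_TYPE/_IOC_NR/_IOC_DIR checks performed by cyasgadget_ioctl() earlier in this patch. Assuming user space reaches those codes through whatever device node the surrounding stack exposes for this gadget (the node path below is a placeholder, and cy_bool plus this header are assumed to be visible to the application), a caller would look roughly like this sketch:

/* Hypothetical user-space sketch: only the ioctl codes and the
 * cy_as_gadget_ioctl_tmtp_status layout come from cyasgadget_ioctl.h;
 * the device node name is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "cyasgadget_ioctl.h"

int main(void)
{
	cy_as_gadget_ioctl_tmtp_status st = {0};
	int fd = open("/dev/cyasgadget", O_RDWR);	/* placeholder node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* magic number, command index and direction are validated by
	 * cyasgadget_ioctl() before the status structure is copied back */
	if (ioctl(fd, CYASGADGET_GETMTPSTATUS, &st) < 0) {
		perror("CYASGADGET_GETMTPSTATUS");
		close(fd);
		return 1;
	}

	printf("send_complete=%u get_complete=%u need_new_blk_tbl=%u\n",
	       (unsigned)st.tmtp_send_complete,
	       (unsigned)st.tmtp_get_complete,
	       (unsigned)st.tmtp_need_new_blk_tbl);

	close(fd);
	return 0;
}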
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanerr.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanerr.h
deleted file mode 100644
index c7d4ebb020c..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanerr.h
+++ /dev/null
@@ -1,418 +0,0 @@
-/* Cypress West Bridge API header file (cyanerr.h)
- ## Symbols for backward compatibility with previous releases of Antioch SDK.
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYANERR_H_
-#define _INCLUDED_CYANERR_H_
-
-#include "cyaserr.h"
-
-#ifndef __doxygen__
-
-/*
- * Function completed successfully.
- */
-#define CY_AN_ERROR_SUCCESS (CY_AS_ERROR_SUCCESS)
-
-/*
- * A function trying to acquire a resource was unable to do so.
- */
-#define CY_AN_ERROR_NOT_ACQUIRED (CY_AS_ERROR_NOT_ACQUIRED)
-
-/*
- * A function trying to release a resource was unable to do so.
- */
-#define CY_AN_ERROR_NOT_RELEASED (CY_AS_ERROR_NOT_RELEASED)
-
-/*
- * The West Bridge firmware is not loaded.
- */
-#define CY_AN_ERROR_NO_FIRMWARE (CY_AS_ERROR_NO_FIRMWARE)
-
-/*
- * A timeout occurred waiting on a response from the West Bridge device
- */
-#define CY_AN_ERROR_TIMEOUT (CY_AS_ERROR_TIMEOUT)
-
-/*
- * A request to download firmware was made while not in the CONFIG mode
- */
-#define CY_AN_ERROR_NOT_IN_CONFIG_MODE (CY_AS_ERROR_NOT_IN_CONFIG_MODE)
-
-/*
- * This error is returned if the firmware size specified is invalid.
- */
-#define CY_AN_ERROR_INVALID_SIZE (CY_AS_ERROR_INVALID_SIZE)
-
-/*
- * This error is returned if a request is made to acquire a resource that has
- * already been acquired.
- */
-#define CY_AN_ERROR_RESOURCE_ALREADY_OWNED (CY_AS_ERROR_RESOURCE_ALREADY_OWNED)
-
-/*
- * This error is returned if a request is made to release a resource that has
- * not previously been acquired.
- */
-#define CY_AN_ERROR_RESOURCE_NOT_OWNED (CY_AS_ERROR_RESOURCE_NOT_OWNED)
-
-/*
- * This error is returned when a request is made for a media that does not
- * exist
- */
-#define CY_AN_ERROR_NO_SUCH_MEDIA (CY_AS_ERROR_NO_SUCH_MEDIA)
-
-/*
- * This error is returned when a request is made for a device that does
- * not exist
- */
-#define CY_AN_ERROR_NO_SUCH_DEVICE (CY_AS_ERROR_NO_SUCH_DEVICE)
-
-/*
- * This error is returned when a request is made for a unit that does
- * not exist
- */
-#define CY_AN_ERROR_NO_SUCH_UNIT (CY_AS_ERROR_NO_SUCH_UNIT)
-
-/*
- * This error is returned when a request is made for a block that does
- * not exist
- */
-#define CY_AN_ERROR_INVALID_BLOCK (CY_AS_ERROR_INVALID_BLOCK)
-
-/*
- * This error is returned when an invalid trace level is set.
- */
-#define CY_AN_ERROR_INVALID_TRACE_LEVEL (CY_AS_ERROR_INVALID_TRACE_LEVEL)
-
-/*
- * This error is returned when West Bridge is already in the standby state
- * and an attempt is made to put West Bridge into this state again.
- */
-#define CY_AN_ERROR_ALREADY_STANDBY (CY_AS_ERROR_ALREADY_STANDBY)
-
-/*
- * This error is returned when the API needs to set a pin on the
- * West Bridge device, but this is not supported by the underlying HAL
- * layer.
- */
-#define CY_AN_ERROR_SETTING_WAKEUP_PIN (CY_AS_ERROR_SETTING_WAKEUP_PIN)
-
-/*
- * This error is returned when a module is being started that has
- * already been started.
- */
-#define CY_AN_ERROR_ALREADY_RUNNING (CY_AS_ERROR_ALREADY_RUNNING)
-
-/*
- * This error is returned when a module is being stopped that has
- * already been stopped.
- */
-#define CY_AN_ERROR_NOT_RUNNING (CY_AS_ERROR_NOT_RUNNING)
-
-/*
- * This error is returned when the caller tries to claim a media that has
- * already been claimed.
- */
-#define CY_AN_ERROR_MEDIA_ALREADY_CLAIMED (CY_AS_ERROR_MEDIA_ALREADY_CLAIMED)
-
-/*
- * This error is returned when the caller tries to release a media that
- * has already been released.
- */
-#define CY_AN_ERROR_MEDIA_NOT_CLAIMED (CY_AS_ERROR_MEDIA_NOT_CLAIMED)
-
-/*
- * This error is returned when trying to cancel an asynchronous
- * operation while no async operation is pending.
- */
-#define CY_AN_ERROR_NO_OPERATION_PENDING (CY_AS_ERROR_NO_OPERATION_PENDING)
-
-/*
- * This error is returned when an invalid endpoint number is provided
- * to an API call.
- */
-#define CY_AN_ERROR_INVALID_ENDPOINT (CY_AS_ERROR_INVALID_ENDPOINT)
-
-/*
- * This error is returned when an invalid descriptor type
- * is specified in an API call.
- */
-#define CY_AN_ERROR_INVALID_DESCRIPTOR (CY_AS_ERROR_INVALID_DESCRIPTOR)
-
-/*
- * This error is returned when an invalid descriptor index
- * is specified in an API call.
- */
-#define CY_AN_ERROR_BAD_INDEX (CY_AS_ERROR_BAD_INDEX)
-
-/*
- * This error is returned if trying to set a USB descriptor
- * when in the P port enumeration mode.
- */
-#define CY_AN_ERROR_BAD_ENUMERATION_MODE (CY_AS_ERROR_BAD_ENUMERATION_MODE)
-
-/*
- * This error is returned when the endpoint configuration specified
- * is not valid.
- */
-#define CY_AN_ERROR_INVALID_CONFIGURATION (CY_AS_ERROR_INVALID_CONFIGURATION)
-
-/*
- * This error is returned when the API cannot verify it is connected
- * to an West Bridge device.
- */
-#define CY_AN_ERROR_NO_ANTIOCH (CY_AS_ERROR_NO_ANTIOCH)
-
-/*
- * This error is returned when an API function is called and
- * CyAnMiscConfigureDevice has not been called to configure West
- * Bridge for the current environment.
- */
-#define CY_AN_ERROR_NOT_CONFIGURED (CY_AS_ERROR_NOT_CONFIGURED)
-
-/*
- * This error is returned when West Bridge cannot allocate memory required for
- * internal API operations.
- */
-#define CY_AN_ERROR_OUT_OF_MEMORY (CY_AS_ERROR_OUT_OF_MEMORY)
-
-/*
- * This error is returned when a module is being started that has
- * already been started.
- */
-#define CY_AN_ERROR_NESTED_SLEEP (CY_AS_ERROR_NESTED_SLEEP)
-
-/*
- * This error is returned when an operation is attempted on an endpoint that has
- * been disabled.
- */
-#define CY_AN_ERROR_ENDPOINT_DISABLED (CY_AS_ERROR_ENDPOINT_DISABLED)
-
-/*
- * This error is returned when a call is made to an API function when the device
- * is in standby.
- */
-#define CY_AN_ERROR_IN_STANDBY (CY_AS_ERROR_IN_STANDBY)
-
-/*
- * This error is returned when an API call is made with an invalid handle value.
- */
-#define CY_AN_ERROR_INVALID_HANDLE (CY_AS_ERROR_INVALID_HANDLE)
-
-/*
- * This error is returned when an invalid response is returned from the West
- * Bridge device.
- */
-#define CY_AN_ERROR_INVALID_RESPONSE (CY_AS_ERROR_INVALID_RESPONSE)
-
-/*
- * This error is returned from the callback function for any asynchronous
- * read or write request that is canceled.
- */
-#define CY_AN_ERROR_CANCELED (CY_AS_ERROR_CANCELED)
-
-/*
- * This error is returned when the call to create sleep channel fails
- * in the HAL layer.
- */
-#define CY_AN_ERROR_CREATE_SLEEP_CHANNEL_FAILED \
- (CY_AS_ERROR_CREATE_SLEEP_CHANNEL_FAILED)
-
-/*
- * This error is returned when the call to CyAnMiscLeaveStandby
- * is made and the device is not in standby.
- */
-#define CY_AN_ERROR_NOT_IN_STANDBY (CY_AS_ERROR_NOT_IN_STANDBY)
-
-/*
- * This error is returned when the call to destroy sleep channel fails
- * in the HAL layer.
- */
-#define CY_AN_ERROR_DESTROY_SLEEP_CHANNEL_FAILED \
- (CY_AS_ERROR_DESTROY_SLEEP_CHANNEL_FAILED)
-
-/*
- * This error is returned when an invalid resource is specified to a call
- * to CyAnMiscAcquireResource() or CyAnMiscReleaseResource()
- */
-#define CY_AN_ERROR_INVALID_RESOURCE (CY_AS_ERROR_INVALID_RESOURCE)
-
-/*
- * This error occurs when an operation is requested on an endpoint that has
- * a currently pending async operation.
- */
-#define CY_AN_ERROR_ASYNC_PENDING (CY_AS_ERROR_ASYNC_PENDING)
-
-/*
- * This error is returned when a call to CyAnStorageCancelAsync() or
- * CyAnUsbCancelAsync() is made when no asynchronous request is pending.
- */
-#define CY_AN_ERROR_ASYNC_NOT_PENDING (CY_AS_ERROR_ASYNC_NOT_PENDING)
-
-/*
- * This error is returned when a request is made to put the West Bridge device
- * into standby mode while the USB stack is still active.
- */
-#define CY_AN_ERROR_USB_RUNNING (CY_AS_ERROR_USB_RUNNING)
-
-/*
- * A request in the wrong direction was issued on an endpoint.
- */
-#define CY_AN_ERROR_USB_BAD_DIRECTION (CY_AS_ERROR_USB_BAD_DIRECTION)
-
-/*
- * An invalid request was received
- */
-#define CY_AN_ERROR_INVALID_REQUEST (CY_AS_ERROR_INVALID_REQUEST)
-
-/*
- * An ACK request was requested while no setup packet was pending.
- */
-#define CY_AN_ERROR_NO_SETUP_PACKET_PENDING \
- (CY_AS_ERROR_NO_SETUP_PACKET_PENDING)
-
-/*
- * A call was made to an API function that cannot be called from a callback.
- */
-#define CY_AN_ERROR_INVALID_IN_CALLBACK (CY_AS_ERROR_INVALID_IN_CALLBACK)
-
-/*
- * A call was made to CyAnUsbSetEndPointConfig() before
- * CyAnUsbSetPhysicalConfiguration() was called.
- */
-#define CY_AN_ERROR_ENDPOINT_CONFIG_NOT_SET \
- (CY_AS_ERROR_ENDPOINT_CONFIG_NOT_SET)
-
-/*
- * The physical endpoint referenced is not valid in the current
- * physical configuration
- */
-#define CY_AN_ERROR_INVALID_PHYSICAL_ENDPOINT \
- (CY_AS_ERROR_INVALID_PHYSICAL_ENDPOINT)
-
-/*
- * The data supplied to the CyAnMiscDownloadFirmware() call is not aligned on a
- * WORD (16 bit) boundary.
- */
-#define CY_AN_ERROR_ALIGNMENT_ERROR (CY_AS_ERROR_ALIGNMENT_ERROR)
-
-/*
- * A call was made to destroy the West Bridge device, but the USB stack or the
- * storage stack was still running.
- */
-#define CY_AN_ERROR_STILL_RUNNING (CY_AS_ERROR_STILL_RUNNING)
-
-/*
- * A call was made to the API for a function that is not yet supported.
- */
-#define CY_AN_ERROR_NOT_YET_SUPPORTED (CY_AS_ERROR_NOT_YET_SUPPORTED)
-
-/*
- * A NULL callback was provided where a non-NULL callback was required
- */
-#define CY_AN_ERROR_NULL_CALLBACK (CY_AS_ERROR_NULL_CALLBACK)
-
-/*
- * This error is returned when a request is made to put the West Bridge device
- * into standby mode while the storage stack is still active.
- */
-#define CY_AN_ERROR_STORAGE_RUNNING (CY_AS_ERROR_STORAGE_RUNNING)
-
-/*
- * This error is returned when an operation is attempted that cannot be
- * completed while the USB stack is connected to a USB host.
- */
-#define CY_AN_ERROR_USB_CONNECTED (CY_AS_ERROR_USB_CONNECTED)
-
-/*
- * This error is returned when a USB disconnect is attempted and the
- * West Bridge device is not connected.
- */
-#define CY_AN_ERROR_USB_NOT_CONNECTED (CY_AS_ERROR_USB_NOT_CONNECTED)
-
-/*
- * This error is returned when a P2S storage operation was attempted and
- * data could not be read or written to the storage media.
- */
-#define CY_AN_ERROR_MEDIA_ACCESS_FAILURE (CY_AS_ERROR_MEDIA_ACCESS_FAILURE)
-
-/*
- * This error is returned when a P2S storage operation was attempted and
- * the media is write protected.
- */
-#define CY_AN_ERROR_MEDIA_WRITE_PROTECTED (CY_AS_ERROR_MEDIA_WRITE_PROTECTED)
-
-/*
- * This error is returned when an attempt is made to cancel a request
- * that has already been sent to the West Bridge.
- */
-#define CY_AN_ERROR_OPERATION_IN_TRANSIT (CY_AS_ERROR_OPERATION_IN_TRANSIT)
-
-/*
- * This error is returned when an invalid parameter is passed to one of
- * the APIs.
- */
-#define CY_AN_ERROR_INVALID_PARAMETER (CY_AS_ERROR_INVALID_PARAMETER)
-
-/*
- * This error is returned if an API is not supported by the current
- * West Bridge device or the active firmware version.
- */
-#define CY_AN_ERROR_NOT_SUPPORTED (CY_AS_ERROR_NOT_SUPPORTED)
-
-/*
- * This error is returned when a call is made to one of the Storage or
- * USB APIs while the device is in suspend mode.
- */
-#define CY_AN_ERROR_IN_SUSPEND (CY_AS_ERROR_IN_SUSPEND)
-
-/*
- * This error is returned when the call to CyAnMiscLeaveSuspend
- * is made and the device is not in suspend mode.
- */
-#define CY_AN_ERROR_NOT_IN_SUSPEND (CY_AS_ERROR_NOT_IN_SUSPEND)
-
-/*
- * This error is returned when a command that is disabled by USB is called.
- */
-#define CY_AN_ERROR_FEATURE_NOT_ENABLED (CY_AS_ERROR_FEATURE_NOT_ENABLED)
-
-/*
- * This error is returned when an Async storage read or write is called before a
- * query device call is issued.
- */
-#define CY_AN_ERROR_QUERY_DEVICE_NEEDED (CY_AS_ERROR_QUERY_DEVICE_NEEDED)
-
-/*
- * This error is returned when a call is made to USB or STORAGE Start or
- * Stop before a prior Start or Stop has finished.
- */
-#define CY_AN_ERROR_STARTSTOP_PENDING (CY_AS_ERROR_STARTSTOP_PENDING)
-
-/*
- * This error is returned when a request is made for a bus that does not exist
- */
-#define CY_AN_ERROR_NO_SUCH_BUS (CY_AS_ERROR_NO_SUCH_BUS)
-
-#endif /* __doxygen__ */
-
-#endif /* _INCLUDED_CYANERR_H_ */
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanmedia.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanmedia.h
deleted file mode 100644
index be074887f5a..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanmedia.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Cypress West Bridge API header file (cyanmedia.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYANMEDIA_H_
-#define _INCLUDED_CYANMEDIA_H_
-
-#include "cyas_cplus_start.h"
-
-/* Summary
- Specifies a specific type of media supported by West Bridge
-
- Description
- The West Bridge device supports five specific types
- * of media as storage/IO devices attached to its S-Port. This
- type is used to indicate the type of media being referenced in
- any API call.
-*/
-#include "cyasmedia.h"
-
-/* Flash NAND memory (may be SLC or MLC) */
-#define cy_an_media_nand cy_as_media_nand
-
-/* An SD flash memory device */
-#define cy_an_media_sd_flash cy_as_media_sd_flash
-
-/* An MMC flash memory device */
-#define cy_an_media_mmc_flash cy_as_media_mmc_flash
-
-/* A CE-ATA disk drive */
-#define cy_an_media_ce_ata cy_as_media_ce_ata
-
- /* SDIO device. */
-#define cy_an_media_sdio cy_as_media_sdio
-#define cy_an_media_max_media_value \
- cy_as_media_max_media_value
-
-typedef cy_as_media_type cy_an_media_type;
-
-#include "cyas_cplus_end.h"
-
-#endif /* _INCLUDED_CYANMEDIA_H_ */

diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanmisc.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanmisc.h
deleted file mode 100644
index 0838648dc16..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanmisc.h
+++ /dev/null
@@ -1,614 +0,0 @@
-/* Cypress West Bridge API header file (cyanmisc.h)
- ## Version for backward compatibility with previous Antioch SDK releases.
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYANMISC_H_
-#define _INCLUDED_CYANMISC_H_
-
-#include "cyantypes.h"
-#include <cyasmisc.h>
-#include "cyanmedia.h"
-#include "cyas_cplus_start.h"
-
-#define CY_AN_LEAVE_STANDBY_DELAY_CLOCK \
- (CY_AS_LEAVE_STANDBY_DELAY_CLOCK)
-#define CY_AN_RESET_DELAY_CLOCK \
- (CY_AS_RESET_DELAY_CLOCK)
-
-#define CY_AN_LEAVE_STANDBY_DELAY_CRYSTAL \
- (CY_AS_LEAVE_STANDBY_DELAY_CRYSTAL)
-
-#define CY_AN_RESET_DELAY_CRYSTAL \
- (CY_AS_RESET_DELAY_CRYSTAL)
-
-/* Defines to convert the old CyAn names to the new
- * CyAs names
- */
-typedef cy_as_device_handle cy_an_device_handle;
-
-#define cy_an_device_dack_ack cy_as_device_dack_ack
-#define cy_an_device_dack_eob cy_as_device_dack_eob
-typedef cy_as_device_dack_mode cy_an_device_dack_mode;
-
-typedef cy_as_device_config cy_an_device_config;
-
-#define cy_an_resource_u_s_b cy_as_bus_u_sB
-#define cy_an_resource_sdio_MMC cy_as_bus_1
-#define cy_an_resource_nand cy_as_bus_0
-typedef cy_as_resource_type cy_an_resource_type;
-
-#define cy_an_reset_soft cy_as_reset_soft
-#define cy_an_reset_hard cy_as_reset_hard
-typedef cy_as_reset_type cy_an_reset_type;
-typedef cy_as_funct_c_b_type cy_an_funct_c_b_type;
-typedef cy_as_function_callback cy_an_function_callback;
-
-#define cy_an_event_misc_initialized \
- cy_as_event_misc_initialized
-#define cy_an_event_misc_awake \
- cy_as_event_misc_awake
-#define cy_an_event_misc_heart_beat \
- cy_as_event_misc_heart_beat
-#define cy_an_event_misc_wakeup \
- cy_as_event_misc_wakeup
-#define cy_an_event_misc_device_mismatch \
- cy_as_event_misc_device_mismatch
-typedef cy_as_misc_event_type \
- cy_an_misc_event_type;
-typedef cy_as_misc_event_callback \
- cy_an_misc_event_callback;
-
-#define cy_an_misc_gpio_0 cy_as_misc_gpio_0
-#define cy_an_misc_gpio_1 cy_as_misc_gpio_1
-#define cy_an_misc_gpio__nand_CE \
- cy_as_misc_gpio__nand_CE
-#define cy_an_misc_gpio__nand_CE2 \
- cy_as_misc_gpio__nand_CE2
-#define cy_an_misc_gpio__nand_WP \
- cy_as_misc_gpio__nand_WP
-#define cy_an_misc_gpio__nand_CLE \
- cy_as_misc_gpio__nand_CLE
-#define cy_an_misc_gpio__nand_ALE \
- cy_as_misc_gpio__nand_ALE
-#define cy_an_misc_gpio_U_valid \
- cy_as_misc_gpio_U_valid
-#define cy_an_misc_gpio_SD_POW \
- cy_as_misc_gpio_SD_POW
-typedef cy_as_misc_gpio cy_an_misc_gpio;
-
-#define CY_AN_SD_DEFAULT_FREQ CY_AS_SD_DEFAULT_FREQ
-#define CY_AN_SD_RATED_FREQ CY_AS_SD_RATED_FREQ
-typedef cy_as_low_speed_sd_freq cy_an_low_speed_sd_freq;
-
-#define CY_AN_HS_SD_FREQ_48 CY_AS_HS_SD_FREQ_48
-#define CY_AN_HS_SD_FREQ_24 CY_AS_HS_SD_FREQ_24
-typedef cy_as_high_speed_sd_freq \
- cy_an_high_speed_sd_freq;
-
-#define cy_an_misc_active_high cy_as_misc_active_high
-#define cy_an_misc_active_low cy_as_misc_active_low
-typedef cy_as_misc_signal_polarity cy_an_misc_signal_polarity;
-
-typedef cy_as_get_firmware_version_data \
- cy_an_get_firmware_version_data;
-
-enum {
- CYAN_FW_TRACE_LOG_NONE = 0,
- CYAN_FW_TRACE_LOG_STATE,
- CYAN_FW_TRACE_LOG_CALLS,
- CYAN_FW_TRACE_LOG_STACK_TRACE,
- CYAN_FW_TRACE_MAX_LEVEL
-};
-
-
-/***********************************/
-/***********************************/
-/* FUNCTIONS */
-/***********************************/
-/***********************************/
-
-
-EXTERN cy_an_return_status_t
-cy_an_misc_create_device(
- cy_an_device_handle *handle_p,
- cy_an_hal_device_tag tag
- );
-#define cy_an_misc_create_device(h, tag) \
- cy_as_misc_create_device((cy_as_device_handle *)(h), \
- (cy_as_hal_device_tag)(tag))
-
-EXTERN cy_an_return_status_t
-cy_an_misc_destroy_device(
- cy_an_device_handle handle
- );
-#define cy_an_misc_destroy_device(h) \
- cy_as_misc_destroy_device((cy_as_device_handle)(h))
-
-EXTERN cy_an_return_status_t
-cy_an_misc_configure_device(
- cy_an_device_handle handle,
- cy_an_device_config *config_p
- );
-#define cy_an_misc_configure_device(h, cfg) \
- cy_as_misc_configure_device((cy_as_device_handle)(h), \
- (cy_as_device_config *)(cfg))
-
-EXTERN cy_an_return_status_t
-cy_an_misc_in_standby(
- cy_an_device_handle handle,
- cy_bool *standby
- );
-#define cy_an_misc_in_standby(h, standby) \
- cy_as_misc_in_standby((cy_as_device_handle)(h), (standby))
-
-/* Sync version of Download Firmware */
-EXTERN cy_an_return_status_t
-cy_an_misc_download_firmware(
- cy_an_device_handle handle,
- const void *fw_p,
- uint16_t size
- );
-
-#define cy_an_misc_download_firmware(handle, fw_p, size) \
- cy_as_misc_download_firmware((cy_as_device_handle)\
- (handle), (fw_p), (size), 0, 0)
-
-/* Async version of Download Firmware */
-EXTERN cy_an_return_status_t
-cy_an_misc_download_firmware_e_x(
- cy_an_device_handle handle,
- const void *fw_p,
- uint16_t size,
- cy_an_function_callback cb,
- uint32_t client
- );
-
-#define cy_an_misc_download_firmware_e_x(h, fw_p, size, cb, client) \
- cy_as_misc_download_firmware((cy_as_device_handle)(h), \
- (fw_p), (size), (cy_as_function_callback)(cb), (client))
-
-/* Sync version of Get Firmware Version */
-EXTERN cy_an_return_status_t
-cy_as_misc_get_firmware_version_dep(
- cy_as_device_handle handle,
- uint16_t *major,
- uint16_t *minor,
- uint16_t *build,
- uint8_t *media_type,
- cy_bool *is_debug_mode);
-
-#define cy_an_misc_get_firmware_version\
- (h, major, minor, bld, type, mode) \
- cy_as_misc_get_firmware_version_dep((cy_as_device_handle)(h), \
- (major), (minor), (bld), (type), (mode))
-
-/* Async version of Get Firmware Version*/
-EXTERN cy_an_return_status_t
-cy_an_misc_get_firmware_version_e_x(
- cy_an_device_handle handle,
- cy_an_get_firmware_version_data *data,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_misc_get_firmware_version_e_x\
- (h, data, cb, client) \
- cy_as_misc_get_firmware_version((cy_as_device_handle)(h), \
- (data), (cy_as_function_callback)(cb), (client))
-
-/* Sync version of Read MCU Register*/
-EXTERN cy_an_return_status_t
-cy_an_misc_read_m_c_u_register(
- cy_an_device_handle handle,
- uint16_t address,
- uint8_t *value
- );
-
-#define cy_an_misc_read_m_c_u_register(handle, address, value) \
- cy_as_misc_read_m_c_u_register((cy_as_device_handle)(handle), \
- (address), (value), 0, 0)
-
-/* Async version of Read MCU Register*/
-EXTERN cy_an_return_status_t
-cy_an_misc_read_m_c_u_register_e_x(
- cy_an_device_handle handle,
- uint16_t address,
- uint8_t *value,
- cy_an_function_callback cb,
- uint32_t client
- );
-
-#define cy_an_misc_read_m_c_u_register_e_x\
- (h, addr, val, cb, client) \
- cy_as_misc_read_m_c_u_register((cy_as_device_handle)(h), \
- (addr), (val), (cy_as_function_callback)(cb), (client))
-
-/* Sync version of Write MCU Register*/
-EXTERN cy_an_return_status_t
-cy_an_misc_write_m_c_u_register(
- cy_an_device_handle handle,
- uint16_t address,
- uint8_t mask,
- uint8_t value
- );
-#define cy_an_misc_write_m_c_u_register\
- (handle, address, mask, value) \
- cy_as_misc_write_m_c_u_register((cy_as_device_handle)(handle), \
- (address), (mask), (value), 0, 0)
-
-/* Async version of Write MCU Register*/
-EXTERN cy_an_return_status_t
-cy_an_misc_write_m_c_u_register_e_x(
- cy_an_device_handle handle,
- uint16_t address,
- uint8_t mask,
- uint8_t value,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_misc_write_m_c_u_register_e_x\
- (h, addr, mask, val, cb, client) \
- cy_as_misc_write_m_c_u_register((cy_as_device_handle)(h), \
- (addr), (mask), (val), (cy_as_function_callback)(cb), (client))
-
-/* Sync version of Reset */
-EXTERN cy_an_return_status_t
-cy_an_misc_reset(
- cy_an_device_handle handle,
- cy_an_reset_type type,
- cy_bool flush
- );
-#define cy_an_misc_reset(handle, type, flush) \
- cy_as_misc_reset((cy_as_device_handle)(handle), \
- (type), (flush), 0, 0)
-
-/* Async version of Reset */
-EXTERN cy_an_return_status_t
-cy_an_misc_reset_e_x(
- cy_an_device_handle handle,
- cy_an_reset_type type,
- cy_bool flush,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_misc_reset_e_x(h, type, flush, cb, client) \
- cy_as_misc_reset((cy_as_device_handle)(h), \
- (cy_as_reset_type)(type), (flush), \
- (cy_as_function_callback)(cb), (client))
-
-/* Synchronous version of CyAnMiscAcquireResource. */
-EXTERN cy_an_return_status_t
-cy_an_misc_acquire_resource(
- cy_an_device_handle handle,
- cy_an_resource_type type,
- cy_bool force
- );
-#define cy_an_misc_acquire_resource(h, type, force) \
- cy_as_misc_acquire_resource_dep((cy_as_device_handle)(h), \
- (cy_as_resource_type)(type), (force))
-
-/* Asynchronous version of CyAnMiscAcquireResource. */
-EXTERN cy_an_return_status_t
-cy_an_misc_acquire_resource_e_x(
- cy_an_device_handle handle,
- cy_an_resource_type *type,
- cy_bool force,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_misc_acquire_resource_e_x\
- (h, type_p, force, cb, client) \
- cy_as_misc_acquire_resource((cy_as_device_handle)(h), \
- (cy_as_resource_type *)(type_p), \
- (force), (cy_as_function_callback)(cb), (client))
-
-/* The one and only version of Release resource */
-EXTERN cy_an_return_status_t
-cy_an_misc_release_resource(
- cy_an_device_handle handle,
- cy_an_resource_type type
- );
-#define cy_an_misc_release_resource(h, type)\
- cy_as_misc_release_resource((cy_as_device_handle)(h), \
- (cy_as_resource_type)(type))
-
-/* Synchronous version of CyAnMiscSetTraceLevel. */
-EXTERN cy_an_return_status_t
-cy_an_misc_set_trace_level(
- cy_an_device_handle handle,
- uint8_t level,
- cy_an_media_type media,
- uint32_t device,
- uint32_t unit
- );
-
-#define cy_an_misc_set_trace_level\
- (handle, level, media, device, unit) \
- cy_as_misc_set_trace_level_dep((cy_as_device_handle)(handle), \
- (level), (cy_as_media_type)(media), (device), (unit), 0, 0)
-
-/* Asynchronous version of CyAnMiscSetTraceLevel. */
-EXTERN cy_an_return_status_t
-cy_an_misc_set_trace_level_e_x(
- cy_an_device_handle handle,
- uint8_t level,
- cy_an_media_type media,
- uint32_t device,
- uint32_t unit,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_misc_set_trace_level_e_x\
- (h, level, media, device, unit, cb, client) \
- cy_as_misc_set_trace_level_dep((cy_as_device_handle)(h), \
- (level), (cy_as_media_type)(media), (device), (unit), \
- (cy_as_function_callback)(cb), (client))
-
-/* Synchronous version of CyAnMiscEnterStandby. */
-EXTERN cy_an_return_status_t
-cy_an_misc_enter_standby(
- cy_an_device_handle handle,
- cy_bool pin
- );
-#define cy_an_misc_enter_standby(handle, pin) \
- cy_as_misc_enter_standby(\
- (cy_as_device_handle)(handle), (pin), 0, 0)
-
-/* Asynchronous version of CyAnMiscEnterStandby. */
-EXTERN cy_an_return_status_t
-cy_an_misc_enter_standby_e_x(
- cy_an_device_handle handle,
- cy_bool pin,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_misc_enter_standby_e_x(h, pin, cb, client) \
- cy_as_misc_enter_standby((cy_as_device_handle)(h), \
- (pin), (cy_as_function_callback)(cb), (client))
-
-/* Only one version of CyAnMiscLeaveStandby. */
-EXTERN cy_an_return_status_t
-cy_an_misc_leave_standby(
- cy_an_device_handle handle,
- cy_an_resource_type type
- );
-#define cy_an_misc_leave_standby(h, type) \
- cy_as_misc_leave_standby((cy_as_device_handle)(h), \
- (cy_as_resource_type)(type))
-
-/* The one version of Misc Register Callback */
-EXTERN cy_an_return_status_t
-cy_an_misc_register_callback(
- cy_an_device_handle handle,
- cy_an_misc_event_callback callback
- );
-#define cy_an_misc_register_callback(h, cb) \
- cy_as_misc_register_callback((cy_as_device_handle)(h), \
- (cy_as_misc_event_callback)(cb))
-
-/* The only version of SetLogLevel */
-EXTERN void
-cy_an_misc_set_log_level(
- uint8_t level
- );
-#define cy_an_misc_set_log_level(level) \
- cy_as_misc_set_log_level(level)
-
-/* Sync version of Misc Storage Changed */
-EXTERN cy_an_return_status_t
-cy_an_misc_storage_changed(
- cy_an_device_handle handle
- );
-#define cy_an_misc_storage_changed(handle) \
- cy_as_misc_storage_changed((cy_as_device_handle)(handle), 0, 0)
-
-/* Async version of Misc Storage Changed */
-EXTERN cy_an_return_status_t
-cy_an_misc_storage_changed_e_x(
- cy_an_device_handle handle,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_misc_storage_changed_e_x(h, cb, client) \
- cy_as_misc_storage_changed((cy_as_device_handle)(h), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of Heartbeat control */
-EXTERN cy_an_return_status_t
-cy_an_misc_heart_beat_control(
- cy_an_device_handle handle,
- cy_bool enable
- );
-#define cy_an_misc_heart_beat_control(handle, enable) \
- cy_as_misc_heart_beat_control((cy_as_device_handle)\
- (handle), (enable), 0, 0)
-
-/* Async version of Heartbeat control */
-EXTERN cy_an_return_status_t
-cy_an_misc_heart_beat_control_e_x(
- cy_an_device_handle handle,
- cy_bool enable,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_misc_heart_beat_control_e_x(h, enable, cb, client) \
- cy_as_misc_heart_beat_control((cy_as_device_handle)(h), \
- (enable), (cy_as_function_callback)(cb), (client))
-
-/* Sync version of Get Gpio */
-EXTERN cy_an_return_status_t
-cy_an_misc_get_gpio_value(
- cy_an_device_handle handle,
- cy_an_misc_gpio pin,
- uint8_t *value
- );
-#define cy_an_misc_get_gpio_value(handle, pin, value) \
- cy_as_misc_get_gpio_value((cy_as_device_handle)(handle), \
- (cy_as_misc_gpio)(pin), (value), 0, 0)
-
-/* Async version of Get Gpio */
-EXTERN cy_an_return_status_t
-cy_an_misc_get_gpio_value_e_x(
- cy_an_device_handle handle,
- cy_an_misc_gpio pin,
- uint8_t *value,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_misc_get_gpio_value_e_x(h, pin, value, cb, client) \
- cy_as_misc_get_gpio_value((cy_as_device_handle)(h), \
- (cy_as_misc_gpio)(pin), (value), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of Set Gpio */
-EXTERN cy_an_return_status_t
-cy_an_misc_set_gpio_value(
- cy_an_device_handle handle,
- cy_an_misc_gpio pin,
- uint8_t value
- );
-#define cy_an_misc_set_gpio_value(handle, pin, value) \
- cy_as_misc_set_gpio_value((cy_as_device_handle)(handle), \
- (cy_as_misc_gpio)(pin), (value), 0, 0)
-
-/* Async version of Set Gpio */
-EXTERN cy_an_return_status_t
-cy_an_misc_set_gpio_value_e_x(
- cy_an_device_handle handle,
- cy_an_misc_gpio pin,
- uint8_t value,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_misc_set_gpio_value_e_x\
- (h, pin, value, cb, client) \
- cy_as_misc_set_gpio_value((cy_as_device_handle)(h), \
- (cy_as_misc_gpio)(pin), (value), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of Enter suspend */
-EXTERN cy_an_return_status_t
-cy_an_misc_enter_suspend(
- cy_an_device_handle handle,
- cy_bool usb_wakeup_en,
- cy_bool gpio_wakeup_en
- );
-#define cy_an_misc_enter_suspend(handle, usb_wakeup_en, \
- gpio_wakeup_en) \
- cy_as_misc_enter_suspend((cy_as_device_handle)(handle), \
- (usb_wakeup_en), (gpio_wakeup_en), 0, 0)
-
-/* Async version of Enter suspend */
-EXTERN cy_an_return_status_t
-cy_an_misc_enter_suspend_e_x(
- cy_an_device_handle handle,
- cy_bool usb_wakeup_en,
- cy_bool gpio_wakeup_en,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_misc_enter_suspend_e_x(h, usb_en, gpio_en, cb, client)\
- cy_as_misc_enter_suspend((cy_as_device_handle)(h), (usb_en), \
- (gpio_en), (cy_as_function_callback)(cb), (client))
-
-/* Sync version of Leave suspend */
-EXTERN cy_an_return_status_t
-cy_an_misc_leave_suspend(
- cy_an_device_handle handle
- );
-#define cy_an_misc_leave_suspend(handle) \
- cy_as_misc_leave_suspend((cy_as_device_handle)(handle), 0, 0)
-
-/* Async version of Leave suspend */
-EXTERN cy_an_return_status_t
-cy_an_misc_leave_suspend_e_x(
- cy_an_device_handle handle,
- cy_an_function_callback cb,
- uint32_t client
- );
-
-#define cy_an_misc_leave_suspend_e_x(h, cb, client) \
- cy_as_misc_leave_suspend((cy_as_device_handle)(h), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of SetLowSpeedSDFreq */
-EXTERN cy_an_return_status_t
-cy_an_misc_set_low_speed_sd_freq(
- cy_an_device_handle handle,
- cy_an_low_speed_sd_freq setting
- );
-#define cy_an_misc_set_low_speed_sd_freq(h, setting) \
- cy_as_misc_set_low_speed_sd_freq((cy_as_device_handle)(h), \
- (cy_as_low_speed_sd_freq)(setting), 0, 0)
-
-/* Async version of SetLowSpeedSDFreq */
-EXTERN cy_an_return_status_t
-cy_an_misc_set_low_speed_sd_freq_e_x(
- cy_an_device_handle handle,
- cy_an_low_speed_sd_freq setting,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_misc_set_low_speed_sd_freq_e_x\
-(h, setting, cb, client) \
- cy_as_misc_set_low_speed_sd_freq((cy_as_device_handle)(h), \
- (cy_as_low_speed_sd_freq)(setting), \
- (cy_as_function_callback)(cb), (client))
-
-/* SetHighSpeedSDFreq */
-EXTERN cy_an_return_status_t
-cy_an_misc_set_high_speed_sd_freq(
- cy_an_device_handle handle,
- cy_an_high_speed_sd_freq setting,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_misc_set_high_speed_sd_freq(h, setting, cb, client) \
- cy_as_misc_set_high_speed_sd_freq((cy_as_device_handle)(h), \
- (cy_as_high_speed_sd_freq)(setting), \
- (cy_as_function_callback)(cb), (client))
-
-/* ReserveLNABootArea */
-EXTERN cy_an_return_status_t
-cy_an_misc_reserve_l_n_a_boot_area(
- cy_an_device_handle handle,
- uint8_t numzones,
- cy_an_function_callback cb,
- uint32_t client);
-#define cy_an_misc_reserve_l_n_a_boot_area(h, num, cb, client) \
- cy_as_misc_reserve_l_n_a_boot_area((cy_as_device_handle)(h), \
- num, (cy_as_function_callback)(cb), (client))
-
-/* SetSDPowerPolarity */
-EXTERN cy_an_return_status_t
-cy_an_misc_set_sd_power_polarity(
- cy_an_device_handle handle,
- cy_an_misc_signal_polarity polarity,
- cy_an_function_callback cb,
- uint32_t client);
-#define cy_an_misc_set_sd_power_polarity(h, pol, cb, client) \
- cy_as_misc_set_sd_power_polarity((cy_as_device_handle)(h), \
- (cy_as_misc_signal_polarity)(pol), \
- (cy_as_function_callback)(cb), (client))
-
-#include "cyas_cplus_end.h"
-
-#endif
-
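Every compatibility entry point in this header follows the same pattern: a prototype kept for documentation, immediately shadowed by a macro that casts the handle and forwards to the cy_as_* implementation, with the synchronous variants supplying 0, 0 for the callback and client arguments. A hedged sketch of what the preprocessor produces for the firmware download call; 'ret', 'handle', 'fw_image' and 'fw_size' are placeholders, not names from this header:

/* What a legacy call site writes ... */
ret = cy_an_misc_download_firmware(handle, fw_image, fw_size);

/* ... and what it expands to after preprocessing: a synchronous
 * cy_as_misc_download_firmware() call with no completion callback. */
ret = cy_as_misc_download_firmware((cy_as_device_handle)(handle),
		(fw_image), (fw_size), 0, 0);

The _e_x variants expand the same way but pass the caller's callback and client cookie through instead of 0, 0.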
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanregs.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanregs.h
deleted file mode 100644
index d670291bd24..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanregs.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/* Cypress West Bridge API header file (cyanregs.h)
- ## Register and field definitions for the Antioch device.
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYANREG_H_
-#define _INCLUDED_CYANREG_H_
-
-#if !defined(__doxygen__)
-
-#define CY_AN_MEM_CM_WB_CFG_ID (0x80)
-#define CY_AN_MEM_CM_WB_CFG_ID_VER_MASK (0x000F)
-#define CY_AN_MEM_CM_WB_CFG_ID_HDID_MASK (0xFFF0)
-#define CY_AN_MEM_CM_WB_CFG_ID_HDID_ANTIOCH_VALUE (0xA100)
-#define CY_AN_MEM_CM_WB_CFG_ID_HDID_ASTORIA_FPGA_VALUE (0x6800)
-#define CY_AN_MEM_CM_WB_CFG_ID_HDID_ASTORIA_VALUE (0xA200)
-
-
-#define CY_AN_MEM_RST_CTRL_REG (0x81)
-#define CY_AN_MEM_RST_CTRL_REG_HARD (0x0003)
-#define CY_AN_MEM_RST_CTRL_REG_SOFT (0x0001)
-#define CY_AN_MEM_RST_RSTCMPT (0x0004)
-
-#define CY_AN_MEM_P0_ENDIAN (0x82)
-#define CY_AN_LITTLE_ENDIAN (0x0000)
-#define CY_AN_BIG_ENDIAN (0x0101)
-
-#define CY_AN_MEM_P0_VM_SET (0x83)
-#define CY_AN_MEM_P0_VM_SET_VMTYPE_MASK (0x0007)
-#define CY_AN_MEM_P0_VM_SET_VMTYPE_RAM (0x0005)
-#define CY_AN_MEM_P0_VM_SET_VMTYPE_VMWIDTH (0x0008)
-#define CY_AN_MEM_P0_VM_SET_VMTYPE_FLOWCTRL (0x0010)
-#define CY_AN_MEM_P0_VM_SET_IFMODE (0x0020)
-#define CY_AN_MEM_P0_VM_SET_CFGMODE (0x0040)
-#define CY_AN_MEM_P0_VM_SET_DACKEOB (0x0080)
-#define CY_AN_MEM_P0_VM_SET_OVERRIDE (0x0100)
-#define CY_AN_MEM_P0_VM_SET_INTOVERD (0x0200)
-#define CY_AN_MEM_P0_VM_SET_DRQOVERD (0x0400)
-#define CY_AN_MEM_P0_VM_SET_DRQPOL (0x0800)
-#define CY_AN_MEM_P0_VM_SET_DACKPOL (0x1000)
-
-
-#define CY_AN_MEM_P0_NV_SET (0x84)
-#define CY_AN_MEM_P0_NV_SET_WPSWEN (0x0001)
-#define CY_AN_MEM_P0_NV_SET_WPPOLAR (0x0002)
-
-#define CY_AN_MEM_PMU_UPDATE (0x85)
-#define CY_AN_MEM_PMU_UPDATE_UVALID (0x0001)
-#define CY_AN_MEM_PMU_UPDATE_USBUPDATE (0x0002)
-#define CY_AN_MEM_PMU_UPDATE_SDIOUPDATE (0x0004)
-
-#define CY_AN_MEM_P0_INTR_REG (0x90)
-#define CY_AN_MEM_P0_INTR_REG_MCUINT (0x0020)
-#define CY_AN_MEM_P0_INTR_REG_DRQINT (0x0800)
-#define CY_AN_MEM_P0_INTR_REG_MBINT (0x1000)
-#define CY_AN_MEM_P0_INTR_REG_PMINT (0x2000)
-#define CY_AN_MEM_P0_INTR_REG_PLLLOCKINT (0x4000)
-
-#define CY_AN_MEM_P0_INT_MASK_REG (0x91)
-#define CY_AN_MEM_P0_INT_MASK_REG_MMCUINT (0x0020)
-#define CY_AN_MEM_P0_INT_MASK_REG_MDRQINT (0x0800)
-#define CY_AN_MEM_P0_INT_MASK_REG_MMBINT (0x1000)
-#define CY_AN_MEM_P0_INT_MASK_REG_MPMINT (0x2000)
-#define CY_AN_MEM_P0_INT_MASK_REG_MPLLLOCKINT (0x4000)
-
-#define CY_AN_MEM_MCU_MB_STAT (0x92)
-#define CY_AN_MEM_P0_MCU_MBNOTRD (0x0001)
-
-#define CY_AN_MEM_P0_MCU_STAT (0x94)
-#define CY_AN_MEM_P0_MCU_STAT_CARDINS (0x0001)
-#define CY_AN_MEM_P0_MCU_STAT_CARDREM (0x0002)
-
-#define CY_AN_MEM_PWR_MAGT_STAT (0x95)
-#define CY_AN_MEM_PWR_MAGT_STAT_WAKEUP (0x0001)
-
-#define CY_AN_MEM_P0_RSE_ALLOCATE (0x98)
-#define CY_AN_MEM_P0_RSE_ALLOCATE_SDIOAVI (0x0001)
-#define CY_AN_MEM_P0_RSE_ALLOCATE_SDIOALLO (0x0002)
-#define CY_AN_MEM_P0_RSE_ALLOCATE_NANDAVI (0x0004)
-#define CY_AN_MEM_P0_RSE_ALLOCATE_NANDALLO (0x0008)
-#define CY_AN_MEM_P0_RSE_ALLOCATE_USBAVI (0x0010)
-#define CY_AN_MEM_P0_RSE_ALLOCATE_USBALLO (0x0020)
-
-#define CY_AN_MEM_P0_RSE_MASK (0x9A)
-#define CY_AN_MEM_P0_RSE_MASK_MSDIOBUS_RW (0x0003)
-#define CY_AN_MEM_P0_RSE_MASK_MNANDBUS_RW (0x00C0)
-#define CY_AN_MEM_P0_RSE_MASK_MUSBBUS_RW (0x0030)
-
-#define CY_AN_MEM_P0_DRQ (0xA0)
-#define CY_AN_MEM_P0_DRQ_EP2DRQ (0x0004)
-#define CY_AN_MEM_P0_DRQ_EP3DRQ (0x0008)
-#define CY_AN_MEM_P0_DRQ_EP4DRQ (0x0010)
-#define CY_AN_MEM_P0_DRQ_EP5DRQ (0x0020)
-#define CY_AN_MEM_P0_DRQ_EP6DRQ (0x0040)
-#define CY_AN_MEM_P0_DRQ_EP7DRQ (0x0080)
-#define CY_AN_MEM_P0_DRQ_EP8DRQ (0x0100)
-#define CY_AN_MEM_P0_DRQ_EP9DRQ (0x0200)
-#define CY_AN_MEM_P0_DRQ_EP10DRQ (0x0400)
-#define CY_AN_MEM_P0_DRQ_EP11DRQ (0x0800)
-#define CY_AN_MEM_P0_DRQ_EP12DRQ (0x1000)
-#define CY_AN_MEM_P0_DRQ_EP13DRQ (0x2000)
-#define CY_AN_MEM_P0_DRQ_EP14DRQ (0x4000)
-#define CY_AN_MEM_P0_DRQ_EP15DRQ (0x8000)
-
-#define CY_AN_MEM_P0_DRQ_MASK (0xA1)
-#define CY_AN_MEM_P0_DRQ_MASK_MEP2DRQ (0x0004)
-#define CY_AN_MEM_P0_DRQ_MASK_MEP3DRQ (0x0008)
-#define CY_AN_MEM_P0_DRQ_MASK_MEP4DRQ (0x0010)
-#define CY_AN_MEM_P0_DRQ_MASK_MEP5DRQ (0x0020)
-#define CY_AN_MEM_P0_DRQ_MASK_MEP6DRQ (0x0040)
-#define CY_AN_MEM_P0_DRQ_MASK_MEP7DRQ (0x0080)
-#define CY_AN_MEM_P0_DRQ_MASK_MEP8DRQ (0x0100)
-#define CY_AN_MEM_P0_DRQ_MASK_MEP9DRQ (0x0200)
-#define CY_AN_MEM_P0_DRQ_MASK_MEP10DRQ (0x0400)
-#define CY_AN_MEM_P0_DRQ_MASK_MEP11DRQ (0x0800)
-#define CY_AN_MEM_P0_DRQ_MASK_MEP12DRQ (0x1000)
-#define CY_AN_MEM_P0_DRQ_MASK_MEP13DRQ (0x2000)
-#define CY_AN_MEM_P0_DRQ_MASK_MEP14DRQ (0x4000)
-#define CY_AN_MEM_P0_DRQ_MASK_MEP15DRQ (0x8000)
-
-#define CY_AN_MEM_P0_EP2_DMA_REG (0xA2)
-#define CY_AN_MEM_P0_E_pn_DMA_REG_COUNT_MASK (0x7FF)
-#define CY_AN_MEM_P0_E_pn_DMA_REG_DMAVAL (1 << 12)
-#define CY_AN_MEM_P0_EP3_DMA_REG (0xA3)
-#define CY_AN_MEM_P0_EP4_DMA_REG (0xA4)
-#define CY_AN_MEM_P0_EP5_DMA_REG (0xA5)
-#define CY_AN_MEM_P0_EP6_DMA_REG (0xA6)
-#define CY_AN_MEM_P0_EP7_DMA_REG (0xA7)
-#define CY_AN_MEM_P0_EP8_DMA_REG (0xA8)
-#define CY_AN_MEM_P0_EP9_DMA_REG (0xA9)
-#define CY_AN_MEM_P0_EP10_DMA_REG (0xAA)
-#define CY_AN_MEM_P0_EP11_DMA_REG (0xAB)
-#define CY_AN_MEM_P0_EP12_DMA_REG (0xAC)
-#define CY_AN_MEM_P0_EP13_DMA_REG (0xAD)
-#define CY_AN_MEM_P0_EP14_DMA_REG (0xAE)
-#define CY_AN_MEM_P0_EP15_DMA_REG (0xAF)
-
-#define CY_AN_MEM_IROS_IO_CFG (0xC1)
-#define CY_AN_MEM_IROS_IO_CFG_GPIODRVST_MASK (0x0003)
-#define CY_AN_MEM_IROS_IO_CFG_GPIOSLEW_MASK (0x0004)
-#define CY_AN_MEM_IROS_IO_CFG_PPIODRVST_MASK (0x0018)
-#define CY_AN_MEM_IROS_IO_CFG_PPIOSLEW_MASK (0x0020)
-#define CY_AN_MEM_IROS_IO_CFG_SSIODRVST_MASK (0x0300)
-#define CY_AN_MEM_IROS_IO_CFG_SSIOSLEW_MASK (0x0400)
-#define CY_AN_MEM_IROS_IO_CFG_SNIODRVST_MASK (0x1800)
-#define CY_AN_MEM_IROS_IO_CFG_SNIOSLEW_MASK (0x2000)
-
-#define CY_AN_MEM_PLL_LOCK_LOSS_STAT (0xC4)
-#define CY_AN_MEM_PLL_LOCK_LOSS_STAT_PLLSTAT (0x0800)
-
-#define CY_AN_MEM_P0_MAILBOX0 (0xF0)
-#define CY_AN_MEM_P0_MAILBOX1 (0xF1)
-#define CY_AN_MEM_P0_MAILBOX2 (0xF2)
-#define CY_AN_MEM_P0_MAILBOX3 (0xF3)
-
-#define CY_AN_MEM_MCU_MAILBOX0 (0xF8)
-#define CY_AN_MEM_MCU_MAILBOX1 (0xF9)
-#define CY_AN_MEM_MCU_MAILBOX2 (0xFA)
-#define CY_AN_MEM_MCU_MAILBOX3 (0xFB)
-
-#endif /* !defined(__doxygen__) */
-
-#endif /* _INCLUDED_CYANREG_H_ */
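The CFG_ID register fields are split with the masks defined above: the low nibble carries the silicon revision and the upper bits the hardware ID. A minimal decoding sketch, assuming 'cfg_id' already holds a 16-bit value read from the CY_AN_MEM_CM_WB_CFG_ID register (the read routine itself is platform specific and not shown):

uint16_t version = cfg_id & CY_AN_MEM_CM_WB_CFG_ID_VER_MASK;
uint16_t hdid    = cfg_id & CY_AN_MEM_CM_WB_CFG_ID_HDID_MASK;

if (hdid == CY_AN_MEM_CM_WB_CFG_ID_HDID_ASTORIA_VALUE)
	;	/* Astoria silicon, revision 'version' */
else if (hdid == CY_AN_MEM_CM_WB_CFG_ID_HDID_ANTIOCH_VALUE)
	;	/* Antioch silicon */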
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyansdkversion.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyansdkversion.h
deleted file mode 100644
index ac26b9556dd..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyansdkversion.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Cypress Antioch Sdk Version file (cyansdkversion.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYANSDK_VERSION_H_
-#define _INCLUDED_CYANSDK_VERSION_H_
-
-/* Antioch SDK version 1.3.2 */
-#define CYAN_MAJOR_VERSION (1)
-#define CYAN_MINOR_VERSION (3)
-#define CYAN_BUILD_NUMBER (473)
-
-#endif /*_INCLUDED_CYANSDK_VERSION_H_*/
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanstorage.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanstorage.h
deleted file mode 100644
index deb9af87fff..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanstorage.h
+++ /dev/null
@@ -1,419 +0,0 @@
-/* Cypress West Bridge API header file (cyanstorage.h)
- ## Header for backward compatibility with previous releases of Antioch SDK.
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYANSTORAGE_H_
-#define _INCLUDED_CYANSTORAGE_H_
-#ifndef __doxygen__
-
-#include "cyanmedia.h"
-#include "cyanmisc.h"
-#include "cyasstorage.h"
-#include "cyas_cplus_start.h"
-
-#define CY_AN_LUN_PHYSICAL_DEVICE (CY_AS_LUN_PHYSICAL_DEVICE)
-#define CY_AN_STORAGE_EP_SIZE (CY_AS_STORAGE_EP_SIZE)
-
-#define cy_an_storage_antioch cy_as_storage_antioch
-#define cy_an_storage_processor cy_as_storage_processor
-#define cy_an_storage_removed cy_as_storage_removed
-#define cy_an_storage_inserted cy_as_storage_inserted
-#define cy_an_sdio_interrupt cy_as_sdio_interrupt
-typedef cy_as_storage_event cy_an_storage_event;
-
-#define cy_an_op_read cy_as_op_read
-#define cy_an_op_write cy_as_op_write
-typedef cy_as_oper_type cy_an_oper_type;
-
-typedef cy_as_device_desc cy_an_device_desc;
-
-typedef cy_as_unit_desc cy_an_unit_desc;
-
-typedef cy_as_storage_callback_dep \
- cy_an_storage_callback;
-
-typedef cy_as_storage_event_callback_dep \
- cy_an_storage_event_callback;
-
-#define cy_an_sd_reg_OCR cy_as_sd_reg_OCR
-#define cy_an_sd_reg_CID cy_as_sd_reg_CID
-#define cy_an_sd_reg_CSD cy_as_sd_reg_CSD
-typedef cy_as_sd_card_reg_type \
- cy_an_sd_card_reg_type;
-
-typedef cy_as_storage_query_device_data_dep \
- cy_an_storage_query_device_data;
-
-typedef cy_as_storage_query_unit_data_dep \
- cy_an_storage_query_unit_data;
-
-typedef cy_as_storage_sd_reg_read_data \
- cy_an_storage_sd_reg_read_data;
-
-#define CY_AN_SD_REG_OCR_LENGTH (CY_AS_SD_REG_OCR_LENGTH)
-#define CY_AN_SD_REG_CID_LENGTH (CY_AS_SD_REG_CID_LENGTH)
-#define CY_AN_SD_REG_CSD_LENGTH (CY_AS_SD_REG_CSD_LENGTH)
-#define CY_AN_SD_REG_MAX_RESP_LENGTH \
- (CY_AS_SD_REG_MAX_RESP_LENGTH)
-
-/**** API Functions ******/
-
-/* Sync version of Storage Start */
-EXTERN cy_an_return_status_t
-cy_an_storage_start(
- cy_an_device_handle handle
- );
-#define cy_an_storage_start(handle) \
- cy_as_storage_start((cy_as_device_handle)(handle), 0, 0)
-
-/* Async version of Storage Start */
-EXTERN cy_an_return_status_t
-cy_an_storage_start_e_x(
- cy_an_device_handle handle,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_storage_start_e_x(h, cb, client) \
- cy_as_storage_start((cy_as_device_handle)(h), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of Storage Stop */
-EXTERN cy_an_return_status_t
-cy_an_storage_stop(
- cy_an_device_handle handle
- );
-#define cy_an_storage_stop(handle) \
- cy_as_storage_stop((cy_as_device_handle)(handle), 0, 0)
-
-/* Async version of Storage Stop */
-EXTERN cy_an_return_status_t
-cy_an_storage_stop_e_x(
- cy_an_device_handle handle,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_storage_stop_e_x(h, cb, client) \
- cy_as_storage_stop((cy_as_device_handle)(h), \
- (cy_as_function_callback)(cb), (client))
-
-/* Register Call back api */
-EXTERN cy_an_return_status_t
-cy_an_storage_register_callback(
- cy_an_device_handle handle,
- cy_an_storage_event_callback callback
- );
-#define cy_an_storage_register_callback(h, cb) \
- cy_as_storage_register_callback_dep((cy_as_device_handle)(h), \
- (cy_as_storage_event_callback_dep)(cb))
-
-/* Sync version of Storage Claim */
-EXTERN cy_an_return_status_t
-cy_an_storage_claim(
- cy_an_device_handle handle,
- cy_an_media_type type
- );
-#define cy_an_storage_claim(h, type) \
- cy_as_storage_claim_dep((cy_as_device_handle)(h), \
- (cy_as_media_type)(type))
-
-/* Async version of Storage Claim */
-EXTERN cy_an_return_status_t
-cy_an_storage_claim_e_x(
- cy_an_device_handle handle,
- cy_an_media_type *type,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_storage_claim_e_x(h, type_p, cb, client) \
- cy_as_storage_claim_dep_EX((cy_as_device_handle)(h), \
- (cy_as_media_type *)(type_p), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync Version of Storage Release */
-EXTERN cy_an_return_status_t
-cy_an_storage_release(
- cy_an_device_handle handle,
- cy_an_media_type type
- );
-#define cy_an_storage_release(h, type) \
- cy_as_storage_release_dep((cy_as_device_handle)(h), \
- (cy_as_media_type)(type))
-
-/* Async Version of Storage Release */
-EXTERN cy_an_return_status_t
-cy_an_storage_release_e_x(
- cy_an_device_handle handle,
- cy_an_media_type *type,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_storage_release_e_x(h, type_p, cb, client) \
- cy_as_storage_release_dep_EX((cy_as_device_handle)(h), \
- (cy_as_media_type *)(type_p), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of Query Media */
-EXTERN cy_an_return_status_t
-cy_an_storage_query_media(
- cy_an_device_handle handle,
- cy_an_media_type type,
- uint32_t *count
- );
-#define cy_an_storage_query_media(handle, type, count) \
- cy_as_storage_query_media((cy_as_device_handle)(handle), \
- (cy_as_media_type)(type), (count), 0, 0)
-
-/* Async version of Query Media */
-EXTERN cy_an_return_status_t
-cy_an_storage_query_media_e_x(
- cy_an_device_handle handle,
- cy_an_media_type type,
- uint32_t *count,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_storage_query_media_e_x(h, type, count, cb, client) \
- cy_as_storage_query_media((cy_as_device_handle)(h), \
- (cy_as_media_type)(type), (count), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of Query device */
-EXTERN cy_an_return_status_t
-cy_an_storage_query_device(
- cy_an_device_handle handle,
- cy_an_media_type type,
- uint32_t device,
- cy_an_device_desc *desc_p
- );
-#define cy_an_storage_query_device(h, type, device, desc_p) \
- cy_as_storage_query_device_dep((cy_as_device_handle)(h), \
- (cy_as_media_type)(type), (device), (cy_as_device_desc *)(desc_p))
-
-/* Async version of Query device */
-EXTERN cy_an_return_status_t
-cy_an_storage_query_device_e_x(
- cy_an_device_handle handle,
- cy_an_storage_query_device_data *data,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_storage_query_device_e_x(h, data, cb, client) \
- cy_as_storage_query_device_dep_EX((cy_as_device_handle)(h), \
- (cy_as_storage_query_device_data_dep *)(data), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of Query Unit */
-EXTERN cy_an_return_status_t
-cy_an_storage_query_unit(
- cy_an_device_handle handle,
- cy_an_media_type type,
- uint32_t device,
- uint32_t unit,
- cy_an_unit_desc *desc_p
- );
-#define cy_an_storage_query_unit(h, type, device, unit, desc_p) \
- cy_as_storage_query_unit_dep((cy_as_device_handle)(h), \
- (cy_as_media_type)(type), (device), \
- (unit), (cy_as_unit_desc *)(desc_p))
-
-/* Async version of Query Unit */
-EXTERN cy_an_return_status_t
-cy_an_storage_query_unit_e_x(
- cy_an_device_handle handle,
- cy_an_storage_query_unit_data *data_p,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_storage_query_unit_e_x(h, data_p, cb, client) \
- cy_as_storage_query_unit_dep_EX((cy_as_device_handle)(h), \
- (cy_as_storage_query_unit_data_dep *)(data_p), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of device control */
-EXTERN cy_an_return_status_t
-cy_an_storage_device_control(
- cy_an_device_handle handle,
- cy_bool card_detect_en,
- cy_bool write_prot_en
- );
-#define cy_an_storage_device_control(handle, \
- card_detect_en, write_prot_en) \
- cy_as_storage_device_control_dep((cy_as_device_handle)(handle), \
- (card_detect_en), (write_prot_en), 0, 0)
-
-/* Async version of device control */
-EXTERN cy_an_return_status_t
-cy_an_storage_device_control_e_x(
- cy_an_device_handle handle,
- cy_bool card_detect_en,
- cy_bool write_prot_en,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_storage_device_control_e_x(h, det_en, prot_en, cb, client) \
- cy_as_storage_device_control_dep((cy_as_device_handle)(h), (det_en), \
- (prot_en), (cy_as_function_callback)(cb), (client))
-
-/* Sync Read */
-EXTERN cy_an_return_status_t
-cy_an_storage_read(
- cy_an_device_handle handle,
- cy_an_media_type type,
- uint32_t device,
- uint32_t unit,
- uint32_t block,
- void *data_p,
- uint16_t num_blocks
- );
-#define cy_an_storage_read(h, type, device, unit, block, data_p, nblks) \
- cy_as_storage_read_dep((cy_as_device_handle)(h), \
- (cy_as_media_type)(type), (device), (unit), \
- (block), (data_p), (nblks))
-
-/* Async Read */
-EXTERN cy_an_return_status_t
-cy_an_storage_read_async(
- cy_an_device_handle handle,
- cy_an_media_type type,
- uint32_t device,
- uint32_t unit,
- uint32_t block,
- void *data_p,
- uint16_t num_blocks,
- cy_an_storage_callback callback
- );
-#define cy_an_storage_read_async(h, type, device, unit, \
- block, data_p, nblks, cb) \
- cy_as_storage_read_async_dep((cy_as_device_handle)(h), \
- (cy_as_media_type)(type), (device), (unit), (block), \
- (data_p), (nblks), (cy_as_storage_callback_dep)(cb))
-
-/* Sync Write */
-EXTERN cy_an_return_status_t
-cy_an_storage_write(
- cy_an_device_handle handle,
- cy_an_media_type type,
- uint32_t device,
- uint32_t unit,
- uint32_t block,
- void *data_p,
- uint16_t num_blocks
- );
-#define cy_an_storage_write(h, type, device, unit, \
- block, data_p, nblks) \
- cy_as_storage_write_dep((cy_as_device_handle)(h), \
- (cy_as_media_type)(type), (device), (unit), \
- (block), (data_p), (nblks))
-
-/* Async Write */
-EXTERN cy_an_return_status_t
-cy_an_storage_write_async(
- cy_an_device_handle handle,
- cy_an_media_type type,
- uint32_t device,
- uint32_t unit,
- uint32_t block,
- void *data_p,
- uint16_t num_blocks,
- cy_an_storage_callback callback
- );
-#define cy_an_storage_write_async(h, type, device, unit, \
- block, data_p, nblks, cb) \
- cy_as_storage_write_async_dep((cy_as_device_handle)(h), \
- (cy_as_media_type)(type), (device), (unit), (block), \
- (data_p), (nblks), (cy_as_storage_callback_dep)(cb))
-
-/* Cancel Async */
-EXTERN cy_an_return_status_t
-cy_an_storage_cancel_async(
- cy_an_device_handle handle
- );
-#define cy_an_storage_cancel_async(h) \
- cy_as_storage_cancel_async((cy_as_device_handle)(h))
-
-/* Sync SD Register Read*/
-EXTERN cy_an_return_status_t
-cy_an_storage_sd_register_read(
- cy_an_device_handle handle,
- cy_an_media_type type,
- uint8_t device,
- cy_an_sd_card_reg_type reg_type,
- uint8_t read_len,
- uint8_t *data_p
- );
-#define cy_an_storage_sd_register_read(h, type, device, \
- reg_type, len, data_p) \
- cy_as_storage_sd_register_read_dep((cy_as_device_handle)(h), \
- (cy_as_media_type)(type), (device), \
- (cy_as_sd_card_reg_type)(reg_type), (len), (data_p))
-
-/*Async SD Register Read*/
-EXTERN cy_an_return_status_t
-cy_an_storage_sd_register_read_e_x(
- cy_an_device_handle handle,
- cy_an_media_type type,
- uint8_t device,
- cy_an_sd_card_reg_type reg_type,
- cy_an_storage_sd_reg_read_data *data_p,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_storage_sd_register_read_e_x(h, type, device, \
- reg_type, data_p, cb, client) \
- cy_as_storage_sd_register_read_dep_EX((cy_as_device_handle)(h), \
- (cy_as_media_type)(type), (device), \
- (cy_as_sd_card_reg_type)(reg_type), \
- (cy_as_storage_sd_reg_read_data *)(data_p), \
- (cy_as_function_callback)(cb), (client))
-
-/* Create partition on storage device */
-EXTERN cy_an_return_status_t
-cy_an_storage_create_p_partition(
- cy_an_device_handle handle,
- cy_an_media_type media,
- uint32_t device,
- uint32_t size,
- cy_an_function_callback cb,
- uint32_t client);
-#define cy_an_storage_create_p_partition(h, media, dev, \
- size, cb, client) \
- cy_as_storage_create_p_partition_dep((cy_as_device_handle)(h), \
- (cy_as_media_type)(media), (dev), \
- (size), (cy_as_function_callback)(cb), (client))
-
-/* Remove partition on storage device */
-EXTERN cy_an_return_status_t
-cy_an_storage_remove_p_partition(
- cy_an_device_handle handle,
- cy_an_media_type media,
- uint32_t device,
- cy_an_function_callback cb,
- uint32_t client);
-#define cy_an_storage_remove_p_partition\
-(h, media, dev, cb, client) \
- cy_as_storage_remove_p_partition_dep((cy_as_device_handle)(h), \
- (cy_as_media_type)(media), (dev), \
- (cy_as_function_callback)(cb), (client))
-
-#include "cyas_cplus_end.h"
-#endif /*__doxygen__ */
-
-#endif
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyantioch.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyantioch.h
deleted file mode 100644
index d65b35a14ae..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyantioch.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Cypress West Bridge API header file (cyantioch.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYANTIOCH_H_
-#define _INCLUDED_CYANTIOCH_H_
-
-#if !defined(__doxygen__)
-
-#include "cyanerr.h"
-#include "cyanmisc.h"
-#include "cyanstorage.h"
-#include "cyanusb.h"
-#include "cyanch9.h"
-
-#endif
-
-#endif
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyantypes.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyantypes.h
deleted file mode 100644
index 48cd50f13c1..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyantypes.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Cypress West Bridge API header file (cyantypes.h)
-## Type definitions for backward compatibility with previous
-## Antioch SDK releases.
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYANTYPES_H_
-#define _INCLUDED_CYANTYPES_H_
-
-#include "cyastypes.h"
-typedef cy_as_end_point_number_t cy_an_end_point_number_t;
-typedef cy_as_return_status_t cy_an_return_status_t;
-typedef cy_as_bus_number_t cy_an_bus_number_t;
-#endif
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanusb.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanusb.h
deleted file mode 100644
index 1e4e7dbe31b..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyanusb.h
+++ /dev/null
@@ -1,619 +0,0 @@
-/* Cypress West Bridge API header file (cyanusb.h)
- ## Header for backward compatibility with previous Antioch SDK releases.
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYANUSB_H_
-#define _INCLUDED_CYANUSB_H_
-
-#if !defined(__doxygen__)
-
-#include "cyanmisc.h"
-#include "cyasusb.h"
-#include "cyas_cplus_start.h"
-
-#define CY_AN_MAX_USB_DESCRIPTOR_SIZE (CY_AS_MAX_USB_DESCRIPTOR_SIZE)
-
-typedef cy_as_usb_inquiry_data_dep cy_an_usb_inquiry_data;
-typedef cy_as_usb_unknown_command_data_dep \
- cy_an_usb_unknown_command_data;
-typedef cy_as_usb_start_stop_data_dep cy_an_usb_start_stop_data;
-typedef cy_as_m_s_c_progress_data cy_an_m_s_c_progress_data;
-
-#define cy_an_usb_nand_enum cy_as_usb_nand_enum
-#define cy_an_usb_sd_enum cy_as_usb_sd_enum
-#define cy_an_usb_mmc_enum cy_as_usb_mmc_enum
-#define cy_an_usb_ce_ata_enum cy_as_usb_ce_ata_enum
-typedef cy_as_usb_mass_storage_enum cy_an_usb_mass_storage_enum;
-
-#define cy_an_usb_desc_device cy_as_usb_desc_device
-#define cy_an_usb_desc_device_qual cy_as_usb_desc_device_qual
-#define cy_an_usb_desc_f_s_configuration \
- cy_as_usb_desc_f_s_configuration
-#define cy_an_usb_desc_h_s_configuration \
- cy_as_usb_desc_h_s_configuration
-#define cy_an_usb_desc_string cy_as_usb_desc_string
-typedef cy_as_usb_desc_type cy_an_usb_desc_type;
-
-#define cy_an_usb_in cy_as_usb_in
-#define cy_an_usb_out cy_as_usb_out
-#define cy_an_usb_in_out cy_as_usb_in_out
-typedef cy_as_usb_end_point_dir cy_an_usb_end_point_dir;
-
-
-#define cy_an_usb_control cy_as_usb_control
-#define cy_an_usb_iso cy_as_usb_iso
-#define cy_an_usb_bulk cy_as_usb_bulk
-#define cy_an_usb_int cy_as_usb_int
-typedef cy_as_usb_end_point_type cy_an_usb_end_point_type;
-
-
-typedef cy_as_usb_enum_control_dep cy_an_usb_enum_control;
-typedef cy_as_usb_end_point_config cy_an_usb_end_point_config;
-
-#define cy_an_usb_m_s_unit0 cy_as_usb_m_s_unit0
-#define cy_an_usb_m_s_unit1 cy_as_usb_m_s_unit1
-#define cy_an_usb_m_s_both cy_as_usb_m_s_both
-typedef cy_as_usb_m_s_type_t cy_an_usb_m_s_type_t;
-
-#define cy_an_event_usb_suspend cy_as_event_usb_suspend
-#define cy_an_event_usb_resume cy_as_event_usb_resume
-#define cy_an_event_usb_reset cy_as_event_usb_reset
-#define cy_an_event_usb_set_config cy_as_event_usb_set_config
-#define cy_an_event_usb_speed_change cy_as_event_usb_speed_change
-#define cy_an_event_usb_setup_packet cy_as_event_usb_setup_packet
-#define cy_an_event_usb_status_packet cy_as_event_usb_status_packet
-#define cy_an_event_usb_inquiry_before cy_as_event_usb_inquiry_before
-#define cy_an_event_usb_inquiry_after cy_as_event_usb_inquiry_after
-#define cy_an_event_usb_start_stop cy_as_event_usb_start_stop
-#define cy_an_event_usb_unknown_storage cy_as_event_usb_unknown_storage
-#define cy_an_event_usb_m_s_c_progress cy_as_event_usb_m_s_c_progress
-typedef cy_as_usb_event cy_an_usb_event;
-
-typedef cy_as_usb_event_callback_dep cy_an_usb_event_callback;
-
-typedef cy_as_usb_io_callback cy_an_usb_io_callback;
-typedef cy_as_usb_function_callback cy_an_usb_function_callback;
-
-/******* USB Functions ********************/
-
-/* Sync Usb Start */
-extern cy_an_return_status_t
-cy_an_usb_start(
- cy_an_device_handle handle
- );
-#define cy_an_usb_start(handle) \
- cy_as_usb_start((cy_as_device_handle)(handle), 0, 0)
-
-/*Async Usb Start */
-extern cy_an_return_status_t
-cy_an_usb_start_e_x(
- cy_an_device_handle handle,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_start_e_x(h, cb, client) \
- cy_as_usb_start((cy_as_device_handle)(h), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync Usb Stop */
-extern cy_an_return_status_t
-cy_an_usb_stop(
- cy_an_device_handle handle
- );
-#define cy_an_usb_stop(handle) \
- cy_as_usb_stop((cy_as_device_handle)(handle), 0, 0)
-
-/*Async Usb Stop */
-extern cy_an_return_status_t
-cy_an_usb_stop_e_x(
- cy_an_device_handle handle,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_stop_e_x(h, cb, client) \
- cy_as_usb_stop((cy_as_device_handle)(h), \
- (cy_as_function_callback)(cb), (client))
-
-/* Register USB event callback */
-EXTERN cy_an_return_status_t
-cy_an_usb_register_callback(
- cy_an_device_handle handle,
- cy_an_usb_event_callback callback
- );
-#define cy_an_usb_register_callback(h, cb) \
- cy_as_usb_register_callback_dep((cy_as_device_handle)(h), \
- (cy_as_usb_event_callback_dep)(cb))
-
-/*Sync Usb connect */
-EXTERN cy_an_return_status_t
-cy_an_usb_connect(
- cy_an_device_handle handle
- );
-#define cy_an_usb_connect(handle) \
- cy_as_usb_connect((cy_as_device_handle)(handle), 0, 0)
-
-/*Async Usb connect */
-extern cy_an_return_status_t
-cy_an_usb_connect_e_x(
- cy_an_device_handle handle,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_connect_e_x(h, cb, client) \
- cy_as_usb_connect((cy_as_device_handle)(h), \
- (cy_as_function_callback)(cb), (client))
-
-/*Sync Usb disconnect */
-EXTERN cy_an_return_status_t
-cy_an_usb_disconnect(
- cy_an_device_handle handle
- );
-#define cy_an_usb_disconnect(handle) \
- cy_as_usb_disconnect((cy_as_device_handle)(handle), 0, 0)
-
-/*Async Usb disconnect */
-extern cy_an_return_status_t
-cy_an_usb_disconnect_e_x(
- cy_an_device_handle handle,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_disconnect_e_x(h, cb, client) \
- cy_as_usb_disconnect((cy_as_device_handle)(h), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of set enum config */
-EXTERN cy_an_return_status_t
-cy_an_usb_set_enum_config(
- cy_an_device_handle handle,
- cy_an_usb_enum_control *config_p
- );
-#define cy_an_usb_set_enum_config(handle, config_p) \
- cy_as_usb_set_enum_config_dep((cy_as_device_handle)(handle), \
- (cy_as_usb_enum_control_dep *)(config_p), 0, 0)
-
-/* Async version of set enum config */
-extern cy_an_return_status_t
-cy_an_usb_set_enum_config_e_x(
- cy_an_device_handle handle,
- cy_an_usb_enum_control *config_p,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_set_enum_config_e_x(h, config_p, cb, client) \
- cy_as_usb_set_enum_config_dep((cy_as_device_handle)(h), \
- (cy_as_usb_enum_control_dep *)(config_p), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of get enum config */
-EXTERN cy_an_return_status_t
-cy_an_usb_get_enum_config(
- cy_an_device_handle handle,
- cy_an_usb_enum_control *config_p
- );
-#define cy_an_usb_get_enum_config(handle, config_p) \
- cy_as_usb_get_enum_config_dep((cy_as_device_handle)(handle), \
- (cy_as_usb_enum_control_dep *)(config_p), 0, 0)
-
-/* Async version of get enum config */
-extern cy_an_return_status_t
-cy_an_usb_get_enum_config_e_x(
- cy_an_device_handle handle,
- cy_an_usb_enum_control *config_p,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_get_enum_config_e_x(h, config_p, cb, client) \
- cy_as_usb_get_enum_config_dep((cy_as_device_handle)(h), \
- (cy_as_usb_enum_control_dep *)(config_p), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync Version of Set descriptor */
-EXTERN cy_an_return_status_t
-cy_an_usb_set_descriptor(
- cy_an_device_handle handle,
- cy_an_usb_desc_type type,
- uint8_t index,
- void *desc_p,
- uint16_t length
- );
-#define cy_an_usb_set_descriptor(handle, type, index, desc_p, length) \
- cy_as_usb_set_descriptor((cy_as_device_handle)(handle), \
- (cy_as_usb_desc_type)(type), (index), (desc_p), (length), 0, 0)
-
-/* Async Version of Set descriptor */
-extern cy_an_return_status_t
-cy_an_usb_set_descriptor_e_x(
- cy_an_device_handle handle,
- cy_an_usb_desc_type type,
- uint8_t index,
- void *desc_p,
- uint16_t length,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_set_descriptor_e_x\
- (h, type, index, desc_p, length, cb, client) \
- cy_as_usb_set_descriptor((cy_as_device_handle)(h), \
- (cy_as_usb_desc_type)(type), (index), (desc_p), (length), \
- (cy_as_function_callback)(cb), (client))
-
-/* Only version of clear descriptors */
-EXTERN cy_an_return_status_t
-cy_an_usb_clear_descriptors(
- cy_an_device_handle handle,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_clear_descriptors(h, cb, client) \
- cy_as_usb_clear_descriptors((cy_as_device_handle)(h), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of get descriptor*/
-EXTERN cy_an_return_status_t
-cy_an_usb_get_descriptor(
- cy_an_device_handle handle,
- cy_an_usb_desc_type type,
- uint8_t index,
- void *desc_p,
- uint32_t *length_p
- );
-#define cy_an_usb_get_descriptor(h, type, index, desc_p, length_p) \
- cy_as_usb_get_descriptor_dep((cy_as_device_handle)(h), \
- (cy_as_usb_desc_type)(type), (index), (desc_p), (length_p))
-
-typedef cy_as_get_descriptor_data cy_an_get_descriptor_data;
-
-/* Async version of get descriptor */
-extern cy_an_return_status_t
-cy_an_usb_get_descriptor_e_x(
- cy_an_device_handle handle,
- cy_an_usb_desc_type type,
- uint8_t index,
- cy_an_get_descriptor_data *data,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_get_descriptor_e_x\
- (h, type, index, data, cb, client) \
- cy_as_usb_get_descriptor((cy_as_device_handle)(h), \
- (cy_as_usb_desc_type)(type), (index), \
- (cy_as_get_descriptor_data *)(data), \
- (cy_as_function_callback)(cb), (client))
-
-EXTERN cy_an_return_status_t
-cy_an_usb_set_physical_configuration(
- cy_an_device_handle handle,
- uint8_t config
- );
-#define cy_an_usb_set_physical_configuration(h, config) \
- cy_as_usb_set_physical_configuration\
- ((cy_as_device_handle)(h), (config))
-
-EXTERN cy_an_return_status_t
-cy_an_usb_set_end_point_config(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_an_usb_end_point_config *config_p
- );
-#define cy_an_usb_set_end_point_config(h, ep, config_p) \
- cy_as_usb_set_end_point_config((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), \
- (cy_as_usb_end_point_config *)(config_p))
-
-EXTERN cy_an_return_status_t
-cy_an_usb_get_end_point_config(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_an_usb_end_point_config *config_p
- );
-#define cy_an_usb_get_end_point_config(h, ep, config_p) \
- cy_as_usb_get_end_point_config((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), \
- (cy_as_usb_end_point_config *)(config_p))
-
-/* Sync version of commit */
-EXTERN cy_an_return_status_t
-cy_an_usb_commit_config(
- cy_an_device_handle handle
- );
-#define cy_an_usb_commit_config(handle) \
- cy_as_usb_commit_config((cy_as_device_handle)(handle), 0, 0)
-
-/* Async version of commit */
-extern cy_an_return_status_t
-cy_an_usb_commit_config_e_x(
- cy_an_device_handle handle,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_commit_config_e_x(h, cb, client) \
- cy_as_usb_commit_config((cy_as_device_handle)(h), \
- (cy_as_function_callback)(cb), (client))
-
-EXTERN cy_an_return_status_t
-cy_an_usb_read_data(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_bool pktread,
- uint32_t dsize,
- uint32_t *dataread,
- void *data
- );
-#define cy_an_usb_read_data(h, ep, pkt, dsize, dataread, data_p) \
- cy_as_usb_read_data((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), (pkt), (dsize), \
- (dataread), (data_p))
-
-EXTERN cy_an_return_status_t
-cy_an_usb_read_data_async(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_bool pktread,
- uint32_t dsize,
- void *data,
- cy_an_usb_io_callback callback
- );
-#define cy_an_usb_read_data_async(h, ep, pkt, dsize, data_p, cb) \
- cy_as_usb_read_data_async((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), (pkt), (dsize), (data_p), \
- (cy_as_usb_io_callback)(cb))
-
-EXTERN cy_an_return_status_t
-cy_an_usb_write_data(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- uint32_t dsize,
- void *data
- );
-#define cy_an_usb_write_data(h, ep, dsize, data_p) \
- cy_as_usb_write_data((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), (dsize), (data_p))
-
-EXTERN cy_an_return_status_t
-cy_an_usb_write_data_async(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- uint32_t dsize,
- void *data,
- cy_bool spacket,
- cy_an_usb_io_callback callback
- );
-#define cy_an_usb_write_data_async(h, ep, dsize, data_p, spacket, cb) \
- cy_as_usb_write_data_async((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), (dsize), (data_p), (spacket), \
- (cy_as_usb_io_callback)(cb))
-
-EXTERN cy_an_return_status_t
-cy_an_usb_cancel_async(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep
- );
-#define cy_an_usb_cancel_async(h, ep) \
- cy_as_usb_cancel_async((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep))
-
-/* Sync version of set stall */
-EXTERN cy_an_return_status_t
-cy_an_usb_set_stall(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_an_usb_function_callback cb,
- uint32_t client
-);
-#define cy_an_usb_set_stall(h, ep, cb, client) \
- cy_as_usb_set_stall_dep((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), \
- (cy_as_usb_function_callback)(cb), (client))
-
-/* Async version of set stall */
-extern cy_an_return_status_t
-cy_an_usb_set_stall_e_x(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_an_function_callback cb,
- uint32_t client
-);
-#define cy_an_usb_set_stall_e_x(h, ep, cb, client) \
- cy_as_usb_set_stall((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of clear stall */
-EXTERN cy_an_return_status_t
-cy_an_usb_clear_stall(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_an_usb_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_clear_stall(h, ep, cb, client) \
- cy_as_usb_clear_stall_dep((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), \
- (cy_as_usb_function_callback)(cb), (client))
-
-/* Async version of clear stall */
-extern cy_an_return_status_t
-cy_an_usb_clear_stall_e_x(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_clear_stall_e_x(h, ep, cb, client) \
- cy_as_usb_clear_stall((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync get stall */
-EXTERN cy_an_return_status_t
-cy_an_usb_get_stall(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_bool *stall_p
- );
-#define cy_an_usb_get_stall(handle, ep, stall_p) \
- cy_as_usb_get_stall((cy_as_device_handle)(handle), \
- (cy_as_end_point_number_t)(ep), (stall_p), 0, 0)
-
-/* Async get stall */
-extern cy_an_return_status_t
-cy_an_usb_get_stall_e_x(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_bool *stall_p,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_get_stall_e_x(h, ep, stall_p, cb, client) \
- cy_as_usb_get_stall((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), (stall_p), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of Set Nak */
-EXTERN cy_an_return_status_t
-cy_an_usb_set_nak(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_an_usb_function_callback cb,
- uint32_t client
-);
-
-#define cy_an_usb_set_nak(h, ep, cb, client) \
- cy_as_usb_set_nak_dep((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), \
- (cy_as_usb_function_callback)(cb), (client))
-
-/* Async version of Set Nak */
-extern cy_an_return_status_t
-cy_an_usb_set_nak_e_x(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_an_function_callback cb,
- uint32_t client
-);
-#define cy_an_usb_set_nak_e_x(h, ep, cb, client) \
- cy_as_usb_set_nak((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync version of clear nak */
-EXTERN cy_an_return_status_t
-cy_an_usb_clear_nak(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_an_usb_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_clear_nak(h, ep, cb, client) \
- cy_as_usb_clear_nak_dep((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), \
- (cy_as_usb_function_callback)(cb), (client))
-
-/* Async version of clear nak */
-extern cy_an_return_status_t
-cy_an_usb_clear_nak_e_x(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_clear_nak_e_x(h, ep, cb, client) \
- cy_as_usb_clear_nak((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync Get NAK */
-EXTERN cy_an_return_status_t
-cy_an_usb_get_nak(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_bool *nak_p
-);
-#define cy_an_usb_get_nak(handle, ep, nak_p) \
- cy_as_usb_get_nak((cy_as_device_handle)(handle), \
- (cy_as_end_point_number_t)(ep), (nak_p), 0, 0)
-
-/* Async Get NAK */
-EXTERN cy_an_return_status_t
-cy_an_usb_get_nak_e_x(
- cy_an_device_handle handle,
- cy_an_end_point_number_t ep,
- cy_bool *nak_p,
- cy_an_function_callback cb,
- uint32_t client
-);
-#define cy_an_usb_get_nak_e_x(h, ep, nak_p, cb, client) \
- cy_as_usb_get_nak((cy_as_device_handle)(h), \
- (cy_as_end_point_number_t)(ep), (nak_p), \
- (cy_as_function_callback)(cb), (client))
-
-/* Sync remote wakeup */
-EXTERN cy_an_return_status_t
-cy_an_usb_signal_remote_wakeup(
- cy_an_device_handle handle
- );
-#define cy_an_usb_signal_remote_wakeup(handle) \
- cy_as_usb_signal_remote_wakeup((cy_as_device_handle)(handle), 0, 0)
-
-/* Async remote wakeup */
-EXTERN cy_an_return_status_t
-cy_an_usb_signal_remote_wakeup_e_x(
- cy_an_device_handle handle,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_signal_remote_wakeup_e_x(h, cb, client) \
- cy_as_usb_signal_remote_wakeup((cy_as_device_handle)(h), \
- (cy_as_function_callback)(cb), (client))
-
-/* Only version of SetMSReportThreshold */
-EXTERN cy_an_return_status_t
-cy_an_usb_set_m_s_report_threshold(
- cy_an_device_handle handle,
- uint32_t wr_sectors,
- uint32_t rd_sectors,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_set_m_s_report_threshold(h, wr_cnt, rd_cnt, cb, client) \
- cy_as_usb_set_m_s_report_threshold((cy_as_device_handle)(h), \
- wr_cnt, rd_cnt, (cy_as_function_callback)(cb), (client))
-
-/* Select storage partitions to be enumerated. */
-EXTERN cy_an_return_status_t
-cy_an_usb_select_m_s_partitions(
- cy_an_device_handle handle,
- cy_an_media_type media,
- uint32_t device,
- cy_an_usb_m_s_type_t type,
- cy_an_function_callback cb,
- uint32_t client
- );
-#define cy_an_usb_select_m_s_partitions(h, media, dev, type, cb, client) \
- cy_as_usb_select_m_s_partitions_dep((cy_as_device_handle)(h), \
- (cy_as_media_type)(media), (dev), \
- (cy_as_usb_m_s_type_t)(type), (cy_as_function_callback)(cb), (client))
-
-#include "cyas_cplus_end.h"
-#endif /*__doxygen__*/
-#endif /*_INCLUDED_CYANUSB_H_*/
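The cy_an_* names in the header above are thin backward-compatibility wrappers: each macro casts its arguments and forwards to the corresponding cy_as_* function. A minimal sketch of how legacy caller code might look (assuming the cyanusb.h header above and a handle obtained through the usual create/configure path; app_commit is a hypothetical helper):

#include "cyanusb.h"

/* Legacy-style call; the macro expands to
 * cy_as_usb_commit_config((cy_as_device_handle)(h), 0, 0). */
static cy_an_return_status_t app_commit(cy_an_device_handle h)
{
	return cy_an_usb_commit_config(h);
}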
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyas_cplus_end.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyas_cplus_end.h
deleted file mode 100644
index ece44ca34f3..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyas_cplus_end.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * This file is included at the end of other include files.
- * It basically closes the C++-specific wrapper that ensures
- * this code is seen as C code even within a C++ compiler.
- *
- */
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyas_cplus_start.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyas_cplus_start.h
deleted file mode 100644
index b879cefa9d6..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyas_cplus_start.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * This file is included after all other header files, but before any other
- * definitions in the file. It basically ensures that the definitions within
- * the file are seen as C definitions even when compiled by a C++ compiler.
- */
-
-#ifdef __cplusplus
-
-extern "C" {
-
-#endif
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyascast.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyascast.h
deleted file mode 100644
index 5f8c852fbb4..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyascast.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Cypress West Bridge API header file (cyascast.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASCAST_H_
-#define _INCLUDED_CYASCAST_H_
-
-#ifndef __doxygen__
-
-#ifdef _DEBUG
-#define cy_cast_int2U_int16(v) \
- (cy_as_hal_assert(v < 65536), (uint16_t)(v))
-#else /* _DEBUG */
-#define cy_cast_int2U_int16(v) ((uint16_t)(v))
-#endif /* _DEBUG */
-
-#endif /* __doxygen__ */
-#endif /* _INCLUDED_CYASCAST_H_ */
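cy_cast_int2U_int16() narrows a value to 16 bits, asserting in _DEBUG builds that nothing is lost. A rough usage sketch (hypothetical caller; assumes cyashal.h supplies the cy_as_hal_assert referenced above):

#include "cyascast.h"

/* Convert a byte count to a 16-bit word count; in _DEBUG builds the
 * macro asserts the result fits in uint16_t before truncating. */
static uint16_t to_word_count(uint32_t nbytes)
{
	return cy_cast_int2U_int16(nbytes / 2);
}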
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasdevice.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasdevice.h
deleted file mode 100644
index 6452a907009..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasdevice.h
+++ /dev/null
@@ -1,1057 +0,0 @@
-/* Cypress West Bridge API header file (cyasdevice.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-##Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef __INCLUDED_CYASDEVICE_H__
-#define __INCLUDED_CYASDEVICE_H__
-
-#include "cyashal.h"
-#include "cyasprotocol.h"
-#include "cyasusb.h"
-#include "cyasstorage.h"
-#include "cyasmtp.h"
-#include "cyas_cplus_start.h"
-
-/***********************************
- * West Bridge Constants
- ***********************************/
-
-/* The endpoints used by West Bridge for the P port to S port path */
-#define CY_AS_P2S_WRITE_ENDPOINT (0x04)
-#define CY_AS_P2S_READ_ENDPOINT (0x08)
-
-/* The endpoint to use for firmware download */
-#define CY_AS_FIRMWARE_ENDPOINT (0x02)
-
-/* The maximum size of the firmware image West Bridge can accept */
-#define CY_AS_MAXIMUM_FIRMWARE_SIZE (24 * 1024)
-
-/* The maximum size of a write for EP0 and EP1 */
-#define CY_AS_EP0_MAX_WRITE_SIZE (128)
-#define CY_AS_EP1_MAX_WRITE_SIZE (64)
-
-/* The bitfields for the device state value */
-
-/* The device is in StandBy mode */
-#define CY_AS_DEVICE_STATE_PIN_STANDBY (0x00000001)
-/* The device has been configured */
-#define CY_AS_DEVICE_STATE_CONFIGURED (0x00000002)
-/* The firmware has been loaded into the device */
-#define CY_AS_DEVICE_STATE_FIRMWARE_LOADED (0x00000004)
-/* The interrupt module has been initialized */
-#define CY_AS_DEVICE_STATE_LOWLEVEL_MODULE (0x00000008)
-/* The DMA module has been initialized */
-#define CY_AS_DEVICE_STATE_DMA_MODULE (0x00000010)
-/* The interrupt module has been initialized */
-#define CY_AS_DEVICE_STATE_INTR_MODULE (0x00000020)
-/* The storage module has been initialized */
-#define CY_AS_DEVICE_STATE_STORAGE_MODULE (0x00000040)
-/* The USB module has been initialized */
-#define CY_AS_DEVICE_STATE_USB_MODULE (0x00000080)
-/* If set, the API wants SCSI messages */
-#define CY_AS_DEVICE_STATE_STORAGE_SCSIMSG (0x00000100)
-/* If set, an ASYNC storage operation is pending */
-#define CY_AS_DEVICE_STATE_STORAGE_ASYNC_PENDING (0x00000200)
-/* If set, the USB port is connected */
-#define CY_AS_DEVICE_STATE_USB_CONNECTED (0x00000400)
-/* If set and USB is connected, it is high speed */
-#define CY_AS_DEVICE_STATE_USB_HIGHSPEED (0x00000800)
-/* If set, we are in a callback */
-#define CY_AS_DEVICE_STATE_IN_CALLBACK (0x00001000)
-/* If set, we are processing a setup packet */
-#define CY_AS_DEVICE_STATE_IN_SETUP_PACKET (0x00004000)
-/* The device was placed in standby via register */
-#define CY_AS_DEVICE_STATE_REGISTER_STANDBY (0x00008000)
-/* If set, the device is using a crystal */
-#define CY_AS_DEVICE_STATE_CRYSTAL (0x00010000)
-/* If set, wakeup has been called */
-#define CY_AS_DEVICE_STATE_WAKING (0x00020000)
-/* If set, EP0 has been stalled. */
-#define CY_AS_DEVICE_STATE_EP0_STALLED (0x00040000)
-/* If set, device is in suspend mode. */
-#define CY_AS_DEVICE_STATE_SUSPEND (0x00080000)
-/* If set, a device reset is pending. */
-#define CY_AS_DEVICE_STATE_RESETP (0x00100000)
-/* If set, a device standby is pending. */
-#define CY_AS_DEVICE_STATE_STANDP (0x00200000)
-/* If set, device has a storage start or stop pending. */
-#define CY_AS_DEVICE_STATE_SSSP (0x00400000)
-/* If set, device has a usb start or stop pending. */
-#define CY_AS_DEVICE_STATE_USSP (0x00800000)
-/* If set, device has a mtp start or stop pending. */
-#define CY_AS_DEVICE_STATE_MSSP (0x01000000)
-/* If set, P2S DMA transfer can be started. */
-#define CY_AS_DEVICE_STATE_P2SDMA_START (0x02000000)
-
-/* The bitfields for the endpoint state value */
-/* DMA requests are accepted into the queue */
-#define CY_AS_DMA_ENDPOINT_STATE_ENABLED (0x0001)
-/* The endpoint has a sleeping client, waiting on a queue drain */
-#define CY_AS_DMA_ENDPOINT_STATE_SLEEPING (0x0002)
-/* The DMA backend to hardware is running */
-#define CY_AS_DMA_ENDPOINT_STATE_DMA_RUNNING (0x0004)
-/* There is an outstanding DMA entry deployed to the HAL */
-#define CY_AS_DMA_ENDPOINT_STATE_IN_TRANSIT (0x0008)
-/* 0 = OUT (West Bridge -> P Port), 1 = IN (P Port -> West Bridge) */
-#define CY_AS_DMA_ENDPOINT_STATE_DIRECTION (0x0010)
-
-/* The state values for the request list */
-/* Mask for getting the state information */
-#define CY_AS_REQUEST_LIST_STATE_MASK (0x0f)
-/* The request is queued, nothing further */
-#define CY_AS_REQUEST_LIST_STATE_QUEUED (0x00)
-/* The request is sent, waiting for response */
-#define CY_AS_REQUEST_LIST_STATE_WAITING (0x01)
-/* The response has been received, processing response */
-#define CY_AS_REQUEST_LIST_STATE_RECEIVED (0x02)
-/* The request/response is being canceled */
-#define CY_AS_REQUEST_LIST_STATE_CANCELING (0x03)
-/* The request is synchronous */
-#define CY_AS_REQUEST_LIST_STATE_SYNC (0x80)
-
-/* The flag values for a LL RequestResponse */
-/* This request requires an ACK to be sent after it is completed */
-#define CY_AS_REQUEST_RESPONSE_DELAY_ACK (0x01)
-/* This request originated from a version V1.1 function call */
-#define CY_AS_REQUEST_RESPONSE_EX (0x02)
-/* This request originated from a version V1.2 function call */
-#define CY_AS_REQUEST_RESPONSE_MS (0x04)
-
-
-#define CY_AS_DEVICE_HANDLE_SIGNATURE (0x01211219)
-
-/*
- * This macro returns the endpoint pointer given the
- * device pointer and an endpoint number
- */
-#define CY_AS_NUM_EP(dev_p, num) ((dev_p)->endp[(num)])
-
-/****************************************
- * West Bridge Data Structures
- ****************************************/
-
-typedef struct cy_as_device cy_as_device;
-
-/* Summary
- This type defines a callback function that will be called
- on completion of a DMA operation.
-
- Description
- This function definition is for a function that is called when
- the DMA operation is complete. This function is called with the
- endpoint number, operation type, buffer pointer and size.
-
- See Also
- * CyAsDmaOper
- * CyAsDmaQueueWrite
- */
-typedef void (*cy_as_dma_callback)(
- /* The device that completed DMA */
- cy_as_device *dev_p,
- /* The endpoint that completed DMA */
- cy_as_end_point_number_t ep,
- /* The pointer to the buffer that completed DMA */
- void *mem_p,
- /* The amount of data transferred */
- uint32_t size,
- /* The error code for this DMA xfer */
- cy_as_return_status_t error
- );
-
-/* Summary
- This structure defines a DMA request that is queued
-
- Description
- This structure contains the information about a DMA
- request that is queued and is to be sent when possible.
-*/
-typedef struct cy_as_dma_queue_entry {
- /* Pointer to memory buffer for this request */
- void *buf_p;
- /* Size of the memory buffer for DMA operation */
- uint32_t size;
- /* Offset into memory buffer for next DMA operation */
- uint32_t offset;
-	/* If TRUE and this is a read request, return the
-	 * next packet regardless of size */
- cy_bool packet;
- /* If TRUE, this is a read request */
- cy_bool readreq;
- /* Callback function for when DMA is complete */
- cy_as_dma_callback cb;
- /* Pointer to next entry in queue */
- struct cy_as_dma_queue_entry *next_p;
-} cy_as_dma_queue_entry;
-
-/* Summary
- This structure defines the endpoint data for a given endpoint.
-
- Description
- This structure defines all of the information required
- to manage DMA for a given endpoint.
-*/
-typedef struct cy_as_dma_end_point {
- /* The endpoint number */
- cy_as_end_point_number_t ep;
- /* The state of this endpoint */
- uint8_t state;
- /* The maximum amount of data accepted in a packet by the hw */
- uint16_t maxhwdata;
- /* The maximum amount of data accepted by the HAL layer */
- uint32_t maxhaldata;
- /* The queue for DMA operations */
- cy_as_dma_queue_entry *queue_p;
- /* The last entry in the DMA queue */
- cy_as_dma_queue_entry *last_p;
- /* This sleep channel is used to wait while the DMA queue
- * drains for a given endpoint */
- cy_as_hal_sleep_channel channel;
-} cy_as_dma_end_point;
-
-#define cy_as_end_point_number_is_usb(n) \
- ((n) != 2 && (n) != 4 && (n) != 6 && (n) != 8)
-#define cy_as_end_point_number_is_storage(n) \
- ((n) == 2 || (n) == 4 || (n) == 6 || (n) == 8)
-
-#define cy_as_dma_end_point_is_enabled(ep) \
- ((ep)->state & CY_AS_DMA_ENDPOINT_STATE_ENABLED)
-#define cy_as_dma_end_point_enable(ep) \
- ((ep)->state |= CY_AS_DMA_ENDPOINT_STATE_ENABLED)
-#define cy_as_dma_end_point_disable(ep) \
- ((ep)->state &= ~CY_AS_DMA_ENDPOINT_STATE_ENABLED)
-
-#define cy_as_dma_end_point_is_sleeping(ep) \
- ((ep)->state & CY_AS_DMA_ENDPOINT_STATE_SLEEPING)
-#define cy_as_dma_end_point_set_sleep_state(ep) \
- ((ep)->state |= CY_AS_DMA_ENDPOINT_STATE_SLEEPING)
-#define cy_as_dma_end_point_set_wake_state(ep) \
- ((ep)->state &= ~CY_AS_DMA_ENDPOINT_STATE_SLEEPING)
-
-#define cy_as_dma_end_point_is_running(ep) \
- ((ep)->state & CY_AS_DMA_ENDPOINT_STATE_DMA_RUNNING)
-#define cy_as_dma_end_point_set_running(ep) \
- ((ep)->state |= CY_AS_DMA_ENDPOINT_STATE_DMA_RUNNING)
-#define cy_as_dma_end_point_set_stopped(ep) \
- ((ep)->state &= ~CY_AS_DMA_ENDPOINT_STATE_DMA_RUNNING)
-
-#define cy_as_dma_end_point_in_transit(ep) \
- ((ep)->state & CY_AS_DMA_ENDPOINT_STATE_IN_TRANSIT)
-#define cy_as_dma_end_point_set_in_transit(ep) \
- ((ep)->state |= CY_AS_DMA_ENDPOINT_STATE_IN_TRANSIT)
-#define cy_as_dma_end_point_clear_in_transit(ep) \
- ((ep)->state &= ~CY_AS_DMA_ENDPOINT_STATE_IN_TRANSIT)
-
-#define cy_as_dma_end_point_is_direction_in(ep) \
- (((ep)->state & CY_AS_DMA_ENDPOINT_STATE_DIRECTION) == \
- CY_AS_DMA_ENDPOINT_STATE_DIRECTION)
-#define cy_as_dma_end_point_is_direction_out(ep) \
- (((ep)->state & CY_AS_DMA_ENDPOINT_STATE_DIRECTION) == 0)
-#define cy_as_dma_end_point_set_direction_in(ep) \
- ((ep)->state |= CY_AS_DMA_ENDPOINT_STATE_DIRECTION)
-#define cy_as_dma_end_point_set_direction_out(ep) \
- ((ep)->state &= ~CY_AS_DMA_ENDPOINT_STATE_DIRECTION)
-
-#define cy_as_dma_end_point_is_usb(p) \
- cy_as_end_point_number_is_usb((p)->ep)
-#define cy_as_dma_end_point_is_storage(p) \
- cy_as_end_point_number_is_storage((p)->ep)
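The endpoint state is a small bit field driven entirely through the accessor macros above. A short illustrative sketch (hypothetical function; ep_p is assumed to point at an already-allocated cy_as_dma_end_point):

/* Mark an endpoint as carrying P-port-to-West-Bridge (USB IN) traffic
 * and enable it for DMA queuing. */
static void example_mark_ep_in(cy_as_dma_end_point *ep_p)
{
	cy_as_dma_end_point_set_direction_in(ep_p);
	cy_as_dma_end_point_enable(ep_p);
	if (!cy_as_dma_end_point_is_running(ep_p))
		cy_as_dma_end_point_set_running(ep_p);
}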
-
-typedef struct cy_as_ll_request_response {
- /* The mbox[0] contents - see low level comm section of API doc */
- uint16_t box0;
- /* The amount of data stored in this request/response in bytes */
- uint16_t stored;
- /* Length of this request in words */
- uint16_t length;
- /* Additional status information about the request */
- uint16_t flags;
- /* Note: This is over indexed and contains the request/response data */
- uint16_t data[1];
-} cy_as_ll_request_response;
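Because data[1] is over-indexed, a request carrying N words must be allocated with room for the extra words at the end. A sketch of that allocation pattern (cy_as_hal_alloc() and cy_as_hal_mem_set() are assumed to be the HAL allocation helpers; exact names may differ):

/* Allocate a request/response large enough to hold nwords 16-bit
 * words in its trailing data[] area. */
static cy_as_ll_request_response *
example_alloc_request(uint16_t nwords)
{
	uint32_t size = sizeof(cy_as_ll_request_response) +
		(nwords - 1) * sizeof(uint16_t);
	cy_as_ll_request_response *req_p =
		(cy_as_ll_request_response *)cy_as_hal_alloc(size);

	if (req_p) {
		cy_as_hal_mem_set(req_p, 0, size);
		req_p->length = nwords;
	}
	return req_p;
}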
-
-/*
- * The callback function for responses
- */
-typedef void (*cy_as_response_callback)(
- /* The device that had the response */
- cy_as_device *dev_p,
- /* The context receiving a response */
- uint8_t context,
- /* The request data */
- cy_as_ll_request_response *rqt,
- /* The response data */
- cy_as_ll_request_response *resp,
- /* The status of the request */
- cy_as_return_status_t status
- );
-
-typedef struct cy_as_ll_request_list_node {
- /* The request to send */
- cy_as_ll_request_response *rqt;
- /* The associated response for the request */
- cy_as_ll_request_response *resp;
- /* Length of the response */
- uint16_t length;
- /* The callback to call when done */
- cy_as_response_callback callback;
- /* The state of the request */
- uint8_t state;
- /* The next request in the list */
- struct cy_as_ll_request_list_node *next;
-} cy_as_ll_request_list_node;
-
-#define cy_as_request_get_node_state(node_p) \
- ((node_p)->state & CY_AS_REQUEST_LIST_STATE_MASK)
-#define cy_as_request_set_node_state(node_p, st) \
- ((node_p)->state = \
- ((node_p)->state & ~CY_AS_REQUEST_LIST_STATE_MASK) | (st))
-
-#define cy_as_request_node_is_sync(node_p) \
- ((node_p)->state & CY_AS_REQUEST_LIST_STATE_SYNC)
-#define cy_as_request_node_set_sync(node_p) \
- ((node_p)->state |= CY_AS_REQUEST_LIST_STATE_SYNC)
-#define cy_as_request_node_clear_sync(node_p) \
- ((node_p)->state &= ~CY_AS_REQUEST_LIST_STATE_SYNC)
-
-#ifndef __doxygen__
-typedef enum cy_as_c_b_node_type {
- CYAS_INVALID,
- CYAS_USB_FUNC_CB,
- CYAS_USB_IO_CB,
- CYAS_STORAGE_IO_CB,
- CYAS_FUNC_CB
-} cy_as_c_b_node_type;
-
-typedef struct cy_as_func_c_b_node {
- cy_as_c_b_node_type node_type;
- cy_as_function_callback cb_p;
- uint32_t client_data;
- cy_as_funct_c_b_type data_type;
- void *data;
- struct cy_as_func_c_b_node *next_p;
-} cy_as_func_c_b_node;
-
-extern cy_as_func_c_b_node*
-cy_as_create_func_c_b_node_data(cy_as_function_callback
- cb, uint32_t client, cy_as_funct_c_b_type type, void *data);
-
-extern cy_as_func_c_b_node*
-cy_as_create_func_c_b_node(cy_as_function_callback cb,
- uint32_t client);
-
-extern void
-cy_as_destroy_func_c_b_node(cy_as_func_c_b_node *node);
-
-typedef struct cy_as_mtp_func_c_b_node {
- cy_as_c_b_node_type type;
- cy_as_mtp_function_callback cb_p;
- uint32_t client_data;
- struct cy_as_mtp_func_c_b_node *next_p;
-} cy_as_mtp_func_c_b_node;
-
-extern cy_as_mtp_func_c_b_node*
-cy_as_create_mtp_func_c_b_node(cy_as_mtp_function_callback cb,
- uint32_t client);
-
-extern void
-cy_as_destroy_mtp_func_c_b_node(cy_as_mtp_func_c_b_node *node);
-
-typedef struct cy_as_usb_func_c_b_node {
- cy_as_c_b_node_type type;
- cy_as_usb_function_callback cb_p;
- uint32_t client_data;
- struct cy_as_usb_func_c_b_node *next_p;
-} cy_as_usb_func_c_b_node;
-
-extern cy_as_usb_func_c_b_node*
-cy_as_create_usb_func_c_b_node(cy_as_usb_function_callback cb,
- uint32_t client);
-
-extern void
-cy_as_destroy_usb_func_c_b_node(cy_as_usb_func_c_b_node *node);
-
-typedef struct cy_as_usb_io_c_b_node {
- cy_as_c_b_node_type type;
- cy_as_usb_io_callback cb_p;
- struct cy_as_usb_io_c_b_node *next_p;
-} cy_as_usb_io_c_b_node;
-
-extern cy_as_usb_io_c_b_node*
-cy_as_create_usb_io_c_b_node(cy_as_usb_io_callback cb);
-
-extern void
-cy_as_destroy_usb_io_c_b_node(cy_as_usb_io_c_b_node *node);
-
-typedef struct cy_as_storage_io_c_b_node {
- cy_as_c_b_node_type type;
- cy_as_storage_callback cb_p;
- /* The media for the currently outstanding async storage request */
- cy_as_media_type media;
- /* The device index for the currently outstanding async storage
- * request */
- uint32_t device_index;
- /* The unit index for the currently outstanding async storage
- * request */
- uint32_t unit;
- /* The block address for the currently outstanding async storage
- * request */
- uint32_t block_addr;
- /* The operation for the currently outstanding async storage
- * request */
- cy_as_oper_type oper;
- cy_as_ll_request_response *req_p;
- cy_as_ll_request_response *reply_p;
- struct cy_as_storage_io_c_b_node *next_p;
-} cy_as_storage_io_c_b_node;
-
-extern cy_as_storage_io_c_b_node*
-cy_as_create_storage_io_c_b_node(cy_as_storage_callback cb,
- cy_as_media_type media, uint32_t device_index,
- uint32_t unit, uint32_t block_addr, cy_as_oper_type oper,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p);
-
-extern void
-cy_as_destroy_storage_io_c_b_node(cy_as_storage_io_c_b_node *node);
-
-typedef struct cy_as_c_b_queue {
- void *head_p;
- void *tail_p;
- uint32_t count;
- cy_as_c_b_node_type type;
-} cy_as_c_b_queue;
-
-extern cy_as_c_b_queue *
-cy_as_create_c_b_queue(cy_as_c_b_node_type type);
-
-extern void
-cy_as_destroy_c_b_queue(cy_as_c_b_queue *queue);
-
-/* Adds a CyAsCBNode to the end of the queue */
-extern void
-cy_as_insert_c_b_node(cy_as_c_b_queue *queue_p, void *cbnode);
-
-/* Removes the first CyAsCBNode from the queue and frees it */
-extern void
-cy_as_remove_c_b_node(cy_as_c_b_queue *queue_p);
-
-/* Remove the last CyAsCBNode from the queue and frees it */
-extern void
-cy_as_remove_c_b_tail_node(cy_as_c_b_queue *queue_p);
-
-/* Removes and frees all pending callbacks */
-extern void
-cy_as_clear_c_b_queue(cy_as_c_b_queue *queue_p);
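The callback queues hold one node per outstanding asynchronous request, appended when the request is issued and removed when its completion arrives. A rough sketch of that pattern (hypothetical helper; uses the usb_func_cbs queue declared in cy_as_device further below):

/* Record a pending async USB function callback on the device's
 * USB callback queue. */
static void example_queue_usb_cb(cy_as_device *dev_p,
	cy_as_usb_function_callback cb, uint32_t client)
{
	cy_as_usb_func_c_b_node *node_p =
		cy_as_create_usb_func_c_b_node(cb, client);

	if (node_p)
		cy_as_insert_c_b_node(dev_p->usb_func_cbs, node_p);
}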
-
-extern cy_as_return_status_t
-cy_as_misc_send_request(cy_as_device *dev_p,
- cy_as_function_callback cb,
- uint32_t client,
- cy_as_funct_c_b_type type,
- void *data,
- cy_as_c_b_queue *queue,
- uint16_t req_type,
- cy_as_ll_request_response *req_p,
- cy_as_ll_request_response *reply_p,
- cy_as_response_callback rcb);
-
-extern void
-cy_as_misc_cancel_ex_requests(cy_as_device *dev_p);
-
-/* Summary
- Free all memory allocated by and zero all
- structures initialized by CyAsUsbStart.
- */
-extern void
-cy_as_usb_cleanup(
- cy_as_device *dev_p);
-
-/* Summary
- Free all memory allocated and zero all structures initialized
- by CyAsStorageStart.
- */
-extern void
-cy_as_storage_cleanup(
- cy_as_device *dev_p);
-#endif
-
-/* Summary
- This structure defines the data structure to support a
- given command context
-
- Description
- All commands sent to the West Bridge device via the mailbox
- registers are sent via a context. Each context is independent
- and there can be a parallel stream of requests and responses on
- each context. This structure is used to manage a single context.
-*/
-typedef struct cy_as_context {
- /* The context number for this context */
- uint8_t number;
- /* This sleep channel is used to sleep while waiting on a
- * response from the west bridge device for a request. */
- cy_as_hal_sleep_channel channel;
- /* The buffer for received requests */
- cy_as_ll_request_response *req_p;
- /* The length of the request being received */
- uint16_t request_length;
- /* The callback for the next request received */
- cy_as_response_callback request_callback;
- /* A list of low level requests to go to the firmware */
- cy_as_ll_request_list_node *request_queue_p;
- /* The list node in the request queue */
- cy_as_ll_request_list_node *last_node_p;
- /* Index up to which data is stored. */
- uint16_t queue_index;
- /* Index to the next request in the queue. */
- uint16_t rqt_index;
- /* Queue of data stored */
- uint16_t data_queue[128];
-
-} cy_as_context;
-
-#define cy_as_context_is_waiting(ctxt) \
- ((ctxt)->state & CY_AS_CTXT_STATE_WAITING_RESPONSE)
-#define cy_as_context_set_waiting(ctxt) \
- ((ctxt)->state |= CY_AS_CTXT_STATE_WAITING_RESPONSE)
-#define cy_as_context_clear_waiting(ctxt) \
- ((ctxt)->state &= ~CY_AS_CTXT_STATE_WAITING_RESPONSE)
-
-
-
-/* Summary
- This data structure stores SDIO function
- parameters for a SDIO card
-
- Description
-*/
-typedef struct cy_as_sdio_device {
- /* Keeps track of IO functions initialized*/
- uint8_t function_init_map;
- uint8_t function_suspended_map;
- /* Function 0 (Card Common) properties*/
- cy_as_sdio_card card;
- /* Function 1-7 (Mapped to array element 0-6) properties.*/
- cy_as_sdio_func function[7];
-
-} cy_as_sdio_device;
-
-/* Summary
-Macros to access the SDIO card properties
-*/
-
-#define cy_as_sdio_get_function_code(handle, bus, i) \
- (((cy_as_device *)handle)->sdiocard[bus].function[i-1].function_code)
-
-#define cy_as_sdio_get_function_ext_code(handle, bus, i) \
- (((cy_as_device *)handle)->sdiocard[bus].\
- function[i-1].extended_func_code)
-
-#define cy_as_sdio_get_function_p_s_n(handle, bus, i) \
- (((cy_as_device *)handle)->sdiocard[bus].function[i-1].card_psn)
-
-#define cy_as_sdio_get_function_blocksize(handle, bus, i) \
- (((cy_as_device *)handle)->sdiocard[bus].function[i-1].blocksize)
-
-#define cy_as_sdio_get_function_max_blocksize(handle, bus, i) \
- (((cy_as_device *)handle)->sdiocard[bus].function[i-1].maxblocksize)
-
-#define cy_as_sdio_get_function_csa_support(handle, bus, i) \
- (((cy_as_device *)handle)->sdiocard[bus].function[i-1].csa_bits)
-
-#define cy_as_sdio_get_function_wakeup_support(handle, bus, i) \
-	(((cy_as_device *)handle)->sdiocard[bus].function[i-1].wakeup_support)
-
-#define cy_as_sdio_set_function_block_size(handle, bus, i, blocksize) \
- (((cy_as_device *)handle)->sdiocard[bus].function[i-1].blocksize = \
- blocksize)
-
-#define cy_as_sdio_get_card_num_functions(handle, bus) \
- (((cy_as_device *)handle)->sdiocard[bus].card.num_functions)
-
-#define cy_as_sdio_get_card_mem_present(handle, bus) \
- (((cy_as_device *)handle)->sdiocard[bus].card.memory_present)
-
-#define cy_as_sdio_get_card_manf_id(handle, bus) \
- (((cy_as_device *)handle)->sdiocard[bus].card.manufacturer__id)
-
-#define cy_as_sdio_get_card_manf_info(handle, bus) \
- (((cy_as_device *)handle)->sdiocard[bus].card.manufacturer_info)
-
-#define cy_as_sdio_get_card_blocksize(handle, bus) \
- (((cy_as_device *)handle)->sdiocard[bus].card.blocksize)
-
-#define cy_as_sdio_get_card_max_blocksize(handle, bus) \
- (((cy_as_device *)handle)->sdiocard[bus].card.maxblocksize)
-
-#define cy_as_sdio_get_card_sdio_version(handle, bus) \
- (((cy_as_device *)handle)->sdiocard[bus].card.sdio_version)
-
-#define cy_as_sdio_get_card_capability(handle, bus) \
- (((cy_as_device *)handle)->sdiocard[bus].card.card_capability)
-
-#define cy_as_sdio_get_function_init_map(handle, bus) \
- (((cy_as_device *)handle)->sdiocard[bus].function_init_map)
-
-#define cy_as_sdio_check_function_initialized(handle, bus, i) \
- (((cy_as_sdio_get_function_init_map(handle, bus)) & (0x01<<i)) ? 1 : 0)
-
-#define cy_as_sdio_set_card_block_size(handle, bus, blocksize) \
- (((cy_as_device *)handle)->sdiocard[bus].card.blocksize = blocksize)
-
-#define cy_as_sdio_check_support_bus_suspend(handle, bus) \
- ((cy_as_sdio_get_card_capability(handle, bus) & CY_SDIO_SBS) ? 1 : 0)
-
-#define cy_as_sdio_check_function_suspended(handle, bus, i) \
- ((((cy_as_device *)handle)->sdiocard[bus].function_suspended_map & \
- (0x01<<i)) ? 1 : 0)
-
-#define cy_as_sdio_set_function_suspended(handle, bus, i) \
- ((((cy_as_device *)handle)->sdiocard[bus].function_suspended_map) \
- |= (0x01<<i))
-
-#define cy_as_sdio_clear_function_suspended(handle, bus, i) \
- ((((cy_as_device *)handle)->sdiocard[bus].function_suspended_map) \
- &= (~(0x01<<i)))
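A small sketch of reading card properties through the SDIO accessor macros above (hypothetical helper; assumes bus 0 carries an SDIO card on an already-configured handle, and that the block-size field fits in uint16_t):

/* Return the current block size of SDIO function 1 on bus 0,
 * or 0 if the function has not been initialized. */
static uint16_t example_sdio_f1_blocksize(cy_as_device_handle handle)
{
	if (!cy_as_sdio_check_function_initialized(handle, 0, 1))
		return 0;
	return cy_as_sdio_get_function_blocksize(handle, 0, 1);
}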
-
-/* Summary
- This data structure represents a single device.
-
- Description
-*/
-struct cy_as_device {
- /* General stuff */
-	/* A signature to ensure we have a valid handle */
- uint32_t sig;
- /* The ID of the silicon */
- uint16_t silicon_id;
- /* Pointer to the next device */
- struct cy_as_device *next_p;
- /* This is the client specific tag for this device */
- cy_as_hal_device_tag tag;
- /* This contains various state information about the device */
- uint32_t state;
- /* Flag indicating whether INT# pin is used for DRQ */
- cy_bool use_int_drq;
-
- /* DMA related */
- /* The endpoint pointers associated with this device */
- cy_as_dma_end_point *endp[16];
- /* List of queue entries that can be used for xfers */
- cy_as_dma_queue_entry *dma_freelist_p;
-
- /* Low level comm related */
- /* The contexts available in this device */
- cy_as_context *context[CY_RQT_CONTEXT_COUNT];
- /* The low level error returned from sending an async request */
- cy_as_return_status_t ll_error;
- /* A request is currently being sent to West Bridge. */
- cy_bool ll_sending_rqt;
- /* The current mailbox request should be aborted. */
- cy_bool ll_abort_curr_rqt;
- /* Indicates that the LL layer has queued mailbox data. */
- cy_bool ll_queued_data;
-
- /* MISC API related */
- /* Misc callback */
- cy_as_misc_event_callback misc_event_cb;
-
- /* Storage Related */
- /* The reference count for the Storage API */
- uint32_t storage_count;
- /* Callback for storage events */
- cy_as_storage_event_callback_dep storage_event_cb;
- /* V1.2+ callback for storage events */
- cy_as_storage_event_callback storage_event_cb_ms;
- /* The error for a sleeping storage operation */
- cy_as_return_status_t storage_error;
- /* Flag indicating that the storage stack is waiting for an operation */
- cy_bool storage_wait;
- /* Request used for storage read/writes. */
- cy_as_ll_request_response *storage_rw_req_p;
- /* Response used for storage read/writes. */
- cy_as_ll_request_response *storage_rw_resp_p;
- /* The storage callback */
- cy_as_storage_callback_dep storage_cb;
- /* The V1.2+ storage callback */
- cy_as_storage_callback storage_cb_ms;
- /* The bus index for the currently outstanding async storage request */
- cy_as_bus_number_t storage_bus_index;
- /* The device index for the currently outstanding async storage
- * request */
- uint32_t storage_device_index;
- /* The unit index for the currently outstanding async storage request */
- uint32_t storage_unit;
- /* The block address for the currently outstanding async storage
- * request */
- uint32_t storage_block_addr;
- /* The operation for the currently outstanding async storage request */
- cy_as_oper_type storage_oper;
- /* The endpoint used to read Storage data */
- cy_as_end_point_number_t storage_read_endpoint;
- /* The endpoint used to write endpoint data */
- cy_as_end_point_number_t storage_write_endpoint;
- cy_as_device_desc storage_device_info
- [CY_AS_MAX_BUSES][CY_AS_MAX_STORAGE_DEVICES];
- /* The information on each device on each bus */
-
- /* USB Related */
-	/* This contains the endpoint async state */
- uint16_t epasync;
- /* The reference count for the USB API */
- uint32_t usb_count;
- /* The physical endpoint configuration */
- uint8_t usb_phy_config;
- /* The callbacks for async func calls */
- cy_as_c_b_queue *usb_func_cbs;
- /* Endpoint configuration information */
- cy_as_usb_end_point_config usb_config[16];
- /* The USB callback */
- cy_as_usb_event_callback_dep usb_event_cb;
- /* The V1.2+ USB callback */
- cy_as_usb_event_callback usb_event_cb_ms;
- /* The error for a sleeping usb operation */
- cy_as_return_status_t usb_error;
- /* The USB callback for a pending storage operation */
- cy_as_usb_io_callback usb_cb[16];
- /* The buffer pending from a USB operation */
- void *usb_pending_buffer;
- /* The size of the buffer pending from a USB operation */
- uint32_t usb_pending_size;
- /* If true, send a short packet */
- cy_bool usb_spacket[16];
- /* The amount of data actually xferred */
- uint32_t usb_actual_cnt;
- /* EP1OUT and EP1IN config register contents */
- uint8_t usb_ep1cfg[2];
- /* LEP config register contents */
- uint16_t usb_lepcfg[10];
- /* PEP config register contents */
- uint16_t usb_pepcfg[4];
- /* Buffer for EP0 and EP1 data sent via mailboxes */
- uint8_t *usb_ep_data;
- /* Used to track how many ack requests are pending */
- uint32_t usb_delay_ack_count;
- /* Maximum transfer size for USB endpoints. */
- uint32_t usb_max_tx_size;
-
- /* Request for sending EP0 data to West Bridge */
- cy_as_ll_request_response *usb_ep0_dma_req;
- /* Response for EP0 data sent to West Bridge */
- cy_as_ll_request_response *usb_ep0_dma_resp;
- /* Request for sending EP1 data to West Bridge */
- cy_as_ll_request_response *usb_ep1_dma_req;
- /* Response for EP1 data sent to West Bridge */
- cy_as_ll_request_response *usb_ep1_dma_resp;
-
- cy_as_ll_request_response *usb_ep0_dma_req_save;
- cy_as_ll_request_response *usb_ep0_dma_resp_save;
-
- /* MTP Related */
- /* The reference count for the MTP API */
- uint32_t mtp_count;
- /* The MTP event callback supplied by the client */
- cy_as_mtp_event_callback mtp_event_cb;
- /* The current block table to be transferred */
- cy_as_mtp_block_table *tp_blk_tbl;
-
- cy_as_c_b_queue *func_cbs_mtp;
- cy_as_c_b_queue *func_cbs_usb;
- cy_as_c_b_queue *func_cbs_stor;
- cy_as_c_b_queue *func_cbs_misc;
- cy_as_c_b_queue *func_cbs_res;
-
- /* The last USB event that was received */
- cy_as_usb_event usb_last_event;
- /* Types of storage media supported by the firmware */
- uint8_t media_supported[CY_AS_MAX_BUSES];
-
- /* SDIO card parameters*/
- cy_as_sdio_device sdiocard[CY_AS_MAX_BUSES];
- /* if true, MTP enabled Firmware. */
- cy_bool is_mtp_firmware;
- /* if true, mailbox message has come already */
- cy_bool is_mtp_data_pending;
- /* True between the time an Init was called and
- * the complete event is generated */
- cy_bool mtp_turbo_active;
- /* mbox reported EP 2 data len */
- uint16_t mtp_data_len;
- /* The error for mtp EP4 write operation */
- cy_as_return_status_t mtp_error;
- /* mtp send/get operation callback */
- cy_as_function_callback mtp_cb;
- /* mtp send/get operation client id */
- uint32_t mtp_client;
- /* mtp operation type. To be used in callback */
- cy_as_funct_c_b_type mtp_op;
-
- /* Firmware is running in P2S only mode. */
- cy_bool is_storage_only_mode;
- /* Interrupt mask value during device standby. */
- uint32_t stby_int_mask;
-};
-
-#define cy_as_device_is_configured(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_CONFIGURED)
-#define cy_as_device_set_configured(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_CONFIGURED)
-#define cy_as_device_set_unconfigured(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_CONFIGURED)
-
-#define cy_as_device_is_dma_running(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_DMA_MODULE)
-#define cy_as_device_set_dma_running(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_DMA_MODULE)
-#define cy_as_device_set_dma_stopped(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_DMA_MODULE)
-
-#define cy_as_device_is_low_level_running(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_LOWLEVEL_MODULE)
-#define cy_as_device_set_low_level_running(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_LOWLEVEL_MODULE)
-#define cy_as_device_set_low_level_stopped(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_LOWLEVEL_MODULE)
-
-#define cy_as_device_is_intr_running(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_INTR_MODULE)
-#define cy_as_device_set_intr_running(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_INTR_MODULE)
-#define cy_as_device_set_intr_stopped(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_INTR_MODULE)
-
-#define cy_as_device_is_firmware_loaded(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_FIRMWARE_LOADED)
-#define cy_as_device_set_firmware_loaded(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_FIRMWARE_LOADED)
-#define cy_as_device_set_firmware_not_loaded(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_FIRMWARE_LOADED)
-
-#define cy_as_device_is_storage_running(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_STORAGE_MODULE)
-#define cy_as_device_set_storage_running(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_STORAGE_MODULE)
-#define cy_as_device_set_storage_stopped(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_STORAGE_MODULE)
-
-#define cy_as_device_is_usb_running(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_USB_MODULE)
-#define cy_as_device_set_usb_running(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_USB_MODULE)
-#define cy_as_device_set_usb_stopped(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_USB_MODULE)
-
-#define cy_as_device_wants_scsi_messages(dp) \
- (((dp)->state & CY_AS_DEVICE_STATE_STORAGE_SCSIMSG) \
- ? cy_true : cy_false)
-#define cy_as_device_set_scsi_messages(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_STORAGE_SCSIMSG)
-#define cy_as_device_clear_scsi_messages(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_STORAGE_SCSIMSG)
-
-#define cy_as_device_is_storage_async_pending(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_STORAGE_ASYNC_PENDING)
-#define cy_as_device_set_storage_async_pending(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_STORAGE_ASYNC_PENDING)
-#define cy_as_device_clear_storage_async_pending(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_STORAGE_ASYNC_PENDING)
-
-#define cy_as_device_is_usb_connected(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_USB_CONNECTED)
-#define cy_as_device_set_usb_connected(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_USB_CONNECTED)
-#define cy_as_device_clear_usb_connected(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_USB_CONNECTED)
-
-#define cy_as_device_is_usb_high_speed(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_USB_HIGHSPEED)
-#define cy_as_device_set_usb_high_speed(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_USB_HIGHSPEED)
-#define cy_as_device_clear_usb_high_speed(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_USB_HIGHSPEED)
-
-#define cy_as_device_is_in_callback(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_IN_CALLBACK)
-#define cy_as_device_set_in_callback(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_IN_CALLBACK)
-#define cy_as_device_clear_in_callback(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_IN_CALLBACK)
-
-#define cy_as_device_is_setup_i_o_performed(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_SETUP_IO_PERFORMED)
-#define cy_as_device_set_setup_i_o_performed(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_SETUP_IO_PERFORMED)
-#define cy_as_device_clear_setup_i_o_performed(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_SETUP_IO_PERFORMED)
-
-#define cy_as_device_is_ack_delayed(dp) \
- ((dp)->usb_delay_ack_count > 0)
-#define cy_as_device_set_ack_delayed(dp) \
- ((dp)->usb_delay_ack_count++)
-#define cy_as_device_rem_ack_delayed(dp) \
- ((dp)->usb_delay_ack_count--)
-#define cy_as_device_clear_ack_delayed(dp) \
- ((dp)->usb_delay_ack_count = 0)
-
-#define cy_as_device_is_setup_packet(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_IN_SETUP_PACKET)
-#define cy_as_device_set_setup_packet(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_IN_SETUP_PACKET)
-#define cy_as_device_clear_setup_packet(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_IN_SETUP_PACKET)
-
-#define cy_as_device_is_ep0_stalled(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_EP0_STALLED)
-#define cy_as_device_set_ep0_stalled(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_EP0_STALLED)
-#define cy_as_device_clear_ep0_stalled(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_EP0_STALLED)
-
-#define cy_as_device_is_register_standby(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_REGISTER_STANDBY)
-#define cy_as_device_set_register_standby(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_REGISTER_STANDBY)
-#define cy_as_device_clear_register_standby(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_REGISTER_STANDBY)
-
-#define cy_as_device_is_pin_standby(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_PIN_STANDBY)
-#define cy_as_device_set_pin_standby(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_PIN_STANDBY)
-#define cy_as_device_clear_pin_standby(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_PIN_STANDBY)
-
-#define cy_as_device_is_crystal(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_CRYSTAL)
-#define cy_as_device_is_external_clock(dp) \
- (!((dp)->state & CY_AS_DEVICE_STATE_CRYSTAL))
-#define cy_as_device_set_crystal(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_CRYSTAL)
-#define cy_as_device_set_external_clock(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_CRYSTAL)
-
-#define cy_as_device_is_waking(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_WAKING)
-#define cy_as_device_set_waking(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_WAKING)
-#define cy_as_device_clear_waking(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_WAKING)
-
-#define cy_as_device_is_in_suspend_mode(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_SUSPEND)
-#define cy_as_device_set_suspend_mode(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_SUSPEND)
-#define cy_as_device_clear_suspend_mode(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_SUSPEND)
-
-#define cy_as_device_is_reset_pending(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_RESETP)
-#define cy_as_device_set_reset_pending(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_RESETP)
-#define cy_as_device_clear_reset_pending(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_RESETP)
-
-#define cy_as_device_is_standby_pending(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_STANDP)
-#define cy_as_device_set_standby_pending(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_STANDP)
-#define cy_as_device_clear_standby_pending(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_STANDP)
-
-#define cy_as_device_is_s_s_s_pending(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_SSSP)
-#define cy_as_device_set_s_s_s_pending(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_SSSP)
-#define cy_as_device_clear_s_s_s_pending(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_SSSP)
-
-#define cy_as_device_is_u_s_s_pending(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_USSP)
-#define cy_as_device_set_u_s_s_pending(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_USSP)
-#define cy_as_device_clear_u_s_s_pending(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_USSP)
-
-#define cy_as_device_is_m_s_s_pending(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_MSSP)
-#define cy_as_device_set_m_s_s_pending(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_MSSP)
-#define cy_as_device_clear_m_s_s_pending(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_MSSP)
-
-#define cy_as_device_is_p2s_dma_start_recvd(dp) \
- ((dp)->state & CY_AS_DEVICE_STATE_P2SDMA_START)
-#define cy_as_device_set_p2s_dma_start_recvd(dp) \
- ((dp)->state |= CY_AS_DEVICE_STATE_P2SDMA_START)
-#define cy_as_device_clear_p2s_dma_start_recvd(dp) \
- ((dp)->state &= ~CY_AS_DEVICE_STATE_P2SDMA_START)
-
-#define cy_as_device_is_usb_async_pending(dp, ep) \
- ((dp)->epasync & (1 << ep))
-#define cy_as_device_set_usb_async_pending(dp, ep) \
- ((dp)->epasync |= (1 << ep))
-#define cy_as_device_clear_usb_async_pending(dp, ep) \
- ((dp)->epasync &= ~(1 << ep))
-
-#define cy_as_device_is_nand_storage_supported(dp) \
- ((dp)->media_supported[0] & 1)
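The state word in cy_as_device is only ever touched through these accessor macros, so modules can combine checks and updates without open-coding the bit masks. A brief illustrative sketch (hypothetical function; dev_p is assumed valid):

/* Mark the USB module as running and flag endpoint 3 as having an
 * async transfer pending once a high-speed connection is up. */
static void example_track_usb_state(cy_as_device *dev_p)
{
	cy_as_device_set_usb_running(dev_p);
	if (cy_as_device_is_usb_connected(dev_p) &&
		cy_as_device_is_usb_high_speed(dev_p))
		cy_as_device_set_usb_async_pending(dev_p, 3);
}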
-
-/* Macros to check the type of West Bridge device. */
-#define cy_as_device_is_astoria_dev(dp) \
- (((dp)->silicon_id == CY_AS_MEM_CM_WB_CFG_ID_HDID_ASTORIA_VALUE) || \
- ((dp)->silicon_id == CY_AS_MEM_CM_WB_CFG_ID_HDID_ASTORIA_FPGA_VALUE))
-#define cy_as_device_is_antioch_dev(dp) \
- ((dp)->silicon_id == CY_AS_MEM_CM_WB_CFG_ID_HDID_ANTIOCH_VALUE)
-
-#ifdef CY_AS_LOG_SUPPORT
-extern void cy_as_log_debug_message(int value, const char *msg);
-#else
-#define cy_as_log_debug_message(value, msg)
-#endif
-
-/* Summary
- This function finds the device object given the HAL tag
-
- Description
- The user associates a device TAG with each West Bridge device
- created. This tag is passed from the API functions to any HAL
- functions that need to identify a specific West Bridge device. This
- tag is also passed in from the user back into the API via
- interrupt functions. This function allows the API to find the
- device structure associated with a given tag.
-
- Notes
- This function does a simple linear search for the device based
- on the TAG. This function is called each time a West Bridge
- interrupt handler is called. Therefore this works fine for a
- small number of West Bridge devices (e.g. fewer than five).
- Beyond that, this approach will need to be updated.
-
- Returns
- Pointer to a CyAsDevice associated with the tag
-*/
-extern cy_as_device *
-cy_as_device_find_from_tag(
- cy_as_hal_device_tag tag
- );
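The linear search described above might look roughly like the sketch below (illustrative only; g_device_list is a hypothetical module-level list head threaded through the next_p field of cy_as_device):

static cy_as_device *g_device_list;

/* Walk the device list and return the entry whose HAL tag matches. */
static cy_as_device *
example_find_from_tag(cy_as_hal_device_tag tag)
{
	cy_as_device *dev_p;

	for (dev_p = g_device_list; dev_p != 0; dev_p = dev_p->next_p)
		if (dev_p->tag == tag)
			return dev_p;
	return 0;
}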
-
-#include "cyas_cplus_end.h"
-
-#endif /* __INCLUDED_CYASDEVICE_H__ */
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasdma.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasdma.h
deleted file mode 100644
index 16dc9f96018..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasdma.h
+++ /dev/null
@@ -1,375 +0,0 @@
-/* Cypress West Bridge API header file (cyasdma.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASDMA_H_
-#define _INCLUDED_CYASDMA_H_
-
-#include "cyashal.h"
-#include "cyasdevice.h"
-
-#include "cyas_cplus_start.h"
-
-
-/*@@DMA Overview
- This module manages the DMA operations to/from the West Bridge
- device. The DMA module maintains a DMA queue for each endpoint
- so multiple DMA requests may be queued and they will complete
- at some future time.
-
- The DMA module must be started before it can be used. It is
- started by calling CyAsDmaStart(). This function initializes
- all of the endpoint data structures.
-
- In order to perform DMA on a particular endpoint, the endpoint
- must be enabled by calling CyAsDmaEnableEndPoint(). In addition
- to enabling or disabling the endpoint, this function also sets
- the direction for a given endpoint. Direction is given in USB
- terms. For P port to West Bridge traffic, the endpoint is a
- CyAsDirectionIn endpoint. For West Bridge to P port traffic,
- the endpoint is a CyAsDirectionOut endpoint.
-
- Once DMA is started and an endpoint is enabled, DMA requests
- are issued by calling CyAsDmaQueueRequest(). This function
- queues either a DMA read or DMA write request. The callback
- associated with the request is called once the request has been
- fulfilled.
-
- See Also
- * CyAsDmaStart
- * CyAsDmaEnableEndPoint
- * CyAsDmaDirection
- * CyAsDmaQueueRequest
- */
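A condensed sketch of that flow (hypothetical caller; write_done() and the buffer handling are illustrative, and the buffer must stay valid until the callback runs). It starts the module, enables the P-to-S write endpoint as an IN endpoint, and queues a write:

static void write_done(cy_as_device *dev_p, cy_as_end_point_number_t ep,
	void *mem_p, uint32_t size, cy_as_return_status_t error)
{
	/* The queued buffer may be reused or freed from this point on. */
	(void)dev_p; (void)ep; (void)mem_p; (void)size; (void)error;
}

static cy_as_return_status_t example_dma_write(cy_as_device *dev_p,
	void *buf_p, uint32_t len)
{
	cy_as_return_status_t ret;

	ret = cy_as_dma_start(dev_p);
	if (ret != CY_AS_ERROR_SUCCESS && ret != CY_AS_ERROR_ALREADY_RUNNING)
		return ret;

	ret = cy_as_dma_enable_end_point(dev_p, CY_AS_P2S_WRITE_ENDPOINT,
		cy_true, cy_as_direction_in);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	/* packet = cy_false, readreq = cy_false: queue a write request. */
	return cy_as_dma_queue_request(dev_p, CY_AS_P2S_WRITE_ENDPOINT,
		buf_p, len, cy_false, cy_false, write_done);
}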
-
-/************************
- * West Bridge Constants
- ************************/
-#define CY_AS_DMA_MAX_SIZE_HW_SIZE (0xffffffff)
-
-/************************
- * West Bridge Data Structures
- ************************/
-
-/* Summary
- This type specifies the direction of an endpoint to the
- CyAsDmaEnableEndPoint function.
-
- Description
- When an endpoint is enabled, the direction of the endpoint
- can also be set. This type is used to specify the endpoint
- type. Note that the direction is specified in USB terms.
- Therefore, if the DMA is from the P port to West Bridge,
- the direction is IN.
-
- See Also
- * CyAsDmaEnableEndPoint
-*/
-typedef enum cy_as_dma_direction {
- /* Set the endpoint to type IN (P -> West Bridge) */
- cy_as_direction_in = 0,
- /* Set the endpoint to type OUT (West Bridge -> P) */
- cy_as_direction_out = 1,
- /* Only valid for EP 0 */
- cy_as_direction_in_out = 2,
-	/* Do not change the endpoint type */
- cy_as_direction_dont_change = 3
-} cy_as_dma_direction;
-
-/*********************************
- * West Bridge Functions
- *********************************/
-
-/* Summary
- Initialize the DMA module and ready the module for receiving data
-
- Description
- This function initializes the DMA module by initializing all of
- the endpoint data structures associated with the device given.
- This function also registers a DMA complete callback with the HAL
- DMA code. This callback is called whenever the HAL DMA subsystem
- completes a requested DMA operation.
-
- Returns
- CY_AS_ERROR_SUCCESS - the module initialized successfully
- CY_AS_ERROR_OUT_OF_MEMORY - memory allocation failed during
- initialization
- CY_AS_ERROR_ALREADY_RUNNING - the DMA module was already running
-
- See Also
- * CyAsDmaStop
-*/
-extern cy_as_return_status_t
-cy_as_dma_start(
- /* The device to start */
- cy_as_device *dev_p
- );
-
-/* Summary
- Shutdown the DMA module
-
- Description
- This function shuts down the DMA module for this device by
- canceling any DMA requests associated with each endpoint and
- then freeing the resources associated with each DMA endpoint.
-
- Returns
- CY_AS_ERROR_SUCCESS - the module shutdown successfully
- CY_AS_ERROR_NOT_RUNNING - the DMA module was not running
-
- See Also
- * CyAsDmaStart
- * CyAsDmaCancel
-*/
-extern cy_as_return_status_t
-cy_as_dma_stop(
- /* The device to stop */
- cy_as_device *dev_p
- );
-
-/* Summary
- This function cancels all outstanding DMA requests on a given endpoint
-
- Description
- This function cancels any DMA requests outstanding on a given endpoint
- by disabling the transfer of DMA requests from the queue to the HAL
- layer and then removing any pending DMA requests from the queue. The
- callback associated with any DMA requests that are being removed is
- called with an error code of CY_AS_ERROR_CANCELED.
-
- Notes
- If a request has already been sent to the HAL layer it will be
- completed and not canceled. Only requests that have not been sent to
- the HAL layer will be canceled.
-
- Returns
- CY_AS_ERROR_SUCCESS - the traffic on the endpoint is canceled
- successfully
-
- See Also
-*/
-extern cy_as_return_status_t
-cy_as_dma_cancel(
- /* The device of interest */
- cy_as_device *dev_p,
- /* The endpoint to cancel */
- cy_as_end_point_number_t ep,
- cy_as_return_status_t err
- );
-
-/* Summary
- This function enables a single endpoint for DMA operations
-
- Description
- In order to enable the queuing of DMA requests on a given
- endpoint, the endpoint must be enabled for DMA. This function
- enables a given endpoint. In addition, this function sets the
- direction of the DMA operation.
-
- Returns
- * CY_AS_ERROR_INVALID_ENDPOINT - invalid endpoint number
- * CY_AS_ERROR_SUCCESS - endpoint was enabled or disabled
- * successfully
-
- See Also
- * CyAsDmaQueueRequest
-*/
-extern cy_as_return_status_t
-cy_as_dma_enable_end_point(
- /* The device of interest */
- cy_as_device *dev_p,
- /* The endpoint to enable or disable */
- cy_as_end_point_number_t ep,
- /* CyTrue to enable, CyFalse to disable */
- cy_bool enable,
- /* The direction of the endpoint */
- cy_as_dma_direction dir
-);
-
-/* Summary
- This function queues a DMA request for a given endpoint
-
- Description
- When a West Bridge API module wishes to do a DMA operation,
- this function is called on the associated endpoint to queue
- a DMA request. When the DMA request has been fulfilled, the
- callback associated with the DMA operation is called.
-
- Notes
- The buffer associated with the DMA request must remain valid
- until after the callback function is called.
-
- Returns
- * CY_AS_ERROR_SUCCESS - the DMA operation was queued successfully
- * CY_AS_ERROR_INVALID_ENDPOINT - the endpoint number was invalid
- * CY_AS_ERROR_ENDPOINT_DISABLED - the endpoint was disabled
- * CY_AS_ERROR_OUT_OF_MEMORY - out of memory processing the request
-
- See Also
- * CyAsDmaEnableEndPoint
- * CyAsDmaCancel
-*/
-extern cy_as_return_status_t
-cy_as_dma_queue_request(
- /* The device of interest */
- cy_as_device *dev_p,
- /* The endpoint to receive a new request */
- cy_as_end_point_number_t ep,
- /* The memory buffer for the DMA request -
- * must be valid until after the callback has been called */
- void *mem_p,
- /* The size of the DMA request in bytes */
- uint32_t size,
- /* If true and a DMA read request, return the next packet
- * regardless of size */
- cy_bool packet,
- /* If true, this is a read request,
- * otherwise it is a write request */
- cy_bool readreq,
- /* The callback to call when the DMA request is complete,
- * either successfully or via an error */
- cy_as_dma_callback cb
- );
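-
-/* Usage sketch (illustrative, not part of the original header): one
-   plausible way a caller could pair CyAsDmaEnableEndPoint() and
-   CyAsDmaQueueRequest(). The helper name and the cy_true/cy_false
-   spellings are assumptions made for illustration; cb is whatever
-   completion callback the caller supplies.
-*/
-static cy_as_return_status_t
-queue_one_write(cy_as_device *dev_p, cy_as_end_point_number_t ep,
-	cy_as_dma_direction dir, void *buf, uint32_t len,
-	cy_as_dma_callback cb)
-{
-	cy_as_return_status_t ret;
-
-	/* the endpoint must be enabled for DMA before anything is queued */
-	ret = cy_as_dma_enable_end_point(dev_p, ep, cy_true, dir);
-	if (ret != CY_AS_ERROR_SUCCESS)
-		return ret;
-
-	/* buf must remain valid until cb has been called */
-	return cy_as_dma_queue_request(dev_p, ep, buf, len,
-		cy_false /* not a packet read */,
-		cy_false /* write, not read */,
-		cb);
-}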
-
-/* Summary
- This function waits until all DMA requests on a given endpoint
- have been processed and then returns
-
- Description
- There are times when a module in the West Bridge API needs to
- wait until the DMA operations have been queued. This function
- sleeps until all DMA requests have been fulfilled and only then
- returns to the caller.
-
- Notes
- I don't think we will need a list of sleeping clients to support
- multiple parallel client modules sleeping on a single endpoint,
- but if we do instead of having a single sleep channel in the
- endpoint, each client will have to supply a sleep channel and we
- will have to maintain a list of sleep channels to wake.
-
- Returns
- * CY_AS_ERROR_SUCCESS - the queue has drained successfully
- * CY_AS_ERROR_INVALID_ENDPOINT - the endpoint given is not valid
- * CY_AS_ERROR_NESTED_SLEEP - CyAsDmaDrainQueue() was requested
-   on an endpoint where CyAsDmaDrainQueue() was already in progress
-*/
-extern cy_as_return_status_t
-cy_as_dma_drain_queue(
- /* The device of interest */
- cy_as_device *dev_p,
- /* The endpoint to drain */
- cy_as_end_point_number_t ep,
- /* If CyTrue, call kickstart to start the DMA process;
- if CyFalse, West Bridge will start the DMA process */
- cy_bool kickstart
- );
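-
-/* Usage sketch (illustrative): blocking until an endpoint's DMA queue is
-   empty. Passing cy_true for kickstart assumes the P port must start the
-   first transfer, as CyAsDmaKickStart() describes below; the helper name
-   is hypothetical.
-*/
-static cy_as_return_status_t
-wait_for_endpoint_idle(cy_as_device *dev_p, cy_as_end_point_number_t ep)
-{
-	/* sleeps until every request queued on this endpoint has finished */
-	return cy_as_dma_drain_queue(dev_p, ep, cy_true);
-}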
-
-/* Summary
- Sets the maximum amount of data West Bridge can accept in a single
- DMA Operation for the given endpoint
-
- Description
- Depending on the configuration of the West Bridge device endpoint,
- the amount of data that can be accepted varies. This function
- sets the maximum amount of data West Bridge can accept in a single
- DMA operation. The value is stored with the endpoint and passed
- to the HAL layer in the CyAsHalDmaSetupWrite() and
- CyAsHalDmaSetupRead() functions.
-
- Returns
- * CY_AS_ERROR_SUCCESS - the value was set successfully
- * CY_AS_ERROR_INVALID_SIZE - the size value was not valid
-*/
-extern cy_as_return_status_t
-cy_as_dma_set_max_dma_size(
- /* The device of interest */
- cy_as_device *dev_p,
- /* The endpoint to change */
- cy_as_end_point_number_t ep,
- /* The max size of this endpoint in bytes */
- uint32_t size
- );
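-
-/* Usage sketch (illustrative): capping per-request DMA size before any
-   work is queued on the endpoint. The 512-byte value is purely an
-   example, not a recommended setting, and the helper name is
-   hypothetical.
-*/
-static cy_as_return_status_t
-limit_endpoint_dma(cy_as_device *dev_p, cy_as_end_point_number_t ep)
-{
-	/* the HAL sees this value again in its CyAsHalDmaSetupWrite() and
-	 * CyAsHalDmaSetupRead() calls */
-	return cy_as_dma_set_max_dma_size(dev_p, ep, 512);
-}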
-
-/* Summary
- This function starts the DMA process on a given channel.
-
- Description
- When transferring data from the P port processor to West
- Bridge, the DMA operation must be initiated by P port software
- for the first transfer. Subsequent transfers will be
- handled at the interrupt level.
-
- Returns
- * CY_AS_ERROR_SUCCESS
-*/
-extern cy_as_return_status_t
-cy_as_dma_kick_start(
- /* The device of interest */
- cy_as_device *dev_p,
- /* The endpoint to change */
- cy_as_end_point_number_t ep
- );
-
-/* Summary
- This function receives endpoint data from a request.
-
- Description
- For endpoints 0 and 1, the endpoint data is transferred from
- the West Bridge device to the DMA module via low-level
- requests (via the mailbox registers).
-
- Returns
- * CY_AS_ERROR_SUCCESS
-*/
-extern cy_as_return_status_t
-cy_as_dma_received_data(
- /* The device of interest */
- cy_as_device *dev_p,
- /* The endpoint that received data */
- cy_as_end_point_number_t ep,
- /* The data size */
- uint32_t dsize,
- /* The data buffer */
- void *data
- );
-
-/* Summary
- This function is called when the DMA operation on
- an endpoint has been completed.
-
- Returns
- * void
- */
-extern void
-cy_as_dma_completed_callback(
- /* Tag to HAL completing the DMA operation. */
- cy_as_hal_device_tag tag,
- /* Endpoint on which DMA has been completed. */
- cy_as_end_point_number_t ep,
- /* Length of data received. */
- uint32_t length,
- /* Status of DMA operation. */
- cy_as_return_status_t status
- );
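-
-/* Sketch of how this callback is presumably wired up: the DMA module
-   hands it to the HAL layer, presumably as part of CyAsDmaStart(), via
-   the CyAsHalDmaRegisterCallback() function documented in cyashaldoc.h
-   later in this patch, so the HAL's interrupt path can report completed
-   transfers. The helper below is illustrative only.
-*/
-static void
-register_dma_completion(cy_as_hal_device_tag tag)
-{
-	/* cy_as_dma_completed_callback matches the parameter list of the
-	 * HAL's cy_as_hal_dma_complete_callback type */
-	cy_as_hal_dma_register_callback(tag, cy_as_dma_completed_callback);
-}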
-
-#include "cyas_cplus_end.h"
-
-#endif /* _INCLUDED_CYASDMA_H_ */
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyaserr.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyaserr.h
deleted file mode 100644
index 2cd0af1ed78..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyaserr.h
+++ /dev/null
@@ -1,1094 +0,0 @@
-/* Cypress West Bridge API header file (cyaserr.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASERR_H_
-#define _INCLUDED_CYASERR_H_
-
-/*@@West Bridge Errors
- Summary
- This section lists the error codes for West Bridge.
-
-*/
-
-/* Summary
- The function completed successfully
-*/
-#define CY_AS_ERROR_SUCCESS (0)
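-
-/* Usage sketch (illustrative): every West Bridge API entry point returns
-   a cy_as_return_status_t that callers compare against these codes. The
-   example reuses cy_as_dma_start() and cy_as_device from cyasdma.h above
-   and treats an already-running module as success; the helper name is
-   hypothetical.
-*/
-static cy_as_return_status_t
-start_dma_example(cy_as_device *dev_p)
-{
-	cy_as_return_status_t ret;
-
-	ret = cy_as_dma_start(dev_p);
-	if (ret == CY_AS_ERROR_ALREADY_RUNNING)
-		ret = CY_AS_ERROR_SUCCESS;	/* module is already usable */
-	return ret;
-}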
-
-/* Summary
- A function trying to acquire a resource was unable to do so.
-
- Description
- This code indicates that a resource that the API was trying to claim
- could not be claimed.
-
- See Also
- * CyAsMiscAcquireResource
- * CyAsStorageClaim
-*/
-#define CY_AS_ERROR_NOT_ACQUIRED (1)
-
-/* Summary
- A function trying to release a resource was unable to do so.
-
- Description
- The West Bridge API provides the capability to assign the storage media to
- either the West Bridge device or the USB port. This error indicates the
- P port was trying to release a storage media and was not able to do
- so. This generally means it was not owned by the P port processor.
-
- See Also
- * CyAsStorageRelease
-*/
-#define CY_AS_ERROR_NOT_RELEASED (2)
-
-/* Summary
- The West Bridge firmware is not loaded.
-
- Description
- Most of the API functions that are part of the West Bridge API rely on
- firmware running on the West Bridge device. This error code is
- returned when one of these functions is called and the firmware has
- not yet been loaded.
-
- See Also
- * CyAsMiscGetFirmwareVersion
- * CyAsMiscReset
- * CyAsMiscAcquireResource
- * CyAsMiscReleaseResource
- * CyAsMiscSetTraceLevel
- * CyAsStorageStart
- * CyAsStorageStop
- * CyAsStorageRegisterCallback
- * CyAsStorageClaim
- * CyAsStorageRelease
- * CyAsStorageQueryMedia
- * CyAsStorageQueryDevice
- * CyAsStorageQueryUnit
- * CyAsStorageRead
- * CyAsStorageWrite
- * CyAsStorageReadAsync
- * CyAsStorageWriteAsync
-*/
-#define CY_AS_ERROR_NO_FIRMWARE (3)
-
-/* Summary
- A timeout occurred waiting on a response from the West Bridge device
-
- Description
- When requests are made of the West Bridge device, a response is expected
- within a given timeframe. If a response is not received within the
- given timeframe, a timeout error occurs.
-*/
-#define CY_AS_ERROR_TIMEOUT (4)
-
-/* Summary
- A request to download firmware was made while not in the CONFIG mode
-
- Description
- Firmware is downloaded via the CyAsMiscDownloadFirmware() function. This
- function can only be called while in the CONFIG mode. This error indicates
- that the CyAsMiscDownloadFirmware() call was made while not in the CONFIG
- mode.
-
- See Also
- * CyAsMiscDownloadFirmware
-*/
-#define CY_AS_ERROR_NOT_IN_CONFIG_MODE (5)
-
-/* Summary
- This error is returned if the firmware size specified is invalid.
-
- Description
- If the size of the firmware to be downloaded into West Bridge is
- invalid, this error is issued. Invalid firmware sizes are those
- greater than 24K or a size of zero.
-
- See Also
- * CyAsMiscDownloadFirmware
-*/
-#define CY_AS_ERROR_INVALID_SIZE (6)
-
-/* Summary
- This error is returned if a request is made to acquire a resource that has
- already been acquired.
-
- Description
- This error is returned if a request is made to acquire a resource that has
- already been acquired.
-
- See Also
- * CyAsMiscAcquireResource
- * CyAsMiscReleaseResource
-*/
-#define CY_AS_ERROR_RESOURCE_ALREADY_OWNED (7)
-
-/* Summary
- This error is returned if a request is made to release a resource that has
- not previously been acquired.
-
- Description
- This error is returned if a request is made to release a resource that has
- not previously been acquired.
-
- See Also
- * CyAsMiscAcquireResource
- * CyAsMiscReleaseResource
-*/
-#define CY_AS_ERROR_RESOURCE_NOT_OWNED (8)
-
-/* Summary
- This error is returned when a request is made for a media that
- does not exist
-
- Description
- This error is returned when a request is made that references
- a storage media that does not exist. This error is returned
- when the storage media is not present in the current system,
- or if the media value given is not valid.
-
- See Also
- * CyAsMiscSetTraceLevel
- * CyAsStorageClaim
- * CyAsStorageRelease
- * CyAsStorageRead
- * CyAsStorageWrite
- * CyAsStorageReadAsync
- * CyAsStorageWriteAsync
-*/
-#define CY_AS_ERROR_NO_SUCH_MEDIA (9)
-
-/* Summary
- This error is returned when a request is made for a device
- that does not exist
-
- Description
- This error is returned when a request is made that references a
- storage device that does not exist. This error is returned when
- the device index is not present in the current system, or if the
- device index exceeds 15.
-
- See Also
- * CyAsMiscSetTraceLevel
- * CyAsStorageQueryDevice
- * CyAsStorageRead
- * CyAsStorageWrite
- * CyAsStorageReadAsync
- * CyAsStorageWriteAsync
-*/
-#define CY_AS_ERROR_NO_SUCH_DEVICE (10)
-
-/* Summary
- This error is returned when a request is made for a unit that
- does not exist
-
- Description
- This error is returned when a request is made that references
- a storage unit that does not exist. This error is returned
- when the unit index is not present in the current system, or
- if the unit index exceeds 255.
-
- See Also
- * CyAsMiscSetTraceLevel
- * CyAsStorageQueryDevice
- * CyAsStorageQueryUnit
- * CyAsStorageRead
- * CyAsStorageWrite
- * CyAsStorageReadAsync
- * CyAsStorageWriteAsync
-*/
-#define CY_AS_ERROR_NO_SUCH_UNIT (11)
-
-/* Summary
- This error is returned when a request is made for a block that
- does not exist
-
- Description
- This error is returned when a request is made that references
- a storage block that does not exist. This error is returned
- when the block address references an address beyond the end of
- the unit selected.
-
- See Also
- * CyAsStorageRead
- * CyAsStorageWrite
- * CyAsStorageReadAsync
- * CyAsStorageWriteAsync
-*/
-#define CY_AS_ERROR_INVALID_BLOCK (12)
-
-/* Summary
- This error is returned when an invalid trace level is set.
-
- Description
- This error is returned when the trace level request is greater
- than three.
-
- See Also
- * CyAsMiscSetTraceLevel
-*/
-#define CY_AS_ERROR_INVALID_TRACE_LEVEL (13)
-
-/* Summary
- This error is returned when West Bridge is already in the standby state
- and an attempt is made to put West Bridge into this state again.
-
- Description
- This error is returned when West Bridge is already in the standby state
- and an attempt is made to put West Bridge into this state again.
-
- See Also
- * CyAsMiscEnterStandby
-*/
-#define CY_AS_ERROR_ALREADY_STANDBY (14)
-
-/* Summary
- This error is returned when the API needs to set a pin on the
- West Bridge device, but this is not supported by the underlying HAL
- layer.
-
- Description
- This error is returned when the API needs to set a pin on the
- West Bridge device, but this is not supported by the underlying HAL
- layer.
-
- See Also
- * CyAsMiscEnterStandby
- * CyAsMiscLeaveStandby
-*/
-#define CY_AS_ERROR_SETTING_WAKEUP_PIN (15)
-
-/* Summary
- This error is returned when a module is being started that has
- already been started.
-
- Description
- This error is returned when a module is being started and that module
- has already been started. This error does not occur with the
- CyAsStorageStart() or CyAsUsbStart() functions as the storage and
- USB modules are reference counted.
-
- Note
- At the current time, this error is returned by modules internal to
- the API but not returned by any of the API functions.
-*/
-#define CY_AS_ERROR_ALREADY_RUNNING (16)
-
-/* Summary
- This error is returned when a module is being stopped that has
- already been stopped.
-
- Description
- This error is returned when a module is being stopped and that module
- has already been stopped. This error does not occur with the
- CyAsStorageStop() or CyAsUsbStop() functions as the storage and USB
- modules are reference counted.
-
- Note
- At the current time, this error is returned by modules internal to
- the API but not returned by any of the API functions.
-*/
-
-#define CY_AS_ERROR_NOT_RUNNING (17)
-
-/* Summary
- This error is returned when the caller tries to claim a media that
- has already been claimed.
-
- Description
- This error is returned when the caller tries to claim a media that
- has already been claimed.
-
- See Also
- * CyAsStorageClaim
-*/
-#define CY_AS_ERROR_MEDIA_ALREADY_CLAIMED (18)
-
-/* Summary
- This error is returned when the caller tries to release a media that has
- already been released.
-
- Description
- This error is returned when the caller tries to release a media that has
- already been released.
-
- See Also
- * CyAsStorageRelease
-*/
-#define CY_AS_ERROR_MEDIA_NOT_CLAIMED (19)
-
-/* Summary
- This error is returned when trying to cancel an asynchronous
- operation when no asynchronous operation is pending.
-
- Description
- This error is returned when a call is made to a function to cancel an
- asynchronous operation and there is no asynchronous operation pending.
-
- See Also
- * CyAsStorageCancelAsync
- * CyAsUsbCancelAsync
-*/
-#define CY_AS_ERROR_NO_OPERATION_PENDING (20)
-
-/* Summary
- This error is returned when an invalid endpoint number is provided to
- an API call.
-
- Description
- This error is returned when an invalid endpoint number is specified in
- an API call. The endpoint number may be invalid because it is greater
- than 15, or because it was a reference to an endpoint that is invalid
- for West Bridge (2, 4, 6, or 8).
-
- See Also
- * CyAsUsbSetEndPointConfig
- * CyAsUsbGetEndPointConfig
- * CyAsUsbReadData
- * CyAsUsbWriteData
- * CyAsUsbReadDataAsync
- * CyAsUsbWriteDataAsync
- * CyAsUsbSetStall
- * CyAsUsbGetStall
-*/
-#define CY_AS_ERROR_INVALID_ENDPOINT (21)
-
-/* Summary
- This error is returned when an invalid descriptor type
- is specified in an API call.
-
- Description
- This error is returned when an invalid descriptor type
- is specified in an API call.
-
- See Also
- * CyAsUsbSetDescriptor
- * CyAsUsbGetDescriptor
-*/
-#define CY_AS_ERROR_INVALID_DESCRIPTOR (22)
-
-/* Summary
- This error is returned when an invalid descriptor index
- is specified in an API call.
-
- Description
- This error is returned when an invalid descriptor index
- is specified in an API call.
-
- See Also
- * CyAsUsbSetDescriptor
- * CyAsUsbGetDescriptor
-*/
-#define CY_AS_ERROR_BAD_INDEX (23)
-
-/* Summary
- This error is returned if trying to set a USB descriptor
- when in the P port enumeration mode.
-
- Description
- This error is returned if trying to set a USB descriptor
- when in the P port enumeration mode.
-
- See Also
- * CyAsUsbSetDescriptor
- * CyAsUsbGetDescriptor
-*/
-#define CY_AS_ERROR_BAD_ENUMERATION_MODE (24)
-
-/* Summary
- This error is returned when the endpoint configuration specified
- is not valid.
-
- Description
- This error is returned when the endpoint configuration specified
- is not valid.
-
- See Also
- * CyAsUsbSetDescriptor
- * CyAsUsbGetDescriptor
- * CyAsUsbCommitConfig
-*/
-#define CY_AS_ERROR_INVALID_CONFIGURATION (25)
-
-/* Summary
- This error is returned when the API cannot verify it is connected
- to a West Bridge device.
-
- Description
- When the API is initialized, the API tries to read the ID register from
- the West Bridge device. The value from this ID register should match the
- value expected before communications with West Bridge are established. This
- error means that the contents of the ID register cannot be verified.
-
- See Also
- * CyAsMiscConfigureDevice
-*/
-#define CY_AS_ERROR_NO_ANTIOCH (26)
-
-/* Summary
- This error is returned when an API function is called and
- CyAsMiscConfigureDevice has not been called to configure West Bridge
- for the current environment.
-
- Description
- This error is returned when an API function is called and
- CyAsMiscConfigureDevice has not been called to configure West Bridge for
- the current environment.
-
- See Also
- * Almost all API functions
-*/
-#define CY_AS_ERROR_NOT_CONFIGURED (27)
-
-/* Summary
- This error is returned when West Bridge cannot allocate memory required for
- internal API operations.
-
- Description
- This error is returned when West Bridge cannot allocate memory required for
- internal API operations.
-
- See Also
- * Almost all API functions
-*/
-#define CY_AS_ERROR_OUT_OF_MEMORY (28)
-
-/* Summary
- This error is returned when a sleep operation is requested on an
- endpoint that already has a sleep operation pending.
-
- Description
- Internally, the API sleeps while waiting for the DMA queue of an
- endpoint to drain. If a second drain request is made on an endpoint
- that is already being waited on, this error is returned.
-
- Note
- At the current time, this error is returned by modules internal to the
- API but not returned by any of the API functions.
-*/
-#define CY_AS_ERROR_NESTED_SLEEP (29)
-
-/* Summary
- This error is returned when an operation is attempted on an endpoint that has
- been disabled.
-
- Description
- This error is returned when an operation is attempted on an endpoint that has
- been disabled.
-
- See Also
- * CyAsUsbReadData
- * CyAsUsbWriteData
- * CyAsUsbReadDataAsync
- * CyAsUsbWriteDataAsync
-*/
-#define CY_AS_ERROR_ENDPOINT_DISABLED (30)
-
-/* Summary
- This error is returned when a call is made to an API function when
- the device is in standby.
-
- Description
- When the West Bridge device is in standby, the only two API functions that
- can be called are CyAsMiscInStandby() and CyAsMiscLeaveStandby().
- Calling any other API function will result in this error.
-
- See Also
-*/
-#define CY_AS_ERROR_IN_STANDBY (31)
-
-/* Summary
- This error is returned when an API call is made with an invalid handle value.
-
- Description
- This error is returned when an API call is made with an invalid handle value.
-
- See Also
-*/
-#define CY_AS_ERROR_INVALID_HANDLE (32)
-
-/* Summary
- This error is returned when an invalid response is returned from
- the West Bridge device.
-
- Description
- Many of the API calls result in requests made to the West Bridge
- device. This error occurs when the response from West Bridge is
- invalid and generally indicates that the West Bridge device
- should be reset.
-
- See Also
-*/
-#define CY_AS_ERROR_INVALID_RESPONSE (33)
-
-/* Summary
- This error is returned from the callback function for any asynchronous
- read or write request that is canceled.
-
- Description
- When asynchronous requests are canceled, this error is passed to the
- callback function associated with the request to indicate that the
- request has been canceled.
-
- See Also
- * CyAsStorageReadAsync
- * CyAsStorageWriteAsync
- * CyAsUsbReadDataAsync
- * CyAsUsbWriteDataAsync
- * CyAsStorageCancelAsync
- * CyAsUsbCancelAsync
-*/
-#define CY_AS_ERROR_CANCELED (34)
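-
-/* Usage sketch (illustrative): a completion handler typically treats
-   CY_AS_ERROR_CANCELED as a benign outcome rather than a device failure.
-   Only the error codes come from this header; the handler shape and name
-   are hypothetical.
-*/
-static void
-my_async_done(cy_as_return_status_t status)
-{
-	if (status == CY_AS_ERROR_CANCELED)
-		return;	/* the request was canceled, not failed */
-
-	if (status != CY_AS_ERROR_SUCCESS) {
-		/* a real error: report it or retry */
-	}
-}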
-
-/* Summary
- This error is returned when the call to create sleep channel fails
- in the HAL layer.
-
- Description
- This error is returned when the call to create sleep channel fails
- in the HAL layer.
-
- See Also
- * CyAsMiscConfigureDevice
-*/
-#define CY_AS_ERROR_CREATE_SLEEP_CHANNEL_FAILED (35)
-
-/* Summary
- This error is returned when the call to CyAsMiscLeaveStandby
- is made and the device is not in standby.
-
- Description
- This error is returned when the call to CyAsMiscLeaveStandby
- is made and the device is not in standby.
-
- See Also
-*/
-#define CY_AS_ERROR_NOT_IN_STANDBY (36)
-
-/* Summary
- This error is returned when the call to destroy sleep channel fails
- in the HAL layer.
-
- Description
- This error is returned when the call to destroy sleep channel fails
- in the HAL layer.
-
- See Also
- * CyAsMiscDestroyDevice
-*/
-#define CY_AS_ERROR_DESTROY_SLEEP_CHANNEL_FAILED (37)
-
-/* Summary
- This error is returned when an invalid resource is specified to a call
- to CyAsMiscAcquireResource() or CyAsMiscReleaseResource()
-
- Description
- This error is returned when an invalid resource is specified to a call
- to CyAsMiscAcquireResource() or CyAsMiscReleaseResource()
-
- See Also
- * CyAsMiscAcquireResource
- * CyAsMiscReleaseResource
-*/
-#define CY_AS_ERROR_INVALID_RESOURCE (38)
-
-/* Summary
- This error occurs when an operation is requested on an endpoint that has
- a currently pending async operation.
-
- Description
- There can only be a single asynchronous pending operation on a given
- endpoint, and while that operation is pending no other operation can occur
- on the endpoint. In addition, the device cannot enter standby while
- any asynchronous operations are pending.
-
- See Also
- * CyAsStorageReadAsync
- * CyAsStorageWriteAsync
- * CyAsUsbReadDataAsync
- * CyAsUsbWriteDataAsync
- * CyAsStorageRead
- * CyAsStorageWrite
- * CyAsUsbReadData
- * CyAsUsbWriteData
- * CyAsMiscEnterStandby
-*/
-#define CY_AS_ERROR_ASYNC_PENDING (39)
-
-/* Summary
- This error is returned when a call to CyAsStorageCancelAsync() or
- CyAsUsbCancelAsync() is made when no asynchronous request is pending.
-
- Description
- This error is returned when a call to CyAsStorageCancelAsync() or
- CyAsUsbCancelAsync() is made when no asynchronous request is pending.
-
- See Also
- * CyAsStorageCancelAsync
- * CyAsUsbCancelAsync
-*/
-#define CY_AS_ERROR_ASYNC_NOT_PENDING (40)
-
-/* Summary
- This error is returned when a request is made to put the West Bridge device
- into standby mode while the USB stack is still active.
-
- Description
- This error is returned when a request is made to put the West Bridge device
- into standby mode while the USB stack is still active. You must call the
- function CyAsUsbStop() in order to shut down the USB stack in order to go
- into the standby mode.
-
- See Also
- * CyAsMiscEnterStandby
-*/
-#define CY_AS_ERROR_USB_RUNNING (41)
-
-/* Summary
- A request in the wrong direction was issued on an endpoint.
-
- Description
- This error is returned when a write is attempted on an OUT endpoint or
- a read is attempted on an IN endpoint.
-
- See Also
- * CyAsUsbReadData
- * CyAsUsbWriteData
- * CyAsUsbReadDataAsync
- * CyAsUsbWriteDataAsync
-*/
-#define CY_AS_ERROR_USB_BAD_DIRECTION (42)
-
-/* Summary
- An invalid request was received
-
- Description
- This error is returned if an invalid request is issued.
-*/
-#define CY_AS_ERROR_INVALID_REQUEST (43)
-
-/* Summary
- An ACK request was requested while no setup packet was pending.
-
- Description
- This error is issued if CyAsUsbAckSetupPacket() is called when no
- setup packet is pending.
-*/
-#define CY_AS_ERROR_NO_SETUP_PACKET_PENDING (44)
-
-/* Summary
- A call was made to a API function that cannot be called from a callback.
-
- Description
- Only asynchronous functions can be called from within West Bridge callbacks.
- This error results when an invalid function is called from a callback.
-*/
-#define CY_AS_ERROR_INVALID_IN_CALLBACK (45)
-
-/* Summary
- A call was made to CyAsUsbSetEndPointConfig() before
- CyAsUsbSetPhysicalConfiguration() was called.
-
- Description
- When logical endpoints are configured, you must define the physical
- endpoint for the logical endpoint being configured. Therefore
- CyAsUsbSetPhysicalConfiguration() must be called to define the
- physical endpoints before calling CyAsUsbSetEndPointConfig().
-*/
-#define CY_AS_ERROR_ENDPOINT_CONFIG_NOT_SET (46)
-
-/* Summary
- The physical endpoint referenced is not valid in the current physical
- configuration
-
- Description
- When logical endpoints are configured, you must define the physical
- endpoint for the logical endpoint being configured. Given the
- current physical configuration, the physical endpoint referenced
- is not valid.
-*/
-#define CY_AS_ERROR_INVALID_PHYSICAL_ENDPOINT (47)
-
-/* Summary
- The data supplied to the CyAsMiscDownloadFirmware() call is not
- aligned on a WORD (16 bit) boundary.
-
- Description
- Many systems have problems with the transfer of data a word at a
- time when the data is not word aligned. For this reason, we
- require that the firmware image be aligned on a word boundary and
- be an even number of bytes. This error is returned if these
- conditions are not met.
-*/
-#define CY_AS_ERROR_ALIGNMENT_ERROR (48)
-
-/* Summary
- A call was made to destroy the West Bridge device, but the USB
- stack or the storage stack was still running.
-
- Description
- Before calling CyAsMiscDestroyDevice to destroy a West Bridge
- device created via a call to CyAsMiscCreateDevice, the USB and
- STORAGE stacks must be stopped via calls to CyAsUsbStop and
- CyAsStorageStop. This error indicates that one of these two
- stacks has not been stopped.
-*/
-#define CY_AS_ERROR_STILL_RUNNING (49)
-
-/* Summary
- A call was made to the API for a function that is not yet supported.
-
- Description
- Some calls are defined in the API now to maintain compatibility with
- future versions, but are not yet supported. This error is returned if
- you ask for a capability that does not yet exist.
-*/
-#define CY_AS_ERROR_NOT_YET_SUPPORTED (50)
-
-/* Summary
- A NULL callback was provided where a non-NULL callback was required
-
- Description
- When async IO functions are called, a callback is required to indicate
- that the IO has completed. This callback must be non-NULL.
-*/
-#define CY_AS_ERROR_NULL_CALLBACK (51)
-
-/* Summary
- This error is returned when a request is made to put the West Bridge device
- into standby mode while the storage stack is still active.
-
- Description
- This error is returned when a request is made to put the West Bridge device
- into standby mode while the storage stack is still active. You must call the
- function CyAsStorageStop() in order to shut down the storage stack in order
- to go into the standby mode.
-
- See Also
- * CyAsMiscEnterStandby
-*/
-#define CY_AS_ERROR_STORAGE_RUNNING (52)
-
-/* Summary
- This error is returned when an operation is attempted that cannot be
- completed while the USB stack is connected to a USB host.
-
- Description
- This error is returned when an operation is attempted that cannot be
- completed while the USB stack is connected to a USB host. In order
- to successfully complete the desired operation, CyAsUsbDisconnect()
- must be called to disconnect from the host.
-*/
-#define CY_AS_ERROR_USB_CONNECTED (53)
-
-/* Summary
- This error is returned when a USB disconnect is attempted and the
- West Bridge device is not connected.
-
- Description
- This error is returned when a USB disconnect is attempted and the
- West Bridge device is not connected.
-*/
-#define CY_AS_ERROR_USB_NOT_CONNECTED (54)
-
-/* Summary
- This error is returned when a P2S storage operation is attempted
- and data could not be read from or written to the storage media.
-
- Description
- This error is returned when a P2S storage operation is attempted
- and data could not be read from or written to the storage media.
- If this error is received then a retry can be done.
-*/
-#define CY_AS_ERROR_MEDIA_ACCESS_FAILURE (55)
-
-/* Summary
- This error is returned when a P2S storage operation is attempted
- and the media is write protected.
-
- Description
- This error is returned when a P2S storage operation is attempted
- and the media is write protected.
-*/
-#define CY_AS_ERROR_MEDIA_WRITE_PROTECTED (56)
-
-/* Summary
- This error is returned when an attempt is made to cancel a request
- that has already been sent to the West Bridge.
-
- Description
- It is not possible to cancel an asynchronous storage read/write
- operation after the actual data transfer with the West Bridge
- has started. This error is returned if CyAsStorageCancelAsync
- is called to cancel such a request.
- */
-#define CY_AS_ERROR_OPERATION_IN_TRANSIT (57)
-
-/* Summary
- This error is returned when an invalid parameter is passed to
- one of the APIs.
-
- Description
- Some of the West Bridge APIs are applicable to only specific
- media types, devices etc. This error code is returned when an
- API is called with an invalid parameter type.
- */
-#define CY_AS_ERROR_INVALID_PARAMETER (58)
-
-/* Summary
- This error is returned if an API is not supported in the current setup.
-
- Description
- Some of the West Bridge APIs work only with specific device types
- or firmware images. This error is returned when such APIs are called
- when the current device or firmware does not support the invoked API
- function.
- */
-#define CY_AS_ERROR_NOT_SUPPORTED (59)
-
-/* Summary
- This error is returned when a call is made to one of the Storage or
- USB APIs while the device is in suspend mode.
-
- Description
- This error is returned when a call is made to one of the storage or
- USB APIs while the device is in suspend mode.
- */
-#define CY_AS_ERROR_IN_SUSPEND (60)
-
-/* Summary
- This error is returned when the call to CyAsMiscLeaveSuspend
- is made and the device is not in suspend mode.
-
- Description
- This error is returned when the call to CyAsMiscLeaveSuspend
- is made and the device is not in suspend mode.
- */
-#define CY_AS_ERROR_NOT_IN_SUSPEND (61)
-
-/* Summary
- This error is returned when a command that is disabled by USB is called.
-
- Description
- The remote wakeup capability should be exercised only if enabled by the
- USB host. This error is returned when the CyAsUsbSignalRemoteWakeup API
- is called when the feature has not been enabled by the USB host.
- */
-#define CY_AS_ERROR_FEATURE_NOT_ENABLED (62)
-
-/* Summary
- This error is returned when an Async storage read or write is called before a
- query device call is issued.
-
- Description
- In order for the SDK to properly set up a DMA the block size of a given media
- needs to be known. This is done by making a call to CyAsStorageQueryDevice.
- This call only needs to be made once per device. If this call is not issued
- before an Async read or write is issued this error code is returned.
- */
-#define CY_AS_ERROR_QUERY_DEVICE_NEEDED (63)
-
-/* Summary
- This error is returned when a call is made to USB or STORAGE Start or
- Stop before a prior Start or Stop has finished.
-
- Description
- The USB and STORAGE start and stop functions can only be called if a
- prior start or stop function call has fully completed. This means when
- an async EX call is made you must wait until the callback for that call
- has been completed before calling start or stop again.
- */
-#define CY_AS_ERROR_STARTSTOP_PENDING (64)
-
-/* Summary
- This error is returned when a request is made for a bus that does not exist
-
- Description
- This error is returned when a request is made that references a bus
- number that does not exist. This error is returned when the bus number
- is not present in the current system, or if the bus number given is not
- valid.
-
- See Also
- * CyAsMiscSetTraceLevel
- * CyAsStorageClaim
- * CyAsStorageRelease
- * CyAsStorageRead
- * CyAsStorageWrite
- * CyAsStorageReadAsync
- * CyAsStorageWriteAsync
-*/
-#define CY_AS_ERROR_NO_SUCH_BUS (65)
-
-/* Summary
- This error is returned when the bus corresponding to a media type cannot
- be resolved.
-
- Description
- In some S-Port configurations, the same media type may be supported on
- multiple buses. In this case, it is not possible to resolve the target
- address based on the media type. This error indicates that only
- bus-based addressing is supported in a particular run-time
- configuration.
-
- See Also
- * CyAsMediaType
- * CyAsBusNumber_t
- */
-#define CY_AS_ERROR_ADDRESS_RESOLUTION_ERROR (66)
-
-/* Summary
- This error is returned when an invalid command is passed to the
- CyAsStorageSDIOSync() function.
-
- Description
- This error indicates an unknown command type was passed to the SDIO
- command handler function.
- */
-
-#define CY_AS_ERROR_INVALID_COMMAND (67)
-
-
-/* Summary
- This error is returned when an invalid or uninitialized
- function number is passed to an SDIO function.
-
- Description
- This error indicates an unknown or uninitialized function number was
- passed to an SDIO function.
- */
-#define CY_AS_ERROR_INVALID_FUNCTION (68)
-
-/* Summary
- This error is returned when an invalid block size is passed to
- CyAsSdioSetBlocksize().
-
- Description
- This error is returned when an invalid block size (greater than
- maximum block size supported) is passed to CyAsSdioSetBlocksize().
- */
-
-#define CY_AS_ERROR_INVALID_BLOCKSIZE (69)
-
-/* Summary
- This error is returned when a requested tuple is not found.
-
- Description
- This error is returned when a requested tuple is not found.
- */
-#define CY_AS_ERROR_TUPLE_NOT_FOUND (70)
-
-/* Summary
- This error is returned when an extended IO operation to an SDIO function is
- Aborted.
- Description
- This error is returned when an extended IO operation to an SDIO function is
- Aborted. */
-#define CY_AS_ERROR_IO_ABORTED (71)
-
-/* Summary
- This error is returned when an extended IO operation to an SDIO function is
- Suspended.
- Description
- This error is returned when an extended IO operation to an SDIO function is
- Suspended. */
-#define CY_AS_ERROR_IO_SUSPENDED (72)
-
-/* Summary
- This error is returned when IO is attempted to a Suspended SDIO function.
- Description
- This error is returned when IO is attempted to a Suspended SDIO function. */
-#define CY_AS_ERROR_FUNCTION_SUSPENDED (73)
-
-/* Summary
- This error is returned if an MTP function is called before MTPStart
- has completed.
- Description
- This error is returned if an MTP function is called before MTPStart
- has completed.
-*/
-#define CY_AS_ERROR_MTP_NOT_STARTED (74)
-
-/* Summary
- This error is returned by API functions that are not valid in MTP
- mode (CyAsStorageClaim for example)
- Description
- This error is returned by API functions that are not valid in MTP
- mode (CyAsStorageClaim for example)
-*/
-#define CY_AS_ERROR_NOT_VALID_IN_MTP (75)
-
-/* Summary
- This error is returned when an attempt is made to partition a
- storage device that is already partitioned.
-
- Description
- This error is returned when an attempt is made to partition a
- storage device that is already partitioned.
-*/
-#define CY_AS_ERROR_ALREADY_PARTITIONED (76)
-
-/* Summary
- This error is returned when a call is made to
- CyAsUsbSelectMSPartitions after CyAsUsbSetEnumConfig is called.
-
- Description
- This error is returned when a call is made to
- CyAsUsbSelectMSPartitions after CyAsUsbSetEnumConfig is called.
- */
-#define CY_AS_ERROR_INVALID_CALL_SEQUENCE (77)
-
-/* Summary
- This error is returned when a StorageWrite operation is attempted
- during an ongoing MTP transfer.
- Description
- This error is returned when a StorageWrite operation is attempted
- during an ongoing MTP transfer. An MTP transfer is initiated by a
- call to CyAsMTPInitSendObject or CyAsMTPInitGetObject and is not
- finished until the CyAsMTPSendObjectComplete or
- CyAsMTPGetObjectComplete event is generated.
-*/
-#define CY_AS_ERROR_NOT_VALID_DURING_MTP (78)
-
-/* Summary
- This error is returned when a StorageRead or StorageWrite is
- attempted while a UsbRead or UsbWrite on a Turbo endpoint (2 or 6) is
- pending, or vice versa.
- Description
- When there is a pending usb read or write on a turbo endpoint (2 or 6)
- a storage read or write call may not be performed. Similarly when there
- is a pending storage read or write a usb read or write may not be
- performed on a turbo endpoint (2 or 6).
-*/
-#define CY_AS_ERROR_STORAGE_EP_TURBO_EP_CONFLICT (79)
-
-/* Summary
- This error is returned when the processor requests to reserve a
- greater number of zones than are available for processor booting
- via the lna firmware.
-
- Description
- Astoria does not allocate any nand zones for the processor in this case.
-*/
-#define CY_AS_ERROR_EXCEEDED_NUM_ZONES_AVAIL (80)
-
-#endif /* _INCLUDED_CYASERR_H_ */
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyashal.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyashal.h
deleted file mode 100644
index b695ba1a911..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyashal.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* Cypress West Bridge API header file (cyashal.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASHAL_H_
-#define _INCLUDED_CYASHAL_H_
-
-#if !defined(__doxygen__)
-
-/* The possible HAL layers defined and implemented by Cypress */
-
-#ifdef __CY_ASTORIA_FPGA_HAL__
-#ifdef CY_HAL_DEFINED
-#error only one HAL layer can be defined
-#endif
-
-#define CY_HAL_DEFINED
-
-#include "cyashalfpga.h"
-#endif
-
-/***** SCM User space HAL ****/
-#ifdef __CY_ASTORIA_SCM_HAL__
-#ifdef CY_HAL_DEFINED
-#error only one HAL layer can be defined
-#endif
-
-#define CY_HAL_DEFINED
-
-#include "cyanhalscm.h"
-#endif
-/***** SCM User space HAL ****/
-
-/***** SCM Kernel HAL ****/
-#ifdef __CY_ASTORIA_SCM_KERNEL_HAL__
-#ifdef CY_HAL_DEFINED
-#error only one HAL layer can be defined
-#endif
-
-#define CY_HAL_DEFINED
-
-#include "cyanhalscm_kernel.h"
-#endif
-/***** SCM Kernel HAL ****/
-
-/***** OMAP5912 Kernel HAL ****/
-#ifdef __CY_ASTORIA_OMAP_5912_KERNEL_HAL__
- #ifdef CY_HAL_DEFINED
- #error only one HAL layer can be defined
- #endif
-
- #define CY_HAL_DEFINED
-
- #include "cyanhalomap_kernel.h"
-#endif
-/***** eof OMAP5912 Kernel HAL ****/
-
-
-
-/***** OMAP3430 Kernel HAL ****/
-#ifdef CONFIG_MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL
-
- #ifdef CY_HAL_DEFINED
- #error only one HAL layer can be defined
- #endif
-
- #define CY_HAL_DEFINED
-/* moved to staging location, eventual implementation
- * considered is here
- * #include mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h>
-*/
- #include "../../../arch/arm/plat-omap/include/mach/westbridge/westbridge-omap3-pnand-hal/cyashalomap_kernel.h"
-
-#endif
-/*****************************/
-
-
-/******/
-#ifdef __CY_ASTORIA_CUSTOMER_HAL__
-#ifdef CY_HAL_DEFINED
-#error only one HAL layer can be defined
-#endif
-
-#define CY_HAL_DEFINED
-#include "cyashal_customer.h"
-
-#endif
-
-#endif /* __doxygen__ */
-
-#endif /* _INCLUDED_CYASHAL_H_ */
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyashalcb.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyashalcb.h
deleted file mode 100644
index 4d1670ee047..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyashalcb.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Cypress West Bridge API header file (cyashalcb.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASHALCB_H_
-#define _INCLUDED_CYASHALCB_H_
-
-/* Summary
- This type defines a callback function type called when a
- DMA operation has completed.
-
- Description
-
- See Also
- * CyAsHalDmaRegisterCallback
- * CyAsHalDmaSetupWrite
- * CyAsHalDmaSetupRead
-*/
-typedef void (*cy_as_hal_dma_complete_callback)(
- cy_as_hal_device_tag tag,
- cy_as_end_point_number_t ep,
- uint32_t cnt,
- cy_as_return_status_t ret);
-
-typedef cy_as_hal_dma_complete_callback \
- cy_an_hal_dma_complete_callback;
-#endif
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyashaldoc.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyashaldoc.h
deleted file mode 100644
index 5bcbe9bf2f5..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyashaldoc.h
+++ /dev/null
@@ -1,800 +0,0 @@
-/* Cypress West Bridge API header file (cyashaldoc.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASHALDOC_H_
-#define _INCLUDED_CYASHALDOC_H_
-
-#include "cyashaldef.h"
-
-/*@@Hardware Abstraction Layer (HAL)
- Summary
- This software module is supplied by the user of the West Bridge
- API. This module contains the software that is specific to the
- hardware implementation or operating system of the client
- system.
-
- * Sleep Channels *
- A sleep channel is an operating system object that provides the
- capability for one thread or process to sleep while waiting on
- the completion of some hardware event. The hardware event is
- usually processed by a hardware interrupt and the interrupt
- handler then wakes the thread or process that is sleeping.
-
- A sleep channel provides the mechanism for this operation. A
- sleep channel is created and initialized during the API
- initialization. When the API needs to wait for the hardware,
- the API performs a SleepOn() operation on the sleep channel.
- When the hardware event occurs, an interrupt handler processes the
- event and then performs a Wake() operation on the sleep channel
- to wake the sleeping process or thread.
-
- * DMA Model *
- When the West Bridge API needs to transfer USB or storage data
- to/from the West Bridge device, this is done using a "DMA"
- operation. In this context the term DMA is used loosely as the
- West Bridge API does not really care if the data is transferred
- using a burst read or write operation, or if the data is
- transferred using programmed I/O operations. When a "DMA"
- operation is needed, the West Bridge API calls either
- CyAsHalDmaSetupRead() or CyAsHalDmaSetupWrite() depending on the
- direction of the data flow. The West Bridge API expects the
- "DMA" operation requested in the call to be completed and the
- registered "DMA complete" callback to be called.
-
- The West Bridge API looks at several factors to determine the
- size of the "DMA" request to pass to the HAL layer. First the
- West Bridge API calls CyAsHalDmaMaxRequestSize() to determine
- the maximum amount of data the HAL layer can accept for a "DMA"
- operation on the requested endpoint. The West Bridge API will
- never exceed this value in a "DMA" request to the HAL layer.
- The West Bridge API also sends the maximum amount of data the
- West Bridge device can accept as part of the "DMA" request. If
- the amount of data in the "DMA" request to the HAL layer
- exceeds the amount of data the West Bridge device can accept,
- it is expected that the HAL layer has the ability to break the
- request into multiple operations.
-
- If the HAL implementation requires the API to handle the size
- of the "DMA" requests for one or more endpoints, the value
- CY_AS_DMA_MAX_SIZE_HW_SIZE can be returned from the
- CyAsHalDmaMaxRequestSize() call. In this case, the API assumes
- that the maximum size of each "DMA" request should be limited
- to the maximum that can be accepted by the endpoint in question.
-
- Notes
- See the <install>/api/hal/scm_kernel/cyashalscm_kernel.c file
- for an example of how the DMA request size can be managed by
- the HAL implementation.
-
- * Interrupt Handling *
- The HAL implementation is required to handle interrupts arriving
- from the West Bridge device, and call the appropriate handlers.
- If the interrupt arriving is one of PLLLOCKINT, PMINT, MBINT or
- MCUINT, the CyAsIntrServiceInterrupt API should be called to
- service the interrupt. If the interrupt arriving is DRQINT, the
- HAL should identify the endpoint corresponding to which the DRQ
- is being generated and perform the read/write transfer from the
- West Bridge. See the <install>/api/hal/scm_kernel/
- cyashalscm_kernel.c or <install>/api/hal/fpga/cyashalfpga.c
- reference HAL implementations for examples.
-
- The HAL implementation can choose to poll the West Bridge
- interrupt status register instead of using interrupts. In this
- case, the polling has to be performed from a different thread/
- task than the one running the APIs. This is required because
- there are API calls that block on the reception of data from the
- West Bridge, which is delivered only through the interrupt
- handlers.
-
- * Required Functions *
- This section defines the types and functions that must be
- supplied in order to provide a complete HAL layer for the
- West Bridge API.
-
- Types that must be supplied:
- * CyAsHalSleepChannel
-
- Hardware functions that must be supplied:
- * CyAsHalWriteRegister
- * CyAsHalReadRegister
- * CyAsHalDmaSetupWrite
- * CyAsHalDmaSetupRead
- * CyAsHalDmaCancelRequest
- * CyAsHalDmaRegisterCallback
- * CyAsHalDmaMaxRequestSize
- * CyAsHalSetWakeupPin
- * CyAsHalSyncDeviceClocks
- * CyAsHalInitDevRegisters
- * CyAsHalReadRegsBeforeStandby
- * CyAsHalRestoreRegsAfterStandby
-
- Operating system functions that must be supplied:
- * CyAsHalAlloc
- * CyAsHalFree
- * CyAsHalCBAlloc
- * CyAsHalCBFree
- * CyAsHalMemSet
- * CyAsHalCreateSleepChannel
- * CyAsHalDestroySleepChannel
- * CyAsHalSleepOn
- * CyAsHalWake
- * CyAsHalDisableInterrupts
- * CyAsHalEnableInterrupts
- * CyAsHalSleep150
- * CyAsHalSleep
- * CyAsHalAssert
- * CyAsHalPrintMessage
- * CyAsHalIsPolling
-*/
-
-/* Summary
- This is the type that represents a sleep channel
-
- Description
- A sleep channel is an operating system object on which a thread
- of control can wait; the thread sleeps until another thread
- signals the sleep object. This
- object is generally used when a high level API is called
- and must wait for a response that is supplied in an interrupt
- handler. The thread calling the API is put into a sleep
- state and when the reply arrives via the interrupt handler,
- the interrupt handler wakes the sleeping thread to indicate
- that the expected reply is available.
-*/
-typedef struct cy_as_hal_sleep_channel {
- /* This structure is filled in with OS specific information
- to implement a sleep channel */
- int m_channel;
-} cy_as_hal_sleep_channel;
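-
-/* A hedged, Linux-flavoured sketch of what a HAL might put behind this
-   placeholder: a wait queue plus a wake flag, so that a SleepOn() call
-   can block and a Wake() call (often from the interrupt handler) can
-   release it. The structure and helper names are illustrative, not the
-   reference implementation.
-*/
-#include <linux/wait.h>
-
-struct my_sleep_channel {
-	wait_queue_head_t wq;
-	int woken;
-};
-
-static void my_sleep_channel_init(struct my_sleep_channel *ch)
-{
-	init_waitqueue_head(&ch->wq);
-	ch->woken = 0;
-}
-
-static int my_sleep_on(struct my_sleep_channel *ch, unsigned long timeout)
-{
-	/* block until my_wake() runs or the timeout (in jiffies) expires */
-	long left = wait_event_timeout(ch->wq, ch->woken, timeout);
-
-	ch->woken = 0;
-	return left != 0;	/* nonzero means we were woken in time */
-}
-
-static void my_wake(struct my_sleep_channel *ch)
-{
-	ch->woken = 1;
-	wake_up(&ch->wq);
-}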
-
-/* Summary
- This function is called to write a register value
-
- Description
- This function is called to write a specific register to a
- specific value. The tag identifies the device of interest.
- The address is relative to the base address of the West
- Bridge device.
-
- Returns
- Nothing
-
- See Also
- * CyAsHalDeviceTag
- * CyAsHalReadRegister
-*/
-EXTERN void
-cy_as_hal_write_register(
- /* The tag to ID a specific West Bridge device */
- cy_as_hal_device_tag tag,
- /* The address we are writing to */
- uint16_t addr,
- /* The value to write to the register */
- uint16_t value
- );
-
-/* Summary
- This function is called to read a register value
-
- Description
- This function is called to read the contents of a specific
- register. The tag identifies the device of interest. The
- address is relative to the base address of the West Bridge
- device.
-
- Returns
- Contents of the register
-
- See Also
- * CyAsHalDeviceTag
- * CyAsHalWriteRegister
-*/
-EXTERN uint16_t
-cy_as_hal_read_register(
- /* The tag to ID a specific West Bridge device */
- cy_as_hal_device_tag tag,
- /* The address we are reading from */
- uint16_t addr
- );
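-
-/* A hedged sketch of one possible memory-mapped implementation: if the
-   West Bridge registers have been ioremap()ed and the base address can be
-   recovered from the tag, the two functions reduce to 16-bit MMIO
-   accesses. my_tag_to_base() is a hypothetical lookup helper, and the
-   (addr << 1) scaling assumes word-addressed registers on a byte-addressed
-   bus.
-*/
-#include <linux/io.h>
-
-/* hypothetical helper that maps a device tag to its ioremap()ed base */
-void __iomem *my_tag_to_base(cy_as_hal_device_tag tag);
-
-void cy_as_hal_write_register(cy_as_hal_device_tag tag,
-	uint16_t addr, uint16_t value)
-{
-	iowrite16(value, my_tag_to_base(tag) + (addr << 1));
-}
-
-uint16_t cy_as_hal_read_register(cy_as_hal_device_tag tag, uint16_t addr)
-{
-	return ioread16(my_tag_to_base(tag) + (addr << 1));
-}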
-
-/* Summary
- This function initiates a DMA write operation to write
- to West Bridge
-
- Description
- This function initiates a DMA write operation. The request
- size will not exceed the value the HAL layer returned via
- CyAsHalDmaMaxRequestSize(). This request size may exceed
- the size of what the West Bridge device will accept as one
- packet and the HAL layer may need to divide the request
- into multiple hardware DMA operations.
-
- Returns
- None
-
- See Also
- * CyAsHalDmaSetupRead
- * CyAsHalDmaMaxRequestSize
-*/
-EXTERN void
-cy_as_hal_dma_setup_write(
- /* The tag to ID a specific West Bridge device */
- cy_as_hal_device_tag tag,
- /* The endpoint we are writing to */
- cy_as_end_point_number_t ep,
- /* The data to write via DMA */
- void *buf_p,
- /* The size of the data at buf_p */
- uint32_t size,
- /* The maximum amount of data that the endpoint
- * can accept as one packet */
- uint16_t maxsize
- );
-
-/* Summary
- This function initiates a DMA read operation from West Bridge
-
- Description
- This function initiates a DMA read operation. The request
- size will not exceed the value the HAL layer returned via
- CyAsHalDmaMaxRequestSize(). This request size may exceed
- the size of what the West Bridge device will accept as one packet and
- the HAL layer may need to divide the request into multiple
- hardware DMA operations.
-
- Returns
- None
-
- See Also
- * CyAsHalDmaSetupWrite
- * CyAsHalDmaMaxRequestSize
-*/
-EXTERN void
-cy_as_hal_dma_setup_read(
- /* The tag to ID a specific West Bridge device */
- cy_as_hal_device_tag tag,
- /* The endpoint we are reading from */
- cy_as_end_point_number_t ep,
- /* The buffer to read data into */
- void *buf_p,
- /* The amount of data to read */
- uint32_t size,
- /* The maximum amount of data that the endpoint
- * can provide in one DMA operation */
- uint16_t maxsize
- );
-
-/* Summary
- This function cancels a pending DMA request
-
- Description
- This function cancels a pending DMA request that has been
- passed down to the hardware. The HAL layer can elect to
- physically cancel the request if possible, or just ignore
- the results of the request if it is not possible.
-
- Returns
- None
-*/
-EXTERN void
-cy_as_hal_dma_cancel_request(
- /* The tag to ID a specific West Bridge device */
- cy_as_hal_device_tag tag,
- /* The endpoint we are reading from */
- cy_as_end_point_number_t ep
- );
-
-/* Summary
- This function registers a callback function to be called when
- a DMA request is completed
-
- Description
- This function registers a callback that is called when a request
- issued via CyAsHalDmaSetupWrite() or CyAsHalDmaSetupRead() has
- completed.
-
- Returns
- None
-
- See Also
- * CyAsHalDmaSetupWrite
- * CyAsHalDmaSetupRead
-*/
-EXTERN void
-cy_as_hal_dma_register_callback(
- /* The tag to ID a specific West Bridge device */
- cy_as_hal_device_tag tag,
- /* The callback to call when a request has completed */
- cy_as_hal_dma_complete_callback cb
- );
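-
-/* A hedged sketch of the HAL side of this contract: store the callback at
-   registration time and invoke it from the platform's own DMA-completion
-   path. The my_dma_cb storage and the my_platform_dma_done() hook are
-   illustrative; a real HAL would track per-device state rather than a
-   single static.
-*/
-static cy_as_hal_dma_complete_callback my_dma_cb;
-
-void cy_as_hal_dma_register_callback(cy_as_hal_device_tag tag,
-	cy_as_hal_dma_complete_callback cb)
-{
-	(void)tag;	/* single-device sketch: the tag is not needed */
-	my_dma_cb = cb;
-}
-
-/* called from the platform's DMA-done interrupt handler (hypothetical) */
-static void my_platform_dma_done(cy_as_hal_device_tag tag,
-	cy_as_end_point_number_t ep, uint32_t count)
-{
-	if (my_dma_cb)
-		my_dma_cb(tag, ep, count, CY_AS_ERROR_SUCCESS);
-}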
-
-/* Summary
- This function returns the maximum size of a DMA request that can
- be handled by the HAL.
-
- Description
- When DMA requests are passed to the HAL layer for processing,
- the HAL layer may have a limit on the size of the request that
- can be handled. This function is called by the DMA manager for
- an endpoint when DMA is enabled to get the maximum size of data
- the HAL layer can handle. The DMA manager ensures that a request
- is never sent to the HAL layer that exceeds the size returned by
- this function.
-
- Returns
- the maximum size of DMA request the HAL layer can handle
-*/
-EXTERN uint32_t
-cy_as_hal_dma_max_request_size(
- /* The tag to ID a specific West Bridge device */
- cy_as_hal_device_tag tag,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep
- );
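-
-/* A hedged sketch: a HAL with no transfer-size limit of its own can defer
-   to the hardware limit by returning CY_AS_DMA_MAX_SIZE_HW_SIZE, as
-   described in the DMA Model notes at the top of this file.
-*/
-uint32_t cy_as_hal_dma_max_request_size(cy_as_hal_device_tag tag,
-	cy_as_end_point_number_t ep)
-{
-	(void)tag;
-	(void)ep;
-	/* let the DMA manager cap requests at what each endpoint accepts */
-	return CY_AS_DMA_MAX_SIZE_HW_SIZE;
-}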
-
-/* Summary
- This function sets the WAKEUP pin to a specific state on the
- West Bridge device.
-
- Description
- In order to enter the standby mode, the WAKEUP pin must be
- de-asserted. In order to resume from standby mode, the WAKEUP
- pin must be asserted. This function provides the mechanism to
- do this.
-
- Returns
- 1 if the pin was changed, 0 if the HAL layer does not support
- changing this pin
-*/
-EXTERN uint32_t
-cy_as_hal_set_wakeup_pin(
- /* The tag to ID a specific West Bridge device */
- cy_as_hal_device_tag tag,
- /* The desired state of the wakeup pin */
- cy_bool state
- );
-
-/* Summary
- Synchronise the West Bridge device clocks to re-establish device
- connectivity.
-
- Description
- When the Astoria bridge device is working in SPI mode, a long
- period of inactivity can cause a loss of serial synchronisation
- between the processor and Astoria. This function is called by
- the API when it detects such a condition, and is expected to take
- the action required to re-establish clock synchronisation between
- the devices.
-
- Returns
- CyTrue if the attempt to re-synchronise is successful,
- CyFalse if not.
- */
-EXTERN cy_bool
-cy_as_hal_sync_device_clocks(
- /* The tag to ID a specific West Bridge device */
- cy_as_hal_device_tag tag
- );
-
-/* Summary
- Initialize West Bridge device registers that may have been
- modified while the device was in standby.
-
- Description
- The content of some West Bridge registers may be lost when
- the device is placed in standby mode. This function restores
- these register contents so that the device can continue to
- function normally after it wakes up from standby mode.
-
- This function is required to perform operations only when the
- API is being used with the Astoria device in one of the PNAND
- modes or in the PSPI mode. It can be a no-operation in all
- other cases.
-
- Returns
- None
- */
-EXTERN void
-cy_as_hal_init_dev_registers(
- /* The tag to ID a specific West Bridge device */
- cy_as_hal_device_tag tag,
- /* Indicates whether this is a wake-up from standby. */
- cy_bool is_standby_wakeup
- );
-
-/* Summary
- This function reads a set of P-port accessible device registers and
- stores their value for later use.
-
- Description
- The West Bridge Astoria device silicon has a known problem when
- operating in SPI mode on the P-port, where some of the device
- registers lose their value when the device goes in and out of
- standby mode. The suggested work-around is to reset the Astoria
- device as part of the wakeup procedure from standby.
-
- This requires that the values of some of the P-port accessible
- registers be restored to their pre-standby values after it has
- been reset. This HAL function can be used to read and store
- the values of these registers at the point where the device is
- being placed in standby mode.
-
- Returns
- None
-
- See Also
- * CyAsHalRestoreRegsAfterStandby
- */
-EXTERN void
-cy_as_hal_read_regs_before_standby(
- /* The tag to ID a specific West Bridge device */
- cy_as_hal_device_tag tag
- );
-
-/* Summary
- This function restores the old values to a set of P-port
- accessible device registers.
-
- Description
- This function is part of the work-around to a known West
- Bridge Astoria device error when operating in SPI mode on
- the P-port. This function is used to restore a set of
- P-port accessible registers to the values they had before
- the device was placed in standby mode.
-
- Returns
- None
-
- See Also
- * CyAsHalReadRegsBeforeStandby
- */
-EXTERN void
-cy_as_hal_restore_regs_after_standby(
- /* The tag to ID a specific West Bridge device */
- cy_as_hal_device_tag tag
- );
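The expected call ordering around a standby cycle is easy to get wrong, so a minimal caller-side sketch follows. The point at which the platform actually enters and leaves standby is elided, and cy_true is assumed to be the lower-case spelling of CyTrue used by this API; this is illustrative usage, not the driver's actual code.

/* Minimal sketch of the standby save/restore ordering (assumed usage;
 * platform-specific standby entry and wake-up are elided). */
static void example_standby_cycle(cy_as_hal_device_tag tag)
{
	/* Save the P-port accessible registers before entering standby. */
	cy_as_hal_read_regs_before_standby(tag);

	/* ... platform-specific standby entry and wake-up happen here ... */

	/* After wake-up, re-initialize lost register state and restore
	 * the saved P-port accessible register values. */
	cy_as_hal_init_dev_registers(tag, cy_true);
	cy_as_hal_restore_regs_after_standby(tag);
}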
-
-/*
- * The functions below this comment are part of the HAL layer,
- * as the HAL layer consists of the abstraction to both the
- hardware platform and the operating system. However, the
- * functions below this comment all relate to the operating
- * environment and not specifically to the hardware platform
- * or specific device.
- */
-
-/* Summary
- This function allocates a block of memory
-
- Description
- This is the HAL layer equivalent of the malloc() function.
-
- Returns
- a pointer to a block of memory
-
- See Also
- * CyAsHalFree
-*/
-EXTERN void *
-cy_as_hal_alloc(
- /* The size of the memory block to allocate */
- uint32_t size
- );
-
-/* Summary
- This function frees a previously allocated block of memory
-
- Description
- This is the HAL layer equivalent of the free() function.
-
- Returns
- None
-
- See Also
- * CyAsHalAlloc
-*/
-EXTERN void
-cy_as_hal_free(
- /* Pointer to a memory block to free */
- void *ptr
- );
-
-/* Summary
- This function is a malloc equivalent that can be used from an
- interrupt context.
-
- Description
- This function is a malloc equivalent that will be called from the
- API in callbacks. This function is required to be able to provide
- memory in interrupt context.
-
- Notes
- For platforms where it is not possible to allocate memory in interrupt
- context, we provide a reference allocator that takes memory during
- initialization and implements malloc/free using this memory.
- See the <install>/api/hal/fpga/cyashalblkalloc.[ch] files for the
- implementation, and the <install>/api/hal/fpga/cyashalfpga.c file
- for an example of the use of this allocator.
-
- Returns
- A pointer to the allocated block of memory
-
- See Also
- * CyAsHalCBFree
- * CyAsHalAlloc
-*/
-EXTERN void *
-cy_as_hal_c_b_alloc(
- /* The size of the memory block to allocate */
- uint32_t size
- );
-
-/* Summary
- This function frees the memory allocated through the CyAsHalCBAlloc
- call.
-
- Description
- This function frees memory allocated through the CyAsHalCBAlloc
- call, and is also required to support calls from interrupt
- context.
-
- Returns
- None
-
- See Also
- * CyAsHalCBAlloc
- * CyAsHalFree
-*/
-EXTERN void
-cy_as_hal_c_b_free(
- /* Pointer to the memory block to be freed */
- void *ptr
- );
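For Linux-based ports, one possible (non-authoritative) implementation of this interrupt-safe allocator pair simply maps onto kmalloc() with GFP_ATOMIC, which may be called from interrupt context; the reference block allocator mentioned above is an alternative when atomic allocation is not acceptable.

#include <linux/types.h>
#include <linux/slab.h>

/* One possible Linux sketch of the interrupt-safe allocator pair. */
void *cy_as_hal_c_b_alloc(uint32_t size)
{
	/* GFP_ATOMIC permits allocation from interrupt context at the
	 * cost of a higher failure rate under memory pressure. */
	return kmalloc(size, GFP_ATOMIC);
}

void cy_as_hal_c_b_free(void *ptr)
{
	kfree(ptr);
}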
-
-/* Summary
- This function sets a block of memory to a specific value
-
- Description
- This function is the HAL layer equivalent of the memset() function.
-
- Returns
- None
-*/
-EXTERN void
-cy_as_mem_set(
- /* A pointer to a block of memory to set */
- void *ptr,
- /* The value to set the memory to */
- uint8_t value,
- /* The number of bytes to set */
- uint32_t cnt
- );
-
-/* Summary
- This function creates or initializes a sleep channel
-
- Description
- This function creates or initializes a sleep channel. The
- sleep channel is defined using the HAL data structure
- CyAsHalSleepChannel.
-
- Returns
- CyTrue if the initialization was successful, and CyFalse otherwise
-
- See Also
- * CyAsHalSleepChannel
- * CyAsHalDestroySleepChannel
- * CyAsHalSleepOn
- * CyAsHalWake
-*/
-EXTERN cy_bool
-cy_as_hal_create_sleep_channel(
- /* Pointer to the sleep channel to create/initialize */
- cy_as_hal_sleep_channel *chan
- );
-
-/* Summary
- This function destroys an existing sleep channel
-
- Description
- This function destroys an existing sleep channel. The sleep channel
- is of type CyAsHalSleepChannel.
-
- Returns
- CyTrue if the channel was destroyed, and CyFalse otherwise
-
- See Also
- * CyAsHalSleepChannel
- * CyAsHalCreateSleepChannel
- * CyAsHalSleepOn
- * CyAsHalWake
-*/
-EXTERN cy_bool
-cy_as_hal_destroy_sleep_channel(
- /* The sleep channel to destroy */
- cy_as_hal_sleep_channel chan
- );
-
-/* Summary
- This function causes the calling process or thread to sleep until
- CyAsHalWake() is called
-
- Description
- This function causes the calling process or thread to sleep.
- When CyAsHalWake() is called on the same sleep channel, the
- process or thread is awakened and allowed to run.
-
- Returns
- CyTrue if the thread or process is asleep, and CyFalse otherwise
-
- See Also
- * CyAsHalSleepChannel
- * CyAsHalWake
-*/
-EXTERN cy_bool
-cy_as_hal_sleep_on(
- /* The sleep channel to sleep on */
- cy_as_hal_sleep_channel chan,
- /* The maximum time to sleep in milli-seconds */
- uint32_t ms
- );
-
-/* Summary
- This function causes the process or thread sleeping on the given
- sleep channel to wake
-
- Description
- This function causes the process or thread sleeping on the given
- sleep channel to wake. The channel is identified by the chan argument.
-
- Returns
- CyTrue if the thread or process is awake, and CyFalse otherwise
-
- See Also
- * CyAsHalSleepChannel
- * CyAsHalSleepOn
-*/
-EXTERN cy_bool
-cy_as_hal_wake(
- /* The sleep channel to wake */
- cy_as_hal_sleep_channel chan
- );
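An illustrative wait-for-event pattern built on these four calls is sketched below. The reply_ready flag and the use of cy_true are assumptions made for the example, and error handling is omitted.

/* Illustrative sleep/wake usage (reply_ready and cy_true are
 * assumptions for this sketch; error handling omitted). */
static volatile cy_bool reply_ready;
static cy_as_hal_sleep_channel reply_chan;

static void wait_for_reply(void)
{
	cy_as_hal_create_sleep_channel(&reply_chan);

	while (!reply_ready) {
		/* Sleep for at most 10 ms per iteration, then re-check. */
		cy_as_hal_sleep_on(reply_chan, 10);
	}

	cy_as_hal_destroy_sleep_channel(reply_chan);
}

/* Called from the interrupt path once the expected reply has arrived. */
static void reply_arrived(void)
{
	reply_ready = cy_true;
	cy_as_hal_wake(reply_chan);
}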
-
-/* Summary
- This function disables interrupts, ensuring that short bursts
- of code can be run without danger of interrupt handlers running.
-
- Description
- There are cases within the API when lists must be manipulated by
- both the API and the associated interrupt handlers. In these
- cases, interrupts must be disabled to ensure the integrity of the
- list during the modification. This function is used to disable
- interrupts during the short intervals where these lists are being
- changed.
-
- The HAL must have the ability to nest calls to
- CyAsHalDisableInterrupts and CyAsHalEnableInterrupts.
-
- Returns
- Any interrupt related state value which will be passed back into
- the subsequent CyAsHalEnableInterrupts call.
-
- See Also
- * CyAsHalEnableInterrupts
-*/
-EXTERN uint32_t
-cy_as_hal_disable_interrupts(void);
-
-/* Summary
- This function re-enables interrupts after a critical section of
- code in the API has been completed.
-
- Description
- There are cases within the API when lists must be manipulated by
- both the API and the associated interrupt handlers. In these
- cases, interrupts must be disabled to ensure the integrity of the
- list during the modification. This function is used to enable
- interrupts after the short intervals where these lists are being
- changed.
-
- See Also
- * CyAsHalDisableInterrupts
-*/
-EXTERN void
-cy_as_hal_enable_interrupts(
- /* Value returned by the previous CyAsHalDisableInterrupts call. */
- uint32_t value
- );
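The nesting contract is easiest to see in code: the value returned by the disable call is handed back to the matching enable call, much like a saved interrupt flag. In the sketch below, struct example_node is a placeholder for whatever shared structure is being protected.

/* Illustrative critical section; struct example_node is a placeholder. */
struct example_node { struct example_node *next; };

static void remove_head(struct example_node **list)
{
	uint32_t intr_state;

	intr_state = cy_as_hal_disable_interrupts();
	if (*list)
		*list = (*list)->next;	/* shared-list manipulation */
	cy_as_hal_enable_interrupts(intr_state);
}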
-
-/* Summary
- This function sleeps for 150 ns.
-
- Description
- This function sleeps for at least 150 ns before allowing the calling
- function to continue. It is used for a specific purpose that requires
- a delay of at least 150 ns.
-*/
-EXTERN void
-cy_as_hal_sleep150(
- );
-
-/* Summary
- This function sleeps for the given number of milliseconds
-
- Description
- This function sleeps for at least the given number of milliseconds.
-*/
-EXTERN void
-cy_as_hal_sleep(
- uint32_t ms
- );
-
-/* Summary
- This function asserts when the condition evaluates to zero
-
- Description
- Within the API there are conditions which are checked to ensure
- the integrity of the code. These conditions are checked only
- within a DEBUG build. This function is used to check the condition
- and if the result evaluates to zero, it should be considered a
- fatal error that should be reported to Cypress.
-*/
-EXTERN void
-cy_as_hal_assert(
- /* The condition to evaluate */
- cy_bool cond
- );
-
-/* Summary
- This function prints a message from the API to a human readable device
-
- Description
- There are places within the West Bridge API where printing a message
- is useful to the debug process. This function provides the mechanism
- to print a message.
-
- Returns
- NONE
-*/
-EXTERN void
-cy_as_hal_print_message(
- /* The message to print */
- const char *fmt_p,
- ... /* Variable arguments */
- );
-
-/* Summary
- This function reports whether the HAL implementation uses
- polling to service data coming from the West Bridge.
-
- Description
- This function reports whether the HAL implementation uses
- polling to service data coming from the West Bridge.
-
- Returns
- CyTrue if the HAL polls the West Bridge Interrupt Status registers
- to complete operations, CyFalse if the HAL is interrupt driven.
- */
-EXTERN cy_bool
-cy_as_hal_is_polling(
- void);
-
-#endif
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasintr.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasintr.h
deleted file mode 100644
index 60a6fffb5d5..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasintr.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* Cypress West Bridge API header file (cyasintr.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASINTR_H_
-#define _INCLUDED_CYASINTR_H_
-
-#include "cyasdevice.h"
-
-#include "cyas_cplus_start.h"
-
-/* Summary
- Initialize the interrupt manager module
-
- Description
- This function is called to initialize the interrupt module.
- This module enables interrupts as well as services West Bridge
- related interrupts by determining the source of the interrupt
- and calling the appropriate handler function.
-
- Notes
- If the dmaintr parameter is TRUE, the initialization code
- initializes the interrupt mask to have the DMA related interrupt
- enabled via the general purpose interrupt. However, the interrupt
- service function assumes that the DMA interrupt is handled by the
- HAL layer before the interrupt module handler function is called.
-
- Returns
- * CY_AS_ERROR_SUCCESS - the interrupt module was initialized
- * correctly
- * CY_AS_ERROR_ALREADY_RUNNING - the interrupt module was already
- * started
-
- See Also
- * CyAsIntrStop
- * CyAsServiceInterrupt
-*/
-cy_as_return_status_t
-cy_as_intr_start(
- /* Device being initialized */
- cy_as_device *dev_p,
- /* If true, enable the DMA interrupt through the INT signal */
- cy_bool dmaintr
- );
-
-/* Summary
- Stop the interrupt manager module
-
- Description
- This function stops the interrupt module and masks all interrupts
- from the West Bridge device.
-
- Returns
- * CY_AS_ERROR_SUCCESS - the interrupt module was stopped
- * successfully
- * CY_AS_ERROR_NOT_RUNNING - the interrupt module was not
- * running
-
- See Also
- * CyAsIntrStart
- * CyAsServiceInterrupt
-*/
-cy_as_return_status_t
-cy_as_intr_stop(
- /* Device being stopped */
- cy_as_device *dev_p
- );
-
-
-/* Summary
- The interrupt service routine for West Bridge
-
- Description
- When an interrupt is detected, this function is called to
- service the West Bridge interrupt. It is safe and efficient
- for this function to be called when no West Bridge interrupt
- has occurred. This function will quickly determine that the
- interrupt did not come from West Bridge and return.
-*/
-void cy_as_intr_service_interrupt(
- /* The USER supplied tag for this device */
- cy_as_hal_device_tag tag
- );
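On Linux, the HAL would typically register an ordinary interrupt handler that simply forwards to this routine. The sketch below assumes the HAL device tag was passed as the dev_id cookie when the IRQ was requested; that mapping is an assumption of the example.

#include <linux/interrupt.h>

/* Illustrative Linux ISR wrapper; recovering the HAL tag from dev_id
 * is an assumption of this sketch. */
static irqreturn_t example_westbridge_isr(int irq, void *dev_id)
{
	cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_id;

	cy_as_intr_service_interrupt(tag);
	return IRQ_HANDLED;
}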
-
-#include "cyas_cplus_end.h"
-
-#endif /* _INCLUDED_CYASINTR_H_ */
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyaslep2pep.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyaslep2pep.h
deleted file mode 100644
index 6626cc45474..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyaslep2pep.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Cypress West Bridge API header file (cyaslep2pep.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASLEP2PEP_H_
-#define _INCLUDED_CYASLEP2PEP_H_
-
-#include "cyasdevice.h"
-
-extern cy_as_return_status_t
-cy_as_usb_map_logical2_physical(cy_as_device *dev_p);
-
-extern cy_as_return_status_t
-cy_as_usb_setup_dma(cy_as_device *dev_p);
-
-extern cy_as_return_status_t
-cy_as_usb_set_dma_sizes(cy_as_device *dev_p);
-
-#endif
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyaslowlevel.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyaslowlevel.h
deleted file mode 100644
index 5c7972f91ef..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyaslowlevel.h
+++ /dev/null
@@ -1,366 +0,0 @@
-/* Cypress West Bridge API header file (cyaslowlevel.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASLOWLEVEL_H_
-#define _INCLUDED_CYASLOWLEVEL_H_
-
-/*@@Low Level Communications
-
- Summary
- The low level communications module is responsible for
- communications between the West Bridge device and the P
- port processor. Communication is organized as a series
- of requests and subsequent responses. For each request
- there is one and only one response. Requests may go
- from the West Bridge device to the P port processor, or
- from the P Port processor to the West Bridge device.
-
- Description
- Requests are issued across what is called a context. A
- context is a single channel of communications from one
- processor to another processor. There can be only a single
- request outstanding on a context at a given time. Contexts
- are used to identify subsystems that can only process a
- single request at a time, but are independent of other
- contexts in the system. For instance, there is a context
- for communicating storage commands from the P port processor
- to the West Bridge device. There is also a context for
- communicating USB commands from the P port processor to the
- West Bridge device.
-
- Requests and responses are identical with the exception of
- the type bit in the request/response header. If the type
- bit is one, the packet is a request. If this bit is zero,
- the packet is a response. Also encoded within the header of
- the request/response is the code. The code is a command
- code for a request, or a response code for a response. For
- a request, the code is a function of the context. The code
- 0 has one meaning for the storage context and a different
- meaning for the USB context. The code is treated differently
- in the response. If the code in the response is less than 16,
- then the meaning of the response is global across all
- contexts. If the response is greater than or equal to 16,
- then the response is specific to the associated context.
-
- Requests and responses are transferred between processors
- through the mailbox registers. It may take one or more cycles
- to transmit a complete request or response. The context is
- encoded into each cycle of the transfer to ensure the
- receiving processor can route the data to the appropriate
- context for processing. In this way, the traffic from multiple
- contexts can be multiplexed into a single data stream through
- the mailbox registers by the sending processor, and
- demultiplexed from the mailbox registers by the receiving
- processor.
-
- * Firmware Assumptions *
- The firmware assumes that mailbox contents will be consumed
- immediately. Therefore for multi-cycle packets, the data is
- sent in a tight polling loop from the firmware. This implies
- that the data must be read from the mailbox register on the P
- port side and processed immediately or performance of the
- firmware will suffer. In order to ensure this is the case,
- the data from the mailboxes is read and stored immediately
- in a per context buffer. This occurs until the entire packet
- is received at which time the request packet is processed.
- Since the protocol is designed to allow for only one
- outstanding packet at a time, the firmware can never be in a
- position of waiting on the mailbox registers while the P port
- is processing a request. Only after the response to the
- previous request is sent will another request be sent.
-*/
-
-#include "cyashal.h"
-#include "cyasdevice.h"
-
-#include "cyas_cplus_start.h"
-
-/*
- * Constants
- */
-#define CY_AS_REQUEST_RESPONSE_CODE_MASK (0x00ff)
-#define CY_AS_REQUEST_RESPONSE_CONTEXT_MASK (0x0F00)
-#define CY_AS_REQUEST_RESPONSE_CONTEXT_SHIFT (8)
-#define CY_AS_REQUEST_RESPONSE_TYPE_MASK (0x4000)
-#define CY_AS_REQUEST_RESPONSE_LAST_MASK (0x8000)
-#define CY_AS_REQUEST_RESPONSE_CLEAR_STR_FLAG (0x1000)
-
-/*
- * These macros extract the data from a 16 bit value
- */
-#define cy_as_mbox_get_code(c) \
- ((uint8_t)((c) & CY_AS_REQUEST_RESPONSE_CODE_MASK))
-#define cy_as_mbox_get_context(c) \
- ((uint8_t)(((c) & CY_AS_REQUEST_RESPONSE_CONTEXT_MASK) \
- >> CY_AS_REQUEST_RESPONSE_CONTEXT_SHIFT))
-#define cy_as_mbox_is_last(c) \
- ((c) & CY_AS_REQUEST_RESPONSE_LAST_MASK)
-#define cy_as_mbox_is_request(c) \
- (((c) & CY_AS_REQUEST_RESPONSE_TYPE_MASK) != 0)
-#define cy_as_mbox_is_response(c) \
- (((c) & CY_AS_REQUEST_RESPONSE_TYPE_MASK) == 0)
-
-/*
- * These macros pack data into or extract data
- * from the box0 field of the request or response
- */
-#define cy_as_ll_request_response__set_code(req, code) \
- ((req)->box0 = \
- ((req)->box0 & ~CY_AS_REQUEST_RESPONSE_CODE_MASK) | \
- (code & CY_AS_REQUEST_RESPONSE_CODE_MASK))
-
-#define cy_as_ll_request_response__get_code(req) \
- cy_as_mbox_get_code((req)->box0)
-
-#define cy_as_ll_request_response__set_context(req, context) \
- ((req)->box0 |= ((context) << \
- CY_AS_REQUEST_RESPONSE_CONTEXT_SHIFT))
-
-#define cy_as_ll_request_response__set_clear_storage_flag(req) \
- ((req)->box0 |= CY_AS_REQUEST_RESPONSE_CLEAR_STR_FLAG)
-
-#define cy_as_ll_request_response__get_context(req) \
- cy_as_mbox_get_context((req)->box0)
-
-#define cy_as_ll_request_response__is_last(req) \
- cy_as_mbox_is_last((req)->box0)
-
-#define CY_an_ll_request_response___set_last(req) \
- ((req)->box0 |= CY_AS_REQUEST_RESPONSE_LAST_MASK)
-
-#define cy_as_ll_request_response__is_request(req) \
- cy_as_mbox_is_request((req)->box0)
-
-#define cy_as_ll_request_response__set_request(req) \
- ((req)->box0 |= CY_AS_REQUEST_RESPONSE_TYPE_MASK)
-
-#define cy_as_ll_request_response__set_response(req) \
- ((req)->box0 &= ~CY_AS_REQUEST_RESPONSE_TYPE_MASK)
-
-#define cy_as_ll_request_response__is_response(req) \
- cy_as_mbox_is_response((req)->box0)
-
-#define cy_as_ll_request_response__get_word(req, offset) \
- ((req)->data[(offset)])
-
-#define cy_as_ll_request_response__set_word(req, offset, \
- value) ((req)->data[(offset)] = value)
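A worked example may help make the header-word layout concrete; the code and context values below are chosen only for illustration, but the masks and shifts are the ones defined above.

/* Worked example (values chosen only for illustration): a request
 * with command code 0x21 on context 3.
 *
 *   box0 = 0x0021             code
 *        | 0x0300             context 3 << CONTEXT_SHIFT
 *        | 0x4000             request (type bit set)
 *        = 0x4321
 *
 * The extraction macros recover the fields:
 *   cy_as_mbox_get_code(0x4321)    == 0x21
 *   cy_as_mbox_get_context(0x4321) == 0x03
 *   cy_as_mbox_is_request(0x4321)  != 0
 */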
-
-typedef enum cy_as_remove_request_result_t {
- cy_as_remove_request_sucessful,
- cy_as_remove_request_in_transit,
- cy_as_remove_request_not_found
-} cy_as_remove_request_result_t;
-
-/* Summary
- Start the low level communications module
-
- Description
-*/
-cy_as_return_status_t
-cy_as_ll_start(
- cy_as_device *dev_p
- );
-
-cy_as_return_status_t
-cy_as_ll_stop(
- cy_as_device *dev_p
- );
-
-
-cy_as_ll_request_response *
-cy_as_ll_create_request(
- cy_as_device *dev_p,
- uint16_t code,
- uint8_t context,
- /* Length of the request in 16 bit words */
- uint16_t length
- );
-
-void
-cy_as_ll_init_request(
- cy_as_ll_request_response *req_p,
- uint16_t code,
- uint16_t context,
- uint16_t length);
-
-void
-cy_as_ll_init_response(
- cy_as_ll_request_response *req_p,
- uint16_t length);
-
-void
-cy_as_ll_destroy_request(
- cy_as_device *dev_p,
- cy_as_ll_request_response *);
-
-cy_as_ll_request_response *
-cy_as_ll_create_response(
- cy_as_device *dev_p,
- /* Length of the response in 16 bit words */
- uint16_t length
- );
-
-cy_as_remove_request_result_t
-cy_as_ll_remove_request(
- cy_as_device *dev_p,
- cy_as_context *ctxt_p,
- cy_as_ll_request_response *req_p,
- cy_bool force
- );
-void
-cy_as_ll_remove_all_requests(cy_as_device *dev_p,
- cy_as_context *ctxt_p);
-
-void
-cy_as_ll_destroy_response(
- cy_as_device *dev_p,
- cy_as_ll_request_response *);
-
-cy_as_return_status_t
-cy_as_ll_send_request(
- /* The West Bridge device */
- cy_as_device *dev_p,
- /* The request to send */
- cy_as_ll_request_response *req,
- /* Storage for a reply, must be sure it is of sufficient size */
- cy_as_ll_request_response *resp,
- /* If true, this is a sync request */
- cy_bool sync,
- /* Callback to call when reply is received */
- cy_as_response_callback cb
-);
-
-cy_as_return_status_t
-cy_as_ll_send_request_wait_reply(
- /* The West Bridge device */
- cy_as_device *dev_p,
- /* The request to send */
- cy_as_ll_request_response *req,
- /* Storage for a reply, must be sure it is of sufficient size */
- cy_as_ll_request_response *resp
-);
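A synchronous request/response exchange using these helpers could look like the sketch below. The code value 0x10, the context value 3, and the one-word payload are placeholders, not values defined by this header.

/* Illustrative synchronous exchange (code, context and payload values
 * are placeholders for this sketch). */
static cy_as_return_status_t example_sync_exchange(cy_as_device *dev_p)
{
	cy_as_return_status_t ret;
	cy_as_ll_request_response *req, *resp;

	req = cy_as_ll_create_request(dev_p, 0x10, 3, 1);
	if (!req)
		return CY_AS_ERROR_OUT_OF_MEMORY;

	resp = cy_as_ll_create_response(dev_p, 1);
	if (!resp) {
		cy_as_ll_destroy_request(dev_p, req);
		return CY_AS_ERROR_OUT_OF_MEMORY;
	}

	cy_as_ll_request_response__set_word(req, 0, 0x1234);
	ret = cy_as_ll_send_request_wait_reply(dev_p, req, resp);

	cy_as_ll_destroy_request(dev_p, req);
	cy_as_ll_destroy_response(dev_p, resp);
	return ret;
}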
-
-/* Summary
- This function registers a callback function to be called when a
- request arrives on a given context.
-
- Description
-
- Returns
- * CY_AS_ERROR_SUCCESS
-*/
-extern cy_as_return_status_t
-cy_as_ll_register_request_callback(
- cy_as_device *dev_p,
- uint8_t context,
- cy_as_response_callback cb
- );
-
-/* Summary
- This function packs a set of bytes given by the data_p pointer
- into a request, reply structure.
-*/
-extern void
-cy_as_ll_request_response__pack(
- /* The destination request or response */
- cy_as_ll_request_response *req,
- /* The offset of where to pack the data */
- uint32_t offset,
- /* The length of the data to pack in bytes */
- uint32_t length,
- /* The data to pack */
- void *data_p
- );
-
-/* Summary
- This function unpacks a set of bytes from a request/reply
- structure into a segment of memory given by the data_p pointer.
-*/
-extern void
-cy_as_ll_request_response__unpack(
- /* The source of the data to unpack */
- cy_as_ll_request_response *req,
- /* The offset of the data to unpack */
- uint32_t offset,
- /* The length of the data to unpack in bytes */
- uint32_t length,
- /* The destination of the unpack operation */
- void *data_p
- );
-
-/* Summary
- This function sends a status response back to the West Bridge
- device in response to a previously sent request.
-*/
-extern cy_as_return_status_t
-cy_as_ll_send_status_response(
- /* The West Bridge device */
- cy_as_device *dev_p,
- /* The context to send the response on */
- uint8_t context,
- /* The success/failure code to send */
- uint16_t code,
- /* Flag to clear wait on storage context */
- uint8_t clear_storage);
-
-/* Summary
- This function sends a response back to the West Bridge device.
-
- Description
- This function sends a response back to the West Bridge device.
- The response is sent on the context given by the 'context'
- variable. The code for the response is given by the 'code'
- argument. The data for the response is given by the data and
- length arguments.
-*/
-extern cy_as_return_status_t
-cy_as_ll_send_data_response(
- /* The West Bridge device */
- cy_as_device *dev_p,
- /* The context to send the response on */
- uint8_t context,
- /* The response code to use */
- uint16_t code,
- /* The length of the data for the response */
- uint16_t length,
- /* The data for the response */
- void *data
-);
-
-/* Summary
- This function removes any requests of the given type
- from the given context.
-
- Description
- This function removes requests of a given type from the
- context given via the context number.
-*/
-extern cy_as_return_status_t
-cy_as_ll_remove_ep_data_requests(
- /* The West Bridge device */
- cy_as_device *dev_p,
- cy_as_end_point_number_t ep
- );
-
-#include "cyas_cplus_end.h"
-
-#endif /* _INCLUDED_CYASLOWLEVEL_H_ */
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmedia.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmedia.h
deleted file mode 100644
index 0e25ea94481..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmedia.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Cypress West Bridge API header file (cyasmedia.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASMEDIA_H_
-#define _INCLUDED_CYASMEDIA_H_
-
-#include "cyas_cplus_start.h"
-
-
-/* Summary
- Specifies a specific type of media supported by West Bridge
-
- Description
- The West Bridge device supports five specific types of media
- as storage/IO devices attached to it's S-Port. This type is
- used to indicate the type of media being referenced in any
- API call.
-*/
-typedef enum cy_as_media_type {
- /* Flash NAND memory (may be SLC or MLC) */
- cy_as_media_nand = 0x00,
- /* An SD flash memory device */
- cy_as_media_sd_flash = 0x01,
- /* An MMC flash memory device */
- cy_as_media_mmc_flash = 0x02,
- /* A CE-ATA disk drive */
- cy_as_media_ce_ata = 0x03,
- /* SDIO device. */
- cy_as_media_sdio = 0x04,
- cy_as_media_max_media_value = 0x05
-
-} cy_as_media_type;
-
-#include "cyas_cplus_end.h"
-
-#endif /* _INCLUDED_CYASMEDIA_H_ */
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmisc.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmisc.h
deleted file mode 100644
index df7c2b66cf2..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmisc.h
+++ /dev/null
@@ -1,1549 +0,0 @@
-/* Cypress West Bridge API header file (cyasmisc.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASMISC_H_
-#define _INCLUDED_CYASMISC_H_
-
-#include "cyashal.h"
-#include "cyastypes.h"
-#include "cyasmedia.h"
-
-#include "cyas_cplus_start.h"
-
-#define CY_AS_LEAVE_STANDBY_DELAY_CLOCK (1)
-#define CY_AS_RESET_DELAY_CLOCK (1)
-
-#define CY_AS_LEAVE_STANDBY_DELAY_CRYSTAL (5)
-#define CY_AS_RESET_DELAY_CRYSTAL (5)
-
-/* The maximum number of buses supported */
-#define CY_AS_MAX_BUSES (2)
-
-/* The maximum number of storage devices supported per bus */
-#define CY_AS_MAX_STORAGE_DEVICES (1)
-
-#define CY_AS_FUNCTCBTYPE_DATA_MASK (0x60000000U)
-#define CY_AS_FUNCTCBTYPE_TYPE_MASK (0x1FFFFFFFU)
-
-#define cy_as_funct_c_b_type_get_type(t) \
- ((cy_as_funct_c_b_type)((t) & CY_AS_FUNCTCBTYPE_TYPE_MASK))
-#define cy_as_funct_c_b_type_contains_data(t) \
- (((cy_as_funct_c_b_type)((t) & \
- CY_AS_FUNCTCBTYPE_DATA_MASK)) == CY_FUNCT_CB_DATA)
-
-/**************************************
- * West Bridge Types
- **************************************/
-
-/* Summary
- Specifies a handle to a West Bridge device
-
- Description
- This type represents an opaque handle to a West Bridge device.
- This handle is created via the CyAsMiscCreateDevice() function
- and is used in all subsequent calls that communicate to the West
- Bridge device.
-
- See Also
- * CyAsMiscCreateDevice
- * CyAsMiscDestroyDevice
-*/
-typedef void *cy_as_device_handle;
-
-/* Summary
- This data type gives the mode for the DACK# signal
-*/
-typedef enum cy_as_device_dack_mode {
- cy_as_device_dack_ack, /* Operate in the ACK mode */
- cy_as_device_dack_eob /* Operate in the EOB mode */
-} cy_as_device_dack_mode;
-
-/* Summary
- This data structure gives the options for all hardware features.
-
- Description
- This structure contains the information required to initialize the
- West Bridge hardware. Any features of the device that can be
- configured by the caller are specified here.
-
- See Also
- * CyAsMiscConfigure
-*/
-typedef struct cy_as_device_config {
- /* If TRUE, the P port is running in SRAM mode. */
- cy_bool srammode;
- /* If TRUE, the P port is synchronous, otherwise async */
- cy_bool sync;
- /* If TRUE, DMA req will be delivered via the interrupt signal */
- cy_bool dmaintr;
- /* Mode for the DACK# signal */
- cy_as_device_dack_mode dackmode;
- /* If TRUE, the DRQ line is active high, otherwise active low */
- cy_bool drqpol;
- /* If TRUE, the DACK line is active high, otherwise active low */
- cy_bool dackpol;
- /* If TRUE, the clock is connected to a crystal, otherwise it is
- connected to a clock */
- cy_bool crystal;
-} cy_as_device_config;
-
-
-/* Summary
- Specifies a resource that can be owned by either the West Bridge
- device or by the processor.
-
- Description
- This enumerated type identifies a resource that can be owned
- either by the West Bridge device, or by the processor attached to
- the P port of the West Bridge device.
-
- See Also
- * CyAsMiscAcquireResource
- * CyAsMiscReleaseResource
-*/
-typedef enum cy_as_resource_type {
- cy_as_bus_u_s_b = 0, /* The USB D+ and D- pins */
- cy_as_bus_1 = 1, /* The SDIO bus */
- cy_as_bus_0 = 2 /* The NAND bus (not implemented) */
-} cy_as_resource_type;
-
-/* Summary
- Specifies the reset type for a software reset operation.
-
- Description
- When the West Bridge device is reset, there are two types of
- reset that are possible. This type indicates the type of reset
- requested.
-
- Notes
- Both of these reset types are software based resets; and are
- distinct from a chip level HARD reset that is applied through
- the reset pin on the West Bridge.
-
- The CyAsResetSoft type resets only the on-chip micro-controller
- in the West Bridge. In this case, the previously loaded firmware
- will continue running. However, the Storage and USB stack
- operations will need to be restarted, as any state relating to
- these would have been lost.
-
- The CyAsResetHard type resets the entire West Bridge chip, and will
- need a fresh configuration and firmware download.
-
- See Also
- * <LINK CyAsMiscReset>
- */
-
-typedef enum cy_as_reset_type {
- /* Just resets the West Bridge micro-controller */
- cy_as_reset_soft,
- /* Resets entire device, firmware must be reloaded and
- the west bridge device must be re-initialized */
- cy_as_reset_hard
-} cy_as_reset_type;
-
-
-
-/* Summary
- This type specifies the polarity of the SD power pin.
-
- Description
- Sets the SD power pin (port C, bit 6) to active low or
- active high.
-
-*/
-
-typedef enum cy_as_misc_signal_polarity {
- cy_as_misc_active_high,
- cy_as_misc_active_low
-
-} cy_as_misc_signal_polarity;
-
-
-
-/* Summary
- This type specifies the type of the data returned by a Function
- Callback.
-
- Description
- CY_FUNCT_CB_NODATA - This callback does not return any additional
- information in the data field.
- CY_FUNCT_CB_DATA - The data field is used, and the CyAsFunctCBType
- will also contain the type of this data.
-
- See Also
- CyAsFunctionCallback
-*/
-typedef enum cy_as_funct_c_b_type {
- CY_FUNCT_CB_INVALID = 0x0U,
- /* Data from a CyAsMiscGetFirmwareVersion call. */
- CY_FUNCT_CB_MISC_GETFIRMWAREVERSION,
- /* Data from a CyAsMiscHeartBeatControl call. */
- CY_FUNCT_CB_MISC_HEARTBEATCONTROL,
- /* Data from a CyAsMiscAcquireResource call. */
- CY_FUNCT_CB_MISC_ACQUIRERESOURCE,
- /* Data from a CyAsMiscReadMCURegister call. */
- CY_FUNCT_CB_MISC_READMCUREGISTER,
- /* Data from a CyAsMiscWriteMCURegister call. */
- CY_FUNCT_CB_MISC_WRITEMCUREGISTER,
- /* Data from a CyAsMiscSetTraceLevel call. */
- CY_FUNCT_CB_MISC_SETTRACELEVEL,
- /* Data from a CyAsMiscStorageChanged call. */
- CY_FUNCT_CB_MISC_STORAGECHANGED,
- /* Data from a CyAsMiscGetGpioValue call. */
- CY_FUNCT_CB_MISC_GETGPIOVALUE,
- /* Data from a CyAsMiscSetGpioValue call. */
- CY_FUNCT_CB_MISC_SETGPIOVALUE,
- /* Data from a CyAsMiscDownloadFirmware call. */
- CY_FUNCT_CB_MISC_DOWNLOADFIRMWARE,
- /* Data from a CyAsMiscEnterStandby call. */
- CY_FUNCT_CB_MISC_ENTERSTANDBY,
- /* Data from a CyAsMiscEnterSuspend call. */
- CY_FUNCT_CB_MISC_ENTERSUSPEND,
- /* Data from a CyAsMiscLeaveSuspend call. */
- CY_FUNCT_CB_MISC_LEAVESUSPEND,
- /* Data from a CyAsMiscReset call. */
- CY_FUNCT_CB_MISC_RESET,
- /* Data from a CyAsMiscSetLowSpeedSDFreq or
- * CyAsMiscSetHighSpeedSDFreq call. */
- CY_FUNCT_CB_MISC_SETSDFREQ,
- /* Data from a CyAsMiscSwitchPnandMode call */
- CY_FUNCT_CB_MISC_RESERVELNABOOTAREA,
- /* Data from a CyAsMiscSetSDPowerPolarity call */
- CY_FUNCT_CB_MISC_SETSDPOLARITY,
-
- /* Data from a CyAsStorageStart call. */
- CY_FUNCT_CB_STOR_START,
- /* Data from a CyAsStorageStop call. */
- CY_FUNCT_CB_STOR_STOP,
- /* Data from a CyAsStorageClaim call. */
- CY_FUNCT_CB_STOR_CLAIM,
- /* Data from a CyAsStorageRelease call. */
- CY_FUNCT_CB_STOR_RELEASE,
- /* Data from a CyAsStorageQueryMedia call. */
- CY_FUNCT_CB_STOR_QUERYMEDIA,
- /* Data from a CyAsStorageQueryBus call. */
- CY_FUNCT_CB_STOR_QUERYBUS,
- /* Data from a CyAsStorageQueryDevice call. */
- CY_FUNCT_CB_STOR_QUERYDEVICE,
- /* Data from a CyAsStorageQueryUnit call. */
- CY_FUNCT_CB_STOR_QUERYUNIT,
- /* Data from a CyAsStorageDeviceControl call. */
- CY_FUNCT_CB_STOR_DEVICECONTROL,
- /* Data from a CyAsStorageSDRegisterRead call. */
- CY_FUNCT_CB_STOR_SDREGISTERREAD,
- /* Data from a CyAsStorageCreatePartition call. */
- CY_FUNCT_CB_STOR_PARTITION,
- /* Data from a CyAsStorageGetTransferAmount call. */
- CY_FUNCT_CB_STOR_GETTRANSFERAMOUNT,
- /* Data from a CyAsStorageErase call. */
- CY_FUNCT_CB_STOR_ERASE,
- /* Data from a CyAsStorageCancelAsync call. */
- CY_FUNCT_CB_ABORT_P2S_XFER,
- /* Data from a CyAsUsbStart call. */
- CY_FUNCT_CB_USB_START,
- /* Data from a CyAsUsbStop call. */
- CY_FUNCT_CB_USB_STOP,
- /* Data from a CyAsUsbConnect call. */
- CY_FUNCT_CB_USB_CONNECT,
- /* Data from a CyAsUsbDisconnect call. */
- CY_FUNCT_CB_USB_DISCONNECT,
- /* Data from a CyAsUsbSetEnumConfig call. */
- CY_FUNCT_CB_USB_SETENUMCONFIG,
- /* Data from a CyAsUsbGetEnumConfig call. */
- CY_FUNCT_CB_USB_GETENUMCONFIG,
- /* Data from a CyAsUsbSetDescriptor call. */
- CY_FUNCT_CB_USB_SETDESCRIPTOR,
- /* Data from a CyAsUsbGetDescriptor call. */
- CY_FUNCT_CB_USB_GETDESCRIPTOR,
- /* Data from a CyAsUsbCommitConfig call. */
- CY_FUNCT_CB_USB_COMMITCONFIG,
- /* Data from a CyAsUsbGetNak call. */
- CY_FUNCT_CB_USB_GETNAK,
- /* Data from a CyAsUsbGetStall call. */
- CY_FUNCT_CB_USB_GETSTALL,
- /* Data from a CyAsUsbSignalRemoteWakeup call. */
- CY_FUNCT_CB_USB_SIGNALREMOTEWAKEUP,
- /* Data from a CyAnUsbClearDescriptors call. */
- CY_FUNCT_CB_USB_CLEARDESCRIPTORS,
- /* Data from a CyAnUsbSetMSReportThreshold call. */
- CY_FUNCT_CB_USB_SET_MSREPORT_THRESHOLD,
- /* Data from a CyAsMTPStart call. */
- CY_FUNCT_CB_MTP_START,
- /* Data from a CyAsMTPStop call. */
- CY_FUNCT_CB_MTP_STOP,
- /* Data from a CyAsMTPInitSendObject call. */
- CY_FUNCT_CB_MTP_INIT_SEND_OBJECT,
- /* Data from a CyAsMTPCancelSendObject call. */
- CY_FUNCT_CB_MTP_CANCEL_SEND_OBJECT,
- /* Data from a CyAsMTPInitGetObject call. */
- CY_FUNCT_CB_MTP_INIT_GET_OBJECT,
- /* Data from a CyAsMTPCancelGetObject call. */
- CY_FUNCT_CB_MTP_CANCEL_GET_OBJECT,
- /* Data from a CyAsMTPSendBlockTable call. */
- CY_FUNCT_CB_MTP_SEND_BLOCK_TABLE,
- /* Data from a CyAsMTPStopStorageOnly call. */
- CY_FUNCT_CB_MTP_STOP_STORAGE_ONLY,
- CY_FUNCT_CB_NODATA = 0x40000000U,
- CY_FUNCT_CB_DATA = 0x20000000U
-} cy_as_funct_c_b_type;
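Because the data flag and the base type are packed into one value, a callback can split them with the two macros defined earlier in this header; the combination below is only an illustration.

/* Illustrative decomposition of a callback type value. */
static cy_bool example_has_data(void)
{
	cy_as_funct_c_b_type t = (cy_as_funct_c_b_type)
		(CY_FUNCT_CB_DATA | CY_FUNCT_CB_USB_GETDESCRIPTOR);

	/* cy_as_funct_c_b_type_get_type(t) yields
	 * CY_FUNCT_CB_USB_GETDESCRIPTOR. */
	return cy_as_funct_c_b_type_contains_data(t);
}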
-
-/* Summary
- This type specifies the general West Bridge function callback.
-
- Description
- This callback is supplied as an argument to all asynchronous
- functions in the API. It is called after the asynchronous function
- has completed.
-
- See Also
- CyAsFunctCBType
-*/
-typedef void (*cy_as_function_callback)(
- cy_as_device_handle handle,
- cy_as_return_status_t status,
- uint32_t client,
- cy_as_funct_c_b_type type,
- void *data);
-
-/* Summary
- This type specifies the general West Bridge event that has
- occurred.
-
- Description
- This type is used in the West Bridge misc callback function to
- indicate the type of callback.
-
- See Also
-*/
-typedef enum cy_as_misc_event_type {
- /* This event is sent when West Bridge has finished
- initialization and is ready to respond to API calls. */
- cy_as_event_misc_initialized = 0,
-
- /* This event is sent when West Bridge has left the
- standby state and is ready to respond to commands again. */
- cy_as_event_misc_awake,
-
- /* This event is sent periodically from the firmware
- to the processor. */
- cy_as_event_misc_heart_beat,
-
- /* This event is sent when the West Bridge has left the
- suspend mode and is ready to respond to commands
- again. */
- cy_as_event_misc_wakeup,
-
- /* This event is sent when the firmware image downloaded
- cannot run on the active west bridge device. */
- cy_as_event_misc_device_mismatch
-} cy_as_misc_event_type;
-
-/* Summary
- This type is the type of a callback function that is called when a
- West Bridge misc event occurs.
-
- Description
- At times West Bridge needs to inform the P port processor of events
- that have occurred. These events are asynchronous to the thread of
- control on the P port processor and as such are generally delivered
- via a callback function that is called as part of an interrupt
- handler. This type defines the type of function that must be provided
- as a callback function for West Bridge misc events.
-
- See Also
- * CyAsMiscEventType
-*/
-typedef void (*cy_as_misc_event_callback)(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* The event type being reported */
- cy_as_misc_event_type ev,
- /* The data associated with the event being reported */
- void *evdata
-);
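A minimal event callback might simply log a few of the events; the sketch below leans on cy_as_hal_print_message() from the HAL header and is purely illustrative.

/* Minimal illustrative misc event callback. */
static void example_misc_event_cb(cy_as_device_handle handle,
			cy_as_misc_event_type ev, void *evdata)
{
	(void)handle;
	(void)evdata;

	switch (ev) {
	case cy_as_event_misc_initialized:
		cy_as_hal_print_message("west bridge firmware ready\n");
		break;
	case cy_as_event_misc_awake:
		cy_as_hal_print_message("west bridge left standby\n");
		break;
	default:
		break;
	}
}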
-
-#ifndef __doxygen__
-/* Summary
- This enum provides info of various firmware trace levels.
-
- Description
-
- See Also
- * CyAsMiscSetTraceLevel
-*/
-enum {
- CYAS_FW_TRACE_LOG_NONE = 0, /* Log nothing. */
- CYAS_FW_TRACE_LOG_STATE, /* Log state information. */
- CYAS_FW_TRACE_LOG_CALLS, /* Log function calls. */
- CYAS_FW_TRACE_LOG_STACK_TRACE, /* Log function calls with args. */
- CYAS_FW_TRACE_MAX_LEVEL /* Max trace level sentinel. */
-};
-#endif
-
-/* Summary
- This enum lists the controllable GPIOs of the West Bridge device.
-
- Description
- The West Bridge device has GPIOs that can be used for user defined functions.
- This enumeration lists the GPIOs that are available on the device.
-
- Notes
- All of the GPIOs except UVALID can only be accessed when using West Bridge
- firmware images that support only SD/MMC/MMC+ storage devices. This
- functionality is not supported in firmware images that support NAND
- storage.
-
- See Also
- * CyAsMiscGetGpioValue
- * CyAsMiscSetGpioValue
- */
-typedef enum {
- cy_as_misc_gpio_0 = 0, /* GPIO[0] pin */
- cy_as_misc_gpio_1, /* GPIO[1] pin */
- cy_as_misc_gpio__nand_CE, /* NAND_CE pin, output only */
- cy_as_misc_gpio__nand_CE2, /* NAND_CE2 pin, output only */
- cy_as_misc_gpio__nand_WP, /* NAND_WP pin, output only */
- cy_as_misc_gpio__nand_CLE, /* NAND_CLE pin, output only */
- cy_as_misc_gpio__nand_ALE, /* NAND_ALE pin, output only */
- /* SD_POW pin, output only, do not drive low while storage is active */
- cy_as_misc_gpio_SD_POW,
- cy_as_misc_gpio_U_valid /* UVALID pin */
-} cy_as_misc_gpio;
-
-/* Summary
- This enum lists the set of clock frequencies that are supported for
- working with low speed SD media.
-
- Description
- West Bridge firmware uses a clock frequency less than the maximum
- possible rate for low speed SD media. This can be changed to a
- setting equal to the maximum frequency as desired by the user. This
- enumeration lists the different frequency settings that are
- supported.
-
- See Also
- * CyAsMiscSetLowSpeedSDFreq
- */
-typedef enum cy_as_low_speed_sd_freq {
- /* Approx. 21.82 MHz, default value */
- CY_AS_SD_DEFAULT_FREQ = 0,
- /* 24 MHz */
- CY_AS_SD_RATED_FREQ
-} cy_as_low_speed_sd_freq;
-
-/* Summary
- This enum lists the set of clock frequencies that are supported
- for working with high speed SD media.
-
- Description
- West Bridge firmware uses a 48 MHz clock by default to interface
- with high speed SD/MMC media. This can be changed to 24 MHz if
- so desired by the user. This enum lists the different frequencies
- that are supported.
-
- See Also
- * CyAsMiscSetHighSpeedSDFreq
- */
-typedef enum cy_as_high_speed_sd_freq {
- CY_AS_HS_SD_FREQ_48, /* 48 MHz, default value */
- CY_AS_HS_SD_FREQ_24 /* 24 MHz */
-} cy_as_high_speed_sd_freq;
-
-/* Summary
- Struct encapsulating all information returned by the
- CyAsMiscGetFirmwareVersion call.
-
- Description
- This struct encapsulates all return values from the asynchronous
- CyAsMiscGetFirmwareVersion call, so that a single data argument
- can be passed to the user provided callback function.
-
- See Also
- * CyAsMiscGetFirmwareVersion
- */
-typedef struct cy_as_get_firmware_version_data {
- /* Return value for major version number for the firmware */
- uint16_t major;
- /* Return value for minor version number for the firmware */
- uint16_t minor;
- /* Return value for build version number for the firmware */
- uint16_t build;
- /* Return value for media types supported in the current firmware */
- uint8_t media_type;
- /* Return value to indicate the release or debug mode of firmware */
- cy_bool is_debug_mode;
-} cy_as_get_firmware_version_data;
-
-
-/*****************************
- * West Bridge Functions
- *****************************/
-
-/* Summary
- This function creates a new West Bridge device and returns a
- handle to the device.
-
- Description
- This function initializes the API object that represents the West
- Bridge device and returns a handle to this device. This handle is
- required for all West Bridge related functions to identify the
- specific West Bridge device.
-
- * Valid In Asynchronous Callback: NO
-
- Returns
- * CY_AS_ERROR_SUCCESS
- * CY_AS_ERROR_OUT_OF_MEMORY
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_create_device(
- /* Return value for handle to created device */
- cy_as_device_handle *handle_p,
- /* The HAL specific tag for this device */
- cy_as_hal_device_tag tag
- );
-
-/* Summary
- This function destroys a previously created West Bridge device.
-
- Description
- When a West Bridge device is created, an opaque handle is returned
- that represents the device. This function destroys that handle and
- frees all resources associated with the handle.
-
- * Valid In Asynchronous Callback: NO
-
- Returns
- * CY_AS_ERROR_SUCCESS
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_STILL_RUNNING - The USB or STORAGE stacks are still
- * running, they must be stopped before the device can be destroyed
- * CY_AS_ERROR_DESTROY_SLEEP_CHANNEL_FAILED - the HAL layer failed to
- * destroy a sleep channel
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_destroy_device(
- /* Handle to the device to destroy */
- cy_as_device_handle handle
- );
-
-/* Summary
- This function initializes the hardware for basic communication with
- West Bridge.
-
- Description
- This function initializes the hardware to establish basic
- communication with the West Bridge device. This is always the first
- function called to initialize communication with the West Bridge
- device.
-
- * Valid In Asynchronous Callback: NO
-
- Returns
- * CY_AS_ERROR_SUCCESS - the basic initialization was completed
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_IN_STANDBY
- * CY_AS_ERROR_ALREADY_RUNNING
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_NO_ANTIOCH - cannot find the West Bridge device
- * CY_AS_ERROR_CREATE_SLEEP_CHANNEL_FAILED -
- * the HAL layer failed to create a sleep channel
-
- See Also
- * CyAsDeviceConfig
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_configure_device(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* Configuration information */
- cy_as_device_config *config_p
- );
-
-/* Summary
- This function returns non-zero if West Bridge is in standby and
- zero otherwise.
-
- Description
- West Bridge supports a standby mode. This function is used to
- query West Bridge to determine if West Bridge is in a standby
- mode.
-
- * Valid In Asynchronous Callback: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS
- * CY_AS_ERROR_INVALID_HANDLE
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_in_standby(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* Return value for standby state */
- cy_bool *standby
- );
-
-/* Summary
- This function downloads the firmware to West Bridge device.
-
- Description
- This function downloads firmware from a given location and with a
- given size to the West Bridge device. After the firmware is
- downloaded the West Bridge device is moved out of configuration
- mode causing the firmware to be executed. It is an error to call
- this function when the device is not in configuration mode. The
- device is in configuration mode on power up and may be placed in
- configuration mode after power up with a hard reset.
-
- Notes
- The firmware must be on a word-aligned boundary.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the firmware was successfully downloaded
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device
- * was not configured
- * CY_AS_ERROR_NOT_IN_CONFIG_MODE
- * CY_AS_ERROR_INVALID_SIZE - the size of the firmware
- * exceeded 32768 bytes
- * CY_AS_ERROR_ALIGNMENT_ERROR
- * CY_AS_ERROR_IN_STANDBY - trying to download
- * while in standby mode
- * CY_AS_ERROR_TIMEOUT
-
- See Also
- * CyAsMiscReset
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_download_firmware(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* Pointer to the firmware to be downloaded */
- const void *fw_p,
- /* The size of the firmware in bytes */
- uint16_t size,
- /* Callback to call when the operation is complete. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
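Taken together with CyAsMiscCreateDevice() and CyAsMiscConfigureDevice(), a blocking bring-up sequence could look like the sketch below. The zero-initialized configuration, the crystal setting, the firmware image parameters, and the cy_true spelling are assumptions of the example, and error unwinding is kept minimal.

/* Illustrative blocking bring-up (cb == 0); configuration values and
 * the firmware image parameters are assumptions of this sketch. */
static cy_as_return_status_t example_bring_up(cy_as_hal_device_tag tag,
			const void *fw_image, uint16_t fw_size)
{
	cy_as_device_handle h;
	cy_as_device_config cfg = {0};
	cy_as_return_status_t ret;

	ret = cy_as_misc_create_device(&h, tag);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	cfg.crystal = cy_true;	/* clock pin driven by a crystal */
	ret = cy_as_misc_configure_device(h, &cfg);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	/* No callback supplied, so the call completes synchronously. */
	return cy_as_misc_download_firmware(h, fw_image, fw_size, 0, 0);
}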
-
-
-/* Summary
- This function returns the version number of the firmware running in
- the West Bridge device.
-
- Description
- This function queries the West Bridge device and retrieves the
- firmware version number. If the firmware is not loaded, an error is
- returned indicating that no firmware has been loaded.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the firmware version number was retrieved
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been downloaded
- * to the device
- * CY_AS_ERROR_IN_STANDBY
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_TIMEOUT - there was a timeout waiting for a response
- * from the West Bridge firmware
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_get_firmware_version(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* Return values indicating the firmware version. */
- cy_as_get_firmware_version_data *data,
- /* Callback to call when the operation is complete. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
-
-#if !defined(__doxygen__)
-
-/* Summary
- This function reads and returns the contents of an MCU accessible
- register on the West Bridge.
-
- Description
- This function requests the firmware to read and return the contents
- of an MCU accessible register through the mailboxes.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the register content was retrieved.
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED
- * CY_AS_ERROR_NO_FIRMWARE
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_TIMEOUT - there was a timeout waiting for a response
- * from the West Bridge firmware
- * CY_AS_ERROR_INVALID_RESPONSE - the firmware build does not
- * support this command.
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_read_m_c_u_register(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* Address of the register to read */
- uint16_t address,
- /* Return value for the MCU register content */
- uint8_t *value,
- /* Callback to call when the operation is complete. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
-
-/* Summary
- This function writes to an MCU accessible register on the West Bridge.
-
- Description
- This function requests the firmware to write a specified value to an
- MCU accessible register through the mailboxes.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Notes
- This function is only for internal use by the West Bridge API layer.
- Calling this function directly can cause device malfunction.
-
- Returns
- * CY_AS_ERROR_SUCCESS - the register content was updated.
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED
- * CY_AS_ERROR_NO_FIRMWARE
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_TIMEOUT - there was a timeout waiting for a response
- * from the West Bridge firmware
- * CY_AS_ERROR_INVALID_RESPONSE - the firmware build does not support
- * this command.
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_write_m_c_u_register(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* Address of the register to write */
- uint16_t address,
- /* Mask to be applied on the register contents. */
- uint8_t mask,
- /* Data to be ORed with the register contents. */
- uint8_t value,
- /* Callback to call when the operation is complete. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
-
-#endif
-
-/* Summary
- This function will reset the West Bridge device and software API.
-
- Description
- This function will reset the West Bridge device and software API.
- The reset operation can be a hard reset or a soft reset. A hard
- reset will reset all aspects of the West Bridge device. The device
- will enter the configuration state and the firmware will have to be
- reloaded. The device will also have to be re-initialized. A soft
- reset just resets the West Bridge micro-controller.
-
- Notes
- When a hard reset is issued, the firmware that may have been
- previously loaded will be lost and any configuration information set
- via CyAsMiscConfigureDevice() will be lost. This will be reflected
- in the API maintained state of the device. In order to re-establish
- communications with the West Bridge device, CyAsMiscConfigureDevice()
- and CyAsMiscDownloadFirmware() must be called again.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the device has been reset
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED
- * CY_AS_ERROR_NO_FIRMWARE
- * CY_AS_ERROR_NOT_YET_SUPPORTED - soft reset is not currently supported
- * CY_AS_ERROR_ASYNC_PENDING - Reset is unable to flush pending async
- * reads/writes in polling mode.
-
-
- See Also
- * CyAsMiscReset
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_reset(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* The type of reset to perform */
- cy_as_reset_type type,
- /* If true, flush all pending writes to mass storage
- before performing the reset. */
- cy_bool flush,
- /* Callback to call when the operation is complete. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
-
-/* Summary
- This function acquires a given resource.
-
- Description
- There are resources in the system that are shared between the
- West Bridge device and the processor attached to the P port of
- the West Bridge device. This API provides a mechanism for the
- P port processor to acquire ownership of a resource.
-
- Notes
- The ownership of the resources controlled by CyAsMiscAcquireResource()
- and CyAsMiscReleaseResource() defaults to a known state at hardware
- reset. After the firmware is loaded and begins execution the state of
- these resources may change. At any point if the P Port processor needs
- to acquire a resource it should do so explicitly to be sure of
- ownership.
-
- Returns
- * CY_AS_ERROR_SUCCESS - the p port successfully acquired the
- * resource of interest
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED
- * CY_AS_ERROR_NO_FIRMWARE
- * CY_AS_ERROR_INVALID_RESOURCE
- * CY_AS_ERROR_RESOURCE_ALREADY_OWNED - the p port already
- * owns this resource
- * CY_AS_ERROR_NOT_ACQUIRED - the resource cannot be acquired
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_TIMEOUT - there was a timeout waiting for a
- * response from the West Bridge firmware
-
- See Also
- * CyAsResourceType
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_acquire_resource(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* The resource to acquire */
- cy_as_resource_type *resource,
- /* If true, force West Bridge to release the resource */
- cy_bool force,
- /* Callback to call when the operation is complete. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
-
-/* Summary
- This function releases a given resource.
-
- Description
- There are resources in the system that are shared between the
- West Bridge device and the processor attached to the P port of
- the West Bridge device. This API provides a mechanism for the
- P port processor to release a resource that has previously been
- acquired via the CyAsMiscAcquireResource() call.
-
- * Valid In Asynchronous Callback: NO
-
- Returns
- * CY_AS_ERROR_SUCCESS - the p port successfully released
- * the resource of interest
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED
- * CY_AS_ERROR_NO_FIRMWARE
- * CY_AS_ERROR_INVALID_RESOURCE
- * CY_AS_ERROR_RESOURCE_NOT_OWNED - the p port does not own the
- * resource of interest
-
- See Also
- * CyAsResourceType
- * CyAsMiscAcquireResource
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_release_resource(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* The resource to release */
- cy_as_resource_type resource
- );
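-
-/*
- * Usage sketch (illustrative only, not part of the original header):
- * acquire a shared resource, use it, and release it again. The
- * resource value is supplied by the caller, so no cy_as_resource_type
- * enumerator names are assumed; a NULL callback is assumed to select
- * the synchronous path.
- */
-static cy_as_return_status_t
-example_with_resource(cy_as_device_handle handle,
-	cy_as_resource_type resource)
-{
-	cy_as_resource_type req = resource;
-	cy_as_return_status_t status;
-
-	/* Do not force West Bridge to give up the resource. */
-	status = cy_as_misc_acquire_resource(handle, &req, (cy_bool)0, 0, 0);
-	if (status != CY_AS_ERROR_SUCCESS &&
-		status != CY_AS_ERROR_RESOURCE_ALREADY_OWNED)
-		return status;
-
-	/* ... use the resource here ... */
-
-	return cy_as_misc_release_resource(handle, resource);
-}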
-
-#ifndef __doxygen__
-/* Summary
- This function sets the trace level for the West Bridge firmware.
-
- Description
- The West Bridge firmware has the ability to store information
- about the state and execution path of the firmware on a mass storage
- device attached to the West Bridge device. This function configures
- the specific mass storage device to be used and the type of information
- to be stored. This state information is used for debugging purposes
- and must be interpreted by a Cypress provided tool.
-
- *Trace Level*
- The trace level indicates the amount of information to output.
- * 0 = no trace information is output
- * 1 = state information is output
- * 2 = function call information is output
- * 3 = function call, arguments, and return value information is output
-
- * Valid In Asynchronous Callback: NO
-
- Notes
- The media device and unit specified in this call will be overwritten
- and any data currently stored on this device and unit will be lost.
-
- * NOT IMPLEMENTED YET
-
- Returns
- * CY_AS_ERROR_SUCCESS - the trace configuration has been
- * successfully changed
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does not exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified media/device
- * pair does not exist
- * CY_AS_ERROR_NO_SUCH_UNIT - the unit specified does not exist
- * CY_AS_ERROR_INVALID_TRACE_LEVEL - the trace level requested
- * does not exist
- * CY_AS_ERROR_TIMEOUT - there was a timeout waiting for a
- * response from the West Bridge firmware
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_set_trace_level(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* The trace level */
- uint8_t level,
- /* The bus for the output */
- cy_as_bus_number_t bus,
- /* The device for the output */
- uint32_t device,
- /* The unit for the output */
- uint32_t unit,
- /* Callback to call when the operation is complete. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
-#endif
-
-/* Summary
- This function places West Bridge into the low power standby mode.
-
- Description
- This function places West Bridge into a low power (sleep) mode, and
- cannot be called while the USB stack is active. This function first
- instructs the West Bridge firmware that the device is about to be
- placed into sleep mode. This allows West Bridge to complete any pending
- storage operations. After the West Bridge device has responded that
- pending operations are complete, the device is placed in standby mode.
-
- There are two methods of placing the device in standby mode. If the
- WAKEUP pin of the West Bridge is connected to a GPIO on the processor,
- the pin is de-asserted (via the HAL layer) and West Bridge enters into
- a sleep mode. If the WAKEUP pin is not accessible, the processor can
- write into the power management control/status register on the West
- Bridge to put the device into sleep mode.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function completed and West Bridge
- * is in sleep mode
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_ALREADY_STANDBY - the West Bridge device is already
- * in sleep mode
- * CY_AS_ERROR_TIMEOUT - there was a timeout waiting for a response
- * from the West Bridge firmware
- * CY_AS_ERROR_NOT_SUPPORTED - the HAL layer does not support changing
- * the WAKEUP pin
- * CY_AS_ERROR_USB_RUNNING - The USB stack is still running when the
- * EnterStandby call is made
- * CY_AS_ERROR_ASYNC_PENDING
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
- * CY_AS_ERROR_SETTING_WAKEUP_PIN
- * CY_AS_ERROR_ASYNC_PENDING - In polling mode EnterStandby can not
- * be called until all pending storage read/write requests have
- * finished.
-
- See Also
- * CyAsMiscLeaveStandby
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_enter_standby_e_x_u(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* If true, use the wakeup pin, otherwise use the register */
- cy_bool pin,
- /* Set true to enable specific usages of the
- UVALID signal, please refer to AN xx or ERRATA xx */
- cy_bool uvalid_special,
- /* Callback to call when the operation is complete. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
-
-/* Summary
- This function is provided for backwards compatibility.
-
- Description
- Calling this function is the same as calling CyAsMiscEnterStandbyEx
- with True for the lowpower parameter.
-
- See Also
- * CyAsMiscEnterStandbyEx
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_enter_standby(cy_as_device_handle handle,
- cy_bool pin,
- cy_as_function_callback cb,
- uint32_t client
- );
-
-/* Summary
- This function brings West Bridge out of sleep mode.
-
- Description
- This function asserts the WAKEUP pin (via the HAL layer). This
- brings the West Bridge out of the sleep state and allows the
- West Bridge firmware to process the event causing the wakeup.
- When all processing associated with the wakeup is complete, a
- callback function is called to tell the P port software that
- the firmware processing associated with wakeup is complete.
-
- * Valid In Asynchronous Callback: NO
-
- Returns:
- * CY_AS_ERROR_SUCCESS - the function completed and West Bridge
- * has exited sleep mode
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_SETTING_WAKEUP_PIN
- * CY_AS_ERROR_NOT_IN_STANDBY - the West Bridge device is not in
- * the sleep state
- * CY_AS_ERROR_TIMEOUT - there was a timeout waiting for a
- * response from the West Bridge firmware
- * CY_AS_ERROR_NOT_SUPPORTED - the HAL layer does not support
- * changing the WAKEUP pin
-
- See Also
- * CyAsMiscEnterStandby
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_leave_standby(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* The resource causing the wakeup */
- cy_as_resource_type resource
- );
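-
-/*
- * Usage sketch (illustrative only, not part of the original header):
- * a sleep/wake cycle using the backwards compatible entry point and
- * the WAKEUP pin. It assumes the USB stack has already been stopped
- * and that a NULL callback makes the call block.
- */
-static cy_as_return_status_t
-example_sleep_cycle(cy_as_device_handle handle,
-	cy_as_resource_type wake_source)
-{
-	cy_as_return_status_t status;
-
-	/* Enter sleep mode, toggling the WAKEUP pin via the HAL. */
-	status = cy_as_misc_enter_standby(handle, (cy_bool)1, 0, 0);
-	if (status != CY_AS_ERROR_SUCCESS)
-		return status;
-
-	/* ... the device sleeps until wake-up is required ... */
-
-	return cy_as_misc_leave_standby(handle, wake_source);
-}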
-
-/* Summary
- This function registers a callback function to be called when an
- asynchronous West Bridge MISC event occurs.
-
- Description
- When asynchronous misc events occur, a callback function can be
- called to alert the calling program. This function allows the
- calling program to register a callback.
-
- * Valid In Asynchronous Callback: NO
-
- Returns:
- * CY_AS_ERROR_SUCCESS
- * CY_AS_ERROR_INVALID_HANDLE
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_register_callback(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The function to call */
- cy_as_misc_event_callback callback
- );
-
-/* Summary
- This function sets the logging level for log messages.
-
- Description
- The API can print messages via the CyAsHalPrintMessage capability.
- This function sets the level of detail seen when printing messages
- from the API.
-
- * Valid In Asynchronous Callback: NO
-*/
-EXTERN void
-cy_as_misc_set_log_level(
- /* Level to set, 0 is fewer messages, 255 is all */
- uint8_t level
- );
-
-
-/* Summary
- This function tells West Bridge that SD or MMC media has been
- inserted or removed.
-
- Description
- In some hardware configurations, SD or MMC media detection is
- handled outside of the West Bridge device. This function is called
- when a change is detected to inform the West Bridge firmware to check
- for storage media changes.
-
- * Valid In Asynchronous Callback: NO
-
- Returns:
- * CY_AS_ERROR_SUCCESS
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED
- * CY_AS_ERROR_NO_FIRMWARE
- * CY_AS_ERROR_IN_STANDBY
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
-
- See Also
- * CyAsMiscStorageChanged
-
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_storage_changed(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* Callback to call when the operation is complete. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
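-
-/*
- * Usage sketch (illustrative only, not part of the original header):
- * notify the firmware from an external SD/MMC card-detect handler.
- * A NULL callback is assumed to block until the firmware has
- * re-scanned the media.
- */
-static cy_as_return_status_t
-example_on_card_detect(cy_as_device_handle handle)
-{
-	return cy_as_misc_storage_changed(handle, 0, 0);
-}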
-
-/* Summary
- This function instructs the West Bridge firmware to start/stop
- sending periodic heartbeat messages to the processor.
-
- Description
- The West Bridge firmware can send heartbeat messages through the
- mailbox register once every 500 ms. This message adds overhead,
- because it causes regular mailbox interrupts, and it is turned
- off by default.
- West Bridge firmware is alive. This API can be used to enable or
- disable the heartbeat message.
-
- * Valid In Asynchronous Callback: NO
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function completed successfully
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured yet
- * CY_AS_ERROR_NO_FIRMWARE - firmware has not been downloaded to
- * the West Bridge device
- * CY_AS_ERROR_OUT_OF_MEMORY
-
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_heart_beat_control(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* Message enable/disable selection */
- cy_bool enable,
- /* Callback to call when the operation is complete. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
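-
-/*
- * Usage sketch (illustrative only, not part of the original header):
- * turn the heartbeat message on while checking firmware liveness and
- * off again for normal operation. A NULL callback is assumed to
- * select the synchronous path.
- */
-static cy_as_return_status_t
-example_set_heartbeat(cy_as_device_handle handle, cy_bool enable)
-{
-	return cy_as_misc_heart_beat_control(handle, enable, 0, 0);
-}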
-
-/* Summary
- This function gets the current state of a GPIO pin on the
- West Bridge device.
-
- Description
- The West Bridge device has GPIO pins that can be used for user
- defined functions. This function gets the current state of the
- specified GPIO pin. Calling this function will configure the
- corresponding pin as an input.
-
- * Valid In Asynchronous Callback: NO
-
- Notes
- Only GPIO[0], GPIO[1] and UVALID pins can be used as GP inputs.
- Of these pins, only the UVALID pin is supported by firmware images
- that include NAND storage support.
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function completed successfully
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured yet
- * CY_AS_ERROR_NO_FIRMWARE - firmware has not been downloaded
- * to the West Bridge device
- * CY_AS_ERROR_BAD_INDEX - an invalid GPIO was specified
- * CY_AS_ERROR_NOT_SUPPORTED - this feature is not supported
- * by the firmware
-
- See Also
- * CyAsMiscGpio
- * CyAsMiscSetGpioValue
- */
-EXTERN cy_as_return_status_t
-cy_as_misc_get_gpio_value(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* Id of the GPIO pin to query */
- cy_as_misc_gpio pin,
- /* Current value of the GPIO pin */
- uint8_t *value,
- /* Callback to call when the operation is complete. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
-
-/* Summary
- This function updates the state of a GPIO pin on the West
- Bridge device.
-
- Description
- The West Bridge device has GPIO pins that can be used for
- user defined functions. This function updates the output
- value driven on a specified GPIO pin. Calling this function
- will configure the corresponding pin as an output.
-
- * Valid In Asynchronous Callback: NO
-
- Notes
- All of the pins listed under CyAsMiscGpio can be used as GP
- outputs. This feature is not supported by firmware images
- that include NAND storage device support.
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function completed successfully
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured yet
- * CY_AS_ERROR_NO_FIRMWARE - firmware has not been downloaded
- * to the West Bridge device
- * CY_AS_ERROR_BAD_INDEX - an invalid GPIO was specified
- * CY_AS_ERROR_NOT_SUPPORTED - this feature is not supported
- * by firmware.
-
- See Also
- * CyAsMiscGpio
- * CyAsMiscGetGpioValue
- */
-EXTERN cy_as_return_status_t
-cy_as_misc_set_gpio_value(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* Id of the GPIO pin to set */
- cy_as_misc_gpio pin,
- /* Value to be set on the GPIO pin */
- uint8_t value,
- /* Callback to call when the operation is complete. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
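-
-/*
- * Usage sketch (illustrative only, not part of the original header):
- * read one GPIO and mirror its level onto another. Pin identifiers
- * are passed in, so no cy_as_misc_gpio enumerator names are assumed;
- * NULL callbacks are assumed to make the calls synchronous.
- */
-static cy_as_return_status_t
-example_mirror_gpio(cy_as_device_handle handle,
-	cy_as_misc_gpio in_pin, cy_as_misc_gpio out_pin)
-{
-	uint8_t value = 0;
-	cy_as_return_status_t status;
-
-	/* Reading configures in_pin as an input. */
-	status = cy_as_misc_get_gpio_value(handle, in_pin, &value, 0, 0);
-	if (status != CY_AS_ERROR_SUCCESS)
-		return status;
-
-	/* Writing configures out_pin as an output. */
-	return cy_as_misc_set_gpio_value(handle, out_pin, value, 0, 0);
-}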
-
-/* Summary
- Set the West Bridge device in the low power suspend mode.
-
- Description
- The West Bridge device has a low power suspend mode where the USB
- core and the internal microcontroller are powered down. This
- function sets the West Bridge device into this low power mode.
- This mode can only be entered when there is no active USB
- connection (i.e., when USB has not been connected or is suspended)
- and there are no pending USB or storage asynchronous calls. The
- device will exit the suspend mode and resume handling USB and
- processor requests when any activity is detected on the CE#, D+/D-
- or GPIO[0] lines.
-
- * Valid In Asynchronous Callback: NO
-
- Notes
- The GPIO[0] pin needs to be configured as an input for the gpio
- wakeup to work. This flag should not be enabled if the pin is
- being used as a GP output.
-
- Returns
- * CY_AS_ERROR_SUCCESS - the device was placed in suspend mode.
- * CY_AS_ERROR_INVALID_HANDLE - the West Bridge handle passed
- * in is invalid.
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * yet been configured.
- * CY_AS_ERROR_NO_FIRMWARE - no firmware has been downloaded
- * to the device.
- * CY_AS_ERROR_IN_STANDBY - the device is already in sleep mode.
- * CY_AS_ERROR_USB_CONNECTED - the USB connection is active.
- * CY_AS_ERROR_ASYNC_PENDING - asynchronous storage/USB calls
- * are pending.
- * CY_AS_ERROR_OUT_OF_MEMORY - failed to allocate memory for
- * the operation.
- * CY_AS_ERROR_INVALID_RESPONSE - command not recognised by
- * firmware.
-
- See Also
- * CyAsMiscLeaveSuspend
- */
-EXTERN cy_as_return_status_t
-cy_as_misc_enter_suspend(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* Control the USB wakeup source */
- cy_bool usb_wakeup_en,
- /* Control the GPIO[0] wakeup source */
- cy_bool gpio_wakeup_en,
- /* Callback to call when suspend mode entry is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
-
-/* Summary
- Wake up the West Bridge device from suspend mode.
-
- Description
- This call wakes up the West Bridge device from suspend mode,
- and makes it ready for accepting other commands from the API.
- A CyAsEventMiscWakeup event will be delivered to the callback
- registered with CyAsMiscRegisterCallback to indicate that the
- wake up is complete.
-
- The CyAsEventMiscWakeup event will also be delivered if the
- wakeup happens due to USB or GPIO activity.
-
- * Valid In Asynchronous Callback: NO
-
- Returns
- * CY_AS_ERROR_SUCCESS - the device was woken up from
- * suspend mode.
- * CY_AS_ERROR_INVALID_HANDLE - invalid device handle
- * passed in.
- * CY_AS_ERROR_NOT_CONFIGURED - West Bridge device has
- * not been configured.
- * CY_AS_ERROR_NO_FIRMWARE - firmware has not been
- * downloaded to the device.
- * CY_AS_ERROR_NOT_IN_SUSPEND - the device is not in
- * suspend mode.
- * CY_AS_ERROR_OUT_OF_MEMORY - failed to allocate memory
- * for the operation.
- * CY_AS_ERROR_TIMEOUT - failed to wake up the device.
-
- See Also
- * CyAsMiscEnterSuspend
- */
-EXTERN cy_as_return_status_t
-cy_as_misc_leave_suspend(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* Callback to call when device has resumed operation. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
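-
-/*
- * Usage sketch (illustrative only, not part of the original header):
- * enter suspend mode with both wakeup sources enabled and later
- * force a wakeup from the processor side. NULL callbacks are assumed
- * to make the calls synchronous.
- */
-static cy_as_return_status_t
-example_suspend_cycle(cy_as_device_handle handle)
-{
-	cy_as_return_status_t status;
-
-	/* Allow wakeup on USB activity and on GPIO[0] activity. */
-	status = cy_as_misc_enter_suspend(handle, (cy_bool)1,
-		(cy_bool)1, 0, 0);
-	if (status != CY_AS_ERROR_SUCCESS)
-		return status;
-
-	/* ... later, when the processor needs the device again ... */
-
-	return cy_as_misc_leave_suspend(handle, 0, 0);
-}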
-
-/* Summary
- Reserve the first numzones zones of the nand device for storing
- the processor boot image. The LNA firmware works on the first
- numzones zones of nand to enable the processor to boot.
-
- Description
- This function reserves the first numzones zones of the nand device
- for storing the processor boot image. This function MUST be
- completed before starting the storage stack for the setting
- to take effect.
-
- * Valid In Asynchronous Callback: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - zones are reserved.
-
-*/
-EXTERN cy_as_return_status_t
-cy_as_misc_reserve_l_n_a_boot_area(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* number of nand zones to reserve */
- uint8_t numzones,
- /* Callback to call when device has resumed operation. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
-
-/* Summary
- Select the clock frequency to be used when talking to low
- speed (non-high speed) SD media.
-
- Description
- West Bridge firmware uses a clock frequency less than the
- maximum possible rate for low speed SD media. This function
- selects the frequency setting from between the default speed
- and the maximum speed. This function MUST be completed before
- starting the storage stack for the setting to take effect.
-
- * Valid in Asynchronous Callback: Yes (if cb is non-zero)
- * Nestable: Yes
-
- Returns
- * CY_AS_ERROR_SUCCESS - the operation completed successfully.
- * CY_AS_ERROR_INVALID_HANDLE - invalid device handle passed in.
- * CY_AS_ERROR_NOT_CONFIGURED - West Bridge device has not been
- * configured.
- * CY_AS_ERROR_NO_FIRMWARE - firmware has not been downloaded
- * to the device.
- * CY_AS_ERROR_OUT_OF_MEMORY - failed to allocate memory for
- * the operation.
- * CY_AS_ERROR_IN_SUSPEND - West Bridge is in low power suspend
- * mode.
- * CY_AS_ERROR_INVALID_PARAMETER - invalid frequency setting
- * desired.
- * CY_AS_ERROR_TIMEOUT - West Bridge device did not respond to
- * the operation.
- * CY_AS_ERROR_INVALID_RESPONSE - active firmware does not support
- * the operation.
-
- See Also
- * CyAsLowSpeedSDFreq
- */
-EXTERN cy_as_return_status_t
-cy_as_misc_set_low_speed_sd_freq(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* Frequency setting desired for low speed SD cards */
- cy_as_low_speed_sd_freq setting,
- /* Callback to call on completion */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-
-/* Summary
- Select the clock frequency to be used when talking to high speed
- SD/MMC media.
-
- Description
- West Bridge firmware uses a 48 MHz clock to interface with high
- speed SD/MMC media. This clock rate can be restricted to 24 MHz
- if desired. This function selects the frequency setting to be
- used. This function MUST be completed before starting the storage
- stack for the setting to take effect.
-
- * Valid in Asynchronous Callback: Yes (if cb is non-zero)
- * Nestable: Yes
-
- Returns
- * CY_AS_ERROR_SUCCESS - the operation completed successfully.
- * CY_AS_ERROR_INVALID_HANDLE - invalid device handle passed in.
- * CY_AS_ERROR_NOT_CONFIGURED - West Bridge device has not been
- * configured.
- * CY_AS_ERROR_NO_FIRMWARE - firmware has not been downloaded to
- * the device.
- * CY_AS_ERROR_OUT_OF_MEMORY - failed to allocate memory for the
- * operation.
- * CY_AS_ERROR_IN_SUSPEND - West Bridge is in low power suspend mode.
- * CY_AS_ERROR_INVALID_PARAMETER - invalid frequency setting desired.
- * CY_AS_ERROR_TIMEOUT - West Bridge device did not respond to the
- * operation.
- * CY_AS_ERROR_INVALID_RESPONSE - active firmware does not support
- * the operation.
-
- See Also
- * CyAsHighSpeedSDFreq
- */
-EXTERN cy_as_return_status_t
-cy_as_misc_set_high_speed_sd_freq(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* Frequency setting desired for high speed SD cards */
- cy_as_high_speed_sd_freq setting,
- /* Callback to call on completion */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-/* Summary
- Select the polarity of the SD_POW output driven by West Bridge.
-
- Description
- The SD_POW signal driven by West Bridge can be used to control
- the supply of Vcc to the SD/MMC media connected to the device.
- This signal is driven as an active high signal by default. This
- function can be used to change the polarity of this signal if
- required. This function MUST be completed before starting the
- storage stack for the setting to take effect.
-
- * Valid in Asynchronous Callback: Yes (if cb is non-zero)
- * Nestable: Yes
-
- Returns
- * CY_AS_ERROR_SUCCESS - the operation completed successfully.
- * CY_AS_ERROR_INVALID_HANDLE - invalid device handle passed in.
- * CY_AS_ERROR_NOT_CONFIGURED - West Bridge device has not been
- * configured.
- * CY_AS_ERROR_NO_FIRMWARE - firmware has not been downloaded
- * to the device.
- * CY_AS_ERROR_OUT_OF_MEMORY - failed to allocate memory for
- * the operation.
- * CY_AS_ERROR_IN_SUSPEND - West Bridge is in low power
- * suspend mode.
- * CY_AS_ERROR_INVALID_PARAMETER - invalid polarity setting
- * desired.
- * CY_AS_ERROR_TIMEOUT - West Bridge device did not respond to
- * the operation.
- * CY_AS_ERROR_INVALID_RESPONSE - active firmware does not
- * support the operation.
-
- See Also
- * CyAsMiscSignalPolarity
- */
-EXTERN cy_as_return_status_t
-cy_as_misc_set_sd_power_polarity(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* Desired polarity setting to the SD_POW signal. */
- cy_as_misc_signal_polarity polarity,
- /* Callback to call on completion. */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback. */
- uint32_t client
- );
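-
-/*
- * Usage sketch (illustrative only, not part of the original header):
- * apply the SD interface settings that must be in place before the
- * storage stack is started. All setting values are passed in, so no
- * enumerator names are assumed; NULL callbacks are assumed to make
- * the calls synchronous.
- */
-static cy_as_return_status_t
-example_configure_sd_interface(cy_as_device_handle handle,
-	cy_as_low_speed_sd_freq low_freq,
-	cy_as_high_speed_sd_freq high_freq,
-	cy_as_misc_signal_polarity sd_pow_polarity)
-{
-	cy_as_return_status_t status;
-
-	status = cy_as_misc_set_low_speed_sd_freq(handle, low_freq, 0, 0);
-	if (status != CY_AS_ERROR_SUCCESS)
-		return status;
-
-	status = cy_as_misc_set_high_speed_sd_freq(handle, high_freq, 0, 0);
-	if (status != CY_AS_ERROR_SUCCESS)
-		return status;
-
-	return cy_as_misc_set_sd_power_polarity(handle,
-		sd_pow_polarity, 0, 0);
-}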
-
-/* For supporting deprecated functions */
-#include "cyasmisc_dep.h"
-
-#include "cyas_cplus_end.h"
-
-#endif /* _INCLUDED_CYASMISC_H_ */
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmisc_dep.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmisc_dep.h
deleted file mode 100644
index 8b258efc018..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmisc_dep.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Cypress West Bridge API header file (cyasmisc_dep.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-/* This header will contain Antioch specific declaration
- * of the APIs that are deprecated in Astoria SDK. This is
- * for maintaining backward compatibility with prior releases
- * of the Antioch SDK.
- */
-#ifndef __INCLUDED_CYASMISC_DEP_H__
-#define __INCLUDED_CYASMISC_DEP_H__
-
-#ifndef __doxygen__
-
-EXTERN cy_as_return_status_t
-cy_as_misc_acquire_resource_dep(cy_as_device_handle handle,
- cy_as_resource_type resource,
- cy_bool force);
-EXTERN cy_as_return_status_t
-cy_as_misc_get_firmware_version_dep(cy_as_device_handle handle,
- uint16_t *major,
- uint16_t *minor,
- uint16_t *build,
- uint8_t *media_type,
- cy_bool *is_debug_mode);
-EXTERN cy_as_return_status_t
-cy_as_misc_set_trace_level_dep(cy_as_device_handle handle,
- uint8_t level,
- cy_as_media_type media,
- uint32_t device,
- uint32_t unit,
- cy_as_function_callback cb,
- uint32_t client);
-#endif /*__doxygen__*/
-
-#endif /*__INCLUDED_CYASMISC_DEP_H__*/
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmtp.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmtp.h
deleted file mode 100644
index 05d34496977..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasmtp.h
+++ /dev/null
@@ -1,646 +0,0 @@
-/* Cypress West Bridge API header file (cyasmtp.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASMTP_H_
-#define _INCLUDED_CYASMTP_H_
-
-#include "cyasmisc.h"
-
-#include "cyas_cplus_start.h"
-
-/*@@Media Transfer Protocol (MTP) Overview
- Summary
- The MTP API has been designed to allow MTP enabled West Bridge
- devices to implement the MTP protocol while maintaining high
- performance. West Bridge has the capability to enter into a
- Turbo mode during a MTP SendObject or GetObject operation
- enabling it to directly stream the data into or out of the
- attached SD card with minimal involvement from the Processor.
-
- Description
- The MTP API is designed to act as a pass through implementation
- of the MTP protocol for all operations. Each MTP transaction
- received from the Host is passed through West Bridge and along
- to the Processor. The Processor can then respond to the
- transaction and pass data and/or responses back to the Host
- through West Bridge.
-
- The MTP API also allows for a high speed handling of MTP
- SendObject and GetObject operations, referred to as Turbo MTP.
- During a Turbo MTP operation West Bridge is responsible for
- reading or writing the data for the MTP operation directly from
- or to the SD card with minimal interaction from the Processor.
- The is done by having the Processor transfer a Block Table
- to West Bridge which contains the locations on the SD card that
- need to be read or written. During the handling of a Turbo
- Operation the Processor will then only periodically need to
- send a new Block Table to West Bridge when the first is used up.
- See the CyAsMTPInitSendObject and CyAsMTPInitGetObject functions
- for more details.
-
- In order to enable the MTP API you must first have a MTP enabled
- West Bridge loaded with MTP firmware. You then must start the USB
- and Storage APIs before starting the MTP API. See CyAsMTPStart
- for more details.
-*/
-
-/*@@Endpoints
- Summary
- When using MTP firmware endpoints 2 and 6 are dedicated
- to bulk MTP traffic and endpoint 1 is available for MTP
- events.
-
- Description
- When using a MTP enabled West Bridge device, endpoints 2 and
- 6 are made available for use to implement the MTP protocol.
- These endpoints have a few special restrictions noted below
- but otherwise the existing USB APIs can be used normally with
- these endpoints.
-
- 1. CyAsUsbSetNak, CyAsUsbClearNak, and CyAsUsbGetNak are
- disabled for these endpoints
- 2. During a turbo operation CyAsUsbSetStall, CyAsUsbClearStall,
- and CyAsUsbGetStall are disabled.
-
-*/
-
-
-/* Summary
- This constant defines the maximum number of
- entries in the Block Table used to describe
- the locations for Send/GetObject operations.
-
- See Also
- * CyAsMtpSendObject
- * CyAsMtpGetObject
-*/
-#define CY_AS_MAX_BLOCK_TABLE_ENTRIES 64
-
-/* Summary
- Endpoint to be used for MTP reads from the USB host.
- */
-#define CY_AS_MTP_READ_ENDPOINT (2)
-
-/* Summary
- Endpoint to be used for MTP writes to the USB host.
- */
-#define CY_AS_MTP_WRITE_ENDPOINT (6)
-
-/******************************************
- * MTP Types
- ******************************************/
-
-/* Summary
- The BlockTable used for turbo operations.
-
- Description
- This struct is used to specify the blocks
- to be used for both read/write and send/getObject
- operations.
-
- Each start_blocks[i] entry is a starting Logical Block Address
- and the matching num_blocks[i] entry is the number of blocks in
- that contiguous region.
-
- start_blocks[i]->[-------] <- start_blocks[i] + num_blocks[i]
-
- If you need fewer than CY_AS_MAX_BLOCK_TABLE_ENTRIES
- the remainder should be left empty. Empty is defined
- as num_blocks equal to 0.
-
- See Also
- * CyAsMTPInitSendObject
- * CyAsMTPInitGetObject
-
-*/
-typedef struct cy_as_mtp_block_table {
- uint32_t start_blocks[CY_AS_MAX_BLOCK_TABLE_ENTRIES];
- uint16_t num_blocks[CY_AS_MAX_BLOCK_TABLE_ENTRIES];
-} cy_as_mtp_block_table;
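-
-/*
- * Usage sketch (illustrative only, not part of the original header):
- * describe a single contiguous region and mark every remaining entry
- * as empty (num_blocks == 0), as the description above requires.
- */
-static void
-example_fill_block_table(cy_as_mtp_block_table *bt,
-	uint32_t start_lba, uint16_t block_count)
-{
-	int i;
-
-	for (i = 0; i < CY_AS_MAX_BLOCK_TABLE_ENTRIES; i++) {
-		bt->start_blocks[i] = 0;
-		bt->num_blocks[i] = 0;
-	}
-
-	/* One region covering [start_lba, start_lba + block_count). */
-	bt->start_blocks[0] = start_lba;
-	bt->num_blocks[0] = block_count;
-}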
-
-/* Summary
- This type specifies the type of MTP event that has occurred.
-
- Description
- MTP events are used to communicate that West Bridge has
- either finished the handling of the given operation, or
- that it requires additional data to complete the operation.
-
- In no case does West Bridge send any MTP protocol responses;
- this always remains the responsibility of the client.
-
- See Also
- * CyAsMTPInitSendObject
- * CyAsMTPInitGetObject
- * CyAsMTPSendBlockTable
-
-*/
-typedef enum cy_as_mtp_event {
- /* This event is sent when West Bridge
- has finished writing the data from a
- send_object. west bridge will -not- send
- the MTP response. */
- cy_as_mtp_send_object_complete,
-
- /* This event is sent when West Bridge
- has finished sending the data for a
- get_object operation. west bridge will
- -not- send the MTP response. */
- cy_as_mtp_get_object_complete,
-
- /* This event is called when West Bridge
- needs a new block_table. this is only a
- notification; to transfer a block_table
- to west bridge, use the
- cy_as_mtp_send_block_table function. while
- west bridge is waiting for a block_table
- during a send_object it may need to NAK the
- endpoint. it is important that the
- cy_as_mtp_send_block_table call is made in a
- timely manner, as eventually a delay will
- result in a USB reset. this event has
- no data */
- cy_as_mtp_block_table_needed
-} cy_as_mtp_event;
-
-/* Summary
- Data for the CyAsMTPSendObjectComplete event.
-
- Description
- Notification that a SendObject operation has been
- completed. The status of the operation is given
- (to distinguish between a cancelled and a success
- for example) as well as the block count. The blocks
- are used in order based on the current block table.
- If more than one block table was used for a given
- SendObject the count will include the total number
- of blocks written.
-
- This callback will be made only once per SendObject
- operation and it will only be called after all of
- the data has been committed to the SD card.
-
- See Also
- * CyAsMTPEvent
-
- */
-typedef struct cy_as_mtp_send_object_complete_data {
- cy_as_return_status_t status;
- uint32_t byte_count;
- uint32_t transaction_id;
-} cy_as_mtp_send_object_complete_data;
-
-/* Summary
- Data for the CyAsMTPGetObjectComplete event.
-
- Description
- Notification that a GetObject has finished. This
- event allows the P side to know when to send the MTP
- response for the GetObject operation.
-
- See Also
- * CyAsMTPEvent
-
-*/
-typedef struct cy_as_mtp_get_object_complete_data {
- cy_as_return_status_t status;
- uint32_t byte_count;
-} cy_as_mtp_get_object_complete_data;
-
-/* Summary
- MTP Event callback.
-
- Description
- Callback used to communicate that a SendObject
- operation has finished.
-
- See Also
- * CyAsMTPEvent
-*/
-typedef void (*cy_as_mtp_event_callback)(
- cy_as_device_handle handle,
- cy_as_mtp_event evtype,
- void *evdata
- );
-
-/* Summary
- This is the callback function called after asynchronous API
- functions have completed.
-
- Description
- When calling API functions from callback routines (interrupt
- handlers usually) the async version of these functions must
- be used. This callback is called when an asynchronous API
- function has completed.
-*/
-typedef void (*cy_as_mtp_function_callback)(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* The error status of the operation */
- cy_as_return_status_t status,
- /* A client supplied 32 bit tag */
- uint32_t client
-);
-
-/**************************************
- * MTP Functions
- **************************************/
-
-/* Summary
- This function starts the MTP stack.
-
- Description
- Initializes West Bridge for MTP activity and registers the MTP
- event callback.
-
- Before calling CyAsMTPStart, CyAsUsbStart and CyAsStorageStart must be
- called (in either order).
-
- MTPStart must be called before the device is enumerated. Please
- see the documentation for CyAsUsbSetEnumConfig and CyAsUsbEnumControl
- for details on enumerating a device for MTP.
-
- Calling MTPStart will not affect any ongoing P<->S traffic.
-
- This requires a MTP firmware image to be loaded on West Bridge.
-
- Returns
- * CY_AS_ERROR_SUCCESS
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED
- * CY_AS_ERROR_NO_FIRMWARE
- * CY_AS_ERROR_IN_SUSPEND
- * CY_AS_ERROR_INVALID_IN_CALLBACK
- * CY_AS_ERROR_STARTSTOP_PENDING
- * CY_AS_ERROR_NOT_RUNNING - CyAsUsbStart or CyAsStorageStart
- * have not been called
- * CY_AS_ERROR_NOT_SUPPORTED - West Bridge is not running
- * firmware with MTP support
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
-
-
- See Also
- * CyAsMTPStop
- * CyAsUsbStart
- * CyAsStorageStart
- * CyAsUsbSetEnumConfig
- * CyAsUsbEnumControl
-*/
-cy_as_return_status_t
-cy_as_mtp_start(
- cy_as_device_handle handle,
- cy_as_mtp_event_callback event_c_b,
- cy_as_function_callback cb,
- uint32_t client
- );
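-
-/*
- * Usage sketch (illustrative only, not part of the original header):
- * a minimal MTP event handler and a call that starts the MTP stack
- * with it. It assumes CyAsUsbStart and CyAsStorageStart have already
- * been called and that a NULL completion callback makes
- * cy_as_mtp_start block.
- */
-static void
-example_mtp_event_cb(cy_as_device_handle handle,
-	cy_as_mtp_event evtype, void *evdata)
-{
-	(void)handle;
-	(void)evdata;
-
-	switch (evtype) {
-	case cy_as_mtp_send_object_complete:
-		/* Send the MTP response for the finished SendObject. */
-		break;
-	case cy_as_mtp_get_object_complete:
-		/* Send the MTP response for the finished GetObject. */
-		break;
-	case cy_as_mtp_block_table_needed:
-		/* Provide a new table via cy_as_mtp_send_block_table(). */
-		break;
-	}
-}
-
-static cy_as_return_status_t
-example_mtp_start(cy_as_device_handle handle)
-{
-	return cy_as_mtp_start(handle, example_mtp_event_cb, 0, 0);
-}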
-
-
-/* Summary
- This function stops the MTP stack.
-
- Description
- Stops all MTP activity. Any ongoing transfers are
- canceled.
-
- This will not cause a UsbDisconnect but all
- MTP activity (both pass through and turbo) will
- stop.
-
- Returns
- * CY_AS_ERROR_SUCCESS
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED
- * CY_AS_ERROR_NO_FIRMWARE
- * CY_AS_ERROR_NOT_RUNNING
- * CY_AS_ERROR_IN_SUSPEND
- * CY_AS_ERROR_INVALID_IN_CALLBACK
- * CY_AS_ERROR_STARTSTOP_PENDING
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
-
-
- See Also
- * CyAsMTPStart
-*/
-cy_as_return_status_t
-cy_as_mtp_stop(
- cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client
- );
-
-/* Summary
- This function sets up a Turbo SendObject operation.
-
- Description
- Calling this function will set up West Bridge to
- enable Turbo handling of the next SendObject
- operation received. This will pass down the initial
- block table to the firmware and setup a direct u->s
- write for the SendObject operation.
-
- If this function is not called before a SendObject
- operation is seen the SendObject operation and data
- will be passed along to the P port like any other MTP
- command. It would then be the responsibility of the
- client to perform a normal StorageWrite call to
- store the data on the SD card. N.B. This will be
- very slow compared with the Turbo handling.
-
- The completion of this function only signals that
- West Bridge has been set up to receive the next SendObject
- operation. When the SendObject operation has been fully
- handled and the data written to the SD card a separate
- event will be triggered.
-
- Returns
- * CY_AS_ERROR_SUCCESS
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED
- * CY_AS_ERROR_NO_FIRMWARE
- * CY_AS_ERROR_IN_SUSPEND
- * CY_AS_ERROR_NOT_RUNNING
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_ASYNC_PENDING
- * CY_AS_ERROR_INVALID_RESPONSE
- * CY_AS_ERROR_NOT_SUPPORTED - West Bridge is not running
- * firmware with MTP support
-
- See Also
- * CyAsMTPCancelSendObject
- * CyAsMTPInitGetObject
- * CyAsMTPEvent
- * CyAsMTPSendBlockTable
-*/
-cy_as_return_status_t
-cy_as_mtp_init_send_object(
- cy_as_device_handle handle,
- cy_as_mtp_block_table *blk_table,
- uint32_t num_bytes,
- cy_as_function_callback cb,
- uint32_t client
- );
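-
-/*
- * Usage sketch (illustrative only, not part of the original header):
- * prepare West Bridge for the next SendObject using a single-region
- * block table. A NULL callback is assumed to make the setup call
- * synchronous; completion of the transfer itself is reported later
- * through the cy_as_mtp_send_object_complete event.
- */
-static cy_as_return_status_t
-example_prepare_send_object(cy_as_device_handle handle,
-	uint32_t start_lba, uint16_t block_count, uint32_t num_bytes)
-{
-	cy_as_mtp_block_table bt;
-	int i;
-
-	for (i = 0; i < CY_AS_MAX_BLOCK_TABLE_ENTRIES; i++) {
-		bt.start_blocks[i] = 0;
-		bt.num_blocks[i] = 0;
-	}
-	bt.start_blocks[0] = start_lba;
-	bt.num_blocks[0] = block_count;
-
-	return cy_as_mtp_init_send_object(handle, &bt, num_bytes, 0, 0);
-}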
-
-/* Summary
- This function cancels an ongoing MTP operation.
-
- Description
- Causes West Bridge to cancel an ongoing SendObject
- operation. Note this is only a cancel to West Bridge,
- the MTP operation still needs to be canceled by
- sending a response.
-
- West Bridge will automatically set a Stall on the endpoint
- when the cancel is received.
-
- This function is only valid after CyAsMTPInitSendObject
- has been called, but before the CyAsMTPSendObjectComplete
- event has been sent.
-
- Returns
- * CY_AS_ERROR_SUCCESS
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_RUNNING
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
- * CY_AS_ERROR_NOT_SUPPORTED - West Bridge is not running
- * firmware with MTP support
- * CY_AS_ERROR_NO_OPERATION_PENDING
-
- See Also
- * CyAsMTPInitSendObject
-*/
-cy_as_return_status_t
-cy_as_mtp_cancel_send_object(
- cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client
- );
-
-/* Summary
- This function sets up a turbo GetObject operation.
-
- Description
- Called by the P in response to a GetObject
- operation. This provides West Bridge with the block
- addresses for the Object data that needs to be
- transferred.
-
- It is the responsibility of the Processor to send the MTP
- operation before calling CyAsMTPInitGetObject. West Bridge
- will then send the data phase of the transaction,
- automatically creating the required container for Data.
- Once all of the Data has been transferred a callback will
- be issued to inform the Processor that the Data phase has
- completed allowing it to send the required MTP response.
-
- If an entire Block Table is used then after the
- last block is transferred the CyAsMTPBtCallback
- will be called to allow an additional Block Table(s)
- to be specified.
-
- Returns
- * CY_AS_ERROR_SUCCESS
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED
- * CY_AS_ERROR_NO_FIRMWARE
- * CY_AS_ERROR_NOT_RUNNING
- * CY_AS_ERROR_IN_SUSPEND
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_ASYNC_PENDING
- * CY_AS_ERROR_INVALID_RESPONSE
- * CY_AS_ERROR_NOT_SUPPORTED - West Bridge is not running
- * firmware with MTP support
-
- See Also
- * CyAsMTPInitSendObject
- * CyAsMTPCancelGetObject
- * CyAsMTPEvent
- * CyAsMTPSendBlockTable
-*/
-cy_as_return_status_t
-cy_as_mtp_init_get_object(
- cy_as_device_handle handle,
- cy_as_mtp_block_table *table_p,
- uint32_t num_bytes,
- uint32_t transaction_id,
- cy_as_function_callback cb,
- uint32_t client
- );
-
-/* Summary
- This function cancels an ongoing turbo GetObject
- operation.
-
- Description
- Causes West Bridge to cancel an ongoing GetObject
- operation. Note this is only a cancel to West Bridge,
- the MTP operation still needs to be canceled by
- sending a response.
-
- This function is only valid after CyAsMTPInitGetObject
- has been called, but before the CyAsMTPGetObjectComplete
- event has been sent.
-
- Returns
- * CY_AS_ERROR_SUCCESS
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_RUNNING
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
- * CY_AS_ERROR_NOT_SUPPORTED - West Bridge is not running
- * firmware with MTP support
- * CY_AS_ERROR_NO_OPERATION_PENDING
-
- See Also
- * CyAsMTPInitGetObject
-*/
-cy_as_return_status_t
-cy_as_mtp_cancel_get_object(
- cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client
- );
-
-/* Summary
- This function is used to transfer a BlockTable as part of
- an ongoing MTP operation.
-
- Description
- This function is called in response to the
- CyAsMTPBlockTableNeeded event. This allows the client to
- pass in a BlockTable structure to West Bridge.
-
- The memory associated with the table will be copied and
- can be safely disposed of when the function returns if
- called synchronously, or when the callback is made if
- called asynchronously.
-
- This function is used for both SendObject and GetObject
- as both can generate the CyAsMTPBlockTableNeeded event.
-
- Returns
- * CY_AS_ERROR_SUCCESS
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_NOT_CONFIGURED
- * CY_AS_ERROR_NO_FIRMWARE
- * CY_AS_ERROR_NOT_RUNNING
- * CY_AS_ERROR_IN_SUSPEND
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_ASYNC_PENDING
- * CY_AS_ERROR_INVALID_RESPONSE
- * CY_AS_ERROR_NOT_SUPPORTED - West Bridge is not running
- * firmware with MTP support
-
- See Also
- * CyAsMTPInitSendObject
- * CyAsMTPInitGetObject
-*/
-cy_as_return_status_t
-cy_as_mtp_send_block_table(
- cy_as_device_handle handle,
- cy_as_mtp_block_table *table,
- cy_as_function_callback cb,
- uint32_t client
- );
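-
-/*
- * Usage sketch (illustrative only, not part of the original header):
- * hand a fresh block table to the firmware in response to the
- * cy_as_mtp_block_table_needed event. The API copies the table, so a
- * stack variable is sufficient; a NULL callback is assumed to make
- * the call synchronous.
- */
-static cy_as_return_status_t
-example_send_next_table(cy_as_device_handle handle,
-	const cy_as_mtp_block_table *next)
-{
-	cy_as_mtp_block_table copy = *next;
-
-	/* Send promptly; a long delay here can lead to a USB reset. */
-	return cy_as_mtp_send_block_table(handle, &copy, 0, 0);
-}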
-
-/* Summary
- This function is used to mark the start of a storage
- read/write burst from the P port processor.
-
- Description
- This function is used to mark the start of a storage
- read/write burst from the processor. All USB host access
- into the mass storage / MTP endpoints will be blocked
- while the read/write burst is ongoing, and will be allowed
- to resume only after CyAsMTPStorageOnlyStop is called.
- The burst mode is used to reduce the firmware overhead
- due to configuring the internal data paths repeatedly,
- and can help improve performance when a sequence of
- read/writes is performed in a burst.
-
- This function will not generate a special mailbox request,
- it will only set a flag on the next Storage Read/Write
- operation. Until such a call is made West Bridge will
- continue to accept incoming packets from the Host.
-
- * Valid in Asynchronous Callback: YES
-
- Returns
- * CY_AS_ERROR_INVALID_HANDLE - Invalid West Bridge device
- * handle was passed in.
- * CY_AS_ERROR_NOT_CONFIGURED - West Bridge device has not
- * been configured.
- * CY_AS_ERROR_NO_FIRMWARE - Firmware is not active on West
- * Bridge device.
- * CY_AS_ERROR_NOT_RUNNING - Storage stack is not running.
- * CY_AS_ERROR_SUCCESS - Burst mode has been started.
-
- See Also
- * CyAsStorageReadWriteBurstStop
- */
-cy_as_return_status_t
-cy_as_mtp_storage_only_start(
- /* Handle to the West Bridge device. */
- cy_as_device_handle handle
- );
-
-/* Summary
- This function is used to mark the end of a storage read/write
- burst from the P port processor.
-
- Description
- This function is used to mark the end of a storage read/write
- burst from the processor. At this point, USB access to the
- mass storage / MTP endpoints on the West Bridge device will be
- re-enabled.
-
- * Valid in Asynchronous Callback: NO
-
- Returns
- * CY_AS_ERROR_INVALID_HANDLE - Invalid West Bridge device handle
- * was passed in.
- * CY_AS_ERROR_NOT_CONFIGURED - West Bridge device has not been
- * configured.
- * CY_AS_ERROR_NO_FIRMWARE - Firmware is not active on West Bridge
- * device.
- * CY_AS_ERROR_NOT_RUNNING - Storage stack is not running.
- * CY_AS_ERROR_INVALID_IN_CALLBACK - This API cannot be called
- * from a callback.
- * CY_AS_ERROR_OUT_OF_MEMORY - Failed to allocate memory to
- * process the request.
- * CY_AS_ERROR_TIMEOUT - Failed to send request to firmware.
- * CY_AS_ERROR_SUCCESS - Burst mode has been stopped.
-
- See Also
- * CyAsStorageReadWriteBurstStart
- */
-cy_as_return_status_t
-cy_as_mtp_storage_only_stop(
- /* Handle to the West Bridge device. */
- cy_as_device_handle handle,
- cy_as_function_callback cb,
- uint32_t client
- );
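-
-/*
- * Usage sketch (illustrative only, not part of the original header):
- * bracket a sequence of processor-side storage transfers with the
- * burst start/stop calls. The stop call cannot be made from a
- * callback, so it is issued here with a NULL callback, which is
- * assumed to mean a blocking call.
- */
-static cy_as_return_status_t
-example_storage_burst(cy_as_device_handle handle)
-{
-	cy_as_return_status_t status;
-
-	status = cy_as_mtp_storage_only_start(handle);
-	if (status != CY_AS_ERROR_SUCCESS)
-		return status;
-
-	/* ... perform the storage read/write burst here ... */
-
-	return cy_as_mtp_storage_only_stop(handle, 0, 0);
-}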
-
-#include "cyas_cplus_end.h"
-
-#endif
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasprotocol.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasprotocol.h
deleted file mode 100644
index 773b645ea7e..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasprotocol.h
+++ /dev/null
@@ -1,3838 +0,0 @@
-/* Cypress West Bridge API header file (cyasprotocol.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASPROTOCOL_H_
-#define _INCLUDED_CYASPROTOCOL_H_
-
-/*
- * Constants defining the per context buffer sizes
- */
-#ifndef __doxygen__
-#define CY_CTX_GEN_MAX_DATA_SIZE (8)
-#define CY_CTX_RES_MAX_DATA_SIZE (8)
-#define CY_CTX_STR_MAX_DATA_SIZE (64)
-#define CY_CTX_USB_MAX_DATA_SIZE (130 + 23)
-#define CY_CTX_TUR_MAX_DATA_SIZE (12)
-#endif
-
-/* Summary
- This response indicates a command has been processed
- and returned a status.
-
- Direction
- West Bridge -> P Port Processor
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = all
- * Response Code = 0
-
- D0
- * 0 = success (CY_AS_ERROR_SUCCESS)
- * non-zero = error code
-
- Description
- This response indicates that a request was processed
- and no data was generated as a result of the request
- beyond a single 16 bit status value. This response
- contains the 16 bit data value.
- */
-#define CY_RESP_SUCCESS_FAILURE (0)
-
-/* Summary
- This response indicates an invalid request was sent
-
- Direction
- West Bridge -> P Port Processor
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = all
- * Response Code = 1
-
- D0
- * Mailbox contents for invalid request
-
- Description
- This response is returned when a request is sent
- that contains an invalid
- context or request code.
-*/
-#define CY_RESP_INVALID_REQUEST (1)
-
-/* Summary
- This response indicates a request of invalid length was sent
-
- Direction
- West Bridge -> P Port Processor
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = all
- * Response Code = 2
-
- D0
- * Mailbox contents for invalid request
- * Length for invalid request
-
- Description
- The software API and firmware send requests across the
- P Port to West Bridge interface on different contexts.
- Each context has a maximum size for the request packet
- that it can receive.
- determined during the first cycle of a request transfer.
- If the request is larger than can be handled by the
- receiving context this response is returned. Note that
- the complete request is received before this response is
- sent, but that the request is dropped after this response
- is sent.
-*/
-#define CY_RESP_INVALID_LENGTH (2)
-
-
-/* Summary
- This response indicates a request was made to an
- invalid storage address.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = all
- * Response Code = 0
-
- D0
- Bits 15 - 12 : Media Type
- * 0 = NAND
- * 1 = SD Flash
- * 2 = MMC Flash
- * 3 = CE-ATA
-
- Bits 11 - 8 : Zero based device index
-
- Bits 7 - 0 : Zero based unit index
-
- D1
- Upper 16 bits of block address
-
- D2
- Lower 16 bits of block address
-
- D3
- Portion of address that is invalid
- * 0 = Media Type
- * 1 = Device Index
- * 2 = Unit Index
- * 3 = Block Address
-
- Description
- This response indicates a request to an invalid storage media
- address
- */
-#define CY_RESP_NO_SUCH_ADDRESS (3)
-
-
-/******************************************************/
-
-/*@@General requests
- Summary
- The general requests include:
- * CY_RQT_GET_FIRMWARE_VERSION
- * CY_RQT_SET_TRACE_LEVEL
- * CY_RQT_INITIALIZATION_COMPLETE
- * CY_RQT_READ_MCU_REGISTER
- * CY_RQT_WRITE_MCU_REGISTER
- * CY_RQT_STORAGE_MEDIA_CHANGED
- * CY_RQT_CONTROL_ANTIOCH_HEARTBEAT
- * CY_RQT_PREPARE_FOR_STANDBY
- * CY_RQT_ENTER_SUSPEND_MODE
- * CY_RQT_OUT_OF_SUSPEND
- * CY_RQT_GET_GPIO_STATE
- * CY_RQT_SET_GPIO_STATE
- * CY_RQT_SET_SD_CLOCK_FREQ
- * CY_RQT_WB_DEVICE_MISMATCH
- * CY_RQT_BOOTLOAD_NO_FIRMWARE
- * CY_RQT_RESERVE_LNA_BOOT_AREA
- * CY_RQT_ABORT_P2S_XFER
- */
-
-#ifndef __doxygen__
-#define CY_RQT_GENERAL_RQT_CONTEXT (0)
-#endif
-
-/* Summary
- This command returns the firmware version number,
- media types supported and debug/release mode information.
-
- Direction
- P Port Processor-> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 0
- * Request Code = 0
-
- Description
- The response contains the 16-bit major version, the
- 16-bit minor version, the 16 bit build number, media
- types supported and release/debug mode information.
-
- Responses
- * CY_RESP_FIRMWARE_VERSION
- */
-#define CY_RQT_GET_FIRMWARE_VERSION (0)
-
-
-/* Summary
- This command changes the trace level and trace information
- destination within the West Bridge firmware.
-
- Direction
- P Port Processor-> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 0
- * Request Code = 1
-
- D0
- Trace Level
- * 0 = no trace information
- * 1 = state information
- * 2 = function call
- * 3 = function call with args/return value
-
- D1
- Bits 12 - 15 : MediaType
- * 0 = NAND
- * 1 = SDIO Flash
- * 2 = MMC Flash
- * 3 = CE-ATA
-
- Bits 8 - 11 : Zero based device index
-
- Bits 0 - 7 : Zero based unit index
-
- Description
- The West Bridge firmware contains debugging facilities that can
- be used to trace the execution of the firmware. This request
- sets the level of tracing information that is stored and the
- location where it is stored.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_NO_SUCH_ADDRESS
- */
-#define CY_RQT_SET_TRACE_LEVEL (1)
-
-/* Summary
- This command indicates that the firmware is up and ready
- for communications with the P port processor.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 2
-
- Mailbox0
- * Context = 0
- * Request Code = 3
-
- D0
- Major Version
-
- D1
- Minor Version
-
- D2
- Build Number
-
- D3
- Bits 15-8: Media types supported on Bus 1.
- Bits 7-0: Media types supported on Bus 0.
- Bits 8, 0: NAND support.
- * 0: NAND is not supported.
- * 1: NAND is supported.
- Bits 9, 1: SD memory card support.
- * 0: SD memory card is not supported.
- * 1: SD memory card is supported.
- Bits 10, 2: MMC card support.
- * 0: MMC card is not supported.
- * 1: MMC card is supported.
- Bits 11, 3: CEATA drive support
- * 0: CEATA drive is not supported.
- * 1: CEATA drive is supported.
- Bits 12, 4: SD IO card support.
- * 0: SD IO card is not supported.
- * 1: SD IO card is supported.
-
- D4
- Bits 15 - 8 : MTP information
- * 0 : MTP not supported in firmware
- * 1 : MTP supported in firmware
- Bits 7 - 0 : Debug/Release mode information.
- * 0 : Release mode
- * 1 : Debug mode
-
- Description
- When the West Bridge firmware is loaded, it begins by performing
- initialization. Initialization must be complete before West
- Bridge is ready to accept requests from the P port processor.
- This request is sent from West Bridge to the P port processor
- to indicate that initialization is complete.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-*/
-#define CY_RQT_INITIALIZATION_COMPLETE (3)
-
-/* Summary
- This command requests the firmware to read and return the contents
- of a MCU accessible
- register.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 0
- * Request code = 4
-
- D0
- Address of register to read
-
- Description
- This debug command allows the processor to read the contents of
- a MCU accessible register.
-
- Responses
- * CY_RESP_MCU_REGISTER_DATA
- */
-#define CY_RQT_READ_MCU_REGISTER (4)
-
-/* Summary
- This command requests the firmware to write to an MCU
- accessible register.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 0
- * Request code = 5
-
- D0
- Address of register to be written
-
- D1
- Bits 15 - 8 : Mask to be applied to existing data.
- Bits 7 - 0 : Data to be ORed with masked data.
-
- Description
- This debug command allows the processor to write to an MCU
- accessible register.
- Note: This has to be used with caution, and is supported by
- the firmware only in special debug builds.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- */
-#define CY_RQT_WRITE_MCU_REGISTER (5)
-
-/* Summary
- This command tells the West Bridge firmware that a change in
- storage media has been detected.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 0
- * Request code = 6
-
- Description
- If the insertion or removal of SD or MMC cards is detected by
- hardware external to West Bridge, this command is used to tell
- the West Bridge firmware to re-initialize the storage controlled
- by the device.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
-*/
-#define CY_RQT_STORAGE_MEDIA_CHANGED (6)
-
-/* Summary
- This command enables/disables the periodic heartbeat message
- from the West Bridge firmware to the processor.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 0
- * Request code = 7
-
- Description
- This command enables/disables the periodic heartbeat message
- from the West Bridge firmware to the processor. The heartbeat
- message is left enabled by default, and can lead to a loss
- in performance on the P port interface.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- */
-#define CY_RQT_CONTROL_ANTIOCH_HEARTBEAT (7)
-
-/* Summary
- This command requests the West Bridge firmware to prepare for
- the device going into standby
- mode.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 0
- * Request code = 8
-
- Description
- This command is sent by the processor to the West Bridge as
- preparation for going into standby mode. The request allows the
- firmware to complete any pending/cached storage operations before
- going into the low power state.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- */
-#define CY_RQT_PREPARE_FOR_STANDBY (8)
-
-/* Summary
- Requests the firmware to go into suspend mode.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 0
- * Request code = 9
-
- D0
- Bits 7-0: Wakeup control information.
-
- Description
- This command is sent by the processor to the West Bridge to
- request the device to be placed in suspend mode. The firmware
- will complete any pending/cached storage operations before
- going into the low power state.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- */
-#define CY_RQT_ENTER_SUSPEND_MODE (9)
-
-/* Summary
- Indicates that the device has left suspend mode.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 0
- * Request code = 10
-
- Description
- This message is sent by the West Bridge to the Processor
- to indicate that the device has woken up from suspend mode,
- and is ready to accept new requests.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- */
-#define CY_RQT_OUT_OF_SUSPEND (10)
-
-/* Summary
- Request to get the current state of a West Bridge GPIO pin.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 0
- * Request code = 11
-
- D0
- Bits 15 - 8 : GPIO pin identifier
-
- Responses
- * CY_RESP_GPIO_STATE
-
- Description
- Request from the processor to get the current state of
- a West Bridge GPIO pin.
- */
-#define CY_RQT_GET_GPIO_STATE (11)
-
-/* Summary
- Request to update the output value on a West Bridge
- GPIO pin.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 0
- * Request code = 12
-
- D0
- Bits 15 - 8 : GPIO pin identifier
- Bit 0 : Desired output state
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
-
- Description
- Request from the processor to update the output value on
- a West Bridge GPIO pin.
- */
-#define CY_RQT_SET_GPIO_STATE (12)
-
-/* Summary
- Set the clock frequency on the SD interface of the West
- Bridge device.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 0
- * Request code = 13
-
- D0
- Bit 8: Type of SD/MMC media
- 0 = low speed media
- 1 = high speed media
- Bit 0: Clock frequency selection
- 0 = Default frequency
- 1 = Alternate frequency (24 MHz in both cases)
-
- Description
- This request is sent by the processor to set the operating clock
- frequency used on the SD interface of the device.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- */
-#define CY_RQT_SET_SD_CLOCK_FREQ (13)
-
-/* Summary
- Indicates the firmware downloaded to West Bridge cannot
- run on the active device.
-
- Direction
- West Bridge -> P Port processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 0
- * Request code = 14
-
- Description
- Some versions of West Bridge firmware can only run on specific
- types/versions of the West Bridge device. This error is
- returned when a firmware image is downloaded onto a device that
- does not support it.
-
- Responses
- * None
- */
-#define CY_RQT_WB_DEVICE_MISMATCH (14)
-
-/* Summary
- This command indicates that no firmware was found in the
- storage media.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 0
- * Request code = 15
-
- Description
- This request is received only on silicon with a bootloader
- ROM. The device sends the request if no firmware image is
- found in the storage media, or if the image is corrupted. The
- device then waits for the P port to download a valid firmware image.
-
- Responses
- * None
- */
-#define CY_RQT_BOOTLOAD_NO_FIRMWARE (15)
-
-/* Summary
- This command reserves the first numzones zones of the NAND
- device for storing the processor boot image.
-
- Direction
- P Port Processor-> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 0
- * Request Code = 16
-
- D0
- Bits 7-0: numzones
-
- Description
- The first numzones zones of the NAND device will be used to
- store the processor boot image. The LNA firmware in Astoria
- works on this NAND area and boots the processor, which will
- then use the remaining NAND for normal purposes.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- */
-#define CY_RQT_RESERVE_LNA_BOOT_AREA (16)
-
-/* Summary
- This command cancels the processing of a P2S operation in
- firmware.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 0
- * Request Code = 17
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
-*/
-#define CY_RQT_ABORT_P2S_XFER (17)
-
-/*
- * Used for debugging, ignore for normal operations
- */
-#ifndef __doxygen__
-#define CY_RQT_DEBUG_MESSAGE (127)
-#endif
-
-/******************************************************/
-
-/*@@General responses
- Summary
- The general responses include:
- * CY_RESP_FIRMWARE_VERSION
- * CY_RESP_MCU_REGISTER_DATA
- * CY_RESP_GPIO_STATE
- */
-
-
-/* Summary
- This response indicates success and contains the firmware
- version number, media types supported by the firmware and
- release/debug mode information.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 2
-
- MailBox0
- * Context = 0
- * Response Code = 16
-
- D0
- Major Version
-
- D1
- Minor Version
-
- D2
- Build Number
-
- D3
- Bits 15-8: Media types supported on Bus 1.
- Bits 7-0: Media types supported on Bus 0.
- Bits 8, 0: NAND support.
- * 0: NAND is not supported.
- * 1: NAND is supported.
- Bits 9, 1: SD memory card support.
- * 0: SD memory card is not supported.
- * 1: SD memory card is supported.
- Bits 10, 2: MMC card support.
- * 0: MMC card is not supported.
- * 1: MMC card is supported.
- Bits 11, 3: CEATA drive support
- * 0: CEATA drive is not supported.
- * 1: CEATA drive is supported.
- Bits 12, 4: SD IO card support.
- * 0: SD IO card is not supported.
- * 1: SD IO card is supported.
-
- D4
- Bits 15 - 8 : MTP information
- * 0 : MTP not supported in firmware
- * 1 : MTP supported in firmware
- Bits 7 - 0 : Debug/Release mode information.
- * 0 : Release mode
- * 1 : Debug mode
-
- Description
- This response is sent to return the firmware version
- number to the requestor.
- */
-#define CY_RESP_FIRMWARE_VERSION (16)
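-
-/* Illustrative sketch, not part of the protocol definition above: a
- * hypothetical structure and helper showing how the five data words of
- * CY_RESP_FIRMWARE_VERSION could be decoded per the layout documented
- * in the response.
- */
-#include <stdint.h>
-
-struct fw_version_info {
-	uint16_t major;		/* D0 */
-	uint16_t minor;		/* D1 */
-	uint16_t build;		/* D2 */
-	uint8_t bus0_media;	/* D3 bits 7 - 0 */
-	uint8_t bus1_media;	/* D3 bits 15 - 8 */
-	int mtp_supported;	/* D4 bits 15 - 8 */
-	int debug_build;	/* D4 bits 7 - 0 */
-};
-
-static void decode_fw_version(const uint16_t d[5], struct fw_version_info *out)
-{
-	out->major = d[0];
-	out->minor = d[1];
-	out->build = d[2];
-	out->bus0_media = d[3] & 0xff;
-	out->bus1_media = (d[3] >> 8) & 0xff;
-	out->mtp_supported = ((d[4] >> 8) & 0xff) != 0;
-	out->debug_build = (d[4] & 0xff) != 0;
-}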
-
-/* Summary
- This response returns the contents of a MCU accessible
- register to the processor.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 0
- * Response code = 17
-
- D0
- Bits 7 - 0 : MCU register contents
-
- Description
- This response is sent by the firmware in response to the
- CY_RQT_READ_MCU_REGISTER
- command.
- */
-#define CY_RESP_MCU_REGISTER_DATA (17)
-
-/* Summary
- Reports the current state of a West Bridge GPIO pin.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 0
- * Request code = 18
-
- D0
- Bit 0: Current state of the GP input pin
-
- Description
- This response is sent by the West Bridge to report the
- current state observed on a general purpose input pin.
- */
-#define CY_RESP_GPIO_STATE (18)
-
-
-/* Summary
- This command notifies West Bridge of the polarity of the
- SD power pin.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 0
- * Request Code = 19
- D0: CyAnMiscActivehigh / CyAnMiscActivelow
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-
- */
-
-#define CY_RQT_SDPOLARITY (19)
-
-/******************************/
-
-/*@@Resource requests
- Summary
-
- The resource requests include:
- * CY_RQT_ACQUIRE_RESOURCE
- * CY_RQT_RELEASE_RESOURCE
- */
-
-
-
-
-
-#ifndef __doxygen__
-#define CY_RQT_RESOURCE_RQT_CONTEXT (1)
-#endif
-
-
-/* Summary
- This command is a request from the P port processor
- for ownership of a resource.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 1
- * Request Code = 0
-
- D0
- Resource
- * 0 = USB
- * 1 = SDIO/MMC
- * 2 = NAND
-
- D1
- Force Flag
- * 0 = Normal
- * 1 = Force
-
- Description
- The resource may be the USB pins, the SDIO/MMC bus,
- or the NAND bus.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_ERR_NOT_RELEASED
- * CY_RESP_SUCCESS_FAILURE:CY_ERR_BAD_RESOURCE
- */
-#define CY_RQT_ACQUIRE_RESOURCE (0)
-
-
-/* Summary
- This command is a request from the P port processor
- to release ownership of a resource.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 1
- * Request Code = 1
-
- D0
- Resource
- * 0 = USB
- * 1 = SDIO/MMC
- * 2 = NAND
-
- Description
- The resource may be the USB pins, the SDIO/MMC bus, or
- the NAND bus.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_ERR_NOT_OWNER
- */
-#define CY_RQT_RELEASE_RESOURCE (1)
-
-
-/****************************/
-
-/*@@Storage requests
- Summary
- The storage commands include:
- * CY_RQT_START_STORAGE
- * CY_RQT_STOP_STORAGE
- * CY_RQT_CLAIM_STORAGE
- * CY_RQT_RELEASE_STORAGE
- * CY_RQT_QUERY_MEDIA
- * CY_RQT_QUERY_DEVICE
- * CY_RQT_QUERY_UNIT
- * CY_RQT_READ_BLOCK
- * CY_RQT_WRITE_BLOCK
- * CY_RQT_MEDIA_CHANGED
- * CY_RQT_ANTIOCH_CLAIM
- * CY_RQT_ANTIOCH_RELEASE
- * CY_RQT_SD_INTERFACE_CONTROL
- * CY_RQT_SD_REGISTER_READ
- * CY_RQT_CHECK_CARD_LOCK
- * CY_RQT_QUERY_BUS
- * CY_RQT_PARTITION_STORAGE
- * CY_RQT_PARTITION_ERASE
- * CY_RQT_GET_TRANSFER_AMOUNT
- * CY_RQT_ERASE
- * CY_RQT_SDIO_READ_DIRECT
- * CY_RQT_SDIO_WRITE_DIRECT
- * CY_RQT_SDIO_READ_EXTENDED
- * CY_RQT_SDIO_WRITE_EXTENDED
- * CY_RQT_SDIO_INIT_FUNCTION
- * CY_RQT_SDIO_QUERY_CARD
- * CY_RQT_SDIO_GET_TUPLE
- * CY_RQT_SDIO_ABORT_IO
- * CY_RQT_SDIO_INTR
- * CY_RQT_SDIO_SUSPEND
- * CY_RQT_SDIO_RESUME
- * CY_RQT_SDIO_RESET_DEV
- * CY_RQT_P2S_DMA_START
- */
-#ifndef __doxygen__
-#define CY_RQT_STORAGE_RQT_CONTEXT (2)
-#endif
-
-/* Summary
- This command requests initialization of the storage stack.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 0
-
- Description
- This command is required before any other storage related command
- can be sent to the West Bridge firmware.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_ERR_ALREADY_RUNNING
- */
-#define CY_RQT_START_STORAGE (0)
-
-
-/* Summary
- This command requests shutdown of the storage stack.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 1
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_ERR_NOT_RUNNING
- */
-#define CY_RQT_STOP_STORAGE (1)
-
-
-/* Summary
- This command requests ownership of the given media
- type by the P port processor.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 2
-
- D0
- Bits 12 - 15 : Bus Index
- Bits 8 - 11 : Zero based device index
-
- Responses
- * CY_RESP_MEDIA_CLAIMED_RELEASED
- * CY_RESP_NO_SUCH_ADDRESS
- */
-#define CY_RQT_CLAIM_STORAGE (2)
-
-
-/* Summary
- This command releases ownership of a given media type
- by the P port processor.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 3
-
- D0
- Bits 12 - 15 : Bus Index
- Bits 8 - 11 : Zero based device index
-
- Responses
- * CY_RESP_MEDIA_CLAIMED_RELEASED
- * CY_RESP_NO_SUCH_ADDRESS
- */
-#define CY_RQT_RELEASE_STORAGE (3)
-
-
-/* Summary
- This command returns the total number of logical devices
- of the given type of media.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 4
-
- D0
- Bits 12 - 15 : MediaType
- * 0 = NAND
- * 1 = SDIO Flash
- * 2 = MMC Flash
- * 3 = CE-ATA
-
- Bits 8 - 11 : Not Used
-
- Bits 0 - 7 : Not Used
-
- Responses
- * CY_RESP_MEDIA_DESCRIPTOR
- * CY_RESP_NO_SUCH_ADDRESS
- */
-#define CY_RQT_QUERY_MEDIA (4)
-
-
-/* Summary
- This command queries a given device to determine
- information about the number of logical units on
- the given device.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 5
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Not Used
-
- Responses
- * CY_RESP_DEVICE_DESCRIPTOR
- * CY_RESP_SUCCESS_FAILURE:CY_ERR_INVALID_PARTITION_TABLE
- * CY_RESP_NO_SUCH_ADDRESS
- */
-#define CY_RQT_QUERY_DEVICE (5)
-
-
-/* Summary
- This command queries a given device to determine
- information about the size and location of a logical unit
- located on a physical device.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 6
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based unit index
-
- Responses
- * CY_RESP_UNIT_DESCRIPTOR
- * CY_RESP_SUCCESS_FAILURE:CY_ERR_INVALID_PARTITION_TABLE
- * CY_RESP_NO_SUCH_ADDRESS
- */
-#define CY_RQT_QUERY_UNIT (6)
-
-
-/* Summary
- This command initiates the read of a specific block
- from the given media, device and unit.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 2
-
- MailBox0
- * Context = 2
- * Request Code = 7
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based unit index
-
- D1
- Upper 16 bits of block address
-
- D2
- Lower 16 bits of block address
-
- D3
- BIT 8 - 15 : Upper 8 bits of Number of blocks
-
- BIT 0 - 7 : Reserved
-
- * D4 *
- BITS 8 - 15 : Lower 8 bits of Number of blocks
- BITS 1 - 7 : Not Used
- BIT 0 : Indicates whether this command is a
- part of a P2S only burst.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_ANTIOCH_DEFERRED_ERROR
- */
-#define CY_RQT_READ_BLOCK (7)
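-
-/* Illustrative sketch, not part of the protocol definition above: one
- * plausible way to pack the five data words of a CY_RQT_READ_BLOCK
- * request from a 32-bit block address and a 16-bit block count,
- * following the layout documented in the request.  The helper is
- * hypothetical; CY_RQT_WRITE_BLOCK below uses the same word layout.
- */
-#include <stdint.h>
-
-static void pack_read_block(uint16_t d[5], uint8_t bus, uint8_t device,
-			uint8_t unit, uint32_t block_addr,
-			uint16_t num_blocks, int p2s_burst)
-{
-	d[0] = ((uint16_t)(bus & 0xf) << 12) |	/* bits 12 - 15 : bus index */
-	       ((uint16_t)(device & 0xf) << 8) |/* bits 8 - 11 : device index */
-	       unit;				/* bits 0 - 7 : unit index */
-	d[1] = (uint16_t)(block_addr >> 16);	/* upper 16 bits of address */
-	d[2] = (uint16_t)block_addr;		/* lower 16 bits of address */
-	d[3] = num_blocks & 0xff00;		/* bits 8 - 15 : count MSB */
-	d[4] = ((num_blocks & 0xff) << 8) |	/* bits 8 - 15 : count LSB */
-	       (p2s_burst ? 1 : 0);		/* bit 0 : P2S-only burst */
-}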
-
-
-/* Summary
- This command initiates the write of a specific block
- from the given media, device and unit.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 2
-
- MailBox0
- * Context = 2
- * Request Code = 8
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based unit index
-
- D1
- Upper 16 bits of block address
-
- D2
- Lower 16 bits of block address
-
- D3
- BIT 8 - 15 : Upper 8 bits of Number of blocks
-
- BIT 0 - 7 : Reserved
-
- * D4 *
- BITS 8 - 15 : Lower 8 bits of Number of blocks
- BITS 1 - 7 : Not Used
- BIT 0 : Indicates whether this command is a
- part of a P2S only burst.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_ANTIOCH_DEFERRED_ERROR
- */
-#define CY_RQT_WRITE_BLOCK (8)
-
-/* Summary
- This request is sent when the West Bridge device detects
- a change in the status of the media.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 2
- * Request Code = 9
-
- D0
- Bits 12 - 15 : Bus index
- Bits 0 - 7 : Media type
-
- D1
- Bit 0 : Action
- * 0 = Inserted
- * 1 = Removed
-
- Description
- When the media manager detects the insertion or removal
- of a media from the West Bridge port, this request is sent
- from the West Bridge device to the P Port processor to
- inform the processor of the change in status of the media.
- This request is sent for both an insert operation and a
- removal operation.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-*/
-#define CY_RQT_MEDIA_CHANGED (9)
-
-/* Summary
- This request is sent when the USB module wishes to claim
- storage media.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 2
- * Request Code = 10
-
- D0
- Bit 0:
- * 0 = do not release NAND
- * 1 = release NAND
-
- Bit 1:
- * 0 = do not release SD Flash
- * 1 = release SD Flash
-
- Bit 2:
- * 0 = do not release MMC flash
- * 1 = release MMC flash
-
- Bit 3:
- * 0 = do not release CE-ATA storage
- * 1 = release CE-ATA storage
-
- Bit 8:
- * 0 = do not release storage on bus 0
- * 1 = release storage on bus 0
-
- Bit 9:
- * 0 = do not release storage on bus 1
- * 1 = release storage on bus 1
-
- Description
- When the USB cable is attached to the West Bridge device,
- West Bridge will enumerate the storage devices per the USB
- initialization of West Bridge. In order for West Bridge to
- respond to requests received via USB for the mass storage
- devices, the USB module must claim the storage. This
- request asks the P port processor to release the
- storage medium. The medium will not be visible to the USB
- host until it has been released by the processor.
-*/
-#define CY_RQT_ANTIOCH_CLAIM (10)
-
-/* Summary
- This request is sent when the P port has asked West Bridge to
- release storage media, and the West Bridge device has
- completed this.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 2
- * Request Code = 11
-
- D0
- Bit 0:
- * 0 = No change in ownership of NAND storage
- * 1 = NAND ownership has been given to processor
-
- Bit 1:
- * 0 = No change in ownership of SD storage
- * 1 = SD ownership has been given to processor
-
- Bit 2:
- * 0 = No change in ownership of MMC storage
- * 1 = MMC ownership has been given to processor
-
- Bit 3:
- * 0 = No change in ownership of CE-ATA storage
- * 1 = CE-ATA ownership has been given to processor
-
- Bit 4:
- * 0 = No change in ownership of SD IO device
- * 1 = SD IO device ownership has been given to processor
-
- Bit 8:
- * 0 = No change in ownership of storage on bus 0
- * 1 = Bus 0 ownership has been given to processor
-
- Bit 9:
- * 0 = No change in ownership of storage on bus 1
- * 1 = Bus 1 ownership has been given to processor
-
- Description
- When the P port asks for control of a particular media, West
- Bridge may be able to release the media immediately. West
- Bridge may also need to complete the flush of buffers before
- releasing the media. In the latter case, West Bridge will
- indicate that an immediate release is not possible, and
- will send this request to the P port when the release has been
- completed.
-*/
-#define CY_RQT_ANTIOCH_RELEASE (11)
-
-/* Summary
- This request is sent by the Processor to enable/disable the
- handling of SD card detection and SD card write protection
- by the firmware.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 2
- * Request code = 12
-
- D0
- Bit 8: Enable/disable handling of card detection.
- Bit 1: SDAT_3 = 0, GPIO_0 = 1
- Bit 0: Enable/disable handling of write protection.
-
- Description
- This request is sent by the Processor to enable/disable
- the handling of SD card detection and SD card write
- protection by the firmware.
- */
-#define CY_RQT_SD_INTERFACE_CONTROL (12)
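-
-/* Illustrative sketch, not part of the protocol definition above: a
- * hypothetical helper building the D0 word of
- * CY_RQT_SD_INTERFACE_CONTROL from the bits documented in the request.
- */
-#include <stdint.h>
-
-static uint16_t pack_sd_if_control(int card_detect_en, int use_gpio_0,
-			int write_prot_en)
-{
-	return ((card_detect_en ? 1 : 0) << 8) |/* bit 8 : card detection */
-	       ((use_gpio_0 ? 1 : 0) << 1) |	/* bit 1 : SDAT_3=0, GPIO_0=1 */
-	       (write_prot_en ? 1 : 0);		/* bit 0 : write protection */
-}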
-
-/* Summary
- Request from the processor to read a register on the SD
- card, and return the contents.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 2
- * Request code = 13
-
- D0
- Bits 12 - 15 : MediaType
- * 0 = Reserved
- * 1 = SDIO Flash
- * 2 = MMC Flash
- * 3 = Reserved
-
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Type of register to read
-
- Description
- This request is sent by the processor to instruct the
- West Bridge to read a register on the SD/MMC card, and
- send the contents back through the CY_RESP_SD_REGISTER_DATA
- response.
- */
-#define CY_RQT_SD_REGISTER_READ (13)
-
-/* Summary
- Check if the SD/MMC card connected to West Bridge is
- password locked.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 2
- * Request code = 14
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
-
- Description
- This request is sent by the processor to check if the
- SD/MMC connected to the West Bridge is locked with a
- password.
- */
-#define CY_RQT_CHECK_CARD_LOCK (14)
-
-/* Summary
- This command returns the total number of logical devices on the
- given bus
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 15
-
- D0
- Bits 12 - 15 : Bus Number
-
- Bits 0 - 11: Not Used
-
- Responses
- * CY_RESP_BUS_DESCRIPTOR
- * CY_RESP_NO_SUCH_BUS
- */
-#define CY_RQT_QUERY_BUS (15)
-
-/* Summary
- Divide a storage device into two partitions.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request code = 16
-
- D0
- Bits 12 - 15 : Bus number
- Bits 8 - 11 : Device number
- Bits 0 - 7 : Not used
-
- D1
- Size of partition 0 (MS word)
-
- D2
- Size of partition 0 (LS word)
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- */
-#define CY_RQT_PARTITION_STORAGE (16)
-
-/* Summary
- Remove the partition table and unify all partitions on
- a storage device.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request code = 17
-
- D0
- Bits 12 - 15 : Bus number
- Bits 8 - 11 : Device number
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- */
-#define CY_RQT_PARTITION_ERASE (17)
-
-/* Summary
- Requests the current transfer amount.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request code = 18
-
- D0
- Bits 12 - 15 : Bus number
- Bits 8 - 11 : Device number
-
- Responses
- * CY_RESP_TRANSFER_COUNT
- */
-#define CY_RQT_GET_TRANSFER_AMOUNT (18)
-
-/* Summary
- Erases a range of erase units on a storage device.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 2
-
- MailBox0
- * Context = 2
- * Request code = 19
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based unit index
-
- D1
- Upper 16 bits of erase unit
-
- D2
- Lower 16 bits of erase unit
-
- D3
- BIT 8 - 15 : Upper 8 bits of Number of erase units
- BIT 0 - 7 : Reserved
-
- * D4 *
- BIT 8 - 15 : Lower 8 bits of Number of erase units
- BIT 0 - 7 : Not Used
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- */
-#define CY_RQT_ERASE (19)
-
-/* Summary
- This command reads 1 byte from an SDIO card.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 23
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based function number
-
- D1
- Bits 8 - 15 : 0
- Bit 7 : 0 to indicate a read
- Bits 4 - 6 : Function number
- Bit 3 : 0
- Bit 2 : 1 if SDIO interrupt needs to be re-enabled.
- Bits 0 - 1 : Two Most significant bits of Read address
-
- D2
- Bits 1 - 15 : 15 Least significant bits of Read address
- Bit 0 : 0
-
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- * CY_RESP_SDIO_DIRECT
-*/
-#define CY_RQT_SDIO_READ_DIRECT (23)
-
-/* Summary
- This command writes 1 byte to an SDIO card.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 24
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based function number
-
- D1
- Bits 8 - 15 : Data to write
- Bit 7 : 1 to indicate a write
- Bits 4 - 6 : Function number
- Bit 3 : 1 if Read after write is enabled
- Bit 2 : 1 if SDIO interrupt needs to be re-enabled.
- Bits 0 - 1 : Two Most significant bits of write address
-
- D2
- Bits 1 - 15 : 15 Least significant bits of write address
- Bit 0 : 0
-
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SDIO_DIRECT
-*/
-#define CY_RQT_SDIO_WRITE_DIRECT (24)
-
-/* Summary
- This command performs a multi block/byte read from
- an SDIO card.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 25
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based function number
-
- D1
- Bit 15 : 0 to indicate a read
- Bit 12 - 14 : Function Number
- Bit 11 : Block Mode
- Bit 10 : OpCode
- Bits 0 - 9 : 10 Most significant bits of Read address
-
- D2
- Bits 9 - 15 : 7 Least significant bits of address
- Bits 0 - 8 : Block/Byte Count
-
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SDIO_EXT
-*/
-#define CY_RQT_SDIO_READ_EXTENDED (25)
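-
-/* Illustrative sketch, not part of the protocol definition above: one
- * plausible packing of the D0 - D2 words of CY_RQT_SDIO_READ_EXTENDED,
- * assuming the 17-bit SDIO register address is split across D1 (10 most
- * significant bits) and D2 (7 least significant bits) as documented in
- * the request.  The helper is hypothetical.
- */
-#include <stdint.h>
-
-static void pack_sdio_read_ext(uint16_t d[3], uint8_t bus, uint8_t device,
-			uint8_t func, int block_mode, int opcode,
-			uint32_t addr, uint16_t count)
-{
-	d[0] = ((uint16_t)(bus & 0xf) << 12) |
-	       ((uint16_t)(device & 0xf) << 8) |
-	       func;
-	d[1] = /* bit 15 left clear to indicate a read */
-	       ((uint16_t)(func & 0x7) << 12) |	/* bits 12 - 14 : function */
-	       ((block_mode ? 1 : 0) << 11) |	/* bit 11 : block mode */
-	       ((opcode ? 1 : 0) << 10) |	/* bit 10 : OpCode */
-	       ((addr >> 7) & 0x3ff);		/* bits 0 - 9 : address MSBs */
-	d[2] = ((uint16_t)(addr & 0x7f) << 9) |	/* bits 9 - 15 : address LSBs */
-	       (count & 0x1ff);			/* bits 0 - 8 : block/byte count */
-}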
-
-/* Summary
- This command performs a multi block/byte write
- to an SDIO card.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 26
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based function number
-
- D1
- Bit 15 : 1 to indicate a write
- Bit 12 - 14 : Function Number
- Bit 11 : Block Mode
- Bit 10 : OpCode
- Bits 0 - 9 : 10 Most significant bits of write address
-
- D2
- Bits 9 - 15 : 7 Least significant bits of address
- Bits 0 - 8 : Block/Byte Count
-
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SDIO_EXT
-*/
-#define CY_RQT_SDIO_WRITE_EXTENDED (26)
-
-/* Summary
- This command initialises an IO function on the SDIO card.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 27
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based function number
-
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-*/
-#define CY_RQT_SDIO_INIT_FUNCTION (27)
-
-/* Summary
- This command gets properties of the SDIO card.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 28
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_QUERY_CARD
-*/
-#define CY_RQT_SDIO_QUERY_CARD (28)
-
-/* Summary
- This command reads a tuple from the CIS of an SDIO card.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 29
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based function number
-
- D1
- Bits 8 - 15 : Tuple ID to read
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SDIO_GET_TUPLE
-*/
-#define CY_RQT_SDIO_GET_TUPLE (29)
-
-/* Summary
- This command aborts an IO operation.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 30
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based function number
-
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-*/
-#define CY_RQT_SDIO_ABORT_IO (30)
-
-/* Summary
- SDIO Interrupt request sent to the processor from the West Bridge device.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 31
-
- D0
- Bits 0 - 7 : Bus Index
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-*/
-#define CY_RQT_SDIO_INTR (31)
-
-/* Summary
- This command suspends an IO operation.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 32
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based function number
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-*/
-#define CY_RQT_SDIO_SUSPEND (32)
-
-/* Summary
- This command resumes a suspended operation.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 33
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based function number
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SDIO_RESUME
-*/
-#define CY_RQT_SDIO_RESUME (33)
-
-/* Summary
- This command resets an SDIO device.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request Code = 34
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : 0
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-*/
-#define CY_RQT_SDIO_RESET_DEV (34)
-
-/* Summary
- This command asks the API to start the DMA transfer
- for a P2S operation.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Request code = 35
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-*/
-#define CY_RQT_P2S_DMA_START (35)
-
-/******************************************************/
-
-/*@@Storage responses
- Summary
- The storage responses include:
- * CY_RESP_MEDIA_CLAIMED_RELEASED
- * CY_RESP_MEDIA_DESCRIPTOR
- * CY_RESP_DEVICE_DESCRIPTOR
- * CY_RESP_UNIT_DESCRIPTOR
- * CY_RESP_ANTIOCH_DEFERRED_ERROR
- * CY_RESP_SD_REGISTER_DATA
- * CY_RESP_SD_LOCK_STATUS
- * CY_RESP_BUS_DESCRIPTOR
- * CY_RESP_TRANSFER_COUNT
- * CY_RESP_SDIO_EXT
- * CY_RESP_SDIO_INIT_FUNCTION
- * CY_RESP_SDIO_QUERY_CARD
- * CY_RESP_SDIO_GET_TUPLE
- * CY_RESP_SDIO_DIRECT
- * CY_RESP_SDIO_INVALID_FUNCTION
- * CY_RESP_SDIO_RESUME
- */
-
-/* Summary
- Based on the request sent, the state of a given media was
- changed as indicated by this response.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Response Code = 16
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
-
- D1
- State of Media
- * 0 = released
- * 1 = claimed
- */
-#define CY_RESP_MEDIA_CLAIMED_RELEASED (16)
-
-
-/* Summary
- This response gives the number of physical devices
- associated with a given media type.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Response Code = 17
-
- D0
- Media Type
- Bits 12 - 15
- * 0 = NAND
- * 1 = SDIO Flash
- * 2 = MMC Flash
- * 3 = CE-ATA
-
- D1
- Number of devices
- */
-#define CY_RESP_MEDIA_DESCRIPTOR (17)
-
-
-/* Summary
- This response gives a description of a physical device.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 2
-
- MailBox0
- * Context = 2
- * Response Code = 18
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Type of media present on bus
-
- D1
- Block Size in bytes
-
- D2
- Bit 15 : Is device removable
- Bit 9 : Is device password locked
- Bit 8 : Is device writeable
- Bits 0 - 7 : Number Of Units
-
- D3
- ERASE_UNIT_SIZE high 16 bits
-
- D4
- ERASE_UNIT_SIZE low 16 bits
-
- */
-#define CY_RESP_DEVICE_DESCRIPTOR (18)
-
-
-/* Summary
- This response gives a description of a unit on a
- physical device.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 6
-
- MailBox0
- * Context = 2
- * Response Code = 19
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based unit index
-
- D1
- Bits 0 - 7 : Media type
- * 1 = NAND
- * 2 = SD FLASH
- * 4 = MMC FLASH
- * 8 = CEATA
- * 16 = SD IO
-
- D2
- Block Size in bytes
-
- D3
- Start Block Low 16 bits
-
- D4
- Start Block High 16 bits
-
- D5
- Unit Size Low 16 bits
-
- D6
- Unit Size High 16 bits
- */
-#define CY_RESP_UNIT_DESCRIPTOR (19)
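-
-/* Illustrative sketch, not part of the protocol definition above: a
- * hypothetical decode of the CY_RESP_UNIT_DESCRIPTOR payload.  Note that
- * the start block and unit size arrive low word first (D3/D5 are the low
- * 16 bits, D4/D6 the high 16 bits).
- */
-#include <stdint.h>
-
-struct unit_desc {
-	uint8_t bus, device, unit;	/* D0 */
-	uint8_t media_type;		/* D1 bits 0 - 7 */
-	uint16_t block_size;		/* D2 */
-	uint32_t start_block;		/* D3 (low) / D4 (high) */
-	uint32_t unit_size;		/* D5 (low) / D6 (high) */
-};
-
-static void decode_unit_desc(const uint16_t d[7], struct unit_desc *out)
-{
-	out->bus = (d[0] >> 12) & 0xf;
-	out->device = (d[0] >> 8) & 0xf;
-	out->unit = d[0] & 0xff;
-	out->media_type = d[1] & 0xff;
-	out->block_size = d[2];
-	out->start_block = ((uint32_t)d[4] << 16) | d[3];
-	out->unit_size = ((uint32_t)d[6] << 16) | d[5];
-}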
-
-
-/* Summary
- This response is sent as the error status for a P2S
- storage operation.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 2
-
- Mailbox0
- * Context = 2
- * Request Code = 20
-
- D0
- Bit 8 : Type of operation (Read / Write)
- Bits 7 - 0 : Error code
-
- D1
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Zero based unit index
-
- *D2 - D3*
- Address where the error occurred.
-
- D4
- Length of the operation in blocks.
-
- Description
- This error is returned by the West Bridge to the
- processor if a storage operation fails due to a
- medium error.
-*/
-#define CY_RESP_ANTIOCH_DEFERRED_ERROR (20)
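-
-/* Illustrative sketch, not part of the protocol definition above: a
- * hypothetical decode of CY_RESP_ANTIOCH_DEFERRED_ERROR.  The word order
- * of the failing address in D2 - D3 is not spelled out in the response,
- * so the high-word-first ordering used here is an assumption.
- */
-#include <stdint.h>
-
-struct deferred_error {
-	int is_read;		/* D0 bit 8 : type of operation */
-	uint8_t error_code;	/* D0 bits 7 - 0 */
-	uint8_t bus, device, unit;
-	uint32_t address;	/* D2 - D3 : address of the failure */
-	uint16_t length;	/* D4 : length of the operation in blocks */
-};
-
-static void decode_deferred_error(const uint16_t d[5],
-			struct deferred_error *out)
-{
-	out->is_read = (d[0] >> 8) & 1;
-	out->error_code = d[0] & 0xff;
-	out->bus = (d[1] >> 12) & 0xf;
-	out->device = (d[1] >> 8) & 0xf;
-	out->unit = d[1] & 0xff;
-	out->address = ((uint32_t)d[2] << 16) | d[3];	/* assumed ordering */
-	out->length = d[4];
-}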
-
-/* Summary
- Contents of a register on the SD/MMC card connected to
- West Bridge.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- Variable
-
- Mailbox0
- * Context = 2
- * Request code = 21
-
- D0
- Length of data in bytes
-
- D1 - Dn
- The register contents
-
- Description
- This is the response to a CY_RQT_SD_REGISTER_READ
- request.
-*/
-#define CY_RESP_SD_REGISTER_DATA (21)
-
-/* Summary
- Status of whether the SD card is password locked.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 2
- * Request code = 22
-
- D0
- Bit 0 : The card's lock status
-
- Description
- Status of whether the SD card is password locked.
-*/
-#define CY_RESP_SD_LOCK_STATUS (22)
-
-
-/* Summary
- This response gives the types of physical devices
- attached to a given bus.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 2
- * Response Code = 23
-
- D0
- Bus Number
- Bits 12 - 15
-
- D1
- Media present on addressed bus
- */
-#define CY_RESP_BUS_DESCRIPTOR (23)
-
-/* Summary
- Amount of data read/written through the USB mass
- storage/MTP device.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 2
-
- MailBox0
- * Context = 2
- * Request code = 24
-
- D0
- MS 16 bits of number of sectors written
-
- D1
- LS 16 bits of number of sectors written
-
- D2
- MS 16 bits of number of sectors read
-
- D3
- LS 16 bits of number of sectors read
-
- Description
- This is the response to the CY_RQT_GET_TRANSFER_AMOUNT
- request, and represents the number of sectors of data
- that has been written to or read from the storage device
- through the USB Mass storage or MTP interface.
- */
-#define CY_RESP_TRANSFER_COUNT (24)
-
-/* Summary
- Status of SDIO Extended read/write operation.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 2
- * Request code = 34
-
- D0
- Bit 8 : 1 if Read response, 0 if write response
- Bits 0-7: Error Status
-
- Description
- Status of SDIO Extended read/write operation.
-*/
-
-#define CY_RESP_SDIO_EXT (34)
-
-/* Summary
- Status of SDIO operation to Initialize a function
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 2
-
- Mailbox0
- * Context = 2
- * Request code = 35
-
-
- D0
- Bits 8-15 : Function Interface Code
- Bits 0-7: Extended Function Interface Code
-
- D1
- Bits 0-15 : Function Block Size
-
- D2
- Bits 0-15 : Most significant Word of Function PSN
-
- D3
- Bits 0-15 : Least significant Word of Function PSN
-
- D4
- Bit 15 : CSA Enabled Status
- Bit 14 : CSA Support Status
- Bit 9 : CSA No Format Status
- Bit 8 : CSA Write Protect Status
- Bit 0 : Function Wake Up Support status
-
- Description
- Status of SDIO Function Initialization operation.
-*/
-#define CY_RESP_SDIO_INIT_FUNCTION (35)
-
-/* Summary
- Status of SDIO operation to query the Card
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 2
-
- Mailbox0
- * Context = 2
- * Request code = 36
-
-
- D0
- Bits 8-15 : Number of IO functions present
- Bit 0: 1 if memory is present
-
- D1
- Bits 0-15 : Card Manufacturer ID
-
- D2
- Bits 0-15 : Card Manufacturer Additional Information
-
- D3
- Bits 0-15 : Function 0 Block Size
-
- D4
- Bits 8-15 :SDIO Card Capability register
- Bits 0-7: SDIO Version
-
-
- Description
- Status of SDIO Card Query operation.
- */
-#define CY_RESP_SDIO_QUERY_CARD (36)
-/* Summary
- Status of SDIO CIS read operation
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 2
- * Request code = 37
-
- D0
- Bit 8 : 1
- Bits 0-7: Error Status
-
- D1
- Bits 0 - 7 : Size of data read.
-
- Description
- Status of SDIO Get Tuple Read operation.
- */
-#define CY_RESP_SDIO_GET_TUPLE (37)
-
-/* Summary
- Status of SDIO Direct read/write operation.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 2
- * Request code = 38
-
- D0
- Bit 8 : Error Status
- Bits 0-7: Data Read(If any)
-
- Description
- Status of SDIO Direct read/write operation.
-
-*/
-#define CY_RESP_SDIO_DIRECT (38)
-
-/* Summary
- Indicates that an uninitialized function has been used for IO.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 2
- * Request code = 39
-
- Description
- Indicates an IO request on an uninitialized function.
-*/
-#define CY_RESP_SDIO_INVALID_FUNCTION (39)
-
-/* Summary
- Response to a Resume request
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 2
- * Request code = 40
-
- D0
- Bits 8-15 : Error Status
- Bit 0: 1 if data is available. 0 otherwise.
-
- Description
- Response to a Resume request. Indicates whether data is
- available after resume.
-*/
-#define CY_RESP_SDIO_RESUME (40)
-
-/******************************************************/
-
-/*@@USB requests
- Summary
- The USB requests include:
- * CY_RQT_START_USB
- * CY_RQT_STOP_USB
- * CY_RQT_SET_CONNECT_STATE
- * CY_RQT_GET_CONNECT_STATE
- * CY_RQT_SET_USB_CONFIG
- * CY_RQT_GET_USB_CONFIG
- * CY_RQT_STALL_ENDPOINT
- * CY_RQT_GET_STALL
- * CY_RQT_SET_DESCRIPTOR
- * CY_RQT_GET_DESCRIPTOR
- * CY_RQT_SET_USB_CONFIG_REGISTERS
- * CY_RQT_USB_EVENT
- * CY_RQT_USB_EP_DATA
- * CY_RQT_ENDPOINT_SET_NAK
- * CY_RQT_GET_ENDPOINT_NAK
- * CY_RQT_ACK_SETUP_PACKET
- * CY_RQT_SCSI_INQUIRY_COMMAND
- * CY_RQT_SCSI_START_STOP_COMMAND
- * CY_RQT_SCSI_UNKNOWN_COMMAND
- * CY_RQT_USB_REMOTE_WAKEUP
- * CY_RQT_CLEAR_DESCRIPTORS
- * CY_RQT_USB_STORAGE_MONITOR
- * CY_RQT_USB_ACTIVITY_UPDATE
- * CY_RQT_MS_PARTITION_SELECT
- */
-#ifndef __doxygen__
-#define CY_RQT_USB_RQT_CONTEXT (3)
-#endif
-
-/* Summary
- This command requests initialization of the USB stack.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Request Code = 0
-
- Description
- This command is required before any other USB related command can be
- sent to the West Bridge firmware.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_ALREADY_RUNNING
- */
-#define CY_RQT_START_USB (0)
-
-
-/* Summary
- This command requests shutdown of the USB stack.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Request Code = 1
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_NOT_RUNNING
- */
-#define CY_RQT_STOP_USB (1)
-
-
-/* Summary
- This command requests that the USB pins be connected
- or disconnected to/from the West Bridge device.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Request Code = 2
-
- D0
- Desired Connect State
- * 0 = DISCONNECTED
- * 1 = CONNECTED
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_NOT_RUNNING
- */
-#define CY_RQT_SET_CONNECT_STATE (2)
-
-
-/* Summary
- This command requests the connection state of the
- West Bridge USB pins.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Request Code = 3
-
- Responses
- * CY_RESP_CONNECT_STATE
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_NOT_RUNNING
- */
-#define CY_RQT_GET_CONNECT_STATE (3)
-
-
-/* Summary
- This request configures the USB subsystem.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 2
-
- MailBox0
- * Context = 3
- * Request Code = 4
-
- D0
- Bits 8 - 15: Media to enumerate (bit mask)
- Bits 0 - 7: Enumerate Mass Storage (bit mask)
- * 1 = Enumerate device on bus 0
- * 2 = Enumerate device on bus 1
-
- D1
- Enumeration Methodology
- * 1 = West Bridge enumeration
- * 0 = P Port enumeration
-
- D2
- Mass storage interface number - Interface number to
- be used for the mass storage interface
-
- D3
- Mass storage callbacks
- * 1 = relay to P port
- * 0 = completely handle in firmware
-
- Description
- This indicates how enumeration should be handled.
- Enumeration can be handled by the West Bridge device
- or by the P port processor.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_ERR_INVALID_MASK
- * CY_RESP_SUCCESS_FAILURE:CY_ERR_INVALID_STORAGE_MEDIA
- */
-#define CY_RQT_SET_USB_CONFIG (4)
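-
-/* Illustrative sketch, not part of the protocol definition above: a
- * hypothetical helper filling the four data words of
- * CY_RQT_SET_USB_CONFIG from the fields documented in the request.
- */
-#include <stdint.h>
-
-static void pack_set_usb_config(uint16_t d[4], uint8_t media_mask,
-			uint8_t bus_mask, int westbridge_enum,
-			uint8_t ms_interface, int relay_ms_callbacks)
-{
-	d[0] = ((uint16_t)media_mask << 8) | bus_mask;	/* enumeration masks */
-	d[1] = westbridge_enum ? 1 : 0;		/* 1 = West Bridge enumeration */
-	d[2] = ms_interface;			/* mass storage interface no. */
-	d[3] = relay_ms_callbacks ? 1 : 0;	/* 1 = relay to P port */
-}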
-
-
-/* Summary
- This request retrieves the current USB configuration from
- the West Bridge device.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Request Code = 5
-
- Responses
- * CY_RESP_USB_CONFIG
- */
-#define CY_RQT_GET_USB_CONFIG (5)
-
-
-/* Summary
- This request stalls the given endpoint.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Request Code = 6
-
- D0
- Endpoint Number
-
- D1
- * 1 = Stall Endpoint
- * 0 = Clear Stall
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_INVALID_ENDPOINT
- */
-#define CY_RQT_STALL_ENDPOINT (6)
-
-
-/* Summary
- This request retrieves the stall status of the
- requested endpoint.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Request Code = 7
-
- D0
- Endpoint number
-
- Responses
- * CY_RESP_ENDPOINT_STALL
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_INVALID_ENDPOINT
- */
-#define CY_RQT_GET_STALL (7)
-
-
-/* Summary
- This command sets the contents of a descriptor.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Request Code = 8
-
- D0
- Bit 15 - Bit 8
- Descriptor Index
-
- Bit 7 - Bit 0
- Descriptor Type
- * Device = 1
- * Device Qualifier = 2
- * Full Speed Configuration = 3
- * High Speed Configuration = 4
-
- * D1 - DN *
- Actual data for the descriptor
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_ERR_BAD_TYPE
- * CY_RESP_SUCCESS_FAILURE:CY_ERR_BAD_INDEX
- * CY_RESP_SUCCESS_FAILURE:CY_ERR_BAD_LENGTH
- */
-#define CY_RQT_SET_DESCRIPTOR (8)
-
-/* Summary
- This command gets the contents of a descriptor.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Request Code = 9
-
- D0
- Bit 15 - Bit 8
- Descriptor Index
-
- Bit 7 - Bit 0
- Descriptor Type
- * Device = 1
- * Device Qualifier = 2
- * Full Speed Configuration = 3
- * High Speed Configuration = 4
-
- Responses
- * CY_RESP_USB_DESCRIPTOR
- * CY_RESP_SUCCESS_FAILURE:CY_ERR_BAD_TYPE
- * CY_RESP_SUCCESS_FAILURE:CY_ERR_BAD_INDEX
- */
-#define CY_RQT_GET_DESCRIPTOR (9)
-
-/* Summary
- This request is sent from the P port processor to the
- West Bridge device to physically configure the endpoints
- in the device.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 3
-
- MailBox0
- * Context = 3
- * Request Code = 10
-
- D0
- Bit 15 - Bit 8
- EP1OUTCFG register value
- Bit 7 - Bit 0
- EP1INCFG register value
-
- * D1 - D2 *
- PEPxCFS register values where x = 3, 5, 7, 9
-
- * D3 - D7 *
- LEPxCFG register values where x = 3, 5, 7, 9, 10,
- 11, 12, 13, 14, 15
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-*/
-#define CY_RQT_SET_USB_CONFIG_REGISTERS (10)
-
-/* Summary
- This request is sent to the P port processor when a
- USB event occurs and needs to be relayed to the
- P port.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 3
- * Request Code = 11
-
- D0
- Event Type
- * 0 = Reserved
- * 1 = Reserved
- * 2 = USB Suspend
- * 3 = USB Resume
- * 4 = USB Reset
- * 5 = USB Set Configuration
- * 6 = USB Speed change
-
- D1
- If EventType is USB Speed change
- * 0 = Full Speed
- * 1 = High Speed
-
- If EventType is USB Set Configuration
- * The number of the configuration to use
- * (may be zero to unconfigure)
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-*/
-#define CY_RQT_USB_EVENT (11)
-
-/* Summary
- This request is sent in both directions to transfer
- endpoint data for endpoints 0 and 1.
-
- Direction
- West Bridge -> P Port Processor
- P Port Processor -> West Bridge
-
- Length (in transfers)
- Variable
-
- Mailbox0
- * Context = 3
- * Request Code = 12
-
- D0
- Bit 15 - 14 Data Type
- * 0 = Setup (payload should be the 8 byte setup packet)
- * 1 = Data
- * 2 = Status (payload should be empty)
-
- Bit 13 Endpoint Number (only 0 and 1 supported)
- Bit 12 First Packet (only supported for Host ->
- West Bridge traffic)
- Bit 11 Last Packet (only supported for Host ->
- West Bridge traffic)
-
- Bit 9 - 0 Data Length (real max data length is 64 bytes
- for EP0 and EP1)
-
- *D1-Dn*
- Endpoint data
-*/
-#define CY_RQT_USB_EP_DATA (12)
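-
-/* Illustrative sketch, not part of the protocol definition above: one
- * plausible packing of the D0 header word of CY_RQT_USB_EP_DATA using
- * the bit layout documented in the request.  The enum and helper are
- * hypothetical.
- */
-#include <stdint.h>
-
-enum ep_data_type {
-	EP_DATA_SETUP = 0,	/* payload is the 8 byte setup packet */
-	EP_DATA_DATA = 1,
-	EP_DATA_STATUS = 2	/* payload is empty */
-};
-
-static uint16_t pack_ep_data_header(enum ep_data_type type, int ep,
-			int first_pkt, int last_pkt, uint16_t len)
-{
-	return ((uint16_t)type << 14) |		/* bits 15 - 14 : data type */
-	       ((ep ? 1 : 0) << 13) |		/* bit 13 : EP number (0 or 1) */
-	       ((first_pkt ? 1 : 0) << 12) |	/* bit 12 : first packet */
-	       ((last_pkt ? 1 : 0) << 11) |	/* bit 11 : last packet */
-	       (len & 0x3ff);			/* bits 9 - 0 : data length */
-}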
-
-
-/* Summary
- This request sets the NAK bit on an endpoint.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Request Code = 13
-
- D0
- Endpoint Number
-
- D1
- * 1 = NAK Endpoint
- * 0 = Clear NAK
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_INVALID_ENDPOINT
- */
-#define CY_RQT_ENDPOINT_SET_NAK (13)
-
-
-/* Summary
- This request retrieves the NAK config status of the
- requested endpoint.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Request Code = 14
-
- D0
- Endpoint number
-
- Responses
- * CY_RESP_ENDPOINT_NAK
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_INVALID_ENDPOINT
- */
-#define CY_RQT_GET_ENDPOINT_NAK (14)
-
-/* Summary
- This request acknowledges a setup packet that does not
- require any data transfer.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Request Code = 15
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-*/
-#define CY_RQT_ACK_SETUP_PACKET (15)
-
-/* Summary
- This request is sent when the USB storage driver within
- West Bridge receives an Inquiry request.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- x - variable
-
- Mailbox0
- * Context = 3
- * Request Code = 16
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Media type being addressed
-
- D1
- Bits 8 : EVPD bit from request
- Bits 0 - 7 : Codepage from the inquiry request
-
- D2
- Length of the inquiry response in bytes
-
- * D3 - Dn *
- The inquiry response
-
- Description
- When the West Bridge firmware receives a SCSI Inquiry
- request from the USB host, the response to this mass
- storage command is created by West Bridge and forwarded to
- the P port processor. The P port processor may modify
- this response before it is returned to the USB host; this
- request is the mechanism by which that happens.
-*/
-#define CY_RQT_SCSI_INQUIRY_COMMAND (16)
-
-/* Summary
- This request is sent when the USB storage driver within
- West Bridge receives a Start/Stop request.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 3
- * Request Code = 17
-
- D0
- Bits 12 - 15 : Bus index
- Bits 8 - 11 : Zero based device index
- Bits 0 - 7 : Media type being addressed
-
- D1
- Bit 1
- * LoEj Bit (See SCSI-3 specification)
-
- Bit 0
- * Start Bit (See SCSI-3 specification)
-
- Description
- When the West Bridge firmware receives a SCSI Start/Stop
- request from the USB host, this request is relayed to the
- P port processor. The USB firmware will not respond to the
- USB command until the response to this request is received
- by the firmware.
-*/
-#define CY_RQT_SCSI_START_STOP_COMMAND (17)
-
-/* Summary
- This request is sent when the USB storage driver
- receives an unknown CBW on mass storage.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 4
-
- Mailbox0
- * Context = 3
- * Request Code = 18
-
- D0
- Bits 12 - 15 : MediaType
- * 0 = NAND
- * 1 = SDIO Flash
- * 2 = MMC Flash
- * 3 = CE-ATA
-
- D1
- The length of the request in bytes
-
- D2 - Dn
- CBW command block from the SCSI host controller.
-
- Description
- When the firmware receives a SCSI request that is not
- understood, this request is relayed to the
- P port processor.
-*/
-#define CY_RQT_SCSI_UNKNOWN_COMMAND (18)
-
-/* Summary
- Request the West Bridge to signal remote wakeup
- to the USB host.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 3
- * Request code = 19
-
- Description
- Request from the processor to West Bridge, to signal
- remote wakeup to the USB host.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- */
-#define CY_RQT_USB_REMOTE_WAKEUP (19)
-
-/* Summary
- Request the West Bridge to clear all descriptors that
- were previously set using the Set Descriptor calls.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 3
- * Request code = 20
-
- Description
- Request from the processor to West Bridge, to clear
- all descriptor information that was previously stored
- on the West Bridge using CyAnUsbSetDescriptor calls.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- */
-#define CY_RQT_CLEAR_DESCRIPTORS (20)
-
-/* Summary
- Request the West Bridge to monitor USB to storage activity
- and send periodic updates.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 2
-
- Mailbox0
- * Context = 3
- * Request code = 21
-
- D0
- Upper 16 bits of write threshold
-
- D1
- Lower 16 bits of write threshold
-
- D2
- Upper 16 bits of read threshold
-
- D3
- Lower 16 bits of read threshold
-
- Description
- Request from the processor to West Bridge, to start
- monitoring the level of read/write activity on the
- USB mass storage drive and to set the threshold
- level at which progress reports are sent.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- */
-#define CY_RQT_USB_STORAGE_MONITOR (21)
-
-/* Summary
- Event from the West Bridge showing that U2S activity
- since the last event has crossed the threshold.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 2
-
- Mailbox0
- * Context = 3
- * Request code = 22
-
- D0
- Upper 16 bits of sectors written since last event.
-
- D1
- Lower 16 bits of sectors written since last event.
-
- D2
- Upper 16 bits of sectors read since last event.
-
- D3
- Lower 16 bits of sectors read since last event.
-
- Description
- Event notification from the West Bridge indicating
- that the number of read/writes on the USB mass
- storage device has crossed a pre-defined threshold
- level.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- */
-#define CY_RQT_USB_ACTIVITY_UPDATE (22)
-
-/* Summary
- Request to select the partitions to be enumerated on a
- storage device with partitions.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 3
- * Request code = 23
-
- D0
- Bits 8-15 : Bus index
- Bits 0- 7 : Device index
-
- D1
- Bits 8-15 : Control whether to enumerate partition 1.
- Bits 0- 7 : Control whether to enumerate partition 0.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE
- */
-#define CY_RQT_MS_PARTITION_SELECT (23)
-
-/************/
-
-/*@@USB responses
- Summary
- The USB responses include:
- * CY_RESP_USB_CONFIG
- * CY_RESP_ENDPOINT_CONFIG
- * CY_RESP_ENDPOINT_STALL
- * CY_RESP_CONNECT_STATE
- * CY_RESP_USB_DESCRIPTOR
- * CY_RESP_USB_INVALID_EVENT
- * CY_RESP_ENDPOINT_NAK
- * CY_RESP_INQUIRY_DATA
- * CY_RESP_UNKNOWN_SCSI_COMMAND
- */
-
-/* Summary
- This response contains the enumeration configuration
- information for the USB module.
-
- Direction
- 8051->P
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Response Code = 32
-
- D0
- Bits 8 - 15: Media to enumerate (bit mask)
- Bits 0 - 7: Buses to enumerate (bit mask)
- * 1 = Bus 0
- * 2 = Bus 1
-
- D1
- Enumeration Methodology
- * 0 = West Bridge enumeration
- * 1 = P Port enumeration
-
- D2
- Bits 7 - 0 : Interface Count - the number of interfaces
- Bits 15 - 8 : Mass storage callbacks
-
- */
-#define CY_RESP_USB_CONFIG (32)
-
-
-/* Summary
- This response contains the configuration information
- for the specified endpoint.
-
- Direction
- 8051->P
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Response Code = 33
-
- D0
- Bits 15 - 12 : Endpoint Number (0 - 15)
-
- Bits 11 - 10 : Endpoint Type
- * 0 = Control
- * 1 = Bulk
- * 2 = Interrupt
- * 3 = Isochronous
-
- Bits 9 : Endpoint Size
- * 0 = 512
- * 1 = 1024
-
- Bits 8 - 7 : Buffering
- * 0 = Double
- * 1 = Triple
- * 2 = Quad
-
- Bits 6 : Bit Direction
- * 0 = Input
- * 1 = Output
- */
-#define CY_RESP_ENDPOINT_CONFIG (33)
-
-
-/* Summary
- This response contains the stall status for
- the specified endpoint.
-
- Direction
- 8051->P
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Response Code = 34
-
- D0
- Stall status
- * 0 = Active
- * 1 = Stalled
- */
-#define CY_RESP_ENDPOINT_STALL (34)
-
-
-/* Summary
- This response contains the connected/disconnected
- state of the West Bridge USB pins.
-
- Direction
- 8051->P
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 3
- * Response Code = 35
-
- D0
- Connect state
- * 0 = Disconnected
- * 1 = Connected
- */
-#define CY_RESP_CONNECT_STATE (35)
-
-/* Summary
- This response contains the contents of the
- requested USB descriptor.
-
- Direction
- West Bridge -> P Port Processor
-
- Length
- x bytes
-
- Mailbox0
- * Context = 3
- * Response Code = 36
-
- D0
- Length in bytes of the descriptor
-
- * D1 - DN *
- Descriptor contents
-*/
-#define CY_RESP_USB_DESCRIPTOR (36)
-
-/* Summary
- This response is sent in response to a bad USB event code
-
- Direction
- P Port Processor -> West Bridge
-
- Length
- 1 word (2 bytes)
-
- Mailbox0
- * Context = 3
- * Response Code = 37
-
- D0
- The invalid event code in the request
-*/
-#define CY_RESP_USB_INVALID_EVENT (37)
-
-/* Summary
- This response contains the current NAK status of
- a USB endpoint.
-
- Direction
- West Bridge -> P port processor
-
- Length
- 1 transfer
-
- Mailbox0
- * Context = 3
- * Response Code = 38
-
- D0
- The NAK status of the endpoint
- 1 : NAK bit set
- 0 : NAK bit clear
-*/
-#define CY_RESP_ENDPOINT_NAK (38)
-
-/* Summary
- This response gives the contents of the inquiry
- data back to West Bridge to return to the USB host.
-
- Direction
- West Bridge -> P Port Processor
-
- Length
- Variable
-
- MailBox0
- * Context = 3
- * Response Code = 39
-
- D0
- Length of the inquiry response
-
- *D1 - Dn*
- Inquiry data
-*/
-#define CY_RESP_INQUIRY_DATA (39)
-
-/* Summary
- This response gives the status of an unknown SCSI command.
- This also gives three bytes of sense information.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 3
- * Response Code = 40
-
- D0
- The length of the reply in bytes
-
- D1
- * Status of the command
- * Sense Key
-
- D2
- * Additional Sense Code (ASC)
- * Additional Sense Code Qualifier (ASCQ)
-*/
-#define CY_RESP_UNKNOWN_SCSI_COMMAND (40)
-/*******************************************************/
-
-/*@@Turbo requests
- Summary
- The Turbo requests include:
- * CY_RQT_START_MTP
- * CY_RQT_STOP_MTP
- * CY_RQT_INIT_SEND_OBJECT
- * CY_RQT_CANCEL_SEND_OBJECT
- * CY_RQT_INIT_GET_OBJECT
- * CY_RQT_CANCEL_GET_OBJECT
- * CY_RQT_SEND_BLOCK_TABLE
- * CY_RQT_MTP_EVENT
- * CY_RQT_TURBO_CMD_FROM_HOST
- * CY_RQT_TURBO_SEND_RESP_DATA_TO_HOST
- * CY_RQT_TURBO_SWITCH_ENDPOINT
- * CY_RQT_TURBO_START_WRITE_DMA
- * CY_RQT_ENABLE_USB_PATH
- * CY_RQT_CANCEL_ASYNC_TRANSFER
- */
-#ifndef __doxygen__
-#define CY_RQT_TUR_RQT_CONTEXT (4)
-#endif
-
-/* Summary
- This command requests initialization of the MTP stack.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 4
- * Request Code = 0
-
- Description
- This command is required before any other MTP related
- command can be sent to the West Bridge firmware.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_ALREADY_RUNNING
- */
-#define CY_RQT_START_MTP (0)
-
-/* Summary
- This command requests shutdown of the MTP stack.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 4
- * Request Code = 1
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_NOT_RUNNING
- */
-#define CY_RQT_STOP_MTP (1)
-
-/* Summary
- This command sets up an MTP SendObject operation.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 4
- * Request Code = 2
-
- D0
- Total bytes for send object Low 16 bits
-
- D1
- Total bytes for send object High 16 bits
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_NOT_RUNNING
- */
-#define CY_RQT_INIT_SEND_OBJECT (2)
-
-/* Summary
- This command cancels West Bridge's handling of
- an ongoing MTP SendObject operation. This
- does NOT send an MTP response.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 4
- * Request Code = 3
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_NOT_RUNNING
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_NO_OPERATION_PENDING
- */
-#define CY_RQT_CANCEL_SEND_OBJECT (3)
-
-/* Summary
- This command sets up an MTP GetObject operation.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 2
-
- MailBox0
- * Context = 4
- * Request Code = 4
-
- D0
- Total bytes for get object Low 16 bits
-
- D1
- Total bytes for get object High 16 bits
-
- D2
- Transaction Id for get object Low 16 bits
-
- D3
- Transaction Id for get object High 16 bits
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_NOT_RUNNING
- */
-#define CY_RQT_INIT_GET_OBJECT (4)
-
-/* Summary
- This command notifies West Bridge of a new
- BlockTable transfer.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 4
- * Request Code = 5
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_NOT_RUNNING
- */
-#define CY_RQT_SEND_BLOCK_TABLE (5)
-
-/* Summary
- This request is sent to the P port processor when a MTP event occurs
- and needs to be relayed to the P port.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 2
-
- Mailbox0
- * Context = 4
- * Request Code = 6
-
- D0
- Bits 15 - 8 : Return Status for GetObject/SendObject
- Bits 7 - 0 : Event Type
- * 0 = MTP SendObject Complete
- * 1 = MTP GetObject Complete
- * 2 = BlockTable Needed
-
- D1
- Lower 16 bits of the length of the data that got transferred
- in the Turbo Endpoint. (Applicable to "MTP SendObject Complete"
- and "MTP GetObject Complete" events)
-
- D2
- Upper 16 bits of the length of the data that got transferred
- in the Turbo Endpoint. (Applicable to "MTP SendObject Complete"
- and "MTP GetObject Complete" events)
-
- D3
- Lower 16 bits of the Transaction Id of the MTP_SEND_OBJECT
- command. (Applicable to "MTP SendObject Complete" event)
-
- D4
- Upper 16 bits of the Transaction Id of the MTP_SEND_OBJECT
- command. (Applicable to "MTP SendObject Complete" event)
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-*/
-#define CY_RQT_MTP_EVENT (6)
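-
-/* Illustrative sketch, not part of the protocol definition above: a
- * hypothetical decode of a CY_RQT_MTP_EVENT request.  The word layout
- * follows the description in the request (low 16 bits first for both
- * the transfer length and the transaction id).
- */
-#include <stdint.h>
-
-struct mtp_event {
-	uint8_t status;		/* D0 bits 15 - 8 : return status */
-	uint8_t event_type;	/* D0 bits 7 - 0 : 0 = SendObject complete,
-				 * 1 = GetObject complete, 2 = BlockTable
-				 * needed */
-	uint32_t xfer_length;	/* D1 (low) / D2 (high) */
-	uint32_t transaction;	/* D3 (low) / D4 (high) */
-};
-
-static void decode_mtp_event(const uint16_t d[5], struct mtp_event *out)
-{
-	out->status = (d[0] >> 8) & 0xff;
-	out->event_type = d[0] & 0xff;
-	out->xfer_length = ((uint32_t)d[2] << 16) | d[1];
-	out->transaction = ((uint32_t)d[4] << 16) | d[3];
-}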
-
-/* Summary
- This request is sent to the P port processor when a command
- is received from the Host in a Turbo Endpoint. Upon receiving
- this event, the P port should read the data from the endpoint as
- soon as possible.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 4
- * Request Code = 7
-
- D0
- This contains the EP number. (This will always be two now).
-
- D1
- Length of the data available in the Turbo Endpoint.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-*/
-#define CY_RQT_TURBO_CMD_FROM_HOST (7)
-
-/* Summary
- This request is sent to the West Bridge when the P port
- needs to send data to the Host in a Turbo Endpoint.
- Upon receiving this event, Firmware will make the end point
- available for the P port. If the length is zero, then
- firmware will send a zero length packet.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 2
-
- Mailbox0
- * Context = 4
- * Request Code = 8
-
- D0
- This contains the EP number. (This will always be six now).
-
- D1
- Lower 16 bits of the length of the data that needs to be
- sent in the Turbo Endpoint.
-
- D2
- Upper 16 bits of the length of the data that needs to be
- sent in the Turbo Endpoint.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
-*/
-#define CY_RQT_TURBO_SEND_RESP_DATA_TO_HOST (8)
-
-/* Summary
- This command cancels West Bridge's handling of
- an ongoing MTP GetObject operation. This
- does NOT send an MTP response.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 4
- * Request Code = 9
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_NOT_RUNNING
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_NO_OPERATION_PENDING
- */
-#define CY_RQT_CANCEL_GET_OBJECT (9)
-
-/* Summary
- This command switches a Turbo endpoint
- from the U port to the P port. If no data
- is in the endpoint, the endpoint is
- primed to switch as soon as data is placed
- in the endpoint. The endpoint will continue
- to switch until all data has been transferred.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 2
-
- MailBox0
- * Context = 4
- * Request Code = 10
-
- D0
- Whether the read is a packet read.
-
- D1
- Lower 16 bits of the length of the data to switch
- the Turbo Endpoint for.
-
- D2
- Upper 16 bits of the length of the data to switch
- the Turbo Endpoint for.
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- * CY_RESP_SUCCESS_FAILURE:CY_RESP_NOT_RUNNING
- */
-#define CY_RQT_TURBO_SWITCH_ENDPOINT (10)
-
-/* Summary
- This command requests the API to start the DMA
- transfer of a packet of MTP data to the Antioch.
-
- Direction
- West Bridge -> P Port Processor
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 4
- * Request Code = 11
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- */
-#define CY_RQT_TURBO_START_WRITE_DMA (11)
-
-/* Summary
- This command requests the firmware to switch the
- internal data paths to enable USB access to the
- Mass storage / MTP endpoints.
-
- Direction
- P Port Processor -> West Bridge
-
- Length (in transfers)
- 1
-
- MailBox0
- * Context = 4
- * Request code = 12
-
- Responses
- * CY_RESP_SUCCESS_FAILURE:CY_AS_ERROR_SUCCESS
- */
-#define CY_RQT_ENABLE_USB_PATH (12)
-
-/* Summary
- Request to cancel an asynchronous MTP write from
- the processor side.
-
- Direction
- P Port processor -> West Bridge
-
- Length (in transfers)
- 1
-
- Mailbox0
- * Context = 4
- * Request code = 13
-
- D0
- * EP number
-
- Description
- This is a request to the firmware to update internal
- state so that a pending write on the MTP endpoint
- can be cancelled.
- */
-#define CY_RQT_CANCEL_ASYNC_TRANSFER (13)
-
-/******************************************************/
-
-/*@@Turbo responses
- Summary
- The Turbo responses include:
- * CY_RESP_MTP_INVALID_EVENT
- */
-
-/* Summary
- This response is sent in response to a bad MTP event code
-
- Direction
- P Port Processor -> West Bridge
-
- Length
- 1 word (2 bytes)
-
- Mailbox0
- * Context = 4
- * Response Code = 16
-
- D0
- The invalid event code in the request
-*/
-#define CY_RESP_MTP_INVALID_EVENT (16)
-
-#ifndef __doxygen__
-#define CY_RQT_CONTEXT_COUNT (5)
-#endif
-
-#endif /* _INCLUDED_CYASPROTOCOL_H_ */
-
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasregs.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasregs.h
deleted file mode 100644
index f049d7e32a4..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasregs.h
+++ /dev/null
@@ -1,201 +0,0 @@
-/* Cypress West Bridge API header file (cyasregs.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASREG_H_
-#define _INCLUDED_CYASREG_H_
-
-#if !defined(__doxygen__)
-
-#define CY_AS_MEM_CM_WB_CFG_ID (0x80)
-#define CY_AS_MEM_CM_WB_CFG_ID_VER_MASK (0x000F)
-#define CY_AS_MEM_CM_WB_CFG_ID_HDID_MASK (0xFFF0)
-#define CY_AS_MEM_CM_WB_CFG_ID_HDID_ANTIOCH_VALUE (0xA100)
-#define CY_AS_MEM_CM_WB_CFG_ID_HDID_ASTORIA_FPGA_VALUE (0x6800)
-#define CY_AS_MEM_CM_WB_CFG_ID_HDID_ASTORIA_VALUE (0xA200)
-
-
-#define CY_AS_MEM_RST_CTRL_REG (0x81)
-#define CY_AS_MEM_RST_CTRL_REG_HARD (0x0003)
-#define CY_AS_MEM_RST_CTRL_REG_SOFT (0x0001)
-#define CY_AS_MEM_RST_RSTCMPT (0x0004)
-
-#define CY_AS_MEM_P0_ENDIAN (0x82)
-#define CY_AS_LITTLE_ENDIAN (0x0000)
-#define CY_AS_BIG_ENDIAN (0x0101)
-
-#define CY_AS_MEM_P0_VM_SET (0x83)
-#define CY_AS_MEM_P0_VM_SET_VMTYPE_MASK (0x0007)
-#define CY_AS_MEM_P0_VM_SET_VMTYPE_RAM (0x0005)
-#define CY_AS_MEM_P0_VM_SET_VMTYPE_SRAM (0x0007)
-#define CY_AS_MEM_P0_VM_SET_VMTYPE_VMWIDTH (0x0008)
-#define CY_AS_MEM_P0_VM_SET_VMTYPE_FLOWCTRL (0x0010)
-#define CY_AS_MEM_P0_VM_SET_IFMODE (0x0020)
-#define CY_AS_MEM_P0_VM_SET_CFGMODE (0x0040)
-#define CY_AS_MEM_P0_VM_SET_DACKEOB (0x0080)
-#define CY_AS_MEM_P0_VM_SET_OVERRIDE (0x0100)
-#define CY_AS_MEM_P0_VM_SET_INTOVERD (0x0200)
-#define CY_AS_MEM_P0_VM_SET_DRQOVERD (0x0400)
-#define CY_AS_MEM_P0_VM_SET_DRQPOL (0x0800)
-#define CY_AS_MEM_P0_VM_SET_DACKPOL (0x1000)
-
-
-#define CY_AS_MEM_P0_NV_SET (0x84)
-#define CY_AS_MEM_P0_NV_SET_WPSWEN (0x0001)
-#define CY_AS_MEM_P0_NV_SET_WPPOLAR (0x0002)
-
-#define CY_AS_MEM_PMU_UPDATE (0x85)
-#define CY_AS_MEM_PMU_UPDATE_UVALID (0x0001)
-#define CY_AS_MEM_PMU_UPDATE_USBUPDATE (0x0002)
-#define CY_AS_MEM_PMU_UPDATE_SDIOUPDATE (0x0004)
-
-#define CY_AS_MEM_P0_INTR_REG (0x90)
-#define CY_AS_MEM_P0_INTR_REG_MCUINT (0x0020)
-#define CY_AS_MEM_P0_INTR_REG_DRQINT (0x0800)
-#define CY_AS_MEM_P0_INTR_REG_MBINT (0x1000)
-#define CY_AS_MEM_P0_INTR_REG_PMINT (0x2000)
-#define CY_AS_MEM_P0_INTR_REG_PLLLOCKINT (0x4000)
-
-#define CY_AS_MEM_P0_INT_MASK_REG (0x91)
-#define CY_AS_MEM_P0_INT_MASK_REG_MMCUINT (0x0020)
-#define CY_AS_MEM_P0_INT_MASK_REG_MDRQINT (0x0800)
-#define CY_AS_MEM_P0_INT_MASK_REG_MMBINT (0x1000)
-#define CY_AS_MEM_P0_INT_MASK_REG_MPMINT (0x2000)
-#define CY_AS_MEM_P0_INT_MASK_REG_MPLLLOCKINT (0x4000)
-
-#define CY_AS_MEM_MCU_MB_STAT (0x92)
-#define CY_AS_MEM_P0_MCU_MBNOTRD (0x0001)
-
-#define CY_AS_MEM_P0_MCU_STAT (0x94)
-#define CY_AS_MEM_P0_MCU_STAT_CARDINS (0x0001)
-#define CY_AS_MEM_P0_MCU_STAT_CARDREM (0x0002)
-
-#define CY_AS_MEM_PWR_MAGT_STAT (0x95)
-#define CY_AS_MEM_PWR_MAGT_STAT_WAKEUP (0x0001)
-
-#define CY_AS_MEM_P0_RSE_ALLOCATE (0x98)
-#define CY_AS_MEM_P0_RSE_ALLOCATE_SDIOAVI (0x0001)
-#define CY_AS_MEM_P0_RSE_ALLOCATE_SDIOALLO (0x0002)
-#define CY_AS_MEM_P0_RSE_ALLOCATE_NANDAVI (0x0004)
-#define CY_AS_MEM_P0_RSE_ALLOCATE_NANDALLO (0x0008)
-#define CY_AS_MEM_P0_RSE_ALLOCATE_USBAVI (0x0010)
-#define CY_AS_MEM_P0_RSE_ALLOCATE_USBALLO (0x0020)
-
-#define CY_AS_MEM_P0_RSE_MASK (0x9A)
-#define CY_AS_MEM_P0_RSE_MASK_MSDIOBUS_RW (0x0003)
-#define CY_AS_MEM_P0_RSE_MASK_MNANDBUS_RW (0x00C0)
-#define CY_AS_MEM_P0_RSE_MASK_MUSBBUS_RW (0x0030)
-
-#define CY_AS_MEM_P0_DRQ (0xA0)
-#define CY_AS_MEM_P0_DRQ_EP2DRQ (0x0004)
-#define CY_AS_MEM_P0_DRQ_EP3DRQ (0x0008)
-#define CY_AS_MEM_P0_DRQ_EP4DRQ (0x0010)
-#define CY_AS_MEM_P0_DRQ_EP5DRQ (0x0020)
-#define CY_AS_MEM_P0_DRQ_EP6DRQ (0x0040)
-#define CY_AS_MEM_P0_DRQ_EP7DRQ (0x0080)
-#define CY_AS_MEM_P0_DRQ_EP8DRQ (0x0100)
-#define CY_AS_MEM_P0_DRQ_EP9DRQ (0x0200)
-#define CY_AS_MEM_P0_DRQ_EP10DRQ (0x0400)
-#define CY_AS_MEM_P0_DRQ_EP11DRQ (0x0800)
-#define CY_AS_MEM_P0_DRQ_EP12DRQ (0x1000)
-#define CY_AS_MEM_P0_DRQ_EP13DRQ (0x2000)
-#define CY_AS_MEM_P0_DRQ_EP14DRQ (0x4000)
-#define CY_AS_MEM_P0_DRQ_EP15DRQ (0x8000)
-
-#define CY_AS_MEM_P0_DRQ_MASK (0xA1)
-#define CY_AS_MEM_P0_DRQ_MASK_MEP2DRQ (0x0004)
-#define CY_AS_MEM_P0_DRQ_MASK_MEP3DRQ (0x0008)
-#define CY_AS_MEM_P0_DRQ_MASK_MEP4DRQ (0x0010)
-#define CY_AS_MEM_P0_DRQ_MASK_MEP5DRQ (0x0020)
-#define CY_AS_MEM_P0_DRQ_MASK_MEP6DRQ (0x0040)
-#define CY_AS_MEM_P0_DRQ_MASK_MEP7DRQ (0x0080)
-#define CY_AS_MEM_P0_DRQ_MASK_MEP8DRQ (0x0100)
-#define CY_AS_MEM_P0_DRQ_MASK_MEP9DRQ (0x0200)
-#define CY_AS_MEM_P0_DRQ_MASK_MEP10DRQ (0x0400)
-#define CY_AS_MEM_P0_DRQ_MASK_MEP11DRQ (0x0800)
-#define CY_AS_MEM_P0_DRQ_MASK_MEP12DRQ (0x1000)
-#define CY_AS_MEM_P0_DRQ_MASK_MEP13DRQ (0x2000)
-#define CY_AS_MEM_P0_DRQ_MASK_MEP14DRQ (0x4000)
-#define CY_AS_MEM_P0_DRQ_MASK_MEP15DRQ (0x8000)
-
-#define CY_AS_MEM_P0_EP2_DMA_REG (0xA2)
-#define CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK (0x7FF)
-#define CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL (1 << 12)
-#define CY_AS_MEM_P0_EP3_DMA_REG (0xA3)
-#define CY_AS_MEM_P0_EP4_DMA_REG (0xA4)
-#define CY_AS_MEM_P0_EP5_DMA_REG (0xA5)
-#define CY_AS_MEM_P0_EP6_DMA_REG (0xA6)
-#define CY_AS_MEM_P0_EP7_DMA_REG (0xA7)
-#define CY_AS_MEM_P0_EP8_DMA_REG (0xA8)
-#define CY_AS_MEM_P0_EP9_DMA_REG (0xA9)
-#define CY_AS_MEM_P0_EP10_DMA_REG (0xAA)
-#define CY_AS_MEM_P0_EP11_DMA_REG (0xAB)
-#define CY_AS_MEM_P0_EP12_DMA_REG (0xAC)
-#define CY_AS_MEM_P0_EP13_DMA_REG (0xAD)
-#define CY_AS_MEM_P0_EP14_DMA_REG (0xAE)
-#define CY_AS_MEM_P0_EP15_DMA_REG (0xAF)
-
-#define CY_AS_MEM_IROS_SLB_DATARET (0xC0)
-
-#define CY_AS_MEM_IROS_IO_CFG (0xC1)
-#define CY_AS_MEM_IROS_IO_CFG_GPIODRVST_MASK (0x0003)
-#define CY_AS_MEM_IROS_IO_CFG_GPIOSLEW_MASK (0x0004)
-#define CY_AS_MEM_IROS_IO_CFG_PPIODRVST_MASK (0x0018)
-#define CY_AS_MEM_IROS_IO_CFG_PPIOSLEW_MASK (0x0020)
-#define CY_AS_MEM_IROS_IO_CFG_SSIODRVST_MASK (0x0300)
-#define CY_AS_MEM_IROS_IO_CFG_SSIOSLEW_MASK (0x0400)
-#define CY_AS_MEM_IROS_IO_CFG_SNIODRVST_MASK (0x1800)
-#define CY_AS_MEM_IROS_IO_CFG_SNIOSLEW_MASK (0x2000)
-
-#define CY_AS_MEM_IROS_PLL_CFG (0xC2)
-
-#define CY_AS_MEM_IROS_PXB_DATARET (0xC3)
-
-#define CY_AS_MEM_PLL_LOCK_LOSS_STAT (0xC4)
-#define CY_AS_MEM_PLL_LOCK_LOSS_STAT_PLLSTAT (0x0800)
-
-#define CY_AS_MEM_IROS_SLEEP_CFG (0xC5)
-
-#define CY_AS_MEM_PNAND_CFG (0xDA)
-#define CY_AS_MEM_PNAND_CFG_IOWIDTH_MASK (0x0001)
-#define CY_AS_MEM_PNAND_CFG_IOWIDTH_8BIT (0x0000)
-#define CY_AS_MEM_PNAND_CFG_IOWIDTH_16BIT (0x0001)
-#define CY_AS_MEM_PNAND_CFG_BLKTYPE_MASK (0x0002)
-#define CY_AS_MEM_PNAND_CFG_BLKTYPE_SMALL (0x0002)
-#define CY_AS_MEM_PNAND_CFG_BLKTYPE_LARGE (0x0000)
-#define CY_AS_MEM_PNAND_CFG_EPABYTE_POS (4)
-#define CY_AS_MEM_PNAND_CFG_EPABYTE_MASK (0x0030)
-#define CY_AS_MEM_PNAND_CFG_EPABIT_POS (6)
-#define CY_AS_MEM_PNAND_CFG_EPABIT_MASK (0x00C0)
-#define CY_AS_MEM_PNAND_CFG_LNAEN_MASK (0x0100)
-
-#define CY_AS_MEM_P0_MAILBOX0 (0xF0)
-#define CY_AS_MEM_P0_MAILBOX1 (0xF1)
-#define CY_AS_MEM_P0_MAILBOX2 (0xF2)
-#define CY_AS_MEM_P0_MAILBOX3 (0xF3)
-
-#define CY_AS_MEM_MCU_MAILBOX0 (0xF8)
-#define CY_AS_MEM_MCU_MAILBOX1 (0xF9)
-#define CY_AS_MEM_MCU_MAILBOX2 (0xFA)
-#define CY_AS_MEM_MCU_MAILBOX3 (0xFB)
-
-#endif /* !defined(__doxygen__) */
-
-#endif /* _INCLUDED_CYASREG_H_ */
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasstorage.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasstorage.h
deleted file mode 100644
index 52b93c3e481..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasstorage.h
+++ /dev/null
@@ -1,2759 +0,0 @@
-/* Cypress West Bridge API header file (cyasstorage.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASSTORAGE_H_
-#define _INCLUDED_CYASSTORAGE_H_
-
-#include "cyasmedia.h"
-#include "cyasmisc.h"
-#include "cyas_cplus_start.h"
-
-
-/*@@Storage APIs
- Summary
- This section documents the storage APIs supported by the
- West Bridge API.
-
- Description
- The storage API is based on some specific concepts which
- are referenced here.
- * <LINK Storage API Overview>
- * Addressing
- * Ownership
- * <LINK Asynchronous Versus Synchronous Operation>
-*/
-
-/*@@Storage API Overview
- Summary
- Storage devices are identified by media type. Each media
- type is considered a single logical device.
-
- Description
- Each media type has a consistent block size and consists
- of a set of logical blocks numbered from 0 to N - 1 where
- N is the size of the
- media type in blocks. The mass storage APIs defined below
- provide the
- capability to query for devices that are present, and
- read/write data to/from
- these devices.
-*/
-
-/*@@Addressing
- Summary
- Blocks within a storage device are addressed by a hierarchical
- block address. This
- address consists of the bus number, physical device,
- logical unit, and finally
- block address.
-
- Description
- While currently only a single device of each media type
- is supported, the address
- space reserves space in the future for multiple devices
- of each type. Therefore
- the second element of the address is the specific device
- being addressed within
- a given device type. For this release of the software,
- this value will always be
- zero to address the first device.
-
- The third element of the address is the logical unit.
- A device being managed
- by West Bridge can be partitioned into multiple logical
- units. This partition
- information is stored on each device itself. Currently,
- one of the storage devices
- managed by West Bridge can be partitioned into two
- logical units.
-
- Finally a logical block address is given within the
- logical unit to address an
- individual block.
-*/
-
-/*@@Ownership
- Summary
- While West Bridge supports concurrent block level
- operations from both the USB port and
- the processor port, this is not desirable in most
- situations as the file system
- contained on the storage media cannot be accessed
- concurrently. To ensure access
- by only one of USB and the processor, the West Bridge
- API provides for ownership of storage
- devices based on media type.
-
- Description
- The processor requests ownership of a given media type
- by calling CyAsStorageClaim().
- The firmware in West Bridge releases control of the
- media and signals the processor through
- the event callback registered with
- CyAsStorageRegisterCallback(). The specific event is
- the CyAsStorageProcessor. The processor can later
- release the media via a call to
- CyAsStorageRelease(). This call is immediate and
- no callback is required.
-
- If the processor has claimed storage and the USB port
- is connected, West Bridge will need to
- claim the storage to manage the mass storage device.
- West Bridge requests the storage through
- the event callback registered with
- CyAsStorageRegisterCallback(). The specific event is
- CyAsStorageAntioch and is named as such to reflect
- the USB view of storage. This callback
- is a request for the processor to release storage.
- The storage is not actually released
- until the processor calls CyAsStorageRelease().
-
- Note that the CyAsStorageAntioch is only sent when the
- USB storage device is enumerated and
- NOT at every USB operation. The ownership of a given
- storage media type is assumed to belong
- to the processor until the USB connection is established.
- At that point, the storage ownership
- is transferred to West Bridge. After the USB connection
- is broken, ownership can be transferred
- back to the processor.
-*/
-
-/*@@Asynchronous Versus Synchronous Operation
- Summary
- When read or write operations are performed to the
- storage devices, these operations may be
- synchronous or asynchronous. A synchronous operation
- is an operation where the read or write
- operation is requested and the function does not return
- until the operation is complete. This
- type of function is the easiest to use but does not
- provide for optimal usage of the P port processor time.
-
- Description
- An asynchronous operation is one where the function returns
- as soon as the request is started.
- The specific read and write request will complete at some
- time in the future and the P port
- processor will be notified via a callback function. While
- asynchronous functions provide for
- much better usage of the CPU, these functions have more
- stringent requirements for use. First,
- any buffer used for data transfer must remain valid from the
- function call that requests the operation
- until the callback function is called. This basically
- implies that stack-based buffers
- are not acceptable for asynchronous calls. Second, error
- handling must be deferred until the
- callback function is called, which indicates any error
- that may have occurred.
-*/
-
-/*@@Partitioning
- Summary
- West Bridge API and firmware support the creation of up to
- two logical partitions on one
- of the storage devices that are managed by West Bridge. The
- partitions are managed through
- the CyAsStorageCreatePPartition and CyAsStorageRemovePPartition
- APIs.
-
- Description
- The CyAsStorageCreatePPartition API is used to divide the total
- storage on a storage
- device into two logical units or partitions. Since the partition
- information is stored
- on the storage device in a custom format, partitions should
- only be created on fixed
- storage devices (i.e., no removable SD/MMC cards). Any data
- stored on the device
- before the creation of the partition is liable to be lost when
- a partition is created.
-
- The CyAsStorageRemovePPartition API is used to remove the
- stored partition information,
- so that all of the device's capacity is treated as a single
- partition again.
-
- When a storage device with two partitions (units) is being
- enumerated as a mass storage
- device through the West Bridge, it is possible to select the
- partitions to be made
- visible to the USB host. This is done through the
- CyAsUsbSelectMSPartitions API.
-*/
-
-/*********************************
- * West Bridge Constants
- **********************************/
-
-/* Summary
- This constant indicates a raw device access to the read/write
- functions
-
- Description
- When performing reading and writing operations on the
- storage devices attached
- to West Bridge, there are cases where writes need to
- happen to raw devices, versus
- the units contained within a device. This is
- specifically required to manage
- the partitions within physical devices. This constant
- is used in calls to
- CyAsStorageRead(), CyAsStorageReadAsync(),
- CyAsStorageWrite() and
- CyAsStorageWriteAsync(), to indicate that the raw
- physical device is being
- accessed and not any specific unit on the device.
-
- See Also
- * CyAsStorageRead
- * CyAsStorageReadAsync
- * CyAsStorageWrite
- * CyAsStorageWriteAsync
-*/
-#define CY_AS_LUN_PHYSICAL_DEVICE (0xffffffff)
-
-/* Summary
- This constant represents the maximum DMA burst length
- supported on a storage endpoint
-
- Description
- West Bridge reserves separate endpoints for accessing
- storage media through the
- CyAsStorageRead() and CyAsStorageWrite() calls. The
- maximum size of these
- endpoints is always 512 bytes, regardless of status
- and speed of the USB
- connection.
-*/
-#define CY_AS_STORAGE_EP_SIZE (512)
-
-/********************************
- * West Bridge Types
- *******************************/
-
-/* Summary
- This type indicates the type of event in an event
- callback from West Bridge
-
- Description
- At times West Bridge needs to inform the P port
- processor of events that have
- occurred. These events are asynchronous to the
- thread of control on the P
- port processor and as such are generally delivered
- via a callback function that
- is called as part of an interrupt handler. This
- type indicates the reason for
- the call to the callback function.
-
- See Also
- * CyAsStorageEventCallback
- * CyAsStorageRegisterCallback
-*/
-typedef enum cy_as_storage_event {
- /* This event occurs when the West Bridge device has
- detected a USB connect and has enumerated the
- storage controlled by west bridge to the USB port.
- this event is the signal that the processor
- needs to release the storage media. west bridge will
- not have control of the storage media until the
- processor calls cy_as_release_storage() to release
- the specific media. */
- cy_as_storage_antioch,
-
- /* This event occurs when the processor has requested
- ownership of a given media type and west bridge has
- released the media. this event is an indicator
- that the transfer of ownership is complete and the
- processor now owns the given media type. */
- cy_as_storage_processor,
-
- /* This event occurs when a removable media type has
- been removed. */
- cy_as_storage_removed,
-
- /* This event occurs when a removable media type has
- been inserted. */
- cy_as_storage_inserted,
-
- /* This event occurs when the West Bridge device
- * perceives an interrupt from an SDIO card */
- cy_as_sdio_interrupt
-
-} cy_as_storage_event;
-
-/* Summary
- This type gives the type of the operation in a storage
- operation callback
-
- Description
- This type is used in the callback function for asynchronous
- operation. This type indicates whether it is a
- CyAsStorageRead() or CyAsStorageWrite() operation that
- has completed.
-
- See Also
- * <LINK Asynchronous Versus Synchronous Operation>
- * CyAsStorageRead
- * CyAsStorageWrite
-*/
-typedef enum cy_as_oper_type {
- /* A data read operation */
- cy_as_op_read,
- /* A data write operation */
- cy_as_op_write
-} cy_as_oper_type;
-
-/* Summary
- This data structure describes a specific type of media
-
- Description
- This data structure is the return value from the
- CyAsStorageQueryDevice function. This structure provides
- information about the specific storage device being queried.
-
- See Also
- * CyAsStorageQueryDevice
-*/
-typedef struct cy_as_device_desc {
- /* Type of device */
- cy_as_media_type type;
- /* Is the device removable */
- cy_bool removable;
- /* Is the device writeable */
- cy_bool writeable;
- /* Basic block size for device */
- uint16_t block_size;
- /* Number of LUNs on the device */
- uint32_t number_units;
- /* Is the device password locked */
- cy_bool locked;
- /* Size in bytes of an Erase Unit. Block erase operation
- is only supported for SD storage, and the erase_unit_size
- is invalid for all other kinds of storage. */
- uint32_t erase_unit_size;
-} cy_as_device_desc;
-
-/* Summary
- This data structure describes a specific unit on a
- specific type of media
-
- Description
- This data structure is the return value from the
- CyAsStorageQueryUnit function. This structure provides
- information about the specific unit.
-
- See Also
- * CyAsStorageQueryUnit
-*/
-typedef struct cy_as_unit_desc {
- /* Type of device */
- cy_as_media_type type;
- /* Basic block size for device */
- uint16_t block_size;
- /* Physical start block for LUN */
- uint32_t start_block;
- /* Number of blocks in the LUN */
- uint32_t unit_size;
-} cy_as_unit_desc;
-
-/* Summary
- This function type defines a callback to be called after an
- asynchronous operation
-
- Description
- This function type defines a callback function that is called
- at the completion of any asynchronous read or write operation.
-
- See Also
- * CyAsStorageReadAsync()
- * CyAsStorageWriteAsync()
-*/
-typedef void (*cy_as_storage_callback)(
- /* Handle to the device completing the storage operation */
- cy_as_device_handle handle,
- /* The bus completing the operation */
- cy_as_bus_number_t bus,
- /* The device completing the operation */
- uint32_t device,
- /* The unit completing the operation */
- uint32_t unit,
- /* The block number of the completed operation */
- uint32_t block_number,
- /* The type of operation */
- cy_as_oper_type op,
- /* The error status */
- cy_as_return_status_t status
- );
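As an illustration of the callback contract just defined, a completion routine matching the cy_as_storage_callback signature might look like the sketch below. The handler name and its body are hypothetical; only the declarations in this header are assumed to be in scope.

/* Illustrative completion handler matching cy_as_storage_callback. */
static void my_storage_done(cy_as_device_handle handle,
        cy_as_bus_number_t bus, uint32_t device, uint32_t unit,
        uint32_t block_number, cy_as_oper_type op,
        cy_as_return_status_t status)
{
        (void)handle; (void)bus; (void)device; (void)unit; (void)block_number;

        if (status != CY_AS_ERROR_SUCCESS) {
                /* For asynchronous transfers, errors surface here rather
                 * than at the time of the original call. */
                return;
        }
        if (op == cy_as_op_read) {
                /* The buffer passed to the async read now holds valid data. */
        }
}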
-
-/* Summary
- This function type defines a callback to be called in the
- event of a storage related event
-
- Description
- At times West Bridge needs to inform the P port processor
- of events that have
- occurred. These events are asynchronous to the thread of
- control on the P
- port processor and as such are generally delivered via a
- callback function that
- is called as part of an interrupt handler. This type
- defines the type of function
- that must be provided as a callback function.
-
- See Also
- * CyAsStorageEvent
- * CyAsStorageRegisterCallback
-*/
-typedef void (*cy_as_storage_event_callback)(
- /* Handle to the device sending the event notification */
- cy_as_device_handle handle,
- /* The bus where the event happened */
- cy_as_bus_number_t bus,
- /* The device where the event happened */
- uint32_t device,
- /* The event type */
- cy_as_storage_event evtype,
- /* Event related data */
- void *evdata
- );
-
-/* Summary
- This function type defines a callback to be called after
- an asynchronous sdio operation
-
- Description
- The callback function is called at the completion of an
- asynchronous SDIO read or write operation.
-
- See Also
- * CyAsSdioExtendedRead()
- * CyAsSdioExtendedWrite()
-*/
-typedef void (*cy_as_sdio_callback)(
- /* Handle to the device completing the storage operation */
- cy_as_device_handle handle,
- /* The bus completing the operation */
- cy_as_bus_number_t bus,
- /* The device completing the operation */
- uint32_t device,
- /* The function number completing the operation.
- If the status of the operation is either CY_AS_ERROR_IO_ABORTED
- or CY_AS_IO_SUSPENDED then the most significant word parameter will
- contain the number of blocks still pending. */
- uint32_t function,
- /* The base address of the completed operation */
- uint32_t address,
- /* The type of operation */
- cy_as_oper_type op,
- /* The status of the operation */
- cy_as_return_status_t status
- );
-
-/* Summary
- Enumeration of SD/MMC card registers that can be read
- through the API.
-
- Description
- Some of the registers on the SD/MMC card(s) attached to the
- West Bridge can be read through the API layers. This type
- enumerates the registers that can be read.
-
- See Also
- * CyAsStorageSDRegisterRead
- */
-typedef enum cy_as_sd_card_reg_type {
- cy_as_sd_reg_OCR = 0,
- cy_as_sd_reg_CID,
- cy_as_sd_reg_CSD
-} cy_as_sd_card_reg_type;
-
-/* Summary
- Struct encapsulating parameters and return values for a
- CyAsStorageQueryDevice call.
-
- Description
- This struct holds the input parameters and the return values
- for an asynchronous CyAsStorageQueryDevice call.
-
- See Also
- * CyAsStorageQueryDevice
- */
-typedef struct cy_as_storage_query_device_data {
- /* The bus with the device to query */
- cy_as_bus_number_t bus;
- /* The logical device number to query */
- uint32_t device;
- /* The return value for the device descriptor */
- cy_as_device_desc desc_p;
-} cy_as_storage_query_device_data;
-
-
-/* Summary
- Struct encapsulating parameters and return values
- for a CyAsStorageQueryUnit call.
-
- Description
- This struct holds the input parameters and the return
- values for an asynchronous CyAsStorageQueryUnit call.
-
- See Also
- * CyAsStorageQueryUnit
- */
-typedef struct cy_as_storage_query_unit_data {
- /* The bus with the device to query */
- cy_as_bus_number_t bus;
- /* The logical device number to query */
- uint32_t device;
- /* The unit to query on the device */
- uint32_t unit;
- /* The return value for the unit descriptor */
- cy_as_unit_desc desc_p;
-} cy_as_storage_query_unit_data;
-
-/* Summary
- Struct encapsulating the input parameter and return
- values for a CyAsStorageSDRegisterRead call.
-
- Description
- This struct holds the input parameter and return
- values for an asynchronous CyAsStorageSDRegisterRead
- call.
-
- See Also
- * CyAsStorageSDRegisterRead
- */
-typedef struct cy_as_storage_sd_reg_read_data {
- /* Pointer to the result buffer. */
- uint8_t *buf_p;
- /* Length of data to be copied in bytes. */
- uint8_t length;
-} cy_as_storage_sd_reg_read_data;
-
-/* Summary
- Controls which pins are used for card detection
-
- Description
- When a StorageDeviceControl call is made to enable or
- disable card detection, this enum is passed in to
- control which pin is used for the detection.
-
- See Also
- * CyAsStorageDeviceControl
-*/
-typedef enum cy_as_storage_card_detect {
- cy_as_storage_detect_GPIO,
- cy_as_storage_detect_SDAT_3
-} cy_as_storage_card_detect;
-
-#ifndef __doxygen__
-#define cy_as_storage_detect_GPIO_0 cy_as_storage_detect_GPIO
-
-/* Length of OCR value in bytes. */
-#define CY_AS_SD_REG_OCR_LENGTH (4)
-/* Length of CID value in bytes. */
-#define CY_AS_SD_REG_CID_LENGTH (16)
-/* Length of CSD value in bytes. */
-#define CY_AS_SD_REG_CSD_LENGTH (16)
-/* Max. length of register response in words. */
-#define CY_AS_SD_REG_MAX_RESP_LENGTH (10)
-
-#endif
-
-/* Summary
- This data structure is the data passed via the evdata
- parameter on a USB event callback for the mass storage
- device progress event.
-
- Description
- This data structure reports the number of sectors that have
- been written and read on the USB mass storage device since
- the last event report. The corresponding event is only sent
- when either the number of writes, or the number of reads has
- crossed a pre-set threshold.
-
- See Also
- * CyAsUsbEventCallback
- * CyAsUsbRegisterCallback
-*/
-typedef struct cy_as_m_s_c_progress_data {
- /* Number of sectors written since the last event. */
- uint32_t wr_count;
- /* Number of sectors read since the last event. */
- uint32_t rd_count;
-} cy_as_m_s_c_progress_data;
-
-/* Summary
-Flag to set Direct Write operation to read back from the
-address written to.
-
-
- See Also
- *CyAsSdioDirectWrite()
-*/
-#define CY_SDIO_RAW (0x01)
-
-
-/* Summary
-Flag to set Extended Read and Write to perform IO
-using a FIFO, i.e. read or write from the specified
-address only.
-
- See Also
- *CyAsSdioExtendedRead()
- *CyAsSdioExtendedWrite()
-*/
-#define CY_SDIO_OP_FIFO (0x00)
-
-/* Summary
-Flag to set Extended Read and Write to perform incremental
-IO using the address provided as the base address.
-
-
- See Also
- *CyAsSdioExtendedRead()
- *CyAsSdioExtendedWrite()
-*/
-#define CY_SDIO_OP_INCR (0x02)
-
-/* Summary
-Flag to set Extended Read and Write to Block Mode operation
-
- See Also
- *CyAsSdioExtendedRead()
- *CyAsSdioExtendedWrite()
-*/
-#define CY_SDIO_BLOCKMODE (0x04)
-
-/* Summary
-Flag to set Extended Read and Write to Byte Mode operation
-
- See Also
- *CyAsSdioExtendedRead()
- *CyAsSdioExtendedWrite()
-*/
-#define CY_SDIO_BYTEMODE (0x00)
-
-/* Summary
-Flag to force re/initialization of a function.
-
-Description
-If not set, a call to CyAsSdioInitFunction()
-will not initialize a function that has been previously
-initialized.
- See Also
- *CyAsSdioInitFunction()
- */
-#define CY_SDIO_FORCE_INIT (0x40)
-
-/* Summary
-Flag to re-enable the SDIO interrupts.
-
-Description
-Used with a direct read or direct write
-after the interrupt triggered by SDIO has been serviced
-and cleared, to reset the West Bridge SDIO interrupt.
- See Also
- *CyAsSdioDirectRead()
- *CyAsSdioDirectWrite()
-*/
-
-#define CY_SDIO_REARM_INT (0x80)
-
-
-/* Summary
- Flag to check if 4 bit support is enabled on a
- low speed card
- See Also
- <link CyAsSDIOCard::card_capability>*/
-#define CY_SDIO_4BLS (0x80)
-
-/* Summary
- Flag to check if card is a low speed card
- See Also
- <link CyAsSDIOCard::card_capability> */
-#define CY_SDIO_LSC (0x40)
-
-/* Summary
- Flag to check if interrupt during multiblock data
- transfer is enabled
- See Also
- <link CyAsSDIOCard::card_capability>*/
-#define CY_SDIO_E4MI (0x20)
-
-/* Summary
- Flag to check if interrupt during multiblock data
- transfer is supported
- See Also
- <link CyAsSDIOCard::card_capability> */
-#define CY_SDIO_S4MI (0x10)
-
-/* Summary
- Flag to check if card supports function suspending.
- See Also
- <link CyAsSDIOCard::card_capability> */
-#define CY_SDIO_SBS (0x08)
-
-/* Summary
- Flag to check if card supports SDIO Read-Wait
- See Also
- <link CyAsSDIOCard::card_capability> */
-#define CY_SDIO_SRW (0x04)
-
-/* Summary
- Flag to check if card supports multi-block transfers
- See Also
- <link CyAsSDIOCard::card_capability> */
-#define CY_SDIO_SMB (0x02)
-
-/* Summary
- Flag to check if card supports Direct IO commands
- during execution of an Extended
- IO function
- See Also
- <link CyAsSDIOCard::card_capability>*/
-#define CY_SDIO_SDC (0x01)
-
-/* Summary
- Flag to check if function has a CSA area.
- See Also
- <link CyAsSDIOFunc::csa_bits> */
-#define CY_SDIO_CSA_SUP (0x40)
-
-/* Summary
- Flag to check if CSA access is enabled.
- See Also
- <link CyAsSDIOFunc::csa_bits> */
-#define CY_SDIO_CSA_EN (0x80)
-
-/* Summary
- Flag to check if CSA is Write protected.
- See Also
- <link CyAsSDIOFunc::csa_bits> */
-#define CY_SDIO_CSA_WP (0x01)
-
-/* Summary
- Flag to check if CSA formatting is prohibited.
- See Also
- <link CyAsSDIOFunc::csa_bits>*/
-#define CY_SDIO_CSA_NF (0x02)
-
-/* Summary
- Flag to check if the function allows wake-up from low
- power mode using some vendor specific method.
- See Also
- <link CyAsSDIOFunc::wakeup_support>*/
-#define CY_SDIO_FN_WUS (0x01)
-
-
-/* Summary
- This data structure stores SDIO function 0
- parameters for an SDIO card
-*/
-typedef struct cy_as_sdio_card {
- /* Number of functions present on the card. */
- uint8_t num_functions;
- /* Memory present(Combo card) or not */
- uint8_t memory_present;
- /* 16 bit manufacturer ID */
- uint16_t manufacturer__id;
- /* Additional vendor specific info */
- uint16_t manufacturer_info;
- /* Max Block size for function 0 */
- uint16_t maxblocksize;
- /* Block size used for function 0 */
- uint16_t blocksize;
- /* SDIO version supported by the card */
- uint8_t sdio_version;
- /* Card capability flags */
- uint8_t card_capability;
-} cy_as_sdio_card;
-
-/* Summary
- This data structure stores SDIO function 1-7 parameters
- for an SDIO card
-*/
-typedef struct cy_as_sdio_func {
- /* SDIO function code. 0 if non standard function */
- uint8_t function_code;
- /* Extended function type code for non-standard function */
- uint8_t extended_func_code;
- /* Max IO Blocksize supported by the function */
- uint16_t maxblocksize;
- /* IO Blocksize used by the function */
- uint16_t blocksize;
- /* 32 bit product serial number for the function */
- uint32_t card_psn;
- /* Code storage area variables */
- uint8_t csa_bits;
- /* Function wake-up support */
- uint8_t wakeup_support;
-} cy_as_sdio_func;
-
-/***********************************
- * West Bridge Functions
- ************************************/
-
-/* Summary
- This function starts the West Bridge storage module.
-
- Description
- This function initializes the West Bridge storage software
- stack and readies this module to service storage related
- requests. If the stack is already running, the reference
- count for the stack is incremented.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was passed in
- * CY_AS_ERROR_SUCCESS - the module started successfully
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
-
- See Also
- * CyAsStorageStop
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_start(
- /* Handle to the device */
- cy_as_device_handle handle,
- /* Callback to be called when the operation is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-
-/* Summary
- This function stops the West Bridge storage module.
-
- Description
- This function decrements the reference count for the
- storage stack and if this count is zero, the storage
- stack is shut down. The shutdown frees all resources
- associated with the storage stack.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Notes
- While all resources associated with the storage stack
- will be freed if a shutdown occurs,
- resources associated with underlying layers of the
- software will not be freed if they
- are shared by the USB stack and the USB stack is
- active. Specifically the DMA manager,
- the interrupt manager, and the West Bridge
- communications module are all shared by both the
- USB stack and the storage stack.
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge
- * device has not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not
- * been loaded into West Bridge
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_SUCCESS - this module was shut
- * down successfully
- * CY_AS_ERROR_TIMEOUT - a timeout occurred
- * communicating with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING
- * CY_AS_ERROR_ASYNC_PENDING
- * CY_AS_ERROR_OUT_OF_MEMORY
-
- See Also
- * CyAsStorageStart
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_stop(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* Callback to be called when the operation is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-
-/* Summary
- This function is used to register a callback function
- for the storage API.
-
- Description
- At times West Bridge needs to inform the P port processor
- of events that have occurred. These events are asynchronous
- to the thread of control on the P
- port processor and as such are generally delivered via a
- callback function that
- is called as part of an interrupt handler. This function
- registers the callback
- function that is called when an event occurs. Each call
- to this function
- replaces any old callback function with a new callback
- function supplied on
- the most recent call. This function can also be called
- with a callback function
- of NULL in order to remove any existing callback function
-
- * Valid In Asynchronous Callback:YES
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device
- * has not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has
- * not been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle
- * was passed in
- * CY_AS_ERROR_SUCCESS - the function was registered
- * successfully
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
-
- See Also
- * CyAsStorageEventCallback
- * CyAsStorageEvent
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_register_callback(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The callback function to call for async storage events */
- cy_as_storage_event_callback callback
- );
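A minimal sketch of the ownership handshake described under Ownership above, written against the signatures in this header. The handler name is hypothetical, error checking is omitted, and the inline release is simplified; a real handler might defer the release out of interrupt context or supply a completion callback instead of the null arguments.

/* Illustrative event handler: hand the media back when West Bridge asks. */
static void my_storage_event(cy_as_device_handle handle,
        cy_as_bus_number_t bus, uint32_t device,
        cy_as_storage_event evtype, void *evdata)
{
        (void)evdata;

        if (evtype == cy_as_storage_antioch)
                /* USB has enumerated the storage; release this device. */
                cy_as_storage_release(handle, bus, device, 0, 0);
}

/* Typically registered once after cy_as_storage_start() completes:
 *     cy_as_storage_register_callback(handle, my_storage_event);
 */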
-
-/* Summary
- This function claims a given media type.
-
- Description
- This function communicates to West Bridge that the
- processor wants control of the
- given storage media type. Each media type can be
- claimed or released by the
- processor independently. As the processor is the
- master for the storage,
- West Bridge should release control of the requested
- media as soon as possible and
- signal the processor via the CyAsStorageProcessor event.
-
- * Valid In Asynchronous Callback: NO
-
- Notes
- This function just notifies West Bridge that the storage
- is desired. The storage
- has not actually been released by West Bridge until the
- registered callback function
- is called with the CyAsStorageProcessor event
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device
- * has not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_SUCCESS - this request was successfully
- * transmitted to the West Bridge device
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_MEDIA
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
- * CY_AS_ERROR_NOT_ACQUIRED
-
- See Also:
- * CyAsStorageClaim
- * CyAsStorageRelease
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_claim(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The bus to claim */
- cy_as_bus_number_t bus,
- /* The device to claim */
- uint32_t device,
- /* Callback to be called when the operation is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-
-/* Summary
- This function releases a given media type.
-
- Description
- This function communicates to West Bridge that the
- processor has released control of
- the given storage media type. Each media type can
- be claimed or released by the
- processor independently. As the processor is the
- master for the storage, West Bridge
- can now assume ownership of the media type. No callback
- or event is generated.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device
- * has not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle
- * was passed in
- * CY_AS_ERROR_SUCCESS - the media was successfully
- * released
- * CY_AS_ERROR_MEDIA_NOT_CLAIMED - the media was not
- * claimed by the P port
- * CY_AS_ERROR_TIMEOUT - a timeout occurred
- * communicating with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_MEDIA
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
-
- See Also
- * CyAsStorageClaim
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_release(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The bus to release */
- cy_as_bus_number_t bus,
- /* The device to release */
- uint32_t device,
- /* Callback to be called when the operation is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-
-/* Summary
- This function returns information about the number of devices present
- on a given bus
-
- Description
- This function retrieves information about how many devices are
- present on the given
- West Bridge bus.
-
- * Valid In Asynchronous Callback: NO
-
- Notes
- While the current implementation of West Bridge only
- supports one logical device of
- each media type, future versions of West Bridge/Antioch may
- support multiple devices.
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device
- * has not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_SUCCESS - the media information was
- * returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred
- * communicating with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
-
- See Also
- * CyAsStorageQueryDevice
- * CyAsStorageQueryUnit
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_query_bus(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The bus to query */
- cy_as_bus_number_t bus,
- /* The return value containing the number of
- devices present for this media type */
- uint32_t *count,
- /* Callback to be called when the operation is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-
-/* Summary
- This function information about the number of devices
- present for a given media type
-
- Description
- This function retrieves information about how many
- devices of a given media type are attached to West Bridge.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Notes
- While the current implementation of West Bridge only
- supports one logical device of each media type, future
- versions of West Bridge may support multiple devices.
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device
- * has not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_SUCCESS - the media information was
- * returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred
- * communicating with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
-
- See Also
- * CyAsStorageQueryMedia
- * CyAsMediaType
- * CyAsStorageQueryDevice
- * CyAsStorageQueryUnit
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_query_media(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The type of media to query */
- cy_as_media_type type,
- /* The return value containing the number of
- devices present for this media type */
- uint32_t *count,
- /* Callback to be called when the operation is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-
-/* Summary
- This function returns information about a given device
- of a specific media type
-
- Description
- This function retrieves information about a device of a
- given type of media. The function is called with a given
- media type and device and a pointer to a media descriptor
- (CyAsDeviceDesc). This function fills in the data in the
- media descriptor to provide information about the
- attributes of the given device.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Notes
- Currently this API only supports a single logical device
- of each media type. Therefore the only acceptable value
- for the parameter device is zero (0).
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_SUCCESS - the media information was
- * returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_NO_SUCH_MEDIA
- * CY_AS_ERROR_NO_SUCH_DEVICE
- * CY_AS_ERROR_INVALID_RESPONSE
-
- See Also
- * CyAsMediaType
- * CyAsStorageQueryMedia
- * CyAsStorageQueryUnit
- * CyAsDeviceDesc
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_query_device(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* Parameters and return value for the query call */
- cy_as_storage_query_device_data *data,
- /* Callback to be called when the operation is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
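A minimal sketch of a blocking device query, assuming that omitting the callback (passing 0 for cb and client) makes the call synchronous, as the "if cb supplied" notes in this header suggest. The helper name and the bus/device numbers are placeholders; the field names come from cy_as_storage_query_device_data and cy_as_device_desc above.

/* Illustrative blocking query of device 0 on bus 0. */
static int query_first_device(cy_as_device_handle handle,
        cy_as_device_desc *desc_out)
{
        cy_as_storage_query_device_data qdata = { 0 };

        qdata.bus = 0;
        qdata.device = 0;

        if (cy_as_storage_query_device(handle, &qdata, 0, 0) !=
            CY_AS_ERROR_SUCCESS)
                return -1;

        *desc_out = qdata.desc_p;   /* block_size, number_units, etc. */
        return 0;
}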
-
-/* Summary
- This function returns information about a given unit on a
- specific device
-
- Description
- This function retrieves information about a device of a
- given logical unit. The function is called with a given
- media type, device address, unit address, and a pointer
- to a unit descriptor (CyAsUnitDesc). This function fills
- in the data in the unit descriptor to provide information
- about the attributes of the device of the given logical
- unit.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_SUCCESS - the media information was returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_NO_SUCH_DEVICE
- * CY_AS_ERROR_NO_SUCH_UNIT
- * CY_AS_ERROR_INVALID_RESPONSE
-
-
- See Also
- * CyAsMediaType
- * CyAsStorageQueryMedia
- * CyAsStorageQueryDevice
- * CyAsUnitDesc
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_query_unit(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* Parameters and return value for the query call */
- cy_as_storage_query_unit_data *data_p,
- /* Callback to be called when the operation is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-
-/* Summary
- This function enables/disables the handling of SD/MMC card
- detection and SD/MMC write protection in West Bridge Firmware.
-
- Description
- If the detection of SD/MMC card insertion or removal is being
- done by the Processor directly, the West Bridge firmware needs
- to be instructed to disable the card detect feature. Also, if
- the hardware design does not use the SD_WP GPIO of the West
- Bridge to handle SD card's write protect notch, the handling
- of write protection in firmware should be disabled. This API
- is used to enable/disable the card detect and write protect
- support in West Bridge firmware.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the feature controls were
- * set successfully
- * CY_AS_ERROR_NO_SUCH_BUS - the specified bus is invalid
- * CY_AS_ERROR_NOT_SUPPORTED - function not supported on
- * the device in the specified bus
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device is in
- * suspended mode
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
-
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_device_control(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The bus to control */
- cy_as_bus_number_t bus,
- /* The device to control */
- uint32_t device,
- /* Enable/disable control for card detection */
- cy_bool card_detect_en,
- /* Enable/disable control for write protect handling */
- cy_bool write_prot_en,
- /* Control which pin is used for card detection */
- cy_as_storage_card_detect config_detect,
- /* Callback to be called when the operation is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-
-/* Summary
- This function reads one or more blocks of data from
- the storage system.
-
- Description
- This function synchronously reads one or more blocks
- of data from the given media
- type/device and places the data into the data buffer
- given. This function does not
- return until the data is read and placed into the buffer.
-
- * Valid In Asynchronous Callback: NO
-
- Notes
- If the Samsung CEATA drive is the target for a
- read/write operation, the maximum
- number of sectors that can be accessed through a
- single API call is limited to 2047.
- Longer accesses addressed to a Samsung CEATA drive
- can result in time-out errors.
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device
- * has not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle
- * was passed in
- * CY_AS_ERROR_SUCCESS - the media information was
- * returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred
- * communicating with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified
- * does not exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified
- * media/device pair does not exist
- * CY_AS_ERROR_NO_SUCH_UNIT - the unit specified
- * does not exist
- * CY_AS_ERROR_ASYNC_PENDING - an async operation
- * is pending
- * CY_AS_ERROR_MEDIA_ACCESS_FAILURE - there was
- * an error in reading from the media
- * CY_AS_ERROR_MEDIA_WRITE_PROTECTED - the media is
- * write protected
- * CY_AS_ERROR_INVALID_PARAMETER - Reads/Writes greater
- * than 4095 logic blocks are not allowed
-
- See Also
- * CyAsStorageReadAsync
- * CyAsStorageWrite
- * CyAsStorageWriteAsync
- * CyAsStorageCancelAsync
- * <LINK Asynchronous Versus Synchronous Operation>
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_read(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The bus to access */
- cy_as_bus_number_t bus,
- /* The device to access */
- uint32_t device,
- /* The unit to access */
- uint32_t unit,
- /* The first block to access */
- uint32_t block,
- /* The buffer where data will be placed */
- void *data_p,
- /* The number of blocks to be read */
- uint16_t num_blocks
- );
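A minimal usage sketch for the synchronous read, assuming bus 0, device 0, unit 0. The helper name is hypothetical; real code would size the caller's buffer from the block_size returned by a prior device query (512 bytes is typical).

/* Illustrative blocking read of a single block; buf must hold at least
 * one block_size worth of data. */
static cy_as_return_status_t read_one_block(cy_as_device_handle handle,
        uint32_t block, void *buf)
{
        return cy_as_storage_read(handle, 0, 0, 0, block, buf, 1);
}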
-
-/* Summary
- This function asynchronously reads one or more blocks of data
- from the storage system.
-
- Description
- This function asynchronously reads one or more blocks of
- data from the given media
- type/device and places the data into the data buffer given.
- This function returns
- as soon as the request is transmitted to the West Bridge
- device but before the data is
- available. When the read is complete, the callback function
- is called to indicate the
- data has been placed into the data buffer. Note that the
- data buffer must remain
- valid from when the read is requested until the callback
- function is called.
-
- * Valid In Asynchronous Callback: YES
-
- Notes
- If the Samsung CEATA drive is the target for a read/write
- operation, the maximum
- number of sectors that can be accessed through a single API
- call is limited to 2047.
- Longer accesses addressed to a Samsung CEATA drive can
- result in time-out errors.
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device
- * has not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle
- * was passed in
- * CY_AS_ERROR_SUCCESS - the media information was
- * returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred
- * communicating with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_ASYNC_PENDING - an async operation
- * is pending
- * CY_AS_ERROR_MEDIA_ACCESS_FAILURE - there was an error
- * in reading from the media
- * CY_AS_ERROR_MEDIA_WRITE_PROTECTED - the media is
- * write protected
- * CY_AS_ERROR_QUERY_DEVICE_NEEDED - Before an
- * asynchronous read can be issue a call to
- * CyAsStorageQueryDevice must be made
- * CY_AS_ERROR_INVALID_PARAMETER - Reads/Writes greater
- * than 4095 logic blocks are not allowed
-
- See Also
- * CyAsStorageRead
- * CyAsStorageWrite
- * CyAsStorageWriteAsync
- * CyAsStorageCancelAsync
- * CyAsStorageQueryDevice
- * <LINK Asynchronous Versus Synchronous Operation>
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_read_async(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The bus to access */
- cy_as_bus_number_t bus,
- /* The device to access */
- uint32_t device,
- /* The unit to access */
- uint32_t unit,
- /* The first block to access */
- uint32_t block,
- /* The buffer where data will be placed */
- void *data_p,
- /* The number of blocks to be read */
- uint16_t num_blocks,
- /* The function to call when the read is complete
- or an error occurs */
- cy_as_storage_callback callback
- );
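-/* Example (editor's illustration, not part of the original header)
- A sketch of an asynchronous read. The callback prototype used here
- is an assumption modelled on the deprecated cy_as_storage_callback_dep
- form (with the media type replaced by a bus number); the authoritative
- cy_as_storage_callback typedef appears earlier in this header. The
- buffer handed to cy_as_storage_read_async() must remain valid until
- the callback runs.
-*/
-static void
-example_read_done(cy_as_device_handle handle, cy_as_bus_number_t bus,
-	uint32_t device, uint32_t unit, uint32_t block_number,
-	cy_as_oper_type op, cy_as_return_status_t status)
-{
-	/* status is CY_AS_ERROR_SUCCESS once the data has been
-	 * placed into the caller's buffer. */
-	(void)handle; (void)bus; (void)device; (void)unit;
-	(void)block_number; (void)op; (void)status;
-}
-
-static cy_as_return_status_t
-example_storage_read_async(cy_as_device_handle handle, void *buf)
-{
-	/* Returns as soon as the request reaches West Bridge;
-	 * example_read_done() runs when the read completes. */
-	return cy_as_storage_read_async(handle, 1, 0, 0, 0, buf, 8,
-		example_read_done);
-}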
-
-/* Summary
- This function writes one or more blocks of data
- to the storage system.
-
- Description
- This function synchronously writes one or more blocks of
- data to the given media/device.
- This function does not return until the data is written
- into the media.
-
- * Valid In Asynchronous Callback: NO
-
- Notes
- If the Samsung CEATA drive is the target for a read/write
- operation, the maximum
- number of sectors that can be accessed through a single
- API call is limited to 2047.
- Longer accesses addressed to a Samsung CEATA drive can
- result in time-out errors.
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device
- * has not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_SUCCESS - the media information was
- * returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred
- * communicating with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does
- * not exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified
- * media/device pair does not exist
- * CY_AS_ERROR_NO_SUCH_UNIT - the unit specified
- * does not exist
- * CY_AS_ERROR_ASYNC_PENDING - an async operation
- * is pending
- * CY_AS_ERROR_MEDIA_ACCESS_FAILURE - there was an error
- * in writing to the media
- * CY_AS_ERROR_MEDIA_WRITE_PROTECTED - the media is
- * write protected
- * CY_AS_ERROR_INVALID_PARAMETER - Reads/Writes greater
- * than 4095 logical blocks are not allowed
-
- See Also
- * CyAsStorageRead
- * CyAsStorageReadAsync
- * CyAsStorageWriteAsync
- * CyAsStorageCancelAsync
- * <LINK Asynchronous Versus Synchronous Operation>
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_write(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The bus to access */
- cy_as_bus_number_t bus,
- /* The device to access */
- uint32_t device,
- /* The unit to access */
- uint32_t unit,
- /* The first block to access */
- uint32_t block,
- /* The buffer containing the data to be written */
- void *data_p,
- /* The number of blocks to be written */
- uint16_t num_blocks
- );
-
-/* Summary
- This function asynchronously writes one or more blocks
- of data to the storage system.
-
- Description
- This function asynchronously writes one or more blocks of
- data to the given media type/device.
- This function returns as soon as the request is transmitted
- to the West Bridge device
- but before the data is actually written. When the write is
- complete, the callback
- function is called to indicate the data has been physically
- written into the media.
-
- * Valid In Asynchronous Callback: YES
-
- Notes
- If the Samsung CEATA drive is the target for a read/write
- operation, the maximum
- number of sectors that can be accessed through a single API
- call is limited to 2047.
- Longer accesses addressed to a Samsung CEATA drive can
- result in time-out errors.
-
- The data buffer must remain valid from when the write is
- requested until the callback function is called.
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device
- * has not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has
- * not been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was passed in
- * CY_AS_ERROR_SUCCESS - the media information was returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_ASYNC_PENDING - an async operation is
- * pending
- * CY_AS_ERROR_MEDIA_ACCESS_FAILURE - there was an error in
- * writing to the media
- * CY_AS_ERROR_MEDIA_WRITE_PROTECTED - the media is write
- * protected
- * CY_AS_ERROR_QUERY_DEVICE_NEEDED - A query device call is
- * required before async writes are allowed
- * CY_AS_ERROR_INVALID_PARAMETER - Reads/Writes greater
- * than 4095 logical blocks are not allowed
-
- See Also
- * CyAsStorageRead
- * CyAsStorageWrite
- * CyAsStorageReadAsync
- * CyAsStorageCancelAsync
- * CyAsStorageQueryDevice
- * <LINK Asynchronous Versus Synchronous Operation>
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_write_async(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The bus to access */
- cy_as_bus_number_t bus,
- /* The device to access */
- uint32_t device,
- /* The unit to access */
- uint32_t unit,
- /* The first block to access */
- uint32_t block,
- /* The buffer where the data to be written is stored */
- void *data_p,
- /* The number of blocks to be written */
- uint16_t num_blocks,
- /* The function to call when the write is complete
- or an error occurs */
- cy_as_storage_callback callback
- );
-
-/* Summary
- This function aborts any outstanding asynchronous operation
-
- Description
- This function aborts any asynchronous block read or block
- write operation. As only a single asynchronous block read
- or write operation is possible at one time, this aborts
- the single operation in progress.
-
- * Valid In Asynchronous Callback: YES
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device
- * has not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was passed in
- * CY_AS_ERROR_SUCCESS - the media information was returned
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_OPERATION_PENDING - no asynchronous
- * operation is pending
-
- See Also
- * CyAsStorageRead
- * CyAsStorageReadAsync
- * CyAsStorageWrite
- * CyAsStorageWriteAsync
- * <LINK Asynchronous Versus Synchronous Operation>
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_cancel_async(
- /* Handle to the device with outstanding async request */
- cy_as_device_handle handle
- );
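-/* Example (editor's illustration, not part of the original header)
- Cancelling whatever block read or write is currently outstanding.
- Only one asynchronous storage operation can be pending at a time,
- so the operation to abort does not need to be identified.
-*/
-static cy_as_return_status_t
-example_cancel_pending_io(cy_as_device_handle handle)
-{
-	/* CY_AS_ERROR_NO_OPERATION_PENDING from this call simply
-	 * means nothing was in flight. */
-	return cy_as_storage_cancel_async(handle);
-}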
-
-/* Summary
- This function is used to read the content of SD registers
-
- Description
- This function is used to read the contents of the CID and
- CSD registers of the SD Card.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the read operation was successful
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was passed in
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not been
- * started
- * CY_AS_ERROR_IN_SUSPEND - The West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified media/device pair
- * does not exist
- * CY_AS_ERROR_INVALID_PARAMETER - The register type is invalid
- * or the media is not supported on the bus
- * CY_AS_ERROR_OUT_OF_MEMORY - failed to get memory to process
- * request
- * CY_AS_ERROR_INVALID_RESPONSE - communication failure with
- * West Bridge firmware
-
- See Also
- * CyAsStorageSDRegReadData
- */
-EXTERN cy_as_return_status_t
-cy_as_storage_sd_register_read(
- /* Handle to the West Bridge device. */
- cy_as_device_handle handle,
- /* The bus to query */
- cy_as_bus_number_t bus,
- /* The device to query */
- uint8_t device,
- /* The type of register to read. */
- cy_as_sd_card_reg_type reg_type,
- /* Output data buffer and length. */
- cy_as_storage_sd_reg_read_data *data_p,
- /* Callback function to call when done. */
- cy_as_function_callback cb,
- /* Call context to send to the cb function. */
- uint32_t client
- );
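-/* Example (editor's illustration, not part of the original header)
- Reading one of the SD card registers. The register enumerators and
- the layout of cy_as_storage_sd_reg_read_data are defined earlier in
- this header, so both are taken as parameters here; passing a null
- callback is assumed to select the synchronous path, mirroring the
- "0 if Synchronous" convention the SDIO calls below document.
-*/
-static cy_as_return_status_t
-example_read_sd_register(cy_as_device_handle handle,
-	cy_as_sd_card_reg_type reg_type,
-	cy_as_storage_sd_reg_read_data *reg_data)
-{
-	/* Read the requested register from device 0 on bus 1; the
-	 * contents are returned through reg_data. */
-	return cy_as_storage_sd_register_read(handle, 1, 0, reg_type,
-		reg_data, 0, 0);
-}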
-
-/* Summary
- Creates a partition starting at the given block and using the
- remaining blocks on the card.
-
- Description
- Storage devices attached to West Bridge can be partitioned
- into two units.
- The visibility of these units through the mass storage
- interface can be
- individually controlled. This API is used to partition
- a device into two.
-
- * Valid in Asynchronous Callback: Yes (if cb supplied)
- * Nestable: Yes
-
- Returns
- * CY_AS_ERROR_SUCCESS - the partition was successfully created
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was passed in
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not been
- * started
- * CY_AS_ERROR_IN_SUSPEND - The West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_USB_RUNNING - Partition cannot be created while
- * USB stack is active
- * CY_AS_ERROR_OUT_OF_MEMORY - failed to get memory to
- * process request
- * CY_AS_ERROR_INVALID_REQUEST - feature not supported by
- * active device or firmware
- * CY_AS_ERROR_INVALID_RESPONSE - communication failure with
- * West Bridge firmware
- * CY_AS_ERROR_ALREADY_PARTITIONED - the storage device already
- * has been partitioned
- * CY_AS_ERROR_INVALID_BLOCK - Size specified for the partition
- * exceeds the actual device capacity
-
- See Also
- * <LINK Partitioning>
- * CyAsStorageRemovePPartition
- */
-EXTERN cy_as_return_status_t
-cy_as_storage_create_p_partition(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* Bus on which the device to be partitioned is connected */
- cy_as_bus_number_t bus,
- /* Device number to be partitioned */
- uint32_t device,
- /* Size of partition number 0 in blocks */
- uint32_t size,
- /* Callback in case of async call */
- cy_as_function_callback cb,
- /* Client context to pass to the callback */
- uint32_t client
- );
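-/* Example (editor's illustration, not part of the original header)
- Splitting a storage device into two partitions. The size argument
- gives the number of blocks assigned to partition 0; the rest of the
- device becomes the second unit. A null callback is assumed to make
- the call synchronous, and the USB stack must not be running.
-*/
-static cy_as_return_status_t
-example_partition_device(cy_as_device_handle handle,
-	uint32_t blocks_in_unit0)
-{
-	/* Partition device 0 on bus 1. */
-	return cy_as_storage_create_p_partition(handle, 1, 0,
-		blocks_in_unit0, 0, 0);
-}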
-
-/* Summary
- Removes the partition table on a storage device connected
- to the West Bridge.
-
- Description
- Storage devices attached to West Bridge can be partitioned
- into two units. This partition information is stored on the
- device and is non-volatile. This API is used to remove the
- stored partition information and make the entire device
- visible as a single partition (unit).
-
- * Valid in Asynchronous Callback: Yes (if cb supplied)
- * Nestable: Yes
-
- Returns
- * CY_AS_ERROR_SUCCESS - the partition was successfully
- * deleted
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_IN_SUSPEND - The West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_USB_RUNNING - Partition cannot be removed
- * while USB stack is active
- * CY_AS_ERROR_OUT_OF_MEMORY - failed to get memory to
- * process request
- * CY_AS_ERROR_INVALID_REQUEST - operation not supported
- * by active device/firmware
- * CY_AS_ERROR_NO_SUCH_UNIT - the addressed device is
- * not partitioned
-
- See Also
- * <LINK Partitioning>
- * CyAsStorageCreatePPartition
- */
-EXTERN cy_as_return_status_t
-cy_as_storage_remove_p_partition(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* Bus on which device of interest is connected */
- cy_as_bus_number_t bus,
- /* Device number of interest */
- uint32_t device,
- /* Callback in case of async call */
- cy_as_function_callback cb,
- /* Client context to pass to the callback */
- uint32_t client
- );
-
-/* Summary
- Returns the amount of data read/written to the given
- device from the USB host.
-
- Description
- This function returns the number of sectors the USB host has
- read from and written to the given storage device.
-
- * Valid in Asynchronous Callback: Yes (if cb supplied)
- * Nestable: Yes
-
- Returns
- * CY_AS_ERROR_SUCCESS - API call completed successfully
- * CY_AS_ERROR_INVALID_HANDLE - Invalid West Bridge device
- * handle
- * CY_AS_ERROR_NOT_CONFIGURED - West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - No firmware image has been
- * loaded on West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - Storage stack has not been
- * started
- * CY_AS_ERROR_NOT_SUPPORTED - This function is not
- * supported by active firmware version
- * CY_AS_ERROR_OUT_OF_MEMORY - Failed to get memory to
- * process the request
- * CY_AS_ERROR_TIMEOUT - West Bridge firmware did not
- * respond to request
- * CY_AS_ERROR_INVALID_RESPONSE - Unexpected reply from
- * West Bridge firmware
-
- See Also
- * CyAsUsbSetMSReportThreshold
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_get_transfer_amount(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* Bus on which device of interest is connected */
- cy_as_bus_number_t bus,
- /* Device number of interest */
- uint32_t device,
- /* Return value containing read/write sector counts. */
- cy_as_m_s_c_progress_data *data_p,
- /* Callback in case of async call */
- cy_as_function_callback cb,
- /* Client context to pass to the callback */
- uint32_t client
- );
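-/* Example (editor's illustration, not part of the original header)
- Fetching the host read/write sector counts for a device. The fields
- of cy_as_m_s_c_progress_data are defined earlier in this header and
- are not dereferenced here; a null callback is assumed to make the
- call synchronous.
-*/
-static cy_as_return_status_t
-example_get_transfer_amount(cy_as_device_handle handle,
-	cy_as_m_s_c_progress_data *progress)
-{
-	/* On success the counts for device 0 on bus 1 are
-	 * available in *progress. */
-	return cy_as_storage_get_transfer_amount(handle, 1, 0,
-		progress, 0, 0);
-}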
-
-/* Summary
- Performs a Sector Erase on an attached SD Card
-
- Description
- This allows you to erase an attached SD card. The area to erase
- is specified in terms of a starting Erase Unit and a number of
- Erase Units. The size of each Erase Unit is defined in the
- DeviceDesc returned from a StorageQueryDevice call and it can
- differ between SD cards.
-
- A large erase can take a while to complete depending on the SD
- card. In such a case it is recommended that an async call be made.
-
- Returns
- * CY_AS_ERROR_SUCCESS - API call completed successfully
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not been
- * started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was passed in
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_ASYNC_PENDING - an async operation is pending
- * CY_AS_ERROR_MEDIA_ACCESS_FAILURE - there was an error in
- * accessing the media
- * CY_AS_ERROR_MEDIA_WRITE_PROTECTED - the media is write protected
- * CY_AS_ERROR_QUERY_DEVICE_NEEDED - A query device call is
- * required before erase is allowed
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does not exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified media/device
- * pair does not exist
- * CY_AS_ERROR_NOT_SUPPORTED - Erase is currently only supported
- * on SD and using SD-only firmware
- * CY_AS_ERROR_OUT_OF_MEMORY - failed to get memory to
- * process the request
-
- See Also
- * CyAsStorageSDRegisterRead
-*/
-EXTERN cy_as_return_status_t
-cy_as_storage_erase(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* Bus on which device of interest is connected */
- cy_as_bus_number_t bus,
- /* Device number of interest */
- uint32_t device,
- /* Erase Unit to start the erase */
- uint32_t erase_unit,
- /* Number of Erase Units to erase */
- uint16_t num_erase_units,
- /* Callback in case of async call */
- cy_as_function_callback cb,
- /* Client context to pass to the callback */
- uint32_t client
- );
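-/* Example (editor's illustration, not part of the original header)
- Erasing part of an attached SD card. The erase-unit size comes from
- the device descriptor returned by the query-device call, so callers
- are expected to convert block ranges to erase units first. A null
- callback is assumed to make the call synchronous; for large erases
- the text above recommends the asynchronous form instead.
-*/
-static cy_as_return_status_t
-example_erase_sd(cy_as_device_handle handle, uint32_t first_erase_unit,
-	uint16_t num_erase_units)
-{
-	/* Erase the given range on device 0 on bus 1. */
-	return cy_as_storage_erase(handle, 1, 0, first_erase_unit,
-		num_erase_units, 0, 0);
-}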
-
-/* Summary
- This function is used to read a Tuple from the SDIO CIS area.
-
- Description
- This function is used to read a Tuple from the SDIO CIS area.
- This function is to be used only for IO to an SDIO card as
- other media will not respond to the SDIO command set.
-
- * Valid in Asynchronous Callback: NO
- * Valid on Antioch device: NO
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_SUCCESS - the media information was returned
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device
- * is in suspend mode
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does not
- * exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified media/device
- * pair does not exist
- * CY_AS_ERROR_ASYNC_PENDING - an async operation is pending
- * CY_AS_ERROR_INVALID_REQUEST - an invalid IO request
- * type was made
- * CY_AS_ERROR_OUT_OF_MEMORY - insufficient memory available
- * CY_AS_ERROR_INVALID_RESPONSE - an error message was
- * received from the firmware
- * CY_AS_ERROR_MEDIA_ACCESS_FAILURE - there was an error in
- * reading from the media
- * CY_AS_ERROR_INVALID_FUNCTION - An IO attempt was made to
- * an invalid function
- * CY_AS_ERROR_INVALID_ENDPOINT - A DMA request was made to
- * an invalid endpoint
- * CY_AS_ERROR_ENDPOINT_DISABLED - A DMA request was made to
- * a disabled endpoint
-
-*/
-cy_as_return_status_t
-cy_as_sdio_get_c_i_s_info(
- /* Handle to the Westbridge device */
- cy_as_device_handle handle,
- /* Bus to use */
- cy_as_bus_number_t bus,
- /* Device number */
- uint32_t device,
- /* IO function Number */
- uint8_t n_function_no,
- /* Id of tuple to be fetched */
- uint16_t tuple_id,
- /* Buffer to hold tuple read from card.
- should be at least 256 bytes in size */
- uint8_t *data_p
- );
-
-
-/* Summary
- This function is used to read properties of the SDIO card.
-
- Description
- This function is used to read properties of the SDIO card
- into a CyAsSDIOCard structure.
- This function is to be used only for IO to an SDIO card as
- other media will not respond to the SDIO command set.
-
- * Valid in Asynchronous Callback: NO
- * Valid on Antioch device: NO
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not been
- * started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_SUCCESS - the card information was returned
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does not
- * exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified media/device
- * pair does not exist
- * CY_AS_ERROR_OUT_OF_MEMORY - insufficient memory available
- * CY_AS_ERROR_INVALID_RESPONSE - an error message was
- * received from the firmware
-
-*/
-cy_as_return_status_t
-cy_as_sdio_query_card(
- /* Handle to the Westbridge device */
- cy_as_device_handle handle,
- /* Bus to use */
- cy_as_bus_number_t bus,
- /* Device number */
- uint32_t device,
- /* Buffer to store card properties */
- cy_as_sdio_card *data_p
- );
-
-/* Summary
- This function is used to reset an SDIO card.
-
- Description
- This function is used to reset an SDIO card by writing to
- the reset bit in the CCCR and reinitializing the card. This
- function is to be used only for IO to an SDIO card as
- other media will not respond to the SDIO command set.
-
- * Valid in Asynchronous Callback: NO
- * Valid on Antioch device: NO
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_SUCCESS - the media information was returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does not
- * exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified media/device
- * pair does not exist
- * CY_AS_ERROR_OUT_OF_MEMORY - insufficient memory available
- * CY_AS_ERROR_INVALID_RESPONSE - an error message was
- * received from the firmware
- */
-cy_as_return_status_t
-cy_as_sdio_reset_card(
- /* Handle to the Westbridge device */
- cy_as_device_handle handle,
- /* Bus to use */
- cy_as_bus_number_t bus,
- /* Device number */
- uint32_t device
- );
-
-/* Summary
- This function performs a synchronous 1 byte read from the SDIO
- device function.
-
- Description
- This function is used to perform a synchronous 1 byte read
- from an SDIO card function. This function is to be used only
- for IO to an SDIO card as other media will not respond to the
- SDIO command set.
-
- * Valid in Asynchronous Callback: NO
- * Valid on Antioch device: NO
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was passed
- * in
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_SUCCESS - the media information was returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does not exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified media/device pair
- * does not exist
- * CY_AS_ERROR_OUT_OF_MEMORY - insufficient memory available
- * CY_AS_ERROR_INVALID_RESPONSE - an error message was received
- * from the firmware
- * CY_AS_ERROR_MEDIA_ACCESS_FAILURE - there was an error in
- * reading from the media
- * CY_AS_ERROR_INVALID_FUNCTION - An IO attempt was made to an
- * invalid function
- * CY_AS_ERROR_FUNCTION_SUSPENDED - The function to which read
- * was attempted is in suspend
-*/
-cy_as_return_status_t
-cy_as_sdio_direct_read(
- /* Handle to the Westbridge device */
- cy_as_device_handle handle,
- /* Bus to use */
- cy_as_bus_number_t bus,
- /* Device number */
- uint32_t device,
- /* IO function Number */
- uint8_t n_function_no,
- /* Address for IO */
- uint32_t address,
- /* Set to CY_SDIO_REARM_INT to reinitialize SDIO interrupt */
- uint8_t misc_buf,
- /* Buffer to hold byte read from card */
- uint8_t *data_p
- );
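-/* Example (editor's illustration, not part of the original header)
- A single-byte register read from an SDIO function. The function
- number and register address are arbitrary; CY_SDIO_REARM_INT is
- taken from the parameter comment above and re-arms the SDIO
- interrupt as part of the read.
-*/
-static cy_as_return_status_t
-example_sdio_read_byte(cy_as_device_handle handle, uint8_t func_no,
-	uint32_t reg_addr, uint8_t *value)
-{
-	/* Device 0 on bus 1; the byte read is returned in *value. */
-	return cy_as_sdio_direct_read(handle, 1, 0, func_no, reg_addr,
-		CY_SDIO_REARM_INT, value);
-}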
-
-/* Summary
- This function performs a synchronous 1 byte write to the
- SDIO device function.
-
- Description
- This function is used to perform a synchronous 1 byte write
- to an SDIO card function.
- This function is to be used only for IO to an SDIO card as
- other media will not respond to the SDIO command set.
-
- * Valid in Asynchronous Callback: NO
- * Valid on Antioch device: NO
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not been
- * started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_SUCCESS - the media information was returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does not exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified media/device
- * pair does not exist
- * CY_AS_ERROR_OUT_OF_MEMORY - insufficient memory available
- * CY_AS_ERROR_INVALID_RESPONSE - an error message was received
- * from the firmware
- * CY_AS_ERROR_MEDIA_ACCESS_FAILURE - there was an error in
- * writing to the media
- * CY_AS_ERROR_INVALID_FUNCTION - An IO attempt was made to
- * an invalid function
- * CY_AS_ERROR_FUNCTION_SUSPENDED - The function to which
- * write was attempted is in suspend
-*/
-cy_as_return_status_t
-cy_as_sdio_direct_write(
- /* Handle to the Westbridge device */
- cy_as_device_handle handle,
- /* Bus to use */
- cy_as_bus_number_t bus,
- /* Device number */
- uint32_t device,
- /* IO function Number */
- uint8_t n_function_no,
- /* Address for IO */
- uint32_t address,
- /* Set to CY_SDIO_REARM_INT to reinitialize SDIO interrupt,
- set to CY_SDIO_RAW for read after write */
- uint8_t misc_buf,
- /* Byte to write */
- uint16_t argument,
- /* Buffer to hold byte read from card in Read after write mode */
- uint8_t *data_p
- );
-
-/* Summary
- This function is used to set the blocksize of an SDIO function.
-
- Description
- This function is used to set the blocksize of an SDIO function.
- This function is to be used only for IO to an SDIO card as
- other media will not respond to the SDIO command set.
-
- * Valid in Asynchronous Callback: NO
- * Valid on Antioch device: NO
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_SUCCESS - the media information was
- * returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does not
- * exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified media/device
- * pair does not exist
- * CY_AS_ERROR_OUT_OF_MEMORY - insufficient memory
- * available
- * CY_AS_ERROR_INVALID_RESPONSE - an error message was
- * received from the firmware
- * CY_AS_ERROR_MEDIA_ACCESS_FAILURE - there was an error in
- * accessing the media
- * CY_AS_ERROR_INVALID_FUNCTION - An IO attempt was made
- * to an invalid function
- * CY_AS_ERROR_INVALID_BLOCKSIZE - An incorrect blocksize
- * was passed to the function.
- * CY_AS_ERROR_FUNCTION_SUSPENDED - The function to which
- * write was attempted is in suspend
-*/
-cy_as_return_status_t
-cy_as_sdio_set_blocksize(
- /* Handle to the Westbridge device */
- cy_as_device_handle handle,
- /* Bus to use */
- cy_as_bus_number_t bus,
- /* Device number */
- uint32_t device,
- /* IO function Number */
- uint8_t n_function_no,
- /* Block size to set. */
- uint16_t blocksize
- );
-
-/* Summary
- This function is used to read Multibyte/Block data from an
- IO function.
-
- Description
- This function is used to read Multibyte/Block data from an
- IO function. This function is to be used only for IO to an
- SDIO card as other media will not respond to the SDIO
- command set.
-
- * Valid in Asynchronous Callback: YES
- * Valid on Antioch device: NO
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_SUCCESS - the media information was returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does not exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified media/device
- * pair does not exist
- * CY_AS_ERROR_ASYNC_PENDING - an async operation is pending
- * CY_AS_ERROR_OUT_OF_MEMORY - insufficient memory available
- * CY_AS_ERROR_INVALID_RESPONSE - an error message was received
- * from the firmware
- * CY_AS_ERROR_MEDIA_ACCESS_FAILURE - there was an error in
- * reading from the media
- * CY_AS_ERROR_INVALID_FUNCTION - An IO attempt was made to
- * an invalid function
- * CY_AS_ERROR_INVALID_BLOCKSIZE - An incorrect blocksize or
- * block count was passed to the function.
- * CY_AS_ERROR_FUNCTION_SUSPENDED - The function to which
- * read was attempted is in suspend
- * CY_AS_ERROR_IO_ABORTED - The IO operation was aborted
- * CY_AS_ERROR_IO_SUSPENDED - The IO operation was suspended
- * CY_AS_ERROR_INVALID_REQUEST - An invalid request was
- * passed to the card.
-
-*/
-cy_as_return_status_t
-cy_as_sdio_extended_read(
- /* Handle to the Westbridge device */
- cy_as_device_handle handle,
- /* Bus to use */
- cy_as_bus_number_t bus,
- /* Device number */
- uint32_t device,
- /* IO function Number */
- uint8_t n_function_no,
- /* Base Address for IO */
- uint32_t address,
- /* Set to CY_SDIO_BLOCKMODE for block IO,
- CY_SDIO_BYTEMODE for multibyte IO,
- CY_SDIO_OP_FIFO to read multiple bytes from the
- same address, CY_SDIO_OP_INCR to read bytes from
- the incrementing addresses */
- uint8_t misc_buf,
- /* Block/Byte count to read */
- uint16_t argument,
- /* Buffer to hold data read from card */
- uint8_t *data_p,
- /* Callback in case of Asynchronous call. 0 if Synchronous */
- cy_as_sdio_callback callback
- );
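-/* Example (editor's illustration, not part of the original header)
- A synchronous block-mode read from an SDIO function. The block size
- is programmed first with cy_as_sdio_set_blocksize(); CY_SDIO_BLOCKMODE
- and the "0 if Synchronous" callback convention are taken from the
- parameter comments above. buf must hold num_blocks blocks of
- blocksize bytes each.
-*/
-static cy_as_return_status_t
-example_sdio_block_read(cy_as_device_handle handle, uint8_t func_no,
-	uint32_t address, uint16_t blocksize, uint16_t num_blocks,
-	uint8_t *buf)
-{
-	cy_as_return_status_t ret;
-
-	ret = cy_as_sdio_set_blocksize(handle, 1, 0, func_no, blocksize);
-	if (ret != CY_AS_ERROR_SUCCESS)
-		return ret;
-
-	/* Read num_blocks blocks from the given address on device 0,
-	 * bus 1, in block mode, waiting for completion. */
-	return cy_as_sdio_extended_read(handle, 1, 0, func_no, address,
-		CY_SDIO_BLOCKMODE, num_blocks, buf, 0);
-}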
-
-/* Summary
- This function is used to write Multibyte/Block data
- to an IO function.
-
- Description
- This function is used to write Multibyte/Block data
- to an IO function. This function is to be used only
- for IO to an SDIO card as other media will not respond
- to the SDIO command set.
-
- * Valid in Asynchronous Callback: YES
- * Valid on Antioch device: NO
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_SUCCESS - the media information was returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does not
- * exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified media/device
- * pair does not exist
- * CY_AS_ERROR_ASYNC_PENDING - an async operation is pending
- * CY_AS_ERROR_OUT_OF_MEMORY - insufficient memory available
- * CY_AS_ERROR_INVALID_RESPONSE - an error message was
- * received from the firmware
- * CY_AS_ERROR_MEDIA_ACCESS_FAILURE - there was an error in
- * writing to the media
- * CY_AS_ERROR_INVALID_FUNCTION - An IO attempt was made
- * to an invalid function
- * CY_AS_ERROR_INVALID_BLOCKSIZE - An incorrect blocksize or
- * block count was passed to the function.
- * CY_AS_ERROR_FUNCTION_SUSPENDED - The function to which
- * write was attempted is in suspend
- * CY_AS_ERROR_IO_ABORTED - The IO operation was aborted
- * CY_AS_ERROR_IO_SUSPENDED - The IO operation was suspended
- * CY_AS_ERROR_INVALID_REQUEST - An invalid request was
- * passed to the card.
-*/
-cy_as_return_status_t
-cy_as_sdio_extended_write(
- /* Handle to the Westbridge device */
- cy_as_device_handle handle,
- /* Bus to use */
- cy_as_bus_number_t bus,
- /* Device number */
- uint32_t device,
- /* IO function Number */
- uint8_t n_function_no,
- /* Base Address for IO */
- uint32_t address,
- /* Set to CY_SDIO_BLOCKMODE for block IO,
- CY_SDIO_BYTEMODE for multibyte IO,
- CY_SDIO_OP_FIFO to write multiple bytes to the same address,
- CY_SDIO_OP_INCR to write multiple bytes to incrementing
- addresses */
- uint8_t misc_buf,
- /* Block/Byte count to write
- in case of byte mode the count should not exceed the block size
- or 512, whichever is smaller.
- in case of block mode, maximum number of blocks is 511. */
- uint16_t argument,
- /* Buffer to hold data to be written to card. */
- uint8_t *data_p,
- /* Callback in case of Asynchronous call. 0 if Synchronous */
- cy_as_sdio_callback callback
- );
-
-/* Summary
- This function is used to initialize an SDIO card function.
-
- Description
- This function is used to initialize an SDIO card function
- (1 - 7). This function is to be used only for IO to an
- SDIO card as other media will not respond to the SDIO
- command set.
-
- * Valid in Asynchronous Callback: NO
- * Valid on Antioch device: NO
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not been
- * started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was passed
- * in
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_SUCCESS - the media information was returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does not exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified media/device
- * pair does not exist
- * CY_AS_ERROR_OUT_OF_MEMORY - insufficient memory available
- * CY_AS_ERROR_INVALID_RESPONSE - an error message was
- * received from the firmware
- * CY_AS_ERROR_MEDIA_ACCESS_FAILURE - there was an error in
- * accessing the media
- * CY_AS_ERROR_INVALID_FUNCTION - An IO attempt was made
- * to an invalid function
-*/
-cy_as_return_status_t
-cy_as_sdio_init_function(
- /* Handle to the Westbridge device */
- cy_as_device_handle handle,
- /* Bus to use */
- cy_as_bus_number_t bus,
- /* Device number */
- uint32_t device,
- /* IO function Number */
- uint8_t n_function_no,
- /* Set to CY_SDIO_FORCE_INIT to reinitialize function */
- uint8_t misc_buf
- );
-
-/* Summary
- This function is used to get properties of an SDIO card function.
-
- Description
- This function is used to get properties of an SDIO card function
- (1 - 7) into a CyAsSDIOFunc structure. This function is to be
- used only for IO to an SDIO card as other media will not respond
- to the SDIO command set.
-
- * Valid in Asynchronous Callback: NO
- * Valid on Antioch device: NO
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not been
- * started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was passed
- * in
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_SUCCESS - the media information was returned
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does
- * not exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified media/device pair
- * does not exist
- * CY_AS_ERROR_INVALID_FUNCTION - An IO request was made to
- * an invalid function
-*/
-cy_as_return_status_t
-cy_as_sdio_query_function(
- /* Handle to the Westbridge device */
- cy_as_device_handle handle,
- /* Bus to use */
- cy_as_bus_number_t bus,
- /* Device number */
- uint32_t device,
- /* IO function Number */
- uint8_t n_function_no,
- /* Buffer to store function properties */
- cy_as_sdio_func *data_p
- );
-
-/* Summary
- This function is used to Abort the current IO function.
-
- Description
- This function is used to Abort the current IO function.
- This function is to be used only for IO to an SDIO card as
- other media will not respond to the SDIO command set.
-
- * Valid in Asynchronous Callback: NO
- * Valid on Antioch device: NO
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device
- * has not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_SUCCESS - the media information was
- * returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does not
- * exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified
- * media/device pair does not exist
- * CY_AS_ERROR_OUT_OF_MEMORY - insufficient memory
- * available
- * CY_AS_ERROR_INVALID_FUNCTION - An IO attempt was made
- * to an invalid function
-*/
-cy_as_return_status_t
-cy_as_sdio_abort_function(
- /* Handle to the Westbridge device */
- cy_as_device_handle handle,
- /* Bus to use */
- cy_as_bus_number_t bus,
- /* Device number */
- uint32_t device,
- /* IO function Number */
- uint8_t n_function_no
- );
-
-/* Summary
- This function is used to Disable IO to an SDIO function.
-
- Description
- This function is used to Disable IO to an SDIO function.
- This function is to be used only for IO to an SDIO card as
- other media will not respond to the SDIO command set.
-
- * Valid in Asynchronous Callback: NO
- * Valid on Antioch device: NO
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device
- * has not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device is
- * in suspend mode
- * CY_AS_ERROR_SUCCESS - the media information was
- * returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does not
- * exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified media/device
- * pair does not exist
- * CY_AS_ERROR_INVALID_FUNCTION - An IO attempt was made
- * to an invalid function
-*/
-cy_as_return_status_t
-cy_as_sdio_de_init_function(
- /* Handle to the Westbridge device */
- cy_as_device_handle handle,
- /* Bus to use */
- cy_as_bus_number_t bus,
- /* Device number */
- uint32_t device,
- /* IO function Number */
- uint8_t n_function_no
- );
-
-/* Summary
- This function is used to Suspend the current IO function.
-
- Description
- This function is used to Suspend the current IO function.
- This function is to be used only for IO to an SDIO card as
- other media will not respond to the SDIO command set.
-
- * Valid in Asynchronous Callback: NO
- * Valid on Antioch device: NO
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has
- * not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_SUCCESS - the media information was returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified does not
- * exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified
- * media/device pair does not exist
- * CY_AS_ERROR_OUT_OF_MEMORY - insufficient memory
- * available
- * CY_AS_ERROR_INVALID_FUNCTION - An IO attempt was made
- * to an invalid function
-*/
-cy_as_return_status_t
-cy_as_sdio_suspend(
- /* Handle to the Westbridge device */
- cy_as_device_handle handle,
- /* Bus to use */
- cy_as_bus_number_t bus,
- /* Device number */
- uint32_t device,
- /* IO function Number */
- uint8_t n_function_no
- );
-
-/* Summary
- This function is used to resume a Suspended IO function.
-
- Description
- This function is used to resume a Suspended IO function.
- This function is to be used only for IO to an SDIO card as
- other media will not respond to the SDIO command set.
-
- * Valid in Asynchronous Callback: NO
- * Valid on Antioch device: NO
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device
- * has not been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been
- * loaded into West Bridge
- * CY_AS_ERROR_NOT_RUNNING - the storage stack has not
- * been started
- * CY_AS_ERROR_INVALID_HANDLE - an invalid handle was
- * passed in
- * CY_AS_ERROR_IN_SUSPEND - the West Bridge device is
- * in suspend mode
- * CY_AS_ERROR_SUCCESS - the media information was
- * returned
- * CY_AS_ERROR_TIMEOUT - a timeout occurred
- * communicating with the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the stack is not running
- * CY_AS_ERROR_NO_SUCH_BUS - the bus specified
- * does not exist
- * CY_AS_ERROR_NO_SUCH_DEVICE - the specified
- * media/device pair does not exist
- * CY_AS_ERROR_ASYNC_PENDING - an async operation
- * is pending
- * CY_AS_ERROR_OUT_OF_MEMORY - insufficient memory
- * available
- * CY_AS_ERROR_INVALID_RESPONSE - an error message was
- * received from the firmware
- * CY_AS_ERROR_MEDIA_ACCESS_FAILURE - there was an error
- * in accessing the media
- * CY_AS_ERROR_INVALID_FUNCTION - An IO attempt was
- * made to an invalid function
- * CY_AS_ERROR_IO_ABORTED - The IO operation was
- * aborted
- * CY_AS_ERROR_IO_SUSPENDED - The IO operation was
- * suspended
- * CY_AS_ERROR_INVALID_REQUEST - An invalid request was
- * passed to the card.
-
-*/
-cy_as_return_status_t
-cy_as_sdio_resume(
- /* Handle to the Westbridge device */
- cy_as_device_handle handle,
- /* Bus to use */
- cy_as_bus_number_t bus,
- /* Device number */
- uint32_t device,
- /* IO function Number */
- uint8_t n_function_no,
- /* Operation to resume (Read or Write) */
- cy_as_oper_type op,
- /* Miscellaneous buffer same as for Extended read and Write */
- uint8_t misc_buf,
- /* Number of pending blocks for IO. Should be less
- than or equal to the maximum defined for extended
- read and write */
- uint16_t pendingblockcount,
- /* Buffer to continue the Suspended IO operation */
- uint8_t *data_p
- );
-
-
-
-/* For supporting deprecated functions */
-#include "cyasstorage_dep.h"
-
-#include "cyas_cplus_end.h"
-
-#endif /* _INCLUDED_CYASSTORAGE_H_ */
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasstorage_dep.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasstorage_dep.h
deleted file mode 100644
index 566b244bd8c..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasstorage_dep.h
+++ /dev/null
@@ -1,309 +0,0 @@
-/* Cypress West Bridge API header file (cyasstorage_dep.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-/* This header contains the Antioch-specific declarations
- * of the APIs that are deprecated in the Astoria SDK. It is
- * provided for maintaining backward compatibility.
- */
-#ifndef __INCLUDED_CYANSTORAGE_DEP_H__
-#define __INCLUDED_CYANSTORAGE_DEP_H__
-
-#ifndef __doxygen__
-
-typedef void (*cy_as_storage_callback_dep)(
-/* Handle to the device completing the storage operation */
- cy_as_device_handle handle,
- /* The media type completing the operation */
- cy_as_media_type type,
- /* The device completing the operation */
- uint32_t device,
- /* The unit completing the operation */
- uint32_t unit,
- /* The block number of the completed operation */
- uint32_t block_number,
- /* The type of operation */
- cy_as_oper_type op,
- /* The error status */
- cy_as_return_status_t status
- );
-
-typedef void (*cy_as_storage_event_callback_dep)(
- /* Handle to the device sending the event notification */
- cy_as_device_handle handle,
- /* The media type */
- cy_as_media_type type,
- /* The event type */
- cy_as_storage_event evtype,
- /* Event related data */
- void *evdata
- );
-
-typedef struct cy_as_storage_query_device_data_dep {
- /* The type of media to query */
- cy_as_media_type type;
- /* The logical device number to query */
- uint32_t device;
- /* The return value for the device descriptor */
- cy_as_device_desc desc_p;
-} cy_as_storage_query_device_data_dep;
-
-typedef struct cy_as_storage_query_unit_data_dep {
- /* The type of media to query */
- cy_as_media_type type;
- /* The logical device number to query */
- uint32_t device;
- /* The unit to query on the device */
- uint32_t unit;
- /* The return value for the unit descriptor */
- cy_as_unit_desc desc_p;
-} cy_as_storage_query_unit_data_dep;
-
-
-/************ FUNCTIONS *********************/
-
-EXTERN cy_as_return_status_t
-cy_as_storage_register_callback_dep(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The callback function to call for async storage events */
- cy_as_storage_event_callback_dep callback
- );
-
-EXTERN cy_as_return_status_t
-cy_as_storage_claim_dep(cy_as_device_handle handle,
- cy_as_media_type type
- );
-
-EXTERN cy_as_return_status_t
-cy_as_storage_claim_dep_EX(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The type of media to claim */
- cy_as_media_type *type,
- /* Callback to be called when the operation is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-
-EXTERN cy_as_return_status_t
-cy_as_storage_release_dep(cy_as_device_handle handle,
- cy_as_media_type type
- );
-
-EXTERN cy_as_return_status_t
-cy_as_storage_release_dep_EX(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* Handle to the device of interest */
- cy_as_media_type *type,
- /* Callback to be called when the operation is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-
-EXTERN cy_as_return_status_t
-cy_as_storage_query_device_dep(
- cy_as_device_handle handle,
- cy_as_media_type media,
- uint32_t device,
- cy_as_device_desc *desc_p
- );
-
-EXTERN cy_as_return_status_t
-cy_as_storage_query_device_dep_EX(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* Parameters and return value for the query call */
- cy_as_storage_query_device_data_dep *data,
- /* Callback to be called when the operation is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-
-EXTERN cy_as_return_status_t
-cy_as_storage_query_unit_dep(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The type of media to query */
- cy_as_media_type type,
- /* The logical device number to query */
- uint32_t device,
- /* The unit to query on the device */
- uint32_t unit,
- /* The return value for the unit descriptor */
- cy_as_unit_desc *unit_p
- );
-
-EXTERN cy_as_return_status_t
-cy_as_storage_query_unit_dep_EX(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* Parameters and return value for the query call */
- cy_as_storage_query_unit_data_dep *data_p,
- /* Callback to be called when the operation is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-
-EXTERN cy_as_return_status_t
-cy_as_storage_device_control_dep(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* Enable/disable control for card detection */
- cy_bool card_detect_en,
- /* Enable/disable control for write protect handling */
- cy_bool write_prot_en,
- /* Callback to be called when the operation is complete */
- cy_as_function_callback cb,
- /* Client data to be passed to the callback */
- uint32_t client
- );
-
-
-EXTERN cy_as_return_status_t
-cy_as_storage_read_dep(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The type of media to access */
- cy_as_media_type type,
- /* The device to access */
- uint32_t device,
- /* The unit to access */
- uint32_t unit,
- /* The first block to access */
- uint32_t block,
- /* The buffer where data will be placed */
- void *data_p,
- /* The number of blocks to be read */
- uint16_t num_blocks
- );
-
-EXTERN cy_as_return_status_t
-cy_as_storage_read_async_dep(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The type of media to access */
- cy_as_media_type type,
- /* The device to access */
- uint32_t device,
- /* The unit to access */
- uint32_t unit,
- /* The first block to access */
- uint32_t block,
- /* The buffer where data will be placed */
- void *data_p,
- /* The number of blocks to be read */
- uint16_t num_blocks,
- /* The function to call when the read is complete
- or an error occurs */
- cy_as_storage_callback_dep callback
- );
-EXTERN cy_as_return_status_t
-cy_as_storage_write_dep(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The type of media to access */
- cy_as_media_type type,
- /* The device to access */
- uint32_t device,
- /* The unit to access */
- uint32_t unit,
- /* The first block to access */
- uint32_t block,
- /* The buffer containing the data to be written */
- void *data_p,
- /* The number of blocks to be written */
- uint16_t num_blocks
- );
-
-EXTERN cy_as_return_status_t
-cy_as_storage_write_async_dep(
- /* Handle to the device of interest */
- cy_as_device_handle handle,
- /* The type of media to access */
- cy_as_media_type type,
- /* The device to access */
- uint32_t device,
- /* The unit to access */
- uint32_t unit,
- /* The first block to access */
- uint32_t block,
- /* The buffer where the data to be written is stored */
- void *data_p,
- /* The number of blocks to be written */
- uint16_t num_blocks,
- /* The function to call when the write is complete
- or an error occurs */
- cy_as_storage_callback_dep callback
- );
-
-EXTERN cy_as_return_status_t
-cy_as_storage_sd_register_read_dep(
- cy_as_device_handle handle,
- cy_as_media_type type,
- uint8_t device,
- cy_as_sd_card_reg_type reg_type,
- uint8_t read_len,
- uint8_t *data_p
- );
-
-EXTERN cy_as_return_status_t
-cy_as_storage_sd_register_read_dep_EX(
- /* Handle to the West Bridge device. */
- cy_as_device_handle handle,
- /* The type of media to query */
- cy_as_media_type type,
- /* The device to query */
- uint8_t device,
- /* The type of register to read. */
- cy_as_sd_card_reg_type reg_type,
- /* Output data buffer and length. */
- cy_as_storage_sd_reg_read_data *data_p,
- /* Callback function to call when done. */
- cy_as_function_callback cb,
- /* Call context to send to the cb function. */
- uint32_t client
- );
-
-EXTERN cy_as_return_status_t
-cy_as_storage_create_p_partition_dep(
- cy_as_device_handle handle,
- cy_as_media_type media,
- uint32_t device,
- uint32_t size,
- cy_as_function_callback cb,
- uint32_t client);
-
-EXTERN cy_as_return_status_t
-cy_as_storage_remove_p_partition_dep(
- cy_as_device_handle handle,
- cy_as_media_type media,
- uint32_t device,
- cy_as_function_callback cb,
- uint32_t client);
-
-#endif /*__doxygen*/
-
-#endif /*__INCLUDED_CYANSTORAGE_DEP_H__*/
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyastoria.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyastoria.h
deleted file mode 100644
index b1b18d0685e..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyastoria.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Cypress West Bridge API header file (cyastoria.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASTORIA_H_
-#define _INCLUDED_CYASTORIA_H_
-
-#if !defined(__doxygen__)
-
-#include "cyaserr.h"
-#include "cyasmisc.h"
-#include "cyasstorage.h"
-#include "cyasusb.h"
-#include "cyasmtp.h"
-
-#endif
-
-#endif
-
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyastsdkversion.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyastsdkversion.h
deleted file mode 100644
index a3c10aa559e..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyastsdkversion.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Cypress Astoria Sdk Version file (cyastsdkversion.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street, Fifth Floor
-## Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASTSDK_VERSION_H_
-#define _INCLUDED_CYASTSDK_VERSION_H_
-
-/* Astoria SDK version 1.2.1 */
-#define CYAS_MAJOR_VERSION (1)
-#define CYAS_MINOR_VERSION (2)
-#define CYAS_BUILD_NUMBER (197)
-
-#endif /*_INCLUDED_CYASTSDK_VERSION_H_*/
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyastypes.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyastypes.h
deleted file mode 100644
index 18043c1f38d..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyastypes.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* Cypress West Bridge API header file (cyastypes.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASTYPES_H_
-#define _INCLUDED_CYASTYPES_H_
-/* moved to staging location, eventual implementation
- * considered is here
-#include <mach/westbridge/cyashaldef.h>
-*/
- #include "../../../arch/arm/plat-omap/include/mach/westbridge/cyashaldef.h"
-
-/* Types that are not available on specific platforms.
- * These are used only in the reference HAL implementations and
- * are not required for using the API.
- */
-#ifdef __unix__
-typedef unsigned long DWORD;
-typedef void *LPVOID;
-#define WINAPI
-#define INFINITE (0xFFFFFFFF)
-#define ptr_to_uint(ptr) ((unsigned int)(ptr))
-#endif
-
-/* Basic types used by the entire API */
-
-/* Summary
- This type represents an endpoint number
-*/
-typedef uint8_t cy_as_end_point_number_t;
-
-/* Summary
- This type is used to return status information from
- an API call.
-*/
-typedef uint16_t cy_as_return_status_t;
-
-/* Summary
- This type represents a bus number
-*/
-typedef uint32_t cy_as_bus_number_t;
-
-/* Summary
- All APIs provided with this release are marked extern
- through this definition. This definition can be changed
- to meet the scope changes required in the user build
- environment.
-
- For example, this can be changed to __declspec(exportdll)
- to enable exporting the API from a DLL.
- */
-#define EXTERN extern
-
-#endif
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasusb.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasusb.h
deleted file mode 100644
index e3ba9ca4c75..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasusb.h
+++ /dev/null
@@ -1,1862 +0,0 @@
-/* Cypress West Bridge API header file (cyasusb.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-#ifndef _INCLUDED_CYASUSB_H_
-#define _INCLUDED_CYASUSB_H_
-
-#include "cyasmisc.h"
-
-#include "cyas_cplus_start.h"
-
-/*@@Enumeration Model
- Summary
- The USB enumeration process is the process of communicating
- to the USB host information
- about the capabilities of the connected device. This
- process is completed by servicing
- requests for various types of descriptors. In the software
- APIs described below, this
- process is controlled in one of two ways.
-
- Description
- There are advantages to either type of enumeration
- and this is why both models are supported.
- P Port processor based enumeration gives the P port
- processor maximum control and flexibility
- for providing USB configuration information. However,
- this does require (near) real time data
- responses from the P port processor during the enumeration
- process. West Bridge based enumeration
- requires no real time information from the P port processor,
- ensuring the fastest possible
- enumeration times.
-
- * P Port Based Enumeration *
- The first method for handling USB enumeration is for the
- processor client to handle all
- endpoint zero requests for descriptors. This mode is
- configured by indicating to the API
- that the processor wants to handle all endpoint zero
- requests. This is done by setting
- bit 0 in the end_point_mask to a 1. The processor uses
- CyAsUsbReadDataAsync() to read the request and
- CyAsUsbWriteDataAsync() to write the response.
-
- * West Bridge Based Enumeration *
- The second method for handling USB enumeration is the
- configuration information method.
- Before enabling a connection from the West Bridge device
- to the USB connector, the P Port
- processor sends information about the USB configuration to
- West Bridge through the configuration
- APIs. This information is stored within the West Bridge
- device. When a USB cable is attached,
- the West Bridge device then handles all descriptor requests
- based on the stored information.
- Note that this method of enumeration only supports a single
- USB configuration.
-
- In either model of enumeration, the processor client is
- responsible for ensuring that
- the system meets USB Chapter 9 compliance requirements. This
- can be done by providing spec
- compliant descriptors, and handling any setup packets that
- are sent to the client
- appropriately.
-
- Mass storage class compliance will be ensured by the West
- Bridge firmware when the mass
- storage functionality is enabled.
-*/
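
A rough sketch of the West Bridge based model described above is shown below. It assumes a valid device handle obtained through the cyasmisc.h APIs, makes all calls synchronously (cb and client set to 0), and the example_ names, descriptor contents and exact ordering beyond what is documented in this header are illustrative only.

/* Sketch: firmware (West Bridge) based enumeration bring-up.
 * Endpoint configuration and event-callback registration would
 * normally also be done before committing and connecting. */
static cy_as_return_status_t example_firmware_enumeration(
	cy_as_device_handle h,
	cy_as_usb_enum_control *enum_ctl,	/* filled in by the caller */
	void *dev_desc, uint16_t dev_desc_len)
{
	cy_as_return_status_t ret;

	ret = cy_as_usb_start(h, 0, 0);		/* synchronous: cb == 0 */
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	ret = cy_as_usb_set_enum_config(h, enum_ctl, 0, 0);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	/* Descriptors are only needed when West Bridge enumerates. */
	ret = cy_as_usb_set_descriptor(h, cy_as_usb_desc_device, 0,
			dev_desc, dev_desc_len, 0, 0);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	ret = cy_as_usb_commit_config(h, 0, 0);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	/* Present the device on the USB bus. */
	return cy_as_usb_connect(h, 0, 0);
}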
-
-/*@@Endpoint Configuration
- Summary
- The West Bridge device has one 64-byte control endpoint, one
- 64-byte low bandwidth endpoint, four bulk
- endpoints dedicated for mass storage usage, and up to ten
- bulk/interrupt/isochronous
- endpoints that can be used for USB-to-Processor communication.
-
- Description
- The four storage endpoints (Endpoints 2, 4, 6 and 8) are
- reserved for accessing storage
- devices attached to West Bridge and are not available for use
- by the processor. These are
- used implicitly when using the storage API to read/write to
- the storage media.
-
- Endpoint 0 is the standard USB control pipe used for all
- enumeration activity. Though
- the endpoint buffer is not directly accessible from the
- processor, read/write activity
- can be performed on this endpoint through the API layers.
- This endpoint is always
- configured as a bi-directional control endpoint.
-
- Endpoint 1 is a 64-byte endpoint that can be used for low
- bandwidth bulk/interrupt
- activity. The physical buffer is not accessible from the
- processor, but can be read/written
- through the API. As the data coming to this endpoint is
- being handled through the
- software layers, there can be loss of data if a read call
- is not waiting when an OUT
- packet arrives.
-
- Endpoints 3, 5, 7, 9, 10, 11, 12, 13, 14 and 15 are ten
- configurable endpoints
- mapped to parts of a total 4 KB FIFO buffer space on the
- West Bridge device. This 4 KB
- physical buffer space is divided into up to four endpoints
- called PEP1, PEP2, PEP3 and PEP4
- in this software document. There are multiple configurations
- in which this buffer space
- can be used, and the size and number of buffers available to
- each physical endpoint
- vary between these configurations. See the West Bridge PDD
- for details on the buffer
- orientation corresponding to each configuration.
-
- * Note *
- PEPs 1, 2, 3 and 4 are called Physical EP 3, 5, 7 and 9 in the
- West Bridge PDD. The
- sequential number scheme is used in the software to disambiguate
- these from the logical
- endpoint numbers, and also for convenience of array indexing.
-*/
-
-#if !defined(__doxygen__)
-
-
-#endif
-
-/* Summary
- This constant defines the maximum size of a USB descriptor
- when referenced via the CyAsUsbSetDescriptor or
- CyAsUsbGetDescriptor functions.
-
- See Also
- * CyAsUsbSetDescriptor
- * CyAsUsbGetDescriptor
-*/
-#define CY_AS_MAX_USB_DESCRIPTOR_SIZE (128)
-
-/***************************************
- * West Bridge Types
- ***************************************/
-
-
-/* Summary
- This data structure is the data passed via the evdata parameter
- on a usb event callback for the inquiry request.
-
- Description
- When a SCSI inquiry request arrives via the USB connection and
- the P Port has asked
- to receive inquiry requests, this request is forwarded to the
- client via the USB
- callback. This callback is called twice, once before the
- inquiry data is forwarded
- to the host (CyAsEventUsbInquiryBefore) and once after the
- inquiry has been sent to the
- USB host (CyAsEventUsbInquiryAfter). The evdata parameter
- is a pointer to this data
- structure.
-
- *CyAsEventUsbInquiryBefore*
- If the client just wishes to see the inquiry request and
- associated data, then a simple
- return from the callback will forward the inquiry response
- to the USB host. If the
- client wishes to change the data returned to the USB host,
- the updated parameter must
- be set to CyTrue and the memory area addressed by the data
- parameter should be updated.
- The data pointer can be changed to point to a new memory
- area and the length field
- changed to change the amount of data returned from the
- inquiry request. Note that the
- data area pointed to by the data parameter must remain
- valid and the contents must
- remain consistent until after the CyAsEventUsbInquiryAfter
- event has occurred. THE LENGTH
- MUST BE LESS THAN 192 BYTES OR THE CUSTOM INQUIRY RESPONSE
- WILL NOT BE RETURNED. If the
- length is too long, the default inquiry response will be
- returned.
-
- *CyAsEventUsbInquiryAfter*
- If the client needs to free any data, this event signals that
- the data associated with the inquiry is no longer needed.
-
- See Also
- * CyAsUsbEventCallback
- * CyAsUsbRegisterCallback
-*/
-typedef struct cy_as_usb_inquiry_data {
- /* The bus for the event */
- cy_as_bus_number_t bus;
-	/* The device for the event */
- uint32_t device;
- /* The EVPD bit from the SCSI INQUIRY request */
- uint8_t evpd;
- /* The codepage in the inquiry request */
- uint8_t codepage;
-	/* This bool must be set to CyTrue to indicate that the inquiry
- data was changed */
- cy_bool updated;
- /* The length of the data */
- uint16_t length;
- /* The inquiry data */
- void *data;
-} cy_as_usb_inquiry_data;
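
A minimal sketch of how the fields above would be used from a USB event callback when cy_as_event_usb_inquiry_before is delivered; the replacement buffer and its contents are placeholders, and per the note above it must stay valid (and be shorter than 192 bytes) until the inquiry-after event arrives.

/* Placeholder custom INQUIRY response; contents omitted here. */
static uint8_t example_inquiry_response[36];

static void example_override_inquiry(cy_as_usb_inquiry_data *inq)
{
	inq->data = example_inquiry_response;
	inq->length = sizeof(example_inquiry_response);
	inq->updated = CyTrue;	/* tell West Bridge the data changed */
}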
-
-
-/* Summary
- This data structure is the data passed via the evdata
- parameter on a usb event
- callback for the unknown mass storage request.
-
- Description
- When a SCSI request is made that the mass storage
- firmware in West Bridge does not
- know how to process, this request is passed to the
- processor for handling via
- the usb callback. This data structure is used to
- pass the request and the
- associated response. The user may set the status
- to indicate the status of the
- request. The status value is the bCSWStatus value
- from the USB mass storage
- Command Status Wrapper (0 = command passed, 1 =
- command failed). If the status
- is set to command failed (1), the sense information
- should be set as well. For
- more information about sense information, see the
- USB mass storage specification
- as well as the SCSI specifications for block devices.
- By default the status is
- initialized to 1 (failure) with a sense information
- of 05h/20h/00h which
- indicates INVALID COMMAND.
-*/
-typedef struct cy_as_usb_unknown_command_data {
- /* The bus for the event */
- cy_as_bus_number_t bus;
- /* The device for the event */
- uint32_t device;
-
- uint16_t reqlen;
- /* The request */
- void *request;
-
- /* The returned status value for the command */
- uint8_t status;
- /* If status is failed, the sense key */
- uint8_t key;
- /* If status is failed, the additional sense code */
- uint8_t asc;
-	/* If status is failed, the additional sense code qualifier */
- uint8_t ascq;
-} cy_as_usb_unknown_command_data;
-
-
-/* Summary
- This data structure is the data passed via the evdata
- parameter on a usb event callback for the start/stop request.
-
- Description
- When a SCSI start stop request arrives via the USB connection
- and the P Port has asked to see these requests, this structure
- is passed to the client as the evdata parameter on the USB callback.
-
- See Also
- * CyAsUsbEventCallback
- * CyAsUsbRegisterCallback
-*/
-typedef struct cy_as_usb_start_stop_data {
- /* The bus for the event */
- cy_as_bus_number_t bus;
- /* The device for the event */
- uint32_t device;
- /* CyTrue means start request, CyFalse means stop request */
- cy_bool start;
- /* CyTrue means LoEj bit set, otherwise false */
- cy_bool loej;
-} cy_as_usb_start_stop_data;
-
-/* Summary
- This data type is used to indicate which mass storage devices
- are enumerated.
-
- Description
-
- See Also
- * CyAsUsbEnumControl
- * CyAsUsbSetEnumConfig
-*/
-typedef enum cy_as_usb_mass_storage_enum {
- cy_as_usb_nand_enum = 0x01,
- cy_as_usb_sd_enum = 0x02,
- cy_as_usb_mmc_enum = 0x04,
- cy_as_usb_ce_ata_enum = 0x08
-} cy_as_usb_mass_storage_enum;
-
-/* Summary
- This data type specifies the type of descriptor to transfer
- to the West Bridge device
-
- Description
- During enumeration, if West Bridge is handling enumeration,
- the West Bridge device needs the USB descriptors
- to complete the enumeration. The function CyAsUsbSetDescriptor()
- is used to transfer the descriptors
- to the West Bridge device. This type is an argument to that
- function and specifies which descriptor
- is being transferred.
-
- See Also
- * CyAsUsbSetDescriptor
- * CyAsUsbGetDescriptor
-*/
-typedef enum cy_as_usb_desc_type {
- /* A device descriptor - See USB 2.0 specification Chapter 9 */
- cy_as_usb_desc_device = 1,
- /* A device descriptor qualifier -
- * See USB 2.0 specification Chapter 9 */
- cy_as_usb_desc_device_qual = 2,
- /* A configuration descriptor for FS operation -
- * See USB 2.0 specification Chapter 9 */
- cy_as_usb_desc_f_s_configuration = 3,
- /* A configuration descriptor for HS operation -
- * See USB 2.0 specification Chapter 9 */
- cy_as_usb_desc_h_s_configuration = 4,
- cy_as_usb_desc_string = 5
-} cy_as_usb_desc_type;
-
-/* Summary
- This type specifies the direction of an endpoint
-
- Description
- This type is used when configuring the endpoint hardware
- to specify the direction
- of the endpoint.
-
- See Also
- * CyAsUsbEndPointConfig
- * CyAsUsbSetEndPointConfig
- * CyAsUsbGetEndPointConfig
-*/
-typedef enum cy_as_usb_end_point_dir {
- /* The endpoint direction is IN (West Bridge -> USB Host) */
- cy_as_usb_in = 0,
- /* The endpoint direction is OUT (USB Host -> West Bridge) */
- cy_as_usb_out = 1,
- /* The endpoint direction is IN/OUT (valid only for EP 0 & 1) */
- cy_as_usb_in_out = 2
-} cy_as_usb_end_point_dir;
-
-/* Summary
- This type specifies the type of an endpoint
-
- Description
- This type is used when configuring the endpoint hardware
- to specify the type of endpoint.
-
- See Also
- * CyAsUsbEndPointConfig
- * CyAsUsbSetEndPointConfig
- * CyAsUsbGetEndPointConfig
-*/
-typedef enum cy_as_usb_end_point_type {
- cy_as_usb_control,
- cy_as_usb_iso,
- cy_as_usb_bulk,
- cy_as_usb_int
-} cy_as_usb_end_point_type;
-
-/* Summary
- This type is a structure used to indicate the top level
- configuration of the USB stack
-
- Description
- In order to configure the USB stack, the CyAsUsbSetEnumConfig()
- function is called to indicate
- how mass storage is to be handled, the specific number of
- interfaces to be supported if
- West Bridge is handling enumeration, and the end points of
- specific interest. This structure
- contains this information.
-
- See Also
- * CyAsUsbSetConfig
- * CyAsUsbGetConfig
- * <LINK Enumeration Model>
-*/
-typedef struct cy_as_usb_enum_control {
- /* Designate which devices on which buses to enumerate */
- cy_bool devices_to_enumerate[CY_AS_MAX_BUSES]
- [CY_AS_MAX_STORAGE_DEVICES];
- /* If true, West Bridge will control enumeration. If this
- * is false the P port controls enumeration. if the P port
- * is controlling enumeration, traffic will be received via
- * endpoint zero. */
- cy_bool antioch_enumeration;
- /* This is the interface # to use for the mass storage
- * interface, if mass storage is enumerated. if mass
- * storage is not enumerated this value should be zero. */
- uint8_t mass_storage_interface;
- /* This is the interface # to use for the MTP interface,
- * if MTP is enumerated. if MTP is not enumerated
- * this value should be zero. */
- uint8_t mtp_interface;
- /* If true, Inquiry, START/STOP, and unknown mass storage
- * requests cause a callback to occur for handling by the
- * baseband processor. */
- cy_bool mass_storage_callbacks;
-} cy_as_usb_enum_control;
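
As a sketch, the structure above might be filled as follows for firmware-managed enumeration; the bus/device indices and interface numbers are illustrative only, not a recommended configuration.

static void example_fill_enum_control(cy_as_usb_enum_control *cfg)
{
	int b, d;

	for (b = 0; b < CY_AS_MAX_BUSES; b++)
		for (d = 0; d < CY_AS_MAX_STORAGE_DEVICES; d++)
			cfg->devices_to_enumerate[b][d] = CyFalse;

	/* Expose device 0 on bus 0 over USB mass storage. */
	cfg->devices_to_enumerate[0][0] = CyTrue;
	cfg->antioch_enumeration = CyTrue;	/* West Bridge enumerates */
	cfg->mass_storage_interface = 0;	/* MS on interface #0 */
	cfg->mtp_interface = 0;			/* MTP not enumerated */
	cfg->mass_storage_callbacks = CyTrue;	/* see inquiry/start-stop events */
}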
-
-
-/* Summary
- This structure is used to configure a single endpoint
-
- Description
- This data structure contains all of the information required
- to configure the West Bridge hardware
- associated with a given endpoint.
-
- See Also
- * CyAsUsbSetEndPointConfig
- * CyAsUsbGetEndPointConfig
-*/
-typedef struct cy_as_usb_end_point_config {
- /* If true, this endpoint is enabled */
- cy_bool enabled;
- /* The direction of this endpoint */
- cy_as_usb_end_point_dir dir;
- /* The type of endpoint */
- cy_as_usb_end_point_type type;
- /* The physical endpoint #, 1, 2, 3, 4 */
- cy_as_end_point_number_t physical;
- /* The size of the endpoint in bytes */
- uint16_t size;
-} cy_as_usb_end_point_config;
-
-/* Summary
- List of partition enumeration combinations that can
- be selected on a partitioned storage device.
-
- Description
- West Bridge firmware supports creating up to two
- partitions on mass storage devices connected to
- West Bridge. When there are two partitions on a device,
- the user can choose which of these partitions should be
- made visible to a USB host through the mass storage
- interface. This enumeration lists the various enumeration
- selections that can be made.
-
- See Also
- * CyAsStorageCreatePPartition
- * CyAsStorageRemovePPartition
- * CyAsUsbSelectMSPartitions
- */
-typedef enum cy_as_usb_m_s_type_t {
- /* Enumerate only partition 0 as CD (autorun) device */
- cy_as_usb_m_s_unit0 = 0,
- /* Enumerate only partition 1 as MS device (default setting) */
- cy_as_usb_m_s_unit1,
- /* Enumerate both units */
- cy_as_usb_m_s_both
-} cy_as_usb_m_s_type_t;
-
-/* Summary
- This type specifies the type of USB event that has occurred
-
- Description
- This type is used in the USB event callback function to
- indicate the type of USB event that has occurred. The callback
- function includes both the reason for the callback and a data
- parameter associated with the reason. The data parameter is used
- in a reason specific way and is documented below with each reason.
-
- See Also
- * CyAsUsbIoCallback
-*/
-typedef enum cy_as_usb_event {
- /* This event is sent when West Bridge is put into the suspend
- state by the USB host. the data parameter is not used and
- will be zero. */
- cy_as_event_usb_suspend,
- /* This event is sent when West Bridge is taken out of the
- suspend state by the USB host. the data parameter is not
- used and will be zero. */
- cy_as_event_usb_resume,
- /* This event is sent when a USB reset request is received
- by the west bridge device. the data parameter is not used and
- will be zero. */
- cy_as_event_usb_reset,
- /* This event is sent when a USB set configuration request is made.
- the data parameter is a pointer to a uint16_t that contains the
- configuration number. the configuration number may be zero to
- indicate an unconfigure operation. */
- cy_as_event_usb_set_config,
- /* This event is sent when the USB connection changes speed. This is
- generally a transition from full speed to high speed. the parameter
- to this event is a pointer to uint16_t that gives the speed of the
- USB connection. zero indicates full speed, one indicates high speed */
- cy_as_event_usb_speed_change,
- /* This event is sent when a setup packet is received.
- * The data parameter is a pointer to the eight bytes of setup data. */
- cy_as_event_usb_setup_packet,
- /* This event is sent when a status packet is received. The data
- parameter is not used. */
- cy_as_event_usb_status_packet,
- /* This event is sent when mass storage receives an inquiry
- request and we have asked to see these requests. */
- cy_as_event_usb_inquiry_before,
- /* This event is sent when mass storage has finished processing an
- inquiry request and any data associated with the request is no longer
- required. */
- cy_as_event_usb_inquiry_after,
- /* This event is sent when mass storage receives a start/stop
- * request and we have asked to see these requests */
- cy_as_event_usb_start_stop,
- /* This event is sent when a Clear Feature request is received.
- * The data parameter is the endpoint number. */
- cy_as_event_usb_clear_feature,
- /* This event is sent when mass storage receives a request
- * that is not known and we have asked to see these requests */
- cy_as_event_usb_unknown_storage,
- /* This event is sent when the read/write activity on the USB mass
- storage has crossed a pre-set level */
- cy_as_event_usb_m_s_c_progress
-} cy_as_usb_event;
-
-/* Summary
- This type is the type of a callback function that is
- called when a USB event occurs
-
- Description
- At times West Bridge needs to inform the P port processor
- of events that have
- occurred. These events are asynchronous to the thread of
- control on the P
- port processor and as such are generally delivered via a
- callback function that
- is called as part of an interrupt handler. This type
- defines the type of function
- that must be provided as a callback function for USB events.
-
- See Also
- * CyAsUsbEvent
-*/
-typedef void (*cy_as_usb_event_callback)(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* The event type being reported */
- cy_as_usb_event ev,
-	/* The data associated with the event being reported */
- void *evdata
-);
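
A short sketch of an event callback matching the type above, together with how it would be registered; only a few events are handled and the handling shown is illustrative.

static void example_usb_event_cb(cy_as_device_handle h,
	cy_as_usb_event ev, void *evdata)
{
	uint16_t value;

	(void)h;

	switch (ev) {
	case cy_as_event_usb_set_config:
		/* evdata points to a uint16_t configuration number;
		 * zero indicates an unconfigure operation. */
		value = *(uint16_t *)evdata;
		(void)value;
		break;
	case cy_as_event_usb_speed_change:
		/* evdata points to a uint16_t: 0 = full speed,
		 * 1 = high speed. */
		value = *(uint16_t *)evdata;
		(void)value;
		break;
	case cy_as_event_usb_setup_packet:
		/* evdata points to the eight bytes of setup data. */
		break;
	default:
		break;
	}
}

/* Registration, typically done right after cy_as_usb_start():
 *
 *	cy_as_usb_register_callback(handle, example_usb_event_cb);
 */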
-
-
-/* Summary
- This type is the callback function called after an
- asynchronous USB read/write operation
-
- Description
- This function type defines a callback function that is
- called at the completion of any
- asynchronous read or write operation.
-
- See Also
- * CyAsUsbReadDataAsync
- * CyAsUsbWriteDataAsync
- * CY_AS_ERROR_CANCELED
-*/
-typedef void (*cy_as_usb_io_callback)(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* The endpoint that has completed an operation */
- cy_as_end_point_number_t ep,
-	/* The amount of data transferred to/from USB */
- uint32_t count,
- /* The data buffer for the operation */
- void *buffer,
- /* The error status of the operation */
- cy_as_return_status_t status
-);
-
-/* Summary
- This type is the callback function called after asynchronous
- API functions have completed.
-
- Description
- When calling API functions from callback routines (interrupt
- handlers usually) the async version of
- these functions must be used. This callback is called when an
- asynchronous API function has completed.
-*/
-typedef void (*cy_as_usb_function_callback)(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* The error status of the operation */
- cy_as_return_status_t status,
- /* A client supplied 32 bit tag */
- uint32_t client
-);
-
-
-/********************************************
- * West Bridge Functions
- ********************************************/
-
-/* Summary
- This function starts the USB stack
-
- Description
- This function initializes the West Bridge USB software
- stack if it has not yet been started.
- This initializes any required data structures and powers
- up any USB specific portions of
- the West Bridge hardware. If the stack had already been
- started, the USB stack reference count
- is incremented.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Notes
- This function cannot be called from any type of West Bridge
- callback.
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_SUCCESS - the stack initialized and is ready
- * for use
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating
- * with the West Bridge device
-
- See Also
- * CyAsUsbStop
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_start(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
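
A minimal synchronous usage sketch for the start/stop pair (cb == 0 makes the calls blocking); the asynchronous form would instead pass a cy_as_function_callback, whose definition is not part of this header excerpt.

static cy_as_return_status_t example_usb_session(cy_as_device_handle h)
{
	cy_as_return_status_t ret;

	ret = cy_as_usb_start(h, 0, 0);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	/* ... configure, connect and use the USB stack ... */

	/* Drops the reference taken by cy_as_usb_start() above. */
	return cy_as_usb_stop(h, 0, 0);
}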
-
-/* Summary
- This function stops the USB stack
-
- Description
- This function decrements the reference count for
- the USB stack and if this count
- is zero, the USB stack is shut down. The shutdown
- frees all resources associated
- with the USB stack.
-
- * Valid In Asynchronous Callback: NO
-
- Notes
- While all resources associated with the USB stack will
- be freed if a shutdown occurs,
- resources associated with underlying layers of the software
- will not be freed if they
- are shared by the storage stack and the storage stack is active.
- Specifically the DMA manager,
- the interrupt manager, and the West Bridge communications module
- are all shared by both the
- USB stack and the storage stack.
-
- Returns
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_SUCCESS - this module was shut down successfully
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
-
- See Also
- * CyAsUsbStart
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_stop(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
-
-/* Summary
- This function registers a callback function to be called when an
- asynchronous USB event occurs
-
- Description
- When asynchronous USB events occur, a callback function can be
- called to alert the calling program. This
- function allows the calling program to register a callback.
-
- * Valid In Asynchronous Callback: YES
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_register_callback(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The function to call */
- cy_as_usb_event_callback callback
- );
-
-
-/* Summary
- This function connects the West Bridge device D+ and D- signals
- physically to the USB host.
-
- Description
- The West Bridge device has the ability to programmatically
- disconnect the USB pins on the device
- from the USB host. This feature allows for re-enumeration of
- the West Bridge device as a different
- device when necessary. This function connects the D+ and D-
- signal physically to the USB host
- if they have been previously disconnected.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
-
- See Also
- * CyAsUsbDisconnect
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_connect(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
-
-/* Summary
- This function disconnects the West Bridge device D+ and D-
- signals physically from the USB host.
-
- Description
- The West Bridge device has the ability to programmatically
- disconnect the USB pins on the device
- from the USB host. This feature allows for re-enumeration
- of the West Bridge device as a different
- device when necessary. This function disconnects the D+
- and D- signal physically from the USB host
- if they have been previously connected.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
-
- See Also
- * CyAsUsbConnect
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_disconnect(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
-
-/* Summary
- This function configures the USB stack
-
- Description
- This function is used to configure the USB stack. It is
- used to indicate which endpoints are going to
- be used, and how to deal with the mass storage USB device
- within West Bridge.
-
- * Valid In Asynchronous Callback: Yes (if cb supplied)
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
-
- See Also
- * CyAsUsbGetEnumConfig
- * CyAsUsbEnumControl
- */
-EXTERN cy_as_return_status_t
-cy_as_usb_set_enum_config(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The USB configuration information */
- cy_as_usb_enum_control *config_p,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
-
-/* Summary
- This function retrieves the current configuration of
- the USB stack
-
- Description
- This function sends a request to West Bridge to retrieve
- the current configuration
-
- * Valid In Asynchronous Callback: Yes (if cb supplied)
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
-
- See Also
- * CyAsUsbSetConfig
- * CyAsUsbConfig
- */
-EXTERN cy_as_return_status_t
-cy_as_usb_get_enum_config(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
-	/* The return value for USB configuration information */
- cy_as_usb_enum_control *config_p,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
-
-/* Summary
- This function sets the USB descriptor
-
- Description
- This function is used to set the various descriptors
- associated with the USB enumeration
- process. This function should only be called when the
- West Bridge enumeration model is selected.
- Descriptors set using this function can be cleared by
- stopping the USB stack, or by calling
- the CyAsUsbClearDescriptors function.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Notes
- These descriptors are described in the USB 2.0 specification,
- Chapter 9.
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_INVALID_DESCRIPTOR - the descriptor passed is
- * not valid
- * CY_AS_ERROR_BAD_INDEX - a bad index was given for the type
- * of descriptor given
- * CY_AS_ERROR_BAD_ENUMERATION_MODE - this function cannot be
- * called if the P port processor is doing enumeration
-
- See Also
- * CyAsUsbGetDescriptor
- * CyAsUsbClearDescriptors
- * <LINK Enumeration Model>
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_set_descriptor(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The type of descriptor */
- cy_as_usb_desc_type type,
- /* Only valid for string descriptors */
- uint8_t index,
- /* The descriptor to be transferred */
- void *desc_p,
- /* The length of the descriptor in bytes */
- uint16_t length,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
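
As a sketch, a device descriptor could be pushed to West Bridge as shown below before connecting; the descriptor bytes follow the standard USB 2.0 device-descriptor layout, and the vendor/product IDs are zeroed placeholders.

static const uint8_t example_device_desc[18] = {
	18,			/* bLength */
	1,			/* bDescriptorType: DEVICE */
	0x00, 0x02,		/* bcdUSB 2.00 (little endian) */
	0x00, 0x00, 0x00,	/* bDeviceClass/SubClass/Protocol */
	64,			/* bMaxPacketSize0 */
	0x00, 0x00,		/* idVendor (placeholder) */
	0x00, 0x00,		/* idProduct (placeholder) */
	0x00, 0x01,		/* bcdDevice */
	1, 2, 0,		/* iManufacturer, iProduct, iSerialNumber */
	1			/* bNumConfigurations */
};

static cy_as_return_status_t example_set_device_desc(cy_as_device_handle h)
{
	return cy_as_usb_set_descriptor(h, cy_as_usb_desc_device, 0,
			(void *)example_device_desc,
			(uint16_t)sizeof(example_device_desc), 0, 0);
}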
-
-/* Summary
- This function clears all user descriptors stored
- on the West Bridge.
-
- Description
- This function is used to clear all descriptors that
- were previously
- stored on the West Bridge through CyAsUsbSetDescriptor
- calls, and go back
- to the default descriptor setup in the firmware. This
- function should
- only be called when the Antioch enumeration model is
- selected.
-
- * Valid In Asynchronous Callback: Yes (if cb supplied)
- * Nestable: Yes
-
- Returns
- * CY_AS_ERROR_SUCCESS - all descriptors cleared successfully
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_BAD_ENUMERATION_MODE - this function cannot be
- * called if the P port processor is doing enumeration
-
- See Also
- * CyAsUsbSetDescriptor
- * <LINK Enumeration Model>
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_clear_descriptors(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
-
-/* Summary
- This structure contains the descriptor buffer to be
- filled by CyAsUsbGetDescriptor API.
-
- Description
- This data structure contains the buffer to hold the descriptor
- data, and an in/out parameter to indicate the
- length of the buffer and descriptor data in bytes.
-
- See Also
- * CyAsUsbGetDescriptor
-*/
-typedef struct cy_as_get_descriptor_data {
- /* The buffer to hold the returned descriptor */
- void *desc_p;
- /* This is an input and output parameter.
-	 * Before the call, this value contains the length of
-	 * the buffer. After the call, it contains the amount
-	 * of data actually returned. */
- uint32_t length;
-
-} cy_as_get_descriptor_data;
-
-/* Summary
- This function retrieves a given descriptor from the
- West Bridge device
-
- Description
- This function retrieves a USB descriptor from the West
- Bridge device. This function should only be called when the
- West Bridge enumeration model is selected.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Notes
- These descriptors are described in the USB 2.0 specification,
- Chapter 9.
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_BAD_INDEX - a bad index was given for the type of
- * descriptor given
- * CY_AS_ERROR_BAD_ENUMERATION_MODE - this function cannot be
- * called if the P port processor is doing enumeration
-
- See Also
- * CyAsUsbSetDescriptor
- * <LINK Enumeration Model>
-*/
-
-EXTERN cy_as_return_status_t
-cy_as_usb_get_descriptor(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The type of descriptor */
- cy_as_usb_desc_type type,
- /* Index for string descriptor */
- uint8_t index,
- /* Parameters and return value for the get descriptor call */
- cy_as_get_descriptor_data *data,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
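
A sketch of reading a descriptor back, showing the in/out use of the length field described above: it holds the buffer size on entry and the number of bytes returned on exit. The example_ helper and its parameters are illustrative.

static cy_as_return_status_t example_get_device_desc(
	cy_as_device_handle h, uint8_t *buf, uint32_t buflen,
	uint32_t *retlen)
{
	cy_as_get_descriptor_data data;
	cy_as_return_status_t ret;

	data.desc_p = buf;
	data.length = buflen;		/* in: buffer size */

	ret = cy_as_usb_get_descriptor(h, cy_as_usb_desc_device, 0,
			&data, 0, 0);
	if (ret == CY_AS_ERROR_SUCCESS)
		*retlen = data.length;	/* out: bytes returned */
	return ret;
}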
-
-/* Summary
- This function sets the configuration of the physical
- endpoints into one of the twelve supported configurations
-
- Description
- USB endpoints are known as logical endpoints, and each
- logical endpoint is mapped onto one of the four physical
- endpoints in the device. In support of these
- four physical endpoints there are
- four kilobytes of buffer space that can be used as
- buffers for these physical endpoints.
- This 4K of buffer space can be configured in one of
- twelve ways. This function sets the
- buffer configuration for the physical endpoints.
-
- * Config 1: PEP1 (2 * 512), PEP2 (2 * 512),
- * PEP3 (2 * 512), PEP4 (2 * 512)
- * Config 2: PEP1 (2 * 512), PEP2 (2 * 512),
- * PEP3 (4 * 512), PEP4 (N/A)
- * Config 3: PEP1 (2 * 512), PEP2 (2 * 512),
- * PEP3 (2 * 1024), PEP4(N/A)
- * Config 4: PEP1 (4 * 512), PEP2 (N/A),
- * PEP3 (2 * 512), PEP4 (2 * 512)
- * Config 5: PEP1 (4 * 512), PEP2 (N/A),
- * PEP3 (4 * 512), PEP4 (N/A)
- * Config 6: PEP1 (4 * 512), PEP2 (N/A),
- * PEP3 (2 * 1024), PEP4 (N/A)
- * Config 7: PEP1 (2 * 1024), PEP2 (N/A),
- * PEP3 (2 * 512), PEP4 (2 * 512)
- * Config 8: PEP1 (2 * 1024), PEP2 (N/A),
- * PEP3 (4 * 512), PEP4 (N/A)
- * Config 9: PEP1 (2 * 1024), PEP2 (N/A),
- * PEP3 (2 * 1024), PEP4 (N/A)
- * Config 10: PEP1 (3 * 512), PEP2 (N/A),
- * PEP3 (3 * 512), PEP4 (2 * 512)
- * Config 11: PEP1 (3 * 1024), PEP2 (N/A),
- * PEP3 (N/A), PEP4 (2 * 512)
- * Config 12: PEP1 (4 * 1024), PEP2 (N/A),
- * PEP3 (N/A), PEP4 (N/A)
-
- * Valid In Asynchronous Callback: NO
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_INVALID_CONFIGURATION - the configuration given
- * is not between 1 and 12
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_set_physical_configuration(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The physical endpoint configuration number */
- uint8_t config
- );
-
-/* Summary
- This function sets the hardware configuration for a given endpoint
-
- Description
- This function sets the hardware configuration for a given endpoint.
- This is the method to set the direction of the endpoint, the type
- of endpoint, the size of the endpoint buffer, and the buffering
- style for the endpoint.
-
- * Valid In Asynchronous Callback: NO
-
- Notes
- Add documentation about endpoint configuration limitations
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_INVALID_ENDPOINT - the endpoint parameter is invalid
- * CY_AS_ERROR_INVALID_CONFIGURATION - the endpoint configuration
- * given is not valid
- * CY_AS_ERROR_ENDPOINT_CONFIG_NOT_SET - the physical endpoint
- * configuration is not set
-
- See Also
- * CyAsUsbGetEndPointConfig
- * CyAsUsbEndPointConfig
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_set_end_point_config(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep,
- /* The configuration information for the endpoint */
- cy_as_usb_end_point_config *config_p
- );
-
-/* Summary
- This function retrieves the hardware configuration for
- a given endpoint
-
- Description
- This function gets the hardware configuration for the given
- endpoint. This includes information about the direction of
- the endpoint, the type of endpoint, the size of the endpoint
- buffer, and the buffering style for the endpoint.
-
- * Valid In Asynchronous Callback: NO
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_INVALID_ENDPOINT - the endpoint parameter is
- * invalid
-
- See Also
- * CyAsUsbSetEndPointConfig
- * CyAsUsbEndPointConfig
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_get_end_point_config(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest*/
- cy_as_end_point_number_t ep,
- /* The return value containing the endpoint config
- * information */
- cy_as_usb_end_point_config *config_p
- );
-
-/* Summary
- This function commits the configuration information that
- has previously been set.
-
- Description
- The initialization process involves calling CyAsUsbSetEnumConfig()
- and CyAsUsbSetEndPointConfig(). These
- functions do not actually send the configuration information to
- the West Bridge device. Instead, these
- functions store away the configuration information and this
- CyAsUsbCommitConfig() actually finds the
- best hardware configuration based on the requested endpoint
- configuration and sends this optimal
- confiuration down to the West Bridge device.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - a configuration was found and sent
- * to West Bridge
- * CY_AS_ERROR_NOT_CONFIGURED - the West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - the firmware has not been loaded
- * into West Bridge
- * CY_AS_ERROR_INVALID_CONFIGURATION - the configuration requested
- * is not possible
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
-
- See Also
- * CyAsUsbSetEndPointConfig
- * CyAsUsbSetEnumConfig
-*/
-
-EXTERN cy_as_return_status_t
-cy_as_usb_commit_config(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
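
A sketch combining the physical and per-endpoint configuration calls with the commit described above; configuration 5 and the mapping of logical endpoint 3 onto PEP1 as a 512-byte bulk IN endpoint are illustrative choices, not a recommendation.

static cy_as_return_status_t example_configure_ep3(cy_as_device_handle h)
{
	cy_as_usb_end_point_config epcfg;
	cy_as_return_status_t ret;

	/* Config 5: PEP1 (4 * 512), PEP3 (4 * 512). */
	ret = cy_as_usb_set_physical_configuration(h, 5);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	epcfg.enabled = CyTrue;
	epcfg.dir = cy_as_usb_in;
	epcfg.type = cy_as_usb_bulk;
	epcfg.physical = 1;		/* PEP1 */
	epcfg.size = 512;

	ret = cy_as_usb_set_end_point_config(h, 3, &epcfg);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	/* Sends the optimal configuration down to West Bridge. */
	return cy_as_usb_commit_config(h, 0, 0);
}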
-
-/* Summary
- This function reads data from a USB endpoint.
-
- Description
- This function reads data from an OUT endpoint. This function blocks
- until the read is complete.
- If this is a packet read, a single received USB packet will
- complete the read. If this
- is not a packet read, this function will block until all of
- the data requested has been
- received.
-
- * Valid In Asynchronous Callback: NO
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_INVALID_ENDPOINT - the endpoint parameter is
- * invalid
-
- See Also
- * CyAsUsbReadDataAsync
- * CyAsUsbWriteData
- * CyAsUsbWriteDataAsync
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_read_data(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep,
- /* If CyTrue, this is a packet read */
- cy_bool pktread,
- /* The amount of data to read */
- uint32_t dsize,
- /* The amount of data read */
- uint32_t *dataread,
- /* The buffer to hold the data read */
- void *data
- );
-
-/* Summary
- This function reads data from a USB endpoint
-
- Description
- This function reads data from an OUT endpoint. This
- function will return immediately and the callback
- provided will be called when the read is complete.
- If this is a packet read, then the callback will be
- called on the next received packet. If this is not a
- packet read, the callback will be called when the
- requested data is received.
-
- * Valid In Asynchronous Callback: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_INVALID_ENDPOINT - the endpoint parameter is
- * invalid
-
- See Also
- * CyAsUsbReadData
- * CyAsUsbWriteData
- * CyAsUsbWriteDataAsync
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_read_data_async(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep,
- /* If CyTrue, this is a packet read */
- cy_bool pktread,
- /* The amount of data to read */
- uint32_t dsize,
- /* The buffer for storing the data */
- void *data,
- /* The callback function to call when the data is read */
- cy_as_usb_io_callback callback
- );
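
A sketch of an asynchronous packet read using the I/O callback type declared earlier; endpoint 5 being configured as a bulk OUT endpoint is an assumption, and the buffer must stay valid until the callback runs or cy_as_usb_cancel_async() is called.

static uint8_t example_rx_buf[512];

static void example_read_done(cy_as_device_handle h,
	cy_as_end_point_number_t ep, uint32_t count, void *buffer,
	cy_as_return_status_t status)
{
	(void)h;
	(void)ep;
	(void)buffer;
	(void)count;

	if (status != CY_AS_ERROR_SUCCESS)
		return;

	/* 'count' bytes of example_rx_buf are now valid here. */
}

static cy_as_return_status_t example_start_read(cy_as_device_handle h)
{
	return cy_as_usb_read_data_async(h, 5, CyTrue,
			(uint32_t)sizeof(example_rx_buf), example_rx_buf,
			example_read_done);
}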
-
-/* Summary
- This function writes data to a USB endpoint
-
- Description
- This function writes data to an IN endpoint data buffer.
- Multiple USB packets may be sent until all data requested
- has been sent. This function blocks until all of the data
- has been sent.
-
- * Valid In Asynchronous Callback: NO
-
- Notes
- Calling this function with a dsize of zero will result in
- a zero length packet transmitted to the USB host.
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_INVALID_ENDPOINT - the endpoint parameter is
- * invalid
-
- See Also
- * CyAsUsbReadData
- * CyAsUsbReadDataAsync
- * CyAsUsbWriteDataAsync
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_write_data(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint to write data to */
- cy_as_end_point_number_t ep,
- /* The size of the data to write */
- uint32_t dsize,
- /* The data buffer */
- void *data
- );
-
-/* Summary
- This function writes data to a USB endpoint
-
- Description
- This function writes data to an IN endpoint data buffer.
- This function returns immediately and when the write
- completes, or if an error occurs, the callback function
- is called to indicate completion of the write operation.
-
- * Valid In Asynchronous Callback: YES
-
- Notes
- Calling this function with a dsize of zero will result
- in a zero length packet transmitted to the USB host.
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_INVALID_ENDPOINT - the endpoint parameter is
- * invalid
-
- See Also
- * CyAsUsbReadData
- * CyAsUsbReadDataAsync
- * CyAsUsbWriteData
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_write_data_async(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint to write data to */
- cy_as_end_point_number_t ep,
- /* The size of the data */
- uint32_t dsize,
- /* The buffer containing the data */
- void *data,
- /* If true, send a short packet to terminate data */
- cy_bool spacket,
- /* The callback to call when the data is written */
- cy_as_usb_io_callback callback
- );
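
A companion sketch for the asynchronous write: the short-packet flag (spacket) is set so the transfer is terminated with a short packet, and the completion callback follows the same pattern as the asynchronous read above. Endpoint 3 being a bulk IN endpoint is an assumption.

static cy_as_return_status_t example_start_write(cy_as_device_handle h,
	void *buf, uint32_t len, cy_as_usb_io_callback done)
{
	/* Endpoint 3 is assumed configured as a bulk IN endpoint. */
	return cy_as_usb_write_data_async(h, 3, len, buf, CyTrue, done);
}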
-
-/* Summary
- This function aborts an outstanding asynchronous
- operation on a given endpoint
-
- Description
- This function aborts any outstanding operation that is
- pending on the given endpoint.
-
- * Valid In Asynchronous Callback: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not
- * running
- * CY_AS_ERROR_ASYNC_NOT_PENDING - no asynchronous USB
- * operation was pending
-
- See Also
- * CyAsUsbReadData
- * CyAsUsbReadDataAsync
- * CyAsUsbWriteData
- * CyAsUsbWriteDataAsync
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_cancel_async(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep
- );
-
-/* Summary
- This function sets a stall condition on a given endpoint
-
- Description
- This function sets a stall condition on the given endpoint.
- If the callback function is not zero, the function is
- executed asynchronously and the callback is called when
- the function is completed. If the callback function is
- zero, this function executes synchronously and will not
- return until the function has completed.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_INVALID_ENDPOINT - the endpoint given was invalid,
- * or was not configured as an OUT endpoint
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_INVALID_IN_CALLBACK (only if no cb supplied)
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
-
- See Also
- * CyAsUsbGetStall
- * CyAsUsbClearStall
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_set_stall(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
-);
-
-/* Summary
- This function clears a stall condition on a given endpoint
-
- Description
- This function clears a stall condition on the given endpoint.
- If the callback function is not zero, the function is
- executed asynchronously and the callback is called when the
- function is completed. If the callback function is zero, this
- function executes synchronously and will not return until the
- function has completed.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_INVALID_ENDPOINT - the endpoint given was invalid,
- * or was not configured as an OUT endpoint
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_INVALID_IN_CALLBACK (only if no cb supplied)
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
-
- See Also
- * CyAsUsbGetStall
- * CyAsUsbSetStall
-*/
-
-EXTERN cy_as_return_status_t
-cy_as_usb_clear_stall(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
-
-
-/* Summary
- This function returns the stall status for a given endpoint
-
- Description
- This function returns the stall status for a given endpoint
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_INVALID_ENDPOINT - the endpoint given was invalid,
- * or was not configured as an OUT endpoint
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_INVALID_IN_CALLBACK
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
-
- See Also
- * CyAsUsbGetStall
- * CyAsUsbSetStall
- * CyAsUsbClearStall
-*/
-
-EXTERN cy_as_return_status_t
-cy_as_usb_get_stall(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep,
- /* The return value for the stall state */
- cy_bool *stall_p,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
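
A short sketch tying the stall calls together synchronously (cb == 0): query the endpoint and clear the stall only if it is set.

static cy_as_return_status_t example_clear_stall_if_set(
	cy_as_device_handle h, cy_as_end_point_number_t ep)
{
	cy_bool stalled = CyFalse;
	cy_as_return_status_t ret;

	ret = cy_as_usb_get_stall(h, ep, &stalled, 0, 0);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	if (stalled)
		ret = cy_as_usb_clear_stall(h, ep, 0, 0);
	return ret;
}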
-
-/* Summary
- This function sets a NAK condition on a given endpoint
-
- Description
- This function sets a NAK condition on the given endpoint.
- If the callback function is not zero, the function is
- executed asynchronously and the callback is called when
- the function is completed. If the callback function is
- zero, this function executes synchronously and will not
- return until the function has completed.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_INVALID_ENDPOINT - the endpoint given was
- * invalid, or was not configured as an OUT endpoint
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_INVALID_IN_CALLBACK (only if no cb supplied)
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
-
- See Also
- * CyAsUsbGetNak
- * CyAsUsbClearNak
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_set_nak(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
-);
-
-/* Summary
- This function clears a NAK condition on a given endpoint
-
- Description
- This function clears a NAK condition on the given endpoint.
- If the callback function is not zero, the function is
- executed asynchronously and the callback is called when the
- function is completed. If the callback function is zero,
- this function executes synchronously and will not return
- until the function has completed.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_INVALID_ENDPOINT - the endpoint given was invalid,
- * or was not configured as an OUT endpoint
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_INVALID_IN_CALLBACK (only if no cb supplied)
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
-
- See Also
- * CyAsUsbGetNak
- * CyAsUsbSetNak
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_clear_nak(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
-
-/* Summary
- This function returns the NAK status for a given endpoint
-
- Description
- This function returns the NAK status for a given endpoint
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_INVALID_ENDPOINT - the endpoint given was invalid,
- * or was not configured as an OUT endpoint
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_INVALID_IN_CALLBACK
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
-
- See Also
- * CyAsUsbSetNak
- * CyAsUsbClearNak
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_get_nak(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep,
- /* The return value for the stall state */
- cy_bool *nak_p,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
-);
-
-/* Summary
- This function triggers a USB remote wakeup from the Processor
- API
-
- Description
- When there is a Suspend condition on the USB bus, this function
- programmatically takes the USB bus out of the suspend state.
-
- * Valid In Asynchronous Callback: YES (if cb supplied)
- * Nestable: YES
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_INVALID_HANDLE
- * CY_AS_ERROR_INVALID_IN_CALLBACK
- * CY_AS_ERROR_OUT_OF_MEMORY
- * CY_AS_ERROR_INVALID_RESPONSE
- * CY_AS_ERROR_NOT_IN_SUSPEND
-
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_signal_remote_wakeup(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
-
-/* Summary
- This function sets the threshold levels for mass storage progress
- reports from the West Bridge.
-
- Description
- The West Bridge firmware can be configured to track the amount of
- read/write activity on the mass storage device, and send progress
- reports when the activity level has crossed a threshold level.
- This function sets the threshold levels for the progress reports.
- Set wr_sectors and rd_sectors to 0, if the progress reports are to
- be turned off.
-
- * Valid In Asynchronous Callback: Yes (if cb supplied)
- * Nestable: Yes
-
- Returns
- * CY_AS_ERROR_SUCCESS - the function succeeded
- * CY_AS_ERROR_NOT_RUNNING - the USB stack is not running
- * CY_AS_ERROR_TIMEOUT - a timeout occurred communicating with
- * the West Bridge device
- * CY_AS_ERROR_INVALID_HANDLE - Bad handle
- * CY_AS_ERROR_INVALID_IN_CALLBACK - Synchronous call made
- * while in callback
- * CY_AS_ERROR_OUT_OF_MEMORY - Failed allocating memory for
- * request processing
- * CY_AS_ERROR_NOT_SUPPORTED - Firmware version does not support
- * mass storage progress tracking
- * CY_AS_ERROR_INVALID_RESPONSE - Unexpected response from
- * Firmware
-
- See Also
- * CyAsUsbMSCProgressData
- * CyAsEventUsbMSCProgress
-*/
-EXTERN cy_as_return_status_t
-cy_as_usb_set_m_s_report_threshold(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* Number of sectors written before report is sent */
- uint32_t wr_sectors,
- /* Number of sectors read before report is sent */
- uint32_t rd_sectors,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
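A brief usage sketch of the threshold call declared above; the wrapper names and sector counts are illustrative. As described, writing zero for both counts turns the progress reports off:

	/* Sketch only: report after every 1024 sectors written or 4096 read. */
	static cy_as_return_status_t enable_ms_progress(cy_as_device_handle h)
	{
		return cy_as_usb_set_m_s_report_threshold(h, 1024, 4096, 0, 0);
	}

	/* Sketch only: 0/0 disables mass storage progress reporting again. */
	static cy_as_return_status_t disable_ms_progress(cy_as_device_handle h)
	{
		return cy_as_usb_set_m_s_report_threshold(h, 0, 0, 0, 0);
	}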
-
-/* Summary
- Specify which of the partitions on a partitioned mass storage
- device should be made visible to USB.
-
- Description
- West Bridge firmware supports the creation of up to two
- partitions on mass storage devices connected to the West Bridge
- device. When there are two partitions on a device, the user can
- choose which of these partitions should be made visible to the
- USB host through the USB mass storage interface. This function
- allows the user to configure the partitions that should be
- enumerated. At least one partition should be selected through
- this API. If neither partition needs to be enumerated, use
- CyAsUsbSetEnumConfig to control this.
-
- * Valid in Asynchronous callback: Yes (if cb supplied)
- * Nestable: Yes
-
- Returns
- * CY_AS_ERROR_SUCCESS - operation completed successfully
- * CY_AS_ERROR_INVALID_HANDLE - invalid handle to the West
- * Bridge device
- * CY_AS_ERROR_NOT_CONFIGURED - West Bridge device has not
- * been configured
- * CY_AS_ERROR_NO_FIRMWARE - no firmware running on West
- * Bridge device
- * CY_AS_ERROR_NOT_RUNNING - USB stack has not been started
- * CY_AS_ERROR_IN_SUSPEND - West Bridge device is in
- * suspend mode
- * CY_AS_ERROR_INVALID_CALL_SEQUENCE - this API has to be
- * called before CyAsUsbSetEnumConfig
- * CY_AS_ERROR_OUT_OF_MEMORY - failed to get memory to
- * process the request
- * CY_AS_ERROR_NO_SUCH_UNIT - Storage device addressed has
- * not been partitioned
- * CY_AS_ERROR_NOT_SUPPORTED - operation is not supported by
- * active device/firmware.
-
- See Also
- * CyAsStorageCreatePPartition
- * CyAsStorageRemovePPartition
- * CyAsUsbMsType_t
- */
-EXTERN cy_as_return_status_t
-cy_as_usb_select_m_s_partitions(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* Bus index of the device being addressed */
- cy_as_bus_number_t bus,
- /* Device id of the device being addressed */
- uint32_t device,
- /* Selection of partitions to be enumerated */
- cy_as_usb_m_s_type_t type,
- /* The callback, if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
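A usage sketch for the partition-selection call above, respecting the documented ordering (it must run before CyAsUsbSetEnumConfig). The wrapper name and the bus/device numbers are illustrative; the caller chooses the cy_as_usb_m_s_type_t value:

	/* Sketch only: expose the selected partition(s) on bus 1, device 0,
	 * synchronously, before CyAsUsbSetEnumConfig is called. */
	static cy_as_return_status_t select_partitions(cy_as_device_handle h,
						       cy_as_usb_m_s_type_t type)
	{
		/* CY_AS_ERROR_NO_SUCH_UNIT => the device is not partitioned */
		return cy_as_usb_select_m_s_partitions(h, 1, 0, type, 0, 0);
	}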
-
-extern cy_as_media_type
-cy_as_storage_get_media_from_address(uint16_t v);
-
-extern cy_as_bus_number_t
-cy_as_storage_get_bus_from_address(uint16_t v);
-
-extern uint32_t
-cy_as_storage_get_device_from_address(uint16_t v);
-
-/* For supporting deprecated functions */
-#include "cyasusb_dep.h"
-
-#include "cyas_cplus_end.h"
-
-#endif /* _INCLUDED_CYASUSB_H_ */
diff --git a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasusb_dep.h b/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasusb_dep.h
deleted file mode 100644
index 829eddee39b..00000000000
--- a/drivers/staging/westbridge/astoria/include/linux/westbridge/cyasusb_dep.h
+++ /dev/null
@@ -1,224 +0,0 @@
-/* Cypress West Bridge API header file (cyasusb_dep.h)
-## ===========================
-## Copyright (C) 2010 Cypress Semiconductor
-##
-## This program is free software; you can redistribute it and/or
-## modify it under the terms of the GNU General Public License
-## as published by the Free Software Foundation; either version 2
-## of the License, or (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-## GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; if not, write to the Free Software
-## Foundation, Inc., 51 Franklin Street
-## Fifth Floor, Boston, MA 02110-1301, USA.
-## ===========================
-*/
-
-/*
- * This header will contain Antioch specific declaration
- * of the APIs that are deprecated in Astoria SDK. This is
- * for maintaining backward compatibility.
- */
-
-#ifndef __INCLUDED_CYASUSB_DEP_H__
-#define __INCLUDED_CYASUSB_DEP_H__
-
-#ifndef __doxygen__
-
-/*
- This data structure is the data passed via the evdata
- parameter on a usb event callback for the inquiry request.
-*/
-
-typedef struct cy_as_usb_inquiry_data_dep {
- /* The media for the event */
- cy_as_media_type media;
- /* The EVPD bit from the SCSI INQUIRY request */
- uint8_t evpd;
- /* The codepage in the inquiry request */
- uint8_t codepage;
- /* This bool must be set to CyTrue to indicate
- * that the inquiry data was changed */
- cy_bool updated;
- /* The length of the data */
- uint16_t length;
- /* The inquiry data */
- void *data;
-} cy_as_usb_inquiry_data_dep;
-
-
-typedef struct cy_as_usb_unknown_command_data_dep {
- /* The media for the event */
- cy_as_media_type media;
- /* The length of the request (should be 16 bytes) */
- uint16_t reqlen;
- /* The request */
- void *request;
- /* The returned status value for the command */
- uint8_t status;
- /* If status is failed, the sense key */
- uint8_t key;
- /* If status is failed, the additional sense code */
- uint8_t asc;
- /* If status is failed, the additional sense code qualifier */
- uint8_t ascq;
-} cy_as_usb_unknown_command_data_dep;
-
-
-typedef struct cy_as_usb_start_stop_data_dep {
- /* The media type for the event */
- cy_as_media_type media;
- /* CyTrue means start request, CyFalse means stop request */
- cy_bool start;
- /* CyTrue means LoEj bit set, otherwise false */
- cy_bool loej;
-} cy_as_usb_start_stop_data_dep;
-
-
-typedef struct cy_as_usb_enum_control_dep {
- /* The bits in this member determine which mass storage devices
- are enumerated. see cy_as_usb_mass_storage_enum for more details. */
- uint8_t enum_mass_storage;
- /* If true, West Bridge will control enumeration. If this is false the
- P port controls enumeration. If the P port is controlling
- enumeration, traffic will be received via endpoint zero. */
- cy_bool antioch_enumeration;
- /* This is the interface # to use for the mass storage interface,
- if mass storage is enumerated. if mass storage is not enumerated
- this value should be zero. */
- uint8_t mass_storage_interface;
- /* If true, Inquiry, START/STOP, and unknown mass storage
- requests cause a callback to occur for handling by the
- baseband processor. */
- cy_bool mass_storage_callbacks;
-} cy_as_usb_enum_control_dep;
-
-
-typedef void (*cy_as_usb_event_callback_dep)(
- /* Handle to the device to configure */
- cy_as_device_handle handle,
- /* The event type being reported */
- cy_as_usb_event ev,
- /* The data associated with the event being reported */
- void *evdata
-);
-
-
-
-/* Register Callback api */
-EXTERN cy_as_return_status_t
-cy_as_usb_register_callback_dep(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The function to call */
- cy_as_usb_event_callback_dep callback
- );
-
-
-extern cy_as_return_status_t
-cy_as_usb_set_enum_config_dep(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The USB configuration information */
- cy_as_usb_enum_control_dep *config_p,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
-
-
-extern cy_as_return_status_t
-cy_as_usb_get_enum_config_dep(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The return value for USB configuration information */
- cy_as_usb_enum_control_dep *config_p,
- /* The callback if async call */
- cy_as_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
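A sketch of filling the deprecated enumeration-control structure and handing it to the call above. The wrapper name and field values are illustrative, and cy_true is assumed to be the usual cy_bool true value from the base type headers:

	/* Sketch only: West Bridge enumerates, mass storage on interface 0,
	 * with INQUIRY/START-STOP/unknown-command callbacks to the processor. */
	static cy_as_return_status_t enum_example(cy_as_device_handle h)
	{
		cy_as_usb_enum_control_dep cfg = { 0 };

		cfg.enum_mass_storage = 1;
		cfg.antioch_enumeration = cy_true;
		cfg.mass_storage_interface = 0;
		cfg.mass_storage_callbacks = cy_true;
		return cy_as_usb_set_enum_config_dep(h, &cfg, 0, 0);
	}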
-
-extern cy_as_return_status_t
-cy_as_usb_get_descriptor_dep(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The type of descriptor */
- cy_as_usb_desc_type type,
- /* Index for string descriptor */
- uint8_t index,
- /* The buffer to hold the returned descriptor */
- void *desc_p,
- /* This is an input and output parameter. Before the call this pointer
- points to a uint32_t that contains the length of the buffer. After
- the call, this value contains the amount of data actually returned. */
- uint32_t *length_p
- );
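Because length_p is both an input and an output, the caller passes the buffer size in and reads the actual descriptor length back out. A brief sketch; the wrapper name and buffer size are illustrative:

	/* Sketch only: fetch a descriptor into a local buffer. */
	static cy_as_return_status_t get_desc_example(cy_as_device_handle h,
						      cy_as_usb_desc_type type)
	{
		uint8_t buf[256];
		uint32_t len = sizeof(buf);	/* in: size of the buffer */
		cy_as_return_status_t st;

		st = cy_as_usb_get_descriptor_dep(h, type, 0, buf, &len);
		/* out: on success, len holds the bytes actually returned */
		return st;
	}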
-
-extern cy_as_return_status_t
-cy_as_usb_set_stall_dep(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep,
- /* The callback if async call */
- cy_as_usb_function_callback cb,
- /* Client supplied data */
- uint32_t client
-);
-
-EXTERN cy_as_return_status_t
-cy_as_usb_clear_stall_dep(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep,
- /* The callback if async call */
- cy_as_usb_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
-
-EXTERN cy_as_return_status_t
-cy_as_usb_set_nak_dep(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep,
- /* The callback if async call */
- cy_as_usb_function_callback cb,
- /* Client supplied data */
- uint32_t client
-);
-
-EXTERN cy_as_return_status_t
-cy_as_usb_clear_nak_dep(
- /* Handle to the West Bridge device */
- cy_as_device_handle handle,
- /* The endpoint of interest */
- cy_as_end_point_number_t ep,
- /* The callback if async call */
- cy_as_usb_function_callback cb,
- /* Client supplied data */
- uint32_t client
- );
-
-EXTERN cy_as_return_status_t
-cy_as_usb_select_m_s_partitions_dep(
- cy_as_device_handle handle,
- cy_as_media_type media,
- uint32_t device,
- cy_as_usb_m_s_type_t type,
- cy_as_function_callback cb,
- uint32_t client
- );
-
-#endif /*__doxygen*/
-
-#endif /*__INCLUDED_CYANSTORAGE_DEP_H__*/
diff --git a/drivers/staging/winbond/mds_s.h b/drivers/staging/winbond/mds_s.h
index eeedf018636..07d835b3b70 100644
--- a/drivers/staging/winbond/mds_s.h
+++ b/drivers/staging/winbond/mds_s.h
@@ -3,7 +3,7 @@
#include <linux/timer.h>
#include <linux/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "localpara.h"
#include "mac_structures.h"
diff --git a/drivers/staging/winbond/phy_calibration.c b/drivers/staging/winbond/phy_calibration.c
index 79e53e46ecc..c5a07fbe10a 100644
--- a/drivers/staging/winbond/phy_calibration.c
+++ b/drivers/staging/winbond/phy_calibration.c
@@ -44,147 +44,147 @@ static const s32 Angles[] = {
s32 _s13_to_s32(u32 data)
{
- u32 val;
+ u32 val;
- val = (data & 0x0FFF);
+ val = (data & 0x0FFF);
- if ((data & BIT(12)) != 0)
- val |= 0xFFFFF000;
+ if ((data & BIT(12)) != 0)
+ val |= 0xFFFFF000;
- return ((s32) val);
+ return ((s32) val);
}
u32 _s32_to_s13(s32 data)
{
- u32 val;
+ u32 val;
- if (data > 4095)
- data = 4095;
- else if (data < -4096)
- data = -4096;
+ if (data > 4095)
+ data = 4095;
+ else if (data < -4096)
+ data = -4096;
- val = data & 0x1FFF;
+ val = data & 0x1FFF;
- return val;
+ return val;
}
/****************************************************************************/
s32 _s4_to_s32(u32 data)
{
- s32 val;
+ s32 val;
- val = (data & 0x0007);
+ val = (data & 0x0007);
- if ((data & BIT(3)) != 0)
- val |= 0xFFFFFFF8;
+ if ((data & BIT(3)) != 0)
+ val |= 0xFFFFFFF8;
- return val;
+ return val;
}
u32 _s32_to_s4(s32 data)
{
- u32 val;
+ u32 val;
- if (data > 7)
- data = 7;
- else if (data < -8)
- data = -8;
+ if (data > 7)
+ data = 7;
+ else if (data < -8)
+ data = -8;
- val = data & 0x000F;
+ val = data & 0x000F;
- return val;
+ return val;
}
/****************************************************************************/
s32 _s5_to_s32(u32 data)
{
- s32 val;
+ s32 val;
- val = (data & 0x000F);
+ val = (data & 0x000F);
- if ((data & BIT(4)) != 0)
- val |= 0xFFFFFFF0;
+ if ((data & BIT(4)) != 0)
+ val |= 0xFFFFFFF0;
- return val;
+ return val;
}
u32 _s32_to_s5(s32 data)
{
- u32 val;
+ u32 val;
- if (data > 15)
- data = 15;
- else if (data < -16)
- data = -16;
+ if (data > 15)
+ data = 15;
+ else if (data < -16)
+ data = -16;
- val = data & 0x001F;
+ val = data & 0x001F;
- return val;
+ return val;
}
/****************************************************************************/
s32 _s6_to_s32(u32 data)
{
- s32 val;
+ s32 val;
- val = (data & 0x001F);
+ val = (data & 0x001F);
- if ((data & BIT(5)) != 0)
- val |= 0xFFFFFFE0;
+ if ((data & BIT(5)) != 0)
+ val |= 0xFFFFFFE0;
- return val;
+ return val;
}
u32 _s32_to_s6(s32 data)
{
- u32 val;
+ u32 val;
- if (data > 31)
- data = 31;
- else if (data < -32)
- data = -32;
+ if (data > 31)
+ data = 31;
+ else if (data < -32)
+ data = -32;
- val = data & 0x003F;
+ val = data & 0x003F;
- return val;
+ return val;
}
/****************************************************************************/
s32 _s9_to_s32(u32 data)
{
- s32 val;
+ s32 val;
- val = data & 0x00FF;
+ val = data & 0x00FF;
- if ((data & BIT(8)) != 0)
- val |= 0xFFFFFF00;
+ if ((data & BIT(8)) != 0)
+ val |= 0xFFFFFF00;
- return val;
+ return val;
}
u32 _s32_to_s9(s32 data)
{
- u32 val;
+ u32 val;
- if (data > 255)
- data = 255;
- else if (data < -256)
- data = -256;
+ if (data > 255)
+ data = 255;
+ else if (data < -256)
+ data = -256;
- val = data & 0x01FF;
+ val = data & 0x01FF;
- return val;
+ return val;
}
/****************************************************************************/
s32 _floor(s32 n)
{
- if (n > 0)
- n += 5;
- else
- n -= 5;
+ if (n > 0)
+ n += 5;
+ else
+ n -= 5;
- return (n/10);
+ return (n/10);
}
/****************************************************************************/
@@ -195,105 +195,105 @@ s32 _floor(s32 n)
*/
u32 _sqrt(u32 sqsum)
{
- u32 sq_rt;
-
- int g0, g1, g2, g3, g4;
- int seed;
- int next;
- int step;
-
- g4 = sqsum / 100000000;
- g3 = (sqsum - g4*100000000) / 1000000;
- g2 = (sqsum - g4*100000000 - g3*1000000) / 10000;
- g1 = (sqsum - g4*100000000 - g3*1000000 - g2*10000) / 100;
- g0 = (sqsum - g4*100000000 - g3*1000000 - g2*10000 - g1*100);
-
- next = g4;
- step = 0;
- seed = 0;
- while (((seed+1)*(step+1)) <= next) {
- step++;
- seed++;
- }
-
- sq_rt = seed * 10000;
- next = (next-(seed*step))*100 + g3;
-
- step = 0;
- seed = 2 * seed * 10;
- while (((seed+1)*(step+1)) <= next) {
- step++;
- seed++;
- }
-
- sq_rt = sq_rt + step * 1000;
- next = (next - seed * step) * 100 + g2;
- seed = (seed + step) * 10;
- step = 0;
- while (((seed+1)*(step+1)) <= next) {
- step++;
- seed++;
- }
-
- sq_rt = sq_rt + step * 100;
- next = (next - seed * step) * 100 + g1;
- seed = (seed + step) * 10;
- step = 0;
-
- while (((seed+1)*(step+1)) <= next) {
- step++;
- seed++;
- }
-
- sq_rt = sq_rt + step * 10;
- next = (next - seed * step) * 100 + g0;
- seed = (seed + step) * 10;
- step = 0;
-
- while (((seed+1)*(step+1)) <= next) {
- step++;
- seed++;
- }
-
- sq_rt = sq_rt + step;
-
- return sq_rt;
+ u32 sq_rt;
+
+ int g0, g1, g2, g3, g4;
+ int seed;
+ int next;
+ int step;
+
+ g4 = sqsum / 100000000;
+ g3 = (sqsum - g4*100000000) / 1000000;
+ g2 = (sqsum - g4*100000000 - g3*1000000) / 10000;
+ g1 = (sqsum - g4*100000000 - g3*1000000 - g2*10000) / 100;
+ g0 = (sqsum - g4*100000000 - g3*1000000 - g2*10000 - g1*100);
+
+ next = g4;
+ step = 0;
+ seed = 0;
+ while (((seed+1)*(step+1)) <= next) {
+ step++;
+ seed++;
+ }
+
+ sq_rt = seed * 10000;
+ next = (next-(seed*step))*100 + g3;
+
+ step = 0;
+ seed = 2 * seed * 10;
+ while (((seed+1)*(step+1)) <= next) {
+ step++;
+ seed++;
+ }
+
+ sq_rt = sq_rt + step * 1000;
+ next = (next - seed * step) * 100 + g2;
+ seed = (seed + step) * 10;
+ step = 0;
+ while (((seed+1)*(step+1)) <= next) {
+ step++;
+ seed++;
+ }
+
+ sq_rt = sq_rt + step * 100;
+ next = (next - seed * step) * 100 + g1;
+ seed = (seed + step) * 10;
+ step = 0;
+
+ while (((seed+1)*(step+1)) <= next) {
+ step++;
+ seed++;
+ }
+
+ sq_rt = sq_rt + step * 10;
+ next = (next - seed * step) * 100 + g0;
+ seed = (seed + step) * 10;
+ step = 0;
+
+ while (((seed+1)*(step+1)) <= next) {
+ step++;
+ seed++;
+ }
+
+ sq_rt = sq_rt + step;
+
+ return sq_rt;
}
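The routine above is the classic decimal long-division square root: it splits the input into base-100 digit groups (g4..g0) and derives one decimal digit of the root per group, so it returns the integer part of the square root. A quick sanity check, assuming the function is linked into a small host-side harness and that u32 matches unsigned int:

	#include <stdio.h>

	typedef unsigned int u32;	/* assumption: matches the driver's u32 */
	extern u32 _sqrt(u32 sqsum);

	int main(void)
	{
		/* 1999*1999 = 3996001, 2000*2000 = 4000000 */
		printf("%u %u %u\n",
		       _sqrt(3996001), _sqrt(3999999), _sqrt(4000000));
		/* expected output: 1999 1999 2000 */
		return 0;
	}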
/****************************************************************************/
void _sin_cos(s32 angle, s32 *sin, s32 *cos)
{
- s32 X, Y, TargetAngle, CurrAngle;
- unsigned Step;
-
- X = FIXED(AG_CONST); /* AG_CONST * cos(0) */
- Y = 0; /* AG_CONST * sin(0) */
- TargetAngle = abs(angle);
- CurrAngle = 0;
-
- for (Step = 0; Step < 12; Step++) {
- s32 NewX;
-
- if (TargetAngle > CurrAngle) {
- NewX = X - (Y >> Step);
- Y = (X >> Step) + Y;
- X = NewX;
- CurrAngle += Angles[Step];
- } else {
- NewX = X + (Y >> Step);
- Y = -(X >> Step) + Y;
- X = NewX;
- CurrAngle -= Angles[Step];
- }
- }
-
- if (angle > 0) {
- *cos = X;
- *sin = Y;
- } else {
- *cos = X;
- *sin = -Y;
- }
+ s32 X, Y, TargetAngle, CurrAngle;
+ unsigned Step;
+
+ X = FIXED(AG_CONST); /* AG_CONST * cos(0) */
+ Y = 0; /* AG_CONST * sin(0) */
+ TargetAngle = abs(angle);
+ CurrAngle = 0;
+
+ for (Step = 0; Step < 12; Step++) {
+ s32 NewX;
+
+ if (TargetAngle > CurrAngle) {
+ NewX = X - (Y >> Step);
+ Y = (X >> Step) + Y;
+ X = NewX;
+ CurrAngle += Angles[Step];
+ } else {
+ NewX = X + (Y >> Step);
+ Y = -(X >> Step) + Y;
+ X = NewX;
+ CurrAngle -= Angles[Step];
+ }
+ }
+
+ if (angle > 0) {
+ *cos = X;
+ *sin = Y;
+ } else {
+ *cos = X;
+ *sin = -Y;
+ }
}
static unsigned char hal_get_dxx_reg(struct hw_data *pHwData, u16 number, u32 * pValue)
@@ -338,24 +338,24 @@ void _reset_rx_cal(struct hw_data *phw_data)
/**********************************************/
void _rxadc_dc_offset_cancellation_winbond(struct hw_data *phw_data, u32 frequency)
{
- u32 reg_agc_ctrl3;
- u32 reg_a_acq_ctrl;
- u32 reg_b_acq_ctrl;
- u32 val;
+ u32 reg_agc_ctrl3;
+ u32 reg_a_acq_ctrl;
+ u32 reg_b_acq_ctrl;
+ u32 val;
- PHY_DEBUG(("[CAL] -> [1]_rxadc_dc_offset_cancellation()\n"));
- phy_init_rf(phw_data);
+ PHY_DEBUG(("[CAL] -> [1]_rxadc_dc_offset_cancellation()\n"));
+ phy_init_rf(phw_data);
- /* set calibration channel */
- if ((RF_WB_242 == phw_data->phy_type) ||
+ /* set calibration channel */
+ if ((RF_WB_242 == phw_data->phy_type) ||
(RF_WB_242_1 == phw_data->phy_type)) /* 20060619.5 Add */{
- if ((frequency >= 2412) && (frequency <= 2484)) {
- /* w89rf242 change frequency to 2390Mhz */
- PHY_DEBUG(("[CAL] W89RF242/11G/Channel=2390Mhz\n"));
+ if ((frequency >= 2412) && (frequency <= 2484)) {
+ /* w89rf242 change frequency to 2390Mhz */
+ PHY_DEBUG(("[CAL] W89RF242/11G/Channel=2390Mhz\n"));
phy_set_rf_data(phw_data, 3, (3<<24)|0x025586);
- }
- } else {
+ }
+ } else {
}
@@ -542,7 +542,7 @@ void _txidac_dc_offset_cancellation_winbond(struct hw_data *phw_data)
}
if (loop >= 19)
- fix_cancel_dc_i = 0;
+ fix_cancel_dc_i = 0;
reg_dc_cancel &= ~(0x03FF);
reg_dc_cancel |= (_s32_to_s5(fix_cancel_dc_i) << CANCEL_DC_I_SHIFT);
@@ -657,7 +657,7 @@ void _txqdac_dc_offset_cacellation_winbond(struct hw_data *phw_data)
}
if (loop >= 19)
- fix_cancel_dc_q = 0;
+ fix_cancel_dc_q = 0;
reg_dc_cancel &= ~(0x001F);
reg_dc_cancel |= (_s32_to_s5(fix_cancel_dc_q) << CANCEL_DC_Q_SHIFT);
@@ -1154,33 +1154,33 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
capture_time = 0;
for (capture_time = 0; capture_time < 10; capture_time++) {
- /* i. Set "calib_start" to 0x0 */
- reg_mode_ctrl &= ~MASK_CALIB_START;
- if (!hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl))/*20060718.1 modify */
- return 0;
- PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
+ /* i. Set "calib_start" to 0x0 */
+ reg_mode_ctrl &= ~MASK_CALIB_START;
+ if (!hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl))/*20060718.1 modify */
+ return 0;
+ PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
- reg_mode_ctrl &= ~MASK_IQCAL_MODE;
- reg_mode_ctrl |= (MASK_CALIB_START|0x1);
- hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
- PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
+ reg_mode_ctrl &= ~MASK_IQCAL_MODE;
+ reg_mode_ctrl |= (MASK_CALIB_START|0x1);
+ hw_set_dxx_reg(phw_data, REG_MODE_CTRL, reg_mode_ctrl);
+ PHY_DEBUG(("[CAL] MODE_CTRL (write) = 0x%08X\n", reg_mode_ctrl));
- /* c. */
- hw_get_dxx_reg(phw_data, REG_CALIB_READ1, &val);
- PHY_DEBUG(("[CAL] CALIB_READ1 = 0x%08X\n", val));
+ /* c. */
+ hw_get_dxx_reg(phw_data, REG_CALIB_READ1, &val);
+ PHY_DEBUG(("[CAL] CALIB_READ1 = 0x%08X\n", val));
- iqcal_tone_i = _s13_to_s32(val & 0x00001FFF);
- iqcal_tone_q = _s13_to_s32((val & 0x03FFE000) >> 13);
- PHY_DEBUG(("[CAL] ** iqcal_tone_i = %d, iqcal_tone_q = %d\n",
- iqcal_tone_i, iqcal_tone_q));
+ iqcal_tone_i = _s13_to_s32(val & 0x00001FFF);
+ iqcal_tone_q = _s13_to_s32((val & 0x03FFE000) >> 13);
+ PHY_DEBUG(("[CAL] ** iqcal_tone_i = %d, iqcal_tone_q = %d\n",
+ iqcal_tone_i, iqcal_tone_q));
- hw_get_dxx_reg(phw_data, REG_CALIB_READ2, &val);
- PHY_DEBUG(("[CAL] CALIB_READ2 = 0x%08X\n", val));
+ hw_get_dxx_reg(phw_data, REG_CALIB_READ2, &val);
+ PHY_DEBUG(("[CAL] CALIB_READ2 = 0x%08X\n", val));
- iqcal_image_i = _s13_to_s32(val & 0x00001FFF);
- iqcal_image_q = _s13_to_s32((val & 0x03FFE000) >> 13);
- PHY_DEBUG(("[CAL] ** iqcal_image_i = %d, iqcal_image_q = %d\n",
- iqcal_image_i, iqcal_image_q));
+ iqcal_image_i = _s13_to_s32(val & 0x00001FFF);
+ iqcal_image_q = _s13_to_s32((val & 0x03FFE000) >> 13);
+ PHY_DEBUG(("[CAL] ** iqcal_image_i = %d, iqcal_image_q = %d\n",
+ iqcal_image_i, iqcal_image_q));
if (capture_time == 0)
continue;
else {
@@ -1358,7 +1358,7 @@ u8 _rx_iq_calibration_loop_winbond(struct hw_data *phw_data, u16 factor, u32 fre
hw_set_dxx_reg(phw_data, 0x54, val);
if (loop == 3)
- return 0;
+ return 0;
}
PHY_DEBUG(("[CAL] ** CALIB_DATA = 0x%08X\n", val));
@@ -1476,40 +1476,40 @@ void phy_calibration_winbond(struct hw_data *phw_data, u32 frequency)
/******************/
void phy_set_rf_data(struct hw_data *pHwData, u32 index, u32 value)
{
- u32 ltmp = 0;
-
- switch (pHwData->phy_type) {
- case RF_MAXIM_2825:
- case RF_MAXIM_V1: /* 11g Winbond 2nd BB(with Phy board (v1) + Maxim 331) */
- ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18);
- break;
-
- case RF_MAXIM_2827:
- ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18);
- break;
-
- case RF_MAXIM_2828:
- ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18);
- break;
-
- case RF_MAXIM_2829:
- ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18);
- break;
-
- case RF_AIROHA_2230:
- case RF_AIROHA_2230S: /* 20060420 Add this */
- ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse(value, 20);
- break;
-
- case RF_AIROHA_7230:
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | (value&0xffffff);
- break;
-
- case RF_WB_242:
- case RF_WB_242_1:/* 20060619.5 Add */
- ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse(value, 24);
- break;
- }
+ u32 ltmp = 0;
+
+ switch (pHwData->phy_type) {
+ case RF_MAXIM_2825:
+ case RF_MAXIM_V1: /* 11g Winbond 2nd BB(with Phy board (v1) + Maxim 331) */
+ ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18);
+ break;
+
+ case RF_MAXIM_2827:
+ ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18);
+ break;
+
+ case RF_MAXIM_2828:
+ ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18);
+ break;
+
+ case RF_MAXIM_2829:
+ ltmp = (1 << 31) | (0 << 30) | (18 << 24) | BitReverse(value, 18);
+ break;
+
+ case RF_AIROHA_2230:
+ case RF_AIROHA_2230S: /* 20060420 Add this */
+ ltmp = (1 << 31) | (0 << 30) | (20 << 24) | BitReverse(value, 20);
+ break;
+
+ case RF_AIROHA_7230:
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | (value&0xffffff);
+ break;
+
+ case RF_WB_242:
+ case RF_WB_242_1:/* 20060619.5 Add */
+ ltmp = (1 << 31) | (0 << 30) | (24 << 24) | BitReverse(value, 24);
+ break;
+ }
Wb35Reg_WriteSync(pHwData, 0x0864, ltmp);
}
diff --git a/drivers/staging/winbond/wb35reg_s.h b/drivers/staging/winbond/wb35reg_s.h
index eb274ffdd1b..dc79faa4029 100644
--- a/drivers/staging/winbond/wb35reg_s.h
+++ b/drivers/staging/winbond/wb35reg_s.h
@@ -3,7 +3,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct hw_data;
diff --git a/drivers/staging/wlags49_h2/wl_internal.h b/drivers/staging/wlags49_h2/wl_internal.h
index cd129b3ee6c..b61c9eb75ad 100644
--- a/drivers/staging/wlags49_h2/wl_internal.h
+++ b/drivers/staging/wlags49_h2/wl_internal.h
@@ -67,7 +67,6 @@
/*******************************************************************************
* include files
******************************************************************************/
-#include <linux/version.h>
#ifdef BUS_PCMCIA
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
@@ -990,14 +989,7 @@ struct wl_private
#endif // USE_WDS
}; // wl_private
-#ifdef HAVE_NETDEV_PRIV
#define wl_priv(dev) ((struct wl_private *) netdev_priv(dev))
-#else
-extern inline struct wl_private *wl_priv(struct net_device *dev)
-{
- return dev->priv;
-}
-#endif
/********************************************************************/
/* Locking and synchronization functions */
diff --git a/drivers/staging/wlags49_h2/wl_version.h b/drivers/staging/wlags49_h2/wl_version.h
index a5e604cd198..a5faada136d 100644
--- a/drivers/staging/wlags49_h2/wl_version.h
+++ b/drivers/staging/wlags49_h2/wl_version.h
@@ -66,7 +66,6 @@
* include files
******************************************************************************/
//#include <linux/config.h>
-#include <linux/version.h>
#ifndef CONFIG_MODVERSIONS
#define __NO_VERSION__
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 729d03d28d7..3c40096f0c0 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -442,9 +442,9 @@ int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks)
{
int i;
- for (i = 0; i < *nfchunks; i++) {
+ for (i = 0; i < *nfchunks; i++)
kfree(fchunk[i].data);
- }
+
*nfchunks = 0;
memset(fchunk, 0, sizeof(*fchunk));
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 21f25a21c29..417aea5e01c 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -50,7 +50,6 @@
* --------------------------------------------------------------------
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index cadec2ad0d3..4403e5f8059 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -5,7 +5,6 @@
*/
/* #include <linux/config.h> */
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
diff --git a/drivers/staging/xgifb/vb_ext.c b/drivers/staging/xgifb/vb_ext.c
index 7e1f76adf73..b1a25730b7c 100644
--- a/drivers/staging/xgifb/vb_ext.c
+++ b/drivers/staging/xgifb/vb_ext.c
@@ -1,4 +1,3 @@
-#include <linux/version.h>
#include <linux/io.h>
#include <linux/types.h>
#include "XGIfb.h"
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 33c6876d2a8..493b5322039 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -1,6 +1,5 @@
#include "vgatypes.h"
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/delay.h> /* udelay */
#include "XGIfb.h"
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 2669b1b0f51..dc4d6e6fc9b 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -2,7 +2,6 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/types.h>
-#include <linux/version.h>
#include "XGIfb.h"
diff --git a/drivers/staging/zcache/Makefile b/drivers/staging/zcache/Makefile
index f5ec64f9447..60daa272c20 100644
--- a/drivers/staging/zcache/Makefile
+++ b/drivers/staging/zcache/Makefile
@@ -1,3 +1,3 @@
-zcache-y := tmem.o
+zcache-y := zcache-main.o tmem.o
obj-$(CONFIG_ZCACHE) += zcache.o
diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
index e954d405b13..975e34bcd72 100644
--- a/drivers/staging/zcache/tmem.c
+++ b/drivers/staging/zcache/tmem.c
@@ -142,6 +142,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
obj->oid = *oidp;
obj->objnode_count = 0;
obj->pampd_count = 0;
+ (*tmem_pamops.new_obj)(obj);
SET_SENTINEL(obj, OBJ);
while (*new) {
BUG_ON(RB_EMPTY_NODE(*new));
@@ -274,7 +275,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
/*
* lookup index in object and return associated pampd (or NULL if not found)
*/
-static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
+static void **__tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
{
unsigned int height, shift;
struct tmem_objnode **slot = NULL;
@@ -303,9 +304,33 @@ static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
height--;
}
out:
+ return slot != NULL ? (void **)slot : NULL;
+}
+
+static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
+{
+ struct tmem_objnode **slot;
+
+ slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
return slot != NULL ? *slot : NULL;
}
+static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
+ void *new_pampd)
+{
+ struct tmem_objnode **slot;
+ void *ret = NULL;
+
+ slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
+ if ((slot != NULL) && (*slot != NULL)) {
+ void *old_pampd = *(void **)slot;
+ *(void **)slot = new_pampd;
+ (*tmem_pamops.free)(old_pampd, obj->pool, NULL, 0);
+ ret = new_pampd;
+ }
+ return ret;
+}
+
static int tmem_pampd_add_to_obj(struct tmem_obj *obj, uint32_t index,
void *pampd)
{
@@ -456,7 +481,7 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
if (ht == 1) {
obj->pampd_count--;
(*tmem_pamops.free)(objnode->slots[i],
- obj->pool);
+ obj->pool, NULL, 0);
objnode->slots[i] = NULL;
continue;
}
@@ -473,7 +498,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
return;
if (obj->objnode_tree_height == 0) {
obj->pampd_count--;
- (*tmem_pamops.free)(obj->objnode_tree_root, obj->pool);
+ (*tmem_pamops.free)(obj->objnode_tree_root, obj->pool, NULL, 0);
} else {
tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
obj->objnode_tree_height);
@@ -481,6 +506,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
obj->objnode_tree_height = 0;
}
obj->objnode_tree_root = NULL;
+ (*tmem_pamops.free_obj)(obj->pool, obj);
}
/*
@@ -503,15 +529,13 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj)
* always flushes for simplicity.
*/
int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
- struct page *page)
+ char *data, size_t size, bool raw, bool ephemeral)
{
struct tmem_obj *obj = NULL, *objfound = NULL, *objnew = NULL;
void *pampd = NULL, *pampd_del = NULL;
int ret = -ENOMEM;
- bool ephemeral;
struct tmem_hashbucket *hb;
- ephemeral = is_ephemeral(pool);
hb = &pool->hashbucket[tmem_oid_hash(oidp)];
spin_lock(&hb->lock);
obj = objfound = tmem_obj_find(hb, oidp);
@@ -521,7 +545,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
/* if found, is a dup put, flush the old one */
pampd_del = tmem_pampd_delete_from_obj(obj, index);
BUG_ON(pampd_del != pampd);
- (*tmem_pamops.free)(pampd, pool);
+ (*tmem_pamops.free)(pampd, pool, oidp, index);
if (obj->pampd_count == 0) {
objnew = obj;
objfound = NULL;
@@ -538,7 +562,8 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
}
BUG_ON(obj == NULL);
BUG_ON(((objnew != obj) && (objfound != obj)) || (objnew == objfound));
- pampd = (*tmem_pamops.create)(obj->pool, &obj->oid, index, page);
+ pampd = (*tmem_pamops.create)(data, size, raw, ephemeral,
+ obj->pool, &obj->oid, index);
if (unlikely(pampd == NULL))
goto free;
ret = tmem_pampd_add_to_obj(obj, index, pampd);
@@ -551,7 +576,7 @@ delete_and_free:
(void)tmem_pampd_delete_from_obj(obj, index);
free:
if (pampd)
- (*tmem_pamops.free)(pampd, pool);
+ (*tmem_pamops.free)(pampd, pool, NULL, 0);
if (objnew) {
tmem_obj_free(objnew, hb);
(*tmem_hostops.obj_free)(objnew, pool);
@@ -573,41 +598,52 @@ out:
* "put" done with the same handle).
*/
-int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp,
- uint32_t index, struct page *page)
+int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
+ char *data, size_t *size, bool raw, int get_and_free)
{
struct tmem_obj *obj;
void *pampd;
bool ephemeral = is_ephemeral(pool);
uint32_t ret = -1;
struct tmem_hashbucket *hb;
+ bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
+ bool lock_held = false;
hb = &pool->hashbucket[tmem_oid_hash(oidp)];
spin_lock(&hb->lock);
+ lock_held = true;
obj = tmem_obj_find(hb, oidp);
if (obj == NULL)
goto out;
- ephemeral = is_ephemeral(pool);
- if (ephemeral)
+ if (free)
pampd = tmem_pampd_delete_from_obj(obj, index);
else
pampd = tmem_pampd_lookup_in_obj(obj, index);
if (pampd == NULL)
goto out;
- ret = (*tmem_pamops.get_data)(page, pampd, pool);
- if (ret < 0)
- goto out;
- if (ephemeral) {
- (*tmem_pamops.free)(pampd, pool);
+ if (free) {
if (obj->pampd_count == 0) {
tmem_obj_free(obj, hb);
(*tmem_hostops.obj_free)(obj, pool);
obj = NULL;
}
}
+ if (tmem_pamops.is_remote(pampd)) {
+ lock_held = false;
+ spin_unlock(&hb->lock);
+ }
+ if (free)
+ ret = (*tmem_pamops.get_data_and_free)(
+ data, size, raw, pampd, pool, oidp, index);
+ else
+ ret = (*tmem_pamops.get_data)(
+ data, size, raw, pampd, pool, oidp, index);
+ if (ret < 0)
+ goto out;
ret = 0;
out:
- spin_unlock(&hb->lock);
+ if (lock_held)
+ spin_unlock(&hb->lock);
return ret;
}
@@ -632,7 +668,7 @@ int tmem_flush_page(struct tmem_pool *pool,
pampd = tmem_pampd_delete_from_obj(obj, index);
if (pampd == NULL)
goto out;
- (*tmem_pamops.free)(pampd, pool);
+ (*tmem_pamops.free)(pampd, pool, oidp, index);
if (obj->pampd_count == 0) {
tmem_obj_free(obj, hb);
(*tmem_hostops.obj_free)(obj, pool);
@@ -645,6 +681,30 @@ out:
}
/*
+ * If a page in tmem matches the handle, replace the page so that any
+ * subsequent "get" gets the new page. Returns 0 if
+ * there was a page to replace, else returns -1.
+ */
+int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
+ uint32_t index, void *new_pampd)
+{
+ struct tmem_obj *obj;
+ int ret = -1;
+ struct tmem_hashbucket *hb;
+
+ hb = &pool->hashbucket[tmem_oid_hash(oidp)];
+ spin_lock(&hb->lock);
+ obj = tmem_obj_find(hb, oidp);
+ if (obj == NULL)
+ goto out;
+ new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd);
+ ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
+out:
+ spin_unlock(&hb->lock);
+ return ret;
+}
+
+/*
* "Flush" all pages in tmem matching this oid.
*/
int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
index 2e07e217d51..ed147c4b110 100644
--- a/drivers/staging/zcache/tmem.h
+++ b/drivers/staging/zcache/tmem.h
@@ -147,6 +147,7 @@ struct tmem_obj {
unsigned int objnode_tree_height;
unsigned long objnode_count;
long pampd_count;
+ void *extra; /* for private use by pampd implementation */
DECL_SENTINEL
};
@@ -166,10 +167,18 @@ struct tmem_objnode {
/* pampd abstract datatype methods provided by the PAM implementation */
struct tmem_pamops {
- void *(*create)(struct tmem_pool *, struct tmem_oid *, uint32_t,
- struct page *);
- int (*get_data)(struct page *, void *, struct tmem_pool *);
- void (*free)(void *, struct tmem_pool *);
+ void *(*create)(char *, size_t, bool, int,
+ struct tmem_pool *, struct tmem_oid *, uint32_t);
+ int (*get_data)(char *, size_t *, bool, void *, struct tmem_pool *,
+ struct tmem_oid *, uint32_t);
+ int (*get_data_and_free)(char *, size_t *, bool, void *,
+ struct tmem_pool *, struct tmem_oid *,
+ uint32_t);
+ void (*free)(void *, struct tmem_pool *, struct tmem_oid *, uint32_t);
+ void (*free_obj)(struct tmem_pool *, struct tmem_obj *);
+ bool (*is_remote)(void *);
+ void (*new_obj)(struct tmem_obj *);
+ int (*replace_in_obj)(void *, struct tmem_obj *);
};
extern void tmem_register_pamops(struct tmem_pamops *m);
@@ -184,9 +193,11 @@ extern void tmem_register_hostops(struct tmem_hostops *m);
/* core tmem accessor functions */
extern int tmem_put(struct tmem_pool *, struct tmem_oid *, uint32_t index,
- struct page *page);
+ char *, size_t, bool, bool);
extern int tmem_get(struct tmem_pool *, struct tmem_oid *, uint32_t index,
- struct page *page);
+ char *, size_t *, bool, int);
+extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
+ void *);
extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
uint32_t index);
extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
diff --git a/drivers/staging/zcache/zcache.c b/drivers/staging/zcache/zcache-main.c
index 77ac2d4d3ef..855a5bb56a4 100644
--- a/drivers/staging/zcache/zcache.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -19,6 +19,7 @@
* http://marc.info/?l=linux-mm&m=127811271605009
*/
+#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/list.h>
@@ -27,6 +28,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
+#include <linux/math64.h>
#include "tmem.h"
#include "../zram/xvmalloc.h" /* if built in drivers/staging */
@@ -49,6 +51,36 @@
(__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
#endif
+#define MAX_POOLS_PER_CLIENT 16
+
+#define MAX_CLIENTS 16
+#define LOCAL_CLIENT ((uint16_t)-1)
+
+MODULE_LICENSE("GPL");
+
+struct zcache_client {
+ struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
+ struct xv_pool *xvpool;
+ bool allocated;
+ atomic_t refcount;
+};
+
+static struct zcache_client zcache_host;
+static struct zcache_client zcache_clients[MAX_CLIENTS];
+
+static inline uint16_t get_client_id_from_client(struct zcache_client *cli)
+{
+ BUG_ON(cli == NULL);
+ if (cli == &zcache_host)
+ return LOCAL_CLIENT;
+ return cli - &zcache_clients[0];
+}
+
+static inline bool is_local_client(struct zcache_client *cli)
+{
+ return cli == &zcache_host;
+}
+
/**********
* Compression buddies ("zbud") provides for packing two (or, possibly
* in the future, more) compressed ephemeral pages into a single "raw"
@@ -72,7 +104,8 @@
#define ZBUD_MAX_BUDS 2
struct zbud_hdr {
- uint32_t pool_id;
+ uint16_t client_id;
+ uint16_t pool_id;
struct tmem_oid oid;
uint32_t index;
uint16_t size; /* compressed size in bytes, zero means unused */
@@ -120,6 +153,7 @@ static unsigned long zcache_zbud_curr_zbytes;
static unsigned long zcache_zbud_cumul_zpages;
static unsigned long zcache_zbud_cumul_zbytes;
static unsigned long zcache_compress_poor;
+static unsigned long zcache_mean_compress_poor;
/* forward references */
static void *zcache_get_free_page(void);
@@ -294,7 +328,8 @@ static void zbud_free_and_delist(struct zbud_hdr *zh)
}
}
-static struct zbud_hdr *zbud_create(uint32_t pool_id, struct tmem_oid *oid,
+static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
+ struct tmem_oid *oid,
uint32_t index, struct page *page,
void *cdata, unsigned size)
{
@@ -353,6 +388,7 @@ init_zh:
zh->index = index;
zh->oid = *oid;
zh->pool_id = pool_id;
+ zh->client_id = client_id;
/* can wait to copy the data until the list locks are dropped */
spin_unlock(&zbud_budlists_spinlock);
@@ -407,7 +443,8 @@ static unsigned long zcache_evicted_raw_pages;
static unsigned long zcache_evicted_buddied_pages;
static unsigned long zcache_evicted_unbuddied_pages;
-static struct tmem_pool *zcache_get_pool_by_id(uint32_t poolid);
+static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
+ uint16_t poolid);
static void zcache_put_pool(struct tmem_pool *pool);
/*
@@ -417,7 +454,8 @@ static void zbud_evict_zbpg(struct zbud_page *zbpg)
{
struct zbud_hdr *zh;
int i, j;
- uint32_t pool_id[ZBUD_MAX_BUDS], index[ZBUD_MAX_BUDS];
+ uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS];
+ uint32_t index[ZBUD_MAX_BUDS];
struct tmem_oid oid[ZBUD_MAX_BUDS];
struct tmem_pool *pool;
@@ -426,6 +464,7 @@ static void zbud_evict_zbpg(struct zbud_page *zbpg)
for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) {
zh = &zbpg->buddy[i];
if (zh->size) {
+ client_id[j] = zh->client_id;
pool_id[j] = zh->pool_id;
oid[j] = zh->oid;
index[j] = zh->index;
@@ -435,7 +474,7 @@ static void zbud_evict_zbpg(struct zbud_page *zbpg)
}
spin_unlock(&zbpg->lock);
for (i = 0; i < j; i++) {
- pool = zcache_get_pool_by_id(pool_id[i]);
+ pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
if (pool != NULL) {
tmem_flush_page(pool, &oid[i], index[i]);
zcache_put_pool(pool);
@@ -552,9 +591,8 @@ static int zbud_show_unbuddied_list_counts(char *buf)
int i;
char *p = buf;
- for (i = 0; i < NCHUNKS - 1; i++)
+ for (i = 0; i < NCHUNKS; i++)
p += sprintf(p, "%u ", zbud_unbuddied[i].count);
- p += sprintf(p, "%d\n", zbud_unbuddied[i].count);
return p - buf;
}
@@ -602,7 +640,23 @@ struct zv_hdr {
DECL_SENTINEL
};
-static const int zv_max_page_size = (PAGE_SIZE / 8) * 7;
+/* rudimentary policy limits */
+/* total number of persistent pages may not exceed this percentage */
+static unsigned int zv_page_count_policy_percent = 75;
+/*
+ * byte count defining poor compression; pages with greater zsize will be
+ * rejected
+ */
+static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
+/*
+ * byte count defining poor *mean* compression; pages with greater zsize
+ * will be rejected until sufficient better-compressed pages are accepted
+ * driving the mean below this threshold
+ */
+static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
+
+static unsigned long zv_curr_dist_counts[NCHUNKS];
+static unsigned long zv_cumul_dist_counts[NCHUNKS];
static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
struct tmem_oid *oid, uint32_t index,
@@ -611,13 +665,18 @@ static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
struct page *page;
struct zv_hdr *zv = NULL;
uint32_t offset;
+ int alloc_size = clen + sizeof(struct zv_hdr);
+ int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
int ret;
BUG_ON(!irqs_disabled());
- ret = xv_malloc(xvpool, clen + sizeof(struct zv_hdr),
+ BUG_ON(chunks >= NCHUNKS);
+ ret = xv_malloc(xvpool, alloc_size,
&page, &offset, ZCACHE_GFP_MASK);
if (unlikely(ret))
goto out;
+ zv_curr_dist_counts[chunks]++;
+ zv_cumul_dist_counts[chunks]++;
zv = kmap_atomic(page, KM_USER0) + offset;
zv->index = index;
zv->oid = *oid;
@@ -634,11 +693,14 @@ static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
unsigned long flags;
struct page *page;
uint32_t offset;
- uint16_t size;
+ uint16_t size = xv_get_object_size(zv);
+ int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
ASSERT_SENTINEL(zv, ZVH);
- size = xv_get_object_size(zv) - sizeof(*zv);
- BUG_ON(size == 0 || size > zv_max_page_size);
+ BUG_ON(chunks >= NCHUNKS);
+ zv_curr_dist_counts[chunks]--;
+ size -= sizeof(*zv);
+ BUG_ON(size == 0);
INVERT_SENTINEL(zv, ZVH);
page = virt_to_page(zv);
offset = (unsigned long)zv & ~PAGE_MASK;
@@ -656,7 +718,7 @@ static void zv_decompress(struct page *page, struct zv_hdr *zv)
ASSERT_SENTINEL(zv, ZVH);
size = xv_get_object_size(zv) - sizeof(*zv);
- BUG_ON(size == 0 || size > zv_max_page_size);
+ BUG_ON(size == 0);
to_va = kmap_atomic(page, KM_USER0);
ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
size, to_va, &clen);
@@ -665,6 +727,159 @@ static void zv_decompress(struct page *page, struct zv_hdr *zv)
BUG_ON(clen != PAGE_SIZE);
}
+#ifdef CONFIG_SYSFS
+/*
+ * show a distribution of compression stats for zv pages.
+ */
+
+static int zv_curr_dist_counts_show(char *buf)
+{
+ unsigned long i, n, chunks = 0, sum_total_chunks = 0;
+ char *p = buf;
+
+ for (i = 0; i < NCHUNKS; i++) {
+ n = zv_curr_dist_counts[i];
+ p += sprintf(p, "%lu ", n);
+ chunks += n;
+ sum_total_chunks += i * n;
+ }
+ p += sprintf(p, "mean:%lu\n",
+ chunks == 0 ? 0 : sum_total_chunks / chunks);
+ return p - buf;
+}
+
+static int zv_cumul_dist_counts_show(char *buf)
+{
+ unsigned long i, n, chunks = 0, sum_total_chunks = 0;
+ char *p = buf;
+
+ for (i = 0; i < NCHUNKS; i++) {
+ n = zv_cumul_dist_counts[i];
+ p += sprintf(p, "%lu ", n);
+ chunks += n;
+ sum_total_chunks += i * n;
+ }
+ p += sprintf(p, "mean:%lu\n",
+ chunks == 0 ? 0 : sum_total_chunks / chunks);
+ return p - buf;
+}
+
+/*
+ * setting zv_max_zsize via sysfs causes all persistent (e.g. swap)
+ * pages that don't compress to less than this value (including metadata
+ * overhead) to be rejected. We don't allow the value to get too close
+ * to PAGE_SIZE.
+ */
+static ssize_t zv_max_zsize_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", zv_max_zsize);
+}
+
+static ssize_t zv_max_zsize_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
+ return -EINVAL;
+ zv_max_zsize = val;
+ return count;
+}
+
+/*
+ * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap)
+ * pages that don't compress to less than this value (including metadata
+ * overhead) to be rejected UNLESS the mean compression is also smaller
+ * than this value. In other words, we are load-balancing-by-zsize the
+ * accepted pages. Again, we don't allow the value to get too close
+ * to PAGE_SIZE.
+ */
+static ssize_t zv_max_mean_zsize_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", zv_max_mean_zsize);
+}
+
+static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
+ return -EINVAL;
+ zv_max_mean_zsize = val;
+ return count;
+}
+
+/*
+ * setting zv_page_count_policy_percent via sysfs sets an upper bound of
+ * persistent (e.g. swap) pages that will be retained according to:
+ * (zv_page_count_policy_percent * totalram_pages) / 100)
+ * when that limit is reached, further puts will be rejected (until
+ * some pages have been flushed). Note that, due to compression,
+ * this number may exceed 100; it defaults to 75 and we set an
+ * arbitrary limit of 150. A poor choice will almost certainly result
+ * in OOM's, so this value should only be changed prudently.
+ */
+static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", zv_page_count_policy_percent);
+}
+
+static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err || (val == 0) || (val > 150))
+ return -EINVAL;
+ zv_page_count_policy_percent = val;
+ return count;
+}
+
+static struct kobj_attribute zcache_zv_max_zsize_attr = {
+ .attr = { .name = "zv_max_zsize", .mode = 0644 },
+ .show = zv_max_zsize_show,
+ .store = zv_max_zsize_store,
+};
+
+static struct kobj_attribute zcache_zv_max_mean_zsize_attr = {
+ .attr = { .name = "zv_max_mean_zsize", .mode = 0644 },
+ .show = zv_max_mean_zsize_show,
+ .store = zv_max_mean_zsize_store,
+};
+
+static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = {
+ .attr = { .name = "zv_page_count_policy_percent",
+ .mode = 0644 },
+ .show = zv_page_count_policy_percent_show,
+ .store = zv_page_count_policy_percent_store,
+};
+#endif
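Taken together, the three tunables above form the persistent-page admission policy that this patch wires into zcache_pampd_create further below. A condensed sketch of that check; the helper name zv_policy_allows is illustrative and not part of the patch:

	/* Sketch only: would a persistent page of compressed size clen be kept? */
	static bool zv_policy_allows(unsigned long curr_pers_pages, size_t clen,
				     u64 total_zsize)
	{
		/* cap the total number of retained persistent pages */
		if (curr_pers_pages >
		    (zv_page_count_policy_percent * totalram_pages) / 100)
			return false;
		/* reject individually poor compression outright */
		if (clen > zv_max_zsize)
			return false;
		/* poor-but-acceptable pages are kept only while the running
		 * mean zsize stays at or below zv_max_mean_zsize */
		if (clen > zv_max_mean_zsize && curr_pers_pages > 0 &&
		    div_u64(total_zsize, curr_pers_pages) > zv_max_mean_zsize)
			return false;
		return true;
	}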
+
/*
* zcache core code starts here
*/
@@ -677,36 +892,70 @@ static unsigned long zcache_flobj_found;
static unsigned long zcache_failed_eph_puts;
static unsigned long zcache_failed_pers_puts;
-#define MAX_POOLS_PER_CLIENT 16
-
-static struct {
- struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
- struct xv_pool *xvpool;
-} zcache_client;
-
/*
* Tmem operations assume the poolid implies the invoking client.
- * Zcache only has one client (the kernel itself), so translate
- * the poolid into the tmem_pool allocated for it. A KVM version
+ * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
+ * RAMster has each client numbered by cluster node, and a KVM version
* of zcache would have one client per guest and each client might
* have a poolid==N.
*/
-static struct tmem_pool *zcache_get_pool_by_id(uint32_t poolid)
+static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
{
struct tmem_pool *pool = NULL;
+ struct zcache_client *cli = NULL;
- if (poolid >= 0) {
- pool = zcache_client.tmem_pools[poolid];
+ if (cli_id == LOCAL_CLIENT)
+ cli = &zcache_host;
+ else {
+ if (cli_id >= MAX_CLIENTS)
+ goto out;
+ cli = &zcache_clients[cli_id];
+ if (cli == NULL)
+ goto out;
+ atomic_inc(&cli->refcount);
+ }
+ if (poolid < MAX_POOLS_PER_CLIENT) {
+ pool = cli->tmem_pools[poolid];
if (pool != NULL)
atomic_inc(&pool->refcount);
}
+out:
return pool;
}
static void zcache_put_pool(struct tmem_pool *pool)
{
- if (pool != NULL)
- atomic_dec(&pool->refcount);
+ struct zcache_client *cli = NULL;
+
+ if (pool == NULL)
+ BUG();
+ cli = pool->client;
+ atomic_dec(&pool->refcount);
+ atomic_dec(&cli->refcount);
+}
+
+int zcache_new_client(uint16_t cli_id)
+{
+ struct zcache_client *cli = NULL;
+ int ret = -1;
+
+ if (cli_id == LOCAL_CLIENT)
+ cli = &zcache_host;
+ else if ((unsigned int)cli_id < MAX_CLIENTS)
+ cli = &zcache_clients[cli_id];
+ if (cli == NULL)
+ goto out;
+ if (cli->allocated)
+ goto out;
+ cli->allocated = 1;
+#ifdef CONFIG_FRONTSWAP
+ cli->xvpool = xv_create_pool();
+ if (cli->xvpool == NULL)
+ goto out;
+#endif
+ ret = 0;
+out:
+ return ret;
}
/* counters for debugging */
@@ -901,48 +1150,61 @@ static unsigned long zcache_curr_pers_pampd_count_max;
/* forward reference */
static int zcache_compress(struct page *from, void **out_va, size_t *out_len);
-static void *zcache_pampd_create(struct tmem_pool *pool, struct tmem_oid *oid,
- uint32_t index, struct page *page)
+static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
+ struct tmem_pool *pool, struct tmem_oid *oid,
+ uint32_t index)
{
void *pampd = NULL, *cdata;
size_t clen;
int ret;
- bool ephemeral = is_ephemeral(pool);
unsigned long count;
-
- if (ephemeral) {
+ struct page *page = virt_to_page(data);
+ struct zcache_client *cli = pool->client;
+ uint16_t client_id = get_client_id_from_client(cli);
+ unsigned long zv_mean_zsize;
+ unsigned long curr_pers_pampd_count;
+ u64 total_zsize;
+
+ if (eph) {
ret = zcache_compress(page, &cdata, &clen);
if (ret == 0)
-
goto out;
if (clen == 0 || clen > zbud_max_buddy_size()) {
zcache_compress_poor++;
goto out;
}
- pampd = (void *)zbud_create(pool->pool_id, oid, index,
- page, cdata, clen);
+ pampd = (void *)zbud_create(client_id, pool->pool_id, oid,
+ index, page, cdata, clen);
if (pampd != NULL) {
count = atomic_inc_return(&zcache_curr_eph_pampd_count);
if (count > zcache_curr_eph_pampd_count_max)
zcache_curr_eph_pampd_count_max = count;
}
} else {
- /*
- * FIXME: This is all the "policy" there is for now.
- * 3/4 totpages should allow ~37% of RAM to be filled with
- * compressed frontswap pages
- */
- if (atomic_read(&zcache_curr_pers_pampd_count) >
- 3 * totalram_pages / 4)
+ curr_pers_pampd_count =
+ atomic_read(&zcache_curr_pers_pampd_count);
+ if (curr_pers_pampd_count >
+ (zv_page_count_policy_percent * totalram_pages) / 100)
goto out;
ret = zcache_compress(page, &cdata, &clen);
if (ret == 0)
goto out;
- if (clen > zv_max_page_size) {
+ /* reject if compression is too poor */
+ if (clen > zv_max_zsize) {
zcache_compress_poor++;
goto out;
}
- pampd = (void *)zv_create(zcache_client.xvpool, pool->pool_id,
+ /* reject if mean compression is too poor */
+ if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
+ total_zsize = xv_get_total_size_bytes(cli->xvpool);
+ zv_mean_zsize = div_u64(total_zsize,
+ curr_pers_pampd_count);
+ if (zv_mean_zsize > zv_max_mean_zsize) {
+ zcache_mean_compress_poor++;
+ goto out;
+ }
+ }
+ pampd = (void *)zv_create(cli->xvpool, pool->pool_id,
oid, index, cdata, clen);
if (pampd == NULL)
goto out;
@@ -958,15 +1220,31 @@ out:
* fill the pageframe corresponding to the struct page with the data
* from the passed pampd
*/
-static int zcache_pampd_get_data(struct page *page, void *pampd,
- struct tmem_pool *pool)
+static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw,
+ void *pampd, struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index)
{
int ret = 0;
- if (is_ephemeral(pool))
- ret = zbud_decompress(page, pampd);
- else
- zv_decompress(page, pampd);
+ BUG_ON(is_ephemeral(pool));
+ zv_decompress(virt_to_page(data), pampd);
+ return ret;
+}
+
+/*
+ * fill the pageframe corresponding to the struct page with the data
+ * from the passed pampd, then free and delist the (ephemeral) pampd
+ */
+static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw,
+ void *pampd, struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index)
+{
+ int ret = 0;
+
+ BUG_ON(!is_ephemeral(pool));
+ zbud_decompress(virt_to_page(data), pampd);
+ zbud_free_and_delist((struct zbud_hdr *)pampd);
+ atomic_dec(&zcache_curr_eph_pampd_count);
return ret;
}
@@ -974,23 +1252,49 @@ static int zcache_pampd_get_data(struct page *page, void *pampd,
* free the pampd and remove it from any zcache lists
* pampd must no longer be pointed to from any tmem data structures!
*/
-static void zcache_pampd_free(void *pampd, struct tmem_pool *pool)
+static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
+ struct tmem_oid *oid, uint32_t index)
{
+ struct zcache_client *cli = pool->client;
+
if (is_ephemeral(pool)) {
zbud_free_and_delist((struct zbud_hdr *)pampd);
atomic_dec(&zcache_curr_eph_pampd_count);
BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
} else {
- zv_free(zcache_client.xvpool, (struct zv_hdr *)pampd);
+ zv_free(cli->xvpool, (struct zv_hdr *)pampd);
atomic_dec(&zcache_curr_pers_pampd_count);
BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
}
}
+static void zcache_pampd_free_obj(struct tmem_pool *pool, struct tmem_obj *obj)
+{
+}
+
+static void zcache_pampd_new_obj(struct tmem_obj *obj)
+{
+}
+
+static int zcache_pampd_replace_in_obj(void *pampd, struct tmem_obj *obj)
+{
+ return -1;
+}
+
+static bool zcache_pampd_is_remote(void *pampd)
+{
+ return 0;
+}
+
static struct tmem_pamops zcache_pamops = {
.create = zcache_pampd_create,
.get_data = zcache_pampd_get_data,
+ .get_data_and_free = zcache_pampd_get_data_and_free,
.free = zcache_pampd_free,
+ .free_obj = zcache_pampd_free_obj,
+ .new_obj = zcache_pampd_new_obj,
+ .replace_in_obj = zcache_pampd_replace_in_obj,
+ .is_remote = zcache_pampd_is_remote,
};
/*
@@ -1122,6 +1426,7 @@ ZCACHE_SYSFS_RO(put_to_flush);
ZCACHE_SYSFS_RO(aborted_preload);
ZCACHE_SYSFS_RO(aborted_shrink);
ZCACHE_SYSFS_RO(compress_poor);
+ZCACHE_SYSFS_RO(mean_compress_poor);
ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages);
ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages);
ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count);
@@ -1130,6 +1435,10 @@ ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts,
zbud_show_unbuddied_list_counts);
ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts,
zbud_show_cumul_chunk_counts);
+ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts,
+ zv_curr_dist_counts_show);
+ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts,
+ zv_cumul_dist_counts_show);
static struct attribute *zcache_attrs[] = {
&zcache_curr_obj_count_attr.attr,
@@ -1143,6 +1452,7 @@ static struct attribute *zcache_attrs[] = {
&zcache_failed_eph_puts_attr.attr,
&zcache_failed_pers_puts_attr.attr,
&zcache_compress_poor_attr.attr,
+ &zcache_mean_compress_poor_attr.attr,
&zcache_zbud_curr_raw_pages_attr.attr,
&zcache_zbud_curr_zpages_attr.attr,
&zcache_zbud_curr_zbytes_attr.attr,
@@ -1160,6 +1470,11 @@ static struct attribute *zcache_attrs[] = {
&zcache_aborted_shrink_attr.attr,
&zcache_zbud_unbuddied_list_counts_attr.attr,
&zcache_zbud_cumul_chunk_counts_attr.attr,
+ &zcache_zv_curr_dist_counts_attr.attr,
+ &zcache_zv_cumul_dist_counts_attr.attr,
+ &zcache_zv_max_zsize_attr.attr,
+ &zcache_zv_max_mean_zsize_attr.attr,
+ &zcache_zv_page_count_policy_percent_attr.attr,
NULL,
};
@@ -1212,19 +1527,20 @@ static struct shrinker zcache_shrinker = {
* zcache shims between cleancache/frontswap ops and tmem
*/
-static int zcache_put_page(int pool_id, struct tmem_oid *oidp,
+static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
uint32_t index, struct page *page)
{
struct tmem_pool *pool;
int ret = -1;
BUG_ON(!irqs_disabled());
- pool = zcache_get_pool_by_id(pool_id);
+ pool = zcache_get_pool_by_id(cli_id, pool_id);
if (unlikely(pool == NULL))
goto out;
if (!zcache_freeze && zcache_do_preload(pool) == 0) {
/* preload does preempt_disable on success */
- ret = tmem_put(pool, oidp, index, page);
+ ret = tmem_put(pool, oidp, index, page_address(page),
+ PAGE_SIZE, 0, is_ephemeral(pool));
if (ret < 0) {
if (is_ephemeral(pool))
zcache_failed_eph_puts++;
@@ -1244,25 +1560,28 @@ out:
return ret;
}
-static int zcache_get_page(int pool_id, struct tmem_oid *oidp,
+static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
uint32_t index, struct page *page)
{
struct tmem_pool *pool;
int ret = -1;
unsigned long flags;
+ size_t size = PAGE_SIZE;
local_irq_save(flags);
- pool = zcache_get_pool_by_id(pool_id);
+ pool = zcache_get_pool_by_id(cli_id, pool_id);
if (likely(pool != NULL)) {
if (atomic_read(&pool->obj_count) > 0)
- ret = tmem_get(pool, oidp, index, page);
+ ret = tmem_get(pool, oidp, index, page_address(page),
+ &size, 0, is_ephemeral(pool));
zcache_put_pool(pool);
}
local_irq_restore(flags);
return ret;
}
-static int zcache_flush_page(int pool_id, struct tmem_oid *oidp, uint32_t index)
+static int zcache_flush_page(int cli_id, int pool_id,
+ struct tmem_oid *oidp, uint32_t index)
{
struct tmem_pool *pool;
int ret = -1;
@@ -1270,7 +1589,7 @@ static int zcache_flush_page(int pool_id, struct tmem_oid *oidp, uint32_t index)
local_irq_save(flags);
zcache_flush_total++;
- pool = zcache_get_pool_by_id(pool_id);
+ pool = zcache_get_pool_by_id(cli_id, pool_id);
if (likely(pool != NULL)) {
if (atomic_read(&pool->obj_count) > 0)
ret = tmem_flush_page(pool, oidp, index);
@@ -1282,7 +1601,8 @@ static int zcache_flush_page(int pool_id, struct tmem_oid *oidp, uint32_t index)
return ret;
}
-static int zcache_flush_object(int pool_id, struct tmem_oid *oidp)
+static int zcache_flush_object(int cli_id, int pool_id,
+ struct tmem_oid *oidp)
{
struct tmem_pool *pool;
int ret = -1;
@@ -1290,7 +1610,7 @@ static int zcache_flush_object(int pool_id, struct tmem_oid *oidp)
local_irq_save(flags);
zcache_flobj_total++;
- pool = zcache_get_pool_by_id(pool_id);
+ pool = zcache_get_pool_by_id(cli_id, pool_id);
if (likely(pool != NULL)) {
if (atomic_read(&pool->obj_count) > 0)
ret = tmem_flush_object(pool, oidp);
@@ -1302,34 +1622,52 @@ static int zcache_flush_object(int pool_id, struct tmem_oid *oidp)
return ret;
}
-static int zcache_destroy_pool(int pool_id)
+static int zcache_destroy_pool(int cli_id, int pool_id)
{
struct tmem_pool *pool = NULL;
+ struct zcache_client *cli = NULL;
int ret = -1;
if (pool_id < 0)
goto out;
- pool = zcache_client.tmem_pools[pool_id];
+ if (cli_id == LOCAL_CLIENT)
+ cli = &zcache_host;
+ else if ((unsigned int)cli_id < MAX_CLIENTS)
+ cli = &zcache_clients[cli_id];
+ if (cli == NULL)
+ goto out;
+ atomic_inc(&cli->refcount);
+ pool = cli->tmem_pools[pool_id];
if (pool == NULL)
goto out;
- zcache_client.tmem_pools[pool_id] = NULL;
+ cli->tmem_pools[pool_id] = NULL;
/* wait for pool activity on other cpus to quiesce */
while (atomic_read(&pool->refcount) != 0)
;
+ atomic_dec(&cli->refcount);
local_bh_disable();
ret = tmem_destroy_pool(pool);
local_bh_enable();
kfree(pool);
- pr_info("zcache: destroyed pool id=%d\n", pool_id);
+ pr_info("zcache: destroyed pool id=%d, cli_id=%d\n",
+ pool_id, cli_id);
out:
return ret;
}
-static int zcache_new_pool(uint32_t flags)
+static int zcache_new_pool(uint16_t cli_id, uint32_t flags)
{
int poolid = -1;
struct tmem_pool *pool;
+ struct zcache_client *cli = NULL;
+ if (cli_id == LOCAL_CLIENT)
+ cli = &zcache_host;
+ else if ((unsigned int)cli_id < MAX_CLIENTS)
+ cli = &zcache_clients[cli_id];
+ if (cli == NULL)
+ goto out;
+ atomic_inc(&cli->refcount);
pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
if (pool == NULL) {
pr_info("zcache: pool creation failed: out of memory\n");
@@ -1337,7 +1675,7 @@ static int zcache_new_pool(uint32_t flags)
}
for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
- if (zcache_client.tmem_pools[poolid] == NULL)
+ if (cli->tmem_pools[poolid] == NULL)
break;
if (poolid >= MAX_POOLS_PER_CLIENT) {
pr_info("zcache: pool creation failed: max exceeded\n");
@@ -1346,14 +1684,16 @@ static int zcache_new_pool(uint32_t flags)
goto out;
}
atomic_set(&pool->refcount, 0);
- pool->client = &zcache_client;
+ pool->client = cli;
pool->pool_id = poolid;
tmem_new_pool(pool, flags);
- zcache_client.tmem_pools[poolid] = pool;
- pr_info("zcache: created %s tmem pool, id=%d\n",
+ cli->tmem_pools[poolid] = pool;
+ pr_info("zcache: created %s tmem pool, id=%d, client=%d\n",
flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
- poolid);
+ poolid, cli_id);
out:
+ if (cli != NULL)
+ atomic_dec(&cli->refcount);
return poolid;
}
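[Editor's note] zcache_new_pool() and zcache_destroy_pool() both resolve a cli_id to a client the same way. A minimal userspace sketch of that lookup, not part of the patch, follows; the helper name is hypothetical and the LOCAL_CLIENT/MAX_CLIENTS values and struct layout are stand-ins for the zcache ones.

#include <stdint.h>
#include <stdio.h>

#define LOCAL_CLIENT ((uint16_t)-1)   /* stand-in value */
#define MAX_CLIENTS  16               /* stand-in value */

struct zcache_client { int refcount; };

static struct zcache_client zcache_host;
static struct zcache_client zcache_clients[MAX_CLIENTS];

/* The cli_id -> client resolution that the patch open-codes in both
 * zcache_new_pool() and zcache_destroy_pool(). */
static struct zcache_client *zcache_get_client_by_id(uint16_t cli_id)
{
	if (cli_id == LOCAL_CLIENT)
		return &zcache_host;
	if (cli_id < MAX_CLIENTS)
		return &zcache_clients[cli_id];
	return NULL;
}

int main(void)
{
	printf("local client resolves: %s\n",
	       zcache_get_client_by_id(LOCAL_CLIENT) ? "yes" : "no");
	printf("client 3 resolves: %s\n",
	       zcache_get_client_by_id(3) ? "yes" : "no");
	return 0;
}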
@@ -1374,7 +1714,7 @@ static void zcache_cleancache_put_page(int pool_id,
struct tmem_oid oid = *(struct tmem_oid *)&key;
if (likely(ind == index))
- (void)zcache_put_page(pool_id, &oid, index, page);
+ (void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index, page);
}
static int zcache_cleancache_get_page(int pool_id,
@@ -1386,7 +1726,7 @@ static int zcache_cleancache_get_page(int pool_id,
int ret = -1;
if (likely(ind == index))
- ret = zcache_get_page(pool_id, &oid, index, page);
+ ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index, page);
return ret;
}
@@ -1398,7 +1738,7 @@ static void zcache_cleancache_flush_page(int pool_id,
struct tmem_oid oid = *(struct tmem_oid *)&key;
if (likely(ind == index))
- (void)zcache_flush_page(pool_id, &oid, ind);
+ (void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
}
static void zcache_cleancache_flush_inode(int pool_id,
@@ -1406,13 +1746,13 @@ static void zcache_cleancache_flush_inode(int pool_id,
{
struct tmem_oid oid = *(struct tmem_oid *)&key;
- (void)zcache_flush_object(pool_id, &oid);
+ (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
}
static void zcache_cleancache_flush_fs(int pool_id)
{
if (pool_id >= 0)
- (void)zcache_destroy_pool(pool_id);
+ (void)zcache_destroy_pool(LOCAL_CLIENT, pool_id);
}
static int zcache_cleancache_init_fs(size_t pagesize)
@@ -1420,7 +1760,7 @@ static int zcache_cleancache_init_fs(size_t pagesize)
BUG_ON(sizeof(struct cleancache_filekey) !=
sizeof(struct tmem_oid));
BUG_ON(pagesize != PAGE_SIZE);
- return zcache_new_pool(0);
+ return zcache_new_pool(LOCAL_CLIENT, 0);
}
static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
@@ -1429,7 +1769,7 @@ static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
BUG_ON(sizeof(struct cleancache_filekey) !=
sizeof(struct tmem_oid));
BUG_ON(pagesize != PAGE_SIZE);
- return zcache_new_pool(0);
+ return zcache_new_pool(LOCAL_CLIENT, 0);
}
static struct cleancache_ops zcache_cleancache_ops = {
@@ -1483,8 +1823,8 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
BUG_ON(!PageLocked(page));
if (likely(ind64 == ind)) {
local_irq_save(flags);
- ret = zcache_put_page(zcache_frontswap_poolid, &oid,
- iswiz(ind), page);
+ ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
+ &oid, iswiz(ind), page);
local_irq_restore(flags);
}
return ret;
@@ -1502,8 +1842,8 @@ static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
BUG_ON(!PageLocked(page));
if (likely(ind64 == ind))
- ret = zcache_get_page(zcache_frontswap_poolid, &oid,
- iswiz(ind), page);
+ ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
+ &oid, iswiz(ind), page);
return ret;
}
@@ -1515,8 +1855,8 @@ static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
struct tmem_oid oid = oswiz(type, ind);
if (likely(ind64 == ind))
- (void)zcache_flush_page(zcache_frontswap_poolid, &oid,
- iswiz(ind));
+ (void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
+ &oid, iswiz(ind));
}
/* flush all pages from the passed swaptype */
@@ -1527,7 +1867,8 @@ static void zcache_frontswap_flush_area(unsigned type)
for (ind = SWIZ_MASK; ind >= 0; ind--) {
oid = oswiz(type, ind);
- (void)zcache_flush_object(zcache_frontswap_poolid, &oid);
+ (void)zcache_flush_object(LOCAL_CLIENT,
+ zcache_frontswap_poolid, &oid);
}
}
@@ -1535,7 +1876,8 @@ static void zcache_frontswap_init(unsigned ignored)
{
/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
if (zcache_frontswap_poolid < 0)
- zcache_frontswap_poolid = zcache_new_pool(TMEM_POOL_PERSIST);
+ zcache_frontswap_poolid =
+ zcache_new_pool(LOCAL_CLIENT, TMEM_POOL_PERSIST);
}
static struct frontswap_ops zcache_frontswap_ops = {
@@ -1594,9 +1936,9 @@ __setup("nofrontswap", no_frontswap);
static int __init zcache_init(void)
{
-#ifdef CONFIG_SYSFS
int ret = 0;
+#ifdef CONFIG_SYSFS
ret = sysfs_create_group(mm_kobj, &zcache_attr_group);
if (ret) {
pr_err("zcache: can't create sysfs\n");
@@ -1624,6 +1966,11 @@ static int __init zcache_init(void)
sizeof(struct tmem_objnode), 0, 0, NULL);
zcache_obj_cache = kmem_cache_create("zcache_obj",
sizeof(struct tmem_obj), 0, 0, NULL);
+ ret = zcache_new_client(LOCAL_CLIENT);
+ if (ret) {
+ pr_err("zcache: can't create client\n");
+ goto out;
+ }
#endif
#ifdef CONFIG_CLEANCACHE
if (zcache_enabled && use_cleancache) {
@@ -1642,11 +1989,6 @@ static int __init zcache_init(void)
if (zcache_enabled && use_frontswap) {
struct frontswap_ops old_ops;
- zcache_client.xvpool = xv_create_pool();
- if (zcache_client.xvpool == NULL) {
- pr_err("zcache: can't create xvpool\n");
- goto out;
- }
old_ops = zcache_frontswap_register_ops();
pr_info("zcache: frontswap enabled using kernel "
"transcendent memory and xvmalloc\n");
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index aab4ec48212..d70ec1ad10d 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -177,224 +177,355 @@ out:
zram->table[index].offset = 0;
}
-static void handle_zero_page(struct page *page)
+static void handle_zero_page(struct bio_vec *bvec)
{
+ struct page *page = bvec->bv_page;
void *user_mem;
user_mem = kmap_atomic(page, KM_USER0);
- memset(user_mem, 0, PAGE_SIZE);
+ memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
kunmap_atomic(user_mem, KM_USER0);
flush_dcache_page(page);
}
-static void handle_uncompressed_page(struct zram *zram,
- struct page *page, u32 index)
+static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset)
{
+ struct page *page = bvec->bv_page;
unsigned char *user_mem, *cmem;
user_mem = kmap_atomic(page, KM_USER0);
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
+ cmem = kmap_atomic(zram->table[index].page, KM_USER1);
- memcpy(user_mem, cmem, PAGE_SIZE);
- kunmap_atomic(user_mem, KM_USER0);
+ memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
kunmap_atomic(cmem, KM_USER1);
+ kunmap_atomic(user_mem, KM_USER0);
flush_dcache_page(page);
}
-static void zram_read(struct zram *zram, struct bio *bio)
+static inline int is_partial_io(struct bio_vec *bvec)
{
+ return bvec->bv_len != PAGE_SIZE;
+}
- int i;
- u32 index;
- struct bio_vec *bvec;
-
- zram_stat64_inc(zram, &zram->stats.num_reads);
- index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset, struct bio *bio)
+{
+ int ret;
+ size_t clen;
+ struct page *page;
+ struct zobj_header *zheader;
+ unsigned char *user_mem, *cmem, *uncmem = NULL;
- bio_for_each_segment(bvec, bio, i) {
- int ret;
- size_t clen;
- struct page *page;
- struct zobj_header *zheader;
- unsigned char *user_mem, *cmem;
+ page = bvec->bv_page;
- page = bvec->bv_page;
+ if (zram_test_flag(zram, index, ZRAM_ZERO)) {
+ handle_zero_page(bvec);
+ return 0;
+ }
- if (zram_test_flag(zram, index, ZRAM_ZERO)) {
- handle_zero_page(page);
- index++;
- continue;
- }
+ /* Requested page is not present in compressed area */
+ if (unlikely(!zram->table[index].page)) {
+ pr_debug("Read before write: sector=%lu, size=%u",
+ (ulong)(bio->bi_sector), bio->bi_size);
+ handle_zero_page(bvec);
+ return 0;
+ }
- /* Requested page is not present in compressed area */
- if (unlikely(!zram->table[index].page)) {
- pr_debug("Read before write: sector=%lu, size=%u",
- (ulong)(bio->bi_sector), bio->bi_size);
- handle_zero_page(page);
- index++;
- continue;
- }
+ /* Page is stored uncompressed since it's incompressible */
+ if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+ handle_uncompressed_page(zram, bvec, index, offset);
+ return 0;
+ }
- /* Page is stored uncompressed since it's incompressible */
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- handle_uncompressed_page(zram, page, index);
- index++;
- continue;
+ if (is_partial_io(bvec)) {
+ /* Use a temporary buffer to decompress the page */
+ uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!uncmem) {
+ pr_info("Error allocating temp memory!\n");
+ return -ENOMEM;
}
+ }
- user_mem = kmap_atomic(page, KM_USER0);
- clen = PAGE_SIZE;
+ user_mem = kmap_atomic(page, KM_USER0);
+ if (!is_partial_io(bvec))
+ uncmem = user_mem;
+ clen = PAGE_SIZE;
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
+ cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+ zram->table[index].offset;
- ret = lzo1x_decompress_safe(
- cmem + sizeof(*zheader),
- xv_get_object_size(cmem) - sizeof(*zheader),
- user_mem, &clen);
+ ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
+ xv_get_object_size(cmem) - sizeof(*zheader),
+ uncmem, &clen);
- kunmap_atomic(user_mem, KM_USER0);
- kunmap_atomic(cmem, KM_USER1);
+ if (is_partial_io(bvec)) {
+ memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+ bvec->bv_len);
+ kfree(uncmem);
+ }
- /* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret != LZO_E_OK)) {
- pr_err("Decompression failed! err=%d, page=%u\n",
- ret, index);
- zram_stat64_inc(zram, &zram->stats.failed_reads);
- goto out;
- }
+ kunmap_atomic(cmem, KM_USER1);
+ kunmap_atomic(user_mem, KM_USER0);
- flush_dcache_page(page);
- index++;
+ /* Should NEVER happen. Return bio error if it does. */
+ if (unlikely(ret != LZO_E_OK)) {
+ pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
+ zram_stat64_inc(zram, &zram->stats.failed_reads);
+ return ret;
}
- set_bit(BIO_UPTODATE, &bio->bi_flags);
- bio_endio(bio, 0);
- return;
+ flush_dcache_page(page);
-out:
- bio_io_error(bio);
+ return 0;
}
-static void zram_write(struct zram *zram, struct bio *bio)
+static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
{
- int i;
- u32 index;
- struct bio_vec *bvec;
+ int ret;
+ size_t clen = PAGE_SIZE;
+ struct zobj_header *zheader;
+ unsigned char *cmem;
- zram_stat64_inc(zram, &zram->stats.num_writes);
- index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
+ if (zram_test_flag(zram, index, ZRAM_ZERO) ||
+ !zram->table[index].page) {
+ memset(mem, 0, PAGE_SIZE);
+ return 0;
+ }
- bio_for_each_segment(bvec, bio, i) {
- int ret;
- u32 offset;
- size_t clen;
- struct zobj_header *zheader;
- struct page *page, *page_store;
- unsigned char *user_mem, *cmem, *src;
+ cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
+ zram->table[index].offset;
+
+ /* Page is stored uncompressed since it's incompressible */
+ if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+ memcpy(mem, cmem, PAGE_SIZE);
+ kunmap_atomic(cmem, KM_USER0);
+ return 0;
+ }
- page = bvec->bv_page;
- src = zram->compress_buffer;
+ ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
+ xv_get_object_size(cmem) - sizeof(*zheader),
+ mem, &clen);
+ kunmap_atomic(cmem, KM_USER0);
+ /* Should NEVER happen. Return bio error if it does. */
+ if (unlikely(ret != LZO_E_OK)) {
+ pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
+ zram_stat64_inc(zram, &zram->stats.failed_reads);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
+ int offset)
+{
+ int ret;
+ u32 store_offset;
+ size_t clen;
+ struct zobj_header *zheader;
+ struct page *page, *page_store;
+ unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
+
+ page = bvec->bv_page;
+ src = zram->compress_buffer;
+
+ if (is_partial_io(bvec)) {
/*
- * System overwrites unused sectors. Free memory associated
- * with this sector now.
+ * This is a partial IO. We need to read the full page
+ * before writing the changes.
*/
- if (zram->table[index].page ||
- zram_test_flag(zram, index, ZRAM_ZERO))
- zram_free_page(zram, index);
-
- mutex_lock(&zram->lock);
-
- user_mem = kmap_atomic(page, KM_USER0);
- if (page_zero_filled(user_mem)) {
- kunmap_atomic(user_mem, KM_USER0);
- mutex_unlock(&zram->lock);
- zram_stat_inc(&zram->stats.pages_zero);
- zram_set_flag(zram, index, ZRAM_ZERO);
- index++;
- continue;
+ uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!uncmem) {
+ pr_info("Error allocating temp memory!\n");
+ ret = -ENOMEM;
+ goto out;
}
+ ret = zram_read_before_write(zram, uncmem, index);
+ if (ret) {
+ kfree(uncmem);
+ goto out;
+ }
+ }
- ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
- zram->compress_workmem);
+ /*
+ * System overwrites unused sectors. Free memory associated
+ * with this sector now.
+ */
+ if (zram->table[index].page ||
+ zram_test_flag(zram, index, ZRAM_ZERO))
+ zram_free_page(zram, index);
+
+ user_mem = kmap_atomic(page, KM_USER0);
+ if (is_partial_io(bvec))
+ memcpy(uncmem + offset, user_mem + bvec->bv_offset,
+ bvec->bv_len);
+ else
+ uncmem = user_mem;
+
+ if (page_zero_filled(uncmem)) {
kunmap_atomic(user_mem, KM_USER0);
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+ zram_stat_inc(&zram->stats.pages_zero);
+ zram_set_flag(zram, index, ZRAM_ZERO);
+ ret = 0;
+ goto out;
+ }
- if (unlikely(ret != LZO_E_OK)) {
- mutex_unlock(&zram->lock);
- pr_err("Compression failed! err=%d\n", ret);
- zram_stat64_inc(zram, &zram->stats.failed_writes);
- goto out;
- }
+ ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
+ zram->compress_workmem);
- /*
- * Page is incompressible. Store it as-is (uncompressed)
- * since we do not want to return too many disk write
- * errors which has side effect of hanging the system.
- */
- if (unlikely(clen > max_zpage_size)) {
- clen = PAGE_SIZE;
- page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
- if (unlikely(!page_store)) {
- mutex_unlock(&zram->lock);
- pr_info("Error allocating memory for "
- "incompressible page: %u\n", index);
- zram_stat64_inc(zram,
- &zram->stats.failed_writes);
- goto out;
- }
-
- offset = 0;
- zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
- zram_stat_inc(&zram->stats.pages_expand);
- zram->table[index].page = page_store;
- src = kmap_atomic(page, KM_USER0);
- goto memstore;
- }
+ kunmap_atomic(user_mem, KM_USER0);
+ if (is_partial_io(bvec))
+ kfree(uncmem);
- if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
- &zram->table[index].page, &offset,
- GFP_NOIO | __GFP_HIGHMEM)) {
- mutex_unlock(&zram->lock);
- pr_info("Error allocating memory for compressed "
- "page: %u, size=%zu\n", index, clen);
- zram_stat64_inc(zram, &zram->stats.failed_writes);
+ if (unlikely(ret != LZO_E_OK)) {
+ pr_err("Compression failed! err=%d\n", ret);
+ goto out;
+ }
+
+ /*
+ * Page is incompressible. Store it as-is (uncompressed)
+ * since we do not want to return too many disk write
+ * errors, which would have the side effect of hanging the system.
+ */
+ if (unlikely(clen > max_zpage_size)) {
+ clen = PAGE_SIZE;
+ page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
+ if (unlikely(!page_store)) {
+ pr_info("Error allocating memory for "
+ "incompressible page: %u\n", index);
+ ret = -ENOMEM;
goto out;
}
+ store_offset = 0;
+ zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
+ zram_stat_inc(&zram->stats.pages_expand);
+ zram->table[index].page = page_store;
+ src = kmap_atomic(page, KM_USER0);
+ goto memstore;
+ }
+
+ if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
+ &zram->table[index].page, &store_offset,
+ GFP_NOIO | __GFP_HIGHMEM)) {
+ pr_info("Error allocating memory for compressed "
+ "page: %u, size=%zu\n", index, clen);
+ ret = -ENOMEM;
+ goto out;
+ }
+
memstore:
- zram->table[index].offset = offset;
+ zram->table[index].offset = store_offset;
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
- zram->table[index].offset;
+ cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+ zram->table[index].offset;
#if 0
- /* Back-reference needed for memory defragmentation */
- if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
- zheader = (struct zobj_header *)cmem;
- zheader->table_idx = index;
- cmem += sizeof(*zheader);
- }
+ /* Back-reference needed for memory defragmentation */
+ if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
+ zheader = (struct zobj_header *)cmem;
+ zheader->table_idx = index;
+ cmem += sizeof(*zheader);
+ }
#endif
- memcpy(cmem, src, clen);
+ memcpy(cmem, src, clen);
- kunmap_atomic(cmem, KM_USER1);
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
- kunmap_atomic(src, KM_USER0);
+ kunmap_atomic(cmem, KM_USER1);
+ if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
+ kunmap_atomic(src, KM_USER0);
+
+ /* Update stats */
+ zram_stat64_add(zram, &zram->stats.compr_size, clen);
+ zram_stat_inc(&zram->stats.pages_stored);
+ if (clen <= PAGE_SIZE / 2)
+ zram_stat_inc(&zram->stats.good_compress);
+
+ return 0;
+
+out:
+ if (ret)
+ zram_stat64_inc(zram, &zram->stats.failed_writes);
+ return ret;
+}
+
+static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
+ int offset, struct bio *bio, int rw)
+{
+ int ret;
+
+ if (rw == READ) {
+ down_read(&zram->lock);
+ ret = zram_bvec_read(zram, bvec, index, offset, bio);
+ up_read(&zram->lock);
+ } else {
+ down_write(&zram->lock);
+ ret = zram_bvec_write(zram, bvec, index, offset);
+ up_write(&zram->lock);
+ }
+
+ return ret;
+}
+
+static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
+{
+ if (*offset + bvec->bv_len >= PAGE_SIZE)
+ (*index)++;
+ *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
+}
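[Editor's note] The index/offset bookkeeping above reduces to simple integer arithmetic. A small userspace sketch, not part of the patch, is shown below; the starting sector and bio_vec length are made-up values, and the usual 512-byte sector / 4 KiB page sizes are assumed.

#include <stdio.h>

#define SECTOR_SHIFT            9
#define PAGE_SIZE               4096
#define SECTORS_PER_PAGE_SHIFT  3   /* PAGE_SHIFT (12) - SECTOR_SHIFT (9) */
#define SECTORS_PER_PAGE        (1 << SECTORS_PER_PAGE_SHIFT)

int main(void)
{
	unsigned long bi_sector = 13;    /* hypothetical starting sector */
	unsigned int  bv_len    = 3072;  /* hypothetical bio_vec length  */

	unsigned int index  = bi_sector >> SECTORS_PER_PAGE_SHIFT;
	unsigned int offset = (bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	printf("start: index=%u offset=%u\n", index, offset);      /* index=1 offset=2560 */

	/* same update rule as update_position() */
	if (offset + bv_len >= PAGE_SIZE)
		index++;
	offset = (offset + bv_len) % PAGE_SIZE;

	printf("after bvec: index=%u offset=%u\n", index, offset); /* index=2 offset=1536 */
	return 0;
}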
+
+static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
+{
+ int i, offset;
+ u32 index;
+ struct bio_vec *bvec;
+
+ switch (rw) {
+ case READ:
+ zram_stat64_inc(zram, &zram->stats.num_reads);
+ break;
+ case WRITE:
+ zram_stat64_inc(zram, &zram->stats.num_writes);
+ break;
+ }
+
+ index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
+ offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+
+ bio_for_each_segment(bvec, bio, i) {
+ int max_transfer_size = PAGE_SIZE - offset;
+
+ if (bvec->bv_len > max_transfer_size) {
+ /*
+ * zram_bvec_rw() can only operate on a single
+ * zram page. Split the bio vector.
+ */
+ struct bio_vec bv;
+
+ bv.bv_page = bvec->bv_page;
+ bv.bv_len = max_transfer_size;
+ bv.bv_offset = bvec->bv_offset;
+
+ if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
+ goto out;
- /* Update stats */
- zram_stat64_add(zram, &zram->stats.compr_size, clen);
- zram_stat_inc(&zram->stats.pages_stored);
- if (clen <= PAGE_SIZE / 2)
- zram_stat_inc(&zram->stats.good_compress);
+ bv.bv_len = bvec->bv_len - max_transfer_size;
+ bv.bv_offset += max_transfer_size;
+ if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
+ goto out;
+ } else
+ if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
+ < 0)
+ goto out;
- mutex_unlock(&zram->lock);
- index++;
+ update_position(&index, &offset, bvec);
}
set_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -406,14 +537,14 @@ out:
}
/*
- * Check if request is within bounds and page aligned.
+ * Check if request is within bounds and aligned on zram logical blocks.
*/
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
if (unlikely(
(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
- (bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
- (bio->bi_size & (PAGE_SIZE - 1)))) {
+ (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
+ (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {
return 0;
}
@@ -440,15 +571,7 @@ static int zram_make_request(struct request_queue *queue, struct bio *bio)
return 0;
}
- switch (bio_data_dir(bio)) {
- case READ:
- zram_read(zram, bio);
- break;
-
- case WRITE:
- zram_write(zram, bio);
- break;
- }
+ __zram_make_request(zram, bio, bio_data_dir(bio));
return 0;
}
@@ -579,7 +702,7 @@ static int create_device(struct zram *zram, int device_id)
{
int ret = 0;
- mutex_init(&zram->lock);
+ init_rwsem(&zram->lock);
mutex_init(&zram->init_lock);
spin_lock_init(&zram->stat64_lock);
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index 408b2c067fc..abe5221c100 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -61,7 +61,10 @@ static const unsigned max_zpage_size = PAGE_SIZE / 4 * 3;
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
-#define ZRAM_LOGICAL_BLOCK_SIZE 4096
+#define ZRAM_LOGICAL_BLOCK_SHIFT 12
+#define ZRAM_LOGICAL_BLOCK_SIZE (1 << ZRAM_LOGICAL_BLOCK_SHIFT)
+#define ZRAM_SECTOR_PER_LOGICAL_BLOCK \
+ (1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))
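[Editor's note] With the conventional block-layer SECTOR_SHIFT of 9 (512-byte sectors), the new macro works out to eight sectors per 4096-byte logical block. A throwaway compile-time check, not part of the patch and assuming SECTOR_SHIFT == 9, makes the arithmetic explicit:

#define SECTOR_SHIFT                  9   /* assumed, as in the block layer */
#define ZRAM_LOGICAL_BLOCK_SHIFT      12
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK (1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))

_Static_assert(ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8,
	       "a 4096-byte logical block spans eight 512-byte sectors");

int main(void) { return 0; }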
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
@@ -104,8 +107,8 @@ struct zram {
void *compress_buffer;
struct table *table;
spinlock_t stat64_lock; /* protect 64-bit stats */
- struct mutex lock; /* protect compression buffers against
- * concurrent writes */
+ struct rw_semaphore lock; /* protect compression buffers and table
+ * against concurrent reads and writes */
struct request_queue *queue;
struct gendisk *disk;
int init_done;
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 5cb0f0ef6af..b28794b7212 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -31,5 +31,6 @@ config TCM_PSCSI
source "drivers/target/loopback/Kconfig"
source "drivers/target/tcm_fc/Kconfig"
+source "drivers/target/iscsi/Kconfig"
endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 21df808a992..1060c7b7f80 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -24,5 +24,5 @@ obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
# Fabric modules
obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
-
obj-$(CONFIG_TCM_FC) += tcm_fc/
+obj-$(CONFIG_ISCSI_TARGET) += iscsi/
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
new file mode 100644
index 00000000000..8345fb457a4
--- /dev/null
+++ b/drivers/target/iscsi/Kconfig
@@ -0,0 +1,9 @@
+config ISCSI_TARGET
+ tristate "Linux-iSCSI.org iSCSI Target Mode Stack"
+ depends on NET
+ select CRYPTO
+ select CRYPTO_CRC32C
+ select CRYPTO_CRC32C_INTEL if X86
+ help
+ Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
+ Target Mode Stack.
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
new file mode 100644
index 00000000000..5b9a2cf7f0a
--- /dev/null
+++ b/drivers/target/iscsi/Makefile
@@ -0,0 +1,20 @@
+iscsi_target_mod-y += iscsi_target_parameters.o \
+ iscsi_target_seq_pdu_list.o \
+ iscsi_target_tq.o \
+ iscsi_target_auth.o \
+ iscsi_target_datain_values.o \
+ iscsi_target_device.o \
+ iscsi_target_erl0.o \
+ iscsi_target_erl1.o \
+ iscsi_target_erl2.o \
+ iscsi_target_login.o \
+ iscsi_target_nego.o \
+ iscsi_target_nodeattrib.o \
+ iscsi_target_tmr.o \
+ iscsi_target_tpg.o \
+ iscsi_target_util.o \
+ iscsi_target.o \
+ iscsi_target_configfs.o \
+ iscsi_target_stat.o
+
+obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
new file mode 100644
index 00000000000..c24fb10de60
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -0,0 +1,4564 @@
+/*******************************************************************************
+ * This file contains main functions related to the iSCSI Target Core Driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/kthread.h>
+#include <linux/crypto.h>
+#include <linux/completion.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi_device.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_configfs.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_tmr.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_stat.h"
+
+static LIST_HEAD(g_tiqn_list);
+static LIST_HEAD(g_np_list);
+static DEFINE_SPINLOCK(tiqn_lock);
+static DEFINE_SPINLOCK(np_lock);
+
+static struct idr tiqn_idr;
+struct idr sess_idr;
+struct mutex auth_id_lock;
+spinlock_t sess_idr_lock;
+
+struct iscsit_global *iscsit_global;
+
+struct kmem_cache *lio_cmd_cache;
+struct kmem_cache *lio_qr_cache;
+struct kmem_cache *lio_dr_cache;
+struct kmem_cache *lio_ooo_cache;
+struct kmem_cache *lio_r2t_cache;
+
+static int iscsit_handle_immediate_data(struct iscsi_cmd *,
+ unsigned char *buf, u32);
+static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+
+struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
+{
+ struct iscsi_tiqn *tiqn = NULL;
+
+ spin_lock(&tiqn_lock);
+ list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
+ if (!strcmp(tiqn->tiqn, buf)) {
+
+ spin_lock(&tiqn->tiqn_state_lock);
+ if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
+ tiqn->tiqn_access_count++;
+ spin_unlock(&tiqn->tiqn_state_lock);
+ spin_unlock(&tiqn_lock);
+ return tiqn;
+ }
+ spin_unlock(&tiqn->tiqn_state_lock);
+ }
+ }
+ spin_unlock(&tiqn_lock);
+
+ return NULL;
+}
+
+static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
+{
+ spin_lock(&tiqn->tiqn_state_lock);
+ if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
+ tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
+ spin_unlock(&tiqn->tiqn_state_lock);
+ return 0;
+ }
+ spin_unlock(&tiqn->tiqn_state_lock);
+
+ return -1;
+}
+
+void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
+{
+ spin_lock(&tiqn->tiqn_state_lock);
+ tiqn->tiqn_access_count--;
+ spin_unlock(&tiqn->tiqn_state_lock);
+}
+
+/*
+ * Note that IQN formatting is expected to be done in userspace, and
+ * no explicit IQN format checks are done here.
+ */
+struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
+{
+ struct iscsi_tiqn *tiqn = NULL;
+ int ret;
+
+ if (strlen(buf) >= ISCSI_IQN_LEN) {
+ pr_err("Target IQN exceeds %d bytes\n",
+ ISCSI_IQN_LEN);
+ return ERR_PTR(-EINVAL);
+ }
+
+ tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL);
+ if (!tiqn) {
+ pr_err("Unable to allocate struct iscsi_tiqn\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sprintf(tiqn->tiqn, "%s", buf);
+ INIT_LIST_HEAD(&tiqn->tiqn_list);
+ INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
+ spin_lock_init(&tiqn->tiqn_state_lock);
+ spin_lock_init(&tiqn->tiqn_tpg_lock);
+ spin_lock_init(&tiqn->sess_err_stats.lock);
+ spin_lock_init(&tiqn->login_stats.lock);
+ spin_lock_init(&tiqn->logout_stats.lock);
+
+ if (!idr_pre_get(&tiqn_idr, GFP_KERNEL)) {
+ pr_err("idr_pre_get() for tiqn_idr failed\n");
+ kfree(tiqn);
+ return ERR_PTR(-ENOMEM);
+ }
+ tiqn->tiqn_state = TIQN_STATE_ACTIVE;
+
+ spin_lock(&tiqn_lock);
+ ret = idr_get_new(&tiqn_idr, NULL, &tiqn->tiqn_index);
+ if (ret < 0) {
+ pr_err("idr_get_new() failed for tiqn->tiqn_index\n");
+ spin_unlock(&tiqn_lock);
+ kfree(tiqn);
+ return ERR_PTR(ret);
+ }
+ list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
+ spin_unlock(&tiqn_lock);
+
+ pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
+
+ return tiqn;
+
+}
+
+static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
+{
+ /*
+ * Wait for accesses to said struct iscsi_tiqn to end.
+ */
+ spin_lock(&tiqn->tiqn_state_lock);
+ while (tiqn->tiqn_access_count != 0) {
+ spin_unlock(&tiqn->tiqn_state_lock);
+ msleep(10);
+ spin_lock(&tiqn->tiqn_state_lock);
+ }
+ spin_unlock(&tiqn->tiqn_state_lock);
+}
+
+void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
+{
+ /*
+ * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
+ * while holding tiqn->tiqn_state_lock. This means that all subsequent
+ * attempts to access this struct iscsi_tiqn will fail from both transport
+ * fabric and control code paths.
+ */
+ if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
+ pr_err("iscsit_set_tiqn_shutdown() failed\n");
+ return;
+ }
+
+ iscsit_wait_for_tiqn(tiqn);
+
+ spin_lock(&tiqn_lock);
+ list_del(&tiqn->tiqn_list);
+ idr_remove(&tiqn_idr, tiqn->tiqn_index);
+ spin_unlock(&tiqn_lock);
+
+ pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
+ tiqn->tiqn);
+ kfree(tiqn);
+}
+
+int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
+{
+ int ret;
+ /*
+ * Determine if the network portal is accepting storage traffic.
+ */
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return -1;
+ }
+ if (np->np_login_tpg) {
+ pr_err("np->np_login_tpg() is not NULL!\n");
+ spin_unlock_bh(&np->np_thread_lock);
+ return -1;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+ /*
+ * Determine if the portal group is accepting storage traffic.
+ */
+ spin_lock_bh(&tpg->tpg_state_lock);
+ if (tpg->tpg_state != TPG_STATE_ACTIVE) {
+ spin_unlock_bh(&tpg->tpg_state_lock);
+ return -1;
+ }
+ spin_unlock_bh(&tpg->tpg_state_lock);
+
+ /*
+ * Here we serialize access across the TIQN+TPG Tuple.
+ */
+ ret = mutex_lock_interruptible(&tpg->np_login_lock);
+ if ((ret != 0) || signal_pending(current))
+ return -1;
+
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_login_tpg = tpg;
+ spin_unlock_bh(&np->np_thread_lock);
+
+ return 0;
+}
+
+int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
+{
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_login_tpg = NULL;
+ spin_unlock_bh(&np->np_thread_lock);
+
+ mutex_unlock(&tpg->np_login_lock);
+
+ if (tiqn)
+ iscsit_put_tiqn_for_login(tiqn);
+
+ return 0;
+}
+
+static struct iscsi_np *iscsit_get_np(
+ struct __kernel_sockaddr_storage *sockaddr,
+ int network_transport)
+{
+ struct sockaddr_in *sock_in, *sock_in_e;
+ struct sockaddr_in6 *sock_in6, *sock_in6_e;
+ struct iscsi_np *np;
+ int ip_match = 0;
+ u16 port;
+
+ spin_lock_bh(&np_lock);
+ list_for_each_entry(np, &g_np_list, np_list) {
+ spin_lock(&np->np_thread_lock);
+ if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+ spin_unlock(&np->np_thread_lock);
+ continue;
+ }
+
+ if (sockaddr->ss_family == AF_INET6) {
+ sock_in6 = (struct sockaddr_in6 *)sockaddr;
+ sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
+
+ if (!memcmp((void *)&sock_in6->sin6_addr.in6_u,
+ (void *)&sock_in6_e->sin6_addr.in6_u,
+ sizeof(struct in6_addr)))
+ ip_match = 1;
+
+ port = ntohs(sock_in6->sin6_port);
+ } else {
+ sock_in = (struct sockaddr_in *)sockaddr;
+ sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
+
+ if (sock_in->sin_addr.s_addr ==
+ sock_in_e->sin_addr.s_addr)
+ ip_match = 1;
+
+ port = ntohs(sock_in->sin_port);
+ }
+
+ if ((ip_match == 1) && (np->np_port == port) &&
+ (np->np_network_transport == network_transport)) {
+ /*
+ * Increment the np_exports reference count now to
+ * prevent iscsit_del_np() below from being called
+ * while iscsi_tpg_add_network_portal() is called.
+ */
+ np->np_exports++;
+ spin_unlock(&np->np_thread_lock);
+ spin_unlock_bh(&np_lock);
+ return np;
+ }
+ spin_unlock(&np->np_thread_lock);
+ }
+ spin_unlock_bh(&np_lock);
+
+ return NULL;
+}
+
+struct iscsi_np *iscsit_add_np(
+ struct __kernel_sockaddr_storage *sockaddr,
+ char *ip_str,
+ int network_transport)
+{
+ struct sockaddr_in *sock_in;
+ struct sockaddr_in6 *sock_in6;
+ struct iscsi_np *np;
+ int ret;
+ /*
+ * Locate the existing struct iscsi_np if already active.
+ */
+ np = iscsit_get_np(sockaddr, network_transport);
+ if (np)
+ return np;
+
+ np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
+ if (!np) {
+ pr_err("Unable to allocate memory for struct iscsi_np\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ np->np_flags |= NPF_IP_NETWORK;
+ if (sockaddr->ss_family == AF_INET6) {
+ sock_in6 = (struct sockaddr_in6 *)sockaddr;
+ snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
+ np->np_port = ntohs(sock_in6->sin6_port);
+ } else {
+ sock_in = (struct sockaddr_in *)sockaddr;
+ sprintf(np->np_ip, "%s", ip_str);
+ np->np_port = ntohs(sock_in->sin_port);
+ }
+
+ np->np_network_transport = network_transport;
+ spin_lock_init(&np->np_thread_lock);
+ init_completion(&np->np_restart_comp);
+ INIT_LIST_HEAD(&np->np_list);
+
+ ret = iscsi_target_setup_login_socket(np, sockaddr);
+ if (ret != 0) {
+ kfree(np);
+ return ERR_PTR(ret);
+ }
+
+ np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
+ if (IS_ERR(np->np_thread)) {
+ pr_err("Unable to create kthread: iscsi_np\n");
+ ret = PTR_ERR(np->np_thread);
+ kfree(np);
+ return ERR_PTR(ret);
+ }
+ /*
+ * Increment the np_exports reference count now to prevent
+ * iscsit_del_np() below from being run while a new call to
+ * iscsi_tpg_add_network_portal() for a matching iscsi_np is
+ * active. We don't need to hold np->np_thread_lock at this
+ * point because iscsi_np has not been added to g_np_list yet.
+ */
+ np->np_exports = 1;
+
+ spin_lock_bh(&np_lock);
+ list_add_tail(&np->np_list, &g_np_list);
+ spin_unlock_bh(&np_lock);
+
+ pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
+ np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
+ "TCP" : "SCTP");
+
+ return np;
+}
+
+int iscsit_reset_np_thread(
+ struct iscsi_np *np,
+ struct iscsi_tpg_np *tpg_np,
+ struct iscsi_portal_group *tpg)
+{
+ spin_lock_bh(&np->np_thread_lock);
+ if (tpg && tpg_np) {
+ /*
+ * The reset operation need only be performed when the
+ * passed struct iscsi_portal_group has a login in progress
+ * to one of the network portals.
+ */
+ if (tpg_np->tpg_np->np_login_tpg != tpg) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return 0;
+ }
+ }
+ if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return 0;
+ }
+ np->np_thread_state = ISCSI_NP_THREAD_RESET;
+
+ if (np->np_thread) {
+ spin_unlock_bh(&np->np_thread_lock);
+ send_sig(SIGINT, np->np_thread, 1);
+ wait_for_completion(&np->np_restart_comp);
+ spin_lock_bh(&np->np_thread_lock);
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ return 0;
+}
+
+int iscsit_del_np_comm(struct iscsi_np *np)
+{
+ if (!np->np_socket)
+ return 0;
+
+ /*
+ * Some network transports allocate their own struct sock->file,
+ * see if we need to free any additional allocated resources.
+ */
+ if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
+ kfree(np->np_socket->file);
+ np->np_socket->file = NULL;
+ }
+
+ sock_release(np->np_socket);
+ return 0;
+}
+
+int iscsit_del_np(struct iscsi_np *np)
+{
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_exports--;
+ if (np->np_exports) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return 0;
+ }
+ np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
+ spin_unlock_bh(&np->np_thread_lock);
+
+ if (np->np_thread) {
+ /*
+ * We need to send the signal to wakeup Linux/Net
+ * which may be sleeping in sock_accept()..
+ */
+ send_sig(SIGINT, np->np_thread, 1);
+ kthread_stop(np->np_thread);
+ }
+ iscsit_del_np_comm(np);
+
+ spin_lock_bh(&np_lock);
+ list_del(&np->np_list);
+ spin_unlock_bh(&np_lock);
+
+ pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
+ np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
+ "TCP" : "SCTP");
+
+ kfree(np);
+ return 0;
+}
+
+static int __init iscsi_target_init_module(void)
+{
+ int ret = 0;
+
+ pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
+
+ iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
+ if (!iscsit_global) {
+ pr_err("Unable to allocate memory for iscsit_global\n");
+ return -1;
+ }
+ mutex_init(&auth_id_lock);
+ spin_lock_init(&sess_idr_lock);
+ idr_init(&tiqn_idr);
+ idr_init(&sess_idr);
+
+ ret = iscsi_target_register_configfs();
+ if (ret < 0)
+ goto out;
+
+ ret = iscsi_thread_set_init();
+ if (ret < 0)
+ goto configfs_out;
+
+ if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
+ TARGET_THREAD_SET_COUNT) {
+ pr_err("iscsi_allocate_thread_sets() returned"
+ " unexpected value!\n");
+ goto ts_out1;
+ }
+
+ lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
+ sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
+ 0, NULL);
+ if (!lio_cmd_cache) {
+ pr_err("Unable to kmem_cache_create() for"
+ " lio_cmd_cache\n");
+ goto ts_out2;
+ }
+
+ lio_qr_cache = kmem_cache_create("lio_qr_cache",
+ sizeof(struct iscsi_queue_req),
+ __alignof__(struct iscsi_queue_req), 0, NULL);
+ if (!lio_qr_cache) {
+ pr_err("nable to kmem_cache_create() for"
+ " lio_qr_cache\n");
+ goto cmd_out;
+ }
+
+ lio_dr_cache = kmem_cache_create("lio_dr_cache",
+ sizeof(struct iscsi_datain_req),
+ __alignof__(struct iscsi_datain_req), 0, NULL);
+ if (!lio_dr_cache) {
+ pr_err("Unable to kmem_cache_create() for"
+ " lio_dr_cache\n");
+ goto qr_out;
+ }
+
+ lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
+ sizeof(struct iscsi_ooo_cmdsn),
+ __alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
+ if (!lio_ooo_cache) {
+ pr_err("Unable to kmem_cache_create() for"
+ " lio_ooo_cache\n");
+ goto dr_out;
+ }
+
+ lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
+ sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
+ 0, NULL);
+ if (!lio_r2t_cache) {
+ pr_err("Unable to kmem_cache_create() for"
+ " lio_r2t_cache\n");
+ goto ooo_out;
+ }
+
+ if (iscsit_load_discovery_tpg() < 0)
+ goto r2t_out;
+
+ return ret;
+r2t_out:
+ kmem_cache_destroy(lio_r2t_cache);
+ooo_out:
+ kmem_cache_destroy(lio_ooo_cache);
+dr_out:
+ kmem_cache_destroy(lio_dr_cache);
+qr_out:
+ kmem_cache_destroy(lio_qr_cache);
+cmd_out:
+ kmem_cache_destroy(lio_cmd_cache);
+ts_out2:
+ iscsi_deallocate_thread_sets();
+ts_out1:
+ iscsi_thread_set_free();
+configfs_out:
+ iscsi_target_deregister_configfs();
+out:
+ kfree(iscsit_global);
+ return -ENOMEM;
+}
+
+static void __exit iscsi_target_cleanup_module(void)
+{
+ iscsi_deallocate_thread_sets();
+ iscsi_thread_set_free();
+ iscsit_release_discovery_tpg();
+ kmem_cache_destroy(lio_cmd_cache);
+ kmem_cache_destroy(lio_qr_cache);
+ kmem_cache_destroy(lio_dr_cache);
+ kmem_cache_destroy(lio_ooo_cache);
+ kmem_cache_destroy(lio_r2t_cache);
+
+ iscsi_target_deregister_configfs();
+
+ kfree(iscsit_global);
+}
+
+int iscsit_add_reject(
+ u8 reason,
+ int fail_conn,
+ unsigned char *buf,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd;
+ struct iscsi_reject *hdr;
+ int ret;
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return -1;
+
+ cmd->iscsi_opcode = ISCSI_OP_REJECT;
+ if (fail_conn)
+ cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
+
+ hdr = (struct iscsi_reject *) cmd->pdu;
+ hdr->reason = reason;
+
+ cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ if (!cmd->buf_ptr) {
+ pr_err("Unable to allocate memory for cmd->buf_ptr\n");
+ iscsit_release_cmd(cmd);
+ return -1;
+ }
+ memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ cmd->i_state = ISTATE_SEND_REJECT;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ ret = wait_for_completion_interruptible(&cmd->reject_comp);
+ if (ret != 0)
+ return -1;
+
+ return (!fail_conn) ? 0 : -1;
+}
+
+int iscsit_add_reject_from_cmd(
+ u8 reason,
+ int fail_conn,
+ int add_to_conn,
+ unsigned char *buf,
+ struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn;
+ struct iscsi_reject *hdr;
+ int ret;
+
+ if (!cmd->conn) {
+ pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
+ cmd->init_task_tag);
+ return -1;
+ }
+ conn = cmd->conn;
+
+ cmd->iscsi_opcode = ISCSI_OP_REJECT;
+ if (fail_conn)
+ cmd->cmd_flags |= ICF_REJECT_FAIL_CONN;
+
+ hdr = (struct iscsi_reject *) cmd->pdu;
+ hdr->reason = reason;
+
+ cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ if (!cmd->buf_ptr) {
+ pr_err("Unable to allocate memory for cmd->buf_ptr\n");
+ iscsit_release_cmd(cmd);
+ return -1;
+ }
+ memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
+
+ if (add_to_conn) {
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+ }
+
+ cmd->i_state = ISTATE_SEND_REJECT;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ ret = wait_for_completion_interruptible(&cmd->reject_comp);
+ if (ret != 0)
+ return -1;
+
+ return (!fail_conn) ? 0 : -1;
+}
+
+/*
+ * Map some portion of the allocated scatterlist to an iovec, suitable for
+ * kernel sockets to copy data in/out. This handles both pages and slab-allocated
+ * buffers, since we have been tricky and mapped t_mem_sg to the buffer in
+ * either case (see iscsit_alloc_buffs)
+ */
+static int iscsit_map_iovec(
+ struct iscsi_cmd *cmd,
+ struct kvec *iov,
+ u32 data_offset,
+ u32 data_length)
+{
+ u32 i = 0;
+ struct scatterlist *sg;
+ unsigned int page_off;
+
+ /*
+ * We have a private mapping of the allocated pages in t_mem_sg.
+ * At this point, we also know each contains a page.
+ */
+ sg = &cmd->t_mem_sg[data_offset / PAGE_SIZE];
+ page_off = (data_offset % PAGE_SIZE);
+
+ cmd->first_data_sg = sg;
+ cmd->first_data_sg_off = page_off;
+
+ while (data_length) {
+ u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+
+ iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
+ iov[i].iov_len = cur_len;
+
+ data_length -= cur_len;
+ page_off = 0;
+ sg = sg_next(sg);
+ i++;
+ }
+
+ cmd->kmapped_nents = i;
+
+ return i;
+}
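[Editor's note] To make the mapping above easier to follow, here is a userspace sketch, not part of the patch, of the same offset/length clamping walk; the scatterlist is replaced by the page-per-entry assumption the comment describes, and the offset and length are made-up values.

#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
	unsigned int data_offset = 5000;   /* hypothetical offset into the buffer */
	unsigned int data_length = 6000;   /* hypothetical span to map            */

	unsigned int sg_idx   = data_offset / PAGE_SIZE;   /* first entry to touch */
	unsigned int page_off = data_offset % PAGE_SIZE;   /* offset within it     */
	unsigned int iovcnt   = 0;

	while (data_length) {
		/* clamp to what is left in this page, then to what is left overall */
		unsigned int cur_len = PAGE_SIZE - page_off;
		if (cur_len > data_length)
			cur_len = data_length;

		printf("iov[%u]: sg entry %u, offset %u, len %u\n",
		       iovcnt, sg_idx, page_off, cur_len);

		data_length -= cur_len;
		page_off = 0;
		sg_idx++;
		iovcnt++;
	}
	return 0;
}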
+
+static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
+{
+ u32 i;
+ struct scatterlist *sg;
+
+ sg = cmd->first_data_sg;
+
+ for (i = 0; i < cmd->kmapped_nents; i++)
+ kunmap(sg_page(&sg[i]));
+}
+
+static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
+{
+ struct iscsi_cmd *cmd;
+
+ conn->exp_statsn = exp_statsn;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ spin_lock(&cmd->istate_lock);
+ if ((cmd->i_state == ISTATE_SENT_STATUS) &&
+ (cmd->stat_sn < exp_statsn)) {
+ cmd->i_state = ISTATE_REMOVE;
+ spin_unlock(&cmd->istate_lock);
+ iscsit_add_cmd_to_immediate_queue(cmd, conn,
+ cmd->i_state);
+ continue;
+ }
+ spin_unlock(&cmd->istate_lock);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+}
+
+static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
+{
+ u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 :
+ cmd->se_cmd.t_data_nents;
+
+ iov_count += TRANSPORT_IOV_DATA_BUFFER;
+
+ cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);
+ if (!cmd->iov_data) {
+ pr_err("Unable to allocate cmd->iov_data\n");
+ return -ENOMEM;
+ }
+
+ cmd->orig_iov_data_count = iov_count;
+ return 0;
+}
+
+static int iscsit_alloc_buffs(struct iscsi_cmd *cmd)
+{
+ struct scatterlist *sgl;
+ u32 length = cmd->se_cmd.data_length;
+ int nents = DIV_ROUND_UP(length, PAGE_SIZE);
+ int i = 0, ret;
+ /*
+ * If no SCSI payload is present, allocate the default iovecs used for
+ * iSCSI PDU Header
+ */
+ if (!length)
+ return iscsit_allocate_iovecs(cmd);
+
+ sgl = kzalloc(sizeof(*sgl) * nents, GFP_KERNEL);
+ if (!sgl)
+ return -ENOMEM;
+
+ sg_init_table(sgl, nents);
+
+ while (length) {
+ int buf_size = min_t(int, length, PAGE_SIZE);
+ struct page *page;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ goto page_alloc_failed;
+
+ sg_set_page(&sgl[i], page, buf_size, 0);
+
+ length -= buf_size;
+ i++;
+ }
+
+ cmd->t_mem_sg = sgl;
+ cmd->t_mem_sg_nents = nents;
+
+ /* BIDI ops not supported */
+
+ /* Tell the core about our preallocated memory */
+ transport_generic_map_mem_to_cmd(&cmd->se_cmd, sgl, nents, NULL, 0);
+ /*
+ * Allocate iovecs for SCSI payload after transport_generic_map_mem_to_cmd
+ * so that cmd->se_cmd.t_tasks_se_num has been set.
+ */
+ ret = iscsit_allocate_iovecs(cmd);
+ if (ret < 0)
+ goto page_alloc_failed;
+
+ return 0;
+
+page_alloc_failed:
+ while (i >= 0) {
+ __free_page(sg_page(&sgl[i]));
+ i--;
+ }
+ kfree(cmd->t_mem_sg);
+ cmd->t_mem_sg = NULL;
+ return -ENOMEM;
+}
+
+static int iscsit_handle_scsi_cmd(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ int data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
+ int dump_immediate_data = 0, send_check_condition = 0, payload_length;
+ struct iscsi_cmd *cmd = NULL;
+ struct iscsi_scsi_req *hdr;
+
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->cmd_pdus++;
+ if (conn->sess->se_sess->se_node_acl) {
+ spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ conn->sess->se_sess->se_node_acl->num_cmds++;
+ spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ }
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+
+ hdr = (struct iscsi_scsi_req *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->data_length = be32_to_cpu(hdr->data_length);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+
+ /* FIXME: Add checks for AdditionalHeaderSegment */
+
+ if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
+ !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
+ pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
+ " not set. Bad iSCSI Initiator.\n");
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
+ if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
+ (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
+ /*
+ * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
+ * that adds support for RESERVE/RELEASE. There is a bug
+ * in this new functionality that sets the R/W bits when
+ * neither CDB carries any READ or WRITE data payloads.
+ */
+ if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
+ hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+ hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
+ goto done;
+ }
+
+ pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+ " set when Expected Data Transfer Length is 0 for"
+ " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+done:
+
+ if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
+ !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
+ pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
+ " MUST be set if Expected Data Transfer Length is not 0."
+ " Bad iSCSI Initiator\n");
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
+ if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
+ (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
+ pr_err("Bidirectional operations not supported!\n");
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
+ if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+ pr_err("Illegally set Immediate Bit in iSCSI Initiator"
+ " Scsi Command PDU.\n");
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
+ if (payload_length && !conn->sess->sess_ops->ImmediateData) {
+ pr_err("ImmediateData=No but DataSegmentLength=%u,"
+ " protocol error.\n", payload_length);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if ((hdr->data_length == payload_length) &&
+ (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
+ pr_err("Expected Data Transfer Length and Length of"
+ " Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
+ " bit is not set protocol error\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if (payload_length > hdr->data_length) {
+ pr_err("DataSegmentLength: %u is greater than"
+ " EDTL: %u, protocol error.\n", payload_length,
+ hdr->data_length);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("DataSegmentLength: %u is greater than"
+ " MaxRecvDataSegmentLength: %u, protocol error.\n",
+ payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
+ pr_err("DataSegmentLength: %u is greater than"
+ " FirstBurstLength: %u, protocol error.\n",
+ payload_length, conn->sess->sess_ops->FirstBurstLength);
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
+ buf, conn);
+ }
+
+ data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
+ (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
+ DMA_NONE;
+
+ cmd = iscsit_allocate_se_cmd(conn, hdr->data_length, data_direction,
+ (hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK));
+ if (!cmd)
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
+ buf, conn);
+
+ pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
+ " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
+ hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
+
+ cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
+ cmd->i_state = ISTATE_NEW_CMD;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+ cmd->immediate_data = (payload_length) ? 1 : 0;
+ cmd->unsolicited_data = ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
+ (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
+ if (cmd->unsolicited_data)
+ cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
+
+ conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ spin_lock_bh(&conn->sess->ttt_lock);
+ cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ if (cmd->targ_xfer_tag == 0xFFFFFFFF)
+ cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ spin_unlock_bh(&conn->sess->ttt_lock);
+ } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->first_burst_len = payload_length;
+
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
+ struct iscsi_datain_req *dr;
+
+ dr = iscsit_allocate_datain_req();
+ if (!dr)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+
+ iscsit_attach_datain_req(cmd, dr);
+ }
+
+ /*
+ * The CDB is going to an se_device_t.
+ */
+ ret = iscsit_get_lun_for_cmd(cmd, hdr->cdb,
+ get_unaligned_le64(&hdr->lun));
+ if (ret < 0) {
+ if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
+ pr_debug("Responding to non-acl'ed,"
+ " non-existent or non-exported iSCSI LUN:"
+ " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
+ }
+ if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+
+ send_check_condition = 1;
+ goto attach_cmd;
+ }
+ /*
+ * The Initiator Node has access to the LUN (the addressing method
+ * is handled inside of iscsit_get_lun_for_cmd()). Now it's time to
+ * allocate 1->N transport tasks (depending on sector count and
+ * maximum request size the physical HBA(s) can handle).
+ */
+ transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb);
+ if (transport_ret == -ENOMEM) {
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+ } else if (transport_ret == -EINVAL) {
+ /*
+ * Unsupported SAM Opcode. CHECK_CONDITION will be sent
+ * in iscsit_execute_cmd() during the CmdSN OOO Execution
+ * Mechanism.
+ */
+ send_check_condition = 1;
+ } else {
+ if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+ }
+
+attach_cmd:
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+ /*
+ * Check if we need to delay processing because of ALUA
+ * Active/NonOptimized primary access state..
+ */
+ core_alua_check_nonop_delay(&cmd->se_cmd);
+ /*
+ * Allocate and setup SGL used with transport_generic_map_mem_to_cmd().
+ * also call iscsit_allocate_iovecs()
+ */
+ ret = iscsit_alloc_buffs(cmd);
+ if (ret < 0)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 1, buf, cmd);
+ /*
+ * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
+ * the Immediate Bit is not set, and no Immediate
+ * Data is attached.
+ *
+ * A PDU/CmdSN carrying Immediate Data can only
+ * be processed after the DataCRC has passed.
+ * If the DataCRC fails, the CmdSN MUST NOT
+ * be acknowledged. (See below)
+ */
+ if (!cmd->immediate_data) {
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ /*
+ * If no Immediate Data is attached, it's OK to return now.
+ */
+ if (!cmd->immediate_data) {
+ if (send_check_condition)
+ return 0;
+
+ if (cmd->unsolicited_data) {
+ iscsit_set_dataout_sequence_values(cmd);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ iscsit_start_dataout_timer(cmd, cmd->conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ }
+
+ return 0;
+ }
+
+ /*
+ * Early CHECK_CONDITIONs never make it to the transport processing
+ * thread. They are processed in CmdSN order by
+ * iscsit_check_received_cmdsn() below.
+ */
+ if (send_check_condition) {
+ immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+ dump_immediate_data = 1;
+ goto after_immediate_data;
+ }
+ /*
+ * Call directly into transport_generic_new_cmd() to perform
+ * the backend memory allocation.
+ */
+ ret = transport_generic_new_cmd(&cmd->se_cmd);
+ if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+ immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+ dump_immediate_data = 1;
+ goto after_immediate_data;
+ }
+
+ immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length);
+after_immediate_data:
+ if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
+ /*
+ * A PDU/CmdSN carrying Immediate Data passed
+ * DataCRC, check against ExpCmdSN/MaxCmdSN if
+ * Immediate Bit is not set.
+ */
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ /*
+ * Special case for Unsupported SAM WRITE Opcodes
+ * and ImmediateData=Yes.
+ */
+ if (dump_immediate_data) {
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return -1;
+ } else if (cmd->unsolicited_data) {
+ iscsit_set_dataout_sequence_values(cmd);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ iscsit_start_dataout_timer(cmd, cmd->conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ }
+
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+
+ } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
+ /*
+ * Immediate Data failed DataCRC and ERL>=1,
+ * silently drop this PDU and let the initiator
+ * plug the CmdSN gap.
+ *
+ * FIXME: Send Unsolicited NOPIN with reserved
+ * TTT here to help the initiator figure out
+ * the missing CmdSN, although they should be
+ * intelligent enough to determine the missing
+ * CmdSN and issue a retry to plug the sequence.
+ */
+ cmd->i_state = ISTATE_REMOVE;
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+ } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
+ return -1;
+
+ return 0;
+}
+
+static u32 iscsit_do_crypto_hash_sg(
+ struct hash_desc *hash,
+ struct iscsi_cmd *cmd,
+ u32 data_offset,
+ u32 data_length,
+ u32 padding,
+ u8 *pad_bytes)
+{
+ u32 data_crc;
+ u32 i;
+ struct scatterlist *sg;
+ unsigned int page_off;
+
+ crypto_hash_init(hash);
+
+ sg = cmd->first_data_sg;
+ page_off = cmd->first_data_sg_off;
+
+ i = 0;
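+ /*
+ * Walk the command's scatterlist, feeding up to data_length bytes
+ * into the CRC32C transform.  Only the first entry is shortened by
+ * first_data_sg_off; subsequent entries start at offset zero.
+ */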
+ while (data_length) {
+ u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off));
+
+ crypto_hash_update(hash, &sg[i], cur_len);
+
+ data_length -= cur_len;
+ page_off = 0;
+ i++;
+ }
+
+ if (padding) {
+ struct scatterlist pad_sg;
+
+ sg_init_one(&pad_sg, pad_bytes, padding);
+ crypto_hash_update(hash, &pad_sg, padding);
+ }
+ crypto_hash_final(hash, (u8 *) &data_crc);
+
+ return data_crc;
+}
+
+static void iscsit_do_crypto_hash_buf(
+ struct hash_desc *hash,
+ unsigned char *buf,
+ u32 payload_length,
+ u32 padding,
+ u8 *pad_bytes,
+ u8 *data_crc)
+{
+ struct scatterlist sg;
+
+ crypto_hash_init(hash);
+
+ sg_init_one(&sg, (u8 *)buf, payload_length);
+ crypto_hash_update(hash, &sg, payload_length);
+
+ if (padding) {
+ sg_init_one(&sg, pad_bytes, padding);
+ crypto_hash_update(hash, &sg, padding);
+ }
+ crypto_hash_final(hash, data_crc);
+}
+
+static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
+{
+ int iov_ret, ooo_cmdsn = 0, ret;
+ u8 data_crc_failed = 0;
+ u32 checksum, iov_count = 0, padding = 0, rx_got = 0;
+ u32 rx_size = 0, payload_length;
+ struct iscsi_cmd *cmd = NULL;
+ struct se_cmd *se_cmd;
+ struct iscsi_data *hdr;
+ struct kvec *iov;
+ unsigned long flags;
+
+ hdr = (struct iscsi_data *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->ttt = be32_to_cpu(hdr->ttt);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+ hdr->datasn = be32_to_cpu(hdr->datasn);
+ hdr->offset = be32_to_cpu(hdr->offset);
+
+ if (!payload_length) {
+ pr_err("DataOUT payload is ZERO, protocol error.\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ /* iSCSI write */
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->rx_data_octets += payload_length;
+ if (conn->sess->se_sess->se_node_acl) {
+ spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
+ spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ }
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+
+ if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("DataSegmentLength: %u is greater than"
+ " MaxRecvDataSegmentLength: %u\n", payload_length,
+ conn->conn_ops->MaxRecvDataSegmentLength);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
+ payload_length);
+ if (!cmd)
+ return 0;
+
+ pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
+ " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
+ hdr->itt, hdr->ttt, hdr->datasn, hdr->offset,
+ payload_length, conn->cid);
+
+ if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+ pr_err("Command ITT: 0x%08x received DataOUT after"
+ " last DataOUT received, dumping payload\n",
+ cmd->init_task_tag);
+ return iscsit_dump_data_payload(conn, payload_length, 1);
+ }
+
+ if (cmd->data_direction != DMA_TO_DEVICE) {
+ pr_err("Command ITT: 0x%08x received DataOUT for a"
+ " NON-WRITE command.\n", cmd->init_task_tag);
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+ se_cmd = &cmd->se_cmd;
+ iscsit_mod_dataout_timer(cmd);
+
+ if ((hdr->offset + payload_length) > cmd->data_length) {
+ pr_err("DataOut Offset: %u, Length %u greater than"
+ " iSCSI Command EDTL %u, protocol error.\n",
+ hdr->offset, payload_length, cmd->data_length);
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
+ 1, 0, buf, cmd);
+ }
+
+ if (cmd->unsolicited_data) {
+ int dump_unsolicited_data = 0;
+
+ if (conn->sess->sess_ops->InitialR2T) {
+ pr_err("Received unexpected unsolicited data"
+ " while InitialR2T=Yes, protocol error.\n");
+ transport_send_check_condition_and_sense(&cmd->se_cmd,
+ TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
+ return -1;
+ }
+ /*
+ * Special case for dealing with Unsolicited DataOUT, Unsupported
+ * SAM WRITE Opcodes, and SE resource allocation failures.
+ */
+
+ /* Something's amiss if we're not in WRITE_PENDING state... */
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
+ (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+ dump_unsolicited_data = 1;
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ if (dump_unsolicited_data) {
+ /*
+ * Check if a delayed TASK_ABORTED status needs to
+ * be sent now if the ISCSI_FLAG_CMD_FINAL has been
+ * received with the unsolicited data out.
+ */
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
+ iscsit_stop_dataout_timer(cmd);
+
+ transport_check_aborted_status(se_cmd,
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL));
+ return iscsit_dump_data_payload(conn, payload_length, 1);
+ }
+ } else {
+ /*
+ * For the normal solicited data path:
+ *
+ * Check for a delayed TASK_ABORTED status and dump any
+ * incoming data out payload if one exists. Also, when the
+ * ISCSI_FLAG_CMD_FINAL is set to denote the end of the current
+ * data out sequence, we decrement outstanding_r2ts. Once
+ * outstanding_r2ts reaches zero, go ahead and send the delayed
+ * TASK_ABORTED status.
+ */
+ if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
+ if (--cmd->outstanding_r2ts < 1) {
+ iscsit_stop_dataout_timer(cmd);
+ transport_check_aborted_status(
+ se_cmd, 1);
+ }
+
+ return iscsit_dump_data_payload(conn, payload_length, 1);
+ }
+ }
+ /*
+ * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
+ * within-command recovery checks before receiving the payload.
+ */
+ ret = iscsit_check_pre_dataout(cmd, buf);
+ if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)
+ return 0;
+ else if (ret == DATAOUT_CANNOT_RECOVER)
+ return -1;
+
+ rx_size += payload_length;
+ iov = &cmd->iov_data[0];
+
+ iov_ret = iscsit_map_iovec(cmd, iov, hdr->offset, payload_length);
+ if (iov_ret < 0)
+ return -1;
+
+ iov_count += iov_ret;
+
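+ /*
+ * iSCSI data segments are padded out to a 4-byte boundary, so
+ * ((-payload_length) & 3) yields the 0-3 pad bytes that still
+ * need to be received along with the payload.
+ */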
+ padding = ((-payload_length) & 3);
+ if (padding != 0) {
+ iov[iov_count].iov_base = cmd->pad_bytes;
+ iov[iov_count++].iov_len = padding;
+ rx_size += padding;
+ pr_debug("Receiving %u padding bytes.\n", padding);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iov[iov_count].iov_base = &checksum;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ rx_size += ISCSI_CRC_LEN;
+ }
+
+ rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
+
+ iscsit_unmap_iovec(cmd);
+
+ if (rx_got != rx_size)
+ return -1;
+
+ if (conn->conn_ops->DataDigest) {
+ u32 data_crc;
+
+ data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+ hdr->offset, payload_length, padding,
+ cmd->pad_bytes);
+
+ if (checksum != data_crc) {
+ pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
+ " DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
+ " does not match computed 0x%08x\n",
+ hdr->itt, hdr->offset, payload_length,
+ hdr->datasn, checksum, data_crc);
+ data_crc_failed = 1;
+ } else {
+ pr_debug("Got CRC32C DataDigest 0x%08x for"
+ " %u bytes of Data Out\n", checksum,
+ payload_length);
+ }
+ }
+ /*
+ * Increment post receive data and CRC values or perform
+ * within-command recovery.
+ */
+ ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed);
+ if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY))
+ return 0;
+ else if (ret == DATAOUT_SEND_R2T) {
+ iscsit_set_dataout_sequence_values(cmd);
+ iscsit_build_r2ts_for_cmd(cmd, conn, 0);
+ } else if (ret == DATAOUT_SEND_TO_TRANSPORT) {
+ /*
+ * Handle extra special case for out of order
+ * Unsolicited Data Out.
+ */
+ spin_lock_bh(&cmd->istate_lock);
+ ooo_cmdsn = (cmd->cmd_flags & ICF_OOO_CMDSN);
+ cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+ cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+ spin_unlock_bh(&cmd->istate_lock);
+
+ iscsit_stop_dataout_timer(cmd);
+ return (!ooo_cmdsn) ? transport_generic_handle_data(
+ &cmd->se_cmd) : 0;
+ } else /* DATAOUT_CANNOT_RECOVER */
+ return -1;
+
+ return 0;
+}
+
+static int iscsit_handle_nop_out(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ unsigned char *ping_data = NULL;
+ int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size;
+ u32 checksum, data_crc, padding = 0, payload_length;
+ u64 lun;
+ struct iscsi_cmd *cmd = NULL;
+ struct kvec *iov = NULL;
+ struct iscsi_nopout *hdr;
+
+ hdr = (struct iscsi_nopout *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ lun = get_unaligned_le64(&hdr->lun);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->ttt = be32_to_cpu(hdr->ttt);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+
+ if ((hdr->itt == 0xFFFFFFFF) && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
+ " not set, protocol error.\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("NOPOUT Ping Data DataSegmentLength: %u is"
+ " greater than MaxRecvDataSegmentLength: %u, protocol"
+ " error.\n", payload_length,
+ conn->conn_ops->MaxRecvDataSegmentLength);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x,"
+ " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
+ (hdr->itt == 0xFFFFFFFF) ? "Response" : "Request",
+ hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
+ payload_length);
+ /*
+ * This is not a response to an Unsolicited NopIN, which means
+ * it can either be a NOPOUT ping request (with a valid ITT),
+ * or a NOPOUT not requesting a NOPIN (with a reserved ITT).
+ * Either way, make sure we allocate a struct iscsi_cmd, as both
+ * can contain ping data.
+ */
+ if (hdr->ttt == 0xFFFFFFFF) {
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return iscsit_add_reject(
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, buf, conn);
+
+ cmd->iscsi_opcode = ISCSI_OP_NOOP_OUT;
+ cmd->i_state = ISTATE_SEND_NOPIN;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ?
+ 1 : 0);
+ conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->data_direction = DMA_NONE;
+ }
+
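+ /*
+ * Receive any ping data (plus pad bytes and an optional DataDigest)
+ * into a locally allocated buffer so it can be echoed back in the
+ * NopIN response.
+ */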
+ if (payload_length && (hdr->ttt == 0xFFFFFFFF)) {
+ rx_size = payload_length;
+ ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
+ if (!ping_data) {
+ pr_err("Unable to allocate memory for"
+ " NOPOUT ping data.\n");
+ ret = -1;
+ goto out;
+ }
+
+ iov = &cmd->iov_misc[0];
+ iov[niov].iov_base = ping_data;
+ iov[niov++].iov_len = payload_length;
+
+ padding = ((-payload_length) & 3);
+ if (padding != 0) {
+ pr_debug("Receiving %u additional bytes"
+ " for padding.\n", padding);
+ iov[niov].iov_base = &cmd->pad_bytes;
+ iov[niov++].iov_len = padding;
+ rx_size += padding;
+ }
+ if (conn->conn_ops->DataDigest) {
+ iov[niov].iov_base = &checksum;
+ iov[niov++].iov_len = ISCSI_CRC_LEN;
+ rx_size += ISCSI_CRC_LEN;
+ }
+
+ rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
+ if (rx_got != rx_size) {
+ ret = -1;
+ goto out;
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+ ping_data, payload_length,
+ padding, cmd->pad_bytes,
+ (u8 *)&data_crc);
+
+ if (checksum != data_crc) {
+ pr_err("Ping data CRC32C DataDigest"
+ " 0x%08x does not match computed 0x%08x\n",
+ checksum, data_crc);
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " NOPOUT Ping DataCRC failure while in"
+ " ERL=0.\n");
+ ret = -1;
+ goto out;
+ } else {
+ /*
+ * Silently drop this PDU and let the
+ * initiator plug the CmdSN gap.
+ */
+ pr_debug("Dropping NOPOUT"
+ " Command CmdSN: 0x%08x due to"
+ " DataCRC error.\n", hdr->cmdsn);
+ ret = 0;
+ goto out;
+ }
+ } else {
+ pr_debug("Got CRC32C DataDigest"
+ " 0x%08x for %u bytes of ping data.\n",
+ checksum, payload_length);
+ }
+ }
+
+ ping_data[payload_length] = '\0';
+ /*
+ * Attach ping data to struct iscsi_cmd->buf_ptr.
+ */
+ cmd->buf_ptr = (void *)ping_data;
+ cmd->buf_ptr_size = payload_length;
+
+ pr_debug("Got %u bytes of NOPOUT ping"
+ " data.\n", payload_length);
+ pr_debug("Ping Data: \"%s\"\n", ping_data);
+ }
+
+ if (hdr->itt != 0xFFFFFFFF) {
+ if (!cmd) {
+ pr_err("Checking CmdSN for NOPOUT,"
+ " but cmd is NULL!\n");
+ return -1;
+ }
+ /*
+ * Initiator is expecting a NopIN ping reply.
+ */
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+ iscsit_add_cmd_to_response_queue(cmd, conn,
+ cmd->i_state);
+ return 0;
+ }
+
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ ret = 0;
+ goto ping_out;
+ }
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+
+ return 0;
+ }
+
+ if (hdr->ttt != 0xFFFFFFFF) {
+ /*
+ * This was a response to an unsolicited NOPIN ping.
+ */
+ cmd = iscsit_find_cmd_from_ttt(conn, hdr->ttt);
+ if (!cmd)
+ return -1;
+
+ iscsit_stop_nopin_response_timer(conn);
+
+ cmd->i_state = ISTATE_REMOVE;
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+ iscsit_start_nopin_timer(conn);
+ } else {
+ /*
+ * Initiator is not expecting a NOPIN in response.
+ * Just ignore for now.
+ *
+ * iSCSI v19-91 10.18
+ * "A NOP-OUT may also be used to confirm a changed
+ * ExpStatSN if another PDU will not be available
+ * for a long time."
+ */
+ ret = 0;
+ goto out;
+ }
+
+ return 0;
+out:
+ if (cmd)
+ iscsit_release_cmd(cmd);
+ping_out:
+ kfree(ping_data);
+ return ret;
+}
+
+static int iscsit_handle_task_mgt_cmd(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ struct iscsi_cmd *cmd;
+ struct se_tmr_req *se_tmr;
+ struct iscsi_tmr_req *tmr_req;
+ struct iscsi_tm *hdr;
+ u32 payload_length;
+ int out_of_order_cmdsn = 0;
+ int ret;
+ u8 function;
+
+ hdr = (struct iscsi_tm *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->rtt = be32_to_cpu(hdr->rtt);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+ hdr->refcmdsn = be32_to_cpu(hdr->refcmdsn);
+ hdr->exp_datasn = be32_to_cpu(hdr->exp_datasn);
+ hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
+ function = hdr->flags;
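+ /*
+ * With the Final bit masked off, the remaining bits of the flags
+ * byte carry the Task Management Function code.
+ */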
+
+ pr_debug("Got Task Management Request ITT: 0x%08x, CmdSN:"
+ " 0x%08x, Function: 0x%02x, RefTaskTag: 0x%08x, RefCmdSN:"
+ " 0x%08x, CID: %hu\n", hdr->itt, hdr->cmdsn, function,
+ hdr->rtt, hdr->refcmdsn, conn->cid);
+
+ if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
+ ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
+ (hdr->rtt != ISCSI_RESERVED_TAG))) {
+ pr_err("RefTaskTag should be set to 0xFFFFFFFF.\n");
+ hdr->rtt = ISCSI_RESERVED_TAG;
+ }
+
+ if ((function == ISCSI_TM_FUNC_TASK_REASSIGN) &&
+ !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ pr_err("Task Management Request TASK_REASSIGN not"
+ " issued as immediate command, bad iSCSI Initiator"
+ "implementation\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+ if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
+ (hdr->refcmdsn != ISCSI_RESERVED_TAG))
+ hdr->refcmdsn = ISCSI_RESERVED_TAG;
+
+ cmd = iscsit_allocate_se_cmd_for_tmr(conn, function);
+ if (!cmd)
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, buf, conn);
+
+ cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
+ cmd->i_state = ISTATE_SEND_TASKMGTRSP;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+ cmd->init_task_tag = hdr->itt;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ se_tmr = cmd->se_cmd.se_tmr_req;
+ tmr_req = cmd->tmr_req;
+ /*
+ * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
+ */
+ if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
+ ret = iscsit_get_lun_for_tmr(cmd,
+ get_unaligned_le64(&hdr->lun));
+ if (ret < 0) {
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
+ goto attach;
+ }
+ }
+
+ switch (function) {
+ case ISCSI_TM_FUNC_ABORT_TASK:
+ se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
+ if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ goto attach;
+ }
+ break;
+ case ISCSI_TM_FUNC_ABORT_TASK_SET:
+ case ISCSI_TM_FUNC_CLEAR_ACA:
+ case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ break;
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
+ goto attach;
+ }
+ break;
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
+ goto attach;
+ }
+ break;
+ case ISCSI_TM_FUNC_TASK_REASSIGN:
+ se_tmr->response = iscsit_tmr_task_reassign(cmd, buf);
+ /*
+ * Perform sanity checks on the ExpDataSN only if the
+ * TASK_REASSIGN was successful.
+ */
+ if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
+ break;
+
+ if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_INVALID, 1, 1,
+ buf, cmd);
+ break;
+ default:
+ pr_err("Unknown TMR function: 0x%02x, protocol"
+ " error.\n", function);
+ cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
+ goto attach;
+ }
+
+ if ((function != ISCSI_TM_FUNC_TASK_REASSIGN) &&
+ (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
+ se_tmr->call_transport = 1;
+attach:
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+ out_of_order_cmdsn = 1;
+ else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ return 0;
+ } else { /* (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) */
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+ }
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ if (out_of_order_cmdsn)
+ return 0;
+ /*
+ * Found the referenced task, send to transport for processing.
+ */
+ if (se_tmr->call_transport)
+ return transport_generic_handle_tmr(&cmd->se_cmd);
+
+ /*
+ * Could not find the referenced LUN, task, or Task Management
+ * command not authorized or supported. Change state and
+ * let the tx_thread send the response.
+ *
+ * For connection recovery, this is also the default action for
+ * TMR TASK_REASSIGN.
+ */
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+}
+
+/* #warning FIXME: Support Text Command parameters besides SendTargets */
+static int iscsit_handle_text_cmd(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ char *text_ptr, *text_in;
+ int cmdsn_ret, niov = 0, rx_got, rx_size;
+ u32 checksum = 0, data_crc = 0, payload_length;
+ u32 padding = 0, pad_bytes = 0, text_length = 0;
+ struct iscsi_cmd *cmd;
+ struct kvec iov[3];
+ struct iscsi_text *hdr;
+
+ hdr = (struct iscsi_text *) buf;
+ payload_length = ntoh24(hdr->dlength);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->ttt = be32_to_cpu(hdr->ttt);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+
+ if (payload_length > conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("Unable to accept text parameter length: %u"
+ "greater than MaxRecvDataSegmentLength %u.\n",
+ payload_length, conn->conn_ops->MaxRecvDataSegmentLength);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
+ " ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
+ hdr->exp_statsn, payload_length);
+
+ rx_size = text_length = payload_length;
+ if (text_length) {
+ text_in = kzalloc(text_length, GFP_KERNEL);
+ if (!text_in) {
+ pr_err("Unable to allocate memory for"
+ " incoming text parameters\n");
+ return -1;
+ }
+
+ memset(iov, 0, 3 * sizeof(struct kvec));
+ iov[niov].iov_base = text_in;
+ iov[niov++].iov_len = text_length;
+
+ padding = ((-payload_length) & 3);
+ if (padding != 0) {
+ iov[niov].iov_base = &pad_bytes;
+ iov[niov++].iov_len = padding;
+ rx_size += padding;
+ pr_debug("Receiving %u additional bytes"
+ " for padding.\n", padding);
+ }
+ if (conn->conn_ops->DataDigest) {
+ iov[niov].iov_base = &checksum;
+ iov[niov++].iov_len = ISCSI_CRC_LEN;
+ rx_size += ISCSI_CRC_LEN;
+ }
+
+ rx_got = rx_data(conn, &iov[0], niov, rx_size);
+ if (rx_got != rx_size) {
+ kfree(text_in);
+ return -1;
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+ text_in, text_length,
+ padding, (u8 *)&pad_bytes,
+ (u8 *)&data_crc);
+
+ if (checksum != data_crc) {
+ pr_err("Text data CRC32C DataDigest"
+ " 0x%08x does not match computed"
+ " 0x%08x\n", checksum, data_crc);
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " Text Data digest failure while in"
+ " ERL=0.\n");
+ kfree(text_in);
+ return -1;
+ } else {
+ /*
+ * Silently drop this PDU and let the
+ * initiator plug the CmdSN gap.
+ */
+ pr_debug("Dropping Text"
+ " Command CmdSN: 0x%08x due to"
+ " DataCRC error.\n", hdr->cmdsn);
+ kfree(text_in);
+ return 0;
+ }
+ } else {
+ pr_debug("Got CRC32C DataDigest"
+ " 0x%08x for %u bytes of text data.\n",
+ checksum, text_length);
+ }
+ }
+ text_in[text_length - 1] = '\0';
+ pr_debug("Successfully read %d bytes of text"
+ " data.\n", text_length);
+
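+ /*
+ * Only the SendTargets=All form of the Text Request is handled
+ * here; any other key or value is treated as an error.
+ */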
+ if (strncmp("SendTargets", text_in, 11) != 0) {
+ pr_err("Received Text Data that is not"
+ " SendTargets, cannot continue.\n");
+ kfree(text_in);
+ return -1;
+ }
+ text_ptr = strchr(text_in, '=');
+ if (!text_ptr) {
+ pr_err("No \"=\" separator found in Text Data,"
+ " cannot continue.\n");
+ kfree(text_in);
+ return -1;
+ }
+ if (strncmp("=All", text_ptr, 4) != 0) {
+ pr_err("Unable to locate All value for"
+ " SendTargets key, cannot continue.\n");
+ kfree(text_in);
+ return -1;
+ }
+/*#warning Support SendTargets=(iSCSI Target Name/Nothing) values. */
+ kfree(text_in);
+ }
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, buf, conn);
+
+ cmd->iscsi_opcode = ISCSI_OP_TEXT;
+ cmd->i_state = ISTATE_SEND_TEXTRSP;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+ conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->data_direction = DMA_NONE;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+
+ return 0;
+ }
+
+ return iscsit_execute_cmd(cmd, 0);
+}
+
+int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_conn *conn_p;
+ struct iscsi_session *sess = conn->sess;
+
+ pr_debug("Received logout request CLOSESESSION on CID: %hu"
+ " for SID: %u.\n", conn->cid, conn->sess->sid);
+
+ atomic_set(&sess->session_logout, 1);
+ atomic_set(&conn->conn_logout_remove, 1);
+ conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_SESSION;
+
+ iscsit_inc_conn_usage_count(conn);
+ iscsit_inc_session_usage_count(sess);
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn_p, &sess->sess_conn_list, conn_list) {
+ if (conn_p->conn_state != TARG_CONN_STATE_LOGGED_IN)
+ continue;
+
+ pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+ conn_p->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ return 0;
+}
+
+int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_conn *l_conn;
+ struct iscsi_session *sess = conn->sess;
+
+ pr_debug("Received logout request CLOSECONNECTION for CID:"
+ " %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
+
+ /*
+ * A Logout Request with a CLOSECONNECTION reason code for a CID
+ * can arrive on a connection with a differing CID.
+ */
+ if (conn->cid == cmd->logout_cid) {
+ spin_lock_bh(&conn->state_lock);
+ pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+ conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+
+ atomic_set(&conn->conn_logout_remove, 1);
+ conn->conn_logout_reason = ISCSI_LOGOUT_REASON_CLOSE_CONNECTION;
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_unlock_bh(&conn->state_lock);
+ } else {
+ /*
+ * Handle all different cid CLOSECONNECTION requests in
+ * iscsit_logout_post_handler_diffcid() so as to give enough
+ * time for any non-immediate command's CmdSN to be
+ * acknowledged on the connection in question.
+ *
+ * Here we simply make sure the CID is still around.
+ */
+ l_conn = iscsit_get_conn_from_cid(sess,
+ cmd->logout_cid);
+ if (!l_conn) {
+ cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
+ iscsit_add_cmd_to_response_queue(cmd, conn,
+ cmd->i_state);
+ return 0;
+ }
+
+ iscsit_dec_conn_usage_count(l_conn);
+ }
+
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ return 0;
+}
+
+int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+
+ pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
+ " CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
+
+ if (sess->sess_ops->ErrorRecoveryLevel != 2) {
+ pr_err("Received Logout Request REMOVECONNFORRECOVERY"
+ " while ERL!=2.\n");
+ cmd->logout_response = ISCSI_LOGOUT_RECOVERY_UNSUPPORTED;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+ }
+
+ if (conn->cid == cmd->logout_cid) {
+ pr_err("Received Logout Request REMOVECONNFORRECOVERY"
+ " with CID: %hu on CID: %hu, implementation error.\n",
+ cmd->logout_cid, conn->cid);
+ cmd->logout_response = ISCSI_LOGOUT_CLEANUP_FAILED;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+ }
+
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ return 0;
+}
+
+static int iscsit_handle_logout_cmd(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ int cmdsn_ret, logout_remove = 0;
+ u8 reason_code = 0;
+ struct iscsi_cmd *cmd;
+ struct iscsi_logout *hdr;
+ struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
+
+ hdr = (struct iscsi_logout *) buf;
+ reason_code = (hdr->flags & 0x7f);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->cid = be16_to_cpu(hdr->cid);
+ hdr->cmdsn = be32_to_cpu(hdr->cmdsn);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+
+ if (tiqn) {
+ spin_lock(&tiqn->logout_stats.lock);
+ if (reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION)
+ tiqn->logout_stats.normal_logouts++;
+ else
+ tiqn->logout_stats.abnormal_logouts++;
+ spin_unlock(&tiqn->logout_stats.lock);
+ }
+
+ pr_debug("Got Logout Request ITT: 0x%08x CmdSN: 0x%08x"
+ " ExpStatSN: 0x%08x Reason: 0x%02x CID: %hu on CID: %hu\n",
+ hdr->itt, hdr->cmdsn, hdr->exp_statsn, reason_code,
+ hdr->cid, conn->cid);
+
+ if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
+ pr_err("Received logout request on connection that"
+ " is not in logged in state, ignoring request.\n");
+ return 0;
+ }
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
+ buf, conn);
+
+ cmd->iscsi_opcode = ISCSI_OP_LOGOUT;
+ cmd->i_state = ISTATE_SEND_LOGOUTRSP;
+ cmd->immediate_cmd = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
+ conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ cmd->cmd_sn = hdr->cmdsn;
+ cmd->exp_stat_sn = hdr->exp_statsn;
+ cmd->logout_cid = hdr->cid;
+ cmd->logout_reason = reason_code;
+ cmd->data_direction = DMA_NONE;
+
+ /*
+ * We need to sleep in these cases (by returning 1) until the Logout
+ * Response gets sent in the tx thread.
+ */
+ if ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_SESSION) ||
+ ((reason_code == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) &&
+ (hdr->cid == conn->cid)))
+ logout_remove = 1;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (reason_code != ISCSI_LOGOUT_REASON_RECOVERY)
+ iscsit_ack_from_expstatsn(conn, hdr->exp_statsn);
+
+ /*
+ * Immediate commands are executed, well, immediately.
+ * Non-Immediate Logout Commands are executed in CmdSN order.
+ */
+ if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
+ int ret = iscsit_execute_cmd(cmd, 0);
+
+ if (ret < 0)
+ return ret;
+ } else {
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ logout_remove = 0;
+ } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+ }
+
+ return logout_remove;
+}
+
+static int iscsit_handle_snack(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ u32 unpacked_lun;
+ u64 lun;
+ struct iscsi_snack *hdr;
+
+ hdr = (struct iscsi_snack *) buf;
+ hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
+ lun = get_unaligned_le64(&hdr->lun);
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+ hdr->itt = be32_to_cpu(hdr->itt);
+ hdr->ttt = be32_to_cpu(hdr->ttt);
+ hdr->exp_statsn = be32_to_cpu(hdr->exp_statsn);
+ hdr->begrun = be32_to_cpu(hdr->begrun);
+ hdr->runlength = be32_to_cpu(hdr->runlength);
+
+ pr_debug("Got ISCSI_INIT_SNACK, ITT: 0x%08x, ExpStatSN:"
+ " 0x%08x, Type: 0x%02x, BegRun: 0x%08x, RunLength: 0x%08x,"
+ " CID: %hu\n", hdr->itt, hdr->exp_statsn, hdr->flags,
+ hdr->begrun, hdr->runlength, conn->cid);
+
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Initiator sent SNACK request while in"
+ " ErrorRecoveryLevel=0.\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+ /*
+ * SNACK_DATA and SNACK_R2T are both 0, so check which function to
+ * call from inside iscsi_send_recovery_datain_or_r2t().
+ */
+ switch (hdr->flags & ISCSI_FLAG_SNACK_TYPE_MASK) {
+ case 0:
+ return iscsit_handle_recovery_datain_or_r2t(conn, buf,
+ hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength);
+ case ISCSI_FLAG_SNACK_TYPE_STATUS:
+ return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,
+ hdr->begrun, hdr->runlength);
+ case ISCSI_FLAG_SNACK_TYPE_DATA_ACK:
+ return iscsit_handle_data_ack(conn, hdr->ttt, hdr->begrun,
+ hdr->runlength);
+ case ISCSI_FLAG_SNACK_TYPE_RDATA:
+ /* FIXME: Support R-Data SNACK */
+ pr_err("R-Data SNACK Not Supported.\n");
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ default:
+ pr_err("Unknown SNACK type 0x%02x, protocol"
+ " error.\n", hdr->flags & 0x0f);
+ return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buf, conn);
+ }
+
+ return 0;
+}
+
+static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
+{
+ if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
+ wait_for_completion_interruptible_timeout(
+ &conn->rx_half_close_comp,
+ ISCSI_RX_THREAD_TCP_TIMEOUT * HZ);
+ }
+}
+
+static int iscsit_handle_immediate_data(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf,
+ u32 length)
+{
+ int iov_ret, rx_got = 0, rx_size = 0;
+ u32 checksum, iov_count = 0, padding = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct kvec *iov;
+
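+ /*
+ * Map the immediate data directly onto the command's SGL starting
+ * at write_data_done, then receive payload, pad bytes and the
+ * optional DataDigest with a single rx_data() call.
+ */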
+ iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length);
+ if (iov_ret < 0)
+ return IMMEDIATE_DATA_CANNOT_RECOVER;
+
+ rx_size = length;
+ iov_count = iov_ret;
+ iov = &cmd->iov_data[0];
+
+ padding = ((-length) & 3);
+ if (padding != 0) {
+ iov[iov_count].iov_base = cmd->pad_bytes;
+ iov[iov_count++].iov_len = padding;
+ rx_size += padding;
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iov[iov_count].iov_base = &checksum;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ rx_size += ISCSI_CRC_LEN;
+ }
+
+ rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
+
+ iscsit_unmap_iovec(cmd);
+
+ if (rx_got != rx_size) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+ return IMMEDIATE_DATA_CANNOT_RECOVER;
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ u32 data_crc;
+
+ data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+ cmd->write_data_done, length, padding,
+ cmd->pad_bytes);
+
+ if (checksum != data_crc) {
+ pr_err("ImmediateData CRC32C DataDigest 0x%08x"
+ " does not match computed 0x%08x\n", checksum,
+ data_crc);
+
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " Immediate Data digest failure while"
+ " in ERL=0.\n");
+ iscsit_add_reject_from_cmd(
+ ISCSI_REASON_DATA_DIGEST_ERROR,
+ 1, 0, buf, cmd);
+ return IMMEDIATE_DATA_CANNOT_RECOVER;
+ } else {
+ iscsit_add_reject_from_cmd(
+ ISCSI_REASON_DATA_DIGEST_ERROR,
+ 0, 0, buf, cmd);
+ return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
+ }
+ } else {
+ pr_debug("Got CRC32C DataDigest 0x%08x for"
+ " %u bytes of Immediate Data\n", checksum,
+ length);
+ }
+ }
+
+ cmd->write_data_done += length;
+
+ if (cmd->write_data_done == cmd->data_length) {
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+ cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+ spin_unlock_bh(&cmd->istate_lock);
+ }
+
+ return IMMEDIATE_DATA_NORMAL_OPERATION;
+}
+
+/*
+ * Called with sess->conn_lock held.
+ */
+/* #warning iscsi_build_conn_drop_async_message() only sends out on connections
+ with active network interface */
+static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd;
+ struct iscsi_conn *conn_p;
+ bool found = false;
+
+ /*
+ * Only send an Asynchronous Message on connections whose network
+ * interface is still functional.  Note that list_for_each_entry()
+ * never leaves the iterator NULL, so a separate found flag tracks
+ * whether a usable connection was located.
+ */
+ list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
+ if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
+ iscsit_inc_conn_usage_count(conn_p);
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return;
+
+ cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
+ if (!cmd) {
+ iscsit_dec_conn_usage_count(conn_p);
+ return;
+ }
+
+ cmd->logout_cid = conn->cid;
+ cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
+ cmd->i_state = ISTATE_SEND_ASYNCMSG;
+
+ spin_lock_bh(&conn_p->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn_p->conn_cmd_list);
+ spin_unlock_bh(&conn_p->cmd_lock);
+
+ iscsit_add_cmd_to_response_queue(cmd, conn_p, cmd->i_state);
+ iscsit_dec_conn_usage_count(conn_p);
+}
+
+static int iscsit_send_conn_drop_async_message(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_async *hdr;
+
+ cmd->tx_size = ISCSI_HDR_LEN;
+ cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
+
+ hdr = (struct iscsi_async *) cmd->pdu;
+ hdr->opcode = ISCSI_OP_ASYNC_EVENT;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ cmd->init_task_tag = 0xFFFFFFFF;
+ cmd->targ_xfer_tag = 0xFFFFFFFF;
+ put_unaligned_be64(0xFFFFFFFFFFFFFFFFULL, &hdr->rsvd4[0]);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
+ hdr->param1 = cpu_to_be16(cmd->logout_cid);
+ hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
+ hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ cmd->tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest to"
+ " Async Message 0x%08x\n", *header_digest);
+ }
+
+ cmd->iov_misc[0].iov_base = cmd->pdu;
+ cmd->iov_misc[0].iov_len = cmd->tx_size;
+ cmd->iov_misc_count = 1;
+
+ pr_debug("Sending Connection Dropped Async Message StatSN:"
+ " 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
+ cmd->logout_cid, conn->cid);
+ return 0;
+}
+
+static int iscsit_send_data_in(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ int *eodr)
+{
+ int iov_ret = 0, set_statsn = 0;
+ u32 iov_count = 0, tx_size = 0;
+ struct iscsi_datain datain;
+ struct iscsi_datain_req *dr;
+ struct iscsi_data_rsp *hdr;
+ struct kvec *iov;
+
+ memset(&datain, 0, sizeof(struct iscsi_datain));
+ dr = iscsit_get_datain_values(cmd, &datain);
+ if (!dr) {
+ pr_err("iscsit_get_datain_values failed for ITT: 0x%08x\n",
+ cmd->init_task_tag);
+ return -1;
+ }
+
+ /*
+ * Be paranoid and double check the logic for now.
+ */
+ if ((datain.offset + datain.length) > cmd->data_length) {
+ pr_err("Command ITT: 0x%08x, datain.offset: %u and"
+ " datain.length: %u exceeds cmd->data_length: %u\n",
+ cmd->init_task_tag, datain.offset, datain.length,
+ cmd->data_length);
+ return -1;
+ }
+
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->tx_data_octets += datain.length;
+ if (conn->sess->se_sess->se_node_acl) {
+ spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
+ spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
+ }
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+ /*
+ * Special case for successful execution w/ both DATAIN
+ * and Sense Data.
+ */
+ if ((datain.flags & ISCSI_FLAG_DATA_STATUS) &&
+ (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
+ datain.flags &= ~ISCSI_FLAG_DATA_STATUS;
+ else {
+ if ((dr->dr_complete == DATAIN_COMPLETE_NORMAL) ||
+ (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ cmd->stat_sn = conn->stat_sn++;
+ set_statsn = 1;
+ } else if (dr->dr_complete ==
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
+ set_statsn = 1;
+ }
+
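+ /*
+ * Only advertise a real StatSN when set_statsn was flagged above;
+ * otherwise the StatSN field is left at the reserved value
+ * 0xFFFFFFFF when the header is built below.
+ */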
+ hdr = (struct iscsi_data_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_SCSI_DATA_IN;
+ hdr->flags = datain.flags;
+ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+ if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
+ hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
+ hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
+ hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ }
+ }
+ hton24(hdr->dlength, datain.length);
+ if (hdr->flags & ISCSI_FLAG_DATA_ACK)
+ int_to_scsilun(cmd->se_cmd.orig_fe_lun,
+ (struct scsi_lun *)&hdr->lun);
+ else
+ put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
+
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->ttt = (hdr->flags & ISCSI_FLAG_DATA_ACK) ?
+ cpu_to_be32(cmd->targ_xfer_tag) :
+ 0xFFFFFFFF;
+ hdr->statsn = (set_statsn) ? cpu_to_be32(cmd->stat_sn) :
+ 0xFFFFFFFF;
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->datasn = cpu_to_be32(datain.data_sn);
+ hdr->offset = cpu_to_be32(datain.offset);
+
+ iov = &cmd->iov_data[0];
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attaching CRC32 HeaderDigest"
+ " for DataIN PDU 0x%08x\n", *header_digest);
+ }
+
+ iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length);
+ if (iov_ret < 0)
+ return -1;
+
+ iov_count += iov_ret;
+ tx_size += datain.length;
+
+ cmd->padding = ((-datain.length) & 3);
+ if (cmd->padding) {
+ iov[iov_count].iov_base = cmd->pad_bytes;
+ iov[iov_count++].iov_len = cmd->padding;
+ tx_size += cmd->padding;
+
+ pr_debug("Attaching %u padding bytes\n",
+ cmd->padding);
+ }
+ if (conn->conn_ops->DataDigest) {
+ cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
+ datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
+
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attached CRC32C DataDigest %d bytes, crc"
+ " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
+ }
+
+ cmd->iov_data_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
+ " DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
+ cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
+ ntohl(hdr->offset), datain.length, conn->cid);
+
+ if (dr->dr_complete) {
+ *eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
+ 2 : 1;
+ iscsit_free_datain_req(cmd, dr);
+ }
+
+ return 0;
+}
+
+static int iscsit_send_logout_response(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int niov = 0, tx_size;
+ struct iscsi_conn *logout_conn = NULL;
+ struct iscsi_conn_recovery *cr = NULL;
+ struct iscsi_session *sess = conn->sess;
+ struct kvec *iov;
+ struct iscsi_logout_rsp *hdr;
+ /*
+ * The actual shutting down of Sessions and/or Connections
+ * for CLOSESESSION and CLOSECONNECTION Logout Requests
+ * is done in iscsit_logout_post_handler().
+ */
+ switch (cmd->logout_reason) {
+ case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+ pr_debug("iSCSI session logout successful, setting"
+ " logout response to ISCSI_LOGOUT_SUCCESS.\n");
+ cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+ break;
+ case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+ if (cmd->logout_response == ISCSI_LOGOUT_CID_NOT_FOUND)
+ break;
+ /*
+ * For CLOSECONNECTION logout requests carrying
+ * a matching logout CID -> local CID, the reference
+ * for the local CID will have been incremented in
+ * iscsit_logout_closeconnection().
+ *
+ * For CLOSECONNECTION logout requests carrying
+ * a different CID than the connection it arrived
+ * on, the connection responding to cmd->logout_cid
+ * is stopped in iscsit_logout_post_handler_diffcid().
+ */
+
+ pr_debug("iSCSI CID: %hu logout on CID: %hu"
+ " successful.\n", cmd->logout_cid, conn->cid);
+ cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+ break;
+ case ISCSI_LOGOUT_REASON_RECOVERY:
+ if ((cmd->logout_response == ISCSI_LOGOUT_RECOVERY_UNSUPPORTED) ||
+ (cmd->logout_response == ISCSI_LOGOUT_CLEANUP_FAILED))
+ break;
+ /*
+ * If the connection is still active from our point of view
+ * force connection recovery to occur.
+ */
+ logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
+ cmd->logout_cid);
+ if (logout_conn) {
+ iscsit_connection_reinstatement_rcfr(logout_conn);
+ iscsit_dec_conn_usage_count(logout_conn);
+ }
+
+ cr = iscsit_get_inactive_connection_recovery_entry(
+ conn->sess, cmd->logout_cid);
+ if (!cr) {
+ pr_err("Unable to locate CID: %hu for"
+ " REMOVECONNFORRECOVERY Logout Request.\n",
+ cmd->logout_cid);
+ cmd->logout_response = ISCSI_LOGOUT_CID_NOT_FOUND;
+ break;
+ }
+
+ iscsit_discard_cr_cmds_by_expstatsn(cr, cmd->exp_stat_sn);
+
+ pr_debug("iSCSI REMOVECONNFORRECOVERY logout"
+ " for recovery for CID: %hu on CID: %hu successful.\n",
+ cmd->logout_cid, conn->cid);
+ cmd->logout_response = ISCSI_LOGOUT_SUCCESS;
+ break;
+ default:
+ pr_err("Unknown cmd->logout_reason: 0x%02x\n",
+ cmd->logout_reason);
+ return -1;
+ }
+
+ tx_size = ISCSI_HDR_LEN;
+ hdr = (struct iscsi_logout_rsp *)cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_LOGOUT_RSP;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hdr->response = cmd->logout_response;
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+ iov[niov].iov_base = cmd->pdu;
+ iov[niov++].iov_len = ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest to"
+ " Logout Response 0x%08x\n", *header_digest);
+ }
+ cmd->iov_misc_count = niov;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Sending Logout Response ITT: 0x%08x StatSN:"
+ " 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
+ cmd->init_task_tag, cmd->stat_sn, hdr->response,
+ cmd->logout_cid, conn->cid);
+
+ return 0;
+}
+
+/*
+ * Unsolicited NOPIN, either requesting a response or not.
+ */
+static int iscsit_send_unsolicited_nopin(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ int want_response)
+{
+ int tx_size = ISCSI_HDR_LEN;
+ struct iscsi_nopin *hdr;
+
+ hdr = (struct iscsi_nopin *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_NOOP_IN;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
+ cmd->stat_sn = conn->stat_sn;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest to"
+ " NopIN 0x%08x\n", *header_digest);
+ }
+
+ cmd->iov_misc[0].iov_base = cmd->pdu;
+ cmd->iov_misc[0].iov_len = tx_size;
+ cmd->iov_misc_count = 1;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
+ " 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
+
+ return 0;
+}
+
+static int iscsit_send_nopin_response(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int niov = 0, tx_size;
+ u32 padding = 0;
+ struct kvec *iov;
+ struct iscsi_nopin *hdr;
+
+ tx_size = ISCSI_HDR_LEN;
+ hdr = (struct iscsi_nopin *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_NOOP_IN;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hton24(hdr->dlength, cmd->buf_ptr_size);
+ put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+ iov[niov].iov_base = cmd->pdu;
+ iov[niov++].iov_len = ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest"
+ " to NopIn 0x%08x\n", *header_digest);
+ }
+
+ /*
+ * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
+ * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
+ */
+ if (cmd->buf_ptr_size) {
+ iov[niov].iov_base = cmd->buf_ptr;
+ iov[niov++].iov_len = cmd->buf_ptr_size;
+ tx_size += cmd->buf_ptr_size;
+
+ pr_debug("Echoing back %u bytes of ping"
+ " data.\n", cmd->buf_ptr_size);
+
+ padding = ((-cmd->buf_ptr_size) & 3);
+ if (padding != 0) {
+ iov[niov].iov_base = &cmd->pad_bytes;
+ iov[niov++].iov_len = padding;
+ tx_size += padding;
+ pr_debug("Attaching %u additional"
+ " padding bytes.\n", padding);
+ }
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ cmd->buf_ptr, cmd->buf_ptr_size,
+ padding, (u8 *)&cmd->pad_bytes,
+ (u8 *)&cmd->data_crc);
+
+ iov[niov].iov_base = &cmd->data_crc;
+ iov[niov++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attached DataDigest for %u"
+ " bytes of ping data, CRC 0x%08x\n",
+ cmd->buf_ptr_size, cmd->data_crc);
+ }
+ }
+
+ cmd->iov_misc_count = niov;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:"
+ " 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag,
+ cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
+
+ return 0;
+}
+
+int iscsit_send_r2t(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int tx_size = 0;
+ struct iscsi_r2t *r2t;
+ struct iscsi_r2t_rsp *hdr;
+
+ r2t = iscsit_get_r2t_from_list(cmd);
+ if (!r2t)
+ return -1;
+
+ hdr = (struct iscsi_r2t_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_R2T;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ int_to_scsilun(cmd->se_cmd.orig_fe_lun,
+ (struct scsi_lun *)&hdr->lun);
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
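+ /*
+ * Allocate a fresh Target Transfer Tag for this R2T from the
+ * session-wide counter, skipping the reserved value 0xFFFFFFFF
+ * if the counter wraps onto it.
+ */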
+ spin_lock_bh(&conn->sess->ttt_lock);
+ r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ if (r2t->targ_xfer_tag == 0xFFFFFFFF)
+ r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ spin_unlock_bh(&conn->sess->ttt_lock);
+ hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
+ hdr->statsn = cpu_to_be32(conn->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
+ hdr->data_offset = cpu_to_be32(r2t->offset);
+ hdr->data_length = cpu_to_be32(r2t->xfer_len);
+
+ cmd->iov_misc[0].iov_base = cmd->pdu;
+ cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for R2T"
+ " PDU 0x%08x\n", *header_digest);
+ }
+
+ pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
+ " 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
+ (!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
+ r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
+ r2t->offset, r2t->xfer_len, conn->cid);
+
+ cmd->iov_misc_count = 1;
+ cmd->tx_size = tx_size;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ r2t->sent_r2t = 1;
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return 0;
+}
+
+/*
+ * type 0: Normal Operation.
+ * type 1: Called from Storage Transport.
+ * type 2: Called from iscsi_task_reassign_complete_write() for
+ * connection recovery.
+ */
+int iscsit_build_r2ts_for_cmd(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ int type)
+{
+ int first_r2t = 1;
+ u32 offset = 0, xfer_len = 0;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ if (cmd->cmd_flags & ICF_SENT_LAST_R2T) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return 0;
+ }
+
+ if (conn->sess->sess_ops->DataSequenceInOrder && (type != 2))
+ if (cmd->r2t_offset < cmd->write_data_done)
+ cmd->r2t_offset = cmd->write_data_done;
+
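+ /*
+ * Keep queueing R2Ts until MaxOutstandingR2T is reached or the
+ * final R2T for the command has been generated.  With
+ * DataSequenceInOrder each R2T solicits up to MaxBurstLength bytes
+ * starting at r2t_offset; otherwise offset/length come from the
+ * per-command sequence list.
+ */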
+ while (cmd->outstanding_r2ts < conn->sess->sess_ops->MaxOutstandingR2T) {
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ offset = cmd->r2t_offset;
+
+ if (first_r2t && (type == 2)) {
+ xfer_len = ((offset +
+ (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len) >
+ cmd->data_length) ?
+ (cmd->data_length - offset) :
+ (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len));
+ } else {
+ xfer_len = ((offset +
+ conn->sess->sess_ops->MaxBurstLength) >
+ cmd->data_length) ?
+ (cmd->data_length - offset) :
+ conn->sess->sess_ops->MaxBurstLength;
+ }
+ cmd->r2t_offset += xfer_len;
+
+ if (cmd->r2t_offset == cmd->data_length)
+ cmd->cmd_flags |= ICF_SENT_LAST_R2T;
+ } else {
+ struct iscsi_seq *seq;
+
+ seq = iscsit_get_seq_holder_for_r2t(cmd);
+ if (!seq) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ offset = seq->offset;
+ xfer_len = seq->xfer_len;
+
+ if (cmd->seq_send_order == cmd->seq_count)
+ cmd->cmd_flags |= ICF_SENT_LAST_R2T;
+ }
+ cmd->outstanding_r2ts++;
+ first_r2t = 0;
+
+ if (iscsit_add_r2t_to_list(cmd, offset, xfer_len, 0, 0) < 0) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ if (cmd->cmd_flags & ICF_SENT_LAST_R2T)
+ break;
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return 0;
+}
+
+static int iscsit_send_status(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ u8 iov_count = 0, recovery;
+ u32 padding = 0, tx_size = 0;
+ struct iscsi_scsi_rsp *hdr;
+ struct kvec *iov;
+
+ recovery = (cmd->i_state != ISTATE_SEND_STATUS);
+ if (!recovery)
+ cmd->stat_sn = conn->stat_sn++;
+
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->rsp_pdus++;
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+
+ hdr = (struct iscsi_scsi_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
+ hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
+ hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
+ hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ }
+ hdr->response = cmd->iscsi_response;
+ hdr->cmd_status = cmd->se_cmd.scsi_status;
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ /*
+ * Attach SENSE DATA payload to iSCSI Response PDU
+ */
+ if (cmd->se_cmd.sense_buffer &&
+ ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
+ (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
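+		/*
+		 * Pad the SENSE payload out to a 4-byte boundary, as iSCSI
+		 * data segments must end on a word boundary.
+		 */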
+ padding = -(cmd->se_cmd.scsi_sense_length) & 3;
+ hton24(hdr->dlength, cmd->se_cmd.scsi_sense_length);
+ iov[iov_count].iov_base = cmd->se_cmd.sense_buffer;
+ iov[iov_count++].iov_len =
+ (cmd->se_cmd.scsi_sense_length + padding);
+ tx_size += cmd->se_cmd.scsi_sense_length;
+
+ if (padding) {
+ memset(cmd->se_cmd.sense_buffer +
+ cmd->se_cmd.scsi_sense_length, 0, padding);
+ tx_size += padding;
+ pr_debug("Adding %u bytes of padding to"
+ " SENSE.\n", padding);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ cmd->se_cmd.sense_buffer,
+ (cmd->se_cmd.scsi_sense_length + padding),
+ 0, NULL, (u8 *)&cmd->data_crc);
+
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attaching CRC32 DataDigest for"
+ " SENSE, %u bytes CRC 0x%08x\n",
+ (cmd->se_cmd.scsi_sense_length + padding),
+ cmd->data_crc);
+ }
+
+ pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
+ " Response PDU\n",
+ cmd->se_cmd.scsi_sense_length);
+ }
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for Response"
+ " PDU 0x%08x\n", *header_digest);
+ }
+
+ cmd->iov_misc_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
+ " Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
+ (!recovery) ? "" : "Recovery ", cmd->init_task_tag,
+ cmd->stat_sn, hdr->response, cmd->se_cmd.scsi_status, conn->cid);
+
+ return 0;
+}
+
+static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
+{
+ switch (se_tmr->response) {
+ case TMR_FUNCTION_COMPLETE:
+ return ISCSI_TMF_RSP_COMPLETE;
+ case TMR_TASK_DOES_NOT_EXIST:
+ return ISCSI_TMF_RSP_NO_TASK;
+ case TMR_LUN_DOES_NOT_EXIST:
+ return ISCSI_TMF_RSP_NO_LUN;
+ case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
+ return ISCSI_TMF_RSP_NOT_SUPPORTED;
+ case TMR_FUNCTION_AUTHORIZATION_FAILED:
+ return ISCSI_TMF_RSP_AUTH_FAILED;
+ case TMR_FUNCTION_REJECTED:
+ default:
+ return ISCSI_TMF_RSP_REJECTED;
+ }
+}
+
+static int iscsit_send_task_mgt_rsp(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+ struct iscsi_tm_rsp *hdr;
+ u32 tx_size = 0;
+
+ hdr = (struct iscsi_tm_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
+ hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ cmd->iov_misc[0].iov_base = cmd->pdu;
+ cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for Task"
+ " Mgmt Response PDU 0x%08x\n", *header_digest);
+ }
+
+ cmd->iov_misc_count = 1;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built Task Management Response ITT: 0x%08x,"
+ " StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
+ cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
+
+ return 0;
+}
+
+static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+{
+ char *payload = NULL;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tiqn *tiqn;
+ struct iscsi_tpg_np *tpg_np;
+ int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
+ unsigned char buf[256];
+
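+	/*
+	 * Cap the SendTargets payload at the connection's
+	 * MaxRecvDataSegmentLength, but never more than 32K.
+	 */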
+ buffer_len = (conn->conn_ops->MaxRecvDataSegmentLength > 32768) ?
+ 32768 : conn->conn_ops->MaxRecvDataSegmentLength;
+
+ memset(buf, 0, 256);
+
+ payload = kzalloc(buffer_len, GFP_KERNEL);
+ if (!payload) {
+ pr_err("Unable to allocate memory for sendtargets"
+ " response.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock(&tiqn_lock);
+ list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
+ len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+ end_of_buf = 1;
+ goto eob;
+ }
+ memcpy((void *)payload + payload_len, buf, len);
+ payload_len += len;
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+ spin_lock(&tpg->tpg_state_lock);
+ if ((tpg->tpg_state == TPG_STATE_FREE) ||
+ (tpg->tpg_state == TPG_STATE_INACTIVE)) {
+ spin_unlock(&tpg->tpg_state_lock);
+ continue;
+ }
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
+ tpg_np_list) {
+ len = sprintf(buf, "TargetAddress="
+ "%s%s%s:%hu,%hu",
+ (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
+ "[" : "", tpg_np->tpg_np->np_ip,
+ (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
+ "]" : "", tpg_np->tpg_np->np_port,
+ tpg->tpgt);
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+ spin_unlock(&tpg->tpg_np_lock);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+ end_of_buf = 1;
+ goto eob;
+ }
+ memcpy((void *)payload + payload_len, buf, len);
+ payload_len += len;
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+ }
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+eob:
+ if (end_of_buf)
+ break;
+ }
+ spin_unlock(&tiqn_lock);
+
+ cmd->buf_ptr = payload;
+
+ return payload_len;
+}
+
+/*
+ * FIXME: Add support for F_BIT and C_BIT when the length is longer than
+ * MaxRecvDataSegmentLength.
+ */
+static int iscsit_send_text_rsp(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_text_rsp *hdr;
+ struct kvec *iov;
+ u32 padding = 0, tx_size = 0;
+ int text_length, iov_count = 0;
+
+ text_length = iscsit_build_sendtargets_response(cmd);
+ if (text_length < 0)
+ return text_length;
+
+ padding = ((-text_length) & 3);
+ if (padding != 0) {
+ memset(cmd->buf_ptr + text_length, 0, padding);
+ pr_debug("Attaching %u additional bytes for"
+ " padding.\n", padding);
+ }
+
+ hdr = (struct iscsi_text_rsp *) cmd->pdu;
+ memset(hdr, 0, ISCSI_HDR_LEN);
+ hdr->opcode = ISCSI_OP_TEXT_RSP;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hton24(hdr->dlength, text_length);
+ hdr->itt = cpu_to_be32(cmd->init_task_tag);
+ hdr->ttt = cpu_to_be32(cmd->targ_xfer_tag);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ iov[iov_count].iov_base = cmd->buf_ptr;
+ iov[iov_count++].iov_len = text_length + padding;
+
+ tx_size += (ISCSI_HDR_LEN + text_length + padding);
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for"
+ " Text Response PDU 0x%08x\n", *header_digest);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ cmd->buf_ptr, (text_length + padding),
+ 0, NULL, (u8 *)&cmd->data_crc);
+
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attaching DataDigest for %u bytes of text"
+ " data, CRC 0x%08x\n", (text_length + padding),
+ cmd->data_crc);
+ }
+
+ cmd->iov_misc_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
+ " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
+ text_length, conn->cid);
+ return 0;
+}
+
+static int iscsit_send_reject(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ u32 iov_count = 0, tx_size = 0;
+ struct iscsi_reject *hdr;
+ struct kvec *iov;
+
+ hdr = (struct iscsi_reject *) cmd->pdu;
+ hdr->opcode = ISCSI_OP_REJECT;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
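+	/* The Reject data segment carries the 48-byte header of the rejected PDU. */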
+ hton24(hdr->dlength, ISCSI_HDR_LEN);
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ iov = &cmd->iov_misc[0];
+
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ iov[iov_count].iov_base = cmd->buf_ptr;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+
+ tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)hdr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 HeaderDigest for"
+ " REJECT PDU 0x%08x\n", *header_digest);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)&cmd->data_crc);
+
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32 DataDigest for REJECT"
+ " PDU 0x%08x\n", cmd->data_crc);
+ }
+
+ cmd->iov_misc_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
+ " CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
+
+ return 0;
+}
+
+static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
+{
+ if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
+ wait_for_completion_interruptible_timeout(
+ &conn->tx_half_close_comp,
+ ISCSI_TX_THREAD_TCP_TIMEOUT * HZ);
+ }
+}
+
+#ifdef CONFIG_SMP
+
+void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+{
+ struct iscsi_thread_set *ts = conn->thread_set;
+ int ord, cpu;
+ /*
+ * thread_id is assigned from iscsit_global->ts_bitmap from
+ * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
+ *
+ * Here we use thread_id to determine which CPU that this
+ * iSCSI connection's iscsi_thread_set will be scheduled to
+ * execute upon.
+ */
+ ord = ts->thread_id % cpumask_weight(cpu_online_mask);
+#if 0
+ pr_debug(">>>>>>>>>>>>>>>>>>>> Generated ord: %d from"
+ " thread_id: %d\n", ord, ts->thread_id);
+#endif
+ for_each_online_cpu(cpu) {
+ if (ord-- == 0) {
+ cpumask_set_cpu(cpu, conn->conn_cpumask);
+ return;
+ }
+ }
+ /*
+ * This should never be reached..
+ */
+ dump_stack();
+ cpumask_setall(conn->conn_cpumask);
+}
+
+static inline void iscsit_thread_check_cpumask(
+ struct iscsi_conn *conn,
+ struct task_struct *p,
+ int mode)
+{
+ char buf[128];
+ /*
+ * mode == 1 signals iscsi_target_tx_thread() usage.
+ * mode == 0 signals iscsi_target_rx_thread() usage.
+ */
+ if (mode == 1) {
+ if (!conn->conn_tx_reset_cpumask)
+ return;
+ conn->conn_tx_reset_cpumask = 0;
+ } else {
+ if (!conn->conn_rx_reset_cpumask)
+ return;
+ conn->conn_rx_reset_cpumask = 0;
+ }
+ /*
+ * Update the CPU mask for this single kthread so that
+ * both TX and RX kthreads are scheduled to run on the
+ * same CPU.
+ */
+ memset(buf, 0, 128);
+ cpumask_scnprintf(buf, 128, conn->conn_cpumask);
+#if 0
+ pr_debug(">>>>>>>>>>>>>> Calling set_cpus_allowed_ptr():"
+ " %s for %s\n", buf, p->comm);
+#endif
+ set_cpus_allowed_ptr(p, conn->conn_cpumask);
+}
+
+#else
+
+void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+{
+ return;
+}
+
+#define iscsit_thread_check_cpumask(X, Y, Z) ({})
+#endif /* CONFIG_SMP */
+
+int iscsi_target_tx_thread(void *arg)
+{
+ u8 state;
+ int eodr = 0;
+ int ret = 0;
+ int sent_status = 0;
+ int use_misc = 0;
+ int map_sg = 0;
+ struct iscsi_cmd *cmd = NULL;
+ struct iscsi_conn *conn;
+ struct iscsi_queue_req *qr = NULL;
+ struct se_cmd *se_cmd;
+ struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+ * connection recovery / failure event can be triggered externally.
+ */
+ allow_signal(SIGINT);
+
+restart:
+ conn = iscsi_tx_thread_pre_handler(ts);
+ if (!conn)
+ goto out;
+
+ eodr = map_sg = ret = sent_status = use_misc = 0;
+
+ while (!kthread_should_stop()) {
+ /*
+ * Ensure that both TX and RX per connection kthreads
+ * are scheduled to run on the same CPU.
+ */
+ iscsit_thread_check_cpumask(conn, current, 1);
+
+ schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
+
+ if ((ts->status == ISCSI_THREAD_SET_RESET) ||
+ signal_pending(current))
+ goto transport_err;
+
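+		/*
+		 * Drain the immediate queue (R2Ts, NopINs, command removal)
+		 * completely before servicing the response queue below.
+		 */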
+get_immediate:
+ qr = iscsit_get_cmd_from_immediate_queue(conn);
+ if (qr) {
+ atomic_set(&conn->check_immediate_queue, 0);
+ cmd = qr->cmd;
+ state = qr->state;
+ kmem_cache_free(lio_qr_cache, qr);
+
+ spin_lock_bh(&cmd->istate_lock);
+ switch (state) {
+ case ISTATE_SEND_R2T:
+ spin_unlock_bh(&cmd->istate_lock);
+ ret = iscsit_send_r2t(cmd, conn);
+ break;
+ case ISTATE_REMOVE:
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if (cmd->data_direction == DMA_TO_DEVICE)
+ iscsit_stop_dataout_timer(cmd);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+ /*
+ * Determine if a struct se_cmd is associated with
+ * this struct iscsi_cmd.
+ */
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
+ !(cmd->tmr_req))
+ iscsit_release_cmd(cmd);
+ else
+ transport_generic_free_cmd(&cmd->se_cmd,
+ 1, 0);
+ goto get_immediate;
+ case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+ spin_unlock_bh(&cmd->istate_lock);
+ iscsit_mod_nopin_response_timer(conn);
+ ret = iscsit_send_unsolicited_nopin(cmd,
+ conn, 1);
+ break;
+ case ISTATE_SEND_NOPIN_NO_RESPONSE:
+ spin_unlock_bh(&cmd->istate_lock);
+ ret = iscsit_send_unsolicited_nopin(cmd,
+ conn, 0);
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag, state,
+ conn->cid);
+ spin_unlock_bh(&cmd->istate_lock);
+ goto transport_err;
+ }
+ if (ret < 0) {
+ conn->tx_immediate_queue = 0;
+ goto transport_err;
+ }
+
+ if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
+ conn->tx_immediate_queue = 0;
+ iscsit_tx_thread_wait_for_tcp(conn);
+ goto transport_err;
+ }
+
+ spin_lock_bh(&cmd->istate_lock);
+ switch (state) {
+ case ISTATE_SEND_R2T:
+ spin_unlock_bh(&cmd->istate_lock);
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ iscsit_start_dataout_timer(cmd, conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ break;
+ case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+ cmd->i_state = ISTATE_SENT_NOPIN_WANT_RESPONSE;
+ spin_unlock_bh(&cmd->istate_lock);
+ break;
+ case ISTATE_SEND_NOPIN_NO_RESPONSE:
+ cmd->i_state = ISTATE_SENT_STATUS;
+ spin_unlock_bh(&cmd->istate_lock);
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag,
+ state, conn->cid);
+ spin_unlock_bh(&cmd->istate_lock);
+ goto transport_err;
+ }
+ goto get_immediate;
+ } else
+ conn->tx_immediate_queue = 0;
+
+get_response:
+ qr = iscsit_get_cmd_from_response_queue(conn);
+ if (qr) {
+ cmd = qr->cmd;
+ state = qr->state;
+ kmem_cache_free(lio_qr_cache, qr);
+
+ spin_lock_bh(&cmd->istate_lock);
+check_rsp_state:
+ switch (state) {
+ case ISTATE_SEND_DATAIN:
+ spin_unlock_bh(&cmd->istate_lock);
+ ret = iscsit_send_data_in(cmd, conn,
+ &eodr);
+ map_sg = 1;
+ break;
+ case ISTATE_SEND_STATUS:
+ case ISTATE_SEND_STATUS_RECOVERY:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_status(cmd, conn);
+ break;
+ case ISTATE_SEND_LOGOUTRSP:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_logout_response(cmd, conn);
+ break;
+ case ISTATE_SEND_ASYNCMSG:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_conn_drop_async_message(
+ cmd, conn);
+ break;
+ case ISTATE_SEND_NOPIN:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_nopin_response(cmd, conn);
+ break;
+ case ISTATE_SEND_REJECT:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_reject(cmd, conn);
+ break;
+ case ISTATE_SEND_TASKMGTRSP:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_task_mgt_rsp(cmd, conn);
+ if (ret != 0)
+ break;
+ ret = iscsit_tmr_post_handler(cmd, conn);
+ if (ret != 0)
+ iscsit_fall_back_to_erl0(conn->sess);
+ break;
+ case ISTATE_SEND_TEXTRSP:
+ spin_unlock_bh(&cmd->istate_lock);
+ use_misc = 1;
+ ret = iscsit_send_text_rsp(cmd, conn);
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag,
+ state, conn->cid);
+ spin_unlock_bh(&cmd->istate_lock);
+ goto transport_err;
+ }
+ if (ret < 0) {
+ conn->tx_response_queue = 0;
+ goto transport_err;
+ }
+
+ se_cmd = &cmd->se_cmd;
+
+ if (map_sg && !conn->conn_ops->IFMarker) {
+ if (iscsit_fe_sendpage_sg(cmd, conn) < 0) {
+ conn->tx_response_queue = 0;
+ iscsit_tx_thread_wait_for_tcp(conn);
+ iscsit_unmap_iovec(cmd);
+ goto transport_err;
+ }
+ } else {
+ if (iscsit_send_tx_data(cmd, conn, use_misc) < 0) {
+ conn->tx_response_queue = 0;
+ iscsit_tx_thread_wait_for_tcp(conn);
+ iscsit_unmap_iovec(cmd);
+ goto transport_err;
+ }
+ }
+ map_sg = 0;
+ iscsit_unmap_iovec(cmd);
+
+ spin_lock_bh(&cmd->istate_lock);
+ switch (state) {
+ case ISTATE_SEND_DATAIN:
+ if (!eodr)
+ goto check_rsp_state;
+
+ if (eodr == 1) {
+ cmd->i_state = ISTATE_SENT_LAST_DATAIN;
+ sent_status = 1;
+ eodr = use_misc = 0;
+ } else if (eodr == 2) {
+ cmd->i_state = state =
+ ISTATE_SEND_STATUS;
+ sent_status = 0;
+ eodr = use_misc = 0;
+ goto check_rsp_state;
+ }
+ break;
+ case ISTATE_SEND_STATUS:
+ use_misc = 0;
+ sent_status = 1;
+ break;
+ case ISTATE_SEND_ASYNCMSG:
+ case ISTATE_SEND_NOPIN:
+ case ISTATE_SEND_STATUS_RECOVERY:
+ case ISTATE_SEND_TEXTRSP:
+ use_misc = 0;
+ sent_status = 1;
+ break;
+ case ISTATE_SEND_REJECT:
+ use_misc = 0;
+ if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
+ cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
+ spin_unlock_bh(&cmd->istate_lock);
+ complete(&cmd->reject_comp);
+ goto transport_err;
+ }
+ complete(&cmd->reject_comp);
+ break;
+ case ISTATE_SEND_TASKMGTRSP:
+ use_misc = 0;
+ sent_status = 1;
+ break;
+ case ISTATE_SEND_LOGOUTRSP:
+ spin_unlock_bh(&cmd->istate_lock);
+ if (!iscsit_logout_post_handler(cmd, conn))
+ goto restart;
+ spin_lock_bh(&cmd->istate_lock);
+ use_misc = 0;
+ sent_status = 1;
+ break;
+ default:
+ pr_err("Unknown Opcode: 0x%02x ITT:"
+ " 0x%08x, i_state: %d on CID: %hu\n",
+ cmd->iscsi_opcode, cmd->init_task_tag,
+ cmd->i_state, conn->cid);
+ spin_unlock_bh(&cmd->istate_lock);
+ goto transport_err;
+ }
+
+ if (sent_status) {
+ cmd->i_state = ISTATE_SENT_STATUS;
+ sent_status = 0;
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if (atomic_read(&conn->check_immediate_queue))
+ goto get_immediate;
+
+ goto get_response;
+ } else
+ conn->tx_response_queue = 0;
+ }
+
+transport_err:
+ iscsit_take_action_for_connection_exit(conn);
+ goto restart;
+out:
+ return 0;
+}
+
+int iscsi_target_rx_thread(void *arg)
+{
+ int ret;
+ u8 buffer[ISCSI_HDR_LEN], opcode;
+ u32 checksum = 0, digest = 0;
+ struct iscsi_conn *conn = NULL;
+ struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+ struct kvec iov;
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+ * connection recovery / failure event can be triggered externally.
+ */
+ allow_signal(SIGINT);
+
+restart:
+ conn = iscsi_rx_thread_pre_handler(ts);
+ if (!conn)
+ goto out;
+
+ while (!kthread_should_stop()) {
+ /*
+ * Ensure that both TX and RX per connection kthreads
+ * are scheduled to run on the same CPU.
+ */
+ iscsit_thread_check_cpumask(conn, current, 0);
+
+ memset(buffer, 0, ISCSI_HDR_LEN);
+ memset(&iov, 0, sizeof(struct kvec));
+
+ iov.iov_base = buffer;
+ iov.iov_len = ISCSI_HDR_LEN;
+
+ ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
+ if (ret != ISCSI_HDR_LEN) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+ goto transport_err;
+ }
+
+ /*
+ * Set conn->bad_hdr for use with REJECT PDUs.
+ */
+ memcpy(&conn->bad_hdr, &buffer, ISCSI_HDR_LEN);
+
+ if (conn->conn_ops->HeaderDigest) {
+ iov.iov_base = &digest;
+ iov.iov_len = ISCSI_CRC_LEN;
+
+ ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
+ if (ret != ISCSI_CRC_LEN) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+ goto transport_err;
+ }
+
+ iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+ buffer, ISCSI_HDR_LEN,
+ 0, NULL, (u8 *)&checksum);
+
+ if (digest != checksum) {
+ pr_err("HeaderDigest CRC32C failed,"
+ " received 0x%08x, computed 0x%08x\n",
+ digest, checksum);
+ /*
+ * Set the PDU to 0xff so it will intentionally
+ * hit default in the switch below.
+ */
+ memset(buffer, 0xff, ISCSI_HDR_LEN);
+ spin_lock_bh(&conn->sess->session_stats_lock);
+ conn->sess->conn_digest_errors++;
+ spin_unlock_bh(&conn->sess->session_stats_lock);
+ } else {
+ pr_debug("Got HeaderDigest CRC32C"
+ " 0x%08x\n", checksum);
+ }
+ }
+
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
+ goto transport_err;
+
+ opcode = buffer[0] & ISCSI_OPCODE_MASK;
+
+ if (conn->sess->sess_ops->SessionType &&
+ ((opcode != ISCSI_OP_TEXT) &&
+ (opcode != ISCSI_OP_LOGOUT))) {
+ pr_err("Received illegal iSCSI Opcode: 0x%02x"
+ " while in Discovery Session, rejecting.\n", opcode);
+ iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
+ buffer, conn);
+ goto transport_err;
+ }
+
+ switch (opcode) {
+ case ISCSI_OP_SCSI_CMD:
+ if (iscsit_handle_scsi_cmd(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_SCSI_DATA_OUT:
+ if (iscsit_handle_data_out(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ if (iscsit_handle_nop_out(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_TEXT:
+ if (iscsit_handle_text_cmd(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_LOGOUT:
+ ret = iscsit_handle_logout_cmd(conn, buffer);
+ if (ret > 0) {
+ wait_for_completion_timeout(&conn->conn_logout_comp,
+ SECONDS_FOR_LOGOUT_COMP * HZ);
+ goto transport_err;
+ } else if (ret < 0)
+ goto transport_err;
+ break;
+ case ISCSI_OP_SNACK:
+ if (iscsit_handle_snack(conn, buffer) < 0)
+ goto transport_err;
+ break;
+ default:
+ pr_err("Got unknown iSCSI OpCode: 0x%02x\n",
+ opcode);
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Cannot recover from unknown"
+ " opcode while ERL=0, closing iSCSI connection"
+ ".\n");
+ goto transport_err;
+ }
+ if (!conn->conn_ops->OFMarker) {
+ pr_err("Unable to recover from unknown"
+ " opcode while OFMarker=No, closing iSCSI"
+ " connection.\n");
+ goto transport_err;
+ }
+ if (iscsit_recover_from_unknown_opcode(conn) < 0) {
+ pr_err("Unable to recover from unknown"
+ " opcode, closing iSCSI connection.\n");
+ goto transport_err;
+ }
+ break;
+ }
+ }
+
+transport_err:
+ if (!signal_pending(current))
+ atomic_set(&conn->transport_failed, 1);
+ iscsit_take_action_for_connection_exit(conn);
+ goto restart;
+out:
+ return 0;
+}
+
+static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
+ struct iscsi_session *sess = conn->sess;
+ struct se_cmd *se_cmd;
+ /*
+ * We expect this function to only ever be called from either RX or TX
+ * thread context via iscsit_close_connection() once the other context
+ * has been reset and has returned to its sleeping pre-handler state.
+ */
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) {
+
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+ iscsit_increment_maxcmdsn(cmd, sess);
+ se_cmd = &cmd->se_cmd;
+ /*
+ * Special cases for active iSCSI TMR, and
+ * transport_lookup_cmd_lun() failing from
+ * iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd().
+ */
+ if (cmd->tmr_req && se_cmd->transport_wait_for_tasks)
+ se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
+ else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)
+ transport_release_cmd(se_cmd);
+ else
+ iscsit_release_cmd(cmd);
+
+ spin_lock_bh(&conn->cmd_lock);
+ continue;
+ }
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_increment_maxcmdsn(cmd, sess);
+ se_cmd = &cmd->se_cmd;
+
+ if (se_cmd->transport_wait_for_tasks)
+ se_cmd->transport_wait_for_tasks(se_cmd, 1, 1);
+
+ spin_lock_bh(&conn->cmd_lock);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+}
+
+static void iscsit_stop_timers_for_cmds(
+ struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->data_direction == DMA_TO_DEVICE)
+ iscsit_stop_dataout_timer(cmd);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+}
+
+int iscsit_close_connection(
+ struct iscsi_conn *conn)
+{
+ int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
+ struct iscsi_session *sess = conn->sess;
+
+ pr_debug("Closing iSCSI connection CID %hu on SID:"
+ " %u\n", conn->cid, sess->sid);
+ /*
+ * Always up conn_logout_comp just in case the RX Thread is sleeping
+ * and the logout response never got sent because the connection
+ * failed.
+ */
+ complete(&conn->conn_logout_comp);
+
+ iscsi_release_thread_set(conn);
+
+ iscsit_stop_timers_for_cmds(conn);
+ iscsit_stop_nopin_response_timer(conn);
+ iscsit_stop_nopin_timer(conn);
+ iscsit_free_queue_reqs_for_conn(conn);
+
+ /*
+ * During Connection recovery drop unacknowledged out of order
+ * commands for this connection, and prepare the other commands
+ * for reallegiance.
+ *
+ * During normal operation clear the out of order commands (but
+ * do not free the struct iscsi_ooo_cmdsn's) and release all
+ * struct iscsi_cmds.
+ */
+ if (atomic_read(&conn->connection_recovery)) {
+ iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
+ iscsit_prepare_cmds_for_realligance(conn);
+ } else {
+ iscsit_clear_ooo_cmdsns_for_conn(conn);
+ iscsit_release_commands_from_conn(conn);
+ }
+
+ /*
+ * Handle decrementing session or connection usage count if
+ * a logout response was not able to be sent because the
+ * connection failed. Fall back to Session Recovery here.
+ */
+ if (atomic_read(&conn->conn_logout_remove)) {
+ if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
+ iscsit_dec_conn_usage_count(conn);
+ iscsit_dec_session_usage_count(sess);
+ }
+ if (conn->conn_logout_reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION)
+ iscsit_dec_conn_usage_count(conn);
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ atomic_set(&sess->session_reinstatement, 0);
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
+ }
+
+ spin_lock_bh(&sess->conn_lock);
+ list_del(&conn->conn_list);
+
+ /*
+ * Attempt to let the Initiator know this connection failed by
+ * sending a Connection Dropped Async Message on another
+ * active connection.
+ */
+ if (atomic_read(&conn->connection_recovery))
+ iscsit_build_conn_drop_async_message(conn);
+
+ spin_unlock_bh(&sess->conn_lock);
+
+ /*
+ * If connection reinstatement is being performed on this connection,
+ * up the connection reinstatement semaphore that is being blocked on
+ * in iscsit_cause_connection_reinstatement().
+ */
+ spin_lock_bh(&conn->state_lock);
+ if (atomic_read(&conn->sleep_on_conn_wait_comp)) {
+ spin_unlock_bh(&conn->state_lock);
+ complete(&conn->conn_wait_comp);
+ wait_for_completion(&conn->conn_post_wait_comp);
+ spin_lock_bh(&conn->state_lock);
+ }
+
+ /*
+ * If connection reinstatement is being performed on this connection
+ * by receiving a REMOVECONNFORRECOVERY logout request, up the
+ * connection wait rcfr semaphore that is being blocked on
+ * an iscsit_connection_reinstatement_rcfr().
+ */
+ if (atomic_read(&conn->connection_wait_rcfr)) {
+ spin_unlock_bh(&conn->state_lock);
+ complete(&conn->conn_wait_rcfr_comp);
+ wait_for_completion(&conn->conn_post_wait_comp);
+ spin_lock_bh(&conn->state_lock);
+ }
+ atomic_set(&conn->connection_reinstatement, 1);
+ spin_unlock_bh(&conn->state_lock);
+
+ /*
+ * If any other processes are accessing this connection pointer we
+ * must wait until they have completed.
+ */
+ iscsit_check_conn_usage_count(conn);
+
+ if (conn->conn_rx_hash.tfm)
+ crypto_free_hash(conn->conn_rx_hash.tfm);
+ if (conn->conn_tx_hash.tfm)
+ crypto_free_hash(conn->conn_tx_hash.tfm);
+
+ if (conn->conn_cpumask)
+ free_cpumask_var(conn->conn_cpumask);
+
+ kfree(conn->conn_ops);
+ conn->conn_ops = NULL;
+
+ if (conn->sock) {
+ if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
+ kfree(conn->sock->file);
+ conn->sock->file = NULL;
+ }
+ sock_release(conn->sock);
+ }
+ conn->thread_set = NULL;
+
+ pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+ conn->conn_state = TARG_CONN_STATE_FREE;
+ kfree(conn);
+
+ spin_lock_bh(&sess->conn_lock);
+ atomic_dec(&sess->nconn);
+ pr_debug("Decremented iSCSI connection count to %hu from node:"
+ " %s\n", atomic_read(&sess->nconn),
+ sess->sess_ops->InitiatorName);
+ /*
+ * Make sure that if one connection fails in a non ERL=2 iSCSI
+ * Session that they all fail.
+ */
+ if ((sess->sess_ops->ErrorRecoveryLevel != 2) && !conn_logout &&
+ !atomic_read(&sess->session_logout))
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
+
+ /*
+ * If this was not the last connection in the session, and we are
+ * performing session reinstatement or falling back to ERL=0, call
+ * iscsit_stop_session() without sleeping to shutdown the other
+ * active connections.
+ */
+ if (atomic_read(&sess->nconn)) {
+ if (!atomic_read(&sess->session_reinstatement) &&
+ !atomic_read(&sess->session_fall_back_to_erl0)) {
+ spin_unlock_bh(&sess->conn_lock);
+ return 0;
+ }
+ if (!atomic_read(&sess->session_stop_active)) {
+ atomic_set(&sess->session_stop_active, 1);
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_stop_session(sess, 0, 0);
+ return 0;
+ }
+ spin_unlock_bh(&sess->conn_lock);
+ return 0;
+ }
+
+ /*
+ * If this was the last connection in the session and one of the
+ * following is occurring:
+ *
+ * Session Reinstatement is not being performed and we are falling back
+ * to ERL=0, so call iscsit_close_session().
+ *
+ * Session Logout was requested. iscsit_close_session() will be called
+ * elsewhere.
+ *
+ * Session Continuation is not being performed, start the Time2Retain
+ * handler and check if sleep_on_sess_wait_sem is active.
+ */
+ if (!atomic_read(&sess->session_reinstatement) &&
+ atomic_read(&sess->session_fall_back_to_erl0)) {
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_close_session(sess);
+
+ return 0;
+ } else if (atomic_read(&sess->session_logout)) {
+ pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
+ sess->session_state = TARG_SESS_STATE_FREE;
+ spin_unlock_bh(&sess->conn_lock);
+
+ if (atomic_read(&sess->sleep_on_sess_wait_comp))
+ complete(&sess->session_wait_comp);
+
+ return 0;
+ } else {
+ pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
+ sess->session_state = TARG_SESS_STATE_FAILED;
+
+ if (!atomic_read(&sess->session_continuation)) {
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_start_time2retain_handler(sess);
+ } else
+ spin_unlock_bh(&sess->conn_lock);
+
+ if (atomic_read(&sess->sleep_on_sess_wait_comp))
+ complete(&sess->session_wait_comp);
+
+ return 0;
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ return 0;
+}
+
+int iscsit_close_session(struct iscsi_session *sess)
+{
+ struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+ if (atomic_read(&sess->nconn)) {
+ pr_err("%d connection(s) still exist for iSCSI session"
+ " to %s\n", atomic_read(&sess->nconn),
+ sess->sess_ops->InitiatorName);
+ BUG();
+ }
+
+ spin_lock_bh(&se_tpg->session_lock);
+ atomic_set(&sess->session_logout, 1);
+ atomic_set(&sess->session_reinstatement, 1);
+ iscsit_stop_time2retain_timer(sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ /*
+ * transport_deregister_session_configfs() will clear the
+ * struct se_node_acl->nacl_sess pointer now, as an iscsi_np process context
+ * can set it again with __transport_register_session() in
+ * iscsi_post_login_handler() after iscsit_stop_session()
+ * completes in iscsi_np context.
+ */
+ transport_deregister_session_configfs(sess->se_sess);
+
+ /*
+ * If any other processes are accessing this session pointer we must
+ * wait until they have completed.  If we are in interrupt context (the
+ * time2retain handler) and there is an active session usage count, we
+ * restart the timer and exit.
+ */
+ if (!in_interrupt()) {
+ if (iscsit_check_session_usage_count(sess) == 1)
+ iscsit_stop_session(sess, 1, 1);
+ } else {
+ if (iscsit_check_session_usage_count(sess) == 2) {
+ atomic_set(&sess->session_logout, 0);
+ iscsit_start_time2retain_handler(sess);
+ return 0;
+ }
+ }
+
+ transport_deregister_session(sess->se_sess);
+
+ if (sess->sess_ops->ErrorRecoveryLevel == 2)
+ iscsit_free_connection_recovery_entires(sess);
+
+ iscsit_free_all_ooo_cmdsns(sess);
+
+ spin_lock_bh(&se_tpg->session_lock);
+ pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
+ sess->session_state = TARG_SESS_STATE_FREE;
+ pr_debug("Released iSCSI session from node: %s\n",
+ sess->sess_ops->InitiatorName);
+ tpg->nsessions--;
+ if (tpg->tpg_tiqn)
+ tpg->tpg_tiqn->tiqn_nsessions--;
+
+ pr_debug("Decremented number of active iSCSI Sessions on"
+ " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
+
+ spin_lock(&sess_idr_lock);
+ idr_remove(&sess_idr, sess->session_index);
+ spin_unlock(&sess_idr_lock);
+
+ kfree(sess->sess_ops);
+ sess->sess_ops = NULL;
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ kfree(sess);
+ return 0;
+}
+
+static void iscsit_logout_post_handler_closesession(
+ struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+
+ iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
+ iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ complete(&conn->conn_logout_comp);
+
+ iscsit_dec_conn_usage_count(conn);
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
+ iscsit_close_session(sess);
+}
+
+static void iscsit_logout_post_handler_samecid(
+ struct iscsi_conn *conn)
+{
+ iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
+ iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+
+ atomic_set(&conn->conn_logout_remove, 0);
+ complete(&conn->conn_logout_comp);
+
+ iscsit_cause_connection_reinstatement(conn, 1);
+ iscsit_dec_conn_usage_count(conn);
+}
+
+static void iscsit_logout_post_handler_diffcid(
+ struct iscsi_conn *conn,
+ u16 cid)
+{
+ struct iscsi_conn *l_conn = NULL, *c;
+ struct iscsi_session *sess = conn->sess;
+
+ if (!sess)
+ return;
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(c, &sess->sess_conn_list, conn_list) {
+ if (c->cid == cid) {
+ iscsit_inc_conn_usage_count(c);
+ l_conn = c;
+ break;
+ }
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ if (!l_conn)
+ return;
+
+ if (l_conn->sock)
+ l_conn->sock->ops->shutdown(l_conn->sock, RCV_SHUTDOWN);
+
+ spin_lock_bh(&l_conn->state_lock);
+ pr_debug("Moving to TARG_CONN_STATE_IN_LOGOUT.\n");
+ l_conn->conn_state = TARG_CONN_STATE_IN_LOGOUT;
+ spin_unlock_bh(&l_conn->state_lock);
+
+ iscsit_cause_connection_reinstatement(l_conn, 1);
+ iscsit_dec_conn_usage_count(l_conn);
+}
+
+/*
+ * Return of 0 causes the TX thread to restart.
+ */
+static int iscsit_logout_post_handler(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int ret = 0;
+
+ switch (cmd->logout_reason) {
+ case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+ switch (cmd->logout_response) {
+ case ISCSI_LOGOUT_SUCCESS:
+ case ISCSI_LOGOUT_CLEANUP_FAILED:
+ default:
+ iscsit_logout_post_handler_closesession(conn);
+ break;
+ }
+ ret = 0;
+ break;
+ case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+ if (conn->cid == cmd->logout_cid) {
+ switch (cmd->logout_response) {
+ case ISCSI_LOGOUT_SUCCESS:
+ case ISCSI_LOGOUT_CLEANUP_FAILED:
+ default:
+ iscsit_logout_post_handler_samecid(conn);
+ break;
+ }
+ ret = 0;
+ } else {
+ switch (cmd->logout_response) {
+ case ISCSI_LOGOUT_SUCCESS:
+ iscsit_logout_post_handler_diffcid(conn,
+ cmd->logout_cid);
+ break;
+ case ISCSI_LOGOUT_CID_NOT_FOUND:
+ case ISCSI_LOGOUT_CLEANUP_FAILED:
+ default:
+ break;
+ }
+ ret = 1;
+ }
+ break;
+ case ISCSI_LOGOUT_REASON_RECOVERY:
+ switch (cmd->logout_response) {
+ case ISCSI_LOGOUT_SUCCESS:
+ case ISCSI_LOGOUT_CID_NOT_FOUND:
+ case ISCSI_LOGOUT_RECOVERY_UNSUPPORTED:
+ case ISCSI_LOGOUT_CLEANUP_FAILED:
+ default:
+ break;
+ }
+ ret = 1;
+ break;
+ default:
+ break;
+
+ }
+ return ret;
+}
+
+void iscsit_fail_session(struct iscsi_session *sess)
+{
+ struct iscsi_conn *conn;
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+ pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
+ conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
+ sess->session_state = TARG_SESS_STATE_FAILED;
+}
+
+int iscsit_free_session(struct iscsi_session *sess)
+{
+ u16 conn_count = atomic_read(&sess->nconn);
+ struct iscsi_conn *conn, *conn_tmp = NULL;
+ int is_last;
+
+ spin_lock_bh(&sess->conn_lock);
+ atomic_set(&sess->sleep_on_sess_wait_comp, 1);
+
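+	/*
+	 * Pin each connection (and the next list entry) with a usage count
+	 * so neither can be freed while conn_lock is dropped for
+	 * iscsit_cause_connection_reinstatement().
+	 */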
+ list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
+ conn_list) {
+ if (conn_count == 0)
+ break;
+
+ if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
+ is_last = 1;
+ } else {
+ iscsit_inc_conn_usage_count(conn_tmp);
+ is_last = 0;
+ }
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_cause_connection_reinstatement(conn, 1);
+ spin_lock_bh(&sess->conn_lock);
+
+ iscsit_dec_conn_usage_count(conn);
+ if (is_last == 0)
+ iscsit_dec_conn_usage_count(conn_tmp);
+
+ conn_count--;
+ }
+
+ if (atomic_read(&sess->nconn)) {
+ spin_unlock_bh(&sess->conn_lock);
+ wait_for_completion(&sess->session_wait_comp);
+ } else
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsit_close_session(sess);
+ return 0;
+}
+
+void iscsit_stop_session(
+ struct iscsi_session *sess,
+ int session_sleep,
+ int connection_sleep)
+{
+ u16 conn_count = atomic_read(&sess->nconn);
+ struct iscsi_conn *conn, *conn_tmp = NULL;
+ int is_last;
+
+ spin_lock_bh(&sess->conn_lock);
+ if (session_sleep)
+ atomic_set(&sess->sleep_on_sess_wait_comp, 1);
+
+ if (connection_sleep) {
+ list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
+ conn_list) {
+ if (conn_count == 0)
+ break;
+
+ if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
+ is_last = 1;
+ } else {
+ iscsit_inc_conn_usage_count(conn_tmp);
+ is_last = 0;
+ }
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_cause_connection_reinstatement(conn, 1);
+ spin_lock_bh(&sess->conn_lock);
+
+ iscsit_dec_conn_usage_count(conn);
+ if (is_last == 0)
+ iscsit_dec_conn_usage_count(conn_tmp);
+ conn_count--;
+ }
+ } else {
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
+ iscsit_cause_connection_reinstatement(conn, 0);
+ }
+
+ if (session_sleep && atomic_read(&sess->nconn)) {
+ spin_unlock_bh(&sess->conn_lock);
+ wait_for_completion(&sess->session_wait_comp);
+ } else
+ spin_unlock_bh(&sess->conn_lock);
+}
+
+int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+{
+ struct iscsi_session *sess;
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ struct se_session *se_sess, *se_sess_tmp;
+ int session_count = 0;
+
+ spin_lock_bh(&se_tpg->session_lock);
+ if (tpg->nsessions && !force) {
+ spin_unlock_bh(&se_tpg->session_lock);
+ return -1;
+ }
+
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+ sess_list) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+
+ spin_lock(&sess->conn_lock);
+ if (atomic_read(&sess->session_fall_back_to_erl0) ||
+ atomic_read(&sess->session_logout) ||
+ (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess->conn_lock);
+ continue;
+ }
+ atomic_set(&sess->session_reinstatement, 1);
+ spin_unlock(&sess->conn_lock);
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ iscsit_free_session(sess);
+ spin_lock_bh(&se_tpg->session_lock);
+
+ session_count++;
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ pr_debug("Released %d iSCSI Session(s) from Target Portal"
+ " Group: %hu\n", session_count, tpg->tpgt);
+ return 0;
+}
+
+MODULE_DESCRIPTION("iSCSI-Target Driver for mainline target infrastructure");
+MODULE_VERSION("4.1.x");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(iscsi_target_init_module);
+module_exit(iscsi_target_cleanup_module);
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
new file mode 100644
index 00000000000..5db2ddeed5e
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -0,0 +1,42 @@
+#ifndef ISCSI_TARGET_H
+#define ISCSI_TARGET_H
+
+extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
+extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
+extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
+extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
+extern void iscsit_del_tiqn(struct iscsi_tiqn *);
+extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *);
+extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
+ char *, int);
+extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
+ struct iscsi_portal_group *);
+extern int iscsit_del_np(struct iscsi_np *);
+extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *);
+extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
+extern int iscsit_send_r2t(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, int);
+extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
+extern int iscsi_target_tx_thread(void *);
+extern int iscsi_target_rx_thread(void *);
+extern int iscsit_close_connection(struct iscsi_conn *);
+extern int iscsit_close_session(struct iscsi_session *);
+extern void iscsit_fail_session(struct iscsi_session *);
+extern int iscsit_free_session(struct iscsi_session *);
+extern void iscsit_stop_session(struct iscsi_session *, int, int);
+extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
+
+extern struct iscsit_global *iscsit_global;
+extern struct target_fabric_configfs *lio_target_fabric_configfs;
+
+extern struct kmem_cache *lio_dr_cache;
+extern struct kmem_cache *lio_ooo_cache;
+extern struct kmem_cache *lio_cmd_cache;
+extern struct kmem_cache *lio_qr_cache;
+extern struct kmem_cache *lio_r2t_cache;
+
+#endif /*** ISCSI_TARGET_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
new file mode 100644
index 00000000000..11fd7430781
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -0,0 +1,490 @@
+/*******************************************************************************
+ * This file houses the main functions for the iSCSI CHAP support
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/scatterlist.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_auth.h"
+
+static unsigned char chap_asciihex_to_binaryhex(unsigned char val[2])
+{
+ unsigned char result = 0;
+ /*
+ * MSB
+ */
+ if ((val[0] >= 'a') && (val[0] <= 'f'))
+ result = ((val[0] - 'a' + 10) & 0xf) << 4;
+ else
+ if ((val[0] >= 'A') && (val[0] <= 'F'))
+ result = ((val[0] - 'A' + 10) & 0xf) << 4;
+ else /* digit */
+ result = ((val[0] - '0') & 0xf) << 4;
+ /*
+ * LSB
+ */
+ if ((val[1] >= 'a') && (val[1] <= 'f'))
+ result |= ((val[1] - 'a' + 10) & 0xf);
+ else
+ if ((val[1] >= 'A') && (val[1] <= 'F'))
+ result |= ((val[1] - 'A' + 10) & 0xf);
+ else /* digit */
+ result |= ((val[1] - '0') & 0xf);
+
+ return result;
+}
+
+static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
+{
+ int i, j = 0;
+
+ for (i = 0; i < len; i += 2) {
+ dst[j++] = (unsigned char) chap_asciihex_to_binaryhex(&src[i]);
+ }
+
+ dst[j] = '\0';
+ return j;
+}
+
+static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
+{
+ int i;
+
+ for (i = 0; i < src_len; i++) {
+ sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
+ }
+}
+
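+/*
+ * Fill the CHAP challenge with pseudo-random bytes, building each output
+ * byte from three separate get_random_bytes() samples (3 + 3 + 2 bits).
+ */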
+static void chap_set_random(char *data, int length)
+{
+ long r;
+ unsigned n;
+
+ while (length > 0) {
+ get_random_bytes(&r, sizeof(long));
+ r = r ^ (r >> 8);
+ r = r ^ (r >> 4);
+ n = r & 0x7;
+
+ get_random_bytes(&r, sizeof(long));
+ r = r ^ (r >> 8);
+ r = r ^ (r >> 5);
+ n = (n << 3) | (r & 0x7);
+
+ get_random_bytes(&r, sizeof(long));
+ r = r ^ (r >> 8);
+ r = r ^ (r >> 5);
+ n = (n << 2) | (r & 0x3);
+
+ *data++ = n;
+ length--;
+ }
+}
+
+static void chap_gen_challenge(
+ struct iscsi_conn *conn,
+ int caller,
+ char *c_str,
+ unsigned int *c_len)
+{
+ unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
+ struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+
+ memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
+
+ chap_set_random(chap->challenge, CHAP_CHALLENGE_LENGTH);
+ chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+ CHAP_CHALLENGE_LENGTH);
+ /*
+ * Set CHAP_C, and copy the generated challenge into c_str.
+ */
+ *c_len += sprintf(c_str + *c_len, "CHAP_C=0x%s", challenge_asciihex);
+ *c_len += 1;
+
+ pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
+ challenge_asciihex);
+}
+
+
+static struct iscsi_chap *chap_server_open(
+ struct iscsi_conn *conn,
+ struct iscsi_node_auth *auth,
+ const char *a_str,
+ char *aic_str,
+ unsigned int *aic_len)
+{
+ struct iscsi_chap *chap;
+
+ if (!(auth->naf_flags & NAF_USERID_SET) ||
+ !(auth->naf_flags & NAF_PASSWORD_SET)) {
+ pr_err("CHAP user or password not set for"
+ " Initiator ACL\n");
+ return NULL;
+ }
+
+ conn->auth_protocol = kzalloc(sizeof(struct iscsi_chap), GFP_KERNEL);
+ if (!conn->auth_protocol)
+ return NULL;
+
+ chap = (struct iscsi_chap *) conn->auth_protocol;
+ /*
+ * We only support the MD5 message digest algorithm presently.
+ */
+ if (strncmp(a_str, "CHAP_A=5", 8)) {
+ pr_err("CHAP_A is not MD5.\n");
+ return NULL;
+ }
+ pr_debug("[server] Got CHAP_A=5\n");
+ /*
+ * Send back CHAP_A set to MD5.
+ */
+ *aic_len = sprintf(aic_str, "CHAP_A=5");
+ *aic_len += 1;
+ chap->digest_type = CHAP_DIGEST_MD5;
+ pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+ /*
+ * Set Identifier.
+ */
+ chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++;
+ *aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
+ *aic_len += 1;
+ pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
+ /*
+ * Generate Challenge.
+ */
+ chap_gen_challenge(conn, 1, aic_str, aic_len);
+
+ return chap;
+}
+
+static void chap_close(struct iscsi_conn *conn)
+{
+ kfree(conn->auth_protocol);
+ conn->auth_protocol = NULL;
+}
+
+static int chap_server_compute_md5(
+ struct iscsi_conn *conn,
+ struct iscsi_node_auth *auth,
+ char *nr_in_ptr,
+ char *nr_out_ptr,
+ unsigned int *nr_out_len)
+{
+ char *endptr;
+ unsigned char id, digest[MD5_SIGNATURE_SIZE];
+ unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
+ unsigned char identifier[10], *challenge = NULL;
+ unsigned char *challenge_binhex = NULL;
+ unsigned char client_digest[MD5_SIGNATURE_SIZE];
+ unsigned char server_digest[MD5_SIGNATURE_SIZE];
+ unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
+ struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct crypto_hash *tfm;
+ struct hash_desc desc;
+ struct scatterlist sg;
+ int auth_ret = -1, ret, challenge_len;
+
+ memset(identifier, 0, 10);
+ memset(chap_n, 0, MAX_CHAP_N_SIZE);
+ memset(chap_r, 0, MAX_RESPONSE_LENGTH);
+ memset(digest, 0, MD5_SIGNATURE_SIZE);
+ memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
+ memset(client_digest, 0, MD5_SIGNATURE_SIZE);
+ memset(server_digest, 0, MD5_SIGNATURE_SIZE);
+
+ challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!challenge) {
+ pr_err("Unable to allocate challenge buffer\n");
+ goto out;
+ }
+
+ challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!challenge_binhex) {
+ pr_err("Unable to allocate challenge_binhex buffer\n");
+ goto out;
+ }
+ /*
+ * Extract CHAP_N.
+ */
+ if (extract_param(nr_in_ptr, "CHAP_N", MAX_CHAP_N_SIZE, chap_n,
+ &type) < 0) {
+ pr_err("Could not find CHAP_N.\n");
+ goto out;
+ }
+ if (type == HEX) {
+ pr_err("Could not find CHAP_N.\n");
+ goto out;
+ }
+
+ if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
+ pr_err("CHAP_N values do not match!\n");
+ goto out;
+ }
+ pr_debug("[server] Got CHAP_N=%s\n", chap_n);
+ /*
+ * Extract CHAP_R.
+ */
+ if (extract_param(nr_in_ptr, "CHAP_R", MAX_RESPONSE_LENGTH, chap_r,
+ &type) < 0) {
+ pr_err("Could not find CHAP_R.\n");
+ goto out;
+ }
+ if (type != HEX) {
+ pr_err("Could not find CHAP_R.\n");
+ goto out;
+ }
+
+ pr_debug("[server] Got CHAP_R=%s\n", chap_r);
+ chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
+
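+	/*
+	 * Compute the expected CHAP response as MD5(CHAP_I || secret ||
+	 * CHAP_C), per RFC 1994, and compare it against the initiator's
+	 * CHAP_R.
+	 */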
+ tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ pr_err("Unable to allocate struct crypto_hash\n");
+ goto out;
+ }
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ ret = crypto_hash_init(&desc);
+ if (ret < 0) {
+ pr_err("crypto_hash_init() failed\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)&chap->id, 1);
+ ret = crypto_hash_update(&desc, &sg, 1);
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for id\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)&auth->password, strlen(auth->password));
+ ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for password\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)chap->challenge, CHAP_CHALLENGE_LENGTH);
+ ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for challenge\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ ret = crypto_hash_final(&desc, server_digest);
+ if (ret < 0) {
+ pr_err("crypto_hash_final() failed for server digest\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+ crypto_free_hash(tfm);
+
+ chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+ pr_debug("[server] MD5 Server Digest: %s\n", response);
+
+ if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
+ pr_debug("[server] MD5 Digests do not match!\n\n");
+ goto out;
+ } else
+ pr_debug("[server] MD5 Digests match, CHAP connetication"
+ " successful.\n\n");
+ /*
+ * One way authentication has succeeded, return now if mutual
+ * authentication is not enabled.
+ */
+ if (!auth->authenticate_target) {
+ kfree(challenge);
+ kfree(challenge_binhex);
+ return 0;
+ }
+ /*
+ * Get CHAP_I.
+ */
+ if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
+ pr_err("Could not find CHAP_I.\n");
+ goto out;
+ }
+
+ if (type == HEX)
+ id = (unsigned char)simple_strtoul((char *)&identifier[2],
+ &endptr, 0);
+ else
+ id = (unsigned char)simple_strtoul(identifier, &endptr, 0);
+ /*
+ * RFC 1994 says the Identifier is no more than one octet (8 bits).
+ */
+ pr_debug("[server] Got CHAP_I=%d\n", id);
+ /*
+ * Get CHAP_C.
+ */
+ if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
+ challenge, &type) < 0) {
+ pr_err("Could not find CHAP_C.\n");
+ goto out;
+ }
+
+ if (type != HEX) {
+ pr_err("Could not find CHAP_C.\n");
+ goto out;
+ }
+ pr_debug("[server] Got CHAP_C=%s\n", challenge);
+ challenge_len = chap_string_to_hex(challenge_binhex, challenge,
+ strlen(challenge));
+ if (!challenge_len) {
+ pr_err("Unable to convert incoming challenge\n");
+ goto out;
+ }
+ /*
+ * Generate CHAP_N and CHAP_R for mutual authentication.
+ */
+ tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ pr_err("Unable to allocate struct crypto_hash\n");
+ goto out;
+ }
+ desc.tfm = tfm;
+ desc.flags = 0;
+
+ ret = crypto_hash_init(&desc);
+ if (ret < 0) {
+ pr_err("crypto_hash_init() failed\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)&id, 1);
+ ret = crypto_hash_update(&desc, &sg, 1);
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for id\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ sg_init_one(&sg, (void *)auth->password_mutual,
+ strlen(auth->password_mutual));
+ ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for"
+ " password_mutual\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+ /*
+ * Include the received challenge (converted to binary above) in the digest.
+ */
+ sg_init_one(&sg, (void *)challenge_binhex, challenge_len);
+ ret = crypto_hash_update(&desc, &sg, challenge_len);
+ if (ret < 0) {
+ pr_err("crypto_hash_update() failed for ma challenge\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+
+ ret = crypto_hash_final(&desc, digest);
+ if (ret < 0) {
+ pr_err("crypto_hash_final() failed for ma digest\n");
+ crypto_free_hash(tfm);
+ goto out;
+ }
+ crypto_free_hash(tfm);
+ /*
+ * Generate CHAP_N and CHAP_R.
+ */
+ *nr_out_len = sprintf(nr_out_ptr, "CHAP_N=%s", auth->userid_mutual);
+ *nr_out_len += 1;
+ pr_debug("[server] Sending CHAP_N=%s\n", auth->userid_mutual);
+ /*
+ * Convert response from binary hex to ascii hex.
+ */
+ chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+ *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
+ response);
+ *nr_out_len += 1;
+ pr_debug("[server] Sending CHAP_R=0x%s\n", response);
+ auth_ret = 0;
+out:
+ kfree(challenge);
+ kfree(challenge_binhex);
+ return auth_ret;
+}
+
+static int chap_got_response(
+ struct iscsi_conn *conn,
+ struct iscsi_node_auth *auth,
+ char *nr_in_ptr,
+ char *nr_out_ptr,
+ unsigned int *nr_out_len)
+{
+ struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+
+ switch (chap->digest_type) {
+ case CHAP_DIGEST_MD5:
+ if (chap_server_compute_md5(conn, auth, nr_in_ptr,
+ nr_out_ptr, nr_out_len) < 0)
+ return -1;
+ return 0;
+ default:
+ pr_err("Unknown CHAP digest type %d!\n",
+ chap->digest_type);
+ return -1;
+ }
+}
+
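+/*
+ * Main CHAP exchange entry point called during login negotiation.
+ * Returns 0 while the exchange is still in progress, 1 on successful
+ * authentication, and 2 on authentication failure.
+ */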
+u32 chap_main_loop(
+ struct iscsi_conn *conn,
+ struct iscsi_node_auth *auth,
+ char *in_text,
+ char *out_text,
+ int *in_len,
+ int *out_len)
+{
+ struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+
+ if (!chap) {
+ chap = chap_server_open(conn, auth, in_text, out_text, out_len);
+ if (!chap)
+ return 2;
+ chap->chap_state = CHAP_STAGE_SERVER_AIC;
+ return 0;
+ } else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) {
+ convert_null_to_semi(in_text, *in_len);
+ if (chap_got_response(conn, auth, in_text, out_text,
+ out_len) < 0) {
+ chap_close(conn);
+ return 2;
+ }
+ if (auth->authenticate_target)
+ chap->chap_state = CHAP_STAGE_SERVER_NR;
+ else
+ *out_len = 0;
+ chap_close(conn);
+ return 1;
+ }
+
+ return 2;
+}
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
new file mode 100644
index 00000000000..2f463c09626
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -0,0 +1,31 @@
+#ifndef _ISCSI_CHAP_H_
+#define _ISCSI_CHAP_H_
+
+#define CHAP_DIGEST_MD5 5
+#define CHAP_DIGEST_SHA 6
+
+#define CHAP_CHALLENGE_LENGTH 16
+#define CHAP_CHALLENGE_STR_LEN 4096
+#define MAX_RESPONSE_LENGTH 64 /* sufficient for MD5 */
+#define MAX_CHAP_N_SIZE 512
+
+#define MD5_SIGNATURE_SIZE 16 /* 16 bytes in a MD5 message digest */
+
+#define CHAP_STAGE_CLIENT_A 1
+#define CHAP_STAGE_SERVER_AIC 2
+#define CHAP_STAGE_CLIENT_NR 3
+#define CHAP_STAGE_CLIENT_NRIC 4
+#define CHAP_STAGE_SERVER_NR 5
+
+extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
+ int *, int *);
+
+struct iscsi_chap {
+ unsigned char digest_type;
+ unsigned char id;
+ unsigned char challenge[CHAP_CHALLENGE_LENGTH];
+ unsigned int authenticate_target;
+ unsigned int chap_state;
+} ____cacheline_aligned;
+
+#endif /*** _ISCSI_CHAP_H_ ***/
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
new file mode 100644
index 00000000000..f095e65b1cc
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -0,0 +1,1882 @@
+/*******************************************************************************
+ * This file contains the configfs implementation for iSCSI Target mode
+ * from the LIO-Target Project.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/configfs.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_fabric_lib.h>
+#include <target/target_core_device.h>
+#include <target/target_core_tpg.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_nodeattrib.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_stat.h"
+#include "iscsi_target_configfs.h"
+
+struct target_fabric_configfs *lio_target_fabric_configfs;
+
+struct lio_target_configfs_attribute {
+ struct configfs_attribute attr;
+ ssize_t (*show)(void *, char *);
+ ssize_t (*store)(void *, const char *, size_t);
+};
+
+struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
+ struct config_item *item,
+ struct iscsi_tiqn **tiqn_out)
+{
+ struct se_portal_group *se_tpg = container_of(to_config_group(item),
+ struct se_portal_group, tpg_group);
+ struct iscsi_portal_group *tpg =
+ (struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr;
+ int ret;
+
+ if (!tpg) {
+ pr_err("Unable to locate struct iscsi_portal_group "
+ "pointer\n");
+ return NULL;
+ }
+ ret = iscsit_get_tpg(tpg);
+ if (ret < 0)
+ return NULL;
+
+ *tiqn_out = tpg->tpg_tiqn;
+ return tpg;
+}
+
+/* Start items for lio_target_portal_cit */
+
+static ssize_t lio_target_np_show_sctp(
+ struct se_tpg_np *se_tpg_np,
+ char *page)
+{
+ struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
+ struct iscsi_tpg_np, se_tpg_np);
+ struct iscsi_tpg_np *tpg_np_sctp;
+ ssize_t rb;
+
+ tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
+ if (tpg_np_sctp)
+ rb = sprintf(page, "1\n");
+ else
+ rb = sprintf(page, "0\n");
+
+ return rb;
+}
+
+static ssize_t lio_target_np_store_sctp(
+ struct se_tpg_np *se_tpg_np,
+ const char *page,
+ size_t count)
+{
+ struct iscsi_np *np;
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
+ struct iscsi_tpg_np, se_tpg_np);
+ struct iscsi_tpg_np *tpg_np_sctp = NULL;
+ char *endptr;
+ u32 op;
+ int ret;
+
+ op = simple_strtoul(page, &endptr, 0);
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %u\n", op);
+ return -EINVAL;
+ }
+ np = tpg_np->tpg_np;
+ if (!np) {
+ pr_err("Unable to locate struct iscsi_np from"
+ " struct iscsi_tpg_np\n");
+ return -EINVAL;
+ }
+
+ tpg = tpg_np->tpg;
+ if (iscsit_get_tpg(tpg) < 0)
+ return -EINVAL;
+
+ if (op) {
+ /*
+ * Use existing np->np_sockaddr for SCTP network portal reference
+ */
+ tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
+ np->np_ip, tpg_np, ISCSI_SCTP_TCP);
+ if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
+ goto out;
+ } else {
+ tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
+ if (!tpg_np_sctp)
+ goto out;
+
+ ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp);
+ if (ret < 0)
+ goto out;
+ }
+
+ iscsit_put_tpg(tpg);
+ return count;
+out:
+ iscsit_put_tpg(tpg);
+ return -EINVAL;
+}
+
+TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_portal_attrs[] = {
+ &lio_target_np_sctp.attr,
+ NULL,
+};
+
+/* Stop items for lio_target_portal_cit */
+
+/* Start items for lio_target_np_cit */
+
+#define MAX_PORTAL_LEN 256
+
+struct se_tpg_np *lio_target_call_addnptotpg(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np;
+ char *str, *str2, *ip_str, *port_str;
+ struct __kernel_sockaddr_storage sockaddr;
+ struct sockaddr_in *sock_in;
+ struct sockaddr_in6 *sock_in6;
+ unsigned long port;
+ int ret;
+ char buf[MAX_PORTAL_LEN + 1];
+
+ if (strlen(name) > MAX_PORTAL_LEN) {
+ pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n",
+ (int)strlen(name), MAX_PORTAL_LEN);
+ return ERR_PTR(-EOVERFLOW);
+ }
+ memset(buf, 0, MAX_PORTAL_LEN + 1);
+ snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
+
+ memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage));
+
+ str = strstr(buf, "[");
+ if (str) {
+ const char *end;
+
+ str2 = strstr(str, "]");
+ if (!str2) {
+ pr_err("Unable to locate trailing \"]\""
+ " in IPv6 iSCSI network portal address\n");
+ return ERR_PTR(-EINVAL);
+ }
+ str++; /* Skip over leading "[" */
+ *str2 = '\0'; /* Terminate the IPv6 address */
+ str2++; /* Skip over the "]" */
+ port_str = strstr(str2, ":");
+ if (!port_str) {
+ pr_err("Unable to locate \":port\""
+ " in IPv6 iSCSI network portal address\n");
+ return ERR_PTR(-EINVAL);
+ }
+ *port_str = '\0'; /* Terminate string for IP */
+ port_str++; /* Skip over ":" */
+
+ ret = strict_strtoul(port_str, 0, &port);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed for port_str: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ sock_in6 = (struct sockaddr_in6 *)&sockaddr;
+ sock_in6->sin6_family = AF_INET6;
+ sock_in6->sin6_port = htons((unsigned short)port);
+ ret = in6_pton(str, IPV6_ADDRESS_SPACE,
+ (void *)&sock_in6->sin6_addr.in6_u, -1, &end);
+ if (ret <= 0) {
+ pr_err("in6_pton returned: %d\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ str = ip_str = &buf[0];
+ port_str = strstr(ip_str, ":");
+ if (!port_str) {
+ pr_err("Unable to locate \":port\""
+ " in IPv4 iSCSI network portal address\n");
+ return ERR_PTR(-EINVAL);
+ }
+ *port_str = '\0'; /* Terminate string for IP */
+ port_str++; /* Skip over ":" */
+
+ ret = strict_strtoul(port_str, 0, &port);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed for port_str: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ sock_in = (struct sockaddr_in *)&sockaddr;
+ sock_in->sin_family = AF_INET;
+ sock_in->sin_port = htons((unsigned short)port);
+ sock_in->sin_addr.s_addr = in_aton(ip_str);
+ }
+ tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+ ret = iscsit_get_tpg(tpg);
+ if (ret < 0)
+ return ERR_PTR(-EINVAL);
+
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> %s TPGT: %hu"
+ " PORTAL: %s\n",
+ config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
+ tpg->tpgt, name);
+ /*
+ * Assume ISCSI_TCP by default. Other network portals for other
+ * iSCSI fabrics:
+ *
+ * Traditional iSCSI over SCTP (initial support)
+ * iSER/TCP (TODO, hardware available)
+ * iSER/SCTP (TODO, software emulation with osc-iwarp)
+ * iSER/IB (TODO, hardware available)
+ *
+	 * can be enabled with attributes under
+	 * /sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
+ *
+ */
+ tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL,
+ ISCSI_TCP);
+ if (IS_ERR(tpg_np)) {
+ iscsit_put_tpg(tpg);
+ return ERR_PTR(PTR_ERR(tpg_np));
+ }
+ pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
+
+ iscsit_put_tpg(tpg);
+ return &tpg_np->se_tpg_np;
+}
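
lio_target_call_addnptotpg() above accepts network portal strings of the form "172.16.0.1:3260" or "[fe80::1]:3260" taken from the configfs directory name. The sketch below mirrors the same split-and-convert logic in userspace, assuming the standard inet_pton()/sockaddr API rather than the kernel's in_aton()/in6_pton() helpers; the parse_portal() helper is hypothetical and for illustration only.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>

/* Split "addr:port" or "[addr]:port" and fill a sockaddr, mirroring the
 * bracket/colon handling in lio_target_call_addnptotpg(). */
static int parse_portal(const char *portal, struct sockaddr_storage *ss)
{
	char buf[256], *ip, *port;

	snprintf(buf, sizeof(buf), "%s", portal);

	if (buf[0] == '[') {			/* "[IPv6]:port" */
		struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)ss;
		char *end = strchr(buf, ']');

		if (!end || end[1] != ':')
			return -1;
		*end = '\0';			/* terminate the IPv6 address */
		ip = buf + 1;			/* skip the leading "[" */
		port = end + 2;			/* skip "]:" */
		s6->sin6_family = AF_INET6;
		s6->sin6_port = htons((unsigned short)strtoul(port, NULL, 0));
		return inet_pton(AF_INET6, ip, &s6->sin6_addr) == 1 ? 0 : -1;
	} else {				/* "IPv4:port" */
		struct sockaddr_in *s4 = (struct sockaddr_in *)ss;

		port = strrchr(buf, ':');
		if (!port)
			return -1;
		*port++ = '\0';			/* terminate the IPv4 address */
		ip = buf;
		s4->sin_family = AF_INET;
		s4->sin_port = htons((unsigned short)strtoul(port, NULL, 0));
		return inet_pton(AF_INET, ip, &s4->sin_addr) == 1 ? 0 : -1;
	}
}
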
+
+static void lio_target_call_delnpfromtpg(
+ struct se_tpg_np *se_tpg_np)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np;
+ struct se_portal_group *se_tpg;
+ int ret;
+
+ tpg_np = container_of(se_tpg_np, struct iscsi_tpg_np, se_tpg_np);
+ tpg = tpg_np->tpg;
+ ret = iscsit_get_tpg(tpg);
+ if (ret < 0)
+ return;
+
+ se_tpg = &tpg->tpg_se_tpg;
+ pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
+ " PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
+ tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port);
+
+ ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
+ if (ret < 0)
+ goto out;
+
+ pr_debug("LIO_Target_ConfigFS: delnpfromtpg done!\n");
+out:
+ iscsit_put_tpg(tpg);
+}
+
+/* End items for lio_target_np_cit */
+
+/* Start items for lio_target_nacl_attrib_cit */
+
+#define DEF_NACL_ATTRIB(name) \
+static ssize_t iscsi_nacl_attrib_show_##name( \
+ struct se_node_acl *se_nacl, \
+ char *page) \
+{ \
+ struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
+ se_node_acl); \
+ \
+ return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \
+} \
+ \
+static ssize_t iscsi_nacl_attrib_store_##name( \
+ struct se_node_acl *se_nacl, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
+ se_node_acl); \
+ char *endptr; \
+ u32 val; \
+ int ret; \
+ \
+ val = simple_strtoul(page, &endptr, 0); \
+ ret = iscsit_na_##name(nacl, val); \
+ if (ret < 0) \
+ return ret; \
+ \
+ return count; \
+}
+
+#define NACL_ATTR(_name, _mode) TF_NACL_ATTRIB_ATTR(iscsi, _name, _mode);
+/*
+ * Define iscsi_node_attrib_s_dataout_timeout
+ */
+DEF_NACL_ATTRIB(dataout_timeout);
+NACL_ATTR(dataout_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_dataout_timeout_retries
+ */
+DEF_NACL_ATTRIB(dataout_timeout_retries);
+NACL_ATTR(dataout_timeout_retries, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_default_erl
+ */
+DEF_NACL_ATTRIB(default_erl);
+NACL_ATTR(default_erl, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_nopin_timeout
+ */
+DEF_NACL_ATTRIB(nopin_timeout);
+NACL_ATTR(nopin_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_nopin_response_timeout
+ */
+DEF_NACL_ATTRIB(nopin_response_timeout);
+NACL_ATTR(nopin_response_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_random_datain_pdu_offsets
+ */
+DEF_NACL_ATTRIB(random_datain_pdu_offsets);
+NACL_ATTR(random_datain_pdu_offsets, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_random_datain_seq_offsets
+ */
+DEF_NACL_ATTRIB(random_datain_seq_offsets);
+NACL_ATTR(random_datain_seq_offsets, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_node_attrib_s_random_r2t_offsets
+ */
+DEF_NACL_ATTRIB(random_r2t_offsets);
+NACL_ATTR(random_r2t_offsets, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
+ &iscsi_nacl_attrib_dataout_timeout.attr,
+ &iscsi_nacl_attrib_dataout_timeout_retries.attr,
+ &iscsi_nacl_attrib_default_erl.attr,
+ &iscsi_nacl_attrib_nopin_timeout.attr,
+ &iscsi_nacl_attrib_nopin_response_timeout.attr,
+ &iscsi_nacl_attrib_random_datain_pdu_offsets.attr,
+ &iscsi_nacl_attrib_random_datain_seq_offsets.attr,
+ &iscsi_nacl_attrib_random_r2t_offsets.attr,
+ NULL,
+};
+
+/* End items for lio_target_nacl_attrib_cit */
+
+/* Start items for lio_target_nacl_auth_cit */
+
+#define __DEF_NACL_AUTH_STR(prefix, name, flags) \
+static ssize_t __iscsi_##prefix##_show_##name( \
+ struct iscsi_node_acl *nacl, \
+ char *page) \
+{ \
+ struct iscsi_node_auth *auth = &nacl->node_auth; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \
+} \
+ \
+static ssize_t __iscsi_##prefix##_store_##name( \
+ struct iscsi_node_acl *nacl, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct iscsi_node_auth *auth = &nacl->node_auth; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ snprintf(auth->name, PAGE_SIZE, "%s", page); \
+ if (!strncmp("NULL", auth->name, 4)) \
+ auth->naf_flags &= ~flags; \
+ else \
+ auth->naf_flags |= flags; \
+ \
+ if ((auth->naf_flags & NAF_USERID_IN_SET) && \
+ (auth->naf_flags & NAF_PASSWORD_IN_SET)) \
+ auth->authenticate_target = 1; \
+ else \
+ auth->authenticate_target = 0; \
+ \
+ return count; \
+}
+
+#define __DEF_NACL_AUTH_INT(prefix, name) \
+static ssize_t __iscsi_##prefix##_show_##name( \
+ struct iscsi_node_acl *nacl, \
+ char *page) \
+{ \
+ struct iscsi_node_auth *auth = &nacl->node_auth; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \
+}
+
+#define DEF_NACL_AUTH_STR(name, flags) \
+ __DEF_NACL_AUTH_STR(nacl_auth, name, flags) \
+static ssize_t iscsi_nacl_auth_show_##name( \
+ struct se_node_acl *nacl, \
+ char *page) \
+{ \
+ return __iscsi_nacl_auth_show_##name(container_of(nacl, \
+ struct iscsi_node_acl, se_node_acl), page); \
+} \
+static ssize_t iscsi_nacl_auth_store_##name( \
+ struct se_node_acl *nacl, \
+ const char *page, \
+ size_t count) \
+{ \
+ return __iscsi_nacl_auth_store_##name(container_of(nacl, \
+ struct iscsi_node_acl, se_node_acl), page, count); \
+}
+
+#define DEF_NACL_AUTH_INT(name) \
+ __DEF_NACL_AUTH_INT(nacl_auth, name) \
+static ssize_t iscsi_nacl_auth_show_##name( \
+ struct se_node_acl *nacl, \
+ char *page) \
+{ \
+ return __iscsi_nacl_auth_show_##name(container_of(nacl, \
+ struct iscsi_node_acl, se_node_acl), page); \
+}
+
+#define AUTH_ATTR(_name, _mode) TF_NACL_AUTH_ATTR(iscsi, _name, _mode);
+#define AUTH_ATTR_RO(_name) TF_NACL_AUTH_ATTR_RO(iscsi, _name);
+
+/*
+ * One-way authentication userid
+ */
+DEF_NACL_AUTH_STR(userid, NAF_USERID_SET);
+AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
+/*
+ * One-way authentication password
+ */
+DEF_NACL_AUTH_STR(password, NAF_PASSWORD_SET);
+AUTH_ATTR(password, S_IRUGO | S_IWUSR);
+/*
+ * Enforce mutual authentication
+ */
+DEF_NACL_AUTH_INT(authenticate_target);
+AUTH_ATTR_RO(authenticate_target);
+/*
+ * Mutual authentication userid
+ */
+DEF_NACL_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
+/*
+ * Mutual authentication password
+ */
+DEF_NACL_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_nacl_auth_attrs[] = {
+ &iscsi_nacl_auth_userid.attr,
+ &iscsi_nacl_auth_password.attr,
+ &iscsi_nacl_auth_authenticate_target.attr,
+ &iscsi_nacl_auth_userid_mutual.attr,
+ &iscsi_nacl_auth_password_mutual.attr,
+ NULL,
+};
+
+/* End items for lio_target_nacl_auth_cit */
+
+/* Start items for lio_target_nacl_param_cit */
+
+#define DEF_NACL_PARAM(name) \
+static ssize_t iscsi_nacl_param_show_##name( \
+ struct se_node_acl *se_nacl, \
+ char *page) \
+{ \
+ struct iscsi_session *sess; \
+ struct se_session *se_sess; \
+ ssize_t rb; \
+ \
+ spin_lock_bh(&se_nacl->nacl_sess_lock); \
+ se_sess = se_nacl->nacl_sess; \
+ if (!se_sess) { \
+ rb = snprintf(page, PAGE_SIZE, \
+ "No Active iSCSI Session\n"); \
+ } else { \
+ sess = se_sess->fabric_sess_ptr; \
+ rb = snprintf(page, PAGE_SIZE, "%u\n", \
+ (u32)sess->sess_ops->name); \
+ } \
+ spin_unlock_bh(&se_nacl->nacl_sess_lock); \
+ \
+ return rb; \
+}
+
+#define NACL_PARAM_ATTR(_name) TF_NACL_PARAM_ATTR_RO(iscsi, _name);
+
+DEF_NACL_PARAM(MaxConnections);
+NACL_PARAM_ATTR(MaxConnections);
+
+DEF_NACL_PARAM(InitialR2T);
+NACL_PARAM_ATTR(InitialR2T);
+
+DEF_NACL_PARAM(ImmediateData);
+NACL_PARAM_ATTR(ImmediateData);
+
+DEF_NACL_PARAM(MaxBurstLength);
+NACL_PARAM_ATTR(MaxBurstLength);
+
+DEF_NACL_PARAM(FirstBurstLength);
+NACL_PARAM_ATTR(FirstBurstLength);
+
+DEF_NACL_PARAM(DefaultTime2Wait);
+NACL_PARAM_ATTR(DefaultTime2Wait);
+
+DEF_NACL_PARAM(DefaultTime2Retain);
+NACL_PARAM_ATTR(DefaultTime2Retain);
+
+DEF_NACL_PARAM(MaxOutstandingR2T);
+NACL_PARAM_ATTR(MaxOutstandingR2T);
+
+DEF_NACL_PARAM(DataPDUInOrder);
+NACL_PARAM_ATTR(DataPDUInOrder);
+
+DEF_NACL_PARAM(DataSequenceInOrder);
+NACL_PARAM_ATTR(DataSequenceInOrder);
+
+DEF_NACL_PARAM(ErrorRecoveryLevel);
+NACL_PARAM_ATTR(ErrorRecoveryLevel);
+
+static struct configfs_attribute *lio_target_nacl_param_attrs[] = {
+ &iscsi_nacl_param_MaxConnections.attr,
+ &iscsi_nacl_param_InitialR2T.attr,
+ &iscsi_nacl_param_ImmediateData.attr,
+ &iscsi_nacl_param_MaxBurstLength.attr,
+ &iscsi_nacl_param_FirstBurstLength.attr,
+ &iscsi_nacl_param_DefaultTime2Wait.attr,
+ &iscsi_nacl_param_DefaultTime2Retain.attr,
+ &iscsi_nacl_param_MaxOutstandingR2T.attr,
+ &iscsi_nacl_param_DataPDUInOrder.attr,
+ &iscsi_nacl_param_DataSequenceInOrder.attr,
+ &iscsi_nacl_param_ErrorRecoveryLevel.attr,
+ NULL,
+};
+
+/* End items for lio_target_nacl_param_cit */
+
+/* Start items for lio_target_acl_cit */
+
+static ssize_t lio_target_nacl_show_info(
+ struct se_node_acl *se_nacl,
+ char *page)
+{
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ struct se_session *se_sess;
+ ssize_t rb = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (!se_sess) {
+ rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
+ " Endpoint: %s\n", se_nacl->initiatorname);
+ } else {
+ sess = se_sess->fabric_sess_ptr;
+
+ if (sess->sess_ops->InitiatorName)
+ rb += sprintf(page+rb, "InitiatorName: %s\n",
+ sess->sess_ops->InitiatorName);
+ if (sess->sess_ops->InitiatorAlias)
+ rb += sprintf(page+rb, "InitiatorAlias: %s\n",
+ sess->sess_ops->InitiatorAlias);
+
+ rb += sprintf(page+rb, "LIO Session ID: %u "
+ "ISID: 0x%02x %02x %02x %02x %02x %02x "
+ "TSIH: %hu ", sess->sid,
+ sess->isid[0], sess->isid[1], sess->isid[2],
+ sess->isid[3], sess->isid[4], sess->isid[5],
+ sess->tsih);
+ rb += sprintf(page+rb, "SessionType: %s\n",
+ (sess->sess_ops->SessionType) ?
+ "Discovery" : "Normal");
+ rb += sprintf(page+rb, "Session State: ");
+ switch (sess->session_state) {
+ case TARG_SESS_STATE_FREE:
+ rb += sprintf(page+rb, "TARG_SESS_FREE\n");
+ break;
+ case TARG_SESS_STATE_ACTIVE:
+ rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
+ break;
+ case TARG_SESS_STATE_LOGGED_IN:
+ rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
+ break;
+ case TARG_SESS_STATE_FAILED:
+ rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
+ break;
+ case TARG_SESS_STATE_IN_CONTINUE:
+ rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
+ break;
+ default:
+ rb += sprintf(page+rb, "ERROR: Unknown Session"
+ " State!\n");
+ break;
+ }
+
+ rb += sprintf(page+rb, "---------------------[iSCSI Session"
+ " Values]-----------------------\n");
+ rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
+ " : MaxCmdSN : ITT : TTT\n");
+ rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
+ " 0x%08x 0x%08x\n",
+ sess->cmdsn_window,
+ (sess->max_cmd_sn - sess->exp_cmd_sn) + 1,
+ sess->exp_cmd_sn, sess->max_cmd_sn,
+ sess->init_task_tag, sess->targ_xfer_tag);
+ rb += sprintf(page+rb, "----------------------[iSCSI"
+ " Connections]-------------------------\n");
+
+ spin_lock(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+ rb += sprintf(page+rb, "CID: %hu Connection"
+ " State: ", conn->cid);
+ switch (conn->conn_state) {
+ case TARG_CONN_STATE_FREE:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_FREE\n");
+ break;
+ case TARG_CONN_STATE_XPT_UP:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_XPT_UP\n");
+ break;
+ case TARG_CONN_STATE_IN_LOGIN:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_IN_LOGIN\n");
+ break;
+ case TARG_CONN_STATE_LOGGED_IN:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_LOGGED_IN\n");
+ break;
+ case TARG_CONN_STATE_IN_LOGOUT:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_IN_LOGOUT\n");
+ break;
+ case TARG_CONN_STATE_LOGOUT_REQUESTED:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_LOGOUT_REQUESTED\n");
+ break;
+ case TARG_CONN_STATE_CLEANUP_WAIT:
+ rb += sprintf(page+rb,
+ "TARG_CONN_STATE_CLEANUP_WAIT\n");
+ break;
+ default:
+ rb += sprintf(page+rb,
+ "ERROR: Unknown Connection State!\n");
+ break;
+ }
+
+ rb += sprintf(page+rb, " Address %s %s", conn->login_ip,
+ (conn->network_transport == ISCSI_TCP) ?
+ "TCP" : "SCTP");
+ rb += sprintf(page+rb, " StatSN: 0x%08x\n",
+ conn->stat_sn);
+ }
+ spin_unlock(&sess->conn_lock);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return rb;
+}
+
+TF_NACL_BASE_ATTR_RO(lio_target, info);
+
+static ssize_t lio_target_nacl_show_cmdsn_depth(
+ struct se_node_acl *se_nacl,
+ char *page)
+{
+ return sprintf(page, "%u\n", se_nacl->queue_depth);
+}
+
+static ssize_t lio_target_nacl_store_cmdsn_depth(
+ struct se_node_acl *se_nacl,
+ const char *page,
+ size_t count)
+{
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
+ struct config_item *acl_ci, *tpg_ci, *wwn_ci;
+ char *endptr;
+ u32 cmdsn_depth = 0;
+ int ret;
+
+ cmdsn_depth = simple_strtoul(page, &endptr, 0);
+ if (cmdsn_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
+ pr_err("Passed cmdsn_depth: %u exceeds"
+ " TA_DEFAULT_CMDSN_DEPTH_MAX: %u\n", cmdsn_depth,
+ TA_DEFAULT_CMDSN_DEPTH_MAX);
+ return -EINVAL;
+ }
+ acl_ci = &se_nacl->acl_group.cg_item;
+ if (!acl_ci) {
+		pr_err("Unable to locate acl_ci\n");
+ return -EINVAL;
+ }
+ tpg_ci = &acl_ci->ci_parent->ci_group->cg_item;
+ if (!tpg_ci) {
+ pr_err("Unable to locate tpg_ci\n");
+ return -EINVAL;
+ }
+ wwn_ci = &tpg_ci->ci_group->cg_item;
+ if (!wwn_ci) {
+ pr_err("Unable to locate config_item wwn_ci\n");
+ return -EINVAL;
+ }
+
+ if (iscsit_get_tpg(tpg) < 0)
+ return -EINVAL;
+ /*
+ * iscsit_tpg_set_initiator_node_queue_depth() assumes force=1
+ */
+ ret = iscsit_tpg_set_initiator_node_queue_depth(tpg,
+ config_item_name(acl_ci), cmdsn_depth, 1);
+
+ pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
+		" InitiatorName: %s\n", config_item_name(wwn_ci),
+ config_item_name(tpg_ci), cmdsn_depth,
+ config_item_name(acl_ci));
+
+ iscsit_put_tpg(tpg);
+ return (!ret) ? count : (ssize_t)ret;
+}
+
+TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_initiator_attrs[] = {
+ &lio_target_nacl_info.attr,
+ &lio_target_nacl_cmdsn_depth.attr,
+ NULL,
+};
+
+static struct se_node_acl *lio_tpg_alloc_fabric_acl(
+ struct se_portal_group *se_tpg)
+{
+ struct iscsi_node_acl *acl;
+
+ acl = kzalloc(sizeof(struct iscsi_node_acl), GFP_KERNEL);
+ if (!acl) {
+ pr_err("Unable to allocate memory for struct iscsi_node_acl\n");
+ return NULL;
+ }
+
+ return &acl->se_node_acl;
+}
+
+static struct se_node_acl *lio_target_make_nodeacl(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct config_group *stats_cg;
+ struct iscsi_node_acl *acl;
+ struct se_node_acl *se_nacl_new, *se_nacl;
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
+ u32 cmdsn_depth;
+
+ se_nacl_new = lio_tpg_alloc_fabric_acl(se_tpg);
+ if (!se_nacl_new)
+ return ERR_PTR(-ENOMEM);
+
+ acl = container_of(se_nacl_new, struct iscsi_node_acl,
+ se_node_acl);
+
+ cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+ /*
+ * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+	 * when converting a NodeACL from demo mode -> explicit
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+ name, cmdsn_depth);
+ if (IS_ERR(se_nacl))
+ return se_nacl;
+
+ stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
+
+ stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!stats_cg->default_groups) {
+ pr_err("Unable to allocate memory for"
+ " stats_cg->default_groups\n");
+ core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
+ kfree(acl);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
+ stats_cg->default_groups[1] = NULL;
+ config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
+ "iscsi_sess_stats", &iscsi_stat_sess_cit);
+
+ return se_nacl;
+}
+
+static void lio_target_drop_nodeacl(
+ struct se_node_acl *se_nacl)
+{
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct iscsi_node_acl *acl = container_of(se_nacl,
+ struct iscsi_node_acl, se_node_acl);
+ struct config_item *df_item;
+ struct config_group *stats_cg;
+ int i;
+
+ stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
+ for (i = 0; stats_cg->default_groups[i]; i++) {
+ df_item = &stats_cg->default_groups[i]->cg_item;
+ stats_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ kfree(stats_cg->default_groups);
+
+ core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
+ kfree(acl);
+}
+
+/* End items for lio_target_acl_cit */
+
+/* Start items for lio_target_tpg_attrib_cit */
+
+#define DEF_TPG_ATTRIB(name) \
+ \
+static ssize_t iscsi_tpg_attrib_show_##name( \
+ struct se_portal_group *se_tpg, \
+ char *page) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ ssize_t rb; \
+ \
+ if (iscsit_get_tpg(tpg) < 0) \
+ return -EINVAL; \
+ \
+ rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \
+ iscsit_put_tpg(tpg); \
+ return rb; \
+} \
+ \
+static ssize_t iscsi_tpg_attrib_store_##name( \
+ struct se_portal_group *se_tpg, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ char *endptr; \
+ u32 val; \
+ int ret; \
+ \
+ if (iscsit_get_tpg(tpg) < 0) \
+ return -EINVAL; \
+ \
+ val = simple_strtoul(page, &endptr, 0); \
+ ret = iscsit_ta_##name(tpg, val); \
+ if (ret < 0) \
+ goto out; \
+ \
+ iscsit_put_tpg(tpg); \
+ return count; \
+out: \
+ iscsit_put_tpg(tpg); \
+ return ret; \
+}
+
+#define TPG_ATTR(_name, _mode) TF_TPG_ATTRIB_ATTR(iscsi, _name, _mode);
+
+/*
+ * Define iscsi_tpg_attrib_s_authentication
+ */
+DEF_TPG_ATTRIB(authentication);
+TPG_ATTR(authentication, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_login_timeout
+ */
+DEF_TPG_ATTRIB(login_timeout);
+TPG_ATTR(login_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_netif_timeout
+ */
+DEF_TPG_ATTRIB(netif_timeout);
+TPG_ATTR(netif_timeout, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_generate_node_acls
+ */
+DEF_TPG_ATTRIB(generate_node_acls);
+TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_default_cmdsn_depth
+ */
+DEF_TPG_ATTRIB(default_cmdsn_depth);
+TPG_ATTR(default_cmdsn_depth, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_cache_dynamic_acls
+ */
+DEF_TPG_ATTRIB(cache_dynamic_acls);
+TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_demo_mode_write_protect
+ */
+DEF_TPG_ATTRIB(demo_mode_write_protect);
+TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_prod_mode_write_protect
+ */
+DEF_TPG_ATTRIB(prod_mode_write_protect);
+TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
+ &iscsi_tpg_attrib_authentication.attr,
+ &iscsi_tpg_attrib_login_timeout.attr,
+ &iscsi_tpg_attrib_netif_timeout.attr,
+ &iscsi_tpg_attrib_generate_node_acls.attr,
+ &iscsi_tpg_attrib_default_cmdsn_depth.attr,
+ &iscsi_tpg_attrib_cache_dynamic_acls.attr,
+ &iscsi_tpg_attrib_demo_mode_write_protect.attr,
+ &iscsi_tpg_attrib_prod_mode_write_protect.attr,
+ NULL,
+};
+
+/* End items for lio_target_tpg_attrib_cit */
+
+/* Start items for lio_target_tpg_param_cit */
+
+#define DEF_TPG_PARAM(name) \
+static ssize_t iscsi_tpg_param_show_##name( \
+ struct se_portal_group *se_tpg, \
+ char *page) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_param *param; \
+ ssize_t rb; \
+ \
+ if (iscsit_get_tpg(tpg) < 0) \
+ return -EINVAL; \
+ \
+ param = iscsi_find_param_from_key(__stringify(name), \
+ tpg->param_list); \
+ if (!param) { \
+ iscsit_put_tpg(tpg); \
+ return -EINVAL; \
+ } \
+ rb = snprintf(page, PAGE_SIZE, "%s\n", param->value); \
+ \
+ iscsit_put_tpg(tpg); \
+ return rb; \
+} \
+static ssize_t iscsi_tpg_param_store_##name( \
+ struct se_portal_group *se_tpg, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct iscsi_portal_group *tpg = container_of(se_tpg, \
+ struct iscsi_portal_group, tpg_se_tpg); \
+ char *buf; \
+ int ret; \
+ \
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL); \
+ if (!buf) \
+ return -ENOMEM; \
+ snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page); \
+ buf[strlen(buf)-1] = '\0'; /* Kill newline */ \
+ \
+ if (iscsit_get_tpg(tpg) < 0) { \
+ kfree(buf); \
+ return -EINVAL; \
+ } \
+ \
+ ret = iscsi_change_param_value(buf, tpg->param_list, 1); \
+ if (ret < 0) \
+ goto out; \
+ \
+ kfree(buf); \
+ iscsit_put_tpg(tpg); \
+ return count; \
+out: \
+ kfree(buf); \
+ iscsit_put_tpg(tpg); \
+ return -EINVAL; \
+}
+
+#define TPG_PARAM_ATTR(_name, _mode) TF_TPG_PARAM_ATTR(iscsi, _name, _mode);
+
+DEF_TPG_PARAM(AuthMethod);
+TPG_PARAM_ATTR(AuthMethod, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(HeaderDigest);
+TPG_PARAM_ATTR(HeaderDigest, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DataDigest);
+TPG_PARAM_ATTR(DataDigest, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxConnections);
+TPG_PARAM_ATTR(MaxConnections, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(TargetAlias);
+TPG_PARAM_ATTR(TargetAlias, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(InitialR2T);
+TPG_PARAM_ATTR(InitialR2T, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(ImmediateData);
+TPG_PARAM_ATTR(ImmediateData, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxRecvDataSegmentLength);
+TPG_PARAM_ATTR(MaxRecvDataSegmentLength, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxBurstLength);
+TPG_PARAM_ATTR(MaxBurstLength, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(FirstBurstLength);
+TPG_PARAM_ATTR(FirstBurstLength, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DefaultTime2Wait);
+TPG_PARAM_ATTR(DefaultTime2Wait, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DefaultTime2Retain);
+TPG_PARAM_ATTR(DefaultTime2Retain, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(MaxOutstandingR2T);
+TPG_PARAM_ATTR(MaxOutstandingR2T, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DataPDUInOrder);
+TPG_PARAM_ATTR(DataPDUInOrder, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(DataSequenceInOrder);
+TPG_PARAM_ATTR(DataSequenceInOrder, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(ErrorRecoveryLevel);
+TPG_PARAM_ATTR(ErrorRecoveryLevel, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(IFMarker);
+TPG_PARAM_ATTR(IFMarker, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(OFMarker);
+TPG_PARAM_ATTR(OFMarker, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(IFMarkInt);
+TPG_PARAM_ATTR(IFMarkInt, S_IRUGO | S_IWUSR);
+
+DEF_TPG_PARAM(OFMarkInt);
+TPG_PARAM_ATTR(OFMarkInt, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
+ &iscsi_tpg_param_AuthMethod.attr,
+ &iscsi_tpg_param_HeaderDigest.attr,
+ &iscsi_tpg_param_DataDigest.attr,
+ &iscsi_tpg_param_MaxConnections.attr,
+ &iscsi_tpg_param_TargetAlias.attr,
+ &iscsi_tpg_param_InitialR2T.attr,
+ &iscsi_tpg_param_ImmediateData.attr,
+ &iscsi_tpg_param_MaxRecvDataSegmentLength.attr,
+ &iscsi_tpg_param_MaxBurstLength.attr,
+ &iscsi_tpg_param_FirstBurstLength.attr,
+ &iscsi_tpg_param_DefaultTime2Wait.attr,
+ &iscsi_tpg_param_DefaultTime2Retain.attr,
+ &iscsi_tpg_param_MaxOutstandingR2T.attr,
+ &iscsi_tpg_param_DataPDUInOrder.attr,
+ &iscsi_tpg_param_DataSequenceInOrder.attr,
+ &iscsi_tpg_param_ErrorRecoveryLevel.attr,
+ &iscsi_tpg_param_IFMarker.attr,
+ &iscsi_tpg_param_OFMarker.attr,
+ &iscsi_tpg_param_IFMarkInt.attr,
+ &iscsi_tpg_param_OFMarkInt.attr,
+ NULL,
+};
+
+/* End items for lio_target_tpg_param_cit */
+
+/* Start items for lio_target_tpg_cit */
+
+static ssize_t lio_target_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
+ ssize_t len;
+
+ spin_lock(&tpg->tpg_state_lock);
+ len = sprintf(page, "%d\n",
+ (tpg->tpg_state == TPG_STATE_ACTIVE) ? 1 : 0);
+ spin_unlock(&tpg->tpg_state_lock);
+
+ return len;
+}
+
+static ssize_t lio_target_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct iscsi_portal_group *tpg = container_of(se_tpg,
+ struct iscsi_portal_group, tpg_se_tpg);
+ char *endptr;
+ u32 op;
+ int ret = 0;
+
+ op = simple_strtoul(page, &endptr, 0);
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %u\n", op);
+ return -EINVAL;
+ }
+
+ ret = iscsit_get_tpg(tpg);
+ if (ret < 0)
+ return -EINVAL;
+
+ if (op) {
+ ret = iscsit_tpg_enable_portal_group(tpg);
+ if (ret < 0)
+ goto out;
+ } else {
+ /*
+ * iscsit_tpg_disable_portal_group() assumes force=1
+ */
+ ret = iscsit_tpg_disable_portal_group(tpg, 1);
+ if (ret < 0)
+ goto out;
+ }
+
+ iscsit_put_tpg(tpg);
+ return count;
+out:
+ iscsit_put_tpg(tpg);
+ return -EINVAL;
+}
+
+TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_tpg_attrs[] = {
+ &lio_target_tpg_enable.attr,
+ NULL,
+};
+
+/* End items for lio_target_tpg_cit */
+
+/* Start items for lio_target_tiqn_cit */
+
+struct se_portal_group *lio_target_tiqn_addtpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tiqn *tiqn;
+ char *tpgt_str, *end_ptr;
+ int ret = 0;
+ unsigned short int tpgt;
+
+ tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+ /*
+ * Only tpgt_# directory groups can be created below
+ * target/iscsi/iqn.superturodiskarry/
+ */
+ tpgt_str = strstr(name, "tpgt_");
+ if (!tpgt_str) {
+ pr_err("Unable to locate \"tpgt_#\" directory"
+ " group\n");
+ return NULL;
+ }
+ tpgt_str += 5; /* Skip ahead of "tpgt_" */
+ tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
+
+ tpg = iscsit_alloc_portal_group(tiqn, tpgt);
+ if (!tpg)
+ return NULL;
+
+ ret = core_tpg_register(
+ &lio_target_fabric_configfs->tf_ops,
+ wwn, &tpg->tpg_se_tpg, (void *)tpg,
+ TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0)
+ return NULL;
+
+ ret = iscsit_tpg_add_portal_group(tiqn, tpg);
+ if (ret != 0)
+ goto out;
+
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated TPG: %s\n",
+ name);
+ return &tpg->tpg_se_tpg;
+out:
+ core_tpg_deregister(&tpg->tpg_se_tpg);
+ kfree(tpg);
+ return NULL;
+}
+
+void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tiqn *tiqn;
+
+ tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+ tiqn = tpg->tpg_tiqn;
+ /*
+ * iscsit_tpg_del_portal_group() assumes force=1
+ */
+ pr_debug("LIO_Target_ConfigFS: DEREGISTER -> Releasing TPG\n");
+ iscsit_tpg_del_portal_group(tiqn, tpg, 1);
+}
+
+/* End items for lio_target_tiqn_cit */
+
+/* Start LIO-Target TIQN struct config_item lio_target_cit */
+
+static ssize_t lio_target_wwn_show_attr_lio_version(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n");
+}
+
+TF_WWN_ATTR_RO(lio_target, lio_version);
+
+static struct configfs_attribute *lio_target_wwn_attrs[] = {
+ &lio_target_wwn_lio_version.attr,
+ NULL,
+};
+
+struct se_wwn *lio_target_call_coreaddtiqn(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct config_group *stats_cg;
+ struct iscsi_tiqn *tiqn;
+
+ tiqn = iscsit_add_tiqn((unsigned char *)name);
+ if (IS_ERR(tiqn))
+ return ERR_PTR(PTR_ERR(tiqn));
+ /*
+ * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
+ */
+ stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
+
+ stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
+ GFP_KERNEL);
+ if (!stats_cg->default_groups) {
+ pr_err("Unable to allocate memory for"
+ " stats_cg->default_groups\n");
+ iscsit_del_tiqn(tiqn);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
+ stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
+ stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
+ stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
+ stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
+ stats_cg->default_groups[5] = NULL;
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
+ "iscsi_instance", &iscsi_stat_instance_cit);
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
+ "iscsi_sess_err", &iscsi_stat_sess_err_cit);
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
+ "iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
+ "iscsi_login_stats", &iscsi_stat_login_cit);
+ config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
+ "iscsi_logout_stats", &iscsi_stat_logout_cit);
+
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
+ " %s\n", name);
+ return &tiqn->tiqn_wwn;
+}
+
+void lio_target_call_coredeltiqn(
+ struct se_wwn *wwn)
+{
+ struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+ struct config_item *df_item;
+ struct config_group *stats_cg;
+ int i;
+
+ stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
+ for (i = 0; stats_cg->default_groups[i]; i++) {
+ df_item = &stats_cg->default_groups[i]->cg_item;
+ stats_cg->default_groups[i] = NULL;
+ config_item_put(df_item);
+ }
+ kfree(stats_cg->default_groups);
+
+ pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
+ tiqn->tiqn);
+ iscsit_del_tiqn(tiqn);
+}
+
+/* End LIO-Target TIQN struct config_item lio_target_cit */
+
+/* Start lio_target_discovery_auth_cit */
+
+#define DEF_DISC_AUTH_STR(name, flags) \
+ __DEF_NACL_AUTH_STR(disc, name, flags) \
+static ssize_t iscsi_disc_show_##name( \
+ struct target_fabric_configfs *tf, \
+ char *page) \
+{ \
+ return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
+ page); \
+} \
+static ssize_t iscsi_disc_store_##name( \
+ struct target_fabric_configfs *tf, \
+ const char *page, \
+ size_t count) \
+{ \
+ return __iscsi_disc_store_##name(&iscsit_global->discovery_acl, \
+ page, count); \
+}
+
+#define DEF_DISC_AUTH_INT(name) \
+ __DEF_NACL_AUTH_INT(disc, name) \
+static ssize_t iscsi_disc_show_##name( \
+ struct target_fabric_configfs *tf, \
+ char *page) \
+{ \
+ return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
+ page); \
+}
+
+#define DISC_AUTH_ATTR(_name, _mode) TF_DISC_ATTR(iscsi, _name, _mode)
+#define DISC_AUTH_ATTR_RO(_name) TF_DISC_ATTR_RO(iscsi, _name)
+
+/*
+ * One-way authentication userid
+ */
+DEF_DISC_AUTH_STR(userid, NAF_USERID_SET);
+DISC_AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
+/*
+ * One-way authentication password
+ */
+DEF_DISC_AUTH_STR(password, NAF_PASSWORD_SET);
+DISC_AUTH_ATTR(password, S_IRUGO | S_IWUSR);
+/*
+ * Enforce mutual authentication
+ */
+DEF_DISC_AUTH_INT(authenticate_target);
+DISC_AUTH_ATTR_RO(authenticate_target);
+/*
+ * Mutual authentication userid
+ */
+DEF_DISC_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+DISC_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
+/*
+ * Mutual authentication password
+ */
+DEF_DISC_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+DISC_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
+
+/*
+ * enforce_discovery_auth
+ */
+static ssize_t iscsi_disc_show_enforce_discovery_auth(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth;
+
+ return sprintf(page, "%d\n", discovery_auth->enforce_discovery_auth);
+}
+
+static ssize_t iscsi_disc_store_enforce_discovery_auth(
+ struct target_fabric_configfs *tf,
+ const char *page,
+ size_t count)
+{
+ struct iscsi_param *param;
+ struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg;
+ char *endptr;
+ u32 op;
+
+ op = simple_strtoul(page, &endptr, 0);
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for enforce_discovery_auth:"
+ " %u\n", op);
+ return -EINVAL;
+ }
+
+ if (!discovery_tpg) {
+ pr_err("iscsit_global->discovery_tpg is NULL\n");
+ return -EINVAL;
+ }
+
+ param = iscsi_find_param_from_key(AUTHMETHOD,
+ discovery_tpg->param_list);
+ if (!param)
+ return -EINVAL;
+
+ if (op) {
+ /*
+ * Reset the AuthMethod key to CHAP.
+ */
+ if (iscsi_update_param_value(param, CHAP) < 0)
+ return -EINVAL;
+
+ discovery_tpg->tpg_attrib.authentication = 1;
+ iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 1;
+ pr_debug("LIO-CORE[0] Successfully enabled"
+ " authentication enforcement for iSCSI"
+ " Discovery TPG\n");
+ } else {
+ /*
+ * Reset the AuthMethod key to CHAP,None
+ */
+ if (iscsi_update_param_value(param, "CHAP,None") < 0)
+ return -EINVAL;
+
+ discovery_tpg->tpg_attrib.authentication = 0;
+ iscsit_global->discovery_acl.node_auth.enforce_discovery_auth = 0;
+ pr_debug("LIO-CORE[0] Successfully disabled"
+ " authentication enforcement for iSCSI"
+ " Discovery TPG\n");
+ }
+
+ return count;
+}
+
+DISC_AUTH_ATTR(enforce_discovery_auth, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
+ &iscsi_disc_userid.attr,
+ &iscsi_disc_password.attr,
+ &iscsi_disc_authenticate_target.attr,
+ &iscsi_disc_userid_mutual.attr,
+ &iscsi_disc_password_mutual.attr,
+ &iscsi_disc_enforce_discovery_auth.attr,
+ NULL,
+};
+
+/* End lio_target_discovery_auth_cit */
+
+/* Start functions for target_core_fabric_ops */
+
+static char *iscsi_get_fabric_name(void)
+{
+ return "iSCSI";
+}
+
+static u32 iscsi_get_task_tag(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ return cmd->init_task_tag;
+}
+
+static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ return cmd->i_state;
+}
+
+static int iscsi_is_state_remove(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ return (cmd->i_state == ISTATE_REMOVE);
+}
+
+static int lio_sess_logged_in(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+ int ret;
+ /*
+ * Called with spin_lock_bh(&tpg_lock); and
+ * spin_lock(&se_tpg->session_lock); held.
+ */
+ spin_lock(&sess->conn_lock);
+ ret = (sess->session_state != TARG_SESS_STATE_LOGGED_IN);
+ spin_unlock(&sess->conn_lock);
+
+ return ret;
+}
+
+static u32 lio_sess_get_index(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+ return sess->session_index;
+}
+
+static u32 lio_sess_get_initiator_sid(
+ struct se_session *se_sess,
+ unsigned char *buf,
+ u32 size)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+ /*
+ * iSCSI Initiator Session Identifier from RFC-3720.
+ */
+ return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
+ sess->isid[0], sess->isid[1], sess->isid[2],
+ sess->isid[3], sess->isid[4], sess->isid[5]);
+}
+
+static int lio_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ cmd->i_state = ISTATE_SEND_DATAIN;
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ return 0;
+}
+
+static int lio_write_pending(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ if (!cmd->immediate_data && !cmd->unsolicited_data)
+ return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1);
+
+ return 0;
+}
+
+static int lio_write_pending_status(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ int ret;
+
+ spin_lock_bh(&cmd->istate_lock);
+ ret = !(cmd->cmd_flags & ICF_GOT_LAST_DATAOUT);
+ spin_unlock_bh(&cmd->istate_lock);
+
+ return ret;
+}
+
+static int lio_queue_status(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ cmd->i_state = ISTATE_SEND_STATUS;
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ return 0;
+}
+
+static u16 lio_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
+{
+ unsigned char *buffer = se_cmd->sense_buffer;
+ /*
+ * From RFC-3720 10.4.7. Data Segment - Sense and Response Data Segment
+ * 16-bit SenseLength.
+ */
+ buffer[0] = ((sense_length >> 8) & 0xff);
+ buffer[1] = (sense_length & 0xff);
+ /*
+ * Return two byte offset into allocated sense_buffer.
+ */
+ return 2;
+}
+
+static u16 lio_get_fabric_sense_len(void)
+{
+ /*
+ * Return two byte offset into allocated sense_buffer.
+ */
+ return 2;
+}
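
lio_set_fabric_sense_len() above implements the RFC-3720 rule that sense data in the Data Segment is preceded by a two-byte, big-endian SenseLength field, which is why both helpers report a 2-byte offset into the sense buffer. A minimal standalone sketch with a worked value (18 == 0x0012); the set_sense_len() helper is hypothetical and for illustration only.

#include <assert.h>

/* Store the 16-bit SenseLength in network byte order at the head of the
 * sense buffer and return the offset where the sense data itself begins. */
static unsigned short set_sense_len(unsigned char *buffer, unsigned int sense_length)
{
	buffer[0] = (sense_length >> 8) & 0xff;	/* high byte */
	buffer[1] = sense_length & 0xff;	/* low byte */
	return 2;
}

int main(void)
{
	unsigned char buf[96] = { 0 };

	assert(set_sense_len(buf, 18) == 2);
	assert(buf[0] == 0x00 && buf[1] == 0x12);	/* 18 == 0x0012 */
	return 0;
}
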
+
+static int lio_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ cmd->i_state = ISTATE_SEND_TASKMGTRSP;
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ return 0;
+}
+
+static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return &tpg->tpg_tiqn->tiqn[0];
+}
+
+static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return tpg->tpgt;
+}
+
+static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+}
+
+static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
+}
+
+static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+}
+
+static int lio_tpg_check_demo_mode_write_protect(
+ struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+}
+
+static int lio_tpg_check_prod_mode_write_protect(
+ struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+}
+
+static void lio_tpg_release_fabric_acl(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_acl)
+{
+ struct iscsi_node_acl *acl = container_of(se_acl,
+ struct iscsi_node_acl, se_node_acl);
+ kfree(acl);
+}
+
+/*
+ * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
+ *
+ * Also, this function calls iscsit_inc_session_usage_count() on the
+ * struct iscsi_session in question.
+ */
+static int lio_tpg_shutdown_session(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+ spin_lock(&sess->conn_lock);
+ if (atomic_read(&sess->session_fall_back_to_erl0) ||
+ atomic_read(&sess->session_logout) ||
+ (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess->conn_lock);
+ return 0;
+ }
+ atomic_set(&sess->session_reinstatement, 1);
+ spin_unlock(&sess->conn_lock);
+
+ iscsit_inc_session_usage_count(sess);
+ iscsit_stop_time2retain_timer(sess);
+
+ return 1;
+}
+
+/*
+ * Calls iscsit_dec_session_usage_count() as inverse of
+ * lio_tpg_shutdown_session()
+ */
+static void lio_tpg_close_session(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+ /*
+ * If the iSCSI Session for the iSCSI Initiator Node exists,
+ * forcefully shutdown the iSCSI NEXUS.
+ */
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
+ iscsit_close_session(sess);
+}
+
+static void lio_tpg_stop_session(
+ struct se_session *se_sess,
+ int sess_sleep,
+ int conn_sleep)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+ iscsit_stop_session(sess, sess_sleep, conn_sleep);
+}
+
+static void lio_tpg_fall_back_to_erl0(struct se_session *se_sess)
+{
+ struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+
+ iscsit_fall_back_to_erl0(sess);
+}
+
+static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+
+ return tpg->tpg_tiqn->tiqn_index;
+}
+
+static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
+{
+ struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
+ se_node_acl);
+
+ ISCSI_NODE_ATTRIB(acl)->nacl = acl;
+ iscsit_set_default_node_attribues(acl);
+}
+
+static void lio_release_cmd(struct se_cmd *se_cmd)
+{
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ iscsit_release_cmd(cmd);
+}
+
+/* End functions for target_core_fabric_ops */
+
+int iscsi_target_register_configfs(void)
+{
+ struct target_fabric_configfs *fabric;
+ int ret;
+
+ lio_target_fabric_configfs = NULL;
+ fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi");
+ if (IS_ERR(fabric)) {
+ pr_err("target_fabric_configfs_init() for"
+ " LIO-Target failed!\n");
+ return PTR_ERR(fabric);
+ }
+ /*
+	 * Setup the fabric API of function pointers used by target_core_mod.
+ */
+ fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name;
+ fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident;
+ fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn;
+ fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag;
+ fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth;
+ fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id;
+ fabric->tf_ops.tpg_get_pr_transport_id_len =
+ &iscsi_get_pr_transport_id_len;
+ fabric->tf_ops.tpg_parse_pr_out_transport_id =
+ &iscsi_parse_pr_out_transport_id;
+ fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode;
+ fabric->tf_ops.tpg_check_demo_mode_cache =
+ &lio_tpg_check_demo_mode_cache;
+ fabric->tf_ops.tpg_check_demo_mode_write_protect =
+ &lio_tpg_check_demo_mode_write_protect;
+ fabric->tf_ops.tpg_check_prod_mode_write_protect =
+ &lio_tpg_check_prod_mode_write_protect;
+ fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl;
+ fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl;
+ fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index;
+ fabric->tf_ops.release_cmd = &lio_release_cmd;
+ fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
+ fabric->tf_ops.close_session = &lio_tpg_close_session;
+ fabric->tf_ops.stop_session = &lio_tpg_stop_session;
+ fabric->tf_ops.fall_back_to_erl0 = &lio_tpg_fall_back_to_erl0;
+ fabric->tf_ops.sess_logged_in = &lio_sess_logged_in;
+ fabric->tf_ops.sess_get_index = &lio_sess_get_index;
+ fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid;
+ fabric->tf_ops.write_pending = &lio_write_pending;
+ fabric->tf_ops.write_pending_status = &lio_write_pending_status;
+ fabric->tf_ops.set_default_node_attributes =
+ &lio_set_default_node_attributes;
+ fabric->tf_ops.get_task_tag = &iscsi_get_task_tag;
+ fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state;
+ fabric->tf_ops.queue_data_in = &lio_queue_data_in;
+ fabric->tf_ops.queue_status = &lio_queue_status;
+ fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
+ fabric->tf_ops.set_fabric_sense_len = &lio_set_fabric_sense_len;
+ fabric->tf_ops.get_fabric_sense_len = &lio_get_fabric_sense_len;
+ fabric->tf_ops.is_state_remove = &iscsi_is_state_remove;
+ /*
+ * Setup function pointers for generic logic in target_core_fabric_configfs.c
+ */
+ fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn;
+ fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn;
+ fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg;
+ fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg;
+ fabric->tf_ops.fabric_post_link = NULL;
+ fabric->tf_ops.fabric_pre_unlink = NULL;
+ fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg;
+ fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg;
+ fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl;
+ fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl;
+ /*
+ * Setup default attribute lists for various fabric->tf_cit_tmpl
+	 * struct config_item_type's
+ */
+ TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
+ TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
+
+ ret = target_fabric_configfs_register(fabric);
+ if (ret < 0) {
+ pr_err("target_fabric_configfs_register() for"
+ " LIO-Target failed!\n");
+ target_fabric_configfs_free(fabric);
+ return ret;
+ }
+
+ lio_target_fabric_configfs = fabric;
+ pr_debug("LIO_TARGET[0] - Set fabric ->"
+ " lio_target_fabric_configfs\n");
+ return 0;
+}
+
+
+void iscsi_target_deregister_configfs(void)
+{
+ if (!lio_target_fabric_configfs)
+ return;
+ /*
+ * Shutdown discovery sessions and disable discovery TPG
+ */
+ if (iscsit_global->discovery_tpg)
+ iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
+
+ target_fabric_configfs_deregister(lio_target_fabric_configfs);
+ lio_target_fabric_configfs = NULL;
+ pr_debug("LIO_TARGET[0] - Cleared"
+ " lio_target_fabric_configfs\n");
+}
diff --git a/drivers/target/iscsi/iscsi_target_configfs.h b/drivers/target/iscsi/iscsi_target_configfs.h
new file mode 100644
index 00000000000..8cd5a63c4ed
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_configfs.h
@@ -0,0 +1,7 @@
+#ifndef ISCSI_TARGET_CONFIGFS_H
+#define ISCSI_TARGET_CONFIGFS_H
+
+extern int iscsi_target_register_configfs(void);
+extern void iscsi_target_deregister_configfs(void);
+
+#endif /* ISCSI_TARGET_CONFIGFS_H */
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
new file mode 100644
index 00000000000..470ed551eeb
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -0,0 +1,859 @@
+#ifndef ISCSI_TARGET_CORE_H
+#define ISCSI_TARGET_CORE_H
+
+#include <linux/in.h>
+#include <linux/configfs.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+
+#define ISCSIT_VERSION "v4.1.0-rc1"
+#define ISCSI_MAX_DATASN_MISSING_COUNT 16
+#define ISCSI_TX_THREAD_TCP_TIMEOUT 2
+#define ISCSI_RX_THREAD_TCP_TIMEOUT 2
+#define SECONDS_FOR_ASYNC_LOGOUT 10
+#define SECONDS_FOR_ASYNC_TEXT 10
+#define SECONDS_FOR_LOGOUT_COMP 15
+#define WHITE_SPACE " \t\v\f\n\r"
+
+/* struct iscsi_node_attrib sanity values */
+#define NA_DATAOUT_TIMEOUT 3
+#define NA_DATAOUT_TIMEOUT_MAX 60
+#define NA_DATAOUT_TIMEOUT_MIX 2
+#define NA_DATAOUT_TIMEOUT_RETRIES 5
+#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
+#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
+#define NA_NOPIN_TIMEOUT 5
+#define NA_NOPIN_TIMEOUT_MAX 60
+#define NA_NOPIN_TIMEOUT_MIN 3
+#define NA_NOPIN_RESPONSE_TIMEOUT 5
+#define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
+#define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
+#define NA_RANDOM_DATAIN_PDU_OFFSETS 0
+#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0
+#define NA_RANDOM_R2T_OFFSETS 0
+#define NA_DEFAULT_ERL 0
+#define NA_DEFAULT_ERL_MAX 2
+#define NA_DEFAULT_ERL_MIN 0
+
+/* struct iscsi_tpg_attrib sanity values */
+#define TA_AUTHENTICATION 1
+#define TA_LOGIN_TIMEOUT 15
+#define TA_LOGIN_TIMEOUT_MAX 30
+#define TA_LOGIN_TIMEOUT_MIN 5
+#define TA_NETIF_TIMEOUT 2
+#define TA_NETIF_TIMEOUT_MAX 15
+#define TA_NETIF_TIMEOUT_MIN 2
+#define TA_GENERATE_NODE_ACLS 0
+#define TA_DEFAULT_CMDSN_DEPTH 16
+#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
+#define TA_DEFAULT_CMDSN_DEPTH_MIN 1
+#define TA_CACHE_DYNAMIC_ACLS 0
+/* Enabled by default in demo mode (generate_node_acls=1) */
+#define TA_DEMO_MODE_WRITE_PROTECT 1
+/* Disabled by default in production mode w/ explicit ACLs */
+#define TA_PROD_MODE_WRITE_PROTECT 0
+#define TA_CACHE_CORE_NPS 0
+
+enum tpg_np_network_transport_table {
+ ISCSI_TCP = 0,
+ ISCSI_SCTP_TCP = 1,
+ ISCSI_SCTP_UDP = 2,
+ ISCSI_IWARP_TCP = 3,
+ ISCSI_IWARP_SCTP = 4,
+ ISCSI_INFINIBAND = 5,
+};
+
+/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */
+enum target_conn_state_table {
+ TARG_CONN_STATE_FREE = 0x1,
+ TARG_CONN_STATE_XPT_UP = 0x3,
+ TARG_CONN_STATE_IN_LOGIN = 0x4,
+ TARG_CONN_STATE_LOGGED_IN = 0x5,
+ TARG_CONN_STATE_IN_LOGOUT = 0x6,
+ TARG_CONN_STATE_LOGOUT_REQUESTED = 0x7,
+ TARG_CONN_STATE_CLEANUP_WAIT = 0x8,
+};
+
+/* RFC-3720 7.3.2 Session State Diagram for a Target */
+enum target_sess_state_table {
+ TARG_SESS_STATE_FREE = 0x1,
+ TARG_SESS_STATE_ACTIVE = 0x2,
+ TARG_SESS_STATE_LOGGED_IN = 0x3,
+ TARG_SESS_STATE_FAILED = 0x4,
+ TARG_SESS_STATE_IN_CONTINUE = 0x5,
+};
+
+/* struct iscsi_data_count->type */
+enum data_count_type {
+ ISCSI_RX_DATA = 1,
+ ISCSI_TX_DATA = 2,
+};
+
+/* struct iscsi_datain_req->dr_complete */
+enum datain_req_comp_table {
+ DATAIN_COMPLETE_NORMAL = 1,
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
+ DATAIN_COMPLETE_CONNECTION_RECOVERY = 3,
+};
+
+/* struct iscsi_datain_req->recovery */
+enum datain_req_rec_table {
+ DATAIN_WITHIN_COMMAND_RECOVERY = 1,
+ DATAIN_CONNECTION_RECOVERY = 2,
+};
+
+/* struct iscsi_portal_group->state */
+enum tpg_state_table {
+ TPG_STATE_FREE = 0,
+ TPG_STATE_ACTIVE = 1,
+ TPG_STATE_INACTIVE = 2,
+ TPG_STATE_COLD_RESET = 3,
+};
+
+/* struct iscsi_tiqn->tiqn_state */
+enum tiqn_state_table {
+ TIQN_STATE_ACTIVE = 1,
+ TIQN_STATE_SHUTDOWN = 2,
+};
+
+/* struct iscsi_cmd->cmd_flags */
+enum cmd_flags_table {
+ ICF_GOT_LAST_DATAOUT = 0x00000001,
+ ICF_GOT_DATACK_SNACK = 0x00000002,
+ ICF_NON_IMMEDIATE_UNSOLICITED_DATA = 0x00000004,
+ ICF_SENT_LAST_R2T = 0x00000008,
+ ICF_WITHIN_COMMAND_RECOVERY = 0x00000010,
+ ICF_CONTIG_MEMORY = 0x00000020,
+ ICF_ATTACHED_TO_RQUEUE = 0x00000040,
+ ICF_OOO_CMDSN = 0x00000080,
+ ICF_REJECT_FAIL_CONN = 0x00000100,
+};
+
+/* struct iscsi_cmd->i_state */
+enum cmd_i_state_table {
+ ISTATE_NO_STATE = 0,
+ ISTATE_NEW_CMD = 1,
+ ISTATE_DEFERRED_CMD = 2,
+ ISTATE_UNSOLICITED_DATA = 3,
+ ISTATE_RECEIVE_DATAOUT = 4,
+ ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
+ ISTATE_RECEIVED_LAST_DATAOUT = 6,
+ ISTATE_WITHIN_DATAOUT_RECOVERY = 7,
+ ISTATE_IN_CONNECTION_RECOVERY = 8,
+ ISTATE_RECEIVED_TASKMGT = 9,
+ ISTATE_SEND_ASYNCMSG = 10,
+ ISTATE_SENT_ASYNCMSG = 11,
+ ISTATE_SEND_DATAIN = 12,
+ ISTATE_SEND_LAST_DATAIN = 13,
+ ISTATE_SENT_LAST_DATAIN = 14,
+ ISTATE_SEND_LOGOUTRSP = 15,
+ ISTATE_SENT_LOGOUTRSP = 16,
+ ISTATE_SEND_NOPIN = 17,
+ ISTATE_SENT_NOPIN = 18,
+ ISTATE_SEND_REJECT = 19,
+ ISTATE_SENT_REJECT = 20,
+ ISTATE_SEND_R2T = 21,
+ ISTATE_SENT_R2T = 22,
+ ISTATE_SEND_R2T_RECOVERY = 23,
+ ISTATE_SENT_R2T_RECOVERY = 24,
+ ISTATE_SEND_LAST_R2T = 25,
+ ISTATE_SENT_LAST_R2T = 26,
+ ISTATE_SEND_LAST_R2T_RECOVERY = 27,
+ ISTATE_SENT_LAST_R2T_RECOVERY = 28,
+ ISTATE_SEND_STATUS = 29,
+ ISTATE_SEND_STATUS_BROKEN_PC = 30,
+ ISTATE_SENT_STATUS = 31,
+ ISTATE_SEND_STATUS_RECOVERY = 32,
+ ISTATE_SENT_STATUS_RECOVERY = 33,
+ ISTATE_SEND_TASKMGTRSP = 34,
+ ISTATE_SENT_TASKMGTRSP = 35,
+ ISTATE_SEND_TEXTRSP = 36,
+ ISTATE_SENT_TEXTRSP = 37,
+ ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
+ ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
+ ISTATE_SEND_NOPIN_NO_RESPONSE = 40,
+ ISTATE_REMOVE = 41,
+ ISTATE_FREE = 42,
+};
+
+/* Used for iscsi_recover_cmdsn() return values */
+enum recover_cmdsn_ret_table {
+ CMDSN_ERROR_CANNOT_RECOVER = -1,
+ CMDSN_NORMAL_OPERATION = 0,
+ CMDSN_LOWER_THAN_EXP = 1,
+ CMDSN_HIGHER_THAN_EXP = 2,
+};
+
+/* Used for iscsi_handle_immediate_data() return values */
+enum immediate_data_ret_table {
+ IMMEDIATE_DATA_CANNOT_RECOVER = -1,
+ IMMEDIATE_DATA_NORMAL_OPERATION = 0,
+ IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
+};
+
+/* Used for iscsi_decide_dataout_action() return values */
+enum dataout_action_ret_table {
+ DATAOUT_CANNOT_RECOVER = -1,
+ DATAOUT_NORMAL = 0,
+ DATAOUT_SEND_R2T = 1,
+ DATAOUT_SEND_TO_TRANSPORT = 2,
+ DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
+};
+
+/* Used for struct iscsi_node_auth->naf_flags */
+enum naf_flags_table {
+ NAF_USERID_SET = 0x01,
+ NAF_PASSWORD_SET = 0x02,
+ NAF_USERID_IN_SET = 0x04,
+ NAF_PASSWORD_IN_SET = 0x08,
+};
+
+/* Used by various struct timer_list to manage iSCSI specific state */
+enum iscsi_timer_flags_table {
+ ISCSI_TF_RUNNING = 0x01,
+ ISCSI_TF_STOP = 0x02,
+ ISCSI_TF_EXPIRED = 0x04,
+};
+
+/* Used for struct iscsi_np->np_flags */
+enum np_flags_table {
+ NPF_IP_NETWORK = 0x00,
+ NPF_SCTP_STRUCT_FILE = 0x01 /* Bugfix */
+};
+
+/* Used for struct iscsi_np->np_thread_state */
+enum np_thread_state_table {
+ ISCSI_NP_THREAD_ACTIVE = 1,
+ ISCSI_NP_THREAD_INACTIVE = 2,
+ ISCSI_NP_THREAD_RESET = 3,
+ ISCSI_NP_THREAD_SHUTDOWN = 4,
+ ISCSI_NP_THREAD_EXIT = 5,
+};
+
+struct iscsi_conn_ops {
+ u8 HeaderDigest; /* [0,1] == [None,CRC32C] */
+ u8 DataDigest; /* [0,1] == [None,CRC32C] */
+ u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */
+ u8 OFMarker; /* [0,1] == [No,Yes] */
+ u8 IFMarker; /* [0,1] == [No,Yes] */
+ u32 OFMarkInt; /* [1..65535] */
+ u32 IFMarkInt; /* [1..65535] */
+};
+
+struct iscsi_sess_ops {
+ char InitiatorName[224];
+ char InitiatorAlias[256];
+ char TargetName[224];
+ char TargetAlias[256];
+ char TargetAddress[256];
+ u16 TargetPortalGroupTag; /* [0..65535] */
+ u16 MaxConnections; /* [1..65535] */
+ u8 InitialR2T; /* [0,1] == [No,Yes] */
+ u8 ImmediateData; /* [0,1] == [No,Yes] */
+ u32 MaxBurstLength; /* [512..2**24-1] */
+ u32 FirstBurstLength; /* [512..2**24-1] */
+ u16 DefaultTime2Wait; /* [0..3600] */
+ u16 DefaultTime2Retain; /* [0..3600] */
+ u16 MaxOutstandingR2T; /* [1..65535] */
+ u8 DataPDUInOrder; /* [0,1] == [No,Yes] */
+ u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */
+ u8 ErrorRecoveryLevel; /* [0..2] */
+ u8 SessionType; /* [0,1] == [Normal,Discovery] */
+};
+
+struct iscsi_queue_req {
+ int state;
+ struct iscsi_cmd *cmd;
+ struct list_head qr_list;
+};
+
+struct iscsi_data_count {
+ int data_length;
+ int sync_and_steering;
+ enum data_count_type type;
+ u32 iov_count;
+ u32 ss_iov_count;
+ u32 ss_marker_count;
+ struct kvec *iov;
+};
+
+struct iscsi_param_list {
+ struct list_head param_list;
+ struct list_head extra_response_list;
+};
+
+struct iscsi_datain_req {
+ enum datain_req_comp_table dr_complete;
+ int generate_recovery_values;
+ enum datain_req_rec_table recovery;
+ u32 begrun;
+ u32 runlength;
+ u32 data_length;
+ u32 data_offset;
+ u32 data_offset_end;
+ u32 data_sn;
+ u32 next_burst_len;
+ u32 read_data_done;
+ u32 seq_send_order;
+ struct list_head dr_list;
+} ____cacheline_aligned;
+
+struct iscsi_ooo_cmdsn {
+ u16 cid;
+ u32 batch_count;
+ u32 cmdsn;
+ u32 exp_cmdsn;
+ struct iscsi_cmd *cmd;
+ struct list_head ooo_list;
+} ____cacheline_aligned;
+
+struct iscsi_datain {
+ u8 flags;
+ u32 data_sn;
+ u32 length;
+ u32 offset;
+} ____cacheline_aligned;
+
+struct iscsi_r2t {
+ int seq_complete;
+ int recovery_r2t;
+ int sent_r2t;
+ u32 r2t_sn;
+ u32 offset;
+ u32 targ_xfer_tag;
+ u32 xfer_len;
+ struct list_head r2t_list;
+} ____cacheline_aligned;
+
+struct iscsi_cmd {
+ enum iscsi_timer_flags_table dataout_timer_flags;
+ /* DataOUT timeout retries */
+ u8 dataout_timeout_retries;
+ /* Within command recovery count */
+ u8 error_recovery_count;
+ /* iSCSI dependent state for out of order CmdSNs */
+ enum cmd_i_state_table deferred_i_state;
+ /* iSCSI dependent state */
+ enum cmd_i_state_table i_state;
+ /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
+ u8 immediate_cmd;
+ /* Immediate data present */
+ u8 immediate_data;
+ /* iSCSI Opcode */
+ u8 iscsi_opcode;
+ /* iSCSI Response Code */
+ u8 iscsi_response;
+ /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+ u8 logout_reason;
+ /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+ u8 logout_response;
+ /* MaxCmdSN has been incremented */
+ u8 maxcmdsn_inc;
+ /* Immediate Unsolicited Dataout */
+ u8 unsolicited_data;
+ /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
+ u16 logout_cid;
+ /* Command flags */
+ enum cmd_flags_table cmd_flags;
+ /* Initiator Task Tag assigned from Initiator */
+ u32 init_task_tag;
+ /* Target Transfer Tag assigned from Target */
+ u32 targ_xfer_tag;
+ /* CmdSN assigned from Initiator */
+ u32 cmd_sn;
+ /* ExpStatSN assigned from Initiator */
+ u32 exp_stat_sn;
+ /* StatSN assigned to this ITT */
+ u32 stat_sn;
+ /* DataSN Counter */
+ u32 data_sn;
+ /* R2TSN Counter */
+ u32 r2t_sn;
+ /* Last DataSN acknowledged via DataAck SNACK */
+ u32 acked_data_sn;
+ /* Used for echoing NOPOUT ping data */
+ u32 buf_ptr_size;
+ /* Used to store DataDigest */
+ u32 data_crc;
+ /* Total size in bytes associated with command */
+ u32 data_length;
+ /* Counter for MaxOutstandingR2T */
+ u32 outstanding_r2ts;
+ /* Next R2T Offset when DataSequenceInOrder=Yes */
+ u32 r2t_offset;
+ /* Iovec current and orig count for iscsi_cmd->iov_data */
+ u32 iov_data_count;
+ u32 orig_iov_data_count;
+ /* Number of miscellaneous iovecs used for IP stack calls */
+ u32 iov_misc_count;
+ /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+ u32 pdu_count;
+ /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
+ u32 pdu_send_order;
+ /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+ u32 pdu_start;
+ u32 residual_count;
+ /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
+ u32 seq_send_order;
+ /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
+ u32 seq_count;
+ /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
+ u32 seq_no;
+ /* Lowest offset in current DataOUT sequence */
+ u32 seq_start_offset;
+ /* Highest offset in current DataOUT sequence */
+ u32 seq_end_offset;
+ /* Total size in bytes received so far of READ data */
+ u32 read_data_done;
+ /* Total size in bytes received so far of WRITE data */
+ u32 write_data_done;
+ /* Counter for FirstBurstLength key */
+ u32 first_burst_len;
+ /* Counter for MaxBurstLength key */
+ u32 next_burst_len;
+ /* Transfer size used for IP stack calls */
+ u32 tx_size;
+ /* Buffer used for various purposes */
+ void *buf_ptr;
+ /* See include/linux/dma-mapping.h */
+ enum dma_data_direction data_direction;
+ /* iSCSI PDU Header + CRC */
+ unsigned char pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
+ /* Number of times struct iscsi_cmd is present in immediate queue */
+ atomic_t immed_queue_count;
+ atomic_t response_queue_count;
+ atomic_t transport_sent;
+ spinlock_t datain_lock;
+ spinlock_t dataout_timeout_lock;
+ /* spinlock for protecting struct iscsi_cmd->i_state */
+ spinlock_t istate_lock;
+ /* spinlock for adding within command recovery entries */
+ spinlock_t error_lock;
+ /* spinlock for adding R2Ts */
+ spinlock_t r2t_lock;
+ /* DataIN List */
+ struct list_head datain_list;
+ /* R2T List */
+ struct list_head cmd_r2t_list;
+ struct completion reject_comp;
+ /* Timer for DataOUT */
+ struct timer_list dataout_timer;
+ /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
+ struct kvec *iov_data;
+ /* Iovecs for miscellaneous purposes */
+#define ISCSI_MISC_IOVECS 5
+ struct kvec iov_misc[ISCSI_MISC_IOVECS];
+ /* Array of struct iscsi_pdu used for DataPDUInOrder=No */
+ struct iscsi_pdu *pdu_list;
+ /* Current struct iscsi_pdu used for DataPDUInOrder=No */
+ struct iscsi_pdu *pdu_ptr;
+ /* Array of struct iscsi_seq used for DataSequenceInOrder=No */
+ struct iscsi_seq *seq_list;
+ /* Current struct iscsi_seq used for DataSequenceInOrder=No */
+ struct iscsi_seq *seq_ptr;
+ /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
+ struct iscsi_tmr_req *tmr_req;
+ /* Connection this command belongs to */
+ struct iscsi_conn *conn;
+ /* Pointer to connection recovery entry */
+ struct iscsi_conn_recovery *cr;
+ /* Session the command is part of, used for connection recovery */
+ struct iscsi_session *sess;
+ /* list_head for connection list */
+ struct list_head i_list;
+ /* The TCM I/O descriptor that is accessed via container_of() */
+ struct se_cmd se_cmd;
+ /* Sense buffer that will be mapped into outgoing status */
+#define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2)
+ unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN];
+
+ struct scatterlist *t_mem_sg;
+ u32 t_mem_sg_nents;
+
+ u32 padding;
+ u8 pad_bytes[4];
+
+ struct scatterlist *first_data_sg;
+ u32 first_data_sg_off;
+ u32 kmapped_nents;
+
+} ____cacheline_aligned;
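The se_cmd member embedded near the end of struct iscsi_cmd is what the comment above refers to: core callbacks that receive a struct se_cmd pointer recover the enclosing iscsi_cmd with container_of(). As a self-contained aside (not part of the patch), the sketch below demonstrates that pointer arithmetic with simplified stand-in structs.

/*
 * Illustrative sketch only (not part of this patch).  The structs are
 * simplified stand-ins; only the container_of() arithmetic is the point.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct example_se_cmd { int scsi_status; };

struct example_iscsi_cmd {
	unsigned int init_task_tag;
	struct example_se_cmd se_cmd;	/* embedded, as in struct iscsi_cmd */
};

int main(void)
{
	struct example_iscsi_cmd cmd = { .init_task_tag = 0x10 };
	struct example_se_cmd *se_cmd = &cmd.se_cmd;
	struct example_iscsi_cmd *back;

	/* Recover the enclosing command from the embedded member. */
	back = container_of(se_cmd, struct example_iscsi_cmd, se_cmd);
	printf("ITT: 0x%08x (same object: %d)\n",
	       back->init_task_tag, back == &cmd);
	return 0;
}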
+
+struct iscsi_tmr_req {
+ bool task_reassign:1;
+ u32 ref_cmd_sn;
+ u32 exp_data_sn;
+ struct iscsi_conn_recovery *conn_recovery;
+ struct se_tmr_req *se_tmr_req;
+};
+
+struct iscsi_conn {
+ /* Authentication Successful for this connection */
+ u8 auth_complete;
+ /* State connection is currently in */
+ u8 conn_state;
+ u8 conn_logout_reason;
+ u8 network_transport;
+ enum iscsi_timer_flags_table nopin_timer_flags;
+ enum iscsi_timer_flags_table nopin_response_timer_flags;
+ u8 tx_immediate_queue;
+ u8 tx_response_queue;
+ /* Used to know what thread encountered a transport failure */
+ u8 which_thread;
+ /* connection id assigned by the Initiator */
+ u16 cid;
+ /* Remote TCP Port */
+ u16 login_port;
+ int net_size;
+ u32 auth_id;
+#define CONNFLAG_SCTP_STRUCT_FILE 0x01
+ u32 conn_flags;
+ /* Used for iscsi_tx_login_rsp() */
+ u32 login_itt;
+ u32 exp_statsn;
+ /* Per connection status sequence number */
+ u32 stat_sn;
+ /* IFMarkInt's Current Value */
+ u32 if_marker;
+ /* OFMarkInt's Current Value */
+ u32 of_marker;
+ /* Used for calculating OFMarker offset to next PDU */
+ u32 of_marker_offset;
+ /* Complete Bad PDU for sending reject */
+ unsigned char bad_hdr[ISCSI_HDR_LEN];
+#define IPV6_ADDRESS_SPACE 48
+ unsigned char login_ip[IPV6_ADDRESS_SPACE];
+ int conn_usage_count;
+ int conn_waiting_on_uc;
+ atomic_t check_immediate_queue;
+ atomic_t conn_logout_remove;
+ atomic_t connection_exit;
+ atomic_t connection_recovery;
+ atomic_t connection_reinstatement;
+ atomic_t connection_wait;
+ atomic_t connection_wait_rcfr;
+ atomic_t sleep_on_conn_wait_comp;
+ atomic_t transport_failed;
+ struct completion conn_post_wait_comp;
+ struct completion conn_wait_comp;
+ struct completion conn_wait_rcfr_comp;
+ struct completion conn_waiting_on_uc_comp;
+ struct completion conn_logout_comp;
+ struct completion tx_half_close_comp;
+ struct completion rx_half_close_comp;
+ /* socket used by this connection */
+ struct socket *sock;
+ struct timer_list nopin_timer;
+ struct timer_list nopin_response_timer;
+ struct timer_list transport_timer;
+ /* Spinlock used for add/deleting cmd's from conn_cmd_list */
+ spinlock_t cmd_lock;
+ spinlock_t conn_usage_lock;
+ spinlock_t immed_queue_lock;
+ spinlock_t nopin_timer_lock;
+ spinlock_t response_queue_lock;
+ spinlock_t state_lock;
+ /* libcrypto RX and TX contexts for crc32c */
+ struct hash_desc conn_rx_hash;
+ struct hash_desc conn_tx_hash;
+ /* Used for scheduling TX and RX connection kthreads */
+ cpumask_var_t conn_cpumask;
+ int conn_rx_reset_cpumask:1;
+ int conn_tx_reset_cpumask:1;
+ /* list_head of struct iscsi_cmd for this connection */
+ struct list_head conn_cmd_list;
+ struct list_head immed_queue_list;
+ struct list_head response_queue_list;
+ struct iscsi_conn_ops *conn_ops;
+ struct iscsi_param_list *param_list;
+ /* Used for per connection auth state machine */
+ void *auth_protocol;
+ struct iscsi_login_thread_s *login_thread;
+ struct iscsi_portal_group *tpg;
+ /* Pointer to parent session */
+ struct iscsi_session *sess;
+ /* Pointer to thread_set in use for this conn's threads */
+ struct iscsi_thread_set *thread_set;
+ /* list_head for session connection list */
+ struct list_head conn_list;
+} ____cacheline_aligned;
+
+struct iscsi_conn_recovery {
+ u16 cid;
+ u32 cmd_count;
+ u32 maxrecvdatasegmentlength;
+ int ready_for_reallegiance;
+ struct list_head conn_recovery_cmd_list;
+ spinlock_t conn_recovery_cmd_lock;
+ struct timer_list time2retain_timer;
+ struct iscsi_session *sess;
+ struct list_head cr_list;
+} ____cacheline_aligned;
+
+struct iscsi_session {
+ u8 initiator_vendor;
+ u8 isid[6];
+ enum iscsi_timer_flags_table time2retain_timer_flags;
+ u8 version_active;
+ u16 cid_called;
+ u16 conn_recovery_count;
+ u16 tsih;
+ /* state session is currently in */
+ u32 session_state;
+ /* session wide counter: initiator assigned task tag */
+ u32 init_task_tag;
+ /* session wide counter: target assigned task tag */
+ u32 targ_xfer_tag;
+ u32 cmdsn_window;
+
+ /* protects cmdsn values */
+ struct mutex cmdsn_mutex;
+ /* session wide counter: expected command sequence number */
+ u32 exp_cmd_sn;
+ /* session wide counter: maximum allowed command sequence number */
+ u32 max_cmd_sn;
+ struct list_head sess_ooo_cmdsn_list;
+
+ /* LIO specific session ID */
+ u32 sid;
+ char auth_type[8];
+ /* unique within the target */
+ int session_index;
+ /* Used for session reference counting */
+ int session_usage_count;
+ int session_waiting_on_uc;
+ u32 cmd_pdus;
+ u32 rsp_pdus;
+ u64 tx_data_octets;
+ u64 rx_data_octets;
+ u32 conn_digest_errors;
+ u32 conn_timeout_errors;
+ u64 creation_time;
+ spinlock_t session_stats_lock;
+ /* Number of active connections */
+ atomic_t nconn;
+ atomic_t session_continuation;
+ atomic_t session_fall_back_to_erl0;
+ atomic_t session_logout;
+ atomic_t session_reinstatement;
+ atomic_t session_stop_active;
+ atomic_t sleep_on_sess_wait_comp;
+ atomic_t transport_wait_cmds;
+ /* connection list */
+ struct list_head sess_conn_list;
+ struct list_head cr_active_list;
+ struct list_head cr_inactive_list;
+ spinlock_t conn_lock;
+ spinlock_t cr_a_lock;
+ spinlock_t cr_i_lock;
+ spinlock_t session_usage_lock;
+ spinlock_t ttt_lock;
+ struct completion async_msg_comp;
+ struct completion reinstatement_comp;
+ struct completion session_wait_comp;
+ struct completion session_waiting_on_uc_comp;
+ struct timer_list time2retain_timer;
+ struct iscsi_sess_ops *sess_ops;
+ struct se_session *se_sess;
+ struct iscsi_portal_group *tpg;
+} ____cacheline_aligned;
+
+struct iscsi_login {
+ u8 auth_complete;
+ u8 checked_for_existing;
+ u8 current_stage;
+ u8 leading_connection;
+ u8 first_request;
+ u8 version_min;
+ u8 version_max;
+ char isid[6];
+ u32 cmd_sn;
+ u32 init_task_tag;
+ u32 initial_exp_statsn;
+ u32 rsp_length;
+ u16 cid;
+ u16 tsih;
+ char *req;
+ char *rsp;
+ char *req_buf;
+ char *rsp_buf;
+} ____cacheline_aligned;
+
+struct iscsi_node_attrib {
+ u32 dataout_timeout;
+ u32 dataout_timeout_retries;
+ u32 default_erl;
+ u32 nopin_timeout;
+ u32 nopin_response_timeout;
+ u32 random_datain_pdu_offsets;
+ u32 random_datain_seq_offsets;
+ u32 random_r2t_offsets;
+ u32 tmr_cold_reset;
+ u32 tmr_warm_reset;
+ struct iscsi_node_acl *nacl;
+};
+
+struct se_dev_entry_s;
+
+struct iscsi_node_auth {
+ enum naf_flags_table naf_flags;
+ int authenticate_target;
+ /* Used for iscsit_global->discovery_auth,
+ * set to zero (auth disabled) by default */
+ int enforce_discovery_auth;
+#define MAX_USER_LEN 256
+#define MAX_PASS_LEN 256
+ char userid[MAX_USER_LEN];
+ char password[MAX_PASS_LEN];
+ char userid_mutual[MAX_USER_LEN];
+ char password_mutual[MAX_PASS_LEN];
+};
+
+#include "iscsi_target_stat.h"
+
+struct iscsi_node_stat_grps {
+ struct config_group iscsi_sess_stats_group;
+ struct config_group iscsi_conn_stats_group;
+};
+
+struct iscsi_node_acl {
+ struct iscsi_node_attrib node_attrib;
+ struct iscsi_node_auth node_auth;
+ struct iscsi_node_stat_grps node_stat_grps;
+ struct se_node_acl se_node_acl;
+};
+
+#define NODE_STAT_GRPS(nacl) (&(nacl)->node_stat_grps)
+
+#define ISCSI_NODE_ATTRIB(t) (&(t)->node_attrib)
+#define ISCSI_NODE_AUTH(t) (&(t)->node_auth)
+
+struct iscsi_tpg_attrib {
+ u32 authentication;
+ u32 login_timeout;
+ u32 netif_timeout;
+ u32 generate_node_acls;
+ u32 cache_dynamic_acls;
+ u32 default_cmdsn_depth;
+ u32 demo_mode_write_protect;
+ u32 prod_mode_write_protect;
+ struct iscsi_portal_group *tpg;
+};
+
+struct iscsi_np {
+ int np_network_transport;
+ int np_ip_proto;
+ int np_sock_type;
+ enum np_thread_state_table np_thread_state;
+ enum iscsi_timer_flags_table np_login_timer_flags;
+ u32 np_exports;
+ enum np_flags_table np_flags;
+ unsigned char np_ip[IPV6_ADDRESS_SPACE];
+ u16 np_port;
+ spinlock_t np_thread_lock;
+ struct completion np_restart_comp;
+ struct socket *np_socket;
+ struct __kernel_sockaddr_storage np_sockaddr;
+ struct task_struct *np_thread;
+ struct timer_list np_login_timer;
+ struct iscsi_portal_group *np_login_tpg;
+ struct list_head np_list;
+} ____cacheline_aligned;
+
+struct iscsi_tpg_np {
+ struct iscsi_np *tpg_np;
+ struct iscsi_portal_group *tpg;
+ struct iscsi_tpg_np *tpg_np_parent;
+ struct list_head tpg_np_list;
+ struct list_head tpg_np_child_list;
+ struct list_head tpg_np_parent_list;
+ struct se_tpg_np se_tpg_np;
+ spinlock_t tpg_np_parent_lock;
+};
+
+struct iscsi_portal_group {
+ unsigned char tpg_chap_id;
+ /* TPG State */
+ enum tpg_state_table tpg_state;
+ /* Target Portal Group Tag */
+ u16 tpgt;
+ /* Id assigned to target sessions */
+ u16 ntsih;
+ /* Number of active sessions */
+ u32 nsessions;
+ /* Number of Network Portals available for this TPG */
+ u32 num_tpg_nps;
+ /* Per TPG LIO specific session ID. */
+ u32 sid;
+ /* Spinlock for adding/removing Network Portals */
+ spinlock_t tpg_np_lock;
+ spinlock_t tpg_state_lock;
+ struct se_portal_group tpg_se_tpg;
+ struct mutex tpg_access_lock;
+ struct mutex np_login_lock;
+ struct iscsi_tpg_attrib tpg_attrib;
+ /* Pointer to default list of iSCSI parameters for TPG */
+ struct iscsi_param_list *param_list;
+ struct iscsi_tiqn *tpg_tiqn;
+ struct list_head tpg_gnp_list;
+ struct list_head tpg_list;
+} ____cacheline_aligned;
+
+#define ISCSI_TPG_C(c) ((struct iscsi_portal_group *)(c)->tpg)
+#define ISCSI_TPG_LUN(c, l) ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l])
+#define ISCSI_TPG_S(s) ((struct iscsi_portal_group *)(s)->tpg)
+#define ISCSI_TPG_ATTRIB(t) (&(t)->tpg_attrib)
+#define SE_TPG(tpg) (&(tpg)->tpg_se_tpg)
+
+struct iscsi_wwn_stat_grps {
+ struct config_group iscsi_stat_group;
+ struct config_group iscsi_instance_group;
+ struct config_group iscsi_sess_err_group;
+ struct config_group iscsi_tgt_attr_group;
+ struct config_group iscsi_login_stats_group;
+ struct config_group iscsi_logout_stats_group;
+};
+
+struct iscsi_tiqn {
+#define ISCSI_IQN_LEN 224
+ unsigned char tiqn[ISCSI_IQN_LEN];
+ enum tiqn_state_table tiqn_state;
+ int tiqn_access_count;
+ u32 tiqn_active_tpgs;
+ u32 tiqn_ntpgs;
+ u32 tiqn_num_tpg_nps;
+ u32 tiqn_nsessions;
+ struct list_head tiqn_list;
+ struct list_head tiqn_tpg_list;
+ spinlock_t tiqn_state_lock;
+ spinlock_t tiqn_tpg_lock;
+ struct se_wwn tiqn_wwn;
+ struct iscsi_wwn_stat_grps tiqn_stat_grps;
+ int tiqn_index;
+ struct iscsi_sess_err_stats sess_err_stats;
+ struct iscsi_login_stats login_stats;
+ struct iscsi_logout_stats logout_stats;
+} ____cacheline_aligned;
+
+#define WWN_STAT_GRPS(tiqn) (&(tiqn)->tiqn_stat_grps)
+
+struct iscsit_global {
+ /* In core shutdown */
+ u32 in_shutdown;
+ u32 active_ts;
+ /* Unique identifier used for the authentication daemon */
+ u32 auth_id;
+ u32 inactive_ts;
+ /* Thread Set bitmap count */
+ int ts_bitmap_count;
+ /* Thread Set bitmap pointer */
+ unsigned long *ts_bitmap;
+ /* Used for iSCSI discovery session authentication */
+ struct iscsi_node_acl discovery_acl;
+ struct iscsi_portal_group *discovery_tpg;
+};
+
+#endif /* ISCSI_TARGET_CORE_H */
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
new file mode 100644
index 00000000000..8c049512951
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -0,0 +1,531 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target DataIN value generation functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_datain_values.h"
+
+struct iscsi_datain_req *iscsit_allocate_datain_req(void)
+{
+ struct iscsi_datain_req *dr;
+
+ dr = kmem_cache_zalloc(lio_dr_cache, GFP_ATOMIC);
+ if (!dr) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_datain_req\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&dr->dr_list);
+
+ return dr;
+}
+
+void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
+{
+ spin_lock(&cmd->datain_lock);
+ list_add_tail(&dr->dr_list, &cmd->datain_list);
+ spin_unlock(&cmd->datain_lock);
+}
+
+void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
+{
+ spin_lock(&cmd->datain_lock);
+ list_del(&dr->dr_list);
+ spin_unlock(&cmd->datain_lock);
+
+ kmem_cache_free(lio_dr_cache, dr);
+}
+
+void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
+{
+ struct iscsi_datain_req *dr, *dr_tmp;
+
+ spin_lock(&cmd->datain_lock);
+ list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, dr_list) {
+ list_del(&dr->dr_list);
+ kmem_cache_free(lio_dr_cache, dr);
+ }
+ spin_unlock(&cmd->datain_lock);
+}
+
+struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
+{
+ struct iscsi_datain_req *dr;
+
+ if (list_empty(&cmd->datain_list)) {
+ pr_err("cmd->datain_list is empty for ITT:"
+ " 0x%08x\n", cmd->init_task_tag);
+ return NULL;
+ }
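+ /* Grab the first (oldest) entry on the list; the loop body breaks immediately. */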
+ list_for_each_entry(dr, &cmd->datain_list, dr_list)
+ break;
+
+ return dr;
+}
+
+/*
+ * For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=Yes.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ u32 next_burst_len, read_data_done, read_data_left;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+
+ dr = iscsit_get_datain_req(cmd);
+ if (!dr)
+ return NULL;
+
+ if (dr->recovery && dr->generate_recovery_values) {
+ if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+ cmd, dr) < 0)
+ return NULL;
+
+ dr->generate_recovery_values = 0;
+ }
+
+ next_burst_len = (!dr->recovery) ?
+ cmd->next_burst_len : dr->next_burst_len;
+ read_data_done = (!dr->recovery) ?
+ cmd->read_data_done : dr->read_data_done;
+
+ read_data_left = (cmd->data_length - read_data_done);
+ if (!read_data_left) {
+ pr_err("ITT: 0x%08x read_data_left is zero!\n",
+ cmd->init_task_tag);
+ return NULL;
+ }
+
+ if ((read_data_left <= conn->conn_ops->MaxRecvDataSegmentLength) &&
+ (read_data_left <= (conn->sess->sess_ops->MaxBurstLength -
+ next_burst_len))) {
+ datain->length = read_data_left;
+
+ datain->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ datain->flags |= ISCSI_FLAG_DATA_ACK;
+ } else {
+ if ((next_burst_len +
+ conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength) {
+ datain->length =
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ next_burst_len += datain->length;
+ } else {
+ datain->length = (conn->sess->sess_ops->MaxBurstLength -
+ next_burst_len);
+ next_burst_len = 0;
+
+ datain->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ datain->flags |= ISCSI_FLAG_DATA_ACK;
+ }
+ }
+
+ datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+ datain->offset = read_data_done;
+
+ if (!dr->recovery) {
+ cmd->next_burst_len = next_burst_len;
+ cmd->read_data_done += datain->length;
+ } else {
+ dr->next_burst_len = next_burst_len;
+ dr->read_data_done += datain->length;
+ }
+
+ if (!dr->recovery) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return dr;
+ }
+
+ if (!dr->runlength) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ } else {
+ if ((dr->begrun + dr->runlength) == dr->data_sn) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ }
+
+ return dr;
+}
+
+/*
+ * For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=Yes.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ u32 offset, read_data_done, read_data_left, seq_send_order;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct iscsi_seq *seq;
+
+ dr = iscsit_get_datain_req(cmd);
+ if (!dr)
+ return NULL;
+
+ if (dr->recovery && dr->generate_recovery_values) {
+ if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
+ cmd, dr) < 0)
+ return NULL;
+
+ dr->generate_recovery_values = 0;
+ }
+
+ read_data_done = (!dr->recovery) ?
+ cmd->read_data_done : dr->read_data_done;
+ seq_send_order = (!dr->recovery) ?
+ cmd->seq_send_order : dr->seq_send_order;
+
+ read_data_left = (cmd->data_length - read_data_done);
+ if (!read_data_left) {
+ pr_err("ITT: 0x%08x read_data_left is zero!\n",
+ cmd->init_task_tag);
+ return NULL;
+ }
+
+ seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
+ if (!seq)
+ return NULL;
+
+ seq->sent = 1;
+
+ if (!dr->recovery && !seq->next_burst_len)
+ seq->first_datasn = cmd->data_sn;
+
+ offset = (seq->offset + seq->next_burst_len);
+
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ cmd->data_length) {
+ datain->length = (cmd->data_length - offset);
+ datain->offset = offset;
+
+ datain->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ datain->flags |= ISCSI_FLAG_DATA_ACK;
+
+ seq->next_burst_len = 0;
+ seq_send_order++;
+ } else {
+ if ((seq->next_burst_len +
+ conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength) {
+ datain->length =
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ datain->offset = (seq->offset + seq->next_burst_len);
+
+ seq->next_burst_len += datain->length;
+ } else {
+ datain->length = (conn->sess->sess_ops->MaxBurstLength -
+ seq->next_burst_len);
+ datain->offset = (seq->offset + seq->next_burst_len);
+
+ datain->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ datain->flags |= ISCSI_FLAG_DATA_ACK;
+
+ seq->next_burst_len = 0;
+ seq_send_order++;
+ }
+ }
+
+ if ((read_data_done + datain->length) == cmd->data_length)
+ datain->flags |= ISCSI_FLAG_DATA_STATUS;
+
+ datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+ if (!dr->recovery) {
+ cmd->seq_send_order = seq_send_order;
+ cmd->read_data_done += datain->length;
+ } else {
+ dr->seq_send_order = seq_send_order;
+ dr->read_data_done += datain->length;
+ }
+
+ if (!dr->recovery) {
+ if (datain->flags & ISCSI_FLAG_CMD_FINAL)
+ seq->last_datasn = datain->data_sn;
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return dr;
+ }
+
+ if (!dr->runlength) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ } else {
+ if ((dr->begrun + dr->runlength) == dr->data_sn) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ }
+
+ return dr;
+}
+
+/*
+ * For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=No.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ u32 next_burst_len, read_data_done, read_data_left;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct iscsi_pdu *pdu;
+
+ dr = iscsit_get_datain_req(cmd);
+ if (!dr)
+ return NULL;
+
+ if (dr->recovery && dr->generate_recovery_values) {
+ if (iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+ cmd, dr) < 0)
+ return NULL;
+
+ dr->generate_recovery_values = 0;
+ }
+
+ next_burst_len = (!dr->recovery) ?
+ cmd->next_burst_len : dr->next_burst_len;
+ read_data_done = (!dr->recovery) ?
+ cmd->read_data_done : dr->read_data_done;
+
+ read_data_left = (cmd->data_length - read_data_done);
+ if (!read_data_left) {
+ pr_err("ITT: 0x%08x read_data_left is zero!\n",
+ cmd->init_task_tag);
+ return dr;
+ }
+
+ pdu = iscsit_get_pdu_holder_for_seq(cmd, NULL);
+ if (!pdu)
+ return dr;
+
+ if ((read_data_done + pdu->length) == cmd->data_length) {
+ pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+ next_burst_len = 0;
+ } else {
+ if ((next_burst_len + conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength)
+ next_burst_len += pdu->length;
+ else {
+ pdu->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+ next_burst_len = 0;
+ }
+ }
+
+ pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+ if (!dr->recovery) {
+ cmd->next_burst_len = next_burst_len;
+ cmd->read_data_done += pdu->length;
+ } else {
+ dr->next_burst_len = next_burst_len;
+ dr->read_data_done += pdu->length;
+ }
+
+ datain->flags = pdu->flags;
+ datain->length = pdu->length;
+ datain->offset = pdu->offset;
+ datain->data_sn = pdu->data_sn;
+
+ if (!dr->recovery) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return dr;
+ }
+
+ if (!dr->runlength) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ } else {
+ if ((dr->begrun + dr->runlength) == dr->data_sn) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ }
+
+ return dr;
+}
+
+/*
+ * For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=No.
+ */
+static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ u32 read_data_done, read_data_left, seq_send_order;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct iscsi_pdu *pdu;
+ struct iscsi_seq *seq = NULL;
+
+ dr = iscsit_get_datain_req(cmd);
+ if (!dr)
+ return NULL;
+
+ if (dr->recovery && dr->generate_recovery_values) {
+ if (iscsit_create_recovery_datain_values_datasequenceinorder_no(
+ cmd, dr) < 0)
+ return NULL;
+
+ dr->generate_recovery_values = 0;
+ }
+
+ read_data_done = (!dr->recovery) ?
+ cmd->read_data_done : dr->read_data_done;
+ seq_send_order = (!dr->recovery) ?
+ cmd->seq_send_order : dr->seq_send_order;
+
+ read_data_left = (cmd->data_length - read_data_done);
+ if (!read_data_left) {
+ pr_err("ITT: 0x%08x read_data_left is zero!\n",
+ cmd->init_task_tag);
+ return NULL;
+ }
+
+ seq = iscsit_get_seq_holder_for_datain(cmd, seq_send_order);
+ if (!seq)
+ return NULL;
+
+ seq->sent = 1;
+
+ if (!dr->recovery && !seq->next_burst_len)
+ seq->first_datasn = cmd->data_sn;
+
+ pdu = iscsit_get_pdu_holder_for_seq(cmd, seq);
+ if (!pdu)
+ return NULL;
+
+ if (seq->pdu_send_order == seq->pdu_count) {
+ pdu->flags |= ISCSI_FLAG_CMD_FINAL;
+ if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
+ pdu->flags |= ISCSI_FLAG_DATA_ACK;
+
+ seq->next_burst_len = 0;
+ seq_send_order++;
+ } else
+ seq->next_burst_len += pdu->length;
+
+ if ((read_data_done + pdu->length) == cmd->data_length)
+ pdu->flags |= ISCSI_FLAG_DATA_STATUS;
+
+ pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
+ if (!dr->recovery) {
+ cmd->seq_send_order = seq_send_order;
+ cmd->read_data_done += pdu->length;
+ } else {
+ dr->seq_send_order = seq_send_order;
+ dr->read_data_done += pdu->length;
+ }
+
+ datain->flags = pdu->flags;
+ datain->length = pdu->length;
+ datain->offset = pdu->offset;
+ datain->data_sn = pdu->data_sn;
+
+ if (!dr->recovery) {
+ if (datain->flags & ISCSI_FLAG_CMD_FINAL)
+ seq->last_datasn = datain->data_sn;
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS)
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return dr;
+ }
+
+ if (!dr->runlength) {
+ if (datain->flags & ISCSI_FLAG_DATA_STATUS) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ } else {
+ if ((dr->begrun + dr->runlength) == dr->data_sn) {
+ dr->dr_complete =
+ (dr->recovery == DATAIN_WITHIN_COMMAND_RECOVERY) ?
+ DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY :
+ DATAIN_COMPLETE_CONNECTION_RECOVERY;
+ }
+ }
+
+ return dr;
+}
+
+struct iscsi_datain_req *iscsit_get_datain_values(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain *datain)
+{
+ struct iscsi_conn *conn = cmd->conn;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder &&
+ conn->sess->sess_ops->DataPDUInOrder)
+ return iscsit_set_datain_values_yes_and_yes(cmd, datain);
+ else if (!conn->sess->sess_ops->DataSequenceInOrder &&
+ conn->sess->sess_ops->DataPDUInOrder)
+ return iscsit_set_datain_values_no_and_yes(cmd, datain);
+ else if (conn->sess->sess_ops->DataSequenceInOrder &&
+ !conn->sess->sess_ops->DataPDUInOrder)
+ return iscsit_set_datain_values_yes_and_no(cmd, datain);
+ else if (!conn->sess->sess_ops->DataSequenceInOrder &&
+ !conn->sess->sess_ops->DataPDUInOrder)
+ return iscsit_set_datain_values_no_and_no(cmd, datain);
+
+ return NULL;
+}
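As an illustrative aside (not part of the patch): for the common DataSequenceInOrder=Yes / DataPDUInOrder=Yes case handled by iscsit_set_datain_values_yes_and_yes() above, the DataIN carving reduces to a per-PDU cap of MaxRecvDataSegmentLength and a per-burst cap of MaxBurstLength, with F closing each burst and the final PDU also carrying status. The self-contained userspace sketch below reproduces that arithmetic; the negotiated values used are arbitrary.

/*
 * Illustrative sketch only (not part of this patch).  The sizes below
 * are arbitrary stand-ins for negotiated values.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int mrdsl = 8192;	/* MaxRecvDataSegmentLength */
	const unsigned int mbl = 65536;		/* MaxBurstLength */
	const unsigned int data_length = 150000; /* total READ payload */
	unsigned int read_data_done = 0, next_burst_len = 0, data_sn = 0;

	while (read_data_done < data_length) {
		unsigned int left = data_length - read_data_done;
		unsigned int len;
		int f = 0, s = 0;

		if (left <= mrdsl && left <= (mbl - next_burst_len)) {
			len = left;
			f = s = 1;			/* last PDU: F + S */
		} else if (next_burst_len + mrdsl < mbl) {
			len = mrdsl;
			next_burst_len += len;
		} else {
			len = mbl - next_burst_len;	/* close this burst */
			next_burst_len = 0;
			f = 1;
		}

		printf("DataSN %2u offset %7u len %5u%s%s\n", data_sn++,
		       read_data_done, len, f ? " F" : "", s ? " S" : "");
		read_data_done += len;
	}
	return 0;
}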
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.h b/drivers/target/iscsi/iscsi_target_datain_values.h
new file mode 100644
index 00000000000..646429ac5a0
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_datain_values.h
@@ -0,0 +1,12 @@
+#ifndef ISCSI_TARGET_DATAIN_VALUES_H
+#define ISCSI_TARGET_DATAIN_VALUES_H
+
+extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
+extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
+extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
+extern void iscsit_free_all_datain_reqs(struct iscsi_cmd *);
+extern struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *);
+extern struct iscsi_datain_req *iscsit_get_datain_values(struct iscsi_cmd *,
+ struct iscsi_datain *);
+
+#endif /* ISCSI_TARGET_DATAIN_VALUES_H */
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
new file mode 100644
index 00000000000..a19fa5eea88
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -0,0 +1,87 @@
+/*******************************************************************************
+ * This file contains the iSCSI Virtual Device and Disk Transport
+ * agnostic related functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/scsi_device.h>
+#include <target/target_core_base.h>
+#include <target/target_core_device.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+
+int iscsit_get_lun_for_tmr(
+ struct iscsi_cmd *cmd,
+ u64 lun)
+{
+ u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ return transport_lookup_tmr_lun(&cmd->se_cmd, unpacked_lun);
+}
+
+int iscsit_get_lun_for_cmd(
+ struct iscsi_cmd *cmd,
+ unsigned char *cdb,
+ u64 lun)
+{
+ u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ return transport_lookup_cmd_lun(&cmd->se_cmd, unpacked_lun);
+}
+
+void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
+{
+ struct se_node_acl *se_nacl;
+
+ /*
+ * This is a discovery session, the single queue slot was already
+ * assigned in iscsi_login_zero_tsih(). Since only Logout and
+ * Text Opcodes are allowed during discovery we do not have to worry
+ * about the HBA's queue depth here.
+ */
+ if (sess->sess_ops->SessionType)
+ return;
+
+ se_nacl = sess->se_sess->se_node_acl;
+
+ /*
+ * This is a normal session, set the Session's CmdSN window to the
+ * struct se_node_acl->queue_depth. The value in struct se_node_acl->queue_depth
+ * has already been validated as a legal value in
+ * core_set_queue_depth_for_node().
+ */
+ sess->cmdsn_window = se_nacl->queue_depth;
+ sess->max_cmd_sn = (sess->max_cmd_sn + se_nacl->queue_depth) - 1;
+}
+
+void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
+{
+ if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
+ return;
+
+ cmd->maxcmdsn_inc = 1;
+
+ mutex_lock(&sess->cmdsn_mutex);
+ sess->max_cmd_sn += 1;
+ pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
+ mutex_unlock(&sess->cmdsn_mutex);
+}
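As an illustrative aside (not part of the patch): iscsit_determine_maxcmdsn() seeds the CmdSN window from the ACL queue depth, and iscsit_increment_maxcmdsn() widens it by one (at most once per command), so the initiator always sees (MaxCmdSN - ExpCmdSN + 1) open slots. The userspace sketch below shows just the arithmetic; the starting sequence numbers are assumptions.

/*
 * Illustrative sketch only (not part of this patch).  Starting values
 * are assumptions, not taken from the patch.
 */
#include <stdio.h>

int main(void)
{
	unsigned int exp_cmd_sn = 1;		/* assumed value at login */
	unsigned int max_cmd_sn = exp_cmd_sn;	/* assumed value at login */
	unsigned int queue_depth = 16;		/* se_node_acl->queue_depth */

	/* iscsit_determine_maxcmdsn(): open queue_depth command slots */
	max_cmd_sn = (max_cmd_sn + queue_depth) - 1;
	printf("ExpCmdSN=%u MaxCmdSN=%u (%u command slots)\n",
	       exp_cmd_sn, max_cmd_sn, max_cmd_sn - exp_cmd_sn + 1);

	/* iscsit_increment_maxcmdsn(): widen the window by one */
	max_cmd_sn += 1;
	printf("after one increment: MaxCmdSN=%u\n", max_cmd_sn);
	return 0;
}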
diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h
new file mode 100644
index 00000000000..bef1cada15f
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_device.h
@@ -0,0 +1,9 @@
+#ifndef ISCSI_TARGET_DEVICE_H
+#define ISCSI_TARGET_DEVICE_H
+
+extern int iscsit_get_lun_for_tmr(struct iscsi_cmd *, u64);
+extern int iscsit_get_lun_for_cmd(struct iscsi_cmd *, unsigned char *, u64);
+extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
+extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
+
+#endif /* ISCSI_TARGET_DEVICE_H */
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
new file mode 100644
index 00000000000..b7ffc3cd40c
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -0,0 +1,1004 @@
+/******************************************************************************
+ * This file contains error recovery level zero functions used by
+ * the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
+/*
+ * Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence()
+ * checks against to determine a PDU's Offset+Length is within the current
+ * DataOUT Sequence. Used for DataSequenceInOrder=Yes only.
+ */
+void iscsit_set_dataout_sequence_values(
+ struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ /*
+ * Still set seq_start_offset and seq_end_offset for Unsolicited
+ * DataOUT, even if DataSequenceInOrder=No.
+ */
+ if (cmd->unsolicited_data) {
+ cmd->seq_start_offset = cmd->write_data_done;
+ cmd->seq_end_offset = cmd->write_data_done +
+ ((cmd->data_length > conn->sess->sess_ops->FirstBurstLength) ?
+ conn->sess->sess_ops->FirstBurstLength : cmd->data_length);
+ return;
+ }
+
+ if (!conn->sess->sess_ops->DataSequenceInOrder)
+ return;
+
+ if (!cmd->seq_start_offset && !cmd->seq_end_offset) {
+ cmd->seq_start_offset = cmd->write_data_done;
+ cmd->seq_end_offset = (cmd->data_length >
+ conn->sess->sess_ops->MaxBurstLength) ?
+ (cmd->write_data_done +
+ conn->sess->sess_ops->MaxBurstLength) : cmd->data_length;
+ } else {
+ cmd->seq_start_offset = cmd->seq_end_offset;
+ cmd->seq_end_offset = ((cmd->seq_end_offset +
+ conn->sess->sess_ops->MaxBurstLength) >=
+ cmd->data_length) ? cmd->data_length :
+ (cmd->seq_end_offset +
+ conn->sess->sess_ops->MaxBurstLength);
+ }
+}
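As an illustrative aside (not part of the patch): for DataSequenceInOrder=Yes the function above advances a [seq_start_offset, seq_end_offset) window by MaxBurstLength on each call, clamping the final window to the total transfer length. The userspace sketch below walks successive windows; the sizes chosen are arbitrary and write_data_done is assumed to start at zero.

/*
 * Illustrative sketch only (not part of this patch).  Sizes are
 * arbitrary; write_data_done is assumed to be zero at the start.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int mbl = 65536;		/* MaxBurstLength */
	const unsigned int data_length = 150000;
	unsigned int start = 0, end = 0;

	while (end < data_length) {
		if (!start && !end) {		/* first sequence */
			end = (data_length > mbl) ? mbl : data_length;
		} else {
			start = end;
			end = (end + mbl >= data_length) ?
			       data_length : end + mbl;
		}
		printf("DataOUT sequence window: [%u, %u)\n", start, end);
	}
	return 0;
}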
+
+static int iscsit_dataout_within_command_recovery_check(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * We do the within-command recovery checks here as it is
+ * the first function called in iscsi_check_pre_dataout().
+ * Basically, if we are in within-command recovery and
+ * the PDU does not contain the offset the sequence needs,
+ * dump the payload.
+ *
+ * This only applies to DataPDUInOrder=Yes, for
+ * DataPDUInOrder=No we only re-request the failed PDU
+ * and check that all PDUs in a sequence are received
+ * upon end of sequence.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) &&
+ (cmd->write_data_done != hdr->offset))
+ goto dump;
+
+ cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY;
+ } else {
+ struct iscsi_seq *seq;
+
+ seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
+ if (!seq)
+ return DATAOUT_CANNOT_RECOVER;
+ /*
+ * Set the struct iscsi_seq pointer to reuse later.
+ */
+ cmd->seq_ptr = seq;
+
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ if ((seq->status ==
+ DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
+ ((seq->offset != hdr->offset) ||
+ (seq->data_sn != hdr->datasn)))
+ goto dump;
+ } else {
+ if ((seq->status ==
+ DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
+ (seq->data_sn != hdr->datasn))
+ goto dump;
+ }
+
+ if (seq->status == DATAOUT_SEQUENCE_COMPLETE)
+ goto dump;
+
+ if (seq->status != DATAOUT_SEQUENCE_COMPLETE)
+ seq->status = 0;
+ }
+
+ return DATAOUT_NORMAL;
+
+dump:
+ pr_err("Dumping DataOUT PDU Offset: %u Length: %d DataSN:"
+ " 0x%08x\n", hdr->offset, payload_length, hdr->datasn);
+ return iscsit_dump_data_payload(conn, payload_length, 1);
+}
+
+static int iscsit_dataout_check_unsolicited_sequence(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ u32 first_burst_len;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ if ((hdr->offset < cmd->seq_start_offset) ||
+ ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
+ pr_err("Command ITT: 0x%08x with Offset: %u,"
+ " Length: %u outside of Unsolicited Sequence %u:%u while"
+ " DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
+ hdr->offset, payload_length, cmd->seq_start_offset,
+ cmd->seq_end_offset);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ first_burst_len = (cmd->first_burst_len + payload_length);
+
+ if (first_burst_len > conn->sess->sess_ops->FirstBurstLength) {
+ pr_err("Total %u bytes exceeds FirstBurstLength: %u"
+ " for this Unsolicited DataOut Burst.\n",
+ first_burst_len, conn->sess->sess_ops->FirstBurstLength);
+ transport_send_check_condition_and_sense(&cmd->se_cmd,
+ TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ /*
+ * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
+ * checks for the current Unsolicited DataOUT Sequence.
+ */
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
+ /*
+ * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of
+ * sequence checks are handled in
+ * iscsit_dataout_datapduinorder_no_fbit().
+ */
+ if (!conn->sess->sess_ops->DataPDUInOrder)
+ goto out;
+
+ if ((first_burst_len != cmd->data_length) &&
+ (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
+ pr_err("Unsolicited non-immediate data"
+ " received %u does not equal FirstBurstLength: %u, and"
+ " does not equal ExpXferLen %u.\n", first_burst_len,
+ conn->sess->sess_ops->FirstBurstLength,
+ cmd->data_length);
+ transport_send_check_condition_and_sense(&cmd->se_cmd,
+ TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ } else {
+ if (first_burst_len == conn->sess->sess_ops->FirstBurstLength) {
+ pr_err("Command ITT: 0x%08x reached"
+ " FirstBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
+ " error.\n", cmd->init_task_tag,
+ conn->sess->sess_ops->FirstBurstLength);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ if (first_burst_len == cmd->data_length) {
+ pr_err("Command ITT: 0x%08x reached"
+ " ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
+ " error.\n", cmd->init_task_tag, cmd->data_length);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ }
+
+out:
+ return DATAOUT_NORMAL;
+}
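As an illustrative aside (not part of the patch): the unsolicited-sequence check above boils down to three conditions on the accumulated first burst length. The self-contained sketch below condenses them for the DataPDUInOrder=Yes case; the helper name and the values passed in main() are made up.

/*
 * Illustrative sketch only (not part of this patch).  Helper name and
 * input values are made up for the demonstration.
 */
#include <stdio.h>

static int check_unsolicited_pdu(unsigned int first_burst_len,
				 unsigned int payload_length, int f_bit,
				 unsigned int first_burst_limit,
				 unsigned int data_length)
{
	unsigned int total = first_burst_len + payload_length;

	if (total > first_burst_limit)
		return -1;		/* overruns FirstBurstLength */
	if (f_bit && total != data_length && total != first_burst_limit)
		return -1;		/* final PDU ends too early */
	if (!f_bit && (total == first_burst_limit || total == data_length))
		return -1;		/* F bit missing at the end */
	return 0;
}

int main(void)
{
	/* 64k FirstBurstLength, 100k transfer, 16k already received */
	printf("%d\n", check_unsolicited_pdu(16384, 8192, 0, 65536, 102400));
	printf("%d\n", check_unsolicited_pdu(57344, 8192, 1, 65536, 102400));
	return 0;
}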
+
+static int iscsit_dataout_check_sequence(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ u32 next_burst_len;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_seq *seq = NULL;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * For DataSequenceInOrder=Yes: Check that the offset and offset+length
+ * is within range as defined by iscsi_set_dataout_sequence_values().
+ *
+ * For DataSequenceInOrder=No: Check that an struct iscsi_seq exists for
+ * offset+length tuple.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ /*
+ * Due to the possibility of recovery DataOUT being sent by the
+ * initiator to fulfill a Recovery R2T, it is best to just dump the
+ * payload here instead of erroring out.
+ */
+ if ((hdr->offset < cmd->seq_start_offset) ||
+ ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
+ pr_err("Command ITT: 0x%08x with Offset: %u,"
+ " Length: %u outside of Sequence %u:%u while"
+ " DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
+ hdr->offset, payload_length, cmd->seq_start_offset,
+ cmd->seq_end_offset);
+
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ return DATAOUT_WITHIN_COMMAND_RECOVERY;
+ }
+
+ next_burst_len = (cmd->next_burst_len + payload_length);
+ } else {
+ seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
+ if (!seq)
+ return DATAOUT_CANNOT_RECOVER;
+ /*
+ * Set the struct iscsi_seq pointer to reuse later.
+ */
+ cmd->seq_ptr = seq;
+
+ if (seq->status == DATAOUT_SEQUENCE_COMPLETE) {
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ return DATAOUT_WITHIN_COMMAND_RECOVERY;
+ }
+
+ next_burst_len = (seq->next_burst_len + payload_length);
+ }
+
+ if (next_burst_len > conn->sess->sess_ops->MaxBurstLength) {
+ pr_err("Command ITT: 0x%08x, NextBurstLength: %u and"
+ " Length: %u exceeds MaxBurstLength: %u. protocol"
+ " error.\n", cmd->init_task_tag,
+ (next_burst_len - payload_length),
+ payload_length, conn->sess->sess_ops->MaxBurstLength);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ /*
+ * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
+ * checks for the current DataOUT Sequence.
+ */
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
+ /*
+ * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of
+ * sequence checks are handled in
+ * iscsit_dataout_datapduinorder_no_fbit().
+ */
+ if (!conn->sess->sess_ops->DataPDUInOrder)
+ goto out;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if ((next_burst_len <
+ conn->sess->sess_ops->MaxBurstLength) &&
+ ((cmd->write_data_done + payload_length) <
+ cmd->data_length)) {
+ pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
+ " before end of DataOUT sequence, protocol"
+ " error.\n", cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ } else {
+ if (next_burst_len < seq->xfer_len) {
+ pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
+ " before end of DataOUT sequence, protocol"
+ " error.\n", cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ }
+ } else {
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if (next_burst_len ==
+ conn->sess->sess_ops->MaxBurstLength) {
+ pr_err("Command ITT: 0x%08x reached"
+ " MaxBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is"
+ " not set, protocol error.", cmd->init_task_tag,
+ conn->sess->sess_ops->MaxBurstLength);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ if ((cmd->write_data_done + payload_length) ==
+ cmd->data_length) {
+ pr_err("Command ITT: 0x%08x reached"
+ " last DataOUT PDU in sequence but ISCSI_FLAG_"
+ "CMD_FINAL is not set, protocol error.\n",
+ cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ } else {
+ if (next_burst_len == seq->xfer_len) {
+ pr_err("Command ITT: 0x%08x reached"
+ " last DataOUT PDU in sequence but ISCSI_FLAG_"
+ "CMD_FINAL is not set, protocol error.\n",
+ cmd->init_task_tag);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+ }
+ }
+
+out:
+ return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_check_datasn(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ int dump = 0, recovery = 0;
+ u32 data_sn = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * Considering the target has no method of re-requesting DataOUT
+ * by DataSN, if we receive a greater DataSN than expected we
+ * assume the functions for DataPDUInOrder=[Yes,No] below will
+ * handle it.
+ *
+ * If the DataSN is less than expected, dump the payload.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder)
+ data_sn = cmd->data_sn;
+ else {
+ struct iscsi_seq *seq = cmd->seq_ptr;
+ data_sn = seq->data_sn;
+ }
+
+ if (hdr->datasn > data_sn) {
+ pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
+ " higher than expected 0x%08x.\n", cmd->init_task_tag,
+ hdr->datasn, data_sn);
+ recovery = 1;
+ goto recover;
+ } else if (hdr->datasn < data_sn) {
+ pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
+ " lower than expected 0x%08x, discarding payload.\n",
+ cmd->init_task_tag, hdr->datasn, data_sn);
+ dump = 1;
+ goto dump;
+ }
+
+ return DATAOUT_NORMAL;
+
+recover:
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to perform within-command recovery"
+ " while ERL=0.\n");
+ return DATAOUT_CANNOT_RECOVER;
+ }
+dump:
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ return (recovery || dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY :
+ DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_pre_datapduinorder_yes(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ int dump = 0, recovery = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ /*
+ * For DataSequenceInOrder=Yes: If the offset is greater than the global
+ * DataPDUInOrder=Yes offset counter in struct iscsi_cmd, a protocol error
+ * has occurred and we fail the connection.
+ *
+ * For DataSequenceInOrder=No: If the offset is greater than the per
+ * sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq, a
+ * protocol error has occurred and we fail the connection.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if (hdr->offset != cmd->write_data_done) {
+ pr_err("Command ITT: 0x%08x, received offset"
+ " %u different than expected %u.\n", cmd->init_task_tag,
+ hdr->offset, cmd->write_data_done);
+ recovery = 1;
+ goto recover;
+ }
+ } else {
+ struct iscsi_seq *seq = cmd->seq_ptr;
+
+ if (hdr->offset > seq->offset) {
+ pr_err("Command ITT: 0x%08x, received offset"
+ " %u greater than expected %u.\n", cmd->init_task_tag,
+ hdr->offset, seq->offset);
+ recovery = 1;
+ goto recover;
+ } else if (hdr->offset < seq->offset) {
+ pr_err("Command ITT: 0x%08x, received offset"
+ " %u less than expected %u, discarding payload.\n",
+ cmd->init_task_tag, hdr->offset, seq->offset);
+ dump = 1;
+ goto dump;
+ }
+ }
+
+ return DATAOUT_NORMAL;
+
+recover:
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to perform within-command recovery"
+ " while ERL=0.\n");
+ return DATAOUT_CANNOT_RECOVER;
+ }
+dump:
+ if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ return (recovery) ? iscsit_recover_dataout_sequence(cmd,
+ hdr->offset, payload_length) :
+ (dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_pre_datapduinorder_no(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_pdu *pdu;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ pdu = iscsit_get_pdu_holder(cmd, hdr->offset, payload_length);
+ if (!pdu)
+ return DATAOUT_CANNOT_RECOVER;
+
+ cmd->pdu_ptr = pdu;
+
+ switch (pdu->status) {
+ case ISCSI_PDU_NOT_RECEIVED:
+ case ISCSI_PDU_CRC_FAILED:
+ case ISCSI_PDU_TIMED_OUT:
+ break;
+ case ISCSI_PDU_RECEIVED_OK:
+ pr_err("Command ITT: 0x%08x received already gotten"
+ " Offset: %u, Length: %u\n", cmd->init_task_tag,
+ hdr->offset, payload_length);
+ return iscsit_dump_data_payload(cmd->conn, payload_length, 1);
+ default:
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 length)
+{
+ struct iscsi_r2t *r2t;
+
+ if (cmd->unsolicited_data)
+ return 0;
+
+ r2t = iscsit_get_r2t_for_eos(cmd, offset, length);
+ if (!r2t)
+ return -1;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ r2t->seq_complete = 1;
+ cmd->outstanding_r2ts--;
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return 0;
+}
+
+static int iscsit_dataout_update_datapduinorder_no(
+ struct iscsi_cmd *cmd,
+ u32 data_sn,
+ int f_bit)
+{
+ int ret = 0;
+ struct iscsi_pdu *pdu = cmd->pdu_ptr;
+
+ pdu->data_sn = data_sn;
+
+	switch (pdu->status) {
+	case ISCSI_PDU_NOT_RECEIVED:
+	case ISCSI_PDU_CRC_FAILED:
+	case ISCSI_PDU_TIMED_OUT:
+		pdu->status = ISCSI_PDU_RECEIVED_OK;
+		break;
+	default:
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+ if (f_bit) {
+ ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu);
+ if (ret == DATAOUT_CANNOT_RECOVER)
+ return ret;
+ }
+
+ return DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_post_crc_passed(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ int ret, send_r2t = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_seq *seq = NULL;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
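+	/*
+	 * For unsolicited data, the burst is complete once FirstBurstLength
+	 * bytes have been received; for solicited data the boundary is
+	 * MaxBurstLength (DataSequenceInOrder=Yes) or the per sequence
+	 * xfer_len (DataSequenceInOrder=No).  Completing a burst requests
+	 * that the next R2T be sent below.
+	 */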
+ if (cmd->unsolicited_data) {
+ if ((cmd->first_burst_len + payload_length) ==
+ conn->sess->sess_ops->FirstBurstLength) {
+ if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+ payload_length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ send_r2t = 1;
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ ret = iscsit_dataout_update_datapduinorder_no(cmd,
+ hdr->datasn, (hdr->flags & ISCSI_FLAG_CMD_FINAL));
+ if (ret == DATAOUT_CANNOT_RECOVER)
+ return ret;
+ }
+
+ cmd->first_burst_len += payload_length;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder)
+ cmd->data_sn++;
+ else {
+ seq = cmd->seq_ptr;
+ seq->data_sn++;
+ seq->offset += payload_length;
+ }
+
+ if (send_r2t) {
+ if (seq)
+ seq->status = DATAOUT_SEQUENCE_COMPLETE;
+ cmd->first_burst_len = 0;
+ cmd->unsolicited_data = 0;
+ }
+ } else {
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if ((cmd->next_burst_len + payload_length) ==
+ conn->sess->sess_ops->MaxBurstLength) {
+ if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+ payload_length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ send_r2t = 1;
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ ret = iscsit_dataout_update_datapduinorder_no(
+ cmd, hdr->datasn,
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL));
+ if (ret == DATAOUT_CANNOT_RECOVER)
+ return ret;
+ }
+
+ cmd->next_burst_len += payload_length;
+ cmd->data_sn++;
+
+ if (send_r2t)
+ cmd->next_burst_len = 0;
+ } else {
+ seq = cmd->seq_ptr;
+
+ if ((seq->next_burst_len + payload_length) ==
+ seq->xfer_len) {
+ if (iscsit_dataout_update_r2t(cmd, hdr->offset,
+ payload_length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+ send_r2t = 1;
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ ret = iscsit_dataout_update_datapduinorder_no(
+ cmd, hdr->datasn,
+ (hdr->flags & ISCSI_FLAG_CMD_FINAL));
+ if (ret == DATAOUT_CANNOT_RECOVER)
+ return ret;
+ }
+
+ seq->data_sn++;
+ seq->offset += payload_length;
+ seq->next_burst_len += payload_length;
+
+ if (send_r2t) {
+ seq->next_burst_len = 0;
+ seq->status = DATAOUT_SEQUENCE_COMPLETE;
+ }
+ }
+ }
+
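+	/*
+	 * DataOUT DataSNs restart with each new sequence, so once a burst
+	 * completes and a new R2T will be sent, reset the per command
+	 * counter for DataSequenceInOrder=Yes.
+	 */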
+ if (send_r2t && conn->sess->sess_ops->DataSequenceInOrder)
+ cmd->data_sn = 0;
+
+ cmd->write_data_done += payload_length;
+
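+	/*
+	 * All expected data received: hand the command to the transport.
+	 * Otherwise either queue the next R2T or keep receiving DataOUT.
+	 */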
+ return (cmd->write_data_done == cmd->data_length) ?
+ DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ?
+ DATAOUT_SEND_R2T : DATAOUT_NORMAL;
+}
+
+static int iscsit_dataout_post_crc_failed(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ if (conn->sess->sess_ops->DataPDUInOrder)
+ goto recover;
+ /*
+	 * The rest of this function is only executed when DataPDUInOrder=No.
+ */
+ pdu = cmd->pdu_ptr;
+
+	switch (pdu->status) {
+	case ISCSI_PDU_NOT_RECEIVED:
+	case ISCSI_PDU_TIMED_OUT:
+		pdu->status = ISCSI_PDU_CRC_FAILED;
+		break;
+	case ISCSI_PDU_CRC_FAILED:
+		break;
+	default:
+		return DATAOUT_CANNOT_RECOVER;
+	}
+
+recover:
+ return iscsit_recover_dataout_sequence(cmd, hdr->offset, payload_length);
+}
+
+/*
+ * Called from iscsit_handle_data_out() before DataOUT Payload is received
+ * and CRC computed.
+ */
+extern int iscsit_check_pre_dataout(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ int ret;
+ struct iscsi_conn *conn = cmd->conn;
+
+ ret = iscsit_dataout_within_command_recovery_check(cmd, buf);
+ if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+ (ret == DATAOUT_CANNOT_RECOVER))
+ return ret;
+
+ ret = iscsit_dataout_check_datasn(cmd, buf);
+ if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+ (ret == DATAOUT_CANNOT_RECOVER))
+ return ret;
+
+ if (cmd->unsolicited_data) {
+ ret = iscsit_dataout_check_unsolicited_sequence(cmd, buf);
+ if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+ (ret == DATAOUT_CANNOT_RECOVER))
+ return ret;
+ } else {
+ ret = iscsit_dataout_check_sequence(cmd, buf);
+ if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
+ (ret == DATAOUT_CANNOT_RECOVER))
+ return ret;
+ }
+
+ return (conn->sess->sess_ops->DataPDUInOrder) ?
+ iscsit_dataout_pre_datapduinorder_yes(cmd, buf) :
+ iscsit_dataout_pre_datapduinorder_no(cmd, buf);
+}
+
+/*
+ * Called from iscsit_handle_data_out() after DataOUT Payload is received
+ * and CRC computed.
+ */
+int iscsit_check_post_dataout(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf,
+ u8 data_crc_failed)
+{
+ struct iscsi_conn *conn = cmd->conn;
+
+ cmd->dataout_timeout_retries = 0;
+
+ if (!data_crc_failed)
+ return iscsit_dataout_post_crc_passed(cmd, buf);
+ else {
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from DataOUT CRC"
+ " failure while ERL=0, closing session.\n");
+ iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
+ 1, 0, buf, cmd);
+ return DATAOUT_CANNOT_RECOVER;
+ }
+
+ iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
+ 0, 0, buf, cmd);
+ return iscsit_dataout_post_crc_failed(cmd, buf);
+ }
+}
+
+static void iscsit_handle_time2retain_timeout(unsigned long data)
+{
+ struct iscsi_session *sess = (struct iscsi_session *) data;
+ struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+ spin_lock_bh(&se_tpg->session_lock);
+ if (sess->time2retain_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&se_tpg->session_lock);
+ return;
+ }
+ if (atomic_read(&sess->session_reinstatement)) {
+ pr_err("Exiting Time2Retain handler because"
+ " session_reinstatement=1\n");
+ spin_unlock_bh(&se_tpg->session_lock);
+ return;
+ }
+ sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED;
+
+ pr_err("Time2Retain timer expired for SID: %u, cleaning up"
+ " iSCSI session.\n", sess->sid);
+ {
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ if (tiqn) {
+ spin_lock(&tiqn->sess_err_stats.lock);
+ strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+ (void *)sess->sess_ops->InitiatorName);
+ tiqn->sess_err_stats.last_sess_failure_type =
+ ISCSI_SESS_ERR_CXN_TIMEOUT;
+ tiqn->sess_err_stats.cxn_timeout_errors++;
+ sess->conn_timeout_errors++;
+ spin_unlock(&tiqn->sess_err_stats.lock);
+ }
+ }
+
+ spin_unlock_bh(&se_tpg->session_lock);
+ iscsit_close_session(sess);
+}
+
+extern void iscsit_start_time2retain_handler(struct iscsi_session *sess)
+{
+ int tpg_active;
+ /*
+	 * Only start the Time2Retain timer when the associated TPG is still
+	 * in an ACTIVE (i.e. not disabled or shutdown) state.
+ */
+ spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
+ tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE);
+ spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock);
+
+ if (!tpg_active)
+ return;
+
+ if (sess->time2retain_timer_flags & ISCSI_TF_RUNNING)
+ return;
+
+ pr_debug("Starting Time2Retain timer for %u seconds on"
+ " SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid);
+
+ init_timer(&sess->time2retain_timer);
+ sess->time2retain_timer.expires =
+ (get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ);
+ sess->time2retain_timer.data = (unsigned long)sess;
+ sess->time2retain_timer.function = iscsit_handle_time2retain_timeout;
+ sess->time2retain_timer_flags &= ~ISCSI_TF_STOP;
+ sess->time2retain_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&sess->time2retain_timer);
+}
+
+/*
+ * Called with spin_lock_bh(&struct se_portal_group->session_lock) held
+ */
+extern int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
+{
+ struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+
+ if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
+ return -1;
+
+ if (!(sess->time2retain_timer_flags & ISCSI_TF_RUNNING))
+ return 0;
+
+ sess->time2retain_timer_flags |= ISCSI_TF_STOP;
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ del_timer_sync(&sess->time2retain_timer);
+
+ spin_lock_bh(&se_tpg->session_lock);
+ sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
+ pr_debug("Stopped Time2Retain Timer for SID: %u\n",
+ sess->sid);
+ return 0;
+}
+
+void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->state_lock);
+ if (atomic_read(&conn->connection_exit)) {
+ spin_unlock_bh(&conn->state_lock);
+ goto sleep;
+ }
+
+ if (atomic_read(&conn->transport_failed)) {
+ spin_unlock_bh(&conn->state_lock);
+ goto sleep;
+ }
+ spin_unlock_bh(&conn->state_lock);
+
+ iscsi_thread_set_force_reinstatement(conn);
+
+sleep:
+ wait_for_completion(&conn->conn_wait_rcfr_comp);
+ complete(&conn->conn_post_wait_comp);
+}
+
+void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
+{
+ spin_lock_bh(&conn->state_lock);
+ if (atomic_read(&conn->connection_exit)) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ if (atomic_read(&conn->transport_failed)) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ if (atomic_read(&conn->connection_reinstatement)) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ if (iscsi_thread_set_force_reinstatement(conn) < 0) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ atomic_set(&conn->connection_reinstatement, 1);
+ if (!sleep) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ atomic_set(&conn->sleep_on_conn_wait_comp, 1);
+ spin_unlock_bh(&conn->state_lock);
+
+ wait_for_completion(&conn->conn_wait_comp);
+ complete(&conn->conn_post_wait_comp);
+}
+
+void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
+{
+ pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:"
+ " %u\n", sess->sid);
+
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
+}
+
+static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+
+ if ((sess->sess_ops->ErrorRecoveryLevel == 2) &&
+ !atomic_read(&sess->session_reinstatement) &&
+ !atomic_read(&sess->session_fall_back_to_erl0))
+ iscsit_connection_recovery_transport_reset(conn);
+ else {
+ pr_debug("Performing cleanup for failed iSCSI"
+ " Connection ID: %hu from %s\n", conn->cid,
+ sess->sess_ops->InitiatorName);
+ iscsit_close_connection(conn);
+ }
+}
+
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->state_lock);
+ if (atomic_read(&conn->connection_exit)) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+ atomic_set(&conn->connection_exit, 1);
+
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+ spin_unlock_bh(&conn->state_lock);
+ iscsit_close_connection(conn);
+ return;
+ }
+
+ if (conn->conn_state == TARG_CONN_STATE_CLEANUP_WAIT) {
+ spin_unlock_bh(&conn->state_lock);
+ return;
+ }
+
+ pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
+ conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
+ spin_unlock_bh(&conn->state_lock);
+
+ iscsit_handle_connection_cleanup(conn);
+}
+
+/*
+ * This is the simple function that makes the magic of
+ * sync and steering happen in the following paradoxical order:
+ *
+ * 0) Receive conn->of_marker (bytes left until next OFMarker)
+ * bytes into an offload buffer. When we pass the exact number
+ * of bytes in conn->of_marker, iscsit_dump_data_payload() and hence
+ * rx_data() will automatically receive the identical u32 marker
+ *    values and store them in conn->of_marker_offset;
+ * 1) Now conn->of_marker_offset will contain the offset to the start
+ * of the next iSCSI PDU. Dump these remaining bytes into another
+ * offload buffer.
+ * 2) We are done!
+ * Next byte in the TCP stream will contain the next iSCSI PDU!
+ * Cool Huh?!
+ */
+int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn)
+{
+ /*
+	 * Make sure the number of remaining bytes to the next marker is sane.
+ */
+ if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) {
+ pr_err("Remaining bytes to OFMarker: %u exceeds"
+ " OFMarkInt bytes: %u.\n", conn->of_marker,
+ conn->conn_ops->OFMarkInt * 4);
+ return -1;
+ }
+
+ pr_debug("Advancing %u bytes in TCP stream to get to the"
+ " next OFMarker.\n", conn->of_marker);
+
+ if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0)
+ return -1;
+
+ /*
+	 * Make sure the offset marker we retrieved is a valid value.
+ */
+ if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) +
+ conn->conn_ops->MaxRecvDataSegmentLength)) {
+ pr_err("OfMarker offset value: %u exceeds limit.\n",
+ conn->of_marker_offset);
+ return -1;
+ }
+
+ pr_debug("Discarding %u bytes of TCP stream to get to the"
+ " next iSCSI Opcode.\n", conn->of_marker_offset);
+
+ if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0)
+ return -1;
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
new file mode 100644
index 00000000000..21acc9a0637
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -0,0 +1,15 @@
+#ifndef ISCSI_TARGET_ERL0_H
+#define ISCSI_TARGET_ERL0_H
+
+extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
+extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
+extern void iscsit_start_time2retain_handler(struct iscsi_session *);
+extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
+extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
+extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
+extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern int iscsit_recover_from_unknown_opcode(struct iscsi_conn *);
+
+#endif /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
new file mode 100644
index 00000000000..980650792cf
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -0,0 +1,1299 @@
+/*******************************************************************************
+ * This file contains error recovery level one used by the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target.h"
+
+#define OFFLOAD_BUF_SIZE 32768
+
+/*
+ * Used to dump excess datain payload for certain error recovery
+ * situations.  Receives at most OFFLOAD_BUF_SIZE bytes of datain per
+ * rx_data() call.
+ *
+ * dump_padding_digest denotes if padding and data digests need
+ * to be dumped.
+ */
+int iscsit_dump_data_payload(
+ struct iscsi_conn *conn,
+ u32 buf_len,
+ int dump_padding_digest)
+{
+ char *buf, pad_bytes[4];
+ int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got;
+ u32 length, padding, offset = 0, size;
+ struct kvec iov;
+
+ length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
+
+ buf = kzalloc(length, GFP_ATOMIC);
+ if (!buf) {
+ pr_err("Unable to allocate %u bytes for offload"
+ " buffer.\n", length);
+ return -1;
+ }
+ memset(&iov, 0, sizeof(struct kvec));
+
+ while (offset < buf_len) {
+ size = ((offset + length) > buf_len) ?
+ (buf_len - offset) : length;
+
+ iov.iov_len = size;
+ iov.iov_base = buf;
+
+ rx_got = rx_data(conn, &iov, 1, size);
+ if (rx_got != size) {
+ ret = DATAOUT_CANNOT_RECOVER;
+ goto out;
+ }
+
+ offset += size;
+ }
+
+ if (!dump_padding_digest)
+ goto out;
+
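+	/*
+	 * The dumped payload is padded to a 4-byte boundary, so receive and
+	 * discard any pad bytes before the optional data digest.
+	 */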
+ padding = ((-buf_len) & 3);
+ if (padding != 0) {
+ iov.iov_len = padding;
+ iov.iov_base = pad_bytes;
+
+ rx_got = rx_data(conn, &iov, 1, padding);
+ if (rx_got != padding) {
+ ret = DATAOUT_CANNOT_RECOVER;
+ goto out;
+ }
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ u32 data_crc;
+
+ iov.iov_len = ISCSI_CRC_LEN;
+ iov.iov_base = &data_crc;
+
+ rx_got = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
+ if (rx_got != ISCSI_CRC_LEN) {
+ ret = DATAOUT_CANNOT_RECOVER;
+ goto out;
+ }
+ }
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * Used for retransmitting R2Ts from a R2T SNACK request.
+ */
+static int iscsit_send_recovery_r2t_for_snack(
+ struct iscsi_cmd *cmd,
+ struct iscsi_r2t *r2t)
+{
+ /*
+ * If the struct iscsi_r2t has not been sent yet, we can safely
+	 * ignore retransmission of the R2TSN in question.
+ */
+ spin_lock_bh(&cmd->r2t_lock);
+ if (!r2t->sent_r2t) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return 0;
+ }
+ r2t->sent_r2t = 0;
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
+
+ return 0;
+}
+
+static int iscsit_handle_r2t_snack(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf,
+ u32 begrun,
+ u32 runlength)
+{
+ u32 last_r2tsn;
+ struct iscsi_r2t *r2t;
+
+ /*
+ * Make sure the initiator is not requesting retransmission
+ * of R2TSNs already acknowledged by a TMR TASK_REASSIGN.
+ */
+ if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+ (begrun <= cmd->acked_data_sn)) {
+ pr_err("ITT: 0x%08x, R2T SNACK requesting"
+ " retransmission of R2TSN: 0x%08x to 0x%08x but already"
+ " acked to R2TSN: 0x%08x by TMR TASK_REASSIGN,"
+ " protocol error.\n", cmd->init_task_tag, begrun,
+ (begrun + runlength), cmd->acked_data_sn);
+
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+
+ if (runlength) {
+ if ((begrun + runlength) > cmd->r2t_sn) {
+ pr_err("Command ITT: 0x%08x received R2T SNACK"
+ " with BegRun: 0x%08x, RunLength: 0x%08x, exceeds"
+ " current R2TSN: 0x%08x, protocol error.\n",
+ cmd->init_task_tag, begrun, runlength, cmd->r2t_sn);
+ return iscsit_add_reject_from_cmd(
+ ISCSI_REASON_BOOKMARK_INVALID, 1, 0, buf, cmd);
+ }
+ last_r2tsn = (begrun + runlength);
+ } else
+ last_r2tsn = cmd->r2t_sn;
+
+ while (begrun < last_r2tsn) {
+ r2t = iscsit_get_holder_for_r2tsn(cmd, begrun);
+ if (!r2t)
+ return -1;
+ if (iscsit_send_recovery_r2t_for_snack(cmd, r2t) < 0)
+ return -1;
+
+ begrun++;
+ }
+
+ return 0;
+}
+
+/*
+ * Generates Offsets and NextBurstLength based on Begrun and Runlength
+ * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
+ *
+ * For DataSequenceInOrder=Yes and DataPDUInOrder=[Yes,No] only.
+ *
+ * FIXME: How is this handled for a RData SNACK?
+ */
+int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain_req *dr)
+{
+ u32 data_sn = 0, data_sn_count = 0;
+ u32 pdu_start = 0, seq_no = 0;
+ u32 begrun = dr->begrun;
+ struct iscsi_conn *conn = cmd->conn;
+
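+	/*
+	 * Walk DataSN 0 .. (BegRun - 1), crediting MaxRecvDataSegmentLength
+	 * per DataIN PDU until a MaxBurstLength boundary is crossed, at
+	 * which point the next sequence begins.
+	 */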
+ while (begrun > data_sn++) {
+ data_sn_count++;
+ if ((dr->next_burst_len +
+ conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength) {
+ dr->read_data_done +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ dr->next_burst_len +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ } else {
+ dr->read_data_done +=
+ (conn->sess->sess_ops->MaxBurstLength -
+ dr->next_burst_len);
+ dr->next_burst_len = 0;
+ pdu_start += data_sn_count;
+ data_sn_count = 0;
+ seq_no++;
+ }
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ cmd->seq_no = seq_no;
+ cmd->pdu_start = pdu_start;
+ cmd->pdu_send_order = data_sn_count;
+ }
+
+ return 0;
+}
+
+/*
+ * Generates Offsets and NextBurstLength based on Begrun and Runlength
+ * carried in a Data SNACK or ExpDataSN in TMR TASK_REASSIGN.
+ *
+ * For DataSequenceInOrder=No and DataPDUInOrder=[Yes,No] only.
+ *
+ * FIXME: How is this handled for a RData SNACK?
+ */
+int iscsit_create_recovery_datain_values_datasequenceinorder_no(
+ struct iscsi_cmd *cmd,
+ struct iscsi_datain_req *dr)
+{
+ int found_seq = 0, i;
+ u32 data_sn, read_data_done = 0, seq_send_order = 0;
+ u32 begrun = dr->begrun;
+ u32 runlength = dr->runlength;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_seq *first_seq = NULL, *seq = NULL;
+
+ if (!cmd->seq_list) {
+ pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+ return -1;
+ }
+
+ /*
+ * Calculate read_data_done for all sequences containing a
+ * first_datasn and last_datasn less than the BegRun.
+ *
+ * Locate the struct iscsi_seq the BegRun lies within and calculate
+	 * NextBurstLength up to the DataSN based on MaxRecvDataSegmentLength.
+ *
+ * Also use struct iscsi_seq->seq_send_order to determine where to start.
+ */
+ for (i = 0; i < cmd->seq_count; i++) {
+ seq = &cmd->seq_list[i];
+
+ if (!seq->seq_send_order)
+ first_seq = seq;
+
+ /*
+ * No data has been transferred for this DataIN sequence, so the
+ * seq->first_datasn and seq->last_datasn have not been set.
+ */
+ if (!seq->sent) {
+#if 0
+ pr_err("Ignoring non-sent sequence 0x%08x ->"
+ " 0x%08x\n\n", seq->first_datasn,
+ seq->last_datasn);
+#endif
+ continue;
+ }
+
+ /*
+		 * This DataIN sequence precedes the received BegRun; add the
+ * total xfer_len of the sequence to read_data_done and reset
+ * seq->pdu_send_order.
+ */
+ if ((seq->first_datasn < begrun) &&
+ (seq->last_datasn < begrun)) {
+#if 0
+ pr_err("Pre BegRun sequence 0x%08x ->"
+ " 0x%08x\n", seq->first_datasn,
+ seq->last_datasn);
+#endif
+ read_data_done += cmd->seq_list[i].xfer_len;
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ continue;
+ }
+
+ /*
+ * The BegRun lies within this DataIN sequence.
+ */
+ if ((seq->first_datasn <= begrun) &&
+ (seq->last_datasn >= begrun)) {
+#if 0
+ pr_err("Found sequence begrun: 0x%08x in"
+ " 0x%08x -> 0x%08x\n", begrun,
+ seq->first_datasn, seq->last_datasn);
+#endif
+ seq_send_order = seq->seq_send_order;
+ data_sn = seq->first_datasn;
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ found_seq = 1;
+
+ /*
+ * For DataPDUInOrder=Yes, while the first DataSN of
+ * the sequence is less than the received BegRun, add
+ * the MaxRecvDataSegmentLength to read_data_done and
+ * to the sequence's next_burst_len;
+ *
+ * For DataPDUInOrder=No, while the first DataSN of the
+ * sequence is less than the received BegRun, find the
+ * struct iscsi_pdu of the DataSN in question and add the
+ * MaxRecvDataSegmentLength to read_data_done and to the
+ * sequence's next_burst_len;
+ */
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ while (data_sn < begrun) {
+ seq->pdu_send_order++;
+ read_data_done +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ seq->next_burst_len +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ data_sn++;
+ }
+ } else {
+ int j;
+ struct iscsi_pdu *pdu;
+
+ while (data_sn < begrun) {
+ seq->pdu_send_order++;
+
+ for (j = 0; j < seq->pdu_count; j++) {
+ pdu = &cmd->pdu_list[
+ seq->pdu_start + j];
+ if (pdu->data_sn == data_sn) {
+ read_data_done +=
+ pdu->length;
+ seq->next_burst_len +=
+ pdu->length;
+ }
+ }
+ data_sn++;
+ }
+ }
+ continue;
+ }
+
+ /*
+		 * This DataIN sequence lies beyond the received BegRun, so
+		 * reset seq->pdu_send_order and continue.
+ */
+ if ((seq->first_datasn > begrun) ||
+ (seq->last_datasn > begrun)) {
+#if 0
+ pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n",
+ seq->first_datasn, seq->last_datasn);
+#endif
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ continue;
+ }
+ }
+
+ if (!found_seq) {
+ if (!begrun) {
+ if (!first_seq) {
+ pr_err("ITT: 0x%08x, Begrun: 0x%08x"
+ " but first_seq is NULL\n",
+ cmd->init_task_tag, begrun);
+ return -1;
+ }
+ seq_send_order = first_seq->seq_send_order;
+ seq->next_burst_len = seq->pdu_send_order = 0;
+ goto done;
+ }
+
+ pr_err("Unable to locate struct iscsi_seq for ITT: 0x%08x,"
+ " BegRun: 0x%08x, RunLength: 0x%08x while"
+ " DataSequenceInOrder=No and DataPDUInOrder=%s.\n",
+ cmd->init_task_tag, begrun, runlength,
+ (conn->sess->sess_ops->DataPDUInOrder) ? "Yes" : "No");
+ return -1;
+ }
+
+done:
+ dr->read_data_done = read_data_done;
+ dr->seq_send_order = seq_send_order;
+
+ return 0;
+}
+
+static int iscsit_handle_recovery_datain(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf,
+ u32 begrun,
+ u32 runlength)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (!atomic_read(&se_cmd->t_transport_complete)) {
+ pr_err("Ignoring ITT: 0x%08x Data SNACK\n",
+ cmd->init_task_tag);
+ return 0;
+ }
+
+ /*
+ * Make sure the initiator is not requesting retransmission
+ * of DataSNs already acknowledged by a Data ACK SNACK.
+ */
+ if ((cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+ (begrun <= cmd->acked_data_sn)) {
+ pr_err("ITT: 0x%08x, Data SNACK requesting"
+ " retransmission of DataSN: 0x%08x to 0x%08x but"
+ " already acked to DataSN: 0x%08x by Data ACK SNACK,"
+ " protocol error.\n", cmd->init_task_tag, begrun,
+ (begrun + runlength), cmd->acked_data_sn);
+
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
+ 1, 0, buf, cmd);
+ }
+
+ /*
+ * Make sure BegRun and RunLength in the Data SNACK are sane.
+ * Note: (cmd->data_sn - 1) will carry the maximum DataSN sent.
+ */
+ if ((begrun + runlength) > (cmd->data_sn - 1)) {
+ pr_err("Initiator requesting BegRun: 0x%08x, RunLength"
+ ": 0x%08x greater than maximum DataSN: 0x%08x.\n",
+ begrun, runlength, (cmd->data_sn - 1));
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
+ 1, 0, buf, cmd);
+ }
+
+ dr = iscsit_allocate_datain_req();
+ if (!dr)
+ return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ 1, 0, buf, cmd);
+
+ dr->data_sn = dr->begrun = begrun;
+ dr->runlength = runlength;
+ dr->generate_recovery_values = 1;
+ dr->recovery = DATAIN_WITHIN_COMMAND_RECOVERY;
+
+ iscsit_attach_datain_req(cmd, dr);
+
+ cmd->i_state = ISTATE_SEND_DATAIN;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+
+ return 0;
+}
+
+int iscsit_handle_recovery_datain_or_r2t(
+ struct iscsi_conn *conn,
+ unsigned char *buf,
+ u32 init_task_tag,
+ u32 targ_xfer_tag,
+ u32 begrun,
+ u32 runlength)
+{
+ struct iscsi_cmd *cmd;
+
+ cmd = iscsit_find_cmd_from_itt(conn, init_task_tag);
+ if (!cmd)
+ return 0;
+
+ /*
+ * FIXME: This will not work for bidi commands.
+ */
+ switch (cmd->data_direction) {
+ case DMA_TO_DEVICE:
+ return iscsit_handle_r2t_snack(cmd, buf, begrun, runlength);
+ case DMA_FROM_DEVICE:
+ return iscsit_handle_recovery_datain(cmd, buf, begrun,
+ runlength);
+ default:
+ pr_err("Unknown cmd->data_direction: 0x%02x\n",
+ cmd->data_direction);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */
+int iscsit_handle_status_snack(
+ struct iscsi_conn *conn,
+ u32 init_task_tag,
+ u32 targ_xfer_tag,
+ u32 begrun,
+ u32 runlength)
+{
+ struct iscsi_cmd *cmd = NULL;
+ u32 last_statsn;
+ int found_cmd;
+
+ if (conn->exp_statsn > begrun) {
+ pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
+ " 0x%08x but already got ExpStatSN: 0x%08x on CID:"
+ " %hu.\n", begrun, runlength, conn->exp_statsn,
+ conn->cid);
+ return 0;
+ }
+
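+	/*
+	 * A RunLength of zero requests retransmission of all statuses from
+	 * BegRun up to the connection's current StatSN.
+	 */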
+ last_statsn = (!runlength) ? conn->stat_sn : (begrun + runlength);
+
+ while (begrun < last_statsn) {
+ found_cmd = 0;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->stat_sn == begrun) {
+ found_cmd = 1;
+ break;
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (!found_cmd) {
+ pr_err("Unable to find StatSN: 0x%08x for"
+ " a Status SNACK, assuming this was a"
+ " protactic SNACK for an untransmitted"
+ " StatSN, ignoring.\n", begrun);
+ begrun++;
+ continue;
+ }
+
+ spin_lock_bh(&cmd->istate_lock);
+ if (cmd->i_state == ISTATE_SEND_DATAIN) {
+ spin_unlock_bh(&cmd->istate_lock);
+ pr_err("Ignoring Status SNACK for BegRun:"
+ " 0x%08x, RunLength: 0x%08x, assuming this was"
+ " a protactic SNACK for an untransmitted"
+ " StatSN\n", begrun, runlength);
+ begrun++;
+ continue;
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+
+ cmd->i_state = ISTATE_SEND_STATUS_RECOVERY;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ begrun++;
+ }
+
+ return 0;
+}
+
+int iscsit_handle_data_ack(
+ struct iscsi_conn *conn,
+ u32 targ_xfer_tag,
+ u32 begrun,
+ u32 runlength)
+{
+ struct iscsi_cmd *cmd = NULL;
+
+ cmd = iscsit_find_cmd_from_ttt(conn, targ_xfer_tag);
+ if (!cmd) {
+ pr_err("Data ACK SNACK for TTT: 0x%08x is"
+ " invalid.\n", targ_xfer_tag);
+ return -1;
+ }
+
+ if (begrun <= cmd->acked_data_sn) {
+ pr_err("ITT: 0x%08x Data ACK SNACK BegRUN: 0x%08x is"
+ " less than the already acked DataSN: 0x%08x.\n",
+ cmd->init_task_tag, begrun, cmd->acked_data_sn);
+ return -1;
+ }
+
+ /*
+ * For Data ACK SNACK, BegRun is the next expected DataSN.
+ * (see iSCSI v19: 10.16.6)
+ */
+ cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = (begrun - 1);
+
+ pr_debug("Received Data ACK SNACK for ITT: 0x%08x,"
+ " updated acked DataSN to 0x%08x.\n",
+ cmd->init_task_tag, cmd->acked_data_sn);
+
+ return 0;
+}
+
+static int iscsit_send_recovery_r2t(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 xfer_len)
+{
+ int ret;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ ret = iscsit_add_r2t_to_list(cmd, offset, xfer_len, 1, 0);
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return ret;
+}
+
+int iscsit_dataout_datapduinorder_no_fbit(
+ struct iscsi_cmd *cmd,
+ struct iscsi_pdu *pdu)
+{
+ int i, send_recovery_r2t = 0, recovery = 0;
+ u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *first_pdu = NULL;
+
+ /*
+	 * Get a struct iscsi_pdu pointer to the first PDU, and the total PDU
+	 * count of the DataOUT sequence.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ for (i = 0; i < cmd->pdu_count; i++) {
+ if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
+ if (!first_pdu)
+ first_pdu = &cmd->pdu_list[i];
+ xfer_len += cmd->pdu_list[i].length;
+ pdu_count++;
+ } else if (pdu_count)
+ break;
+ }
+ } else {
+ struct iscsi_seq *seq = cmd->seq_ptr;
+
+ first_pdu = &cmd->pdu_list[seq->pdu_start];
+ pdu_count = seq->pdu_count;
+ }
+
+ if (!first_pdu || !pdu_count)
+ return DATAOUT_CANNOT_RECOVER;
+
+ /*
+	 * Loop through the ending DataOUT sequence, checking each struct iscsi_pdu.
+	 * The following logic batches runs of PDUs that were not received.
+ */
+ for (i = 0; i < pdu_count; i++) {
+ if (first_pdu[i].status == ISCSI_PDU_RECEIVED_OK) {
+ if (!send_recovery_r2t)
+ continue;
+
+ if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ send_recovery_r2t = length = offset = 0;
+ continue;
+ }
+ /*
+ * Set recovery = 1 for any missing, CRC failed, or timed
+ * out PDUs to let the DataOUT logic know that this sequence
+ * has not been completed yet.
+ *
+ * Also, only send a Recovery R2T for ISCSI_PDU_NOT_RECEIVED.
+ * We assume if the PDU either failed CRC or timed out
+ * that a Recovery R2T has already been sent.
+ */
+ recovery = 1;
+
+ if (first_pdu[i].status != ISCSI_PDU_NOT_RECEIVED)
+ continue;
+
+ if (!offset)
+ offset = first_pdu[i].offset;
+ length += first_pdu[i].length;
+
+ send_recovery_r2t = 1;
+ }
+
+ if (send_recovery_r2t)
+ if (iscsit_send_recovery_r2t(cmd, offset, length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ return (!recovery) ? DATAOUT_NORMAL : DATAOUT_WITHIN_COMMAND_RECOVERY;
+}
+
+static int iscsit_recalculate_dataout_values(
+ struct iscsi_cmd *cmd,
+ u32 pdu_offset,
+ u32 pdu_length,
+ u32 *r2t_offset,
+ u32 *r2t_length)
+{
+ int i;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu = NULL;
+
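+	/*
+	 * Determine the offset/length for the recovery R2T and rewind any
+	 * per command or per sequence accounting for data of the failed
+	 * sequence that had already been received.
+	 */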
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ cmd->data_sn = 0;
+
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ *r2t_offset = cmd->write_data_done;
+ *r2t_length = (cmd->seq_end_offset -
+ cmd->write_data_done);
+ return 0;
+ }
+
+ *r2t_offset = cmd->seq_start_offset;
+ *r2t_length = (cmd->seq_end_offset - cmd->seq_start_offset);
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ if ((pdu->offset >= cmd->seq_start_offset) &&
+ ((pdu->offset + pdu->length) <=
+ cmd->seq_end_offset)) {
+ if (!cmd->unsolicited_data)
+ cmd->next_burst_len -= pdu->length;
+ else
+ cmd->first_burst_len -= pdu->length;
+
+ cmd->write_data_done -= pdu->length;
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+ } else {
+ struct iscsi_seq *seq = NULL;
+
+ seq = iscsit_get_seq_holder(cmd, pdu_offset, pdu_length);
+ if (!seq)
+ return -1;
+
+ *r2t_offset = seq->orig_offset;
+ *r2t_length = seq->xfer_len;
+
+ cmd->write_data_done -= (seq->offset - seq->orig_offset);
+ if (cmd->immediate_data)
+ cmd->first_burst_len = cmd->write_data_done;
+
+ seq->data_sn = 0;
+ seq->offset = seq->orig_offset;
+ seq->next_burst_len = 0;
+ seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+ if (conn->sess->sess_ops->DataPDUInOrder)
+ return 0;
+
+ for (i = 0; i < seq->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i+seq->pdu_start];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+
+ return 0;
+}
+
+int iscsit_recover_dataout_sequence(
+ struct iscsi_cmd *cmd,
+ u32 pdu_offset,
+ u32 pdu_length)
+{
+ u32 r2t_length = 0, r2t_offset = 0;
+
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
+ &r2t_offset, &r2t_length) < 0)
+ return DATAOUT_CANNOT_RECOVER;
+
+ iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length);
+
+ return DATAOUT_WITHIN_COMMAND_RECOVERY;
+}
+
+static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void)
+{
+ struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL;
+
+ ooo_cmdsn = kmem_cache_zalloc(lio_ooo_cache, GFP_ATOMIC);
+ if (!ooo_cmdsn) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_ooo_cmdsn.\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&ooo_cmdsn->ooo_list);
+
+ return ooo_cmdsn;
+}
+
+/*
+ * Called with sess->cmdsn_mutex held.
+ */
+static int iscsit_attach_ooo_cmdsn(
+ struct iscsi_session *sess,
+ struct iscsi_ooo_cmdsn *ooo_cmdsn)
+{
+ struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp;
+ /*
+ * We attach the struct iscsi_ooo_cmdsn entry to the out of order
+ * list in increasing CmdSN order.
+ * This allows iscsi_execute_ooo_cmdsns() to detect any
+ * additional CmdSN holes while performing delayed execution.
+ */
+ if (list_empty(&sess->sess_ooo_cmdsn_list))
+ list_add_tail(&ooo_cmdsn->ooo_list,
+ &sess->sess_ooo_cmdsn_list);
+ else {
+ ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
+ typeof(*ooo_tail), ooo_list);
+ /*
+ * CmdSN is greater than the tail of the list.
+ */
+ if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn)
+ list_add_tail(&ooo_cmdsn->ooo_list,
+ &sess->sess_ooo_cmdsn_list);
+ else {
+ /*
+ * CmdSN is either lower than the head, or somewhere
+ * in the middle.
+ */
+ list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
+ ooo_list) {
+				if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
+					continue;
+				/*
+				 * Insert before the first entry with a
+				 * greater CmdSN to keep the list in
+				 * ascending order.
+				 */
+				list_add_tail(&ooo_cmdsn->ooo_list,
+						&ooo_tmp->ooo_list);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Removes a struct iscsi_ooo_cmdsn from a session's list,
+ * called with struct iscsi_session->cmdsn_mutex held.
+ */
+void iscsit_remove_ooo_cmdsn(
+ struct iscsi_session *sess,
+ struct iscsi_ooo_cmdsn *ooo_cmdsn)
+{
+ list_del(&ooo_cmdsn->ooo_list);
+ kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+}
+
+void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+{
+ struct iscsi_ooo_cmdsn *ooo_cmdsn;
+ struct iscsi_session *sess = conn->sess;
+
+ mutex_lock(&sess->cmdsn_mutex);
+ list_for_each_entry(ooo_cmdsn, &sess->sess_ooo_cmdsn_list, ooo_list) {
+ if (ooo_cmdsn->cid != conn->cid)
+ continue;
+
+ ooo_cmdsn->cmd = NULL;
+ }
+ mutex_unlock(&sess->cmdsn_mutex);
+}
+
+/*
+ * Called with sess->cmdsn_mutex held.
+ */
+int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
+{
+ int ooo_count = 0;
+ struct iscsi_cmd *cmd = NULL;
+ struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+
+ list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+ &sess->sess_ooo_cmdsn_list, ooo_list) {
+ if (ooo_cmdsn->cmdsn != sess->exp_cmd_sn)
+ continue;
+
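+		/*
+		 * A NULL cmd means the owning connection was cleaned up via
+		 * iscsit_clear_ooo_cmdsns_for_conn(); just consume the CmdSN.
+		 */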
+ if (!ooo_cmdsn->cmd) {
+ sess->exp_cmd_sn++;
+ iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+ continue;
+ }
+
+ cmd = ooo_cmdsn->cmd;
+ cmd->i_state = cmd->deferred_i_state;
+ ooo_count++;
+ sess->exp_cmd_sn++;
+ pr_debug("Executing out of order CmdSN: 0x%08x,"
+ " incremented ExpCmdSN to 0x%08x.\n",
+ cmd->cmd_sn, sess->exp_cmd_sn);
+
+ iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+
+ if (iscsit_execute_cmd(cmd, 1) < 0)
+ return -1;
+
+ continue;
+ }
+
+ return ooo_count;
+}
+
+/*
+ * Called either:
+ *
+ * 1. With sess->cmdsn_mutex held from iscsi_execute_ooo_cmdsns()
+ * or iscsi_check_received_cmdsn().
+ * 2. With no locks held directly from iscsi_handle_XXX_pdu() functions
+ * for immediate commands.
+ */
+int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ int lr = 0;
+
+ spin_lock_bh(&cmd->istate_lock);
+ if (ooo)
+ cmd->cmd_flags &= ~ICF_OOO_CMDSN;
+
+ switch (cmd->iscsi_opcode) {
+ case ISCSI_OP_SCSI_CMD:
+ /*
+ * Go ahead and send the CHECK_CONDITION status for
+ * any SCSI CDB exceptions that may have occurred, also
+ * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
+ */
+ if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
+ if (se_cmd->se_cmd_flags &
+ SCF_SCSI_RESERVATION_CONFLICT) {
+ cmd->i_state = ISTATE_SEND_STATUS;
+ spin_unlock_bh(&cmd->istate_lock);
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
+ cmd->i_state);
+ return 0;
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+ /*
+ * Determine if delayed TASK_ABORTED status for WRITEs
+ * should be sent now if no unsolicited data out
+ * payloads are expected, or if the delayed status
+ * should be sent after unsolicited data out with
+ * ISCSI_FLAG_CMD_FINAL set in iscsi_handle_data_out()
+ */
+ if (transport_check_aborted_status(se_cmd,
+ (cmd->unsolicited_data == 0)) != 0)
+ return 0;
+ /*
+ * Otherwise send CHECK_CONDITION and sense for
+ * exception
+ */
+ return transport_send_check_condition_and_sense(se_cmd,
+ se_cmd->scsi_sense_reason, 0);
+ }
+ /*
+ * Special case for delayed CmdSN with Immediate
+ * Data and/or Unsolicited Data Out attached.
+ */
+ if (cmd->immediate_data) {
+ if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+ spin_unlock_bh(&cmd->istate_lock);
+ return transport_generic_handle_data(
+ &cmd->se_cmd);
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if (!(cmd->cmd_flags &
+ ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
+ /*
+ * Send the delayed TASK_ABORTED status for
+				 * WRITEs if no more unsolicited data is
+ * expected.
+ */
+ if (transport_check_aborted_status(se_cmd, 1)
+ != 0)
+ return 0;
+
+ iscsit_set_dataout_sequence_values(cmd);
+ iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 0);
+ }
+ return 0;
+ }
+ /*
+ * The default handler.
+ */
+ spin_unlock_bh(&cmd->istate_lock);
+
+ if ((cmd->data_direction == DMA_TO_DEVICE) &&
+ !(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
+ /*
+ * Send the delayed TASK_ABORTED status for WRITEs if
+			 * no more unsolicited data is expected.
+ */
+ if (transport_check_aborted_status(se_cmd, 1) != 0)
+ return 0;
+
+ iscsit_set_dataout_sequence_values(cmd);
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ iscsit_start_dataout_timer(cmd, cmd->conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ }
+ return transport_handle_cdb_direct(&cmd->se_cmd);
+
+ case ISCSI_OP_NOOP_OUT:
+ case ISCSI_OP_TEXT:
+ spin_unlock_bh(&cmd->istate_lock);
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
+ spin_unlock_bh(&cmd->istate_lock);
+ iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
+ cmd->i_state);
+ return 0;
+ }
+ spin_unlock_bh(&cmd->istate_lock);
+
+ return transport_generic_handle_tmr(&cmd->se_cmd);
+ case ISCSI_OP_LOGOUT:
+ spin_unlock_bh(&cmd->istate_lock);
+ switch (cmd->logout_reason) {
+ case ISCSI_LOGOUT_REASON_CLOSE_SESSION:
+ lr = iscsit_logout_closesession(cmd, cmd->conn);
+ break;
+ case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
+ lr = iscsit_logout_closeconnection(cmd, cmd->conn);
+ break;
+ case ISCSI_LOGOUT_REASON_RECOVERY:
+ lr = iscsit_logout_removeconnforrecovery(cmd, cmd->conn);
+ break;
+ default:
+ pr_err("Unknown iSCSI Logout Request Code:"
+ " 0x%02x\n", cmd->logout_reason);
+ return -1;
+ }
+
+ return lr;
+ default:
+ spin_unlock_bh(&cmd->istate_lock);
+ pr_err("Cannot perform out of order execution for"
+ " unknown iSCSI Opcode: 0x%02x\n", cmd->iscsi_opcode);
+ return -1;
+ }
+
+ return 0;
+}
+
+void iscsit_free_all_ooo_cmdsns(struct iscsi_session *sess)
+{
+ struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+
+ mutex_lock(&sess->cmdsn_mutex);
+ list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+ &sess->sess_ooo_cmdsn_list, ooo_list) {
+
+ list_del(&ooo_cmdsn->ooo_list);
+ kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+ }
+ mutex_unlock(&sess->cmdsn_mutex);
+}
+
+int iscsit_handle_ooo_cmdsn(
+ struct iscsi_session *sess,
+ struct iscsi_cmd *cmd,
+ u32 cmdsn)
+{
+ int batch = 0;
+ struct iscsi_ooo_cmdsn *ooo_cmdsn = NULL, *ooo_tail = NULL;
+
+ cmd->deferred_i_state = cmd->i_state;
+ cmd->i_state = ISTATE_DEFERRED_CMD;
+ cmd->cmd_flags |= ICF_OOO_CMDSN;
+
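+	/*
+	 * A new batch begins when the out of order list is empty or this
+	 * CmdSN is not contiguous with the current tail of the list.
+	 */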
+ if (list_empty(&sess->sess_ooo_cmdsn_list))
+ batch = 1;
+ else {
+ ooo_tail = list_entry(sess->sess_ooo_cmdsn_list.prev,
+ typeof(*ooo_tail), ooo_list);
+ if (ooo_tail->cmdsn != (cmdsn - 1))
+ batch = 1;
+ }
+
+ ooo_cmdsn = iscsit_allocate_ooo_cmdsn();
+ if (!ooo_cmdsn)
+ return CMDSN_ERROR_CANNOT_RECOVER;
+
+ ooo_cmdsn->cmd = cmd;
+ ooo_cmdsn->batch_count = (batch) ?
+ (cmdsn - sess->exp_cmd_sn) : 1;
+ ooo_cmdsn->cid = cmd->conn->cid;
+ ooo_cmdsn->exp_cmdsn = sess->exp_cmd_sn;
+ ooo_cmdsn->cmdsn = cmdsn;
+
+ if (iscsit_attach_ooo_cmdsn(sess, ooo_cmdsn) < 0) {
+ kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
+ return CMDSN_ERROR_CANNOT_RECOVER;
+ }
+
+ return CMDSN_HIGHER_THAN_EXP;
+}
+
+static int iscsit_set_dataout_timeout_values(
+ struct iscsi_cmd *cmd,
+ u32 *offset,
+ u32 *length)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_r2t *r2t;
+
+ if (cmd->unsolicited_data) {
+ *offset = 0;
+ *length = (conn->sess->sess_ops->FirstBurstLength >
+ cmd->data_length) ?
+ cmd->data_length :
+ conn->sess->sess_ops->FirstBurstLength;
+ return 0;
+ }
+
+ spin_lock_bh(&cmd->r2t_lock);
+ if (list_empty(&cmd->cmd_r2t_list)) {
+ pr_err("cmd->cmd_r2t_list is empty!\n");
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+ if (r2t->sent_r2t && !r2t->recovery_r2t && !r2t->seq_complete) {
+ *offset = r2t->offset;
+ *length = r2t->xfer_len;
+ spin_unlock_bh(&cmd->r2t_lock);
+ return 0;
+ }
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ pr_err("Unable to locate any incomplete DataOUT"
+ " sequences for ITT: 0x%08x.\n", cmd->init_task_tag);
+
+ return -1;
+}
+
+/*
+ * NOTE: Called from interrupt (timer) context.
+ */
+static void iscsit_handle_dataout_timeout(unsigned long data)
+{
+ u32 pdu_length = 0, pdu_offset = 0;
+ u32 r2t_length = 0, r2t_offset = 0;
+ struct iscsi_cmd *cmd = (struct iscsi_cmd *) data;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_session *sess = NULL;
+ struct iscsi_node_attrib *na;
+
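+	/*
+	 * A RunLength of zero requests retransmission of all R2Ts from
+	 * BegRun up to the current R2TSN.
+	 */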
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ if (cmd->dataout_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ iscsit_dec_conn_usage_count(conn);
+ return;
+ }
+ cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
+ sess = conn->sess;
+ na = iscsit_tpg_get_node_attrib(sess);
+
+ if (!sess->sess_ops->ErrorRecoveryLevel) {
+ pr_debug("Unable to recover from DataOut timeout while"
+ " in ERL=0.\n");
+ goto failure;
+ }
+
+ if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
+ pr_debug("Command ITT: 0x%08x exceeded max retries"
+ " for DataOUT timeout %u, closing iSCSI connection.\n",
+ cmd->init_task_tag, na->dataout_timeout_retries);
+ goto failure;
+ }
+
+ cmd->cmd_flags |= ICF_WITHIN_COMMAND_RECOVERY;
+
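+	/*
+	 * Work out the offset/length of the DataOUT still outstanding so
+	 * that a recovery R2T can be built further below.
+	 */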
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ pdu_offset = cmd->write_data_done;
+ if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len)) > cmd->data_length)
+ pdu_length = (cmd->data_length -
+ cmd->write_data_done);
+ else
+ pdu_length = (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len);
+ } else {
+ pdu_offset = cmd->seq_start_offset;
+ pdu_length = (cmd->seq_end_offset -
+ cmd->seq_start_offset);
+ }
+ } else {
+ if (iscsit_set_dataout_timeout_values(cmd, &pdu_offset,
+ &pdu_length) < 0)
+ goto failure;
+ }
+
+ if (iscsit_recalculate_dataout_values(cmd, pdu_offset, pdu_length,
+ &r2t_offset, &r2t_length) < 0)
+ goto failure;
+
+ pr_debug("Command ITT: 0x%08x timed out waiting for"
+ " completion of %sDataOUT Sequence Offset: %u, Length: %u\n",
+ cmd->init_task_tag, (cmd->unsolicited_data) ? "Unsolicited " :
+ "", r2t_offset, r2t_length);
+
+ if (iscsit_send_recovery_r2t(cmd, r2t_offset, r2t_length) < 0)
+ goto failure;
+
+ iscsit_start_dataout_timer(cmd, conn);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ iscsit_dec_conn_usage_count(conn);
+
+ return;
+
+failure:
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ iscsit_cause_connection_reinstatement(conn, 0);
+ iscsit_dec_conn_usage_count(conn);
+}
+
+void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ return;
+ }
+
+ mod_timer(&cmd->dataout_timer,
+ (get_jiffies_64() + na->dataout_timeout * HZ));
+ pr_debug("Updated DataOUT timer for ITT: 0x%08x",
+ cmd->init_task_tag);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+}
+
+/*
+ * Called with cmd->dataout_timeout_lock held.
+ */
+void iscsit_start_dataout_timer(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
+ return;
+
+ pr_debug("Starting DataOUT timer for ITT: 0x%08x on"
+ " CID: %hu.\n", cmd->init_task_tag, conn->cid);
+
+ init_timer(&cmd->dataout_timer);
+ cmd->dataout_timer.expires = (get_jiffies_64() + na->dataout_timeout * HZ);
+ cmd->dataout_timer.data = (unsigned long)cmd;
+ cmd->dataout_timer.function = iscsit_handle_dataout_timeout;
+ cmd->dataout_timer_flags &= ~ISCSI_TF_STOP;
+ cmd->dataout_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&cmd->dataout_timer);
+}
+
+void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
+{
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+ return;
+ }
+ cmd->dataout_timer_flags |= ISCSI_TF_STOP;
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+
+ del_timer_sync(&cmd->dataout_timer);
+
+ spin_lock_bh(&cmd->dataout_timeout_lock);
+ cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
+ pr_debug("Stopped DataOUT Timer for ITT: 0x%08x\n",
+ cmd->init_task_tag);
+ spin_unlock_bh(&cmd->dataout_timeout_lock);
+}
diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h
new file mode 100644
index 00000000000..85e67e29de6
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl1.h
@@ -0,0 +1,26 @@
+#ifndef ISCSI_TARGET_ERL1_H
+#define ISCSI_TARGET_ERL1_H
+
+extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
+extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
+ struct iscsi_cmd *, struct iscsi_datain_req *);
+extern int iscsit_create_recovery_datain_values_datasequenceinorder_no(
+ struct iscsi_cmd *, struct iscsi_datain_req *);
+extern int iscsit_handle_recovery_datain_or_r2t(struct iscsi_conn *, unsigned char *,
+ u32, u32, u32, u32);
+extern int iscsit_handle_status_snack(struct iscsi_conn *, u32, u32,
+ u32, u32);
+extern int iscsit_handle_data_ack(struct iscsi_conn *, u32, u32, u32);
+extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsi_cmd *, struct iscsi_pdu *);
+extern int iscsit_recover_dataout_sequence(struct iscsi_cmd *, u32, u32);
+extern void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *);
+extern void iscsit_free_all_ooo_cmdsns(struct iscsi_session *);
+extern int iscsit_execute_ooo_cmdsns(struct iscsi_session *);
+extern int iscsit_execute_cmd(struct iscsi_cmd *, int);
+extern int iscsit_handle_ooo_cmdsn(struct iscsi_session *, struct iscsi_cmd *, u32);
+extern void iscsit_remove_ooo_cmdsn(struct iscsi_session *, struct iscsi_ooo_cmdsn *);
+extern void iscsit_mod_dataout_timer(struct iscsi_cmd *);
+extern void iscsit_start_dataout_timer(struct iscsi_cmd *, struct iscsi_conn *);
+extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
+
+#endif /* ISCSI_TARGET_ERL1_H */
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
new file mode 100644
index 00000000000..91a4d170bda
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -0,0 +1,474 @@
+/*******************************************************************************
+ * This file contains error recovery level two functions used by
+ * the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target.h"
+
+/*
+ * FIXME: Does RData SNACK apply here as well?
+ */
+void iscsit_create_conn_recovery_datain_values(
+ struct iscsi_cmd *cmd,
+ u32 exp_data_sn)
+{
+ u32 data_sn = 0;
+ struct iscsi_conn *conn = cmd->conn;
+
+ cmd->next_burst_len = 0;
+ cmd->read_data_done = 0;
+
+ while (exp_data_sn > data_sn) {
+ if ((cmd->next_burst_len +
+ conn->conn_ops->MaxRecvDataSegmentLength) <
+ conn->sess->sess_ops->MaxBurstLength) {
+ cmd->read_data_done +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ cmd->next_burst_len +=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ } else {
+ cmd->read_data_done +=
+ (conn->sess->sess_ops->MaxBurstLength -
+ cmd->next_burst_len);
+ cmd->next_burst_len = 0;
+ }
+ data_sn++;
+ }
+}
+
+void iscsit_create_conn_recovery_dataout_values(
+ struct iscsi_cmd *cmd)
+{
+ u32 write_data_done = 0;
+ struct iscsi_conn *conn = cmd->conn;
+
+ cmd->data_sn = 0;
+ cmd->next_burst_len = 0;
+
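+	/*
+	 * Round write_data_done down to the last fully completed
+	 * MaxBurstLength boundary, so that any partially received burst can
+	 * be re-requested during connection recovery.
+	 */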
+ while (cmd->write_data_done > write_data_done) {
+ if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <=
+ cmd->write_data_done)
+ write_data_done += conn->sess->sess_ops->MaxBurstLength;
+ else
+ break;
+ }
+
+ cmd->write_data_done = write_data_done;
+}
+
+static int iscsit_attach_active_connection_recovery_entry(
+ struct iscsi_session *sess,
+ struct iscsi_conn_recovery *cr)
+{
+ spin_lock(&sess->cr_a_lock);
+ list_add_tail(&cr->cr_list, &sess->cr_active_list);
+ spin_unlock(&sess->cr_a_lock);
+
+ return 0;
+}
+
+static int iscsit_attach_inactive_connection_recovery_entry(
+ struct iscsi_session *sess,
+ struct iscsi_conn_recovery *cr)
+{
+ spin_lock(&sess->cr_i_lock);
+ list_add_tail(&cr->cr_list, &sess->cr_inactive_list);
+
+ sess->conn_recovery_count++;
+ pr_debug("Incremented connection recovery count to %u for"
+ " SID: %u\n", sess->conn_recovery_count, sess->sid);
+ spin_unlock(&sess->cr_i_lock);
+
+ return 0;
+}
+
+struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
+ struct iscsi_session *sess,
+ u16 cid)
+{
+ struct iscsi_conn_recovery *cr;
+
+ spin_lock(&sess->cr_i_lock);
+ list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
+ if (cr->cid == cid) {
+ spin_unlock(&sess->cr_i_lock);
+ return cr;
+ }
+ }
+ spin_unlock(&sess->cr_i_lock);
+
+ return NULL;
+}
+
+void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+{
+ struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsi_conn_recovery *cr, *cr_tmp;
+
+ spin_lock(&sess->cr_a_lock);
+ list_for_each_entry_safe(cr, cr_tmp, &sess->cr_active_list, cr_list) {
+ list_del(&cr->cr_list);
+ spin_unlock(&sess->cr_a_lock);
+
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp,
+ &cr->conn_recovery_cmd_list, i_list) {
+
+ list_del(&cmd->i_list);
+ cmd->conn = NULL;
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 1);
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ spin_lock(&sess->cr_a_lock);
+
+ kfree(cr);
+ }
+ spin_unlock(&sess->cr_a_lock);
+
+ spin_lock(&sess->cr_i_lock);
+ list_for_each_entry_safe(cr, cr_tmp, &sess->cr_inactive_list, cr_list) {
+ list_del(&cr->cr_list);
+ spin_unlock(&sess->cr_i_lock);
+
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp,
+ &cr->conn_recovery_cmd_list, i_list) {
+
+ list_del(&cmd->i_list);
+ cmd->conn = NULL;
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 1);
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ spin_lock(&sess->cr_i_lock);
+
+ kfree(cr);
+ }
+ spin_unlock(&sess->cr_i_lock);
+}
+
+int iscsit_remove_active_connection_recovery_entry(
+ struct iscsi_conn_recovery *cr,
+ struct iscsi_session *sess)
+{
+ spin_lock(&sess->cr_a_lock);
+ list_del(&cr->cr_list);
+
+ sess->conn_recovery_count--;
+ pr_debug("Decremented connection recovery count to %u for"
+ " SID: %u\n", sess->conn_recovery_count, sess->sid);
+ spin_unlock(&sess->cr_a_lock);
+
+ kfree(cr);
+
+ return 0;
+}
+
+int iscsit_remove_inactive_connection_recovery_entry(
+ struct iscsi_conn_recovery *cr,
+ struct iscsi_session *sess)
+{
+ spin_lock(&sess->cr_i_lock);
+ list_del(&cr->cr_list);
+ spin_unlock(&sess->cr_i_lock);
+
+ return 0;
+}
+
+/*
+ * Called with cr->conn_recovery_cmd_lock held.
+ */
+int iscsit_remove_cmd_from_connection_recovery(
+ struct iscsi_cmd *cmd,
+ struct iscsi_session *sess)
+{
+ struct iscsi_conn_recovery *cr;
+
+ if (!cmd->cr) {
+ pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+ " is NULL!\n", cmd->init_task_tag);
+ BUG();
+ }
+ cr = cmd->cr;
+
+ list_del(&cmd->i_list);
+ return --cr->cmd_count;
+}
+
+void iscsit_discard_cr_cmds_by_expstatsn(
+ struct iscsi_conn_recovery *cr,
+ u32 exp_statsn)
+{
+ u32 dropped_count = 0;
+ struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsi_session *sess = cr->sess;
+
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp,
+ &cr->conn_recovery_cmd_list, i_list) {
+
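+ /*
+ * Only drop commands whose status has already been sent
+ * (SENT_STATUS/REMOVE) and acknowledged by the new ExpStatSN;
+ * everything else stays queued for command reassignment.
+ */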
+ if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
+ (cmd->deferred_i_state != ISTATE_REMOVE)) ||
+ (cmd->stat_sn >= exp_statsn)) {
+ continue;
+ }
+
+ dropped_count++;
+ pr_debug("Dropping Acknowledged ITT: 0x%08x, StatSN:"
+ " 0x%08x, CID: %hu.\n", cmd->init_task_tag,
+ cmd->stat_sn, cr->cid);
+
+ iscsit_remove_cmd_from_connection_recovery(cmd, sess);
+
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 0);
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+
+ pr_debug("Dropped %u total acknowledged commands on"
+ " CID: %hu less than old ExpStatSN: 0x%08x\n",
+ dropped_count, cr->cid, exp_statsn);
+
+ if (!cr->cmd_count) {
+ pr_debug("No commands to be reassigned for failed"
+ " connection CID: %hu on SID: %u\n",
+ cr->cid, sess->sid);
+ iscsit_remove_inactive_connection_recovery_entry(cr, sess);
+ iscsit_attach_active_connection_recovery_entry(sess, cr);
+ pr_debug("iSCSI connection recovery successful for CID:"
+ " %hu on SID: %u\n", cr->cid, sess->sid);
+ iscsit_remove_active_connection_recovery_entry(cr, sess);
+ } else {
+ iscsit_remove_inactive_connection_recovery_entry(cr, sess);
+ iscsit_attach_active_connection_recovery_entry(sess, cr);
+ }
+}
+
+int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+{
+ u32 dropped_count = 0;
+ struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+ struct iscsi_session *sess = conn->sess;
+
+ mutex_lock(&sess->cmdsn_mutex);
+ list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
+ &sess->sess_ooo_cmdsn_list, ooo_list) {
+
+ if (ooo_cmdsn->cid != conn->cid)
+ continue;
+
+ dropped_count++;
+ pr_debug("Dropping unacknowledged CmdSN:"
+ " 0x%08x during connection recovery on CID: %hu\n",
+ ooo_cmdsn->cmdsn, conn->cid);
+ iscsit_remove_ooo_cmdsn(sess, ooo_cmdsn);
+ }
+ mutex_unlock(&sess->cmdsn_mutex);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+ if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
+ continue;
+
+ list_del(&cmd->i_list);
+
+ spin_unlock_bh(&conn->cmd_lock);
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 1);
+ spin_lock_bh(&conn->cmd_lock);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ pr_debug("Dropped %u total unacknowledged commands on CID:"
+ " %hu for ExpCmdSN: 0x%08x.\n", dropped_count, conn->cid,
+ sess->exp_cmd_sn);
+ return 0;
+}
+
+int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+{
+ u32 cmd_count = 0;
+ struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsi_conn_recovery *cr;
+
+ /*
+ * Allocate a struct iscsi_conn_recovery for this connection.
+ * Each struct iscsi_cmd contains a struct iscsi_conn_recovery pointer
+ * (struct iscsi_cmd->cr) so we need to allocate this before preparing the
+ * connection's command list for connection recovery.
+ */
+ cr = kzalloc(sizeof(struct iscsi_conn_recovery), GFP_KERNEL);
+ if (!cr) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_conn_recovery.\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&cr->cr_list);
+ INIT_LIST_HEAD(&cr->conn_recovery_cmd_list);
+ spin_lock_init(&cr->conn_recovery_cmd_lock);
+ /*
+ * Only perform connection recovery on ISCSI_OP_SCSI_CMD or
+ * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
+ * list_del(&cmd->i_list); to release the command to the
+ * session pool and remove it from the connection's list.
+ *
+ * Also stop the DataOUT timer, which will be restarted after
+ * sending the TMR response.
+ */
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
+
+ if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
+ (cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
+ pr_debug("Not performing realligence on"
+ " Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x,"
+ " CID: %hu\n", cmd->iscsi_opcode,
+ cmd->init_task_tag, cmd->cmd_sn, conn->cid);
+
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 0);
+ spin_lock_bh(&conn->cmd_lock);
+ continue;
+ }
+
+ /*
+ * Special case where commands greater than or equal to
+ * the session's ExpCmdSN are attached to the connection
+ * list but not to the out of order CmdSN list. The one
+ * obvious case is when a command with immediate data
+ * attached must only check the CmdSN against ExpCmdSN
+ * after the data is received. The special case below
+ * is when the connection fails before data is received,
+ * but it may also apply to other PDUs, so it has been
+ * made generic here.
+ */
+ if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
+ (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
+ !(cmd->se_cmd.transport_wait_for_tasks))
+ iscsit_release_cmd(cmd);
+ else
+ cmd->se_cmd.transport_wait_for_tasks(
+ &cmd->se_cmd, 1, 1);
+ spin_lock_bh(&conn->cmd_lock);
+ continue;
+ }
+
+ cmd_count++;
+ pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x,"
+ " CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for"
+ " realligence.\n", cmd->iscsi_opcode,
+ cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn,
+ conn->cid);
+
+ cmd->deferred_i_state = cmd->i_state;
+ cmd->i_state = ISTATE_IN_CONNECTION_RECOVERY;
+
+ if (cmd->data_direction == DMA_TO_DEVICE)
+ iscsit_stop_dataout_timer(cmd);
+
+ cmd->sess = conn->sess;
+
+ list_del(&cmd->i_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ iscsit_free_all_datain_reqs(cmd);
+
+ if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) &&
+ cmd->se_cmd.transport_wait_for_tasks)
+ cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd,
+ 0, 0);
+ /*
+ * Add the struct iscsi_cmd to the connection recovery cmd list
+ */
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_add_tail(&cmd->i_list, &cr->conn_recovery_cmd_list);
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+
+ spin_lock_bh(&conn->cmd_lock);
+ cmd->cr = cr;
+ cmd->conn = NULL;
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+ /*
+ * Fill in the various values in the preallocated struct iscsi_conn_recovery.
+ */
+ cr->cid = conn->cid;
+ cr->cmd_count = cmd_count;
+ cr->maxrecvdatasegmentlength = conn->conn_ops->MaxRecvDataSegmentLength;
+ cr->sess = conn->sess;
+
+ iscsit_attach_inactive_connection_recovery_entry(conn->sess, cr);
+
+ return 0;
+}
+
+int iscsit_connection_recovery_transport_reset(struct iscsi_conn *conn)
+{
+ atomic_set(&conn->connection_recovery, 1);
+
+ if (iscsit_close_connection(conn) < 0)
+ return -1;
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
new file mode 100644
index 00000000000..22f8d24780a
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -0,0 +1,18 @@
+#ifndef ISCSI_TARGET_ERL2_H
+#define ISCSI_TARGET_ERL2_H
+
+extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, u32);
+extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
+extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
+ struct iscsi_session *, u16);
+extern void iscsit_free_connection_recovery_entires(struct iscsi_session *);
+extern int iscsit_remove_active_connection_recovery_entry(
+ struct iscsi_conn_recovery *, struct iscsi_session *);
+extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
+ struct iscsi_session *);
+extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32);
+extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *);
+extern int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *);
+extern int iscsit_connection_recovery_transport_reset(struct iscsi_conn *);
+
+#endif /*** ISCSI_TARGET_ERL2_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
new file mode 100644
index 00000000000..bcaf82f4703
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -0,0 +1,1232 @@
+/*******************************************************************************
+ * This file contains the login functions used by the iSCSI Target driver.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/string.h>
+#include <linux/kthread.h>
+#include <linux/crypto.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_stat.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_parameters.h"
+
+extern struct idr sess_idr;
+extern struct mutex auth_id_lock;
+extern spinlock_t sess_idr_lock;
+
+static int iscsi_login_init_conn(struct iscsi_conn *conn)
+{
+ INIT_LIST_HEAD(&conn->conn_list);
+ INIT_LIST_HEAD(&conn->conn_cmd_list);
+ INIT_LIST_HEAD(&conn->immed_queue_list);
+ INIT_LIST_HEAD(&conn->response_queue_list);
+ init_completion(&conn->conn_post_wait_comp);
+ init_completion(&conn->conn_wait_comp);
+ init_completion(&conn->conn_wait_rcfr_comp);
+ init_completion(&conn->conn_waiting_on_uc_comp);
+ init_completion(&conn->conn_logout_comp);
+ init_completion(&conn->rx_half_close_comp);
+ init_completion(&conn->tx_half_close_comp);
+ spin_lock_init(&conn->cmd_lock);
+ spin_lock_init(&conn->conn_usage_lock);
+ spin_lock_init(&conn->immed_queue_lock);
+ spin_lock_init(&conn->nopin_timer_lock);
+ spin_lock_init(&conn->response_queue_lock);
+ spin_lock_init(&conn->state_lock);
+
+ if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
+ pr_err("Unable to allocate conn->conn_cpumask\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to set up
+ * per struct iscsi_conn libcrypto contexts for crc32c and crc32-intel
+ */
+int iscsi_login_setup_crypto(struct iscsi_conn *conn)
+{
+ /*
+ * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
+ * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fall back
+ * to software 1x8 byte slicing from crc32c.ko
+ */
+ conn->conn_rx_hash.flags = 0;
+ conn->conn_rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(conn->conn_rx_hash.tfm)) {
+ pr_err("crypto_alloc_hash() failed for conn_rx_tfm\n");
+ return -ENOMEM;
+ }
+
+ conn->conn_tx_hash.flags = 0;
+ conn->conn_tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(conn->conn_tx_hash.tfm)) {
+ pr_err("crypto_alloc_hash() failed for conn_tx_tfm\n");
+ crypto_free_hash(conn->conn_rx_hash.tfm);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int iscsi_login_check_initiator_version(
+ struct iscsi_conn *conn,
+ u8 version_max,
+ u8 version_min)
+{
+ if ((version_max != 0x00) || (version_min != 0x00)) {
+ pr_err("Unsupported iSCSI IETF Pre-RFC Revision,"
+ " version Min/Max 0x%02x/0x%02x, rejecting login.\n",
+ version_min, version_max);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_NO_VERSION);
+ return -1;
+ }
+
+ return 0;
+}
+
+int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+{
+ int sessiontype;
+ struct iscsi_param *initiatorname_param = NULL, *sessiontype_param = NULL;
+ struct iscsi_portal_group *tpg = conn->tpg;
+ struct iscsi_session *sess = NULL, *sess_p = NULL;
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ struct se_session *se_sess, *se_sess_tmp;
+
+ initiatorname_param = iscsi_find_param_from_key(
+ INITIATORNAME, conn->param_list);
+ if (!initiatorname_param)
+ return -1;
+
+ sessiontype_param = iscsi_find_param_from_key(
+ SESSIONTYPE, conn->param_list);
+ if (!sessiontype_param)
+ return -1;
+
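+ /*
+ * sessiontype is non-zero when the SessionType value is anything
+ * other than "Normal", i.e. a Discovery session.
+ */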
+ sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
+
+ spin_lock_bh(&se_tpg->session_lock);
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+ sess_list) {
+
+ sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ spin_lock(&sess_p->conn_lock);
+ if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+ atomic_read(&sess_p->session_logout) ||
+ (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ spin_unlock(&sess_p->conn_lock);
+ continue;
+ }
+ if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) &&
+ (!strcmp((void *)sess_p->sess_ops->InitiatorName,
+ (void *)initiatorname_param->value) &&
+ (sess_p->sess_ops->SessionType == sessiontype))) {
+ atomic_set(&sess_p->session_reinstatement, 1);
+ spin_unlock(&sess_p->conn_lock);
+ iscsit_inc_session_usage_count(sess_p);
+ iscsit_stop_time2retain_timer(sess_p);
+ sess = sess_p;
+ break;
+ }
+ spin_unlock(&sess_p->conn_lock);
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+ /*
+ * If the Time2Retain handler has expired, the session is already gone.
+ */
+ if (!sess)
+ return 0;
+
+ pr_debug("%s iSCSI Session SID %u is still active for %s,"
+ " preforming session reinstatement.\n", (sessiontype) ?
+ "Discovery" : "Normal", sess->sid,
+ sess->sess_ops->InitiatorName);
+
+ spin_lock_bh(&sess->conn_lock);
+ if (sess->session_state == TARG_SESS_STATE_FAILED) {
+ spin_unlock_bh(&sess->conn_lock);
+ iscsit_dec_session_usage_count(sess);
+ return iscsit_close_session(sess);
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
+
+ return iscsit_close_session(sess);
+}
+
+static void iscsi_login_set_conn_values(
+ struct iscsi_session *sess,
+ struct iscsi_conn *conn,
+ u16 cid)
+{
+ conn->sess = sess;
+ conn->cid = cid;
+ /*
+ * Generate a random Status sequence number (statsn) for the new
+ * iSCSI connection.
+ */
+ get_random_bytes(&conn->stat_sn, sizeof(u32));
+
+ mutex_lock(&auth_id_lock);
+ conn->auth_id = iscsit_global->auth_id++;
+ mutex_unlock(&auth_id_lock);
+}
+
+/*
+ * This is the leading connection of a new session,
+ * or session reinstatement.
+ */
+static int iscsi_login_zero_tsih_s1(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ struct iscsi_session *sess = NULL;
+ struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+
+ sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
+ if (!sess) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ pr_err("Could not allocate memory for session\n");
+ return -1;
+ }
+
+ iscsi_login_set_conn_values(sess, conn, pdu->cid);
+ sess->init_task_tag = pdu->itt;
+ memcpy((void *)&sess->isid, (void *)pdu->isid, 6);
+ sess->exp_cmd_sn = pdu->cmdsn;
+ INIT_LIST_HEAD(&sess->sess_conn_list);
+ INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
+ INIT_LIST_HEAD(&sess->cr_active_list);
+ INIT_LIST_HEAD(&sess->cr_inactive_list);
+ init_completion(&sess->async_msg_comp);
+ init_completion(&sess->reinstatement_comp);
+ init_completion(&sess->session_wait_comp);
+ init_completion(&sess->session_waiting_on_uc_comp);
+ mutex_init(&sess->cmdsn_mutex);
+ spin_lock_init(&sess->conn_lock);
+ spin_lock_init(&sess->cr_a_lock);
+ spin_lock_init(&sess->cr_i_lock);
+ spin_lock_init(&sess->session_usage_lock);
+ spin_lock_init(&sess->ttt_lock);
+
+ if (!idr_pre_get(&sess_idr, GFP_KERNEL)) {
+ pr_err("idr_pre_get() for sess_idr failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ spin_lock(&sess_idr_lock);
+ idr_get_new(&sess_idr, NULL, &sess->session_index);
+ spin_unlock(&sess_idr_lock);
+
+ sess->creation_time = get_jiffies_64();
+ spin_lock_init(&sess->session_stats_lock);
+ /*
+ * The FFP CmdSN window values will be allocated from the TPG's
+ * Initiator Node's ACL once the login has been successfully completed.
+ */
+ sess->max_cmd_sn = pdu->cmdsn;
+
+ sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
+ if (!sess->sess_ops) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_sess_ops.\n");
+ return -1;
+ }
+
+ sess->se_sess = transport_init_session();
+ if (!sess->se_sess) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iscsi_login_zero_tsih_s2(
+ struct iscsi_conn *conn)
+{
+ struct iscsi_node_attrib *na;
+ struct iscsi_session *sess = conn->sess;
+ unsigned char buf[32];
+
+ sess->tpg = conn->tpg;
+
+ /*
+ * Assign a new TPG Session Handle. Note this is protected with
+ * struct iscsi_portal_group->np_login_sem from iscsit_access_np().
+ */
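+ /*
+ * A TSIH of zero is reserved by the initiator to request a new
+ * session, so repeat the increment if the counter wraps to zero.
+ */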
+ sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+ if (!sess->tsih)
+ sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+
+ /*
+ * Create the default params from user defined values..
+ */
+ if (iscsi_copy_param_list(&conn->param_list,
+ ISCSI_TPG_C(conn)->param_list, 1) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ iscsi_set_keys_to_negotiate(0, conn->param_list);
+
+ if (sess->sess_ops->SessionType)
+ return iscsi_set_keys_irrelevant_for_discovery(
+ conn->param_list);
+
+ na = iscsit_tpg_get_node_attrib(sess);
+
+ /*
+ * Need to send TargetPortalGroupTag back in first login response
+ * on any iSCSI connection where the Initiator provides TargetName.
+ * See 5.3.1. Login Phase Start
+ *
+ * In our case, we have already located the struct iscsi_tiqn at this point.
+ */
+ memset(buf, 0, 32);
+ sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
+ if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ /*
+ * Workaround for Initiators that have broken connection recovery logic.
+ *
+ * "We would really like to get rid of this." Linux-iSCSI.org team
+ */
+ memset(buf, 0, 32);
+ sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl);
+ if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Remove PSTATE_NEGOTIATE for the four FIM related keys.
+ * The Initiator node will be able to enable FIM by proposing them itself.
+ */
+int iscsi_login_disable_FIM_keys(
+ struct iscsi_param_list *param_list,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_param *param;
+
+ param = iscsi_find_param_from_key("OFMarker", param_list);
+ if (!param) {
+ pr_err("iscsi_find_param_from_key() for"
+ " OFMarker failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ param->state &= ~PSTATE_NEGOTIATE;
+
+ param = iscsi_find_param_from_key("OFMarkInt", param_list);
+ if (!param) {
+ pr_err("iscsi_find_param_from_key() for"
+ " IFMarker failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ param->state &= ~PSTATE_NEGOTIATE;
+
+ param = iscsi_find_param_from_key("IFMarker", param_list);
+ if (!param) {
+ pr_err("iscsi_find_param_from_key() for"
+ " IFMarker failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ param->state &= ~PSTATE_NEGOTIATE;
+
+ param = iscsi_find_param_from_key("IFMarkInt", param_list);
+ if (!param) {
+ pr_err("iscsi_find_param_from_key() for"
+ " IFMarker failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ param->state &= ~PSTATE_NEGOTIATE;
+
+ return 0;
+}
+
+static int iscsi_login_non_zero_tsih_s1(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+
+ iscsi_login_set_conn_values(NULL, conn, pdu->cid);
+ return 0;
+}
+
+/*
+ * Add a new connection to an existing session.
+ */
+static int iscsi_login_non_zero_tsih_s2(
+ struct iscsi_conn *conn,
+ unsigned char *buf)
+{
+ struct iscsi_portal_group *tpg = conn->tpg;
+ struct iscsi_session *sess = NULL, *sess_p = NULL;
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ struct se_session *se_sess, *se_sess_tmp;
+ struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+
+ spin_lock_bh(&se_tpg->session_lock);
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+ sess_list) {
+
+ sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
+ atomic_read(&sess_p->session_logout) ||
+ (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
+ continue;
+ if (!memcmp((const void *)sess_p->isid,
+ (const void *)pdu->isid, 6) &&
+ (sess_p->tsih == pdu->tsih)) {
+ iscsit_inc_session_usage_count(sess_p);
+ iscsit_stop_time2retain_timer(sess_p);
+ sess = sess_p;
+ break;
+ }
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ /*
+ * If the Time2Retain handler has expired, the session is already gone.
+ */
+ if (!sess) {
+ pr_err("Initiator attempting to add a connection to"
+ " a non-existent session, rejecting iSCSI Login.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_NO_SESSION);
+ return -1;
+ }
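+ /*
+ * The CSG field lives in bits 2-3 of the login flags byte and the
+ * NSG field in bits 0-1, hence the >> 2 shift to obtain the 0-3
+ * current stage values.
+ */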
+
+ /*
+ * Stop the Time2Retain timer if this is a failed session; we restart
+ * the timer if the login is not successful.
+ */
+ spin_lock_bh(&sess->conn_lock);
+ if (sess->session_state == TARG_SESS_STATE_FAILED)
+ atomic_set(&sess->session_continuation, 1);
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsi_login_set_conn_values(sess, conn, pdu->cid);
+
+ if (iscsi_copy_param_list(&conn->param_list,
+ ISCSI_TPG_C(conn)->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ iscsi_set_keys_to_negotiate(0, conn->param_list);
+ /*
+ * Need to send TargetPortalGroupTag back in first login response
+ * on any iSCSI connection where the Initiator provides TargetName.
+ * See 5.3.1. Login Phase Start
+ *
+ * In our case, we have already located the struct iscsi_tiqn at this point.
+ */
+ memset(buf, 0, 32);
+ sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
+ if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ return iscsi_login_disable_FIM_keys(conn->param_list, conn);
+}
+
+int iscsi_login_post_auth_non_zero_tsih(
+ struct iscsi_conn *conn,
+ u16 cid,
+ u32 exp_statsn)
+{
+ struct iscsi_conn *conn_ptr = NULL;
+ struct iscsi_conn_recovery *cr = NULL;
+ struct iscsi_session *sess = conn->sess;
+
+ /*
+ * By following item 5 in the login table, if we have found
+ * an existing ISID and a valid/existing TSIH and an existing
+ * CID we do connection reinstatement. Currently we do not
+ * support it, so we send back a non-zero status class to the
+ * initiator and release the new connection.
+ */
+ conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
+ if ((conn_ptr)) {
+ pr_err("Connection exists with CID %hu for %s,"
+ " performing connection reinstatement.\n",
+ conn_ptr->cid, sess->sess_ops->InitiatorName);
+
+ iscsit_connection_reinstatement_rcfr(conn_ptr);
+ iscsit_dec_conn_usage_count(conn_ptr);
+ }
+
+ /*
+ * Check for any connection recovery entries containing CID.
+ * We use the original ExpStatSN sent in the first login request
+ * to acknowledge commands for the failed connection.
+ *
+ * Also note that an explicit logout may have already been sent,
+ * but the response may not be sent due to additional connection
+ * loss.
+ */
+ if (sess->sess_ops->ErrorRecoveryLevel == 2) {
+ cr = iscsit_get_inactive_connection_recovery_entry(
+ sess, cid);
+ if ((cr)) {
+ pr_debug("Performing implicit logout"
+ " for connection recovery on CID: %hu\n",
+ conn->cid);
+ iscsit_discard_cr_cmds_by_expstatsn(cr, exp_statsn);
+ }
+ }
+
+ /*
+ * Otherwise we follow item 4 from the login table: we have
+ * found an existing ISID and a valid/existing TSIH and a new
+ * CID, so we go ahead and add a new connection to the
+ * session.
+ */
+ pr_debug("Adding CID %hu to existing session for %s.\n",
+ cid, sess->sess_ops->InitiatorName);
+
+ if ((atomic_read(&sess->nconn) + 1) > sess->sess_ops->MaxConnections) {
+ pr_err("Adding additional connection to this session"
+ " would exceed MaxConnections %d, login failed.\n",
+ sess->sess_ops->MaxConnections);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_ISID_ERROR);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+
+ if (!sess->sess_ops->SessionType)
+ iscsit_start_nopin_timer(conn);
+}
+
+static int iscsi_post_login_handler(
+ struct iscsi_np *np,
+ struct iscsi_conn *conn,
+ u8 zero_tsih)
+{
+ int stop_timer = 0;
+ struct iscsi_session *sess = conn->sess;
+ struct se_session *se_sess = sess->se_sess;
+ struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ struct iscsi_thread_set *ts;
+
+ iscsit_inc_conn_usage_count(conn);
+
+ iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_SUCCESS,
+ ISCSI_LOGIN_STATUS_ACCEPT);
+
+ pr_debug("Moving to TARG_CONN_STATE_LOGGED_IN.\n");
+ conn->conn_state = TARG_CONN_STATE_LOGGED_IN;
+
+ iscsi_set_connection_parameters(conn->conn_ops, conn->param_list);
+ iscsit_set_sync_and_steering_values(conn);
+ /*
+ * SCSI Initiator -> SCSI Target Port Mapping
+ */
+ ts = iscsi_get_thread_set();
+ if (!zero_tsih) {
+ iscsi_set_session_parameters(sess->sess_ops,
+ conn->param_list, 0);
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+
+ spin_lock_bh(&sess->conn_lock);
+ atomic_set(&sess->session_continuation, 0);
+ if (sess->session_state == TARG_SESS_STATE_FAILED) {
+ pr_debug("Moving to"
+ " TARG_SESS_STATE_LOGGED_IN.\n");
+ sess->session_state = TARG_SESS_STATE_LOGGED_IN;
+ stop_timer = 1;
+ }
+
+ pr_debug("iSCSI Login successful on CID: %hu from %s to"
+ " %s:%hu,%hu\n", conn->cid, conn->login_ip, np->np_ip,
+ np->np_port, tpg->tpgt);
+
+ list_add_tail(&conn->conn_list, &sess->sess_conn_list);
+ atomic_inc(&sess->nconn);
+ pr_debug("Incremented iSCSI Connection count to %hu"
+ " from node: %s\n", atomic_read(&sess->nconn),
+ sess->sess_ops->InitiatorName);
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsi_post_login_start_timers(conn);
+ iscsi_activate_thread_set(conn, ts);
+ /*
+ * Determine CPU mask to ensure connection's RX and TX kthreads
+ * are scheduled on the same CPU.
+ */
+ iscsit_thread_get_cpumask(conn);
+ conn->conn_rx_reset_cpumask = 1;
+ conn->conn_tx_reset_cpumask = 1;
+
+ iscsit_dec_conn_usage_count(conn);
+ if (stop_timer) {
+ spin_lock_bh(&se_tpg->session_lock);
+ iscsit_stop_time2retain_timer(sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+ }
+ iscsit_dec_session_usage_count(sess);
+ return 0;
+ }
+
+ iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+
+ iscsit_determine_maxcmdsn(sess);
+
+ spin_lock_bh(&se_tpg->session_lock);
+ __transport_register_session(&sess->tpg->tpg_se_tpg,
+ se_sess->se_node_acl, se_sess, (void *)sess);
+ pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
+ sess->session_state = TARG_SESS_STATE_LOGGED_IN;
+
+ pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n",
+ conn->cid, conn->login_ip, np->np_ip, np->np_port, tpg->tpgt);
+
+ spin_lock_bh(&sess->conn_lock);
+ list_add_tail(&conn->conn_list, &sess->sess_conn_list);
+ atomic_inc(&sess->nconn);
+ pr_debug("Incremented iSCSI Connection count to %hu from node:"
+ " %s\n", atomic_read(&sess->nconn),
+ sess->sess_ops->InitiatorName);
+ spin_unlock_bh(&sess->conn_lock);
+
+ sess->sid = tpg->sid++;
+ if (!sess->sid)
+ sess->sid = tpg->sid++;
+ pr_debug("Established iSCSI session from node: %s\n",
+ sess->sess_ops->InitiatorName);
+
+ tpg->nsessions++;
+ if (tpg->tpg_tiqn)
+ tpg->tpg_tiqn->tiqn_nsessions++;
+
+ pr_debug("Incremented number of active iSCSI sessions to %u on"
+ " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ iscsi_post_login_start_timers(conn);
+ iscsi_activate_thread_set(conn, ts);
+ /*
+ * Determine CPU mask to ensure connection's RX and TX kthreads
+ * are scheduled on the same CPU.
+ */
+ iscsit_thread_get_cpumask(conn);
+ conn->conn_rx_reset_cpumask = 1;
+ conn->conn_tx_reset_cpumask = 1;
+
+ iscsit_dec_conn_usage_count(conn);
+
+ return 0;
+}
+
+static void iscsi_handle_login_thread_timeout(unsigned long data)
+{
+ struct iscsi_np *np = (struct iscsi_np *) data;
+
+ spin_lock_bh(&np->np_thread_lock);
+ pr_err("iSCSI Login timeout on Network Portal %s:%hu\n",
+ np->np_ip, np->np_port);
+
+ if (np->np_login_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return;
+ }
+
+ if (np->np_thread)
+ send_sig(SIGINT, np->np_thread, 1);
+
+ np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&np->np_thread_lock);
+}
+
+static void iscsi_start_login_thread_timer(struct iscsi_np *np)
+{
+ /*
+ * This uses the TA_LOGIN_TIMEOUT constant because at this
+ * point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout.
+ */
+ spin_lock_bh(&np->np_thread_lock);
+ init_timer(&np->np_login_timer);
+ np->np_login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
+ np->np_login_timer.data = (unsigned long)np;
+ np->np_login_timer.function = iscsi_handle_login_thread_timeout;
+ np->np_login_timer_flags &= ~ISCSI_TF_STOP;
+ np->np_login_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&np->np_login_timer);
+
+ pr_debug("Added timeout timer to iSCSI login request for"
+ " %u seconds.\n", TA_LOGIN_TIMEOUT);
+ spin_unlock_bh(&np->np_thread_lock);
+}
+
+static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
+{
+ spin_lock_bh(&np->np_thread_lock);
+ if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&np->np_thread_lock);
+ return;
+ }
+ np->np_login_timer_flags |= ISCSI_TF_STOP;
+ spin_unlock_bh(&np->np_thread_lock);
+
+ del_timer_sync(&np->np_login_timer);
+
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&np->np_thread_lock);
+}
+
+int iscsi_target_setup_login_socket(
+ struct iscsi_np *np,
+ struct __kernel_sockaddr_storage *sockaddr)
+{
+ struct socket *sock;
+ int backlog = 5, ret, opt = 0, len;
+
+ switch (np->np_network_transport) {
+ case ISCSI_TCP:
+ np->np_ip_proto = IPPROTO_TCP;
+ np->np_sock_type = SOCK_STREAM;
+ break;
+ case ISCSI_SCTP_TCP:
+ np->np_ip_proto = IPPROTO_SCTP;
+ np->np_sock_type = SOCK_STREAM;
+ break;
+ case ISCSI_SCTP_UDP:
+ np->np_ip_proto = IPPROTO_SCTP;
+ np->np_sock_type = SOCK_SEQPACKET;
+ break;
+ case ISCSI_IWARP_TCP:
+ case ISCSI_IWARP_SCTP:
+ case ISCSI_INFINIBAND:
+ default:
+ pr_err("Unsupported network_transport: %d\n",
+ np->np_network_transport);
+ return -EINVAL;
+ }
+
+ ret = sock_create(sockaddr->ss_family, np->np_sock_type,
+ np->np_ip_proto, &sock);
+ if (ret < 0) {
+ pr_err("sock_create() failed.\n");
+ return ret;
+ }
+ np->np_socket = sock;
+ /*
+ * The SCTP stack needs struct socket->file.
+ */
+ if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
+ (np->np_network_transport == ISCSI_SCTP_UDP)) {
+ if (!sock->file) {
+ sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
+ if (!sock->file) {
+ pr_err("Unable to allocate struct"
+ " file for SCTP\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ np->np_flags |= NPF_SCTP_STRUCT_FILE;
+ }
+ }
+ /*
+ * Set up np->np_sockaddr from the sockaddr passed in from the
+ * iscsi_target_configfs.c code.
+ */
+ memcpy((void *)&np->np_sockaddr, (void *)sockaddr,
+ sizeof(struct __kernel_sockaddr_storage));
+
+ if (sockaddr->ss_family == AF_INET6)
+ len = sizeof(struct sockaddr_in6);
+ else
+ len = sizeof(struct sockaddr_in);
+ /*
+ * Set SO_REUSEADDR, and disable the Nagle algorithm with TCP_NODELAY.
+ */
+ opt = 1;
+ if (np->np_network_transport == ISCSI_TCP) {
+ ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
+ (char *)&opt, sizeof(opt));
+ if (ret < 0) {
+ pr_err("kernel_setsockopt() for TCP_NODELAY"
+ " failed: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
+ (char *)&opt, sizeof(opt));
+ if (ret < 0) {
+ pr_err("kernel_setsockopt() for SO_REUSEADDR"
+ " failed\n");
+ goto fail;
+ }
+
+ ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len);
+ if (ret < 0) {
+ pr_err("kernel_bind() failed: %d\n", ret);
+ goto fail;
+ }
+
+ ret = kernel_listen(sock, backlog);
+ if (ret != 0) {
+ pr_err("kernel_listen() failed: %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ np->np_socket = NULL;
+ if (sock) {
+ if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
+ kfree(sock->file);
+ sock->file = NULL;
+ }
+
+ sock_release(sock);
+ }
+ return ret;
+}
+
+static int __iscsi_target_login_thread(struct iscsi_np *np)
+{
+ u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
+ int err, ret = 0, ip_proto, sock_type, set_sctp_conn_flag, stop;
+ struct iscsi_conn *conn = NULL;
+ struct iscsi_login *login;
+ struct iscsi_portal_group *tpg = NULL;
+ struct socket *new_sock, *sock;
+ struct kvec iov;
+ struct iscsi_login_req *pdu;
+ struct sockaddr_in sock_in;
+ struct sockaddr_in6 sock_in6;
+
+ flush_signals(current);
+ set_sctp_conn_flag = 0;
+ sock = np->np_socket;
+ ip_proto = np->np_ip_proto;
+ sock_type = np->np_sock_type;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ complete(&np->np_restart_comp);
+ } else {
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ if (kernel_accept(sock, &new_sock, 0) < 0) {
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ spin_unlock_bh(&np->np_thread_lock);
+ complete(&np->np_restart_comp);
+ /* Get another socket */
+ return 1;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+ goto out;
+ }
+ /*
+ * The SCTP stack needs struct socket->file.
+ */
+ if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
+ (np->np_network_transport == ISCSI_SCTP_UDP)) {
+ if (!new_sock->file) {
+ new_sock->file = kzalloc(
+ sizeof(struct file), GFP_KERNEL);
+ if (!new_sock->file) {
+ pr_err("Unable to allocate struct"
+ " file for SCTP\n");
+ sock_release(new_sock);
+ /* Get another socket */
+ return 1;
+ }
+ set_sctp_conn_flag = 1;
+ }
+ }
+
+ iscsi_start_login_thread_timer(np);
+
+ conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+ if (!conn) {
+ pr_err("Could not allocate memory for"
+ " new connection\n");
+ if (set_sctp_conn_flag) {
+ kfree(new_sock->file);
+ new_sock->file = NULL;
+ }
+ sock_release(new_sock);
+ /* Get another socket */
+ return 1;
+ }
+
+ pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+ conn->conn_state = TARG_CONN_STATE_FREE;
+ conn->sock = new_sock;
+
+ if (set_sctp_conn_flag)
+ conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
+
+ pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
+ conn->conn_state = TARG_CONN_STATE_XPT_UP;
+
+ /*
+ * Allocate conn->conn_ops early, as any failure path below that
+ * calls iscsit_tx_login_rsp() will end up calling tx_data().
+ */
+ conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
+ if (!conn->conn_ops) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_conn_ops.\n");
+ goto new_sess_out;
+ }
+ /*
+ * Perform the remaining iSCSI connection initialization items..
+ */
+ if (iscsi_login_init_conn(conn) < 0)
+ goto new_sess_out;
+
+ memset(buffer, 0, ISCSI_HDR_LEN);
+ memset(&iov, 0, sizeof(struct kvec));
+ iov.iov_base = buffer;
+ iov.iov_len = ISCSI_HDR_LEN;
+
+ if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) <= 0) {
+ pr_err("rx_data() returned an error.\n");
+ goto new_sess_out;
+ }
+
+ iscsi_opcode = (buffer[0] & ISCSI_OPCODE_MASK);
+ if (!(iscsi_opcode & ISCSI_OP_LOGIN)) {
+ pr_err("First opcode is not login request,"
+ " failing login request.\n");
+ goto new_sess_out;
+ }
+
+ pdu = (struct iscsi_login_req *) buffer;
+ pdu->cid = be16_to_cpu(pdu->cid);
+ pdu->tsih = be16_to_cpu(pdu->tsih);
+ pdu->itt = be32_to_cpu(pdu->itt);
+ pdu->cmdsn = be32_to_cpu(pdu->cmdsn);
+ pdu->exp_statsn = be32_to_cpu(pdu->exp_statsn);
+ /*
+ * Used by iscsit_tx_login_rsp() for Login Response PDUs
+ * when Status-Class != 0.
+ */
+ conn->login_itt = pdu->itt;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+ spin_unlock_bh(&np->np_thread_lock);
+ pr_err("iSCSI Network Portal on %s:%hu currently not"
+ " active.\n", np->np_ip, np->np_port);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ goto new_sess_out;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ if (np->np_sockaddr.ss_family == AF_INET6) {
+ memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
+
+ if (conn->sock->ops->getname(conn->sock,
+ (struct sockaddr *)&sock_in6, &err, 1) < 0) {
+ pr_err("sock_ops->getname() failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ goto new_sess_out;
+ }
+#if 0
+ if (!iscsi_ntop6((const unsigned char *)
+ &sock_in6.sin6_addr.in6_u,
+ (char *)&conn->ipv6_login_ip[0],
+ IPV6_ADDRESS_SPACE)) {
+ pr_err("iscsi_ntop6() failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ goto new_sess_out;
+ }
+#else
+ pr_debug("Skipping iscsi_ntop6()\n");
+#endif
+ } else {
+ memset(&sock_in, 0, sizeof(struct sockaddr_in));
+
+ if (conn->sock->ops->getname(conn->sock,
+ (struct sockaddr *)&sock_in, &err, 1) < 0) {
+ pr_err("sock_ops->getname() failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ goto new_sess_out;
+ }
+ sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr);
+ conn->login_port = ntohs(sock_in.sin_port);
+ }
+
+ conn->network_transport = np->np_network_transport;
+
+ pr_debug("Received iSCSI login request from %s on %s Network"
+ " Portal %s:%hu\n", conn->login_ip,
+ (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP",
+ np->np_ip, np->np_port);
+
+ pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
+ conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
+
+ if (iscsi_login_check_initiator_version(conn, pdu->max_version,
+ pdu->min_version) < 0)
+ goto new_sess_out;
+
+ zero_tsih = (pdu->tsih == 0x0000);
+ if ((zero_tsih)) {
+ /*
+ * This is the leading connection of a new session.
+ * We wait until after authentication to check for
+ * session reinstatement.
+ */
+ if (iscsi_login_zero_tsih_s1(conn, buffer) < 0)
+ goto new_sess_out;
+ } else {
+ /*
+ * Add a new connection to an existing session.
+ * We check for a non-existent session in
+ * iscsi_login_non_zero_tsih_s2() below based
+ * on ISID/TSIH, but wait until after authentication
+ * to check for connection reinstatement, etc.
+ */
+ if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0)
+ goto new_sess_out;
+ }
+
+ /*
+ * This will process the first login request, and call
+ * iscsi_target_locate_portal(), and return a valid struct iscsi_login.
+ */
+ login = iscsi_target_init_negotiation(np, conn, buffer);
+ if (!login) {
+ tpg = conn->tpg;
+ goto new_sess_out;
+ }
+
+ tpg = conn->tpg;
+ if (!tpg) {
+ pr_err("Unable to locate struct iscsi_conn->tpg\n");
+ goto new_sess_out;
+ }
+
+ if (zero_tsih) {
+ if (iscsi_login_zero_tsih_s2(conn) < 0) {
+ iscsi_target_nego_release(login, conn);
+ goto new_sess_out;
+ }
+ } else {
+ if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) {
+ iscsi_target_nego_release(login, conn);
+ goto old_sess_out;
+ }
+ }
+
+ if (iscsi_target_start_negotiation(login, conn) < 0)
+ goto new_sess_out;
+
+ if (!conn->sess) {
+ pr_err("struct iscsi_conn session pointer is NULL!\n");
+ goto new_sess_out;
+ }
+
+ iscsi_stop_login_thread_timer(np);
+
+ if (signal_pending(current))
+ goto new_sess_out;
+
+ ret = iscsi_post_login_handler(np, conn, zero_tsih);
+
+ if (ret < 0)
+ goto new_sess_out;
+
+ iscsit_deaccess_np(np, tpg);
+ tpg = NULL;
+ /* Get another socket */
+ return 1;
+
+new_sess_out:
+ pr_err("iSCSI Login negotiation failed.\n");
+ iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ if (!zero_tsih || !conn->sess)
+ goto old_sess_out;
+ if (conn->sess->se_sess)
+ transport_free_session(conn->sess->se_sess);
+ if (conn->sess->session_index != 0) {
+ spin_lock_bh(&sess_idr_lock);
+ idr_remove(&sess_idr, conn->sess->session_index);
+ spin_unlock_bh(&sess_idr_lock);
+ }
+ if (conn->sess->sess_ops)
+ kfree(conn->sess->sess_ops);
+ if (conn->sess)
+ kfree(conn->sess);
+old_sess_out:
+ iscsi_stop_login_thread_timer(np);
+ /*
+ * If login negotiation fails, check whether the Time2Retain timer
+ * needs to be restarted.
+ */
+ if (!zero_tsih && conn->sess) {
+ spin_lock_bh(&conn->sess->conn_lock);
+ if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
+ struct se_portal_group *se_tpg =
+ &ISCSI_TPG_C(conn)->tpg_se_tpg;
+
+ atomic_set(&conn->sess->session_continuation, 0);
+ spin_unlock_bh(&conn->sess->conn_lock);
+ spin_lock_bh(&se_tpg->session_lock);
+ iscsit_start_time2retain_handler(conn->sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+ } else
+ spin_unlock_bh(&conn->sess->conn_lock);
+ iscsit_dec_session_usage_count(conn->sess);
+ }
+
+ if (!IS_ERR(conn->conn_rx_hash.tfm))
+ crypto_free_hash(conn->conn_rx_hash.tfm);
+ if (!IS_ERR(conn->conn_tx_hash.tfm))
+ crypto_free_hash(conn->conn_tx_hash.tfm);
+
+ if (conn->conn_cpumask)
+ free_cpumask_var(conn->conn_cpumask);
+
+ kfree(conn->conn_ops);
+
+ if (conn->param_list) {
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+ }
+ if (conn->sock) {
+ if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
+ kfree(conn->sock->file);
+ conn->sock->file = NULL;
+ }
+ sock_release(conn->sock);
+ }
+ kfree(conn);
+
+ if (tpg) {
+ iscsit_deaccess_np(np, tpg);
+ tpg = NULL;
+ }
+
+out:
+ stop = kthread_should_stop();
+ if (!stop && signal_pending(current)) {
+ spin_lock_bh(&np->np_thread_lock);
+ stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
+ spin_unlock_bh(&np->np_thread_lock);
+ }
+ /* Wait for another socket.. */
+ if (!stop)
+ return 1;
+
+ iscsi_stop_login_thread_timer(np);
+ spin_lock_bh(&np->np_thread_lock);
+ np->np_thread_state = ISCSI_NP_THREAD_EXIT;
+ spin_unlock_bh(&np->np_thread_lock);
+ return 0;
+}
+
+int iscsi_target_login_thread(void *arg)
+{
+ struct iscsi_np *np = (struct iscsi_np *)arg;
+ int ret;
+
+ allow_signal(SIGINT);
+
+ while (!kthread_should_stop()) {
+ ret = __iscsi_target_login_thread(np);
+ /*
+ * We break and exit here unless another sock_accept() call
+ * is expected.
+ */
+ if (ret != 1)
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
new file mode 100644
index 00000000000..091dcae2532
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -0,0 +1,12 @@
+#ifndef ISCSI_TARGET_LOGIN_H
+#define ISCSI_TARGET_LOGIN_H
+
+extern int iscsi_login_setup_crypto(struct iscsi_conn *);
+extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
+extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
+extern int iscsi_target_setup_login_socket(struct iscsi_np *,
+ struct __kernel_sockaddr_storage *);
+extern int iscsi_target_login_thread(void *);
+extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
+
+#endif /*** ISCSI_TARGET_LOGIN_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
new file mode 100644
index 00000000000..4d087ac1106
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -0,0 +1,1067 @@
+/*******************************************************************************
+ * This file contains main functions related to iSCSI Parameter negotiation.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/ctype.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_tpg.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_nego.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_auth.h"
+
+#define MAX_LOGIN_PDUS 7
+#define TEXT_LEN 4096
+
+void convert_null_to_semi(char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (buf[i] == '\0')
+ buf[i] = ';';
+}
+
+int strlen_semi(char *buf)
+{
+ int i = 0;
+
+ while (buf[i] != '\0') {
+ if (buf[i] == ';')
+ return i;
+ i++;
+ }
+
+ return -1;
+}
+
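+/*
+ * Pull a single key=value pair out of a ';' separated buffer.  For
+ * example, extract_param("CHAP_A=5;CHAP_I=...", "CHAP_A", ...) copies
+ * "5" into out_buf and sets *type to DECIMAL; values with a 0x/0X
+ * prefix are flagged as HEX with the prefix stripped.
+ */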
+int extract_param(
+ const char *in_buf,
+ const char *pattern,
+ unsigned int max_length,
+ char *out_buf,
+ unsigned char *type)
+{
+ char *ptr;
+ int len;
+
+ if (!in_buf || !pattern || !out_buf || !type)
+ return -1;
+
+ ptr = strstr(in_buf, pattern);
+ if (!ptr)
+ return -1;
+
+ ptr = strstr(ptr, "=");
+ if (!ptr)
+ return -1;
+
+ ptr += 1;
+ if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) {
+ ptr += 2; /* skip 0x */
+ *type = HEX;
+ } else
+ *type = DECIMAL;
+
+ len = strlen_semi(ptr);
+ if (len < 0)
+ return -1;
+
+ if (len > max_length) {
+ pr_err("Length of input: %d exeeds max_length:"
+ " %d\n", len, max_length);
+ return -1;
+ }
+ memcpy(out_buf, ptr, len);
+ out_buf[len] = '\0';
+
+ return 0;
+}
+
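+/*
+ * Return values are interpreted by iscsi_target_do_authentication():
+ * 0 means the authentication exchange continues, 1 means it completed
+ * successfully (e.g. AuthMethod=None), 2 means it failed or the
+ * requested method is unsupported, and -1 indicates a setup error.
+ */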
+static u32 iscsi_handle_authentication(
+ struct iscsi_conn *conn,
+ char *in_buf,
+ char *out_buf,
+ int in_length,
+ int *out_length,
+ unsigned char *authtype)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_auth *auth;
+ struct iscsi_node_acl *iscsi_nacl;
+ struct se_node_acl *se_nacl;
+
+ if (!sess->sess_ops->SessionType) {
+ /*
+ * For SessionType=Normal
+ */
+ se_nacl = conn->sess->se_sess->se_node_acl;
+ if (!se_nacl) {
+ pr_err("Unable to locate struct se_node_acl for"
+ " CHAP auth\n");
+ return -1;
+ }
+ iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
+ se_node_acl);
+ if (!iscsi_nacl) {
+ pr_err("Unable to locate struct iscsi_node_acl for"
+ " CHAP auth\n");
+ return -1;
+ }
+
+ auth = ISCSI_NODE_AUTH(iscsi_nacl);
+ } else {
+ /*
+ * For SessionType=Discovery
+ */
+ auth = &iscsit_global->discovery_acl.node_auth;
+ }
+
+ if (strstr("CHAP", authtype))
+ strcpy(conn->sess->auth_type, "CHAP");
+ else
+ strcpy(conn->sess->auth_type, NONE);
+
+ if (strstr("None", authtype))
+ return 1;
+#ifdef CANSRP
+ else if (strstr("SRP", authtype))
+ return srp_main_loop(conn, auth, in_buf, out_buf,
+ &in_length, out_length);
+#endif
+ else if (strstr("CHAP", authtype))
+ return chap_main_loop(conn, auth, in_buf, out_buf,
+ &in_length, out_length);
+ else if (strstr("SPKM1", authtype))
+ return 2;
+ else if (strstr("SPKM2", authtype))
+ return 2;
+ else if (strstr("KRB5", authtype))
+ return 2;
+ else
+ return 2;
+}
+
+static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn)
+{
+ kfree(conn->auth_protocol);
+}
+
+static int iscsi_target_check_login_request(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ int req_csg, req_nsg, rsp_csg, rsp_nsg;
+ u32 payload_length;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ switch (login_req->opcode & ISCSI_OPCODE_MASK) {
+ case ISCSI_OP_LOGIN:
+ break;
+ default:
+ pr_err("Received unknown opcode 0x%02x.\n",
+ login_req->opcode & ISCSI_OPCODE_MASK);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if ((login_req->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
+ (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+ pr_err("Login request has both ISCSI_FLAG_LOGIN_CONTINUE"
+ " and ISCSI_FLAG_LOGIN_TRANSIT set, protocol error.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ req_csg = (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
+ rsp_csg = (login_rsp->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
+ req_nsg = (login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
+ rsp_nsg = (login_rsp->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
+
+ if (req_csg != login->current_stage) {
+ pr_err("Initiator unexpectedly changed login stage"
+ " from %d to %d, login failed.\n", login->current_stage,
+ req_csg);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if ((req_nsg == 2) || (req_csg >= 2) ||
+ ((login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
+ (req_nsg <= req_csg))) {
+ pr_err("Illegal login_req->flags Combination, CSG: %d,"
+ " NSG: %d, ISCSI_FLAG_LOGIN_TRANSIT: %d.\n", req_csg,
+ req_nsg, (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT));
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if ((login_req->max_version != login->version_max) ||
+ (login_req->min_version != login->version_min)) {
+ pr_err("Login request changed Version Max/Nin"
+ " unexpectedly to 0x%02x/0x%02x, protocol error\n",
+ login_req->max_version, login_req->min_version);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if (memcmp(login_req->isid, login->isid, 6) != 0) {
+ pr_err("Login request changed ISID unexpectedly,"
+ " protocol error.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if (login_req->itt != login->init_task_tag) {
+ pr_err("Login request changed ITT unexpectedly to"
+ " 0x%08x, protocol error.\n", login_req->itt);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ if (payload_length > MAX_KEY_VALUE_PAIRS) {
+ pr_err("Login request payload exceeds default"
+ " MaxRecvDataSegmentLength: %u, protocol error.\n",
+ MAX_KEY_VALUE_PAIRS);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iscsi_target_check_first_request(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ struct iscsi_param *param = NULL;
+ struct se_node_acl *se_nacl;
+
+ login->first_request = 0;
+
+ list_for_each_entry(param, &conn->param_list->param_list, p_list) {
+ if (!strncmp(param->name, SESSIONTYPE, 11)) {
+ if (!IS_PSTATE_ACCEPTOR(param)) {
+ pr_err("SessionType key not received"
+ " in first login request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ return -1;
+ }
+ if (!strncmp(param->value, DISCOVERY, 9))
+ return 0;
+ }
+
+ if (!strncmp(param->name, INITIATORNAME, 13)) {
+ if (!IS_PSTATE_ACCEPTOR(param)) {
+ if (!login->leading_connection)
+ continue;
+
+ pr_err("InitiatorName key not received"
+ " in first login request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ return -1;
+ }
+
+ /*
+ * For non-leading connections, double check that the
+ * received InitiatorName matches the existing session's
+ * struct iscsi_node_acl.
+ */
+ if (!login->leading_connection) {
+ se_nacl = conn->sess->se_sess->se_node_acl;
+ if (!se_nacl) {
+ pr_err("Unable to locate"
+ " struct se_node_acl\n");
+ iscsit_tx_login_rsp(conn,
+ ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
+ return -1;
+ }
+
+ if (strcmp(param->value,
+ se_nacl->initiatorname)) {
+ pr_err("Incorrect"
+ " InitiatorName: %s for this"
+ " iSCSI Initiator Node.\n",
+ param->value);
+ iscsit_tx_login_rsp(conn,
+ ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_TGT_NOT_FOUND);
+ return -1;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ u32 padding = 0;
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+
+ login_rsp->opcode = ISCSI_OP_LOGIN_RSP;
+ hton24(login_rsp->dlength, login->rsp_length);
+ memcpy(login_rsp->isid, login->isid, 6);
+ login_rsp->tsih = cpu_to_be16(login->tsih);
+ login_rsp->itt = cpu_to_be32(login->init_task_tag);
+ login_rsp->statsn = cpu_to_be32(conn->stat_sn++);
+ login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+ login_rsp->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+
+ pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
+ " ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
+ " %u\n", login_rsp->flags, ntohl(login_rsp->itt),
+ ntohl(login_rsp->exp_cmdsn), ntohl(login_rsp->max_cmdsn),
+ ntohl(login_rsp->statsn), login->rsp_length);
+
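+ /*
+ * Login key/value payloads are padded out to a 4-byte boundary,
+ * e.g. a 13 byte response gets 3 bytes of padding.
+ */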
+ padding = ((-login->rsp_length) & 3);
+
+ if (iscsi_login_tx_data(
+ conn,
+ login->rsp,
+ login->rsp_buf,
+ login->rsp_length + padding) < 0)
+ return -1;
+
+ login->rsp_length = 0;
+ login_rsp->tsih = be16_to_cpu(login_rsp->tsih);
+ login_rsp->itt = be32_to_cpu(login_rsp->itt);
+ login_rsp->statsn = be32_to_cpu(login_rsp->statsn);
+ mutex_lock(&sess->cmdsn_mutex);
+ login_rsp->exp_cmdsn = be32_to_cpu(sess->exp_cmd_sn);
+ login_rsp->max_cmdsn = be32_to_cpu(sess->max_cmd_sn);
+ mutex_unlock(&sess->cmdsn_mutex);
+
+ return 0;
+}
+
+static int iscsi_target_do_rx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ u32 padding = 0, payload_length;
+ struct iscsi_login_req *login_req;
+
+ if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
+ return -1;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ payload_length = ntoh24(login_req->dlength);
+ login_req->tsih = be16_to_cpu(login_req->tsih);
+ login_req->itt = be32_to_cpu(login_req->itt);
+ login_req->cid = be16_to_cpu(login_req->cid);
+ login_req->cmdsn = be32_to_cpu(login_req->cmdsn);
+ login_req->exp_statsn = be32_to_cpu(login_req->exp_statsn);
+
+ pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+ " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
+ login_req->flags, login_req->itt, login_req->cmdsn,
+ login_req->exp_statsn, login_req->cid, payload_length);
+
+ if (iscsi_target_check_login_request(conn, login) < 0)
+ return -1;
+
+ padding = ((-payload_length) & 3);
+ memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
+
+ if (iscsi_login_rx_data(
+ conn,
+ login->req_buf,
+ payload_length + padding) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ if (iscsi_target_do_tx_login_io(conn, login) < 0)
+ return -1;
+
+ if (iscsi_target_do_rx_login_io(conn, login) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int iscsi_target_get_initial_payload(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ u32 padding = 0, payload_length;
+ struct iscsi_login_req *login_req;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ payload_length = ntoh24(login_req->dlength);
+
+ pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+ " CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
+ login_req->flags, login_req->itt, login_req->cmdsn,
+ login_req->exp_statsn, payload_length);
+
+ if (iscsi_target_check_login_request(conn, login) < 0)
+ return -1;
+
+ padding = ((-payload_length) & 3);
+
+ if (iscsi_login_rx_data(
+ conn,
+ login->req_buf,
+ payload_length + padding) < 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * NOTE: We check for existing sessions or connections AFTER the initiator
+ * has been successfully authenticated in order to protect against faked
+ * ISID/TSIH combinations.
+ */
+static int iscsi_target_check_for_existing_instances(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ if (login->checked_for_existing)
+ return 0;
+
+ login->checked_for_existing = 1;
+
+ if (!login->tsih)
+ return iscsi_check_for_session_reinstatement(conn);
+ else
+ return iscsi_login_post_auth_non_zero_tsih(conn, login->cid,
+ login->initial_exp_statsn);
+}
+
+static int iscsi_target_do_authentication(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ int authret;
+ u32 payload_length;
+ struct iscsi_param *param;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
+ if (!param)
+ return -1;
+
+ authret = iscsi_handle_authentication(
+ conn,
+ login->req_buf,
+ login->rsp_buf,
+ payload_length,
+ &login->rsp_length,
+ param->value);
+ switch (authret) {
+ case 0:
+ pr_debug("Received OK response"
+ " from LIO Authentication, continuing.\n");
+ break;
+ case 1:
+ pr_debug("iSCSI security negotiation"
+ " completed sucessfully.\n");
+ login->auth_complete = 1;
+ if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
+ (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+ login_rsp->flags |= (ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
+ ISCSI_FLAG_LOGIN_TRANSIT);
+ login->current_stage = 1;
+ }
+ return iscsi_target_check_for_existing_instances(
+ conn, login);
+ case 2:
+ pr_err("Security negotiation"
+ " failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
+ default:
+ pr_err("Received unknown error %d from LIO"
+ " Authentication\n", authret);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iscsi_target_handle_csg_zero(
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ int ret;
+ u32 payload_length;
+ struct iscsi_param *param;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
+ if (!param)
+ return -1;
+
+ ret = iscsi_decode_text_input(
+ PHASE_SECURITY|PHASE_DECLARATIVE,
+ SENDER_INITIATOR|SENDER_RECEIVER,
+ login->req_buf,
+ payload_length,
+ conn->param_list);
+ if (ret < 0)
+ return -1;
+
+ if (ret > 0) {
+ if (login->auth_complete) {
+ pr_err("Initiator has already been"
+ " successfully authenticated, but is still"
+ " sending %s keys.\n", param->value);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ return -1;
+ }
+
+ goto do_auth;
+ }
+
+ if (login->first_request)
+ if (iscsi_target_check_first_request(conn, login) < 0)
+ return -1;
+
+ ret = iscsi_encode_text_output(
+ PHASE_SECURITY|PHASE_DECLARATIVE,
+ SENDER_TARGET,
+ login->rsp_buf,
+ &login->rsp_length,
+ conn->param_list);
+ if (ret < 0)
+ return -1;
+
+ if (!iscsi_check_negotiated_keys(conn->param_list)) {
+ if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+ !strncmp(param->value, NONE, 4)) {
+ pr_err("Initiator sent AuthMethod=None but"
+ " Target is enforcing iSCSI Authentication,"
+ " login failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
+ }
+
+ if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
+ !login->auth_complete)
+ return 0;
+
+ if (strncmp(param->value, NONE, 4) && !login->auth_complete)
+ return 0;
+
+ if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
+ (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
+ login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE1 |
+ ISCSI_FLAG_LOGIN_TRANSIT;
+ login->current_stage = 1;
+ }
+ }
+
+ return 0;
+do_auth:
+ return iscsi_target_do_authentication(conn, login);
+}
+
+static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ int ret;
+ u32 payload_length;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ ret = iscsi_decode_text_input(
+ PHASE_OPERATIONAL|PHASE_DECLARATIVE,
+ SENDER_INITIATOR|SENDER_RECEIVER,
+ login->req_buf,
+ payload_length,
+ conn->param_list);
+ if (ret < 0)
+ return -1;
+
+ if (login->first_request)
+ if (iscsi_target_check_first_request(conn, login) < 0)
+ return -1;
+
+ if (iscsi_target_check_for_existing_instances(conn, login) < 0)
+ return -1;
+
+ ret = iscsi_encode_text_output(
+ PHASE_OPERATIONAL|PHASE_DECLARATIVE,
+ SENDER_TARGET,
+ login->rsp_buf,
+ &login->rsp_length,
+ conn->param_list);
+ if (ret < 0)
+ return -1;
+
+ if (!login->auth_complete &&
+ ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
+ pr_err("Initiator is requesting CSG: 1, has not been"
+ " successfully authenticated, and the Target is"
+ " enforcing iSCSI Authentication, login failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
+ }
+
+ if (!iscsi_check_negotiated_keys(conn->param_list))
+ if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE3) &&
+ (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT))
+ login_rsp->flags |= ISCSI_FLAG_LOGIN_NEXT_STAGE3 |
+ ISCSI_FLAG_LOGIN_TRANSIT;
+
+ return 0;
+}
+
+static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ int pdu_count = 0;
+ struct iscsi_login_req *login_req;
+ struct iscsi_login_rsp *login_rsp;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_login_rsp *) login->rsp;
+
+ while (1) {
+ if (++pdu_count > MAX_LOGIN_PDUS) {
+ pr_err("MAX_LOGIN_PDUS count reached.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ return -1;
+ }
+
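+ /* The Current Stage (CSG) occupies bits 2-3 of the login flags. */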
+ switch ((login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) {
+ case 0:
+ login_rsp->flags |= (0 & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK);
+ if (iscsi_target_handle_csg_zero(conn, login) < 0)
+ return -1;
+ break;
+ case 1:
+ login_rsp->flags |= ISCSI_FLAG_LOGIN_CURRENT_STAGE1;
+ if (iscsi_target_handle_csg_one(conn, login) < 0)
+ return -1;
+ if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+ login->tsih = conn->sess->tsih;
+ if (iscsi_target_do_tx_login_io(conn,
+ login) < 0)
+ return -1;
+ return 0;
+ }
+ break;
+ default:
+ pr_err("Illegal CSG: %d received from"
+ " Initiator, protocol error.\n",
+ (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
+ >> 2);
+ break;
+ }
+
+ if (iscsi_target_do_login_io(conn, login) < 0)
+ return -1;
+
+ if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+ login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT;
+ login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;
+ }
+ }
+
+ return 0;
+}
+
+static void iscsi_initiatorname_tolower(
+ char *param_buf)
+{
+ char *c;
+ u32 iqn_size = strlen(param_buf), i;
+
+ for (i = 0; i < iqn_size; i++) {
+ c = (char *)&param_buf[i];
+ if (!isupper(*c))
+ continue;
+
+ *c = tolower(*c);
+ }
+}
+
+/*
+ * Processes the first Login Request.
+ */
+static int iscsi_target_locate_portal(
+ struct iscsi_np *np,
+ struct iscsi_conn *conn,
+ struct iscsi_login *login)
+{
+ char *i_buf = NULL, *s_buf = NULL, *t_buf = NULL;
+ char *tmpbuf, *start = NULL, *end = NULL, *key, *value;
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_tiqn *tiqn;
+ struct iscsi_login_req *login_req;
+ struct iscsi_targ_login_rsp *login_rsp;
+ u32 payload_length;
+ int sessiontype = 0, ret = 0;
+
+ login_req = (struct iscsi_login_req *) login->req;
+ login_rsp = (struct iscsi_targ_login_rsp *) login->rsp;
+ payload_length = ntoh24(login_req->dlength);
+
+ login->first_request = 1;
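+ /* A zero TSIH marks the leading connection of a new session. */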
+ login->leading_connection = (!login_req->tsih) ? 1 : 0;
+ login->current_stage =
+ (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
+ login->version_min = login_req->min_version;
+ login->version_max = login_req->max_version;
+ memcpy(login->isid, login_req->isid, 6);
+ login->cmd_sn = login_req->cmdsn;
+ login->init_task_tag = login_req->itt;
+ login->initial_exp_statsn = login_req->exp_statsn;
+ login->cid = login_req->cid;
+ login->tsih = login_req->tsih;
+
+ if (iscsi_target_get_initial_payload(conn, login) < 0)
+ return -1;
+
+ tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL);
+ if (!tmpbuf) {
+ pr_err("Unable to allocate memory for tmpbuf.\n");
+ return -1;
+ }
+
+ memcpy(tmpbuf, login->req_buf, payload_length);
+ tmpbuf[payload_length] = '\0';
+ start = tmpbuf;
+ end = (start + payload_length);
+
+ /*
+ * Locate the initial keys expected from the Initiator node in
+ * the first login request in order to progress with the login phase.
+ */
+ while (start < end) {
+ if (iscsi_extract_key_value(start, &key, &value) < 0) {
+ ret = -1;
+ goto out;
+ }
+
+ if (!strncmp(key, "InitiatorName", 13))
+ i_buf = value;
+ else if (!strncmp(key, "SessionType", 11))
+ s_buf = value;
+ else if (!strncmp(key, "TargetName", 10))
+ t_buf = value;
+
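+ /* Advance past this pair: key, value, and the two '\0' separators. */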
+ start += strlen(key) + strlen(value) + 2;
+ }
+
+ /*
+ * See 5.3. Login Phase.
+ */
+ if (!i_buf) {
+ pr_err("InitiatorName key not received"
+ " in first login request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ ret = -1;
+ goto out;
+ }
+ /*
+ * Convert the incoming InitiatorName to lowercase following
+ * RFC-3720 3.2.6.1. section c) that says that iSCSI IQNs
+ * are NOT case sensitive.
+ */
+ iscsi_initiatorname_tolower(i_buf);
+
+ if (!s_buf) {
+ if (!login->leading_connection)
+ goto get_target;
+
+ pr_err("SessionType key not received"
+ " in first login request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ ret = -1;
+ goto out;
+ }
+
+ /*
+ * Use default portal group for discovery sessions.
+ */
+ sessiontype = strncmp(s_buf, DISCOVERY, 9);
+ if (!sessiontype) {
+ conn->tpg = iscsit_global->discovery_tpg;
+ if (!login->leading_connection)
+ goto get_target;
+
+ sess->sess_ops->SessionType = 1;
+ /*
+ * Setup crc32c modules from libcrypto
+ */
+ if (iscsi_login_setup_crypto(conn) < 0) {
+ pr_err("iscsi_login_setup_crypto() failed\n");
+ ret = -1;
+ goto out;
+ }
+ /*
+ * Serialize access across the discovery struct iscsi_portal_group to
+ * process login attempt.
+ */
+ if (iscsit_access_np(np, conn->tpg) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ ret = -1;
+ goto out;
+ }
+ ret = 0;
+ goto out;
+ }
+
+get_target:
+ if (!t_buf) {
+ pr_err("TargetName key not received"
+ " in first login request while"
+ " SessionType=Normal.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_MISSING_FIELDS);
+ ret = -1;
+ goto out;
+ }
+
+ /*
+ * Locate Target IQN from Storage Node.
+ */
+ tiqn = iscsit_get_tiqn_for_login(t_buf);
+ if (!tiqn) {
+ pr_err("Unable to locate Target IQN: %s in"
+ " Storage Node\n", t_buf);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ ret = -1;
+ goto out;
+ }
+ pr_debug("Located Storage Object: %s\n", tiqn->tiqn);
+
+ /*
+ * Locate Target Portal Group from Storage Node.
+ */
+ conn->tpg = iscsit_get_tpg_from_np(tiqn, np);
+ if (!conn->tpg) {
+ pr_err("Unable to locate Target Portal Group"
+ " on %s\n", tiqn->tiqn);
+ iscsit_put_tiqn_for_login(tiqn);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ ret = -1;
+ goto out;
+ }
+ pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
+ /*
+ * Setup crc32c modules from libcrypto
+ */
+ if (iscsi_login_setup_crypto(conn) < 0) {
+ pr_err("iscsi_login_setup_crypto() failed\n");
+ ret = -1;
+ goto out;
+ }
+ /*
+ * Serialize access across the struct iscsi_portal_group to
+ * process login attempt.
+ */
+ if (iscsit_access_np(np, conn->tpg) < 0) {
+ iscsit_put_tiqn_for_login(tiqn);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ ret = -1;
+ conn->tpg = NULL;
+ goto out;
+ }
+
+ /*
+ * conn->sess->node_acl will be set when the referenced
+ * struct iscsi_session is located from received ISID+TSIH in
+ * iscsi_login_non_zero_tsih_s2().
+ */
+ if (!login->leading_connection) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * This value is required in iscsi_login_zero_tsih_s2()
+ */
+ sess->sess_ops->SessionType = 0;
+
+ /*
+ * Locate incoming Initiator IQN reference from Storage Node.
+ */
+ sess->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+ &conn->tpg->tpg_se_tpg, i_buf);
+ if (!sess->se_sess->se_node_acl) {
+ pr_err("iSCSI Initiator Node: %s is not authorized to"
+ " access iSCSI target portal group: %hu.\n",
+ i_buf, conn->tpg->tpgt);
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_TGT_FORBIDDEN);
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ kfree(tmpbuf);
+ return ret;
+}
+
+struct iscsi_login *iscsi_target_init_negotiation(
+ struct iscsi_np *np,
+ struct iscsi_conn *conn,
+ char *login_pdu)
+{
+ struct iscsi_login *login;
+
+ login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
+ if (!login) {
+ pr_err("Unable to allocate memory for struct iscsi_login.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return NULL;
+ }
+
+ login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ if (!login->req) {
+ pr_err("Unable to allocate memory for Login Request.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ goto out;
+ }
+ memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
+
+ login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+ if (!login->req_buf) {
+ pr_err("Unable to allocate memory for response buffer.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ goto out;
+ }
+ /*
+ * SessionType: Discovery
+ *
+ * Locates Default Portal
+ *
+ * SessionType: Normal
+ *
+ * Locates Target Portal from NP -> Target IQN
+ */
+ if (iscsi_target_locate_portal(np, conn, login) < 0) {
+ pr_err("iSCSI Login negotiation failed.\n");
+ goto out;
+ }
+
+ return login;
+out:
+ kfree(login->req);
+ kfree(login->req_buf);
+ kfree(login);
+
+ return NULL;
+}
+
+int iscsi_target_start_negotiation(
+ struct iscsi_login *login,
+ struct iscsi_conn *conn)
+{
+ int ret = -1;
+
+ login->rsp = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ if (!login->rsp) {
+ pr_err("Unable to allocate memory for"
+ " Login Response.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ ret = -1;
+ goto out;
+ }
+
+ login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+ if (!login->rsp_buf) {
+ pr_err("Unable to allocate memory for"
+ " request buffer.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ ret = -1;
+ goto out;
+ }
+
+ ret = iscsi_target_do_login(conn, login);
+out:
+ if (ret != 0)
+ iscsi_remove_failed_auth_entry(conn);
+
+ iscsi_target_nego_release(login, conn);
+ return ret;
+}
+
+void iscsi_target_nego_release(
+ struct iscsi_login *login,
+ struct iscsi_conn *conn)
+{
+ kfree(login->req);
+ kfree(login->rsp);
+ kfree(login->req_buf);
+ kfree(login->rsp_buf);
+ kfree(login);
+}
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
new file mode 100644
index 00000000000..92e133a5158
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -0,0 +1,17 @@
+#ifndef ISCSI_TARGET_NEGO_H
+#define ISCSI_TARGET_NEGO_H
+
+#define DECIMAL 0
+#define HEX 1
+
+extern void convert_null_to_semi(char *, int);
+extern int extract_param(const char *, const char *, unsigned int, char *,
+ unsigned char *);
+extern struct iscsi_login *iscsi_target_init_negotiation(
+ struct iscsi_np *, struct iscsi_conn *, char *);
+extern int iscsi_target_start_negotiation(
+ struct iscsi_login *, struct iscsi_conn *);
+extern void iscsi_target_nego_release(
+ struct iscsi_login *, struct iscsi_conn *);
+
+#endif /* ISCSI_TARGET_NEGO_H */
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
new file mode 100644
index 00000000000..aeafbe0cd7d
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -0,0 +1,263 @@
+/*******************************************************************************
+ * This file contains the main functions related to Initiator Node Attributes.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_nodeattrib.h"
+
+static inline char *iscsit_na_get_initiatorname(
+ struct iscsi_node_acl *nacl)
+{
+ struct se_node_acl *se_nacl = &nacl->se_node_acl;
+
+ return &se_nacl->initiatorname[0];
+}
+
+void iscsit_set_default_node_attribues(
+ struct iscsi_node_acl *acl)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ a->dataout_timeout = NA_DATAOUT_TIMEOUT;
+ a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES;
+ a->nopin_timeout = NA_NOPIN_TIMEOUT;
+ a->nopin_response_timeout = NA_NOPIN_RESPONSE_TIMEOUT;
+ a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
+ a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
+ a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
+ a->default_erl = NA_DEFAULT_ERL;
+}
+
+extern int iscsit_na_dataout_timeout(
+ struct iscsi_node_acl *acl,
+ u32 dataout_timeout)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (dataout_timeout > NA_DATAOUT_TIMEOUT_MAX) {
+ pr_err("Requested DataOut Timeout %u larger than"
+ " maximum %u\n", dataout_timeout,
+ NA_DATAOUT_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if (dataout_timeout < NA_DATAOUT_TIMEOUT_MIX) {
+ pr_err("Requested DataOut Timeout %u smaller than"
+ " minimum %u\n", dataout_timeout,
+ NA_DATAOUT_TIMEOUT_MIX);
+ return -EINVAL;
+ }
+
+ a->dataout_timeout = dataout_timeout;
+ pr_debug("Set DataOut Timeout to %u for Initiator Node"
+ " %s\n", a->dataout_timeout, iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+extern int iscsit_na_dataout_timeout_retries(
+ struct iscsi_node_acl *acl,
+ u32 dataout_timeout_retries)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (dataout_timeout_retries > NA_DATAOUT_TIMEOUT_RETRIES_MAX) {
+ pr_err("Requested DataOut Timeout Retries %u larger"
+ " than maximum %u", dataout_timeout_retries,
+ NA_DATAOUT_TIMEOUT_RETRIES_MAX);
+ return -EINVAL;
+ } else if (dataout_timeout_retries < NA_DATAOUT_TIMEOUT_RETRIES_MIN) {
+ pr_err("Requested DataOut Timeout Retries %u smaller"
+ " than minimum %u", dataout_timeout_retries,
+ NA_DATAOUT_TIMEOUT_RETRIES_MIN);
+ return -EINVAL;
+ }
+
+ a->dataout_timeout_retries = dataout_timeout_retries;
+ pr_debug("Set DataOut Timeout Retries to %u for"
+ " Initiator Node %s\n", a->dataout_timeout_retries,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+extern int iscsit_na_nopin_timeout(
+ struct iscsi_node_acl *acl,
+ u32 nopin_timeout)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ struct se_node_acl *se_nacl = &a->nacl->se_node_acl;
+ struct se_session *se_sess;
+ u32 orig_nopin_timeout = a->nopin_timeout;
+
+ if (nopin_timeout > NA_NOPIN_TIMEOUT_MAX) {
+ pr_err("Requested NopIn Timeout %u larger than maximum"
+ " %u\n", nopin_timeout, NA_NOPIN_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if ((nopin_timeout < NA_NOPIN_TIMEOUT_MIN) &&
+ (nopin_timeout != 0)) {
+ pr_err("Requested NopIn Timeout %u smaller than"
+ " minimum %u and not 0\n", nopin_timeout,
+ NA_NOPIN_TIMEOUT_MIN);
+ return -EINVAL;
+ }
+
+ a->nopin_timeout = nopin_timeout;
+ pr_debug("Set NopIn Timeout to %u for Initiator"
+ " Node %s\n", a->nopin_timeout,
+ iscsit_na_get_initiatorname(acl));
+ /*
+ * Reenable disabled nopin_timeout timer for all iSCSI connections.
+ */
+ if (!orig_nopin_timeout) {
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+
+ spin_lock(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list,
+ conn_list) {
+ if (conn->conn_state !=
+ TARG_CONN_STATE_LOGGED_IN)
+ continue;
+
+ spin_lock(&conn->nopin_timer_lock);
+ __iscsit_start_nopin_timer(conn);
+ spin_unlock(&conn->nopin_timer_lock);
+ }
+ spin_unlock(&sess->conn_lock);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+ }
+
+ return 0;
+}
+
+extern int iscsit_na_nopin_response_timeout(
+ struct iscsi_node_acl *acl,
+ u32 nopin_response_timeout)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (nopin_response_timeout > NA_NOPIN_RESPONSE_TIMEOUT_MAX) {
+ pr_err("Requested NopIn Response Timeout %u larger"
+ " than maximum %u\n", nopin_response_timeout,
+ NA_NOPIN_RESPONSE_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if (nopin_response_timeout < NA_NOPIN_RESPONSE_TIMEOUT_MIN) {
+ pr_err("Requested NopIn Response Timeout %u smaller"
+ " than minimum %u\n", nopin_response_timeout,
+ NA_NOPIN_RESPONSE_TIMEOUT_MIN);
+ return -EINVAL;
+ }
+
+ a->nopin_response_timeout = nopin_response_timeout;
+ pr_debug("Set NopIn Response Timeout to %u for"
+ " Initiator Node %s\n", a->nopin_timeout,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+extern int iscsit_na_random_datain_pdu_offsets(
+ struct iscsi_node_acl *acl,
+ u32 random_datain_pdu_offsets)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (random_datain_pdu_offsets != 0 && random_datain_pdu_offsets != 1) {
+ pr_err("Requested Random DataIN PDU Offsets: %u not"
+ " 0 or 1\n", random_datain_pdu_offsets);
+ return -EINVAL;
+ }
+
+ a->random_datain_pdu_offsets = random_datain_pdu_offsets;
+ pr_debug("Set Random DataIN PDU Offsets to %u for"
+ " Initiator Node %s\n", a->random_datain_pdu_offsets,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+extern int iscsit_na_random_datain_seq_offsets(
+ struct iscsi_node_acl *acl,
+ u32 random_datain_seq_offsets)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (random_datain_seq_offsets != 0 && random_datain_seq_offsets != 1) {
+ pr_err("Requested Random DataIN Sequence Offsets: %u"
+ " not 0 or 1\n", random_datain_seq_offsets);
+ return -EINVAL;
+ }
+
+ a->random_datain_seq_offsets = random_datain_seq_offsets;
+ pr_debug("Set Random DataIN Sequence Offsets to %u for"
+ " Initiator Node %s\n", a->random_datain_seq_offsets,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+extern int iscsit_na_random_r2t_offsets(
+ struct iscsi_node_acl *acl,
+ u32 random_r2t_offsets)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (random_r2t_offsets != 0 && random_r2t_offsets != 1) {
+ pr_err("Requested Random R2T Offsets: %u not"
+ " 0 or 1\n", random_r2t_offsets);
+ return -EINVAL;
+ }
+
+ a->random_r2t_offsets = random_r2t_offsets;
+ pr_debug("Set Random R2T Offsets to %u for"
+ " Initiator Node %s\n", a->random_r2t_offsets,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
+
+extern int iscsit_na_default_erl(
+ struct iscsi_node_acl *acl,
+ u32 default_erl)
+{
+ struct iscsi_node_attrib *a = &acl->node_attrib;
+
+ if (default_erl != 0 && default_erl != 1 && default_erl != 2) {
+ pr_err("Requested default ERL: %u not 0, 1, or 2\n",
+ default_erl);
+ return -EINVAL;
+ }
+
+ a->default_erl = default_erl;
+ pr_debug("Set use ERL0 flag to %u for Initiator"
+ " Node %s\n", a->default_erl,
+ iscsit_na_get_initiatorname(acl));
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
new file mode 100644
index 00000000000..c970b326ef2
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -0,0 +1,14 @@
+#ifndef ISCSI_TARGET_NODEATTRIB_H
+#define ISCSI_TARGET_NODEATTRIB_H
+
+extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *);
+extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
+extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_nopin_response_timeout(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_datain_pdu_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_datain_seq_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_random_r2t_offsets(struct iscsi_node_acl *, u32);
+extern int iscsit_na_default_erl(struct iscsi_node_acl *, u32);
+
+#endif /* ISCSI_TARGET_NODEATTRIB_H */
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
new file mode 100644
index 00000000000..252e246cf51
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -0,0 +1,1905 @@
+/*******************************************************************************
+ * This file contains main functions related to iSCSI Parameter negotiation.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/slab.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_parameters.h"
+
+int iscsi_login_rx_data(
+ struct iscsi_conn *conn,
+ char *buf,
+ int length)
+{
+ int rx_got;
+ struct kvec iov;
+
+ memset(&iov, 0, sizeof(struct kvec));
+ iov.iov_len = length;
+ iov.iov_base = buf;
+
+ /*
+ * Initial Marker-less Interval.
+ * Add the values regardless of IFMarker/OFMarker, considering
+ * it may not be negotiated yet.
+ */
+ conn->of_marker += length;
+
+ rx_got = rx_data(conn, &iov, 1, length);
+ if (rx_got != length) {
+ pr_err("rx_data returned %d, expecting %d.\n",
+ rx_got, length);
+ return -1;
+ }
+
+ return 0;
+}
+
+int iscsi_login_tx_data(
+ struct iscsi_conn *conn,
+ char *pdu_buf,
+ char *text_buf,
+ int text_length)
+{
+ int length, tx_sent;
+ struct kvec iov[2];
+
+ length = (ISCSI_HDR_LEN + text_length);
+
+ memset(&iov[0], 0, 2 * sizeof(struct kvec));
+ iov[0].iov_len = ISCSI_HDR_LEN;
+ iov[0].iov_base = pdu_buf;
+ iov[1].iov_len = text_length;
+ iov[1].iov_base = text_buf;
+
+ /*
+ * Initial Marker-less Interval.
+ * Add the values regardless of IFMarker/OFMarker, considering
+ * it may not be negotiated yet.
+ */
+ conn->if_marker += length;
+
+ tx_sent = tx_data(conn, &iov[0], 2, length);
+ if (tx_sent != length) {
+ pr_err("tx_data returned %d, expecting %d.\n",
+ tx_sent, length);
+ return -1;
+ }
+
+ return 0;
+}
+
+void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops)
+{
+ pr_debug("HeaderDigest: %s\n", (conn_ops->HeaderDigest) ?
+ "CRC32C" : "None");
+ pr_debug("DataDigest: %s\n", (conn_ops->DataDigest) ?
+ "CRC32C" : "None");
+ pr_debug("MaxRecvDataSegmentLength: %u\n",
+ conn_ops->MaxRecvDataSegmentLength);
+ pr_debug("OFMarker: %s\n", (conn_ops->OFMarker) ? "Yes" : "No");
+ pr_debug("IFMarker: %s\n", (conn_ops->IFMarker) ? "Yes" : "No");
+ if (conn_ops->OFMarker)
+ pr_debug("OFMarkInt: %u\n", conn_ops->OFMarkInt);
+ if (conn_ops->IFMarker)
+ pr_debug("IFMarkInt: %u\n", conn_ops->IFMarkInt);
+}
+
+void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops)
+{
+ pr_debug("InitiatorName: %s\n", sess_ops->InitiatorName);
+ pr_debug("InitiatorAlias: %s\n", sess_ops->InitiatorAlias);
+ pr_debug("TargetName: %s\n", sess_ops->TargetName);
+ pr_debug("TargetAlias: %s\n", sess_ops->TargetAlias);
+ pr_debug("TargetPortalGroupTag: %hu\n",
+ sess_ops->TargetPortalGroupTag);
+ pr_debug("MaxConnections: %hu\n", sess_ops->MaxConnections);
+ pr_debug("InitialR2T: %s\n",
+ (sess_ops->InitialR2T) ? "Yes" : "No");
+ pr_debug("ImmediateData: %s\n", (sess_ops->ImmediateData) ?
+ "Yes" : "No");
+ pr_debug("MaxBurstLength: %u\n", sess_ops->MaxBurstLength);
+ pr_debug("FirstBurstLength: %u\n", sess_ops->FirstBurstLength);
+ pr_debug("DefaultTime2Wait: %hu\n", sess_ops->DefaultTime2Wait);
+ pr_debug("DefaultTime2Retain: %hu\n",
+ sess_ops->DefaultTime2Retain);
+ pr_debug("MaxOutstandingR2T: %hu\n",
+ sess_ops->MaxOutstandingR2T);
+ pr_debug("DataPDUInOrder: %s\n",
+ (sess_ops->DataPDUInOrder) ? "Yes" : "No");
+ pr_debug("DataSequenceInOrder: %s\n",
+ (sess_ops->DataSequenceInOrder) ? "Yes" : "No");
+ pr_debug("ErrorRecoveryLevel: %hu\n",
+ sess_ops->ErrorRecoveryLevel);
+ pr_debug("SessionType: %s\n", (sess_ops->SessionType) ?
+ "Discovery" : "Normal");
+}
+
+void iscsi_print_params(struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
+ list_for_each_entry(param, &param_list->param_list, p_list)
+ pr_debug("%s: %s\n", param->name, param->value);
+}
+
+static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *param_list,
+ char *name, char *value, u8 phase, u8 scope, u8 sender,
+ u16 type_range, u8 use)
+{
+ struct iscsi_param *param = NULL;
+
+ param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
+ if (!param) {
+ pr_err("Unable to allocate memory for parameter.\n");
+ goto out;
+ }
+ INIT_LIST_HEAD(&param->p_list);
+
+ param->name = kzalloc(strlen(name) + 1, GFP_KERNEL);
+ if (!param->name) {
+ pr_err("Unable to allocate memory for parameter name.\n");
+ goto out;
+ }
+
+ param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+ if (!param->value) {
+ pr_err("Unable to allocate memory for parameter value.\n");
+ goto out;
+ }
+
+ memcpy(param->name, name, strlen(name));
+ param->name[strlen(name)] = '\0';
+ memcpy(param->value, value, strlen(value));
+ param->value[strlen(value)] = '\0';
+ param->phase = phase;
+ param->scope = scope;
+ param->sender = sender;
+ param->use = use;
+ param->type_range = type_range;
+
+ switch (param->type_range) {
+ case TYPERANGE_BOOL_AND:
+ param->type = TYPE_BOOL_AND;
+ break;
+ case TYPERANGE_BOOL_OR:
+ param->type = TYPE_BOOL_OR;
+ break;
+ case TYPERANGE_0_TO_2:
+ case TYPERANGE_0_TO_3600:
+ case TYPERANGE_0_TO_32767:
+ case TYPERANGE_0_TO_65535:
+ case TYPERANGE_1_TO_65535:
+ case TYPERANGE_2_TO_3600:
+ case TYPERANGE_512_TO_16777215:
+ param->type = TYPE_NUMBER;
+ break;
+ case TYPERANGE_AUTH:
+ case TYPERANGE_DIGEST:
+ param->type = TYPE_VALUE_LIST | TYPE_STRING;
+ break;
+ case TYPERANGE_MARKINT:
+ param->type = TYPE_NUMBER_RANGE;
+ param->type_range |= TYPERANGE_1_TO_65535;
+ break;
+ case TYPERANGE_ISCSINAME:
+ case TYPERANGE_SESSIONTYPE:
+ case TYPERANGE_TARGETADDRESS:
+ case TYPERANGE_UTF8:
+ param->type = TYPE_STRING;
+ break;
+ default:
+ pr_err("Unknown type_range 0x%02x\n",
+ param->type_range);
+ goto out;
+ }
+ list_add_tail(&param->p_list, &param_list->param_list);
+
+ return param;
+out:
+ if (param) {
+ kfree(param->value);
+ kfree(param->name);
+ kfree(param);
+ }
+
+ return NULL;
+}
+
+/* #warning Add extension keys */
+int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
+{
+ struct iscsi_param *param = NULL;
+ struct iscsi_param_list *pl;
+
+ pl = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
+ if (!pl) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_param_list.\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&pl->param_list);
+ INIT_LIST_HEAD(&pl->extra_response_list);
+
+ /*
+ * The format for setting the initial parameter definitions is:
+ *
+ * Parameter name:
+ * Initial value:
+ * Allowable phase:
+ * Scope:
+ * Allowable senders:
+ * Typerange:
+ * Use:
+ */
+ param = iscsi_set_default_param(pl, AUTHMETHOD, INITIAL_AUTHMETHOD,
+ PHASE_SECURITY, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_AUTH, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, HEADERDIGEST, INITIAL_HEADERDIGEST,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_DIGEST, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DATADIGEST, INITIAL_DATADIGEST,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_DIGEST, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXCONNECTIONS,
+ INITIAL_MAXCONNECTIONS, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, SENDTARGETS, INITIAL_SENDTARGETS,
+ PHASE_FFP0, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+ TYPERANGE_UTF8, 0);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETNAME, INITIAL_TARGETNAME,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_ISCSINAME, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, INITIATORNAME,
+ INITIAL_INITIATORNAME, PHASE_DECLARATIVE,
+ SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+ TYPERANGE_ISCSINAME, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETALIAS, INITIAL_TARGETALIAS,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
+ TYPERANGE_UTF8, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, INITIATORALIAS,
+ INITIAL_INITIATORALIAS, PHASE_DECLARATIVE,
+ SCOPE_SESSION_WIDE, SENDER_INITIATOR, TYPERANGE_UTF8,
+ USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETADDRESS,
+ INITIAL_TARGETADDRESS, PHASE_DECLARATIVE,
+ SCOPE_SESSION_WIDE, SENDER_TARGET,
+ TYPERANGE_TARGETADDRESS, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, TARGETPORTALGROUPTAG,
+ INITIAL_TARGETPORTALGROUPTAG,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_TARGET,
+ TYPERANGE_0_TO_65535, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, INITIALR2T, INITIAL_INITIALR2T,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, IMMEDIATEDATA,
+ INITIAL_IMMEDIATEDATA, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_AND,
+ USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXRECVDATASEGMENTLENGTH,
+ INITIAL_MAXRECVDATASEGMENTLENGTH,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215, USE_ALL);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXBURSTLENGTH,
+ INITIAL_MAXBURSTLENGTH, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, FIRSTBURSTLENGTH,
+ INITIAL_FIRSTBURSTLENGTH,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_512_TO_16777215, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DEFAULTTIME2WAIT,
+ INITIAL_DEFAULTTIME2WAIT,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DEFAULTTIME2RETAIN,
+ INITIAL_DEFAULTTIME2RETAIN,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_0_TO_3600, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, MAXOUTSTANDINGR2T,
+ INITIAL_MAXOUTSTANDINGR2T,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_1_TO_65535, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DATAPDUINORDER,
+ INITIAL_DATAPDUINORDER, PHASE_OPERATIONAL,
+ SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_OR,
+ USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, DATASEQUENCEINORDER,
+ INITIAL_DATASEQUENCEINORDER,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_BOOL_OR, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, ERRORRECOVERYLEVEL,
+ INITIAL_ERRORRECOVERYLEVEL,
+ PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+ TYPERANGE_0_TO_2, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, SESSIONTYPE, INITIAL_SESSIONTYPE,
+ PHASE_DECLARATIVE, SCOPE_SESSION_WIDE, SENDER_INITIATOR,
+ TYPERANGE_SESSIONTYPE, USE_LEADING_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, IFMARKER, INITIAL_IFMARKER,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, OFMARKER, INITIAL_OFMARKER,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_BOOL_AND, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_MARKINT, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT,
+ PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+ TYPERANGE_MARKINT, USE_INITIAL_ONLY);
+ if (!param)
+ goto out;
+
+ *param_list_ptr = pl;
+ return 0;
+out:
+ iscsi_release_param_list(pl);
+ return -1;
+}
+
+int iscsi_set_keys_to_negotiate(
+ int sessiontype,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ param->state = 0;
+ if (!strcmp(param->name, AUTHMETHOD)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, HEADERDIGEST)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DATADIGEST)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, MAXCONNECTIONS)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, TARGETNAME)) {
+ continue;
+ } else if (!strcmp(param->name, INITIATORNAME)) {
+ continue;
+ } else if (!strcmp(param->name, TARGETALIAS)) {
+ if (param->value)
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, INITIATORALIAS)) {
+ continue;
+ } else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, INITIALR2T)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, IMMEDIATEDATA)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, MAXBURSTLENGTH)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DATAPDUINORDER)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, SESSIONTYPE)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, IFMARKER)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, OFMARKER)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, IFMARKINT)) {
+ SET_PSTATE_NEGOTIATE(param);
+ } else if (!strcmp(param->name, OFMARKINT)) {
+ SET_PSTATE_NEGOTIATE(param);
+ }
+ }
+
+ return 0;
+}
+
+int iscsi_set_keys_irrelevant_for_discovery(
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!strcmp(param->name, MAXCONNECTIONS))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, INITIALR2T))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, IMMEDIATEDATA))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, MAXBURSTLENGTH))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, FIRSTBURSTLENGTH))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, MAXOUTSTANDINGR2T))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, DATAPDUINORDER))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, DATASEQUENCEINORDER))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, ERRORRECOVERYLEVEL))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, DEFAULTTIME2WAIT))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, DEFAULTTIME2RETAIN))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, IFMARKER))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, OFMARKER))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, IFMARKINT))
+ param->state &= ~PSTATE_NEGOTIATE;
+ else if (!strcmp(param->name, OFMARKINT))
+ param->state &= ~PSTATE_NEGOTIATE;
+ }
+
+ return 0;
+}
+
+int iscsi_copy_param_list(
+ struct iscsi_param_list **dst_param_list,
+ struct iscsi_param_list *src_param_list,
+ int leading)
+{
+ struct iscsi_param *new_param = NULL, *param = NULL;
+ struct iscsi_param_list *param_list = NULL;
+
+ param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
+ if (!param_list) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_param_list.\n");
+ goto err_out;
+ }
+ INIT_LIST_HEAD(&param_list->param_list);
+ INIT_LIST_HEAD(&param_list->extra_response_list);
+
+ list_for_each_entry(param, &src_param_list->param_list, p_list) {
+ if (!leading && (param->scope & SCOPE_SESSION_WIDE)) {
+ if ((strcmp(param->name, "TargetName") != 0) &&
+ (strcmp(param->name, "InitiatorName") != 0) &&
+ (strcmp(param->name, "TargetPortalGroupTag") != 0))
+ continue;
+ }
+
+ new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
+ if (!new_param) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_param.\n");
+ goto err_out;
+ }
+
+ new_param->set_param = param->set_param;
+ new_param->phase = param->phase;
+ new_param->scope = param->scope;
+ new_param->sender = param->sender;
+ new_param->type = param->type;
+ new_param->use = param->use;
+ new_param->type_range = param->type_range;
+
+ new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL);
+ if (!new_param->name) {
+ pr_err("Unable to allocate memory for"
+ " parameter name.\n");
+ goto err_out;
+ }
+
+ new_param->value = kzalloc(strlen(param->value) + 1,
+ GFP_KERNEL);
+ if (!new_param->value) {
+ pr_err("Unable to allocate memory for"
+ " parameter value.\n");
+ goto err_out;
+ }
+
+ memcpy(new_param->name, param->name, strlen(param->name));
+ new_param->name[strlen(param->name)] = '\0';
+ memcpy(new_param->value, param->value, strlen(param->value));
+ new_param->value[strlen(param->value)] = '\0';
+
+ list_add_tail(&new_param->p_list, &param_list->param_list);
+ }
+
+ if (!list_empty(&param_list->param_list))
+ *dst_param_list = param_list;
+ else {
+ pr_err("No parameters allocated.\n");
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ iscsi_release_param_list(param_list);
+ return -1;
+}
+
+static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
+{
+ struct iscsi_extra_response *er, *er_tmp;
+
+ list_for_each_entry_safe(er, er_tmp, &param_list->extra_response_list,
+ er_list) {
+ list_del(&er->er_list);
+ kfree(er);
+ }
+}
+
+void iscsi_release_param_list(struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param, *param_tmp;
+
+ list_for_each_entry_safe(param, param_tmp, &param_list->param_list,
+ p_list) {
+ list_del(&param->p_list);
+
+ kfree(param->name);
+ param->name = NULL;
+ kfree(param->value);
+ param->value = NULL;
+ kfree(param);
+ param = NULL;
+ }
+
+ iscsi_release_extra_responses(param_list);
+
+ kfree(param_list);
+}
+
+struct iscsi_param *iscsi_find_param_from_key(
+ char *key,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
+ if (!key || !param_list) {
+ pr_err("Key or parameter list pointer is NULL.\n");
+ return NULL;
+ }
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!strcmp(key, param->name))
+ return param;
+ }
+
+ pr_err("Unable to locate key \"%s\".\n", key);
+ return NULL;
+}
+
+int iscsi_extract_key_value(char *textbuf, char **key, char **value)
+{
+ *value = strchr(textbuf, '=');
+ if (!*value) {
+ pr_err("Unable to locate \"=\" seperator for key,"
+ " ignoring request.\n");
+ return -1;
+ }
+
+ *key = textbuf;
+ **value = '\0';
+ *value = *value + 1;
+
+ return 0;
+}
+
+int iscsi_update_param_value(struct iscsi_param *param, char *value)
+{
+ kfree(param->value);
+
+ param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+ if (!param->value) {
+ pr_err("Unable to allocate memory for value.\n");
+ return -1;
+ }
+
+ memcpy(param->value, value, strlen(value));
+ param->value[strlen(value)] = '\0';
+
+ pr_debug("iSCSI Parameter updated to %s=%s\n",
+ param->name, param->value);
+ return 0;
+}
+
+static int iscsi_add_notunderstood_response(
+ char *key,
+ char *value,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_extra_response *extra_response;
+
+ if (strlen(value) > VALUE_MAXLEN) {
+ pr_err("Value for notunderstood key \"%s\" exceeds %d,"
+ " protocol error.\n", key, VALUE_MAXLEN);
+ return -1;
+ }
+
+ extra_response = kzalloc(sizeof(struct iscsi_extra_response), GFP_KERNEL);
+ if (!extra_response) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_extra_response.\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&extra_response->er_list);
+
+ strncpy(extra_response->key, key, strlen(key) + 1);
+ strncpy(extra_response->value, NOTUNDERSTOOD,
+ strlen(NOTUNDERSTOOD) + 1);
+
+ list_add_tail(&extra_response->er_list,
+ &param_list->extra_response_list);
+ return 0;
+}
+
+static int iscsi_check_for_auth_key(char *key)
+{
+ /*
+ * RFC 1994
+ */
+ if (!strcmp(key, "CHAP_A") || !strcmp(key, "CHAP_I") ||
+ !strcmp(key, "CHAP_C") || !strcmp(key, "CHAP_N") ||
+ !strcmp(key, "CHAP_R"))
+ return 1;
+
+ /*
+ * RFC 2945
+ */
+ if (!strcmp(key, "SRP_U") || !strcmp(key, "SRP_N") ||
+ !strcmp(key, "SRP_g") || !strcmp(key, "SRP_s") ||
+ !strcmp(key, "SRP_A") || !strcmp(key, "SRP_B") ||
+ !strcmp(key, "SRP_M") || !strcmp(key, "SRP_HM"))
+ return 1;
+
+ return 0;
+}
+
+static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
+{
+ if (IS_TYPE_BOOL_AND(param)) {
+ if (!strcmp(param->value, NO))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ } else if (IS_TYPE_BOOL_OR(param)) {
+ if (!strcmp(param->value, YES))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, IMMEDIATEDATA))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ } else if (IS_TYPE_NUMBER(param)) {
+ if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ /*
+ * The GlobalSAN iSCSI Initiator for MacOSX does
+ * not respond to MaxBurstLength, FirstBurstLength,
+ * DefaultTime2Wait or DefaultTime2Retain parameter keys.
+ * So, we set them to 'reply optional' here, and assume the
+ * defaults from iscsi_parameters.h if the initiator
+ * is not RFC compliant and the keys are not negotiated.
+ */
+ if (!strcmp(param->name, MAXBURSTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ if (!strcmp(param->name, FIRSTBURSTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ if (!strcmp(param->name, DEFAULTTIME2WAIT))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ if (!strcmp(param->name, DEFAULTTIME2RETAIN))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, MAXCONNECTIONS))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ } else if (IS_PHASE_DECLARATIVE(param))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+}
+
+static int iscsi_check_boolean_value(struct iscsi_param *param, char *value)
+{
+ if (strcmp(value, YES) && strcmp(value, NO)) {
+ pr_err("Illegal value for \"%s\", must be either"
+ " \"%s\" or \"%s\".\n", param->name, YES, NO);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_ptr)
+{
+ char *tmpptr;
+ int value = 0;
+
+ value = simple_strtoul(value_ptr, &tmpptr, 0);
+
+/* #warning FIXME: Fix this */
+#if 0
+ if (strspn(tmpptr, WHITE_SPACE) != strlen(tmpptr)) {
+ pr_err("Illegal value \"%s\" for \"%s\".\n",
+ value_ptr, param->name);
+ return -1;
+ }
+#endif
+ if (IS_TYPERANGE_0_TO_2(param)) {
+ if ((value < 0) || (value > 2)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 0 and 2.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_0_TO_3600(param)) {
+ if ((value < 0) || (value > 3600)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 0 and 3600.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_0_TO_32767(param)) {
+ if ((value < 0) || (value > 32767)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 0 and 32767.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_0_TO_65535(param)) {
+ if ((value < 0) || (value > 65535)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 0 and 65535.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_1_TO_65535(param)) {
+ if ((value < 1) || (value > 65535)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 1 and 65535.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_2_TO_3600(param)) {
+ if ((value < 2) || (value > 3600)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 2 and 3600.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+ if (IS_TYPERANGE_512_TO_16777215(param)) {
+ if ((value < 512) || (value > 16777215)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " between 512 and 16777215.\n", param->name);
+ return -1;
+ }
+ return 0;
+ }
+
+ return 0;
+}
+
+static int iscsi_check_numerical_range_value(struct iscsi_param *param, char *value)
+{
+ char *left_val_ptr = NULL, *right_val_ptr = NULL;
+ char *tilde_ptr = NULL, *tmp_ptr = NULL;
+ u32 left_val, right_val, local_left_val, local_right_val;
+
+ if (strcmp(param->name, IFMARKINT) &&
+ strcmp(param->name, OFMARKINT)) {
+ pr_err("Only parameters \"%s\" or \"%s\" may contain a"
+ " numerical range value.\n", IFMARKINT, OFMARKINT);
+ return -1;
+ }
+
+ if (IS_PSTATE_PROPOSER(param))
+ return 0;
+
+ tilde_ptr = strchr(value, '~');
+ if (!tilde_ptr) {
+ pr_err("Unable to locate numerical range indicator"
+ " \"~\" for \"%s\".\n", param->name);
+ return -1;
+ }
+ *tilde_ptr = '\0';
+
+ left_val_ptr = value;
+ right_val_ptr = value + strlen(left_val_ptr) + 1;
+
+ if (iscsi_check_numerical_value(param, left_val_ptr) < 0)
+ return -1;
+ if (iscsi_check_numerical_value(param, right_val_ptr) < 0)
+ return -1;
+
+ left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
+ right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
+ *tilde_ptr = '~';
+
+ if (right_val < left_val) {
+ pr_err("Numerical range for parameter \"%s\" contains"
+ " a right value which is less than the left.\n",
+ param->name);
+ return -1;
+ }
+
+ /*
+ * For now, enforce reasonable defaults for [I,O]FMarkInt.
+ */
+ tilde_ptr = strchr(param->value, '~');
+ if (!tilde_ptr) {
+ pr_err("Unable to locate numerical range indicator"
+ " \"~\" for \"%s\".\n", param->name);
+ return -1;
+ }
+ *tilde_ptr = '\0';
+
+ left_val_ptr = param->value;
+ right_val_ptr = param->value + strlen(left_val_ptr) + 1;
+
+ local_left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
+ local_right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
+ *tilde_ptr = '~';
+
+ if (param->set_param) {
+ if ((left_val < local_left_val) ||
+ (right_val < local_left_val)) {
+ pr_err("Passed value range \"%u~%u\" is below"
+ " minimum left value \"%u\" for key \"%s\","
+ " rejecting.\n", left_val, right_val,
+ local_left_val, param->name);
+ return -1;
+ }
+ } else {
+ if ((left_val < local_left_val) &&
+ (right_val < local_left_val)) {
+ pr_err("Received value range \"%u~%u\" is"
+ " below minimum left value \"%u\" for key"
+ " \"%s\", rejecting.\n", left_val, right_val,
+ local_left_val, param->name);
+ SET_PSTATE_REJECT(param);
+ if (iscsi_update_param_value(param, REJECT) < 0)
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *value)
+{
+ if (IS_PSTATE_PROPOSER(param))
+ return 0;
+
+ if (IS_TYPERANGE_AUTH_PARAM(param)) {
+ if (strcmp(value, KRB5) && strcmp(value, SPKM1) &&
+ strcmp(value, SPKM2) && strcmp(value, SRP) &&
+ strcmp(value, CHAP) && strcmp(value, NONE)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " \"%s\", \"%s\", \"%s\", \"%s\", \"%s\""
+ " or \"%s\".\n", param->name, KRB5,
+ SPKM1, SPKM2, SRP, CHAP, NONE);
+ return -1;
+ }
+ }
+ if (IS_TYPERANGE_DIGEST_PARAM(param)) {
+ if (strcmp(value, CRC32C) && strcmp(value, NONE)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " \"%s\" or \"%s\".\n", param->name,
+ CRC32C, NONE);
+ return -1;
+ }
+ }
+ if (IS_TYPERANGE_SESSIONTYPE(param)) {
+ if (strcmp(value, DISCOVERY) && strcmp(value, NORMAL)) {
+ pr_err("Illegal value for \"%s\", must be"
+ " \"%s\" or \"%s\".\n", param->name,
+ DISCOVERY, NORMAL);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function is used to pick a value from a numerical range key;
+ * currently it just returns the lesser of the two right-hand values.
+ */
+static char *iscsi_get_value_from_number_range(
+ struct iscsi_param *param,
+ char *value)
+{
+ char *end_ptr, *tilde_ptr1 = NULL, *tilde_ptr2 = NULL;
+ u32 acceptor_right_value, proposer_right_value;
+
+ tilde_ptr1 = strchr(value, '~');
+ if (!tilde_ptr1)
+ return NULL;
+ *tilde_ptr1++ = '\0';
+ proposer_right_value = simple_strtoul(tilde_ptr1, &end_ptr, 0);
+
+ tilde_ptr2 = strchr(param->value, '~');
+ if (!tilde_ptr2)
+ return NULL;
+ *tilde_ptr2++ = '\0';
+ acceptor_right_value = simple_strtoul(tilde_ptr2, &end_ptr, 0);
+
+ return (acceptor_right_value >= proposer_right_value) ?
+ tilde_ptr1 : tilde_ptr2;
+}
+
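+/*
+ * Walk the proposer's comma separated value list and return the first
+ * entry that also appears in the acceptor's local list, or NULL when
+ * the two lists share no common value.
+ */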
+static char *iscsi_check_valuelist_for_support(
+ struct iscsi_param *param,
+ char *value)
+{
+ char *tmp1 = NULL, *tmp2 = NULL;
+ char *acceptor_values = NULL, *proposer_values = NULL;
+
+ acceptor_values = param->value;
+ proposer_values = value;
+
+ do {
+ if (!proposer_values)
+ return NULL;
+ tmp1 = strchr(proposer_values, ',');
+ if (tmp1)
+ *tmp1 = '\0';
+ acceptor_values = param->value;
+ do {
+ if (!acceptor_values) {
+ if (tmp1)
+ *tmp1 = ',';
+ return NULL;
+ }
+ tmp2 = strchr(acceptor_values, ',');
+ if (tmp2)
+ *tmp2 = '\0';
+ if (!acceptor_values || !proposer_values) {
+ if (tmp1)
+ *tmp1 = ',';
+ if (tmp2)
+ *tmp2 = ',';
+ return NULL;
+ }
+ if (!strcmp(acceptor_values, proposer_values)) {
+ if (tmp2)
+ *tmp2 = ',';
+ goto out;
+ }
+ if (tmp2)
+ *tmp2++ = ',';
+
+ acceptor_values = tmp2;
+ if (!acceptor_values)
+ break;
+ } while (acceptor_values);
+ if (tmp1)
+ *tmp1++ = ',';
+ proposer_values = tmp1;
+ } while (proposer_values);
+
+out:
+ return proposer_values;
+}
+
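+/*
+ * Negotiate a key offered by the remote proposer: apply the boolean
+ * AND/OR rules, pick the negotiated numerical value, intersect number
+ * ranges and value lists, and store the result in param->value.
+ */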
+static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value)
+{
+ u8 acceptor_boolean_value = 0, proposer_boolean_value = 0;
+ char *negotiated_value = NULL;
+
+ if (IS_PSTATE_ACCEPTOR(param)) {
+ pr_err("Received key \"%s\" twice, protocol error.\n",
+ param->name);
+ return -1;
+ }
+
+ if (IS_PSTATE_REJECT(param))
+ return 0;
+
+ if (IS_TYPE_BOOL_AND(param)) {
+ if (!strcmp(value, YES))
+ proposer_boolean_value = 1;
+ if (!strcmp(param->value, YES))
+ acceptor_boolean_value = 1;
+ if (!(acceptor_boolean_value && proposer_boolean_value)) {
+ if (iscsi_update_param_value(param, NO) < 0)
+ return -1;
+ if (!proposer_boolean_value)
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
+ } else if (IS_TYPE_BOOL_OR(param)) {
+ if (!strcmp(value, YES))
+ proposer_boolean_value = 1;
+ if (!strcmp(param->value, YES))
+ acceptor_boolean_value = 1;
+ if (acceptor_boolean_value || proposer_boolean_value) {
+ if (iscsi_update_param_value(param, YES) < 0)
+ return -1;
+ if (proposer_boolean_value)
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
+ } else if (IS_TYPE_NUMBER(param)) {
+ char *tmpptr, buf[10];
+ u32 acceptor_value = simple_strtoul(param->value, &tmpptr, 0);
+ u32 proposer_value = simple_strtoul(value, &tmpptr, 0);
+
+ memset(buf, 0, 10);
+
+ if (!strcmp(param->name, MAXCONNECTIONS) ||
+ !strcmp(param->name, MAXBURSTLENGTH) ||
+ !strcmp(param->name, FIRSTBURSTLENGTH) ||
+ !strcmp(param->name, MAXOUTSTANDINGR2T) ||
+ !strcmp(param->name, DEFAULTTIME2RETAIN) ||
+ !strcmp(param->name, ERRORRECOVERYLEVEL)) {
+ if (proposer_value > acceptor_value) {
+ sprintf(buf, "%u", acceptor_value);
+ if (iscsi_update_param_value(param,
+ &buf[0]) < 0)
+ return -1;
+ } else {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ }
+ } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+ if (acceptor_value > proposer_value) {
+ sprintf(buf, "%u", acceptor_value);
+ if (iscsi_update_param_value(param,
+ &buf[0]) < 0)
+ return -1;
+ } else {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ }
+ } else {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ }
+
+ if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ } else if (IS_TYPE_NUMBER_RANGE(param)) {
+ negotiated_value = iscsi_get_value_from_number_range(
+ param, value);
+ if (!negotiated_value)
+ return -1;
+ if (iscsi_update_param_value(param, negotiated_value) < 0)
+ return -1;
+ } else if (IS_TYPE_VALUE_LIST(param)) {
+ negotiated_value = iscsi_check_valuelist_for_support(
+ param, value);
+ if (!negotiated_value) {
+ pr_err("Proposer's value list \"%s\" contains"
+ " no valid values from Acceptor's value list"
+ " \"%s\".\n", value, param->value);
+ return -1;
+ }
+ if (iscsi_update_param_value(param, negotiated_value) < 0)
+ return -1;
+ } else if (IS_PHASE_DECLARATIVE(param)) {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
+
+ return 0;
+}
+
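+/*
+ * Validate the remote side's response to a key this node proposed
+ * (range bounds for number ranges, no "~" or "," in responses) and
+ * store the returned value.
+ */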
+static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
+{
+ if (IS_PSTATE_RESPONSE_GOT(param)) {
+ pr_err("Received key \"%s\" twice, protocol error.\n",
+ param->name);
+ return -1;
+ }
+
+ if (IS_TYPE_NUMBER_RANGE(param)) {
+ u32 left_val = 0, right_val = 0, received_value = 0;
+ char *left_val_ptr = NULL, *right_val_ptr = NULL;
+ char *tilde_ptr = NULL, *tmp_ptr = NULL;
+
+ if (!strcmp(value, IRRELEVANT) || !strcmp(value, REJECT)) {
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+ return 0;
+ }
+
+ tilde_ptr = strchr(value, '~');
+ if (tilde_ptr) {
+ pr_err("Illegal \"~\" in response for \"%s\".\n",
+ param->name);
+ return -1;
+ }
+ tilde_ptr = strchr(param->value, '~');
+ if (!tilde_ptr) {
+ pr_err("Unable to locate numerical range"
+ " indicator \"~\" for \"%s\".\n", param->name);
+ return -1;
+ }
+ *tilde_ptr = '\0';
+
+ left_val_ptr = param->value;
+ right_val_ptr = param->value + strlen(left_val_ptr) + 1;
+ left_val = simple_strtoul(left_val_ptr, &tmp_ptr, 0);
+ right_val = simple_strtoul(right_val_ptr, &tmp_ptr, 0);
+ received_value = simple_strtoul(value, &tmp_ptr, 0);
+
+ *tilde_ptr = '~';
+
+ if ((received_value < left_val) ||
+ (received_value > right_val)) {
+ pr_err("Illegal response \"%s=%u\", value must"
+ " be between %u and %u.\n", param->name,
+ received_value, left_val, right_val);
+ return -1;
+ }
+ } else if (IS_TYPE_VALUE_LIST(param)) {
+ char *comma_ptr = NULL, *tmp_ptr = NULL;
+
+ comma_ptr = strchr(value, ',');
+ if (comma_ptr) {
+ pr_err("Illegal \",\" in response for \"%s\".\n",
+ param->name);
+ return -1;
+ }
+
+ tmp_ptr = iscsi_check_valuelist_for_support(param, value);
+ if (!tmp_ptr)
+ return -1;
+ }
+
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+
+ return 0;
+}
+
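+/*
+ * Top level value sanity check: handle Reject, Irrelevant and
+ * NotUnderstood, then split value lists on "," and dispatch each
+ * element to the per-type checkers above.
+ */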
+static int iscsi_check_value(struct iscsi_param *param, char *value)
+{
+ char *comma_ptr = NULL;
+
+ if (!strcmp(value, REJECT)) {
+ if (!strcmp(param->name, IFMARKINT) ||
+ !strcmp(param->name, OFMARKINT)) {
+ /*
+ * Reject is not fatal for [I,O]FMarkInt, and causes
+ * [I,O]FMarker to be reset to No. (See iSCSI v20 A.3.2)
+ */
+ SET_PSTATE_REJECT(param);
+ return 0;
+ }
+ pr_err("Received %s=%s\n", param->name, value);
+ return -1;
+ }
+ if (!strcmp(value, IRRELEVANT)) {
+ pr_debug("Received %s=%s\n", param->name, value);
+ SET_PSTATE_IRRELEVANT(param);
+ return 0;
+ }
+ if (!strcmp(value, NOTUNDERSTOOD)) {
+ if (!IS_PSTATE_PROPOSER(param)) {
+ pr_err("Received illegal offer %s=%s\n",
+ param->name, value);
+ return -1;
+ }
+
+/* #warning FIXME: Add check for X-ExtensionKey here */
+ pr_err("Standard iSCSI key \"%s\" cannot be answered"
+ " with \"%s\", protocol error.\n", param->name, value);
+ return -1;
+ }
+
+ do {
+ comma_ptr = NULL;
+ comma_ptr = strchr(value, ',');
+
+ if (comma_ptr && !IS_TYPE_VALUE_LIST(param)) {
+ pr_err("Detected value seperator \",\", but"
+ " key \"%s\" does not allow a value list,"
+ " protocol error.\n", param->name);
+ return -1;
+ }
+ if (comma_ptr)
+ *comma_ptr = '\0';
+
+ if (strlen(value) > VALUE_MAXLEN) {
+ pr_err("Value for key \"%s\" exceeds %d,"
+ " protocol error.\n", param->name,
+ VALUE_MAXLEN);
+ return -1;
+ }
+
+ if (IS_TYPE_BOOL_AND(param) || IS_TYPE_BOOL_OR(param)) {
+ if (iscsi_check_boolean_value(param, value) < 0)
+ return -1;
+ } else if (IS_TYPE_NUMBER(param)) {
+ if (iscsi_check_numerical_value(param, value) < 0)
+ return -1;
+ } else if (IS_TYPE_NUMBER_RANGE(param)) {
+ if (iscsi_check_numerical_range_value(param, value) < 0)
+ return -1;
+ } else if (IS_TYPE_STRING(param) || IS_TYPE_VALUE_LIST(param)) {
+ if (iscsi_check_string_or_list_value(param, value) < 0)
+ return -1;
+ } else {
+ pr_err("Huh? 0x%02x\n", param->type);
+ return -1;
+ }
+
+ if (comma_ptr)
+ *comma_ptr++ = ',';
+
+ value = comma_ptr;
+ } while (value);
+
+ return 0;
+}
+
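+/*
+ * Like iscsi_check_key() below, but only verifies key length and
+ * sender direction; the duplicate-offer and negotiation phase checks
+ * are skipped.
+ */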
+static struct iscsi_param *__iscsi_check_key(
+ char *key,
+ int sender,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+
+ if (strlen(key) > KEY_MAXLEN) {
+ pr_err("Length of key name \"%s\" exceeds %d.\n",
+ key, KEY_MAXLEN);
+ return NULL;
+ }
+
+ param = iscsi_find_param_from_key(key, param_list);
+ if (!param)
+ return NULL;
+
+ if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
+ pr_err("Key \"%s\" may not be sent to %s,"
+ " protocol error.\n", param->name,
+ (sender & SENDER_RECEIVER) ? "target" : "initiator");
+ return NULL;
+ }
+
+ if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
+ pr_err("Key \"%s\" may not be sent to %s,"
+ " protocol error.\n", param->name,
+ (sender & SENDER_RECEIVER) ? "initiator" : "target");
+ return NULL;
+ }
+
+ return param;
+}
+
+static struct iscsi_param *iscsi_check_key(
+ char *key,
+ int phase,
+ int sender,
+ struct iscsi_param_list *param_list)
+{
+ struct iscsi_param *param;
+ /*
+ * Key name length must not exceed 63 bytes. (See iSCSI v20 5.1)
+ */
+ if (strlen(key) > KEY_MAXLEN) {
+ pr_err("Length of key name \"%s\" exceeds %d.\n",
+ key, KEY_MAXLEN);
+ return NULL;
+ }
+
+ param = iscsi_find_param_from_key(key, param_list);
+ if (!param)
+ return NULL;
+
+ if ((sender & SENDER_INITIATOR) && !IS_SENDER_INITIATOR(param)) {
+ pr_err("Key \"%s\" may not be sent to %s,"
+ " protocol error.\n", param->name,
+ (sender & SENDER_RECEIVER) ? "target" : "initiator");
+ return NULL;
+ }
+ if ((sender & SENDER_TARGET) && !IS_SENDER_TARGET(param)) {
+ pr_err("Key \"%s\" may not be sent to %s,"
+ " protocol error.\n", param->name,
+ (sender & SENDER_RECEIVER) ? "initiator" : "target");
+ return NULL;
+ }
+
+ if (IS_PSTATE_ACCEPTOR(param)) {
+ pr_err("Key \"%s\" received twice, protocol error.\n",
+ key);
+ return NULL;
+ }
+
+ if (!phase)
+ return param;
+
+ if (!(param->phase & phase)) {
+ pr_err("Key \"%s\" may not be negotiated during ",
+ param->name);
+ switch (phase) {
+ case PHASE_SECURITY:
+ pr_debug("Security phase.\n");
+ break;
+ case PHASE_OPERATIONAL:
+ pr_debug("Operational phase.\n");
+ default:
+ pr_debug("Unknown phase.\n");
+ }
+ return NULL;
+ }
+
+ return param;
+}
+
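+/*
+ * Enforce cross-key constraints once negotiation is complete: reset
+ * MaxOutstandingR2T and MaxConnections where required, clamp
+ * FirstBurstLength to MaxBurstLength, and clean up the marker keys.
+ */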
+static int iscsi_enforce_integrity_rules(
+ u8 phase,
+ struct iscsi_param_list *param_list)
+{
+ char *tmpptr;
+ u8 DataSequenceInOrder = 0;
+ u8 ErrorRecoveryLevel = 0, SessionType = 0;
+ u8 IFMarker = 0, OFMarker = 0;
+ u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0;
+ u32 FirstBurstLength = 0, MaxBurstLength = 0;
+ struct iscsi_param *param = NULL;
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!(param->phase & phase))
+ continue;
+ if (!strcmp(param->name, SESSIONTYPE))
+ if (!strcmp(param->value, NORMAL))
+ SessionType = 1;
+ if (!strcmp(param->name, ERRORRECOVERYLEVEL))
+ ErrorRecoveryLevel = simple_strtoul(param->value,
+ &tmpptr, 0);
+ if (!strcmp(param->name, DATASEQUENCEINORDER))
+ if (!strcmp(param->value, YES))
+ DataSequenceInOrder = 1;
+ if (!strcmp(param->name, MAXBURSTLENGTH))
+ MaxBurstLength = simple_strtoul(param->value,
+ &tmpptr, 0);
+ if (!strcmp(param->name, IFMARKER))
+ if (!strcmp(param->value, YES))
+ IFMarker = 1;
+ if (!strcmp(param->name, OFMARKER))
+ if (!strcmp(param->value, YES))
+ OFMarker = 1;
+ if (!strcmp(param->name, IFMARKINT))
+ if (!strcmp(param->value, REJECT))
+ IFMarkInt_Reject = 1;
+ if (!strcmp(param->name, OFMARKINT))
+ if (!strcmp(param->value, REJECT))
+ OFMarkInt_Reject = 1;
+ }
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!(param->phase & phase))
+ continue;
+ if (!SessionType && (!IS_PSTATE_ACCEPTOR(param) &&
+ (strcmp(param->name, IFMARKER) &&
+ strcmp(param->name, OFMARKER) &&
+ strcmp(param->name, IFMARKINT) &&
+ strcmp(param->name, OFMARKINT))))
+ continue;
+ if (!strcmp(param->name, MAXOUTSTANDINGR2T) &&
+ DataSequenceInOrder && (ErrorRecoveryLevel > 0)) {
+ if (strcmp(param->value, "1")) {
+ if (iscsi_update_param_value(param, "1") < 0)
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ }
+ if (!strcmp(param->name, MAXCONNECTIONS) && !SessionType) {
+ if (strcmp(param->value, "1")) {
+ if (iscsi_update_param_value(param, "1") < 0)
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ }
+ if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+ FirstBurstLength = simple_strtoul(param->value,
+ &tmpptr, 0);
+ if (FirstBurstLength > MaxBurstLength) {
+ char tmpbuf[10];
+ memset(tmpbuf, 0, 10);
+ sprintf(tmpbuf, "%u", MaxBurstLength);
+ if (iscsi_update_param_value(param, tmpbuf))
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ }
+ if (!strcmp(param->name, IFMARKER) && IFMarkInt_Reject) {
+ if (iscsi_update_param_value(param, NO) < 0)
+ return -1;
+ IFMarker = 0;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ if (!strcmp(param->name, OFMARKER) && OFMarkInt_Reject) {
+ if (iscsi_update_param_value(param, NO) < 0)
+ return -1;
+ OFMarker = 0;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ if (!strcmp(param->name, IFMARKINT) && !IFMarker) {
+ if (!strcmp(param->value, REJECT))
+ continue;
+ param->state &= ~PSTATE_NEGOTIATE;
+ if (iscsi_update_param_value(param, IRRELEVANT) < 0)
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ if (!strcmp(param->name, OFMARKINT) && !OFMarker) {
+ if (!strcmp(param->value, REJECT))
+ continue;
+ param->state &= ~PSTATE_NEGOTIATE;
+ if (iscsi_update_param_value(param, IRRELEVANT) < 0)
+ return -1;
+ pr_debug("Reset \"%s\" to \"%s\".\n",
+ param->name, param->value);
+ }
+ }
+
+ return 0;
+}
+
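+/*
+ * Parse a received key=value text buffer: validate each key and value,
+ * queue NotUnderstood responses for unknown keys, and update the
+ * proposer/acceptor state of every parameter touched.
+ */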
+int iscsi_decode_text_input(
+ u8 phase,
+ u8 sender,
+ char *textbuf,
+ u32 length,
+ struct iscsi_param_list *param_list)
+{
+ char *tmpbuf, *start = NULL, *end = NULL;
+
+ tmpbuf = kzalloc(length + 1, GFP_KERNEL);
+ if (!tmpbuf) {
+ pr_err("Unable to allocate memory for tmpbuf.\n");
+ return -1;
+ }
+
+ memcpy(tmpbuf, textbuf, length);
+ tmpbuf[length] = '\0';
+ start = tmpbuf;
+ end = (start + length);
+
+ while (start < end) {
+ char *key, *value;
+ struct iscsi_param *param;
+
+ if (iscsi_extract_key_value(start, &key, &value) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+
+ pr_debug("Got key: %s=%s\n", key, value);
+
+ if (phase & PHASE_SECURITY) {
+ if (iscsi_check_for_auth_key(key) > 0) {
+ char *tmpptr = key + strlen(key);
+ *tmpptr = '=';
+ kfree(tmpbuf);
+ return 1;
+ }
+ }
+
+ param = iscsi_check_key(key, phase, sender, param_list);
+ if (!param) {
+ if (iscsi_add_notunderstood_response(key,
+ value, param_list) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+ start += strlen(key) + strlen(value) + 2;
+ continue;
+ }
+ if (iscsi_check_value(param, value) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+
+ start += strlen(key) + strlen(value) + 2;
+
+ if (IS_PSTATE_PROPOSER(param)) {
+ if (iscsi_check_proposer_state(param, value) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+ SET_PSTATE_RESPONSE_GOT(param);
+ } else {
+ if (iscsi_check_acceptor_state(param, value) < 0) {
+ kfree(tmpbuf);
+ return -1;
+ }
+ SET_PSTATE_ACCEPTOR(param);
+ }
+ }
+
+ kfree(tmpbuf);
+ return 0;
+}
+
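+/*
+ * Emit outgoing key=value pairs into textbuf: first responses to keys
+ * received from the peer, then new proposals and any extra responses,
+ * advancing *length past each NUL terminated pair.
+ */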
+int iscsi_encode_text_output(
+ u8 phase,
+ u8 sender,
+ char *textbuf,
+ u32 *length,
+ struct iscsi_param_list *param_list)
+{
+ char *output_buf = NULL;
+ struct iscsi_extra_response *er;
+ struct iscsi_param *param;
+
+ output_buf = textbuf + *length;
+
+ if (iscsi_enforce_integrity_rules(phase, param_list) < 0)
+ return -1;
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!(param->sender & sender))
+ continue;
+ if (IS_PSTATE_ACCEPTOR(param) &&
+ !IS_PSTATE_RESPONSE_SENT(param) &&
+ !IS_PSTATE_REPLY_OPTIONAL(param) &&
+ (param->phase & phase)) {
+ *length += sprintf(output_buf, "%s=%s",
+ param->name, param->value);
+ *length += 1;
+ output_buf = textbuf + *length;
+ SET_PSTATE_RESPONSE_SENT(param);
+ pr_debug("Sending key: %s=%s\n",
+ param->name, param->value);
+ continue;
+ }
+ if (IS_PSTATE_NEGOTIATE(param) &&
+ !IS_PSTATE_ACCEPTOR(param) &&
+ !IS_PSTATE_PROPOSER(param) &&
+ (param->phase & phase)) {
+ *length += sprintf(output_buf, "%s=%s",
+ param->name, param->value);
+ *length += 1;
+ output_buf = textbuf + *length;
+ SET_PSTATE_PROPOSER(param);
+ iscsi_check_proposer_for_optional_reply(param);
+ pr_debug("Sending key: %s=%s\n",
+ param->name, param->value);
+ }
+ }
+
+ list_for_each_entry(er, &param_list->extra_response_list, er_list) {
+ *length += sprintf(output_buf, "%s=%s", er->key, er->value);
+ *length += 1;
+ output_buf = textbuf + *length;
+ pr_debug("Sending key: %s=%s\n", er->key, er->value);
+ }
+ iscsi_release_extra_responses(param_list);
+
+ return 0;
+}
+
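+/*
+ * Fail if any non-declarative key this node proposed never received
+ * the required response.
+ */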
+int iscsi_check_negotiated_keys(struct iscsi_param_list *param_list)
+{
+ int ret = 0;
+ struct iscsi_param *param;
+
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (IS_PSTATE_NEGOTIATE(param) &&
+ IS_PSTATE_PROPOSER(param) &&
+ !IS_PSTATE_RESPONSE_GOT(param) &&
+ !IS_PSTATE_REPLY_OPTIONAL(param) &&
+ !IS_PHASE_DECLARATIVE(param)) {
+ pr_err("No response for proposed key \"%s\".\n",
+ param->name);
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
+
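+/*
+ * Update a parameter from a "key=value" string, optionally running the
+ * full key and value validation first.
+ */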
+int iscsi_change_param_value(
+ char *keyvalue,
+ struct iscsi_param_list *param_list,
+ int check_key)
+{
+ char *key = NULL, *value = NULL;
+ struct iscsi_param *param;
+ int sender = 0;
+
+ if (iscsi_extract_key_value(keyvalue, &key, &value) < 0)
+ return -1;
+
+ if (!check_key) {
+ param = __iscsi_check_key(keyvalue, sender, param_list);
+ if (!param)
+ return -1;
+ } else {
+ param = iscsi_check_key(keyvalue, 0, sender, param_list);
+ if (!param)
+ return -1;
+
+ param->set_param = 1;
+ if (iscsi_check_value(param, value) < 0) {
+ param->set_param = 0;
+ return -1;
+ }
+ param->set_param = 0;
+ }
+
+ if (iscsi_update_param_value(param, value) < 0)
+ return -1;
+
+ return 0;
+}
+
+void iscsi_set_connection_parameters(
+ struct iscsi_conn_ops *ops,
+ struct iscsi_param_list *param_list)
+{
+ char *tmpptr;
+ struct iscsi_param *param;
+
+ pr_debug("---------------------------------------------------"
+ "---------------\n");
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
+ continue;
+ if (!strcmp(param->name, AUTHMETHOD)) {
+ pr_debug("AuthMethod: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, HEADERDIGEST)) {
+ ops->HeaderDigest = !strcmp(param->value, CRC32C);
+ pr_debug("HeaderDigest: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DATADIGEST)) {
+ ops->DataDigest = !strcmp(param->value, CRC32C);
+ pr_debug("DataDigest: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
+ ops->MaxRecvDataSegmentLength =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("MaxRecvDataSegmentLength: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, OFMARKER)) {
+ ops->OFMarker = !strcmp(param->value, YES);
+ pr_debug("OFMarker: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, IFMARKER)) {
+ ops->IFMarker = !strcmp(param->value, YES);
+ pr_debug("IFMarker: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, OFMARKINT)) {
+ ops->OFMarkInt =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("OFMarkInt: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, IFMARKINT)) {
+ ops->IFMarkInt =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("IFMarkInt: %s\n",
+ param->value);
+ }
+ }
+ pr_debug("----------------------------------------------------"
+ "--------------\n");
+}
+
+void iscsi_set_session_parameters(
+ struct iscsi_sess_ops *ops,
+ struct iscsi_param_list *param_list,
+ int leading)
+{
+ char *tmpptr;
+ struct iscsi_param *param;
+
+ pr_debug("----------------------------------------------------"
+ "--------------\n");
+ list_for_each_entry(param, &param_list->param_list, p_list) {
+ if (!IS_PSTATE_ACCEPTOR(param) && !IS_PSTATE_PROPOSER(param))
+ continue;
+ if (!strcmp(param->name, INITIATORNAME)) {
+ if (!param->value)
+ continue;
+ if (leading)
+ snprintf(ops->InitiatorName,
+ sizeof(ops->InitiatorName),
+ "%s", param->value);
+ pr_debug("InitiatorName: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, INITIATORALIAS)) {
+ if (!param->value)
+ continue;
+ snprintf(ops->InitiatorAlias,
+ sizeof(ops->InitiatorAlias),
+ "%s", param->value);
+ pr_debug("InitiatorAlias: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, TARGETNAME)) {
+ if (!param->value)
+ continue;
+ if (leading)
+ snprintf(ops->TargetName,
+ sizeof(ops->TargetName),
+ "%s", param->value);
+ pr_debug("TargetName: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, TARGETALIAS)) {
+ if (!param->value)
+ continue;
+ snprintf(ops->TargetAlias, sizeof(ops->TargetAlias),
+ "%s", param->value);
+ pr_debug("TargetAlias: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, TARGETPORTALGROUPTAG)) {
+ ops->TargetPortalGroupTag =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("TargetPortalGroupTag: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, MAXCONNECTIONS)) {
+ ops->MaxConnections =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("MaxConnections: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, INITIALR2T)) {
+ ops->InitialR2T = !strcmp(param->value, YES);
+ pr_debug("InitialR2T: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, IMMEDIATEDATA)) {
+ ops->ImmediateData = !strcmp(param->value, YES);
+ pr_debug("ImmediateData: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, MAXBURSTLENGTH)) {
+ ops->MaxBurstLength =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("MaxBurstLength: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, FIRSTBURSTLENGTH)) {
+ ops->FirstBurstLength =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("FirstBurstLength: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DEFAULTTIME2WAIT)) {
+ ops->DefaultTime2Wait =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("DefaultTime2Wait: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DEFAULTTIME2RETAIN)) {
+ ops->DefaultTime2Retain =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("DefaultTime2Retain: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, MAXOUTSTANDINGR2T)) {
+ ops->MaxOutstandingR2T =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("MaxOutstandingR2T: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DATAPDUINORDER)) {
+ ops->DataPDUInOrder = !strcmp(param->value, YES);
+ pr_debug("DataPDUInOrder: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, DATASEQUENCEINORDER)) {
+ ops->DataSequenceInOrder = !strcmp(param->value, YES);
+ pr_debug("DataSequenceInOrder: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, ERRORRECOVERYLEVEL)) {
+ ops->ErrorRecoveryLevel =
+ simple_strtoul(param->value, &tmpptr, 0);
+ pr_debug("ErrorRecoveryLevel: %s\n",
+ param->value);
+ } else if (!strcmp(param->name, SESSIONTYPE)) {
+ ops->SessionType = !strcmp(param->value, DISCOVERY);
+ pr_debug("SessionType: %s\n",
+ param->value);
+ }
+ }
+ pr_debug("----------------------------------------------------"
+ "--------------\n");
+
+}
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
new file mode 100644
index 00000000000..6a37fd6f128
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -0,0 +1,269 @@
+#ifndef ISCSI_PARAMETERS_H
+#define ISCSI_PARAMETERS_H
+
+struct iscsi_extra_response {
+ char key[64];
+ char value[32];
+ struct list_head er_list;
+} ____cacheline_aligned;
+
+struct iscsi_param {
+ char *name;
+ char *value;
+ u8 set_param;
+ u8 phase;
+ u8 scope;
+ u8 sender;
+ u8 type;
+ u8 use;
+ u16 type_range;
+ u32 state;
+ struct list_head p_list;
+} ____cacheline_aligned;
+
+extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
+extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
+extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
+extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
+extern void iscsi_print_params(struct iscsi_param_list *);
+extern int iscsi_create_default_params(struct iscsi_param_list **);
+extern int iscsi_set_keys_to_negotiate(int, struct iscsi_param_list *);
+extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
+extern int iscsi_copy_param_list(struct iscsi_param_list **,
+ struct iscsi_param_list *, int);
+extern int iscsi_change_param_value(char *, struct iscsi_param_list *, int);
+extern void iscsi_release_param_list(struct iscsi_param_list *);
+extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
+extern int iscsi_extract_key_value(char *, char **, char **);
+extern int iscsi_update_param_value(struct iscsi_param *, char *);
+extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_param_list *);
+extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
+ struct iscsi_param_list *);
+extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
+extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
+ struct iscsi_param_list *);
+extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
+ struct iscsi_param_list *, int);
+
+#define YES "Yes"
+#define NO "No"
+#define ALL "All"
+#define IRRELEVANT "Irrelevant"
+#define NONE "None"
+#define NOTUNDERSTOOD "NotUnderstood"
+#define REJECT "Reject"
+
+/*
+ * The Parameter Names.
+ */
+#define AUTHMETHOD "AuthMethod"
+#define HEADERDIGEST "HeaderDigest"
+#define DATADIGEST "DataDigest"
+#define MAXCONNECTIONS "MaxConnections"
+#define SENDTARGETS "SendTargets"
+#define TARGETNAME "TargetName"
+#define INITIATORNAME "InitiatorName"
+#define TARGETALIAS "TargetAlias"
+#define INITIATORALIAS "InitiatorAlias"
+#define TARGETADDRESS "TargetAddress"
+#define TARGETPORTALGROUPTAG "TargetPortalGroupTag"
+#define INITIALR2T "InitialR2T"
+#define IMMEDIATEDATA "ImmediateData"
+#define MAXRECVDATASEGMENTLENGTH "MaxRecvDataSegmentLength"
+#define MAXBURSTLENGTH "MaxBurstLength"
+#define FIRSTBURSTLENGTH "FirstBurstLength"
+#define DEFAULTTIME2WAIT "DefaultTime2Wait"
+#define DEFAULTTIME2RETAIN "DefaultTime2Retain"
+#define MAXOUTSTANDINGR2T "MaxOutstandingR2T"
+#define DATAPDUINORDER "DataPDUInOrder"
+#define DATASEQUENCEINORDER "DataSequenceInOrder"
+#define ERRORRECOVERYLEVEL "ErrorRecoveryLevel"
+#define SESSIONTYPE "SessionType"
+#define IFMARKER "IFMarker"
+#define OFMARKER "OFMarker"
+#define IFMARKINT "IFMarkInt"
+#define OFMARKINT "OFMarkInt"
+#define X_EXTENSIONKEY "X-com.sbei.version"
+#define X_EXTENSIONKEY_CISCO_NEW "X-com.cisco.protocol"
+#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
+
+/*
+ * For AuthMethod.
+ */
+#define KRB5 "KRB5"
+#define SPKM1 "SPKM1"
+#define SPKM2 "SPKM2"
+#define SRP "SRP"
+#define CHAP "CHAP"
+
+/*
+ * Initial values for Parameter Negotiation.
+ */
+#define INITIAL_AUTHMETHOD CHAP
+#define INITIAL_HEADERDIGEST "CRC32C,None"
+#define INITIAL_DATADIGEST "CRC32C,None"
+#define INITIAL_MAXCONNECTIONS "1"
+#define INITIAL_SENDTARGETS ALL
+#define INITIAL_TARGETNAME "LIO.Target"
+#define INITIAL_INITIATORNAME "LIO.Initiator"
+#define INITIAL_TARGETALIAS "LIO Target"
+#define INITIAL_INITIATORALIAS "LIO Initiator"
+#define INITIAL_TARGETADDRESS "0.0.0.0:0000,0"
+#define INITIAL_TARGETPORTALGROUPTAG "1"
+#define INITIAL_INITIALR2T YES
+#define INITIAL_IMMEDIATEDATA YES
+#define INITIAL_MAXRECVDATASEGMENTLENGTH "8192"
+#define INITIAL_MAXBURSTLENGTH "262144"
+#define INITIAL_FIRSTBURSTLENGTH "65536"
+#define INITIAL_DEFAULTTIME2WAIT "2"
+#define INITIAL_DEFAULTTIME2RETAIN "20"
+#define INITIAL_MAXOUTSTANDINGR2T "1"
+#define INITIAL_DATAPDUINORDER YES
+#define INITIAL_DATASEQUENCEINORDER YES
+#define INITIAL_ERRORRECOVERYLEVEL "0"
+#define INITIAL_SESSIONTYPE NORMAL
+#define INITIAL_IFMARKER NO
+#define INITIAL_OFMARKER NO
+#define INITIAL_IFMARKINT "2048~65535"
+#define INITIAL_OFMARKINT "2048~65535"
+
+/*
+ * For [Header,Data]Digests.
+ */
+#define CRC32C "CRC32C"
+
+/*
+ * For SessionType.
+ */
+#define DISCOVERY "Discovery"
+#define NORMAL "Normal"
+
+/*
+ * struct iscsi_param->use
+ */
+#define USE_LEADING_ONLY 0x01
+#define USE_INITIAL_ONLY 0x02
+#define USE_ALL 0x04
+
+#define IS_USE_LEADING_ONLY(p) ((p)->use & USE_LEADING_ONLY)
+#define IS_USE_INITIAL_ONLY(p) ((p)->use & USE_INITIAL_ONLY)
+#define IS_USE_ALL(p) ((p)->use & USE_ALL)
+
+#define SET_USE_INITIAL_ONLY(p) ((p)->use |= USE_INITIAL_ONLY)
+
+/*
+ * struct iscsi_param->sender
+ */
+#define SENDER_INITIATOR 0x01
+#define SENDER_TARGET 0x02
+#define SENDER_BOTH 0x03
+/* Used in iscsi_check_key() */
+#define SENDER_RECEIVER 0x04
+
+#define IS_SENDER_INITIATOR(p) ((p)->sender & SENDER_INITIATOR)
+#define IS_SENDER_TARGET(p) ((p)->sender & SENDER_TARGET)
+#define IS_SENDER_BOTH(p) ((p)->sender & SENDER_BOTH)
+
+/*
+ * struct iscsi_param->scope
+ */
+#define SCOPE_CONNECTION_ONLY 0x01
+#define SCOPE_SESSION_WIDE 0x02
+
+#define IS_SCOPE_CONNECTION_ONLY(p) ((p)->scope & SCOPE_CONNECTION_ONLY)
+#define IS_SCOPE_SESSION_WIDE(p) ((p)->scope & SCOPE_SESSION_WIDE)
+
+/*
+ * struct iscsi_param->phase
+ */
+#define PHASE_SECURITY 0x01
+#define PHASE_OPERATIONAL 0x02
+#define PHASE_DECLARATIVE 0x04
+#define PHASE_FFP0 0x08
+
+#define IS_PHASE_SECURITY(p) ((p)->phase & PHASE_SECURITY)
+#define IS_PHASE_OPERATIONAL(p) ((p)->phase & PHASE_OPERATIONAL)
+#define IS_PHASE_DECLARATIVE(p) ((p)->phase & PHASE_DECLARATIVE)
+#define IS_PHASE_FFP0(p) ((p)->phase & PHASE_FFP0)
+
+/*
+ * struct iscsi_param->type
+ */
+#define TYPE_BOOL_AND 0x01
+#define TYPE_BOOL_OR 0x02
+#define TYPE_NUMBER 0x04
+#define TYPE_NUMBER_RANGE 0x08
+#define TYPE_STRING 0x10
+#define TYPE_VALUE_LIST 0x20
+
+#define IS_TYPE_BOOL_AND(p) ((p)->type & TYPE_BOOL_AND)
+#define IS_TYPE_BOOL_OR(p) ((p)->type & TYPE_BOOL_OR)
+#define IS_TYPE_NUMBER(p) ((p)->type & TYPE_NUMBER)
+#define IS_TYPE_NUMBER_RANGE(p) ((p)->type & TYPE_NUMBER_RANGE)
+#define IS_TYPE_STRING(p) ((p)->type & TYPE_STRING)
+#define IS_TYPE_VALUE_LIST(p) ((p)->type & TYPE_VALUE_LIST)
+
+/*
+ * struct iscsi_param->type_range
+ */
+#define TYPERANGE_BOOL_AND 0x0001
+#define TYPERANGE_BOOL_OR 0x0002
+#define TYPERANGE_0_TO_2 0x0004
+#define TYPERANGE_0_TO_3600 0x0008
+#define TYPERANGE_0_TO_32767 0x0010
+#define TYPERANGE_0_TO_65535 0x0020
+#define TYPERANGE_1_TO_65535 0x0040
+#define TYPERANGE_2_TO_3600 0x0080
+#define TYPERANGE_512_TO_16777215 0x0100
+#define TYPERANGE_AUTH 0x0200
+#define TYPERANGE_DIGEST 0x0400
+#define TYPERANGE_ISCSINAME 0x0800
+#define TYPERANGE_MARKINT 0x1000
+#define TYPERANGE_SESSIONTYPE 0x2000
+#define TYPERANGE_TARGETADDRESS 0x4000
+#define TYPERANGE_UTF8 0x8000
+
+#define IS_TYPERANGE_0_TO_2(p) ((p)->type_range & TYPERANGE_0_TO_2)
+#define IS_TYPERANGE_0_TO_3600(p) ((p)->type_range & TYPERANGE_0_TO_3600)
+#define IS_TYPERANGE_0_TO_32767(p) ((p)->type_range & TYPERANGE_0_TO_32767)
+#define IS_TYPERANGE_0_TO_65535(p) ((p)->type_range & TYPERANGE_0_TO_65535)
+#define IS_TYPERANGE_1_TO_65535(p) ((p)->type_range & TYPERANGE_1_TO_65535)
+#define IS_TYPERANGE_2_TO_3600(p) ((p)->type_range & TYPERANGE_2_TO_3600)
+#define IS_TYPERANGE_512_TO_16777215(p) ((p)->type_range & \
+ TYPERANGE_512_TO_16777215)
+#define IS_TYPERANGE_AUTH_PARAM(p) ((p)->type_range & TYPERANGE_AUTH)
+#define IS_TYPERANGE_DIGEST_PARAM(p) ((p)->type_range & TYPERANGE_DIGEST)
+#define IS_TYPERANGE_SESSIONTYPE(p) ((p)->type_range & \
+ TYPERANGE_SESSIONTYPE)
+
+/*
+ * struct iscsi_param->state
+ */
+#define PSTATE_ACCEPTOR 0x01
+#define PSTATE_NEGOTIATE 0x02
+#define PSTATE_PROPOSER 0x04
+#define PSTATE_IRRELEVANT 0x08
+#define PSTATE_REJECT 0x10
+#define PSTATE_REPLY_OPTIONAL 0x20
+#define PSTATE_RESPONSE_GOT 0x40
+#define PSTATE_RESPONSE_SENT 0x80
+
+#define IS_PSTATE_ACCEPTOR(p) ((p)->state & PSTATE_ACCEPTOR)
+#define IS_PSTATE_NEGOTIATE(p) ((p)->state & PSTATE_NEGOTIATE)
+#define IS_PSTATE_PROPOSER(p) ((p)->state & PSTATE_PROPOSER)
+#define IS_PSTATE_IRRELEVANT(p) ((p)->state & PSTATE_IRRELEVANT)
+#define IS_PSTATE_REJECT(p) ((p)->state & PSTATE_REJECT)
+#define IS_PSTATE_REPLY_OPTIONAL(p) ((p)->state & PSTATE_REPLY_OPTIONAL)
+#define IS_PSTATE_RESPONSE_GOT(p) ((p)->state & PSTATE_RESPONSE_GOT)
+#define IS_PSTATE_RESPONSE_SENT(p) ((p)->state & PSTATE_RESPONSE_SENT)
+
+#define SET_PSTATE_ACCEPTOR(p) ((p)->state |= PSTATE_ACCEPTOR)
+#define SET_PSTATE_NEGOTIATE(p) ((p)->state |= PSTATE_NEGOTIATE)
+#define SET_PSTATE_PROPOSER(p) ((p)->state |= PSTATE_PROPOSER)
+#define SET_PSTATE_IRRELEVANT(p) ((p)->state |= PSTATE_IRRELEVANT)
+#define SET_PSTATE_REJECT(p) ((p)->state |= PSTATE_REJECT)
+#define SET_PSTATE_REPLY_OPTIONAL(p) ((p)->state |= PSTATE_REPLY_OPTIONAL)
+#define SET_PSTATE_RESPONSE_GOT(p) ((p)->state |= PSTATE_RESPONSE_GOT)
+#define SET_PSTATE_RESPONSE_SENT(p) ((p)->state |= PSTATE_RESPONSE_SENT)
+
+#endif /* ISCSI_PARAMETERS_H */
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
new file mode 100644
index 00000000000..fc694082bfc
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -0,0 +1,664 @@
+/*******************************************************************************
+ * This file contains main functions related to iSCSI DataSequenceInOrder=No
+ * and DataPDUInOrder=No.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/random.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_seq_pdu_list.h"
+
+#define OFFLOAD_BUF_SIZE 32768
+
+void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
+{
+ int i;
+ struct iscsi_seq *seq;
+
+ pr_debug("Dumping Sequence List for ITT: 0x%08x:\n",
+ cmd->init_task_tag);
+
+ for (i = 0; i < cmd->seq_count; i++) {
+ seq = &cmd->seq_list[i];
+ pr_debug("i: %d, pdu_start: %d, pdu_count: %d,"
+ " offset: %d, xfer_len: %d, seq_send_order: %d,"
+ " seq_no: %d\n", i, seq->pdu_start, seq->pdu_count,
+ seq->offset, seq->xfer_len, seq->seq_send_order,
+ seq->seq_no);
+ }
+}
+
+void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
+{
+ int i;
+ struct iscsi_pdu *pdu;
+
+ pr_debug("Dumping PDU List for ITT: 0x%08x:\n",
+ cmd->init_task_tag);
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i];
+ pr_debug("i: %d, offset: %d, length: %d,"
+ " pdu_send_order: %d, seq_no: %d\n", i, pdu->offset,
+ pdu->length, pdu->pdu_send_order, pdu->seq_no);
+ }
+}
+
+static void iscsit_ordered_seq_lists(
+ struct iscsi_cmd *cmd,
+ u8 type)
+{
+ u32 i, seq_count = 0;
+
+ for (i = 0; i < cmd->seq_count; i++) {
+ if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+ continue;
+ cmd->seq_list[i].seq_send_order = seq_count++;
+ }
+}
+
+static void iscsit_ordered_pdu_lists(
+ struct iscsi_cmd *cmd,
+ u8 type)
+{
+ u32 i, pdu_send_order = 0, seq_no = 0;
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+redo:
+ if (cmd->pdu_list[i].seq_no == seq_no) {
+ cmd->pdu_list[i].pdu_send_order = pdu_send_order++;
+ continue;
+ }
+ seq_no++;
+ pdu_send_order = 0;
+ goto redo;
+ }
+}
+
+/*
+ * Generate count random values into array.
+ * Use 0x80000000 to mark values already generated in array[].
+ */
+static void iscsit_create_random_array(u32 *array, u32 count)
+{
+ int i, j, k;
+
+ if (count == 1) {
+ array[0] = 0;
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+redo:
+ get_random_bytes(&j, sizeof(u32));
+ j = (1 + (int) (9999 + 1) - j) % count;
+ for (k = 0; k < i + 1; k++) {
+ j |= 0x80000000;
+ if ((array[k] & 0x80000000) && (array[k] == j))
+ goto redo;
+ }
+ array[i] = j;
+ }
+
+ for (i = 0; i < count; i++)
+ array[i] &= ~0x80000000;
+}
+
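+/*
+ * Assign a random pdu_send_order to the PDUs of each sequence, used
+ * when DataPDUInOrder=No.
+ */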
+static int iscsit_randomize_pdu_lists(
+ struct iscsi_cmd *cmd,
+ u8 type)
+{
+ int i = 0;
+ u32 *array, pdu_count, seq_count = 0, seq_no = 0, seq_offset = 0;
+
+ for (pdu_count = 0; pdu_count < cmd->pdu_count; pdu_count++) {
+redo:
+ if (cmd->pdu_list[pdu_count].seq_no == seq_no) {
+ seq_count++;
+ continue;
+ }
+ array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+ if (!array) {
+ pr_err("Unable to allocate memory"
+ " for random array.\n");
+ return -1;
+ }
+ iscsit_create_random_array(array, seq_count);
+
+ for (i = 0; i < seq_count; i++)
+ cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
+
+ kfree(array);
+
+ seq_offset += seq_count;
+ seq_count = 0;
+ seq_no++;
+ goto redo;
+ }
+
+ if (seq_count) {
+ array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+ if (!array) {
+ pr_err("Unable to allocate memory for"
+ " random array.\n");
+ return -1;
+ }
+ iscsit_create_random_array(array, seq_count);
+
+ for (i = 0; i < seq_count; i++)
+ cmd->pdu_list[seq_offset+i].pdu_send_order = array[i];
+
+ kfree(array);
+ }
+
+ return 0;
+}
+
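+/*
+ * Assign a random seq_send_order to each normal sequence, used when
+ * DataSequenceInOrder=No.
+ */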
+static int iscsit_randomize_seq_lists(
+ struct iscsi_cmd *cmd,
+ u8 type)
+{
+ int i, j = 0;
+ u32 *array, seq_count = cmd->seq_count;
+
+ if ((type == PDULIST_IMMEDIATE) || (type == PDULIST_UNSOLICITED))
+ seq_count--;
+ else if (type == PDULIST_IMMEDIATE_AND_UNSOLICITED)
+ seq_count -= 2;
+
+ if (!seq_count)
+ return 0;
+
+ array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
+ if (!array) {
+ pr_err("Unable to allocate memory for random array.\n");
+ return -1;
+ }
+ iscsit_create_random_array(array, seq_count);
+
+ for (i = 0; i < cmd->seq_count; i++) {
+ if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+ continue;
+ cmd->seq_list[i].seq_send_order = array[j++];
+ }
+
+ kfree(array);
+ return 0;
+}
+
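+/*
+ * Walk the transfer exactly as the list builder below will, only to
+ * count how many sequence and PDU descriptors need to be allocated.
+ */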
+static void iscsit_determine_counts_for_list(
+ struct iscsi_cmd *cmd,
+ struct iscsi_build_list *bl,
+ u32 *seq_count,
+ u32 *pdu_count)
+{
+ int check_immediate = 0;
+ u32 burstlength = 0, offset = 0;
+ u32 unsolicited_data_length = 0;
+ struct iscsi_conn *conn = cmd->conn;
+
+ if ((bl->type == PDULIST_IMMEDIATE) ||
+ (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+ check_immediate = 1;
+
+ if ((bl->type == PDULIST_UNSOLICITED) ||
+ (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+ unsolicited_data_length = (cmd->data_length >
+ conn->sess->sess_ops->FirstBurstLength) ?
+ conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
+
+ while (offset < cmd->data_length) {
+ *pdu_count += 1;
+
+ if (check_immediate) {
+ check_immediate = 0;
+ offset += bl->immediate_data_length;
+ *seq_count += 1;
+ if (unsolicited_data_length)
+ unsolicited_data_length -=
+ bl->immediate_data_length;
+ continue;
+ }
+ if (unsolicited_data_length > 0) {
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
+ >= cmd->data_length) {
+ unsolicited_data_length -=
+ (cmd->data_length - offset);
+ offset += (cmd->data_length - offset);
+ continue;
+ }
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
+ >= conn->sess->sess_ops->FirstBurstLength) {
+ unsolicited_data_length -=
+ (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ offset += (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ burstlength = 0;
+ *seq_count += 1;
+ continue;
+ }
+
+ offset += conn->conn_ops->MaxRecvDataSegmentLength;
+ unsolicited_data_length -=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ continue;
+ }
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ cmd->data_length) {
+ offset += (cmd->data_length - offset);
+ continue;
+ }
+ if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ conn->sess->sess_ops->MaxBurstLength) {
+ offset += (conn->sess->sess_ops->MaxBurstLength -
+ burstlength);
+ burstlength = 0;
+ *seq_count += 1;
+ continue;
+ }
+
+ burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
+ offset += conn->conn_ops->MaxRecvDataSegmentLength;
+ }
+}
+
+
+/*
+ * Builds PDU and/or Sequence list, called while DataSequenceInOrder=No
+ * and DataPDUInOrder=No.
+ */
+static int iscsit_build_pdu_and_seq_list(
+ struct iscsi_cmd *cmd,
+ struct iscsi_build_list *bl)
+{
+ int check_immediate = 0, datapduinorder, datasequenceinorder;
+ u32 burstlength = 0, offset = 0, i = 0;
+ u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu = cmd->pdu_list;
+ struct iscsi_seq *seq = cmd->seq_list;
+
+ datapduinorder = conn->sess->sess_ops->DataPDUInOrder;
+ datasequenceinorder = conn->sess->sess_ops->DataSequenceInOrder;
+
+ if ((bl->type == PDULIST_IMMEDIATE) ||
+ (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+ check_immediate = 1;
+
+ if ((bl->type == PDULIST_UNSOLICITED) ||
+ (bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
+ unsolicited_data_length = (cmd->data_length >
+ conn->sess->sess_ops->FirstBurstLength) ?
+ conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
+
+ while (offset < cmd->data_length) {
+ pdu_count++;
+ if (!datapduinorder) {
+ pdu[i].offset = offset;
+ pdu[i].seq_no = seq_no;
+ }
+ if (!datasequenceinorder && (pdu_count == 1)) {
+ seq[seq_no].pdu_start = i;
+ seq[seq_no].seq_no = seq_no;
+ seq[seq_no].offset = offset;
+ seq[seq_no].orig_offset = offset;
+ }
+
+ if (check_immediate) {
+ check_immediate = 0;
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_IMMEDIATE;
+ pdu[i++].length = bl->immediate_data_length;
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_IMMEDIATE;
+ seq[seq_no].pdu_count = 1;
+ seq[seq_no].xfer_len =
+ bl->immediate_data_length;
+ }
+ offset += bl->immediate_data_length;
+ pdu_count = 0;
+ seq_no++;
+ if (unsolicited_data_length)
+ unsolicited_data_length -=
+ bl->immediate_data_length;
+ continue;
+ }
+ if (unsolicited_data_length > 0) {
+ if ((offset +
+ conn->conn_ops->MaxRecvDataSegmentLength) >=
+ cmd->data_length) {
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_UNSOLICITED;
+ pdu[i].length =
+ (cmd->data_length - offset);
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_UNSOLICITED;
+ seq[seq_no].pdu_count = pdu_count;
+ seq[seq_no].xfer_len = (burstlength +
+ (cmd->data_length - offset));
+ }
+ unsolicited_data_length -=
+ (cmd->data_length - offset);
+ offset += (cmd->data_length - offset);
+ continue;
+ }
+ if ((offset +
+ conn->conn_ops->MaxRecvDataSegmentLength) >=
+ conn->sess->sess_ops->FirstBurstLength) {
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_UNSOLICITED;
+ pdu[i++].length =
+ (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_UNSOLICITED;
+ seq[seq_no].pdu_count = pdu_count;
+ seq[seq_no].xfer_len = (burstlength +
+ (conn->sess->sess_ops->FirstBurstLength -
+ offset));
+ }
+ unsolicited_data_length -=
+ (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ offset += (conn->sess->sess_ops->FirstBurstLength -
+ offset);
+ burstlength = 0;
+ pdu_count = 0;
+ seq_no++;
+ continue;
+ }
+
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_UNSOLICITED;
+ pdu[i++].length =
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ }
+ burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
+ offset += conn->conn_ops->MaxRecvDataSegmentLength;
+ unsolicited_data_length -=
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ continue;
+ }
+ if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ cmd->data_length) {
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_NORMAL;
+ pdu[i].length = (cmd->data_length - offset);
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_NORMAL;
+ seq[seq_no].pdu_count = pdu_count;
+ seq[seq_no].xfer_len = (burstlength +
+ (cmd->data_length - offset));
+ }
+ offset += (cmd->data_length - offset);
+ continue;
+ }
+ if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
+ conn->sess->sess_ops->MaxBurstLength) {
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_NORMAL;
+ pdu[i++].length =
+ (conn->sess->sess_ops->MaxBurstLength -
+ burstlength);
+ }
+ if (!datasequenceinorder) {
+ seq[seq_no].type = SEQTYPE_NORMAL;
+ seq[seq_no].pdu_count = pdu_count;
+ seq[seq_no].xfer_len = (burstlength +
+ (conn->sess->sess_ops->MaxBurstLength -
+ burstlength));
+ }
+ offset += (conn->sess->sess_ops->MaxBurstLength -
+ burstlength);
+ burstlength = 0;
+ pdu_count = 0;
+ seq_no++;
+ continue;
+ }
+
+ if (!datapduinorder) {
+ pdu[i].type = PDUTYPE_NORMAL;
+ pdu[i++].length =
+ conn->conn_ops->MaxRecvDataSegmentLength;
+ }
+ burstlength += conn->conn_ops->MaxRecvDataSegmentLength;
+ offset += conn->conn_ops->MaxRecvDataSegmentLength;
+ }
+
+ if (!datasequenceinorder) {
+ if (bl->data_direction & ISCSI_PDU_WRITE) {
+ if (bl->randomize & RANDOM_R2T_OFFSETS) {
+ if (iscsit_randomize_seq_lists(cmd, bl->type)
+ < 0)
+ return -1;
+ } else
+ iscsit_ordered_seq_lists(cmd, bl->type);
+ } else if (bl->data_direction & ISCSI_PDU_READ) {
+ if (bl->randomize & RANDOM_DATAIN_SEQ_OFFSETS) {
+ if (iscsit_randomize_seq_lists(cmd, bl->type)
+ < 0)
+ return -1;
+ } else
+ iscsit_ordered_seq_lists(cmd, bl->type);
+ }
+#if 0
+ iscsit_dump_seq_list(cmd);
+#endif
+ }
+ if (!datapduinorder) {
+ if (bl->data_direction & ISCSI_PDU_WRITE) {
+ if (bl->randomize & RANDOM_DATAOUT_PDU_OFFSETS) {
+ if (iscsit_randomize_pdu_lists(cmd, bl->type)
+ < 0)
+ return -1;
+ } else
+ iscsit_ordered_pdu_lists(cmd, bl->type);
+ } else if (bl->data_direction & ISCSI_PDU_READ) {
+ if (bl->randomize & RANDOM_DATAIN_PDU_OFFSETS) {
+ if (iscsit_randomize_pdu_lists(cmd, bl->type)
+ < 0)
+ return -1;
+ } else
+ iscsit_ordered_pdu_lists(cmd, bl->type);
+ }
+#if 0
+ iscsit_dump_pdu_list(cmd);
+#endif
+ }
+
+ return 0;
+}
+
+/*
+ * Only called while DataSequenceInOrder=No or DataPDUInOrder=No.
+ */
+int iscsit_do_build_list(
+ struct iscsi_cmd *cmd,
+ struct iscsi_build_list *bl)
+{
+ u32 pdu_count = 0, seq_count = 1;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu = NULL;
+ struct iscsi_seq *seq = NULL;
+
+ iscsit_determine_counts_for_list(cmd, bl, &seq_count, &pdu_count);
+
+ if (!conn->sess->sess_ops->DataSequenceInOrder) {
+ seq = kzalloc(seq_count * sizeof(struct iscsi_seq), GFP_ATOMIC);
+ if (!seq) {
+ pr_err("Unable to allocate struct iscsi_seq list\n");
+ return -1;
+ }
+ cmd->seq_list = seq;
+ cmd->seq_count = seq_count;
+ }
+
+ if (!conn->sess->sess_ops->DataPDUInOrder) {
+ pdu = kzalloc(pdu_count * sizeof(struct iscsi_pdu), GFP_ATOMIC);
+ if (!pdu) {
+ pr_err("Unable to allocate struct iscsi_pdu list.\n");
+ kfree(seq);
+ return -1;
+ }
+ cmd->pdu_list = pdu;
+ cmd->pdu_count = pdu_count;
+ }
+
+ return iscsit_build_pdu_and_seq_list(cmd, bl);
+}
+
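+/*
+ * Locate the struct iscsi_pdu descriptor matching an exact offset and
+ * length within the command's PDU list.
+ */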
+struct iscsi_pdu *iscsit_get_pdu_holder(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 length)
+{
+ u32 i;
+ struct iscsi_pdu *pdu = NULL;
+
+ if (!cmd->pdu_list) {
+ pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
+ return NULL;
+ }
+
+ pdu = &cmd->pdu_list[0];
+
+ for (i = 0; i < cmd->pdu_count; i++)
+ if ((pdu[i].offset == offset) && (pdu[i].length == length))
+ return &pdu[i];
+
+ pr_err("Unable to locate PDU holder for ITT: 0x%08x, Offset:"
+ " %u, Length: %u\n", cmd->init_task_tag, offset, length);
+ return NULL;
+}
+
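+/*
+ * Return the next PDU to send according to pdu_send_order, either
+ * across the whole command (DataSequenceInOrder=Yes) or within the
+ * given sequence (DataSequenceInOrder=No).
+ */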
+struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(
+ struct iscsi_cmd *cmd,
+ struct iscsi_seq *seq)
+{
+ u32 i;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_pdu *pdu = NULL;
+
+ if (!cmd->pdu_list) {
+ pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
+ return NULL;
+ }
+
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+redo:
+ pdu = &cmd->pdu_list[cmd->pdu_start];
+
+ for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) {
+#if 0
+ pr_debug("pdu[i].seq_no: %d, pdu[i].pdu"
+ "_send_order: %d, pdu[i].offset: %d,"
+ " pdu[i].length: %d\n", pdu[i].seq_no,
+ pdu[i].pdu_send_order, pdu[i].offset,
+ pdu[i].length);
+#endif
+ if (pdu[i].pdu_send_order == cmd->pdu_send_order) {
+ cmd->pdu_send_order++;
+ return &pdu[i];
+ }
+ }
+
+ cmd->pdu_start += cmd->pdu_send_order;
+ cmd->pdu_send_order = 0;
+ cmd->seq_no++;
+
+ if (cmd->pdu_start < cmd->pdu_count)
+ goto redo;
+
+ pr_err("Command ITT: 0x%08x unable to locate"
+ " struct iscsi_pdu for cmd->pdu_send_order: %u.\n",
+ cmd->init_task_tag, cmd->pdu_send_order);
+ return NULL;
+ } else {
+ if (!seq) {
+ pr_err("struct iscsi_seq is NULL!\n");
+ return NULL;
+ }
+#if 0
+ pr_debug("seq->pdu_start: %d, seq->pdu_count: %d,"
+ " seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count,
+ seq->seq_no);
+#endif
+ pdu = &cmd->pdu_list[seq->pdu_start];
+
+ if (seq->pdu_send_order == seq->pdu_count) {
+ pr_err("Command ITT: 0x%08x seq->pdu_send"
+ "_order: %u equals seq->pdu_count: %u\n",
+ cmd->init_task_tag, seq->pdu_send_order,
+ seq->pdu_count);
+ return NULL;
+ }
+
+ for (i = 0; i < seq->pdu_count; i++) {
+ if (pdu[i].pdu_send_order == seq->pdu_send_order) {
+ seq->pdu_send_order++;
+ return &pdu[i];
+ }
+ }
+
+ pr_err("Command ITT: 0x%08x unable to locate iscsi"
+ "_pdu_t for seq->pdu_send_order: %u.\n",
+ cmd->init_task_tag, seq->pdu_send_order);
+ return NULL;
+ }
+
+ return NULL;
+}
+
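+/*
+ * Locate the struct iscsi_seq descriptor covering the given offset
+ * and length.
+ */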
+struct iscsi_seq *iscsit_get_seq_holder(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 length)
+{
+ u32 i;
+
+ if (!cmd->seq_list) {
+ pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+ return NULL;
+ }
+
+ for (i = 0; i < cmd->seq_count; i++) {
+#if 0
+ pr_debug("seq_list[i].orig_offset: %d, seq_list[i]."
+ "xfer_len: %d, seq_list[i].seq_no %u\n",
+ cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len,
+ cmd->seq_list[i].seq_no);
+#endif
+ if ((cmd->seq_list[i].orig_offset +
+ cmd->seq_list[i].xfer_len) >=
+ (offset + length))
+ return &cmd->seq_list[i];
+ }
+
+ pr_err("Unable to locate Sequence holder for ITT: 0x%08x,"
+ " Offset: %u, Length: %u\n", cmd->init_task_tag, offset,
+ length);
+ return NULL;
+}
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
new file mode 100644
index 00000000000..0d52a10e306
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -0,0 +1,86 @@
+#ifndef ISCSI_SEQ_AND_PDU_LIST_H
+#define ISCSI_SEQ_AND_PDU_LIST_H
+
+/* struct iscsi_pdu->status */
+#define DATAOUT_PDU_SENT 1
+
+/* struct iscsi_seq->type */
+#define SEQTYPE_IMMEDIATE 1
+#define SEQTYPE_UNSOLICITED 2
+#define SEQTYPE_NORMAL 3
+
+/* struct iscsi_seq->status */
+#define DATAOUT_SEQUENCE_GOT_R2T 1
+#define DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY 2
+#define DATAOUT_SEQUENCE_COMPLETE 3
+
+/* iscsi_determine_counts_for_list() type */
+#define PDULIST_NORMAL 1
+#define PDULIST_IMMEDIATE 2
+#define PDULIST_UNSOLICITED 3
+#define PDULIST_IMMEDIATE_AND_UNSOLICITED 4
+
+/* struct iscsi_pdu->type */
+#define PDUTYPE_IMMEDIATE 1
+#define PDUTYPE_UNSOLICITED 2
+#define PDUTYPE_NORMAL 3
+
+/* struct iscsi_pdu->status */
+#define ISCSI_PDU_NOT_RECEIVED 0
+#define ISCSI_PDU_RECEIVED_OK 1
+#define ISCSI_PDU_CRC_FAILED 2
+#define ISCSI_PDU_TIMED_OUT 3
+
+/* struct iscsi_build_list->randomize */
+#define RANDOM_DATAIN_PDU_OFFSETS 0x01
+#define RANDOM_DATAIN_SEQ_OFFSETS 0x02
+#define RANDOM_DATAOUT_PDU_OFFSETS 0x04
+#define RANDOM_R2T_OFFSETS 0x08
+
+/* struct iscsi_build_list->data_direction */
+#define ISCSI_PDU_READ 0x01
+#define ISCSI_PDU_WRITE 0x02
+
+struct iscsi_build_list {
+ int data_direction;
+ int randomize;
+ int type;
+ int immediate_data_length;
+};
+
+struct iscsi_pdu {
+ int status;
+ int type;
+ u8 flags;
+ u32 data_sn;
+ u32 length;
+ u32 offset;
+ u32 pdu_send_order;
+ u32 seq_no;
+} ____cacheline_aligned;
+
+struct iscsi_seq {
+ int sent;
+ int status;
+ int type;
+ u32 data_sn;
+ u32 first_datasn;
+ u32 last_datasn;
+ u32 next_burst_len;
+ u32 pdu_start;
+ u32 pdu_count;
+ u32 offset;
+ u32 orig_offset;
+ u32 pdu_send_order;
+ u32 r2t_sn;
+ u32 seq_send_order;
+ u32 seq_no;
+ u32 xfer_len;
+} ____cacheline_aligned;
+
+extern int iscsit_do_build_list(struct iscsi_cmd *, struct iscsi_build_list *);
+extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
+extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
+extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);
+
+#endif /* ISCSI_SEQ_AND_PDU_LIST_H */
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
new file mode 100644
index 00000000000..bbdbe9301b2
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -0,0 +1,950 @@
+/*******************************************************************************
+ * Modern ConfigFS group context specific iSCSI statistics, based on the
+ * original iscsi_target_mib.c code.
+ *
+ * Copyright (c) 2011 Rising Tide Systems
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/configfs.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/configfs_macros.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target_stat.h"
+
+#ifndef INITIAL_JIFFIES
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+#endif
+
+/* Instance Attributes Table */
+#define ISCSI_INST_NUM_NODES 1
+#define ISCSI_INST_DESCR "Storage Engine Target"
+#define ISCSI_INST_LAST_FAILURE_TYPE 0
+#define ISCSI_DISCONTINUITY_TIME 0
+
+#define ISCSI_NODE_INDEX 1
+
+#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
+
+/****************************************************************************
+ * iSCSI MIB Tables
+ ****************************************************************************/
+/*
+ * Instance Attributes Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_instance, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_INSTANCE_ATTR(_name, _mode) \
+static struct iscsi_stat_instance_attribute \
+ iscsi_stat_instance_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_instance_show_attr_##_name, \
+ iscsi_stat_instance_store_attr_##_name);
+
+#define ISCSI_STAT_INSTANCE_ATTR_RO(_name) \
+static struct iscsi_stat_instance_attribute \
+ iscsi_stat_instance_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_instance_show_attr_##_name);
+
+static ssize_t iscsi_stat_instance_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(inst);
+
+static ssize_t iscsi_stat_instance_show_attr_min_ver(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(min_ver);
+
+static ssize_t iscsi_stat_instance_show_attr_max_ver(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(max_ver);
+
+static ssize_t iscsi_stat_instance_show_attr_portals(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_num_tpg_nps);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(portals);
+
+static ssize_t iscsi_stat_instance_show_attr_nodes(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(nodes);
+
+static ssize_t iscsi_stat_instance_show_attr_sessions(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_nsessions);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(sessions);
+
+static ssize_t iscsi_stat_instance_show_attr_fail_sess(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+ u32 sess_err_count;
+
+ spin_lock_bh(&sess_err->lock);
+ sess_err_count = (sess_err->digest_errors +
+ sess_err->cxn_timeout_errors +
+ sess_err->pdu_format_errors);
+ spin_unlock_bh(&sess_err->lock);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(fail_sess);
+
+static ssize_t iscsi_stat_instance_show_attr_fail_type(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ sess_err->last_sess_failure_type);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(fail_type);
+
+static ssize_t iscsi_stat_instance_show_attr_fail_rem_name(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ sess_err->last_sess_fail_rem_name[0] ?
+ sess_err->last_sess_fail_rem_name : NONE);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(fail_rem_name);
+
+static ssize_t iscsi_stat_instance_show_attr_disc_time(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(disc_time);
+
+static ssize_t iscsi_stat_instance_show_attr_description(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(description);
+
+static ssize_t iscsi_stat_instance_show_attr_vendor(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n");
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(vendor);
+
+static ssize_t iscsi_stat_instance_show_attr_version(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION);
+}
+ISCSI_STAT_INSTANCE_ATTR_RO(version);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_instance, iscsi_wwn_stat_grps,
+ iscsi_instance_group);
+
+static struct configfs_attribute *iscsi_stat_instance_attrs[] = {
+ &iscsi_stat_instance_inst.attr,
+ &iscsi_stat_instance_min_ver.attr,
+ &iscsi_stat_instance_max_ver.attr,
+ &iscsi_stat_instance_portals.attr,
+ &iscsi_stat_instance_nodes.attr,
+ &iscsi_stat_instance_sessions.attr,
+ &iscsi_stat_instance_fail_sess.attr,
+ &iscsi_stat_instance_fail_type.attr,
+ &iscsi_stat_instance_fail_rem_name.attr,
+ &iscsi_stat_instance_disc_time.attr,
+ &iscsi_stat_instance_description.attr,
+ &iscsi_stat_instance_vendor.attr,
+ &iscsi_stat_instance_version.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_instance_item_ops = {
+ .show_attribute = iscsi_stat_instance_attr_show,
+ .store_attribute = iscsi_stat_instance_attr_store,
+};
+
+struct config_item_type iscsi_stat_instance_cit = {
+ .ct_item_ops = &iscsi_stat_instance_item_ops,
+ .ct_attrs = iscsi_stat_instance_attrs,
+ .ct_owner = THIS_MODULE,
+};
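+
+/*
+ * iscsi_stat_instance_cit (and the other config_item_types below) are
+ * attached to default groups of the target's se_wwn by the iSCSI configfs
+ * code, so each attribute surfaces as a read-only file, for example
+ * (assuming the default group naming used by that code):
+ *
+ *   /sys/kernel/config/target/iscsi/<target-iqn>/fabric_statistics/iscsi_instance/sessions
+ */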
+
+/*
+ * Instance Session Failure Stats Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_sess_err, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_SESS_ERR_ATTR(_name, _mode) \
+static struct iscsi_stat_sess_err_attribute \
+ iscsi_stat_sess_err_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_sess_err_show_attr_##_name, \
+ iscsi_stat_sess_err_store_attr_##_name);
+
+#define ISCSI_STAT_SESS_ERR_ATTR_RO(_name) \
+static struct iscsi_stat_sess_err_attribute \
+ iscsi_stat_sess_err_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_sess_err_show_attr_##_name);
+
+static ssize_t iscsi_stat_sess_err_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(inst);
+
+static ssize_t iscsi_stat_sess_err_show_attr_digest_errors(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(digest_errors);
+
+static ssize_t iscsi_stat_sess_err_show_attr_cxn_errors(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(cxn_errors);
+
+static ssize_t iscsi_stat_sess_err_show_attr_format_errors(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors);
+}
+ISCSI_STAT_SESS_ERR_ATTR_RO(format_errors);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_sess_err, iscsi_wwn_stat_grps,
+ iscsi_sess_err_group);
+
+static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = {
+ &iscsi_stat_sess_err_inst.attr,
+ &iscsi_stat_sess_err_digest_errors.attr,
+ &iscsi_stat_sess_err_cxn_errors.attr,
+ &iscsi_stat_sess_err_format_errors.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_sess_err_item_ops = {
+ .show_attribute = iscsi_stat_sess_err_attr_show,
+ .store_attribute = iscsi_stat_sess_err_attr_store,
+};
+
+struct config_item_type iscsi_stat_sess_err_cit = {
+ .ct_item_ops = &iscsi_stat_sess_err_item_ops,
+ .ct_attrs = iscsi_stat_sess_err_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Target Attributes Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_TGT_ATTR(_name, _mode) \
+static struct iscsi_stat_tgt_attr_attribute \
+ iscsi_stat_tgt_attr_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+	iscsi_stat_tgt_attr_show_attr_##_name, \
+ iscsi_stat_tgt_attr_store_attr_##_name);
+
+#define ISCSI_STAT_TGT_ATTR_RO(_name) \
+static struct iscsi_stat_tgt_attr_attribute \
+ iscsi_stat_tgt_attr_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_tgt_attr_show_attr_##_name);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_TGT_ATTR_RO(inst);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_indx(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+ISCSI_STAT_TGT_ATTR_RO(indx);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_login_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ u32 fail_count;
+
+ spin_lock(&lstat->lock);
+ fail_count = (lstat->redirects + lstat->authorize_fails +
+ lstat->authenticate_fails + lstat->negotiate_fails +
+ lstat->other_fails);
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", fail_count);
+}
+ISCSI_STAT_TGT_ATTR_RO(login_fails);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_time(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ u32 last_fail_time;
+
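+	/*
+	 * Report the failure timestamp in hundredths of a second since boot
+	 * (jiffies relative to INITIAL_JIFFIES, scaled by 100/HZ); 0 means no
+	 * login failure has been recorded yet.
+	 */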
+ spin_lock(&lstat->lock);
+ last_fail_time = lstat->last_fail_time ?
+ (u32)(((u32)lstat->last_fail_time -
+ INITIAL_JIFFIES) * 100 / HZ) : 0;
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time);
+}
+ISCSI_STAT_TGT_ATTR_RO(last_fail_time);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_type(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ u32 last_fail_type;
+
+ spin_lock(&lstat->lock);
+ last_fail_type = lstat->last_fail_type;
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type);
+}
+ISCSI_STAT_TGT_ATTR_RO(last_fail_type);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_name(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ unsigned char buf[224];
+
+ spin_lock(&lstat->lock);
+	snprintf(buf, sizeof(buf), "%s", lstat->last_intr_fail_name[0] ?
+ lstat->last_intr_fail_name : NONE);
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", buf);
+}
+ISCSI_STAT_TGT_ATTR_RO(fail_intr_name);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ unsigned char buf[8];
+
+ spin_lock(&lstat->lock);
+	snprintf(buf, sizeof(buf), "%s",
+		(lstat->last_intr_fail_ip_family == AF_INET6) ?
+				"ipv6" : "ipv4");
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", buf);
+}
+ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
+
+static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ unsigned char buf[32];
+
+ spin_lock(&lstat->lock);
+ if (lstat->last_intr_fail_ip_family == AF_INET6)
+		snprintf(buf, sizeof(buf), "[%s]", lstat->last_intr_fail_ip_addr);
+	else
+		snprintf(buf, sizeof(buf), "%s", lstat->last_intr_fail_ip_addr);
+ spin_unlock(&lstat->lock);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", buf);
+}
+ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps,
+ iscsi_tgt_attr_group);
+
+static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = {
+ &iscsi_stat_tgt_attr_inst.attr,
+ &iscsi_stat_tgt_attr_indx.attr,
+ &iscsi_stat_tgt_attr_login_fails.attr,
+ &iscsi_stat_tgt_attr_last_fail_time.attr,
+ &iscsi_stat_tgt_attr_last_fail_type.attr,
+ &iscsi_stat_tgt_attr_fail_intr_name.attr,
+ &iscsi_stat_tgt_attr_fail_intr_addr_type.attr,
+ &iscsi_stat_tgt_attr_fail_intr_addr.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_tgt_attr_item_ops = {
+ .show_attribute = iscsi_stat_tgt_attr_attr_show,
+ .store_attribute = iscsi_stat_tgt_attr_attr_store,
+};
+
+struct config_item_type iscsi_stat_tgt_attr_cit = {
+ .ct_item_ops = &iscsi_stat_tgt_attr_item_ops,
+ .ct_attrs = iscsi_stat_tgt_attr_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Target Login Stats Table
+ */
+CONFIGFS_EATTR_STRUCT(iscsi_stat_login, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_LOGIN(_name, _mode) \
+static struct iscsi_stat_login_attribute \
+ iscsi_stat_login_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_login_show_attr_##_name, \
+ iscsi_stat_login_store_attr_##_name);
+
+#define ISCSI_STAT_LOGIN_RO(_name) \
+static struct iscsi_stat_login_attribute \
+ iscsi_stat_login_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_login_show_attr_##_name);
+
+static ssize_t iscsi_stat_login_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_LOGIN_RO(inst);
+
+static ssize_t iscsi_stat_login_show_attr_indx(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+ISCSI_STAT_LOGIN_RO(indx);
+
+static ssize_t iscsi_stat_login_show_attr_accepts(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->accepts);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(accepts);
+
+static ssize_t iscsi_stat_login_show_attr_other_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->other_fails);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(other_fails);
+
+static ssize_t iscsi_stat_login_show_attr_redirects(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->redirects);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(redirects);
+
+static ssize_t iscsi_stat_login_show_attr_authorize_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authorize_fails);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(authorize_fails);
+
+static ssize_t iscsi_stat_login_show_attr_authenticate_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->authenticate_fails);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(authenticate_fails);
+
+static ssize_t iscsi_stat_login_show_attr_negotiate_fails(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_login_stats *lstat = &tiqn->login_stats;
+ ssize_t ret;
+
+ spin_lock(&lstat->lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lstat->negotiate_fails);
+ spin_unlock(&lstat->lock);
+
+ return ret;
+}
+ISCSI_STAT_LOGIN_RO(negotiate_fails);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_login, iscsi_wwn_stat_grps,
+ iscsi_login_stats_group);
+
+static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = {
+ &iscsi_stat_login_inst.attr,
+ &iscsi_stat_login_indx.attr,
+ &iscsi_stat_login_accepts.attr,
+ &iscsi_stat_login_other_fails.attr,
+ &iscsi_stat_login_redirects.attr,
+ &iscsi_stat_login_authorize_fails.attr,
+ &iscsi_stat_login_authenticate_fails.attr,
+ &iscsi_stat_login_negotiate_fails.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_login_stats_item_ops = {
+ .show_attribute = iscsi_stat_login_attr_show,
+ .store_attribute = iscsi_stat_login_attr_store,
+};
+
+struct config_item_type iscsi_stat_login_cit = {
+ .ct_item_ops = &iscsi_stat_login_stats_item_ops,
+ .ct_attrs = iscsi_stat_login_stats_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Target Logout Stats Table
+ */
+
+CONFIGFS_EATTR_STRUCT(iscsi_stat_logout, iscsi_wwn_stat_grps);
+#define ISCSI_STAT_LOGOUT(_name, _mode) \
+static struct iscsi_stat_logout_attribute \
+ iscsi_stat_logout_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_logout_show_attr_##_name, \
+ iscsi_stat_logout_store_attr_##_name);
+
+#define ISCSI_STAT_LOGOUT_RO(_name) \
+static struct iscsi_stat_logout_attribute \
+ iscsi_stat_logout_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_logout_show_attr_##_name);
+
+static ssize_t iscsi_stat_logout_show_attr_inst(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_LOGOUT_RO(inst);
+
+static ssize_t iscsi_stat_logout_show_attr_indx(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
+}
+ISCSI_STAT_LOGOUT_RO(indx);
+
+static ssize_t iscsi_stat_logout_show_attr_normal_logouts(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts);
+}
+ISCSI_STAT_LOGOUT_RO(normal_logouts);
+
+static ssize_t iscsi_stat_logout_show_attr_abnormal_logouts(
+ struct iscsi_wwn_stat_grps *igrps, char *page)
+{
+ struct iscsi_tiqn *tiqn = container_of(igrps,
+ struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
+
+ return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts);
+}
+ISCSI_STAT_LOGOUT_RO(abnormal_logouts);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_logout, iscsi_wwn_stat_grps,
+ iscsi_logout_stats_group);
+
+static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = {
+ &iscsi_stat_logout_inst.attr,
+ &iscsi_stat_logout_indx.attr,
+ &iscsi_stat_logout_normal_logouts.attr,
+ &iscsi_stat_logout_abnormal_logouts.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_logout_stats_item_ops = {
+ .show_attribute = iscsi_stat_logout_attr_show,
+ .store_attribute = iscsi_stat_logout_attr_store,
+};
+
+struct config_item_type iscsi_stat_logout_cit = {
+ .ct_item_ops = &iscsi_stat_logout_stats_item_ops,
+ .ct_attrs = iscsi_stat_logout_stats_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/*
+ * Session Stats Table
+ */
+
+CONFIGFS_EATTR_STRUCT(iscsi_stat_sess, iscsi_node_stat_grps);
+#define ISCSI_STAT_SESS(_name, _mode) \
+static struct iscsi_stat_sess_attribute \
+ iscsi_stat_sess_##_name = \
+ __CONFIGFS_EATTR(_name, _mode, \
+ iscsi_stat_sess_show_attr_##_name, \
+ iscsi_stat_sess_store_attr_##_name);
+
+#define ISCSI_STAT_SESS_RO(_name) \
+static struct iscsi_stat_sess_attribute \
+ iscsi_stat_sess_##_name = \
+ __CONFIGFS_EATTR_RO(_name, \
+ iscsi_stat_sess_show_attr_##_name);
+
+static ssize_t iscsi_stat_sess_show_attr_inst(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_wwn *wwn = acl->se_node_acl.se_tpg->se_tpg_wwn;
+ struct iscsi_tiqn *tiqn = container_of(wwn,
+ struct iscsi_tiqn, tiqn_wwn);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+}
+ISCSI_STAT_SESS_RO(inst);
+
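+/*
+ * Normal sessions report ISCSI_NODE_INDEX here; discovery sessions
+ * (sess_ops->SessionType != 0) report 0.
+ */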
+static ssize_t iscsi_stat_sess_show_attr_node(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(node);
+
+static ssize_t iscsi_stat_sess_show_attr_indx(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ sess->session_index);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(indx);
+
+static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(cmd_pdus);
+
+static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(rsp_pdus);
+
+static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%llu\n",
+ (unsigned long long)sess->tx_data_octets);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(txdata_octs);
+
+static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%llu\n",
+ (unsigned long long)sess->rx_data_octets);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(rxdata_octs);
+
+static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ sess->conn_digest_errors);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(conn_digest_errors);
+
+static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
+ struct iscsi_node_stat_grps *igrps, char *page)
+{
+ struct iscsi_node_acl *acl = container_of(igrps,
+ struct iscsi_node_acl, node_stat_grps);
+ struct se_node_acl *se_nacl = &acl->se_node_acl;
+ struct iscsi_session *sess;
+ struct se_session *se_sess;
+ ssize_t ret = 0;
+
+ spin_lock_bh(&se_nacl->nacl_sess_lock);
+ se_sess = se_nacl->nacl_sess;
+ if (se_sess) {
+ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ if (sess)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ sess->conn_timeout_errors);
+ }
+ spin_unlock_bh(&se_nacl->nacl_sess_lock);
+
+ return ret;
+}
+ISCSI_STAT_SESS_RO(conn_timeout_errors);
+
+CONFIGFS_EATTR_OPS(iscsi_stat_sess, iscsi_node_stat_grps,
+ iscsi_sess_stats_group);
+
+static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = {
+ &iscsi_stat_sess_inst.attr,
+ &iscsi_stat_sess_node.attr,
+ &iscsi_stat_sess_indx.attr,
+ &iscsi_stat_sess_cmd_pdus.attr,
+ &iscsi_stat_sess_rsp_pdus.attr,
+ &iscsi_stat_sess_txdata_octs.attr,
+ &iscsi_stat_sess_rxdata_octs.attr,
+ &iscsi_stat_sess_conn_digest_errors.attr,
+ &iscsi_stat_sess_conn_timeout_errors.attr,
+ NULL,
+};
+
+static struct configfs_item_operations iscsi_stat_sess_stats_item_ops = {
+ .show_attribute = iscsi_stat_sess_attr_show,
+ .store_attribute = iscsi_stat_sess_attr_store,
+};
+
+struct config_item_type iscsi_stat_sess_cit = {
+ .ct_item_ops = &iscsi_stat_sess_stats_item_ops,
+ .ct_attrs = iscsi_stat_sess_stats_attrs,
+ .ct_owner = THIS_MODULE,
+};
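+
+/*
+ * Unlike the tables above, which hang off the target WWN, the session stats
+ * table is attached per node ACL (struct iscsi_node_acl->node_stat_grps),
+ * so its attributes appear under each initiator's ACL directory in configfs
+ * rather than under the target-wide statistics groups.
+ */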
diff --git a/drivers/target/iscsi/iscsi_target_stat.h b/drivers/target/iscsi/iscsi_target_stat.h
new file mode 100644
index 00000000000..3ff76b4faad
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_stat.h
@@ -0,0 +1,64 @@
+#ifndef ISCSI_TARGET_STAT_H
+#define ISCSI_TARGET_STAT_H
+
+/*
+ * For struct iscsi_tiqn->tiqn_wwn default groups
+ */
+extern struct config_item_type iscsi_stat_instance_cit;
+extern struct config_item_type iscsi_stat_sess_err_cit;
+extern struct config_item_type iscsi_stat_tgt_attr_cit;
+extern struct config_item_type iscsi_stat_login_cit;
+extern struct config_item_type iscsi_stat_logout_cit;
+
+/*
+ * For struct iscsi_session->se_sess default groups
+ */
+extern struct config_item_type iscsi_stat_sess_cit;
+
+/* iSCSI session error types */
+#define ISCSI_SESS_ERR_UNKNOWN 0
+#define ISCSI_SESS_ERR_DIGEST 1
+#define ISCSI_SESS_ERR_CXN_TIMEOUT 2
+#define ISCSI_SESS_ERR_PDU_FORMAT 3
+
+/* iSCSI session error stats */
+struct iscsi_sess_err_stats {
+ spinlock_t lock;
+ u32 digest_errors;
+ u32 cxn_timeout_errors;
+ u32 pdu_format_errors;
+ u32 last_sess_failure_type;
+ char last_sess_fail_rem_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI login failure types (sub oids) */
+#define ISCSI_LOGIN_FAIL_OTHER 2
+#define ISCSI_LOGIN_FAIL_REDIRECT 3
+#define ISCSI_LOGIN_FAIL_AUTHORIZE 4
+#define ISCSI_LOGIN_FAIL_AUTHENTICATE 5
+#define ISCSI_LOGIN_FAIL_NEGOTIATE 6
+
+/* iSCSI login stats */
+struct iscsi_login_stats {
+ spinlock_t lock;
+ u32 accepts;
+ u32 other_fails;
+ u32 redirects;
+ u32 authorize_fails;
+ u32 authenticate_fails;
+ u32 negotiate_fails; /* used for notifications */
+ u64 last_fail_time; /* time stamp (jiffies) */
+ u32 last_fail_type;
+ int last_intr_fail_ip_family;
+ unsigned char last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
+ char last_intr_fail_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI logout stats */
+struct iscsi_logout_stats {
+ spinlock_t lock;
+ u32 normal_logouts;
+ u32 abnormal_logouts;
+} ____cacheline_aligned;
+
+#endif /*** ISCSI_TARGET_STAT_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
new file mode 100644
index 00000000000..db1fe1ec84d
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -0,0 +1,849 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target specific Task Management functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <asm/unaligned.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_device.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_tmr.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
+u8 iscsit_tmr_abort_task(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_cmd *ref_cmd;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+ struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+ struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
+
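+	/*
+	 * Per RFC 3720, when the referenced task is no longer around the TMR
+	 * is answered with "Function complete" if RefCmdSN falls within the
+	 * valid CmdSN window (the command may have already completed), and
+	 * with "Task does not exist" otherwise.
+	 */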
+ ref_cmd = iscsit_find_cmd_from_itt(conn, hdr->rtt);
+ if (!ref_cmd) {
+ pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
+ " %hu.\n", hdr->rtt, conn->cid);
+ return ((hdr->refcmdsn >= conn->sess->exp_cmd_sn) &&
+ (hdr->refcmdsn <= conn->sess->max_cmd_sn)) ?
+ ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
+ }
+ if (ref_cmd->cmd_sn != hdr->refcmdsn) {
+ pr_err("RefCmdSN 0x%08x does not equal"
+ " task's CmdSN 0x%08x. Rejecting ABORT_TASK.\n",
+ hdr->refcmdsn, ref_cmd->cmd_sn);
+ return ISCSI_TMF_RSP_REJECTED;
+ }
+
+ se_tmr->ref_task_tag = hdr->rtt;
+ se_tmr->ref_cmd = &ref_cmd->se_cmd;
+ tmr_req->ref_cmd_sn = hdr->refcmdsn;
+ tmr_req->exp_data_sn = hdr->exp_datasn;
+
+ return ISCSI_TMF_RSP_COMPLETE;
+}
+
+/*
+ * Called from iscsit_handle_task_mgt_cmd().
+ */
+int iscsit_tmr_task_warm_reset(
+ struct iscsi_conn *conn,
+ struct iscsi_tmr_req *tmr_req,
+ unsigned char *buf)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+#if 0
+ struct iscsi_init_task_mgt_cmnd *hdr =
+ (struct iscsi_init_task_mgt_cmnd *) buf;
+#endif
+ if (!na->tmr_warm_reset) {
+ pr_err("TMR Opcode TARGET_WARM_RESET authorization"
+ " failed for Initiator Node: %s\n",
+ sess->se_sess->se_node_acl->initiatorname);
+ return -1;
+ }
+ /*
+ * Do the real work in transport_generic_do_tmr().
+ */
+ return 0;
+}
+
+int iscsit_tmr_task_cold_reset(
+ struct iscsi_conn *conn,
+ struct iscsi_tmr_req *tmr_req,
+ unsigned char *buf)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ if (!na->tmr_cold_reset) {
+ pr_err("TMR Opcode TARGET_COLD_RESET authorization"
+ " failed for Initiator Node: %s\n",
+ sess->se_sess->se_node_acl->initiatorname);
+ return -1;
+ }
+ /*
+ * Do the real work in transport_generic_do_tmr().
+ */
+ return 0;
+}
+
+u8 iscsit_tmr_task_reassign(
+ struct iscsi_cmd *cmd,
+ unsigned char *buf)
+{
+ struct iscsi_cmd *ref_cmd = NULL;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_conn_recovery *cr = NULL;
+ struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+ struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+ struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
+ int ret;
+
+ pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
+ " RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
+ hdr->itt, hdr->rtt, hdr->exp_datasn, conn->cid);
+
+ if (conn->sess->sess_ops->ErrorRecoveryLevel != 2) {
+ pr_err("TMR TASK_REASSIGN not supported in ERL<2,"
+ " ignoring request.\n");
+ return ISCSI_TMF_RSP_NOT_SUPPORTED;
+ }
+
+ ret = iscsit_find_cmd_for_recovery(conn->sess, &ref_cmd, &cr, hdr->rtt);
+ if (ret == -2) {
+		pr_err("Command ITT: 0x%08x is still allegiant to CID:"
+ " %hu\n", ref_cmd->init_task_tag, cr->cid);
+ return ISCSI_TMF_RSP_TASK_ALLEGIANT;
+ } else if (ret == -1) {
+ pr_err("Unable to locate RefTaskTag: 0x%08x in"
+ " connection recovery command list.\n", hdr->rtt);
+ return ISCSI_TMF_RSP_NO_TASK;
+ }
+ /*
+ * Temporary check to prevent connection recovery for
+ * connections with a differing MaxRecvDataSegmentLength.
+ */
+ if (cr->maxrecvdatasegmentlength !=
+ conn->conn_ops->MaxRecvDataSegmentLength) {
+ pr_err("Unable to perform connection recovery for"
+ " differing MaxRecvDataSegmentLength, rejecting"
+ " TMR TASK_REASSIGN.\n");
+ return ISCSI_TMF_RSP_REJECTED;
+ }
+
+ se_tmr->ref_task_tag = hdr->rtt;
+ se_tmr->ref_cmd = &ref_cmd->se_cmd;
+ se_tmr->ref_task_lun = get_unaligned_le64(&hdr->lun);
+ tmr_req->ref_cmd_sn = hdr->refcmdsn;
+ tmr_req->exp_data_sn = hdr->exp_datasn;
+ tmr_req->conn_recovery = cr;
+ tmr_req->task_reassign = 1;
+ /*
+ * Command can now be reassigned to a new connection.
+ * The task management response must be sent before the
+	 * reassignment actually happens. See iscsit_tmr_post_handler().
+ */
+ return ISCSI_TMF_RSP_COMPLETE;
+}
+
+static void iscsit_task_reassign_remove_cmd(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn_recovery *cr,
+ struct iscsi_session *sess)
+{
+ int ret;
+
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ ret = iscsit_remove_cmd_from_connection_recovery(cmd, sess);
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ if (!ret) {
+ pr_debug("iSCSI connection recovery successful for CID:"
+ " %hu on SID: %u\n", cr->cid, sess->sid);
+ iscsit_remove_active_connection_recovery_entry(cr, sess);
+ }
+}
+
+static int iscsit_task_reassign_complete_nop_out(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd = se_tmr->ref_cmd;
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_conn_recovery *cr;
+
+ if (!cmd->cr) {
+ pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+ " is NULL!\n", cmd->init_task_tag);
+ return -1;
+ }
+ cr = cmd->cr;
+
+ /*
+	 * Reset the StatSN so a new one for this command's new connection
+ * will be assigned.
+ * Reset the ExpStatSN as well so we may receive Status SNACKs.
+ */
+ cmd->stat_sn = cmd->exp_stat_sn = 0;
+
+ iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ cmd->i_state = ISTATE_SEND_NOPIN;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+}
+
+static int iscsit_task_reassign_complete_write(
+ struct iscsi_cmd *cmd,
+ struct iscsi_tmr_req *tmr_req)
+{
+ int no_build_r2ts = 0;
+ u32 length = 0, offset = 0;
+ struct iscsi_conn *conn = cmd->conn;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ /*
+	 * The Initiator must not send an R2T SNACK with a BegRun less than
+ * the TMR TASK_REASSIGN's ExpDataSN.
+ */
+ if (!tmr_req->exp_data_sn) {
+ cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = 0;
+ } else {
+ cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
+ }
+
+ /*
+ * The TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN the
+ * Initiator is expecting. The Target controls all WRITE operations
+	 * so if we have received all DataOUT we can safely ignore the Initiator.
+ */
+ if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
+ if (!atomic_read(&cmd->transport_sent)) {
+ pr_debug("WRITE ITT: 0x%08x: t_state: %d"
+ " never sent to transport\n",
+ cmd->init_task_tag, cmd->se_cmd.t_state);
+ return transport_generic_handle_data(se_cmd);
+ }
+
+ cmd->i_state = ISTATE_SEND_STATUS;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+ }
+
+ /*
+	 * Special case to deal with DataSequenceInOrder=No and Non-Immediate
+ * Unsolicited DataOut.
+ */
+ if (cmd->unsolicited_data) {
+ cmd->unsolicited_data = 0;
+
+ offset = cmd->next_burst_len = cmd->write_data_done;
+
+ if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
+ cmd->data_length) {
+ no_build_r2ts = 1;
+ length = (cmd->data_length - offset);
+ } else
+ length = (conn->sess->sess_ops->FirstBurstLength - offset);
+
+ spin_lock_bh(&cmd->r2t_lock);
+ if (iscsit_add_r2t_to_list(cmd, offset, length, 0, 0) < 0) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+ cmd->outstanding_r2ts++;
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ if (no_build_r2ts)
+ return 0;
+ }
+ /*
+ * iscsit_build_r2ts_for_cmd() can handle the rest from here.
+ */
+ return iscsit_build_r2ts_for_cmd(cmd, conn, 2);
+}
+
+static int iscsit_task_reassign_complete_read(
+ struct iscsi_cmd *cmd,
+ struct iscsi_tmr_req *tmr_req)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_datain_req *dr;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ /*
+ * The Initiator must not send a Data SNACK with a BegRun less than
+ * the TMR TASK_REASSIGN's ExpDataSN.
+ */
+ if (!tmr_req->exp_data_sn) {
+ cmd->cmd_flags &= ~ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = 0;
+ } else {
+ cmd->cmd_flags |= ICF_GOT_DATACK_SNACK;
+ cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);
+ }
+
+ if (!atomic_read(&cmd->transport_sent)) {
+ pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
+ " transport\n", cmd->init_task_tag,
+ cmd->se_cmd.t_state);
+ transport_generic_handle_cdb(se_cmd);
+ return 0;
+ }
+
+ if (!atomic_read(&se_cmd->t_transport_complete)) {
+ pr_err("READ ITT: 0x%08x: t_state: %d, never returned"
+ " from transport\n", cmd->init_task_tag,
+ cmd->se_cmd.t_state);
+ return -1;
+ }
+
+ dr = iscsit_allocate_datain_req();
+ if (!dr)
+ return -1;
+ /*
+ * The TMR TASK_REASSIGN's ExpDataSN contains the next DataSN the
+ * Initiator is expecting.
+ */
+ dr->data_sn = dr->begrun = tmr_req->exp_data_sn;
+ dr->runlength = 0;
+ dr->generate_recovery_values = 1;
+ dr->recovery = DATAIN_CONNECTION_RECOVERY;
+
+ iscsit_attach_datain_req(cmd, dr);
+
+ cmd->i_state = ISTATE_SEND_DATAIN;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+}
+
+static int iscsit_task_reassign_complete_none(
+ struct iscsi_cmd *cmd,
+ struct iscsi_tmr_req *tmr_req)
+{
+ struct iscsi_conn *conn = cmd->conn;
+
+ cmd->i_state = ISTATE_SEND_STATUS;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+}
+
+static int iscsit_task_reassign_complete_scsi_cmnd(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd = se_tmr->ref_cmd;
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_conn_recovery *cr;
+
+ if (!cmd->cr) {
+ pr_err("struct iscsi_conn_recovery pointer for ITT: 0x%08x"
+ " is NULL!\n", cmd->init_task_tag);
+ return -1;
+ }
+ cr = cmd->cr;
+
+ /*
+	 * Reset the StatSN so a new one for this command's new connection
+ * will be assigned.
+ * Reset the ExpStatSN as well so we may receive Status SNACKs.
+ */
+ cmd->stat_sn = cmd->exp_stat_sn = 0;
+
+ iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ cmd->i_state = ISTATE_SEND_STATUS;
+ iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ return 0;
+ }
+
+ switch (cmd->data_direction) {
+ case DMA_TO_DEVICE:
+ return iscsit_task_reassign_complete_write(cmd, tmr_req);
+ case DMA_FROM_DEVICE:
+ return iscsit_task_reassign_complete_read(cmd, tmr_req);
+ case DMA_NONE:
+ return iscsit_task_reassign_complete_none(cmd, tmr_req);
+ default:
+ pr_err("Unknown cmd->data_direction: 0x%02x\n",
+ cmd->data_direction);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int iscsit_task_reassign_complete(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd;
+ struct iscsi_cmd *cmd;
+ int ret = 0;
+
+ if (!se_tmr->ref_cmd) {
+ pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
+ return -1;
+ }
+ se_cmd = se_tmr->ref_cmd;
+ cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ cmd->conn = conn;
+
+ switch (cmd->iscsi_opcode) {
+ case ISCSI_OP_NOOP_OUT:
+ ret = iscsit_task_reassign_complete_nop_out(tmr_req, conn);
+ break;
+ case ISCSI_OP_SCSI_CMD:
+ ret = iscsit_task_reassign_complete_scsi_cmnd(tmr_req, conn);
+ break;
+ default:
+ pr_err("Illegal iSCSI Opcode 0x%02x during"
+			" command reallegiance\n", cmd->iscsi_opcode);
+ return -1;
+ }
+
+ if (ret != 0)
+ return ret;
+
+	pr_debug("Completed connection reallegiance for Opcode: 0x%02x,"
+ " ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode,
+ cmd->init_task_tag, conn->cid);
+
+ return 0;
+}
+
+/*
+ * Handles special after-the-fact actions related to TMRs.
+ * Right now the only one that its really needed for is
+ * connection recovery releated TASK_REASSIGN.
+ */
+int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+ struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
+ struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
+
+ if (tmr_req->task_reassign &&
+ (se_tmr->response == ISCSI_TMF_RSP_COMPLETE))
+ return iscsit_task_reassign_complete(tmr_req, conn);
+
+ return 0;
+}
+
+/*
+ * Nothing to do here, but leave it for good measure. :-)
+ */
+int iscsit_task_reassign_prepare_read(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ return 0;
+}
+
+static void iscsit_task_reassign_prepare_unsolicited_dataout(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ int i, j;
+ struct iscsi_pdu *pdu = NULL;
+ struct iscsi_seq *seq = NULL;
+
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ cmd->data_sn = 0;
+
+ if (cmd->immediate_data)
+ cmd->r2t_offset += (cmd->first_burst_len -
+ cmd->seq_start_offset);
+
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ cmd->write_data_done -= (cmd->immediate_data) ?
+ (cmd->first_burst_len -
+ cmd->seq_start_offset) :
+ cmd->first_burst_len;
+ cmd->first_burst_len = 0;
+ return;
+ }
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ if ((pdu->offset >= cmd->seq_start_offset) &&
+ ((pdu->offset + pdu->length) <=
+ cmd->seq_end_offset)) {
+ cmd->first_burst_len -= pdu->length;
+ cmd->write_data_done -= pdu->length;
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+ } else {
+ for (i = 0; i < cmd->seq_count; i++) {
+ seq = &cmd->seq_list[i];
+
+ if (seq->type != SEQTYPE_UNSOLICITED)
+ continue;
+
+ cmd->write_data_done -=
+ (seq->offset - seq->orig_offset);
+ cmd->first_burst_len = 0;
+ seq->data_sn = 0;
+ seq->offset = seq->orig_offset;
+ seq->next_burst_len = 0;
+ seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+ if (conn->sess->sess_ops->DataPDUInOrder)
+ continue;
+
+ for (j = 0; j < seq->pdu_count; j++) {
+ pdu = &cmd->pdu_list[j+seq->pdu_start];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+ }
+}
+
+int iscsit_task_reassign_prepare_write(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd = se_tmr->ref_cmd;
+ struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_pdu *pdu = NULL;
+ struct iscsi_r2t *r2t = NULL, *r2t_tmp;
+ int first_incomplete_r2t = 1, i = 0;
+
+ /*
+ * The command was in the process of receiving Unsolicited DataOUT when
+ * the connection failed.
+ */
+ if (cmd->unsolicited_data)
+ iscsit_task_reassign_prepare_unsolicited_dataout(cmd, conn);
+
+ /*
+ * The Initiator is requesting R2Ts starting from zero, skip
+ * checking acknowledged R2Ts and start checking struct iscsi_r2ts
+ * greater than zero.
+ */
+ if (!tmr_req->exp_data_sn)
+ goto drop_unacknowledged_r2ts;
+
+ /*
+ * We now check that the PDUs in DataOUT sequences below
+ * the TMR TASK_REASSIGN ExpDataSN (R2TSN the Initiator is
+ * expecting next) have all the DataOUT they require to complete
+ * the DataOUT sequence. First scan from R2TSN 0 to TMR
+ * TASK_REASSIGN ExpDataSN-1.
+ *
+ * If we have not received all DataOUT in question, we must
+ * make sure to make the appropriate changes to values in
+ * struct iscsi_cmd (and elsewhere depending on session parameters)
+ * so iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write()
+ * will resend a new R2T for the DataOUT sequences in question.
+ */
+ spin_lock_bh(&cmd->r2t_lock);
+ if (list_empty(&cmd->cmd_r2t_list)) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+
+ if (r2t->r2t_sn >= tmr_req->exp_data_sn)
+ continue;
+ /*
+ * Safely ignore Recovery R2Ts and R2Ts that have completed
+ * DataOUT sequences.
+ */
+ if (r2t->seq_complete)
+ continue;
+
+ if (r2t->recovery_r2t)
+ continue;
+
+ /*
+ * DataSequenceInOrder=Yes:
+ *
+ * Taking into account the iSCSI implementation requirement of
+ * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
+ * DataSequenceInOrder=Yes, we must take into consideration
+ * the following:
+ *
+ * DataSequenceInOrder=No:
+ *
+ * Taking into account that the Initiator controls the (possibly
+ * random) PDU Order in (possibly random) Sequence Order of
+ * DataOUT the target requests with R2Ts, we must take into
+ * consideration the following:
+ *
+ * DataPDUInOrder=Yes for DataSequenceInOrder=[Yes,No]:
+ *
+ * While processing non-complete R2T DataOUT sequence requests
+ * the Target will re-request only the total sequence length
+ * minus current received offset. This is because we must
+ * assume the initiator will continue sending DataOUT from the
+ * last PDU before the connection failed.
+ *
+ * DataPDUInOrder=No for DataSequenceInOrder=[Yes,No]:
+ *
+ * While processing non-complete R2T DataOUT sequence requests
+ * the Target will re-request the entire DataOUT sequence if
+ * any single PDU is missing from the sequence. This is because
+ * we have no logical method to determine the next PDU offset,
+ * and we must assume the Initiator will be sending any random
+ * PDU offset in the current sequence after TASK_REASSIGN
+ * has completed.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder) {
+ if (!first_incomplete_r2t) {
+ cmd->r2t_offset -= r2t->xfer_len;
+ goto next;
+ }
+
+ if (conn->sess->sess_ops->DataPDUInOrder) {
+ cmd->data_sn = 0;
+ cmd->r2t_offset -= (r2t->xfer_len -
+ cmd->next_burst_len);
+ first_incomplete_r2t = 0;
+ goto next;
+ }
+
+ cmd->data_sn = 0;
+ cmd->r2t_offset -= r2t->xfer_len;
+
+ for (i = 0; i < cmd->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ if ((pdu->offset >= r2t->offset) &&
+ (pdu->offset < (r2t->offset +
+ r2t->xfer_len))) {
+ cmd->next_burst_len -= pdu->length;
+ cmd->write_data_done -= pdu->length;
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+
+ first_incomplete_r2t = 0;
+ } else {
+ struct iscsi_seq *seq;
+
+ seq = iscsit_get_seq_holder(cmd, r2t->offset,
+ r2t->xfer_len);
+ if (!seq) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ cmd->write_data_done -=
+ (seq->offset - seq->orig_offset);
+ seq->data_sn = 0;
+ seq->offset = seq->orig_offset;
+ seq->next_burst_len = 0;
+ seq->status = DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY;
+
+ cmd->seq_send_order--;
+
+ if (conn->sess->sess_ops->DataPDUInOrder)
+ goto next;
+
+ for (i = 0; i < seq->pdu_count; i++) {
+ pdu = &cmd->pdu_list[i+seq->pdu_start];
+
+ if (pdu->status != ISCSI_PDU_RECEIVED_OK)
+ continue;
+
+ pdu->status = ISCSI_PDU_NOT_RECEIVED;
+ }
+ }
+
+next:
+ cmd->outstanding_r2ts--;
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ /*
+	 * We now drop all unacknowledged R2Ts, i.e. ExpDataSN from the TMR
+	 * TASK_REASSIGN to the last R2T in the list.  We are also careful
+ * to check that the Initiator is not requesting R2Ts for DataOUT
+ * sequences it has already completed.
+ *
+ * Free each R2T in question and adjust values in struct iscsi_cmd
+	 * accordingly so iscsit_build_r2ts_for_cmd() does the rest of
+ * the work after the TMR TASK_REASSIGN Response is sent.
+ */
+drop_unacknowledged_r2ts:
+
+ cmd->cmd_flags &= ~ICF_SENT_LAST_R2T;
+ cmd->r2t_sn = tmr_req->exp_data_sn;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list) {
+ /*
+ * Skip up to the R2T Sequence number provided by the
+ * iSCSI TASK_REASSIGN TMR
+ */
+ if (r2t->r2t_sn < tmr_req->exp_data_sn)
+ continue;
+
+ if (r2t->seq_complete) {
+ pr_err("Initiator is requesting R2Ts from"
+ " R2TSN: 0x%08x, but R2TSN: 0x%08x, Offset: %u,"
+ " Length: %u is already complete."
+ " BAD INITIATOR ERL=2 IMPLEMENTATION!\n",
+ tmr_req->exp_data_sn, r2t->r2t_sn,
+ r2t->offset, r2t->xfer_len);
+ spin_unlock_bh(&cmd->r2t_lock);
+ return -1;
+ }
+
+ if (r2t->recovery_r2t) {
+ iscsit_free_r2t(r2t, cmd);
+ continue;
+ }
+
+ /* DataSequenceInOrder=Yes:
+ *
+ * Taking into account the iSCSI implementation requirement of
+ * MaxOutstandingR2T=1 while ErrorRecoveryLevel>0 and
+		 * DataSequenceInOrder=Yes, it's safe to subtract the R2T's
+		 * entire transfer length from the command's R2T offset marker.
+ *
+ * DataSequenceInOrder=No:
+ *
+		 * We subtract the struct iscsi_seq difference between the
+		 * current offset and the original offset from cmd->write_data_done
+		 * to account for DataOUT PDUs already received.  Then reset
+ * the current offset to the original and zero out the current
+ * burst length, to make sure we re-request the entire DataOUT
+ * sequence.
+ */
+ if (conn->sess->sess_ops->DataSequenceInOrder)
+ cmd->r2t_offset -= r2t->xfer_len;
+ else
+ cmd->seq_send_order--;
+
+ cmd->outstanding_r2ts--;
+ iscsit_free_r2t(r2t, cmd);
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return 0;
+}
+
+/*
+ * Performs sanity checks on a TMR TASK_REASSIGN's ExpDataSN for
+ * a given struct iscsi_cmd.
+ */
+int iscsit_check_task_reassign_expdatasn(
+ struct iscsi_tmr_req *tmr_req,
+ struct iscsi_conn *conn)
+{
+ struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
+ struct se_cmd *se_cmd = se_tmr->ref_cmd;
+ struct iscsi_cmd *ref_cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+ if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
+ return 0;
+
+ if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+ return 0;
+
+ if (ref_cmd->data_direction == DMA_NONE)
+ return 0;
+
+ /*
+	 * For READs the TMR TASK_REASSIGN's ExpDataSN contains the next DataSN
+ * of DataIN the Initiator is expecting.
+ *
+ * Also check that the Initiator is not re-requesting DataIN that has
+ * already been acknowledged with a DataAck SNACK.
+ */
+ if (ref_cmd->data_direction == DMA_FROM_DEVICE) {
+ if (tmr_req->exp_data_sn > ref_cmd->data_sn) {
+ pr_err("Received ExpDataSN: 0x%08x for READ"
+ " in TMR TASK_REASSIGN greater than command's"
+ " DataSN: 0x%08x.\n", tmr_req->exp_data_sn,
+ ref_cmd->data_sn);
+ return -1;
+ }
+ if ((ref_cmd->cmd_flags & ICF_GOT_DATACK_SNACK) &&
+ (tmr_req->exp_data_sn <= ref_cmd->acked_data_sn)) {
+ pr_err("Received ExpDataSN: 0x%08x for READ"
+ " in TMR TASK_REASSIGN for previously"
+ " acknowledged DataIN: 0x%08x,"
+ " protocol error\n", tmr_req->exp_data_sn,
+ ref_cmd->acked_data_sn);
+ return -1;
+ }
+ return iscsit_task_reassign_prepare_read(tmr_req, conn);
+ }
+
+ /*
+	 * For WRITEs the TMR TASK_REASSIGN's ExpDataSN contains the next R2TSN
+ * for R2Ts the Initiator is expecting.
+ *
+ * Do the magic in iscsit_task_reassign_prepare_write().
+ */
+ if (ref_cmd->data_direction == DMA_TO_DEVICE) {
+ if (tmr_req->exp_data_sn > ref_cmd->r2t_sn) {
+ pr_err("Received ExpDataSN: 0x%08x for WRITE"
+ " in TMR TASK_REASSIGN greater than command's"
+ " R2TSN: 0x%08x.\n", tmr_req->exp_data_sn,
+ ref_cmd->r2t_sn);
+ return -1;
+ }
+ return iscsit_task_reassign_prepare_write(tmr_req, conn);
+ }
+
+ pr_err("Unknown iSCSI data_direction: 0x%02x\n",
+ ref_cmd->data_direction);
+
+ return -1;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tmr.h b/drivers/target/iscsi/iscsi_target_tmr.h
new file mode 100644
index 00000000000..142e992cb09
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tmr.h
@@ -0,0 +1,14 @@
+#ifndef ISCSI_TARGET_TMR_H
+#define ISCSI_TARGET_TMR_H
+
+extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
+ unsigned char *);
+extern int iscsit_tmr_task_cold_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
+ unsigned char *);
+extern u8 iscsit_tmr_task_reassign(struct iscsi_cmd *, unsigned char *);
+extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_check_task_reassign_expdatasn(struct iscsi_tmr_req *,
+ struct iscsi_conn *);
+
+#endif /* ISCSI_TARGET_TMR_H */
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
new file mode 100644
index 00000000000..d4cf2cd25c4
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -0,0 +1,759 @@
+/*******************************************************************************
+ * This file contains iSCSI Target Portal Group related functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+#include <target/target_core_tpg.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_login.h"
+#include "iscsi_target_nodeattrib.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+#include "iscsi_target_parameters.h"
+
+struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt)
+{
+ struct iscsi_portal_group *tpg;
+
+ tpg = kzalloc(sizeof(struct iscsi_portal_group), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct iscsi_portal_group\n");
+ return NULL;
+ }
+
+ tpg->tpgt = tpgt;
+ tpg->tpg_state = TPG_STATE_FREE;
+ tpg->tpg_tiqn = tiqn;
+ INIT_LIST_HEAD(&tpg->tpg_gnp_list);
+ INIT_LIST_HEAD(&tpg->tpg_list);
+ mutex_init(&tpg->tpg_access_lock);
+ mutex_init(&tpg->np_login_lock);
+ spin_lock_init(&tpg->tpg_state_lock);
+ spin_lock_init(&tpg->tpg_np_lock);
+
+ return tpg;
+}
+
+static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *);
+
+int iscsit_load_discovery_tpg(void)
+{
+ struct iscsi_param *param;
+ struct iscsi_portal_group *tpg;
+ int ret;
+
+ tpg = iscsit_alloc_portal_group(NULL, 1);
+ if (!tpg) {
+ pr_err("Unable to allocate struct iscsi_portal_group\n");
+ return -1;
+ }
+
+ ret = core_tpg_register(
+ &lio_target_fabric_configfs->tf_ops,
+ NULL, &tpg->tpg_se_tpg, (void *)tpg,
+ TRANSPORT_TPG_TYPE_DISCOVERY);
+ if (ret < 0) {
+ kfree(tpg);
+ return -1;
+ }
+
+ tpg->sid = 1; /* First Assigned LIO Session ID */
+ iscsit_set_default_tpg_attribs(tpg);
+
+ if (iscsi_create_default_params(&tpg->param_list) < 0)
+ goto out;
+ /*
+ * By default we disable authentication for discovery sessions,
+ * this can be changed with:
+ *
+ * /sys/kernel/config/target/iscsi/discovery_auth/enforce_discovery_auth
+ */
+ param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+ if (!param)
+ goto out;
+
+ if (iscsi_update_param_value(param, "CHAP,None") < 0)
+ goto out;
+
+ tpg->tpg_attrib.authentication = 0;
+
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = TPG_STATE_ACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ iscsit_global->discovery_tpg = tpg;
+ pr_debug("CORE[0] - Allocated Discovery TPG\n");
+
+ return 0;
+out:
+ if (tpg->sid == 1)
+ core_tpg_deregister(&tpg->tpg_se_tpg);
+ kfree(tpg);
+ return -1;
+}
+
+void iscsit_release_discovery_tpg(void)
+{
+ struct iscsi_portal_group *tpg = iscsit_global->discovery_tpg;
+
+ if (!tpg)
+ return;
+
+ core_tpg_deregister(&tpg->tpg_se_tpg);
+
+ kfree(tpg);
+ iscsit_global->discovery_tpg = NULL;
+}
+
+struct iscsi_portal_group *iscsit_get_tpg_from_np(
+ struct iscsi_tiqn *tiqn,
+ struct iscsi_np *np)
+{
+ struct iscsi_portal_group *tpg = NULL;
+ struct iscsi_tpg_np *tpg_np;
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
+
+ spin_lock(&tpg->tpg_state_lock);
+ if (tpg->tpg_state == TPG_STATE_FREE) {
+ spin_unlock(&tpg->tpg_state_lock);
+ continue;
+ }
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+ if (tpg_np->tpg_np == np) {
+ spin_unlock(&tpg->tpg_np_lock);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+ return tpg;
+ }
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+ }
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return NULL;
+}
+
+int iscsit_get_tpg(
+ struct iscsi_portal_group *tpg)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&tpg->tpg_access_lock);
+ return ((ret != 0) || signal_pending(current)) ? -1 : 0;
+}
+
+void iscsit_put_tpg(struct iscsi_portal_group *tpg)
+{
+ mutex_unlock(&tpg->tpg_access_lock);
+}
+
+static void iscsit_clear_tpg_np_login_thread(
+ struct iscsi_tpg_np *tpg_np,
+ struct iscsi_portal_group *tpg)
+{
+ if (!tpg_np->tpg_np) {
+ pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
+ return;
+ }
+
+ iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg);
+}
+
+void iscsit_clear_tpg_np_login_threads(
+ struct iscsi_portal_group *tpg)
+{
+ struct iscsi_tpg_np *tpg_np;
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
+ if (!tpg_np->tpg_np) {
+ pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
+ continue;
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+ iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+ spin_lock(&tpg->tpg_np_lock);
+ }
+ spin_unlock(&tpg->tpg_np_lock);
+}
+
+void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg)
+{
+ iscsi_print_params(tpg->param_list);
+}
+
+static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ a->authentication = TA_AUTHENTICATION;
+ a->login_timeout = TA_LOGIN_TIMEOUT;
+ a->netif_timeout = TA_NETIF_TIMEOUT;
+ a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH;
+ a->generate_node_acls = TA_GENERATE_NODE_ACLS;
+ a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
+ a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
+ a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
+}
+
+int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
+{
+ if (tpg->tpg_state != TPG_STATE_FREE) {
+ pr_err("Unable to add iSCSI Target Portal Group: %d"
+ " while not in TPG_STATE_FREE state.\n", tpg->tpgt);
+ return -EEXIST;
+ }
+ iscsit_set_default_tpg_attribs(tpg);
+
+ if (iscsi_create_default_params(&tpg->param_list) < 0)
+ goto err_out;
+
+ ISCSI_TPG_ATTRIB(tpg)->tpg = tpg;
+
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = TPG_STATE_INACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ list_add_tail(&tpg->tpg_list, &tiqn->tiqn_tpg_list);
+ tiqn->tiqn_ntpgs++;
+ pr_debug("CORE[%s]_TPG[%hu] - Added iSCSI Target Portal Group\n",
+ tiqn->tiqn, tpg->tpgt);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return 0;
+err_out:
+ if (tpg->param_list) {
+ iscsi_release_param_list(tpg->param_list);
+ tpg->param_list = NULL;
+ }
+ kfree(tpg);
+ return -ENOMEM;
+}
+
+int iscsit_tpg_del_portal_group(
+ struct iscsi_tiqn *tiqn,
+ struct iscsi_portal_group *tpg,
+ int force)
+{
+ u8 old_state = tpg->tpg_state;
+
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = TPG_STATE_INACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
+ pr_err("Unable to delete iSCSI Target Portal Group:"
+ " %hu while active sessions exist, and force=0\n",
+ tpg->tpgt);
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = old_state;
+ spin_unlock(&tpg->tpg_state_lock);
+ return -EPERM;
+ }
+
+ core_tpg_clear_object_luns(&tpg->tpg_se_tpg);
+
+ if (tpg->param_list) {
+ iscsi_release_param_list(tpg->param_list);
+ tpg->param_list = NULL;
+ }
+
+ core_tpg_deregister(&tpg->tpg_se_tpg);
+
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = TPG_STATE_FREE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ tiqn->tiqn_ntpgs--;
+ list_del(&tpg->tpg_list);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ pr_debug("CORE[%s]_TPG[%hu] - Deleted iSCSI Target Portal Group\n",
+ tiqn->tiqn, tpg->tpgt);
+
+ kfree(tpg);
+ return 0;
+}
+
+int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
+{
+ struct iscsi_param *param;
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ spin_lock(&tpg->tpg_state_lock);
+ if (tpg->tpg_state == TPG_STATE_ACTIVE) {
+ pr_err("iSCSI target portal group: %hu is already"
+ " active, ignoring request.\n", tpg->tpgt);
+ spin_unlock(&tpg->tpg_state_lock);
+ return -EINVAL;
+ }
+ /*
+ * Make sure that AuthMethod does not contain None as an option
+ * unless authentication has been explicitly disabled. If authentication
+ * is enforced (the default), set the method to CHAP and remove the None option.
+ */
+ param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+ if (!param) {
+ spin_unlock(&tpg->tpg_state_lock);
+ return -ENOMEM;
+ }
+
+ if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
+ if (!strcmp(param->value, NONE))
+ if (iscsi_update_param_value(param, CHAP) < 0) {
+ spin_unlock(&tpg->tpg_state_lock);
+ return -ENOMEM;
+ }
+ if (iscsit_ta_authentication(tpg, 1) < 0) {
+ spin_unlock(&tpg->tpg_state_lock);
+ return -ENOMEM;
+ }
+ }
+
+ tpg->tpg_state = TPG_STATE_ACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ tiqn->tiqn_active_tpgs++;
+ pr_debug("iSCSI_TPG[%hu] - Enabled iSCSI Target Portal Group\n",
+ tpg->tpgt);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return 0;
+}
+
+int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
+{
+ struct iscsi_tiqn *tiqn;
+ u8 old_state = tpg->tpg_state;
+
+ spin_lock(&tpg->tpg_state_lock);
+ if (tpg->tpg_state == TPG_STATE_INACTIVE) {
+ pr_err("iSCSI Target Portal Group: %hu is already"
+ " inactive, ignoring request.\n", tpg->tpgt);
+ spin_unlock(&tpg->tpg_state_lock);
+ return -EINVAL;
+ }
+ tpg->tpg_state = TPG_STATE_INACTIVE;
+ spin_unlock(&tpg->tpg_state_lock);
+
+ iscsit_clear_tpg_np_login_threads(tpg);
+
+ if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
+ spin_lock(&tpg->tpg_state_lock);
+ tpg->tpg_state = old_state;
+ spin_unlock(&tpg->tpg_state_lock);
+ pr_err("Unable to disable iSCSI Target Portal Group:"
+ " %hu while active sessions exist, and force=0\n",
+ tpg->tpgt);
+ return -EPERM;
+ }
+
+ tiqn = tpg->tpg_tiqn;
+ if (!tiqn || (tpg == iscsit_global->discovery_tpg))
+ return 0;
+
+ spin_lock(&tiqn->tiqn_tpg_lock);
+ tiqn->tiqn_active_tpgs--;
+ pr_debug("iSCSI_TPG[%hu] - Disabled iSCSI Target Portal Group\n",
+ tpg->tpgt);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+
+ return 0;
+}
+
+struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
+ struct iscsi_session *sess)
+{
+ struct se_session *se_sess = sess->se_sess;
+ struct se_node_acl *se_nacl = se_sess->se_node_acl;
+ struct iscsi_node_acl *acl = container_of(se_nacl, struct iscsi_node_acl,
+ se_node_acl);
+
+ return &acl->node_attrib;
+}
+
+struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
+ struct iscsi_tpg_np *tpg_np,
+ int network_transport)
+{
+ struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
+
+ spin_lock(&tpg_np->tpg_np_parent_lock);
+ list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
+ &tpg_np->tpg_np_parent_list, tpg_np_child_list) {
+ if (tpg_np_child->tpg_np->np_network_transport ==
+ network_transport) {
+ spin_unlock(&tpg_np->tpg_np_parent_lock);
+ return tpg_np_child;
+ }
+ }
+ spin_unlock(&tpg_np->tpg_np_parent_lock);
+
+ return NULL;
+}
+
+struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
+ struct iscsi_portal_group *tpg,
+ struct __kernel_sockaddr_storage *sockaddr,
+ char *ip_str,
+ struct iscsi_tpg_np *tpg_np_parent,
+ int network_transport)
+{
+ struct iscsi_np *np;
+ struct iscsi_tpg_np *tpg_np;
+
+ tpg_np = kzalloc(sizeof(struct iscsi_tpg_np), GFP_KERNEL);
+ if (!tpg_np) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_tpg_np.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ np = iscsit_add_np(sockaddr, ip_str, network_transport);
+ if (IS_ERR(np)) {
+ kfree(tpg_np);
+ return ERR_CAST(np);
+ }
+
+ INIT_LIST_HEAD(&tpg_np->tpg_np_list);
+ INIT_LIST_HEAD(&tpg_np->tpg_np_child_list);
+ INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list);
+ spin_lock_init(&tpg_np->tpg_np_parent_lock);
+ tpg_np->tpg_np = np;
+ tpg_np->tpg = tpg;
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_add_tail(&tpg_np->tpg_np_list, &tpg->tpg_gnp_list);
+ tpg->num_tpg_nps++;
+ if (tpg->tpg_tiqn)
+ tpg->tpg_tiqn->tiqn_num_tpg_nps++;
+ spin_unlock(&tpg->tpg_np_lock);
+
+ if (tpg_np_parent) {
+ tpg_np->tpg_np_parent = tpg_np_parent;
+ spin_lock(&tpg_np_parent->tpg_np_parent_lock);
+ list_add_tail(&tpg_np->tpg_np_child_list,
+ &tpg_np_parent->tpg_np_parent_list);
+ spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
+ }
+
+ pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
+ tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+ (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
+
+ return tpg_np;
+}
+
+static int iscsit_tpg_release_np(
+ struct iscsi_tpg_np *tpg_np,
+ struct iscsi_portal_group *tpg,
+ struct iscsi_np *np)
+{
+ iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+
+ pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
+ tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+ (np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
+
+ tpg_np->tpg_np = NULL;
+ tpg_np->tpg = NULL;
+ kfree(tpg_np);
+ /*
+ * iscsit_del_np() will shut down the struct iscsi_np when the last
+ * TPG reference is released.
+ */
+ return iscsit_del_np(np);
+}
+
+int iscsit_tpg_del_network_portal(
+ struct iscsi_portal_group *tpg,
+ struct iscsi_tpg_np *tpg_np)
+{
+ struct iscsi_np *np;
+ struct iscsi_tpg_np *tpg_np_child, *tpg_np_child_tmp;
+ int ret = 0;
+
+ np = tpg_np->tpg_np;
+ if (!np) {
+ pr_err("Unable to locate struct iscsi_np from"
+ " struct iscsi_tpg_np\n");
+ return -EINVAL;
+ }
+
+ if (!tpg_np->tpg_np_parent) {
+ /*
+ * We are the parent tpg network portal. Release all of the
+ * child tpg_np's (e.g. the non-ISCSI_TCP ones) on our parent
+ * list first.
+ */
+ list_for_each_entry_safe(tpg_np_child, tpg_np_child_tmp,
+ &tpg_np->tpg_np_parent_list,
+ tpg_np_child_list) {
+ ret = iscsit_tpg_del_network_portal(tpg, tpg_np_child);
+ if (ret < 0)
+ pr_err("iscsit_tpg_del_network_portal()"
+ " failed: %d\n", ret);
+ }
+ } else {
+ /*
+ * We are a child tpg network portal. Remove ourselves from
+ * our parent's child list.
+ */
+ spin_lock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
+ list_del(&tpg_np->tpg_np_child_list);
+ spin_unlock(&tpg_np->tpg_np_parent->tpg_np_parent_lock);
+ }
+
+ spin_lock(&tpg->tpg_np_lock);
+ list_del(&tpg_np->tpg_np_list);
+ tpg->num_tpg_nps--;
+ if (tpg->tpg_tiqn)
+ tpg->tpg_tiqn->tiqn_num_tpg_nps--;
+ spin_unlock(&tpg->tpg_np_lock);
+
+ return iscsit_tpg_release_np(tpg_np, tpg, np);
+}
+
+int iscsit_tpg_set_initiator_node_queue_depth(
+ struct iscsi_portal_group *tpg,
+ unsigned char *initiatorname,
+ u32 queue_depth,
+ int force)
+{
+ return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg,
+ initiatorname, queue_depth, force);
+}
+
+int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
+{
+ unsigned char buf1[256], buf2[256], *none = NULL;
+ int len;
+ struct iscsi_param *param;
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((authentication != 1) && (authentication != 0)) {
+ pr_err("Illegal value for authentication parameter:"
+ " %u, ignoring request.\n", authentication);
+ return -1;
+ }
+
+ memset(buf1, 0, sizeof(buf1));
+ memset(buf2, 0, sizeof(buf2));
+
+ param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
+ if (!param)
+ return -EINVAL;
+
+ if (authentication) {
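+ /*
+ * Strip "None" from the current AuthMethod value (e.g. turning
+ * "CHAP,None" into "CHAP") so that unauthenticated logins are
+ * no longer offered once authentication is enforced.
+ */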
+ snprintf(buf1, sizeof(buf1), "%s", param->value);
+ none = strstr(buf1, NONE);
+ if (!none)
+ goto out;
+ if (!strncmp(none + 4, ",", 1)) {
+ if (!strcmp(buf1, none))
+ sprintf(buf2, "%s", none+5);
+ else {
+ none--;
+ *none = '\0';
+ len = sprintf(buf2, "%s", buf1);
+ none += 5;
+ sprintf(buf2 + len, "%s", none);
+ }
+ } else {
+ none--;
+ *none = '\0';
+ sprintf(buf2, "%s", buf1);
+ }
+ if (iscsi_update_param_value(param, buf2) < 0)
+ return -EINVAL;
+ } else {
+ snprintf(buf1, sizeof(buf1), "%s", param->value);
+ none = strstr(buf1, NONE);
+ if ((none))
+ goto out;
+ strncat(buf1, ",", strlen(","));
+ strncat(buf1, NONE, strlen(NONE));
+ if (iscsi_update_param_value(param, buf1) < 0)
+ return -EINVAL;
+ }
+
+out:
+ a->authentication = authentication;
+ pr_debug("%s iSCSI Authentication Methods for TPG: %hu.\n",
+ a->authentication ? "Enforcing" : "Disabling", tpg->tpgt);
+
+ return 0;
+}
+
+int iscsit_ta_login_timeout(
+ struct iscsi_portal_group *tpg,
+ u32 login_timeout)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if (login_timeout > TA_LOGIN_TIMEOUT_MAX) {
+ pr_err("Requested Login Timeout %u larger than maximum"
+ " %u\n", login_timeout, TA_LOGIN_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if (login_timeout < TA_LOGIN_TIMEOUT_MIN) {
+ pr_err("Requested Login Timeout %u smaller than"
+ " minimum %u\n", login_timeout, TA_LOGIN_TIMEOUT_MIN);
+ return -EINVAL;
+ }
+
+ a->login_timeout = login_timeout;
+ pr_debug("Set Login Timeout to %u for Target Portal Group"
+ " %hu\n", a->login_timeout, tpg->tpgt);
+
+ return 0;
+}
+
+int iscsit_ta_netif_timeout(
+ struct iscsi_portal_group *tpg,
+ u32 netif_timeout)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if (netif_timeout > TA_NETIF_TIMEOUT_MAX) {
+ pr_err("Requested Network Interface Timeout %u larger"
+ " than maximum %u\n", netif_timeout,
+ TA_NETIF_TIMEOUT_MAX);
+ return -EINVAL;
+ } else if (netif_timeout < TA_NETIF_TIMEOUT_MIN) {
+ pr_err("Requested Network Interface Timeout %u smaller"
+ " than minimum %u\n", netif_timeout,
+ TA_NETIF_TIMEOUT_MIN);
+ return -EINVAL;
+ }
+
+ a->netif_timeout = netif_timeout;
+ pr_debug("Set Network Interface Timeout to %u for"
+ " Target Portal Group %hu\n", a->netif_timeout, tpg->tpgt);
+
+ return 0;
+}
+
+int iscsit_ta_generate_node_acls(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->generate_node_acls = flag;
+ pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
+ tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
+
+ return 0;
+}
+
+int iscsit_ta_default_cmdsn_depth(
+ struct iscsi_portal_group *tpg,
+ u32 tcq_depth)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if (tcq_depth > TA_DEFAULT_CMDSN_DEPTH_MAX) {
+ pr_err("Requested Default Queue Depth: %u larger"
+ " than maximum %u\n", tcq_depth,
+ TA_DEFAULT_CMDSN_DEPTH_MAX);
+ return -EINVAL;
+ } else if (tcq_depth < TA_DEFAULT_CMDSN_DEPTH_MIN) {
+ pr_err("Requested Default Queue Depth: %u smaller"
+ " than minimum %u\n", tcq_depth,
+ TA_DEFAULT_CMDSN_DEPTH_MIN);
+ return -EINVAL;
+ }
+
+ a->default_cmdsn_depth = tcq_depth;
+ pr_debug("iSCSI_TPG[%hu] - Set Default CmdSN TCQ Depth to %u\n",
+ tpg->tpgt, a->default_cmdsn_depth);
+
+ return 0;
+}
+
+int iscsit_ta_cache_dynamic_acls(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->cache_dynamic_acls = flag;
+ pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
+ " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
+ "Enabled" : "Disabled");
+
+ return 0;
+}
+
+int iscsit_ta_demo_mode_write_protect(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->demo_mode_write_protect = flag;
+ pr_debug("iSCSI_TPG[%hu] - Demo Mode Write Protect bit: %s\n",
+ tpg->tpgt, (a->demo_mode_write_protect) ? "ON" : "OFF");
+
+ return 0;
+}
+
+int iscsit_ta_prod_mode_write_protect(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->prod_mode_write_protect = flag;
+ pr_debug("iSCSI_TPG[%hu] - Production Mode Write Protect bit:"
+ " %s\n", tpg->tpgt, (a->prod_mode_write_protect) ?
+ "ON" : "OFF");
+
+ return 0;
+}
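A minimal usage sketch of the portal group API above, assuming an existing tiqn and an already-filled sockaddr/ip_str pair: allocate the TPG, register it with its tiqn, attach one parent ISCSI_TCP network portal, then enable it. The helper name example_bring_up_tpg is hypothetical, and the core_tpg_register() call made for the discovery TPG earlier in this file is elided for brevity.

    /* Illustrative sketch only: bring up TPG 1 under an existing tiqn. */
    static int example_bring_up_tpg(struct iscsi_tiqn *tiqn,
                                    struct __kernel_sockaddr_storage *sockaddr,
                                    char *ip_str)
    {
            struct iscsi_portal_group *tpg;
            struct iscsi_tpg_np *tpg_np;
            int ret;

            tpg = iscsit_alloc_portal_group(tiqn, 1);
            if (!tpg)
                    return -ENOMEM;
            /* core_tpg_register() of tpg->tpg_se_tpg is elided here. */

            ret = iscsit_tpg_add_portal_group(tiqn, tpg);
            if (ret < 0)
                    return ret;

            /* Parent ISCSI_TCP portal; SCTP portals would be added as children. */
            tpg_np = iscsit_tpg_add_network_portal(tpg, sockaddr, ip_str,
                                                   NULL, ISCSI_TCP);
            if (IS_ERR(tpg_np))
                    return PTR_ERR(tpg_np);

            return iscsit_tpg_enable_portal_group(tpg);
    }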
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
new file mode 100644
index 00000000000..dda48c141a8
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -0,0 +1,41 @@
+#ifndef ISCSI_TARGET_TPG_H
+#define ISCSI_TARGET_TPG_H
+
+extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16);
+extern int iscsit_load_discovery_tpg(void);
+extern void iscsit_release_discovery_tpg(void);
+extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
+ struct iscsi_np *);
+extern int iscsit_get_tpg(struct iscsi_portal_group *);
+extern void iscsit_put_tpg(struct iscsi_portal_group *);
+extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *);
+extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
+extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
+extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
+ int);
+extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *);
+extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int);
+extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl(
+ struct iscsi_portal_group *, const char *, u32);
+extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *,
+ struct se_node_acl *);
+extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session *);
+extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
+extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
+extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
+ struct __kernel_sockaddr_storage *, char *, struct iscsi_tpg_np *,
+ int);
+extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
+ struct iscsi_tpg_np *);
+extern int iscsit_tpg_set_initiator_node_queue_depth(struct iscsi_portal_group *,
+ unsigned char *, u32, int);
+extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_generate_node_acls(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
+
+#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
new file mode 100644
index 00000000000..0baac5bcebd
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -0,0 +1,551 @@
+/*******************************************************************************
+ * This file contains the iSCSI Login Thread and Thread Queue functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/bitmap.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target.h"
+
+static LIST_HEAD(active_ts_list);
+static LIST_HEAD(inactive_ts_list);
+static DEFINE_SPINLOCK(active_ts_lock);
+static DEFINE_SPINLOCK(inactive_ts_lock);
+static DEFINE_SPINLOCK(ts_bitmap_lock);
+
+static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
+{
+ spin_lock(&active_ts_lock);
+ list_add_tail(&ts->ts_list, &active_ts_list);
+ iscsit_global->active_ts++;
+ spin_unlock(&active_ts_lock);
+}
+
+void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
+{
+ spin_lock(&inactive_ts_lock);
+ list_add_tail(&ts->ts_list, &inactive_ts_list);
+ iscsit_global->inactive_ts++;
+ spin_unlock(&inactive_ts_lock);
+}
+
+static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
+{
+ spin_lock(&active_ts_lock);
+ list_del(&ts->ts_list);
+ iscsit_global->active_ts--;
+ spin_unlock(&active_ts_lock);
+}
+
+static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
+{
+ struct iscsi_thread_set *ts;
+
+ spin_lock(&inactive_ts_lock);
+ if (list_empty(&inactive_ts_list)) {
+ spin_unlock(&inactive_ts_lock);
+ return NULL;
+ }
+
+ list_for_each_entry(ts, &inactive_ts_list, ts_list)
+ break;
+
+ list_del(&ts->ts_list);
+ iscsit_global->inactive_ts--;
+ spin_unlock(&inactive_ts_lock);
+
+ return ts;
+}
+
+int iscsi_allocate_thread_sets(u32 thread_pair_count)
+{
+ int allocated_thread_pair_count = 0, i, thread_id;
+ struct iscsi_thread_set *ts = NULL;
+
+ for (i = 0; i < thread_pair_count; i++) {
+ ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL);
+ if (!ts) {
+ pr_err("Unable to allocate memory for"
+ " thread set.\n");
+ return allocated_thread_pair_count;
+ }
+ /*
+ * Locate the next available region in the thread_set_bitmap
+ */
+ spin_lock(&ts_bitmap_lock);
+ thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
+ iscsit_global->ts_bitmap_count, get_order(1));
+ spin_unlock(&ts_bitmap_lock);
+ if (thread_id < 0) {
+ pr_err("bitmap_find_free_region() failed for"
+ " thread_set_bitmap\n");
+ kfree(ts);
+ return allocated_thread_pair_count;
+ }
+
+ ts->thread_id = thread_id;
+ ts->status = ISCSI_THREAD_SET_FREE;
+ INIT_LIST_HEAD(&ts->ts_list);
+ spin_lock_init(&ts->ts_state_lock);
+ init_completion(&ts->rx_post_start_comp);
+ init_completion(&ts->tx_post_start_comp);
+ init_completion(&ts->rx_restart_comp);
+ init_completion(&ts->tx_restart_comp);
+ init_completion(&ts->rx_start_comp);
+ init_completion(&ts->tx_start_comp);
+
+ ts->create_threads = 1;
+ ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s",
+ ISCSI_TX_THREAD_NAME);
+ if (IS_ERR(ts->tx_thread)) {
+ dump_stack();
+ pr_err("Unable to start iscsi_target_tx_thread\n");
+ break;
+ }
+
+ ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s",
+ ISCSI_RX_THREAD_NAME);
+ if (IS_ERR(ts->rx_thread)) {
+ kthread_stop(ts->tx_thread);
+ pr_err("Unable to start iscsi_target_rx_thread\n");
+ break;
+ }
+ ts->create_threads = 0;
+
+ iscsi_add_ts_to_inactive_list(ts);
+ allocated_thread_pair_count++;
+ }
+
+ pr_debug("Spawned %d thread set(s) (%d total threads).\n",
+ allocated_thread_pair_count, allocated_thread_pair_count * 2);
+ return allocated_thread_pair_count;
+}
+
+void iscsi_deallocate_thread_sets(void)
+{
+ u32 released_count = 0;
+ struct iscsi_thread_set *ts = NULL;
+
+ while ((ts = iscsi_get_ts_from_inactive_list())) {
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->status = ISCSI_THREAD_SET_DIE;
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ if (ts->rx_thread) {
+ send_sig(SIGINT, ts->rx_thread, 1);
+ kthread_stop(ts->rx_thread);
+ }
+ if (ts->tx_thread) {
+ send_sig(SIGINT, ts->tx_thread, 1);
+ kthread_stop(ts->tx_thread);
+ }
+ /*
+ * Release this thread_id in the thread_set_bitmap
+ */
+ spin_lock(&ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap,
+ ts->thread_id, get_order(1));
+ spin_unlock(&ts_bitmap_lock);
+
+ released_count++;
+ kfree(ts);
+ }
+
+ if (released_count)
+ pr_debug("Stopped %d thread set(s) (%d total threads)."
+ "\n", released_count, released_count * 2);
+}
+
+static void iscsi_deallocate_extra_thread_sets(void)
+{
+ u32 orig_count, released_count = 0;
+ struct iscsi_thread_set *ts = NULL;
+
+ orig_count = TARGET_THREAD_SET_COUNT;
+
+ while ((iscsit_global->inactive_ts + 1) > orig_count) {
+ ts = iscsi_get_ts_from_inactive_list();
+ if (!ts)
+ break;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->status = ISCSI_THREAD_SET_DIE;
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ if (ts->rx_thread) {
+ send_sig(SIGINT, ts->rx_thread, 1);
+ kthread_stop(ts->rx_thread);
+ }
+ if (ts->tx_thread) {
+ send_sig(SIGINT, ts->tx_thread, 1);
+ kthread_stop(ts->tx_thread);
+ }
+ /*
+ * Release this thread_id in the thread_set_bitmap
+ */
+ spin_lock(&ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap,
+ ts->thread_id, get_order(1));
+ spin_unlock(&ts_bitmap_lock);
+
+ released_count++;
+ kfree(ts);
+ }
+
+ if (released_count) {
+ pr_debug("Stopped %d thread set(s) (%d total threads)."
+ "\n", released_count, released_count * 2);
+ }
+}
+
+void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
+{
+ iscsi_add_ts_to_active_list(ts);
+
+ spin_lock_bh(&ts->ts_state_lock);
+ conn->thread_set = ts;
+ ts->conn = conn;
+ spin_unlock_bh(&ts->ts_state_lock);
+ /*
+ * Start up the RX thread and wait on rx_post_start_comp. The RX
+ * Thread will then do the same for the TX Thread in
+ * iscsi_rx_thread_pre_handler().
+ */
+ complete(&ts->rx_start_comp);
+ wait_for_completion(&ts->rx_post_start_comp);
+}
+
+struct iscsi_thread_set *iscsi_get_thread_set(void)
+{
+ int allocate_ts = 0;
+ struct completion comp;
+ struct iscsi_thread_set *ts = NULL;
+ /*
+ * If no inactive thread set is available on the first call to
+ * iscsi_get_ts_from_inactive_list(), sleep for a second and
+ * try again. If still none are available after two attempts,
+ * allocate a set ourselves.
+ */
+get_set:
+ ts = iscsi_get_ts_from_inactive_list();
+ if (!ts) {
+ if (allocate_ts == 2)
+ iscsi_allocate_thread_sets(1);
+
+ init_completion(&comp);
+ wait_for_completion_timeout(&comp, 1 * HZ);
+
+ allocate_ts++;
+ goto get_set;
+ }
+
+ ts->delay_inactive = 1;
+ ts->signal_sent = 0;
+ ts->thread_count = 2;
+ init_completion(&ts->rx_restart_comp);
+ init_completion(&ts->tx_restart_comp);
+
+ return ts;
+}
+
+void iscsi_set_thread_clear(struct iscsi_conn *conn, u8 thread_clear)
+{
+ struct iscsi_thread_set *ts = NULL;
+
+ if (!conn->thread_set) {
+ pr_err("struct iscsi_conn->thread_set is NULL\n");
+ return;
+ }
+ ts = conn->thread_set;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->thread_clear &= ~thread_clear;
+
+ if ((thread_clear & ISCSI_CLEAR_RX_THREAD) &&
+ (ts->blocked_threads & ISCSI_BLOCK_RX_THREAD))
+ complete(&ts->rx_restart_comp);
+ else if ((thread_clear & ISCSI_CLEAR_TX_THREAD) &&
+ (ts->blocked_threads & ISCSI_BLOCK_TX_THREAD))
+ complete(&ts->tx_restart_comp);
+ spin_unlock_bh(&ts->ts_state_lock);
+}
+
+void iscsi_set_thread_set_signal(struct iscsi_conn *conn, u8 signal_sent)
+{
+ struct iscsi_thread_set *ts = NULL;
+
+ if (!conn->thread_set) {
+ pr_err("struct iscsi_conn->thread_set is NULL\n");
+ return;
+ }
+ ts = conn->thread_set;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->signal_sent |= signal_sent;
+ spin_unlock_bh(&ts->ts_state_lock);
+}
+
+int iscsi_release_thread_set(struct iscsi_conn *conn)
+{
+ int thread_called = 0;
+ struct iscsi_thread_set *ts = NULL;
+
+ if (!conn || !conn->thread_set) {
+ pr_err("connection or thread set pointer is NULL\n");
+ BUG();
+ }
+ ts = conn->thread_set;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->status = ISCSI_THREAD_SET_RESET;
+
+ if (!strncmp(current->comm, ISCSI_RX_THREAD_NAME,
+ strlen(ISCSI_RX_THREAD_NAME)))
+ thread_called = ISCSI_RX_THREAD;
+ else if (!strncmp(current->comm, ISCSI_TX_THREAD_NAME,
+ strlen(ISCSI_TX_THREAD_NAME)))
+ thread_called = ISCSI_TX_THREAD;
+
+ if (ts->rx_thread && (thread_called == ISCSI_TX_THREAD) &&
+ (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) {
+
+ if (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD)) {
+ send_sig(SIGINT, ts->rx_thread, 1);
+ ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
+ }
+ ts->blocked_threads |= ISCSI_BLOCK_RX_THREAD;
+ spin_unlock_bh(&ts->ts_state_lock);
+ wait_for_completion(&ts->rx_restart_comp);
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->blocked_threads &= ~ISCSI_BLOCK_RX_THREAD;
+ }
+ if (ts->tx_thread && (thread_called == ISCSI_RX_THREAD) &&
+ (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) {
+
+ if (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD)) {
+ send_sig(SIGINT, ts->tx_thread, 1);
+ ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
+ }
+ ts->blocked_threads |= ISCSI_BLOCK_TX_THREAD;
+ spin_unlock_bh(&ts->ts_state_lock);
+ wait_for_completion(&ts->tx_restart_comp);
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->blocked_threads &= ~ISCSI_BLOCK_TX_THREAD;
+ }
+
+ ts->conn = NULL;
+ ts->status = ISCSI_THREAD_SET_FREE;
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ return 0;
+}
+
+int iscsi_thread_set_force_reinstatement(struct iscsi_conn *conn)
+{
+ struct iscsi_thread_set *ts;
+
+ if (!conn->thread_set)
+ return -1;
+ ts = conn->thread_set;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ if (ts->status != ISCSI_THREAD_SET_ACTIVE) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ return -1;
+ }
+
+ if (ts->tx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD))) {
+ send_sig(SIGINT, ts->tx_thread, 1);
+ ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
+ }
+ if (ts->rx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD))) {
+ send_sig(SIGINT, ts->rx_thread, 1);
+ ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
+ }
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ return 0;
+}
+
+static void iscsi_check_to_add_additional_sets(void)
+{
+ int thread_sets_add;
+
+ spin_lock(&inactive_ts_lock);
+ thread_sets_add = iscsit_global->inactive_ts;
+ spin_unlock(&inactive_ts_lock);
+ if (thread_sets_add == 1)
+ iscsi_allocate_thread_sets(1);
+}
+
+static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts)
+{
+ spin_lock_bh(&ts->ts_state_lock);
+ if ((ts->status == ISCSI_THREAD_SET_DIE) || signal_pending(current)) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ return -1;
+ }
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ return 0;
+}
+
+struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
+{
+ int ret;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ if (ts->create_threads) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ goto sleep;
+ }
+
+ flush_signals(current);
+
+ if (ts->delay_inactive && (--ts->thread_count == 0)) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ iscsi_del_ts_from_active_list(ts);
+
+ if (!iscsit_global->in_shutdown)
+ iscsi_deallocate_extra_thread_sets();
+
+ iscsi_add_ts_to_inactive_list(ts);
+ spin_lock_bh(&ts->ts_state_lock);
+ }
+
+ if ((ts->status == ISCSI_THREAD_SET_RESET) &&
+ (ts->thread_clear & ISCSI_CLEAR_RX_THREAD))
+ complete(&ts->rx_restart_comp);
+
+ ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD;
+ spin_unlock_bh(&ts->ts_state_lock);
+sleep:
+ ret = wait_for_completion_interruptible(&ts->rx_start_comp);
+ if (ret != 0)
+ return NULL;
+
+ if (iscsi_signal_thread_pre_handler(ts) < 0)
+ return NULL;
+
+ if (!ts->conn) {
+ pr_err("struct iscsi_thread_set->conn is NULL for"
+ " thread_id: %d, going back to sleep\n", ts->thread_id);
+ goto sleep;
+ }
+ iscsi_check_to_add_additional_sets();
+ /*
+ * The RX Thread starts up the TX Thread and sleeps.
+ */
+ ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
+ complete(&ts->tx_start_comp);
+ wait_for_completion(&ts->tx_post_start_comp);
+
+ return ts->conn;
+}
+
+struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
+{
+ int ret;
+
+ spin_lock_bh(&ts->ts_state_lock);
+ if (ts->create_threads) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ goto sleep;
+ }
+
+ flush_signals(current);
+
+ if (ts->delay_inactive && (--ts->thread_count == 0)) {
+ spin_unlock_bh(&ts->ts_state_lock);
+ iscsi_del_ts_from_active_list(ts);
+
+ if (!iscsit_global->in_shutdown)
+ iscsi_deallocate_extra_thread_sets();
+
+ iscsi_add_ts_to_inactive_list(ts);
+ spin_lock_bh(&ts->ts_state_lock);
+ }
+ if ((ts->status == ISCSI_THREAD_SET_RESET) &&
+ (ts->thread_clear & ISCSI_CLEAR_TX_THREAD))
+ complete(&ts->tx_restart_comp);
+
+ ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD;
+ spin_unlock_bh(&ts->ts_state_lock);
+sleep:
+ ret = wait_for_completion_interruptible(&ts->tx_start_comp);
+ if (ret != 0)
+ return NULL;
+
+ if (iscsi_signal_thread_pre_handler(ts) < 0)
+ return NULL;
+
+ if (!ts->conn) {
+ pr_err("struct iscsi_thread_set->conn is NULL for"
+ " thread_id: %d, going back to sleep\n",
+ ts->thread_id);
+ goto sleep;
+ }
+
+ iscsi_check_to_add_additional_sets();
+ /*
+ * From the TX thread, up the tx_post_start_comp that the RX Thread is
+ * sleeping on in iscsi_rx_thread_pre_handler(), then up the
+ * rx_post_start_comp that iscsi_activate_thread_set() is sleeping on.
+ */
+ ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
+ complete(&ts->tx_post_start_comp);
+ complete(&ts->rx_post_start_comp);
+
+ spin_lock_bh(&ts->ts_state_lock);
+ ts->status = ISCSI_THREAD_SET_ACTIVE;
+ spin_unlock_bh(&ts->ts_state_lock);
+
+ return ts->conn;
+}
+
+int iscsi_thread_set_init(void)
+{
+ int size;
+
+ iscsit_global->ts_bitmap_count = ISCSI_TS_BITMAP_BITS;
+
+ size = BITS_TO_LONGS(iscsit_global->ts_bitmap_count) * sizeof(long);
+ iscsit_global->ts_bitmap = kzalloc(size, GFP_KERNEL);
+ if (!iscsit_global->ts_bitmap) {
+ pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&active_ts_lock);
+ spin_lock_init(&inactive_ts_lock);
+ spin_lock_init(&ts_bitmap_lock);
+ INIT_LIST_HEAD(&active_ts_list);
+ INIT_LIST_HEAD(&inactive_ts_list);
+
+ return 0;
+}
+
+void iscsi_thread_set_free(void)
+{
+ kfree(iscsit_global->ts_bitmap);
+}
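Taken together, the code above maintains a pool of paired RX/TX kthreads that the login path hands a connection to. Below is a minimal sketch of the intended call order, assuming iscsit_global has already been set up; example_thread_set_usage is a hypothetical helper that compresses the module-load, login, and teardown steps into one place, with error handling omitted.

    /* Illustrative sketch only: thread set pool lifecycle around one connection. */
    static int example_thread_set_usage(struct iscsi_conn *conn)
    {
            struct iscsi_thread_set *ts;

            /* Module-load side: build the bitmap and spawn RX/TX kthread pairs. */
            if (iscsi_thread_set_init() < 0)
                    return -ENOMEM;
            iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT);

            /* Login side: grab an inactive set and bind it to the connection. */
            ts = iscsi_get_thread_set();
            iscsi_activate_thread_set(conn, ts);

            /* ... RX/TX threads service the connection ... */

            /* Connection teardown: quiesce the threads and return the set. */
            iscsi_release_thread_set(conn);

            /* Module-unload side: stop all inactive sets and free the bitmap. */
            iscsi_deallocate_thread_sets();
            iscsi_thread_set_free();
            return 0;
    }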
diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h
new file mode 100644
index 00000000000..26e6a95ec20
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_tq.h
@@ -0,0 +1,88 @@
+#ifndef ISCSI_THREAD_QUEUE_H
+#define ISCSI_THREAD_QUEUE_H
+
+/*
+ * Defines for thread sets.
+ */
+extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *);
+extern void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *);
+extern int iscsi_allocate_thread_sets(u32);
+extern void iscsi_deallocate_thread_sets(void);
+extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *);
+extern struct iscsi_thread_set *iscsi_get_thread_set(void);
+extern void iscsi_set_thread_clear(struct iscsi_conn *, u8);
+extern void iscsi_set_thread_set_signal(struct iscsi_conn *, u8);
+extern int iscsi_release_thread_set(struct iscsi_conn *);
+extern struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *);
+extern struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *);
+extern int iscsi_thread_set_init(void);
+extern void iscsi_thread_set_free(void);
+
+extern int iscsi_target_tx_thread(void *);
+extern int iscsi_target_rx_thread(void *);
+
+#define TARGET_THREAD_SET_COUNT 4
+
+#define ISCSI_RX_THREAD 1
+#define ISCSI_TX_THREAD 2
+#define ISCSI_RX_THREAD_NAME "iscsi_trx"
+#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
+#define ISCSI_BLOCK_RX_THREAD 0x1
+#define ISCSI_BLOCK_TX_THREAD 0x2
+#define ISCSI_CLEAR_RX_THREAD 0x1
+#define ISCSI_CLEAR_TX_THREAD 0x2
+#define ISCSI_SIGNAL_RX_THREAD 0x1
+#define ISCSI_SIGNAL_TX_THREAD 0x2
+
+/* struct iscsi_thread_set->status */
+#define ISCSI_THREAD_SET_FREE 1
+#define ISCSI_THREAD_SET_ACTIVE 2
+#define ISCSI_THREAD_SET_DIE 3
+#define ISCSI_THREAD_SET_RESET 4
+#define ISCSI_THREAD_SET_DEALLOCATE_THREADS 5
+
+/* By default allow a maximum of 32K iSCSI connections */
+#define ISCSI_TS_BITMAP_BITS 32768
+
+struct iscsi_thread_set {
+ /* flags used for blocking and restarting sets */
+ int blocked_threads;
+ /* flag for creating threads */
+ int create_threads;
+ /* flag for delaying re-adding to the inactive list */
+ int delay_inactive;
+ /* status for thread set */
+ int status;
+ /* which threads have had signals sent */
+ int signal_sent;
+ /* flag for which threads exited first */
+ int thread_clear;
+ /* Active threads in the thread set */
+ int thread_count;
+ /* Unique thread ID */
+ u32 thread_id;
+ /* pointer to connection if set is active */
+ struct iscsi_conn *conn;
+ /* used for controlling ts state accesses */
+ spinlock_t ts_state_lock;
+ /* Used for rx side post startup */
+ struct completion rx_post_start_comp;
+ /* Used for tx side post startup */
+ struct completion tx_post_start_comp;
+ /* used for restarting thread queue */
+ struct completion rx_restart_comp;
+ /* used for restarting thread queue */
+ struct completion tx_restart_comp;
+ /* used to block the RX thread while the set is idle */
+ struct completion rx_start_comp;
+ /* used to block the TX thread while the set is idle */
+ struct completion tx_start_comp;
+ /* OS descriptor for rx thread */
+ struct task_struct *rx_thread;
+ /* OS descriptor for tx thread */
+ struct task_struct *tx_thread;
+ /* list_head used on the active/inactive thread set lists */
+ struct list_head ts_list;
+};
+
+#endif /*** ISCSI_THREAD_QUEUE_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
new file mode 100644
index 00000000000..a1acb016790
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -0,0 +1,1819 @@
+/*******************************************************************************
+ * This file contains the iSCSI Target specific utility functions.
+ *
+ * © Copyright 2007-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_transport.h>
+#include <target/target_core_tmr.h>
+#include <target/target_core_fabric_ops.h>
+#include <target/target_core_configfs.h>
+
+#include "iscsi_target_core.h"
+#include "iscsi_target_parameters.h"
+#include "iscsi_target_seq_pdu_list.h"
+#include "iscsi_target_datain_values.h"
+#include "iscsi_target_erl0.h"
+#include "iscsi_target_erl1.h"
+#include "iscsi_target_erl2.h"
+#include "iscsi_target_tpg.h"
+#include "iscsi_target_tq.h"
+#include "iscsi_target_util.h"
+#include "iscsi_target.h"
+
+#define PRINT_BUFF(buff, len) \
+{ \
+ int zzz; \
+ \
+ pr_debug("%d:\n", __LINE__); \
+ for (zzz = 0; zzz < len; zzz++) { \
+ if (zzz % 16 == 0) { \
+ if (zzz) \
+ pr_debug("\n"); \
+ pr_debug("%4i: ", zzz); \
+ } \
+ pr_debug("%02x ", (unsigned char) (buff)[zzz]); \
+ } \
+ if ((len + 1) % 16) \
+ pr_debug("\n"); \
+}
+
+extern struct list_head g_tiqn_list;
+extern spinlock_t tiqn_lock;
+
+/*
+ * Called with cmd->r2t_lock held.
+ */
+int iscsit_add_r2t_to_list(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 xfer_len,
+ int recovery,
+ u32 r2t_sn)
+{
+ struct iscsi_r2t *r2t;
+
+ r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
+ if (!r2t) {
+ pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
+ return -1;
+ }
+ INIT_LIST_HEAD(&r2t->r2t_list);
+
+ r2t->recovery_r2t = recovery;
+ r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
+ r2t->offset = offset;
+ r2t->xfer_len = xfer_len;
+ list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
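+ /*
+ * Drop r2t_lock around queueing the R2T for transmit, since
+ * iscsit_add_cmd_to_immediate_queue() takes its own locks and
+ * wakes the TX thread; the caller expects r2t_lock held on return.
+ */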
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
+
+ spin_lock_bh(&cmd->r2t_lock);
+ return 0;
+}
+
+struct iscsi_r2t *iscsit_get_r2t_for_eos(
+ struct iscsi_cmd *cmd,
+ u32 offset,
+ u32 length)
+{
+ struct iscsi_r2t *r2t;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+ if ((r2t->offset <= offset) &&
+ (r2t->offset + r2t->xfer_len) >= (offset + length)) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return r2t;
+ }
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ pr_err("Unable to locate R2T for Offset: %u, Length:"
+ " %u\n", offset, length);
+ return NULL;
+}
+
+struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
+{
+ struct iscsi_r2t *r2t;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+ if (!r2t->sent_r2t) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return r2t;
+ }
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ pr_err("Unable to locate next R2T to send for ITT:"
+ " 0x%08x.\n", cmd->init_task_tag);
+ return NULL;
+}
+
+/*
+ * Called with cmd->r2t_lock held.
+ */
+void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
+{
+ list_del(&r2t->r2t_list);
+ kmem_cache_free(lio_r2t_cache, r2t);
+}
+
+void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
+{
+ struct iscsi_r2t *r2t, *r2t_tmp;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
+ iscsit_free_r2t(r2t, cmd);
+ spin_unlock_bh(&cmd->r2t_lock);
+}
+
+/*
+ * May be called from software interrupt (timer) context for allocating
+ * iSCSI NopINs.
+ */
+struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
+{
+ struct iscsi_cmd *cmd;
+
+ cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
+ if (!cmd) {
+ pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
+ return NULL;
+ }
+
+ cmd->conn = conn;
+ INIT_LIST_HEAD(&cmd->i_list);
+ INIT_LIST_HEAD(&cmd->datain_list);
+ INIT_LIST_HEAD(&cmd->cmd_r2t_list);
+ init_completion(&cmd->reject_comp);
+ spin_lock_init(&cmd->datain_lock);
+ spin_lock_init(&cmd->dataout_timeout_lock);
+ spin_lock_init(&cmd->istate_lock);
+ spin_lock_init(&cmd->error_lock);
+ spin_lock_init(&cmd->r2t_lock);
+
+ return cmd;
+}
+
+/*
+ * Called from iscsi_handle_scsi_cmd()
+ */
+struct iscsi_cmd *iscsit_allocate_se_cmd(
+ struct iscsi_conn *conn,
+ u32 data_length,
+ int data_direction,
+ int iscsi_task_attr)
+{
+ struct iscsi_cmd *cmd;
+ struct se_cmd *se_cmd;
+ int sam_task_attr;
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->data_direction = data_direction;
+ cmd->data_length = data_length;
+ /*
+ * Figure out the SAM Task Attribute for the incoming SCSI CDB
+ */
+ if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
+ (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
+ sam_task_attr = MSG_SIMPLE_TAG;
+ else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
+ sam_task_attr = MSG_ORDERED_TAG;
+ else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
+ sam_task_attr = MSG_HEAD_TAG;
+ else if (iscsi_task_attr == ISCSI_ATTR_ACA)
+ sam_task_attr = MSG_ACA_TAG;
+ else {
+ pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
+ " MSG_SIMPLE_TAG\n", iscsi_task_attr);
+ sam_task_attr = MSG_SIMPLE_TAG;
+ }
+
+ se_cmd = &cmd->se_cmd;
+ /*
+ * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+ */
+ transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
+ conn->sess->se_sess, data_length, data_direction,
+ sam_task_attr, &cmd->sense_buffer[0]);
+ return cmd;
+}
+
+struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
+ struct iscsi_conn *conn,
+ u8 function)
+{
+ struct iscsi_cmd *cmd;
+ struct se_cmd *se_cmd;
+ u8 tcm_function;
+
+ cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->data_direction = DMA_NONE;
+
+ cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
+ if (!cmd->tmr_req) {
+ pr_err("Unable to allocate memory for"
+ " Task Management command!\n");
+ return NULL;
+ }
+ /*
+ * TASK_REASSIGN for ERL=2 / connection stays inside of
+ * LIO-Target $FABRIC_MOD
+ */
+ if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
+ return cmd;
+
+ se_cmd = &cmd->se_cmd;
+ /*
+ * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+ */
+ transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
+ conn->sess->se_sess, 0, DMA_NONE,
+ MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
+
+ switch (function) {
+ case ISCSI_TM_FUNC_ABORT_TASK:
+ tcm_function = TMR_ABORT_TASK;
+ break;
+ case ISCSI_TM_FUNC_ABORT_TASK_SET:
+ tcm_function = TMR_ABORT_TASK_SET;
+ break;
+ case ISCSI_TM_FUNC_CLEAR_ACA:
+ tcm_function = TMR_CLEAR_ACA;
+ break;
+ case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+ tcm_function = TMR_CLEAR_TASK_SET;
+ break;
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ tcm_function = TMR_LUN_RESET;
+ break;
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ tcm_function = TMR_TARGET_WARM_RESET;
+ break;
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ tcm_function = TMR_TARGET_COLD_RESET;
+ break;
+ default:
+ pr_err("Unknown iSCSI TMR Function:"
+ " 0x%02x\n", function);
+ goto out;
+ }
+
+ se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
+ (void *)cmd->tmr_req, tcm_function);
+ if (!se_cmd->se_tmr_req)
+ goto out;
+
+ cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
+
+ return cmd;
+out:
+ if (se_cmd)
+ transport_free_se_cmd(se_cmd);
+ iscsit_release_cmd(cmd);
+ return NULL;
+}
+
+int iscsit_decide_list_to_build(
+ struct iscsi_cmd *cmd,
+ u32 immediate_data_length)
+{
+ struct iscsi_build_list bl;
+ struct iscsi_conn *conn = cmd->conn;
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na;
+
+ if (sess->sess_ops->DataSequenceInOrder &&
+ sess->sess_ops->DataPDUInOrder)
+ return 0;
+
+ if (cmd->data_direction == DMA_NONE)
+ return 0;
+
+ na = iscsit_tpg_get_node_attrib(sess);
+ memset(&bl, 0, sizeof(struct iscsi_build_list));
+
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
+ bl.data_direction = ISCSI_PDU_READ;
+ bl.type = PDULIST_NORMAL;
+ if (na->random_datain_pdu_offsets)
+ bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
+ if (na->random_datain_seq_offsets)
+ bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
+ } else {
+ bl.data_direction = ISCSI_PDU_WRITE;
+ bl.immediate_data_length = immediate_data_length;
+ if (na->random_r2t_offsets)
+ bl.randomize |= RANDOM_R2T_OFFSETS;
+
+ if (!cmd->immediate_data && !cmd->unsolicited_data)
+ bl.type = PDULIST_NORMAL;
+ else if (cmd->immediate_data && !cmd->unsolicited_data)
+ bl.type = PDULIST_IMMEDIATE;
+ else if (!cmd->immediate_data && cmd->unsolicited_data)
+ bl.type = PDULIST_UNSOLICITED;
+ else if (cmd->immediate_data && cmd->unsolicited_data)
+ bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
+ }
+
+ return iscsit_do_build_list(cmd, &bl);
+}
+
+struct iscsi_seq *iscsit_get_seq_holder_for_datain(
+ struct iscsi_cmd *cmd,
+ u32 seq_send_order)
+{
+ u32 i;
+
+ for (i = 0; i < cmd->seq_count; i++)
+ if (cmd->seq_list[i].seq_send_order == seq_send_order)
+ return &cmd->seq_list[i];
+
+ return NULL;
+}
+
+struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
+{
+ u32 i;
+
+ if (!cmd->seq_list) {
+ pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+ return NULL;
+ }
+
+ for (i = 0; i < cmd->seq_count; i++) {
+ if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
+ continue;
+ if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
+ cmd->seq_send_order++;
+ return &cmd->seq_list[i];
+ }
+ }
+
+ return NULL;
+}
+
+struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
+ struct iscsi_cmd *cmd,
+ u32 r2t_sn)
+{
+ struct iscsi_r2t *r2t;
+
+ spin_lock_bh(&cmd->r2t_lock);
+ list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
+ if (r2t->r2t_sn == r2t_sn) {
+ spin_unlock_bh(&cmd->r2t_lock);
+ return r2t;
+ }
+ }
+ spin_unlock_bh(&cmd->r2t_lock);
+
+ return NULL;
+}
+
+static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
+{
+ int ret;
+
+ /*
+ * This is the proper method of checking received CmdSN against
+ * ExpCmdSN and MaxCmdSN values, as well as accounting for out
+ * of order CmdSNs due to multiple connection sessions and/or
+ * CRC failures.
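+ *
+ * For example, with ExpCmdSN = 0xfffffffe and MaxCmdSN = 0x00000002
+ * (a window that has wrapped past zero), a received CmdSN of
+ * 0xffffffff is treated as higher than expected and queued for
+ * out-of-order handling, while 0x00000003 exceeds MaxCmdSN and is
+ * a protocol error.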
+ */
+ if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
+ pr_err("Received CmdSN: 0x%08x is greater than"
+ " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
+ sess->max_cmd_sn);
+ ret = CMDSN_ERROR_CANNOT_RECOVER;
+
+ } else if (cmdsn == sess->exp_cmd_sn) {
+ sess->exp_cmd_sn++;
+ pr_debug("Received CmdSN matches ExpCmdSN,"
+ " incremented ExpCmdSN to: 0x%08x\n",
+ sess->exp_cmd_sn);
+ ret = CMDSN_NORMAL_OPERATION;
+
+ } else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
+ pr_debug("Received CmdSN: 0x%08x is greater"
+ " than ExpCmdSN: 0x%08x, not acknowledging.\n",
+ cmdsn, sess->exp_cmd_sn);
+ ret = CMDSN_HIGHER_THAN_EXP;
+
+ } else {
+ pr_err("Received CmdSN: 0x%08x is less than"
+ " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
+ sess->exp_cmd_sn);
+ ret = CMDSN_LOWER_THAN_EXP;
+ }
+
+ return ret;
+}
+
+/*
+ * Commands may be received out of order if MC/S is in use.
+ * Ensure they are executed in CmdSN order.
+ */
+int iscsit_sequence_cmd(
+ struct iscsi_conn *conn,
+ struct iscsi_cmd *cmd,
+ u32 cmdsn)
+{
+ int ret;
+ int cmdsn_ret;
+
+ mutex_lock(&conn->sess->cmdsn_mutex);
+
+ cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, cmdsn);
+ switch (cmdsn_ret) {
+ case CMDSN_NORMAL_OPERATION:
+ ret = iscsit_execute_cmd(cmd, 0);
+ if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
+ iscsit_execute_ooo_cmdsns(conn->sess);
+ break;
+ case CMDSN_HIGHER_THAN_EXP:
+ ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, cmdsn);
+ break;
+ case CMDSN_LOWER_THAN_EXP:
+ cmd->i_state = ISTATE_REMOVE;
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+ ret = cmdsn_ret;
+ break;
+ default:
+ ret = cmdsn_ret;
+ break;
+ }
+ mutex_unlock(&conn->sess->cmdsn_mutex);
+
+ return ret;
+}
+
+int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct iscsi_data *hdr = (struct iscsi_data *) buf;
+ u32 payload_length = ntoh24(hdr->dlength);
+
+ if (conn->sess->sess_ops->InitialR2T) {
+ pr_err("Received unexpected unsolicited data"
+ " while InitialR2T=Yes, protocol error.\n");
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
+ return -1;
+ }
+
+ if ((cmd->first_burst_len + payload_length) >
+ conn->sess->sess_ops->FirstBurstLength) {
+ pr_err("Total %u bytes exceeds FirstBurstLength: %u"
+ " for this Unsolicited DataOut Burst.\n",
+ (cmd->first_burst_len + payload_length),
+ conn->sess->sess_ops->FirstBurstLength);
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+ return -1;
+ }
+
+ if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
+ return 0;
+
+ if (((cmd->first_burst_len + payload_length) != cmd->data_length) &&
+ ((cmd->first_burst_len + payload_length) !=
+ conn->sess->sess_ops->FirstBurstLength)) {
+ pr_err("Unsolicited non-immediate data received %u"
+ " does not equal FirstBurstLength: %u, and does"
+ " not equal ExpXferLen %u.\n",
+ (cmd->first_burst_len + payload_length),
+ conn->sess->sess_ops->FirstBurstLength, cmd->data_length);
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_INCORRECT_AMOUNT_OF_DATA, 0);
+ return -1;
+ }
+ return 0;
+}
+
+struct iscsi_cmd *iscsit_find_cmd_from_itt(
+ struct iscsi_conn *conn,
+ u32 init_task_tag)
+{
+ struct iscsi_cmd *cmd;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->init_task_tag == init_task_tag) {
+ spin_unlock_bh(&conn->cmd_lock);
+ return cmd;
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ pr_err("Unable to locate ITT: 0x%08x on CID: %hu\n",
+ init_task_tag, conn->cid);
+ return NULL;
+}
+
+struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
+ struct iscsi_conn *conn,
+ u32 init_task_tag,
+ u32 length)
+{
+ struct iscsi_cmd *cmd;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->init_task_tag == init_task_tag) {
+ spin_unlock_bh(&conn->cmd_lock);
+ return cmd;
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
+ " dumping payload\n", init_task_tag, conn->cid);
+ if (length)
+ iscsit_dump_data_payload(conn, length, 1);
+
+ return NULL;
+}
+
+struct iscsi_cmd *iscsit_find_cmd_from_ttt(
+ struct iscsi_conn *conn,
+ u32 targ_xfer_tag)
+{
+ struct iscsi_cmd *cmd = NULL;
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
+ if (cmd->targ_xfer_tag == targ_xfer_tag) {
+ spin_unlock_bh(&conn->cmd_lock);
+ return cmd;
+ }
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
+ targ_xfer_tag, conn->cid);
+ return NULL;
+}
+
+int iscsit_find_cmd_for_recovery(
+ struct iscsi_session *sess,
+ struct iscsi_cmd **cmd_ptr,
+ struct iscsi_conn_recovery **cr_ptr,
+ u32 init_task_tag)
+{
+ struct iscsi_cmd *cmd = NULL;
+ struct iscsi_conn_recovery *cr;
+ /*
+ * Scan through the inactive connection recovery list's command list.
+ * If init_task_tag matches, the command still belongs to an inactive
+ * connection recovery entry and cannot be reassigned yet.
+ */
+ spin_lock(&sess->cr_i_lock);
+ list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
+ if (cmd->init_task_tag == init_task_tag) {
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ spin_unlock(&sess->cr_i_lock);
+
+ *cr_ptr = cr;
+ *cmd_ptr = cmd;
+ return -2;
+ }
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&sess->cr_i_lock);
+ /*
+ * Scan through the active connection recovery list's command list.
+ * If init_task_tag matches, the command is ready to be reassigned.
+ */
+ spin_lock(&sess->cr_a_lock);
+ list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
+ spin_lock(&cr->conn_recovery_cmd_lock);
+ list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
+ if (cmd->init_task_tag == init_task_tag) {
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ spin_unlock(&sess->cr_a_lock);
+
+ *cr_ptr = cr;
+ *cmd_ptr = cmd;
+ return 0;
+ }
+ }
+ spin_unlock(&cr->conn_recovery_cmd_lock);
+ }
+ spin_unlock(&sess->cr_a_lock);
+
+ return -1;
+}
+
+void iscsit_add_cmd_to_immediate_queue(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ u8 state)
+{
+ struct iscsi_queue_req *qr;
+
+ qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
+ if (!qr) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_queue_req\n");
+ return;
+ }
+ INIT_LIST_HEAD(&qr->qr_list);
+ qr->cmd = cmd;
+ qr->state = state;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ list_add_tail(&qr->qr_list, &conn->immed_queue_list);
+ atomic_inc(&cmd->immed_queue_count);
+ atomic_set(&conn->check_immediate_queue, 1);
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ wake_up_process(conn->thread_set->tx_thread);
+}
+
+struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ if (list_empty(&conn->immed_queue_list)) {
+ spin_unlock_bh(&conn->immed_queue_lock);
+ return NULL;
+ }
+ list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
+ break;
+
+ list_del(&qr->qr_list);
+ if (qr->cmd)
+ atomic_dec(&qr->cmd->immed_queue_count);
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ return qr;
+}
+
+static void iscsit_remove_cmd_from_immediate_queue(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr, *qr_tmp;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ if (!atomic_read(&cmd->immed_queue_count)) {
+ spin_unlock_bh(&conn->immed_queue_lock);
+ return;
+ }
+
+ list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
+ if (qr->cmd != cmd)
+ continue;
+
+ atomic_dec(&qr->cmd->immed_queue_count);
+ list_del(&qr->qr_list);
+ kmem_cache_free(lio_qr_cache, qr);
+ }
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ if (atomic_read(&cmd->immed_queue_count)) {
+ pr_err("ITT: 0x%08x immed_queue_count: %d\n",
+ cmd->init_task_tag,
+ atomic_read(&cmd->immed_queue_count));
+ }
+}
+
+void iscsit_add_cmd_to_response_queue(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ u8 state)
+{
+ struct iscsi_queue_req *qr;
+
+ qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
+ if (!qr) {
+ pr_err("Unable to allocate memory for"
+ " struct iscsi_queue_req\n");
+ return;
+ }
+ INIT_LIST_HEAD(&qr->qr_list);
+ qr->cmd = cmd;
+ qr->state = state;
+
+ spin_lock_bh(&conn->response_queue_lock);
+ list_add_tail(&qr->qr_list, &conn->response_queue_list);
+ atomic_inc(&cmd->response_queue_count);
+ spin_unlock_bh(&conn->response_queue_lock);
+
+ wake_up_process(conn->thread_set->tx_thread);
+}
+
+struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr;
+
+ spin_lock_bh(&conn->response_queue_lock);
+ if (list_empty(&conn->response_queue_list)) {
+ spin_unlock_bh(&conn->response_queue_lock);
+ return NULL;
+ }
+
+ list_for_each_entry(qr, &conn->response_queue_list, qr_list)
+ break;
+
+ list_del(&qr->qr_list);
+ if (qr->cmd)
+ atomic_dec(&qr->cmd->response_queue_count);
+ spin_unlock_bh(&conn->response_queue_lock);
+
+ return qr;
+}
+
+static void iscsit_remove_cmd_from_response_queue(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr, *qr_tmp;
+
+ spin_lock_bh(&conn->response_queue_lock);
+ if (!atomic_read(&cmd->response_queue_count)) {
+ spin_unlock_bh(&conn->response_queue_lock);
+ return;
+ }
+
+ list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
+ qr_list) {
+ if (qr->cmd != cmd)
+ continue;
+
+ atomic_dec(&qr->cmd->response_queue_count);
+ list_del(&qr->qr_list);
+ kmem_cache_free(lio_qr_cache, qr);
+ }
+ spin_unlock_bh(&conn->response_queue_lock);
+
+ if (atomic_read(&cmd->response_queue_count)) {
+ pr_err("ITT: 0x%08x response_queue_count: %d\n",
+ cmd->init_task_tag,
+ atomic_read(&cmd->response_queue_count));
+ }
+}
+
+void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
+{
+ struct iscsi_queue_req *qr, *qr_tmp;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
+ list_del(&qr->qr_list);
+ if (qr->cmd)
+ atomic_dec(&qr->cmd->immed_queue_count);
+
+ kmem_cache_free(lio_qr_cache, qr);
+ }
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ spin_lock_bh(&conn->response_queue_lock);
+ list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
+ qr_list) {
+ list_del(&qr->qr_list);
+ if (qr->cmd)
+ atomic_dec(&qr->cmd->response_queue_count);
+
+ kmem_cache_free(lio_qr_cache, qr);
+ }
+ spin_unlock_bh(&conn->response_queue_lock);
+}
+
+void iscsit_release_cmd(struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ int i;
+
+ iscsit_free_r2ts_from_list(cmd);
+ iscsit_free_all_datain_reqs(cmd);
+
+ kfree(cmd->buf_ptr);
+ kfree(cmd->pdu_list);
+ kfree(cmd->seq_list);
+ kfree(cmd->tmr_req);
+ kfree(cmd->iov_data);
+
+ for (i = 0; i < cmd->t_mem_sg_nents; i++)
+ __free_page(sg_page(&cmd->t_mem_sg[i]));
+
+ kfree(cmd->t_mem_sg);
+
+ if (conn) {
+ iscsit_remove_cmd_from_immediate_queue(cmd, conn);
+ iscsit_remove_cmd_from_response_queue(cmd, conn);
+ }
+
+ kmem_cache_free(lio_cmd_cache, cmd);
+}
+
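+/*
+ * Returns 0 when the session has no active users, 1 after sleeping until
+ * the last user dropped its reference, and 2 when called from interrupt
+ * context where sleeping on the completion is not possible.
+ */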
+int iscsit_check_session_usage_count(struct iscsi_session *sess)
+{
+ spin_lock_bh(&sess->session_usage_lock);
+ if (sess->session_usage_count != 0) {
+ sess->session_waiting_on_uc = 1;
+ spin_unlock_bh(&sess->session_usage_lock);
+ if (in_interrupt())
+ return 2;
+
+ wait_for_completion(&sess->session_waiting_on_uc_comp);
+ return 1;
+ }
+ spin_unlock_bh(&sess->session_usage_lock);
+
+ return 0;
+}
+
+void iscsit_dec_session_usage_count(struct iscsi_session *sess)
+{
+ spin_lock_bh(&sess->session_usage_lock);
+ sess->session_usage_count--;
+
+ if (!sess->session_usage_count && sess->session_waiting_on_uc)
+ complete(&sess->session_waiting_on_uc_comp);
+
+ spin_unlock_bh(&sess->session_usage_lock);
+}
+
+void iscsit_inc_session_usage_count(struct iscsi_session *sess)
+{
+ spin_lock_bh(&sess->session_usage_lock);
+ sess->session_usage_count++;
+ spin_unlock_bh(&sess->session_usage_lock);
+}
+
+/*
+ * Used before iscsit_do_[rx,tx]_data() to determine iov and [rx,tx]_marker
+ * array counts needed for sync and steering.
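+ *
+ * Each full marker interval that falls inside the payload splits one
+ * iovec into up to three entries (the data up to the marker plus two
+ * 4-byte marker words), hence the += 3 / += 2 accounting below.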
+ */
+static int iscsit_determine_sync_and_steering_counts(
+ struct iscsi_conn *conn,
+ struct iscsi_data_count *count)
+{
+ u32 length = count->data_length;
+ u32 marker, markint;
+
+ count->sync_and_steering = 1;
+
+ marker = (count->type == ISCSI_RX_DATA) ?
+ conn->of_marker : conn->if_marker;
+ markint = (count->type == ISCSI_RX_DATA) ?
+ (conn->conn_ops->OFMarkInt * 4) :
+ (conn->conn_ops->IFMarkInt * 4);
+ count->ss_iov_count = count->iov_count;
+
+ while (length > 0) {
+ if (length >= marker) {
+ count->ss_iov_count += 3;
+ count->ss_marker_count += 2;
+
+ length -= marker;
+ marker = markint;
+ } else
+ length = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Setup conn->if_marker and conn->of_marker values based upon
+ * the initial marker-less interval. (see iSCSI v19 A.2)
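+ *
+ * For example (illustrative numbers): with OFMarkInt negotiated as
+ * 2048 words (8192 bytes) and only the 48-byte Login PDU received so
+ * far, of_marker becomes 8192 - 48 = 8144 bytes remaining until the
+ * next marker is expected on the RX stream.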
+ */
+int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
+{
+ int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
+ /*
+ * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
+ */
+ u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
+ u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);
+
+ if (conn->conn_ops->OFMarker) {
+ /*
+ * Account for the first Login Command received not
+ * via iscsi_recv_msg().
+ */
+ conn->of_marker += ISCSI_HDR_LEN;
+ if (conn->of_marker <= OFMarkInt) {
+ conn->of_marker = (OFMarkInt - conn->of_marker);
+ } else {
+ login_ofmarker_count = (conn->of_marker / OFMarkInt);
+ next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
+ (login_ofmarker_count * MARKER_SIZE);
+ conn->of_marker = (next_marker - conn->of_marker);
+ }
+ conn->of_marker_offset = 0;
+ pr_debug("Setting OFMarker value to %u based on Initial"
+ " Markerless Interval.\n", conn->of_marker);
+ }
+
+ if (conn->conn_ops->IFMarker) {
+ if (conn->if_marker <= IFMarkInt) {
+ conn->if_marker = (IFMarkInt - conn->if_marker);
+ } else {
+ login_ifmarker_count = (conn->if_marker / IFMarkInt);
+ next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
+ (login_ifmarker_count * MARKER_SIZE);
+ conn->if_marker = (next_marker - conn->if_marker);
+ }
+ pr_debug("Setting IFMarker value to %u based on Initial"
+ " Markerless Interval.\n", conn->if_marker);
+ }
+
+ return 0;
+}
+
+struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
+{
+ struct iscsi_conn *conn;
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+ if ((conn->cid == cid) &&
+ (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
+ iscsit_inc_conn_usage_count(conn);
+ spin_unlock_bh(&sess->conn_lock);
+ return conn;
+ }
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ return NULL;
+}
+
+struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
+{
+ struct iscsi_conn *conn;
+
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+ if (conn->cid == cid) {
+ iscsit_inc_conn_usage_count(conn);
+ spin_lock(&conn->state_lock);
+ atomic_set(&conn->connection_wait_rcfr, 1);
+ spin_unlock(&conn->state_lock);
+ spin_unlock_bh(&sess->conn_lock);
+ return conn;
+ }
+ }
+ spin_unlock_bh(&sess->conn_lock);
+
+ return NULL;
+}
+
+void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->conn_usage_lock);
+ if (conn->conn_usage_count != 0) {
+ conn->conn_waiting_on_uc = 1;
+ spin_unlock_bh(&conn->conn_usage_lock);
+
+ wait_for_completion(&conn->conn_waiting_on_uc_comp);
+ return;
+ }
+ spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->conn_usage_lock);
+ conn->conn_usage_count--;
+
+ if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
+ complete(&conn->conn_waiting_on_uc_comp);
+
+ spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->conn_usage_lock);
+ conn->conn_usage_count++;
+ spin_unlock_bh(&conn->conn_usage_lock);
+}
+
+static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
+{
+ u8 state;
+ struct iscsi_cmd *cmd;
+
+ cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
+ if (!cmd)
+ return -1;
+
+ cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
+ state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
+ ISTATE_SEND_NOPIN_NO_RESPONSE;
+ cmd->init_task_tag = 0xFFFFFFFF;
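+ /*
+ * When a response is expected, hand out a real Target Transfer Tag from
+ * the per-session counter, skipping the reserved 0xFFFFFFFF value;
+ * unsolicited NopINs keep the reserved tag.
+ */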
+ spin_lock_bh(&conn->sess->ttt_lock);
+ cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
+ 0xFFFFFFFF;
+ if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
+ cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
+ spin_unlock_bh(&conn->sess->ttt_lock);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ if (want_response)
+ iscsit_start_nopin_response_timer(conn);
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, state);
+
+ return 0;
+}
+
+static void iscsit_handle_nopin_response_timeout(unsigned long data)
+{
+ struct iscsi_conn *conn = (struct iscsi_conn *) data;
+
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ iscsit_dec_conn_usage_count(conn);
+ return;
+ }
+
+ pr_debug("Did not receive response to NOPIN on CID: %hu on"
+ " SID: %u, failing connection.\n", conn->cid,
+ conn->sess->sid);
+ conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+
+ {
+ struct iscsi_portal_group *tpg = conn->sess->tpg;
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ if (tiqn) {
+ spin_lock_bh(&tiqn->sess_err_stats.lock);
+ strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+ (void *)conn->sess->sess_ops->InitiatorName);
+ tiqn->sess_err_stats.last_sess_failure_type =
+ ISCSI_SESS_ERR_CXN_TIMEOUT;
+ tiqn->sess_err_stats.cxn_timeout_errors++;
+ conn->sess->conn_timeout_errors++;
+ spin_unlock_bh(&tiqn->sess_err_stats.lock);
+ }
+ }
+
+ iscsit_cause_connection_reinstatement(conn, 0);
+ iscsit_dec_conn_usage_count(conn);
+}
+
+void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+
+ mod_timer(&conn->nopin_response_timer,
+ (get_jiffies_64() + na->nopin_response_timeout * HZ));
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+/*
+ * Called with conn->nopin_timer_lock held.
+ */
+void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+
+ init_timer(&conn->nopin_response_timer);
+ conn->nopin_response_timer.expires =
+ (get_jiffies_64() + na->nopin_response_timeout * HZ);
+ conn->nopin_response_timer.data = (unsigned long)conn;
+ conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
+ conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
+ conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&conn->nopin_response_timer);
+
+ pr_debug("Started NOPIN Response Timer on CID: %d to %u"
+ " seconds\n", conn->cid, na->nopin_response_timeout);
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+ conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+
+ del_timer_sync(&conn->nopin_response_timer);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+static void iscsit_handle_nopin_timeout(unsigned long data)
+{
+ struct iscsi_conn *conn = (struct iscsi_conn *) data;
+
+ iscsit_inc_conn_usage_count(conn);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ iscsit_dec_conn_usage_count(conn);
+ return;
+ }
+ conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+
+ iscsit_add_nopin(conn, 1);
+ iscsit_dec_conn_usage_count(conn);
+}
+
+/*
+ * Called with conn->nopin_timer_lock held.
+ */
+void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+ /*
+ * NOPIN timeout is disabled.
+ */
+ if (!na->nopin_timeout)
+ return;
+
+ if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
+ return;
+
+ init_timer(&conn->nopin_timer);
+ conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
+ conn->nopin_timer.data = (unsigned long)conn;
+ conn->nopin_timer.function = iscsit_handle_nopin_timeout;
+ conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
+ conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&conn->nopin_timer);
+
+ pr_debug("Started NOPIN Timer on CID: %d at %u second"
+ " interval\n", conn->cid, na->nopin_timeout);
+}
+
+void iscsit_start_nopin_timer(struct iscsi_conn *conn)
+{
+ struct iscsi_session *sess = conn->sess;
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+ /*
+ * NOPIN timeout is disabled.
+ */
+ if (!na->nopin_timeout)
+ return;
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+
+ init_timer(&conn->nopin_timer);
+ conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
+ conn->nopin_timer.data = (unsigned long)conn;
+ conn->nopin_timer.function = iscsit_handle_nopin_timeout;
+ conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
+ conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
+ add_timer(&conn->nopin_timer);
+
+ pr_debug("Started NOPIN Timer on CID: %d at %u second"
+ " interval\n", conn->cid, na->nopin_timeout);
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
+{
+ spin_lock_bh(&conn->nopin_timer_lock);
+ if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
+ spin_unlock_bh(&conn->nopin_timer_lock);
+ return;
+ }
+ conn->nopin_timer_flags |= ISCSI_TF_STOP;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+
+ del_timer_sync(&conn->nopin_timer);
+
+ spin_lock_bh(&conn->nopin_timer_lock);
+ conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
+ spin_unlock_bh(&conn->nopin_timer_lock);
+}
+
+int iscsit_send_tx_data(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn,
+ int use_misc)
+{
+ int tx_sent, tx_size;
+ u32 iov_count;
+ struct kvec *iov;
+
+send_data:
+ tx_size = cmd->tx_size;
+
+ if (!use_misc) {
+ iov = &cmd->iov_data[0];
+ iov_count = cmd->iov_data_count;
+ } else {
+ iov = &cmd->iov_misc[0];
+ iov_count = cmd->iov_misc_count;
+ }
+
+ tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
+ if (tx_size != tx_sent) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tx_data() returned -EAGAIN\n");
+ goto send_data;
+ } else
+ return -1;
+ }
+ cmd->tx_size = 0;
+
+ return 0;
+}
+
+int iscsit_fe_sendpage_sg(
+ struct iscsi_cmd *cmd,
+ struct iscsi_conn *conn)
+{
+ struct scatterlist *sg = cmd->first_data_sg;
+ struct kvec iov;
+ u32 tx_hdr_size, data_len;
+ u32 offset = cmd->first_data_sg_off;
+ int tx_sent;
+
+send_hdr:
+ tx_hdr_size = ISCSI_HDR_LEN;
+ if (conn->conn_ops->HeaderDigest)
+ tx_hdr_size += ISCSI_CRC_LEN;
+
+ iov.iov_base = cmd->pdu;
+ iov.iov_len = tx_hdr_size;
+
+ tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
+ if (tx_hdr_size != tx_sent) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tx_data() returned -EAGAIN\n");
+ goto send_hdr;
+ }
+ return -1;
+ }
+
+ data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
+ if (conn->conn_ops->DataDigest)
+ data_len -= ISCSI_CRC_LEN;
+
+ /*
+ * Perform sendpage() for each page in the scatterlist
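+ *
+ * Only the first entry may start at a non-zero first_data_sg_off; each
+ * following entry is sent from offset 0, and sub_len caps every send at
+ * whatever remains of data_len.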
+ */
+ while (data_len) {
+ u32 space = (sg->length - offset);
+ u32 sub_len = min_t(u32, data_len, space);
+send_pg:
+ tx_sent = conn->sock->ops->sendpage(conn->sock,
+ sg_page(sg), sg->offset + offset, sub_len, 0);
+ if (tx_sent != sub_len) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tcp_sendpage() returned"
+ " -EAGAIN\n");
+ goto send_pg;
+ }
+
+ pr_err("tcp_sendpage() failure: %d\n",
+ tx_sent);
+ return -1;
+ }
+
+ data_len -= sub_len;
+ offset = 0;
+ sg = sg_next(sg);
+ }
+
+send_padding:
+ if (cmd->padding) {
+ struct kvec *iov_p =
+ &cmd->iov_data[cmd->iov_data_count-1];
+
+ tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
+ if (cmd->padding != tx_sent) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tx_data() returned -EAGAIN\n");
+ goto send_padding;
+ }
+ return -1;
+ }
+ }
+
+send_datacrc:
+ if (conn->conn_ops->DataDigest) {
+ struct kvec *iov_d =
+ &cmd->iov_data[cmd->iov_data_count];
+
+ tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
+ if (ISCSI_CRC_LEN != tx_sent) {
+ if (tx_sent == -EAGAIN) {
+ pr_err("tx_data() returned -EAGAIN\n");
+ goto send_datacrc;
+ }
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function is mainly used to send an ISCSI_TARG_LOGIN_RSP PDU
+ * back to the Initiator when an exception condition occurs, with the
+ * errors set in status_class and status_detail.
+ *
+ * Parameters: iSCSI Connection, Status Class, Status Detail.
+ * Returns: 0 on success, -1 on error.
+ */
+int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
+{
+ u8 iscsi_hdr[ISCSI_HDR_LEN];
+ int err;
+ struct kvec iov;
+ struct iscsi_login_rsp *hdr;
+
+ iscsit_collect_login_stats(conn, status_class, status_detail);
+
+ memset(&iov, 0, sizeof(struct kvec));
+ memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);
+
+ hdr = (struct iscsi_login_rsp *)&iscsi_hdr;
+ hdr->opcode = ISCSI_OP_LOGIN_RSP;
+ hdr->status_class = status_class;
+ hdr->status_detail = status_detail;
+ hdr->itt = cpu_to_be32(conn->login_itt);
+
+ iov.iov_base = &iscsi_hdr;
+ iov.iov_len = ISCSI_HDR_LEN;
+
+ PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);
+
+ err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
+ if (err != ISCSI_HDR_LEN) {
+ pr_err("tx_data returned less than expected\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+void iscsit_print_session_params(struct iscsi_session *sess)
+{
+ struct iscsi_conn *conn;
+
+ pr_debug("-----------------------------[Session Params for"
+ " SID: %u]-----------------------------\n", sess->sid);
+ spin_lock_bh(&sess->conn_lock);
+ list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
+ iscsi_dump_conn_ops(conn->conn_ops);
+ spin_unlock_bh(&sess->conn_lock);
+
+ iscsi_dump_sess_ops(sess->sess_ops);
+}
+
+static int iscsit_do_rx_data(
+ struct iscsi_conn *conn,
+ struct iscsi_data_count *count)
+{
+ int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
+ u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0;
+ struct kvec iov[count->ss_iov_count], *iov_p;
+ struct msghdr msg;
+
+ if (!conn || !conn->sock || !conn->conn_ops)
+ return -1;
+
+ memset(&msg, 0, sizeof(struct msghdr));
+
+ if (count->sync_and_steering) {
+ int size = 0;
+ u32 i, orig_iov_count = 0;
+ u32 orig_iov_len = 0, orig_iov_loc = 0;
+ u32 iov_count = 0, per_iov_bytes = 0;
+ u32 *rx_marker, old_rx_marker = 0;
+ struct kvec *iov_record;
+
+ memset(&rx_marker_val, 0,
+ count->ss_marker_count * sizeof(u32));
+ memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
+
+ iov_record = count->iov;
+ orig_iov_count = count->iov_count;
+ rx_marker = &conn->of_marker;
+
+ i = 0;
+ size = data;
+ orig_iov_len = iov_record[orig_iov_loc].iov_len;
+ while (size > 0) {
+ pr_debug("rx_data: #1 orig_iov_len %u,"
+ " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
+ pr_debug("rx_data: #2 rx_marker %u, size"
+ " %u\n", *rx_marker, size);
+
+ if (orig_iov_len >= *rx_marker) {
+ iov[iov_count].iov_len = *rx_marker;
+ iov[iov_count++].iov_base =
+ (iov_record[orig_iov_loc].iov_base +
+ per_iov_bytes);
+
+ iov[iov_count].iov_len = (MARKER_SIZE / 2);
+ iov[iov_count++].iov_base =
+ &rx_marker_val[rx_marker_iov++];
+ iov[iov_count].iov_len = (MARKER_SIZE / 2);
+ iov[iov_count++].iov_base =
+ &rx_marker_val[rx_marker_iov++];
+ old_rx_marker = *rx_marker;
+
+ /*
+ * OFMarkInt is in 32-bit words.
+ */
+ *rx_marker = (conn->conn_ops->OFMarkInt * 4);
+ size -= old_rx_marker;
+ orig_iov_len -= old_rx_marker;
+ per_iov_bytes += old_rx_marker;
+
+ pr_debug("rx_data: #3 new_rx_marker"
+ " %u, size %u\n", *rx_marker, size);
+ } else {
+ iov[iov_count].iov_len = orig_iov_len;
+ iov[iov_count++].iov_base =
+ (iov_record[orig_iov_loc].iov_base +
+ per_iov_bytes);
+
+ per_iov_bytes = 0;
+ *rx_marker -= orig_iov_len;
+ size -= orig_iov_len;
+
+ if (size)
+ orig_iov_len =
+ iov_record[++orig_iov_loc].iov_len;
+
+ pr_debug("rx_data: #4 new_rx_marker"
+ " %u, size %u\n", *rx_marker, size);
+ }
+ }
+ data += (rx_marker_iov * (MARKER_SIZE / 2));
+
+ iov_p = &iov[0];
+ iov_len = iov_count;
+
+ if (iov_count > count->ss_iov_count) {
+ pr_err("iov_count: %d, count->ss_iov_count:"
+ " %d\n", iov_count, count->ss_iov_count);
+ return -1;
+ }
+ if (rx_marker_iov > count->ss_marker_count) {
+ pr_err("rx_marker_iov: %d, count->ss_marker"
+ "_count: %d\n", rx_marker_iov,
+ count->ss_marker_count);
+ return -1;
+ }
+ } else {
+ iov_p = count->iov;
+ iov_len = count->iov_count;
+ }
+
+ while (total_rx < data) {
+ rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
+ (data - total_rx), MSG_WAITALL);
+ if (rx_loop <= 0) {
+ pr_debug("rx_loop: %d total_rx: %d\n",
+ rx_loop, total_rx);
+ return rx_loop;
+ }
+ total_rx += rx_loop;
+ pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
+ rx_loop, total_rx, data);
+ }
+
+ if (count->sync_and_steering) {
+ int j;
+ for (j = 0; j < rx_marker_iov; j++) {
+ pr_debug("rx_data: #5 j: %d, offset: %d\n",
+ j, rx_marker_val[j]);
+ conn->of_marker_offset = rx_marker_val[j];
+ }
+ total_rx -= (rx_marker_iov * (MARKER_SIZE / 2));
+ }
+
+ return total_rx;
+}
+
+static int iscsit_do_tx_data(
+ struct iscsi_conn *conn,
+ struct iscsi_data_count *count)
+{
+ int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
+ u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0;
+ struct kvec iov[count->ss_iov_count], *iov_p;
+ struct msghdr msg;
+
+ if (!conn || !conn->sock || !conn->conn_ops)
+ return -1;
+
+ if (data <= 0) {
+ pr_err("Data length is: %d\n", data);
+ return -1;
+ }
+
+ memset(&msg, 0, sizeof(struct msghdr));
+
+ if (count->sync_and_steering) {
+ int size = 0;
+ u32 i, orig_iov_count = 0;
+ u32 orig_iov_len = 0, orig_iov_loc = 0;
+ u32 iov_count = 0, per_iov_bytes = 0;
+ u32 *tx_marker, old_tx_marker = 0;
+ struct kvec *iov_record;
+
+ memset(&tx_marker_val, 0,
+ count->ss_marker_count * sizeof(u32));
+ memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec));
+
+ iov_record = count->iov;
+ orig_iov_count = count->iov_count;
+ tx_marker = &conn->if_marker;
+
+ i = 0;
+ size = data;
+ orig_iov_len = iov_record[orig_iov_loc].iov_len;
+ while (size > 0) {
+ pr_debug("tx_data: #1 orig_iov_len %u,"
+ " orig_iov_loc %u\n", orig_iov_len, orig_iov_loc);
+ pr_debug("tx_data: #2 tx_marker %u, size"
+ " %u\n", *tx_marker, size);
+
+ if (orig_iov_len >= *tx_marker) {
+ iov[iov_count].iov_len = *tx_marker;
+ iov[iov_count++].iov_base =
+ (iov_record[orig_iov_loc].iov_base +
+ per_iov_bytes);
+
+ tx_marker_val[tx_marker_iov] =
+ (size - *tx_marker);
+ iov[iov_count].iov_len = (MARKER_SIZE / 2);
+ iov[iov_count++].iov_base =
+ &tx_marker_val[tx_marker_iov++];
+ iov[iov_count].iov_len = (MARKER_SIZE / 2);
+ iov[iov_count++].iov_base =
+ &tx_marker_val[tx_marker_iov++];
+ old_tx_marker = *tx_marker;
+
+ /*
+ * IFMarkInt is in 32-bit words.
+ */
+ *tx_marker = (conn->conn_ops->IFMarkInt * 4);
+ size -= old_tx_marker;
+ orig_iov_len -= old_tx_marker;
+ per_iov_bytes += old_tx_marker;
+
+ pr_debug("tx_data: #3 new_tx_marker"
+ " %u, size %u\n", *tx_marker, size);
+ pr_debug("tx_data: #4 offset %u\n",
+ tx_marker_val[tx_marker_iov-1]);
+ } else {
+ iov[iov_count].iov_len = orig_iov_len;
+ iov[iov_count++].iov_base
+ = (iov_record[orig_iov_loc].iov_base +
+ per_iov_bytes);
+
+ per_iov_bytes = 0;
+ *tx_marker -= orig_iov_len;
+ size -= orig_iov_len;
+
+ if (size)
+ orig_iov_len =
+ iov_record[++orig_iov_loc].iov_len;
+
+ pr_debug("tx_data: #5 new_tx_marker"
+ " %u, size %u\n", *tx_marker, size);
+ }
+ }
+
+ data += (tx_marker_iov * (MARKER_SIZE / 2));
+
+ iov_p = &iov[0];
+ iov_len = iov_count;
+
+ if (iov_count > count->ss_iov_count) {
+ pr_err("iov_count: %d, count->ss_iov_count:"
+ " %d\n", iov_count, count->ss_iov_count);
+ return -1;
+ }
+ if (tx_marker_iov > count->ss_marker_count) {
+ pr_err("tx_marker_iov: %d, count->ss_marker"
+ "_count: %d\n", tx_marker_iov,
+ count->ss_marker_count);
+ return -1;
+ }
+ } else {
+ iov_p = count->iov;
+ iov_len = count->iov_count;
+ }
+
+ while (total_tx < data) {
+ tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
+ (data - total_tx));
+ if (tx_loop <= 0) {
+ pr_debug("tx_loop: %d total_tx %d\n",
+ tx_loop, total_tx);
+ return tx_loop;
+ }
+ total_tx += tx_loop;
+ pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
+ tx_loop, total_tx, data);
+ }
+
+ if (count->sync_and_steering)
+ total_tx -= (tx_marker_iov * (MARKER_SIZE / 2));
+
+ return total_tx;
+}
+
+int rx_data(
+ struct iscsi_conn *conn,
+ struct kvec *iov,
+ int iov_count,
+ int data)
+{
+ struct iscsi_data_count c;
+
+ if (!conn || !conn->sock || !conn->conn_ops)
+ return -1;
+
+ memset(&c, 0, sizeof(struct iscsi_data_count));
+ c.iov = iov;
+ c.iov_count = iov_count;
+ c.data_length = data;
+ c.type = ISCSI_RX_DATA;
+
+ if (conn->conn_ops->OFMarker &&
+ (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
+ if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
+ return -1;
+ }
+
+ return iscsit_do_rx_data(conn, &c);
+}
+
+int tx_data(
+ struct iscsi_conn *conn,
+ struct kvec *iov,
+ int iov_count,
+ int data)
+{
+ struct iscsi_data_count c;
+
+ if (!conn || !conn->sock || !conn->conn_ops)
+ return -1;
+
+ memset(&c, 0, sizeof(struct iscsi_data_count));
+ c.iov = iov;
+ c.iov_count = iov_count;
+ c.data_length = data;
+ c.type = ISCSI_TX_DATA;
+
+ if (conn->conn_ops->IFMarker &&
+ (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) {
+ if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0)
+ return -1;
+ }
+
+ return iscsit_do_tx_data(conn, &c);
+}
+
+void iscsit_collect_login_stats(
+ struct iscsi_conn *conn,
+ u8 status_class,
+ u8 status_detail)
+{
+ struct iscsi_param *intrname = NULL;
+ struct iscsi_tiqn *tiqn;
+ struct iscsi_login_stats *ls;
+
+ tiqn = iscsit_snmp_get_tiqn(conn);
+ if (!tiqn)
+ return;
+
+ ls = &tiqn->login_stats;
+
+ spin_lock(&ls->lock);
+ if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
+ ((get_jiffies_64() - ls->last_fail_time) < 10)) {
+ /* We already have the failure info for this login */
+ spin_unlock(&ls->lock);
+ return;
+ }
+
+ if (status_class == ISCSI_STATUS_CLS_SUCCESS)
+ ls->accepts++;
+ else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
+ ls->redirects++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
+ } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
+ (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
+ ls->authenticate_fails++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHENTICATE;
+ } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
+ (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
+ ls->authorize_fails++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
+ } else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
+ (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
+ ls->negotiate_fails++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
+ } else {
+ ls->other_fails++;
+ ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
+ }
+
+ /* Save initiator name, ip address and time, if it is a failed login */
+ if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
+ if (conn->param_list)
+ intrname = iscsi_find_param_from_key(INITIATORNAME,
+ conn->param_list);
+ strcpy(ls->last_intr_fail_name,
+ (intrname ? intrname->value : "Unknown"));
+
+ ls->last_intr_fail_ip_family = conn->sock->sk->sk_family;
+ snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
+ "%s", conn->login_ip);
+ ls->last_fail_time = get_jiffies_64();
+ }
+
+ spin_unlock(&ls->lock);
+}
+
+struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
+{
+ struct iscsi_portal_group *tpg;
+
+ if (!conn || !conn->sess)
+ return NULL;
+
+ tpg = conn->sess->tpg;
+ if (!tpg)
+ return NULL;
+
+ if (!tpg->tpg_tiqn)
+ return NULL;
+
+ return tpg->tpg_tiqn;
+}
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
new file mode 100644
index 00000000000..2cd49d607bd
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -0,0 +1,60 @@
+#ifndef ISCSI_TARGET_UTIL_H
+#define ISCSI_TARGET_UTIL_H
+
+#define MARKER_SIZE 8
+
+extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
+extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
+extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
+extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
+extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
+extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
+extern struct iscsi_cmd *iscsit_allocate_se_cmd(struct iscsi_conn *, u32, int, int);
+extern struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(struct iscsi_conn *, u8);
+extern int iscsit_decide_list_to_build(struct iscsi_cmd *, u32);
+extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
+extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
+extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
+int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, u32 cmdsn);
+extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, u32);
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
+ u32, u32);
+extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
+extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd **,
+ struct iscsi_conn_recovery **, u32);
+extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
+extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
+extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
+extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
+extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern int iscsit_check_session_usage_count(struct iscsi_session *);
+extern void iscsit_dec_session_usage_count(struct iscsi_session *);
+extern void iscsit_inc_session_usage_count(struct iscsi_session *);
+extern int iscsit_set_sync_and_steering_values(struct iscsi_conn *);
+extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16);
+extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, u16);
+extern void iscsit_check_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_dec_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_inc_conn_usage_count(struct iscsi_conn *);
+extern void iscsit_mod_nopin_response_timer(struct iscsi_conn *);
+extern void iscsit_start_nopin_response_timer(struct iscsi_conn *);
+extern void iscsit_stop_nopin_response_timer(struct iscsi_conn *);
+extern void __iscsit_start_nopin_timer(struct iscsi_conn *);
+extern void iscsit_start_nopin_timer(struct iscsi_conn *);
+extern void iscsit_stop_nopin_timer(struct iscsi_conn *);
+extern int iscsit_send_tx_data(struct iscsi_cmd *, struct iscsi_conn *, int);
+extern int iscsit_fe_sendpage_sg(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_tx_login_rsp(struct iscsi_conn *, u8, u8);
+extern void iscsit_print_session_params(struct iscsi_session *);
+extern int iscsit_print_dev_to_proc(char *, char **, off_t, int);
+extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int);
+extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int);
+extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
+extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
+extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
+extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
+
+#endif /*** ISCSI_TARGET_UTIL_H ***/
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
index 57dcbc2d711..abe8ecbcdf0 100644
--- a/drivers/target/loopback/Kconfig
+++ b/drivers/target/loopback/Kconfig
@@ -3,9 +3,3 @@ config LOOPBACK_TARGET
help
Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
fabric loopback module.
-
-config LOOPBACK_TARGET_CDB_DEBUG
- bool "TCM loopback fabric module CDB debug code"
- depends on LOOPBACK_TARGET
- help
- Say Y here to enable the TCM loopback fabric module CDB debug code
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index dee2a2c909f..aa2d6799723 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -31,7 +31,6 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
@@ -80,7 +79,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
if (!tl_cmd) {
- printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
+ pr_err("Unable to allocate struct tcm_loop_cmd\n");
set_host_byte(sc, DID_ERROR);
return NULL;
}
@@ -118,17 +117,16 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
* Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
*/
if (scsi_bidi_cmnd(sc))
- T_TASK(se_cmd)->t_tasks_bidi = 1;
+ se_cmd->t_tasks_bidi = 1;
/*
* Locate the struct se_lun pointer and attach it to struct se_cmd
*/
- if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) {
+ if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
set_host_byte(sc, DID_NO_CONNECT);
return NULL;
}
- transport_device_setup_cmd(se_cmd);
return se_cmd;
}
@@ -143,17 +141,17 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
- void *mem_ptr, *mem_bidi_ptr = NULL;
- u32 sg_no_bidi = 0;
+ struct scatterlist *sgl_bidi = NULL;
+ u32 sgl_bidi_count = 0;
int ret;
/*
* Allocate the necessary tasks to complete the received CDB+data
*/
- ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
- if (ret == -1) {
+ ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
+ if (ret == -ENOMEM) {
/* Out of Resources */
return PYX_TRANSPORT_LU_COMM_FAILURE;
- } else if (ret == -2) {
+ } else if (ret == -EINVAL) {
/*
* Handle case for SAM_STAT_RESERVATION_CONFLICT
*/
@@ -165,35 +163,21 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
*/
return PYX_TRANSPORT_USE_SENSE_REASON;
}
+
/*
- * Setup the struct scatterlist memory from the received
- * struct scsi_cmnd.
+ * For BIDI commands, pass in the extra READ buffer
+ * to transport_generic_map_mem_to_cmd() below.
*/
- if (scsi_sg_count(sc)) {
- se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM;
- mem_ptr = (void *)scsi_sglist(sc);
- /*
- * For BIDI commands, pass in the extra READ buffer
- * to transport_generic_map_mem_to_cmd() below..
- */
- if (T_TASK(se_cmd)->t_tasks_bidi) {
- struct scsi_data_buffer *sdb = scsi_in(sc);
+ if (se_cmd->t_tasks_bidi) {
+ struct scsi_data_buffer *sdb = scsi_in(sc);
- mem_bidi_ptr = (void *)sdb->table.sgl;
- sg_no_bidi = sdb->table.nents;
- }
- } else {
- /*
- * Used for DMA_NONE
- */
- mem_ptr = NULL;
+ sgl_bidi = sdb->table.sgl;
+ sgl_bidi_count = sdb->table.nents;
}
- /*
- * Map the SG memory into struct se_mem->page linked list using the same
- * physical memory at sg->page_link.
- */
- ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr,
- scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi);
+
+ /* Tell the core about our preallocated memory */
+ ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+ scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
if (ret < 0)
return PYX_TRANSPORT_LU_COMM_FAILURE;
@@ -216,13 +200,10 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
* Release the struct se_cmd, which will make a callback to release
* struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
*/
- transport_generic_free_cmd(se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(se_cmd, 0, 0);
}
-/*
- * Called from struct target_core_fabric_ops->release_cmd_to_pool()
- */
-static void tcm_loop_deallocate_core_cmd(struct se_cmd *se_cmd)
+static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
@@ -300,7 +281,7 @@ static int tcm_loop_queuecommand(
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
- TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
+ pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
" scsi_buf_len: %u\n", sc->device->host->host_no,
sc->device->id, sc->device->channel, sc->device->lun,
sc->cmnd[0], scsi_bufflen(sc));
@@ -350,7 +331,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
*/
tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus) {
- printk(KERN_ERR "Unable to perform device reset without"
+ pr_err("Unable to perform device reset without"
" active I_T Nexus\n");
return FAILED;
}
@@ -363,13 +344,13 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd) {
- printk(KERN_ERR "Unable to allocate memory for tl_cmd\n");
+ pr_err("Unable to allocate memory for tl_cmd\n");
return FAILED;
}
tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
if (!tl_tmr) {
- printk(KERN_ERR "Unable to allocate memory for tl_tmr\n");
+ pr_err("Unable to allocate memory for tl_tmr\n");
goto release;
}
init_waitqueue_head(&tl_tmr->tl_tmr_wait);
@@ -384,14 +365,14 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
/*
* Allocate the LUN_RESET TMR
*/
- se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
+ se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
TMR_LUN_RESET);
- if (!se_cmd->se_tmr_req)
+ if (IS_ERR(se_cmd->se_tmr_req))
goto release;
/*
* Locate the underlying TCM struct se_lun from sc->device->lun
*/
- if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0)
+ if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
goto release;
/*
* Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
@@ -407,7 +388,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
SUCCESS : FAILED;
release:
if (se_cmd)
- transport_generic_free_cmd(se_cmd, 1, 1, 0);
+ transport_generic_free_cmd(se_cmd, 1, 0);
else
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
kfree(tl_tmr);
@@ -454,7 +435,7 @@ static int tcm_loop_driver_probe(struct device *dev)
sh = scsi_host_alloc(&tcm_loop_driver_template,
sizeof(struct tcm_loop_hba));
if (!sh) {
- printk(KERN_ERR "Unable to allocate struct scsi_host\n");
+ pr_err("Unable to allocate struct scsi_host\n");
return -ENODEV;
}
tl_hba->sh = sh;
@@ -473,7 +454,7 @@ static int tcm_loop_driver_probe(struct device *dev)
error = scsi_add_host(sh, &tl_hba->dev);
if (error) {
- printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
+ pr_err("%s: scsi_add_host failed\n", __func__);
scsi_host_put(sh);
return -ENODEV;
}
@@ -514,7 +495,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host
ret = device_register(&tl_hba->dev);
if (ret) {
- printk(KERN_ERR "device_register() failed for"
+ pr_err("device_register() failed for"
" tl_hba->dev: %d\n", ret);
return -ENODEV;
}
@@ -532,24 +513,24 @@ static int tcm_loop_alloc_core_bus(void)
tcm_loop_primary = root_device_register("tcm_loop_0");
if (IS_ERR(tcm_loop_primary)) {
- printk(KERN_ERR "Unable to allocate tcm_loop_primary\n");
+ pr_err("Unable to allocate tcm_loop_primary\n");
return PTR_ERR(tcm_loop_primary);
}
ret = bus_register(&tcm_loop_lld_bus);
if (ret) {
- printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n");
+ pr_err("bus_register() failed for tcm_loop_lld_bus\n");
goto dev_unreg;
}
ret = driver_register(&tcm_loop_driverfs);
if (ret) {
- printk(KERN_ERR "driver_register() failed for"
+ pr_err("driver_register() failed for"
"tcm_loop_driverfs\n");
goto bus_unreg;
}
- printk(KERN_INFO "Initialized TCM Loop Core Bus\n");
+ pr_debug("Initialized TCM Loop Core Bus\n");
return ret;
bus_unreg:
@@ -565,7 +546,7 @@ static void tcm_loop_release_core_bus(void)
bus_unregister(&tcm_loop_lld_bus);
root_device_unregister(tcm_loop_primary);
- printk(KERN_INFO "Releasing TCM Loop Core BUS\n");
+ pr_debug("Releasing TCM Loop Core BUS\n");
}
static char *tcm_loop_get_fabric_name(void)
@@ -593,7 +574,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
case SCSI_PROTOCOL_ISCSI:
return iscsi_get_fabric_proto_ident(se_tpg);
default:
- printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@@ -649,7 +630,7 @@ static u32 tcm_loop_get_pr_transport_id(
return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
format_code, buf);
default:
- printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@@ -679,7 +660,7 @@ static u32 tcm_loop_get_pr_transport_id_len(
return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
format_code);
default:
- printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@@ -713,7 +694,7 @@ static char *tcm_loop_parse_pr_out_transport_id(
return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
port_nexus_ptr);
default:
- printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@@ -762,7 +743,7 @@ static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
if (!tl_nacl) {
- printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n");
+ pr_err("Unable to allocate struct tcm_loop_nacl\n");
return NULL;
}
@@ -784,16 +765,6 @@ static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
return 1;
}
-static void tcm_loop_new_cmd_failure(struct se_cmd *se_cmd)
-{
- /*
- * Since TCM_loop is already passing struct scatterlist data from
- * struct scsi_cmnd, no more Linux/SCSI failure dependent state need
- * to be handled here.
- */
- return;
-}
-
static int tcm_loop_is_state_remove(struct se_cmd *se_cmd)
{
/*
@@ -882,7 +853,7 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
- TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
+ pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
" cdb: 0x%02x\n", sc, sc->cmnd[0]);
sc->result = SAM_STAT_GOOD;
@@ -897,14 +868,14 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
- TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p"
+ pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
" cdb: 0x%02x\n", sc, sc->cmnd[0]);
if (se_cmd->sense_buffer &&
((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
(se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
- memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer,
+ memcpy(sc->sense_buffer, se_cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
sc->result = SAM_STAT_CHECK_CONDITION;
set_driver_byte(sc, DRIVER_SENSE);
@@ -972,7 +943,7 @@ static int tcm_loop_port_link(
*/
scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
- printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n");
+ pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
return 0;
}
@@ -990,7 +961,7 @@ static void tcm_loop_port_unlink(
sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
se_lun->unpacked_lun);
if (!sd) {
- printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:"
+ pr_err("Unable to locate struct scsi_device for %d:%d:"
"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
return;
}
@@ -1003,7 +974,7 @@ static void tcm_loop_port_unlink(
atomic_dec(&tl_tpg->tl_tpg_port_count);
smp_mb__after_atomic_dec();
- printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n");
+ pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}
/* End items for tcm_loop_port_cit */
@@ -1017,24 +988,27 @@ static int tcm_loop_make_nexus(
struct se_portal_group *se_tpg;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
struct tcm_loop_nexus *tl_nexus;
+ int ret = -ENOMEM;
if (tl_tpg->tl_hba->tl_nexus) {
- printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
+ pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
return -EEXIST;
}
se_tpg = &tl_tpg->tl_se_tpg;
tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
if (!tl_nexus) {
- printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n");
+ pr_err("Unable to allocate struct tcm_loop_nexus\n");
return -ENOMEM;
}
/*
* Initialize the struct se_session pointer
*/
tl_nexus->se_sess = transport_init_session();
- if (!tl_nexus->se_sess)
+ if (IS_ERR(tl_nexus->se_sess)) {
+ ret = PTR_ERR(tl_nexus->se_sess);
goto out;
+ }
/*
 * Since we are running in 'demo mode' this call will generate a
* struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
@@ -1051,16 +1025,16 @@ static int tcm_loop_make_nexus(
* transport_register_session()
*/
__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
- tl_nexus->se_sess, (void *)tl_nexus);
+ tl_nexus->se_sess, tl_nexus);
tl_tpg->tl_hba->tl_nexus = tl_nexus;
- printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+ pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
name);
return 0;
out:
kfree(tl_nexus);
- return -ENOMEM;
+ return ret;
}
static int tcm_loop_drop_nexus(
@@ -1079,13 +1053,13 @@ static int tcm_loop_drop_nexus(
return -ENODEV;
if (atomic_read(&tpg->tl_tpg_port_count)) {
- printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with"
+ pr_err("Unable to remove TCM_Loop I_T Nexus with"
" active TPG port count: %d\n",
atomic_read(&tpg->tl_tpg_port_count));
return -EPERM;
}
- printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+ pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
tl_nexus->se_sess->se_node_acl->initiatorname);
/*
@@ -1140,8 +1114,8 @@ static ssize_t tcm_loop_tpg_store_nexus(
* the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
* tcm_loop_make_nexus()
*/
- if (strlen(page) > TL_WWN_ADDR_LEN) {
- printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
+ if (strlen(page) >= TL_WWN_ADDR_LEN) {
+ pr_err("Emulated NAA Sas Address: %s, exceeds"
" max: %d\n", page, TL_WWN_ADDR_LEN);
return -EINVAL;
}
@@ -1150,7 +1124,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "naa.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
- printk(KERN_ERR "Passed SAS Initiator Port %s does not"
+ pr_err("Passed SAS Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
@@ -1161,7 +1135,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "fc.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
- printk(KERN_ERR "Passed FCP Initiator Port %s does not"
+ pr_err("Passed FCP Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
@@ -1172,7 +1146,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "iqn.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
- printk(KERN_ERR "Passed iSCSI Initiator Port %s does not"
+ pr_err("Passed iSCSI Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
@@ -1180,7 +1154,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
port_ptr = &i_port[0];
goto check_newline;
}
- printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:"
+ pr_err("Unable to locate prefix for emulated Initiator Port:"
" %s\n", i_port);
return -EINVAL;
/*
@@ -1220,15 +1194,15 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
tpgt_str = strstr(name, "tpgt_");
if (!tpgt_str) {
- printk(KERN_ERR "Unable to locate \"tpgt_#\" directory"
+ pr_err("Unable to locate \"tpgt_#\" directory"
" group\n");
return ERR_PTR(-EINVAL);
}
tpgt_str += 5; /* Skip ahead of "tpgt_" */
tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
- if (tpgt > TL_TPGS_PER_HBA) {
- printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
+ if (tpgt >= TL_TPGS_PER_HBA) {
+ pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
" %u\n", tpgt, TL_TPGS_PER_HBA);
return ERR_PTR(-EINVAL);
}
@@ -1239,12 +1213,12 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
* Register the tl_tpg as a emulated SAS TCM Target Endpoint
*/
ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
- wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg,
+ wwn, &tl_tpg->tl_se_tpg, tl_tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0)
return ERR_PTR(-ENOMEM);
- printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s"
+ pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
@@ -1271,7 +1245,7 @@ void tcm_loop_drop_naa_tpg(
*/
core_tpg_deregister(se_tpg);
- printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s"
+ pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
}
@@ -1292,7 +1266,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
if (!tl_hba) {
- printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n");
+ pr_err("Unable to allocate struct tcm_loop_hba\n");
return ERR_PTR(-ENOMEM);
}
/*
@@ -1311,22 +1285,21 @@ struct se_wwn *tcm_loop_make_scsi_hba(
goto check_len;
}
ptr = strstr(name, "iqn.");
- if (ptr) {
- tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
- goto check_len;
+ if (!ptr) {
+ pr_err("Unable to locate prefix for emulated Target "
+ "Port: %s\n", name);
+ ret = -EINVAL;
+ goto out;
}
-
- printk(KERN_ERR "Unable to locate prefix for emulated Target Port:"
- " %s\n", name);
- return ERR_PTR(-EINVAL);
+ tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
check_len:
- if (strlen(name) > TL_WWN_ADDR_LEN) {
- printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
+ if (strlen(name) >= TL_WWN_ADDR_LEN) {
+ pr_err("Emulated NAA %s Address: %s, exceeds"
" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
TL_WWN_ADDR_LEN);
- kfree(tl_hba);
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto out;
}
snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
@@ -1341,7 +1314,7 @@ check_len:
sh = tl_hba->sh;
tcm_loop_hba_no_cnt++;
- printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target"
+ pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
" %s Address: %s at Linux/SCSI Host ID: %d\n",
tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
@@ -1364,7 +1337,7 @@ void tcm_loop_drop_scsi_hba(
*/
device_unregister(&tl_hba->dev);
- printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target"
+ pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
" SAS Address: %s at Linux/SCSI Host ID: %d\n",
config_item_name(&wwn->wwn_group.cg_item), host_no);
}
@@ -1399,9 +1372,9 @@ static int tcm_loop_register_configfs(void)
* Register the top level struct config_item_type with TCM core
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
- if (!fabric) {
- printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
- return -1;
+ if (IS_ERR(fabric)) {
+ pr_err("tcm_loop_register_configfs() failed!\n");
+ return PTR_ERR(fabric);
}
/*
* Setup the fabric API of function pointers used by target_core_mod
@@ -1433,19 +1406,11 @@ static int tcm_loop_register_configfs(void)
&tcm_loop_tpg_release_fabric_acl;
fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
/*
- * Since tcm_loop is mapping physical memory from Linux/SCSI
- * struct scatterlist arrays for each struct scsi_cmnd I/O,
- * we do not need TCM to allocate a iovec array for
- * virtual memory address mappings
- */
- fabric->tf_ops.alloc_cmd_iovecs = NULL;
- /*
* Used for setting up remaining TCM resources in process context
*/
fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map;
fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
- fabric->tf_ops.release_cmd_to_pool = &tcm_loop_deallocate_core_cmd;
- fabric->tf_ops.release_cmd_direct = &tcm_loop_deallocate_core_cmd;
+ fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
fabric->tf_ops.close_session = &tcm_loop_close_session;
fabric->tf_ops.stop_session = &tcm_loop_stop_session;
@@ -1462,7 +1427,6 @@ static int tcm_loop_register_configfs(void)
&tcm_loop_set_default_node_attributes;
fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
- fabric->tf_ops.new_cmd_failure = &tcm_loop_new_cmd_failure;
fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
fabric->tf_ops.queue_status = &tcm_loop_queue_status;
fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
@@ -1500,7 +1464,7 @@ static int tcm_loop_register_configfs(void)
*/
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
- printk(KERN_ERR "target_fabric_configfs_register() for"
+ pr_err("target_fabric_configfs_register() for"
" TCM_Loop failed!\n");
target_fabric_configfs_free(fabric);
return -1;
@@ -1509,7 +1473,7 @@ static int tcm_loop_register_configfs(void)
* Setup our local pointer to *fabric.
*/
tcm_loop_fabric_configfs = fabric;
- printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->"
+ pr_debug("TCM_LOOP[0] - Set fabric ->"
" tcm_loop_fabric_configfs\n");
return 0;
}
@@ -1521,7 +1485,7 @@ static void tcm_loop_deregister_configfs(void)
target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
tcm_loop_fabric_configfs = NULL;
- printk(KERN_INFO "TCM_LOOP[0] - Cleared"
+ pr_debug("TCM_LOOP[0] - Cleared"
" tcm_loop_fabric_configfs\n");
}
@@ -1534,7 +1498,7 @@ static int __init tcm_loop_fabric_init(void)
__alignof__(struct tcm_loop_cmd),
0, NULL);
if (!tcm_loop_cmd_cache) {
- printk(KERN_ERR "kmem_cache_create() for"
+ pr_debug("kmem_cache_create() for"
" tcm_loop_cmd_cache failed\n");
return -ENOMEM;
}
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 7e9f7ab4554..6b76c7a22bb 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -16,12 +16,6 @@
*/
#define TL_SCSI_MAX_CMD_LEN 32
-#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG
-# define TL_CDB_DEBUG(x...) printk(KERN_INFO x)
-#else
-# define TL_CDB_DEBUG(x...)
-#endif
-
struct tcm_loop_cmd {
/* State of Linux/SCSI CDB+Data descriptor */
u32 sc_cmd_state;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 47abb42d9c3..98c98a3a025 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -46,6 +46,14 @@ static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port, int explict, int offline);
+static u16 alua_lu_gps_counter;
+static u32 alua_lu_gps_count;
+
+static DEFINE_SPINLOCK(lu_gps_lock);
+static LIST_HEAD(lu_gps_list);
+
+struct t10_alua_lu_gp *default_lu_gp;
+
/*
* REPORT_TARGET_PORT_GROUPS
*
@@ -53,16 +61,18 @@ static int core_alua_set_tg_pt_secondary_state(
*/
int core_emulate_report_target_port_groups(struct se_cmd *cmd)
{
- struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+ struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct se_port *port;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
Target port group descriptor */
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
- list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ buf = transport_kmap_first_data_page(cmd);
+
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
/*
* PREF: Preferred target port bit, determine if this
@@ -124,7 +134,7 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
/*
* Set the RETURN DATA LENGTH set in the header of the DataIN Payload
*/
@@ -133,6 +143,8 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
buf[2] = ((rd_len >> 8) & 0xff);
buf[3] = (rd_len & 0xff);
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
@@ -143,45 +155,53 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
*/
int core_emulate_set_target_port_groups(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
- struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep;
- struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl;
+ struct se_device *dev = cmd->se_dev;
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+ struct se_port *port, *l_port = cmd->se_lun->lun_sep;
+ struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
- unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
+ unsigned char *buf;
+ unsigned char *ptr;
u32 len = 4; /* Skip over RESERVED area in header */
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
- if (!(l_port))
+ if (!l_port)
return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+ buf = transport_kmap_first_data_page(cmd);
+
/*
* Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
* for the local tg_pt_gp.
*/
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
- if (!(l_tg_pt_gp_mem)) {
- printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ if (!l_tg_pt_gp_mem) {
+ pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
+ rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
- if (!(l_tg_pt_gp)) {
+ if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+ rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (!(rc)) {
- printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
+ if (!rc) {
+ pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ goto out;
}
+ ptr = &buf[4]; /* Skip over RESERVED area in header */
+
while (len < cmd->data_length) {
alua_access_state = (ptr[0] & 0x0f);
/*
@@ -201,7 +221,8 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
}
rc = -1;
/*
@@ -224,11 +245,11 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
* Locate the matching target port group ID from
* the global tg_pt_gp list
*/
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
- &T10_ALUA(su_dev)->tg_pt_gps_list,
+ &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
- if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
@@ -236,24 +257,26 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
rc = core_alua_do_port_transition(tg_pt_gp,
dev, l_port, nacl,
alua_access_state, 1);
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
break;
}
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
/*
* If no matching target port group ID can be located
* throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
- if (rc != 0)
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ if (rc != 0) {
+ rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
} else {
/*
* Extract the RELATIVE TARGET PORT IDENTIFIER to identify
@@ -287,14 +310,19 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
* be located, throw an exception with ASCQ:
* INVALID_PARAMETER_LIST
*/
- if (rc != 0)
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ if (rc != 0) {
+ rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
}
ptr += 4;
len += 4;
}
+out:
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
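core_emulate_set_target_port_groups() above is reworked so every early error path funnels through a single out: label that unmaps the payload before returning; the parameter-list walk no longer bails out while the page is still mapped. The general shape of that single-exit idiom in a standalone sketch (the resource here is plain malloc/free, purely illustrative):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Take the resource once, jump to "out" on any failure, release it in
 * exactly one place. */
static int process_parameter_list(const unsigned char *data, size_t len)
{
        unsigned char *buf;
        int rc = 0;

        buf = malloc(len);
        if (!buf)
                return -ENOMEM;         /* nothing held yet: return directly */
        memcpy(buf, data, len);

        if (len < 4) {                  /* malformed header */
                rc = -EINVAL;
                goto out;
        }
        if (buf[0] == 0xff) {           /* placeholder validity check */
                rc = -EINVAL;
                goto out;
        }
        /* ... walk the descriptors in buf ... */
out:
        free(buf);                      /* the one and only release point */
        return rc;
}

int main(void)
{
        unsigned char d[8] = { 0 };
        return process_parameter_list(d, sizeof(d)) ? 1 : 0;
}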
@@ -464,13 +492,13 @@ static int core_alua_state_check(
unsigned char *cdb,
u8 *alua_ascq)
{
- struct se_lun *lun = SE_LUN(cmd);
+ struct se_lun *lun = cmd->se_lun;
struct se_port *port = lun->lun_sep;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
int out_alua_state, nonop_delay_msecs;
- if (!(port))
+ if (!port)
return 0;
/*
* First, check for a struct se_port specific secondary ALUA target port
@@ -478,7 +506,7 @@ static int core_alua_state_check(
*/
if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
- printk(KERN_INFO "ALUA: Got secondary offline status for local"
+ pr_debug("ALUA: Got secondary offline status for local"
" target port\n");
*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
return 1;
@@ -520,9 +548,9 @@ static int core_alua_state_check(
*/
case ALUA_ACCESS_STATE_OFFLINE:
default:
- printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
+ pr_err("Unknown ALUA access state: 0x%02x\n",
out_alua_state);
- return -1;
+ return -EINVAL;
}
return 0;
@@ -552,8 +580,8 @@ static int core_alua_check_transition(int state, int *primary)
*primary = 0;
break;
default:
- printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
- return -1;
+ pr_err("Unknown ALUA access state: 0x%02x\n", state);
+ return -EINVAL;
}
return 0;
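Several hunks in this file also replace bare return -1 with specific negative errno values (-EINVAL, -ENOMEM, -ENOSPC), so callers can tell failure modes apart. The convention in miniature:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Return 0 on success, a negative errno value on failure. */
static int check_transition(int state)
{
        if (state < 0 || state > 0x0f)
                return -EINVAL;
        return 0;
}

int main(void)
{
        int rc = check_transition(0x42);

        if (rc < 0)
                fprintf(stderr, "transition rejected: %s\n", strerror(-rc));
        return rc ? 1 : 0;
}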
@@ -610,7 +638,7 @@ int core_alua_check_nonop_delay(
* The ALUA Active/NonOptimized access state delay can be disabled
* via configfs with a value of zero
*/
- if (!(cmd->alua_nonop_delay))
+ if (!cmd->alua_nonop_delay)
return 0;
/*
* struct se_cmd->alua_nonop_delay gets set by a target port group
@@ -639,7 +667,7 @@ static int core_alua_write_tpg_metadata(
file = filp_open(path, flags, 0600);
if (IS_ERR(file) || !file || !file->f_dentry) {
- printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
+ pr_err("filp_open(%s) for ALUA metadata failed\n",
path);
return -ENODEV;
}
@@ -653,7 +681,7 @@ static int core_alua_write_tpg_metadata(
set_fs(old_fs);
if (ret < 0) {
- printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
+ pr_err("Error writing ALUA metadata file: %s\n", path);
filp_close(file, NULL);
return -EIO;
}
@@ -750,7 +778,7 @@ static int core_alua_do_transition_tg_pt(
* se_deve->se_lun_acl pointer may be NULL for an
* entry created without explict Node+MappedLUN ACLs
*/
- if (!(lacl))
+ if (!lacl)
continue;
if (explict &&
@@ -792,7 +820,7 @@ static int core_alua_do_transition_tg_pt(
*/
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
- printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+ pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" from primary access state %s to %s\n", (explict) ? "explict" :
"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
@@ -823,8 +851,8 @@ int core_alua_do_port_transition(
return -EINVAL;
md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
- if (!(md_buf)) {
- printk("Unable to allocate buf for ALUA metadata\n");
+ if (!md_buf) {
+ pr_err("Unable to allocate buf for ALUA metadata\n");
return -ENOMEM;
}
@@ -839,7 +867,7 @@ int core_alua_do_port_transition(
* we only do transition on the passed *l_tp_pt_gp, and not
* on all of the matching target port groups IDs in default_lu_gp.
*/
- if (!(lu_gp->lu_gp_id)) {
+ if (!lu_gp->lu_gp_id) {
/*
* core_alua_do_transition_tg_pt() will always return
* success.
@@ -866,12 +894,12 @@ int core_alua_do_port_transition(
smp_mb__after_atomic_inc();
spin_unlock(&lu_gp->lu_gp_lock);
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
- &T10_ALUA(su_dev)->tg_pt_gps_list,
+ &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
- if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
/*
* If the target behavior port asymmetric access state
@@ -893,7 +921,7 @@ int core_alua_do_port_transition(
}
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
/*
* core_alua_do_transition_tg_pt() will always return
* success.
@@ -901,11 +929,11 @@ int core_alua_do_port_transition(
core_alua_do_transition_tg_pt(tg_pt_gp, port,
nacl, md_buf, new_state, explict);
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
}
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
spin_lock(&lu_gp->lu_gp_lock);
atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
@@ -913,7 +941,7 @@ int core_alua_do_port_transition(
}
spin_unlock(&lu_gp->lu_gp_lock);
- printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
+ pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
" Group IDs: %hu %s transition to primary state: %s\n",
config_item_name(&lu_gp->lu_gp_group.cg_item),
l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
@@ -942,11 +970,11 @@ static int core_alua_update_tpg_secondary_metadata(
memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
- TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg));
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
- if (TPG_TFO(se_tpg)->tpg_get_tag != NULL)
+ if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
- TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
@@ -954,7 +982,7 @@ static int core_alua_update_tpg_secondary_metadata(
port->sep_tg_pt_secondary_stat);
snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
- TPG_TFO(se_tpg)->get_fabric_name(), wwn,
+ se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
port->sep_lun->unpacked_lun);
return core_alua_write_tpg_metadata(path, md_buf, len);
@@ -973,11 +1001,11 @@ static int core_alua_set_tg_pt_secondary_state(
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if (!(tg_pt_gp)) {
+ if (!tg_pt_gp) {
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- printk(KERN_ERR "Unable to complete secondary state"
+ pr_err("Unable to complete secondary state"
" transition\n");
- return -1;
+ return -EINVAL;
}
trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
/*
@@ -994,7 +1022,7 @@ static int core_alua_set_tg_pt_secondary_state(
ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
- printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+ pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" to secondary access state: %s\n", (explict) ? "explict" :
"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
@@ -1012,10 +1040,10 @@ static int core_alua_set_tg_pt_secondary_state(
*/
if (port->sep_tg_pt_secondary_write_md) {
md_buf = kzalloc(md_buf_len, GFP_KERNEL);
- if (!(md_buf)) {
- printk(KERN_ERR "Unable to allocate md_buf for"
+ if (!md_buf) {
+ pr_err("Unable to allocate md_buf for"
" secondary ALUA access metadata\n");
- return -1;
+ return -ENOMEM;
}
mutex_lock(&port->sep_tg_pt_md_mutex);
core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
@@ -1034,19 +1062,19 @@ core_alua_allocate_lu_gp(const char *name, int def_group)
struct t10_alua_lu_gp *lu_gp;
lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
- if (!(lu_gp)) {
- printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
+ if (!lu_gp) {
+ pr_err("Unable to allocate struct t10_alua_lu_gp\n");
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&lu_gp->lu_gp_list);
+ INIT_LIST_HEAD(&lu_gp->lu_gp_node);
INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
spin_lock_init(&lu_gp->lu_gp_lock);
atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
if (def_group) {
- lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;
+ lu_gp->lu_gp_id = alua_lu_gps_counter++;
lu_gp->lu_gp_valid_id = 1;
- se_global->alua_lu_gps_count++;
+ alua_lu_gps_count++;
}
return lu_gp;
@@ -1060,41 +1088,41 @@ int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
* The lu_gp->lu_gp_id may only be set once..
*/
if (lu_gp->lu_gp_valid_id) {
- printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
+ pr_warn("ALUA LU Group already has a valid ID,"
" ignoring request\n");
- return -1;
+ return -EINVAL;
}
- spin_lock(&se_global->lu_gps_lock);
- if (se_global->alua_lu_gps_count == 0x0000ffff) {
- printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:"
+ spin_lock(&lu_gps_lock);
+ if (alua_lu_gps_count == 0x0000ffff) {
+ pr_err("Maximum ALUA alua_lu_gps_count:"
" 0x0000ffff reached\n");
- spin_unlock(&se_global->lu_gps_lock);
+ spin_unlock(&lu_gps_lock);
kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
- return -1;
+ return -ENOSPC;
}
again:
lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
- se_global->alua_lu_gps_counter++;
+ alua_lu_gps_counter++;
- list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) {
+ list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
- if (!(lu_gp_id))
+ if (!lu_gp_id)
goto again;
- printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
+ pr_warn("ALUA Logical Unit Group ID: %hu"
" already exists, ignoring request\n",
lu_gp_id);
- spin_unlock(&se_global->lu_gps_lock);
- return -1;
+ spin_unlock(&lu_gps_lock);
+ return -EINVAL;
}
}
lu_gp->lu_gp_id = lu_gp_id_tmp;
lu_gp->lu_gp_valid_id = 1;
- list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list);
- se_global->alua_lu_gps_count++;
- spin_unlock(&se_global->lu_gps_lock);
+ list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
+ alua_lu_gps_count++;
+ spin_unlock(&lu_gps_lock);
return 0;
}
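The ID-assignment hunk above keeps a monotonically increasing counter, scans the existing list for a collision, retries with the next counter value when the ID was auto-generated (lu_gp_id == 0), and fails when a caller-requested ID is already taken, all under a hard cap on the number of groups. A small userspace model of that loop (single-threaded; the kernel holds lu_gps_lock around the whole sequence):

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#define MAX_GROUPS 8

static uint16_t registered[MAX_GROUPS];
static int nr_registered;
static uint16_t id_counter = 1;

static int id_in_use(uint16_t id)
{
        for (int i = 0; i < nr_registered; i++)
                if (registered[i] == id)
                        return 1;
        return 0;
}

/* requested == 0 means "pick one for me"; nonzero means "this exact ID". */
static int assign_group_id(uint16_t requested)
{
        uint16_t id;

        if (nr_registered >= MAX_GROUPS)
                return -ENOSPC;         /* full, like the 0x0000ffff cap */
again:
        id = requested ? requested : id_counter++;
        if (id_in_use(id)) {
                if (!requested)
                        goto again;     /* auto ID collided: try the next one */
                return -EINVAL;         /* explicit ID already exists */
        }
        registered[nr_registered++] = id;
        return id;
}

int main(void)
{
        printf("auto: %d\n", assign_group_id(0));
        printf("explicit 5: %d\n", assign_group_id(5));
        printf("explicit 5 again: %d\n", assign_group_id(5));
        return 0;
}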
@@ -1105,8 +1133,8 @@ core_alua_allocate_lu_gp_mem(struct se_device *dev)
struct t10_alua_lu_gp_member *lu_gp_mem;
lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
- if (!(lu_gp_mem)) {
- printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
+ if (!lu_gp_mem) {
+ pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
@@ -1130,11 +1158,11 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* no associations can be made while we are releasing
* struct t10_alua_lu_gp.
*/
- spin_lock(&se_global->lu_gps_lock);
+ spin_lock(&lu_gps_lock);
atomic_set(&lu_gp->lu_gp_shutdown, 1);
- list_del(&lu_gp->lu_gp_list);
- se_global->alua_lu_gps_count--;
- spin_unlock(&se_global->lu_gps_lock);
+ list_del(&lu_gp->lu_gp_node);
+ alua_lu_gps_count--;
+ spin_unlock(&lu_gps_lock);
/*
* Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
* in target_core_configfs.c:target_core_store_alua_lu_gp() to be
@@ -1165,9 +1193,9 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* we want to re-associate a given lu_gp_mem with default_lu_gp.
*/
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
- if (lu_gp != se_global->default_lu_gp)
+ if (lu_gp != default_lu_gp)
__core_alua_attach_lu_gp_mem(lu_gp_mem,
- se_global->default_lu_gp);
+ default_lu_gp);
else
lu_gp_mem->lu_gp = NULL;
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
@@ -1182,7 +1210,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
void core_alua_free_lu_gp_mem(struct se_device *dev)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
@@ -1190,7 +1218,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev)
return;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!(lu_gp_mem))
+ if (!lu_gp_mem)
return;
while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
@@ -1198,7 +1226,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev)
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
- if ((lu_gp)) {
+ if (lu_gp) {
spin_lock(&lu_gp->lu_gp_lock);
if (lu_gp_mem->lu_gp_assoc) {
list_del(&lu_gp_mem->lu_gp_mem_list);
@@ -1218,27 +1246,27 @@ struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
struct t10_alua_lu_gp *lu_gp;
struct config_item *ci;
- spin_lock(&se_global->lu_gps_lock);
- list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) {
- if (!(lu_gp->lu_gp_valid_id))
+ spin_lock(&lu_gps_lock);
+ list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
+ if (!lu_gp->lu_gp_valid_id)
continue;
ci = &lu_gp->lu_gp_group.cg_item;
- if (!(strcmp(config_item_name(ci), name))) {
+ if (!strcmp(config_item_name(ci), name)) {
atomic_inc(&lu_gp->lu_gp_ref_cnt);
- spin_unlock(&se_global->lu_gps_lock);
+ spin_unlock(&lu_gps_lock);
return lu_gp;
}
}
- spin_unlock(&se_global->lu_gps_lock);
+ spin_unlock(&lu_gps_lock);
return NULL;
}
void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
- spin_lock(&se_global->lu_gps_lock);
+ spin_lock(&lu_gps_lock);
atomic_dec(&lu_gp->lu_gp_ref_cnt);
- spin_unlock(&se_global->lu_gps_lock);
+ spin_unlock(&lu_gps_lock);
}
/*
@@ -1279,8 +1307,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp;
tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
- if (!(tg_pt_gp)) {
- printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
+ if (!tg_pt_gp) {
+ pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
return NULL;
}
INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
@@ -1304,14 +1332,14 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
if (def_group) {
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
tg_pt_gp->tg_pt_gp_id =
- T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+ su_dev->t10_alua.alua_tg_pt_gps_counter++;
tg_pt_gp->tg_pt_gp_valid_id = 1;
- T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+ su_dev->t10_alua.alua_tg_pt_gps_count++;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
- &T10_ALUA(su_dev)->tg_pt_gps_list);
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ &su_dev->t10_alua.tg_pt_gps_list);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
}
return tg_pt_gp;
@@ -1328,42 +1356,42 @@ int core_alua_set_tg_pt_gp_id(
* The tg_pt_gp->tg_pt_gp_id may only be set once..
*/
if (tg_pt_gp->tg_pt_gp_valid_id) {
- printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
+ pr_warn("ALUA TG PT Group already has a valid ID,"
" ignoring request\n");
- return -1;
+ return -EINVAL;
}
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
- if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) {
- printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
+ pr_err("Maximum ALUA alua_tg_pt_gps_count:"
" 0x0000ffff reached\n");
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
- return -1;
+ return -ENOSPC;
}
again:
tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
- T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+ su_dev->t10_alua.alua_tg_pt_gps_counter++;
- list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
- if (!(tg_pt_gp_id))
+ if (!tg_pt_gp_id)
goto again;
- printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
+ pr_err("ALUA Target Port Group ID: %hu already"
" exists, ignoring request\n", tg_pt_gp_id);
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
- return -1;
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ return -EINVAL;
}
}
tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
tg_pt_gp->tg_pt_gp_valid_id = 1;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
- &T10_ALUA(su_dev)->tg_pt_gps_list);
- T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ &su_dev->t10_alua.tg_pt_gps_list);
+ su_dev->t10_alua.alua_tg_pt_gps_count++;
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
return 0;
}
@@ -1375,8 +1403,8 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
GFP_KERNEL);
- if (!(tg_pt_gp_mem)) {
- printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
+ if (!tg_pt_gp_mem) {
+ pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
@@ -1403,10 +1431,10 @@ void core_alua_free_tg_pt_gp(
* no associations *OR* explict ALUA via SET_TARGET_PORT_GROUPS
* can be made while we are releasing struct t10_alua_tg_pt_gp.
*/
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
list_del(&tg_pt_gp->tg_pt_gp_list);
- T10_ALUA(su_dev)->alua_tg_pt_gps_counter--;
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ su_dev->t10_alua.alua_tg_pt_gps_counter--;
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
* core_alua_get_tg_pt_gp_by_name() in
@@ -1438,9 +1466,9 @@ void core_alua_free_tg_pt_gp(
* default_tg_pt_gp.
*/
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) {
+ if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- T10_ALUA(su_dev)->default_tg_pt_gp);
+ su_dev->t10_alua.default_tg_pt_gp);
} else
tg_pt_gp_mem->tg_pt_gp = NULL;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1455,7 +1483,7 @@ void core_alua_free_tg_pt_gp(
void core_alua_free_tg_pt_gp_mem(struct se_port *port)
{
struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
- struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
@@ -1463,7 +1491,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
return;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!(tg_pt_gp_mem))
+ if (!tg_pt_gp_mem)
return;
while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
@@ -1471,7 +1499,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if ((tg_pt_gp)) {
+ if (tg_pt_gp) {
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
if (tg_pt_gp_mem->tg_pt_gp_assoc) {
list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
@@ -1493,19 +1521,19 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct config_item *ci;
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
- list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
- if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
- if (!(strcmp(config_item_name(ci), name))) {
+ if (!strcmp(config_item_name(ci), name)) {
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
return tg_pt_gp;
}
}
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
return NULL;
}
@@ -1515,9 +1543,9 @@ static void core_alua_put_tg_pt_gp_from_name(
{
struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
}
/*
@@ -1555,7 +1583,7 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
{
struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
struct config_item *tg_pt_ci;
- struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
ssize_t len = 0;
@@ -1564,12 +1592,12 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
return len;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!(tg_pt_gp_mem))
+ if (!tg_pt_gp_mem)
return len;
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if ((tg_pt_gp)) {
+ if (tg_pt_gp) {
tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
" %hu\nTG Port Primary Access State: %s\nTG Port "
@@ -1605,16 +1633,16 @@ ssize_t core_alua_store_tg_pt_gp_info(
tpg = port->sep_tpg;
lun = port->sep_lun;
- if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
- printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
- " %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
+ pr_warn("SPC3_ALUA_EMULATED not enabled for"
+ " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item));
return -EINVAL;
}
if (count > TG_PT_GROUP_NAME_BUF) {
- printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
+ pr_err("ALUA Target Port Group alias too large!\n");
return -EINVAL;
}
memset(buf, 0, TG_PT_GROUP_NAME_BUF);
@@ -1631,31 +1659,31 @@ ssize_t core_alua_store_tg_pt_gp_info(
*/
tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
strstrip(buf));
- if (!(tg_pt_gp_new))
+ if (!tg_pt_gp_new)
return -ENODEV;
}
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!(tg_pt_gp_mem)) {
+ if (!tg_pt_gp_mem) {
if (tg_pt_gp_new)
core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
- printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
+ pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
return -EINVAL;
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if ((tg_pt_gp)) {
+ if (tg_pt_gp) {
/*
* Clearing an existing tg_pt_gp association, and replacing
* with the default_tg_pt_gp.
*/
- if (!(tg_pt_gp_new)) {
- printk(KERN_INFO "Target_Core_ConfigFS: Moving"
+ if (!tg_pt_gp_new) {
+ pr_debug("Target_Core_ConfigFS: Moving"
" %s/tpgt_%hu/%s from ALUA Target Port Group:"
" alua/%s, ID: %hu back to"
" default_tg_pt_gp\n",
- TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item),
config_item_name(
&tg_pt_gp->tg_pt_gp_group.cg_item),
@@ -1663,7 +1691,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- T10_ALUA(su_dev)->default_tg_pt_gp);
+ su_dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
return count;
@@ -1679,10 +1707,10 @@ ssize_t core_alua_store_tg_pt_gp_info(
*/
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
+ pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
" Target Port Group: alua/%s, ID: %hu\n", (move) ?
- "Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item),
config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
tg_pt_gp_new->tg_pt_gp_id);
@@ -1716,11 +1744,11 @@ ssize_t core_alua_store_access_type(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_access_type\n");
+ pr_err("Unable to extract alua_access_type\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
- printk(KERN_ERR "Illegal value for alua_access_type:"
+ pr_err("Illegal value for alua_access_type:"
" %lu\n", tmp);
return -EINVAL;
}
@@ -1754,11 +1782,11 @@ ssize_t core_alua_store_nonop_delay_msecs(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
+ pr_err("Unable to extract nonop_delay_msecs\n");
return -EINVAL;
}
if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
- printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
+ pr_err("Passed nonop_delay_msecs: %lu, exceeds"
" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
ALUA_MAX_NONOP_DELAY_MSECS);
return -EINVAL;
@@ -1785,11 +1813,11 @@ ssize_t core_alua_store_trans_delay_msecs(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
+ pr_err("Unable to extract trans_delay_msecs\n");
return -EINVAL;
}
if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
- printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
+ pr_err("Passed trans_delay_msecs: %lu, exceeds"
" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
ALUA_MAX_TRANS_DELAY_MSECS);
return -EINVAL;
@@ -1816,11 +1844,11 @@ ssize_t core_alua_store_preferred_bit(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract preferred ALUA value\n");
+ pr_err("Unable to extract preferred ALUA value\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
+ pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_pref = (int)tmp;
@@ -1830,7 +1858,7 @@ ssize_t core_alua_store_preferred_bit(
ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
- if (!(lun->lun_sep))
+ if (!lun->lun_sep)
return -ENODEV;
return sprintf(page, "%d\n",
@@ -1846,22 +1874,22 @@ ssize_t core_alua_store_offline_bit(
unsigned long tmp;
int ret;
- if (!(lun->lun_sep))
+ if (!lun->lun_sep)
return -ENODEV;
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n");
+ pr_err("Unable to extract alua_tg_pt_offline value\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n",
+ pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
tmp);
return -EINVAL;
}
tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
- if (!(tg_pt_gp_mem)) {
- printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n");
+ if (!tg_pt_gp_mem) {
+ pr_err("Unable to locate *tg_pt_gp_mem\n");
return -EINVAL;
}
@@ -1890,13 +1918,13 @@ ssize_t core_alua_store_secondary_status(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_tg_pt_status\n");
+ pr_err("Unable to extract alua_tg_pt_status\n");
return -EINVAL;
}
if ((tmp != ALUA_STATUS_NONE) &&
(tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
(tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
- printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n",
+ pr_err("Illegal value for alua_tg_pt_status: %lu\n",
tmp);
return -EINVAL;
}
@@ -1923,11 +1951,11 @@ ssize_t core_alua_store_secondary_write_metadata(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n");
+ pr_err("Unable to extract alua_tg_pt_write_md\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:"
+ pr_err("Illegal value for alua_tg_pt_write_md:"
" %lu\n", tmp);
return -EINVAL;
}
@@ -1939,7 +1967,7 @@ ssize_t core_alua_store_secondary_write_metadata(
int core_setup_alua(struct se_device *dev, int force_pt)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_lu_gp_member *lu_gp_mem;
/*
* If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
@@ -1947,44 +1975,44 @@ int core_setup_alua(struct se_device *dev, int force_pt)
* cause a problem because libata and some SATA RAID HBAs appear
* under Linux/SCSI, but emulate SCSI logic themselves.
*/
- if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
- !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) {
+ if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
alua->alua_type = SPC_ALUA_PASSTHROUGH;
alua->alua_state_check = &core_alua_state_check_nop;
- printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
- " emulation\n", TRANSPORT(dev)->name);
+ pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
+ " emulation\n", dev->transport->name);
return 0;
}
/*
* If SPC-3 or above is reported by real or emulated struct se_device,
* use emulated ALUA.
*/
- if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
- printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
- " device\n", TRANSPORT(dev)->name);
+ if (dev->transport->get_device_rev(dev) >= SCSI_3) {
+ pr_debug("%s: Enabling ALUA Emulation for SPC-3"
+ " device\n", dev->transport->name);
/*
* Associate this struct se_device with the default ALUA
* LUN Group.
*/
lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
- if (IS_ERR(lu_gp_mem) || !lu_gp_mem)
- return -1;
+ if (IS_ERR(lu_gp_mem))
+ return PTR_ERR(lu_gp_mem);
alua->alua_type = SPC3_ALUA_EMULATED;
alua->alua_state_check = &core_alua_state_check;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
__core_alua_attach_lu_gp_mem(lu_gp_mem,
- se_global->default_lu_gp);
+ default_lu_gp);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
- printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
+ pr_debug("%s: Adding to default ALUA LU Group:"
" core/alua/lu_gps/default_lu_gp\n",
- TRANSPORT(dev)->name);
+ dev->transport->name);
} else {
alua->alua_type = SPC2_ALUA_DISABLED;
alua->alua_state_check = &core_alua_state_check_nop;
- printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
- " device\n", TRANSPORT(dev)->name);
+ pr_debug("%s: Disabling ALUA Emulation for SPC-2"
+ " device\n", dev->transport->name);
}
return 0;
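Throughout target_core_alua.c the accessor macros (T10_ALUA(), SE_DEV(), TPG_TFO(), DEV_ATTRIB(), TRANSPORT()) are replaced by spelling out the struct members they used to hide. The trade-off in miniature, with invented types that merely mimic the shape of the real structures:

#include <stdio.h>

struct dev_attrib { int emulate_write_cache; };
struct subsystem_dev { struct dev_attrib attrib; };
struct device { struct subsystem_dev *sub_dev; const char *name; };

/* Macro style: short at the call site, but it hides a double dereference and
 * makes it harder to grep for who actually touches the field. */
#define DEV_ATTRIB_SKETCH(d) (&(d)->sub_dev->attrib)

static void report(struct device *dev)
{
        /* Old style */
        printf("%s: wce=%d\n", dev->name,
               DEV_ATTRIB_SKETCH(dev)->emulate_write_cache);
        /* New style: the same access, written out explicitly */
        printf("%s: wce=%d\n", dev->name,
               dev->sub_dev->attrib.emulate_write_cache);
}

int main(void)
{
        struct subsystem_dev sub = { .attrib = { .emulate_write_cache = 1 } };
        struct device dev = { .sub_dev = &sub, .name = "sketch0" };

        report(&dev);
        return 0;
}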
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 7f19c8b7b84..8ae09a1bdf7 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -23,6 +23,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -64,20 +65,22 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf)
static int
target_emulate_inquiry_std(struct se_cmd *cmd)
{
- struct se_lun *lun = SE_LUN(cmd);
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf;
+ struct se_lun *lun = cmd->se_lun;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
/*
* Make sure we at least have 6 bytes of INQUIRY response
* payload going back for EVPD=0
*/
if (cmd->data_length < 6) {
- printk(KERN_ERR "SCSI Inquiry payload length: %u"
+ pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=0\n", cmd->data_length);
- return -1;
+ return -EINVAL;
}
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = dev->transport->get_device_type(dev);
if (buf[0] == TYPE_TAPE)
buf[1] = 0x80;
@@ -86,12 +89,12 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
/*
* Enable SCCS and TPGS fields for Emulated ALUA
*/
- if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
+ if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
target_fill_alua_data(lun->lun_sep, buf);
if (cmd->data_length < 8) {
buf[4] = 1; /* Set additional length to 1 */
- return 0;
+ goto out;
}
buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
@@ -102,40 +105,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
*/
if (cmd->data_length < 36) {
buf[4] = 3; /* Set additional length to 3 */
- return 0;
+ goto out;
}
snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
snprintf((unsigned char *)&buf[16], 16, "%s",
- &DEV_T10_WWN(dev)->model[0]);
+ &dev->se_sub_dev->t10_wwn.model[0]);
snprintf((unsigned char *)&buf[32], 4, "%s",
- &DEV_T10_WWN(dev)->revision[0]);
+ &dev->se_sub_dev->t10_wwn.revision[0]);
buf[4] = 31; /* Set additional length to 31 */
- return 0;
-}
-
-/* supported vital product data pages */
-static int
-target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
-{
- buf[1] = 0x00;
- if (cmd->data_length < 8)
- return 0;
-
- buf[4] = 0x0;
- /*
- * Only report the INQUIRY EVPD=1 pages after a valid NAA
- * Registered Extended LUN WWN has been set via ConfigFS
- * during device creation/restart.
- */
- if (SE_DEV(cmd)->se_sub_dev->su_dev_flags &
- SDF_EMULATED_VPD_UNIT_SERIAL) {
- buf[3] = 3;
- buf[5] = 0x80;
- buf[6] = 0x83;
- buf[7] = 0x86;
- }
+out:
+ transport_kunmap_first_data_page(cmd);
return 0;
}
@@ -143,16 +124,15 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
u16 len = 0;
- buf[1] = 0x80;
if (dev->se_sub_dev->su_dev_flags &
SDF_EMULATED_VPD_UNIT_SERIAL) {
u32 unit_serial_len;
unit_serial_len =
- strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+ strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
unit_serial_len++; /* For NULL Terminator */
if (((len + 4) + unit_serial_len) > cmd->data_length) {
@@ -162,7 +142,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
len += sprintf((unsigned char *)&buf[4], "%s",
- &DEV_T10_WWN(dev)->unit_serial[0]);
+ &dev->se_sub_dev->t10_wwn.unit_serial[0]);
len++; /* Extra Byte for NULL Terminator */
buf[3] = len;
}
@@ -176,21 +156,18 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
- struct se_device *dev = SE_DEV(cmd);
- struct se_lun *lun = SE_LUN(cmd);
+ struct se_device *dev = cmd->se_dev;
+ struct se_lun *lun = cmd->se_lun;
struct se_port *port = NULL;
struct se_portal_group *tpg = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
- unsigned char binary, binary_new;
- unsigned char *prod = &DEV_T10_WWN(dev)->model[0];
+ unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
u32 prod_len;
u32 unit_serial_len, off = 0;
- int i;
u16 len = 0, id_len;
- buf[1] = 0x83;
off = 4;
/*
@@ -210,11 +187,11 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
/* CODE SET == Binary */
buf[off++] = 0x1;
- /* Set ASSOICATION == addressed logical unit: 0)b */
+ /* Set ASSOCIATION == addressed logical unit: 0)b */
buf[off] = 0x00;
/* Identifier/Designator type == NAA identifier */
- buf[off++] = 0x3;
+ buf[off++] |= 0x3;
off++;
/* Identifier/Designator length */
@@ -237,16 +214,9 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* VENDOR_SPECIFIC_IDENTIFIER and
* VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
*/
- binary = transport_asciihex_to_binaryhex(
- &DEV_T10_WWN(dev)->unit_serial[0]);
- buf[off++] |= (binary & 0xf0) >> 4;
- for (i = 0; i < 24; i += 2) {
- binary_new = transport_asciihex_to_binaryhex(
- &DEV_T10_WWN(dev)->unit_serial[i+2]);
- buf[off] = (binary & 0x0f) << 4;
- buf[off++] |= (binary_new & 0xf0) >> 4;
- binary = binary_new;
- }
+ buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]);
+ hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);
+
len = 20;
off = (len + 4);
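The EVPD 0x83 hunk above drops a hand-rolled ASCII-hex packing loop in favour of the kernel's hex_to_bin()/hex2bin() helpers. A self-contained version of what those helpers do (simplified: no validation of non-hex input):

#include <stdio.h>
#include <stddef.h>
#include <ctype.h>

/* Convert one hex digit to its 4-bit value (assumes valid input). */
static int hex_to_bin_sketch(char ch)
{
        if (ch >= '0' && ch <= '9')
                return ch - '0';
        return tolower((unsigned char)ch) - 'a' + 10;
}

/* Pack 2 * count hex characters from src into count bytes at dst. */
static void hex2bin_sketch(unsigned char *dst, const char *src, size_t count)
{
        while (count--) {
                *dst = hex_to_bin_sketch(*src++) << 4;
                *dst++ |= hex_to_bin_sketch(*src++);
        }
}

int main(void)
{
        unsigned char naa[6];

        hex2bin_sketch(naa, "600140501a2b", sizeof(naa));   /* sample serial */
        for (size_t i = 0; i < sizeof(naa); i++)
                printf("%02x", naa[i]);
        printf("\n");
        return 0;
}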
@@ -263,7 +233,7 @@ check_t10_vend_desc:
if (dev->se_sub_dev->su_dev_flags &
SDF_EMULATED_VPD_UNIT_SERIAL) {
unit_serial_len =
- strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+ strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
unit_serial_len++; /* For NULL Terminator */
if ((len + (id_len + 4) +
@@ -274,7 +244,7 @@ check_t10_vend_desc:
}
id_len += sprintf((unsigned char *)&buf[off+12],
"%s:%s", prod,
- &DEV_T10_WWN(dev)->unit_serial[0]);
+ &dev->se_sub_dev->t10_wwn.unit_serial[0]);
}
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
@@ -312,10 +282,10 @@ check_port:
goto check_tpgi;
}
buf[off] =
- (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
- /* Set ASSOICATION == target port: 01b */
+ /* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == Relative target port identifier */
buf[off++] |= 0x4;
@@ -335,7 +305,7 @@ check_port:
* section 7.5.1 Table 362
*/
check_tpgi:
- if (T10_ALUA(dev->se_sub_dev)->alua_type !=
+ if (dev->se_sub_dev->t10_alua.alua_type !=
SPC3_ALUA_EMULATED)
goto check_scsi_name;
@@ -349,7 +319,7 @@ check_tpgi:
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if (!(tg_pt_gp)) {
+ if (!tg_pt_gp) {
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
goto check_lu_gp;
}
@@ -357,10 +327,10 @@ check_tpgi:
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
buf[off] =
- (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
- /* Set ASSOICATION == target port: 01b */
+ /* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == Target port group identifier */
buf[off++] |= 0x5;
@@ -380,12 +350,12 @@ check_lu_gp:
goto check_scsi_name;
}
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!(lu_gp_mem))
+ if (!lu_gp_mem)
goto check_scsi_name;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
- if (!(lu_gp)) {
+ if (!lu_gp) {
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
goto check_scsi_name;
}
@@ -409,7 +379,7 @@ check_lu_gp:
* section 7.5.1 Table 362
*/
check_scsi_name:
- scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg));
+ scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
scsi_name_len += 10;
/* Check for 4-byte padding */
@@ -424,10 +394,10 @@ check_scsi_name:
goto set_len;
}
buf[off] =
- (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x3; /* CODE SET == UTF-8 */
buf[off] = 0x80; /* Set PIV=1 */
- /* Set ASSOICATION == target port: 01b */
+ /* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == SCSI name string */
buf[off++] |= 0x8;
@@ -438,9 +408,9 @@ check_scsi_name:
* Target Port, this means "<iSCSI name>,t,0x<TPGT> in
* UTF-8 encoding.
*/
- tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+ tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
- TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt);
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
scsi_name_len += 1 /* Include NULL terminator */;
/*
* The null-terminated, null-padded (see 4.4.2) SCSI
@@ -471,13 +441,12 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
if (cmd->data_length < 60)
return 0;
- buf[1] = 0x86;
buf[2] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */
- if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0)
+ if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
buf[6] = 0x01;
return 0;
}
@@ -486,7 +455,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
int have_tp = 0;
/*
@@ -494,27 +463,29 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
* emulate_tpu=1 or emulate_tpws=1 we will expect a
* different page length for Thin Provisioning.
*/
- if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
have_tp = 1;
if (cmd->data_length < (0x10 + 4)) {
- printk(KERN_INFO "Received data_length: %u"
+ pr_debug("Received data_length: %u"
" too small for EVPD 0xb0\n",
cmd->data_length);
- return -1;
+ return -EINVAL;
}
if (have_tp && cmd->data_length < (0x3c + 4)) {
- printk(KERN_INFO "Received data_length: %u"
+ pr_debug("Received data_length: %u"
" too small for TPE=1 EVPD 0xb0\n",
cmd->data_length);
have_tp = 0;
}
buf[0] = dev->transport->get_device_type(dev);
- buf[1] = 0xb0;
buf[3] = have_tp ? 0x3c : 0x10;
+ /* Set WSNZ to 1 */
+ buf[4] = 0x01;
+
/*
* Set OPTIMAL TRANSFER LENGTH GRANULARITY
*/
@@ -523,12 +494,12 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM TRANSFER LENGTH
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]);
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]);
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
/*
* Exit now if we don't support TP or the initiator sent a too
@@ -540,35 +511,51 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM UNMAP LBA COUNT
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]);
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
/*
* Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count,
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
&buf[24]);
/*
* Set OPTIMAL UNMAP GRANULARITY
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]);
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
/*
* UNMAP GRANULARITY ALIGNMENT
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment,
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
&buf[32]);
- if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0)
+ if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
buf[32] |= 0x80; /* Set the UGAVALID bit */
return 0;
}
+/* Block Device Characteristics VPD page */
+static int
+target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ buf[0] = dev->transport->get_device_type(dev);
+ buf[3] = 0x3c;
+
+ if (cmd->data_length >= 5 &&
+ dev->se_sub_dev->se_dev_attrib.is_nonrot)
+ buf[5] = 1;
+
+ return 0;
+}
+
/* Thin Provisioning VPD */
static int
target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
/*
* From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
@@ -579,7 +566,6 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* defined in table 162.
*/
buf[0] = dev->transport->get_device_type(dev);
- buf[1] = 0xb2;
/*
* Set Hardcoded length mentioned above for DP=0
@@ -602,7 +588,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* the UNMAP command (see 5.25). A TPU bit set to zero indicates
* that the device server does not support the UNMAP command.
*/
- if (DEV_ATTRIB(dev)->emulate_tpu != 0)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
buf[5] = 0x80;
/*
@@ -611,18 +597,59 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* A TPWS bit set to zero indicates that the device server does not
* support the use of the WRITE SAME (16) command to unmap LBAs.
*/
- if (DEV_ATTRIB(dev)->emulate_tpws != 0)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
buf[5] |= 0x40;
return 0;
}
static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
+
+static struct {
+ uint8_t page;
+ int (*emulate)(struct se_cmd *, unsigned char *);
+} evpd_handlers[] = {
+ { .page = 0x00, .emulate = target_emulate_evpd_00 },
+ { .page = 0x80, .emulate = target_emulate_evpd_80 },
+ { .page = 0x83, .emulate = target_emulate_evpd_83 },
+ { .page = 0x86, .emulate = target_emulate_evpd_86 },
+ { .page = 0xb0, .emulate = target_emulate_evpd_b0 },
+ { .page = 0xb1, .emulate = target_emulate_evpd_b1 },
+ { .page = 0xb2, .emulate = target_emulate_evpd_b2 },
+};
+
+/* supported vital product data pages */
+static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+{
+ int p;
+
+ if (cmd->data_length < 8)
+ return 0;
+ /*
+ * Only report the INQUIRY EVPD=1 pages after a valid NAA
+ * Registered Extended LUN WWN has been set via ConfigFS
+ * during device creation/restart.
+ */
+ if (cmd->se_dev->se_sub_dev->su_dev_flags &
+ SDF_EMULATED_VPD_UNIT_SERIAL) {
+ buf[3] = ARRAY_SIZE(evpd_handlers);
+ for (p = 0; p < min_t(int, ARRAY_SIZE(evpd_handlers),
+ cmd->data_length - 4); ++p)
+ buf[p + 4] = evpd_handlers[p].page;
+ }
+
+ return 0;
+}
+
+static int
target_emulate_inquiry(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf;
- unsigned char *cdb = cmd->t_task->t_task_cdb;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
+ unsigned char *cdb = cmd->t_task_cdb;
+ int p, ret;
if (!(cdb[1] & 0x1))
return target_emulate_inquiry_std(cmd);
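The hunk above replaces the growing switch on the VPD page code with a static table of {page, handler} pairs, and target_emulate_evpd_00() now derives the "supported pages" list from that same table instead of hard-coding it. The pattern reduced to standalone C (page numbers and handler bodies are placeholders):

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int emulate_page_00(unsigned char *buf);
static int emulate_page_80(unsigned char *buf) { buf[1] = 0x80; return 0; }
static int emulate_page_83(unsigned char *buf) { buf[1] = 0x83; return 0; }

static const struct {
        unsigned char page;
        int (*emulate)(unsigned char *buf);
} handlers[] = {
        { 0x00, emulate_page_00 },
        { 0x80, emulate_page_80 },
        { 0x83, emulate_page_83 },
};

/* Page 0x00 lists every supported page -- generated from the table itself,
 * so adding a handler automatically advertises it. */
static int emulate_page_00(unsigned char *buf)
{
        buf[1] = 0x00;
        buf[3] = ARRAY_SIZE(handlers);
        for (size_t p = 0; p < ARRAY_SIZE(handlers); p++)
                buf[p + 4] = handlers[p].page;
        return 0;
}

static int dispatch(unsigned char page_code, unsigned char *buf)
{
        for (size_t p = 0; p < ARRAY_SIZE(handlers); p++)
                if (handlers[p].page == page_code)
                        return handlers[p].emulate(buf);
        fprintf(stderr, "Unknown VPD code: 0x%02x\n", page_code);
        return -1;
}

int main(void)
{
        unsigned char buf[16] = { 0 };

        dispatch(0x00, buf);
        printf("supported pages: %d\n", buf[3]);
        return dispatch(0xb2, buf) < 0 ? 0 : 1;    /* 0xb2 is unknown here */
}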
@@ -635,38 +662,33 @@ target_emulate_inquiry(struct se_cmd *cmd)
* payload length left for the next outgoing EVPD metadata
*/
if (cmd->data_length < 4) {
- printk(KERN_ERR "SCSI Inquiry payload length: %u"
+ pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
- return -1;
+ return -EINVAL;
}
+
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = dev->transport->get_device_type(dev);
- switch (cdb[2]) {
- case 0x00:
- return target_emulate_evpd_00(cmd, buf);
- case 0x80:
- return target_emulate_evpd_80(cmd, buf);
- case 0x83:
- return target_emulate_evpd_83(cmd, buf);
- case 0x86:
- return target_emulate_evpd_86(cmd, buf);
- case 0xb0:
- return target_emulate_evpd_b0(cmd, buf);
- case 0xb2:
- return target_emulate_evpd_b2(cmd, buf);
- default:
- printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
- return -1;
- }
+ for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
+ if (cdb[2] == evpd_handlers[p].page) {
+ buf[1] = cdb[2];
+ ret = evpd_handlers[p].emulate(cmd, buf);
+ transport_kunmap_first_data_page(cmd);
+ return ret;
+ }
- return 0;
+ transport_kunmap_first_data_page(cmd);
+ pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+ return -EINVAL;
}
static int
target_emulate_readcapacity(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
u32 blocks;
@@ -675,30 +697,36 @@ target_emulate_readcapacity(struct se_cmd *cmd)
else
blocks = (u32)blocks_long;
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = (blocks >> 24) & 0xff;
buf[1] = (blocks >> 16) & 0xff;
buf[2] = (blocks >> 8) & 0xff;
buf[3] = blocks & 0xff;
- buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
- buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
- buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
- buf[7] = DEV_ATTRIB(dev)->block_size & 0xff;
+ buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
+ buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
+ buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
+ buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
/*
* Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
*/
- if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
put_unaligned_be32(0xFFFFFFFF, &buf[0]);
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
static int
target_emulate_readcapacity_16(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
unsigned long long blocks = dev->transport->get_blocks(dev);
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = (blocks >> 56) & 0xff;
buf[1] = (blocks >> 48) & 0xff;
buf[2] = (blocks >> 40) & 0xff;
@@ -707,17 +735,19 @@ target_emulate_readcapacity_16(struct se_cmd *cmd)
buf[5] = (blocks >> 16) & 0xff;
buf[6] = (blocks >> 8) & 0xff;
buf[7] = blocks & 0xff;
- buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
- buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
- buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
- buf[11] = DEV_ATTRIB(dev)->block_size & 0xff;
+ buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
+ buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
+ buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
+ buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
- if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
buf[14] = 0x80;
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
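READ CAPACITY and READ CAPACITY (16) above pack the block count and block size a byte at a time with explicit shifts, while the same hunks also use put_unaligned_be32() for the 0xFFFFFFFF sentinel; both produce the same big-endian wire layout. A standalone equivalent of the helper form:

#include <stdint.h>
#include <stdio.h>

/* Store a 32-bit value big-endian at an arbitrary byte offset -- the same
 * effect as the shift-and-mask chains above. */
static void put_be32(uint32_t val, unsigned char *p)
{
        p[0] = (val >> 24) & 0xff;
        p[1] = (val >> 16) & 0xff;
        p[2] = (val >> 8) & 0xff;
        p[3] = val & 0xff;
}

int main(void)
{
        unsigned char buf[8] = { 0 };
        uint32_t blocks = 0x00200000;   /* illustrative capacity */
        uint32_t block_size = 512;

        put_be32(blocks, &buf[0]);
        put_be32(block_size, &buf[4]);

        for (int i = 0; i < 8; i++)
                printf("%02x ", buf[i]);
        printf("\n");
        return 0;
}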
@@ -737,6 +767,35 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
p[1] = 0x0a;
p[2] = 2;
/*
+ * From spc4r23, 7.4.7 Control mode page
+ *
+ * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
+ * restrictions on the algorithm used for reordering commands
+ * having the SIMPLE task attribute (see SAM-4).
+ *
+ * Table 368 -- QUEUE ALGORITHM MODIFIER field
+ * Code Description
+ * 0h Restricted reordering
+ * 1h Unrestricted reordering allowed
+ * 2h to 7h Reserved
+ * 8h to Fh Vendor specific
+ *
+ * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
+ * the device server shall order the processing sequence of commands
+ * having the SIMPLE task attribute such that data integrity is maintained
+ * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
+ * requests is halted at any time, the final value of all data observable
+ * on the medium shall be the same as if all the commands had been processed
+ * with the ORDERED task attribute).
+ *
+ * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
+ * device server may reorder the processing sequence of commands having the
+ * SIMPLE task attribute in any manner. Any data integrity exposures related to
+ * command sequence order shall be explicitly handled by the application client
+ * through the selection of appropriate commands and task attributes.
+ */
+ p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
+ /*
* From spc4r17, section 7.4.6 Control mode Page
*
* Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
@@ -765,8 +824,8 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
* for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
* to the number of commands completed with one of those status codes.
*/
- p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 :
- (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+ p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
+ (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
/*
* From spc4r17, section 7.4.6 Control mode Page
*
@@ -779,7 +838,7 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
- p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00;
+ p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
p[8] = 0xff;
p[9] = 0xff;
p[11] = 30;
@@ -792,7 +851,7 @@ target_modesense_caching(struct se_device *dev, unsigned char *p)
{
p[0] = 0x08;
p[1] = 0x12;
- if (DEV_ATTRIB(dev)->emulate_write_cache > 0)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
p[2] = 0x04; /* Write Cache Enable */
p[12] = 0x20; /* Disabled Read Ahead */
@@ -830,9 +889,9 @@ target_modesense_dpofua(unsigned char *buf, int type)
static int
target_emulate_modesense(struct se_cmd *cmd, int ten)
{
- struct se_device *dev = SE_DEV(cmd);
- char *cdb = cmd->t_task->t_task_cdb;
- unsigned char *rbuf = cmd->t_task->t_task_buf;
+ struct se_device *dev = cmd->se_dev;
+ char *cdb = cmd->t_task_cdb;
+ unsigned char *rbuf;
int type = dev->transport->get_device_type(dev);
int offset = (ten) ? 8 : 4;
int length = 0;
@@ -856,7 +915,7 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
length += target_modesense_control(dev, &buf[offset+length]);
break;
default:
- printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n",
+ pr_err("Got Unknown Mode Page: 0x%02x\n",
cdb[2] & 0x3f);
return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
}
@@ -867,13 +926,13 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
buf[0] = (offset >> 8) & 0xff;
buf[1] = offset & 0xff;
- if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
target_modesense_write_protect(&buf[3], type);
- if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
- (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+ if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
target_modesense_dpofua(&buf[3], type);
if ((offset + 2) > cmd->data_length)
@@ -883,19 +942,22 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
offset -= 1;
buf[0] = offset & 0xff;
- if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
target_modesense_write_protect(&buf[2], type);
- if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
- (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+ if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
target_modesense_dpofua(&buf[2], type);
if ((offset + 1) > cmd->data_length)
offset = cmd->data_length;
}
+
+ rbuf = transport_kmap_first_data_page(cmd);
memcpy(rbuf, buf, offset);
+ transport_kunmap_first_data_page(cmd);
return 0;
}
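
The hunk above replaces direct use of a preallocated t_task_buf with mapping the first data page around the memcpy(); a minimal sketch of that pattern, assuming only the two transport_kmap*/kunmap* helpers already used in this diff:

static int sketch_copy_to_first_data_page(struct se_cmd *cmd,
					  const unsigned char *src, int len)
{
	unsigned char *rbuf;

	rbuf = transport_kmap_first_data_page(cmd);	/* map first page of the cmd's data buffer */
	memcpy(rbuf, src, len);				/* fill in the emulated payload */
	transport_kunmap_first_data_page(cmd);		/* drop the temporary mapping */
	return 0;
}
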
@@ -903,16 +965,20 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
static int
target_emulate_request_sense(struct se_cmd *cmd)
{
- unsigned char *cdb = cmd->t_task->t_task_cdb;
- unsigned char *buf = cmd->t_task->t_task_buf;
+ unsigned char *cdb = cmd->t_task_cdb;
+ unsigned char *buf;
u8 ua_asc = 0, ua_ascq = 0;
+ int err = 0;
if (cdb[1] & 0x01) {
- printk(KERN_ERR "REQUEST_SENSE description emulation not"
+ pr_err("REQUEST_SENSE description emulation not"
" supported\n");
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
- if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {
+
+ buf = transport_kmap_first_data_page(cmd);
+
+ if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
/*
* CURRENT ERROR, UNIT ATTENTION
*/
@@ -924,7 +990,8 @@ target_emulate_request_sense(struct se_cmd *cmd)
*/
if (cmd->data_length <= 18) {
buf[7] = 0x00;
- return 0;
+ err = -EINVAL;
+ goto end;
}
/*
* The Additional Sense Code (ASC) from the UNIT ATTENTION
@@ -944,7 +1011,8 @@ target_emulate_request_sense(struct se_cmd *cmd)
*/
if (cmd->data_length <= 18) {
buf[7] = 0x00;
- return 0;
+ err = -EINVAL;
+ goto end;
}
/*
* NO ADDITIONAL SENSE INFORMATION
@@ -953,6 +1021,9 @@ target_emulate_request_sense(struct se_cmd *cmd)
buf[7] = 0x0A;
}
+end:
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
@@ -963,13 +1034,13 @@ target_emulate_request_sense(struct se_cmd *cmd)
static int
target_emulate_unmap(struct se_task *task)
{
- struct se_cmd *cmd = TASK_CMD(task);
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
- unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf, *ptr = NULL;
+ unsigned char *cdb = &cmd->t_task_cdb[0];
sector_t lba;
unsigned int size = cmd->data_length, range;
- int ret, offset;
+ int ret = 0, offset;
unsigned short dl, bd_dl;
/* First UNMAP block descriptor starts at 8 byte offset */
@@ -977,21 +1048,24 @@ target_emulate_unmap(struct se_task *task)
size -= 8;
dl = get_unaligned_be16(&cdb[0]);
bd_dl = get_unaligned_be16(&cdb[2]);
+
+ buf = transport_kmap_first_data_page(cmd);
+
ptr = &buf[offset];
- printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
+ pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
while (size) {
lba = get_unaligned_be64(&ptr[0]);
range = get_unaligned_be32(&ptr[8]);
- printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n",
+ pr_debug("UNMAP: Using lba: %llu and range: %u\n",
(unsigned long long)lba, range);
ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) {
- printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
+ pr_err("blkdev_issue_discard() failed: %d\n",
ret);
- return -1;
+ goto err;
}
ptr += 16;
@@ -1000,7 +1074,10 @@ target_emulate_unmap(struct se_task *task)
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
- return 0;
+err:
+ transport_kunmap_first_data_page(cmd);
+
+ return ret;
}
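
For orientation, the UNMAP parameter layout walked above, as the code reads it (struct name illustrative; the __be fields denote big-endian wire format):

struct sketch_unmap_block_desc {	/* one 16-byte block descriptor */
	__be64 lba;			/* bytes 0-7:  first LBA         */
	__be32 nr_blocks;		/* bytes 8-11: range to discard  */
	__be32 reserved;		/* bytes 12-15                   */
};
/*
 * The first block descriptor starts at offset 8 of the mapped data page;
 * each loop iteration above consumes 16 bytes and hands one (lba, range)
 * pair to dev->transport->do_discard(dev, lba, range).
 */
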
/*
@@ -1008,23 +1085,36 @@ target_emulate_unmap(struct se_task *task)
* Note this is not used for TCM/pSCSI passthrough
*/
static int
-target_emulate_write_same(struct se_task *task)
+target_emulate_write_same(struct se_task *task, int write_same32)
{
- struct se_cmd *cmd = TASK_CMD(task);
- struct se_device *dev = SE_DEV(cmd);
- sector_t lba = cmd->t_task->t_task_lba;
- unsigned int range;
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
+ sector_t range;
+ sector_t lba = cmd->t_task_lba;
+ unsigned int num_blocks;
int ret;
+ /*
+ * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explicit
+ * range when non-zero is supplied, otherwise calculate the remaining
+ * range based on ->get_blocks() - starting LBA.
+ */
+ if (write_same32)
+ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
+ else
+ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
- range = (cmd->data_length / DEV_ATTRIB(dev)->block_size);
+ if (num_blocks != 0)
+ range = num_blocks;
+ else
+ range = (dev->transport->get_blocks(dev) - lba);
- printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n",
- (unsigned long long)lba, range);
+ pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
+ (unsigned long long)lba, (unsigned long long)range);
ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) {
- printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n");
- return -1;
+ pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
+ return ret;
}
task->task_scsi_status = GOOD;
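
A sketch (outside the patch) of the range selection the new write_same32 parameter enables above; the helper is hypothetical, the CDB offsets are the ones used in this hunk:

static sector_t sketch_write_same_range(struct se_cmd *cmd, struct se_device *dev,
					sector_t lba, int write_same32)
{
	unsigned int num_blocks;

	/* NUMBER OF LOGICAL BLOCKS: byte 10 for WRITE_SAME_16, byte 28 for WRITE_SAME_32 */
	if (write_same32)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
	else
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);

	/* Zero means: discard from lba through the last block of the device */
	return num_blocks ? (sector_t)num_blocks :
			    dev->transport->get_blocks(dev) - lba;
}
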
@@ -1035,12 +1125,12 @@ target_emulate_write_same(struct se_task *task)
int
transport_emulate_control_cdb(struct se_task *task)
{
- struct se_cmd *cmd = TASK_CMD(task);
- struct se_device *dev = SE_DEV(cmd);
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
unsigned short service_action;
int ret = 0;
- switch (cmd->t_task->t_task_cdb[0]) {
+ switch (cmd->t_task_cdb[0]) {
case INQUIRY:
ret = target_emulate_inquiry(cmd);
break;
@@ -1054,13 +1144,13 @@ transport_emulate_control_cdb(struct se_task *task)
ret = target_emulate_modesense(cmd, 1);
break;
case SERVICE_ACTION_IN:
- switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
+ switch (cmd->t_task_cdb[1] & 0x1f) {
case SAI_READ_CAPACITY_16:
ret = target_emulate_readcapacity_16(cmd);
break;
default:
- printk(KERN_ERR "Unsupported SA: 0x%02x\n",
- cmd->t_task->t_task_cdb[1] & 0x1f);
+ pr_err("Unsupported SA: 0x%02x\n",
+ cmd->t_task_cdb[1] & 0x1f);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
break;
@@ -1069,7 +1159,7 @@ transport_emulate_control_cdb(struct se_task *task)
break;
case UNMAP:
if (!dev->transport->do_discard) {
- printk(KERN_ERR "UNMAP emulation not supported for: %s\n",
+ pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
@@ -1077,27 +1167,27 @@ transport_emulate_control_cdb(struct se_task *task)
break;
case WRITE_SAME_16:
if (!dev->transport->do_discard) {
- printk(KERN_ERR "WRITE_SAME_16 emulation not supported"
+ pr_err("WRITE_SAME_16 emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
- ret = target_emulate_write_same(task);
+ ret = target_emulate_write_same(task, 0);
break;
case VARIABLE_LENGTH_CMD:
service_action =
- get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
+ get_unaligned_be16(&cmd->t_task_cdb[8]);
switch (service_action) {
case WRITE_SAME_32:
if (!dev->transport->do_discard) {
- printk(KERN_ERR "WRITE_SAME_32 SA emulation not"
+ pr_err("WRITE_SAME_32 SA emulation not"
" supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
- ret = target_emulate_write_same(task);
+ ret = target_emulate_write_same(task, 1);
break;
default:
- printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
+ pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
" 0x%02x\n", service_action);
break;
}
@@ -1105,8 +1195,7 @@ transport_emulate_control_cdb(struct se_task *task)
case SYNCHRONIZE_CACHE:
case 0x91: /* SYNCHRONIZE_CACHE_16: */
if (!dev->transport->do_sync_cache) {
- printk(KERN_ERR
- "SYNCHRONIZE_CACHE emulation not supported"
+ pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
@@ -1123,8 +1212,8 @@ transport_emulate_control_cdb(struct se_task *task)
case WRITE_FILEMARKS:
break;
default:
- printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
- cmd->t_task->t_task_cdb[0], dev->transport->name);
+ pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n",
+ cmd->t_task_cdb[0], dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index ee6fad979b5..b2575d8568c 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -37,6 +37,7 @@
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
+#include <linux/spinlock.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -52,6 +53,8 @@
#include "target_core_rd.h"
#include "target_core_stat.h"
+extern struct t10_alua_lu_gp *default_lu_gp;
+
static struct list_head g_tf_list;
static struct mutex g_tf_lock;
@@ -61,6 +64,13 @@ struct target_core_configfs_attribute {
ssize_t (*store)(void *, const char *, size_t);
};
+static struct config_group target_core_hbagroup;
+static struct config_group alua_group;
+static struct config_group alua_lu_gps_group;
+
+static DEFINE_SPINLOCK(se_device_lock);
+static LIST_HEAD(se_dev_list);
+
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
@@ -94,12 +104,12 @@ static struct target_fabric_configfs *target_core_get_fabric(
{
struct target_fabric_configfs *tf;
- if (!(name))
+ if (!name)
return NULL;
mutex_lock(&g_tf_lock);
list_for_each_entry(tf, &g_tf_list, tf_list) {
- if (!(strcmp(tf->tf_name, name))) {
+ if (!strcmp(tf->tf_name, name)) {
atomic_inc(&tf->tf_access_cnt);
mutex_unlock(&g_tf_lock);
return tf;
@@ -120,7 +130,7 @@ static struct config_group *target_core_register_fabric(
struct target_fabric_configfs *tf;
int ret;
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:"
+ pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
" %s\n", group, name);
/*
* Ensure that TCM subsystem plugins are loaded at this point for
@@ -140,7 +150,7 @@ static struct config_group *target_core_register_fabric(
* registered, but simply provides auto loading logic for modules with
* mkdir(2) system calls with known TCM fabric modules.
*/
- if (!(strncmp(name, "iscsi", 5))) {
+ if (!strncmp(name, "iscsi", 5)) {
/*
* Automatically load the LIO Target fabric module when the
* following is called:
@@ -149,11 +159,11 @@ static struct config_group *target_core_register_fabric(
*/
ret = request_module("iscsi_target_mod");
if (ret < 0) {
- printk(KERN_ERR "request_module() failed for"
+ pr_err("request_module() failed for"
" iscsi_target_mod.ko: %d\n", ret);
return ERR_PTR(-EINVAL);
}
- } else if (!(strncmp(name, "loopback", 8))) {
+ } else if (!strncmp(name, "loopback", 8)) {
/*
* Automatically load the tcm_loop fabric module when the
* following is called:
@@ -162,25 +172,25 @@ static struct config_group *target_core_register_fabric(
*/
ret = request_module("tcm_loop");
if (ret < 0) {
- printk(KERN_ERR "request_module() failed for"
+ pr_err("request_module() failed for"
" tcm_loop.ko: %d\n", ret);
return ERR_PTR(-EINVAL);
}
}
tf = target_core_get_fabric(name);
- if (!(tf)) {
- printk(KERN_ERR "target_core_get_fabric() failed for %s\n",
+ if (!tf) {
+ pr_err("target_core_get_fabric() failed for %s\n",
name);
return ERR_PTR(-EINVAL);
}
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:"
+ pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
" %s\n", tf->tf_name);
/*
* On a successful target_core_get_fabric() lookup, the returned
* struct target_fabric_configfs *tf will contain a usage reference.
*/
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
+ pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
&TF_CIT_TMPL(tf)->tfc_wwn_cit);
tf->tf_group.default_groups = tf->tf_default_groups;
@@ -192,14 +202,14 @@ static struct config_group *target_core_register_fabric(
config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
&TF_CIT_TMPL(tf)->tfc_discovery_cit);
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
+ pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
" %s\n", tf->tf_group.cg_item.ci_name);
/*
* Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
*/
tf->tf_ops.tf_subsys = tf->tf_subsys;
tf->tf_fabric = &tf->tf_group.cg_item;
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
+ pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
" for %s\n", name);
return &tf->tf_group;
@@ -218,18 +228,18 @@ static void target_core_deregister_fabric(
struct config_item *df_item;
int i;
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
" tf list\n", config_item_name(item));
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
" %s\n", tf->tf_name);
atomic_dec(&tf->tf_access_cnt);
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing"
" tf->tf_fabric for %s\n", tf->tf_name);
tf->tf_fabric = NULL;
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
" %s\n", config_item_name(item));
tf_group = &tf->tf_group;
@@ -296,22 +306,18 @@ struct target_fabric_configfs *target_fabric_configfs_init(
{
struct target_fabric_configfs *tf;
- if (!(fabric_mod)) {
- printk(KERN_ERR "Missing struct module *fabric_mod pointer\n");
- return NULL;
- }
if (!(name)) {
- printk(KERN_ERR "Unable to locate passed fabric name\n");
- return NULL;
+ pr_err("Unable to locate passed fabric name\n");
+ return ERR_PTR(-EINVAL);
}
- if (strlen(name) > TARGET_FABRIC_NAME_SIZE) {
- printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
+ if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
+ pr_err("Passed name: %s exceeds TARGET_FABRIC"
"_NAME_SIZE\n", name);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
- if (!(tf))
+ if (!tf)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&tf->tf_list);
@@ -330,9 +336,9 @@ struct target_fabric_configfs *target_fabric_configfs_init(
list_add_tail(&tf->tf_list, &g_tf_list);
mutex_unlock(&g_tf_lock);
- printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
+ pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
">>>>>>>>>>>>>>\n");
- printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for"
+ pr_debug("Initialized struct target_fabric_configfs: %p for"
" %s\n", tf, tf->tf_name);
return tf;
}
@@ -361,140 +367,132 @@ static int target_fabric_tf_ops_check(
{
struct target_core_fabric_ops *tfo = &tf->tf_ops;
- if (!(tfo->get_fabric_name)) {
- printk(KERN_ERR "Missing tfo->get_fabric_name()\n");
- return -EINVAL;
- }
- if (!(tfo->get_fabric_proto_ident)) {
- printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n");
+ if (!tfo->get_fabric_name) {
+ pr_err("Missing tfo->get_fabric_name()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_wwn)) {
- printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n");
+ if (!tfo->get_fabric_proto_ident) {
+ pr_err("Missing tfo->get_fabric_proto_ident()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_tag)) {
- printk(KERN_ERR "Missing tfo->tpg_get_tag()\n");
+ if (!tfo->tpg_get_wwn) {
+ pr_err("Missing tfo->tpg_get_wwn()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_default_depth)) {
- printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n");
+ if (!tfo->tpg_get_tag) {
+ pr_err("Missing tfo->tpg_get_tag()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_pr_transport_id)) {
- printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n");
+ if (!tfo->tpg_get_default_depth) {
+ pr_err("Missing tfo->tpg_get_default_depth()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_pr_transport_id_len)) {
- printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n");
+ if (!tfo->tpg_get_pr_transport_id) {
+ pr_err("Missing tfo->tpg_get_pr_transport_id()\n");
return -EINVAL;
}
- if (!(tfo->tpg_check_demo_mode)) {
- printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n");
+ if (!tfo->tpg_get_pr_transport_id_len) {
+ pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n");
return -EINVAL;
}
- if (!(tfo->tpg_check_demo_mode_cache)) {
- printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n");
+ if (!tfo->tpg_check_demo_mode) {
+ pr_err("Missing tfo->tpg_check_demo_mode()\n");
return -EINVAL;
}
- if (!(tfo->tpg_check_demo_mode_write_protect)) {
- printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n");
+ if (!tfo->tpg_check_demo_mode_cache) {
+ pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
return -EINVAL;
}
- if (!(tfo->tpg_check_prod_mode_write_protect)) {
- printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n");
+ if (!tfo->tpg_check_demo_mode_write_protect) {
+ pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
return -EINVAL;
}
- if (!(tfo->tpg_alloc_fabric_acl)) {
- printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n");
+ if (!tfo->tpg_check_prod_mode_write_protect) {
+ pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
return -EINVAL;
}
- if (!(tfo->tpg_release_fabric_acl)) {
- printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n");
+ if (!tfo->tpg_alloc_fabric_acl) {
+ pr_err("Missing tfo->tpg_alloc_fabric_acl()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_inst_index)) {
- printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n");
+ if (!tfo->tpg_release_fabric_acl) {
+ pr_err("Missing tfo->tpg_release_fabric_acl()\n");
return -EINVAL;
}
- if (!(tfo->release_cmd_to_pool)) {
- printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n");
+ if (!tfo->tpg_get_inst_index) {
+ pr_err("Missing tfo->tpg_get_inst_index()\n");
return -EINVAL;
}
- if (!(tfo->release_cmd_direct)) {
- printk(KERN_ERR "Missing tfo->release_cmd_direct()\n");
+ if (!tfo->release_cmd) {
+ pr_err("Missing tfo->release_cmd()\n");
return -EINVAL;
}
- if (!(tfo->shutdown_session)) {
- printk(KERN_ERR "Missing tfo->shutdown_session()\n");
+ if (!tfo->shutdown_session) {
+ pr_err("Missing tfo->shutdown_session()\n");
return -EINVAL;
}
- if (!(tfo->close_session)) {
- printk(KERN_ERR "Missing tfo->close_session()\n");
+ if (!tfo->close_session) {
+ pr_err("Missing tfo->close_session()\n");
return -EINVAL;
}
- if (!(tfo->stop_session)) {
- printk(KERN_ERR "Missing tfo->stop_session()\n");
+ if (!tfo->stop_session) {
+ pr_err("Missing tfo->stop_session()\n");
return -EINVAL;
}
- if (!(tfo->fall_back_to_erl0)) {
- printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n");
+ if (!tfo->fall_back_to_erl0) {
+ pr_err("Missing tfo->fall_back_to_erl0()\n");
return -EINVAL;
}
- if (!(tfo->sess_logged_in)) {
- printk(KERN_ERR "Missing tfo->sess_logged_in()\n");
+ if (!tfo->sess_logged_in) {
+ pr_err("Missing tfo->sess_logged_in()\n");
return -EINVAL;
}
- if (!(tfo->sess_get_index)) {
- printk(KERN_ERR "Missing tfo->sess_get_index()\n");
+ if (!tfo->sess_get_index) {
+ pr_err("Missing tfo->sess_get_index()\n");
return -EINVAL;
}
- if (!(tfo->write_pending)) {
- printk(KERN_ERR "Missing tfo->write_pending()\n");
+ if (!tfo->write_pending) {
+ pr_err("Missing tfo->write_pending()\n");
return -EINVAL;
}
- if (!(tfo->write_pending_status)) {
- printk(KERN_ERR "Missing tfo->write_pending_status()\n");
+ if (!tfo->write_pending_status) {
+ pr_err("Missing tfo->write_pending_status()\n");
return -EINVAL;
}
- if (!(tfo->set_default_node_attributes)) {
- printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n");
+ if (!tfo->set_default_node_attributes) {
+ pr_err("Missing tfo->set_default_node_attributes()\n");
return -EINVAL;
}
- if (!(tfo->get_task_tag)) {
- printk(KERN_ERR "Missing tfo->get_task_tag()\n");
+ if (!tfo->get_task_tag) {
+ pr_err("Missing tfo->get_task_tag()\n");
return -EINVAL;
}
- if (!(tfo->get_cmd_state)) {
- printk(KERN_ERR "Missing tfo->get_cmd_state()\n");
+ if (!tfo->get_cmd_state) {
+ pr_err("Missing tfo->get_cmd_state()\n");
return -EINVAL;
}
- if (!(tfo->new_cmd_failure)) {
- printk(KERN_ERR "Missing tfo->new_cmd_failure()\n");
+ if (!tfo->queue_data_in) {
+ pr_err("Missing tfo->queue_data_in()\n");
return -EINVAL;
}
- if (!(tfo->queue_data_in)) {
- printk(KERN_ERR "Missing tfo->queue_data_in()\n");
+ if (!tfo->queue_status) {
+ pr_err("Missing tfo->queue_status()\n");
return -EINVAL;
}
- if (!(tfo->queue_status)) {
- printk(KERN_ERR "Missing tfo->queue_status()\n");
+ if (!tfo->queue_tm_rsp) {
+ pr_err("Missing tfo->queue_tm_rsp()\n");
return -EINVAL;
}
- if (!(tfo->queue_tm_rsp)) {
- printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n");
+ if (!tfo->set_fabric_sense_len) {
+ pr_err("Missing tfo->set_fabric_sense_len()\n");
return -EINVAL;
}
- if (!(tfo->set_fabric_sense_len)) {
- printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n");
+ if (!tfo->get_fabric_sense_len) {
+ pr_err("Missing tfo->get_fabric_sense_len()\n");
return -EINVAL;
}
- if (!(tfo->get_fabric_sense_len)) {
- printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n");
- return -EINVAL;
- }
- if (!(tfo->is_state_remove)) {
- printk(KERN_ERR "Missing tfo->is_state_remove()\n");
+ if (!tfo->is_state_remove) {
+ pr_err("Missing tfo->is_state_remove()\n");
return -EINVAL;
}
/*
@@ -502,20 +500,20 @@ static int target_fabric_tf_ops_check(
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
* target_core_fabric_configfs.c WWN+TPG group context code.
*/
- if (!(tfo->fabric_make_wwn)) {
- printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n");
+ if (!tfo->fabric_make_wwn) {
+ pr_err("Missing tfo->fabric_make_wwn()\n");
return -EINVAL;
}
- if (!(tfo->fabric_drop_wwn)) {
- printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n");
+ if (!tfo->fabric_drop_wwn) {
+ pr_err("Missing tfo->fabric_drop_wwn()\n");
return -EINVAL;
}
- if (!(tfo->fabric_make_tpg)) {
- printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n");
+ if (!tfo->fabric_make_tpg) {
+ pr_err("Missing tfo->fabric_make_tpg()\n");
return -EINVAL;
}
- if (!(tfo->fabric_drop_tpg)) {
- printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n");
+ if (!tfo->fabric_drop_tpg) {
+ pr_err("Missing tfo->fabric_drop_tpg()\n");
return -EINVAL;
}
@@ -533,22 +531,15 @@ static int target_fabric_tf_ops_check(
int target_fabric_configfs_register(
struct target_fabric_configfs *tf)
{
- struct config_group *su_group;
int ret;
- if (!(tf)) {
- printk(KERN_ERR "Unable to locate target_fabric_configfs"
+ if (!tf) {
+ pr_err("Unable to locate target_fabric_configfs"
" pointer\n");
return -EINVAL;
}
- if (!(tf->tf_subsys)) {
- printk(KERN_ERR "Unable to target struct config_subsystem"
- " pointer\n");
- return -EINVAL;
- }
- su_group = &tf->tf_subsys->su_group;
- if (!(su_group)) {
- printk(KERN_ERR "Unable to locate target struct config_group"
+ if (!tf->tf_subsys) {
+ pr_err("Unable to target struct config_subsystem"
" pointer\n");
return -EINVAL;
}
@@ -556,7 +547,7 @@ int target_fabric_configfs_register(
if (ret < 0)
return ret;
- printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
+ pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
">>>>>>>>>>\n");
return 0;
}
@@ -565,48 +556,39 @@ EXPORT_SYMBOL(target_fabric_configfs_register);
void target_fabric_configfs_deregister(
struct target_fabric_configfs *tf)
{
- struct config_group *su_group;
struct configfs_subsystem *su;
- if (!(tf)) {
- printk(KERN_ERR "Unable to locate passed target_fabric_"
+ if (!tf) {
+ pr_err("Unable to locate passed target_fabric_"
"configfs\n");
return;
}
su = tf->tf_subsys;
- if (!(su)) {
- printk(KERN_ERR "Unable to locate passed tf->tf_subsys"
+ if (!su) {
+ pr_err("Unable to locate passed tf->tf_subsys"
" pointer\n");
return;
}
- su_group = &tf->tf_subsys->su_group;
- if (!(su_group)) {
- printk(KERN_ERR "Unable to locate target struct config_group"
- " pointer\n");
- return;
- }
-
- printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
+ pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
">>>>>>>>>>>>\n");
mutex_lock(&g_tf_lock);
if (atomic_read(&tf->tf_access_cnt)) {
mutex_unlock(&g_tf_lock);
- printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n",
+ pr_err("Non zero tf->tf_access_cnt for fabric %s\n",
tf->tf_name);
BUG();
}
list_del(&tf->tf_list);
mutex_unlock(&g_tf_lock);
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
" %s\n", tf->tf_name);
tf->tf_module = NULL;
tf->tf_subsys = NULL;
kfree(tf);
- printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
+ pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
">>>>>\n");
- return;
}
EXPORT_SYMBOL(target_fabric_configfs_deregister);
@@ -627,11 +609,12 @@ static ssize_t target_core_dev_show_attr_##_name( \
\
spin_lock(&se_dev->se_dev_lock); \
dev = se_dev->se_dev_ptr; \
- if (!(dev)) { \
+ if (!dev) { \
spin_unlock(&se_dev->se_dev_lock); \
return -ENODEV; \
} \
- rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \
+ rb = snprintf(page, PAGE_SIZE, "%u\n", \
+ (u32)dev->se_sub_dev->se_dev_attrib._name); \
spin_unlock(&se_dev->se_dev_lock); \
\
return rb; \
@@ -650,14 +633,14 @@ static ssize_t target_core_dev_store_attr_##_name( \
\
spin_lock(&se_dev->se_dev_lock); \
dev = se_dev->se_dev_ptr; \
- if (!(dev)) { \
+ if (!dev) { \
spin_unlock(&se_dev->se_dev_lock); \
return -ENODEV; \
} \
ret = strict_strtoul(page, 0, &val); \
if (ret < 0) { \
spin_unlock(&se_dev->se_dev_lock); \
- printk(KERN_ERR "strict_strtoul() failed with" \
+ pr_err("strict_strtoul() failed with" \
" ret: %d\n", ret); \
return -EINVAL; \
} \
@@ -715,6 +698,12 @@ SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(enforce_pr_isids);
SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
+DEF_DEV_ATTRIB(is_nonrot);
+SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_rest_reord);
+SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
+
DEF_DEV_ATTRIB_RO(hw_block_size);
SE_DEV_ATTR_RO(hw_block_size);
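
Roughly what one DEF_DEV_ATTRIB() show accessor expands to for the new emulate_rest_reord attribute, per the macro bodies in the hunks above (parameter list abbreviated from context, locking as shown there):

static ssize_t sketch_show_emulate_rest_reord(struct se_subsystem_dev *se_dev,
					      char *page)
{
	struct se_device *dev;
	ssize_t rb;

	spin_lock(&se_dev->se_dev_lock);
	dev = se_dev->se_dev_ptr;
	if (!dev) {
		spin_unlock(&se_dev->se_dev_lock);
		return -ENODEV;		/* backend device not enabled yet */
	}
	rb = snprintf(page, PAGE_SIZE, "%u\n",
		      (u32)dev->se_sub_dev->se_dev_attrib.emulate_rest_reord);
	spin_unlock(&se_dev->se_dev_lock);

	return rb;
}
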
@@ -763,6 +752,8 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_emulate_tpu.attr,
&target_core_dev_attrib_emulate_tpws.attr,
&target_core_dev_attrib_enforce_pr_isids.attr,
+ &target_core_dev_attrib_is_nonrot.attr,
+ &target_core_dev_attrib_emulate_rest_reord.attr,
&target_core_dev_attrib_hw_block_size.attr,
&target_core_dev_attrib_block_size.attr,
&target_core_dev_attrib_hw_max_sectors.attr,
@@ -819,7 +810,7 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
struct se_device *dev;
dev = se_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
@@ -846,13 +837,13 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
* VPD Unit Serial Number that OS dependent multipath can depend on.
*/
if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
- printk(KERN_ERR "Underlying SCSI device firmware provided VPD"
+ pr_err("Underlying SCSI device firmware provided VPD"
" Unit Serial, ignoring request\n");
return -EOPNOTSUPP;
}
- if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) {
- printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
+ if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
+ pr_err("Emulated VPD Unit Serial exceeds"
" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
return -EOVERFLOW;
}
@@ -863,9 +854,9 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
* could cause negative effects.
*/
dev = su_dev->se_dev_ptr;
- if ((dev)) {
+ if (dev) {
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "Unable to set VPD Unit Serial while"
+ pr_err("Unable to set VPD Unit Serial while"
" active %d $FABRIC_MOD exports exist\n",
atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
@@ -883,7 +874,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
"%s", strstrip(buf));
su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
- printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
+ pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
" %s\n", su_dev->t10_wwn.unit_serial);
return count;
@@ -905,19 +896,19 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
ssize_t len = 0;
dev = se_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
memset(buf, 0, VPD_TMP_BUF_SIZE);
spin_lock(&t10_wwn->t10_vpd_lock);
list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
- if (!(vpd->protocol_identifier_set))
+ if (!vpd->protocol_identifier_set)
continue;
transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
- if ((len + strlen(buf) > PAGE_SIZE))
+ if (len + strlen(buf) >= PAGE_SIZE)
break;
len += sprintf(page+len, "%s", buf);
@@ -952,7 +943,7 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
ssize_t len = 0; \
\
dev = se_dev->se_dev_ptr; \
- if (!(dev)) \
+ if (!dev) \
return -ENODEV; \
\
spin_lock(&t10_wwn->t10_vpd_lock); \
@@ -962,19 +953,19 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) > PAGE_SIZE)) \
+ if (len + strlen(buf) >= PAGE_SIZE) \
break; \
len += sprintf(page+len, "%s", buf); \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) > PAGE_SIZE)) \
+ if (len + strlen(buf) >= PAGE_SIZE) \
break; \
len += sprintf(page+len, "%s", buf); \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) > PAGE_SIZE)) \
+ if (len + strlen(buf) >= PAGE_SIZE) \
break; \
len += sprintf(page+len, "%s", buf); \
} \
@@ -984,7 +975,7 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
}
/*
- * VPD page 0x83 Assoication: Logical Unit
+ * VPD page 0x83 Association: Logical Unit
*/
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
@@ -1083,7 +1074,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!(pr_reg)) {
+ if (!pr_reg) {
*len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return *len;
@@ -1093,7 +1084,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(
PR_REG_ISID_ID_LEN);
*len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
- TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+ se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
spin_unlock(&dev->dev_reservation_lock);
@@ -1109,13 +1100,13 @@ static ssize_t target_core_dev_pr_show_spc2_res(
spin_lock(&dev->dev_reservation_lock);
se_nacl = dev->dev_reserved_node_acl;
- if (!(se_nacl)) {
+ if (!se_nacl) {
*len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return *len;
}
*len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
- TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+ se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
se_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -1128,10 +1119,10 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(
{
ssize_t len = 0;
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- switch (T10_RES(su_dev)->res_type) {
+ switch (su_dev->t10_pr.res_type) {
case SPC3_PERSISTENT_RESERVATIONS:
target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
page, &len);
@@ -1165,15 +1156,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!(pr_reg)) {
+ if (!pr_reg) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return len;
@@ -1202,13 +1193,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
struct se_subsystem_dev *su_dev,
char *page)
{
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
- return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation);
+ return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation);
}
SE_DEV_PR_ATTR_RO(res_pr_generation);
@@ -1229,15 +1220,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!(pr_reg)) {
+ if (!pr_reg) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return len;
@@ -1245,7 +1236,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
se_nacl = pr_reg->pr_reg_nacl;
se_tpg = se_nacl->se_tpg;
lun = pr_reg->pr_reg_tg_pt_lun;
- tfo = TPG_TFO(se_tpg);
+ tfo = se_tpg->se_tpg_tfo;
len += sprintf(page+len, "SPC-3 Reservation: %s"
" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
@@ -1276,16 +1267,16 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
ssize_t len = 0;
int reg_count = 0, prf_isid;
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
len += sprintf(page+len, "SPC-3 PR Registrations:\n");
- spin_lock(&T10_RES(su_dev)->registration_lock);
- list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ spin_lock(&su_dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
pr_reg_list) {
memset(buf, 0, 384);
@@ -1299,15 +1290,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
&i_buf[0] : "", pr_reg->pr_res_key,
pr_reg->pr_res_generation);
- if ((len + strlen(buf) > PAGE_SIZE))
+ if (len + strlen(buf) >= PAGE_SIZE)
break;
len += sprintf(page+len, "%s", buf);
reg_count++;
}
- spin_unlock(&T10_RES(su_dev)->registration_lock);
+ spin_unlock(&su_dev->t10_pr.registration_lock);
- if (!(reg_count))
+ if (!reg_count)
len += sprintf(page+len, "None\n");
return len;
@@ -1327,15 +1318,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type(
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!(pr_reg)) {
+ if (!pr_reg) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return len;
@@ -1358,10 +1349,10 @@ static ssize_t target_core_dev_pr_show_attr_res_type(
{
ssize_t len = 0;
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- switch (T10_RES(su_dev)->res_type) {
+ switch (su_dev->t10_pr.res_type) {
case SPC3_PERSISTENT_RESERVATIONS:
len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
break;
@@ -1389,14 +1380,14 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
struct se_subsystem_dev *su_dev,
char *page)
{
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return sprintf(page, "APTPL Bit Status: %s\n",
- (T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled");
+ (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}
SE_DEV_PR_ATTR_RO(res_aptpl_active);
@@ -1408,10 +1399,10 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
struct se_subsystem_dev *su_dev,
char *page)
{
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1460,14 +1451,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
u8 type = 0, scope;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_INFO "Unable to process APTPL metadata while"
+ pr_debug("Unable to process APTPL metadata while"
" active fabric exports exist\n");
return -EINVAL;
}
@@ -1496,8 +1487,8 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
ret = -ENOMEM;
goto out;
}
- if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
- printk(KERN_ERR "APTPL metadata initiator_node="
+ if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
+ pr_err("APTPL metadata initiator_node="
" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
PR_APTPL_MAX_IPORT_LEN);
ret = -EINVAL;
@@ -1510,8 +1501,8 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
ret = -ENOMEM;
goto out;
}
- if (strlen(isid) > PR_REG_ISID_LEN) {
- printk(KERN_ERR "APTPL metadata initiator_isid"
+ if (strlen(isid) >= PR_REG_ISID_LEN) {
+ pr_err("APTPL metadata initiator_isid"
"= exceeds PR_REG_ISID_LEN: %d\n",
PR_REG_ISID_LEN);
ret = -EINVAL;
@@ -1526,7 +1517,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
}
ret = strict_strtoull(arg_p, 0, &tmp_ll);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoull() failed for"
+ pr_err("strict_strtoull() failed for"
" sa_res_key=\n");
goto out;
}
@@ -1571,8 +1562,8 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
ret = -ENOMEM;
goto out;
}
- if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
- printk(KERN_ERR "APTPL metadata target_node="
+ if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
+ pr_err("APTPL metadata target_node="
" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
PR_APTPL_MAX_TPORT_LEN);
ret = -EINVAL;
@@ -1596,20 +1587,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
}
}
- if (!(i_port) || !(t_port) || !(sa_res_key)) {
- printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+ if (!i_port || !t_port || !sa_res_key) {
+ pr_err("Illegal parameters for APTPL registration\n");
ret = -EINVAL;
goto out;
}
if (res_holder && !(type)) {
- printk(KERN_ERR "Illegal PR type: 0x%02x for reservation"
+ pr_err("Illegal PR type: 0x%02x for reservation"
" holder\n", type);
ret = -EINVAL;
goto out;
}
- ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key,
+ ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key,
i_port, isid, mapped_lun, t_port, tpgt, target_lun,
res_holder, all_tg_pt, type);
out:
@@ -1662,7 +1653,7 @@ static ssize_t target_core_show_dev_info(void *p, char *page)
int bl = 0;
ssize_t read_bytes = 0;
- if (!(se_dev->se_dev_ptr))
+ if (!se_dev->se_dev_ptr)
return -ENODEV;
transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
@@ -1688,8 +1679,8 @@ static ssize_t target_core_store_dev_control(
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
- if (!(se_dev->se_dev_su_ptr)) {
- printk(KERN_ERR "Unable to locate struct se_subsystem_dev>se"
+ if (!se_dev->se_dev_su_ptr) {
+ pr_err("Unable to locate struct se_subsystem_dev>se"
"_dev_su_ptr\n");
return -EINVAL;
}
@@ -1725,7 +1716,7 @@ static ssize_t target_core_store_dev_alias(
ssize_t read_bytes;
if (count > (SE_DEV_ALIAS_LEN-1)) {
- printk(KERN_ERR "alias count: %d exceeds"
+ pr_err("alias count: %d exceeds"
" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
SE_DEV_ALIAS_LEN-1);
return -EINVAL;
@@ -1735,7 +1726,7 @@ static ssize_t target_core_store_dev_alias(
read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
"%s", page);
- printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n",
+ pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&se_dev->se_dev_group.cg_item),
se_dev->se_dev_alias);
@@ -1771,7 +1762,7 @@ static ssize_t target_core_store_dev_udev_path(
ssize_t read_bytes;
if (count > (SE_UDEV_PATH_LEN-1)) {
- printk(KERN_ERR "udev_path count: %d exceeds"
+ pr_err("udev_path count: %d exceeds"
" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
SE_UDEV_PATH_LEN-1);
return -EINVAL;
@@ -1781,7 +1772,7 @@ static ssize_t target_core_store_dev_udev_path(
read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
"%s", page);
- printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
+ pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&se_dev->se_dev_group.cg_item),
se_dev->se_dev_udev_path);
@@ -1809,13 +1800,13 @@ static ssize_t target_core_store_dev_enable(
char *ptr;
ptr = strstr(page, "1");
- if (!(ptr)) {
- printk(KERN_ERR "For dev_enable ops, only valid value"
+ if (!ptr) {
+ pr_err("For dev_enable ops, only valid value"
" is \"1\"\n");
return -EINVAL;
}
- if ((se_dev->se_dev_ptr)) {
- printk(KERN_ERR "se_dev->se_dev_ptr already set for storage"
+ if (se_dev->se_dev_ptr) {
+ pr_err("se_dev->se_dev_ptr already set for storage"
" object\n");
return -EEXIST;
}
@@ -1830,7 +1821,7 @@ static ssize_t target_core_store_dev_enable(
return -EINVAL;
se_dev->se_dev_ptr = dev;
- printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
+ pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
" %p\n", se_dev->se_dev_ptr);
return count;
@@ -1854,22 +1845,22 @@ static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED)
+ if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
return len;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!(lu_gp_mem)) {
- printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+ if (!lu_gp_mem) {
+ pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
" pointer\n");
return -EINVAL;
}
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
- if ((lu_gp)) {
+ if (lu_gp) {
lu_ci = &lu_gp->lu_gp_group.cg_item;
len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
config_item_name(lu_ci), lu_gp->lu_gp_id);
@@ -1893,17 +1884,17 @@ static ssize_t target_core_store_alua_lu_gp(
int move = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
- printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n",
+ if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
+ pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&su_dev->se_dev_group.cg_item));
return -EINVAL;
}
if (count > LU_GROUP_NAME_BUF) {
- printk(KERN_ERR "ALUA LU Group Alias too large!\n");
+ pr_err("ALUA LU Group Alias too large!\n");
return -EINVAL;
}
memset(buf, 0, LU_GROUP_NAME_BUF);
@@ -1919,27 +1910,27 @@ static ssize_t target_core_store_alua_lu_gp(
* core_alua_get_lu_gp_by_name below().
*/
lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
- if (!(lu_gp_new))
+ if (!lu_gp_new)
return -ENODEV;
}
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!(lu_gp_mem)) {
+ if (!lu_gp_mem) {
if (lu_gp_new)
core_alua_put_lu_gp_from_name(lu_gp_new);
- printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+ pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
" pointer\n");
return -EINVAL;
}
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
- if ((lu_gp)) {
+ if (lu_gp) {
/*
* Clearing an existing lu_gp association, and replacing
* with NULL
*/
- if (!(lu_gp_new)) {
- printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s"
+ if (!lu_gp_new) {
+ pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
" %hu\n",
config_item_name(&hba->hba_group.cg_item),
@@ -1964,7 +1955,7 @@ static ssize_t target_core_store_alua_lu_gp(
__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
- printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
+ pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
" core/alua/lu_gps/%s, ID: %hu\n",
(move) ? "Moving" : "Adding",
config_item_name(&hba->hba_group.cg_item),
@@ -2008,7 +1999,7 @@ static void target_core_dev_release(struct config_item *item)
*`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
*/
if (se_dev->se_dev_ptr) {
- printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+ pr_debug("Target_Core_ConfigFS: Calling se_free_"
"virtual_device() for se_dev_ptr: %p\n",
se_dev->se_dev_ptr);
@@ -2017,14 +2008,14 @@ static void target_core_dev_release(struct config_item *item)
/*
* Release struct se_subsystem_dev->se_dev_su_ptr..
*/
- printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+ pr_debug("Target_Core_ConfigFS: Calling t->free_"
"device() for se_dev_su_ptr: %p\n",
se_dev->se_dev_su_ptr);
t->free_device(se_dev->se_dev_su_ptr);
}
- printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
+ pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
"_dev_t: %p\n", se_dev);
kfree(se_dev);
}
@@ -2039,10 +2030,10 @@ static ssize_t target_core_dev_show(struct config_item *item,
struct target_core_configfs_attribute *tc_attr = container_of(
attr, struct target_core_configfs_attribute, attr);
- if (!(tc_attr->show))
+ if (!tc_attr->show)
return -EINVAL;
- return tc_attr->show((void *)se_dev, page);
+ return tc_attr->show(se_dev, page);
}
static ssize_t target_core_dev_store(struct config_item *item,
@@ -2055,10 +2046,10 @@ static ssize_t target_core_dev_store(struct config_item *item,
struct target_core_configfs_attribute *tc_attr = container_of(
attr, struct target_core_configfs_attribute, attr);
- if (!(tc_attr->store))
+ if (!tc_attr->store)
return -EINVAL;
- return tc_attr->store((void *)se_dev, page, count);
+ return tc_attr->store(se_dev, page, count);
}
static struct configfs_item_operations target_core_dev_item_ops = {
@@ -2098,7 +2089,7 @@ static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
struct t10_alua_lu_gp *lu_gp,
char *page)
{
- if (!(lu_gp->lu_gp_valid_id))
+ if (!lu_gp->lu_gp_valid_id)
return 0;
return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
@@ -2115,12 +2106,12 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
ret = strict_strtoul(page, 0, &lu_gp_id);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoul() returned %d for"
+ pr_err("strict_strtoul() returned %d for"
" lu_gp_id\n", ret);
return -EINVAL;
}
if (lu_gp_id > 0x0000ffff) {
- printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:"
+ pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
" 0x0000ffff\n", lu_gp_id);
return -EINVAL;
}
@@ -2129,7 +2120,7 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
if (ret < 0)
return -EINVAL;
- printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit"
+ pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
" Group: core/alua/lu_gps/%s to ID: %hu\n",
config_item_name(&alua_lu_gp_cg->cg_item),
lu_gp->lu_gp_id);
@@ -2167,7 +2158,7 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) {
- printk(KERN_WARNING "Ran out of lu_gp_show_attr"
+ pr_warn("Ran out of lu_gp_show_attr"
"_members buffer\n");
break;
}
@@ -2231,7 +2222,7 @@ static struct config_group *target_core_alua_create_lu_gp(
config_group_init_type_name(alua_lu_gp_cg, name,
&target_core_alua_lu_gp_cit);
- printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit"
+ pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
" Group: core/alua/lu_gps/%s\n",
config_item_name(alua_lu_gp_ci));
@@ -2246,7 +2237,7 @@ static void target_core_alua_drop_lu_gp(
struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
struct t10_alua_lu_gp, lu_gp_group);
- printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
+ pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
" Group: core/alua/lu_gps/%s, ID: %hu\n",
config_item_name(item), lu_gp->lu_gp_id);
/*
@@ -2305,22 +2296,22 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
unsigned long tmp;
int new_state, ret;
- if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
- printk(KERN_ERR "Unable to do implict ALUA on non valid"
+ if (!tg_pt_gp->tg_pt_gp_valid_id) {
+ pr_err("Unable to do implict ALUA on non valid"
" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk("Unable to extract new ALUA access state from"
+ pr_err("Unable to extract new ALUA access state from"
" %s\n", page);
return -EINVAL;
}
new_state = (int)tmp;
if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
- printk(KERN_ERR "Unable to process implict configfs ALUA"
+ pr_err("Unable to process implict configfs ALUA"
" transition while TPGS_IMPLICT_ALUA is diabled\n");
return -EINVAL;
}
@@ -2351,8 +2342,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
unsigned long tmp;
int new_status, ret;
- if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
- printk(KERN_ERR "Unable to do set ALUA access status on non"
+ if (!tg_pt_gp->tg_pt_gp_valid_id) {
+ pr_err("Unable to do set ALUA access status on non"
" valid tg_pt_gp ID: %hu\n",
tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
@@ -2360,7 +2351,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract new ALUA access status"
+ pr_err("Unable to extract new ALUA access status"
" from %s\n", page);
return -EINVAL;
}
@@ -2369,7 +2360,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
if ((new_status != ALUA_STATUS_NONE) &&
(new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
(new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
- printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n",
+ pr_err("Illegal ALUA access status: 0x%02x\n",
new_status);
return -EINVAL;
}
@@ -2420,12 +2411,12 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_write_metadata\n");
+ pr_err("Unable to extract alua_write_metadata\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for alua_write_metadata:"
+ pr_err("Illegal value for alua_write_metadata:"
" %lu\n", tmp);
return -EINVAL;
}
@@ -2507,7 +2498,7 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
- if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
return 0;
return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
@@ -2524,12 +2515,12 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
ret = strict_strtoul(page, 0, &tg_pt_gp_id);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoul() returned %d for"
+ pr_err("strict_strtoul() returned %d for"
" tg_pt_gp_id\n", ret);
return -EINVAL;
}
if (tg_pt_gp_id > 0x0000ffff) {
- printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:"
+ pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
" 0x0000ffff\n", tg_pt_gp_id);
return -EINVAL;
}
@@ -2538,7 +2529,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
if (ret < 0)
return -EINVAL;
- printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: "
+ pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
"core/alua/tg_pt_gps/%s to ID: %hu\n",
config_item_name(&alua_tg_pt_gp_cg->cg_item),
tg_pt_gp->tg_pt_gp_id);
@@ -2572,14 +2563,14 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
lun = port->sep_lun;
cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
- "/%s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ "/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) {
- printk(KERN_WARNING "Ran out of lu_gp_show_attr"
+ pr_warn("Ran out of lu_gp_show_attr"
"_members buffer\n");
break;
}
@@ -2645,7 +2636,7 @@ static struct config_group *target_core_alua_create_tg_pt_gp(
struct config_item *alua_tg_pt_gp_ci = NULL;
tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
- if (!(tg_pt_gp))
+ if (!tg_pt_gp)
return NULL;
alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
@@ -2654,7 +2645,7 @@ static struct config_group *target_core_alua_create_tg_pt_gp(
config_group_init_type_name(alua_tg_pt_gp_cg, name,
&target_core_alua_tg_pt_gp_cit);
- printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port"
+ pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
" Group: alua/tg_pt_gps/%s\n",
config_item_name(alua_tg_pt_gp_ci));
@@ -2668,7 +2659,7 @@ static void target_core_alua_drop_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
struct t10_alua_tg_pt_gp, tg_pt_gp_group);
- printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
+ pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
" Group: alua/tg_pt_gps/%s, ID: %hu\n",
config_item_name(item), tg_pt_gp->tg_pt_gp_id);
/*
@@ -2759,21 +2750,21 @@ static struct config_group *target_core_make_subdev(
se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
if (!se_dev) {
- printk(KERN_ERR "Unable to allocate memory for"
+ pr_err("Unable to allocate memory for"
" struct se_subsystem_dev\n");
goto unlock;
}
- INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+ INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
- INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
- INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
- spin_lock_init(&se_dev->t10_reservation.registration_lock);
- spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+ INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
+ INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
+ spin_lock_init(&se_dev->t10_pr.registration_lock);
+ spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
spin_lock_init(&se_dev->se_dev_lock);
- se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+ se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
se_dev->t10_wwn.t10_sub_dev = se_dev;
se_dev->t10_alua.t10_sub_dev = se_dev;
se_dev->se_dev_attrib.da_sub_dev = se_dev;
@@ -2783,7 +2774,7 @@ static struct config_group *target_core_make_subdev(
dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
GFP_KERNEL);
- if (!(dev_cg->default_groups))
+ if (!dev_cg->default_groups)
goto out;
/*
* Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
@@ -2794,14 +2785,14 @@ static struct config_group *target_core_make_subdev(
* configfs tree for device object's struct config_group.
*/
se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
- if (!(se_dev->se_dev_su_ptr)) {
- printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+ if (!se_dev->se_dev_su_ptr) {
+ pr_err("Unable to locate subsystem dependent pointer"
" from allocate_virtdevice()\n");
goto out;
}
- spin_lock(&se_global->g_device_lock);
- list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list);
- spin_unlock(&se_global->g_device_lock);
+ spin_lock(&se_device_lock);
+ list_add_tail(&se_dev->se_dev_node, &se_dev_list);
+ spin_unlock(&se_device_lock);
config_group_init_type_name(&se_dev->se_dev_group, name,
&target_core_dev_cit);
@@ -2826,14 +2817,14 @@ static struct config_group *target_core_make_subdev(
* Add core/$HBA/$DEV/alua/default_tg_pt_gp
*/
tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
- if (!(tg_pt_gp))
+ if (!tg_pt_gp)
goto out;
- tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+ tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(tg_pt_gp_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->"
+ if (!tg_pt_gp_cg->default_groups) {
+ pr_err("Unable to allocate tg_pt_gp_cg->"
"default_groups\n");
goto out;
}
@@ -2842,28 +2833,28 @@ static struct config_group *target_core_make_subdev(
"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
tg_pt_gp_cg->default_groups[1] = NULL;
- T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp;
+ se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
/*
* Add core/$HBA/$DEV/statistics/ default groups
*/
- dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group;
+ dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
GFP_KERNEL);
if (!dev_stat_grp->default_groups) {
- printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n");
+ pr_err("Unable to allocate dev_stat_grp->default_groups\n");
goto out;
}
target_stat_setup_dev_default_groups(se_dev);
- printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
+ pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
" %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
mutex_unlock(&hba->hba_access_mutex);
return &se_dev->se_dev_group;
out:
- if (T10_ALUA(se_dev)->default_tg_pt_gp) {
- core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
- T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+ if (se_dev->t10_alua.default_tg_pt_gp) {
+ core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp);
+ se_dev->t10_alua.default_tg_pt_gp = NULL;
}
if (dev_stat_grp)
kfree(dev_stat_grp->default_groups);
@@ -2896,11 +2887,11 @@ static void target_core_drop_subdev(
mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
- spin_lock(&se_global->g_device_lock);
- list_del(&se_dev->g_se_dev_list);
- spin_unlock(&se_global->g_device_lock);
+ spin_lock(&se_device_lock);
+ list_del(&se_dev->se_dev_node);
+ spin_unlock(&se_device_lock);
- dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group;
+ dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
dev_stat_grp->default_groups[i] = NULL;
@@ -2908,7 +2899,7 @@ static void target_core_drop_subdev(
}
kfree(dev_stat_grp->default_groups);
- tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+ tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
tg_pt_gp_cg->default_groups[i] = NULL;
@@ -2919,7 +2910,7 @@ static void target_core_drop_subdev(
* core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
* directly from target_core_alua_tg_pt_gp_release().
*/
- T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+ se_dev->t10_alua.default_tg_pt_gp = NULL;
dev_cg = &se_dev->se_dev_group;
for (i = 0; dev_cg->default_groups[i]; i++) {
@@ -2988,13 +2979,13 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
ret = strict_strtoul(page, 0, &mode_flag);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret);
+ pr_err("Unable to extract hba mode flag: %d\n", ret);
return -EINVAL;
}
spin_lock(&hba->device_lock);
- if (!(list_empty(&hba->hba_dev_list))) {
- printk(KERN_ERR "Unable to set hba_mode with active devices\n");
+ if (!list_empty(&hba->hba_dev_list)) {
+ pr_err("Unable to set hba_mode with active devices\n");
spin_unlock(&hba->device_lock);
return -EINVAL;
}
@@ -3052,8 +3043,8 @@ static struct config_group *target_core_call_addhbatotarget(
int ret;
memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
- if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) {
- printk(KERN_ERR "Passed *name strlen(): %d exceeds"
+ if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
+ pr_err("Passed *name strlen(): %d exceeds"
" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
TARGET_CORE_NAME_MAX_LEN);
return ERR_PTR(-ENAMETOOLONG);
@@ -3061,8 +3052,8 @@ static struct config_group *target_core_call_addhbatotarget(
snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
str = strstr(buf, "_");
- if (!(str)) {
- printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
+ if (!str) {
+ pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
return ERR_PTR(-EINVAL);
}
se_plugin_str = buf;
@@ -3071,7 +3062,7 @@ static struct config_group *target_core_call_addhbatotarget(
* Namely rd_direct and rd_mcp..
*/
str2 = strstr(str+1, "_");
- if ((str2)) {
+ if (str2) {
*str2 = '\0'; /* Terminate for *se_plugin_str */
str2++; /* Skip to start of plugin dependent ID */
str = str2;
@@ -3082,7 +3073,7 @@ static struct config_group *target_core_call_addhbatotarget(
ret = strict_strtoul(str, 0, &plugin_dep_id);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoul() returned %d for"
+ pr_err("strict_strtoul() returned %d for"
" plugin_dep_id\n", ret);
return ERR_PTR(-EINVAL);
}
@@ -3135,7 +3126,7 @@ static int __init target_core_init_configfs(void)
struct t10_alua_lu_gp *lu_gp;
int ret;
- printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage"
+ pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
" Engine: %s on %s/%s on "UTS_RELEASE"\n",
TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
@@ -3145,10 +3136,9 @@ static int __init target_core_init_configfs(void)
INIT_LIST_HEAD(&g_tf_list);
mutex_init(&g_tf_lock);
- init_scsi_index_table();
- ret = init_se_global();
+ ret = init_se_kmem_caches();
if (ret < 0)
- return -1;
+ return ret;
/*
* Create $CONFIGFS/target/core default group for HBA <-> Storage Object
* and ALUA Logical Unit Group and Target Port Group infrastructure.
@@ -3156,44 +3146,44 @@ static int __init target_core_init_configfs(void)
target_cg = &subsys->su_group;
target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(target_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate target_cg->default_groups\n");
+ if (!target_cg->default_groups) {
+ pr_err("Unable to allocate target_cg->default_groups\n");
goto out_global;
}
- config_group_init_type_name(&se_global->target_core_hbagroup,
+ config_group_init_type_name(&target_core_hbagroup,
"core", &target_core_cit);
- target_cg->default_groups[0] = &se_global->target_core_hbagroup;
+ target_cg->default_groups[0] = &target_core_hbagroup;
target_cg->default_groups[1] = NULL;
/*
* Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
*/
- hba_cg = &se_global->target_core_hbagroup;
+ hba_cg = &target_core_hbagroup;
hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(hba_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n");
+ if (!hba_cg->default_groups) {
+ pr_err("Unable to allocate hba_cg->default_groups\n");
goto out_global;
}
- config_group_init_type_name(&se_global->alua_group,
+ config_group_init_type_name(&alua_group,
"alua", &target_core_alua_cit);
- hba_cg->default_groups[0] = &se_global->alua_group;
+ hba_cg->default_groups[0] = &alua_group;
hba_cg->default_groups[1] = NULL;
/*
* Add ALUA Logical Unit Group and Target Port Group ConfigFS
* groups under /sys/kernel/config/target/core/alua/
*/
- alua_cg = &se_global->alua_group;
+ alua_cg = &alua_group;
alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(alua_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n");
+ if (!alua_cg->default_groups) {
+ pr_err("Unable to allocate alua_cg->default_groups\n");
goto out_global;
}
- config_group_init_type_name(&se_global->alua_lu_gps_group,
+ config_group_init_type_name(&alua_lu_gps_group,
"lu_gps", &target_core_alua_lu_gps_cit);
- alua_cg->default_groups[0] = &se_global->alua_lu_gps_group;
+ alua_cg->default_groups[0] = &alua_lu_gps_group;
alua_cg->default_groups[1] = NULL;
/*
* Add core/alua/lu_gps/default_lu_gp
@@ -3202,11 +3192,11 @@ static int __init target_core_init_configfs(void)
if (IS_ERR(lu_gp))
goto out_global;
- lu_gp_cg = &se_global->alua_lu_gps_group;
+ lu_gp_cg = &alua_lu_gps_group;
lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(lu_gp_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n");
+ if (!lu_gp_cg->default_groups) {
+ pr_err("Unable to allocate lu_gp_cg->default_groups\n");
goto out_global;
}
@@ -3214,17 +3204,17 @@ static int __init target_core_init_configfs(void)
&target_core_alua_lu_gp_cit);
lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
lu_gp_cg->default_groups[1] = NULL;
- se_global->default_lu_gp = lu_gp;
+ default_lu_gp = lu_gp;
/*
* Register the target_core_mod subsystem with configfs.
*/
ret = configfs_register_subsystem(subsys);
if (ret < 0) {
- printk(KERN_ERR "Error %d while registering subsystem %s\n",
+ pr_err("Error %d while registering subsystem %s\n",
ret, subsys->su_group.cg_item.ci_namebuf);
goto out_global;
}
- printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric"
+ pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
" Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
/*
@@ -3244,9 +3234,9 @@ out:
core_dev_release_virtual_lun0();
rd_module_exit();
out_global:
- if (se_global->default_lu_gp) {
- core_alua_free_lu_gp(se_global->default_lu_gp);
- se_global->default_lu_gp = NULL;
+ if (default_lu_gp) {
+ core_alua_free_lu_gp(default_lu_gp);
+ default_lu_gp = NULL;
}
if (lu_gp_cg)
kfree(lu_gp_cg->default_groups);
@@ -3255,8 +3245,8 @@ out_global:
if (hba_cg)
kfree(hba_cg->default_groups);
kfree(target_cg->default_groups);
- release_se_global();
- return -1;
+ release_se_kmem_caches();
+ return ret;
}
static void __exit target_core_exit_configfs(void)
@@ -3266,10 +3256,9 @@ static void __exit target_core_exit_configfs(void)
struct config_item *item;
int i;
- se_global->in_shutdown = 1;
subsys = target_core_subsystem[0];
- lu_gp_cg = &se_global->alua_lu_gps_group;
+ lu_gp_cg = &alua_lu_gps_group;
for (i = 0; lu_gp_cg->default_groups[i]; i++) {
item = &lu_gp_cg->default_groups[i]->cg_item;
lu_gp_cg->default_groups[i] = NULL;
@@ -3278,7 +3267,7 @@ static void __exit target_core_exit_configfs(void)
kfree(lu_gp_cg->default_groups);
lu_gp_cg->default_groups = NULL;
- alua_cg = &se_global->alua_group;
+ alua_cg = &alua_group;
for (i = 0; alua_cg->default_groups[i]; i++) {
item = &alua_cg->default_groups[i]->cg_item;
alua_cg->default_groups[i] = NULL;
@@ -3287,7 +3276,7 @@ static void __exit target_core_exit_configfs(void)
kfree(alua_cg->default_groups);
alua_cg->default_groups = NULL;
- hba_cg = &se_global->target_core_hbagroup;
+ hba_cg = &target_core_hbagroup;
for (i = 0; hba_cg->default_groups[i]; i++) {
item = &hba_cg->default_groups[i]->cg_item;
hba_cg->default_groups[i] = NULL;
@@ -3302,17 +3291,15 @@ static void __exit target_core_exit_configfs(void)
configfs_unregister_subsystem(subsys);
kfree(subsys->su_group.default_groups);
- core_alua_free_lu_gp(se_global->default_lu_gp);
- se_global->default_lu_gp = NULL;
+ core_alua_free_lu_gp(default_lu_gp);
+ default_lu_gp = NULL;
- printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
+ pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
" Infrastructure\n");
core_dev_release_virtual_lun0();
rd_module_exit();
- release_se_global();
-
- return;
+ release_se_kmem_caches();
}
MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8407f9ca2b3..b38b6c993e6 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1,7 +1,7 @@
/*******************************************************************************
* Filename: target_core_device.c (based on iscsi_target_device.c)
*
- * This file contains the iSCSI Virtual Device and Disk Transport
+ * This file contains the TCM Virtual Device and Disk Transport
* agnostic related functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
@@ -54,176 +54,183 @@
static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);
-int transport_get_lun_for_cmd(
- struct se_cmd *se_cmd,
- unsigned char *cdb,
- u32 unpacked_lun)
+static struct se_hba *lun0_hba;
+static struct se_subsystem_dev *lun0_su_dev;
+/* not static, needed by tpg.c */
+struct se_device *g_lun0_dev;
+
+int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
- struct se_dev_entry *deve;
struct se_lun *se_lun = NULL;
- struct se_session *se_sess = SE_SESS(se_cmd);
+ struct se_session *se_sess = se_cmd->se_sess;
+ struct se_device *dev;
unsigned long flags;
- int read_only = 0;
- spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
- deve = se_cmd->se_deve =
- &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
- if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- if (se_cmd) {
- deve->total_cmds++;
- deve->total_bytes += se_cmd->data_length;
-
- if (se_cmd->data_direction == DMA_TO_DEVICE) {
- if (deve->lun_flags &
- TRANSPORT_LUNFLAGS_READ_ONLY) {
- read_only = 1;
- goto out;
- }
- deve->write_bytes += se_cmd->data_length;
- } else if (se_cmd->data_direction ==
- DMA_FROM_DEVICE) {
- deve->read_bytes += se_cmd->data_length;
- }
+ if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
+ se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
+ se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+ if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+ struct se_dev_entry *deve = se_cmd->se_deve;
+
+ deve->total_cmds++;
+ deve->total_bytes += se_cmd->data_length;
+
+ if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+ (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+ se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+ " Access for 0x%08x\n",
+ se_cmd->se_tfo->get_fabric_name(),
+ unpacked_lun);
+ spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+ return -EACCES;
}
+
+ if (se_cmd->data_direction == DMA_TO_DEVICE)
+ deve->write_bytes += se_cmd->data_length;
+ else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+ deve->read_bytes += se_cmd->data_length;
+
deve->deve_cmds++;
- se_lun = se_cmd->se_lun = deve->se_lun;
+ se_lun = deve->se_lun;
+ se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+ se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
-out:
- spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
if (!se_lun) {
- if (read_only) {
- se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ /*
+ * Use the se_portal_group->tpg_virt_lun0 to allow for
+ * REPORT_LUNS, et al to be returned when no active
+ * MappedLUN=0 exists for this Initiator Port.
+ */
+ if (unpacked_lun != 0) {
+ se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+ pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08x\n",
- CMD_TFO(se_cmd)->get_fabric_name(),
+ se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
- return -1;
- } else {
- /*
- * Use the se_portal_group->tpg_virt_lun0 to allow for
- * REPORT_LUNS, et al to be returned when no active
- * MappedLUN=0 exists for this Initiator Port.
- */
- if (unpacked_lun != 0) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
- " Access for 0x%08x\n",
- CMD_TFO(se_cmd)->get_fabric_name(),
- unpacked_lun);
- return -1;
- }
- /*
- * Force WRITE PROTECT for virtual LUN 0
- */
- if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
- (se_cmd->data_direction != DMA_NONE)) {
- se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -1;
- }
-#if 0
- printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
- CMD_TFO(se_cmd)->get_fabric_name());
-#endif
- se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
- se_cmd->orig_fe_lun = 0;
- se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
- se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+ return -ENODEV;
}
+ /*
+ * Force WRITE PROTECT for virtual LUN 0
+ */
+ if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+ (se_cmd->data_direction != DMA_NONE)) {
+ se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -EACCES;
+ }
+
+ se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+ se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+ se_cmd->orig_fe_lun = 0;
+ se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
+ se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
/*
* Determine if the struct se_lun is online.
+ * FIXME: Check for LUN_RESET + UNIT Attention
*/
-/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -1;
+ return -ENODEV;
}
- {
- struct se_device *dev = se_lun->lun_se_dev;
- spin_lock_irq(&dev->stats_lock);
+ /* Directly associate cmd with se_dev */
+ se_cmd->se_dev = se_lun->lun_se_dev;
+
+ /* TODO: get rid of this and use atomics for stats */
+ dev = se_lun->lun_se_dev;
+ spin_lock_irqsave(&dev->stats_lock, flags);
dev->num_cmds++;
if (se_cmd->data_direction == DMA_TO_DEVICE)
dev->write_bytes += se_cmd->data_length;
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
dev->read_bytes += se_cmd->data_length;
- spin_unlock_irq(&dev->stats_lock);
- }
+ spin_unlock_irqrestore(&dev->stats_lock, flags);
/*
* Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used
* for tracking state of struct se_cmds during LUN shutdown events.
*/
spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
- list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
- atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
-#if 0
- printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
- CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
-#endif
+ list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
+ atomic_set(&se_cmd->transport_lun_active, 1);
spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
return 0;
}
-EXPORT_SYMBOL(transport_get_lun_for_cmd);
+EXPORT_SYMBOL(transport_lookup_cmd_lun);
-int transport_get_lun_for_tmr(
- struct se_cmd *se_cmd,
- u32 unpacked_lun)
+int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
- struct se_device *dev = NULL;
struct se_dev_entry *deve;
struct se_lun *se_lun = NULL;
- struct se_session *se_sess = SE_SESS(se_cmd);
+ struct se_session *se_sess = se_cmd->se_sess;
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+ unsigned long flags;
+
+ if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
+ se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
+ se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+ deve = se_cmd->se_deve;
- spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
- deve = se_cmd->se_deve =
- &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
- dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
+ se_tmr->tmr_lun = deve->se_lun;
+ se_cmd->se_lun = deve->se_lun;
+ se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
-/* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
+ se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
}
- spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
if (!se_lun) {
- printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+ pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08x\n",
- CMD_TFO(se_cmd)->get_fabric_name(),
+ se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -1;
+ return -ENODEV;
}
/*
* Determine if the struct se_lun is online.
+ * FIXME: Check for LUN_RESET + UNIT Attention
*/
-/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -1;
+ return -ENODEV;
}
- spin_lock(&dev->se_tmr_lock);
- list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
- spin_unlock(&dev->se_tmr_lock);
+ /* Directly associate cmd with se_dev */
+ se_cmd->se_dev = se_lun->lun_se_dev;
+ se_tmr->tmr_dev = se_lun->lun_se_dev;
+
+ spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
+ list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
+ spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
return 0;
}
-EXPORT_SYMBOL(transport_get_lun_for_tmr);
+EXPORT_SYMBOL(transport_lookup_tmr_lun);
/*
* This function is called from core_scsi3_emulate_pro_register_and_move()
@@ -248,17 +255,17 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
continue;
lun = deve->se_lun;
- if (!(lun)) {
- printk(KERN_ERR "%s device entries device pointer is"
+ if (!lun) {
+ pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
continue;
}
port = lun->lun_sep;
- if (!(port)) {
- printk(KERN_ERR "%s device entries device pointer is"
+ if (!port) {
+ pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
continue;
}
if (port->sep_rtpi != rtpi)
@@ -294,9 +301,9 @@ int core_free_device_list_for_node(
continue;
if (!deve->se_lun) {
- printk(KERN_ERR "%s device entries device pointer is"
+ pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
continue;
}
lun = deve->se_lun;
@@ -322,8 +329,6 @@ void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
deve->deve_cmds--;
spin_unlock_irq(&se_nacl->device_list_lock);
-
- return;
}
void core_update_device_list_access(
@@ -343,8 +348,6 @@ void core_update_device_list_access(
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
}
spin_unlock_irq(&nacl->device_list_lock);
-
- return;
}
/* core_update_device_list_for_node():
@@ -369,7 +372,7 @@ int core_update_device_list_for_node(
* struct se_dev_entry pointers below as logic in
* core_alua_do_transition_tg_pt() depends on these being present.
*/
- if (!(enable)) {
+ if (!enable) {
/*
* deve->se_lun_acl will be NULL for demo-mode created LUNs
* that have not been explicitly converted to MappedLUNs ->
@@ -392,18 +395,18 @@ int core_update_device_list_for_node(
*/
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
if (deve->se_lun_acl != NULL) {
- printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
+ pr_err("struct se_dev_entry->se_lun_acl"
" already set for demo mode -> explict"
" LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
- return -1;
+ return -EINVAL;
}
if (deve->se_lun != lun) {
- printk(KERN_ERR "struct se_dev_entry->se_lun does"
+ pr_err("struct se_dev_entry->se_lun does"
" match passed struct se_lun for demo mode"
" -> explict LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
- return -1;
+ return -EINVAL;
}
deve->se_lun_acl = lun_acl;
trans = 1;
@@ -491,8 +494,6 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
spin_lock_bh(&tpg->acl_node_lock);
}
spin_unlock_bh(&tpg->acl_node_lock);
-
- return;
}
static struct se_port *core_alloc_port(struct se_device *dev)
@@ -500,9 +501,9 @@ static struct se_port *core_alloc_port(struct se_device *dev)
struct se_port *port, *port_tmp;
port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
- if (!(port)) {
- printk(KERN_ERR "Unable to allocate struct se_port\n");
- return NULL;
+ if (!port) {
+ pr_err("Unable to allocate struct se_port\n");
+ return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&port->sep_alua_list);
INIT_LIST_HEAD(&port->sep_list);
@@ -512,10 +513,10 @@ static struct se_port *core_alloc_port(struct se_device *dev)
spin_lock(&dev->se_port_lock);
if (dev->dev_port_count == 0x0000ffff) {
- printk(KERN_WARNING "Reached dev->dev_port_count =="
+ pr_warn("Reached dev->dev_port_count =="
" 0x0000ffff\n");
spin_unlock(&dev->se_port_lock);
- return NULL;
+ return ERR_PTR(-ENOSPC);
}
again:
/*
@@ -531,7 +532,7 @@ again:
* 3h to FFFFh Relative port 3 through 65 535
*/
port->sep_rtpi = dev->dev_rpti_counter++;
- if (!(port->sep_rtpi))
+ if (!port->sep_rtpi)
goto again;
list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
@@ -553,7 +554,7 @@ static void core_export_port(
struct se_port *port,
struct se_lun *lun)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
spin_lock(&dev->se_port_lock);
@@ -566,20 +567,20 @@ static void core_export_port(
list_add_tail(&port->sep_list, &dev->dev_sep_list);
spin_unlock(&dev->se_port_lock);
- if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
+ if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
- printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
+ pr_err("Unable to allocate t10_alua_tg_pt"
"_gp_member_t\n");
return;
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- T10_ALUA(su_dev)->default_tg_pt_gp);
+ su_dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
+ pr_debug("%s/%s: Adding to default ALUA Target Port"
" Group: alua/default_tg_pt_gp\n",
- TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
+ dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
}
dev->dev_port_count++;
@@ -606,8 +607,6 @@ static void core_release_port(struct se_device *dev, struct se_port *port)
list_del(&port->sep_list);
dev->dev_port_count--;
kfree(port);
-
- return;
}
int core_dev_export(
@@ -618,8 +617,8 @@ int core_dev_export(
struct se_port *port;
port = core_alloc_port(dev);
- if (!(port))
- return -1;
+ if (IS_ERR(port))
+ return PTR_ERR(port);
lun->lun_se_dev = dev;
se_dev_start(dev);
@@ -656,33 +655,35 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
struct se_dev_entry *deve;
struct se_lun *se_lun;
- struct se_session *se_sess = SE_SESS(se_cmd);
+ struct se_session *se_sess = se_cmd->se_sess;
struct se_task *se_task;
- unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
+ unsigned char *buf;
u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
- list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
+ list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
break;
- if (!(se_task)) {
- printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
+ if (!se_task) {
+ pr_err("Unable to locate struct se_task for struct se_cmd\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
+ buf = transport_kmap_first_data_page(se_cmd);
+
/*
* If no struct se_session pointer is present, this struct se_cmd is
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
- if (!(se_sess)) {
+ if (!se_sess) {
int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
lun_count = 1;
goto done;
}
- spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = &SE_NODE_ACL(se_sess)->device_list[i];
+ deve = &se_sess->se_node_acl->device_list[i];
if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
continue;
se_lun = deve->se_lun;
@@ -699,12 +700,13 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
offset += 8;
cdb_offset += 8;
}
- spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
/*
* See SPC3 r07, page 159.
*/
done:
+ transport_kunmap_first_data_page(se_cmd);
lun_count *= 8;
buf[0] = ((lun_count >> 24) & 0xff);
buf[1] = ((lun_count >> 16) & 0xff);
@@ -743,26 +745,20 @@ void se_release_device_for_hba(struct se_device *dev)
core_scsi3_free_all_registrations(dev);
se_release_vpd_for_dev(dev);
- kfree(dev->dev_status_queue_obj);
- kfree(dev->dev_queue_obj);
kfree(dev);
-
- return;
}
void se_release_vpd_for_dev(struct se_device *dev)
{
struct t10_vpd *vpd, *vpd_tmp;
- spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
+ spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
list_for_each_entry_safe(vpd, vpd_tmp,
- &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
+ &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
list_del(&vpd->vpd_list);
kfree(vpd);
}
- spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
-
- return;
+ spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}
/* se_free_virtual_device():
@@ -821,12 +817,13 @@ static void se_dev_stop(struct se_device *dev)
int se_dev_check_online(struct se_device *dev)
{
+ unsigned long flags;
int ret;
- spin_lock_irq(&dev->dev_status_lock);
+ spin_lock_irqsave(&dev->dev_status_lock, flags);
ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
(dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
- spin_unlock_irq(&dev->dev_status_lock);
+ spin_unlock_irqrestore(&dev->dev_status_lock, flags);
return ret;
}
@@ -848,59 +845,61 @@ void se_dev_set_default_attribs(
{
struct queue_limits *limits = &dev_limits->limits;
- DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
- DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
- DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
- DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
- DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
- DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
- DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
- DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
- DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
- DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
- DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+ dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
+ dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+ dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+ dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
+ dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
+ dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
+ dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
+ dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
+ dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+ dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
+ dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
/*
* The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
* iblock_create_virtdevice() from struct queue_limits values
* if blk_queue_discard()==1
*/
- DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
- DEV_ATTRIB(dev)->max_unmap_block_desc_count =
- DA_MAX_UNMAP_BLOCK_DESC_COUNT;
- DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
- DEV_ATTRIB(dev)->unmap_granularity_alignment =
+ dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+ dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
+ DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
/*
* block_size is based on subsystem plugin dependent requirements.
*/
- DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
- DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
+ dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
+ dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
/*
* max_sectors is based on subsystem plugin dependent requirements.
*/
- DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
- DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
+ dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+ dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
/*
* Set optimal_sectors from max_sectors, which can be lowered via
* configfs.
*/
- DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
+ dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
/*
* queue_depth is based on subsystem plugin dependent requirements.
*/
- DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
- DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
+ dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
+ dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}
int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
if (task_timeout > DA_TASK_TIMEOUT_MAX) {
- printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then"
+ pr_err("dev[%p]: Passed task_timeout: %u larger then"
" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
- return -1;
+ return -EINVAL;
} else {
- DEV_ATTRIB(dev)->task_timeout = task_timeout;
- printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
+ dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
+ pr_debug("dev[%p]: Set SE Device task_timeout: %u\n",
dev, task_timeout);
}
@@ -911,9 +910,9 @@ int se_dev_set_max_unmap_lba_count(
struct se_device *dev,
u32 max_unmap_lba_count)
{
- DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
- printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
- dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
+ dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
+ pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
+ dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
return 0;
}
@@ -921,9 +920,10 @@ int se_dev_set_max_unmap_block_desc_count(
struct se_device *dev,
u32 max_unmap_block_desc_count)
{
- DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
- printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
- dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
+ dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
+ max_unmap_block_desc_count;
+ pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
+ dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
return 0;
}
@@ -931,9 +931,9 @@ int se_dev_set_unmap_granularity(
struct se_device *dev,
u32 unmap_granularity)
{
- DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
- printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
- dev, DEV_ATTRIB(dev)->unmap_granularity);
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
+ pr_debug("dev[%p]: Set unmap_granularity: %u\n",
+ dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
return 0;
}
@@ -941,109 +941,109 @@ int se_dev_set_unmap_granularity_alignment(
struct se_device *dev,
u32 unmap_granularity_alignment)
{
- DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
- printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
- dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
+ pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
+ dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
return 0;
}
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
- if (TRANSPORT(dev)->dpo_emulated == NULL) {
- printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
- return -1;
+ if (dev->transport->dpo_emulated == NULL) {
+ pr_err("dev->transport->dpo_emulated is NULL\n");
+ return -EINVAL;
}
- if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
- printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
- return -1;
+ if (dev->transport->dpo_emulated(dev) == 0) {
+ pr_err("dev->transport->dpo_emulated not supported\n");
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_dpo = flag;
- printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
- " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
+ dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
+ pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation"
+ " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
- if (TRANSPORT(dev)->fua_write_emulated == NULL) {
- printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
- return -1;
+ if (dev->transport->fua_write_emulated == NULL) {
+ pr_err("dev->transport->fua_write_emulated is NULL\n");
+ return -EINVAL;
}
- if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
- printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
- return -1;
+ if (dev->transport->fua_write_emulated(dev) == 0) {
+ pr_err("dev->transport->fua_write_emulated not supported\n");
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_fua_write = flag;
- printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
- dev, DEV_ATTRIB(dev)->emulate_fua_write);
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
+ pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
+ dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
return 0;
}
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
- if (TRANSPORT(dev)->fua_read_emulated == NULL) {
- printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
- return -1;
+ if (dev->transport->fua_read_emulated == NULL) {
+ pr_err("dev->transport->fua_read_emulated is NULL\n");
+ return -EINVAL;
}
- if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
- printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
- return -1;
+ if (dev->transport->fua_read_emulated(dev) == 0) {
+ pr_err("dev->transport->fua_read_emulated not supported\n");
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_fua_read = flag;
- printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
- dev, DEV_ATTRIB(dev)->emulate_fua_read);
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
+ pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n",
+ dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
- if (TRANSPORT(dev)->write_cache_emulated == NULL) {
- printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
- return -1;
+ if (dev->transport->write_cache_emulated == NULL) {
+ pr_err("dev->transport->write_cache_emulated is NULL\n");
+ return -EINVAL;
}
- if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
- printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
- return -1;
+ if (dev->transport->write_cache_emulated(dev) == 0) {
+ pr_err("dev->transport->write_cache_emulated not supported\n");
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_write_cache = flag;
- printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
- dev, DEV_ATTRIB(dev)->emulate_write_cache);
+ dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
+ pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
+ dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
return 0;
}
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1) && (flag != 2)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ pr_err("dev[%p]: Unable to change SE Device"
" UA_INTRLCK_CTRL while dev_export_obj: %d count"
" exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
- printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
- dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);
+ dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
+ pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
+ dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
return 0;
}
@@ -1051,19 +1051,19 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
+ pr_err("dev[%p]: Unable to change SE Device TAS while"
" dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_tas = flag;
- printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
- dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");
+ dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
+ pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
+ dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
return 0;
}
@@ -1071,20 +1071,20 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
/*
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_create_virtdevice().
*/
- if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
- printk(KERN_ERR "Generic Block Discard not supported\n");
+ if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
- DEV_ATTRIB(dev)->emulate_tpu = flag;
- printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
+ dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
+ pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
dev, flag);
return 0;
}
@@ -1092,20 +1092,20 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
/*
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_create_virtdevice().
*/
- if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
- printk(KERN_ERR "Generic Block Discard not supported\n");
+ if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
- DEV_ATTRIB(dev)->emulate_tpws = flag;
- printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
+ dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
+ pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
dev, flag);
return 0;
}
@@ -1113,12 +1113,36 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+ dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
+ pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
+ (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
+ return 0;
+}
+
+int se_dev_set_is_nonrot(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ return -EINVAL;
+ }
+ dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
+ pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
+ dev, flag);
+ return 0;
+}
+
+int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
+{
+ if (flag != 0) {
+ printk(KERN_ERR "dev[%p]: SE Device emulatation of restricted"
+ " reordering not implemented\n", dev);
+ return -ENOSYS;
}
- DEV_ATTRIB(dev)->enforce_pr_isids = flag;
- printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
- (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
+ dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
+ pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
return 0;
}
@@ -1130,44 +1154,44 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
u32 orig_queue_depth = dev->queue_depth;
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
+ pr_err("dev[%p]: Unable to change SE Device TCQ while"
" dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
- if (!(queue_depth)) {
- printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
+ if (!queue_depth) {
+ pr_err("dev[%p]: Illegal ZERO value for queue"
"_depth\n", dev);
- return -1;
+ return -EINVAL;
}
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
- printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+ pr_err("dev[%p]: Passed queue_depth: %u"
" exceeds TCM/SE_Device TCQ: %u\n",
dev, queue_depth,
- DEV_ATTRIB(dev)->hw_queue_depth);
- return -1;
+ dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+ return -EINVAL;
}
} else {
- if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
- if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
- printk(KERN_ERR "dev[%p]: Passed queue_depth:"
+ if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
+ if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+ pr_err("dev[%p]: Passed queue_depth:"
" %u exceeds TCM/SE_Device MAX"
" TCQ: %u\n", dev, queue_depth,
- DEV_ATTRIB(dev)->hw_queue_depth);
- return -1;
+ dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+ return -EINVAL;
}
}
}
- DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
+ dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
if (queue_depth > orig_queue_depth)
atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
else if (queue_depth < orig_queue_depth)
atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
- printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
+ pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
}
@@ -1177,50 +1201,50 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
int force = 0; /* Force setting for VDEVS */
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ pr_err("dev[%p]: Unable to change SE Device"
" max_sectors while dev_export_obj: %d count exists\n",
dev, atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
- if (!(max_sectors)) {
- printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
+ if (!max_sectors) {
+ pr_err("dev[%p]: Illegal ZERO value for"
" max_sectors\n", dev);
- return -1;
+ return -EINVAL;
}
if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
- printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
+ pr_err("dev[%p]: Passed max_sectors: %u less than"
" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
DA_STATUS_MAX_SECTORS_MIN);
- return -1;
+ return -EINVAL;
}
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
- printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
+ pr_err("dev[%p]: Passed max_sectors: %u"
" greater than TCM/SE_Device max_sectors:"
" %u\n", dev, max_sectors,
- DEV_ATTRIB(dev)->hw_max_sectors);
- return -1;
+ dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ return -EINVAL;
}
} else {
- if (!(force) && (max_sectors >
- DEV_ATTRIB(dev)->hw_max_sectors)) {
- printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ if (!force && (max_sectors >
+ dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
+ pr_err("dev[%p]: Passed max_sectors: %u"
" greater than TCM/SE_Device max_sectors"
": %u, use force=1 to override.\n", dev,
- max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
- return -1;
+ max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ return -EINVAL;
}
if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
- printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ pr_err("dev[%p]: Passed max_sectors: %u"
" greater than DA_STATUS_MAX_SECTORS_MAX:"
" %u\n", dev, max_sectors,
DA_STATUS_MAX_SECTORS_MAX);
- return -1;
+ return -EINVAL;
}
}
- DEV_ATTRIB(dev)->max_sectors = max_sectors;
- printk("dev[%p]: SE Device max_sectors changed to %u\n",
+ dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
+ pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
dev, max_sectors);
return 0;
}
@@ -1228,25 +1252,25 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ pr_err("dev[%p]: Unable to change SE Device"
" optimal_sectors while dev_export_obj: %d count exists\n",
dev, atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
}
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ pr_err("dev[%p]: Passed optimal_sectors cannot be"
" changed for TCM/pSCSI\n", dev);
return -EINVAL;
}
- if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
- printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
+ if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
+ pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
" greater than max_sectors: %u\n", dev,
- optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
+ optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
return -EINVAL;
}
- DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
- printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
+ dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
+ pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
dev, optimal_sectors);
return 0;
}
@@ -1254,31 +1278,31 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
+ pr_err("dev[%p]: Unable to change SE Device block_size"
" while dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
if ((block_size != 512) &&
(block_size != 1024) &&
(block_size != 2048) &&
(block_size != 4096)) {
- printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u"
+ pr_err("dev[%p]: Illegal value for block_device: %u"
" for SE device, must be 512, 1024, 2048 or 4096\n",
dev, block_size);
- return -1;
+ return -EINVAL;
}
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ pr_err("dev[%p]: Not allowed to change block_size for"
" Physical Device, use for Linux/SCSI to change"
" block_size for underlying hardware\n", dev);
- return -1;
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->block_size = block_size;
- printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
+ dev->se_sub_dev->se_dev_attrib.block_size = block_size;
+ pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
return 0;
}
@@ -1293,13 +1317,13 @@ struct se_lun *core_dev_add_lun(
u32 lun_access = 0;
if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
- printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
+ pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
atomic_read(&dev->dev_access_obj.obj_access_count));
return NULL;
}
lun_p = core_tpg_pre_addlun(tpg, lun);
- if ((IS_ERR(lun_p)) || !(lun_p))
+ if ((IS_ERR(lun_p)) || !lun_p)
return NULL;
if (dev->dev_flags & DF_READ_ONLY)
@@ -1310,15 +1334,15 @@ struct se_lun *core_dev_add_lun(
if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
return NULL;
- printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
- " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
- TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
+ pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
+ " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
/*
* Update LUN maps for dynamically added initiators when
* generate_node_acl is enabled.
*/
- if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
+ if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
struct se_node_acl *acl;
spin_lock_bh(&tpg->acl_node_lock);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
@@ -1346,15 +1370,15 @@ int core_dev_del_lun(
int ret = 0;
lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
- if (!(lun))
+ if (!lun)
return ret;
core_tpg_post_dellun(tpg, lun);
- printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
- " device object\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
- TPG_TFO(tpg)->get_fabric_name());
+ pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
+ " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name());
return 0;
}
@@ -1365,21 +1389,21 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l
spin_lock(&tpg->tpg_lun_lock);
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
+ pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
"_PER_TPG-1: %u for Target Portal Group: %hu\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
- printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
+ pr_err("%s Logical Unit Number: %u is not free on"
" Target Portal Group: %hu, ignoring request.\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
@@ -1398,21 +1422,21 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
spin_lock(&tpg->tpg_lun_lock);
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
+ pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
"_TPG-1: %u for Target Portal Group: %hu\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
- printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %hu, ignoring request.\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
@@ -1430,20 +1454,20 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_lun_acl *lacl;
struct se_node_acl *nacl;
- if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
- printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
+ pr_err("%s InitiatorName exceeds maximum size.\n",
+ tpg->se_tpg_tfo->get_fabric_name());
*ret = -EOVERFLOW;
return NULL;
}
nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if (!(nacl)) {
+ if (!nacl) {
*ret = -EINVAL;
return NULL;
}
lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
- if (!(lacl)) {
- printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
+ if (!lacl) {
+ pr_err("Unable to allocate memory for struct se_lun_acl.\n");
*ret = -ENOMEM;
return NULL;
}
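
The hunk above also tightens strlen(initiatorname) > TRANSPORT_IQN_LEN to >=, presumably because the destination buffer holds TRANSPORT_IQN_LEN bytes including the terminating NUL, so a name of exactly TRANSPORT_IQN_LEN characters would not fit. A standalone sketch of that boundary case; IQN_LEN, check_name() and the sample names are made up for illustration.

#include <stdio.h>
#include <string.h>

#define IQN_LEN 8	/* stand-in for TRANSPORT_IQN_LEN; includes room for '\0' */

static int check_name(const char *name)
{
	char dst[IQN_LEN];

	if (strlen(name) >= IQN_LEN) {	/* '>' alone would accept an 8-char name */
		fprintf(stderr, "InitiatorName exceeds maximum size.\n");
		return -1;
	}
	snprintf(dst, sizeof(dst), "%s", name);	/* now guaranteed not to truncate */
	printf("accepted: %s\n", dst);
	return 0;
}

int main(void)
{
	check_name("1234567");		/* 7 chars: fits */
	check_name("12345678");		/* 8 chars: rejected only by the >= check */
	return 0;
}
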
@@ -1466,16 +1490,16 @@ int core_dev_add_initiator_node_lun_acl(
struct se_node_acl *nacl;
lun = core_dev_get_lun(tpg, unpacked_lun);
- if (!(lun)) {
- printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ if (!lun) {
+ pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %hu, ignoring request.\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
return -EINVAL;
}
nacl = lacl->se_lun_nacl;
- if (!(nacl))
+ if (!nacl)
return -EINVAL;
if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
@@ -1494,9 +1518,9 @@ int core_dev_add_initiator_node_lun_acl(
smp_mb__after_atomic_inc();
spin_unlock(&lun->lun_acl_lock);
- printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
- " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
+ pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
+ " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
lacl->initiatorname);
/*
@@ -1519,7 +1543,7 @@ int core_dev_del_initiator_node_lun_acl(
struct se_node_acl *nacl;
nacl = lacl->se_lun_nacl;
- if (!(nacl))
+ if (!nacl)
return -EINVAL;
spin_lock(&lun->lun_acl_lock);
@@ -1533,10 +1557,10 @@ int core_dev_del_initiator_node_lun_acl(
lacl->se_lun = NULL;
- printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
+ pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
" InitiatorNode: %s Mapped LUN: %u\n",
- TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
lacl->initiatorname, lacl->mapped_lun);
return 0;
@@ -1546,10 +1570,10 @@ void core_dev_free_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl)
{
- printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
- " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
- TPG_TFO(tpg)->get_fabric_name(),
+ pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
+ " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
+ tpg->se_tpg_tfo->get_fabric_name(),
lacl->initiatorname, lacl->mapped_lun);
kfree(lacl);
@@ -1564,64 +1588,64 @@ int core_dev_setup_virtual_lun0(void)
char buf[16];
int ret;
- hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
+ hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
if (IS_ERR(hba))
return PTR_ERR(hba);
- se_global->g_lun0_hba = hba;
+ lun0_hba = hba;
t = hba->transport;
se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
- if (!(se_dev)) {
- printk(KERN_ERR "Unable to allocate memory for"
+ if (!se_dev) {
+ pr_err("Unable to allocate memory for"
" struct se_subsystem_dev\n");
ret = -ENOMEM;
goto out;
}
- INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+ INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
- INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
- INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
- spin_lock_init(&se_dev->t10_reservation.registration_lock);
- spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+ INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
+ INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
+ spin_lock_init(&se_dev->t10_pr.registration_lock);
+ spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
spin_lock_init(&se_dev->se_dev_lock);
- se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+ se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
se_dev->t10_wwn.t10_sub_dev = se_dev;
se_dev->t10_alua.t10_sub_dev = se_dev;
se_dev->se_dev_attrib.da_sub_dev = se_dev;
se_dev->se_dev_hba = hba;
se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
- if (!(se_dev->se_dev_su_ptr)) {
- printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+ if (!se_dev->se_dev_su_ptr) {
+ pr_err("Unable to locate subsystem dependent pointer"
" from allocate_virtdevice()\n");
ret = -ENOMEM;
goto out;
}
- se_global->g_lun0_su_dev = se_dev;
+ lun0_su_dev = se_dev;
memset(buf, 0, 16);
sprintf(buf, "rd_pages=8");
t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
- if (!(dev) || IS_ERR(dev)) {
- ret = -ENOMEM;
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
goto out;
}
se_dev->se_dev_ptr = dev;
- se_global->g_lun0_dev = dev;
+ g_lun0_dev = dev;
return 0;
out:
- se_global->g_lun0_su_dev = NULL;
+ lun0_su_dev = NULL;
kfree(se_dev);
- if (se_global->g_lun0_hba) {
- core_delete_hba(se_global->g_lun0_hba);
- se_global->g_lun0_hba = NULL;
+ if (lun0_hba) {
+ core_delete_hba(lun0_hba);
+ lun0_hba = NULL;
}
return ret;
}
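
core_dev_setup_virtual_lun0() now propagates PTR_ERR(dev) instead of flattening every failure to -ENOMEM, which assumes create_virtdevice() reports errors as ERR_PTR-encoded values rather than NULL. A userspace sketch of that encoding idiom; these are simplified re-implementations of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers, and create_virtdevice() here is a stand-in.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int the_device;	/* stands in for a real struct se_device */

static void *create_virtdevice(int fail)
{
	if (fail)
		return ERR_PTR(-EINVAL);	/* errno encoded in the pointer value */
	return &the_device;
}

int main(void)
{
	void *dev = create_virtdevice(1);

	if (IS_ERR(dev)) {
		printf("create_virtdevice failed: %ld\n", PTR_ERR(dev));
		return 1;
	}
	printf("got device %p\n", dev);
	return 0;
}
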
@@ -1629,14 +1653,14 @@ out:
void core_dev_release_virtual_lun0(void)
{
- struct se_hba *hba = se_global->g_lun0_hba;
- struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;
+ struct se_hba *hba = lun0_hba;
+ struct se_subsystem_dev *su_dev = lun0_su_dev;
- if (!(hba))
+ if (!hba)
return;
- if (se_global->g_lun0_dev)
- se_free_virtual_device(se_global->g_lun0_dev, hba);
+ if (g_lun0_dev)
+ se_free_virtual_device(g_lun0_dev, hba);
kfree(su_dev);
core_delete_hba(hba);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 07ab5a3bb8e..f1654694f4e 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -60,7 +60,7 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf)
cit->ct_group_ops = _group_ops; \
cit->ct_attrs = _attrs; \
cit->ct_owner = tf->tf_module; \
- printk("Setup generic %s\n", __stringify(_name)); \
+ pr_debug("Setup generic %s\n", __stringify(_name)); \
}
/* Start of tfc_tpg_mappedlun_cit */
@@ -80,8 +80,8 @@ static int target_fabric_mappedlun_link(
/*
* Ensure that the source port exists
*/
- if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
- printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
+ if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
+ pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
"_tpg does not exist\n");
return -EINVAL;
}
@@ -96,12 +96,12 @@ static int target_fabric_mappedlun_link(
* Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
*/
if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
- printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
+ pr_err("Illegal Initiator ACL SymLink outside of %s\n",
config_item_name(wwn_ci));
return -EINVAL;
}
if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
- printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
+ pr_err("Illegal Initiator ACL Symlink outside of %s"
" TPGT: %s\n", config_item_name(wwn_ci),
config_item_name(tpg_ci));
return -EINVAL;
@@ -118,7 +118,7 @@ static int target_fabric_mappedlun_link(
lun_access = deve->lun_flags;
else
lun_access =
- (TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
+ (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
TRANSPORT_LUNFLAGS_READ_WRITE;
spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
@@ -147,7 +147,7 @@ static int target_fabric_mappedlun_unlink(
/*
* Determine if the underlying MappedLUN has already been released..
*/
- if (!(deve->se_lun))
+ if (!deve->se_lun)
return 0;
lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
@@ -202,9 +202,9 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
TRANSPORT_LUNFLAGS_READ_WRITE,
lacl->se_lun_nacl);
- printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
+ pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
" Mapped LUN: %u Write Protect bit to %s\n",
- TPG_TFO(se_tpg)->get_fabric_name(),
+ se_tpg->se_tpg_tfo->get_fabric_name(),
lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
return count;
@@ -327,14 +327,14 @@ static struct config_group *target_fabric_make_mappedlun(
int ret = 0;
acl_ci = &group->cg_item;
- if (!(acl_ci)) {
- printk(KERN_ERR "Unable to locatel acl_ci\n");
+ if (!acl_ci) {
+ pr_err("Unable to locatel acl_ci\n");
return NULL;
}
buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
- if (!(buf)) {
- printk(KERN_ERR "Unable to allocate memory for name buf\n");
+ if (!buf) {
+ pr_err("Unable to allocate memory for name buf\n");
return ERR_PTR(-ENOMEM);
}
snprintf(buf, strlen(name) + 1, "%s", name);
@@ -342,7 +342,7 @@ static struct config_group *target_fabric_make_mappedlun(
* Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
*/
if (strstr(buf, "lun_") != buf) {
- printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
+ pr_err("Unable to locate \"lun_\" from buf: %s"
" name: %s\n", buf, name);
ret = -EINVAL;
goto out;
@@ -358,7 +358,7 @@ static struct config_group *target_fabric_make_mappedlun(
lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
config_item_name(acl_ci), &ret);
- if (!(lacl)) {
+ if (!lacl) {
ret = -EINVAL;
goto out;
}
@@ -367,7 +367,7 @@ static struct config_group *target_fabric_make_mappedlun(
lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!lacl_cg->default_groups) {
- printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n");
+ pr_err("Unable to allocate lacl_cg->default_groups\n");
ret = -ENOMEM;
goto out;
}
@@ -379,11 +379,11 @@ static struct config_group *target_fabric_make_mappedlun(
lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
lacl_cg->default_groups[1] = NULL;
- ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+ ml_stat_grp = &lacl->ml_stat_grps.stat_group;
ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
GFP_KERNEL);
if (!ml_stat_grp->default_groups) {
- printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n");
+ pr_err("Unable to allocate ml_stat_grp->default_groups\n");
ret = -ENOMEM;
goto out;
}
@@ -408,7 +408,7 @@ static void target_fabric_drop_mappedlun(
struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
int i;
- ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+ ml_stat_grp = &lacl->ml_stat_grps.stat_group;
for (i = 0; ml_stat_grp->default_groups[i]; i++) {
df_item = &ml_stat_grp->default_groups[i]->cg_item;
ml_stat_grp->default_groups[i] = NULL;
@@ -474,8 +474,8 @@ static struct config_group *target_fabric_make_nodeacl(
struct se_node_acl *se_nacl;
struct config_group *nacl_cg;
- if (!(tf->tf_ops.fabric_make_nodeacl)) {
- printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
+ if (!tf->tf_ops.fabric_make_nodeacl) {
+ pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n");
return ERR_PTR(-ENOSYS);
}
@@ -572,13 +572,13 @@ static struct config_group *target_fabric_make_np(
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_tpg_np *se_tpg_np;
- if (!(tf->tf_ops.fabric_make_np)) {
- printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
+ if (!tf->tf_ops.fabric_make_np) {
+ pr_err("tf->tf_ops.fabric_make_np is NULL\n");
return ERR_PTR(-ENOSYS);
}
se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
- if (!(se_tpg_np) || IS_ERR(se_tpg_np))
+ if (!se_tpg_np || IS_ERR(se_tpg_np))
return ERR_PTR(-EINVAL);
se_tpg_np->tpg_np_parent = se_tpg;
@@ -627,10 +627,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
struct se_lun *lun,
char *page)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
@@ -641,10 +638,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
const char *page,
size_t count)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
@@ -659,10 +653,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
struct se_lun *lun,
char *page)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_offline_bit(lun, page);
@@ -673,10 +664,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
const char *page,
size_t count)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_offline_bit(lun, page, count);
@@ -691,10 +679,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
struct se_lun *lun,
char *page)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_secondary_status(lun, page);
@@ -705,10 +690,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
const char *page,
size_t count)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_secondary_status(lun, page, count);
@@ -723,10 +705,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
struct se_lun *lun,
char *page)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_secondary_write_metadata(lun, page);
@@ -737,10 +716,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
const char *page,
size_t count)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_secondary_write_metadata(lun, page, count);
@@ -781,13 +757,13 @@ static int target_fabric_port_link(
tf = se_tpg->se_tpg_wwn->wwn_tf;
if (lun->lun_se_dev != NULL) {
- printk(KERN_ERR "Port Symlink already exists\n");
+ pr_err("Port Symlink already exists\n");
return -EEXIST;
}
dev = se_dev->se_dev_ptr;
- if (!(dev)) {
- printk(KERN_ERR "Unable to locate struct se_device pointer from"
+ if (!dev) {
+ pr_err("Unable to locate struct se_device pointer from"
" %s\n", config_item_name(se_dev_ci));
ret = -ENODEV;
goto out;
@@ -795,8 +771,8 @@ static int target_fabric_port_link(
lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
lun->unpacked_lun);
- if ((IS_ERR(lun_p)) || !(lun_p)) {
- printk(KERN_ERR "core_dev_add_lun() failed\n");
+ if (IS_ERR(lun_p) || !lun_p) {
+ pr_err("core_dev_add_lun() failed\n");
ret = -EINVAL;
goto out;
}
@@ -888,7 +864,7 @@ static struct config_group *target_fabric_make_lun(
int errno;
if (strstr(name, "lun_") != name) {
- printk(KERN_ERR "Unable to locate \'_\" in"
+ pr_err("Unable to locate \'_\" in"
" \"lun_$LUN_NUMBER\"\n");
return ERR_PTR(-EINVAL);
}
@@ -896,14 +872,14 @@ static struct config_group *target_fabric_make_lun(
return ERR_PTR(-EINVAL);
lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
- if (!(lun))
+ if (!lun)
return ERR_PTR(-EINVAL);
lun_cg = &lun->lun_group;
lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!lun_cg->default_groups) {
- printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n");
+ pr_err("Unable to allocate lun_cg->default_groups\n");
return ERR_PTR(-ENOMEM);
}
@@ -914,11 +890,11 @@ static struct config_group *target_fabric_make_lun(
lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
lun_cg->default_groups[1] = NULL;
- port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+ port_stat_grp = &lun->port_stat_grps.stat_group;
port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
GFP_KERNEL);
if (!port_stat_grp->default_groups) {
- printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n");
+ pr_err("Unable to allocate port_stat_grp->default_groups\n");
errno = -ENOMEM;
goto out;
}
@@ -941,7 +917,7 @@ static void target_fabric_drop_lun(
struct config_group *lun_cg, *port_stat_grp;
int i;
- port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+ port_stat_grp = &lun->port_stat_grps.stat_group;
for (i = 0; port_stat_grp->default_groups[i]; i++) {
df_item = &port_stat_grp->default_groups[i]->cg_item;
port_stat_grp->default_groups[i] = NULL;
@@ -1031,13 +1007,13 @@ static struct config_group *target_fabric_make_tpg(
struct target_fabric_configfs *tf = wwn->wwn_tf;
struct se_portal_group *se_tpg;
- if (!(tf->tf_ops.fabric_make_tpg)) {
- printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
+ if (!tf->tf_ops.fabric_make_tpg) {
+ pr_err("tf->tf_ops.fabric_make_tpg is NULL\n");
return ERR_PTR(-ENOSYS);
}
se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
- if (!(se_tpg) || IS_ERR(se_tpg))
+ if (!se_tpg || IS_ERR(se_tpg))
return ERR_PTR(-EINVAL);
/*
* Setup default groups from pre-allocated se_tpg->tpg_default_groups
@@ -1130,13 +1106,13 @@ static struct config_group *target_fabric_make_wwn(
struct target_fabric_configfs, tf_group);
struct se_wwn *wwn;
- if (!(tf->tf_ops.fabric_make_wwn)) {
- printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
+ if (!tf->tf_ops.fabric_make_wwn) {
+ pr_err("tf->tf_ops.fabric_make_wwn is NULL\n");
return ERR_PTR(-ENOSYS);
}
wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
- if (!(wwn) || IS_ERR(wwn))
+ if (!wwn || IS_ERR(wwn))
return ERR_PTR(-EINVAL);
wwn->wwn_tf = tf;
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 1e193f32489..c4ea3a9a555 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -25,6 +25,7 @@
*
******************************************************************************/
+#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
@@ -61,9 +62,8 @@ u32 sas_get_pr_transport_id(
int *format_code,
unsigned char *buf)
{
- unsigned char binary, *ptr;
- int i;
- u32 off = 4;
+ unsigned char *ptr;
+
/*
* Set PROTOCOL IDENTIFIER to 6h for SAS
*/
@@ -74,10 +74,8 @@ u32 sas_get_pr_transport_id(
*/
ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */
- for (i = 0; i < 16; i += 2) {
- binary = transport_asciihex_to_binaryhex(&ptr[i]);
- buf[off++] = binary;
- }
+ hex2bin(&buf[4], ptr, 8);
+
/*
* The SAS Transport ID is a hardcoded 24-byte length
*/
@@ -157,7 +155,7 @@ u32 fc_get_pr_transport_id(
int *format_code,
unsigned char *buf)
{
- unsigned char binary, *ptr;
+ unsigned char *ptr;
int i;
u32 off = 8;
/*
@@ -172,12 +170,11 @@ u32 fc_get_pr_transport_id(
ptr = &se_nacl->initiatorname[0];
for (i = 0; i < 24; ) {
- if (!(strncmp(&ptr[i], ":", 1))) {
+ if (!strncmp(&ptr[i], ":", 1)) {
i++;
continue;
}
- binary = transport_asciihex_to_binaryhex(&ptr[i]);
- buf[off++] = binary;
+ hex2bin(&buf[off++], &ptr[i], 1);
i += 2;
}
/*
@@ -386,7 +383,7 @@ char *iscsi_parse_pr_out_transport_id(
* Reserved
*/
if ((format_code != 0x00) && (format_code != 0x40)) {
- printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
+ pr_err("Illegal format code: 0x%02x for iSCSI"
" Initiator Transport ID\n", format_code);
return NULL;
}
@@ -406,7 +403,7 @@ char *iscsi_parse_pr_out_transport_id(
tid_len += padding;
if ((add_len + 4) != tid_len) {
- printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
+ pr_debug("LIO-Target Extracted add_len: %hu "
"does not match calculated tid_len: %u,"
" using tid_len instead\n", add_len+4, tid_len);
*out_tid_len = tid_len;
@@ -420,8 +417,8 @@ char *iscsi_parse_pr_out_transport_id(
*/
if (format_code == 0x40) {
p = strstr((char *)&buf[4], ",i,0x");
- if (!(p)) {
- printk(KERN_ERR "Unable to locate \",i,0x\" seperator"
+ if (!p) {
+ pr_err("Unable to locate \",i,0x\" seperator"
" for Initiator port identifier: %s\n",
(char *)&buf[4]);
return NULL;
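
Both transport-ID helpers above drop the open-coded ASCII-hex loop in favour of the kernel's hex2bin(), which turns 2 * count hex characters into count raw bytes. A userspace re-implementation of the same conversion, to show what the call does; my_hex2bin() and the sample NAA string are purely illustrative.

#include <stdio.h>
#include <ctype.h>
#include <stddef.h>

static int hex_to_nibble(char c)
{
	if (isdigit((unsigned char)c))
		return c - '0';
	return tolower((unsigned char)c) - 'a' + 10;
}

/* Convert 2 * count ASCII hex characters in src into count raw bytes in dst. */
static void my_hex2bin(unsigned char *dst, const char *src, size_t count)
{
	while (count--) {
		*dst++ = (unsigned char)((hex_to_nibble(src[0]) << 4) |
					 hex_to_nibble(src[1]));
		src += 2;
	}
}

int main(void)
{
	unsigned char buf[8];
	size_t i;

	/* 16 hex digits of a NAA-style name -> 8 binary bytes, as in the SAS case. */
	my_hex2bin(buf, "5001405cafebabe1", 8);
	for (i = 0; i < 8; i++)
		printf("%02x", buf[i]);
	printf("\n");
	return 0;
}
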
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 150c4305f38..bc1b33639b8 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -42,18 +42,6 @@
#include "target_core_file.h"
-#if 1
-#define DEBUG_FD_CACHE(x...) printk(x)
-#else
-#define DEBUG_FD_CACHE(x...)
-#endif
-
-#if 1
-#define DEBUG_FD_FUA(x...) printk(x)
-#else
-#define DEBUG_FD_FUA(x...)
-#endif
-
static struct se_subsystem_api fileio_template;
/* fd_attach_hba(): (Part of se_subsystem_api_t template)
@@ -65,24 +53,21 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
struct fd_host *fd_host;
fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
- if (!(fd_host)) {
- printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
- return -1;
+ if (!fd_host) {
+ pr_err("Unable to allocate memory for struct fd_host\n");
+ return -ENOMEM;
}
fd_host->fd_host_id = host_id;
- atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
- atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
- hba->hba_ptr = (void *) fd_host;
+ hba->hba_ptr = fd_host;
- printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
+ pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
TARGET_CORE_MOD_VERSION);
- printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
- " Target Core with TCQ Depth: %d MaxSectors: %u\n",
- hba->hba_id, fd_host->fd_host_id,
- atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
+ pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
+ " MaxSectors: %u\n",
+ hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
return 0;
}
@@ -91,7 +76,7 @@ static void fd_detach_hba(struct se_hba *hba)
{
struct fd_host *fd_host = hba->hba_ptr;
- printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
+ pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
" Target Core\n", hba->hba_id, fd_host->fd_host_id);
kfree(fd_host);
@@ -104,14 +89,14 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
- if (!(fd_dev)) {
- printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
+ if (!fd_dev) {
+ pr_err("Unable to allocate memory for struct fd_dev\n");
return NULL;
}
fd_dev->fd_host = fd_host;
- printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name);
+ pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
return fd_dev;
}
@@ -144,7 +129,7 @@ static struct se_device *fd_create_virtdevice(
set_fs(old_fs);
if (IS_ERR(dev_p)) {
- printk(KERN_ERR "getname(%s) failed: %lu\n",
+ pr_err("getname(%s) failed: %lu\n",
fd_dev->fd_dev_name, IS_ERR(dev_p));
ret = PTR_ERR(dev_p);
goto fail;
@@ -167,12 +152,12 @@ static struct se_device *fd_create_virtdevice(
file = filp_open(dev_p, flags, 0600);
if (IS_ERR(file)) {
- printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+ pr_err("filp_open(%s) failed\n", dev_p);
ret = PTR_ERR(file);
goto fail;
}
if (!file || !file->f_dentry) {
- printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+ pr_err("filp_open(%s) failed\n", dev_p);
goto fail;
}
fd_dev->fd_file = file;
@@ -202,14 +187,14 @@ static struct se_device *fd_create_virtdevice(
fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size);
- printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
+ pr_debug("FILEIO: Using size: %llu bytes from struct"
" block_device blocks: %llu logical_block_size: %d\n",
fd_dev->fd_dev_size,
div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
} else {
if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
- printk(KERN_ERR "FILEIO: Missing fd_dev_size="
+ pr_err("FILEIO: Missing fd_dev_size="
" parameter, and no backing struct"
" block_device\n");
goto fail;
@@ -226,15 +211,15 @@ static struct se_device *fd_create_virtdevice(
dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
dev = transport_add_device_to_core_hba(hba, &fileio_template,
- se_dev, dev_flags, (void *)fd_dev,
+ se_dev, dev_flags, fd_dev,
&dev_limits, "FILEIO", FD_VERSION);
- if (!(dev))
+ if (!dev)
goto fail;
fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
fd_dev->fd_queue_depth = dev->queue_depth;
- printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
+ pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
fd_dev->fd_dev_name, fd_dev->fd_dev_size);
@@ -272,45 +257,45 @@ static inline struct fd_request *FILE_REQ(struct se_task *task)
static struct se_task *
-fd_alloc_task(struct se_cmd *cmd)
+fd_alloc_task(unsigned char *cdb)
{
struct fd_request *fd_req;
fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
- if (!(fd_req)) {
- printk(KERN_ERR "Unable to allocate struct fd_request\n");
+ if (!fd_req) {
+ pr_err("Unable to allocate struct fd_request\n");
return NULL;
}
- fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;
-
return &fd_req->fd_task;
}
static int fd_do_readv(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
- struct file *fd = req->fd_dev->fd_file;
+ struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
+ struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
- loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+ loff_t pos = (task->task_lba *
+ task->se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret = 0, i;
- iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
- if (!(iov)) {
- printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
- return -1;
+ iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+ if (!iov) {
+ pr_err("Unable to allocate fd_do_readv iov[]\n");
+ return -ENOMEM;
}
- for (i = 0; i < task->task_sg_num; i++) {
+ for (i = 0; i < task->task_sg_nents; i++) {
iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]);
}
old_fs = get_fs();
set_fs(get_ds());
- ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
+ ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
set_fs(old_fs);
kfree(iov);
@@ -321,16 +306,16 @@ static int fd_do_readv(struct se_task *task)
*/
if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
if (ret < 0 || ret != task->task_size) {
- printk(KERN_ERR "vfs_readv() returned %d,"
+ pr_err("vfs_readv() returned %d,"
" expecting %d for S_ISBLK\n", ret,
(int)task->task_size);
- return -1;
+ return (ret < 0 ? ret : -EINVAL);
}
} else {
if (ret < 0) {
- printk(KERN_ERR "vfs_readv() returned %d for non"
+ pr_err("vfs_readv() returned %d for non"
" S_ISBLK\n", ret);
- return -1;
+ return ret;
}
}
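
fd_do_readv() builds one struct iovec per scatterlist segment and hands the whole array to vfs_readv(), so a single call scatters the data across all segments starting at the byte offset computed from task_lba * block_size. The userspace equivalent with readv(2); the file name, buffer sizes and offset below are arbitrary.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void)
{
	char seg0[16], seg1[16];		/* two "scatterlist segments" */
	struct iovec iov[2] = {
		{ .iov_base = seg0, .iov_len = sizeof(seg0) },
		{ .iov_base = seg1, .iov_len = sizeof(seg1) },
	};
	off_t pos = 0;				/* kernel path: task_lba * block_size */
	ssize_t ret;
	int fd = open("/etc/hosts", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	lseek(fd, pos, SEEK_SET);
	ret = readv(fd, iov, 2);		/* one call fills both segments */
	if (ret < 0)
		perror("readv");
	else
		printf("readv() filled %zd bytes across 2 segments\n", ret);
	close(fd);
	return 0;
}
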
@@ -340,34 +325,36 @@ static int fd_do_readv(struct se_task *task)
static int fd_do_writev(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
- struct file *fd = req->fd_dev->fd_file;
+ struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
+ struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
- loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+ loff_t pos = (task->task_lba *
+ task->se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret, i = 0;
- iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
- if (!(iov)) {
- printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
- return -1;
+ iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+ if (!iov) {
+ pr_err("Unable to allocate fd_do_writev iov[]\n");
+ return -ENOMEM;
}
- for (i = 0; i < task->task_sg_num; i++) {
+ for (i = 0; i < task->task_sg_nents; i++) {
iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]);
}
old_fs = get_fs();
set_fs(get_ds());
- ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
+ ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
set_fs(old_fs);
kfree(iov);
if (ret < 0 || ret != task->task_size) {
- printk(KERN_ERR "vfs_writev() returned %d\n", ret);
- return -1;
+ pr_err("vfs_writev() returned %d\n", ret);
+ return (ret < 0 ? ret : -EINVAL);
}
return 1;
@@ -375,10 +362,10 @@ static int fd_do_writev(struct se_task *task)
static void fd_emulate_sync_cache(struct se_task *task)
{
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
- int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
+ int immed = (cmd->t_task_cdb[1] & 0x2);
loff_t start, end;
int ret;
@@ -392,11 +379,11 @@ static void fd_emulate_sync_cache(struct se_task *task)
/*
* Determine if we will be flushing the entire device.
*/
- if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
+ if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
start = 0;
end = LLONG_MAX;
} else {
- start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
+ start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
if (cmd->data_length)
end = start + cmd->data_length;
else
@@ -405,7 +392,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0)
- printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+ pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
if (!immed)
transport_complete_sync_cache(cmd, ret == 0);
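
fd_emulate_sync_cache() now reads the LBA and CDB byte straight from se_cmd and turns them into a byte range for vfs_fsync_range(): LBA 0 with zero data_length means "flush everything", otherwise start = lba * block_size and end = start + data_length (the truncated else branch presumably falls back to LLONG_MAX). The arithmetic in isolation, with arbitrary sample numbers:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long long lba = 2048, block_size = 512, data_length = 4096;
	long long start, end;

	if (lba == 0 && data_length == 0) {
		start = 0;
		end = LLONG_MAX;	/* flush the whole backing file */
	} else {
		start = (long long)(lba * block_size);
		end = data_length ? start + (long long)data_length : LLONG_MAX;
	}
	printf("fsync range: [%lld, %lld)\n", start, end);
	return 0;
}
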
@@ -446,16 +433,16 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
{
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
- loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
+ loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
loff_t end = start + task->task_size;
int ret;
- DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
+ pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
task->task_lba, task->task_size);
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0)
- printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+ pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
}
static int fd_do_task(struct se_task *task)
@@ -474,9 +461,9 @@ static int fd_do_task(struct se_task *task)
ret = fd_do_writev(task);
if (ret > 0 &&
- DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
- DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
- T_TASK(cmd)->t_tasks_fua) {
+ dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+ cmd->t_tasks_fua) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
@@ -549,7 +536,7 @@ static ssize_t fd_set_configfs_dev_params(
snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
"%s", arg_p);
kfree(arg_p);
- printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
+ pr_debug("FILEIO: Referencing Path: %s\n",
fd_dev->fd_dev_name);
fd_dev->fbd_flags |= FBDF_HAS_PATH;
break;
@@ -562,23 +549,23 @@ static ssize_t fd_set_configfs_dev_params(
ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
kfree(arg_p);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoull() failed for"
+ pr_err("strict_strtoull() failed for"
" fd_dev_size=\n");
goto out;
}
- printk(KERN_INFO "FILEIO: Referencing Size: %llu"
+ pr_debug("FILEIO: Referencing Size: %llu"
" bytes\n", fd_dev->fd_dev_size);
fd_dev->fbd_flags |= FBDF_HAS_SIZE;
break;
case Opt_fd_buffered_io:
match_int(args, &arg);
if (arg != 1) {
- printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
+ pr_err("bogus fd_buffered_io=%d value\n", arg);
ret = -EINVAL;
goto out;
}
- printk(KERN_INFO "FILEIO: Using buffered I/O"
+ pr_debug("FILEIO: Using buffered I/O"
" operations for struct fd_dev\n");
fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
@@ -598,8 +585,8 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
- printk(KERN_ERR "Missing fd_dev_name=\n");
- return -1;
+ pr_err("Missing fd_dev_name=\n");
+ return -EINVAL;
}
return 0;
@@ -654,7 +641,7 @@ static sector_t fd_get_blocks(struct se_device *dev)
{
struct fd_dev *fd_dev = dev->dev_ptr;
unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
- DEV_ATTRIB(dev)->block_size);
+ dev->se_sub_dev->se_dev_attrib.block_size);
return blocks_long;
}
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index ef4de2b4bd4..daebd710b89 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -4,8 +4,6 @@
#define FD_VERSION "4.0"
#define FD_MAX_DEV_NAME 256
-/* Maximum queuedepth for the FILEIO HBA */
-#define FD_HBA_QUEUE_DEPTH 256
#define FD_DEVICE_QUEUE_DEPTH 32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE 512
@@ -18,8 +16,6 @@ struct fd_request {
struct se_task fd_task;
/* SCSI CDB from iSCSI Command PDU */
unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
- /* FILEIO device */
- struct fd_dev *fd_dev;
} ____cacheline_aligned;
#define FBDF_HAS_PATH 0x01
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 0b8f8da8901..0639b975d6f 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -1,7 +1,7 @@
/*******************************************************************************
* Filename: target_core_hba.c
*
- * This file copntains the iSCSI HBA Transport related functions.
+ * This file contains the TCM HBA Transport related functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
@@ -45,6 +45,11 @@
static LIST_HEAD(subsystem_list);
static DEFINE_MUTEX(subsystem_mutex);
+static u32 hba_id_counter;
+
+static DEFINE_SPINLOCK(hba_lock);
+static LIST_HEAD(hba_list);
+
int transport_subsystem_register(struct se_subsystem_api *sub_api)
{
struct se_subsystem_api *s;
@@ -53,8 +58,8 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api)
mutex_lock(&subsystem_mutex);
list_for_each_entry(s, &subsystem_list, sub_api_list) {
- if (!(strcmp(s->name, sub_api->name))) {
- printk(KERN_ERR "%p is already registered with"
+ if (!strcmp(s->name, sub_api->name)) {
+ pr_err("%p is already registered with"
" duplicate name %s, unable to process"
" request\n", s, s->name);
mutex_unlock(&subsystem_mutex);
@@ -64,7 +69,7 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api)
list_add_tail(&sub_api->sub_api_list, &subsystem_list);
mutex_unlock(&subsystem_mutex);
- printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
+ pr_debug("TCM: Registered subsystem plugin: %s struct module:"
" %p\n", sub_api->name, sub_api->owner);
return 0;
}
@@ -104,21 +109,17 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
if (!hba) {
- printk(KERN_ERR "Unable to allocate struct se_hba\n");
+ pr_err("Unable to allocate struct se_hba\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&hba->hba_dev_list);
spin_lock_init(&hba->device_lock);
- spin_lock_init(&hba->hba_queue_lock);
mutex_init(&hba->hba_access_mutex);
hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
hba->hba_flags |= hba_flags;
- atomic_set(&hba->max_queue_depth, 0);
- atomic_set(&hba->left_queue_depth, 0);
-
hba->transport = core_get_backend(plugin_name);
if (!hba->transport) {
ret = -EINVAL;
@@ -129,12 +130,12 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
if (ret < 0)
goto out_module_put;
- spin_lock(&se_global->hba_lock);
- hba->hba_id = se_global->g_hba_id_counter++;
- list_add_tail(&hba->hba_list, &se_global->g_hba_list);
- spin_unlock(&se_global->hba_lock);
+ spin_lock(&hba_lock);
+ hba->hba_id = hba_id_counter++;
+ list_add_tail(&hba->hba_node, &hba_list);
+ spin_unlock(&hba_lock);
- printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
+ pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
" Core\n", hba->hba_id);
return hba;
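
core_alloc_hba()/core_delete_hba() now keep their bookkeeping in file-local state (hba_lock, hba_list, hba_id_counter) instead of the old se_global container. The pattern is simply a lock-protected list plus a monotonically increasing id; a userspace sketch with pthreads, where struct hba and hba_register() are illustrative names and the list is prepended for brevity where the kernel appends with list_add_tail().

#include <stdio.h>
#include <pthread.h>

struct hba {
	unsigned int id;
	struct hba *next;
};

static unsigned int hba_id_counter;
static pthread_mutex_t hba_lock = PTHREAD_MUTEX_INITIALIZER;
static struct hba *hba_list;

static void hba_register(struct hba *hba)
{
	pthread_mutex_lock(&hba_lock);
	hba->id = hba_id_counter++;	/* ids are handed out under the lock */
	hba->next = hba_list;
	hba_list = hba;
	pthread_mutex_unlock(&hba_lock);
}

int main(void)
{
	struct hba a = { 0 }, b = { 0 };

	hba_register(&a);
	hba_register(&b);
	printf("registered HBA ids: %u %u\n", a.id, b.id);
	return 0;
}
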
@@ -156,11 +157,11 @@ core_delete_hba(struct se_hba *hba)
hba->transport->detach_hba(hba);
- spin_lock(&se_global->hba_lock);
- list_del(&hba->hba_list);
- spin_unlock(&se_global->hba_lock);
+ spin_lock(&hba_lock);
+ list_del(&hba->hba_node);
+ spin_unlock(&hba_lock);
- printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
+ pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);
if (hba->transport->owner)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 86639004af9..7e123410544 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -47,12 +47,6 @@
#include "target_core_iblock.h"
-#if 0
-#define DEBUG_IBLOCK(x...) printk(x)
-#else
-#define DEBUG_IBLOCK(x...)
-#endif
-
static struct se_subsystem_api iblock_template;
static void iblock_bio_done(struct bio *, int);
@@ -66,25 +60,22 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
struct iblock_hba *ib_host;
ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
- if (!(ib_host)) {
- printk(KERN_ERR "Unable to allocate memory for"
+ if (!ib_host) {
+ pr_err("Unable to allocate memory for"
" struct iblock_hba\n");
return -ENOMEM;
}
ib_host->iblock_host_id = host_id;
- atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
- atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
- hba->hba_ptr = (void *) ib_host;
+ hba->hba_ptr = ib_host;
- printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
+ pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
- printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
- " Target Core TCQ Depth: %d\n", hba->hba_id,
- ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
+ pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
+ hba->hba_id, ib_host->iblock_host_id);
return 0;
}
@@ -93,7 +84,7 @@ static void iblock_detach_hba(struct se_hba *hba)
{
struct iblock_hba *ib_host = hba->hba_ptr;
- printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
+ pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
" Target Core\n", hba->hba_id, ib_host->iblock_host_id);
kfree(ib_host);
@@ -106,13 +97,13 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
struct iblock_hba *ib_host = hba->hba_ptr;
ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
- if (!(ib_dev)) {
- printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
+ if (!ib_dev) {
+ pr_err("Unable to allocate struct iblock_dev\n");
return NULL;
}
ib_dev->ibd_host = ib_host;
- printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name);
+ pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
return ib_dev;
}
@@ -131,8 +122,8 @@ static struct se_device *iblock_create_virtdevice(
u32 dev_flags = 0;
int ret = -EINVAL;
- if (!(ib_dev)) {
- printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
+ if (!ib_dev) {
+ pr_err("Unable to locate struct iblock_dev parameter\n");
return ERR_PTR(ret);
}
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
@@ -140,16 +131,16 @@ static struct se_device *iblock_create_virtdevice(
* These settings need to be made tunable..
*/
ib_dev->ibd_bio_set = bioset_create(32, 64);
- if (!(ib_dev->ibd_bio_set)) {
- printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
+ if (!ib_dev->ibd_bio_set) {
+ pr_err("IBLOCK: Unable to create bioset()\n");
return ERR_PTR(-ENOMEM);
}
- printk(KERN_INFO "IBLOCK: Created bio_set()\n");
+ pr_debug("IBLOCK: Created bio_set()\n");
/*
* iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
* must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
*/
- printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n",
+ pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path);
bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
@@ -167,42 +158,41 @@ static struct se_device *iblock_create_virtdevice(
limits->logical_block_size = bdev_logical_block_size(bd);
limits->max_hw_sectors = queue_max_hw_sectors(q);
limits->max_sectors = queue_max_sectors(q);
- dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
- dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;
+ dev_limits.hw_queue_depth = q->nr_requests;
+ dev_limits.queue_depth = q->nr_requests;
- ib_dev->ibd_major = MAJOR(bd->bd_dev);
- ib_dev->ibd_minor = MINOR(bd->bd_dev);
ib_dev->ibd_bd = bd;
dev = transport_add_device_to_core_hba(hba,
- &iblock_template, se_dev, dev_flags, (void *)ib_dev,
+ &iblock_template, se_dev, dev_flags, ib_dev,
&dev_limits, "IBLOCK", IBLOCK_VERSION);
- if (!(dev))
+ if (!dev)
goto failed;
- ib_dev->ibd_depth = dev->queue_depth;
-
/*
* Check if the underlying struct block_device request_queue supports
* the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
* in ATA and we need to set TPE=1
*/
if (blk_queue_discard(q)) {
- DEV_ATTRIB(dev)->max_unmap_lba_count =
+ dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
q->limits.max_discard_sectors;
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
- DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
- DEV_ATTRIB(dev)->unmap_granularity =
+ dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity =
q->limits.discard_granularity;
- DEV_ATTRIB(dev)->unmap_granularity_alignment =
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
q->limits.discard_alignment;
- printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
+ pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
}
+ if (blk_queue_nonrot(q))
+ dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
+
return dev;
failed:
@@ -211,8 +201,6 @@ failed:
ib_dev->ibd_bio_set = NULL;
}
ib_dev->ibd_bd = NULL;
- ib_dev->ibd_major = 0;
- ib_dev->ibd_minor = 0;
return ERR_PTR(ret);
}
@@ -233,17 +221,16 @@ static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
}
static struct se_task *
-iblock_alloc_task(struct se_cmd *cmd)
+iblock_alloc_task(unsigned char *cdb)
{
struct iblock_req *ib_req;
ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
- if (!(ib_req)) {
- printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
+ if (!ib_req) {
+ pr_err("Unable to allocate memory for struct iblock_req\n");
return NULL;
}
- ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
atomic_set(&ib_req->ib_bio_cnt, 0);
return &ib_req->ib_task;
}
@@ -257,12 +244,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
bdev_logical_block_size(bd)) - 1);
u32 block_size = bdev_logical_block_size(bd);
- if (block_size == DEV_ATTRIB(dev)->block_size)
+ if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
return blocks_long;
switch (block_size) {
case 4096:
- switch (DEV_ATTRIB(dev)->block_size) {
+ switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 2048:
blocks_long <<= 1;
break;
@@ -276,7 +263,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 2048:
- switch (DEV_ATTRIB(dev)->block_size) {
+ switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 1;
break;
@@ -291,7 +278,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 1024:
- switch (DEV_ATTRIB(dev)->block_size) {
+ switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 2;
break;
@@ -306,7 +293,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 512:
- switch (DEV_ATTRIB(dev)->block_size) {
+ switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 3;
break;
@@ -332,9 +319,9 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
*/
static void iblock_emulate_sync_cache(struct se_task *task)
{
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
- int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
+ int immed = (cmd->t_task_cdb[1] & 0x2);
sector_t error_sector;
int ret;
@@ -351,7 +338,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
*/
ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
if (ret != 0) {
- printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
+ pr_err("IBLOCK: block_issue_flush() failed: %d "
" error_sector: %llu\n", ret,
(unsigned long long)error_sector);
}
@@ -401,9 +388,9 @@ static int iblock_do_task(struct se_task *task)
* Force data to disk if we pretend to not have a volatile
* write cache, or the initiator set the Force Unit Access bit.
*/
- if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
- (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
- T_TASK(task->task_se_cmd)->t_tasks_fua))
+ if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+ task->task_se_cmd->t_tasks_fua))
rw = WRITE_FUA;
else
rw = WRITE;
@@ -415,8 +402,9 @@ static int iblock_do_task(struct se_task *task)
while (bio) {
nbio = bio->bi_next;
bio->bi_next = NULL;
- DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
- " bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
+ pr_debug("Calling submit_bio() task: %p bio: %p"
+ " bio->bi_sector: %llu\n", task, bio,
+ (unsigned long long)bio->bi_sector);
submit_bio(rw, bio);
bio = nbio;
@@ -470,7 +458,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, arg, token;
+ int ret = 0, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -486,7 +474,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
switch (token) {
case Opt_udev_path:
if (ib_dev->ibd_bd) {
- printk(KERN_ERR "Unable to set udev_path= while"
+ pr_err("Unable to set udev_path= while"
" ib_dev->ibd_bd exists\n");
ret = -EEXIST;
goto out;
@@ -499,15 +487,11 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
"%s", arg_p);
kfree(arg_p);
- printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
+ pr_debug("IBLOCK: Referencing UDEV path: %s\n",
ib_dev->ibd_udev_path);
ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
break;
case Opt_force:
- match_int(args, &arg);
- ib_dev->ibd_force = arg;
- printk(KERN_INFO "IBLOCK: Set force=%d\n",
- ib_dev->ibd_force);
break;
default:
break;
@@ -526,8 +510,8 @@ static ssize_t iblock_check_configfs_dev_params(
struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
- printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
- return -1;
+ pr_err("Missing udev_path= parameters for IBLOCK\n");
+ return -EINVAL;
}
return 0;
@@ -555,12 +539,11 @@ static ssize_t iblock_show_configfs_dev_params(
bl += sprintf(b + bl, " ");
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
- ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
+ MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
"CLAIMED: IBLOCK" : "CLAIMED: OS");
} else {
- bl += sprintf(b + bl, "Major: %d Minor: %d\n",
- ibd->ibd_major, ibd->ibd_minor);
+ bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
}
return bl;
@@ -585,103 +568,103 @@ static struct bio *iblock_get_bio(
struct bio *bio;
bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
- if (!(bio)) {
- printk(KERN_ERR "Unable to allocate memory for bio\n");
+ if (!bio) {
+ pr_err("Unable to allocate memory for bio\n");
*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
return NULL;
}
- DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
- " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
- DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);
+ pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
+ " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
+ pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
bio->bi_bdev = ib_dev->ibd_bd;
- bio->bi_private = (void *) task;
+ bio->bi_private = task;
bio->bi_destructor = iblock_bio_destructor;
bio->bi_end_io = &iblock_bio_done;
bio->bi_sector = lba;
atomic_inc(&ib_req->ib_bio_cnt);
- DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
- DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
+ pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
+ pr_debug("Set ib_req->ib_bio_cnt: %d\n",
atomic_read(&ib_req->ib_bio_cnt));
return bio;
}
-static int iblock_map_task_SG(struct se_task *task)
+static int iblock_map_data_SG(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
struct iblock_req *ib_req = IBLOCK_REQ(task);
struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
struct scatterlist *sg;
int ret = 0;
- u32 i, sg_num = task->task_sg_num;
+ u32 i, sg_num = task->task_sg_nents;
sector_t block_lba;
/*
* Do starting conversion up from non 512-byte blocksize with
* struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
*/
- if (DEV_ATTRIB(dev)->block_size == 4096)
+ if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
block_lba = (task->task_lba << 3);
- else if (DEV_ATTRIB(dev)->block_size == 2048)
+ else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
block_lba = (task->task_lba << 2);
- else if (DEV_ATTRIB(dev)->block_size == 1024)
+ else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
block_lba = (task->task_lba << 1);
- else if (DEV_ATTRIB(dev)->block_size == 512)
+ else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
block_lba = task->task_lba;
else {
- printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
- " %u\n", DEV_ATTRIB(dev)->block_size);
+ pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
+ " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
- if (!(bio))
+ if (!bio)
return ret;
ib_req->ib_bio = bio;
hbio = tbio = bio;
/*
* Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist
- * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
+ * from task->task_sg -> struct scatterlist memory.
*/
- for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
- DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ pr_debug("task: %p bio: %p Calling bio_add_page(): page:"
" %p len: %u offset: %u\n", task, bio, sg_page(sg),
sg->length, sg->offset);
again:
ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
if (ret != sg->length) {
- DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
- bio->bi_sector);
- DEBUG_IBLOCK("** task->task_size: %u\n",
+ pr_debug("*** Set bio->bi_sector: %llu\n",
+ (unsigned long long)bio->bi_sector);
+ pr_debug("** task->task_size: %u\n",
task->task_size);
- DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
+ pr_debug("*** bio->bi_max_vecs: %u\n",
bio->bi_max_vecs);
- DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
+ pr_debug("*** bio->bi_vcnt: %u\n",
bio->bi_vcnt);
bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
block_lba, sg_num);
- if (!(bio))
+ if (!bio)
goto fail;
tbio = tbio->bi_next = bio;
- DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
+ pr_debug("-----------------> Added +1 bio: %p to"
" list, Going to again\n", bio);
goto again;
}
/* Always in 512 byte units for Linux/Block */
block_lba += sg->length >> IBLOCK_LBA_SHIFT;
sg_num--;
- DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented"
+ pr_debug("task: %p bio-add_page() passed!, decremented"
" sg_num to %u\n", task, sg_num);
- DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
- " to %llu\n", task, block_lba);
- DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
+ pr_debug("task: %p bio_add_page() passed!, increased lba"
+ " to %llu\n", task, (unsigned long long)block_lba);
+ pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:"
" %u\n", task, bio->bi_vcnt);
}
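
The top of iblock_map_data_SG() converts the task's SCSI LBA into 512-byte sectors for the block layer by shifting according to the configured block size (4096 -> <<3, 2048 -> <<2, 1024 -> <<1, 512 unchanged). The same conversion stood alone; the function name and sample LBAs are illustrative.

#include <stdio.h>

static unsigned long long scsi_lba_to_512(unsigned long long lba,
					  unsigned int block_size)
{
	switch (block_size) {
	case 4096: return lba << 3;	/* 4096 / 512 == 8 */
	case 2048: return lba << 2;
	case 1024: return lba << 1;
	case 512:  return lba;
	default:   return ~0ULL;	/* unsupported, caller must reject */
	}
}

int main(void)
{
	printf("LBA 10 @4k  blocks -> sector %llu\n", scsi_lba_to_512(10, 4096));
	printf("LBA 10 @512 blocks -> sector %llu\n", scsi_lba_to_512(10, 512));
	return 0;
}
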
@@ -727,11 +710,11 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Set -EIO if !BIO_UPTODATE and the passed is still err=0
*/
- if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
err = -EIO;
if (err != 0) {
- printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
+ pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
" err: %d\n", bio, err);
/*
* Bump the ib_bio_err_cnt and release bio.
@@ -742,15 +725,15 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Wait to complete the task until the last bio as completed.
*/
- if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+ if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
return;
ibr->ib_bio = NULL;
transport_complete_task(task, 0);
return;
}
- DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
- task, bio, task->task_lba, bio->bi_sector, err);
+ pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
+ task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err);
/*
* bio_put() will call iblock_bio_destructor() to release the bio back
* to ibr->ib_bio_set.
@@ -759,7 +742,7 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Wait to complete the task until the last bio as completed.
*/
- if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+ if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
return;
/*
* Return GOOD status for task if zero ib_bio_err_cnt exists.
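
iblock_bio_done() only completes the task once the last outstanding bio drops ib_bio_cnt to zero, which is exactly what atomic_dec_and_test() expresses. A userspace sketch of that completion pattern with C11 atomics; the counter value and function names here are illustrative.

#include <stdio.h>
#include <stdatomic.h>

static atomic_int bio_cnt;

static void bio_done(void)
{
	/* Like atomic_dec_and_test(): only the caller that reaches zero completes. */
	if (atomic_fetch_sub(&bio_cnt, 1) - 1 != 0)
		return;
	printf("last bio finished, completing the task\n");
}

int main(void)
{
	atomic_store(&bio_cnt, 3);	/* three bios submitted for one task */
	bio_done();
	bio_done();
	bio_done();			/* this call prints the completion line */
	return 0;
}
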
@@ -772,7 +755,7 @@ static struct se_subsystem_api iblock_template = {
.name = "iblock",
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
- .map_task_SG = iblock_map_task_SG,
+ .map_data_SG = iblock_map_data_SG,
.attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba,
.allocate_virtdevice = iblock_allocate_virtdevice,
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 64c1f4d69f7..a121cd1b657 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -3,9 +3,6 @@
#define IBLOCK_VERSION "4.0"
-#define IBLOCK_HBA_QUEUE_DEPTH 512
-#define IBLOCK_DEVICE_QUEUE_DEPTH 32
-#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128
#define IBLOCK_MAX_CDBS 16
#define IBLOCK_LBA_SHIFT 9
@@ -15,18 +12,12 @@ struct iblock_req {
atomic_t ib_bio_cnt;
atomic_t ib_bio_err_cnt;
struct bio *ib_bio;
- struct iblock_dev *ib_dev;
} ____cacheline_aligned;
#define IBDF_HAS_UDEV_PATH 0x01
-#define IBDF_HAS_FORCE 0x02
struct iblock_dev {
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
- int ibd_force;
- int ibd_major;
- int ibd_minor;
- u32 ibd_depth;
u32 ibd_flags;
struct bio_set *ibd_bio_set;
struct block_device *ibd_bd;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index a79f518ca6e..1c1b849cd4f 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -62,7 +62,7 @@ int core_pr_dump_initiator_port(
char *buf,
u32 size)
{
- if (!(pr_reg->isid_present_at_reg))
+ if (!pr_reg->isid_present_at_reg)
return 0;
snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
@@ -95,7 +95,7 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
struct se_session *sess = cmd->se_sess;
int ret;
- if (!(sess))
+ if (!sess)
return 0;
spin_lock(&dev->dev_reservation_lock);
@@ -105,13 +105,13 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
}
if (dev->dev_reserved_node_acl != sess->se_node_acl) {
spin_unlock(&dev->dev_reservation_lock);
- return -1;
+ return -EINVAL;
}
if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
spin_unlock(&dev->dev_reservation_lock);
return 0;
}
- ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1;
+ ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -EINVAL;
spin_unlock(&dev->dev_reservation_lock);
return ret;
@@ -123,7 +123,7 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd)
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg = sess->se_tpg;
- if (!(sess) || !(tpg))
+ if (!sess || !tpg)
return 0;
spin_lock(&dev->dev_reservation_lock);
@@ -142,9 +142,9 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd)
dev->dev_res_bin_isid = 0;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
}
- printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->"
- " MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(),
- SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+ pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
+ " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -157,9 +157,9 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg = sess->se_tpg;
- if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) &&
- (T_TASK(cmd)->t_task_cdb[1] & 0x02)) {
- printk(KERN_ERR "LongIO and Obselete Bits set, returning"
+ if ((cmd->t_task_cdb[1] & 0x01) &&
+ (cmd->t_task_cdb[1] & 0x02)) {
+ pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
return PYX_TRANSPORT_ILLEGAL_REQUEST;
}
@@ -167,19 +167,19 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
* This is currently the case for target_core_mod passthrough struct se_cmd
* ops
*/
- if (!(sess) || !(tpg))
+ if (!sess || !tpg)
return 0;
spin_lock(&dev->dev_reservation_lock);
if (dev->dev_reserved_node_acl &&
(dev->dev_reserved_node_acl != sess->se_node_acl)) {
- printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
- TPG_TFO(tpg)->get_fabric_name());
- printk(KERN_ERR "Original reserver LUN: %u %s\n",
- SE_LUN(cmd)->unpacked_lun,
+ pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+ tpg->se_tpg_tfo->get_fabric_name());
+ pr_err("Original reserver LUN: %u %s\n",
+ cmd->se_lun->unpacked_lun,
dev->dev_reserved_node_acl->initiatorname);
- printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u"
- " from %s \n", SE_LUN(cmd)->unpacked_lun,
+ pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
+ " from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -192,9 +192,9 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
dev->dev_res_bin_isid = sess->sess_bin_isid;
dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
}
- printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
- " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
- SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+ pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
+ " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -215,15 +215,15 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
struct se_session *se_sess = cmd->se_sess;
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
- struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation;
- unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
- int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS);
+ struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
+ unsigned char *cdb = &cmd->t_task_cdb[0];
+ int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
int conflict = 0;
- if (!(se_sess))
+ if (!se_sess)
return 0;
- if (!(crh))
+ if (!crh)
goto after_crh;
pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -280,7 +280,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
}
if (conflict) {
- printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE"
+ pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -307,7 +307,7 @@ static int core_scsi3_pr_seq_non_holder(
u32 pr_reg_type)
{
struct se_dev_entry *se_deve;
- struct se_session *se_sess = SE_SESS(cmd);
+ struct se_session *se_sess = cmd->se_sess;
int other_cdb = 0, ignore_reg;
int registered_nexus = 0, ret = 1; /* Conflict by default */
int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
@@ -362,7 +362,7 @@ static int core_scsi3_pr_seq_non_holder(
registered_nexus = 1;
break;
default:
- return -1;
+ return -EINVAL;
}
/*
* Referenced from spc4r17 table 45 for *NON* PR holder access
@@ -412,9 +412,9 @@ static int core_scsi3_pr_seq_non_holder(
ret = (registered_nexus) ? 0 : 1;
break;
default:
- printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+ pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
- return -1;
+ return -EINVAL;
}
break;
case RELEASE:
@@ -459,9 +459,9 @@ static int core_scsi3_pr_seq_non_holder(
ret = 0; /* Allowed */
break;
default:
- printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n",
+ pr_err("Unknown MI Service Action: 0x%02x\n",
(cdb[1] & 0x1f));
- return -1;
+ return -EINVAL;
}
break;
case ACCESS_CONTROL_IN:
@@ -481,9 +481,9 @@ static int core_scsi3_pr_seq_non_holder(
* Case where the CDB is explicitly allowed in the above switch
* statement.
*/
- if (!(ret) && !(other_cdb)) {
+ if (!ret && !other_cdb) {
#if 0
- printk(KERN_INFO "Allowing explict CDB: 0x%02x for %s"
+ pr_debug("Allowing explict CDB: 0x%02x for %s"
" reservation holder\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
#endif
@@ -498,7 +498,7 @@ static int core_scsi3_pr_seq_non_holder(
/*
* Conflict for write exclusive
*/
- printk(KERN_INFO "%s Conflict for unregistered nexus"
+ pr_debug("%s Conflict for unregistered nexus"
" %s CDB: 0x%02x to %s reservation\n",
transport_dump_cmd_direction(cmd),
se_sess->se_node_acl->initiatorname, cdb[0],
@@ -515,8 +515,8 @@ static int core_scsi3_pr_seq_non_holder(
* nexuses to issue CDBs.
*/
#if 0
- if (!(registered_nexus)) {
- printk(KERN_INFO "Allowing implict CDB: 0x%02x"
+ if (!registered_nexus) {
+ pr_debug("Allowing implict CDB: 0x%02x"
" for %s reservation on unregistered"
" nexus\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
@@ -531,14 +531,14 @@ static int core_scsi3_pr_seq_non_holder(
* allow commands from registered nexuses.
*/
#if 0
- printk(KERN_INFO "Allowing implict CDB: 0x%02x for %s"
+ pr_debug("Allowing implict CDB: 0x%02x for %s"
" reservation\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
#endif
return 0;
}
}
- printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%2x"
+ pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
" for %s reservation\n", transport_dump_cmd_direction(cmd),
(registered_nexus) ? "" : "un",
se_sess->se_node_acl->initiatorname, cdb[0],
@@ -549,7 +549,7 @@ static int core_scsi3_pr_seq_non_holder(
static u32 core_scsi3_pr_generation(struct se_device *dev)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
u32 prg;
/*
* PRGeneration field shall contain the value of a 32-bit wrapping
@@ -561,7 +561,7 @@ static u32 core_scsi3_pr_generation(struct se_device *dev)
* See spc4r17 section 6.3.12 READ_KEYS service action
*/
spin_lock(&dev->dev_reservation_lock);
- prg = T10_RES(su_dev)->pr_generation++;
+ prg = su_dev->t10_pr.pr_generation++;
spin_unlock(&dev->dev_reservation_lock);
return prg;
@@ -575,7 +575,7 @@ static int core_scsi3_pr_reservation_check(
struct se_session *sess = cmd->se_sess;
int ret;
- if (!(sess))
+ if (!sess)
return 0;
/*
* A legacy SPC-2 reservation is being held.
@@ -584,7 +584,7 @@ static int core_scsi3_pr_reservation_check(
return core_scsi2_reservation_check(cmd, pr_reg_type);
spin_lock(&dev->dev_reservation_lock);
- if (!(dev->dev_pr_res_holder)) {
+ if (!dev->dev_pr_res_holder) {
spin_unlock(&dev->dev_reservation_lock);
return 0;
}
@@ -592,14 +592,14 @@ static int core_scsi3_pr_reservation_check(
cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
spin_unlock(&dev->dev_reservation_lock);
- return -1;
+ return -EINVAL;
}
- if (!(dev->dev_pr_res_holder->isid_present_at_reg)) {
+ if (!dev->dev_pr_res_holder->isid_present_at_reg) {
spin_unlock(&dev->dev_reservation_lock);
return 0;
}
ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
- sess->sess_bin_isid) ? 0 : -1;
+ sess->sess_bin_isid) ? 0 : -EINVAL;
/*
* Use bit in *pr_reg_type to notify ISID mismatch in
* core_scsi3_pr_seq_non_holder().
@@ -620,19 +620,19 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
int all_tg_pt,
int aptpl)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
- if (!(pr_reg)) {
- printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+ if (!pr_reg) {
+ pr_err("Unable to allocate struct t10_pr_registration\n");
return NULL;
}
- pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len,
+ pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len,
GFP_ATOMIC);
- if (!(pr_reg->pr_aptpl_buf)) {
- printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
+ if (!pr_reg->pr_aptpl_buf) {
+ pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n");
kmem_cache_free(t10_pr_reg_cache, pr_reg);
return NULL;
}
@@ -692,12 +692,12 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
*/
pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
sa_res_key, all_tg_pt, aptpl);
- if (!(pr_reg))
+ if (!pr_reg)
return NULL;
/*
* Return pointer to pr_reg for ALL_TG_PT=0
*/
- if (!(all_tg_pt))
+ if (!all_tg_pt)
return pr_reg;
/*
* Create list of matching SCSI Initiator Port registrations
@@ -717,7 +717,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
* that have not been make explict via a ConfigFS
* MappedLUN group for the SCSI Initiator Node ACL.
*/
- if (!(deve_tmp->se_lun_acl))
+ if (!deve_tmp->se_lun_acl)
continue;
nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
@@ -751,7 +751,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
*/
ret = core_scsi3_lunacl_depend_item(deve_tmp);
if (ret < 0) {
- printk(KERN_ERR "core_scsi3_lunacl_depend"
+ pr_err("core_scsi3_lunacl_depend"
"_item() failed\n");
atomic_dec(&port->sep_tg_pt_ref_cnt);
smp_mb__after_atomic_dec();
@@ -769,7 +769,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
nacl_tmp, deve_tmp, NULL,
sa_res_key, all_tg_pt, aptpl);
- if (!(pr_reg_atp)) {
+ if (!pr_reg_atp) {
atomic_dec(&port->sep_tg_pt_ref_cnt);
smp_mb__after_atomic_dec();
atomic_dec(&deve_tmp->pr_ref_count);
@@ -803,7 +803,7 @@ out:
}
int core_scsi3_alloc_aptpl_registration(
- struct t10_reservation_template *pr_tmpl,
+ struct t10_reservation *pr_tmpl,
u64 sa_res_key,
unsigned char *i_port,
unsigned char *isid,
@@ -817,15 +817,15 @@ int core_scsi3_alloc_aptpl_registration(
{
struct t10_pr_registration *pr_reg;
- if (!(i_port) || !(t_port) || !(sa_res_key)) {
- printk(KERN_ERR "Illegal parameters for APTPL registration\n");
- return -1;
+ if (!i_port || !t_port || !sa_res_key) {
+ pr_err("Illegal parameters for APTPL registration\n");
+ return -EINVAL;
}
pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
- if (!(pr_reg)) {
- printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
- return -1;
+ if (!pr_reg) {
+ pr_err("Unable to allocate struct t10_pr_registration\n");
+ return -ENOMEM;
}
pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
@@ -869,7 +869,7 @@ int core_scsi3_alloc_aptpl_registration(
pr_reg->pr_res_holder = res_holder;
list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
- printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from"
+ pr_debug("SPC-3 PR APTPL Successfully added registration%s from"
" metadata\n", (res_holder) ? "+reservation" : "");
return 0;
}
@@ -891,13 +891,13 @@ static void core_scsi3_aptpl_reserve(
dev->dev_pr_res_holder = pr_reg;
spin_unlock(&dev->dev_reservation_lock);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created"
+ pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created"
" new reservation holder TYPE: %s ALL_TG_PT: %d\n",
- TPG_TFO(tpg)->get_fabric_name(),
+ tpg->se_tpg_tfo->get_fabric_name(),
core_scsi3_pr_dump_type(pr_reg->pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
- printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
- TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname,
+ pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
+ tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
}
@@ -913,7 +913,7 @@ static int __core_scsi3_check_aptpl_registration(
struct se_dev_entry *deve)
{
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
u16 tpgt;
@@ -925,8 +925,8 @@ static int __core_scsi3_check_aptpl_registration(
*/
snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname);
snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s",
- TPG_TFO(tpg)->tpg_get_wwn(tpg));
- tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg));
+ tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
/*
* Look for the matching registrations+reservation from those
* created from APTPL metadata. Note that multiple registrations
@@ -936,7 +936,7 @@ static int __core_scsi3_check_aptpl_registration(
spin_lock(&pr_tmpl->aptpl_reg_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
pr_reg_aptpl_list) {
- if (!(strcmp(pr_reg->pr_iport, i_port)) &&
+ if (!strcmp(pr_reg->pr_iport, i_port) &&
(pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
!(strcmp(pr_reg->pr_tport, t_port)) &&
(pr_reg->pr_reg_tpgt == tpgt) &&
@@ -980,11 +980,11 @@ int core_scsi3_check_aptpl_registration(
struct se_lun *lun,
struct se_lun_acl *lun_acl)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct se_node_acl *nacl = lun_acl->se_lun_nacl;
struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun];
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
@@ -1006,19 +1006,19 @@ static void __core_scsi3_dump_registration(
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
+ pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
" Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
"_AND_MOVE" : (register_type == 1) ?
"_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
(prf_isid) ? i_buf : "");
- printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
+ pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
tfo->tpg_get_tag(se_tpg));
- printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+ pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
" Port(s)\n", tfo->get_fabric_name(),
(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
- TRANSPORT(dev)->name);
- printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+ dev->transport->name);
+ pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
" 0x%08x APTPL: %d\n", tfo->get_fabric_name(),
pr_reg->pr_res_key, pr_reg->pr_res_generation,
pr_reg->pr_reg_aptpl);
@@ -1035,10 +1035,10 @@ static void __core_scsi3_add_registration(
int register_type,
int register_move)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
/*
* Increment PRgeneration counter for struct se_device upon a successful
@@ -1050,7 +1050,7 @@ static void __core_scsi3_add_registration(
* for the REGISTER.
*/
pr_reg->pr_res_generation = (register_move) ?
- T10_RES(su_dev)->pr_generation++ :
+ su_dev->t10_pr.pr_generation++ :
core_scsi3_pr_generation(dev);
spin_lock(&pr_tmpl->registration_lock);
@@ -1062,7 +1062,7 @@ static void __core_scsi3_add_registration(
/*
* Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
*/
- if (!(pr_reg->pr_reg_all_tg_pt) || (register_move))
+ if (!pr_reg->pr_reg_all_tg_pt || register_move)
return;
/*
* Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
@@ -1106,8 +1106,8 @@ static int core_scsi3_alloc_registration(
pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
sa_res_key, all_tg_pt, aptpl);
- if (!(pr_reg))
- return -1;
+ if (!pr_reg)
+ return -EPERM;
__core_scsi3_add_registration(dev, nacl, pr_reg,
register_type, register_move);
@@ -1119,7 +1119,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
struct se_node_acl *nacl,
unsigned char *isid)
{
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct se_portal_group *tpg;
@@ -1137,14 +1137,14 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
* If this registration does NOT contain a fabric provided
* ISID, then we have found a match.
*/
- if (!(pr_reg->isid_present_at_reg)) {
+ if (!pr_reg->isid_present_at_reg) {
/*
* Determine if this SCSI device server requires that
* SCSI Intiatior TransportID w/ ISIDs is enforced
* for fabric modules (iSCSI) requiring them.
*/
- if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
- if (DEV_ATTRIB(dev)->enforce_pr_isids)
+ if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
+ if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids)
continue;
}
atomic_inc(&pr_reg->pr_res_holders);
@@ -1157,7 +1157,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
* SCSI Initiator Port TransportIDs, then we expect a valid
* matching ISID to be provided by the local SCSI Initiator Port.
*/
- if (!(isid))
+ if (!isid)
continue;
if (strcmp(isid, pr_reg->pr_reg_isid))
continue;
@@ -1180,9 +1180,9 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
struct se_portal_group *tpg = nacl->se_tpg;
unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
- if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
+ if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
memset(&buf[0], 0, PR_REG_ISID_LEN);
- TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0],
+ tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0],
PR_REG_ISID_LEN);
isid_ptr = &buf[0];
}
@@ -1206,7 +1206,7 @@ static int core_scsi3_check_implict_release(
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
- if (!(pr_res_holder)) {
+ if (!pr_res_holder) {
spin_unlock(&dev->dev_reservation_lock);
return ret;
}
@@ -1236,11 +1236,11 @@ static int core_scsi3_check_implict_release(
(!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
pr_reg->pr_reg_nacl->initiatorname)) &&
(pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
- printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1"
+ pr_err("SPC-3 PR: Unable to perform ALL_TG_PT=1"
" UNREGISTER while existing reservation with matching"
" key 0x%016Lx is present from another SCSI Initiator"
" Port\n", pr_reg->pr_res_key);
- ret = -1;
+ ret = -EPERM;
}
spin_unlock(&dev->dev_reservation_lock);
@@ -1248,7 +1248,7 @@ static int core_scsi3_check_implict_release(
}
/*
- * Called with struct t10_reservation_template->registration_lock held.
+ * Called with struct t10_reservation->registration_lock held.
*/
static void __core_scsi3_free_registration(
struct se_device *dev,
@@ -1258,7 +1258,7 @@ static void __core_scsi3_free_registration(
{
struct target_core_fabric_ops *tfo =
pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
int prf_isid;
@@ -1283,25 +1283,25 @@ static void __core_scsi3_free_registration(
*/
while (atomic_read(&pr_reg->pr_res_holders) != 0) {
spin_unlock(&pr_tmpl->registration_lock);
- printk("SPC-3 PR [%s] waiting for pr_res_holders\n",
+ pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
tfo->get_fabric_name());
cpu_relax();
spin_lock(&pr_tmpl->registration_lock);
}
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
+ pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
" Node: %s%s\n", tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
- printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+ pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
" Port(s)\n", tfo->get_fabric_name(),
(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
- TRANSPORT(dev)->name);
- printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+ dev->transport->name);
+ pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
" 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
pr_reg->pr_res_generation);
- if (!(preempt_and_abort_list)) {
+ if (!preempt_and_abort_list) {
pr_reg->pr_reg_deve = NULL;
pr_reg->pr_reg_nacl = NULL;
kfree(pr_reg->pr_aptpl_buf);
@@ -1319,7 +1319,7 @@ void core_scsi3_free_pr_reg_from_nacl(
struct se_device *dev,
struct se_node_acl *nacl)
{
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
/*
* If the passed se_node_acl matches the reservation holder,
@@ -1349,7 +1349,7 @@ void core_scsi3_free_pr_reg_from_nacl(
void core_scsi3_free_all_registrations(
struct se_device *dev)
{
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
spin_lock(&dev->dev_reservation_lock);
@@ -1381,13 +1381,13 @@ void core_scsi3_free_all_registrations(
static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
{
- return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
&tpg->tpg_group.cg_item);
}
static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
{
- configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
&tpg->tpg_group.cg_item);
atomic_dec(&tpg->tpg_pr_ref_count);
@@ -1401,7 +1401,7 @@ static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
if (nacl->dynamic_node_acl)
return 0;
- return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
&nacl->acl_group.cg_item);
}
@@ -1415,7 +1415,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
return;
}
- configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
&nacl->acl_group.cg_item);
atomic_dec(&nacl->acl_pr_ref_count);
@@ -1430,13 +1430,13 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
/*
* For nacl->dynamic_node_acl=1
*/
- if (!(lun_acl))
+ if (!lun_acl)
return 0;
nacl = lun_acl->se_lun_nacl;
tpg = nacl->se_tpg;
- return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
&lun_acl->se_lun_group.cg_item);
}
@@ -1448,7 +1448,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
/*
* For nacl->dynamic_node_acl=1
*/
- if (!(lun_acl)) {
+ if (!lun_acl) {
atomic_dec(&se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
return;
@@ -1456,7 +1456,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
nacl = lun_acl->se_lun_nacl;
tpg = nacl->se_tpg;
- configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
&lun_acl->se_lun_group.cg_item);
atomic_dec(&se_deve->pr_ref_count);
@@ -1471,10 +1471,10 @@ static int core_scsi3_decode_spec_i_port(
int all_tg_pt,
int aptpl)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_port *tmp_port;
struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
- struct se_session *se_sess = SE_SESS(cmd);
+ struct se_session *se_sess = cmd->se_sess;
struct se_node_acl *dest_node_acl = NULL;
struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
@@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port(
struct list_head tid_dest_list;
struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
struct target_core_fabric_ops *tmp_tf_ops;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
u32 tpdl, tid_len = 0;
@@ -1500,8 +1500,8 @@ static int core_scsi3_decode_spec_i_port(
* processing in the loop of tid_dest_list below.
*/
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
- if (!(tidh_new)) {
- printk(KERN_ERR "Unable to allocate tidh_new\n");
+ if (!tidh_new) {
+ pr_err("Unable to allocate tidh_new\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1509,10 +1509,10 @@ static int core_scsi3_decode_spec_i_port(
tidh_new->dest_node_acl = se_sess->se_node_acl;
tidh_new->dest_se_deve = local_se_deve;
- local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+ local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
se_sess->se_node_acl, local_se_deve, l_isid,
sa_res_key, all_tg_pt, aptpl);
- if (!(local_pr_reg)) {
+ if (!local_pr_reg) {
kfree(tidh_new);
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -1524,6 +1524,8 @@ static int core_scsi3_decode_spec_i_port(
*/
tidh_new->dest_local_nexus = 1;
list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+ buf = transport_kmap_first_data_page(cmd);
/*
* For a PERSISTENT RESERVE OUT specify initiator ports payload,
* first extract TransportID Parameter Data Length, and make sure
@@ -1535,7 +1537,7 @@ static int core_scsi3_decode_spec_i_port(
tpdl |= buf[27] & 0xff;
if ((tpdl + 28) != cmd->data_length) {
- printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header"
+ pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
" does not equal CDB data_length: %u\n", tpdl,
cmd->data_length);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -1555,13 +1557,13 @@ static int core_scsi3_decode_spec_i_port(
spin_lock(&dev->se_port_lock);
list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
tmp_tpg = tmp_port->sep_tpg;
- if (!(tmp_tpg))
+ if (!tmp_tpg)
continue;
- tmp_tf_ops = TPG_TFO(tmp_tpg);
- if (!(tmp_tf_ops))
+ tmp_tf_ops = tmp_tpg->se_tpg_tfo;
+ if (!tmp_tf_ops)
continue;
- if (!(tmp_tf_ops->get_fabric_proto_ident) ||
- !(tmp_tf_ops->tpg_parse_pr_out_transport_id))
+ if (!tmp_tf_ops->get_fabric_proto_ident ||
+ !tmp_tf_ops->tpg_parse_pr_out_transport_id)
continue;
/*
* Look for the matching proto_ident provided by
@@ -1575,7 +1577,7 @@ static int core_scsi3_decode_spec_i_port(
i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
tmp_tpg, (const char *)ptr, &tid_len,
&iport_ptr);
- if (!(i_str))
+ if (!i_str)
continue;
atomic_inc(&tmp_tpg->tpg_pr_ref_count);
@@ -1584,7 +1586,7 @@ static int core_scsi3_decode_spec_i_port(
ret = core_scsi3_tpg_depend_item(tmp_tpg);
if (ret != 0) {
- printk(KERN_ERR " core_scsi3_tpg_depend_item()"
+ pr_err(" core_scsi3_tpg_depend_item()"
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
@@ -1605,7 +1607,7 @@ static int core_scsi3_decode_spec_i_port(
}
spin_unlock_bh(&tmp_tpg->acl_node_lock);
- if (!(dest_node_acl)) {
+ if (!dest_node_acl) {
core_scsi3_tpg_undepend_item(tmp_tpg);
spin_lock(&dev->se_port_lock);
continue;
@@ -1613,7 +1615,7 @@ static int core_scsi3_decode_spec_i_port(
ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
if (ret != 0) {
- printk(KERN_ERR "configfs_depend_item() failed"
+ pr_err("configfs_depend_item() failed"
" for dest_node_acl->acl_group\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
@@ -1623,9 +1625,9 @@ static int core_scsi3_decode_spec_i_port(
}
dest_tpg = tmp_tpg;
- printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:"
+ pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:"
" %s Port RTPI: %hu\n",
- TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_node_acl->initiatorname, dest_rtpi);
spin_lock(&dev->se_port_lock);
@@ -1633,20 +1635,20 @@ static int core_scsi3_decode_spec_i_port(
}
spin_unlock(&dev->se_port_lock);
- if (!(dest_tpg)) {
- printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate"
+ if (!dest_tpg) {
+ pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
" dest_tpg\n");
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
#if 0
- printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
+ pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
" tid_len: %d for %s + %s\n",
- TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length,
+ dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
tpdl, tid_len, i_str, iport_ptr);
#endif
if (tid_len > tpdl) {
- printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:"
+ pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:"
" %u for Transport ID: %s\n", tid_len, ptr);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1660,10 +1662,10 @@ static int core_scsi3_decode_spec_i_port(
*/
dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
dest_rtpi);
- if (!(dest_se_deve)) {
- printk(KERN_ERR "Unable to locate %s dest_se_deve"
+ if (!dest_se_deve) {
+ pr_err("Unable to locate %s dest_se_deve"
" from destination RTPI: %hu\n",
- TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_rtpi);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
@@ -1674,7 +1676,7 @@ static int core_scsi3_decode_spec_i_port(
ret = core_scsi3_lunacl_depend_item(dest_se_deve);
if (ret < 0) {
- printk(KERN_ERR "core_scsi3_lunacl_depend_item()"
+ pr_err("core_scsi3_lunacl_depend_item()"
" failed\n");
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
@@ -1684,9 +1686,9 @@ static int core_scsi3_decode_spec_i_port(
goto out;
}
#if 0
- printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s"
+ pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
" dest_se_deve mapped_lun: %u\n",
- TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
#endif
/*
@@ -1712,8 +1714,8 @@ static int core_scsi3_decode_spec_i_port(
*/
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
GFP_KERNEL);
- if (!(tidh_new)) {
- printk(KERN_ERR "Unable to allocate tidh_new\n");
+ if (!tidh_new) {
+ pr_err("Unable to allocate tidh_new\n");
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1741,10 +1743,10 @@ static int core_scsi3_decode_spec_i_port(
* and then call __core_scsi3_add_registration() in the
* 2nd loop which will never fail.
*/
- dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+ dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_se_deve, iport_ptr,
sa_res_key, all_tg_pt, aptpl);
- if (!(dest_pr_reg)) {
+ if (!dest_pr_reg) {
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1760,6 +1762,9 @@ static int core_scsi3_decode_spec_i_port(
tid_len = 0;
}
+
+ transport_kunmap_first_data_page(cmd);
+
/*
* Go ahead and create a registrations from tid_dest_list for the
* SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl
@@ -1787,12 +1792,12 @@ static int core_scsi3_decode_spec_i_port(
prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
- __core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl,
+ __core_scsi3_add_registration(cmd->se_dev, dest_node_acl,
dest_pr_reg, 0, 0);
- printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully"
+ pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
" registered Transport ID for Node: %s%s Mapped LUN:"
- " %u\n", TPG_TFO(dest_tpg)->get_fabric_name(),
+ " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_node_acl->initiatorname, (prf_isid) ?
&i_buf[0] : "", dest_se_deve->mapped_lun);
@@ -1806,6 +1811,7 @@ static int core_scsi3_decode_spec_i_port(
return 0;
out:
+ transport_kunmap_first_data_page(cmd);
/*
* For the failure case, release everything from tid_dest_list
* including *dest_pr_reg and the configfs dependances..
@@ -1855,7 +1861,7 @@ static int __core_scsi3_update_aptpl_buf(
{
struct se_lun *lun;
struct se_portal_group *tpg;
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
unsigned char tmp[512], isid_buf[32];
ssize_t len = 0;
@@ -1873,8 +1879,8 @@ static int __core_scsi3_update_aptpl_buf(
/*
* Walk the registration list..
*/
- spin_lock(&T10_RES(su_dev)->registration_lock);
- list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ spin_lock(&su_dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
pr_reg_list) {
tmp[0] = '\0';
@@ -1900,7 +1906,7 @@ static int __core_scsi3_update_aptpl_buf(
"res_holder=1\nres_type=%02x\n"
"res_scope=%02x\nres_all_tg_pt=%d\n"
"mapped_lun=%u\n", reg_count,
- TPG_TFO(tpg)->get_fabric_name(),
+ tpg->se_tpg_tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname, isid_buf,
pr_reg->pr_res_key, pr_reg->pr_res_type,
pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
@@ -1910,17 +1916,17 @@ static int __core_scsi3_update_aptpl_buf(
"initiator_fabric=%s\ninitiator_node=%s\n%s"
"sa_res_key=%llu\nres_holder=0\n"
"res_all_tg_pt=%d\nmapped_lun=%u\n",
- reg_count, TPG_TFO(tpg)->get_fabric_name(),
+ reg_count, tpg->se_tpg_tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname, isid_buf,
pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
pr_reg->pr_res_mapped_lun);
}
- if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
- printk(KERN_ERR "Unable to update renaming"
+ if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
+ pr_err("Unable to update renaming"
" APTPL metadata\n");
- spin_unlock(&T10_RES(su_dev)->registration_lock);
- return -1;
+ spin_unlock(&su_dev->t10_pr.registration_lock);
+ return -EMSGSIZE;
}
len += sprintf(buf+len, "%s", tmp);
@@ -1929,23 +1935,23 @@ static int __core_scsi3_update_aptpl_buf(
*/
snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
"tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
- " %d\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ " %d\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
- if ((len + strlen(tmp) > pr_aptpl_buf_len)) {
- printk(KERN_ERR "Unable to update renaming"
+ if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
+ pr_err("Unable to update renaming"
" APTPL metadata\n");
- spin_unlock(&T10_RES(su_dev)->registration_lock);
- return -1;
+ spin_unlock(&su_dev->t10_pr.registration_lock);
+ return -EMSGSIZE;
}
len += sprintf(buf+len, "%s", tmp);
reg_count++;
}
- spin_unlock(&T10_RES(su_dev)->registration_lock);
+ spin_unlock(&su_dev->t10_pr.registration_lock);
- if (!(reg_count))
+ if (!reg_count)
len += sprintf(buf+len, "No Registrations or Reservations");
return 0;
@@ -1975,7 +1981,7 @@ static int __core_scsi3_write_aptpl_to_file(
unsigned char *buf,
u32 pr_aptpl_buf_len)
{
- struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn;
+ struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
struct file *file;
struct iovec iov[1];
mm_segment_t old_fs;
@@ -1986,22 +1992,22 @@ static int __core_scsi3_write_aptpl_to_file(
memset(iov, 0, sizeof(struct iovec));
memset(path, 0, 512);
- if (strlen(&wwn->unit_serial[0]) > 512) {
- printk(KERN_ERR "WWN value for struct se_device does not fit"
+ if (strlen(&wwn->unit_serial[0]) >= 512) {
+ pr_err("WWN value for struct se_device does not fit"
" into path buffer\n");
- return -1;
+ return -EMSGSIZE;
}
snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
file = filp_open(path, flags, 0600);
if (IS_ERR(file) || !file || !file->f_dentry) {
- printk(KERN_ERR "filp_open(%s) for APTPL metadata"
+ pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
- return -1;
+ return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
}
iov[0].iov_base = &buf[0];
- if (!(pr_aptpl_buf_len))
+ if (!pr_aptpl_buf_len)
iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
else
iov[0].iov_len = pr_aptpl_buf_len;
@@ -2012,9 +2018,9 @@ static int __core_scsi3_write_aptpl_to_file(
set_fs(old_fs);
if (ret < 0) {
- printk("Error writing APTPL metadata file: %s\n", path);
+ pr_debug("Error writing APTPL metadata file: %s\n", path);
filp_close(file, NULL);
- return -1;
+ return -EIO;
}
filp_close(file, NULL);
@@ -2032,7 +2038,7 @@ static int core_scsi3_update_and_write_aptpl(
/*
* Can be called with a NULL pointer from PROUT service action CLEAR
*/
- if (!(in_buf)) {
+ if (!in_buf) {
memset(null_buf, 0, 64);
buf = &null_buf[0];
/*
@@ -2049,14 +2055,14 @@ static int core_scsi3_update_and_write_aptpl(
ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
clear_aptpl_metadata);
if (ret != 0)
- return -1;
+ return ret;
/*
* __core_scsi3_write_aptpl_to_file() will call strlen()
* on the passed buf to determine pr_aptpl_buf_len.
*/
ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
if (ret != 0)
- return -1;
+ return ret;
return ret;
}
@@ -2070,28 +2076,28 @@ static int core_scsi3_emulate_pro_register(
int spec_i_pt,
int ignore_key)
{
- struct se_session *se_sess = SE_SESS(cmd);
- struct se_device *dev = SE_DEV(cmd);
+ struct se_session *se_sess = cmd->se_sess;
+ struct se_device *dev = cmd->se_dev;
struct se_dev_entry *se_deve;
- struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_lun *se_lun = cmd->se_lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
/* Used for APTPL metadata w/ UNREGISTER */
unsigned char *pr_aptpl_buf = NULL;
unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
int pr_holder = 0, ret = 0, type;
- if (!(se_sess) || !(se_lun)) {
- printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ if (!se_sess || !se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
- if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+ if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) {
memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
- TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0],
+ se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0],
PR_REG_ISID_LEN);
isid_ptr = &isid_buf[0];
}
@@ -2099,30 +2105,30 @@ static int core_scsi3_emulate_pro_register(
* Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
*/
pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
- if (!(pr_reg_e)) {
+ if (!pr_reg_e) {
if (res_key) {
- printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero"
+ pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
}
/*
* Do nothing but return GOOD status.
*/
- if (!(sa_res_key))
+ if (!sa_res_key)
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
- if (!(spec_i_pt)) {
+ if (!spec_i_pt) {
/*
* Perform the Service Action REGISTER on the Initiator
* Port Endpoint that the PRO was received from on the
* Logical Unit of the SCSI device server.
*/
- ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+ ret = core_scsi3_alloc_registration(cmd->se_dev,
se_sess->se_node_acl, se_deve, isid_ptr,
sa_res_key, all_tg_pt, aptpl,
ignore_key, 0);
if (ret != 0) {
- printk(KERN_ERR "Unable to allocate"
+ pr_err("Unable to allocate"
" struct t10_pr_registration\n");
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -2143,10 +2149,10 @@ static int core_scsi3_emulate_pro_register(
/*
* Nothing left to do for the APTPL=0 case.
*/
- if (!(aptpl)) {
+ if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
- core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
- printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
" REGISTER\n");
return 0;
}
@@ -2155,15 +2161,15 @@ static int core_scsi3_emulate_pro_register(
* update the APTPL metadata information using its
* preallocated *pr_reg->pr_aptpl_buf.
*/
- pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+ pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev,
se_sess->se_node_acl, se_sess);
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret)) {
+ if (!ret) {
pr_tmpl->pr_aptpl_active = 1;
- printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
}
core_scsi3_put_pr_reg(pr_reg);
@@ -2175,9 +2181,9 @@ static int core_scsi3_emulate_pro_register(
pr_reg = pr_reg_e;
type = pr_reg->pr_res_type;
- if (!(ignore_key)) {
+ if (!ignore_key) {
if (res_key != pr_reg->pr_res_key) {
- printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+ pr_err("SPC-3 PR REGISTER: Received"
" res_key: 0x%016Lx does not match"
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key,
@@ -2187,7 +2193,7 @@ static int core_scsi3_emulate_pro_register(
}
}
if (spec_i_pt) {
- printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT"
+ pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
" set while sa_res_key=0\n");
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -2197,7 +2203,7 @@ static int core_scsi3_emulate_pro_register(
* must also set ALL_TG_PT=1 in the incoming PROUT.
*/
if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
- printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1"
+ pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
" registration exists, but ALL_TG_PT=1 bit not"
" present in received PROUT\n");
core_scsi3_put_pr_reg(pr_reg);
@@ -2209,8 +2215,8 @@ static int core_scsi3_emulate_pro_register(
if (aptpl) {
pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
GFP_KERNEL);
- if (!(pr_aptpl_buf)) {
- printk(KERN_ERR "Unable to allocate"
+ if (!pr_aptpl_buf) {
+ pr_err("Unable to allocate"
" pr_aptpl_buf\n");
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_LU_COMM_FAILURE;
@@ -2221,9 +2227,9 @@ static int core_scsi3_emulate_pro_register(
* Nexus sa_res_key=1 Change Reservation Key for registered I_T
* Nexus.
*/
- if (!(sa_res_key)) {
+ if (!sa_res_key) {
pr_holder = core_scsi3_check_implict_release(
- SE_DEV(cmd), pr_reg);
+ cmd->se_dev, pr_reg);
if (pr_holder < 0) {
kfree(pr_aptpl_buf);
core_scsi3_put_pr_reg(pr_reg);
@@ -2240,7 +2246,7 @@ static int core_scsi3_emulate_pro_register(
&pr_tmpl->registration_list,
pr_reg_list) {
- if (!(pr_reg_p->pr_reg_all_tg_pt))
+ if (!pr_reg_p->pr_reg_all_tg_pt)
continue;
if (pr_reg_p->pr_res_key != res_key)
@@ -2260,7 +2266,7 @@ static int core_scsi3_emulate_pro_register(
/*
* Release the calling I_T Nexus registration now..
*/
- __core_scsi3_free_registration(SE_DEV(cmd), pr_reg,
+ __core_scsi3_free_registration(cmd->se_dev, pr_reg,
NULL, 1);
/*
* From spc4r17, section 5.7.11.3 Unregistering
@@ -2289,10 +2295,10 @@ static int core_scsi3_emulate_pro_register(
}
spin_unlock(&pr_tmpl->registration_lock);
- if (!(aptpl)) {
+ if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
core_scsi3_update_and_write_aptpl(dev, NULL, 0);
- printk("SPC-3 PR: Set APTPL Bit Deactivated"
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
" for UNREGISTER\n");
return 0;
}
@@ -2300,9 +2306,9 @@ static int core_scsi3_emulate_pro_register(
ret = core_scsi3_update_and_write_aptpl(dev,
&pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret)) {
+ if (!ret) {
pr_tmpl->pr_aptpl_active = 1;
- printk("SPC-3 PR: Set APTPL Bit Activated"
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated"
" for UNREGISTER\n");
}
@@ -2315,20 +2321,20 @@ static int core_scsi3_emulate_pro_register(
* READ_KEYS service action.
*/
pr_reg->pr_res_generation = core_scsi3_pr_generation(
- SE_DEV(cmd));
+ cmd->se_dev);
pr_reg->pr_res_key = sa_res_key;
- printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+ pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
" Key for %s to: 0x%016Lx PRgeneration:"
- " 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(),
+ " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
(ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
pr_reg->pr_reg_nacl->initiatorname,
pr_reg->pr_res_key, pr_reg->pr_res_generation);
- if (!(aptpl)) {
+ if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
core_scsi3_update_and_write_aptpl(dev, NULL, 0);
core_scsi3_put_pr_reg(pr_reg);
- printk("SPC-3 PR: Set APTPL Bit Deactivated"
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
" for REGISTER\n");
return 0;
}
@@ -2336,9 +2342,9 @@ static int core_scsi3_emulate_pro_register(
ret = core_scsi3_update_and_write_aptpl(dev,
&pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret)) {
+ if (!ret) {
pr_tmpl->pr_aptpl_active = 1;
- printk("SPC-3 PR: Set APTPL Bit Activated"
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated"
" for REGISTER\n");
}
@@ -2378,19 +2384,19 @@ static int core_scsi3_pro_reserve(
int scope,
u64 res_key)
{
- struct se_session *se_sess = SE_SESS(cmd);
+ struct se_session *se_sess = cmd->se_sess;
struct se_dev_entry *se_deve;
- struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_lun *se_lun = cmd->se_lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_res_holder;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
int ret, prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
- if (!(se_sess) || !(se_lun)) {
- printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ if (!se_sess || !se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
se_tpg = se_sess->se_tpg;
@@ -2398,10 +2404,10 @@ static int core_scsi3_pro_reserve(
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
*/
- pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
- if (!(pr_reg)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ if (!pr_reg) {
+ pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RESERVE\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -2415,7 +2421,7 @@ static int core_scsi3_pro_reserve(
* registered with the logical unit for the I_T nexus; and
*/
if (res_key != pr_reg->pr_res_key) {
- printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
+ pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
@@ -2432,7 +2438,7 @@ static int core_scsi3_pro_reserve(
* and that persistent reservation has a scope of LU_SCOPE.
*/
if (scope != PR_SCOPE_LU_SCOPE) {
- printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+ pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -2456,12 +2462,12 @@ static int core_scsi3_pro_reserve(
*/
if (pr_res_holder != pr_reg) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
- printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+ pr_err("SPC-3 PR: Attempted RESERVE from"
" [%s]: %s while reservation already held by"
" [%s]: %s, returning RESERVATION_CONFLICT\n",
- CMD_TFO(cmd)->get_fabric_name(),
+ cmd->se_tfo->get_fabric_name(),
se_sess->se_node_acl->initiatorname,
- TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -2478,13 +2484,13 @@ static int core_scsi3_pro_reserve(
if ((pr_res_holder->pr_res_type != type) ||
(pr_res_holder->pr_res_scope != scope)) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
- printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+ pr_err("SPC-3 PR: Attempted RESERVE from"
" [%s]: %s trying to change TYPE and/or SCOPE,"
" while reservation already held by [%s]: %s,"
" returning RESERVATION_CONFLICT\n",
- CMD_TFO(cmd)->get_fabric_name(),
+ cmd->se_tfo->get_fabric_name(),
se_sess->se_node_acl->initiatorname,
- TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -2516,22 +2522,22 @@ static int core_scsi3_pro_reserve(
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new"
+ pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
- CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type),
+ cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
- printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
- CMD_TFO(cmd)->get_fabric_name(),
+ pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
+ cmd->se_tfo->get_fabric_name(),
se_sess->se_node_acl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
spin_unlock(&dev->dev_reservation_lock);
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+ if (!ret)
+ pr_debug("SPC-3 PR: Updated APTPL metadata"
" for RESERVE\n");
}
@@ -2558,7 +2564,7 @@ static int core_scsi3_emulate_pro_reserve(
ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
break;
default:
- printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:"
+ pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
" 0x%02x\n", type);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
@@ -2587,12 +2593,12 @@ static void __core_scsi3_complete_pro_release(
*/
dev->dev_pr_res_holder = NULL;
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+ pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
tfo->get_fabric_name(), (explict) ? "explict" : "implict",
core_scsi3_pr_dump_type(pr_reg->pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
- printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n",
+ pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
tfo->get_fabric_name(), se_nacl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
/*
@@ -2608,22 +2614,22 @@ static int core_scsi3_emulate_pro_release(
u64 res_key)
{
struct se_device *dev = cmd->se_dev;
- struct se_session *se_sess = SE_SESS(cmd);
- struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_session *se_sess = cmd->se_sess;
+ struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
int ret, all_reg = 0;
- if (!(se_sess) || !(se_lun)) {
- printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ if (!se_sess || !se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
*/
pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
- if (!(pr_reg)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ if (!pr_reg) {
+ pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RELEASE\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -2641,7 +2647,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
- if (!(pr_res_holder)) {
+ if (!pr_res_holder) {
/*
* No persistent reservation, return GOOD status.
*/
@@ -2678,7 +2684,7 @@ static int core_scsi3_emulate_pro_release(
* that is registered with the logical unit for the I_T nexus;
*/
if (res_key != pr_reg->pr_res_key) {
- printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
+ pr_err("SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
spin_unlock(&dev->dev_reservation_lock);
@@ -2694,13 +2700,13 @@ static int core_scsi3_emulate_pro_release(
if ((pr_res_holder->pr_res_type != type) ||
(pr_res_holder->pr_res_scope != scope)) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
- printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release"
+ pr_err("SPC-3 PR RELEASE: Attempted to release"
" reservation from [%s]: %s with different TYPE "
"and/or SCOPE while reservation already held by"
" [%s]: %s, returning RESERVATION_CONFLICT\n",
- CMD_TFO(cmd)->get_fabric_name(),
+ cmd->se_tfo->get_fabric_name(),
se_sess->se_node_acl->initiatorname,
- TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -2758,11 +2764,11 @@ static int core_scsi3_emulate_pro_release(
write_aptpl:
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
+ if (!ret)
+ pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
}
core_scsi3_put_pr_reg(pr_reg);
@@ -2775,18 +2781,18 @@ static int core_scsi3_emulate_pro_clear(
{
struct se_device *dev = cmd->se_dev;
struct se_node_acl *pr_reg_nacl;
- struct se_session *se_sess = SE_SESS(cmd);
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct se_session *se_sess = cmd->se_sess;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
u32 pr_res_mapped_lun = 0;
int calling_it_nexus = 0;
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
*/
- pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+ pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev,
se_sess->se_node_acl, se_sess);
- if (!(pr_reg_n)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ if (!pr_reg_n) {
+ pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for CLEAR\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -2802,7 +2808,7 @@ static int core_scsi3_emulate_pro_clear(
* that is registered with the logical unit for the I_T nexus.
*/
if (res_key != pr_reg_n->pr_res_key) {
- printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+ pr_err("SPC-3 PR REGISTER: Received"
" res_key: 0x%016Lx does not match"
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
@@ -2839,18 +2845,18 @@ static int core_scsi3_emulate_pro_clear(
* command with CLEAR service action was received, with the
* additional sense code set to RESERVATIONS PREEMPTED.
*/
- if (!(calling_it_nexus))
+ if (!calling_it_nexus)
core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
}
spin_unlock(&pr_tmpl->registration_lock);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n",
- CMD_TFO(cmd)->get_fabric_name());
+ pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n",
+ cmd->se_tfo->get_fabric_name());
if (pr_tmpl->pr_aptpl_active) {
- core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
- printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
+ pr_debug("SPC-3 PR: Updated APTPL metadata"
" for CLEAR\n");
}
@@ -2889,12 +2895,12 @@ static void __core_scsi3_complete_pro_preempt(
pr_reg->pr_res_type = type;
pr_reg->pr_res_scope = scope;
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new"
+ pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
core_scsi3_pr_dump_type(type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
- printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
+ pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
/*
@@ -2920,7 +2926,7 @@ static void core_scsi3_release_preempt_and_abort(
if (pr_reg_holder == pr_reg)
continue;
if (pr_reg->pr_res_holder) {
- printk(KERN_WARNING "pr_reg->pr_res_holder still set\n");
+ pr_warn("pr_reg->pr_res_holder still set\n");
continue;
}
@@ -2954,25 +2960,25 @@ static int core_scsi3_pro_preempt(
u64 sa_res_key,
int abort)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_dev_entry *se_deve;
struct se_node_acl *pr_reg_nacl;
- struct se_session *se_sess = SE_SESS(cmd);
+ struct se_session *se_sess = cmd->se_sess;
struct list_head preempt_and_abort_list;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
u32 pr_res_mapped_lun = 0;
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
int prh_type = 0, prh_scope = 0, ret;
- if (!(se_sess))
+ if (!se_sess)
return PYX_TRANSPORT_LU_COMM_FAILURE;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
- pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
- if (!(pr_reg_n)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ if (!pr_reg_n) {
+ pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
(abort) ? "_AND_ABORT" : "");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -2982,7 +2988,7 @@ static int core_scsi3_pro_preempt(
return PYX_TRANSPORT_RESERVATION_CONFLICT;
}
if (scope != PR_SCOPE_LU_SCOPE) {
- printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+ pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg_n);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -2995,7 +3001,7 @@ static int core_scsi3_pro_preempt(
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
all_reg = 1;
- if (!(all_reg) && !(sa_res_key)) {
+ if (!all_reg && !sa_res_key) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -3009,7 +3015,7 @@ static int core_scsi3_pro_preempt(
* server shall perform a preempt by doing the following in an
* uninterrupted series of actions. (See below..)
*/
- if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) {
+ if (!pr_res_holder || (pr_res_holder->pr_res_key != sa_res_key)) {
/*
* No existing or SA Reservation Key matching reservations..
*
@@ -3036,7 +3042,7 @@ static int core_scsi3_pro_preempt(
* was received, with the additional sense code set
* to REGISTRATIONS PREEMPTED.
*/
- if (!(all_reg)) {
+ if (!all_reg) {
if (pr_reg->pr_res_key != sa_res_key)
continue;
@@ -3076,7 +3082,7 @@ static int core_scsi3_pro_preempt(
NULL, 0);
released_regs++;
}
- if (!(calling_it_nexus))
+ if (!calling_it_nexus)
core_scsi3_ua_allocate(pr_reg_nacl,
pr_res_mapped_lun, 0x2A,
ASCQ_2AH_RESERVATIONS_PREEMPTED);
@@ -3089,7 +3095,7 @@ static int core_scsi3_pro_preempt(
* registered reservation key, then the device server shall
* complete the command with RESERVATION CONFLICT status.
*/
- if (!(released_regs)) {
+ if (!released_regs) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -3111,17 +3117,17 @@ static int core_scsi3_pro_preempt(
spin_unlock(&dev->dev_reservation_lock);
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg_n->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk(KERN_INFO "SPC-3 PR: Updated APTPL"
+ if (!ret)
+ pr_debug("SPC-3 PR: Updated APTPL"
" metadata for PREEMPT%s\n", (abort) ?
"_AND_ABORT" : "");
}
core_scsi3_put_pr_reg(pr_reg_n);
- core_scsi3_pr_generation(SE_DEV(cmd));
+ core_scsi3_pr_generation(cmd->se_dev);
return 0;
}
/*
@@ -3247,16 +3253,16 @@ static int core_scsi3_pro_preempt(
}
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg_n->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk("SPC-3 PR: Updated APTPL metadata for PREEMPT"
+ if (!ret)
+ pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT"
"%s\n", (abort) ? "_AND_ABORT" : "");
}
core_scsi3_put_pr_reg(pr_reg_n);
- core_scsi3_pr_generation(SE_DEV(cmd));
+ core_scsi3_pr_generation(cmd->se_dev);
return 0;
}
@@ -3281,7 +3287,7 @@ static int core_scsi3_emulate_pro_preempt(
res_key, sa_res_key, abort);
break;
default:
- printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s"
+ pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
@@ -3297,17 +3303,17 @@ static int core_scsi3_emulate_pro_register_and_move(
int aptpl,
int unreg)
{
- struct se_session *se_sess = SE_SESS(cmd);
- struct se_device *dev = SE_DEV(cmd);
+ struct se_session *se_sess = cmd->se_sess;
+ struct se_device *dev = cmd->se_dev;
struct se_dev_entry *se_deve, *dest_se_deve = NULL;
- struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_lun *se_lun = cmd->se_lun;
struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
struct se_port *se_port;
struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ unsigned char *buf;
unsigned char *initiator_str;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
u32 tid_len, tmp_tid_len;
@@ -3315,14 +3321,14 @@ static int core_scsi3_emulate_pro_register_and_move(
unsigned short rtpi;
unsigned char proto_ident;
- if (!(se_sess) || !(se_lun)) {
- printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ if (!se_sess || !se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
se_tpg = se_sess->se_tpg;
- tf_ops = TPG_TFO(se_tpg);
+ tf_ops = se_tpg->se_tpg_tfo;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
/*
* Follow logic from spc4r17 Section 5.7.8, Table 50 --
@@ -3330,10 +3336,10 @@ static int core_scsi3_emulate_pro_register_and_move(
*
* Locate the existing *pr_reg via struct se_node_acl pointers
*/
- pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
- if (!(pr_reg)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED"
+ if (!pr_reg) {
+ pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
" *pr_reg for REGISTER_AND_MOVE\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -3342,7 +3348,7 @@ static int core_scsi3_emulate_pro_register_and_move(
* provided during this initiator's I_T nexus registration.
*/
if (res_key != pr_reg->pr_res_key) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received"
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received"
" res_key: 0x%016Lx does not match existing SA REGISTER"
" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
@@ -3351,26 +3357,30 @@ static int core_scsi3_emulate_pro_register_and_move(
/*
* The service active reservation key needs to be non zero
*/
- if (!(sa_res_key)) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero"
+ if (!sa_res_key) {
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
" sa_res_key\n");
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
+
/*
* Determine the Relative Target Port Identifier where the reservation
* will be moved to for the TransportID containing SCSI initiator WWN
* information.
*/
+ buf = transport_kmap_first_data_page(cmd);
rtpi = (buf[18] & 0xff) << 8;
rtpi |= buf[19] & 0xff;
tid_len = (buf[20] & 0xff) << 24;
tid_len |= (buf[21] & 0xff) << 16;
tid_len |= (buf[22] & 0xff) << 8;
tid_len |= buf[23] & 0xff;
+ transport_kunmap_first_data_page(cmd);
+ buf = NULL;
if ((tid_len + 24) != cmd->data_length) {
- printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header"
+ pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header"
" does not equal CDB data_length: %u\n", tid_len,
cmd->data_length);
core_scsi3_put_pr_reg(pr_reg);
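The RTPI and TransportID length above are assembled byte-by-byte in big-endian order from the freshly mapped parameter list (bytes 18-19 and 20-23 respectively). For reference only, not part of the patch, the same decoding can be written with the kernel's unaligned big-endian helpers; the helper name below is invented:

    #include <linux/types.h>
    #include <asm/unaligned.h>

    /* Equivalent decoding of the REGISTER_AND_MOVE parameter-list fields
     * read from the kmap'd payload above. */
    static void pr_decode_rtpi_tid_len(const unsigned char *buf,
                                       unsigned short *rtpi, u32 *tid_len)
    {
            *rtpi    = get_unaligned_be16(&buf[18]);   /* bytes 18-19 */
            *tid_len = get_unaligned_be32(&buf[20]);   /* bytes 20-23 */
    }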
@@ -3382,10 +3392,10 @@ static int core_scsi3_emulate_pro_register_and_move(
if (se_port->sep_rtpi != rtpi)
continue;
dest_se_tpg = se_port->sep_tpg;
- if (!(dest_se_tpg))
+ if (!dest_se_tpg)
continue;
- dest_tf_ops = TPG_TFO(dest_se_tpg);
- if (!(dest_tf_ops))
+ dest_tf_ops = dest_se_tpg->se_tpg_tfo;
+ if (!dest_tf_ops)
continue;
atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
@@ -3394,7 +3404,7 @@ static int core_scsi3_emulate_pro_register_and_move(
ret = core_scsi3_tpg_depend_item(dest_se_tpg);
if (ret != 0) {
- printk(KERN_ERR "core_scsi3_tpg_depend_item() failed"
+ pr_err("core_scsi3_tpg_depend_item() failed"
" for dest_se_tpg\n");
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
@@ -3407,20 +3417,22 @@ static int core_scsi3_emulate_pro_register_and_move(
}
spin_unlock(&dev->se_port_lock);
- if (!(dest_se_tpg) || (!dest_tf_ops)) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+ if (!dest_se_tpg || !dest_tf_ops) {
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" fabric ops from Relative Target Port Identifier:"
" %hu\n", rtpi);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
+
+ buf = transport_kmap_first_data_page(cmd);
proto_ident = (buf[24] & 0x0f);
#if 0
- printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
+ pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
" 0x%02x\n", proto_ident);
#endif
if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received"
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
" proto_ident: 0x%02x does not match ident: 0x%02x"
" from fabric: %s\n", proto_ident,
dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
@@ -3429,7 +3441,7 @@ static int core_scsi3_emulate_pro_register_and_move(
goto out;
}
if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
" containg a valid tpg_parse_pr_out_transport_id"
" function pointer\n");
ret = PYX_TRANSPORT_LU_COMM_FAILURE;
@@ -3437,14 +3449,17 @@ static int core_scsi3_emulate_pro_register_and_move(
}
initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
(const char *)&buf[24], &tmp_tid_len, &iport_ptr);
- if (!(initiator_str)) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+ if (!initiator_str) {
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
- printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s"
+ transport_kunmap_first_data_page(cmd);
+ buf = NULL;
+
+ pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
" %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
"port" : "device", initiator_str, (iport_ptr != NULL) ?
iport_ptr : "");
@@ -3459,18 +3474,18 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_reg_nacl = pr_reg->pr_reg_nacl;
matching_iname = (!strcmp(initiator_str,
pr_reg_nacl->initiatorname)) ? 1 : 0;
- if (!(matching_iname))
+ if (!matching_iname)
goto after_iport_check;
- if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
+ if (!iport_ptr || !pr_reg->isid_present_at_reg) {
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
" matches: %s on received I_T Nexus\n", initiator_str,
pr_reg_nacl->initiatorname);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
- if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
+ if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
" matches: %s %s on received I_T Nexus\n",
initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
pr_reg->pr_reg_isid);
@@ -3490,8 +3505,8 @@ after_iport_check:
}
spin_unlock_bh(&dest_se_tpg->acl_node_lock);
- if (!(dest_node_acl)) {
- printk(KERN_ERR "Unable to locate %s dest_node_acl for"
+ if (!dest_node_acl) {
+ pr_err("Unable to locate %s dest_node_acl for"
" TransportID%s\n", dest_tf_ops->get_fabric_name(),
initiator_str);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -3499,7 +3514,7 @@ after_iport_check:
}
ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
if (ret != 0) {
- printk(KERN_ERR "core_scsi3_nodeacl_depend_item() for"
+ pr_err("core_scsi3_nodeacl_depend_item() for"
" dest_node_acl\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
@@ -3508,7 +3523,7 @@ after_iport_check:
goto out;
}
#if 0
- printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
+ pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
" %s from TransportID\n", dest_tf_ops->get_fabric_name(),
dest_node_acl->initiatorname);
#endif
@@ -3517,8 +3532,8 @@ after_iport_check:
* PORT IDENTIFIER.
*/
dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
- if (!(dest_se_deve)) {
- printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:"
+ if (!dest_se_deve) {
+ pr_err("Unable to locate %s dest_se_deve from RTPI:"
" %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
@@ -3526,7 +3541,7 @@ after_iport_check:
ret = core_scsi3_lunacl_depend_item(dest_se_deve);
if (ret < 0) {
- printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n");
+ pr_err("core_scsi3_lunacl_depend_item() failed\n");
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
dest_se_deve = NULL;
@@ -3534,7 +3549,7 @@ after_iport_check:
goto out;
}
#if 0
- printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
+ pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
" ACL for dest_se_deve->mapped_lun: %u\n",
dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
dest_se_deve->mapped_lun);
@@ -3545,8 +3560,8 @@ after_iport_check:
*/
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
- if (!(pr_res_holder)) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation"
+ if (!pr_res_holder) {
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
" currently held\n");
spin_unlock(&dev->dev_reservation_lock);
ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
@@ -3559,7 +3574,7 @@ after_iport_check:
* Register behaviors for a REGISTER AND MOVE service action
*/
if (pr_res_holder != pr_reg) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -3576,7 +3591,7 @@ after_iport_check:
*/
if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move"
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: Unable to move"
" reservation for type: %s\n",
core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
@@ -3611,8 +3626,8 @@ after_iport_check:
*/
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
- if (!(dest_pr_reg)) {
- ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+ if (!dest_pr_reg) {
+ ret = core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_se_deve, iport_ptr,
sa_res_key, 0, aptpl, 2, 1);
if (ret != 0) {
@@ -3644,16 +3659,16 @@ after_iport_check:
/*
* Increment PRGeneration for existing registrations..
*/
- if (!(new_reg))
+ if (!new_reg)
dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
spin_unlock(&dev->dev_reservation_lock);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
+ pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
" created new reservation holder TYPE: %s on object RTPI:"
" %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
core_scsi3_pr_dump_type(type), rtpi,
dest_pr_reg->pr_res_generation);
- printk(KERN_INFO "SPC-3 PR Successfully moved reservation from"
+ pr_debug("SPC-3 PR Successfully moved reservation from"
" %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
(prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
@@ -3681,24 +3696,28 @@ after_iport_check:
* Clear the APTPL metadata if APTPL has been disabled, otherwise
* write out the updated metadata to struct file for this SCSI device.
*/
- if (!(aptpl)) {
+ if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
- core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
- printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
" REGISTER_AND_MOVE\n");
} else {
pr_tmpl->pr_aptpl_active = 1;
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&dest_pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk("SPC-3 PR: Set APTPL Bit Activated for"
+ if (!ret)
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated for"
" REGISTER_AND_MOVE\n");
}
+ transport_kunmap_first_data_page(cmd);
+
core_scsi3_put_pr_reg(dest_pr_reg);
return 0;
out:
+ if (buf)
+ transport_kunmap_first_data_page(cmd);
if (dest_se_deve)
core_scsi3_lunacl_undepend_item(dest_se_deve);
if (dest_node_acl)
@@ -3723,7 +3742,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
*/
static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
{
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
u64 res_key, sa_res_key;
int sa, scope, type, aptpl;
int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
@@ -3731,11 +3750,11 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
* FIXME: A NULL struct se_session pointer means this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
*/
- if (!(SE_SESS(cmd)))
+ if (!cmd->se_sess)
return PYX_TRANSPORT_LU_COMM_FAILURE;
if (cmd->data_length < 24) {
- printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list"
+ pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -3745,6 +3764,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
sa = (cdb[1] & 0x1f);
scope = (cdb[2] & 0xf0);
type = (cdb[2] & 0x0f);
+
+ buf = transport_kmap_first_data_page(cmd);
/*
* From PERSISTENT_RESERVE_OUT parameter list (payload)
*/
@@ -3762,6 +3783,9 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
aptpl = (buf[17] & 0x01);
unreg = (buf[17] & 0x02);
}
+ transport_kunmap_first_data_page(cmd);
+ buf = NULL;
+
/*
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
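This hunk shows the pattern the patch applies across target_core_pr.c: instead of reading the payload through the old t_task_buf pointer, the first data page is mapped just long enough to copy the needed bytes out, then unmapped and the local pointer cleared. A reduced sketch of that shape, with an invented helper name and only the fields visible in this hunk (relies on the TCM headers for struct se_cmd and the kmap helpers):

    /* Map, read the APTPL/UNREG flag byte, unmap -- nothing else holds the
     * mapping across the rest of the service-action handling. */
    static void pr_read_flag_byte(struct se_cmd *cmd, int *aptpl, int *unreg)
    {
            unsigned char *buf = transport_kmap_first_data_page(cmd);

            *aptpl = buf[17] & 0x01;        /* APTPL bit, as in the hunk above */
            *unreg = (buf[17] & 0x02) != 0; /* UNREG bit */

            transport_kunmap_first_data_page(cmd);
    }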
@@ -3776,9 +3800,9 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
* the sense key set to ILLEGAL REQUEST, and the additional sense
* code set to PARAMETER LIST LENGTH ERROR.
*/
- if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
+ if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
(cmd->data_length != 24)) {
- printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter"
+ pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -3812,7 +3836,7 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
return core_scsi3_emulate_pro_register_and_move(cmd, res_key,
sa_res_key, aptpl, unreg);
default:
- printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+ pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
@@ -3827,25 +3851,26 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
*/
static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
{
- struct se_device *se_dev = SE_DEV(cmd);
- struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct se_device *se_dev = cmd->se_dev;
+ struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
u32 add_len = 0, off = 8;
if (cmd->data_length < 8) {
- printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u"
+ pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
" too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
- buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
- buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
- buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
- buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+ buf = transport_kmap_first_data_page(cmd);
+ buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
- spin_lock(&T10_RES(su_dev)->registration_lock);
- list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ spin_lock(&su_dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
pr_reg_list) {
/*
* Check for overflow of 8byte PRI READ_KEYS payload and
@@ -3865,13 +3890,15 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
add_len += 8;
}
- spin_unlock(&T10_RES(su_dev)->registration_lock);
+ spin_unlock(&su_dev->t10_pr.registration_lock);
buf[4] = ((add_len >> 24) & 0xff);
buf[5] = ((add_len >> 16) & 0xff);
buf[6] = ((add_len >> 8) & 0xff);
buf[7] = (add_len & 0xff);
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
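As in the REGISTER_AND_MOVE hunk earlier, the READ_KEYS header is written with open-coded big-endian shifts: PRGeneration into bytes 0-3 and the additional length into bytes 4-7. For reference only, the unaligned helpers express the same packing; the function name is invented:

    #include <linux/types.h>
    #include <asm/unaligned.h>

    static void pri_read_keys_header(unsigned char *buf, u32 pr_generation, u32 add_len)
    {
            put_unaligned_be32(pr_generation, &buf[0]);  /* bytes 0-3 */
            put_unaligned_be32(add_len, &buf[4]);        /* bytes 4-7 */
    }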
@@ -3882,23 +3909,24 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
*/
static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
{
- struct se_device *se_dev = SE_DEV(cmd);
- struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct se_device *se_dev = cmd->se_dev;
+ struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
u64 pr_res_key;
u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
if (cmd->data_length < 8) {
- printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
+ pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
" too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
- buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
- buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
- buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
- buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+ buf = transport_kmap_first_data_page(cmd);
+ buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
spin_lock(&se_dev->dev_reservation_lock);
pr_reg = se_dev->dev_pr_res_holder;
@@ -3911,10 +3939,9 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
buf[6] = ((add_len >> 8) & 0xff);
buf[7] = (add_len & 0xff);
- if (cmd->data_length < 22) {
- spin_unlock(&se_dev->dev_reservation_lock);
- return 0;
- }
+ if (cmd->data_length < 22)
+ goto err;
+
/*
* Set the Reservation key.
*
@@ -3951,7 +3978,10 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
buf[21] = (pr_reg->pr_res_scope & 0xf0) |
(pr_reg->pr_res_type & 0x0f);
}
+
+err:
spin_unlock(&se_dev->dev_reservation_lock);
+ transport_kunmap_first_data_page(cmd);
return 0;
}
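With the payload now mapped for the whole function, the early return that used to sit inside the reservation lock is converted into a jump to a single exit label, so both the unlock and the kunmap happen on every path. A schematic of that shape, with simplified names (error handling and the actual descriptor bytes omitted):

    static int read_reservation_shape(struct se_cmd *cmd, struct se_device *dev)
    {
            unsigned char *buf = transport_kmap_first_data_page(cmd);

            /* header bytes are always written ... */
            spin_lock(&dev->dev_reservation_lock);
            if (cmd->data_length < 22)
                    goto out;       /* buffer too short for key/scope/type */
            /* ... reservation key, scope and type filled in here ... */
    out:
            spin_unlock(&dev->dev_reservation_lock);
            transport_kunmap_first_data_page(cmd);
            return 0;
    }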
@@ -3963,17 +3993,19 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
*/
static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ struct se_device *dev = cmd->se_dev;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ unsigned char *buf;
u16 add_len = 8; /* Hardcoded to 8. */
if (cmd->data_length < 6) {
- printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
+ pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
" %u too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = ((add_len << 8) & 0xff);
buf[1] = (add_len & 0xff);
buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
@@ -4004,6 +4036,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
@@ -4014,27 +4048,29 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
*/
static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
{
- struct se_device *se_dev = SE_DEV(cmd);
+ struct se_device *se_dev = cmd->se_dev;
struct se_node_acl *se_nacl;
- struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
+ unsigned char *buf;
u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
u32 off = 8; /* off into first Full Status descriptor */
int format_code = 0;
if (cmd->data_length < 8) {
- printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
+ pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
" too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
- buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
- buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
- buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
- buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+ buf = transport_kmap_first_data_page(cmd);
+
+ buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
spin_lock(&pr_tmpl->registration_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
@@ -4051,11 +4087,11 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* Determine expected length of $FABRIC_MOD specific
* TransportID full status descriptor..
*/
- exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len(
+ exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len(
se_tpg, se_nacl, pr_reg, &format_code);
if ((exp_desc_len + add_len) > cmd->data_length) {
- printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran"
+ pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
" out of buffer: %d\n", cmd->data_length);
spin_lock(&pr_tmpl->registration_lock);
atomic_dec(&pr_reg->pr_res_holders);
@@ -4105,7 +4141,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* bit is set to one, the contents of the RELATIVE TARGET PORT
* IDENTIFIER field are not defined by this standard.
*/
- if (!(pr_reg->pr_reg_all_tg_pt)) {
+ if (!pr_reg->pr_reg_all_tg_pt) {
struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
@@ -4116,7 +4152,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
/*
* Now, have the $FABRIC_MOD fill in the protocol identifier
*/
- desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg,
+ desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id(se_tpg,
se_nacl, pr_reg, &format_code, &buf[off+4]);
spin_lock(&pr_tmpl->registration_lock);
@@ -4150,6 +4186,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
buf[6] = ((add_len >> 8) & 0xff);
buf[7] = (add_len & 0xff);
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
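READ_FULL_STATUS appends one variable-length descriptor per registration, asking the fabric module for the encoded length first and skipping entries that would overflow the initiator's buffer. A minimal sketch of that append-with-bounds-check step, with invented names:

    #include <linux/string.h>
    #include <linux/types.h>

    /* Returns the new offset; leaves the buffer untouched when the
     * descriptor would not fit, mirroring the "ran out of buffer" branch
     * above. */
    static u32 append_desc(unsigned char *buf, u32 off, u32 buf_len,
                           const unsigned char *desc, u32 desc_len)
    {
            if (off + desc_len > buf_len)
                    return off;
            memcpy(&buf[off], desc, desc_len);
            return off + desc_len;
    }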
@@ -4165,7 +4203,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
case PRI_READ_FULL_STATUS:
return core_scsi3_pri_read_full_status(cmd);
default:
- printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service"
+ pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cdb[1] & 0x1f);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
@@ -4174,7 +4212,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
int core_scsi3_emulate_pr(struct se_cmd *cmd)
{
- unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
+ unsigned char *cdb = &cmd->t_task_cdb[0];
struct se_device *dev = cmd->se_dev;
/*
* Following spc2r20 5.5.1 Reservations overview:
@@ -4186,7 +4224,7 @@ int core_scsi3_emulate_pr(struct se_cmd *cmd)
* CONFLICT status.
*/
if (dev->dev_flags & DF_SPC2_RESERVATIONS) {
- printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy"
+ pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -4213,39 +4251,39 @@ static int core_pt_seq_non_holder(
int core_setup_reservations(struct se_device *dev, int force_pt)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_reservation_template *rest = &su_dev->t10_reservation;
+ struct t10_reservation *rest = &su_dev->t10_pr;
/*
* If this device is from Target_Core_Mod/pSCSI, use the reservations
* of the Underlying SCSI hardware. In Linux/SCSI terms, this can
* cause a problem because libata and some SATA RAID HBAs appear
* under Linux/SCSI, but emulate reservations themselves.
*/
- if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
- !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) {
+ if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) {
rest->res_type = SPC_PASSTHROUGH;
rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
- printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation"
- " emulation\n", TRANSPORT(dev)->name);
+ pr_debug("%s: Using SPC_PASSTHROUGH, no reservation"
+ " emulation\n", dev->transport->name);
return 0;
}
/*
* If SPC-3 or above is reported by real or emulated struct se_device,
* use emulated Persistent Reservations.
*/
- if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
+ if (dev->transport->get_device_rev(dev) >= SCSI_3) {
rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
- printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS"
- " emulation\n", TRANSPORT(dev)->name);
+ pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS"
+ " emulation\n", dev->transport->name);
} else {
rest->res_type = SPC2_RESERVATIONS;
rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
rest->pr_ops.t10_seq_non_holder =
&core_scsi2_reservation_seq_non_holder;
- printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n",
- TRANSPORT(dev)->name);
+ pr_debug("%s: Using SPC2_RESERVATIONS emulation\n",
+ dev->transport->name);
}
return 0;
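core_setup_reservations() chooses a reservation personality once at device setup and records it as function pointers in pr_ops; the rest of the core then dispatches through those pointers without re-checking the transport type. A stripped-down sketch of that selection pattern, with invented structure and function names (the stubs only stand in for the real check routines):

    struct res_ops_sketch {
            int (*reservation_check)(void *dev);
    };

    static int pt_check(void *dev)   { return 0; }  /* SPC_PASSTHROUGH: no emulation */
    static int spc3_check(void *dev) { return 0; }  /* SPC3_PERSISTENT_RESERVATIONS  */
    static int spc2_check(void *dev) { return 0; }  /* SPC2_RESERVATIONS             */

    static void pick_res_ops(struct res_ops_sketch *ops, int passthrough, int scsi_rev)
    {
            if (passthrough)
                    ops->reservation_check = pt_check;
            else if (scsi_rev >= 3)         /* SCSI_3 or newer */
                    ops->reservation_check = spc3_check;
            else
                    ops->reservation_check = spc2_check;
    }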
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index 5603bcfd86d..c8f47d06458 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -49,7 +49,7 @@ extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
extern int core_scsi2_emulate_crh(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
- struct t10_reservation_template *, u64,
+ struct t10_reservation *, u64,
unsigned char *, unsigned char *, u32,
unsigned char *, u16, u32, int, int, u8);
extern int core_scsi3_check_aptpl_registration(struct se_device *,
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 331d423fd0e..2b7b0da9146 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -55,24 +55,6 @@ static struct se_subsystem_api pscsi_template;
static void pscsi_req_done(struct request *, int);
-/* pscsi_get_sh():
- *
- *
- */
-static struct Scsi_Host *pscsi_get_sh(u32 host_no)
-{
- struct Scsi_Host *sh = NULL;
-
- sh = scsi_host_lookup(host_no);
- if (IS_ERR(sh)) {
- printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
- " %u\n", host_no);
- return NULL;
- }
-
- return sh;
-}
-
/* pscsi_attach_hba():
*
* pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host.
@@ -80,28 +62,23 @@ static struct Scsi_Host *pscsi_get_sh(u32 host_no)
*/
static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
{
- int hba_depth;
struct pscsi_hba_virt *phv;
phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
- if (!(phv)) {
- printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
- return -1;
+ if (!phv) {
+ pr_err("Unable to allocate struct pscsi_hba_virt\n");
+ return -ENOMEM;
}
phv->phv_host_id = host_id;
phv->phv_mode = PHV_VIRUTAL_HOST_ID;
- hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
- atomic_set(&hba->left_queue_depth, hba_depth);
- atomic_set(&hba->max_queue_depth, hba_depth);
- hba->hba_ptr = (void *)phv;
+ hba->hba_ptr = phv;
- printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
+ pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
- printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
- " Target Core with TCQ Depth: %d\n", hba->hba_id,
- atomic_read(&hba->max_queue_depth));
+ pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
+ hba->hba_id);
return 0;
}
@@ -114,12 +91,12 @@ static void pscsi_detach_hba(struct se_hba *hba)
if (scsi_host) {
scsi_host_put(scsi_host);
- printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
+ pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
" Generic Target Core\n", hba->hba_id,
(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
"Unknown");
} else
- printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
+ pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
" from Generic Target Core\n", hba->hba_id);
kfree(phv);
@@ -130,20 +107,17 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
- int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
/*
* Release the struct Scsi_Host
*/
- if (!(mode_flag)) {
- if (!(sh))
+ if (!mode_flag) {
+ if (!sh)
return 0;
phv->phv_lld_host = NULL;
phv->phv_mode = PHV_VIRUTAL_HOST_ID;
- atomic_set(&hba->left_queue_depth, hba_depth);
- atomic_set(&hba->max_queue_depth, hba_depth);
- printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
+ pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
" %s\n", hba->hba_id, (sh->hostt->name) ?
(sh->hostt->name) : "Unknown");
@@ -154,27 +128,17 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
* Otherwise, locate struct Scsi_Host from the original passed
* pSCSI Host ID and enable for phba mode
*/
- sh = pscsi_get_sh(phv->phv_host_id);
- if (!(sh)) {
- printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
+ sh = scsi_host_lookup(phv->phv_host_id);
+ if (IS_ERR(sh)) {
+ pr_err("pSCSI: Unable to locate SCSI Host for"
" phv_host_id: %d\n", phv->phv_host_id);
- return -1;
+ return PTR_ERR(sh);
}
- /*
- * Usually the SCSI LLD will use the hostt->can_queue value to define
- * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set
- * this at all and set sh->can_queue at runtime.
- */
- hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
- sh->hostt->can_queue : sh->can_queue;
-
- atomic_set(&hba->left_queue_depth, hba_depth);
- atomic_set(&hba->max_queue_depth, hba_depth);
phv->phv_lld_host = sh;
phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
- printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
+ pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
return 1;
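Dropping pscsi_get_sh() means the caller now sees scsi_host_lookup()'s error-pointer convention directly: failures are tested with IS_ERR() and propagated with PTR_ERR() rather than collapsed to NULL and -1. A minimal sketch of that idiom, with an invented lookup function standing in for scsi_host_lookup():

    #include <linux/err.h>
    #include <linux/errno.h>

    struct Scsi_Host;

    static struct Scsi_Host *lookup_host_sketch(int id)
    {
            return ERR_PTR(-ENODEV);        /* pretend the lookup failed */
    }

    static int enable_passthrough_sketch(int host_id)
    {
            struct Scsi_Host *sh = lookup_host_sketch(host_id);

            if (IS_ERR(sh))
                    return PTR_ERR(sh);     /* propagate the encoded errno */
            /* ... use sh, then release it when done ... */
            return 0;
    }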
@@ -236,7 +200,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
if (!buf)
- return -1;
+ return -ENOMEM;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = INQUIRY;
@@ -259,7 +223,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
out_free:
kfree(buf);
- return -1;
+ return -EPERM;
}
static void
@@ -293,15 +257,15 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
page_83 = &buf[off];
ident_len = page_83[3];
if (!ident_len) {
- printk(KERN_ERR "page_83[3]: identifier"
+ pr_err("page_83[3]: identifier"
" length zero!\n");
break;
}
- printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len);
+ pr_debug("T10 VPD Identifer Length: %d\n", ident_len);
vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
if (!vpd) {
- printk(KERN_ERR "Unable to allocate memory for"
+ pr_err("Unable to allocate memory for"
" struct t10_vpd\n");
goto out;
}
@@ -353,7 +317,7 @@ static struct se_device *pscsi_add_device_to_list(
if (!sd->queue_depth) {
sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
- printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
+ pr_err("Set broken SCSI Device %d:%d:%d"
" queue_depth to %d\n", sd->channel, sd->id,
sd->lun, sd->queue_depth);
}
@@ -364,10 +328,8 @@ static struct se_device *pscsi_add_device_to_list(
q = sd->request_queue;
limits = &dev_limits.limits;
limits->logical_block_size = sd->sector_size;
- limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
- queue_max_hw_sectors(q) : sd->host->max_sectors;
- limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
- queue_max_sectors(q) : sd->host->max_sectors;
+ limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+ limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
dev_limits.hw_queue_depth = sd->queue_depth;
dev_limits.queue_depth = sd->queue_depth;
/*
@@ -391,9 +353,9 @@ static struct se_device *pscsi_add_device_to_list(
pdv->pdv_sd = sd;
dev = transport_add_device_to_core_hba(hba, &pscsi_template,
- se_dev, dev_flags, (void *)pdv,
+ se_dev, dev_flags, pdv,
&dev_limits, NULL, NULL);
- if (!(dev)) {
+ if (!dev) {
pdv->pdv_sd = NULL;
return NULL;
}
@@ -423,14 +385,14 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
struct pscsi_dev_virt *pdv;
pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
- if (!(pdv)) {
- printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
+ if (!pdv) {
+ pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
return NULL;
}
pdv->pdv_se_hba = hba;
- printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
- return (void *)pdv;
+ pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
+ return pdv;
}
/*
@@ -450,7 +412,7 @@ static struct se_device *pscsi_create_type_disk(
u32 dev_flags = 0;
if (scsi_device_get(sd)) {
- printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+ pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return NULL;
@@ -463,19 +425,19 @@ static struct se_device *pscsi_create_type_disk(
bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
if (IS_ERR(bd)) {
- printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
+ pr_err("pSCSI: blkdev_get_by_path() failed\n");
scsi_device_put(sd);
return NULL;
}
pdv->pdv_bd = bd;
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!(dev)) {
+ if (!dev) {
blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
scsi_device_put(sd);
return NULL;
}
- printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
+ pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
return dev;
@@ -497,7 +459,7 @@ static struct se_device *pscsi_create_type_rom(
u32 dev_flags = 0;
if (scsi_device_get(sd)) {
- printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+ pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return NULL;
@@ -505,11 +467,11 @@ static struct se_device *pscsi_create_type_rom(
spin_unlock_irq(sh->host_lock);
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!(dev)) {
+ if (!dev) {
scsi_device_put(sd);
return NULL;
}
- printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+ pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
@@ -533,10 +495,10 @@ static struct se_device *pscsi_create_type_other(
spin_unlock_irq(sh->host_lock);
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!(dev))
+ if (!dev)
return NULL;
- printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+ pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
@@ -555,8 +517,8 @@ static struct se_device *pscsi_create_virtdevice(
struct Scsi_Host *sh = phv->phv_lld_host;
int legacy_mode_enable = 0;
- if (!(pdv)) {
- printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
+ if (!pdv) {
+ pr_err("Unable to locate struct pscsi_dev_virt"
" parameter\n");
return ERR_PTR(-EINVAL);
}
@@ -564,9 +526,9 @@ static struct se_device *pscsi_create_virtdevice(
* If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
* struct Scsi_Host we will need to bring the TCM/pSCSI object online
*/
- if (!(sh)) {
+ if (!sh) {
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
- printk(KERN_ERR "pSCSI: Unable to locate struct"
+ pr_err("pSCSI: Unable to locate struct"
" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
return ERR_PTR(-ENODEV);
}
@@ -575,7 +537,7 @@ static struct se_device *pscsi_create_virtdevice(
* reference, we enforce that udev_path has been set
*/
if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
- printk(KERN_ERR "pSCSI: udev_path attribute has not"
+ pr_err("pSCSI: udev_path attribute has not"
" been set before ENABLE=1\n");
return ERR_PTR(-EINVAL);
}
@@ -586,8 +548,8 @@ static struct se_device *pscsi_create_virtdevice(
*/
if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
spin_lock(&hba->device_lock);
- if (!(list_empty(&hba->hba_dev_list))) {
- printk(KERN_ERR "pSCSI: Unable to set hba_mode"
+ if (!list_empty(&hba->hba_dev_list)) {
+ pr_err("pSCSI: Unable to set hba_mode"
" with active devices\n");
spin_unlock(&hba->device_lock);
return ERR_PTR(-EEXIST);
@@ -601,16 +563,16 @@ static struct se_device *pscsi_create_virtdevice(
hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
sh = phv->phv_lld_host;
} else {
- sh = pscsi_get_sh(pdv->pdv_host_id);
- if (!(sh)) {
- printk(KERN_ERR "pSCSI: Unable to locate"
+ sh = scsi_host_lookup(pdv->pdv_host_id);
+ if (IS_ERR(sh)) {
+ pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
- return ERR_PTR(-ENODEV);
+ return (struct se_device *) sh;
}
}
} else {
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
- printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
+ pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while"
" struct Scsi_Host exists\n");
return ERR_PTR(-EEXIST);
}
@@ -639,7 +601,7 @@ static struct se_device *pscsi_create_virtdevice(
break;
}
- if (!(dev)) {
+ if (!dev) {
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
scsi_host_put(sh);
else if (legacy_mode_enable) {
@@ -653,7 +615,7 @@ static struct se_device *pscsi_create_virtdevice(
}
spin_unlock_irq(sh->host_lock);
- printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
+ pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
@@ -728,13 +690,12 @@ static int pscsi_transport_complete(struct se_task *task)
*/
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
- if (!TASK_CMD(task)->se_deve)
+ if (!task->task_se_cmd->se_deve)
goto after_mode_sense;
- if (TASK_CMD(task)->se_deve->lun_flags &
+ if (task->task_se_cmd->se_deve->lun_flags &
TRANSPORT_LUNFLAGS_READ_ONLY) {
- unsigned char *buf = (unsigned char *)
- T_TASK(task->task_se_cmd)->t_task_buf;
+ unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd);
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
@@ -743,6 +704,8 @@ static int pscsi_transport_complete(struct se_task *task)
if (!(buf[2] & 0x80))
buf[2] |= 0x80;
}
+
+ transport_kunmap_first_data_page(task->task_se_cmd);
}
}
after_mode_sense:
@@ -766,8 +729,8 @@ after_mode_sense:
u32 blocksize;
buf = sg_virt(&sg[0]);
- if (!(buf)) {
- printk(KERN_ERR "Unable to get buf for scatterlist\n");
+ if (!buf) {
+ pr_err("Unable to get buf for scatterlist\n");
goto after_mode_select;
}
@@ -797,34 +760,20 @@ after_mode_select:
}
static struct se_task *
-pscsi_alloc_task(struct se_cmd *cmd)
+pscsi_alloc_task(unsigned char *cdb)
{
struct pscsi_plugin_task *pt;
- unsigned char *cdb = T_TASK(cmd)->t_task_cdb;
- pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
+ /*
+ * Dynamically alloc cdb space, since it may be larger than
+ * TCM_MAX_COMMAND_SIZE
+ */
+ pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
if (!pt) {
- printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
+ pr_err("Unable to allocate struct pscsi_plugin_task\n");
return NULL;
}
- /*
- * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
- * allocate the extended CDB buffer for per struct se_task context
- * pt->pscsi_cdb now.
- */
- if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {
-
- pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
- if (!(pt->pscsi_cdb)) {
- printk(KERN_ERR "pSCSI: Unable to allocate extended"
- " pt->pscsi_cdb\n");
- kfree(pt);
- return NULL;
- }
- } else
- pt->pscsi_cdb = &pt->__pscsi_cdb[0];
-
return &pt->pscsi_task;
}
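Together with the header change further down (pscsi_cdb becomes a trailing pscsi_cdb[0] array), this hunk replaces the fixed-size CDB buffer plus optional side allocation with a single allocation sized for the actual command. A small sketch of that trailing-array idiom, with simplified names:

    #include <linux/slab.h>
    #include <linux/string.h>

    struct demo_task {
            int             direction;
            unsigned char   cdb[0];         /* CDB storage follows the struct */
    };

    static struct demo_task *demo_alloc_task(const unsigned char *cdb, size_t cdb_len)
    {
            /* One kzalloc covers the struct and a CDB of whatever length
             * this command actually needs. */
            struct demo_task *t = kzalloc(sizeof(*t) + cdb_len, GFP_KERNEL);

            if (!t)
                    return NULL;
            memcpy(t->cdb, cdb, cdb_len);
            return t;
    }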
@@ -849,7 +798,7 @@ static inline void pscsi_blk_init_request(
* also set the end_io_data pointer to struct se_task.
*/
req->end_io = pscsi_req_done;
- req->end_io_data = (void *)task;
+ req->end_io_data = task;
/*
* Load the referenced struct se_task's SCSI CDB into
* include/linux/blkdev.h:struct request->cmd
@@ -859,7 +808,7 @@ static inline void pscsi_blk_init_request(
/*
* Setup pointer for outgoing sense data.
*/
- req->sense = (void *)&pt->pscsi_sense[0];
+ req->sense = &pt->pscsi_sense[0];
req->sense_len = 0;
}
@@ -874,8 +823,8 @@ static int pscsi_blk_get_request(struct se_task *task)
pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
(task->task_data_direction == DMA_TO_DEVICE),
GFP_KERNEL);
- if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
- printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
+ if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) {
+ pr_err("PSCSI: blk_get_request() failed: %ld\n",
IS_ERR(pt->pscsi_req));
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -920,15 +869,8 @@ static int pscsi_do_task(struct se_task *task)
static void pscsi_free_task(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- struct se_cmd *cmd = task->task_se_cmd;
/*
- * Release the extended CDB allocation from pscsi_alloc_task()
- * if one exists.
- */
- if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
- kfree(pt->pscsi_cdb);
- /*
* We do not release the bio(s) here associated with this task, as
* this is handled by bio_put() and pscsi_bi_endio().
*/
@@ -973,7 +915,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
switch (token) {
case Opt_scsi_host_id:
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
- printk(KERN_ERR "PSCSI[%d]: Unable to accept"
+ pr_err("PSCSI[%d]: Unable to accept"
" scsi_host_id while phv_mode =="
" PHV_LLD_SCSI_HOST_NO\n",
phv->phv_host_id);
@@ -982,14 +924,14 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
}
match_int(args, &arg);
pdv->pdv_host_id = arg;
- printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
+ pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
" %d\n", phv->phv_host_id, pdv->pdv_host_id);
pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
break;
case Opt_scsi_channel_id:
match_int(args, &arg);
pdv->pdv_channel_id = arg;
- printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
+ pr_debug("PSCSI[%d]: Referencing SCSI Channel"
" ID: %d\n", phv->phv_host_id,
pdv->pdv_channel_id);
pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
@@ -997,7 +939,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
case Opt_scsi_target_id:
match_int(args, &arg);
pdv->pdv_target_id = arg;
- printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
+ pr_debug("PSCSI[%d]: Referencing SCSI Target"
" ID: %d\n", phv->phv_host_id,
pdv->pdv_target_id);
pdv->pdv_flags |= PDF_HAS_TARGET_ID;
@@ -1005,7 +947,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
case Opt_scsi_lun_id:
match_int(args, &arg);
pdv->pdv_lun_id = arg;
- printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
+ pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
pdv->pdv_flags |= PDF_HAS_LUN_ID;
break;
@@ -1028,9 +970,9 @@ static ssize_t pscsi_check_configfs_dev_params(
if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
!(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
!(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
- printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
+ pr_err("Missing scsi_channel_id=, scsi_target_id= and"
" scsi_lun_id= parameters\n");
- return -1;
+ return -EINVAL;
}
return 0;
@@ -1090,7 +1032,7 @@ static void pscsi_bi_endio(struct bio *bio, int error)
bio_put(bio);
}
-static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
+static inline struct bio *pscsi_get_bio(int sg_num)
{
struct bio *bio;
/*
@@ -1098,8 +1040,8 @@ static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
* in block/blk-core.c:blk_make_request()
*/
bio = bio_kmalloc(GFP_KERNEL, sg_num);
- if (!(bio)) {
- printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
+ if (!bio) {
+ pr_err("PSCSI: bio_kmalloc() failed\n");
return NULL;
}
bio->bi_end_io = pscsi_bi_endio;
@@ -1107,13 +1049,7 @@ static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
return bio;
}
-#if 0
-#define DEBUG_PSCSI(x...) printk(x)
-#else
-#define DEBUG_PSCSI(x...)
-#endif
-
-static int __pscsi_map_task_SG(
+static int __pscsi_map_SG(
struct se_task *task,
struct scatterlist *task_sg,
u32 task_sg_num,
@@ -1134,7 +1070,7 @@ static int __pscsi_map_task_SG(
return 0;
/*
* For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup
- * the bio_vec maplist from TC< struct se_mem -> task->task_sg ->
+ * the bio_vec maplist from task->task_sg ->
* struct scatterlist memory. The struct se_task->task_sg[] currently needs
* to be attached to struct bios for submission to Linux/SCSI using
* struct request to struct scsi_device->request_queue.
@@ -1143,34 +1079,34 @@ static int __pscsi_map_task_SG(
* is ported to upstream SCSI passthrough functionality that accepts
* struct scatterlist->page_link or struct page as a parameter.
*/
- DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);
+ pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
for_each_sg(task_sg, sg, task_sg_num, i) {
page = sg_page(sg);
off = sg->offset;
len = sg->length;
- DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
+ pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
page, len, off);
while (len > 0 && data_len > 0) {
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
bytes = min(bytes, data_len);
- if (!(bio)) {
+ if (!bio) {
nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
nr_pages -= nr_vecs;
/*
* Calls bio_kmalloc() and sets bio->bi_end_io()
*/
- bio = pscsi_get_bio(pdv, nr_vecs);
- if (!(bio))
+ bio = pscsi_get_bio(nr_vecs);
+ if (!bio)
goto fail;
if (rw)
bio->bi_rw |= REQ_WRITE;
- DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
+ pr_debug("PSCSI: Allocated bio: %p,"
" dir: %s nr_vecs: %d\n", bio,
(rw) ? "rw" : "r", nr_vecs);
/*
@@ -1185,7 +1121,7 @@ static int __pscsi_map_task_SG(
tbio = tbio->bi_next = bio;
}
- DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
+ pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
" bio: %p page: %p len: %d off: %d\n", i, bio,
page, len, off);
@@ -1194,11 +1130,11 @@ static int __pscsi_map_task_SG(
if (rc != bytes)
goto fail;
- DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
+ pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
bio->bi_vcnt, nr_vecs);
if (bio->bi_vcnt > nr_vecs) {
- DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
+ pr_debug("PSCSI: Reached bio->bi_vcnt max:"
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);
/*
@@ -1220,15 +1156,15 @@ static int __pscsi_map_task_SG(
* Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
* primary SCSI WRITE payload mapped for struct se_task->task_sg[]
*/
- if (!(bidi_read)) {
+ if (!bidi_read) {
/*
* Starting with v2.6.31, call blk_make_request() passing in *hbio to
* allocate the pSCSI task a struct request.
*/
pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
hbio, GFP_KERNEL);
- if (!(pt->pscsi_req)) {
- printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
+ if (!pt->pscsi_req) {
+ pr_err("pSCSI: blk_make_request() failed\n");
goto fail;
}
/*
@@ -1237,7 +1173,7 @@ static int __pscsi_map_task_SG(
*/
pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
- return task->task_sg_num;
+ return task->task_sg_nents;
}
/*
* Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
@@ -1245,13 +1181,13 @@ static int __pscsi_map_task_SG(
*/
pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
hbio, GFP_KERNEL);
- if (!(pt->pscsi_req->next_rq)) {
- printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
+ if (!pt->pscsi_req->next_rq) {
+ pr_err("pSCSI: blk_make_request() failed for BIDI\n");
goto fail;
}
pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
- return task->task_sg_num;
+ return task->task_sg_nents;
fail:
while (hbio) {
bio = hbio;
@@ -1262,7 +1198,10 @@ fail:
return ret;
}
-static int pscsi_map_task_SG(struct se_task *task)
+/*
+ * pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call.
+ */
+static int pscsi_map_SG(struct se_task *task)
{
int ret;
@@ -1270,14 +1209,14 @@ static int pscsi_map_task_SG(struct se_task *task)
* Setup the main struct request for the task->task_sg[] payload
*/
- ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
+ ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0);
if (ret >= 0 && task->task_sg_bidi) {
/*
* If present, set up the extra BIDI-COMMAND SCSI READ
* struct request and payload.
*/
- ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
- task->task_sg_num, 1);
+ ret = __pscsi_map_SG(task, task->task_sg_bidi,
+ task->task_sg_nents, 1);
}
if (ret < 0)
@@ -1285,33 +1224,6 @@ static int pscsi_map_task_SG(struct se_task *task)
return 0;
}
-/* pscsi_map_task_non_SG():
- *
- *
- */
-static int pscsi_map_task_non_SG(struct se_task *task)
-{
- struct se_cmd *cmd = TASK_CMD(task);
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
- int ret = 0;
-
- if (pscsi_blk_get_request(task) < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
- if (!task->task_size)
- return 0;
-
- ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
- pt->pscsi_req, T_TASK(cmd)->t_task_buf,
- task->task_size, GFP_KERNEL);
- if (ret < 0) {
- printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- }
- return 0;
-}
-
static int pscsi_CDB_none(struct se_task *task)
{
return pscsi_blk_get_request(task);
@@ -1383,9 +1295,9 @@ static inline void pscsi_process_SAM_status(
struct pscsi_plugin_task *pt)
{
task->task_scsi_status = status_byte(pt->pscsi_result);
- if ((task->task_scsi_status)) {
+ if (task->task_scsi_status) {
task->task_scsi_status <<= 1;
- printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
+ pr_debug("PSCSI Status Byte exception at task: %p CDB:"
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
}
@@ -1395,18 +1307,16 @@ static inline void pscsi_process_SAM_status(
transport_complete_task(task, (!task->task_scsi_status));
break;
default:
- printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
+ pr_debug("PSCSI Host Byte exception at task: %p CDB:"
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- TASK_CMD(task)->transport_error_status =
+ task->task_se_cmd->transport_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
transport_complete_task(task, 0);
break;
}
-
- return;
}
static void pscsi_req_done(struct request *req, int uptodate)
@@ -1433,8 +1343,8 @@ static struct se_subsystem_api pscsi_template = {
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_PHBA_PDEV,
.cdb_none = pscsi_CDB_none,
- .map_task_non_SG = pscsi_map_task_non_SG,
- .map_task_SG = pscsi_map_task_SG,
+ .map_control_SG = pscsi_map_SG,
+ .map_data_SG = pscsi_map_SG,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index a4cd5d352c3..ebf4f1ae2c8 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -2,7 +2,6 @@
#define TARGET_CORE_PSCSI_H
#define PSCSI_VERSION "v4.0"
-#define PSCSI_VIRTUAL_HBA_DEPTH 2048
/* used in pscsi_find_alloc_len() */
#ifndef INQUIRY_DATA_SIZE
@@ -24,13 +23,12 @@
struct pscsi_plugin_task {
struct se_task pscsi_task;
- unsigned char *pscsi_cdb;
- unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
int pscsi_direction;
int pscsi_result;
u32 pscsi_resid;
struct request *pscsi_req;
+ unsigned char pscsi_cdb[0];
} ____cacheline_aligned;
#define PDF_HAS_CHANNEL_ID 0x01
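For illustration only, not part of this patch: with pscsi_cdb[] now a zero-length trailing member, the CDB storage is expected to share one allocation with the task itself. A minimal sketch, where cdb and cdb_len are hypothetical caller-supplied values:

	struct pscsi_plugin_task *pt;

	/* One kzalloc() covers the struct plus cdb_len bytes of CDB. */
	pt = kzalloc(sizeof(*pt) + cdb_len, GFP_KERNEL);
	if (!pt)
		return NULL;
	memcpy(pt->pscsi_cdb, cdb, cdb_len);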
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 7837dd365a9..3dd81d24d9a 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -44,12 +44,8 @@
#include "target_core_rd.h"
-static struct se_subsystem_api rd_dr_template;
static struct se_subsystem_api rd_mcp_template;
-/* #define DEBUG_RAMDISK_MCP */
-/* #define DEBUG_RAMDISK_DR */
-
/* rd_attach_hba(): (Part of se_subsystem_api_t template)
*
*
@@ -59,24 +55,21 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
struct rd_host *rd_host;
rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
- if (!(rd_host)) {
- printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
+ if (!rd_host) {
+ pr_err("Unable to allocate memory for struct rd_host\n");
return -ENOMEM;
}
rd_host->rd_host_id = host_id;
- atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
- atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
- hba->hba_ptr = (void *) rd_host;
+ hba->hba_ptr = rd_host;
- printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
+ pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
- printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
- " Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
- rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
- RD_MAX_SECTORS);
+ pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
+ " MaxSectors: %u\n", hba->hba_id,
+ rd_host->rd_host_id, RD_MAX_SECTORS);
return 0;
}
@@ -85,7 +78,7 @@ static void rd_detach_hba(struct se_hba *hba)
{
struct rd_host *rd_host = hba->hba_ptr;
- printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
+ pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
kfree(rd_host);
@@ -114,7 +107,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
for (j = 0; j < sg_per_table; j++) {
pg = sg_page(&sg[j]);
- if ((pg)) {
+ if (pg) {
__free_page(pg);
page_count++;
}
@@ -123,7 +116,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
kfree(sg);
}
- printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
+ pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
@@ -148,7 +141,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
struct scatterlist *sg;
if (rd_dev->rd_page_count <= 0) {
- printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
+ pr_err("Illegal page count: %u for Ramdisk device\n",
rd_dev->rd_page_count);
return -EINVAL;
}
@@ -157,8 +150,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
- if (!(sg_table)) {
- printk(KERN_ERR "Unable to allocate memory for Ramdisk"
+ if (!sg_table) {
+ pr_err("Unable to allocate memory for Ramdisk"
" scatterlist tables\n");
return -ENOMEM;
}
@@ -172,13 +165,13 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
GFP_KERNEL);
- if (!(sg)) {
- printk(KERN_ERR "Unable to allocate scatterlist array"
+ if (!sg) {
+ pr_err("Unable to allocate scatterlist array"
" for struct rd_dev\n");
return -ENOMEM;
}
- sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
+ sg_init_table(sg, sg_per_table);
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
@@ -188,8 +181,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
for (j = 0; j < sg_per_table; j++) {
pg = alloc_pages(GFP_KERNEL, 0);
- if (!(pg)) {
- printk(KERN_ERR "Unable to allocate scatterlist"
+ if (!pg) {
+ pr_err("Unable to allocate scatterlist"
" pages for struct rd_dev_sg_table\n");
return -ENOMEM;
}
@@ -201,7 +194,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
total_sg_needed -= sg_per_table;
}
- printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
+ pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count);
@@ -218,8 +211,8 @@ static void *rd_allocate_virtdevice(
struct rd_host *rd_host = hba->hba_ptr;
rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
- if (!(rd_dev)) {
- printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
+ if (!rd_dev) {
+ pr_err("Unable to allocate memory for struct rd_dev\n");
return NULL;
}
@@ -229,11 +222,6 @@ static void *rd_allocate_virtdevice(
return rd_dev;
}
-static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
-{
- return rd_allocate_virtdevice(hba, name, 1);
-}
-
static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
return rd_allocate_virtdevice(hba, name, 0);
@@ -273,16 +261,15 @@ static struct se_device *rd_create_virtdevice(
dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
dev = transport_add_device_to_core_hba(hba,
- (rd_dev->rd_direct) ? &rd_dr_template :
- &rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
+ &rd_mcp_template, se_dev, dev_flags, rd_dev,
&dev_limits, prod, rev);
- if (!(dev))
+ if (!dev)
goto fail;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
rd_dev->rd_queue_depth = dev->queue_depth;
- printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
+ pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
" %u pages in %u tables, %lu total bytes\n",
rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
@@ -296,14 +283,6 @@ fail:
return ERR_PTR(ret);
}
-static struct se_device *rd_DIRECT_create_virtdevice(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- void *p)
-{
- return rd_create_virtdevice(hba, se_dev, p, 1);
-}
-
static struct se_device *rd_MEMCPY_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
@@ -330,16 +309,15 @@ static inline struct rd_request *RD_REQ(struct se_task *task)
}
static struct se_task *
-rd_alloc_task(struct se_cmd *cmd)
+rd_alloc_task(unsigned char *cdb)
{
struct rd_request *rd_req;
rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
if (!rd_req) {
- printk(KERN_ERR "Unable to allocate struct rd_request\n");
+ pr_err("Unable to allocate struct rd_request\n");
return NULL;
}
- rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;
return &rd_req->rd_task;
}
@@ -360,7 +338,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return sg_table;
}
- printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
+ pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
page);
return NULL;
@@ -373,7 +351,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
static int rd_MEMCPY_read(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_dev;
+ struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
@@ -382,32 +360,32 @@ static int rd_MEMCPY_read(struct rd_request *req)
u32 rd_offset = req->rd_offset;
table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
+ if (!table)
+ return -EINVAL;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = task->task_sg;
sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
+
+ pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
req->rd_page, req->rd_offset);
-#endif
+
src_offset = rd_offset;
while (req->rd_size) {
if ((sg_d[i].length - dst_offset) <
(sg_s[j].length - src_offset)) {
length = (sg_d[i].length - dst_offset);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
+
+ pr_debug("Step 1 - sg_d[%d]: %p length: %d"
" offset: %u sg_s[%d].length: %u\n", i,
&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
sg_s[j].length);
- printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
+ pr_debug("Step 1 - length: %u dst_offset: %u"
" src_offset: %u\n", length, dst_offset,
src_offset);
-#endif
+
if (length > req->rd_size)
length = req->rd_size;
@@ -424,15 +402,15 @@ static int rd_MEMCPY_read(struct rd_request *req)
page_end = 0;
} else {
length = (sg_s[j].length - src_offset);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
+
+ pr_debug("Step 2 - sg_d[%d]: %p length: %d"
" offset: %u sg_s[%d].length: %u\n", i,
&sg_d[i], sg_d[i].length, sg_d[i].offset,
j, sg_s[j].length);
- printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
+ pr_debug("Step 2 - length: %u dst_offset: %u"
" src_offset: %u\n", length, dst_offset,
src_offset);
-#endif
+
if (length > req->rd_size)
length = req->rd_size;
@@ -456,32 +434,29 @@ static int rd_MEMCPY_read(struct rd_request *req)
memcpy(dst, src, length);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+ pr_debug("page: %u, remaining size: %u, length: %u,"
" i: %u, j: %u\n", req->rd_page,
(req->rd_size - length), length, i, j);
-#endif
+
req->rd_size -= length;
- if (!(req->rd_size))
+ if (!req->rd_size)
return 0;
if (!page_end)
continue;
if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "page: %u in same page table\n",
+ pr_debug("page: %u in same page table\n",
req->rd_page);
-#endif
continue;
}
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "getting new page table for page: %u\n",
+
+ pr_debug("getting new page table for page: %u\n",
req->rd_page);
-#endif
+
table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
+ if (!table)
+ return -EINVAL;
sg_s = &table->sg_table[j = 0];
}
@@ -496,7 +471,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
static int rd_MEMCPY_write(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_dev;
+ struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
@@ -505,32 +480,32 @@ static int rd_MEMCPY_write(struct rd_request *req)
u32 rd_offset = req->rd_offset;
table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
+ if (!table)
+ return -EINVAL;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
sg_s = task->task_sg;
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
+
+ pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
req->rd_page, req->rd_offset);
-#endif
+
dst_offset = rd_offset;
while (req->rd_size) {
if ((sg_s[i].length - src_offset) <
(sg_d[j].length - dst_offset)) {
length = (sg_s[i].length - src_offset);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
+
+ pr_debug("Step 1 - sg_s[%d]: %p length: %d"
" offset: %d sg_d[%d].length: %u\n", i,
&sg_s[i], sg_s[i].length, sg_s[i].offset,
j, sg_d[j].length);
- printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
+ pr_debug("Step 1 - length: %u src_offset: %u"
" dst_offset: %u\n", length, src_offset,
dst_offset);
-#endif
+
if (length > req->rd_size)
length = req->rd_size;
@@ -547,15 +522,15 @@ static int rd_MEMCPY_write(struct rd_request *req)
page_end = 0;
} else {
length = (sg_d[j].length - dst_offset);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
+
+ pr_debug("Step 2 - sg_s[%d]: %p length: %d"
" offset: %d sg_d[%d].length: %u\n", i,
&sg_s[i], sg_s[i].length, sg_s[i].offset,
j, sg_d[j].length);
- printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
+ pr_debug("Step 2 - length: %u src_offset: %u"
" dst_offset: %u\n", length, src_offset,
dst_offset);
-#endif
+
if (length > req->rd_size)
length = req->rd_size;
@@ -579,32 +554,29 @@ static int rd_MEMCPY_write(struct rd_request *req)
memcpy(dst, src, length);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+ pr_debug("page: %u, remaining size: %u, length: %u,"
" i: %u, j: %u\n", req->rd_page,
(req->rd_size - length), length, i, j);
-#endif
+
req->rd_size -= length;
- if (!(req->rd_size))
+ if (!req->rd_size)
return 0;
if (!page_end)
continue;
if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "page: %u in same page table\n",
+ pr_debug("page: %u in same page table\n",
req->rd_page);
-#endif
continue;
}
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "getting new page table for page: %u\n",
+
+ pr_debug("getting new page table for page: %u\n",
req->rd_page);
-#endif
+
table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
+ if (!table)
+ return -EINVAL;
sg_d = &table->sg_table[j = 0];
}
@@ -623,11 +595,11 @@ static int rd_MEMCPY_do_task(struct se_task *task)
unsigned long long lba;
int ret;
- req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
+ req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
lba = task->task_lba;
req->rd_offset = (do_div(lba,
- (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
- DEV_ATTRIB(dev)->block_size;
+ (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
+ dev->se_sub_dev->se_dev_attrib.block_size;
req->rd_size = task->task_size;
if (task->task_data_direction == DMA_FROM_DEVICE)
@@ -644,274 +616,6 @@ static int rd_MEMCPY_do_task(struct se_task *task)
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
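A worked example of the page/offset arithmetic above, assuming the default RD_BLOCKSIZE of 512 and a 4 KiB PAGE_SIZE (the latter is architecture dependent):

	/*
	 * 4096 / 512 = 8 blocks per backing page, so for task_lba = 21:
	 *   rd_page   = (21 * 512) / 4096 = 2
	 *   rd_offset = (21 % 8) * 512    = 2560
	 * i.e. the copy starts 2560 bytes into the third backing page,
	 * and rd_size is simply task->task_size.
	 */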
-/* rd_DIRECT_with_offset():
- *
- *
- */
-static int rd_DIRECT_with_offset(
- struct se_task *task,
- struct list_head *se_mem_list,
- u32 *se_mem_cnt,
- u32 *task_offset)
-{
- struct rd_request *req = RD_REQ(task);
- struct rd_dev *dev = req->rd_dev;
- struct rd_dev_sg_table *table;
- struct se_mem *se_mem;
- struct scatterlist *sg_s;
- u32 j = 0, set_offset = 1;
- u32 get_next_table = 0, offset_length, table_sg_end;
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
-
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
- (task->task_data_direction == DMA_TO_DEVICE) ?
- "Write" : "Read",
- task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
-#endif
- while (req->rd_size) {
- se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
- if (!(se_mem)) {
- printk(KERN_ERR "Unable to allocate struct se_mem\n");
- return -1;
- }
- INIT_LIST_HEAD(&se_mem->se_list);
-
- if (set_offset) {
- offset_length = sg_s[j].length - req->rd_offset;
- if (offset_length > req->rd_size)
- offset_length = req->rd_size;
-
- se_mem->se_page = sg_page(&sg_s[j++]);
- se_mem->se_off = req->rd_offset;
- se_mem->se_len = offset_length;
-
- set_offset = 0;
- get_next_table = (j > table_sg_end);
- goto check_eot;
- }
-
- offset_length = (req->rd_size < req->rd_offset) ?
- req->rd_size : req->rd_offset;
-
- se_mem->se_page = sg_page(&sg_s[j]);
- se_mem->se_len = offset_length;
-
- set_offset = 1;
-
-check_eot:
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
- " se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
- req->rd_page, req->rd_size, offset_length, j, se_mem,
- se_mem->se_page, se_mem->se_off, se_mem->se_len);
-#endif
- list_add_tail(&se_mem->se_list, se_mem_list);
- (*se_mem_cnt)++;
-
- req->rd_size -= offset_length;
- if (!(req->rd_size))
- goto out;
-
- if (!set_offset && !get_next_table)
- continue;
-
- if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "page: %u in same page table\n",
- req->rd_page);
-#endif
- continue;
- }
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "getting new page table for page: %u\n",
- req->rd_page);
-#endif
- table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
-
- sg_s = &table->sg_table[j = 0];
- }
-
-out:
- T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
- *se_mem_cnt);
-#endif
- return 0;
-}
-
-/* rd_DIRECT_without_offset():
- *
- *
- */
-static int rd_DIRECT_without_offset(
- struct se_task *task,
- struct list_head *se_mem_list,
- u32 *se_mem_cnt,
- u32 *task_offset)
-{
- struct rd_request *req = RD_REQ(task);
- struct rd_dev *dev = req->rd_dev;
- struct rd_dev_sg_table *table;
- struct se_mem *se_mem;
- struct scatterlist *sg_s;
- u32 length, j = 0;
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
-
- sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
- (task->task_data_direction == DMA_TO_DEVICE) ?
- "Write" : "Read",
- task->task_lba, req->rd_size, req->rd_page);
-#endif
- while (req->rd_size) {
- se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
- if (!(se_mem)) {
- printk(KERN_ERR "Unable to allocate struct se_mem\n");
- return -1;
- }
- INIT_LIST_HEAD(&se_mem->se_list);
-
- length = (req->rd_size < sg_s[j].length) ?
- req->rd_size : sg_s[j].length;
-
- se_mem->se_page = sg_page(&sg_s[j++]);
- se_mem->se_len = length;
-
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
- " se_page: %p se_off: %u se_len: %u\n", req->rd_page,
- req->rd_size, j, se_mem, se_mem->se_page,
- se_mem->se_off, se_mem->se_len);
-#endif
- list_add_tail(&se_mem->se_list, se_mem_list);
- (*se_mem_cnt)++;
-
- req->rd_size -= length;
- if (!(req->rd_size))
- goto out;
-
- if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_DR
- printk("page: %u in same page table\n",
- req->rd_page);
-#endif
- continue;
- }
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "getting new page table for page: %u\n",
- req->rd_page);
-#endif
- table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
-
- sg_s = &table->sg_table[j = 0];
- }
-
-out:
- T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
- *se_mem_cnt);
-#endif
- return 0;
-}
-
-/* rd_DIRECT_do_se_mem_map():
- *
- *
- */
-static int rd_DIRECT_do_se_mem_map(
- struct se_task *task,
- struct list_head *se_mem_list,
- void *in_mem,
- struct se_mem *in_se_mem,
- struct se_mem **out_se_mem,
- u32 *se_mem_cnt,
- u32 *task_offset_in)
-{
- struct se_cmd *cmd = task->task_se_cmd;
- struct rd_request *req = RD_REQ(task);
- u32 task_offset = *task_offset_in;
- unsigned long long lba;
- int ret;
-
- req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
- PAGE_SIZE);
- lba = task->task_lba;
- req->rd_offset = (do_div(lba,
- (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
- DEV_ATTRIB(task->se_dev)->block_size;
- req->rd_size = task->task_size;
-
- if (req->rd_offset)
- ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
- task_offset_in);
- else
- ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
- task_offset_in);
-
- if (ret < 0)
- return ret;
-
- if (CMD_TFO(cmd)->task_sg_chaining == 0)
- return 0;
- /*
- * Currently prevent writers from multiple HW fabrics doing
- * pci_map_sg() to RD_DR's internal scatterlist memory.
- */
- if (cmd->data_direction == DMA_TO_DEVICE) {
- printk(KERN_ERR "DMA_TO_DEVICE not supported for"
- " RAMDISK_DR with task_sg_chaining=1\n");
- return -1;
- }
- /*
- * Special case for if task_sg_chaining is enabled, then
- * we setup struct se_task->task_sg[], as it will be used by
- * transport_do_task_sg_chain() for creating chained SGLs
- * across multiple struct se_task->task_sg[].
- */
- if (!(transport_calc_sg_num(task,
- list_entry(T_TASK(cmd)->t_mem_list->next,
- struct se_mem, se_list),
- task_offset)))
- return -1;
-
- return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
- list_entry(T_TASK(cmd)->t_mem_list->next,
- struct se_mem, se_list),
- out_se_mem, se_mem_cnt, task_offset_in);
-}
-
-/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static int rd_DIRECT_do_task(struct se_task *task)
-{
- /*
- * At this point the locally allocated RD tables have been mapped
- * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
- */
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
-
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
-}
-
/* rd_free_task(): (Part of se_subsystem_api_t template)
*
*
@@ -956,7 +660,7 @@ static ssize_t rd_set_configfs_dev_params(
case Opt_rd_pages:
match_int(args, &arg);
rd_dev->rd_page_count = arg;
- printk(KERN_INFO "RAMDISK: Referencing Page"
+ pr_debug("RAMDISK: Referencing Page"
" Count: %u\n", rd_dev->rd_page_count);
rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
break;
@@ -974,8 +678,8 @@ static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
- printk(KERN_INFO "Missing rd_pages= parameter\n");
- return -1;
+ pr_debug("Missing rd_pages= parameter\n");
+ return -EINVAL;
}
return 0;
@@ -1021,32 +725,11 @@ static sector_t rd_get_blocks(struct se_device *dev)
{
struct rd_dev *rd_dev = dev->dev_ptr;
unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
- DEV_ATTRIB(dev)->block_size) - 1;
+ dev->se_sub_dev->se_dev_attrib.block_size) - 1;
return blocks_long;
}
-static struct se_subsystem_api rd_dr_template = {
- .name = "rd_dr",
- .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
- .attach_hba = rd_attach_hba,
- .detach_hba = rd_detach_hba,
- .allocate_virtdevice = rd_DIRECT_allocate_virtdevice,
- .create_virtdevice = rd_DIRECT_create_virtdevice,
- .free_device = rd_free_device,
- .alloc_task = rd_alloc_task,
- .do_task = rd_DIRECT_do_task,
- .free_task = rd_free_task,
- .check_configfs_dev_params = rd_check_configfs_dev_params,
- .set_configfs_dev_params = rd_set_configfs_dev_params,
- .show_configfs_dev_params = rd_show_configfs_dev_params,
- .get_cdb = rd_get_cdb,
- .get_device_rev = rd_get_device_rev,
- .get_device_type = rd_get_device_type,
- .get_blocks = rd_get_blocks,
- .do_se_mem_map = rd_DIRECT_do_se_mem_map,
-};
-
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
@@ -1071,13 +754,8 @@ int __init rd_module_init(void)
{
int ret;
- ret = transport_subsystem_register(&rd_dr_template);
- if (ret < 0)
- return ret;
-
ret = transport_subsystem_register(&rd_mcp_template);
if (ret < 0) {
- transport_subsystem_release(&rd_dr_template);
return ret;
}
@@ -1086,6 +764,5 @@ int __init rd_module_init(void)
void rd_module_exit(void)
{
- transport_subsystem_release(&rd_dr_template);
transport_subsystem_release(&rd_mcp_template);
}
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 3ea19e29d8e..0d027732cd0 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -7,8 +7,6 @@
/* Largest piece of memory kmalloc can allocate */
#define RD_MAX_ALLOCATION_SIZE 65536
-/* Maximum queuedepth for the Ramdisk HBA */
-#define RD_HBA_QUEUE_DEPTH 256
#define RD_DEVICE_QUEUE_DEPTH 32
#define RD_MAX_DEVICE_QUEUE_DEPTH 128
#define RD_BLOCKSIZE 512
@@ -34,8 +32,6 @@ struct rd_request {
u32 rd_page_count;
/* Scatterlist count */
u32 rd_size;
- /* Ramdisk device */
- struct rd_dev *rd_dev;
} ____cacheline_aligned;
struct rd_dev_sg_table {
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c
index dc6fed037ab..72843441d4f 100644
--- a/drivers/target/target_core_scdb.c
+++ b/drivers/target/target_core_scdb.c
@@ -42,13 +42,13 @@
*/
void split_cdb_XX_6(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
cdb[1] = (lba >> 16) & 0x1f;
cdb[2] = (lba >> 8) & 0xff;
cdb[3] = lba & 0xff;
- cdb[4] = *sectors & 0xff;
+ cdb[4] = sectors & 0xff;
}
/* split_cdb_XX_10():
@@ -57,11 +57,11 @@ void split_cdb_XX_6(
*/
void split_cdb_XX_10(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
put_unaligned_be32(lba, &cdb[2]);
- put_unaligned_be16(*sectors, &cdb[7]);
+ put_unaligned_be16(sectors, &cdb[7]);
}
/* split_cdb_XX_12():
@@ -70,11 +70,11 @@ void split_cdb_XX_10(
*/
void split_cdb_XX_12(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
put_unaligned_be32(lba, &cdb[2]);
- put_unaligned_be32(*sectors, &cdb[6]);
+ put_unaligned_be32(sectors, &cdb[6]);
}
/* split_cdb_XX_16():
@@ -83,11 +83,11 @@ void split_cdb_XX_12(
*/
void split_cdb_XX_16(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
put_unaligned_be64(lba, &cdb[2]);
- put_unaligned_be32(*sectors, &cdb[10]);
+ put_unaligned_be32(sectors, &cdb[10]);
}
/*
@@ -97,9 +97,9 @@ void split_cdb_XX_16(
*/
void split_cdb_XX_32(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
put_unaligned_be64(lba, &cdb[12]);
- put_unaligned_be32(*sectors, &cdb[28]);
+ put_unaligned_be32(sectors, &cdb[28]);
}
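For reference, a minimal usage sketch of the new by-value form when building a READ_10 CDB (illustrative only; the lba and sectors values are arbitrary, READ_10 comes from <scsi/scsi.h>):

	unsigned char cdb[16] = { READ_10, };
	unsigned long long lba = 0x12345678ULL;
	u32 sectors = 8;

	/* lba lands big-endian in cdb[2..5], sectors in cdb[7..8]. */
	split_cdb_XX_10(lba, sectors, cdb);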
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h
index 98cd1c01ed8..48e9ccc9585 100644
--- a/drivers/target/target_core_scdb.h
+++ b/drivers/target/target_core_scdb.h
@@ -1,10 +1,10 @@
#ifndef TARGET_CORE_SCDB_H
#define TARGET_CORE_SCDB_H
-extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *);
-extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *);
-extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *);
-extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *);
-extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_6(unsigned long long, u32, unsigned char *);
+extern void split_cdb_XX_10(unsigned long long, u32, unsigned char *);
+extern void split_cdb_XX_12(unsigned long long, u32, unsigned char *);
+extern void split_cdb_XX_16(unsigned long long, u32, unsigned char *);
+extern void split_cdb_XX_32(unsigned long long, u32, unsigned char *);
#endif /* TARGET_CORE_SCDB_H */
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 5e3a067a747..a8d6e1dee93 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -402,8 +402,8 @@ static ssize_t target_stat_scsi_lu_show_attr_lu_name(
return -ENODEV;
/* scsiLuWwnName */
return snprintf(page, PAGE_SIZE, "%s\n",
- (strlen(DEV_T10_WWN(dev)->unit_serial)) ?
- (char *)&DEV_T10_WWN(dev)->unit_serial[0] : "None");
+ (strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ?
+ dev->se_sub_dev->t10_wwn.unit_serial : "None");
}
DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
@@ -413,17 +413,17 @@ static ssize_t target_stat_scsi_lu_show_attr_vend(
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
- int j;
- char str[28];
+ int i;
+ char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1];
if (!dev)
return -ENODEV;
+
/* scsiLuVendorId */
- memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
- for (j = 0; j < 8; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
- DEV_T10_WWN(dev)->vendor[j] : 0x20;
- str[8] = 0;
+ for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
+ str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ?
+ dev->se_sub_dev->t10_wwn.vendor[i] : ' ';
+ str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(vend);
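The vendor, model, and revision handlers repeat the same fixed-width INQUIRY-field formatting; a sketch of that pattern as a hypothetical helper (not part of the patch, using isprint() from <linux/ctype.h> instead of the driver's ISPRINT macro; dst must hold len + 1 bytes):

	static void t10_field_to_str(char *dst, const char *src, size_t len)
	{
		size_t i;

		/* Replace non-printable bytes with spaces, then NUL-terminate. */
		for (i = 0; i < len; i++)
			dst[i] = isprint(src[i]) ? src[i] : ' ';
		dst[len] = '\0';
	}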
@@ -434,18 +434,17 @@ static ssize_t target_stat_scsi_lu_show_attr_prod(
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
- int j;
- char str[28];
+ int i;
+ char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1];
if (!dev)
return -ENODEV;
/* scsiLuProductId */
- memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
- for (j = 0; j < 16; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
- DEV_T10_WWN(dev)->model[j] : 0x20;
- str[16] = 0;
+	for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.model); i++)
+ str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ?
+ dev->se_sub_dev->t10_wwn.model[i] : ' ';
+ str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(prod);
@@ -456,18 +455,17 @@ static ssize_t target_stat_scsi_lu_show_attr_rev(
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
- int j;
- char str[28];
+ int i;
+ char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1];
if (!dev)
return -ENODEV;
/* scsiLuRevisionId */
- memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
- for (j = 0; j < 4; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
- DEV_T10_WWN(dev)->revision[j] : 0x20;
- str[4] = 0;
+ for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++)
+ str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ?
+ dev->se_sub_dev->t10_wwn.revision[i] : ' ';
+ str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(rev);
@@ -484,7 +482,7 @@ static ssize_t target_stat_scsi_lu_show_attr_dev_type(
/* scsiLuPeripheralType */
return snprintf(page, PAGE_SIZE, "%u\n",
- TRANSPORT(dev)->get_device_type(dev));
+ dev->transport->get_device_type(dev));
}
DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
@@ -668,18 +666,18 @@ static struct config_item_type target_stat_scsi_lu_cit = {
*/
void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
{
- struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group;
+ struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group;
- config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group,
+ config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group,
"scsi_dev", &target_stat_scsi_dev_cit);
- config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group,
+ config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group,
"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
- config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group,
+ config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group,
"scsi_lu", &target_stat_scsi_lu_cit);
- dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group;
- dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group;
- dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group;
+ dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group;
+ dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group;
+ dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group;
dev_stat_grp->default_groups[3] = NULL;
}
@@ -922,7 +920,7 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_name(
tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
- TPG_TFO(tpg)->get_fabric_name(), sep->sep_index);
+ tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index);
spin_unlock(&lun->lun_sep_lock);
return ret;
}
@@ -945,8 +943,8 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_port_index(
tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
- TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&lun->lun_sep_lock);
return ret;
}
@@ -1128,7 +1126,7 @@ static ssize_t target_stat_scsi_transport_show_attr_device(
tpg = sep->sep_tpg;
/* scsiTransportType */
ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
spin_unlock(&lun->lun_sep_lock);
return ret;
}
@@ -1150,7 +1148,7 @@ static ssize_t target_stat_scsi_transport_show_attr_indx(
}
tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%u\n",
- TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+ tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
spin_unlock(&lun->lun_sep_lock);
return ret;
}
@@ -1173,10 +1171,10 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name(
return -ENODEV;
}
tpg = sep->sep_tpg;
- wwn = DEV_T10_WWN(dev);
+ wwn = &dev->se_sub_dev->t10_wwn;
/* scsiTransportDevName */
ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
- TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg),
(strlen(wwn->unit_serial)) ? wwn->unit_serial :
wwn->vendor);
spin_unlock(&lun->lun_sep_lock);
@@ -1212,18 +1210,18 @@ static struct config_item_type target_stat_scsi_transport_cit = {
*/
void target_stat_setup_port_default_groups(struct se_lun *lun)
{
- struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+ struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group;
- config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group,
+ config_group_init_type_name(&lun->port_stat_grps.scsi_port_group,
"scsi_port", &target_stat_scsi_port_cit);
- config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group,
+ config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group,
"scsi_tgt_port", &target_stat_scsi_tgt_port_cit);
- config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group,
+ config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group,
"scsi_transport", &target_stat_scsi_transport_cit);
- port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group;
- port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group;
- port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group;
+ port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group;
+ port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group;
+ port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group;
port_stat_grp->default_groups[3] = NULL;
}
@@ -1264,7 +1262,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
tpg = nacl->se_tpg;
/* scsiInstIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
- TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+ tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
@@ -1314,7 +1312,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port(
}
tpg = nacl->se_tpg;
/* scsiAuthIntrTgtPortIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
+ ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
@@ -1632,7 +1630,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
tpg = nacl->se_tpg;
/* scsiInstIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
- TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+ tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
@@ -1682,7 +1680,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
}
tpg = nacl->se_tpg;
/* scsiPortIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
+ ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
@@ -1708,7 +1706,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_indx(
tpg = nacl->se_tpg;
/* scsiAttIntrPortIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
- TPG_TFO(tpg)->sess_get_index(se_sess));
+ tpg->se_tpg_tfo->sess_get_index(se_sess));
spin_unlock_irq(&nacl->nacl_sess_lock);
return ret;
}
@@ -1757,8 +1755,8 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
tpg = nacl->se_tpg;
/* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
memset(buf, 0, 64);
- if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL)
- TPG_TFO(tpg)->sess_get_initiator_sid(se_sess,
+ if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
+ tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
(unsigned char *)&buf[0], 64);
ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
@@ -1797,14 +1795,14 @@ static struct config_item_type target_stat_scsi_att_intr_port_cit = {
*/
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl)
{
- struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+ struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group;
- config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group,
+ config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group,
"scsi_auth_intr", &target_stat_scsi_auth_intr_cit);
- config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group,
+ config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group,
"scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit);
- ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group;
- ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group;
+ ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group;
+ ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group;
ml_stat_grp->default_groups[2] = NULL;
}
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 59b8b9c5ad7..27d4925e51c 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -41,13 +41,6 @@
#include "target_core_alua.h"
#include "target_core_pr.h"
-#define DEBUG_LUN_RESET
-#ifdef DEBUG_LUN_RESET
-#define DEBUG_LR(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_LR(x...)
-#endif
-
struct se_tmr_req *core_tmr_alloc_req(
struct se_cmd *se_cmd,
void *fabric_tmr_ptr,
@@ -57,8 +50,8 @@ struct se_tmr_req *core_tmr_alloc_req(
tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
GFP_ATOMIC : GFP_KERNEL);
- if (!(tmr)) {
- printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
+ if (!tmr) {
+ pr_err("Unable to allocate struct se_tmr_req\n");
return ERR_PTR(-ENOMEM);
}
tmr->task_cmd = se_cmd;
@@ -75,10 +68,16 @@ void core_tmr_release_req(
{
struct se_device *dev = tmr->tmr_dev;
- spin_lock(&dev->se_tmr_lock);
+ if (!dev) {
+ kmem_cache_free(se_tmr_req_cache, tmr);
+ return;
+ }
+
+ spin_lock_irq(&dev->se_tmr_lock);
list_del(&tmr->tmr_list);
+ spin_unlock_irq(&dev->se_tmr_lock);
+
kmem_cache_free(se_tmr_req_cache, tmr);
- spin_unlock(&dev->se_tmr_lock);
}
static void core_tmr_handle_tas_abort(
@@ -87,14 +86,14 @@ static void core_tmr_handle_tas_abort(
int tas,
int fe_count)
{
- if (!(fe_count)) {
+ if (!fe_count) {
transport_cmd_finish_abort(cmd, 1);
return;
}
/*
* TASK ABORTED status (TAS) bit support
*/
- if (((tmr_nacl != NULL) &&
+ if ((tmr_nacl &&
(tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
transport_send_task_abort(cmd);
@@ -107,15 +106,14 @@ int core_tmr_lun_reset(
struct list_head *preempt_and_abort_list,
struct se_cmd *prout_cmd)
{
- struct se_cmd *cmd;
- struct se_queue_req *qr, *qr_tmp;
+ struct se_cmd *cmd, *tcmd;
struct se_node_acl *tmr_nacl = NULL;
struct se_portal_group *tmr_tpg = NULL;
- struct se_queue_obj *qobj = dev->dev_queue_obj;
+ struct se_queue_obj *qobj = &dev->dev_queue_obj;
struct se_tmr_req *tmr_p, *tmr_pp;
struct se_task *task, *task_tmp;
unsigned long flags;
- int fe_count, state, tas;
+ int fe_count, tas;
/*
* TASK_ABORTED status bit, this is configurable via ConfigFS
* struct se_device attributes. spc4r17 section 7.4.6 Control mode page
@@ -127,7 +125,7 @@ int core_tmr_lun_reset(
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
- tas = DEV_ATTRIB(dev)->emulate_tas;
+ tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
/*
* Determine if this se_tmr is coming from a $FABRIC_MOD
* or struct se_device passthrough..
@@ -136,20 +134,20 @@ int core_tmr_lun_reset(
tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
if (tmr_nacl && tmr_tpg) {
- DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
+ pr_debug("LUN_RESET: TMR caller fabric: %s"
" initiator port %s\n",
- TPG_TFO(tmr_tpg)->get_fabric_name(),
+ tmr_tpg->se_tpg_tfo->get_fabric_name(),
tmr_nacl->initiatorname);
}
}
- DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
+ pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
- TRANSPORT(dev)->name, tas);
+ dev->transport->name, tas);
/*
* Release all pending and outgoing TMRs aside from the received
* LUN_RESET tmr..
*/
- spin_lock(&dev->se_tmr_lock);
+ spin_lock_irq(&dev->se_tmr_lock);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
/*
* Allow the received TMR to return with FUNCTION_COMPLETE.
@@ -158,8 +156,8 @@ int core_tmr_lun_reset(
continue;
cmd = tmr_p->task_cmd;
- if (!(cmd)) {
- printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
+ if (!cmd) {
+ pr_err("Unable to locate struct se_cmd for TMR\n");
continue;
}
/*
@@ -167,33 +165,33 @@ int core_tmr_lun_reset(
* parameter (eg: for PROUT PREEMPT_AND_ABORT service action
 * skip non registration key matching TMRs.
*/
- if ((preempt_and_abort_list != NULL) &&
+ if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
- spin_unlock(&dev->se_tmr_lock);
+ spin_unlock_irq(&dev->se_tmr_lock);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- spin_lock(&dev->se_tmr_lock);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->t_transport_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ spin_lock_irq(&dev->se_tmr_lock);
continue;
}
if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- spin_lock(&dev->se_tmr_lock);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ spin_lock_irq(&dev->se_tmr_lock);
continue;
}
- DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
+ pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
" Response: 0x%02x, t_state: %d\n",
(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_cmd_finish_abort_tmr(cmd);
- spin_lock(&dev->se_tmr_lock);
+ spin_lock_irq(&dev->se_tmr_lock);
}
- spin_unlock(&dev->se_tmr_lock);
+ spin_unlock_irq(&dev->se_tmr_lock);
/*
* Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
* This is following sam4r17, section 5.6 Aborting commands, Table 38
@@ -218,23 +216,17 @@ int core_tmr_lun_reset(
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
t_state_list) {
- if (!(TASK_CMD(task))) {
- printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+ if (!task->task_se_cmd) {
+ pr_err("task->task_se_cmd is NULL!\n");
continue;
}
- cmd = TASK_CMD(task);
+ cmd = task->task_se_cmd;
- if (!T_TASK(cmd)) {
- printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
- " %p ITT: 0x%08x\n", task, cmd,
- CMD_TFO(cmd)->get_task_tag(cmd));
- continue;
- }
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
- if ((preempt_and_abort_list != NULL) &&
+ if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
@@ -248,38 +240,38 @@ int core_tmr_lun_reset(
atomic_set(&task->task_state_active, 0);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ pr_debug("LUN_RESET: %s cmd: %p task: %p"
" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
"def_t_state: %d/%d cdb: 0x%02x\n",
(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
- CMD_TFO(cmd)->get_task_tag(cmd), 0,
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
- cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
- DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
+ cmd->se_tfo->get_task_tag(cmd), 0,
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
+ cmd->deferred_t_state, cmd->t_task_cdb[0]);
+ pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
" t_task_cdbs: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d -- t_transport_active: %d"
" t_transport_stop: %d t_transport_sent: %d\n",
- CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
- T_TASK(cmd)->t_task_cdbs,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
- atomic_read(&T_TASK(cmd)->t_transport_active),
- atomic_read(&T_TASK(cmd)->t_transport_stop),
- atomic_read(&T_TASK(cmd)->t_transport_sent));
+ cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
+ cmd->t_task_list_num,
+ atomic_read(&cmd->t_task_cdbs_left),
+ atomic_read(&cmd->t_task_cdbs_sent),
+ atomic_read(&cmd->t_transport_active),
+ atomic_read(&cmd->t_transport_stop),
+ atomic_read(&cmd->t_transport_sent));
if (atomic_read(&task->task_active)) {
atomic_set(&task->task_stop, 1);
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
- DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
+ pr_debug("LUN_RESET: Waiting for task: %p to shutdown"
" for dev: %p\n", task, dev);
wait_for_completion(&task->task_stop_comp);
- DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
+ pr_debug("LUN_RESET Completed task: %p shutdown for"
" dev: %p\n", task, dev);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ atomic_dec(&cmd->t_task_cdbs_left);
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
@@ -289,34 +281,34 @@ int core_tmr_lun_reset(
}
__transport_stop_task_timer(task, &flags);
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+ if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
- DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
+ &cmd->t_state_lock, flags);
+ pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
" t_task_cdbs_ex_left: %d\n", task, dev,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+ atomic_read(&cmd->t_task_cdbs_ex_left));
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
- fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);
+ fe_count = atomic_read(&cmd->t_fe_count);
- if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
- DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
+ if (atomic_read(&cmd->t_transport_active)) {
+ pr_debug("LUN_RESET: got t_transport_active = 1 for"
" task: %p, t_fe_count: %d dev: %p\n", task,
fe_count, dev);
- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ atomic_set(&cmd->t_transport_aborted, 1);
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
- DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
+ pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
" t_fe_count: %d dev: %p\n", task, fe_count, dev);
- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&cmd->t_transport_aborted, 1);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
spin_lock_irqsave(&dev->execute_task_lock, flags);
@@ -331,25 +323,12 @@ int core_tmr_lun_reset(
* reference, otherwise the struct se_cmd is released.
*/
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
- cmd = (struct se_cmd *)qr->cmd;
- if (!(cmd)) {
- /*
- * Skip these for non PREEMPT_AND_ABORT usage..
- */
- if (preempt_and_abort_list != NULL)
- continue;
-
- atomic_dec(&qobj->queue_cnt);
- list_del(&qr->qr_list);
- kfree(qr);
- continue;
- }
+ list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
- if ((preempt_and_abort_list != NULL) &&
+ if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
@@ -359,30 +338,22 @@ int core_tmr_lun_reset(
if (prout_cmd == cmd)
continue;
- atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+ atomic_dec(&cmd->t_transport_queue_active);
atomic_dec(&qobj->queue_cnt);
- list_del(&qr->qr_list);
+ list_del(&cmd->se_queue_node);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- state = qr->state;
- kfree(qr);
-
- DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
+ pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
- "Preempt" : "", cmd, state,
- atomic_read(&T_TASK(cmd)->t_fe_count));
+ "Preempt" : "", cmd, cmd->t_state,
+ atomic_read(&cmd->t_fe_count));
/*
* Signal that the command has failed via cmd->se_cmd_flags,
- * and call TFO->new_cmd_failure() to wakeup any fabric
- * dependent code used to wait for unsolicited data out
- * allocation to complete. The fabric module is expected
- * to dump any remaining unsolicited data out for the aborted
- * command at this point.
*/
transport_new_cmd_failure(cmd);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
- atomic_read(&T_TASK(cmd)->t_fe_count));
+ atomic_read(&cmd->t_fe_count));
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
}
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
@@ -390,21 +361,21 @@ int core_tmr_lun_reset(
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
*/
- if (!(preempt_and_abort_list) &&
+ if (!preempt_and_abort_list &&
(dev->dev_flags & DF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock);
dev->dev_reserved_node_acl = NULL;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock);
- printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
+ pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
}
spin_lock_irq(&dev->stats_lock);
dev->num_resets++;
spin_unlock_irq(&dev->stats_lock);
- DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
+ pr_debug("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
- TRANSPORT(dev)->name);
+ dev->transport->name);
return 0;
}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 5ec745fed93..4f1ba4c5ef1 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -44,6 +44,12 @@
#include <target/target_core_fabric_ops.h>
#include "target_core_hba.h"
+#include "target_core_stat.h"
+
+extern struct se_device *g_lun0_dev;
+
+static DEFINE_SPINLOCK(tpg_lock);
+static LIST_HEAD(tpg_list);
/* core_clear_initiator_node_from_tpg():
*
@@ -66,9 +72,9 @@ static void core_clear_initiator_node_from_tpg(
continue;
if (!deve->se_lun) {
- printk(KERN_ERR "%s device entries device pointer is"
+ pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
continue;
}
@@ -80,14 +86,13 @@ static void core_clear_initiator_node_from_tpg(
spin_lock(&lun->lun_acl_lock);
list_for_each_entry_safe(acl, acl_tmp,
&lun->lun_acl_list, lacl_list) {
- if (!(strcmp(acl->initiatorname,
- nacl->initiatorname)) &&
- (acl->mapped_lun == deve->mapped_lun))
+ if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
+ (acl->mapped_lun == deve->mapped_lun))
break;
}
if (!acl) {
- printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
+ pr_err("Unable to locate struct se_lun_acl for %s,"
" mapped_lun: %u\n", nacl->initiatorname,
deve->mapped_lun);
spin_unlock(&lun->lun_acl_lock);
@@ -115,7 +120,7 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl(
struct se_node_acl *acl;
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
- if (!(strcmp(acl->initiatorname, initiatorname)))
+ if (!strcmp(acl->initiatorname, initiatorname))
return acl;
}
@@ -134,8 +139,8 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
spin_lock_bh(&tpg->acl_node_lock);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
- if (!(strcmp(acl->initiatorname, initiatorname)) &&
- (!(acl->dynamic_node_acl))) {
+ if (!strcmp(acl->initiatorname, initiatorname) &&
+ !acl->dynamic_node_acl) {
spin_unlock_bh(&tpg->acl_node_lock);
return acl;
}
@@ -171,7 +176,7 @@ void core_tpg_add_node_to_devs(
* By default in LIO-Target $FABRIC_MOD,
* demo_mode_write_protect is ON, or READ_ONLY;
*/
- if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
+ if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
if (dev->dev_flags & DF_READ_ONLY)
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
else
@@ -181,16 +186,16 @@ void core_tpg_add_node_to_devs(
* Allow only optical drives to issue R/W in default RO
* demo mode.
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
+ if (dev->transport->get_device_type(dev) == TYPE_DISK)
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
else
lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
}
- printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
+ pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
" access for LUN in Demo Mode\n",
- TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
"READ-WRITE" : "READ-ONLY");
@@ -210,8 +215,8 @@ static int core_set_queue_depth_for_node(
struct se_node_acl *acl)
{
if (!acl->queue_depth) {
- printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
- "defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
+ pr_err("Queue depth for %s Initiator Node: %s is 0,"
+ "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
acl->initiatorname);
acl->queue_depth = 1;
}
@@ -230,10 +235,10 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl)
nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
- if (!(nacl->device_list)) {
- printk(KERN_ERR "Unable to allocate memory for"
+ if (!nacl->device_list) {
+ pr_err("Unable to allocate memory for"
" struct se_node_acl->device_list\n");
- return -1;
+ return -ENOMEM;
}
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
deve = &nacl->device_list[i];
@@ -259,14 +264,14 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
struct se_node_acl *acl;
acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if ((acl))
+ if (acl)
return acl;
- if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
+ if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
return NULL;
- acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
- if (!(acl))
+ acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
+ if (!acl)
return NULL;
INIT_LIST_HEAD(&acl->acl_list);
@@ -274,23 +279,23 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
atomic_set(&acl->acl_pr_ref_count, 0);
- acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
+ acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
spin_lock_init(&acl->stats_lock);
acl->dynamic_node_acl = 1;
- TPG_TFO(tpg)->set_default_node_attributes(acl);
+ tpg->se_tpg_tfo->set_default_node_attributes(acl);
if (core_create_device_list_for_node(acl) < 0) {
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return NULL;
}
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
core_free_device_list_for_node(acl, tpg);
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return NULL;
}
@@ -301,10 +306,10 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
tpg->num_node_acls++;
spin_unlock_bh(&tpg->acl_node_lock);
- printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
- TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+ pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
+ tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
return acl;
}
@@ -351,12 +356,12 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
spin_lock_bh(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if ((acl)) {
+ if (acl) {
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
- printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
- " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
+ pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
+ " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
spin_unlock_bh(&tpg->acl_node_lock);
/*
* Release the locally allocated struct se_node_acl
@@ -364,22 +369,22 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
* a pointer to an existing demo mode node ACL.
*/
if (se_nacl)
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
se_nacl);
goto done;
}
- printk(KERN_ERR "ACL entry for %s Initiator"
+ pr_err("ACL entry for %s Initiator"
" Node %s already exists for TPG %u, ignoring"
- " request.\n", TPG_TFO(tpg)->get_fabric_name(),
- initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+ " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
+ initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_bh(&tpg->acl_node_lock);
return ERR_PTR(-EEXIST);
}
spin_unlock_bh(&tpg->acl_node_lock);
- if (!(se_nacl)) {
- printk("struct se_node_acl pointer is NULL\n");
+ if (!se_nacl) {
+ pr_err("struct se_node_acl pointer is NULL\n");
return ERR_PTR(-EINVAL);
}
/*
@@ -400,16 +405,16 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
spin_lock_init(&acl->stats_lock);
- TPG_TFO(tpg)->set_default_node_attributes(acl);
+ tpg->se_tpg_tfo->set_default_node_attributes(acl);
if (core_create_device_list_for_node(acl) < 0) {
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return ERR_PTR(-ENOMEM);
}
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
core_free_device_list_for_node(acl, tpg);
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return ERR_PTR(-EINVAL);
}
@@ -419,10 +424,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
spin_unlock_bh(&tpg->acl_node_lock);
done:
- printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
- TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+ pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
+ tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
return acl;
}
@@ -457,7 +462,7 @@ int core_tpg_del_initiator_node_acl(
/*
* Determine if the session needs to be closed by our context.
*/
- if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+ if (!tpg->se_tpg_tfo->shutdown_session(sess))
continue;
spin_unlock_bh(&tpg->session_lock);
@@ -465,7 +470,7 @@ int core_tpg_del_initiator_node_acl(
* If the $FABRIC_MOD session for the Initiator Node ACL exists,
* forcefully shutdown the $FABRIC_MOD session/nexus.
*/
- TPG_TFO(tpg)->close_session(sess);
+ tpg->se_tpg_tfo->close_session(sess);
spin_lock_bh(&tpg->session_lock);
}
@@ -475,10 +480,10 @@ int core_tpg_del_initiator_node_acl(
core_clear_initiator_node_from_tpg(acl, tpg);
core_free_device_list_for_node(acl, tpg);
- printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
- TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);
+ pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
+ tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
return 0;
}
@@ -500,11 +505,11 @@ int core_tpg_set_initiator_node_queue_depth(
spin_lock_bh(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if (!(acl)) {
- printk(KERN_ERR "Access Control List entry for %s Initiator"
+ if (!acl) {
+ pr_err("Access Control List entry for %s Initiator"
" Node %s does not exists for TPG %hu, ignoring"
- " request.\n", TPG_TFO(tpg)->get_fabric_name(),
- initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+ " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
+ initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_bh(&tpg->acl_node_lock);
return -ENODEV;
}
@@ -520,12 +525,12 @@ int core_tpg_set_initiator_node_queue_depth(
continue;
if (!force) {
- printk(KERN_ERR "Unable to change queue depth for %s"
+ pr_err("Unable to change queue depth for %s"
" Initiator Node: %s while session is"
" operational. To forcefully change the queue"
" depth and force session reinstatement"
" use the \"force=1\" parameter.\n",
- TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+ tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
spin_unlock_bh(&tpg->session_lock);
spin_lock_bh(&tpg->acl_node_lock);
@@ -537,7 +542,7 @@ int core_tpg_set_initiator_node_queue_depth(
/*
* Determine if the session needs to be closed by our context.
*/
- if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+ if (!tpg->se_tpg_tfo->shutdown_session(sess))
continue;
init_sess = sess;
@@ -549,7 +554,7 @@ int core_tpg_set_initiator_node_queue_depth(
* Change the value in the Node's struct se_node_acl, and call
* core_set_queue_depth_for_node() to add the requested queue depth.
*
- * Finally call TPG_TFO(tpg)->close_session() to force session
+ * Finally call tpg->se_tpg_tfo->close_session() to force session
* reinstatement to occur if there is an active session for the
* $FABRIC_MOD Initiator Node in question.
*/
@@ -561,10 +566,10 @@ int core_tpg_set_initiator_node_queue_depth(
* Force session reinstatement if
* core_set_queue_depth_for_node() failed, because we assume
 * the $FABRIC_MOD has already set the session reinstatement
- * bit from TPG_TFO(tpg)->shutdown_session() called above.
+ * bit from tpg->se_tpg_tfo->shutdown_session() called above.
*/
if (init_sess)
- TPG_TFO(tpg)->close_session(init_sess);
+ tpg->se_tpg_tfo->close_session(init_sess);
spin_lock_bh(&tpg->acl_node_lock);
if (dynamic_acl)
@@ -578,12 +583,12 @@ int core_tpg_set_initiator_node_queue_depth(
* forcefully shutdown the $FABRIC_MOD session/nexus.
*/
if (init_sess)
- TPG_TFO(tpg)->close_session(init_sess);
+ tpg->se_tpg_tfo->close_session(init_sess);
- printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator"
+ pr_debug("Successfuly changed queue depth to: %d for Initiator"
" Node: %s on %s Target Portal Group: %u\n", queue_depth,
- initiatorname, TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_lock_bh(&tpg->acl_node_lock);
if (dynamic_acl)
@@ -597,7 +602,7 @@ EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
/* Set in core_dev_setup_virtual_lun0() */
- struct se_device *dev = se_global->g_lun0_dev;
+ struct se_device *dev = g_lun0_dev;
struct se_lun *lun = &se_tpg->tpg_virt_lun0;
u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
int ret;
@@ -614,7 +619,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
if (ret < 0)
- return -1;
+ return ret;
return 0;
}
@@ -638,8 +643,8 @@ int core_tpg_register(
se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
- if (!(se_tpg->tpg_lun_list)) {
- printk(KERN_ERR "Unable to allocate struct se_portal_group->"
+ if (!se_tpg->tpg_lun_list) {
+ pr_err("Unable to allocate struct se_portal_group->"
"tpg_lun_list\n");
return -ENOMEM;
}
@@ -663,7 +668,7 @@ int core_tpg_register(
se_tpg->se_tpg_wwn = se_wwn;
atomic_set(&se_tpg->tpg_pr_ref_count, 0);
INIT_LIST_HEAD(&se_tpg->acl_node_list);
- INIT_LIST_HEAD(&se_tpg->se_tpg_list);
+ INIT_LIST_HEAD(&se_tpg->se_tpg_node);
INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
spin_lock_init(&se_tpg->acl_node_lock);
spin_lock_init(&se_tpg->session_lock);
@@ -676,11 +681,11 @@ int core_tpg_register(
}
}
- spin_lock_bh(&se_global->se_tpg_lock);
- list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
- spin_unlock_bh(&se_global->se_tpg_lock);
+ spin_lock_bh(&tpg_lock);
+ list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
+ spin_unlock_bh(&tpg_lock);
- printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
+ pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
@@ -694,16 +699,16 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
{
struct se_node_acl *nacl, *nacl_tmp;
- printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
+ pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
" for endpoint: %s Portal Tag %u\n",
(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
- "Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(),
- TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
- TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+ "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
- spin_lock_bh(&se_global->se_tpg_lock);
- list_del(&se_tpg->se_tpg_list);
- spin_unlock_bh(&se_global->se_tpg_lock);
+ spin_lock_bh(&tpg_lock);
+ list_del(&se_tpg->se_tpg_node);
+ spin_unlock_bh(&tpg_lock);
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
@@ -721,7 +726,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg);
- TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
+ se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
spin_lock_bh(&se_tpg->acl_node_lock);
}
@@ -743,21 +748,21 @@ struct se_lun *core_tpg_pre_addlun(
struct se_lun *lun;
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+ pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
"-1: %u for Target Portal Group: %u\n",
- TPG_TFO(tpg)->get_fabric_name(),
+ tpg->se_tpg_tfo->get_fabric_name(),
unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
return ERR_PTR(-EOVERFLOW);
}
spin_lock(&tpg->tpg_lun_lock);
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
- printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
+ pr_err("TPG Logical Unit Number: %u is already active"
" on %s Target Portal Group: %u, ignoring request.\n",
- unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return ERR_PTR(-EINVAL);
}
@@ -772,8 +777,11 @@ int core_tpg_post_addlun(
u32 lun_access,
void *lun_ptr)
{
- if (core_dev_export(lun_ptr, tpg, lun) < 0)
- return -1;
+ int ret;
+
+ ret = core_dev_export(lun_ptr, tpg, lun);
+ if (ret < 0)
+ return ret;
spin_lock(&tpg->tpg_lun_lock);
lun->lun_access = lun_access;
@@ -799,21 +807,21 @@ struct se_lun *core_tpg_pre_dellun(
struct se_lun *lun;
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+ pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
"-1: %u for Target Portal Group: %u\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
return ERR_PTR(-EOVERFLOW);
}
spin_lock(&tpg->tpg_lun_lock);
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
- printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %u, ignoring request.\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4dafeb8b563..89760329d5d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -58,139 +58,12 @@
#include "target_core_scdb.h"
#include "target_core_ua.h"
-/* #define DEBUG_CDB_HANDLER */
-#ifdef DEBUG_CDB_HANDLER
-#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CDB_H(x...)
-#endif
-
-/* #define DEBUG_CMD_MAP */
-#ifdef DEBUG_CMD_MAP
-#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CMD_M(x...)
-#endif
-
-/* #define DEBUG_MEM_ALLOC */
-#ifdef DEBUG_MEM_ALLOC
-#define DEBUG_MEM(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_MEM(x...)
-#endif
-
-/* #define DEBUG_MEM2_ALLOC */
-#ifdef DEBUG_MEM2_ALLOC
-#define DEBUG_MEM2(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_MEM2(x...)
-#endif
-
-/* #define DEBUG_SG_CALC */
-#ifdef DEBUG_SG_CALC
-#define DEBUG_SC(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_SC(x...)
-#endif
-
-/* #define DEBUG_SE_OBJ */
-#ifdef DEBUG_SE_OBJ
-#define DEBUG_SO(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_SO(x...)
-#endif
-
-/* #define DEBUG_CMD_VOL */
-#ifdef DEBUG_CMD_VOL
-#define DEBUG_VOL(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_VOL(x...)
-#endif
-
-/* #define DEBUG_CMD_STOP */
-#ifdef DEBUG_CMD_STOP
-#define DEBUG_CS(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CS(x...)
-#endif
-
-/* #define DEBUG_PASSTHROUGH */
-#ifdef DEBUG_PASSTHROUGH
-#define DEBUG_PT(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_PT(x...)
-#endif
-
-/* #define DEBUG_TASK_STOP */
-#ifdef DEBUG_TASK_STOP
-#define DEBUG_TS(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TS(x...)
-#endif
-
-/* #define DEBUG_TRANSPORT_STOP */
-#ifdef DEBUG_TRANSPORT_STOP
-#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TRANSPORT_S(x...)
-#endif
-
-/* #define DEBUG_TASK_FAILURE */
-#ifdef DEBUG_TASK_FAILURE
-#define DEBUG_TF(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TF(x...)
-#endif
-
-/* #define DEBUG_DEV_OFFLINE */
-#ifdef DEBUG_DEV_OFFLINE
-#define DEBUG_DO(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_DO(x...)
-#endif
-
-/* #define DEBUG_TASK_STATE */
-#ifdef DEBUG_TASK_STATE
-#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TSTATE(x...)
-#endif
-
-/* #define DEBUG_STATUS_THR */
-#ifdef DEBUG_STATUS_THR
-#define DEBUG_ST(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_ST(x...)
-#endif
-
-/* #define DEBUG_TASK_TIMEOUT */
-#ifdef DEBUG_TASK_TIMEOUT
-#define DEBUG_TT(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TT(x...)
-#endif
-
-/* #define DEBUG_GENERIC_REQUEST_FAILURE */
-#ifdef DEBUG_GENERIC_REQUEST_FAILURE
-#define DEBUG_GRF(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_GRF(x...)
-#endif
-
-/* #define DEBUG_SAM_TASK_ATTRS */
-#ifdef DEBUG_SAM_TASK_ATTRS
-#define DEBUG_STA(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_STA(x...)
-#endif
-
-struct se_global *se_global;
+static int sub_api_initialized;
static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
-struct kmem_cache *se_mem_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
@@ -201,116 +74,87 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
typedef int (*map_func_t)(struct se_task *, u32);
static int transport_generic_write_pending(struct se_cmd *);
-static int transport_processing_thread(void *);
+static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev);
static void transport_complete_task_attr(struct se_cmd *cmd);
+static int transport_complete_qf(struct se_cmd *cmd);
+static void transport_handle_queue_full(struct se_cmd *cmd,
+ struct se_device *dev, int (*qf_callback)(struct se_cmd *));
static void transport_direct_request_timeout(struct se_cmd *cmd);
static void transport_free_dev_tasks(struct se_cmd *cmd);
-static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
- unsigned long long starting_lba, u32 sectors,
+static u32 transport_allocate_tasks(struct se_cmd *cmd,
+ unsigned long long starting_lba,
enum dma_data_direction data_direction,
- struct list_head *mem_list, int set_counts);
-static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
- u32 dma_size);
+ struct scatterlist *sgl, unsigned int nents);
+static int transport_generic_get_mem(struct se_cmd *cmd);
static int transport_generic_remove(struct se_cmd *cmd,
- int release_to_pool, int session_reinstatement);
-static int transport_get_sectors(struct se_cmd *cmd);
-static struct list_head *transport_init_se_mem_list(void);
-static int transport_map_sg_to_mem(struct se_cmd *cmd,
- struct list_head *se_mem_list, void *in_mem,
- u32 *se_mem_cnt);
-static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
- unsigned char *dst, struct list_head *se_mem_list);
+ int session_reinstatement);
static void transport_release_fe_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
struct se_queue_obj *qobj);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_stop_all_task_timers(struct se_cmd *cmd);
-int init_se_global(void)
+int init_se_kmem_caches(void)
{
- struct se_global *global;
-
- global = kzalloc(sizeof(struct se_global), GFP_KERNEL);
- if (!(global)) {
- printk(KERN_ERR "Unable to allocate memory for struct se_global\n");
- return -1;
- }
-
- INIT_LIST_HEAD(&global->g_lu_gps_list);
- INIT_LIST_HEAD(&global->g_se_tpg_list);
- INIT_LIST_HEAD(&global->g_hba_list);
- INIT_LIST_HEAD(&global->g_se_dev_list);
- spin_lock_init(&global->g_device_lock);
- spin_lock_init(&global->hba_lock);
- spin_lock_init(&global->se_tpg_lock);
- spin_lock_init(&global->lu_gps_lock);
- spin_lock_init(&global->plugin_class_lock);
-
se_cmd_cache = kmem_cache_create("se_cmd_cache",
sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
- if (!(se_cmd_cache)) {
- printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
+ if (!se_cmd_cache) {
+ pr_err("kmem_cache_create for struct se_cmd failed\n");
goto out;
}
se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
0, NULL);
- if (!(se_tmr_req_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
+ if (!se_tmr_req_cache) {
+ pr_err("kmem_cache_create() for struct se_tmr_req"
" failed\n");
goto out;
}
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
0, NULL);
- if (!(se_sess_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct se_session"
+ if (!se_sess_cache) {
+ pr_err("kmem_cache_create() for struct se_session"
" failed\n");
goto out;
}
se_ua_cache = kmem_cache_create("se_ua_cache",
sizeof(struct se_ua), __alignof__(struct se_ua),
0, NULL);
- if (!(se_ua_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
- goto out;
- }
- se_mem_cache = kmem_cache_create("se_mem_cache",
- sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
- if (!(se_mem_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
+ if (!se_ua_cache) {
+ pr_err("kmem_cache_create() for struct se_ua failed\n");
goto out;
}
t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
sizeof(struct t10_pr_registration),
__alignof__(struct t10_pr_registration), 0, NULL);
- if (!(t10_pr_reg_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
+ if (!t10_pr_reg_cache) {
+ pr_err("kmem_cache_create() for struct t10_pr_registration"
" failed\n");
goto out;
}
t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
0, NULL);
- if (!(t10_alua_lu_gp_cache)) {
- printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
+ if (!t10_alua_lu_gp_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
" failed\n");
goto out;
}
t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
sizeof(struct t10_alua_lu_gp_member),
__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
- if (!(t10_alua_lu_gp_mem_cache)) {
- printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
+ if (!t10_alua_lu_gp_mem_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
"cache failed\n");
goto out;
}
t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
sizeof(struct t10_alua_tg_pt_gp),
__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
- if (!(t10_alua_tg_pt_gp_cache)) {
- printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+ if (!t10_alua_tg_pt_gp_cache) {
+ pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
"cache failed\n");
goto out;
}
@@ -319,14 +163,12 @@ int init_se_global(void)
sizeof(struct t10_alua_tg_pt_gp_member),
__alignof__(struct t10_alua_tg_pt_gp_member),
0, NULL);
- if (!(t10_alua_tg_pt_gp_mem_cache)) {
- printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+ if (!t10_alua_tg_pt_gp_mem_cache) {
+ pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
"mem_t failed\n");
goto out;
}
- se_global = global;
-
return 0;
out:
if (se_cmd_cache)
@@ -337,8 +179,6 @@ out:
kmem_cache_destroy(se_sess_cache);
if (se_ua_cache)
kmem_cache_destroy(se_ua_cache);
- if (se_mem_cache)
- kmem_cache_destroy(se_mem_cache);
if (t10_pr_reg_cache)
kmem_cache_destroy(t10_pr_reg_cache);
if (t10_alua_lu_gp_cache)
@@ -349,45 +189,25 @@ out:
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
if (t10_alua_tg_pt_gp_mem_cache)
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
- kfree(global);
- return -1;
+ return -ENOMEM;
}
-void release_se_global(void)
+void release_se_kmem_caches(void)
{
- struct se_global *global;
-
- global = se_global;
- if (!(global))
- return;
-
kmem_cache_destroy(se_cmd_cache);
kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
- kmem_cache_destroy(se_mem_cache);
kmem_cache_destroy(t10_pr_reg_cache);
kmem_cache_destroy(t10_alua_lu_gp_cache);
kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
- kfree(global);
-
- se_global = NULL;
}
-/* SCSI statistics table index */
-static struct scsi_index_table scsi_index_table;
-
-/*
- * Initialize the index table for allocating unique row indexes to various mib
- * tables.
- */
-void init_scsi_index_table(void)
-{
- memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
- spin_lock_init(&scsi_index_table.lock);
-}
+/* This code ensures unique mib indexes are handed out. */
+static DEFINE_SPINLOCK(scsi_mib_index_lock);
+static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
/*
* Allocate a new row index for the entry type specified
@@ -396,16 +216,11 @@ u32 scsi_get_new_index(scsi_index_t type)
{
u32 new_index;
- if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
- printk(KERN_ERR "Invalid index type %d\n", type);
- return -EINVAL;
- }
+ BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
- spin_lock(&scsi_index_table.lock);
- new_index = ++scsi_index_table.scsi_mib_index[type];
- if (new_index == 0)
- new_index = ++scsi_index_table.scsi_mib_index[type];
- spin_unlock(&scsi_index_table.lock);
+ spin_lock(&scsi_mib_index_lock);
+ new_index = ++scsi_mib_index[type];
+ spin_unlock(&scsi_mib_index_lock);
return new_index;
}
@@ -425,34 +240,37 @@ static int transport_subsystem_reqmods(void)
ret = request_module("target_core_iblock");
if (ret != 0)
- printk(KERN_ERR "Unable to load target_core_iblock\n");
+ pr_err("Unable to load target_core_iblock\n");
ret = request_module("target_core_file");
if (ret != 0)
- printk(KERN_ERR "Unable to load target_core_file\n");
+ pr_err("Unable to load target_core_file\n");
ret = request_module("target_core_pscsi");
if (ret != 0)
- printk(KERN_ERR "Unable to load target_core_pscsi\n");
+ pr_err("Unable to load target_core_pscsi\n");
ret = request_module("target_core_stgt");
if (ret != 0)
- printk(KERN_ERR "Unable to load target_core_stgt\n");
+ pr_err("Unable to load target_core_stgt\n");
return 0;
}
int transport_subsystem_check_init(void)
{
- if (se_global->g_sub_api_initialized)
+ int ret;
+
+ if (sub_api_initialized)
return 0;
/*
 * Request the loading of known TCM subsystem plugins.
*/
- if (transport_subsystem_reqmods() < 0)
- return -1;
+ ret = transport_subsystem_reqmods();
+ if (ret < 0)
+ return ret;
- se_global->g_sub_api_initialized = 1;
+ sub_api_initialized = 1;
return 0;
}
@@ -461,8 +279,8 @@ struct se_session *transport_init_session(void)
struct se_session *se_sess;
se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
- if (!(se_sess)) {
- printk(KERN_ERR "Unable to allocate struct se_session from"
+ if (!se_sess) {
+ pr_err("Unable to allocate struct se_session from"
" se_sess_cache\n");
return ERR_PTR(-ENOMEM);
}
@@ -497,9 +315,9 @@ void __transport_register_session(
* If the fabric module supports an ISID based TransportID,
* save this value in binary from the fabric I_T Nexus now.
*/
- if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+ if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
memset(&buf[0], 0, PR_REG_ISID_LEN);
- TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
+ se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
&buf[0], PR_REG_ISID_LEN);
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}
@@ -516,8 +334,8 @@ void __transport_register_session(
}
list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
- printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
- TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
+ pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
+ se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);
@@ -536,13 +354,13 @@ EXPORT_SYMBOL(transport_register_session);
void transport_deregister_session_configfs(struct se_session *se_sess)
{
struct se_node_acl *se_nacl;
-
+ unsigned long flags;
/*
* Used by struct se_node_acl's under ConfigFS to locate active struct se_session
*/
se_nacl = se_sess->se_node_acl;
- if ((se_nacl)) {
- spin_lock_irq(&se_nacl->nacl_sess_lock);
+ if (se_nacl) {
+ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
list_del(&se_sess->sess_acl_list);
/*
* If the session list is empty, then clear the pointer.
@@ -556,7 +374,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
se_nacl->acl_sess_list.prev,
struct se_session, sess_acl_list);
}
- spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);
@@ -572,7 +390,7 @@ void transport_deregister_session(struct se_session *se_sess)
struct se_portal_group *se_tpg = se_sess->se_tpg;
struct se_node_acl *se_nacl;
- if (!(se_tpg)) {
+ if (!se_tpg) {
transport_free_session(se_sess);
return;
}
@@ -588,18 +406,18 @@ void transport_deregister_session(struct se_session *se_sess)
* struct se_node_acl if it had been previously dynamically generated.
*/
se_nacl = se_sess->se_node_acl;
- if ((se_nacl)) {
+ if (se_nacl) {
spin_lock_bh(&se_tpg->acl_node_lock);
if (se_nacl->dynamic_node_acl) {
- if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
- se_tpg))) {
+ if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
+ se_tpg)) {
list_del(&se_nacl->acl_list);
se_tpg->num_node_acls--;
spin_unlock_bh(&se_tpg->acl_node_lock);
core_tpg_wait_for_nacl_pr_ref(se_nacl);
core_free_device_list_for_node(se_nacl, se_tpg);
- TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
+ se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
se_nacl);
spin_lock_bh(&se_tpg->acl_node_lock);
}
@@ -609,13 +427,13 @@ void transport_deregister_session(struct se_session *se_sess)
transport_free_session(se_sess);
- printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
- TPG_TFO(se_tpg)->get_fabric_name());
+ pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
+ se_tpg->se_tpg_tfo->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);
/*
- * Called with T_TASK(cmd)->t_state_lock held.
+ * Called with cmd->t_state_lock held.
*/
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
@@ -623,28 +441,25 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
struct se_task *task;
unsigned long flags;
- if (!T_TASK(cmd))
- return;
-
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
dev = task->se_dev;
- if (!(dev))
+ if (!dev)
continue;
if (atomic_read(&task->task_active))
continue;
- if (!(atomic_read(&task->task_state_active)))
+ if (!atomic_read(&task->task_state_active))
continue;
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_del(&task->t_state_list);
- DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
- CMD_TFO(cmd)->tfo_get_task_tag(cmd), dev, task);
+ pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
+ cmd->se_tfo->get_task_tag(cmd), dev, task);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
atomic_set(&task->task_state_active, 0);
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
+ atomic_dec(&cmd->t_task_cdbs_ex_left);
}
}
@@ -663,34 +478,34 @@ static int transport_cmd_check_stop(
{
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
/*
 * Determine if IOCTL context caller is requesting the stopping of this
* command for LUN shutdown purposes.
*/
- if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
- DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
+ if (atomic_read(&cmd->transport_lun_stop)) {
+ pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_tfo->get_task_tag(cmd));
cmd->deferred_t_state = cmd->t_state;
cmd->t_state = TRANSPORT_DEFERRED_CMD;
- atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+ atomic_set(&cmd->t_transport_active, 0);
if (transport_off == 2)
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&T_TASK(cmd)->transport_lun_stop_comp);
+ complete(&cmd->transport_lun_stop_comp);
return 1;
}
/*
* Determine if frontend context caller is requesting the stopping of
- * this command for frontend excpections.
+ * this command for frontend exceptions.
*/
- if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
- DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
+ if (atomic_read(&cmd->t_transport_stop)) {
+ pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_tfo->get_task_tag(cmd));
cmd->deferred_t_state = cmd->t_state;
cmd->t_state = TRANSPORT_DEFERRED_CMD;
@@ -703,13 +518,13 @@ static int transport_cmd_check_stop(
*/
if (transport_off == 2)
cmd->se_lun = NULL;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&T_TASK(cmd)->t_transport_stop_comp);
+ complete(&cmd->t_transport_stop_comp);
return 1;
}
if (transport_off) {
- atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+ atomic_set(&cmd->t_transport_active, 0);
if (transport_off == 2) {
transport_all_task_dev_remove_state(cmd);
/*
@@ -722,20 +537,20 @@ static int transport_cmd_check_stop(
* their internally allocated I/O reference now and
* struct se_cmd now.
*/
- if (CMD_TFO(cmd)->check_stop_free != NULL) {
+ if (cmd->se_tfo->check_stop_free != NULL) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
- CMD_TFO(cmd)->check_stop_free(cmd);
+ cmd->se_tfo->check_stop_free(cmd);
return 1;
}
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
} else if (t_state)
cmd->t_state = t_state;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
@@ -747,30 +562,30 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
- struct se_lun *lun = SE_LUN(cmd);
+ struct se_lun *lun = cmd->se_lun;
unsigned long flags;
if (!lun)
return;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->transport_dev_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
goto check_lun;
}
- atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ atomic_set(&cmd->transport_dev_active, 0);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
check_lun:
spin_lock_irqsave(&lun->lun_cmd_lock, flags);
- if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
- list_del(&cmd->se_lun_list);
- atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+ if (atomic_read(&cmd->transport_lun_active)) {
+ list_del(&cmd->se_lun_node);
+ atomic_set(&cmd->transport_lun_active, 0);
#if 0
- printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
- CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
+ pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n"
+ cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
#endif
}
spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
@@ -778,92 +593,59 @@ check_lun:
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
- transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+ transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
return;
if (remove)
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
{
- transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+ transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
if (transport_cmd_check_stop_to_fabric(cmd))
return;
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
-static int transport_add_cmd_to_queue(
+static void transport_add_cmd_to_queue(
struct se_cmd *cmd,
int t_state)
{
struct se_device *dev = cmd->se_dev;
- struct se_queue_obj *qobj = dev->dev_queue_obj;
- struct se_queue_req *qr;
+ struct se_queue_obj *qobj = &dev->dev_queue_obj;
unsigned long flags;
- qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
- if (!(qr)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " struct se_queue_req\n");
- return -1;
- }
- INIT_LIST_HEAD(&qr->qr_list);
-
- qr->cmd = (void *)cmd;
- qr->state = t_state;
+ INIT_LIST_HEAD(&cmd->se_queue_node);
if (t_state) {
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->t_state = t_state;
- atomic_set(&T_TASK(cmd)->t_transport_active, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&cmd->t_transport_active, 1);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- list_add_tail(&qr->qr_list, &qobj->qobj_list);
- atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
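+ /*
+ * Commands flagged SCF_EMULATE_QUEUE_FULL are being retried after a
+ * QUEUE_FULL condition, so they are placed at the head of the queue
+ * ahead of newly submitted commands.
+ */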
+ if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
+ cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
+ list_add(&cmd->se_queue_node, &qobj->qobj_list);
+ } else
+ list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
+ atomic_inc(&cmd->t_transport_queue_active);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
atomic_inc(&qobj->queue_cnt);
wake_up_interruptible(&qobj->thread_wq);
- return 0;
-}
-
-/*
- * Called with struct se_queue_obj->cmd_queue_lock held.
- */
-static struct se_queue_req *
-__transport_get_qr_from_queue(struct se_queue_obj *qobj)
-{
- struct se_cmd *cmd;
- struct se_queue_req *qr = NULL;
-
- if (list_empty(&qobj->qobj_list))
- return NULL;
-
- list_for_each_entry(qr, &qobj->qobj_list, qr_list)
- break;
-
- if (qr->cmd) {
- cmd = (struct se_cmd *)qr->cmd;
- atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
- }
- list_del(&qr->qr_list);
- atomic_dec(&qobj->queue_cnt);
-
- return qr;
}
-static struct se_queue_req *
-transport_get_qr_from_queue(struct se_queue_obj *qobj)
+static struct se_cmd *
+transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
struct se_cmd *cmd;
- struct se_queue_req *qr;
unsigned long flags;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
@@ -871,50 +653,42 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj)
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return NULL;
}
+ cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
- list_for_each_entry(qr, &qobj->qobj_list, qr_list)
- break;
+ atomic_dec(&cmd->t_transport_queue_active);
- if (qr->cmd) {
- cmd = (struct se_cmd *)qr->cmd;
- atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
- }
- list_del(&qr->qr_list);
+ list_del(&cmd->se_queue_node);
atomic_dec(&qobj->queue_cnt);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- return qr;
+ return cmd;
}
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
struct se_queue_obj *qobj)
{
- struct se_cmd *q_cmd;
- struct se_queue_req *qr = NULL, *qr_p = NULL;
+ struct se_cmd *t;
unsigned long flags;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
+ if (!atomic_read(&cmd->t_transport_queue_active)) {
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return;
}
- list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
- q_cmd = (struct se_cmd *)qr->cmd;
- if (q_cmd != cmd)
- continue;
-
- atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
- atomic_dec(&qobj->queue_cnt);
- list_del(&qr->qr_list);
- kfree(qr);
- }
+ list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
+ if (t == cmd) {
+ atomic_dec(&cmd->t_transport_queue_active);
+ atomic_dec(&qobj->queue_cnt);
+ list_del(&cmd->se_queue_node);
+ break;
+ }
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
- printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- atomic_read(&T_TASK(cmd)->t_transport_queue_active));
+ if (atomic_read(&cmd->t_transport_queue_active)) {
+ pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
+ cmd->se_tfo->get_task_tag(cmd),
+ atomic_read(&cmd->t_transport_queue_active));
}
}
@@ -924,7 +698,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
*/
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
- struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
+ struct se_task *task = list_entry(cmd->t_task_list.next,
struct se_task, t_list);
if (good) {
@@ -933,7 +707,7 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
} else {
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
- TASK_CMD(task)->transport_error_status =
+ task->task_se_cmd->transport_error_status =
PYX_TRANSPORT_ILLEGAL_REQUEST;
}
@@ -948,22 +722,18 @@ EXPORT_SYMBOL(transport_complete_sync_cache);
*/
void transport_complete_task(struct se_task *task, int success)
{
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = task->se_dev;
int t_state;
unsigned long flags;
#if 0
- printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
- T_TASK(cmd)->t_task_cdb[0], dev);
+ pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
+ cmd->t_task_cdb[0], dev);
#endif
- if (dev) {
- spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+ if (dev)
atomic_inc(&dev->depth_left);
- atomic_inc(&SE_HBA(dev)->left_queue_depth);
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
- }
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
atomic_set(&task->task_active, 0);
/*
@@ -985,14 +755,14 @@ void transport_complete_task(struct se_task *task, int success)
*/
if (atomic_read(&task->task_stop)) {
/*
- * Decrement T_TASK(cmd)->t_se_count if this task had
+ * Decrement cmd->t_se_count if this task had
* previously thrown its timeout exception handler.
*/
if (atomic_read(&task->task_timeout)) {
- atomic_dec(&T_TASK(cmd)->t_se_count);
+ atomic_dec(&cmd->t_se_count);
atomic_set(&task->task_timeout, 0);
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&task->task_stop_comp);
return;
@@ -1003,34 +773,34 @@ void transport_complete_task(struct se_task *task, int success)
* the processing thread.
*/
if (atomic_read(&task->task_timeout)) {
- if (!(atomic_dec_and_test(
- &T_TASK(cmd)->t_task_cdbs_timeout_left))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ if (!atomic_dec_and_test(
+ &cmd->t_task_cdbs_timeout_left)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
return;
}
t_state = TRANSPORT_COMPLETE_TIMEOUT;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_add_cmd_to_queue(cmd, t_state);
return;
}
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);
+ atomic_dec(&cmd->t_task_cdbs_timeout_left);
/*
* Decrement the outstanding t_task_cdbs_left count. The last
* struct se_task from struct se_cmd will complete itself into the
* device queue depending upon int success.
*/
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+ if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
if (!success)
- T_TASK(cmd)->t_tasks_failed = 1;
+ cmd->t_tasks_failed = 1;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- if (!success || T_TASK(cmd)->t_tasks_failed) {
+ if (!success || cmd->t_tasks_failed) {
t_state = TRANSPORT_COMPLETE_FAILURE;
if (!task->task_error_status) {
task->task_error_status =
@@ -1039,10 +809,10 @@ void transport_complete_task(struct se_task *task, int success)
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
} else {
- atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
+ atomic_set(&cmd->t_transport_complete, 1);
t_state = TRANSPORT_COMPLETE_OK;
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_add_cmd_to_queue(cmd, t_state);
}
@@ -1080,9 +850,9 @@ static inline int transport_add_task_check_sam_attr(
&task_prev->t_execute_list :
&dev->execute_task_list);
- DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
+ pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
" in execution queue\n",
- T_TASK(task->task_se_cmd)->t_task_cdb[0]);
+ task->task_se_cmd->t_task_cdb[0]);
return 1;
}
/*
@@ -1124,8 +894,8 @@ static void __transport_add_task_to_execute_queue(
atomic_set(&task->task_state_active, 1);
- DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
- CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
+ pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+ task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
task, dev);
}
@@ -1135,8 +905,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
struct se_task *task;
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
dev = task->se_dev;
if (atomic_read(&task->task_state_active))
@@ -1146,23 +916,23 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
list_add_tail(&task->t_state_list, &dev->state_task_list);
atomic_set(&task->task_state_active, 1);
- DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
- CMD_TFO(task->task_se_cmd)->get_task_tag(
+ pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+ task->task_se_cmd->se_tfo->get_task_tag(
task->task_se_cmd), task, dev);
spin_unlock(&dev->execute_task_lock);
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_task *task, *task_prev = NULL;
unsigned long flags;
spin_lock_irqsave(&dev->execute_task_lock, flags);
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
if (atomic_read(&task->task_execute_queue))
continue;
/*
@@ -1174,30 +944,6 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
task_prev = task;
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-
- return;
-}
-
-/* transport_get_task_from_execute_queue():
- *
- * Called with dev->execute_task_lock held.
- */
-static struct se_task *
-transport_get_task_from_execute_queue(struct se_device *dev)
-{
- struct se_task *task;
-
- if (list_empty(&dev->execute_task_list))
- return NULL;
-
- list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
- break;
-
- list_del(&task->t_execute_list);
- atomic_set(&task->task_execute_queue, 0);
- atomic_dec(&dev->execute_tasks);
-
- return task;
}
/* transport_remove_task_from_execute_queue():
@@ -1222,6 +968,40 @@ void transport_remove_task_from_execute_queue(
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
+/*
+ * Handle QUEUE_FULL / -EAGAIN status
+ */
+
+static void target_qf_do_work(struct work_struct *work)
+{
+ struct se_device *dev = container_of(work, struct se_device,
+ qf_work_queue);
+ struct se_cmd *cmd, *cmd_tmp;
+
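+ /*
+ * Walk the device's queue-full list and re-queue each command for
+ * processing. qf_cmd_lock is dropped around the re-queue because
+ * transport_add_cmd_to_queue() takes its own locks.
+ */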
+ spin_lock_irq(&dev->qf_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {
+
+ list_del(&cmd->se_qf_node);
+ atomic_dec(&dev->dev_qf_count);
+ smp_mb__after_atomic_dec();
+ spin_unlock_irq(&dev->qf_cmd_lock);
+
+ pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
+ " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
+ (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
+ (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
+ : "UNKNOWN");
+ /*
+ * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd
+ * has been added to the head of the queue
+ */
+ transport_add_cmd_to_queue(cmd, cmd->t_state);
+
+ spin_lock_irq(&dev->qf_cmd_lock);
+ }
+ spin_unlock_irq(&dev->qf_cmd_lock);
+}
+
unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
switch (cmd->data_direction) {
@@ -1269,7 +1049,7 @@ void transport_dump_dev_state(
atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
dev->queue_depth);
*bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
- DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
+ dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
*bl += sprintf(b + *bl, " ");
}
@@ -1279,33 +1059,29 @@ void transport_dump_dev_state(
*/
static void transport_release_all_cmds(struct se_device *dev)
{
- struct se_cmd *cmd = NULL;
- struct se_queue_req *qr = NULL, *qr_p = NULL;
+ struct se_cmd *cmd, *tcmd;
int bug_out = 0, t_state;
unsigned long flags;
- spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
- list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
- qr_list) {
-
- cmd = (struct se_cmd *)qr->cmd;
- t_state = qr->state;
- list_del(&qr->qr_list);
- kfree(qr);
- spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
+ spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
+ list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
+ se_queue_node) {
+ t_state = cmd->t_state;
+ list_del(&cmd->se_queue_node);
+ spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
flags);
- printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
+ pr_err("Releasing ITT: 0x%08x, i_state: %u,"
" t_state: %u directly\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd), t_state);
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd), t_state);
transport_release_fe_cmd(cmd);
bug_out = 1;
- spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+ spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
}
- spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
+ spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags);
#if 0
if (bug_out)
BUG();
@@ -1362,7 +1138,7 @@ void transport_dump_vpd_proto_id(
if (p_buf)
strncpy(p_buf, buf, p_buf_len);
else
- printk(KERN_INFO "%s", buf);
+ pr_debug("%s", buf);
}
void
@@ -1387,7 +1163,8 @@ int transport_dump_vpd_assoc(
int p_buf_len)
{
unsigned char buf[VPD_TMP_BUF_SIZE];
- int ret = 0, len;
+ int ret = 0;
+ int len;
memset(buf, 0, VPD_TMP_BUF_SIZE);
len = sprintf(buf, "T10 VPD Identifier Association: ");
@@ -1404,14 +1181,14 @@ int transport_dump_vpd_assoc(
break;
default:
sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
- ret = -1;
+ ret = -EINVAL;
break;
}
if (p_buf)
strncpy(p_buf, buf, p_buf_len);
else
- printk("%s", buf);
+ pr_debug("%s", buf);
return ret;
}
@@ -1434,7 +1211,8 @@ int transport_dump_vpd_ident_type(
int p_buf_len)
{
unsigned char buf[VPD_TMP_BUF_SIZE];
- int ret = 0, len;
+ int ret = 0;
+ int len;
memset(buf, 0, VPD_TMP_BUF_SIZE);
len = sprintf(buf, "T10 VPD Identifier Type: ");
@@ -1461,14 +1239,17 @@ int transport_dump_vpd_ident_type(
default:
sprintf(buf+len, "Unsupported: 0x%02x\n",
vpd->device_identifier_type);
- ret = -1;
+ ret = -EINVAL;
break;
}
- if (p_buf)
+ if (p_buf) {
+ if (p_buf_len < strlen(buf)+1)
+ return -EINVAL;
strncpy(p_buf, buf, p_buf_len);
- else
- printk("%s", buf);
+ } else {
+ pr_debug("%s", buf);
+ }
return ret;
}
@@ -1511,14 +1292,14 @@ int transport_dump_vpd_ident(
default:
sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
" 0x%02x", vpd->device_identifier_code_set);
- ret = -1;
+ ret = -EINVAL;
break;
}
if (p_buf)
strncpy(p_buf, buf, p_buf_len);
else
- printk("%s", buf);
+ pr_debug("%s", buf);
return ret;
}
@@ -1569,51 +1350,51 @@ static void core_setup_task_attr_emulation(struct se_device *dev)
* This is currently not available in upsream Linux/SCSI Target
* mode code, and is assumed to be disabled while using TCM/pSCSI.
*/
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
return;
}
dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
- DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
- " device\n", TRANSPORT(dev)->name,
- TRANSPORT(dev)->get_device_rev(dev));
+ pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
+ " device\n", dev->transport->name,
+ dev->transport->get_device_rev(dev));
}
static void scsi_dump_inquiry(struct se_device *dev)
{
- struct t10_wwn *wwn = DEV_T10_WWN(dev);
+ struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
int i, device_type;
/*
* Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
*/
- printk(" Vendor: ");
+ pr_debug(" Vendor: ");
for (i = 0; i < 8; i++)
if (wwn->vendor[i] >= 0x20)
- printk("%c", wwn->vendor[i]);
+ pr_debug("%c", wwn->vendor[i]);
else
- printk(" ");
+ pr_debug(" ");
- printk(" Model: ");
+ pr_debug(" Model: ");
for (i = 0; i < 16; i++)
if (wwn->model[i] >= 0x20)
- printk("%c", wwn->model[i]);
+ pr_debug("%c", wwn->model[i]);
else
- printk(" ");
+ pr_debug(" ");
- printk(" Revision: ");
+ pr_debug(" Revision: ");
for (i = 0; i < 4; i++)
if (wwn->revision[i] >= 0x20)
- printk("%c", wwn->revision[i]);
+ pr_debug("%c", wwn->revision[i]);
else
- printk(" ");
+ pr_debug(" ");
- printk("\n");
+ pr_debug("\n");
- device_type = TRANSPORT(dev)->get_device_type(dev);
- printk(" Type: %s ", scsi_device_type(device_type));
- printk(" ANSI SCSI revision: %02x\n",
- TRANSPORT(dev)->get_device_rev(dev));
+ device_type = dev->transport->get_device_type(dev);
+ pr_debug(" Type: %s ", scsi_device_type(device_type));
+ pr_debug(" ANSI SCSI revision: %02x\n",
+ dev->transport->get_device_rev(dev));
}
struct se_device *transport_add_device_to_core_hba(
@@ -1630,33 +1411,15 @@ struct se_device *transport_add_device_to_core_hba(
struct se_device *dev;
dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
- if (!(dev)) {
- printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
- return NULL;
- }
- dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
- if (!(dev->dev_queue_obj)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " dev->dev_queue_obj\n");
- kfree(dev);
+ if (!dev) {
+ pr_err("Unable to allocate memory for se_dev_t\n");
return NULL;
}
- transport_init_queue_obj(dev->dev_queue_obj);
-
- dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj),
- GFP_KERNEL);
- if (!(dev->dev_status_queue_obj)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " dev->dev_status_queue_obj\n");
- kfree(dev->dev_queue_obj);
- kfree(dev);
- return NULL;
- }
- transport_init_queue_obj(dev->dev_status_queue_obj);
+ transport_init_queue_obj(&dev->dev_queue_obj);
dev->dev_flags = device_flags;
dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
- dev->dev_ptr = (void *) transport_dev;
+ dev->dev_ptr = transport_dev;
dev->se_hba = hba;
dev->se_sub_dev = se_dev;
dev->transport = transport;
@@ -1668,6 +1431,7 @@ struct se_device *transport_add_device_to_core_hba(
INIT_LIST_HEAD(&dev->delayed_cmd_list);
INIT_LIST_HEAD(&dev->ordered_cmd_list);
INIT_LIST_HEAD(&dev->state_task_list);
+ INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->ordered_cmd_lock);
@@ -1678,6 +1442,7 @@ struct se_device *transport_add_device_to_core_hba(
spin_lock_init(&dev->dev_status_thr_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
+ spin_lock_init(&dev->qf_cmd_lock);
dev->queue_depth = dev_limits->queue_depth;
atomic_set(&dev->depth_left, dev->queue_depth);
@@ -1715,13 +1480,16 @@ struct se_device *transport_add_device_to_core_hba(
* Startup the struct se_device processing thread
*/
dev->process_thread = kthread_run(transport_processing_thread, dev,
- "LIO_%s", TRANSPORT(dev)->name);
+ "LIO_%s", dev->transport->name);
if (IS_ERR(dev->process_thread)) {
- printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
- TRANSPORT(dev)->name);
+ pr_err("Unable to create kthread: LIO_%s\n",
+ dev->transport->name);
goto out;
}
-
+ /*
+ * Setup work_queue for QUEUE_FULL
+ */
+ INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
/*
* Preload the initial INQUIRY const values if we are doing
* anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
@@ -1730,16 +1498,16 @@ struct se_device *transport_add_device_to_core_hba(
* originals once back into DEV_T10_WWN(dev) for the virtual device
* setup.
*/
- if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (!(inquiry_prod) || !(inquiry_prod)) {
- printk(KERN_ERR "All non TCM/pSCSI plugins require"
+ if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (!inquiry_prod || !inquiry_rev) {
+ pr_err("All non TCM/pSCSI plugins require"
" INQUIRY consts\n");
goto out;
}
- strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8);
- strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16);
- strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4);
+ strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
+ strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
+ strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
}
scsi_dump_inquiry(dev);
@@ -1754,8 +1522,6 @@ out:
se_release_vpd_for_dev(dev);
- kfree(dev->dev_status_queue_obj);
- kfree(dev->dev_queue_obj);
kfree(dev);
return NULL;
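The shrunken error path works because the queue object is now embedded in struct se_device instead of being allocated separately, so the single kzalloc() above covers it and transport_init_queue_obj() simply takes &dev->dev_queue_obj. A generic sketch of that pointer-to-embedded-member change, with hypothetical type names:

        /* Hypothetical stand-in for the queue object. */
        struct queue_obj { int depth; };

        /* Before: two allocations, two failure branches to unwind. */
        struct dev_old {
                struct queue_obj *q;    /* kzalloc()'d separately */
        };

        /* After: one allocation; the member is initialized in place. */
        struct dev_new {
                struct queue_obj q;     /* covered by the kzalloc() of dev_new */
        };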
@@ -1794,12 +1560,11 @@ transport_generic_get_task(struct se_cmd *cmd,
enum dma_data_direction data_direction)
{
struct se_task *task;
- struct se_device *dev = SE_DEV(cmd);
- unsigned long flags;
+ struct se_device *dev = cmd->se_dev;
- task = dev->transport->alloc_task(cmd);
+ task = dev->transport->alloc_task(cmd->t_task_cdb);
if (!task) {
- printk(KERN_ERR "Unable to allocate struct se_task\n");
+ pr_err("Unable to allocate struct se_task\n");
return NULL;
}
@@ -1807,26 +1572,15 @@ transport_generic_get_task(struct se_cmd *cmd,
INIT_LIST_HEAD(&task->t_execute_list);
INIT_LIST_HEAD(&task->t_state_list);
init_completion(&task->task_stop_comp);
- task->task_no = T_TASK(cmd)->t_tasks_no++;
task->task_se_cmd = cmd;
task->se_dev = dev;
task->task_data_direction = data_direction;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
-
return task;
}
static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
-void transport_device_setup_cmd(struct se_cmd *cmd)
-{
- cmd->se_dev = SE_LUN(cmd)->lun_se_dev;
-}
-EXPORT_SYMBOL(transport_device_setup_cmd);
-
/*
* Used by fabric modules containing a local struct se_cmd within their
* fabric dependent per I/O descriptor.
@@ -1840,20 +1594,17 @@ void transport_init_se_cmd(
int task_attr,
unsigned char *sense_buffer)
{
- INIT_LIST_HEAD(&cmd->se_lun_list);
- INIT_LIST_HEAD(&cmd->se_delayed_list);
- INIT_LIST_HEAD(&cmd->se_ordered_list);
- /*
- * Setup t_task pointer to t_task_backstore
- */
- cmd->t_task = &cmd->t_task_backstore;
+ INIT_LIST_HEAD(&cmd->se_lun_node);
+ INIT_LIST_HEAD(&cmd->se_delayed_node);
+ INIT_LIST_HEAD(&cmd->se_ordered_node);
+ INIT_LIST_HEAD(&cmd->se_qf_node);
- INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list);
- init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
- init_completion(&T_TASK(cmd)->transport_lun_stop_comp);
- init_completion(&T_TASK(cmd)->t_transport_stop_comp);
- spin_lock_init(&T_TASK(cmd)->t_state_lock);
- atomic_set(&T_TASK(cmd)->transport_dev_active, 1);
+ INIT_LIST_HEAD(&cmd->t_task_list);
+ init_completion(&cmd->transport_lun_fe_stop_comp);
+ init_completion(&cmd->transport_lun_stop_comp);
+ init_completion(&cmd->t_transport_stop_comp);
+ spin_lock_init(&cmd->t_state_lock);
+ atomic_set(&cmd->transport_dev_active, 1);
cmd->se_tfo = tfo;
cmd->se_sess = se_sess;
@@ -1870,23 +1621,23 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
* Check if SAM Task Attribute emulation is enabled for this
* struct se_device storage object
*/
- if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
return 0;
if (cmd->sam_task_attr == MSG_ACA_TAG) {
- DEBUG_STA("SAM Task Attribute ACA"
+ pr_debug("SAM Task Attribute ACA"
" emulation is not supported\n");
- return -1;
+ return -EINVAL;
}
/*
* Used to determine when ORDERED commands should go from
* Dormant to Active status.
*/
- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
+ cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
smp_mb__after_atomic_inc();
- DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+ pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
cmd->se_ordered_id, cmd->sam_task_attr,
- TRANSPORT(cmd->se_dev)->name);
+ cmd->se_dev->transport->name);
return 0;
}
@@ -1898,8 +1649,8 @@ void transport_free_se_cmd(
/*
* Check and free any extended CDB buffer that was allocated
*/
- if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb)
- kfree(T_TASK(se_cmd)->t_task_cdb);
+ if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb)
+ kfree(se_cmd->t_task_cdb);
}
EXPORT_SYMBOL(transport_free_se_cmd);
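transport_free_se_cmd() only kfree()s the CDB when it does not point at the inline __t_task_cdb array, mirroring the allocation in transport_generic_allocate_tasks() further below. A condensed sketch of that inline-buffer-with-heap-fallback pattern, using hypothetical names:

        struct cmd_buf {
                unsigned char inline_cdb[32];   /* covers the common case */
                unsigned char *cdb;             /* inline_cdb or a kzalloc()'d buffer */
        };

        /* Allocation: fall back to the heap only for oversized CDBs. */
        static int cmd_buf_setup(struct cmd_buf *c, const unsigned char *src, size_t len)
        {
                if (len > sizeof(c->inline_cdb)) {
                        c->cdb = kzalloc(len, GFP_KERNEL);
                        if (!c->cdb)
                                return -ENOMEM;
                } else {
                        c->cdb = c->inline_cdb;
                }
                memcpy(c->cdb, src, len);
                return 0;
        }

        /* Release: only free what was actually allocated. */
        static void cmd_buf_free(struct cmd_buf *c)
        {
                if (c->cdb != c->inline_cdb)
                        kfree(c->cdb);
        }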
@@ -1922,42 +1673,41 @@ int transport_generic_allocate_tasks(
*/
cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
- transport_device_setup_cmd(cmd);
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
* for VARIABLE_LENGTH_CMD
*/
if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
- printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
+ pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
- return -1;
+ return -EINVAL;
}
/*
* If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
* allocate the additional extended CDB buffer now.. Otherwise
* setup the pointer from __t_task_cdb to t_task_cdb.
*/
- if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
- T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
+ if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
+ cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
GFP_KERNEL);
- if (!(T_TASK(cmd)->t_task_cdb)) {
- printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
- " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
+ if (!cmd->t_task_cdb) {
+ pr_err("Unable to allocate cmd->t_task_cdb"
+ " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
- (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
- return -1;
+ (unsigned long)sizeof(cmd->__t_task_cdb));
+ return -ENOMEM;
}
} else
- T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
+ cmd->t_task_cdb = &cmd->__t_task_cdb[0];
/*
- * Copy the original CDB into T_TASK(cmd).
+ * Copy the original CDB into cmd->t_task_cdb.
*/
- memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
+ memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
/*
* Setup the received CDB based on SCSI defined opcodes and
* perform unit attention, persistent reservations and ALUA
- * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb
+ * checks for virtual device backends. The cmd->t_task_cdb
* pointer is expected to be setup before we reach this point.
*/
ret = transport_generic_cmd_sequencer(cmd, cdb);
@@ -1969,7 +1719,7 @@ int transport_generic_allocate_tasks(
if (transport_check_alloc_task_attr(cmd) < 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -2;
+ return -EINVAL;
}
spin_lock(&cmd->se_lun->lun_sep_lock);
if (cmd->se_lun->lun_sep)
@@ -1986,10 +1736,10 @@ EXPORT_SYMBOL(transport_generic_allocate_tasks);
int transport_generic_handle_cdb(
struct se_cmd *cmd)
{
- if (!SE_LUN(cmd)) {
+ if (!cmd->se_lun) {
dump_stack();
- printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
- return -1;
+ pr_err("cmd->se_lun is NULL\n");
+ return -EINVAL;
}
transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
@@ -1997,6 +1747,56 @@ int transport_generic_handle_cdb(
}
EXPORT_SYMBOL(transport_generic_handle_cdb);
+static void transport_generic_request_failure(struct se_cmd *,
+ struct se_device *, int, int);
+/*
+ * Used by fabric module frontends to queue tasks directly.
+ * May only be used from process context.
+ */
+int transport_handle_cdb_direct(
+ struct se_cmd *cmd)
+{
+ int ret;
+
+ if (!cmd->se_lun) {
+ dump_stack();
+ pr_err("cmd->se_lun is NULL\n");
+ return -EINVAL;
+ }
+ if (in_interrupt()) {
+ dump_stack();
+ pr_err("transport_generic_handle_cdb cannot be called"
+ " from interrupt context\n");
+ return -EINVAL;
+ }
+ /*
+ * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following
+ * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
+ * in existing usage to ensure that outstanding descriptors are handled
+ * correctly during shutdown via transport_generic_wait_for_tasks()
+ *
+ * Also, we don't take cmd->t_state_lock here as we only expect
+ * this to be called for initial descriptor submission.
+ */
+ cmd->t_state = TRANSPORT_NEW_CMD;
+ atomic_set(&cmd->t_transport_active, 1);
+ /*
+ * transport_generic_new_cmd() is already handling QUEUE_FULL,
+ * so follow TRANSPORT_NEW_CMD processing thread context usage
+ * and call transport_generic_request_failure() if necessary..
+ */
+ ret = transport_generic_new_cmd(cmd);
+ if (ret == -EAGAIN)
+ return 0;
+ else if (ret < 0) {
+ cmd->transport_error_status = ret;
+ transport_generic_request_failure(cmd, NULL, 0,
+ (cmd->data_direction != DMA_TO_DEVICE));
+ }
+ return 0;
+}
+EXPORT_SYMBOL(transport_handle_cdb_direct);
+
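A hedged sketch of how a fabric frontend might use the new export from process context, once the se_cmd has been initialized and its LUN resolved; the surrounding setup and error handling here are schematic, not taken from this patch:

        /* Process-context submission in a hypothetical fabric driver. */
        ret = transport_generic_allocate_tasks(se_cmd, cdb);
        if (ret < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                se_cmd->scsi_sense_reason, 0);
                return;
        }
        /* Sets TRANSPORT_NEW_CMD and handles -EAGAIN/QUEUE_FULL internally. */
        transport_handle_cdb_direct(se_cmd);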
/*
* Used by fabric module frontends defining a TFO->new_cmd_map() caller
* to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
@@ -2005,10 +1805,10 @@ EXPORT_SYMBOL(transport_generic_handle_cdb);
int transport_generic_handle_cdb_map(
struct se_cmd *cmd)
{
- if (!SE_LUN(cmd)) {
+ if (!cmd->se_lun) {
dump_stack();
- printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
- return -1;
+ pr_err("cmd->se_lun is NULL\n");
+ return -EINVAL;
}
transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
@@ -2030,7 +1830,7 @@ int transport_generic_handle_data(
* in interrupt code, the signal_pending() check is skipped.
*/
if (!in_interrupt() && signal_pending(current))
- return -1;
+ return -EPERM;
/*
* If the received CDB has already been ABORTED by the generic
* target engine, we now call transport_check_aborted_status()
@@ -2057,7 +1857,6 @@ int transport_generic_handle_tmr(
* This is needed for early exceptions.
*/
cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
- transport_device_setup_cmd(cmd);
transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
return 0;
@@ -2077,16 +1876,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
unsigned long flags;
int ret = 0;
- DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("ITT[0x%08x] - Stopping tasks\n",
+ cmd->se_tfo->get_task_tag(cmd));
/*
* No tasks remain in the execution queue
*/
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry_safe(task, task_tmp,
- &T_TASK(cmd)->t_task_list, t_list) {
- DEBUG_TS("task_no[%d] - Processing task %p\n",
+ &cmd->t_task_list, t_list) {
+ pr_debug("task_no[%d] - Processing task %p\n",
task->task_no, task);
/*
* If the struct se_task has not been sent and is not active,
@@ -2094,14 +1893,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
*/
if (!atomic_read(&task->task_sent) &&
!atomic_read(&task->task_active)) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
transport_remove_task_from_execute_queue(task,
task->se_dev);
- DEBUG_TS("task_no[%d] - Removed from execute queue\n",
+ pr_debug("task_no[%d] - Removed from execute queue\n",
task->task_no);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
continue;
}
@@ -2111,42 +1910,32 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
*/
if (atomic_read(&task->task_active)) {
atomic_set(&task->task_stop, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
- DEBUG_TS("task_no[%d] - Waiting to complete\n",
+ pr_debug("task_no[%d] - Waiting to complete\n",
task->task_no);
wait_for_completion(&task->task_stop_comp);
- DEBUG_TS("task_no[%d] - Stopped successfully\n",
+ pr_debug("task_no[%d] - Stopped successfully\n",
task->task_no);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ atomic_dec(&cmd->t_task_cdbs_left);
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
} else {
- DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
+ pr_debug("task_no[%d] - Did nothing\n", task->task_no);
ret++;
}
__transport_stop_task_timer(task, &flags);
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return ret;
}
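The active-task branch above is the standard stop handshake: the stopping path sets task_stop and sleeps in wait_for_completion(), while the timeout handler further below (and the normal completion path) signal task_stop_comp once they observe the flag. A generic sketch of the pattern with a hypothetical structure but the real completion API:

        #include <linux/atomic.h>
        #include <linux/completion.h>

        struct work_item {
                atomic_t stop;
                struct completion stopped;      /* init_completion() at setup time */
        };

        /* Stopper: request the stop, then wait for the worker to acknowledge it. */
        static void work_item_stop(struct work_item *w)
        {
                atomic_set(&w->stop, 1);
                wait_for_completion(&w->stopped);
        }

        /* Worker: on seeing the flag, acknowledge and bail out of further work. */
        static bool work_item_should_stop(struct work_item *w)
        {
                if (!atomic_read(&w->stop))
                        return false;
                complete(&w->stopped);
                return true;
        }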
-static void transport_failure_reset_queue_depth(struct se_device *dev)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
- atomic_inc(&dev->depth_left);
- atomic_inc(&SE_HBA(dev)->left_queue_depth);
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
-}
-
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
@@ -2156,29 +1945,31 @@ static void transport_generic_request_failure(
int complete,
int sc)
{
- DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
- " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
- T_TASK(cmd)->t_task_cdb[0]);
- DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
+ int ret = 0;
+
+ pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+ " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
+ cmd->t_task_cdb[0]);
+ pr_debug("-----[ i_state: %d t_state/def_t_state:"
" %d/%d transport_error_status: %d\n",
- CMD_TFO(cmd)->get_cmd_state(cmd),
+ cmd->se_tfo->get_cmd_state(cmd),
cmd->t_state, cmd->deferred_t_state,
cmd->transport_error_status);
- DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
+ pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
" t_transport_active: %d t_transport_stop: %d"
- " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
- atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
- atomic_read(&T_TASK(cmd)->t_transport_active),
- atomic_read(&T_TASK(cmd)->t_transport_stop),
- atomic_read(&T_TASK(cmd)->t_transport_sent));
+ " t_transport_sent: %d\n", cmd->t_task_list_num,
+ atomic_read(&cmd->t_task_cdbs_left),
+ atomic_read(&cmd->t_task_cdbs_sent),
+ atomic_read(&cmd->t_task_cdbs_ex_left),
+ atomic_read(&cmd->t_transport_active),
+ atomic_read(&cmd->t_transport_stop),
+ atomic_read(&cmd->t_transport_sent));
transport_stop_all_task_timers(cmd);
if (dev)
- transport_failure_reset_queue_depth(dev);
+ atomic_inc(&dev->depth_left);
/*
* For SAM Task Attribute emulation for failed struct se_cmd
*/
@@ -2211,8 +2002,8 @@ static void transport_generic_request_failure(
* we force this session to fall back to session
* recovery.
*/
- CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess);
- CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0);
+ cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
+ cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
goto check_stop;
case PYX_TRANSPORT_LU_COMM_FAILURE:
@@ -2240,13 +2031,15 @@ static void transport_generic_request_failure(
*
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
- if (SE_SESS(cmd) &&
- DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+ if (cmd->se_sess &&
+ cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
+ core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
- CMD_TFO(cmd)->queue_status(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
goto check_stop;
case PYX_TRANSPORT_USE_SENSE_REASON:
/*
@@ -2254,8 +2047,8 @@ static void transport_generic_request_failure(
*/
break;
default:
- printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
- T_TASK(cmd)->t_task_cdb[0],
+ pr_err("Unknown transport error for CDB 0x%02x: %d\n",
+ cmd->t_task_cdb[0],
cmd->transport_error_status);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
@@ -2263,32 +2056,41 @@ static void transport_generic_request_failure(
if (!sc)
transport_new_cmd_failure(cmd);
- else
- transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
+ else {
+ ret = transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+ if (ret == -EAGAIN)
+ goto queue_full;
+ }
+
check_stop:
transport_lun_remove_cmd(cmd);
- if (!(transport_cmd_check_stop_to_fabric(cmd)))
+ if (!transport_cmd_check_stop_to_fabric(cmd))
;
+ return;
+
+queue_full:
+ cmd->t_state = TRANSPORT_COMPLETE_OK;
+ transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
}
static void transport_direct_request_timeout(struct se_cmd *cmd)
{
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->t_transport_timeout)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout),
- &T_TASK(cmd)->t_se_count);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_sub(atomic_read(&cmd->t_transport_timeout),
+ &cmd->t_se_count);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
static void transport_generic_request_timeout(struct se_cmd *cmd)
@@ -2296,35 +2098,18 @@ static void transport_generic_request_timeout(struct se_cmd *cmd)
unsigned long flags;
/*
- * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove()
+ * Reset cmd->t_se_count to allow transport_generic_remove()
* to allow last call to free memory resources.
*/
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) {
- int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (atomic_read(&cmd->t_transport_timeout) > 1) {
+ int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
- atomic_sub(tmp, &T_TASK(cmd)->t_se_count);
+ atomic_sub(tmp, &cmd->t_se_count);
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- transport_generic_remove(cmd, 0, 0);
-}
-
-static int
-transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
-{
- unsigned char *buf;
-
- buf = kzalloc(data_length, GFP_KERNEL);
- if (!(buf)) {
- printk(KERN_ERR "Unable to allocate memory for buffer\n");
- return -1;
- }
-
- T_TASK(cmd)->t_tasks_se_num = 0;
- T_TASK(cmd)->t_task_buf = buf;
-
- return 0;
+ transport_generic_remove(cmd, 0);
}
static inline u32 transport_lba_21(unsigned char *cdb)
@@ -2364,9 +2149,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
{
unsigned long flags;
- spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
- spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
/*
@@ -2375,14 +2160,14 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
static void transport_task_timeout_handler(unsigned long data)
{
struct se_task *task = (struct se_task *)data;
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
unsigned long flags;
- DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
+ pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
if (task->task_flags & TF_STOP) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
task->task_flags &= ~TF_RUNNING;
@@ -2390,46 +2175,46 @@ static void transport_task_timeout_handler(unsigned long data)
/*
* Determine if transport_complete_task() has already been called.
*/
- if (!(atomic_read(&task->task_active))) {
- DEBUG_TT("transport task: %p cmd: %p timeout task_active"
+ if (!atomic_read(&task->task_active)) {
+ pr_debug("transport task: %p cmd: %p timeout task_active"
" == 0\n", task, cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- atomic_inc(&T_TASK(cmd)->t_se_count);
- atomic_inc(&T_TASK(cmd)->t_transport_timeout);
- T_TASK(cmd)->t_tasks_failed = 1;
+ atomic_inc(&cmd->t_se_count);
+ atomic_inc(&cmd->t_transport_timeout);
+ cmd->t_tasks_failed = 1;
atomic_set(&task->task_timeout, 1);
task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
task->task_scsi_status = 1;
if (atomic_read(&task->task_stop)) {
- DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
+ pr_debug("transport task: %p cmd: %p timeout task_stop"
" == 1\n", task, cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&task->task_stop_comp);
return;
}
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
- DEBUG_TT("transport task: %p cmd: %p timeout non zero"
+ if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
+ pr_debug("transport task: %p cmd: %p timeout non zero"
" t_task_cdbs_left\n", task, cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
+ pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
task, cmd);
cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
}
/*
- * Called with T_TASK(cmd)->t_state_lock held.
+ * Called with cmd->t_state_lock held.
*/
static void transport_start_task_timer(struct se_task *task)
{
@@ -2441,8 +2226,8 @@ static void transport_start_task_timer(struct se_task *task)
/*
* If the task_timeout is disabled, exit now.
*/
- timeout = DEV_ATTRIB(dev)->task_timeout;
- if (!(timeout))
+ timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
+ if (!timeout)
return;
init_timer(&task->task_timer);
@@ -2453,27 +2238,27 @@ static void transport_start_task_timer(struct se_task *task)
task->task_flags |= TF_RUNNING;
add_timer(&task->task_timer);
#if 0
- printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
+ pr_debug("Starting task timer for cmd: %p task: %p seconds:"
" %d\n", task->task_se_cmd, task, timeout);
#endif
}
/*
- * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held.
+ * Called with spin_lock_irq(&cmd->t_state_lock) held.
*/
void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
{
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
- if (!(task->task_flags & TF_RUNNING))
+ if (!(task->task_flags & TF_RUNNING))
return;
task->task_flags |= TF_STOP;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
del_timer_sync(&task->task_timer);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags);
+ spin_lock_irqsave(&cmd->t_state_lock, *flags);
task->task_flags &= ~TF_RUNNING;
task->task_flags &= ~TF_STOP;
}
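For reference, the arm/disarm pair above follows the classic timer API of this era (before timer_setup()): init_timer(), fill in data/function/expires, add_timer(), and later del_timer_sync() with the command lock dropped so the synchronous cancel cannot deadlock against a handler that takes the same lock. A condensed sketch under those assumptions; the expires arithmetic is illustrative only:

        /* Arm (called with cmd->t_state_lock held). */
        init_timer(&task->task_timer);
        task->task_timer.data     = (unsigned long)task;
        task->task_timer.function = transport_task_timeout_handler;
        task->task_timer.expires  = jiffies + timeout * HZ;    /* timeout in seconds (assumption) */
        add_timer(&task->task_timer);

        /* Disarm: drop the lock around del_timer_sync(), as above. */
        spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
        del_timer_sync(&task->task_timer);
        spin_lock_irqsave(&cmd->t_state_lock, *flags);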
@@ -2483,11 +2268,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd)
struct se_task *task = NULL, *task_tmp;
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry_safe(task, task_tmp,
- &T_TASK(cmd)->t_task_list, t_list)
+ &cmd->t_task_list, t_list)
__transport_stop_task_timer(task, &flags);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
static inline int transport_tcq_window_closed(struct se_device *dev)
@@ -2498,7 +2283,7 @@ static inline int transport_tcq_window_closed(struct se_device *dev)
} else
msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
- wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+ wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
return 0;
}
@@ -2511,45 +2296,45 @@ static inline int transport_tcq_window_closed(struct se_device *dev)
*/
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{
- if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
return 1;
/*
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
* to allow the passed struct se_cmd list of tasks to be added to the front of the list.
*/
if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
+ atomic_inc(&cmd->se_dev->dev_hoq_count);
smp_mb__after_atomic_inc();
- DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
+ pr_debug("Added HEAD_OF_QUEUE for CDB:"
" 0x%02x, se_ordered_id: %u\n",
- T_TASK(cmd)->t_task_cdb[0],
+ cmd->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
- list_add_tail(&cmd->se_ordered_list,
- &SE_DEV(cmd)->ordered_cmd_list);
- spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock);
+ spin_lock(&cmd->se_dev->ordered_cmd_lock);
+ list_add_tail(&cmd->se_ordered_node,
+ &cmd->se_dev->ordered_cmd_list);
+ spin_unlock(&cmd->se_dev->ordered_cmd_lock);
- atomic_inc(&SE_DEV(cmd)->dev_ordered_sync);
+ atomic_inc(&cmd->se_dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
- DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
+ pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
" list, se_ordered_id: %u\n",
- T_TASK(cmd)->t_task_cdb[0],
+ cmd->t_task_cdb[0],
cmd->se_ordered_id);
/*
* Add ORDERED command to tail of execution queue if
* no other older commands exist that need to be
* completed first.
*/
- if (!(atomic_read(&SE_DEV(cmd)->simple_cmds)))
+ if (!atomic_read(&cmd->se_dev->simple_cmds))
return 1;
} else {
/*
* For SIMPLE and UNTAGGED Task Attribute commands
*/
- atomic_inc(&SE_DEV(cmd)->simple_cmds);
+ atomic_inc(&cmd->se_dev->simple_cmds);
smp_mb__after_atomic_inc();
}
/*
@@ -2557,20 +2342,20 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* add the dormant task(s) built for the passed struct se_cmd to the
* execution queue and become in Active state for this struct se_device.
*/
- if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
+ if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
/*
* Otherwise, add cmd w/ tasks to delayed cmd queue that
* will be drained upon completion of HEAD_OF_QUEUE task.
*/
- spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
+ spin_lock(&cmd->se_dev->delayed_cmd_lock);
cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
- list_add_tail(&cmd->se_delayed_list,
- &SE_DEV(cmd)->delayed_cmd_list);
- spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock);
+ list_add_tail(&cmd->se_delayed_node,
+ &cmd->se_dev->delayed_cmd_list);
+ spin_unlock(&cmd->se_dev->delayed_cmd_lock);
- DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
+ pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
" delayed CMD list, se_ordered_id: %u\n",
- T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr,
+ cmd->t_task_cdb[0], cmd->sam_task_attr,
cmd->se_ordered_id);
/*
* Return zero to let transport_execute_tasks() know
@@ -2592,25 +2377,23 @@ static int transport_execute_tasks(struct se_cmd *cmd)
{
int add_tasks;
- if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) {
- if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
- cmd->transport_error_status =
- PYX_TRANSPORT_LU_COMM_FAILURE;
- transport_generic_request_failure(cmd, NULL, 0, 1);
- return 0;
- }
+ if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
+ cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
+ transport_generic_request_failure(cmd, NULL, 0, 1);
+ return 0;
}
+
/*
* Call transport_cmd_check_stop() to see if a fabric exception
* has occurred that prevents execution.
*/
- if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
+ if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
/*
* Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
* attribute for the tasks of the received struct se_cmd CDB
*/
add_tasks = transport_execute_task_attr(cmd);
- if (add_tasks == 0)
+ if (!add_tasks)
goto execute_tasks;
/*
* This calls transport_add_tasks_from_cmd() to handle
@@ -2625,7 +2408,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)
* storage object.
*/
execute_tasks:
- __transport_execute_tasks(SE_DEV(cmd));
+ __transport_execute_tasks(cmd->se_dev);
return 0;
}
@@ -2639,51 +2422,49 @@ static int __transport_execute_tasks(struct se_device *dev)
{
int error;
struct se_cmd *cmd = NULL;
- struct se_task *task;
+ struct se_task *task = NULL;
unsigned long flags;
/*
* Check if there is enough room in the device and HBA queue to send
- * struct se_transport_task's to the selected transport.
+ * struct se_tasks to the selected transport.
*/
check_depth:
- spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
- if (!(atomic_read(&dev->depth_left)) ||
- !(atomic_read(&SE_HBA(dev)->left_queue_depth))) {
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+ if (!atomic_read(&dev->depth_left))
return transport_tcq_window_closed(dev);
- }
- dev->dev_tcq_window_closed = 0;
- spin_lock(&dev->execute_task_lock);
- task = transport_get_task_from_execute_queue(dev);
- spin_unlock(&dev->execute_task_lock);
+ dev->dev_tcq_window_closed = 0;
- if (!task) {
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+ spin_lock_irq(&dev->execute_task_lock);
+ if (list_empty(&dev->execute_task_list)) {
+ spin_unlock_irq(&dev->execute_task_lock);
return 0;
}
+ task = list_first_entry(&dev->execute_task_list,
+ struct se_task, t_execute_list);
+ list_del(&task->t_execute_list);
+ atomic_set(&task->task_execute_queue, 0);
+ atomic_dec(&dev->execute_tasks);
+ spin_unlock_irq(&dev->execute_task_lock);
atomic_dec(&dev->depth_left);
- atomic_dec(&SE_HBA(dev)->left_queue_depth);
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
- cmd = TASK_CMD(task);
+ cmd = task->task_se_cmd;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
atomic_set(&task->task_active, 1);
atomic_set(&task->task_sent, 1);
- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
+ atomic_inc(&cmd->t_task_cdbs_sent);
- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
- T_TASK(cmd)->t_task_cdbs)
+ if (atomic_read(&cmd->t_task_cdbs_sent) ==
+ cmd->t_task_list_num)
atomic_set(&cmd->transport_sent, 1);
transport_start_task_timer(task);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
/*
* The struct se_cmd->transport_emulate_cdb() function pointer is used
- * to grab REPORT_LUNS CDBs before they hit the
+ * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
* struct se_subsystem_api->do_task() caller below.
*/
if (cmd->transport_emulate_cdb) {
@@ -2718,11 +2499,11 @@ check_depth:
* call ->do_task() directly and let the underlying TCM subsystem plugin
* code handle the CDB emulation.
*/
- if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
- (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
+ if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
error = transport_emulate_control_cdb(task);
else
- error = TRANSPORT(dev)->do_task(task);
+ error = dev->transport->do_task(task);
if (error != 0) {
cmd->transport_error_status = error;
@@ -2745,12 +2526,10 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd)
* Any unsolicited data will get dumped for failed command inside of
* the fabric plugin
*/
- spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
-
- CMD_TFO(se_cmd)->new_cmd_failure(se_cmd);
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
@@ -2760,7 +2539,7 @@ static inline u32 transport_get_sectors_6(
struct se_cmd *cmd,
int *ret)
{
- struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+ struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
@@ -2772,7 +2551,7 @@ static inline u32 transport_get_sectors_6(
/*
* Use 24-bit allocation length for TYPE_TAPE.
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE)
return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
/*
@@ -2788,7 +2567,7 @@ static inline u32 transport_get_sectors_10(
struct se_cmd *cmd,
int *ret)
{
- struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+ struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
@@ -2800,8 +2579,8 @@ static inline u32 transport_get_sectors_10(
/*
* XXX_10 is not defined in SSC, throw an exception
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
- *ret = -1;
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
+ *ret = -EINVAL;
return 0;
}
@@ -2818,7 +2597,7 @@ static inline u32 transport_get_sectors_12(
struct se_cmd *cmd,
int *ret)
{
- struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+ struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
@@ -2830,8 +2609,8 @@ static inline u32 transport_get_sectors_12(
/*
* XXX_12 is not defined in SSC, throw an exception
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
- *ret = -1;
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
+ *ret = -EINVAL;
return 0;
}
@@ -2848,7 +2627,7 @@ static inline u32 transport_get_sectors_16(
struct se_cmd *cmd,
int *ret)
{
- struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+ struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
@@ -2860,7 +2639,7 @@ static inline u32 transport_get_sectors_16(
/*
* Use 24-bit allocation length for TYPE_TAPE.
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE)
return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
type_disk:
@@ -2890,57 +2669,30 @@ static inline u32 transport_get_size(
unsigned char *cdb,
struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
if (cdb[1] & 1) { /* sectors */
- return DEV_ATTRIB(dev)->block_size * sectors;
+ return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
} else /* bytes */
return sectors;
}
#if 0
- printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
- " %s object\n", DEV_ATTRIB(dev)->block_size, sectors,
- DEV_ATTRIB(dev)->block_size * sectors,
- TRANSPORT(dev)->name);
+ pr_debug("Returning block_size: %u, sectors: %u == %u for"
+ " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
+ dev->se_sub_dev->se_dev_attrib.block_size * sectors,
+ dev->transport->name);
#endif
- return DEV_ATTRIB(dev)->block_size * sectors;
+ return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
-unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
-{
- unsigned char result = 0;
- /*
- * MSB
- */
- if ((val[0] >= 'a') && (val[0] <= 'f'))
- result = ((val[0] - 'a' + 10) & 0xf) << 4;
- else
- if ((val[0] >= 'A') && (val[0] <= 'F'))
- result = ((val[0] - 'A' + 10) & 0xf) << 4;
- else /* digit */
- result = ((val[0] - '0') & 0xf) << 4;
- /*
- * LSB
- */
- if ((val[1] >= 'a') && (val[1] <= 'f'))
- result |= ((val[1] - 'a' + 10) & 0xf);
- else
- if ((val[1] >= 'A') && (val[1] <= 'F'))
- result |= ((val[1] - 'A' + 10) & 0xf);
- else /* digit */
- result |= ((val[1] - '0') & 0xf);
-
- return result;
-}
-EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
-
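The removed helper duplicated functionality the kernel already provides; assuming hex_to_bin() from <linux/kernel.h> is available in this tree, the two-digit conversion reduces to the sketch below (like the original, it does no validation of non-hex input):

        /* Combine two ASCII hex digits into one byte via hex_to_bin(). */
        static unsigned char ascii_pair_to_byte(const unsigned char val[2])
        {
                return (hex_to_bin(val[0]) << 4) | hex_to_bin(val[1]);
        }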
static void transport_xor_callback(struct se_cmd *cmd)
{
unsigned char *buf, *addr;
- struct se_mem *se_mem;
+ struct scatterlist *sg;
unsigned int offset;
int i;
+ int count;
/*
* From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
*
@@ -2953,32 +2705,37 @@ static void transport_xor_callback(struct se_cmd *cmd)
* 5) transfer the resulting XOR data to the data-in buffer.
*/
buf = kmalloc(cmd->data_length, GFP_KERNEL);
- if (!(buf)) {
- printk(KERN_ERR "Unable to allocate xor_callback buf\n");
+ if (!buf) {
+ pr_err("Unable to allocate xor_callback buf\n");
return;
}
/*
- * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list
+ * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
* into the locally allocated *buf
*/
- transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list);
+ sg_copy_to_buffer(cmd->t_data_sg,
+ cmd->t_data_nents,
+ buf,
+ cmd->data_length);
+
/*
* Now perform the XOR against the BIDI read memory located at
- * T_TASK(cmd)->t_mem_bidi_list
+ * cmd->t_mem_bidi_list
*/
offset = 0;
- list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) {
- addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
- if (!(addr))
+ for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
+ addr = kmap_atomic(sg_page(sg), KM_USER0);
+ if (!addr)
goto out;
- for (i = 0; i < se_mem->se_len; i++)
- *(addr + se_mem->se_off + i) ^= *(buf + offset + i);
+ for (i = 0; i < sg->length; i++)
+ *(addr + sg->offset + i) ^= *(buf + offset + i);
- offset += se_mem->se_len;
+ offset += sg->length;
kunmap_atomic(addr, KM_USER0);
}
+
out:
kfree(buf);
}
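Stripped of the scatterlist walking and kmap_atomic() plumbing, the per-byte work the rewritten callback performs is simply an XOR of the data-out payload into the data-in buffer; a plain-buffer sketch with a hypothetical helper name:

        /* XOR the write (data-out) bytes into the read (data-in) bytes. */
        static void xdwriteread_xor(unsigned char *data_in,
                                    const unsigned char *data_out, size_t len)
        {
                size_t i;

                for (i = 0; i < len; i++)
                        data_in[i] ^= data_out[i];
        }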
@@ -2994,75 +2751,60 @@ static int transport_get_sense_data(struct se_cmd *cmd)
unsigned long flags;
u32 offset = 0;
- if (!SE_LUN(cmd)) {
- printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
- return -1;
- }
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ WARN_ON(!cmd->se_lun);
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
list_for_each_entry_safe(task, task_tmp,
- &T_TASK(cmd)->t_task_list, t_list) {
+ &cmd->t_task_list, t_list) {
if (!task->task_sense)
continue;
dev = task->se_dev;
- if (!(dev))
+ if (!dev)
continue;
- if (!TRANSPORT(dev)->get_sense_buffer) {
- printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer"
+ if (!dev->transport->get_sense_buffer) {
+ pr_err("dev->transport->get_sense_buffer"
" is NULL\n");
continue;
}
- sense_buffer = TRANSPORT(dev)->get_sense_buffer(task);
- if (!(sense_buffer)) {
- printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
+ sense_buffer = dev->transport->get_sense_buffer(task);
+ if (!sense_buffer) {
+ pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate"
" sense buffer for task with sense\n",
- CMD_TFO(cmd)->get_task_tag(cmd), task->task_no);
+ cmd->se_tfo->get_task_tag(cmd), task->task_no);
continue;
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+ offset = cmd->se_tfo->set_fabric_sense_len(cmd,
TRANSPORT_SENSE_BUFFER);
- memcpy((void *)&buffer[offset], (void *)sense_buffer,
+ memcpy(&buffer[offset], sense_buffer,
TRANSPORT_SENSE_BUFFER);
cmd->scsi_status = task->task_scsi_status;
/* Automatically padded */
cmd->scsi_sense_length =
(TRANSPORT_SENSE_BUFFER + offset);
- printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
+ pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
" and sense\n",
- dev->se_hba->hba_id, TRANSPORT(dev)->name,
+ dev->se_hba->hba_id, dev->transport->name,
cmd->scsi_status);
return 0;
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return -1;
}
-static int transport_allocate_resources(struct se_cmd *cmd)
-{
- u32 length = cmd->data_length;
-
- if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
- (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
- return transport_generic_get_mem(cmd, length, PAGE_SIZE);
- else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
- return transport_generic_allocate_buf(cmd, length);
- else
- return 0;
-}
-
static int
transport_handle_reservation_conflict(struct se_cmd *cmd)
{
@@ -3077,12 +2819,40 @@ transport_handle_reservation_conflict(struct se_cmd *cmd)
*
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
- if (SE_SESS(cmd) &&
- DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+ if (cmd->se_sess &&
+ cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
+ core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
- return -2;
+ return -EINVAL;
+}
+
+static inline long long transport_dev_end_lba(struct se_device *dev)
+{
+ return dev->transport->get_blocks(dev) + 1;
+}
+
+static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ u32 sectors;
+
+ if (dev->transport->get_device_type(dev) != TYPE_DISK)
+ return 0;
+
+ sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
+
+ if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
+ pr_err("LBA: %llu Sectors: %u exceeds"
+ " transport_dev_end_lba(): %llu\n",
+ cmd->t_task_lba, sectors,
+ transport_dev_end_lba(dev));
+ pr_err(" We should return CHECK_CONDITION"
+ " but we don't yet\n");
+ return 0;
+ }
+
+ return sectors;
}
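As a worked example: with 512-byte blocks, a 1 MiB data_length maps to 2048 sectors; if the backend's get_blocks() reports a last LBA of 2097151, transport_dev_end_lba() returns 2097152, so a command with t_task_lba = 2096000 would end at LBA 2098048, exceed that bound, and hit the range check, which for now is only logged rather than turned into a CHECK CONDITION.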
/* transport_generic_cmd_sequencer():
@@ -3099,7 +2869,7 @@ static int transport_generic_cmd_sequencer(
struct se_cmd *cmd,
unsigned char *cdb)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
int ret = 0, sector_ret = 0, passthrough;
u32 sectors = 0, size = 0, pr_reg_type = 0;
@@ -3113,12 +2883,12 @@ static int transport_generic_cmd_sequencer(
&transport_nop_wait_for_tasks;
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
- return -2;
+ return -EINVAL;
}
/*
* Check status of Asymmetric Logical Unit Assignment port
*/
- ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq);
+ ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
if (ret != 0) {
cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
/*
@@ -3128,22 +2898,22 @@ static int transport_generic_cmd_sequencer(
*/
if (ret > 0) {
#if 0
- printk(KERN_INFO "[%s]: ALUA TG Port not available,"
+ pr_debug("[%s]: ALUA TG Port not available,"
" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
- CMD_TFO(cmd)->get_fabric_name(), alua_ascq);
+ cmd->se_tfo->get_fabric_name(), alua_ascq);
#endif
transport_set_sense_codes(cmd, 0x04, alua_ascq);
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
- return -2;
+ return -EINVAL;
}
goto out_invalid_cdb_field;
}
/*
* Check status for SPC-3 Persistent Reservations
*/
- if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) {
- if (T10_PR_OPS(su_dev)->t10_seq_non_holder(
+ if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
+ if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
cmd, cdb, pr_reg_type) != 0)
return transport_handle_reservation_conflict(cmd);
/*
@@ -3160,7 +2930,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_6;
- T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+ cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case READ_10:
@@ -3169,7 +2939,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_10;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case READ_12:
@@ -3178,7 +2948,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_12;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case READ_16:
@@ -3187,7 +2957,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_16;
- T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_6:
@@ -3196,7 +2966,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_6;
- T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+ cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_10:
@@ -3205,8 +2975,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_10;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
- T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_12:
@@ -3215,8 +2985,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_12;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
- T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_16:
@@ -3225,22 +2995,22 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_16;
- T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
- T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
- !(T_TASK(cmd)->t_tasks_bidi))
+ !(cmd->t_tasks_bidi))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_10;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- passthrough = (TRANSPORT(dev)->transport_type ==
+ passthrough = (dev->transport->transport_type ==
TRANSPORT_PLUGIN_PHBA_PDEV);
/*
* Skip the remaining assignments for TCM/PSCSI passthrough
@@ -3251,7 +3021,7 @@ static int transport_generic_cmd_sequencer(
* Setup BIDI XOR callback to be run during transport_generic_complete_ok()
*/
cmd->transport_complete_callback = &transport_xor_callback;
- T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->t_tasks_fua = (cdb[1] & 0x8);
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
@@ -3259,7 +3029,7 @@ static int transport_generic_cmd_sequencer(
* Determine if this is TCM/PSCSI device and we should disable
* internal emulation for this CDB.
*/
- passthrough = (TRANSPORT(dev)->transport_type ==
+ passthrough = (dev->transport->transport_type ==
TRANSPORT_PLUGIN_PHBA_PDEV);
switch (service_action) {
@@ -3273,7 +3043,7 @@ static int transport_generic_cmd_sequencer(
* XDWRITE_READ_32 logic.
*/
cmd->transport_split_cdb = &split_cdb_XX_32;
- T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb);
+ cmd->t_task_lba = transport_lba_64_ext(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
/*
@@ -3287,14 +3057,22 @@ static int transport_generic_cmd_sequencer(
* transport_generic_complete_ok()
*/
cmd->transport_complete_callback = &transport_xor_callback;
- T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8);
+ cmd->t_tasks_fua = (cdb[10] & 0x8);
break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]);
+
+ if (sectors)
+ size = transport_get_size(sectors, cdb, cmd);
+ else {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
+ " supported\n");
+ goto out_invalid_cdb_field;
+ }
+
+ cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
/*
@@ -3304,7 +3082,7 @@ static int transport_generic_cmd_sequencer(
break;
if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
- printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+ pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
goto out_invalid_cdb_field;
@@ -3314,28 +3092,28 @@ static int transport_generic_cmd_sequencer(
* tpws with the UNMAP=1 bit set.
*/
if (!(cdb[10] & 0x08)) {
- printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
+ pr_err("WRITE_SAME w/o UNMAP bit not"
" supported for Block Discard Emulation\n");
goto out_invalid_cdb_field;
}
break;
default:
- printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
+ pr_err("VARIABLE_LENGTH_CMD service action"
" 0x%04x not supported\n", service_action);
goto out_unsupported_cdb;
}
break;
- case 0xa3:
- if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+ case MAINTENANCE_IN:
+ if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/* MAINTENANCE_IN from SCC-2 */
/*
* Check for emulated MI_REPORT_TARGET_PGS.
*/
if (cdb[1] == MI_REPORT_TARGET_PGS) {
cmd->transport_emulate_cdb =
- (T10_ALUA(su_dev)->alua_type ==
+ (su_dev->t10_alua.alua_type ==
SPC3_ALUA_EMULATED) ?
- &core_emulate_report_target_port_groups :
+ core_emulate_report_target_port_groups :
NULL;
}
size = (cdb[6] << 24) | (cdb[7] << 16) |
@@ -3344,7 +3122,7 @@ static int transport_generic_cmd_sequencer(
/* GPCMD_SEND_KEY from multi media commands */
size = (cdb[8] << 8) + cdb[9];
}
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case MODE_SELECT:
size = cdb[4];
@@ -3356,7 +3134,7 @@ static int transport_generic_cmd_sequencer(
break;
case MODE_SENSE:
size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case MODE_SENSE_10:
case GPCMD_READ_BUFFER_CAPACITY:
@@ -3364,11 +3142,11 @@ static int transport_generic_cmd_sequencer(
case LOG_SELECT:
case LOG_SENSE:
size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_BLOCK_LIMITS:
size = READ_BLOCK_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case GPCMD_GET_CONFIGURATION:
case GPCMD_READ_FORMAT_CAPACITIES:
@@ -3380,11 +3158,11 @@ static int transport_generic_cmd_sequencer(
case PERSISTENT_RESERVE_IN:
case PERSISTENT_RESERVE_OUT:
cmd->transport_emulate_cdb =
- (T10_RES(su_dev)->res_type ==
+ (su_dev->t10_pr.res_type ==
SPC3_PERSISTENT_RESERVATIONS) ?
- &core_scsi3_emulate_pr : NULL;
+ core_scsi3_emulate_pr : NULL;
size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case GPCMD_MECHANISM_STATUS:
case GPCMD_READ_DVD_STRUCTURE:
@@ -3393,19 +3171,19 @@ static int transport_generic_cmd_sequencer(
break;
case READ_POSITION:
size = READ_POSITION_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
- case 0xa4:
- if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+ case MAINTENANCE_OUT:
+ if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/* MAINTENANCE_OUT from SCC-2
*
* Check for emulated MO_SET_TARGET_PGS.
*/
if (cdb[1] == MO_SET_TARGET_PGS) {
cmd->transport_emulate_cdb =
- (T10_ALUA(su_dev)->alua_type ==
+ (su_dev->t10_alua.alua_type ==
SPC3_ALUA_EMULATED) ?
- &core_emulate_set_target_port_groups :
+ core_emulate_set_target_port_groups :
NULL;
}
@@ -3415,7 +3193,7 @@ static int transport_generic_cmd_sequencer(
/* GPCMD_REPORT_KEY from multi media commands */
size = (cdb[8] << 8) + cdb[9];
}
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case INQUIRY:
size = (cdb[3] << 8) + cdb[4];
@@ -3423,23 +3201,23 @@ static int transport_generic_cmd_sequencer(
* Do implicit HEAD_OF_QUEUE processing for INQUIRY.
* See spc4r17 section 5.3
*/
- if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = MSG_HEAD_TAG;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_BUFFER:
size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_CAPACITY:
size = READ_CAP_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_MEDIA_SERIAL_NUMBER:
case SECURITY_PROTOCOL_IN:
case SECURITY_PROTOCOL_OUT:
size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case SERVICE_ACTION_IN:
case ACCESS_CONTROL_IN:
@@ -3450,36 +3228,36 @@ static int transport_generic_cmd_sequencer(
case WRITE_ATTRIBUTE:
size = (cdb[10] << 24) | (cdb[11] << 16) |
(cdb[12] << 8) | cdb[13];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
size = (cdb[3] << 8) | cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
#if 0
case GPCMD_READ_CD:
sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
size = (2336 * sectors);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
#endif
case READ_TOC:
size = cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case REQUEST_SENSE:
size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_ELEMENT_STATUS:
size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case WRITE_BUFFER:
size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case RESERVE:
case RESERVE_10:
@@ -3500,9 +3278,9 @@ static int transport_generic_cmd_sequencer(
* emulation disabled.
*/
cmd->transport_emulate_cdb =
- (T10_RES(su_dev)->res_type !=
+ (su_dev->t10_pr.res_type !=
SPC_PASSTHROUGH) ?
- &core_scsi2_emulate_crh : NULL;
+ core_scsi2_emulate_crh : NULL;
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
break;
case RELEASE:
@@ -3517,9 +3295,9 @@ static int transport_generic_cmd_sequencer(
size = cmd->data_length;
cmd->transport_emulate_cdb =
- (T10_RES(su_dev)->res_type !=
+ (su_dev->t10_pr.res_type !=
SPC_PASSTHROUGH) ?
- &core_scsi2_emulate_crh : NULL;
+ core_scsi2_emulate_crh : NULL;
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
break;
case SYNCHRONIZE_CACHE:
@@ -3529,10 +3307,10 @@ static int transport_generic_cmd_sequencer(
*/
if (cdb[0] == SYNCHRONIZE_CACHE) {
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
} else {
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
}
if (sector_ret)
goto out_unsupported_cdb;
@@ -3543,7 +3321,7 @@ static int transport_generic_cmd_sequencer(
/*
* For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
*/
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
break;
/*
* Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
@@ -3554,32 +3332,27 @@ static int transport_generic_cmd_sequencer(
* Check to ensure that LBA + Range does not exceed past end of
* device.
*/
- if (transport_get_sectors(cmd) < 0)
+ if (!transport_cmd_get_valid_sectors(cmd))
goto out_invalid_cdb_field;
break;
case UNMAP:
size = get_unaligned_be16(&cdb[7]);
- passthrough = (TRANSPORT(dev)->transport_type ==
- TRANSPORT_PLUGIN_PHBA_PDEV);
- /*
- * Determine if the received UNMAP used to for direct passthrough
- * into Linux/SCSI with struct request via TCM/pSCSI or we are
- * signaling the use of internal transport_generic_unmap() emulation
- * for UNMAP -> Linux/BLOCK disbard with TCM/IBLOCK and TCM/FILEIO
- * subsystem plugin backstores.
- */
- if (!(passthrough))
- cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP;
-
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case WRITE_SAME_16:
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- T_TASK(cmd)->t_task_lba = get_unaligned_be16(&cdb[2]);
- passthrough = (TRANSPORT(dev)->transport_type ==
+
+ if (sectors)
+ size = transport_get_size(sectors, cdb, cmd);
+ else {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+ goto out_invalid_cdb_field;
+ }
+
+ cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
+ passthrough = (dev->transport->transport_type ==
TRANSPORT_PLUGIN_PHBA_PDEV);
/*
 * Determine if the received WRITE_SAME_16 is used for direct
@@ -3588,9 +3361,9 @@ static int transport_generic_cmd_sequencer(
 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK and
* TCM/FILEIO subsystem plugin backstores.
*/
- if (!(passthrough)) {
+ if (!passthrough) {
if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
- printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+ pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
goto out_invalid_cdb_field;
@@ -3600,7 +3373,7 @@ static int transport_generic_cmd_sequencer(
* tpws with the UNMAP=1 bit set.
*/
if (!(cdb[1] & 0x08)) {
- printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not "
+ pr_err("WRITE_SAME w/o UNMAP bit not"
" supported for Block Discard Emulation\n");
goto out_invalid_cdb_field;
}
@@ -3625,34 +3398,34 @@ static int transport_generic_cmd_sequencer(
break;
case REPORT_LUNS:
cmd->transport_emulate_cdb =
- &transport_core_report_lun_response;
+ transport_core_report_lun_response;
size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
/*
 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
* See spc4r17 section 5.3
*/
- if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = MSG_HEAD_TAG;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
default:
- printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
+ pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
" 0x%02x, sending CHECK_CONDITION.\n",
- CMD_TFO(cmd)->get_fabric_name(), cdb[0]);
+ cmd->se_tfo->get_fabric_name(), cdb[0]);
cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
goto out_unsupported_cdb;
}
if (size != cmd->data_length) {
- printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
+ pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
" %u does not match SCSI CDB Length: %u for SAM Opcode:"
- " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(),
+ " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
cmd->data_length, size, cdb[0]);
cmd->cmd_spdtl = size;
if (cmd->data_direction == DMA_TO_DEVICE) {
- printk(KERN_ERR "Rejecting underflow/overflow"
+ pr_err("Rejecting underflow/overflow"
" WRITE data\n");
goto out_invalid_cdb_field;
}
@@ -3660,10 +3433,10 @@ static int transport_generic_cmd_sequencer(
* Reject READ_* or WRITE_* with overflow/underflow for
* type SCF_SCSI_DATA_SG_IO_CDB.
*/
- if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) {
- printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
+ if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
+ pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
" CDB on non 512-byte sector setup subsystem"
- " plugin: %s\n", TRANSPORT(dev)->name);
+ " plugin: %s\n", dev->transport->name);
/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
goto out_invalid_cdb_field;
}
@@ -3678,105 +3451,22 @@ static int transport_generic_cmd_sequencer(
cmd->data_length = size;
}
+ /* Let's limit control cdbs to a page, for simplicity's sake. */
+ if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
+ size > PAGE_SIZE)
+ goto out_invalid_cdb_field;
+
transport_set_supported_SAM_opcode(cmd);
return ret;
out_unsupported_cdb:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -2;
+ return -EINVAL;
out_invalid_cdb_field:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -2;
-}
-
-static inline void transport_release_tasks(struct se_cmd *);
-
-/*
- * This function will copy a contiguous *src buffer into a destination
- * struct scatterlist array.
- */
-static void transport_memcpy_write_contig(
- struct se_cmd *cmd,
- struct scatterlist *sg_d,
- unsigned char *src)
-{
- u32 i = 0, length = 0, total_length = cmd->data_length;
- void *dst;
-
- while (total_length) {
- length = sg_d[i].length;
-
- if (length > total_length)
- length = total_length;
-
- dst = sg_virt(&sg_d[i]);
-
- memcpy(dst, src, length);
-
- if (!(total_length -= length))
- return;
-
- src += length;
- i++;
- }
-}
-
-/*
- * This function will copy a struct scatterlist array *sg_s into a destination
- * contiguous *dst buffer.
- */
-static void transport_memcpy_read_contig(
- struct se_cmd *cmd,
- unsigned char *dst,
- struct scatterlist *sg_s)
-{
- u32 i = 0, length = 0, total_length = cmd->data_length;
- void *src;
-
- while (total_length) {
- length = sg_s[i].length;
-
- if (length > total_length)
- length = total_length;
-
- src = sg_virt(&sg_s[i]);
-
- memcpy(dst, src, length);
-
- if (!(total_length -= length))
- return;
-
- dst += length;
- i++;
- }
-}
-
-static void transport_memcpy_se_mem_read_contig(
- struct se_cmd *cmd,
- unsigned char *dst,
- struct list_head *se_mem_list)
-{
- struct se_mem *se_mem;
- void *src;
- u32 length = 0, total_length = cmd->data_length;
-
- list_for_each_entry(se_mem, se_mem_list, se_list) {
- length = se_mem->se_len;
-
- if (length > total_length)
- length = total_length;
-
- src = page_address(se_mem->se_page) + se_mem->se_off;
-
- memcpy(dst, src, length);
-
- if (!(total_length -= length))
- return;
-
- dst += length;
- }
+ return -EINVAL;
}
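
The sequencer above decodes transfer and allocation lengths straight out of the CDB in big-endian byte order; open-coded shifts such as (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9] produce the same value as the get_unaligned_be*() helpers the function already uses for the 16- and 64-bit cases. A small illustrative sketch (these helpers are not part of the patch):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Illustrative only: decode big-endian CDB length fields. */
static inline u32 cdb_be32(const unsigned char *cdb, int off)
{
	/*
	 * Same value as (cdb[off] << 24) | (cdb[off + 1] << 16) |
	 * (cdb[off + 2] << 8) | cdb[off + 3]
	 */
	return get_unaligned_be32(&cdb[off]);
}

static inline u16 cdb_be16(const unsigned char *cdb, int off)
{
	return get_unaligned_be16(&cdb[off]);
}
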
/*
@@ -3786,7 +3476,7 @@ static void transport_memcpy_se_mem_read_contig(
*/
static void transport_complete_task_attr(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_cmd *cmd_p, *cmd_tmp;
int new_active_tasks = 0;
@@ -3794,25 +3484,25 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
atomic_dec(&dev->simple_cmds);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
- DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
+ pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_dec(&dev->dev_hoq_count);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
- DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
+ pr_debug("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&dev->ordered_cmd_lock);
- list_del(&cmd->se_ordered_list);
+ list_del(&cmd->se_ordered_node);
atomic_dec(&dev->dev_ordered_sync);
smp_mb__after_atomic_dec();
spin_unlock(&dev->ordered_cmd_lock);
dev->dev_cur_ordered_id++;
- DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
+ pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
}
/*
@@ -3822,15 +3512,15 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
*/
spin_lock(&dev->delayed_cmd_lock);
list_for_each_entry_safe(cmd_p, cmd_tmp,
- &dev->delayed_cmd_list, se_delayed_list) {
+ &dev->delayed_cmd_list, se_delayed_node) {
- list_del(&cmd_p->se_delayed_list);
+ list_del(&cmd_p->se_delayed_node);
spin_unlock(&dev->delayed_cmd_lock);
- DEBUG_STA("Calling add_tasks() for"
+ pr_debug("Calling add_tasks() for"
" cmd_p: 0x%02x Task Attr: 0x%02x"
" Dormant -> Active, se_ordered_id: %u\n",
- T_TASK(cmd_p)->t_task_cdb[0],
+ cmd_p->t_task_cdb[0],
cmd_p->sam_task_attr, cmd_p->se_ordered_id);
transport_add_tasks_from_cmd(cmd_p);
@@ -3846,20 +3536,79 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
* to do the processing of the Active tasks.
*/
if (new_active_tasks != 0)
- wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+ wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
+}
+
+static int transport_complete_qf(struct se_cmd *cmd)
+{
+ int ret = 0;
+
+ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+ return cmd->se_tfo->queue_status(cmd);
+
+ switch (cmd->data_direction) {
+ case DMA_FROM_DEVICE:
+ ret = cmd->se_tfo->queue_data_in(cmd);
+ break;
+ case DMA_TO_DEVICE:
+ if (cmd->t_bidi_data_sg) {
+ ret = cmd->se_tfo->queue_data_in(cmd);
+ if (ret < 0)
+ return ret;
+ }
+ /* Fall through for DMA_TO_DEVICE */
+ case DMA_NONE:
+ ret = cmd->se_tfo->queue_status(cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void transport_handle_queue_full(
+ struct se_cmd *cmd,
+ struct se_device *dev,
+ int (*qf_callback)(struct se_cmd *))
+{
+ spin_lock_irq(&dev->qf_cmd_lock);
+ cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL;
+ cmd->transport_qf_callback = qf_callback;
+ list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
+ atomic_inc(&dev->dev_qf_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
+
+ schedule_work(&cmd->se_dev->qf_work_queue);
}
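
transport_handle_queue_full() parks a command whose fabric callback returned -EAGAIN on the per-device qf_cmd_list and kicks qf_work_queue; the worker later re-runs cmd->transport_qf_callback (for example transport_complete_qf()) to retry the queued status or data-in. The worker itself is outside this hunk, so the following is only a rough sketch of what such a drain loop could look like, with the function name assumed:

/*
 * Rough sketch only: the real worker is not part of this hunk.  Assumed
 * shape of a qf_cmd_list drain that retries each parked callback.
 */
static void example_qf_drain(struct se_device *dev)
{
	struct se_cmd *cmd, *cmd_tmp;
	LIST_HEAD(qf_list);

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec(&dev->dev_qf_count);
		smp_mb__after_atomic_dec();

		if (cmd->transport_qf_callback(cmd) < 0) {
			/* Still backed up: park the command again. */
			transport_handle_queue_full(cmd, dev,
					cmd->transport_qf_callback);
			continue;
		}
		cmd->transport_qf_callback = NULL;
	}
}
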
static void transport_generic_complete_ok(struct se_cmd *cmd)
{
- int reason = 0;
+ int reason = 0, ret;
/*
* Check if we need to move delayed/dormant tasks from cmds on the
* delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
* Attribute.
*/
- if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
/*
+ * Check to schedule QUEUE_FULL work, or execute an existing
+ * cmd->transport_qf_callback()
+ */
+ if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
+ schedule_work(&cmd->se_dev->qf_work_queue);
+
+ if (cmd->transport_qf_callback) {
+ ret = cmd->transport_qf_callback(cmd);
+ if (ret < 0)
+ goto queue_full;
+
+ cmd->transport_qf_callback = NULL;
+ goto done;
+ }
+ /*
* Check if we need to retrieve a sense buffer from
* the struct se_cmd in question.
*/
@@ -3872,8 +3621,11 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
* a non GOOD status.
*/
if (cmd->scsi_status) {
- transport_send_check_condition_and_sense(
+ ret = transport_send_check_condition_and_sense(
cmd, reason, 1);
+ if (ret == -EAGAIN)
+ goto queue_full;
+
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
@@ -3889,53 +3641,57 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
spin_lock(&cmd->se_lun->lun_sep_lock);
- if (SE_LUN(cmd)->lun_sep) {
- SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+ if (cmd->se_lun->lun_sep) {
+ cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
cmd->data_length;
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
- /*
- * If enabled by TCM fabirc module pre-registered SGL
- * memory, perform the memcpy() from the TCM internal
- * contigious buffer back to the original SGL.
- */
- if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
- transport_memcpy_write_contig(cmd,
- T_TASK(cmd)->t_task_pt_sgl,
- T_TASK(cmd)->t_task_buf);
- CMD_TFO(cmd)->queue_data_in(cmd);
+ ret = cmd->se_tfo->queue_data_in(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
break;
case DMA_TO_DEVICE:
spin_lock(&cmd->se_lun->lun_sep_lock);
- if (SE_LUN(cmd)->lun_sep) {
- SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets +=
+ if (cmd->se_lun->lun_sep) {
+ cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
cmd->data_length;
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
/*
* Check if we need to send READ payload for BIDI-COMMAND
*/
- if (T_TASK(cmd)->t_mem_bidi_list != NULL) {
+ if (cmd->t_bidi_data_sg) {
spin_lock(&cmd->se_lun->lun_sep_lock);
- if (SE_LUN(cmd)->lun_sep) {
- SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+ if (cmd->se_lun->lun_sep) {
+ cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
cmd->data_length;
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
- CMD_TFO(cmd)->queue_data_in(cmd);
+ ret = cmd->se_tfo->queue_data_in(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
break;
}
/* Fall through for DMA_TO_DEVICE */
case DMA_NONE:
- CMD_TFO(cmd)->queue_status(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
break;
default:
break;
}
+done:
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
+ return;
+
+queue_full:
+ pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
+ " data_direction: %d\n", cmd, cmd->data_direction);
+ transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
}
static void transport_free_dev_tasks(struct se_cmd *cmd)
@@ -3943,9 +3699,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
struct se_task *task, *task_tmp;
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry_safe(task, task_tmp,
- &T_TASK(cmd)->t_task_list, t_list) {
+ &cmd->t_task_list, t_list) {
if (atomic_read(&task->task_active))
continue;
@@ -3954,75 +3710,40 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
list_del(&task->t_list);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (task->se_dev)
- TRANSPORT(task->se_dev)->free_task(task);
+ task->se_dev->transport->free_task(task);
else
- printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
+ pr_err("task[%u] - task->se_dev is NULL\n",
task->task_no);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
-static inline void transport_free_pages(struct se_cmd *cmd)
+static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
- struct se_mem *se_mem, *se_mem_tmp;
- int free_page = 1;
-
- if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
- free_page = 0;
- if (cmd->se_dev->transport->do_se_mem_map)
- free_page = 0;
+ struct scatterlist *sg;
+ int count;
- if (T_TASK(cmd)->t_task_buf) {
- kfree(T_TASK(cmd)->t_task_buf);
- T_TASK(cmd)->t_task_buf = NULL;
- return;
- }
+ for_each_sg(sgl, sg, nents, count)
+ __free_page(sg_page(sg));
- /*
- * Caller will handle releasing of struct se_mem.
- */
- if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
- return;
+ kfree(sgl);
+}
- if (!(T_TASK(cmd)->t_tasks_se_num))
+static inline void transport_free_pages(struct se_cmd *cmd)
+{
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
return;
- list_for_each_entry_safe(se_mem, se_mem_tmp,
- T_TASK(cmd)->t_mem_list, se_list) {
- /*
- * We only release call __free_page(struct se_mem->se_page) when
- * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
- */
- if (free_page)
- __free_page(se_mem->se_page);
+ transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
+ cmd->t_data_sg = NULL;
+ cmd->t_data_nents = 0;
- list_del(&se_mem->se_list);
- kmem_cache_free(se_mem_cache, se_mem);
- }
-
- if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) {
- list_for_each_entry_safe(se_mem, se_mem_tmp,
- T_TASK(cmd)->t_mem_bidi_list, se_list) {
- /*
- * We only release call __free_page(struct se_mem->se_page) when
- * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
- */
- if (free_page)
- __free_page(se_mem->se_page);
-
- list_del(&se_mem->se_list);
- kmem_cache_free(se_mem_cache, se_mem);
- }
- }
-
- kfree(T_TASK(cmd)->t_mem_bidi_list);
- T_TASK(cmd)->t_mem_bidi_list = NULL;
- kfree(T_TASK(cmd)->t_mem_list);
- T_TASK(cmd)->t_mem_list = NULL;
- T_TASK(cmd)->t_tasks_se_num = 0;
+ transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
+ cmd->t_bidi_data_sg = NULL;
+ cmd->t_bidi_data_nents = 0;
}
static inline void transport_release_tasks(struct se_cmd *cmd)
@@ -4034,23 +3755,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd)
{
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (atomic_read(&cmd->t_fe_count)) {
+ if (!atomic_dec_and_test(&cmd->t_fe_count)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
return 1;
}
}
- if (atomic_read(&T_TASK(cmd)->t_se_count)) {
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ if (atomic_read(&cmd->t_se_count)) {
+ if (!atomic_dec_and_test(&cmd->t_se_count)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
return 1;
}
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
@@ -4062,68 +3783,57 @@ static void transport_release_fe_cmd(struct se_cmd *cmd)
if (transport_dec_and_check(cmd))
return;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->transport_dev_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
goto free_pages;
}
- atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ atomic_set(&cmd->transport_dev_active, 0);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_release_tasks(cmd);
free_pages:
transport_free_pages(cmd);
transport_free_se_cmd(cmd);
- CMD_TFO(cmd)->release_cmd_direct(cmd);
+ cmd->se_tfo->release_cmd(cmd);
}
-static int transport_generic_remove(
- struct se_cmd *cmd,
- int release_to_pool,
- int session_reinstatement)
+static int
+transport_generic_remove(struct se_cmd *cmd, int session_reinstatement)
{
unsigned long flags;
- if (!(T_TASK(cmd)))
- goto release_cmd;
-
if (transport_dec_and_check(cmd)) {
if (session_reinstatement) {
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
}
return 1;
}
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->transport_dev_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
goto free_pages;
}
- atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ atomic_set(&cmd->transport_dev_active, 0);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_release_tasks(cmd);
+
free_pages:
transport_free_pages(cmd);
-
-release_cmd:
- if (release_to_pool) {
- transport_release_cmd_to_pool(cmd);
- } else {
- transport_free_se_cmd(cmd);
- CMD_TFO(cmd)->release_cmd_direct(cmd);
- }
-
+ transport_release_cmd(cmd);
return 0;
}
/*
- * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map
+ * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
+ * allocating in the core.
* @cmd: Associated se_cmd descriptor
* @mem: SGL style memory for TCM WRITE / READ
* @sg_mem_num: Number of SGL elements
@@ -4135,614 +3845,163 @@ release_cmd:
*/
int transport_generic_map_mem_to_cmd(
struct se_cmd *cmd,
- struct scatterlist *mem,
- u32 sg_mem_num,
- struct scatterlist *mem_bidi_in,
- u32 sg_mem_bidi_num)
+ struct scatterlist *sgl,
+ u32 sgl_count,
+ struct scatterlist *sgl_bidi,
+ u32 sgl_bidi_count)
{
- u32 se_mem_cnt_out = 0;
- int ret;
-
- if (!(mem) || !(sg_mem_num))
+ if (!sgl || !sgl_count)
return 0;
- /*
- * Passed *mem will contain a list_head containing preformatted
- * struct se_mem elements...
- */
- if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
- if ((mem_bidi_in) || (sg_mem_bidi_num)) {
- printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
- " with BIDI-COMMAND\n");
- return -ENOSYS;
- }
- T_TASK(cmd)->t_mem_list = (struct list_head *)mem;
- T_TASK(cmd)->t_tasks_se_num = sg_mem_num;
- cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
- return 0;
- }
- /*
- * Otherwise, assume the caller is passing a struct scatterlist
- * array from include/linux/scatterlist.h
- */
if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
- /*
- * For CDB using TCM struct se_mem linked list scatterlist memory
- * processed into a TCM struct se_subsystem_dev, we do the mapping
- * from the passed physical memory to struct se_mem->se_page here.
- */
- T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
- if (!(T_TASK(cmd)->t_mem_list))
- return -ENOMEM;
- ret = transport_map_sg_to_mem(cmd,
- T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out);
- if (ret < 0)
- return -ENOMEM;
-
- T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out;
- /*
- * Setup BIDI READ list of struct se_mem elements
- */
- if ((mem_bidi_in) && (sg_mem_bidi_num)) {
- T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
- if (!(T_TASK(cmd)->t_mem_bidi_list)) {
- kfree(T_TASK(cmd)->t_mem_list);
- return -ENOMEM;
- }
- se_mem_cnt_out = 0;
+ cmd->t_data_sg = sgl;
+ cmd->t_data_nents = sgl_count;
- ret = transport_map_sg_to_mem(cmd,
- T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in,
- &se_mem_cnt_out);
- if (ret < 0) {
- kfree(T_TASK(cmd)->t_mem_list);
- return -ENOMEM;
- }
-
- T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out;
+ if (sgl_bidi && sgl_bidi_count) {
+ cmd->t_bidi_data_sg = sgl_bidi;
+ cmd->t_bidi_data_nents = sgl_bidi_count;
}
cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
-
- } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
- if (mem_bidi_in || sg_mem_bidi_num) {
- printk(KERN_ERR "BIDI-Commands not supported using "
- "SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
- return -ENOSYS;
- }
- /*
- * For incoming CDBs using a contiguous buffer internall with TCM,
- * save the passed struct scatterlist memory. After TCM storage object
- * processing has completed for this struct se_cmd, TCM core will call
- * transport_memcpy_[write,read]_contig() as necessary from
- * transport_generic_complete_ok() and transport_write_pending() in order
- * to copy the TCM buffer to/from the original passed *mem in SGL ->
- * struct scatterlist format.
- */
- cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
- T_TASK(cmd)->t_task_pt_sgl = mem;
}
return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
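
With the struct se_mem list gone, a fabric that already owns page-backed buffers simply hands its scatterlist to the core; SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC then makes transport_free_pages() return early, so the fabric keeps ownership of both the pages and the SGL. An illustrative caller (the helper below is an assumption, not code from any fabric module):

/* Illustrative fabric-side setup: hand pre-allocated pages to the core. */
static int example_fabric_map_data(struct se_cmd *cmd, struct page **pages,
				   int npages, u32 length)
{
	struct scatterlist *sgl;
	u32 left = length;
	int i;

	sgl = kmalloc(sizeof(*sgl) * npages, GFP_KERNEL);
	if (!sgl)
		return -ENOMEM;
	sg_init_table(sgl, npages);

	for (i = 0; i < npages; i++) {
		u32 len = min_t(u32, left, PAGE_SIZE);

		sg_set_page(&sgl[i], pages[i], len, 0);
		left -= len;
	}
	/* No BIDI payload in this example. */
	return transport_generic_map_mem_to_cmd(cmd, sgl, npages, NULL, 0);
}

Because transport_free_pages() skips NOALLOC SGLs, the fabric is expected to release sgl and its pages itself once the command completes.
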
-
-static inline long long transport_dev_end_lba(struct se_device *dev)
-{
- return dev->transport->get_blocks(dev) + 1;
-}
-
-static int transport_get_sectors(struct se_cmd *cmd)
-{
- struct se_device *dev = SE_DEV(cmd);
-
- T_TASK(cmd)->t_tasks_sectors =
- (cmd->data_length / DEV_ATTRIB(dev)->block_size);
- if (!(T_TASK(cmd)->t_tasks_sectors))
- T_TASK(cmd)->t_tasks_sectors = 1;
-
- if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK)
- return 0;
-
- if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) >
- transport_dev_end_lba(dev)) {
- printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
- " transport_dev_end_lba(): %llu\n",
- T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
- transport_dev_end_lba(dev));
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
- return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
- }
-
- return 0;
-}
-
static int transport_new_cmd_obj(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- u32 task_cdbs = 0, rc;
-
- if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
- task_cdbs++;
- T_TASK(cmd)->t_task_cdbs++;
- } else {
- int set_counts = 1;
+ struct se_device *dev = cmd->se_dev;
+ u32 task_cdbs;
+ u32 rc;
+ int set_counts = 1;
- /*
- * Setup any BIDI READ tasks and memory from
- * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks
- * are queued first for the non pSCSI passthrough case.
- */
- if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
- (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
- rc = transport_generic_get_cdb_count(cmd,
- T_TASK(cmd)->t_task_lba,
- T_TASK(cmd)->t_tasks_sectors,
- DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list,
- set_counts);
- if (!(rc)) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- }
- set_counts = 0;
- }
- /*
- * Setup the tasks and memory from T_TASK(cmd)->t_mem_list
- * Note for BIDI transfers this will contain the WRITE payload
- */
- task_cdbs = transport_generic_get_cdb_count(cmd,
- T_TASK(cmd)->t_task_lba,
- T_TASK(cmd)->t_tasks_sectors,
- cmd->data_direction, T_TASK(cmd)->t_mem_list,
- set_counts);
- if (!(task_cdbs)) {
+ /*
+ * Setup any BIDI READ tasks and memory from
+ * cmd->t_bidi_data_sg so the READ struct se_tasks
+ * are queued first for the non pSCSI passthrough case.
+ */
+ if (cmd->t_bidi_data_sg &&
+ (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
+ rc = transport_allocate_tasks(cmd,
+ cmd->t_task_lba,
+ DMA_FROM_DEVICE,
+ cmd->t_bidi_data_sg,
+ cmd->t_bidi_data_nents);
+ if (rc <= 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
- T_TASK(cmd)->t_task_cdbs += task_cdbs;
-
-#if 0
- printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
- " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length,
- T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
- T_TASK(cmd)->t_task_cdbs);
-#endif
+ atomic_inc(&cmd->t_fe_count);
+ atomic_inc(&cmd->t_se_count);
+ set_counts = 0;
}
-
- atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs);
- atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs);
- atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs);
- return 0;
-}
-
-static struct list_head *transport_init_se_mem_list(void)
-{
- struct list_head *se_mem_list;
-
- se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
- if (!(se_mem_list)) {
- printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
- return NULL;
- }
- INIT_LIST_HEAD(se_mem_list);
-
- return se_mem_list;
-}
-
-static int
-transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
-{
- unsigned char *buf;
- struct se_mem *se_mem;
-
- T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
- if (!(T_TASK(cmd)->t_mem_list))
- return -ENOMEM;
-
/*
- * If the device uses memory mapping this is enough.
+ * Setup the tasks and memory from cmd->t_data_sg
+ * Note for BIDI transfers this will contain the WRITE payload
*/
- if (cmd->se_dev->transport->do_se_mem_map)
- return 0;
-
- /*
- * Setup BIDI-COMMAND READ list of struct se_mem elements
- */
- if (T_TASK(cmd)->t_tasks_bidi) {
- T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
- if (!(T_TASK(cmd)->t_mem_bidi_list)) {
- kfree(T_TASK(cmd)->t_mem_list);
- return -ENOMEM;
- }
+ task_cdbs = transport_allocate_tasks(cmd,
+ cmd->t_task_lba,
+ cmd->data_direction,
+ cmd->t_data_sg,
+ cmd->t_data_nents);
+ if (task_cdbs <= 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
}
- while (length) {
- se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
- if (!(se_mem)) {
- printk(KERN_ERR "Unable to allocate struct se_mem\n");
- goto out;
- }
-
-/* #warning FIXME Allocate contigous pages for struct se_mem elements */
- se_mem->se_page = alloc_pages(GFP_KERNEL, 0);
- if (!(se_mem->se_page)) {
- printk(KERN_ERR "alloc_pages() failed\n");
- goto out;
- }
-
- buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
- if (!(buf)) {
- printk(KERN_ERR "kmap_atomic() failed\n");
- goto out;
- }
- INIT_LIST_HEAD(&se_mem->se_list);
- se_mem->se_len = (length > dma_size) ? dma_size : length;
- memset(buf, 0, se_mem->se_len);
- kunmap_atomic(buf, KM_IRQ0);
-
- list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list);
- T_TASK(cmd)->t_tasks_se_num++;
-
- DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
- " Offset(%u)\n", se_mem->se_page, se_mem->se_len,
- se_mem->se_off);
-
- length -= se_mem->se_len;
+ if (set_counts) {
+ atomic_inc(&cmd->t_fe_count);
+ atomic_inc(&cmd->t_se_count);
}
- DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
- T_TASK(cmd)->t_tasks_se_num);
+ cmd->t_task_list_num = task_cdbs;
+ atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
+ atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
+ atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
return 0;
-out:
- if (se_mem)
- __free_pages(se_mem->se_page, 0);
- kmem_cache_free(se_mem_cache, se_mem);
- return -1;
}
-u32 transport_calc_sg_num(
- struct se_task *task,
- struct se_mem *in_se_mem,
- u32 task_offset)
+void *transport_kmap_first_data_page(struct se_cmd *cmd)
{
- struct se_cmd *se_cmd = task->task_se_cmd;
- struct se_device *se_dev = SE_DEV(se_cmd);
- struct se_mem *se_mem = in_se_mem;
- struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd);
- u32 sg_length, task_size = task->task_size, task_sg_num_padded;
-
- while (task_size != 0) {
- DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)"
- " se_mem->se_off(%u) task_offset(%u)\n",
- se_mem->se_page, se_mem->se_len,
- se_mem->se_off, task_offset);
-
- if (task_offset == 0) {
- if (task_size >= se_mem->se_len) {
- sg_length = se_mem->se_len;
-
- if (!(list_is_last(&se_mem->se_list,
- T_TASK(se_cmd)->t_mem_list)))
- se_mem = list_entry(se_mem->se_list.next,
- struct se_mem, se_list);
- } else {
- sg_length = task_size;
- task_size -= sg_length;
- goto next;
- }
+ struct scatterlist *sg = cmd->t_data_sg;
- DEBUG_SC("sg_length(%u) task_size(%u)\n",
- sg_length, task_size);
- } else {
- if ((se_mem->se_len - task_offset) > task_size) {
- sg_length = task_size;
- task_size -= sg_length;
- goto next;
- } else {
- sg_length = (se_mem->se_len - task_offset);
-
- if (!(list_is_last(&se_mem->se_list,
- T_TASK(se_cmd)->t_mem_list)))
- se_mem = list_entry(se_mem->se_list.next,
- struct se_mem, se_list);
- }
-
- DEBUG_SC("sg_length(%u) task_size(%u)\n",
- sg_length, task_size);
-
- task_offset = 0;
- }
- task_size -= sg_length;
-next:
- DEBUG_SC("task[%u] - Reducing task_size to(%u)\n",
- task->task_no, task_size);
-
- task->task_sg_num++;
- }
+ BUG_ON(!sg);
/*
- * Check if the fabric module driver is requesting that all
- * struct se_task->task_sg[] be chained together.. If so,
- * then allocate an extra padding SG entry for linking and
- * marking the end of the chained SGL.
+ * We need to take into account a possible offset here for fabrics like
+ * tcm_loop that may be using a contig buffer from the SCSI midlayer for
+ * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
*/
- if (tfo->task_sg_chaining) {
- task_sg_num_padded = (task->task_sg_num + 1);
- task->task_padded_sg = 1;
- } else
- task_sg_num_padded = task->task_sg_num;
-
- task->task_sg = kzalloc(task_sg_num_padded *
- sizeof(struct scatterlist), GFP_KERNEL);
- if (!(task->task_sg)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " task->task_sg\n");
- return 0;
- }
- sg_init_table(&task->task_sg[0], task_sg_num_padded);
- /*
- * Setup task->task_sg_bidi for SCSI READ payload for
- * TCM/pSCSI passthrough if present for BIDI-COMMAND
- */
- if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) &&
- (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
- task->task_sg_bidi = kzalloc(task_sg_num_padded *
- sizeof(struct scatterlist), GFP_KERNEL);
- if (!(task->task_sg_bidi)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " task->task_sg_bidi\n");
- return 0;
- }
- sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded);
- }
- /*
- * For the chaining case, setup the proper end of SGL for the
- * initial submission struct task into struct se_subsystem_api.
- * This will be cleared later by transport_do_task_sg_chain()
- */
- if (task->task_padded_sg) {
- sg_mark_end(&task->task_sg[task->task_sg_num - 1]);
- /*
- * Added the 'if' check before marking end of bi-directional
- * scatterlist (which gets created only in case of request
- * (RD + WR).
- */
- if (task->task_sg_bidi)
- sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]);
- }
-
- DEBUG_SC("Successfully allocated task->task_sg_num(%u),"
- " task_sg_num_padded(%u)\n", task->task_sg_num,
- task_sg_num_padded);
-
- return task->task_sg_num;
+ return kmap(sg_page(sg)) + sg->offset;
}
+EXPORT_SYMBOL(transport_kmap_first_data_page);
-static inline int transport_set_tasks_sectors_disk(
- struct se_task *task,
- struct se_device *dev,
- unsigned long long lba,
- u32 sectors,
- int *max_sectors_set)
+void transport_kunmap_first_data_page(struct se_cmd *cmd)
{
- if ((lba + sectors) > transport_dev_end_lba(dev)) {
- task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);
-
- if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) {
- task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
- *max_sectors_set = 1;
- }
- } else {
- if (sectors > DEV_ATTRIB(dev)->max_sectors) {
- task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
- *max_sectors_set = 1;
- } else
- task->task_sectors = sectors;
- }
-
- return 0;
+ kunmap(sg_page(cmd->t_data_sg));
}
+EXPORT_SYMBOL(transport_kunmap_first_data_page);
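
Together the two exports give emulation code a simple way to address the payload of a control CDB: since the sequencer caps SCF_SCSI_CONTROL_SG_IO_CDB transfers at PAGE_SIZE, the first mapped page (plus the SG offset) covers the whole buffer. A minimal usage sketch (the helper name is illustrative):

/* Sketch: fill a control-CDB response using the first (and only) data page. */
static void example_fill_response(struct se_cmd *cmd,
				  const unsigned char *payload, u32 len)
{
	unsigned char *buf = transport_kmap_first_data_page(cmd);

	memcpy(buf, payload, min_t(u32, len, cmd->data_length));
	transport_kunmap_first_data_page(cmd);
}
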
-static inline int transport_set_tasks_sectors_non_disk(
- struct se_task *task,
- struct se_device *dev,
- unsigned long long lba,
- u32 sectors,
- int *max_sectors_set)
+static int
+transport_generic_get_mem(struct se_cmd *cmd)
{
- if (sectors > DEV_ATTRIB(dev)->max_sectors) {
- task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
- *max_sectors_set = 1;
- } else
- task->task_sectors = sectors;
+ u32 length = cmd->data_length;
+ unsigned int nents;
+ struct page *page;
+ int i = 0;
- return 0;
-}
+ nents = DIV_ROUND_UP(length, PAGE_SIZE);
+ cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
+ if (!cmd->t_data_sg)
+ return -ENOMEM;
-static inline int transport_set_tasks_sectors(
- struct se_task *task,
- struct se_device *dev,
- unsigned long long lba,
- u32 sectors,
- int *max_sectors_set)
-{
- return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ?
- transport_set_tasks_sectors_disk(task, dev, lba, sectors,
- max_sectors_set) :
- transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
- max_sectors_set);
-}
+ cmd->t_data_nents = nents;
+ sg_init_table(cmd->t_data_sg, nents);
-static int transport_map_sg_to_mem(
- struct se_cmd *cmd,
- struct list_head *se_mem_list,
- void *in_mem,
- u32 *se_mem_cnt)
-{
- struct se_mem *se_mem;
- struct scatterlist *sg;
- u32 sg_count = 1, cmd_size = cmd->data_length;
+ while (length) {
+ u32 page_len = min_t(u32, length, PAGE_SIZE);
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ goto out;
- if (!in_mem) {
- printk(KERN_ERR "No source scatterlist\n");
- return -1;
+ sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
+ length -= page_len;
+ i++;
}
- sg = (struct scatterlist *)in_mem;
-
- while (cmd_size) {
- se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
- if (!(se_mem)) {
- printk(KERN_ERR "Unable to allocate struct se_mem\n");
- return -1;
- }
- INIT_LIST_HEAD(&se_mem->se_list);
- DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u"
- " sg_page: %p offset: %d length: %d\n", cmd_size,
- sg_page(sg), sg->offset, sg->length);
-
- se_mem->se_page = sg_page(sg);
- se_mem->se_off = sg->offset;
-
- if (cmd_size > sg->length) {
- se_mem->se_len = sg->length;
- sg = sg_next(sg);
- sg_count++;
- } else
- se_mem->se_len = cmd_size;
-
- cmd_size -= se_mem->se_len;
-
- DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
- *se_mem_cnt, cmd_size);
- DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
- se_mem->se_page, se_mem->se_off, se_mem->se_len);
+ return 0;
- list_add_tail(&se_mem->se_list, se_mem_list);
- (*se_mem_cnt)++;
+out:
+ while (i >= 0) {
+ __free_page(sg_page(&cmd->t_data_sg[i]));
+ i--;
}
-
- DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
- " struct se_mem\n", sg_count, *se_mem_cnt);
-
- if (sg_count != *se_mem_cnt)
- BUG();
-
- return 0;
+ kfree(cmd->t_data_sg);
+ cmd->t_data_sg = NULL;
+ return -ENOMEM;
}
-/* transport_map_mem_to_sg():
- *
- *
- */
-int transport_map_mem_to_sg(
- struct se_task *task,
- struct list_head *se_mem_list,
- void *in_mem,
- struct se_mem *in_se_mem,
- struct se_mem **out_se_mem,
- u32 *se_mem_cnt,
- u32 *task_offset)
+/* Reduce sectors if they are too long for the device */
+static inline sector_t transport_limit_task_sectors(
+ struct se_device *dev,
+ unsigned long long lba,
+ sector_t sectors)
{
- struct se_cmd *se_cmd = task->task_se_cmd;
- struct se_mem *se_mem = in_se_mem;
- struct scatterlist *sg = (struct scatterlist *)in_mem;
- u32 task_size = task->task_size, sg_no = 0;
-
- if (!sg) {
- printk(KERN_ERR "Unable to locate valid struct"
- " scatterlist pointer\n");
- return -1;
- }
+ sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
- while (task_size != 0) {
- /*
- * Setup the contigious array of scatterlists for
- * this struct se_task.
- */
- sg_assign_page(sg, se_mem->se_page);
-
- if (*task_offset == 0) {
- sg->offset = se_mem->se_off;
+ if (dev->transport->get_device_type(dev) == TYPE_DISK)
+ if ((lba + sectors) > transport_dev_end_lba(dev))
+ sectors = ((transport_dev_end_lba(dev) - lba) + 1);
- if (task_size >= se_mem->se_len) {
- sg->length = se_mem->se_len;
-
- if (!(list_is_last(&se_mem->se_list,
- T_TASK(se_cmd)->t_mem_list))) {
- se_mem = list_entry(se_mem->se_list.next,
- struct se_mem, se_list);
- (*se_mem_cnt)++;
- }
- } else {
- sg->length = task_size;
- /*
- * Determine if we need to calculate an offset
- * into the struct se_mem on the next go around..
- */
- task_size -= sg->length;
- if (!(task_size))
- *task_offset = sg->length;
-
- goto next;
- }
-
- } else {
- sg->offset = (*task_offset + se_mem->se_off);
-
- if ((se_mem->se_len - *task_offset) > task_size) {
- sg->length = task_size;
- /*
- * Determine if we need to calculate an offset
- * into the struct se_mem on the next go around..
- */
- task_size -= sg->length;
- if (!(task_size))
- *task_offset += sg->length;
-
- goto next;
- } else {
- sg->length = (se_mem->se_len - *task_offset);
-
- if (!(list_is_last(&se_mem->se_list,
- T_TASK(se_cmd)->t_mem_list))) {
- se_mem = list_entry(se_mem->se_list.next,
- struct se_mem, se_list);
- (*se_mem_cnt)++;
- }
- }
-
- *task_offset = 0;
- }
- task_size -= sg->length;
-next:
- DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing"
- " task_size to(%u), task_offset: %u\n", task->task_no, sg_no,
- sg_page(sg), sg->length, sg->offset, task_size, *task_offset);
-
- sg_no++;
- if (!(task_size))
- break;
-
- sg = sg_next(sg);
-
- if (task_size > se_cmd->data_length)
- BUG();
- }
- *out_se_mem = se_mem;
-
- DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)"
- " SGs\n", task->task_no, *se_mem_cnt, sg_no);
-
- return 0;
+ return sectors;
}
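
The clamp first honours the backend's max_sectors and then, for TYPE_DISK, the number of LBAs left before transport_dev_end_lba(). A worked example with made-up numbers:

/* Worked example of the clamp (illustrative numbers only). */
static sector_t example_limit(void)
{
	sector_t lba = 9500, sectors = 2048;
	sector_t max_sectors = 1024, end_lba = 10000; /* transport_dev_end_lba() */

	sectors = min_t(sector_t, sectors, max_sectors);	/* -> 1024 */
	if (lba + sectors > end_lba)
		sectors = (end_lba - lba) + 1;			/* -> 501  */
	return sectors;
}
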
+
/*
* This function can be used by HW target mode drivers to create a linked
* scatterlist from all contiguously allocated struct se_task->task_sg[].
@@ -4751,334 +4010,235 @@ next:
*/
void transport_do_task_sg_chain(struct se_cmd *cmd)
{
- struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
- struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
- struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
+ struct scatterlist *sg_first = NULL;
+ struct scatterlist *sg_prev = NULL;
+ int sg_prev_nents = 0;
+ struct scatterlist *sg;
struct se_task *task;
- struct target_core_fabric_ops *tfo = CMD_TFO(cmd);
- u32 task_sg_num = 0, sg_count = 0;
+ u32 chained_nents = 0;
int i;
- if (tfo->task_sg_chaining == 0) {
- printk(KERN_ERR "task_sg_chaining is diabled for fabric module:"
- " %s\n", tfo->get_fabric_name());
- dump_stack();
- return;
- }
+ BUG_ON(!cmd->se_tfo->task_sg_chaining);
+
/*
* Walk the struct se_task list and setup scatterlist chains
- * for each contiguosly allocated struct se_task->task_sg[].
+ * for each contiguously allocated struct se_task->task_sg[].
*/
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
- if (!(task->task_sg) || !(task->task_padded_sg))
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
+ if (!task->task_sg)
continue;
- if (sg_head && sg_link) {
- sg_head_cur = &task->task_sg[0];
- sg_link_cur = &task->task_sg[task->task_sg_num];
- /*
- * Either add chain or mark end of scatterlist
- */
- if (!(list_is_last(&task->t_list,
- &T_TASK(cmd)->t_task_list))) {
- /*
- * Clear existing SGL termination bit set in
- * transport_calc_sg_num(), see sg_mark_end()
- */
- sg_end_cur = &task->task_sg[task->task_sg_num - 1];
- sg_end_cur->page_link &= ~0x02;
-
- sg_chain(sg_head, task_sg_num, sg_head_cur);
- sg_count += task->task_sg_num;
- task_sg_num = (task->task_sg_num + 1);
- } else {
- sg_chain(sg_head, task_sg_num, sg_head_cur);
- sg_count += task->task_sg_num;
- task_sg_num = task->task_sg_num;
- }
+ BUG_ON(!task->task_padded_sg);
- sg_head = sg_head_cur;
- sg_link = sg_link_cur;
- continue;
- }
- sg_head = sg_first = &task->task_sg[0];
- sg_link = &task->task_sg[task->task_sg_num];
- /*
- * Check for single task..
- */
- if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) {
- /*
- * Clear existing SGL termination bit set in
- * transport_calc_sg_num(), see sg_mark_end()
- */
- sg_end = &task->task_sg[task->task_sg_num - 1];
- sg_end->page_link &= ~0x02;
- sg_count += task->task_sg_num;
- task_sg_num = (task->task_sg_num + 1);
+ if (!sg_first) {
+ sg_first = task->task_sg;
+ chained_nents = task->task_sg_nents;
} else {
- sg_count += task->task_sg_num;
- task_sg_num = task->task_sg_num;
+ sg_chain(sg_prev, sg_prev_nents, task->task_sg);
+ chained_nents += task->task_sg_nents;
}
+
+ sg_prev = task->task_sg;
+ sg_prev_nents = task->task_sg_nents;
}
/*
 * Setup the starting pointer and total t_tasks_sg_chained_no including
* padding SGs for linking and to mark the end.
*/
- T_TASK(cmd)->t_tasks_sg_chained = sg_first;
- T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
+ cmd->t_tasks_sg_chained = sg_first;
+ cmd->t_tasks_sg_chained_no = chained_nents;
- DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
- " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
- T_TASK(cmd)->t_tasks_sg_chained_no);
+ pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
+ " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
+ cmd->t_tasks_sg_chained_no);
- for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
- T_TASK(cmd)->t_tasks_sg_chained_no, i) {
+ for_each_sg(cmd->t_tasks_sg_chained, sg,
+ cmd->t_tasks_sg_chained_no, i) {
- DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
- i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
+ pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
+ i, sg, sg_page(sg), sg->length, sg->offset);
if (sg_is_chain(sg))
- DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
+ pr_debug("SG: %p sg_is_chain=1\n", sg);
if (sg_is_last(sg))
- DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
+ pr_debug("SG: %p sg_is_last=1\n", sg);
}
}
EXPORT_SYMBOL(transport_do_task_sg_chain);
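
The chaining walk relies on the standard scatterlist chaining API: each task over-allocates one padding entry (task_padded_sg), and sg_chain() converts that spare slot into a link to the next task's table so for_each_sg() can traverse the whole command as a single list. A stand-alone sketch of the same pattern (not taken from the patch):

/* Minimal sketch of the chaining pattern used above. */
static void example_chain_two_tables(struct scatterlist *a, int a_nents,
				     struct scatterlist *b, int b_nents)
{
	/*
	 * 'a' must have been allocated with a_nents + 1 entries; the extra
	 * entry is consumed by sg_chain() as the chain link.
	 */
	sg_chain(a, a_nents + 1, b);
	/* The last entry of 'b' keeps its sg_mark_end() termination. */
}
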
-static int transport_do_se_mem_map(
- struct se_device *dev,
- struct se_task *task,
- struct list_head *se_mem_list,
- void *in_mem,
- struct se_mem *in_se_mem,
- struct se_mem **out_se_mem,
- u32 *se_mem_cnt,
- u32 *task_offset_in)
-{
- u32 task_offset = *task_offset_in;
- int ret = 0;
- /*
- * se_subsystem_api_t->do_se_mem_map is used when internal allocation
- * has been done by the transport plugin.
- */
- if (TRANSPORT(dev)->do_se_mem_map) {
- ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list,
- in_mem, in_se_mem, out_se_mem, se_mem_cnt,
- task_offset_in);
- if (ret == 0)
- T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
-
- return ret;
- }
-
- BUG_ON(list_empty(se_mem_list));
- /*
- * This is the normal path for all normal non BIDI and BIDI-COMMAND
- * WRITE payloads.. If we need to do BIDI READ passthrough for
- * TCM/pSCSI the first call to transport_do_se_mem_map ->
- * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the
- * allocation for task->task_sg_bidi, and the subsequent call to
- * transport_do_se_mem_map() from transport_generic_get_cdb_count()
- */
- if (!(task->task_sg_bidi)) {
- /*
- * Assume default that transport plugin speaks preallocated
- * scatterlists.
- */
- if (!(transport_calc_sg_num(task, in_se_mem, task_offset)))
- return -1;
- /*
- * struct se_task->task_sg now contains the struct scatterlist array.
- */
- return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
- in_se_mem, out_se_mem, se_mem_cnt,
- task_offset_in);
- }
- /*
- * Handle the se_mem_list -> struct task->task_sg_bidi
- * memory map for the extra BIDI READ payload
- */
- return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
- in_se_mem, out_se_mem, se_mem_cnt,
- task_offset_in);
-}
-
-static u32 transport_generic_get_cdb_count(
+/*
+ * Break up cmd into chunks transport can handle
+ */
+static int transport_allocate_data_tasks(
struct se_cmd *cmd,
unsigned long long lba,
- u32 sectors,
enum dma_data_direction data_direction,
- struct list_head *mem_list,
- int set_counts)
+ struct scatterlist *sgl,
+ unsigned int sgl_nents)
{
unsigned char *cdb = NULL;
struct se_task *task;
- struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
- struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
- struct se_device *dev = SE_DEV(cmd);
- int max_sectors_set = 0, ret;
- u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;
-
- if (!mem_list) {
- printk(KERN_ERR "mem_list is NULL in transport_generic_get"
- "_cdb_count()\n");
- return 0;
- }
- /*
- * While using RAMDISK_DR backstores is the only case where
- * mem_list will ever be empty at this point.
- */
- if (!(list_empty(mem_list)))
- se_mem = list_entry(mem_list->next, struct se_mem, se_list);
- /*
- * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
- * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
- */
- if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
- !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) &&
- (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
- se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next,
- struct se_mem, se_list);
-
- while (sectors) {
- DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
- CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors,
- transport_dev_end_lba(dev));
+ struct se_device *dev = cmd->se_dev;
+ unsigned long flags;
+ int task_count, i, ret;
+ sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
+ u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
+ struct scatterlist *sg;
+ struct scatterlist *cmd_sg;
- task = transport_generic_get_task(cmd, data_direction);
- if (!(task))
- goto out;
+ WARN_ON(cmd->data_length % sector_size);
+ sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
+ task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
+
+ cmd_sg = sgl;
+ for (i = 0; i < task_count; i++) {
+ unsigned int task_size;
+ int count;
- transport_set_tasks_sectors(task, dev, lba, sectors,
- &max_sectors_set);
+ task = transport_generic_get_task(cmd, data_direction);
+ if (!task)
+ return -ENOMEM;
task->task_lba = lba;
- lba += task->task_sectors;
- sectors -= task->task_sectors;
- task->task_size = (task->task_sectors *
- DEV_ATTRIB(dev)->block_size);
-
- cdb = TRANSPORT(dev)->get_cdb(task);
- if ((cdb)) {
- memcpy(cdb, T_TASK(cmd)->t_task_cdb,
- scsi_command_size(T_TASK(cmd)->t_task_cdb));
- cmd->transport_split_cdb(task->task_lba,
- &task->task_sectors, cdb);
- }
+ task->task_sectors = min(sectors, dev_max_sectors);
+ task->task_size = task->task_sectors * sector_size;
- /*
- * Perform the SE OBJ plugin and/or Transport plugin specific
- * mapping for T_TASK(cmd)->t_mem_list. And setup the
- * task->task_sg and if necessary task->task_sg_bidi
- */
- ret = transport_do_se_mem_map(dev, task, mem_list,
- NULL, se_mem, &se_mem_lout, &se_mem_cnt,
- &task_offset_in);
- if (ret < 0)
- goto out;
+ cdb = dev->transport->get_cdb(task);
+ BUG_ON(!cdb);
+
+ memcpy(cdb, cmd->t_task_cdb,
+ scsi_command_size(cmd->t_task_cdb));
+
+ /* Update new cdb with updated lba/sectors */
+ cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
- se_mem = se_mem_lout;
/*
- * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi
- * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
- *
- * Note that the first call to transport_do_se_mem_map() above will
- * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
- * -> transport_calc_sg_num(), and the second here will do the
- * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
+ * Check if the fabric module driver is requesting that all
+ * struct se_task->task_sg[] be chained together.. If so,
+ * then allocate an extra padding SG entry for linking and
+ * marking the end of the chained SGL.
+ * Possibly over-allocate task sgl size by using cmd sgl size.
+ * It's so much easier and only a waste when task_count > 1.
+ * That is extremely rare.
*/
- if (task->task_sg_bidi != NULL) {
- ret = transport_do_se_mem_map(dev, task,
- T_TASK(cmd)->t_mem_bidi_list, NULL,
- se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
- &task_offset_in);
- if (ret < 0)
- goto out;
+ task->task_sg_nents = sgl_nents;
+ if (cmd->se_tfo->task_sg_chaining) {
+ task->task_sg_nents++;
+ task->task_padded_sg = 1;
+ }
- se_mem_bidi = se_mem_bidi_lout;
+ task->task_sg = kmalloc(sizeof(struct scatterlist) *
+ task->task_sg_nents, GFP_KERNEL);
+ if (!task->task_sg) {
+ cmd->se_dev->transport->free_task(task);
+ return -ENOMEM;
}
- task_cdbs++;
- DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
- task_cdbs, task->task_sg_num);
+ sg_init_table(task->task_sg, task->task_sg_nents);
- if (max_sectors_set) {
- max_sectors_set = 0;
- continue;
+ task_size = task->task_size;
+
+ /* Build new sgl, only up to task_size */
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
+ if (cmd_sg->length > task_size)
+ break;
+
+ *sg = *cmd_sg;
+ task_size -= cmd_sg->length;
+ cmd_sg = sg_next(cmd_sg);
}
- if (!sectors)
- break;
- }
+ lba += task->task_sectors;
+ sectors -= task->task_sectors;
- if (set_counts) {
- atomic_inc(&T_TASK(cmd)->t_fe_count);
- atomic_inc(&T_TASK(cmd)->t_se_count);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ list_add_tail(&task->t_list, &cmd->t_task_list);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
+ /*
+ * Now perform the memory map of task->task_sg[] into backend
+ * subsystem memory..
+ */
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
+ if (atomic_read(&task->task_sent))
+ continue;
+ if (!dev->transport->map_data_SG)
+ continue;
- DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
- CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
- ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);
+ ret = dev->transport->map_data_SG(task);
+ if (ret < 0)
+ return 0;
+ }
- return task_cdbs;
-out:
- return 0;
+ return task_count;
}
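
The split is pure arithmetic: sectors = DIV_ROUND_UP(data_length, block_size), task_count = DIV_ROUND_UP_SECTOR_T(sectors, max_sectors), and each task then takes min(sectors, max_sectors) starting at the running LBA. A worked example with illustrative numbers:

/* Worked example of the per-command task split (illustrative numbers). */
static void example_task_split(void)
{
	u32 data_length = 1024 * 1024;	/* 1 MiB */
	u32 block_size  = 512;
	u32 max_sectors = 1024;

	u32 sectors    = DIV_ROUND_UP(data_length, block_size);	/* 2048 */
	u32 task_count = DIV_ROUND_UP(sectors, max_sectors);		/* 2    */

	/* task 0: lba + 0,    1024 sectors; task 1: lba + 1024, 1024 sectors */
	(void)task_count;
}
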
static int
-transport_map_control_cmd_to_task(struct se_cmd *cmd)
+transport_allocate_control_task(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
unsigned char *cdb;
struct se_task *task;
- int ret;
+ unsigned long flags;
+ int ret = 0;
task = transport_generic_get_task(cmd, cmd->data_direction);
if (!task)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ return -ENOMEM;
- cdb = TRANSPORT(dev)->get_cdb(task);
- if (cdb)
- memcpy(cdb, cmd->t_task->t_task_cdb,
- scsi_command_size(cmd->t_task->t_task_cdb));
+ cdb = dev->transport->get_cdb(task);
+ BUG_ON(!cdb);
+ memcpy(cdb, cmd->t_task_cdb,
+ scsi_command_size(cmd->t_task_cdb));
+
+ task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
+ GFP_KERNEL);
+ if (!task->task_sg) {
+ cmd->se_dev->transport->free_task(task);
+ return -ENOMEM;
+ }
+ memcpy(task->task_sg, cmd->t_data_sg,
+ sizeof(struct scatterlist) * cmd->t_data_nents);
task->task_size = cmd->data_length;
- task->task_sg_num =
- (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
+ task->task_sg_nents = cmd->t_data_nents;
- atomic_inc(&cmd->t_task->t_fe_count);
- atomic_inc(&cmd->t_task->t_se_count);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ list_add_tail(&task->t_list, &cmd->t_task_list);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
- struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
- u32 se_mem_cnt = 0, task_offset = 0;
-
- if (!list_empty(T_TASK(cmd)->t_mem_list))
- se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
- struct se_mem, se_list);
-
- ret = transport_do_se_mem_map(dev, task,
- cmd->t_task->t_mem_list, NULL, se_mem,
- &se_mem_lout, &se_mem_cnt, &task_offset);
- if (ret < 0)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
-
- if (dev->transport->map_task_SG)
- return dev->transport->map_task_SG(task);
- return 0;
- } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
- if (dev->transport->map_task_non_SG)
- return dev->transport->map_task_non_SG(task);
- return 0;
+ if (dev->transport->map_control_SG)
+ ret = dev->transport->map_control_SG(task);
} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
if (dev->transport->cdb_none)
- return dev->transport->cdb_none(task);
- return 0;
+ ret = dev->transport->cdb_none(task);
} else {
+ pr_err("target: Unknown control cmd type!\n");
BUG();
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
}
+
+ /* Success! Return number of tasks allocated */
+ if (ret == 0)
+ return 1;
+ return ret;
+}
+
+static u32 transport_allocate_tasks(
+ struct se_cmd *cmd,
+ unsigned long long lba,
+ enum dma_data_direction data_direction,
+ struct scatterlist *sgl,
+ unsigned int sgl_nents)
+{
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)
+ return transport_allocate_data_tasks(cmd, lba, data_direction,
+ sgl, sgl_nents);
+ else
+ return transport_allocate_control_task(cmd);
+
}
+
/* transport_generic_new_cmd(): Called from transport_processing_thread()
*
* Allocate storage transport resources from a set of values predefined
@@ -5088,64 +4248,33 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
/*
* Generate struct se_task(s) and/or their payloads for this CDB.
*/
-static int transport_generic_new_cmd(struct se_cmd *cmd)
+int transport_generic_new_cmd(struct se_cmd *cmd)
{
- struct se_portal_group *se_tpg;
- struct se_task *task;
- struct se_device *dev = SE_DEV(cmd);
int ret = 0;
/*
 * Determine if the TCM fabric module has already allocated physical
* memory, and is directly calling transport_generic_map_mem_to_cmd()
- * to setup beforehand the linked list of physical memory at
- * T_TASK(cmd)->t_mem_list of struct se_mem->se_page
+ * beforehand.
*/
- if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
- ret = transport_allocate_resources(cmd);
+ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
+ cmd->data_length) {
+ ret = transport_generic_get_mem(cmd);
if (ret < 0)
return ret;
}
-
- ret = transport_get_sectors(cmd);
- if (ret < 0)
- return ret;
-
+ /*
+ * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for
+ * control or data CDB types, and perform the map to backend subsystem
+ * code from SGL memory allocated here by transport_generic_get_mem(), or
+ * via pre-existing SGL memory setup explicitly by fabric module code with
+ * transport_generic_map_mem_to_cmd().
+ */
ret = transport_new_cmd_obj(cmd);
if (ret < 0)
return ret;
-
/*
- * Determine if the calling TCM fabric module is talking to
- * Linux/NET via kernel sockets and needs to allocate a
- * struct iovec array to complete the struct se_cmd
- */
- se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg;
- if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) {
- ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd);
- if (ret < 0)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
- }
-
- if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
- if (atomic_read(&task->task_sent))
- continue;
- if (!dev->transport->map_task_SG)
- continue;
-
- ret = dev->transport->map_task_SG(task);
- if (ret < 0)
- return ret;
- }
- } else {
- ret = transport_map_control_cmd_to_task(cmd);
- if (ret < 0)
- return ret;
- }
-
- /*
- * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready..
+ * For WRITEs, let the fabric know its buffer is ready..
* This WRITE struct se_cmd (and all of its associated struct se_task's)
* will be added to the struct se_device execution queue after its WRITE
* data has arrived. (ie: It gets handled by the transport processing
@@ -5162,6 +4291,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
transport_execute_tasks(cmd);
return 0;
}
+EXPORT_SYMBOL(transport_generic_new_cmd);
/* transport_generic_process_write():
*
@@ -5169,68 +4299,15 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
*/
void transport_generic_process_write(struct se_cmd *cmd)
{
-#if 0
- /*
- * Copy SCSI Presented DTL sector(s) from received buffers allocated to
- * original EDTL
- */
- if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
- if (!T_TASK(cmd)->t_tasks_se_num) {
- unsigned char *dst, *buf =
- (unsigned char *)T_TASK(cmd)->t_task_buf;
-
- dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL);
- if (!(dst)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " WRITE underflow\n");
- transport_generic_request_failure(cmd, NULL,
- PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
- return;
- }
- memcpy(dst, buf, cmd->cmd_spdtl);
-
- kfree(T_TASK(cmd)->t_task_buf);
- T_TASK(cmd)->t_task_buf = dst;
- } else {
- struct scatterlist *sg =
- (struct scatterlist *sg)T_TASK(cmd)->t_task_buf;
- struct scatterlist *orig_sg;
-
- orig_sg = kzalloc(sizeof(struct scatterlist) *
- T_TASK(cmd)->t_tasks_se_num,
- GFP_KERNEL))) {
- if (!(orig_sg)) {
- printk(KERN_ERR "Unable to allocate memory"
- " for WRITE underflow\n");
- transport_generic_request_failure(cmd, NULL,
- PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
- return;
- }
-
- memcpy(orig_sg, T_TASK(cmd)->t_task_buf,
- sizeof(struct scatterlist) *
- T_TASK(cmd)->t_tasks_se_num);
-
- cmd->data_length = cmd->cmd_spdtl;
- /*
- * FIXME, clear out original struct se_task and state
- * information.
- */
- if (transport_generic_new_cmd(cmd) < 0) {
- transport_generic_request_failure(cmd, NULL,
- PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
- kfree(orig_sg);
- return;
- }
-
- transport_memcpy_write_sg(cmd, orig_sg);
- }
- }
-#endif
transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);
+static int transport_write_pending_qf(struct se_cmd *cmd)
+{
+ return cmd->se_tfo->write_pending(cmd);
+}
+
/* transport_generic_write_pending():
*
*
@@ -5240,24 +4317,26 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
unsigned long flags;
int ret;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->t_state = TRANSPORT_WRITE_PENDING;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- /*
- * For the TCM control CDBs using a contiguous buffer, do the memcpy
- * from the passed Linux/SCSI struct scatterlist located at
- * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at
- * T_TASK(se_cmd)->t_task_buf.
- */
- if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
- transport_memcpy_read_contig(cmd,
- T_TASK(cmd)->t_task_buf,
- T_TASK(cmd)->t_task_pt_sgl);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ if (cmd->transport_qf_callback) {
+ ret = cmd->transport_qf_callback(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
+ else if (ret < 0)
+ return ret;
+
+ cmd->transport_qf_callback = NULL;
+ return 0;
+ }
+
/*
* Clear the se_cmd for WRITE_PENDING status in order to set
- * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data
+ * cmd->t_transport_active=0 so that transport_generic_handle_data
* can be called from HW target mode interrupt code. This is safe
- * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending
+ * to be called with transport_off=1 before the cmd->se_tfo->write_pending
* because the se_cmd->se_lun pointer is not being cleared.
*/
transport_cmd_check_stop(cmd, 1, 0);
@@ -5266,26 +4345,30 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
* Call the fabric write_pending function here to let the
* frontend know that WRITE buffers are ready.
*/
- ret = CMD_TFO(cmd)->write_pending(cmd);
- if (ret < 0)
+ ret = cmd->se_tfo->write_pending(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
+ else if (ret < 0)
return ret;
return PYX_TRANSPORT_WRITE_PENDING;
+
+queue_full:
+ pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
+ cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
+ transport_handle_queue_full(cmd, cmd->se_dev,
+ transport_write_pending_qf);
+ return ret;
}
-/* transport_release_cmd_to_pool():
- *
- *
- */
-void transport_release_cmd_to_pool(struct se_cmd *cmd)
+void transport_release_cmd(struct se_cmd *cmd)
{
- BUG_ON(!T_TASK(cmd));
- BUG_ON(!CMD_TFO(cmd));
+ BUG_ON(!cmd->se_tfo);
transport_free_se_cmd(cmd);
- CMD_TFO(cmd)->release_cmd_to_pool(cmd);
+ cmd->se_tfo->release_cmd(cmd);
}
-EXPORT_SYMBOL(transport_release_cmd_to_pool);
+EXPORT_SYMBOL(transport_release_cmd);
/* transport_generic_free_cmd():
*
@@ -5294,19 +4377,18 @@ EXPORT_SYMBOL(transport_release_cmd_to_pool);
void transport_generic_free_cmd(
struct se_cmd *cmd,
int wait_for_tasks,
- int release_to_pool,
int session_reinstatement)
{
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
- transport_release_cmd_to_pool(cmd);
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
+ transport_release_cmd(cmd);
else {
core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
- if (SE_LUN(cmd)) {
+ if (cmd->se_lun) {
#if 0
- printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
- " SE_LUN(cmd)\n", cmd,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("cmd: %p ITT: 0x%08x contains"
+ " cmd->se_lun\n", cmd,
+ cmd->se_tfo->get_task_tag(cmd));
#endif
transport_lun_remove_cmd(cmd);
}
@@ -5316,8 +4398,7 @@ void transport_generic_free_cmd(
transport_free_dev_tasks(cmd);
- transport_generic_remove(cmd, release_to_pool,
- session_reinstatement);
+ transport_generic_remove(cmd, session_reinstatement);
}
}
EXPORT_SYMBOL(transport_generic_free_cmd);
@@ -5343,43 +4424,36 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
* If the frontend has already requested this struct se_cmd to
* be stopped, we can safely ignore this struct se_cmd.
*/
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
- atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
- DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
- " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd));
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (atomic_read(&cmd->t_transport_stop)) {
+ atomic_set(&cmd->transport_lun_stop, 0);
+ pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
+ " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_cmd_check_stop(cmd, 1, 0);
- return -1;
+ return -EPERM;
}
- atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&cmd->transport_lun_fe_stop, 1);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+ wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
ret = transport_stop_tasks_for_cmd(cmd);
- DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
- " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret);
+ pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
+ " %d\n", cmd, cmd->t_task_list_num, ret);
if (!ret) {
- DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
- wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp);
- DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
+ cmd->se_tfo->get_task_tag(cmd));
+ wait_for_completion(&cmd->transport_lun_stop_comp);
+ pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
+ cmd->se_tfo->get_task_tag(cmd));
}
- transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+ transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
return 0;
}
-/* #define DEBUG_CLEAR_LUN */
-#ifdef DEBUG_CLEAR_LUN
-#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CLEAR_L(x...)
-#endif
-
static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
struct se_cmd *cmd = NULL;
@@ -5389,66 +4463,59 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
* Initiator Port.
*/
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- while (!list_empty_careful(&lun->lun_cmd_list)) {
- cmd = list_entry(lun->lun_cmd_list.next,
- struct se_cmd, se_lun_list);
- list_del(&cmd->se_lun_list);
-
- if (!(T_TASK(cmd))) {
- printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL"
- "[i,t]_state: %u/%u\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
- BUG();
- }
- atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+ while (!list_empty(&lun->lun_cmd_list)) {
+ cmd = list_first_entry(&lun->lun_cmd_list,
+ struct se_cmd, se_lun_node);
+ list_del(&cmd->se_lun_node);
+
+ atomic_set(&cmd->transport_lun_active, 0);
/*
* This will notify iscsi_target_transport.c:
* transport_cmd_check_stop() that a LUN shutdown is in
* progress for the iscsi_cmd_t.
*/
- spin_lock(&T_TASK(cmd)->t_state_lock);
- DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport"
+ spin_lock(&cmd->t_state_lock);
+ pr_debug("SE_LUN[%d] - Setting cmd->transport"
"_lun_stop for ITT: 0x%08x\n",
- SE_LUN(cmd)->unpacked_lun,
- CMD_TFO(cmd)->get_task_tag(cmd));
- atomic_set(&T_TASK(cmd)->transport_lun_stop, 1);
- spin_unlock(&T_TASK(cmd)->t_state_lock);
+ cmd->se_lun->unpacked_lun,
+ cmd->se_tfo->get_task_tag(cmd));
+ atomic_set(&cmd->transport_lun_stop, 1);
+ spin_unlock(&cmd->t_state_lock);
spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
- if (!(SE_LUN(cmd))) {
- printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
+ if (!cmd->se_lun) {
+ pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
BUG();
}
/*
* If the Storage engine still owns the iscsi_cmd_t, determine
* and/or stop its context.
*/
- DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
- "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
+ "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
+ cmd->se_tfo->get_task_tag(cmd));
- if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) {
+ if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
continue;
}
- DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
+ pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
"_wait_for_tasks(): SUCCESS\n",
- SE_LUN(cmd)->unpacked_lun,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_lun->unpacked_lun,
+ cmd->se_tfo->get_task_tag(cmd));
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
- if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
+ if (!atomic_read(&cmd->transport_dev_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
goto check_cond;
}
- atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ atomic_set(&cmd->transport_dev_active, 0);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
transport_free_dev_tasks(cmd);
/*
@@ -5465,24 +4532,24 @@ check_cond:
* be released, notify the waiting thread now that LU has
* finished accessing it.
*/
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
- if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) {
- DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
+ spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
+ if (atomic_read(&cmd->transport_lun_fe_stop)) {
+ pr_debug("SE_LUN[%d] - Detected FE stop for"
" struct se_cmd: %p ITT: 0x%08x\n",
lun->unpacked_lun,
- cmd, CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd, cmd->se_tfo->get_task_tag(cmd));
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_unlock_irqrestore(&cmd->t_state_lock,
cmd_flags);
transport_cmd_check_stop(cmd, 1, 0);
- complete(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+ complete(&cmd->transport_lun_fe_stop_comp);
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
continue;
}
- DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
- lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
+ lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
}
spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
@@ -5502,11 +4569,11 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
{
struct task_struct *kt;
- kt = kthread_run(transport_clear_lun_thread, (void *)lun,
+ kt = kthread_run(transport_clear_lun_thread, lun,
"tcm_cl_%u", lun->unpacked_lun);
if (IS_ERR(kt)) {
- printk(KERN_ERR "Unable to start clear_lun thread\n");
- return -1;
+ pr_err("Unable to start clear_lun thread\n");
+ return PTR_ERR(kt);
}
wait_for_completion(&lun->lun_shutdown_comp);
@@ -5528,20 +4595,20 @@ static void transport_generic_wait_for_tasks(
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
return;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
/*
* If we are already stopped due to an external event (ie: LUN shutdown)
* sleep until the connection can have the passed struct se_cmd back.
- * The T_TASK(cmd)->transport_lun_stopped_sem will be upped by
+ * The cmd->transport_lun_stopped_sem will be upped by
* transport_clear_lun_from_sessions() once the ConfigFS context caller
* has completed its operation on the struct se_cmd.
*/
- if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
+ if (atomic_read(&cmd->transport_lun_stop)) {
- DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
- " wait_for_completion(&T_TASK(cmd)transport_lun_fe"
+ pr_debug("wait_for_tasks: Stopping"
+ " wait_for_completion(&cmd->t_tasktransport_lun_fe"
"_stop_comp); for ITT: 0x%08x\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_tfo->get_task_tag(cmd));
/*
* There is a special case for WRITES where a FE exception +
* LUN shutdown means ConfigFS context is still sleeping on
@@ -5549,10 +4616,10 @@ static void transport_generic_wait_for_tasks(
* We go ahead and up transport_lun_stop_comp just to be sure
* here.
*/
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- complete(&T_TASK(cmd)->transport_lun_stop_comp);
- wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ complete(&cmd->transport_lun_stop_comp);
+ wait_for_completion(&cmd->transport_lun_fe_stop_comp);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
transport_all_task_dev_remove_state(cmd);
/*
@@ -5560,44 +4627,44 @@ static void transport_generic_wait_for_tasks(
* struct se_cmd, now owns the structure and can be released through
* normal means below.
*/
- DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
- " wait_for_completion(&T_TASK(cmd)transport_lun_fe_"
+ pr_debug("wait_for_tasks: Stopped"
+ " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
"stop_comp); for ITT: 0x%08x\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_tfo->get_task_tag(cmd));
- atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
+ atomic_set(&cmd->transport_lun_stop, 0);
}
- if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
- atomic_read(&T_TASK(cmd)->t_transport_aborted))
+ if (!atomic_read(&cmd->t_transport_active) ||
+ atomic_read(&cmd->t_transport_aborted))
goto remove;
- atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
+ atomic_set(&cmd->t_transport_stop, 1);
- DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
+ pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
- " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
+ " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
cmd->deferred_t_state);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+ wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
- wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp);
+ wait_for_completion(&cmd->t_transport_stop_comp);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- atomic_set(&T_TASK(cmd)->t_transport_active, 0);
- atomic_set(&T_TASK(cmd)->t_transport_stop, 0);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ atomic_set(&cmd->t_transport_active, 0);
+ atomic_set(&cmd->t_transport_stop, 0);
- DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion("
- "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("wait_for_tasks: Stopped wait_for_compltion("
+ "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
+ cmd->se_tfo->get_task_tag(cmd));
remove:
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (!remove_cmd)
return;
- transport_generic_free_cmd(cmd, 0, 0, session_reinstatement);
+ transport_generic_free_cmd(cmd, 0, session_reinstatement);
}
static int transport_get_sense_codes(
@@ -5632,13 +4699,13 @@ int transport_send_check_condition_and_sense(
int offset;
u8 asc = 0, ascq = 0;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (!reason && from_transport)
goto after_reason;
@@ -5651,7 +4718,7 @@ int transport_send_check_condition_and_sense(
* TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
* from include/scsi/scsi_cmnd.h
*/
- offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+ offset = cmd->se_tfo->set_fabric_sense_len(cmd,
TRANSPORT_SENSE_BUFFER);
/*
* Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
@@ -5788,8 +4855,7 @@ int transport_send_check_condition_and_sense(
cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
after_reason:
- CMD_TFO(cmd)->queue_status(cmd);
- return 0;
+ return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
@@ -5797,18 +4863,18 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
int ret = 0;
- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
- if (!(send_status) ||
+ if (atomic_read(&cmd->t_transport_aborted) != 0) {
+ if (!send_status ||
(cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
return 1;
#if 0
- printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
+ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
" status for CDB: 0x%02x ITT: 0x%08x\n",
- T_TASK(cmd)->t_task_cdb[0],
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->t_task_cdb[0],
+ cmd->se_tfo->get_task_tag(cmd));
#endif
cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
- CMD_TFO(cmd)->queue_status(cmd);
+ cmd->se_tfo->queue_status(cmd);
ret = 1;
}
return ret;
@@ -5824,8 +4890,8 @@ void transport_send_task_abort(struct se_cmd *cmd)
* queued back to fabric module by transport_check_aborted_status().
*/
if (cmd->data_direction == DMA_TO_DEVICE) {
- if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
+ if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+ atomic_inc(&cmd->t_transport_aborted);
smp_mb__after_atomic_inc();
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
transport_new_cmd_failure(cmd);
@@ -5834,11 +4900,11 @@ void transport_send_task_abort(struct se_cmd *cmd)
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
#if 0
- printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
- " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
+ " ITT: 0x%08x\n", cmd->t_task_cdb[0],
+ cmd->se_tfo->get_task_tag(cmd));
#endif
- CMD_TFO(cmd)->queue_status(cmd);
+ cmd->se_tfo->queue_status(cmd);
}
/* transport_generic_do_tmr():
@@ -5847,14 +4913,12 @@ void transport_send_task_abort(struct se_cmd *cmd)
*/
int transport_generic_do_tmr(struct se_cmd *cmd)
{
- struct se_cmd *ref_cmd;
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req;
int ret;
switch (tmr->function) {
case TMR_ABORT_TASK:
- ref_cmd = tmr->ref_cmd;
tmr->response = TMR_FUNCTION_REJECTED;
break;
case TMR_ABORT_TASK_SET:
@@ -5874,14 +4938,14 @@ int transport_generic_do_tmr(struct se_cmd *cmd)
tmr->response = TMR_FUNCTION_REJECTED;
break;
default:
- printk(KERN_ERR "Uknown TMR function: 0x%02x.\n",
+ pr_err("Uknown TMR function: 0x%02x.\n",
tmr->function);
tmr->response = TMR_FUNCTION_REJECTED;
break;
}
cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
- CMD_TFO(cmd)->queue_tm_rsp(cmd);
+ cmd->se_tfo->queue_tm_rsp(cmd);
transport_cmd_check_stop(cmd, 2, 0);
return 0;
@@ -5911,62 +4975,54 @@ transport_get_task_from_state_list(struct se_device *dev)
static void transport_processing_shutdown(struct se_device *dev)
{
struct se_cmd *cmd;
- struct se_queue_req *qr;
struct se_task *task;
- u8 state;
unsigned long flags;
/*
* Empty the struct se_device's struct se_task state list.
*/
spin_lock_irqsave(&dev->execute_task_lock, flags);
while ((task = transport_get_task_from_state_list(dev))) {
- if (!(TASK_CMD(task))) {
- printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+ if (!task->task_se_cmd) {
+ pr_err("task->task_se_cmd is NULL!\n");
continue;
}
- cmd = TASK_CMD(task);
+ cmd = task->task_se_cmd;
- if (!T_TASK(cmd)) {
- printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
- " %p ITT: 0x%08x\n", task, cmd,
- CMD_TFO(cmd)->get_task_tag(cmd));
- continue;
- }
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
- DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
- " i_state/def_i_state: %d/%d, t_state/def_t_state:"
+ pr_debug("PT: cmd: %p task: %p ITT: 0x%08x,"
+ " i_state: %d, t_state/def_t_state:"
" %d/%d cdb: 0x%02x\n", cmd, task,
- CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd),
cmd->t_state, cmd->deferred_t_state,
- T_TASK(cmd)->t_task_cdb[0]);
- DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
+ cmd->t_task_cdb[0]);
+ pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:"
" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
" t_transport_stop: %d t_transport_sent: %d\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- T_TASK(cmd)->t_task_cdbs,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
- atomic_read(&T_TASK(cmd)->t_transport_active),
- atomic_read(&T_TASK(cmd)->t_transport_stop),
- atomic_read(&T_TASK(cmd)->t_transport_sent));
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->t_task_list_num,
+ atomic_read(&cmd->t_task_cdbs_left),
+ atomic_read(&cmd->t_task_cdbs_sent),
+ atomic_read(&cmd->t_transport_active),
+ atomic_read(&cmd->t_transport_stop),
+ atomic_read(&cmd->t_transport_sent));
if (atomic_read(&task->task_active)) {
atomic_set(&task->task_stop, 1);
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
- DEBUG_DO("Waiting for task: %p to shutdown for dev:"
+ pr_debug("Waiting for task: %p to shutdown for dev:"
" %p\n", task, dev);
wait_for_completion(&task->task_stop_comp);
- DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
+ pr_debug("Completed task: %p shutdown for dev: %p\n",
task, dev);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ atomic_dec(&cmd->t_task_cdbs_left);
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
@@ -5976,72 +5032,72 @@ static void transport_processing_shutdown(struct se_device *dev)
}
__transport_stop_task_timer(task, &flags);
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+ if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
- DEBUG_DO("Skipping task: %p, dev: %p for"
+ pr_debug("Skipping task: %p, dev: %p for"
" t_task_cdbs_ex_left: %d\n", task, dev,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+ atomic_read(&cmd->t_task_cdbs_ex_left));
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
- if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
- DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
+ if (atomic_read(&cmd->t_transport_active)) {
+ pr_debug("got t_transport_active = 1 for task: %p, dev:"
" %p\n", task, dev);
- if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ if (atomic_read(&cmd->t_fe_count)) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
transport_send_check_condition_and_sense(
cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
0);
transport_remove_cmd_from_queue(cmd,
- SE_DEV(cmd)->dev_queue_obj);
+ &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop(cmd, 1, 0);
} else {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
transport_remove_cmd_from_queue(cmd,
- SE_DEV(cmd)->dev_queue_obj);
+ &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
- DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
+ pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n",
task, dev);
- if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ if (atomic_read(&cmd->t_fe_count)) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
transport_send_check_condition_and_sense(cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
transport_remove_cmd_from_queue(cmd,
- SE_DEV(cmd)->dev_queue_obj);
+ &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop(cmd, 1, 0);
} else {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
transport_remove_cmd_from_queue(cmd,
- SE_DEV(cmd)->dev_queue_obj);
+ &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
spin_lock_irqsave(&dev->execute_task_lock, flags);
@@ -6050,18 +5106,12 @@ static void transport_processing_shutdown(struct se_device *dev)
/*
* Empty the struct se_device's struct se_cmd list.
*/
- spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
- while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
- spin_unlock_irqrestore(
- &dev->dev_queue_obj->cmd_queue_lock, flags);
- cmd = (struct se_cmd *)qr->cmd;
- state = qr->state;
- kfree(qr);
-
- DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
- cmd, state);
-
- if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
+
+ pr_debug("From Device Queue: cmd: %p t_state: %d\n",
+ cmd, cmd->t_state);
+
+ if (atomic_read(&cmd->t_fe_count)) {
transport_send_check_condition_and_sense(cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
@@ -6070,11 +5120,9 @@ static void transport_processing_shutdown(struct se_device *dev)
} else {
transport_lun_remove_cmd(cmd);
if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
- spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
}
- spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
}
/* transport_processing_thread():
@@ -6083,16 +5131,15 @@ static void transport_processing_shutdown(struct se_device *dev)
*/
static int transport_processing_thread(void *param)
{
- int ret, t_state;
+ int ret;
struct se_cmd *cmd;
struct se_device *dev = (struct se_device *) param;
- struct se_queue_req *qr;
set_user_nice(current, -20);
while (!kthread_should_stop()) {
- ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
- atomic_read(&dev->dev_queue_obj->queue_cnt) ||
+ ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
+ atomic_read(&dev->dev_queue_obj.queue_cnt) ||
kthread_should_stop());
if (ret < 0)
goto out;
@@ -6108,22 +5155,18 @@ static int transport_processing_thread(void *param)
get_cmd:
__transport_execute_tasks(dev);
- qr = transport_get_qr_from_queue(dev->dev_queue_obj);
- if (!(qr))
+ cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
+ if (!cmd)
continue;
- cmd = (struct se_cmd *)qr->cmd;
- t_state = qr->state;
- kfree(qr);
-
- switch (t_state) {
+ switch (cmd->t_state) {
case TRANSPORT_NEW_CMD_MAP:
- if (!(CMD_TFO(cmd)->new_cmd_map)) {
- printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
+ if (!cmd->se_tfo->new_cmd_map) {
+ pr_err("cmd->se_tfo->new_cmd_map is"
" NULL for TRANSPORT_NEW_CMD_MAP\n");
BUG();
}
- ret = CMD_TFO(cmd)->new_cmd_map(cmd);
+ ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
cmd->transport_error_status = ret;
transport_generic_request_failure(cmd, NULL,
@@ -6134,7 +5177,9 @@ get_cmd:
/* Fall through */
case TRANSPORT_NEW_CMD:
ret = transport_generic_new_cmd(cmd);
- if (ret < 0) {
+ if (ret == -EAGAIN)
+ break;
+ else if (ret < 0) {
cmd->transport_error_status = ret;
transport_generic_request_failure(cmd, NULL,
0, (cmd->data_direction !=
@@ -6149,10 +5194,10 @@ get_cmd:
transport_generic_complete_ok(cmd);
break;
case TRANSPORT_REMOVE:
- transport_generic_remove(cmd, 1, 0);
+ transport_generic_remove(cmd, 0);
break;
case TRANSPORT_FREE_CMD_INTR:
- transport_generic_free_cmd(cmd, 0, 1, 0);
+ transport_generic_free_cmd(cmd, 0, 0);
break;
case TRANSPORT_PROCESS_TMR:
transport_generic_do_tmr(cmd);
@@ -6164,13 +5209,16 @@ get_cmd:
transport_stop_all_task_timers(cmd);
transport_generic_request_timeout(cmd);
break;
+ case TRANSPORT_COMPLETE_QF_WP:
+ transport_generic_write_pending(cmd);
+ break;
default:
- printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
+ pr_err("Unknown t_state: %d deferred_t_state:"
" %d for ITT: 0x%08x i_state: %d on SE LUN:"
- " %u\n", t_state, cmd->deferred_t_state,
- CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd),
- SE_LUN(cmd)->unpacked_lun);
+ " %u\n", cmd->t_state, cmd->deferred_t_state,
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd),
+ cmd->se_lun->unpacked_lun);
BUG();
}
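
A note on the target_core_transport.c hunks above: the device processing thread no longer dequeues an intermediate struct se_queue_req and frees it afterwards; transport_get_cmd_from_queue() now hands back the struct se_cmd itself and the dispatch keys off cmd->t_state. A condensed sketch of that consumer shape follows; it is distilled from the loop above rather than a compilable stand-alone excerpt, and the helper name is invented for illustration:

	/* Hypothetical helper showing the new dequeue-and-dispatch pattern. */
	static void example_service_one_cmd(struct se_device *dev)
	{
		struct se_cmd *cmd;

		/* The device queue now carries se_cmd nodes directly. */
		cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
		if (!cmd)
			return;

		switch (cmd->t_state) {	/* state lives on the command itself */
		case TRANSPORT_REMOVE:
			/* second argument is session_reinstatement;
			 * the old release_to_pool flag is gone */
			transport_generic_remove(cmd, 0);
			break;
		case TRANSPORT_FREE_CMD_INTR:
			/* new signature: (cmd, wait_for_tasks, session_reinstatement) */
			transport_generic_free_cmd(cmd, 0, 0);
			break;
		default:
			break;	/* remaining states handled as in the loop above */
		}
	}
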
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index df355176a37..31e3c652527 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -49,15 +49,15 @@ int core_scsi3_ua_check(
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
- if (!(sess))
+ if (!sess)
return 0;
nacl = sess->se_node_acl;
- if (!(nacl))
+ if (!nacl)
return 0;
deve = &nacl->device_list[cmd->orig_fe_lun];
- if (!(atomic_read(&deve->ua_count)))
+ if (!atomic_read(&deve->ua_count))
return 0;
/*
* From sam4r14, section 5.14 Unit attention condition:
@@ -80,10 +80,10 @@ int core_scsi3_ua_check(
case REQUEST_SENSE:
return 0;
default:
- return -1;
+ return -EINVAL;
}
- return -1;
+ return -EINVAL;
}
int core_scsi3_ua_allocate(
@@ -97,13 +97,13 @@ int core_scsi3_ua_allocate(
/*
* PASSTHROUGH OPS
*/
- if (!(nacl))
- return -1;
+ if (!nacl)
+ return -EINVAL;
ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
- if (!(ua)) {
- printk(KERN_ERR "Unable to allocate struct se_ua\n");
- return -1;
+ if (!ua) {
+ pr_err("Unable to allocate struct se_ua\n");
+ return -ENOMEM;
}
INIT_LIST_HEAD(&ua->ua_dev_list);
INIT_LIST_HEAD(&ua->ua_nacl_list);
@@ -177,9 +177,9 @@ int core_scsi3_ua_allocate(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
- printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
+ pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
" 0x%02x, ASCQ: 0x%02x\n",
- TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
+ nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
asc, ascq);
atomic_inc(&deve->ua_count);
@@ -208,23 +208,23 @@ void core_scsi3_ua_for_check_condition(
u8 *asc,
u8 *ascq)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
struct se_ua *ua = NULL, *ua_p;
int head = 1;
- if (!(sess))
+ if (!sess)
return;
nacl = sess->se_node_acl;
- if (!(nacl))
+ if (!nacl)
return;
spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun];
- if (!(atomic_read(&deve->ua_count))) {
+ if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock);
return;
}
@@ -240,7 +240,7 @@ void core_scsi3_ua_for_check_condition(
* highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it.
*/
- if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
+ if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
break;
@@ -264,13 +264,13 @@ void core_scsi3_ua_for_check_condition(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
- printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
+ pr_debug("[%s]: %s UNIT ATTENTION condition with"
" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
- TPG_TFO(nacl->se_tpg)->get_fabric_name(),
- (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
- "Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
- cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
+ nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+ "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
+ cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
}
int core_scsi3_ua_clear_for_request_sense(
@@ -284,18 +284,18 @@ int core_scsi3_ua_clear_for_request_sense(
struct se_ua *ua = NULL, *ua_p;
int head = 1;
- if (!(sess))
- return -1;
+ if (!sess)
+ return -EINVAL;
nacl = sess->se_node_acl;
- if (!(nacl))
- return -1;
+ if (!nacl)
+ return -EINVAL;
spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun];
- if (!(atomic_read(&deve->ua_count))) {
+ if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock);
- return -1;
+ return -EPERM;
}
/*
* The highest priority Unit Attentions are placed at the head of the
@@ -323,10 +323,10 @@ int core_scsi3_ua_clear_for_request_sense(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
- printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
+ pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
- " ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
+ " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
cmd->orig_fe_lun, *asc, *ascq);
- return (head) ? -1 : 0;
+ return (head) ? -EPERM : 0;
}
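
A note on the target_core_ua.c hunks above: they also show the commit's wider move from returning a bare -1 to distinct errno values, so callers can tell "nothing pending" (-EPERM from the UA helpers) apart from bad input (-EINVAL) or allocation failure (-ENOMEM). A small caller-side sketch under those assumptions; the wrapper name is invented for illustration:

	/* Hypothetical caller distinguishing the new errno returns. */
	static int example_report_ua(struct se_cmd *cmd, u8 *asc, u8 *ascq)
	{
		int ret = core_scsi3_ua_clear_for_request_sense(cmd, asc, ascq);

		if (ret == -EPERM)
			return 0;	/* no UNIT ATTENTION pending: not an error */
		if (ret < 0)
			return ret;	/* -EINVAL: no session or node ACL */

		/* ret == 0: *asc / *ascq hold the released sense codes */
		return 0;
	}
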
diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile
index 7a5c2b64cf6..20b14bb087c 100644
--- a/drivers/target/tcm_fc/Makefile
+++ b/drivers/target/tcm_fc/Makefile
@@ -1,15 +1,6 @@
-EXTRA_CFLAGS += -I$(srctree)/drivers/target/ \
- -I$(srctree)/drivers/scsi/ \
- -I$(srctree)/include/scsi/ \
- -I$(srctree)/drivers/target/tcm_fc/
-
-tcm_fc-y += tfc_cmd.o \
- tfc_conf.o \
- tfc_io.o \
- tfc_sess.o
+tcm_fc-y += tfc_cmd.o \
+ tfc_conf.o \
+ tfc_io.o \
+ tfc_sess.o
obj-$(CONFIG_TCM_FC) += tcm_fc.o
-
-ifdef CONFIGFS_TCM_FC_DEBUG
-EXTRA_CFLAGS += -DTCM_FC_DEBUG
-endif
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index defff32b788..bd4fe21a23b 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -23,30 +23,6 @@
#define FT_TPG_NAMELEN 32 /* max length of TPG name */
#define FT_LUN_NAMELEN 32 /* max length of LUN name */
-/*
- * Debug options.
- */
-#define FT_DEBUG_CONF 0x01 /* configuration messages */
-#define FT_DEBUG_SESS 0x02 /* session messages */
-#define FT_DEBUG_TM 0x04 /* TM operations */
-#define FT_DEBUG_IO 0x08 /* I/O commands */
-#define FT_DEBUG_DATA 0x10 /* Data transfer */
-
-extern unsigned int ft_debug_logging; /* debug options */
-
-#define FT_DEBUG(mask, fmt, args...) \
- do { \
- if (ft_debug_logging & (mask)) \
- printk(KERN_INFO "tcm_fc: %s: " fmt, \
- __func__, ##args); \
- } while (0)
-
-#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
-#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
-#define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args)
-#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args)
-#define FT_DATA_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_DATA, fmt, ##args)
-
struct ft_transport_id {
__u8 format;
__u8 __resvd1[7];
@@ -144,7 +120,7 @@ enum ft_cmd_state {
*/
struct ft_cmd {
enum ft_cmd_state state;
- u16 lun; /* LUN from request */
+ u32 lun; /* LUN from request */
struct ft_sess *sess; /* session held for cmd */
struct fc_seq *seq; /* sequence in exchange mgr */
struct se_cmd se_cmd; /* Local TCM I/O descriptor */
@@ -195,7 +171,6 @@ int ft_write_pending(struct se_cmd *);
int ft_write_pending_status(struct se_cmd *);
u32 ft_get_task_tag(struct se_cmd *);
int ft_get_cmd_state(struct se_cmd *);
-void ft_new_cmd_failure(struct se_cmd *);
int ft_queue_tm_resp(struct se_cmd *);
int ft_is_state_remove(struct se_cmd *);
@@ -212,4 +187,9 @@ void ft_dump_cmd(struct ft_cmd *, const char *caller);
ssize_t ft_format_wwn(char *, size_t, u64);
+/*
+ * Underlying HW specific helper function
+ */
+void ft_invl_hw_context(struct ft_cmd *);
+
#endif /* __TCM_FC_H__ */
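
A note on the tcm_fc.h hunk above: the driver-private FT_DEBUG()/FT_*_DBG() macros and the ft_debug_logging mask are removed, and the .c files switch to plain pr_debug(). With CONFIG_DYNAMIC_DEBUG those messages can be enabled per file or per call site at run time; without it they compile out unless DEBUG is defined. A minimal sketch of the replacement idiom, where the pr_fmt prefix is the usual kernel convention rather than something this patch itself adds:

	/* Must be defined before the first include that pulls in printk. */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/printk.h>

	static void example_trace(void *cmd, int state)
	{
		/* Replaces FT_IO_DBG(); no driver-private debug bitmask needed. */
		pr_debug("%s: cmd %p state %d\n", __func__, cmd, state);
	}

With dynamic debug these can be switched on at run time with something like: echo 'file tfc_io.c +p' > /sys/kernel/debug/dynamic_debug/control
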
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index c056a1132ae..5654dc22f7a 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -45,7 +45,6 @@
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
-#include <target/target_core_base.h>
#include <target/target_core_tmr.h>
#include <target/configfs_macros.h>
@@ -59,33 +58,30 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
struct fc_exch *ep;
struct fc_seq *sp;
struct se_cmd *se_cmd;
- struct se_mem *mem;
- struct se_transport_task *task;
-
- if (!(ft_debug_logging & FT_DEBUG_IO))
- return;
+ struct scatterlist *sg;
+ int count;
se_cmd = &cmd->se_cmd;
- printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
+ pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
- printk(KERN_INFO "%s: cmd %p cdb %p\n",
+ pr_debug("%s: cmd %p cdb %p\n",
caller, cmd, cmd->cdb);
- printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
-
- task = T_TASK(se_cmd);
- printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
- caller, cmd, task, task->t_tasks_se_num,
- task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
- if (task->t_mem_list)
- list_for_each_entry(mem, task->t_mem_list, se_list)
- printk(KERN_INFO "%s: cmd %p mem %p page %p "
- "len 0x%x off 0x%x\n",
- caller, cmd, mem,
- mem->se_page, mem->se_len, mem->se_off);
+ pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
+
+ pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
+ caller, cmd, se_cmd->t_data_nents,
+ se_cmd->data_length, se_cmd->se_cmd_flags);
+
+ for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
+ pr_debug("%s: cmd %p sg %p page %p "
+ "len 0x%x off 0x%x\n",
+ caller, cmd, sg,
+ sg_page(sg), sg->length, sg->offset);
+
sp = cmd->seq;
if (sp) {
ep = fc_seq_exch(sp);
- printk(KERN_INFO "%s: cmd %p sid %x did %x "
+ pr_debug("%s: cmd %p sid %x did %x "
"ox_id %x rx_id %x seq_id %x e_stat %x\n",
caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
sp->id, ep->esb_stat);
@@ -94,40 +90,19 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
}
-/*
- * Get LUN from CDB.
- */
-static int ft_get_lun_for_cmd(struct ft_cmd *cmd, u8 *lunp)
-{
- u64 lun;
-
- lun = lunp[1];
- switch (lunp[0] >> 6) {
- case 0:
- break;
- case 1:
- lun |= (lunp[0] & 0x3f) << 8;
- break;
- default:
- return -1;
- }
- if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
- return -1;
- cmd->lun = lun;
- return transport_get_lun_for_cmd(&cmd->se_cmd, NULL, lun);
-}
-
static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
{
- struct se_queue_obj *qobj;
+ struct ft_tpg *tpg = sess->tport->tpg;
+ struct se_queue_obj *qobj = &tpg->qobj;
unsigned long flags;
qobj = &sess->tport->tpg->qobj;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
atomic_inc(&qobj->queue_cnt);
- wake_up_interruptible(&qobj->thread_wq);
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+ wake_up_process(tpg->thread);
}
static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
@@ -172,7 +147,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)
void ft_check_stop_free(struct se_cmd *se_cmd)
{
- transport_generic_free_cmd(se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(se_cmd, 0, 0);
}
/*
@@ -279,18 +254,18 @@ int ft_write_pending(struct se_cmd *se_cmd)
(fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
/*
- * Map se_mem list to scatterlist, so that
- * DDP can be setup. DDP setup function require
- * scatterlist. se_mem_list is internal to
- * TCM/LIO target
+ * cmd may have been broken up into multiple
+ * tasks. Link their sgs together so we can
+ * operate on them all at once.
*/
transport_do_task_sg_chain(se_cmd);
- cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
+ cmd->sg = se_cmd->t_tasks_sg_chained;
cmd->sg_cnt =
- T_TASK(se_cmd)->t_tasks_sg_chained_no;
+ se_cmd->t_tasks_sg_chained_no;
}
- if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
- cmd->sg, cmd->sg_cnt))
+ if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
+ cmd->sg,
+ cmd->sg_cnt))
cmd->was_ddp_setup = 1;
}
}
@@ -317,12 +292,6 @@ int ft_is_state_remove(struct se_cmd *se_cmd)
return 0; /* XXX TBD */
}
-void ft_new_cmd_failure(struct se_cmd *se_cmd)
-{
- /* XXX TBD */
- printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd);
-}
-
/*
* FC sequence response handler for follow-on sequences (data) and aborts.
*/
@@ -335,7 +304,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
/* XXX need to find cmd if queued */
cmd->se_cmd.t_state = TRANSPORT_REMOVE;
cmd->seq = NULL;
- transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
return;
}
@@ -349,10 +318,11 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
case FC_RCTL_DD_SOL_CTL: /* transfer ready */
case FC_RCTL_DD_DATA_DESC: /* transfer ready */
default:
- printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
+ pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
+ ft_invl_hw_context(cmd);
fc_frame_free(fp);
- transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
break;
}
}
@@ -374,7 +344,7 @@ static void ft_send_resp_status(struct fc_lport *lport,
struct fcp_resp_rsp_info *info;
fh = fc_frame_header_get(rx_fp);
- FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
+ pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
len = sizeof(*fcp);
if (status == SAM_STAT_GOOD)
@@ -402,12 +372,23 @@ static void ft_send_resp_status(struct fc_lport *lport,
/*
* Send error or task management response.
- * Always frees the cmd and associated state.
*/
-static void ft_send_resp_code(struct ft_cmd *cmd, enum fcp_resp_rsp_codes code)
+static void ft_send_resp_code(struct ft_cmd *cmd,
+ enum fcp_resp_rsp_codes code)
{
ft_send_resp_status(cmd->sess->tport->lport,
cmd->req_frame, SAM_STAT_GOOD, code);
+}
+
+
+/*
+ * Send error or task management response.
+ * Always frees the cmd and associated state.
+ */
+static void ft_send_resp_code_and_free(struct ft_cmd *cmd,
+ enum fcp_resp_rsp_codes code)
+{
+ ft_send_resp_code(cmd, code);
ft_free_cmd(cmd);
}
@@ -418,6 +399,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
{
struct se_tmr_req *tmr;
struct fcp_cmnd *fcp;
+ struct ft_sess *sess;
u8 tm_func;
fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
@@ -425,13 +407,6 @@ static void ft_send_tm(struct ft_cmd *cmd)
switch (fcp->fc_tm_flags) {
case FCP_TMF_LUN_RESET:
tm_func = TMR_LUN_RESET;
- if (ft_get_lun_for_cmd(cmd, fcp->fc_lun) < 0) {
- ft_dump_cmd(cmd, __func__);
- transport_send_check_condition_and_sense(&cmd->se_cmd,
- cmd->se_cmd.scsi_sense_reason, 0);
- ft_sess_put(cmd->sess);
- return;
- }
break;
case FCP_TMF_TGT_RESET:
tm_func = TMR_TARGET_WARM_RESET;
@@ -450,19 +425,49 @@ static void ft_send_tm(struct ft_cmd *cmd)
* FCP4r01 indicates having a combination of
* tm_flags set is invalid.
*/
- FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
- ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
+ pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
+ ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
return;
}
- FT_TM_DBG("alloc tm cmd fn %d\n", tm_func);
+ pr_debug("alloc tm cmd fn %d\n", tm_func);
tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
if (!tmr) {
- FT_TM_DBG("alloc failed\n");
- ft_send_resp_code(cmd, FCP_TMF_FAILED);
+ pr_debug("alloc failed\n");
+ ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
return;
}
cmd->se_cmd.se_tmr_req = tmr;
+
+ switch (fcp->fc_tm_flags) {
+ case FCP_TMF_LUN_RESET:
+ cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
+ if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) {
+ /*
+ * Make sure to clean up newly allocated TMR request
+ * since "unable to handle TMR request because failed
+ * to get to LUN"
+ */
+ pr_debug("Failed to get LUN for TMR func %d, "
+ "se_cmd %p, unpacked_lun %d\n",
+ tm_func, &cmd->se_cmd, cmd->lun);
+ ft_dump_cmd(cmd, __func__);
+ sess = cmd->sess;
+ transport_send_check_condition_and_sense(&cmd->se_cmd,
+ cmd->se_cmd.scsi_sense_reason, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
+ ft_sess_put(sess);
+ return;
+ }
+ break;
+ case FCP_TMF_TGT_RESET:
+ case FCP_TMF_CLR_TASK_SET:
+ case FCP_TMF_ABT_TASK_SET:
+ case FCP_TMF_CLR_ACA:
+ break;
+ default:
+ return;
+ }
transport_generic_handle_tmr(&cmd->se_cmd);
}
@@ -494,7 +499,7 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
code = FCP_TMF_FAILED;
break;
}
- FT_TM_DBG("tmr fn %d resp %d fcp code %d\n",
+ pr_debug("tmr fn %d resp %d fcp code %d\n",
tmr->function, tmr->response, code);
ft_send_resp_code(cmd, code);
return 0;
@@ -522,7 +527,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
return;
busy:
- FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n");
+ pr_debug("cmd or seq allocation failure - sending BUSY\n");
ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
fc_frame_free(fp);
ft_sess_put(sess); /* undo get from lookup */
@@ -547,7 +552,7 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
case FC_RCTL_DD_DATA_DESC: /* transfer ready */
case FC_RCTL_ELS4_REQ: /* SRR, perhaps */
default:
- printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
+ pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
fc_frame_free(fp);
ft_sess_put(sess); /* undo get from lookup */
@@ -635,7 +640,8 @@ static void ft_send_cmd(struct ft_cmd *cmd)
fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
- ret = ft_get_lun_for_cmd(cmd, fcp->fc_lun);
+ cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
+ ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
if (ret < 0) {
ft_dump_cmd(cmd, __func__);
transport_send_check_condition_and_sense(&cmd->se_cmd,
@@ -645,30 +651,29 @@ static void ft_send_cmd(struct ft_cmd *cmd)
ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
- FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
+ pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
ft_dump_cmd(cmd, __func__);
- if (ret == -1) {
+ if (ret == -ENOMEM) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
- transport_generic_free_cmd(se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(se_cmd, 0, 0);
return;
}
- if (ret == -2) {
+ if (ret == -EINVAL) {
if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
ft_queue_status(se_cmd);
else
transport_send_check_condition_and_sense(se_cmd,
se_cmd->scsi_sense_reason, 0);
- transport_generic_free_cmd(se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(se_cmd, 0, 0);
return;
}
transport_generic_handle_cdb(se_cmd);
return;
err:
- ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
- return;
+ ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}
/*
@@ -676,7 +681,7 @@ err:
*/
static void ft_exec_req(struct ft_cmd *cmd)
{
- FT_IO_DBG("cmd state %x\n", cmd->state);
+ pr_debug("cmd state %x\n", cmd->state);
switch (cmd->state) {
case FC_CMD_ST_NEW:
ft_send_cmd(cmd);
@@ -695,15 +700,12 @@ int ft_thread(void *arg)
struct ft_tpg *tpg = arg;
struct se_queue_obj *qobj = &tpg->qobj;
struct ft_cmd *cmd;
- int ret;
-
- set_user_nice(current, -20);
while (!kthread_should_stop()) {
- ret = wait_event_interruptible(qobj->thread_wq,
- atomic_read(&qobj->queue_cnt) || kthread_should_stop());
- if (ret < 0 || kthread_should_stop())
+ schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
+ if (kthread_should_stop())
goto out;
+
cmd = ft_dequeue_cmd(qobj);
if (cmd)
ft_exec_req(cmd);
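
A note on the tfc_cmd.c hunks above: the driver-local ft_get_lun_for_cmd(), which only decoded the two simplest SAM LUN address formats, is replaced by the generic scsilun_to_int() plus transport_lookup_cmd_lun() / transport_lookup_tmr_lun(). A trimmed sketch of the decode step; the wrapper name is invented, and the header shown is where scsilun_to_int() was declared around this kernel version:

	#include <scsi/scsi_device.h>	/* scsilun_to_int(), struct scsi_lun */

	/* Hypothetical wrapper around the lookup done in ft_send_cmd(). */
	static int example_resolve_lun(struct ft_cmd *cmd, struct fcp_cmnd *fcp)
	{
		/* fc_lun is the raw 8-byte LUN field from the FCP_CMND IU. */
		cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);

		/* The core resolves the unpacked LUN to an se_lun for this session. */
		return transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
	}
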
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 84e868c255d..8781d1e423d 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -48,7 +48,6 @@
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
-#include <target/target_core_base.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
@@ -106,7 +105,7 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
}
err = 4;
fail:
- FT_CONF_DBG("err %u len %zu pos %u byte %u\n",
+ pr_debug("err %u len %zu pos %u byte %u\n",
err, cp - name, pos, byte);
return -1;
}
@@ -216,14 +215,14 @@ static struct se_node_acl *ft_add_acl(
u64 wwpn;
u32 q_depth;
- FT_CONF_DBG("add acl %s\n", name);
+ pr_debug("add acl %s\n", name);
tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return ERR_PTR(-EINVAL);
acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
- if (!(acl))
+ if (!acl)
return ERR_PTR(-ENOMEM);
acl->node_auth.port_name = wwpn;
@@ -239,11 +238,11 @@ static void ft_del_acl(struct se_node_acl *se_acl)
struct ft_node_acl *acl = container_of(se_acl,
struct ft_node_acl, se_node_acl);
- FT_CONF_DBG("del acl %s\n",
+ pr_debug("del acl %s\n",
config_item_name(&se_acl->acl_group.cg_item));
tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
- FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n",
+ pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n",
acl, se_acl, tpg, &tpg->se_tpg);
core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
@@ -260,11 +259,11 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
spin_lock_bh(&se_tpg->acl_node_lock);
list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
- FT_CONF_DBG("acl %p port_name %llx\n",
+ pr_debug("acl %p port_name %llx\n",
acl, (unsigned long long)acl->node_auth.port_name);
if (acl->node_auth.port_name == rdata->ids.port_name ||
acl->node_auth.node_name == rdata->ids.node_name) {
- FT_CONF_DBG("acl %p port_name %llx matched\n", acl,
+ pr_debug("acl %p port_name %llx matched\n", acl,
(unsigned long long)rdata->ids.port_name);
found = acl;
/* XXX need to hold onto ACL */
@@ -280,11 +279,11 @@ struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
struct ft_node_acl *acl;
acl = kzalloc(sizeof(*acl), GFP_KERNEL);
- if (!(acl)) {
- printk(KERN_ERR "Unable to allocate struct ft_node_acl\n");
+ if (!acl) {
+ pr_err("Unable to allocate struct ft_node_acl\n");
return NULL;
}
- FT_CONF_DBG("acl %p\n", acl);
+ pr_debug("acl %p\n", acl);
return &acl->se_node_acl;
}
@@ -294,7 +293,7 @@ static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
struct ft_node_acl *acl = container_of(se_acl,
struct ft_node_acl, se_node_acl);
- FT_CONF_DBG(KERN_INFO "acl %p\n", acl);
+ pr_debug("acl %p\n", acl);
kfree(acl);
}
@@ -311,7 +310,7 @@ static struct se_portal_group *ft_add_tpg(
unsigned long index;
int ret;
- FT_CONF_DBG("tcm_fc: add tpg %s\n", name);
+ pr_debug("tcm_fc: add tpg %s\n", name);
/*
* Name must be "tpgt_" followed by the index.
@@ -331,7 +330,7 @@ static struct se_portal_group *ft_add_tpg(
transport_init_queue_obj(&tpg->qobj);
ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
- (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
return NULL;
@@ -354,7 +353,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
- FT_CONF_DBG("del tpg %s\n",
+ pr_debug("del tpg %s\n",
config_item_name(&tpg->se_tpg.tpg_group.cg_item));
kthread_stop(tpg->thread);
@@ -412,7 +411,7 @@ static struct se_wwn *ft_add_lport(
struct ft_lport_acl *old_lacl;
u64 wwpn;
- FT_CONF_DBG("add lport %s\n", name);
+ pr_debug("add lport %s\n", name);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return NULL;
lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
@@ -441,7 +440,7 @@ static void ft_del_lport(struct se_wwn *wwn)
struct ft_lport_acl *lacl = container_of(wwn,
struct ft_lport_acl, fc_lport_wwn);
- FT_CONF_DBG("del lport %s\n",
+ pr_debug("del lport %s\n",
config_item_name(&wwn->wwn_group.cg_item));
mutex_lock(&ft_lport_lock);
list_del(&lacl->list);
@@ -536,8 +535,7 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.tpg_release_fabric_acl = ft_tpg_release_fabric_acl,
.tpg_get_inst_index = ft_tpg_get_inst_index,
.check_stop_free = ft_check_stop_free,
- .release_cmd_to_pool = ft_release_cmd,
- .release_cmd_direct = ft_release_cmd,
+ .release_cmd = ft_release_cmd,
.shutdown_session = ft_sess_shutdown,
.close_session = ft_sess_close,
.stop_session = ft_sess_stop,
@@ -550,7 +548,6 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.set_default_node_attributes = ft_set_default_node_attr,
.get_task_tag = ft_get_task_tag,
.get_cmd_state = ft_get_cmd_state,
- .new_cmd_failure = ft_new_cmd_failure,
.queue_data_in = ft_queue_data_in,
.queue_status = ft_queue_status,
.queue_tm_rsp = ft_queue_tm_resp,
@@ -582,10 +579,10 @@ int ft_register_configfs(void)
* Register the top level struct config_item_type with TCM core
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
- if (!fabric) {
- printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n",
+ if (IS_ERR(fabric)) {
+ pr_err("%s: target_fabric_configfs_init() failed!\n",
__func__);
- return -1;
+ return PTR_ERR(fabric);
}
fabric->tf_ops = ft_fabric_ops;
@@ -610,11 +607,8 @@ int ft_register_configfs(void)
*/
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
- FT_CONF_DBG("target_fabric_configfs_register() for"
+ pr_debug("target_fabric_configfs_register() for"
" FC Target failed!\n");
- printk(KERN_INFO
- "%s: target_fabric_configfs_register() failed!\n",
- __func__);
target_fabric_configfs_free(fabric);
return -1;
}
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 4c3c0efbe13..c37f4cd9645 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -39,6 +39,7 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
+#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -53,7 +54,6 @@
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
-#include <target/target_core_base.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
@@ -65,21 +65,20 @@
int ft_queue_data_in(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
- struct se_transport_task *task;
struct fc_frame *fp = NULL;
struct fc_exch *ep;
struct fc_lport *lport;
- struct se_mem *mem;
+ struct scatterlist *sg = NULL;
size_t remaining;
u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
- u32 mem_off;
+ u32 mem_off = 0;
u32 fh_off = 0;
u32 frame_off = 0;
size_t frame_len = 0;
- size_t mem_len;
+ size_t mem_len = 0;
size_t tlen;
size_t off_in_page;
- struct page *page;
+ struct page *page = NULL;
int use_sg;
int error;
void *page_addr;
@@ -90,24 +89,17 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
lport = ep->lp;
cmd->seq = lport->tt.seq_start_next(cmd->seq);
- task = T_TASK(se_cmd);
- BUG_ON(!task);
remaining = se_cmd->data_length;
/*
- * Setup to use first mem list entry if any.
+ * Setup to use first mem list entry, unless no data.
*/
- if (task->t_tasks_se_num) {
- mem = list_first_entry(task->t_mem_list,
- struct se_mem, se_list);
- mem_len = mem->se_len;
- mem_off = mem->se_off;
- page = mem->se_page;
- } else {
- mem = NULL;
- mem_len = remaining;
- mem_off = 0;
- page = NULL;
+ BUG_ON(remaining && !se_cmd->t_data_sg);
+ if (remaining) {
+ sg = se_cmd->t_data_sg;
+ mem_len = sg->length;
+ mem_off = sg->offset;
+ page = sg_page(sg);
}
/* no scatter/gather in skb for odd word length due to fc_seq_send() */
@@ -115,12 +107,10 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
while (remaining) {
if (!mem_len) {
- BUG_ON(!mem);
- mem = list_entry(mem->se_list.next,
- struct se_mem, se_list);
- mem_len = min((size_t)mem->se_len, remaining);
- mem_off = mem->se_off;
- page = mem->se_page;
+ sg = sg_next(sg);
+ mem_len = min((size_t)sg->length, remaining);
+ mem_off = sg->offset;
+ page = sg_page(sg);
}
if (!frame_len) {
/*
@@ -148,18 +138,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
tlen = min(mem_len, frame_len);
if (use_sg) {
- if (!mem) {
- BUG_ON(!task->t_task_buf);
- page_addr = task->t_task_buf + mem_off;
- /*
- * In this case, offset is 'offset_in_page' of
- * (t_task_buf + mem_off) instead of 'mem_off'.
- */
- off_in_page = offset_in_page(page_addr);
- page = virt_to_page(page_addr);
- tlen = min(tlen, PAGE_SIZE - off_in_page);
- } else
- off_in_page = mem_off;
+ off_in_page = mem_off;
BUG_ON(!page);
get_page(page);
skb_fill_page_desc(fp_skb(fp),
@@ -169,7 +148,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
fp_skb(fp)->data_len += tlen;
fp_skb(fp)->truesize +=
PAGE_SIZE << compound_order(page);
- } else if (mem) {
+ } else {
BUG_ON(!page);
from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
KM_SOFTIRQ0);
@@ -180,10 +159,6 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
memcpy(to, from, tlen);
kunmap_atomic(page_addr, KM_SOFTIRQ0);
to += tlen;
- } else {
- from = task->t_task_buf + mem_off;
- memcpy(to, from, tlen);
- to += tlen;
}
mem_off += tlen;
@@ -201,9 +176,8 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
error = lport->tt.seq_send(lport, cmd->seq, fp);
if (error) {
/* XXX For now, initiator will retry */
- if (printk_ratelimit())
- printk(KERN_ERR "%s: Failed to send frame %p, "
- "xid <0x%x>, remaining <0x%x>, "
+ pr_err_ratelimited("%s: Failed to send frame %p, "
+ "xid <0x%x>, remaining %zu, "
"lso_max <0x%x>\n",
__func__, fp, ep->xid,
remaining, lport->lso_max);
@@ -221,84 +195,67 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
struct fc_seq *seq = cmd->seq;
struct fc_exch *ep;
struct fc_lport *lport;
- struct se_transport_task *task;
struct fc_frame_header *fh;
- struct se_mem *mem;
- u32 mem_off;
+ struct scatterlist *sg = NULL;
+ u32 mem_off = 0;
u32 rel_off;
size_t frame_len;
- size_t mem_len;
+ size_t mem_len = 0;
size_t tlen;
- struct page *page;
+ struct page *page = NULL;
void *page_addr;
void *from;
void *to;
u32 f_ctl;
void *buf;
- task = T_TASK(se_cmd);
- BUG_ON(!task);
-
fh = fc_frame_header_get(fp);
if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
goto drop;
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ ep = fc_seq_exch(seq);
+ lport = ep->lp;
+ if (cmd->was_ddp_setup) {
+ BUG_ON(!ep);
+ BUG_ON(!lport);
+ }
+
/*
- * Doesn't expect even single byte of payload. Payload
+ * Doesn't expect a payload if DDP is set up. The payload
* is expected to be copied directly to user buffers
- * due to DDP (Large Rx offload) feature, hence
- * BUG_ON if BUF is non-NULL
+ * due to DDP (Large Rx offload).
*/
buf = fc_frame_payload_get(fp, 1);
- if (cmd->was_ddp_setup && buf) {
- printk(KERN_INFO "%s: When DDP was setup, not expected to"
- "receive frame with payload, Payload shall be"
- "copied directly to buffer instead of coming "
- "via. legacy receive queues\n", __func__);
- BUG_ON(buf);
- }
+ if (buf)
+ pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
+ "cmd->sg_cnt 0x%x. DDP was setup"
+ " hence not expected to receive frame with "
+ "payload. Frame will be dropped if "
+ "'Sequence Initiative' bit in f_ctl is "
+ "not set\n", __func__, ep->xid, f_ctl,
+ cmd->sg, cmd->sg_cnt);
+ /*
+ * Invalidate the HW DDP context if it was set up for the respective
+ * command. Invalidation of the HW DDP context is required in both
+ * situations (success and error).
+ */
+ ft_invl_hw_context(cmd);
/*
- * If ft_cmd indicated 'ddp_setup', in that case only the last frame
- * should come with 'TSI bit being set'. If 'TSI bit is not set and if
- * data frame appears here, means error condition. In both the cases
- * release the DDP context (ddp_put) and in error case, as well
- * initiate error recovery mechanism.
+ * If the "Sequence Initiative (TSI)" bit is set in f_ctl, the last
+ * write data frame was received successfully: the payload was
+ * posted directly to the user buffer and only the last frame's
+ * header is posted in the receive queue.
+ *
+ * If the "Sequence Initiative (TSI)" bit is not set, it means an error
+ * condition w.r.t. DDP, so drop the packet and let explicit ABORTS
+ * from the other end of the exchange timer trigger the recovery.
*/
- ep = fc_seq_exch(seq);
- if (cmd->was_ddp_setup) {
- BUG_ON(!ep);
- lport = ep->lp;
- BUG_ON(!lport);
- }
- if (cmd->was_ddp_setup && ep->xid != FC_XID_UNKNOWN) {
- f_ctl = ntoh24(fh->fh_f_ctl);
- /*
- * If TSI bit set in f_ctl, means last write data frame is
- * received successfully where payload is posted directly
- * to user buffer and only the last frame's header is posted
- * in legacy receive queue
- */
- if (f_ctl & FC_FC_SEQ_INIT) { /* TSI bit set in FC frame */
- cmd->write_data_len = lport->tt.ddp_done(lport,
- ep->xid);
- goto last_frame;
- } else {
- /*
- * Updating the write_data_len may be meaningless at
- * this point, but just in case if required in future
- * for debugging or any other purpose
- */
- printk(KERN_ERR "%s: Received frame with TSI bit not"
- " being SET, dropping the frame, "
- "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
- __func__, cmd->sg, cmd->sg_cnt);
- cmd->write_data_len = lport->tt.ddp_done(lport,
- ep->xid);
- lport->tt.seq_exch_abort(cmd->seq, 0);
- goto drop;
- }
- }
+ if (f_ctl & FC_FC_SEQ_INIT)
+ goto last_frame;
+ else
+ goto drop;
rel_off = ntohl(fh->fh_parm_offset);
frame_len = fr_len(fp);
@@ -312,29 +269,22 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
frame_len = se_cmd->data_length - rel_off;
/*
- * Setup to use first mem list entry if any.
+ * Setup to use first mem list entry, unless no data.
*/
- if (task->t_tasks_se_num) {
- mem = list_first_entry(task->t_mem_list,
- struct se_mem, se_list);
- mem_len = mem->se_len;
- mem_off = mem->se_off;
- page = mem->se_page;
- } else {
- mem = NULL;
- page = NULL;
- mem_off = 0;
- mem_len = frame_len;
+ BUG_ON(frame_len && !se_cmd->t_data_sg);
+ if (frame_len) {
+ sg = se_cmd->t_data_sg;
+ mem_len = sg->length;
+ mem_off = sg->offset;
+ page = sg_page(sg);
}
while (frame_len) {
if (!mem_len) {
- BUG_ON(!mem);
- mem = list_entry(mem->se_list.next,
- struct se_mem, se_list);
- mem_len = mem->se_len;
- mem_off = mem->se_off;
- page = mem->se_page;
+ sg = sg_next(sg);
+ mem_len = sg->length;
+ mem_off = sg->offset;
+ page = sg_page(sg);
}
if (rel_off >= mem_len) {
rel_off -= mem_len;
@@ -347,19 +297,15 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
tlen = min(mem_len, frame_len);
- if (mem) {
- to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
- KM_SOFTIRQ0);
- page_addr = to;
- to += mem_off & ~PAGE_MASK;
- tlen = min(tlen, (size_t)(PAGE_SIZE -
- (mem_off & ~PAGE_MASK)));
- memcpy(to, from, tlen);
- kunmap_atomic(page_addr, KM_SOFTIRQ0);
- } else {
- to = task->t_task_buf + mem_off;
- memcpy(to, from, tlen);
- }
+ to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
+ KM_SOFTIRQ0);
+ page_addr = to;
+ to += mem_off & ~PAGE_MASK;
+ tlen = min(tlen, (size_t)(PAGE_SIZE -
+ (mem_off & ~PAGE_MASK)));
+ memcpy(to, from, tlen);
+ kunmap_atomic(page_addr, KM_SOFTIRQ0);
+
from += tlen;
frame_len -= tlen;
mem_off += tlen;
@@ -372,3 +318,39 @@ last_frame:
drop:
fc_frame_free(fp);
}
+
+/*
+ * Handle and cleanup any HW specific resources if
+ * received ABORTS, errors, timeouts.
+ */
+void ft_invl_hw_context(struct ft_cmd *cmd)
+{
+ struct fc_seq *seq = cmd->seq;
+ struct fc_exch *ep = NULL;
+ struct fc_lport *lport = NULL;
+
+ BUG_ON(!cmd);
+
+ /* Cleanup the DDP context in HW if DDP was setup */
+ if (cmd->was_ddp_setup && seq) {
+ ep = fc_seq_exch(seq);
+ if (ep) {
+ lport = ep->lp;
+ if (lport && (ep->xid <= lport->lro_xid))
+ /*
+ * "ddp_done" triggers invalidation of the
+ * HW-specific DDP context
+ */
+ cmd->write_data_len = lport->tt.ddp_done(lport,
+ ep->xid);
+
+ /*
+ * Reset the same variable to indicate that the HW's
+ * DDP context has been invalidated, to avoid
+ * re-invalidation of the same context (the context
+ * is identified using ep->xid)
+ */
+ cmd->was_ddp_setup = 0;
+ }
+ }
+}
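
The tfc_io.c changes above drop the private se_mem list walk in favour of the generic scatterlist handed out in se_cmd->t_data_sg, advancing with sg_next()/sg_page() and clamping each copy to a page boundary. Below is a hedged, self-contained sketch of the same walk pattern; copy_from_sgl() is an illustrative name, not a driver function.

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Illustrative helper (not part of tcm_fc): copy up to 'len' bytes out of
 * a scatterlist into a flat buffer, one page fragment at a time.
 */
static size_t copy_from_sgl(struct scatterlist *sg, void *dest, size_t len)
{
	size_t copied = 0;

	while (sg && copied < len) {
		struct page *page = sg_page(sg);
		unsigned int off = sg->offset;
		size_t remain = min((size_t)sg->length, len - copied);

		while (remain) {
			/* Clamp each copy to the current page boundary. */
			size_t tlen = min(remain, (size_t)(PAGE_SIZE -
					  (off & ~PAGE_MASK)));
			void *vaddr = kmap_atomic(page + (off >> PAGE_SHIFT),
						  KM_SOFTIRQ0);

			memcpy(dest + copied, vaddr + (off & ~PAGE_MASK), tlen);
			kunmap_atomic(vaddr, KM_SOFTIRQ0);
			off += tlen;
			copied += tlen;
			remain -= tlen;
		}
		sg = sg_next(sg);	/* advance to the next entry, if any */
	}
	return copied;
}
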
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index a3bd57f2ea3..dbb5eaeee39 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -46,10 +46,8 @@
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
-#include <target/target_core_base.h>
#include <target/configfs_macros.h>
-#include <scsi/libfc.h>
#include "tcm_fc.h"
static void ft_sess_delete_all(struct ft_tport *);
@@ -198,13 +196,13 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
- FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
+ pr_debug("port_id %x found %p\n", port_id, sess);
return sess;
}
}
out:
rcu_read_unlock();
- FT_SESS_DBG("port_id %x not found\n", port_id);
+ pr_debug("port_id %x not found\n", port_id);
return NULL;
}
@@ -229,7 +227,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
return NULL;
sess->se_sess = transport_init_session();
- if (!sess->se_sess) {
+ if (IS_ERR(sess->se_sess)) {
kfree(sess);
return NULL;
}
@@ -240,7 +238,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
hlist_add_head_rcu(&sess->hash, head);
tport->sess_count++;
- FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);
+ pr_debug("port_id %x sess %p\n", port_id, sess);
transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
sess->se_sess, sess);
@@ -314,7 +312,7 @@ int ft_sess_shutdown(struct se_session *se_sess)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
- FT_SESS_DBG("port_id %x\n", sess->port_id);
+ pr_debug("port_id %x\n", sess->port_id);
return 1;
}
@@ -332,10 +330,10 @@ void ft_sess_close(struct se_session *se_sess)
lport = sess->tport->lport;
port_id = sess->port_id;
if (port_id == -1) {
- mutex_lock(&ft_lport_lock);
+ mutex_unlock(&ft_lport_lock);
return;
}
- FT_SESS_DBG("port_id %x\n", port_id);
+ pr_debug("port_id %x\n", port_id);
ft_sess_unhash(sess);
mutex_unlock(&ft_lport_lock);
transport_deregister_session_configfs(se_sess);
@@ -348,7 +346,7 @@ void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
- FT_SESS_DBG("port_id %x\n", sess->port_id);
+ pr_debug("port_id %x\n", sess->port_id);
}
int ft_sess_logged_in(struct se_session *se_sess)
@@ -458,7 +456,7 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
mutex_lock(&ft_lport_lock);
ret = ft_prli_locked(rdata, spp_len, rspp, spp);
mutex_unlock(&ft_lport_lock);
- FT_SESS_DBG("port_id %x flags %x ret %x\n",
+ pr_debug("port_id %x flags %x ret %x\n",
rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
return ret;
}
@@ -518,11 +516,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
struct ft_sess *sess;
u32 sid = fc_frame_sid(fp);
- FT_SESS_DBG("sid %x\n", sid);
+ pr_debug("sid %x\n", sid);
sess = ft_sess_get(lport, sid);
if (!sess) {
- FT_SESS_DBG("sid %x sess lookup failed\n", sid);
+ pr_debug("sid %x sess lookup failed\n", sid);
/* TBD XXX - if FCP_CMND, send PRLO */
fc_frame_free(fp);
return;
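
ft_sess_get(), touched above, is the usual RCU-plus-kref lookup: readers traverse under rcu_read_lock() and pin the object with kref_get() before leaving the read-side section. A stripped-down sketch of that pattern over a plain RCU-protected list follows; struct my_sess and my_sess_list are hypothetical.

#include <linux/types.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/kref.h>

struct my_sess {			/* hypothetical session object */
	struct list_head list;		/* linked on an RCU-protected list */
	u32 port_id;
	struct kref kref;
};

static LIST_HEAD(my_sess_list);

/*
 * Illustrative lookup: walk the list under rcu_read_lock() and take a
 * reference before leaving the read-side section, so the object cannot
 * be freed while the caller still uses it.
 */
static struct my_sess *my_sess_get(u32 port_id)
{
	struct my_sess *s;

	rcu_read_lock();
	list_for_each_entry_rcu(s, &my_sess_list, list) {
		if (s->port_id == port_id) {
			kref_get(&s->kref);	/* pin before unlocking */
			rcu_read_unlock();
			return s;
		}
	}
	rcu_read_unlock();
	return NULL;
}
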
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index bf7c687519e..f7f71b2d310 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -14,11 +14,7 @@ menuconfig THERMAL
If you want this support, you should say Y or M here.
config THERMAL_HWMON
- bool "Hardware monitoring support"
+ bool
depends on THERMAL
depends on HWMON=y || HWMON=THERMAL
- help
- The generic thermal sysfs driver's hardware monitoring support
- requires a 2.10.7/3.0.2 or later lm-sensors userspace.
-
- Say Y if your user-space is new enough.
+ default y
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 0b1c82ad680..708f8e92771 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -420,6 +420,29 @@ thermal_cooling_device_trip_point_show(struct device *dev,
/* hwmon sys I/F */
#include <linux/hwmon.h>
+
+/* thermal zone devices with the same type share one hwmon device */
+struct thermal_hwmon_device {
+ char type[THERMAL_NAME_LENGTH];
+ struct device *device;
+ int count;
+ struct list_head tz_list;
+ struct list_head node;
+};
+
+struct thermal_hwmon_attr {
+ struct device_attribute attr;
+ char name[16];
+};
+
+/* one temperature input for each thermal zone */
+struct thermal_hwmon_temp {
+ struct list_head hwmon_node;
+ struct thermal_zone_device *tz;
+ struct thermal_hwmon_attr temp_input; /* hwmon sys attr */
+ struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */
+};
+
static LIST_HEAD(thermal_hwmon_list);
static ssize_t
@@ -437,9 +460,10 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
int ret;
struct thermal_hwmon_attr *hwmon_attr
= container_of(attr, struct thermal_hwmon_attr, attr);
- struct thermal_zone_device *tz
- = container_of(hwmon_attr, struct thermal_zone_device,
+ struct thermal_hwmon_temp *temp
+ = container_of(hwmon_attr, struct thermal_hwmon_temp,
temp_input);
+ struct thermal_zone_device *tz = temp->tz;
ret = tz->ops->get_temp(tz, &temperature);
@@ -455,9 +479,10 @@ temp_crit_show(struct device *dev, struct device_attribute *attr,
{
struct thermal_hwmon_attr *hwmon_attr
= container_of(attr, struct thermal_hwmon_attr, attr);
- struct thermal_zone_device *tz
- = container_of(hwmon_attr, struct thermal_zone_device,
+ struct thermal_hwmon_temp *temp
+ = container_of(hwmon_attr, struct thermal_hwmon_temp,
temp_crit);
+ struct thermal_zone_device *tz = temp->tz;
long temperature;
int ret;
@@ -469,22 +494,54 @@ temp_crit_show(struct device *dev, struct device_attribute *attr,
}
-static int
-thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+static struct thermal_hwmon_device *
+thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz)
{
struct thermal_hwmon_device *hwmon;
- int new_hwmon_device = 1;
- int result;
mutex_lock(&thermal_list_lock);
list_for_each_entry(hwmon, &thermal_hwmon_list, node)
if (!strcmp(hwmon->type, tz->type)) {
- new_hwmon_device = 0;
mutex_unlock(&thermal_list_lock);
- goto register_sys_interface;
+ return hwmon;
+ }
+ mutex_unlock(&thermal_list_lock);
+
+ return NULL;
+}
+
+/* Find the temperature input matching a given thermal zone */
+static struct thermal_hwmon_temp *
+thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
+ const struct thermal_zone_device *tz)
+{
+ struct thermal_hwmon_temp *temp;
+
+ mutex_lock(&thermal_list_lock);
+ list_for_each_entry(temp, &hwmon->tz_list, hwmon_node)
+ if (temp->tz == tz) {
+ mutex_unlock(&thermal_list_lock);
+ return temp;
}
mutex_unlock(&thermal_list_lock);
+ return NULL;
+}
+
+static int
+thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+ struct thermal_hwmon_device *hwmon;
+ struct thermal_hwmon_temp *temp;
+ int new_hwmon_device = 1;
+ int result;
+
+ hwmon = thermal_hwmon_lookup_by_type(tz);
+ if (hwmon) {
+ new_hwmon_device = 0;
+ goto register_sys_interface;
+ }
+
hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL);
if (!hwmon)
return -ENOMEM;
@@ -502,30 +559,36 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
goto free_mem;
register_sys_interface:
- tz->hwmon = hwmon;
+ temp = kzalloc(sizeof(struct thermal_hwmon_temp), GFP_KERNEL);
+ if (!temp) {
+ result = -ENOMEM;
+ goto unregister_name;
+ }
+
+ temp->tz = tz;
hwmon->count++;
- snprintf(tz->temp_input.name, THERMAL_NAME_LENGTH,
+ snprintf(temp->temp_input.name, THERMAL_NAME_LENGTH,
"temp%d_input", hwmon->count);
- tz->temp_input.attr.attr.name = tz->temp_input.name;
- tz->temp_input.attr.attr.mode = 0444;
- tz->temp_input.attr.show = temp_input_show;
- sysfs_attr_init(&tz->temp_input.attr.attr);
- result = device_create_file(hwmon->device, &tz->temp_input.attr);
+ temp->temp_input.attr.attr.name = temp->temp_input.name;
+ temp->temp_input.attr.attr.mode = 0444;
+ temp->temp_input.attr.show = temp_input_show;
+ sysfs_attr_init(&temp->temp_input.attr.attr);
+ result = device_create_file(hwmon->device, &temp->temp_input.attr);
if (result)
- goto unregister_name;
+ goto free_temp_mem;
if (tz->ops->get_crit_temp) {
unsigned long temperature;
if (!tz->ops->get_crit_temp(tz, &temperature)) {
- snprintf(tz->temp_crit.name, THERMAL_NAME_LENGTH,
+ snprintf(temp->temp_crit.name, THERMAL_NAME_LENGTH,
"temp%d_crit", hwmon->count);
- tz->temp_crit.attr.attr.name = tz->temp_crit.name;
- tz->temp_crit.attr.attr.mode = 0444;
- tz->temp_crit.attr.show = temp_crit_show;
- sysfs_attr_init(&tz->temp_crit.attr.attr);
+ temp->temp_crit.attr.attr.name = temp->temp_crit.name;
+ temp->temp_crit.attr.attr.mode = 0444;
+ temp->temp_crit.attr.show = temp_crit_show;
+ sysfs_attr_init(&temp->temp_crit.attr.attr);
result = device_create_file(hwmon->device,
- &tz->temp_crit.attr);
+ &temp->temp_crit.attr);
if (result)
goto unregister_input;
}
@@ -534,13 +597,15 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
mutex_lock(&thermal_list_lock);
if (new_hwmon_device)
list_add_tail(&hwmon->node, &thermal_hwmon_list);
- list_add_tail(&tz->hwmon_node, &hwmon->tz_list);
+ list_add_tail(&temp->hwmon_node, &hwmon->tz_list);
mutex_unlock(&thermal_list_lock);
return 0;
unregister_input:
- device_remove_file(hwmon->device, &tz->temp_input.attr);
+ device_remove_file(hwmon->device, &temp->temp_input.attr);
+ free_temp_mem:
+ kfree(temp);
unregister_name:
if (new_hwmon_device) {
device_remove_file(hwmon->device, &dev_attr_name);
@@ -556,15 +621,30 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
static void
thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
{
- struct thermal_hwmon_device *hwmon = tz->hwmon;
+ struct thermal_hwmon_device *hwmon;
+ struct thermal_hwmon_temp *temp;
+
+ hwmon = thermal_hwmon_lookup_by_type(tz);
+ if (unlikely(!hwmon)) {
+ /* Should never happen... */
+ dev_dbg(&tz->device, "hwmon device lookup failed!\n");
+ return;
+ }
+
+ temp = thermal_hwmon_lookup_temp(hwmon, tz);
+ if (unlikely(!temp)) {
+ /* Should never happen... */
+ dev_dbg(&tz->device, "temperature input lookup failed!\n");
+ return;
+ }
- tz->hwmon = NULL;
- device_remove_file(hwmon->device, &tz->temp_input.attr);
+ device_remove_file(hwmon->device, &temp->temp_input.attr);
if (tz->ops->get_crit_temp)
- device_remove_file(hwmon->device, &tz->temp_crit.attr);
+ device_remove_file(hwmon->device, &temp->temp_crit.attr);
mutex_lock(&thermal_list_lock);
- list_del(&tz->hwmon_node);
+ list_del(&temp->hwmon_node);
+ kfree(temp);
if (!list_empty(&hwmon->tz_list)) {
mutex_unlock(&thermal_list_lock);
return;
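
The thermal_sys.c rework stops embedding the hwmon attributes in struct thermal_zone_device and instead recovers the per-zone data in the show() callbacks via container_of() on the new struct thermal_hwmon_temp. A small sketch of that container_of() chain, with hypothetical names standing in for the driver's structures:

#include <linux/kernel.h>
#include <linux/device.h>

struct my_hwmon_attr {			/* mirrors the pattern, names made up */
	struct device_attribute attr;
	char name[16];
};

struct my_temp {
	struct my_hwmon_attr temp_input;
	long cached_mC;			/* last reading, millidegrees C */
};

/*
 * Illustrative show() callback: sysfs only passes the embedded
 * device_attribute, so container_of() walks back out to the enclosing
 * structures to find the per-zone data.
 */
static ssize_t my_temp_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct my_hwmon_attr *ha =
		container_of(attr, struct my_hwmon_attr, attr);
	struct my_temp *t = container_of(ha, struct my_temp, temp_input);

	return sprintf(buf, "%ld\n", t->cached_mC);
}
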
diff --git a/drivers/tty/bfin_jtag_comm.c b/drivers/tty/bfin_jtag_comm.c
index 03c285bb2f1..3a997760ec3 100644
--- a/drivers/tty/bfin_jtag_comm.c
+++ b/drivers/tty/bfin_jtag_comm.c
@@ -25,7 +25,7 @@
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); })
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 6f2c9809f1f..e371753ba92 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -19,6 +19,11 @@ config HVC_CONSOLE
console. This driver allows each pSeries partition to have a console
which is accessed via the HMC.
+config HVC_OLD_HVSI
+ bool "Old driver for pSeries serial port (/dev/hvsi*)"
+ depends on HVC_CONSOLE
+ default n
+
config HVC_ISERIES
bool "iSeries Hypervisor Virtual Console support"
depends on PPC_ISERIES
diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile
index 40a25d93fe5..e2920531637 100644
--- a/drivers/tty/hvc/Makefile
+++ b/drivers/tty/hvc/Makefile
@@ -1,4 +1,5 @@
-obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
+obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi_lib.o
+obj-$(CONFIG_HVC_OLD_HVSI) += hvsi.o
obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o
obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
obj-$(CONFIG_HVC_TILE) += hvc_tile.o
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index e9cba13ee80..e1aaf4f309b 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -39,6 +39,7 @@
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/slab.h>
+#include <linux/serial_core.h>
#include <asm/uaccess.h>
@@ -163,8 +164,10 @@ static void hvc_console_print(struct console *co, const char *b,
} else {
r = cons_ops[index]->put_chars(vtermnos[index], c, i);
if (r <= 0) {
- /* throw away chars on error */
- i = 0;
+ /* throw away characters on error
+ * but spin in case of -EAGAIN */
+ if (r != -EAGAIN)
+ i = 0;
} else if (r > 0) {
i -= r;
if (i > 0)
@@ -184,7 +187,7 @@ static struct tty_driver *hvc_console_device(struct console *c, int *index)
}
static int __init hvc_console_setup(struct console *co, char *options)
-{
+{
if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
return -ENODEV;
@@ -448,7 +451,7 @@ static int hvc_push(struct hvc_struct *hp)
n = hp->ops->put_chars(hp->vtermno, hp->outbuf, hp->n_outbuf);
if (n <= 0) {
- if (n == 0) {
+ if (n == 0 || n == -EAGAIN) {
hp->do_wakeup = 1;
return 0;
}
@@ -745,6 +748,58 @@ static int khvcd(void *unused)
return 0;
}
+static int hvc_tiocmget(struct tty_struct *tty)
+{
+ struct hvc_struct *hp = tty->driver_data;
+
+ if (!hp || !hp->ops->tiocmget)
+ return -EINVAL;
+ return hp->ops->tiocmget(hp);
+}
+
+static int hvc_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+{
+ struct hvc_struct *hp = tty->driver_data;
+
+ if (!hp || !hp->ops->tiocmset)
+ return -EINVAL;
+ return hp->ops->tiocmset(hp, set, clear);
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+int hvc_poll_init(struct tty_driver *driver, int line, char *options)
+{
+ return 0;
+}
+
+static int hvc_poll_get_char(struct tty_driver *driver, int line)
+{
+ struct tty_struct *tty = driver->ttys[0];
+ struct hvc_struct *hp = tty->driver_data;
+ int n;
+ char ch;
+
+ n = hp->ops->get_chars(hp->vtermno, &ch, 1);
+
+ if (n == 0)
+ return NO_POLL_CHAR;
+
+ return ch;
+}
+
+static void hvc_poll_put_char(struct tty_driver *driver, int line, char ch)
+{
+ struct tty_struct *tty = driver->ttys[0];
+ struct hvc_struct *hp = tty->driver_data;
+ int n;
+
+ do {
+ n = hp->ops->put_chars(hp->vtermno, &ch, 1);
+ } while (n <= 0);
+}
+#endif
+
static const struct tty_operations hvc_ops = {
.open = hvc_open,
.close = hvc_close,
@@ -753,6 +808,13 @@ static const struct tty_operations hvc_ops = {
.unthrottle = hvc_unthrottle,
.write_room = hvc_write_room,
.chars_in_buffer = hvc_chars_in_buffer,
+ .tiocmget = hvc_tiocmget,
+ .tiocmset = hvc_tiocmset,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_init = hvc_poll_init,
+ .poll_get_char = hvc_poll_get_char,
+ .poll_put_char = hvc_poll_put_char,
+#endif
};
struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
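
The hvc_console.c hunks above distinguish -EAGAIN from hard errors in the put_chars() path: the buffered characters are kept and the push is retried later instead of being thrown away. A hedged sketch of that policy follows; backend_put_chars() is a stand-in for hp->ops->put_chars(), not a real API.

#include <linux/errno.h>
#include <linux/string.h>

/* Stand-in for hp->ops->put_chars(): returns bytes written, 0 when the
 * channel is busy, or a negative errno. Here it pretends half went out. */
static int backend_put_chars(const char *buf, int count)
{
	return count / 2;
}

/*
 * Illustrative sender: on 0 or -EAGAIN nothing is consumed and the
 * caller retries later; on any other error the data is dropped.
 */
static int send_buffered(char *buf, int *len)
{
	int n = backend_put_chars(buf, *len);

	if (n == 0 || n == -EAGAIN)
		return -EAGAIN;			/* keep the data, retry later */
	if (n < 0) {
		*len = 0;			/* hard error: throw it away */
		return n;
	}
	memmove(buf, buf + n, *len - n);	/* keep only the unsent tail */
	*len -= n;
	return 0;
}
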
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index 54381eba4e4..c335a1492a5 100644
--- a/drivers/tty/hvc/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
@@ -73,6 +73,10 @@ struct hv_ops {
int (*notifier_add)(struct hvc_struct *hp, int irq);
void (*notifier_del)(struct hvc_struct *hp, int irq);
void (*notifier_hangup)(struct hvc_struct *hp, int irq);
+
+ /* tiocmget/set implementation */
+ int (*tiocmget)(struct hvc_struct *hp);
+ int (*tiocmset)(struct hvc_struct *hp, unsigned int set, unsigned int clear);
};
/* Register a vterm and a slot index for use as a console (console_init) */
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index e6eea148524..130aace67f3 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -27,15 +27,27 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * TODO:
+ *
+ * - handle errors in sending hvsi protocol packets
+ * - retry negotiation on subsequent sends?
*/
+#undef DEBUG
+
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/console.h>
#include <asm/hvconsole.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
+#include <asm/hvsi.h>
+#include <asm/udbg.h>
#include "hvc_console.h"
@@ -43,59 +55,236 @@ static const char hvc_driver_name[] = "hvc_console";
static struct vio_device_id hvc_driver_table[] __devinitdata = {
{"serial", "hvterm1"},
+#ifndef HVC_OLD_HVSI
+ {"serial", "hvterm-protocol"},
+#endif
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, hvc_driver_table);
-static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
+typedef enum hv_protocol {
+ HV_PROTOCOL_RAW,
+ HV_PROTOCOL_HVSI
+} hv_protocol_t;
+
+struct hvterm_priv {
+ u32 termno; /* HV term number */
+ hv_protocol_t proto; /* Raw data or HVSI packets */
+ struct hvsi_priv hvsi; /* HVSI specific data */
+ spinlock_t buf_lock;
+ char buf[SIZE_VIO_GET_CHARS];
+ int left;
+ int offset;
+};
+static struct hvterm_priv *hvterm_privs[MAX_NR_HVC_CONSOLES];
+/* For early boot console */
+static struct hvterm_priv hvterm_priv0;
+
+static int hvterm_raw_get_chars(uint32_t vtermno, char *buf, int count)
{
- unsigned long got;
- int i;
+ struct hvterm_priv *pv = hvterm_privs[vtermno];
+ unsigned long i;
+ unsigned long flags;
+ int got;
- /*
- * Vio firmware will read up to SIZE_VIO_GET_CHARS at its own discretion
- * so we play safe and avoid the situation where got > count which could
- * overload the flip buffer.
- */
- if (count < SIZE_VIO_GET_CHARS)
- return -EAGAIN;
+ if (WARN_ON(!pv))
+ return 0;
- got = hvc_get_chars(vtermno, buf, count);
+ spin_lock_irqsave(&pv->buf_lock, flags);
- /*
- * Work around a HV bug where it gives us a null
- * after every \r. -- paulus
- */
- for (i = 1; i < got; ++i) {
- if (buf[i] == 0 && buf[i-1] == '\r') {
- --got;
- if (i < got)
- memmove(&buf[i], &buf[i+1],
- got - i);
+ if (pv->left == 0) {
+ pv->offset = 0;
+ pv->left = hvc_get_chars(pv->termno, pv->buf, count);
+
+ /*
+ * Work around a HV bug where it gives us a null
+ * after every \r. -- paulus
+ */
+ for (i = 1; i < pv->left; ++i) {
+ if (pv->buf[i] == 0 && pv->buf[i-1] == '\r') {
+ --pv->left;
+ if (i < pv->left) {
+ memmove(&pv->buf[i], &pv->buf[i+1],
+ pv->left - i);
+ }
+ }
}
}
+
+ got = min(count, pv->left);
+ memcpy(buf, &pv->buf[pv->offset], got);
+ pv->offset += got;
+ pv->left -= got;
+
+ spin_unlock_irqrestore(&pv->buf_lock, flags);
+
return got;
}
-static const struct hv_ops hvc_get_put_ops = {
- .get_chars = filtered_get_chars,
- .put_chars = hvc_put_chars,
+static int hvterm_raw_put_chars(uint32_t vtermno, const char *buf, int count)
+{
+ struct hvterm_priv *pv = hvterm_privs[vtermno];
+
+ if (WARN_ON(!pv))
+ return 0;
+
+ return hvc_put_chars(pv->termno, buf, count);
+}
+
+static const struct hv_ops hvterm_raw_ops = {
+ .get_chars = hvterm_raw_get_chars,
+ .put_chars = hvterm_raw_put_chars,
.notifier_add = notifier_add_irq,
.notifier_del = notifier_del_irq,
.notifier_hangup = notifier_hangup_irq,
};
+static int hvterm_hvsi_get_chars(uint32_t vtermno, char *buf, int count)
+{
+ struct hvterm_priv *pv = hvterm_privs[vtermno];
+
+ if (WARN_ON(!pv))
+ return 0;
+
+ return hvsilib_get_chars(&pv->hvsi, buf, count);
+}
+
+static int hvterm_hvsi_put_chars(uint32_t vtermno, const char *buf, int count)
+{
+ struct hvterm_priv *pv = hvterm_privs[vtermno];
+
+ if (WARN_ON(!pv))
+ return 0;
+
+ return hvsilib_put_chars(&pv->hvsi, buf, count);
+}
+
+static int hvterm_hvsi_open(struct hvc_struct *hp, int data)
+{
+ struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
+ int rc;
+
+ pr_devel("HVSI@%x: open !\n", pv->termno);
+
+ rc = notifier_add_irq(hp, data);
+ if (rc)
+ return rc;
+
+ return hvsilib_open(&pv->hvsi, hp);
+}
+
+static void hvterm_hvsi_close(struct hvc_struct *hp, int data)
+{
+ struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
+
+ pr_devel("HVSI@%x: do close !\n", pv->termno);
+
+ hvsilib_close(&pv->hvsi, hp);
+
+ notifier_del_irq(hp, data);
+}
+
+void hvterm_hvsi_hangup(struct hvc_struct *hp, int data)
+{
+ struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
+
+ pr_devel("HVSI@%x: do hangup !\n", pv->termno);
+
+ hvsilib_close(&pv->hvsi, hp);
+
+ notifier_hangup_irq(hp, data);
+}
+
+static int hvterm_hvsi_tiocmget(struct hvc_struct *hp)
+{
+ struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
+
+ if (!pv)
+ return -EINVAL;
+ return pv->hvsi.mctrl;
+}
+
+static int hvterm_hvsi_tiocmset(struct hvc_struct *hp, unsigned int set,
+ unsigned int clear)
+{
+ struct hvterm_priv *pv = hvterm_privs[hp->vtermno];
+
+ pr_devel("HVSI@%x: Set modem control, set=%x,clr=%x\n",
+ pv->termno, set, clear);
+
+ if (set & TIOCM_DTR)
+ hvsilib_write_mctrl(&pv->hvsi, 1);
+ else if (clear & TIOCM_DTR)
+ hvsilib_write_mctrl(&pv->hvsi, 0);
+
+ return 0;
+}
+
+static const struct hv_ops hvterm_hvsi_ops = {
+ .get_chars = hvterm_hvsi_get_chars,
+ .put_chars = hvterm_hvsi_put_chars,
+ .notifier_add = hvterm_hvsi_open,
+ .notifier_del = hvterm_hvsi_close,
+ .notifier_hangup = hvterm_hvsi_hangup,
+ .tiocmget = hvterm_hvsi_tiocmget,
+ .tiocmset = hvterm_hvsi_tiocmset,
+};
+
static int __devinit hvc_vio_probe(struct vio_dev *vdev,
- const struct vio_device_id *id)
+ const struct vio_device_id *id)
{
+ const struct hv_ops *ops;
struct hvc_struct *hp;
+ struct hvterm_priv *pv;
+ hv_protocol_t proto;
+ int i, termno = -1;
/* probed with invalid parameters. */
if (!vdev || !id)
return -EPERM;
- hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops,
- MAX_VIO_PUT_CHARS);
+ if (of_device_is_compatible(vdev->dev.of_node, "hvterm1")) {
+ proto = HV_PROTOCOL_RAW;
+ ops = &hvterm_raw_ops;
+ } else if (of_device_is_compatible(vdev->dev.of_node, "hvterm-protocol")) {
+ proto = HV_PROTOCOL_HVSI;
+ ops = &hvterm_hvsi_ops;
+ } else {
+ pr_err("hvc_vio: Unknown protocol for %s\n", vdev->dev.of_node->full_name);
+ return -ENXIO;
+ }
+
+ pr_devel("hvc_vio_probe() device %s, using %s protocol\n",
+ vdev->dev.of_node->full_name,
+ proto == HV_PROTOCOL_RAW ? "raw" : "hvsi");
+
+ /* Is it our boot one ? */
+ if (hvterm_privs[0] == &hvterm_priv0 &&
+ vdev->unit_address == hvterm_priv0.termno) {
+ pv = hvterm_privs[0];
+ termno = 0;
+ pr_devel("->boot console, using termno 0\n");
+ }
+ /* nope, allocate a new one */
+ else {
+ for (i = 0; i < MAX_NR_HVC_CONSOLES && termno < 0; i++)
+ if (!hvterm_privs[i])
+ termno = i;
+ pr_devel("->non-boot console, using termno %d\n", termno);
+ if (termno < 0)
+ return -ENODEV;
+ pv = kzalloc(sizeof(struct hvterm_priv), GFP_KERNEL);
+ if (!pv)
+ return -ENOMEM;
+ pv->termno = vdev->unit_address;
+ pv->proto = proto;
+ spin_lock_init(&pv->buf_lock);
+ hvterm_privs[termno] = pv;
+ hvsilib_init(&pv->hvsi, hvc_get_chars, hvc_put_chars,
+ pv->termno, 0);
+ }
+
+ hp = hvc_alloc(termno, vdev->irq, ops, MAX_VIO_PUT_CHARS);
if (IS_ERR(hp))
return PTR_ERR(hp);
dev_set_drvdata(&vdev->dev, hp);
@@ -106,8 +295,16 @@ static int __devinit hvc_vio_probe(struct vio_dev *vdev,
static int __devexit hvc_vio_remove(struct vio_dev *vdev)
{
struct hvc_struct *hp = dev_get_drvdata(&vdev->dev);
+ int rc, termno;
- return hvc_remove(hp);
+ termno = hp->vtermno;
+ rc = hvc_remove(hp);
+ if (rc == 0) {
+ if (hvterm_privs[termno] != &hvterm_priv0)
+ kfree(hvterm_privs[termno]);
+ hvterm_privs[termno] = NULL;
+ }
+ return rc;
}
static struct vio_driver hvc_vio_driver = {
@@ -140,34 +337,149 @@ static void __exit hvc_vio_exit(void)
}
module_exit(hvc_vio_exit);
-/* the device tree order defines our numbering */
-static int hvc_find_vtys(void)
+static void udbg_hvc_putc(char c)
{
- struct device_node *vty;
- int num_found = 0;
+ int count = -1;
- for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL;
- vty = of_find_node_by_name(vty, "vty")) {
- const uint32_t *vtermno;
+ if (c == '\n')
+ udbg_hvc_putc('\r');
- /* We have statically defined space for only a certain number
- * of console adapters.
- */
- if (num_found >= MAX_NR_HVC_CONSOLES) {
- of_node_put(vty);
+ do {
+ switch(hvterm_priv0.proto) {
+ case HV_PROTOCOL_RAW:
+ count = hvterm_raw_put_chars(0, &c, 1);
+ break;
+ case HV_PROTOCOL_HVSI:
+ count = hvterm_hvsi_put_chars(0, &c, 1);
break;
}
+ } while(count == 0);
+}
+
+static int udbg_hvc_getc_poll(void)
+{
+ int rc = 0;
+ char c;
- vtermno = of_get_property(vty, "reg", NULL);
- if (!vtermno)
- continue;
+ switch(hvterm_priv0.proto) {
+ case HV_PROTOCOL_RAW:
+ rc = hvterm_raw_get_chars(0, &c, 1);
+ break;
+ case HV_PROTOCOL_HVSI:
+ rc = hvterm_hvsi_get_chars(0, &c, 1);
+ break;
+ }
+ if (!rc)
+ return -1;
+ return c;
+}
- if (of_device_is_compatible(vty, "hvterm1")) {
- hvc_instantiate(*vtermno, num_found, &hvc_get_put_ops);
- ++num_found;
+static int udbg_hvc_getc(void)
+{
+ int ch;
+ for (;;) {
+ ch = udbg_hvc_getc_poll();
+ if (ch == -1) {
+ /* This shouldn't be needed...but... */
+ volatile unsigned long delay;
+ for (delay=0; delay < 2000000; delay++)
+ ;
+ } else {
+ return ch;
}
}
+}
+
+void __init hvc_vio_init_early(void)
+{
+ struct device_node *stdout_node;
+ const u32 *termno;
+ const char *name;
+ const struct hv_ops *ops;
+
+ /* find the boot console from /chosen/stdout */
+ if (!of_chosen)
+ return;
+ name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+ if (name == NULL)
+ return;
+ stdout_node = of_find_node_by_path(name);
+ if (!stdout_node)
+ return;
+ name = of_get_property(stdout_node, "name", NULL);
+ if (!name) {
+ printk(KERN_WARNING "stdout node missing 'name' property!\n");
+ goto out;
+ }
+
+ /* Check if it's a virtual terminal */
+ if (strncmp(name, "vty", 3) != 0)
+ goto out;
+ termno = of_get_property(stdout_node, "reg", NULL);
+ if (termno == NULL)
+ goto out;
+ hvterm_priv0.termno = *termno;
+ spin_lock_init(&hvterm_priv0.buf_lock);
+ hvterm_privs[0] = &hvterm_priv0;
+
+ /* Check the protocol */
+ if (of_device_is_compatible(stdout_node, "hvterm1")) {
+ hvterm_priv0.proto = HV_PROTOCOL_RAW;
+ ops = &hvterm_raw_ops;
+ }
+ else if (of_device_is_compatible(stdout_node, "hvterm-protocol")) {
+ hvterm_priv0.proto = HV_PROTOCOL_HVSI;
+ ops = &hvterm_hvsi_ops;
+ hvsilib_init(&hvterm_priv0.hvsi, hvc_get_chars, hvc_put_chars,
+ hvterm_priv0.termno, 1);
+ /* HVSI, perform the handshake now */
+ hvsilib_establish(&hvterm_priv0.hvsi);
+ } else
+ goto out;
+ udbg_putc = udbg_hvc_putc;
+ udbg_getc = udbg_hvc_getc;
+ udbg_getc_poll = udbg_hvc_getc_poll;
+#ifdef HVC_OLD_HVSI
+ /* When using the old HVSI driver don't register the HVC
+ * backend for HVSI, only do udbg
+ */
+ if (hvterm_priv0.proto == HV_PROTOCOL_HVSI)
+ goto out;
+#endif
+ add_preferred_console("hvc", 0, NULL);
+ hvc_instantiate(0, 0, ops);
+out:
+ of_node_put(stdout_node);
+}
- return num_found;
+/* call this from early_init() for a working debug console on
+ * vterm capable LPAR machines
+ */
+#ifdef CONFIG_PPC_EARLY_DEBUG_LPAR
+void __init udbg_init_debug_lpar(void)
+{
+ hvterm_privs[0] = &hvterm_priv0;
+ hvterm_priv0.termno = 0;
+ hvterm_priv0.proto = HV_PROTOCOL_RAW;
+ spin_lock_init(&hvterm_priv0.buf_lock);
+ udbg_putc = udbg_hvc_putc;
+ udbg_getc = udbg_hvc_getc;
+ udbg_getc_poll = udbg_hvc_getc_poll;
+}
+#endif /* CONFIG_PPC_EARLY_DEBUG_LPAR */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI
+void __init udbg_init_debug_lpar_hvsi(void)
+{
+ hvterm_privs[0] = &hvterm_priv0;
+ hvterm_priv0.termno = CONFIG_PPC_EARLY_DEBUG_HVSI_VTERMNO;
+ hvterm_priv0.proto = HV_PROTOCOL_HVSI;
+ spin_lock_init(&hvterm_priv0.buf_lock);
+ udbg_putc = udbg_hvc_putc;
+ udbg_getc = udbg_hvc_getc;
+ udbg_getc_poll = udbg_hvc_getc_poll;
+ hvsilib_init(&hvterm_priv0.hvsi, hvc_get_chars, hvc_put_chars,
+ hvterm_priv0.termno, 1);
+ hvsilib_establish(&hvterm_priv0.hvsi);
}
-console_initcall(hvc_find_vtys);
+#endif /* CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI */
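
hvterm_raw_get_chars() above replaces the old "refuse reads smaller than SIZE_VIO_GET_CHARS" workaround with a per-terminal buffer: a whole hypervisor chunk is pulled in under a spinlock and then served out in whatever sizes the callers ask for (including the 1-byte udbg path). A simplified sketch of that buffering, with fw_read() standing in for hvc_get_chars() and CHUNK as an assumed read granularity:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#define CHUNK 16			/* assumed firmware read granularity */

struct term_buf {
	spinlock_t lock;
	char buf[CHUNK];
	int left;			/* bytes still unread in buf[] */
	int offset;			/* index of the next unread byte */
};

/* Stand-in for hvc_get_chars(): fills up to 'max' bytes, returns count. */
static int fw_read(char *dst, int max)
{
	static const char msg[] = "hello";
	int n = min(max, (int)sizeof(msg) - 1);

	memcpy(dst, msg, n);
	return n;
}

/*
 * Illustrative consumer: refill the cache only when it is empty, then
 * serve callers of any size (even single bytes) from the cached chunk.
 */
static int term_get_chars(struct term_buf *t, char *out, int count)
{
	unsigned long flags;
	int got;

	spin_lock_irqsave(&t->lock, flags);
	if (t->left == 0) {
		t->offset = 0;
		t->left = fw_read(t->buf, CHUNK);
	}
	got = min(count, t->left);
	memcpy(out, &t->buf[t->offset], got);
	t->offset += got;
	t->left -= got;
	spin_unlock_irqrestore(&t->lock, flags);
	return got;
}
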
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index 8a8d6373f16..c94e2f5853d 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -49,6 +49,7 @@
#include <asm/uaccess.h>
#include <asm/vio.h>
#include <asm/param.h>
+#include <asm/hvsi.h>
#define HVSI_MAJOR 229
#define HVSI_MINOR 128
@@ -109,68 +110,6 @@ enum HVSI_PROTOCOL_STATE {
};
#define HVSI_CONSOLE 0x1
-#define VS_DATA_PACKET_HEADER 0xff
-#define VS_CONTROL_PACKET_HEADER 0xfe
-#define VS_QUERY_PACKET_HEADER 0xfd
-#define VS_QUERY_RESPONSE_PACKET_HEADER 0xfc
-
-/* control verbs */
-#define VSV_SET_MODEM_CTL 1 /* to service processor only */
-#define VSV_MODEM_CTL_UPDATE 2 /* from service processor only */
-#define VSV_CLOSE_PROTOCOL 3
-
-/* query verbs */
-#define VSV_SEND_VERSION_NUMBER 1
-#define VSV_SEND_MODEM_CTL_STATUS 2
-
-/* yes, these masks are not consecutive. */
-#define HVSI_TSDTR 0x01
-#define HVSI_TSCD 0x20
-
-struct hvsi_header {
- uint8_t type;
- uint8_t len;
- uint16_t seqno;
-} __attribute__((packed));
-
-struct hvsi_data {
- uint8_t type;
- uint8_t len;
- uint16_t seqno;
- uint8_t data[HVSI_MAX_OUTGOING_DATA];
-} __attribute__((packed));
-
-struct hvsi_control {
- uint8_t type;
- uint8_t len;
- uint16_t seqno;
- uint16_t verb;
- /* optional depending on verb: */
- uint32_t word;
- uint32_t mask;
-} __attribute__((packed));
-
-struct hvsi_query {
- uint8_t type;
- uint8_t len;
- uint16_t seqno;
- uint16_t verb;
-} __attribute__((packed));
-
-struct hvsi_query_response {
- uint8_t type;
- uint8_t len;
- uint16_t seqno;
- uint16_t verb;
- uint16_t query_seqno;
- union {
- uint8_t version;
- uint32_t mctrl_word;
- } u;
-} __attribute__((packed));
-
-
-
static inline int is_console(struct hvsi_struct *hp)
{
return hp->flags & HVSI_CONSOLE;
@@ -356,18 +295,18 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
struct hvsi_query_response packet __ALIGNED__;
int wrote;
- packet.type = VS_QUERY_RESPONSE_PACKET_HEADER;
- packet.len = sizeof(struct hvsi_query_response);
- packet.seqno = atomic_inc_return(&hp->seqno);
+ packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
+ packet.hdr.len = sizeof(struct hvsi_query_response);
+ packet.hdr.seqno = atomic_inc_return(&hp->seqno);
packet.verb = VSV_SEND_VERSION_NUMBER;
packet.u.version = HVSI_VERSION;
packet.query_seqno = query_seqno+1;
- pr_debug("%s: sending %i bytes\n", __func__, packet.len);
- dbg_dump_hex((uint8_t*)&packet, packet.len);
+ pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
+ dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
- wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
- if (wrote != packet.len) {
+ wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
+ if (wrote != packet.hdr.len) {
printk(KERN_ERR "hvsi%i: couldn't send query response!\n",
hp->index);
return -EIO;
@@ -382,7 +321,7 @@ static void hvsi_recv_query(struct hvsi_struct *hp, uint8_t *packet)
switch (hp->state) {
case HVSI_WAIT_FOR_VER_QUERY:
- hvsi_version_respond(hp, query->seqno);
+ hvsi_version_respond(hp, query->hdr.seqno);
__set_state(hp, HVSI_OPEN);
break;
default:
@@ -640,16 +579,16 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
struct hvsi_query packet __ALIGNED__;
int wrote;
- packet.type = VS_QUERY_PACKET_HEADER;
- packet.len = sizeof(struct hvsi_query);
- packet.seqno = atomic_inc_return(&hp->seqno);
+ packet.hdr.type = VS_QUERY_PACKET_HEADER;
+ packet.hdr.len = sizeof(struct hvsi_query);
+ packet.hdr.seqno = atomic_inc_return(&hp->seqno);
packet.verb = verb;
- pr_debug("%s: sending %i bytes\n", __func__, packet.len);
- dbg_dump_hex((uint8_t*)&packet, packet.len);
+ pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
+ dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
- wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
- if (wrote != packet.len) {
+ wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
+ if (wrote != packet.hdr.len) {
printk(KERN_ERR "hvsi%i: couldn't send query (%i)!\n", hp->index,
wrote);
return -EIO;
@@ -683,20 +622,20 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
struct hvsi_control packet __ALIGNED__;
int wrote;
- packet.type = VS_CONTROL_PACKET_HEADER,
- packet.seqno = atomic_inc_return(&hp->seqno);
- packet.len = sizeof(struct hvsi_control);
+ packet.hdr.type = VS_CONTROL_PACKET_HEADER,
+ packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+ packet.hdr.len = sizeof(struct hvsi_control);
packet.verb = VSV_SET_MODEM_CTL;
packet.mask = HVSI_TSDTR;
if (mctrl & TIOCM_DTR)
packet.word = HVSI_TSDTR;
- pr_debug("%s: sending %i bytes\n", __func__, packet.len);
- dbg_dump_hex((uint8_t*)&packet, packet.len);
+ pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
+ dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
- wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
- if (wrote != packet.len) {
+ wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
+ if (wrote != packet.hdr.len) {
printk(KERN_ERR "hvsi%i: couldn't set DTR!\n", hp->index);
return -EIO;
}
@@ -766,13 +705,13 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
- packet.type = VS_DATA_PACKET_HEADER;
- packet.seqno = atomic_inc_return(&hp->seqno);
- packet.len = count + sizeof(struct hvsi_header);
+ packet.hdr.type = VS_DATA_PACKET_HEADER;
+ packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+ packet.hdr.len = count + sizeof(struct hvsi_header);
memcpy(&packet.data, buf, count);
- ret = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
- if (ret == packet.len) {
+ ret = hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
+ if (ret == packet.hdr.len) {
/* return the number of chars written, not the packet length */
return count;
}
@@ -783,15 +722,15 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
{
struct hvsi_control packet __ALIGNED__;
- packet.type = VS_CONTROL_PACKET_HEADER;
- packet.seqno = atomic_inc_return(&hp->seqno);
- packet.len = 6;
+ packet.hdr.type = VS_CONTROL_PACKET_HEADER;
+ packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+ packet.hdr.len = 6;
packet.verb = VSV_CLOSE_PROTOCOL;
- pr_debug("%s: sending %i bytes\n", __func__, packet.len);
- dbg_dump_hex((uint8_t*)&packet, packet.len);
+ pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
+ dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
- hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
+ hvc_put_chars(hp->vtermno, (char *)&packet, packet.hdr.len);
}
static int hvsi_open(struct tty_struct *tty, struct file *filp)
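
The hvsi.c hunks above are purely mechanical: the per-packet type/len/seqno fields move into an embedded struct hvsi_header (shared via asm/hvsi.h), so packet.len becomes packet.hdr.len and so on. Roughly, the shared layout and its use look like the sketch below; the field widths are an approximation of the moved header, and the constants are taken from the #defines this patch removes from hvsi.c.

#include <linux/types.h>

/* Approximation of the shared header now living in asm/hvsi.h. */
struct hvsi_header {
	u8 type;
	u8 len;
	u16 seqno;
} __attribute__((packed));

struct hvsi_query {
	struct hvsi_header hdr;		/* was three open-coded fields */
	u16 verb;
} __attribute__((packed));

/* Callers now fill the embedded header, e.g.: */
static void fill_version_query(struct hvsi_query *q)
{
	q->hdr.type = 0xfd;		/* VS_QUERY_PACKET_HEADER */
	q->hdr.len = sizeof(*q);
	q->verb = 1;			/* VSV_SEND_VERSION_NUMBER */
}
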
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
new file mode 100644
index 00000000000..bd9b09827b2
--- /dev/null
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -0,0 +1,426 @@
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/console.h>
+#include <asm/hvsi.h>
+
+#include "hvc_console.h"
+
+static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
+{
+ packet->seqno = atomic_inc_return(&pv->seqno);
+
+ /* Assumes that this always succeeds; it does in practice */
+ return pv->put_chars(pv->termno, (char *)packet, packet->len);
+}
+
+static void hvsi_start_handshake(struct hvsi_priv *pv)
+{
+ struct hvsi_query q;
+
+ /* Reset state */
+ pv->established = 0;
+ atomic_set(&pv->seqno, 0);
+
+ pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
+
+ /* Send version query */
+ q.hdr.type = VS_QUERY_PACKET_HEADER;
+ q.hdr.len = sizeof(struct hvsi_query);
+ q.verb = VSV_SEND_VERSION_NUMBER;
+ hvsi_send_packet(pv, &q.hdr);
+}
+
+static int hvsi_send_close(struct hvsi_priv *pv)
+{
+ struct hvsi_control ctrl;
+
+ pv->established = 0;
+
+ ctrl.hdr.type = VS_CONTROL_PACKET_HEADER;
+ ctrl.hdr.len = sizeof(struct hvsi_control);
+ ctrl.verb = VSV_CLOSE_PROTOCOL;
+ return hvsi_send_packet(pv, &ctrl.hdr);
+}
+
+static void hvsi_cd_change(struct hvsi_priv *pv, int cd)
+{
+ if (cd)
+ pv->mctrl |= TIOCM_CD;
+ else {
+ pv->mctrl &= ~TIOCM_CD;
+
+ /* We copy the existing hvsi driver semantics
+ * here which are to trigger a hangup when
+ * we get a carrier loss.
+ * Closing our connection to the server will
+ * do just that.
+ */
+ if (!pv->is_console && pv->opened) {
+ pr_devel("HVSI@%x Carrier lost, hanging up !\n",
+ pv->termno);
+ hvsi_send_close(pv);
+ }
+ }
+}
+
+static void hvsi_got_control(struct hvsi_priv *pv)
+{
+ struct hvsi_control *pkt = (struct hvsi_control *)pv->inbuf;
+
+ switch (pkt->verb) {
+ case VSV_CLOSE_PROTOCOL:
+ /* We restart the handshaking */
+ hvsi_start_handshake(pv);
+ break;
+ case VSV_MODEM_CTL_UPDATE:
+ /* Transition of carrier detect */
+ hvsi_cd_change(pv, pkt->word & HVSI_TSCD);
+ break;
+ }
+}
+
+static void hvsi_got_query(struct hvsi_priv *pv)
+{
+ struct hvsi_query *pkt = (struct hvsi_query *)pv->inbuf;
+ struct hvsi_query_response r;
+
+ /* We only handle version queries */
+ if (pkt->verb != VSV_SEND_VERSION_NUMBER)
+ return;
+
+ pr_devel("HVSI@%x: Got version query, sending response...\n",
+ pv->termno);
+
+ /* Send version response */
+ r.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
+ r.hdr.len = sizeof(struct hvsi_query_response);
+ r.verb = VSV_SEND_VERSION_NUMBER;
+ r.u.version = HVSI_VERSION;
+ r.query_seqno = pkt->hdr.seqno;
+ hvsi_send_packet(pv, &r.hdr);
+
+ /* Assume protocol is open now */
+ pv->established = 1;
+}
+
+static void hvsi_got_response(struct hvsi_priv *pv)
+{
+ struct hvsi_query_response *r =
+ (struct hvsi_query_response *)pv->inbuf;
+
+ switch(r->verb) {
+ case VSV_SEND_MODEM_CTL_STATUS:
+ hvsi_cd_change(pv, r->u.mctrl_word & HVSI_TSCD);
+ pv->mctrl_update = 1;
+ break;
+ }
+}
+
+static int hvsi_check_packet(struct hvsi_priv *pv)
+{
+ u8 len, type;
+
+ /* Check header validity. If it's invalid, we ditch
+ * the whole buffer and hope we eventually resync
+ */
+ if (pv->inbuf[0] < 0xfc) {
+ pv->inbuf_len = pv->inbuf_pktlen = 0;
+ return 0;
+ }
+ type = pv->inbuf[0];
+ len = pv->inbuf[1];
+
+ /* Packet incomplete ? */
+ if (pv->inbuf_len < len)
+ return 0;
+
+ pr_devel("HVSI@%x: Got packet type %x len %d bytes:\n",
+ pv->termno, type, len);
+
+ /* We have a packet, yay ! Handle it */
+ switch(type) {
+ case VS_DATA_PACKET_HEADER:
+ pv->inbuf_pktlen = len - 4;
+ pv->inbuf_cur = 4;
+ return 1;
+ case VS_CONTROL_PACKET_HEADER:
+ hvsi_got_control(pv);
+ break;
+ case VS_QUERY_PACKET_HEADER:
+ hvsi_got_query(pv);
+ break;
+ case VS_QUERY_RESPONSE_PACKET_HEADER:
+ hvsi_got_response(pv);
+ break;
+ }
+
+ /* Swallow packet and retry */
+ pv->inbuf_len -= len;
+ memmove(pv->inbuf, &pv->inbuf[len], pv->inbuf_len);
+ return 1;
+}
+
+static int hvsi_get_packet(struct hvsi_priv *pv)
+{
+ /* If we have room in the buffer, ask HV for more */
+ if (pv->inbuf_len < HVSI_INBUF_SIZE)
+ pv->inbuf_len += pv->get_chars(pv->termno,
+ &pv->inbuf[pv->inbuf_len],
+ HVSI_INBUF_SIZE - pv->inbuf_len);
+ /*
+ * If we have at least 4 bytes in the buffer, check for
+ * a full packet and retry
+ */
+ if (pv->inbuf_len >= 4)
+ return hvsi_check_packet(pv);
+ return 0;
+}
+
+int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count)
+{
+ unsigned int tries, read = 0;
+
+ if (WARN_ON(!pv))
+ return 0;
+
+ /* If we aren't open, don't do anything in order to avoid races
+ * with connection establishment. The hvc core will call this
+ * before we have returned from notifier_add(), and we need to
+ * avoid multiple users playing with the receive buffer
+ */
+ if (!pv->opened)
+ return 0;
+
+ /* We try twice, once with what data we have and once more
+ * after we try to fetch some more from the hypervisor
+ */
+ for (tries = 1; count && tries < 2; tries++) {
+ /* Consume existing data packet */
+ if (pv->inbuf_pktlen) {
+ unsigned int l = min(count, (int)pv->inbuf_pktlen);
+ memcpy(&buf[read], &pv->inbuf[pv->inbuf_cur], l);
+ pv->inbuf_cur += l;
+ pv->inbuf_pktlen -= l;
+ count -= l;
+ read += l;
+ }
+ if (count == 0)
+ break;
+
+ /* Data packet fully consumed, move down remaining data */
+ if (pv->inbuf_cur) {
+ pv->inbuf_len -= pv->inbuf_cur;
+ memmove(pv->inbuf, &pv->inbuf[pv->inbuf_cur],
+ pv->inbuf_len);
+ pv->inbuf_cur = 0;
+ }
+
+ /* Try to get another packet */
+ if (hvsi_get_packet(pv))
+ tries--;
+ }
+ if (!pv->established) {
+ pr_devel("HVSI@%x: returning -EPIPE\n", pv->termno);
+ return -EPIPE;
+ }
+ return read;
+}
+
+int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count)
+{
+ struct hvsi_data dp;
+ int rc, adjcount = min(count, HVSI_MAX_OUTGOING_DATA);
+
+ if (WARN_ON(!pv))
+ return 0;
+
+ dp.hdr.type = VS_DATA_PACKET_HEADER;
+ dp.hdr.len = adjcount + sizeof(struct hvsi_header);
+ memcpy(dp.data, buf, adjcount);
+ rc = hvsi_send_packet(pv, &dp.hdr);
+ if (rc <= 0)
+ return rc;
+ return adjcount;
+}
+
+static void maybe_msleep(unsigned long ms)
+{
+ /* During early boot, IRQs are disabled, use mdelay */
+ if (irqs_disabled())
+ mdelay(ms);
+ else
+ msleep(ms);
+}
+
+int hvsilib_read_mctrl(struct hvsi_priv *pv)
+{
+ struct hvsi_query q;
+ int rc, timeout;
+
+ pr_devel("HVSI@%x: Querying modem control status...\n",
+ pv->termno);
+
+ pv->mctrl_update = 0;
+ q.hdr.type = VS_QUERY_PACKET_HEADER;
+ q.hdr.len = sizeof(struct hvsi_query);
+ q.hdr.seqno = atomic_inc_return(&pv->seqno);
+ q.verb = VSV_SEND_MODEM_CTL_STATUS;
+ rc = hvsi_send_packet(pv, &q.hdr);
+ if (rc <= 0) {
+ pr_devel("HVSI@%x: Error %d...\n", pv->termno, rc);
+ return rc;
+ }
+
+ /* Try for up to 200ms */
+ for (timeout = 0; timeout < 20; timeout++) {
+ if (!pv->established)
+ return -ENXIO;
+ if (pv->mctrl_update)
+ return 0;
+ if (!hvsi_get_packet(pv))
+ maybe_msleep(10);
+ }
+ return -EIO;
+}
+
+int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr)
+{
+ struct hvsi_control ctrl;
+ unsigned short mctrl;
+
+ mctrl = pv->mctrl;
+ if (dtr)
+ mctrl |= TIOCM_DTR;
+ else
+ mctrl &= ~TIOCM_DTR;
+ if (mctrl == pv->mctrl)
+ return 0;
+ pv->mctrl = mctrl;
+
+ pr_devel("HVSI@%x: %s DTR...\n", pv->termno,
+ dtr ? "Setting" : "Clearing");
+
+ ctrl.hdr.type = VS_CONTROL_PACKET_HEADER,
+ ctrl.hdr.len = sizeof(struct hvsi_control);
+ ctrl.verb = VSV_SET_MODEM_CTL;
+ ctrl.mask = HVSI_TSDTR;
+ ctrl.word = dtr ? HVSI_TSDTR : 0;
+ return hvsi_send_packet(pv, &ctrl.hdr);
+}
+
+void hvsilib_establish(struct hvsi_priv *pv)
+{
+ int timeout;
+
+ pr_devel("HVSI@%x: Establishing...\n", pv->termno);
+
+ /* Try for up to 200ms, there can be a packet to
+ * start the process waiting for us...
+ */
+ for (timeout = 0; timeout < 20; timeout++) {
+ if (pv->established)
+ goto established;
+ if (!hvsi_get_packet(pv))
+ maybe_msleep(10);
+ }
+
+ /* Failed, send a close connection packet just
+ * in case
+ */
+ pr_devel("HVSI@%x: ... sending close\n", pv->termno);
+
+ hvsi_send_close(pv);
+
+ /* Then restart handshake */
+
+ pr_devel("HVSI@%x: ... restarting handshake\n", pv->termno);
+
+ hvsi_start_handshake(pv);
+
+ pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno);
+
+ /* Try for up to 200ms */
+ for (timeout = 0; timeout < 20; timeout++) {
+ if (pv->established)
+ goto established;
+ if (!hvsi_get_packet(pv))
+ maybe_msleep(10);
+ }
+
+ if (!pv->established) {
+ pr_devel("HVSI@%x: Timeout handshaking, giving up !\n",
+ pv->termno);
+ return;
+ }
+ established:
+ /* Query modem control lines */
+
+ pr_devel("HVSI@%x: ... established, reading mctrl\n", pv->termno);
+
+ hvsilib_read_mctrl(pv);
+
+ /* Set our own DTR */
+
+ pr_devel("HVSI@%x: ... setting mctrl\n", pv->termno);
+
+ hvsilib_write_mctrl(pv, 1);
+
+ /* Set the opened flag so reads are allowed */
+ wmb();
+ pv->opened = 1;
+}
+
+int hvsilib_open(struct hvsi_priv *pv, struct hvc_struct *hp)
+{
+ pr_devel("HVSI@%x: open !\n", pv->termno);
+
+ /* Keep track of the tty data structure */
+ pv->tty = tty_kref_get(hp->tty);
+
+ hvsilib_establish(pv);
+
+ return 0;
+}
+
+void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp)
+{
+ unsigned long flags;
+
+ pr_devel("HVSI@%x: close !\n", pv->termno);
+
+ if (!pv->is_console) {
+ pr_devel("HVSI@%x: Not a console, tearing down\n",
+ pv->termno);
+
+ /* Clear opened, synchronize with khvcd */
+ spin_lock_irqsave(&hp->lock, flags);
+ pv->opened = 0;
+ spin_unlock_irqrestore(&hp->lock, flags);
+
+ /* Clear our own DTR */
+ if (!pv->tty || (pv->tty->termios->c_cflag & HUPCL))
+ hvsilib_write_mctrl(pv, 0);
+
+ /* Tear down the connection */
+ hvsi_send_close(pv);
+ }
+
+ if (pv->tty)
+ tty_kref_put(pv->tty);
+ pv->tty = NULL;
+}
+
+void hvsilib_init(struct hvsi_priv *pv,
+ int (*get_chars)(uint32_t termno, char *buf, int count),
+ int (*put_chars)(uint32_t termno, const char *buf,
+ int count),
+ int termno, int is_console)
+{
+ memset(pv, 0, sizeof(*pv));
+ pv->get_chars = get_chars;
+ pv->put_chars = put_chars;
+ pv->termno = termno;
+ pv->is_console = is_console;
+}
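
hvsi_lib.c's hvsi_check_packet() frames the byte stream it gets from the hypervisor: the first byte must be a valid packet type (>= 0xfc, otherwise the whole buffer is ditched to resync), the second byte is the packet length, and nothing is processed until that many bytes have arrived. A stand-alone sketch of the same framing rules; struct frame_buf and the helpers are illustrative, not the library's API.

#include <linux/types.h>
#include <linux/string.h>

#define BUF_SZ 256			/* illustrative buffer size */

struct frame_buf {
	u8 data[BUF_SZ];
	int len;			/* valid bytes currently buffered */
};

/*
 * Illustrative framer: returns the length of one complete packet at the
 * front of the buffer, 0 if more bytes are needed, -1 if we lost sync
 * and flushed the buffer.
 */
static int frame_next_packet(struct frame_buf *fb)
{
	u8 type, plen;

	if (fb->len < 2)
		return 0;		/* not even a full header yet */
	type = fb->data[0];
	plen = fb->data[1];
	if (type < 0xfc) {		/* invalid header: ditch and resync */
		fb->len = 0;
		return -1;
	}
	if (fb->len < plen)
		return 0;		/* packet still incomplete */
	return plen;			/* caller may consume plen bytes */
}

/* After handling one packet, the caller slides the remainder down. */
static void frame_consume(struct frame_buf *fb, int plen)
{
	fb->len -= plen;
	memmove(fb->data, &fb->data[plen], fb->len);
}
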
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index ba679ce0a77..d15a071b1a5 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -44,6 +44,7 @@
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/slab.h>
+#include <linux/ratelimit.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -242,8 +243,8 @@ static void moxa_wait_finish(void __iomem *ofsAddr)
while (readw(ofsAddr + FuncCode) != 0)
if (time_after(jiffies, end))
return;
- if (readw(ofsAddr + FuncCode) != 0 && printk_ratelimit())
- printk(KERN_WARNING "moxa function expired\n");
+ if (readw(ofsAddr + FuncCode) != 0)
+ printk_ratelimited(KERN_WARNING "moxa function expired\n");
}
static void moxafunc(void __iomem *ofsAddr, u16 cmd, u16 arg)
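
The moxa.c (and mxser.c, below) hunks replace open-coded "if (printk_ratelimit()) printk(...)" pairs with printk_ratelimited(), which keeps a ratelimit state per call site and drops the extra branch. A tiny hedged example; the device name and message are made up.

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* Hypothetical poll helper: the warning fires at most a few times per
 * ratelimit interval even if the condition persists for thousands of calls. */
static void my_poll_status(int status)
{
	if (status != 0)
		printk_ratelimited(KERN_WARNING
				   "mydev: function still busy (%d)\n", status);
}
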
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index d188f378684..7fc8c02fea6 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -39,6 +39,7 @@
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/slab.h>
+#include <linux/ratelimit.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -1490,8 +1491,7 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
switch (cmd) {
case MOXA_GET_MAJOR:
- if (printk_ratelimit())
- printk(KERN_WARNING "mxser: '%s' uses deprecated ioctl "
+ printk_ratelimited(KERN_WARNING "mxser: '%s' uses deprecated ioctl "
"%x (GET_MAJOR), fix your userspace\n",
current->comm, cmd);
return put_user(ttymajor, (int __user *)argp);
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 09e8c7d53af..8a50e4eebf1 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -58,6 +58,10 @@
#include <linux/serial.h>
#include <linux/kfifo.h>
#include <linux/skbuff.h>
+#include <net/arp.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/gsmmux.h>
static int debug;
@@ -77,8 +81,24 @@ module_param(debug, int, 0600);
* Semi-arbitrary buffer size limits. 0710 is normally run with 32-64 byte
* limits so this is plenty
*/
-#define MAX_MRU 512
-#define MAX_MTU 512
+#define MAX_MRU 1500
+#define MAX_MTU 1500
+#define GSM_NET_TX_TIMEOUT (HZ*10)
+
+/**
+ * struct gsm_mux_net - network interface
+ * @ref: reference count
+ * @dlci: struct gsm_dlci the interface is attached to
+ * @stats: struct net_device_stats for the interface
+ *
+ * Created when the net interface is initialized.
+ **/
+struct gsm_mux_net {
+ struct kref ref;
+ struct gsm_dlci *dlci;
+ struct net_device_stats stats;
+};
+
+#define STATS(net) (((struct gsm_mux_net *)netdev_priv(net))->stats)
/*
* Each block of data we have queued to go out is in the form of
@@ -113,6 +133,8 @@ struct gsm_dlci {
#define DLCI_OPENING 1 /* Sending SABM not seen UA */
#define DLCI_OPEN 2 /* SABM/UA complete */
#define DLCI_CLOSING 3 /* Sending DISC not seen UA/DM */
+ struct kref ref; /* freed from port or mux close */
+ struct mutex mutex;
/* Link layer */
spinlock_t lock; /* Protects the internal state */
@@ -123,6 +145,7 @@ struct gsm_dlci {
struct kfifo *fifo; /* Queue fifo for the DLCI */
struct kfifo _fifo; /* For new fifo API porting only */
int adaption; /* Adaption layer in use */
+ int prev_adaption;
u32 modem_rx; /* Our incoming virtual modem lines */
u32 modem_tx; /* Our outgoing modem lines */
int dead; /* Refuse re-open */
@@ -134,6 +157,8 @@ struct gsm_dlci {
struct sk_buff_head skb_list; /* Queued frames */
/* Data handling callback */
void (*data)(struct gsm_dlci *dlci, u8 *data, int len);
+ void (*prev_data)(struct gsm_dlci *dlci, u8 *data, int len);
+ struct net_device *net; /* network interface, if created */
};
/* DLCI 0, 62/63 are special or reserved, see gsmtty_open */
@@ -169,6 +194,8 @@ struct gsm_control {
struct gsm_mux {
struct tty_struct *tty; /* The tty our ldisc is bound to */
spinlock_t lock;
+ unsigned int num;
+ struct kref ref;
/* Events on the GSM channel */
wait_queue_head_t event;
@@ -250,6 +277,8 @@ struct gsm_mux {
static struct gsm_mux *gsm_mux[MAX_MUX]; /* GSM muxes */
static spinlock_t gsm_mux_lock;
+static struct tty_driver *gsm_tty_driver;
+
/*
* This section of the driver logic implements the GSM encodings
* both the basic and the 'advanced'. Reliable transport is not
@@ -875,10 +904,13 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
*dp++ = last << 7 | first << 6 | 1; /* EA */
len--;
}
- memcpy(dp, skb_pull(dlci->skb, len), len);
+ memcpy(dp, dlci->skb->data, len);
+ skb_pull(dlci->skb, len);
__gsm_data_queue(dlci, msg);
- if (last)
+ if (last) {
+ kfree_skb(dlci->skb);
dlci->skb = NULL;
+ }
return size;
}
@@ -911,7 +943,7 @@ static void gsm_dlci_data_sweep(struct gsm_mux *gsm)
i++;
continue;
}
- if (dlci->adaption < 3)
+ if (dlci->adaption < 3 && !dlci->net)
len = gsm_dlci_data_output(gsm, dlci);
else
len = gsm_dlci_data_output_framed(gsm, dlci);
@@ -938,9 +970,12 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
/* If we have nothing running then we need to fire up */
- if (dlci->gsm->tx_bytes == 0)
- gsm_dlci_data_output(dlci->gsm, dlci);
- else if (dlci->gsm->tx_bytes < TX_THRESH_LO)
+ if (dlci->gsm->tx_bytes == 0) {
+ if (dlci->net)
+ gsm_dlci_data_output_framed(dlci->gsm, dlci);
+ else
+ gsm_dlci_data_output(dlci->gsm, dlci);
+ } else if (dlci->gsm->tx_bytes < TX_THRESH_LO)
gsm_dlci_data_sweep(dlci->gsm);
spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
}
@@ -984,10 +1019,22 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data,
*/
static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
- u32 modem)
+ u32 modem, int clen)
{
int mlines = 0;
- u8 brk = modem >> 6;
+ u8 brk = 0;
+
+ /* The modem status command can either contain one octet (v.24 signals)
+ or two octets (v.24 signals + break signals). The length field will
+ either be 2 or 3 respectively. This is specified in section
+ 5.4.6.3.7 of the 27.010 mux spec. */
+
+ if (clen == 2)
+ modem = modem & 0x7f;
+ else {
+ brk = modem & 0x7f;
+ modem = (modem >> 7) & 0x7f;
+ }
/* Flow control/ready to communicate */
if (modem & MDM_FC) {
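
As a standalone illustration of the octet handling described in the comment above, the sketch below mirrors the same decode in user space; it assumes `modem` is packed the way the n_gsm control parser builds it (each EA-stripped 7-bit octet shifted in, so with a length of 3 the V.24 bits sit in bits 7..13 and the break octet in bits 0..6), and the sample values are made up.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the clen == 2 vs clen == 3 handling above. */
static void decode_modem_status(uint32_t modem, int clen)
{
	uint32_t brk = 0;

	if (clen == 2) {		/* one octet: V.24 signals only */
		modem &= 0x7f;
	} else {			/* two octets: V.24 signals + break */
		brk = modem & 0x7f;
		modem = (modem >> 7) & 0x7f;
	}
	printf("clen=%d v24=0x%02x break=0x%02x\n",
	       clen, (unsigned int)modem, (unsigned int)brk);
}

int main(void)
{
	decode_modem_status(0x0d, 2);			/* made-up single octet */
	decode_modem_status((0x0d << 7) | 0x01, 3);	/* made-up octet pair */
	return 0;
}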
@@ -1061,7 +1108,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
return;
}
tty = tty_port_tty_get(&dlci->port);
- gsm_process_modem(tty, dlci, modem);
+ gsm_process_modem(tty, dlci, modem, clen);
if (tty) {
tty_wakeup(tty);
tty_kref_put(tty);
@@ -1482,12 +1529,13 @@ static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
* open we shovel the bits down it, if not we drop them.
*/
-static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len)
+static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int clen)
{
/* krefs .. */
struct tty_port *port = &dlci->port;
struct tty_struct *tty = tty_port_tty_get(port);
unsigned int modem = 0;
+ int len = clen;
if (debug & 16)
pr_debug("%d bytes for tty %p\n", len, tty);
@@ -1507,7 +1555,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, u8 *data, int len)
if (len == 0)
return;
}
- gsm_process_modem(tty, dlci, modem);
+ gsm_process_modem(tty, dlci, modem, clen);
/* Line state will go via DLCI 0 controls only */
case 1:
default:
@@ -1574,6 +1622,8 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
if (dlci == NULL)
return NULL;
spin_lock_init(&dlci->lock);
+ kref_init(&dlci->ref);
+ mutex_init(&dlci->mutex);
dlci->fifo = &dlci->_fifo;
if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
kfree(dlci);
@@ -1599,26 +1649,52 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
}
/**
- * gsm_dlci_free - release DLCI
+ * gsm_dlci_free - free DLCI
+ * @dlci: DLCI to free
+ *
+ * Free up a DLCI.
+ *
+ * Can sleep.
+ */
+static void gsm_dlci_free(struct kref *ref)
+{
+ struct gsm_dlci *dlci = container_of(ref, struct gsm_dlci, ref);
+
+ del_timer_sync(&dlci->t1);
+ dlci->gsm->dlci[dlci->addr] = NULL;
+ kfifo_free(dlci->fifo);
+ while ((dlci->skb = skb_dequeue(&dlci->skb_list)))
+ kfree_skb(dlci->skb);
+ kfree(dlci);
+}
+
+static inline void dlci_get(struct gsm_dlci *dlci)
+{
+ kref_get(&dlci->ref);
+}
+
+static inline void dlci_put(struct gsm_dlci *dlci)
+{
+ kref_put(&dlci->ref, gsm_dlci_free);
+}
+
+/**
+ * gsm_dlci_release - release DLCI
* @dlci: DLCI to destroy
*
- * Free up a DLCI. Currently to keep the lifetime rules sane we only
- * clean up DLCI objects when the MUX closes rather than as the port
- * is closed down on both the tty and mux levels.
+ * Release a DLCI. Actual free is deferred until either
+ * mux is closed or tty is closed - whichever is last.
*
* Can sleep.
*/
-static void gsm_dlci_free(struct gsm_dlci *dlci)
+static void gsm_dlci_release(struct gsm_dlci *dlci)
{
struct tty_struct *tty = tty_port_tty_get(&dlci->port);
if (tty) {
tty_vhangup(tty);
tty_kref_put(tty);
}
- del_timer_sync(&dlci->t1);
- dlci->gsm->dlci[dlci->addr] = NULL;
- kfifo_free(dlci->fifo);
- kfree(dlci);
+ dlci_put(dlci);
}
/*
@@ -1809,10 +1885,6 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
break;
case GSM_FCS: /* FCS follows the packet */
gsm->received_fcs = c;
- if (c == GSM0_SOF) {
- gsm->state = GSM_SEARCH;
- break;
- }
gsm_queue(gsm);
gsm->state = GSM_SSOF;
break;
@@ -1956,7 +2028,7 @@ void gsm_cleanup_mux(struct gsm_mux *gsm)
/* Free up any link layer users */
for (i = 0; i < NUM_DLCI; i++)
if (gsm->dlci[i])
- gsm_dlci_free(gsm->dlci[i]);
+ gsm_dlci_release(gsm->dlci[i]);
/* Now wipe the queues */
for (txq = gsm->tx_head; txq != NULL; txq = gsm->tx_head) {
gsm->tx_head = txq->next;
@@ -1996,6 +2068,7 @@ int gsm_activate_mux(struct gsm_mux *gsm)
spin_lock(&gsm_mux_lock);
for (i = 0; i < MAX_MUX; i++) {
if (gsm_mux[i] == NULL) {
+ gsm->num = i;
gsm_mux[i] = gsm;
break;
}
@@ -2016,8 +2089,7 @@ EXPORT_SYMBOL_GPL(gsm_activate_mux);
* gsm_free_mux - free up a mux
* @mux: mux to free
*
- * Dispose of allocated resources for a dead mux. No refcounting
- * at present so the mux must be truly dead.
+ * Dispose of allocated resources for a dead mux
*/
void gsm_free_mux(struct gsm_mux *gsm)
{
@@ -2028,6 +2100,28 @@ void gsm_free_mux(struct gsm_mux *gsm)
EXPORT_SYMBOL_GPL(gsm_free_mux);
/**
+ * gsm_free_muxr - free up a mux
+ * @mux: mux to free
+ *
+ * Dispose of allocated resources for a dead mux
+ */
+static void gsm_free_muxr(struct kref *ref)
+{
+ struct gsm_mux *gsm = container_of(ref, struct gsm_mux, ref);
+ gsm_free_mux(gsm);
+}
+
+static inline void mux_get(struct gsm_mux *gsm)
+{
+ kref_get(&gsm->ref);
+}
+
+static inline void mux_put(struct gsm_mux *gsm)
+{
+ kref_put(&gsm->ref, gsm_free_muxr);
+}
+
+/**
* gsm_alloc_mux - allocate a mux
*
* Creates a new mux ready for activation.
@@ -2050,12 +2144,12 @@ struct gsm_mux *gsm_alloc_mux(void)
return NULL;
}
spin_lock_init(&gsm->lock);
+ kref_init(&gsm->ref);
gsm->t1 = T1;
gsm->t2 = T2;
gsm->n2 = N2;
gsm->ftype = UIH;
- gsm->initiator = 0;
gsm->adaption = 1;
gsm->encoding = 1;
gsm->mru = 64; /* Default to encoding 1 so these should be 64 */
@@ -2101,13 +2195,20 @@ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
- int ret;
+ int ret, i;
+ int base = gsm->num << 6; /* Base for this MUX */
gsm->tty = tty_kref_get(tty);
gsm->output = gsmld_output;
ret = gsm_activate_mux(gsm);
if (ret != 0)
tty_kref_put(gsm->tty);
+ else {
+ /* Don't register device 0 - this is the control channel and not
+ a usable tty interface */
+ for (i = 1; i < NUM_DLCI; i++)
+ tty_register_device(gsm_tty_driver, base + i, NULL);
+ }
return ret;
}
@@ -2122,7 +2223,12 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
{
+ int i;
+ int base = gsm->num << 6; /* Base for this MUX */
+
WARN_ON(tty != gsm->tty);
+ for (i = 1; i < NUM_DLCI; i++)
+ tty_unregister_device(gsm_tty_driver, base + i);
gsm_cleanup_mux(gsm);
tty_kref_put(gsm->tty);
gsm->tty = NULL;
@@ -2210,7 +2316,7 @@ static void gsmld_close(struct tty_struct *tty)
gsmld_flush_buffer(tty);
/* Do other clean up here */
- gsm_free_mux(gsm);
+ mux_put(gsm);
}
/**
@@ -2462,6 +2568,220 @@ static int gsmld_ioctl(struct tty_struct *tty, struct file *file,
}
}
+/*
+ * Network interface
+ *
+ */
+
+static int gsm_mux_net_open(struct net_device *net)
+{
+ pr_debug("%s called\n", __func__);
+ netif_start_queue(net);
+ return 0;
+}
+
+static int gsm_mux_net_close(struct net_device *net)
+{
+ netif_stop_queue(net);
+ return 0;
+}
+
+static struct net_device_stats *gsm_mux_net_get_stats(struct net_device *net)
+{
+ return &((struct gsm_mux_net *)netdev_priv(net))->stats;
+}
+static void dlci_net_free(struct gsm_dlci *dlci)
+{
+ if (!dlci->net) {
+ WARN_ON(1);
+ return;
+ }
+ dlci->adaption = dlci->prev_adaption;
+ dlci->data = dlci->prev_data;
+ free_netdev(dlci->net);
+ dlci->net = NULL;
+}
+static void net_free(struct kref *ref)
+{
+ struct gsm_mux_net *mux_net;
+ struct gsm_dlci *dlci;
+
+ mux_net = container_of(ref, struct gsm_mux_net, ref);
+ dlci = mux_net->dlci;
+
+ if (dlci->net) {
+ unregister_netdev(dlci->net);
+ dlci_net_free(dlci);
+ }
+}
+
+static inline void muxnet_get(struct gsm_mux_net *mux_net)
+{
+ kref_get(&mux_net->ref);
+}
+
+static inline void muxnet_put(struct gsm_mux_net *mux_net)
+{
+ kref_put(&mux_net->ref, net_free);
+}
+
+static int gsm_mux_net_start_xmit(struct sk_buff *skb,
+ struct net_device *net)
+{
+ struct gsm_mux_net *mux_net = (struct gsm_mux_net *)netdev_priv(net);
+ struct gsm_dlci *dlci = mux_net->dlci;
+ muxnet_get(mux_net);
+
+ skb_queue_head(&dlci->skb_list, skb);
+ STATS(net).tx_packets++;
+ STATS(net).tx_bytes += skb->len;
+ gsm_dlci_data_kick(dlci);
+ /* And tell the kernel when the last transmit started. */
+ net->trans_start = jiffies;
+ muxnet_put(mux_net);
+ return NETDEV_TX_OK;
+}
+
+/* called when a packet did not get acked within the watchdog timeout */
+static void gsm_mux_net_tx_timeout(struct net_device *net)
+{
+ /* Tell syslog we are hosed. */
+ dev_dbg(&net->dev, "Tx timed out.\n");
+
+ /* Update statistics */
+ STATS(net).tx_errors++;
+}
+
+static void gsm_mux_rx_netchar(struct gsm_dlci *dlci,
+ unsigned char *in_buf, int size)
+{
+ struct net_device *net = dlci->net;
+ struct sk_buff *skb;
+ struct gsm_mux_net *mux_net = (struct gsm_mux_net *)netdev_priv(net);
+ muxnet_get(mux_net);
+
+ /* Allocate an sk_buff */
+ skb = dev_alloc_skb(size + NET_IP_ALIGN);
+ if (!skb) {
+ /* We got no receive buffer. */
+ STATS(net).rx_dropped++;
+ muxnet_put(mux_net);
+ return;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ memcpy(skb_put(skb, size), in_buf, size);
+
+ skb->dev = net;
+ skb->protocol = __constant_htons(ETH_P_IP);
+
+ /* Ship it off to the kernel */
+ netif_rx(skb);
+
+ /* update our statistics */
+ STATS(net).rx_packets++;
+ STATS(net).rx_bytes += size;
+ muxnet_put(mux_net);
+ return;
+}
+
+int gsm_change_mtu(struct net_device *net, int new_mtu)
+{
+ struct gsm_mux_net *mux_net = (struct gsm_mux_net *)netdev_priv(net);
+ if ((new_mtu < 8) || (new_mtu > mux_net->dlci->gsm->mtu))
+ return -EINVAL;
+ net->mtu = new_mtu;
+ return 0;
+}
+
+static void gsm_mux_net_init(struct net_device *net)
+{
+ static const struct net_device_ops gsm_netdev_ops = {
+ .ndo_open = gsm_mux_net_open,
+ .ndo_stop = gsm_mux_net_close,
+ .ndo_start_xmit = gsm_mux_net_start_xmit,
+ .ndo_tx_timeout = gsm_mux_net_tx_timeout,
+ .ndo_get_stats = gsm_mux_net_get_stats,
+ .ndo_change_mtu = gsm_change_mtu,
+ };
+
+ net->netdev_ops = &gsm_netdev_ops;
+
+ /* fill in the other fields */
+ net->watchdog_timeo = GSM_NET_TX_TIMEOUT;
+ net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ net->type = ARPHRD_NONE;
+ net->tx_queue_len = 10;
+}
+
+
+/* caller holds the dlci mutex */
+static void gsm_destroy_network(struct gsm_dlci *dlci)
+{
+ struct gsm_mux_net *mux_net;
+
+ pr_debug("destroy network interface");
+ if (!dlci->net)
+ return;
+ mux_net = (struct gsm_mux_net *)netdev_priv(dlci->net);
+ muxnet_put(mux_net);
+}
+
+
+/* caller holds the dlci mutex */
+static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc)
+{
+ char *netname;
+ int retval = 0;
+ struct net_device *net;
+ struct gsm_mux_net *mux_net;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* Already in a non-tty mode */
+ if (dlci->adaption > 2)
+ return -EBUSY;
+
+ if (nc->protocol != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+
+ if (nc->adaption != 3 && nc->adaption != 4)
+ return -EPROTONOSUPPORT;
+
+ pr_debug("create network interface");
+
+ netname = "gsm%d";
+ if (nc->if_name[0] != '\0')
+ netname = nc->if_name;
+ net = alloc_netdev(sizeof(struct gsm_mux_net),
+ netname,
+ gsm_mux_net_init);
+ if (!net) {
+ pr_err("alloc_netdev failed");
+ return -ENOMEM;
+ }
+ net->mtu = dlci->gsm->mtu;
+ mux_net = (struct gsm_mux_net *)netdev_priv(net);
+ mux_net->dlci = dlci;
+ kref_init(&mux_net->ref);
+ strncpy(nc->if_name, net->name, IFNAMSIZ); /* return net name */
+
+ /* reconfigure dlci for network */
+ dlci->prev_adaption = dlci->adaption;
+ dlci->prev_data = dlci->data;
+ dlci->adaption = nc->adaption;
+ dlci->data = gsm_mux_rx_netchar;
+ dlci->net = net;
+
+ pr_debug("register netdev");
+ retval = register_netdev(net);
+ if (retval) {
+ pr_err("network register fail %d\n", retval);
+ dlci_net_free(dlci);
+ return retval;
+ }
+ return net->ifindex; /* return network index */
+}
/* Line discipline for real tty */
struct tty_ldisc_ops tty_ldisc_packet = {
@@ -2565,6 +2885,9 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
port = &dlci->port;
port->count++;
tty->driver_data = dlci;
+ dlci_get(dlci);
+ dlci_get(dlci->gsm->dlci[0]);
+ mux_get(dlci->gsm);
tty_port_tty_set(port, tty);
dlci->modem_rx = 0;
@@ -2580,13 +2903,23 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
static void gsmtty_close(struct tty_struct *tty, struct file *filp)
{
struct gsm_dlci *dlci = tty->driver_data;
+ struct gsm_mux *gsm;
+
if (dlci == NULL)
return;
+ mutex_lock(&dlci->mutex);
+ gsm_destroy_network(dlci);
+ mutex_unlock(&dlci->mutex);
+ gsm = dlci->gsm;
if (tty_port_close_start(&dlci->port, tty, filp) == 0)
- return;
+ goto out;
gsm_dlci_begin_close(dlci);
tty_port_close_end(&dlci->port, tty);
tty_port_tty_set(&dlci->port, NULL);
+out:
+ dlci_put(dlci);
+ dlci_put(gsm->dlci[0]);
+ mux_put(gsm);
}
static void gsmtty_hangup(struct tty_struct *tty)
@@ -2663,7 +2996,32 @@ static int gsmtty_tiocmset(struct tty_struct *tty,
static int gsmtty_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
- return -ENOIOCTLCMD;
+ struct gsm_dlci *dlci = tty->driver_data;
+ struct gsm_netconfig nc;
+ int index;
+
+ switch (cmd) {
+ case GSMIOC_ENABLE_NET:
+ if (copy_from_user(&nc, (void __user *)arg, sizeof(nc)))
+ return -EFAULT;
+ nc.if_name[IFNAMSIZ-1] = '\0';
+ /* return net interface index or error code */
+ mutex_lock(&dlci->mutex);
+ index = gsm_create_network(dlci, &nc);
+ mutex_unlock(&dlci->mutex);
+ if (copy_to_user((void __user *)arg, &nc, sizeof(nc)))
+ return -EFAULT;
+ return index;
+ case GSMIOC_DISABLE_NET:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ mutex_lock(&dlci->mutex);
+ gsm_destroy_network(dlci);
+ mutex_unlock(&dlci->mutex);
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
}
static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old)
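
A minimal user-space sketch of how these two ioctls are intended to be driven, assuming <linux/gsmmux.h> exports struct gsm_netconfig with the adaption/protocol/if_name fields used above; the /dev/gsmtty1 node name is hypothetical and CAP_NET_ADMIN is required, as checked in gsm_create_network():

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>		/* htons() */
#include <linux/if_ether.h>	/* ETH_P_IP */
#include <linux/gsmmux.h>	/* struct gsm_netconfig, GSMIOC_*_NET */

int main(void)
{
	struct gsm_netconfig nc;
	int fd, ifindex;

	fd = open("/dev/gsmtty1", O_RDWR | O_NOCTTY);	/* hypothetical node */
	if (fd < 0)
		return 1;

	memset(&nc, 0, sizeof(nc));
	nc.adaption = 3;		/* must be 3 or 4, per gsm_create_network() */
	nc.protocol = htons(ETH_P_IP);	/* only IP is accepted */
	strcpy(nc.if_name, "gsm%d");	/* kernel writes back the final name */

	ifindex = ioctl(fd, GSMIOC_ENABLE_NET, &nc);
	if (ifindex < 0)
		perror("GSMIOC_ENABLE_NET");
	else
		printf("created %s (ifindex %d)\n", nc.if_name, ifindex);

	/* ... configure and use the interface, then tear it down ... */
	ioctl(fd, GSMIOC_DISABLE_NET, 0);
	close(fd);
	return 0;
}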
@@ -2712,7 +3070,6 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
return gsmtty_modem_update(dlci, encode);
}
-static struct tty_driver *gsm_tty_driver;
/* Virtual ttys for the demux */
static const struct tty_operations gsmtty_ops = {
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 0ad32888091..39d6ab6551e 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -185,7 +185,6 @@ static void reset_buffer_flags(struct tty_struct *tty)
tty->canon_head = tty->canon_data = tty->erasing = 0;
memset(&tty->read_flags, 0, sizeof tty->read_flags);
n_tty_set_room(tty);
- check_unthrottle(tty);
}
/**
@@ -1587,6 +1586,7 @@ static int n_tty_open(struct tty_struct *tty)
return -ENOMEM;
}
reset_buffer_flags(tty);
+ tty_unthrottle(tty);
tty->column = 0;
n_tty_set_termios(tty, NULL);
tty->minimum_to_wake = 1;
@@ -1815,6 +1815,7 @@ do_it_again:
/* FIXME: does n_tty_set_room need locking ? */
n_tty_set_room(tty);
timeout = schedule_timeout(timeout);
+ BUG_ON(!tty->read_buf);
continue;
}
__set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 13043e8d37f..6a1241c7f84 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -83,7 +83,7 @@
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/unaligned.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250.c
index b40f7b90c81..f2dfec82faf 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -81,7 +81,7 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
#define DEBUG_INTR(fmt...) do { } while (0)
#endif
-#define PASS_LIMIT 256
+#define PASS_LIMIT 512
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
@@ -1107,7 +1107,7 @@ static void autoconfig_16550a(struct uart_8250_port *up)
*/
DEBUG_AUTOCONF("Xscale ");
up->port.type = PORT_XSCALE;
- up->capabilities |= UART_CAP_UUE;
+ up->capabilities |= UART_CAP_UUE | UART_CAP_RTOIE;
return;
}
} else {
@@ -3318,6 +3318,7 @@ void serial8250_unregister_port(int line)
uart->port.flags &= ~UPF_BOOT_AUTOCONF;
uart->port.type = PORT_UNKNOWN;
uart->port.dev = &serial8250_isa_devs->dev;
+ uart->capabilities = uart_config[uart->port.type].flags;
uart_add_one_port(&serial8250_reg, &uart->port);
} else {
uart->port.dev = NULL;
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
index 4b4968a294b..6b887d90a20 100644
--- a/drivers/tty/serial/8250_pci.c
+++ b/drivers/tty/serial/8250_pci.c
@@ -39,6 +39,7 @@ struct pci_serial_quirk {
u32 device;
u32 subvendor;
u32 subdevice;
+ int (*probe)(struct pci_dev *dev);
int (*init)(struct pci_dev *dev);
int (*setup)(struct serial_private *,
const struct pciserial_board *,
@@ -56,6 +57,9 @@ struct serial_private {
int line[0];
};
+static int pci_default_setup(struct serial_private*,
+ const struct pciserial_board*, struct uart_port*, int);
+
static void moan_device(const char *str, struct pci_dev *dev)
{
printk(KERN_WARNING
@@ -571,6 +575,28 @@ static const struct timedia_struct {
{ 8, timedia_eight_port }
};
+/*
+ * There are nearly 70 different Timedia/SUNIX PCI serial devices. Instead of
+ * listing them individually, this driver merely grabs them all with
+ * PCI_ANY_ID. Some of these devices, however, also feature a parallel port,
+ * and should be left free to be claimed by parport_serial instead.
+ */
+static int pci_timedia_probe(struct pci_dev *dev)
+{
+ /*
+ * Check the third digit of the subdevice ID
+ * (0,2,3,5,6: serial only -- 7,8,9: serial + parallel)
+ */
+ if ((dev->subsystem_device & 0x00f0) >= 0x70) {
+ dev_info(&dev->dev,
+ "ignoring Timedia subdevice %04x for parport_serial\n",
+ dev->subsystem_device);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
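
A standalone sketch of the digit test above (the subdevice IDs used here are made up for illustration):

#include <stdio.h>

/* Same test as pci_timedia_probe(): bits 4..7 of the subdevice ID tell
 * serial-only cards (0,2,3,5,6) apart from serial+parallel ones (7,8,9). */
static int timedia_is_serial_only(unsigned short subdevice)
{
	return (subdevice & 0x00f0) < 0x70;
}

int main(void)
{
	unsigned short ids[] = { 0x5027, 0x5079 };	/* made-up examples */
	unsigned int i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("subdevice %04x: %s\n", ids[i],
		       timedia_is_serial_only(ids[i]) ?
		       "serial only, claim here" :
		       "has parallel port, leave to parport_serial");
	return 0;
}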
static int pci_timedia_init(struct pci_dev *dev)
{
const unsigned short *ids;
@@ -743,7 +769,7 @@ pci_ni8430_setup(struct serial_private *priv,
len = pci_resource_len(priv->dev, bar);
p = ioremap_nocache(base, len);
- /* enable the transciever */
+ /* enable the transceiver */
writeb(readb(p + offset + NI8430_PORTCON) | NI8430_PORTCON_TXVR_ENABLE,
p + offset + NI8430_PORTCON);
@@ -752,6 +778,62 @@ pci_ni8430_setup(struct serial_private *priv,
return setup_port(priv, port, bar, offset, board->reg_shift);
}
+static int pci_netmos_9900_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ unsigned int bar;
+
+ if ((priv->dev->subsystem_device & 0xff00) == 0x3000) {
+ /* netmos apparently orders BARs by datasheet layout, so serial
+ * ports get BARs 0 and 3 (or 1 and 4 for memmapped)
+ */
+ bar = 3 * idx;
+
+ return setup_port(priv, port, bar, 0, board->reg_shift);
+ } else {
+ return pci_default_setup(priv, board, port, idx);
+ }
+}
+
+/* the 99xx series comes with a range of device IDs and a variety
+ * of capabilities:
+ *
+ * 9900 has varying capabilities and can cascade to sub-controllers
+ * (cascading should be purely internal)
+ * 9904 is hardwired with 4 serial ports
+ * 9912 and 9922 are hardwired with 2 serial ports
+ */
+static int pci_netmos_9900_numports(struct pci_dev *dev)
+{
+ unsigned int c = dev->class;
+ unsigned int pi;
+ unsigned short sub_serports;
+
+ pi = (c & 0xff);
+
+ if (pi == 2) {
+ return 1;
+ } else if ((pi == 0) &&
+ (dev->device == PCI_DEVICE_ID_NETMOS_9900)) {
+ /* two possibilities: 0x30ps encodes number of parallel and
+ * serial ports, or 0x1000 indicates *something*. This is not
+ * immediately obvious, since the 2s1p+4s configuration seems
+ * to offer all functionality on functions 0..2, while still
+ * advertising the same function 3 as the 4s+2s1p config.
+ */
+ sub_serports = dev->subsystem_device & 0xf;
+ if (sub_serports > 0) {
+ return sub_serports;
+ } else {
+ printk(KERN_NOTICE "NetMos/Mostech serial driver ignoring port on ambiguous config.\n");
+ return 0;
+ }
+ }
+
+ moan_device("unknown NetMos/Mostech program interface", dev);
+ return 0;
+}
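
A standalone sketch of the decode above, assuming the 0x30ps subsystem-ID convention described in the comment (one hex digit for parallel ports, one for serial ports); the 0x3012 value below is illustrative, and the BAR stride of 3 matches pci_netmos_9900_setup() above.

#include <stdio.h>

/* Same logic as pci_netmos_9900_numports(): programming interface 2 means a
 * single-port function; PI 0 on a 9900 means the low nibble of the 0x30ps
 * subsystem ID carries the serial port count (0 => ambiguous, skip). */
static int netmos_9900_numports(unsigned int prog_if, unsigned short subsys)
{
	if (prog_if == 2)
		return 1;
	if (prog_if == 0)
		return subsys & 0xf;
	return 0;
}

int main(void)
{
	unsigned short subsys = 0x3012;	/* illustrative: 1 parallel, 2 serial */
	int nports = netmos_9900_numports(0, subsys);
	int idx;

	printf("serial ports: %d\n", nports);
	for (idx = 0; idx < nports; idx++)	/* BARs 0 and 3, as in the setup hook */
		printf("  port %d -> BAR %d\n", idx, 3 * idx);
	return 0;
}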
static int pci_netmos_init(struct pci_dev *dev)
{
@@ -761,12 +843,28 @@ static int pci_netmos_init(struct pci_dev *dev)
if ((dev->device == PCI_DEVICE_ID_NETMOS_9901) ||
(dev->device == PCI_DEVICE_ID_NETMOS_9865))
return 0;
+
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return 0;
+ switch (dev->device) { /* FALLTHROUGH on all */
+ case PCI_DEVICE_ID_NETMOS_9904:
+ case PCI_DEVICE_ID_NETMOS_9912:
+ case PCI_DEVICE_ID_NETMOS_9922:
+ case PCI_DEVICE_ID_NETMOS_9900:
+ num_serial = pci_netmos_9900_numports(dev);
+ break;
+
+ default:
+ if (num_serial == 0) {
+ moan_device("unknown NetMos/Mostech device", dev);
+ }
+ }
+
if (num_serial == 0)
return -ENODEV;
+
return num_serial;
}
@@ -973,7 +1071,7 @@ ce4100_serial_setup(struct serial_private *priv,
static int
pci_omegapci_setup(struct serial_private *priv,
- struct pciserial_board *board,
+ const struct pciserial_board *board,
struct uart_port *port, int idx)
{
return setup_port(priv, port, 2, idx * 8, 0);
@@ -994,6 +1092,15 @@ static int skip_tx_en_setup(struct serial_private *priv,
return pci_default_setup(priv, board, port, idx);
}
+static int pci_eg20t_init(struct pci_dev *dev)
+{
+#if defined(CONFIG_SERIAL_PCH_UART) || defined(CONFIG_SERIAL_PCH_UART_MODULE)
+ return -ENODEV;
+#else
+ return 0;
+#endif
+}
+
/* This should be in linux/pci_ids.h */
#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
@@ -1387,6 +1494,7 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.device = PCI_DEVICE_ID_TIMEDIA_1889,
.subvendor = PCI_VENDOR_ID_TIMEDIA,
.subdevice = PCI_ANY_ID,
+ .probe = pci_timedia_probe,
.init = pci_timedia_init,
.setup = pci_timedia_setup,
},
@@ -1417,7 +1525,7 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.init = pci_netmos_init,
- .setup = pci_default_setup,
+ .setup = pci_netmos_9900_setup,
},
/*
* For Oxford Semiconductor Tornado based devices
@@ -1446,6 +1554,56 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.init = pci_oxsemi_tornado_init,
.setup = pci_default_setup,
},
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8811,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8812,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8813,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x8814,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x8027,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x8028,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x8029,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x800C,
+ .init = pci_eg20t_init,
+ },
+ {
+ .vendor = 0x10DB,
+ .device = 0x800D,
+ .init = pci_eg20t_init,
+ },
/*
* Cronyx Omega PCI (PLX-chip based)
*/
@@ -1644,6 +1802,7 @@ enum pci_board_num_t {
pbn_ADDIDATA_PCIe_8_3906250,
pbn_ce4100_1_115200,
pbn_omegapci,
+ pbn_NETMOS9900_2s_115200,
};
/*
@@ -2345,6 +2504,11 @@ static struct pciserial_board pci_boards[] __devinitdata = {
.base_baud = 115200,
.uart_offset = 0x200,
},
+ [pbn_NETMOS9900_2s_115200] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 115200,
+ },
};
static const struct pci_device_id softmodem_blacklist[] = {
@@ -2581,11 +2745,19 @@ EXPORT_SYMBOL_GPL(pciserial_resume_ports);
static int __devinit
pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
{
+ struct pci_serial_quirk *quirk;
struct serial_private *priv;
const struct pciserial_board *board;
struct pciserial_board tmp;
int rc;
+ quirk = find_quirk(dev);
+ if (quirk->probe) {
+ rc = quirk->probe(dev);
+ if (rc)
+ return rc;
+ }
+
if (ent->driver_data >= ARRAY_SIZE(pci_boards)) {
printk(KERN_ERR "pci_init_one: invalid driver_data: %ld\n",
ent->driver_data);
@@ -2595,6 +2767,7 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
board = &pci_boards[ent->driver_data];
rc = pci_enable_device(dev);
+ pci_save_state(dev);
if (rc)
return rc;
@@ -3826,6 +3999,27 @@ static struct pci_device_id serial_pci_tbl[] = {
0xA000, 0x1000,
0, 0, pbn_b0_1_115200 },
+ /* the 9901 is a rebranded 9912 */
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9912,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9922,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9904,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900,
+ 0xA000, 0x3002,
+ 0, 0, pbn_NETMOS9900_2s_115200 },
+
/*
* Best Connectivity PCI Multi I/O cards
*/
@@ -3868,6 +4062,51 @@ static struct pci_device_id serial_pci_tbl[] = {
{ 0, }
};
+static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev,
+ pci_channel_state_t state)
+{
+ struct serial_private *priv = pci_get_drvdata(dev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (priv)
+ pciserial_suspend_ports(priv);
+
+ pci_disable_device(dev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
+{
+ int rc;
+
+ rc = pci_enable_device(dev);
+
+ if (rc)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_restore_state(dev);
+ pci_save_state(dev);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void serial8250_io_resume(struct pci_dev *dev)
+{
+ struct serial_private *priv = pci_get_drvdata(dev);
+
+ if (priv)
+ pciserial_resume_ports(priv);
+}
+
+static struct pci_error_handlers serial8250_err_handler = {
+ .error_detected = serial8250_io_error_detected,
+ .slot_reset = serial8250_io_slot_reset,
+ .resume = serial8250_io_resume,
+};
+
static struct pci_driver serial_pci_driver = {
.name = "serial",
.probe = pciserial_init_one,
@@ -3877,6 +4116,7 @@ static struct pci_driver serial_pci_driver = {
.resume = pciserial_resume_one,
#endif
.id_table = serial_pci_tbl,
+ .err_handler = &serial8250_err_handler,
};
static int __init serial8250_pci_init(void)
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 636144cea93..4dcb37bbdf9 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -457,7 +457,6 @@ config SERIAL_SAMSUNG_UARTS_4
config SERIAL_SAMSUNG_UARTS
int
depends on ARM && PLAT_SAMSUNG
- default 2 if ARCH_S3C2400
default 6 if ARCH_S5P6450
default 4 if SERIAL_SAMSUNG_UARTS_4
default 3
@@ -489,13 +488,6 @@ config SERIAL_SAMSUNG_CONSOLE
your boot loader about how to pass options to the kernel at
boot time.)
-config SERIAL_S3C2400
- tristate "Samsung S3C2410 Serial port support"
- depends on ARM && SERIAL_SAMSUNG && CPU_S3C2400
- default y if CPU_S3C2400
- help
- Serial port support for the Samsung S3C2400 SoC
-
config SERIAL_S3C2410
tristate "Samsung S3C2410 Serial port support"
depends on SERIAL_SAMSUNG && CPU_S3C2410
@@ -519,13 +511,6 @@ config SERIAL_S3C2440
help
Serial port support for the Samsung S3C2440, S3C2416 and S3C2442 SoC
-config SERIAL_S3C24A0
- tristate "Samsung S3C24A0 Serial port support"
- depends on SERIAL_SAMSUNG && CPU_S3C24A0
- default y if CPU_S3C24A0
- help
- Serial port support for the Samsung S3C24A0 SoC
-
config SERIAL_S3C6400
tristate "Samsung S3C6400/S3C6410/S5P6440/S5P6450/S5PC100 Serial port support"
depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410 || CPU_S5P6440 || CPU_S5P6450 || CPU_S5PC100)
@@ -974,7 +959,7 @@ config SERIAL_IP22_ZILOG_CONSOLE
config SERIAL_SH_SCI
tristate "SuperH SCI(F) serial port support"
- depends on HAVE_CLK && (SUPERH || H8300 || ARCH_SHMOBILE)
+ depends on HAVE_CLK && (SUPERH || ARCH_SHMOBILE)
select SERIAL_CORE
config SERIAL_SH_SCI_NR_UARTS
@@ -1419,7 +1404,7 @@ config SERIAL_SC26XX
config SERIAL_SC26XX_CONSOLE
bool "Console on SC2681/SC2692 serial port"
- depends on SERIAL_SC26XX
+ depends on SERIAL_SC26XX=y
select SERIAL_CORE_CONSOLE
help
Support for Console on SC2681/SC2692 serial ports.
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index cb2628fee4c..83b4da6a106 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -38,11 +38,9 @@ obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o
obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o
obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
-obj-$(CONFIG_SERIAL_S3C2400) += s3c2400.o
obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o
obj-$(CONFIG_SERIAL_S3C2412) += s3c2412.o
obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o
-obj-$(CONFIG_SERIAL_S3C24A0) += s3c24a0.o
obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o
obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8dc0541feec..f5f6831b0a6 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -50,6 +50,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/sizes.h>
@@ -65,6 +66,30 @@
#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX (1 << 16)
+
+#define UART_WA_SAVE_NR 14
+
+static void pl011_lockup_wa(unsigned long data);
+static const u32 uart_wa_reg[UART_WA_SAVE_NR] = {
+ ST_UART011_DMAWM,
+ ST_UART011_TIMEOUT,
+ ST_UART011_LCRH_RX,
+ UART011_IBRD,
+ UART011_FBRD,
+ ST_UART011_LCRH_TX,
+ UART011_IFLS,
+ ST_UART011_XFCR,
+ ST_UART011_XON1,
+ ST_UART011_XON2,
+ ST_UART011_XOFF1,
+ ST_UART011_XOFF2,
+ UART011_CR,
+ UART011_IMSC
+};
+
+static u32 uart_wa_regdata[UART_WA_SAVE_NR];
+static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa, 0);
+
/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
unsigned int ifls;
@@ -72,6 +97,7 @@ struct vendor_data {
unsigned int lcrh_tx;
unsigned int lcrh_rx;
bool oversampling;
+ bool interrupt_may_hang; /* vendor-specific */
bool dma_threshold;
};
@@ -90,9 +116,12 @@ static struct vendor_data vendor_st = {
.lcrh_tx = ST_UART011_LCRH_TX,
.lcrh_rx = ST_UART011_LCRH_RX,
.oversampling = true,
+ .interrupt_may_hang = true,
.dma_threshold = true,
};
+static struct uart_amba_port *amba_ports[UART_NR];
+
/* Deals with DMA transactions */
struct pl011_sgbuf {
@@ -132,6 +161,7 @@ struct uart_amba_port {
unsigned int lcrh_rx; /* vendor-specific */
bool autorts;
char type[12];
+ bool interrupt_may_hang; /* vendor-specific */
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
bool using_tx_dma;
@@ -1008,6 +1038,68 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
#endif
+/*
+ * pl011_lockup_wa
+ * This workaround aims to break the deadlock that can occur when,
+ * after a long transfer over the UART with hardware flow control,
+ * the UART interrupt registers cannot be cleared and the transfer
+ * becomes blocked.
+ *
+ * In this deadlock condition the ICR does not get cleared even
+ * after multiple writes. As a result pass_counter decreases and
+ * finally reaches zero, which is taken as the trigger point to
+ * run this UART_BT_WA.
+ *
+ */
+static void pl011_lockup_wa(unsigned long data)
+{
+ struct uart_amba_port *uap = amba_ports[0];
+ void __iomem *base = uap->port.membase;
+ struct circ_buf *xmit = &uap->port.state->xmit;
+ struct tty_struct *tty = uap->port.state->port.tty;
+ int buf_empty_retries = 200;
+ int loop;
+
+ /* Stop HCI layer from submitting data for tx */
+ tty->hw_stopped = 1;
+ while (!uart_circ_empty(xmit)) {
+ if (buf_empty_retries-- == 0)
+ break;
+ udelay(100);
+ }
+
+ /* Backup registers */
+ for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
+ uart_wa_regdata[loop] = readl(base + uart_wa_reg[loop]);
+
+ /* Disable UART so that FIFO data is flushed out */
+ writew(0x00, uap->port.membase + UART011_CR);
+
+ /* Soft reset UART module */
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->reset)
+ plat->reset();
+ }
+
+ /* Restore registers */
+ for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
+ writew(uart_wa_regdata[loop],
+ uap->port.membase + uart_wa_reg[loop]);
+
+ /* Initialise the old status of the modem signals */
+ uap->old_status = readw(uap->port.membase + UART01x_FR) &
+ UART01x_FR_MODEM_ANY;
+
+ if (readl(base + UART011_MIS) & 0x2)
+ printk(KERN_EMERG "UART_BT_WA: ***FAILED***\n");
+
+ /* Start Tx/Rx */
+ tty->hw_stopped = 0;
+}
+
static void pl011_stop_tx(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
@@ -1158,8 +1250,11 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
if (status & UART011_TXIS)
pl011_tx_chars(uap);
- if (pass_counter-- == 0)
+ if (pass_counter-- == 0) {
+ if (uap->interrupt_may_hang)
+ tasklet_schedule(&pl011_lockup_tlet);
break;
+ }
status = readw(uap->port.membase + UART011_MIS);
} while (status != 0);
@@ -1339,6 +1434,14 @@ static int pl011_startup(struct uart_port *port)
writew(uap->im, uap->port.membase + UART011_IMSC);
spin_unlock_irq(&uap->port.lock);
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->init)
+ plat->init();
+ }
+
return 0;
clk_dis:
@@ -1394,6 +1497,15 @@ static void pl011_shutdown(struct uart_port *port)
* Shut down the clock producer
*/
clk_disable(uap->clk);
+
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->exit)
+ plat->exit();
+ }
+
}
static void
@@ -1700,6 +1812,14 @@ static int __init pl011_console_setup(struct console *co, char *options)
if (!uap)
return -ENODEV;
+ if (uap->port.dev->platform_data) {
+ struct amba_pl011_data *plat;
+
+ plat = uap->port.dev->platform_data;
+ if (plat->init)
+ plat->init();
+ }
+
uap->port.uartclk = clk_get_rate(uap->clk);
if (options)
@@ -1774,6 +1894,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
uap->lcrh_rx = vendor->lcrh_rx;
uap->lcrh_tx = vendor->lcrh_tx;
uap->fifosize = vendor->fifosize;
+ uap->interrupt_may_hang = vendor->interrupt_may_hang;
uap->port.dev = &dev->dev;
uap->port.mapbase = dev->res.start;
uap->port.membase = base;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 6d5d6e679fc..af9b7814965 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1709,12 +1709,13 @@ static int atmel_serial_resume(struct platform_device *pdev)
static int __devinit atmel_serial_probe(struct platform_device *pdev)
{
struct atmel_uart_port *port;
+ struct atmel_uart_data *pdata = pdev->dev.platform_data;
void *data;
int ret;
BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
- port = &atmel_ports[pdev->id];
+ port = &atmel_ports[pdata->num];
port->backup_imr = 0;
atmel_init_port(port, pdev);
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index a1a0e55d080..c0b68b9cad9 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -250,6 +250,20 @@ static void bcm_uart_do_rx(struct uart_port *port)
/* get overrun/fifo empty information from ier
* register */
iestat = bcm_uart_readl(port, UART_IR_REG);
+
+ if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
+ unsigned int val;
+
+ /* fifo reset is required to clear
+ * interrupt */
+ val = bcm_uart_readl(port, UART_CTL_REG);
+ val |= UART_CTL_RSTRXFIFO_MASK;
+ bcm_uart_writel(port, val, UART_CTL_REG);
+
+ port->icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+
if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
break;
@@ -284,10 +298,6 @@ static void bcm_uart_do_rx(struct uart_port *port)
if (uart_handle_sysrq_char(port, c))
continue;
- if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
- port->icount.overrun++;
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- }
if ((cstat & port->ignore_status_mask) == 0)
tty_insert_flip_char(tty, c, flag);
diff --git a/drivers/tty/serial/bfin_5xx.c b/drivers/tty/serial/bfin_5xx.c
index 9b1ff2b6bb3..ff6979181ac 100644
--- a/drivers/tty/serial/bfin_5xx.c
+++ b/drivers/tty/serial/bfin_5xx.c
@@ -1304,8 +1304,7 @@ static int bfin_serial_probe(struct platform_device *pdev)
goto out_error_free_peripherals;
}
- uart->port.membase = ioremap(res->start,
- res->end - res->start);
+ uart->port.membase = ioremap(res->start, resource_size(res));
if (!uart->port.membase) {
dev_err(&pdev->dev, "Cannot map uart IO\n");
ret = -ENXIO;
@@ -1483,7 +1482,7 @@ static int bfin_earlyprintk_probe(struct platform_device *pdev)
}
bfin_earlyprintk_port.port.membase = ioremap(res->start,
- res->end - res->start);
+ resource_size(res));
if (!bfin_earlyprintk_port.port.membase) {
dev_err(&pdev->dev, "Cannot map uart IO\n");
ret = -ENXIO;
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 57421d77632..ddc487a2d42 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -48,7 +48,7 @@
#include <linux/sysrq.h>
#include <linux/tty.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/system.h>
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 5315525220f..426434e5eb7 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -36,6 +36,7 @@
* you need to use this driver for another platform.
*
*****************************************************************************/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/termios.h>
#include <linux/tty.h>
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index a54473123e0..7e91b3d368c 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -45,10 +45,11 @@
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <mach/hardware.h>
#include <mach/imx-uart.h>
/* Register definitions */
@@ -66,8 +67,9 @@
#define UBIR 0xa4 /* BRM Incremental Register */
#define UBMR 0xa8 /* BRM Modulator Register */
#define UBRC 0xac /* Baud Rate Count Register */
-#define MX2_ONEMS 0xb0 /* One Millisecond register */
-#define UTS (cpu_is_mx1() ? 0xd0 : 0xb4) /* UART Test Register */
+#define IMX21_ONEMS 0xb0 /* One Millisecond register */
+#define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */
+#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/
/* UART Control Register Bit Fields.*/
#define URXD_CHARRDY (1<<15)
@@ -87,7 +89,7 @@
#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
#define UCR1_SNDBRK (1<<4) /* Send break */
#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
-#define MX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, mx1 only */
+#define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
#define UCR1_DOZE (1<<1) /* Doze */
#define UCR1_UARTEN (1<<0) /* UART enabled */
#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */
@@ -113,9 +115,7 @@
#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
-#define MX1_UCR3_REF25 (1<<3) /* Ref freq 25 MHz, only on mx1 */
-#define MX1_UCR3_REF30 (1<<2) /* Ref Freq 30 MHz, only on mx1 */
-#define MX2_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select, on mx2/mx3 */
+#define IMX21_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select */
#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
#define UCR3_BPEN (1<<0) /* Preset registers enable */
#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
@@ -181,6 +181,18 @@
#define UART_NR 8
+/* i.mx21 type uart runs on all i.mx except i.mx1 */
+enum imx_uart_type {
+ IMX1_UART,
+ IMX21_UART,
+};
+
+/* device type dependent stuff */
+struct imx_uart_data {
+ unsigned uts_reg;
+ enum imx_uart_type devtype;
+};
+
struct imx_port {
struct uart_port port;
struct timer_list timer;
@@ -192,6 +204,7 @@ struct imx_port {
unsigned int irda_inv_tx:1;
unsigned short trcv_delay; /* transceiver delay */
struct clk *clk;
+ struct imx_uart_data *devdata;
};
#ifdef CONFIG_IRDA
@@ -200,6 +213,52 @@ struct imx_port {
#define USE_IRDA(sport) (0)
#endif
+static struct imx_uart_data imx_uart_devdata[] = {
+ [IMX1_UART] = {
+ .uts_reg = IMX1_UTS,
+ .devtype = IMX1_UART,
+ },
+ [IMX21_UART] = {
+ .uts_reg = IMX21_UTS,
+ .devtype = IMX21_UART,
+ },
+};
+
+static struct platform_device_id imx_uart_devtype[] = {
+ {
+ .name = "imx1-uart",
+ .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART],
+ }, {
+ .name = "imx21-uart",
+ .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, imx_uart_devtype);
+
+static struct of_device_id imx_uart_dt_ids[] = {
+ { .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
+ { .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);
+
+static inline unsigned uts_reg(struct imx_port *sport)
+{
+ return sport->devdata->uts_reg;
+}
+
+static inline int is_imx1_uart(struct imx_port *sport)
+{
+ return sport->devdata->devtype == IMX1_UART;
+}
+
+static inline int is_imx21_uart(struct imx_port *sport)
+{
+ return sport->devdata->devtype == IMX21_UART;
+}
+
/*
* Handle any change of modem status signal since we were last called.
*/
@@ -326,7 +385,8 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
struct circ_buf *xmit = &sport->port.state->xmit;
while (!uart_circ_empty(xmit) &&
- !(readl(sport->port.membase + UTS) & UTS_TXFULL)) {
+ !(readl(sport->port.membase + uts_reg(sport))
+ & UTS_TXFULL)) {
/* send xmit->buf[xmit->tail]
* out the port here */
writel(xmit->buf[xmit->tail], sport->port.membase + URTX0);
@@ -373,7 +433,7 @@ static void imx_start_tx(struct uart_port *port)
writel(temp, sport->port.membase + UCR4);
}
- if (readl(sport->port.membase + UTS) & UTS_TXEMPTY)
+ if (readl(sport->port.membase + uts_reg(sport)) & UTS_TXEMPTY)
imx_transmit_buffer(sport);
}
@@ -689,9 +749,9 @@ static int imx_startup(struct uart_port *port)
}
}
- if (!cpu_is_mx1()) {
+ if (is_imx21_uart(sport)) {
temp = readl(sport->port.membase + UCR3);
- temp |= MX2_UCR3_RXDMUXSEL;
+ temp |= IMX21_UCR3_RXDMUXSEL;
writel(temp, sport->port.membase + UCR3);
}
@@ -923,9 +983,9 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
writel(num, sport->port.membase + UBIR);
writel(denom, sport->port.membase + UBMR);
- if (!cpu_is_mx1())
+ if (is_imx21_uart(sport))
writel(sport->port.uartclk / div / 1000,
- sport->port.membase + MX2_ONEMS);
+ sport->port.membase + IMX21_ONEMS);
writel(old_ucr1, sport->port.membase + UCR1);
@@ -954,7 +1014,7 @@ static void imx_release_port(struct uart_port *port)
struct resource *mmres;
mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mmres->start, mmres->end - mmres->start + 1);
+ release_mem_region(mmres->start, resource_size(mmres));
}
/*
@@ -970,8 +1030,7 @@ static int imx_request_port(struct uart_port *port)
if (!mmres)
return -ENODEV;
- ret = request_mem_region(mmres->start, mmres->end - mmres->start + 1,
- "imx-uart");
+ ret = request_mem_region(mmres->start, resource_size(mmres), "imx-uart");
return ret ? 0 : -EBUSY;
}
@@ -1042,7 +1101,7 @@ static void imx_console_putchar(struct uart_port *port, int ch)
{
struct imx_port *sport = (struct imx_port *)port;
- while (readl(sport->port.membase + UTS) & UTS_TXFULL)
+ while (readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)
barrier();
writel(ch, sport->port.membase + URTX0);
@@ -1063,8 +1122,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
ucr1 = old_ucr1 = readl(sport->port.membase + UCR1);
old_ucr2 = readl(sport->port.membase + UCR2);
- if (cpu_is_mx1())
- ucr1 |= MX1_UCR1_UARTCLKEN;
+ if (is_imx1_uart(sport))
+ ucr1 |= IMX1_UCR1_UARTCLKEN;
ucr1 |= UCR1_UARTEN;
ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN);
@@ -1223,6 +1282,58 @@ static int serial_imx_resume(struct platform_device *dev)
return 0;
}
+#ifdef CONFIG_OF
+static int serial_imx_probe_dt(struct imx_port *sport,
+ struct platform_device *pdev)
+{
+ static int portnum = 0;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(imx_uart_dt_ids, &pdev->dev);
+
+ if (!np)
+ return -ENODEV;
+
+ sport->port.line = portnum++;
+ if (sport->port.line >= UART_NR)
+ return -EINVAL;
+
+ if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
+ sport->have_rtscts = 1;
+
+ if (of_get_property(np, "fsl,irda-mode", NULL))
+ sport->use_irda = 1;
+
+ sport->devdata = of_id->data;
+
+ return 0;
+}
+#else
+static inline int serial_imx_probe_dt(struct imx_port *sport,
+ struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+#endif
+
+static void serial_imx_probe_pdata(struct imx_port *sport,
+ struct platform_device *pdev)
+{
+ struct imxuart_platform_data *pdata = pdev->dev.platform_data;
+
+ sport->port.line = pdev->id;
+ sport->devdata = (struct imx_uart_data *) pdev->id_entry->driver_data;
+
+ if (!pdata)
+ return;
+
+ if (pdata->flags & IMXUART_HAVE_RTSCTS)
+ sport->have_rtscts = 1;
+
+ if (pdata->flags & IMXUART_IRDA)
+ sport->use_irda = 1;
+}
+
static int serial_imx_probe(struct platform_device *pdev)
{
struct imx_port *sport;
@@ -1235,6 +1346,10 @@ static int serial_imx_probe(struct platform_device *pdev)
if (!sport)
return -ENOMEM;
+ ret = serial_imx_probe_dt(sport, pdev);
+ if (ret == -ENODEV)
+ serial_imx_probe_pdata(sport, pdev);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
@@ -1259,7 +1374,6 @@ static int serial_imx_probe(struct platform_device *pdev)
sport->port.fifosize = 32;
sport->port.ops = &imx_pops;
sport->port.flags = UPF_BOOT_AUTOCONF;
- sport->port.line = pdev->id;
init_timer(&sport->timer);
sport->timer.function = imx_timeout;
sport->timer.data = (unsigned long)sport;
@@ -1273,17 +1387,9 @@ static int serial_imx_probe(struct platform_device *pdev)
sport->port.uartclk = clk_get_rate(sport->clk);
- imx_ports[pdev->id] = sport;
+ imx_ports[sport->port.line] = sport;
pdata = pdev->dev.platform_data;
- if (pdata && (pdata->flags & IMXUART_HAVE_RTSCTS))
- sport->have_rtscts = 1;
-
-#ifdef CONFIG_IRDA
- if (pdata && (pdata->flags & IMXUART_IRDA))
- sport->use_irda = 1;
-#endif
-
if (pdata && pdata->init) {
ret = pdata->init(pdev);
if (ret)
@@ -1341,9 +1447,11 @@ static struct platform_driver serial_imx_driver = {
.suspend = serial_imx_suspend,
.resume = serial_imx_resume,
+ .id_table = imx_uart_devtype,
.driver = {
.name = "imx-uart",
.owner = THIS_MODULE,
+ .of_match_table = imx_uart_dt_ids,
},
};
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 18f548449c6..96da17868cf 100644
--- a/drivers/tty/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -125,7 +125,7 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device
brd->bd_uart_offset = 0x200;
brd->bd_dividend = 921600;
- brd->re_map_membase = ioremap(brd->membase, 0x1000);
+ brd->re_map_membase = ioremap(brd->membase, pci_resource_len(pdev, 0));
if (!brd->re_map_membase) {
dev_err(&pdev->dev,
"card has no PCI Memory resources, "
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 84db7321cce..8e07517f8ac 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -892,7 +892,7 @@ static int m32r_sio_request_port(struct uart_port *port)
* If we have a mapbase, then request that as well.
*/
if (ret == 0 && up->port.flags & UPF_IOREMAP) {
- int size = res->end - res->start + 1;
+ int size = resource_size(res);
up->port.membase = ioremap(up->port.mapbase, size);
if (!up->port.membase)
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 1bd28450ca4..a764bf99743 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -421,7 +421,6 @@ static int max3110_main_thread(void *_max)
int ret = 0;
struct circ_buf *xmit = &max->con_xmit;
- init_waitqueue_head(wq);
pr_info(PR_FMT "start main thread\n");
do {
@@ -823,7 +822,7 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
res = RC_TAG;
ret = max3110_write_then_read(max, (u8 *)&res, (u8 *)&res, 2, 0);
if (ret < 0 || res == 0 || res == 0xffff) {
- printk(KERN_ERR "MAX3111 deemed not present (conf reg %04x)",
+ dev_dbg(&spi->dev, "MAX3111 deemed not present (conf reg %04x)",
res);
ret = -ENODEV;
goto err_get_page;
@@ -838,6 +837,8 @@ static int __devinit serial_m3110_probe(struct spi_device *spi)
max->con_xmit.head = 0;
max->con_xmit.tail = 0;
+ init_waitqueue_head(&max->wq);
+
max->main_thread = kthread_run(max3110_main_thread,
max, "max3110_main");
if (IS_ERR(max->main_thread)) {
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index c911b2419ab..e58cece6f44 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -32,17 +32,17 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev,
{
struct resource resource;
struct device_node *np = ofdev->dev.of_node;
- const __be32 *clk, *spd;
- const __be32 *prop;
- int ret, prop_size;
+ u32 clk, spd, prop;
+ int ret;
memset(port, 0, sizeof *port);
- spd = of_get_property(np, "current-speed", NULL);
- clk = of_get_property(np, "clock-frequency", NULL);
- if (!clk) {
+ if (of_property_read_u32(np, "clock-frequency", &clk)) {
dev_warn(&ofdev->dev, "no clock-frequency property set\n");
return -ENODEV;
}
+ /* If current-speed was set, then try not to change it. */
+ if (of_property_read_u32(np, "current-speed", &spd) == 0)
+ port->custom_divisor = clk / (16 * spd);
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
@@ -54,25 +54,35 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev,
port->mapbase = resource.start;
/* Check for shifted address mapping */
- prop = of_get_property(np, "reg-offset", &prop_size);
- if (prop && (prop_size == sizeof(u32)))
- port->mapbase += be32_to_cpup(prop);
+ if (of_property_read_u32(np, "reg-offset", &prop) == 0)
+ port->mapbase += prop;
/* Check for registers offset within the devices address range */
- prop = of_get_property(np, "reg-shift", &prop_size);
- if (prop && (prop_size == sizeof(u32)))
- port->regshift = be32_to_cpup(prop);
+ if (of_property_read_u32(np, "reg-shift", &prop) == 0)
+ port->regshift = prop;
port->irq = irq_of_parse_and_map(np, 0);
port->iotype = UPIO_MEM;
+ if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
+ switch (prop) {
+ case 1:
+ port->iotype = UPIO_MEM;
+ break;
+ case 4:
+ port->iotype = UPIO_MEM32;
+ break;
+ default:
+ dev_warn(&ofdev->dev, "unsupported reg-io-width (%d)\n",
+ prop);
+ return -EINVAL;
+ }
+ }
+
port->type = type;
- port->uartclk = be32_to_cpup(clk);
+ port->uartclk = clk;
port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP
| UPF_FIXED_PORT | UPF_FIXED_TYPE;
port->dev = &ofdev->dev;
- /* If current-speed was set, then try not to change it. */
- if (spd)
- port->custom_divisor = be32_to_cpup(clk) / (16 * (be32_to_cpup(spd)));
return 0;
}
@@ -171,6 +181,7 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = {
{ .compatible = "ns16550", .data = (void *)PORT_16550, },
{ .compatible = "ns16750", .data = (void *)PORT_16750, },
{ .compatible = "ns16850", .data = (void *)PORT_16850, },
+ { .compatible = "nvidia,tegra20-uart", .data = (void *)PORT_TEGRA, },
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
{ .compatible = "ibm,qpace-nwp-serial",
.data = (void *)PORT_NWPSERIAL, },
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 47cadf47414..c37df8d0fa2 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1241,8 +1241,8 @@ static int serial_omap_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (!request_mem_region(mem->start, (mem->end - mem->start) + 1,
- pdev->dev.driver->name)) {
+ if (!request_mem_region(mem->start, resource_size(mem),
+ pdev->dev.driver->name)) {
dev_err(&pdev->dev, "memory region already claimed\n");
return -EBUSY;
}
@@ -1308,7 +1308,7 @@ err:
dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
pdev->id, __func__, ret);
do_release_region:
- release_mem_region(mem->start, (mem->end - mem->start) + 1);
+ release_mem_region(mem->start, resource_size(mem));
return ret;
}
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 46521093089..846dfcd3ce0 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -14,6 +14,7 @@
*along with this program; if not, write to the Free Software
*Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/kernel.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -44,6 +45,7 @@ enum {
/* Set the max number of UART port
* Intel EG20T PCH: 4 port
* OKI SEMICONDUCTOR ML7213 IOH: 3 port
+ * OKI SEMICONDUCTOR ML7223 IOH: 2 port
*/
#define PCH_UART_NR 4
@@ -137,8 +139,6 @@ enum {
#define PCH_UART_DLL 0x00
#define PCH_UART_DLM 0x01
-#define DIV_ROUND(a, b) (((a) + ((b)/2)) / (b))
-
#define PCH_UART_IID_RLS (PCH_UART_IIR_REI)
#define PCH_UART_IID_RDR (PCH_UART_IIR_RRI)
#define PCH_UART_IID_RDR_TO (PCH_UART_IIR_RRI | PCH_UART_IIR_TOI)
@@ -316,7 +316,7 @@ static int pch_uart_hal_set_line(struct eg20t_port *priv, int baud,
unsigned int dll, dlm, lcr;
int div;
- div = DIV_ROUND(priv->base_baud / 16, baud);
+ div = DIV_ROUND_CLOSEST(priv->base_baud / 16, baud);
if (div < 0 || USHRT_MAX <= div) {
dev_err(priv->port.dev, "Invalid Baud(div=0x%x)\n", div);
return -EINVAL;
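
The two hunks above drop the driver-local DIV_ROUND macro in favour of the kernel's DIV_ROUND_CLOSEST() (hence the added <linux/kernel.h> include). A small user-space check of the same divisor arithmetic, with the helper re-defined locally only so it compiles outside the kernel and an illustrative base clock:

#include <stdio.h>

#define DIV_ROUND(a, b)		(((a) + ((b) / 2)) / (b))	/* the removed macro */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))	/* kernel helper, positive args */

int main(void)
{
	unsigned int base_baud = 1843200;	/* illustrative UART input clock */
	unsigned int bauds[] = { 115200, 38400, 9600 };
	unsigned int i;

	for (i = 0; i < sizeof(bauds) / sizeof(bauds[0]); i++)
		printf("baud %6u -> divisor %u (old macro: %u)\n", bauds[i],
		       DIV_ROUND_CLOSEST(base_baud / 16, bauds[i]),
		       DIV_ROUND(base_baud / 16, bauds[i]));
	return 0;
}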
@@ -1429,6 +1429,8 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
goto init_port_hal_free;
}
+ pci_enable_msi(pdev);
+
iobase = pci_resource_start(pdev, 0);
mapbase = pci_resource_start(pdev, 1);
priv->mapbase = mapbase;
@@ -1485,6 +1487,8 @@ static void pch_uart_pci_remove(struct pci_dev *pdev)
struct eg20t_port *priv;
priv = (struct eg20t_port *)pci_get_drvdata(pdev);
+
+ pci_disable_msi(pdev);
pch_uart_exit_port(priv);
pci_disable_device(pdev);
kfree(priv);
@@ -1568,6 +1572,7 @@ static int __devinit pch_uart_pci_probe(struct pci_dev *pdev,
return ret;
probe_disable_device:
+ pci_disable_msi(pdev);
pci_disable_device(pdev);
probe_error:
return ret;
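
pch_uart drops its private DIV_ROUND() macro in favour of the generic DIV_ROUND_CLOSEST() from linux/kernel.h (hence the added include) when computing the baud divisor, and additionally brackets the port with pci_enable_msi()/pci_disable_msi(). The stand-alone program below only demonstrates the rounding behaviour; the local macro copy and the sample clock rate are illustrative and match the kernel macro for the unsigned values involved.

#include <stdio.h>

/* Local copy of the round-to-nearest division used for the divisor. */
#define DIV_ROUND_CLOSEST(a, b) (((a) + ((b) / 2)) / (b))

int main(void)
{
	unsigned int uartclk16 = 1843200 / 16;	/* 115200, a common base */

	/* Exact rate: divisor 1. */
	printf("115200 -> div %u\n", DIV_ROUND_CLOSEST(uartclk16, 115200u));

	/* Truncating division would give 0 here; rounding gives 1. */
	printf("230400 -> div %u\n", DIV_ROUND_CLOSEST(uartclk16, 230400u));
	return 0;
}
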
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 4302e6e3768..531931c1b25 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -803,7 +803,7 @@ static int serial_pxa_probe(struct platform_device *dev)
break;
}
- sport->port.membase = ioremap(mmres->start, mmres->end - mmres->start + 1);
+ sport->port.membase = ioremap(mmres->start, resource_size(mmres));
if (!sport->port.membase) {
ret = -ENOMEM;
goto err_clk;
diff --git a/drivers/tty/serial/s3c2400.c b/drivers/tty/serial/s3c2400.c
deleted file mode 100644
index d13051b3df8..00000000000
--- a/drivers/tty/serial/s3c2400.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Driver for Samsung SoC onboard UARTs.
- *
- * Ben Dooks, Copyright (c) 2003-2005 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-
-#include <asm/irq.h>
-
-#include <mach/hardware.h>
-
-#include <plat/regs-serial.h>
-#include <mach/regs-gpio.h>
-
-#include "samsung.h"
-
-static int s3c2400_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- clk->divisor = 1;
- clk->name = "pclk";
-
- return 0;
-}
-
-static int s3c2400_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- return 0;
-}
-
-static int s3c2400_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- dbg("s3c2400_serial_resetport: port=%p (%08lx), cfg=%p\n",
- port, port->mapbase, cfg);
-
- wr_regl(port, S3C2410_UCON, cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
-
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- return 0;
-}
-
-static struct s3c24xx_uart_info s3c2400_uart_inf = {
- .name = "Samsung S3C2400 UART",
- .type = PORT_S3C2400,
- .fifosize = 16,
- .rx_fifomask = S3C2410_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2410_UFSTAT_RXFULL,
- .tx_fifofull = S3C2410_UFSTAT_TXFULL,
- .tx_fifomask = S3C2410_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT,
- .get_clksrc = s3c2400_serial_getsource,
- .set_clksrc = s3c2400_serial_setsource,
- .reset_port = s3c2400_serial_resetport,
-};
-
-static int s3c2400_serial_probe(struct platform_device *dev)
-{
- return s3c24xx_serial_probe(dev, &s3c2400_uart_inf);
-}
-
-static struct platform_driver s3c2400_serial_driver = {
- .probe = s3c2400_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s3c2400-uart",
- .owner = THIS_MODULE,
- },
-};
-
-s3c24xx_console_init(&s3c2400_serial_driver, &s3c2400_uart_inf);
-
-static inline int s3c2400_serial_init(void)
-{
- return s3c24xx_serial_init(&s3c2400_serial_driver, &s3c2400_uart_inf);
-}
-
-static inline void s3c2400_serial_exit(void)
-{
- platform_driver_unregister(&s3c2400_serial_driver);
-}
-
-module_init(s3c2400_serial_init);
-module_exit(s3c2400_serial_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("Samsung S3C2400 SoC Serial port driver");
-MODULE_ALIAS("platform:s3c2400-uart");
diff --git a/drivers/tty/serial/s3c2410.c b/drivers/tty/serial/s3c2410.c
index bffe6ff9b15..b1d7e7c1849 100644
--- a/drivers/tty/serial/s3c2410.c
+++ b/drivers/tty/serial/s3c2410.c
@@ -96,8 +96,6 @@ static struct platform_driver s3c2410_serial_driver = {
},
};
-s3c24xx_console_init(&s3c2410_serial_driver, &s3c2410_uart_inf);
-
static int __init s3c2410_serial_init(void)
{
return s3c24xx_serial_init(&s3c2410_serial_driver, &s3c2410_uart_inf);
diff --git a/drivers/tty/serial/s3c2412.c b/drivers/tty/serial/s3c2412.c
index 7e2b9504a68..2234bf9ced4 100644
--- a/drivers/tty/serial/s3c2412.c
+++ b/drivers/tty/serial/s3c2412.c
@@ -130,8 +130,6 @@ static struct platform_driver s3c2412_serial_driver = {
},
};
-s3c24xx_console_init(&s3c2412_serial_driver, &s3c2412_uart_inf);
-
static inline int s3c2412_serial_init(void)
{
return s3c24xx_serial_init(&s3c2412_serial_driver, &s3c2412_uart_inf);
diff --git a/drivers/tty/serial/s3c2440.c b/drivers/tty/serial/s3c2440.c
index 9e10d415d5f..1d0c324b813 100644
--- a/drivers/tty/serial/s3c2440.c
+++ b/drivers/tty/serial/s3c2440.c
@@ -159,8 +159,6 @@ static struct platform_driver s3c2440_serial_driver = {
},
};
-s3c24xx_console_init(&s3c2440_serial_driver, &s3c2440_uart_inf);
-
static int __init s3c2440_serial_init(void)
{
return s3c24xx_serial_init(&s3c2440_serial_driver, &s3c2440_uart_inf);
diff --git a/drivers/tty/serial/s3c24a0.c b/drivers/tty/serial/s3c24a0.c
deleted file mode 100644
index 914eff22e49..00000000000
--- a/drivers/tty/serial/s3c24a0.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Driver for Samsung S3C24A0 SoC onboard UARTs.
- *
- * Based on drivers/serial/s3c2410.c
- *
- * Author: Sandeep Patil <sandeep.patil@azingo.com>
- *
- * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-
-#include <mach/hardware.h>
-
-#include <plat/regs-serial.h>
-#include <mach/regs-gpio.h>
-
-#include "samsung.h"
-
-static int s3c24a0_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- if (strcmp(clk->name, "uclk") == 0)
- ucon |= S3C2410_UCON_UCLK;
- else
- ucon &= ~S3C2410_UCON_UCLK;
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-static int s3c24a0_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- clk->divisor = 1;
- clk->name = (ucon & S3C2410_UCON_UCLK) ? "uclk" : "pclk";
-
- return 0;
-}
-
-static int s3c24a0_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- dbg("s3c24a0_serial_resetport: port=%p (%08lx), cfg=%p\n",
- port, port->mapbase, cfg);
-
- wr_regl(port, S3C2410_UCON, cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
-
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- return 0;
-}
-
-static struct s3c24xx_uart_info s3c24a0_uart_inf = {
- .name = "Samsung S3C24A0 UART",
- .type = PORT_S3C2410,
- .fifosize = 16,
- .rx_fifomask = S3C24A0_UFSTAT_RXMASK,
- .rx_fifoshift = S3C24A0_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C24A0_UFSTAT_RXFULL,
- .tx_fifofull = S3C24A0_UFSTAT_TXFULL,
- .tx_fifomask = S3C24A0_UFSTAT_TXMASK,
- .tx_fifoshift = S3C24A0_UFSTAT_TXSHIFT,
- .get_clksrc = s3c24a0_serial_getsource,
- .set_clksrc = s3c24a0_serial_setsource,
- .reset_port = s3c24a0_serial_resetport,
-};
-
-static int s3c24a0_serial_probe(struct platform_device *dev)
-{
- return s3c24xx_serial_probe(dev, &s3c24a0_uart_inf);
-}
-
-static struct platform_driver s3c24a0_serial_driver = {
- .probe = s3c24a0_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s3c24a0-uart",
- .owner = THIS_MODULE,
- },
-};
-
-s3c24xx_console_init(&s3c24a0_serial_driver, &s3c24a0_uart_inf);
-
-static int __init s3c24a0_serial_init(void)
-{
- return s3c24xx_serial_init(&s3c24a0_serial_driver, &s3c24a0_uart_inf);
-}
-
-static void __exit s3c24a0_serial_exit(void)
-{
- platform_driver_unregister(&s3c24a0_serial_driver);
-}
-
-module_init(s3c24a0_serial_init);
-module_exit(s3c24a0_serial_exit);
-
diff --git a/drivers/tty/serial/s3c6400.c b/drivers/tty/serial/s3c6400.c
index ded26c42ff3..e2f6913d84d 100644
--- a/drivers/tty/serial/s3c6400.c
+++ b/drivers/tty/serial/s3c6400.c
@@ -130,8 +130,6 @@ static struct platform_driver s3c6400_serial_driver = {
},
};
-s3c24xx_console_init(&s3c6400_serial_driver, &s3c6400_uart_inf);
-
static int __init s3c6400_serial_init(void)
{
return s3c24xx_serial_init(&s3c6400_serial_driver, &s3c6400_uart_inf);
diff --git a/drivers/tty/serial/s5pv210.c b/drivers/tty/serial/s5pv210.c
index fb2619f93d8..8b0b888a1b7 100644
--- a/drivers/tty/serial/s5pv210.c
+++ b/drivers/tty/serial/s5pv210.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
+#include <linux/delay.h>
#include <asm/irq.h>
#include <mach/hardware.h>
@@ -30,7 +31,7 @@ static int s5pv210_serial_setsource(struct uart_port *port,
struct s3c2410_uartcfg *cfg = port->dev->platform_data;
unsigned long ucon = rd_regl(port, S3C2410_UCON);
- if ((cfg->clocks_size) == 1)
+ if (cfg->flags & NO_NEED_CHECK_CLKSRC)
return 0;
if (strcmp(clk->name, "pclk") == 0)
@@ -55,7 +56,7 @@ static int s5pv210_serial_getsource(struct uart_port *port,
clk->divisor = 1;
- if ((cfg->clocks_size) == 1)
+ if (cfg->flags & NO_NEED_CHECK_CLKSRC)
return 0;
switch (ucon & S5PV210_UCON_CLKMASK) {
@@ -83,6 +84,9 @@ static int s5pv210_serial_resetport(struct uart_port *port,
wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
wr_regl(port, S3C2410_UFCON, cfg->ufcon);
+	/* A short delay is needed after resetting the FIFO registers */
+ udelay(1);
+
return 0;
}
@@ -135,13 +139,6 @@ static struct platform_driver s5p_serial_driver = {
},
};
-static int __init s5pv210_serial_console_init(void)
-{
- return s3c24xx_serial_initconsole(&s5p_serial_driver, s5p_uart_inf);
-}
-
-console_initcall(s5pv210_serial_console_init);
-
static int __init s5p_serial_init(void)
{
return s3c24xx_serial_init(&s5p_serial_driver, *s5p_uart_inf);
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index f66f6482930..afc62942315 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1194,12 +1194,10 @@ int __devexit s3c24xx_serial_remove(struct platform_device *dev)
EXPORT_SYMBOL_GPL(s3c24xx_serial_remove);
/* UART power management code */
-
-#ifdef CONFIG_PM
-
-static int s3c24xx_serial_suspend(struct platform_device *dev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int s3c24xx_serial_suspend(struct device *dev)
{
- struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
+ struct uart_port *port = s3c24xx_dev_to_port(dev);
if (port)
uart_suspend_port(&s3c24xx_uart_drv, port);
@@ -1207,9 +1205,9 @@ static int s3c24xx_serial_suspend(struct platform_device *dev, pm_message_t stat
return 0;
}
-static int s3c24xx_serial_resume(struct platform_device *dev)
+static int s3c24xx_serial_resume(struct device *dev)
{
- struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
+ struct uart_port *port = s3c24xx_dev_to_port(dev);
struct s3c24xx_uart_port *ourport = to_ourport(port);
if (port) {
@@ -1222,17 +1220,20 @@ static int s3c24xx_serial_resume(struct platform_device *dev)
return 0;
}
-#endif
+
+static const struct dev_pm_ops s3c24xx_serial_pm_ops = {
+ .suspend = s3c24xx_serial_suspend,
+ .resume = s3c24xx_serial_resume,
+};
+#else /* !CONFIG_PM_SLEEP */
+#define s3c24xx_serial_pm_ops NULL
+#endif /* CONFIG_PM_SLEEP */
int s3c24xx_serial_init(struct platform_driver *drv,
struct s3c24xx_uart_info *info)
{
dbg("s3c24xx_serial_init(%p,%p)\n", drv, info);
-
-#ifdef CONFIG_PM
- drv->suspend = s3c24xx_serial_suspend;
- drv->resume = s3c24xx_serial_resume;
-#endif
+ drv->driver.pm = &s3c24xx_serial_pm_ops;
return platform_driver_register(drv);
}
@@ -1416,10 +1417,8 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
/* is the port configured? */
- if (port->mapbase == 0x0) {
- co->index = 0;
- port = &s3c24xx_serial_ports[co->index].port;
- }
+ if (port->mapbase == 0x0)
+ return -ENODEV;
cons_uart = port;
@@ -1451,7 +1450,8 @@ static struct console s3c24xx_serial_console = {
.flags = CON_PRINTBUFFER,
.index = -1,
.write = s3c24xx_serial_console_write,
- .setup = s3c24xx_serial_console_setup
+ .setup = s3c24xx_serial_console_setup,
+ .data = &s3c24xx_uart_drv,
};
int s3c24xx_serial_initconsole(struct platform_driver *drv,
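
samsung.c switches from the legacy platform_driver suspend/resume callbacks to a dev_pm_ops table guarded by CONFIG_PM_SLEEP, wired up once in s3c24xx_serial_init(). A hedged sketch of the same shape for a hypothetical driver "foo" follows; the names and stub bodies are illustrative, only the structure mirrors the patch.

/* Sketch of the dev_pm_ops shape adopted above, hypothetical driver. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore register state */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL
#endif

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= FOO_PM_OPS,
	},
};
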
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index 5b098cd7604..a69d9a54be9 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -79,25 +79,6 @@ extern int s3c24xx_serial_initconsole(struct platform_driver *drv,
extern int s3c24xx_serial_init(struct platform_driver *drv,
struct s3c24xx_uart_info *info);
-#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
-
-#define s3c24xx_console_init(__drv, __inf) \
-static int __init s3c_serial_console_init(void) \
-{ \
- struct s3c24xx_uart_info *uinfo[CONFIG_SERIAL_SAMSUNG_UARTS]; \
- int i; \
- \
- for (i = 0; i < CONFIG_SERIAL_SAMSUNG_UARTS; i++) \
- uinfo[i] = __inf; \
- return s3c24xx_serial_initconsole(__drv, uinfo); \
-} \
- \
-console_initcall(s3c_serial_console_init)
-
-#else
-#define s3c24xx_console_init(drv, inf) extern void no_console(void)
-#endif
-
#ifdef CONFIG_SERIAL_SAMSUNG_DEBUG
extern void printascii(const char *);
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index ea2340b814e..6bc2e3f876f 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -39,7 +39,7 @@
#include <linux/tty.h>
#include <linux/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/war.h>
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ebd8629c108..2ec57b2fb27 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -54,10 +54,6 @@
#include <asm/sh_bios.h>
#endif
-#ifdef CONFIG_H8300
-#include <asm/gpio.h>
-#endif
-
#include "sh-sci.h"
struct sci_port {
@@ -66,12 +62,6 @@ struct sci_port {
/* Platform configuration */
struct plat_sci_port *cfg;
- /* Port enable callback */
- void (*enable)(struct uart_port *port);
-
- /* Port disable callback */
- void (*disable)(struct uart_port *port);
-
/* Break timer */
struct timer_list break_timer;
int break_flag;
@@ -81,6 +71,8 @@ struct sci_port {
/* Function clock */
struct clk *fclk;
+ char *irqstr[SCIx_NR_IRQS];
+
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
@@ -121,6 +113,278 @@ to_sci_port(struct uart_port *uart)
return container_of(uart, struct sci_port, port);
}
+struct plat_sci_reg {
+ u8 offset, size;
+};
+
+/* Helper for invalidating specific entries of an inherited map. */
+#define sci_reg_invalid { .offset = 0, .size = 0 }
+
+static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
+ [SCIx_PROBE_REGTYPE] = {
+ [0 ... SCIx_NR_REGS - 1] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SCI definitions, dependent on the port's regshift
+ * value.
+ */
+ [SCIx_SCI_REGTYPE] = {
+ [SCSMR] = { 0x00, 8 },
+ [SCBRR] = { 0x01, 8 },
+ [SCSCR] = { 0x02, 8 },
+ [SCxTDR] = { 0x03, 8 },
+ [SCxSR] = { 0x04, 8 },
+ [SCxRDR] = { 0x05, 8 },
+ [SCFCR] = sci_reg_invalid,
+ [SCFDR] = sci_reg_invalid,
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common definitions for legacy IrDA ports, dependent on
+ * regshift value.
+ */
+ [SCIx_IRDA_REGTYPE] = {
+ [SCSMR] = { 0x00, 8 },
+ [SCBRR] = { 0x01, 8 },
+ [SCSCR] = { 0x02, 8 },
+ [SCxTDR] = { 0x03, 8 },
+ [SCxSR] = { 0x04, 8 },
+ [SCxRDR] = { 0x05, 8 },
+ [SCFCR] = { 0x06, 8 },
+ [SCFDR] = { 0x07, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SCIFA definitions.
+ */
+ [SCIx_SCIFA_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x20, 8 },
+ [SCxSR] = { 0x14, 16 },
+ [SCxRDR] = { 0x24, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SCIFB definitions.
+ */
+ [SCIx_SCIFB_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x40, 8 },
+ [SCxSR] = { 0x14, 16 },
+ [SCxRDR] = { 0x60, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SH-3 SCIF definitions.
+ */
+ [SCIx_SH3_SCIF_REGTYPE] = {
+ [SCSMR] = { 0x00, 8 },
+ [SCBRR] = { 0x02, 8 },
+ [SCSCR] = { 0x04, 8 },
+ [SCxTDR] = { 0x06, 8 },
+ [SCxSR] = { 0x08, 16 },
+ [SCxRDR] = { 0x0a, 8 },
+ [SCFCR] = { 0x0c, 8 },
+ [SCFDR] = { 0x0e, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+
+ /*
+ * Common SH-4(A) SCIF(B) definitions.
+ */
+ [SCIx_SH4_SCIF_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x0c, 8 },
+ [SCxSR] = { 0x10, 16 },
+ [SCxRDR] = { 0x14, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = { 0x20, 16 },
+ [SCLSR] = { 0x24, 16 },
+ },
+
+ /*
+ * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
+ * register.
+ */
+ [SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x0c, 8 },
+ [SCxSR] = { 0x10, 16 },
+ [SCxRDR] = { 0x14, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = { 0x24, 16 },
+ },
+
+ /*
+ * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
+ * count registers.
+ */
+ [SCIx_SH4_SCIF_FIFODATA_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x0c, 8 },
+ [SCxSR] = { 0x10, 16 },
+ [SCxRDR] = { 0x14, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = { 0x1c, 16 }, /* aliased to SCFDR */
+ [SCRFDR] = { 0x20, 16 },
+ [SCSPTR] = { 0x24, 16 },
+ [SCLSR] = { 0x28, 16 },
+ },
+
+ /*
+ * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
+ * registers.
+ */
+ [SCIx_SH7705_SCIF_REGTYPE] = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x20, 8 },
+ [SCxSR] = { 0x14, 16 },
+ [SCxRDR] = { 0x24, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCTFDR] = sci_reg_invalid,
+ [SCRFDR] = sci_reg_invalid,
+ [SCSPTR] = sci_reg_invalid,
+ [SCLSR] = sci_reg_invalid,
+ },
+};
+
+#define sci_getreg(up, offset) (sci_regmap[to_sci_port(up)->cfg->regtype] + offset)
+
+/*
+ * The "offset" here is rather misleading, in that it refers to an enum
+ * value relative to the port mapping rather than the fixed offset
+ * itself, which needs to be manually retrieved from the platform's
+ * register map for the given port.
+ */
+static unsigned int sci_serial_in(struct uart_port *p, int offset)
+{
+ struct plat_sci_reg *reg = sci_getreg(p, offset);
+
+ if (reg->size == 8)
+ return ioread8(p->membase + (reg->offset << p->regshift));
+ else if (reg->size == 16)
+ return ioread16(p->membase + (reg->offset << p->regshift));
+ else
+ WARN(1, "Invalid register access\n");
+
+ return 0;
+}
+
+static void sci_serial_out(struct uart_port *p, int offset, int value)
+{
+ struct plat_sci_reg *reg = sci_getreg(p, offset);
+
+ if (reg->size == 8)
+ iowrite8(value, p->membase + (reg->offset << p->regshift));
+ else if (reg->size == 16)
+ iowrite16(value, p->membase + (reg->offset << p->regshift));
+ else
+ WARN(1, "Invalid register access\n");
+}
+
+#define sci_in(up, offset) (up->serial_in(up, offset))
+#define sci_out(up, offset, value) (up->serial_out(up, offset, value))
+
+static int sci_probe_regmap(struct plat_sci_port *cfg)
+{
+ switch (cfg->type) {
+ case PORT_SCI:
+ cfg->regtype = SCIx_SCI_REGTYPE;
+ break;
+ case PORT_IRDA:
+ cfg->regtype = SCIx_IRDA_REGTYPE;
+ break;
+ case PORT_SCIFA:
+ cfg->regtype = SCIx_SCIFA_REGTYPE;
+ break;
+ case PORT_SCIFB:
+ cfg->regtype = SCIx_SCIFB_REGTYPE;
+ break;
+ case PORT_SCIF:
+ /*
+ * The SH-4 is a bit of a misnomer here, although that's
+ * where this particular port layout originated. This
+ * configuration (or some slight variation thereof)
+ * remains the dominant model for all SCIFs.
+ */
+ cfg->regtype = SCIx_SH4_SCIF_REGTYPE;
+ break;
+ default:
+ printk(KERN_ERR "Can't probe register map for given port\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void sci_port_enable(struct sci_port *sci_port)
+{
+ if (!sci_port->port.dev)
+ return;
+
+ pm_runtime_get_sync(sci_port->port.dev);
+
+ clk_enable(sci_port->iclk);
+ sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
+ clk_enable(sci_port->fclk);
+}
+
+static void sci_port_disable(struct sci_port *sci_port)
+{
+ if (!sci_port->port.dev)
+ return;
+
+ clk_disable(sci_port->fclk);
+ clk_disable(sci_port->iclk);
+
+ pm_runtime_put_sync(sci_port->port.dev);
+}
+
#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
#ifdef CONFIG_CONSOLE_POLL
@@ -164,223 +428,76 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
}
#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
-#if defined(__H8300H__) || defined(__H8300S__)
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
- int ch = (port->mapbase - SMR0) >> 3;
-
- /* set DDR regs */
- H8300_GPIO_DDR(h8300_sci_pins[ch].port,
- h8300_sci_pins[ch].rx,
- H8300_GPIO_INPUT);
- H8300_GPIO_DDR(h8300_sci_pins[ch].port,
- h8300_sci_pins[ch].tx,
- H8300_GPIO_OUTPUT);
-
- /* tx mark output*/
- H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- if (port->mapbase == 0xA4400000) {
- __raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
- __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
- } else if (port->mapbase == 0xA4410000)
- __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- unsigned short data;
-
- if (cflag & CRTSCTS) {
- /* enable RTS/CTS */
- if (port->mapbase == 0xa4430000) { /* SCIF0 */
- /* Clear PTCR bit 9-2; enable all scif pins but sck */
- data = __raw_readw(PORT_PTCR);
- __raw_writew((data & 0xfc03), PORT_PTCR);
- } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
- /* Clear PVCR bit 9-2 */
- data = __raw_readw(PORT_PVCR);
- __raw_writew((data & 0xfc03), PORT_PVCR);
- }
- } else {
- if (port->mapbase == 0xa4430000) { /* SCIF0 */
- /* Clear PTCR bit 5-2; enable only tx and rx */
- data = __raw_readw(PORT_PTCR);
- __raw_writew((data & 0xffc3), PORT_PTCR);
- } else if (port->mapbase == 0xa4438000) { /* SCIF1 */
- /* Clear PVCR bit 5-2 */
- data = __raw_readw(PORT_PVCR);
- __raw_writew((data & 0xffc3), PORT_PVCR);
- }
- }
-}
-#elif defined(CONFIG_CPU_SH3)
-/* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- unsigned short data;
-
- /* We need to set SCPCR to enable RTS/CTS */
- data = __raw_readw(SCPCR);
- /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/
- __raw_writew(data & 0x0fcf, SCPCR);
-
- if (!(cflag & CRTSCTS)) {
- /* We need to set SCPCR to enable RTS/CTS */
- data = __raw_readw(SCPCR);
- /* Clear out SCP7MD1,0, SCP4MD1,0,
- Set SCP6MD1,0 = {01} (output) */
- __raw_writew((data & 0x0fcf) | 0x1000, SCPCR);
+ struct sci_port *s = to_sci_port(port);
+ struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
- data = __raw_readb(SCPDR);
- /* Set /RTS2 (bit6) = 0 */
- __raw_writeb(data & 0xbf, SCPDR);
+ /*
+ * Use port-specific handler if provided.
+ */
+ if (s->cfg->ops && s->cfg->ops->init_pins) {
+ s->cfg->ops->init_pins(port, cflag);
+ return;
}
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- unsigned short data;
- if (port->mapbase == 0xffe00000) {
- data = __raw_readw(PSCR);
- data &= ~0x03cf;
- if (!(cflag & CRTSCTS))
- data |= 0x0340;
+ /*
+ * For the generic path SCSPTR is necessary. Bail out if that's
+ * unavailable, too.
+ */
+ if (!reg->size)
+ return;
- __raw_writew(data, PSCR);
- }
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \
- defined(CONFIG_CPU_SUBTYPE_SH7763) || \
- defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786) || \
- defined(CONFIG_CPU_SUBTYPE_SHX3)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
if (!(cflag & CRTSCTS))
- __raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */
+ sci_out(port, SCSPTR, 0x0080); /* Set RTS = 1 */
}
-#elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A)
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- if (!(cflag & CRTSCTS))
- __raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */
-}
-#else
-static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
-{
- /* Nothing to do */
-}
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
- defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786)
-static int scif_txfill(struct uart_port *port)
-{
- return sci_in(port, SCTFDR) & 0xff;
-}
-
-static int scif_txroom(struct uart_port *port)
+static int sci_txfill(struct uart_port *port)
{
- return SCIF_TXROOM_MAX - scif_txfill(port);
-}
+ struct plat_sci_reg *reg;
-static int scif_rxfill(struct uart_port *port)
-{
- return sci_in(port, SCRFDR) & 0xff;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
-static int scif_txfill(struct uart_port *port)
-{
- if (port->mapbase == 0xffe00000 ||
- port->mapbase == 0xffe08000)
- /* SCIF0/1*/
+ reg = sci_getreg(port, SCTFDR);
+ if (reg->size)
return sci_in(port, SCTFDR) & 0xff;
- else
- /* SCIF2 */
+
+ reg = sci_getreg(port, SCFDR);
+ if (reg->size)
return sci_in(port, SCFDR) >> 8;
-}
-static int scif_txroom(struct uart_port *port)
-{
- if (port->mapbase == 0xffe00000 ||
- port->mapbase == 0xffe08000)
- /* SCIF0/1*/
- return SCIF_TXROOM_MAX - scif_txfill(port);
- else
- /* SCIF2 */
- return SCIF2_TXROOM_MAX - scif_txfill(port);
+ return !(sci_in(port, SCxSR) & SCI_TDRE);
}
-static int scif_rxfill(struct uart_port *port)
-{
- if ((port->mapbase == 0xffe00000) ||
- (port->mapbase == 0xffe08000)) {
- /* SCIF0/1*/
- return sci_in(port, SCRFDR) & 0xff;
- } else {
- /* SCIF2 */
- return sci_in(port, SCFDR) & SCIF2_RFDC_MASK;
- }
-}
-#elif defined(CONFIG_ARCH_SH7372)
-static int scif_txfill(struct uart_port *port)
+static int sci_txroom(struct uart_port *port)
{
- if (port->type == PORT_SCIFA)
- return sci_in(port, SCFDR) >> 8;
- else
- return sci_in(port, SCTFDR);
+ return port->fifosize - sci_txfill(port);
}
-static int scif_txroom(struct uart_port *port)
+static int sci_rxfill(struct uart_port *port)
{
- return port->fifosize - scif_txfill(port);
-}
+ struct plat_sci_reg *reg;
-static int scif_rxfill(struct uart_port *port)
-{
- if (port->type == PORT_SCIFA)
- return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
- else
- return sci_in(port, SCRFDR);
-}
-#else
-static int scif_txfill(struct uart_port *port)
-{
- return sci_in(port, SCFDR) >> 8;
-}
+ reg = sci_getreg(port, SCRFDR);
+ if (reg->size)
+ return sci_in(port, SCRFDR) & 0xff;
-static int scif_txroom(struct uart_port *port)
-{
- return SCIF_TXROOM_MAX - scif_txfill(port);
-}
+ reg = sci_getreg(port, SCFDR);
+ if (reg->size)
+ return sci_in(port, SCFDR) & ((port->fifosize << 1) - 1);
-static int scif_rxfill(struct uart_port *port)
-{
- return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
+ return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
}
-#endif
-static int sci_txfill(struct uart_port *port)
+/*
+ * SCI helper for checking the state of the muxed port/RXD pins.
+ */
+static inline int sci_rxd_in(struct uart_port *port)
{
- return !(sci_in(port, SCxSR) & SCI_TDRE);
-}
+ struct sci_port *s = to_sci_port(port);
-static int sci_txroom(struct uart_port *port)
-{
- return !sci_txfill(port);
-}
+ if (s->cfg->port_reg <= 0)
+ return 1;
-static int sci_rxfill(struct uart_port *port)
-{
- return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
+ return !!__raw_readb(s->cfg->port_reg);
}
/* ********************************************************************** *
@@ -406,10 +523,7 @@ static void sci_transmit_chars(struct uart_port *port)
return;
}
- if (port->type == PORT_SCI)
- count = sci_txroom(port);
- else
- count = scif_txroom(port);
+ count = sci_txroom(port);
do {
unsigned char c;
@@ -464,13 +578,8 @@ static void sci_receive_chars(struct uart_port *port)
return;
while (1) {
- if (port->type == PORT_SCI)
- count = sci_rxfill(port);
- else
- count = scif_rxfill(port);
-
/* Don't copy more bytes than there is room for in the buffer */
- count = tty_buffer_request_room(tty, count);
+ count = tty_buffer_request_room(tty, sci_rxfill(port));
/* If for any reason we can't copy more data, we're done! */
if (count == 0)
@@ -561,8 +670,7 @@ static void sci_break_timer(unsigned long data)
{
struct sci_port *port = (struct sci_port *)data;
- if (port->enable)
- port->enable(&port->port);
+ sci_port_enable(port);
if (sci_rxd_in(&port->port) == 0) {
port->break_flag = 1;
@@ -574,8 +682,7 @@ static void sci_break_timer(unsigned long data)
} else
port->break_flag = 0;
- if (port->disable)
- port->disable(&port->port);
+ sci_port_disable(port);
}
static int sci_handle_errors(struct uart_port *port)
@@ -583,13 +690,19 @@ static int sci_handle_errors(struct uart_port *port)
int copied = 0;
unsigned short status = sci_in(port, SCxSR);
struct tty_struct *tty = port->state->port.tty;
+ struct sci_port *s = to_sci_port(port);
- if (status & SCxSR_ORER(port)) {
- /* overrun error */
- if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
- copied++;
+ /*
+ * Handle overruns, if supported.
+ */
+ if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) {
+ if (status & (1 << s->cfg->overrun_bit)) {
+ /* overrun error */
+ if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
+ copied++;
- dev_notice(port->dev, "overrun error");
+ dev_notice(port->dev, "overrun error");
+ }
}
if (status & SCxSR_FER(port)) {
@@ -637,12 +750,15 @@ static int sci_handle_errors(struct uart_port *port)
static int sci_handle_fifo_overrun(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
+ struct sci_port *s = to_sci_port(port);
+ struct plat_sci_reg *reg;
int copied = 0;
- if (port->type != PORT_SCIF)
+ reg = sci_getreg(port, SCLSR);
+ if (!reg->size)
return 0;
- if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) {
+ if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
sci_out(port, SCLSR, 0);
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
@@ -840,74 +956,102 @@ static int sci_notifier(struct notifier_block *self,
return NOTIFY_OK;
}
-static void sci_clk_enable(struct uart_port *port)
-{
- struct sci_port *sci_port = to_sci_port(port);
-
- pm_runtime_get_sync(port->dev);
+static struct sci_irq_desc {
+ const char *desc;
+ irq_handler_t handler;
+} sci_irq_desc[] = {
+ /*
+ * Split out handlers, the default case.
+ */
+ [SCIx_ERI_IRQ] = {
+ .desc = "rx err",
+ .handler = sci_er_interrupt,
+ },
- clk_enable(sci_port->iclk);
- sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
- clk_enable(sci_port->fclk);
-}
+ [SCIx_RXI_IRQ] = {
+ .desc = "rx full",
+ .handler = sci_rx_interrupt,
+ },
-static void sci_clk_disable(struct uart_port *port)
-{
- struct sci_port *sci_port = to_sci_port(port);
+ [SCIx_TXI_IRQ] = {
+ .desc = "tx empty",
+ .handler = sci_tx_interrupt,
+ },
- clk_disable(sci_port->fclk);
- clk_disable(sci_port->iclk);
+ [SCIx_BRI_IRQ] = {
+ .desc = "break",
+ .handler = sci_br_interrupt,
+ },
- pm_runtime_put_sync(port->dev);
-}
+ /*
+ * Special muxed handler.
+ */
+ [SCIx_MUX_IRQ] = {
+ .desc = "mux",
+ .handler = sci_mpxed_interrupt,
+ },
+};
static int sci_request_irq(struct sci_port *port)
{
- int i;
- irqreturn_t (*handlers[4])(int irq, void *ptr) = {
- sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt,
- sci_br_interrupt,
- };
- const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full",
- "SCI Transmit Data Empty", "SCI Break" };
-
- if (port->cfg->irqs[0] == port->cfg->irqs[1]) {
- if (unlikely(!port->cfg->irqs[0]))
- return -ENODEV;
-
- if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt,
- IRQF_DISABLED, "sci", port)) {
- dev_err(port->port.dev, "Can't allocate IRQ\n");
- return -ENODEV;
+ struct uart_port *up = &port->port;
+ int i, j, ret = 0;
+
+ for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
+ struct sci_irq_desc *desc;
+ unsigned int irq;
+
+ if (SCIx_IRQ_IS_MUXED(port)) {
+ i = SCIx_MUX_IRQ;
+ irq = up->irq;
+ } else
+ irq = port->cfg->irqs[i];
+
+ desc = sci_irq_desc + i;
+ port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
+ dev_name(up->dev), desc->desc);
+ if (!port->irqstr[j]) {
+ dev_err(up->dev, "Failed to allocate %s IRQ string\n",
+ desc->desc);
+ goto out_nomem;
}
- } else {
- for (i = 0; i < ARRAY_SIZE(handlers); i++) {
- if (unlikely(!port->cfg->irqs[i]))
- continue;
-
- if (request_irq(port->cfg->irqs[i], handlers[i],
- IRQF_DISABLED, desc[i], port)) {
- dev_err(port->port.dev, "Can't allocate IRQ\n");
- return -ENODEV;
- }
+
+ ret = request_irq(irq, desc->handler, up->irqflags,
+ port->irqstr[j], port);
+ if (unlikely(ret)) {
+ dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc);
+ goto out_noirq;
}
}
return 0;
+
+out_noirq:
+ while (--i >= 0)
+ free_irq(port->cfg->irqs[i], port);
+
+out_nomem:
+ while (--j >= 0)
+ kfree(port->irqstr[j]);
+
+ return ret;
}
static void sci_free_irq(struct sci_port *port)
{
int i;
- if (port->cfg->irqs[0] == port->cfg->irqs[1])
- free_irq(port->cfg->irqs[0], port);
- else {
- for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) {
- if (!port->cfg->irqs[i])
- continue;
+ /*
+ * Intentionally in reverse order so we iterate over the muxed
+ * IRQ first.
+ */
+ for (i = 0; i < SCIx_NR_IRQS; i++) {
+ free_irq(port->cfg->irqs[i], port);
+ kfree(port->irqstr[i]);
- free_irq(port->cfg->irqs[i], port);
+ if (SCIx_IRQ_IS_MUXED(port)) {
+ /* If there's only one IRQ, we're done. */
+ return;
}
}
}
@@ -915,7 +1059,7 @@ static void sci_free_irq(struct sci_port *port)
static unsigned int sci_tx_empty(struct uart_port *port)
{
unsigned short status = sci_in(port, SCxSR);
- unsigned short in_tx_fifo = scif_txfill(port);
+ unsigned short in_tx_fifo = sci_txfill(port);
return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
}
@@ -1438,8 +1582,7 @@ static int sci_startup(struct uart_port *port)
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
- if (s->enable)
- s->enable(port);
+ sci_port_enable(s);
ret = sci_request_irq(s);
if (unlikely(ret < 0))
@@ -1465,8 +1608,7 @@ static void sci_shutdown(struct uart_port *port)
sci_free_dma(port);
sci_free_irq(s);
- if (s->disable)
- s->disable(port);
+ sci_port_disable(s);
}
static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
@@ -1513,8 +1655,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
if (likely(baud && port->uartclk))
t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk);
- if (s->enable)
- s->enable(port);
+ sci_port_enable(s);
do {
status = sci_in(port, SCxSR);
@@ -1584,8 +1725,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
if ((termios->c_cflag & CREAD) != 0)
sci_start_rx(port);
- if (s->disable)
- s->disable(port);
+ sci_port_disable(s);
}
static const char *sci_type(struct uart_port *port)
@@ -1726,6 +1866,7 @@ static int __devinit sci_init_single(struct platform_device *dev,
struct plat_sci_port *p)
{
struct uart_port *port = &sci_port->port;
+ int ret;
port->ops = &sci_uart_ops;
port->iotype = UPIO_MEM;
@@ -1746,6 +1887,12 @@ static int __devinit sci_init_single(struct platform_device *dev,
break;
}
+ if (p->regtype == SCIx_PROBE_REGTYPE) {
+ ret = sci_probe_regmap(p);
+ if (unlikely(ret))
+ return ret;
+ }
+
if (dev) {
sci_port->iclk = clk_get(&dev->dev, "sci_ick");
if (IS_ERR(sci_port->iclk)) {
@@ -1764,8 +1911,6 @@ static int __devinit sci_init_single(struct platform_device *dev,
if (IS_ERR(sci_port->fclk))
sci_port->fclk = NULL;
- sci_port->enable = sci_clk_enable;
- sci_port->disable = sci_clk_disable;
port->dev = &dev->dev;
pm_runtime_enable(&dev->dev);
@@ -1775,20 +1920,51 @@ static int __devinit sci_init_single(struct platform_device *dev,
sci_port->break_timer.function = sci_break_timer;
init_timer(&sci_port->break_timer);
+ /*
+ * Establish some sensible defaults for the error detection.
+ */
+ if (!p->error_mask)
+ p->error_mask = (p->type == PORT_SCI) ?
+ SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK;
+
+ /*
+ * Establish sensible defaults for the overrun detection, unless
+ * the part has explicitly disabled support for it.
+ */
+ if (p->overrun_bit != SCIx_NOT_SUPPORTED) {
+ if (p->type == PORT_SCI)
+ p->overrun_bit = 5;
+ else if (p->scbrr_algo_id == SCBRR_ALGO_4)
+ p->overrun_bit = 9;
+ else
+ p->overrun_bit = 0;
+
+ /*
+ * Make the error mask inclusive of overrun detection, if
+ * supported.
+ */
+ p->error_mask |= (1 << p->overrun_bit);
+ }
+
sci_port->cfg = p;
port->mapbase = p->mapbase;
port->type = p->type;
port->flags = p->flags;
+ port->regshift = p->regshift;
/*
- * The UART port needs an IRQ value, so we peg this to the TX IRQ
+ * The UART port needs an IRQ value, so we peg this to the RX IRQ
* for the multi-IRQ ports, which is where we are primarily
* concerned with the shutdown path synchronization.
*
* For the muxed case there's nothing more to do.
*/
port->irq = p->irqs[SCIx_RXI_IRQ];
+ port->irqflags = IRQF_DISABLED;
+
+ port->serial_in = sci_serial_in;
+ port->serial_out = sci_serial_out;
if (p->dma_dev)
dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
@@ -1814,8 +1990,7 @@ static void serial_console_write(struct console *co, const char *s,
struct uart_port *port = &sci_port->port;
unsigned short bits;
- if (sci_port->enable)
- sci_port->enable(port);
+ sci_port_enable(sci_port);
uart_console_write(port, s, count, serial_console_putchar);
@@ -1824,8 +1999,7 @@ static void serial_console_write(struct console *co, const char *s,
while ((sci_in(port, SCxSR) & bits) != bits)
cpu_relax();
- if (sci_port->disable)
- sci_port->disable(port);
+ sci_port_disable(sci_port);
}
static int __devinit serial_console_setup(struct console *co, char *options)
@@ -1857,20 +2031,13 @@ static int __devinit serial_console_setup(struct console *co, char *options)
if (unlikely(ret != 0))
return ret;
- if (sci_port->enable)
- sci_port->enable(port);
+ sci_port_enable(sci_port);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
- ret = uart_set_options(port, co, baud, parity, bits, flow);
-#if defined(__H8300H__) || defined(__H8300S__)
- /* disable rx interrupt */
- if (ret == 0)
- sci_stop_rx(port);
-#endif
/* TODO: disable clock */
- return ret;
+ return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct console serial_console = {
@@ -2081,3 +2248,5 @@ module_exit(sci_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-sci");
+MODULE_AUTHOR("Paul Mundt");
+MODULE_DESCRIPTION("SuperH SCI(F) serial driver");
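
The bulk of the sh-sci.c change replaces per-SoC #ifdef register macros with per-regtype tables of {offset, size} descriptors and generic sci_serial_in()/sci_serial_out() accessors that dispatch on the recorded width, with size 0 marking a register the port type lacks. The stand-alone sketch below reproduces that table-driven idea against a fake MMIO buffer; the enum names, offsets and buffer are illustrative, not the driver's.

/* Table-driven register access: each logical register carries an offset
 * and a width, and size 0 means "not present on this port type". */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { REG_SMR, REG_BRR, REG_SCR, REG_SR, NR_REGS };

struct reg_desc {
	uint8_t offset;
	uint8_t size;		/* in bits; 0 == invalid */
};

static const struct reg_desc regmap[NR_REGS] = {
	[REG_SMR] = { 0x00, 16 },
	[REG_BRR] = { 0x04, 8 },
	[REG_SCR] = { 0x08, 16 },
	[REG_SR]  = { 0x10, 16 },
};

static uint8_t mmio[0x40];	/* stands in for the ioremapped window */

static unsigned int reg_read(int reg)
{
	const struct reg_desc *r = &regmap[reg];

	if (r->size == 8)
		return mmio[r->offset];
	if (r->size == 16) {
		uint16_t v;
		memcpy(&v, &mmio[r->offset], sizeof(v));
		return v;
	}
	return 0;	/* invalid register for this port type */
}

int main(void)
{
	mmio[0x04] = 0x1a;	/* pretend SCBRR was programmed */
	printf("BRR = 0x%02x\n", reg_read(REG_BRR));
	return 0;
}
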
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index b04d937c911..e9bed038aa1 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -2,169 +2,14 @@
#include <linux/io.h>
#include <linux/gpio.h>
-#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
-#include <asm/regs306x.h>
-#endif
-#if defined(CONFIG_H8S2678)
-#include <asm/regs267x.h>
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
- defined(CONFIG_CPU_SUBTYPE_SH7707) || \
- defined(CONFIG_CPU_SUBTYPE_SH7708) || \
- defined(CONFIG_CPU_SUBTYPE_SH7709)
-# define SCPCR 0xA4000116 /* 16 bit SCI and SCIF */
-# define SCPDR 0xA4000136 /* 8 bit SCI and SCIF */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
-# define SCIF0 0xA4400000
-# define SCIF2 0xA4410000
-# define SCPCR 0xA4000116
-# define SCPDR 0xA4000136
-#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_ARCH_SH73A0) || \
- defined(CONFIG_ARCH_SH7367) || \
- defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372)
-# define PORT_PTCR 0xA405011EUL
-# define PORT_PVCR 0xA4050122UL
-# define SCIF_ORER 0x0200 /* overrun error bit */
-#elif defined(CONFIG_SH_RTS7751R2D)
-# define SCSPTR1 0xFFE0001C /* 8 bit SCIF */
-# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
- defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
- defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
- defined(CONFIG_CPU_SUBTYPE_SH7091) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751R)
-# define SCSPTR1 0xffe0001c /* 8 bit SCI */
-# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
-# define SCSPTR0 0xfe600024 /* 16 bit SCIF */
-# define SCSPTR1 0xfe610024 /* 16 bit SCIF */
-# define SCSPTR2 0xfe620024 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
-# define SCSPTR0 0xA4400000 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-# define PACR 0xa4050100
-# define PBCR 0xa4050102
-#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
-# define SCSPTR0 0xffe00010 /* 16 bit SCIF */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
-# define PADR 0xA4050120
-# define PSDR 0xA405013e
-# define PWDR 0xA4050166
-# define PSCR 0xA405011E
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
-# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */
-# define SCSPTR0 SCPDR0
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
-# define SCSPTR0 0xa4050160
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
-# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_H83007) || defined(CONFIG_H83068)
-# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
-#elif defined(CONFIG_H8S2678)
-# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
-# define SCSPTR0 0xfe4b0020
-# define SCIF_ORER 0x0001
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
-# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
-# define SCSPTR0 0xff923020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
-# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* Overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786)
-# define SCSPTR0 0xffea0024 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* Overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
- defined(CONFIG_CPU_SUBTYPE_SH7203) || \
- defined(CONFIG_CPU_SUBTYPE_SH7206) || \
- defined(CONFIG_CPU_SUBTYPE_SH7263)
-# define SCSPTR0 0xfffe8020 /* 16 bit SCIF */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
-# define SCSPTR0 0xf8400020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
-# define SCSPTR0 0xffc30020 /* 16 bit SCIF */
-# define SCIF_ORER 0x0001 /* Overrun error bit */
-#else
-# error CPU subtype not defined
-#endif
-
-/* SCxSR SCI */
-#define SCI_TDRE 0x80 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_RDRF 0x40 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_ORER 0x20 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_FER 0x10 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_PER 0x08 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-#define SCI_TEND 0x04 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-/* SCI_MPB 0x02 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-/* SCI_MPBT 0x01 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */
-
-#define SCI_ERRORS ( SCI_PER | SCI_FER | SCI_ORER)
-
-/* SCxSR SCIF */
-#define SCIF_ER 0x0080 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_TEND 0x0040 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_TDFE 0x0020 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_BRK 0x0010 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_FER 0x0008 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_PER 0x0004 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_RDF 0x0002 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-#define SCIF_DR 0x0001 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
- defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_ARCH_SH73A0) || \
- defined(CONFIG_ARCH_SH7367) || \
- defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372)
-# define SCIF_ORER 0x0200
-# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK | SCIF_ORER)
-# define SCIF_RFDC_MASK 0x007f
-# define SCIF_TXROOM_MAX 64
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
-# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK )
-# define SCIF_RFDC_MASK 0x007f
-# define SCIF_TXROOM_MAX 64
-/* SH7763 SCIF2 support */
-# define SCIF2_RFDC_MASK 0x001f
-# define SCIF2_TXROOM_MAX 16
-#else
-# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK)
-# define SCIF_RFDC_MASK 0x001f
-# define SCIF_TXROOM_MAX 16
-#endif
-
-#ifndef SCIF_ORER
-#define SCIF_ORER 0x0000
-#endif
-
#define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND)
-#define SCxSR_ERRORS(port) (((port)->type == PORT_SCI) ? SCI_ERRORS : SCIF_ERRORS)
#define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF)
#define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE)
#define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER)
#define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER)
#define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK)
-#define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : SCIF_ORER)
+
+#define SCxSR_ERRORS(port) (to_sci_port(port)->cfg->error_mask)
#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
defined(CONFIG_CPU_SUBTYPE_SH7720) || \
@@ -191,278 +36,3 @@
#define SCI_MAJOR 204
#define SCI_MINOR_START 8
-
-#define SCI_IN(size, offset) \
- if ((size) == 8) { \
- return ioread8(port->membase + (offset)); \
- } else { \
- return ioread16(port->membase + (offset)); \
- }
-#define SCI_OUT(size, offset, value) \
- if ((size) == 8) { \
- iowrite8(value, port->membase + (offset)); \
- } else if ((size) == 16) { \
- iowrite16(value, port->membase + (offset)); \
- }
-
-#define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\
- static inline unsigned int sci_##name##_in(struct uart_port *port) \
- { \
- if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \
- SCI_IN(scif_size, scif_offset) \
- } else { /* PORT_SCI or PORT_SCIFA */ \
- SCI_IN(sci_size, sci_offset); \
- } \
- } \
- static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
- { \
- if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \
- SCI_OUT(scif_size, scif_offset, value) \
- } else { /* PORT_SCI or PORT_SCIFA */ \
- SCI_OUT(sci_size, sci_offset, value); \
- } \
- }
-
-#ifdef CONFIG_H8300
-/* h8300 don't have SCIF */
-#define CPU_SCIF_FNS(name) \
- static inline unsigned int sci_##name##_in(struct uart_port *port) \
- { \
- return 0; \
- } \
- static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
- { \
- }
-#else
-#define CPU_SCIF_FNS(name, scif_offset, scif_size) \
- static inline unsigned int sci_##name##_in(struct uart_port *port) \
- { \
- SCI_IN(scif_size, scif_offset); \
- } \
- static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \
- { \
- SCI_OUT(scif_size, scif_offset, value); \
- }
-#endif
-
-#define CPU_SCI_FNS(name, sci_offset, sci_size) \
- static inline unsigned int sci_##name##_in(struct uart_port* port) \
- { \
- SCI_IN(sci_size, sci_offset); \
- } \
- static inline void sci_##name##_out(struct uart_port* port, unsigned int value) \
- { \
- SCI_OUT(sci_size, sci_offset, value); \
- }
-
-#if defined(CONFIG_CPU_SH3) || \
- defined(CONFIG_ARCH_SH73A0) || \
- defined(CONFIG_ARCH_SH7367) || \
- defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372)
-#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
-#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
- sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
- h8_sci_offset, h8_sci_size) \
- CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size)
-#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \
- defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_ARCH_SH7367)
-#define SCIF_FNS(name, scif_offset, scif_size) \
- CPU_SCIF_FNS(name, scif_offset, scif_size)
-#elif defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372) || \
- defined(CONFIG_ARCH_SH73A0)
-#define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) \
- CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size)
-#define SCIF_FNS(name, scif_offset, scif_size) \
- CPU_SCIF_FNS(name, scif_offset, scif_size)
-#else
-#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
- sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
- h8_sci_offset, h8_sci_size) \
- CPU_SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh3_scif_offset, sh3_scif_size)
-#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name, sh3_scif_offset, sh3_scif_size)
-#endif
-#elif defined(__H8300H__) || defined(__H8300S__)
-#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
- sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
- h8_sci_offset, h8_sci_size) \
- CPU_SCI_FNS(name, h8_sci_offset, h8_sci_size)
-#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
- defined(CONFIG_CPU_SUBTYPE_SH7724)
- #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size)
- #define SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
-#else
-#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \
- sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \
- h8_sci_offset, h8_sci_size) \
- CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size)
-#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \
- CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size)
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \
- defined(CONFIG_CPU_SUBTYPE_SH7720) || \
- defined(CONFIG_CPU_SUBTYPE_SH7721) || \
- defined(CONFIG_ARCH_SH7367)
-
-SCIF_FNS(SCSMR, 0x00, 16)
-SCIF_FNS(SCBRR, 0x04, 8)
-SCIF_FNS(SCSCR, 0x08, 16)
-SCIF_FNS(SCxSR, 0x14, 16)
-SCIF_FNS(SCFCR, 0x18, 16)
-SCIF_FNS(SCFDR, 0x1c, 16)
-SCIF_FNS(SCxTDR, 0x20, 8)
-SCIF_FNS(SCxRDR, 0x24, 8)
-SCIF_FNS(SCLSR, 0x00, 0)
-#elif defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372) || \
- defined(CONFIG_ARCH_SH73A0)
-SCIF_FNS(SCSMR, 0x00, 16)
-SCIF_FNS(SCBRR, 0x04, 8)
-SCIF_FNS(SCSCR, 0x08, 16)
-SCIF_FNS(SCTDSR, 0x0c, 16)
-SCIF_FNS(SCFER, 0x10, 16)
-SCIF_FNS(SCxSR, 0x14, 16)
-SCIF_FNS(SCFCR, 0x18, 16)
-SCIF_FNS(SCFDR, 0x1c, 16)
-SCIF_FNS(SCTFDR, 0x38, 16)
-SCIF_FNS(SCRFDR, 0x3c, 16)
-SCIx_FNS(SCxTDR, 0x20, 8, 0x40, 8)
-SCIx_FNS(SCxRDR, 0x24, 8, 0x60, 8)
-SCIF_FNS(SCLSR, 0x00, 0)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\
- defined(CONFIG_CPU_SUBTYPE_SH7724)
-SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16)
-SCIx_FNS(SCBRR, 0x04, 8, 0x04, 8)
-SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16)
-SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8)
-SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16)
-SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8)
-SCIx_FNS(SCSPTR, 0, 0, 0, 0)
-SCIF_FNS(SCFCR, 0x18, 16)
-SCIF_FNS(SCFDR, 0x1c, 16)
-SCIF_FNS(SCLSR, 0x24, 16)
-#else
-/* reg SCI/SH3 SCI/SH4 SCIF/SH3 SCIF/SH4 SCI/H8*/
-/* name off sz off sz off sz off sz off sz*/
-SCIx_FNS(SCSMR, 0x00, 8, 0x00, 8, 0x00, 8, 0x00, 16, 0x00, 8)
-SCIx_FNS(SCBRR, 0x02, 8, 0x04, 8, 0x02, 8, 0x04, 8, 0x01, 8)
-SCIx_FNS(SCSCR, 0x04, 8, 0x08, 8, 0x04, 8, 0x08, 16, 0x02, 8)
-SCIx_FNS(SCxTDR, 0x06, 8, 0x0c, 8, 0x06, 8, 0x0C, 8, 0x03, 8)
-SCIx_FNS(SCxSR, 0x08, 8, 0x10, 8, 0x08, 16, 0x10, 16, 0x04, 8)
-SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8)
-SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16)
-#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \
- defined(CONFIG_CPU_SUBTYPE_SH7780) || \
- defined(CONFIG_CPU_SUBTYPE_SH7785) || \
- defined(CONFIG_CPU_SUBTYPE_SH7786)
-SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16)
-SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
-SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
-SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
-SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
-#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
-SCIF_FNS(SCFDR, 0, 0, 0x1C, 16)
-SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
-SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
-SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
-SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
-#else
-SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16)
-#if defined(CONFIG_CPU_SUBTYPE_SH7722)
-SCIF_FNS(SCSPTR, 0, 0, 0, 0)
-#else
-SCIF_FNS(SCSPTR, 0, 0, 0x20, 16)
-#endif
-SCIF_FNS(SCLSR, 0, 0, 0x24, 16)
-#endif
-#endif
-#define sci_in(port, reg) sci_##reg##_in(port)
-#define sci_out(port, reg, value) sci_##reg##_out(port, value)
-
-/* H8/300 series SCI pins assignment */
-#if defined(__H8300H__) || defined(__H8300S__)
-static const struct __attribute__((packed)) {
- int port; /* GPIO port no */
- unsigned short rx,tx; /* GPIO bit no */
-} h8300_sci_pins[] = {
-#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
- { /* SCI0 */
- .port = H8300_GPIO_P9,
- .rx = H8300_GPIO_B2,
- .tx = H8300_GPIO_B0,
- },
- { /* SCI1 */
- .port = H8300_GPIO_P9,
- .rx = H8300_GPIO_B3,
- .tx = H8300_GPIO_B1,
- },
- { /* SCI2 */
- .port = H8300_GPIO_PB,
- .rx = H8300_GPIO_B7,
- .tx = H8300_GPIO_B6,
- }
-#elif defined(CONFIG_H8S2678)
- { /* SCI0 */
- .port = H8300_GPIO_P3,
- .rx = H8300_GPIO_B2,
- .tx = H8300_GPIO_B0,
- },
- { /* SCI1 */
- .port = H8300_GPIO_P3,
- .rx = H8300_GPIO_B3,
- .tx = H8300_GPIO_B1,
- },
- { /* SCI2 */
- .port = H8300_GPIO_P5,
- .rx = H8300_GPIO_B1,
- .tx = H8300_GPIO_B0,
- }
-#endif
-};
-#endif
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
- defined(CONFIG_CPU_SUBTYPE_SH7707) || \
- defined(CONFIG_CPU_SUBTYPE_SH7708) || \
- defined(CONFIG_CPU_SUBTYPE_SH7709)
-static inline int sci_rxd_in(struct uart_port *port)
-{
- if (port->mapbase == 0xfffffe80)
- return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */
- return 1;
-}
-#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751) || \
- defined(CONFIG_CPU_SUBTYPE_SH7751R) || \
- defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
- defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
- defined(CONFIG_CPU_SUBTYPE_SH7091)
-static inline int sci_rxd_in(struct uart_port *port)
-{
- if (port->mapbase == 0xffe00000)
- return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */
- return 1;
-}
-#elif defined(__H8300H__) || defined(__H8300S__)
-static inline int sci_rxd_in(struct uart_port *port)
-{
- int ch = (port->mapbase - SMR0) >> 3;
- return (H8300_SCI_DR(ch) & h8300_sci_pins[ch].rx) ? 1 : 0;
-}
-#else /* default case for non-SCI processors */
-static inline int sci_rxd_in(struct uart_port *port)
-{
- return 1;
-}
-#endif
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 92aa54550e8..ad0f8f5f6ea 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1435,7 +1435,7 @@ static int __devinit su_probe(struct platform_device *op)
rp = &op->resource[0];
up->port.mapbase = rp->start;
- up->reg_size = (rp->end - rp->start) + 1;
+ up->reg_size = resource_size(rp);
up->port.membase = of_ioremap(rp, 0, up->reg_size, "su");
if (!up->port.membase) {
if (type != SU_PORT_PORT)
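The sunsu hunk above, and the vt8500_serial, uio_pdrv, uio_pdrv_genirq and atmel_usba_udc hunks further down, all make the same mechanical substitution. As a hedged illustration of why it is equivalent (not part of the patch), resource_size() from <linux/ioport.h> is essentially the expression it replaces:

/* Sketch mirroring the <linux/ioport.h> helper: a struct resource range is
 * inclusive, so its size is end - start + 1. */
static inline resource_size_t resource_size_sketch(const struct resource *res)
{
	return res->end - res->start + 1;	/* the value resource_size(res) returns */
}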
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 37fc4e3d487..026cb9ea5cd 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -573,8 +573,7 @@ static int __init vt8500_serial_probe(struct platform_device *pdev)
snprintf(vt8500_port->name, sizeof(vt8500_port->name),
"VT8500 UART%d", pdev->id);
- vt8500_port->uart.membase = ioremap(mmres->start,
- mmres->end - mmres->start + 1);
+ vt8500_port->uart.membase = ioremap(mmres->start, resource_size(mmres));
if (!vt8500_port->uart.membase) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 1a7fd3e7031..0aebd7121b5 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -65,7 +65,7 @@
#include <linux/tty.h>
#include <linux/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/dec/interrupts.h>
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 6556f7452ba..150e4f747c7 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -94,6 +94,7 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/serial.h>
+#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <asm/system.h>
@@ -1420,8 +1421,7 @@ err_module_put:
/* call the tty release_tty routine to clean out this slot */
err_release_tty:
- if (printk_ratelimit())
- printk(KERN_INFO "tty_init_dev: ldisc open failed, "
+ printk_ratelimited(KERN_INFO "tty_init_dev: ldisc open failed, "
"clearing slot %d\n", idx);
release_tty(tty, idx);
return ERR_PTR(retval);
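The tty_io hunk above and the usbatm and usblp hunks below all collapse the open-coded printk_ratelimit() guard into printk_ratelimited(). A minimal sketch of the two forms, assuming <linux/ratelimit.h> is included (which these hunks add where it was missing):

/* Old pattern: every caller shares one global ratelimit state. */
if (printk_ratelimit())
	printk(KERN_INFO "something noisy happened\n");

/* New pattern: printk_ratelimited() keeps a static per-callsite ratelimit
 * state, so one chatty message cannot starve the others. */
printk_ratelimited(KERN_INFO "something noisy happened\n");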
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 5d01d32e2cf..ef925d58171 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -555,7 +555,7 @@ static void tty_ldisc_flush_works(struct tty_struct *tty)
static int tty_ldisc_wait_idle(struct tty_struct *tty)
{
int ret;
- ret = wait_event_interruptible_timeout(tty_ldisc_idle,
+ ret = wait_event_timeout(tty_ldisc_idle,
atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
if (ret < 0)
return ret;
@@ -763,6 +763,8 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
if (IS_ERR(ld))
return -1;
+ WARN_ON_ONCE(tty_ldisc_wait_idle(tty));
+
tty_ldisc_close(tty, tty->ldisc);
tty_ldisc_put(tty->ldisc);
tty->ldisc = NULL;
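As an aside on the tty_ldisc change above (not part of the patch): wait_event_interruptible_timeout() can return -ERESTARTSYS when the waiting task catches a signal, while wait_event_timeout() ignores signals and returns only the remaining jiffies, or 0 on timeout, so the idle wait can no longer be cut short by a signal. A minimal sketch of the changed call:

long remaining = wait_event_timeout(tty_ldisc_idle,
				    atomic_read(&tty->ldisc->users) == 1,
				    5 * HZ);
if (remaining == 0)
	;	/* timed out: the line discipline still has extra users */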
diff --git a/drivers/uio/uio_pdrv.c b/drivers/uio/uio_pdrv.c
index 7d3e469b990..bdc3db94612 100644
--- a/drivers/uio/uio_pdrv.c
+++ b/drivers/uio/uio_pdrv.c
@@ -58,7 +58,7 @@ static int uio_pdrv_probe(struct platform_device *pdev)
uiomem->memtype = UIO_MEM_PHYS;
uiomem->addr = r->start;
- uiomem->size = r->end - r->start + 1;
+ uiomem->size = resource_size(r);
++uiomem;
}
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 0f424af7f10..bae96d24676 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -23,6 +23,10 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
#define DRIVER_NAME "uio_pdrv_genirq"
struct uio_pdrv_genirq_platdata {
@@ -97,6 +101,27 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
int ret = -EINVAL;
int i;
+ if (!uioinfo) {
+ int irq;
+
+ /* alloc uioinfo for one device */
+ uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL);
+ if (!uioinfo) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "unable to kmalloc\n");
+ goto bad2;
+ }
+ uioinfo->name = pdev->dev.of_node->name;
+ uioinfo->version = "devicetree";
+
+ /* Multiple IRQs are not supported */
+ irq = platform_get_irq(pdev, 0);
+ if (irq == -ENXIO)
+ uioinfo->irq = UIO_IRQ_NONE;
+ else
+ uioinfo->irq = irq;
+ }
+
if (!uioinfo || !uioinfo->name || !uioinfo->version) {
dev_err(&pdev->dev, "missing platform_data\n");
goto bad0;
@@ -137,7 +162,7 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
uiomem->memtype = UIO_MEM_PHYS;
uiomem->addr = r->start;
- uiomem->size = r->end - r->start + 1;
+ uiomem->size = resource_size(r);
++uiomem;
}
@@ -180,6 +205,10 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
kfree(priv);
pm_runtime_disable(&pdev->dev);
bad0:
+ /* kfree uioinfo for OF */
+ if (pdev->dev.of_node)
+ kfree(uioinfo);
+ bad2:
return ret;
}
@@ -193,6 +222,10 @@ static int uio_pdrv_genirq_remove(struct platform_device *pdev)
priv->uioinfo->handler = NULL;
priv->uioinfo->irqcontrol = NULL;
+ /* kfree uioinfo for OF */
+ if (pdev->dev.of_node)
+ kfree(priv->uioinfo);
+
kfree(priv);
return 0;
}
@@ -219,6 +252,15 @@ static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = {
.runtime_resume = uio_pdrv_genirq_runtime_nop,
};
+#ifdef CONFIG_OF
+static const struct of_device_id __devinitconst uio_of_genirq_match[] = {
+ { /* empty for now */ },
+};
+MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
+#else
+# define uio_of_genirq_match NULL
+#endif
+
static struct platform_driver uio_pdrv_genirq = {
.probe = uio_pdrv_genirq_probe,
.remove = uio_pdrv_genirq_remove,
@@ -226,6 +268,7 @@ static struct platform_driver uio_pdrv_genirq = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.pm = &uio_pdrv_genirq_dev_pm_ops,
+ .of_match_table = uio_of_genirq_match,
},
};
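The OF match table added above is deliberately left empty ("empty for now"); platforms are expected to extend it once they define a binding. A hypothetical example of a populated table (the compatible string below is invented for illustration):

static const struct of_device_id uio_of_genirq_match[] = {
	{ .compatible = "acme,example-uio", },	/* hypothetical binding */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);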
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index e71521ce301..428f36801e0 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -116,14 +116,14 @@ struct uea_cmvs_v1 {
u32 address;
u16 offset;
u32 data;
-} __attribute__ ((packed));
+} __packed;
struct uea_cmvs_v2 {
u32 group;
u32 address;
u32 offset;
u32 data;
-} __attribute__ ((packed));
+} __packed;
/* information about currently processed cmv */
struct cmv_dsc_e1 {
@@ -352,7 +352,7 @@ struct block_index {
__le32 PageAddress;
__le16 dummy1;
__le16 PageNumber;
-} __attribute__ ((packed));
+} __packed;
#define E4_IS_BOOT_PAGE(PageSize) ((le32_to_cpu(PageSize)) & 0x80000000)
#define E4_PAGE_BYTES(PageSize) ((le32_to_cpu(PageSize) & 0x7fffffff) * 4)
@@ -367,7 +367,7 @@ struct l1_code {
u8 page_number_to_block_index[E4_MAX_PAGE_NUMBER];
struct block_index page_header[E4_NO_SWAPPAGE_HEADERS];
u8 code[0];
-} __attribute__ ((packed));
+} __packed;
/* structures describing a block within a DSP page */
struct block_info_e1 {
@@ -377,7 +377,7 @@ struct block_info_e1 {
__le16 wOvlOffset;
__le16 wOvl; /* overlay */
__le16 wLast;
-} __attribute__ ((packed));
+} __packed;
#define E1_BLOCK_INFO_SIZE 12
struct block_info_e4 {
@@ -387,7 +387,7 @@ struct block_info_e4 {
__be32 dwSize;
__be32 dwAddress;
__be16 wReserved;
-} __attribute__ ((packed));
+} __packed;
#define E4_BLOCK_INFO_SIZE 14
#define UEA_BIHDR 0xabcd
@@ -467,7 +467,7 @@ struct cmv_e1 {
__le32 dwSymbolicAddress;
__le16 wOffsetAddress;
__le32 dwData;
-} __attribute__ ((packed));
+} __packed;
struct cmv_e4 {
__be16 wGroup;
@@ -475,17 +475,17 @@ struct cmv_e4 {
__be16 wOffset;
__be16 wAddress;
__be32 dwData[6];
-} __attribute__ ((packed));
+} __packed;
/* structures representing swap information */
struct swap_info_e1 {
__u8 bSwapPageNo;
__u8 bOvl; /* overlay */
-} __attribute__ ((packed));
+} __packed;
struct swap_info_e4 {
__u8 bSwapPageNo;
-} __attribute__ ((packed));
+} __packed;
/* structures representing interrupt data */
#define e1_bSwapPageNo u.e1.s1.swapinfo.bSwapPageNo
@@ -499,23 +499,23 @@ union intr_data_e1 {
struct {
struct swap_info_e1 swapinfo;
__le16 wDataSize;
- } __attribute__ ((packed)) s1;
+ } __packed s1;
struct {
struct cmv_e1 cmv;
__le16 wDataSize;
- } __attribute__ ((packed)) s2;
-} __attribute__ ((packed));
+ } __packed s2;
+} __packed;
union intr_data_e4 {
struct {
struct swap_info_e4 swapinfo;
__le16 wDataSize;
- } __attribute__ ((packed)) s1;
+ } __packed s1;
struct {
struct cmv_e4 cmv;
__le16 wDataSize;
- } __attribute__ ((packed)) s2;
-} __attribute__ ((packed));
+ } __packed s2;
+} __packed;
struct intr_pkt {
__u8 bType;
@@ -528,15 +528,15 @@ struct intr_pkt {
union intr_data_e1 e1;
union intr_data_e4 e4;
} u;
-} __attribute__ ((packed));
+} __packed;
#define E1_INTR_PKT_SIZE 28
#define E4_INTR_PKT_SIZE 64
static struct usb_driver uea_driver;
static DEFINE_MUTEX(uea_mutex);
-static const char *chip_name[] = {"ADI930", "Eagle I", "Eagle II", "Eagle III",
- "Eagle IV"};
+static const char * const chip_name[] = {
+ "ADI930", "Eagle I", "Eagle II", "Eagle III", "Eagle IV"};
static int modem_index;
static unsigned int debug;
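All of the ueagle-atm hunks above are the same cosmetic substitution: __packed is the kernel's shorthand for the GCC attribute, defined in the compiler headers roughly as in this sketch (types come from <linux/types.h>):

/* Approximation of the definition in <linux/compiler-gcc.h>. */
#define __packed __attribute__((packed))

/* Effect: no padding is inserted between the fields below. */
struct example_packed {
	__u8	type;
	__le32	value;
} __packed;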
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 989e16e4ab5..d3448ca110c 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -81,6 +81,7 @@
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/kthread.h>
+#include <linux/ratelimit.h>
#ifdef VERBOSE_DEBUG
static int usbatm_print_packet(const unsigned char *data, int len);
@@ -668,8 +669,7 @@ static int usbatm_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
/* racy disconnection check - fine */
if (!instance || instance->disconnected) {
#ifdef DEBUG
- if (printk_ratelimit())
- printk(KERN_DEBUG "%s: %s!\n", __func__, instance ? "disconnected" : "NULL instance");
+ printk_ratelimited(KERN_DEBUG "%s: %s!\n", __func__, instance ? "disconnected" : "NULL instance");
#endif
err = -ENODEV;
goto fail;
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 9eca4053312..cb3a93243a0 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -58,6 +58,7 @@
#include <linux/mutex.h>
#undef DEBUG
#include <linux/usb.h>
+#include <linux/ratelimit.h>
/*
* Version Information
@@ -348,8 +349,7 @@ static int usblp_check_status(struct usblp *usblp, int err)
mutex_lock(&usblp->mut);
if ((error = usblp_read_status(usblp, usblp->statusbuf)) < 0) {
mutex_unlock(&usblp->mut);
- if (printk_ratelimit())
- printk(KERN_ERR
+ printk_ratelimited(KERN_ERR
"usblp%d: error %d reading printer status\n",
usblp->minor, error);
return 0;
@@ -653,8 +653,7 @@ static long usblp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case LPGETSTATUS:
if ((retval = usblp_read_status(usblp, usblp->statusbuf))) {
- if (printk_ratelimit())
- printk(KERN_ERR "usblp%d:"
+ printk_ratelimited(KERN_ERR "usblp%d:"
"failed reading printer status (%d)\n",
usblp->minor, retval);
retval = -EIO;
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 385acb895ab..3f94ac34dce 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -268,7 +268,7 @@ usbtmc_abort_bulk_in_status:
dev_err(dev, "usb_bulk_msg returned %d\n", rv);
goto exit;
}
- } while ((actual = max_size) &&
+ } while ((actual == max_size) &&
(n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
if (actual == max_size) {
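The one-character usbtmc fix above deserves spelling out: with "=", the condition assigned max_size to actual and then tested the (non-zero) result, so the left-hand side of the && was always true and only the read counter could end the loop. A stand-alone illustration of the difference:

#include <stdio.h>

int main(void)
{
	int actual = 10, max_size = 512;

	if ((actual = max_size))	/* old form: assignment, always true here */
		printf("buggy: branch taken, actual is now %d\n", actual);

	actual = 10;
	if (actual == max_size)		/* fixed form: comparison */
		printf("never printed\n");
	else
		printf("fixed: branch not taken, actual is still %d\n", actual);
	return 0;
}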
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index c962608b4b9..26678cadfb2 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -123,10 +123,11 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
}
if (usb_endpoint_xfer_isoc(&ep->desc))
- max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1) *
- (desc->bmAttributes + 1);
+ max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
+ le16_to_cpu(ep->desc.wMaxPacketSize);
else if (usb_endpoint_xfer_int(&ep->desc))
- max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1);
+ max_tx = le16_to_cpu(ep->desc.wMaxPacketSize) *
+ (desc->bMaxBurst + 1);
else
max_tx = 999999;
if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) {
@@ -134,10 +135,10 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
"config %d interface %d altsetting %d ep %d: "
"setting to %d\n",
usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int",
- desc->wBytesPerInterval,
+ le16_to_cpu(desc->wBytesPerInterval),
cfgno, inum, asnum, ep->desc.bEndpointAddress,
max_tx);
- ep->ss_ep_comp.wBytesPerInterval = max_tx;
+ ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx);
}
}
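The config.c hunk above fixes an endianness bug: wMaxPacketSize is a little-endian wire value (__le16), so using it directly in arithmetic gives a byte-swapped result on big-endian hosts. A hedged sketch of the corrected isochronous calculation (the helper name is illustrative):

#include <linux/usb/ch9.h>

/* Convert the little-endian descriptor field to CPU byte order before
 * multiplying, as the hunk above now does. */
static u32 isoc_max_bytes_per_interval(const struct usb_endpoint_descriptor *ep,
				       const struct usb_ss_ep_comp_descriptor *comp)
{
	return (u32)le16_to_cpu(ep->wMaxPacketSize) *
	       (comp->bMaxBurst + 1) * (comp->bmAttributes + 1);
}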
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e35a17687c0..34e3da5aa72 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -375,7 +375,7 @@ static int usb_unbind_interface(struct device *dev)
* Just re-enable it without affecting the endpoint toggles.
*/
usb_enable_interface(udev, intf, false);
- } else if (!error && !intf->dev.power.in_suspend) {
+ } else if (!error && !intf->dev.power.is_prepared) {
r = usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
if (r < 0)
@@ -960,7 +960,7 @@ void usb_rebind_intf(struct usb_interface *intf)
}
/* Try to rebind the interface */
- if (!intf->dev.power.in_suspend) {
+ if (!intf->dev.power.is_prepared) {
intf->needs_binding = 0;
rc = device_attach(&intf->dev);
if (rc < 0)
@@ -1107,7 +1107,7 @@ static int usb_resume_interface(struct usb_device *udev,
if (intf->condition == USB_INTERFACE_UNBOUND) {
/* Carry out a deferred switch to altsetting 0 */
- if (intf->needs_altsetting0 && !intf->dev.power.in_suspend) {
+ if (intf->needs_altsetting0 && !intf->dev.power.is_prepared) {
usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
intf->needs_altsetting0 = 0;
@@ -1187,13 +1187,22 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
for (i = n - 1; i >= 0; --i) {
intf = udev->actconfig->interface[i];
status = usb_suspend_interface(udev, intf, msg);
+
+ /* Ignore errors during system sleep transitions */
+ if (!(msg.event & PM_EVENT_AUTO))
+ status = 0;
if (status != 0)
break;
}
}
- if (status == 0)
+ if (status == 0) {
status = usb_suspend_device(udev, msg);
+ /* Again, ignore errors during system sleep transitions */
+ if (!(msg.event & PM_EVENT_AUTO))
+ status = 0;
+ }
+
/* If the suspend failed, resume interfaces that did get suspended */
if (status != 0) {
msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ace9f8442e5..8669ba3fe79 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -337,6 +337,17 @@ static const u8 ss_rh_config_descriptor[] = {
0x02, 0x00 /* __le16 ss_wBytesPerInterval; 15 bits for max 15 ports */
};
+/* authorized_default behaviour:
+ * -1 is authorized for all devices except wireless (old behaviour)
+ * 0 is unauthorized for all devices
+ * 1 is authorized for all devices
+ */
+static int authorized_default = -1;
+module_param(authorized_default, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(authorized_default,
+ "Default USB device authorization: 0 is not authorized, 1 is "
+ "authorized, -1 is authorized except for wireless USB (default, "
+ "old behaviour");
/*-------------------------------------------------------------------------*/
/**
@@ -2371,7 +2382,11 @@ int usb_add_hcd(struct usb_hcd *hcd,
dev_info(hcd->self.controller, "%s\n", hcd->product_desc);
- hcd->authorized_default = hcd->wireless? 0 : 1;
+ /* Keep old behaviour if authorized_default is not in [0, 1]. */
+ if (authorized_default < 0 || authorized_default > 1)
+ hcd->authorized_default = hcd->wireless? 0 : 1;
+ else
+ hcd->authorized_default = authorized_default;
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
/* HC is in reset state, but accessible. Now do the one-time init,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 90ae1753dda..a428aa080a3 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1634,6 +1634,7 @@ void usb_disconnect(struct usb_device **pdev)
{
struct usb_device *udev = *pdev;
int i;
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
if (!udev) {
pr_debug ("%s nodev\n", __func__);
@@ -1661,7 +1662,9 @@ void usb_disconnect(struct usb_device **pdev)
* so that the hardware is now fully quiesced.
*/
dev_dbg (&udev->dev, "unregistering device\n");
+ mutex_lock(hcd->bandwidth_mutex);
usb_disable_device(udev, 0);
+ mutex_unlock(hcd->bandwidth_mutex);
usb_hcd_synchronize_unlinks(udev);
usb_remove_ep_devs(&udev->ep0);
@@ -2362,6 +2365,10 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
USB_DEVICE_REMOTE_WAKEUP, 0,
NULL, 0,
USB_CTRL_SET_TIMEOUT);
+
+ /* System sleep transitions should never fail */
+ if (!(msg.event & PM_EVENT_AUTO))
+ status = 0;
} else {
/* device has up to 10 msec to fully suspend */
dev_dbg(&udev->dev, "usb %ssuspend\n",
@@ -2611,16 +2618,15 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
struct usb_device *hdev = hub->hdev;
unsigned port1;
- /* fail if children aren't already suspended */
+ /* Warn if children aren't already suspended */
for (port1 = 1; port1 <= hdev->maxchild; port1++) {
struct usb_device *udev;
udev = hdev->children [port1-1];
if (udev && udev->can_submit) {
- if (!(msg.event & PM_EVENT_AUTO))
- dev_dbg(&intf->dev, "port %d nyet suspended\n",
- port1);
- return -EBUSY;
+ dev_warn(&intf->dev, "port %d nyet suspended\n", port1);
+ if (msg.event & PM_EVENT_AUTO)
+ return -EBUSY;
}
}
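Both the driver.c and hub.c hunks above key off the same test. For context (an aside, not from the patch): msg.event carries PM_EVENT_AUTO only for runtime autosuspend requests, so clearing the status when that flag is absent keeps suspend errors fatal for autosuspend while ignoring them during system-wide sleep, which must not fail. A minimal sketch of the test, assuming <linux/pm.h>:

static bool is_runtime_autosuspend(pm_message_t msg)
{
	return (msg.event & PM_EVENT_AUTO) != 0;
}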
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 5701e857392..0b5ec234c78 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1135,15 +1135,26 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
* Deallocates hcd/hardware state for the endpoints (nuking all or most
* pending urbs) and usbcore state for the interfaces, so that usbcore
* must usb_set_configuration() before any interfaces could be used.
+ *
+ * Must be called with hcd->bandwidth_mutex held.
*/
void usb_disable_device(struct usb_device *dev, int skip_ep0)
{
int i;
+ struct usb_hcd *hcd = bus_to_hcd(dev->bus);
/* getting rid of interfaces will disconnect
* any drivers bound to them (a key side effect)
*/
if (dev->actconfig) {
+ /*
+ * FIXME: In order to avoid self-deadlock involving the
+ * bandwidth_mutex, we have to mark all the interfaces
+ * before unregistering any of them.
+ */
+ for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++)
+ dev->actconfig->interface[i]->unregistering = 1;
+
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
struct usb_interface *interface;
@@ -1153,7 +1164,6 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
continue;
dev_dbg(&dev->dev, "unregistering interface %s\n",
dev_name(&interface->dev));
- interface->unregistering = 1;
remove_intf_ep_devs(interface);
device_del(&interface->dev);
}
@@ -1172,6 +1182,16 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
skip_ep0 ? "non-ep0" : "all");
+ if (hcd->driver->check_bandwidth) {
+ /* First pass: Cancel URBs, leave endpoint pointers intact. */
+ for (i = skip_ep0; i < 16; ++i) {
+ usb_disable_endpoint(dev, i, false);
+ usb_disable_endpoint(dev, i + USB_DIR_IN, false);
+ }
+ /* Remove endpoints from the host controller internal state */
+ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+ /* Second pass: remove endpoint pointers */
+ }
for (i = skip_ep0; i < 16; ++i) {
usb_disable_endpoint(dev, i, true);
usb_disable_endpoint(dev, i + USB_DIR_IN, true);
@@ -1273,6 +1293,8 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
interface);
return -EINVAL;
}
+ if (iface->unregistering)
+ return -ENODEV;
alt = usb_altnum_to_altsetting(iface, alternate);
if (!alt) {
@@ -1727,6 +1749,7 @@ free_interfaces:
/* if it's already configured, clear out old state first.
* getting rid of old interfaces means unbinding their drivers.
*/
+ mutex_lock(hcd->bandwidth_mutex);
if (dev->state != USB_STATE_ADDRESS)
usb_disable_device(dev, 1); /* Skip ep0 */
@@ -1739,7 +1762,6 @@ free_interfaces:
* host controller will not allow submissions to dropped endpoints. If
* this call fails, the device state is unchanged.
*/
- mutex_lock(hcd->bandwidth_mutex);
ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
if (ret < 0) {
mutex_unlock(hcd->bandwidth_mutex);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 029e288805b..5a084b9cfa3 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -96,9 +96,6 @@ config USB_GADGET_VBUS_DRAW
This value will be used except for system-specific gadget
drivers that have more specific information.
-config USB_GADGET_SELECTED
- boolean
-
#
# USB Peripheral Controller Support
#
@@ -122,10 +119,9 @@ choice
# Integrated controllers
#
-config USB_GADGET_AT91
- boolean "Atmel AT91 USB Device Port"
+config USB_AT91
+ tristate "Atmel AT91 USB Device Port"
depends on ARCH_AT91 && !ARCH_AT91SAM9RL && !ARCH_AT91CAP9 && !ARCH_AT91SAM9G45
- select USB_GADGET_SELECTED
help
Many Atmel AT91 processors (such as the at91rm9200) have a
full speed USB Device Port with support for five configurable
@@ -135,27 +131,16 @@ config USB_GADGET_AT91
dynamically linked module called "at91_udc" and force all
gadget drivers to also be dynamically linked.
-config USB_AT91
- tristate
- depends on USB_GADGET_AT91
- default USB_GADGET
-
-config USB_GADGET_ATMEL_USBA
- boolean "Atmel USBA"
+config USB_ATMEL_USBA
+ tristate "Atmel USBA"
select USB_GADGET_DUALSPEED
depends on AVR32 || ARCH_AT91CAP9 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
help
USBA is the integrated high-speed USB Device controller on
the AT32AP700x, some AT91SAM9 and AT91CAP9 processors from Atmel.
-config USB_ATMEL_USBA
- tristate
- depends on USB_GADGET_ATMEL_USBA
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_FSL_USB2
- boolean "Freescale Highspeed USB DR Peripheral Controller"
+config USB_FSL_USB2
+ tristate "Freescale Highspeed USB DR Peripheral Controller"
depends on FSL_SOC || ARCH_MXC
select USB_GADGET_DUALSPEED
select USB_FSL_MPH_DR_OF if OF
@@ -170,26 +155,15 @@ config USB_GADGET_FSL_USB2
dynamically linked module called "fsl_usb2_udc" and force
all gadget drivers to also be dynamically linked.
-config USB_FSL_USB2
- tristate
- depends on USB_GADGET_FSL_USB2
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_FUSB300
- boolean "Faraday FUSB300 USB Peripheral Controller"
+config USB_FUSB300
+ tristate "Faraday FUSB300 USB Peripheral Controller"
+ depends on !PHYS_ADDR_T_64BIT
select USB_GADGET_DUALSPEED
help
Faraday usb device controller FUSB300 driver
-config USB_FUSB300
- tristate
- depends on USB_GADGET_FUSB300
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_OMAP
- boolean "OMAP USB Device Controller"
+config USB_OMAP
+ tristate "OMAP USB Device Controller"
depends on ARCH_OMAP
select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_H4_OTG
select USB_OTG_UTILS if ARCH_OMAP
@@ -204,14 +178,8 @@ config USB_GADGET_OMAP
dynamically linked module called "omap_udc" and force all
gadget drivers to also be dynamically linked.
-config USB_OMAP
- tristate
- depends on USB_GADGET_OMAP
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_PXA25X
- boolean "PXA 25x or IXP 4xx"
+config USB_PXA25X
+ tristate "PXA 25x or IXP 4xx"
depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX
select USB_OTG_UTILS
help
@@ -226,24 +194,18 @@ config USB_GADGET_PXA25X
dynamically linked module called "pxa25x_udc" and force all
gadget drivers to also be dynamically linked.
-config USB_PXA25X
- tristate
- depends on USB_GADGET_PXA25X
- default USB_GADGET
- select USB_GADGET_SELECTED
-
# if there's only one gadget driver, using only two bulk endpoints,
# don't waste memory for the other endpoints
config USB_PXA25X_SMALL
- depends on USB_GADGET_PXA25X
+ depends on USB_PXA25X
bool
default n if USB_ETH_RNDIS
default y if USB_ZERO
default y if USB_ETH
default y if USB_G_SERIAL
-config USB_GADGET_R8A66597
- boolean "Renesas R8A66597 USB Peripheral Controller"
+config USB_R8A66597
+ tristate "Renesas R8A66597 USB Peripheral Controller"
select USB_GADGET_DUALSPEED
help
R8A66597 is a discrete USB host and peripheral controller chip that
@@ -254,32 +216,22 @@ config USB_GADGET_R8A66597
dynamically linked module called "r8a66597_udc" and force all
gadget drivers to also be dynamically linked.
-config USB_R8A66597
- tristate
- depends on USB_GADGET_R8A66597
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_RENESAS_USBHS
- boolean "Renesas USBHS"
+config USB_RENESAS_USBHS_UDC
+ tristate 'Renesas USBHS controller'
+ depends on SUPERH || ARCH_SHMOBILE
depends on USB_RENESAS_USBHS
select USB_GADGET_DUALSPEED
help
- Renesas USBHS is a discrete USB host and peripheral controller
- chip that supports both full and high speed USB 2.0 data transfers.
- platform is able to configure endpoint (pipe) style
+ Renesas USBHS is a discrete USB host and peripheral controller chip
+ that supports both full and high speed USB 2.0 data transfers.
+ It has nine or more configurable endpoints, and endpoint zero.
- Say "y" to enable the gadget specific portion of the USBHS driver.
-
-
-config USB_RENESAS_USBHS_UDC
- tristate
- depends on USB_GADGET_RENESAS_USBHS
- default USB_GADGET
- select USB_GADGET_SELECTED
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "renesas_usbhs" and force all
+ gadget drivers to also be dynamically linked.
-config USB_GADGET_PXA27X
- boolean "PXA 27x"
+config USB_PXA27X
+ tristate "PXA 27x"
depends on ARCH_PXA && (PXA27x || PXA3xx)
select USB_OTG_UTILS
help
@@ -293,14 +245,8 @@ config USB_GADGET_PXA27X
dynamically linked module called "pxa27x_udc" and force all
gadget drivers to also be dynamically linked.
-config USB_PXA27X
- tristate
- depends on USB_GADGET_PXA27X
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_S3C_HSOTG
- boolean "S3C HS/OtG USB Device controller"
+config USB_S3C_HSOTG
+ tristate "S3C HS/OtG USB Device controller"
depends on S3C_DEV_USB_HSOTG
select USB_GADGET_S3C_HSOTG_PIO
select USB_GADGET_DUALSPEED
@@ -308,14 +254,8 @@ config USB_GADGET_S3C_HSOTG
The Samsung S3C64XX USB2.0 high-speed gadget controller
integrated into the S3C64XX series SoC.
-config USB_S3C_HSOTG
- tristate
- depends on USB_GADGET_S3C_HSOTG
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_IMX
- boolean "Freescale IMX USB Peripheral Controller"
+config USB_IMX
+ tristate "Freescale IMX USB Peripheral Controller"
depends on ARCH_MX1
help
Freescale's IMX series include an integrated full speed
@@ -329,14 +269,8 @@ config USB_GADGET_IMX
dynamically linked module called "imx_udc" and force all
gadget drivers to also be dynamically linked.
-config USB_IMX
- tristate
- depends on USB_GADGET_IMX
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_S3C2410
- boolean "S3C2410 USB Device Controller"
+config USB_S3C2410
+ tristate "S3C2410 USB Device Controller"
depends on ARCH_S3C2410
help
Samsung's S3C2410 is an ARM-4 processor with an integrated
@@ -346,18 +280,12 @@ config USB_GADGET_S3C2410
This driver has been tested on the S3C2410, S3C2412, and
S3C2440 processors.
-config USB_S3C2410
- tristate
- depends on USB_GADGET_S3C2410
- default USB_GADGET
- select USB_GADGET_SELECTED
-
config USB_S3C2410_DEBUG
boolean "S3C2410 udc debug messages"
- depends on USB_GADGET_S3C2410
+ depends on USB_S3C2410
-config USB_GADGET_S3C_HSUDC
- boolean "S3C2416, S3C2443 and S3C2450 USB Device Controller"
+config USB_S3C_HSUDC
+ tristate "S3C2416, S3C2443 and S3C2450 USB Device Controller"
depends on ARCH_S3C2410
select USB_GADGET_DUALSPEED
help
@@ -367,41 +295,29 @@ config USB_GADGET_S3C_HSUDC
This driver has been tested on S3C2416 and S3C2450 processors.
-config USB_S3C_HSUDC
- tristate
- depends on USB_GADGET_S3C_HSUDC
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_PXA_U2O
- boolean "PXA9xx Processor USB2.0 controller"
+config USB_PXA_U2O
+ tristate "PXA9xx Processor USB2.0 controller"
+ depends on ARCH_MMP
select USB_GADGET_DUALSPEED
help
PXA9xx Processor series include a high speed USB2.0 device
controller, which support high speed and full speed USB peripheral.
-config USB_PXA_U2O
- tristate
- depends on USB_GADGET_PXA_U2O
- default USB_GADGET
- select USB_GADGET_SELECTED
-
#
# Controllers available in both integrated and discrete versions
#
# musb builds in ../musb along with host support
config USB_GADGET_MUSB_HDRC
- boolean "Inventra HDRC USB Peripheral (TI, ADI, ...)"
- depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+ tristate "Inventra HDRC USB Peripheral (TI, ADI, ...)"
+ depends on USB_MUSB_HDRC
select USB_GADGET_DUALSPEED
- select USB_GADGET_SELECTED
help
This OTG-capable silicon IP is used in dual designs including
the TI DaVinci, OMAP 243x, OMAP 343x, TUSB 6010, and ADI Blackfin
-config USB_GADGET_M66592
- boolean "Renesas M66592 USB Peripheral Controller"
+config USB_M66592
+ tristate "Renesas M66592 USB Peripheral Controller"
select USB_GADGET_DUALSPEED
help
M66592 is a discrete USB peripheral controller chip that
@@ -412,18 +328,12 @@ config USB_GADGET_M66592
dynamically linked module called "m66592_udc" and force all
gadget drivers to also be dynamically linked.
-config USB_M66592
- tristate
- depends on USB_GADGET_M66592
- default USB_GADGET
- select USB_GADGET_SELECTED
-
#
# Controllers available only in discrete form (and all PCI controllers)
#
-config USB_GADGET_AMD5536UDC
- boolean "AMD5536 UDC"
+config USB_AMD5536UDC
+ tristate "AMD5536 UDC"
depends on PCI
select USB_GADGET_DUALSPEED
help
@@ -437,14 +347,8 @@ config USB_GADGET_AMD5536UDC
dynamically linked module called "amd5536udc" and force all
gadget drivers to also be dynamically linked.
-config USB_AMD5536UDC
- tristate
- depends on USB_GADGET_AMD5536UDC
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_FSL_QE
- boolean "Freescale QE/CPM USB Device Controller"
+config USB_FSL_QE
+ tristate "Freescale QE/CPM USB Device Controller"
depends on FSL_SOC && (QUICC_ENGINE || CPM)
help
Some of Freescale PowerPC processors have a Full Speed
@@ -456,14 +360,8 @@ config USB_GADGET_FSL_QE
Set CONFIG_USB_GADGET to "m" to build this driver as a
dynamically linked module called "fsl_qe_udc".
-config USB_FSL_QE
- tristate
- depends on USB_GADGET_FSL_QE
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_CI13XXX_PCI
- boolean "MIPS USB CI13xxx PCI UDC"
+config USB_CI13XXX_PCI
+ tristate "MIPS USB CI13xxx PCI UDC"
depends on PCI
select USB_GADGET_DUALSPEED
help
@@ -474,14 +372,31 @@ config USB_GADGET_CI13XXX_PCI
dynamically linked module called "ci13xxx_udc" and force all
gadget drivers to also be dynamically linked.
-config USB_CI13XXX_PCI
- tristate
- depends on USB_GADGET_CI13XXX_PCI
- default USB_GADGET
- select USB_GADGET_SELECTED
+config USB_NET2272
+ tristate "PLX NET2272"
+ select USB_GADGET_DUALSPEED
+ help
+ PLX NET2272 is a USB peripheral controller which supports
+ both full and high speed USB 2.0 data transfers.
+
+ It has three configurable endpoints, as well as endpoint zero
+ (for control transfer).
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "net2272" and force all
+ gadget drivers to also be dynamically linked.
+
+config USB_NET2272_DMA
+ boolean "Support external DMA controller"
+ depends on USB_NET2272
+ help
+ The NET2272 part can optionally support an external DMA
+ controller, but your board has to have support in the
+ driver itself.
-config USB_GADGET_NET2280
- boolean "NetChip 228x"
+ If unsure, say "N" here. The driver works fine in PIO mode.
+
+config USB_NET2280
+ tristate "NetChip 228x"
depends on PCI
select USB_GADGET_DUALSPEED
help
@@ -496,14 +411,8 @@ config USB_GADGET_NET2280
dynamically linked module called "net2280" and force all
gadget drivers to also be dynamically linked.
-config USB_NET2280
- tristate
- depends on USB_GADGET_NET2280
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_GOKU
- boolean "Toshiba TC86C001 'Goku-S'"
+config USB_GOKU
+ tristate "Toshiba TC86C001 'Goku-S'"
depends on PCI
help
The Toshiba TC86C001 is a PCI device which includes controllers
@@ -516,15 +425,10 @@ config USB_GADGET_GOKU
dynamically linked module called "goku_udc" and to force all
gadget drivers to also be dynamically linked.
-config USB_GOKU
- tristate
- depends on USB_GADGET_GOKU
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_LANGWELL
- boolean "Intel Langwell USB Device Controller"
+config USB_LANGWELL
+ tristate "Intel Langwell USB Device Controller"
depends on PCI
+ depends on !PHYS_ADDR_T_64BIT
select USB_GADGET_DUALSPEED
help
Intel Langwell USB Device Controller is a High-Speed USB
@@ -537,14 +441,8 @@ config USB_GADGET_LANGWELL
dynamically linked module called "langwell_udc" and force all
gadget drivers to also be dynamically linked.
-config USB_LANGWELL
- tristate
- depends on USB_GADGET_LANGWELL
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_EG20T
- boolean "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC"
+config USB_EG20T
+ tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC"
depends on PCI
select USB_GADGET_DUALSPEED
help
@@ -565,14 +463,8 @@ config USB_GADGET_EG20T
ML7213 is companion chip for Intel Atom E6xx series.
ML7213 is completely compatible for Intel EG20T PCH.
-config USB_EG20T
- tristate
- depends on USB_GADGET_EG20T
- default USB_GADGET
- select USB_GADGET_SELECTED
-
-config USB_GADGET_CI13XXX_MSM
- boolean "MIPS USB CI13xxx for MSM"
+config USB_CI13XXX_MSM
+ tristate "MIPS USB CI13xxx for MSM"
depends on ARCH_MSM
select USB_GADGET_DUALSPEED
select USB_MSM_OTG
@@ -588,31 +480,26 @@ config USB_GADGET_CI13XXX_MSM
dynamically linked module called "ci13xxx_msm" and force all
gadget drivers to also be dynamically linked.
-config USB_CI13XXX_MSM
- tristate
- depends on USB_GADGET_CI13XXX_MSM
- default USB_GADGET
- select USB_GADGET_SELECTED
-
#
# LAST -- dummy/emulated controller
#
-config USB_GADGET_DUMMY_HCD
- boolean "Dummy HCD (DEVELOPMENT)"
+config USB_DUMMY_HCD
+ tristate "Dummy HCD (DEVELOPMENT)"
depends on USB=y || (USB=m && USB_GADGET=m)
select USB_GADGET_DUALSPEED
+ select USB_GADGET_SUPERSPEED
help
This host controller driver emulates USB, looping all data transfer
requests back to a USB "gadget driver" in the same host. The host
side is the master; the gadget side is the slave. Gadget drivers
can be high, full, or low speed; and they have access to endpoints
like those from NET2280, PXA2xx, or SA1100 hardware.
-
+
This may help in some stages of creating a driver to embed in a
Linux device, since it lets you debug several parts of the gadget
driver without its hardware or drivers being involved.
-
+
Since such a gadget side driver needs to interoperate with a host
side Linux-USB device driver, this may help to debug both sides
of a USB protocol stack.
@@ -621,12 +508,6 @@ config USB_GADGET_DUMMY_HCD
dynamically linked module called "dummy_hcd" and force all
gadget drivers to also be dynamically linked.
-config USB_DUMMY_HCD
- tristate
- depends on USB_GADGET_DUMMY_HCD
- default USB_GADGET
- select USB_GADGET_SELECTED
-
# NOTE: Please keep dummy_hcd LAST so that "real hardware" appears
# first and will be selected by default.
@@ -637,12 +518,18 @@ config USB_GADGET_DUALSPEED
bool
depends on USB_GADGET
+# Selected by UDC drivers that support super-speed operation
+config USB_GADGET_SUPERSPEED
+ bool
+ depends on USB_GADGET
+ depends on USB_GADGET_DUALSPEED
+
#
# USB Gadget Drivers
#
choice
tristate "USB Gadget Drivers"
- depends on USB_GADGET && USB_GADGET_SELECTED
+ depends on USB_GADGET
default USB_ETH
help
A Linux "Gadget Driver" talks to the USB Peripheral Controller
@@ -848,7 +735,7 @@ config USB_FUNCTIONFS_GENERIC
no Ethernet interface.
config USB_FILE_STORAGE
- tristate "File-backed Storage Gadget"
+ tristate "File-backed Storage Gadget (DEPRECATED)"
depends on BLOCK
help
The File-backed Storage Gadget acts as a USB Mass Storage
@@ -859,6 +746,9 @@ config USB_FILE_STORAGE
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "g_file_storage".
+ NOTE: This driver is deprecated. Its replacement is the
+ Mass Storage Gadget.
+
config USB_FILE_STORAGE_TEST
bool "File-backed Storage Gadget testing version"
depends on USB_FILE_STORAGE
@@ -878,14 +768,11 @@ config USB_MASS_STORAGE
device (in much the same way as the "loop" device driver),
specified as a module parameter or sysfs option.
- This is heavily based on File-backed Storage Gadget and in most
- cases you will want to use FSG instead. This gadget is mostly
- here to test the functionality of the Mass Storage Function
- which may be used with composite framework.
+ This driver is an updated replacement for the deprecated
+ File-backed Storage Gadget (g_file_storage).
Say "y" to link the driver statically, or "m" to build
- a dynamically linked module called "g_mass_storage". If unsure,
- consider File-backed Storage Gadget.
+ a dynamically linked module called "g_mass_storage".
config USB_G_SERIAL
tristate "Serial Gadget (with CDC ACM and CDC OBEX support)"
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 4fe92b18a05..9ba725af4a0 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -3,7 +3,9 @@
#
ccflags-$(CONFIG_USB_GADGET_DEBUG) := -DDEBUG
+obj-$(CONFIG_USB_GADGET) += udc-core.o
obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o
+obj-$(CONFIG_USB_NET2272) += net2272.o
obj-$(CONFIG_USB_NET2280) += net2280.o
obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc.o
obj-$(CONFIG_USB_PXA25X) += pxa25x_udc.o
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 95e8138cd48..70f2b376c86 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -1438,10 +1438,15 @@ static int udc_wakeup(struct usb_gadget *gadget)
return 0;
}
+static int amd5536_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int amd5536_stop(struct usb_gadget_driver *driver);
/* gadget operations */
static const struct usb_gadget_ops udc_ops = {
.wakeup = udc_wakeup,
.get_frame = udc_get_frame,
+ .start = amd5536_start,
+ .stop = amd5536_stop,
};
/* Setups endpoint parameters, adds endpoints to linked list */
@@ -1955,7 +1960,7 @@ static int setup_ep0(struct udc *dev)
}
/* Called by gadget driver to register itself */
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int amd5536_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct udc *dev = udc;
@@ -2002,7 +2007,6 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
return 0;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
/* shutdown requests and disconnect from gadget */
static void
@@ -2027,7 +2031,7 @@ __acquires(dev->lock)
}
/* Called by gadget driver to unregister itself */
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int amd5536_stop(struct usb_gadget_driver *driver)
{
struct udc *dev = udc;
unsigned long flags;
@@ -2057,8 +2061,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
-
/* Clear pending NAK bits */
static void udc_process_cnak_queue(struct udc *dev)
@@ -3134,6 +3136,7 @@ static void udc_pci_remove(struct pci_dev *pdev)
dev = pci_get_drvdata(pdev);
+ usb_del_gadget_udc(&udc->gadget);
/* gadget driver must not be registered */
BUG_ON(dev->driver != NULL);
@@ -3382,8 +3385,13 @@ static int udc_probe(struct udc *dev)
"driver version: %s(for Geode5536 B1)\n", tmp);
udc = dev;
+ retval = usb_add_gadget_udc(&udc->pdev->dev, &dev->gadget);
+ if (retval)
+ goto finished;
+
retval = device_register(&dev->gadget.dev);
if (retval) {
+ usb_del_gadget_udc(&dev->gadget);
put_device(&dev->gadget.dev);
goto finished;
}
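The amd5536udc hunks above, and the at91_udc, atmel_usba_udc and ci13xxx hunks below, all apply the same udc-core conversion: the globally exported usb_gadget_probe_driver()/usb_gadget_unregister_driver() pair becomes driver-local ->start()/->stop() methods in usb_gadget_ops, and the controller registers itself with the framework at probe time. A hedged skeleton of the pattern (all names are illustrative, bodies trimmed to stubs):

#include <linux/usb/gadget.h>

static int example_udc_start(struct usb_gadget_driver *driver,
			     int (*bind)(struct usb_gadget *))
{
	/* what the old exported usb_gadget_probe_driver() used to do */
	return 0;
}

static int example_udc_stop(struct usb_gadget_driver *driver)
{
	/* what the old exported usb_gadget_unregister_driver() used to do */
	return 0;
}

static const struct usb_gadget_ops example_udc_ops = {
	.start	= example_udc_start,
	.stop	= example_udc_stop,
};

/* probe:  usb_add_gadget_udc(parent_device, &udc->gadget);
 * remove: usb_del_gadget_udc(&udc->gadget); */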
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index f4690ffcb48..ddb118a7680 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -35,6 +35,7 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
+#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -985,12 +986,18 @@ static int at91_set_selfpowered(struct usb_gadget *gadget, int is_on)
return 0;
}
+static int at91_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int at91_stop(struct usb_gadget_driver *driver);
+
static const struct usb_gadget_ops at91_udc_ops = {
.get_frame = at91_get_frame,
.wakeup = at91_wakeup,
.set_selfpowered = at91_set_selfpowered,
.vbus_session = at91_vbus_session,
.pullup = at91_pullup,
+ .start = at91_start,
+ .stop = at91_stop,
/*
* VBUS-powered devices may also want to support bigger
@@ -1628,7 +1635,7 @@ static void at91_vbus_timer(unsigned long data)
schedule_work(&udc->vbus_timer_work);
}
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int at91_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct at91_udc *udc = &controller;
@@ -1672,9 +1679,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
DBG("bound to %s\n", driver->driver.name);
return 0;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
+static int at91_stop(struct usb_gadget_driver *driver)
{
struct at91_udc *udc = &controller;
unsigned long flags;
@@ -1696,7 +1702,6 @@ int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
DBG("unbound from %s\n", driver->driver.name);
return 0;
}
-EXPORT_SYMBOL (usb_gadget_unregister_driver);
/*-------------------------------------------------------------------------*/
@@ -1854,13 +1859,18 @@ static int __init at91udc_probe(struct platform_device *pdev)
DBG("no VBUS detection, assuming always-on\n");
udc->vbus = 1;
}
+ retval = usb_add_gadget_udc(dev, &udc->gadget);
+ if (retval)
+ goto fail4;
dev_set_drvdata(dev, udc);
device_init_wakeup(dev, 1);
create_debug_file(udc);
INFO("%s version %s\n", driver_name, DRIVER_VERSION);
return 0;
-
+fail4:
+ if (udc->board.vbus_pin > 0 && !udc->board.vbus_polled)
+ free_irq(udc->board.vbus_pin, udc);
fail3:
if (udc->board.vbus_pin > 0)
gpio_free(udc->board.vbus_pin);
@@ -1887,6 +1897,7 @@ static int __exit at91udc_remove(struct platform_device *pdev)
DBG("remove\n");
+ usb_del_gadget_udc(&udc->gadget);
if (udc->driver)
return -EBUSY;
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index db1a659702b..5b1665eb1be 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -272,7 +272,7 @@ static void usba_init_debugfs(struct usba_udc *udc)
regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
CTRL_IOMEM_ID);
- regs->d_inode->i_size = regs_resource->end - regs_resource->start + 1;
+ regs->d_inode->i_size = resource_size(regs_resource);
udc->debugfs_regs = regs;
usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0));
@@ -1007,10 +1007,16 @@ usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
return 0;
}
+static int atmel_usba_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int atmel_usba_stop(struct usb_gadget_driver *driver);
+
static const struct usb_gadget_ops usba_udc_ops = {
.get_frame = usba_udc_get_frame,
.wakeup = usba_udc_wakeup,
.set_selfpowered = usba_udc_set_selfpowered,
+ .start = atmel_usba_start,
+ .stop = atmel_usba_stop,
};
static struct usb_endpoint_descriptor usba_ep0_desc = {
@@ -1789,7 +1795,7 @@ out:
return IRQ_HANDLED;
}
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int atmel_usba_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct usba_udc *udc = &the_udc;
@@ -1842,9 +1848,8 @@ err_driver_bind:
udc->gadget.dev.driver = NULL;
return ret;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int atmel_usba_stop(struct usb_gadget_driver *driver)
{
struct usba_udc *udc = &the_udc;
unsigned long flags;
@@ -1880,7 +1885,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
static int __init usba_udc_probe(struct platform_device *pdev)
{
@@ -2021,12 +2025,24 @@ static int __init usba_udc_probe(struct platform_device *pdev)
}
}
+ ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+ if (ret)
+ goto err_add_udc;
+
usba_init_debugfs(udc);
for (i = 1; i < pdata->num_ep; i++)
usba_ep_init_debugfs(udc, &usba_ep[i]);
return 0;
+err_add_udc:
+ if (gpio_is_valid(pdata->vbus_pin)) {
+ free_irq(gpio_to_irq(udc->vbus_pin), udc);
+ gpio_free(udc->vbus_pin);
+ }
+
+ device_unregister(&udc->gadget.dev);
+
err_device_add:
free_irq(irq, udc);
err_request_irq:
@@ -2053,6 +2069,8 @@ static int __exit usba_udc_remove(struct platform_device *pdev)
udc = platform_get_drvdata(pdev);
+ usb_del_gadget_udc(&udc->gadget);
+
for (i = 1; i < pdata->num_ep; i++)
usba_ep_cleanup_debugfs(&usba_ep[i]);
usba_cleanup_debugfs(udc);
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
index 93b999e49ef..9d89ae4765a 100644
--- a/drivers/usb/gadget/audio.c
+++ b/drivers/usb/gadget/audio.c
@@ -165,6 +165,7 @@ static struct usb_composite_driver audio_driver = {
.name = "g_audio",
.dev = &device_desc,
.strings = audio_strings,
+ .max_speed = USB_SPEED_HIGH,
.unbind = __exit_p(audio_unbind),
};
diff --git a/drivers/usb/gadget/cdc2.c b/drivers/usb/gadget/cdc2.c
index 2720ab07ef1..b1c1afbb875 100644
--- a/drivers/usb/gadget/cdc2.c
+++ b/drivers/usb/gadget/cdc2.c
@@ -244,6 +244,7 @@ static struct usb_composite_driver cdc_driver = {
.name = "g_cdc",
.dev = &device_desc,
.strings = dev_strings,
+ .max_speed = USB_SPEED_HIGH,
.unbind = __exit_p(cdc_unbind),
};
diff --git a/drivers/usb/gadget/ci13xxx_msm.c b/drivers/usb/gadget/ci13xxx_msm.c
index 139ac941959..470981ad6f7 100644
--- a/drivers/usb/gadget/ci13xxx_msm.c
+++ b/drivers/usb/gadget/ci13xxx_msm.c
@@ -126,6 +126,7 @@ static struct platform_driver ci13xxx_msm_driver = {
.probe = ci13xxx_msm_probe,
.driver = { .name = "msm_hsusb", },
};
+MODULE_ALIAS("platform:msm_hsusb");
static int __init ci13xxx_msm_init(void)
{
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index baaf87ed768..1265a8502ea 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -857,7 +857,7 @@ static void dbg_print(u8 addr, const char *name, int status, const char *extra)
stamp = stamp * 1000000 + tval.tv_usec;
scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
- "%04X\t» %02X %-7.7s %4i «\t%s\n",
+ "%04X\t? %02X %-7.7s %4i ?\t%s\n",
stamp, addr, name, status, extra);
dbg_inc(&dbg_data.idx);
@@ -865,7 +865,7 @@ static void dbg_print(u8 addr, const char *name, int status, const char *extra)
write_unlock_irqrestore(&dbg_data.lck, flags);
if (dbg_data.tty != 0)
- pr_notice("%04X\t» %02X %-7.7s %4i «\t%s\n",
+ pr_notice("%04X\t? %02X %-7.7s %4i ?\t%s\n",
stamp, addr, name, status, extra);
}
@@ -1025,15 +1025,15 @@ static ssize_t show_inters(struct device *dev, struct device_attribute *attr,
n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n",
isr_statistics.test);
- n += scnprintf(buf + n, PAGE_SIZE - n, "» ui = %d\n",
+ n += scnprintf(buf + n, PAGE_SIZE - n, "? ui = %d\n",
isr_statistics.ui);
- n += scnprintf(buf + n, PAGE_SIZE - n, "» uei = %d\n",
+ n += scnprintf(buf + n, PAGE_SIZE - n, "? uei = %d\n",
isr_statistics.uei);
- n += scnprintf(buf + n, PAGE_SIZE - n, "» pci = %d\n",
+ n += scnprintf(buf + n, PAGE_SIZE - n, "? pci = %d\n",
isr_statistics.pci);
- n += scnprintf(buf + n, PAGE_SIZE - n, "» uri = %d\n",
+ n += scnprintf(buf + n, PAGE_SIZE - n, "? uri = %d\n",
isr_statistics.uri);
- n += scnprintf(buf + n, PAGE_SIZE - n, "» sli = %d\n",
+ n += scnprintf(buf + n, PAGE_SIZE - n, "? sli = %d\n",
isr_statistics.sli);
n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n",
isr_statistics.none);
@@ -1214,12 +1214,13 @@ static DEVICE_ATTR(qheads, S_IRUSR, show_qheads, NULL);
*
* Check "device.h" for details
*/
+#define DUMP_ENTRIES 512
static ssize_t show_registers(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
unsigned long flags;
- u32 dump[512];
+ u32 *dump;
unsigned i, k, n = 0;
dbg_trace("[%s] %p\n", __func__, buf);
@@ -1228,8 +1229,14 @@ static ssize_t show_registers(struct device *dev,
return 0;
}
+ dump = kmalloc(sizeof(u32) * DUMP_ENTRIES, GFP_KERNEL);
+ if (!dump) {
+ dev_err(dev, "%s: out of memory\n", __func__);
+ return 0;
+ }
+
spin_lock_irqsave(udc->lock, flags);
- k = hw_register_read(dump, sizeof(dump)/sizeof(u32));
+ k = hw_register_read(dump, DUMP_ENTRIES);
spin_unlock_irqrestore(udc->lock, flags);
for (i = 0; i < k; i++) {
@@ -1237,6 +1244,7 @@ static ssize_t show_registers(struct device *dev,
"reg[0x%04X] = 0x%08X\n",
i * (unsigned)sizeof(u32), dump[i]);
}
+ kfree(dump);
return n;
}
@@ -2515,6 +2523,9 @@ static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
return -ENOTSUPP;
}
+static int ci13xxx_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int ci13xxx_stop(struct usb_gadget_driver *driver);
/**
* Device operations part of the API to the USB controller hardware,
* which don't involve endpoints (or i/o)
@@ -2524,17 +2535,19 @@ static const struct usb_gadget_ops usb_gadget_ops = {
.vbus_session = ci13xxx_vbus_session,
.wakeup = ci13xxx_wakeup,
.vbus_draw = ci13xxx_vbus_draw,
+ .start = ci13xxx_start,
+ .stop = ci13xxx_stop,
};
/**
- * usb_gadget_probe_driver: register a gadget driver
+ * ci13xxx_start: register a gadget driver
* @driver: the driver being registered
* @bind: the driver's bind callback
*
- * Check usb_gadget_probe_driver() at <linux/usb/gadget.h> for details.
+ * Check ci13xxx_start() at <linux/usb/gadget.h> for details.
* Interrupts are enabled here.
*/
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int ci13xxx_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct ci13xxx *udc = _udc;
@@ -2615,10 +2628,13 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
if (retval)
goto done;
spin_unlock_irqrestore(udc->lock, flags);
- retval = usb_ep_enable(&udc->ep0out.ep, &ctrl_endpt_out_desc);
+ udc->ep0out.ep.desc = &ctrl_endpt_out_desc;
+ retval = usb_ep_enable(&udc->ep0out.ep);
if (retval)
return retval;
- retval = usb_ep_enable(&udc->ep0in.ep, &ctrl_endpt_in_desc);
+
+ udc->ep0in.ep.desc = &ctrl_endpt_in_desc;
+ retval = usb_ep_enable(&udc->ep0in.ep);
if (retval)
return retval;
spin_lock_irqsave(udc->lock, flags);
@@ -2657,14 +2673,13 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
spin_unlock_irqrestore(udc->lock, flags);
return retval;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
/**
- * usb_gadget_unregister_driver: unregister a gadget driver
+ * ci13xxx_stop: unregister a gadget driver
*
* Check usb_gadget_unregister_driver() at "usb_gadget.h" for details
*/
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int ci13xxx_stop(struct usb_gadget_driver *driver)
{
struct ci13xxx *udc = _udc;
unsigned long i, flags;
@@ -2726,7 +2741,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
/******************************************************************************
* BUS block
@@ -2901,12 +2915,23 @@ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
if (retval)
goto remove_dbg;
}
+
+ retval = usb_add_gadget_udc(dev, &udc->gadget);
+ if (retval)
+ goto remove_trans;
+
pm_runtime_no_callbacks(&udc->gadget.dev);
pm_runtime_enable(&udc->gadget.dev);
_udc = udc;
return retval;
+remove_trans:
+ if (udc->transceiver) {
+ otg_set_peripheral(udc->transceiver, &udc->gadget);
+ otg_put_transceiver(udc->transceiver);
+ }
+
err("error = %i", retval);
remove_dbg:
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
@@ -2936,6 +2961,7 @@ static void udc_remove(void)
err("EINVAL");
return;
}
+ usb_del_gadget_udc(&udc->gadget);
if (udc->transceiver) {
otg_set_peripheral(udc->transceiver, &udc->gadget);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 5cbb1a41c22..aef47414f5d 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -27,7 +27,7 @@
#include <linux/utsname.h>
#include <linux/usb/composite.h>
-
+#include <asm/unaligned.h>
/*
* The code in this file is utility code, used to build a gadget driver
@@ -74,6 +74,130 @@ MODULE_PARM_DESC(iSerialNumber, "SerialNumber string");
static char composite_manufacturer[50];
/*-------------------------------------------------------------------------*/
+/**
+ * next_ep_desc() - advance to the next EP descriptor
+ * @t: current pointer within descriptor array
+ *
+ * Return: next EP descriptor or NULL
+ *
+ * Iterate over @t until either EP descriptor found or
+ * NULL (that indicates end of list) encountered
+ */
+static struct usb_descriptor_header**
+next_ep_desc(struct usb_descriptor_header **t)
+{
+ for (; *t; t++) {
+ if ((*t)->bDescriptorType == USB_DT_ENDPOINT)
+ return t;
+ }
+ return NULL;
+}
+
+/*
+ * for_each_ep_desc()- iterate over endpoint descriptors in the
+ * descriptors list
+ * @start: pointer within descriptor array.
+ * @ep_desc: endpoint descriptor to use as the loop cursor
+ */
+#define for_each_ep_desc(start, ep_desc) \
+ for (ep_desc = next_ep_desc(start); \
+ ep_desc; ep_desc = next_ep_desc(ep_desc+1))
+
+/**
+ * config_ep_by_speed() - configures the given endpoint
+ * according to gadget speed.
+ * @g: pointer to the gadget
+ * @f: usb function
+ * @_ep: the endpoint to configure
+ *
+ * Return: error code, 0 on success
+ *
+ * This function chooses the right descriptors for a given
+ * endpoint according to gadget speed and saves it in the
+ * endpoint desc field. If the endpoint already has a descriptor
+ * assigned to it - overwrites it with currently corresponding
+ * descriptor. The endpoint maxpacket field is updated according
+ * to the chosen descriptor.
+ * Note: the supplied function should hold all the descriptors
+ * for supported speeds
+ */
+int config_ep_by_speed(struct usb_gadget *g,
+ struct usb_function *f,
+ struct usb_ep *_ep)
+{
+ struct usb_endpoint_descriptor *chosen_desc = NULL;
+ struct usb_descriptor_header **speed_desc = NULL;
+
+ struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
+ int want_comp_desc = 0;
+
+ struct usb_descriptor_header **d_spd; /* cursor for speed desc */
+
+ if (!g || !f || !_ep)
+ return -EIO;
+
+ /* select desired speed */
+ switch (g->speed) {
+ case USB_SPEED_SUPER:
+ if (gadget_is_superspeed(g)) {
+ speed_desc = f->ss_descriptors;
+ want_comp_desc = 1;
+ break;
+ }
+ /* else: fall through */
+ case USB_SPEED_HIGH:
+ if (gadget_is_dualspeed(g)) {
+ speed_desc = f->hs_descriptors;
+ break;
+ }
+ /* else: fall through */
+ default:
+ speed_desc = f->descriptors;
+ }
+ /* find descriptors */
+ for_each_ep_desc(speed_desc, d_spd) {
+ chosen_desc = (struct usb_endpoint_descriptor *)*d_spd;
+ if (chosen_desc->bEndpointAddress == _ep->address)
+ goto ep_found;
+ }
+ return -EIO;
+
+ep_found:
+ /* commit results */
+ _ep->maxpacket = le16_to_cpu(chosen_desc->wMaxPacketSize);
+ _ep->desc = chosen_desc;
+ _ep->comp_desc = NULL;
+ _ep->maxburst = 0;
+ _ep->mult = 0;
+ if (!want_comp_desc)
+ return 0;
+
+ /*
+ * Companion descriptor should follow EP descriptor
+ * USB 3.0 spec, #9.6.7
+ */
+ comp_desc = (struct usb_ss_ep_comp_descriptor *)*(++d_spd);
+ if (!comp_desc ||
+ (comp_desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP))
+ return -EIO;
+ _ep->comp_desc = comp_desc;
+ if (g->speed == USB_SPEED_SUPER) {
+ switch (usb_endpoint_type(_ep->desc)) {
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ _ep->maxburst = comp_desc->bMaxBurst;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ /* mult: bits 1:0 of bmAttributes */
+ _ep->mult = comp_desc->bmAttributes & 0x3;
+ break;
+ default:
+ /* Do nothing for control endpoints */
+ break;
+ }
+ }
+ return 0;
+}
/**
* usb_add_function() - add a function to a configuration
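For orientation, the config_ep_by_speed() helper added above is meant to be called by a function driver before it enables an endpoint, so the endpoint picks up the descriptor that matches the negotiated speed. A hedged usage sketch (the function name and the example_in_ep endpoint are hypothetical):

static int example_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct usb_composite_dev *cdev = f->config->cdev;
	int ret;

	ret = config_ep_by_speed(cdev->gadget, f, example_in_ep);
	if (ret)
		return ret;

	return usb_ep_enable(example_in_ep);	/* single-argument form used by this series */
}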
@@ -123,6 +247,8 @@ int usb_add_function(struct usb_configuration *config,
config->fullspeed = true;
if (!config->highspeed && function->hs_descriptors)
config->highspeed = true;
+ if (!config->superspeed && function->ss_descriptors)
+ config->superspeed = true;
done:
if (value)
@@ -266,10 +392,17 @@ static int config_buf(struct usb_configuration *config,
list_for_each_entry(f, &config->functions, list) {
struct usb_descriptor_header **descriptors;
- if (speed == USB_SPEED_HIGH)
+ switch (speed) {
+ case USB_SPEED_SUPER:
+ descriptors = f->ss_descriptors;
+ break;
+ case USB_SPEED_HIGH:
descriptors = f->hs_descriptors;
- else
+ break;
+ default:
descriptors = f->descriptors;
+ }
+
if (!descriptors)
continue;
status = usb_descriptor_fillbuf(next, len,
@@ -292,9 +425,10 @@ static int config_desc(struct usb_composite_dev *cdev, unsigned w_value)
u8 type = w_value >> 8;
enum usb_device_speed speed = USB_SPEED_UNKNOWN;
- if (gadget_is_dualspeed(gadget)) {
- int hs = 0;
-
+ if (gadget->speed == USB_SPEED_SUPER)
+ speed = gadget->speed;
+ else if (gadget_is_dualspeed(gadget)) {
+ int hs = 0;
if (gadget->speed == USB_SPEED_HIGH)
hs = 1;
if (type == USB_DT_OTHER_SPEED_CONFIG)
@@ -308,13 +442,20 @@ static int config_desc(struct usb_composite_dev *cdev, unsigned w_value)
w_value &= 0xff;
list_for_each_entry(c, &cdev->configs, list) {
/* ignore configs that won't work at this speed */
- if (speed == USB_SPEED_HIGH) {
+ switch (speed) {
+ case USB_SPEED_SUPER:
+ if (!c->superspeed)
+ continue;
+ break;
+ case USB_SPEED_HIGH:
if (!c->highspeed)
continue;
- } else {
+ break;
+ default:
if (!c->fullspeed)
continue;
}
+
if (w_value == 0)
return config_buf(c, speed, cdev->req->buf, type);
w_value--;
@@ -328,16 +469,22 @@ static int count_configs(struct usb_composite_dev *cdev, unsigned type)
struct usb_configuration *c;
unsigned count = 0;
int hs = 0;
+ int ss = 0;
if (gadget_is_dualspeed(gadget)) {
if (gadget->speed == USB_SPEED_HIGH)
hs = 1;
+ if (gadget->speed == USB_SPEED_SUPER)
+ ss = 1;
if (type == USB_DT_DEVICE_QUALIFIER)
hs = !hs;
}
list_for_each_entry(c, &cdev->configs, list) {
/* ignore configs that won't work at this speed */
- if (hs) {
+ if (ss) {
+ if (!c->superspeed)
+ continue;
+ } else if (hs) {
if (!c->highspeed)
continue;
} else {
@@ -349,6 +496,71 @@ static int count_configs(struct usb_composite_dev *cdev, unsigned type)
return count;
}
+/**
+ * bos_desc() - prepares the BOS descriptor.
+ * @cdev: pointer to usb_composite device to generate the bos
+ * descriptor for
+ *
+ * This function generates the BOS (Binary Device Object Store)
+ * descriptor and its device capabilities descriptors. The BOS
+ * descriptor should be supported by a SuperSpeed device.
+ */
+static int bos_desc(struct usb_composite_dev *cdev)
+{
+ struct usb_ext_cap_descriptor *usb_ext;
+ struct usb_ss_cap_descriptor *ss_cap;
+ struct usb_dcd_config_params dcd_config_params;
+ struct usb_bos_descriptor *bos = cdev->req->buf;
+
+ bos->bLength = USB_DT_BOS_SIZE;
+ bos->bDescriptorType = USB_DT_BOS;
+
+ bos->wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE);
+ bos->bNumDeviceCaps = 0;
+
+ /*
+ * A SuperSpeed device shall include the USB2.0 extension descriptor
+ * and shall support LPM when operating in USB2.0 HS mode.
+ */
+ usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+ bos->bNumDeviceCaps++;
+ le16_add_cpu(&bos->wTotalLength, USB_DT_USB_EXT_CAP_SIZE);
+ usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
+ usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+ usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
+ usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT);
+
+ /*
+ * The Superspeed USB Capability descriptor shall be implemented by all
+ * SuperSpeed devices.
+ */
+ ss_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+ bos->bNumDeviceCaps++;
+ le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SS_CAP_SIZE);
+ ss_cap->bLength = USB_DT_USB_SS_CAP_SIZE;
+ ss_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+ ss_cap->bDevCapabilityType = USB_SS_CAP_TYPE;
+ ss_cap->bmAttributes = 0; /* LTM is not supported yet */
+ ss_cap->wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION |
+ USB_FULL_SPEED_OPERATION |
+ USB_HIGH_SPEED_OPERATION |
+ USB_5GBPS_OPERATION);
+ ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION;
+
+ /* Get Controller configuration */
+ if (cdev->gadget->ops->get_config_params)
+ cdev->gadget->ops->get_config_params(&dcd_config_params);
+ else {
+ dcd_config_params.bU1devExitLat = USB_DEFULT_U1_DEV_EXIT_LAT;
+ dcd_config_params.bU2DevExitLat =
+ cpu_to_le16(USB_DEFULT_U2_DEV_EXIT_LAT);
+ }
+ ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
+ ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;
+
+ return le16_to_cpu(bos->wTotalLength);
+}
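
As a sanity check on the running wTotalLength above: assuming the usual ch9.h sizes (USB_DT_BOS_SIZE = 5, USB_DT_USB_EXT_CAP_SIZE = 7 and USB_DT_USB_SS_CAP_SIZE = 10), bos_desc() ends up returning

	5 + 7 + 10 = 22 bytes (wTotalLength = 0x0016, bNumDeviceCaps = 2)

which is the amount copied into the control request buffer by the USB_DT_BOS case added further down in composite_setup().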
+
static void device_qual(struct usb_composite_dev *cdev)
{
struct usb_qualifier_descriptor *qual = cdev->req->buf;
@@ -361,7 +573,7 @@ static void device_qual(struct usb_composite_dev *cdev)
qual->bDeviceSubClass = cdev->desc.bDeviceSubClass;
qual->bDeviceProtocol = cdev->desc.bDeviceProtocol;
/* ASSUME same EP0 fifo size at both speeds */
- qual->bMaxPacketSize0 = cdev->desc.bMaxPacketSize0;
+ qual->bMaxPacketSize0 = cdev->gadget->ep0->maxpacket;
qual->bNumConfigurations = count_configs(cdev, USB_DT_DEVICE_QUALIFIER);
qual->bRESERVED = 0;
}
@@ -392,28 +604,46 @@ static int set_config(struct usb_composite_dev *cdev,
unsigned power = gadget_is_otg(gadget) ? 8 : 100;
int tmp;
- if (cdev->config)
- reset_config(cdev);
-
if (number) {
list_for_each_entry(c, &cdev->configs, list) {
if (c->bConfigurationValue == number) {
+ /*
+ * We disable the function drivers (FDs) of the
+ * previous configuration only if the new
+ * configuration is a valid one
+ */
+ if (cdev->config)
+ reset_config(cdev);
result = 0;
break;
}
}
if (result < 0)
goto done;
- } else
+ } else { /* Zero configuration value - need to reset the config */
+ if (cdev->config)
+ reset_config(cdev);
result = 0;
+ }
INFO(cdev, "%s speed config #%d: %s\n",
({ char *speed;
switch (gadget->speed) {
- case USB_SPEED_LOW: speed = "low"; break;
- case USB_SPEED_FULL: speed = "full"; break;
- case USB_SPEED_HIGH: speed = "high"; break;
- default: speed = "?"; break;
+ case USB_SPEED_LOW:
+ speed = "low";
+ break;
+ case USB_SPEED_FULL:
+ speed = "full";
+ break;
+ case USB_SPEED_HIGH:
+ speed = "high";
+ break;
+ case USB_SPEED_SUPER:
+ speed = "super";
+ break;
+ default:
+ speed = "?";
+ break;
} ; speed; }), number, c ? c->label : "unconfigured");
if (!c)
@@ -435,10 +665,16 @@ static int set_config(struct usb_composite_dev *cdev,
* function's setup callback instead of the current
* configuration's setup callback.
*/
- if (gadget->speed == USB_SPEED_HIGH)
+ switch (gadget->speed) {
+ case USB_SPEED_SUPER:
+ descriptors = f->ss_descriptors;
+ break;
+ case USB_SPEED_HIGH:
descriptors = f->hs_descriptors;
- else
+ break;
+ default:
descriptors = f->descriptors;
+ }
for (; *descriptors; ++descriptors) {
struct usb_endpoint_descriptor *ep;
@@ -531,8 +767,9 @@ int usb_add_config(struct usb_composite_dev *cdev,
} else {
unsigned i;
- DBG(cdev, "cfg %d/%p speeds:%s%s\n",
+ DBG(cdev, "cfg %d/%p speeds:%s%s%s\n",
config->bConfigurationValue, config,
+ config->superspeed ? " super" : "",
config->highspeed ? " high" : "",
config->fullspeed
? (gadget_is_dualspeed(cdev->gadget)
@@ -811,6 +1048,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
struct usb_composite_dev *cdev = get_gadget_data(gadget);
struct usb_request *req = cdev->req;
int value = -EOPNOTSUPP;
+ int status = 0;
u16 w_index = le16_to_cpu(ctrl->wIndex);
u8 intf = w_index & 0xFF;
u16 w_value = le16_to_cpu(ctrl->wValue);
@@ -838,18 +1076,31 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
case USB_DT_DEVICE:
cdev->desc.bNumConfigurations =
count_configs(cdev, USB_DT_DEVICE);
+ cdev->desc.bMaxPacketSize0 =
+ cdev->gadget->ep0->maxpacket;
+ if (gadget_is_superspeed(gadget)) {
+ if (gadget->speed >= USB_SPEED_SUPER) {
+ cdev->desc.bcdUSB = cpu_to_le16(0x0300);
+ cdev->desc.bMaxPacketSize0 = 9;
+ } else {
+ cdev->desc.bcdUSB = cpu_to_le16(0x0210);
+ }
+ }
+
value = min(w_length, (u16) sizeof cdev->desc);
memcpy(req->buf, &cdev->desc, value);
break;
case USB_DT_DEVICE_QUALIFIER:
- if (!gadget_is_dualspeed(gadget))
+ if (!gadget_is_dualspeed(gadget) ||
+ gadget->speed >= USB_SPEED_SUPER)
break;
device_qual(cdev);
value = min_t(int, w_length,
sizeof(struct usb_qualifier_descriptor));
break;
case USB_DT_OTHER_SPEED_CONFIG:
- if (!gadget_is_dualspeed(gadget))
+ if (!gadget_is_dualspeed(gadget) ||
+ gadget->speed >= USB_SPEED_SUPER)
break;
/* FALLTHROUGH */
case USB_DT_CONFIG:
@@ -863,6 +1114,12 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
if (value >= 0)
value = min(w_length, (u16) value);
break;
+ case USB_DT_BOS:
+ if (gadget_is_superspeed(gadget)) {
+ value = bos_desc(cdev);
+ value = min(w_length, (u16) value);
+ }
+ break;
}
break;
@@ -930,6 +1187,61 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
*((u8 *)req->buf) = value;
value = min(w_length, (u16) 1);
break;
+
+ /*
+ * USB 3.0 additions:
+ * The function driver should handle the get_status request. If
+ * no such callback is supplied, we respond with the default value 0.
+ * Note: the function driver should supply this callback only for
+ * the first interface of the function.
+ */
+ case USB_REQ_GET_STATUS:
+ if (!gadget_is_superspeed(gadget))
+ goto unknown;
+ if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE))
+ goto unknown;
+ value = 2; /* This is the length of the get_status reply */
+ put_unaligned_le16(0, req->buf);
+ if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
+ break;
+ f = cdev->config->interface[intf];
+ if (!f)
+ break;
+ status = f->get_status ? f->get_status(f) : 0;
+ if (status < 0)
+ break;
+ put_unaligned_le16(status & 0x0000ffff, req->buf);
+ break;
+ /*
+ * Function drivers should handle the SetFeature/ClearFeature
+ * (FUNCTION_SUSPEND) requests. The func_suspend callback should be
+ * supplied only for the first interface of the function.
+ */
+ case USB_REQ_CLEAR_FEATURE:
+ case USB_REQ_SET_FEATURE:
+ if (!gadget_is_superspeed(gadget))
+ goto unknown;
+ if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_INTERFACE))
+ goto unknown;
+ switch (w_value) {
+ case USB_INTRF_FUNC_SUSPEND:
+ if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
+ break;
+ f = cdev->config->interface[intf];
+ if (!f)
+ break;
+ value = 0;
+ if (f->func_suspend)
+ value = f->func_suspend(f, w_index >> 8);
+ if (value < 0) {
+ ERROR(cdev,
+ "func_suspend() returned error %d\n",
+ value);
+ value = 0;
+ }
+ break;
+ }
+ break;
default:
unknown:
VDBG(cdev,
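
The two SuperSpeed request paths added in the hunk above rely on optional per-function callbacks. A hedged sketch of how a function driver might wire them up (the function and its behaviour are illustrative; only the .get_status and .func_suspend hooks come from this patch):

	static int f_example_get_status(struct usb_function *f)
	{
		/* no function remote wakeup armed, not suspended */
		return 0;
	}

	static int f_example_func_suspend(struct usb_function *f, u8 options)
	{
		/* 'options' carries the suspend options from the high byte of wIndex */
		return 0;
	}

	static struct usb_function f_example = {
		.name         = "f_example",
		.get_status   = f_example_get_status,
		.func_suspend = f_example_func_suspend,
		/* bind/set_alt/descriptors set up as usual */
	};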
@@ -1140,7 +1452,6 @@ static int composite_bind(struct usb_gadget *gadget)
goto fail;
cdev->desc = *composite->dev;
- cdev->desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
/* standardized runtime overrides for device ID data */
if (idVendor)
@@ -1247,7 +1558,11 @@ composite_resume(struct usb_gadget *gadget)
/*-------------------------------------------------------------------------*/
static struct usb_gadget_driver composite_driver = {
+#ifdef CONFIG_USB_GADGET_SUPERSPEED
+ .speed = USB_SPEED_SUPER,
+#else
.speed = USB_SPEED_HIGH,
+#endif
.unbind = composite_unbind,
@@ -1293,6 +1608,8 @@ int usb_composite_probe(struct usb_composite_driver *driver,
driver->iProduct = driver->name;
composite_driver.function = (char *) driver->name;
composite_driver.driver.name = driver->name;
+ composite_driver.speed = min((u8)composite_driver.speed,
+ (u8)driver->max_speed);
composite = driver;
composite_gadget_bind = bind;
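
Because usb_composite_probe() now clamps composite_driver.speed against the registering driver's max_speed, a gadget that can run at SuperSpeed simply declares it in its usb_composite_driver. A minimal, hypothetical sketch (device_desc, dev_strings and example_bind are placeholders):

	static struct usb_composite_driver example_driver = {
		.name      = "g_example",
		.dev       = &device_desc,
		.strings   = dev_strings,
		.max_speed = USB_SPEED_SUPER,	/* clamped to the UDC's limit above */
	};

	static int __init example_init(void)
	{
		return usb_composite_probe(&example_driver, example_bind);
	}
	module_init(example_init);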
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 09084fd646a..b2c00133487 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -165,28 +165,3 @@ usb_copy_descriptors(struct usb_descriptor_header **src)
return ret;
}
-/**
- * usb_find_endpoint - find a copy of an endpoint descriptor
- * @src: original vector of descriptors
- * @copy: copy of @src
- * @match: endpoint descriptor found in @src
- *
- * This returns the copy of the @match descriptor made for @copy. Its
- * intended use is to help remembering the endpoint descriptor to use
- * when enabling a given endpoint.
- */
-struct usb_endpoint_descriptor *
-usb_find_endpoint(
- struct usb_descriptor_header **src,
- struct usb_descriptor_header **copy,
- struct usb_endpoint_descriptor *match
-)
-{
- while (*src) {
- if (*src == (void *) match)
- return (void *)*copy;
- src++;
- copy++;
- }
- return NULL;
-}
diff --git a/drivers/usb/gadget/dbgp.c b/drivers/usb/gadget/dbgp.c
index dbe92ee8847..8beefdd3678 100644
--- a/drivers/usb/gadget/dbgp.c
+++ b/drivers/usb/gadget/dbgp.c
@@ -173,7 +173,9 @@ fail_1:
static int __enable_ep(struct usb_ep *ep, struct usb_endpoint_descriptor *desc)
{
- int err = usb_ep_enable(ep, desc);
+ int err;
+ ep->desc = desc;
+ err = usb_ep_enable(ep);
ep->driver_data = dbgp.gadget;
return err;
}
@@ -268,8 +270,8 @@ static int __init dbgp_configure_endpoints(struct usb_gadget *gadget)
dbgp.serial->in = dbgp.i_ep;
dbgp.serial->out = dbgp.o_ep;
- dbgp.serial->in_desc = &i_desc;
- dbgp.serial->out_desc = &o_desc;
+ dbgp.serial->in->desc = &i_desc;
+ dbgp.serial->out->desc = &o_desc;
if (gserial_setup(gadget, 1) < 0) {
stp = 3;
@@ -312,7 +314,6 @@ static int __init dbgp_bind(struct usb_gadget *gadget)
dbgp.req->length = DBGP_REQ_EP0_LEN;
gadget->ep0->driver_data = gadget;
- device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
#ifdef CONFIG_USB_G_DBGP_SERIAL
dbgp.serial = kzalloc(sizeof(struct gserial), GFP_KERNEL);
@@ -363,6 +364,7 @@ static int dbgp_setup(struct usb_gadget *gadget,
dev_dbg(&dbgp.gadget->dev, "setup: desc device\n");
len = sizeof device_desc;
data = &device_desc;
+ device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
break;
case USB_DT_DEBUG:
dev_dbg(&dbgp.gadget->dev, "setup: desc debug\n");
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index d3dcabc1a5f..e755a9d267f 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -70,6 +70,19 @@ MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("GPL");
+struct dummy_hcd_module_parameters {
+ bool is_super_speed;
+ bool is_high_speed;
+};
+
+static struct dummy_hcd_module_parameters mod_data = {
+ .is_super_speed = false,
+ .is_high_speed = true,
+};
+module_param_named(is_super_speed, mod_data.is_super_speed, bool, S_IRUGO);
+MODULE_PARM_DESC(is_super_speed, "true to simulate SuperSpeed connection");
+module_param_named(is_high_speed, mod_data.is_high_speed, bool, S_IRUGO);
+MODULE_PARM_DESC(is_high_speed, "true to simulate HighSpeed connection");
/*-------------------------------------------------------------------------*/
/* gadget side driver data structres */
@@ -152,6 +165,22 @@ enum dummy_rh_state {
DUMMY_RH_RUNNING
};
+struct dummy_hcd {
+ struct dummy *dum;
+ enum dummy_rh_state rh_state;
+ struct timer_list timer;
+ u32 port_status;
+ u32 old_status;
+ unsigned long re_timeout;
+
+ struct usb_device *udev;
+ struct list_head urbp_list;
+
+ unsigned active:1;
+ unsigned old_active:1;
+ unsigned resuming:1;
+};
+
struct dummy {
spinlock_t lock;
@@ -167,36 +196,27 @@ struct dummy {
u16 devstatus;
unsigned udc_suspended:1;
unsigned pullup:1;
- unsigned active:1;
- unsigned old_active:1;
/*
* MASTER/HOST side support
*/
- enum dummy_rh_state rh_state;
- struct timer_list timer;
- u32 port_status;
- u32 old_status;
- unsigned resuming:1;
- unsigned long re_timeout;
-
- struct usb_device *udev;
- struct list_head urbp_list;
+ struct dummy_hcd *hs_hcd;
+ struct dummy_hcd *ss_hcd;
};
-static inline struct dummy *hcd_to_dummy (struct usb_hcd *hcd)
+static inline struct dummy_hcd *hcd_to_dummy_hcd(struct usb_hcd *hcd)
{
- return (struct dummy *) (hcd->hcd_priv);
+ return (struct dummy_hcd *) (hcd->hcd_priv);
}
-static inline struct usb_hcd *dummy_to_hcd (struct dummy *dum)
+static inline struct usb_hcd *dummy_hcd_to_hcd(struct dummy_hcd *dum)
{
return container_of((void *) dum, struct usb_hcd, hcd_priv);
}
-static inline struct device *dummy_dev (struct dummy *dum)
+static inline struct device *dummy_dev(struct dummy_hcd *dum)
{
- return dummy_to_hcd(dum)->self.controller;
+ return dummy_hcd_to_hcd(dum)->self.controller;
}
static inline struct device *udc_dev (struct dummy *dum)
@@ -209,9 +229,13 @@ static inline struct dummy *ep_to_dummy (struct dummy_ep *ep)
return container_of (ep->gadget, struct dummy, gadget);
}
-static inline struct dummy *gadget_to_dummy (struct usb_gadget *gadget)
+static inline struct dummy_hcd *gadget_to_dummy_hcd(struct usb_gadget *gadget)
{
- return container_of (gadget, struct dummy, gadget);
+ struct dummy *dum = container_of(gadget, struct dummy, gadget);
+ if (dum->gadget.speed == USB_SPEED_SUPER)
+ return dum->ss_hcd;
+ else
+ return dum->hs_hcd;
}
static inline struct dummy *gadget_dev_to_dummy (struct device *dev)
@@ -219,7 +243,7 @@ static inline struct dummy *gadget_dev_to_dummy (struct device *dev)
return container_of (dev, struct dummy, gadget.dev);
}
-static struct dummy *the_controller;
+static struct dummy the_controller;
/*-------------------------------------------------------------------------*/
@@ -259,61 +283,122 @@ stop_activity (struct dummy *dum)
/* driver now does any non-usb quiescing necessary */
}
-/* caller must hold lock */
-static void
-set_link_state (struct dummy *dum)
-{
- dum->active = 0;
- if ((dum->port_status & USB_PORT_STAT_POWER) == 0)
- dum->port_status = 0;
-
- /* UDC suspend must cause a disconnect */
- else if (!dum->pullup || dum->udc_suspended) {
- dum->port_status &= ~(USB_PORT_STAT_CONNECTION |
- USB_PORT_STAT_ENABLE |
- USB_PORT_STAT_LOW_SPEED |
- USB_PORT_STAT_HIGH_SPEED |
- USB_PORT_STAT_SUSPEND);
- if ((dum->old_status & USB_PORT_STAT_CONNECTION) != 0)
- dum->port_status |= (USB_PORT_STAT_C_CONNECTION << 16);
+/**
+ * set_link_state_by_speed() - Sets the current state of the link according to
+ * the hcd speed
+ * @dum_hcd: pointer to the dummy_hcd structure to update the link state for
+ *
+ * This function updates the port_status according to the link state and the
+ * speed of the hcd.
+ */
+static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
+{
+ struct dummy *dum = dum_hcd->dum;
+
+ if (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3) {
+ if ((dum_hcd->port_status & USB_SS_PORT_STAT_POWER) == 0) {
+ dum_hcd->port_status = 0;
+ } else if (!dum->pullup || dum->udc_suspended) {
+ /* UDC suspend must cause a disconnect */
+ dum_hcd->port_status &= ~(USB_PORT_STAT_CONNECTION |
+ USB_PORT_STAT_ENABLE);
+ if ((dum_hcd->old_status &
+ USB_PORT_STAT_CONNECTION) != 0)
+ dum_hcd->port_status |=
+ (USB_PORT_STAT_C_CONNECTION << 16);
+ } else {
+ /* device is connected and not suspended */
+ dum_hcd->port_status |= (USB_PORT_STAT_CONNECTION |
+ USB_PORT_STAT_SPEED_5GBPS);
+ if ((dum_hcd->old_status &
+ USB_PORT_STAT_CONNECTION) == 0)
+ dum_hcd->port_status |=
+ (USB_PORT_STAT_C_CONNECTION << 16);
+ if ((dum_hcd->port_status &
+ USB_PORT_STAT_ENABLE) == 1 &&
+ (dum_hcd->port_status &
+ USB_SS_PORT_LS_U0) == 1 &&
+ dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
+ dum_hcd->active = 1;
+ }
} else {
- dum->port_status |= USB_PORT_STAT_CONNECTION;
- if ((dum->old_status & USB_PORT_STAT_CONNECTION) == 0)
- dum->port_status |= (USB_PORT_STAT_C_CONNECTION << 16);
- if ((dum->port_status & USB_PORT_STAT_ENABLE) == 0)
- dum->port_status &= ~USB_PORT_STAT_SUSPEND;
- else if ((dum->port_status & USB_PORT_STAT_SUSPEND) == 0 &&
- dum->rh_state != DUMMY_RH_SUSPENDED)
- dum->active = 1;
+ if ((dum_hcd->port_status & USB_PORT_STAT_POWER) == 0) {
+ dum_hcd->port_status = 0;
+ } else if (!dum->pullup || dum->udc_suspended) {
+ /* UDC suspend must cause a disconnect */
+ dum_hcd->port_status &= ~(USB_PORT_STAT_CONNECTION |
+ USB_PORT_STAT_ENABLE |
+ USB_PORT_STAT_LOW_SPEED |
+ USB_PORT_STAT_HIGH_SPEED |
+ USB_PORT_STAT_SUSPEND);
+ if ((dum_hcd->old_status &
+ USB_PORT_STAT_CONNECTION) != 0)
+ dum_hcd->port_status |=
+ (USB_PORT_STAT_C_CONNECTION << 16);
+ } else {
+ dum_hcd->port_status |= USB_PORT_STAT_CONNECTION;
+ if ((dum_hcd->old_status &
+ USB_PORT_STAT_CONNECTION) == 0)
+ dum_hcd->port_status |=
+ (USB_PORT_STAT_C_CONNECTION << 16);
+ if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0)
+ dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
+ else if ((dum_hcd->port_status &
+ USB_PORT_STAT_SUSPEND) == 0 &&
+ dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
+ dum_hcd->active = 1;
+ }
}
+}
+
+/* caller must hold lock */
+static void set_link_state(struct dummy_hcd *dum_hcd)
+{
+ struct dummy *dum = dum_hcd->dum;
- if ((dum->port_status & USB_PORT_STAT_ENABLE) == 0 || dum->active)
- dum->resuming = 0;
+ dum_hcd->active = 0;
+ if (dum->pullup)
+ if ((dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 &&
+ dum->gadget.speed != USB_SPEED_SUPER) ||
+ (dummy_hcd_to_hcd(dum_hcd)->speed != HCD_USB3 &&
+ dum->gadget.speed == USB_SPEED_SUPER))
+ return;
- if ((dum->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
- (dum->port_status & USB_PORT_STAT_RESET) != 0) {
- if ((dum->old_status & USB_PORT_STAT_CONNECTION) != 0 &&
- (dum->old_status & USB_PORT_STAT_RESET) == 0 &&
- dum->driver) {
- stop_activity (dum);
- spin_unlock (&dum->lock);
- dum->driver->disconnect (&dum->gadget);
- spin_lock (&dum->lock);
+ set_link_state_by_speed(dum_hcd);
+
+ if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
+ dum_hcd->active)
+ dum_hcd->resuming = 0;
+
+ /* if !connected or reset */
+ if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
+ (dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
+ /*
+ * We were connected and not in reset until now (the
+ * disconnect or reset has just happened), and a driver
+ * is attached - disconnect!
+ */
+ if ((dum_hcd->old_status & USB_PORT_STAT_CONNECTION) != 0 &&
+ (dum_hcd->old_status & USB_PORT_STAT_RESET) == 0 &&
+ dum->driver) {
+ stop_activity(dum);
+ spin_unlock(&dum->lock);
+ dum->driver->disconnect(&dum->gadget);
+ spin_lock(&dum->lock);
}
- } else if (dum->active != dum->old_active) {
- if (dum->old_active && dum->driver->suspend) {
- spin_unlock (&dum->lock);
- dum->driver->suspend (&dum->gadget);
- spin_lock (&dum->lock);
- } else if (!dum->old_active && dum->driver->resume) {
- spin_unlock (&dum->lock);
- dum->driver->resume (&dum->gadget);
- spin_lock (&dum->lock);
+ } else if (dum_hcd->active != dum_hcd->old_active) {
+ if (dum_hcd->old_active && dum->driver->suspend) {
+ spin_unlock(&dum->lock);
+ dum->driver->suspend(&dum->gadget);
+ spin_lock(&dum->lock);
+ } else if (!dum_hcd->old_active && dum->driver->resume) {
+ spin_unlock(&dum->lock);
+ dum->driver->resume(&dum->gadget);
+ spin_lock(&dum->lock);
}
}
- dum->old_status = dum->port_status;
- dum->old_active = dum->active;
+ dum_hcd->old_status = dum_hcd->port_status;
+ dum_hcd->old_active = dum_hcd->active;
}
/*-------------------------------------------------------------------------*/
@@ -332,6 +417,7 @@ static int
dummy_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
struct dummy *dum;
+ struct dummy_hcd *dum_hcd;
struct dummy_ep *ep;
unsigned max;
int retval;
@@ -341,9 +427,19 @@ dummy_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
dum = ep_to_dummy (ep);
- if (!dum->driver || !is_enabled (dum))
+ if (!dum->driver)
+ return -ESHUTDOWN;
+
+ dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
+ if (!is_enabled(dum_hcd))
return -ESHUTDOWN;
- max = le16_to_cpu(desc->wMaxPacketSize) & 0x3ff;
+
+ /*
+ * For HS/FS devices only bits 0..10 of wMaxPacketSize represent the
+ * maximum packet size.
+ * For SS devices wMaxPacketSize is limited to 1024.
+ */
+ max = le16_to_cpu(desc->wMaxPacketSize) & 0x7ff;
/* drivers must not request bad settings, since lower levels
* (hardware or its drivers) may not check. some endpoints
@@ -361,6 +457,10 @@ dummy_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
goto done;
}
switch (dum->gadget.speed) {
+ case USB_SPEED_SUPER:
+ if (max == 1024)
+ break;
+ goto done;
case USB_SPEED_HIGH:
if (max == 512)
break;
@@ -379,6 +479,7 @@ dummy_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
goto done;
/* real hardware might not handle all packet sizes */
switch (dum->gadget.speed) {
+ case USB_SPEED_SUPER:
case USB_SPEED_HIGH:
if (max <= 1024)
break;
@@ -399,6 +500,7 @@ dummy_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
goto done;
/* real hardware might not handle all packet sizes */
switch (dum->gadget.speed) {
+ case USB_SPEED_SUPER:
case USB_SPEED_HIGH:
if (max <= 1024)
break;
@@ -425,10 +527,18 @@ dummy_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
(desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
({ char *val;
switch (desc->bmAttributes & 0x03) {
- case USB_ENDPOINT_XFER_BULK: val = "bulk"; break;
- case USB_ENDPOINT_XFER_ISOC: val = "iso"; break;
- case USB_ENDPOINT_XFER_INT: val = "intr"; break;
- default: val = "ctrl"; break;
+ case USB_ENDPOINT_XFER_BULK:
+ val = "bulk";
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ val = "iso";
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ val = "intr";
+ break;
+ default:
+ val = "ctrl";
+ break;
}; val; }),
max);
@@ -507,6 +617,7 @@ dummy_queue (struct usb_ep *_ep, struct usb_request *_req,
struct dummy_ep *ep;
struct dummy_request *req;
struct dummy *dum;
+ struct dummy_hcd *dum_hcd;
unsigned long flags;
req = usb_request_to_dummy_request (_req);
@@ -518,7 +629,8 @@ dummy_queue (struct usb_ep *_ep, struct usb_request *_req,
return -EINVAL;
dum = ep_to_dummy (ep);
- if (!dum->driver || !is_enabled (dum))
+ dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
+ if (!dum->driver || !is_enabled(dum_hcd))
return -ESHUTDOWN;
#if 0
@@ -662,24 +774,24 @@ static int dummy_g_get_frame (struct usb_gadget *_gadget)
static int dummy_wakeup (struct usb_gadget *_gadget)
{
- struct dummy *dum;
+ struct dummy_hcd *dum_hcd;
- dum = gadget_to_dummy (_gadget);
- if (!(dum->devstatus & ( (1 << USB_DEVICE_B_HNP_ENABLE)
+ dum_hcd = gadget_to_dummy_hcd(_gadget);
+ if (!(dum_hcd->dum->devstatus & ((1 << USB_DEVICE_B_HNP_ENABLE)
| (1 << USB_DEVICE_REMOTE_WAKEUP))))
return -EINVAL;
- if ((dum->port_status & USB_PORT_STAT_CONNECTION) == 0)
+ if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0)
return -ENOLINK;
- if ((dum->port_status & USB_PORT_STAT_SUSPEND) == 0 &&
- dum->rh_state != DUMMY_RH_SUSPENDED)
+ if ((dum_hcd->port_status & USB_PORT_STAT_SUSPEND) == 0 &&
+ dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
return -EIO;
/* FIXME: What if the root hub is suspended but the port isn't? */
/* hub notices our request, issues downstream resume, etc */
- dum->resuming = 1;
- dum->re_timeout = jiffies + msecs_to_jiffies(20);
- mod_timer (&dummy_to_hcd (dum)->rh_timer, dum->re_timeout);
+ dum_hcd->resuming = 1;
+ dum_hcd->re_timeout = jiffies + msecs_to_jiffies(20);
+ mod_timer(&dummy_hcd_to_hcd(dum_hcd)->rh_timer, dum_hcd->re_timeout);
return 0;
}
@@ -687,7 +799,7 @@ static int dummy_set_selfpowered (struct usb_gadget *_gadget, int value)
{
struct dummy *dum;
- dum = gadget_to_dummy (_gadget);
+ dum = (gadget_to_dummy_hcd(_gadget))->dum;
if (value)
dum->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
else
@@ -695,26 +807,68 @@ static int dummy_set_selfpowered (struct usb_gadget *_gadget, int value)
return 0;
}
+static void dummy_udc_udpate_ep0(struct dummy *dum)
+{
+ u32 i;
+
+ if (dum->gadget.speed == USB_SPEED_SUPER) {
+ for (i = 0; i < DUMMY_ENDPOINTS; i++)
+ dum->ep[i].ep.max_streams = 0x10;
+ dum->ep[0].ep.maxpacket = 9;
+ } else {
+ for (i = 0; i < DUMMY_ENDPOINTS; i++)
+ dum->ep[i].ep.max_streams = 0;
+ dum->ep[0].ep.maxpacket = 64;
+ }
+}
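
For reference, SuperSpeed fixes ep0's maximum packet size at 512 bytes, and the device descriptor encodes it as a power of two: bMaxPacketSize0 = 9 since 2^9 = 512. That is why 9 shows up both here and in the composite_setup() USB_DT_DEVICE handling earlier, while the HS/FS path keeps the familiar 64-byte value.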
+
static int dummy_pullup (struct usb_gadget *_gadget, int value)
{
+ struct dummy_hcd *dum_hcd;
struct dummy *dum;
unsigned long flags;
- dum = gadget_to_dummy (_gadget);
+ dum = gadget_dev_to_dummy(&_gadget->dev);
+
+ if (value && dum->driver) {
+ if (mod_data.is_super_speed)
+ dum->gadget.speed = dum->driver->speed;
+ else if (mod_data.is_high_speed)
+ dum->gadget.speed = min_t(u8, USB_SPEED_HIGH,
+ dum->driver->speed);
+ else
+ dum->gadget.speed = USB_SPEED_FULL;
+ dummy_udc_udpate_ep0(dum);
+
+ if (dum->gadget.speed < dum->driver->speed)
+ dev_dbg(udc_dev(dum), "This device can perform faster"
+ " if you connect it to a %s port...\n",
+ (dum->driver->speed == USB_SPEED_SUPER ?
+ "SuperSpeed" : "HighSpeed"));
+ }
+ dum_hcd = gadget_to_dummy_hcd(_gadget);
+
spin_lock_irqsave (&dum->lock, flags);
dum->pullup = (value != 0);
- set_link_state (dum);
+ set_link_state(dum_hcd);
spin_unlock_irqrestore (&dum->lock, flags);
- usb_hcd_poll_rh_status (dummy_to_hcd (dum));
+ usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
return 0;
}
+static int dummy_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int dummy_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+
static const struct usb_gadget_ops dummy_ops = {
.get_frame = dummy_g_get_frame,
.wakeup = dummy_wakeup,
.set_selfpowered = dummy_set_selfpowered,
.pullup = dummy_pullup,
+ .udc_start = dummy_udc_start,
+ .udc_stop = dummy_udc_stop,
};
/*-------------------------------------------------------------------------*/
@@ -747,18 +901,13 @@ static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
* for each driver that registers: just add to a big root hub.
*/
-int
-usb_gadget_probe_driver(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
+static int dummy_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct dummy *dum = the_controller;
- int retval, i;
+ struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
+ struct dummy *dum = dum_hcd->dum;
- if (!dum)
- return -EINVAL;
- if (dum->driver)
- return -EBUSY;
- if (!bind || !driver->setup || driver->speed == USB_SPEED_UNKNOWN)
+ if (driver->speed == USB_SPEED_UNKNOWN)
return -EINVAL;
/*
@@ -768,121 +917,77 @@ usb_gadget_probe_driver(struct usb_gadget_driver *driver,
dum->devstatus = 0;
- INIT_LIST_HEAD (&dum->gadget.ep_list);
- for (i = 0; i < DUMMY_ENDPOINTS; i++) {
- struct dummy_ep *ep = &dum->ep [i];
-
- if (!ep_name [i])
- break;
- ep->ep.name = ep_name [i];
- ep->ep.ops = &dummy_ep_ops;
- list_add_tail (&ep->ep.ep_list, &dum->gadget.ep_list);
- ep->halted = ep->wedged = ep->already_seen =
- ep->setup_stage = 0;
- ep->ep.maxpacket = ~0;
- ep->last_io = jiffies;
- ep->gadget = &dum->gadget;
- ep->desc = NULL;
- INIT_LIST_HEAD (&ep->queue);
- }
-
- dum->gadget.ep0 = &dum->ep [0].ep;
- dum->ep [0].ep.maxpacket = 64;
- list_del_init (&dum->ep [0].ep.ep_list);
- INIT_LIST_HEAD(&dum->fifo_req.queue);
-
- driver->driver.bus = NULL;
dum->driver = driver;
- dum->gadget.dev.driver = &driver->driver;
dev_dbg (udc_dev(dum), "binding gadget driver '%s'\n",
driver->driver.name);
- retval = bind(&dum->gadget);
- if (retval) {
- dum->driver = NULL;
- dum->gadget.dev.driver = NULL;
- return retval;
- }
-
- /* khubd will enumerate this in a while */
- spin_lock_irq (&dum->lock);
- dum->pullup = 1;
- set_link_state (dum);
- spin_unlock_irq (&dum->lock);
-
- usb_hcd_poll_rh_status (dummy_to_hcd (dum));
return 0;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-int
-usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
+static int dummy_udc_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct dummy *dum = the_controller;
- unsigned long flags;
-
- if (!dum)
- return -ENODEV;
- if (!driver || driver != dum->driver || !driver->unbind)
- return -EINVAL;
+ struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
+ struct dummy *dum = dum_hcd->dum;
dev_dbg (udc_dev(dum), "unregister gadget driver '%s'\n",
driver->driver.name);
- spin_lock_irqsave (&dum->lock, flags);
- dum->pullup = 0;
- set_link_state (dum);
- spin_unlock_irqrestore (&dum->lock, flags);
-
- driver->unbind (&dum->gadget);
- dum->gadget.dev.driver = NULL;
dum->driver = NULL;
- spin_lock_irqsave (&dum->lock, flags);
- dum->pullup = 0;
- set_link_state (dum);
- spin_unlock_irqrestore (&dum->lock, flags);
-
- usb_hcd_poll_rh_status (dummy_to_hcd (dum));
+ dummy_pullup(&dum->gadget, 0);
return 0;
}
-EXPORT_SYMBOL (usb_gadget_unregister_driver);
#undef is_enabled
-/* just declare this in any driver that really need it */
-extern int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode);
-
-int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode)
-{
- return -ENOSYS;
-}
-EXPORT_SYMBOL (net2280_set_fifo_mode);
-
-
/* The gadget structure is stored inside the hcd structure and will be
* released along with it. */
static void
dummy_gadget_release (struct device *dev)
{
- struct dummy *dum = gadget_dev_to_dummy (dev);
+ return;
+}
+
+static void init_dummy_udc_hw(struct dummy *dum)
+{
+ int i;
+
+ INIT_LIST_HEAD(&dum->gadget.ep_list);
+ for (i = 0; i < DUMMY_ENDPOINTS; i++) {
+ struct dummy_ep *ep = &dum->ep[i];
+
+ if (!ep_name[i])
+ break;
+ ep->ep.name = ep_name[i];
+ ep->ep.ops = &dummy_ep_ops;
+ list_add_tail(&ep->ep.ep_list, &dum->gadget.ep_list);
+ ep->halted = ep->wedged = ep->already_seen =
+ ep->setup_stage = 0;
+ ep->ep.maxpacket = ~0;
+ ep->last_io = jiffies;
+ ep->gadget = &dum->gadget;
+ ep->desc = NULL;
+ INIT_LIST_HEAD(&ep->queue);
+ }
+
+ dum->gadget.ep0 = &dum->ep[0].ep;
+ list_del_init(&dum->ep[0].ep.ep_list);
+ INIT_LIST_HEAD(&dum->fifo_req.queue);
- usb_put_hcd (dummy_to_hcd (dum));
+#ifdef CONFIG_USB_OTG
+ dum->gadget.is_otg = 1;
+#endif
}
static int dummy_udc_probe (struct platform_device *pdev)
{
- struct dummy *dum = the_controller;
+ struct dummy *dum = &the_controller;
int rc;
- usb_get_hcd(dummy_to_hcd(dum));
-
dum->gadget.name = gadget_name;
dum->gadget.ops = &dummy_ops;
dum->gadget.is_dualspeed = 1;
- /* maybe claim OTG support, though we won't complete HNP */
- dum->gadget.is_otg = (dummy_to_hcd(dum)->self.otg_port != 0);
-
dev_set_name(&dum->gadget.dev, "gadget");
dum->gadget.dev.parent = &pdev->dev;
dum->gadget.dev.release = dummy_gadget_release;
@@ -892,11 +997,22 @@ static int dummy_udc_probe (struct platform_device *pdev)
return rc;
}
+ init_dummy_udc_hw(dum);
+
+ rc = usb_add_gadget_udc(&pdev->dev, &dum->gadget);
+ if (rc < 0)
+ goto err_udc;
+
rc = device_create_file (&dum->gadget.dev, &dev_attr_function);
if (rc < 0)
- device_unregister (&dum->gadget.dev);
- else
- platform_set_drvdata(pdev, dum);
+ goto err_dev;
+ platform_set_drvdata(pdev, dum);
+ return rc;
+
+err_dev:
+ usb_del_gadget_udc(&dum->gadget);
+err_udc:
+ device_unregister(&dum->gadget.dev);
return rc;
}
@@ -904,37 +1020,41 @@ static int dummy_udc_remove (struct platform_device *pdev)
{
struct dummy *dum = platform_get_drvdata (pdev);
+ usb_del_gadget_udc(&dum->gadget);
platform_set_drvdata (pdev, NULL);
device_remove_file (&dum->gadget.dev, &dev_attr_function);
device_unregister (&dum->gadget.dev);
return 0;
}
-static int dummy_udc_suspend (struct platform_device *pdev, pm_message_t state)
+static void dummy_udc_pm(struct dummy *dum, struct dummy_hcd *dum_hcd,
+ int suspend)
{
- struct dummy *dum = platform_get_drvdata(pdev);
+ spin_lock_irq(&dum->lock);
+ dum->udc_suspended = suspend;
+ set_link_state(dum_hcd);
+ spin_unlock_irq(&dum->lock);
+}
- dev_dbg (&pdev->dev, "%s\n", __func__);
- spin_lock_irq (&dum->lock);
- dum->udc_suspended = 1;
- set_link_state (dum);
- spin_unlock_irq (&dum->lock);
+static int dummy_udc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct dummy *dum = platform_get_drvdata(pdev);
+ struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
- usb_hcd_poll_rh_status (dummy_to_hcd (dum));
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+ dummy_udc_pm(dum, dum_hcd, 1);
+ usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
return 0;
}
-static int dummy_udc_resume (struct platform_device *pdev)
+static int dummy_udc_resume(struct platform_device *pdev)
{
- struct dummy *dum = platform_get_drvdata(pdev);
+ struct dummy *dum = platform_get_drvdata(pdev);
+ struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
- dev_dbg (&pdev->dev, "%s\n", __func__);
- spin_lock_irq (&dum->lock);
- dum->udc_suspended = 0;
- set_link_state (dum);
- spin_unlock_irq (&dum->lock);
-
- usb_hcd_poll_rh_status (dummy_to_hcd (dum));
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+ dummy_udc_pm(dum, dum_hcd, 0);
+ usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
return 0;
}
@@ -968,7 +1088,7 @@ static int dummy_urb_enqueue (
struct urb *urb,
gfp_t mem_flags
) {
- struct dummy *dum;
+ struct dummy_hcd *dum_hcd;
struct urbp *urbp;
unsigned long flags;
int rc;
@@ -981,51 +1101,51 @@ static int dummy_urb_enqueue (
return -ENOMEM;
urbp->urb = urb;
- dum = hcd_to_dummy (hcd);
- spin_lock_irqsave (&dum->lock, flags);
+ dum_hcd = hcd_to_dummy_hcd(hcd);
+ spin_lock_irqsave(&dum_hcd->dum->lock, flags);
rc = usb_hcd_link_urb_to_ep(hcd, urb);
if (rc) {
kfree(urbp);
goto done;
}
- if (!dum->udev) {
- dum->udev = urb->dev;
- usb_get_dev (dum->udev);
- } else if (unlikely (dum->udev != urb->dev))
- dev_err (dummy_dev(dum), "usb_device address has changed!\n");
+ if (!dum_hcd->udev) {
+ dum_hcd->udev = urb->dev;
+ usb_get_dev(dum_hcd->udev);
+ } else if (unlikely(dum_hcd->udev != urb->dev))
+ dev_err(dummy_dev(dum_hcd), "usb_device address has changed!\n");
- list_add_tail (&urbp->urbp_list, &dum->urbp_list);
+ list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
urb->hcpriv = urbp;
if (usb_pipetype (urb->pipe) == PIPE_CONTROL)
urb->error_count = 1; /* mark as a new urb */
/* kick the scheduler, it'll do the rest */
- if (!timer_pending (&dum->timer))
- mod_timer (&dum->timer, jiffies + 1);
+ if (!timer_pending(&dum_hcd->timer))
+ mod_timer(&dum_hcd->timer, jiffies + 1);
done:
- spin_unlock_irqrestore(&dum->lock, flags);
+ spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
return rc;
}
static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
- struct dummy *dum;
+ struct dummy_hcd *dum_hcd;
unsigned long flags;
int rc;
/* giveback happens automatically in timer callback,
* so make sure the callback happens */
- dum = hcd_to_dummy (hcd);
- spin_lock_irqsave (&dum->lock, flags);
+ dum_hcd = hcd_to_dummy_hcd(hcd);
+ spin_lock_irqsave(&dum_hcd->dum->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
- if (!rc && dum->rh_state != DUMMY_RH_RUNNING &&
- !list_empty(&dum->urbp_list))
- mod_timer (&dum->timer, jiffies);
+ if (!rc && dum_hcd->rh_state != DUMMY_RH_RUNNING &&
+ !list_empty(&dum_hcd->urbp_list))
+ mod_timer(&dum_hcd->timer, jiffies);
- spin_unlock_irqrestore (&dum->lock, flags);
+ spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
return rc;
}
@@ -1162,10 +1282,25 @@ static int periodic_bytes (struct dummy *dum, struct dummy_ep *ep)
tmp *= 8 /* applies to entire frame */;
limit += limit * tmp;
}
+ if (dum->gadget.speed == USB_SPEED_SUPER) {
+ switch (ep->desc->bmAttributes & 0x03) {
+ case USB_ENDPOINT_XFER_ISOC:
+ /* Sec. 4.4.8.2 USB3.0 Spec */
+ limit = 3 * 16 * 1024 * 8;
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ /* Sec. 4.4.7.2 USB3.0 Spec */
+ limit = 3 * 1024 * 8;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ default:
+ break;
+ }
+ }
return limit;
}
-#define is_active(dum) ((dum->port_status & \
+#define is_active(dum_hcd) ((dum_hcd->port_status & \
(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE | \
USB_PORT_STAT_SUSPEND)) \
== (USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE))
@@ -1174,7 +1309,8 @@ static struct dummy_ep *find_endpoint (struct dummy *dum, u8 address)
{
int i;
- if (!is_active (dum))
+ if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
+ dum->ss_hcd : dum->hs_hcd)))
return NULL;
if ((address & ~USB_DIR_IN) == 0)
return &dum->ep [0];
@@ -1211,11 +1347,12 @@ static struct dummy_ep *find_endpoint (struct dummy *dum, u8 address)
* 1 - if the request wasn't handled
* error code on error
*/
-static int handle_control_request(struct dummy *dum, struct urb *urb,
+static int handle_control_request(struct dummy_hcd *dum_hcd, struct urb *urb,
struct usb_ctrlrequest *setup,
int *status)
{
struct dummy_ep *ep2;
+ struct dummy *dum = dum_hcd->dum;
int ret_val = 1;
unsigned w_index;
unsigned w_value;
@@ -1247,6 +1384,27 @@ static int handle_control_request(struct dummy *dum, struct urb *urb,
case USB_DEVICE_A_ALT_HNP_SUPPORT:
dum->gadget.a_alt_hnp_support = 1;
break;
+ case USB_DEVICE_U1_ENABLE:
+ if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+ HCD_USB3)
+ w_value = USB_DEV_STAT_U1_ENABLED;
+ else
+ ret_val = -EOPNOTSUPP;
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+ HCD_USB3)
+ w_value = USB_DEV_STAT_U2_ENABLED;
+ else
+ ret_val = -EOPNOTSUPP;
+ break;
+ case USB_DEVICE_LTM_ENABLE:
+ if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+ HCD_USB3)
+ w_value = USB_DEV_STAT_LTM_ENABLED;
+ else
+ ret_val = -EOPNOTSUPP;
+ break;
default:
ret_val = -EOPNOTSUPP;
}
@@ -1273,6 +1431,27 @@ static int handle_control_request(struct dummy *dum, struct urb *urb,
case USB_DEVICE_REMOTE_WAKEUP:
w_value = USB_DEVICE_REMOTE_WAKEUP;
break;
+ case USB_DEVICE_U1_ENABLE:
+ if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+ HCD_USB3)
+ w_value = USB_DEV_STAT_U1_ENABLED;
+ else
+ ret_val = -EOPNOTSUPP;
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+ HCD_USB3)
+ w_value = USB_DEV_STAT_U2_ENABLED;
+ else
+ ret_val = -EOPNOTSUPP;
+ break;
+ case USB_DEVICE_LTM_ENABLE:
+ if (dummy_hcd_to_hcd(dum_hcd)->speed ==
+ HCD_USB3)
+ w_value = USB_DEV_STAT_LTM_ENABLED;
+ else
+ ret_val = -EOPNOTSUPP;
+ break;
default:
ret_val = -EOPNOTSUPP;
break;
@@ -1334,9 +1513,10 @@ static int handle_control_request(struct dummy *dum, struct urb *urb,
/* drive both sides of the transfers; looks like irq handlers to
* both drivers except the callbacks aren't in_irq().
*/
-static void dummy_timer (unsigned long _dum)
+static void dummy_timer(unsigned long _dum_hcd)
{
- struct dummy *dum = (struct dummy *) _dum;
+ struct dummy_hcd *dum_hcd = (struct dummy_hcd *) _dum_hcd;
+ struct dummy *dum = dum_hcd->dum;
struct urbp *urbp, *tmp;
unsigned long flags;
int limit, total;
@@ -1353,8 +1533,12 @@ static void dummy_timer (unsigned long _dum)
case USB_SPEED_HIGH:
total = 512/*bytes*/ * 13/*packets*/ * 8/*uframes*/;
break;
+ case USB_SPEED_SUPER:
+ /* Bus speed is 500000 bytes/ms, so use a little less */
+ total = 490000;
+ break;
default:
- dev_err (dummy_dev(dum), "bogus device speed\n");
+ dev_err(dummy_dev(dum_hcd), "bogus device speed\n");
return;
}
@@ -1363,8 +1547,8 @@ static void dummy_timer (unsigned long _dum)
/* look at each urb queued by the host side driver */
spin_lock_irqsave (&dum->lock, flags);
- if (!dum->udev) {
- dev_err (dummy_dev(dum),
+ if (!dum_hcd->udev) {
+ dev_err(dummy_dev(dum_hcd),
"timer fired with no URBs pending?\n");
spin_unlock_irqrestore (&dum->lock, flags);
return;
@@ -1377,7 +1561,7 @@ static void dummy_timer (unsigned long _dum)
}
restart:
- list_for_each_entry_safe (urbp, tmp, &dum->urbp_list, urbp_list) {
+ list_for_each_entry_safe(urbp, tmp, &dum_hcd->urbp_list, urbp_list) {
struct urb *urb;
struct dummy_request *req;
u8 address;
@@ -1388,7 +1572,7 @@ restart:
urb = urbp->urb;
if (urb->unlinked)
goto return_urb;
- else if (dum->rh_state != DUMMY_RH_RUNNING)
+ else if (dum_hcd->rh_state != DUMMY_RH_RUNNING)
continue;
type = usb_pipetype (urb->pipe);
@@ -1406,7 +1590,7 @@ restart:
ep = find_endpoint(dum, address);
if (!ep) {
/* set_configuration() disagreement */
- dev_dbg (dummy_dev(dum),
+ dev_dbg(dummy_dev(dum_hcd),
"no ep configured for urb %p\n",
urb);
status = -EPROTO;
@@ -1422,7 +1606,7 @@ restart:
}
if (ep->halted && !ep->setup_stage) {
/* NOTE: must not be iso! */
- dev_dbg (dummy_dev(dum), "ep %s halted, urb %p\n",
+ dev_dbg(dummy_dev(dum_hcd), "ep %s halted, urb %p\n",
ep->ep.name, urb);
status = -EPIPE;
goto return_urb;
@@ -1457,7 +1641,7 @@ restart:
ep->setup_stage = 0;
ep->halted = 0;
- value = handle_control_request(dum, urb, &setup,
+ value = handle_control_request(dum_hcd, urb, &setup,
&status);
/* gadget driver handles all other requests. block
@@ -1527,20 +1711,20 @@ return_urb:
if (ep)
ep->already_seen = ep->setup_stage = 0;
- usb_hcd_unlink_urb_from_ep(dummy_to_hcd(dum), urb);
+ usb_hcd_unlink_urb_from_ep(dummy_hcd_to_hcd(dum_hcd), urb);
spin_unlock (&dum->lock);
- usb_hcd_giveback_urb(dummy_to_hcd(dum), urb, status);
+ usb_hcd_giveback_urb(dummy_hcd_to_hcd(dum_hcd), urb, status);
spin_lock (&dum->lock);
goto restart;
}
- if (list_empty (&dum->urbp_list)) {
- usb_put_dev (dum->udev);
- dum->udev = NULL;
- } else if (dum->rh_state == DUMMY_RH_RUNNING) {
+ if (list_empty(&dum_hcd->urbp_list)) {
+ usb_put_dev(dum_hcd->udev);
+ dum_hcd->udev = NULL;
+ } else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
/* want a 1 msec delay here */
- mod_timer (&dum->timer, jiffies + msecs_to_jiffies(1));
+ mod_timer(&dum_hcd->timer, jiffies + msecs_to_jiffies(1));
}
spin_unlock_irqrestore (&dum->lock, flags);
@@ -1557,36 +1741,48 @@ return_urb:
static int dummy_hub_status (struct usb_hcd *hcd, char *buf)
{
- struct dummy *dum;
+ struct dummy_hcd *dum_hcd;
unsigned long flags;
int retval = 0;
- dum = hcd_to_dummy (hcd);
+ dum_hcd = hcd_to_dummy_hcd(hcd);
- spin_lock_irqsave (&dum->lock, flags);
+ spin_lock_irqsave(&dum_hcd->dum->lock, flags);
if (!HCD_HW_ACCESSIBLE(hcd))
goto done;
- if (dum->resuming && time_after_eq (jiffies, dum->re_timeout)) {
- dum->port_status |= (USB_PORT_STAT_C_SUSPEND << 16);
- dum->port_status &= ~USB_PORT_STAT_SUSPEND;
- set_link_state (dum);
+ if (dum_hcd->resuming && time_after_eq(jiffies, dum_hcd->re_timeout)) {
+ dum_hcd->port_status |= (USB_PORT_STAT_C_SUSPEND << 16);
+ dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
+ set_link_state(dum_hcd);
}
- if ((dum->port_status & PORT_C_MASK) != 0) {
+ if ((dum_hcd->port_status & PORT_C_MASK) != 0) {
*buf = (1 << 1);
- dev_dbg (dummy_dev(dum), "port status 0x%08x has changes\n",
- dum->port_status);
+ dev_dbg(dummy_dev(dum_hcd), "port status 0x%08x has changes\n",
+ dum_hcd->port_status);
retval = 1;
- if (dum->rh_state == DUMMY_RH_SUSPENDED)
+ if (dum_hcd->rh_state == DUMMY_RH_SUSPENDED)
usb_hcd_resume_root_hub (hcd);
}
done:
- spin_unlock_irqrestore (&dum->lock, flags);
+ spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
return retval;
}
static inline void
+ss_hub_descriptor(struct usb_hub_descriptor *desc)
+{
+ memset(desc, 0, sizeof *desc);
+ desc->bDescriptorType = 0x2a;
+ desc->bDescLength = 12;
+ desc->wHubCharacteristics = cpu_to_le16(0x0001);
+ desc->bNbrPorts = 1;
+ desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec */
+ desc->u.ss.DeviceRemovable = 0xffff;
+}
+
+static inline void
hub_descriptor (struct usb_hub_descriptor *desc)
{
memset (desc, 0, sizeof *desc);
@@ -1606,39 +1802,64 @@ static int dummy_hub_control (
char *buf,
u16 wLength
) {
- struct dummy *dum;
+ struct dummy_hcd *dum_hcd;
int retval = 0;
unsigned long flags;
if (!HCD_HW_ACCESSIBLE(hcd))
return -ETIMEDOUT;
- dum = hcd_to_dummy (hcd);
- spin_lock_irqsave (&dum->lock, flags);
+ dum_hcd = hcd_to_dummy_hcd(hcd);
+
+ spin_lock_irqsave(&dum_hcd->dum->lock, flags);
switch (typeReq) {
case ClearHubFeature:
break;
case ClearPortFeature:
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- if (dum->port_status & USB_PORT_STAT_SUSPEND) {
+ if (hcd->speed == HCD_USB3) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "USB_PORT_FEAT_SUSPEND req not "
+ "supported for USB 3.0 roothub\n");
+ goto error;
+ }
+ if (dum_hcd->port_status & USB_PORT_STAT_SUSPEND) {
/* 20msec resume signaling */
- dum->resuming = 1;
- dum->re_timeout = jiffies +
+ dum_hcd->resuming = 1;
+ dum_hcd->re_timeout = jiffies +
msecs_to_jiffies(20);
}
break;
case USB_PORT_FEAT_POWER:
- if (dum->port_status & USB_PORT_STAT_POWER)
- dev_dbg (dummy_dev(dum), "power-off\n");
+ if (hcd->speed == HCD_USB3) {
+ if (dum_hcd->port_status & USB_SS_PORT_STAT_POWER)
+ dev_dbg(dummy_dev(dum_hcd),
+ "power-off\n");
+ } else
+ if (dum_hcd->port_status &
+ USB_PORT_STAT_POWER)
+ dev_dbg(dummy_dev(dum_hcd),
+ "power-off\n");
/* FALLS THROUGH */
default:
- dum->port_status &= ~(1 << wValue);
- set_link_state (dum);
+ dum_hcd->port_status &= ~(1 << wValue);
+ set_link_state(dum_hcd);
}
break;
case GetHubDescriptor:
- hub_descriptor ((struct usb_hub_descriptor *) buf);
+ if (hcd->speed == HCD_USB3 &&
+ (wLength < USB_DT_SS_HUB_SIZE ||
+ wValue != (USB_DT_SS_HUB << 8))) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "Wrong hub descriptor type for "
+ "USB 3.0 roothub.\n");
+ goto error;
+ }
+ if (hcd->speed == HCD_USB3)
+ ss_hub_descriptor((struct usb_hub_descriptor *) buf);
+ else
+ hub_descriptor((struct usb_hub_descriptor *) buf);
break;
case GetHubStatus:
*(__le32 *) buf = cpu_to_le32 (0);
@@ -1650,127 +1871,210 @@ static int dummy_hub_control (
/* whoever resets or resumes must GetPortStatus to
* complete it!!
*/
- if (dum->resuming &&
- time_after_eq (jiffies, dum->re_timeout)) {
- dum->port_status |= (USB_PORT_STAT_C_SUSPEND << 16);
- dum->port_status &= ~USB_PORT_STAT_SUSPEND;
+ if (dum_hcd->resuming &&
+ time_after_eq(jiffies, dum_hcd->re_timeout)) {
+ dum_hcd->port_status |= (USB_PORT_STAT_C_SUSPEND << 16);
+ dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
}
- if ((dum->port_status & USB_PORT_STAT_RESET) != 0 &&
- time_after_eq (jiffies, dum->re_timeout)) {
- dum->port_status |= (USB_PORT_STAT_C_RESET << 16);
- dum->port_status &= ~USB_PORT_STAT_RESET;
- if (dum->pullup) {
- dum->port_status |= USB_PORT_STAT_ENABLE;
- /* give it the best speed we agree on */
- dum->gadget.speed = dum->driver->speed;
- dum->gadget.ep0->maxpacket = 64;
- switch (dum->gadget.speed) {
- case USB_SPEED_HIGH:
- dum->port_status |=
- USB_PORT_STAT_HIGH_SPEED;
- break;
- case USB_SPEED_LOW:
- dum->gadget.ep0->maxpacket = 8;
- dum->port_status |=
- USB_PORT_STAT_LOW_SPEED;
- break;
- default:
- dum->gadget.speed = USB_SPEED_FULL;
- break;
+ if ((dum_hcd->port_status & USB_PORT_STAT_RESET) != 0 &&
+ time_after_eq(jiffies, dum_hcd->re_timeout)) {
+ dum_hcd->port_status |= (USB_PORT_STAT_C_RESET << 16);
+ dum_hcd->port_status &= ~USB_PORT_STAT_RESET;
+ if (dum_hcd->dum->pullup) {
+ dum_hcd->port_status |= USB_PORT_STAT_ENABLE;
+
+ if (hcd->speed < HCD_USB3) {
+ switch (dum_hcd->dum->gadget.speed) {
+ case USB_SPEED_HIGH:
+ dum_hcd->port_status |=
+ USB_PORT_STAT_HIGH_SPEED;
+ break;
+ case USB_SPEED_LOW:
+ dum_hcd->dum->gadget.ep0->
+ maxpacket = 8;
+ dum_hcd->port_status |=
+ USB_PORT_STAT_LOW_SPEED;
+ break;
+ default:
+ dum_hcd->dum->gadget.speed =
+ USB_SPEED_FULL;
+ break;
+ }
}
}
}
- set_link_state (dum);
- ((__le16 *) buf)[0] = cpu_to_le16 (dum->port_status);
- ((__le16 *) buf)[1] = cpu_to_le16 (dum->port_status >> 16);
+ set_link_state(dum_hcd);
+ ((__le16 *) buf)[0] = cpu_to_le16 (dum_hcd->port_status);
+ ((__le16 *) buf)[1] = cpu_to_le16 (dum_hcd->port_status >> 16);
break;
case SetHubFeature:
retval = -EPIPE;
break;
case SetPortFeature:
switch (wValue) {
+ case USB_PORT_FEAT_LINK_STATE:
+ if (hcd->speed != HCD_USB3) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "USB_PORT_FEAT_LINK_STATE req not "
+ "supported for USB 2.0 roothub\n");
+ goto error;
+ }
+ /*
+ * Since this is a dummy HCD we don't have an actual link, so
+ * there is nothing to do for the SET_LINK_STATE command
+ */
+ break;
+ case USB_PORT_FEAT_U1_TIMEOUT:
+ case USB_PORT_FEAT_U2_TIMEOUT:
+ /* TODO: add suspend/resume support! */
+ if (hcd->speed != HCD_USB3) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "USB_PORT_FEAT_U1/2_TIMEOUT req not "
+ "supported for USB 2.0 roothub\n");
+ goto error;
+ }
+ break;
case USB_PORT_FEAT_SUSPEND:
- if (dum->active) {
- dum->port_status |= USB_PORT_STAT_SUSPEND;
+ /* Applicable only for USB2.0 hub */
+ if (hcd->speed == HCD_USB3) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "USB_PORT_FEAT_SUSPEND req not "
+ "supported for USB 3.0 roothub\n");
+ goto error;
+ }
+ if (dum_hcd->active) {
+ dum_hcd->port_status |= USB_PORT_STAT_SUSPEND;
/* HNP would happen here; for now we
* assume b_bus_req is always true.
*/
- set_link_state (dum);
+ set_link_state(dum_hcd);
if (((1 << USB_DEVICE_B_HNP_ENABLE)
- & dum->devstatus) != 0)
- dev_dbg (dummy_dev(dum),
+ & dum_hcd->dum->devstatus) != 0)
+ dev_dbg(dummy_dev(dum_hcd),
"no HNP yet!\n");
}
break;
case USB_PORT_FEAT_POWER:
- dum->port_status |= USB_PORT_STAT_POWER;
- set_link_state (dum);
+ if (hcd->speed == HCD_USB3)
+ dum_hcd->port_status |= USB_SS_PORT_STAT_POWER;
+ else
+ dum_hcd->port_status |= USB_PORT_STAT_POWER;
+ set_link_state(dum_hcd);
break;
+ case USB_PORT_FEAT_BH_PORT_RESET:
+ /* Applicable only for USB3.0 hub */
+ if (hcd->speed != HCD_USB3) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "USB_PORT_FEAT_BH_PORT_RESET req not "
+ "supported for USB 2.0 roothub\n");
+ goto error;
+ }
+ /* FALLS THROUGH */
case USB_PORT_FEAT_RESET:
/* if it's already enabled, disable */
- dum->port_status &= ~(USB_PORT_STAT_ENABLE
+ if (hcd->speed == HCD_USB3) {
+ dum_hcd->port_status = 0;
+ dum_hcd->port_status =
+ (USB_SS_PORT_STAT_POWER |
+ USB_PORT_STAT_CONNECTION |
+ USB_PORT_STAT_RESET);
+ } else
+ dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
| USB_PORT_STAT_LOW_SPEED
| USB_PORT_STAT_HIGH_SPEED);
- dum->devstatus = 0;
- /* 50msec reset signaling */
- dum->re_timeout = jiffies + msecs_to_jiffies(50);
+ /*
+ * Reset the device status, keeping only the
+ * Self Powered feature bit
+ */
+ dum_hcd->dum->devstatus &=
+ (1 << USB_DEVICE_SELF_POWERED);
+ /*
+ * FIXME USB3.0: what is the correct reset signaling
+ * interval? Is it still 50msec as for HS?
+ */
+ dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
/* FALLS THROUGH */
default:
- if ((dum->port_status & USB_PORT_STAT_POWER) != 0) {
- dum->port_status |= (1 << wValue);
- set_link_state (dum);
- }
+ if (hcd->speed == HCD_USB3) {
+ if ((dum_hcd->port_status &
+ USB_SS_PORT_STAT_POWER) != 0) {
+ dum_hcd->port_status |= (1 << wValue);
+ set_link_state(dum_hcd);
+ }
+ } else
+ if ((dum_hcd->port_status &
+ USB_PORT_STAT_POWER) != 0) {
+ dum_hcd->port_status |= (1 << wValue);
+ set_link_state(dum_hcd);
+ }
+ }
+ break;
+ case GetPortErrorCount:
+ if (hcd->speed != HCD_USB3) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "GetPortErrorCount req not "
+ "supported for USB 2.0 roothub\n");
+ goto error;
+ }
+ /* We'll always return 0 since this is a dummy hub */
+ *(__le32 *) buf = cpu_to_le32(0);
+ break;
+ case SetHubDepth:
+ if (hcd->speed != HCD_USB3) {
+ dev_dbg(dummy_dev(dum_hcd),
+ "SetHubDepth req not supported for "
+ "USB 2.0 roothub\n");
+ goto error;
}
break;
-
default:
- dev_dbg (dummy_dev(dum),
+ dev_dbg(dummy_dev(dum_hcd),
"hub control req%04x v%04x i%04x l%d\n",
typeReq, wValue, wIndex, wLength);
-
+error:
/* "protocol stall" on error */
retval = -EPIPE;
}
- spin_unlock_irqrestore (&dum->lock, flags);
+ spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
- if ((dum->port_status & PORT_C_MASK) != 0)
+ if ((dum_hcd->port_status & PORT_C_MASK) != 0)
usb_hcd_poll_rh_status (hcd);
return retval;
}
static int dummy_bus_suspend (struct usb_hcd *hcd)
{
- struct dummy *dum = hcd_to_dummy (hcd);
+ struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
dev_dbg (&hcd->self.root_hub->dev, "%s\n", __func__);
- spin_lock_irq (&dum->lock);
- dum->rh_state = DUMMY_RH_SUSPENDED;
- set_link_state (dum);
+ spin_lock_irq(&dum_hcd->dum->lock);
+ dum_hcd->rh_state = DUMMY_RH_SUSPENDED;
+ set_link_state(dum_hcd);
hcd->state = HC_STATE_SUSPENDED;
- spin_unlock_irq (&dum->lock);
+ spin_unlock_irq(&dum_hcd->dum->lock);
return 0;
}
static int dummy_bus_resume (struct usb_hcd *hcd)
{
- struct dummy *dum = hcd_to_dummy (hcd);
+ struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
int rc = 0;
dev_dbg (&hcd->self.root_hub->dev, "%s\n", __func__);
- spin_lock_irq (&dum->lock);
+ spin_lock_irq(&dum_hcd->dum->lock);
if (!HCD_HW_ACCESSIBLE(hcd)) {
rc = -ESHUTDOWN;
} else {
- dum->rh_state = DUMMY_RH_RUNNING;
- set_link_state (dum);
- if (!list_empty(&dum->urbp_list))
- mod_timer (&dum->timer, jiffies);
+ dum_hcd->rh_state = DUMMY_RH_RUNNING;
+ set_link_state(dum_hcd);
+ if (!list_empty(&dum_hcd->urbp_list))
+ mod_timer(&dum_hcd->timer, jiffies);
hcd->state = HC_STATE_RUNNING;
}
- spin_unlock_irq (&dum->lock);
+ spin_unlock_irq(&dum_hcd->dum->lock);
return rc;
}
@@ -1786,18 +2090,37 @@ show_urb (char *buf, size_t size, struct urb *urb)
urb,
({ char *s;
switch (urb->dev->speed) {
- case USB_SPEED_LOW: s = "ls"; break;
- case USB_SPEED_FULL: s = "fs"; break;
- case USB_SPEED_HIGH: s = "hs"; break;
- default: s = "?"; break;
+ case USB_SPEED_LOW:
+ s = "ls";
+ break;
+ case USB_SPEED_FULL:
+ s = "fs";
+ break;
+ case USB_SPEED_HIGH:
+ s = "hs";
+ break;
+ case USB_SPEED_SUPER:
+ s = "ss";
+ break;
+ default:
+ s = "?";
+ break;
}; s; }),
ep, ep ? (usb_pipein (urb->pipe) ? "in" : "out") : "",
({ char *s; \
switch (usb_pipetype (urb->pipe)) { \
- case PIPE_CONTROL: s = ""; break; \
- case PIPE_BULK: s = "-bulk"; break; \
- case PIPE_INTERRUPT: s = "-int"; break; \
- default: s = "-iso"; break; \
+ case PIPE_CONTROL: \
+ s = ""; \
+ break; \
+ case PIPE_BULK: \
+ s = "-bulk"; \
+ break; \
+ case PIPE_INTERRUPT: \
+ s = "-int"; \
+ break; \
+ default: \
+ s = "-iso"; \
+ break; \
}; s;}),
urb->actual_length, urb->transfer_buffer_length);
}
@@ -1806,43 +2129,63 @@ static ssize_t
show_urbs (struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_hcd *hcd = dev_get_drvdata (dev);
- struct dummy *dum = hcd_to_dummy (hcd);
+ struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
struct urbp *urbp;
size_t size = 0;
unsigned long flags;
- spin_lock_irqsave (&dum->lock, flags);
- list_for_each_entry (urbp, &dum->urbp_list, urbp_list) {
+ spin_lock_irqsave(&dum_hcd->dum->lock, flags);
+ list_for_each_entry(urbp, &dum_hcd->urbp_list, urbp_list) {
size_t temp;
temp = show_urb (buf, PAGE_SIZE - size, urbp->urb);
buf += temp;
size += temp;
}
- spin_unlock_irqrestore (&dum->lock, flags);
+ spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
return size;
}
static DEVICE_ATTR (urbs, S_IRUGO, show_urbs, NULL);
-static int dummy_start (struct usb_hcd *hcd)
+static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
- struct dummy *dum;
+ init_timer(&dum_hcd->timer);
+ dum_hcd->timer.function = dummy_timer;
+ dum_hcd->timer.data = (unsigned long)dum_hcd;
+ dum_hcd->rh_state = DUMMY_RH_RUNNING;
+ INIT_LIST_HEAD(&dum_hcd->urbp_list);
+ dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET;
+ dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING;
+ dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1;
+#ifdef CONFIG_USB_OTG
+ dummy_hcd_to_hcd(dum_hcd)->self.otg_port = 1;
+#endif
+ return 0;
+
+ /* FIXME 'urbs' should be a per-device thing, maybe in usbcore */
+ return device_create_file(dummy_dev(dum_hcd), &dev_attr_urbs);
+}
- dum = hcd_to_dummy (hcd);
+static int dummy_start(struct usb_hcd *hcd)
+{
+ struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
/*
* MASTER side init ... we emulate a root hub that'll only ever
* talk to one device (the slave side). Also appears in sysfs,
* just like more familiar pci-based HCDs.
*/
- spin_lock_init (&dum->lock);
- init_timer (&dum->timer);
- dum->timer.function = dummy_timer;
- dum->timer.data = (unsigned long) dum;
- dum->rh_state = DUMMY_RH_RUNNING;
+ if (!usb_hcd_is_primary_hcd(hcd))
+ return dummy_start_ss(dum_hcd);
- INIT_LIST_HEAD (&dum->urbp_list);
+ spin_lock_init(&dum_hcd->dum->lock);
+ init_timer(&dum_hcd->timer);
+ dum_hcd->timer.function = dummy_timer;
+ dum_hcd->timer.data = (unsigned long)dum_hcd;
+ dum_hcd->rh_state = DUMMY_RH_RUNNING;
+
+ INIT_LIST_HEAD(&dum_hcd->urbp_list);
hcd->power_budget = POWER_BUDGET;
hcd->state = HC_STATE_RUNNING;
@@ -1853,18 +2196,17 @@ static int dummy_start (struct usb_hcd *hcd)
#endif
/* FIXME 'urbs' should be a per-device thing, maybe in usbcore */
- return device_create_file (dummy_dev(dum), &dev_attr_urbs);
+ return device_create_file(dummy_dev(dum_hcd), &dev_attr_urbs);
}
static void dummy_stop (struct usb_hcd *hcd)
{
struct dummy *dum;
- dum = hcd_to_dummy (hcd);
-
- device_remove_file (dummy_dev(dum), &dev_attr_urbs);
- usb_gadget_unregister_driver (dum->driver);
- dev_info (dummy_dev(dum), "stopped\n");
+ dum = (hcd_to_dummy_hcd(hcd))->dum;
+ device_remove_file(dummy_dev(hcd_to_dummy_hcd(hcd)), &dev_attr_urbs);
+ usb_gadget_unregister_driver(dum->driver);
+ dev_info(dummy_dev(hcd_to_dummy_hcd(hcd)), "stopped\n");
}
/*-------------------------------------------------------------------------*/
@@ -1874,13 +2216,59 @@ static int dummy_h_get_frame (struct usb_hcd *hcd)
return dummy_g_get_frame (NULL);
}
-static const struct hc_driver dummy_hcd = {
+static int dummy_setup(struct usb_hcd *hcd)
+{
+ if (usb_hcd_is_primary_hcd(hcd)) {
+ the_controller.hs_hcd = hcd_to_dummy_hcd(hcd);
+ the_controller.hs_hcd->dum = &the_controller;
+ /*
+ * Mark the first roothub as being USB 2.0.
+ * The USB 3.0 roothub will be registered later by
+ * dummy_hcd_probe()
+ */
+ hcd->speed = HCD_USB2;
+ hcd->self.root_hub->speed = USB_SPEED_HIGH;
+ } else {
+ the_controller.ss_hcd = hcd_to_dummy_hcd(hcd);
+ the_controller.ss_hcd->dum = &the_controller;
+ hcd->speed = HCD_USB3;
+ hcd->self.root_hub->speed = USB_SPEED_SUPER;
+ }
+ return 0;
+}
+
+/* Change a group of bulk endpoints to support multiple stream IDs */
+int dummy_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int num_streams, gfp_t mem_flags)
+{
+ if (hcd->speed != HCD_USB3)
+ dev_dbg(dummy_dev(hcd_to_dummy_hcd(hcd)),
+ "%s() - ERROR! Not supported for USB2.0 roothub\n",
+ __func__);
+ return 0;
+}
+
+/* Reverts a group of bulk endpoints back to not using stream IDs. */
+int dummy_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ gfp_t mem_flags)
+{
+ if (hcd->speed != HCD_USB3)
+ dev_dbg(dummy_dev(hcd_to_dummy_hcd(hcd)),
+ "%s() - ERROR! Not supported for USB2.0 roothub\n",
+ __func__);
+ return 0;
+}
+
+static struct hc_driver dummy_hcd = {
.description = (char *) driver_name,
.product_desc = "Dummy host controller",
- .hcd_priv_size = sizeof(struct dummy),
+ .hcd_priv_size = sizeof(struct dummy_hcd),
- .flags = HCD_USB2,
+ .flags = HCD_USB3 | HCD_SHARED,
+ .reset = dummy_setup,
.start = dummy_start,
.stop = dummy_stop,
@@ -1893,51 +2281,85 @@ static const struct hc_driver dummy_hcd = {
.hub_control = dummy_hub_control,
.bus_suspend = dummy_bus_suspend,
.bus_resume = dummy_bus_resume,
+
+ .alloc_streams = dummy_alloc_streams,
+ .free_streams = dummy_free_streams,
};
static int dummy_hcd_probe(struct platform_device *pdev)
{
- struct usb_hcd *hcd;
+ struct usb_hcd *hs_hcd;
+ struct usb_hcd *ss_hcd;
int retval;
dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
- hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
- if (!hcd)
+ if (!mod_data.is_super_speed)
+ dummy_hcd.flags = HCD_USB2;
+ hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
+ if (!hs_hcd)
return -ENOMEM;
- the_controller = hcd_to_dummy (hcd);
- hcd->has_tt = 1;
+ hs_hcd->has_tt = 1;
- retval = usb_add_hcd(hcd, 0, 0);
+ retval = usb_add_hcd(hs_hcd, 0, 0);
if (retval != 0) {
- usb_put_hcd (hcd);
- the_controller = NULL;
+ usb_put_hcd(hs_hcd);
+ return retval;
+ }
+
+ if (mod_data.is_super_speed) {
+ ss_hcd = usb_create_shared_hcd(&dummy_hcd, &pdev->dev,
+ dev_name(&pdev->dev), hs_hcd);
+ if (!ss_hcd) {
+ retval = -ENOMEM;
+ goto dealloc_usb2_hcd;
+ }
+
+ retval = usb_add_hcd(ss_hcd, 0, 0);
+ if (retval)
+ goto put_usb3_hcd;
}
+ return 0;
+
+put_usb3_hcd:
+ usb_put_hcd(ss_hcd);
+dealloc_usb2_hcd:
+ usb_put_hcd(hs_hcd);
+ the_controller.hs_hcd = the_controller.ss_hcd = NULL;
return retval;
}
-static int dummy_hcd_remove (struct platform_device *pdev)
+static int dummy_hcd_remove(struct platform_device *pdev)
{
- struct usb_hcd *hcd;
+ struct dummy *dum;
+
+ dum = (hcd_to_dummy_hcd(platform_get_drvdata(pdev)))->dum;
+
+ if (dum->ss_hcd) {
+ usb_remove_hcd(dummy_hcd_to_hcd(dum->ss_hcd));
+ usb_put_hcd(dummy_hcd_to_hcd(dum->ss_hcd));
+ }
+
+ usb_remove_hcd(dummy_hcd_to_hcd(dum->hs_hcd));
+ usb_put_hcd(dummy_hcd_to_hcd(dum->hs_hcd));
+
+ the_controller.hs_hcd = NULL;
+ the_controller.ss_hcd = NULL;
- hcd = platform_get_drvdata (pdev);
- usb_remove_hcd (hcd);
- usb_put_hcd (hcd);
- the_controller = NULL;
return 0;
}
static int dummy_hcd_suspend (struct platform_device *pdev, pm_message_t state)
{
struct usb_hcd *hcd;
- struct dummy *dum;
+ struct dummy_hcd *dum_hcd;
int rc = 0;
dev_dbg (&pdev->dev, "%s\n", __func__);
hcd = platform_get_drvdata (pdev);
- dum = hcd_to_dummy (hcd);
- if (dum->rh_state == DUMMY_RH_RUNNING) {
+ dum_hcd = hcd_to_dummy_hcd(hcd);
+ if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
dev_warn(&pdev->dev, "Root hub isn't suspended!\n");
rc = -EBUSY;
} else
@@ -1980,6 +2402,9 @@ static int __init init (void)
if (usb_disabled ())
return -ENODEV;
+ if (!mod_data.is_high_speed && mod_data.is_super_speed)
+ return -EINVAL;
+
the_hcd_pdev = platform_device_alloc(driver_name, -1);
if (!the_hcd_pdev)
return retval;
@@ -1997,7 +2422,8 @@ static int __init init (void)
retval = platform_device_add(the_hcd_pdev);
if (retval < 0)
goto err_add_hcd;
- if (!the_controller) {
+ if (!the_controller.hs_hcd ||
+ (!the_controller.ss_hcd && mod_data.is_super_speed)) {
/*
* The hcd was added successfully but its probe function failed
* for some reason.
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 9b7360ff5aa..7a7e6b7e1fd 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -63,13 +63,16 @@ static int
ep_matches (
struct usb_gadget *gadget,
struct usb_ep *ep,
- struct usb_endpoint_descriptor *desc
+ struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp
)
{
u8 type;
const char *tmp;
u16 max;
+ int num_req_streams = 0;
+
/* endpoint already claimed? */
if (NULL != ep->driver_data)
return 0;
@@ -129,6 +132,22 @@ ep_matches (
}
/*
+ * Get the number of required streams from the EP companion
+ * descriptor and see if the EP matches it
+ */
+ if (usb_endpoint_xfer_bulk(desc)) {
+ if (ep_comp) {
+ num_req_streams = ep_comp->bmAttributes & 0x1f;
+ if (num_req_streams > ep->max_streams)
+ return 0;
+ /* Update the ep_comp descriptor if needed */
+ if (num_req_streams != ep->max_streams)
+ ep_comp->bmAttributes = ep->max_streams;
+ }
+
+ }
+
+ /*
* If the protocol driver hasn't yet decided on wMaxPacketSize
* and wants to know the maximum possible, provide the info.
*/
@@ -142,13 +161,13 @@ ep_matches (
max = 0x7ff & le16_to_cpu(desc->wMaxPacketSize);
switch (type) {
case USB_ENDPOINT_XFER_INT:
- /* INT: limit 64 bytes full speed, 1024 high speed */
+ /* INT: limit 64 bytes full speed, 1024 high/super speed */
if (!gadget->is_dualspeed && max > 64)
return 0;
/* FALLTHROUGH */
case USB_ENDPOINT_XFER_ISOC:
- /* ISO: limit 1023 bytes full speed, 1024 high speed */
+ /* ISO: limit 1023 bytes full speed, 1024 high/super speed */
if (ep->maxpacket < max)
return 0;
if (!gadget->is_dualspeed && max > 1023)
@@ -183,7 +202,7 @@ ep_matches (
}
/* report (variable) full speed bulk maxpacket */
- if (USB_ENDPOINT_XFER_BULK == type) {
+ if ((USB_ENDPOINT_XFER_BULK == type) && !ep_comp) {
int size = ep->maxpacket;
/* min() doesn't work on bitfields with gcc-3.5 */
@@ -191,6 +210,7 @@ ep_matches (
size = 64;
desc->wMaxPacketSize = cpu_to_le16(size);
}
+ ep->address = desc->bEndpointAddress;
return 1;
}
@@ -207,38 +227,53 @@ find_ep (struct usb_gadget *gadget, const char *name)
}
/**
- * usb_ep_autoconfig - choose an endpoint matching the descriptor
+ * usb_ep_autoconfig_ss() - choose an endpoint matching the ep
+ * descriptor and ep companion descriptor
* @gadget: The device to which the endpoint must belong.
* @desc: Endpoint descriptor, with endpoint direction and transfer mode
- * initialized. For periodic transfers, the maximum packet
- * size must also be initialized. This is modified on success.
+ * initialized. For periodic transfers, the maximum packet
+ * size must also be initialized. This is modified on
+ * success.
+ * @ep_comp: Endpoint companion descriptor, with the required
+ * number of streams. Will be modified when the chosen EP
+ * supports a different number of streams.
*
- * By choosing an endpoint to use with the specified descriptor, this
- * routine simplifies writing gadget drivers that work with multiple
- * USB device controllers. The endpoint would be passed later to
- * usb_ep_enable(), along with some descriptor.
+ * This routine replaces usb_ep_autoconfig() when superspeed
+ * enhancements are needed. If such enhancements are required,
+ * the function driver should call usb_ep_autoconfig_ss() directly
+ * and provide the additional ep_comp parameter.
+ *
+ * By choosing an endpoint to use with the specified descriptor,
+ * this routine simplifies writing gadget drivers that work with
+ * multiple USB device controllers. The endpoint would be
+ * passed later to usb_ep_enable(), along with some descriptor.
*
* That second descriptor won't always be the same as the first one.
* For example, isochronous endpoints can be autoconfigured for high
* bandwidth, and then used in several lower bandwidth altsettings.
* Also, high and full speed descriptors will be different.
*
- * Be sure to examine and test the results of autoconfiguration on your
- * hardware. This code may not make the best choices about how to use the
- * USB controller, and it can't know all the restrictions that may apply.
- * Some combinations of driver and hardware won't be able to autoconfigure.
+ * Be sure to examine and test the results of autoconfiguration
+ * on your hardware. This code may not make the best choices
+ * about how to use the USB controller, and it can't know all
+ * the restrictions that may apply. Some combinations of driver
+ * and hardware won't be able to autoconfigure.
*
* On success, this returns an un-claimed usb_ep, and modifies the endpoint
* descriptor bEndpointAddress. For bulk endpoints, the wMaxPacket value
- * is initialized as if the endpoint were used at full speed. To prevent
- * the endpoint from being returned by a later autoconfig call, claim it
- * by assigning ep->driver_data to some non-null value.
+ * is initialized as if the endpoint were used at full speed and
+ * the bmAttribute field in the ep companion descriptor is
+ * updated with the assigned number of streams if it is
+ * different from the original value. To prevent the endpoint
+ * from being returned by a later autoconfig call, claim it by
+ * assigning ep->driver_data to some non-null value.
*
* On failure, this returns a null endpoint descriptor.
*/
-struct usb_ep *usb_ep_autoconfig (
+struct usb_ep *usb_ep_autoconfig_ss(
struct usb_gadget *gadget,
- struct usb_endpoint_descriptor *desc
+ struct usb_endpoint_descriptor *desc,
+ struct usb_ss_ep_comp_descriptor *ep_comp
)
{
struct usb_ep *ep;
@@ -252,23 +287,24 @@ struct usb_ep *usb_ep_autoconfig (
if (gadget_is_net2280 (gadget) && type == USB_ENDPOINT_XFER_INT) {
/* ep-e, ep-f are PIO with only 64 byte fifos */
ep = find_ep (gadget, "ep-e");
- if (ep && ep_matches (gadget, ep, desc))
+ if (ep && ep_matches(gadget, ep, desc, ep_comp))
return ep;
ep = find_ep (gadget, "ep-f");
- if (ep && ep_matches (gadget, ep, desc))
+ if (ep && ep_matches(gadget, ep, desc, ep_comp))
return ep;
} else if (gadget_is_goku (gadget)) {
if (USB_ENDPOINT_XFER_INT == type) {
/* single buffering is enough */
- ep = find_ep (gadget, "ep3-bulk");
- if (ep && ep_matches (gadget, ep, desc))
+ ep = find_ep(gadget, "ep3-bulk");
+ if (ep && ep_matches(gadget, ep, desc, ep_comp))
return ep;
} else if (USB_ENDPOINT_XFER_BULK == type
&& (USB_DIR_IN & desc->bEndpointAddress)) {
/* DMA may be available */
- ep = find_ep (gadget, "ep2-bulk");
- if (ep && ep_matches (gadget, ep, desc))
+ ep = find_ep(gadget, "ep2-bulk");
+ if (ep && ep_matches(gadget, ep, desc,
+ ep_comp))
return ep;
}
@@ -287,14 +323,14 @@ struct usb_ep *usb_ep_autoconfig (
ep = find_ep(gadget, "ep2out");
} else
ep = NULL;
- if (ep && ep_matches (gadget, ep, desc))
+ if (ep && ep_matches(gadget, ep, desc, ep_comp))
return ep;
#endif
}
/* Second, look at endpoints until an unclaimed one looks usable */
list_for_each_entry (ep, &gadget->ep_list, ep_list) {
- if (ep_matches (gadget, ep, desc))
+ if (ep_matches(gadget, ep, desc, ep_comp))
return ep;
}
@@ -303,6 +339,46 @@ struct usb_ep *usb_ep_autoconfig (
}
/**
+ * usb_ep_autoconfig() - choose an endpoint matching the
+ * descriptor
+ * @gadget: The device to which the endpoint must belong.
+ * @desc: Endpoint descriptor, with endpoint direction and transfer mode
+ * initialized. For periodic transfers, the maximum packet
+ * size must also be initialized. This is modified on success.
+ *
+ * By choosing an endpoint to use with the specified descriptor, this
+ * routine simplifies writing gadget drivers that work with multiple
+ * USB device controllers. The endpoint would be passed later to
+ * usb_ep_enable(), along with some descriptor.
+ *
+ * That second descriptor won't always be the same as the first one.
+ * For example, isochronous endpoints can be autoconfigured for high
+ * bandwidth, and then used in several lower bandwidth altsettings.
+ * Also, high and full speed descriptors will be different.
+ *
+ * Be sure to examine and test the results of autoconfiguration on your
+ * hardware. This code may not make the best choices about how to use the
+ * USB controller, and it can't know all the restrictions that may apply.
+ * Some combinations of driver and hardware won't be able to autoconfigure.
+ *
+ * On success, this returns an un-claimed usb_ep, and modifies the endpoint
+ * descriptor bEndpointAddress. For bulk endpoints, the wMaxPacket value
+ * is initialized as if the endpoint were used at full speed. To prevent
+ * the endpoint from being returned by a later autoconfig call, claim it
+ * by assigning ep->driver_data to some non-null value.
+ *
+ * On failure, this returns a null endpoint descriptor.
+ */
+struct usb_ep *usb_ep_autoconfig(
+ struct usb_gadget *gadget,
+ struct usb_endpoint_descriptor *desc
+)
+{
+ return usb_ep_autoconfig_ss(gadget, desc, NULL);
+}
+
+
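A hedged sketch of calling the new entry point from a function driver's bind path: only usb_ep_autoconfig_ss() and the descriptor types come from this patch, while the my_* names and the particular values are illustrative.

    /* bulk-IN endpoint that would like up to 2^2 = 4 stream IDs */
    static struct usb_endpoint_descriptor my_ss_in_desc = {
            .bLength          = USB_DT_ENDPOINT_SIZE,
            .bDescriptorType  = USB_DT_ENDPOINT,
            .bEndpointAddress = USB_DIR_IN,
            .bmAttributes     = USB_ENDPOINT_XFER_BULK,
            .wMaxPacketSize   = cpu_to_le16(1024),
    };

    static struct usb_ss_ep_comp_descriptor my_ss_in_comp_desc = {
            .bLength          = USB_DT_SS_EP_COMP_SIZE,
            .bDescriptorType  = USB_DT_SS_ENDPOINT_COMP,
            .bmAttributes     = 2,          /* MaxStreams exponent */
    };

    static int my_bind_in_ep(struct usb_gadget *gadget)
    {
            struct usb_ep *ep;

            ep = usb_ep_autoconfig_ss(gadget, &my_ss_in_desc,
                            &my_ss_in_comp_desc);
            if (!ep)
                    return -ENODEV;
            /* bmAttributes may have been lowered to ep->max_streams */
            ep->driver_data = gadget;       /* any non-NULL value claims the ep */
            return 0;
    }

Drivers that do not care about streams keep calling usb_ep_autoconfig(), which the wrapper added above simply forwards with a NULL companion descriptor.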
+/**
* usb_ep_autoconfig_reset - reset endpoint autoconfig state
* @gadget: device for which autoconfig state will be reset
*
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 1690c9d6825..aafc84f33e2 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -401,6 +401,7 @@ static struct usb_composite_driver eth_driver = {
.name = "g_ether",
.dev = &device_desc,
.strings = dev_strings,
+ .max_speed = USB_SPEED_SUPER,
.unbind = __exit_p(eth_unbind),
};
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index bd6226cbae8..3f8849339ad 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -39,12 +39,6 @@
* descriptors (roughly equivalent to CDC Unions) may sometimes help.
*/
-struct acm_ep_descs {
- struct usb_endpoint_descriptor *in;
- struct usb_endpoint_descriptor *out;
- struct usb_endpoint_descriptor *notify;
-};
-
struct f_acm {
struct gserial port;
u8 ctrl_id, data_id;
@@ -58,11 +52,7 @@ struct f_acm {
*/
spinlock_t lock;
- struct acm_ep_descs fs;
- struct acm_ep_descs hs;
-
struct usb_ep *notify;
- struct usb_endpoint_descriptor *notify_desc;
struct usb_request *notify_req;
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
@@ -405,23 +395,27 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
usb_ep_disable(acm->notify);
} else {
VDBG(cdev, "init acm ctrl interface %d\n", intf);
- acm->notify_desc = ep_choose(cdev->gadget,
- acm->hs.notify,
- acm->fs.notify);
+ if (config_ep_by_speed(cdev->gadget, f, acm->notify))
+ return -EINVAL;
}
- usb_ep_enable(acm->notify, acm->notify_desc);
+ usb_ep_enable(acm->notify);
acm->notify->driver_data = acm;
} else if (intf == acm->data_id) {
if (acm->port.in->driver_data) {
DBG(cdev, "reset acm ttyGS%d\n", acm->port_num);
gserial_disconnect(&acm->port);
- } else {
+ }
+ if (!acm->port.in->desc || !acm->port.out->desc) {
DBG(cdev, "activate acm ttyGS%d\n", acm->port_num);
- acm->port.in_desc = ep_choose(cdev->gadget,
- acm->hs.in, acm->fs.in);
- acm->port.out_desc = ep_choose(cdev->gadget,
- acm->hs.out, acm->fs.out);
+ if (config_ep_by_speed(cdev->gadget, f,
+ acm->port.in) ||
+ config_ep_by_speed(cdev->gadget, f,
+ acm->port.out)) {
+ acm->port.in->desc = NULL;
+ acm->port.out->desc = NULL;
+ return -EINVAL;
+ }
}
gserial_connect(&acm->port, acm->port_num);
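This is the first of many identical conversions in the series: instead of choosing between stored full- and high-speed endpoint descriptors with ep_choose() and passing the result to usb_ep_enable(), set_alt() now asks config_ep_by_speed() to place the descriptor matching the current connection speed into ep->desc, and usb_ep_enable() takes no descriptor argument. Roughly, and only as a simplified sketch of what the composite-layer helper is expected to do (the real implementation also handles the SuperSpeed companion data):

    /* simplified sketch, not the real composite.c code */
    static int sketch_config_ep_by_speed(struct usb_gadget *g,
                    struct usb_function *f, struct usb_ep *ep)
    {
            struct usb_descriptor_header **tab;
            struct usb_endpoint_descriptor *d;
            int i;

            switch (g->speed) {
            case USB_SPEED_SUPER:
                    tab = f->ss_descriptors;
                    break;
            case USB_SPEED_HIGH:
                    tab = f->hs_descriptors;
                    break;
            default:
                    tab = f->descriptors;           /* full speed set */
                    break;
            }

            /* find the copy matching the endpoint chosen at bind time */
            for (i = 0; tab[i]; i++) {
                    d = (struct usb_endpoint_descriptor *)tab[i];
                    if (d->bDescriptorType == USB_DT_ENDPOINT &&
                        d->bEndpointAddress == ep->address)
                            goto found;
            }
            return -EIO;
    found:
            ep->desc = d;           /* consumed later by usb_ep_enable(ep) */
            return 0;
    }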
@@ -629,18 +623,11 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
acm->notify_req->complete = acm_cdc_notify_complete;
acm->notify_req->context = acm;
- /* copy descriptors, and track endpoint copies */
+ /* copy descriptors */
f->descriptors = usb_copy_descriptors(acm_fs_function);
if (!f->descriptors)
goto fail;
- acm->fs.in = usb_find_endpoint(acm_fs_function,
- f->descriptors, &acm_fs_in_desc);
- acm->fs.out = usb_find_endpoint(acm_fs_function,
- f->descriptors, &acm_fs_out_desc);
- acm->fs.notify = usb_find_endpoint(acm_fs_function,
- f->descriptors, &acm_fs_notify_desc);
-
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
@@ -653,15 +640,8 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
acm_hs_notify_desc.bEndpointAddress =
acm_fs_notify_desc.bEndpointAddress;
- /* copy descriptors, and track endpoint copies */
+ /* copy descriptors */
f->hs_descriptors = usb_copy_descriptors(acm_hs_function);
-
- acm->hs.in = usb_find_endpoint(acm_hs_function,
- f->hs_descriptors, &acm_hs_in_desc);
- acm->hs.out = usb_find_endpoint(acm_hs_function,
- f->hs_descriptors, &acm_hs_out_desc);
- acm->hs.notify = usb_find_endpoint(acm_hs_function,
- f->hs_descriptors, &acm_hs_notify_desc);
}
DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index 8ee330a2ab5..a9a4eade7e8 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "u_audio.h"
@@ -279,7 +279,6 @@ struct f_audio {
/* endpoints handle full and/or high speeds */
struct usb_ep *out_ep;
- struct usb_endpoint_descriptor *out_desc;
spinlock_t lock;
struct f_audio_buf *copy_buf;
@@ -575,7 +574,7 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (intf == 1) {
if (alt == 1) {
- usb_ep_enable(out_ep, audio->out_desc);
+ usb_ep_enable(out_ep);
out_ep->driver_data = audio;
audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
if (IS_ERR(audio->copy_buf))
@@ -677,6 +676,7 @@ f_audio_bind(struct usb_configuration *c, struct usb_function *f)
if (!ep)
goto fail;
audio->out_ep = ep;
+ audio->out_ep->desc = &as_out_ep_desc;
ep->driver_data = cdev; /* claim */
status = -ENOMEM;
@@ -776,7 +776,6 @@ int __init audio_bind_config(struct usb_configuration *c)
audio->card.func.set_alt = f_audio_set_alt;
audio->card.func.setup = f_audio_setup;
audio->card.func.disable = f_audio_disable;
- audio->out_desc = &as_out_ep_desc;
control_selector_init(audio);
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 544257a89ed..3691a0cb946 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -46,11 +46,6 @@
* and also means that a get_alt() method is required.
*/
-struct ecm_ep_descs {
- struct usb_endpoint_descriptor *in;
- struct usb_endpoint_descriptor *out;
- struct usb_endpoint_descriptor *notify;
-};
enum ecm_notify_state {
ECM_NOTIFY_NONE, /* don't notify */
@@ -64,11 +59,7 @@ struct f_ecm {
char ethaddr[14];
- struct ecm_ep_descs fs;
- struct ecm_ep_descs hs;
-
struct usb_ep *notify;
- struct usb_endpoint_descriptor *notify_desc;
struct usb_request *notify_req;
u8 notify_state;
bool is_open;
@@ -86,10 +77,12 @@ static inline struct f_ecm *func_to_ecm(struct usb_function *f)
/* peak (theoretical) bulk transfer rate in bits-per-second */
static inline unsigned ecm_bitrate(struct usb_gadget *g)
{
- if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+ if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+ return 13 * 1024 * 8 * 1000 * 8;
+ else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
return 13 * 512 * 8 * 1000 * 8;
else
- return 19 * 64 * 1 * 1000 * 8;
+ return 19 * 64 * 1 * 1000 * 8;
}
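Reading the factors in ecm_bitrate() as packets-per-microframe * bytes-per-packet * microframes-per-ms * ms-per-second * bits-per-byte, the existing high-speed figure is 13 * 512 * 8 * 1000 * 8 = 425,984,000 bit/s, roughly 426 Mbit/s, and the new SuperSpeed branch simply substitutes the 1024-byte bulk packet size: 13 * 1024 * 8 * 1000 * 8 = 851,968,000 bit/s, roughly 852 Mbit/s. Both are the driver's rough theoretical peaks used for reporting, not measured link rates; the same change appears in bitrate() in f_rndis.c further down.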
/*-------------------------------------------------------------------------*/
@@ -219,8 +212,10 @@ static struct usb_descriptor_header *ecm_fs_function[] = {
(struct usb_descriptor_header *) &ecm_header_desc,
(struct usb_descriptor_header *) &ecm_union_desc,
(struct usb_descriptor_header *) &ecm_desc,
+
/* NOTE: status endpoint might need to be removed */
(struct usb_descriptor_header *) &fs_ecm_notify_desc,
+
/* data interface, altsettings 0 and 1 */
(struct usb_descriptor_header *) &ecm_data_nop_intf,
(struct usb_descriptor_header *) &ecm_data_intf,
@@ -240,6 +235,7 @@ static struct usb_endpoint_descriptor hs_ecm_notify_desc = {
.wMaxPacketSize = cpu_to_le16(ECM_STATUS_BYTECOUNT),
.bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
};
+
static struct usb_endpoint_descriptor hs_ecm_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -264,8 +260,10 @@ static struct usb_descriptor_header *ecm_hs_function[] = {
(struct usb_descriptor_header *) &ecm_header_desc,
(struct usb_descriptor_header *) &ecm_union_desc,
(struct usb_descriptor_header *) &ecm_desc,
+
/* NOTE: status endpoint might need to be removed */
(struct usb_descriptor_header *) &hs_ecm_notify_desc,
+
/* data interface, altsettings 0 and 1 */
(struct usb_descriptor_header *) &ecm_data_nop_intf,
(struct usb_descriptor_header *) &ecm_data_intf,
@@ -274,6 +272,76 @@ static struct usb_descriptor_header *ecm_hs_function[] = {
NULL,
};
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_ecm_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(ECM_STATUS_BYTECOUNT),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ss_ecm_intr_comp_desc = {
+ .bLength = sizeof ss_ecm_intr_comp_desc,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(ECM_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ss_ecm_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor ss_ecm_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_ecm_bulk_comp_desc = {
+ .bLength = sizeof ss_ecm_bulk_comp_desc,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ecm_ss_function[] = {
+ /* CDC ECM control descriptors */
+ (struct usb_descriptor_header *) &ecm_control_intf,
+ (struct usb_descriptor_header *) &ecm_header_desc,
+ (struct usb_descriptor_header *) &ecm_union_desc,
+ (struct usb_descriptor_header *) &ecm_desc,
+
+ /* NOTE: status endpoint might need to be removed */
+ (struct usb_descriptor_header *) &ss_ecm_notify_desc,
+ (struct usb_descriptor_header *) &ss_ecm_intr_comp_desc,
+
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ecm_data_nop_intf,
+ (struct usb_descriptor_header *) &ecm_data_intf,
+ (struct usb_descriptor_header *) &ss_ecm_in_desc,
+ (struct usb_descriptor_header *) &ss_ecm_bulk_comp_desc,
+ (struct usb_descriptor_header *) &ss_ecm_out_desc,
+ (struct usb_descriptor_header *) &ss_ecm_bulk_comp_desc,
+ NULL,
+};
+
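In the companion descriptors added above (and the analogous ones for EEM, loopback and RNDIS later in this patch), the zeroed fields are conservative defaults: bMaxBurst is the number of packets per burst minus one, so 0 means one packet per burst; for bulk endpoints bmAttributes carries the MaxStreams exponent, so 0 means no streams; and wBytesPerInterval only applies to periodic endpoints, which is why the interrupt companion sets it to ECM_STATUS_BYTECOUNT while the bulk companion leaves it zero. A hypothetical tuned bulk companion, purely for illustration and not part of this patch, might look like:

    static struct usb_ss_ep_comp_descriptor ss_ecm_bulk_comp_desc_tuned = {
            .bLength         = sizeof ss_ecm_bulk_comp_desc_tuned,
            .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
            .bMaxBurst       = 3,   /* up to 4 packets per burst */
            .bmAttributes    = 2,   /* 2^2 = 4 bulk streams */
    };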
/* string descriptors: */
static struct usb_string ecm_string_defs[] = {
@@ -464,13 +532,13 @@ static int ecm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (ecm->notify->driver_data) {
VDBG(cdev, "reset ecm control %d\n", intf);
usb_ep_disable(ecm->notify);
- } else {
+ }
+ if (!(ecm->notify->desc)) {
VDBG(cdev, "init ecm ctrl %d\n", intf);
- ecm->notify_desc = ep_choose(cdev->gadget,
- ecm->hs.notify,
- ecm->fs.notify);
+ if (config_ep_by_speed(cdev->gadget, f, ecm->notify))
+ goto fail;
}
- usb_ep_enable(ecm->notify, ecm->notify_desc);
+ usb_ep_enable(ecm->notify);
ecm->notify->driver_data = ecm;
/* Data interface has two altsettings, 0 and 1 */
@@ -483,12 +551,17 @@ static int ecm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
gether_disconnect(&ecm->port);
}
- if (!ecm->port.in) {
+ if (!ecm->port.in_ep->desc ||
+ !ecm->port.out_ep->desc) {
DBG(cdev, "init ecm\n");
- ecm->port.in = ep_choose(cdev->gadget,
- ecm->hs.in, ecm->fs.in);
- ecm->port.out = ep_choose(cdev->gadget,
- ecm->hs.out, ecm->fs.out);
+ if (config_ep_by_speed(cdev->gadget, f,
+ ecm->port.in_ep) ||
+ config_ep_by_speed(cdev->gadget, f,
+ ecm->port.out_ep)) {
+ ecm->port.in_ep->desc = NULL;
+ ecm->port.out_ep->desc = NULL;
+ goto fail;
+ }
}
/* CDC Ethernet only sends data in non-default altsettings.
@@ -549,7 +622,7 @@ static void ecm_disable(struct usb_function *f)
if (ecm->notify->driver_data) {
usb_ep_disable(ecm->notify);
ecm->notify->driver_data = NULL;
- ecm->notify_desc = NULL;
+ ecm->notify->desc = NULL;
}
}
@@ -665,13 +738,6 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
if (!f->descriptors)
goto fail;
- ecm->fs.in = usb_find_endpoint(ecm_fs_function,
- f->descriptors, &fs_ecm_in_desc);
- ecm->fs.out = usb_find_endpoint(ecm_fs_function,
- f->descriptors, &fs_ecm_out_desc);
- ecm->fs.notify = usb_find_endpoint(ecm_fs_function,
- f->descriptors, &fs_ecm_notify_desc);
-
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
@@ -688,13 +754,20 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
f->hs_descriptors = usb_copy_descriptors(ecm_hs_function);
if (!f->hs_descriptors)
goto fail;
+ }
- ecm->hs.in = usb_find_endpoint(ecm_hs_function,
- f->hs_descriptors, &hs_ecm_in_desc);
- ecm->hs.out = usb_find_endpoint(ecm_hs_function,
- f->hs_descriptors, &hs_ecm_out_desc);
- ecm->hs.notify = usb_find_endpoint(ecm_hs_function,
- f->hs_descriptors, &hs_ecm_notify_desc);
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ss_ecm_in_desc.bEndpointAddress =
+ fs_ecm_in_desc.bEndpointAddress;
+ ss_ecm_out_desc.bEndpointAddress =
+ fs_ecm_out_desc.bEndpointAddress;
+ ss_ecm_notify_desc.bEndpointAddress =
+ fs_ecm_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(ecm_ss_function);
+ if (!f->ss_descriptors)
+ goto fail;
}
/* NOTE: all that is done without knowing or caring about
@@ -706,6 +779,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
ecm->port.close = ecm_close;
DBG(cdev, "CDC Ethernet: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+ gadget_is_superspeed(c->cdev->gadget) ? "super" :
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
ecm->port.in_ep->name, ecm->port.out_ep->name,
ecm->notify->name);
@@ -714,6 +788,8 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
fail:
if (f->descriptors)
usb_free_descriptors(f->descriptors);
+ if (f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
if (ecm->notify_req) {
kfree(ecm->notify_req->buf);
@@ -723,9 +799,9 @@ fail:
/* we might as well release our claims on endpoints */
if (ecm->notify)
ecm->notify->driver_data = NULL;
- if (ecm->port.out)
+ if (ecm->port.out_ep->desc)
ecm->port.out_ep->driver_data = NULL;
- if (ecm->port.in)
+ if (ecm->port.in_ep->desc)
ecm->port.in_ep->driver_data = NULL;
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
@@ -740,6 +816,8 @@ ecm_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(c->cdev, "ecm unbind\n");
+ if (gadget_is_superspeed(c->cdev->gadget))
+ usb_free_descriptors(f->ss_descriptors);
if (gadget_is_dualspeed(c->cdev->gadget))
usb_free_descriptors(f->hs_descriptors);
usb_free_descriptors(f->descriptors);
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index b3c30429015..046c6d0e696 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -35,17 +35,9 @@
* Ethernet link.
*/
-struct eem_ep_descs {
- struct usb_endpoint_descriptor *in;
- struct usb_endpoint_descriptor *out;
-};
-
struct f_eem {
struct gether port;
u8 ctrl_id;
-
- struct eem_ep_descs fs;
- struct eem_ep_descs hs;
};
static inline struct f_eem *func_to_eem(struct usb_function *f)
@@ -123,6 +115,45 @@ static struct usb_descriptor_header *eem_hs_function[] __initdata = {
NULL,
};
+/* super speed support: */
+
+static struct usb_endpoint_descriptor eem_ss_in_desc __initdata = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor eem_ss_out_desc __initdata = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor eem_ss_bulk_comp_desc __initdata = {
+ .bLength = sizeof eem_ss_bulk_comp_desc,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *eem_ss_function[] __initdata = {
+ /* CDC EEM control descriptors */
+ (struct usb_descriptor_header *) &eem_intf,
+ (struct usb_descriptor_header *) &eem_ss_in_desc,
+ (struct usb_descriptor_header *) &eem_ss_bulk_comp_desc,
+ (struct usb_descriptor_header *) &eem_ss_out_desc,
+ (struct usb_descriptor_header *) &eem_ss_bulk_comp_desc,
+ NULL,
+};
+
/* string descriptors: */
static struct usb_string eem_string_defs[] = {
@@ -176,12 +207,16 @@ static int eem_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
gether_disconnect(&eem->port);
}
- if (!eem->port.in) {
+ if (!eem->port.in_ep->desc || !eem->port.out_ep->desc) {
DBG(cdev, "init eem\n");
- eem->port.in = ep_choose(cdev->gadget,
- eem->hs.in, eem->fs.in);
- eem->port.out = ep_choose(cdev->gadget,
- eem->hs.out, eem->fs.out);
+ if (config_ep_by_speed(cdev->gadget, f,
+ eem->port.in_ep) ||
+ config_ep_by_speed(cdev->gadget, f,
+ eem->port.out_ep)) {
+ eem->port.in_ep->desc = NULL;
+ eem->port.out_ep->desc = NULL;
+ goto fail;
+ }
}
/* zlps should not occur because zero-length EEM packets
@@ -253,11 +288,6 @@ eem_bind(struct usb_configuration *c, struct usb_function *f)
if (!f->descriptors)
goto fail;
- eem->fs.in = usb_find_endpoint(eem_fs_function,
- f->descriptors, &eem_fs_in_desc);
- eem->fs.out = usb_find_endpoint(eem_fs_function,
- f->descriptors, &eem_fs_out_desc);
-
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
@@ -272,14 +302,22 @@ eem_bind(struct usb_configuration *c, struct usb_function *f)
f->hs_descriptors = usb_copy_descriptors(eem_hs_function);
if (!f->hs_descriptors)
goto fail;
+ }
+
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ eem_ss_in_desc.bEndpointAddress =
+ eem_fs_in_desc.bEndpointAddress;
+ eem_ss_out_desc.bEndpointAddress =
+ eem_fs_out_desc.bEndpointAddress;
- eem->hs.in = usb_find_endpoint(eem_hs_function,
- f->hs_descriptors, &eem_hs_in_desc);
- eem->hs.out = usb_find_endpoint(eem_hs_function,
- f->hs_descriptors, &eem_hs_out_desc);
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(eem_ss_function);
+ if (!f->ss_descriptors)
+ goto fail;
}
DBG(cdev, "CDC Ethernet (EEM): %s speed IN/%s OUT/%s\n",
+ gadget_is_superspeed(c->cdev->gadget) ? "super" :
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
eem->port.in_ep->name, eem->port.out_ep->name);
return 0;
@@ -287,11 +325,13 @@ eem_bind(struct usb_configuration *c, struct usb_function *f)
fail:
if (f->descriptors)
usb_free_descriptors(f->descriptors);
+ if (f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
/* we might as well release our claims on endpoints */
- if (eem->port.out)
+ if (eem->port.out_ep->desc)
eem->port.out_ep->driver_data = NULL;
- if (eem->port.in)
+ if (eem->port.in_ep->desc)
eem->port.in_ep->driver_data = NULL;
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
@@ -306,6 +346,8 @@ eem_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(c->cdev, "eem unbind\n");
+ if (gadget_is_superspeed(c->cdev->gadget))
+ usb_free_descriptors(f->ss_descriptors);
if (gadget_is_dualspeed(c->cdev->gadget))
usb_free_descriptors(f->hs_descriptors);
usb_free_descriptors(f->descriptors);
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 19fffccc370..c161a9aaeb7 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1544,7 +1544,8 @@ static int ffs_func_eps_enable(struct ffs_function *func)
ds = ep->descs[ep->descs[1] ? 1 : 0];
ep->ep->driver_data = ep;
- ret = usb_ep_enable(ep->ep, ds);
+ ep->ep->desc = ds;
+ ret = usb_ep_enable(ep->ep);
if (likely(!ret)) {
epfile->ep = ep;
epfile->in = usb_endpoint_dir_in(ds);
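This FunctionFS hunk shows the other half of the interface change used throughout the series: usb_ep_enable() no longer takes a descriptor argument, so the chosen descriptor must be stored in ep->desc first, here straight from the FunctionFS descriptor table and elsewhere via config_ep_by_speed(). Schematically, for a claimed endpoint ep and a chosen descriptor ds:

    /* before this series */
    ret = usb_ep_enable(ep, ds);

    /* after this series */
    ep->desc = ds;                  /* or config_ep_by_speed(gadget, f, ep) */
    ret = usb_ep_enable(ep);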
diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c
index 598e7e2ab80..83a266bdb40 100644
--- a/drivers/usb/gadget/f_hid.c
+++ b/drivers/usb/gadget/f_hid.c
@@ -59,8 +59,6 @@ struct f_hidg {
struct cdev cdev;
struct usb_function func;
struct usb_ep *in_ep;
- struct usb_endpoint_descriptor *fs_in_ep_desc;
- struct usb_endpoint_descriptor *hs_in_ep_desc;
};
static inline struct f_hidg *func_to_hidg(struct usb_function *f)
@@ -369,6 +367,13 @@ static int hidg_setup(struct usb_function *f,
case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8
| USB_REQ_GET_DESCRIPTOR):
switch (value >> 8) {
+ case HID_DT_HID:
+ VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
+ length = min_t(unsigned short, length,
+ hidg_desc.bLength);
+ memcpy(req->buf, &hidg_desc, length);
+ goto respond;
+ break;
case HID_DT_REPORT:
VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n");
length = min_t(unsigned short, length,
@@ -416,7 +421,6 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct usb_composite_dev *cdev = f->config->cdev;
struct f_hidg *hidg = func_to_hidg(f);
- const struct usb_endpoint_descriptor *ep_desc;
int status = 0;
VDBG(cdev, "hidg_set_alt intf:%d alt:%d\n", intf, alt);
@@ -426,9 +430,13 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (hidg->in_ep->driver_data != NULL)
usb_ep_disable(hidg->in_ep);
- ep_desc = ep_choose(f->config->cdev->gadget,
- hidg->hs_in_ep_desc, hidg->fs_in_ep_desc);
- status = usb_ep_enable(hidg->in_ep, ep_desc);
+ status = config_ep_by_speed(f->config->cdev->gadget, f,
+ hidg->in_ep);
+ if (status) {
+ ERROR(cdev, "config_ep_by_speed FAILED!\n");
+ goto fail;
+ }
+ status = usb_ep_enable(hidg->in_ep);
if (status < 0) {
ERROR(cdev, "Enable endpoint FAILED!\n");
goto fail;
@@ -498,21 +506,12 @@ static int __init hidg_bind(struct usb_configuration *c, struct usb_function *f)
if (!f->descriptors)
goto fail;
- hidg->fs_in_ep_desc = usb_find_endpoint(hidg_fs_descriptors,
- f->descriptors,
- &hidg_fs_in_ep_desc);
-
if (gadget_is_dualspeed(c->cdev->gadget)) {
hidg_hs_in_ep_desc.bEndpointAddress =
hidg_fs_in_ep_desc.bEndpointAddress;
f->hs_descriptors = usb_copy_descriptors(hidg_hs_descriptors);
if (!f->hs_descriptors)
goto fail;
- hidg->hs_in_ep_desc = usb_find_endpoint(hidg_hs_descriptors,
- f->hs_descriptors,
- &hidg_hs_in_ep_desc);
- } else {
- hidg->hs_in_ep_desc = NULL;
}
mutex_init(&hidg->lock);
diff --git a/drivers/usb/gadget/f_loopback.c b/drivers/usb/gadget/f_loopback.c
index b37960f9e75..ca660d40b11 100644
--- a/drivers/usb/gadget/f_loopback.c
+++ b/drivers/usb/gadget/f_loopback.c
@@ -118,6 +118,49 @@ static struct usb_descriptor_header *hs_loopback_descs[] = {
NULL,
};
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_loop_source_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+struct usb_ss_ep_comp_descriptor ss_loop_source_comp_desc = {
+ .bLength = USB_DT_SS_EP_COMP_SIZE,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_endpoint_descriptor ss_loop_sink_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+struct usb_ss_ep_comp_descriptor ss_loop_sink_comp_desc = {
+ .bLength = USB_DT_SS_EP_COMP_SIZE,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_descriptor_header *ss_loopback_descs[] = {
+ (struct usb_descriptor_header *) &loopback_intf,
+ (struct usb_descriptor_header *) &ss_loop_source_desc,
+ (struct usb_descriptor_header *) &ss_loop_source_comp_desc,
+ (struct usb_descriptor_header *) &ss_loop_sink_desc,
+ (struct usb_descriptor_header *) &ss_loop_sink_comp_desc,
+ NULL,
+};
+
/* function-specific strings: */
static struct usb_string strings_loopback[] = {
@@ -175,8 +218,18 @@ autoconf_fail:
f->hs_descriptors = hs_loopback_descs;
}
+ /* support super speed hardware */
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ss_loop_source_desc.bEndpointAddress =
+ fs_loop_source_desc.bEndpointAddress;
+ ss_loop_sink_desc.bEndpointAddress =
+ fs_loop_sink_desc.bEndpointAddress;
+ f->ss_descriptors = ss_loopback_descs;
+ }
+
DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
- gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ (gadget_is_superspeed(c->cdev->gadget) ? "super" :
+ (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")),
f->name, loop->in_ep->name, loop->out_ep->name);
return 0;
}
@@ -250,26 +303,27 @@ static int
enable_loopback(struct usb_composite_dev *cdev, struct f_loopback *loop)
{
int result = 0;
- const struct usb_endpoint_descriptor *src, *sink;
struct usb_ep *ep;
struct usb_request *req;
unsigned i;
- src = ep_choose(cdev->gadget,
- &hs_loop_source_desc, &fs_loop_source_desc);
- sink = ep_choose(cdev->gadget,
- &hs_loop_sink_desc, &fs_loop_sink_desc);
-
/* one endpoint writes data back IN to the host */
ep = loop->in_ep;
- result = usb_ep_enable(ep, src);
+ result = config_ep_by_speed(cdev->gadget, &(loop->function), ep);
+ if (result)
+ return result;
+ result = usb_ep_enable(ep);
if (result < 0)
return result;
ep->driver_data = loop;
/* one endpoint just reads OUT packets */
ep = loop->out_ep;
- result = usb_ep_enable(ep, sink);
+ result = config_ep_by_speed(cdev->gadget, &(loop->function), ep);
+ if (result)
+ goto fail0;
+
+ result = usb_ep_enable(ep);
if (result < 0) {
fail0:
ep = loop->in_ep;
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index efb58f9f5aa..5b933958200 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2324,18 +2324,6 @@ static int get_next_command(struct fsg_common *common)
/*-------------------------------------------------------------------------*/
-static int enable_endpoint(struct fsg_common *common, struct usb_ep *ep,
- const struct usb_endpoint_descriptor *d)
-{
- int rc;
-
- ep->driver_data = common;
- rc = usb_ep_enable(ep, d);
- if (rc)
- ERROR(common, "can't enable %s, result %d\n", ep->name, rc);
- return rc;
-}
-
static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
struct usb_request **preq)
{
@@ -2349,7 +2337,6 @@ static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
/* Reset interface setting and re-init endpoint state (toggle etc). */
static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
{
- const struct usb_endpoint_descriptor *d;
struct fsg_dev *fsg;
int i, rc = 0;
@@ -2396,20 +2383,26 @@ reset:
fsg = common->fsg;
/* Enable the endpoints */
- d = fsg_ep_desc(common->gadget,
- &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
- rc = enable_endpoint(common, fsg->bulk_in, d);
+ rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in);
+ if (rc)
+ goto reset;
+ rc = usb_ep_enable(fsg->bulk_in);
if (rc)
goto reset;
+ fsg->bulk_in->driver_data = common;
fsg->bulk_in_enabled = 1;
- d = fsg_ep_desc(common->gadget,
- &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
- rc = enable_endpoint(common, fsg->bulk_out, d);
+ rc = config_ep_by_speed(common->gadget, &(fsg->function),
+ fsg->bulk_out);
+ if (rc)
+ goto reset;
+ rc = usb_ep_enable(fsg->bulk_out);
if (rc)
goto reset;
+ fsg->bulk_out->driver_data = common;
fsg->bulk_out_enabled = 1;
- common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
+ common->bulk_out_maxpacket =
+ le16_to_cpu(fsg->bulk_out->desc->wMaxPacketSize);
clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
/* Allocate the requests */
diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c
index 86902a60bcd..ae69ed7e6b9 100644
--- a/drivers/usb/gadget/f_ncm.c
+++ b/drivers/usb/gadget/f_ncm.c
@@ -48,12 +48,6 @@
#define NCM_NDP_HDR_CRC 0x01000000
#define NCM_NDP_HDR_NOCRC 0x00000000
-struct ncm_ep_descs {
- struct usb_endpoint_descriptor *in;
- struct usb_endpoint_descriptor *out;
- struct usb_endpoint_descriptor *notify;
-};
-
enum ncm_notify_state {
NCM_NOTIFY_NONE, /* don't notify */
NCM_NOTIFY_CONNECT, /* issue CONNECT next */
@@ -66,11 +60,7 @@ struct f_ncm {
char ethaddr[14];
- struct ncm_ep_descs fs;
- struct ncm_ep_descs hs;
-
struct usb_ep *notify;
- struct usb_endpoint_descriptor *notify_desc;
struct usb_request *notify_req;
u8 notify_state;
bool is_open;
@@ -802,13 +792,14 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (ncm->notify->driver_data) {
DBG(cdev, "reset ncm control %d\n", intf);
usb_ep_disable(ncm->notify);
- } else {
+ }
+
+ if (!(ncm->notify->desc)) {
DBG(cdev, "init ncm ctrl %d\n", intf);
- ncm->notify_desc = ep_choose(cdev->gadget,
- ncm->hs.notify,
- ncm->fs.notify);
+ if (config_ep_by_speed(cdev->gadget, f, ncm->notify))
+ goto fail;
}
- usb_ep_enable(ncm->notify, ncm->notify_desc);
+ usb_ep_enable(ncm->notify);
ncm->notify->driver_data = ncm;
/* Data interface has two altsettings, 0 and 1 */
@@ -829,14 +820,17 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (alt == 1) {
struct net_device *net;
- if (!ncm->port.in) {
+ if (!ncm->port.in_ep->desc ||
+ !ncm->port.out_ep->desc) {
DBG(cdev, "init ncm\n");
- ncm->port.in = ep_choose(cdev->gadget,
- ncm->hs.in,
- ncm->fs.in);
- ncm->port.out = ep_choose(cdev->gadget,
- ncm->hs.out,
- ncm->fs.out);
+ if (config_ep_by_speed(cdev->gadget, f,
+ ncm->port.in_ep) ||
+ config_ep_by_speed(cdev->gadget, f,
+ ncm->port.out_ep)) {
+ ncm->port.in_ep->desc = NULL;
+ ncm->port.out_ep->desc = NULL;
+ goto fail;
+ }
}
/* TODO */
@@ -1111,7 +1105,7 @@ static void ncm_disable(struct usb_function *f)
if (ncm->notify->driver_data) {
usb_ep_disable(ncm->notify);
ncm->notify->driver_data = NULL;
- ncm->notify_desc = NULL;
+ ncm->notify->desc = NULL;
}
}
@@ -1228,13 +1222,6 @@ ncm_bind(struct usb_configuration *c, struct usb_function *f)
if (!f->descriptors)
goto fail;
- ncm->fs.in = usb_find_endpoint(ncm_fs_function,
- f->descriptors, &fs_ncm_in_desc);
- ncm->fs.out = usb_find_endpoint(ncm_fs_function,
- f->descriptors, &fs_ncm_out_desc);
- ncm->fs.notify = usb_find_endpoint(ncm_fs_function,
- f->descriptors, &fs_ncm_notify_desc);
-
/*
* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
@@ -1252,13 +1239,6 @@ ncm_bind(struct usb_configuration *c, struct usb_function *f)
f->hs_descriptors = usb_copy_descriptors(ncm_hs_function);
if (!f->hs_descriptors)
goto fail;
-
- ncm->hs.in = usb_find_endpoint(ncm_hs_function,
- f->hs_descriptors, &hs_ncm_in_desc);
- ncm->hs.out = usb_find_endpoint(ncm_hs_function,
- f->hs_descriptors, &hs_ncm_out_desc);
- ncm->hs.notify = usb_find_endpoint(ncm_hs_function,
- f->hs_descriptors, &hs_ncm_notify_desc);
}
/*
@@ -1288,9 +1268,9 @@ fail:
/* we might as well release our claims on endpoints */
if (ncm->notify)
ncm->notify->driver_data = NULL;
- if (ncm->port.out)
+ if (ncm->port.out_ep->desc)
ncm->port.out_ep->driver_data = NULL;
- if (ncm->port.in)
+ if (ncm->port.in_ep->desc)
ncm->port.in_ep->driver_data = NULL;
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
diff --git a/drivers/usb/gadget/f_obex.c b/drivers/usb/gadget/f_obex.c
index 8f8c6437147..394502abeb9 100644
--- a/drivers/usb/gadget/f_obex.c
+++ b/drivers/usb/gadget/f_obex.c
@@ -39,20 +39,12 @@
* ready to handle the commands.
*/
-struct obex_ep_descs {
- struct usb_endpoint_descriptor *obex_in;
- struct usb_endpoint_descriptor *obex_out;
-};
-
struct f_obex {
struct gserial port;
u8 ctrl_id;
u8 data_id;
u8 port_num;
u8 can_activate;
-
- struct obex_ep_descs fs;
- struct obex_ep_descs hs;
};
static inline struct f_obex *func_to_obex(struct usb_function *f)
@@ -227,12 +219,16 @@ static int obex_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
gserial_disconnect(&obex->port);
}
- if (!obex->port.in_desc) {
+ if (!obex->port.in->desc || !obex->port.out->desc) {
DBG(cdev, "init obex ttyGS%d\n", obex->port_num);
- obex->port.in_desc = ep_choose(cdev->gadget,
- obex->hs.obex_in, obex->fs.obex_in);
- obex->port.out_desc = ep_choose(cdev->gadget,
- obex->hs.obex_out, obex->fs.obex_out);
+ if (config_ep_by_speed(cdev->gadget, f,
+ obex->port.in) ||
+ config_ep_by_speed(cdev->gadget, f,
+ obex->port.out)) {
+ obex->port.out->desc = NULL;
+ obex->port.in->desc = NULL;
+ goto fail;
+ }
}
if (alt == 1) {
@@ -346,11 +342,6 @@ obex_bind(struct usb_configuration *c, struct usb_function *f)
/* copy descriptors, and track endpoint copies */
f->descriptors = usb_copy_descriptors(fs_function);
- obex->fs.obex_in = usb_find_endpoint(fs_function,
- f->descriptors, &obex_fs_ep_in_desc);
- obex->fs.obex_out = usb_find_endpoint(fs_function,
- f->descriptors, &obex_fs_ep_out_desc);
-
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
@@ -364,11 +355,6 @@ obex_bind(struct usb_configuration *c, struct usb_function *f)
/* copy descriptors, and track endpoint copies */
f->hs_descriptors = usb_copy_descriptors(hs_function);
-
- obex->hs.obex_in = usb_find_endpoint(hs_function,
- f->hs_descriptors, &obex_hs_ep_in_desc);
- obex->hs.obex_out = usb_find_endpoint(hs_function,
- f->hs_descriptors, &obex_hs_ep_out_desc);
}
/* Avoid letting this gadget enumerate until the userspace
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 5e1495097ec..8f8d3f6cd89 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -20,6 +20,7 @@
* 02110-1301 USA
*/
+#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -427,17 +428,16 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
spin_lock(&port->lock);
__pn_reset(f);
if (alt == 1) {
- struct usb_endpoint_descriptor *out, *in;
int i;
- out = ep_choose(gadget,
- &pn_hs_sink_desc,
- &pn_fs_sink_desc);
- in = ep_choose(gadget,
- &pn_hs_source_desc,
- &pn_fs_source_desc);
- usb_ep_enable(fp->out_ep, out);
- usb_ep_enable(fp->in_ep, in);
+ if (config_ep_by_speed(gadget, f, fp->in_ep) ||
+ config_ep_by_speed(gadget, f, fp->out_ep)) {
+ fp->in_ep->desc = NULL;
+ fp->out_ep->desc = NULL;
+ return -EINVAL;
+ }
+ usb_ep_enable(fp->out_ep);
+ usb_ep_enable(fp->in_ep);
port->usb = fp;
fp->out_ep->driver_data = fp;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index fa12ec8364e..3ea4666be3d 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -29,7 +29,7 @@
#include <linux/device.h>
#include <linux/etherdevice.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "u_ether.h"
#include "rndis.h"
@@ -76,23 +76,13 @@
* - MS-Windows drivers sometimes emit undocumented requests.
*/
-struct rndis_ep_descs {
- struct usb_endpoint_descriptor *in;
- struct usb_endpoint_descriptor *out;
- struct usb_endpoint_descriptor *notify;
-};
-
struct f_rndis {
struct gether port;
u8 ctrl_id, data_id;
u8 ethaddr[ETH_ALEN];
int config;
- struct rndis_ep_descs fs;
- struct rndis_ep_descs hs;
-
struct usb_ep *notify;
- struct usb_endpoint_descriptor *notify_desc;
struct usb_request *notify_req;
atomic_t notify_count;
};
@@ -105,10 +95,12 @@ static inline struct f_rndis *func_to_rndis(struct usb_function *f)
/* peak (theoretical) bulk transfer rate in bits-per-second */
static unsigned int bitrate(struct usb_gadget *g)
{
- if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+ if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+ return 13 * 1024 * 8 * 1000 * 8;
+ else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
return 13 * 512 * 8 * 1000 * 8;
else
- return 19 * 64 * 1 * 1000 * 8;
+ return 19 * 64 * 1 * 1000 * 8;
}
/*-------------------------------------------------------------------------*/
@@ -226,6 +218,7 @@ static struct usb_endpoint_descriptor fs_out_desc = {
static struct usb_descriptor_header *eth_fs_function[] = {
(struct usb_descriptor_header *) &rndis_iad_descriptor,
+
/* control interface matches ACM, not Ethernet */
(struct usb_descriptor_header *) &rndis_control_intf,
(struct usb_descriptor_header *) &header_desc,
@@ -233,6 +226,7 @@ static struct usb_descriptor_header *eth_fs_function[] = {
(struct usb_descriptor_header *) &rndis_acm_descriptor,
(struct usb_descriptor_header *) &rndis_union_desc,
(struct usb_descriptor_header *) &fs_notify_desc,
+
/* data interface has no altsetting */
(struct usb_descriptor_header *) &rndis_data_intf,
(struct usb_descriptor_header *) &fs_in_desc,
@@ -251,6 +245,7 @@ static struct usb_endpoint_descriptor hs_notify_desc = {
.wMaxPacketSize = cpu_to_le16(STATUS_BYTECOUNT),
.bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
};
+
static struct usb_endpoint_descriptor hs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
@@ -271,6 +266,7 @@ static struct usb_endpoint_descriptor hs_out_desc = {
static struct usb_descriptor_header *eth_hs_function[] = {
(struct usb_descriptor_header *) &rndis_iad_descriptor,
+
/* control interface matches ACM, not Ethernet */
(struct usb_descriptor_header *) &rndis_control_intf,
(struct usb_descriptor_header *) &header_desc,
@@ -278,6 +274,7 @@ static struct usb_descriptor_header *eth_hs_function[] = {
(struct usb_descriptor_header *) &rndis_acm_descriptor,
(struct usb_descriptor_header *) &rndis_union_desc,
(struct usb_descriptor_header *) &hs_notify_desc,
+
/* data interface has no altsetting */
(struct usb_descriptor_header *) &rndis_data_intf,
(struct usb_descriptor_header *) &hs_in_desc,
@@ -285,6 +282,76 @@ static struct usb_descriptor_header *eth_hs_function[] = {
NULL,
};
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(STATUS_BYTECOUNT),
+ .bInterval = LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ss_intr_comp_desc = {
+ .bLength = sizeof ss_intr_comp_desc,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
+ .bLength = sizeof ss_bulk_comp_desc,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *eth_ss_function[] = {
+ (struct usb_descriptor_header *) &rndis_iad_descriptor,
+
+ /* control interface matches ACM, not Ethernet */
+ (struct usb_descriptor_header *) &rndis_control_intf,
+ (struct usb_descriptor_header *) &header_desc,
+ (struct usb_descriptor_header *) &call_mgmt_descriptor,
+ (struct usb_descriptor_header *) &rndis_acm_descriptor,
+ (struct usb_descriptor_header *) &rndis_union_desc,
+ (struct usb_descriptor_header *) &ss_notify_desc,
+ (struct usb_descriptor_header *) &ss_intr_comp_desc,
+
+ /* data interface has no altsetting */
+ (struct usb_descriptor_header *) &rndis_data_intf,
+ (struct usb_descriptor_header *) &ss_in_desc,
+ (struct usb_descriptor_header *) &ss_bulk_comp_desc,
+ (struct usb_descriptor_header *) &ss_out_desc,
+ (struct usb_descriptor_header *) &ss_bulk_comp_desc,
+ NULL,
+};
+
/* string descriptors: */
static struct usb_string rndis_string_defs[] = {
@@ -484,13 +551,13 @@ static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (rndis->notify->driver_data) {
VDBG(cdev, "reset rndis control %d\n", intf);
usb_ep_disable(rndis->notify);
- } else {
+ }
+ if (!rndis->notify->desc) {
VDBG(cdev, "init rndis ctrl %d\n", intf);
- rndis->notify_desc = ep_choose(cdev->gadget,
- rndis->hs.notify,
- rndis->fs.notify);
+ if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
+ goto fail;
}
- usb_ep_enable(rndis->notify, rndis->notify_desc);
+ usb_ep_enable(rndis->notify);
rndis->notify->driver_data = rndis;
} else if (intf == rndis->data_id) {
@@ -501,12 +568,16 @@ static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
gether_disconnect(&rndis->port);
}
- if (!rndis->port.in) {
+ if (!rndis->port.in_ep->desc || !rndis->port.out_ep->desc) {
DBG(cdev, "init rndis\n");
- rndis->port.in = ep_choose(cdev->gadget,
- rndis->hs.in, rndis->fs.in);
- rndis->port.out = ep_choose(cdev->gadget,
- rndis->hs.out, rndis->fs.out);
+ if (config_ep_by_speed(cdev->gadget, f,
+ rndis->port.in_ep) ||
+ config_ep_by_speed(cdev->gadget, f,
+ rndis->port.out_ep)) {
+ rndis->port.in_ep->desc = NULL;
+ rndis->port.out_ep->desc = NULL;
+ goto fail;
+ }
}
/* Avoid ZLPs; they can be troublesome. */
@@ -662,13 +733,6 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
if (!f->descriptors)
goto fail;
- rndis->fs.in = usb_find_endpoint(eth_fs_function,
- f->descriptors, &fs_in_desc);
- rndis->fs.out = usb_find_endpoint(eth_fs_function,
- f->descriptors, &fs_out_desc);
- rndis->fs.notify = usb_find_endpoint(eth_fs_function,
- f->descriptors, &fs_notify_desc);
-
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
@@ -683,16 +747,22 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
/* copy descriptors, and track endpoint copies */
f->hs_descriptors = usb_copy_descriptors(eth_hs_function);
-
if (!f->hs_descriptors)
goto fail;
+ }
- rndis->hs.in = usb_find_endpoint(eth_hs_function,
- f->hs_descriptors, &hs_in_desc);
- rndis->hs.out = usb_find_endpoint(eth_hs_function,
- f->hs_descriptors, &hs_out_desc);
- rndis->hs.notify = usb_find_endpoint(eth_hs_function,
- f->hs_descriptors, &hs_notify_desc);
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ss_in_desc.bEndpointAddress =
+ fs_in_desc.bEndpointAddress;
+ ss_out_desc.bEndpointAddress =
+ fs_out_desc.bEndpointAddress;
+ ss_notify_desc.bEndpointAddress =
+ fs_notify_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(eth_ss_function);
+ if (!f->ss_descriptors)
+ goto fail;
}
rndis->port.open = rndis_open;
@@ -719,12 +789,15 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
*/
DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+ gadget_is_superspeed(c->cdev->gadget) ? "super" :
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
rndis->port.in_ep->name, rndis->port.out_ep->name,
rndis->notify->name);
return 0;
fail:
+ if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors)
+ usb_free_descriptors(f->ss_descriptors);
if (gadget_is_dualspeed(c->cdev->gadget) && f->hs_descriptors)
usb_free_descriptors(f->hs_descriptors);
if (f->descriptors)
@@ -738,9 +811,9 @@ fail:
/* we might as well release our claims on endpoints */
if (rndis->notify)
rndis->notify->driver_data = NULL;
- if (rndis->port.out)
+ if (rndis->port.out_ep->desc)
rndis->port.out_ep->driver_data = NULL;
- if (rndis->port.in)
+ if (rndis->port.in_ep->desc)
rndis->port.in_ep->driver_data = NULL;
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
@@ -756,6 +829,8 @@ rndis_unbind(struct usb_configuration *c, struct usb_function *f)
rndis_deregister(rndis->config);
rndis_exit();
+ if (gadget_is_superspeed(c->cdev->gadget))
+ usb_free_descriptors(f->ss_descriptors);
if (gadget_is_dualspeed(c->cdev->gadget))
usb_free_descriptors(f->hs_descriptors);
usb_free_descriptors(f->descriptors);
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 490b00b01a7..91fdf790ed2 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -27,18 +27,10 @@
* if you can arrange appropriate host side drivers.
*/
-struct gser_descs {
- struct usb_endpoint_descriptor *in;
- struct usb_endpoint_descriptor *out;
-};
-
struct f_gser {
struct gserial port;
u8 data_id;
u8 port_num;
-
- struct gser_descs fs;
- struct gser_descs hs;
};
static inline struct f_gser *func_to_gser(struct usb_function *f)
@@ -136,12 +128,15 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (gser->port.in->driver_data) {
DBG(cdev, "reset generic ttyGS%d\n", gser->port_num);
gserial_disconnect(&gser->port);
- } else {
+ }
+ if (!gser->port.in->desc || !gser->port.out->desc) {
DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
- gser->port.in_desc = ep_choose(cdev->gadget,
- gser->hs.in, gser->fs.in);
- gser->port.out_desc = ep_choose(cdev->gadget,
- gser->hs.out, gser->fs.out);
+ if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
+ config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
+ gser->port.in->desc = NULL;
+ gser->port.out->desc = NULL;
+ return -EINVAL;
+ }
}
gserial_connect(&gser->port, gser->port_num);
return 0;
@@ -193,12 +188,6 @@ gser_bind(struct usb_configuration *c, struct usb_function *f)
/* copy descriptors, and track endpoint copies */
f->descriptors = usb_copy_descriptors(gser_fs_function);
- gser->fs.in = usb_find_endpoint(gser_fs_function,
- f->descriptors, &gser_fs_in_desc);
- gser->fs.out = usb_find_endpoint(gser_fs_function,
- f->descriptors, &gser_fs_out_desc);
-
-
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
@@ -211,11 +200,6 @@ gser_bind(struct usb_configuration *c, struct usb_function *f)
/* copy descriptors, and track endpoint copies */
f->hs_descriptors = usb_copy_descriptors(gser_hs_function);
-
- gser->hs.in = usb_find_endpoint(gser_hs_function,
- f->hs_descriptors, &gser_hs_in_desc);
- gser->hs.out = usb_find_endpoint(gser_hs_function,
- f->hs_descriptors, &gser_hs_out_desc);
}
DBG(cdev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
diff --git a/drivers/usb/gadget/f_sourcesink.c b/drivers/usb/gadget/f_sourcesink.c
index e403a534dd5..e18b4f52095 100644
--- a/drivers/usb/gadget/f_sourcesink.c
+++ b/drivers/usb/gadget/f_sourcesink.c
@@ -131,6 +131,49 @@ static struct usb_descriptor_header *hs_source_sink_descs[] = {
NULL,
};
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_source_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_source_comp_desc = {
+ .bLength = USB_DT_SS_EP_COMP_SIZE,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_endpoint_descriptor ss_sink_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_sink_comp_desc = {
+ .bLength = USB_DT_SS_EP_COMP_SIZE,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ .wBytesPerInterval = 0,
+};
+
+static struct usb_descriptor_header *ss_source_sink_descs[] = {
+ (struct usb_descriptor_header *) &source_sink_intf,
+ (struct usb_descriptor_header *) &ss_source_desc,
+ (struct usb_descriptor_header *) &ss_source_comp_desc,
+ (struct usb_descriptor_header *) &ss_sink_desc,
+ (struct usb_descriptor_header *) &ss_sink_comp_desc,
+ NULL,
+};
+
/* function-specific strings: */
static struct usb_string strings_sourcesink[] = {
@@ -187,8 +230,18 @@ autoconf_fail:
f->hs_descriptors = hs_source_sink_descs;
}
+ /* support super speed hardware */
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ss_source_desc.bEndpointAddress =
+ fs_source_desc.bEndpointAddress;
+ ss_sink_desc.bEndpointAddress =
+ fs_sink_desc.bEndpointAddress;
+ f->ss_descriptors = ss_source_sink_descs;
+ }
+
DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
- gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ (gadget_is_superspeed(c->cdev->gadget) ? "super" :
+ (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")),
f->name, ss->in_ep->name, ss->out_ep->name);
return 0;
}
@@ -343,15 +396,14 @@ static int
enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss)
{
int result = 0;
- const struct usb_endpoint_descriptor *src, *sink;
struct usb_ep *ep;
- src = ep_choose(cdev->gadget, &hs_source_desc, &fs_source_desc);
- sink = ep_choose(cdev->gadget, &hs_sink_desc, &fs_sink_desc);
-
/* one endpoint writes (sources) zeroes IN (to the host) */
ep = ss->in_ep;
- result = usb_ep_enable(ep, src);
+ result = config_ep_by_speed(cdev->gadget, &(ss->function), ep);
+ if (result)
+ return result;
+ result = usb_ep_enable(ep);
if (result < 0)
return result;
ep->driver_data = ss;
@@ -367,7 +419,10 @@ fail:
/* one endpoint reads (sinks) anything OUT (from the host) */
ep = ss->out_ep;
- result = usb_ep_enable(ep, sink);
+ result = config_ep_by_speed(cdev->gadget, &(ss->function), ep);
+ if (result)
+ goto fail;
+ result = usb_ep_enable(ep);
if (result < 0)
goto fail;
ep->driver_data = ss;
@@ -435,6 +490,8 @@ static int sourcesink_setup(struct usb_configuration *c,
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
+ req->length = USB_BUFSIZ;
+
/* composite driver infrastructure handles everything except
* the two control test requests.
*/
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index 8675ca41532..3dc53754ab6 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -57,18 +57,10 @@
* caring about specific product and vendor IDs.
*/
-struct geth_descs {
- struct usb_endpoint_descriptor *in;
- struct usb_endpoint_descriptor *out;
-};
-
struct f_gether {
struct gether port;
char ethaddr[14];
-
- struct geth_descs fs;
- struct geth_descs hs;
};
static inline struct f_gether *func_to_geth(struct usb_function *f)
@@ -209,6 +201,46 @@ static struct usb_descriptor_header *hs_eth_function[] __initdata = {
NULL,
};
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_subset_in_desc __initdata = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor ss_subset_out_desc __initdata = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_subset_bulk_comp_desc __initdata = {
+ .bLength = sizeof ss_subset_bulk_comp_desc,
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ss_eth_function[] __initdata = {
+ (struct usb_descriptor_header *) &subset_data_intf,
+ (struct usb_descriptor_header *) &mdlm_header_desc,
+ (struct usb_descriptor_header *) &mdlm_desc,
+ (struct usb_descriptor_header *) &mdlm_detail_desc,
+ (struct usb_descriptor_header *) &ether_desc,
+ (struct usb_descriptor_header *) &ss_subset_in_desc,
+ (struct usb_descriptor_header *) &ss_subset_bulk_comp_desc,
+ (struct usb_descriptor_header *) &ss_subset_out_desc,
+ (struct usb_descriptor_header *) &ss_subset_bulk_comp_desc,
+ NULL,
+};
+
/* string descriptors: */
static struct usb_string geth_string_defs[] = {
@@ -243,10 +275,12 @@ static int geth_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
DBG(cdev, "init + activate cdc subset\n");
- geth->port.in = ep_choose(cdev->gadget,
- geth->hs.in, geth->fs.in);
- geth->port.out = ep_choose(cdev->gadget,
- geth->hs.out, geth->fs.out);
+ if (config_ep_by_speed(cdev->gadget, f, geth->port.in_ep) ||
+ config_ep_by_speed(cdev->gadget, f, geth->port.out_ep)) {
+ geth->port.in_ep->desc = NULL;
+ geth->port.out_ep->desc = NULL;
+ return -EINVAL;
+ }
net = gether_connect(&geth->port);
return IS_ERR(net) ? PTR_ERR(net) : 0;
@@ -296,12 +330,8 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
/* copy descriptors, and track endpoint copies */
f->descriptors = usb_copy_descriptors(fs_eth_function);
-
- geth->fs.in = usb_find_endpoint(fs_eth_function,
- f->descriptors, &fs_subset_in_desc);
- geth->fs.out = usb_find_endpoint(fs_eth_function,
- f->descriptors, &fs_subset_out_desc);
-
+ if (!f->descriptors)
+ goto fail;
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
@@ -315,11 +345,20 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
/* copy descriptors, and track endpoint copies */
f->hs_descriptors = usb_copy_descriptors(hs_eth_function);
+ if (!f->hs_descriptors)
+ goto fail;
+ }
- geth->hs.in = usb_find_endpoint(hs_eth_function,
- f->hs_descriptors, &hs_subset_in_desc);
- geth->hs.out = usb_find_endpoint(hs_eth_function,
- f->hs_descriptors, &hs_subset_out_desc);
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ss_subset_in_desc.bEndpointAddress =
+ fs_subset_in_desc.bEndpointAddress;
+ ss_subset_out_desc.bEndpointAddress =
+ fs_subset_out_desc.bEndpointAddress;
+
+ /* copy descriptors, and track endpoint copies */
+ f->ss_descriptors = usb_copy_descriptors(ss_eth_function);
+ if (!f->ss_descriptors)
+ goto fail;
}
/* NOTE: all that is done without knowing or caring about
@@ -328,15 +367,21 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
*/
DBG(cdev, "CDC Subset: %s speed IN/%s OUT/%s\n",
+ gadget_is_superspeed(c->cdev->gadget) ? "super" :
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
geth->port.in_ep->name, geth->port.out_ep->name);
return 0;
fail:
+ if (f->descriptors)
+ usb_free_descriptors(f->descriptors);
+ if (f->hs_descriptors)
+ usb_free_descriptors(f->hs_descriptors);
+
/* we might as well release our claims on endpoints */
- if (geth->port.out)
+ if (geth->port.out_ep->desc)
geth->port.out_ep->driver_data = NULL;
- if (geth->port.in)
+ if (geth->port.in_ep->desc)
geth->port.in_ep->driver_data = NULL;
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
@@ -347,6 +392,8 @@ fail:
static void
geth_unbind(struct usb_configuration *c, struct usb_function *f)
{
+ if (gadget_is_superspeed(c->cdev->gadget))
+ usb_free_descriptors(f->ss_descriptors);
if (gadget_is_dualspeed(c->cdev->gadget))
usb_free_descriptors(f->hs_descriptors);
usb_free_descriptors(f->descriptors);
diff --git a/drivers/usb/gadget/f_uvc.c b/drivers/usb/gadget/f_uvc.c
index be446b7e7ea..7a8b9aa4aea 100644
--- a/drivers/usb/gadget/f_uvc.c
+++ b/drivers/usb/gadget/f_uvc.c
@@ -262,8 +262,10 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
if (uvc->state != UVC_STATE_CONNECTED)
return 0;
- if (uvc->video.ep)
- usb_ep_enable(uvc->video.ep, &uvc_streaming_ep);
+ if (uvc->video.ep) {
+ uvc->video.ep->desc = &uvc_streaming_ep;
+ usb_ep_enable(uvc->video.ep);
+ }
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_STREAMON;
@@ -649,7 +651,7 @@ uvc_bind_config(struct usb_configuration *c,
if (ret)
kfree(uvc);
- return 0;
+ return ret;
error:
kfree(uvc);
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 0360f56221e..639e14a2fd1 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -929,6 +929,7 @@ static int standard_setup_req(struct fsg_dev *fsg,
case USB_DT_DEVICE:
VDBG(fsg, "get device descriptor\n");
+ device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket;
value = sizeof device_desc;
memcpy(req->buf, &device_desc, value);
break;
@@ -936,6 +937,11 @@ static int standard_setup_req(struct fsg_dev *fsg,
VDBG(fsg, "get device qualifier\n");
if (!gadget_is_dualspeed(fsg->gadget))
break;
+ /*
+ * Assume ep0 uses the same maxpacket value for both
+ * speeds
+ */
+ dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
value = sizeof dev_qualifier;
memcpy(req->buf, &dev_qualifier, value);
break;
@@ -2713,7 +2719,8 @@ static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
int rc;
ep->driver_data = fsg;
- rc = usb_ep_enable(ep, d);
+ ep->desc = d;
+ rc = usb_ep_enable(ep);
if (rc)
ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
return rc;
@@ -3416,7 +3423,6 @@ static int __init fsg_bind(struct usb_gadget *gadget)
}
/* Fix up the descriptors */
- device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket;
device_desc.idVendor = cpu_to_le16(mod_data.vendor);
device_desc.idProduct = cpu_to_le16(mod_data.product);
device_desc.bcdDevice = cpu_to_le16(mod_data.release);
@@ -3430,9 +3436,6 @@ static int __init fsg_bind(struct usb_gadget *gadget)
if (gadget_is_dualspeed(gadget)) {
fsg_hs_function[i + FSG_HS_FUNCTION_PRE_EP_ENTRIES] = NULL;
- /* Assume ep0 uses the same maxpacket value for both speeds */
- dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
-
/* Assume endpoint addresses are the same for both speeds */
fsg_hs_bulk_in_desc.bEndpointAddress =
fsg_fs_bulk_in_desc.bEndpointAddress;
@@ -3486,6 +3489,8 @@ static int __init fsg_bind(struct usb_gadget *gadget)
}
INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
+ INFO(fsg, "NOTE: This driver is deprecated. "
+ "Consider using g_mass_storage instead.\n");
INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);
pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 3a68e09309f..3bf872e1ad3 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -1927,6 +1927,10 @@ static int qe_pullup(struct usb_gadget *gadget, int is_on)
return -ENOTSUPP;
}
+static int fsl_qe_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int fsl_qe_stop(struct usb_gadget_driver *driver);
+
/* defined in usb_gadget.h */
static struct usb_gadget_ops qe_gadget_ops = {
.get_frame = qe_get_frame,
@@ -1935,6 +1939,8 @@ static struct usb_gadget_ops qe_gadget_ops = {
.vbus_session = qe_vbus_session,
.vbus_draw = qe_vbus_draw,
.pullup = qe_pullup,
+ .start = fsl_qe_start,
+ .stop = fsl_qe_stop,
};
/*-------------------------------------------------------------------------
@@ -2320,7 +2326,7 @@ static irqreturn_t qe_udc_irq(int irq, void *_udc)
/*-------------------------------------------------------------------------
Gadget driver probe and unregister.
--------------------------------------------------------------------------*/
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int fsl_qe_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
int retval;
@@ -2369,9 +2375,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
udc_controller->gadget.name, driver->driver.name);
return 0;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int fsl_qe_stop(struct usb_gadget_driver *driver)
{
struct qe_ep *loop_ep;
unsigned long flags;
@@ -2411,7 +2416,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
driver->driver.name);
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
/* udc structure's alloc and setup, include ep-param alloc */
static struct qe_udc __devinit *qe_udc_config(struct platform_device *ofdev)
@@ -2662,11 +2666,17 @@ static int __devinit qe_udc_probe(struct platform_device *ofdev)
if (ret)
goto err6;
+ ret = usb_add_gadget_udc(&ofdev->dev, &udc_controller->gadget);
+ if (ret)
+ goto err7;
+
dev_info(udc_controller->dev,
"%s USB controller initialized as device\n",
(udc_controller->soc_type == PORT_QE) ? "QE" : "CPM");
return 0;
+err7:
+ device_unregister(&udc_controller->gadget.dev);
err6:
free_irq(udc_controller->usb_irq, udc_controller);
err5:
@@ -2721,6 +2731,8 @@ static int __devexit qe_udc_remove(struct platform_device *ofdev)
if (!udc_controller)
return -ENODEV;
+ usb_del_gadget_udc(&udc_controller->gadget);
+
udc_controller->done = &done;
tasklet_disable(&udc_controller->rx_tasklet);
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 2cd9a60c7f3..de24a4233c2 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -46,7 +46,6 @@
#include <asm/system.h>
#include <asm/unaligned.h>
#include <asm/dma.h>
-#include <asm/cacheflush.h>
#include "fsl_usb2_udc.h"
@@ -118,6 +117,17 @@ static void (*_fsl_writel)(u32 v, unsigned __iomem *p);
#define fsl_readl(p) (*_fsl_readl)((p))
#define fsl_writel(v, p) (*_fsl_writel)((v), (p))
+static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata)
+{
+ if (pdata->big_endian_mmio) {
+ _fsl_readl = _fsl_readl_be;
+ _fsl_writel = _fsl_writel_be;
+ } else {
+ _fsl_readl = _fsl_readl_le;
+ _fsl_writel = _fsl_writel_le;
+ }
+}
+
static inline u32 cpu_to_hc32(const u32 x)
{
return udc_controller->pdata->big_endian_desc
@@ -132,6 +142,8 @@ static inline u32 hc32_to_cpu(const u32 x)
: le32_to_cpu((__force __le32)x);
}
#else /* !CONFIG_PPC32 */
+static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata) {}
+
#define fsl_readl(addr) readl(addr)
#define fsl_writel(val32, addr) writel(val32, addr)
#define cpu_to_hc32(x) cpu_to_le32(x)
@@ -1232,6 +1244,9 @@ static int fsl_pullup(struct usb_gadget *gadget, int is_on)
return 0;
}
+static int fsl_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int fsl_stop(struct usb_gadget_driver *driver);
/* defined in gadget.h */
static struct usb_gadget_ops fsl_gadget_ops = {
.get_frame = fsl_get_frame,
@@ -1240,6 +1255,8 @@ static struct usb_gadget_ops fsl_gadget_ops = {
.vbus_session = fsl_vbus_session,
.vbus_draw = fsl_vbus_draw,
.pullup = fsl_pullup,
+ .start = fsl_start,
+ .stop = fsl_stop,
};
/* Set protocol stall on ep0, protocol stall will automatically be cleared
@@ -1277,6 +1294,11 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction)
req->req.complete = NULL;
req->dtd_count = 0;
+ req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+ req->req.buf, req->req.length,
+ ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = 1;
+
if (fsl_req_to_dtd(req) == 0)
fsl_queue_td(ep, req);
else
@@ -1348,9 +1370,6 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
/* Fill in the request structure */
*((u16 *) req->req.buf) = cpu_to_le16(tmp);
- /* flush cache for the req buffer */
- flush_dcache_range((u32)req->req.buf, (u32)req->req.buf + 8);
-
req->ep = ep;
req->req.length = 2;
req->req.status = -EINPROGRESS;
@@ -1358,6 +1377,11 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
req->req.complete = NULL;
req->dtd_count = 0;
+ req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+ req->req.buf, req->req.length,
+ ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = 1;
+
/* prime the data phase */
if ((fsl_req_to_dtd(req) == 0))
fsl_queue_td(ep, req);
@@ -1908,7 +1932,7 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
* Hook to gadget drivers
* Called by initialization code of gadget drivers
*----------------------------------------------------------------*/
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int fsl_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
int retval = -ENODEV;
@@ -1976,10 +2000,9 @@ out:
retval);
return retval;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
/* Disconnect from gadget driver */
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int fsl_stop(struct usb_gadget_driver *driver)
{
struct fsl_ep *loop_ep;
unsigned long flags;
@@ -2022,7 +2045,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
driver->driver.name);
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
/*-------------------------------------------------------------------------
PROC File System Support
@@ -2354,7 +2376,6 @@ static int __init struct_udc_setup(struct fsl_udc *udc,
struct fsl_req, req);
/* allocate a small amount of memory to get valid address */
udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
- udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
udc->resume_state = USB_STATE_NOTATTACHED;
udc->usb_state = USB_STATE_POWERED;
@@ -2445,7 +2466,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
}
if (pdata->operating_mode == FSL_USB2_DR_DEVICE) {
- if (!request_mem_region(res->start, res->end - res->start + 1,
+ if (!request_mem_region(res->start, resource_size(res),
driver_name)) {
ERR("request mem region for %s failed\n", pdev->name);
ret = -EBUSY;
@@ -2470,13 +2491,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
}
/* Set accessors only after pdata->init() ! */
- if (pdata->big_endian_mmio) {
- _fsl_readl = _fsl_readl_be;
- _fsl_writel = _fsl_writel_be;
- } else {
- _fsl_readl = _fsl_readl_le;
- _fsl_writel = _fsl_writel_le;
- }
+ fsl_set_accessors(pdata);
#ifndef CONFIG_ARCH_MXC
if (pdata->have_sysif_regs)
@@ -2578,9 +2593,16 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_unregister;
}
+
+ ret = usb_add_gadget_udc(&pdev->dev, &udc_controller->gadget);
+ if (ret)
+ goto err_del_udc;
+
create_proc_file();
return 0;
+err_del_udc:
+ dma_pool_destroy(udc_controller->td_pool);
err_unregister:
device_unregister(&udc_controller->gadget.dev);
err_free_irq:
@@ -2593,7 +2615,7 @@ err_iounmap_noclk:
iounmap(dr_regs);
err_release_mem_region:
if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
err_kfree:
kfree(udc_controller);
udc_controller = NULL;
@@ -2612,6 +2634,8 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
if (!udc_controller)
return -ENODEV;
+
+ usb_del_gadget_udc(&udc_controller->gadget);
udc_controller->done = &done;
fsl_udc_clk_release();
@@ -2628,7 +2652,7 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
free_irq(udc_controller->irq, udc_controller);
iounmap(dr_regs);
if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
device_unregister(&udc_controller->gadget.dev);
/* free udc --wait for the release() finished */
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index 763d462454b..4ec888f9000 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -609,107 +609,6 @@ void fusb300_rdcxf(struct fusb300 *fusb300,
}
}
-#if 0
-static void fusb300_dbg_fifo(struct fusb300_ep *ep,
- u8 entry, u16 length)
-{
- u32 reg;
- u32 i = 0;
- u32 j = 0;
-
- reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM);
- reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) |
- FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG);
- reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) |
- FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG);
- iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM);
-
- for (i = 0; i < (length >> 2); i++) {
- if (i * 4 == 1024)
- break;
- reg = ioread32(ep->fusb300->reg +
- FUSB300_OFFSET_BUFDBG_START + i * 4);
- printk(KERN_DEBUG" 0x%-8x", reg);
- j++;
- if ((j % 4) == 0)
- printk(KERN_DEBUG "\n");
- }
-
- if (length % 4) {
- reg = ioread32(ep->fusb300->reg +
- FUSB300_OFFSET_BUFDBG_START + i * 4);
- printk(KERN_DEBUG " 0x%x\n", reg);
- }
-
- if ((j % 4) != 0)
- printk(KERN_DEBUG "\n");
-
- fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM,
- FUSB300_GTM_TST_FIFO_DEG);
-}
-
-static void fusb300_cmp_dbg_fifo(struct fusb300_ep *ep,
- u8 entry, u16 length, u8 *golden)
-{
- u32 reg;
- u32 i = 0;
- u32 golden_value;
- u8 *tmp;
-
- tmp = golden;
-
- printk(KERN_DEBUG "fusb300_cmp_dbg_fifo (entry %d) : start\n", entry);
-
- reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM);
- reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) |
- FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG);
- reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) |
- FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG);
- iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM);
-
- for (i = 0; i < (length >> 2); i++) {
- if (i * 4 == 1024)
- break;
- golden_value = *tmp | *(tmp + 1) << 8 |
- *(tmp + 2) << 16 | *(tmp + 3) << 24;
-
- reg = ioread32(ep->fusb300->reg +
- FUSB300_OFFSET_BUFDBG_START + i*4);
-
- if (reg != golden_value) {
- printk(KERN_DEBUG "0x%x : ", (u32)(ep->fusb300->reg +
- FUSB300_OFFSET_BUFDBG_START + i*4));
- printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n",
- golden_value, reg);
- }
- tmp += 4;
- }
-
- switch (length % 4) {
- case 1:
- golden_value = *tmp;
- case 2:
- golden_value = *tmp | *(tmp + 1) << 8;
- case 3:
- golden_value = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16;
- default:
- break;
-
- reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i*4);
- if (reg != golden_value) {
- printk(KERN_DEBUG "0x%x:", (u32)(ep->fusb300->reg +
- FUSB300_OFFSET_BUFDBG_START + i*4));
- printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n",
- golden_value, reg);
- }
- }
-
- printk(KERN_DEBUG "fusb300_cmp_dbg_fifo : end\n");
- fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM,
- FUSB300_GTM_TST_FIFO_DEG);
-}
-#endif
-
static void fusb300_rdfifo(struct fusb300_ep *ep,
struct fusb300_request *req,
u32 length)
@@ -767,56 +666,6 @@ static void fusb300_rdfifo(struct fusb300_ep *ep,
} while (!reg);
}
-/* write data to fifo */
-static void fusb300_wrfifo(struct fusb300_ep *ep,
- struct fusb300_request *req)
-{
- int i = 0;
- u8 *tmp;
- u32 data, reg;
- struct fusb300 *fusb300 = ep->fusb300;
-
- tmp = req->req.buf;
- req->req.actual = req->req.length;
-
- for (i = (req->req.length >> 2); i > 0; i--) {
- data = *tmp | *(tmp + 1) << 8 |
- *(tmp + 2) << 16 | *(tmp + 3) << 24;
-
- iowrite32(data, fusb300->reg +
- FUSB300_OFFSET_EPPORT(ep->epnum));
- tmp += 4;
- }
-
- switch (req->req.length % 4) {
- case 1:
- data = *tmp;
- iowrite32(data, fusb300->reg +
- FUSB300_OFFSET_EPPORT(ep->epnum));
- break;
- case 2:
- data = *tmp | *(tmp + 1) << 8;
- iowrite32(data, fusb300->reg +
- FUSB300_OFFSET_EPPORT(ep->epnum));
- break;
- case 3:
- data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16;
- iowrite32(data, fusb300->reg +
- FUSB300_OFFSET_EPPORT(ep->epnum));
- break;
- default:
- break;
- }
-
- do {
- reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1);
- reg &= FUSB300_IGR1_SYNF0_EMPTY_INT;
- if (i)
- printk(KERN_INFO"sync fifo is not empty!\n");
- i++;
- } while (!reg);
-}
-
static u8 fusb300_get_epnstall(struct fusb300 *fusb300, u8 ep)
{
u8 value;
@@ -980,11 +829,6 @@ static void set_address(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
} \
} while (0)
-static void fusb300_ep0_complete(struct usb_ep *ep,
- struct usb_request *req)
-{
-}
-
static int setup_packet(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
{
u8 *p = (u8 *)ctrl;
@@ -1029,17 +873,6 @@ static int setup_packet(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl)
return ret;
}
-static void fusb300_set_ep_bycnt(struct fusb300_ep *ep, u32 bycnt)
-{
- struct fusb300 *fusb300 = ep->fusb300;
- u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum));
-
- reg &= ~FUSB300_FFR_BYCNT;
- reg |= bycnt & FUSB300_FFR_BYCNT;
-
- iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum));
-}
-
static void done(struct fusb300_ep *ep, struct fusb300_request *req,
int status)
{
@@ -1063,8 +896,8 @@ static void done(struct fusb300_ep *ep, struct fusb300_request *req,
fusb300_set_cxdone(ep->fusb300);
}
-void fusb300_fill_idma_prdtbl(struct fusb300_ep *ep,
- struct fusb300_request *req)
+static void fusb300_fill_idma_prdtbl(struct fusb300_ep *ep, dma_addr_t d,
+ u32 len)
{
u32 value;
u32 reg;
@@ -1076,10 +909,9 @@ void fusb300_fill_idma_prdtbl(struct fusb300_ep *ep,
reg &= FUSB300_EPPRD0_H;
} while (reg);
- iowrite32((u32) req->req.buf, ep->fusb300->reg +
- FUSB300_OFFSET_EPPRD_W1(ep->epnum));
+ iowrite32(d, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W1(ep->epnum));
- value = FUSB300_EPPRD0_BTC(req->req.length) | FUSB300_EPPRD0_H |
+ value = FUSB300_EPPRD0_BTC(len) | FUSB300_EPPRD0_H |
FUSB300_EPPRD0_F | FUSB300_EPPRD0_L | FUSB300_EPPRD0_I;
iowrite32(value, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W0(ep->epnum));
@@ -1116,13 +948,12 @@ static void fusb300_set_idma(struct fusb300_ep *ep,
struct fusb300_request *req)
{
dma_addr_t d;
- u8 *tmp = NULL;
d = dma_map_single(NULL, req->req.buf, req->req.length, DMA_TO_DEVICE);
if (dma_mapping_error(NULL, d)) {
- kfree(req->req.buf);
printk(KERN_DEBUG "dma_mapping_error\n");
+ return;
}
dma_sync_single_for_device(NULL, d, req->req.length, DMA_TO_DEVICE);
@@ -1130,17 +961,11 @@ static void fusb300_set_idma(struct fusb300_ep *ep,
fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER0,
FUSB300_IGER0_EEPn_PRD_INT(ep->epnum));
- tmp = req->req.buf;
- req->req.buf = (u8 *)d;
-
- fusb300_fill_idma_prdtbl(ep, req);
+ fusb300_fill_idma_prdtbl(ep, d, req->req.length);
/* check idma is done */
fusb300_wait_idma_finished(ep);
- req->req.buf = tmp;
-
- if (d)
- dma_unmap_single(NULL, d, req->req.length, DMA_TO_DEVICE);
+ dma_unmap_single(NULL, d, req->req.length, DMA_TO_DEVICE);
}
static void in_ep_fifo_handler(struct fusb300_ep *ep)
@@ -1148,14 +973,8 @@ static void in_ep_fifo_handler(struct fusb300_ep *ep)
struct fusb300_request *req = list_entry(ep->queue.next,
struct fusb300_request, queue);
- if (req->req.length) {
-#if 0
- fusb300_set_ep_bycnt(ep, req->req.length);
- fusb300_wrfifo(ep, req);
-#else
+ if (req->req.length)
fusb300_set_idma(ep, req);
-#endif
- }
done(ep, req, 0);
}
@@ -1500,7 +1319,7 @@ static void init_controller(struct fusb300 *fusb300)
/*------------------------------------------------------------------------*/
static struct fusb300 *the_controller;
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int fusb300_udc_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct fusb300 *fusb300 = the_controller;
@@ -1544,9 +1363,8 @@ error:
return retval;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int fusb300_udc_stop(struct usb_gadget_driver *driver)
{
struct fusb300 *fusb300 = the_controller;
@@ -1562,7 +1380,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
/*--------------------------------------------------------------------------*/
static int fusb300_udc_pullup(struct usb_gadget *_gadget, int is_active)
@@ -1572,12 +1389,15 @@ static int fusb300_udc_pullup(struct usb_gadget *_gadget, int is_active)
static struct usb_gadget_ops fusb300_gadget_ops = {
.pullup = fusb300_udc_pullup,
+ .start = fusb300_udc_start,
+ .stop = fusb300_udc_stop,
};
static int __exit fusb300_remove(struct platform_device *pdev)
{
struct fusb300 *fusb300 = dev_get_drvdata(&pdev->dev);
+ usb_del_gadget_udc(&fusb300->gadget);
iounmap(fusb300->reg);
free_irq(platform_get_irq(pdev, 0), fusb300);
@@ -1702,9 +1522,15 @@ static int __init fusb300_probe(struct platform_device *pdev)
goto clean_up3;
init_controller(fusb300);
+ ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget);
+ if (ret)
+ goto err_add_udc;
+
dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
return 0;
+err_add_udc:
+ fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
clean_up3:
free_irq(ires->start, fusb300);
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
index ebf6970a10b..704c2800ac0 100644
--- a/drivers/usb/gadget/g_ffs.c
+++ b/drivers/usb/gadget/g_ffs.c
@@ -162,6 +162,7 @@ static struct usb_composite_driver gfs_driver = {
.name = DRIVER_NAME,
.dev = &gfs_dev_desc,
.strings = gfs_dev_strings,
+ .max_speed = USB_SPEED_HIGH,
.unbind = gfs_unbind,
.iProduct = DRIVER_DESC,
};
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index bcdac7c73e8..f3a83cd0ef5 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -15,150 +15,40 @@
#ifndef __GADGET_CHIPS_H
#define __GADGET_CHIPS_H
-#ifdef CONFIG_USB_GADGET_NET2280
-#define gadget_is_net2280(g) !strcmp("net2280", (g)->name)
-#else
-#define gadget_is_net2280(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_AMD5536UDC
-#define gadget_is_amd5536udc(g) !strcmp("amd5536udc", (g)->name)
-#else
-#define gadget_is_amd5536udc(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_DUMMY_HCD
-#define gadget_is_dummy(g) !strcmp("dummy_udc", (g)->name)
-#else
-#define gadget_is_dummy(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_PXA25X
-#define gadget_is_pxa(g) !strcmp("pxa25x_udc", (g)->name)
-#else
-#define gadget_is_pxa(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_GOKU
-#define gadget_is_goku(g) !strcmp("goku_udc", (g)->name)
-#else
-#define gadget_is_goku(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_OMAP
-#define gadget_is_omap(g) !strcmp("omap_udc", (g)->name)
-#else
-#define gadget_is_omap(g) 0
-#endif
-
-/* various unstable versions available */
-#ifdef CONFIG_USB_GADGET_PXA27X
-#define gadget_is_pxa27x(g) !strcmp("pxa27x_udc", (g)->name)
-#else
-#define gadget_is_pxa27x(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_ATMEL_USBA
-#define gadget_is_atmel_usba(g) !strcmp("atmel_usba_udc", (g)->name)
-#else
-#define gadget_is_atmel_usba(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_S3C2410
-#define gadget_is_s3c2410(g) !strcmp("s3c2410_udc", (g)->name)
-#else
-#define gadget_is_s3c2410(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_AT91
-#define gadget_is_at91(g) !strcmp("at91_udc", (g)->name)
-#else
-#define gadget_is_at91(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_IMX
-#define gadget_is_imx(g) !strcmp("imx_udc", (g)->name)
-#else
-#define gadget_is_imx(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_FSL_USB2
-#define gadget_is_fsl_usb2(g) !strcmp("fsl-usb2-udc", (g)->name)
-#else
-#define gadget_is_fsl_usb2(g) 0
-#endif
-
-/* Mentor high speed "dual role" controller, in peripheral role */
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
-#define gadget_is_musbhdrc(g) !strcmp("musb-hdrc", (g)->name)
-#else
-#define gadget_is_musbhdrc(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_LANGWELL
-#define gadget_is_langwell(g) (!strcmp("langwell_udc", (g)->name))
-#else
-#define gadget_is_langwell(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_M66592
-#define gadget_is_m66592(g) !strcmp("m66592_udc", (g)->name)
-#else
-#define gadget_is_m66592(g) 0
-#endif
-
-/* Freescale CPM/QE UDC SUPPORT */
-#ifdef CONFIG_USB_GADGET_FSL_QE
-#define gadget_is_fsl_qe(g) !strcmp("fsl_qe_udc", (g)->name)
-#else
-#define gadget_is_fsl_qe(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_CI13XXX_PCI
-#define gadget_is_ci13xxx_pci(g) (!strcmp("ci13xxx_pci", (g)->name))
-#else
-#define gadget_is_ci13xxx_pci(g) 0
-#endif
-
-// CONFIG_USB_GADGET_SX2
-// CONFIG_USB_GADGET_AU1X00
-// ...
-
-#ifdef CONFIG_USB_GADGET_R8A66597
-#define gadget_is_r8a66597(g) !strcmp("r8a66597_udc", (g)->name)
-#else
-#define gadget_is_r8a66597(g) 0
-#endif
-
-#ifdef CONFIG_USB_S3C_HSOTG
-#define gadget_is_s3c_hsotg(g) (!strcmp("s3c-hsotg", (g)->name))
-#else
-#define gadget_is_s3c_hsotg(g) 0
-#endif
-
-#ifdef CONFIG_USB_S3C_HSUDC
-#define gadget_is_s3c_hsudc(g) (!strcmp("s3c-hsudc", (g)->name))
-#else
-#define gadget_is_s3c_hsudc(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_EG20T
-#define gadget_is_pch(g) (!strcmp("pch_udc", (g)->name))
-#else
-#define gadget_is_pch(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_CI13XXX_MSM
+/*
+ * NOTICE: the entries below are alphabetical and should be kept
+ * that way.
+ *
+ * Always be sure to add new entries to the correct position or
+ * accept the bashing later.
+ *
+ * If you have forgotten the alphabetical order let VIM/EMACS
+ * do that for you.
+ */
+#define gadget_is_amd5536udc(g) (!strcmp("amd5536udc", (g)->name))
+#define gadget_is_at91(g) (!strcmp("at91_udc", (g)->name))
+#define gadget_is_atmel_usba(g) (!strcmp("atmel_usba_udc", (g)->name))
#define gadget_is_ci13xxx_msm(g) (!strcmp("ci13xxx_msm", (g)->name))
-#else
-#define gadget_is_ci13xxx_msm(g) 0
-#endif
-
-#ifdef CONFIG_USB_GADGET_RENESAS_USBHS
-#define gadget_is_renesas_usbhs(g) (!strcmp("renesas_usbhs_udc", (g)->name))
-#else
-#define gadget_is_renesas_usbhs(g) 0
-#endif
+#define gadget_is_ci13xxx_pci(g) (!strcmp("ci13xxx_pci", (g)->name))
+#define gadget_is_dummy(g) (!strcmp("dummy_udc", (g)->name))
+#define gadget_is_fsl_qe(g) (!strcmp("fsl_qe_udc", (g)->name))
+#define gadget_is_fsl_usb2(g) (!strcmp("fsl-usb2-udc", (g)->name))
+#define gadget_is_goku(g) (!strcmp("goku_udc", (g)->name))
+#define gadget_is_imx(g) (!strcmp("imx_udc", (g)->name))
+#define gadget_is_langwell(g) (!strcmp("langwell_udc", (g)->name))
+#define gadget_is_m66592(g) (!strcmp("m66592_udc", (g)->name))
+#define gadget_is_musbhdrc(g) (!strcmp("musb-hdrc", (g)->name))
+#define gadget_is_net2272(g) (!strcmp("net2272", (g)->name))
+#define gadget_is_net2280(g) (!strcmp("net2280", (g)->name))
+#define gadget_is_omap(g) (!strcmp("omap_udc", (g)->name))
+#define gadget_is_pch(g) (!strcmp("pch_udc", (g)->name))
+#define gadget_is_pxa(g) (!strcmp("pxa25x_udc", (g)->name))
+#define gadget_is_pxa27x(g) (!strcmp("pxa27x_udc", (g)->name))
+#define gadget_is_r8a66597(g) (!strcmp("r8a66597_udc", (g)->name))
+#define gadget_is_renesas_usbhs(g) (!strcmp("renesas_usbhs_udc", (g)->name))
+#define gadget_is_s3c2410(g) (!strcmp("s3c2410_udc", (g)->name))
+#define gadget_is_s3c_hsotg(g) (!strcmp("s3c-hsotg", (g)->name))
+#define gadget_is_s3c_hsudc(g) (!strcmp("s3c-hsudc", (g)->name))
/**
* usb_gadget_controller_number - support bcdDevice id convention
@@ -223,6 +113,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
return 0x29;
else if (gadget_is_s3c_hsudc(gadget))
return 0x30;
+ else if (gadget_is_net2272(gadget))
+ return 0x31;
return -ENOENT;
}
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 47b86b99d44..8b9220e128a 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -537,14 +537,16 @@ static int set_gmidi_config(struct gmidi_device *dev, gfp_t gfp_flags)
struct usb_ep *ep;
unsigned i;
- err = usb_ep_enable(dev->in_ep, &bulk_in_desc);
+ dev->in_ep->desc = &bulk_in_desc;
+ err = usb_ep_enable(dev->in_ep);
if (err) {
ERROR(dev, "can't start %s: %d\n", dev->in_ep->name, err);
goto fail;
}
dev->in_ep->driver_data = dev;
- err = usb_ep_enable(dev->out_ep, &bulk_out_desc);
+ dev->out_ep->desc = &bulk_out_desc;
+ err = usb_ep_enable(dev->out_ep);
if (err) {
ERROR(dev, "can't start %s: %d\n", dev->out_ep->name, err);
goto fail;
@@ -693,6 +695,7 @@ static int gmidi_setup(struct usb_gadget *gadget,
switch (w_value >> 8) {
case USB_DT_DEVICE:
+ device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
value = min(w_length, (u16) sizeof(device_desc));
memcpy(req->buf, &device_desc, value);
break;
@@ -1247,8 +1250,6 @@ autoconf_fail:
dev->req->complete = gmidi_setup_complete;
- device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
-
gadget->ep0->driver_data = dev;
INFO(dev, "%s, version: " DRIVER_VERSION "\n", longname);
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index bf6e11c758d..7f87805cddc 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -996,8 +996,14 @@ static int goku_get_frame(struct usb_gadget *_gadget)
return -EOPNOTSUPP;
}
+static int goku_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int goku_stop(struct usb_gadget_driver *driver);
+
static const struct usb_gadget_ops goku_ops = {
.get_frame = goku_get_frame,
+ .start = goku_start,
+ .stop = goku_stop,
// no remote wakeup
// not selfpowered
};
@@ -1344,7 +1350,7 @@ static struct goku_udc *the_controller;
* disconnect is reported. then a host may connect again, or
* the driver might get unbound.
*/
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int goku_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct goku_udc *dev = the_controller;
@@ -1382,7 +1388,6 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
DBG(dev, "registered gadget driver '%s'\n", driver->driver.name);
return 0;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
static void
stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
@@ -1408,7 +1413,7 @@ stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
udc_enable(dev);
}
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int goku_stop(struct usb_gadget_driver *driver)
{
struct goku_udc *dev = the_controller;
unsigned long flags;
@@ -1429,8 +1434,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
DBG(dev, "unregistered driver '%s'\n", driver->driver.name);
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
-
/*-------------------------------------------------------------------------*/
@@ -1730,6 +1733,8 @@ static void goku_remove(struct pci_dev *pdev)
DBG(dev, "%s\n", __func__);
+ usb_del_gadget_udc(&dev->gadget);
+
BUG_ON(dev->driver);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
@@ -1854,6 +1859,10 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err;
}
dev->registered = 1;
+ retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
+ if (retval)
+ goto err;
+
return 0;
err:
diff --git a/drivers/usb/gadget/hid.c b/drivers/usb/gadget/hid.c
index 2523e54097b..9fb575034a0 100644
--- a/drivers/usb/gadget/hid.c
+++ b/drivers/usb/gadget/hid.c
@@ -255,6 +255,7 @@ static struct usb_composite_driver hidg_driver = {
.name = "g_hid",
.dev = &device_desc,
.strings = dev_strings,
+ .max_speed = USB_SPEED_HIGH,
.unbind = __exit_p(hid_unbind),
};
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index ade40066dec..692fd9b2248 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -1237,9 +1237,14 @@ irq_handler_t intr_handler(int i)
*******************************************************************************
*/
+static int imx_udc_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int imx_udc_stop(struct usb_gadget_driver *driver);
static const struct usb_gadget_ops imx_udc_ops = {
.get_frame = imx_udc_get_frame,
.wakeup = imx_udc_wakeup,
+ .start = imx_udc_start,
+ .stop = imx_udc_stop,
};
static struct imx_udc_struct controller = {
@@ -1324,7 +1329,7 @@ static struct imx_udc_struct controller = {
* USB gadget driver functions
*******************************************************************************
*/
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int imx_udc_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct imx_udc_struct *imx_usb = &controller;
@@ -1368,9 +1373,8 @@ fail:
imx_usb->gadget.dev.driver = NULL;
return retval;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int imx_udc_stop(struct usb_gadget_driver *driver)
{
struct imx_udc_struct *imx_usb = &controller;
@@ -1394,7 +1398,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
/*******************************************************************************
* Module functions
@@ -1504,8 +1507,14 @@ static int __init imx_udc_probe(struct platform_device *pdev)
imx_usb->timer.function = handle_config;
imx_usb->timer.data = (unsigned long)imx_usb;
- return 0;
+ ret = usb_add_gadget_udc(&pdev->dev, &imx_usb->gadget);
+ if (ret)
+ goto fail4;
+ return 0;
+fail4:
+ for (i = 0; i < IMX_USB_NB_EP + 1; i++)
+ free_irq(imx_usb->usbd_int[i], imx_usb);
fail3:
clk_put(clk);
clk_disable(clk);
@@ -1525,6 +1534,7 @@ static int __exit imx_udc_remove(struct platform_device *pdev)
struct imxusb_platform_data *pdata = pdev->dev.platform_data;
int i;
+ usb_del_gadget_udc(&imx_usb->gadget);
imx_udc_disable(imx_usb);
del_timer(&imx_usb->timer);
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index a56876aaf76..1b240990448 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -832,14 +832,16 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
switch (data->dev->gadget->speed) {
case USB_SPEED_LOW:
case USB_SPEED_FULL:
- value = usb_ep_enable (ep, &data->desc);
+ ep->desc = &data->desc;
+ value = usb_ep_enable(ep);
if (value == 0)
data->state = STATE_EP_ENABLED;
break;
#ifdef CONFIG_USB_GADGET_DUALSPEED
case USB_SPEED_HIGH:
/* fails if caller didn't provide that descriptor... */
- value = usb_ep_enable (ep, &data->hs_desc);
+ ep->desc = &data->hs_desc;
+ value = usb_ep_enable(ep);
if (value == 0)
data->state = STATE_EP_ENABLED;
break;
@@ -1345,7 +1347,7 @@ static void make_qualifier (struct dev_data *dev)
qual.bDeviceProtocol = desc->bDeviceProtocol;
/* assumes ep0 uses the same value for both speeds ... */
- qual.bMaxPacketSize0 = desc->bMaxPacketSize0;
+ qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
qual.bNumConfigurations = 1;
qual.bRESERVED = 0;
@@ -1402,7 +1404,6 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
}
dev->state = STATE_DEV_CONNECTED;
- dev->dev->bMaxPacketSize0 = gadget->ep0->maxpacket;
INFO (dev, "connected\n");
event = next_event (dev, GADGETFS_CONNECT);
@@ -1430,6 +1431,7 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
case USB_DT_DEVICE:
value = min (w_length, (u16) sizeof *dev->dev);
+ dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
req->buf = dev->dev;
break;
#ifdef CONFIG_USB_GADGET_DUALSPEED
@@ -1710,7 +1712,6 @@ gadgetfs_bind (struct usb_gadget *gadget)
set_gadget_data (gadget, dev);
dev->gadget = gadget;
gadget->ep0->driver_data = dev;
- dev->dev->bMaxPacketSize0 = gadget->ep0->maxpacket;
/* preallocate control response and buffer */
dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
index 9cee88a43a7..a06e2c27b43 100644
--- a/drivers/usb/gadget/langwell_udc.c
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -593,8 +593,8 @@ static int queue_dtd(struct langwell_ep *ep, struct langwell_request *req)
/* ep0 */
dev_vdbg(&dev->pdev->dev, "%s-%s\n", ep->name, DIR_STRING(ep));
- dev_vdbg(&dev->pdev->dev, "ep_dqh[%d] addr: 0x%08x\n",
- i, (u32)&(dev->ep_dqh[i]));
+ dev_vdbg(&dev->pdev->dev, "ep_dqh[%d] addr: 0x%p\n",
+ i, &(dev->ep_dqh[i]));
bit_mask = is_in(ep) ?
(1 << (ep->ep_num + 16)) : (1 << (ep->ep_num));
@@ -1321,7 +1321,9 @@ static int langwell_pullup(struct usb_gadget *_gadget, int is_on)
return 0;
}
-
+static int langwell_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int langwell_stop(struct usb_gadget_driver *driver);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops langwell_ops = {
@@ -1342,6 +1344,9 @@ static const struct usb_gadget_ops langwell_ops = {
/* D+ pullup, software-controlled connect/disconnect to USB host */
.pullup = langwell_pullup,
+
+ .start = langwell_start,
+ .stop = langwell_stop,
};
@@ -1852,7 +1857,7 @@ static DEVICE_ATTR(remote_wakeup, S_IWUSR, NULL, store_remote_wakeup);
* the driver might get unbound.
*/
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int langwell_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct langwell_udc *dev = the_controller;
@@ -1914,11 +1919,9 @@ err_unbind:
dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
return retval;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-
/* unregister gadget driver */
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int langwell_stop(struct usb_gadget_driver *driver)
{
struct langwell_udc *dev = the_controller;
unsigned long flags;
@@ -1965,8 +1968,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
dev_dbg(&dev->pdev->dev, "<--- %s()\n", __func__);
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
-
/*-------------------------------------------------------------------------*/
@@ -3270,7 +3271,7 @@ static int langwell_udc_probe(struct pci_dev *pdev,
/* allocate device dQH memory */
size = dev->ep_max * sizeof(struct langwell_dqh);
- dev_vdbg(&dev->pdev->dev, "orig size = %d\n", size);
+ dev_vdbg(&dev->pdev->dev, "orig size = %zd\n", size);
if (size < DQH_ALIGNMENT)
size = DQH_ALIGNMENT;
else if ((size % DQH_ALIGNMENT) != 0) {
@@ -3285,7 +3286,7 @@ static int langwell_udc_probe(struct pci_dev *pdev,
goto error;
}
dev->ep_dqh_size = size;
- dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %d\n", dev->ep_dqh_size);
+ dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %zd\n", dev->ep_dqh_size);
/* initialize ep0 status request structure */
dev->status_req = kzalloc(sizeof(struct langwell_request), GFP_KERNEL);
@@ -3373,6 +3374,10 @@ static int langwell_udc_probe(struct pci_dev *pdev,
if (retval)
goto error;
+ retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
+ if (retval)
+ goto error;
+
retval = device_create_file(&pdev->dev, &dev_attr_langwell_udc);
if (retval)
goto error;
@@ -3403,6 +3408,7 @@ static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
+ usb_del_gadget_udc(&dev->gadget);
/* disable interrupt and set controller to stop state */
langwell_udc_stop(dev);
@@ -3464,7 +3470,7 @@ static int langwell_udc_resume(struct pci_dev *pdev)
/* allocate device dQH memory */
size = dev->ep_max * sizeof(struct langwell_dqh);
- dev_vdbg(&dev->pdev->dev, "orig size = %d\n", size);
+ dev_vdbg(&dev->pdev->dev, "orig size = %zd\n", size);
if (size < DQH_ALIGNMENT)
size = DQH_ALIGNMENT;
else if ((size % DQH_ALIGNMENT) != 0) {
@@ -3478,7 +3484,7 @@ static int langwell_udc_resume(struct pci_dev *pdev)
return -ENOMEM;
}
dev->ep_dqh_size = size;
- dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %d\n", dev->ep_dqh_size);
+ dev_vdbg(&dev->pdev->dev, "ep_dqh_size = %zd\n", dev->ep_dqh_size);
/* create dTD dma_pool resource */
dev->dtd_pool = dma_pool_create("langwell_dtd",
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 084aa080a2d..491f825ed5c 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2006-2007 Renesas Solutions Corp.
*
- * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -691,6 +691,7 @@ static void init_controller(struct m66592 *m66592)
static void disable_controller(struct m66592 *m66592)
{
+ m66592_bclr(m66592, M66592_UTST, M66592_TESTMODE);
if (!m66592->pdata->on_chip) {
m66592_bclr(m66592, M66592_SCKE, M66592_SYSCFG);
udelay(1);
@@ -780,7 +781,7 @@ static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req)
/* write fifo */
if (req->req.buf) {
if (size > 0)
- m66592_write_fifo(m66592, ep->fifoaddr, buf, size);
+ m66592_write_fifo(m66592, ep, buf, size);
if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
m66592_bset(m66592, M66592_BVAL, ep->fifoctr);
}
@@ -826,7 +827,7 @@ static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req)
/* write fifo */
if (req->req.buf) {
- m66592_write_fifo(m66592, ep->fifoaddr, buf, size);
+ m66592_write_fifo(m66592, ep, buf, size);
if ((size == 0)
|| ((size % ep->ep.maxpacket) != 0)
|| ((bufsize != ep->ep.maxpacket)
@@ -1048,10 +1049,30 @@ static void clear_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
static void set_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
{
+ u16 tmp;
+ int timeout = 3000;
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
- control_end(m66592, 1);
+ switch (le16_to_cpu(ctrl->wValue)) {
+ case USB_DEVICE_TEST_MODE:
+ control_end(m66592, 1);
+ /* Wait for the completion of status stage */
+ do {
+ tmp = m66592_read(m66592, M66592_INTSTS0) &
+ M66592_CTSQ;
+ udelay(1);
+ } while (tmp != M66592_CS_IDST && timeout-- > 0);
+
+ if (tmp == M66592_CS_IDST)
+ m66592_bset(m66592,
+ le16_to_cpu(ctrl->wIndex) >> 8,
+ M66592_TESTMODE);
+ break;
+ default:
+ pipe_stall(m66592, 0);
+ break;
+ }
break;
case USB_RECIP_INTERFACE:
control_end(m66592, 1);
@@ -1454,7 +1475,7 @@ static struct usb_ep_ops m66592_ep_ops = {
/*-------------------------------------------------------------------------*/
static struct m66592 *the_controller;
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int m66592_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct m66592 *m66592 = the_controller;
@@ -1506,9 +1527,8 @@ error:
return retval;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int m66592_stop(struct usb_gadget_driver *driver)
{
struct m66592 *m66592 = the_controller;
unsigned long flags;
@@ -1533,7 +1553,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
m66592->driver = NULL;
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
/*-------------------------------------------------------------------------*/
static int m66592_get_frame(struct usb_gadget *_gadget)
@@ -1542,14 +1561,34 @@ static int m66592_get_frame(struct usb_gadget *_gadget)
return m66592_read(m66592, M66592_FRMNUM) & 0x03FF;
}
+static int m66592_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct m66592 *m66592 = gadget_to_m66592(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&m66592->lock, flags);
+ if (is_on)
+ m66592_bset(m66592, M66592_DPRPU, M66592_SYSCFG);
+ else
+ m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG);
+ spin_unlock_irqrestore(&m66592->lock, flags);
+
+ return 0;
+}
+
static struct usb_gadget_ops m66592_gadget_ops = {
.get_frame = m66592_get_frame,
+ .start = m66592_start,
+ .stop = m66592_stop,
+ .pullup = m66592_pullup,
};
static int __exit m66592_remove(struct platform_device *pdev)
{
struct m66592 *m66592 = dev_get_drvdata(&pdev->dev);
+ usb_del_gadget_udc(&m66592->gadget);
+
del_timer_sync(&m66592->timer);
iounmap(m66592->reg);
free_irq(platform_get_irq(pdev, 0), m66592);
@@ -1691,9 +1730,16 @@ static int __init m66592_probe(struct platform_device *pdev)
init_controller(m66592);
+ ret = usb_add_gadget_udc(&pdev->dev, &m66592->gadget);
+ if (ret)
+ goto err_add_udc;
+
dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
return 0;
+err_add_udc:
+ m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
+
clean_up3:
#ifdef CONFIG_HAVE_CLK
if (m66592->pdata->on_chip) {
diff --git a/drivers/usb/gadget/m66592-udc.h b/drivers/usb/gadget/m66592-udc.h
index c3caf1ac73c..7b93d579af3 100644
--- a/drivers/usb/gadget/m66592-udc.h
+++ b/drivers/usb/gadget/m66592-udc.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2006-2007 Renesas Solutions Corp.
*
- * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -561,11 +561,26 @@ static inline void m66592_write(struct m66592 *m66592, u16 val,
iowrite16(val, m66592->reg + offset);
}
+static inline void m66592_mdfy(struct m66592 *m66592, u16 val, u16 pat,
+ unsigned long offset)
+{
+ u16 tmp;
+ tmp = m66592_read(m66592, offset);
+ tmp = tmp & (~pat);
+ tmp = tmp | val;
+ m66592_write(m66592, tmp, offset);
+}
+
+#define m66592_bclr(m66592, val, offset) \
+ m66592_mdfy(m66592, 0, val, offset)
+#define m66592_bset(m66592, val, offset) \
+ m66592_mdfy(m66592, val, 0, offset)
+
static inline void m66592_write_fifo(struct m66592 *m66592,
- unsigned long offset,
+ struct m66592_ep *ep,
void *buf, unsigned long len)
{
- void __iomem *fifoaddr = m66592->reg + offset;
+ void __iomem *fifoaddr = m66592->reg + ep->fifoaddr;
if (m66592->pdata->on_chip) {
unsigned long count;
@@ -591,26 +606,15 @@ static inline void m66592_write_fifo(struct m66592 *m66592,
iowrite16_rep(fifoaddr, buf, len);
if (odd) {
unsigned char *p = buf + len*2;
+ if (m66592->pdata->wr0_shorted_to_wr1)
+ m66592_bclr(m66592, M66592_MBW_16, ep->fifosel);
iowrite8(*p, fifoaddr);
+ if (m66592->pdata->wr0_shorted_to_wr1)
+ m66592_bset(m66592, M66592_MBW_16, ep->fifosel);
}
}
}
-static inline void m66592_mdfy(struct m66592 *m66592, u16 val, u16 pat,
- unsigned long offset)
-{
- u16 tmp;
- tmp = m66592_read(m66592, offset);
- tmp = tmp & (~pat);
- tmp = tmp | val;
- m66592_write(m66592, tmp, offset);
-}
-
-#define m66592_bclr(m66592, val, offset) \
- m66592_mdfy(m66592, 0, val, offset)
-#define m66592_bset(m66592, val, offset) \
- m66592_mdfy(m66592, val, 0, offset)
-
#endif /* ifndef __M66592_UDC_H__ */
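The m66592_mdfy()/m66592_bset()/m66592_bclr() helpers are moved ahead of m66592_write_fifo() because the new wr0_shorted_to_wr1 workaround toggles M66592_MBW_16 around the trailing 8-bit write. Roughly, that bclr/bset pair expands to the read-modify-write sequence below (expansion sketch only, using the names from the surrounding code):

    u16 tmp = m66592_read(m66592, ep->fifosel);
    tmp &= ~M66592_MBW_16;                  /* m66592_bclr(): drop to 8-bit FIFO access */
    m66592_write(m66592, tmp, ep->fifosel);
    iowrite8(*p, fifoaddr);                 /* write the odd trailing byte              */
    tmp = m66592_read(m66592, ep->fifosel);
    tmp |= M66592_MBW_16;                   /* m66592_bset(): restore 16-bit access     */
    m66592_write(m66592, tmp, ep->fifosel);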
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
index 01822422c3e..d3eb27427c5 100644
--- a/drivers/usb/gadget/mass_storage.c
+++ b/drivers/usb/gadget/mass_storage.c
@@ -169,6 +169,7 @@ static struct usb_composite_driver msg_driver = {
.name = "g_mass_storage",
.dev = &msg_device_desc,
.iProduct = DRIVER_DESC,
+ .max_speed = USB_SPEED_HIGH,
.needs_serial = 1,
};
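The new .max_speed field declares the highest bus speed the composite gadget supports, which the composite core uses when binding to the UDC. A minimal sketch of a composite driver carrying the field (all names hypothetical):

    static struct usb_composite_driver example_driver = {
    	.name      = "g_example",           /* hypothetical gadget name       */
    	.dev       = &example_device_desc,  /* hypothetical device descriptor */
    	.strings   = example_strings,       /* hypothetical string table      */
    	.max_speed = USB_SPEED_HIGH,
    };
    /* registered the usual way: usb_composite_probe(&example_driver, example_bind); */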
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index d9feced348e..8c7b74717d8 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -351,6 +351,7 @@ static struct usb_composite_driver multi_driver = {
.name = "g_multi",
.dev = &device_desc,
.strings = dev_strings,
+ .max_speed = USB_SPEED_HIGH,
.unbind = __exit_p(multi_unbind),
.iProduct = DRIVER_DESC,
.needs_serial = 1,
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index b1a8146b9d5..ce1ac2bcb31 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -1128,6 +1128,9 @@ static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
return 0;
}
+static int mv_udc_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int mv_udc_stop(struct usb_gadget_driver *driver);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {
@@ -1139,6 +1142,8 @@ static const struct usb_gadget_ops mv_ops = {
/* D+ pullup, software-controlled connect/disconnect to USB host */
.pullup = mv_udc_pullup,
+ .start = mv_udc_start,
+ .stop = mv_udc_stop,
};
static void mv_udc_testmode(struct mv_udc *udc, u16 index, bool enter)
@@ -1230,7 +1235,7 @@ static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
}
}
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int mv_udc_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct mv_udc *udc = the_controller;
@@ -1270,9 +1275,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
return 0;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int mv_udc_stop(struct usb_gadget_driver *driver)
{
struct mv_udc *udc = the_controller;
unsigned long flags;
@@ -1296,7 +1300,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
@@ -1880,9 +1883,10 @@ static void gadget_release(struct device *_dev)
static int mv_udc_remove(struct platform_device *dev)
{
struct mv_udc *udc = the_controller;
-
DECLARE_COMPLETION(done);
+ usb_del_gadget_udc(&udc->gadget);
+
udc->done = &done;
/* free memory allocated in probe */
@@ -2074,11 +2078,12 @@ int mv_udc_probe(struct platform_device *dev)
the_controller = udc;
- goto out;
+ retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
+ if (!retval)
+ return retval;
error:
if (udc)
mv_udc_remove(udc->dev);
-out:
return retval;
}
@@ -2126,7 +2131,7 @@ static struct platform_driver udc_driver = {
#endif
},
};
-
+MODULE_ALIAS("platform:pxa-u2o");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
diff --git a/drivers/usb/gadget/ncm.c b/drivers/usb/gadget/ncm.c
index 99c179ad729..62ee5087dca 100644
--- a/drivers/usb/gadget/ncm.c
+++ b/drivers/usb/gadget/ncm.c
@@ -228,6 +228,7 @@ static struct usb_composite_driver ncm_driver = {
.name = "g_ncm",
.dev = &device_desc,
.strings = dev_strings,
+ .max_speed = USB_SPEED_HIGH,
.unbind = __exit_p(gncm_unbind),
};
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
new file mode 100644
index 00000000000..ab98ea926a1
--- /dev/null
+++ b/drivers/usb/gadget/net2272.c
@@ -0,0 +1,2752 @@
+/*
+ * Driver for PLX NET2272 USB device controller
+ *
+ * Copyright (C) 2005-2006 PLX Technology, Inc.
+ * Copyright (C) 2006-2011 Analog Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include <asm/byteorder.h>
+#include <asm/system.h>
+#include <asm/unaligned.h>
+
+#include "net2272.h"
+
+#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
+
+static const char driver_name[] = "net2272";
+static const char driver_vers[] = "2006 October 17/mainline";
+static const char driver_desc[] = DRIVER_DESC;
+
+static const char ep0name[] = "ep0";
+static const char * const ep_name[] = {
+ ep0name,
+ "ep-a", "ep-b", "ep-c",
+};
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+#ifdef CONFIG_USB_GADGET_NET2272_DMA
+/*
+ * use_dma: the NET2272 can use an external DMA controller.
+ * Note that since there is no generic DMA api, some functions,
+ * notably request_dma, start_dma, and cancel_dma will need to be
+ * modified for your platform's particular dma controller.
+ *
+ * If use_dma is disabled, pio will be used instead.
+ */
+static int use_dma = 0;
+module_param(use_dma, bool, 0644);
+
+/*
+ * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
+ * The NET2272 can only use dma for a single endpoint at a time.
+ * At some point this could be modified to allow either endpoint
+ * to take control of dma as it becomes available.
+ *
+ * Note that DMA should not be used on OUT endpoints unless it can
+ * be guaranteed that no short packets will arrive on an IN endpoint
+ * while the DMA operation is pending. Otherwise the OUT DMA will
+ * terminate prematurely (See NET2272 Errata 630-0213-0101)
+ */
+static ushort dma_ep = 1;
+module_param(dma_ep, ushort, 0644);
+
+/*
+ * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
+ * mode 0 == Slow DREQ mode
+ * mode 1 == Fast DREQ mode
+ * mode 2 == Burst mode
+ */
+static ushort dma_mode = 2;
+module_param(dma_mode, ushort, 0644);
+#else
+#define use_dma 0
+#define dma_ep 1
+#define dma_mode 2
+#endif
+
+/*
+ * fifo_mode: net2272 buffer configuration:
+ * mode 0 == ep-{a,b,c} 512db each
+ * mode 1 == ep-a 1k, ep-{b,c} 512db
+ * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
+ * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
+ */
+static ushort fifo_mode = 0;
+module_param(fifo_mode, ushort, 0644);
+
+/*
+ * enable_suspend: When enabled, the driver will respond to
+ * USB suspend requests by powering down the NET2272. Otherwise,
+ * USB suspend requests will be ignored. This is acceptable for
+ * self-powered devices. For bus powered devices set this to 1.
+ */
+static ushort enable_suspend = 0;
+module_param(enable_suspend, ushort, 0644);
+
+static void assert_out_naking(struct net2272_ep *ep, const char *where)
+{
+ u8 tmp;
+
+#ifndef DEBUG
+ return;
+#endif
+
+ tmp = net2272_ep_read(ep, EP_STAT0);
+ if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
+ dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
+ ep->ep.name, where, tmp);
+ net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
+ }
+}
+#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
+
+static void stop_out_naking(struct net2272_ep *ep)
+{
+ u8 tmp = net2272_ep_read(ep, EP_STAT0);
+
+ if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
+ net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
+}
+
+#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
+
+static char *type_string(u8 bmAttributes)
+{
+ switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_BULK: return "bulk";
+ case USB_ENDPOINT_XFER_ISOC: return "iso";
+ case USB_ENDPOINT_XFER_INT: return "intr";
+ default: return "control";
+ }
+}
+
+static char *buf_state_string(unsigned state)
+{
+ switch (state) {
+ case BUFF_FREE: return "free";
+ case BUFF_VALID: return "valid";
+ case BUFF_LCL: return "local";
+ case BUFF_USB: return "usb";
+ default: return "unknown";
+ }
+}
+
+static char *dma_mode_string(void)
+{
+ if (!use_dma)
+ return "PIO";
+ switch (dma_mode) {
+ case 0: return "SLOW DREQ";
+ case 1: return "FAST DREQ";
+ case 2: return "BURST";
+ default: return "invalid";
+ }
+}
+
+static void net2272_dequeue_all(struct net2272_ep *);
+static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
+static int net2272_fifo_status(struct usb_ep *);
+
+static struct usb_ep_ops net2272_ep_ops;
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
+{
+ struct net2272 *dev;
+ struct net2272_ep *ep;
+ u32 max;
+ u8 tmp;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || !desc || ep->desc || _ep->name == ep0name
+ || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+ dev = ep->dev;
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ max = le16_to_cpu(desc->wMaxPacketSize) & 0x1fff;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ _ep->maxpacket = max & 0x7fff;
+ ep->desc = desc;
+
+ /* net2272_ep_reset() has already been called */
+ ep->stopped = 0;
+ ep->wedged = 0;
+
+ /* set speed-dependent max packet */
+ net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
+ net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
+
+ /* set type, direction, address; reset fifo counters */
+ net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
+ tmp = usb_endpoint_type(desc);
+ if (usb_endpoint_xfer_bulk(desc)) {
+ /* catch some particularly blatant driver bugs */
+ if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
+ (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return -ERANGE;
+ }
+ }
+ ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
+ tmp <<= ENDPOINT_TYPE;
+ tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
+ tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
+ tmp |= (1 << ENDPOINT_ENABLE);
+
+ /* for OUT transfers, block the rx fifo until a read is posted */
+ ep->is_in = usb_endpoint_dir_in(desc);
+ if (!ep->is_in)
+ net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
+
+ net2272_ep_write(ep, EP_CFG, tmp);
+
+ /* enable irqs */
+ tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
+ net2272_write(dev, IRQENB0, tmp);
+
+ tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
+ | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
+ | net2272_ep_read(ep, EP_IRQENB);
+ net2272_ep_write(ep, EP_IRQENB, tmp);
+
+ tmp = desc->bEndpointAddress;
+ dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
+ _ep->name, tmp & 0x0f, PIPEDIR(tmp),
+ type_string(desc->bmAttributes), max,
+ net2272_ep_read(ep, EP_CFG));
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+
+static void net2272_ep_reset(struct net2272_ep *ep)
+{
+ u8 tmp;
+
+ ep->desc = NULL;
+ INIT_LIST_HEAD(&ep->queue);
+
+ ep->ep.maxpacket = ~0;
+ ep->ep.ops = &net2272_ep_ops;
+
+ /* disable irqs, endpoint */
+ net2272_ep_write(ep, EP_IRQENB, 0);
+
+ /* init to our chosen defaults, notably so that we NAK OUT
+ * packets until the driver queues a read.
+ */
+ tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
+ net2272_ep_write(ep, EP_RSPSET, tmp);
+
+ tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
+ if (ep->num != 0)
+ tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
+
+ net2272_ep_write(ep, EP_RSPCLR, tmp);
+
+ /* scrub most status bits, and flush any fifo state */
+ net2272_ep_write(ep, EP_STAT0,
+ (1 << DATA_IN_TOKEN_INTERRUPT)
+ | (1 << DATA_OUT_TOKEN_INTERRUPT)
+ | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
+ | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+ | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
+
+ net2272_ep_write(ep, EP_STAT1,
+ (1 << TIMEOUT)
+ | (1 << USB_OUT_ACK_SENT)
+ | (1 << USB_OUT_NAK_SENT)
+ | (1 << USB_IN_ACK_RCVD)
+ | (1 << USB_IN_NAK_SENT)
+ | (1 << USB_STALL_SENT)
+ | (1 << LOCAL_OUT_ZLP)
+ | (1 << BUFFER_FLUSH));
+
+ /* fifo size is handled separately */
+}
+
+static int net2272_disable(struct usb_ep *_ep)
+{
+ struct net2272_ep *ep;
+ unsigned long flags;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || !ep->desc || _ep->name == ep0name)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->dev->lock, flags);
+ net2272_dequeue_all(ep);
+ net2272_ep_reset(ep);
+
+ dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
+
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+ return 0;
+}
+
+/*---------------------------------------------------------------------------*/
+
+static struct usb_request *
+net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+{
+ struct net2272_ep *ep;
+ struct net2272_request *req;
+
+ if (!_ep)
+ return NULL;
+ ep = container_of(_ep, struct net2272_ep, ep);
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ req->req.dma = DMA_ADDR_INVALID;
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void
+net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct net2272_ep *ep;
+ struct net2272_request *req;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || !_req)
+ return;
+
+ req = container_of(_req, struct net2272_request, req);
+ WARN_ON(!list_empty(&req->queue));
+ kfree(req);
+}
+
+static void
+net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
+{
+ struct net2272 *dev;
+ unsigned stopped = ep->stopped;
+
+ if (ep->num == 0) {
+ if (ep->dev->protocol_stall) {
+ ep->stopped = 1;
+ set_halt(ep);
+ }
+ allow_status(ep);
+ }
+
+ list_del_init(&req->queue);
+
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ else
+ status = req->req.status;
+
+ dev = ep->dev;
+ if (use_dma && req->mapped) {
+ dma_unmap_single(dev->dev, req->req.dma, req->req.length,
+ ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->req.dma = DMA_ADDR_INVALID;
+ req->mapped = 0;
+ }
+
+ if (status && status != -ESHUTDOWN)
+ dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length, req->req.buf);
+
+ /* don't modify queue heads during completion callback */
+ ep->stopped = 1;
+ spin_unlock(&dev->lock);
+ req->req.complete(&ep->ep, &req->req);
+ spin_lock(&dev->lock);
+ ep->stopped = stopped;
+}
+
+static int
+net2272_write_packet(struct net2272_ep *ep, u8 *buf,
+ struct net2272_request *req, unsigned max)
+{
+ u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
+ u16 *bufp;
+ unsigned length, count;
+ u8 tmp;
+
+ length = min(req->req.length - req->req.actual, max);
+ req->req.actual += length;
+
+ dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
+ ep->ep.name, req, max, length,
+ (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
+
+ count = length;
+ bufp = (u16 *)buf;
+
+ while (likely(count >= 2)) {
+ /* no byte-swap required; chip endian set during init */
+ writew(*bufp++, ep_data);
+ count -= 2;
+ }
+ buf = (u8 *)bufp;
+
+ /* write final byte by placing the NET2272 into 8-bit mode */
+ if (unlikely(count)) {
+ tmp = net2272_read(ep->dev, LOCCTL);
+ net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
+ writeb(*buf, ep_data);
+ net2272_write(ep->dev, LOCCTL, tmp);
+ }
+ return length;
+}
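A worked example of the split done by net2272_write_packet() above, as a sketch: with the chip's local bus in 16-bit mode, only an odd trailing byte forces the temporary drop to 8-bit mode.

    /* e.g. length == 63 on an IN endpoint: */
    unsigned words = 63 / 2;  /* 31 writew() transfers to EP_DATA                      */
    unsigned odd   = 63 % 2;  /* 1 -> clear DATA_WIDTH in LOCCTL, writeb(), restore it */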
+
+/* returns: 0: still running, 1: completed, negative: errno */
+static int
+net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
+{
+ u8 *buf;
+ unsigned count, max;
+ int status;
+
+ dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
+ ep->ep.name, req->req.actual, req->req.length);
+
+ /*
+ * Keep loading the endpoint until the final packet is loaded,
+ * or the endpoint buffer is full.
+ */
+ top:
+ /*
+ * Clear interrupt status
+ * - Packet Transmitted interrupt will become set again when the
+ * host successfully takes another packet
+ */
+ net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
+ while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
+ buf = req->req.buf + req->req.actual;
+ prefetch(buf);
+
+ /* force pagesel */
+ net2272_ep_read(ep, EP_STAT0);
+
+ max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
+ (net2272_ep_read(ep, EP_AVAIL0));
+
+ if (max < ep->ep.maxpacket)
+ max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
+ | (net2272_ep_read(ep, EP_AVAIL0));
+
+ count = net2272_write_packet(ep, buf, req, max);
+ /* see if we are done */
+ if (req->req.length == req->req.actual) {
+ /* validate short or zlp packet */
+ if (count < ep->ep.maxpacket)
+ set_fifo_bytecount(ep, 0);
+ net2272_done(ep, req, 0);
+
+ if (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct net2272_request,
+ queue);
+ status = net2272_kick_dma(ep, req);
+
+ if (status < 0)
+ if ((net2272_ep_read(ep, EP_STAT0)
+ & (1 << BUFFER_EMPTY)))
+ goto top;
+ }
+ return 1;
+ }
+ net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
+ }
+ return 0;
+}
+
+static void
+net2272_out_flush(struct net2272_ep *ep)
+{
+ ASSERT_OUT_NAKING(ep);
+
+ net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
+ | (1 << DATA_PACKET_RECEIVED_INTERRUPT));
+ net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
+}
+
+static int
+net2272_read_packet(struct net2272_ep *ep, u8 *buf,
+ struct net2272_request *req, unsigned avail)
+{
+ u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
+ unsigned is_short;
+ u16 *bufp;
+
+ req->req.actual += avail;
+
+ dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
+ ep->ep.name, req, avail,
+ (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
+
+ is_short = (avail < ep->ep.maxpacket);
+
+ if (unlikely(avail == 0)) {
+ /* remove any zlp from the buffer */
+ (void)readw(ep_data);
+ return is_short;
+ }
+
+ /* Ensure we get the final byte */
+ if (unlikely(avail % 2))
+ avail++;
+ bufp = (u16 *)buf;
+
+ do {
+ *bufp++ = readw(ep_data);
+ avail -= 2;
+ } while (avail);
+
+ /*
+ * To avoid a false "endpoint available" race condition, EP_STAT0 must
+ * be read twice in the case of a short transfer
+ */
+ if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
+ net2272_ep_read(ep, EP_STAT0);
+
+ return is_short;
+}
+
+static int
+net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
+{
+ u8 *buf;
+ unsigned is_short;
+ int count;
+ int tmp;
+ int cleanup = 0;
+ int status = -1;
+
+ dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
+ ep->ep.name, req->req.actual, req->req.length);
+
+ top:
+ do {
+ buf = req->req.buf + req->req.actual;
+ prefetchw(buf);
+
+ count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
+ | net2272_ep_read(ep, EP_AVAIL0);
+
+ net2272_ep_write(ep, EP_STAT0,
+ (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
+ (1 << DATA_PACKET_RECEIVED_INTERRUPT));
+
+ tmp = req->req.length - req->req.actual;
+
+ if (count > tmp) {
+ if ((tmp % ep->ep.maxpacket) != 0) {
+ dev_err(ep->dev->dev,
+ "%s out fifo %d bytes, expected %d\n",
+ ep->ep.name, count, tmp);
+ cleanup = 1;
+ }
+ count = (tmp > 0) ? tmp : 0;
+ }
+
+ is_short = net2272_read_packet(ep, buf, req, count);
+
+ /* completion */
+ if (unlikely(cleanup || is_short ||
+ ((req->req.actual == req->req.length)
+ && !req->req.zero))) {
+
+ if (cleanup) {
+ net2272_out_flush(ep);
+ net2272_done(ep, req, -EOVERFLOW);
+ } else
+ net2272_done(ep, req, 0);
+
+ /* re-initialize endpoint transfer registers
+ * otherwise they may result in erroneous pre-validation
+ * for subsequent control reads
+ */
+ if (unlikely(ep->num == 0)) {
+ net2272_ep_write(ep, EP_TRANSFER2, 0);
+ net2272_ep_write(ep, EP_TRANSFER1, 0);
+ net2272_ep_write(ep, EP_TRANSFER0, 0);
+ }
+
+ if (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct net2272_request, queue);
+ status = net2272_kick_dma(ep, req);
+ if ((status < 0) &&
+ !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
+ goto top;
+ }
+ return 1;
+ }
+ } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
+
+ return 0;
+}
+
+static void
+net2272_pio_advance(struct net2272_ep *ep)
+{
+ struct net2272_request *req;
+
+ if (unlikely(list_empty(&ep->queue)))
+ return;
+
+ req = list_entry(ep->queue.next, struct net2272_request, queue);
+ (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
+}
+
+/* returns 0 on success, else negative errno */
+static int
+net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
+ unsigned len, unsigned dir)
+{
+ dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
+ ep, buf, len, dir);
+
+ /* The NET2272 only supports a single dma channel */
+ if (dev->dma_busy)
+ return -EBUSY;
+ /*
+ * EP_TRANSFER (used to determine the number of bytes received
+ * in an OUT transfer) is 24 bits wide; don't ask for more than that.
+ */
+ if ((dir == 1) && (len > 0x1000000))
+ return -EINVAL;
+
+ dev->dma_busy = 1;
+
+ /* initialize platform's dma */
+#ifdef CONFIG_PCI
+ /* NET2272 addr, buffer addr, length, etc. */
+ switch (dev->dev_id) {
+ case PCI_DEVICE_ID_RDK1:
+ /* Setup PLX 9054 DMA mode */
+ writel((1 << LOCAL_BUS_WIDTH) |
+ (1 << TA_READY_INPUT_ENABLE) |
+ (0 << LOCAL_BURST_ENABLE) |
+ (1 << DONE_INTERRUPT_ENABLE) |
+ (1 << LOCAL_ADDRESSING_MODE) |
+ (1 << DEMAND_MODE) |
+ (1 << DMA_EOT_ENABLE) |
+ (1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
+ (1 << DMA_CHANNEL_INTERRUPT_SELECT),
+ dev->rdk1.plx9054_base_addr + DMAMODE0);
+
+ writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
+ writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
+ writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
+ writel((dir << DIRECTION_OF_TRANSFER) |
+ (1 << INTERRUPT_AFTER_TERMINAL_COUNT),
+ dev->rdk1.plx9054_base_addr + DMADPR0);
+ writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
+ readl(dev->rdk1.plx9054_base_addr + INTCSR),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+
+ break;
+ }
+#endif
+
+ net2272_write(dev, DMAREQ,
+ (0 << DMA_BUFFER_VALID) |
+ (1 << DMA_REQUEST_ENABLE) |
+ (1 << DMA_CONTROL_DACK) |
+ (dev->dma_eot_polarity << EOT_POLARITY) |
+ (dev->dma_dack_polarity << DACK_POLARITY) |
+ (dev->dma_dreq_polarity << DREQ_POLARITY) |
+ ((ep >> 1) << DMA_ENDPOINT_SELECT));
+
+ (void) net2272_read(dev, SCRATCH);
+
+ return 0;
+}
+
+static void
+net2272_start_dma(struct net2272 *dev)
+{
+ /* start platform's dma controller */
+#ifdef CONFIG_PCI
+ switch (dev->dev_id) {
+ case PCI_DEVICE_ID_RDK1:
+ writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
+ dev->rdk1.plx9054_base_addr + DMACSR0);
+ break;
+ }
+#endif
+}
+
+/* returns 0 on success, else negative errno */
+static int
+net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
+{
+ unsigned size;
+ u8 tmp;
+
+ if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
+ return -EINVAL;
+
+ /* don't use dma for odd-length transfers
+ * otherwise, we'd need to deal with the last byte with pio
+ */
+ if (req->req.length & 1)
+ return -EINVAL;
+
+ dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
+ ep->ep.name, req, (unsigned long long) req->req.dma);
+
+ net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
+
+ /* The NET2272 can only use DMA on one endpoint at a time */
+ if (ep->dev->dma_busy)
+ return -EBUSY;
+
+ /* Make sure we only DMA an even number of bytes (we'll use
+ * pio to complete the transfer)
+ */
+ size = req->req.length;
+ size &= ~1;
+
+ /* device-to-host transfer */
+ if (ep->is_in) {
+ /* initialize platform's dma controller */
+ if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
+ /* unable to obtain DMA channel; return error and use pio mode */
+ return -EBUSY;
+ req->req.actual += size;
+
+ /* host-to-device transfer */
+ } else {
+ tmp = net2272_ep_read(ep, EP_STAT0);
+
+ /* initialize platform's dma controller */
+ if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
+ /* unable to obtain DMA channel; return error and use pio mode */
+ return -EBUSY;
+
+ if (!(tmp & (1 << BUFFER_EMPTY)))
+ ep->not_empty = 1;
+ else
+ ep->not_empty = 0;
+
+
+ /* allow the endpoint's buffer to fill */
+ net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
+
+ /* this transfer completed and data's already in the fifo
+ * return error so pio gets used.
+ */
+ if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
+
+ /* deassert dreq */
+ net2272_write(ep->dev, DMAREQ,
+ (0 << DMA_BUFFER_VALID) |
+ (0 << DMA_REQUEST_ENABLE) |
+ (1 << DMA_CONTROL_DACK) |
+ (ep->dev->dma_eot_polarity << EOT_POLARITY) |
+ (ep->dev->dma_dack_polarity << DACK_POLARITY) |
+ (ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
+ ((ep->num >> 1) << DMA_ENDPOINT_SELECT));
+
+ return -EBUSY;
+ }
+ }
+
+ /* Don't use per-packet interrupts: use dma interrupts only */
+ net2272_ep_write(ep, EP_IRQENB, 0);
+
+ net2272_start_dma(ep->dev);
+
+ return 0;
+}
+
+static void net2272_cancel_dma(struct net2272 *dev)
+{
+#ifdef CONFIG_PCI
+ switch (dev->dev_id) {
+ case PCI_DEVICE_ID_RDK1:
+ writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
+ writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
+ while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
+ (1 << CHANNEL_DONE)))
+ continue; /* wait for dma to stabilize */
+
+ /* dma abort generates an interrupt */
+ writeb(1 << CHANNEL_CLEAR_INTERRUPT,
+ dev->rdk1.plx9054_base_addr + DMACSR0);
+ break;
+ }
+#endif
+
+ dev->dma_busy = 0;
+}
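As the comment at the top of the file notes, these three hooks only know how to drive the PLX 9054 bridge on the RDK1 card; other platforms must supply their own DMA glue here. A purely hypothetical sketch of how net2272_request_dma() might refuse DMA on a board with no usable engine, so the callers fall back to PIO:

    	switch (dev->dev_id) {
    	case PCI_DEVICE_ID_RDK1:
    		/* ... program the PLX 9054 as in the code above ... */
    		break;
    	default:
    		dev->dma_busy = 0;	/* hypothetical: no DMA engine wired up */
    		return -EBUSY;		/* kick_dma()/queue() then use PIO      */
    	}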
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct net2272_request *req;
+ struct net2272_ep *ep;
+ struct net2272 *dev;
+ unsigned long flags;
+ int status = -1;
+ u8 s;
+
+ req = container_of(_req, struct net2272_request, req);
+ if (!_req || !_req->complete || !_req->buf
+ || !list_empty(&req->queue))
+ return -EINVAL;
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0))
+ return -EINVAL;
+ dev = ep->dev;
+ if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ /* set up dma mapping in case the caller didn't */
+ if (use_dma && ep->dma && _req->dma == DMA_ADDR_INVALID) {
+ _req->dma = dma_map_single(dev->dev, _req->buf, _req->length,
+ ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = 1;
+ }
+
+ dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
+ _ep->name, _req, _req->length, _req->buf,
+ (unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ /* kickstart this i/o queue? */
+ if (list_empty(&ep->queue) && !ep->stopped) {
+ /* maybe there's no control data, just status ack */
+ if (ep->num == 0 && _req->length == 0) {
+ net2272_done(ep, req, 0);
+ dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
+ goto done;
+ }
+
+ /* Return zlp, don't let it block subsequent packets */
+ s = net2272_ep_read(ep, EP_STAT0);
+ if (s & (1 << BUFFER_EMPTY)) {
+ /* Buffer is empty check for a blocking zlp, handle it */
+ if ((s & (1 << NAK_OUT_PACKETS)) &&
+ net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
+ dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
+ /*
+ * Request is going to terminate with a short packet ...
+ * hope the client is ready for it!
+ */
+ status = net2272_read_fifo(ep, req);
+ /* clear short packet naking */
+ net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
+ goto done;
+ }
+ }
+
+ /* try dma first */
+ status = net2272_kick_dma(ep, req);
+
+ if (status < 0) {
+ /* dma failed (most likely in use by another endpoint)
+ * fallback to pio
+ */
+ status = 0;
+
+ if (ep->is_in)
+ status = net2272_write_fifo(ep, req);
+ else {
+ s = net2272_ep_read(ep, EP_STAT0);
+ if ((s & (1 << BUFFER_EMPTY)) == 0)
+ status = net2272_read_fifo(ep, req);
+ }
+
+ if (unlikely(status != 0)) {
+ if (status > 0)
+ status = 0;
+ req = NULL;
+ }
+ }
+ }
+ if (likely(req != 0))
+ list_add_tail(&req->queue, &ep->queue);
+
+ if (likely(!list_empty(&ep->queue)))
+ net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
+ done:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+/* dequeue ALL requests */
+static void
+net2272_dequeue_all(struct net2272_ep *ep)
+{
+ struct net2272_request *req;
+
+ /* called with spinlock held */
+ ep->stopped = 1;
+
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct net2272_request,
+ queue);
+ net2272_done(ep, req, -ESHUTDOWN);
+ }
+}
+
+/* dequeue JUST ONE request */
+static int
+net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct net2272_ep *ep;
+ struct net2272_request *req;
+ unsigned long flags;
+ int stopped;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0) || !_req)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->dev->lock, flags);
+ stopped = ep->stopped;
+ ep->stopped = 1;
+
+ /* make sure it's still queued on this endpoint */
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (&req->req == _req)
+ break;
+ }
+ if (&req->req != _req) {
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+ return -EINVAL;
+ }
+
+ /* queue head may be partially complete */
+ if (ep->queue.next == &req->queue) {
+ dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
+ net2272_done(ep, req, -ECONNRESET);
+ }
+ req = NULL;
+ ep->stopped = stopped;
+
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+ return 0;
+}
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
+{
+ struct net2272_ep *ep;
+ unsigned long flags;
+ int ret = 0;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0))
+ return -EINVAL;
+ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+ if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->dev->lock, flags);
+ if (!list_empty(&ep->queue))
+ ret = -EAGAIN;
+ else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
+ ret = -EAGAIN;
+ else {
+ dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
+ value ? "set" : "clear",
+ wedged ? "wedge" : "halt");
+ /* set/clear */
+ if (value) {
+ if (ep->num == 0)
+ ep->dev->protocol_stall = 1;
+ else
+ set_halt(ep);
+ if (wedged)
+ ep->wedged = 1;
+ } else {
+ clear_halt(ep);
+ ep->wedged = 0;
+ }
+ }
+ spin_unlock_irqrestore(&ep->dev->lock, flags);
+
+ return ret;
+}
+
+static int
+net2272_set_halt(struct usb_ep *_ep, int value)
+{
+ return net2272_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int
+net2272_set_wedge(struct usb_ep *_ep)
+{
+ if (!_ep || _ep->name == ep0name)
+ return -EINVAL;
+ return net2272_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static int
+net2272_fifo_status(struct usb_ep *_ep)
+{
+ struct net2272_ep *ep;
+ u16 avail;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0))
+ return -ENODEV;
+ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return -ESHUTDOWN;
+
+ avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
+ avail |= net2272_ep_read(ep, EP_AVAIL0);
+ if (avail > ep->fifo_size)
+ return -EOVERFLOW;
+ if (ep->is_in)
+ avail = ep->fifo_size - avail;
+ return avail;
+}
+
+static void
+net2272_fifo_flush(struct usb_ep *_ep)
+{
+ struct net2272_ep *ep;
+
+ ep = container_of(_ep, struct net2272_ep, ep);
+ if (!_ep || (!ep->desc && ep->num != 0))
+ return;
+ if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
+ return;
+
+ net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
+}
+
+static struct usb_ep_ops net2272_ep_ops = {
+ .enable = net2272_enable,
+ .disable = net2272_disable,
+
+ .alloc_request = net2272_alloc_request,
+ .free_request = net2272_free_request,
+
+ .queue = net2272_queue,
+ .dequeue = net2272_dequeue,
+
+ .set_halt = net2272_set_halt,
+ .set_wedge = net2272_set_wedge,
+ .fifo_status = net2272_fifo_status,
+ .fifo_flush = net2272_fifo_flush,
+};
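These ops are only reached through the generic endpoint API; a function driver never calls the net2272_* entry points directly. A minimal sketch, assuming a hypothetical already-enabled endpoint and a hypothetical completion handler:

    static void my_complete(struct usb_ep *ep, struct usb_request *req)
    {
    	/* hypothetical: inspect req->status and req->actual here */
    }

    static int my_queue_one(struct usb_ep *ep, void *data, unsigned len)
    {
    	struct usb_request *req;

    	req = usb_ep_alloc_request(ep, GFP_KERNEL);	/* -> net2272_alloc_request */
    	if (!req)
    		return -ENOMEM;
    	req->buf      = data;
    	req->length   = len;
    	req->complete = my_complete;
    	return usb_ep_queue(ep, req, GFP_ATOMIC);	/* -> net2272_queue */
    }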
+
+/*---------------------------------------------------------------------------*/
+
+static int
+net2272_get_frame(struct usb_gadget *_gadget)
+{
+ struct net2272 *dev;
+ unsigned long flags;
+ u16 ret;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct net2272, gadget);
+ spin_lock_irqsave(&dev->lock, flags);
+
+ ret = net2272_read(dev, FRAME1) << 8;
+ ret |= net2272_read(dev, FRAME0);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+}
+
+static int
+net2272_wakeup(struct usb_gadget *_gadget)
+{
+ struct net2272 *dev;
+ u8 tmp;
+ unsigned long flags;
+
+ if (!_gadget)
+ return 0;
+ dev = container_of(_gadget, struct net2272, gadget);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ tmp = net2272_read(dev, USBCTL0);
+ if (tmp & (1 << IO_WAKEUP_ENABLE))
+ net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+static int
+net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
+{
+ struct net2272 *dev;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct net2272, gadget);
+
+ dev->is_selfpowered = value;
+
+ return 0;
+}
+
+static int
+net2272_pullup(struct usb_gadget *_gadget, int is_on)
+{
+ struct net2272 *dev;
+ u8 tmp;
+ unsigned long flags;
+
+ if (!_gadget)
+ return -ENODEV;
+ dev = container_of(_gadget, struct net2272, gadget);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ tmp = net2272_read(dev, USBCTL0);
+ dev->softconnect = (is_on != 0);
+ if (is_on)
+ tmp |= (1 << USB_DETECT_ENABLE);
+ else
+ tmp &= ~(1 << USB_DETECT_ENABLE);
+ net2272_write(dev, USBCTL0, tmp);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+static int net2272_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int net2272_stop(struct usb_gadget_driver *driver);
+
+static const struct usb_gadget_ops net2272_ops = {
+ .get_frame = net2272_get_frame,
+ .wakeup = net2272_wakeup,
+ .set_selfpowered = net2272_set_selfpowered,
+ .pullup = net2272_pullup,
+ .start = net2272_start,
+ .stop = net2272_stop,
+};
+
+/*---------------------------------------------------------------------------*/
+
+static ssize_t
+net2272_show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ struct net2272 *dev;
+ char *next;
+ unsigned size, t;
+ unsigned long flags;
+ u8 t1, t2;
+ int i;
+ const char *s;
+
+ dev = dev_get_drvdata(_dev);
+ next = buf;
+ size = PAGE_SIZE;
+ spin_lock_irqsave(&dev->lock, flags);
+
+ if (dev->driver)
+ s = dev->driver->driver.name;
+ else
+ s = "(none)";
+
+ /* Main Control Registers */
+ t = scnprintf(next, size, "%s version %s,"
+ "chiprev %02x, locctl %02x\n"
+ "irqenb0 %02x irqenb1 %02x "
+ "irqstat0 %02x irqstat1 %02x\n",
+ driver_name, driver_vers, dev->chiprev,
+ net2272_read(dev, LOCCTL),
+ net2272_read(dev, IRQENB0),
+ net2272_read(dev, IRQENB1),
+ net2272_read(dev, IRQSTAT0),
+ net2272_read(dev, IRQSTAT1));
+ size -= t;
+ next += t;
+
+ /* DMA */
+ t1 = net2272_read(dev, DMAREQ);
+ t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
+ t1, ep_name[(t1 & 0x01) + 1],
+ t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
+ t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
+ t1 & (1 << DMA_REQUEST) ? "req " : "",
+ t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
+ size -= t;
+ next += t;
+
+ /* USB Control Registers */
+ t1 = net2272_read(dev, USBCTL1);
+ if (t1 & (1 << VBUS_PIN)) {
+ if (t1 & (1 << USB_HIGH_SPEED))
+ s = "high speed";
+ else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+ s = "powered";
+ else
+ s = "full speed";
+ } else
+ s = "not attached";
+ t = scnprintf(next, size,
+ "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
+ net2272_read(dev, USBCTL0), t1,
+ net2272_read(dev, OURADDR), s);
+ size -= t;
+ next += t;
+
+ /* Endpoint Registers */
+ for (i = 0; i < 4; ++i) {
+ struct net2272_ep *ep;
+
+ ep = &dev->ep[i];
+ if (i && !ep->desc)
+ continue;
+
+ t1 = net2272_ep_read(ep, EP_CFG);
+ t2 = net2272_ep_read(ep, EP_RSPSET);
+ t = scnprintf(next, size,
+ "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
+ "irqenb %02x\n",
+ ep->ep.name, t1, t2,
+ (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
+ (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
+ (t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
+ (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
+ (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
+ (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
+ (t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
+ (t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
+ net2272_ep_read(ep, EP_IRQENB));
+ size -= t;
+ next += t;
+
+ t = scnprintf(next, size,
+ "\tstat0 %02x stat1 %02x avail %04x "
+ "(ep%d%s-%s)%s\n",
+ net2272_ep_read(ep, EP_STAT0),
+ net2272_ep_read(ep, EP_STAT1),
+ (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
+ t1 & 0x0f,
+ ep->is_in ? "in" : "out",
+ type_string(t1 >> 5),
+ ep->stopped ? "*" : "");
+ size -= t;
+ next += t;
+
+ t = scnprintf(next, size,
+ "\tep_transfer %06x\n",
+ ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
+ ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
+ ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
+ size -= t;
+ next += t;
+
+ t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
+ t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
+ t = scnprintf(next, size,
+ "\tbuf-a %s buf-b %s\n",
+ buf_state_string(t1),
+ buf_state_string(t2));
+ size -= t;
+ next += t;
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return PAGE_SIZE - size;
+}
+static DEVICE_ATTR(registers, S_IRUGO, net2272_show_registers, NULL);
+
+/*---------------------------------------------------------------------------*/
+
+static void
+net2272_set_fifo_mode(struct net2272 *dev, int mode)
+{
+ u8 tmp;
+
+ tmp = net2272_read(dev, LOCCTL) & 0x3f;
+ tmp |= (mode << 6);
+ net2272_write(dev, LOCCTL, tmp);
+
+ INIT_LIST_HEAD(&dev->gadget.ep_list);
+
+ /* always ep-a, ep-c ... maybe not ep-b */
+ list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
+
+ switch (mode) {
+ case 0:
+ list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
+ dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
+ break;
+ case 1:
+ list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
+ dev->ep[1].fifo_size = 1024;
+ dev->ep[2].fifo_size = 512;
+ break;
+ case 2:
+ list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
+ dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
+ break;
+ case 3:
+ dev->ep[1].fifo_size = 1024;
+ break;
+ }
+
+ /* ep-c is always 2 512 byte buffers */
+ list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
+ dev->ep[3].fifo_size = 512;
+}
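The switch above implements the fifo_mode table documented next to the module parameter; the resulting per-endpoint buffer sizes, for illustration only:

    /* fifo_mode    ep-a     ep-b      ep-c
     *     0         512      512      512
     *     1        1024      512      512
     *     2        1024     1024      512
     *     3        1024   disabled    512
     */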
+
+/*---------------------------------------------------------------------------*/
+
+static struct net2272 *the_controller;
+
+static void
+net2272_usb_reset(struct net2272 *dev)
+{
+ dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+ net2272_cancel_dma(dev);
+
+ net2272_write(dev, IRQENB0, 0);
+ net2272_write(dev, IRQENB1, 0);
+
+ /* clear irq state */
+ net2272_write(dev, IRQSTAT0, 0xff);
+ net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
+
+ net2272_write(dev, DMAREQ,
+ (0 << DMA_BUFFER_VALID) |
+ (0 << DMA_REQUEST_ENABLE) |
+ (1 << DMA_CONTROL_DACK) |
+ (dev->dma_eot_polarity << EOT_POLARITY) |
+ (dev->dma_dack_polarity << DACK_POLARITY) |
+ (dev->dma_dreq_polarity << DREQ_POLARITY) |
+ ((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
+
+ net2272_cancel_dma(dev);
+ net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
+
+ /* Set the NET2272 ep fifo data width to 16-bit mode. For correct byte swapping,
+ * note that the higher level gadget drivers are expected to convert data to
+ * little endian. Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here.
+ */
+ net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
+ net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
+}
+
+static void
+net2272_usb_reinit(struct net2272 *dev)
+{
+ int i;
+
+ /* basic endpoint init */
+ for (i = 0; i < 4; ++i) {
+ struct net2272_ep *ep = &dev->ep[i];
+
+ ep->ep.name = ep_name[i];
+ ep->dev = dev;
+ ep->num = i;
+ ep->not_empty = 0;
+
+ if (use_dma && ep->num == dma_ep)
+ ep->dma = 1;
+
+ if (i > 0 && i <= 3)
+ ep->fifo_size = 512;
+ else
+ ep->fifo_size = 64;
+ net2272_ep_reset(ep);
+ }
+ dev->ep[0].ep.maxpacket = 64;
+
+ dev->gadget.ep0 = &dev->ep[0].ep;
+ dev->ep[0].stopped = 0;
+ INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
+}
+
+static void
+net2272_ep0_start(struct net2272 *dev)
+{
+ struct net2272_ep *ep0 = &dev->ep[0];
+
+ net2272_ep_write(ep0, EP_RSPSET,
+ (1 << NAK_OUT_PACKETS_MODE) |
+ (1 << ALT_NAK_OUT_PACKETS));
+ net2272_ep_write(ep0, EP_RSPCLR,
+ (1 << HIDE_STATUS_PHASE) |
+ (1 << CONTROL_STATUS_PHASE_HANDSHAKE));
+ net2272_write(dev, USBCTL0,
+ (dev->softconnect << USB_DETECT_ENABLE) |
+ (1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
+ (1 << IO_WAKEUP_ENABLE));
+ net2272_write(dev, IRQENB0,
+ (1 << SETUP_PACKET_INTERRUPT_ENABLE) |
+ (1 << ENDPOINT_0_INTERRUPT_ENABLE) |
+ (1 << DMA_DONE_INTERRUPT_ENABLE));
+ net2272_write(dev, IRQENB1,
+ (1 << VBUS_INTERRUPT_ENABLE) |
+ (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
+ (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
+}
+
+/* when a driver is successfully registered, it will receive
+ * control requests including set_configuration(), which enables
+ * non-control requests. then usb traffic follows until a
+ * disconnect is reported. then a host may connect again, or
+ * the driver might get unbound.
+ */
+static int net2272_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *))
+{
+ struct net2272 *dev = the_controller;
+ int ret;
+ unsigned i;
+
+ if (!driver || !bind || !driver->unbind || !driver->setup ||
+ driver->speed != USB_SPEED_HIGH)
+ return -EINVAL;
+ if (!dev)
+ return -ENODEV;
+ if (dev->driver)
+ return -EBUSY;
+
+ for (i = 0; i < 4; ++i)
+ dev->ep[i].irqs = 0;
+ /* hook up the driver ... */
+ dev->softconnect = 1;
+ driver->driver.bus = NULL;
+ dev->driver = driver;
+ dev->gadget.dev.driver = &driver->driver;
+ ret = bind(&dev->gadget);
+ if (ret) {
+ dev_dbg(dev->dev, "bind to driver %s --> %d\n",
+ driver->driver.name, ret);
+ dev->driver = NULL;
+ dev->gadget.dev.driver = NULL;
+ return ret;
+ }
+
+ /* ... then enable host detection and ep0; and we're ready
+ * for set_configuration as well as eventual disconnect.
+ */
+ net2272_ep0_start(dev);
+
+ dev_dbg(dev->dev, "%s ready\n", driver->driver.name);
+
+ return 0;
+}
+
+static void
+stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
+{
+ int i;
+
+ /* don't disconnect if it's not connected */
+ if (dev->gadget.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+
+ /* stop hardware; prevent new request submissions;
+ * and kill any outstanding requests.
+ */
+ net2272_usb_reset(dev);
+ for (i = 0; i < 4; ++i)
+ net2272_dequeue_all(&dev->ep[i]);
+
+ /* report disconnect; the driver is already quiesced */
+ if (driver) {
+ spin_unlock(&dev->lock);
+ driver->disconnect(&dev->gadget);
+ spin_lock(&dev->lock);
+
+ }
+ net2272_usb_reinit(dev);
+}
+
+static int net2272_stop(struct usb_gadget_driver *driver)
+{
+ struct net2272 *dev = the_controller;
+ unsigned long flags;
+
+ if (!dev)
+ return -ENODEV;
+ if (!driver || driver != dev->driver)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ stop_activity(dev, driver);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ net2272_pullup(&dev->gadget, 0);
+
+ driver->unbind(&dev->gadget);
+ dev->gadget.dev.driver = NULL;
+ dev->driver = NULL;
+
+ dev_dbg(dev->dev, "unregistered driver '%s'\n", driver->driver.name);
+ return 0;
+}
+
+/*---------------------------------------------------------------------------*/
+/* handle ep-a/ep-b dma completions */
+static void
+net2272_handle_dma(struct net2272_ep *ep)
+{
+ struct net2272_request *req;
+ unsigned len;
+ int status;
+
+ if (!list_empty(&ep->queue))
+ req = list_entry(ep->queue.next,
+ struct net2272_request, queue);
+ else
+ req = NULL;
+
+ dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
+
+ /* Ensure DREQ is de-asserted */
+ net2272_write(ep->dev, DMAREQ,
+ (0 << DMA_BUFFER_VALID)
+ | (0 << DMA_REQUEST_ENABLE)
+ | (1 << DMA_CONTROL_DACK)
+ | (ep->dev->dma_eot_polarity << EOT_POLARITY)
+ | (ep->dev->dma_dack_polarity << DACK_POLARITY)
+ | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
+ | ((ep->dma >> 1) << DMA_ENDPOINT_SELECT));
+
+ ep->dev->dma_busy = 0;
+
+ net2272_ep_write(ep, EP_IRQENB,
+ (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
+ | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
+ | net2272_ep_read(ep, EP_IRQENB));
+
+ /* device-to-host transfer completed */
+ if (ep->is_in) {
+ /* validate a short packet or zlp if necessary */
+ if ((req->req.length % ep->ep.maxpacket != 0) ||
+ req->req.zero)
+ set_fifo_bytecount(ep, 0);
+
+ net2272_done(ep, req, 0);
+ if (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct net2272_request, queue);
+ status = net2272_kick_dma(ep, req);
+ if (status < 0)
+ net2272_pio_advance(ep);
+ }
+
+ /* host-to-device transfer completed */
+ } else {
+ /* terminated with a short packet? */
+ if (net2272_read(ep->dev, IRQSTAT0) &
+ (1 << DMA_DONE_INTERRUPT)) {
+ /* abort system dma */
+ net2272_cancel_dma(ep->dev);
+ }
+
+ /* EP_TRANSFER will contain the number of bytes
+ * actually received.
+ * NOTE: There is no overflow detection on EP_TRANSFER:
+ * We can't deal with transfers larger than 2^24 bytes!
+ */
+ len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
+ | (net2272_ep_read(ep, EP_TRANSFER1) << 8)
+ | (net2272_ep_read(ep, EP_TRANSFER0));
+
+ if (ep->not_empty)
+ len += 4;
+
+ req->req.actual += len;
+
+ /* get any remaining data */
+ net2272_pio_advance(ep);
+ }
+}
+
+/*---------------------------------------------------------------------------*/
+
+static void
+net2272_handle_ep(struct net2272_ep *ep)
+{
+ struct net2272_request *req;
+ u8 stat0, stat1;
+
+ if (!list_empty(&ep->queue))
+ req = list_entry(ep->queue.next,
+ struct net2272_request, queue);
+ else
+ req = NULL;
+
+ /* ack all, and handle what we care about */
+ stat0 = net2272_ep_read(ep, EP_STAT0);
+ stat1 = net2272_ep_read(ep, EP_STAT1);
+ ep->irqs++;
+
+ dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
+ ep->ep.name, stat0, stat1, req ? &req->req : 0);
+
+ net2272_ep_write(ep, EP_STAT0, stat0 &
+ ~((1 << NAK_OUT_PACKETS)
+ | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
+ net2272_ep_write(ep, EP_STAT1, stat1);
+
+ /* data packet(s) received (in the fifo, OUT)
+ * direction must be validated, otherwise control read status phase
+ * could be interpreted as a valid packet
+ */
+ if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
+ net2272_pio_advance(ep);
+ /* data packet(s) transmitted (IN) */
+ else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
+ net2272_pio_advance(ep);
+}
+
+static struct net2272_ep *
+net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
+{
+ struct net2272_ep *ep;
+
+ if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
+ return &dev->ep[0];
+
+ list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
+ u8 bEndpointAddress;
+
+ if (!ep->desc)
+ continue;
+ bEndpointAddress = ep->desc->bEndpointAddress;
+ if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
+ continue;
+ if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
+ return ep;
+ }
+ return NULL;
+}
+
+/*
+ * USB Test Packet:
+ * JKJKJKJK * 9
+ * JJKKJJKK * 8
+ * JJJJKKKK * 8
+ * JJJJJJJKKKKKKK * 8
+ * JJJJJJJK * 8
+ * {JKKKKKKK * 10}, JK
+ */
+static const u8 net2272_test_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
+ 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
+ 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
+ 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
+};
+
+static void
+net2272_set_test_mode(struct net2272 *dev, int mode)
+{
+ int i;
+
+ /* Disable all net2272 interrupts:
+ * Nothing but a power cycle should stop the test.
+ */
+ net2272_write(dev, IRQENB0, 0x00);
+ net2272_write(dev, IRQENB1, 0x00);
+
+ /* Force transceiver to high-speed */
+ net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
+
+ net2272_write(dev, PAGESEL, 0);
+ net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
+ net2272_write(dev, EP_RSPCLR,
+ (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
+ | (1 << HIDE_STATUS_PHASE));
+ net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
+ net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
+
+ /* wait for status phase to complete */
+ while (!(net2272_read(dev, EP_STAT0) &
+ (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
+ ;
+
+ /* Enable test mode */
+ net2272_write(dev, USBTEST, mode);
+
+ /* load test packet */
+ if (mode == TEST_PACKET) {
+ /* switch to 8 bit mode */
+ net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
+ ~(1 << DATA_WIDTH));
+
+ for (i = 0; i < sizeof(net2272_test_packet); ++i)
+ net2272_write(dev, EP_DATA, net2272_test_packet[i]);
+
+ /* Validate test packet */
+ net2272_write(dev, EP_TRANSFER0, 0);
+ }
+}
+
+static void
+net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
+{
+ struct net2272_ep *ep;
+ u8 num, scratch;
+
+ /* starting a control request? */
+ if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
+ union {
+ u8 raw[8];
+ struct usb_ctrlrequest r;
+ } u;
+ int tmp = 0;
+ struct net2272_request *req;
+
+ if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
+ if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
+ dev->gadget.speed = USB_SPEED_HIGH;
+ else
+ dev->gadget.speed = USB_SPEED_FULL;
+ dev_dbg(dev->dev, "%s speed\n",
+ (dev->gadget.speed == USB_SPEED_HIGH) ? "high" : "full");
+ }
+
+ ep = &dev->ep[0];
+ ep->irqs++;
+
+ /* make sure any leftover interrupt state is cleared */
+ stat &= ~(1 << ENDPOINT_0_INTERRUPT);
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next,
+ struct net2272_request, queue);
+ net2272_done(ep, req,
+ (req->req.actual == req->req.length) ? 0 : -EPROTO);
+ }
+ ep->stopped = 0;
+ dev->protocol_stall = 0;
+ net2272_ep_write(ep, EP_STAT0,
+ (1 << DATA_IN_TOKEN_INTERRUPT)
+ | (1 << DATA_OUT_TOKEN_INTERRUPT)
+ | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
+ | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
+ | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
+ net2272_ep_write(ep, EP_STAT1,
+ (1 << TIMEOUT)
+ | (1 << USB_OUT_ACK_SENT)
+ | (1 << USB_OUT_NAK_SENT)
+ | (1 << USB_IN_ACK_RCVD)
+ | (1 << USB_IN_NAK_SENT)
+ | (1 << USB_STALL_SENT)
+ | (1 << LOCAL_OUT_ZLP));
+
+ /*
+ * Ensure Control Read pre-validation setting is beyond maximum size
+ * - Control Writes can leave non-zero values in EP_TRANSFER. If
+ * an EP0 transfer following the Control Write is a Control Read,
+ * the NET2272 sees the non-zero EP_TRANSFER as an unexpected
+ * pre-validation count.
+ * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
+ * the pre-validation count cannot cause an unexpected validation
+ */
+ net2272_write(dev, PAGESEL, 0);
+ net2272_write(dev, EP_TRANSFER2, 0xff);
+ net2272_write(dev, EP_TRANSFER1, 0xff);
+ net2272_write(dev, EP_TRANSFER0, 0xff);
+
+ u.raw[0] = net2272_read(dev, SETUP0);
+ u.raw[1] = net2272_read(dev, SETUP1);
+ u.raw[2] = net2272_read(dev, SETUP2);
+ u.raw[3] = net2272_read(dev, SETUP3);
+ u.raw[4] = net2272_read(dev, SETUP4);
+ u.raw[5] = net2272_read(dev, SETUP5);
+ u.raw[6] = net2272_read(dev, SETUP6);
+ u.raw[7] = net2272_read(dev, SETUP7);
+ /*
+ * If you have a big endian cpu make sure le16_to_cpus
+ * performs the proper byte swapping here...
+ */
+ le16_to_cpus(&u.r.wValue);
+ le16_to_cpus(&u.r.wIndex);
+ le16_to_cpus(&u.r.wLength);
+
+ /* ack the irq */
+ net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
+ stat ^= (1 << SETUP_PACKET_INTERRUPT);
+
+ /* watch control traffic at the token level, and force
+ * synchronization before letting the status phase happen.
+ */
+ ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
+ if (ep->is_in) {
+ scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
+ | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
+ | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
+ stop_out_naking(ep);
+ } else
+ scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
+ | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
+ | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
+ net2272_ep_write(ep, EP_IRQENB, scratch);
+
+ if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
+ goto delegate;
+ switch (u.r.bRequest) {
+ case USB_REQ_GET_STATUS: {
+ struct net2272_ep *e;
+ u16 status = 0;
+
+ switch (u.r.bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_ENDPOINT:
+ e = net2272_get_ep_by_addr(dev, u.r.wIndex);
+ if (!e || u.r.wLength > 2)
+ goto do_stall;
+ if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
+ status = __constant_cpu_to_le16(1);
+ else
+ status = __constant_cpu_to_le16(0);
+
+ /* don't bother with a request object! */
+ net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
+ writew(status, net2272_reg_addr(dev, EP_DATA));
+ set_fifo_bytecount(&dev->ep[0], 0);
+ allow_status(ep);
+ dev_vdbg(dev->dev, "%s stat %02x\n",
+ ep->ep.name, status);
+ goto next_endpoints;
+ case USB_RECIP_DEVICE:
+ if (u.r.wLength > 2)
+ goto do_stall;
+ if (dev->is_selfpowered)
+ status = (1 << USB_DEVICE_SELF_POWERED);
+
+ /* don't bother with a request object! */
+ net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
+ writew(status, net2272_reg_addr(dev, EP_DATA));
+ set_fifo_bytecount(&dev->ep[0], 0);
+ allow_status(ep);
+ dev_vdbg(dev->dev, "device stat %02x\n", status);
+ goto next_endpoints;
+ case USB_RECIP_INTERFACE:
+ if (u.r.wLength > 2)
+ goto do_stall;
+
+ /* don't bother with a request object! */
+ net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
+ writew(status, net2272_reg_addr(dev, EP_DATA));
+ set_fifo_bytecount(&dev->ep[0], 0);
+ allow_status(ep);
+ dev_vdbg(dev->dev, "interface status %02x\n", status);
+ goto next_endpoints;
+ }
+
+ break;
+ }
+ case USB_REQ_CLEAR_FEATURE: {
+ struct net2272_ep *e;
+
+ if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+ goto delegate;
+ if (u.r.wValue != USB_ENDPOINT_HALT ||
+ u.r.wLength != 0)
+ goto do_stall;
+ e = net2272_get_ep_by_addr(dev, u.r.wIndex);
+ if (!e)
+ goto do_stall;
+ if (e->wedged) {
+ dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
+ ep->ep.name);
+ } else {
+ dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
+ clear_halt(e);
+ }
+ allow_status(ep);
+ goto next_endpoints;
+ }
+ case USB_REQ_SET_FEATURE: {
+ struct net2272_ep *e;
+
+ if (u.r.bRequestType == USB_RECIP_DEVICE) {
+ if (u.r.wIndex != NORMAL_OPERATION)
+ net2272_set_test_mode(dev, (u.r.wIndex >> 8));
+ allow_status(ep);
+ dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
+ goto next_endpoints;
+ } else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
+ goto delegate;
+ if (u.r.wValue != USB_ENDPOINT_HALT ||
+ u.r.wLength != 0)
+ goto do_stall;
+ e = net2272_get_ep_by_addr(dev, u.r.wIndex);
+ if (!e)
+ goto do_stall;
+ set_halt(e);
+ allow_status(ep);
+ dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
+ goto next_endpoints;
+ }
+ case USB_REQ_SET_ADDRESS: {
+ net2272_write(dev, OURADDR, u.r.wValue & 0xff);
+ allow_status(ep);
+ break;
+ }
+ default:
+ delegate:
+ dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
+ "ep_cfg %08x\n",
+ u.r.bRequestType, u.r.bRequest,
+ u.r.wValue, u.r.wIndex,
+ net2272_ep_read(ep, EP_CFG));
+ spin_unlock(&dev->lock);
+ tmp = dev->driver->setup(&dev->gadget, &u.r);
+ spin_lock(&dev->lock);
+ }
+
+ /* stall ep0 on error */
+ if (tmp < 0) {
+ do_stall:
+ dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
+ u.r.bRequestType, u.r.bRequest, tmp);
+ dev->protocol_stall = 1;
+ }
+ /* endpoint dma irq? */
+ } else if (stat & (1 << DMA_DONE_INTERRUPT)) {
+ net2272_cancel_dma(dev);
+ net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
+ stat &= ~(1 << DMA_DONE_INTERRUPT);
+ num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
+ ? 2 : 1;
+
+ ep = &dev->ep[num];
+ net2272_handle_dma(ep);
+ }
+
+ next_endpoints:
+ /* endpoint data irq? */
+ scratch = stat & 0x0f;
+ stat &= ~0x0f;
+ for (num = 0; scratch; num++) {
+ u8 t;
+
+ /* does this endpoint's FIFO and queue need tending? */
+ t = 1 << num;
+ if ((scratch & t) == 0)
+ continue;
+ scratch ^= t;
+
+ ep = &dev->ep[num];
+ net2272_handle_ep(ep);
+ }
+
+ /* some interrupts we can just ignore */
+ stat &= ~(1 << SOF_INTERRUPT);
+
+ if (stat)
+ dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
+}
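The per-endpoint dispatch loop at the end of net2272_handle_stat0_irqs() walks the low nibble of IRQSTAT0 one bit at a time. A minimal sketch of the same dispatch using the kernel's bit iterator (the helper name is hypothetical and not part of this patch; it assumes the existing net2272_handle_ep()):

#include <linux/bitops.h>

/* Illustrative equivalent of the open-coded loop above: the low four
 * IRQSTAT0 bits select ep0 and endpoints A-C. */
static void demo_dispatch_ep_irqs(struct net2272 *dev, u8 stat)
{
	unsigned long pending = stat & 0x0f;
	unsigned int num;

	for_each_set_bit(num, &pending, 4)
		net2272_handle_ep(&dev->ep[num]);
}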
+
+static void
+net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
+{
+ u8 tmp, mask;
+
+ /* after disconnect there's nothing else to do! */
+ tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
+ mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
+
+ if (stat & tmp) {
+ net2272_write(dev, IRQSTAT1, tmp);
+ if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
+ ((net2272_read(dev, USBCTL1) & mask) == 0))
+ || ((net2272_read(dev, USBCTL1) & (1 << VBUS_PIN))
+ == 0))
+ && (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
+ dev_dbg(dev->dev, "disconnect %s\n",
+ dev->driver->driver.name);
+ stop_activity(dev, dev->driver);
+ net2272_ep0_start(dev);
+ return;
+ }
+ stat &= ~tmp;
+
+ if (!stat)
+ return;
+ }
+
+ tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
+ if (stat & tmp) {
+ net2272_write(dev, IRQSTAT1, tmp);
+ if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
+ if (dev->driver->suspend)
+ dev->driver->suspend(&dev->gadget);
+ if (!enable_suspend) {
+ stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
+ dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
+ }
+ } else {
+ if (dev->driver->resume)
+ dev->driver->resume(&dev->gadget);
+ }
+ stat &= ~tmp;
+ }
+
+ /* clear any other status/irqs */
+ if (stat)
+ net2272_write(dev, IRQSTAT1, stat);
+
+ /* some status we can just ignore */
+ stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
+ | (1 << SUSPEND_REQUEST_INTERRUPT)
+ | (1 << RESUME_INTERRUPT));
+ if (!stat)
+ return;
+ else
+ dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
+}
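The disconnect test at the top of net2272_handle_stat1_irqs() packs two cases into a single condition. A hedged restatement of the same logic, for readability only (the helper below is illustrative, not part of this patch):

/* Illustrative only: report a disconnect when the gadget was connected
 * and either a root-port reset arrived with neither speed bit set, or
 * the VBUS pin has dropped. */
static bool net2272_saw_disconnect(struct net2272 *dev, u8 stat, u8 speed_mask)
{
	u8 usbctl1 = net2272_read(dev, USBCTL1);
	bool reset_no_speed = (stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
			      !(usbctl1 & speed_mask);
	bool vbus_gone = !(usbctl1 & (1 << VBUS_PIN));

	return dev->gadget.speed != USB_SPEED_UNKNOWN &&
	       (reset_no_speed || vbus_gone);
}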
+
+static irqreturn_t net2272_irq(int irq, void *_dev)
+{
+ struct net2272 *dev = _dev;
+#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
+ u32 intcsr;
+#endif
+#if defined(PLX_PCI_RDK)
+ u8 dmareq;
+#endif
+ spin_lock(&dev->lock);
+#if defined(PLX_PCI_RDK)
+ intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
+
+ if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
+ writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+ net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
+ net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
+ intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
+ writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+ }
+ if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
+ writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
+ dev->rdk1.plx9054_base_addr + DMACSR0);
+
+ dmareq = net2272_read(dev, DMAREQ);
+ if (dmareq & 0x01)
+ net2272_handle_dma(&dev->ep[2]);
+ else
+ net2272_handle_dma(&dev->ep[1]);
+ }
+#endif
+#if defined(PLX_PCI_RDK2)
+ /* see if the PCI interrupt is ours by checking the FPGA irqstat */
+ intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
+ if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
+ spin_unlock(&dev->lock);
+ return IRQ_NONE;
+ }
+ /* check dma interrupts */
+#endif
+ /* Platform/device interrupt handler */
+#if !defined(PLX_PCI_RDK)
+ net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
+ net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
+#endif
+ spin_unlock(&dev->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int net2272_present(struct net2272 *dev)
+{
+ /*
+ * Quick test to see if CPU can communicate properly with the NET2272.
+ * Verifies connection using writes and reads to write/read and
+ * read-only registers.
+ *
+ * This routine is strongly recommended, especially during early bring-up
+ * of new hardware; for designs that do not apply Power-On Self-Tests
+ * (POST) it may be discarded (or perhaps minimized).
+ */
+ unsigned int ii;
+ u8 val, refval;
+
+ /* Verify the NET2272 SCRATCH register can be written and read back */
+ refval = net2272_read(dev, SCRATCH);
+ for (ii = 0; ii < 0x100; ii += 7) {
+ net2272_write(dev, SCRATCH, ii);
+ val = net2272_read(dev, SCRATCH);
+ if (val != ii) {
+ dev_dbg(dev->dev,
+ "%s: write/read SCRATCH register test failed: "
+ "wrote:0x%2.2x, read:0x%2.2x\n",
+ __func__, ii, val);
+ return -EINVAL;
+ }
+ }
+ /* To be nice, we write the original SCRATCH value back: */
+ net2272_write(dev, SCRATCH, refval);
+
+ /* Verify NET2272 CHIPREV register is read-only: */
+ refval = net2272_read(dev, CHIPREV_2272);
+ for (ii = 0; ii < 0x100; ii += 7) {
+ net2272_write(dev, CHIPREV_2272, ii);
+ val = net2272_read(dev, CHIPREV_2272);
+ if (val != refval) {
+ dev_dbg(dev->dev,
+ "%s: write/read CHIPREV register test failed: "
+ "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
+ __func__, ii, val, refval);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Verify NET2272's "NET2270 legacy revision" register
+ * - NET2272 has two revision registers. The NET2270 legacy revision
+ * register should read the same value, regardless of the NET2272
+ * silicon revision. The legacy register exists so that firmware
+ * written for the NET2270 can also drive the NET2272.
+ */
+ val = net2272_read(dev, CHIPREV_LEGACY);
+ if (val != NET2270_LEGACY_REV) {
+ /*
+ * Unexpected legacy revision value
+ * - Perhaps the chip is a NET2270?
+ */
+ dev_dbg(dev->dev,
+ "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
+ " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
+ __func__, NET2270_LEGACY_REV, val);
+ return -EINVAL;
+ }
+
+ /*
+ * Verify NET2272 silicon revision
+ * - This revision register is appropriate for the silicon version
+ * of the NET2272
+ */
+ val = net2272_read(dev, CHIPREV_2272);
+ switch (val) {
+ case CHIPREV_NET2272_R1:
+ /*
+ * NET2272 Rev 1 has DMA related errata:
+ * - Newer silicon (Rev 1A or better) required
+ */
+ dev_dbg(dev->dev,
+ "%s: Rev 1 detected: newer silicon recommended for DMA support\n",
+ __func__);
+ break;
+ case CHIPREV_NET2272_R1A:
+ break;
+ default:
+ /* NET2272 silicon version *may* not work with this firmware */
+ dev_dbg(dev->dev,
+ "%s: unexpected silicon revision register value: "
+ " CHIPREV_2272: 0x%2.2x\n",
+ __func__, val);
+ /*
+ * Return Success, even though the chip rev is not an expected value
+ * - Older, pre-built firmware can attempt to operate on newer silicon
+ * - Often, new silicon is perfectly compatible
+ */
+ }
+
+ /* Success: NET2272 checks out OK */
+ return 0;
+}
+
+static void
+net2272_gadget_release(struct device *_dev)
+{
+ struct net2272 *dev = dev_get_drvdata(_dev);
+ kfree(dev);
+}
+
+/*---------------------------------------------------------------------------*/
+
+static void __devexit
+net2272_remove(struct net2272 *dev)
+{
+ usb_del_gadget_udc(&dev->gadget);
+
+ /* start with the driver above us */
+ if (dev->driver) {
+ /* should have been done already by driver model core */
+ dev_warn(dev->dev, "pci remove, driver '%s' is still registered\n",
+ dev->driver->driver.name);
+ usb_gadget_unregister_driver(dev->driver);
+ }
+
+ free_irq(dev->irq, dev);
+ iounmap(dev->base_addr);
+
+ device_unregister(&dev->gadget.dev);
+ device_remove_file(dev->dev, &dev_attr_registers);
+
+ dev_info(dev->dev, "unbind\n");
+ the_controller = NULL;
+}
+
+static struct net2272 * __devinit
+net2272_probe_init(struct device *dev, unsigned int irq)
+{
+ struct net2272 *ret;
+
+ if (the_controller) {
+ dev_warn(dev, "ignoring\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ if (!irq) {
+ dev_dbg(dev, "No IRQ!\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* alloc, and start init */
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&ret->lock);
+ ret->irq = irq;
+ ret->dev = dev;
+ ret->gadget.ops = &net2272_ops;
+ ret->gadget.is_dualspeed = 1;
+
+ /* the "gadget" abstracts/virtualizes the controller */
+ dev_set_name(&ret->gadget.dev, "gadget");
+ ret->gadget.dev.parent = dev;
+ ret->gadget.dev.dma_mask = dev->dma_mask;
+ ret->gadget.dev.release = net2272_gadget_release;
+ ret->gadget.name = driver_name;
+
+ return ret;
+}
+
+static int __devinit
+net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
+{
+ int ret;
+
+ /* See if there... */
+ if (net2272_present(dev)) {
+ dev_warn(dev->dev, "2272 not found!\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ net2272_usb_reset(dev);
+ net2272_usb_reinit(dev);
+
+ ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
+ if (ret) {
+ dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
+ goto err;
+ }
+
+ dev->chiprev = net2272_read(dev, CHIPREV_2272);
+
+ /* done */
+ dev_info(dev->dev, "%s\n", driver_desc);
+ dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
+ dev->irq, dev->base_addr, dev->chiprev,
+ dma_mode_string());
+ dev_info(dev->dev, "version: %s\n", driver_vers);
+
+ the_controller = dev;
+
+ ret = device_register(&dev->gadget.dev);
+ if (ret)
+ goto err_irq;
+ ret = device_create_file(dev->dev, &dev_attr_registers);
+ if (ret)
+ goto err_dev_reg;
+
+ ret = usb_add_gadget_udc(dev->dev, &dev->gadget);
+ if (ret)
+ goto err_add_udc;
+
+ return 0;
+
+err_add_udc:
+ device_remove_file(dev->dev, &dev_attr_registers);
+ err_dev_reg:
+ device_unregister(&dev->gadget.dev);
+ err_irq:
+ free_irq(dev->irq, dev);
+ err:
+ return ret;
+}
+
+#ifdef CONFIG_PCI
+
+/*
+ * wrap this driver around the specified device, but
+ * don't respond over USB until a gadget driver binds to us
+ */
+
+static int __devinit
+net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
+{
+ unsigned long resource, len, tmp;
+ void __iomem *mem_mapped_addr[4];
+ int ret, i;
+
+ /*
+ * BAR 0 holds PLX 9054 config registers
+ * BAR 1 is i/o memory; unused here
+ * BAR 2 holds EPLD config registers
+ * BAR 3 holds NET2272 registers
+ */
+
+ /* Find and map all address spaces */
+ for (i = 0; i < 4; ++i) {
+ if (i == 1)
+ continue; /* BAR1 unused */
+
+ resource = pci_resource_start(pdev, i);
+ len = pci_resource_len(pdev, i);
+
+ if (!request_mem_region(resource, len, driver_name)) {
+ dev_dbg(dev->dev, "controller already in use\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ mem_mapped_addr[i] = ioremap_nocache(resource, len);
+ if (mem_mapped_addr[i] == NULL) {
+ release_mem_region(resource, len);
+ dev_dbg(dev->dev, "can't map memory\n");
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+
+ dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
+ dev->rdk1.epld_base_addr = mem_mapped_addr[2];
+ dev->base_addr = mem_mapped_addr[3];
+
+ /* Set PLX 9054 bus width (16 bits) */
+ tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
+ writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
+ dev->rdk1.plx9054_base_addr + LBRD1);
+
+ /* Enable PLX 9054 Interrupts */
+ writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
+ (1 << PCI_INTERRUPT_ENABLE) |
+ (1 << LOCAL_INTERRUPT_INPUT_ENABLE),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+
+ writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
+ dev->rdk1.plx9054_base_addr + DMACSR0);
+
+ /* reset */
+ writeb((1 << EPLD_DMA_ENABLE) |
+ (1 << DMA_CTL_DACK) |
+ (1 << DMA_TIMEOUT_ENABLE) |
+ (1 << USER) |
+ (0 << MPX_MODE) |
+ (1 << BUSWIDTH) |
+ (1 << NET2272_RESET),
+ dev->base_addr + EPLD_IO_CONTROL_REGISTER);
+
+ mb();
+ writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
+ ~(1 << NET2272_RESET),
+ dev->base_addr + EPLD_IO_CONTROL_REGISTER);
+ udelay(200);
+
+ return 0;
+
+ err:
+ while (--i >= 0) {
+ iounmap(mem_mapped_addr[i]);
+ release_mem_region(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+ }
+
+ return ret;
+}
+
+static int __devinit
+net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
+{
+ unsigned long resource, len;
+ void __iomem *mem_mapped_addr[2];
+ int ret, i;
+
+ /*
+ * BAR 0 holds FPGA config registers
+ * BAR 1 holds NET2272 registers
+ */
+
+ /* Find and map all address spaces; BARs 2-3 are unused on the RDK2 */
+ for (i = 0; i < 2; ++i) {
+ resource = pci_resource_start(pdev, i);
+ len = pci_resource_len(pdev, i);
+
+ if (!request_mem_region(resource, len, driver_name)) {
+ dev_dbg(dev->dev, "controller already in use\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ mem_mapped_addr[i] = ioremap_nocache(resource, len);
+ if (mem_mapped_addr[i] == NULL) {
+ release_mem_region(resource, len);
+ dev_dbg(dev->dev, "can't map memory\n");
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+
+ dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
+ dev->base_addr = mem_mapped_addr[1];
+
+ mb();
+ /* Set 2272 bus width (16 bits) and reset */
+ writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
+ udelay(200);
+ writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
+ /* Print fpga version number */
+ dev_info(dev->dev, "RDK2 FPGA version %08x\n",
+ readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
+ /* Enable FPGA Interrupts */
+ writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
+
+ return 0;
+
+ err:
+ while (--i >= 0) {
+ iounmap(mem_mapped_addr[i]);
+ release_mem_region(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+ }
+
+ return ret;
+}
+
+static int __devinit
+net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct net2272 *dev;
+ int ret;
+
+ dev = net2272_probe_init(&pdev->dev, pdev->irq);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ dev->dev_id = pdev->device;
+
+ if (pci_enable_device(pdev) < 0) {
+ ret = -ENODEV;
+ goto err_free;
+ }
+
+ pci_set_master(pdev);
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
+ case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
+ default: BUG();
+ }
+ if (ret)
+ goto err_pci;
+
+ ret = net2272_probe_fin(dev, 0);
+ if (ret)
+ goto err_pci;
+
+ pci_set_drvdata(pdev, dev);
+
+ return 0;
+
+ err_pci:
+ pci_disable_device(pdev);
+ err_free:
+ kfree(dev);
+
+ return ret;
+}
+
+static void __devexit
+net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
+{
+ int i;
+
+ /* disable PLX 9054 interrupts */
+ writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
+ ~(1 << PCI_INTERRUPT_ENABLE),
+ dev->rdk1.plx9054_base_addr + INTCSR);
+
+ /* clean up resources allocated during probe() */
+ iounmap(dev->rdk1.plx9054_base_addr);
+ iounmap(dev->rdk1.epld_base_addr);
+
+ for (i = 0; i < 4; ++i) {
+ if (i == 1)
+ continue; /* BAR1 unused */
+ release_mem_region(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+ }
+}
+
+static void __devexit
+net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
+{
+ int i;
+
+ /*
+ * Note: FPGA interrupts are not explicitly disabled here; the
+ * commented-out sequence below is the RDK1 (PLX 9054) version and
+ * does not apply to the RDK2 FPGA.
+ *
+ * writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
+ * ~(1 << PCI_INTERRUPT_ENABLE),
+ * dev->rdk1.plx9054_base_addr + INTCSR);
+ */
+
+ /* clean up resources allocated during probe() */
+ iounmap(dev->rdk2.fpga_base_addr);
+
+ for (i = 0; i < 2; ++i)
+ release_mem_region(pci_resource_start(pdev, i),
+ pci_resource_len(pdev, i));
+}
+
+static void __devexit
+net2272_pci_remove(struct pci_dev *pdev)
+{
+ struct net2272 *dev = pci_get_drvdata(pdev);
+
+ net2272_remove(dev);
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
+ case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
+ default: BUG();
+ }
+
+ pci_disable_device(pdev);
+
+ kfree(dev);
+}
+
+/* Table of matching PCI IDs */
+static struct pci_device_id __devinitdata pci_ids[] = {
+ { /* RDK 1 card */
+ .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
+ .class_mask = 0,
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_RDK1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { /* RDK 2 card */
+ .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
+ .class_mask = 0,
+ .vendor = PCI_VENDOR_ID_PLX,
+ .device = PCI_DEVICE_ID_RDK2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver net2272_pci_driver = {
+ .name = driver_name,
+ .id_table = pci_ids,
+
+ .probe = net2272_pci_probe,
+ .remove = __devexit_p(net2272_pci_remove),
+};
+
+static int net2272_pci_register(void)
+{
+ return pci_register_driver(&net2272_pci_driver);
+}
+
+static void net2272_pci_unregister(void)
+{
+ pci_unregister_driver(&net2272_pci_driver);
+}
+
+#else
+static inline int net2272_pci_register(void) { return 0; }
+static inline void net2272_pci_unregister(void) { }
+#endif
+
+/*---------------------------------------------------------------------------*/
+
+static int __devinit
+net2272_plat_probe(struct platform_device *pdev)
+{
+ struct net2272 *dev;
+ int ret;
+ unsigned int irqflags;
+ resource_size_t base, len;
+ struct resource *iomem, *iomem_bus, *irq_res;
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
+ if (!irq_res || !iomem) {
+ dev_err(&pdev->dev, "must provide irq/base addr");
+ return -EINVAL;
+ }
+
+ dev = net2272_probe_init(&pdev->dev, irq_res->start);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ irqflags = 0;
+ if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
+ irqflags |= IRQF_TRIGGER_RISING;
+ if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
+ irqflags |= IRQF_TRIGGER_FALLING;
+ if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
+ irqflags |= IRQF_TRIGGER_HIGH;
+ if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
+ irqflags |= IRQF_TRIGGER_LOW;
+
+ base = iomem->start;
+ len = resource_size(iomem);
+ if (iomem_bus)
+ dev->base_shift = iomem_bus->start;
+
+ if (!request_mem_region(base, len, driver_name)) {
+ dev_dbg(dev->dev, "get request memory region!\n");
+ ret = -EBUSY;
+ goto err;
+ }
+ dev->base_addr = ioremap_nocache(base, len);
+ if (!dev->base_addr) {
+ dev_dbg(dev->dev, "can't map memory\n");
+ ret = -EFAULT;
+ goto err_req;
+ }
+
+ ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
+ if (ret)
+ goto err_io;
+
+ platform_set_drvdata(pdev, dev);
+ dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
+ (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
+
+ the_controller = dev;
+
+ return 0;
+
+ err_io:
+ iounmap(dev->base_addr);
+ err_req:
+ release_mem_region(base, len);
+ err:
+ return ret;
+}
+
+static int __devexit
+net2272_plat_remove(struct platform_device *pdev)
+{
+ struct net2272 *dev = platform_get_drvdata(pdev);
+
+ net2272_remove(dev);
+
+ release_mem_region(pdev->resource[0].start,
+ resource_size(&pdev->resource[0]));
+
+ kfree(dev);
+
+ return 0;
+}
+
+static struct platform_driver net2272_plat_driver = {
+ .probe = net2272_plat_probe,
+ .remove = __devexit_p(net2272_plat_remove),
+ .driver = {
+ .name = driver_name,
+ .owner = THIS_MODULE,
+ },
+ /* FIXME .suspend, .resume */
+};
+MODULE_ALIAS("platform:net2272");
+
+static int __init net2272_init(void)
+{
+ int ret;
+
+ ret = net2272_pci_register();
+ if (ret)
+ return ret;
+ ret = platform_driver_register(&net2272_plat_driver);
+ if (ret)
+ goto err_pci;
+ return ret;
+
+err_pci:
+ net2272_pci_unregister();
+ return ret;
+}
+module_init(net2272_init);
+
+static void __exit net2272_cleanup(void)
+{
+ net2272_pci_unregister();
+ platform_driver_unregister(&net2272_plat_driver);
+}
+module_exit(net2272_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("PLX Technology, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/net2272.h b/drivers/usb/gadget/net2272.h
new file mode 100644
index 00000000000..e5950578935
--- /dev/null
+++ b/drivers/usb/gadget/net2272.h
@@ -0,0 +1,601 @@
+/*
+ * PLX NET2272 high/full speed USB device controller
+ *
+ * Copyright (C) 2005-2006 PLX Technology, Inc.
+ * Copyright (C) 2006-2011 Analog Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __NET2272_H__
+#define __NET2272_H__
+
+/* Main Registers */
+#define REGADDRPTR 0x00
+#define REGDATA 0x01
+#define IRQSTAT0 0x02
+#define ENDPOINT_0_INTERRUPT 0
+#define ENDPOINT_A_INTERRUPT 1
+#define ENDPOINT_B_INTERRUPT 2
+#define ENDPOINT_C_INTERRUPT 3
+#define VIRTUALIZED_ENDPOINT_INTERRUPT 4
+#define SETUP_PACKET_INTERRUPT 5
+#define DMA_DONE_INTERRUPT 6
+#define SOF_INTERRUPT 7
+#define IRQSTAT1 0x03
+#define CONTROL_STATUS_INTERRUPT 1
+#define VBUS_INTERRUPT 2
+#define SUSPEND_REQUEST_INTERRUPT 3
+#define SUSPEND_REQUEST_CHANGE_INTERRUPT 4
+#define RESUME_INTERRUPT 5
+#define ROOT_PORT_RESET_INTERRUPT 6
+#define RESET_STATUS 7
+#define PAGESEL 0x04
+#define DMAREQ 0x1c
+#define DMA_ENDPOINT_SELECT 0
+#define DREQ_POLARITY 1
+#define DACK_POLARITY 2
+#define EOT_POLARITY 3
+#define DMA_CONTROL_DACK 4
+#define DMA_REQUEST_ENABLE 5
+#define DMA_REQUEST 6
+#define DMA_BUFFER_VALID 7
+#define SCRATCH 0x1d
+#define IRQENB0 0x20
+#define ENDPOINT_0_INTERRUPT_ENABLE 0
+#define ENDPOINT_A_INTERRUPT_ENABLE 1
+#define ENDPOINT_B_INTERRUPT_ENABLE 2
+#define ENDPOINT_C_INTERRUPT_ENABLE 3
+#define VIRTUALIZED_ENDPOINT_INTERRUPT_ENABLE 4
+#define SETUP_PACKET_INTERRUPT_ENABLE 5
+#define DMA_DONE_INTERRUPT_ENABLE 6
+#define SOF_INTERRUPT_ENABLE 7
+#define IRQENB1 0x21
+#define VBUS_INTERRUPT_ENABLE 2
+#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
+#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 4
+#define RESUME_INTERRUPT_ENABLE 5
+#define ROOT_PORT_RESET_INTERRUPT_ENABLE 6
+#define LOCCTL 0x22
+#define DATA_WIDTH 0
+#define LOCAL_CLOCK_OUTPUT 1
+#define LOCAL_CLOCK_OUTPUT_OFF 0
+#define LOCAL_CLOCK_OUTPUT_3_75MHZ 1
+#define LOCAL_CLOCK_OUTPUT_7_5MHZ 2
+#define LOCAL_CLOCK_OUTPUT_15MHZ 3
+#define LOCAL_CLOCK_OUTPUT_30MHZ 4
+#define LOCAL_CLOCK_OUTPUT_60MHZ 5
+#define DMA_SPLIT_BUS_MODE 4
+#define BYTE_SWAP 5
+#define BUFFER_CONFIGURATION 6
+#define BUFFER_CONFIGURATION_EPA512_EPB512 0
+#define BUFFER_CONFIGURATION_EPA1024_EPB512 1
+#define BUFFER_CONFIGURATION_EPA1024_EPB1024 2
+#define BUFFER_CONFIGURATION_EPA1024DB 3
+#define CHIPREV_LEGACY 0x23
+#define NET2270_LEGACY_REV 0x40
+#define LOCCTL1 0x24
+#define DMA_MODE 0
+#define SLOW_DREQ 0
+#define FAST_DREQ 1
+#define BURST_MODE 2
+#define DMA_DACK_ENABLE 2
+#define CHIPREV_2272 0x25
+#define CHIPREV_NET2272_R1 0x10
+#define CHIPREV_NET2272_R1A 0x11
+/* USB Registers */
+#define USBCTL0 0x18
+#define IO_WAKEUP_ENABLE 1
+#define USB_DETECT_ENABLE 3
+#define USB_ROOT_PORT_WAKEUP_ENABLE 5
+#define USBCTL1 0x19
+#define VBUS_PIN 0
+#define USB_FULL_SPEED 1
+#define USB_HIGH_SPEED 2
+#define GENERATE_RESUME 3
+#define VIRTUAL_ENDPOINT_ENABLE 4
+#define FRAME0 0x1a
+#define FRAME1 0x1b
+#define OURADDR 0x30
+#define FORCE_IMMEDIATE 7
+#define USBDIAG 0x31
+#define FORCE_TRANSMIT_CRC_ERROR 0
+#define PREVENT_TRANSMIT_BIT_STUFF 1
+#define FORCE_RECEIVE_ERROR 2
+#define FAST_TIMES 4
+#define USBTEST 0x32
+#define TEST_MODE_SELECT 0
+#define NORMAL_OPERATION 0
+#define TEST_J 1
+#define TEST_K 2
+#define TEST_SE0_NAK 3
+#define TEST_PACKET 4
+#define TEST_FORCE_ENABLE 5
+#define XCVRDIAG 0x33
+#define FORCE_FULL_SPEED 2
+#define FORCE_HIGH_SPEED 3
+#define OPMODE 4
+#define NORMAL_OPERATION 0
+#define NON_DRIVING 1
+#define DISABLE_BITSTUFF_AND_NRZI_ENCODE 2
+#define LINESTATE 6
+#define SE0_STATE 0
+#define J_STATE 1
+#define K_STATE 2
+#define SE1_STATE 3
+#define VIRTOUT0 0x34
+#define VIRTOUT1 0x35
+#define VIRTIN0 0x36
+#define VIRTIN1 0x37
+#define SETUP0 0x40
+#define SETUP1 0x41
+#define SETUP2 0x42
+#define SETUP3 0x43
+#define SETUP4 0x44
+#define SETUP5 0x45
+#define SETUP6 0x46
+#define SETUP7 0x47
+/* Endpoint Registers (Paged via PAGESEL) */
+#define EP_DATA 0x05
+#define EP_STAT0 0x06
+#define DATA_IN_TOKEN_INTERRUPT 0
+#define DATA_OUT_TOKEN_INTERRUPT 1
+#define DATA_PACKET_TRANSMITTED_INTERRUPT 2
+#define DATA_PACKET_RECEIVED_INTERRUPT 3
+#define SHORT_PACKET_TRANSFERRED_INTERRUPT 4
+#define NAK_OUT_PACKETS 5
+#define BUFFER_EMPTY 6
+#define BUFFER_FULL 7
+#define EP_STAT1 0x07
+#define TIMEOUT 0
+#define USB_OUT_ACK_SENT 1
+#define USB_OUT_NAK_SENT 2
+#define USB_IN_ACK_RCVD 3
+#define USB_IN_NAK_SENT 4
+#define USB_STALL_SENT 5
+#define LOCAL_OUT_ZLP 6
+#define BUFFER_FLUSH 7
+#define EP_TRANSFER0 0x08
+#define EP_TRANSFER1 0x09
+#define EP_TRANSFER2 0x0a
+#define EP_IRQENB 0x0b
+#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0
+#define DATA_OUT_TOKEN_INTERRUPT_ENABLE 1
+#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2
+#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3
+#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 4
+#define EP_AVAIL0 0x0c
+#define EP_AVAIL1 0x0d
+#define EP_RSPCLR 0x0e
+#define EP_RSPSET 0x0f
+#define ENDPOINT_HALT 0
+#define ENDPOINT_TOGGLE 1
+#define NAK_OUT_PACKETS_MODE 2
+#define CONTROL_STATUS_PHASE_HANDSHAKE 3
+#define INTERRUPT_MODE 4
+#define AUTOVALIDATE 5
+#define HIDE_STATUS_PHASE 6
+#define ALT_NAK_OUT_PACKETS 7
+#define EP_MAXPKT0 0x28
+#define EP_MAXPKT1 0x29
+#define ADDITIONAL_TRANSACTION_OPPORTUNITIES 3
+#define NONE_ADDITIONAL_TRANSACTION 0
+#define ONE_ADDITIONAL_TRANSACTION 1
+#define TWO_ADDITIONAL_TRANSACTION 2
+#define EP_CFG 0x2a
+#define ENDPOINT_NUMBER 0
+#define ENDPOINT_DIRECTION 4
+#define ENDPOINT_TYPE 5
+#define ENDPOINT_ENABLE 7
+#define EP_HBW 0x2b
+#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 0
+#define DATA0_PID 0
+#define DATA1_PID 1
+#define DATA2_PID 2
+#define MDATA_PID 3
+#define EP_BUFF_STATES 0x2c
+#define BUFFER_A_STATE 0
+#define BUFFER_B_STATE 2
+#define BUFF_FREE 0
+#define BUFF_VALID 1
+#define BUFF_LCL 2
+#define BUFF_USB 3
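The EP_CFG bit positions above describe a packed endpoint-configuration byte. A hedged sketch of composing it from a descriptor (the helper is hypothetical; the field widths are inferred from the bit positions, not taken from the data sheet):

#include <linux/usb/ch9.h>

/* Hypothetical helper: endpoint number in bits 3:0, direction in bit 4,
 * transfer type in bits 6:5, enable in bit 7. */
static u8 example_ep_cfg(unsigned num,
			 const struct usb_endpoint_descriptor *desc)
{
	return (num << ENDPOINT_NUMBER)
	     | (usb_endpoint_dir_in(desc) ? 1 << ENDPOINT_DIRECTION : 0)
	     | (usb_endpoint_type(desc) << ENDPOINT_TYPE)
	     | (1 << ENDPOINT_ENABLE);
}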
+
+/*---------------------------------------------------------------------------*/
+
+#define PCI_DEVICE_ID_RDK1 0x9054
+
+/* PCI-RDK EPLD Registers */
+#define RDK_EPLD_IO_REGISTER1 0x00000000
+#define RDK_EPLD_USB_RESET 0
+#define RDK_EPLD_USB_POWERDOWN 1
+#define RDK_EPLD_USB_WAKEUP 2
+#define RDK_EPLD_USB_EOT 3
+#define RDK_EPLD_DPPULL 4
+#define RDK_EPLD_IO_REGISTER2 0x00000004
+#define RDK_EPLD_BUSWIDTH 0
+#define RDK_EPLD_USER 2
+#define RDK_EPLD_RESET_INTERRUPT_ENABLE 3
+#define RDK_EPLD_DMA_TIMEOUT_ENABLE 4
+#define RDK_EPLD_STATUS_REGISTER 0x00000008
+#define RDK_EPLD_USB_LRESET 0
+#define RDK_EPLD_REVISION_REGISTER 0x0000000c
+
+/* PCI-RDK PLX 9054 Registers */
+#define INTCSR 0x68
+#define PCI_INTERRUPT_ENABLE 8
+#define LOCAL_INTERRUPT_INPUT_ENABLE 11
+#define LOCAL_INPUT_INTERRUPT_ACTIVE 15
+#define LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE 18
+#define LOCAL_DMA_CHANNEL_1_INTERRUPT_ENABLE 19
+#define DMA_CHANNEL_0_INTERRUPT_ACTIVE 21
+#define DMA_CHANNEL_1_INTERRUPT_ACTIVE 22
+#define CNTRL 0x6C
+#define RELOAD_CONFIGURATION_REGISTERS 29
+#define PCI_ADAPTER_SOFTWARE_RESET 30
+#define DMAMODE0 0x80
+#define LOCAL_BUS_WIDTH 0
+#define INTERNAL_WAIT_STATES 2
+#define TA_READY_INPUT_ENABLE 6
+#define LOCAL_BURST_ENABLE 8
+#define SCATTER_GATHER_MODE 9
+#define DONE_INTERRUPT_ENABLE 10
+#define LOCAL_ADDRESSING_MODE 11
+#define DEMAND_MODE 12
+#define DMA_EOT_ENABLE 14
+#define FAST_SLOW_TERMINATE_MODE_SELECT 15
+#define DMA_CHANNEL_INTERRUPT_SELECT 17
+#define DMAPADR0 0x84
+#define DMALADR0 0x88
+#define DMASIZ0 0x8c
+#define DMADPR0 0x90
+#define DESCRIPTOR_LOCATION 0
+#define END_OF_CHAIN 1
+#define INTERRUPT_AFTER_TERMINAL_COUNT 2
+#define DIRECTION_OF_TRANSFER 3
+#define DMACSR0 0xa8
+#define CHANNEL_ENABLE 0
+#define CHANNEL_START 1
+#define CHANNEL_ABORT 2
+#define CHANNEL_CLEAR_INTERRUPT 3
+#define CHANNEL_DONE 4
+#define DMATHR 0xb0
+#define LBRD1 0xf8
+#define MEMORY_SPACE_LOCAL_BUS_WIDTH 0
+#define W8_BIT 0
+#define W16_BIT 1
+
+/* Special OR'ing of INTCSR bits */
+#define LOCAL_INTERRUPT_TEST \
+ ((1 << LOCAL_INPUT_INTERRUPT_ACTIVE) | \
+ (1 << LOCAL_INTERRUPT_INPUT_ENABLE))
+
+#define DMA_CHANNEL_0_TEST \
+ ((1 << DMA_CHANNEL_0_INTERRUPT_ACTIVE) | \
+ (1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE))
+
+#define DMA_CHANNEL_1_TEST \
+ ((1 << DMA_CHANNEL_1_INTERRUPT_ACTIVE) | \
+ (1 << LOCAL_DMA_CHANNEL_1_INTERRUPT_ENABLE))
+
+/* EPLD Registers */
+#define RDK_EPLD_IO_REGISTER1 0x00000000
+#define RDK_EPLD_USB_RESET 0
+#define RDK_EPLD_USB_POWERDOWN 1
+#define RDK_EPLD_USB_WAKEUP 2
+#define RDK_EPLD_USB_EOT 3
+#define RDK_EPLD_DPPULL 4
+#define RDK_EPLD_IO_REGISTER2 0x00000004
+#define RDK_EPLD_BUSWIDTH 0
+#define RDK_EPLD_USER 2
+#define RDK_EPLD_RESET_INTERRUPT_ENABLE 3
+#define RDK_EPLD_DMA_TIMEOUT_ENABLE 4
+#define RDK_EPLD_STATUS_REGISTER 0x00000008
+#define RDK_EPLD_USB_LRESET 0
+#define RDK_EPLD_REVISION_REGISTER 0x0000000c
+
+#define EPLD_IO_CONTROL_REGISTER 0x400
+#define NET2272_RESET 0
+#define BUSWIDTH 1
+#define MPX_MODE 3
+#define USER 4
+#define DMA_TIMEOUT_ENABLE 5
+#define DMA_CTL_DACK 6
+#define EPLD_DMA_ENABLE 7
+#define EPLD_DMA_CONTROL_REGISTER 0x800
+#define SPLIT_DMA_MODE 0
+#define SPLIT_DMA_DIRECTION 1
+#define SPLIT_DMA_ENABLE 2
+#define SPLIT_DMA_INTERRUPT_ENABLE 3
+#define SPLIT_DMA_INTERRUPT 4
+#define EPLD_DMA_MODE 5
+#define EPLD_DMA_CONTROLLER_ENABLE 7
+#define SPLIT_DMA_ADDRESS_LOW 0xc00
+#define SPLIT_DMA_ADDRESS_HIGH 0x1000
+#define SPLIT_DMA_BYTE_COUNT_LOW 0x1400
+#define SPLIT_DMA_BYTE_COUNT_HIGH 0x1800
+#define EPLD_REVISION_REGISTER 0x1c00
+#define SPLIT_DMA_RAM 0x4000
+#define DMA_RAM_SIZE 0x1000
+
+/*---------------------------------------------------------------------------*/
+
+#define PCI_DEVICE_ID_RDK2 0x3272
+
+/* PCI-RDK version 2 registers */
+
+/* Main Control Registers */
+
+#define RDK2_IRQENB 0x00
+#define RDK2_IRQSTAT 0x04
+#define PB7 23
+#define PB6 22
+#define PB5 21
+#define PB4 20
+#define PB3 19
+#define PB2 18
+#define PB1 17
+#define PB0 16
+#define GP3 23
+#define GP2 23
+#define GP1 23
+#define GP0 23
+#define DMA_RETRY_ABORT 6
+#define DMA_PAUSE_DONE 5
+#define DMA_ABORT_DONE 4
+#define DMA_OUT_FIFO_TRANSFER_DONE 3
+#define DMA_LOCAL_DONE 2
+#define DMA_PCI_DONE 1
+#define NET2272_PCI_IRQ 0
+
+#define RDK2_LOCCTLRDK 0x08
+#define CHIP_RESET 3
+#define SPLIT_DMA 2
+#define MULTIPLEX_MODE 1
+#define BUS_WIDTH 0
+
+#define RDK2_GPIOCTL 0x10
+#define GP3_OUT_ENABLE 7
+#define GP2_OUT_ENABLE 6
+#define GP1_OUT_ENABLE 5
+#define GP0_OUT_ENABLE 4
+#define GP3_DATA 3
+#define GP2_DATA 2
+#define GP1_DATA 1
+#define GP0_DATA 0
+
+#define RDK2_LEDSW 0x14
+#define LED3 27
+#define LED2 26
+#define LED1 25
+#define LED0 24
+#define PBUTTON 16
+#define DIPSW 0
+
+#define RDK2_DIAG 0x18
+#define RDK2_FAST_TIMES 2
+#define FORCE_PCI_SERR 1
+#define FORCE_PCI_INT 0
+#define RDK2_FPGAREV 0x1C
+
+/* Dma Control registers */
+#define RDK2_DMACTL 0x80
+#define ADDR_HOLD 24
+#define RETRY_COUNT 16 /* 23:16 */
+#define FIFO_THRESHOLD 11 /* 15:11 */
+#define MEM_WRITE_INVALIDATE 10
+#define READ_MULTIPLE 9
+#define READ_LINE 8
+#define RDK2_DMA_MODE 6 /* 7:6 */
+#define CONTROL_DACK 5
+#define EOT_ENABLE 4
+#define EOT_POLARITY 3
+#define DACK_POLARITY 2
+#define DREQ_POLARITY 1
+#define DMA_ENABLE 0
+
+#define RDK2_DMASTAT 0x84
+#define GATHER_COUNT 12 /* 14:12 */
+#define FIFO_COUNT 6 /* 11:6 */
+#define FIFO_FLUSH 5
+#define FIFO_TRANSFER 4
+#define PAUSE_DONE 3
+#define ABORT_DONE 2
+#define DMA_ABORT 1
+#define DMA_START 0
+
+#define RDK2_DMAPCICOUNT 0x88
+#define DMA_DIRECTION 31
+#define DMA_PCI_BYTE_COUNT 0 /* 0:23 */
+
+#define RDK2_DMALOCCOUNT 0x8C /* 0:23 dma local byte count */
+
+#define RDK2_DMAADDR 0x90 /* 2:31 PCI bus starting address */
+
+/*---------------------------------------------------------------------------*/
+
+#define REG_INDEXED_THRESHOLD (1 << 5)
+
+/* DRIVER DATA STRUCTURES and UTILITIES */
+struct net2272_ep {
+ struct usb_ep ep;
+ struct net2272 *dev;
+ unsigned long irqs;
+
+ /* analogous to a host-side qh */
+ struct list_head queue;
+ const struct usb_endpoint_descriptor *desc;
+ unsigned num:8,
+ fifo_size:12,
+ stopped:1,
+ wedged:1,
+ is_in:1,
+ is_iso:1,
+ dma:1,
+ not_empty:1;
+};
+
+struct net2272 {
+ /* each device provides one gadget, several endpoints */
+ struct usb_gadget gadget;
+ struct device *dev;
+ unsigned short dev_id;
+
+ spinlock_t lock;
+ struct net2272_ep ep[4];
+ struct usb_gadget_driver *driver;
+ unsigned protocol_stall:1,
+ softconnect:1,
+ is_selfpowered:1,
+ wakeup:1,
+ dma_eot_polarity:1,
+ dma_dack_polarity:1,
+ dma_dreq_polarity:1,
+ dma_busy:1;
+ u16 chiprev;
+ u8 pagesel;
+
+ unsigned int irq;
+ unsigned short fifo_mode;
+
+ unsigned int base_shift;
+ u16 __iomem *base_addr;
+ union {
+#ifdef CONFIG_PCI
+ struct {
+ void __iomem *plx9054_base_addr;
+ void __iomem *epld_base_addr;
+ } rdk1;
+ struct {
+ /* BAR0 is the FPGA; BAR1 is base_addr; both are mem-mapped */
+ void __iomem *fpga_base_addr;
+ } rdk2;
+#endif
+ };
+};
+
+static void __iomem *
+net2272_reg_addr(struct net2272 *dev, unsigned int reg)
+{
+ return dev->base_addr + (reg << dev->base_shift);
+}
+
+static void
+net2272_write(struct net2272 *dev, unsigned int reg, u8 value)
+{
+ if (reg >= REG_INDEXED_THRESHOLD) {
+ /*
+ * Indexed register; use REGADDRPTR/REGDATA
+ * - Saving and restoring REGADDRPTR (see the commented-out code below)
+ * keeps it from being changed out from under other code sections,
+ * but it is time consuming.
+ * - Performance tips: either do not save and restore REGADDRPTR (if it
+ * is safe) or do save/restore operations only in critical sections.
+ u8 tmp = readb(dev->base_addr + REGADDRPTR);
+ */
+ writeb((u8)reg, net2272_reg_addr(dev, REGADDRPTR));
+ writeb(value, net2272_reg_addr(dev, REGDATA));
+ /* writeb(tmp, net2272_reg_addr(dev, REGADDRPTR)); */
+ } else
+ writeb(value, net2272_reg_addr(dev, reg));
+}
+
+static u8
+net2272_read(struct net2272 *dev, unsigned int reg)
+{
+ u8 ret;
+
+ if (reg >= REG_INDEXED_THRESHOLD) {
+ /*
+ * Indexed register; use REGADDRPTR/REGDATA
+ * - Saving and restoring REGADDRPTR (see the commented-out code below)
+ * keeps it from being changed out from under other code sections,
+ * but it is time consuming.
+ * - Performance tips: either do not save and restore REGADDRPTR (if it
+ * is safe) or do save/restore operations only in critical sections.
+ u8 tmp = readb(dev->base_addr + REGADDRPTR);
+ */
+ writeb((u8)reg, net2272_reg_addr(dev, REGADDRPTR));
+ ret = readb(net2272_reg_addr(dev, REGDATA));
+ /* writeb(tmp, net2272_reg_addr(dev, REGADDRPTR)); */
+ } else
+ ret = readb(net2272_reg_addr(dev, reg));
+
+ return ret;
+}
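A minimal sketch of the save/restore variant the comments above describe (the helper name is hypothetical; whether the extra accesses are worth it depends on what else may touch REGADDRPTR):

/* Illustrative only: an indexed write that saves and restores the
 * caller's REGADDRPTR, trading an extra read/write per access for
 * safety against other code that left REGADDRPTR set. */
static void example_write_indexed_preserving(struct net2272 *dev,
					      unsigned int reg, u8 value)
{
	u8 saved = readb(net2272_reg_addr(dev, REGADDRPTR));

	writeb((u8)reg, net2272_reg_addr(dev, REGADDRPTR));
	writeb(value, net2272_reg_addr(dev, REGDATA));
	writeb(saved, net2272_reg_addr(dev, REGADDRPTR));
}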
+
+static void
+net2272_ep_write(struct net2272_ep *ep, unsigned int reg, u8 value)
+{
+ struct net2272 *dev = ep->dev;
+
+ if (dev->pagesel != ep->num) {
+ net2272_write(dev, PAGESEL, ep->num);
+ dev->pagesel = ep->num;
+ }
+ net2272_write(dev, reg, value);
+}
+
+static u8
+net2272_ep_read(struct net2272_ep *ep, unsigned int reg)
+{
+ struct net2272 *dev = ep->dev;
+
+ if (dev->pagesel != ep->num) {
+ net2272_write(dev, PAGESEL, ep->num);
+ dev->pagesel = ep->num;
+ }
+ return net2272_read(dev, reg);
+}
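net2272_ep_write()/net2272_ep_read() hide the PAGESEL bookkeeping, so paged endpoint registers read like plain ones; for example (hypothetical helper, not part of this patch):

/* Illustrative only: EP_STAT0 is a paged register, but the cached
 * PAGESEL handling above makes the access a one-liner. */
static bool example_ep_buffer_full(struct net2272_ep *ep)
{
	return net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL);
}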
+
+static void allow_status(struct net2272_ep *ep)
+{
+ /* ep0 only */
+ net2272_ep_write(ep, EP_RSPCLR,
+ (1 << CONTROL_STATUS_PHASE_HANDSHAKE) |
+ (1 << ALT_NAK_OUT_PACKETS) |
+ (1 << NAK_OUT_PACKETS_MODE));
+ ep->stopped = 1;
+}
+
+static void set_halt(struct net2272_ep *ep)
+{
+ /* ep0 and bulk/intr endpoints */
+ net2272_ep_write(ep, EP_RSPCLR, 1 << CONTROL_STATUS_PHASE_HANDSHAKE);
+ net2272_ep_write(ep, EP_RSPSET, 1 << ENDPOINT_HALT);
+}
+
+static void clear_halt(struct net2272_ep *ep)
+{
+ /* ep0 and bulk/intr endpoints */
+ net2272_ep_write(ep, EP_RSPCLR,
+ (1 << ENDPOINT_HALT) | (1 << ENDPOINT_TOGGLE));
+}
+
+/* count (<= 4) bytes in the next fifo write will be valid */
+static void set_fifo_bytecount(struct net2272_ep *ep, unsigned count)
+{
+ /* net2272_ep_write will truncate to u8 for us */
+ net2272_ep_write(ep, EP_TRANSFER2, count >> 16);
+ net2272_ep_write(ep, EP_TRANSFER1, count >> 8);
+ net2272_ep_write(ep, EP_TRANSFER0, count);
+}
+
+struct net2272_request {
+ struct usb_request req;
+ struct list_head queue;
+ unsigned mapped:1,
+ valid:1;
+};
+
+#endif
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 476d88e1ae9..3dd40b4e675 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -1410,11 +1410,17 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
return 0;
}
+static int net2280_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int net2280_stop(struct usb_gadget_driver *driver);
+
static const struct usb_gadget_ops net2280_ops = {
.get_frame = net2280_get_frame,
.wakeup = net2280_wakeup,
.set_selfpowered = net2280_set_selfpowered,
.pullup = net2280_pullup,
+ .start = net2280_start,
+ .stop = net2280_stop,
};
/*-------------------------------------------------------------------------*/
@@ -1738,62 +1744,6 @@ static void set_fifo_mode (struct net2280 *dev, int mode)
list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list);
}
-/* just declare this in any driver that really need it */
-extern int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode);
-
-/**
- * net2280_set_fifo_mode - change allocation of fifo buffers
- * @gadget: access to the net2280 device that will be updated
- * @mode: 0 for default, four 1kB buffers (ep-a through ep-d);
- * 1 for two 2kB buffers (ep-a and ep-b only);
- * 2 for one 2kB buffer (ep-a) and two 1kB ones (ep-b, ep-c).
- *
- * returns zero on success, else negative errno. when this succeeds,
- * the contents of gadget->ep_list may have changed.
- *
- * you may only call this function when endpoints a-d are all disabled.
- * use it whenever extra hardware buffering can help performance, such
- * as before enabling "high bandwidth" interrupt endpoints that use
- * maxpacket bigger than 512 (when double buffering would otherwise
- * be unavailable).
- */
-int net2280_set_fifo_mode (struct usb_gadget *gadget, int mode)
-{
- int i;
- struct net2280 *dev;
- int status = 0;
- unsigned long flags;
-
- if (!gadget)
- return -ENODEV;
- dev = container_of (gadget, struct net2280, gadget);
-
- spin_lock_irqsave (&dev->lock, flags);
-
- for (i = 1; i <= 4; i++)
- if (dev->ep [i].desc) {
- status = -EINVAL;
- break;
- }
- if (mode < 0 || mode > 2)
- status = -EINVAL;
- if (status == 0)
- set_fifo_mode (dev, mode);
- spin_unlock_irqrestore (&dev->lock, flags);
-
- if (status == 0) {
- if (mode == 1)
- DEBUG (dev, "fifo: ep-a 2K, ep-b 2K\n");
- else if (mode == 2)
- DEBUG (dev, "fifo: ep-a 2K, ep-b 1K, ep-c 1K\n");
- /* else all are 1K */
- }
- return status;
-}
-EXPORT_SYMBOL (net2280_set_fifo_mode);
-
-/*-------------------------------------------------------------------------*/
-
/* keeping it simple:
* - one bus driver, initted first;
* - one function driver, initted second
@@ -1930,7 +1880,7 @@ static void ep0_start (struct net2280 *dev)
* disconnect is reported. then a host may connect again, or
* the driver might get unbound.
*/
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int net2280_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct net2280 *dev = the_controller;
@@ -1994,7 +1944,6 @@ err_unbind:
dev->driver = NULL;
return retval;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
static void
stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
@@ -2022,7 +1971,7 @@ stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
usb_reinit (dev);
}
-int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
+static int net2280_stop(struct usb_gadget_driver *driver)
{
struct net2280 *dev = the_controller;
unsigned long flags;
@@ -2049,8 +1998,6 @@ int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
DEBUG (dev, "unregistered driver '%s'\n", driver->driver.name);
return 0;
}
-EXPORT_SYMBOL (usb_gadget_unregister_driver);
-
/*-------------------------------------------------------------------------*/
@@ -2732,6 +2679,8 @@ static void net2280_remove (struct pci_dev *pdev)
{
struct net2280 *dev = pci_get_drvdata (pdev);
+ usb_del_gadget_udc(&dev->gadget);
+
BUG_ON(dev->driver);
/* then clean up the resources we allocated during probe() */
@@ -2916,6 +2865,9 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
retval = device_create_file (&pdev->dev, &dev_attr_registers);
if (retval) goto done;
+ retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
+ if (retval)
+ goto done;
return 0;
done:
diff --git a/drivers/usb/gadget/nokia.c b/drivers/usb/gadget/nokia.c
index 55ca63ad350..c7fb7723c01 100644
--- a/drivers/usb/gadget/nokia.c
+++ b/drivers/usb/gadget/nokia.c
@@ -241,6 +241,7 @@ static struct usb_composite_driver nokia_driver = {
.name = "g_nokia",
.dev = &device_desc,
.strings = dev_strings,
+ .max_speed = USB_SPEED_HIGH,
.unbind = __exit_p(nokia_unbind),
};
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 82fd2493533..740c7daed27 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -1375,6 +1375,10 @@ static int omap_pullup(struct usb_gadget *gadget, int is_on)
return 0;
}
+static int omap_udc_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int omap_udc_stop(struct usb_gadget_driver *driver);
+
static struct usb_gadget_ops omap_gadget_ops = {
.get_frame = omap_get_frame,
.wakeup = omap_wakeup,
@@ -1382,6 +1386,8 @@ static struct usb_gadget_ops omap_gadget_ops = {
.vbus_session = omap_vbus_session,
.vbus_draw = omap_vbus_draw,
.pullup = omap_pullup,
+ .start = omap_udc_start,
+ .stop = omap_udc_stop,
};
/*-------------------------------------------------------------------------*/
@@ -2102,7 +2108,7 @@ static inline int machine_without_vbus_sense(void)
);
}
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int omap_udc_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
int status = -ENODEV;
@@ -2186,9 +2192,8 @@ done:
omap_udc_enable_clock(0);
return status;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
+static int omap_udc_stop(struct usb_gadget_driver *driver)
{
unsigned long flags;
int status = -ENODEV;
@@ -2222,8 +2227,6 @@ int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
DBG("unregistered driver '%s'\n", driver->driver.name);
return status;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
-
/*-------------------------------------------------------------------------*/
@@ -2991,9 +2994,16 @@ known:
create_proc_file();
status = device_add(&udc->gadget.dev);
+ if (status)
+ goto cleanup4;
+
+ status = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
if (!status)
return status;
/* If fail, fall through */
+cleanup4:
+ remove_proc_file();
+
#ifdef USE_ISO
cleanup3:
free_irq(pdev->resource[2].start, udc);
@@ -3029,6 +3039,8 @@ static int __exit omap_udc_remove(struct platform_device *pdev)
if (!udc)
return -ENODEV;
+
+ usb_del_gadget_udc(&udc->gadget);
if (udc->driver)
return -EBUSY;
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 68dbcc3e4cc..f96615ab6b7 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -1176,6 +1176,9 @@ static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
return -EOPNOTSUPP;
}
+static int pch_udc_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int pch_udc_stop(struct usb_gadget_driver *driver);
static const struct usb_gadget_ops pch_udc_ops = {
.get_frame = pch_udc_pcd_get_frame,
.wakeup = pch_udc_pcd_wakeup,
@@ -1183,6 +1186,8 @@ static const struct usb_gadget_ops pch_udc_ops = {
.pullup = pch_udc_pcd_pullup,
.vbus_session = pch_udc_pcd_vbus_session,
.vbus_draw = pch_udc_pcd_vbus_draw,
+ .start = pch_udc_start,
+ .stop = pch_udc_stop,
};
/**
@@ -2690,7 +2695,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
return 0;
}
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int pch_udc_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct pch_udc_dev *dev = pch_udc;
@@ -2733,9 +2738,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
dev->connected = 1;
return 0;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int pch_udc_stop(struct usb_gadget_driver *driver)
{
struct pch_udc_dev *dev = pch_udc;
@@ -2761,7 +2765,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
pch_udc_set_disconnect(dev);
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
static void pch_udc_shutdown(struct pci_dev *pdev)
{
@@ -2778,6 +2781,8 @@ static void pch_udc_remove(struct pci_dev *pdev)
{
struct pch_udc_dev *dev = pci_get_drvdata(pdev);
+ usb_del_gadget_udc(&dev->gadget);
+
/* gadget driver must not be registered */
if (dev->driver)
dev_err(&pdev->dev,
@@ -2953,6 +2958,9 @@ static int pch_udc_probe(struct pci_dev *pdev,
/* Put the device in disconnected state till a driver is bound */
pch_udc_set_disconnect(dev);
+ retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
+ if (retval)
+ goto finished;
return 0;
finished:
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 271ef94668e..a341dde6f9c 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -89,8 +89,7 @@ struct printer_dev {
u8 config;
s8 interface;
struct usb_ep *in_ep, *out_ep;
- const struct usb_endpoint_descriptor
- *in, *out;
+
struct list_head rx_reqs; /* List of free RX structs */
struct list_head rx_reqs_active; /* List of Active RX xfers */
struct list_head rx_buffers; /* List of completed xfers */
@@ -795,12 +794,14 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
}
static int
-printer_fsync(struct file *fd, int datasync)
+printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync)
{
struct printer_dev *dev = fd->private_data;
+ struct inode *inode = fd->f_path.dentry->d_inode;
unsigned long flags;
int tx_list_empty;
+ mutex_lock(&inode->i_mutex);
spin_lock_irqsave(&dev->lock, flags);
tx_list_empty = (likely(list_empty(&dev->tx_reqs)));
spin_unlock_irqrestore(&dev->lock, flags);
@@ -810,6 +811,7 @@ printer_fsync(struct file *fd, int datasync)
wait_event_interruptible(dev->tx_flush_wait,
(likely(list_empty(&dev->tx_reqs_active))));
}
+ mutex_unlock(&inode->i_mutex);
return 0;
}
@@ -895,19 +897,20 @@ set_printer_interface(struct printer_dev *dev)
{
int result = 0;
- dev->in = ep_desc(dev->gadget, &hs_ep_in_desc, &fs_ep_in_desc);
+ dev->in_ep->desc = ep_desc(dev->gadget, &hs_ep_in_desc, &fs_ep_in_desc);
dev->in_ep->driver_data = dev;
- dev->out = ep_desc(dev->gadget, &hs_ep_out_desc, &fs_ep_out_desc);
+ dev->out_ep->desc = ep_desc(dev->gadget, &hs_ep_out_desc,
+ &fs_ep_out_desc);
dev->out_ep->driver_data = dev;
- result = usb_ep_enable(dev->in_ep, dev->in);
+ result = usb_ep_enable(dev->in_ep);
if (result != 0) {
DBG(dev, "enable %s --> %d\n", dev->in_ep->name, result);
goto done;
}
- result = usb_ep_enable(dev->out_ep, dev->out);
+ result = usb_ep_enable(dev->out_ep);
if (result != 0) {
DBG(dev, "enable %s --> %d\n", dev->in_ep->name, result);
goto done;
@@ -918,8 +921,8 @@ done:
if (result != 0) {
(void) usb_ep_disable(dev->in_ep);
(void) usb_ep_disable(dev->out_ep);
- dev->in = NULL;
- dev->out = NULL;
+ dev->in_ep->desc = NULL;
+ dev->out_ep->desc = NULL;
}
/* caller is responsible for cleanup on error */
@@ -933,12 +936,14 @@ static void printer_reset_interface(struct printer_dev *dev)
DBG(dev, "%s\n", __func__);
- if (dev->in)
+ if (dev->in_ep->desc)
usb_ep_disable(dev->in_ep);
- if (dev->out)
+ if (dev->out_ep->desc)
usb_ep_disable(dev->out_ep);
+ dev->in_ep->desc = NULL;
+ dev->out_ep->desc = NULL;
dev->interface = -1;
}
@@ -1104,9 +1109,9 @@ static void printer_soft_reset(struct printer_dev *dev)
list_add(&req->list, &dev->tx_reqs);
}
- if (usb_ep_enable(dev->in_ep, dev->in))
+ if (usb_ep_enable(dev->in_ep))
DBG(dev, "Failed to enable USB in_ep\n");
- if (usb_ep_enable(dev->out_ep, dev->out))
+ if (usb_ep_enable(dev->out_ep))
DBG(dev, "Failed to enable USB out_ep\n");
wake_up_interruptible(&dev->rx_wait);
@@ -1146,6 +1151,8 @@ printer_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
switch (wValue >> 8) {
case USB_DT_DEVICE:
+ device_desc.bMaxPacketSize0 =
+ gadget->ep0->maxpacket;
value = min(wLength, (u16) sizeof device_desc);
memcpy(req->buf, &device_desc, value);
break;
@@ -1153,6 +1160,12 @@ printer_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
case USB_DT_DEVICE_QUALIFIER:
if (!gadget->is_dualspeed)
break;
+ /*
+ * assumes ep0 uses the same value for both
+ * speeds
+ */
+ dev_qualifier.bMaxPacketSize0 =
+ gadget->ep0->maxpacket;
value = min(wLength,
(u16) sizeof dev_qualifier);
memcpy(req->buf, &dev_qualifier, value);
@@ -1448,15 +1461,11 @@ autoconf_fail:
out_ep->driver_data = out_ep; /* claim */
#ifdef CONFIG_USB_GADGET_DUALSPEED
- /* assumes ep0 uses the same value for both speeds ... */
- dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0;
-
- /* and that all endpoints are dual-speed */
+ /* assumes that all endpoints are dual-speed */
hs_ep_in_desc.bEndpointAddress = fs_ep_in_desc.bEndpointAddress;
hs_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
#endif /* DUALSPEED */
- device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket;
usb_gadget_set_selfpowered(gadget);
if (gadget->is_otg) {
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 774545494cf..e4e59b4de25 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -1011,12 +1011,18 @@ static int pxa25x_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
return -EOPNOTSUPP;
}
+static int pxa25x_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int pxa25x_stop(struct usb_gadget_driver *driver);
+
static const struct usb_gadget_ops pxa25x_udc_ops = {
.get_frame = pxa25x_udc_get_frame,
.wakeup = pxa25x_udc_wakeup,
.vbus_session = pxa25x_udc_vbus_session,
.pullup = pxa25x_udc_pullup,
.vbus_draw = pxa25x_udc_vbus_draw,
+ .start = pxa25x_start,
+ .stop = pxa25x_stop,
};
/*-------------------------------------------------------------------------*/
@@ -1263,7 +1269,7 @@ static void udc_enable (struct pxa25x_udc *dev)
* disconnect is reported. then a host may connect again, or
* the driver might get unbound.
*/
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int pxa25x_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct pxa25x_udc *dev = the_controller;
@@ -1322,7 +1328,6 @@ fail:
bind_fail:
return retval;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
static void
stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
@@ -1351,7 +1356,7 @@ stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
udc_reinit(dev);
}
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int pxa25x_stop(struct usb_gadget_driver *driver)
{
struct pxa25x_udc *dev = the_controller;
@@ -1379,8 +1384,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
dump_state(dev);
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
-
/*-------------------------------------------------------------------------*/
@@ -2231,8 +2234,11 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
#endif
create_debug_files(dev);
- return 0;
+ retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
+ if (!retval)
+ return retval;
+ remove_debug_files(dev);
#ifdef CONFIG_ARCH_LUBBOCK
lubbock_fail0:
free_irq(LUBBOCK_USB_DISC_IRQ, dev);
@@ -2261,6 +2267,7 @@ static int __exit pxa25x_udc_remove(struct platform_device *pdev)
{
struct pxa25x_udc *dev = platform_get_drvdata(pdev);
+ usb_del_gadget_udc(&dev->gadget);
if (dev->driver)
return -EBUSY;
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 57607696735..85b68c75dc9 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -1680,12 +1680,18 @@ static int pxa_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
return -EOPNOTSUPP;
}
+static int pxa27x_udc_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int pxa27x_udc_stop(struct usb_gadget_driver *driver);
+
static const struct usb_gadget_ops pxa_udc_ops = {
.get_frame = pxa_udc_get_frame,
.wakeup = pxa_udc_wakeup,
.pullup = pxa_udc_pullup,
.vbus_session = pxa_udc_vbus_session,
.vbus_draw = pxa_udc_vbus_draw,
+ .start = pxa27x_udc_start,
+ .stop = pxa27x_udc_stop,
};
/**
@@ -1791,7 +1797,7 @@ static void udc_enable(struct pxa_udc *udc)
}
/**
- * usb_gadget_probe_driver - Register gadget driver
+ * pxa27x_start - Register gadget driver
* @driver: gadget driver
* @bind: bind function
*
@@ -1805,7 +1811,7 @@ static void udc_enable(struct pxa_udc *udc)
*
* Returns 0 if no error, -EINVAL, -ENODEV, -EBUSY otherwise
*/
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int pxa27x_udc_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct pxa_udc *udc = the_controller;
@@ -1860,8 +1866,6 @@ add_fail:
udc->gadget.dev.driver = NULL;
return retval;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-
/**
* stop_activity - Stops udc endpoints
@@ -1888,12 +1892,12 @@ static void stop_activity(struct pxa_udc *udc, struct usb_gadget_driver *driver)
}
/**
- * usb_gadget_unregister_driver - Unregister the gadget driver
+ * pxa27x_udc_stop - Unregister the gadget driver
* @driver: gadget driver
*
* Returns 0 if no error, -ENODEV, -EINVAL otherwise
*/
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int pxa27x_udc_stop(struct usb_gadget_driver *driver)
{
struct pxa_udc *udc = the_controller;
@@ -1917,7 +1921,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
return otg_set_peripheral(udc->transceiver, NULL);
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
/**
* handle_ep0_ctrl_req - handle control endpoint control request
@@ -2516,9 +2519,14 @@ static int __init pxa_udc_probe(struct platform_device *pdev)
driver_name, IRQ_USB, retval);
goto err_irq;
}
+ retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+ if (retval)
+ goto err_add_udc;
pxa_init_debugfs(udc);
return 0;
+err_add_udc:
+ free_irq(udc->irq, udc);
err_irq:
iounmap(udc->regs);
err_map:
@@ -2537,6 +2545,7 @@ static int __exit pxa_udc_remove(struct platform_device *_dev)
struct pxa_udc *udc = platform_get_drvdata(_dev);
int gpio = udc->mach->gpio_pullup;
+ usb_del_gadget_udc(&udc->gadget);
usb_gadget_unregister_driver(udc->driver);
free_irq(udc->irq, udc);
pxa_cleanup_debugfs(udc);
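Both pxa25x_udc and pxa27x_udc above follow the same conversion pattern: the old register/unregister entry points become .start/.stop members of struct usb_gadget_ops, probe registers the gadget with the new UDC core, and remove unregisters it before tearing anything down. A minimal sketch of that pattern for a hypothetical foo_udc driver (struct foo_udc, its fields and the hardware handling are placeholders, not part of the patch):

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb/gadget.h>

struct foo_udc {
	struct usb_gadget gadget;		/* embedded gadget handed to the core */
	struct usb_gadget_driver *driver;
};

static int foo_udc_start(struct usb_gadget_driver *driver,
			 int (*bind)(struct usb_gadget *))
{
	/* old-style start: the controller driver calls bind() itself;
	 * hardware bring-up is omitted in this sketch */
	return 0;
}

static int foo_udc_stop(struct usb_gadget_driver *driver)
{
	return 0;
}

static const struct usb_gadget_ops foo_udc_ops = {
	.start	= foo_udc_start,
	.stop	= foo_udc_stop,
};

static int foo_udc_probe(struct platform_device *pdev)
{
	struct foo_udc *udc;
	int ret;

	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	udc->gadget.ops = &foo_udc_ops;
	udc->gadget.name = "foo_udc";
	platform_set_drvdata(pdev, udc);

	/* register with the UDC core; unwind local state if that fails */
	ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
	if (ret)
		kfree(udc);
	return ret;
}

static int foo_udc_remove(struct platform_device *pdev)
{
	struct foo_udc *udc = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&udc->gadget);	/* always before freeing driver state */
	kfree(udc);
	return 0;
}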
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index cd16231d8c7..b01696eab06 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -88,9 +88,9 @@
#define UDCISR_INT_MASK (UDCICR_FIFOERR | UDCICR_PKTCOMPL)
#define UDCOTGICR_IESF (1 << 24) /* OTG SET_FEATURE command recvd */
-#define UDCOTGICR_IEXR (1 << 17) /* Extra Transciever Interrupt
+#define UDCOTGICR_IEXR (1 << 17) /* Extra Transceiver Interrupt
Rising Edge Interrupt Enable */
-#define UDCOTGICR_IEXF (1 << 16) /* Extra Transciever Interrupt
+#define UDCOTGICR_IEXF (1 << 16) /* Extra Transceiver Interrupt
Falling Edge Interrupt Enable */
#define UDCOTGICR_IEVV40R (1 << 9) /* OTG Vbus Valid 4.0V Rising Edge
Interrupt Enable */
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 6dcc1f68fa6..50991e5bd5e 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2006-2009 Renesas Solutions Corp.
*
- * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -576,7 +576,11 @@ static void init_controller(struct r8a66597 *r8a66597)
u16 endian = r8a66597->pdata->endian ? BIGEND : 0;
if (r8a66597->pdata->on_chip) {
- r8a66597_bset(r8a66597, 0x04, SYSCFG1);
+ if (r8a66597->pdata->buswait)
+ r8a66597_write(r8a66597, r8a66597->pdata->buswait,
+ SYSCFG1);
+ else
+ r8a66597_write(r8a66597, 0x0f, SYSCFG1);
r8a66597_bset(r8a66597, HSE, SYSCFG0);
r8a66597_bclr(r8a66597, USBE, SYSCFG0);
@@ -618,6 +622,7 @@ static void disable_controller(struct r8a66597 *r8a66597)
{
if (r8a66597->pdata->on_chip) {
r8a66597_bset(r8a66597, SCKE, SYSCFG0);
+ r8a66597_bclr(r8a66597, UTST, TESTMODE);
/* disable interrupts */
r8a66597_write(r8a66597, 0, INTENB0);
@@ -635,6 +640,7 @@ static void disable_controller(struct r8a66597 *r8a66597)
r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
} else {
+ r8a66597_bclr(r8a66597, UTST, TESTMODE);
r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
udelay(1);
r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
@@ -999,10 +1005,29 @@ static void clear_feature(struct r8a66597 *r8a66597,
static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{
+ u16 tmp;
+ int timeout = 3000;
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
- control_end(r8a66597, 1);
+ switch (le16_to_cpu(ctrl->wValue)) {
+ case USB_DEVICE_TEST_MODE:
+ control_end(r8a66597, 1);
+ /* Wait for the completion of status stage */
+ do {
+ tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
+ udelay(1);
+ } while (tmp != CS_IDST && timeout-- > 0);
+
+ if (tmp == CS_IDST)
+ r8a66597_bset(r8a66597,
+ le16_to_cpu(ctrl->wIndex) >> 8,
+ TESTMODE);
+ break;
+ default:
+ pipe_stall(r8a66597, 0);
+ break;
+ }
break;
case USB_RECIP_INTERFACE:
control_end(r8a66597, 1);
@@ -1410,7 +1435,7 @@ static struct usb_ep_ops r8a66597_ep_ops = {
/*-------------------------------------------------------------------------*/
static struct r8a66597 *the_controller;
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int r8a66597_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct r8a66597 *r8a66597 = the_controller;
@@ -1444,6 +1469,7 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
goto error;
}
+ init_controller(r8a66597);
r8a66597_bset(r8a66597, VBSE, INTENB0);
if (r8a66597_read(r8a66597, INTSTS0) & VBSTS) {
r8a66597_start_xclock(r8a66597);
@@ -1462,9 +1488,8 @@ error:
return retval;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int r8a66597_stop(struct usb_gadget_driver *driver)
{
struct r8a66597 *r8a66597 = the_controller;
unsigned long flags;
@@ -1475,20 +1500,16 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
spin_lock_irqsave(&r8a66597->lock, flags);
if (r8a66597->gadget.speed != USB_SPEED_UNKNOWN)
r8a66597_usb_disconnect(r8a66597);
- spin_unlock_irqrestore(&r8a66597->lock, flags);
-
r8a66597_bclr(r8a66597, VBSE, INTENB0);
+ disable_controller(r8a66597);
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
driver->unbind(&r8a66597->gadget);
- init_controller(r8a66597);
- disable_controller(r8a66597);
-
device_del(&r8a66597->gadget.dev);
r8a66597->driver = NULL;
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
/*-------------------------------------------------------------------------*/
static int r8a66597_get_frame(struct usb_gadget *_gadget)
@@ -1497,14 +1518,33 @@ static int r8a66597_get_frame(struct usb_gadget *_gadget)
return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
}
+static int r8a66597_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&r8a66597->lock, flags);
+ if (is_on)
+ r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
+ else
+ r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
+
+ return 0;
+}
+
static struct usb_gadget_ops r8a66597_gadget_ops = {
.get_frame = r8a66597_get_frame,
+ .start = r8a66597_start,
+ .stop = r8a66597_stop,
+ .pullup = r8a66597_pullup,
};
static int __exit r8a66597_remove(struct platform_device *pdev)
{
struct r8a66597 *r8a66597 = dev_get_drvdata(&pdev->dev);
+ usb_del_gadget_udc(&r8a66597->gadget);
del_timer_sync(&r8a66597->timer);
iounmap(r8a66597->reg);
free_irq(platform_get_irq(pdev, 0), r8a66597);
@@ -1645,11 +1685,15 @@ static int __init r8a66597_probe(struct platform_device *pdev)
goto clean_up3;
r8a66597->ep0_req->complete = nop_completion;
- init_controller(r8a66597);
+ ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget);
+ if (ret)
+ goto err_add_udc;
dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
return 0;
+err_add_udc:
+ r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
clean_up3:
free_irq(irq, r8a66597);
clean_up2:
@@ -1679,6 +1723,7 @@ static struct platform_driver r8a66597_driver = {
.name = (char *) udc_name,
},
};
+MODULE_ALIAS("platform:r8a66597_udc");
static int __init r8a66597_udc_init(void)
{
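On the SET_FEATURE(TEST_MODE) path added above, the test selector travels in the high byte of wIndex (USB 2.0, Table 9-7), so the field has to be converted from little-endian before the shift. A minimal helper illustrating just that extraction (the helper name is illustrative, not from the patch):

#include <asm/byteorder.h>
#include <linux/usb/ch9.h>

/* 1 = Test_J, 2 = Test_K, 3 = Test_SE0_NAK, 4 = Test_Packet, 5 = Test_Force_Enable */
static u8 test_mode_selector(const struct usb_ctrlrequest *ctrl)
{
	return le16_to_cpu(ctrl->wIndex) >> 8;
}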
diff --git a/drivers/usb/gadget/r8a66597-udc.h b/drivers/usb/gadget/r8a66597-udc.h
index 5fc22e09a0f..503f766c23a 100644
--- a/drivers/usb/gadget/r8a66597-udc.h
+++ b/drivers/usb/gadget/r8a66597-udc.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 2007-2009 Renesas Solutions Corp.
*
- * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 0dfee282878..8bdee67ce09 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2574,7 +2574,7 @@ static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
return 0;
}
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int s3c_hsotg_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct s3c_hsotg *hsotg = our_hsotg;
@@ -2745,9 +2745,8 @@ err:
hsotg->gadget.dev.driver = NULL;
return ret;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int s3c_hsotg_stop(struct usb_gadget_driver *driver)
{
struct s3c_hsotg *hsotg = our_hsotg;
int ep;
@@ -2775,7 +2774,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
{
@@ -2784,6 +2782,8 @@ static int s3c_hsotg_gadget_getframe(struct usb_gadget *gadget)
static struct usb_gadget_ops s3c_hsotg_gadget_ops = {
.get_frame = s3c_hsotg_gadget_getframe,
+ .start = s3c_hsotg_start,
+ .stop = s3c_hsotg_stop,
};
/**
@@ -3403,6 +3403,10 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
for (epnum = 0; epnum < S3C_HSOTG_EPS; epnum++)
s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);
+ ret = usb_add_gadget_udc(&pdev->dev, &hsotg->gadget);
+ if (ret)
+ goto err_add_udc;
+
s3c_hsotg_create_debug(hsotg);
s3c_hsotg_dump(hsotg);
@@ -3410,6 +3414,11 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
our_hsotg = hsotg;
return 0;
+err_add_udc:
+ s3c_hsotg_gate(pdev, false);
+ clk_disable(hsotg->clk);
+ clk_put(hsotg->clk);
+
err_regs:
iounmap(hsotg->regs);
@@ -3427,6 +3436,8 @@ static int __devexit s3c_hsotg_remove(struct platform_device *pdev)
{
struct s3c_hsotg *hsotg = platform_get_drvdata(pdev);
+ usb_del_gadget_udc(&hsotg->gadget);
+
s3c_hsotg_delete_debug(hsotg);
usb_gadget_unregister_driver(hsotg->driver);
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index d5e3e1e5862..3fa717c5f4b 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -1133,7 +1133,7 @@ static irqreturn_t s3c_hsudc_irq(int irq, void *_dev)
return IRQ_HANDLED;
}
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int s3c_hsudc_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct s3c_hsudc *hsudc = the_controller;
@@ -1181,9 +1181,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
return 0;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int s3c_hsudc_stop(struct usb_gadget_driver *driver)
{
struct s3c_hsudc *hsudc = the_controller;
unsigned long flags;
@@ -1210,7 +1209,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
driver->driver.name);
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
static inline u32 s3c_hsudc_read_frameno(struct s3c_hsudc *hsudc)
{
@@ -1224,6 +1222,8 @@ static int s3c_hsudc_gadget_getframe(struct usb_gadget *gadget)
static struct usb_gadget_ops s3c_hsudc_gadget_ops = {
.get_frame = s3c_hsudc_gadget_getframe,
+ .start = s3c_hsudc_start,
+ .stop = s3c_hsudc_stop,
};
static int s3c_hsudc_probe(struct platform_device *pdev)
@@ -1311,7 +1311,15 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
disable_irq(hsudc->irq);
local_irq_enable();
+
+ ret = usb_add_gadget_udc(&pdev->dev, &hsudc->gadget);
+ if (ret)
+ goto err_add_udc;
+
return 0;
+err_add_udc:
+ clk_disable(hsudc->uclk);
+ clk_put(hsudc->uclk);
err_clk:
free_irq(hsudc->irq, hsudc);
err_irq:
@@ -1333,6 +1341,7 @@ static struct platform_driver s3c_hsudc_driver = {
},
.probe = s3c_hsudc_probe,
};
+MODULE_ALIAS("platform:s3c-hsudc");
static int __init s3c_hsudc_modinit(void)
{
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 100f2635cf0..8d31848aab0 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1552,6 +1552,10 @@ static int s3c2410_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
return -ENOTSUPP;
}
+static int s3c2410_udc_start(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *));
+static int s3c2410_udc_stop(struct usb_gadget_driver *driver);
+
static const struct usb_gadget_ops s3c2410_ops = {
.get_frame = s3c2410_udc_get_frame,
.wakeup = s3c2410_udc_wakeup,
@@ -1559,6 +1563,8 @@ static const struct usb_gadget_ops s3c2410_ops = {
.pullup = s3c2410_udc_pullup,
.vbus_session = s3c2410_udc_vbus_session,
.vbus_draw = s3c2410_vbus_draw,
+ .start = s3c2410_udc_start,
+ .stop = s3c2410_udc_stop,
};
static void s3c2410_udc_command(enum s3c2410_udc_cmd_e cmd)
@@ -1567,7 +1573,7 @@ static void s3c2410_udc_command(enum s3c2410_udc_cmd_e cmd)
return;
if (udc_info->udc_command) {
- udc_info->udc_command(S3C2410_UDC_P_DISABLE);
+ udc_info->udc_command(cmd);
} else if (gpio_is_valid(udc_info->pullup_pin)) {
int value;
@@ -1672,10 +1678,7 @@ static void s3c2410_udc_enable(struct s3c2410_udc *dev)
s3c2410_udc_command(S3C2410_UDC_P_ENABLE);
}
-/*
- * usb_gadget_probe_driver
- */
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int s3c2410_udc_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
struct s3c2410_udc *udc = the_controller;
@@ -1730,12 +1733,8 @@ register_error:
udc->gadget.dev.driver = NULL;
return retval;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-/*
- * usb_gadget_unregister_driver
- */
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int s3c2410_udc_stop(struct usb_gadget_driver *driver)
{
struct s3c2410_udc *udc = the_controller;
@@ -1955,6 +1954,10 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
goto err_vbus_irq;
}
+ retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+ if (retval)
+ goto err_add_udc;
+
if (s3c2410_udc_debugfs_root) {
udc->regs_info = debugfs_create_file("registers", S_IRUGO,
s3c2410_udc_debugfs_root,
@@ -1967,6 +1970,10 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
return 0;
+err_add_udc:
+ if (udc_info && !udc_info->udc_command &&
+ gpio_is_valid(udc_info->pullup_pin))
+ gpio_free(udc_info->pullup_pin);
err_vbus_irq:
if (udc_info && udc_info->vbus_pin > 0)
free_irq(gpio_to_irq(udc_info->vbus_pin), udc);
@@ -1992,6 +1999,8 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
unsigned int irq;
dev_dbg(&pdev->dev, "%s()\n", __func__);
+
+ usb_del_gadget_udc(&udc->gadget);
if (udc->driver)
return -EBUSY;
@@ -2048,26 +2057,23 @@ static int s3c2410_udc_resume(struct platform_device *pdev)
#define s3c2410_udc_resume NULL
#endif
-static struct platform_driver udc_driver_2410 = {
- .driver = {
- .name = "s3c2410-usbgadget",
- .owner = THIS_MODULE,
- },
- .probe = s3c2410_udc_probe,
- .remove = s3c2410_udc_remove,
- .suspend = s3c2410_udc_suspend,
- .resume = s3c2410_udc_resume,
+static const struct platform_device_id s3c_udc_ids[] = {
+ { "s3c2410-usbgadget", },
+ { "s3c2440-usbgadget", },
+ { }
};
+MODULE_DEVICE_TABLE(platform, s3c_udc_ids);
-static struct platform_driver udc_driver_2440 = {
+static struct platform_driver udc_driver_24x0 = {
.driver = {
- .name = "s3c2440-usbgadget",
+ .name = "s3c24x0-usbgadget",
.owner = THIS_MODULE,
},
.probe = s3c2410_udc_probe,
.remove = s3c2410_udc_remove,
.suspend = s3c2410_udc_suspend,
.resume = s3c2410_udc_resume,
+ .id_table = s3c_udc_ids,
};
static int __init udc_init(void)
@@ -2083,11 +2089,7 @@ static int __init udc_init(void)
s3c2410_udc_debugfs_root = NULL;
}
- retval = platform_driver_register(&udc_driver_2410);
- if (retval)
- goto err;
-
- retval = platform_driver_register(&udc_driver_2440);
+ retval = platform_driver_register(&udc_driver_24x0);
if (retval)
goto err;
@@ -2100,13 +2102,10 @@ err:
static void __exit udc_exit(void)
{
- platform_driver_unregister(&udc_driver_2410);
- platform_driver_unregister(&udc_driver_2440);
+ platform_driver_unregister(&udc_driver_24x0);
debugfs_remove(s3c2410_udc_debugfs_root);
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
-
module_init(udc_init);
module_exit(udc_exit);
@@ -2114,5 +2113,3 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c2410-usbgadget");
-MODULE_ALIAS("platform:s3c2440-usbgadget");
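Folding the two platform drivers into one works because the id_table lets a single driver match both device names; the matched entry is still available at probe time if variant-specific behaviour is ever needed. A small, hypothetical helper showing how that lookup would be done:

#include <linux/platform_device.h>
#include <linux/string.h>

/* hypothetical: true when this driver was bound via the s3c2440 id */
static bool s3c_udc_is_2440(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);

	return id && !strcmp(id->name, "s3c2440-usbgadget");
}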
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index 1ac57a973aa..ed1b816e58d 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -242,6 +242,7 @@ static struct usb_composite_driver gserial_driver = {
.name = "g_serial",
.dev = &device_desc,
.strings = dev_strings,
+ .max_speed = USB_SPEED_HIGH,
};
static int __init init(void)
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index 1fa4f705b0b..d3dd227a2bf 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -494,7 +494,7 @@ static struct usb_descriptor_header *fsg_hs_function[] = {
};
/* Maxpacket and other transfer characteristics vary by speed. */
-static struct usb_endpoint_descriptor *
+static __maybe_unused struct usb_endpoint_descriptor *
fsg_ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
struct usb_endpoint_descriptor *hs)
{
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 2ac1d214732..dfed4c1d96c 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -97,16 +97,17 @@ struct eth_dev {
static unsigned qmult = 5;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(qmult, "queue length multiplier at high speed");
+MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
#else /* full speed (low speed doesn't do bulk) */
#define qmult 1
#endif
-/* for dual-speed hardware, use deeper queues at highspeed */
+/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget)
{
- if (gadget_is_dualspeed(gadget) && gadget->speed == USB_SPEED_HIGH)
+ if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
+ gadget->speed == USB_SPEED_SUPER))
return qmult * DEFAULT_QLEN;
else
return DEFAULT_QLEN;
@@ -598,9 +599,10 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
req->length = length;
- /* throttle highspeed IRQ rate back slightly */
+ /* throttle high/super speed IRQ rate back slightly */
if (gadget_is_dualspeed(dev->gadget))
- req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
+ req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
+ dev->gadget->speed == USB_SPEED_SUPER)
? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
: 0;
@@ -693,8 +695,8 @@ static int eth_stop(struct net_device *net)
usb_ep_disable(link->out_ep);
if (netif_carrier_ok(net)) {
DBG(dev, "host still using in/out endpoints\n");
- usb_ep_enable(link->in_ep, link->in);
- usb_ep_enable(link->out_ep, link->out);
+ usb_ep_enable(link->in_ep);
+ usb_ep_enable(link->out_ep);
}
}
spin_unlock_irqrestore(&dev->lock, flags);
@@ -871,7 +873,7 @@ struct net_device *gether_connect(struct gether *link)
return ERR_PTR(-EINVAL);
link->in_ep->driver_data = dev;
- result = usb_ep_enable(link->in_ep, link->in);
+ result = usb_ep_enable(link->in_ep);
if (result != 0) {
DBG(dev, "enable %s --> %d\n",
link->in_ep->name, result);
@@ -879,7 +881,7 @@ struct net_device *gether_connect(struct gether *link)
}
link->out_ep->driver_data = dev;
- result = usb_ep_enable(link->out_ep, link->out);
+ result = usb_ep_enable(link->out_ep);
if (result != 0) {
DBG(dev, "enable %s --> %d\n",
link->out_ep->name, result);
@@ -969,7 +971,7 @@ void gether_disconnect(struct gether *link)
}
spin_unlock(&dev->req_lock);
link->in_ep->driver_data = NULL;
- link->in = NULL;
+ link->in_ep->desc = NULL;
usb_ep_disable(link->out_ep);
spin_lock(&dev->req_lock);
@@ -984,7 +986,7 @@ void gether_disconnect(struct gether *link)
}
spin_unlock(&dev->req_lock);
link->out_ep->driver_data = NULL;
- link->out = NULL;
+ link->out_ep->desc = NULL;
/* finish forgetting about this USB link episode */
dev->header_len = 0;
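The u_ether changes rely on the new one-argument usb_ep_enable(): the caller is expected to store the descriptor that matches the current connection speed in ep->desc before enabling the endpoint, and to clear it again after usb_ep_disable(). A minimal sketch of that convention ('hs' and 'fs' are assumed, pre-built endpoint descriptors; the helper names are illustrative):

#include <linux/usb/gadget.h>

static int sketch_enable_ep(struct usb_gadget *gadget, struct usb_ep *ep,
			    struct usb_endpoint_descriptor *hs,
			    struct usb_endpoint_descriptor *fs)
{
	/* pick the speed-matched descriptor, then enable */
	ep->desc = (gadget->speed == USB_SPEED_HIGH) ? hs : fs;
	return usb_ep_enable(ep);
}

static void sketch_disable_ep(struct usb_ep *ep)
{
	usb_ep_disable(ep);
	ep->desc = NULL;	/* mirror what gether_disconnect() does above */
}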
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index b56e1e7d423..c966440ddd7 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -52,10 +52,6 @@ struct gether {
struct usb_ep *in_ep;
struct usb_ep *out_ep;
- /* descriptors match device speed at gether_connect() time */
- struct usb_endpoint_descriptor *in;
- struct usb_endpoint_descriptor *out;
-
bool is_zlp_ok;
u16 cdc_filter;
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 40f7716b31f..a8aa46962d8 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -1247,12 +1247,12 @@ int gserial_connect(struct gserial *gser, u8 port_num)
port = ports[port_num].port;
/* activate the endpoints */
- status = usb_ep_enable(gser->in, gser->in_desc);
+ status = usb_ep_enable(gser->in);
if (status < 0)
return status;
gser->in->driver_data = port;
- status = usb_ep_enable(gser->out, gser->out_desc);
+ status = usb_ep_enable(gser->out);
if (status < 0)
goto fail_out;
gser->out->driver_data = port;
diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h
index 300f0ed9475..9b0fe6450fb 100644
--- a/drivers/usb/gadget/u_serial.h
+++ b/drivers/usb/gadget/u_serial.h
@@ -35,8 +35,6 @@ struct gserial {
struct usb_ep *in;
struct usb_ep *out;
- struct usb_endpoint_descriptor *in_desc;
- struct usb_endpoint_descriptor *out_desc;
/* REVISIT avoid this CDC-ACM support harder ... */
struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
new file mode 100644
index 00000000000..05ba4721436
--- /dev/null
+++ b/drivers/usb/gadget/udc-core.c
@@ -0,0 +1,484 @@
+/**
+ * udc-core.c - Core UDC Framework
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/err.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+/**
+ * struct usb_udc - describes one usb device controller
+ * @driver: the gadget driver pointer. For use by the class code
+ * @dev: the child device to the actual controller
+ * @gadget: the gadget. For use by the class code
+ * @list: for use by the udc class driver
+ *
+ * This represents the internal data structure which is used by the UDC class
+ * code to hold information about the UDC driver and the gadget together.
+ */
+struct usb_udc {
+ struct usb_gadget_driver *driver;
+ struct usb_gadget *gadget;
+ struct device dev;
+ struct list_head list;
+};
+
+static struct class *udc_class;
+static LIST_HEAD(udc_list);
+static DEFINE_MUTEX(udc_lock);
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * usb_gadget_start - tells usb device controller to start up
+ * @gadget: The gadget we want to get started
+ * @driver: The driver we want to bind to @gadget
+ * @bind: The bind function for @driver
+ *
+ * This call is issued by the UDC Class driver when it's about
+ * to register a gadget driver with the device controller, before
+ * calling the gadget driver's bind() method.
+ *
+ * It allows the controller to be powered off until strictly
+ * necessary to have it powered on.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *))
+{
+ return gadget->ops->start(driver, bind);
+}
+
+/**
+ * usb_gadget_udc_start - tells usb device controller to start up
+ * @gadget: The gadget we want to get started
+ * @driver: The driver we want to bind to @gadget
+ *
+ * This call is issued by the UDC Class driver when it's about
+ * to register a gadget driver with the device controller, after
+ * calling the gadget driver's bind() method.
+ *
+ * It allows the controller to be powered off until strictly
+ * necessary to have it powered on.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ return gadget->ops->udc_start(gadget, driver);
+}
+
+/**
+ * usb_gadget_stop - tells usb device controller we don't need it anymore
+ * @gadget: The device we want to stop activity
+ * @driver: The driver to unbind from @gadget
+ *
+ * This call is issued by the UDC Class driver after calling
+ * gadget driver's unbind() method.
+ *
+ * The details are implementation specific, but it can go as
+ * far as powering off the UDC completely and disabling its
+ * data line pullups.
+ */
+static inline void usb_gadget_stop(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ gadget->ops->stop(driver);
+}
+
+/**
+ * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
+ * @gadget: The device we want to stop activity
+ * @driver: The driver to unbind from @gadget
+ *
+ * This call is issued by the UDC Class driver after calling
+ * gadget driver's unbind() method.
+ *
+ * The details are implementation specific, but it can go as
+ * far as powering off the UDC completely and disabling its
+ * data line pullups.
+ */
+static inline void usb_gadget_udc_stop(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ gadget->ops->udc_stop(gadget, driver);
+}
+
+/**
+ * usb_udc_release - release the usb_udc struct
+ * @dev: the dev member within usb_udc
+ *
+ * This is called by driver's core in order to free memory once the last
+ * reference is released.
+ */
+static void usb_udc_release(struct device *dev)
+{
+ struct usb_udc *udc;
+
+ udc = container_of(dev, struct usb_udc, dev);
+ dev_dbg(dev, "releasing '%s'\n", dev_name(dev));
+ kfree(udc);
+}
+
+static const struct attribute_group *usb_udc_attr_groups[];
+/**
+ * usb_add_gadget_udc - adds a new gadget to the udc class driver list
+ * @parent: the parent device to this udc. Usually the controller
+ * driver's device.
+ * @gadget: the gadget to be added to the list
+ *
+ * Returns zero on success, negative errno otherwise.
+ */
+int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget)
+{
+ struct usb_udc *udc;
+ int ret = -ENOMEM;
+
+ udc = kzalloc(sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ goto err1;
+
+ device_initialize(&udc->dev);
+ udc->dev.release = usb_udc_release;
+ udc->dev.class = udc_class;
+ udc->dev.groups = usb_udc_attr_groups;
+ udc->dev.parent = parent;
+ ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
+ if (ret)
+ goto err2;
+
+ udc->gadget = gadget;
+
+ mutex_lock(&udc_lock);
+ list_add_tail(&udc->list, &udc_list);
+
+ ret = device_add(&udc->dev);
+ if (ret)
+ goto err3;
+
+ mutex_unlock(&udc_lock);
+
+ return 0;
+err3:
+ list_del(&udc->list);
+ mutex_unlock(&udc_lock);
+
+err2:
+ put_device(&udc->dev);
+
+err1:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_add_gadget_udc);
+
+static int udc_is_newstyle(struct usb_udc *udc)
+{
+ if (udc->gadget->ops->udc_start && udc->gadget->ops->udc_stop)
+ return 1;
+ return 0;
+}
+
+
+static void usb_gadget_remove_driver(struct usb_udc *udc)
+{
+ dev_dbg(&udc->dev, "unregistering UDC driver [%s]\n",
+ udc->gadget->name);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+
+ if (udc_is_newstyle(udc)) {
+ usb_gadget_disconnect(udc->gadget);
+ udc->driver->unbind(udc->gadget);
+ usb_gadget_udc_stop(udc->gadget, udc->driver);
+
+ } else {
+ usb_gadget_stop(udc->gadget, udc->driver);
+ }
+
+ udc->driver = NULL;
+ udc->dev.driver = NULL;
+}
+
+/**
+ * usb_del_gadget_udc - deletes @udc from udc_list
+ * @gadget: the gadget to be removed.
+ *
+ * This will call usb_gadget_remove_driver() if
+ * the @udc is still busy.
+ */
+void usb_del_gadget_udc(struct usb_gadget *gadget)
+{
+ struct usb_udc *udc = NULL;
+
+ mutex_lock(&udc_lock);
+ list_for_each_entry(udc, &udc_list, list)
+ if (udc->gadget == gadget)
+ goto found;
+
+ dev_err(gadget->dev.parent, "gadget not registered.\n");
+ mutex_unlock(&udc_lock);
+
+ return;
+
+found:
+ dev_vdbg(gadget->dev.parent, "unregistering gadget\n");
+
+ list_del(&udc->list);
+ mutex_unlock(&udc_lock);
+
+ if (udc->driver)
+ usb_gadget_remove_driver(udc);
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
+ device_unregister(&udc->dev);
+}
+EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
+
+/* ------------------------------------------------------------------------- */
+
+int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+ int (*bind)(struct usb_gadget *))
+{
+ struct usb_udc *udc = NULL;
+ int ret;
+
+ if (!driver || !bind || !driver->setup)
+ return -EINVAL;
+
+ mutex_lock(&udc_lock);
+ list_for_each_entry(udc, &udc_list, list) {
+ /* For now we take the first one */
+ if (!udc->driver)
+ goto found;
+ }
+
+ pr_debug("couldn't find an available UDC\n");
+ mutex_unlock(&udc_lock);
+ return -ENODEV;
+
+found:
+ dev_dbg(&udc->dev, "registering UDC driver [%s]\n",
+ driver->function);
+
+ udc->driver = driver;
+ udc->dev.driver = &driver->driver;
+
+ if (udc_is_newstyle(udc)) {
+ ret = bind(udc->gadget);
+ if (ret)
+ goto err1;
+ ret = usb_gadget_udc_start(udc->gadget, driver);
+ if (ret) {
+ driver->unbind(udc->gadget);
+ goto err1;
+ }
+ usb_gadget_connect(udc->gadget);
+ } else {
+
+ ret = usb_gadget_start(udc->gadget, driver, bind);
+ if (ret)
+ goto err1;
+
+ }
+
+ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ mutex_unlock(&udc_lock);
+ return 0;
+
+err1:
+ dev_err(&udc->dev, "failed to start %s: %d\n",
+ udc->driver->function, ret);
+ udc->driver = NULL;
+ udc->dev.driver = NULL;
+ mutex_unlock(&udc_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_probe_driver);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ struct usb_udc *udc = NULL;
+ int ret = -ENODEV;
+
+ if (!driver || !driver->unbind)
+ return -EINVAL;
+
+ mutex_lock(&udc_lock);
+ list_for_each_entry(udc, &udc_list, list)
+ if (udc->driver == driver) {
+ usb_gadget_remove_driver(udc);
+ ret = 0;
+ break;
+ }
+
+ mutex_unlock(&udc_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t usb_udc_srp_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t n)
+{
+ struct usb_udc *udc = dev_get_drvdata(dev);
+
+ if (sysfs_streq(buf, "1"))
+ usb_gadget_wakeup(udc->gadget);
+
+ return n;
+}
+static DEVICE_ATTR(srp, S_IWUSR, NULL, usb_udc_srp_store);
+
+static ssize_t usb_udc_softconn_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t n)
+{
+ struct usb_udc *udc = dev_get_drvdata(dev);
+
+ if (sysfs_streq(buf, "connect")) {
+ usb_gadget_connect(udc->gadget);
+ } else if (sysfs_streq(buf, "disconnect")) {
+ usb_gadget_disconnect(udc->gadget);
+ } else {
+ dev_err(dev, "unsupported command '%s'\n", buf);
+ return -EINVAL;
+ }
+
+ return n;
+}
+static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
+
+static ssize_t usb_udc_speed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ struct usb_gadget *gadget = udc->gadget;
+
+ switch (gadget->speed) {
+ case USB_SPEED_LOW:
+ return snprintf(buf, PAGE_SIZE, "low-speed\n");
+ case USB_SPEED_FULL:
+ return snprintf(buf, PAGE_SIZE, "full-speed\n");
+ case USB_SPEED_HIGH:
+ return snprintf(buf, PAGE_SIZE, "high-speed\n");
+ case USB_SPEED_WIRELESS:
+ return snprintf(buf, PAGE_SIZE, "wireless\n");
+ case USB_SPEED_SUPER:
+ return snprintf(buf, PAGE_SIZE, "super-speed\n");
+ case USB_SPEED_UNKNOWN: /* FALLTHROUGH */
+ default:
+ return snprintf(buf, PAGE_SIZE, "UNKNOWN\n");
+ }
+}
+static DEVICE_ATTR(speed, S_IRUSR, usb_udc_speed_show, NULL);
+
+#define USB_UDC_ATTR(name) \
+ssize_t usb_udc_##name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
+ struct usb_gadget *gadget = udc->gadget; \
+ \
+ return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \
+} \
+static DEVICE_ATTR(name, S_IRUSR, usb_udc_##name##_show, NULL)
+
+static USB_UDC_ATTR(is_dualspeed);
+static USB_UDC_ATTR(is_otg);
+static USB_UDC_ATTR(is_a_peripheral);
+static USB_UDC_ATTR(b_hnp_enable);
+static USB_UDC_ATTR(a_hnp_support);
+static USB_UDC_ATTR(a_alt_hnp_support);
+
+static struct attribute *usb_udc_attrs[] = {
+ &dev_attr_srp.attr,
+ &dev_attr_soft_connect.attr,
+ &dev_attr_speed.attr,
+
+ &dev_attr_is_dualspeed.attr,
+ &dev_attr_is_otg.attr,
+ &dev_attr_is_a_peripheral.attr,
+ &dev_attr_b_hnp_enable.attr,
+ &dev_attr_a_hnp_support.attr,
+ &dev_attr_a_alt_hnp_support.attr,
+ NULL,
+};
+
+static const struct attribute_group usb_udc_attr_group = {
+ .attrs = usb_udc_attrs,
+};
+
+static const struct attribute_group *usb_udc_attr_groups[] = {
+ &usb_udc_attr_group,
+ NULL,
+};
+
+static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ int ret;
+
+ ret = add_uevent_var(env, "USB_UDC_NAME=%s", udc->gadget->name);
+ if (ret) {
+ dev_err(dev, "failed to add uevent USB_UDC_NAME\n");
+ return ret;
+ }
+
+ if (udc->driver) {
+ ret = add_uevent_var(env, "USB_UDC_DRIVER=%s",
+ udc->driver->function);
+ if (ret) {
+ dev_err(dev, "failed to add uevent USB_UDC_DRIVER\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int __init usb_udc_init(void)
+{
+ udc_class = class_create(THIS_MODULE, "udc");
+ if (IS_ERR(udc_class)) {
+ pr_err("failed to create udc class --> %ld\n",
+ PTR_ERR(udc_class));
+ return PTR_ERR(udc_class);
+ }
+
+ udc_class->dev_uevent = usb_udc_uevent;
+ return 0;
+}
+subsys_initcall(usb_udc_init);
+
+static void __exit usb_udc_exit(void)
+{
+ class_destroy(udc_class);
+}
+module_exit(usb_udc_exit);
+
+MODULE_DESCRIPTION("UDC Framework");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
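From a gadget driver's point of view registration is unchanged: it still calls usb_gadget_probe_driver() and usb_gadget_unregister_driver(), but those now resolve here and bind the driver to the first UDC in udc_list that has none. A minimal sketch of such a caller (the 'sketch' driver and its stubs are hypothetical; udc-core only insists on setup and unbind being non-NULL):

#include <linux/module.h>
#include <linux/usb/gadget.h>

static int sketch_bind(struct usb_gadget *gadget)
{
	return 0;	/* a real driver claims endpoints and allocates requests here */
}

static void sketch_unbind(struct usb_gadget *gadget)
{
}

static int sketch_setup(struct usb_gadget *gadget,
			const struct usb_ctrlrequest *ctrl)
{
	return -EOPNOTSUPP;	/* a real driver answers ep0 requests here */
}

static void sketch_disconnect(struct usb_gadget *gadget)
{
}

static struct usb_gadget_driver sketch_driver = {
	.function	= "sketch",
	.setup		= sketch_setup,
	.unbind		= sketch_unbind,
	.disconnect	= sketch_disconnect,
};

static int __init sketch_init(void)
{
	/* picks the first UDC without a driver, runs bind(), then start/udc_start */
	return usb_gadget_probe_driver(&sketch_driver, sketch_bind);
}
module_init(sketch_init);

static void __exit sketch_exit(void)
{
	usb_gadget_unregister_driver(&sketch_driver);
}
module_exit(sketch_exit);

MODULE_LICENSE("GPL");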
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
index f7395ac5dc1..aa0ad34e0f1 100644
--- a/drivers/usb/gadget/uvc_queue.c
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -19,7 +19,7 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "uvc.h"
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index 5e807f083bc..52f8f9e513a 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -124,24 +124,12 @@ uvc_v4l2_open(struct file *file)
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_file_handle *handle;
- int ret;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (handle == NULL)
return -ENOMEM;
- ret = v4l2_fh_init(&handle->vfh, vdev);
- if (ret < 0)
- goto error;
-
- ret = v4l2_event_init(&handle->vfh);
- if (ret < 0)
- goto error;
-
- ret = v4l2_event_alloc(&handle->vfh, 8);
- if (ret < 0)
- goto error;
-
+ v4l2_fh_init(&handle->vfh, vdev);
v4l2_fh_add(&handle->vfh);
handle->device = &uvc->video;
@@ -149,10 +137,6 @@ uvc_v4l2_open(struct file *file)
uvc_function_connect(uvc);
return 0;
-
-error:
- v4l2_fh_exit(&handle->vfh);
- return ret;
}
static int
@@ -314,7 +298,7 @@ uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
return -EINVAL;
- return v4l2_event_subscribe(&handle->vfh, arg);
+ return v4l2_event_subscribe(&handle->vfh, arg, 2);
}
case VIDIOC_UNSUBSCRIBE_EVENT:
@@ -354,7 +338,7 @@ uvc_v4l2_poll(struct file *file, poll_table *wait)
struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
unsigned int mask = 0;
- poll_wait(file, &handle->vfh.events->wait, wait);
+ poll_wait(file, &handle->vfh.wait, wait);
if (v4l2_event_pending(&handle->vfh))
mask |= POLLPRI;
diff --git a/drivers/usb/gadget/webcam.c b/drivers/usb/gadget/webcam.c
index a5a0fdb808c..df6882de50b 100644
--- a/drivers/usb/gadget/webcam.c
+++ b/drivers/usb/gadget/webcam.c
@@ -373,6 +373,7 @@ static struct usb_composite_driver webcam_driver = {
.name = "g_webcam",
.dev = &webcam_device_descriptor,
.strings = webcam_device_strings,
+ .max_speed = USB_SPEED_HIGH,
.unbind = webcam_unbind,
};
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 6d16db9d9d2..00e2fd2d479 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -340,6 +340,7 @@ static struct usb_composite_driver zero_driver = {
.name = "zero",
.dev = &device_desc,
.strings = dev_strings,
+ .max_speed = USB_SPEED_SUPER,
.unbind = zero_unbind,
.suspend = zero_suspend,
.resume = zero_resume,
diff --git a/drivers/usb/host/ehci-ath79.c b/drivers/usb/host/ehci-ath79.c
index 98cc8a13169..4d2e88d04da 100644
--- a/drivers/usb/host/ehci-ath79.c
+++ b/drivers/usb/host/ehci-ath79.c
@@ -44,7 +44,6 @@ static int ehci_ath79_init(struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct platform_device *pdev = to_platform_device(hcd->self.controller);
const struct platform_device_id *id;
- int hclength;
int ret;
id = platform_get_device_id(pdev);
@@ -53,20 +52,23 @@ static int ehci_ath79_init(struct usb_hcd *hcd)
return -EINVAL;
}
- hclength = HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
switch (id->driver_data) {
case EHCI_ATH79_IP_V1:
ehci->has_synopsys_hc_bug = 1;
ehci->caps = hcd->regs;
- ehci->regs = hcd->regs + hclength;
+ ehci->regs = hcd->regs +
+ HC_LENGTH(ehci,
+ ehci_readl(ehci, &ehci->caps->hc_capbase));
break;
case EHCI_ATH79_IP_V2:
hcd->has_tt = 1;
ehci->caps = hcd->regs + 0x100;
- ehci->regs = hcd->regs + 0x100 + hclength;
+ ehci->regs = hcd->regs + 0x100 +
+ HC_LENGTH(ehci,
+ ehci_readl(ehci, &ehci->caps->hc_capbase));
break;
default:
@@ -146,7 +148,7 @@ static int ehci_ath79_probe(struct platform_device *pdev)
return -ENOMEM;
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_dbg(&pdev->dev, "controller already in use\n");
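The recurring res->end - res->start + 1 cleanups in these host probes (and in the ones that follow) use the generic helper from <linux/ioport.h>, which is defined along these lines:

/* from <linux/ioport.h>: the number of bytes a struct resource covers */
static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}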
diff --git a/drivers/usb/host/ehci-cns3xxx.c b/drivers/usb/host/ehci-cns3xxx.c
index d41745c6f0c..6536abdea6e 100644
--- a/drivers/usb/host/ehci-cns3xxx.c
+++ b/drivers/usb/host/ehci-cns3xxx.c
@@ -107,7 +107,7 @@ static int cns3xxx_ehci_probe(struct platform_device *pdev)
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index f380bf97e5a..34a3140d1e5 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -100,7 +100,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
goto err2;
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
dev_dbg(&pdev->dev, "controller already in use\n");
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
index 93b230dc51a..fdfd8c5b639 100644
--- a/drivers/usb/host/ehci-grlib.c
+++ b/drivers/usb/host/ehci-grlib.c
@@ -130,7 +130,7 @@ static int __devinit ehci_hcd_grlib_probe(struct platform_device *op)
return -ENOMEM;
hcd->rsrc_start = res.start;
- hcd->rsrc_len = res.end - res.start + 1;
+ hcd->rsrc_len = resource_size(&res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b435ed67dd5..f72ae0b6ee7 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1,4 +1,8 @@
/*
+ * Enhanced Host Controller Interface (EHCI) driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
* Copyright (c) 2000-2004 by David Brownell
*
* This program is free software; you can redistribute it and/or modify it
@@ -90,7 +94,8 @@ static const char hcd_name [] = "ehci_hcd";
#define EHCI_IAA_MSECS 10 /* arbitrary */
#define EHCI_IO_JIFFIES (HZ/10) /* io watchdog > irq_thresh */
#define EHCI_ASYNC_JIFFIES (HZ/20) /* async idle timeout */
-#define EHCI_SHRINK_FRAMES 5 /* async qh unlink delay */
+#define EHCI_SHRINK_JIFFIES (DIV_ROUND_UP(HZ, 200) + 1)
+ /* 5-ms async qh unlink delay */
/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh = 0; // 0 to 6
@@ -110,7 +115,7 @@ MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
/* for link power management(LPM) feature */
static unsigned int hird;
module_param(hird, int, S_IRUGO);
-MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us\n");
+MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
@@ -148,10 +153,7 @@ timer_action(struct ehci_hcd *ehci, enum ehci_timer_action action)
break;
/* case TIMER_ASYNC_SHRINK: */
default:
- /* add a jiffie since we synch against the
- * 8 KHz uframe counter.
- */
- t = DIV_ROUND_UP(EHCI_SHRINK_FRAMES * HZ, 1000) + 1;
+ t = EHCI_SHRINK_JIFFIES;
break;
}
mod_timer(&ehci->watchdog, t + jiffies);
@@ -336,6 +338,7 @@ static void ehci_work(struct ehci_hcd *ehci);
#include "ehci-mem.c"
#include "ehci-q.c"
#include "ehci-sched.c"
+#include "ehci-sysfs.c"
/*-------------------------------------------------------------------------*/
@@ -520,7 +523,7 @@ static void ehci_stop (struct usb_hcd *hcd)
ehci_reset (ehci);
spin_unlock_irq(&ehci->lock);
- remove_companion_file(ehci);
+ remove_sysfs_files(ehci);
remove_debug_files (ehci);
/* root hub is shut down separately (first, when possible) */
@@ -571,6 +574,12 @@ static int ehci_init(struct usb_hcd *hcd)
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
/*
+ * by default set standard 80% (== 100 usec/uframe) max periodic
+ * bandwidth as required by USB 2.0
+ */
+ ehci->uframe_periodic_max = 100;
+
+ /*
* hw default: 1K periodic list heads, one per frame.
* periodic_size can shrink by USBCMD update if hcc_params allows.
*/
@@ -754,7 +763,7 @@ static int ehci_run (struct usb_hcd *hcd)
* since the class device isn't created that early.
*/
create_debug_files(ehci);
- create_companion_file(ehci);
+ create_sysfs_files(ehci);
return 0;
}
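For reference, the new constant keeps the old unlink delay, only expressed in jiffies: with EHCI_SHRINK_FRAMES equal to 5, the removed DIV_ROUND_UP(EHCI_SHRINK_FRAMES * HZ, 1000) + 1 and the new DIV_ROUND_UP(HZ, 200) + 1 are the same expression, roughly 5 ms rounded up to jiffy granularity plus one extra tick. Worked out for common HZ values (illustrative arithmetic, not from the patch):

/*
 *   HZ = 100  -> DIV_ROUND_UP(100, 200)  + 1 = 2 jiffies = 20 ms
 *   HZ = 250  -> DIV_ROUND_UP(250, 200)  + 1 = 3 jiffies = 12 ms
 *   HZ = 1000 -> DIV_ROUND_UP(1000, 200) + 1 = 6 jiffies =  6 ms
 */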
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index ea6184bf48d..e051b30c184 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -471,29 +471,6 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-/* Display the ports dedicated to the companion controller */
-static ssize_t show_companion(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ehci_hcd *ehci;
- int nports, index, n;
- int count = PAGE_SIZE;
- char *ptr = buf;
-
- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
- nports = HCS_N_PORTS(ehci->hcs_params);
-
- for (index = 0; index < nports; ++index) {
- if (test_bit(index, &ehci->companion_ports)) {
- n = scnprintf(ptr, count, "%d\n", index + 1);
- ptr += n;
- count -= n;
- }
- }
- return ptr - buf;
-}
-
/*
* Sets the owner of a port
*/
@@ -528,58 +505,6 @@ static void set_owner(struct ehci_hcd *ehci, int portnum, int new_owner)
}
}
-/*
- * Dedicate or undedicate a port to the companion controller.
- * Syntax is "[-]portnum", where a leading '-' sign means
- * return control of the port to the EHCI controller.
- */
-static ssize_t store_companion(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ehci_hcd *ehci;
- int portnum, new_owner;
-
- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
- new_owner = PORT_OWNER; /* Owned by companion */
- if (sscanf(buf, "%d", &portnum) != 1)
- return -EINVAL;
- if (portnum < 0) {
- portnum = - portnum;
- new_owner = 0; /* Owned by EHCI */
- }
- if (portnum <= 0 || portnum > HCS_N_PORTS(ehci->hcs_params))
- return -ENOENT;
- portnum--;
- if (new_owner)
- set_bit(portnum, &ehci->companion_ports);
- else
- clear_bit(portnum, &ehci->companion_ports);
- set_owner(ehci, portnum, new_owner);
- return count;
-}
-static DEVICE_ATTR(companion, 0644, show_companion, store_companion);
-
-static inline int create_companion_file(struct ehci_hcd *ehci)
-{
- int i = 0;
-
- /* with integrated TT there is no companion! */
- if (!ehci_is_TDI(ehci))
- i = device_create_file(ehci_to_hcd(ehci)->self.controller,
- &dev_attr_companion);
- return i;
-}
-
-static inline void remove_companion_file(struct ehci_hcd *ehci)
-{
- /* with integrated TT there is no companion! */
- if (!ehci_is_TDI(ehci))
- device_remove_file(ehci_to_hcd(ehci)->self.controller,
- &dev_attr_companion);
-}
-
-
/*-------------------------------------------------------------------------*/
static int check_reset_complete (
@@ -891,10 +816,11 @@ static int ehci_hub_control (
* power switching; they're allowed to just limit the
* current. khubd will turn the power back on.
*/
- if (HCS_PPC (ehci->hcs_params)){
+ if ((temp & PORT_OC) && HCS_PPC(ehci->hcs_params)) {
ehci_writel(ehci,
temp & ~(PORT_RWC_BITS | PORT_POWER),
status_reg);
+ temp = ehci_readl(ehci, status_reg);
}
}
@@ -1120,7 +1046,19 @@ static int ehci_hub_control (
if (!selector || selector > 5)
goto error;
ehci_quiesce(ehci);
+
+ /* Put all enabled ports into suspend */
+ while (ports--) {
+ u32 __iomem *sreg =
+ &ehci->regs->port_status[ports];
+
+ temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS;
+ if (temp & PORT_PE)
+ ehci_writel(ehci, temp | PORT_SUSPEND,
+ sreg);
+ }
ehci_halt(ehci);
+ temp = ehci_readl(ehci, status_reg);
temp |= selector << 16;
ehci_writel(ehci, temp, status_reg);
break;
diff --git a/drivers/usb/host/ehci-ixp4xx.c b/drivers/usb/host/ehci-ixp4xx.c
index 50e600d26e2..c4460f3d009 100644
--- a/drivers/usb/host/ehci-ixp4xx.c
+++ b/drivers/usb/host/ehci-ixp4xx.c
@@ -100,7 +100,7 @@ static int ixp4xx_ehci_probe(struct platform_device *pdev)
goto fail_request_resource;
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index b5a0bf649c9..592d5f76803 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -40,27 +40,9 @@ static int ehci_msm_reset(struct usb_hcd *hcd)
int retval;
ehci->caps = USB_CAPLENGTH;
- ehci->regs = USB_CAPLENGTH +
- HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
- dbg_hcs_params(ehci, "reset");
- dbg_hcc_params(ehci, "reset");
-
- /* cache the data to minimize the chip reads*/
- ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
-
hcd->has_tt = 1;
- ehci->sbrn = HCD_USB2;
-
- retval = ehci_halt(ehci);
- if (retval)
- return retval;
-
- /* data structure init */
- retval = ehci_init(hcd);
- if (retval)
- return retval;
- retval = ehci_reset(ehci);
+ retval = ehci_setup(hcd);
if (retval)
return retval;
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index 0c058be35a3..555a73c864b 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -24,6 +24,7 @@
#include <linux/usb/ulpi.h>
#include <linux/slab.h>
+#include <mach/hardware.h>
#include <mach/mxc_ehci.h>
#include <asm/mach-types.h>
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index ff55757ba7d..c3ba3ed5f3a 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -124,7 +124,7 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
return -ENOMEM;
hcd->rsrc_start = res_mem->start;
- hcd->rsrc_len = res_mem->end - res_mem->start + 1;
+ hcd->rsrc_len = resource_size(res_mem);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
OCTEON_EHCI_HCD_NAME)) {
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 55a57c23dd0..45240321ca0 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -98,6 +98,18 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
}
}
+static void disable_put_regulator(
+ struct ehci_hcd_omap_platform_data *pdata)
+{
+ int i;
+
+ for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
+ if (pdata->regulator[i]) {
+ regulator_disable(pdata->regulator[i]);
+ regulator_put(pdata->regulator[i]);
+ }
+ }
+}
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
@@ -231,9 +243,11 @@ err_add_hcd:
omap_usbhs_disable(dev);
err_enable:
+ disable_put_regulator(pdata);
usb_put_hcd(hcd);
err_io:
+ iounmap(regs);
return ret;
}
@@ -253,6 +267,8 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
omap_usbhs_disable(dev);
+ disable_put_regulator(dev->platform_data);
+ iounmap(hcd->regs);
usb_put_hcd(hcd);
return 0;
}
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index cd69099cda1..e8d54de44ac 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -124,7 +124,7 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res == NULL)
return -ENOMEM;
- res_len = res->end - res->start + 1;
+ res_len = resource_size(res);
if (!request_mem_region(res->start, res_len, "mab regs"))
return -EBUSY;
@@ -140,7 +140,7 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
retval = -ENOMEM;
goto err2;
}
- res_len = res->end - res->start + 1;
+ res_len = resource_size(res);
if (!request_mem_region(res->start, res_len, "usbid regs")) {
retval = -EBUSY;
goto err2;
@@ -154,13 +154,13 @@ static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
return 0;
err3:
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- res_len = res->end - res->start + 1;
+ res_len = resource_size(res);
release_mem_region(res->start, res_len);
err2:
iounmap(dev->mab_regs);
err1:
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- res_len = res->end - res->start + 1;
+ res_len = resource_size(res);
release_mem_region(res->start, res_len);
dev_err(&pdev->dev, "Failed to map non-EHCI regs.\n");
return retval;
@@ -194,7 +194,7 @@ int usb_hcd_msp_probe(const struct hc_driver *driver,
goto err1;
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, dev->name)) {
retval = -EBUSY;
goto err1;
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 8552db6c29c..41d11fe1425 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -130,7 +130,7 @@ static int __devinit ehci_hcd_ppc_of_probe(struct platform_device *op)
return -ENOMEM;
hcd->rsrc_start = res.start;
- hcd->rsrc_len = res.end - res.start + 1;
+ hcd->rsrc_len = resource_size(&res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 5d6bc624c96..0917e3a3246 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -103,7 +103,7 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
unsigned is_out, epnum;
- is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
+ is_out = qh->is_out;
epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
@@ -946,6 +946,7 @@ done:
hw = qh->hw;
hw->hw_info1 = cpu_to_hc32(ehci, info1);
hw->hw_info2 = cpu_to_hc32(ehci, info2);
+ qh->is_out = !is_input;
usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
qh_refresh (ehci, qh);
return qh;
@@ -1231,6 +1232,8 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
prev->hw->hw_next = qh->hw->hw_next;
prev->qh_next = qh->qh_next;
+ if (ehci->qh_scan_next == qh)
+ ehci->qh_scan_next = qh->qh_next.qh;
wmb ();
/* If the controller isn't running, we don't have to wait for it */
@@ -1256,53 +1259,49 @@ static void scan_async (struct ehci_hcd *ehci)
struct ehci_qh *qh;
enum ehci_timer_action action = TIMER_IO_WATCHDOG;
- ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index);
timer_action_done (ehci, TIMER_ASYNC_SHRINK);
-rescan:
stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state);
- qh = ehci->async->qh_next.qh;
- if (likely (qh != NULL)) {
- do {
- /* clean any finished work for this qh */
- if (!list_empty(&qh->qtd_list) && (stopped ||
- qh->stamp != ehci->stamp)) {
- int temp;
-
- /* unlinks could happen here; completion
- * reporting drops the lock. rescan using
- * the latest schedule, but don't rescan
- * qhs we already finished (no looping)
- * unless the controller is stopped.
- */
- qh = qh_get (qh);
- qh->stamp = ehci->stamp;
- temp = qh_completions (ehci, qh);
- if (qh->needs_rescan)
- unlink_async(ehci, qh);
- qh_put (qh);
- if (temp != 0) {
- goto rescan;
- }
- }
- /* unlink idle entries, reducing DMA usage as well
- * as HCD schedule-scanning costs. delay for any qh
- * we just scanned, there's a not-unusual case that it
- * doesn't stay idle for long.
- * (plus, avoids some kind of re-activation race.)
+ ehci->qh_scan_next = ehci->async->qh_next.qh;
+ while (ehci->qh_scan_next) {
+ qh = ehci->qh_scan_next;
+ ehci->qh_scan_next = qh->qh_next.qh;
+ rescan:
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why ehci->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then ehci->qh_scan_next is adjusted
+ * in start_unlink_async().
*/
- if (list_empty(&qh->qtd_list)
- && qh->qh_state == QH_STATE_LINKED) {
- if (!ehci->reclaim && (stopped ||
- ((ehci->stamp - qh->stamp) & 0x1fff)
- >= EHCI_SHRINK_FRAMES * 8))
- start_unlink_async(ehci, qh);
- else
- action = TIMER_ASYNC_SHRINK;
- }
+ qh = qh_get(qh);
+ temp = qh_completions(ehci, qh);
+ if (qh->needs_rescan)
+ unlink_async(ehci, qh);
+ qh->unlink_time = jiffies + EHCI_SHRINK_JIFFIES;
+ qh_put(qh);
+ if (temp != 0)
+ goto rescan;
+ }
- qh = qh->qh_next.qh;
- } while (qh);
+ /* unlink idle entries, reducing DMA usage as well
+ * as HCD schedule-scanning costs. delay for any qh
+ * we just scanned, there's a not-unusual case that it
+ * doesn't stay idle for long.
+ * (plus, avoids some kind of re-activation race.)
+ */
+ if (list_empty(&qh->qtd_list)
+ && qh->qh_state == QH_STATE_LINKED) {
+ if (!ehci->reclaim && (stopped ||
+ time_after_eq(jiffies, qh->unlink_time)))
+ start_unlink_async(ehci, qh);
+ else
+ action = TIMER_ASYNC_SHRINK;
+ }
}
if (action == TIMER_ASYNC_SHRINK)
timer_action (ehci, TIMER_ASYNC_SHRINK);
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c
index e3374c8f7b3..b3958b3d316 100644
--- a/drivers/usb/host/ehci-s5p.c
+++ b/drivers/usb/host/ehci-s5p.c
@@ -189,6 +189,100 @@ static void s5p_ehci_shutdown(struct platform_device *pdev)
hcd->driver->shutdown(hcd);
}
+#ifdef CONFIG_PM
+static int s5p_ehci_suspend(struct device *dev)
+{
+ struct s5p_ehci_hcd *s5p_ehci = dev_get_drvdata(dev);
+ struct usb_hcd *hcd = s5p_ehci->hcd;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
+ unsigned long flags;
+ int rc = 0;
+
+ if (time_before(jiffies, ehci->next_statechange))
+ msleep(20);
+
+ /*
+ * Root hub was already suspended. Disable irq emission and
+ * mark HW unaccessible. The PM and USB cores make sure that
+ * the root hub is either suspended or stopped.
+ */
+ ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev));
+ spin_lock_irqsave(&ehci->lock, flags);
+ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+ (void)ehci_readl(ehci, &ehci->regs->intr_enable);
+
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ spin_unlock_irqrestore(&ehci->lock, flags);
+
+ if (pdata && pdata->phy_exit)
+ pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
+
+ return rc;
+}
+
+static int s5p_ehci_resume(struct device *dev)
+{
+ struct s5p_ehci_hcd *s5p_ehci = dev_get_drvdata(dev);
+ struct usb_hcd *hcd = s5p_ehci->hcd;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
+
+ if (pdata && pdata->phy_init)
+ pdata->phy_init(pdev, S5P_USB_PHY_HOST);
+
+ if (time_before(jiffies, ehci->next_statechange))
+ msleep(100);
+
+ /* Mark hardware accessible again as we are out of D3 state by now */
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
+ int mask = INTR_MASK;
+
+ ehci_prepare_ports_for_controller_resume(ehci);
+ if (!hcd->self.root_hub->do_remote_wakeup)
+ mask &= ~STS_PCD;
+ ehci_writel(ehci, mask, &ehci->regs->intr_enable);
+ ehci_readl(ehci, &ehci->regs->intr_enable);
+ return 0;
+ }
+
+ usb_root_hub_lost_power(hcd->self.root_hub);
+
+ (void) ehci_halt(ehci);
+ (void) ehci_reset(ehci);
+
+ /* emptying the schedule aborts any urbs */
+ spin_lock_irq(&ehci->lock);
+ if (ehci->reclaim)
+ end_unlink_async(ehci);
+ ehci_work(ehci);
+ spin_unlock_irq(&ehci->lock);
+
+ ehci_writel(ehci, ehci->command, &ehci->regs->command);
+ ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+ ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
+
+ /* here we "know" root ports should always stay powered */
+ ehci_port_power(ehci, 1);
+
+ hcd->state = HC_STATE_SUSPENDED;
+
+ return 0;
+}
+#else
+#define s5p_ehci_suspend NULL
+#define s5p_ehci_resume NULL
+#endif
+
+static const struct dev_pm_ops s5p_ehci_pm_ops = {
+ .suspend = s5p_ehci_suspend,
+ .resume = s5p_ehci_resume,
+};
+
static struct platform_driver s5p_ehci_driver = {
.probe = s5p_ehci_probe,
.remove = __devexit_p(s5p_ehci_remove),
@@ -196,6 +290,7 @@ static struct platform_driver s5p_ehci_driver = {
.driver = {
.name = "s5p-ehci",
.owner = THIS_MODULE,
+ .pm = &s5p_ehci_pm_ops,
}
};
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 6c9fbe352f7..2abf8543f08 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -172,7 +172,7 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
}
}
#ifdef DEBUG
- if (usecs > 100)
+ if (usecs > ehci->uframe_periodic_max)
ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
frame * 8 + uframe, usecs);
#endif
@@ -709,11 +709,8 @@ static int check_period (
if (uframe >= 8)
return 0;
- /*
- * 80% periodic == 100 usec/uframe available
- * convert "usecs we need" to "max already claimed"
- */
- usecs = 100 - usecs;
+ /* convert "usecs we need" to "max already claimed" */
+ usecs = ehci->uframe_periodic_max - usecs;
/* we "know" 2 and 4 uframe intervals were rejected; so
* for period 0, check _every_ microframe in the schedule.
@@ -1286,9 +1283,9 @@ itd_slot_ok (
{
uframe %= period;
do {
- /* can't commit more than 80% periodic == 100 usec */
+ /* can't commit more than uframe_periodic_max usec */
if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
- > (100 - usecs))
+ > (ehci->uframe_periodic_max - usecs))
return 0;
/* we know urb->interval is 2^N uframes */
@@ -1345,7 +1342,7 @@ sitd_slot_ok (
#endif
/* check starts (OUT uses more than one) */
- max_used = 100 - stream->usecs;
+ max_used = ehci->uframe_periodic_max - stream->usecs;
for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
if (periodic_usecs (ehci, frame, uf) > max_used)
return 0;
@@ -1354,7 +1351,7 @@ sitd_slot_ok (
/* for IN, check CSPLIT */
if (stream->c_usecs) {
uf = uframe & 7;
- max_used = 100 - stream->c_usecs;
+ max_used = ehci->uframe_periodic_max - stream->c_usecs;
do {
tmp = 1 << uf;
tmp <<= 8;
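The hard-coded 100 being replaced throughout this file came from reserving 80% of a 125 us high-speed microframe for periodic transfers; uframe_periodic_max makes that budget tunable. A small stand-alone C sketch of the arithmetic, using the default value:

/* Sketch only: why "100 usec/uframe" and "80% periodic" are the same
 * statement.  A microframe is 125 us; the spec reserves 20% for
 * non-periodic traffic, leaving 100 us by default.
 */
#include <stdio.h>

int main(void)
{
	unsigned uframe_us = 125;
	unsigned uframe_periodic_max = 100;	/* default budget in usec */
	unsigned percent = 100 * uframe_periodic_max / uframe_us;

	printf("%u usec/uframe == %u%% periodic\n",
	       uframe_periodic_max, percent);	/* prints: 100 usec/uframe == 80% periodic */
	return 0;
}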
diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
new file mode 100644
index 00000000000..14ced00ba22
--- /dev/null
+++ b/drivers/usb/host/ehci-sysfs.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2007 by Alan Stern
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* this file is part of ehci-hcd.c */
+
+
+/* Display the ports dedicated to the companion controller */
+static ssize_t show_companion(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ehci_hcd *ehci;
+ int nports, index, n;
+ int count = PAGE_SIZE;
+ char *ptr = buf;
+
+ ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ nports = HCS_N_PORTS(ehci->hcs_params);
+
+ for (index = 0; index < nports; ++index) {
+ if (test_bit(index, &ehci->companion_ports)) {
+ n = scnprintf(ptr, count, "%d\n", index + 1);
+ ptr += n;
+ count -= n;
+ }
+ }
+ return ptr - buf;
+}
+
+/*
+ * Dedicate or undedicate a port to the companion controller.
+ * Syntax is "[-]portnum", where a leading '-' sign means
+ * return control of the port to the EHCI controller.
+ */
+static ssize_t store_companion(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ehci_hcd *ehci;
+ int portnum, new_owner;
+
+ ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ new_owner = PORT_OWNER; /* Owned by companion */
+ if (sscanf(buf, "%d", &portnum) != 1)
+ return -EINVAL;
+ if (portnum < 0) {
+ portnum = - portnum;
+ new_owner = 0; /* Owned by EHCI */
+ }
+ if (portnum <= 0 || portnum > HCS_N_PORTS(ehci->hcs_params))
+ return -ENOENT;
+ portnum--;
+ if (new_owner)
+ set_bit(portnum, &ehci->companion_ports);
+ else
+ clear_bit(portnum, &ehci->companion_ports);
+ set_owner(ehci, portnum, new_owner);
+ return count;
+}
+static DEVICE_ATTR(companion, 0644, show_companion, store_companion);
+
+
+/*
+ * Display / Set uframe_periodic_max
+ */
+static ssize_t show_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ehci_hcd *ehci;
+ int n;
+
+ ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max);
+ return n;
+}
+
+
+static ssize_t store_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ehci_hcd *ehci;
+ unsigned uframe_periodic_max;
+ unsigned frame, uframe;
+ unsigned short allocated_max;
+ unsigned long flags;
+ ssize_t ret;
+
+ ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
+ return -EINVAL;
+
+ if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
+ ehci_info(ehci, "rejecting invalid request for "
+ "uframe_periodic_max=%u\n", uframe_periodic_max);
+ return -EINVAL;
+ }
+
+ ret = -EINVAL;
+
+ /*
+ * lock, so that our checking does not race with possible periodic
+ * bandwidth allocation through submitting new urbs.
+ */
+ spin_lock_irqsave (&ehci->lock, flags);
+
+ /*
+ * for request to decrease max periodic bandwidth, we have to check
+ * every microframe in the schedule to see whether the decrease is
+ * possible.
+ */
+ if (uframe_periodic_max < ehci->uframe_periodic_max) {
+ allocated_max = 0;
+
+ for (frame = 0; frame < ehci->periodic_size; ++frame)
+ for (uframe = 0; uframe < 8; ++uframe)
+ allocated_max = max(allocated_max,
+ periodic_usecs (ehci, frame, uframe));
+
+ if (allocated_max > uframe_periodic_max) {
+ ehci_info(ehci,
+ "cannot decrease uframe_periodic_max becase "
+ "periodic bandwidth is already allocated "
+ "(%u > %u)\n",
+ allocated_max, uframe_periodic_max);
+ goto out_unlock;
+ }
+ }
+
+ /* increasing is always ok */
+
+ ehci_info(ehci, "setting max periodic bandwidth to %u%% "
+ "(== %u usec/uframe)\n",
+ 100*uframe_periodic_max/125, uframe_periodic_max);
+
+ if (uframe_periodic_max != 100)
+ ehci_warn(ehci, "max periodic bandwidth set is non-standard\n");
+
+ ehci->uframe_periodic_max = uframe_periodic_max;
+ ret = count;
+
+out_unlock:
+ spin_unlock_irqrestore (&ehci->lock, flags);
+ return ret;
+}
+static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, store_uframe_periodic_max);
+
+
+static inline int create_sysfs_files(struct ehci_hcd *ehci)
+{
+ struct device *controller = ehci_to_hcd(ehci)->self.controller;
+ int i = 0;
+
+ /* with integrated TT there is no companion! */
+ if (!ehci_is_TDI(ehci))
+ i = device_create_file(controller, &dev_attr_companion);
+ if (i)
+ goto out;
+
+ i = device_create_file(controller, &dev_attr_uframe_periodic_max);
+out:
+ return i;
+}
+
+static inline void remove_sysfs_files(struct ehci_hcd *ehci)
+{
+ struct device *controller = ehci_to_hcd(ehci)->self.controller;
+
+ /* with integrated TT there is no companion! */
+ if (!ehci_is_TDI(ehci))
+ device_remove_file(controller, &dev_attr_companion);
+
+ device_remove_file(controller, &dev_attr_uframe_periodic_max);
+}
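A rough user-space sketch of driving the new uframe_periodic_max attribute; the sysfs path below is a hypothetical example and depends on how the controller is enumerated on a given system:

/* Sketch only: writing the attribute from user space.  The device path
 * is an assumption, not taken from the patch.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/pci/devices/0000:00:1d.7/uframe_periodic_max";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* store_uframe_periodic_max() accepts 100..124 usec per uframe */
	fprintf(f, "110\n");
	fclose(f);
	return 0;
}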
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index 52a027aaa37..d661cf7de14 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -41,7 +41,7 @@ static int __devinit usb_w90x900_probe(const struct hc_driver *driver,
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
retval = -EBUSY;
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index a64d6d66d76..32793ce3d9e 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -174,7 +174,7 @@ static int __devinit ehci_hcd_xilinx_of_probe(struct platform_device *op)
return -ENOMEM;
hcd->rsrc_start = res.start;
- hcd->rsrc_len = res.end - res.start + 1;
+ hcd->rsrc_len = resource_size(&res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index bd6ff489baf..cc7d337ec35 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -75,6 +75,7 @@ struct ehci_hcd { /* one per controller */
struct ehci_qh *async;
struct ehci_qh *dummy; /* For AMD quirk use */
struct ehci_qh *reclaim;
+ struct ehci_qh *qh_scan_next;
unsigned scanning : 1;
/* periodic schedule support */
@@ -87,6 +88,8 @@ struct ehci_hcd { /* one per controller */
union ehci_shadow *pshadow; /* mirror hw periodic table */
int next_uframe; /* scan periodic, start here */
unsigned periodic_sched; /* periodic activity count */
+ unsigned uframe_periodic_max; /* max periodic time per uframe */
+
/* list of itds & sitds completed while clock_frame was still active */
struct list_head cached_itd_list;
@@ -117,7 +120,6 @@ struct ehci_hcd { /* one per controller */
struct timer_list iaa_watchdog;
struct timer_list watchdog;
unsigned long actions;
- unsigned stamp;
unsigned periodic_stamp;
unsigned random_frame;
unsigned long next_statechange;
@@ -343,6 +345,7 @@ struct ehci_qh {
struct ehci_qh *reclaim; /* next to reclaim */
struct ehci_hcd *ehci;
+ unsigned long unlink_time;
/*
* Do NOT use atomic operations for QH refcounting. On some CPUs
@@ -374,6 +377,7 @@ struct ehci_qh {
#define NO_FRAME ((unsigned short)~0) /* pick new start */
struct usb_device *dev; /* access to TT */
+ unsigned is_out:1; /* bulk or intr OUT */
unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
};
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 19223c7449e..572ea53b022 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -605,7 +605,7 @@ static int __devinit of_fhci_probe(struct platform_device *ofdev)
goto err_regs;
}
- hcd->regs = ioremap(usb_regs.start, usb_regs.end - usb_regs.start + 1);
+ hcd->regs = ioremap(usb_regs.start, resource_size(&usb_regs));
if (!hcd->regs) {
dev_err(dev, "could not ioremap regs\n");
ret = -ENOMEM;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index c9e6e454c62..840beda66dd 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1555,7 +1555,7 @@ static void kill_transfer(struct usb_hcd *hcd, struct urb *urb,
/* We need to forcefully reclaim the slot since some transfers never
return, e.g. interrupt transfers and NAKed bulk transfers. */
- if (usb_pipebulk(urb->pipe)) {
+ if (usb_pipecontrol(urb->pipe) || usb_pipebulk(urb->pipe)) {
skip_map = reg_read32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG);
skip_map |= (1 << qh->slot);
reg_write32(hcd->regs, HC_ATL_PTD_SKIPMAP_REG, skip_map);
@@ -1583,6 +1583,9 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
int retval = 0;
spin_lock_irqsave(&priv->lock, spinflags);
+ retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (retval)
+ goto out;
qh = urb->ep->hcpriv;
if (!qh) {
diff --git a/drivers/usb/host/ohci-ath79.c b/drivers/usb/host/ohci-ath79.c
index ffea3e7cb0a..c620c50f677 100644
--- a/drivers/usb/host/ohci-ath79.c
+++ b/drivers/usb/host/ohci-ath79.c
@@ -93,8 +93,8 @@ static int ohci_ath79_probe(struct platform_device *pdev)
ret = -ENODEV;
goto err_put_hcd;
}
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_dbg(&pdev->dev, "controller already in use\n");
diff --git a/drivers/usb/host/ohci-cns3xxx.c b/drivers/usb/host/ohci-cns3xxx.c
index f05ef87e934..5a00a1e1c6c 100644
--- a/drivers/usb/host/ohci-cns3xxx.c
+++ b/drivers/usb/host/ohci-cns3xxx.c
@@ -100,7 +100,7 @@ static int cns3xxx_ohci_probe(struct platform_device *pdev)
goto err1;
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index d22fb4d577b..6aca2c4453f 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -322,7 +322,7 @@ static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
goto err2;
}
hcd->rsrc_start = mem->start;
- hcd->rsrc_len = mem->end - mem->start + 1;
+ hcd->rsrc_len = resource_size(mem);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_dbg(&pdev->dev, "request_mem_region failed\n");
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 9aa10bdf391..f9cf3f04b74 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1,5 +1,7 @@
/*
- * OHCI HCD (Host Controller Driver) for USB.
+ * Open Host Controller Interface (OHCI) driver for USB.
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
*
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
* (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
diff --git a/drivers/usb/host/ohci-octeon.c b/drivers/usb/host/ohci-octeon.c
index e4ddfaf8870..d8b45647d1d 100644
--- a/drivers/usb/host/ohci-octeon.c
+++ b/drivers/usb/host/ohci-octeon.c
@@ -135,7 +135,7 @@ static int ohci_octeon_drv_probe(struct platform_device *pdev)
return -ENOMEM;
hcd->rsrc_start = res_mem->start;
- hcd->rsrc_len = res_mem->end - res_mem->start + 1;
+ hcd->rsrc_len = resource_size(res_mem);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
OCTEON_OHCI_HCD_NAME)) {
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 1ca1821320f..0c12f4e14dc 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -110,7 +110,7 @@ static int __devinit ohci_hcd_ppc_of_probe(struct platform_device *op)
return -ENOMEM;
hcd->rsrc_start = res.start;
- hcd->rsrc_len = res.end - res.start + 1;
+ hcd->rsrc_len = resource_size(&res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
index 89e670e38c1..c0f595c4448 100644
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -56,7 +56,7 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
pr_debug("%s: request_mem_region failed\n", __FILE__);
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index d8eb3bdafab..4204d9720d2 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -131,7 +131,7 @@ int usb_hcd_sa1111_probe (const struct hc_driver *driver,
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = dev->res.start;
- hcd->rsrc_len = dev->res.end - dev->res.start + 1;
+ hcd->rsrc_len = resource_size(&dev->res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dbg("request_mem_region failed");
diff --git a/drivers/usb/host/ohci-sh.c b/drivers/usb/host/ohci-sh.c
index f47867ff78c..14cecb52a9f 100644
--- a/drivers/usb/host/ohci-sh.c
+++ b/drivers/usb/host/ohci-sh.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2008 Renesas Solutions Corp.
*
- * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index 041d30f30c1..78918ca0da2 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -103,8 +103,7 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
goto err0;
}
- if (!request_mem_region(mem->start, mem->end - mem->start + 1,
- pdev->name)) {
+ if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
dev_err(dev, "request_mem_region failed\n");
retval = -EBUSY;
goto err0;
@@ -126,7 +125,7 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
if (!dma_declare_coherent_memory(dev, mem->start,
mem->start - mem->parent->start,
- (mem->end - mem->start) + 1,
+ resource_size(mem),
DMA_MEMORY_MAP |
DMA_MEMORY_EXCLUSIVE)) {
dev_err(dev, "cannot declare coherent memory\n");
@@ -149,7 +148,7 @@ static int ohci_hcd_sm501_drv_probe(struct platform_device *pdev)
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, pdev->name)) {
dev_err(dev, "request_mem_region failed\n");
@@ -185,7 +184,7 @@ err3:
err2:
dma_release_declared_memory(dev);
err1:
- release_mem_region(mem->start, mem->end - mem->start + 1);
+ release_mem_region(mem->start, resource_size(mem));
err0:
return retval;
}
@@ -201,7 +200,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
dma_release_declared_memory(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (mem)
- release_mem_region(mem->start, mem->end - mem->start + 1);
+ release_mem_region(mem->start, resource_size(mem));
/* mask interrupts and disable power */
diff --git a/drivers/usb/host/ohci-ssb.c b/drivers/usb/host/ohci-ssb.c
index 48ee6943bf3..c4aea3b8315 100644
--- a/drivers/usb/host/ohci-ssb.c
+++ b/drivers/usb/host/ohci-ssb.c
@@ -2,7 +2,7 @@
* Sonics Silicon Backplane
* Broadcom USB-core OHCI driver
*
- * Copyright 2007 Michael Buesch <mb@bu3sch.de>
+ * Copyright 2007 Michael Buesch <m@bues.ch>
*
* Derived from the OHCI-PCI driver
* Copyright 1999 Roman Weissgaerber
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index 3558491dd87..57ad1271fc9 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -208,13 +208,13 @@ static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
}
hcd->rsrc_start = regs->start;
- hcd->rsrc_len = regs->end - regs->start + 1;
+ hcd->rsrc_len = resource_size(regs);
tmio = hcd_to_tmio(hcd);
spin_lock_init(&tmio->lock);
- tmio->ccr = ioremap(config->start, config->end - config->start + 1);
+ tmio->ccr = ioremap(config->start, resource_size(config));
if (!tmio->ccr) {
ret = -ENOMEM;
goto err_ioremap_ccr;
@@ -228,7 +228,7 @@ static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
if (!dma_declare_coherent_memory(&dev->dev, sram->start,
sram->start,
- sram->end - sram->start + 1,
+ resource_size(sram),
DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)) {
ret = -EBUSY;
goto err_dma_declare;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 5fbe997dc6d..dcd889803f0 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3828,7 +3828,7 @@ static int oxu_drv_probe(struct platform_device *pdev)
return -ENODEV;
}
memstart = res->start;
- memlen = res->end - res->start + 1;
+ memlen = resource_size(res);
dev_dbg(&pdev->dev, "MEM resource %lx-%lx\n", memstart, memlen);
if (!request_mem_region(memstart, memlen,
oxu_hc_driver.description)) {
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index fd930618c28..629a96813fd 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -35,6 +35,8 @@
#define OHCI_INTRSTATUS 0x0c
#define OHCI_INTRENABLE 0x10
#define OHCI_INTRDISABLE 0x14
+#define OHCI_FMINTERVAL 0x34
+#define OHCI_HCR (1 << 0) /* host controller reset */
#define OHCI_OCR (1 << 3) /* ownership change request */
#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
#define OHCI_CTRL_IR (1 << 8) /* interrupt routing */
@@ -497,6 +499,32 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
/* reset controller, preserving RWC (and possibly IR) */
writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
+ readl(base + OHCI_CONTROL);
+
+ /* Some NVIDIA controllers stop working if kept in RESET for too long */
+ if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
+ u32 fminterval;
+ int cnt;
+
+ /* drive reset for at least 50 ms (7.1.7.5) */
+ msleep(50);
+
+ /* software reset of the controller, preserving HcFmInterval */
+ fminterval = readl(base + OHCI_FMINTERVAL);
+ writel(OHCI_HCR, base + OHCI_CMDSTATUS);
+
+ /* reset requires max 10 us delay */
+ for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */
+ if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
+ break;
+ udelay(1);
+ }
+ writel(fminterval, base + OHCI_FMINTERVAL);
+
+ /* Now we're in the SUSPEND state with all devices reset
+ * and wakeups and interrupts disabled
+ */
+ }
/*
* disable interrupts
@@ -507,20 +535,34 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
iounmap(base);
}
+static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = {
+ {
+ /* Pegatron Lucid (ExoPC) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
+ DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
+ },
+ },
+ {
+ /* Pegatron Lucid (Ordissimo AIRIS) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
+ DMI_MATCH(DMI_BIOS_VERSION, "Lucid-GE-133"),
+ },
+ },
+ { }
+};
+
static void __devinit ehci_bios_handoff(struct pci_dev *pdev,
void __iomem *op_reg_base,
u32 cap, u8 offset)
{
int try_handoff = 1, tried_handoff = 0;
- /* The Pegatron Lucid (ExoPC) tablet sporadically waits for 90
- * seconds trying the handoff on its unused controller. Skip
- * it. */
+ /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
+ * the handoff on its unused controller. Skip it. */
if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
- const char *dmi_bn = dmi_get_system_info(DMI_BOARD_NAME);
- const char *dmi_bv = dmi_get_system_info(DMI_BIOS_VERSION);
- if (dmi_bn && !strcmp(dmi_bn, "EXOPG06411") &&
- dmi_bv && !strcmp(dmi_bv, "Lucid-CE-133"))
+ if (dmi_check_system(ehci_dmi_nohandoff_table))
try_handoff = 0;
}
@@ -775,7 +817,7 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
/* If the BIOS owns the HC, signal that the OS wants it, and wait */
if (val & XHCI_HC_BIOS_OWNED) {
- writel(val & XHCI_HC_OS_OWNED, base + ext_cap_offset);
+ writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
/* Wait for 5 seconds with 10 microsecond polling interval */
timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
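The one-character xhci handoff fix above ('&' to '|') matters because OR sets the OS-owned request bit while preserving the rest of the register, whereas AND only masks the value and never actually requests ownership. A stand-alone sketch with made-up bit values (not the real xHCI definitions):

/* Sketch only: illustrating the '&' -> '|' fix with example bits. */
#include <stdio.h>

#define EXAMPLE_HC_BIOS_OWNED	(1u << 16)
#define EXAMPLE_HC_OS_OWNED	(1u << 24)

int main(void)
{
	unsigned int val = EXAMPLE_HC_BIOS_OWNED;	/* BIOS still owns the HC */

	printf("buggy: 0x%08x\n", val & EXAMPLE_HC_OS_OWNED);	/* 0x00000000 - nothing requested */
	printf("fixed: 0x%08x\n", val | EXAMPLE_HC_OS_OWNED);	/* 0x01010000 - OS-owned bit set, rest kept */
	return 0;
}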
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index db6f8b9c19b..40a0d8b03ad 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -6,7 +6,7 @@
* Portions Copyright (C) 2004-2005 David Brownell
* Portions Copyright (C) 1999 Roman Weissgaerber
*
- * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1438,7 +1438,7 @@ static void packet_write(struct r8a66597 *r8a66597, u16 pipenum)
if (pipenum > 0)
r8a66597_write(r8a66597, ~(1 << pipenum), BEMPSTS);
if (urb->transfer_buffer) {
- r8a66597_write_fifo(r8a66597, td->pipe->fifoaddr, buf, size);
+ r8a66597_write_fifo(r8a66597, td->pipe, buf, size);
if (!usb_pipebulk(urb->pipe) || td->maxpacket != size)
r8a66597_write(r8a66597, BVAL, td->pipe->fifoctr);
}
@@ -2306,7 +2306,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
dbg("resume port = %d", port);
rh->port &= ~USB_PORT_STAT_SUSPEND;
- rh->port |= USB_PORT_STAT_C_SUSPEND < 16;
+ rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
msleep(50);
r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
@@ -2517,6 +2517,7 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&r8a66597->child_device);
hcd->rsrc_start = res->start;
+ hcd->has_tt = 1;
ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | irq_trigger);
if (ret != 0) {
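The '<' to '<<' fix in r8a66597_bus_resume() above is easiest to see with the values written out; the constant below is a stand-in, not the real USB_PORT_STAT_C_SUSPEND definition:

/* Sketch only: '<' is a comparison yielding 0 or 1, so the old code
 * OR'd in 1 (the wrong status bit) instead of shifting the
 * suspend-change bit into the upper 16 bits of the port status word.
 */
#include <stdio.h>

#define EXAMPLE_PORT_STAT_C_SUSPEND 0x0004	/* stand-in value */

int main(void)
{
	printf("old: 0x%08x\n", EXAMPLE_PORT_STAT_C_SUSPEND < 16);	/* 0x00000001 */
	printf("new: 0x%08x\n", EXAMPLE_PORT_STAT_C_SUSPEND << 16);	/* 0x00040000 */
	return 0;
}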
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
index 25563e9a90b..f28782d20ee 100644
--- a/drivers/usb/host/r8a66597.h
+++ b/drivers/usb/host/r8a66597.h
@@ -201,11 +201,26 @@ static inline void r8a66597_write(struct r8a66597 *r8a66597, u16 val,
iowrite16(val, r8a66597->reg + offset);
}
+static inline void r8a66597_mdfy(struct r8a66597 *r8a66597,
+ u16 val, u16 pat, unsigned long offset)
+{
+ u16 tmp;
+ tmp = r8a66597_read(r8a66597, offset);
+ tmp = tmp & (~pat);
+ tmp = tmp | val;
+ r8a66597_write(r8a66597, tmp, offset);
+}
+
+#define r8a66597_bclr(r8a66597, val, offset) \
+ r8a66597_mdfy(r8a66597, 0, val, offset)
+#define r8a66597_bset(r8a66597, val, offset) \
+ r8a66597_mdfy(r8a66597, val, 0, offset)
+
static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597,
- unsigned long offset, u16 *buf,
+ struct r8a66597_pipe *pipe, u16 *buf,
int len)
{
- void __iomem *fifoaddr = r8a66597->reg + offset;
+ void __iomem *fifoaddr = r8a66597->reg + pipe->fifoaddr;
unsigned long count;
unsigned char *pb;
int i;
@@ -230,26 +245,15 @@ static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597,
iowrite16_rep(fifoaddr, buf, len);
if (unlikely(odd)) {
buf = &buf[len];
+ if (r8a66597->pdata->wr0_shorted_to_wr1)
+ r8a66597_bclr(r8a66597, MBW_16, pipe->fifosel);
iowrite8((unsigned char)*buf, fifoaddr);
+ if (r8a66597->pdata->wr0_shorted_to_wr1)
+ r8a66597_bset(r8a66597, MBW_16, pipe->fifosel);
}
}
}
-static inline void r8a66597_mdfy(struct r8a66597 *r8a66597,
- u16 val, u16 pat, unsigned long offset)
-{
- u16 tmp;
- tmp = r8a66597_read(r8a66597, offset);
- tmp = tmp & (~pat);
- tmp = tmp | val;
- r8a66597_write(r8a66597, tmp, offset);
-}
-
-#define r8a66597_bclr(r8a66597, val, offset) \
- r8a66597_mdfy(r8a66597, 0, val, offset)
-#define r8a66597_bset(r8a66597, val, offset) \
- r8a66597_mdfy(r8a66597, val, 0, offset)
-
static inline unsigned long get_syscfg_reg(int port)
{
return port == 0 ? SYSCFG0 : SYSCFG1;
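r8a66597_mdfy() and its bset/bclr wrappers move above r8a66597_write_fifo() in the hunk above because the new wr0_shorted_to_wr1 workaround calls them from there, and static inlines and macros must be visible before first use. A stand-alone sketch of the same read-modify-write pattern on a plain variable (the MBW_16 value here is a placeholder):

/* Sketch only: the read-modify-write pattern behind r8a66597_mdfy(),
 * r8a66597_bclr() and r8a66597_bset(), without touching a real register.
 */
#include <stdio.h>

static unsigned short example_mdfy(unsigned short reg,
				   unsigned short val, unsigned short pat)
{
	reg &= ~pat;	/* clear the bits in pat */
	reg |= val;	/* then set the bits in val */
	return reg;
}

#define example_bclr(reg, bits) example_mdfy(reg, 0, bits)
#define example_bset(reg, bits) example_mdfy(reg, bits, 0)

int main(void)
{
	unsigned short fifosel = 0x0400;	/* pretend 0x0400 is MBW_16 */

	fifosel = example_bclr(fifosel, 0x0400);	/* drop to 8-bit access */
	printf("after bclr: 0x%04x\n", fifosel);	/* 0x0000 */
	fifosel = example_bset(fifosel, 0x0400);	/* restore 16-bit access */
	printf("after bset: 0x%04x\n", fifosel);	/* 0x0400 */
	return 0;
}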
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
index d01c1e22768..f7a62138e3e 100644
--- a/drivers/usb/host/uhci-grlib.c
+++ b/drivers/usb/host/uhci-grlib.c
@@ -111,7 +111,7 @@ static int __devinit uhci_hcd_grlib_probe(struct platform_device *op)
return -ENOMEM;
hcd->rsrc_start = res.start;
- hcd->rsrc_len = res.end - res.start + 1;
+ hcd->rsrc_len = resource_size(&res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c
index f7582e8e216..d3e13b640d4 100644
--- a/drivers/usb/host/whci/init.c
+++ b/drivers/usb/host/whci/init.c
@@ -178,7 +178,7 @@ void whc_clean_up(struct whc *whc)
if (whc->qset_pool)
dma_pool_destroy(whc->qset_pool);
- len = whc->umc->resource.end - whc->umc->resource.start + 1;
+ len = resource_size(&whc->umc->resource);
if (whc->base)
iounmap(whc->base);
if (whc->base_phys)
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 1f50b4468e8..e9b0f043455 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -266,11 +266,11 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
xhci_dbg(xhci, "Interrupter target = 0x%x\n",
GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target)));
xhci_dbg(xhci, "Cycle bit = %u\n",
- (unsigned int) (le32_to_cpu(trb->link.control) & TRB_CYCLE));
+ le32_to_cpu(trb->link.control) & TRB_CYCLE);
xhci_dbg(xhci, "Toggle cycle bit = %u\n",
- (unsigned int) (le32_to_cpu(trb->link.control) & LINK_TOGGLE));
+ le32_to_cpu(trb->link.control) & LINK_TOGGLE);
xhci_dbg(xhci, "No Snoop bit = %u\n",
- (unsigned int) (le32_to_cpu(trb->link.control) & TRB_NO_SNOOP));
+ le32_to_cpu(trb->link.control) & TRB_NO_SNOOP);
break;
case TRB_TYPE(TRB_TRANSFER):
address = le64_to_cpu(trb->trans_event.buffer);
@@ -284,9 +284,9 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
address = le64_to_cpu(trb->event_cmd.cmd_trb);
xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
xhci_dbg(xhci, "Completion status = %u\n",
- (unsigned int) GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status)));
+ GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status)));
xhci_dbg(xhci, "Flags = 0x%x\n",
- (unsigned int) le32_to_cpu(trb->event_cmd.flags));
+ le32_to_cpu(trb->event_cmd.flags));
break;
default:
xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
@@ -318,10 +318,10 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
trb = &seg->trbs[i];
xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr,
- (u32)lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
- (u32)upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
- (unsigned int) le32_to_cpu(trb->link.intr_target),
- (unsigned int) le32_to_cpu(trb->link.control));
+ lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
+ upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
+ le32_to_cpu(trb->link.intr_target),
+ le32_to_cpu(trb->link.control));
addr += sizeof(*trb);
}
}
@@ -402,8 +402,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
addr,
lower_32_bits(le64_to_cpu(entry->seg_addr)),
upper_32_bits(le64_to_cpu(entry->seg_addr)),
- (unsigned int) le32_to_cpu(entry->seg_size),
- (unsigned int) le32_to_cpu(entry->rsvd));
+ le32_to_cpu(entry->seg_size),
+ le32_to_cpu(entry->rsvd));
addr += sizeof(*entry);
}
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 0f8e1d29a85..d446886b22b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -89,8 +89,8 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
return;
prev->next = next;
if (link_trbs) {
- prev->trbs[TRBS_PER_SEGMENT-1].link.
- segment_ptr = cpu_to_le64(next->dma);
+ prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
+ cpu_to_le64(next->dma);
/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
@@ -187,8 +187,8 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
if (link_trbs) {
/* See section 4.9.2.1 and 6.4.4.1 */
- prev->trbs[TRBS_PER_SEGMENT-1].link.
- control |= cpu_to_le32(LINK_TOGGLE);
+ prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
+ cpu_to_le32(LINK_TOGGLE);
xhci_dbg(xhci, "Wrote link toggle flag to"
" segment %p (virtual), 0x%llx (DMA)\n",
prev, (unsigned long long)prev->dma);
@@ -549,8 +549,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
addr = cur_ring->first_seg->dma |
SCT_FOR_CTX(SCT_PRI_TR) |
cur_ring->cycle_state;
- stream_info->stream_ctx_array[cur_stream].
- stream_ring = cpu_to_le64(addr);
+ stream_info->stream_ctx_array[cur_stream].stream_ring =
+ cpu_to_le64(addr);
xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
cur_stream, (unsigned long long) addr);
@@ -786,7 +786,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
slot_id,
&xhci->dcbaa->dev_context_ptrs[slot_id],
- (unsigned long long) le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
+ le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
return 1;
fail:
@@ -890,19 +890,19 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
/* 3) Only the control endpoint is valid - one endpoint context */
- slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | (u32) udev->route);
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
switch (udev->speed) {
case USB_SPEED_SUPER:
- slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_SS);
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
break;
case USB_SPEED_HIGH:
- slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_HS);
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
break;
case USB_SPEED_FULL:
- slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_FS);
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
break;
case USB_SPEED_LOW:
- slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_LS);
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
break;
case USB_SPEED_WIRELESS:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -916,7 +916,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
port_num = xhci_find_real_port_number(xhci, udev);
if (!port_num)
return -EINVAL;
- slot_ctx->dev_info2 |= cpu_to_le32((u32) ROOT_HUB_PORT(port_num));
+ slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
/* Set the port number in the virtual_device to the faked port number */
for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
top_dev = top_dev->parent)
@@ -1215,8 +1215,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
/* dig out max burst from ep companion desc */
max_packet = ep->ss_ep_comp.bMaxBurst;
- if (!max_packet)
- xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
break;
case USB_SPEED_HIGH:
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 17541d09eab..cb16de213f6 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -29,6 +29,9 @@
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
+#define PCI_VENDOR_ID_ETRON 0x1b6f
+#define PCI_DEVICE_ID_ASROCK_P67 0x7023
+
static const char hcd_name[] = "xhci_hcd";
/* called after powerup, by probe or system-pm "wakeup" */
@@ -134,6 +137,11 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
xhci->limit_active_eps = 64;
}
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
+ }
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 800f417c730..7113d16e2d3 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -113,15 +113,13 @@ static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
if (ring == xhci->event_ring)
return trb == &seg->trbs[TRBS_PER_SEGMENT];
else
- return (le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK)
- == TRB_TYPE(TRB_LINK);
+ return TRB_TYPE_LINK_LE32(trb->link.control);
}
static int enqueue_is_link_trb(struct xhci_ring *ring)
{
struct xhci_link_trb *link = &ring->enqueue->link;
- return ((le32_to_cpu(link->control) & TRB_TYPE_BITMASK) ==
- TRB_TYPE(TRB_LINK));
+ return TRB_TYPE_LINK_LE32(link->control);
}
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
@@ -372,7 +370,7 @@ static struct xhci_segment *find_trb_seg(
while (cur_seg->trbs > trb ||
&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
- if (le32_to_cpu(generic_trb->field[3]) & LINK_TOGGLE)
+ if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
*cycle_state ^= 0x1;
cur_seg = cur_seg->next;
if (cur_seg == start_seg)
@@ -489,8 +487,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
}
trb = &state->new_deq_ptr->generic;
- if ((le32_to_cpu(trb->field[3]) & TRB_TYPE_BITMASK) ==
- TRB_TYPE(TRB_LINK) && (le32_to_cpu(trb->field[3]) & LINK_TOGGLE))
+ if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
+ (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
state->new_cycle_state ^= 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
@@ -525,8 +523,7 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
true;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if ((le32_to_cpu(cur_trb->generic.field[3]) & TRB_TYPE_BITMASK)
- == TRB_TYPE(TRB_LINK)) {
+ if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
/* Unchain any chained Link TRBs, but
* leave the pointers intact.
*/
@@ -1000,7 +997,7 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
* but we don't care.
*/
xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
- (unsigned int) GET_COMP_CODE(le32_to_cpu(event->status)));
+ GET_COMP_CODE(le32_to_cpu(event->status)));
/* HW with the reset endpoint quirk needs to have a configure endpoint
* command complete before the endpoint can be used. Queue that here
@@ -1458,7 +1455,8 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
* endpoint anyway. Check if a babble halted the
* endpoint.
*/
- if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == EP_STATE_HALTED)
+ if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
+ cpu_to_le32(EP_STATE_HALTED))
return 1;
return 0;
@@ -1733,6 +1731,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
frame->status = -EOVERFLOW;
skip_td = true;
break;
+ case COMP_DEV_ERR:
case COMP_STALL:
frame->status = -EPROTO;
skip_td = true;
@@ -1752,10 +1751,8 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
for (cur_trb = ep_ring->dequeue,
cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if ((le32_to_cpu(cur_trb->generic.field[3]) &
- TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
- (le32_to_cpu(cur_trb->generic.field[3]) &
- TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+ if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
+ !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
}
len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
@@ -1767,9 +1764,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
}
}
- if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
- *status = 0;
-
return finish_td(xhci, td, event_trb, event, ep, status, false);
}
@@ -1787,8 +1781,7 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
idx = urb_priv->td_cnt;
frame = &td->urb->iso_frame_desc[idx];
- /* The transfer is partly done */
- *status = -EXDEV;
+ /* The transfer is partly done. */
frame->status = -EXDEV;
/* calc actual length */
@@ -1888,10 +1881,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
cur_trb != event_trb;
next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
- if ((le32_to_cpu(cur_trb->generic.field[3]) &
- TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
- (le32_to_cpu(cur_trb->generic.field[3]) &
- TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
+ if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
+ !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
td->urb->actual_length +=
TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
}
@@ -2016,6 +2007,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
goto cleanup;
+ case COMP_DEV_ERR:
+ xhci_warn(xhci, "WARN: detect an incompatible device");
+ status = -EPROTO;
+ break;
case COMP_MISSED_INT:
/*
* When encounter missed service error, one or more isoc tds
@@ -2046,8 +2041,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
ep_index);
xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
- (unsigned int) (le32_to_cpu(event->flags)
- & TRB_TYPE_BITMASK)>>10);
+ (le32_to_cpu(event->flags) &
+ TRB_TYPE_BITMASK)>>10);
xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
if (ep->skip) {
ep->skip = false;
@@ -2063,6 +2058,20 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* Is this a TRB in the currently executing TD? */
event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
td->last_trb, event_dma);
+
+ /*
+ * Skip the Force Stopped Event.  The event_trb (event_dma) of an FSE
+ * is not in the current TD pointed to by ep_ring->dequeue, because
+ * the hardware dequeue pointer is still at the TRB before the
+ * current TD.  That TRB may be a Link TRB or the last TRB of the
+ * previous TD.  The command completion handler will take care of
+ * the rest.
+ */
+ if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
+ ret = 0;
+ goto cleanup;
+ }
+
if (!event_seg) {
if (!ep->skip ||
!usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
@@ -2104,9 +2113,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* corresponding TD has been cancelled. Just ignore
* the TD.
*/
- if ((le32_to_cpu(event_trb->generic.field[3])
- & TRB_TYPE_BITMASK)
- == TRB_TYPE(TRB_TR_NOOP)) {
+ if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
xhci_dbg(xhci,
"event_trb is a no-op TRB. Skip it\n");
goto cleanup;
@@ -2158,6 +2165,11 @@ cleanup:
urb->transfer_buffer_length,
status);
spin_unlock(&xhci->lock);
+ /* EHCI, UHCI, and OHCI always unconditionally set the
+ * urb->status of an isochronous endpoint to 0.
+ */
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+ status = 0;
usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
spin_lock(&xhci->lock);
}
@@ -2432,7 +2444,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
next->link.control |= cpu_to_le32(TRB_CHAIN);
wmb();
- next->link.control ^= cpu_to_le32((u32) TRB_CYCLE);
+ next->link.control ^= cpu_to_le32(TRB_CYCLE);
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 06e7023258d..1c4432d8fc1 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -345,7 +345,8 @@ static void xhci_event_ring_work(unsigned long arg)
spin_lock_irqsave(&xhci->lock, flags);
temp = xhci_readl(xhci, &xhci->op_regs->status);
xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
- if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
+ if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg(xhci, "HW died, polling stopped.\n");
spin_unlock_irqrestore(&xhci->lock, flags);
return;
@@ -759,6 +760,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
msleep(100);
spin_lock_irq(&xhci->lock);
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ hibernated = true;
if (!hibernated) {
/* step 1: restore register */
@@ -937,8 +940,11 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
return 0;
}
+ xhci = hcd_to_xhci(hcd);
+ if (xhci->xhc_state & XHCI_STATE_HALTED)
+ return -ENODEV;
+
if (check_virt_dev) {
- xhci = hcd_to_xhci(hcd);
if (!udev->slot_id || !xhci->devs
|| !xhci->devs[udev->slot_id]) {
printk(KERN_DEBUG "xHCI %s called with unaddressed "
@@ -1240,7 +1246,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_urb_free_priv(xhci, urb_priv);
return ret;
}
- if (xhci->xhc_state & XHCI_STATE_DYING) {
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
"non-responsive xHCI host.\n",
urb->ep->desc.bEndpointAddress, urb);
@@ -1340,8 +1347,8 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
/* If the HC already knows the endpoint is disabled,
* or the HCD has noted it is disabled, ignore this request
*/
- if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
- EP_STATE_DISABLED ||
+ if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
+ cpu_to_le32(EP_STATE_DISABLED)) ||
le32_to_cpu(ctrl_ctx->drop_flags) &
xhci_get_endpoint_flag(&ep->desc)) {
xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
@@ -1401,6 +1408,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
u32 added_ctxs;
unsigned int last_ctx;
u32 new_add_flags, new_drop_flags, new_slot_info;
+ struct xhci_virt_device *virt_dev;
int ret = 0;
ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
@@ -1425,11 +1433,25 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
return 0;
}
- in_ctx = xhci->devs[udev->slot_id]->in_ctx;
- out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+ virt_dev = xhci->devs[udev->slot_id];
+ in_ctx = virt_dev->in_ctx;
+ out_ctx = virt_dev->out_ctx;
ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
+
+ /* If this endpoint is already in use, and the upper layers are trying
+ * to add it again without dropping it, reject the addition.
+ */
+ if (virt_dev->eps[ep_index].ring &&
+ !(le32_to_cpu(ctrl_ctx->drop_flags) &
+ xhci_get_endpoint_flag(&ep->desc))) {
+ xhci_warn(xhci, "Trying to add endpoint 0x%x "
+ "without dropping it.\n",
+ (unsigned int) ep->desc.bEndpointAddress);
+ return -EINVAL;
+ }
+
/* If the HCD has already noted the endpoint is enabled,
* ignore this request.
*/
@@ -1445,8 +1467,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
* process context, not interrupt context (or so documentation
* for usb_set_interface() and usb_set_configuration() claim).
*/
- if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
- udev, ep, GFP_NOIO) < 0) {
+ if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
__func__, ep->desc.bEndpointAddress);
return -ENOMEM;
@@ -1537,6 +1558,11 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
"and endpoint is not disabled.\n");
ret = -EINVAL;
break;
+ case COMP_DEV_ERR:
+ dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
+ "configure command.\n");
+ ret = -ENODEV;
+ break;
case COMP_SUCCESS:
dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
ret = 0;
@@ -1571,6 +1597,11 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
ret = -EINVAL;
break;
+ case COMP_DEV_ERR:
+ dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
+ "context command.\n");
+ ret = -ENODEV;
+ break;
case COMP_MEL_ERR:
/* Max Exit Latency too large error */
dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
@@ -1732,8 +1763,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
/* Enqueue pointer can be left pointing to the link TRB,
* we must handle that
*/
- if ((le32_to_cpu(command->command_trb->link.control)
- & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
+ if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
command->command_trb =
xhci->cmd_ring->enq_seg->next->trbs;
@@ -2533,8 +2563,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
/* Enqueue pointer can be left pointing to the link TRB,
* we must handle that
*/
- if ((le32_to_cpu(reset_device_cmd->command_trb->link.control)
- & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
+ if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
reset_device_cmd->command_trb =
xhci->cmd_ring->enq_seg->next->trbs;
@@ -2641,7 +2670,10 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
int i, ret;
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
- if (ret <= 0)
+ /* If the host is halted due to driver unload, we still need to free the
+ * device.
+ */
+ if (ret <= 0 && ret != -ENODEV)
return;
virt_dev = xhci->devs[udev->slot_id];
@@ -2655,7 +2687,8 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
spin_lock_irqsave(&xhci->lock, flags);
/* Don't disable the slot if the host controller is dead. */
state = xhci_readl(xhci, &xhci->op_regs->status);
- if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
+ if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_free_virt_device(xhci, udev->slot_id);
spin_unlock_irqrestore(&xhci->lock, flags);
return;
@@ -2853,6 +2886,11 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
dev_warn(&udev->dev, "Device not responding to set address.\n");
ret = -EPROTO;
break;
+ case COMP_DEV_ERR:
+ dev_warn(&udev->dev, "ERROR: Incompatible device for address "
+ "device command.\n");
+ ret = -ENODEV;
+ break;
case COMP_SUCCESS:
xhci_dbg(xhci, "Successful Address Device command\n");
break;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7d1ea3bf5e1..cae8e23308b 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -874,6 +874,8 @@ struct xhci_transfer_event {
#define COMP_PING_ERR 20
/* Event Ring is full */
#define COMP_ER_FULL 21
+/* Incompatible Device Error */
+#define COMP_DEV_ERR 22
/* Missed Service Error - HC couldn't service an isoc ep within interval */
#define COMP_MISSED_INT 23
/* Successfully stopped command ring */
@@ -1070,6 +1072,13 @@ union xhci_trb {
/* Get NEC firmware revision. */
#define TRB_NEC_GET_FW 49
+#define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
+/* Above, but for __le32 types -- can avoid work by swapping constants: */
+#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+ cpu_to_le32(TRB_TYPE(TRB_LINK)))
+#define TRB_TYPE_NOOP_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+ cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))
+
#define NEC_FW_MINOR(p) (((p) >> 0) & 0xff)
#define NEC_FW_MAJOR(p) (((p) >> 8) & 0xff)
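The TRB_TYPE_LINK_LE32()/TRB_TYPE_NOOP_LE32() helpers added above compare in __le32 space: because the mask and the expected type are compile-time constants, swapping them with cpu_to_le32() costs nothing, and the TRB field itself never needs a per-access byteswap. A stand-alone sketch of the equivalence (constants are illustrative, and a little-endian host is assumed so the swap helpers are identities):

/* Sketch only: "swap the constants, not the data". */
#include <stdio.h>
#include <stdint.h>

#define EX_TYPE_BITMASK	0xfc00u
#define EX_TYPE(t)	((uint32_t)(t) << 10)
#define EX_LINK		6u

static uint32_t ex_cpu_to_le32(uint32_t x) { return x; }	/* little-endian host assumed */
static uint32_t ex_le32_to_cpu(uint32_t x) { return x; }

int main(void)
{
	uint32_t control = ex_cpu_to_le32(EX_TYPE(EX_LINK) | 0x1);

	/* old style: swap the field, then mask and compare */
	int a = (ex_le32_to_cpu(control) & EX_TYPE_BITMASK) == EX_TYPE(EX_LINK);
	/* new style: mask and compare directly in __le32 space */
	int b = (control & ex_cpu_to_le32(EX_TYPE_BITMASK)) ==
		ex_cpu_to_le32(EX_TYPE(EX_LINK));

	printf("%d %d\n", a, b);	/* both print 1 */
	return 0;
}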
@@ -1308,6 +1317,7 @@ struct xhci_hcd {
*/
#define XHCI_EP_LIMIT_QUIRK (1 << 5)
#define XHCI_BROKEN_MSI (1 << 6)
+#define XHCI_RESET_ON_RESUME (1 << 7)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index a0037961e5b..27e209a7222 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -131,7 +131,7 @@
#include <linux/usb.h>
#include <linux/proc_fs.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/blkdev.h>
#include "../../scsi/scsi.h"
#include <scsi/scsi_host.h>
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 68ab460a735..ac0d75a9005 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -29,7 +29,7 @@
#include <linux/backlight.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define APPLE_VENDOR_ID 0x05AC
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index b16bd3ce391..2f41089cd85 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -187,7 +187,7 @@ struct usb_ftdi {
u32 controlreg;
u8 response[4 + 1024];
int expected;
- int recieved;
+ int received;
int ed_found;
};
#define kref_to_usb_ftdi(d) container_of(d, struct usb_ftdi, kref)
@@ -353,7 +353,7 @@ static void ftdi_elan_abandon_targets(struct usb_ftdi *ftdi)
mutex_lock(&ftdi->u132_lock);
}
}
- ftdi->recieved = 0;
+ ftdi->received = 0;
ftdi->expected = 4;
ftdi->ed_found = 0;
mutex_unlock(&ftdi->u132_lock);
@@ -411,7 +411,7 @@ static void ftdi_elan_flush_targets(struct usb_ftdi *ftdi)
}
}
}
- ftdi->recieved = 0;
+ ftdi->received = 0;
ftdi->expected = 4;
ftdi->ed_found = 0;
mutex_unlock(&ftdi->u132_lock);
@@ -447,7 +447,7 @@ static void ftdi_elan_cancel_targets(struct usb_ftdi *ftdi)
}
}
}
- ftdi->recieved = 0;
+ ftdi->received = 0;
ftdi->expected = 4;
ftdi->ed_found = 0;
mutex_unlock(&ftdi->u132_lock);
@@ -874,7 +874,7 @@ static char *have_ed_set_response(struct usb_ftdi *ftdi,
mutex_unlock(&ftdi->u132_lock);
ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response,
payload);
- ftdi->recieved = 0;
+ ftdi->received = 0;
ftdi->expected = 4;
ftdi->ed_found = 0;
return ftdi->response;
@@ -890,7 +890,7 @@ static char *have_ed_set_response(struct usb_ftdi *ftdi,
mutex_unlock(&ftdi->u132_lock);
ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response,
payload);
- ftdi->recieved = 0;
+ ftdi->received = 0;
ftdi->expected = 4;
ftdi->ed_found = 0;
return ftdi->response;
@@ -905,7 +905,7 @@ static char *have_ed_set_response(struct usb_ftdi *ftdi,
mutex_unlock(&ftdi->u132_lock);
ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response,
payload);
- ftdi->recieved = 0;
+ ftdi->received = 0;
ftdi->expected = 4;
ftdi->ed_found = 0;
return ftdi->response;
@@ -914,7 +914,7 @@ static char *have_ed_set_response(struct usb_ftdi *ftdi,
mutex_unlock(&ftdi->u132_lock);
ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response,
payload);
- ftdi->recieved = 0;
+ ftdi->received = 0;
ftdi->expected = 4;
ftdi->ed_found = 0;
return ftdi->response;
@@ -934,7 +934,7 @@ static char *have_ed_get_response(struct usb_ftdi *ftdi,
if (target->active)
ftdi_elan_do_callback(ftdi, target, NULL, 0);
target->abandoning = 0;
- ftdi->recieved = 0;
+ ftdi->received = 0;
ftdi->expected = 4;
ftdi->ed_found = 0;
return ftdi->response;
@@ -951,7 +951,7 @@ static char *have_ed_get_response(struct usb_ftdi *ftdi,
*/
static int ftdi_elan_respond_engine(struct usb_ftdi *ftdi)
{
- u8 *b = ftdi->response + ftdi->recieved;
+ u8 *b = ftdi->response + ftdi->received;
int bytes_read = 0;
int retry_on_empty = 1;
int retry_on_timeout = 3;
@@ -1043,11 +1043,11 @@ static int ftdi_elan_respond_engine(struct usb_ftdi *ftdi)
u8 c = ftdi->bulk_in_buffer[++ftdi->bulk_in_last];
bytes_read += 1;
ftdi->bulk_in_left -= 1;
- if (ftdi->recieved == 0 && c == 0xFF) {
+ if (ftdi->received == 0 && c == 0xFF) {
goto have;
} else
*b++ = c;
- if (++ftdi->recieved < ftdi->expected) {
+ if (++ftdi->received < ftdi->expected) {
goto have;
} else if (ftdi->ed_found) {
int ed_number = (ftdi->response[0] >> 5) & 0x03;
@@ -1069,7 +1069,7 @@ static int ftdi_elan_respond_engine(struct usb_ftdi *ftdi)
}
ftdi_elan_do_callback(ftdi, target, 4 + ftdi->response,
payload);
- ftdi->recieved = 0;
+ ftdi->received = 0;
ftdi->expected = 4;
ftdi->ed_found = 0;
b = ftdi->response;
@@ -1089,7 +1089,7 @@ static int ftdi_elan_respond_engine(struct usb_ftdi *ftdi)
*respond->value = data;
*respond->result = 0;
complete(&respond->wait_completion);
- ftdi->recieved = 0;
+ ftdi->received = 0;
ftdi->expected = 4;
ftdi->ed_found = 0;
b = ftdi->response;
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index c302e1983c7..1c3afcc11bd 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -670,6 +670,9 @@ int mon_text_add(struct mon_bus *mbus, const struct usb_bus *ubus)
int busnum = ubus? ubus->busnum: 0;
int rc;
+ if (mon_dir == NULL)
+ return 0;
+
if (ubus != NULL) {
rc = snprintf(name, NAMESZ, "%dt", busnum);
if (rc <= 0 || rc >= NAMESZ)
@@ -740,12 +743,12 @@ int __init mon_text_init(void)
mondir = debugfs_create_dir("usbmon", usb_debug_root);
if (IS_ERR(mondir)) {
- printk(KERN_NOTICE TAG ": debugfs is not available\n");
- return -ENODEV;
+ /* debugfs not available, but we can use usbmon without it */
+ return 0;
}
if (mondir == NULL) {
printk(KERN_NOTICE TAG ": unable to create usbmon directory\n");
- return -ENODEV;
+ return -ENOMEM;
}
mon_dir = mondir;
return 0;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 13093481f91..fc34b8b1191 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -3,12 +3,9 @@
# for silicon based on Mentor Graphics INVENTRA designs
#
-comment "Enable Host or Gadget support to see Inventra options"
- depends on !USB && USB_GADGET=n
-
# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
config USB_MUSB_HDRC
- depends on (USB || USB_GADGET)
+ depends on USB && USB_GADGET
depends on (ARM || (BF54x && !BF544) || (BF52x && !BF522 && !BF523))
select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN)
select TWL4030_USB if MACH_OMAP_3430SDP
@@ -67,79 +64,6 @@ config USB_MUSB_UX500
endchoice
-choice
- prompt "Driver Mode"
- depends on USB_MUSB_HDRC
- help
- Dual-Role devices can support both host and peripheral roles,
- as well as a the special "OTG Device" role which can switch
- between both roles as needed.
-
-# use USB_MUSB_HDRC_HCD not USB_MUSB_HOST to #ifdef host side support;
-# OTG needs both roles, not just USB_MUSB_HOST.
-config USB_MUSB_HOST
- depends on USB
- bool "USB Host"
- help
- Say Y here if your system supports the USB host role.
- If it has a USB "A" (rectangular), "Mini-A" (uncommon),
- or "Mini-AB" connector, it supports the host role.
- (With a "Mini-AB" connector, you should enable USB OTG.)
-
-# use USB_GADGET_MUSB_HDRC not USB_MUSB_PERIPHERAL to #ifdef peripheral
-# side support ... OTG needs both roles
-config USB_MUSB_PERIPHERAL
- depends on USB_GADGET
- bool "USB Peripheral (gadget stack)"
- select USB_GADGET_MUSB_HDRC
- help
- Say Y here if your system supports the USB peripheral role.
- If it has a USB "B" (squarish), "Mini-B", or "Mini-AB"
- connector, it supports the peripheral role.
- (With a "Mini-AB" connector, you should enable USB OTG.)
-
-config USB_MUSB_OTG
- depends on USB && USB_GADGET && PM && EXPERIMENTAL
- bool "Both host and peripheral: USB OTG (On The Go) Device"
- select USB_GADGET_MUSB_HDRC
- select USB_OTG
- help
- The most notable feature of USB OTG is support for a
- "Dual-Role" device, which can act as either a device
- or a host. The initial role choice can be changed
- later, when two dual-role devices talk to each other.
-
- At this writing, the OTG support in this driver is incomplete,
- omitting the mandatory HNP or SRP protocols. However, some
- of the cable based role switching works. (That is, grounding
- the ID pin switches the controller to host mode, while leaving
- it floating leaves it in peripheral mode.)
-
- Select this if your system has a Mini-AB connector, or
- to simplify certain kinds of configuration.
-
- To implement your OTG Targeted Peripherals List (TPL), enable
- USB_OTG_WHITELIST and update "drivers/usb/core/otg_whitelist.h"
- to match your requirements.
-
-endchoice
-
-# enable peripheral support (including with OTG)
-config USB_GADGET_MUSB_HDRC
- bool
- depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
-# default y
-# select USB_GADGET_DUALSPEED
-# select USB_GADGET_SELECTED
-
-# enables host support (including with OTG)
-config USB_MUSB_HDRC_HCD
- bool
- depends on USB_MUSB_HDRC && (USB_MUSB_HOST || USB_MUSB_OTG)
- select USB_OTG if USB_GADGET_MUSB_HDRC
- default y
-
-
config MUSB_PIO_ONLY
bool 'Disable DMA (always use PIO)'
depends on USB_MUSB_HDRC
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index c4d228b6ef8..d8fd9d092de 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -6,8 +6,8 @@ obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
musb_hdrc-y := musb_core.o
-musb_hdrc-$(CONFIG_USB_GADGET_MUSB_HDRC) += musb_gadget_ep0.o musb_gadget.o
-musb_hdrc-$(CONFIG_USB_MUSB_HDRC_HCD) += musb_virthub.o musb_host.o
+musb_hdrc-y += musb_gadget_ep0.o musb_gadget.o
+musb_hdrc-y += musb_virthub.o musb_host.o
musb_hdrc-$(CONFIG_DEBUG_FS) += musb_debugfs.o
# Hardware Glue Layer
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index 23ac28f98d9..08f1d0b662a 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -124,11 +124,7 @@ static void am35x_musb_disable(struct musb *musb)
musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
}
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
#define portstate(stmt) stmt
-#else
-#define portstate(stmt)
-#endif
static void am35x_musb_set_vbus(struct musb *musb, int is_on)
{
diff --git a/drivers/usb/musb/blackfin.h b/drivers/usb/musb/blackfin.h
index bd9352a2ef2..c84dae546dc 100644
--- a/drivers/usb/musb/blackfin.h
+++ b/drivers/usb/musb/blackfin.h
@@ -47,7 +47,7 @@
* So, need to either use silicon v0.2+ or disable DMA mode in MUSB.
*/
#if ANOMALY_05000380 && defined(CONFIG_BF52x) && \
- defined(CONFIG_USB_MUSB_HDRC) && !defined(CONFIG_MUSB_PIO_ONLY)
+ !defined(CONFIG_MUSB_PIO_ONLY)
# error "Please use PIO mode in MUSB driver on bf52x chip v0.0 and v0.1"
#endif
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 662ed34980b..4da7492ddbd 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -172,11 +172,7 @@ static void da8xx_musb_disable(struct musb *musb)
musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
}
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-#define portstate(stmt) stmt
-#else
-#define portstate(stmt)
-#endif
+#define portstate(stmt) stmt
static void da8xx_musb_set_vbus(struct musb *musb, int is_on)
{
@@ -397,21 +393,15 @@ static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode)
cfgchip2 &= ~CFGCHIP2_OTGMODE;
switch (musb_mode) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
case MUSB_HOST: /* Force VBUS valid, ID = 0 */
cfgchip2 |= CFGCHIP2_FORCE_HOST;
break;
-#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */
cfgchip2 |= CFGCHIP2_FORCE_DEVICE;
break;
-#endif
-#ifdef CONFIG_USB_MUSB_OTG
case MUSB_OTG: /* Don't override the VBUS/ID comparators */
cfgchip2 |= CFGCHIP2_NO_OVERRIDE;
break;
-#endif
default:
dev_dbg(musb->controller, "Trying to set unsupported mode %u\n", musb_mode);
}
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 2a2adf6492c..8bdf25a8b02 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -143,12 +143,7 @@ static void davinci_musb_disable(struct musb *musb)
}
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
#define portstate(stmt) stmt
-#else
-#define portstate(stmt)
-#endif
-
/*
* VBUS SWITCHING IS BOARD-SPECIFIC ... at least for the DM6446 EVM,
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c71b0372786..20a28731c33 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -328,8 +328,6 @@ void musb_load_testpacket(struct musb *musb)
/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_USB_MUSB_OTG
-
/*
* Handles OTG hnp timeouts, such as b_ase0_brst
*/
@@ -401,8 +399,6 @@ void musb_hnp_stop(struct musb *musb)
musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
}
-#endif
-
/*
* Interrupt Service Routine to record USB "global" interrupts.
* Since these do not happen often and signify things of
@@ -432,7 +428,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
dev_dbg(musb->controller, "RESUME (%s)\n", otg_state_string(musb->xceiv->state));
if (devctl & MUSB_DEVCTL_HM) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
void __iomem *mbase = musb->mregs;
switch (musb->xceiv->state) {
@@ -472,17 +467,13 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
"host",
otg_state_string(musb->xceiv->state));
}
-#endif
} else {
switch (musb->xceiv->state) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
case OTG_STATE_A_SUSPEND:
/* possibly DISCONNECT is upcoming */
musb->xceiv->state = OTG_STATE_A_HOST;
usb_hcd_resume_root_hub(musb_to_hcd(musb));
break;
-#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
case OTG_STATE_B_WAIT_ACON:
case OTG_STATE_B_PERIPHERAL:
/* disconnect while suspended? we may
@@ -500,7 +491,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
case OTG_STATE_B_IDLE:
musb->int_usb &= ~MUSB_INTR_SUSPEND;
break;
-#endif
default:
WARNING("bogus %s RESUME (%s)\n",
"peripheral",
@@ -509,7 +499,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
}
}
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
/* see manual for the order of the tests */
if (int_usb & MUSB_INTR_SESSREQ) {
void __iomem *mbase = musb->mregs;
@@ -609,14 +598,12 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
handled = IRQ_HANDLED;
}
-#endif
if (int_usb & MUSB_INTR_SUSPEND) {
dev_dbg(musb->controller, "SUSPEND (%s) devctl %02x power %02x\n",
otg_state_string(musb->xceiv->state), devctl, power);
handled = IRQ_HANDLED;
switch (musb->xceiv->state) {
-#ifdef CONFIG_USB_MUSB_OTG
case OTG_STATE_A_PERIPHERAL:
/* We also come here if the cable is removed, since
* this silicon doesn't report ID-no-longer-grounded.
@@ -633,7 +620,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
? : OTG_TIME_A_WAIT_BCON));
break;
-#endif
case OTG_STATE_B_IDLE:
if (!musb->is_active)
break;
@@ -642,13 +628,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb->is_active = is_otg_enabled(musb)
&& musb->xceiv->gadget->b_hnp_enable;
if (musb->is_active) {
-#ifdef CONFIG_USB_MUSB_OTG
musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n");
mod_timer(&musb->otg_timer, jiffies
+ msecs_to_jiffies(
OTG_TIME_B_ASE0_BRST));
-#endif
}
break;
case OTG_STATE_A_WAIT_BCON:
@@ -672,7 +656,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
}
}
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
if (int_usb & MUSB_INTR_CONNECT) {
struct usb_hcd *hcd = musb_to_hcd(musb);
@@ -682,7 +665,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb->ep0_stage = MUSB_EP0_START;
-#ifdef CONFIG_USB_MUSB_OTG
/* flush endpoints when transitioning from Device Mode */
if (is_peripheral_active(musb)) {
/* REVISIT HNP; just force disconnect */
@@ -690,7 +672,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb_writew(musb->mregs, MUSB_INTRTXE, musb->epmask);
musb_writew(musb->mregs, MUSB_INTRRXE, musb->epmask & 0xfffe);
musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
-#endif
musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
|USB_PORT_STAT_HIGH_SPEED
|USB_PORT_STAT_ENABLE
@@ -739,7 +720,6 @@ b_host:
dev_dbg(musb->controller, "CONNECT (%s) devctl %02x\n",
otg_state_string(musb->xceiv->state), devctl);
}
-#endif /* CONFIG_USB_MUSB_HDRC_HCD */
if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
dev_dbg(musb->controller, "DISCONNECT (%s) as %s, devctl %02x\n",
@@ -748,7 +728,6 @@ b_host:
handled = IRQ_HANDLED;
switch (musb->xceiv->state) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
case OTG_STATE_A_HOST:
case OTG_STATE_A_SUSPEND:
usb_hcd_resume_root_hub(musb_to_hcd(musb));
@@ -757,8 +736,6 @@ b_host:
musb_platform_try_idle(musb, jiffies
+ msecs_to_jiffies(musb->a_wait_bcon));
break;
-#endif /* HOST */
-#ifdef CONFIG_USB_MUSB_OTG
case OTG_STATE_B_HOST:
/* REVISIT this behaves for "real disconnect"
* cases; make sure the other transitions from
@@ -777,13 +754,10 @@ b_host:
/* FALLTHROUGH */
case OTG_STATE_B_WAIT_ACON:
/* FALLTHROUGH */
-#endif /* OTG */
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
case OTG_STATE_B_PERIPHERAL:
case OTG_STATE_B_IDLE:
musb_g_disconnect(musb);
break;
-#endif /* GADGET */
default:
WARNING("unhandled DISCONNECT transition (%s)\n",
otg_state_string(musb->xceiv->state));
@@ -814,7 +788,6 @@ b_host:
dev_dbg(musb->controller, "BUS RESET as %s\n",
otg_state_string(musb->xceiv->state));
switch (musb->xceiv->state) {
-#ifdef CONFIG_USB_OTG
case OTG_STATE_A_SUSPEND:
/* We need to ignore disconnect on suspend
* otherwise tusb 2.0 won't reconnect after a
@@ -842,7 +815,6 @@ b_host:
musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
musb_g_reset(musb);
break;
-#endif
case OTG_STATE_B_IDLE:
musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
/* FALLTHROUGH */
@@ -927,7 +899,6 @@ void musb_start(struct musb *musb)
/* put into basic highspeed mode and start session */
musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
- | MUSB_POWER_SOFTCONN
| MUSB_POWER_HSENAB
/* ENSUSPEND wedges tusb */
/* | MUSB_POWER_ENSUSPEND */
@@ -1038,10 +1009,15 @@ static void musb_shutdown(struct platform_device *pdev)
* We don't currently use dynamic fifo setup capability to do anything
* more than selecting one of a bunch of predefined configurations.
*/
-#if defined(CONFIG_USB_MUSB_TUSB6010) || defined(CONFIG_USB_MUSB_OMAP2PLUS) \
- || defined(CONFIG_USB_MUSB_AM35X)
+#if defined(CONFIG_USB_MUSB_TUSB6010) \
+ || defined(CONFIG_USB_MUSB_TUSB6010_MODULE) \
+ || defined(CONFIG_USB_MUSB_OMAP2PLUS) \
+ || defined(CONFIG_USB_MUSB_OMAP2PLUS_MODULE) \
+ || defined(CONFIG_USB_MUSB_AM35X) \
+ || defined(CONFIG_USB_MUSB_AM35X_MODULE)
static ushort __initdata fifo_mode = 4;
-#elif defined(CONFIG_USB_MUSB_UX500)
+#elif defined(CONFIG_USB_MUSB_UX500) \
+ || defined(CONFIG_USB_MUSB_UX500_MODULE)
static ushort __initdata fifo_mode = 5;
#else
static ushort __initdata fifo_mode = 2;
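The extra *_MODULE tests are needed because defined(CONFIG_FOO) is only true for built-in (=y) options; when a tristate option is built as a module the preprocessor sees CONFIG_FOO_MODULE instead. Later kernels provide IS_ENABLED() in <linux/kconfig.h>, which covers both cases; a hedged sketch of the same selection written with that helper (assuming it is available in the tree being patched):

/* Sketch only: IS_ENABLED(CONFIG_FOO) evaluates true for both =y and =m. */
#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010) \
	|| IS_ENABLED(CONFIG_USB_MUSB_OMAP2PLUS) \
	|| IS_ENABLED(CONFIG_USB_MUSB_AM35X)
static ushort __initdata fifo_mode = 4;
#elif IS_ENABLED(CONFIG_USB_MUSB_UX500)
static ushort __initdata fifo_mode = 5;
#else
static ushort __initdata fifo_mode = 2;
#endif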
@@ -1191,14 +1167,12 @@ fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
/* configure the FIFO */
musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
/* EP0 reserved endpoint for control, bidirectional;
* EP1 reserved for bulk, two unidirectional halves.
*/
if (hw_ep->epnum == 1)
musb->bulk_ep = hw_ep;
/* REVISIT error check: be sure ep0 can both rx and tx ... */
-#endif
switch (cfg->style) {
case FIFO_TX:
musb_write_txfifosz(mbase, c_size);
@@ -1317,12 +1291,10 @@ done:
n + 1, musb->config->num_eps * 2 - 1,
offset, (1 << (musb->config->ram_bits + 2)));
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
if (!musb->bulk_ep) {
pr_debug("%s: missing bulk\n", musb_driver_name);
return -EINVAL;
}
-#endif
return 0;
}
@@ -1353,7 +1325,6 @@ static int __init ep_config_from_hw(struct musb *musb)
/* FIXME set up hw_ep->{rx,tx}_double_buffered */
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
/* pick an RX/TX endpoint for bulk */
if (hw_ep->max_packet_sz_tx < 512
|| hw_ep->max_packet_sz_rx < 512)
@@ -1365,15 +1336,12 @@ static int __init ep_config_from_hw(struct musb *musb)
if (musb->bulk_ep)
continue;
musb->bulk_ep = hw_ep;
-#endif
}
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
if (!musb->bulk_ep) {
pr_debug("%s: missing bulk\n", musb_driver_name);
return -EINVAL;
}
-#endif
return 0;
}
@@ -1429,13 +1397,11 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
} else {
musb->is_multipoint = 0;
type = "";
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
#ifndef CONFIG_USB_OTG_BLACKLIST_HUB
printk(KERN_ERR
"%s: kernel must blacklist external hubs\n",
musb_driver_name);
#endif
-#endif
}
/* log release info */
@@ -1479,11 +1445,9 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
#endif
hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
hw_ep->target_regs = musb_read_target_reg_base(i, mbase);
hw_ep->rx_reinit = 1;
hw_ep->tx_reinit = 1;
-#endif
if (hw_ep->max_packet_sz_tx) {
dev_dbg(musb->controller,
@@ -1561,14 +1525,6 @@ irqreturn_t musb_interrupt(struct musb *musb)
(devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral",
musb->int_usb, musb->int_tx, musb->int_rx);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- if (is_otg_enabled(musb) || is_peripheral_enabled(musb))
- if (!musb->gadget_driver) {
- dev_dbg(musb->controller, "No gadget driver loaded\n");
- return IRQ_HANDLED;
- }
-#endif
-
/* the core can interrupt us for multiple reasons; docs have
* a generic interrupt flowchart to follow
*/
@@ -1767,8 +1723,6 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
-
/* Gadget drivers can't know that a host is connected so they might want
* to start SRP, but users can. This allows userspace to trigger SRP.
*/
@@ -1792,14 +1746,10 @@ musb_srp_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
-#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
-
static struct attribute *musb_attributes[] = {
&dev_attr_mode.attr,
&dev_attr_vbus.attr,
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
&dev_attr_srp.attr,
-#endif
NULL
};
@@ -1832,7 +1782,6 @@ allocate_instance(struct device *dev,
struct musb *musb;
struct musb_hw_ep *ep;
int epnum;
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
struct usb_hcd *hcd;
hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
@@ -1850,12 +1799,6 @@ allocate_instance(struct device *dev,
musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
-#else
- musb = kzalloc(sizeof *musb, GFP_KERNEL);
- if (!musb)
- return NULL;
-
-#endif
dev_set_drvdata(dev, musb);
musb->mregs = mbase;
musb->ctrl_base = mbase;
@@ -1885,9 +1828,7 @@ static void musb_free(struct musb *musb)
sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
musb_gadget_cleanup(musb);
-#endif
if (musb->nIrq >= 0) {
if (musb->irq_wake)
@@ -1901,11 +1842,7 @@ static void musb_free(struct musb *musb)
dma_controller_destroy(c);
}
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- usb_put_hcd(musb_to_hcd(musb));
-#else
kfree(musb);
-#endif
}
/*
@@ -1955,7 +1892,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
* - initializes musb->xceiv, usually by otg_get_transceiver()
* - stops powering VBUS
*
- * There are various transciever configurations. Blackfin,
+ * There are various transceiver configurations. Blackfin,
* DaVinci, TUSB60x0, and others integrate them. OMAP3 uses
* external/discrete ones in various flavors (twl4030 family,
* isp1504, non-OTG, etc) mostly hooking up through ULPI.
@@ -2000,9 +1937,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
if (status < 0)
goto fail3;
-#ifdef CONFIG_USB_MUSB_OTG
setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
-#endif
/* Init IRQ workqueue before request_irq */
INIT_WORK(&musb->irq_work, musb_irq_work);
@@ -2214,7 +2149,16 @@ static void musb_save_context(struct musb *musb)
musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
for (i = 0; i < musb->config->num_eps; ++i) {
- epio = musb->endpoints[i].regs;
+ struct musb_hw_ep *hw_ep;
+
+ hw_ep = &musb->endpoints[i];
+ if (!hw_ep)
+ continue;
+
+ epio = hw_ep->regs;
+ if (!epio)
+ continue;
+
musb->context.index_regs[i].txmaxp =
musb_readw(epio, MUSB_TXMAXP);
musb->context.index_regs[i].txcsr =
@@ -2280,7 +2224,16 @@ static void musb_restore_context(struct musb *musb)
musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
for (i = 0; i < musb->config->num_eps; ++i) {
- epio = musb->endpoints[i].regs;
+ struct musb_hw_ep *hw_ep;
+
+ hw_ep = &musb->endpoints[i];
+ if (!hw_ep)
+ continue;
+
+ epio = hw_ep->regs;
+ if (!epio)
+ continue;
+
musb_writew(epio, MUSB_TXMAXP,
musb->context.index_regs[i].txmaxp);
musb_writew(epio, MUSB_TXCSR,
@@ -2329,13 +2282,13 @@ static void musb_restore_context(struct musb *musb)
musb->context.index_regs[i].rxhubport);
}
}
+ musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
}
static int musb_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
+ struct musb *musb = dev_to_musb(dev);
unsigned long flags;
- struct musb *musb = dev_to_musb(&pdev->dev);
spin_lock_irqsave(&musb->lock, flags);
@@ -2357,8 +2310,7 @@ static int musb_suspend(struct device *dev)
static int musb_resume_noirq(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct musb *musb = dev_to_musb(&pdev->dev);
+ struct musb *musb = dev_to_musb(dev);
musb_restore_context(musb);
@@ -2426,34 +2378,13 @@ static struct platform_driver musb_driver = {
static int __init musb_init(void)
{
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
if (usb_disabled())
return 0;
-#endif
pr_info("%s: version " MUSB_VERSION ", "
-#ifdef CONFIG_MUSB_PIO_ONLY
- "pio"
-#elif defined(CONFIG_USB_TI_CPPI_DMA)
- "cppi-dma"
-#elif defined(CONFIG_USB_INVENTRA_DMA)
- "musb-dma"
-#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
- "tusb-omap-dma"
-#elif defined(CONFIG_USB_UX500_DMA)
- "ux500-dma"
-#else
"?dma?"
-#endif
", "
-#ifdef CONFIG_USB_MUSB_OTG
- "otg (peripheral+host)"
-#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
- "peripheral"
-#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
- "host"
-#endif
- ,
+ "otg (peripheral+host)",
musb_driver_name);
return platform_driver_probe(&musb_driver, musb_probe);
}
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 0e053b58796..668eeef601a 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -72,10 +72,6 @@ struct musb_ep;
#include <linux/usb/hcd.h>
#include "musb_host.h"
-
-
-#ifdef CONFIG_USB_MUSB_OTG
-
#define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST)
#define is_host_enabled(musb) ((musb)->board_mode != MUSB_PERIPHERAL)
#define is_otg_enabled(musb) ((musb)->board_mode == MUSB_OTG)
@@ -86,24 +82,6 @@ struct musb_ep;
#define is_peripheral_active(m) (!(m)->is_host)
#define is_host_active(m) ((m)->is_host)
-#else
-#define is_peripheral_enabled(musb) is_peripheral_capable()
-#define is_host_enabled(musb) is_host_capable()
-#define is_otg_enabled(musb) 0
-
-#define is_peripheral_active(musb) is_peripheral_capable()
-#define is_host_active(musb) is_host_capable()
-#endif
-
-#if defined(CONFIG_USB_MUSB_OTG) || defined(CONFIG_USB_MUSB_PERIPHERAL)
-/* for some reason, the "select USB_GADGET_MUSB_HDRC" doesn't always
- * override that choice selection (often USB_GADGET_DUMMY_HCD).
- */
-#ifndef CONFIG_USB_GADGET_MUSB_HDRC
-#error bogus Kconfig output ... select CONFIG_USB_GADGET_MUSB_HDRC
-#endif
-#endif /* need MUSB gadget selection */
-
#ifndef CONFIG_HAVE_CLK
/* Dummy stub for clk framework */
#define clk_get(dev, id) NULL
@@ -119,8 +97,6 @@ struct musb_ep;
/****************************** PERIPHERAL ROLE *****************************/
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
-
#define is_peripheral_capable() (1)
extern irqreturn_t musb_g_ep0_irq(struct musb *);
@@ -132,40 +108,14 @@ extern void musb_g_resume(struct musb *);
extern void musb_g_wakeup(struct musb *);
extern void musb_g_disconnect(struct musb *);
-#else
-
-#define is_peripheral_capable() (0)
-
-static inline irqreturn_t musb_g_ep0_irq(struct musb *m) { return IRQ_NONE; }
-static inline void musb_g_reset(struct musb *m) {}
-static inline void musb_g_suspend(struct musb *m) {}
-static inline void musb_g_resume(struct musb *m) {}
-static inline void musb_g_wakeup(struct musb *m) {}
-static inline void musb_g_disconnect(struct musb *m) {}
-
-#endif
-
/****************************** HOST ROLE ***********************************/
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-
#define is_host_capable() (1)
extern irqreturn_t musb_h_ep0_irq(struct musb *);
extern void musb_host_tx(struct musb *, u8);
extern void musb_host_rx(struct musb *, u8);
-#else
-
-#define is_host_capable() (0)
-
-static inline irqreturn_t musb_h_ep0_irq(struct musb *m) { return IRQ_NONE; }
-static inline void musb_host_tx(struct musb *m, u8 e) {}
-static inline void musb_host_rx(struct musb *m, u8 e) {}
-
-#endif
-
-
/****************************** CONSTANTS ********************************/
#ifndef MUSB_C_NUM_EPS
@@ -261,7 +211,7 @@ enum musb_g_ep0_state {
* @try_idle: tries to idle the IP
* @vbus_status: returns vbus status if possible
* @set_vbus: forces vbus status
- * @channel_program: pre check for standard dma channel_program func
+ * @adjust_channel_params: pre check for standard dma channel_program func
*/
struct musb_platform_ops {
int (*init)(struct musb *musb);
@@ -315,7 +265,6 @@ struct musb_hw_ep {
void __iomem *fifo_sync_va;
#endif
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
void __iomem *target_regs;
/* currently scheduled peripheral endpoint */
@@ -324,31 +273,20 @@ struct musb_hw_ep {
u8 rx_reinit;
u8 tx_reinit;
-#endif
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
/* peripheral side */
struct musb_ep ep_in; /* TX */
struct musb_ep ep_out; /* RX */
-#endif
};
static inline struct musb_request *next_in_request(struct musb_hw_ep *hw_ep)
{
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
return next_request(&hw_ep->ep_in);
-#else
- return NULL;
-#endif
}
static inline struct musb_request *next_out_request(struct musb_hw_ep *hw_ep)
{
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
return next_request(&hw_ep->ep_out);
-#else
- return NULL;
-#endif
}
struct musb_csr_regs {
@@ -393,7 +331,6 @@ struct musb {
u32 port1_status;
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
unsigned long rh_timer;
enum musb_h_ep0_state ep0_stage;
@@ -411,7 +348,6 @@ struct musb {
struct list_head out_bulk; /* of musb_qh */
struct timer_list otg_timer;
-#endif
struct notifier_block nb;
struct dma_controller *dma_controller;
@@ -472,7 +408,6 @@ struct musb {
#define can_bulk_combine(musb,type) \
(((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
/* is_suspended means USB B_PERIPHERAL suspend */
unsigned is_suspended:1;
@@ -496,7 +431,6 @@ struct musb {
enum musb_g_ep0_state ep0_state;
struct usb_gadget g; /* the gadget */
struct usb_gadget_driver *gadget_driver; /* its driver */
-#endif
/*
* FIXME: Remove this flag.
@@ -518,12 +452,10 @@ struct musb {
#endif
};
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
static inline struct musb *gadget_to_musb(struct usb_gadget *g)
{
return container_of(g, struct musb, g);
}
-#endif
#ifdef CONFIG_BLACKFIN
static inline int musb_read_fifosize(struct musb *musb,
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 0a50a35e185..8c41a2e6ea7 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1524,6 +1524,12 @@ static void musb_gadget_fifo_flush(struct usb_ep *ep)
csr = musb_readw(epio, MUSB_TXCSR);
if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
+ /*
+ * Setting both TXPKTRDY and FLUSHFIFO makes the controller
+ * interrupt the current FIFO loading, but it does not flush
+ * packets that are already loaded.
+ */
+ csr &= ~MUSB_TXCSR_TXPKTRDY;
musb_writew(epio, MUSB_TXCSR, csr);
/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
musb_writew(epio, MUSB_TXCSR, csr);
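With the fix applied, the flush sequence reads roughly as below (condensed from this hunk; csr, epio and the MUSB_TXCSR_* names are the ones already used in musb_gadget_fifo_flush()):

csr = musb_readw(epio, MUSB_TXCSR);
if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
	csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
	/* keep TXPKTRDY clear: FLUSHFIFO together with TXPKTRDY only
	 * interrupts the packet currently being loaded, it does not
	 * flush packets already sitting in the FIFO */
	csr &= ~MUSB_TXCSR_TXPKTRDY;
	musb_writew(epio, MUSB_TXCSR, csr);
	musb_writew(epio, MUSB_TXCSR, csr);	/* second write kept from the original code */
}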
@@ -1657,8 +1663,8 @@ static void musb_pullup(struct musb *musb, int is_on)
/* FIXME if on, HdrcStart; if off, HdrcStop */
- dev_dbg(musb->controller, "gadget %s D+ pullup %s\n",
- musb->gadget_driver->function, is_on ? "on" : "off");
+ dev_dbg(musb->controller, "gadget D+ pullup %s\n",
+ is_on ? "on" : "off");
musb_writeb(musb->mregs, MUSB_POWER, power);
}
@@ -1692,6 +1698,8 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
is_on = !!is_on;
+ pm_runtime_get_sync(musb->controller);
+
/* NOTE: this assumes we are sensing vbus; we'd rather
* not pullup unless the B-session is active.
*/
@@ -1701,9 +1709,17 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
musb_pullup(musb, is_on);
}
spin_unlock_irqrestore(&musb->lock, flags);
+
+ pm_runtime_put(musb->controller);
+
return 0;
}
+static int musb_gadget_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+static int musb_gadget_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver);
+
static const struct usb_gadget_ops musb_gadget_operations = {
.get_frame = musb_gadget_get_frame,
.wakeup = musb_gadget_wakeup,
@@ -1711,6 +1727,8 @@ static const struct usb_gadget_ops musb_gadget_operations = {
/* .vbus_session = musb_gadget_vbus_session, */
.vbus_draw = musb_gadget_vbus_draw,
.pullup = musb_gadget_pullup,
+ .udc_start = musb_gadget_start,
+ .udc_stop = musb_gadget_stop,
};
/* ----------------------------------------------------------------------- */
@@ -1721,7 +1739,6 @@ static const struct usb_gadget_ops musb_gadget_operations = {
* about there being only one external upstream port. It assumes
* all peripheral ports are external...
*/
-static struct musb *the_gadget;
static void musb_gadget_release(struct device *dev)
{
@@ -1808,9 +1825,6 @@ int __init musb_gadget_setup(struct musb *musb)
* musb peripherals at the same time, only the bus lock
* is probably held.
*/
- if (the_gadget)
- return -EBUSY;
- the_gadget = musb;
musb->g.ops = &musb_gadget_operations;
musb->g.is_dualspeed = 1;
@@ -1834,18 +1848,22 @@ int __init musb_gadget_setup(struct musb *musb)
status = device_register(&musb->g.dev);
if (status != 0) {
put_device(&musb->g.dev);
- the_gadget = NULL;
+ return status;
}
+ status = usb_add_gadget_udc(musb->controller, &musb->g);
+ if (status)
+ goto err;
+
+ return 0;
+err:
+ device_unregister(&musb->g.dev);
return status;
}
void musb_gadget_cleanup(struct musb *musb)
{
- if (musb != the_gadget)
- return;
-
+ usb_del_gadget_udc(&musb->g);
device_unregister(&musb->g.dev);
- the_gadget = NULL;
}
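The setup/cleanup pair above is the usual conversion to the UDC framework: the driver stops exporting its own usb_gadget_probe_driver()/usb_gadget_unregister_driver() (removed further down) and instead registers the gadget with usb_add_gadget_udc(), letting the UDC core call the .udc_start/.udc_stop ops added earlier in this diff. A minimal sketch of that pairing; example_gadget_setup/example_gadget_cleanup are illustrative names, the fields are the ones used in this file:

static int example_gadget_setup(struct musb *musb)
{
	int status;

	status = device_register(&musb->g.dev);	/* make the gadget device visible */
	if (status != 0) {
		put_device(&musb->g.dev);
		return status;
	}

	status = usb_add_gadget_udc(musb->controller, &musb->g);
	if (status)
		goto err;			/* unwind in reverse order */
	return 0;

err:
	device_unregister(&musb->g.dev);
	return status;
}

static void example_gadget_cleanup(struct musb *musb)
{
	usb_del_gadget_udc(&musb->g);		/* undo usb_add_gadget_udc() */
	device_unregister(&musb->g.dev);
}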
/*
@@ -1857,59 +1875,30 @@ void musb_gadget_cleanup(struct musb *musb)
* -ENOMEM no memory to perform the operation
*
* @param driver the gadget driver
- * @param bind the driver's bind function
* @return <0 if error, 0 if everything is fine
*/
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
+static int musb_gadget_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct musb *musb = the_gadget;
+ struct musb *musb = gadget_to_musb(g);
unsigned long flags;
int retval = -EINVAL;
- if (!driver
- || driver->speed != USB_SPEED_HIGH
- || !bind || !driver->setup)
- goto err0;
-
- /* driver must be initialized to support peripheral mode */
- if (!musb) {
- dev_dbg(musb->controller, "no dev??\n");
- retval = -ENODEV;
+ if (driver->speed != USB_SPEED_HIGH)
goto err0;
- }
pm_runtime_get_sync(musb->controller);
dev_dbg(musb->controller, "registering driver %s\n", driver->function);
- if (musb->gadget_driver) {
- dev_dbg(musb->controller, "%s is already bound to %s\n",
- musb_driver_name,
- musb->gadget_driver->driver.name);
- retval = -EBUSY;
- goto err0;
- }
-
- spin_lock_irqsave(&musb->lock, flags);
+ musb->softconnect = 0;
musb->gadget_driver = driver;
- musb->g.dev.driver = &driver->driver;
- driver->driver.bus = NULL;
- musb->softconnect = 1;
- spin_unlock_irqrestore(&musb->lock, flags);
-
- retval = bind(&musb->g);
- if (retval) {
- dev_dbg(musb->controller, "bind to driver %s failed --> %d\n",
- driver->driver.name, retval);
- goto err1;
- }
spin_lock_irqsave(&musb->lock, flags);
+ musb->is_active = 1;
otg_set_peripheral(musb->xceiv, &musb->g);
musb->xceiv->state = OTG_STATE_B_IDLE;
- musb->is_active = 1;
/*
* FIXME this ignores the softconnect flag. Drivers are
@@ -1921,8 +1910,6 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
if (!is_otg_enabled(musb))
musb_start(musb);
- otg_set_peripheral(musb->xceiv, &musb->g);
-
spin_unlock_irqrestore(&musb->lock, flags);
if (is_otg_enabled(musb)) {
@@ -1954,15 +1941,9 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
err2:
if (!is_otg_enabled(musb))
musb_stop(musb);
-
-err1:
- musb->gadget_driver = NULL;
- musb->g.dev.driver = NULL;
-
err0:
return retval;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
@@ -2012,17 +1993,12 @@ static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
*
* @param driver the gadget driver to unregister
*/
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int musb_gadget_stop(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
{
- struct musb *musb = the_gadget;
+ struct musb *musb = gadget_to_musb(g);
unsigned long flags;
- if (!driver || !driver->unbind || !musb)
- return -EINVAL;
-
- if (!musb->gadget_driver)
- return -EINVAL;
-
if (musb->xceiv->last_event == USB_EVENT_NONE)
pm_runtime_get_sync(musb->controller);
@@ -2033,9 +2009,7 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
spin_lock_irqsave(&musb->lock, flags);
-#ifdef CONFIG_USB_MUSB_OTG
musb_hnp_stop(musb);
-#endif
(void) musb_gadget_vbus_draw(&musb->g, 0);
@@ -2045,13 +2019,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);
- spin_unlock_irqrestore(&musb->lock, flags);
- driver->unbind(&musb->g);
- spin_lock_irqsave(&musb->lock, flags);
-
- musb->gadget_driver = NULL;
- musb->g.dev.driver = NULL;
-
musb->is_active = 0;
musb_platform_try_idle(musb, 0);
spin_unlock_irqrestore(&musb->lock, flags);
@@ -2071,8 +2038,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
-
/* ----------------------------------------------------------------------- */
@@ -2158,7 +2123,6 @@ void musb_g_disconnect(struct musb *musb)
switch (musb->xceiv->state) {
default:
-#ifdef CONFIG_USB_MUSB_OTG
dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
otg_state_string(musb->xceiv->state));
musb->xceiv->state = OTG_STATE_A_IDLE;
@@ -2170,7 +2134,6 @@ void musb_g_disconnect(struct musb *musb)
break;
case OTG_STATE_B_WAIT_ACON:
case OTG_STATE_B_HOST:
-#endif
case OTG_STATE_B_PERIPHERAL:
case OTG_STATE_B_IDLE:
musb->xceiv->state = OTG_STATE_B_IDLE;
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index b2faff23550..9378b359c1f 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -88,7 +88,6 @@ static int service_tx_status_request(
case USB_RECIP_DEVICE:
result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
-#ifdef CONFIG_USB_MUSB_OTG
if (musb->g.is_otg) {
result[0] |= musb->g.b_hnp_enable
<< USB_DEVICE_B_HNP_ENABLE;
@@ -97,7 +96,6 @@ static int service_tx_status_request(
result[0] |= musb->g.a_hnp_support
<< USB_DEVICE_A_HNP_SUPPORT;
}
-#endif
break;
case USB_RECIP_INTERFACE:
@@ -392,7 +390,6 @@ __acquires(musb->lock)
if (handled > 0)
musb->test_mode = true;
break;
-#ifdef CONFIG_USB_MUSB_OTG
case USB_DEVICE_B_HNP_ENABLE:
if (!musb->g.is_otg)
goto stall;
@@ -409,7 +406,6 @@ __acquires(musb->lock)
goto stall;
musb->g.a_alt_hnp_support = 1;
break;
-#endif
case USB_DEVICE_DEBUG_MODE:
handled = 0;
break;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 7295e316bdf..8b2473fa0f4 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1575,7 +1575,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
/* even if there was an error, we did the dma
* for iso_frame_desc->length
*/
- if (d->status != EILSEQ && d->status != -EOVERFLOW)
+ if (d->status != -EILSEQ && d->status != -EOVERFLOW)
d->status = 0;
if (++qh->iso_idx >= urb->number_of_packets)
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 14b00776638..622d09fb9ab 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -95,7 +95,6 @@ extern const struct hc_driver musb_hc_driver;
static inline struct urb *next_urb(struct musb_qh *qh)
{
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
struct list_head *queue;
if (!qh)
@@ -104,9 +103,6 @@ static inline struct urb *next_urb(struct musb_qh *qh)
if (list_empty(queue))
return NULL;
return list_entry(queue->next, struct urb, urb_list);
-#else
- return NULL;
-#endif
}
#endif /* _MUSB_HOST_H */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 2d80a575883..e9f80adc45a 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -88,14 +88,12 @@ static void musb_port_suspend(struct musb *musb, bool do_suspend)
OTG_TIME_A_AIDL_BDIS));
musb_platform_try_idle(musb, 0);
break;
-#ifdef CONFIG_USB_MUSB_OTG
case OTG_STATE_B_HOST:
musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
musb->is_active = is_otg_enabled(musb)
&& musb->xceiv->host->b_hnp_enable;
musb_platform_try_idle(musb, 0);
break;
-#endif
default:
dev_dbg(musb->controller, "bogus rh suspend? %s\n",
otg_state_string(musb->xceiv->state));
@@ -118,13 +116,11 @@ static void musb_port_reset(struct musb *musb, bool do_reset)
u8 power;
void __iomem *mbase = musb->mregs;
-#ifdef CONFIG_USB_MUSB_OTG
if (musb->xceiv->state == OTG_STATE_B_IDLE) {
dev_dbg(musb->controller, "HNP: Returning from HNP; no hub reset from b_idle\n");
musb->port1_status &= ~USB_PORT_STAT_RESET;
return;
}
-#endif
if (!is_host_active(musb))
return;
@@ -191,14 +187,12 @@ void musb_root_disconnect(struct musb *musb)
switch (musb->xceiv->state) {
case OTG_STATE_A_SUSPEND:
-#ifdef CONFIG_USB_MUSB_OTG
if (is_otg_enabled(musb)
&& musb->xceiv->host->b_hnp_enable) {
musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
musb->g.is_a_peripheral = 1;
break;
}
-#endif
/* FALLTHROUGH */
case OTG_STATE_A_HOST:
musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index c5d4c44d0ff..ba85f273e48 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -51,9 +51,7 @@ static void musb_do_idle(unsigned long _musb)
{
struct musb *musb = (void *)_musb;
unsigned long flags;
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
u8 power;
-#endif
u8 devctl;
spin_lock_irqsave(&musb->lock, flags);
@@ -70,7 +68,6 @@ static void musb_do_idle(unsigned long _musb)
MUSB_HST_MODE(musb);
}
break;
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
case OTG_STATE_A_SUSPEND:
/* finish RESUME signaling? */
if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
@@ -87,15 +84,12 @@ static void musb_do_idle(unsigned long _musb)
musb->xceiv->state = OTG_STATE_A_HOST;
}
break;
-#endif
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
case OTG_STATE_A_HOST:
devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE)
musb->xceiv->state = OTG_STATE_B_IDLE;
else
musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
-#endif
default:
break;
}
@@ -243,13 +237,11 @@ static int musb_otg_notifications(struct notifier_block *nb,
dev_dbg(musb->controller, "ID GND\n");
if (is_otg_enabled(musb)) {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
if (musb->gadget_driver) {
pm_runtime_get_sync(musb->controller);
otg_init(musb->xceiv);
omap2430_musb_set_vbus(musb, 1);
}
-#endif
} else {
pm_runtime_get_sync(musb->controller);
otg_init(musb->xceiv);
@@ -260,21 +252,16 @@ static int musb_otg_notifications(struct notifier_block *nb,
case USB_EVENT_VBUS:
dev_dbg(musb->controller, "VBUS Connect\n");
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
if (musb->gadget_driver)
pm_runtime_get_sync(musb->controller);
-#endif
otg_init(musb->xceiv);
break;
case USB_EVENT_NONE:
dev_dbg(musb->controller, "VBUS Disconnect\n");
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
if (is_otg_enabled(musb) || is_peripheral_enabled(musb))
- if (musb->gadget_driver)
-#endif
- {
+ if (musb->gadget_driver) {
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
}
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index b410357cf01..9eec41fbf3a 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -269,8 +269,6 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
static struct musb *the_musb;
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
-
/* This is used by gadget drivers, and OTG transceiver logic, allowing
* at most mA current to be drawn from VBUS during a Default-B session
* (that is, while VBUS exceeds 4.4V). In Default-A (including pure host
@@ -310,10 +308,6 @@ static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
return 0;
}
-#else
-#define tusb_draw_power NULL
-#endif
-
/* workaround for issue 13: change clock during chip idle
* (to be fixed in rev3 silicon) ... symptoms include disconnect
* or looping suspend/resume cycles
@@ -440,19 +434,15 @@ static void musb_do_idle(unsigned long _musb)
if (is_host_active(musb) && (musb->port1_status >> 16))
goto done;
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- if (is_peripheral_enabled(musb) && !musb->gadget_driver)
+ if (is_peripheral_enabled(musb) && !musb->gadget_driver) {
wakeups = 0;
- else {
+ } else {
wakeups = TUSB_PRCM_WHOSTDISCON
- | TUSB_PRCM_WBUS
+ | TUSB_PRCM_WBUS
| TUSB_PRCM_WVBUS;
if (is_otg_enabled(musb))
wakeups |= TUSB_PRCM_WID;
}
-#else
- wakeups = TUSB_PRCM_WHOSTDISCON | TUSB_PRCM_WBUS;
-#endif
tusb_allow_idle(musb, wakeups);
}
done:
@@ -610,30 +600,22 @@ static int tusb_musb_set_mode(struct musb *musb, u8 musb_mode)
switch (musb_mode) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
case MUSB_HOST: /* Disable PHY ID detect, ground ID */
phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
dev_conf |= TUSB_DEV_CONF_ID_SEL;
dev_conf &= ~TUSB_DEV_CONF_SOFT_ID;
break;
-#endif
-
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
case MUSB_PERIPHERAL: /* Disable PHY ID detect, keep ID pull-up on */
phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
break;
-#endif
-
-#ifdef CONFIG_USB_MUSB_OTG
case MUSB_OTG: /* Use PHY ID detection */
phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
break;
-#endif
default:
dev_dbg(musb->controller, "Trying to set mode %i\n", musb_mode);
@@ -684,7 +666,6 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
/* B-dev state machine: no vbus ~= disconnect */
if ((is_otg_enabled(musb) && !musb->xceiv->default_a)
|| !is_host_enabled(musb)) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
/* ? musb_root_disconnect(musb); */
musb->port1_status &=
~(USB_PORT_STAT_CONNECTION
@@ -693,7 +674,6 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
| USB_PORT_STAT_HIGH_SPEED
| USB_PORT_STAT_TEST
);
-#endif
if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
dev_dbg(musb->controller, "Forcing disconnect (no interrupt)\n");
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index c784e6c03aa..07c8a73dfe4 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -89,7 +89,7 @@ static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
if (reg != 0) {
- dev_dbg(musb->controller, "ep%i dmareq0 is busy for ep%i\n",
+ dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n",
chdat->epnum, reg & 0xf);
return -EAGAIN;
}
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index 8c282258e1b..ca9b690a7e4 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -660,7 +660,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
int ret = IRQ_NONE;
struct isp1301 *isp = _isp;
- /* update ISP1301 transciever from OTG controller */
+ /* update ISP1301 transceiver from OTG controller */
if (otg_irq & OPRT_CHG) {
omap_writew(OPRT_CHG, OTG_IRQ_SRC);
isp1301_defer_work(isp, WORK_UPDATE_ISP);
@@ -755,7 +755,7 @@ static irqreturn_t omap_otg_irq(int irq, void *_isp)
omap_writew(A_VBUS_ERR, OTG_IRQ_SRC);
ret = IRQ_HANDLED;
- /* switch driver; the transciever code activates it,
+ /* switch driver; the transceiver code activates it,
* ungating the udc clock or resuming OHCI.
*/
} else if (otg_irq & DRIVER_SWITCH) {
diff --git a/drivers/usb/otg/otg_fsm.c b/drivers/usb/otg/otg_fsm.c
index b0cc422f2ff..09117387d2a 100644
--- a/drivers/usb/otg/otg_fsm.c
+++ b/drivers/usb/otg/otg_fsm.c
@@ -28,7 +28,6 @@
#include <linux/usb.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
-#include <linux/types.h>
#include "otg_fsm.h"
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index efeb4d1517f..14f66c35862 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -166,7 +166,7 @@ struct twl4030_usb {
};
/* internal define on top of container_of */
-#define xceiv_to_twl(x) container_of((x), struct twl4030_usb, otg);
+#define xceiv_to_twl(x) container_of((x), struct twl4030_usb, otg)
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c
index cfb5aa72b19..b4d2c0972b3 100644
--- a/drivers/usb/otg/twl6030-usb.c
+++ b/drivers/usb/otg/twl6030-usb.c
@@ -95,11 +95,15 @@ struct twl6030_usb {
struct regulator *usb3v3;
+ /* used to set vbus, in atomic path */
+ struct work_struct set_vbus_work;
+
int irq1;
int irq2;
u8 linkstat;
u8 asleep;
bool irq_enabled;
+ bool vbus_enable;
unsigned long features;
};
@@ -370,20 +374,31 @@ static int twl6030_enable_irq(struct otg_transceiver *x)
return 0;
}
-static int twl6030_set_vbus(struct otg_transceiver *x, bool enabled)
+static void otg_set_vbus_work(struct work_struct *data)
{
- struct twl6030_usb *twl = xceiv_to_twl(x);
+ struct twl6030_usb *twl = container_of(data, struct twl6030_usb,
+ set_vbus_work);
/*
* Start driving VBUS. Set OPA_MODE bit in CHARGERUSB_CTRL1
* register. This enables boost mode.
*/
- if (enabled)
+
+ if (twl->vbus_enable)
twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE , 0x40,
- CHARGERUSB_CTRL1);
- else
+ CHARGERUSB_CTRL1);
+ else
twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE , 0x00,
- CHARGERUSB_CTRL1);
+ CHARGERUSB_CTRL1);
+}
+
+static int twl6030_set_vbus(struct otg_transceiver *x, bool enabled)
+{
+ struct twl6030_usb *twl = xceiv_to_twl(x);
+
+ twl->vbus_enable = enabled;
+ schedule_work(&twl->set_vbus_work);
+
return 0;
}
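The indirection above exists because, per the "in atomic path" note added to struct twl6030_usb, set_vbus may be called from atomic context while the CHARGERUSB_CTRL1 access goes through sleeping I2C writes; the request is therefore cached and the register write deferred to a work item (initialised and cancelled in the probe/remove hunks that follow). Reduced to its moving parts, with all names taken from these hunks:

/* struct twl6030_usb (with set_vbus_work and vbus_enable) is defined
 * earlier in this file; see the first twl6030-usb.c hunk above. */
static void otg_set_vbus_work(struct work_struct *data)
{
	struct twl6030_usb *twl = container_of(data, struct twl6030_usb,
					       set_vbus_work);

	/* process context: the underlying I2C write may sleep here */
	twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE,
		       twl->vbus_enable ? 0x40 : 0x00, CHARGERUSB_CTRL1);
}

static int twl6030_set_vbus(struct otg_transceiver *x, bool enabled)
{
	struct twl6030_usb *twl = xceiv_to_twl(x);

	twl->vbus_enable = enabled;		/* remember the request ... */
	schedule_work(&twl->set_vbus_work);	/* ... and defer the register write */
	return 0;
}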
@@ -444,6 +459,8 @@ static int __devinit twl6030_usb_probe(struct platform_device *pdev)
ATOMIC_INIT_NOTIFIER_HEAD(&twl->otg.notifier);
+ INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work);
+
twl->irq_enabled = true;
status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
@@ -494,6 +511,7 @@ static int __exit twl6030_usb_remove(struct platform_device *pdev)
regulator_put(twl->usb3v3);
pdata->phy_exit(twl->dev);
device_remove_file(twl->dev, &dev_attr_vbus);
+ cancel_work_sync(&twl->set_vbus_work);
kfree(twl);
return 0;
diff --git a/drivers/usb/renesas_usbhs/Kconfig b/drivers/usb/renesas_usbhs/Kconfig
index b2e64918884..286cbf1ca7d 100644
--- a/drivers/usb/renesas_usbhs/Kconfig
+++ b/drivers/usb/renesas_usbhs/Kconfig
@@ -1,5 +1,5 @@
#
-# Renesas USB Controller Drivers
+# Renesas USBHS Controller Drivers
#
config USB_RENESAS_USBHS
@@ -7,10 +7,9 @@ config USB_RENESAS_USBHS
depends on SUPERH || ARCH_SHMOBILE
default n
help
- Renesas USBHS is a discrete USB host and peripheral controller chip
- that supports both full and high speed USB 2.0 data transfers.
- It has nine or more configurable endpoints, and endpoint zero.
+ Renesas USBHS is a discrete USB host and peripheral controller chip
+ that supports both full and high speed USB 2.0 data transfers.
+ It has nine or more configurable endpoints, and endpoint zero.
- Say "y" to link the driver statically, or "m" to build a
- dynamically linked module called "renesas_usbhs" and force all
- gadget drivers to also be dynamically linked.
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "renesas_usbhs".
diff --git a/drivers/usb/renesas_usbhs/Makefile b/drivers/usb/renesas_usbhs/Makefile
index b8798ad1627..ce08345fa15 100644
--- a/drivers/usb/renesas_usbhs/Makefile
+++ b/drivers/usb/renesas_usbhs/Makefile
@@ -4,6 +4,6 @@
obj-$(CONFIG_USB_RENESAS_USBHS) += renesas_usbhs.o
-renesas_usbhs-y := common.o mod.o pipe.o
+renesas_usbhs-y := common.o mod.o pipe.o fifo.o
renesas_usbhs-$(CONFIG_USB_RENESAS_USBHS_UDC) += mod_gadget.o
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index f3664d6af66..d8239e5efa6 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -21,6 +21,29 @@
#include <linux/sysfs.h>
#include "./common.h"
+/*
+ * image of renesas_usbhs
+ *
+ * ex) gadget case
+
+ * mod.c
+ * mod_gadget.c
+ * mod_host.c pipe.c fifo.c
+ *
+ * +-------+ +-----------+
+ * | pipe0 |------>| fifo pio |
+ * +------------+ +-------+ +-----------+
+ * | mod_gadget |=====> | pipe1 |--+
+ * +------------+ +-------+ | +-----------+
+ * | pipe2 | | +-| fifo dma0 |
+ * +------------+ +-------+ | | +-----------+
+ * | mod_host | | pipe3 |<-|--+
+ * +------------+ +-------+ | +-----------+
+ * | .... | +--->| fifo dma1 |
+ * | .... | +-----------+
+ */
+
+
#define USBHSF_RUNTIME_PWCTRL (1 << 0)
/* status */
@@ -304,6 +327,8 @@ static int __devinit usbhs_probe(struct platform_device *pdev)
priv->dparam->pipe_type = usbhsc_default_pipe_type;
priv->dparam->pipe_size = ARRAY_SIZE(usbhsc_default_pipe_type);
}
+ if (!priv->dparam->pio_dma_border)
+ priv->dparam->pio_dma_border = 64; /* 64byte */
/* FIXME */
/* runtime power control ? */
@@ -323,10 +348,14 @@ static int __devinit usbhs_probe(struct platform_device *pdev)
if (ret < 0)
goto probe_end_iounmap;
- ret = usbhs_mod_probe(priv);
+ ret = usbhs_fifo_probe(priv);
if (ret < 0)
goto probe_end_pipe_exit;
+ ret = usbhs_mod_probe(priv);
+ if (ret < 0)
+ goto probe_end_fifo_exit;
+
/* dev_set_drvdata should be called after usbhs_mod_init */
dev_set_drvdata(&pdev->dev, priv);
@@ -374,6 +403,8 @@ probe_end_call_remove:
usbhs_platform_call(priv, hardware_exit, pdev);
probe_end_mod_exit:
usbhs_mod_remove(priv);
+probe_end_fifo_exit:
+ usbhs_fifo_remove(priv);
probe_end_pipe_exit:
usbhs_pipe_remove(priv);
probe_end_iounmap:
@@ -404,6 +435,7 @@ static int __devexit usbhs_remove(struct platform_device *pdev)
usbhs_platform_call(priv, hardware_exit, pdev);
usbhs_mod_remove(priv);
+ usbhs_fifo_remove(priv);
usbhs_pipe_remove(priv);
iounmap(priv->base);
kfree(priv);
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 0aadcb40276..b410463a121 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -36,6 +36,12 @@ struct usbhs_priv;
#define CFIFO 0x0014
#define CFIFOSEL 0x0020
#define CFIFOCTR 0x0022
+#define D0FIFO 0x0100
+#define D0FIFOSEL 0x0028
+#define D0FIFOCTR 0x002A
+#define D1FIFO 0x0120
+#define D1FIFOSEL 0x002C
+#define D1FIFOCTR 0x002E
#define INTENB0 0x0030
#define INTENB1 0x0032
#define BRDYENB 0x0036
@@ -60,6 +66,30 @@ struct usbhs_priv;
#define PIPEMAXP 0x006C
#define PIPEPERI 0x006E
#define PIPEnCTR 0x0070
+#define PIPE1TRE 0x0090
+#define PIPE1TRN 0x0092
+#define PIPE2TRE 0x0094
+#define PIPE2TRN 0x0096
+#define PIPE3TRE 0x0098
+#define PIPE3TRN 0x009A
+#define PIPE4TRE 0x009C
+#define PIPE4TRN 0x009E
+#define PIPE5TRE 0x00A0
+#define PIPE5TRN 0x00A2
+#define PIPEBTRE 0x00A4
+#define PIPEBTRN 0x00A6
+#define PIPECTRE 0x00A8
+#define PIPECTRN 0x00AA
+#define PIPEDTRE 0x00AC
+#define PIPEDTRN 0x00AE
+#define PIPEETRE 0x00B0
+#define PIPEETRN 0x00B2
+#define PIPEFTRE 0x00B4
+#define PIPEFTRN 0x00B6
+#define PIPE9TRE 0x00B8
+#define PIPE9TRN 0x00BA
+#define PIPEATRE 0x00BC
+#define PIPEATRN 0x00BE
/* SYSCFG */
#define SCKE (1 << 10) /* USB Module Clock Enable */
@@ -78,6 +108,7 @@ struct usbhs_priv;
#define RHST_HIGH_SPEED 3 /* High-speed connection */
/* CFIFOSEL */
+#define DREQE (1 << 12) /* DMA Transfer Request Enable */
#define MBW_32 (0x2 << 10) /* CFIFO Port Access Bit Width */
/* CFIFOCTR */
@@ -164,6 +195,10 @@ struct usbhs_priv;
#define CCPL (1 << 2) /* Control Transfer End Enable */
+/* PIPEnTRE */
+#define TRENB (1 << 9) /* Transaction Counter Enable */
+#define TRCLR (1 << 8) /* Transaction Counter Clear */
+
/* FRMNUM */
#define FRNM_MASK (0x7FF)
@@ -194,6 +229,11 @@ struct usbhs_priv {
* pipe control
*/
struct usbhs_pipe_info pipe_info;
+
+ /*
+ * fifo control
+ */
+ struct usbhs_fifo_info fifo_info;
};
/*
@@ -204,6 +244,10 @@ void usbhs_write(struct usbhs_priv *priv, u32 reg, u16 data);
void usbhs_bset(struct usbhs_priv *priv, u32 reg, u16 mask, u16 data);
int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev);
+
+#define usbhs_lock(p, f) spin_lock_irqsave(usbhs_priv_to_lock(p), f)
+#define usbhs_unlock(p, f) spin_unlock_irqrestore(usbhs_priv_to_lock(p), f)
+
/*
* sysconfig
*/
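The usbhs_lock()/usbhs_unlock() macros added above are irqsave/irqrestore wrappers around the private spinlock; the new fifo.c below uses them to guard each pipe's packet list. A tiny illustrative caller (usbhsf_example_queue is a made-up name; the real users are usbhs_pkt_push()/usbhs_pkt_pop() in fifo.c):

static void usbhsf_example_queue(struct usbhs_priv *priv,
				 struct usbhs_pipe *pipe,
				 struct usbhs_pkt *pkt)
{
	unsigned long flags;

	usbhs_lock(priv, flags);		/* spin_lock_irqsave() on priv's lock */
	list_add_tail(&pkt->node, &pipe->list);	/* e.g. queue a packet */
	usbhs_unlock(priv, flags);		/* spin_unlock_irqrestore() */
}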
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
new file mode 100644
index 00000000000..a34430f55fb
--- /dev/null
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -0,0 +1,1016 @@
+/*
+ * Renesas USB driver
+ *
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+#include "./common.h"
+#include "./pipe.h"
+
+#define usbhsf_get_cfifo(p) (&((p)->fifo_info.cfifo))
+#define usbhsf_get_d0fifo(p) (&((p)->fifo_info.d0fifo))
+#define usbhsf_get_d1fifo(p) (&((p)->fifo_info.d1fifo))
+
+#define usbhsf_fifo_is_busy(f) ((f)->pipe) /* see usbhs_pipe_select_fifo */
+
+/*
+ * packet initialize
+ */
+void usbhs_pkt_init(struct usbhs_pkt *pkt)
+{
+ pkt->dma = DMA_ADDR_INVALID;
+ INIT_LIST_HEAD(&pkt->node);
+}
+
+/*
+ * packet control function
+ */
+static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
+{
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
+ struct device *dev = usbhs_priv_to_dev(priv);
+
+ dev_err(dev, "null handler\n");
+
+ return -EINVAL;
+}
+
+static struct usbhs_pkt_handle usbhsf_null_handler = {
+ .prepare = usbhsf_null_handle,
+ .try_run = usbhsf_null_handle,
+};
+
+void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
+ struct usbhs_pkt_handle *handler,
+ void *buf, int len, int zero)
+{
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ unsigned long flags;
+
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
+ if (!handler) {
+ dev_err(dev, "no handler function\n");
+ handler = &usbhsf_null_handler;
+ }
+
+ list_del_init(&pkt->node);
+ list_add_tail(&pkt->node, &pipe->list);
+
+ pkt->pipe = pipe;
+ pkt->buf = buf;
+ pkt->handler = handler;
+ pkt->length = len;
+ pkt->zero = zero;
+ pkt->actual = 0;
+
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
+
+ usbhs_pkt_start(pipe);
+}
+
+static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
+{
+ list_del_init(&pkt->node);
+}
+
+static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
+{
+ if (list_empty(&pipe->list))
+ return NULL;
+
+ return list_entry(pipe->list.next, struct usbhs_pkt, node);
+}
+
+struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
+{
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ unsigned long flags;
+
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
+ if (!pkt)
+ pkt = __usbhsf_pkt_get(pipe);
+
+ if (pkt)
+ __usbhsf_pkt_del(pkt);
+
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
+
+ return pkt;
+}
+
+int __usbhs_pkt_handler(struct usbhs_pipe *pipe, int type)
+{
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
+ struct usbhs_pkt *pkt;
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int (*func)(struct usbhs_pkt *pkt, int *is_done);
+ unsigned long flags;
+ int ret = 0;
+ int is_done = 0;
+
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
+ pkt = __usbhsf_pkt_get(pipe);
+ if (!pkt)
+ goto __usbhs_pkt_handler_end;
+
+ switch (type) {
+ case USBHSF_PKT_PREPARE:
+ func = pkt->handler->prepare;
+ break;
+ case USBHSF_PKT_TRY_RUN:
+ func = pkt->handler->try_run;
+ break;
+ case USBHSF_PKT_DMA_DONE:
+ func = pkt->handler->dma_done;
+ break;
+ default:
+ dev_err(dev, "unknown pkt hander\n");
+ goto __usbhs_pkt_handler_end;
+ }
+
+ ret = func(pkt, &is_done);
+
+ if (is_done)
+ __usbhsf_pkt_del(pkt);
+
+__usbhs_pkt_handler_end:
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
+
+ if (is_done) {
+ info->done(pkt);
+ usbhs_pkt_start(pipe);
+ }
+
+ return ret;
+}
+
+/*
+ * irq enable/disable function
+ */
+#define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, bempsts, e)
+#define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, brdysts, e)
+#define usbhsf_irq_callback_ctrl(pipe, status, enable) \
+ ({ \
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); \
+ struct usbhs_mod *mod = usbhs_mod_get_current(priv); \
+ u16 status = (1 << usbhs_pipe_number(pipe)); \
+ if (!mod) \
+ return; \
+ if (enable) \
+ mod->irq_##status |= status; \
+ else \
+ mod->irq_##status &= ~status; \
+ usbhs_irq_callback_update(priv, mod); \
+ })
+
+static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
+{
+ /*
+ * The DCP pipe can NOT use the "ready" interrupt for "send";
+ * it should use the "empty" interrupt instead.
+ * see
+ * "Operation" - "Interrupt Function" - "BRDY Interrupt"
+ *
+ * On the other hand, a normal pipe can use the "ready" interrupt
+ * for "send" whether it is single or double buffered.
+ */
+ if (usbhs_pipe_is_dcp(pipe))
+ usbhsf_irq_empty_ctrl(pipe, enable);
+ else
+ usbhsf_irq_ready_ctrl(pipe, enable);
+}
+
+static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
+{
+ usbhsf_irq_ready_ctrl(pipe, enable);
+}
+
+/*
+ * FIFO ctrl
+ */
+static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
+ struct usbhs_fifo *fifo)
+{
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+
+ usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
+}
+
+static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
+ struct usbhs_fifo *fifo)
+{
+ int timeout = 1024;
+
+ do {
+ /* The FIFO port is accessible */
+ if (usbhs_read(priv, fifo->ctr) & FRDY)
+ return 0;
+
+ udelay(10);
+ } while (timeout--);
+
+ return -EBUSY;
+}
+
+static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
+ struct usbhs_fifo *fifo)
+{
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+
+ if (!usbhs_pipe_is_dcp(pipe))
+ usbhsf_fifo_barrier(priv, fifo);
+
+ usbhs_write(priv, fifo->ctr, BCLR);
+}
+
+static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
+ struct usbhs_fifo *fifo)
+{
+ return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
+}
+
+static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
+ struct usbhs_fifo *fifo)
+{
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+
+ usbhs_pipe_select_fifo(pipe, NULL);
+ usbhs_write(priv, fifo->sel, 0);
+}
+
+static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
+ struct usbhs_fifo *fifo,
+ int write)
+{
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int timeout = 1024;
+ u16 mask = ((1 << 5) | 0xF); /* mask of ISEL | CURPIPE */
+ u16 base = usbhs_pipe_number(pipe); /* CURPIPE */
+
+ if (usbhs_pipe_is_busy(pipe) ||
+ usbhsf_fifo_is_busy(fifo))
+ return -EBUSY;
+
+ if (usbhs_pipe_is_dcp(pipe))
+ base |= (1 == write) << 5; /* ISEL */
+
+ /* "base" will be used below */
+ usbhs_write(priv, fifo->sel, base | MBW_32);
+
+ /* check ISEL and CURPIPE value */
+ while (timeout--) {
+ if (base == (mask & usbhs_read(priv, fifo->sel))) {
+ usbhs_pipe_select_fifo(pipe, fifo);
+ return 0;
+ }
+ udelay(10);
+ }
+
+ dev_err(dev, "fifo select error\n");
+
+ return -EIO;
+}
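A minimal sketch of the select word usbhsf_fifo_select() writes, reconstructed from the mask and shift it uses above (illustration only, not part of the patch):

static u16 usbhsf_fifo_sel_word(struct usbhs_pipe *pipe, int write)
{
	u16 base = usbhs_pipe_number(pipe);	/* CURPIPE field */

	if (usbhs_pipe_is_dcp(pipe))
		base |= (1 == write) << 5;	/* ISEL: DCP write side only */

	return base | MBW_32;			/* 32bit FIFO access width */
}

For example, a read on bulk pipe 3 gives base = 0x03, while a DCP write gives base = 0x20; the poll loop then waits until the read-back of fifo->sel, masked with ISEL | CURPIPE (0x2f), equals base before the fifo is considered selected.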
+
+/*
+ * PIO push handler
+ */
+static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
+{
+ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
+ void __iomem *addr = priv->base + fifo->port;
+ u8 *buf;
+ int maxp = usbhs_pipe_get_maxpacket(pipe);
+ int total_len;
+ int i, ret, len;
+ int is_short;
+
+ ret = usbhsf_fifo_select(pipe, fifo, 1);
+ if (ret < 0)
+ return 0;
+
+ ret = usbhs_pipe_is_accessible(pipe);
+ if (ret < 0) {
+ /* inaccessible pipe is not an error */
+ ret = 0;
+ goto usbhs_fifo_write_busy;
+ }
+
+ ret = usbhsf_fifo_barrier(priv, fifo);
+ if (ret < 0)
+ goto usbhs_fifo_write_busy;
+
+ buf = pkt->buf + pkt->actual;
+ len = pkt->length - pkt->actual;
+ len = min(len, maxp);
+ total_len = len;
+ is_short = total_len < maxp;
+
+ /*
+ * FIXME
+ *
+ * 32-bit access only
+ */
+ if (len >= 4 && !((unsigned long)buf & 0x03)) {
+ iowrite32_rep(addr, buf, len / 4);
+ len %= 4;
+ buf += total_len - len;
+ }
+
+ /* write the remaining bytes */
+ for (i = 0; i < len; i++)
+ iowrite8(buf[i], addr + (0x03 - (i & 0x03)));
+
+ /*
+ * variable update
+ */
+ pkt->actual += total_len;
+
+ if (pkt->actual < pkt->length)
+ *is_done = 0; /* there is remaining data */
+ else if (is_short)
+ *is_done = 1; /* short packet */
+ else
+ *is_done = !pkt->zero; /* send zero packet ? */
+
+ /*
+ * pipe/irq handling
+ */
+ if (is_short)
+ usbhsf_send_terminator(pipe, fifo);
+
+ usbhsf_tx_irq_ctrl(pipe, !*is_done);
+ usbhs_pipe_enable(pipe);
+
+ dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n",
+ usbhs_pipe_number(pipe),
+ pkt->length, pkt->actual, *is_done, pkt->zero);
+
+ /*
+ * Transmission end
+ */
+ if (*is_done) {
+ if (usbhs_pipe_is_dcp(pipe))
+ usbhs_dcp_control_transfer_done(pipe);
+ }
+
+ usbhsf_fifo_unselect(pipe, fifo);
+
+ return 0;
+
+usbhs_fifo_write_busy:
+ usbhsf_fifo_unselect(pipe, fifo);
+
+ /*
+ * pipe is busy.
+ * retry in interrupt
+ */
+ usbhsf_tx_irq_ctrl(pipe, 1);
+
+ return ret;
+}
+
+struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
+ .prepare = usbhsf_pio_try_push,
+ .try_run = usbhsf_pio_try_push,
+};
+
+/*
+ * PIO pop handler
+ */
+static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
+{
+ struct usbhs_pipe *pipe = pkt->pipe;
+
+ if (usbhs_pipe_is_busy(pipe))
+ return 0;
+
+ /*
+ * enable the pipe to prepare for packet receive
+ */
+
+ usbhs_pipe_enable(pipe);
+ usbhsf_rx_irq_ctrl(pipe, 1);
+
+ return 0;
+}
+
+static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
+{
+ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
+ void __iomem *addr = priv->base + fifo->port;
+ u8 *buf;
+ u32 data = 0;
+ int maxp = usbhs_pipe_get_maxpacket(pipe);
+ int rcv_len, len;
+ int i, ret;
+ int total_len = 0;
+
+ ret = usbhsf_fifo_select(pipe, fifo, 0);
+ if (ret < 0)
+ return 0;
+
+ ret = usbhsf_fifo_barrier(priv, fifo);
+ if (ret < 0)
+ goto usbhs_fifo_read_busy;
+
+ rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
+
+ buf = pkt->buf + pkt->actual;
+ len = pkt->length - pkt->actual;
+ len = min(len, rcv_len);
+ total_len = len;
+
+ /*
+ * Buffer clear if Zero-Length packet
+ *
+ * see
+ * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
+ */
+ if (0 == rcv_len) {
+ usbhsf_fifo_clear(pipe, fifo);
+ goto usbhs_fifo_read_end;
+ }
+
+ /*
+ * FIXME
+ *
+ * 32-bit access only
+ */
+ if (len >= 4 && !((unsigned long)buf & 0x03)) {
+ ioread32_rep(addr, buf, len / 4);
+ len %= 4;
+ buf += total_len - len;
+ }
+
+ /* read the remaining bytes */
+ for (i = 0; i < len; i++) {
+ if (!(i & 0x03))
+ data = ioread32(addr);
+
+ buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
+ }
+
+ pkt->actual += total_len;
+
+usbhs_fifo_read_end:
+ if ((pkt->actual == pkt->length) || /* receive all data */
+ (total_len < maxp)) { /* short packet */
+ *is_done = 1;
+ usbhsf_rx_irq_ctrl(pipe, 0);
+ usbhs_pipe_disable(pipe);
+ }
+
+ dev_dbg(dev, " recv %d (%d/ %d/ %d/ %d)\n",
+ usbhs_pipe_number(pipe),
+ pkt->length, pkt->actual, *is_done, pkt->zero);
+
+usbhs_fifo_read_busy:
+ usbhsf_fifo_unselect(pipe, fifo);
+
+ return ret;
+}
+
+struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
+ .prepare = usbhsf_prepare_pop,
+ .try_run = usbhsf_pio_try_pop,
+};
+
+/*
+ * DCP control stage handler
+ */
+static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
+{
+ usbhs_dcp_control_transfer_done(pkt->pipe);
+
+ *is_done = 1;
+
+ return 0;
+}
+
+struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
+ .prepare = usbhsf_ctrl_stage_end,
+ .try_run = usbhsf_ctrl_stage_end,
+};
+
+/*
+ * DMA fifo functions
+ */
+static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
+ struct usbhs_pkt *pkt)
+{
+ if (&usbhs_fifo_dma_push_handler == pkt->handler)
+ return fifo->tx_chan;
+
+ if (&usbhs_fifo_dma_pop_handler == pkt->handler)
+ return fifo->rx_chan;
+
+ return NULL;
+}
+
+static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
+ struct usbhs_pkt *pkt)
+{
+ struct usbhs_fifo *fifo;
+
+ /* DMA :: D0FIFO */
+ fifo = usbhsf_get_d0fifo(priv);
+ if (usbhsf_dma_chan_get(fifo, pkt) &&
+ !usbhsf_fifo_is_busy(fifo))
+ return fifo;
+
+ /* DMA :: D1FIFO */
+ fifo = usbhsf_get_d1fifo(priv);
+ if (usbhsf_dma_chan_get(fifo, pkt) &&
+ !usbhsf_fifo_is_busy(fifo))
+ return fifo;
+
+ return NULL;
+}
+
+#define usbhsf_dma_start(p, f) __usbhsf_dma_ctrl(p, f, DREQE)
+#define usbhsf_dma_stop(p, f) __usbhsf_dma_ctrl(p, f, 0)
+static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
+ struct usbhs_fifo *fifo,
+ u16 dreqe)
+{
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+
+ usbhs_bset(priv, fifo->sel, DREQE, dreqe);
+}
+
+#define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1)
+#define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0)
+static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
+{
+ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
+
+ return info->dma_map_ctrl(pkt, map);
+}
+
+static void usbhsf_dma_complete(void *arg);
+static void usbhsf_dma_prepare_tasklet(unsigned long data)
+{
+ struct usbhs_pkt *pkt = (struct usbhs_pkt *)data;
+ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct scatterlist sg;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ enum dma_data_direction dir;
+ dma_cookie_t cookie;
+
+ dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, virt_to_page(pkt->dma),
+ pkt->length, offset_in_page(pkt->dma));
+ sg_dma_address(&sg) = pkt->dma + pkt->actual;
+ sg_dma_len(&sg) = pkt->trans;
+
+ desc = chan->device->device_prep_slave_sg(chan, &sg, 1, dir,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!desc)
+ return;
+
+ desc->callback = usbhsf_dma_complete;
+ desc->callback_param = pipe;
+
+ cookie = desc->tx_submit(desc);
+ if (cookie < 0) {
+ dev_err(dev, "Failed to submit dma descriptor\n");
+ return;
+ }
+
+ dev_dbg(dev, " %s %d (%d/ %d)\n",
+ fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
+
+ usbhsf_dma_start(pipe, fifo);
+ dma_async_issue_pending(chan);
+}
+
+/*
+ * DMA push handler
+ */
+static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
+{
+ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct usbhs_fifo *fifo;
+ int len = pkt->length - pkt->actual;
+ int ret;
+
+ if (usbhs_pipe_is_busy(pipe))
+ return 0;
+
+ /* use PIO if packet is less than pio_dma_border or pipe is DCP */
+ if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
+ usbhs_pipe_is_dcp(pipe))
+ goto usbhsf_pio_prepare_push;
+
+ if (len % 4) /* 32bit alignment */
+ goto usbhsf_pio_prepare_push;
+
+ if (((u32)pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
+ goto usbhsf_pio_prepare_push;
+
+ /* get an available DMA fifo */
+ fifo = usbhsf_get_dma_fifo(priv, pkt);
+ if (!fifo)
+ goto usbhsf_pio_prepare_push;
+
+ if (usbhsf_dma_map(pkt) < 0)
+ goto usbhsf_pio_prepare_push;
+
+ ret = usbhsf_fifo_select(pipe, fifo, 0);
+ if (ret < 0)
+ goto usbhsf_pio_prepare_push_unmap;
+
+ pkt->trans = len;
+
+ tasklet_init(&fifo->tasklet,
+ usbhsf_dma_prepare_tasklet,
+ (unsigned long)pkt);
+
+ tasklet_schedule(&fifo->tasklet);
+
+ return 0;
+
+usbhsf_pio_prepare_push_unmap:
+ usbhsf_dma_unmap(pkt);
+usbhsf_pio_prepare_push:
+ /*
+ * change handler to PIO
+ */
+ pkt->handler = &usbhs_fifo_pio_push_handler;
+
+ return pkt->handler->prepare(pkt, is_done);
+}
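The fall-back checks above can be read as a single predicate. A minimal sketch using the same helpers as the patch (illustration only; the runtime dma-map and fifo-select failures also fall back to PIO):

static bool usbhsf_push_wants_dma(struct usbhs_priv *priv,
				  struct usbhs_pkt *pkt)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int len = pkt->length - pkt->actual;

	if (usbhs_pipe_is_dcp(pipe))
		return false;	/* DCP always uses PIO */
	if (len < usbhs_get_dparam(priv, pio_dma_border))
		return false;	/* too small to be worth DMA */
	if (len % 4)
		return false;	/* needs a 32bit-aligned length */
	if (((unsigned long)pkt->buf + pkt->actual) & 0x7)
		return false;	/* needs an 8byte-aligned address */

	return usbhsf_get_dma_fifo(priv, pkt) != NULL;	/* free D0/D1 FIFO */
}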
+
+static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
+{
+ struct usbhs_pipe *pipe = pkt->pipe;
+
+ pkt->actual = pkt->trans;
+
+ *is_done = !pkt->zero; /* send zero packet ? */
+
+ usbhsf_dma_stop(pipe, pipe->fifo);
+ usbhsf_dma_unmap(pkt);
+ usbhsf_fifo_unselect(pipe, pipe->fifo);
+
+ return 0;
+}
+
+struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
+ .prepare = usbhsf_dma_prepare_push,
+ .dma_done = usbhsf_dma_push_done,
+};
+
+/*
+ * DMA pop handler
+ */
+static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
+{
+ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct usbhs_fifo *fifo;
+ int len, ret;
+
+ if (usbhs_pipe_is_busy(pipe))
+ return 0;
+
+ if (usbhs_pipe_is_dcp(pipe))
+ goto usbhsf_pio_prepare_pop;
+
+ /* get an available DMA fifo */
+ fifo = usbhsf_get_dma_fifo(priv, pkt);
+ if (!fifo)
+ goto usbhsf_pio_prepare_pop;
+
+ if (((u32)pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
+ goto usbhsf_pio_prepare_pop;
+
+ ret = usbhsf_fifo_select(pipe, fifo, 0);
+ if (ret < 0)
+ goto usbhsf_pio_prepare_pop;
+
+ /* use PIO if packet is less than pio_dma_border */
+ len = usbhsf_fifo_rcv_len(priv, fifo);
+ len = min(pkt->length - pkt->actual, len);
+ if (len % 4) /* 32bit alignment */
+ goto usbhsf_pio_prepare_pop_unselect;
+
+ if (len < usbhs_get_dparam(priv, pio_dma_border))
+ goto usbhsf_pio_prepare_pop_unselect;
+
+ ret = usbhsf_fifo_barrier(priv, fifo);
+ if (ret < 0)
+ goto usbhsf_pio_prepare_pop_unselect;
+
+ if (usbhsf_dma_map(pkt) < 0)
+ goto usbhsf_pio_prepare_pop_unselect;
+
+ /* DMA */
+
+ /*
+ * usbhs_fifo_dma_pop_handler :: prepare
+ * enabled this irq so that we could get here,
+ * but it is no longer needed for DMA. Disable it.
+ */
+ usbhsf_rx_irq_ctrl(pipe, 0);
+
+ pkt->trans = len;
+
+ tasklet_init(&fifo->tasklet,
+ usbhsf_dma_prepare_tasklet,
+ (unsigned long)pkt);
+
+ tasklet_schedule(&fifo->tasklet);
+
+ return 0;
+
+usbhsf_pio_prepare_pop_unselect:
+ usbhsf_fifo_unselect(pipe, fifo);
+usbhsf_pio_prepare_pop:
+
+ /*
+ * change handler to PIO
+ */
+ pkt->handler = &usbhs_fifo_pio_pop_handler;
+
+ return pkt->handler->try_run(pkt, is_done);
+}
+
+static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
+{
+ struct usbhs_pipe *pipe = pkt->pipe;
+ int maxp = usbhs_pipe_get_maxpacket(pipe);
+
+ usbhsf_dma_stop(pipe, pipe->fifo);
+ usbhsf_dma_unmap(pkt);
+ usbhsf_fifo_unselect(pipe, pipe->fifo);
+
+ pkt->actual += pkt->trans;
+
+ if ((pkt->actual == pkt->length) || /* receive all data */
+ (pkt->trans < maxp)) { /* short packet */
+ *is_done = 1;
+ } else {
+ /* re-enable */
+ usbhsf_prepare_pop(pkt, is_done);
+ }
+
+ return 0;
+}
+
+struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
+ .prepare = usbhsf_prepare_pop,
+ .try_run = usbhsf_dma_try_pop,
+ .dma_done = usbhsf_dma_pop_done
+};
+
+/*
+ * DMA setting
+ */
+static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct sh_dmae_slave *slave = param;
+
+ /*
+ * FIXME
+ *
+ * usbhs doesn't recognize id = 0 as valid DMA
+ */
+ if (0 == slave->slave_id)
+ return false;
+
+ chan->private = slave;
+
+ return true;
+}
+
+static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
+{
+ if (fifo->tx_chan)
+ dma_release_channel(fifo->tx_chan);
+ if (fifo->rx_chan)
+ dma_release_channel(fifo->rx_chan);
+
+ fifo->tx_chan = NULL;
+ fifo->rx_chan = NULL;
+}
+
+static void usbhsf_dma_init(struct usbhs_priv *priv,
+ struct usbhs_fifo *fifo)
+{
+ struct device *dev = usbhs_priv_to_dev(priv);
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
+ &fifo->tx_slave);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
+ &fifo->rx_slave);
+
+ if (fifo->tx_chan || fifo->rx_chan)
+ dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
+ fifo->name,
+ fifo->tx_chan ? "[TX]" : " ",
+ fifo->rx_chan ? "[RX]" : " ");
+}
+
+/*
+ * irq functions
+ */
+static int usbhsf_irq_empty(struct usbhs_priv *priv,
+ struct usbhs_irq_state *irq_state)
+{
+ struct usbhs_pipe *pipe;
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int i, ret;
+
+ if (!irq_state->bempsts) {
+ dev_err(dev, "debug %s !!\n", __func__);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);
+
+ /*
+ * search interrupted "pipe"
+ * not "uep".
+ */
+ usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
+ if (!(irq_state->bempsts & (1 << i)))
+ continue;
+
+ ret = usbhs_pkt_run(pipe);
+ if (ret < 0)
+ dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
+ }
+
+ return 0;
+}
+
+static int usbhsf_irq_ready(struct usbhs_priv *priv,
+ struct usbhs_irq_state *irq_state)
+{
+ struct usbhs_pipe *pipe;
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int i, ret;
+
+ if (!irq_state->brdysts) {
+ dev_err(dev, "debug %s !!\n", __func__);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);
+
+ /*
+ * search interrupted "pipe"
+ * not "uep".
+ */
+ usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
+ if (!(irq_state->brdysts & (1 << i)))
+ continue;
+
+ ret = usbhs_pkt_run(pipe);
+ if (ret < 0)
+ dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
+ }
+
+ return 0;
+}
+
+static void usbhsf_dma_complete(void *arg)
+{
+ struct usbhs_pipe *pipe = arg;
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ int ret;
+
+ ret = usbhs_pkt_dmadone(pipe);
+ if (ret < 0)
+ dev_err(dev, "dma_complete run_error %d : %d\n",
+ usbhs_pipe_number(pipe), ret);
+}
+
+/*
+ * fifo init
+ */
+void usbhs_fifo_init(struct usbhs_priv *priv)
+{
+ struct usbhs_mod *mod = usbhs_mod_get_current(priv);
+ struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
+ struct usbhs_fifo *d0fifo = usbhsf_get_d0fifo(priv);
+ struct usbhs_fifo *d1fifo = usbhsf_get_d1fifo(priv);
+
+ mod->irq_empty = usbhsf_irq_empty;
+ mod->irq_ready = usbhsf_irq_ready;
+ mod->irq_bempsts = 0;
+ mod->irq_brdysts = 0;
+
+ cfifo->pipe = NULL;
+ cfifo->tx_chan = NULL;
+ cfifo->rx_chan = NULL;
+
+ d0fifo->pipe = NULL;
+ d0fifo->tx_chan = NULL;
+ d0fifo->rx_chan = NULL;
+
+ d1fifo->pipe = NULL;
+ d1fifo->tx_chan = NULL;
+ d1fifo->rx_chan = NULL;
+
+ usbhsf_dma_init(priv, usbhsf_get_d0fifo(priv));
+ usbhsf_dma_init(priv, usbhsf_get_d1fifo(priv));
+}
+
+void usbhs_fifo_quit(struct usbhs_priv *priv)
+{
+ struct usbhs_mod *mod = usbhs_mod_get_current(priv);
+
+ mod->irq_empty = NULL;
+ mod->irq_ready = NULL;
+ mod->irq_bempsts = 0;
+ mod->irq_brdysts = 0;
+
+ usbhsf_dma_quit(priv, usbhsf_get_d0fifo(priv));
+ usbhsf_dma_quit(priv, usbhsf_get_d1fifo(priv));
+}
+
+int usbhs_fifo_probe(struct usbhs_priv *priv)
+{
+ struct usbhs_fifo *fifo;
+
+ /* CFIFO */
+ fifo = usbhsf_get_cfifo(priv);
+ fifo->name = "CFIFO";
+ fifo->port = CFIFO;
+ fifo->sel = CFIFOSEL;
+ fifo->ctr = CFIFOCTR;
+
+ /* D0FIFO */
+ fifo = usbhsf_get_d0fifo(priv);
+ fifo->name = "D0FIFO";
+ fifo->port = D0FIFO;
+ fifo->sel = D0FIFOSEL;
+ fifo->ctr = D0FIFOCTR;
+ fifo->tx_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id);
+ fifo->rx_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id);
+
+ /* D1FIFO */
+ fifo = usbhsf_get_d1fifo(priv);
+ fifo->name = "D1FIFO";
+ fifo->port = D1FIFO;
+ fifo->sel = D1FIFOSEL;
+ fifo->ctr = D1FIFOCTR;
+ fifo->tx_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id);
+ fifo->rx_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id);
+
+ return 0;
+}
+
+void usbhs_fifo_remove(struct usbhs_priv *priv)
+{
+}
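The slave ids and pio_dma_border consumed above via usbhs_get_dparam() are expected to be supplied by the board's platform data. A hedged sketch, assuming the driver-parameter fields match the dparam names used in usbhs_fifo_probe(); the channel ids and the 64-byte border are purely hypothetical values:

static struct renesas_usbhs_platform_info usbhs_info = {
	.driver_param = {
		.pio_dma_border	= 64,			/* assumed value, bytes */
		.d0_tx_id	= SHDMA_SLAVE_USBHS_TX,	/* hypothetical id */
		.d0_rx_id	= SHDMA_SLAVE_USBHS_RX,	/* hypothetical id */
	},
};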
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
new file mode 100644
index 00000000000..ed6d8e56c13
--- /dev/null
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -0,0 +1,104 @@
+/*
+ * Renesas USB driver
+ *
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#ifndef RENESAS_USB_FIFO_H
+#define RENESAS_USB_FIFO_H
+
+#include <linux/interrupt.h>
+#include <linux/sh_dma.h>
+#include <asm/dma.h>
+#include "pipe.h"
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+struct usbhs_fifo {
+ char *name;
+ u32 port; /* xFIFO */
+ u32 sel; /* xFIFOSEL */
+ u32 ctr; /* xFIFOCTR */
+
+ struct usbhs_pipe *pipe;
+ struct tasklet_struct tasklet;
+
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+
+ struct sh_dmae_slave tx_slave;
+ struct sh_dmae_slave rx_slave;
+};
+
+struct usbhs_fifo_info {
+ struct usbhs_fifo cfifo;
+ struct usbhs_fifo d0fifo;
+ struct usbhs_fifo d1fifo;
+};
+
+struct usbhs_pkt_handle;
+struct usbhs_pkt {
+ struct list_head node;
+ struct usbhs_pipe *pipe;
+ struct usbhs_pkt_handle *handler;
+ dma_addr_t dma;
+ void *buf;
+ int length;
+ int trans;
+ int actual;
+ int zero;
+};
+
+struct usbhs_pkt_handle {
+ int (*prepare)(struct usbhs_pkt *pkt, int *is_done);
+ int (*try_run)(struct usbhs_pkt *pkt, int *is_done);
+ int (*dma_done)(struct usbhs_pkt *pkt, int *is_done);
+};
+
+/*
+ * fifo
+ */
+int usbhs_fifo_probe(struct usbhs_priv *priv);
+void usbhs_fifo_remove(struct usbhs_priv *priv);
+void usbhs_fifo_init(struct usbhs_priv *priv);
+void usbhs_fifo_quit(struct usbhs_priv *priv);
+
+/*
+ * packet info
+ */
+enum {
+ USBHSF_PKT_PREPARE,
+ USBHSF_PKT_TRY_RUN,
+ USBHSF_PKT_DMA_DONE,
+};
+
+extern struct usbhs_pkt_handle usbhs_fifo_pio_push_handler;
+extern struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler;
+extern struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler;
+
+extern struct usbhs_pkt_handle usbhs_fifo_dma_push_handler;
+extern struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler;
+
+
+void usbhs_pkt_init(struct usbhs_pkt *pkt);
+void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
+ struct usbhs_pkt_handle *handler,
+ void *buf, int len, int zero);
+struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt);
+int __usbhs_pkt_handler(struct usbhs_pipe *pipe, int type);
+
+#define usbhs_pkt_start(p) __usbhs_pkt_handler(p, USBHSF_PKT_PREPARE)
+#define usbhs_pkt_run(p) __usbhs_pkt_handler(p, USBHSF_PKT_TRY_RUN)
+#define usbhs_pkt_dmadone(p) __usbhs_pkt_handler(p, USBHSF_PKT_DMA_DONE)
+
+#endif /* RENESAS_USB_FIFO_H */
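A minimal usage sketch of the packet API declared above (illustration only; the real caller in this series is mod_gadget.c, and the buffer and pre-allocated packet here are hypothetical):

static void example_queue_packet(struct usbhs_pipe *pipe,
				 struct usbhs_pkt *pkt,
				 void *buf, int len)
{
	usbhs_pkt_init(pkt);				/* init the list node */
	usbhs_pkt_push(pipe, pkt, &usbhs_fifo_pio_push_handler,
		       buf, len, 0 /* no zero packet */);
	usbhs_pkt_start(pipe);				/* runs handler->prepare */
}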
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 547486ccd05..cb2d451d511 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -14,6 +14,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
+#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -26,26 +27,25 @@
*/
struct usbhsg_request {
struct usb_request req;
- struct list_head node;
+ struct usbhs_pkt pkt;
};
#define EP_NAME_SIZE 8
struct usbhsg_gpriv;
-struct usbhsg_pipe_handle;
struct usbhsg_uep {
struct usb_ep ep;
struct usbhs_pipe *pipe;
- struct list_head list;
char ep_name[EP_NAME_SIZE];
struct usbhsg_gpriv *gpriv;
- struct usbhsg_pipe_handle *handler;
+ struct usbhs_pkt_handle *handler;
};
struct usbhsg_gpriv {
struct usb_gadget gadget;
struct usbhs_mod mod;
+ struct list_head link;
struct usbhsg_uep *uep;
int uep_size;
@@ -58,12 +58,6 @@ struct usbhsg_gpriv {
#define USBHSG_STATUS_WEDGE (1 << 2)
};
-struct usbhsg_pipe_handle {
- int (*prepare)(struct usbhsg_uep *uep, struct usbhsg_request *ureq);
- int (*try_run)(struct usbhsg_uep *uep, struct usbhsg_request *ureq);
- void (*irq_mask)(struct usbhsg_uep *uep, int enable);
-};
-
struct usbhsg_recip_handle {
char *name;
int (*device)(struct usbhs_priv *priv, struct usbhsg_uep *uep,
@@ -83,7 +77,7 @@ struct usbhsg_recip_handle {
struct usbhsg_gpriv, mod)
#define __usbhsg_for_each_uep(start, pos, g, i) \
- for (i = start, pos = (g)->uep; \
+ for (i = start, pos = (g)->uep + i; \
i < (g)->uep_size; \
i++, pos = (g)->uep + i)
@@ -100,7 +94,6 @@ struct usbhsg_recip_handle {
container_of(r, struct usbhsg_request, req)
#define usbhsg_ep_to_uep(e) container_of(e, struct usbhsg_uep, ep)
-#define usbhsg_gpriv_to_lock(gp) usbhs_priv_to_lock((gp)->mod.priv)
#define usbhsg_gpriv_to_dev(gp) usbhs_priv_to_dev((gp)->mod.priv)
#define usbhsg_gpriv_to_priv(gp) ((gp)->mod.priv)
#define usbhsg_gpriv_to_dcp(gp) ((gp)->uep)
@@ -110,6 +103,10 @@ struct usbhsg_recip_handle {
#define usbhsg_pipe_to_uep(p) ((p)->mod_private)
#define usbhsg_is_dcp(u) ((u) == usbhsg_gpriv_to_dcp((u)->gpriv))
+#define usbhsg_ureq_to_pkt(u) (&(u)->pkt)
+#define usbhsg_pkt_to_ureq(i) \
+ container_of(i, struct usbhsg_request, pkt)
+
#define usbhsg_is_not_connected(gp) ((gp)->gadget.speed == USB_SPEED_UNKNOWN)
/* status */
@@ -118,37 +115,18 @@ struct usbhsg_recip_handle {
#define usbhsg_status_clr(gp, b) (gp->status &= ~b)
#define usbhsg_status_has(gp, b) (gp->status & b)
-/*
- * usbhsg_trylock
- *
- * This driver don't use spin_try_lock
- * to avoid warning of CONFIG_DEBUG_SPINLOCK
- */
-static spinlock_t *usbhsg_trylock(struct usbhsg_gpriv *gpriv,
- unsigned long *flags)
-{
- spinlock_t *lock = usbhsg_gpriv_to_lock(gpriv);
-
- /* check spin lock status
- * to avoid deadlock/nest */
- if (spin_is_locked(lock))
- return NULL;
+/* controller */
+LIST_HEAD(the_controller_link);
- spin_lock_irqsave(lock, *flags);
-
- return lock;
-}
-
-static void usbhsg_unlock(spinlock_t *lock, unsigned long *flags)
-{
- if (!lock)
- return;
-
- spin_unlock_irqrestore(lock, *flags);
-}
+#define usbhsg_for_each_controller(gpriv)\
+ list_for_each_entry(gpriv, &the_controller_link, link)
+#define usbhsg_controller_register(gpriv)\
+ list_add_tail(&(gpriv)->link, &the_controller_link)
+#define usbhsg_controller_unregister(gpriv)\
+ list_del_init(&(gpriv)->link)
/*
- * list push/pop
+ * queue push/pop
*/
static void usbhsg_queue_push(struct usbhsg_uep *uep,
struct usbhsg_request *ureq)
@@ -156,79 +134,17 @@ static void usbhsg_queue_push(struct usbhsg_uep *uep,
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ struct usbhs_pkt *pkt = usbhsg_ureq_to_pkt(ureq);
+ struct usb_request *req = &ureq->req;
- /*
- ********* assume under spin lock *********
- */
- list_del_init(&ureq->node);
- list_add_tail(&ureq->node, &uep->list);
- ureq->req.actual = 0;
- ureq->req.status = -EINPROGRESS;
+ req->actual = 0;
+ req->status = -EINPROGRESS;
+ usbhs_pkt_push(pipe, pkt, uep->handler,
+ req->buf, req->length, req->zero);
dev_dbg(dev, "pipe %d : queue push (%d)\n",
usbhs_pipe_number(pipe),
- ureq->req.length);
-}
-
-static struct usbhsg_request *usbhsg_queue_get(struct usbhsg_uep *uep)
-{
- /*
- ********* assume under spin lock *********
- */
- if (list_empty(&uep->list))
- return NULL;
-
- return list_entry(uep->list.next, struct usbhsg_request, node);
-}
-
-#define usbhsg_queue_prepare(uep) __usbhsg_queue_handler(uep, 1);
-#define usbhsg_queue_handle(uep) __usbhsg_queue_handler(uep, 0);
-static int __usbhsg_queue_handler(struct usbhsg_uep *uep, int prepare)
-{
- struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
- struct device *dev = usbhsg_gpriv_to_dev(gpriv);
- struct usbhsg_request *ureq;
- spinlock_t *lock;
- unsigned long flags;
- int ret = 0;
-
- if (!uep->handler) {
- dev_err(dev, "no handler function\n");
- return -EIO;
- }
-
- /*
- * CAUTION [*queue handler*]
- *
- * This function will be called for start/restart queue operation.
- * OTOH the most much worry for USB driver is spinlock nest.
- * Specially it are
- * - usb_ep_ops :: queue
- * - usb_request :: complete
- *
- * But the caller of this function need not care about spinlock.
- * This function is using usbhsg_trylock for it.
- * if "is_locked" is 1, this mean this function lock it.
- * but if it is 0, this mean it is already under spin lock.
- * see also
- * CAUTION [*endpoint queue*]
- * CAUTION [*request complete*]
- */
-
- /****************** spin try lock *******************/
- lock = usbhsg_trylock(gpriv, &flags);
-
- ureq = usbhsg_queue_get(uep);
- if (ureq) {
- if (prepare)
- ret = uep->handler->prepare(uep, ureq);
- else
- ret = uep->handler->try_run(uep, ureq);
- }
- usbhsg_unlock(lock, &flags);
- /******************** spin unlock ******************/
-
- return ret;
+ req->length);
}
static void usbhsg_queue_pop(struct usbhsg_uep *uep,
@@ -239,289 +155,91 @@ static void usbhsg_queue_pop(struct usbhsg_uep *uep,
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
- /*
- ********* assume under spin lock *********
- */
-
- /*
- * CAUTION [*request complete*]
- *
- * There is a possibility not to be called in correct order
- * if "complete" is called without spinlock.
- *
- * So, this function assume it is under spinlock,
- * and call usb_request :: complete.
- *
- * But this "complete" will push next usb_request.
- * It mean "usb_ep_ops :: queue" which is using spinlock is called
- * under spinlock.
- *
- * To avoid dead-lock, this driver is using usbhsg_trylock.
- * CAUTION [*endpoint queue*]
- * CAUTION [*queue handler*]
- */
-
dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe));
- list_del_init(&ureq->node);
-
ureq->req.status = status;
ureq->req.complete(&uep->ep, &ureq->req);
-
- /* more request ? */
- if (0 == status)
- usbhsg_queue_prepare(uep);
}
-/*
- * irq enable/disable function
- */
-#define usbhsg_irq_callback_ctrl(uep, status, enable) \
- ({ \
- struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep); \
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); \
- struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); \
- struct usbhs_mod *mod = usbhs_mod_get_current(priv); \
- if (!mod) \
- return; \
- if (enable) \
- mod->irq_##status |= (1 << usbhs_pipe_number(pipe)); \
- else \
- mod->irq_##status &= ~(1 << usbhs_pipe_number(pipe)); \
- usbhs_irq_callback_update(priv, mod); \
- })
-
-static void usbhsg_irq_empty_ctrl(struct usbhsg_uep *uep, int enable)
+static void usbhsg_queue_done(struct usbhs_pkt *pkt)
{
- usbhsg_irq_callback_ctrl(uep, bempsts, enable);
-}
+ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
+ struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
-static void usbhsg_irq_ready_ctrl(struct usbhsg_uep *uep, int enable)
-{
- usbhsg_irq_callback_ctrl(uep, brdysts, enable);
-}
-
-/*
- * handler function
- */
-static int usbhsg_try_run_ctrl_stage_end(struct usbhsg_uep *uep,
- struct usbhsg_request *ureq)
-{
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
-
- /*
- ********* assume under spin lock *********
- */
+ ureq->req.actual = pkt->actual;
- usbhs_dcp_control_transfer_done(pipe);
usbhsg_queue_pop(uep, ureq, 0);
-
- return 0;
}
-static int usbhsg_try_run_send_packet(struct usbhsg_uep *uep,
- struct usbhsg_request *ureq)
+/*
+ * dma map/unmap
+ */
+static int usbhsg_dma_map(struct device *dev,
+ struct usbhs_pkt *pkt,
+ enum dma_data_direction dir)
{
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
struct usb_request *req = &ureq->req;
- struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
- struct device *dev = usbhsg_gpriv_to_dev(gpriv);
- void *buf;
- int remainder, send;
- int is_done = 0;
- int enable;
- int maxp;
- /*
- ********* assume under spin lock *********
- */
-
- maxp = usbhs_pipe_get_maxpacket(pipe);
- buf = req->buf + req->actual;
- remainder = req->length - req->actual;
-
- send = usbhs_fifo_write(pipe, buf, remainder);
-
- /*
- * send < 0 : pipe busy
- * send = 0 : send zero packet
- * send > 0 : send data
- *
- * send <= max_packet
- */
- if (send > 0)
- req->actual += send;
-
- /* send all packet ? */
- if (send < remainder)
- is_done = 0; /* there are remainder data */
- else if (send < maxp)
- is_done = 1; /* short packet */
- else
- is_done = !req->zero; /* send zero packet ? */
-
- dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n",
- usbhs_pipe_number(pipe),
- remainder, send, is_done, req->zero);
-
- /*
- * enable interrupt and send again in irq handler
- * if it still have remainder data which should be sent.
- */
- enable = !is_done;
- uep->handler->irq_mask(uep, enable);
-
- /*
- * usbhs_fifo_enable execute
- * - after callback_update,
- * - before queue_pop / stage_end
- */
- usbhs_fifo_enable(pipe);
-
- /*
- * all data were sent ?
- */
- if (is_done) {
- /* it care below call in
- "function mode" */
- if (usbhsg_is_dcp(uep))
- usbhs_dcp_control_transfer_done(pipe);
-
- usbhsg_queue_pop(uep, ureq, 0);
+ if (pkt->dma != DMA_ADDR_INVALID) {
+ dev_err(dev, "dma is already mapped\n");
+ return -EIO;
}
- return 0;
-}
-
-static int usbhsg_prepare_send_packet(struct usbhsg_uep *uep,
- struct usbhsg_request *ureq)
-{
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
-
- /*
- ********* assume under spin lock *********
- */
+ if (req->dma == DMA_ADDR_INVALID) {
+ pkt->dma = dma_map_single(dev, pkt->buf, pkt->length, dir);
+ } else {
+ dma_sync_single_for_device(dev, req->dma, req->length, dir);
+ pkt->dma = req->dma;
+ }
- usbhs_fifo_prepare_write(pipe);
- usbhsg_try_run_send_packet(uep, ureq);
+ if (dma_mapping_error(dev, pkt->dma)) {
+ dev_err(dev, "dma mapping error %x\n", pkt->dma);
+ return -EIO;
+ }
return 0;
}
-static int usbhsg_try_run_receive_packet(struct usbhsg_uep *uep,
- struct usbhsg_request *ureq)
+static int usbhsg_dma_unmap(struct device *dev,
+ struct usbhs_pkt *pkt,
+ enum dma_data_direction dir)
{
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
struct usb_request *req = &ureq->req;
- struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
- struct device *dev = usbhsg_gpriv_to_dev(gpriv);
- void *buf;
- int maxp;
- int remainder, recv;
- int is_done = 0;
-
- /*
- ********* assume under spin lock *********
- */
-
- maxp = usbhs_pipe_get_maxpacket(pipe);
- buf = req->buf + req->actual;
- remainder = req->length - req->actual;
-
- recv = usbhs_fifo_read(pipe, buf, remainder);
- /*
- * recv < 0 : pipe busy
- * recv >= 0 : receive data
- *
- * recv <= max_packet
- */
- if (recv < 0)
- return -EBUSY;
-
- /* update parameters */
- req->actual += recv;
-
- if ((recv == remainder) || /* receive all data */
- (recv < maxp)) /* short packet */
- is_done = 1;
- dev_dbg(dev, " recv %d (%d/ %d/ %d/ %d)\n",
- usbhs_pipe_number(pipe),
- remainder, recv, is_done, req->zero);
+ if (pkt->dma == DMA_ADDR_INVALID) {
+ dev_err(dev, "dma is not mapped\n");
+ return -EIO;
+ }
- /* read all data ? */
- if (is_done) {
- int disable = 0;
+ if (req->dma == DMA_ADDR_INVALID)
+ dma_unmap_single(dev, pkt->dma, pkt->length, dir);
+ else
+ dma_sync_single_for_cpu(dev, req->dma, req->length, dir);
- uep->handler->irq_mask(uep, disable);
- usbhs_fifo_disable(pipe);
- usbhsg_queue_pop(uep, ureq, 0);
- }
+ pkt->dma = DMA_ADDR_INVALID;
return 0;
}
-static int usbhsg_prepare_receive_packet(struct usbhsg_uep *uep,
- struct usbhsg_request *ureq)
+static int usbhsg_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
- int enable = 1;
- int ret;
-
- /*
- ********* assume under spin lock *********
- */
-
- ret = usbhs_fifo_prepare_read(pipe);
- if (ret < 0)
- return ret;
+ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
+ struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
+ struct device *dev = usbhsg_gpriv_to_dev(gpriv);
+ enum dma_data_direction dir;
- /*
- * data will be read in interrupt handler
- */
- uep->handler->irq_mask(uep, enable);
+ dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- return ret;
+ if (map)
+ return usbhsg_dma_map(dev, pkt, dir);
+ else
+ return usbhsg_dma_unmap(dev, pkt, dir);
}
-static struct usbhsg_pipe_handle usbhsg_handler_send_by_empty = {
- .prepare = usbhsg_prepare_send_packet,
- .try_run = usbhsg_try_run_send_packet,
- .irq_mask = usbhsg_irq_empty_ctrl,
-};
-
-static struct usbhsg_pipe_handle usbhsg_handler_send_by_ready = {
- .prepare = usbhsg_prepare_send_packet,
- .try_run = usbhsg_try_run_send_packet,
- .irq_mask = usbhsg_irq_ready_ctrl,
-};
-
-static struct usbhsg_pipe_handle usbhsg_handler_recv_by_ready = {
- .prepare = usbhsg_prepare_receive_packet,
- .try_run = usbhsg_try_run_receive_packet,
- .irq_mask = usbhsg_irq_ready_ctrl,
-};
-
-static struct usbhsg_pipe_handle usbhsg_handler_ctrl_stage_end = {
- .prepare = usbhsg_try_run_ctrl_stage_end,
- .try_run = usbhsg_try_run_ctrl_stage_end,
-};
-
-/*
- * DCP pipe can NOT use "ready interrupt" for "send"
- * it should use "empty" interrupt.
- * see
- * "Operation" - "Interrupt Function" - "BRDY Interrupt"
- *
- * on the other hand, normal pipe can use "ready interrupt" for "send"
- * even though it is single/double buffer
- */
-#define usbhsg_handler_send_ctrl usbhsg_handler_send_by_empty
-#define usbhsg_handler_recv_ctrl usbhsg_handler_recv_by_ready
-
-#define usbhsg_handler_send_packet usbhsg_handler_send_by_ready
-#define usbhsg_handler_recv_packet usbhsg_handler_recv_by_ready
-
/*
* USB_TYPE_STANDARD / clear feature functions
*/
@@ -546,15 +264,13 @@ static int usbhsg_recip_handler_std_clear_endpoint(struct usbhs_priv *priv,
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
if (!usbhsg_status_has(gpriv, USBHSG_STATUS_WEDGE)) {
- usbhs_fifo_disable(pipe);
+ usbhs_pipe_disable(pipe);
usbhs_pipe_clear_sequence(pipe);
- usbhs_fifo_enable(pipe);
+ usbhs_pipe_enable(pipe);
}
usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
- usbhsg_queue_prepare(uep);
-
return 0;
}
@@ -575,6 +291,7 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
struct usbhsg_uep *uep;
+ struct usbhs_pipe *pipe;
int recip = ctrl->bRequestType & USB_RECIP_MASK;
int nth = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
int ret;
@@ -583,9 +300,11 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
char *msg;
uep = usbhsg_gpriv_to_nth_uep(gpriv, nth);
- if (!usbhsg_uep_to_pipe(uep)) {
+ pipe = usbhsg_uep_to_pipe(uep);
+ if (!pipe) {
dev_err(dev, "wrong recip request\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto usbhsg_recip_run_handle_end;
}
switch (recip) {
@@ -608,10 +327,20 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
}
if (func) {
+ unsigned long flags;
+
dev_dbg(dev, "%s (pipe %d :%s)\n", handler->name, nth, msg);
+
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
ret = func(priv, uep, ctrl);
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
}
+usbhsg_recip_run_handle_end:
+ usbhs_pkt_start(pipe);
+
return ret;
}
@@ -660,13 +389,13 @@ static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv,
switch (stage) {
case READ_DATA_STAGE:
- dcp->handler = &usbhsg_handler_send_ctrl;
+ dcp->handler = &usbhs_fifo_pio_push_handler;
break;
case WRITE_DATA_STAGE:
- dcp->handler = &usbhsg_handler_recv_ctrl;
+ dcp->handler = &usbhs_fifo_pio_pop_handler;
break;
case NODATA_STATUS_STAGE:
- dcp->handler = &usbhsg_handler_ctrl_stage_end;
+ dcp->handler = &usbhs_ctrl_stage_end_handler;
break;
default:
return ret;
@@ -695,128 +424,27 @@ static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv,
ret = gpriv->driver->setup(&gpriv->gadget, &ctrl);
if (ret < 0)
- usbhs_fifo_stall(pipe);
+ usbhs_pipe_stall(pipe);
return ret;
}
-static int usbhsg_irq_empty(struct usbhs_priv *priv,
- struct usbhs_irq_state *irq_state)
-{
- struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
- struct usbhsg_uep *uep;
- struct usbhs_pipe *pipe;
- struct device *dev = usbhsg_gpriv_to_dev(gpriv);
- int i, ret;
-
- if (!irq_state->bempsts) {
- dev_err(dev, "debug %s !!\n", __func__);
- return -EIO;
- }
-
- dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);
-
- /*
- * search interrupted "pipe"
- * not "uep".
- */
- usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
- if (!(irq_state->bempsts & (1 << i)))
- continue;
-
- uep = usbhsg_pipe_to_uep(pipe);
- ret = usbhsg_queue_handle(uep);
- if (ret < 0)
- dev_err(dev, "send error %d : %d\n", i, ret);
- }
-
- return 0;
-}
-
-static int usbhsg_irq_ready(struct usbhs_priv *priv,
- struct usbhs_irq_state *irq_state)
-{
- struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
- struct usbhsg_uep *uep;
- struct usbhs_pipe *pipe;
- struct device *dev = usbhsg_gpriv_to_dev(gpriv);
- int i, ret;
-
- if (!irq_state->brdysts) {
- dev_err(dev, "debug %s !!\n", __func__);
- return -EIO;
- }
-
- dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);
-
- /*
- * search interrupted "pipe"
- * not "uep".
- */
- usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
- if (!(irq_state->brdysts & (1 << i)))
- continue;
-
- uep = usbhsg_pipe_to_uep(pipe);
- ret = usbhsg_queue_handle(uep);
- if (ret < 0)
- dev_err(dev, "receive error %d : %d\n", i, ret);
- }
-
- return 0;
-}
-
/*
*
* usb_dcp_ops
*
*/
-static int usbhsg_dcp_enable(struct usbhsg_uep *uep)
-{
- struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
- struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
- struct usbhs_pipe *pipe;
-
- /*
- ********* assume under spin lock *********
- */
-
- pipe = usbhs_dcp_malloc(priv);
- if (!pipe)
- return -EIO;
-
- uep->pipe = pipe;
- uep->pipe->mod_private = uep;
- INIT_LIST_HEAD(&uep->list);
-
- return 0;
-}
-
-#define usbhsg_dcp_disable usbhsg_pipe_disable
static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
{
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
- struct usbhsg_request *ureq;
- int disable = 0;
-
- /*
- ********* assume under spin lock *********
- */
+ struct usbhs_pkt *pkt;
- usbhs_fifo_disable(pipe);
-
- /*
- * disable pipe irq
- */
- usbhsg_irq_empty_ctrl(uep, disable);
- usbhsg_irq_ready_ctrl(uep, disable);
+ usbhs_pipe_disable(pipe);
while (1) {
- ureq = usbhsg_queue_get(uep);
- if (!ureq)
+ pkt = usbhs_pkt_pop(pipe, NULL);
+ if (!pkt)
break;
-
- usbhsg_queue_pop(uep, ureq, -ECONNRESET);
}
return 0;
@@ -843,57 +471,44 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
struct usbhs_pipe *pipe;
- spinlock_t *lock;
- unsigned long flags;
int ret = -EIO;
/*
* if it already have pipe,
* nothing to do
*/
- if (uep->pipe)
+ if (uep->pipe) {
+ usbhs_pipe_clear(uep->pipe);
+ usbhs_pipe_clear_sequence(uep->pipe);
return 0;
-
- /******************** spin lock ********************/
- lock = usbhsg_trylock(gpriv, &flags);
+ }
pipe = usbhs_pipe_malloc(priv, desc);
if (pipe) {
uep->pipe = pipe;
pipe->mod_private = uep;
- INIT_LIST_HEAD(&uep->list);
+ /*
+ * usbhs_fifo_dma_push/pop_handler try to
+ * use dmaengine if possible.
+ * It will use pio handler if impossible.
+ */
if (usb_endpoint_dir_in(desc))
- uep->handler = &usbhsg_handler_send_packet;
+ uep->handler = &usbhs_fifo_dma_push_handler;
else
- uep->handler = &usbhsg_handler_recv_packet;
+ uep->handler = &usbhs_fifo_dma_pop_handler;
ret = 0;
}
- usbhsg_unlock(lock, &flags);
- /******************** spin unlock ******************/
-
return ret;
}
static int usbhsg_ep_disable(struct usb_ep *ep)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
- struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
- spinlock_t *lock;
- unsigned long flags;
- int ret;
-
- /******************** spin lock ********************/
- lock = usbhsg_trylock(gpriv, &flags);
- ret = usbhsg_pipe_disable(uep);
-
- usbhsg_unlock(lock, &flags);
- /******************** spin unlock ******************/
-
- return ret;
+ return usbhsg_pipe_disable(uep);
}
static struct usb_request *usbhsg_ep_alloc_request(struct usb_ep *ep,
@@ -905,7 +520,10 @@ static struct usb_request *usbhsg_ep_alloc_request(struct usb_ep *ep,
if (!ureq)
return NULL;
- INIT_LIST_HEAD(&ureq->node);
+ usbhs_pkt_init(usbhsg_ureq_to_pkt(ureq));
+
+ ureq->req.dma = DMA_ADDR_INVALID;
+
return &ureq->req;
}
@@ -914,7 +532,7 @@ static void usbhsg_ep_free_request(struct usb_ep *ep,
{
struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
- WARN_ON(!list_empty(&ureq->node));
+ WARN_ON(!list_empty(&ureq->pkt.node));
kfree(ureq);
}
@@ -925,69 +543,27 @@ static int usbhsg_ep_queue(struct usb_ep *ep, struct usb_request *req,
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
- spinlock_t *lock;
- unsigned long flags;
- int ret = 0;
-
- /*
- * CAUTION [*endpoint queue*]
- *
- * This function will be called from usb_request :: complete
- * or usb driver timing.
- * If this function is called from usb_request :: complete,
- * it is already under spinlock on this driver.
- * but it is called frm usb driver, this function should call spinlock.
- *
- * This function is using usbshg_trylock to solve this issue.
- * if "is_locked" is 1, this mean this function lock it.
- * but if it is 0, this mean it is already under spin lock.
- * see also
- * CAUTION [*queue handler*]
- * CAUTION [*request complete*]
- */
-
- /******************** spin lock ********************/
- lock = usbhsg_trylock(gpriv, &flags);
/* param check */
if (usbhsg_is_not_connected(gpriv) ||
unlikely(!gpriv->driver) ||
unlikely(!pipe))
- ret = -ESHUTDOWN;
- else
- usbhsg_queue_push(uep, ureq);
-
- usbhsg_unlock(lock, &flags);
- /******************** spin unlock ******************/
+ return -ESHUTDOWN;
- usbhsg_queue_prepare(uep);
+ usbhsg_queue_push(uep, ureq);
- return ret;
+ return 0;
}
static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
- struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
- spinlock_t *lock;
- unsigned long flags;
-
- /*
- * see
- * CAUTION [*queue handler*]
- * CAUTION [*endpoint queue*]
- * CAUTION [*request complete*]
- */
-
- /******************** spin lock ********************/
- lock = usbhsg_trylock(gpriv, &flags);
+ struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
usbhsg_queue_pop(uep, ureq, -ECONNRESET);
- usbhsg_unlock(lock, &flags);
- /******************** spin unlock ******************/
-
return 0;
}
@@ -996,42 +572,32 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
+ struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
struct device *dev = usbhsg_gpriv_to_dev(gpriv);
- spinlock_t *lock;
unsigned long flags;
- int ret = -EAGAIN;
- /*
- * see
- * CAUTION [*queue handler*]
- * CAUTION [*endpoint queue*]
- * CAUTION [*request complete*]
- */
+ usbhsg_pipe_disable(uep);
- /******************** spin lock ********************/
- lock = usbhsg_trylock(gpriv, &flags);
- if (!usbhsg_queue_get(uep)) {
+ dev_dbg(dev, "set halt %d (pipe %d)\n",
+ halt, usbhs_pipe_number(pipe));
- dev_dbg(dev, "set halt %d (pipe %d)\n",
- halt, usbhs_pipe_number(pipe));
-
- if (halt)
- usbhs_fifo_stall(pipe);
- else
- usbhs_fifo_disable(pipe);
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
- if (halt && wedge)
- usbhsg_status_set(gpriv, USBHSG_STATUS_WEDGE);
- else
- usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);
+ if (halt)
+ usbhs_pipe_stall(pipe);
+ else
+ usbhs_pipe_disable(pipe);
- ret = 0;
- }
+ if (halt && wedge)
+ usbhsg_status_set(gpriv, USBHSG_STATUS_WEDGE);
+ else
+ usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);
- usbhsg_unlock(lock, &flags);
+ usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
- return ret;
+ return 0;
}
static int usbhsg_ep_set_halt(struct usb_ep *ep, int value)
@@ -1067,28 +633,40 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
struct device *dev = usbhs_priv_to_dev(priv);
- spinlock_t *lock;
unsigned long flags;
+ int ret = 0;
/******************** spin lock ********************/
- lock = usbhsg_trylock(gpriv, &flags);
+ usbhs_lock(priv, flags);
- /*
- * enable interrupt and systems if ready
- */
usbhsg_status_set(gpriv, status);
if (!(usbhsg_status_has(gpriv, USBHSG_STATUS_STARTED) &&
usbhsg_status_has(gpriv, USBHSG_STATUS_REGISTERD)))
- goto usbhsg_try_start_unlock;
+ ret = -1; /* not ready */
+
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ********************/
+
+ if (ret < 0)
+ return 0; /* not ready is not error */
+ /*
+ * enable interrupt and systems if ready
+ */
dev_dbg(dev, "start gadget\n");
/*
* pipe initialize and enable DCP
*/
- usbhs_pipe_init(priv);
+ usbhs_pipe_init(priv,
+ usbhsg_queue_done,
+ usbhsg_dma_map_ctrl);
+ usbhs_fifo_init(priv);
usbhsg_uep_init(gpriv);
- usbhsg_dcp_enable(dcp);
+
+ /* dcp init */
+ dcp->pipe = usbhs_dcp_malloc(priv);
+ dcp->pipe->mod_private = dcp;
/*
* system config enble
@@ -1105,16 +683,8 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
*/
mod->irq_dev_state = usbhsg_irq_dev_state;
mod->irq_ctrl_stage = usbhsg_irq_ctrl_stage;
- mod->irq_empty = usbhsg_irq_empty;
- mod->irq_ready = usbhsg_irq_ready;
- mod->irq_bempsts = 0;
- mod->irq_brdysts = 0;
usbhs_irq_callback_update(priv, mod);
-usbhsg_try_start_unlock:
- usbhsg_unlock(lock, &flags);
- /******************** spin unlock ********************/
-
return 0;
}
@@ -1124,31 +694,33 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
struct device *dev = usbhs_priv_to_dev(priv);
- spinlock_t *lock;
unsigned long flags;
+ int ret = 0;
/******************** spin lock ********************/
- lock = usbhsg_trylock(gpriv, &flags);
+ usbhs_lock(priv, flags);
- /*
- * disable interrupt and systems if 1st try
- */
usbhsg_status_clr(gpriv, status);
if (!usbhsg_status_has(gpriv, USBHSG_STATUS_STARTED) &&
!usbhsg_status_has(gpriv, USBHSG_STATUS_REGISTERD))
- goto usbhsg_try_stop_unlock;
+ ret = -1; /* already done */
+
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ********************/
+
+ if (ret < 0)
+ return 0; /* already done is not error */
+
+ /*
+ * disable interrupt and systems if 1st try
+ */
+ usbhs_fifo_quit(priv);
/* disable all irq */
mod->irq_dev_state = NULL;
mod->irq_ctrl_stage = NULL;
- mod->irq_empty = NULL;
- mod->irq_ready = NULL;
- mod->irq_bempsts = 0;
- mod->irq_brdysts = 0;
usbhs_irq_callback_update(priv, mod);
- usbhsg_dcp_disable(dcp);
-
gpriv->gadget.speed = USB_SPEED_UNKNOWN;
/* disable sys */
@@ -1156,8 +728,7 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
usbhs_sys_function_ctrl(priv, 0);
usbhs_sys_usb_ctrl(priv, 0);
- usbhsg_unlock(lock, &flags);
- /******************** spin unlock ********************/
+ usbhsg_pipe_disable(dcp);
if (gpriv->driver &&
gpriv->driver->disconnect)
@@ -1166,11 +737,6 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
dev_dbg(dev, "stop gadget\n");
return 0;
-
-usbhsg_try_stop_unlock:
- usbhsg_unlock(lock, &flags);
-
- return 0;
}
/*
@@ -1178,11 +744,10 @@ usbhsg_try_stop_unlock:
* linux usb function
*
*/
-struct usbhsg_gpriv *the_controller;
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
+static int usbhsg_gadget_start(struct usb_gadget_driver *driver,
int (*bind)(struct usb_gadget *))
{
- struct usbhsg_gpriv *gpriv = the_controller;
+ struct usbhsg_gpriv *gpriv;
struct usbhs_priv *priv;
struct device *dev;
int ret;
@@ -1192,10 +757,17 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
!driver->setup ||
driver->speed != USB_SPEED_HIGH)
return -EINVAL;
- if (!gpriv)
- return -ENODEV;
- if (gpriv->driver)
- return -EBUSY;
+
+ /*
+ * find unused controller
+ */
+ usbhsg_for_each_controller(gpriv) {
+ if (!gpriv->driver)
+ goto find_unused_controller;
+ }
+ return -ENODEV;
+
+find_unused_controller:
dev = usbhsg_gpriv_to_dev(gpriv);
priv = usbhsg_gpriv_to_priv(gpriv);
@@ -1229,22 +801,28 @@ add_fail:
return ret;
}
-EXPORT_SYMBOL(usb_gadget_probe_driver);
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+static int usbhsg_gadget_stop(struct usb_gadget_driver *driver)
{
- struct usbhsg_gpriv *gpriv = the_controller;
+ struct usbhsg_gpriv *gpriv;
struct usbhs_priv *priv;
- struct device *dev = usbhsg_gpriv_to_dev(gpriv);
-
- if (!gpriv)
- return -ENODEV;
+ struct device *dev;
if (!driver ||
- !driver->unbind ||
- driver != gpriv->driver)
+ !driver->unbind)
return -EINVAL;
+ /*
+ * find controller
+ */
+ usbhsg_for_each_controller(gpriv) {
+ if (gpriv->driver == driver)
+ goto find_matching_controller;
+ }
+ return -ENODEV;
+
+find_matching_controller:
+
dev = usbhsg_gpriv_to_dev(gpriv);
priv = usbhsg_gpriv_to_priv(gpriv);
@@ -1260,7 +838,6 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
return 0;
}
-EXPORT_SYMBOL(usb_gadget_unregister_driver);
/*
* usb gadget ops
@@ -1275,6 +852,8 @@ static int usbhsg_get_frame(struct usb_gadget *gadget)
static struct usb_gadget_ops usbhsg_gadget_ops = {
.get_frame = usbhsg_get_frame,
+ .start = usbhsg_gadget_start,
+ .stop = usbhsg_gadget_stop,
};
static int usbhsg_start(struct usbhs_priv *priv)
@@ -1294,6 +873,7 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
struct device *dev = usbhs_priv_to_dev(priv);
int pipe_size = usbhs_get_dparam(priv, pipe_size);
int i;
+ int ret;
gpriv = kzalloc(sizeof(struct usbhsg_gpriv), GFP_KERNEL);
if (!gpriv) {
@@ -1304,6 +884,7 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
uep = kzalloc(sizeof(struct usbhsg_uep) * pipe_size, GFP_KERNEL);
if (!uep) {
dev_err(dev, "Could not allocate ep\n");
+ ret = -ENOMEM;
goto usbhs_mod_gadget_probe_err_gpriv;
}
@@ -1350,7 +931,6 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
uep->ep.name = uep->ep_name;
uep->ep.ops = &usbhsg_ep_ops;
INIT_LIST_HEAD(&uep->ep.ep_list);
- INIT_LIST_HEAD(&uep->list);
/* init DCP */
if (usbhsg_is_dcp(uep)) {
@@ -1364,22 +944,33 @@ int __devinit usbhs_mod_gadget_probe(struct usbhs_priv *priv)
}
}
- the_controller = gpriv;
+ usbhsg_controller_register(gpriv);
+
+ ret = usb_add_gadget_udc(dev, &gpriv->gadget);
+ if (ret)
+ goto err_add_udc;
+
dev_info(dev, "gadget probed\n");
return 0;
+err_add_udc:
+ kfree(gpriv->uep);
usbhs_mod_gadget_probe_err_gpriv:
kfree(gpriv);
- return -ENOMEM;
+ return ret;
}
void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv)
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+ usb_del_gadget_udc(&gpriv->gadget);
+
+ usbhsg_controller_unregister(gpriv);
+
kfree(gpriv->uep);
kfree(gpriv);
}
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index bc4521c5426..1b14cae4570 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -15,7 +15,6 @@
*
*/
#include <linux/delay.h>
-#include <linux/io.h>
#include <linux/slab.h>
#include "./common.h"
#include "./pipe.h"
@@ -23,13 +22,8 @@
/*
* macros
*/
-#define usbhsp_priv_to_pipeinfo(pr) (&(pr)->pipe_info)
-#define usbhsp_pipe_to_priv(p) ((p)->priv)
-
#define usbhsp_addr_offset(p) ((usbhs_pipe_number(p) - 1) * 2)
-#define usbhsp_is_dcp(p) ((p)->priv->pipe_info.pipe == (p))
-
#define usbhsp_flags_set(p, f) ((p)->flags |= USBHS_PIPE_FLAGS_##f)
#define usbhsp_flags_clr(p, f) ((p)->flags &= ~USBHS_PIPE_FLAGS_##f)
#define usbhsp_flags_has(p, f) ((p)->flags & USBHS_PIPE_FLAGS_##f)
@@ -77,10 +71,10 @@ void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
*/
static void usbhsp_pipectrl_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
{
- struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
int offset = usbhsp_addr_offset(pipe);
- if (usbhsp_is_dcp(pipe))
+ if (usbhs_pipe_is_dcp(pipe))
usbhs_bset(priv, DCPCTR, mask, val);
else
usbhs_bset(priv, PIPEnCTR + offset, mask, val);
@@ -88,10 +82,10 @@ static void usbhsp_pipectrl_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
static u16 usbhsp_pipectrl_get(struct usbhs_pipe *pipe)
{
- struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
int offset = usbhsp_addr_offset(pipe);
- if (usbhsp_is_dcp(pipe))
+ if (usbhs_pipe_is_dcp(pipe))
return usbhs_read(priv, DCPCTR);
else
return usbhs_read(priv, PIPEnCTR + offset);
@@ -104,9 +98,9 @@ static void __usbhsp_pipe_xxx_set(struct usbhs_pipe *pipe,
u16 dcp_reg, u16 pipe_reg,
u16 mask, u16 val)
{
- struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
- if (usbhsp_is_dcp(pipe))
+ if (usbhs_pipe_is_dcp(pipe))
usbhs_bset(priv, dcp_reg, mask, val);
else
usbhs_bset(priv, pipe_reg, mask, val);
@@ -115,9 +109,9 @@ static void __usbhsp_pipe_xxx_set(struct usbhs_pipe *pipe,
static u16 __usbhsp_pipe_xxx_get(struct usbhs_pipe *pipe,
u16 dcp_reg, u16 pipe_reg)
{
- struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
- if (usbhsp_is_dcp(pipe))
+ if (usbhs_pipe_is_dcp(pipe))
return usbhs_read(priv, dcp_reg);
else
return usbhs_read(priv, pipe_reg);
@@ -136,7 +130,7 @@ static void usbhsp_pipe_cfg_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
*/
static void usbhsp_pipe_buf_set(struct usbhs_pipe *pipe, u16 mask, u16 val)
{
- if (usbhsp_is_dcp(pipe))
+ if (usbhs_pipe_is_dcp(pipe))
return;
__usbhsp_pipe_xxx_set(pipe, 0, PIPEBUF, mask, val);
@@ -160,7 +154,7 @@ static u16 usbhsp_pipe_maxp_get(struct usbhs_pipe *pipe)
*/
static void usbhsp_pipe_select(struct usbhs_pipe *pipe)
{
- struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
/*
* On pipe, this is necessary before
@@ -182,7 +176,7 @@ static void usbhsp_pipe_select(struct usbhs_pipe *pipe)
static int usbhsp_pipe_barrier(struct usbhs_pipe *pipe)
{
- struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
int timeout = 1024;
u16 val;
@@ -205,7 +199,7 @@ static int usbhsp_pipe_barrier(struct usbhs_pipe *pipe)
* - "Pipe Control Registers Switching Procedure"
*/
usbhs_write(priv, CFIFOSEL, 0);
- usbhs_fifo_disable(pipe);
+ usbhs_pipe_disable(pipe);
do {
val = usbhsp_pipectrl_get(pipe);
@@ -220,7 +214,7 @@ static int usbhsp_pipe_barrier(struct usbhs_pipe *pipe)
return -EBUSY;
}
-static int usbhsp_pipe_is_accessible(struct usbhs_pipe *pipe)
+int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe)
{
u16 val;
@@ -253,7 +247,7 @@ static void __usbhsp_pid_try_nak_if_stall(struct usbhs_pipe *pipe)
}
}
-void usbhs_fifo_disable(struct usbhs_pipe *pipe)
+void usbhs_pipe_disable(struct usbhs_pipe *pipe)
{
int timeout = 1024;
u16 val;
@@ -273,7 +267,7 @@ void usbhs_fifo_disable(struct usbhs_pipe *pipe)
} while (timeout--);
}
-void usbhs_fifo_enable(struct usbhs_pipe *pipe)
+void usbhs_pipe_enable(struct usbhs_pipe *pipe)
{
/* see "Pipe n Control Register" - "PID" */
__usbhsp_pid_try_nak_if_stall(pipe);
@@ -281,7 +275,7 @@ void usbhs_fifo_enable(struct usbhs_pipe *pipe)
usbhsp_pipectrl_set(pipe, PID_MASK, PID_BUF);
}
-void usbhs_fifo_stall(struct usbhs_pipe *pipe)
+void usbhs_pipe_stall(struct usbhs_pipe *pipe)
{
u16 pid = usbhsp_pipectrl_get(pipe);
@@ -302,191 +296,6 @@ void usbhs_fifo_stall(struct usbhs_pipe *pipe)
}
/*
- * CFIFO ctrl
- */
-void usbhs_fifo_send_terminator(struct usbhs_pipe *pipe)
-{
- struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
-
- usbhs_bset(priv, CFIFOCTR, BVAL, BVAL);
-}
-
-static void usbhsp_fifo_clear(struct usbhs_pipe *pipe)
-{
- struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
-
- usbhs_write(priv, CFIFOCTR, BCLR);
-}
-
-static int usbhsp_fifo_barrier(struct usbhs_priv *priv)
-{
- int timeout = 1024;
-
- do {
- /* The FIFO port is accessible */
- if (usbhs_read(priv, CFIFOCTR) & FRDY)
- return 0;
-
- udelay(10);
- } while (timeout--);
-
- return -EBUSY;
-}
-
-static int usbhsp_fifo_rcv_len(struct usbhs_priv *priv)
-{
- return usbhs_read(priv, CFIFOCTR) & DTLN_MASK;
-}
-
-static int usbhsp_fifo_select(struct usbhs_pipe *pipe, int write)
-{
- struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
- struct device *dev = usbhs_priv_to_dev(priv);
- int timeout = 1024;
- u16 mask = ((1 << 5) | 0xF); /* mask of ISEL | CURPIPE */
- u16 base = usbhs_pipe_number(pipe); /* CURPIPE */
-
- if (usbhsp_is_dcp(pipe))
- base |= (1 == write) << 5; /* ISEL */
-
- /* "base" will be used below */
- usbhs_write(priv, CFIFOSEL, base | MBW_32);
-
- /* check ISEL and CURPIPE value */
- while (timeout--) {
- if (base == (mask & usbhs_read(priv, CFIFOSEL)))
- return 0;
- udelay(10);
- }
-
- dev_err(dev, "fifo select error\n");
-
- return -EIO;
-}
-
-int usbhs_fifo_prepare_write(struct usbhs_pipe *pipe)
-{
- return usbhsp_fifo_select(pipe, 1);
-}
-
-int usbhs_fifo_write(struct usbhs_pipe *pipe, u8 *buf, int len)
-{
- struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
- void __iomem *addr = priv->base + CFIFO;
- int maxp = usbhs_pipe_get_maxpacket(pipe);
- int total_len;
- int i, ret;
-
- ret = usbhsp_pipe_is_accessible(pipe);
- if (ret < 0)
- return ret;
-
- ret = usbhsp_fifo_select(pipe, 1);
- if (ret < 0)
- return ret;
-
- ret = usbhsp_fifo_barrier(priv);
- if (ret < 0)
- return ret;
-
- len = min(len, maxp);
- total_len = len;
-
- /*
- * FIXME
- *
- * 32-bit access only
- */
- if (len >= 4 &&
- !((unsigned long)buf & 0x03)) {
- iowrite32_rep(addr, buf, len / 4);
- len %= 4;
- buf += total_len - len;
- }
-
- /* the rest operation */
- for (i = 0; i < len; i++)
- iowrite8(buf[i], addr + (0x03 - (i & 0x03)));
-
- if (total_len < maxp)
- usbhs_fifo_send_terminator(pipe);
-
- return total_len;
-}
-
-int usbhs_fifo_prepare_read(struct usbhs_pipe *pipe)
-{
- int ret;
-
- /*
- * select pipe and enable it to prepare packet receive
- */
- ret = usbhsp_fifo_select(pipe, 0);
- if (ret < 0)
- return ret;
-
- usbhs_fifo_enable(pipe);
-
- return ret;
-}
-
-int usbhs_fifo_read(struct usbhs_pipe *pipe, u8 *buf, int len)
-{
- struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
- void __iomem *addr = priv->base + CFIFO;
- int rcv_len;
- int i, ret;
- int total_len;
- u32 data = 0;
-
- ret = usbhsp_fifo_select(pipe, 0);
- if (ret < 0)
- return ret;
-
- ret = usbhsp_fifo_barrier(priv);
- if (ret < 0)
- return ret;
-
- rcv_len = usbhsp_fifo_rcv_len(priv);
-
- /*
- * Buffer clear if Zero-Length packet
- *
- * see
- * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
- */
- if (0 == rcv_len) {
- usbhsp_fifo_clear(pipe);
- return 0;
- }
-
- len = min(rcv_len, len);
- total_len = len;
-
- /*
- * FIXME
- *
- * 32-bit access only
- */
- if (len >= 4 &&
- !((unsigned long)buf & 0x03)) {
- ioread32_rep(addr, buf, len / 4);
- len %= 4;
- buf += rcv_len - len;
- }
-
- /* the rest operation */
- for (i = 0; i < len; i++) {
- if (!(i & 0x03))
- data = ioread32(addr);
-
- buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
- }
-
- return total_len;
-}
-
-/*
* pipe setup
*/
static int usbhsp_possible_double_buffer(struct usbhs_pipe *pipe)
@@ -519,7 +328,7 @@ static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe,
};
int is_double = usbhsp_possible_double_buffer(pipe);
- if (usbhsp_is_dcp(pipe))
+ if (usbhs_pipe_is_dcp(pipe))
return -EINVAL;
/*
@@ -550,12 +359,15 @@ static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe,
/* DIR */
if (usb_endpoint_dir_in(desc))
- usbhsp_flags_set(pipe, IS_DIR_IN);
+ usbhsp_flags_set(pipe, IS_DIR_HOST);
if ((is_host && usb_endpoint_dir_out(desc)) ||
(!is_host && usb_endpoint_dir_in(desc)))
dir |= DIR_OUT;
+ if (!dir)
+ usbhsp_flags_set(pipe, IS_DIR_IN);
+
/* SHTNAK */
if (usbhsp_type_is(pipe, USB_ENDPOINT_XFER_BULK) &&
!dir)
@@ -587,8 +399,8 @@ static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe,
const struct usb_endpoint_descriptor *desc,
int is_host)
{
- struct usbhs_priv *priv = usbhsp_pipe_to_priv(pipe);
- struct usbhs_pipe_info *info = usbhsp_priv_to_pipeinfo(priv);
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
struct device *dev = usbhs_priv_to_dev(priv);
int pipe_num = usbhs_pipe_number(pipe);
int is_double = usbhsp_possible_double_buffer(pipe);
@@ -666,7 +478,7 @@ static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe,
*/
int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe)
{
- u16 mask = usbhsp_is_dcp(pipe) ? DCP_MAXP_MASK : PIPE_MAXP_MASK;
+ u16 mask = usbhs_pipe_is_dcp(pipe) ? DCP_MAXP_MASK : PIPE_MAXP_MASK;
usbhsp_pipe_select(pipe);
@@ -678,11 +490,22 @@ int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe)
return usbhsp_flags_has(pipe, IS_DIR_IN);
}
+int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe)
+{
+ return usbhsp_flags_has(pipe, IS_DIR_HOST);
+}
+
void usbhs_pipe_clear_sequence(struct usbhs_pipe *pipe)
{
usbhsp_pipectrl_set(pipe, SQCLR, SQCLR);
}
+void usbhs_pipe_clear(struct usbhs_pipe *pipe)
+{
+ usbhsp_pipectrl_set(pipe, ACLRM, ACLRM);
+ usbhsp_pipectrl_set(pipe, ACLRM, 0);
+}
+
static struct usbhs_pipe *usbhsp_get_pipe(struct usbhs_priv *priv, u32 type)
{
struct usbhs_pipe *pos, *pipe;
@@ -714,12 +537,20 @@ static struct usbhs_pipe *usbhsp_get_pipe(struct usbhs_priv *priv, u32 type)
return pipe;
}
-void usbhs_pipe_init(struct usbhs_priv *priv)
+void usbhs_pipe_init(struct usbhs_priv *priv,
+ void (*done)(struct usbhs_pkt *pkt),
+ int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map))
{
- struct usbhs_pipe_info *info = usbhsp_priv_to_pipeinfo(priv);
+ struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
+ struct device *dev = usbhs_priv_to_dev(priv);
struct usbhs_pipe *pipe;
int i;
+ if (!done) {
+ dev_err(dev, "no done function\n");
+ return;
+ }
+
/*
* FIXME
*
@@ -738,10 +569,16 @@ void usbhs_pipe_init(struct usbhs_priv *priv)
info->bufnmb_last++;
usbhsp_flags_init(pipe);
+ pipe->fifo = NULL;
pipe->mod_private = NULL;
+ INIT_LIST_HEAD(&pipe->list);
- usbhsp_fifo_clear(pipe);
+ /* pipe force init */
+ usbhs_pipe_clear(pipe);
}
+
+ info->done = done;
+ info->dma_map_ctrl = dma_map_ctrl;
}
struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
@@ -761,7 +598,9 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
return NULL;
}
- usbhs_fifo_disable(pipe);
+ INIT_LIST_HEAD(&pipe->list);
+
+ usbhs_pipe_disable(pipe);
/* make sure pipe is not busy */
ret = usbhsp_pipe_barrier(pipe);
@@ -774,11 +613,6 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
pipebuf = usbhsp_setup_pipebuff(pipe, desc, is_host);
pipemaxp = usbhsp_setup_pipemaxp(pipe, desc, is_host);
- /* buffer clear
- * see PIPECFG :: BFRE */
- usbhsp_pipectrl_set(pipe, ACLRM, ACLRM);
- usbhsp_pipectrl_set(pipe, ACLRM, 0);
-
usbhsp_pipe_select(pipe);
usbhsp_pipe_cfg_set(pipe, 0xFFFF, pipecfg);
usbhsp_pipe_buf_set(pipe, 0xFFFF, pipebuf);
@@ -794,6 +628,18 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
return pipe;
}
+void usbhs_pipe_select_fifo(struct usbhs_pipe *pipe, struct usbhs_fifo *fifo)
+{
+ if (pipe->fifo)
+ pipe->fifo->pipe = NULL;
+
+ pipe->fifo = fifo;
+
+ if (fifo)
+ fifo->pipe = pipe;
+}
+
+
/*
* dcp control
*/
@@ -813,25 +659,25 @@ struct usbhs_pipe *usbhs_dcp_malloc(struct usbhs_priv *priv)
usbhsp_pipe_select(pipe);
usbhs_pipe_clear_sequence(pipe);
+ INIT_LIST_HEAD(&pipe->list);
return pipe;
}
void usbhs_dcp_control_transfer_done(struct usbhs_pipe *pipe)
{
- WARN_ON(!usbhsp_is_dcp(pipe));
+ WARN_ON(!usbhs_pipe_is_dcp(pipe));
- usbhs_fifo_enable(pipe);
+ usbhs_pipe_enable(pipe);
usbhsp_pipectrl_set(pipe, CCPL, CCPL);
}
-
/*
* pipe module function
*/
int usbhs_pipe_probe(struct usbhs_priv *priv)
{
- struct usbhs_pipe_info *info = usbhsp_priv_to_pipeinfo(priv);
+ struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
struct usbhs_pipe *pipe;
struct device *dev = usbhs_priv_to_dev(priv);
u32 *pipe_type = usbhs_get_dparam(priv, pipe_type);
@@ -868,7 +714,7 @@ int usbhs_pipe_probe(struct usbhs_priv *priv)
void usbhs_pipe_remove(struct usbhs_priv *priv)
{
- struct usbhs_pipe_info *info = usbhsp_priv_to_pipeinfo(priv);
+ struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
kfree(info->pipe);
}
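
As a reading aid: the reworked usbhs_pipe_init() above now takes a packet-completion callback and a DMA map/unmap callback, and bails out with an error if "done" is NULL. A minimal sketch of a caller, using only the prototype added in this patch; the function names and bodies below are hypothetical placeholders, not code from the driver.

#include "pipe.h"	/* renesas_usbhs local header, as in this patch */

/* hypothetical completion handler for finished packets */
static void example_pkt_done(struct usbhs_pkt *pkt)
{
	/* hand the finished packet back to the mod-specific glue here */
}

/* hypothetical DMA hook; the return-value semantics depend on the fifo code */
static int example_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
	return 0;	/* PIO only in this sketch, nothing to (un)map */
}

static void example_mod_start(struct usbhs_priv *priv)
{
	/* usbhs_pipe_init() logs "no done function" and returns if done is NULL */
	usbhs_pipe_init(priv, example_pkt_done, example_dma_map_ctrl);
}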
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 1cca9b7fb26..41534cb0e73 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -18,6 +18,7 @@
#define RENESAS_USB_PIPE_H
#include "./common.h"
+#include "./fifo.h"
/*
* struct
@@ -26,10 +27,13 @@ struct usbhs_pipe {
u32 pipe_type; /* USB_ENDPOINT_XFER_xxx */
struct usbhs_priv *priv;
+ struct usbhs_fifo *fifo;
+ struct list_head list;
u32 flags;
#define USBHS_PIPE_FLAGS_IS_USED (1 << 0)
#define USBHS_PIPE_FLAGS_IS_DIR_IN (1 << 1)
+#define USBHS_PIPE_FLAGS_IS_DIR_HOST (1 << 2)
void *mod_private;
};
@@ -38,6 +42,9 @@ struct usbhs_pipe_info {
struct usbhs_pipe *pipe;
int size; /* array size of "pipe" */
int bufnmb_last; /* FIXME : driver needs good allocator */
+
+ void (*done)(struct usbhs_pkt *pkt);
+ int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map);
};
/*
@@ -55,25 +62,9 @@ struct usbhs_pipe_info {
__usbhs_for_each_pipe(0, pos, &((priv)->pipe_info), i)
/*
- * pipe module probe / remove
+ * data
*/
-int usbhs_pipe_probe(struct usbhs_priv *priv);
-void usbhs_pipe_remove(struct usbhs_priv *priv);
-
-/*
- * cfifo
- */
-int usbhs_fifo_write(struct usbhs_pipe *pipe, u8 *buf, int len);
-int usbhs_fifo_read(struct usbhs_pipe *pipe, u8 *buf, int len);
-int usbhs_fifo_prepare_write(struct usbhs_pipe *pipe);
-int usbhs_fifo_prepare_read(struct usbhs_pipe *pipe);
-
-void usbhs_fifo_enable(struct usbhs_pipe *pipe);
-void usbhs_fifo_disable(struct usbhs_pipe *pipe);
-void usbhs_fifo_stall(struct usbhs_pipe *pipe);
-
-void usbhs_fifo_send_terminator(struct usbhs_pipe *pipe);
-
+#define usbhs_priv_to_pipeinfo(pr) (&(pr)->pipe_info)
/*
* usb request
@@ -87,13 +78,27 @@ void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req);
struct usbhs_pipe
*usbhs_pipe_malloc(struct usbhs_priv *priv,
const struct usb_endpoint_descriptor *desc);
-
+int usbhs_pipe_probe(struct usbhs_priv *priv);
+void usbhs_pipe_remove(struct usbhs_priv *priv);
int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe);
-void usbhs_pipe_init(struct usbhs_priv *priv);
+int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe);
+void usbhs_pipe_init(struct usbhs_priv *priv,
+ void (*done)(struct usbhs_pkt *pkt),
+ int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map));
int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe);
void usbhs_pipe_clear_sequence(struct usbhs_pipe *pipe);
-
+void usbhs_pipe_clear(struct usbhs_pipe *pipe);
+int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
+void usbhs_pipe_enable(struct usbhs_pipe *pipe);
+void usbhs_pipe_disable(struct usbhs_pipe *pipe);
+void usbhs_pipe_stall(struct usbhs_pipe *pipe);
+void usbhs_pipe_select_fifo(struct usbhs_pipe *pipe, struct usbhs_fifo *fifo);
+
+#define usbhs_pipe_to_priv(p) ((p)->priv)
#define usbhs_pipe_number(p) (int)((p) - (p)->priv->pipe_info.pipe)
+#define usbhs_pipe_is_dcp(p) ((p)->priv->pipe_info.pipe == (p))
+#define usbhs_pipe_to_fifo(p) ((p)->fifo)
+#define usbhs_pipe_is_busy(p) usbhs_pipe_to_fifo(p)
/*
* dcp control
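
The helpers appended to pipe.h above rely on the DCP being element 0 of the pipe_info array, so both the pipe number and the DCP test reduce to pointer arithmetic. A standalone model of that idea (plain C with stand-in types; the real macros go through pipe->priv->pipe_info):

#include <stdio.h>

struct model_pipe { int unused; };
struct model_pipe_info { struct model_pipe pipe[10]; };

/* same shape as usbhs_pipe_number() / usbhs_pipe_is_dcp() above */
#define model_pipe_number(info, p)	((int)((p) - (info)->pipe))
#define model_pipe_is_dcp(info, p)	((info)->pipe == (p))

int main(void)
{
	struct model_pipe_info info;
	struct model_pipe *dcp = &info.pipe[0];	/* pipe 0 is always the DCP */
	struct model_pipe *bulk = &info.pipe[3];

	printf("dcp : number=%d is_dcp=%d\n",
	       model_pipe_number(&info, dcp), model_pipe_is_dcp(&info, dcp));
	printf("bulk: number=%d is_dcp=%d\n",
	       model_pipe_number(&info, bulk), model_pipe_is_dcp(&info, bulk));
	return 0;
}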
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 16272897755..78a2cf9551c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -151,6 +151,7 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
* /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
*/
static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
@@ -179,6 +180,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_232RL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_8U2232C_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_4232H_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_232H_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) },
@@ -848,7 +850,8 @@ static const char *ftdi_chip_name[] = {
[FT2232C] = "FT2232C",
[FT232RL] = "FT232RL",
[FT2232H] = "FT2232H",
- [FT4232H] = "FT4232H"
+ [FT4232H] = "FT4232H",
+ [FT232H] = "FT232H"
};
@@ -1168,7 +1171,8 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
break;
case FT2232H: /* FT2232H chip */
case FT4232H: /* FT4232H chip */
- if ((baud <= 12000000) & (baud >= 1200)) {
+ case FT232H: /* FT232H chip */
+ if ((baud <= 12000000) && (baud >= 1200)) {
div_value = ftdi_2232h_baud_to_divisor(baud);
} else if (baud < 1200) {
div_value = ftdi_232bm_baud_to_divisor(baud);
@@ -1202,7 +1206,10 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
urb_index_value = get_ftdi_divisor(tty, port);
urb_value = (__u16)urb_index_value;
urb_index = (__u16)(urb_index_value >> 16);
- if (priv->interface) { /* FT2232C */
+ if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) ||
+ (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) {
+ /* Probably the BM type needs the MSB of the encoded fractional
+ * divider also moved like for the chips above. Any info? */
urb_index = (__u16)((urb_index << 8) | priv->interface);
}
@@ -1429,9 +1436,12 @@ static void ftdi_determine_type(struct usb_serial_port *port)
} else if (version < 0x600) {
/* Assume it's an FT232BM (or FT245BM) */
priv->chip_type = FT232BM;
- } else {
- /* Assume it's an FT232R */
+ } else if (version < 0x900) {
+ /* Assume it's an FT232RL */
priv->chip_type = FT232RL;
+ } else {
+ /* Assume it's an FT232H */
+ priv->chip_type = FT232H;
}
dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
}
@@ -1559,7 +1569,8 @@ static int create_sysfs_attrs(struct usb_serial_port *port)
priv->chip_type == FT2232C ||
priv->chip_type == FT232RL ||
priv->chip_type == FT2232H ||
- priv->chip_type == FT4232H)) {
+ priv->chip_type == FT4232H ||
+ priv->chip_type == FT232H)) {
retval = device_create_file(&port->dev,
&dev_attr_latency_timer);
}
@@ -1580,7 +1591,8 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
priv->chip_type == FT2232C ||
priv->chip_type == FT232RL ||
priv->chip_type == FT2232H ||
- priv->chip_type == FT4232H) {
+ priv->chip_type == FT4232H ||
+ priv->chip_type == FT232H) {
device_remove_file(&port->dev, &dev_attr_latency_timer);
}
}
@@ -2212,6 +2224,7 @@ static int ftdi_tiocmget(struct tty_struct *tty)
case FT232RL:
case FT2232H:
case FT4232H:
+ case FT232H:
len = 2;
break;
default:
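
For orientation, the new detection order in ftdi_determine_type() above: once the earlier branches have been ruled out, bcdDevice below 0x600 is still treated as FT232BM/FT245BM, 0x600 to 0x8FF as FT232RL, and 0x900 and above as the new FT232H. A tiny standalone table of just that tail of the function (thresholds from the hunk; everything else is illustrative):

#include <stdio.h>

/* mirrors only the final else-if chain shown in the hunk */
static const char *guess_chip(unsigned int bcd_device)
{
	if (bcd_device < 0x600)
		return "FT232BM (or FT245BM)";
	else if (bcd_device < 0x900)
		return "FT232RL";
	else
		return "FT232H";
}

int main(void)
{
	unsigned int samples[] = { 0x0400, 0x0600, 0x0700, 0x0900, 0x1000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("bcdDevice %#06x -> %s\n", samples[i], guess_chip(samples[i]));
	return 0;
}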
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 213fe3d6128..19584faa86f 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -156,7 +156,8 @@ enum ftdi_chip_type {
FT2232C = 4,
FT232RL = 5,
FT2232H = 6,
- FT4232H = 7
+ FT4232H = 7,
+ FT232H = 8
};
enum ftdi_sio_baudrate {
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index ab1fcdf3c37..bf5227ad3ef 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -22,6 +22,7 @@
#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
+#define FTDI_232H_PID 0x6014 /* Single channel hi-speed device */
#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
@@ -1158,4 +1159,8 @@
/* USB-Nano-485*/
#define FTDI_CTI_NANO_PID 0xF60B
-
+/*
+ * ZeitControl cardsystems GmbH rfid-readers http://zeitconrol.de
+ */
+/* TagTracer MIFARE */
+#define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID 0xF7C0
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index b0a7a9e909a..1a49ca9c8ea 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -34,7 +34,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 60b25d8ea0e..81565619891 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -148,6 +148,10 @@ static void option_instat_callback(struct urb *urb);
#define HUAWEI_PRODUCT_K4505 0x1464
#define HUAWEI_PRODUCT_K3765 0x1465
#define HUAWEI_PRODUCT_E14AC 0x14AC
+#define HUAWEI_PRODUCT_K3770 0x14C9
+#define HUAWEI_PRODUCT_K3771 0x14CA
+#define HUAWEI_PRODUCT_K4510 0x14CB
+#define HUAWEI_PRODUCT_K4511 0x14CC
#define HUAWEI_PRODUCT_ETS1220 0x1803
#define HUAWEI_PRODUCT_E353 0x1506
@@ -547,6 +551,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 30461fcc220..1d33260de01 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -91,6 +91,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
+ { USB_DEVICE(WINCHIPHEAD_VENDOR_ID, WINCHIPHEAD_USBSER_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -342,10 +343,28 @@ static void pl2303_set_termios(struct tty_struct *tty,
baud = 6000000;
}
dbg("%s - baud set = %d", __func__, baud);
- buf[0] = baud & 0xff;
- buf[1] = (baud >> 8) & 0xff;
- buf[2] = (baud >> 16) & 0xff;
- buf[3] = (baud >> 24) & 0xff;
+ if (baud <= 115200) {
+ buf[0] = baud & 0xff;
+ buf[1] = (baud >> 8) & 0xff;
+ buf[2] = (baud >> 16) & 0xff;
+ buf[3] = (baud >> 24) & 0xff;
+ } else {
+ /* apparently the formula for higher speeds is:
+ * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
+ */
+ unsigned tmp = 12*1000*1000*32 / baud;
+ buf[3] = 0x80;
+ buf[2] = 0;
+ buf[1] = (tmp >= 256);
+ while (tmp >= 256) {
+ tmp >>= 2;
+ buf[1] <<= 1;
+ }
+ if (tmp > 256) {
+ tmp %= 256;
+ }
+ buf[0] = tmp;
+ }
}
/* For reference buf[4]=0 is 1 stop bits */
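
The added else-branch in pl2303_set_termios() encodes rates above 115200 using the scheme in the comment, baudrate = 12M * 32 / (2^buf[1]) / buf[0]. A self-contained round-trip of that arithmetic (standalone C mirroring the hunk's steps; the "tmp %= 256" fix-up is dropped here because the loop already leaves tmp below 256):

#include <stdio.h>

/* same encoding steps as the branch above, then decode with the comment's formula */
static unsigned int encode_and_decode(unsigned int baud, unsigned char buf[4])
{
	unsigned int tmp = 12 * 1000 * 1000 * 32 / baud;

	buf[3] = 0x80;
	buf[2] = 0;
	buf[1] = (tmp >= 256);
	while (tmp >= 256) {
		tmp >>= 2;
		buf[1] <<= 1;
	}
	buf[0] = tmp;

	return 12 * 1000 * 1000 * 32 / (1u << buf[1]) / buf[0];
}

int main(void)
{
	unsigned int rates[] = { 230400, 460800, 921600, 2000000, 6000000 };
	unsigned char buf[4];
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		unsigned int actual = encode_and_decode(rates[i], buf);
		printf("requested %7u -> buf[1]=%u buf[0]=%3u -> actual %7u\n",
		       rates[i], (unsigned int)buf[1], (unsigned int)buf[0], actual);
	}
	return 0;
}

With 230400, for example, this yields buf[1] = 4 and buf[0] = 104, giving an actual rate of about 230769, within a fraction of a percent of the request.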
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 1b025f75daf..ca0d237683b 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -144,3 +144,7 @@
/* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
#define ADLINK_VENDOR_ID 0x0b63
#define ADLINK_ND6530_PRODUCT_ID 0x6530
+
+/* WinChipHead USB->RS 232 adapter */
+#define WINCHIPHEAD_VENDOR_ID 0x4348
+#define WINCHIPHEAD_USBSER_PRODUCT_ID 0x5523
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 54a9dab1f33..aeccc7f0a93 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -45,6 +45,7 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
{USB_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
{USB_DEVICE(0x05c6, 0x9008)}, /* Generic Gobi QDL device */
+ {USB_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
{USB_DEVICE(0x05c6, 0x9201)}, /* Generic Gobi QDL device */
{USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */
{USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */
@@ -78,6 +79,7 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
+ {USB_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */
{USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */
{USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
{USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index c6d92a53008..ea8445689c8 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1745,6 +1745,7 @@ static int ti_download_firmware(struct ti_device *tdev)
}
if (fw_p->size > TI_FIRMWARE_BUF_SIZE) {
dev_err(&dev->dev, "%s - firmware too large %zu\n", __func__, fw_p->size);
+ release_firmware(fw_p);
return -ENOENT;
}
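
The one-line change above plugs a leak: on the "firmware too large" error path the blob obtained with request_firmware() was returned without being freed. The usual pairing, sketched with the standard firmware API (the function and firmware names below are placeholders, not the driver's real code):

#include <linux/device.h>
#include <linux/firmware.h>

static int example_load_blob(struct device *dev, size_t max_size)
{
	const struct firmware *fw_p;
	int err;

	err = request_firmware(&fw_p, "example.fw", dev);	/* name is a placeholder */
	if (err)
		return err;

	if (fw_p->size > max_size) {
		release_firmware(fw_p);	/* the fix above: drop the blob on every exit path */
		return -ENOENT;
	}

	/* ... consume fw_p->data / fw_p->size ... */

	release_firmware(fw_p);
	return 0;
}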
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 97987255be7..bedc4b9f2ac 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -40,6 +40,10 @@ config USB_STORAGE_REALTEK
If this driver is compiled as a module, it will be named ums-realtek.
+config REALTEK_AUTOPM
+ bool "Realtek Card Reader autosuspend support"
+ depends on USB_STORAGE_REALTEK && PM_RUNTIME
+ default y
config USB_STORAGE_DATAFAB
tristate "Datafab Compact Flash Reader support"
@@ -187,8 +191,8 @@ config USB_STORAGE_ENE_UB6250
depends on USB && SCSI
depends on USB_STORAGE
---help---
- Say Y here if you wish to control a ENE SD Card reader.
- To use SM/MS card, please build driver/staging/keucr/keucr.ko
+ Say Y here if you wish to control an ENE SD/MS Card reader.
+ To use an SM card, please build drivers/staging/keucr/keucr.ko
This option depends on 'SCSI' support being enabled, but you
probably also need 'SCSI device support: SCSI disk support'
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 31645afff5f..4dca3ef0668 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -100,6 +100,141 @@ static struct us_unusual_dev ene_ub6250_unusual_dev_list[] = {
#define FDIR_WRITE 0
#define FDIR_READ 1
+/* For MS Card */
+
+/* Status Register 1 */
+#define MS_REG_ST1_MB 0x80 /* media busy */
+#define MS_REG_ST1_FB1 0x40 /* flush busy 1 */
+#define MS_REG_ST1_DTER 0x20 /* error on data(corrected) */
+#define MS_REG_ST1_UCDT 0x10 /* unable to correct data */
+#define MS_REG_ST1_EXER 0x08 /* error on extra(corrected) */
+#define MS_REG_ST1_UCEX 0x04 /* unable to correct extra */
+#define MS_REG_ST1_FGER 0x02 /* error on overwrite flag(corrected) */
+#define MS_REG_ST1_UCFG 0x01 /* unable to correct overwrite flag */
+#define MS_REG_ST1_DEFAULT (MS_REG_ST1_MB | MS_REG_ST1_FB1 | MS_REG_ST1_DTER | MS_REG_ST1_UCDT | MS_REG_ST1_EXER | MS_REG_ST1_UCEX | MS_REG_ST1_FGER | MS_REG_ST1_UCFG)
+
+/* Overwrite Area */
+#define MS_REG_OVR_BKST 0x80 /* block status */
+#define MS_REG_OVR_BKST_OK MS_REG_OVR_BKST /* OK */
+#define MS_REG_OVR_BKST_NG 0x00 /* NG */
+#define MS_REG_OVR_PGST0 0x40 /* page status */
+#define MS_REG_OVR_PGST1 0x20
+#define MS_REG_OVR_PGST_MASK (MS_REG_OVR_PGST0 | MS_REG_OVR_PGST1)
+#define MS_REG_OVR_PGST_OK (MS_REG_OVR_PGST0 | MS_REG_OVR_PGST1) /* OK */
+#define MS_REG_OVR_PGST_NG MS_REG_OVR_PGST1 /* NG */
+#define MS_REG_OVR_PGST_DATA_ERROR 0x00 /* data error */
+#define MS_REG_OVR_UDST 0x10 /* update status */
+#define MS_REG_OVR_UDST_UPDATING 0x00 /* updating */
+#define MS_REG_OVR_UDST_NO_UPDATE MS_REG_OVR_UDST
+#define MS_REG_OVR_RESERVED 0x08
+#define MS_REG_OVR_DEFAULT (MS_REG_OVR_BKST_OK | MS_REG_OVR_PGST_OK | MS_REG_OVR_UDST_NO_UPDATE | MS_REG_OVR_RESERVED)
+
+/* Management Flag */
+#define MS_REG_MNG_SCMS0 0x20 /* serial copy management system */
+#define MS_REG_MNG_SCMS1 0x10
+#define MS_REG_MNG_SCMS_MASK (MS_REG_MNG_SCMS0 | MS_REG_MNG_SCMS1)
+#define MS_REG_MNG_SCMS_COPY_OK (MS_REG_MNG_SCMS0 | MS_REG_MNG_SCMS1)
+#define MS_REG_MNG_SCMS_ONE_COPY MS_REG_MNG_SCMS1
+#define MS_REG_MNG_SCMS_NO_COPY 0x00
+#define MS_REG_MNG_ATFLG 0x08 /* address transfer table flag */
+#define MS_REG_MNG_ATFLG_OTHER MS_REG_MNG_ATFLG /* other */
+#define MS_REG_MNG_ATFLG_ATTBL 0x00 /* address transfer table */
+#define MS_REG_MNG_SYSFLG 0x04 /* system flag */
+#define MS_REG_MNG_SYSFLG_USER MS_REG_MNG_SYSFLG /* user block */
+#define MS_REG_MNG_SYSFLG_BOOT 0x00 /* system block */
+#define MS_REG_MNG_RESERVED 0xc3
+#define MS_REG_MNG_DEFAULT (MS_REG_MNG_SCMS_COPY_OK | MS_REG_MNG_ATFLG_OTHER | MS_REG_MNG_SYSFLG_USER | MS_REG_MNG_RESERVED)
+
+
+#define MS_MAX_PAGES_PER_BLOCK 32
+#define MS_MAX_INITIAL_ERROR_BLOCKS 10
+#define MS_LIB_BITS_PER_BYTE 8
+
+#define MS_SYSINF_FORMAT_FAT 1
+#define MS_SYSINF_USAGE_GENERAL 0
+
+#define MS_SYSINF_MSCLASS_TYPE_1 1
+#define MS_SYSINF_PAGE_SIZE MS_BYTES_PER_PAGE /* fixed */
+
+#define MS_SYSINF_CARDTYPE_RDONLY 1
+#define MS_SYSINF_CARDTYPE_RDWR 2
+#define MS_SYSINF_CARDTYPE_HYBRID 3
+#define MS_SYSINF_SECURITY 0x01
+#define MS_SYSINF_SECURITY_NO_SUPPORT MS_SYSINF_SECURITY
+#define MS_SYSINF_SECURITY_SUPPORT 0
+
+#define MS_SYSINF_RESERVED1 1
+#define MS_SYSINF_RESERVED2 1
+
+#define MS_SYSENT_TYPE_INVALID_BLOCK 0x01
+#define MS_SYSENT_TYPE_CIS_IDI 0x0a /* CIS/IDI */
+
+#define SIZE_OF_KIRO 1024
+#define BYTE_MASK 0xff
+
+/* ms error code */
+#define MS_STATUS_WRITE_PROTECT 0x0106
+#define MS_STATUS_SUCCESS 0x0000
+#define MS_ERROR_FLASH_READ 0x8003
+#define MS_ERROR_FLASH_ERASE 0x8005
+#define MS_LB_ERROR 0xfff0
+#define MS_LB_BOOT_BLOCK 0xfff1
+#define MS_LB_INITIAL_ERROR 0xfff2
+#define MS_STATUS_SUCCESS_WITH_ECC 0xfff3
+#define MS_LB_ACQUIRED_ERROR 0xfff4
+#define MS_LB_NOT_USED_ERASED 0xfff5
+#define MS_NOCARD_ERROR 0xfff8
+#define MS_NO_MEMORY_ERROR 0xfff9
+#define MS_STATUS_INT_ERROR 0xfffa
+#define MS_STATUS_ERROR 0xfffe
+#define MS_LB_NOT_USED 0xffff
+
+#define MS_REG_MNG_SYSFLG 0x04 /* system flag */
+#define MS_REG_MNG_SYSFLG_USER MS_REG_MNG_SYSFLG /* user block */
+
+#define MS_BOOT_BLOCK_ID 0x0001
+#define MS_BOOT_BLOCK_FORMAT_VERSION 0x0100
+#define MS_BOOT_BLOCK_DATA_ENTRIES 2
+
+#define MS_NUMBER_OF_SYSTEM_ENTRY 4
+#define MS_NUMBER_OF_BOOT_BLOCK 2
+#define MS_BYTES_PER_PAGE 512
+#define MS_LOGICAL_BLOCKS_PER_SEGMENT 496
+#define MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT 494
+
+#define MS_PHYSICAL_BLOCKS_PER_SEGMENT 0x200 /* 512 */
+#define MS_PHYSICAL_BLOCKS_PER_SEGMENT_MASK 0x1ff
+
+/* overwrite area */
+#define MS_REG_OVR_BKST 0x80 /* block status */
+#define MS_REG_OVR_BKST_OK MS_REG_OVR_BKST /* OK */
+#define MS_REG_OVR_BKST_NG 0x00 /* NG */
+
+/* Status Register 1 */
+#define MS_REG_ST1_DTER 0x20 /* error on data(corrected) */
+#define MS_REG_ST1_EXER 0x08 /* error on extra(corrected) */
+#define MS_REG_ST1_FGER 0x02 /* error on overwrite flag(corrected) */
+
+/* MemoryStick Register */
+/* Status Register 0 */
+#define MS_REG_ST0_WP 0x01 /* write protected */
+#define MS_REG_ST0_WP_ON MS_REG_ST0_WP
+
+#define MS_LIB_CTRL_RDONLY 0
+#define MS_LIB_CTRL_WRPROTECT 1
+
+/*dphy->log table */
+#define ms_libconv_to_logical(pdx, PhyBlock) (((PhyBlock) >= (pdx)->MS_Lib.NumberOfPhyBlock) ? MS_STATUS_ERROR : (pdx)->MS_Lib.Phy2LogMap[PhyBlock])
+#define ms_libconv_to_physical(pdx, LogBlock) (((LogBlock) >= (pdx)->MS_Lib.NumberOfLogBlock) ? MS_STATUS_ERROR : (pdx)->MS_Lib.Log2PhyMap[LogBlock])
+
+#define ms_lib_ctrl_set(pdx, Flag) ((pdx)->MS_Lib.flags |= (1 << (Flag)))
+#define ms_lib_ctrl_reset(pdx, Flag) ((pdx)->MS_Lib.flags &= ~(1 << (Flag)))
+#define ms_lib_ctrl_check(pdx, Flag) ((pdx)->MS_Lib.flags & (1 << (Flag)))
+
+#define ms_lib_iswritable(pdx) ((ms_lib_ctrl_check((pdx), MS_LIB_CTRL_RDONLY) == 0) && (ms_lib_ctrl_check(pdx, MS_LIB_CTRL_WRPROTECT) == 0))
+#define ms_lib_clear_pagemap(pdx) memset((pdx)->MS_Lib.pagemap, 0, sizeof((pdx)->MS_Lib.pagemap))
+#define memstick_logaddr(logadr1, logadr0) ((((u16)(logadr1)) << 8) | (logadr0))
+
struct SD_STATUS {
u8 Insert:1;
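
The block of macros above keeps the MemoryStick translation state as two index tables (Phy2LogMap / Log2PhyMap) plus a flags word; everything else is range checks and bit tests. A standalone toy model of those helpers (simplified types; the real macros operate on struct ene_ub6250_info and its MS_Lib member):

#include <stdio.h>
#include <stdint.h>

#define MS_STATUS_ERROR		0xfffe
#define MS_LIB_CTRL_RDONLY	0
#define MS_LIB_CTRL_WRPROTECT	1

struct model_ms_lib {
	uint32_t flags;
	uint16_t NumberOfPhyBlock;
	uint16_t Phy2LogMap[8];	/* tiny table, demo only */
};

/* same shape as ms_libconv_to_logical() / ms_lib_ctrl_*() / ms_lib_iswritable() above */
#define model_conv_to_logical(l, phy) \
	(((phy) >= (l)->NumberOfPhyBlock) ? MS_STATUS_ERROR : (l)->Phy2LogMap[phy])
#define model_ctrl_set(l, f)	((l)->flags |= (1 << (f)))
#define model_ctrl_check(l, f)	((l)->flags & (1 << (f)))
#define model_iswritable(l) \
	((model_ctrl_check(l, MS_LIB_CTRL_RDONLY) == 0) && \
	 (model_ctrl_check(l, MS_LIB_CTRL_WRPROTECT) == 0))

int main(void)
{
	struct model_ms_lib lib = {
		.NumberOfPhyBlock = 8,
		.Phy2LogMap = { 3, 1, 4, 1, 5, 9, 2, 6 },
	};

	printf("phy 2 -> log %u\n", (unsigned int)model_conv_to_logical(&lib, 2));
	printf("phy 9 -> %#x (out of range)\n", (unsigned int)model_conv_to_logical(&lib, 9));

	printf("writable before: %d\n", model_iswritable(&lib) ? 1 : 0);
	model_ctrl_set(&lib, MS_LIB_CTRL_RDONLY);
	printf("writable after RDONLY: %d\n", model_iswritable(&lib) ? 1 : 0);
	return 0;
}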
@@ -132,6 +267,164 @@ struct SM_STATUS {
u8 IsMS:1;
};
+struct ms_bootblock_cis {
+ u8 bCistplDEVICE[6]; /* 0 */
+ u8 bCistplDEVICE0C[6]; /* 6 */
+ u8 bCistplJEDECC[4]; /* 12 */
+ u8 bCistplMANFID[6]; /* 16 */
+ u8 bCistplVER1[32]; /* 22 */
+ u8 bCistplFUNCID[4]; /* 54 */
+ u8 bCistplFUNCE0[4]; /* 58 */
+ u8 bCistplFUNCE1[5]; /* 62 */
+ u8 bCistplCONF[7]; /* 67 */
+ u8 bCistplCFTBLENT0[10];/* 74 */
+ u8 bCistplCFTBLENT1[8]; /* 84 */
+ u8 bCistplCFTBLENT2[12];/* 92 */
+ u8 bCistplCFTBLENT3[8]; /* 104 */
+ u8 bCistplCFTBLENT4[17];/* 112 */
+ u8 bCistplCFTBLENT5[8]; /* 129 */
+ u8 bCistplCFTBLENT6[17];/* 137 */
+ u8 bCistplCFTBLENT7[8]; /* 154 */
+ u8 bCistplNOLINK[3]; /* 162 */
+} ;
+
+struct ms_bootblock_idi {
+#define MS_IDI_GENERAL_CONF 0x848A
+ u16 wIDIgeneralConfiguration; /* 0 */
+ u16 wIDInumberOfCylinder; /* 1 */
+ u16 wIDIreserved0; /* 2 */
+ u16 wIDInumberOfHead; /* 3 */
+ u16 wIDIbytesPerTrack; /* 4 */
+ u16 wIDIbytesPerSector; /* 5 */
+ u16 wIDIsectorsPerTrack; /* 6 */
+ u16 wIDItotalSectors[2]; /* 7-8 high,low */
+ u16 wIDIreserved1[11]; /* 9-19 */
+ u16 wIDIbufferType; /* 20 */
+ u16 wIDIbufferSize; /* 21 */
+ u16 wIDIlongCmdECC; /* 22 */
+ u16 wIDIfirmVersion[4]; /* 23-26 */
+ u16 wIDImodelName[20]; /* 27-46 */
+ u16 wIDIreserved2; /* 47 */
+ u16 wIDIlongWordSupported; /* 48 */
+ u16 wIDIdmaSupported; /* 49 */
+ u16 wIDIreserved3; /* 50 */
+ u16 wIDIpioTiming; /* 51 */
+ u16 wIDIdmaTiming; /* 52 */
+ u16 wIDItransferParameter; /* 53 */
+ u16 wIDIformattedCylinder; /* 54 */
+ u16 wIDIformattedHead; /* 55 */
+ u16 wIDIformattedSectorsPerTrack;/* 56 */
+ u16 wIDIformattedTotalSectors[2];/* 57-58 */
+ u16 wIDImultiSector; /* 59 */
+ u16 wIDIlbaSectors[2]; /* 60-61 */
+ u16 wIDIsingleWordDMA; /* 62 */
+ u16 wIDImultiWordDMA; /* 63 */
+ u16 wIDIreserved4[192]; /* 64-255 */
+};
+
+struct ms_bootblock_sysent_rec {
+ u32 dwStart;
+ u32 dwSize;
+ u8 bType;
+ u8 bReserved[3];
+};
+
+struct ms_bootblock_sysent {
+ struct ms_bootblock_sysent_rec entry[MS_NUMBER_OF_SYSTEM_ENTRY];
+};
+
+struct ms_bootblock_sysinf {
+ u8 bMsClass; /* must be 1 */
+ u8 bCardType; /* see below */
+ u16 wBlockSize; /* n KB */
+ u16 wBlockNumber; /* number of physical block */
+ u16 wTotalBlockNumber; /* number of logical block */
+ u16 wPageSize; /* must be 0x200 */
+ u8 bExtraSize; /* 0x10 */
+ u8 bSecuritySupport;
+ u8 bAssemblyDate[8];
+ u8 bFactoryArea[4];
+ u8 bAssemblyMakerCode;
+ u8 bAssemblyMachineCode[3];
+ u16 wMemoryMakerCode;
+ u16 wMemoryDeviceCode;
+ u16 wMemorySize;
+ u8 bReserved1;
+ u8 bReserved2;
+ u8 bVCC;
+ u8 bVPP;
+ u16 wControllerChipNumber;
+ u16 wControllerFunction; /* New MS */
+ u8 bReserved3[9]; /* New MS */
+ u8 bParallelSupport; /* New MS */
+ u16 wFormatValue; /* New MS */
+ u8 bFormatType;
+ u8 bUsage;
+ u8 bDeviceType;
+ u8 bReserved4[22];
+ u8 bFUValue3;
+ u8 bFUValue4;
+ u8 bReserved5[15];
+};
+
+struct ms_bootblock_header {
+ u16 wBlockID;
+ u16 wFormatVersion;
+ u8 bReserved1[184];
+ u8 bNumberOfDataEntry;
+ u8 bReserved2[179];
+};
+
+struct ms_bootblock_page0 {
+ struct ms_bootblock_header header;
+ struct ms_bootblock_sysent sysent;
+ struct ms_bootblock_sysinf sysinf;
+};
+
+struct ms_bootblock_cis_idi {
+ union {
+ struct ms_bootblock_cis cis;
+ u8 dmy[256];
+ } cis;
+
+ union {
+ struct ms_bootblock_idi idi;
+ u8 dmy[256];
+ } idi;
+
+};
+
+/* ENE MS Lib struct */
+struct ms_lib_type_extdat {
+ u8 reserved;
+ u8 intr;
+ u8 status0;
+ u8 status1;
+ u8 ovrflg;
+ u8 mngflg;
+ u16 logadr;
+};
+
+struct ms_lib_ctrl {
+ u32 flags;
+ u32 BytesPerSector;
+ u32 NumberOfCylinder;
+ u32 SectorsPerCylinder;
+ u16 cardType; /* R/W, RO, Hybrid */
+ u16 blockSize;
+ u16 PagesPerBlock;
+ u16 NumberOfPhyBlock;
+ u16 NumberOfLogBlock;
+ u16 NumberOfSegment;
+ u16 *Phy2LogMap; /* phy2log table */
+ u16 *Log2PhyMap; /* log2phy table */
+ u16 wrtblk;
+ unsigned char *pagemap[(MS_MAX_PAGES_PER_BLOCK + (MS_LIB_BITS_PER_BYTE-1)) / MS_LIB_BITS_PER_BYTE];
+ unsigned char *blkpag;
+ struct ms_lib_type_extdat *blkext;
+ unsigned char copybuf[512];
+};
+
/* SD Block Length */
/* 2^9 = 512 Bytes, The HW maximum read/write data length */
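
The boot-block structures above are stored big-endian on the card; ms_lib_process_bootblock() further down converts each 16-bit field with be16_to_cpu() before comparing it (wPageSize against MS_SYSINF_PAGE_SIZE, and so on). A minimal standalone illustration of that byte-order step, using a local helper in place of the kernel's be16_to_cpu():

#include <stdio.h>
#include <stdint.h>

#define MS_BYTES_PER_PAGE	512	/* same value as the definition above */

/* read a 16-bit big-endian value, as be16_to_cpu() does in the driver */
static uint16_t be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	/* wPageSize as it appears on the card: 0x0200 big-endian */
	uint8_t raw[2] = { 0x02, 0x00 };

	if (be16(raw) == MS_BYTES_PER_PAGE)
		printf("page size looks sane (%u bytes)\n", (unsigned int)be16(raw));
	else
		printf("unexpected page size\n");
	return 0;
}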
@@ -162,7 +455,7 @@ struct ene_ub6250_info {
/*----- MS Control Data ---------------- */
bool MS_SWWP;
u32 MSP_TotalBlock;
- /*MS_LibControl MS_Lib;*/
+ struct ms_lib_ctrl MS_Lib;
bool MS_IsRWPage;
u16 MS_Model;
@@ -180,6 +473,7 @@ struct ene_ub6250_info {
};
static int ene_sd_init(struct us_data *us);
+static int ene_ms_init(struct us_data *us);
static int ene_load_bincode(struct us_data *us, unsigned char flag);
static void ene_ub6250_info_destructor(void *extra)
@@ -431,6 +725,1101 @@ static int sd_scsi_write(struct us_data *us, struct scsi_cmnd *srb)
return result;
}
+/*
+ * ENE MS Card
+ */
+
+static int ms_lib_set_logicalpair(struct us_data *us, u16 logblk, u16 phyblk)
+{
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ if ((logblk >= info->MS_Lib.NumberOfLogBlock) || (phyblk >= info->MS_Lib.NumberOfPhyBlock))
+ return (u32)-1;
+
+ info->MS_Lib.Phy2LogMap[phyblk] = logblk;
+ info->MS_Lib.Log2PhyMap[logblk] = phyblk;
+
+ return 0;
+}
+
+static int ms_lib_set_logicalblockmark(struct us_data *us, u16 phyblk, u16 mark)
+{
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ if (phyblk >= info->MS_Lib.NumberOfPhyBlock)
+ return (u32)-1;
+
+ info->MS_Lib.Phy2LogMap[phyblk] = mark;
+
+ return 0;
+}
+
+static int ms_lib_set_initialerrorblock(struct us_data *us, u16 phyblk)
+{
+ return ms_lib_set_logicalblockmark(us, phyblk, MS_LB_INITIAL_ERROR);
+}
+
+static int ms_lib_set_bootblockmark(struct us_data *us, u16 phyblk)
+{
+ return ms_lib_set_logicalblockmark(us, phyblk, MS_LB_BOOT_BLOCK);
+}
+
+static int ms_lib_free_logicalmap(struct us_data *us)
+{
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ kfree(info->MS_Lib.Phy2LogMap);
+ info->MS_Lib.Phy2LogMap = NULL;
+
+ kfree(info->MS_Lib.Log2PhyMap);
+ info->MS_Lib.Log2PhyMap = NULL;
+
+ return 0;
+}
+
+int ms_lib_alloc_logicalmap(struct us_data *us)
+{
+ u32 i;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ info->MS_Lib.Phy2LogMap = kmalloc(info->MS_Lib.NumberOfPhyBlock * sizeof(u16), GFP_KERNEL);
+ info->MS_Lib.Log2PhyMap = kmalloc(info->MS_Lib.NumberOfLogBlock * sizeof(u16), GFP_KERNEL);
+
+ if ((info->MS_Lib.Phy2LogMap == NULL) || (info->MS_Lib.Log2PhyMap == NULL)) {
+ ms_lib_free_logicalmap(us);
+ return (u32)-1;
+ }
+
+ for (i = 0; i < info->MS_Lib.NumberOfPhyBlock; i++)
+ info->MS_Lib.Phy2LogMap[i] = MS_LB_NOT_USED;
+
+ for (i = 0; i < info->MS_Lib.NumberOfLogBlock; i++)
+ info->MS_Lib.Log2PhyMap[i] = MS_LB_NOT_USED;
+
+ return 0;
+}
+
+static void ms_lib_clear_writebuf(struct us_data *us)
+{
+ int i;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ info->MS_Lib.wrtblk = (u16)-1;
+ ms_lib_clear_pagemap(info);
+
+ if (info->MS_Lib.blkpag)
+ memset(info->MS_Lib.blkpag, 0xff, info->MS_Lib.PagesPerBlock * info->MS_Lib.BytesPerSector);
+
+ if (info->MS_Lib.blkext) {
+ for (i = 0; i < info->MS_Lib.PagesPerBlock; i++) {
+ info->MS_Lib.blkext[i].status1 = MS_REG_ST1_DEFAULT;
+ info->MS_Lib.blkext[i].ovrflg = MS_REG_OVR_DEFAULT;
+ info->MS_Lib.blkext[i].mngflg = MS_REG_MNG_DEFAULT;
+ info->MS_Lib.blkext[i].logadr = MS_LB_NOT_USED;
+ }
+ }
+}
+
+static int ms_count_freeblock(struct us_data *us, u16 PhyBlock)
+{
+ u32 Ende, Count;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ Ende = PhyBlock + MS_PHYSICAL_BLOCKS_PER_SEGMENT;
+ for (Count = 0; PhyBlock < Ende; PhyBlock++) {
+ switch (info->MS_Lib.Phy2LogMap[PhyBlock]) {
+ case MS_LB_NOT_USED:
+ case MS_LB_NOT_USED_ERASED:
+ Count++;
+ default:
+ break;
+ }
+ }
+
+ return Count;
+}
+
+static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
+ u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat)
+{
+ struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+ int result;
+ u8 ExtBuf[4];
+ u32 bn = PhyBlockAddr * 0x20 + PageNum;
+
+ /* printk(KERN_INFO "MS --- MS_ReaderReadPage,
+ PhyBlockAddr = %x, PageNum = %x\n", PhyBlockAddr, PageNum); */
+
+ result = ene_load_bincode(us, MS_RW_PATTERN);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ /* Read Page Data */
+ memset(bcb, 0, sizeof(struct bulk_cb_wrap));
+ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcb->DataTransferLength = 0x200;
+ bcb->Flags = 0x80;
+ bcb->CDB[0] = 0xF1;
+
+ bcb->CDB[1] = 0x02; /* in init.c ENE_MSInit() is 0x01 */
+
+ bcb->CDB[5] = (unsigned char)(bn);
+ bcb->CDB[4] = (unsigned char)(bn>>8);
+ bcb->CDB[3] = (unsigned char)(bn>>16);
+ bcb->CDB[2] = (unsigned char)(bn>>24);
+
+ result = ene_send_scsi_cmd(us, FDIR_READ, PageBuf, 0);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
+
+ /* Read Extra Data */
+ memset(bcb, 0, sizeof(struct bulk_cb_wrap));
+ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcb->DataTransferLength = 0x4;
+ bcb->Flags = 0x80;
+ bcb->CDB[0] = 0xF1;
+ bcb->CDB[1] = 0x03;
+
+ bcb->CDB[5] = (unsigned char)(PageNum);
+ bcb->CDB[4] = (unsigned char)(PhyBlockAddr);
+ bcb->CDB[3] = (unsigned char)(PhyBlockAddr>>8);
+ bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16);
+ bcb->CDB[6] = 0x01;
+
+ result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ ExtraDat->reserved = 0;
+ ExtraDat->intr = 0x80; /* Not yet, firmware support */
+ ExtraDat->status0 = 0x10; /* Not yet, firmware support */
+
+ ExtraDat->status1 = 0x00; /* Not yet, firmware support */
+ ExtraDat->ovrflg = ExtBuf[0];
+ ExtraDat->mngflg = ExtBuf[1];
+ ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]);
+
+ return USB_STOR_TRANSPORT_GOOD;
+}
+
+static int ms_lib_process_bootblock(struct us_data *us, u16 PhyBlock, u8 *PageData)
+{
+ struct ms_bootblock_sysent *SysEntry;
+ struct ms_bootblock_sysinf *SysInfo;
+ u32 i, result;
+ u8 PageNumber;
+ u8 *PageBuffer;
+ struct ms_lib_type_extdat ExtraData;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ PageBuffer = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
+ if (PageBuffer == NULL)
+ return (u32)-1;
+
+ result = (u32)-1;
+
+ SysInfo = &(((struct ms_bootblock_page0 *)PageData)->sysinf);
+
+ if ((SysInfo->bMsClass != MS_SYSINF_MSCLASS_TYPE_1) ||
+ (be16_to_cpu(SysInfo->wPageSize) != MS_SYSINF_PAGE_SIZE) ||
+ ((SysInfo->bSecuritySupport & MS_SYSINF_SECURITY) == MS_SYSINF_SECURITY_SUPPORT) ||
+ (SysInfo->bReserved1 != MS_SYSINF_RESERVED1) ||
+ (SysInfo->bReserved2 != MS_SYSINF_RESERVED2) ||
+ (SysInfo->bFormatType != MS_SYSINF_FORMAT_FAT) ||
+ (SysInfo->bUsage != MS_SYSINF_USAGE_GENERAL))
+ goto exit;
+ /* */
+ switch (info->MS_Lib.cardType = SysInfo->bCardType) {
+ case MS_SYSINF_CARDTYPE_RDONLY:
+ ms_lib_ctrl_set(info, MS_LIB_CTRL_RDONLY);
+ break;
+ case MS_SYSINF_CARDTYPE_RDWR:
+ ms_lib_ctrl_reset(info, MS_LIB_CTRL_RDONLY);
+ break;
+ case MS_SYSINF_CARDTYPE_HYBRID:
+ default:
+ goto exit;
+ }
+
+ info->MS_Lib.blockSize = be16_to_cpu(SysInfo->wBlockSize);
+ info->MS_Lib.NumberOfPhyBlock = be16_to_cpu(SysInfo->wBlockNumber);
+ info->MS_Lib.NumberOfLogBlock = be16_to_cpu(SysInfo->wTotalBlockNumber)-2;
+ info->MS_Lib.PagesPerBlock = info->MS_Lib.blockSize * SIZE_OF_KIRO / MS_BYTES_PER_PAGE;
+ info->MS_Lib.NumberOfSegment = info->MS_Lib.NumberOfPhyBlock / MS_PHYSICAL_BLOCKS_PER_SEGMENT;
+ info->MS_Model = be16_to_cpu(SysInfo->wMemorySize);
+
+ /* Allocate tables covering all logical and physical blocks */
+ if (ms_lib_alloc_logicalmap(us))
+ goto exit;
+
+ /* Mark the boot block */
+ ms_lib_set_bootblockmark(us, PhyBlock);
+
+ SysEntry = &(((struct ms_bootblock_page0 *)PageData)->sysent);
+
+ for (i = 0; i < MS_NUMBER_OF_SYSTEM_ENTRY; i++) {
+ u32 EntryOffset, EntrySize;
+
+ EntryOffset = be32_to_cpu(SysEntry->entry[i].dwStart);
+
+ if (EntryOffset == 0xffffff)
+ continue;
+ EntrySize = be32_to_cpu(SysEntry->entry[i].dwSize);
+
+ if (EntrySize == 0)
+ continue;
+
+ if (EntryOffset + MS_BYTES_PER_PAGE + EntrySize > info->MS_Lib.blockSize * (u32)SIZE_OF_KIRO)
+ continue;
+
+ if (i == 0) {
+ u8 PrevPageNumber = 0;
+ u16 phyblk;
+
+ if (SysEntry->entry[i].bType != MS_SYSENT_TYPE_INVALID_BLOCK)
+ goto exit;
+
+ while (EntrySize > 0) {
+
+ PageNumber = (u8)(EntryOffset / MS_BYTES_PER_PAGE + 1);
+ if (PageNumber != PrevPageNumber) {
+ switch (ms_read_readpage(us, PhyBlock, PageNumber, (u32 *)PageBuffer, &ExtraData)) {
+ case MS_STATUS_SUCCESS:
+ break;
+ case MS_STATUS_WRITE_PROTECT:
+ case MS_ERROR_FLASH_READ:
+ case MS_STATUS_ERROR:
+ default:
+ goto exit;
+ }
+
+ PrevPageNumber = PageNumber;
+ }
+
+ phyblk = be16_to_cpu(*(u16 *)(PageBuffer + (EntryOffset % MS_BYTES_PER_PAGE)));
+ if (phyblk < 0x0fff)
+ ms_lib_set_initialerrorblock(us, phyblk);
+
+ EntryOffset += 2;
+ EntrySize -= 2;
+ }
+ } else if (i == 1) { /* CIS/IDI */
+ struct ms_bootblock_idi *idi;
+
+ if (SysEntry->entry[i].bType != MS_SYSENT_TYPE_CIS_IDI)
+ goto exit;
+
+ switch (ms_read_readpage(us, PhyBlock, (u8)(EntryOffset / MS_BYTES_PER_PAGE + 1), (u32 *)PageBuffer, &ExtraData)) {
+ case MS_STATUS_SUCCESS:
+ break;
+ case MS_STATUS_WRITE_PROTECT:
+ case MS_ERROR_FLASH_READ:
+ case MS_STATUS_ERROR:
+ default:
+ goto exit;
+ }
+
+ idi = &((struct ms_bootblock_cis_idi *)(PageBuffer + (EntryOffset % MS_BYTES_PER_PAGE)))->idi.idi;
+ if (le16_to_cpu(idi->wIDIgeneralConfiguration) != MS_IDI_GENERAL_CONF)
+ goto exit;
+
+ info->MS_Lib.BytesPerSector = le16_to_cpu(idi->wIDIbytesPerSector);
+ if (info->MS_Lib.BytesPerSector != MS_BYTES_PER_PAGE)
+ goto exit;
+ }
+ } /* End for .. */
+
+ result = 0;
+
+exit:
+ if (result)
+ ms_lib_free_logicalmap(us);
+
+ kfree(PageBuffer);
+
+ result = 0;
+ return result;
+}
+
+static void ms_lib_free_writebuf(struct us_data *us)
+{
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+ info->MS_Lib.wrtblk = (u16)-1; /* set to -1 */
+
+ /* memset((fdoExt)->MS_Lib.pagemap, 0, sizeof((fdoExt)->MS_Lib.pagemap)) */
+
+ ms_lib_clear_pagemap(info); /* (pdx)->MS_Lib.pagemap memset 0 in ms.h */
+
+ if (info->MS_Lib.blkpag) {
+ kfree((u8 *)(info->MS_Lib.blkpag)); /* Arnold test ... */
+ info->MS_Lib.blkpag = NULL;
+ }
+
+ if (info->MS_Lib.blkext) {
+ kfree((u8 *)(info->MS_Lib.blkext)); /* Arnold test ... */
+ info->MS_Lib.blkext = NULL;
+ }
+}
+
+
+static void ms_lib_free_allocatedarea(struct us_data *us)
+{
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ ms_lib_free_writebuf(us); /* Free MS_Lib.pagemap */
+ ms_lib_free_logicalmap(us); /* kfree MS_Lib.Phy2LogMap and MS_Lib.Log2PhyMap */
+
+ /* set struct us point flag to 0 */
+ info->MS_Lib.flags = 0;
+ info->MS_Lib.BytesPerSector = 0;
+ info->MS_Lib.SectorsPerCylinder = 0;
+
+ info->MS_Lib.cardType = 0;
+ info->MS_Lib.blockSize = 0;
+ info->MS_Lib.PagesPerBlock = 0;
+
+ info->MS_Lib.NumberOfPhyBlock = 0;
+ info->MS_Lib.NumberOfLogBlock = 0;
+}
+
+
+static int ms_lib_alloc_writebuf(struct us_data *us)
+{
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ info->MS_Lib.wrtblk = (u16)-1;
+
+ info->MS_Lib.blkpag = kmalloc(info->MS_Lib.PagesPerBlock * info->MS_Lib.BytesPerSector, GFP_KERNEL);
+ info->MS_Lib.blkext = kmalloc(info->MS_Lib.PagesPerBlock * sizeof(struct ms_lib_type_extdat), GFP_KERNEL);
+
+ if ((info->MS_Lib.blkpag == NULL) || (info->MS_Lib.blkext == NULL)) {
+ ms_lib_free_writebuf(us);
+ return (u32)-1;
+ }
+
+ ms_lib_clear_writebuf(us);
+
+return 0;
+}
+
+static int ms_lib_force_setlogical_pair(struct us_data *us, u16 logblk, u16 phyblk)
+{
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ if (logblk == MS_LB_NOT_USED)
+ return 0;
+
+ if ((logblk >= info->MS_Lib.NumberOfLogBlock) ||
+ (phyblk >= info->MS_Lib.NumberOfPhyBlock))
+ return (u32)-1;
+
+ info->MS_Lib.Phy2LogMap[phyblk] = logblk;
+ info->MS_Lib.Log2PhyMap[logblk] = phyblk;
+
+ return 0;
+}
+
+static int ms_read_copyblock(struct us_data *us, u16 oldphy, u16 newphy,
+ u16 PhyBlockAddr, u8 PageNum, unsigned char *buf, u16 len)
+{
+ struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+ int result;
+
+ /* printk(KERN_INFO "MS_ReaderCopyBlock --- PhyBlockAddr = %x,
+ PageNum = %x\n", PhyBlockAddr, PageNum); */
+ result = ene_load_bincode(us, MS_RW_PATTERN);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ memset(bcb, 0, sizeof(struct bulk_cb_wrap));
+ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcb->DataTransferLength = 0x200*len;
+ bcb->Flags = 0x00;
+ bcb->CDB[0] = 0xF0;
+ bcb->CDB[1] = 0x08;
+ bcb->CDB[4] = (unsigned char)(oldphy);
+ bcb->CDB[3] = (unsigned char)(oldphy>>8);
+ bcb->CDB[2] = 0; /* (BYTE)(oldphy>>16) */
+ bcb->CDB[7] = (unsigned char)(newphy);
+ bcb->CDB[6] = (unsigned char)(newphy>>8);
+ bcb->CDB[5] = 0; /* (BYTE)(newphy>>16) */
+ bcb->CDB[9] = (unsigned char)(PhyBlockAddr);
+ bcb->CDB[8] = (unsigned char)(PhyBlockAddr>>8);
+ bcb->CDB[10] = PageNum;
+
+ result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ return USB_STOR_TRANSPORT_GOOD;
+}
+
+static int ms_read_eraseblock(struct us_data *us, u32 PhyBlockAddr)
+{
+ struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+ int result;
+ u32 bn = PhyBlockAddr;
+
+ /* printk(KERN_INFO "MS --- ms_read_eraseblock,
+ PhyBlockAddr = %x\n", PhyBlockAddr); */
+ result = ene_load_bincode(us, MS_RW_PATTERN);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ memset(bcb, 0, sizeof(struct bulk_cb_wrap));
+ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcb->DataTransferLength = 0x200;
+ bcb->Flags = 0x80;
+ bcb->CDB[0] = 0xF2;
+ bcb->CDB[1] = 0x06;
+ bcb->CDB[4] = (unsigned char)(bn);
+ bcb->CDB[3] = (unsigned char)(bn>>8);
+ bcb->CDB[2] = (unsigned char)(bn>>16);
+
+ result = ene_send_scsi_cmd(us, FDIR_READ, NULL, 0);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ return USB_STOR_TRANSPORT_GOOD;
+}
+
+static int ms_lib_check_disableblock(struct us_data *us, u16 PhyBlock)
+{
+ unsigned char *PageBuf = NULL;
+ u16 result = MS_STATUS_SUCCESS;
+ u16 blk, index = 0;
+ struct ms_lib_type_extdat extdat;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ PageBuf = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
+ if (PageBuf == NULL) {
+ result = MS_NO_MEMORY_ERROR;
+ goto exit;
+ }
+
+ ms_read_readpage(us, PhyBlock, 1, (u32 *)PageBuf, &extdat);
+ do {
+ blk = be16_to_cpu(PageBuf[index]);
+ if (blk == MS_LB_NOT_USED)
+ break;
+ if (blk == info->MS_Lib.Log2PhyMap[0]) {
+ result = MS_ERROR_FLASH_READ;
+ break;
+ }
+ index++;
+ } while (1);
+
+exit:
+ kfree(PageBuf);
+ return result;
+}
+
+static int ms_lib_setacquired_errorblock(struct us_data *us, u16 phyblk)
+{
+ u16 log;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ if (phyblk >= info->MS_Lib.NumberOfPhyBlock)
+ return (u32)-1;
+
+ log = info->MS_Lib.Phy2LogMap[phyblk];
+
+ if (log < info->MS_Lib.NumberOfLogBlock)
+ info->MS_Lib.Log2PhyMap[log] = MS_LB_NOT_USED;
+
+ if (info->MS_Lib.Phy2LogMap[phyblk] != MS_LB_INITIAL_ERROR)
+ info->MS_Lib.Phy2LogMap[phyblk] = MS_LB_ACQUIRED_ERROR;
+
+ return 0;
+}
+
+static int ms_lib_overwrite_extra(struct us_data *us, u32 PhyBlockAddr,
+ u8 PageNum, u8 OverwriteFlag)
+{
+ struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+ int result;
+
+ /* printk("MS --- MS_LibOverwriteExtra,
+ PhyBlockAddr = %x, PageNum = %x\n", PhyBlockAddr, PageNum); */
+ result = ene_load_bincode(us, MS_RW_PATTERN);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ memset(bcb, 0, sizeof(struct bulk_cb_wrap));
+ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcb->DataTransferLength = 0x4;
+ bcb->Flags = 0x80;
+ bcb->CDB[0] = 0xF2;
+ bcb->CDB[1] = 0x05;
+ bcb->CDB[5] = (unsigned char)(PageNum);
+ bcb->CDB[4] = (unsigned char)(PhyBlockAddr);
+ bcb->CDB[3] = (unsigned char)(PhyBlockAddr>>8);
+ bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16);
+ bcb->CDB[6] = OverwriteFlag;
+ bcb->CDB[7] = 0xFF;
+ bcb->CDB[8] = 0xFF;
+ bcb->CDB[9] = 0xFF;
+
+ result = ene_send_scsi_cmd(us, FDIR_READ, NULL, 0);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ return USB_STOR_TRANSPORT_GOOD;
+}
+
+static int ms_lib_error_phyblock(struct us_data *us, u16 phyblk)
+{
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ if (phyblk >= info->MS_Lib.NumberOfPhyBlock)
+ return MS_STATUS_ERROR;
+
+ ms_lib_setacquired_errorblock(us, phyblk);
+
+ if (ms_lib_iswritable(info))
+ return ms_lib_overwrite_extra(us, phyblk, 0, (u8)(~MS_REG_OVR_BKST & BYTE_MASK));
+
+ return MS_STATUS_SUCCESS;
+}
+
+static int ms_lib_erase_phyblock(struct us_data *us, u16 phyblk)
+{
+ u16 log;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ if (phyblk >= info->MS_Lib.NumberOfPhyBlock)
+ return MS_STATUS_ERROR;
+
+ log = info->MS_Lib.Phy2LogMap[phyblk];
+
+ if (log < info->MS_Lib.NumberOfLogBlock)
+ info->MS_Lib.Log2PhyMap[log] = MS_LB_NOT_USED;
+
+ info->MS_Lib.Phy2LogMap[phyblk] = MS_LB_NOT_USED;
+
+ if (ms_lib_iswritable(info)) {
+ switch (ms_read_eraseblock(us, phyblk)) {
+ case MS_STATUS_SUCCESS:
+ info->MS_Lib.Phy2LogMap[phyblk] = MS_LB_NOT_USED_ERASED;
+ return MS_STATUS_SUCCESS;
+ case MS_ERROR_FLASH_ERASE:
+ case MS_STATUS_INT_ERROR:
+ ms_lib_error_phyblock(us, phyblk);
+ return MS_ERROR_FLASH_ERASE;
+ case MS_STATUS_ERROR:
+ default:
+ ms_lib_ctrl_set(info, MS_LIB_CTRL_RDONLY); /* MS_LibCtrlSet will also be used by ENE_MSInit; needs checking, note the us -> info argument change */
+ ms_lib_setacquired_errorblock(us, phyblk);
+ return MS_STATUS_ERROR;
+ }
+ }
+
+ ms_lib_setacquired_errorblock(us, phyblk);
+
+ return MS_STATUS_SUCCESS;
+}
+
+static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
+ u8 PageNum, struct ms_lib_type_extdat *ExtraDat)
+{
+ struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+ int result;
+ u8 ExtBuf[4];
+
+ /* printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum); */
+ memset(bcb, 0, sizeof(struct bulk_cb_wrap));
+ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcb->DataTransferLength = 0x4;
+ bcb->Flags = 0x80;
+ bcb->CDB[0] = 0xF1;
+ bcb->CDB[1] = 0x03;
+ bcb->CDB[5] = (unsigned char)(PageNum);
+ bcb->CDB[4] = (unsigned char)(PhyBlock);
+ bcb->CDB[3] = (unsigned char)(PhyBlock>>8);
+ bcb->CDB[2] = (unsigned char)(PhyBlock>>16);
+ bcb->CDB[6] = 0x01;
+
+ result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ ExtraDat->reserved = 0;
+ ExtraDat->intr = 0x80; /* Not yet, waiting for firmware support */
+ ExtraDat->status0 = 0x10; /* Not yet, waiting for firmware support */
+ ExtraDat->status1 = 0x00; /* Not yet, waiting for firmware support */
+ ExtraDat->ovrflg = ExtBuf[0];
+ ExtraDat->mngflg = ExtBuf[1];
+ ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]);
+
+ return USB_STOR_TRANSPORT_GOOD;
+}
+
+static int ms_libsearch_block_from_physical(struct us_data *us, u16 phyblk)
+{
+ u16 Newblk;
+ u16 blk;
+ struct ms_lib_type_extdat extdat; /* need check */
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+
+ if (phyblk >= info->MS_Lib.NumberOfPhyBlock)
+ return MS_LB_ERROR;
+
+ for (blk = phyblk + 1; blk != phyblk; blk++) {
+ if ((blk & MS_PHYSICAL_BLOCKS_PER_SEGMENT_MASK) == 0)
+ blk -= MS_PHYSICAL_BLOCKS_PER_SEGMENT;
+
+ Newblk = info->MS_Lib.Phy2LogMap[blk];
+ if (info->MS_Lib.Phy2LogMap[blk] == MS_LB_NOT_USED_ERASED) {
+ return blk;
+ } else if (info->MS_Lib.Phy2LogMap[blk] == MS_LB_NOT_USED) {
+ switch (ms_lib_read_extra(us, blk, 0, &extdat)) {
+ case MS_STATUS_SUCCESS:
+ case MS_STATUS_SUCCESS_WITH_ECC:
+ break;
+ case MS_NOCARD_ERROR:
+ return MS_NOCARD_ERROR;
+ case MS_STATUS_INT_ERROR:
+ return MS_LB_ERROR;
+ case MS_ERROR_FLASH_READ:
+ default:
+ ms_lib_setacquired_errorblock(us, blk);
+ continue;
+ } /* End switch */
+
+ if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) {
+ ms_lib_setacquired_errorblock(us, blk);
+ continue;
+ }
+
+ switch (ms_lib_erase_phyblock(us, blk)) {
+ case MS_STATUS_SUCCESS:
+ return blk;
+ case MS_STATUS_ERROR:
+ return MS_LB_ERROR;
+ case MS_ERROR_FLASH_ERASE:
+ default:
+ ms_lib_error_phyblock(us, blk);
+ break;
+ }
+ }
+ } /* End for */
+
+ return MS_LB_ERROR;
+}
+static int ms_libsearch_block_from_logical(struct us_data *us, u16 logblk)
+{
+ u16 phyblk;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ phyblk = ms_libconv_to_physical(info, logblk);
+ if (phyblk >= MS_LB_ERROR) {
+ if (logblk >= info->MS_Lib.NumberOfLogBlock)
+ return MS_LB_ERROR;
+
+ phyblk = (logblk + MS_NUMBER_OF_BOOT_BLOCK) / MS_LOGICAL_BLOCKS_PER_SEGMENT;
+ phyblk *= MS_PHYSICAL_BLOCKS_PER_SEGMENT;
+ phyblk += MS_PHYSICAL_BLOCKS_PER_SEGMENT - 1;
+ }
+
+ return ms_libsearch_block_from_physical(us, phyblk);
+}
+
+static int ms_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb)
+{
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
+
+ /* pr_info("MS_SCSI_Test_Unit_Ready\n"); */
+ if (info->MS_Status.Insert && info->MS_Status.Ready) {
+ return USB_STOR_TRANSPORT_GOOD;
+ } else {
+ ene_ms_init(us);
+ return USB_STOR_TRANSPORT_GOOD;
+ }
+
+ return USB_STOR_TRANSPORT_GOOD;
+}
+
+static int ms_scsi_inquiry(struct us_data *us, struct scsi_cmnd *srb)
+{
+ /* pr_info("MS_SCSI_Inquiry\n"); */
+ unsigned char data_ptr[36] = {
+ 0x00, 0x80, 0x02, 0x00, 0x1F, 0x00, 0x00, 0x00, 0x55,
+ 0x53, 0x42, 0x32, 0x2E, 0x30, 0x20, 0x20, 0x43, 0x61,
+ 0x72, 0x64, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x31, 0x30, 0x30};
+
+ usb_stor_set_xfer_buf(data_ptr, 36, srb);
+ return USB_STOR_TRANSPORT_GOOD;
+}
+
+static int ms_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb)
+{
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+ unsigned char mediaNoWP[12] = {
+ 0x0b, 0x00, 0x00, 0x08, 0x00, 0x00,
+ 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 };
+ unsigned char mediaWP[12] = {
+ 0x0b, 0x00, 0x80, 0x08, 0x00, 0x00,
+ 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 };
+
+ if (info->MS_Status.WtP)
+ usb_stor_set_xfer_buf(mediaWP, 12, srb);
+ else
+ usb_stor_set_xfer_buf(mediaNoWP, 12, srb);
+
+ return USB_STOR_TRANSPORT_GOOD;
+}
+
+static int ms_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
+{
+ u32 bl_num;
+ u16 bl_len;
+ unsigned int offset = 0;
+ unsigned char buf[8];
+ struct scatterlist *sg = NULL;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ US_DEBUGP("ms_scsi_read_capacity\n");
+ bl_len = 0x200;
+ if (info->MS_Status.IsMSPro)
+ bl_num = info->MSP_TotalBlock - 1;
+ else
+ bl_num = info->MS_Lib.NumberOfLogBlock * info->MS_Lib.blockSize * 2 - 1;
+
+ info->bl_num = bl_num;
+ US_DEBUGP("bl_len = %x\n", bl_len);
+ US_DEBUGP("bl_num = %x\n", bl_num);
+
+ /*srb->request_bufflen = 8; */
+ buf[0] = (bl_num >> 24) & 0xff;
+ buf[1] = (bl_num >> 16) & 0xff;
+ buf[2] = (bl_num >> 8) & 0xff;
+ buf[3] = (bl_num >> 0) & 0xff;
+ buf[4] = (bl_len >> 24) & 0xff;
+ buf[5] = (bl_len >> 16) & 0xff;
+ buf[6] = (bl_len >> 8) & 0xff;
+ buf[7] = (bl_len >> 0) & 0xff;
+
+ usb_stor_access_xfer_buf(buf, 8, srb, &sg, &offset, TO_XFER_BUF);
+
+ return USB_STOR_TRANSPORT_GOOD;
+}
+
+static void ms_lib_phy_to_log_range(u16 PhyBlock, u16 *LogStart, u16 *LogEnde)
+{
+ PhyBlock /= MS_PHYSICAL_BLOCKS_PER_SEGMENT;
+
+ if (PhyBlock) {
+ *LogStart = MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT + (PhyBlock - 1) * MS_LOGICAL_BLOCKS_PER_SEGMENT;/*496*/
+ *LogEnde = *LogStart + MS_LOGICAL_BLOCKS_PER_SEGMENT;/*496*/
+ } else {
+ *LogStart = 0;
+ *LogEnde = MS_LOGICAL_BLOCKS_IN_1ST_SEGMENT;/*494*/
+ }
+}
+
+static int ms_lib_read_extrablock(struct us_data *us, u32 PhyBlock,
+ u8 PageNum, u8 blen, void *buf)
+{
+ struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+ int result;
+
+ /* printk("MS_LibReadExtraBlock --- PhyBlock = %x,
+ PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen); */
+
+ /* Read Extra Data */
+ memset(bcb, 0, sizeof(struct bulk_cb_wrap));
+ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcb->DataTransferLength = 0x4 * blen;
+ bcb->Flags = 0x80;
+ bcb->CDB[0] = 0xF1;
+ bcb->CDB[1] = 0x03;
+ bcb->CDB[5] = (unsigned char)(PageNum);
+ bcb->CDB[4] = (unsigned char)(PhyBlock);
+ bcb->CDB[3] = (unsigned char)(PhyBlock>>8);
+ bcb->CDB[2] = (unsigned char)(PhyBlock>>16);
+ bcb->CDB[6] = blen;
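+ /* vendor cmd F1 03: PhyBlock in CDB[2..4] (MSB first), start page in
+ * CDB[5], count in CDB[6]; 4 bytes of extra data per entry */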
+
+ result = ene_send_scsi_cmd(us, FDIR_READ, buf, 0);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ return USB_STOR_TRANSPORT_GOOD;
+}
+
+static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st)
+{
+ u16 PhyBlock, newblk, i;
+ u16 LogStart, LogEnde;
+ struct ms_lib_type_extdat extdat;
+ u8 buf[0x200];
+ u32 count = 0, index = 0;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) {
+ ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde);
+
+ for (i = 0; i < MS_PHYSICAL_BLOCKS_PER_SEGMENT; i++, PhyBlock++) {
+ switch (ms_libconv_to_logical(info, PhyBlock)) {
+ case MS_STATUS_ERROR:
+ continue;
+ default:
+ break;
+ }
+
+ if (count == PhyBlock) {
+ ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, &buf);
+ count += 0x80;
+ }
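+ /* extra data is fetched 0x80 entries at a time, 4 bytes each */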
+ index = (PhyBlock % 0x80) * 4;
+
+ extdat.ovrflg = buf[index];
+ extdat.mngflg = buf[index+1];
+ extdat.logadr = memstick_logaddr(buf[index+2], buf[index+3]);
+
+ if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) {
+ ms_lib_setacquired_errorblock(us, PhyBlock);
+ continue;
+ }
+
+ if ((extdat.mngflg & MS_REG_MNG_ATFLG) == MS_REG_MNG_ATFLG_ATTBL) {
+ ms_lib_erase_phyblock(us, PhyBlock);
+ continue;
+ }
+
+ if (extdat.logadr != MS_LB_NOT_USED) {
+ if ((extdat.logadr < LogStart) || (LogEnde <= extdat.logadr)) {
+ ms_lib_erase_phyblock(us, PhyBlock);
+ continue;
+ }
+
+ newblk = ms_libconv_to_physical(info, extdat.logadr);
+
+ if (newblk != MS_LB_NOT_USED) {
+ if (extdat.logadr == 0) {
+ ms_lib_set_logicalpair(us, extdat.logadr, PhyBlock);
+ if (ms_lib_check_disableblock(us, btBlk1st)) {
+ ms_lib_set_logicalpair(us, extdat.logadr, newblk);
+ continue;
+ }
+ }
+
+ ms_lib_read_extra(us, newblk, 0, &extdat);
+ if ((extdat.ovrflg & MS_REG_OVR_UDST) == MS_REG_OVR_UDST_UPDATING) {
+ ms_lib_erase_phyblock(us, PhyBlock);
+ continue;
+ } else {
+ ms_lib_erase_phyblock(us, newblk);
+ }
+ }
+
+ ms_lib_set_logicalpair(us, extdat.logadr, PhyBlock);
+ }
+ }
+ } /* End for ... */
+
+ return MS_STATUS_SUCCESS;
+}
+
+
+static int ms_scsi_read(struct us_data *us, struct scsi_cmnd *srb)
+{
+ int result;
+ unsigned char *cdb = srb->cmnd;
+ struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ u32 bn = ((cdb[2] << 24) & 0xff000000) | ((cdb[3] << 16) & 0x00ff0000) |
+ ((cdb[4] << 8) & 0x0000ff00) | ((cdb[5] << 0) & 0x000000ff);
+ u16 blen = ((cdb[7] << 8) & 0xff00) | ((cdb[8] << 0) & 0x00ff);
+ u32 blenByte = blen * 0x200;
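+ /* READ(10): cdb[2..5] holds the LBA, cdb[7..8] the transfer length in
+ * 512-byte sectors */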
+
+ if (bn > info->bl_num)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ if (info->MS_Status.IsMSPro) {
+ result = ene_load_bincode(us, MSP_RW_PATTERN);
+ if (result != USB_STOR_XFER_GOOD) {
+ US_DEBUGP("Load MPS RW pattern Fail !!\n");
+ return USB_STOR_TRANSPORT_ERROR;
+ }
+
+ /* set up the command wrapper */
+ memset(bcb, 0, sizeof(struct bulk_cb_wrap));
+ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcb->DataTransferLength = blenByte;
+ bcb->Flags = 0x80;
+ bcb->CDB[0] = 0xF1;
+ bcb->CDB[1] = 0x02;
+ bcb->CDB[5] = (unsigned char)(bn);
+ bcb->CDB[4] = (unsigned char)(bn>>8);
+ bcb->CDB[3] = (unsigned char)(bn>>16);
+ bcb->CDB[2] = (unsigned char)(bn>>24);
+
+ result = ene_send_scsi_cmd(us, FDIR_READ, scsi_sglist(srb), 1);
+ } else {
+ void *buf;
+ int offset = 0;
+ u16 phyblk, logblk;
+ u8 PageNum;
+ u16 len;
+ u32 blkno;
+
+ buf = kmalloc(blenByte, GFP_KERNEL);
+ if (buf == NULL)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ result = ene_load_bincode(us, MS_RW_PATTERN);
+ if (result != USB_STOR_XFER_GOOD) {
+ pr_info("Load MS RW pattern Fail !!\n");
+ result = USB_STOR_TRANSPORT_ERROR;
+ goto exit;
+ }
+
+ logblk = (u16)(bn / info->MS_Lib.PagesPerBlock);
+ PageNum = (u8)(bn % info->MS_Lib.PagesPerBlock);
+
+ while (1) {
+ if (blen > (info->MS_Lib.PagesPerBlock-PageNum))
+ len = info->MS_Lib.PagesPerBlock-PageNum;
+ else
+ len = blen;
+
+ phyblk = ms_libconv_to_physical(info, logblk);
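+ /* device page address; this path assumes 0x20 pages per physical block */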
+ blkno = phyblk * 0x20 + PageNum;
+
+ /* set up the command wrapper */
+ memset(bcb, 0, sizeof(struct bulk_cb_wrap));
+ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcb->DataTransferLength = 0x200 * len;
+ bcb->Flags = 0x80;
+ bcb->CDB[0] = 0xF1;
+ bcb->CDB[1] = 0x02;
+ bcb->CDB[5] = (unsigned char)(blkno);
+ bcb->CDB[4] = (unsigned char)(blkno>>8);
+ bcb->CDB[3] = (unsigned char)(blkno>>16);
+ bcb->CDB[2] = (unsigned char)(blkno>>24);
+
+ result = ene_send_scsi_cmd(us, FDIR_READ, buf+offset, 0);
+ if (result != USB_STOR_XFER_GOOD) {
+ pr_info("MS_SCSI_Read --- result = %x\n", result);
+ result = USB_STOR_TRANSPORT_ERROR;
+ goto exit;
+ }
+
+ blen -= len;
+ if (blen <= 0)
+ break;
+ logblk++;
+ PageNum = 0;
+ offset += MS_BYTES_PER_PAGE*len;
+ }
+ usb_stor_set_xfer_buf(buf, blenByte, srb);
+exit:
+ kfree(buf);
+ }
+ return result;
+}
+
+static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb)
+{
+ int result;
+ struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+ unsigned char *cdb = srb->cmnd;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ u32 bn = ((cdb[2] << 24) & 0xff000000) |
+ ((cdb[3] << 16) & 0x00ff0000) |
+ ((cdb[4] << 8) & 0x0000ff00) |
+ ((cdb[5] << 0) & 0x000000ff);
+ u16 blen = ((cdb[7] << 8) & 0xff00) | ((cdb[8] << 0) & 0x00ff);
+ u32 blenByte = blen * 0x200;
+
+ if (bn > info->bl_num)
+ return USB_STOR_TRANSPORT_ERROR;
+
+ if (info->MS_Status.IsMSPro) {
+ result = ene_load_bincode(us, MSP_RW_PATTERN);
+ if (result != USB_STOR_XFER_GOOD) {
+ pr_info("Load MSP RW pattern Fail !!\n");
+ return USB_STOR_TRANSPORT_ERROR;
+ }
+
+ /* set up the command wrapper */
+ memset(bcb, 0, sizeof(struct bulk_cb_wrap));
+ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcb->DataTransferLength = blenByte;
+ bcb->Flags = 0x00;
+ bcb->CDB[0] = 0xF0;
+ bcb->CDB[1] = 0x04;
+ bcb->CDB[5] = (unsigned char)(bn);
+ bcb->CDB[4] = (unsigned char)(bn>>8);
+ bcb->CDB[3] = (unsigned char)(bn>>16);
+ bcb->CDB[2] = (unsigned char)(bn>>24);
+
+ result = ene_send_scsi_cmd(us, FDIR_WRITE, scsi_sglist(srb), 1);
+ } else {
+ void *buf;
+ int offset = 0;
+ u16 PhyBlockAddr;
+ u8 PageNum;
+ u16 len, oldphy, newphy;
+
+ buf = kmalloc(blenByte, GFP_KERNEL);
+ if (buf == NULL)
+ return USB_STOR_TRANSPORT_ERROR;
+ usb_stor_set_xfer_buf(buf, blenByte, srb);
+
+ result = ene_load_bincode(us, MS_RW_PATTERN);
+ if (result != USB_STOR_XFER_GOOD) {
+ pr_info("Load MS RW pattern Fail !!\n");
+ result = USB_STOR_TRANSPORT_ERROR;
+ goto exit;
+ }
+
+ PhyBlockAddr = (u16)(bn / info->MS_Lib.PagesPerBlock);
+ PageNum = (u8)(bn % info->MS_Lib.PagesPerBlock);
+
+ while (1) {
+ if (blen > (info->MS_Lib.PagesPerBlock-PageNum))
+ len = info->MS_Lib.PagesPerBlock-PageNum;
+ else
+ len = blen;
+
+ oldphy = ms_libconv_to_physical(info, PhyBlockAddr); /* need check us <-> info */
+ newphy = ms_libsearch_block_from_logical(us, PhyBlockAddr);
+
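+ /* Note: PhyBlockAddr holds the logical block number here. The data is
+ * written into the freshly found free block; the old block is retired
+ * and the logical mapping updated below. */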
+ result = ms_read_copyblock(us, oldphy, newphy, PhyBlockAddr, PageNum, buf+offset, len);
+
+ if (result != USB_STOR_XFER_GOOD) {
+ pr_info("MS_SCSI_Write --- result = %x\n", result);
+ result = USB_STOR_TRANSPORT_ERROR;
+ goto exit;
+ }
+
+ info->MS_Lib.Phy2LogMap[oldphy] = MS_LB_NOT_USED_ERASED;
+ ms_lib_force_setlogical_pair(us, PhyBlockAddr, newphy);
+
+ blen -= len;
+ if (blen <= 0)
+ break;
+ PhyBlockAddr++;
+ PageNum = 0;
+ offset += MS_BYTES_PER_PAGE*len;
+ }
+exit:
+ kfree(buf);
+ }
+ return result;
+}
+
+/*
+ * ENE MS Card
+ */
+
static int ene_get_card_type(struct us_data *us, u16 index, void *buf)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
@@ -505,6 +1894,19 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag)
US_DEBUGP("SD_RDWR_PATTERN\n");
fw_name = "ene-ub6250/sd_rdwr.bin";
break;
+ /* For MS */
+ case MS_INIT_PATTERN:
+ US_DEBUGP("MS_INIT_PATTERN\n");
+ fw_name = "ene-ub6250/ms_init.bin";
+ break;
+ case MSP_RW_PATTERN:
+ US_DEBUGP("MSP_RW_PATTERN\n");
+ fw_name = "ene-ub6250/msp_rdwr.bin";
+ break;
+ case MS_RW_PATTERN:
+ US_DEBUGP("MS_RW_PATTERN\n");
+ fw_name = "ene-ub6250/ms_rdwr.bin";
+ break;
default:
US_DEBUGP("----------- Unknown PATTERN ----------\n");
goto nofw;
@@ -540,6 +1942,182 @@ nofw:
return result;
}
+static int ms_card_init(struct us_data *us)
+{
+ u32 result;
+ u16 TmpBlock;
+ unsigned char *PageBuffer0 = NULL, *PageBuffer1 = NULL;
+ struct ms_lib_type_extdat extdat;
+ u16 btBlk1st, btBlk2nd;
+ u32 btBlk1stErred;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ printk(KERN_INFO "MS_CardInit start\n");
+
+ ms_lib_free_allocatedarea(us); /* Clean buffer and set struct us_data flag to 0 */
+
+ /* get two PageBuffer */
+ PageBuffer0 = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
+ PageBuffer1 = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
+ if ((PageBuffer0 == NULL) || (PageBuffer1 == NULL)) {
+ result = MS_NO_MEMORY_ERROR;
+ goto exit;
+ }
+
+ btBlk1st = btBlk2nd = MS_LB_NOT_USED;
+ btBlk1stErred = 0;
+
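+ /* scan the first MS_MAX_INITIAL_ERROR_BLOCKS + 2 physical blocks for
+ * up to two boot blocks */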
+ for (TmpBlock = 0; TmpBlock < MS_MAX_INITIAL_ERROR_BLOCKS+2; TmpBlock++) {
+
+ switch (ms_read_readpage(us, TmpBlock, 0, (u32 *)PageBuffer0, &extdat)) {
+ case MS_STATUS_SUCCESS:
+ break;
+ case MS_STATUS_INT_ERROR:
+ break;
+ case MS_STATUS_ERROR:
+ default:
+ continue;
+ }
+
+ if ((extdat.ovrflg & MS_REG_OVR_BKST) == MS_REG_OVR_BKST_NG)
+ continue;
+
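+ /* accept only valid boot blocks: system flag, block ID, format version
+ * and data entry count must all match */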
+ if (((extdat.mngflg & MS_REG_MNG_SYSFLG) == MS_REG_MNG_SYSFLG_USER) ||
+ (be16_to_cpu(((struct ms_bootblock_page0 *)PageBuffer0)->header.wBlockID) != MS_BOOT_BLOCK_ID) ||
+ (be16_to_cpu(((struct ms_bootblock_page0 *)PageBuffer0)->header.wFormatVersion) != MS_BOOT_BLOCK_FORMAT_VERSION) ||
+ (((struct ms_bootblock_page0 *)PageBuffer0)->header.bNumberOfDataEntry != MS_BOOT_BLOCK_DATA_ENTRIES))
+ continue;
+
+ if (btBlk1st != MS_LB_NOT_USED) {
+ btBlk2nd = TmpBlock;
+ break;
+ }
+
+ btBlk1st = TmpBlock;
+ memcpy(PageBuffer1, PageBuffer0, MS_BYTES_PER_PAGE);
+ if (extdat.status1 & (MS_REG_ST1_DTER | MS_REG_ST1_EXER | MS_REG_ST1_FGER))
+ btBlk1stErred = 1;
+ }
+
+ if (btBlk1st == MS_LB_NOT_USED) {
+ result = MS_STATUS_ERROR;
+ goto exit;
+ }
+
+ /* write protect */
+ if ((extdat.status0 & MS_REG_ST0_WP) == MS_REG_ST0_WP_ON)
+ ms_lib_ctrl_set(info, MS_LIB_CTRL_WRPROTECT);
+
+ result = MS_STATUS_ERROR;
+ /* 1st Boot Block */
+ if (btBlk1stErred == 0)
+ result = ms_lib_process_bootblock(us, btBlk1st, PageBuffer1);
+ /* 1st */
+ /* 2nd Boot Block */
+ if (result && (btBlk2nd != MS_LB_NOT_USED))
+ result = ms_lib_process_bootblock(us, btBlk2nd, PageBuffer0);
+
+ if (result) {
+ result = MS_STATUS_ERROR;
+ goto exit;
+ }
+
+ for (TmpBlock = 0; TmpBlock < btBlk1st; TmpBlock++)
+ info->MS_Lib.Phy2LogMap[TmpBlock] = MS_LB_INITIAL_ERROR;
+
+ info->MS_Lib.Phy2LogMap[btBlk1st] = MS_LB_BOOT_BLOCK;
+
+ if (btBlk2nd != MS_LB_NOT_USED) {
+ for (TmpBlock = btBlk1st + 1; TmpBlock < btBlk2nd; TmpBlock++)
+ info->MS_Lib.Phy2LogMap[TmpBlock] = MS_LB_INITIAL_ERROR;
+
+ info->MS_Lib.Phy2LogMap[btBlk2nd] = MS_LB_BOOT_BLOCK;
+ }
+
+ result = ms_lib_scan_logicalblocknumber(us, btBlk1st);
+ if (result)
+ goto exit;
+
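+ /* if any data segment is left with no free block, force the card
+ * read-only */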
+ for (TmpBlock = MS_PHYSICAL_BLOCKS_PER_SEGMENT;
+ TmpBlock < info->MS_Lib.NumberOfPhyBlock;
+ TmpBlock += MS_PHYSICAL_BLOCKS_PER_SEGMENT) {
+ if (ms_count_freeblock(us, TmpBlock) == 0) {
+ ms_lib_ctrl_set(info, MS_LIB_CTRL_WRPROTECT);
+ break;
+ }
+ }
+
+ /* write */
+ if (ms_lib_alloc_writebuf(us)) {
+ result = MS_NO_MEMORY_ERROR;
+ goto exit;
+ }
+
+ result = MS_STATUS_SUCCESS;
+
+exit:
+ kfree(PageBuffer1);
+ kfree(PageBuffer0);
+
+ printk(KERN_INFO "MS_CardInit end\n");
+ return result;
+}
+
+static int ene_ms_init(struct us_data *us)
+{
+ struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+ int result;
+ u8 buf[0x200];
+ u16 MSP_BlockSize, MSP_UserAreaBlocks;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+
+ printk(KERN_INFO "transport --- ENE_MSInit\n");
+
+ /* the same part to test ENE */
+
+ result = ene_load_bincode(us, MS_INIT_PATTERN);
+ if (result != USB_STOR_XFER_GOOD) {
+ printk(KERN_ERR "Load MS Init Code Fail !!\n");
+ return USB_STOR_TRANSPORT_ERROR;
+ }
+
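+ /* vendor cmd F1 01: read the 0x200-byte MS status block */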
+ memset(bcb, 0, sizeof(struct bulk_cb_wrap));
+ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcb->DataTransferLength = 0x200;
+ bcb->Flags = 0x80;
+ bcb->CDB[0] = 0xF1;
+ bcb->CDB[1] = 0x01;
+
+ result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0);
+ if (result != USB_STOR_XFER_GOOD) {
+ printk(KERN_ERR "Execution MS Init Code Fail !!\n");
+ return USB_STOR_TRANSPORT_ERROR;
+ }
+ /* the same part to test ENE */
+ info->MS_Status = *(struct MS_STATUS *)&buf[0];
+
+ if (info->MS_Status.Insert && info->MS_Status.Ready) {
+ printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert);
+ printk(KERN_INFO "Ready = %x\n", info->MS_Status.Ready);
+ printk(KERN_INFO "IsMSPro = %x\n", info->MS_Status.IsMSPro);
+ printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG);
+ printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP);
+ if (info->MS_Status.IsMSPro) {
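+ /* MS Pro capacity in 512-byte sectors: block size * user-area
+ * block count (big-endian fields in the status block) */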
+ MSP_BlockSize = (buf[6] << 8) | buf[7];
+ MSP_UserAreaBlocks = (buf[10] << 8) | buf[11];
+ info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
+ } else {
+ ms_card_init(us); /* Card is a plain Memory Stick */
+ }
+ US_DEBUGP("MS Init Code OK !!\n");
+ } else {
+ US_DEBUGP("MS Card Not Ready --- %x\n", buf[0]);
+ return USB_STOR_TRANSPORT_ERROR;
+ }
+
+ return USB_STOR_TRANSPORT_GOOD;
+}
+
static int ene_sd_init(struct us_data *us)
{
int result;
@@ -619,7 +2197,13 @@ static int ene_init(struct us_data *us)
return USB_STOR_TRANSPORT_ERROR;
}
}
-
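+ /* misc_reg03 bit 1: Memory Stick slot; bring it up if not ready yet */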
+ if (misc_reg03 & 0x02) {
+ if (!info->MS_Status.Ready) {
+ result = ene_ms_init(us);
+ if (result != USB_STOR_XFER_GOOD)
+ return USB_STOR_TRANSPORT_ERROR;
+ }
+ }
return result;
}
@@ -662,6 +2246,41 @@ static int sd_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
return result;
}
+/*
+ * ms_scsi_irp()
+ */
+static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
+{
+ int result;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *)us->extra;
+ info->SrbStatus = SS_SUCCESS;
+ switch (srb->cmnd[0]) {
+ case TEST_UNIT_READY:
+ result = ms_scsi_test_unit_ready(us, srb);
+ break; /* 0x00 */
+ case INQUIRY:
+ result = ms_scsi_inquiry(us, srb);
+ break; /* 0x12 */
+ case MODE_SENSE:
+ result = ms_scsi_mode_sense(us, srb);
+ break; /* 0x1A */
+ case READ_CAPACITY:
+ result = ms_scsi_read_capacity(us, srb);
+ break; /* 0x25 */
+ case READ_10:
+ result = ms_scsi_read(us, srb);
+ break; /* 0x28 */
+ case WRITE_10:
+ result = ms_scsi_write(us, srb);
+ break; /* 0x2A */
+ default:
+ info->SrbStatus = SS_ILLEGAL_REQUEST;
+ result = USB_STOR_TRANSPORT_FAILED;
+ break;
+ }
+ return result;
+}
+
static int ene_transport(struct scsi_cmnd *srb, struct us_data *us)
{
int result = 0;
@@ -669,11 +2288,15 @@ static int ene_transport(struct scsi_cmnd *srb, struct us_data *us)
/*US_DEBUG(usb_stor_show_command(srb)); */
scsi_set_resid(srb, 0);
- if (unlikely(!info->SD_Status.Ready))
+ if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) {
result = ene_init(us);
- else
- result = sd_scsi_irp(us, srb);
+ } else {
+ if (info->SD_Status.Ready)
+ result = sd_scsi_irp(us, srb);
+ if (info->MS_Status.Ready)
+ result = ms_scsi_irp(us, srb);
+ }
return 0;
}
@@ -714,10 +2337,8 @@ static int ene_ub6250_probe(struct usb_interface *intf,
}
if (!(misc_reg03 & 0x01)) {
- result = -ENODEV;
- printk(KERN_NOTICE "ums_eneub6250: The driver only supports SD card. "
- "To use SM/MS card, please build driver/staging/keucr\n");
- usb_stor_disconnect(intf);
+ pr_info("ums_eneub6250: The driver only supports SD/MS card. "
+ "To use SM card, please build driver/staging/keucr\n");
}
return result;
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index d509a4a7d74..34adc4b42ce 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -24,7 +24,6 @@
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
-#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/version.h>
@@ -51,6 +50,35 @@ static int auto_delink_en = 1;
module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
+#ifdef CONFIG_REALTEK_AUTOPM
+static int ss_en = 1;
+module_param(ss_en, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ss_en, "enable selective suspend");
+
+static int ss_delay = 50;
+module_param(ss_delay, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ss_delay,
+ "seconds to delay before entering selective suspend");
+
+enum RTS51X_STAT {
+ RTS51X_STAT_INIT,
+ RTS51X_STAT_IDLE,
+ RTS51X_STAT_RUN,
+ RTS51X_STAT_SS
+};
+
+#define POLLING_INTERVAL 50
+
+#define rts51x_set_stat(chip, stat) \
+ ((chip)->state = (enum RTS51X_STAT)(stat))
+#define rts51x_get_stat(chip) ((chip)->state)
+
+#define SET_LUN_READY(chip, lun) ((chip)->lun_ready |= ((u8)1 << (lun)))
+#define CLR_LUN_READY(chip, lun) ((chip)->lun_ready &= ~((u8)1 << (lun)))
+#define TST_LUN_READY(chip, lun) ((chip)->lun_ready & ((u8)1 << (lun)))
+
+#endif
+
struct rts51x_status {
u16 vid;
u16 pid;
@@ -70,14 +98,25 @@ struct rts51x_status {
};
struct rts51x_chip {
- u16 vendor_id;
- u16 product_id;
- char max_lun;
+ u16 vendor_id;
+ u16 product_id;
+ char max_lun;
- struct rts51x_status *status;
- int status_len;
+ struct rts51x_status *status;
+ int status_len;
- u32 flag;
+ u32 flag;
+#ifdef CONFIG_REALTEK_AUTOPM
+ struct us_data *us;
+ struct timer_list rts51x_suspend_timer;
+ unsigned long timer_expires;
+ int pwr_state;
+ u8 lun_ready;
+ enum RTS51X_STAT state;
+ int support_auto_delink;
+#endif
+ /* used to back up the protocol chosen in the probe1 phase */
+ proto_cmnd proto_handler_backup;
};
/* flag definition */
@@ -97,9 +136,14 @@ struct rts51x_chip {
#define RTS51X_GET_VID(chip) ((chip)->vendor_id)
#define RTS51X_GET_PID(chip) ((chip)->product_id)
+#define VENDOR_ID(chip) ((chip)->status[0].vid)
+#define PRODUCT_ID(chip) ((chip)->status[0].pid)
#define FW_VERSION(chip) ((chip)->status[0].fw_ver)
#define STATUS_LEN(chip) ((chip)->status_len)
+#define STATUS_SUCCESS 0
+#define STATUS_FAIL 1
+
/* Check card reader function */
#define SUPPORT_DETAILED_TYPE1(chip) \
CHK_BIT((chip)->status[0].function[0], 1)
@@ -119,15 +163,6 @@ struct rts51x_chip {
#define CHECK_ID(chip, pid, fw_ver) \
(CHECK_PID((chip), (pid)) && CHECK_FW_VER((chip), (fw_ver)))
-#define wait_timeout_x(task_state, msecs) \
-do { \
- set_current_state((task_state)); \
- schedule_timeout((msecs) * HZ / 1000); \
-} while (0)
-
-#define wait_timeout(msecs) \
- wait_timeout_x(TASK_INTERRUPTIBLE, (msecs))
-
static int init_realtek_cr(struct us_data *us);
/*
@@ -143,8 +178,9 @@ static int init_realtek_cr(struct us_data *us);
static const struct usb_device_id realtek_cr_ids[] = {
# include "unusual_realtek.h"
- { } /* Terminating entry */
+ {} /* Terminating entry */
};
+
MODULE_DEVICE_TABLE(usb, realtek_cr_ids);
#undef UNUSUAL_DEV
@@ -165,7 +201,7 @@ MODULE_DEVICE_TABLE(usb, realtek_cr_ids);
static struct us_unusual_dev realtek_cr_unusual_dev_list[] = {
# include "unusual_realtek.h"
- { } /* Terminating entry */
+ {} /* Terminating entry */
};
#undef UNUSUAL_DEV
@@ -174,8 +210,8 @@ static int rts51x_bulk_transport(struct us_data *us, u8 lun,
u8 *cmd, int cmd_len, u8 *buf, int buf_len,
enum dma_data_direction dir, int *act_len)
{
- struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
- struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
+ struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *)us->iobuf;
+ struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *)us->iobuf;
int result;
unsigned int residue;
unsigned int cswlen;
@@ -195,7 +231,7 @@ static int rts51x_bulk_transport(struct us_data *us, u8 lun,
/* send it to out endpoint */
result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
- bcb, cbwlen, NULL);
+ bcb, cbwlen, NULL);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
@@ -204,24 +240,23 @@ static int rts51x_bulk_transport(struct us_data *us, u8 lun,
if (buf && buf_len) {
unsigned int pipe = (dir == DMA_FROM_DEVICE) ?
- us->recv_bulk_pipe : us->send_bulk_pipe;
+ us->recv_bulk_pipe : us->send_bulk_pipe;
result = usb_stor_bulk_transfer_buf(us, pipe,
- buf, buf_len, NULL);
+ buf, buf_len, NULL);
if (result == USB_STOR_XFER_ERROR)
return USB_STOR_TRANSPORT_ERROR;
}
/* get CSW for device status */
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
- bcs, US_BULK_CS_WRAP_LEN, &cswlen);
+ bcs, US_BULK_CS_WRAP_LEN, &cswlen);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
/* check bulk status */
if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN)) {
US_DEBUGP("Signature mismatch: got %08X, expecting %08X\n",
- le32_to_cpu(bcs->Signature),
- US_BULK_CS_SIGN);
+ le32_to_cpu(bcs->Signature), US_BULK_CS_SIGN);
return USB_STOR_TRANSPORT_ERROR;
}
@@ -249,8 +284,8 @@ static int rts51x_bulk_transport(struct us_data *us, u8 lun,
case US_BULK_STAT_PHASE:
/* phase error -- note that a transport reset will be
- * invoked by the invoke_transport() function
- */
+ * invoked by the invoke_transport() function
+ */
return USB_STOR_TRANSPORT_ERROR;
}
@@ -266,10 +301,10 @@ static int rts51x_get_max_lun(struct us_data *us)
/* issue the command */
us->iobuf[0] = 0;
result = usb_stor_control_msg(us, us->recv_ctrl_pipe,
- US_BULK_GET_MAX_LUN,
- USB_DIR_IN | USB_TYPE_CLASS |
- USB_RECIP_INTERFACE,
- 0, us->ifnum, us->iobuf, 1, 10*HZ);
+ US_BULK_GET_MAX_LUN,
+ USB_DIR_IN | USB_TYPE_CLASS |
+ USB_RECIP_INTERFACE,
+ 0, us->ifnum, us->iobuf, 1, 10 * HZ);
US_DEBUGP("GetMaxLUN command result is %d, data is %d\n",
result, us->iobuf[0]);
@@ -284,16 +319,16 @@ static int rts51x_get_max_lun(struct us_data *us)
static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
{
int retval;
- u8 cmnd[12] = {0};
+ u8 cmnd[12] = { 0 };
US_DEBUGP("%s, addr = 0x%x, len = %d\n", __func__, addr, len);
cmnd[0] = 0xF0;
cmnd[1] = 0x0D;
- cmnd[2] = (u8)(addr >> 8);
- cmnd[3] = (u8)addr;
- cmnd[4] = (u8)(len >> 8);
- cmnd[5] = (u8)len;
+ cmnd[2] = (u8) (addr >> 8);
+ cmnd[3] = (u8) addr;
+ cmnd[4] = (u8) (len >> 8);
+ cmnd[5] = (u8) len;
retval = rts51x_bulk_transport(us, 0, cmnd, 12,
data, len, DMA_FROM_DEVICE, NULL);
@@ -306,16 +341,16 @@ static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
{
int retval;
- u8 cmnd[12] = {0};
+ u8 cmnd[12] = { 0 };
US_DEBUGP("%s, addr = 0x%x, len = %d\n", __func__, addr, len);
cmnd[0] = 0xF0;
cmnd[1] = 0x0E;
- cmnd[2] = (u8)(addr >> 8);
- cmnd[3] = (u8)addr;
- cmnd[4] = (u8)(len >> 8);
- cmnd[5] = (u8)len;
+ cmnd[2] = (u8) (addr >> 8);
+ cmnd[3] = (u8) addr;
+ cmnd[4] = (u8) (len >> 8);
+ cmnd[5] = (u8) len;
retval = rts51x_bulk_transport(us, 0, cmnd, 12,
data, len, DMA_TO_DEVICE, NULL);
@@ -329,7 +364,7 @@ static int rts51x_read_status(struct us_data *us,
u8 lun, u8 *status, int len, int *actlen)
{
int retval;
- u8 cmnd[12] = {0};
+ u8 cmnd[12] = { 0 };
US_DEBUGP("%s, lun = %d\n", __func__, lun);
@@ -356,12 +391,12 @@ static int rts51x_check_status(struct us_data *us, u8 lun)
US_DEBUGP("chip->status_len = %d\n", chip->status_len);
- chip->status[lun].vid = ((u16)buf[0] << 8) | buf[1];
- chip->status[lun].pid = ((u16)buf[2] << 8) | buf[3];
+ chip->status[lun].vid = ((u16) buf[0] << 8) | buf[1];
+ chip->status[lun].pid = ((u16) buf[2] << 8) | buf[3];
chip->status[lun].cur_lun = buf[4];
chip->status[lun].card_type = buf[5];
chip->status[lun].total_lun = buf[6];
- chip->status[lun].fw_ver = ((u16)buf[7] << 8) | buf[8];
+ chip->status[lun].fw_ver = ((u16) buf[7] << 8) | buf[8];
chip->status[lun].phy_exist = buf[9];
chip->status[lun].multi_flag = buf[10];
chip->status[lun].multi_card = buf[11];
@@ -432,6 +467,8 @@ static int config_autodelink_after_power_on(struct us_data *us)
int retval;
u8 value;
+ US_DEBUGP("%s: <---\n", __func__);
+
if (!CHK_AUTO_DELINK(chip))
return 0;
@@ -465,7 +502,7 @@ static int config_autodelink_after_power_on(struct us_data *us)
CLR_BIT(value, 2);
if (CHECK_ID(chip, 0x0159, 0x5889) ||
- CHECK_ID(chip, 0x0138, 0x3880)) {
+ CHECK_ID(chip, 0x0138, 0x3880)) {
CLR_BIT(value, 0);
CLR_BIT(value, 7);
}
@@ -487,6 +524,8 @@ static int config_autodelink_after_power_on(struct us_data *us)
}
}
+ US_DEBUGP("%s: --->\n", __func__);
+
return 0;
}
@@ -496,6 +535,8 @@ static int config_autodelink_before_power_down(struct us_data *us)
int retval;
u8 value;
+ US_DEBUGP("%s: <---\n", __func__);
+
if (!CHK_AUTO_DELINK(chip))
return 0;
@@ -528,14 +569,14 @@ static int config_autodelink_before_power_down(struct us_data *us)
return -EIO;
} else {
if (CHECK_ID(chip, 0x0159, 0x5889) ||
- CHECK_ID(chip, 0x0138, 0x3880) ||
- CHECK_ID(chip, 0x0138, 0x3882)) {
+ CHECK_ID(chip, 0x0138, 0x3880) ||
+ CHECK_ID(chip, 0x0138, 0x3882)) {
retval = rts51x_read_mem(us, 0xFE47, &value, 1);
if (retval < 0)
return -EIO;
if (CHECK_ID(chip, 0x0159, 0x5889) ||
- CHECK_ID(chip, 0x0138, 0x3880)) {
+ CHECK_ID(chip, 0x0138, 0x3880)) {
SET_BIT(value, 0);
SET_BIT(value, 7);
}
@@ -556,25 +597,323 @@ static int config_autodelink_before_power_down(struct us_data *us)
}
}
+ US_DEBUGP("%s: --->\n", __func__);
+
+ return 0;
+}
+
+static void fw5895_init(struct us_data *us)
+{
+ struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
+ int retval;
+ u8 val;
+
+ US_DEBUGP("%s: <---\n", __func__);
+
+ if ((PRODUCT_ID(chip) != 0x0158) || (FW_VERSION(chip) != 0x5895)) {
+ US_DEBUGP("Not the specified device, return immediately!\n");
+ } else {
+ retval = rts51x_read_mem(us, 0xFD6F, &val, 1);
+ if (retval == STATUS_SUCCESS && (val & 0x1F) == 0) {
+ val = 0x1F;
+ retval = rts51x_write_mem(us, 0xFD70, &val, 1);
+ if (retval != STATUS_SUCCESS)
+ US_DEBUGP("Write memory fail\n");
+ } else {
+ US_DEBUGP("Read memory fail, OR (val & 0x1F) != 0\n");
+ }
+ }
+
+ US_DEBUGP("%s: --->\n", __func__);
+}
+
+#ifdef CONFIG_REALTEK_AUTOPM
+static void fw5895_set_mmc_wp(struct us_data *us)
+{
+ struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
+ int retval;
+ u8 buf[13];
+
+ US_DEBUGP("%s: <---\n", __func__);
+
+ if ((PRODUCT_ID(chip) != 0x0158) || (FW_VERSION(chip) != 0x5895)) {
+ US_DEBUGP("Not the specified device, return immediately!\n");
+ } else {
+ retval = rts51x_read_mem(us, 0xFD6F, buf, 1);
+ if (retval == STATUS_SUCCESS && (buf[0] & 0x24) == 0x24) {
+ /* SD Exist and SD WP */
+ retval = rts51x_read_mem(us, 0xD04E, buf, 1);
+ if (retval == STATUS_SUCCESS) {
+ buf[0] |= 0x04;
+ retval = rts51x_write_mem(us, 0xFD70, buf, 1);
+ if (retval != STATUS_SUCCESS)
+ US_DEBUGP("Write memory fail\n");
+ } else {
+ US_DEBUGP("Read memory fail\n");
+ }
+ } else {
+ US_DEBUGP("Read memory fail, OR (buf[0]&0x24)!=0x24\n");
+ }
+ }
+
+ US_DEBUGP("%s: --->\n", __func__);
+}
+
+static void rts51x_modi_suspend_timer(struct rts51x_chip *chip)
+{
+ US_DEBUGP("%s: <---, state:%d\n", __func__, rts51x_get_stat(chip));
+
+ chip->timer_expires = jiffies + msecs_to_jiffies(1000*ss_delay);
+ mod_timer(&chip->rts51x_suspend_timer, chip->timer_expires);
+
+ US_DEBUGP("%s: --->\n", __func__);
+}
+
+static void rts51x_suspend_timer_fn(unsigned long data)
+{
+ struct rts51x_chip *chip = (struct rts51x_chip *)data;
+ struct us_data *us = chip->us;
+
+ US_DEBUGP("%s: <---\n", __func__);
+
+ switch (rts51x_get_stat(chip)) {
+ case RTS51X_STAT_INIT:
+ case RTS51X_STAT_RUN:
+ rts51x_modi_suspend_timer(chip);
+ break;
+ case RTS51X_STAT_IDLE:
+ case RTS51X_STAT_SS:
+ US_DEBUGP("%s: RTS51X_STAT_SS, intf->pm_usage_cnt:%d,"
+ "power.usage:%d\n", __func__,
+ atomic_read(&us->pusb_intf->pm_usage_cnt),
+ atomic_read(&us->pusb_intf->dev.power.usage_count));
+
+ if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) {
+ US_DEBUGP("%s: Ready to enter SS state.\n",
+ __func__);
+ rts51x_set_stat(chip, RTS51X_STAT_SS);
+ /* ignore mass storage interface's children */
+ pm_suspend_ignore_children(&us->pusb_intf->dev, true);
+ usb_autopm_put_interface(us->pusb_intf);
+ US_DEBUGP("%s: RTS51X_STAT_SS 01,"
+ "intf->pm_usage_cnt:%d, power.usage:%d\n",
+ __func__,
+ atomic_read(&us->pusb_intf->pm_usage_cnt),
+ atomic_read(
+ &us->pusb_intf->dev.power.usage_count));
+ }
+ break;
+ default:
+ US_DEBUGP("%s: Unknonwn state !!!\n", __func__);
+ break;
+ }
+
+ US_DEBUGP("%s: --->\n", __func__);
+}
+
+static inline int working_scsi(struct scsi_cmnd *srb)
+{
+ if ((srb->cmnd[0] == TEST_UNIT_READY) ||
+ (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
+{
+ struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
+ static int card_first_show = 1;
+ static u8 media_not_present[] = { 0x70, 0, 0x02, 0, 0, 0, 0,
+ 10, 0, 0, 0, 0, 0x3A, 0, 0, 0, 0, 0
+ };
+ static u8 invalid_cmd_field[] = { 0x70, 0, 0x05, 0, 0, 0, 0,
+ 10, 0, 0, 0, 0, 0x24, 0, 0, 0, 0, 0
+ };
+ int ret;
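+ /* Wrapper around the backed-up protocol handler: wake the device for
+ * real I/O, and answer TEST UNIT READY / PREVENT-ALLOW MEDIUM REMOVAL
+ * locally while selectively suspended. */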
+
+ US_DEBUGP("%s: <---\n", __func__);
+
+ if (working_scsi(srb)) {
+ US_DEBUGP("%s: working scsi, intf->pm_usage_cnt:%d,"
+ "power.usage:%d\n", __func__,
+ atomic_read(&us->pusb_intf->pm_usage_cnt),
+ atomic_read(&us->pusb_intf->dev.power.usage_count));
+
+ if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) {
+ ret = usb_autopm_get_interface(us->pusb_intf);
+ US_DEBUGP("%s: working scsi, ret=%d\n", __func__, ret);
+ }
+ if (rts51x_get_stat(chip) != RTS51X_STAT_RUN)
+ rts51x_set_stat(chip, RTS51X_STAT_RUN);
+ chip->proto_handler_backup(srb, us);
+ } else {
+ if (rts51x_get_stat(chip) == RTS51X_STAT_SS) {
+ US_DEBUGP("%s: NOT working scsi\n", __func__);
+ if ((srb->cmnd[0] == TEST_UNIT_READY) &&
+ (chip->pwr_state == US_SUSPEND)) {
+ if (TST_LUN_READY(chip, srb->device->lun)) {
+ srb->result = SAM_STAT_GOOD;
+ } else {
+ srb->result = SAM_STAT_CHECK_CONDITION;
+ memcpy(srb->sense_buffer,
+ media_not_present,
+ US_SENSE_SIZE);
+ }
+ US_DEBUGP("%s: TEST_UNIT_READY--->\n",
+ __func__);
+ goto out;
+ }
+ if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
+ int prevent = srb->cmnd[4] & 0x1;
+ if (prevent) {
+ srb->result = SAM_STAT_CHECK_CONDITION;
+ memcpy(srb->sense_buffer,
+ invalid_cmd_field,
+ US_SENSE_SIZE);
+ } else {
+ srb->result = SAM_STAT_GOOD;
+ }
+ US_DEBUGP("%s: ALLOW_MEDIUM_REMOVAL--->\n",
+ __func__);
+ goto out;
+ }
+ } else {
+ US_DEBUGP("%s: NOT working scsi, not SS\n", __func__);
+ chip->proto_handler_backup(srb, us);
+ /* Check whether a card is plugged in */
+ if (srb->cmnd[0] == TEST_UNIT_READY) {
+ if (srb->result == SAM_STAT_GOOD) {
+ SET_LUN_READY(chip, srb->device->lun);
+ if (card_first_show) {
+ card_first_show = 0;
+ fw5895_set_mmc_wp(us);
+ }
+ } else {
+ CLR_LUN_READY(chip, srb->device->lun);
+ card_first_show = 1;
+ }
+ }
+ if (rts51x_get_stat(chip) != RTS51X_STAT_IDLE)
+ rts51x_set_stat(chip, RTS51X_STAT_IDLE);
+ }
+ }
+out:
+ US_DEBUGP("%s: state:%d\n", __func__, rts51x_get_stat(chip));
+ if (rts51x_get_stat(chip) == RTS51X_STAT_RUN)
+ rts51x_modi_suspend_timer(chip);
+
+ US_DEBUGP("%s: --->\n", __func__);
+}
+
+static int realtek_cr_autosuspend_setup(struct us_data *us)
+{
+ struct rts51x_chip *chip;
+ struct rts51x_status *status = NULL;
+ u8 buf[16];
+ int retval;
+
+ chip = (struct rts51x_chip *)us->extra;
+ chip->support_auto_delink = 0;
+ chip->pwr_state = US_RESUME;
+ chip->lun_ready = 0;
+ rts51x_set_stat(chip, RTS51X_STAT_INIT);
+
+ retval = rts51x_read_status(us, 0, buf, 16, &(chip->status_len));
+ if (retval != STATUS_SUCCESS) {
+ US_DEBUGP("Read status fail\n");
+ return -EIO;
+ }
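+ /* decode the status reply into the LUN 0 entry of chip->status */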
+ status = chip->status;
+ status->vid = ((u16) buf[0] << 8) | buf[1];
+ status->pid = ((u16) buf[2] << 8) | buf[3];
+ status->cur_lun = buf[4];
+ status->card_type = buf[5];
+ status->total_lun = buf[6];
+ status->fw_ver = ((u16) buf[7] << 8) | buf[8];
+ status->phy_exist = buf[9];
+ status->multi_flag = buf[10];
+ status->multi_card = buf[11];
+ status->log_exist = buf[12];
+ if (chip->status_len == 16) {
+ status->detailed_type.detailed_type1 = buf[13];
+ status->function[0] = buf[14];
+ status->function[1] = buf[15];
+ }
+
+ /* back up the proto_handler in us->extra */
+ chip = (struct rts51x_chip *)(us->extra);
+ chip->proto_handler_backup = us->proto_handler;
+ /* Set the autosuspend_delay to 0 */
+ pm_runtime_set_autosuspend_delay(&us->pusb_dev->dev, 0);
+ /* override us->proto_handler set in get_protocol() */
+ us->proto_handler = rts51x_invoke_transport;
+
+ chip->timer_expires = 0;
+ setup_timer(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn,
+ (unsigned long)chip);
+ fw5895_init(us);
+
+ /* enable the autosuspend function of the usb device */
+ usb_enable_autosuspend(us->pusb_dev);
+
return 0;
}
+#endif
static void realtek_cr_destructor(void *extra)
{
struct rts51x_chip *chip = (struct rts51x_chip *)extra;
+ US_DEBUGP("%s: <---\n", __func__);
+
if (!chip)
return;
-
+#ifdef CONFIG_REALTEK_AUTOPM
+ if (ss_en) {
+ del_timer(&chip->rts51x_suspend_timer);
+ chip->timer_expires = 0;
+ }
+#endif
kfree(chip->status);
}
#ifdef CONFIG_PM
-static void realtek_pm_hook(struct us_data *us, int pm_state)
+static int realtek_cr_suspend(struct usb_interface *iface, pm_message_t message)
+{
+ struct us_data *us = usb_get_intfdata(iface);
+
+ US_DEBUGP("%s: <---\n", __func__);
+
+ /* wait until no command is running */
+ mutex_lock(&us->dev_mutex);
+
+ config_autodelink_before_power_down(us);
+
+ mutex_unlock(&us->dev_mutex);
+
+ US_DEBUGP("%s: --->\n", __func__);
+
+ return 0;
+}
+
+static int realtek_cr_resume(struct usb_interface *iface)
{
- if (pm_state == US_SUSPEND)
- (void)config_autodelink_before_power_down(us);
+ struct us_data *us = usb_get_intfdata(iface);
+
+ US_DEBUGP("%s: <---\n", __func__);
+
+ fw5895_init(us);
+ config_autodelink_after_power_on(us);
+
+ US_DEBUGP("%s: --->\n", __func__);
+
+ return 0;
}
+#else
+#define realtek_cr_suspend NULL
+#define realtek_cr_resume NULL
#endif
static int init_realtek_cr(struct us_data *us)
@@ -588,10 +927,6 @@ static int init_realtek_cr(struct us_data *us)
us->extra = chip;
us->extra_destructor = realtek_cr_destructor;
-#ifdef CONFIG_PM
- us->suspend_resume_hook = realtek_pm_hook;
-#endif
-
us->max_lun = chip->max_lun = rts51x_get_max_lun(us);
US_DEBUGP("chip->max_lun = %d\n", chip->max_lun);
@@ -602,18 +937,24 @@ static int init_realtek_cr(struct us_data *us)
goto INIT_FAIL;
for (i = 0; i <= (int)(chip->max_lun); i++) {
- retval = rts51x_check_status(us, (u8)i);
+ retval = rts51x_check_status(us, (u8) i);
if (retval < 0)
goto INIT_FAIL;
}
if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
- CHECK_FW_VER(chip, 0x5901))
+ CHECK_FW_VER(chip, 0x5901))
SET_AUTO_DELINK(chip);
if (STATUS_LEN(chip) == 16) {
if (SUPPORT_AUTO_DELINK(chip))
SET_AUTO_DELINK(chip);
}
+#ifdef CONFIG_REALTEK_AUTOPM
+ if (ss_en) {
+ chip->us = us;
+ realtek_cr_autosuspend_setup(us);
+ }
+#endif
US_DEBUGP("chip->flag = 0x%x\n", chip->flag);
@@ -632,7 +973,7 @@ INIT_FAIL:
}
static int realtek_cr_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+ const struct usb_device_id *id)
{
struct us_data *us;
int result;
@@ -640,25 +981,30 @@ static int realtek_cr_probe(struct usb_interface *intf,
US_DEBUGP("Probe Realtek Card Reader!\n");
result = usb_stor_probe1(&us, intf, id,
- (id - realtek_cr_ids) + realtek_cr_unusual_dev_list);
+ (id - realtek_cr_ids) +
+ realtek_cr_unusual_dev_list);
if (result)
return result;
result = usb_stor_probe2(us);
+
return result;
}
static struct usb_driver realtek_cr_driver = {
- .name = "ums-realtek",
- .probe = realtek_cr_probe,
- .disconnect = usb_stor_disconnect,
- .suspend = usb_stor_suspend,
- .resume = usb_stor_resume,
- .reset_resume = usb_stor_reset_resume,
- .pre_reset = usb_stor_pre_reset,
- .post_reset = usb_stor_post_reset,
- .id_table = realtek_cr_ids,
- .soft_unbind = 1,
+ .name = "ums-realtek",
+ .probe = realtek_cr_probe,
+ .disconnect = usb_stor_disconnect,
+ /* .suspend = usb_stor_suspend, */
+ /* .resume = usb_stor_resume, */
+ .reset_resume = usb_stor_reset_resume,
+ .suspend = realtek_cr_suspend,
+ .resume = realtek_cr_resume,
+ .pre_reset = usb_stor_pre_reset,
+ .post_reset = usb_stor_post_reset,
+ .id_table = realtek_cr_ids,
+ .soft_unbind = 1,
+ .supports_autosuspend = 1,
};
static int __init realtek_cr_init(void)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ccff3483eeb..3041a974faf 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1988,6 +1988,16 @@ UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100,
"Micro Mini 1GB",
USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
+/*
+ * Nick Bowler <nbowler@elliptictech.com>
+ * SCSI stack spams (otherwise harmless) error messages.
+ */
+UNUSUAL_DEV( 0xc251, 0x4003, 0x0100, 0x0100,
+ "Keil Software, Inc.",
+ "V2M MotherBoard",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NOT_LOCKABLE),
+
/* Reported by Andrew Simmons <andrew.simmons@gmail.com> */
UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001,
"DataStor",
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index c0c5665e60a..200fd7c6c7d 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -298,7 +298,7 @@ static int cbaf_cdid_get(struct cbaf *cbaf)
if (result < needed) {
dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs "
"%zu bytes needed)\n", (size_t)result, needed);
- return result;
+ return -ENOENT;
}
strlcpy(cbaf->device_name, di->DeviceFriendlyName, CBA_NAME_LEN);
@@ -350,7 +350,7 @@ static ssize_t cbaf_wusb_chid_store(struct device *dev,
return result;
result = cbaf_cdid_get(cbaf);
if (result < 0)
- return -result;
+ return result;
return size;
}
static DEVICE_ATTR(wusb_chid, 0600, cbaf_wusb_chid_show, cbaf_wusb_chid_store);
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index ca80171f42c..2acc7f504c5 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -58,7 +58,7 @@
* destination address.
*/
#include <linux/init.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 6ccd93a9b90..419334568be 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -83,6 +83,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
+#include <linux/ratelimit.h>
#include "wa-hc.h"
#include "wusbhc.h"
@@ -1217,16 +1218,14 @@ static int wa_xfer_status_to_errno(u8 status)
if (status == 0)
return 0;
if (status >= ARRAY_SIZE(xlat)) {
- if (printk_ratelimit())
- printk(KERN_ERR "%s(): BUG? "
+ printk_ratelimited(KERN_ERR "%s(): BUG? "
"Unknown WA transfer status 0x%02x\n",
__func__, real_status);
return -EINVAL;
}
errno = xlat[status];
if (unlikely(errno > 0)) {
- if (printk_ratelimit())
- printk(KERN_ERR "%s(): BUG? "
+ printk_ratelimited(KERN_ERR "%s(): BUG? "
"Inconsistent WA status: 0x%02x\n",
__func__, real_status);
errno = -errno;
diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c
index 001c8b4020a..bdcb13cc1d5 100644
--- a/drivers/uwb/uwbd.c
+++ b/drivers/uwb/uwbd.c
@@ -256,7 +256,7 @@ static void uwbd_event_handle(struct uwb_event *evt)
* UWB Daemon
*
* Listens to all UWB notifications and takes care to track the state
- * of the UWB neighboorhood for the kernel. When we do a run, we
+ * of the UWB neighbourhood for the kernel. When we do a run, we
* spinlock, move the list to a private copy and release the
* lock. Hold it as little as possible. Not a conflict: it is
* guaranteed we own the events in the private list.
diff --git a/drivers/uwb/whc-rc.c b/drivers/uwb/whc-rc.c
index 70a004aa19d..3ae3c702500 100644
--- a/drivers/uwb/whc-rc.c
+++ b/drivers/uwb/whc-rc.c
@@ -222,7 +222,7 @@ int whcrc_setup_rc_umc(struct whcrc *whcrc)
struct umc_dev *umc_dev = whcrc->umc_dev;
whcrc->area = umc_dev->resource.start;
- whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1;
+ whcrc->rc_len = resource_size(&umc_dev->resource);
result = -EBUSY;
if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) == NULL) {
dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e224a92baa1..882a51fe7b3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -12,6 +12,7 @@
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
@@ -28,10 +29,18 @@
#include "vhost.h"
+static int experimental_zcopytx;
+module_param(experimental_zcopytx, int, 0444);
+MODULE_PARM_DESC(experimental_zcopytx, "Enable Experimental Zero Copy TX");
+
/* Max number of bytes transferred before requeueing the job.
* Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000
+/* MAX number of TX used buffers for outstanding zerocopy */
+#define VHOST_MAX_PEND 128
+#define VHOST_GOODCOPY_LEN 256
+
enum {
VHOST_NET_VQ_RX = 0,
VHOST_NET_VQ_TX = 1,
@@ -54,6 +63,12 @@ struct vhost_net {
enum vhost_net_poll_state tx_poll_state;
};
+static bool vhost_sock_zcopy(struct socket *sock)
+{
+ return unlikely(experimental_zcopytx) &&
+ sock_flag(sock->sk, SOCK_ZEROCOPY);
+}
+
/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
size_t len, int iov_count)
@@ -129,6 +144,8 @@ static void handle_tx(struct vhost_net *net)
int err, wmem;
size_t hdr_size;
struct socket *sock;
+ struct vhost_ubuf_ref *uninitialized_var(ubufs);
+ bool zcopy;
/* TODO: check that we are running from vhost_worker? */
sock = rcu_dereference_check(vq->private_data, 1);
@@ -149,8 +166,13 @@ static void handle_tx(struct vhost_net *net)
if (wmem < sock->sk->sk_sndbuf / 2)
tx_poll_stop(net);
hdr_size = vq->vhost_hlen;
+ zcopy = vhost_sock_zcopy(sock);
for (;;) {
+ /* Release DMAs done buffers first */
+ if (zcopy)
+ vhost_zerocopy_signal_used(vq);
+
head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
ARRAY_SIZE(vq->iov),
&out, &in,
@@ -160,12 +182,25 @@ static void handle_tx(struct vhost_net *net)
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
+ int num_pends;
+
wmem = atomic_read(&sock->sk->sk_wmem_alloc);
if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
tx_poll_start(net, sock);
set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
break;
}
+ /* If more outstanding DMAs, queue the work.
+ * Handle upend_idx wrap around
+ */
+ num_pends = likely(vq->upend_idx >= vq->done_idx) ?
+ (vq->upend_idx - vq->done_idx) :
+ (vq->upend_idx + UIO_MAXIOV - vq->done_idx);
+ if (unlikely(num_pends > VHOST_MAX_PEND)) {
+ tx_poll_start(net, sock);
+ set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+ break;
+ }
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
vhost_disable_notify(&net->dev, vq);
continue;
@@ -188,9 +223,39 @@ static void handle_tx(struct vhost_net *net)
iov_length(vq->hdr, s), hdr_size);
break;
}
+ /* use msg_control to pass vhost zerocopy ubuf info to skb */
+ if (zcopy) {
+ vq->heads[vq->upend_idx].id = head;
+ if (len < VHOST_GOODCOPY_LEN) {
+ /* small copy: no need to wait for DMA completion */
+ vq->heads[vq->upend_idx].len =
+ VHOST_DMA_DONE_LEN;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ ubufs = NULL;
+ } else {
+ struct ubuf_info *ubuf = &vq->ubuf_info[head];
+
+ vq->heads[vq->upend_idx].len = len;
+ ubuf->callback = vhost_zerocopy_callback;
+ ubuf->arg = vq->ubufs;
+ ubuf->desc = vq->upend_idx;
+ msg.msg_control = ubuf;
+ msg.msg_controllen = sizeof(ubuf);
+ ubufs = vq->ubufs;
+ kref_get(&ubufs->kref);
+ }
+ vq->upend_idx = (vq->upend_idx + 1) % UIO_MAXIOV;
+ }
/* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(NULL, sock, &msg, len);
if (unlikely(err < 0)) {
+ if (zcopy) {
+ if (ubufs)
+ vhost_ubuf_put(ubufs);
+ vq->upend_idx = ((unsigned)vq->upend_idx - 1) %
+ UIO_MAXIOV;
+ }
vhost_discard_vq_desc(vq, 1);
tx_poll_start(net, sock);
break;
@@ -198,7 +263,8 @@ static void handle_tx(struct vhost_net *net)
if (err != len)
pr_debug("Truncated TX packet: "
" len %d != %zd\n", err, len);
- vhost_add_used_and_signal(&net->dev, vq, head, 0);
+ if (!zcopy)
+ vhost_add_used_and_signal(&net->dev, vq, head, 0);
total_len += len;
if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
vhost_poll_queue(&vq->poll);
@@ -603,6 +669,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
struct socket *sock, *oldsock;
struct vhost_virtqueue *vq;
+ struct vhost_ubuf_ref *ubufs, *oldubufs = NULL;
int r;
mutex_lock(&n->dev.mutex);
@@ -632,13 +699,31 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
oldsock = rcu_dereference_protected(vq->private_data,
lockdep_is_held(&vq->mutex));
if (sock != oldsock) {
+ ubufs = vhost_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock));
+ if (IS_ERR(ubufs)) {
+ r = PTR_ERR(ubufs);
+ goto err_ubufs;
+ }
+ oldubufs = vq->ubufs;
+ vq->ubufs = ubufs;
vhost_net_disable_vq(n, vq);
rcu_assign_pointer(vq->private_data, sock);
vhost_net_enable_vq(n, vq);
+
+ r = vhost_init_used(vq);
+ if (r)
+ goto err_vq;
}
mutex_unlock(&vq->mutex);
+ if (oldubufs) {
+ vhost_ubuf_put_and_wait(oldubufs);
+ mutex_lock(&vq->mutex);
+ vhost_zerocopy_signal_used(vq);
+ mutex_unlock(&vq->mutex);
+ }
+
if (oldsock) {
vhost_net_flush_vq(n, index);
fput(oldsock->file);
@@ -647,6 +732,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
mutex_unlock(&n->dev.mutex);
return 0;
+err_ubufs:
+ fput(sock->file);
err_vq:
mutex_unlock(&vq->mutex);
err:
@@ -776,6 +863,8 @@ static struct miscdevice vhost_net_misc = {
static int vhost_net_init(void)
{
+ if (experimental_zcopytx)
+ vhost_enable_zcopy(VHOST_NET_VQ_TX);
return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 734e1d74ad8..fc9a1d75281 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -195,8 +195,13 @@ static long vhost_test_run(struct vhost_test *n, int test)
lockdep_is_held(&vq->mutex));
rcu_assign_pointer(vq->private_data, priv);
+ r = vhost_init_used(&n->vqs[index]);
+
mutex_unlock(&vq->mutex);
+ if (r)
+ goto err;
+
if (oldpriv) {
vhost_test_flush_vq(n, index);
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ea966b35635..c14c42b95ab 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -37,6 +37,8 @@ enum {
VHOST_MEMORY_F_LOG = 0x1,
};
+static unsigned vhost_zcopy_mask __read_mostly;
+
#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
@@ -179,6 +181,9 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->call_ctx = NULL;
vq->call = NULL;
vq->log_ctx = NULL;
+ vq->upend_idx = 0;
+ vq->done_idx = 0;
+ vq->ubufs = NULL;
}
static int vhost_worker(void *data)
@@ -225,10 +230,28 @@ static int vhost_worker(void *data)
return 0;
}
+static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
+{
+ kfree(vq->indirect);
+ vq->indirect = NULL;
+ kfree(vq->log);
+ vq->log = NULL;
+ kfree(vq->heads);
+ vq->heads = NULL;
+ kfree(vq->ubuf_info);
+ vq->ubuf_info = NULL;
+}
+
+void vhost_enable_zcopy(int vq)
+{
+ vhost_zcopy_mask |= 0x1 << vq;
+}
+
/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
int i;
+ bool zcopy;
for (i = 0; i < dev->nvqs; ++i) {
dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
@@ -237,19 +260,21 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
GFP_KERNEL);
dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
UIO_MAXIOV, GFP_KERNEL);
-
+ zcopy = vhost_zcopy_mask & (0x1 << i);
+ if (zcopy)
+ dev->vqs[i].ubuf_info =
+ kmalloc(sizeof *dev->vqs[i].ubuf_info *
+ UIO_MAXIOV, GFP_KERNEL);
if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
- !dev->vqs[i].heads)
+ !dev->vqs[i].heads ||
+ (zcopy && !dev->vqs[i].ubuf_info))
goto err_nomem;
}
return 0;
err_nomem:
- for (; i >= 0; --i) {
- kfree(dev->vqs[i].indirect);
- kfree(dev->vqs[i].log);
- kfree(dev->vqs[i].heads);
- }
+ for (; i >= 0; --i)
+ vhost_vq_free_iovecs(&dev->vqs[i]);
return -ENOMEM;
}
@@ -257,14 +282,8 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
int i;
- for (i = 0; i < dev->nvqs; ++i) {
- kfree(dev->vqs[i].indirect);
- dev->vqs[i].indirect = NULL;
- kfree(dev->vqs[i].log);
- dev->vqs[i].log = NULL;
- kfree(dev->vqs[i].heads);
- dev->vqs[i].heads = NULL;
- }
+ for (i = 0; i < dev->nvqs; ++i)
+ vhost_vq_free_iovecs(&dev->vqs[i]);
}
long vhost_dev_init(struct vhost_dev *dev,
@@ -287,6 +306,7 @@ long vhost_dev_init(struct vhost_dev *dev,
dev->vqs[i].log = NULL;
dev->vqs[i].indirect = NULL;
dev->vqs[i].heads = NULL;
+ dev->vqs[i].ubuf_info = NULL;
dev->vqs[i].dev = dev;
mutex_init(&dev->vqs[i].mutex);
vhost_vq_reset(dev, dev->vqs + i);
@@ -390,6 +410,30 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
return 0;
}
+/* DMA completions in the lower device driver may arrive out of order.
+ * upend_idx tracks the end of the used-idx range with outstanding DMA,
+ * done_idx tracks its head. Once the lower device has completed DMA
+ * contiguously from done_idx, we signal the used idx to the guest.
+ */
+int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
+{
+ int i;
+ int j = 0;
+
+ for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
+ if ((vq->heads[i].len == VHOST_DMA_DONE_LEN)) {
+ vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
+ vhost_add_used_and_signal(vq->dev, vq,
+ vq->heads[i].id, 0);
+ ++j;
+ } else
+ break;
+ }
+ if (j)
+ vq->done_idx = i;
+ return j;
+}
+
/* Caller should have device mutex */
void vhost_dev_cleanup(struct vhost_dev *dev)
{
@@ -400,6 +444,13 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
vhost_poll_stop(&dev->vqs[i].poll);
vhost_poll_flush(&dev->vqs[i].poll);
}
+ /* Wait for all lower device DMAs done. */
+ if (dev->vqs[i].ubufs)
+ vhost_ubuf_put_and_wait(dev->vqs[i].ubufs);
+
+ /* Signal guest as appropriate. */
+ vhost_zerocopy_signal_used(&dev->vqs[i]);
+
if (dev->vqs[i].error_ctx)
eventfd_ctx_put(dev->vqs[i].error_ctx);
if (dev->vqs[i].error)
@@ -578,17 +629,6 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
return 0;
}
-static int init_used(struct vhost_virtqueue *vq,
- struct vring_used __user *used)
-{
- int r = put_user(vq->used_flags, &used->flags);
-
- if (r)
- return r;
- vq->signalled_used_valid = false;
- return get_user(vq->last_used_idx, &used->idx);
-}
-
static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
{
struct file *eventfp, *filep = NULL,
@@ -701,10 +741,6 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
}
}
- r = init_used(vq, (struct vring_used __user *)(unsigned long)
- a.used_user_addr);
- if (r)
- break;
vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
@@ -959,6 +995,57 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
return 0;
}
+static int vhost_update_used_flags(struct vhost_virtqueue *vq)
+{
+ void __user *used;
+ if (__put_user(vq->used_flags, &vq->used->flags) < 0)
+ return -EFAULT;
+ if (unlikely(vq->log_used)) {
+ /* Make sure the flag is seen before log. */
+ smp_wmb();
+ /* Log used flag write. */
+ used = &vq->used->flags;
+ log_write(vq->log_base, vq->log_addr +
+ (used - (void __user *)vq->used),
+ sizeof vq->used->flags);
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
+ }
+ return 0;
+}
+
+static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
+{
+ if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
+ return -EFAULT;
+ if (unlikely(vq->log_used)) {
+ void __user *used;
+ /* Make sure the event is seen before log. */
+ smp_wmb();
+ /* Log avail event write */
+ used = vhost_avail_event(vq);
+ log_write(vq->log_base, vq->log_addr +
+ (used - (void __user *)vq->used),
+ sizeof *vhost_avail_event(vq));
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
+ }
+ return 0;
+}
+
+int vhost_init_used(struct vhost_virtqueue *vq)
+{
+ int r;
+ if (!vq->private_data)
+ return 0;
+
+ r = vhost_update_used_flags(vq);
+ if (r)
+ return r;
+ vq->signalled_used_valid = false;
+ return get_user(vq->last_used_idx, &vq->used->idx);
+}
+
static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
struct iovec iov[], int iov_size)
{
@@ -1430,34 +1517,20 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
return false;
vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
- r = put_user(vq->used_flags, &vq->used->flags);
+ r = vhost_update_used_flags(vq);
if (r) {
vq_err(vq, "Failed to enable notification at %p: %d\n",
&vq->used->flags, r);
return false;
}
} else {
- r = put_user(vq->avail_idx, vhost_avail_event(vq));
+ r = vhost_update_avail_event(vq, vq->avail_idx);
if (r) {
vq_err(vq, "Failed to update avail event index at %p: %d\n",
vhost_avail_event(vq), r);
return false;
}
}
- if (unlikely(vq->log_used)) {
- void __user *used;
- /* Make sure data is seen before log. */
- smp_wmb();
- used = vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX) ?
- &vq->used->flags : vhost_avail_event(vq);
- /* Log used flags or event index entry write. Both are 16 bit
- * fields. */
- log_write(vq->log_base, vq->log_addr +
- (used - (void __user *)vq->used),
- sizeof(u16));
- if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
- }
/* They could have slipped one in as we were doing that: make
* sure it's written, then check again. */
smp_mb();
@@ -1480,9 +1553,55 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
return;
vq->used_flags |= VRING_USED_F_NO_NOTIFY;
if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
- r = put_user(vq->used_flags, &vq->used->flags);
+ r = vhost_update_used_flags(vq);
if (r)
vq_err(vq, "Failed to enable notification at %p: %d\n",
&vq->used->flags, r);
}
}
+
+static void vhost_zerocopy_done_signal(struct kref *kref)
+{
+ struct vhost_ubuf_ref *ubufs = container_of(kref, struct vhost_ubuf_ref,
+ kref);
+ wake_up(&ubufs->wait);
+}
+
+struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *vq,
+ bool zcopy)
+{
+ struct vhost_ubuf_ref *ubufs;
+ /* No zero copy backend? Nothing to count. */
+ if (!zcopy)
+ return NULL;
+ ubufs = kmalloc(sizeof *ubufs, GFP_KERNEL);
+ if (!ubufs)
+ return ERR_PTR(-ENOMEM);
+ kref_init(&ubufs->kref);
+ init_waitqueue_head(&ubufs->wait);
+ ubufs->vq = vq;
+ return ubufs;
+}
+
+void vhost_ubuf_put(struct vhost_ubuf_ref *ubufs)
+{
+ kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
+}
+
+void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
+{
+ kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
+ wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
+ kfree(ubufs);
+}
+
+void vhost_zerocopy_callback(void *arg)
+{
+ struct ubuf_info *ubuf = arg;
+ struct vhost_ubuf_ref *ubufs = ubuf->arg;
+ struct vhost_virtqueue *vq = ubufs->vq;
+
+ /* set len = VHOST_DMA_DONE_LEN to mark this descriptor's buffers as DMA done */
+ vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
+ kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
+}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 8e03379dd30..a801e2821d0 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -11,7 +11,12 @@
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
+
+/* Zerocopy support: the used buffer len is set to VHOST_DMA_DONE_LEN (1)
+ * once the lower device has completed DMA */
+#define VHOST_DMA_DONE_LEN 1
+#define VHOST_DMA_CLEAR_LEN 0
struct vhost_device;
@@ -50,6 +55,18 @@ struct vhost_log {
u64 len;
};
+struct vhost_virtqueue;
+
+struct vhost_ubuf_ref {
+ struct kref kref;
+ wait_queue_head_t wait;
+ struct vhost_virtqueue *vq;
+};
+
+struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *, bool zcopy);
+void vhost_ubuf_put(struct vhost_ubuf_ref *);
+void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *);
+
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
@@ -114,6 +131,16 @@ struct vhost_virtqueue {
/* Log write descriptors */
void __user *log_base;
struct vhost_log *log;
+ /* vhost zerocopy support fields below: */
+ /* last used idx for outstanding DMA zerocopy buffers */
+ int upend_idx;
+ /* first used idx for DMA done zerocopy buffers */
+ int done_idx;
+ /* an array of userspace buffers info */
+ struct ubuf_info *ubuf_info;
+ /* Reference counting for outstanding ubufs.
+ * Protected by vq mutex. Writers must also take device mutex. */
+ struct vhost_ubuf_ref *ubufs;
};
struct vhost_dev {
@@ -147,6 +174,7 @@ int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
+int vhost_init_used(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
unsigned count);
@@ -160,6 +188,8 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len);
+void vhost_zerocopy_callback(void *arg);
+int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);
#define vq_err(vq, fmt, ...) do { \
pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
@@ -186,4 +216,6 @@ static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
return acked_features & (1 << bit);
}
+void vhost_enable_zcopy(int vq);
+
#endif
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 5fc983c5b92..cf03ad06714 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -447,6 +447,8 @@ static int clcdfb_register(struct clcd_fb *fb)
goto out;
}
+ fb->fb.device = &fb->dev->dev;
+
fb->fb.fix.mmio_start = fb->dev->res.start;
fb->fb.fix.mmio_len = resource_size(&fb->dev->res);
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 4484c721f0f..817ab60f753 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -906,7 +906,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
if (map) {
/* use a pre-allocated memory buffer */
info->fix.smem_start = map->start;
- info->fix.smem_len = map->end - map->start + 1;
+ info->fix.smem_len = resource_size(map);
if (!request_mem_region(info->fix.smem_start,
info->fix.smem_len, pdev->name)) {
ret = -EBUSY;
@@ -932,7 +932,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
/* LCDC registers */
info->fix.mmio_start = regs->start;
- info->fix.mmio_len = regs->end - regs->start + 1;
+ info->fix.mmio_len = resource_size(regs);
if (!request_mem_region(info->fix.mmio_start,
info->fix.mmio_len, pdev->name)) {
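
The framebuffer conversions in this series replace the open-coded length of an inclusive [start, end] resource with the resource_size() helper from <linux/ioport.h>, which evaluates to end - start + 1. A minimal sketch of the pattern, reusing the probe variables above:

	struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	info->fix.mmio_start = regs->start;
	info->fix.mmio_len   = resource_size(regs);	/* == regs->end - regs->start + 1 */
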
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index d7aaec5667b..44bdce4242a 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -3458,9 +3458,10 @@ static int __devinit atyfb_setup_generic(struct pci_dev *pdev,
raddr = addr + 0x7ff000UL;
rrp = &pdev->resource[2];
- if ((rrp->flags & IORESOURCE_MEM) && request_mem_region(rrp->start, rrp->end - rrp->start + 1, "atyfb")) {
+ if ((rrp->flags & IORESOURCE_MEM) &&
+ request_mem_region(rrp->start, resource_size(rrp), "atyfb")) {
par->aux_start = rrp->start;
- par->aux_size = rrp->end - rrp->start + 1;
+ par->aux_size = resource_size(rrp);
raddr = rrp->start;
PRINTKI("using auxiliary register aperture\n");
}
@@ -3550,7 +3551,7 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev,
/* Reserve space */
res_start = rp->start;
- res_size = rp->end - rp->start + 1;
+ res_size = resource_size(rp);
if (!request_mem_region(res_start, res_size, "atyfb"))
return -EBUSY;
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index 34b2fc472fe..01a8fde67f2 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -486,7 +486,7 @@ static int __devinit au1100fb_drv_probe(struct platform_device *dev)
}
au1100fb_fix.mmio_start = regs_res->start;
- au1100fb_fix.mmio_len = regs_res->end - regs_res->start + 1;
+ au1100fb_fix.mmio_len = resource_size(regs_res);
if (!request_mem_region(au1100fb_fix.mmio_start, au1100fb_fix.mmio_len,
DRIVER_NAME)) {
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 2d93c8d61ad..278aeaa9250 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -117,6 +117,14 @@ config LCD_LD9040
If you have an LD9040 Panel, say Y to enable its
control driver.
+config LCD_AMS369FG06
+ tristate "AMS369FG06 AMOLED LCD Driver"
+ depends on SPI && BACKLIGHT_CLASS_DEVICE
+ default n
+ help
+ If you have an AMS369FG06 AMOLED Panel, say Y to enable its
+ LCD control driver.
+
endif # LCD_CLASS_DEVICE
#
@@ -327,6 +335,13 @@ config BACKLIGHT_PCF50633
If you have a backlight driven by a NXP PCF50633 MFD, say Y here to
enable its driver.
+config BACKLIGHT_AAT2870
+ tristate "AnalogicTech AAT2870 Backlight"
+ depends on BACKLIGHT_CLASS_DEVICE && MFD_AAT2870_CORE
+ help
+ If you have a AnalogicTech AAT2870 say Y to enable the
+ backlight driver.
+
endif # BACKLIGHT_CLASS_DEVICE
endif # BACKLIGHT_LCD_SUPPORT
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index ee72adb8786..fdd1fc4b277 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o
obj-$(CONFIG_LCD_LD9040) += ld9040.o
+obj-$(CONFIG_LCD_AMS369FG06) += ams369fg06.o
obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
@@ -37,4 +38,5 @@ obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
+obj-$(CONFIG_BACKLIGHT_AAT2870) += aat2870_bl.o
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c
new file mode 100644
index 00000000000..331f1ef1dad
--- /dev/null
+++ b/drivers/video/backlight/aat2870_bl.c
@@ -0,0 +1,246 @@
+/*
+ * linux/drivers/video/backlight/aat2870_bl.c
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Author: Jin Park <jinyoungp@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+#include <linux/mfd/aat2870.h>
+
+struct aat2870_bl_driver_data {
+ struct platform_device *pdev;
+ struct backlight_device *bd;
+
+ int channels;
+ int max_current;
+ int brightness; /* current brightness */
+};
+
+static inline int aat2870_brightness(struct aat2870_bl_driver_data *aat2870_bl,
+ int brightness)
+{
+ struct backlight_device *bd = aat2870_bl->bd;
+ int val;
+
+ val = brightness * (aat2870_bl->max_current - 1);
+ val /= bd->props.max_brightness;
+
+ return val;
+}
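
aat2870_brightness() maps the backlight brightness linearly onto the chip's current-setting range. A worked example with assumed values (the real AAT2870_CURRENT_* encoding comes from <linux/mfd/aat2870.h> and is not shown here):

	/* assume max_current = 28 and max_brightness = 255:
	 *   brightness 255 -> 255 * 27 / 255 = 27	(top of the range)
	 *   brightness 128 -> 128 * 27 / 255 = 13	(integer division)
	 *   brightness   0 ->   0 * 27 / 255 = 0	(backlight fully off)
	 */
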
+
+static inline int aat2870_bl_enable(struct aat2870_bl_driver_data *aat2870_bl)
+{
+ struct aat2870_data *aat2870
+ = dev_get_drvdata(aat2870_bl->pdev->dev.parent);
+
+ return aat2870->write(aat2870, AAT2870_BL_CH_EN,
+ (u8)aat2870_bl->channels);
+}
+
+static inline int aat2870_bl_disable(struct aat2870_bl_driver_data *aat2870_bl)
+{
+ struct aat2870_data *aat2870
+ = dev_get_drvdata(aat2870_bl->pdev->dev.parent);
+
+ return aat2870->write(aat2870, AAT2870_BL_CH_EN, 0x0);
+}
+
+static int aat2870_bl_get_brightness(struct backlight_device *bd)
+{
+ return bd->props.brightness;
+}
+
+static int aat2870_bl_update_status(struct backlight_device *bd)
+{
+ struct aat2870_bl_driver_data *aat2870_bl = dev_get_drvdata(&bd->dev);
+ struct aat2870_data *aat2870 =
+ dev_get_drvdata(aat2870_bl->pdev->dev.parent);
+ int brightness = bd->props.brightness;
+ int ret;
+
+ if ((brightness < 0) || (bd->props.max_brightness < brightness)) {
+ dev_err(&bd->dev, "invalid brightness, %d\n", brightness);
+ return -EINVAL;
+ }
+
+ dev_dbg(&bd->dev, "brightness=%d, power=%d, state=%d\n",
+ bd->props.brightness, bd->props.power, bd->props.state);
+
+ if ((bd->props.power != FB_BLANK_UNBLANK) ||
+ (bd->props.state & BL_CORE_FBBLANK) ||
+ (bd->props.state & BL_CORE_SUSPENDED))
+ brightness = 0;
+
+ ret = aat2870->write(aat2870, AAT2870_BLM,
+ (u8)aat2870_brightness(aat2870_bl, brightness));
+ if (ret < 0)
+ return ret;
+
+ if (brightness == 0) {
+ ret = aat2870_bl_disable(aat2870_bl);
+ if (ret < 0)
+ return ret;
+ } else if (aat2870_bl->brightness == 0) {
+ ret = aat2870_bl_enable(aat2870_bl);
+ if (ret < 0)
+ return ret;
+ }
+
+ aat2870_bl->brightness = brightness;
+
+ return 0;
+}
+
+static int aat2870_bl_check_fb(struct backlight_device *bd, struct fb_info *fi)
+{
+ return 1;
+}
+
+static const struct backlight_ops aat2870_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = aat2870_bl_get_brightness,
+ .update_status = aat2870_bl_update_status,
+ .check_fb = aat2870_bl_check_fb,
+};
+
+static int aat2870_bl_probe(struct platform_device *pdev)
+{
+ struct aat2870_bl_platform_data *pdata = pdev->dev.platform_data;
+ struct aat2870_bl_driver_data *aat2870_bl;
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ int ret = 0;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (pdev->id != AAT2870_ID_BL) {
+ dev_err(&pdev->dev, "Invalid device ID, %d\n", pdev->id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ aat2870_bl = kzalloc(sizeof(struct aat2870_bl_driver_data), GFP_KERNEL);
+ if (!aat2870_bl) {
+ dev_err(&pdev->dev,
+ "Failed to allocate memory for aat2870 backlight\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+
+ props.type = BACKLIGHT_RAW;
+ bd = backlight_device_register("aat2870-backlight", &pdev->dev,
+ aat2870_bl, &aat2870_bl_ops, &props);
+ if (IS_ERR(bd)) {
+ dev_err(&pdev->dev,
+ "Failed allocate memory for backlight device\n");
+ ret = PTR_ERR(bd);
+ goto out_kfree;
+ }
+
+ aat2870_bl->pdev = pdev;
+ platform_set_drvdata(pdev, aat2870_bl);
+
+ aat2870_bl->bd = bd;
+
+ if (pdata->channels > 0)
+ aat2870_bl->channels = pdata->channels;
+ else
+ aat2870_bl->channels = AAT2870_BL_CH_ALL;
+
+ if (pdata->max_current > 0)
+ aat2870_bl->max_current = pdata->max_current;
+ else
+ aat2870_bl->max_current = AAT2870_CURRENT_27_9;
+
+ if (pdata->max_brightness > 0)
+ bd->props.max_brightness = pdata->max_brightness;
+ else
+ bd->props.max_brightness = 255;
+
+ aat2870_bl->brightness = 0;
+ bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.brightness = bd->props.max_brightness;
+
+ ret = aat2870_bl_update_status(bd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to initialize\n");
+ goto out_bl_dev_unregister;
+ }
+
+ return 0;
+
+out_bl_dev_unregister:
+ backlight_device_unregister(bd);
+out_kfree:
+ kfree(aat2870_bl);
+out:
+ return ret;
+}
+
+static int aat2870_bl_remove(struct platform_device *pdev)
+{
+ struct aat2870_bl_driver_data *aat2870_bl = platform_get_drvdata(pdev);
+ struct backlight_device *bd = aat2870_bl->bd;
+
+ bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.brightness = 0;
+ backlight_update_status(bd);
+
+ backlight_device_unregister(bd);
+ kfree(aat2870_bl);
+
+ return 0;
+}
+
+static struct platform_driver aat2870_bl_driver = {
+ .driver = {
+ .name = "aat2870-backlight",
+ .owner = THIS_MODULE,
+ },
+ .probe = aat2870_bl_probe,
+ .remove = aat2870_bl_remove,
+};
+
+static int __init aat2870_bl_init(void)
+{
+ return platform_driver_register(&aat2870_bl_driver);
+}
+subsys_initcall(aat2870_bl_init);
+
+static void __exit aat2870_bl_exit(void)
+{
+ platform_driver_unregister(&aat2870_bl_driver);
+}
+module_exit(aat2870_bl_exit);
+
+MODULE_DESCRIPTION("AnalogicTech AAT2870 Backlight");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jin Park <jinyoungp@nvidia.com>");
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index d2a96a421ff..183b6f63985 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -722,8 +722,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
goto out2;
}
- bl->props.max_brightness =
- bl->props.brightness = ADP8860_MAX_BRIGHTNESS;
+ bl->props.brightness = ADP8860_MAX_BRIGHTNESS;
data->bl = bl;
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
new file mode 100644
index 00000000000..9f0a491e2a0
--- /dev/null
+++ b/drivers/video/backlight/ams369fg06.c
@@ -0,0 +1,646 @@
+/*
+ * ams369fg06 AMOLED LCD panel driver.
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * Derived from drivers/video/s6e63m0.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/wait.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/lcd.h>
+#include <linux/backlight.h>
+
+#define SLEEPMSEC 0x1000
+#define ENDDEF 0x2000
+#define DEFMASK 0xFF00
+#define COMMAND_ONLY 0xFE
+#define DATA_ONLY 0xFF
+
+#define MAX_GAMMA_LEVEL 5
+#define GAMMA_TABLE_COUNT 21
+
+#define MIN_BRIGHTNESS 0
+#define MAX_BRIGHTNESS 255
+#define DEFAULT_BRIGHTNESS 150
+
+struct ams369fg06 {
+ struct device *dev;
+ struct spi_device *spi;
+ unsigned int power;
+ struct lcd_device *ld;
+ struct backlight_device *bd;
+ struct lcd_platform_data *lcd_pd;
+};
+
+static const unsigned short seq_display_on[] = {
+ 0x14, 0x03,
+ ENDDEF, 0x0000
+};
+
+static const unsigned short seq_display_off[] = {
+ 0x14, 0x00,
+ ENDDEF, 0x0000
+};
+
+static const unsigned short seq_stand_by_on[] = {
+ 0x1D, 0xA1,
+ SLEEPMSEC, 200,
+ ENDDEF, 0x0000
+};
+
+static const unsigned short seq_stand_by_off[] = {
+ 0x1D, 0xA0,
+ SLEEPMSEC, 250,
+ ENDDEF, 0x0000
+};
+
+static const unsigned short seq_setting[] = {
+ 0x31, 0x08,
+ 0x32, 0x14,
+ 0x30, 0x02,
+ 0x27, 0x01,
+ 0x12, 0x08,
+ 0x13, 0x08,
+ 0x15, 0x00,
+ 0x16, 0x00,
+
+ 0xef, 0xd0,
+ DATA_ONLY, 0xe8,
+
+ 0x39, 0x44,
+ 0x40, 0x00,
+ 0x41, 0x3f,
+ 0x42, 0x2a,
+ 0x43, 0x27,
+ 0x44, 0x27,
+ 0x45, 0x1f,
+ 0x46, 0x44,
+ 0x50, 0x00,
+ 0x51, 0x00,
+ 0x52, 0x17,
+ 0x53, 0x24,
+ 0x54, 0x26,
+ 0x55, 0x1f,
+ 0x56, 0x43,
+ 0x60, 0x00,
+ 0x61, 0x3f,
+ 0x62, 0x2a,
+ 0x63, 0x25,
+ 0x64, 0x24,
+ 0x65, 0x1b,
+ 0x66, 0x5c,
+
+ 0x17, 0x22,
+ 0x18, 0x33,
+ 0x19, 0x03,
+ 0x1a, 0x01,
+ 0x22, 0xa4,
+ 0x23, 0x00,
+ 0x26, 0xa0,
+
+ 0x1d, 0xa0,
+ SLEEPMSEC, 300,
+
+ 0x14, 0x03,
+
+ ENDDEF, 0x0000
+};
+
+/* gamma value: 2.2 */
+static const unsigned int ams369fg06_22_250[] = {
+ 0x00, 0x3f, 0x2a, 0x27, 0x27, 0x1f, 0x44,
+ 0x00, 0x00, 0x17, 0x24, 0x26, 0x1f, 0x43,
+ 0x00, 0x3f, 0x2a, 0x25, 0x24, 0x1b, 0x5c,
+};
+
+static const unsigned int ams369fg06_22_200[] = {
+ 0x00, 0x3f, 0x28, 0x29, 0x27, 0x21, 0x3e,
+ 0x00, 0x00, 0x10, 0x25, 0x27, 0x20, 0x3d,
+ 0x00, 0x3f, 0x28, 0x27, 0x25, 0x1d, 0x53,
+};
+
+static const unsigned int ams369fg06_22_150[] = {
+ 0x00, 0x3f, 0x2d, 0x29, 0x28, 0x23, 0x37,
+ 0x00, 0x00, 0x0b, 0x25, 0x28, 0x22, 0x36,
+ 0x00, 0x3f, 0x2b, 0x28, 0x26, 0x1f, 0x4a,
+};
+
+static const unsigned int ams369fg06_22_100[] = {
+ 0x00, 0x3f, 0x30, 0x2a, 0x2b, 0x24, 0x2f,
+ 0x00, 0x00, 0x00, 0x25, 0x29, 0x24, 0x2e,
+ 0x00, 0x3f, 0x2f, 0x29, 0x29, 0x21, 0x3f,
+};
+
+static const unsigned int ams369fg06_22_50[] = {
+ 0x00, 0x3f, 0x3c, 0x2c, 0x2d, 0x27, 0x24,
+ 0x00, 0x00, 0x00, 0x22, 0x2a, 0x27, 0x23,
+ 0x00, 0x3f, 0x3b, 0x2c, 0x2b, 0x24, 0x31,
+};
+
+struct ams369fg06_gamma {
+ unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
+};
+
+static struct ams369fg06_gamma gamma_table = {
+ .gamma_22_table[0] = (unsigned int *)&ams369fg06_22_50,
+ .gamma_22_table[1] = (unsigned int *)&ams369fg06_22_100,
+ .gamma_22_table[2] = (unsigned int *)&ams369fg06_22_150,
+ .gamma_22_table[3] = (unsigned int *)&ams369fg06_22_200,
+ .gamma_22_table[4] = (unsigned int *)&ams369fg06_22_250,
+};
+
+static int ams369fg06_spi_write_byte(struct ams369fg06 *lcd, int addr, int data)
+{
+ u16 buf[1];
+ struct spi_message msg;
+
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ };
+
+ buf[0] = (addr << 8) | data;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(lcd->spi, &msg);
+}
+
+static int ams369fg06_spi_write(struct ams369fg06 *lcd, unsigned char address,
+ unsigned char command)
+{
+ int ret = 0;
+
+ if (address != DATA_ONLY)
+ ret = ams369fg06_spi_write_byte(lcd, 0x70, address);
+ if (command != COMMAND_ONLY)
+ ret = ams369fg06_spi_write_byte(lcd, 0x72, command);
+
+ return ret;
+}
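
The panel's 3-wire serial interface clocks out 16-bit words whose high byte selects the cycle type: 0x70 introduces a register address and 0x72 the data byte, so one register write becomes up to two SPI words. For example, the stand-by-off entry from the sequences above:

	/* register 0x1d, value 0xa0 -> two 16-bit words: 0x701d then 0x72a0 */
	ret = ams369fg06_spi_write(lcd, 0x1d, 0xa0);
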
+
+static int ams369fg06_panel_send_sequence(struct ams369fg06 *lcd,
+ const unsigned short *wbuf)
+{
+ int ret = 0, i = 0;
+
+ while ((wbuf[i] & DEFMASK) != ENDDEF) {
+ if ((wbuf[i] & DEFMASK) != SLEEPMSEC) {
+ ret = ams369fg06_spi_write(lcd, wbuf[i], wbuf[i+1]);
+ if (ret)
+ break;
+ } else
+ mdelay(wbuf[i+1]);
+ i += 2;
+ }
+
+ return ret;
+}
+
+static int _ams369fg06_gamma_ctl(struct ams369fg06 *lcd,
+ const unsigned int *gamma)
+{
+ unsigned int i = 0;
+ int ret = 0;
+
+ for (i = 0 ; i < GAMMA_TABLE_COUNT / 3; i++) {
+ ret = ams369fg06_spi_write(lcd, 0x40 + i, gamma[i]);
+ ret = ams369fg06_spi_write(lcd, 0x50 + i, gamma[i+7*1]);
+ ret = ams369fg06_spi_write(lcd, 0x60 + i, gamma[i+7*2]);
+ if (ret) {
+ dev_err(lcd->dev, "failed to set gamma table.\n");
+ goto gamma_err;
+ }
+ }
+
+gamma_err:
+ return ret;
+}
+
+static int ams369fg06_gamma_ctl(struct ams369fg06 *lcd, int brightness)
+{
+ int ret = 0;
+ int gamma = 0;
+
+ if ((brightness >= 0) && (brightness <= 50))
+ gamma = 0;
+ else if ((brightness > 50) && (brightness <= 100))
+ gamma = 1;
+ else if ((brightness > 100) && (brightness <= 150))
+ gamma = 2;
+ else if ((brightness > 150) && (brightness <= 200))
+ gamma = 3;
+ else if ((brightness > 200) && (brightness <= 255))
+ gamma = 4;
+
+ ret = _ams369fg06_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
+
+ return ret;
+}
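
ams369fg06_gamma_ctl() buckets the 0-255 brightness into five gamma-2.2 tables in 50-count bands, and _ams369fg06_gamma_ctl() then writes the selected table's 21 entries to registers 0x40-0x46, 0x50-0x56 and 0x60-0x66. A worked example:

	/* brightness 150 falls in the 101..150 band -> gamma level 2, so the
	 * ams369fg06_22_150 table is programmed:
	 *   0x40..0x46 <- entries 0..6, 0x50..0x56 <- 7..13, 0x60..0x66 <- 14..20 */
	ret = ams369fg06_gamma_ctl(lcd, 150);
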
+
+static int ams369fg06_ldi_init(struct ams369fg06 *lcd)
+{
+ int ret, i;
+ static const unsigned short *init_seq[] = {
+ seq_setting,
+ seq_stand_by_off,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
+ ret = ams369fg06_panel_send_sequence(lcd, init_seq[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int ams369fg06_ldi_enable(struct ams369fg06 *lcd)
+{
+ int ret, i;
+ static const unsigned short *init_seq[] = {
+ seq_stand_by_off,
+ seq_display_on,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
+ ret = ams369fg06_panel_send_sequence(lcd, init_seq[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int ams369fg06_ldi_disable(struct ams369fg06 *lcd)
+{
+ int ret, i;
+
+ static const unsigned short *init_seq[] = {
+ seq_display_off,
+ seq_stand_by_on,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
+ ret = ams369fg06_panel_send_sequence(lcd, init_seq[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int ams369fg06_power_is_on(int power)
+{
+ return ((power) <= FB_BLANK_NORMAL);
+}
+
+static int ams369fg06_power_on(struct ams369fg06 *lcd)
+{
+ int ret = 0;
+ struct lcd_platform_data *pd = NULL;
+ struct backlight_device *bd = NULL;
+
+ pd = lcd->lcd_pd;
+ if (!pd) {
+ dev_err(lcd->dev, "platform data is NULL.\n");
+ return -EFAULT;
+ }
+
+ bd = lcd->bd;
+ if (!bd) {
+ dev_err(lcd->dev, "backlight device is NULL.\n");
+ return -EFAULT;
+ }
+
+ if (!pd->power_on) {
+ dev_err(lcd->dev, "power_on is NULL.\n");
+ return -EFAULT;
+ } else {
+ pd->power_on(lcd->ld, 1);
+ mdelay(pd->power_on_delay);
+ }
+
+ if (!pd->reset) {
+ dev_err(lcd->dev, "reset is NULL.\n");
+ return -EFAULT;
+ } else {
+ pd->reset(lcd->ld);
+ mdelay(pd->reset_delay);
+ }
+
+ ret = ams369fg06_ldi_init(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "failed to initialize ldi.\n");
+ return ret;
+ }
+
+ ret = ams369fg06_ldi_enable(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "failed to enable ldi.\n");
+ return ret;
+ }
+
+ /* set brightness to current value after power on or resume. */
+ ret = ams369fg06_gamma_ctl(lcd, bd->props.brightness);
+ if (ret) {
+ dev_err(lcd->dev, "lcd gamma setting failed.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ams369fg06_power_off(struct ams369fg06 *lcd)
+{
+ int ret = 0;
+ struct lcd_platform_data *pd = NULL;
+
+ pd = lcd->lcd_pd;
+ if (!pd) {
+ dev_err(lcd->dev, "platform data is NULL\n");
+ return -EFAULT;
+ }
+
+ ret = ams369fg06_ldi_disable(lcd);
+ if (ret) {
+ dev_err(lcd->dev, "lcd setting failed.\n");
+ return -EIO;
+ }
+
+ mdelay(pd->power_off_delay);
+
+ if (!pd->power_on) {
+ dev_err(lcd->dev, "power_on is NULL.\n");
+ return -EFAULT;
+ } else
+ pd->power_on(lcd->ld, 0);
+
+ return 0;
+}
+
+static int ams369fg06_power(struct ams369fg06 *lcd, int power)
+{
+ int ret = 0;
+
+ if (ams369fg06_power_is_on(power) &&
+ !ams369fg06_power_is_on(lcd->power))
+ ret = ams369fg06_power_on(lcd);
+ else if (!ams369fg06_power_is_on(power) &&
+ ams369fg06_power_is_on(lcd->power))
+ ret = ams369fg06_power_off(lcd);
+
+ if (!ret)
+ lcd->power = power;
+
+ return ret;
+}
+
+static int ams369fg06_get_power(struct lcd_device *ld)
+{
+ struct ams369fg06 *lcd = lcd_get_data(ld);
+
+ return lcd->power;
+}
+
+static int ams369fg06_set_power(struct lcd_device *ld, int power)
+{
+ struct ams369fg06 *lcd = lcd_get_data(ld);
+
+ if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
+ power != FB_BLANK_NORMAL) {
+ dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
+ return -EINVAL;
+ }
+
+ return ams369fg06_power(lcd, power);
+}
+
+static int ams369fg06_get_brightness(struct backlight_device *bd)
+{
+ return bd->props.brightness;
+}
+
+static int ams369fg06_set_brightness(struct backlight_device *bd)
+{
+ int ret = 0;
+ int brightness = bd->props.brightness;
+ struct ams369fg06 *lcd = dev_get_drvdata(&bd->dev);
+
+ if (brightness < MIN_BRIGHTNESS ||
+ brightness > bd->props.max_brightness) {
+ dev_err(&bd->dev, "lcd brightness should be %d to %d.\n",
+ MIN_BRIGHTNESS, MAX_BRIGHTNESS);
+ return -EINVAL;
+ }
+
+ ret = ams369fg06_gamma_ctl(lcd, bd->props.brightness);
+ if (ret) {
+ dev_err(&bd->dev, "lcd brightness setting failed.\n");
+ return -EIO;
+ }
+
+ return ret;
+}
+
+static struct lcd_ops ams369fg06_lcd_ops = {
+ .get_power = ams369fg06_get_power,
+ .set_power = ams369fg06_set_power,
+};
+
+static const struct backlight_ops ams369fg06_backlight_ops = {
+ .get_brightness = ams369fg06_get_brightness,
+ .update_status = ams369fg06_set_brightness,
+};
+
+static int __devinit ams369fg06_probe(struct spi_device *spi)
+{
+ int ret = 0;
+ struct ams369fg06 *lcd = NULL;
+ struct lcd_device *ld = NULL;
+ struct backlight_device *bd = NULL;
+ struct backlight_properties props;
+
+ lcd = kzalloc(sizeof(struct ams369fg06), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ /* ams369fg06 lcd panel uses 3-wire 16bits SPI Mode. */
+ spi->bits_per_word = 16;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi setup failed.\n");
+ goto out_free_lcd;
+ }
+
+ lcd->spi = spi;
+ lcd->dev = &spi->dev;
+
+ lcd->lcd_pd = spi->dev.platform_data;
+ if (!lcd->lcd_pd) {
+ dev_err(&spi->dev, "platform data is NULL\n");
+ goto out_free_lcd;
+ }
+
+ ld = lcd_device_register("ams369fg06", &spi->dev, lcd,
+ &ams369fg06_lcd_ops);
+ if (IS_ERR(ld)) {
+ ret = PTR_ERR(ld);
+ goto out_free_lcd;
+ }
+
+ lcd->ld = ld;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = MAX_BRIGHTNESS;
+
+ bd = backlight_device_register("ams369fg06-bl", &spi->dev, lcd,
+ &ams369fg06_backlight_ops, &props);
+ if (IS_ERR(bd)) {
+ ret = PTR_ERR(bd);
+ goto out_lcd_unregister;
+ }
+
+ bd->props.brightness = DEFAULT_BRIGHTNESS;
+ lcd->bd = bd;
+
+ if (!lcd->lcd_pd->lcd_enabled) {
+ /*
		 * If the LCD panel was left off by the bootloader, the
		 * current status is treated as powerdown and the panel
		 * is then enabled here.
+ */
+ lcd->power = FB_BLANK_POWERDOWN;
+
+ ams369fg06_power(lcd, FB_BLANK_UNBLANK);
+ } else
+ lcd->power = FB_BLANK_UNBLANK;
+
+ dev_set_drvdata(&spi->dev, lcd);
+
+ dev_info(&spi->dev, "ams369fg06 panel driver has been probed.\n");
+
+ return 0;
+
+out_lcd_unregister:
+ lcd_device_unregister(ld);
+out_free_lcd:
+ kfree(lcd);
+ return ret;
+}
+
+static int __devexit ams369fg06_remove(struct spi_device *spi)
+{
+ struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
+
+ ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
+ backlight_device_unregister(lcd->bd);
+ lcd_device_unregister(lcd->ld);
+ kfree(lcd);
+
+ return 0;
+}
+
+#if defined(CONFIG_PM)
+static unsigned int before_power;
+
+static int ams369fg06_suspend(struct spi_device *spi, pm_message_t mesg)
+{
+ int ret = 0;
+ struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
+
+ dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power);
+
+ before_power = lcd->power;
+
+ /*
	 * When the LCD panel is suspended, it is powered off
	 * regardless of its current status.
+ */
+ ret = ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
+
+ return ret;
+}
+
+static int ams369fg06_resume(struct spi_device *spi)
+{
+ int ret = 0;
+ struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
+
+ /*
	 * After a suspend, if the panel status was FB_BLANK_UNBLANK
	 * (before_power is FB_BLANK_UNBLANK), the status is reset to
	 * FB_BLANK_POWERDOWN so that the power-on path runs on resume.
+ */
+ if (before_power == FB_BLANK_UNBLANK)
+ lcd->power = FB_BLANK_POWERDOWN;
+
+ dev_dbg(&spi->dev, "before_power = %d\n", before_power);
+
+ ret = ams369fg06_power(lcd, before_power);
+
+ return ret;
+}
+#else
+#define ams369fg06_suspend NULL
+#define ams369fg06_resume NULL
+#endif
+
+static void ams369fg06_shutdown(struct spi_device *spi)
+{
+ struct ams369fg06 *lcd = dev_get_drvdata(&spi->dev);
+
+ ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
+}
+
+static struct spi_driver ams369fg06_driver = {
+ .driver = {
+ .name = "ams369fg06",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ams369fg06_probe,
+ .remove = __devexit_p(ams369fg06_remove),
+ .shutdown = ams369fg06_shutdown,
+ .suspend = ams369fg06_suspend,
+ .resume = ams369fg06_resume,
+};
+
+static int __init ams369fg06_init(void)
+{
+ return spi_register_driver(&ams369fg06_driver);
+}
+
+static void __exit ams369fg06_exit(void)
+{
+ spi_unregister_driver(&ams369fg06_driver);
+}
+
+module_init(ams369fg06_init);
+module_exit(ams369fg06_exit);
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("ams369fg06 LCD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index 7281b2506a6..5934655eb1f 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -668,6 +668,7 @@ static int ld9040_probe(struct spi_device *spi)
struct ld9040 *lcd = NULL;
struct lcd_device *ld = NULL;
struct backlight_device *bd = NULL;
+ struct backlight_properties props;
lcd = kzalloc(sizeof(struct ld9040), GFP_KERNEL);
if (!lcd)
@@ -699,14 +700,17 @@ static int ld9040_probe(struct spi_device *spi)
lcd->ld = ld;
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = MAX_BRIGHTNESS;
+
bd = backlight_device_register("ld9040-bl", &spi->dev,
- lcd, &ld9040_backlight_ops, NULL);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_lcd;
+ lcd, &ld9040_backlight_ops, &props);
+ if (IS_ERR(bd)) {
+ ret = PTR_ERR(bd);
+ goto out_unregister_lcd;
}
- bd->props.max_brightness = MAX_BRIGHTNESS;
bd->props.brightness = MAX_BRIGHTNESS;
lcd->bd = bd;
@@ -731,6 +735,8 @@ static int ld9040_probe(struct spi_device *spi)
dev_info(&spi->dev, "ld9040 panel driver has been probed.\n");
return 0;
+out_unregister_lcd:
+ lcd_device_unregister(lcd->ld);
out_free_lcd:
kfree(lcd);
return ret;
@@ -741,6 +747,7 @@ static int __devexit ld9040_remove(struct spi_device *spi)
struct ld9040 *lcd = dev_get_drvdata(&spi->dev);
ld9040_power(lcd, FB_BLANK_POWERDOWN);
+ backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
kfree(lcd);
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index 322040f686c..694e5aab0d6 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -738,6 +738,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
struct s6e63m0 *lcd = NULL;
struct lcd_device *ld = NULL;
struct backlight_device *bd = NULL;
+ struct backlight_properties props;
lcd = kzalloc(sizeof(struct s6e63m0), GFP_KERNEL);
if (!lcd)
@@ -769,16 +770,18 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
lcd->ld = ld;
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = MAX_BRIGHTNESS;
+
bd = backlight_device_register("s6e63m0bl-bl", &spi->dev, lcd,
- &s6e63m0_backlight_ops, NULL);
+ &s6e63m0_backlight_ops, &props);
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
goto out_lcd_unregister;
}
- bd->props.max_brightness = MAX_BRIGHTNESS;
bd->props.brightness = MAX_BRIGHTNESS;
- bd->props.type = BACKLIGHT_RAW;
lcd->bd = bd;
/*
@@ -840,7 +843,7 @@ static int __devexit s6e63m0_remove(struct spi_device *spi)
}
#if defined(CONFIG_PM)
-unsigned int before_power;
+static unsigned int before_power;
static int s6e63m0_suspend(struct spi_device *spi, pm_message_t mesg)
{
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index 42fe155aba0..e02764319ff 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -303,7 +303,7 @@ static int __devinit cobalt_lcdfb_probe(struct platform_device *dev)
return -EBUSY;
}
- info->screen_size = res->end - res->start + 1;
+ info->screen_size = resource_size(res);
info->screen_base = ioremap(res->start, info->screen_size);
info->fbops = &cobalt_lcd_fbops;
info->fix = cobalt_lcdfb_fix;
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index c225dcce89e..9075bea5587 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -709,11 +709,11 @@ static int __init control_of_init(struct device_node *dp)
/* Map in frame buffer and registers */
p->fb_orig_base = fb_res.start;
- p->fb_orig_size = fb_res.end - fb_res.start + 1;
+ p->fb_orig_size = resource_size(&fb_res);
/* use the big-endian aperture (??) */
p->frame_buffer_phys = fb_res.start + 0x800000;
p->control_regs_phys = reg_res.start;
- p->control_regs_size = reg_res.end - reg_res.start + 1;
+ p->control_regs_size = resource_size(&reg_res);
if (!p->fb_orig_base ||
!request_mem_region(p->fb_orig_base,p->fb_orig_size,"controlfb")) {
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index cbdb1bd77c2..40e5f17d1e4 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -4,7 +4,7 @@
* Framebuffer support for the EP93xx series.
*
* Copyright (C) 2007 Bluewater Systems Ltd
- * Author: Ryan Mallon <ryan@bluewatersys.com>
+ * Author: Ryan Mallon
*
* Copyright (c) 2009 H Hartley Sweeten <hsweeten@visionengravers.com>
*
@@ -644,6 +644,6 @@ module_exit(ep93xxfb_exit);
MODULE_DESCRIPTION("EP93XX Framebuffer Driver");
MODULE_ALIAS("platform:ep93xx-fb");
-MODULE_AUTHOR("Ryan Mallon <ryan&bluewatersys.com>, "
+MODULE_AUTHOR("Ryan Mallon, "
"H Hartley Sweeten <hsweeten@visionengravers.com");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 804000183c5..32814e8800e 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -66,19 +66,26 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma,
return 0;
}
-int fb_deferred_io_fsync(struct file *file, int datasync)
+int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct fb_info *info = file->private_data;
+ struct inode *inode = file->f_path.dentry->d_inode;
+ int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ if (err)
+ return err;
/* Skip if deferred io is compiled-in but disabled on this fbdev */
if (!info->fbdefio)
return 0;
+ mutex_lock(&inode->i_mutex);
/* Kill off the delayed work */
cancel_delayed_work_sync(&info->deferred_work);
/* Run it immediately */
- return schedule_delayed_work(&info->deferred_work, 0);
+ err = schedule_delayed_work(&info->deferred_work, 0);
+ mutex_unlock(&inode->i_mutex);
+ return err;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
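
The fsync change tracks the VFS ->fsync() prototype, which now carries the byte range to flush and leaves filemap_write_and_wait_range() to the handler. A minimal sketch of a handler following the same pattern (hypothetical foofb driver, assuming the same 3.1-era prototype):

	static int foofb_fsync(struct file *file, loff_t start, loff_t end, int datasync)
	{
		struct inode *inode = file->f_path.dentry->d_inode;
		int err;

		err = filemap_write_and_wait_range(inode->i_mapping, start, end);
		if (err)
			return err;

		mutex_lock(&inode->i_mutex);
		/* flush driver-specific deferred state here */
		mutex_unlock(&inode->i_mutex);

		return 0;
	}
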
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index bedf5be27f0..0acc7d65aea 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -555,8 +555,6 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
static int fsl_diu_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- unsigned long htotal, vtotal;
-
pr_debug("check_var xres: %d\n", var->xres);
pr_debug("check_var yres: %d\n", var->yres);
@@ -635,20 +633,6 @@ static int fsl_diu_check_var(struct fb_var_screeninfo *var,
break;
}
- /* If the pixclock is below the minimum spec'd value then set to
- * refresh rate for 60Hz since this is supported by most monitors.
- * Refer to Documentation/fb/ for calculations.
- */
- if ((var->pixclock < MIN_PIX_CLK) || (var->pixclock > MAX_PIX_CLK)) {
- htotal = var->xres + var->right_margin + var->hsync_len +
- var->left_margin;
- vtotal = var->yres + var->lower_margin + var->vsync_len +
- var->upper_margin;
- var->pixclock = (vtotal * htotal * 6UL) / 100UL;
- var->pixclock = KHZ2PICOS(var->pixclock);
- pr_debug("pixclock set for 60Hz refresh = %u ps\n",
- var->pixclock);
- }
var->height = -1;
var->width = -1;
diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
index c6b554f72c6..5a5d0928df3 100644
--- a/drivers/video/geode/gx1fb_core.c
+++ b/drivers/video/geode/gx1fb_core.c
@@ -29,7 +29,7 @@ static int crt_option = 1;
static char panel_option[32] = "";
/* Modes relevant to the GX1 (taken from modedb.c) */
-static const struct fb_videomode __initdata gx1_modedb[] = {
+static const struct fb_videomode __devinitdata gx1_modedb[] = {
/* 640x480-60 VESA */
{ NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
@@ -195,7 +195,7 @@ static int gx1fb_blank(int blank_mode, struct fb_info *info)
return par->vid_ops->blank_display(info, blank_mode);
}
-static int __init gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
+static int __devinit gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
{
struct geodefb_par *par = info->par;
unsigned gx_base;
@@ -268,7 +268,7 @@ static struct fb_ops gx1fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static struct fb_info * __init gx1fb_init_fbinfo(struct device *dev)
+static struct fb_info * __devinit gx1fb_init_fbinfo(struct device *dev)
{
struct geodefb_par *par;
struct fb_info *info;
@@ -318,7 +318,7 @@ static struct fb_info * __init gx1fb_init_fbinfo(struct device *dev)
return info;
}
-static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int __devinit gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct geodefb_par *par;
struct fb_info *info;
@@ -382,7 +382,7 @@ static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *
return ret;
}
-static void gx1fb_remove(struct pci_dev *pdev)
+static void __devexit gx1fb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct geodefb_par *par = info->par;
@@ -441,7 +441,7 @@ static struct pci_driver gx1fb_driver = {
.name = "gx1fb",
.id_table = gx1fb_id_table,
.probe = gx1fb_probe,
- .remove = gx1fb_remove,
+ .remove = __devexit_p(gx1fb_remove),
};
static int __init gx1fb_init(void)
@@ -456,7 +456,7 @@ static int __init gx1fb_init(void)
return pci_register_driver(&gx1fb_driver);
}
-static void __exit gx1fb_cleanup(void)
+static void __devexit gx1fb_cleanup(void)
{
pci_unregister_driver(&gx1fb_driver);
}
diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c
index fbef15f7a21..614251a9af9 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/hecubafb.c
@@ -233,7 +233,7 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
videomemory = vzalloc(videomemorysize);
if (!videomemory)
- return retval;
+ goto err_videomem_alloc;
info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
if (!info)
@@ -275,6 +275,7 @@ err_fbreg:
framebuffer_release(info);
err_fballoc:
vfree(videomemory);
+err_videomem_alloc:
module_put(board->owner);
return retval;
}
diff --git a/drivers/video/i810/i810.h b/drivers/video/i810/i810.h
index f37de60ecc5..1414b73ac55 100644
--- a/drivers/video/i810/i810.h
+++ b/drivers/video/i810/i810.h
@@ -137,7 +137,7 @@
#define DRAM_ON 0x08
#define DRAM_OFF 0xE7
#define PG_ENABLE_MASK 0x01
-#define RING_SIZE_MASK (RINGBUFFER_SIZE - 1);
+#define RING_SIZE_MASK (RINGBUFFER_SIZE - 1)
/* defines for restoring registers partially */
#define ADDR_MAP_MASK (0x07 << 5)
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index f70bd63b018..ee1de3e26de 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -697,7 +697,7 @@ static int __devinit of_platform_mb862xx_probe(struct platform_device *ofdev)
goto fbrel;
}
- res_size = 1 + res.end - res.start;
+ res_size = resource_size(&res);
par->res = request_mem_region(res.start, res_size, DRV_NAME);
if (par->res == NULL) {
dev_err(dev, "Cannot claim framebuffer/mmio\n");
@@ -787,7 +787,7 @@ static int __devexit of_platform_mb862xx_remove(struct platform_device *ofdev)
{
struct fb_info *fbi = dev_get_drvdata(&ofdev->dev);
struct mb862xxfb_par *par = fbi->par;
- resource_size_t res_size = 1 + par->res->end - par->res->start;
+ resource_size_t res_size = resource_size(par->res);
unsigned long reg;
dev_dbg(fbi->dev, "%s release\n", fbi->fix.id);
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index c3636d55a3c..243d16f09b8 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -406,8 +406,7 @@ int mdp_probe(struct platform_device *pdev)
goto error_get_irq;
}
- mdp->base = ioremap(resource->start,
- resource->end - resource->start);
+ mdp->base = ioremap(resource->start, resource_size(resource));
if (mdp->base == 0) {
printk(KERN_ERR "msmfb: cannot allocate mdp regs!\n");
ret = -ENOMEM;
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index ec351309e60..c6e3b4fcdd6 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -525,10 +525,9 @@ static int setup_fbmem(struct msmfb_info *msmfb, struct platform_device *pdev)
return -ENOMEM;
}
fb->fix.smem_start = resource->start;
- fb->fix.smem_len = resource->end - resource->start;
- fbram = ioremap(resource->start,
- resource->end - resource->start);
- if (fbram == 0) {
+ fb->fix.smem_len = resource_size(resource);
+ fbram = ioremap(resource->start, resource_size(resource));
+ if (fbram == NULL) {
printk(KERN_ERR "msmfb: cannot allocate fbram!\n");
return -ENOMEM;
}
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index f838d9e277f..0fff59782e4 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -551,7 +551,7 @@ static int __devinit nuc900fb_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- size = (res->end - res->start) + 1;
+ size = resource_size(res);
fbi->mem = request_mem_region(res->start, size, pdev->name);
if (fbi->mem == NULL) {
dev_err(&pdev->dev, "failed to alloc memory region\n");
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index fdd5d4ae437..4e888ac09b3 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -504,14 +504,18 @@ static int taal_exit_ulps(struct omap_dss_device *dssdev)
return 0;
r = omapdss_dsi_display_enable(dssdev);
- if (r)
- goto err;
+ if (r) {
+ dev_err(&dssdev->dev, "failed to enable DSI\n");
+ goto err1;
+ }
omapdss_dsi_vc_enable_hs(dssdev, td->channel, true);
r = _taal_enable_te(dssdev, true);
- if (r)
- goto err;
+ if (r) {
+ dev_err(&dssdev->dev, "failed to re-enable TE");
+ goto err2;
+ }
enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
@@ -521,13 +525,15 @@ static int taal_exit_ulps(struct omap_dss_device *dssdev)
return 0;
-err:
- dev_err(&dssdev->dev, "exit ULPS failed");
- r = taal_panel_reset(dssdev);
-
- enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
- td->ulps_enabled = false;
+err2:
+ dev_err(&dssdev->dev, "failed to exit ULPS");
+ r = taal_panel_reset(dssdev);
+ if (!r) {
+ enable_irq(gpio_to_irq(panel_data->ext_te_gpio));
+ td->ulps_enabled = false;
+ }
+err1:
taal_queue_ulps_work(dssdev);
return r;
@@ -1241,11 +1247,8 @@ static void taal_power_off(struct omap_dss_device *dssdev)
int r;
r = taal_dcs_write_0(td, DCS_DISPLAY_OFF);
- if (!r) {
+ if (!r)
r = taal_sleep_in(td);
- /* HACK: wait a bit so that the message goes through */
- msleep(10);
- }
if (r) {
dev_err(&dssdev->dev,
@@ -1317,8 +1320,11 @@ static void taal_disable(struct omap_dss_device *dssdev)
dsi_bus_lock(dssdev);
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
- taal_wake_up(dssdev);
- taal_power_off(dssdev);
+ int r;
+
+ r = taal_wake_up(dssdev);
+ if (!r)
+ taal_power_off(dssdev);
}
dsi_bus_unlock(dssdev);
@@ -1897,20 +1903,6 @@ err:
mutex_unlock(&td->lock);
}
-static int taal_set_update_mode(struct omap_dss_device *dssdev,
- enum omap_dss_update_mode mode)
-{
- if (mode != OMAP_DSS_UPDATE_MANUAL)
- return -EINVAL;
- return 0;
-}
-
-static enum omap_dss_update_mode taal_get_update_mode(
- struct omap_dss_device *dssdev)
-{
- return OMAP_DSS_UPDATE_MANUAL;
-}
-
static struct omap_dss_driver taal_driver = {
.probe = taal_probe,
.remove = __exit_p(taal_remove),
@@ -1920,9 +1912,6 @@ static struct omap_dss_driver taal_driver = {
.suspend = taal_suspend,
.resume = taal_resume,
- .set_update_mode = taal_set_update_mode,
- .get_update_mode = taal_get_update_mode,
-
.update = taal_update,
.sync = taal_sync,
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 6b3e2da1141..0d12524db14 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -117,18 +117,6 @@ config OMAP2_DSS_MIN_FCK_PER_PCK
Max FCK is 173MHz, so this doesn't work if your PCK
is very high.
-config OMAP2_DSS_SLEEP_BEFORE_RESET
- bool "Sleep 50ms before DSS reset"
- default y
- help
- For some unknown reason we may get SYNC_LOST errors from the display
- subsystem at initialization time if we don't sleep before resetting
- the DSS. See the source (dss.c) for more comments.
-
- However, 50ms is quite long time to sleep, and with some
- configurations the SYNC_LOST may never happen, so the sleep can
- be disabled here.
-
config OMAP2_DSS_SLEEP_AFTER_VENC_RESET
bool "Sleep 20ms after VENC reset"
default y
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 3da426719dd..76821fefce9 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -183,8 +183,11 @@ static int omap_dss_probe(struct platform_device *pdev)
goto err_dss;
}
- /* keep clocks enabled to prevent context saves/restores during init */
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ r = dispc_init_platform_driver();
+ if (r) {
+ DSSERR("Failed to initialize dispc platform driver\n");
+ goto err_dispc;
+ }
r = rfbi_init_platform_driver();
if (r) {
@@ -192,12 +195,6 @@ static int omap_dss_probe(struct platform_device *pdev)
goto err_rfbi;
}
- r = dispc_init_platform_driver();
- if (r) {
- DSSERR("Failed to initialize dispc platform driver\n");
- goto err_dispc;
- }
-
r = venc_init_platform_driver();
if (r) {
DSSERR("Failed to initialize venc platform driver\n");
@@ -238,8 +235,6 @@ static int omap_dss_probe(struct platform_device *pdev)
pdata->default_device = dssdev;
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
-
return 0;
err_register:
@@ -268,11 +263,11 @@ static int omap_dss_remove(struct platform_device *pdev)
dss_uninitialize_debugfs();
+ hdmi_uninit_platform_driver();
+ dsi_uninit_platform_driver();
venc_uninit_platform_driver();
- dispc_uninit_platform_driver();
rfbi_uninit_platform_driver();
- dsi_uninit_platform_driver();
- hdmi_uninit_platform_driver();
+ dispc_uninit_platform_driver();
dss_uninit_platform_driver();
dss_uninit_overlays(pdev);
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 7a9a2e7d968..0f3961a1ce2 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -33,6 +33,8 @@
#include <linux/workqueue.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <plat/sram.h>
#include <plat/clock.h>
@@ -77,6 +79,12 @@ struct dispc_v_coef {
s8 vc00;
};
+enum omap_burst_size {
+ BURST_SIZE_X2 = 0,
+ BURST_SIZE_X4 = 1,
+ BURST_SIZE_X8 = 2,
+};
+
#define REG_GET(idx, start, end) \
FLD_GET(dispc_read_reg(idx), start, end)
@@ -92,7 +100,11 @@ struct dispc_irq_stats {
static struct {
struct platform_device *pdev;
void __iomem *base;
+
+ int ctx_loss_cnt;
+
int irq;
+ struct clk *dss_clk;
u32 fifo_size[3];
@@ -102,6 +114,7 @@ static struct {
u32 error_irqs;
struct work_struct error_work;
+ bool ctx_valid;
u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -134,18 +147,34 @@ static inline u32 dispc_read_reg(const u16 idx)
return __raw_readl(dispc.base + idx);
}
+static int dispc_get_ctx_loss_count(void)
+{
+ struct device *dev = &dispc.pdev->dev;
+ struct omap_display_platform_data *pdata = dev->platform_data;
+ struct omap_dss_board_info *board_data = pdata->board_data;
+ int cnt;
+
+ if (!board_data->get_context_loss_count)
+ return -ENOENT;
+
+ cnt = board_data->get_context_loss_count(dev);
+
+ WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
+
+ return cnt;
+}
+
#define SR(reg) \
dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
#define RR(reg) \
dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)])
-void dispc_save_context(void)
+static void dispc_save_context(void)
{
int i;
- if (cpu_is_omap24xx())
- return;
- SR(SYSCONFIG);
+ DSSDBG("dispc_save_context\n");
+
SR(IRQENABLE);
SR(CONTROL);
SR(CONFIG);
@@ -158,7 +187,8 @@ void dispc_save_context(void)
SR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
SR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
- SR(GLOBAL_ALPHA);
+ if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ SR(GLOBAL_ALPHA);
SR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -188,20 +218,25 @@ void dispc_save_context(void)
SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
- SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ if (dss_has_feature(FEAT_CPR)) {
+ SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
+ SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
+ SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ }
if (dss_has_feature(FEAT_MGR_LCD2)) {
- SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
- SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ if (dss_has_feature(FEAT_CPR)) {
+ SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
+ SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ }
SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
}
- SR(OVL_PRELOAD(OMAP_DSS_GFX));
+ if (dss_has_feature(FEAT_PRELOAD))
+ SR(OVL_PRELOAD(OMAP_DSS_GFX));
/* VID1 */
SR(OVL_BA0(OMAP_DSS_VIDEO1));
@@ -226,8 +261,10 @@ void dispc_save_context(void)
for (i = 0; i < 5; i++)
SR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
SR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
@@ -248,7 +285,8 @@ void dispc_save_context(void)
if (dss_has_feature(FEAT_ATTR2))
SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
- SR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
+ if (dss_has_feature(FEAT_PRELOAD))
+ SR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
/* VID2 */
SR(OVL_BA0(OMAP_DSS_VIDEO2));
@@ -273,8 +311,10 @@ void dispc_save_context(void)
for (i = 0; i < 5; i++)
SR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
- for (i = 0; i < 8; i++)
- SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
SR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
@@ -295,16 +335,35 @@ void dispc_save_context(void)
if (dss_has_feature(FEAT_ATTR2))
SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
- SR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
+ if (dss_has_feature(FEAT_PRELOAD))
+ SR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
if (dss_has_feature(FEAT_CORE_CLK_DIV))
SR(DIVISOR);
+
+ dispc.ctx_loss_cnt = dispc_get_ctx_loss_count();
+ dispc.ctx_valid = true;
+
+ DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
}
-void dispc_restore_context(void)
+static void dispc_restore_context(void)
{
- int i;
- RR(SYSCONFIG);
+ int i, ctx;
+
+ DSSDBG("dispc_restore_context\n");
+
+ if (!dispc.ctx_valid)
+ return;
+
+ ctx = dispc_get_ctx_loss_count();
+
+ if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
+ return;
+
+ DSSDBG("ctx_loss_count: saved %d, current %d\n",
+ dispc.ctx_loss_cnt, ctx);
+
/*RR(IRQENABLE);*/
/*RR(CONTROL);*/
RR(CONFIG);
@@ -317,7 +376,8 @@ void dispc_restore_context(void)
RR(TIMING_V(OMAP_DSS_CHANNEL_LCD));
RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD));
RR(DIVISORo(OMAP_DSS_CHANNEL_LCD));
- RR(GLOBAL_ALPHA);
+ if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ RR(GLOBAL_ALPHA);
RR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -347,20 +407,25 @@ void dispc_restore_context(void)
RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
- RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ if (dss_has_feature(FEAT_CPR)) {
+ RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
+ RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
+ RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ }
if (dss_has_feature(FEAT_MGR_LCD2)) {
RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
- RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
- RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ if (dss_has_feature(FEAT_CPR)) {
+ RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
+ RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ }
}
- RR(OVL_PRELOAD(OMAP_DSS_GFX));
+ if (dss_has_feature(FEAT_PRELOAD))
+ RR(OVL_PRELOAD(OMAP_DSS_GFX));
/* VID1 */
RR(OVL_BA0(OMAP_DSS_VIDEO1));
@@ -385,8 +450,10 @@ void dispc_restore_context(void)
for (i = 0; i < 5; i++)
RR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i));
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
RR(OVL_BA0_UV(OMAP_DSS_VIDEO1));
@@ -407,7 +474,8 @@ void dispc_restore_context(void)
if (dss_has_feature(FEAT_ATTR2))
RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1));
- RR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
+ if (dss_has_feature(FEAT_PRELOAD))
+ RR(OVL_PRELOAD(OMAP_DSS_VIDEO1));
/* VID2 */
RR(OVL_BA0(OMAP_DSS_VIDEO2));
@@ -432,8 +500,10 @@ void dispc_restore_context(void)
for (i = 0; i < 5; i++)
RR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i));
- for (i = 0; i < 8; i++)
- RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ for (i = 0; i < 8; i++)
+ RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
RR(OVL_BA0_UV(OMAP_DSS_VIDEO2));
@@ -454,7 +524,8 @@ void dispc_restore_context(void)
if (dss_has_feature(FEAT_ATTR2))
RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
- RR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
+ if (dss_has_feature(FEAT_PRELOAD))
+ RR(OVL_PRELOAD(OMAP_DSS_VIDEO2));
if (dss_has_feature(FEAT_CORE_CLK_DIV))
RR(DIVISOR);
@@ -471,19 +542,35 @@ void dispc_restore_context(void)
* the context is fully restored
*/
RR(IRQENABLE);
+
+ DSSDBG("context restored\n");
}
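
The save/restore pair keys off a context-loss counter: dispc_save_context() records it, and dispc_restore_context() skips the register writes when the counter has not advanced, i.e. the DSS power domain never actually lost its state. A worked trace:

	/* save:   ctx_loss_cnt = 3, ctx_valid = true
	 * resume without a power-domain OFF: counter still reads 3 -> early return
	 * resume after a real context loss:  counter reads 4 != 3 -> the RR() writes run
	 */
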
#undef SR
#undef RR
-static inline void enable_clocks(bool enable)
+int dispc_runtime_get(void)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ int r;
+
+ DSSDBG("dispc_runtime_get\n");
+
+ r = pm_runtime_get_sync(&dispc.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
}
+void dispc_runtime_put(void)
+{
+ int r;
+
+ DSSDBG("dispc_runtime_put\n");
+
+ r = pm_runtime_put(&dispc.pdev->dev);
+ WARN_ON(r < 0);
+}
+
+
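
With the manual DSS clock toggling removed, callers are expected to hold a runtime PM reference across any DISPC register access. A minimal caller sketch (the 800x480 size is illustrative only):

	int r;

	r = dispc_runtime_get();
	if (r)
		return r;

	dispc_set_lcd_size(OMAP_DSS_CHANNEL_LCD, 800, 480);

	dispc_runtime_put();
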
bool dispc_go_busy(enum omap_channel channel)
{
int bit;
@@ -505,8 +592,6 @@ void dispc_go(enum omap_channel channel)
int bit;
bool enable_bit, go_bit;
- enable_clocks(1);
-
if (channel == OMAP_DSS_CHANNEL_LCD ||
channel == OMAP_DSS_CHANNEL_LCD2)
bit = 0; /* LCDENABLE */
@@ -520,7 +605,7 @@ void dispc_go(enum omap_channel channel)
enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;
if (!enable_bit)
- goto end;
+ return;
if (channel == OMAP_DSS_CHANNEL_LCD ||
channel == OMAP_DSS_CHANNEL_LCD2)
@@ -535,7 +620,7 @@ void dispc_go(enum omap_channel channel)
if (go_bit) {
DSSERR("GO bit not down for channel %d\n", channel);
- goto end;
+ return;
}
DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" :
@@ -545,8 +630,6 @@ void dispc_go(enum omap_channel channel)
REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit);
else
REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
-end:
- enable_clocks(0);
}
static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value)
@@ -920,7 +1003,7 @@ static void _dispc_set_color_mode(enum omap_plane plane,
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
-static void _dispc_set_channel_out(enum omap_plane plane,
+void dispc_set_channel_out(enum omap_plane plane,
enum omap_channel channel)
{
int shift;
@@ -967,13 +1050,10 @@ static void _dispc_set_channel_out(enum omap_plane plane,
dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
}
-void dispc_set_burst_size(enum omap_plane plane,
+static void dispc_set_burst_size(enum omap_plane plane,
enum omap_burst_size burst_size)
{
int shift;
- u32 val;
-
- enable_clocks(1);
switch (plane) {
case OMAP_DSS_GFX:
@@ -988,11 +1068,24 @@ void dispc_set_burst_size(enum omap_plane plane,
return;
}
- val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
- val = FLD_MOD(val, burst_size, shift+1, shift);
- dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), burst_size, shift + 1, shift);
+}
- enable_clocks(0);
+static void dispc_configure_burst_sizes(void)
+{
+ int i;
+ const int burst_size = BURST_SIZE_X8;
+
+ /* Configure burst size always to maximum size */
+ for (i = 0; i < omap_dss_get_num_overlays(); ++i)
+ dispc_set_burst_size(i, burst_size);
+}
+
+u32 dispc_get_burst_size(enum omap_plane plane)
+{
+ unsigned unit = dss_feat_get_burst_size_unit();
+ /* burst multiplier is always x8 (see dispc_configure_burst_sizes()) */
+ return unit * 8;
}
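
Since dispc_configure_burst_sizes() always programs BURST_SIZE_X8, the burst size in bytes is simply eight times the per-SoC unit reported by dss_feat_get_burst_size_unit(). A sketch, assuming a 16-byte unit (the real value comes from the features table and is not shown here):

	/* assumed unit of 16 bytes -> 16 * 8 = 128-byte bursts */
	u32 burst = dispc_get_burst_size(OMAP_DSS_VIDEO1);
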
void dispc_enable_gamma_table(bool enable)
@@ -1009,6 +1102,40 @@ void dispc_enable_gamma_table(bool enable)
REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
}
+void dispc_enable_cpr(enum omap_channel channel, bool enable)
+{
+ u16 reg;
+
+ if (channel == OMAP_DSS_CHANNEL_LCD)
+ reg = DISPC_CONFIG;
+ else if (channel == OMAP_DSS_CHANNEL_LCD2)
+ reg = DISPC_CONFIG2;
+ else
+ return;
+
+ REG_FLD_MOD(reg, enable, 15, 15);
+}
+
+void dispc_set_cpr_coef(enum omap_channel channel,
+ struct omap_dss_cpr_coefs *coefs)
+{
+ u32 coef_r, coef_g, coef_b;
+
+ if (channel != OMAP_DSS_CHANNEL_LCD && channel != OMAP_DSS_CHANNEL_LCD2)
+ return;
+
+ coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) |
+ FLD_VAL(coefs->rb, 9, 0);
+ coef_g = FLD_VAL(coefs->gr, 31, 22) | FLD_VAL(coefs->gg, 20, 11) |
+ FLD_VAL(coefs->gb, 9, 0);
+ coef_b = FLD_VAL(coefs->br, 31, 22) | FLD_VAL(coefs->bg, 20, 11) |
+ FLD_VAL(coefs->bb, 9, 0);
+
+ dispc_write_reg(DISPC_CPR_COEF_R(channel), coef_r);
+ dispc_write_reg(DISPC_CPR_COEF_G(channel), coef_g);
+ dispc_write_reg(DISPC_CPR_COEF_B(channel), coef_b);
+}
+
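dispc_set_cpr_coef() above packs three coefficients per colour register into 10-bit fields at bits 31:22, 20:11 and 9:0. A usage sketch, assuming the usual s.8 fixed-point encoding in which 256 represents 1.0 (values are illustrative, not taken from a board file):

/* identity CPR matrix: pass red/green/blue through unchanged */
struct omap_dss_cpr_coefs coefs = {
	.rr = 256, .rg = 0,   .rb = 0,
	.gr = 0,   .gg = 256, .gb = 0,
	.br = 0,   .bg = 0,   .bb = 256,
};

dispc_set_cpr_coef(OMAP_DSS_CHANNEL_LCD, &coefs);
dispc_enable_cpr(OMAP_DSS_CHANNEL_LCD, true);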
static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
{
u32 val;
@@ -1029,9 +1156,7 @@ void dispc_enable_replication(enum omap_plane plane, bool enable)
else
bit = 10;
- enable_clocks(1);
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
- enable_clocks(0);
}
void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
@@ -1039,9 +1164,7 @@ void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
u32 val;
BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
- enable_clocks(1);
dispc_write_reg(DISPC_SIZE_MGR(channel), val);
- enable_clocks(0);
}
void dispc_set_digit_size(u16 width, u16 height)
@@ -1049,9 +1172,7 @@ void dispc_set_digit_size(u16 width, u16 height)
u32 val;
BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
- enable_clocks(1);
dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val);
- enable_clocks(0);
}
static void dispc_read_plane_fifo_sizes(void)
@@ -1059,18 +1180,17 @@ static void dispc_read_plane_fifo_sizes(void)
u32 size;
int plane;
u8 start, end;
+ u32 unit;
- enable_clocks(1);
+ unit = dss_feat_get_buffer_size_unit();
dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) {
- size = FLD_GET(dispc_read_reg(DISPC_OVL_FIFO_SIZE_STATUS(plane)),
- start, end);
+ size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(plane), start, end);
+ size *= unit;
dispc.fifo_size[plane] = size;
}
-
- enable_clocks(0);
}
u32 dispc_get_plane_fifo_size(enum omap_plane plane)
@@ -1078,15 +1198,22 @@ u32 dispc_get_plane_fifo_size(enum omap_plane plane)
return dispc.fifo_size[plane];
}
-void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
+void dispc_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high)
{
u8 hi_start, hi_end, lo_start, lo_end;
+ u32 unit;
+
+ unit = dss_feat_get_buffer_size_unit();
+
+ WARN_ON(low % unit != 0);
+ WARN_ON(high % unit != 0);
+
+ low /= unit;
+ high /= unit;
dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
- enable_clocks(1);
-
DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n",
plane,
REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
@@ -1098,18 +1225,12 @@ void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane),
FLD_VAL(high, hi_start, hi_end) |
FLD_VAL(low, lo_start, lo_end));
-
- enable_clocks(0);
}
void dispc_enable_fifomerge(bool enable)
{
- enable_clocks(1);
-
DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled");
REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14);
-
- enable_clocks(0);
}
static void _dispc_set_fir(enum omap_plane plane,
@@ -1729,14 +1850,7 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width,
return dispc_pclk_rate(channel) * vf * hf;
}
-void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out)
-{
- enable_clocks(1);
- _dispc_set_channel_out(plane, channel_out);
- enable_clocks(0);
-}
-
-static int _dispc_setup_plane(enum omap_plane plane,
+int dispc_setup_plane(enum omap_plane plane,
u32 paddr, u16 screen_width,
u16 pos_x, u16 pos_y,
u16 width, u16 height,
@@ -1744,7 +1858,7 @@ static int _dispc_setup_plane(enum omap_plane plane,
enum omap_color_mode color_mode,
bool ilace,
enum omap_dss_rotation_type rotation_type,
- u8 rotation, int mirror,
+ u8 rotation, bool mirror,
u8 global_alpha, u8 pre_mult_alpha,
enum omap_channel channel, u32 puv_addr)
{
@@ -1758,6 +1872,14 @@ static int _dispc_setup_plane(enum omap_plane plane,
u16 frame_height = height;
unsigned int field_offset = 0;
+ DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> "
+ "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
+ plane, paddr, screen_width, pos_x, pos_y,
+ width, height,
+ out_width, out_height,
+ ilace, color_mode,
+ rotation, mirror, channel);
+
if (paddr == 0)
return -EINVAL;
@@ -1903,9 +2025,13 @@ static int _dispc_setup_plane(enum omap_plane plane,
return 0;
}
-static void _dispc_enable_plane(enum omap_plane plane, bool enable)
+int dispc_enable_plane(enum omap_plane plane, bool enable)
{
+ DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
+
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
+
+ return 0;
}
static void dispc_disable_isr(void *data, u32 mask)
@@ -1929,8 +2055,6 @@ static void dispc_enable_lcd_out(enum omap_channel channel, bool enable)
int r;
u32 irq;
- enable_clocks(1);
-
/* When we disable LCD output, we need to wait until frame is done.
* Otherwise the DSS is still working, and turning off the clocks
* prevents DSS from going to OFF mode */
@@ -1964,8 +2088,6 @@ static void dispc_enable_lcd_out(enum omap_channel channel, bool enable)
if (r)
DSSERR("failed to unregister FRAMEDONE isr\n");
}
-
- enable_clocks(0);
}
static void _enable_digit_out(bool enable)
@@ -1978,12 +2100,8 @@ static void dispc_enable_digit_out(bool enable)
struct completion frame_done_completion;
int r;
- enable_clocks(1);
-
- if (REG_GET(DISPC_CONTROL, 1, 1) == enable) {
- enable_clocks(0);
+ if (REG_GET(DISPC_CONTROL, 1, 1) == enable)
return;
- }
if (enable) {
unsigned long flags;
@@ -2035,8 +2153,6 @@ static void dispc_enable_digit_out(bool enable)
_omap_dispc_set_irqs();
spin_unlock_irqrestore(&dispc.irq_lock, flags);
}
-
- enable_clocks(0);
}
bool dispc_is_channel_enabled(enum omap_channel channel)
@@ -2067,9 +2183,7 @@ void dispc_lcd_enable_signal_polarity(bool act_high)
if (!dss_has_feature(FEAT_LCDENABLEPOL))
return;
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
- enable_clocks(0);
}
void dispc_lcd_enable_signal(bool enable)
@@ -2077,9 +2191,7 @@ void dispc_lcd_enable_signal(bool enable)
if (!dss_has_feature(FEAT_LCDENABLESIGNAL))
return;
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
- enable_clocks(0);
}
void dispc_pck_free_enable(bool enable)
@@ -2087,19 +2199,15 @@ void dispc_pck_free_enable(bool enable)
if (!dss_has_feature(FEAT_PCKFREEENABLE))
return;
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
- enable_clocks(0);
}
void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable)
{
- enable_clocks(1);
if (channel == OMAP_DSS_CHANNEL_LCD2)
REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16);
else
REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
- enable_clocks(0);
}
@@ -2122,27 +2230,21 @@ void dispc_set_lcd_display_type(enum omap_channel channel,
return;
}
- enable_clocks(1);
if (channel == OMAP_DSS_CHANNEL_LCD2)
REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3);
else
REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
- enable_clocks(0);
}
void dispc_set_loadmode(enum omap_dss_load_mode mode)
{
- enable_clocks(1);
REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1);
- enable_clocks(0);
}
void dispc_set_default_color(enum omap_channel channel, u32 color)
{
- enable_clocks(1);
dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color);
- enable_clocks(0);
}
u32 dispc_get_default_color(enum omap_channel channel)
@@ -2153,9 +2255,7 @@ u32 dispc_get_default_color(enum omap_channel channel)
channel != OMAP_DSS_CHANNEL_LCD &&
channel != OMAP_DSS_CHANNEL_LCD2);
- enable_clocks(1);
l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel));
- enable_clocks(0);
return l;
}
@@ -2164,7 +2264,6 @@ void dispc_set_trans_key(enum omap_channel ch,
enum omap_dss_trans_key_type type,
u32 trans_key)
{
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, type, 11, 11);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2173,14 +2272,12 @@ void dispc_set_trans_key(enum omap_channel ch,
REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11);
dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
- enable_clocks(0);
}
void dispc_get_trans_key(enum omap_channel ch,
enum omap_dss_trans_key_type *type,
u32 *trans_key)
{
- enable_clocks(1);
if (type) {
if (ch == OMAP_DSS_CHANNEL_LCD)
*type = REG_GET(DISPC_CONFIG, 11, 11);
@@ -2194,33 +2291,28 @@ void dispc_get_trans_key(enum omap_channel ch,
if (trans_key)
*trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch));
- enable_clocks(0);
}
void dispc_enable_trans_key(enum omap_channel ch, bool enable)
{
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12);
else /* OMAP_DSS_CHANNEL_LCD2 */
REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10);
- enable_clocks(0);
}
void dispc_enable_alpha_blending(enum omap_channel ch, bool enable)
{
if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
return;
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
else /* OMAP_DSS_CHANNEL_LCD2 */
REG_FLD_MOD(DISPC_CONFIG2, enable, 18, 18);
- enable_clocks(0);
}
bool dispc_alpha_blending_enabled(enum omap_channel ch)
{
@@ -2229,7 +2321,6 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch)
if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
return false;
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
enabled = REG_GET(DISPC_CONFIG, 18, 18);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2238,7 +2329,6 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch)
enabled = REG_GET(DISPC_CONFIG2, 18, 18);
else
BUG();
- enable_clocks(0);
return enabled;
}
@@ -2248,7 +2338,6 @@ bool dispc_trans_key_enabled(enum omap_channel ch)
{
bool enabled;
- enable_clocks(1);
if (ch == OMAP_DSS_CHANNEL_LCD)
enabled = REG_GET(DISPC_CONFIG, 10, 10);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
@@ -2257,7 +2346,6 @@ bool dispc_trans_key_enabled(enum omap_channel ch)
enabled = REG_GET(DISPC_CONFIG2, 10, 10);
else
BUG();
- enable_clocks(0);
return enabled;
}
@@ -2285,12 +2373,10 @@ void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
return;
}
- enable_clocks(1);
if (channel == OMAP_DSS_CHANNEL_LCD2)
REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8);
else
REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
- enable_clocks(0);
}
void dispc_set_parallel_interface_mode(enum omap_channel channel,
@@ -2322,8 +2408,6 @@ void dispc_set_parallel_interface_mode(enum omap_channel channel,
return;
}
- enable_clocks(1);
-
if (channel == OMAP_DSS_CHANNEL_LCD2) {
l = dispc_read_reg(DISPC_CONTROL2);
l = FLD_MOD(l, stallmode, 11, 11);
@@ -2335,8 +2419,6 @@ void dispc_set_parallel_interface_mode(enum omap_channel channel,
l = FLD_MOD(l, gpout1, 16, 16);
dispc_write_reg(DISPC_CONTROL, l);
}
-
- enable_clocks(0);
}
static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
@@ -2389,10 +2471,8 @@ static void _dispc_set_lcd_timings(enum omap_channel channel, int hsw,
FLD_VAL(vbp, 31, 20);
}
- enable_clocks(1);
dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
- enable_clocks(0);
}
/* change name to mode? */
@@ -2435,10 +2515,8 @@ static void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
BUG_ON(lck_div < 1);
BUG_ON(pck_div < 2);
- enable_clocks(1);
dispc_write_reg(DISPC_DIVISORo(channel),
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
- enable_clocks(0);
}
static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div,
@@ -2457,7 +2535,7 @@ unsigned long dispc_fclk_rate(void)
switch (dss_get_dispc_clk_source()) {
case OMAP_DSS_CLK_SRC_FCK:
- r = dss_clk_get_rate(DSS_CLK_FCK);
+ r = clk_get_rate(dispc.dss_clk);
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
dsidev = dsi_get_dsidev_from_id(0);
@@ -2487,7 +2565,7 @@ unsigned long dispc_lclk_rate(enum omap_channel channel)
switch (dss_get_lcd_clk_source(channel)) {
case OMAP_DSS_CLK_SRC_FCK:
- r = dss_clk_get_rate(DSS_CLK_FCK);
+ r = clk_get_rate(dispc.dss_clk);
break;
case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
dsidev = dsi_get_dsidev_from_id(0);
@@ -2526,7 +2604,8 @@ void dispc_dump_clocks(struct seq_file *s)
enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
enum omap_dss_clk_source lcd_clk_src;
- enable_clocks(1);
+ if (dispc_runtime_get())
+ return;
seq_printf(s, "- DISPC -\n");
@@ -2574,7 +2653,8 @@ void dispc_dump_clocks(struct seq_file *s)
seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd);
}
- enable_clocks(0);
+
+ dispc_runtime_put();
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -2629,7 +2709,8 @@ void dispc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dispc_runtime_get())
+ return;
DUMPREG(DISPC_REVISION);
DUMPREG(DISPC_SYSCONFIG);
@@ -2649,7 +2730,8 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD));
DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD));
DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_GLOBAL_ALPHA);
+ if (dss_has_feature(FEAT_GLOBAL_ALPHA))
+ DUMPREG(DISPC_GLOBAL_ALPHA);
DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT));
DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD));
if (dss_has_feature(FEAT_MGR_LCD2)) {
@@ -2680,20 +2762,25 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD));
DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
- DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ if (dss_has_feature(FEAT_CPR)) {
+ DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD));
+ DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD));
+ DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD));
+ }
if (dss_has_feature(FEAT_MGR_LCD2)) {
DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2));
DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2));
DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
- DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ if (dss_has_feature(FEAT_CPR)) {
+ DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2));
+ DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2));
+ DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2));
+ }
}
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX));
+ if (dss_has_feature(FEAT_PRELOAD))
+ DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX));
DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO1));
DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO1));
@@ -2744,14 +2831,16 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2));
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3));
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7));
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO1));
@@ -2812,14 +2901,17 @@ void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2));
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3));
DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6));
- DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7));
+
+ if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6));
+ DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7));
+ }
if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO2));
@@ -2858,10 +2950,12 @@ void dispc_dump_regs(struct seq_file *s)
if (dss_has_feature(FEAT_ATTR2))
DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2));
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1));
- DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2));
+ if (dss_has_feature(FEAT_PRELOAD)) {
+ DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1));
+ DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2));
+ }
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dispc_runtime_put();
#undef DUMPREG
}
@@ -2882,9 +2976,7 @@ static void _dispc_set_pol_freq(enum omap_channel channel, bool onoff, bool rf,
l |= FLD_VAL(acbi, 11, 8);
l |= FLD_VAL(acb, 7, 0);
- enable_clocks(1);
dispc_write_reg(DISPC_POL_FREQ(channel), l);
- enable_clocks(0);
}
void dispc_set_pol_freq(enum omap_channel channel,
@@ -3005,15 +3097,11 @@ static void _omap_dispc_set_irqs(void)
mask |= isr_data->mask;
}
- enable_clocks(1);
-
old_mask = dispc_read_reg(DISPC_IRQENABLE);
/* clear the irqstatus for newly enabled irqs */
dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask);
dispc_write_reg(DISPC_IRQENABLE, mask);
-
- enable_clocks(0);
}
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
@@ -3522,13 +3610,6 @@ static void _omap_dispc_initial_config(void)
{
u32 l;
- l = dispc_read_reg(DISPC_SYSCONFIG);
- l = FLD_MOD(l, 2, 13, 12); /* MIDLEMODE: smart standby */
- l = FLD_MOD(l, 2, 4, 3); /* SIDLEMODE: smart idle */
- l = FLD_MOD(l, 1, 2, 2); /* ENWAKEUP */
- l = FLD_MOD(l, 1, 0, 0); /* AUTOIDLE */
- dispc_write_reg(DISPC_SYSCONFIG, l);
-
/* Exclusively enable DISPC_CORE_CLK and set divider to 1 */
if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
l = dispc_read_reg(DISPC_DIVISOR);
@@ -3552,58 +3633,8 @@ static void _omap_dispc_initial_config(void)
dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY);
dispc_read_plane_fifo_sizes();
-}
-int dispc_enable_plane(enum omap_plane plane, bool enable)
-{
- DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
-
- enable_clocks(1);
- _dispc_enable_plane(plane, enable);
- enable_clocks(0);
-
- return 0;
-}
-
-int dispc_setup_plane(enum omap_plane plane,
- u32 paddr, u16 screen_width,
- u16 pos_x, u16 pos_y,
- u16 width, u16 height,
- u16 out_width, u16 out_height,
- enum omap_color_mode color_mode,
- bool ilace,
- enum omap_dss_rotation_type rotation_type,
- u8 rotation, bool mirror, u8 global_alpha,
- u8 pre_mult_alpha, enum omap_channel channel,
- u32 puv_addr)
-{
- int r = 0;
-
- DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d, %d, %dx%d -> "
- "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n",
- plane, paddr, screen_width, pos_x, pos_y,
- width, height,
- out_width, out_height,
- ilace, color_mode,
- rotation, mirror, channel);
-
- enable_clocks(1);
-
- r = _dispc_setup_plane(plane,
- paddr, screen_width,
- pos_x, pos_y,
- width, height,
- out_width, out_height,
- color_mode, ilace,
- rotation_type,
- rotation, mirror,
- global_alpha,
- pre_mult_alpha,
- channel, puv_addr);
-
- enable_clocks(0);
-
- return r;
+ dispc_configure_burst_sizes();
}
/* DISPC HW IP initialisation */
@@ -3612,9 +3643,19 @@ static int omap_dispchw_probe(struct platform_device *pdev)
u32 rev;
int r = 0;
struct resource *dispc_mem;
+ struct clk *clk;
dispc.pdev = pdev;
+ clk = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get fck\n");
+ r = PTR_ERR(clk);
+ goto err_get_clk;
+ }
+
+ dispc.dss_clk = clk;
+
spin_lock_init(&dispc.irq_lock);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -3628,62 +3669,103 @@ static int omap_dispchw_probe(struct platform_device *pdev)
if (!dispc_mem) {
DSSERR("can't get IORESOURCE_MEM DISPC\n");
r = -EINVAL;
- goto fail0;
+ goto err_ioremap;
}
dispc.base = ioremap(dispc_mem->start, resource_size(dispc_mem));
if (!dispc.base) {
DSSERR("can't ioremap DISPC\n");
r = -ENOMEM;
- goto fail0;
+ goto err_ioremap;
}
dispc.irq = platform_get_irq(dispc.pdev, 0);
if (dispc.irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto fail1;
+ goto err_irq;
}
r = request_irq(dispc.irq, omap_dispc_irq_handler, IRQF_SHARED,
"OMAP DISPC", dispc.pdev);
if (r < 0) {
DSSERR("request_irq failed\n");
- goto fail1;
+ goto err_irq;
}
- enable_clocks(1);
+ pm_runtime_enable(&pdev->dev);
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_runtime_get;
_omap_dispc_initial_config();
_omap_dispc_initialize_irq();
- dispc_save_context();
-
rev = dispc_read_reg(DISPC_REVISION);
dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
- enable_clocks(0);
+ dispc_runtime_put();
return 0;
-fail1:
+
+err_runtime_get:
+ pm_runtime_disable(&pdev->dev);
+ free_irq(dispc.irq, dispc.pdev);
+err_irq:
iounmap(dispc.base);
-fail0:
+err_ioremap:
+ clk_put(dispc.dss_clk);
+err_get_clk:
return r;
}
static int omap_dispchw_remove(struct platform_device *pdev)
{
+ pm_runtime_disable(&pdev->dev);
+
+ clk_put(dispc.dss_clk);
+
free_irq(dispc.irq, dispc.pdev);
iounmap(dispc.base);
return 0;
}
+static int dispc_runtime_suspend(struct device *dev)
+{
+ dispc_save_context();
+ clk_disable(dispc.dss_clk);
+ dss_runtime_put();
+
+ return 0;
+}
+
+static int dispc_runtime_resume(struct device *dev)
+{
+ int r;
+
+ r = dss_runtime_get();
+ if (r < 0)
+ return r;
+
+ clk_enable(dispc.dss_clk);
+ dispc_restore_context();
+
+ return 0;
+}
+
+static const struct dev_pm_ops dispc_pm_ops = {
+ .runtime_suspend = dispc_runtime_suspend,
+ .runtime_resume = dispc_runtime_resume,
+};
+
static struct platform_driver omap_dispchw_driver = {
.probe = omap_dispchw_probe,
.remove = omap_dispchw_remove,
.driver = {
.name = "omapdss_dispc",
.owner = THIS_MODULE,
+ .pm = &dispc_pm_ops,
},
};
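With pm_runtime_enable() called in probe and dispc_pm_ops wired into the driver, the runtime callbacks take over the clock handling and context save/restore that the removed enable_clocks() calls used to do by hand. A condensed view of the resulting call flow (a sketch of the code above, not a separate implementation):

/*
 * dispc_runtime_get()
 *   -> pm_runtime_get_sync(&dispc.pdev->dev)
 *        -> dispc_runtime_resume()        (only if the block was suspended)
 *             -> dss_runtime_get()        (parent DSS block first)
 *             -> clk_enable(dispc.dss_clk)
 *             -> dispc_restore_context()
 *
 * dispc_runtime_put()
 *   -> pm_runtime_put(&dispc.pdev->dev)
 *        -> dispc_runtime_suspend()       (once the usage count reaches zero)
 *             -> dispc_save_context()
 *             -> clk_disable(dispc.dss_clk)
 *             -> dss_runtime_put()
 */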
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index c2dfc8c5005..94495e45ec5 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -29,6 +29,7 @@
#include <video/omapdss.h>
#include "dss.h"
+#include "dss_features.h"
static ssize_t display_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -65,48 +66,6 @@ static ssize_t display_enabled_store(struct device *dev,
return size;
}
-static ssize_t display_upd_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- enum omap_dss_update_mode mode = OMAP_DSS_UPDATE_AUTO;
- if (dssdev->driver->get_update_mode)
- mode = dssdev->driver->get_update_mode(dssdev);
- return snprintf(buf, PAGE_SIZE, "%d\n", mode);
-}
-
-static ssize_t display_upd_mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- int val, r;
- enum omap_dss_update_mode mode;
-
- if (!dssdev->driver->set_update_mode)
- return -EINVAL;
-
- r = kstrtoint(buf, 0, &val);
- if (r)
- return r;
-
- switch (val) {
- case OMAP_DSS_UPDATE_DISABLED:
- case OMAP_DSS_UPDATE_AUTO:
- case OMAP_DSS_UPDATE_MANUAL:
- mode = (enum omap_dss_update_mode)val;
- break;
- default:
- return -EINVAL;
- }
-
- r = dssdev->driver->set_update_mode(dssdev, mode);
- if (r)
- return r;
-
- return size;
-}
-
static ssize_t display_tear_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -294,8 +253,6 @@ static ssize_t display_wss_store(struct device *dev,
static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
display_enabled_show, display_enabled_store);
-static DEVICE_ATTR(update_mode, S_IRUGO|S_IWUSR,
- display_upd_mode_show, display_upd_mode_store);
static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
display_tear_show, display_tear_store);
static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR,
@@ -309,7 +266,6 @@ static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
static struct device_attribute *display_sysfs_attrs[] = {
&dev_attr_enabled,
- &dev_attr_update_mode,
&dev_attr_tear_elim,
&dev_attr_timings,
&dev_attr_rotate,
@@ -327,16 +283,13 @@ void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
EXPORT_SYMBOL(omapdss_default_get_resolution);
void default_get_overlay_fifo_thresholds(enum omap_plane plane,
- u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 fifo_size, u32 burst_size,
u32 *fifo_low, u32 *fifo_high)
{
- unsigned burst_size_bytes;
-
- *burst_size = OMAP_DSS_BURST_16x32;
- burst_size_bytes = 16 * 32 / 8;
+ unsigned buf_unit = dss_feat_get_buffer_size_unit();
- *fifo_high = fifo_size - 1;
- *fifo_low = fifo_size - burst_size_bytes;
+ *fifo_high = fifo_size - buf_unit;
+ *fifo_low = fifo_size - burst_size;
}
int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
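The default threshold helper above now derives both watermarks from feature data instead of a hardcoded 16x32-bit burst. A worked example with illustrative numbers (the real values come from dss_features and dispc_get_plane_fifo_size()):

u32 fifo_size = 1024;                   /* bytes, dispc_get_plane_fifo_size() */
u32 buf_unit  = 1;                      /* dss_feat_get_buffer_size_unit()    */
u32 burst     = 16 * 8;                 /* dispc_get_burst_size(): unit * 8   */
u32 fifo_high = fifo_size - buf_unit;   /* 1023: refill as late as possible   */
u32 fifo_low  = fifo_size - burst;      /* 896: keep room for one full burst  */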
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index ff6bd30132d..f053b180ecd 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -23,7 +23,6 @@
#define DSS_SUBSYS_NAME "DPI"
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -130,8 +129,6 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
bool is_tft;
int r = 0;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
-
dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
dssdev->panel.acbi, dssdev->panel.acb);
@@ -144,7 +141,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000,
&fck, &lck_div, &pck_div);
if (r)
- goto err0;
+ return r;
pck = fck / lck_div / pck_div / 1000;
@@ -158,12 +155,10 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
dispc_set_lcd_timings(dssdev->manager->id, t);
-err0:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
- return r;
+ return 0;
}
-static int dpi_basic_init(struct omap_dss_device *dssdev)
+static void dpi_basic_init(struct omap_dss_device *dssdev)
{
bool is_tft;
@@ -175,8 +170,6 @@ static int dpi_basic_init(struct omap_dss_device *dssdev)
OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN);
dispc_set_tft_data_lines(dssdev->manager->id,
dssdev->phy.dpi.data_lines);
-
- return 0;
}
int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
@@ -186,31 +179,38 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
- goto err0;
+ goto err_start_dev;
}
if (cpu_is_omap34xx()) {
r = regulator_enable(dpi.vdds_dsi_reg);
if (r)
- goto err1;
+ goto err_reg_enable;
}
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ r = dss_runtime_get();
+ if (r)
+ goto err_get_dss;
- r = dpi_basic_init(dssdev);
+ r = dispc_runtime_get();
if (r)
- goto err2;
+ goto err_get_dispc;
+
+ dpi_basic_init(dssdev);
if (dpi_use_dsi_pll(dssdev)) {
- dss_clk_enable(DSS_CLK_SYSCK);
+ r = dsi_runtime_get(dpi.dsidev);
+ if (r)
+ goto err_get_dsi;
+
r = dsi_pll_init(dpi.dsidev, 0, 1);
if (r)
- goto err3;
+ goto err_dsi_pll_init;
}
r = dpi_set_mode(dssdev);
if (r)
- goto err4;
+ goto err_set_mode;
mdelay(2);
@@ -218,19 +218,22 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
return 0;
-err4:
+err_set_mode:
if (dpi_use_dsi_pll(dssdev))
dsi_pll_uninit(dpi.dsidev, true);
-err3:
+err_dsi_pll_init:
if (dpi_use_dsi_pll(dssdev))
- dss_clk_disable(DSS_CLK_SYSCK);
-err2:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dsi_runtime_put(dpi.dsidev);
+err_get_dsi:
+ dispc_runtime_put();
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
-err1:
+err_reg_enable:
omap_dss_stop_device(dssdev);
-err0:
+err_start_dev:
return r;
}
EXPORT_SYMBOL(omapdss_dpi_display_enable);
@@ -242,10 +245,11 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
if (dpi_use_dsi_pll(dssdev)) {
dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
dsi_pll_uninit(dpi.dsidev, true);
- dss_clk_disable(DSS_CLK_SYSCK);
+ dsi_runtime_put(dpi.dsidev);
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dispc_runtime_put();
+ dss_runtime_put();
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
@@ -257,11 +261,26 @@ EXPORT_SYMBOL(omapdss_dpi_display_disable);
void dpi_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
+ int r;
+
DSSDBG("dpi_set_timings\n");
dssdev->panel.timings = *timings;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ r = dss_runtime_get();
+ if (r)
+ return;
+
+ r = dispc_runtime_get();
+ if (r) {
+ dss_runtime_put();
+ return;
+ }
+
dpi_set_mode(dssdev);
dispc_go(dssdev->manager->id);
+
+ dispc_runtime_put();
+ dss_runtime_put();
}
}
EXPORT_SYMBOL(dpi_set_timings);
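Renaming the numeric dpi error labels makes the acquire order explicit: the pipeline is brought up dss -> dispc -> (optionally) dsi, and each label undoes exactly what was acquired before it, in reverse. Compressed into a sketch of the enable path above:

/*
 * omapdss_dpi_display_enable()
 *   omap_dss_start_device()          fails -> err_start_dev
 *   regulator_enable()   (omap34xx)  fails -> err_reg_enable
 *   dss_runtime_get()                fails -> err_get_dss
 *   dispc_runtime_get()              fails -> err_get_dispc
 *   dsi_runtime_get()    (DSI PLL)   fails -> err_get_dsi
 *   dsi_pll_init()       (DSI PLL)   fails -> err_dsi_pll_init
 *   dpi_set_mode()                   fails -> err_set_mode
 *
 * omapdss_dpi_display_disable() releases the same resources in reverse order.
 */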
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 345757cfcbe..7adbbeb8433 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -36,6 +36,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include <plat/clock.h>
@@ -267,8 +268,12 @@ struct dsi_isr_tables {
struct dsi_data {
struct platform_device *pdev;
void __iomem *base;
+
int irq;
+ struct clk *dss_clk;
+ struct clk *sys_clk;
+
void (*dsi_mux_pads)(bool enable);
struct dsi_clock_info current_cinfo;
@@ -389,15 +394,6 @@ static inline u32 dsi_read_reg(struct platform_device *dsidev,
return __raw_readl(dsi->base + idx.idx);
}
-
-void dsi_save_context(void)
-{
-}
-
-void dsi_restore_context(void)
-{
-}
-
void dsi_bus_lock(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -493,9 +489,18 @@ static void dsi_perf_show(struct platform_device *dsidev, const char *name)
total_bytes * 1000 / total_us);
}
#else
-#define dsi_perf_mark_setup(x)
-#define dsi_perf_mark_start(x)
-#define dsi_perf_show(x, y)
+static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
+{
+}
+
+static inline void dsi_perf_mark_start(struct platform_device *dsidev)
+{
+}
+
+static inline void dsi_perf_show(struct platform_device *dsidev,
+ const char *name)
+{
+}
#endif
static void print_irq_status(u32 status)
@@ -1039,13 +1044,27 @@ static u32 dsi_get_errors(struct platform_device *dsidev)
return e;
}
-/* DSI func clock. this could also be dsi_pll_hsdiv_dsi_clk */
-static inline void enable_clocks(bool enable)
+int dsi_runtime_get(struct platform_device *dsidev)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ int r;
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+
+ DSSDBG("dsi_runtime_get\n");
+
+ r = pm_runtime_get_sync(&dsi->pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+void dsi_runtime_put(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ int r;
+
+ DSSDBG("dsi_runtime_put\n");
+
+ r = pm_runtime_put(&dsi->pdev->dev);
+ WARN_ON(r < 0);
}
/* source clock for DSI PLL. this could also be PCLKFREE */
@@ -1055,9 +1074,9 @@ static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (enable)
- dss_clk_enable(DSS_CLK_SYSCK);
+ clk_enable(dsi->sys_clk);
else
- dss_clk_disable(DSS_CLK_SYSCK);
+ clk_disable(dsi->sys_clk);
if (enable && dsi->pll_locked) {
if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
@@ -1150,10 +1169,11 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
{
unsigned long r;
int dsi_module = dsi_get_dsidev_id(dsidev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
/* DSI FCLK source is DSS_CLK_FCK */
- r = dss_clk_get_rate(DSS_CLK_FCK);
+ r = clk_get_rate(dsi->dss_clk);
} else {
/* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
@@ -1262,7 +1282,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
return -EINVAL;
if (cinfo->use_sys_clk) {
- cinfo->clkin = dss_clk_get_rate(DSS_CLK_SYSCK);
+ cinfo->clkin = clk_get_rate(dsi->sys_clk);
/* XXX it is unclear if highfreq should be used
* with DSS_SYS_CLK source also */
cinfo->highfreq = 0;
@@ -1311,7 +1331,7 @@ int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
int match = 0;
unsigned long dss_sys_clk, max_dss_fck;
- dss_sys_clk = dss_clk_get_rate(DSS_CLK_SYSCK);
+ dss_sys_clk = clk_get_rate(dsi->sys_clk);
max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
@@ -1601,7 +1621,6 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
dsi->vdds_dsi_reg = vdds_dsi;
}
- enable_clocks(1);
dsi_enable_pll_clock(dsidev, 1);
/*
* Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
@@ -1653,7 +1672,6 @@ err1:
}
err0:
dsi_disable_scp_clk(dsidev);
- enable_clocks(0);
dsi_enable_pll_clock(dsidev, 0);
return r;
}
@@ -1671,7 +1689,6 @@ void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
}
dsi_disable_scp_clk(dsidev);
- enable_clocks(0);
dsi_enable_pll_clock(dsidev, 0);
DSSDBG("PLL uninit done\n");
@@ -1688,7 +1705,8 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
dispc_clk_src = dss_get_dispc_clk_source();
dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
- enable_clocks(1);
+ if (dsi_runtime_get(dsidev))
+ return;
seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
@@ -1731,7 +1749,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
- enable_clocks(0);
+ dsi_runtime_put(dsidev);
}
void dsi_dump_clocks(struct seq_file *s)
@@ -1873,7 +1891,8 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dsi_runtime_get(dsidev))
+ return;
dsi_enable_scp_clk(dsidev);
DUMPREG(DSI_REVISION);
@@ -1947,7 +1966,7 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
DUMPREG(DSI_PLL_CONFIGURATION2);
dsi_disable_scp_clk(dsidev);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dsi_runtime_put(dsidev);
#undef DUMPREG
}
@@ -2463,28 +2482,6 @@ static void dsi_cio_uninit(struct platform_device *dsidev)
dsi->dsi_mux_pads(false);
}
-static int _dsi_wait_reset(struct platform_device *dsidev)
-{
- int t = 0;
-
- while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) {
- if (++t > 5) {
- DSSERR("soft reset failed\n");
- return -ENODEV;
- }
- udelay(1);
- }
-
- return 0;
-}
-
-static int _dsi_reset(struct platform_device *dsidev)
-{
- /* Soft reset */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1);
- return _dsi_wait_reset(dsidev);
-}
-
static void dsi_config_tx_fifo(struct platform_device *dsidev,
enum fifo_size size1, enum fifo_size size2,
enum fifo_size size3, enum fifo_size size4)
@@ -3386,6 +3383,10 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
+ /* Reset LANEx_ULPS_SIG2 */
+ REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (0 << 0) | (0 << 1) | (0 << 2),
+ 7, 5);
+
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
dsi_if_enable(dsidev, false);
@@ -4198,22 +4199,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
dsi_pll_uninit(dsidev, disconnect_lanes);
}
-static int dsi_core_init(struct platform_device *dsidev)
-{
- /* Autoidle */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 0, 0);
-
- /* ENWAKEUP */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 2, 2);
-
- /* SIDLEMODE smart-idle */
- REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 2, 4, 3);
-
- _dsi_initialize_irq(dsidev);
-
- return 0;
-}
-
int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -4229,37 +4214,37 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
- goto err0;
+ goto err_start_dev;
}
- enable_clocks(1);
- dsi_enable_pll_clock(dsidev, 1);
-
- r = _dsi_reset(dsidev);
+ r = dsi_runtime_get(dsidev);
if (r)
- goto err1;
+ goto err_get_dsi;
- dsi_core_init(dsidev);
+ dsi_enable_pll_clock(dsidev, 1);
+
+ _dsi_initialize_irq(dsidev);
r = dsi_display_init_dispc(dssdev);
if (r)
- goto err1;
+ goto err_init_dispc;
r = dsi_display_init_dsi(dssdev);
if (r)
- goto err2;
+ goto err_init_dsi;
mutex_unlock(&dsi->lock);
return 0;
-err2:
+err_init_dsi:
dsi_display_uninit_dispc(dssdev);
-err1:
- enable_clocks(0);
+err_init_dispc:
dsi_enable_pll_clock(dsidev, 0);
+ dsi_runtime_put(dsidev);
+err_get_dsi:
omap_dss_stop_device(dssdev);
-err0:
+err_start_dev:
mutex_unlock(&dsi->lock);
DSSDBG("dsi_display_enable FAILED\n");
return r;
@@ -4278,11 +4263,16 @@ void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
mutex_lock(&dsi->lock);
+ dsi_sync_vc(dsidev, 0);
+ dsi_sync_vc(dsidev, 1);
+ dsi_sync_vc(dsidev, 2);
+ dsi_sync_vc(dsidev, 3);
+
dsi_display_uninit_dispc(dssdev);
dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps);
- enable_clocks(0);
+ dsi_runtime_put(dsidev);
dsi_enable_pll_clock(dsidev, 0);
omap_dss_stop_device(dssdev);
@@ -4302,16 +4292,11 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
EXPORT_SYMBOL(omapdss_dsi_enable_te);
void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
- u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 fifo_size, u32 burst_size,
u32 *fifo_low, u32 *fifo_high)
{
- unsigned burst_size_bytes;
-
- *burst_size = OMAP_DSS_BURST_16x32;
- burst_size_bytes = 16 * 32 / 8;
-
- *fifo_high = fifo_size - burst_size_bytes;
- *fifo_low = fifo_size - burst_size_bytes * 2;
+ *fifo_high = fifo_size - burst_size;
+ *fifo_low = fifo_size - burst_size * 2;
}
int dsi_init_display(struct omap_dss_device *dssdev)
@@ -4437,7 +4422,47 @@ static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
}
-static int dsi_init(struct platform_device *dsidev)
+static int dsi_get_clocks(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct clk *clk;
+
+ clk = clk_get(&dsidev->dev, "fck");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get fck\n");
+ return PTR_ERR(clk);
+ }
+
+ dsi->dss_clk = clk;
+
+ if (cpu_is_omap34xx() || cpu_is_omap3630())
+ clk = clk_get(&dsidev->dev, "dss2_alwon_fck");
+ else
+ clk = clk_get(&dsidev->dev, "sys_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get sys_clk\n");
+ clk_put(dsi->dss_clk);
+ dsi->dss_clk = NULL;
+ return PTR_ERR(clk);
+ }
+
+ dsi->sys_clk = clk;
+
+ return 0;
+}
+
+static void dsi_put_clocks(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+
+ if (dsi->dss_clk)
+ clk_put(dsi->dss_clk);
+ if (dsi->sys_clk)
+ clk_put(dsi->sys_clk);
+}
+
+/* DSI1 HW IP initialisation */
+static int omap_dsi1hw_probe(struct platform_device *dsidev)
{
struct omap_display_platform_data *dss_plat_data;
struct omap_dss_board_info *board_info;
@@ -4449,7 +4474,7 @@ static int dsi_init(struct platform_device *dsidev)
dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
if (!dsi) {
r = -ENOMEM;
- goto err0;
+ goto err_alloc;
}
dsi->pdev = dsidev;
@@ -4472,6 +4497,12 @@ static int dsi_init(struct platform_device *dsidev)
mutex_init(&dsi->lock);
sema_init(&dsi->bus_lock, 1);
+ r = dsi_get_clocks(dsidev);
+ if (r)
+ goto err_get_clk;
+
+ pm_runtime_enable(&dsidev->dev);
+
INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
dsi_framedone_timeout_work_callback);
@@ -4484,26 +4515,26 @@ static int dsi_init(struct platform_device *dsidev)
if (!dsi_mem) {
DSSERR("can't get IORESOURCE_MEM DSI\n");
r = -EINVAL;
- goto err1;
+ goto err_ioremap;
}
dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem));
if (!dsi->base) {
DSSERR("can't ioremap DSI\n");
r = -ENOMEM;
- goto err1;
+ goto err_ioremap;
}
dsi->irq = platform_get_irq(dsi->pdev, 0);
if (dsi->irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto err2;
+ goto err_get_irq;
}
r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED,
dev_name(&dsidev->dev), dsi->pdev);
if (r < 0) {
DSSERR("request_irq failed\n");
- goto err2;
+ goto err_get_irq;
}
/* DSI VCs initialization */
@@ -4515,7 +4546,9 @@ static int dsi_init(struct platform_device *dsidev)
dsi_calc_clock_param_ranges(dsidev);
- enable_clocks(1);
+ r = dsi_runtime_get(dsidev);
+ if (r)
+ goto err_get_dsi;
rev = dsi_read_reg(dsidev, DSI_REVISION);
dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
@@ -4523,21 +4556,32 @@ static int dsi_init(struct platform_device *dsidev)
dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev);
- enable_clocks(0);
+ dsi_runtime_put(dsidev);
return 0;
-err2:
+
+err_get_dsi:
+ free_irq(dsi->irq, dsi->pdev);
+err_get_irq:
iounmap(dsi->base);
-err1:
+err_ioremap:
+ pm_runtime_disable(&dsidev->dev);
+err_get_clk:
kfree(dsi);
-err0:
+err_alloc:
return r;
}
-static void dsi_exit(struct platform_device *dsidev)
+static int omap_dsi1hw_remove(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ WARN_ON(dsi->scp_clk_refcount > 0);
+
+ pm_runtime_disable(&dsidev->dev);
+
+ dsi_put_clocks(dsidev);
+
if (dsi->vdds_dsi_reg != NULL) {
if (dsi->vdds_dsi_enabled) {
regulator_disable(dsi->vdds_dsi_reg);
@@ -4553,38 +4597,56 @@ static void dsi_exit(struct platform_device *dsidev)
kfree(dsi);
- DSSDBG("omap_dsi_exit\n");
+ return 0;
}
-/* DSI1 HW IP initialisation */
-static int omap_dsi1hw_probe(struct platform_device *dsidev)
+static int dsi_runtime_suspend(struct device *dev)
{
- int r;
+ struct dsi_data *dsi = dsi_get_dsidrv_data(to_platform_device(dev));
- r = dsi_init(dsidev);
- if (r) {
- DSSERR("Failed to initialize DSI\n");
- goto err_dsi;
- }
-err_dsi:
- return r;
+ clk_disable(dsi->dss_clk);
+
+ dispc_runtime_put();
+ dss_runtime_put();
+
+ return 0;
}
-static int omap_dsi1hw_remove(struct platform_device *dsidev)
+static int dsi_runtime_resume(struct device *dev)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(to_platform_device(dev));
+ int r;
+
+ r = dss_runtime_get();
+ if (r)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_get_dispc;
+
+ clk_enable(dsi->dss_clk);
- dsi_exit(dsidev);
- WARN_ON(dsi->scp_clk_refcount > 0);
return 0;
+
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ return r;
}
+static const struct dev_pm_ops dsi_pm_ops = {
+ .runtime_suspend = dsi_runtime_suspend,
+ .runtime_resume = dsi_runtime_resume,
+};
+
static struct platform_driver omap_dsi1hw_driver = {
.probe = omap_dsi1hw_probe,
.remove = omap_dsi1hw_remove,
.driver = {
.name = "omapdss_dsi1",
.owner = THIS_MODULE,
+ .pm = &dsi_pm_ops,
},
};
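The DSI block follows the same convention as DISPC: dsi_runtime_get()/dsi_runtime_put() wrap runtime PM on the per-module platform device, and dsi_runtime_resume() chains up through DSS and DISPC before enabling the DSI functional clock. A minimal usage sketch (illustrative only; example_dsi_read_rev() is not part of the patch):

static int example_dsi_read_rev(struct platform_device *dsidev, u32 *rev)
{
	int r;

	/* resumes DSS -> DISPC -> DSI fck through the runtime PM callbacks */
	r = dsi_runtime_get(dsidev);
	if (r)
		return r;

	*rev = dsi_read_reg(dsidev, DSI_REVISION);

	dsi_runtime_put(dsidev);

	return 0;
}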
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index d9489d5c4f0..0f9c3a6457a 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -28,6 +28,8 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include <plat/clock.h>
@@ -59,15 +61,9 @@ struct dss_reg {
static struct {
struct platform_device *pdev;
void __iomem *base;
- int ctx_id;
struct clk *dpll4_m4_ck;
- struct clk *dss_ick;
- struct clk *dss_fck;
- struct clk *dss_sys_clk;
- struct clk *dss_tv_fck;
- struct clk *dss_video_fck;
- unsigned num_clks_enabled;
+ struct clk *dss_clk;
unsigned long cache_req_pck;
unsigned long cache_prate;
@@ -78,6 +74,7 @@ static struct {
enum omap_dss_clk_source dispc_clk_source;
enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
+ bool ctx_valid;
u32 ctx[DSS_SZ_REGS / sizeof(u32)];
} dss;
@@ -87,13 +84,6 @@ static const char * const dss_generic_clk_source_names[] = {
[OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK",
};
-static void dss_clk_enable_all_no_ctx(void);
-static void dss_clk_disable_all_no_ctx(void);
-static void dss_clk_enable_no_ctx(enum dss_clock clks);
-static void dss_clk_disable_no_ctx(enum dss_clock clks);
-
-static int _omap_dss_wait_reset(void);
-
static inline void dss_write_reg(const struct dss_reg idx, u32 val)
{
__raw_writel(val, dss.base + idx.idx);
@@ -109,12 +99,10 @@ static inline u32 dss_read_reg(const struct dss_reg idx)
#define RR(reg) \
dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])
-void dss_save_context(void)
+static void dss_save_context(void)
{
- if (cpu_is_omap24xx())
- return;
+ DSSDBG("dss_save_context\n");
- SR(SYSCONFIG);
SR(CONTROL);
if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
@@ -122,14 +110,19 @@ void dss_save_context(void)
SR(SDI_CONTROL);
SR(PLL_CONTROL);
}
+
+ dss.ctx_valid = true;
+
+ DSSDBG("context saved\n");
}
-void dss_restore_context(void)
+static void dss_restore_context(void)
{
- if (_omap_dss_wait_reset())
- DSSERR("DSS not coming out of reset after sleep\n");
+ DSSDBG("dss_restore_context\n");
+
+ if (!dss.ctx_valid)
+ return;
- RR(SYSCONFIG);
RR(CONTROL);
if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
@@ -137,6 +130,8 @@ void dss_restore_context(void)
RR(SDI_CONTROL);
RR(PLL_CONTROL);
}
+
+ DSSDBG("context restored\n");
}
#undef SR
@@ -234,6 +229,7 @@ const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src)
return dss_generic_clk_source_names[clk_src];
}
+
void dss_dump_clocks(struct seq_file *s)
{
unsigned long dpll4_ck_rate;
@@ -241,13 +237,14 @@ void dss_dump_clocks(struct seq_file *s)
const char *fclk_name, *fclk_real_name;
unsigned long fclk_rate;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dss_runtime_get())
+ return;
seq_printf(s, "- DSS -\n");
fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
- fclk_rate = dss_clk_get_rate(DSS_CLK_FCK);
+ fclk_rate = clk_get_rate(dss.dss_clk);
if (dss.dpll4_m4_ck) {
dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
@@ -273,14 +270,15 @@ void dss_dump_clocks(struct seq_file *s)
fclk_rate);
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dss_runtime_put();
}
void dss_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (dss_runtime_get())
+ return;
DUMPREG(DSS_REVISION);
DUMPREG(DSS_SYSCONFIG);
@@ -294,7 +292,7 @@ void dss_dump_regs(struct seq_file *s)
DUMPREG(DSS_SDI_STATUS);
}
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dss_runtime_put();
#undef DUMPREG
}
@@ -437,7 +435,7 @@ int dss_calc_clock_rates(struct dss_clock_info *cinfo)
} else {
if (cinfo->fck_div != 0)
return -EINVAL;
- cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK);
+ cinfo->fck = clk_get_rate(dss.dss_clk);
}
return 0;
@@ -467,7 +465,7 @@ int dss_set_clock_div(struct dss_clock_info *cinfo)
int dss_get_clock_div(struct dss_clock_info *cinfo)
{
- cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK);
+ cinfo->fck = clk_get_rate(dss.dss_clk);
if (dss.dpll4_m4_ck) {
unsigned long prate;
@@ -512,7 +510,7 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
- fck = dss_clk_get_rate(DSS_CLK_FCK);
+ fck = clk_get_rate(dss.dss_clk);
if (req_pck == dss.cache_req_pck &&
((cpu_is_omap34xx() && prate == dss.cache_prate) ||
dss.cache_dss_cinfo.fck == fck)) {
@@ -539,7 +537,7 @@ retry:
if (dss.dpll4_m4_ck == NULL) {
struct dispc_clock_info cur_dispc;
/* XXX can we change the clock on omap2? */
- fck = dss_clk_get_rate(DSS_CLK_FCK);
+ fck = clk_get_rate(dss.dss_clk);
fck_div = 1;
dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
@@ -616,28 +614,6 @@ found:
return 0;
}
-static int _omap_dss_wait_reset(void)
-{
- int t = 0;
-
- while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
- if (++t > 1000) {
- DSSERR("soft reset failed\n");
- return -ENODEV;
- }
- udelay(1);
- }
-
- return 0;
-}
-
-static int _omap_dss_reset(void)
-{
- /* Soft reset */
- REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1);
- return _omap_dss_wait_reset();
-}
-
void dss_set_venc_output(enum omap_dss_venc_type type)
{
int l = 0;
@@ -663,424 +639,88 @@ void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select hdmi)
REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* VENC_HDMI_SWITCH */
}
-static int dss_init(void)
+static int dss_get_clocks(void)
{
+ struct clk *clk;
int r;
- u32 rev;
- struct resource *dss_mem;
- struct clk *dpll4_m4_ck;
- dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
- if (!dss_mem) {
- DSSERR("can't get IORESOURCE_MEM DSS\n");
- r = -EINVAL;
- goto fail0;
- }
- dss.base = ioremap(dss_mem->start, resource_size(dss_mem));
- if (!dss.base) {
- DSSERR("can't ioremap DSS\n");
- r = -ENOMEM;
- goto fail0;
+ clk = clk_get(&dss.pdev->dev, "fck");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get clock fck\n");
+ r = PTR_ERR(clk);
+ goto err;
}
- /* disable LCD and DIGIT output. This seems to fix the synclost
- * problem that we get, if the bootloader starts the DSS and
- * the kernel resets it */
- omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
-
-#ifdef CONFIG_OMAP2_DSS_SLEEP_BEFORE_RESET
- /* We need to wait here a bit, otherwise we sometimes start to
- * get synclost errors, and after that only power cycle will
- * restore DSS functionality. I have no idea why this happens.
- * And we have to wait _before_ resetting the DSS, but after
- * enabling clocks.
- *
- * This bug was at least present on OMAP3430. It's unknown
- * if it happens on OMAP2 or OMAP3630.
- */
- msleep(50);
-#endif
-
- _omap_dss_reset();
+ dss.dss_clk = clk;
- /* autoidle */
- REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0);
-
- /* Select DPLL */
- REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
-
-#ifdef CONFIG_OMAP2_DSS_VENC
- REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
- REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
- REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
-#endif
if (cpu_is_omap34xx()) {
- dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
- if (IS_ERR(dpll4_m4_ck)) {
+ clk = clk_get(NULL, "dpll4_m4_ck");
+ if (IS_ERR(clk)) {
DSSERR("Failed to get dpll4_m4_ck\n");
- r = PTR_ERR(dpll4_m4_ck);
- goto fail1;
+ r = PTR_ERR(clk);
+ goto err;
}
} else if (cpu_is_omap44xx()) {
- dpll4_m4_ck = clk_get(NULL, "dpll_per_m5x2_ck");
- if (IS_ERR(dpll4_m4_ck)) {
- DSSERR("Failed to get dpll4_m4_ck\n");
- r = PTR_ERR(dpll4_m4_ck);
- goto fail1;
+ clk = clk_get(NULL, "dpll_per_m5x2_ck");
+ if (IS_ERR(clk)) {
+ DSSERR("Failed to get dpll_per_m5x2_ck\n");
+ r = PTR_ERR(clk);
+ goto err;
}
} else { /* omap24xx */
- dpll4_m4_ck = NULL;
+ clk = NULL;
}
- dss.dpll4_m4_ck = dpll4_m4_ck;
-
- dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
- dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
- dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
- dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
- dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
-
- dss_save_context();
-
- rev = dss_read_reg(DSS_REVISION);
- printk(KERN_INFO "OMAP DSS rev %d.%d\n",
- FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+ dss.dpll4_m4_ck = clk;
return 0;
-fail1:
- iounmap(dss.base);
-fail0:
- return r;
-}
-
-static void dss_exit(void)
-{
+err:
+ if (dss.dss_clk)
+ clk_put(dss.dss_clk);
if (dss.dpll4_m4_ck)
clk_put(dss.dpll4_m4_ck);
- iounmap(dss.base);
-}
-
-/* CONTEXT */
-static int dss_get_ctx_id(void)
-{
- struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
- int r;
-
- if (!pdata->board_data->get_last_off_on_transaction_id)
- return 0;
- r = pdata->board_data->get_last_off_on_transaction_id(&dss.pdev->dev);
- if (r < 0) {
- dev_err(&dss.pdev->dev, "getting transaction ID failed, "
- "will force context restore\n");
- r = -1;
- }
- return r;
-}
-
-int dss_need_ctx_restore(void)
-{
- int id = dss_get_ctx_id();
-
- if (id < 0 || id != dss.ctx_id) {
- DSSDBG("ctx id %d -> id %d\n",
- dss.ctx_id, id);
- dss.ctx_id = id;
- return 1;
- } else {
- return 0;
- }
-}
-
-static void save_all_ctx(void)
-{
- DSSDBG("save context\n");
-
- dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
-
- dss_save_context();
- dispc_save_context();
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_save_context();
-#endif
-
- dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK);
-}
-
-static void restore_all_ctx(void)
-{
- DSSDBG("restore context\n");
-
- dss_clk_enable_all_no_ctx();
-
- dss_restore_context();
- dispc_restore_context();
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_restore_context();
-#endif
-
- dss_clk_disable_all_no_ctx();
-}
-
-static int dss_get_clock(struct clk **clock, const char *clk_name)
-{
- struct clk *clk;
-
- clk = clk_get(&dss.pdev->dev, clk_name);
-
- if (IS_ERR(clk)) {
- DSSERR("can't get clock %s", clk_name);
- return PTR_ERR(clk);
- }
-
- *clock = clk;
-
- DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));
-
- return 0;
-}
-
-static int dss_get_clocks(void)
-{
- int r;
- struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data;
-
- dss.dss_ick = NULL;
- dss.dss_fck = NULL;
- dss.dss_sys_clk = NULL;
- dss.dss_tv_fck = NULL;
- dss.dss_video_fck = NULL;
-
- r = dss_get_clock(&dss.dss_ick, "ick");
- if (r)
- goto err;
-
- r = dss_get_clock(&dss.dss_fck, "fck");
- if (r)
- goto err;
-
- if (!pdata->opt_clock_available) {
- r = -ENODEV;
- goto err;
- }
-
- if (pdata->opt_clock_available("sys_clk")) {
- r = dss_get_clock(&dss.dss_sys_clk, "sys_clk");
- if (r)
- goto err;
- }
-
- if (pdata->opt_clock_available("tv_clk")) {
- r = dss_get_clock(&dss.dss_tv_fck, "tv_clk");
- if (r)
- goto err;
- }
-
- if (pdata->opt_clock_available("video_clk")) {
- r = dss_get_clock(&dss.dss_video_fck, "video_clk");
- if (r)
- goto err;
- }
-
- return 0;
-
-err:
- if (dss.dss_ick)
- clk_put(dss.dss_ick);
- if (dss.dss_fck)
- clk_put(dss.dss_fck);
- if (dss.dss_sys_clk)
- clk_put(dss.dss_sys_clk);
- if (dss.dss_tv_fck)
- clk_put(dss.dss_tv_fck);
- if (dss.dss_video_fck)
- clk_put(dss.dss_video_fck);
-
return r;
}
static void dss_put_clocks(void)
{
- if (dss.dss_video_fck)
- clk_put(dss.dss_video_fck);
- if (dss.dss_tv_fck)
- clk_put(dss.dss_tv_fck);
- if (dss.dss_sys_clk)
- clk_put(dss.dss_sys_clk);
- clk_put(dss.dss_fck);
- clk_put(dss.dss_ick);
-}
-
-unsigned long dss_clk_get_rate(enum dss_clock clk)
-{
- switch (clk) {
- case DSS_CLK_ICK:
- return clk_get_rate(dss.dss_ick);
- case DSS_CLK_FCK:
- return clk_get_rate(dss.dss_fck);
- case DSS_CLK_SYSCK:
- return clk_get_rate(dss.dss_sys_clk);
- case DSS_CLK_TVFCK:
- return clk_get_rate(dss.dss_tv_fck);
- case DSS_CLK_VIDFCK:
- return clk_get_rate(dss.dss_video_fck);
- }
-
- BUG();
- return 0;
-}
-
-static unsigned count_clk_bits(enum dss_clock clks)
-{
- unsigned num_clks = 0;
-
- if (clks & DSS_CLK_ICK)
- ++num_clks;
- if (clks & DSS_CLK_FCK)
- ++num_clks;
- if (clks & DSS_CLK_SYSCK)
- ++num_clks;
- if (clks & DSS_CLK_TVFCK)
- ++num_clks;
- if (clks & DSS_CLK_VIDFCK)
- ++num_clks;
-
- return num_clks;
-}
-
-static void dss_clk_enable_no_ctx(enum dss_clock clks)
-{
- unsigned num_clks = count_clk_bits(clks);
-
- if (clks & DSS_CLK_ICK)
- clk_enable(dss.dss_ick);
- if (clks & DSS_CLK_FCK)
- clk_enable(dss.dss_fck);
- if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk)
- clk_enable(dss.dss_sys_clk);
- if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck)
- clk_enable(dss.dss_tv_fck);
- if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck)
- clk_enable(dss.dss_video_fck);
-
- dss.num_clks_enabled += num_clks;
-}
-
-void dss_clk_enable(enum dss_clock clks)
-{
- bool check_ctx = dss.num_clks_enabled == 0;
-
- dss_clk_enable_no_ctx(clks);
-
- /*
- * HACK: On omap4 the registers may not be accessible right after
- * enabling the clocks. At some point this will be handled by
- * pm_runtime, but for the time begin this should make things work.
- */
- if (cpu_is_omap44xx() && check_ctx)
- udelay(10);
-
- if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore())
- restore_all_ctx();
+ if (dss.dpll4_m4_ck)
+ clk_put(dss.dpll4_m4_ck);
+ clk_put(dss.dss_clk);
}
-static void dss_clk_disable_no_ctx(enum dss_clock clks)
+struct clk *dss_get_ick(void)
{
- unsigned num_clks = count_clk_bits(clks);
-
- if (clks & DSS_CLK_ICK)
- clk_disable(dss.dss_ick);
- if (clks & DSS_CLK_FCK)
- clk_disable(dss.dss_fck);
- if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk)
- clk_disable(dss.dss_sys_clk);
- if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck)
- clk_disable(dss.dss_tv_fck);
- if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck)
- clk_disable(dss.dss_video_fck);
-
- dss.num_clks_enabled -= num_clks;
+ return clk_get(&dss.pdev->dev, "ick");
}
-void dss_clk_disable(enum dss_clock clks)
+int dss_runtime_get(void)
{
- if (cpu_is_omap34xx()) {
- unsigned num_clks = count_clk_bits(clks);
-
- BUG_ON(dss.num_clks_enabled < num_clks);
+ int r;
- if (dss.num_clks_enabled == num_clks)
- save_all_ctx();
- }
+ DSSDBG("dss_runtime_get\n");
- dss_clk_disable_no_ctx(clks);
+ r = pm_runtime_get_sync(&dss.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
}
-static void dss_clk_enable_all_no_ctx(void)
+void dss_runtime_put(void)
{
- enum dss_clock clks;
-
- clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_VIDFCK;
- dss_clk_enable_no_ctx(clks);
-}
-
-static void dss_clk_disable_all_no_ctx(void)
-{
- enum dss_clock clks;
+ int r;
- clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK;
- if (cpu_is_omap34xx())
- clks |= DSS_CLK_VIDFCK;
- dss_clk_disable_no_ctx(clks);
-}
+ DSSDBG("dss_runtime_put\n");
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
-/* CLOCKS */
-static void core_dump_clocks(struct seq_file *s)
-{
- int i;
- struct clk *clocks[5] = {
- dss.dss_ick,
- dss.dss_fck,
- dss.dss_sys_clk,
- dss.dss_tv_fck,
- dss.dss_video_fck
- };
-
- const char *names[5] = {
- "ick",
- "fck",
- "sys_clk",
- "tv_fck",
- "video_fck"
- };
-
- seq_printf(s, "- CORE -\n");
-
- seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled);
-
- for (i = 0; i < 5; i++) {
- if (!clocks[i])
- continue;
- seq_printf(s, "%s (%s)%*s\t%lu\t%d\n",
- names[i],
- clocks[i]->name,
- 24 - strlen(names[i]) - strlen(clocks[i]->name),
- "",
- clk_get_rate(clocks[i]),
- clocks[i]->usecount);
- }
+ r = pm_runtime_put(&dss.pdev->dev);
+ WARN_ON(r < 0);
}
-#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */
/* DEBUGFS */
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
void dss_debug_dump_clocks(struct seq_file *s)
{
- core_dump_clocks(s);
dss_dump_clocks(s);
dispc_dump_clocks(s);
#ifdef CONFIG_OMAP2_DSS_DSI
@@ -1089,28 +729,51 @@ void dss_debug_dump_clocks(struct seq_file *s)
}
#endif
-
/* DSS HW IP initialisation */
static int omap_dsshw_probe(struct platform_device *pdev)
{
+ struct resource *dss_mem;
+ u32 rev;
int r;
dss.pdev = pdev;
+ dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
+ if (!dss_mem) {
+ DSSERR("can't get IORESOURCE_MEM DSS\n");
+ r = -EINVAL;
+ goto err_ioremap;
+ }
+ dss.base = ioremap(dss_mem->start, resource_size(dss_mem));
+ if (!dss.base) {
+ DSSERR("can't ioremap DSS\n");
+ r = -ENOMEM;
+ goto err_ioremap;
+ }
+
r = dss_get_clocks();
if (r)
goto err_clocks;
- dss_clk_enable_all_no_ctx();
+ pm_runtime_enable(&pdev->dev);
- dss.ctx_id = dss_get_ctx_id();
- DSSDBG("initial ctx id %u\n", dss.ctx_id);
+ r = dss_runtime_get();
+ if (r)
+ goto err_runtime_get;
- r = dss_init();
- if (r) {
- DSSERR("Failed to initialize DSS\n");
- goto err_dss;
- }
+ /* Select DPLL */
+ REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
+
+#ifdef CONFIG_OMAP2_DSS_VENC
+ REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
+ REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
+ REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
+#endif
+ dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
+ dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
+ dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
+ dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
+ dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
r = dpi_init();
if (r) {
@@ -1124,42 +787,66 @@ static int omap_dsshw_probe(struct platform_device *pdev)
goto err_sdi;
}
- dss_clk_disable_all_no_ctx();
+ rev = dss_read_reg(DSS_REVISION);
+ printk(KERN_INFO "OMAP DSS rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ dss_runtime_put();
+
return 0;
err_sdi:
dpi_exit();
err_dpi:
- dss_exit();
-err_dss:
- dss_clk_disable_all_no_ctx();
+ dss_runtime_put();
+err_runtime_get:
+ pm_runtime_disable(&pdev->dev);
dss_put_clocks();
err_clocks:
+ iounmap(dss.base);
+err_ioremap:
return r;
}
static int omap_dsshw_remove(struct platform_device *pdev)
{
+ dpi_exit();
+ sdi_exit();
- dss_exit();
+ iounmap(dss.base);
- /*
- * As part of hwmod changes, DSS is not the only controller of dss
- * clocks; hwmod framework itself will also enable clocks during hwmod
- * init for dss, and autoidle is set in h/w for DSS. Hence, there's no
- * need to disable clocks if their usecounts > 1.
- */
- WARN_ON(dss.num_clks_enabled > 0);
+ pm_runtime_disable(&pdev->dev);
dss_put_clocks();
+
+ return 0;
+}
+
+static int dss_runtime_suspend(struct device *dev)
+{
+ dss_save_context();
+ clk_disable(dss.dss_clk);
return 0;
}
+static int dss_runtime_resume(struct device *dev)
+{
+ clk_enable(dss.dss_clk);
+ dss_restore_context();
+ return 0;
+}
+
+static const struct dev_pm_ops dss_pm_ops = {
+ .runtime_suspend = dss_runtime_suspend,
+ .runtime_resume = dss_runtime_resume,
+};
+
static struct platform_driver omap_dsshw_driver = {
.probe = omap_dsshw_probe,
.remove = omap_dsshw_remove,
.driver = {
.name = "omapdss_dss",
.owner = THIS_MODULE,
+ .pm = &dss_pm_ops,
},
};
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 8ab6d43329b..9c94b1152c2 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -97,26 +97,12 @@ extern unsigned int dss_debug;
#define FLD_MOD(orig, val, start, end) \
(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
-enum omap_burst_size {
- OMAP_DSS_BURST_4x32 = 0,
- OMAP_DSS_BURST_8x32 = 1,
- OMAP_DSS_BURST_16x32 = 2,
-};
-
enum omap_parallel_interface_mode {
OMAP_DSS_PARALLELMODE_BYPASS, /* MIPI DPI */
OMAP_DSS_PARALLELMODE_RFBI, /* MIPI DBI */
OMAP_DSS_PARALLELMODE_DSI,
};
-enum dss_clock {
- DSS_CLK_ICK = 1 << 0, /* DSS_L3_ICLK and DSS_L4_ICLK */
- DSS_CLK_FCK = 1 << 1, /* DSS1_ALWON_FCLK */
- DSS_CLK_SYSCK = 1 << 2, /* DSS2_ALWON_FCLK */
- DSS_CLK_TVFCK = 1 << 3, /* DSS_TV_FCLK */
- DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/
-};
-
enum dss_hdmi_venc_clk_source_select {
DSS_VENC_TV_CLK = 0,
DSS_HDMI_M_PCLK = 1,
@@ -194,7 +180,7 @@ void dss_uninit_device(struct platform_device *pdev,
bool dss_use_replication(struct omap_dss_device *dssdev,
enum omap_color_mode mode);
void default_get_overlay_fifo_thresholds(enum omap_plane plane,
- u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 fifo_size, u32 burst_size,
u32 *fifo_low, u32 *fifo_high);
/* manager */
@@ -220,13 +206,12 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
int dss_init_platform_driver(void);
void dss_uninit_platform_driver(void);
+int dss_runtime_get(void);
+void dss_runtime_put(void);
+
+struct clk *dss_get_ick(void);
+
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
-void dss_save_context(void);
-void dss_restore_context(void);
-void dss_clk_enable(enum dss_clock clks);
-void dss_clk_disable(enum dss_clock clks);
-unsigned long dss_clk_get_rate(enum dss_clock clk);
-int dss_need_ctx_restore(void);
const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
void dss_dump_clocks(struct seq_file *s);
@@ -283,15 +268,15 @@ struct file_operations;
int dsi_init_platform_driver(void);
void dsi_uninit_platform_driver(void);
+int dsi_runtime_get(struct platform_device *dsidev);
+void dsi_runtime_put(struct platform_device *dsidev);
+
void dsi_dump_clocks(struct seq_file *s);
void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
const struct file_operations *debug_fops);
void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
const struct file_operations *debug_fops);
-void dsi_save_context(void);
-void dsi_restore_context(void);
-
int dsi_init_display(struct omap_dss_device *display);
void dsi_irq_handler(void);
unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev);
@@ -304,7 +289,7 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
bool enable_hsdiv);
void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes);
void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
- u32 fifo_size, enum omap_burst_size *burst_size,
+ u32 fifo_size, u32 burst_size,
u32 *fifo_low, u32 *fifo_high);
void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev);
void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
@@ -317,6 +302,13 @@ static inline int dsi_init_platform_driver(void)
static inline void dsi_uninit_platform_driver(void)
{
}
+static inline int dsi_runtime_get(struct platform_device *dsidev)
+{
+ return 0;
+}
+static inline void dsi_runtime_put(struct platform_device *dsidev)
+{
+}
static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
{
WARN("%s: DSI not compiled in, returning rate as 0\n", __func__);
@@ -384,8 +376,8 @@ void dispc_dump_regs(struct seq_file *s);
void dispc_irq_handler(void);
void dispc_fake_vsync_irq(void);
-void dispc_save_context(void);
-void dispc_restore_context(void);
+int dispc_runtime_get(void);
+void dispc_runtime_put(void);
void dispc_enable_sidle(void);
void dispc_disable_sidle(void);
@@ -398,10 +390,12 @@ void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable);
void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
void dispc_set_digit_size(u16 width, u16 height);
u32 dispc_get_plane_fifo_size(enum omap_plane plane);
-void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high);
+void dispc_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
void dispc_enable_fifomerge(bool enable);
-void dispc_set_burst_size(enum omap_plane plane,
- enum omap_burst_size burst_size);
+u32 dispc_get_burst_size(enum omap_plane plane);
+void dispc_enable_cpr(enum omap_channel channel, bool enable);
+void dispc_set_cpr_coef(enum omap_channel channel,
+ struct omap_dss_cpr_coefs *coefs);
void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr);
void dispc_set_plane_ba1(enum omap_plane plane, u32 paddr);
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index 1c18888e5df..b415c4ee621 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -49,6 +49,9 @@ struct omap_dss_features {
const enum omap_color_mode *supported_color_modes;
const char * const *clksrc_names;
const struct dss_param_range *dss_params;
+
+ const u32 buffer_size_unit;
+ const u32 burst_size_unit;
};
/* This struct is assigned to one of the below during initialization */
@@ -274,6 +277,8 @@ static const struct omap_dss_features omap2_dss_features = {
.supported_color_modes = omap2_dss_supported_color_modes,
.clksrc_names = omap2_dss_clk_source_names,
.dss_params = omap2_dss_param_range,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
};
/* OMAP3 DSS Features */
@@ -286,7 +291,9 @@ static const struct omap_dss_features omap3430_dss_features = {
FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE |
FEAT_FUNCGATED | FEAT_ROWREPEATENABLE |
FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF |
- FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC,
+ FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC |
+ FEAT_VENC_REQUIRES_TV_DAC_CLK | FEAT_CPR | FEAT_PRELOAD |
+ FEAT_FIR_COEF_V,
.num_mgrs = 2,
.num_ovls = 3,
@@ -294,6 +301,8 @@ static const struct omap_dss_features omap3430_dss_features = {
.supported_color_modes = omap3_dss_supported_color_modes,
.clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
};
static const struct omap_dss_features omap3630_dss_features = {
@@ -306,7 +315,8 @@ static const struct omap_dss_features omap3630_dss_features = {
FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED |
FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT |
FEAT_RESIZECONF | FEAT_DSI_PLL_PWR_BUG |
- FEAT_DSI_PLL_FREQSEL,
+ FEAT_DSI_PLL_FREQSEL | FEAT_CPR | FEAT_PRELOAD |
+ FEAT_FIR_COEF_V,
.num_mgrs = 2,
.num_ovls = 3,
@@ -314,6 +324,8 @@ static const struct omap_dss_features omap3630_dss_features = {
.supported_color_modes = omap3_dss_supported_color_modes,
.clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
};
/* OMAP4 DSS Features */
@@ -327,7 +339,8 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 |
FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
- FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2,
+ FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 |
+ FEAT_CPR | FEAT_PRELOAD | FEAT_FIR_COEF_V,
.num_mgrs = 3,
.num_ovls = 3,
@@ -335,6 +348,8 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
.supported_color_modes = omap4_dss_supported_color_modes,
.clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
+ .buffer_size_unit = 16,
+ .burst_size_unit = 16,
};
/* For all the other OMAP4 versions */
@@ -348,7 +363,8 @@ static const struct omap_dss_features omap4_dss_features = {
FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC |
FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH |
FEAT_DSI_GNQ | FEAT_HDMI_CTS_SWMODE |
- FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2,
+ FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 | FEAT_CPR |
+ FEAT_PRELOAD | FEAT_FIR_COEF_V,
.num_mgrs = 3,
.num_ovls = 3,
@@ -356,6 +372,8 @@ static const struct omap_dss_features omap4_dss_features = {
.supported_color_modes = omap4_dss_supported_color_modes,
.clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
+ .buffer_size_unit = 16,
+ .burst_size_unit = 16,
};
/* Functions returning values related to a DSS feature */
@@ -401,6 +419,16 @@ const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id)
return omap_current_dss_features->clksrc_names[id];
}
+u32 dss_feat_get_buffer_size_unit(void)
+{
+ return omap_current_dss_features->buffer_size_unit;
+}
+
+u32 dss_feat_get_burst_size_unit(void)
+{
+ return omap_current_dss_features->burst_size_unit;
+}
+
/* DSS has_feature check */
bool dss_has_feature(enum dss_feat_id id)
{
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 07b346f7d91..b7398cbcda5 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -51,6 +51,10 @@ enum dss_feat_id {
FEAT_HDMI_CTS_SWMODE = 1 << 19,
FEAT_HANDLE_UV_SEPARATE = 1 << 20,
FEAT_ATTR2 = 1 << 21,
+ FEAT_VENC_REQUIRES_TV_DAC_CLK = 1 << 22,
+ FEAT_CPR = 1 << 23,
+ FEAT_PRELOAD = 1 << 24,
+ FEAT_FIR_COEF_V = 1 << 25,
};
/* DSS register field id */
@@ -90,6 +94,9 @@ bool dss_feat_color_mode_supported(enum omap_plane plane,
enum omap_color_mode color_mode);
const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
+u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
+u32 dss_feat_get_burst_size_unit(void); /* in bytes */
+
bool dss_has_feature(enum dss_feat_id id);
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
void dss_features_init(void);
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index b0555f4f0a7..256f27a9064 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -29,6 +29,9 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
#include <video/omapdss.h>
#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
@@ -51,6 +54,9 @@ static struct {
u8 edid_set;
bool custom_set;
struct hdmi_config cfg;
+
+ struct clk *sys_clk;
+ struct clk *hdmi_clk;
} hdmi;
/*
@@ -162,6 +168,27 @@ static inline int hdmi_wait_for_bit_change(const struct hdmi_reg idx,
return val;
}
+static int hdmi_runtime_get(void)
+{
+ int r;
+
+ DSSDBG("hdmi_runtime_get\n");
+
+ r = pm_runtime_get_sync(&hdmi.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+static void hdmi_runtime_put(void)
+{
+ int r;
+
+ DSSDBG("hdmi_runtime_put\n");
+
+ r = pm_runtime_put(&hdmi.pdev->dev);
+ WARN_ON(r < 0);
+}
+
int hdmi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
@@ -311,30 +338,11 @@ static int hdmi_phy_init(void)
return 0;
}
-static int hdmi_wait_softreset(void)
-{
- /* reset W1 */
- REG_FLD_MOD(HDMI_WP_SYSCONFIG, 0x1, 0, 0);
-
- /* wait till SOFTRESET == 0 */
- if (hdmi_wait_for_bit_change(HDMI_WP_SYSCONFIG, 0, 0, 0) != 0) {
- DSSERR("sysconfig reset failed\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
static int hdmi_pll_program(struct hdmi_pll_info *fmt)
{
u16 r = 0;
enum hdmi_clk_refsel refsel;
- /* wait for wrapper reset */
- r = hdmi_wait_softreset();
- if (r)
- return r;
-
r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
if (r)
return r;
@@ -1064,7 +1072,7 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
unsigned long clkin, refclk;
u32 mf;
- clkin = dss_clk_get_rate(DSS_CLK_SYSCK) / 10000;
+ clkin = clk_get_rate(hdmi.sys_clk) / 10000;
/*
* Input clock is predivided by N + 1
* output of which is the reference clk
@@ -1098,16 +1106,6 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy,
DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd);
}
-static void hdmi_enable_clocks(int enable)
-{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK |
- DSS_CLK_SYSCK | DSS_CLK_VIDFCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK |
- DSS_CLK_SYSCK | DSS_CLK_VIDFCK);
-}
-
static int hdmi_power_on(struct omap_dss_device *dssdev)
{
int r, code = 0;
@@ -1115,7 +1113,9 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
struct omap_video_timings *p;
unsigned long phy;
- hdmi_enable_clocks(1);
+ r = hdmi_runtime_get();
+ if (r)
+ return r;
dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0);
@@ -1180,7 +1180,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
return 0;
err:
- hdmi_enable_clocks(0);
+ hdmi_runtime_put();
return -EIO;
}
@@ -1191,7 +1191,7 @@ static void hdmi_power_off(struct omap_dss_device *dssdev)
hdmi_wp_video_start(0);
hdmi_phy_off();
hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF);
- hdmi_enable_clocks(0);
+ hdmi_runtime_put();
hdmi.edid_set = 0;
}
@@ -1686,14 +1686,43 @@ static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
};
#endif
+static int hdmi_get_clocks(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ clk = clk_get(&pdev->dev, "sys_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get sys_clk\n");
+ return PTR_ERR(clk);
+ }
+
+ hdmi.sys_clk = clk;
+
+ clk = clk_get(&pdev->dev, "dss_48mhz_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get hdmi_clk\n");
+ clk_put(hdmi.sys_clk);
+ return PTR_ERR(clk);
+ }
+
+ hdmi.hdmi_clk = clk;
+
+ return 0;
+}
+
+static void hdmi_put_clocks(void)
+{
+ if (hdmi.sys_clk)
+ clk_put(hdmi.sys_clk);
+ if (hdmi.hdmi_clk)
+ clk_put(hdmi.hdmi_clk);
+}
+
/* HDMI HW IP initialisation */
static int omapdss_hdmihw_probe(struct platform_device *pdev)
{
struct resource *hdmi_mem;
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
- int ret;
-#endif
+ int r;
hdmi.pdata = pdev->dev.platform_data;
hdmi.pdev = pdev;
@@ -1713,17 +1742,25 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ r = hdmi_get_clocks(pdev);
+ if (r) {
+ iounmap(hdmi.base_wp);
+ return r;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
hdmi_panel_init();
#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
/* Register ASoC codec DAI */
- ret = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
+ r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
&hdmi_codec_dai_drv, 1);
- if (ret) {
+ if (r) {
DSSERR("can't register ASoC HDMI audio codec\n");
- return ret;
+ return r;
}
#endif
return 0;
@@ -1738,17 +1775,62 @@ static int omapdss_hdmihw_remove(struct platform_device *pdev)
snd_soc_unregister_codec(&pdev->dev);
#endif
+ pm_runtime_disable(&pdev->dev);
+
+ hdmi_put_clocks();
+
iounmap(hdmi.base_wp);
return 0;
}
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ clk_disable(hdmi.hdmi_clk);
+ clk_disable(hdmi.sys_clk);
+
+ dispc_runtime_put();
+ dss_runtime_put();
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ int r;
+
+ r = dss_runtime_get();
+ if (r < 0)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r < 0)
+ goto err_get_dispc;
+
+ clk_enable(hdmi.sys_clk);
+ clk_enable(hdmi.hdmi_clk);
+
+ return 0;
+
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ return r;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ .runtime_suspend = hdmi_runtime_suspend,
+ .runtime_resume = hdmi_runtime_resume,
+};
+
static struct platform_driver omapdss_hdmihw_driver = {
.probe = omapdss_hdmihw_probe,
.remove = omapdss_hdmihw_remove,
.driver = {
.name = "omapdss_hdmi",
.owner = THIS_MODULE,
+ .pm = &hdmi_pm_ops,
},
};
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 9aeea50e33f..13d72d5c714 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -275,6 +275,108 @@ static ssize_t manager_alpha_blending_enabled_store(
return size;
}
+static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.cpr_enable);
+}
+
+static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
+ const char *buf, size_t size)
+{
+ struct omap_overlay_manager_info info;
+ int v;
+ int r;
+ bool enable;
+
+ if (!dss_has_feature(FEAT_CPR))
+ return -ENODEV;
+
+ r = kstrtoint(buf, 0, &v);
+ if (r)
+ return r;
+
+ enable = !!v;
+
+ mgr->get_manager_info(mgr, &info);
+
+ if (info.cpr_enable == enable)
+ return size;
+
+ info.cpr_enable = enable;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+ if (r)
+ return r;
+
+ return size;
+}
+
+static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr,
+ char *buf)
+{
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
+
+ return snprintf(buf, PAGE_SIZE,
+ "%d %d %d %d %d %d %d %d %d\n",
+ info.cpr_coefs.rr,
+ info.cpr_coefs.rg,
+ info.cpr_coefs.rb,
+ info.cpr_coefs.gr,
+ info.cpr_coefs.gg,
+ info.cpr_coefs.gb,
+ info.cpr_coefs.br,
+ info.cpr_coefs.bg,
+ info.cpr_coefs.bb);
+}
+
+static ssize_t manager_cpr_coef_store(struct omap_overlay_manager *mgr,
+ const char *buf, size_t size)
+{
+ struct omap_overlay_manager_info info;
+ struct omap_dss_cpr_coefs coefs;
+ int r, i;
+ s16 *arr;
+
+ if (!dss_has_feature(FEAT_CPR))
+ return -ENODEV;
+
+ if (sscanf(buf, "%hd %hd %hd %hd %hd %hd %hd %hd %hd",
+ &coefs.rr, &coefs.rg, &coefs.rb,
+ &coefs.gr, &coefs.gg, &coefs.gb,
+ &coefs.br, &coefs.bg, &coefs.bb) != 9)
+ return -EINVAL;
+
+ arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb,
+ coefs.gr, coefs.gg, coefs.gb,
+ coefs.br, coefs.bg, coefs.bb };
+
+ for (i = 0; i < 9; ++i) {
+ if (arr[i] < -512 || arr[i] > 511)
+ return -EINVAL;
+ }
+
+ mgr->get_manager_info(mgr, &info);
+
+ info.cpr_coefs = coefs;
+
+ r = mgr->set_manager_info(mgr, &info);
+ if (r)
+ return r;
+
+ r = mgr->apply(mgr);
+ if (r)
+ return r;
+
+ return size;
+}
+
struct manager_attribute {
struct attribute attr;
ssize_t (*show)(struct omap_overlay_manager *, char *);
@@ -300,6 +402,12 @@ static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR,
static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR,
manager_alpha_blending_enabled_show,
manager_alpha_blending_enabled_store);
+static MANAGER_ATTR(cpr_enable, S_IRUGO|S_IWUSR,
+ manager_cpr_enable_show,
+ manager_cpr_enable_store);
+static MANAGER_ATTR(cpr_coef, S_IRUGO|S_IWUSR,
+ manager_cpr_coef_show,
+ manager_cpr_coef_store);
static struct attribute *manager_sysfs_attrs[] = {
@@ -310,6 +418,8 @@ static struct attribute *manager_sysfs_attrs[] = {
&manager_attr_trans_key_value.attr,
&manager_attr_trans_key_enabled.attr,
&manager_attr_alpha_blending_enabled.attr,
+ &manager_attr_cpr_enable.attr,
+ &manager_attr_cpr_coef.attr,
NULL
};
@@ -391,33 +501,14 @@ struct overlay_cache_data {
bool enabled;
- u32 paddr;
- void __iomem *vaddr;
- u32 p_uv_addr; /* relevant for NV12 format only */
- u16 screen_width;
- u16 width;
- u16 height;
- enum omap_color_mode color_mode;
- u8 rotation;
- enum omap_dss_rotation_type rotation_type;
- bool mirror;
-
- u16 pos_x;
- u16 pos_y;
- u16 out_width; /* if 0, out_width == width */
- u16 out_height; /* if 0, out_height == height */
- u8 global_alpha;
- u8 pre_mult_alpha;
+ struct omap_overlay_info info;
enum omap_channel channel;
bool replication;
bool ilace;
- enum omap_burst_size burst_size;
u32 fifo_low;
u32 fifo_high;
-
- bool manual_update;
};
struct manager_cache_data {
@@ -429,15 +520,8 @@ struct manager_cache_data {
* VSYNC/EVSYNC */
bool shadow_dirty;
- u32 default_color;
-
- enum omap_dss_trans_key_type trans_key_type;
- u32 trans_key;
- bool trans_enabled;
-
- bool alpha_enabled;
+ struct omap_overlay_manager_info info;
- bool manual_upd_display;
bool manual_update;
bool do_manual_update;
@@ -539,24 +623,15 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return 0;
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
+ return 0;
+
if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
|| dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
} else {
- if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
- enum omap_dss_update_mode mode;
- mode = dssdev->driver->get_update_mode(dssdev);
- if (mode != OMAP_DSS_UPDATE_AUTO)
- return 0;
-
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_FRAMEDONE
- : DISPC_IRQ_FRAMEDONE2;
- } else {
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_VSYNC
- : DISPC_IRQ_VSYNC2;
- }
+ irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
}
mc = &dss_cache.manager_cache[mgr->id];
@@ -617,24 +692,15 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return 0;
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
+ return 0;
+
if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
|| dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
} else {
- if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
- enum omap_dss_update_mode mode;
- mode = dssdev->driver->get_update_mode(dssdev);
- if (mode != OMAP_DSS_UPDATE_AUTO)
- return 0;
-
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_FRAMEDONE
- : DISPC_IRQ_FRAMEDONE2;
- } else {
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_VSYNC
- : DISPC_IRQ_VSYNC2;
- }
+ irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
+ DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
}
oc = &dss_cache.overlay_cache[ovl->id];
@@ -720,10 +786,12 @@ static bool rectangle_intersects(int x1, int y1, int w1, int h1,
static bool dispc_is_overlay_scaled(struct overlay_cache_data *oc)
{
- if (oc->out_width != 0 && oc->width != oc->out_width)
+ struct omap_overlay_info *oi = &oc->info;
+
+ if (oi->out_width != 0 && oi->width != oi->out_width)
return true;
- if (oc->out_height != 0 && oc->height != oc->out_height)
+ if (oi->out_height != 0 && oi->height != oi->out_height)
return true;
return false;
@@ -733,6 +801,8 @@ static int configure_overlay(enum omap_plane plane)
{
struct overlay_cache_data *c;
struct manager_cache_data *mc;
+ struct omap_overlay_info *oi;
+ struct omap_overlay_manager_info *mi;
u16 outw, outh;
u16 x, y, w, h;
u32 paddr;
@@ -742,6 +812,7 @@ static int configure_overlay(enum omap_plane plane)
DSSDBGF("%d", plane);
c = &dss_cache.overlay_cache[plane];
+ oi = &c->info;
if (!c->enabled) {
dispc_enable_plane(plane, 0);
@@ -749,21 +820,22 @@ static int configure_overlay(enum omap_plane plane)
}
mc = &dss_cache.manager_cache[c->channel];
+ mi = &mc->info;
- x = c->pos_x;
- y = c->pos_y;
- w = c->width;
- h = c->height;
- outw = c->out_width == 0 ? c->width : c->out_width;
- outh = c->out_height == 0 ? c->height : c->out_height;
- paddr = c->paddr;
+ x = oi->pos_x;
+ y = oi->pos_y;
+ w = oi->width;
+ h = oi->height;
+ outw = oi->out_width == 0 ? oi->width : oi->out_width;
+ outh = oi->out_height == 0 ? oi->height : oi->out_height;
+ paddr = oi->paddr;
orig_w = w;
orig_h = h;
orig_outw = outw;
orig_outh = outh;
- if (c->manual_update && mc->do_manual_update) {
+ if (mc->manual_update && mc->do_manual_update) {
unsigned bpp;
unsigned scale_x_m = w, scale_x_d = outw;
unsigned scale_y_m = h, scale_y_d = outh;
@@ -775,7 +847,7 @@ static int configure_overlay(enum omap_plane plane)
return 0;
}
- switch (c->color_mode) {
+ switch (oi->color_mode) {
case OMAP_DSS_COLOR_NV12:
bpp = 8;
break;
@@ -805,23 +877,23 @@ static int configure_overlay(enum omap_plane plane)
BUG();
}
- if (mc->x > c->pos_x) {
+ if (mc->x > oi->pos_x) {
x = 0;
- outw -= (mc->x - c->pos_x);
- paddr += (mc->x - c->pos_x) *
+ outw -= (mc->x - oi->pos_x);
+ paddr += (mc->x - oi->pos_x) *
scale_x_m / scale_x_d * bpp / 8;
} else {
- x = c->pos_x - mc->x;
+ x = oi->pos_x - mc->x;
}
- if (mc->y > c->pos_y) {
+ if (mc->y > oi->pos_y) {
y = 0;
- outh -= (mc->y - c->pos_y);
- paddr += (mc->y - c->pos_y) *
+ outh -= (mc->y - oi->pos_y);
+ paddr += (mc->y - oi->pos_y) *
scale_y_m / scale_y_d *
- c->screen_width * bpp / 8;
+ oi->screen_width * bpp / 8;
} else {
- y = c->pos_y - mc->y;
+ y = oi->pos_y - mc->y;
}
if (mc->w < (x + outw))
@@ -840,8 +912,8 @@ static int configure_overlay(enum omap_plane plane)
* the width if the original width was bigger.
*/
if ((w & 1) &&
- (c->color_mode == OMAP_DSS_COLOR_YUV2 ||
- c->color_mode == OMAP_DSS_COLOR_UYVY)) {
+ (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
+ oi->color_mode == OMAP_DSS_COLOR_UYVY)) {
if (orig_w > w)
w += 1;
else
@@ -851,19 +923,19 @@ static int configure_overlay(enum omap_plane plane)
r = dispc_setup_plane(plane,
paddr,
- c->screen_width,
+ oi->screen_width,
x, y,
w, h,
outw, outh,
- c->color_mode,
+ oi->color_mode,
c->ilace,
- c->rotation_type,
- c->rotation,
- c->mirror,
- c->global_alpha,
- c->pre_mult_alpha,
+ oi->rotation_type,
+ oi->rotation,
+ oi->mirror,
+ oi->global_alpha,
+ oi->pre_mult_alpha,
c->channel,
- c->p_uv_addr);
+ oi->p_uv_addr);
if (r) {
/* this shouldn't happen */
@@ -874,8 +946,7 @@ static int configure_overlay(enum omap_plane plane)
dispc_enable_replication(plane, c->replication);
- dispc_set_burst_size(plane, c->burst_size);
- dispc_setup_plane_fifo(plane, c->fifo_low, c->fifo_high);
+ dispc_set_fifo_threshold(plane, c->fifo_low, c->fifo_high);
dispc_enable_plane(plane, 1);
@@ -884,16 +955,21 @@ static int configure_overlay(enum omap_plane plane)
static void configure_manager(enum omap_channel channel)
{
- struct manager_cache_data *c;
+ struct omap_overlay_manager_info *mi;
DSSDBGF("%d", channel);
- c = &dss_cache.manager_cache[channel];
+ /* picking info from the cache */
+ mi = &dss_cache.manager_cache[channel].info;
- dispc_set_default_color(channel, c->default_color);
- dispc_set_trans_key(channel, c->trans_key_type, c->trans_key);
- dispc_enable_trans_key(channel, c->trans_enabled);
- dispc_enable_alpha_blending(channel, c->alpha_enabled);
+ dispc_set_default_color(channel, mi->default_color);
+ dispc_set_trans_key(channel, mi->trans_key_type, mi->trans_key);
+ dispc_enable_trans_key(channel, mi->trans_enabled);
+ dispc_enable_alpha_blending(channel, mi->alpha_enabled);
+ if (dss_has_feature(FEAT_CPR)) {
+ dispc_enable_cpr(channel, mi->cpr_enable);
+ dispc_set_cpr_coef(channel, &mi->cpr_coefs);
+ }
}
/* configure_dispc() tries to write values from cache to shadow registers.
@@ -928,7 +1004,7 @@ static int configure_dispc(void)
if (!oc->dirty)
continue;
- if (oc->manual_update && !mc->do_manual_update)
+ if (mc->manual_update && !mc->do_manual_update)
continue;
if (mgr_busy[oc->channel]) {
@@ -976,7 +1052,7 @@ static int configure_dispc(void)
/* We don't need GO with manual update display. LCD iface will
* always be turned off after frame, and new settings will be
* taken in to use at next update */
- if (!mc->manual_upd_display)
+ if (!mc->manual_update)
dispc_go(i);
}
@@ -1011,6 +1087,7 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
{
struct overlay_cache_data *oc;
struct manager_cache_data *mc;
+ struct omap_overlay_info *oi;
const int num_ovls = dss_feat_get_num_ovls();
struct omap_overlay_manager *mgr;
int i;
@@ -1053,6 +1130,7 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
unsigned outw, outh;
oc = &dss_cache.overlay_cache[i];
+ oi = &oc->info;
if (oc->channel != mgr->id)
continue;
@@ -1068,39 +1146,39 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev,
if (!dispc_is_overlay_scaled(oc))
continue;
- outw = oc->out_width == 0 ?
- oc->width : oc->out_width;
- outh = oc->out_height == 0 ?
- oc->height : oc->out_height;
+ outw = oi->out_width == 0 ?
+ oi->width : oi->out_width;
+ outh = oi->out_height == 0 ?
+ oi->height : oi->out_height;
/* is the overlay outside the update region? */
if (!rectangle_intersects(x, y, w, h,
- oc->pos_x, oc->pos_y,
+ oi->pos_x, oi->pos_y,
outw, outh))
continue;
/* is the overlay totally inside the update region? */
- if (rectangle_subset(oc->pos_x, oc->pos_y, outw, outh,
+ if (rectangle_subset(oi->pos_x, oi->pos_y, outw, outh,
x, y, w, h))
continue;
- if (x > oc->pos_x)
- x1 = oc->pos_x;
+ if (x > oi->pos_x)
+ x1 = oi->pos_x;
else
x1 = x;
- if (y > oc->pos_y)
- y1 = oc->pos_y;
+ if (y > oi->pos_y)
+ y1 = oi->pos_y;
else
y1 = y;
- if ((x + w) < (oc->pos_x + outw))
- x2 = oc->pos_x + outw;
+ if ((x + w) < (oi->pos_x + outw))
+ x2 = oi->pos_x + outw;
else
x2 = x + w;
- if ((y + h) < (oc->pos_y + outh))
- y2 = oc->pos_y + outh;
+ if ((y + h) < (oi->pos_y + outh))
+ y2 = oi->pos_y + outh;
else
y2 = y + h;
@@ -1236,6 +1314,10 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
+ r = dispc_runtime_get();
+ if (r)
+ return r;
+
spin_lock_irqsave(&dss_cache.lock, flags);
/* Configure overlays */
@@ -1275,23 +1357,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
ovl->info_dirty = false;
oc->dirty = true;
-
- oc->paddr = ovl->info.paddr;
- oc->vaddr = ovl->info.vaddr;
- oc->p_uv_addr = ovl->info.p_uv_addr;
- oc->screen_width = ovl->info.screen_width;
- oc->width = ovl->info.width;
- oc->height = ovl->info.height;
- oc->color_mode = ovl->info.color_mode;
- oc->rotation = ovl->info.rotation;
- oc->rotation_type = ovl->info.rotation_type;
- oc->mirror = ovl->info.mirror;
- oc->pos_x = ovl->info.pos_x;
- oc->pos_y = ovl->info.pos_y;
- oc->out_width = ovl->info.out_width;
- oc->out_height = ovl->info.out_height;
- oc->global_alpha = ovl->info.global_alpha;
- oc->pre_mult_alpha = ovl->info.pre_mult_alpha;
+ oc->info = ovl->info;
oc->replication =
dss_use_replication(dssdev, ovl->info.color_mode);
@@ -1302,11 +1368,6 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
oc->enabled = true;
- oc->manual_update =
- dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
- dssdev->driver->get_update_mode(dssdev) !=
- OMAP_DSS_UPDATE_AUTO;
-
++num_planes_enabled;
}
@@ -1334,20 +1395,10 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
mgr->info_dirty = false;
mc->dirty = true;
-
- mc->default_color = mgr->info.default_color;
- mc->trans_key_type = mgr->info.trans_key_type;
- mc->trans_key = mgr->info.trans_key;
- mc->trans_enabled = mgr->info.trans_enabled;
- mc->alpha_enabled = mgr->info.alpha_enabled;
-
- mc->manual_upd_display =
- dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
+ mc->info = mgr->info;
mc->manual_update =
- dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
- dssdev->driver->get_update_mode(dssdev) !=
- OMAP_DSS_UPDATE_AUTO;
+ dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}
/* XXX TODO: Try to get fifomerge working. The problem is that it
@@ -1368,7 +1419,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
/* Configure overlay fifos */
for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
struct omap_dss_device *dssdev;
- u32 size;
+ u32 size, burst_size;
ovl = omap_dss_get_overlay(i);
@@ -1386,6 +1437,8 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
if (use_fifomerge)
size *= 3;
+ burst_size = dispc_get_burst_size(ovl->id);
+
switch (dssdev->type) {
case OMAP_DISPLAY_TYPE_DPI:
case OMAP_DISPLAY_TYPE_DBI:
@@ -1393,13 +1446,13 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
case OMAP_DISPLAY_TYPE_VENC:
case OMAP_DISPLAY_TYPE_HDMI:
default_get_overlay_fifo_thresholds(ovl->id, size,
- &oc->burst_size, &oc->fifo_low,
+ burst_size, &oc->fifo_low,
&oc->fifo_high);
break;
#ifdef CONFIG_OMAP2_DSS_DSI
case OMAP_DISPLAY_TYPE_DSI:
dsi_get_overlay_fifo_thresholds(ovl->id, size,
- &oc->burst_size, &oc->fifo_low,
+ burst_size, &oc->fifo_low,
&oc->fifo_high);
break;
#endif
@@ -1409,7 +1462,6 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
}
r = 0;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
if (!dss_cache.irq_enabled) {
u32 mask;
@@ -1422,10 +1474,11 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
dss_cache.irq_enabled = true;
}
configure_dispc();
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
spin_unlock_irqrestore(&dss_cache.lock, flags);
+ dispc_runtime_put();
+
return r;
}
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 0f08025b1f0..c84380c53c3 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -84,32 +84,42 @@ static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf,
old_mgr = ovl->manager;
+ r = dispc_runtime_get();
+ if (r)
+ return r;
+
/* detach old manager */
if (old_mgr) {
r = ovl->unset_manager(ovl);
if (r) {
DSSERR("detach failed\n");
- return r;
+ goto err;
}
r = old_mgr->apply(old_mgr);
if (r)
- return r;
+ goto err;
}
if (mgr) {
r = ovl->set_manager(ovl, mgr);
if (r) {
DSSERR("Failed to attach overlay\n");
- return r;
+ goto err;
}
r = mgr->apply(mgr);
if (r)
- return r;
+ goto err;
}
+ dispc_runtime_put();
+
return size;
+
+err:
+ dispc_runtime_put();
+ return r;
}
static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
@@ -238,6 +248,9 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
u8 alpha;
struct omap_overlay_info info;
+ if (!dss_has_feature(FEAT_GLOBAL_ALPHA))
+ return -ENODEV;
+
r = kstrtou8(buf, 0, &alpha);
if (r)
return r;
@@ -504,7 +517,6 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
ovl->manager = mgr;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
/* XXX: When there is an overlay on a DSI manual update display, and
* the overlay is first disabled, then moved to tv, and enabled, we
* seem to get SYNC_LOST_DIGIT error.
@@ -518,7 +530,6 @@ static int omap_dss_set_manager(struct omap_overlay *ovl,
* the overlay, but before moving the overlay to TV.
*/
dispc_set_channel_out(ovl->id, mgr->id);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
return 0;
}
@@ -719,6 +730,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
}
if (mgr) {
+ dispc_runtime_get();
+
for (i = 0; i < dss_feat_get_num_ovls(); i++) {
struct omap_overlay *ovl;
ovl = omap_dss_get_overlay(i);
@@ -728,6 +741,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
omap_dss_set_manager(ovl, mgr);
}
}
+
+ dispc_runtime_put();
}
}
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index c06fbe0bc67..39f4c597026 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -33,6 +33,8 @@
#include <linux/hrtimer.h>
#include <linux/seq_file.h>
#include <linux/semaphore.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include "dss.h"
@@ -120,12 +122,25 @@ static inline u32 rfbi_read_reg(const struct rfbi_reg idx)
return __raw_readl(rfbi.base + idx.idx);
}
-static void rfbi_enable_clocks(bool enable)
+static int rfbi_runtime_get(void)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ int r;
+
+ DSSDBG("rfbi_runtime_get\n");
+
+ r = pm_runtime_get_sync(&rfbi.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+static void rfbi_runtime_put(void)
+{
+ int r;
+
+ DSSDBG("rfbi_runtime_put\n");
+
+ r = pm_runtime_put(&rfbi.pdev->dev);
+ WARN_ON(r < 0);
}
void rfbi_bus_lock(void)
@@ -805,7 +820,8 @@ void rfbi_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ if (rfbi_runtime_get())
+ return;
DUMPREG(RFBI_REVISION);
DUMPREG(RFBI_SYSCONFIG);
@@ -836,7 +852,7 @@ void rfbi_dump_regs(struct seq_file *s)
DUMPREG(RFBI_VSYNC_WIDTH);
DUMPREG(RFBI_HSYNC_WIDTH);
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ rfbi_runtime_put();
#undef DUMPREG
}
@@ -844,7 +860,9 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
{
int r;
- rfbi_enable_clocks(1);
+ r = rfbi_runtime_get();
+ if (r)
+ return r;
r = omap_dss_start_device(dssdev);
if (r) {
@@ -879,6 +897,7 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
err1:
omap_dss_stop_device(dssdev);
err0:
+ rfbi_runtime_put();
return r;
}
EXPORT_SYMBOL(omapdss_rfbi_display_enable);
@@ -889,7 +908,7 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
DISPC_IRQ_FRAMEDONE);
omap_dss_stop_device(dssdev);
- rfbi_enable_clocks(0);
+ rfbi_runtime_put();
}
EXPORT_SYMBOL(omapdss_rfbi_display_disable);
@@ -904,8 +923,9 @@ int rfbi_init_display(struct omap_dss_device *dssdev)
static int omap_rfbihw_probe(struct platform_device *pdev)
{
u32 rev;
- u32 l;
struct resource *rfbi_mem;
+ struct clk *clk;
+ int r;
rfbi.pdev = pdev;
@@ -914,46 +934,102 @@ static int omap_rfbihw_probe(struct platform_device *pdev)
rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0);
if (!rfbi_mem) {
DSSERR("can't get IORESOURCE_MEM RFBI\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto err_ioremap;
}
rfbi.base = ioremap(rfbi_mem->start, resource_size(rfbi_mem));
if (!rfbi.base) {
DSSERR("can't ioremap RFBI\n");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto err_ioremap;
}
- rfbi_enable_clocks(1);
+ pm_runtime_enable(&pdev->dev);
+
+ r = rfbi_runtime_get();
+ if (r)
+ goto err_get_rfbi;
msleep(10);
- rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000;
+ if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap3630())
+ clk = dss_get_ick();
+ else
+ clk = clk_get(&pdev->dev, "ick");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get ick\n");
+ r = PTR_ERR(clk);
+ goto err_get_ick;
+ }
+
+ rfbi.l4_khz = clk_get_rate(clk) / 1000;
- /* Enable autoidle and smart-idle */
- l = rfbi_read_reg(RFBI_SYSCONFIG);
- l |= (1 << 0) | (2 << 3);
- rfbi_write_reg(RFBI_SYSCONFIG, l);
+ clk_put(clk);
rev = rfbi_read_reg(RFBI_REVISION);
dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
- rfbi_enable_clocks(0);
+ rfbi_runtime_put();
return 0;
+
+err_get_ick:
+ rfbi_runtime_put();
+err_get_rfbi:
+ pm_runtime_disable(&pdev->dev);
+ iounmap(rfbi.base);
+err_ioremap:
+ return r;
}
static int omap_rfbihw_remove(struct platform_device *pdev)
{
+ pm_runtime_disable(&pdev->dev);
iounmap(rfbi.base);
return 0;
}
+static int rfbi_runtime_suspend(struct device *dev)
+{
+ dispc_runtime_put();
+ dss_runtime_put();
+
+ return 0;
+}
+
+static int rfbi_runtime_resume(struct device *dev)
+{
+ int r;
+
+ r = dss_runtime_get();
+ if (r < 0)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r < 0)
+ goto err_get_dispc;
+
+ return 0;
+
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ return r;
+}
+
+static const struct dev_pm_ops rfbi_pm_ops = {
+ .runtime_suspend = rfbi_runtime_suspend,
+ .runtime_resume = rfbi_runtime_resume,
+};
+
static struct platform_driver omap_rfbihw_driver = {
.probe = omap_rfbihw_probe,
.remove = omap_rfbihw_remove,
.driver = {
.name = "omapdss_rfbi",
.owner = THIS_MODULE,
+ .pm = &rfbi_pm_ops,
},
};
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 0bd4b0350f8..3a688c871a4 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -20,13 +20,11 @@
#define DSS_SUBSYS_NAME "SDI"
#include <linux/kernel.h>
-#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <video/omapdss.h>
-#include <plat/cpu.h>
#include "dss.h"
static struct {
@@ -60,14 +58,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
- goto err0;
+ goto err_start_dev;
}
r = regulator_enable(sdi.vdds_sdi_reg);
if (r)
- goto err1;
+ goto err_reg_enable;
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);
+ r = dss_runtime_get();
+ if (r)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r)
+ goto err_get_dispc;
sdi_basic_init(dssdev);
@@ -80,7 +84,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
r = dss_calc_clock_div(1, t->pixel_clock * 1000,
&dss_cinfo, &dispc_cinfo);
if (r)
- goto err2;
+ goto err_calc_clock_div;
fck = dss_cinfo.fck;
lck_div = dispc_cinfo.lck_div;
@@ -101,27 +105,34 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
r = dss_set_clock_div(&dss_cinfo);
if (r)
- goto err2;
+ goto err_set_dss_clock_div;
r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo);
if (r)
- goto err2;
+ goto err_set_dispc_clock_div;
dss_sdi_init(dssdev->phy.sdi.datapairs);
r = dss_sdi_enable();
if (r)
- goto err1;
+ goto err_sdi_enable;
mdelay(2);
dssdev->manager->enable(dssdev->manager);
return 0;
-err2:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+
+err_sdi_enable:
+err_set_dispc_clock_div:
+err_set_dss_clock_div:
+err_calc_clock_div:
+ dispc_runtime_put();
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
regulator_disable(sdi.vdds_sdi_reg);
-err1:
+err_reg_enable:
omap_dss_stop_device(dssdev);
-err0:
+err_start_dev:
return r;
}
EXPORT_SYMBOL(omapdss_sdi_display_enable);
@@ -132,7 +143,8 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
dss_sdi_disable();
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);
+ dispc_runtime_put();
+ dss_runtime_put();
regulator_disable(sdi.vdds_sdi_reg);
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 980f919ed98..173c66430da 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -33,11 +33,13 @@
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <video/omapdss.h>
#include <plat/cpu.h>
#include "dss.h"
+#include "dss_features.h"
/* Venc registers */
#define VENC_REV_ID 0x00
@@ -292,6 +294,9 @@ static struct {
struct mutex venc_lock;
u32 wss_data;
struct regulator *vdda_dac_reg;
+
+ struct clk *tv_clk;
+ struct clk *tv_dac_clk;
} venc;
static inline void venc_write_reg(int idx, u32 val)
@@ -380,14 +385,25 @@ static void venc_reset(void)
#endif
}
-static void venc_enable_clocks(int enable)
+static int venc_runtime_get(void)
+{
+ int r;
+
+ DSSDBG("venc_runtime_get\n");
+
+ r = pm_runtime_get_sync(&venc.pdev->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+static void venc_runtime_put(void)
{
- if (enable)
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK |
- DSS_CLK_VIDFCK);
- else
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK |
- DSS_CLK_VIDFCK);
+ int r;
+
+ DSSDBG("venc_runtime_put\n");
+
+ r = pm_runtime_put(&venc.pdev->dev);
+ WARN_ON(r < 0);
}
static const struct venc_config *venc_timings_to_config(
@@ -406,8 +422,6 @@ static void venc_power_on(struct omap_dss_device *dssdev)
{
u32 l;
- venc_enable_clocks(1);
-
venc_reset();
venc_write_config(venc_timings_to_config(&dssdev->panel.timings));
@@ -448,8 +462,6 @@ static void venc_power_off(struct omap_dss_device *dssdev)
dssdev->platform_disable(dssdev);
regulator_disable(venc.vdda_dac_reg);
-
- venc_enable_clocks(0);
}
@@ -487,6 +499,10 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
goto err1;
}
+ r = venc_runtime_get();
+ if (r)
+ goto err1;
+
venc_power_on(dssdev);
venc.wss_data = 0;
@@ -520,6 +536,8 @@ static void venc_panel_disable(struct omap_dss_device *dssdev)
venc_power_off(dssdev);
+ venc_runtime_put();
+
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
omap_dss_stop_device(dssdev);
@@ -538,20 +556,6 @@ static int venc_panel_resume(struct omap_dss_device *dssdev)
return venc_panel_enable(dssdev);
}
-static enum omap_dss_update_mode venc_get_update_mode(
- struct omap_dss_device *dssdev)
-{
- return OMAP_DSS_UPDATE_AUTO;
-}
-
-static int venc_set_update_mode(struct omap_dss_device *dssdev,
- enum omap_dss_update_mode mode)
-{
- if (mode != OMAP_DSS_UPDATE_AUTO)
- return -EINVAL;
- return 0;
-}
-
static void venc_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -598,6 +602,7 @@ static u32 venc_get_wss(struct omap_dss_device *dssdev)
static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
{
const struct venc_config *config;
+ int r;
DSSDBG("venc_set_wss\n");
@@ -608,16 +613,19 @@ static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
/* Invert due to VENC_L21_WC_CTL:INV=1 */
venc.wss_data = (wss ^ 0xfffff) << 8;
- venc_enable_clocks(1);
+ r = venc_runtime_get();
+ if (r)
+ goto err;
venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
venc.wss_data);
- venc_enable_clocks(0);
+ venc_runtime_put();
+err:
mutex_unlock(&venc.venc_lock);
- return 0;
+ return r;
}
static struct omap_dss_driver venc_driver = {
@@ -632,9 +640,6 @@ static struct omap_dss_driver venc_driver = {
.get_resolution = omapdss_default_get_resolution,
.get_recommended_bpp = omapdss_default_get_recommended_bpp,
- .set_update_mode = venc_set_update_mode,
- .get_update_mode = venc_get_update_mode,
-
.get_timings = venc_get_timings,
.set_timings = venc_set_timings,
.check_timings = venc_check_timings,
@@ -673,7 +678,8 @@ void venc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
- venc_enable_clocks(1);
+ if (venc_runtime_get())
+ return;
DUMPREG(VENC_F_CONTROL);
DUMPREG(VENC_VIDOUT_CTRL);
@@ -717,16 +723,56 @@ void venc_dump_regs(struct seq_file *s)
DUMPREG(VENC_OUTPUT_CONTROL);
DUMPREG(VENC_OUTPUT_TEST);
- venc_enable_clocks(0);
+ venc_runtime_put();
#undef DUMPREG
}
+static int venc_get_clocks(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ clk = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get fck\n");
+ return PTR_ERR(clk);
+ }
+
+ venc.tv_clk = clk;
+
+ if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) {
+ if (cpu_is_omap34xx() || cpu_is_omap3630())
+ clk = clk_get(&pdev->dev, "dss_96m_fck");
+ else
+ clk = clk_get(&pdev->dev, "tv_dac_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get tv_dac_clk\n");
+ clk_put(venc.tv_clk);
+ return PTR_ERR(clk);
+ }
+ } else {
+ clk = NULL;
+ }
+
+ venc.tv_dac_clk = clk;
+
+ return 0;
+}
+
+static void venc_put_clocks(void)
+{
+ if (venc.tv_clk)
+ clk_put(venc.tv_clk);
+ if (venc.tv_dac_clk)
+ clk_put(venc.tv_dac_clk);
+}
+
/* VENC HW IP initialisation */
static int omap_venchw_probe(struct platform_device *pdev)
{
u8 rev_id;
struct resource *venc_mem;
+ int r;
venc.pdev = pdev;
@@ -737,22 +783,40 @@ static int omap_venchw_probe(struct platform_device *pdev)
venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0);
if (!venc_mem) {
DSSERR("can't get IORESOURCE_MEM VENC\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto err_ioremap;
}
venc.base = ioremap(venc_mem->start, resource_size(venc_mem));
if (!venc.base) {
DSSERR("can't ioremap VENC\n");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto err_ioremap;
}
- venc_enable_clocks(1);
+ r = venc_get_clocks(pdev);
+ if (r)
+ goto err_get_clk;
+
+ pm_runtime_enable(&pdev->dev);
+
+ r = venc_runtime_get();
+ if (r)
+ goto err_get_venc;
rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id);
- venc_enable_clocks(0);
+ venc_runtime_put();
return omap_dss_register_driver(&venc_driver);
+
+err_get_venc:
+ pm_runtime_disable(&pdev->dev);
+ venc_put_clocks();
+err_get_clk:
+ iounmap(venc.base);
+err_ioremap:
+ return r;
}
static int omap_venchw_remove(struct platform_device *pdev)
@@ -763,16 +827,61 @@ static int omap_venchw_remove(struct platform_device *pdev)
}
omap_dss_unregister_driver(&venc_driver);
+ pm_runtime_disable(&pdev->dev);
+ venc_put_clocks();
+
iounmap(venc.base);
return 0;
}
+static int venc_runtime_suspend(struct device *dev)
+{
+ if (venc.tv_dac_clk)
+ clk_disable(venc.tv_dac_clk);
+ clk_disable(venc.tv_clk);
+
+ dispc_runtime_put();
+ dss_runtime_put();
+
+ return 0;
+}
+
+static int venc_runtime_resume(struct device *dev)
+{
+ int r;
+
+ r = dss_runtime_get();
+ if (r < 0)
+ goto err_get_dss;
+
+ r = dispc_runtime_get();
+ if (r < 0)
+ goto err_get_dispc;
+
+ clk_enable(venc.tv_clk);
+ if (venc.tv_dac_clk)
+ clk_enable(venc.tv_dac_clk);
+
+ return 0;
+
+err_get_dispc:
+ dss_runtime_put();
+err_get_dss:
+ return r;
+}
+
+static const struct dev_pm_ops venc_pm_ops = {
+ .runtime_suspend = venc_runtime_suspend,
+ .runtime_resume = venc_runtime_resume,
+};
+
static struct platform_driver omap_venchw_driver = {
.probe = omap_venchw_probe,
.remove = omap_venchw_remove,
.driver = {
.name = "omapdss_venc",
.owner = THIS_MODULE,
+ .pm = &venc_pm_ops,
},
};
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index cff450392b7..6b1ac23dbbd 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -316,67 +316,67 @@ int omapfb_update_window(struct fb_info *fbi,
}
EXPORT_SYMBOL(omapfb_update_window);
-static int omapfb_set_update_mode(struct fb_info *fbi,
+int omapfb_set_update_mode(struct fb_info *fbi,
enum omapfb_update_mode mode)
{
struct omap_dss_device *display = fb2display(fbi);
- enum omap_dss_update_mode um;
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omapfb_display_data *d;
int r;
- if (!display || !display->driver->set_update_mode)
+ if (!display)
return -EINVAL;
- switch (mode) {
- case OMAPFB_UPDATE_DISABLED:
- um = OMAP_DSS_UPDATE_DISABLED;
- break;
+ if (mode != OMAPFB_AUTO_UPDATE && mode != OMAPFB_MANUAL_UPDATE)
+ return -EINVAL;
- case OMAPFB_AUTO_UPDATE:
- um = OMAP_DSS_UPDATE_AUTO;
- break;
+ omapfb_lock(fbdev);
- case OMAPFB_MANUAL_UPDATE:
- um = OMAP_DSS_UPDATE_MANUAL;
- break;
+ d = get_display_data(fbdev, display);
- default:
- return -EINVAL;
+ if (d->update_mode == mode) {
+ omapfb_unlock(fbdev);
+ return 0;
}
- r = display->driver->set_update_mode(display, um);
+ r = 0;
+
+ if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+ if (mode == OMAPFB_AUTO_UPDATE)
+ omapfb_start_auto_update(fbdev, display);
+ else /* MANUAL_UPDATE */
+ omapfb_stop_auto_update(fbdev, display);
+
+ d->update_mode = mode;
+ } else { /* AUTO_UPDATE */
+ if (mode == OMAPFB_MANUAL_UPDATE)
+ r = -EINVAL;
+ }
+
+ omapfb_unlock(fbdev);
return r;
}
-static int omapfb_get_update_mode(struct fb_info *fbi,
+int omapfb_get_update_mode(struct fb_info *fbi,
enum omapfb_update_mode *mode)
{
struct omap_dss_device *display = fb2display(fbi);
- enum omap_dss_update_mode m;
+ struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
+ struct omapfb_display_data *d;
if (!display)
return -EINVAL;
- if (!display->driver->get_update_mode) {
- *mode = OMAPFB_AUTO_UPDATE;
- return 0;
- }
+ omapfb_lock(fbdev);
- m = display->driver->get_update_mode(display);
+ d = get_display_data(fbdev, display);
- switch (m) {
- case OMAP_DSS_UPDATE_DISABLED:
- *mode = OMAPFB_UPDATE_DISABLED;
- break;
- case OMAP_DSS_UPDATE_AUTO:
- *mode = OMAPFB_AUTO_UPDATE;
- break;
- case OMAP_DSS_UPDATE_MANUAL:
- *mode = OMAPFB_MANUAL_UPDATE;
- break;
- default:
- BUG();
- }
+ *mode = d->update_mode;
+
+ omapfb_unlock(fbdev);
return 0;
}
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 505bc12a303..602b71a92d3 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -46,6 +46,10 @@ static char *def_vram;
static int def_vrfb;
static int def_rotate;
static int def_mirror;
+static bool auto_update;
+static unsigned int auto_update_freq;
+module_param(auto_update, bool, 0);
+module_param(auto_update_freq, uint, 0644);
#ifdef DEBUG
unsigned int omapfb_debug;
@@ -1242,6 +1246,7 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
struct omapfb_info *ofbi = FB2OFB(fbi);
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omap_dss_device *display = fb2display(fbi);
+ struct omapfb_display_data *d;
int r = 0;
if (!display)
@@ -1249,6 +1254,8 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
omapfb_lock(fbdev);
+ d = get_display_data(fbdev, display);
+
switch (blank) {
case FB_BLANK_UNBLANK:
if (display->state != OMAP_DSS_DISPLAY_SUSPENDED)
@@ -1257,6 +1264,11 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
if (display->driver->resume)
r = display->driver->resume(display);
+ if ((display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) &&
+ d->update_mode == OMAPFB_AUTO_UPDATE &&
+ !d->auto_update_work_enabled)
+ omapfb_start_auto_update(fbdev, display);
+
break;
case FB_BLANK_NORMAL:
@@ -1268,6 +1280,9 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
goto exit;
+ if (d->auto_update_work_enabled)
+ omapfb_stop_auto_update(fbdev, display);
+
if (display->driver->suspend)
r = display->driver->suspend(display);
@@ -1724,6 +1739,78 @@ err:
return r;
}
+static void omapfb_auto_update_work(struct work_struct *work)
+{
+ struct omap_dss_device *dssdev;
+ struct omap_dss_driver *dssdrv;
+ struct omapfb_display_data *d;
+ u16 w, h;
+ unsigned int freq;
+ struct omapfb2_device *fbdev;
+
+ d = container_of(work, struct omapfb_display_data,
+ auto_update_work.work);
+
+ dssdev = d->dssdev;
+ dssdrv = dssdev->driver;
+ fbdev = d->fbdev;
+
+ if (!dssdrv || !dssdrv->update)
+ return;
+
+ if (dssdrv->sync)
+ dssdrv->sync(dssdev);
+
+ dssdrv->get_resolution(dssdev, &w, &h);
+ dssdrv->update(dssdev, 0, 0, w, h);
+
+ freq = auto_update_freq;
+ if (freq == 0)
+ freq = 20;
+ queue_delayed_work(fbdev->auto_update_wq,
+ &d->auto_update_work, HZ / freq);
+}
+
+void omapfb_start_auto_update(struct omapfb2_device *fbdev,
+ struct omap_dss_device *display)
+{
+ struct omapfb_display_data *d;
+
+ if (fbdev->auto_update_wq == NULL) {
+ struct workqueue_struct *wq;
+
+ wq = create_singlethread_workqueue("omapfb_auto_update");
+
+ if (wq == NULL) {
+ dev_err(fbdev->dev, "Failed to create workqueue for "
+ "auto-update\n");
+ return;
+ }
+
+ fbdev->auto_update_wq = wq;
+ }
+
+ d = get_display_data(fbdev, display);
+
+ INIT_DELAYED_WORK(&d->auto_update_work, omapfb_auto_update_work);
+
+ d->auto_update_work_enabled = true;
+
+ omapfb_auto_update_work(&d->auto_update_work.work);
+}
+
+void omapfb_stop_auto_update(struct omapfb2_device *fbdev,
+ struct omap_dss_device *display)
+{
+ struct omapfb_display_data *d;
+
+ d = get_display_data(fbdev, display);
+
+ cancel_delayed_work_sync(&d->auto_update_work);
+
+ d->auto_update_work_enabled = false;
+}
+
/* initialize fb_info, var, fix to something sane based on the display */
static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi)
{
@@ -1858,10 +1945,21 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
}
for (i = 0; i < fbdev->num_displays; i++) {
- if (fbdev->displays[i]->state != OMAP_DSS_DISPLAY_DISABLED)
- fbdev->displays[i]->driver->disable(fbdev->displays[i]);
+ struct omap_dss_device *dssdev = fbdev->displays[i].dssdev;
+
+ if (fbdev->displays[i].auto_update_work_enabled)
+ omapfb_stop_auto_update(fbdev, dssdev);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
+ dssdev->driver->disable(dssdev);
+
+ omap_dss_put_device(dssdev);
+ }
- omap_dss_put_device(fbdev->displays[i]);
+ if (fbdev->auto_update_wq != NULL) {
+ flush_workqueue(fbdev->auto_update_wq);
+ destroy_workqueue(fbdev->auto_update_wq);
+ fbdev->auto_update_wq = NULL;
}
dev_set_drvdata(fbdev->dev, NULL);
@@ -2084,14 +2182,14 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
int r;
u8 bpp;
struct omap_video_timings timings, temp_timings;
+ struct omapfb_display_data *d;
r = omapfb_mode_to_timings(mode_str, &timings, &bpp);
if (r)
return r;
- fbdev->bpp_overrides[fbdev->num_bpp_overrides].dssdev = display;
- fbdev->bpp_overrides[fbdev->num_bpp_overrides].bpp = bpp;
- ++fbdev->num_bpp_overrides;
+ d = get_display_data(fbdev, display);
+ d->bpp_override = bpp;
if (display->driver->check_timings) {
r = display->driver->check_timings(display, &timings);
@@ -2117,14 +2215,14 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
static int omapfb_get_recommended_bpp(struct omapfb2_device *fbdev,
struct omap_dss_device *dssdev)
{
- int i;
+ struct omapfb_display_data *d;
BUG_ON(dssdev->driver->get_recommended_bpp == NULL);
- for (i = 0; i < fbdev->num_bpp_overrides; ++i) {
- if (dssdev == fbdev->bpp_overrides[i].dssdev)
- return fbdev->bpp_overrides[i].bpp;
- }
+ d = get_display_data(fbdev, dssdev);
+
+ if (d->bpp_override != 0)
+ return d->bpp_override;
return dssdev->driver->get_recommended_bpp(dssdev);
}
@@ -2156,9 +2254,9 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
display = NULL;
for (i = 0; i < fbdev->num_displays; ++i) {
- if (strcmp(fbdev->displays[i]->name,
+ if (strcmp(fbdev->displays[i].dssdev->name,
display_str) == 0) {
- display = fbdev->displays[i];
+ display = fbdev->displays[i].dssdev;
break;
}
}
@@ -2182,6 +2280,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
struct omap_dss_device *dssdev)
{
struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omapfb_display_data *d;
int r;
r = dssdrv->enable(dssdev);
@@ -2191,8 +2290,20 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
return r;
}
+ d = get_display_data(fbdev, dssdev);
+
+ d->fbdev = fbdev;
+
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
u16 w, h;
+
+ if (auto_update) {
+ omapfb_start_auto_update(fbdev, dssdev);
+ d->update_mode = OMAPFB_AUTO_UPDATE;
+ } else {
+ d->update_mode = OMAPFB_MANUAL_UPDATE;
+ }
+
if (dssdrv->enable_te) {
r = dssdrv->enable_te(dssdev, 1);
if (r) {
@@ -2201,16 +2312,6 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
}
}
- if (dssdrv->set_update_mode) {
- r = dssdrv->set_update_mode(dssdev,
- OMAP_DSS_UPDATE_MANUAL);
- if (r) {
- dev_err(fbdev->dev,
- "Failed to set update mode\n");
- return r;
- }
- }
-
dssdrv->get_resolution(dssdev, &w, &h);
r = dssdrv->update(dssdev, 0, 0, w, h);
if (r) {
@@ -2219,15 +2320,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
return r;
}
} else {
- if (dssdrv->set_update_mode) {
- r = dssdrv->set_update_mode(dssdev,
- OMAP_DSS_UPDATE_AUTO);
- if (r) {
- dev_err(fbdev->dev,
- "Failed to set update mode\n");
- return r;
- }
- }
+ d->update_mode = OMAPFB_AUTO_UPDATE;
}
return 0;
@@ -2275,6 +2368,8 @@ static int omapfb_probe(struct platform_device *pdev)
fbdev->num_displays = 0;
dssdev = NULL;
for_each_dss_dev(dssdev) {
+ struct omapfb_display_data *d;
+
omap_dss_get_device(dssdev);
if (!dssdev->driver) {
@@ -2282,7 +2377,12 @@ static int omapfb_probe(struct platform_device *pdev)
r = -ENODEV;
}
- fbdev->displays[fbdev->num_displays++] = dssdev;
+ d = &fbdev->displays[fbdev->num_displays++];
+ d->dssdev = dssdev;
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
+ d->update_mode = OMAPFB_MANUAL_UPDATE;
+ else
+ d->update_mode = OMAPFB_AUTO_UPDATE;
}
if (r)
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 2f5e817b2a9..153bf1aceeb 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -518,6 +518,39 @@ static ssize_t show_virt(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr);
}
+static ssize_t show_upd_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ enum omapfb_update_mode mode;
+ int r;
+
+ r = omapfb_get_update_mode(fbi, &mode);
+
+ if (r)
+ return r;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", (unsigned)mode);
+}
+
+static ssize_t store_upd_mode(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ unsigned mode;
+ int r;
+
+ r = kstrtouint(buf, 0, &mode);
+ if (r)
+ return r;
+
+ r = omapfb_set_update_mode(fbi, mode);
+ if (r)
+ return r;
+
+ return count;
+}
+
static struct device_attribute omapfb_attrs[] = {
__ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type,
store_rotate_type),
@@ -528,6 +561,7 @@ static struct device_attribute omapfb_attrs[] = {
store_overlays_rotate),
__ATTR(phys_addr, S_IRUGO, show_phys, NULL),
__ATTR(virt_addr, S_IRUGO, show_virt, NULL),
+ __ATTR(update_mode, S_IRUGO | S_IWUSR, show_upd_mode, store_upd_mode),
};
int omapfb_create_sysfs(struct omapfb2_device *fbdev)
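The new update_mode attribute mirrors the ioctl interface through sysfs. A minimal userspace sketch, assuming the attribute appears under /sys/class/graphics/fb0 and that the numeric values follow enum omapfb_update_mode (1 = auto, 2 = manual); both the path and the values are assumptions:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/class/graphics/fb0/update_mode"; /* assumed path */
	unsigned int mode;
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%u", &mode) == 1)
		printf("current update mode: %u\n", mode);
	fclose(f);

	f = fopen(path, "w");
	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	fprintf(f, "2\n");	/* 2 = OMAPFB_MANUAL_UPDATE (assumed value) */
	fclose(f);
	return 0;
}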
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index aa1b1d97427..fdf0edeccf4 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -73,6 +73,15 @@ struct omapfb_info {
bool mirror;
};
+struct omapfb_display_data {
+ struct omapfb2_device *fbdev;
+ struct omap_dss_device *dssdev;
+ u8 bpp_override;
+ enum omapfb_update_mode update_mode;
+ bool auto_update_work_enabled;
+ struct delayed_work auto_update_work;
+};
+
struct omapfb2_device {
struct device *dev;
struct mutex mtx;
@@ -86,17 +95,13 @@ struct omapfb2_device {
struct omapfb2_mem_region regions[10];
unsigned num_displays;
- struct omap_dss_device *displays[10];
+ struct omapfb_display_data displays[10];
unsigned num_overlays;
struct omap_overlay *overlays[10];
unsigned num_managers;
struct omap_overlay_manager *managers[10];
- unsigned num_bpp_overrides;
- struct {
- struct omap_dss_device *dssdev;
- u8 bpp;
- } bpp_overrides[10];
+ struct workqueue_struct *auto_update_wq;
};
struct omapfb_colormode {
@@ -128,6 +133,13 @@ int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
u16 posx, u16 posy, u16 outw, u16 outh);
+void omapfb_start_auto_update(struct omapfb2_device *fbdev,
+ struct omap_dss_device *display);
+void omapfb_stop_auto_update(struct omapfb2_device *fbdev,
+ struct omap_dss_device *display);
+int omapfb_get_update_mode(struct fb_info *fbi, enum omapfb_update_mode *mode);
+int omapfb_set_update_mode(struct fb_info *fbi, enum omapfb_update_mode mode);
+
/* find the display connected to this fb, if any */
static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
{
@@ -143,6 +155,19 @@ static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
return NULL;
}
+static inline struct omapfb_display_data *get_display_data(
+ struct omapfb2_device *fbdev, struct omap_dss_device *dssdev)
+{
+ int i;
+
+ for (i = 0; i < fbdev->num_displays; ++i)
+ if (fbdev->displays[i].dssdev == dssdev)
+ return &fbdev->displays[i];
+
+ /* This should never happen */
+ BUG();
+}
+
static inline void omapfb_lock(struct omapfb2_device *fbdev)
{
mutex_lock(&fbdev->mtx);
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index ef532d9d3c9..f27ae16ead2 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -567,7 +567,7 @@ static int __devinit platinumfb_probe(struct platform_device* odev)
* northbridge and that can fail. Only request framebuffer
*/
if (!request_mem_region(pinfo->rsrc_fb.start,
- pinfo->rsrc_fb.end - pinfo->rsrc_fb.start + 1,
+ resource_size(&pinfo->rsrc_fb),
"platinumfb framebuffer")) {
printk(KERN_ERR "platinumfb: Can't request framebuffer !\n");
framebuffer_release(info);
@@ -658,8 +658,7 @@ static int __devexit platinumfb_remove(struct platform_device* odev)
iounmap(pinfo->cmap_regs);
release_mem_region(pinfo->rsrc_fb.start,
- pinfo->rsrc_fb.end -
- pinfo->rsrc_fb.start + 1);
+ resource_size(&pinfo->rsrc_fb));
release_mem_region(pinfo->cmap_regs_phys, 0x1000);
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index bb95ec56d25..18ead6f0184 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -662,7 +662,7 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
info->fix.ypanstep = 0;
info->fix.ywrapstep = 0;
info->fix.mmio_start = res->start;
- info->fix.mmio_len = res->end - res->start + 1;
+ info->fix.mmio_len = resource_size(res);
info->fix.accel = FB_ACCEL_NONE;
info->fbops = &pxa168fb_ops;
info->pseudo_palette = fbi->pseudo_palette;
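The platinumfb and pxa168fb hunks above replace open-coded length computations with resource_size(). A standalone sketch (simplified struct, not the kernel's) of why the helper exists: resources are inclusive ranges, so every open-coded size needs the easy-to-forget +1.

#include <stdio.h>

struct resource {
	unsigned long start;
	unsigned long end;	/* inclusive */
};

static unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

int main(void)
{
	struct resource fb = { .start = 0x80000000UL, .end = 0x807fffffUL };

	/* Both expressions are 8 MiB; the helper keeps the +1 in one place. */
	printf("open-coded: %lu\n", fb.end - fb.start + 1);
	printf("helper:     %lu\n", resource_size(&fb));
	return 0;
}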
diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h
index 32549d177b1..dcaab9012ca 100644
--- a/drivers/video/savage/savagefb.h
+++ b/drivers/video/savage/savagefb.h
@@ -55,7 +55,7 @@
#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
-#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) || (chip<=S3_PROSAVAGEDDR))
+#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) && (chip<=S3_PROSAVAGEDDR))
#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
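The S3_SAVAGE4_SERIES() change above turns an always-true test into a real range check. A standalone sketch with placeholder enum values (not the driver's real chip IDs) makes the difference visible:

#include <stdio.h>

enum { S3_SAVAGE3D = 1, S3_SAVAGE4 = 4, S3_PROSAVAGEDDR = 7, S3_SUPERSAVAGE = 9 };

#define OLD_SAVAGE4_SERIES(chip) ((chip >= S3_SAVAGE4) || (chip <= S3_PROSAVAGEDDR))
#define NEW_SAVAGE4_SERIES(chip) ((chip >= S3_SAVAGE4) && (chip <= S3_PROSAVAGEDDR))

int main(void)
{
	int chip;

	/* With "||" every value passes, since any integer is either above
	 * the lower bound or below the upper bound; "&&" limits the match
	 * to the Savage4..ProSavageDDR range. */
	for (chip = S3_SAVAGE3D; chip <= S3_SUPERSAVAGE; chip++)
		printf("chip %d: old=%d new=%d\n", chip,
		       OLD_SAVAGE4_SERIES(chip), NEW_SAVAGE4_SERIES(chip));
	return 0;
}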
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 019dbd3f12b..b048417247e 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -24,7 +24,7 @@
#include <linux/backlight.h>
#include <linux/gpio.h>
#include <video/sh_mobile_lcdc.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "sh_mobile_lcdcfb.h"
#include "sh_mobile_meram.h"
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
index 9170c82b495..cc7d7329dc1 100644
--- a/drivers/video/sh_mobile_meram.c
+++ b/drivers/video/sh_mobile_meram.c
@@ -218,7 +218,7 @@ static inline void meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata,
icb_offset = 0xc0000000 | (cfg->current_reg << 23);
*icb_addr_y = icb_offset | (cfg->icb[0].marker_icb << 24);
- if ((*icb_addr_c) && is_nvcolor(cfg->pixelformat))
+ if (is_nvcolor(cfg->pixelformat))
*icb_addr_c = icb_offset | (cfg->icb[1].marker_icb << 24);
}
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 87f0be1e78b..6294dca9550 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -1664,7 +1664,7 @@ static void sm501fb_stop(struct sm501fb_info *info)
resource_size(info->regs_res));
}
-static int sm501fb_init_fb(struct fb_info *fb,
+static int __devinit sm501fb_init_fb(struct fb_info *fb,
enum sm501_controller head,
const char *fbname)
{
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 52b0f3e8cca..087fc9960bb 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
-#include <linux/prefetch.h>
#include <video/udlfb.h>
#include "edid.h"
@@ -1233,8 +1232,12 @@ static int dlfb_setup_modes(struct dlfb_data *dev,
if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info))
fb_add_videomode(&info->monspecs.modedb[i],
&info->modelist);
- else /* if we've removed top/best mode */
- info->monspecs.misc &= ~FB_MISC_1ST_DETAIL;
+ else {
+ if (i == 0)
+ /* if we've removed top/best mode */
+ info->monspecs.misc
+ &= ~FB_MISC_1ST_DETAIL;
+ }
}
default_vmode = fb_find_best_display(&info->monspecs,
diff --git a/drivers/video/vermilion/vermilion.h b/drivers/video/vermilion/vermilion.h
index 7491abfcf1f..43d11ec197f 100644
--- a/drivers/video/vermilion/vermilion.h
+++ b/drivers/video/vermilion/vermilion.h
@@ -31,7 +31,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/mutex.h>
#define VML_DEVICE_GPU 0x5002
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index a99bbe86db1..501b3406c6d 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -175,6 +175,7 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
static void vesafb_destroy(struct fb_info *info)
{
+ fb_dealloc_cmap(&info->cmap);
if (info->screen_base)
iounmap(info->screen_base);
release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index cf43c80d27f..53aa4430d86 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -2016,7 +2016,7 @@ static int __init viafb_setup(void)
int __init viafb_init(void)
{
u32 dummy_x, dummy_y;
- int r;
+ int r = 0;
if (machine_is_olpc())
/* Apply XO-1.5-specific configuration. */
@@ -2039,7 +2039,7 @@ int __init viafb_init(void)
printk(KERN_INFO
"VIA Graphics Integration Chipset framebuffer %d.%d initializing\n",
VERSION_MAJOR, VERSION_MINOR);
- return 0;
+ return r;
}
void __exit viafb_exit(void)
diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig
new file mode 100644
index 00000000000..2dcdbc9364d
--- /dev/null
+++ b/drivers/virt/Kconfig
@@ -0,0 +1,32 @@
+#
+# Virtualization support drivers
+#
+
+menuconfig VIRT_DRIVERS
+ bool "Virtualization drivers"
+ ---help---
+ Say Y here to get to see options for device drivers that support
+ virtualization environments.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRT_DRIVERS
+
+config FSL_HV_MANAGER
+ tristate "Freescale hypervisor management driver"
+ depends on FSL_SOC
+ help
+ The Freescale hypervisor management driver provides several services
+ to drivers and applications related to the Freescale hypervisor:
+
+ 1) An ioctl interface for querying and managing partitions.
+
+ 2) A file interface for reading incoming doorbells.
+
+ 3) An interrupt handler for shutting down the partition upon
+ receiving the shutdown doorbell from a manager partition.
+
+ 4) A kernel interface for receiving callbacks when a managed
+ partition shuts down.
+
+endif
diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile
new file mode 100644
index 00000000000..c47f04dd343
--- /dev/null
+++ b/drivers/virt/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for drivers that support virtualization
+#
+
+obj-$(CONFIG_FSL_HV_MANAGER) += fsl_hypervisor.o
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
new file mode 100644
index 00000000000..3d9162151fd
--- /dev/null
+++ b/drivers/virt/fsl_hypervisor.c
@@ -0,0 +1,938 @@
+/*
+ * Freescale Hypervisor Management Driver
+ *
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * The Freescale hypervisor management driver provides several services to
+ * drivers and applications related to the Freescale hypervisor:
+ *
+ * 1. An ioctl interface for querying and managing partitions.
+ *
+ * 2. A file interface for reading incoming doorbells.
+ *
+ * 3. An interrupt handler for shutting down the partition upon receiving the
+ * shutdown doorbell from a manager partition.
+ *
+ * 4. A kernel interface for receiving callbacks when a managed partition
+ * shuts down.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+#include <linux/uaccess.h>
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+
+#include <linux/io.h>
+#include <asm/fsl_hcalls.h>
+
+#include <linux/fsl_hypervisor.h>
+
+static BLOCKING_NOTIFIER_HEAD(failover_subscribers);
+
+/*
+ * Ioctl interface for FSL_HV_IOCTL_PARTITION_RESTART
+ *
+ * Restart a running partition
+ */
+static long ioctl_restart(struct fsl_hv_ioctl_restart __user *p)
+{
+ struct fsl_hv_ioctl_restart param;
+
+ /* Get the parameters from the user */
+ if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_restart)))
+ return -EFAULT;
+
+ param.ret = fh_partition_restart(param.partition);
+
+ if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Ioctl interface for FSL_HV_IOCTL_PARTITION_STATUS
+ *
+ * Query the status of a partition
+ */
+static long ioctl_status(struct fsl_hv_ioctl_status __user *p)
+{
+ struct fsl_hv_ioctl_status param;
+ u32 status;
+
+ /* Get the parameters from the user */
+ if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_status)))
+ return -EFAULT;
+
+ param.ret = fh_partition_get_status(param.partition, &status);
+ if (!param.ret)
+ param.status = status;
+
+ if (copy_to_user(p, &param, sizeof(struct fsl_hv_ioctl_status)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Ioctl interface for FSL_HV_IOCTL_PARTITION_START
+ *
+ * Start a stopped partition.
+ */
+static long ioctl_start(struct fsl_hv_ioctl_start __user *p)
+{
+ struct fsl_hv_ioctl_start param;
+
+ /* Get the parameters from the user */
+ if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_start)))
+ return -EFAULT;
+
+ param.ret = fh_partition_start(param.partition, param.entry_point,
+ param.load);
+
+ if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Ioctl interface for FSL_HV_IOCTL_PARTITION_STOP
+ *
+ * Stop a running partition
+ */
+static long ioctl_stop(struct fsl_hv_ioctl_stop __user *p)
+{
+ struct fsl_hv_ioctl_stop param;
+
+ /* Get the parameters from the user */
+ if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_stop)))
+ return -EFAULT;
+
+ param.ret = fh_partition_stop(param.partition);
+
+ if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Ioctl interface for FSL_HV_IOCTL_MEMCPY
+ *
+ * The FH_MEMCPY hypercall takes an array of address/address/size structures
+ * to represent the data being copied. As a convenience to the user, this
+ * ioctl takes a user-created buffer and a pointer to a guest physically
+ * contiguous buffer in the remote partition, and creates the
+ * address/address/size array for the hypercall.
+ */
+static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
+{
+ struct fsl_hv_ioctl_memcpy param;
+
+ struct page **pages = NULL;
+ void *sg_list_unaligned = NULL;
+ struct fh_sg_list *sg_list = NULL;
+
+ unsigned int num_pages;
+ unsigned long lb_offset; /* Offset within a page of the local buffer */
+
+ unsigned int i;
+ long ret = 0;
+ int num_pinned; /* return value from get_user_pages() */
+ phys_addr_t remote_paddr; /* The next address in the remote buffer */
+ uint32_t count; /* The number of bytes left to copy */
+
+ /* Get the parameters from the user */
+ if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_memcpy)))
+ return -EFAULT;
+
+ /*
+ * One partition must be local, the other must be remote. In other
+ * words, if source and target are both -1, or are both not -1, then
+ * return an error.
+ */
+ if ((param.source == -1) == (param.target == -1))
+ return -EINVAL;
+
+ /*
+ * The array of pages returned by get_user_pages() covers only
+ * page-aligned memory. Since the user buffer is probably not
+ * page-aligned, we need to handle the discrepancy.
+ *
+ * We calculate the offset of the local buffer within its first page, and make
+ * adjustments accordingly. This will result in a page list that looks
+ * like this:
+ *
+ * ---- <-- first page starts before the buffer
+ * | |
+ * |////|-> ----
+ * |////| | |
+ * ---- | |
+ * | |
+ * ---- | |
+ * |////| | |
+ * |////| | |
+ * |////| | |
+ * ---- | |
+ * | |
+ * ---- | |
+ * |////| | |
+ * |////| | |
+ * |////| | |
+ * ---- | |
+ * | |
+ * ---- | |
+ * |////| | |
+ * |////|-> ----
+ * | | <-- last page ends after the buffer
+ * ----
+ *
+ * The distance between the start of the first page and the start of the
+ * buffer is lb_offset. The hashed (///) areas are the parts of the
+ * page list that contain the actual buffer.
+ *
+ * The advantage of this approach is that the number of pages is
+ * equal to the number of entries in the S/G list that we give to the
+ * hypervisor.
+ */
+ lb_offset = param.local_vaddr & (PAGE_SIZE - 1);
+ num_pages = (param.count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ /* Allocate the buffers we need */
+
+ /*
+ * 'pages' is an array of struct page pointers that's initialized by
+ * get_user_pages().
+ */
+ pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
+ pr_debug("fsl-hv: could not allocate page list\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * sg_list is the list of fh_sg_list objects that we pass to the
+ * hypervisor.
+ */
+ sg_list_unaligned = kmalloc(num_pages * sizeof(struct fh_sg_list) +
+ sizeof(struct fh_sg_list) - 1, GFP_KERNEL);
+ if (!sg_list_unaligned) {
+ pr_debug("fsl-hv: could not allocate S/G list\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+ sg_list = PTR_ALIGN(sg_list_unaligned, sizeof(struct fh_sg_list));
+
+ /* Get the physical addresses of the source buffer */
+ down_read(&current->mm->mmap_sem);
+ num_pinned = get_user_pages(current, current->mm,
+ param.local_vaddr - lb_offset, num_pages,
+ (param.source == -1) ? READ : WRITE,
+ 0, pages, NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (num_pinned != num_pages) {
+ /* get_user_pages() failed */
+ pr_debug("fsl-hv: could not lock source buffer\n");
+ ret = (num_pinned < 0) ? num_pinned : -EFAULT;
+ goto exit;
+ }
+
+ /*
+ * Build the fh_sg_list[] array. The first page is special
+ * because it's misaligned.
+ */
+ if (param.source == -1) {
+ sg_list[0].source = page_to_phys(pages[0]) + lb_offset;
+ sg_list[0].target = param.remote_paddr;
+ } else {
+ sg_list[0].source = param.remote_paddr;
+ sg_list[0].target = page_to_phys(pages[0]) + lb_offset;
+ }
+ sg_list[0].size = min_t(uint64_t, param.count, PAGE_SIZE - lb_offset);
+
+ remote_paddr = param.remote_paddr + sg_list[0].size;
+ count = param.count - sg_list[0].size;
+
+ for (i = 1; i < num_pages; i++) {
+ if (param.source == -1) {
+ /* local to remote */
+ sg_list[i].source = page_to_phys(pages[i]);
+ sg_list[i].target = remote_paddr;
+ } else {
+ /* remote to local */
+ sg_list[i].source = remote_paddr;
+ sg_list[i].target = page_to_phys(pages[i]);
+ }
+ sg_list[i].size = min_t(uint64_t, count, PAGE_SIZE);
+
+ remote_paddr += sg_list[i].size;
+ count -= sg_list[i].size;
+ }
+
+ param.ret = fh_partition_memcpy(param.source, param.target,
+ virt_to_phys(sg_list), num_pages);
+
+exit:
+ if (pages) {
+ for (i = 0; i < num_pages; i++)
+ if (pages[i])
+ put_page(pages[i]);
+ }
+
+ kfree(sg_list_unaligned);
+ kfree(pages);
+
+ if (!ret)
+ if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
+ return -EFAULT;
+
+ return ret;
+}
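A standalone sketch of the lb_offset/num_pages arithmetic used above, assuming a 4 KiB page size; the address and length are made-up example values that reproduce the multi-page layout in the diagram:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long local_vaddr = 0x10000300UL;	/* example address */
	unsigned long count = 0x2000;			/* example length  */
	/* offset of the buffer within its first page */
	unsigned long lb_offset = local_vaddr & (PAGE_SIZE - 1);
	/* pages needed to cover the buffer plus that leading offset */
	unsigned long num_pages = (count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printf("lb_offset = 0x%lx, num_pages = %lu\n", lb_offset, num_pages);
	return 0;	/* prints lb_offset = 0x300, num_pages = 3 */
}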
+
+/*
+ * Ioctl interface for FSL_HV_IOCTL_DOORBELL
+ *
+ * Ring a doorbell
+ */
+static long ioctl_doorbell(struct fsl_hv_ioctl_doorbell __user *p)
+{
+ struct fsl_hv_ioctl_doorbell param;
+
+ /* Get the parameters from the user. */
+ if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_doorbell)))
+ return -EFAULT;
+
+ param.ret = ev_doorbell_send(param.doorbell);
+
+ if (copy_to_user(&p->ret, &param.ret, sizeof(__u32)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long ioctl_dtprop(struct fsl_hv_ioctl_prop __user *p, int set)
+{
+ struct fsl_hv_ioctl_prop param;
+ char __user *upath, *upropname;
+ void __user *upropval;
+ char *path = NULL, *propname = NULL;
+ void *propval = NULL;
+ int ret = 0;
+
+ /* Get the parameters from the user. */
+ if (copy_from_user(&param, p, sizeof(struct fsl_hv_ioctl_prop)))
+ return -EFAULT;
+
+ upath = (char __user *)(uintptr_t)param.path;
+ upropname = (char __user *)(uintptr_t)param.propname;
+ upropval = (void __user *)(uintptr_t)param.propval;
+
+ path = strndup_user(upath, FH_DTPROP_MAX_PATHLEN);
+ if (IS_ERR(path)) {
+ ret = PTR_ERR(path);
+ goto out;
+ }
+
+ propname = strndup_user(upropname, FH_DTPROP_MAX_PATHLEN);
+ if (IS_ERR(propname)) {
+ ret = PTR_ERR(propname);
+ goto out;
+ }
+
+ if (param.proplen > FH_DTPROP_MAX_PROPLEN) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ propval = kmalloc(param.proplen, GFP_KERNEL);
+ if (!propval) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (set) {
+ if (copy_from_user(propval, upropval, param.proplen)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ param.ret = fh_partition_set_dtprop(param.handle,
+ virt_to_phys(path),
+ virt_to_phys(propname),
+ virt_to_phys(propval),
+ param.proplen);
+ } else {
+ param.ret = fh_partition_get_dtprop(param.handle,
+ virt_to_phys(path),
+ virt_to_phys(propname),
+ virt_to_phys(propval),
+ &param.proplen);
+
+ if (param.ret == 0) {
+ if (copy_to_user(upropval, propval, param.proplen) ||
+ put_user(param.proplen, &p->proplen)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ }
+
+ if (put_user(param.ret, &p->ret))
+ ret = -EFAULT;
+
+out:
+ kfree(path);
+ kfree(propval);
+ kfree(propname);
+
+ return ret;
+}
+
+/*
+ * Ioctl main entry point
+ */
+static long fsl_hv_ioctl(struct file *file, unsigned int cmd,
+ unsigned long argaddr)
+{
+ void __user *arg = (void __user *)argaddr;
+ long ret;
+
+ switch (cmd) {
+ case FSL_HV_IOCTL_PARTITION_RESTART:
+ ret = ioctl_restart(arg);
+ break;
+ case FSL_HV_IOCTL_PARTITION_GET_STATUS:
+ ret = ioctl_status(arg);
+ break;
+ case FSL_HV_IOCTL_PARTITION_START:
+ ret = ioctl_start(arg);
+ break;
+ case FSL_HV_IOCTL_PARTITION_STOP:
+ ret = ioctl_stop(arg);
+ break;
+ case FSL_HV_IOCTL_MEMCPY:
+ ret = ioctl_memcpy(arg);
+ break;
+ case FSL_HV_IOCTL_DOORBELL:
+ ret = ioctl_doorbell(arg);
+ break;
+ case FSL_HV_IOCTL_GETPROP:
+ ret = ioctl_dtprop(arg, 0);
+ break;
+ case FSL_HV_IOCTL_SETPROP:
+ ret = ioctl_dtprop(arg, 1);
+ break;
+ default:
+ pr_debug("fsl-hv: bad ioctl dir=%u type=%u cmd=%u size=%u\n",
+ _IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd),
+ _IOC_SIZE(cmd));
+ return -ENOTTY;
+ }
+
+ return ret;
+}
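A hypothetical userspace sketch of the ioctl path above, querying a partition's status; the partition handle used here is a made-up example, and the structure layout comes from <linux/fsl_hypervisor.h> rather than being restated:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fsl_hypervisor.h>

int main(void)
{
	struct fsl_hv_ioctl_status param = { .partition = 2 };	/* example handle */
	int fd = open("/dev/fsl-hv", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FSL_HV_IOCTL_PARTITION_GET_STATUS, &param) == 0)
		printf("hcall ret=%u status=%u\n", param.ret, param.status);
	else
		perror("FSL_HV_IOCTL_PARTITION_GET_STATUS");
	close(fd);
	return 0;
}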
+
+/* Linked list of processes that have us open */
+static struct list_head db_list;
+
+/* spinlock for db_list */
+static DEFINE_SPINLOCK(db_list_lock);
+
+/* The size of the doorbell event queue. This must be a power of two. */
+#define QSIZE 16
+
+/* Returns the next head/tail pointer, wrapping around the queue if necessary */
+#define nextp(x) (((x) + 1) & (QSIZE - 1))
+
+/* Per-open data structure */
+struct doorbell_queue {
+ struct list_head list;
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ unsigned int head;
+ unsigned int tail;
+ uint32_t q[QSIZE];
+};
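A standalone sketch of the head/tail ring behind the doorbell queue: QSIZE must be a power of two so the nextp() mask wraps the index, and one slot is deliberately left unused so "full" can be told apart from "empty":

#include <stdio.h>
#include <stdint.h>

#define QSIZE 16
#define nextp(x) (((x) + 1) & (QSIZE - 1))

static uint32_t q[QSIZE];
static unsigned int head, tail;

static int push(uint32_t v)
{
	if (head == nextp(tail))
		return -1;		/* full: one slot stays free */
	q[tail] = v;
	tail = nextp(tail);
	return 0;
}

static int pop(uint32_t *v)
{
	if (head == tail)
		return -1;		/* empty */
	*v = q[head];
	head = nextp(head);
	return 0;
}

int main(void)
{
	uint32_t v;
	unsigned int i;

	for (i = 0; i < 20; i++)	/* only QSIZE - 1 pushes succeed */
		if (push(i))
			break;
	while (!pop(&v))
		printf("%u ", v);
	printf("\n");
	return 0;
}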
+
+/* Linked list of ISRs that we registered */
+struct list_head isr_list;
+
+/* Per-ISR data structure */
+struct doorbell_isr {
+ struct list_head list;
+ unsigned int irq;
+ uint32_t doorbell; /* The doorbell handle */
+ uint32_t partition; /* The partition handle, if used */
+};
+
+/*
+ * Add a doorbell to all of the doorbell queues
+ */
+static void fsl_hv_queue_doorbell(uint32_t doorbell)
+{
+ struct doorbell_queue *dbq;
+ unsigned long flags;
+
+ /* Prevent another core from modifying db_list */
+ spin_lock_irqsave(&db_list_lock, flags);
+
+ list_for_each_entry(dbq, &db_list, list) {
+ if (dbq->head != nextp(dbq->tail)) {
+ dbq->q[dbq->tail] = doorbell;
+ /*
+ * This memory barrier eliminates the need to grab
+ * the spinlock for dbq.
+ */
+ smp_wmb();
+ dbq->tail = nextp(dbq->tail);
+ wake_up_interruptible(&dbq->wait);
+ }
+ }
+
+ spin_unlock_irqrestore(&db_list_lock, flags);
+}
+
+/*
+ * Interrupt handler for all doorbells
+ *
+ * We use the same interrupt handler for all doorbells. Whenever a doorbell
+ * is rung, and we receive an interrupt, we just put the handle for that
+ * doorbell (passed to us as *data) into all of the queues.
+ */
+static irqreturn_t fsl_hv_isr(int irq, void *data)
+{
+ fsl_hv_queue_doorbell((uintptr_t) data);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * State change thread function
+ *
+ * The state change notification arrives in an interrupt, but we can't call
+ * blocking_notifier_call_chain() in an interrupt handler. We could call
+ * atomic_notifier_call_chain(), but that would require the clients' call-back
+ * function to run in interrupt context. Since we don't want to impose that
+ * restriction on the clients, we use a threaded IRQ to process the
+ * notification in kernel context.
+ */
+static irqreturn_t fsl_hv_state_change_thread(int irq, void *data)
+{
+ struct doorbell_isr *dbisr = data;
+
+ blocking_notifier_call_chain(&failover_subscribers, dbisr->partition,
+ NULL);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Interrupt handler for state-change doorbells
+ */
+static irqreturn_t fsl_hv_state_change_isr(int irq, void *data)
+{
+ unsigned int status;
+ struct doorbell_isr *dbisr = data;
+ int ret;
+
+ /* It's still a doorbell, so add it to all the queues. */
+ fsl_hv_queue_doorbell(dbisr->doorbell);
+
+ /* Determine the new state, and if it's stopped, notify the clients. */
+ ret = fh_partition_get_status(dbisr->partition, &status);
+ if (!ret && (status == FH_PARTITION_STOPPED))
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Returns a bitmask indicating whether a read will block
+ */
+static unsigned int fsl_hv_poll(struct file *filp, struct poll_table_struct *p)
+{
+ struct doorbell_queue *dbq = filp->private_data;
+ unsigned long flags;
+ unsigned int mask;
+
+ spin_lock_irqsave(&dbq->lock, flags);
+
+ poll_wait(filp, &dbq->wait, p);
+ mask = (dbq->head == dbq->tail) ? 0 : (POLLIN | POLLRDNORM);
+
+ spin_unlock_irqrestore(&dbq->lock, flags);
+
+ return mask;
+}
+
+/*
+ * Return the handles for any incoming doorbells
+ *
+ * If there are doorbell handles in the queue for this open instance, then
+ * return them to the caller as an array of 32-bit integers. Otherwise,
+ * block until there is at least one handle to return.
+ */
+static ssize_t fsl_hv_read(struct file *filp, char __user *buf, size_t len,
+ loff_t *off)
+{
+ struct doorbell_queue *dbq = filp->private_data;
+ uint32_t __user *p = (uint32_t __user *) buf; /* for put_user() */
+ unsigned long flags;
+ ssize_t count = 0;
+
+ /* Make sure we stop when the user buffer is full. */
+ while (len >= sizeof(uint32_t)) {
+ uint32_t dbell; /* Local copy of doorbell queue data */
+
+ spin_lock_irqsave(&dbq->lock, flags);
+
+ /*
+ * If the queue is empty, then either we're done or we need
+ * to block. If the application specified O_NONBLOCK, then
+ * we return the appropriate error code.
+ */
+ if (dbq->head == dbq->tail) {
+ spin_unlock_irqrestore(&dbq->lock, flags);
+ if (count)
+ break;
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ if (wait_event_interruptible(dbq->wait,
+ dbq->head != dbq->tail))
+ return -ERESTARTSYS;
+ continue;
+ }
+
+ /*
+ * Even though we have an smp_wmb() in the ISR, the core
+ * might speculatively execute the "dbell = ..." below while
+ * it's evaluating the if-statement above. In that case, the
+ * value put into dbell could be stale if the core accepts the
+ * speculation. To prevent that, we need a read memory barrier
+ * here as well.
+ */
+ smp_rmb();
+
+ /* Copy the data to a temporary local buffer, because
+ * we can't call copy_to_user() from inside a spinlock
+ */
+ dbell = dbq->q[dbq->head];
+ dbq->head = nextp(dbq->head);
+
+ spin_unlock_irqrestore(&dbq->lock, flags);
+
+ if (put_user(dbell, p))
+ return -EFAULT;
+ p++;
+ count += sizeof(uint32_t);
+ len -= sizeof(uint32_t);
+ }
+
+ return count;
+}
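A hypothetical userspace sketch of the read interface above: block on the misc device (registered as "fsl-hv", so /dev/fsl-hv under the usual udev naming) and print each 32-bit doorbell handle as it arrives:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/fsl-hv", O_RDONLY);	/* assumed device node */
	uint32_t handle;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* each read() returns one or more 32-bit doorbell handles */
	while (read(fd, &handle, sizeof(handle)) == sizeof(handle))
		printf("doorbell %u rang\n", handle);
	close(fd);
	return 0;
}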
+
+/*
+ * Open the driver and prepare for reading doorbells.
+ *
+ * Every time an application opens the driver, we create a doorbell queue
+ * for that file handle. This queue is used for any incoming doorbells.
+ */
+static int fsl_hv_open(struct inode *inode, struct file *filp)
+{
+ struct doorbell_queue *dbq;
+ unsigned long flags;
+ int ret = 0;
+
+ dbq = kzalloc(sizeof(struct doorbell_queue), GFP_KERNEL);
+ if (!dbq) {
+ pr_err("fsl-hv: out of memory\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&dbq->lock);
+ init_waitqueue_head(&dbq->wait);
+
+ spin_lock_irqsave(&db_list_lock, flags);
+ list_add(&dbq->list, &db_list);
+ spin_unlock_irqrestore(&db_list_lock, flags);
+
+ filp->private_data = dbq;
+
+ return ret;
+}
+
+/*
+ * Close the driver
+ */
+static int fsl_hv_close(struct inode *inode, struct file *filp)
+{
+ struct doorbell_queue *dbq = filp->private_data;
+ unsigned long flags;
+
+ int ret = 0;
+
+ spin_lock_irqsave(&db_list_lock, flags);
+ list_del(&dbq->list);
+ spin_unlock_irqrestore(&db_list_lock, flags);
+
+ kfree(dbq);
+
+ return ret;
+}
+
+static const struct file_operations fsl_hv_fops = {
+ .owner = THIS_MODULE,
+ .open = fsl_hv_open,
+ .release = fsl_hv_close,
+ .poll = fsl_hv_poll,
+ .read = fsl_hv_read,
+ .unlocked_ioctl = fsl_hv_ioctl,
+};
+
+static struct miscdevice fsl_hv_misc_dev = {
+ MISC_DYNAMIC_MINOR,
+ "fsl-hv",
+ &fsl_hv_fops
+};
+
+static irqreturn_t fsl_hv_shutdown_isr(int irq, void *data)
+{
+ orderly_poweroff(false);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Returns the handle of the parent of the given node
+ *
+ * The handle is the value of the 'hv-handle' property
+ */
+static int get_parent_handle(struct device_node *np)
+{
+ struct device_node *parent;
+ const uint32_t *prop;
+ uint32_t handle;
+ int len;
+
+ parent = of_get_parent(np);
+ if (!parent)
+ /* It's not really possible for this to fail */
+ return -ENODEV;
+
+ /*
+ * The proper name for the handle property is "hv-handle", but some
+ * older versions of the hypervisor used "reg".
+ */
+ prop = of_get_property(parent, "hv-handle", &len);
+ if (!prop)
+ prop = of_get_property(parent, "reg", &len);
+
+ if (!prop || (len != sizeof(uint32_t))) {
+ /* This can happen only if the node is malformed */
+ of_node_put(parent);
+ return -ENODEV;
+ }
+
+ handle = be32_to_cpup(prop);
+ of_node_put(parent);
+
+ return handle;
+}
+
+/*
+ * Register a callback for failover events
+ *
+ * This function is called by device drivers to register their callback
+ * functions for fail-over events.
+ */
+int fsl_hv_failover_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&failover_subscribers, nb);
+}
+EXPORT_SYMBOL(fsl_hv_failover_register);
+
+/*
+ * Unregister a callback for failover events
+ */
+int fsl_hv_failover_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&failover_subscribers, nb);
+}
+EXPORT_SYMBOL(fsl_hv_failover_unregister);
+
+/*
+ * Return TRUE if we're running under FSL hypervisor
+ *
+ * This function checks to see if we're running under the Freescale
+ * hypervisor, and returns zero if we're not, or non-zero if we are.
+ *
+ * First, it checks if MSR[GS]==1, which means we're running under some
+ * hypervisor. Then it checks if there is a hypervisor node in the device
+ * tree. Currently, that means there needs to be a node in the root called
+ * "hypervisor" and which has a property named "fsl,hv-version".
+ */
+static int has_fsl_hypervisor(void)
+{
+ struct device_node *node;
+ int ret;
+
+ if (!(mfmsr() & MSR_GS))
+ return 0;
+
+ node = of_find_node_by_path("/hypervisor");
+ if (!node)
+ return 0;
+
+ ret = of_find_property(node, "fsl,hv-version", NULL) != NULL;
+
+ of_node_put(node);
+
+ return ret;
+}
+
+/*
+ * Freescale hypervisor management driver init
+ *
+ * This function is called when this module is loaded.
+ *
+ * Register ourselves as a miscellaneous driver. This will register the
+ * fops structure and create the right sysfs entries for udev.
+ */
+static int __init fsl_hypervisor_init(void)
+{
+ struct device_node *np;
+ struct doorbell_isr *dbisr, *n;
+ int ret;
+
+ pr_info("Freescale hypervisor management driver\n");
+
+ if (!has_fsl_hypervisor()) {
+ pr_info("fsl-hv: no hypervisor found\n");
+ return -ENODEV;
+ }
+
+ ret = misc_register(&fsl_hv_misc_dev);
+ if (ret) {
+ pr_err("fsl-hv: cannot register device\n");
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&db_list);
+ INIT_LIST_HEAD(&isr_list);
+
+ for_each_compatible_node(np, NULL, "epapr,hv-receive-doorbell") {
+ unsigned int irq;
+ const uint32_t *handle;
+
+ handle = of_get_property(np, "interrupts", NULL);
+ irq = irq_of_parse_and_map(np, 0);
+ if (!handle || (irq == NO_IRQ)) {
+ pr_err("fsl-hv: no 'interrupts' property in %s node\n",
+ np->full_name);
+ continue;
+ }
+
+ dbisr = kzalloc(sizeof(*dbisr), GFP_KERNEL);
+ if (!dbisr)
+ goto out_of_memory;
+
+ dbisr->irq = irq;
+ dbisr->doorbell = be32_to_cpup(handle);
+
+ if (of_device_is_compatible(np, "fsl,hv-shutdown-doorbell")) {
+ /* The shutdown doorbell gets its own ISR */
+ ret = request_irq(irq, fsl_hv_shutdown_isr, 0,
+ np->name, NULL);
+ } else if (of_device_is_compatible(np,
+ "fsl,hv-state-change-doorbell")) {
+ /*
+ * The state change doorbell triggers a notification if
+ * the state of the managed partition changes to
+ * "stopped". We need a separate interrupt handler for
+ * that, and we also need to know the handle of the
+ * target partition, not just the handle of the
+ * doorbell.
+ */
+ dbisr->partition = ret = get_parent_handle(np);
+ if (ret < 0) {
+ pr_err("fsl-hv: node %s has missing or "
+ "malformed parent\n", np->full_name);
+ kfree(dbisr);
+ continue;
+ }
+ ret = request_threaded_irq(irq, fsl_hv_state_change_isr,
+ fsl_hv_state_change_thread,
+ 0, np->name, dbisr);
+ } else
+ ret = request_irq(irq, fsl_hv_isr, 0, np->name, dbisr);
+
+ if (ret < 0) {
+ pr_err("fsl-hv: could not request irq %u for node %s\n",
+ irq, np->full_name);
+ kfree(dbisr);
+ continue;
+ }
+
+ list_add(&dbisr->list, &isr_list);
+
+ pr_info("fsl-hv: registered handler for doorbell %u\n",
+ dbisr->doorbell);
+ }
+
+ return 0;
+
+out_of_memory:
+ list_for_each_entry_safe(dbisr, n, &isr_list, list) {
+ free_irq(dbisr->irq, dbisr);
+ list_del(&dbisr->list);
+ kfree(dbisr);
+ }
+
+ misc_deregister(&fsl_hv_misc_dev);
+
+ return -ENOMEM;
+}
+
+/*
+ * Freescale hypervisor management driver termination
+ *
+ * This function is called when this driver is unloaded.
+ */
+static void __exit fsl_hypervisor_exit(void)
+{
+ struct doorbell_isr *dbisr, *n;
+
+ list_for_each_entry_safe(dbisr, n, &isr_list, list) {
+ free_irq(dbisr->irq, dbisr);
+ list_del(&dbisr->list);
+ kfree(dbisr);
+ }
+
+ misc_deregister(&fsl_hv_misc_dev);
+}
+
+module_init(fsl_hypervisor_init);
+module_exit(fsl_hypervisor_exit);
+
+MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
+MODULE_DESCRIPTION("Freescale hypervisor management driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 3dd6294d10b..57e493b1bd2 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -7,6 +7,8 @@ config VIRTIO_RING
tristate
depends on VIRTIO
+menu "Virtio drivers"
+
config VIRTIO_PCI
tristate "PCI driver for virtio devices (EXPERIMENTAL)"
depends on PCI && EXPERIMENTAL
@@ -33,3 +35,4 @@ config VIRTIO_BALLOON
If unsure, say M.
+endmenu
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index ad57593d224..a0c8965c1a7 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -109,6 +109,7 @@ struct ds1wm_data {
/* byte to write that makes all intr disabled, */
/* considering active_state (IAS) (optimization) */
u8 int_en_reg_none;
+ unsigned int reset_recover_delay; /* see ds1wm.h */
};
static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg,
@@ -187,6 +188,9 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
return 1;
}
+ if (ds1wm_data->reset_recover_delay)
+ msleep(ds1wm_data->reset_recover_delay);
+
return 0;
}
@@ -490,6 +494,7 @@ static int ds1wm_probe(struct platform_device *pdev)
}
ds1wm_data->irq = res->start;
ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
+ ds1wm_data->reset_recover_delay = plat->reset_recover_delay;
if (res->flags & IORESOURCE_IRQ_HIGHEDGE)
irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 1550431ccb6..334d1ccf9c9 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -20,7 +20,7 @@
*/
#include <asm/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <linux/delay.h>
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 17726a05a0a..402928b135d 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -86,6 +86,11 @@ static struct w1_family w1_therm_family_DS1822 = {
.fops = &w1_therm_fops,
};
+static struct w1_family w1_therm_family_DS28EA00 = {
+ .fid = W1_THERM_DS28EA00,
+ .fops = &w1_therm_fops,
+};
+
struct w1_therm_family_converter
{
u8 broken;
@@ -111,6 +116,10 @@ static struct w1_therm_family_converter w1_therm_families[] = {
.f = &w1_therm_family_DS18B20,
.convert = w1_DS18B20_convert_temp
},
+ {
+ .f = &w1_therm_family_DS28EA00,
+ .convert = w1_DS18B20_convert_temp
+ },
};
static inline int w1_DS18B20_convert_temp(u8 rom[9])
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 10606c82275..6c136c19e98 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -33,7 +33,7 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "w1.h"
#include "w1_log.h"
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 97479ae70b9..98a1ac0f469 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -24,7 +24,7 @@
#include <linux/types.h>
#include <linux/device.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define W1_FAMILY_DEFAULT 0
#define W1_FAMILY_SMEM_01 0x01
@@ -38,6 +38,7 @@
#define W1_EEPROM_DS2431 0x2D
#define W1_FAMILY_DS2760 0x30
#define W1_FAMILY_DS2780 0x32
+#define W1_THERM_DS28EA00 0x42
#define MAXNAMELEN 32
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 022f9eb0b7b..86b0735e6aa 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -28,6 +28,14 @@ menuconfig WATCHDOG
if WATCHDOG
+config WATCHDOG_CORE
+ bool "WatchDog Timer Driver Core"
+ ---help---
+ Say Y here if you want to use the new watchdog timer driver core.
+ This driver provides a framework for all watchdog timer drivers
+ and gives them the /dev/watchdog interface (and later also the
+ sysfs interface).
+
config WATCHDOG_NOWAYOUT
bool "Disable watchdog shutdown on close"
help
@@ -186,6 +194,15 @@ config SA1100_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called sa1100_wdt.
+config DW_WATCHDOG
+ tristate "Synopsys DesignWare watchdog"
+ depends on ARM && HAVE_CLK
+ help
+ Say Y here to include support for the Synopsys DesignWare
+ watchdog timer found in many ARM chips.
+ To compile this driver as a module, choose M here: the
+ module will be called dw_wdt.
+
config MPCORE_WATCHDOG
tristate "MPcore watchdog"
depends on HAVE_ARM_TWD
@@ -321,7 +338,7 @@ config MAX63XX_WATCHDOG
config IMX2_WDT
tristate "IMX2+ Watchdog"
- depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+ depends on IMX_HAVE_PLATFORM_IMX2_WDT
help
This is the driver for the hardware watchdog
on the Freescale IMX2 and later processors.
@@ -535,8 +552,7 @@ config I6300ESB_WDT
config INTEL_SCU_WATCHDOG
bool "Intel SCU Watchdog for Mobile Platforms"
- depends on WATCHDOG
- depends on INTEL_SCU_IPC
+ depends on X86_MRST
---help---
Hardware driver for the watchdog timer built into the Intel SCU
for Intel Mobile Platforms.
@@ -600,8 +616,7 @@ config IT87_WDT
config HP_WATCHDOG
tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
- depends on X86
- default m
+ depends on X86 && PCI
help
A software monitoring watchdog and NMI sourcing driver. This driver
will detect lockups and provide a stack trace. This is a driver that
@@ -881,6 +896,20 @@ config M54xx_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called m54xx_wdt.
+# MicroBlaze Architecture
+
+config XILINX_WATCHDOG
+ tristate "Xilinx Watchdog timer"
+ depends on MICROBLAZE
+ ---help---
+ Watchdog driver for the xps_timebase_wdt ip core.
+
+ IMPORTANT: The xps_timebase_wdt parent must have the property
+ "clock-frequency" at device tree.
+
+ To compile this driver as a module, choose M here: the
+ module will be called of_xilinx_wdt.
+
# MIPS Architecture
config ATH79_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index ed26f7094e4..55bd5740e91 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -2,6 +2,10 @@
# Makefile for the WatchDog device drivers.
#
+# The WatchDog Timer Driver Core.
+watchdog-objs += watchdog_core.o watchdog_dev.o
+obj-$(CONFIG_WATCHDOG_CORE) += watchdog.o
+
# Only one watchdog can succeed. We probe the ISA/PCI/USB based
# watchdog-cards first, then the architecture specific watchdog
# drivers and then the architecture independent "softdog" driver.
@@ -37,6 +41,7 @@ obj-$(CONFIG_IXP4XX_WATCHDOG) += ixp4xx_wdt.o
obj-$(CONFIG_KS8695_WATCHDOG) += ks8695_wdt.o
obj-$(CONFIG_S3C2410_WATCHDOG) += s3c2410_wdt.o
obj-$(CONFIG_SA1100_WATCHDOG) += sa1100_wdt.o
+obj-$(CONFIG_DW_WATCHDOG) += dw_wdt.o
obj-$(CONFIG_MPCORE_WATCHDOG) += mpcore_wdt.o
obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
@@ -109,6 +114,9 @@ obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
# M68K Architecture
obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
+# MicroBlaze Architecture
+obj-$(CONFIG_XILINX_WATCHDOG) += of_xilinx_wdt.o
+
# MIPS Architecture
obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
diff --git a/drivers/watchdog/at32ap700x_wdt.c b/drivers/watchdog/at32ap700x_wdt.c
index 750bc5281d7..4ca5d40304b 100644
--- a/drivers/watchdog/at32ap700x_wdt.c
+++ b/drivers/watchdog/at32ap700x_wdt.c
@@ -448,7 +448,7 @@ static void __exit at32_wdt_exit(void)
}
module_exit(at32_wdt_exit);
-MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
+MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("Watchdog driver for Atmel AT32AP700X");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index eac26021e8d..87445b2d72a 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -31,7 +31,7 @@
#include <linux/bitops.h>
#include <linux/uaccess.h>
-#include <mach/at91_wdt.h>
+#include "at91sam9_wdt.h"
#define DRV_NAME "AT91SAM9 Watchdog"
@@ -284,27 +284,8 @@ static int __exit at91wdt_remove(struct platform_device *pdev)
return res;
}
-#ifdef CONFIG_PM
-
-static int at91wdt_suspend(struct platform_device *pdev, pm_message_t message)
-{
- return 0;
-}
-
-static int at91wdt_resume(struct platform_device *pdev)
-{
- return 0;
-}
-
-#else
-#define at91wdt_suspend NULL
-#define at91wdt_resume NULL
-#endif
-
static struct platform_driver at91wdt_driver = {
.remove = __exit_p(at91wdt_remove),
- .suspend = at91wdt_suspend,
- .resume = at91wdt_resume,
.driver = {
.name = "at91_wdt",
.owner = THIS_MODULE,
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
new file mode 100644
index 00000000000..757f9cab5c8
--- /dev/null
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -0,0 +1,37 @@
+/*
+ * drivers/watchdog/at91sam9_wdt.h
+ *
+ * Copyright (C) 2007 Andrew Victor
+ * Copyright (C) 2007 Atmel Corporation.
+ *
+ * Watchdog Timer (WDT) - System peripherals registers.
+ * Based on AT91SAM9261 datasheet revision D.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91_WDT_H
+#define AT91_WDT_H
+
+#define AT91_WDT_CR (AT91_WDT + 0x00) /* Watchdog Control Register */
+#define AT91_WDT_WDRSTT (1 << 0) /* Restart */
+#define AT91_WDT_KEY (0xa5 << 24) /* KEY Password */
+
+#define AT91_WDT_MR (AT91_WDT + 0x04) /* Watchdog Mode Register */
+#define AT91_WDT_WDV (0xfff << 0) /* Counter Value */
+#define AT91_WDT_WDFIEN (1 << 12) /* Fault Interrupt Enable */
+#define AT91_WDT_WDRSTEN (1 << 13) /* Reset Processor */
+#define AT91_WDT_WDRPROC (1 << 14) /* Timer Restart */
+#define AT91_WDT_WDDIS (1 << 15) /* Watchdog Disable */
+#define AT91_WDT_WDD (0xfff << 16) /* Delta Value */
+#define AT91_WDT_WDDBGHLT (1 << 28) /* Debug Halt */
+#define AT91_WDT_WDIDLEHLT (1 << 29) /* Idle Halt */
+
+#define AT91_WDT_SR (AT91_WDT + 0x08) /* Watchdog Status Register */
+#define AT91_WDT_WDUNF (1 << 0) /* Watchdog Underflow */
+#define AT91_WDT_WDERR (1 << 1) /* Watchdog Error */
+
+#endif
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
new file mode 100644
index 00000000000..f10f8c0abba
--- /dev/null
+++ b/drivers/watchdog/dw_wdt.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright 2010-2011 Picochip Ltd., Jamie Iles
+ * http://www.picochip.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This file implements a driver for the Synopsys DesignWare watchdog device
+ * in the many ARM subsystems. The watchdog has 16 different timeout periods
+ * and these are a function of the input clock frequency.
+ *
+ * The DesignWare watchdog cannot be stopped once it has been started so we
+ * use a software timer to implement a ping that will keep the watchdog alive.
+ * If we receive an expected close for the watchdog then we keep the timer
+ * running, otherwise the timer is stopped and the watchdog will expire.
+ */
+#define pr_fmt(fmt) "dw_wdt: " fmt
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+
+#define WDOG_CONTROL_REG_OFFSET 0x00
+#define WDOG_CONTROL_REG_WDT_EN_MASK 0x01
+#define WDOG_TIMEOUT_RANGE_REG_OFFSET 0x04
+#define WDOG_CURRENT_COUNT_REG_OFFSET 0x08
+#define WDOG_COUNTER_RESTART_REG_OFFSET 0x0c
+#define WDOG_COUNTER_RESTART_KICK_VALUE 0x76
+
+/* The maximum TOP (timeout period) value that can be set in the watchdog. */
+#define DW_WDT_MAX_TOP 15
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+#define WDT_TIMEOUT (HZ / 2)
+
+static struct {
+ spinlock_t lock;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long in_use;
+ unsigned long next_heartbeat;
+ struct timer_list timer;
+ int expect_close;
+} dw_wdt;
+
+static inline int dw_wdt_is_enabled(void)
+{
+ return readl(dw_wdt.regs + WDOG_CONTROL_REG_OFFSET) &
+ WDOG_CONTROL_REG_WDT_EN_MASK;
+}
+
+static inline int dw_wdt_top_in_seconds(unsigned top)
+{
+ /*
+ * There are 16 possible timeout values in 0..15 where the number of
+ * cycles is 2 ^ (16 + i) and the watchdog counts down.
+ */
+ return (1 << (16 + top)) / clk_get_rate(dw_wdt.clk);
+}
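A standalone sketch of the timeout table implied by the comment above; the 33 MHz input clock is an assumption, giving roughly 2 ms at top 0 up to about 65 s at top 15:

#include <stdio.h>

int main(void)
{
	const unsigned long clk_hz = 33000000UL;	/* assumed input clock */
	int top;

	/* period top gives 2 ^ (16 + top) cycles of the watchdog clock */
	for (top = 0; top <= 15; top++) {
		unsigned long long cycles = 1ULL << (16 + top);

		printf("top=%2d  %llu cycles  %.3f s\n",
		       top, cycles, (double)cycles / clk_hz);
	}
	return 0;
}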
+
+static int dw_wdt_get_top(void)
+{
+ int top = readl(dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF;
+
+ return dw_wdt_top_in_seconds(top);
+}
+
+static inline void dw_wdt_set_next_heartbeat(void)
+{
+ dw_wdt.next_heartbeat = jiffies + dw_wdt_get_top() * HZ;
+}
+
+static int dw_wdt_set_top(unsigned top_s)
+{
+ int i, top_val = DW_WDT_MAX_TOP;
+
+ /*
+ * Iterate over the timeout values until we find the closest match. We
+ * always look for >=.
+ */
+ for (i = 0; i <= DW_WDT_MAX_TOP; ++i)
+ if (dw_wdt_top_in_seconds(i) >= top_s) {
+ top_val = i;
+ break;
+ }
+
+ /* Set the new value in the watchdog. */
+ writel(top_val, dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+
+ dw_wdt_set_next_heartbeat();
+
+ return dw_wdt_top_in_seconds(top_val);
+}
+
+static void dw_wdt_keepalive(void)
+{
+ writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs +
+ WDOG_COUNTER_RESTART_REG_OFFSET);
+}
+
+static void dw_wdt_ping(unsigned long data)
+{
+ if (time_before(jiffies, dw_wdt.next_heartbeat) ||
+ (!nowayout && !dw_wdt.in_use)) {
+ dw_wdt_keepalive();
+ mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
+ } else
+ pr_crit("keepalive missed, machine will reset\n");
+}
+
+static int dw_wdt_open(struct inode *inode, struct file *filp)
+{
+ if (test_and_set_bit(0, &dw_wdt.in_use))
+ return -EBUSY;
+
+ /* Make sure we don't get unloaded. */
+ __module_get(THIS_MODULE);
+
+ spin_lock(&dw_wdt.lock);
+ if (!dw_wdt_is_enabled()) {
+ /*
+ * The watchdog is not currently enabled. Set the timeout to
+ * the maximum and then start it.
+ */
+ dw_wdt_set_top(DW_WDT_MAX_TOP);
+ writel(WDOG_CONTROL_REG_WDT_EN_MASK,
+ dw_wdt.regs + WDOG_CONTROL_REG_OFFSET);
+ }
+
+ dw_wdt_set_next_heartbeat();
+
+ spin_unlock(&dw_wdt.lock);
+
+ return nonseekable_open(inode, filp);
+}
+
+ssize_t dw_wdt_write(struct file *filp, const char __user *buf, size_t len,
+ loff_t *offset)
+{
+ if (!len)
+ return 0;
+
+ if (!nowayout) {
+ size_t i;
+
+ dw_wdt.expect_close = 0;
+
+ for (i = 0; i < len; ++i) {
+ char c;
+
+ if (get_user(c, buf + i))
+ return -EFAULT;
+
+ if (c == 'V') {
+ dw_wdt.expect_close = 1;
+ break;
+ }
+ }
+ }
+
+ dw_wdt_set_next_heartbeat();
+ mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
+
+ return len;
+}
+
+static u32 dw_wdt_time_left(void)
+{
+ return readl(dw_wdt.regs + WDOG_CURRENT_COUNT_REG_OFFSET) /
+ clk_get_rate(dw_wdt.clk);
+}
+
+static const struct watchdog_info dw_wdt_ident = {
+ .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE,
+ .identity = "Synopsys DesignWare Watchdog",
+};
+
+static long dw_wdt_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ unsigned long val;
+ int timeout;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user((struct watchdog_info *)arg, &dw_wdt_ident,
+ sizeof(dw_wdt_ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, (int *)arg);
+
+ case WDIOC_KEEPALIVE:
+ dw_wdt_set_next_heartbeat();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(val, (int __user *)arg))
+ return -EFAULT;
+ timeout = dw_wdt_set_top(val);
+ return put_user(timeout, (int __user *)arg);
+
+ case WDIOC_GETTIMEOUT:
+ return put_user(dw_wdt_get_top(), (int __user *)arg);
+
+ case WDIOC_GETTIMELEFT:
+ /* Get the time left until expiry. */
+ if (get_user(val, (int __user *)arg))
+ return -EFAULT;
+ return put_user(dw_wdt_time_left(), (int __user *)arg);
+
+ default:
+ return -ENOTTY;
+ }
+}
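
/*
 * Illustrative daemon loop using the ioctls handled above (standard
 * watchdog ABI from <linux/watchdog.h>; the 30 s request is an
 * arbitrary example value). WDIOC_SETTIMEOUT writes back the timeout
 * the driver actually programmed, which dw_wdt rounds up to the next
 * supported power-of-two interval.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

static int watchdog_loop(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int timeout = 30;

	if (fd < 0)
		return -1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);
	printf("effective timeout: %d s\n", timeout);
	for (;;) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);
		sleep((unsigned int)timeout / 2);	/* feed well before expiry */
	}
}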
+
+static int dw_wdt_release(struct inode *inode, struct file *filp)
+{
+ clear_bit(0, &dw_wdt.in_use);
+
+ if (!dw_wdt.expect_close) {
+ del_timer(&dw_wdt.timer);
+
+ if (!nowayout)
+ pr_crit("unexpected close, system will reboot soon\n");
+ else
+ pr_crit("watchdog cannot be disabled, system will reboot soon\n");
+ }
+
+ dw_wdt.expect_close = 0;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int dw_wdt_suspend(struct device *dev)
+{
+ clk_disable(dw_wdt.clk);
+
+ return 0;
+}
+
+static int dw_wdt_resume(struct device *dev)
+{
+ int err = clk_enable(dw_wdt.clk);
+
+ if (err)
+ return err;
+
+ dw_wdt_keepalive();
+
+ return 0;
+}
+
+static const struct dev_pm_ops dw_wdt_pm_ops = {
+ .suspend = dw_wdt_suspend,
+ .resume = dw_wdt_resume,
+};
+#endif /* CONFIG_PM */
+
+static const struct file_operations wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = dw_wdt_open,
+ .write = dw_wdt_write,
+ .unlocked_ioctl = dw_wdt_ioctl,
+ .release = dw_wdt_release
+};
+
+static struct miscdevice dw_wdt_miscdev = {
+ .fops = &wdt_fops,
+ .name = "watchdog",
+ .minor = WATCHDOG_MINOR,
+};
+
+static int __devinit dw_wdt_drv_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!mem)
+ return -EINVAL;
+
+ if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
+ "dw_wdt"))
+ return -ENOMEM;
+
+ dw_wdt.regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ if (!dw_wdt.regs)
+ return -ENOMEM;
+
+ dw_wdt.clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dw_wdt.clk))
+ return PTR_ERR(dw_wdt.clk);
+
+ ret = clk_enable(dw_wdt.clk);
+ if (ret)
+ goto out_put_clk;
+
+ spin_lock_init(&dw_wdt.lock);
+
+ ret = misc_register(&dw_wdt_miscdev);
+ if (ret)
+ goto out_disable_clk;
+
+ dw_wdt_set_next_heartbeat();
+ setup_timer(&dw_wdt.timer, dw_wdt_ping, 0);
+ mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
+
+ return 0;
+
+out_disable_clk:
+ clk_disable(dw_wdt.clk);
+out_put_clk:
+ clk_put(dw_wdt.clk);
+
+ return ret;
+}
+
+static int __devexit dw_wdt_drv_remove(struct platform_device *pdev)
+{
+ misc_deregister(&dw_wdt_miscdev);
+
+ clk_disable(dw_wdt.clk);
+ clk_put(dw_wdt.clk);
+
+ return 0;
+}
+
+static struct platform_driver dw_wdt_driver = {
+ .probe = dw_wdt_drv_probe,
+ .remove = __devexit_p(dw_wdt_drv_remove),
+ .driver = {
+ .name = "dw_wdt",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &dw_wdt_pm_ops,
+#endif /* CONFIG_PM */
+ },
+};
+
+static int __init dw_wdt_watchdog_init(void)
+{
+ return platform_driver_register(&dw_wdt_driver);
+}
+module_init(dw_wdt_watchdog_init);
+
+static void __exit dw_wdt_watchdog_exit(void)
+{
+ platform_driver_unregister(&dw_wdt_driver);
+}
+module_exit(dw_wdt_watchdog_exit);
+
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index 29a7cd4b90c..b146082bd85 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -329,4 +329,4 @@ MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_DESCRIPTION("GE watchdog driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS("platform: gef_wdt");
+MODULE_ALIAS("platform:gef_wdt");
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 8cb26855bfe..410fba45378 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -36,7 +36,7 @@
#include <asm/cacheflush.h>
#endif /* CONFIG_HPWDT_NMI_DECODING */
-#define HPWDT_VERSION "1.2.0"
+#define HPWDT_VERSION "1.3.0"
#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000)
#define HPWDT_MAX_TIMER TICKS_TO_SECS(65535)
@@ -87,6 +87,19 @@ struct smbios_cru64_info {
};
#define SMBIOS_CRU64_INFORMATION 212
+/* type 219 */
+struct smbios_proliant_info {
+ u8 type;
+ u8 byte_length;
+ u16 handle;
+ u32 power_features;
+ u32 omega_features;
+ u32 reserved;
+ u32 misc_features;
+};
+#define SMBIOS_ICRU_INFORMATION 219
+
+
struct cmn_registers {
union {
struct {
@@ -132,6 +145,7 @@ struct cmn_registers {
static unsigned int hpwdt_nmi_decoding;
static unsigned int allow_kdump;
static unsigned int priority; /* hpwdt at end of die_notify list */
+static unsigned int is_icru;
static DEFINE_SPINLOCK(rom_lock);
static void *cru_rom_addr;
static struct cmn_registers cmn_regs;
@@ -476,19 +490,22 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
goto out;
spin_lock_irqsave(&rom_lock, rom_pl);
- if (!die_nmi_called)
+ if (!die_nmi_called && !is_icru)
asminline_call(&cmn_regs, cru_rom_addr);
die_nmi_called = 1;
spin_unlock_irqrestore(&rom_lock, rom_pl);
- if (cmn_regs.u1.ral == 0) {
- printk(KERN_WARNING "hpwdt: An NMI occurred, "
- "but unable to determine source.\n");
- } else {
- if (allow_kdump)
- hpwdt_stop();
- panic("An NMI occurred, please see the Integrated "
- "Management Log for details.\n");
+ if (!is_icru) {
+ if (cmn_regs.u1.ral == 0) {
+ printk(KERN_WARNING "hpwdt: An NMI occurred, "
+ "but unable to determine source.\n");
+ }
}
+
+ if (allow_kdump)
+ hpwdt_stop();
+ panic("An NMI occurred, please see the Integrated "
+ "Management Log for details.\n");
+
out:
return NOTIFY_OK;
}
@@ -659,30 +676,63 @@ static void __devinit hpwdt_check_nmi_decoding(struct pci_dev *dev)
}
#endif /* CONFIG_X86_LOCAL_APIC */
+/*
+ * dmi_find_icru
+ *
+ * Routine Description:
+ * This function checks whether or not we are on an iCRU-based server.
+ * This check is independent of architecture and needs to be made for
+ * any ProLiant system.
+ */
+static void __devinit dmi_find_icru(const struct dmi_header *dm, void *dummy)
+{
+ struct smbios_proliant_info *smbios_proliant_ptr;
+
+ if (dm->type == SMBIOS_ICRU_INFORMATION) {
+ smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
+ if (smbios_proliant_ptr->misc_features & 0x01)
+ is_icru = 1;
+ }
+}
+
static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
{
int retval;
/*
- * We need to map the ROM to get the CRU service.
- * For 32 bit Operating Systems we need to go through the 32 Bit
- * BIOS Service Directory
- * For 64 bit Operating Systems we get that service through SMBIOS.
+ * On typical CRU-based systems we need to map that service in
+ * the BIOS. For 32 bit Operating Systems we need to go through
+ * the 32 Bit BIOS Service Directory. For 64 bit Operating
+ * Systems we get that service through SMBIOS.
+ *
+ * On systems that support the new iCRU service all we need to
+ * do is call dmi_walk to get the supported flag value and skip
+ * the old cru detect code.
*/
- retval = detect_cru_service();
- if (retval < 0) {
- dev_warn(&dev->dev,
- "Unable to detect the %d Bit CRU Service.\n",
- HPWDT_ARCH);
- return retval;
- }
+ dmi_walk(dmi_find_icru, NULL);
+ if (!is_icru) {
+
+ /*
+ * We need to map the ROM to get the CRU service.
+ * For 32 bit Operating Systems we need to go through the 32 Bit
+ * BIOS Service Directory
+ * For 64 bit Operating Systems we get that service through SMBIOS.
+ */
+ retval = detect_cru_service();
+ if (retval < 0) {
+ dev_warn(&dev->dev,
+ "Unable to detect the %d Bit CRU Service.\n",
+ HPWDT_ARCH);
+ return retval;
+ }
- /*
- * We know this is the only CRU call we need to make so lets keep as
- * few instructions as possible once the NMI comes in.
- */
- cmn_regs.u1.rah = 0x0D;
- cmn_regs.u1.ral = 0x02;
+ /*
+ * We know this is the only CRU call we need to make so let's keep as
+ * few instructions as possible once the NMI comes in.
+ */
+ cmn_regs.u1.rah = 0x0D;
+ cmn_regs.u1.ral = 0x02;
+ }
/*
* If the priority is set to 1, then we will be put first on the
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 5fd020da7c5..751a591684d 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -120,72 +120,12 @@ enum iTCO_chipsets {
TCO_3420, /* 3420 */
TCO_3450, /* 3450 */
TCO_EP80579, /* EP80579 */
- TCO_CPT1, /* Cougar Point */
- TCO_CPT2, /* Cougar Point Desktop */
- TCO_CPT3, /* Cougar Point Mobile */
- TCO_CPT4, /* Cougar Point */
- TCO_CPT5, /* Cougar Point */
- TCO_CPT6, /* Cougar Point */
- TCO_CPT7, /* Cougar Point */
- TCO_CPT8, /* Cougar Point */
- TCO_CPT9, /* Cougar Point */
- TCO_CPT10, /* Cougar Point */
- TCO_CPT11, /* Cougar Point */
- TCO_CPT12, /* Cougar Point */
- TCO_CPT13, /* Cougar Point */
- TCO_CPT14, /* Cougar Point */
- TCO_CPT15, /* Cougar Point */
- TCO_CPT16, /* Cougar Point */
- TCO_CPT17, /* Cougar Point */
- TCO_CPT18, /* Cougar Point */
- TCO_CPT19, /* Cougar Point */
- TCO_CPT20, /* Cougar Point */
- TCO_CPT21, /* Cougar Point */
- TCO_CPT22, /* Cougar Point */
- TCO_CPT23, /* Cougar Point */
- TCO_CPT24, /* Cougar Point */
- TCO_CPT25, /* Cougar Point */
- TCO_CPT26, /* Cougar Point */
- TCO_CPT27, /* Cougar Point */
- TCO_CPT28, /* Cougar Point */
- TCO_CPT29, /* Cougar Point */
- TCO_CPT30, /* Cougar Point */
- TCO_CPT31, /* Cougar Point */
- TCO_PBG1, /* Patsburg */
- TCO_PBG2, /* Patsburg */
+ TCO_CPT, /* Cougar Point */
+ TCO_CPTD, /* Cougar Point Desktop */
+ TCO_CPTM, /* Cougar Point Mobile */
+ TCO_PBG, /* Patsburg */
TCO_DH89XXCC, /* DH89xxCC */
- TCO_PPT0, /* Panther Point */
- TCO_PPT1, /* Panther Point */
- TCO_PPT2, /* Panther Point */
- TCO_PPT3, /* Panther Point */
- TCO_PPT4, /* Panther Point */
- TCO_PPT5, /* Panther Point */
- TCO_PPT6, /* Panther Point */
- TCO_PPT7, /* Panther Point */
- TCO_PPT8, /* Panther Point */
- TCO_PPT9, /* Panther Point */
- TCO_PPT10, /* Panther Point */
- TCO_PPT11, /* Panther Point */
- TCO_PPT12, /* Panther Point */
- TCO_PPT13, /* Panther Point */
- TCO_PPT14, /* Panther Point */
- TCO_PPT15, /* Panther Point */
- TCO_PPT16, /* Panther Point */
- TCO_PPT17, /* Panther Point */
- TCO_PPT18, /* Panther Point */
- TCO_PPT19, /* Panther Point */
- TCO_PPT20, /* Panther Point */
- TCO_PPT21, /* Panther Point */
- TCO_PPT22, /* Panther Point */
- TCO_PPT23, /* Panther Point */
- TCO_PPT24, /* Panther Point */
- TCO_PPT25, /* Panther Point */
- TCO_PPT26, /* Panther Point */
- TCO_PPT27, /* Panther Point */
- TCO_PPT28, /* Panther Point */
- TCO_PPT29, /* Panther Point */
- TCO_PPT30, /* Panther Point */
- TCO_PPT31, /* Panther Point */
+ TCO_PPT, /* Panther Point */
};
static struct {
@@ -244,83 +184,14 @@ static struct {
{"3450", 2},
{"EP80579", 2},
{"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Cougar Point", 2},
- {"Patsburg", 2},
+ {"Cougar Point Desktop", 2},
+ {"Cougar Point Mobile", 2},
{"Patsburg", 2},
{"DH89xxCC", 2},
{"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
- {"Panther Point", 2},
{NULL, 0}
};
-#define ITCO_PCI_DEVICE(dev, data) \
- .vendor = PCI_VENDOR_ID_INTEL, \
- .device = dev, \
- .subvendor = PCI_ANY_ID, \
- .subdevice = PCI_ANY_ID, \
- .class = 0, \
- .class_mask = 0, \
- .driver_data = data
-
/*
* This data only exists for exporting the supported PCI ids
* via MODULE_DEVICE_TABLE. We do not actually register a
@@ -328,138 +199,138 @@ static struct {
* functions that probably will be registered by other drivers.
*/
static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AA_0, TCO_ICH)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AB_0, TCO_ICH0)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_0, TCO_ICH2)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_10, TCO_ICH2M)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801CA_0, TCO_ICH3)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801CA_12, TCO_ICH3M)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801DB_0, TCO_ICH4)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801DB_12, TCO_ICH4M)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801E_0, TCO_CICH)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801EB_0, TCO_ICH5)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB_1, TCO_6300ESB)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_0, TCO_ICH6)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_1, TCO_ICH6M)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH6_2, TCO_ICH6W)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ESB2_0, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2671, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2672, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2673, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2674, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2675, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2676, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2677, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2678, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x2679, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x267a, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x267b, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x267c, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x267d, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x267e, TCO_631XESB)},
- { ITCO_PCI_DEVICE(0x267f, TCO_631XESB)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_0, TCO_ICH7)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_30, TCO_ICH7DH)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_1, TCO_ICH7M)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_31, TCO_ICH7MDH)},
- { ITCO_PCI_DEVICE(0x27bc, TCO_NM10)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_0, TCO_ICH8)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_2, TCO_ICH8DH)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_3, TCO_ICH8DO)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_4, TCO_ICH8M)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_1, TCO_ICH8ME)},
- { ITCO_PCI_DEVICE(0x2918, TCO_ICH9)},
- { ITCO_PCI_DEVICE(0x2916, TCO_ICH9R)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_2, TCO_ICH9DH)},
- { ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH9_4, TCO_ICH9DO)},
- { ITCO_PCI_DEVICE(0x2919, TCO_ICH9M)},
- { ITCO_PCI_DEVICE(0x2917, TCO_ICH9ME)},
- { ITCO_PCI_DEVICE(0x3a18, TCO_ICH10)},
- { ITCO_PCI_DEVICE(0x3a16, TCO_ICH10R)},
- { ITCO_PCI_DEVICE(0x3a1a, TCO_ICH10D)},
- { ITCO_PCI_DEVICE(0x3a14, TCO_ICH10DO)},
- { ITCO_PCI_DEVICE(0x3b00, TCO_PCH)},
- { ITCO_PCI_DEVICE(0x3b01, TCO_PCHM)},
- { ITCO_PCI_DEVICE(0x3b02, TCO_P55)},
- { ITCO_PCI_DEVICE(0x3b03, TCO_PM55)},
- { ITCO_PCI_DEVICE(0x3b06, TCO_H55)},
- { ITCO_PCI_DEVICE(0x3b07, TCO_QM57)},
- { ITCO_PCI_DEVICE(0x3b08, TCO_H57)},
- { ITCO_PCI_DEVICE(0x3b09, TCO_HM55)},
- { ITCO_PCI_DEVICE(0x3b0a, TCO_Q57)},
- { ITCO_PCI_DEVICE(0x3b0b, TCO_HM57)},
- { ITCO_PCI_DEVICE(0x3b0d, TCO_PCHMSFF)},
- { ITCO_PCI_DEVICE(0x3b0f, TCO_QS57)},
- { ITCO_PCI_DEVICE(0x3b12, TCO_3400)},
- { ITCO_PCI_DEVICE(0x3b14, TCO_3420)},
- { ITCO_PCI_DEVICE(0x3b16, TCO_3450)},
- { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)},
- { ITCO_PCI_DEVICE(0x1c41, TCO_CPT1)},
- { ITCO_PCI_DEVICE(0x1c42, TCO_CPT2)},
- { ITCO_PCI_DEVICE(0x1c43, TCO_CPT3)},
- { ITCO_PCI_DEVICE(0x1c44, TCO_CPT4)},
- { ITCO_PCI_DEVICE(0x1c45, TCO_CPT5)},
- { ITCO_PCI_DEVICE(0x1c46, TCO_CPT6)},
- { ITCO_PCI_DEVICE(0x1c47, TCO_CPT7)},
- { ITCO_PCI_DEVICE(0x1c48, TCO_CPT8)},
- { ITCO_PCI_DEVICE(0x1c49, TCO_CPT9)},
- { ITCO_PCI_DEVICE(0x1c4a, TCO_CPT10)},
- { ITCO_PCI_DEVICE(0x1c4b, TCO_CPT11)},
- { ITCO_PCI_DEVICE(0x1c4c, TCO_CPT12)},
- { ITCO_PCI_DEVICE(0x1c4d, TCO_CPT13)},
- { ITCO_PCI_DEVICE(0x1c4e, TCO_CPT14)},
- { ITCO_PCI_DEVICE(0x1c4f, TCO_CPT15)},
- { ITCO_PCI_DEVICE(0x1c50, TCO_CPT16)},
- { ITCO_PCI_DEVICE(0x1c51, TCO_CPT17)},
- { ITCO_PCI_DEVICE(0x1c52, TCO_CPT18)},
- { ITCO_PCI_DEVICE(0x1c53, TCO_CPT19)},
- { ITCO_PCI_DEVICE(0x1c54, TCO_CPT20)},
- { ITCO_PCI_DEVICE(0x1c55, TCO_CPT21)},
- { ITCO_PCI_DEVICE(0x1c56, TCO_CPT22)},
- { ITCO_PCI_DEVICE(0x1c57, TCO_CPT23)},
- { ITCO_PCI_DEVICE(0x1c58, TCO_CPT24)},
- { ITCO_PCI_DEVICE(0x1c59, TCO_CPT25)},
- { ITCO_PCI_DEVICE(0x1c5a, TCO_CPT26)},
- { ITCO_PCI_DEVICE(0x1c5b, TCO_CPT27)},
- { ITCO_PCI_DEVICE(0x1c5c, TCO_CPT28)},
- { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)},
- { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)},
- { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)},
- { ITCO_PCI_DEVICE(0x1d40, TCO_PBG1)},
- { ITCO_PCI_DEVICE(0x1d41, TCO_PBG2)},
- { ITCO_PCI_DEVICE(0x2310, TCO_DH89XXCC)},
- { ITCO_PCI_DEVICE(0x1e40, TCO_PPT0)},
- { ITCO_PCI_DEVICE(0x1e41, TCO_PPT1)},
- { ITCO_PCI_DEVICE(0x1e42, TCO_PPT2)},
- { ITCO_PCI_DEVICE(0x1e43, TCO_PPT3)},
- { ITCO_PCI_DEVICE(0x1e44, TCO_PPT4)},
- { ITCO_PCI_DEVICE(0x1e45, TCO_PPT5)},
- { ITCO_PCI_DEVICE(0x1e46, TCO_PPT6)},
- { ITCO_PCI_DEVICE(0x1e47, TCO_PPT7)},
- { ITCO_PCI_DEVICE(0x1e48, TCO_PPT8)},
- { ITCO_PCI_DEVICE(0x1e49, TCO_PPT9)},
- { ITCO_PCI_DEVICE(0x1e4a, TCO_PPT10)},
- { ITCO_PCI_DEVICE(0x1e4b, TCO_PPT11)},
- { ITCO_PCI_DEVICE(0x1e4c, TCO_PPT12)},
- { ITCO_PCI_DEVICE(0x1e4d, TCO_PPT13)},
- { ITCO_PCI_DEVICE(0x1e4e, TCO_PPT14)},
- { ITCO_PCI_DEVICE(0x1e4f, TCO_PPT15)},
- { ITCO_PCI_DEVICE(0x1e50, TCO_PPT16)},
- { ITCO_PCI_DEVICE(0x1e51, TCO_PPT17)},
- { ITCO_PCI_DEVICE(0x1e52, TCO_PPT18)},
- { ITCO_PCI_DEVICE(0x1e53, TCO_PPT19)},
- { ITCO_PCI_DEVICE(0x1e54, TCO_PPT20)},
- { ITCO_PCI_DEVICE(0x1e55, TCO_PPT21)},
- { ITCO_PCI_DEVICE(0x1e56, TCO_PPT22)},
- { ITCO_PCI_DEVICE(0x1e57, TCO_PPT23)},
- { ITCO_PCI_DEVICE(0x1e58, TCO_PPT24)},
- { ITCO_PCI_DEVICE(0x1e59, TCO_PPT25)},
- { ITCO_PCI_DEVICE(0x1e5a, TCO_PPT26)},
- { ITCO_PCI_DEVICE(0x1e5b, TCO_PPT27)},
- { ITCO_PCI_DEVICE(0x1e5c, TCO_PPT28)},
- { ITCO_PCI_DEVICE(0x1e5d, TCO_PPT29)},
- { ITCO_PCI_DEVICE(0x1e5e, TCO_PPT30)},
- { ITCO_PCI_DEVICE(0x1e5f, TCO_PPT31)},
+ { PCI_VDEVICE(INTEL, 0x2410), TCO_ICH},
+ { PCI_VDEVICE(INTEL, 0x2420), TCO_ICH0},
+ { PCI_VDEVICE(INTEL, 0x2440), TCO_ICH2},
+ { PCI_VDEVICE(INTEL, 0x244c), TCO_ICH2M},
+ { PCI_VDEVICE(INTEL, 0x2480), TCO_ICH3},
+ { PCI_VDEVICE(INTEL, 0x248c), TCO_ICH3M},
+ { PCI_VDEVICE(INTEL, 0x24c0), TCO_ICH4},
+ { PCI_VDEVICE(INTEL, 0x24cc), TCO_ICH4M},
+ { PCI_VDEVICE(INTEL, 0x2450), TCO_CICH},
+ { PCI_VDEVICE(INTEL, 0x24d0), TCO_ICH5},
+ { PCI_VDEVICE(INTEL, 0x25a1), TCO_6300ESB},
+ { PCI_VDEVICE(INTEL, 0x2640), TCO_ICH6},
+ { PCI_VDEVICE(INTEL, 0x2641), TCO_ICH6M},
+ { PCI_VDEVICE(INTEL, 0x2642), TCO_ICH6W},
+ { PCI_VDEVICE(INTEL, 0x2670), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2671), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2672), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2673), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2674), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2675), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2676), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2677), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2678), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2679), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267a), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267b), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267c), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267d), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267e), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267f), TCO_631XESB},
+ { PCI_VDEVICE(INTEL, 0x27b8), TCO_ICH7},
+ { PCI_VDEVICE(INTEL, 0x27b0), TCO_ICH7DH},
+ { PCI_VDEVICE(INTEL, 0x27b9), TCO_ICH7M},
+ { PCI_VDEVICE(INTEL, 0x27bd), TCO_ICH7MDH},
+ { PCI_VDEVICE(INTEL, 0x27bc), TCO_NM10},
+ { PCI_VDEVICE(INTEL, 0x2810), TCO_ICH8},
+ { PCI_VDEVICE(INTEL, 0x2812), TCO_ICH8DH},
+ { PCI_VDEVICE(INTEL, 0x2814), TCO_ICH8DO},
+ { PCI_VDEVICE(INTEL, 0x2815), TCO_ICH8M},
+ { PCI_VDEVICE(INTEL, 0x2811), TCO_ICH8ME},
+ { PCI_VDEVICE(INTEL, 0x2918), TCO_ICH9},
+ { PCI_VDEVICE(INTEL, 0x2916), TCO_ICH9R},
+ { PCI_VDEVICE(INTEL, 0x2912), TCO_ICH9DH},
+ { PCI_VDEVICE(INTEL, 0x2914), TCO_ICH9DO},
+ { PCI_VDEVICE(INTEL, 0x2919), TCO_ICH9M},
+ { PCI_VDEVICE(INTEL, 0x2917), TCO_ICH9ME},
+ { PCI_VDEVICE(INTEL, 0x3a18), TCO_ICH10},
+ { PCI_VDEVICE(INTEL, 0x3a16), TCO_ICH10R},
+ { PCI_VDEVICE(INTEL, 0x3a1a), TCO_ICH10D},
+ { PCI_VDEVICE(INTEL, 0x3a14), TCO_ICH10DO},
+ { PCI_VDEVICE(INTEL, 0x3b00), TCO_PCH},
+ { PCI_VDEVICE(INTEL, 0x3b01), TCO_PCHM},
+ { PCI_VDEVICE(INTEL, 0x3b02), TCO_P55},
+ { PCI_VDEVICE(INTEL, 0x3b03), TCO_PM55},
+ { PCI_VDEVICE(INTEL, 0x3b06), TCO_H55},
+ { PCI_VDEVICE(INTEL, 0x3b07), TCO_QM57},
+ { PCI_VDEVICE(INTEL, 0x3b08), TCO_H57},
+ { PCI_VDEVICE(INTEL, 0x3b09), TCO_HM55},
+ { PCI_VDEVICE(INTEL, 0x3b0a), TCO_Q57},
+ { PCI_VDEVICE(INTEL, 0x3b0b), TCO_HM57},
+ { PCI_VDEVICE(INTEL, 0x3b0d), TCO_PCHMSFF},
+ { PCI_VDEVICE(INTEL, 0x3b0f), TCO_QS57},
+ { PCI_VDEVICE(INTEL, 0x3b12), TCO_3400},
+ { PCI_VDEVICE(INTEL, 0x3b14), TCO_3420},
+ { PCI_VDEVICE(INTEL, 0x3b16), TCO_3450},
+ { PCI_VDEVICE(INTEL, 0x5031), TCO_EP80579},
+ { PCI_VDEVICE(INTEL, 0x1c41), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c42), TCO_CPTD},
+ { PCI_VDEVICE(INTEL, 0x1c43), TCO_CPTM},
+ { PCI_VDEVICE(INTEL, 0x1c44), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c45), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c46), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c47), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c48), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c49), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4a), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4b), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4c), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4d), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4e), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4f), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c50), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c51), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c52), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c53), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c54), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c55), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c56), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c57), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c58), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c59), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5a), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5b), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5c), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5d), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5e), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5f), TCO_CPT},
+ { PCI_VDEVICE(INTEL, 0x1d40), TCO_PBG},
+ { PCI_VDEVICE(INTEL, 0x1d41), TCO_PBG},
+ { PCI_VDEVICE(INTEL, 0x2310), TCO_DH89XXCC},
+ { PCI_VDEVICE(INTEL, 0x1e40), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e41), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e42), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e43), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e44), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e45), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e46), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e47), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e48), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e49), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4a), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4b), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4c), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4d), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4e), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4f), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e50), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e51), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e52), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e53), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e54), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e55), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e56), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e57), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e58), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e59), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5a), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5b), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5c), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
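
The open-coded ITCO_PCI_DEVICE initializer removed above duplicated what the
generic PCI helper already provides. For kernels of this vintage, PCI_VDEVICE
is defined in include/linux/pci.h roughly as:

#define PCI_VDEVICE(vendor, device) \
	PCI_VENDOR_ID_##vendor, (device), \
	PCI_ANY_ID, PCI_ANY_ID, 0, 0

so each table entry above still matches any subvendor/subdevice and any class,
and the trailing TCO_* value lands in driver_data exactly as before.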
@@ -1052,15 +923,10 @@ static void iTCO_wdt_shutdown(struct platform_device *dev)
iTCO_wdt_stop();
}
-#define iTCO_wdt_suspend NULL
-#define iTCO_wdt_resume NULL
-
static struct platform_driver iTCO_wdt_driver = {
.probe = iTCO_wdt_probe,
.remove = __devexit_p(iTCO_wdt_remove),
.shutdown = iTCO_wdt_shutdown,
- .suspend = iTCO_wdt_suspend,
- .resume = iTCO_wdt_resume,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 86f7cac1026..b8ef2c6dca7 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -329,12 +329,18 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
}
}
+static const struct of_device_id imx2_wdt_dt_ids[] = {
+ { .compatible = "fsl,imx21-wdt", },
+ { /* sentinel */ }
+};
+
static struct platform_driver imx2_wdt_driver = {
.remove = __exit_p(imx2_wdt_remove),
.shutdown = imx2_wdt_shutdown,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = imx2_wdt_dt_ids,
},
};
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
index 919bdd16136..1abdc0454c5 100644
--- a/drivers/watchdog/intel_scu_watchdog.c
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -42,9 +42,8 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/sfi.h>
-#include <linux/types.h>
#include <asm/irq.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/intel_scu_ipc.h>
#include <asm/apb_timer.h>
#include <asm/mrst.h>
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index 6143f52ba6b..8d2d8502d3e 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -28,10 +28,10 @@
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/fs.h>
-#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/ioport.h>
#define NAME "it8712f_wdt"
@@ -51,7 +51,6 @@ MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
static unsigned long wdt_open;
static unsigned expect_close;
-static spinlock_t io_lock;
static unsigned char revision;
/* Dog Food address - We use the game port address */
@@ -121,20 +120,26 @@ static inline void superio_select(int ldn)
outb(ldn, VAL);
}
-static inline void superio_enter(void)
+static inline int superio_enter(void)
{
- spin_lock(&io_lock);
+ /*
+ * Try to reserve REG and REG + 1 for exclusive access.
+ */
+ if (!request_muxed_region(REG, 2, NAME))
+ return -EBUSY;
+
outb(0x87, REG);
outb(0x01, REG);
outb(0x55, REG);
outb(0x55, REG);
+ return 0;
}
static inline void superio_exit(void)
{
outb(0x02, REG);
outb(0x02, VAL);
- spin_unlock(&io_lock);
+ release_region(REG, 2);
}
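
A minimal sketch of the call pattern the following hunks apply throughout the
driver, now that superio_enter() can fail instead of taking a spinlock:

	int ret = superio_enter();	/* -EBUSY if the Super I/O ports cannot be reserved */

	if (ret)
		return ret;
	superio_select(LDN_GPIO);
	/* ... program the watchdog registers ... */
	superio_exit();			/* releases the muxed region taken in superio_enter() */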
static inline void it8712f_wdt_ping(void)
@@ -173,10 +178,13 @@ static int it8712f_wdt_get_status(void)
return 0;
}
-static void it8712f_wdt_enable(void)
+static int it8712f_wdt_enable(void)
{
+ int ret = superio_enter();
+ if (ret)
+ return ret;
+
printk(KERN_DEBUG NAME ": enabling watchdog timer\n");
- superio_enter();
superio_select(LDN_GPIO);
superio_outb(wdt_control_reg, WDT_CONTROL);
@@ -186,13 +194,17 @@ static void it8712f_wdt_enable(void)
superio_exit();
it8712f_wdt_ping();
+
+ return 0;
}
-static void it8712f_wdt_disable(void)
+static int it8712f_wdt_disable(void)
{
- printk(KERN_DEBUG NAME ": disabling watchdog timer\n");
+ int ret = superio_enter();
+ if (ret)
+ return ret;
- superio_enter();
+ printk(KERN_DEBUG NAME ": disabling watchdog timer\n");
superio_select(LDN_GPIO);
superio_outb(0, WDT_CONFIG);
@@ -202,6 +214,7 @@ static void it8712f_wdt_disable(void)
superio_outb(0, WDT_TIMEOUT);
superio_exit();
+ return 0;
}
static int it8712f_wdt_notify(struct notifier_block *this,
@@ -252,6 +265,7 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
WDIOF_MAGICCLOSE,
};
int value;
+ int ret;
switch (cmd) {
case WDIOC_GETSUPPORT:
@@ -259,7 +273,9 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
return 0;
case WDIOC_GETSTATUS:
- superio_enter();
+ ret = superio_enter();
+ if (ret)
+ return ret;
superio_select(LDN_GPIO);
value = it8712f_wdt_get_status();
@@ -280,7 +296,9 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
if (value > (max_units * 60))
return -EINVAL;
margin = value;
- superio_enter();
+ ret = superio_enter();
+ if (ret)
+ return ret;
superio_select(LDN_GPIO);
it8712f_wdt_update_margin();
@@ -299,10 +317,14 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd,
static int it8712f_wdt_open(struct inode *inode, struct file *file)
{
+ int ret;
/* only allow one at a time */
if (test_and_set_bit(0, &wdt_open))
return -EBUSY;
- it8712f_wdt_enable();
+
+ ret = it8712f_wdt_enable();
+ if (ret)
+ return ret;
return nonseekable_open(inode, file);
}
@@ -313,7 +335,8 @@ static int it8712f_wdt_release(struct inode *inode, struct file *file)
": watchdog device closed unexpectedly, will not"
" disable the watchdog timer\n");
} else if (!nowayout) {
- it8712f_wdt_disable();
+ if (it8712f_wdt_disable())
+ printk(KERN_WARNING NAME ": watchdog disable failed\n");
}
expect_close = 0;
clear_bit(0, &wdt_open);
@@ -340,8 +363,10 @@ static int __init it8712f_wdt_find(unsigned short *address)
{
int err = -ENODEV;
int chip_type;
+ int ret = superio_enter();
+ if (ret)
+ return ret;
- superio_enter();
chip_type = superio_inw(DEVID);
if (chip_type != IT8712F_DEVID)
goto exit;
@@ -382,8 +407,6 @@ static int __init it8712f_wdt_init(void)
{
int err = 0;
- spin_lock_init(&io_lock);
-
if (it8712f_wdt_find(&address))
return -ENODEV;
@@ -392,7 +415,11 @@ static int __init it8712f_wdt_init(void)
return -EBUSY;
}
- it8712f_wdt_disable();
+ err = it8712f_wdt_disable();
+ if (err) {
+ printk(KERN_ERR NAME ": unable to disable watchdog timer.\n");
+ goto out;
+ }
err = register_reboot_notifier(&it8712f_wdt_notifier);
if (err) {
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index b1bc72f9a20..a2d9a1266a2 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -137,7 +137,6 @@
static unsigned int base, gpact, ciract, max_units, chip_type;
static unsigned long wdt_status;
-static DEFINE_SPINLOCK(spinlock);
static int nogameport = DEFAULT_NOGAMEPORT;
static int exclusive = DEFAULT_EXCLUSIVE;
@@ -163,18 +162,26 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started, default="
/* Superio Chip */
-static inline void superio_enter(void)
+static inline int superio_enter(void)
{
+ /*
+ * Try to reserve REG and REG + 1 for exclusive access.
+ */
+ if (!request_muxed_region(REG, 2, WATCHDOG_NAME))
+ return -EBUSY;
+
outb(0x87, REG);
outb(0x01, REG);
outb(0x55, REG);
outb(0x55, REG);
+ return 0;
}
static inline void superio_exit(void)
{
outb(0x02, REG);
outb(0x02, VAL);
+ release_region(REG, 2);
}
static inline void superio_select(int ldn)
@@ -255,12 +262,11 @@ static void wdt_keepalive(void)
set_bit(WDTS_KEEPALIVE, &wdt_status);
}
-static void wdt_start(void)
+static int wdt_start(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
+ int ret = superio_enter();
+ if (ret)
+ return ret;
superio_select(GPIO);
if (test_bit(WDTS_USE_GP, &wdt_status))
@@ -270,15 +276,15 @@ static void wdt_start(void)
wdt_update_timeout();
superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
+
+ return 0;
}
-static void wdt_stop(void)
+static int wdt_stop(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
+ int ret = superio_enter();
+ if (ret)
+ return ret;
superio_select(GPIO);
superio_outb(0x00, WDTCTRL);
@@ -288,7 +294,7 @@ static void wdt_stop(void)
superio_outb(0x00, WDTVALMSB);
superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
+ return 0;
}
/**
@@ -303,8 +309,6 @@ static void wdt_stop(void)
static int wdt_set_timeout(int t)
{
- unsigned long flags;
-
if (t < 1 || t > max_units * 60)
return -EINVAL;
@@ -313,14 +317,15 @@ static int wdt_set_timeout(int t)
else
timeout = t;
- spin_lock_irqsave(&spinlock, flags);
if (test_bit(WDTS_TIMER_RUN, &wdt_status)) {
- superio_enter();
+ int ret = superio_enter();
+ if (ret)
+ return ret;
+
superio_select(GPIO);
wdt_update_timeout();
superio_exit();
}
- spin_unlock_irqrestore(&spinlock, flags);
return 0;
}
@@ -339,12 +344,12 @@ static int wdt_set_timeout(int t)
static int wdt_get_status(int *status)
{
- unsigned long flags;
-
*status = 0;
if (testmode) {
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
+ int ret = superio_enter();
+ if (ret)
+ return ret;
+
superio_select(GPIO);
if (superio_inb(WDTCTRL) & WDT_ZERO) {
superio_outb(0x00, WDTCTRL);
@@ -353,7 +358,6 @@ static int wdt_get_status(int *status)
}
superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
}
if (test_and_clear_bit(WDTS_KEEPALIVE, &wdt_status))
*status |= WDIOF_KEEPALIVEPING;
@@ -379,9 +383,17 @@ static int wdt_open(struct inode *inode, struct file *file)
if (exclusive && test_and_set_bit(WDTS_DEV_OPEN, &wdt_status))
return -EBUSY;
if (!test_and_set_bit(WDTS_TIMER_RUN, &wdt_status)) {
+ int ret;
if (nowayout && !test_and_set_bit(WDTS_LOCKED, &wdt_status))
__module_get(THIS_MODULE);
- wdt_start();
+
+ ret = wdt_start();
+ if (ret) {
+ clear_bit(WDTS_LOCKED, &wdt_status);
+ clear_bit(WDTS_TIMER_RUN, &wdt_status);
+ clear_bit(WDTS_DEV_OPEN, &wdt_status);
+ return ret;
+ }
}
return nonseekable_open(inode, file);
}
@@ -403,7 +415,16 @@ static int wdt_release(struct inode *inode, struct file *file)
{
if (test_bit(WDTS_TIMER_RUN, &wdt_status)) {
if (test_and_clear_bit(WDTS_EXPECTED, &wdt_status)) {
- wdt_stop();
+ int ret = wdt_stop();
+ if (ret) {
+ /*
+ * Stop failed. Just keep the watchdog alive
+ * and hope nothing bad happens.
+ */
+ set_bit(WDTS_EXPECTED, &wdt_status);
+ wdt_keepalive();
+ return ret;
+ }
clear_bit(WDTS_TIMER_RUN, &wdt_status);
} else {
wdt_keepalive();
@@ -484,7 +505,9 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
&ident, sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
- wdt_get_status(&status);
+ rc = wdt_get_status(&status);
+ if (rc)
+ return rc;
return put_user(status, uarg.i);
case WDIOC_GETBOOTSTATUS:
@@ -500,14 +523,22 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (new_options) {
case WDIOS_DISABLECARD:
- if (test_bit(WDTS_TIMER_RUN, &wdt_status))
- wdt_stop();
+ if (test_bit(WDTS_TIMER_RUN, &wdt_status)) {
+ rc = wdt_stop();
+ if (rc)
+ return rc;
+ }
clear_bit(WDTS_TIMER_RUN, &wdt_status);
return 0;
case WDIOS_ENABLECARD:
- if (!test_and_set_bit(WDTS_TIMER_RUN, &wdt_status))
- wdt_start();
+ if (!test_and_set_bit(WDTS_TIMER_RUN, &wdt_status)) {
+ rc = wdt_start();
+ if (rc) {
+ clear_bit(WDTS_TIMER_RUN, &wdt_status);
+ return rc;
+ }
+ }
return 0;
default:
@@ -560,16 +591,17 @@ static int __init it87_wdt_init(void)
int rc = 0;
int try_gameport = !nogameport;
u8 chip_rev;
- unsigned long flags;
+ int gp_rreq_fail = 0;
wdt_status = 0;
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
+ rc = superio_enter();
+ if (rc)
+ return rc;
+
chip_type = superio_inw(CHIPID);
chip_rev = superio_inb(CHIPREV) & 0x0f;
superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
switch (chip_type) {
case IT8702_ID:
@@ -603,8 +635,9 @@ static int __init it87_wdt_init(void)
return -ENODEV;
}
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
+ rc = superio_enter();
+ if (rc)
+ return rc;
superio_select(GPIO);
superio_outb(WDT_TOV1, WDTCFG);
@@ -620,21 +653,16 @@ static int __init it87_wdt_init(void)
}
gpact = superio_inb(ACTREG);
superio_outb(0x01, ACTREG);
- superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
if (request_region(base, 1, WATCHDOG_NAME))
set_bit(WDTS_USE_GP, &wdt_status);
else
- rc = -EIO;
- } else {
- superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
+ gp_rreq_fail = 1;
}
/* If we haven't Gameport support, try to get CIR support */
if (!test_bit(WDTS_USE_GP, &wdt_status)) {
if (!request_region(CIR_BASE, 8, WATCHDOG_NAME)) {
- if (rc == -EIO)
+ if (gp_rreq_fail)
printk(KERN_ERR PFX
"I/O Address 0x%04x and 0x%04x"
" already in use\n", base, CIR_BASE);
@@ -646,21 +674,16 @@ static int __init it87_wdt_init(void)
goto err_out;
}
base = CIR_BASE;
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
superio_select(CIR);
superio_outw(base, BASEREG);
superio_outb(0x00, CIR_ILS);
ciract = superio_inb(ACTREG);
superio_outb(0x01, ACTREG);
- if (rc == -EIO) {
+ if (gp_rreq_fail) {
superio_select(GAMEPORT);
superio_outb(gpact, ACTREG);
}
-
- superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
}
if (timeout < 1 || timeout > max_units * 60) {
@@ -704,6 +727,7 @@ static int __init it87_wdt_init(void)
"nogameport=%d)\n", chip_type, chip_rev, timeout,
nowayout, testmode, exclusive, nogameport);
+ superio_exit();
return 0;
err_out_reboot:
@@ -711,49 +735,37 @@ err_out_reboot:
err_out_region:
release_region(base, test_bit(WDTS_USE_GP, &wdt_status) ? 1 : 8);
if (!test_bit(WDTS_USE_GP, &wdt_status)) {
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
superio_select(CIR);
superio_outb(ciract, ACTREG);
- superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
}
err_out:
if (try_gameport) {
- spin_lock_irqsave(&spinlock, flags);
- superio_enter();
superio_select(GAMEPORT);
superio_outb(gpact, ACTREG);
- superio_exit();
- spin_unlock_irqrestore(&spinlock, flags);
}
+ superio_exit();
return rc;
}
static void __exit it87_wdt_exit(void)
{
- unsigned long flags;
- int nolock;
-
- nolock = !spin_trylock_irqsave(&spinlock, flags);
- superio_enter();
- superio_select(GPIO);
- superio_outb(0x00, WDTCTRL);
- superio_outb(0x00, WDTCFG);
- superio_outb(0x00, WDTVALLSB);
- if (max_units > 255)
- superio_outb(0x00, WDTVALMSB);
- if (test_bit(WDTS_USE_GP, &wdt_status)) {
- superio_select(GAMEPORT);
- superio_outb(gpact, ACTREG);
- } else {
- superio_select(CIR);
- superio_outb(ciract, ACTREG);
+ if (superio_enter() == 0) {
+ superio_select(GPIO);
+ superio_outb(0x00, WDTCTRL);
+ superio_outb(0x00, WDTCFG);
+ superio_outb(0x00, WDTVALLSB);
+ if (max_units > 255)
+ superio_outb(0x00, WDTVALMSB);
+ if (test_bit(WDTS_USE_GP, &wdt_status)) {
+ superio_select(GAMEPORT);
+ superio_outb(gpact, ACTREG);
+ } else {
+ superio_select(CIR);
+ superio_outb(ciract, ACTREG);
+ }
+ superio_exit();
}
- superio_exit();
- if (!nolock)
- spin_unlock_irqrestore(&spinlock, flags);
misc_deregister(&wdt_miscdev);
unregister_reboot_notifier(&wdt_notifier);
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index 2b4af222b5f..4dc31024d26 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -407,12 +407,35 @@ static int __devexit mpcore_wdt_remove(struct platform_device *dev)
return 0;
}
+#ifdef CONFIG_PM
+static int mpcore_wdt_suspend(struct platform_device *dev, pm_message_t msg)
+{
+ struct mpcore_wdt *wdt = platform_get_drvdata(dev);
+ mpcore_wdt_stop(wdt); /* Turn the WDT off */
+ return 0;
+}
+
+static int mpcore_wdt_resume(struct platform_device *dev)
+{
+ struct mpcore_wdt *wdt = platform_get_drvdata(dev);
+ /* re-activate timer */
+ if (test_bit(0, &wdt->timer_alive))
+ mpcore_wdt_start(wdt);
+ return 0;
+}
+#else
+#define mpcore_wdt_suspend NULL
+#define mpcore_wdt_resume NULL
+#endif
+
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:mpcore_wdt");
static struct platform_driver mpcore_wdt_driver = {
.probe = mpcore_wdt_probe,
.remove = __devexit_p(mpcore_wdt_remove),
+ .suspend = mpcore_wdt_suspend,
+ .resume = mpcore_wdt_resume,
.shutdown = mpcore_wdt_shutdown,
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 1479dc4d612..ac37bb82392 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -66,23 +66,18 @@ static struct {
int default_ticks;
unsigned long inuse;
unsigned gpio;
- int gstate;
+ unsigned int gstate;
} mtx1_wdt_device;
static void mtx1_wdt_trigger(unsigned long unused)
{
- u32 tmp;
-
spin_lock(&mtx1_wdt_device.lock);
if (mtx1_wdt_device.running)
ticks--;
/* toggle wdt gpio */
- mtx1_wdt_device.gstate = ~mtx1_wdt_device.gstate;
- if (mtx1_wdt_device.gstate)
- gpio_direction_output(mtx1_wdt_device.gpio, 1);
- else
- gpio_direction_input(mtx1_wdt_device.gpio);
+ mtx1_wdt_device.gstate = !mtx1_wdt_device.gstate;
+ gpio_set_value(mtx1_wdt_device.gpio, mtx1_wdt_device.gstate);
if (mtx1_wdt_device.queue && ticks)
mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
@@ -105,7 +100,7 @@ static void mtx1_wdt_start(void)
if (!mtx1_wdt_device.queue) {
mtx1_wdt_device.queue = 1;
mtx1_wdt_device.gstate = 1;
- gpio_direction_output(mtx1_wdt_device.gpio, 1);
+ gpio_set_value(mtx1_wdt_device.gpio, 1);
mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
}
mtx1_wdt_device.running++;
@@ -120,7 +115,7 @@ static int mtx1_wdt_stop(void)
if (mtx1_wdt_device.queue) {
mtx1_wdt_device.queue = 0;
mtx1_wdt_device.gstate = 0;
- gpio_direction_output(mtx1_wdt_device.gpio, 0);
+ gpio_set_value(mtx1_wdt_device.gpio, 0);
}
ticks = mtx1_wdt_device.default_ticks;
spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags);
@@ -214,6 +209,12 @@ static int __devinit mtx1_wdt_probe(struct platform_device *pdev)
int ret;
mtx1_wdt_device.gpio = pdev->resource[0].start;
+ ret = gpio_request_one(mtx1_wdt_device.gpio,
+ GPIOF_OUT_INIT_HIGH, "mtx1-wdt");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request gpio\n");
+ return ret;
+ }
spin_lock_init(&mtx1_wdt_device.lock);
init_completion(&mtx1_wdt_device.stop);
@@ -224,11 +225,11 @@ static int __devinit mtx1_wdt_probe(struct platform_device *pdev)
ret = misc_register(&mtx1_wdt_misc);
if (ret < 0) {
- printk(KERN_ERR " mtx-1_wdt : failed to register\n");
+ dev_err(&pdev->dev, "failed to register\n");
return ret;
}
mtx1_wdt_start();
- printk(KERN_INFO "MTX-1 Watchdog driver\n");
+ dev_info(&pdev->dev, "MTX-1 Watchdog driver\n");
return 0;
}
@@ -239,11 +240,13 @@ static int __devexit mtx1_wdt_remove(struct platform_device *pdev)
mtx1_wdt_device.queue = 0;
wait_for_completion(&mtx1_wdt_device.stop);
}
+
+ gpio_free(mtx1_wdt_device.gpio);
misc_deregister(&mtx1_wdt_misc);
return 0;
}
-static struct platform_driver mtx1_wdt = {
+static struct platform_driver mtx1_wdt_driver = {
.probe = mtx1_wdt_probe,
.remove = __devexit_p(mtx1_wdt_remove),
.driver.name = "mtx1-wdt",
@@ -252,12 +255,12 @@ static struct platform_driver mtx1_wdt = {
static int __init mtx1_wdt_init(void)
{
- return platform_driver_register(&mtx1_wdt);
+ return platform_driver_register(&mtx1_wdt_driver);
}
static void __exit mtx1_wdt_exit(void)
{
- platform_driver_unregister(&mtx1_wdt);
+ platform_driver_unregister(&mtx1_wdt_driver);
}
module_init(mtx1_wdt_init);
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index afa78a54711..809f41c30c4 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -458,7 +458,15 @@ static int __devexit nv_tco_remove(struct platform_device *dev)
static void nv_tco_shutdown(struct platform_device *dev)
{
+ u32 val;
+
tco_timer_stop();
+
+ /* Some BIOSes fail the POST (once) if the NO_REBOOT flag is not
+ * cleared during shutdown. */
+ pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+ val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT;
+ pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
}
static struct platform_driver nv_tco_driver = {
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
new file mode 100644
index 00000000000..4ec741ac952
--- /dev/null
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -0,0 +1,433 @@
+/*
+* of_xilinx_wdt.c 1.01 A Watchdog Device Driver for Xilinx xps_timebase_wdt
+*
+* (C) Copyright 2011 (Alejandro Cabrera <aldaya@gmail.com>)
+*
+* -----------------------
+*
+* This program is free software; you can redistribute it and/or
+* modify it under the terms of the GNU General Public License
+* as published by the Free Software Foundation; either version
+* 2 of the License, or (at your option) any later version.
+*
+* -----------------------
+* 30-May-2011 Alejandro Cabrera <aldaya@gmail.com>
+* - If "xlnx,wdt-enable-once" isn't found in the device tree, the
+* module will use CONFIG_WATCHDOG_NOWAYOUT
+* - If the device tree parameters ("clock-frequency" and
+* "xlnx,wdt-interval") aren't found, the driver won't
+* know the wdt reset interval
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/watchdog.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+
+/* Register offsets for the Wdt device */
+#define XWT_TWCSR0_OFFSET 0x0 /* Control/Status Register0 */
+#define XWT_TWCSR1_OFFSET 0x4 /* Control/Status Register1 */
+#define XWT_TBR_OFFSET 0x8 /* Timebase Register Offset */
+
+/* Control/Status Register Masks */
+#define XWT_CSR0_WRS_MASK 0x00000008 /* Reset status */
+#define XWT_CSR0_WDS_MASK 0x00000004 /* Timer state */
+#define XWT_CSR0_EWDT1_MASK 0x00000002 /* Enable bit 1 */
+
+/* Control/Status Register 0/1 bits */
+#define XWT_CSRX_EWDT2_MASK 0x00000001 /* Enable bit 2 */
+
+/* SelfTest constants */
+#define XWT_MAX_SELFTEST_LOOP_COUNT 0x00010000
+#define XWT_TIMER_FAILED 0xFFFFFFFF
+
+#define WATCHDOG_NAME "Xilinx Watchdog"
+#define PFX WATCHDOG_NAME ": "
+
+struct xwdt_device {
+ struct resource res;
+ void __iomem *base;
+ u32 nowayout;
+ u32 wdt_interval;
+ u32 boot_status;
+};
+
+static struct xwdt_device xdev;
+
+static u32 timeout;
+static u32 control_status_reg;
+static u8 expect_close;
+static u8 no_timeout;
+static unsigned long driver_open;
+
+static DEFINE_SPINLOCK(spinlock);
+
+static void xwdt_start(void)
+{
+ spin_lock(&spinlock);
+
+ /* Clean previous status and enable the watchdog timer */
+ control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
+ control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK);
+
+ iowrite32((control_status_reg | XWT_CSR0_EWDT1_MASK),
+ xdev.base + XWT_TWCSR0_OFFSET);
+
+ iowrite32(XWT_CSRX_EWDT2_MASK, xdev.base + XWT_TWCSR1_OFFSET);
+
+ spin_unlock(&spinlock);
+}
+
+static void xwdt_stop(void)
+{
+ spin_lock(&spinlock);
+
+ control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
+
+ iowrite32((control_status_reg & ~XWT_CSR0_EWDT1_MASK),
+ xdev.base + XWT_TWCSR0_OFFSET);
+
+ iowrite32(0, xdev.base + XWT_TWCSR1_OFFSET);
+
+ spin_unlock(&spinlock);
+ printk(KERN_INFO PFX "Stopped!\n");
+}
+
+static void xwdt_keepalive(void)
+{
+ spin_lock(&spinlock);
+
+ control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
+ control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK);
+ iowrite32(control_status_reg, xdev.base + XWT_TWCSR0_OFFSET);
+
+ spin_unlock(&spinlock);
+}
+
+static void xwdt_get_status(int *status)
+{
+ int new_status;
+
+ spin_lock(&spinlock);
+
+ control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET);
+ new_status = ((control_status_reg &
+ (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK)) != 0);
+ spin_unlock(&spinlock);
+
+ *status = 0;
+ if (new_status & 1)
+ *status |= WDIOF_CARDRESET;
+}
+
+static u32 xwdt_selftest(void)
+{
+ int i;
+ u32 timer_value1;
+ u32 timer_value2;
+
+ spin_lock(&spinlock);
+
+ timer_value1 = ioread32(xdev.base + XWT_TBR_OFFSET);
+ timer_value2 = ioread32(xdev.base + XWT_TBR_OFFSET);
+
+ for (i = 0;
+ ((i <= XWT_MAX_SELFTEST_LOOP_COUNT) &&
+ (timer_value2 == timer_value1)); i++) {
+ timer_value2 = ioread32(xdev.base + XWT_TBR_OFFSET);
+ }
+
+ spin_unlock(&spinlock);
+
+ if (timer_value2 != timer_value1)
+ return ~XWT_TIMER_FAILED;
+ else
+ return XWT_TIMER_FAILED;
+}
+
+static int xwdt_open(struct inode *inode, struct file *file)
+{
+ /* Only one process can handle the wdt at a time */
+ if (test_and_set_bit(0, &driver_open))
+ return -EBUSY;
+
+ /* Make sure that the module is always loaded... */
+ if (xdev.nowayout)
+ __module_get(THIS_MODULE);
+
+ xwdt_start();
+ printk(KERN_INFO PFX "Started...\n");
+
+ return nonseekable_open(inode, file);
+}
+
+static int xwdt_release(struct inode *inode, struct file *file)
+{
+ if (expect_close == 42) {
+ xwdt_stop();
+ } else {
+ printk(KERN_CRIT PFX
+ "Unexpected close, not stopping watchdog!\n");
+ xwdt_keepalive();
+ }
+
+ clear_bit(0, &driver_open);
+ expect_close = 0;
+ return 0;
+}
+
+/*
+ * xwdt_write:
+ * @file: file handle to the watchdog
+ * @buf: buffer to write (unused, as the data does not matter here)
+ * @count: count of bytes
+ * @ppos: pointer to the position to write. No seeks allowed
+ *
+ * A write to a watchdog device is defined as a keepalive signal. Any
+ * write of data will do, as we don't define content meaning.
+ */
+static ssize_t xwdt_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ if (len) {
+ if (!xdev.nowayout) {
+ size_t i;
+
+ /* In case it was set long ago */
+ expect_close = 0;
+
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, buf + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_close = 42;
+ }
+ }
+ xwdt_keepalive();
+ }
+ return len;
+}
+
+static const struct watchdog_info ident = {
+ .options = WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+ .firmware_version = 1,
+ .identity = WATCHDOG_NAME,
+};
+
+/*
+ * xwdt_ioctl:
+ * @file: file handle to the device
+ * @cmd: watchdog command
+ * @arg: argument pointer
+ *
+ * The watchdog API defines a common set of functions for all watchdogs
+ * according to their available features.
+ */
+static long xwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int status;
+
+ union {
+ struct watchdog_info __user *ident;
+ int __user *i;
+ } uarg;
+
+ uarg.i = (int __user *)arg;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(uarg.ident, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(xdev.boot_status, uarg.i);
+
+ case WDIOC_GETSTATUS:
+ xwdt_get_status(&status);
+ return put_user(status, uarg.i);
+
+ case WDIOC_KEEPALIVE:
+ xwdt_keepalive();
+ return 0;
+
+ case WDIOC_GETTIMEOUT:
+ if (no_timeout)
+ return -ENOTTY;
+ else
+ return put_user(timeout, uarg.i);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations xwdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = xwdt_write,
+ .open = xwdt_open,
+ .release = xwdt_release,
+ .unlocked_ioctl = xwdt_ioctl,
+};
+
+static struct miscdevice xwdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &xwdt_fops,
+};
+
+static int __devinit xwdt_probe(struct platform_device *pdev)
+{
+ int rc;
+ u32 *tmptr;
+ u32 *pfreq;
+
+ no_timeout = 0;
+
+ pfreq = (u32 *)of_get_property(pdev->dev.of_node->parent,
+ "clock-frequency", NULL);
+
+ if (pfreq == NULL) {
+ printk(KERN_WARNING PFX
+ "The watchdog clock frequency cannot be obtained!\n");
+ no_timeout = 1;
+ }
+
+ rc = of_address_to_resource(pdev->dev.of_node, 0, &xdev.res);
+ if (rc) {
+ printk(KERN_WARNING PFX "invalid address!\n");
+ return rc;
+ }
+
+ tmptr = (u32 *)of_get_property(pdev->dev.of_node,
+ "xlnx,wdt-interval", NULL);
+ if (tmptr == NULL) {
+ printk(KERN_WARNING PFX "Parameter \"xlnx,wdt-interval\""
+ " not found in device tree!\n");
+ no_timeout = 1;
+ } else {
+ xdev.wdt_interval = *tmptr;
+ }
+
+ tmptr = (u32 *)of_get_property(pdev->dev.of_node,
+ "xlnx,wdt-enable-once", NULL);
+ if (tmptr == NULL) {
+ printk(KERN_WARNING PFX "Parameter \"xlnx,wdt-enable-once\""
+ " not found in device tree!\n");
+ xdev.nowayout = WATCHDOG_NOWAYOUT;
+ }
+
+/*
+ * Twice 2^wdt_interval / freq, because the first wdt overflow only
+ * raises an interrupt and is ignored; the reset is generated at the
+ * second overflow.
+ */
+ if (!no_timeout)
+ timeout = 2 * ((1<<xdev.wdt_interval) / *pfreq);
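
/*
 * Worked example (device-tree values are assumptions for illustration,
 * not part of this patch): with clock-frequency = 100000000 and
 * xlnx,wdt-interval = 30, timeout = 2 * (2^30 / 10^8) = 2 * 10 = 20 s.
 */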
+
+ if (!request_mem_region(xdev.res.start,
+ xdev.res.end - xdev.res.start + 1, WATCHDOG_NAME)) {
+ rc = -ENXIO;
+ printk(KERN_ERR PFX "memory request failure!\n");
+ goto err_out;
+ }
+
+ xdev.base = ioremap(xdev.res.start, xdev.res.end - xdev.res.start + 1);
+ if (xdev.base == NULL) {
+ rc = -ENOMEM;
+ printk(KERN_ERR PFX "ioremap failure!\n");
+ goto release_mem;
+ }
+
+ rc = xwdt_selftest();
+ if (rc == XWT_TIMER_FAILED) {
+ printk(KERN_ERR PFX "SelfTest routine error!\n");
+ goto unmap_io;
+ }
+
+ xwdt_get_status(&xdev.boot_status);
+
+ rc = misc_register(&xwdt_miscdev);
+ if (rc) {
+ printk(KERN_ERR PFX
+ "cannot register miscdev on minor=%d (err=%d)\n",
+ xwdt_miscdev.minor, rc);
+ goto unmap_io;
+ }
+
+ if (no_timeout)
+ printk(KERN_INFO PFX
+ "driver loaded (timeout=? sec, nowayout=%d)\n",
+ xdev.nowayout);
+ else
+ printk(KERN_INFO PFX
+ "driver loaded (timeout=%d sec, nowayout=%d)\n",
+ timeout, xdev.nowayout);
+
+ expect_close = 0;
+ clear_bit(0, &driver_open);
+
+ return 0;
+
+unmap_io:
+ iounmap(xdev.base);
+release_mem:
+ release_mem_region(xdev.res.start, resource_size(&xdev.res));
+err_out:
+ return rc;
+}
+
+static int __devexit xwdt_remove(struct platform_device *dev)
+{
+ misc_deregister(&xwdt_miscdev);
+ iounmap(xdev.base);
+ release_mem_region(xdev.res.start, resource_size(&xdev.res));
+
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static struct of_device_id __devinitdata xwdt_of_match[] = {
+ { .compatible = "xlnx,xps-timebase-wdt-1.01.a", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xwdt_of_match);
+
+static struct platform_driver xwdt_driver = {
+ .probe = xwdt_probe,
+ .remove = __devexit_p(xwdt_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = WATCHDOG_NAME,
+ .of_match_table = xwdt_of_match,
+ },
+};
+
+static int __init xwdt_init(void)
+{
+ return platform_driver_register(&xwdt_driver);
+}
+
+static void __exit xwdt_exit(void)
+{
+ platform_driver_unregister(&xwdt_driver);
+}
+
+module_init(xwdt_init);
+module_exit(xwdt_exit);
+
+MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>");
+MODULE_DESCRIPTION("Xilinx Watchdog driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index b7c13905157..e78d8998676 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -56,6 +56,7 @@
#define IO_DEFAULT 0x2E /* Address used on Portwell Boards */
static int io = IO_DEFAULT;
+static int swc_base_addr = -1;
static int timeout = DEFAULT_TIMEOUT; /* timeout value */
static unsigned long timer_enabled; /* is the timer enabled? */
@@ -116,9 +117,8 @@ static inline void pc87413_enable_swc(void)
/* Read SWC I/O base address */
-static inline unsigned int pc87413_get_swc_base(void)
+static void pc87413_get_swc_base_addr(void)
{
- unsigned int swc_base_addr = 0;
unsigned char addr_l, addr_h = 0;
/* Step 3: Read SWC I/O Base Address */
@@ -136,12 +136,11 @@ static inline unsigned int pc87413_get_swc_base(void)
"Read SWC I/O Base Address: low %d, high %d, res %d\n",
addr_l, addr_h, swc_base_addr);
#endif
- return swc_base_addr;
}
/* Select Bank 3 of SWC */
-static inline void pc87413_swc_bank3(unsigned int swc_base_addr)
+static inline void pc87413_swc_bank3(void)
{
/* Step 4: Select Bank3 of SWC */
outb_p(inb(swc_base_addr + 0x0f) | 0x03, swc_base_addr + 0x0f);
@@ -152,8 +151,7 @@ static inline void pc87413_swc_bank3(unsigned int swc_base_addr)
/* Set watchdog timeout to x minutes */
-static inline void pc87413_programm_wdto(unsigned int swc_base_addr,
- char pc87413_time)
+static inline void pc87413_programm_wdto(char pc87413_time)
{
/* Step 5: Programm WDTO, Twd. */
outb_p(pc87413_time, swc_base_addr + WDTO);
@@ -164,7 +162,7 @@ static inline void pc87413_programm_wdto(unsigned int swc_base_addr,
/* Enable WDEN */
-static inline void pc87413_enable_wden(unsigned int swc_base_addr)
+static inline void pc87413_enable_wden(void)
{
/* Step 6: Enable WDEN */
outb_p(inb(swc_base_addr + WDCTL) | 0x01, swc_base_addr + WDCTL);
@@ -174,7 +172,7 @@ static inline void pc87413_enable_wden(unsigned int swc_base_addr)
}
/* Enable SW_WD_TREN */
-static inline void pc87413_enable_sw_wd_tren(unsigned int swc_base_addr)
+static inline void pc87413_enable_sw_wd_tren(void)
{
/* Enable SW_WD_TREN */
outb_p(inb(swc_base_addr + WDCFG) | 0x80, swc_base_addr + WDCFG);
@@ -185,7 +183,7 @@ static inline void pc87413_enable_sw_wd_tren(unsigned int swc_base_addr)
/* Disable SW_WD_TREN */
-static inline void pc87413_disable_sw_wd_tren(unsigned int swc_base_addr)
+static inline void pc87413_disable_sw_wd_tren(void)
{
/* Disable SW_WD_TREN */
outb_p(inb(swc_base_addr + WDCFG) & 0x7f, swc_base_addr + WDCFG);
@@ -196,7 +194,7 @@ static inline void pc87413_disable_sw_wd_tren(unsigned int swc_base_addr)
/* Enable SW_WD_TRG */
-static inline void pc87413_enable_sw_wd_trg(unsigned int swc_base_addr)
+static inline void pc87413_enable_sw_wd_trg(void)
{
/* Enable SW_WD_TRG */
outb_p(inb(swc_base_addr + WDCTL) | 0x80, swc_base_addr + WDCTL);
@@ -207,7 +205,7 @@ static inline void pc87413_enable_sw_wd_trg(unsigned int swc_base_addr)
/* Disable SW_WD_TRG */
-static inline void pc87413_disable_sw_wd_trg(unsigned int swc_base_addr)
+static inline void pc87413_disable_sw_wd_trg(void)
{
/* Disable SW_WD_TRG */
outb_p(inb(swc_base_addr + WDCTL) & 0x7f, swc_base_addr + WDCTL);
@@ -222,18 +220,13 @@ static inline void pc87413_disable_sw_wd_trg(unsigned int swc_base_addr)
static void pc87413_enable(void)
{
- unsigned int swc_base_addr;
-
spin_lock(&io_lock);
- pc87413_select_wdt_out();
- pc87413_enable_swc();
- swc_base_addr = pc87413_get_swc_base();
- pc87413_swc_bank3(swc_base_addr);
- pc87413_programm_wdto(swc_base_addr, timeout);
- pc87413_enable_wden(swc_base_addr);
- pc87413_enable_sw_wd_tren(swc_base_addr);
- pc87413_enable_sw_wd_trg(swc_base_addr);
+ pc87413_swc_bank3();
+ pc87413_programm_wdto(timeout);
+ pc87413_enable_wden();
+ pc87413_enable_sw_wd_tren();
+ pc87413_enable_sw_wd_trg();
spin_unlock(&io_lock);
}
@@ -242,17 +235,12 @@ static void pc87413_enable(void)
static void pc87413_disable(void)
{
- unsigned int swc_base_addr;
-
spin_lock(&io_lock);
- pc87413_select_wdt_out();
- pc87413_enable_swc();
- swc_base_addr = pc87413_get_swc_base();
- pc87413_swc_bank3(swc_base_addr);
- pc87413_disable_sw_wd_tren(swc_base_addr);
- pc87413_disable_sw_wd_trg(swc_base_addr);
- pc87413_programm_wdto(swc_base_addr, 0);
+ pc87413_swc_bank3();
+ pc87413_disable_sw_wd_tren();
+ pc87413_disable_sw_wd_trg();
+ pc87413_programm_wdto(0);
spin_unlock(&io_lock);
}
@@ -261,20 +249,15 @@ static void pc87413_disable(void)
static void pc87413_refresh(void)
{
- unsigned int swc_base_addr;
-
spin_lock(&io_lock);
- pc87413_select_wdt_out();
- pc87413_enable_swc();
- swc_base_addr = pc87413_get_swc_base();
- pc87413_swc_bank3(swc_base_addr);
- pc87413_disable_sw_wd_tren(swc_base_addr);
- pc87413_disable_sw_wd_trg(swc_base_addr);
- pc87413_programm_wdto(swc_base_addr, timeout);
- pc87413_enable_wden(swc_base_addr);
- pc87413_enable_sw_wd_tren(swc_base_addr);
- pc87413_enable_sw_wd_trg(swc_base_addr);
+ pc87413_swc_bank3();
+ pc87413_disable_sw_wd_tren();
+ pc87413_disable_sw_wd_trg();
+ pc87413_programm_wdto(timeout);
+ pc87413_enable_wden();
+ pc87413_enable_sw_wd_tren();
+ pc87413_enable_sw_wd_trg();
spin_unlock(&io_lock);
}
@@ -528,7 +511,8 @@ static int __init pc87413_init(void)
printk(KERN_INFO PFX "Version " VERSION " at io 0x%X\n",
WDT_INDEX_IO_PORT);
- /* request_region(io, 2, "pc87413"); */
+ if (!request_muxed_region(io, 2, MODNAME))
+ return -EBUSY;
ret = register_reboot_notifier(&pc87413_notifier);
if (ret != 0) {
@@ -541,12 +525,32 @@ static int __init pc87413_init(void)
printk(KERN_ERR PFX
"cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
- unregister_reboot_notifier(&pc87413_notifier);
- return ret;
+ goto reboot_unreg;
}
printk(KERN_INFO PFX "initialized. timeout=%d min \n", timeout);
+
+ pc87413_select_wdt_out();
+ pc87413_enable_swc();
+ pc87413_get_swc_base_addr();
+
+ if (!request_region(swc_base_addr, 0x20, MODNAME)) {
+ printk(KERN_ERR PFX
+ "cannot request SWC region at 0x%x\n", swc_base_addr);
+ ret = -EBUSY;
+ goto misc_unreg;
+ }
+
pc87413_enable();
+
+ release_region(io, 2);
return 0;
+
+misc_unreg:
+ misc_deregister(&pc87413_miscdev);
+reboot_unreg:
+ unregister_reboot_notifier(&pc87413_notifier);
+ release_region(io, 2);
+ return ret;
}
/**
@@ -569,7 +573,7 @@ static void __exit pc87413_exit(void)
misc_deregister(&pc87413_miscdev);
unregister_reboot_notifier(&pc87413_notifier);
- /* release_region(io, 2); */
+ release_region(swc_base_addr, 0x20);
printk(KERN_INFO MODNAME " watchdog component driver removed.\n");
}
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index f7f5aa00df6..30da88f47cd 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -589,6 +589,15 @@ static int s3c2410wdt_resume(struct platform_device *dev)
#define s3c2410wdt_resume NULL
#endif /* CONFIG_PM */
+#ifdef CONFIG_OF
+static const struct of_device_id s3c2410_wdt_match[] = {
+ { .compatible = "samsung,s3c2410-wdt" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
+#else
+#define s3c2410_wdt_match NULL
+#endif
static struct platform_driver s3c2410wdt_driver = {
.probe = s3c2410wdt_probe,
@@ -599,6 +608,7 @@ static struct platform_driver s3c2410wdt_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "s3c2410-wdt",
+ .of_match_table = s3c2410_wdt_match,
},
};
diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c
index ff11504c376..93ac5895312 100644
--- a/drivers/watchdog/sbc7240_wdt.c
+++ b/drivers/watchdog/sbc7240_wdt.c
@@ -29,7 +29,7 @@
#include <linux/watchdog.h>
#include <linux/io.h>
#include <linux/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/system.h>
#define SBC7240_PREFIX "sbc7240_wdt: "
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index c7cf4b01f58..029467e3463 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -472,15 +472,10 @@ static void sch311x_wdt_shutdown(struct platform_device *dev)
sch311x_wdt_stop();
}
-#define sch311x_wdt_suspend NULL
-#define sch311x_wdt_resume NULL
-
static struct platform_driver sch311x_wdt_driver = {
.probe = sch311x_wdt_probe,
.remove = __devexit_p(sch311x_wdt_remove),
.shutdown = sch311x_wdt_shutdown,
- .suspend = sch311x_wdt_suspend,
- .resume = sch311x_wdt_resume,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index db84f2322d1..a267dc078da 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -64,7 +64,7 @@
* misses its deadline, the kernel timer will allow the WDT to overflow.
*/
static int clock_division_ratio = WTCSR_CKS_4096;
-#define next_ping_period(cks) msecs_to_jiffies(cks - 4)
+#define next_ping_period(cks) (jiffies + msecs_to_jiffies(cks - 4))
static const struct watchdog_info sh_wdt_info;
static struct platform_device *sh_wdt_dev;
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 0d80e08b643..cc2cfbe33b3 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -134,6 +134,8 @@ static void wdt_enable(void)
writel(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
writel(LOCK, wdt->base + WDTLOCK);
+ /* Flush posted writes. */
+ readl(wdt->base + WDTLOCK);
spin_unlock(&wdt->lock);
}
@@ -144,9 +146,10 @@ static void wdt_disable(void)
writel(UNLOCK, wdt->base + WDTLOCK);
writel(0, wdt->base + WDTCONTROL);
- writel(0, wdt->base + WDTLOAD);
writel(LOCK, wdt->base + WDTLOCK);
+ /* Flush posted writes. */
+ readl(wdt->base + WDTLOCK);
spin_unlock(&wdt->lock);
}
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
new file mode 100644
index 00000000000..cfa1a1518aa
--- /dev/null
+++ b/drivers/watchdog/watchdog_core.c
@@ -0,0 +1,111 @@
+/*
+ * watchdog_core.c
+ *
+ * (c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ * All Rights Reserved.
+ *
+ * (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
+ *
+ * This source code is part of the generic code that can be used
+ * by all the watchdog timer drivers.
+ *
+ * Based on source code of the following authors:
+ * Matt Domsch <Matt_Domsch@dell.com>,
+ * Rob Radez <rob@osinvestor.com>,
+ * Rusty Lynch <rusty@linux.co.intel.com>
+ * Satyam Sharma <satyam@infradead.org>
+ * Randy Dunlap <randy.dunlap@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
+ * admit liability nor provide warranty for any of this software.
+ * This material is provided "AS-IS" and at no charge.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h> /* For EXPORT_SYMBOL/module stuff/... */
+#include <linux/types.h> /* For standard types */
+#include <linux/errno.h> /* For the -ENODEV/... values */
+#include <linux/kernel.h> /* For printk/panic/... */
+#include <linux/watchdog.h> /* For watchdog specific items */
+#include <linux/init.h> /* For __init/__exit/... */
+
+#include "watchdog_dev.h" /* For watchdog_dev_register/... */
+
+/**
+ * watchdog_register_device() - register a watchdog device
+ * @wdd: watchdog device
+ *
+ * Register a watchdog device with the kernel so that the
+ * watchdog timer can be accessed from userspace.
+ *
+ * A zero is returned on success and a negative errno code for
+ * failure.
+ */
+int watchdog_register_device(struct watchdog_device *wdd)
+{
+ int ret;
+
+ if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
+ return -EINVAL;
+
+ /* Mandatory operations need to be supported */
+ if (wdd->ops->start == NULL || wdd->ops->stop == NULL)
+ return -EINVAL;
+
+ /*
+ * Check that we have valid min and max timeout values, if
+ * not reset them both to 0 (=not used or unknown)
+ */
+ if (wdd->min_timeout > wdd->max_timeout) {
+ pr_info("Invalid min and max timeout values, resetting to 0!\n");
+ wdd->min_timeout = 0;
+ wdd->max_timeout = 0;
+ }
+
+ /*
+ * Note: now that all watchdog_device data has been verified, we
+ * will not check this anymore in other functions. If data gets
+ * corrupted in a later stage then we expect a kernel panic!
+ */
+
+ /* We only support 1 watchdog device via the /dev/watchdog interface */
+ ret = watchdog_dev_register(wdd);
+ if (ret) {
+ pr_err("error registering /dev/watchdog (err=%d).\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(watchdog_register_device);
+
+/**
+ * watchdog_unregister_device() - unregister a watchdog device
+ * @wdd: watchdog device to unregister
+ *
+ * Unregister a watchdog device that was previously successfully
+ * registered with watchdog_register_device().
+ */
+void watchdog_unregister_device(struct watchdog_device *wdd)
+{
+ int ret;
+
+ if (wdd == NULL)
+ return;
+
+ ret = watchdog_dev_unregister(wdd);
+ if (ret)
+ pr_err("error unregistering /dev/watchdog (err=%d).\n", ret);
+}
+EXPORT_SYMBOL_GPL(watchdog_unregister_device);
+
+MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
+MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
+MODULE_DESCRIPTION("WatchDog Timer Driver Core");
+MODULE_LICENSE("GPL");
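A minimal sketch of a driver built on this new core, assuming a hypothetical foo_wdt device; the names and the empty start/stop callbacks are illustrative only, not part of this patch:

/* Hypothetical driver sketch using the new watchdog core (illustrative only). */
#include <linux/module.h>
#include <linux/watchdog.h>

static int foo_wdt_start(struct watchdog_device *wdd)
{
	/* a real driver would kick its hardware timer here */
	return 0;
}

static int foo_wdt_stop(struct watchdog_device *wdd)
{
	/* a real driver would halt its hardware timer here */
	return 0;
}

static const struct watchdog_info foo_wdt_info = {
	.identity = "foo_wdt",
	.options  = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
};

static const struct watchdog_ops foo_wdt_ops = {
	.owner = THIS_MODULE,
	.start = foo_wdt_start,
	.stop  = foo_wdt_stop,
};

static struct watchdog_device foo_wdt_dev = {
	.info = &foo_wdt_info,
	.ops  = &foo_wdt_ops,
};

static int __init foo_wdt_init(void)
{
	return watchdog_register_device(&foo_wdt_dev);
}

static void __exit foo_wdt_exit(void)
{
	watchdog_unregister_device(&foo_wdt_dev);
}

module_init(foo_wdt_init);
module_exit(foo_wdt_exit);
MODULE_LICENSE("GPL");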
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
new file mode 100644
index 00000000000..d33520d0b4c
--- /dev/null
+++ b/drivers/watchdog/watchdog_dev.c
@@ -0,0 +1,395 @@
+/*
+ * watchdog_dev.c
+ *
+ * (c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ * All Rights Reserved.
+ *
+ * (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
+ *
+ *
+ * This source code is part of the generic code that can be used
+ * by all the watchdog timer drivers.
+ *
+ * This part of the generic code takes care of the following
+ * misc device: /dev/watchdog.
+ *
+ * Based on source code of the following authors:
+ * Matt Domsch <Matt_Domsch@dell.com>,
+ * Rob Radez <rob@osinvestor.com>,
+ * Rusty Lynch <rusty@linux.co.intel.com>
+ * Satyam Sharma <satyam@infradead.org>
+ * Randy Dunlap <randy.dunlap@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
+ * admit liability nor provide warranty for any of this software.
+ * This material is provided "AS-IS" and at no charge.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h> /* For module stuff/... */
+#include <linux/types.h> /* For standard types (like size_t) */
+#include <linux/errno.h> /* For the -ENODEV/... values */
+#include <linux/kernel.h> /* For printk/panic/... */
+#include <linux/fs.h> /* For file operations */
+#include <linux/watchdog.h> /* For watchdog specific items */
+#include <linux/miscdevice.h> /* For handling misc devices */
+#include <linux/init.h> /* For __init/__exit/... */
+#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
+
+/* make sure we only register one /dev/watchdog device */
+static unsigned long watchdog_dev_busy;
+/* the watchdog device behind /dev/watchdog */
+static struct watchdog_device *wdd;
+
+/*
+ * watchdog_ping: ping the watchdog.
+ * @wddev: the watchdog device to ping
+ *
+ * If the watchdog has no ping operation of its own then it needs to be
+ * restarted via the start operation. This wrapper function does
+ * exactly that.
+ * We only ping when the watchdog device is running.
+ */
+
+static int watchdog_ping(struct watchdog_device *wddev)
+{
+ if (test_bit(WDOG_ACTIVE, &wdd->status)) {
+ if (wddev->ops->ping)
+ return wddev->ops->ping(wddev); /* ping the watchdog */
+ else
+ return wddev->ops->start(wddev); /* restart watchdog */
+ }
+ return 0;
+}
+
+/*
+ * watchdog_start: wrapper to start the watchdog.
+ * @wddev: the watchdog device to start
+ *
+ * Start the watchdog if it is not active and mark it active.
+ * This function returns zero on success or a negative errno code for
+ * failure.
+ */
+
+static int watchdog_start(struct watchdog_device *wddev)
+{
+ int err;
+
+ if (!test_bit(WDOG_ACTIVE, &wdd->status)) {
+ err = wddev->ops->start(wddev);
+ if (err < 0)
+ return err;
+
+ set_bit(WDOG_ACTIVE, &wdd->status);
+ }
+ return 0;
+}
+
+/*
+ * watchdog_stop: wrapper to stop the watchdog.
+ * @wddev: the watchdog device to stop
+ *
+ * Stop the watchdog if it is still active and unmark it active.
+ * This function returns zero on success or a negative errno code for
+ * failure.
+ * If the 'nowayout' feature was set, the watchdog cannot be stopped.
+ */
+
+static int watchdog_stop(struct watchdog_device *wddev)
+{
+ int err = -EBUSY;
+
+ if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
+ pr_info("%s: nowayout prevents watchdog to be stopped!\n",
+ wdd->info->identity);
+ return err;
+ }
+
+ if (test_bit(WDOG_ACTIVE, &wdd->status)) {
+ err = wddev->ops->stop(wddev);
+ if (err < 0)
+ return err;
+
+ clear_bit(WDOG_ACTIVE, &wdd->status);
+ }
+ return 0;
+}
+
+/*
+ * watchdog_write: writes to the watchdog.
+ * @file: file from VFS
+ * @data: user address of data
+ * @len: length of data
+ * @ppos: pointer to the file offset
+ *
+ * A write to a watchdog device is defined as a keepalive ping.
+ * Writing the magic 'V' sequence allows the next close to turn
+ * off the watchdog (if 'nowayout' is not set).
+ */
+
+static ssize_t watchdog_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ size_t i;
+ char c;
+
+ if (len == 0)
+ return 0;
+
+ /*
+ * Note: just in case someone wrote the magic character
+ * five months ago...
+ */
+ clear_bit(WDOG_ALLOW_RELEASE, &wdd->status);
+
+ /* scan to see whether or not we got the magic character */
+ for (i = 0; i != len; i++) {
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ set_bit(WDOG_ALLOW_RELEASE, &wdd->status);
+ }
+
+ /* someone wrote to us, so we send the watchdog a keepalive ping */
+ watchdog_ping(wdd);
+
+ return len;
+}
+
+/*
+ * watchdog_ioctl: handle the different ioctl's for the watchdog device.
+ * @file: file handle to the device
+ * @cmd: watchdog command
+ * @arg: argument pointer
+ *
+ * The watchdog API defines a common set of functions for all watchdogs
+ * according to their available features.
+ */
+
+static long watchdog_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int __user *p = argp;
+ unsigned int val;
+ int err;
+
+ if (wdd->ops->ioctl) {
+ err = wdd->ops->ioctl(wdd, cmd, arg);
+ if (err != -ENOIOCTLCMD)
+ return err;
+ }
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, wdd->info,
+ sizeof(struct watchdog_info)) ? -EFAULT : 0;
+ case WDIOC_GETSTATUS:
+ val = wdd->ops->status ? wdd->ops->status(wdd) : 0;
+ return put_user(val, p);
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(wdd->bootstatus, p);
+ case WDIOC_SETOPTIONS:
+ if (get_user(val, p))
+ return -EFAULT;
+ if (val & WDIOS_DISABLECARD) {
+ err = watchdog_stop(wdd);
+ if (err < 0)
+ return err;
+ }
+ if (val & WDIOS_ENABLECARD) {
+ err = watchdog_start(wdd);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+ case WDIOC_KEEPALIVE:
+ if (!(wdd->info->options & WDIOF_KEEPALIVEPING))
+ return -EOPNOTSUPP;
+ watchdog_ping(wdd);
+ return 0;
+ case WDIOC_SETTIMEOUT:
+ if ((wdd->ops->set_timeout == NULL) ||
+ !(wdd->info->options & WDIOF_SETTIMEOUT))
+ return -EOPNOTSUPP;
+ if (get_user(val, p))
+ return -EFAULT;
+ if ((wdd->max_timeout != 0) &&
+ (val < wdd->min_timeout || val > wdd->max_timeout))
+ return -EINVAL;
+ err = wdd->ops->set_timeout(wdd, val);
+ if (err < 0)
+ return err;
+ wdd->timeout = val;
+ /* If the watchdog is active then we send a keepalive ping
+ * to make sure that the watchdog keeps running (and if
+ * possible that it takes the new timeout) */
+ watchdog_ping(wdd);
+ /* Fall through */
+ case WDIOC_GETTIMEOUT:
+ /* timeout == 0 means that we don't know the timeout */
+ if (wdd->timeout == 0)
+ return -EOPNOTSUPP;
+ return put_user(wdd->timeout, p);
+ default:
+ return -ENOTTY;
+ }
+}
+
+/*
+ * watchdog_open: open the /dev/watchdog device.
+ * @inode: inode of device
+ * @file: file handle to device
+ *
+ * When the /dev/watchdog device gets opened, we start the watchdog.
+ * Watch out: the /dev/watchdog device is single open, so we make sure
+ * it can only be opened once.
+ */
+
+static int watchdog_open(struct inode *inode, struct file *file)
+{
+ int err = -EBUSY;
+
+ /* the watchdog is single open! */
+ if (test_and_set_bit(WDOG_DEV_OPEN, &wdd->status))
+ return -EBUSY;
+
+ /*
+ * If the /dev/watchdog device is open, we don't want the module
+ * to be unloaded.
+ */
+ if (!try_module_get(wdd->ops->owner))
+ goto out;
+
+ err = watchdog_start(wdd);
+ if (err < 0)
+ goto out_mod;
+
+ /* /dev/watchdog is a virtual (and thus non-seekable) filesystem */
+ return nonseekable_open(inode, file);
+
+out_mod:
+ module_put(wdd->ops->owner);
+out:
+ clear_bit(WDOG_DEV_OPEN, &wdd->status);
+ return err;
+}
+
+/*
+ * watchdog_release: release the /dev/watchdog device.
+ * @inode: inode of device
+ * @file: file handle to device
+ *
+ * This is the code for when /dev/watchdog gets closed. We will only
+ * stop the watchdog when we have received the magic char (and nowayout
+ * was not set), else the watchdog will keep running.
+ */
+
+static int watchdog_release(struct inode *inode, struct file *file)
+{
+ int err = -EBUSY;
+
+ /*
+ * We only stop the watchdog if we received the magic character
+ * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
+ * watchdog_stop will fail.
+ */
+ if (test_and_clear_bit(WDOG_ALLOW_RELEASE, &wdd->status) ||
+ !(wdd->info->options & WDIOF_MAGICCLOSE))
+ err = watchdog_stop(wdd);
+
+ /* If the watchdog was not stopped, send a keepalive ping */
+ if (err < 0) {
+ pr_crit("%s: watchdog did not stop!\n", wdd->info->identity);
+ watchdog_ping(wdd);
+ }
+
+ /* Allow the owner module to be unloaded again */
+ module_put(wdd->ops->owner);
+
+ /* make sure that /dev/watchdog can be re-opened */
+ clear_bit(WDOG_DEV_OPEN, &wdd->status);
+
+ return 0;
+}
+
+static const struct file_operations watchdog_fops = {
+ .owner = THIS_MODULE,
+ .write = watchdog_write,
+ .unlocked_ioctl = watchdog_ioctl,
+ .open = watchdog_open,
+ .release = watchdog_release,
+};
+
+static struct miscdevice watchdog_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &watchdog_fops,
+};
+
+/*
+ * watchdog_dev_register:
+ * @watchdog: watchdog device
+ *
+ * Register a watchdog device as /dev/watchdog. /dev/watchdog
+ * is actually a miscdevice and thus we set it up like that.
+ */
+
+int watchdog_dev_register(struct watchdog_device *watchdog)
+{
+ int err;
+
+ /* Only one device can register for /dev/watchdog */
+ if (test_and_set_bit(0, &watchdog_dev_busy)) {
+ pr_err("only one watchdog can use /dev/watchdog.\n");
+ return -EBUSY;
+ }
+
+ wdd = watchdog;
+
+ err = misc_register(&watchdog_miscdev);
+ if (err != 0) {
+ pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
+ watchdog->info->identity, WATCHDOG_MINOR, err);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ wdd = NULL;
+ clear_bit(0, &watchdog_dev_busy);
+ return err;
+}
+
+/*
+ * watchdog_dev_unregister:
+ * @watchdog: watchdog device
+ *
+ * Deregister the /dev/watchdog device.
+ */
+
+int watchdog_dev_unregister(struct watchdog_device *watchdog)
+{
+ /* Check that a watchdog device was registered in the past */
+ if (!test_bit(0, &watchdog_dev_busy) || !wdd)
+ return -ENODEV;
+
+ /* We can only unregister the watchdog device that was registered */
+ if (watchdog != wdd) {
+ pr_err("%s: watchdog was not registered as /dev/watchdog.\n",
+ watchdog->info->identity);
+ return -ENODEV;
+ }
+
+ misc_deregister(&watchdog_miscdev);
+ wdd = NULL;
+ clear_bit(0, &watchdog_dev_busy);
+ return 0;
+}
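A minimal user-space sketch of the /dev/watchdog protocol implemented above (illustrative only; error handling is omitted and the 30-second timeout is an arbitrary example value):

/* User-space sketch of the /dev/watchdog protocol (illustrative only). */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30;	/* arbitrary example value, in seconds */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;

	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* negotiate the timeout */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* keepalive ping */
	write(fd, "V", 1);	/* magic character: allow the next close to stop it */
	close(fd);		/* watchdog stops here unless nowayout is set */
	return 0;
}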
diff --git a/drivers/watchdog/watchdog_dev.h b/drivers/watchdog/watchdog_dev.h
new file mode 100644
index 00000000000..bc7612be25c
--- /dev/null
+++ b/drivers/watchdog/watchdog_dev.h
@@ -0,0 +1,33 @@
+/*
+ * watchdog_dev.h
+ *
+ * (c) Copyright 2008-2011 Alan Cox <alan@lxorguk.ukuu.org.uk>,
+ * All Rights Reserved.
+ *
+ * (c) Copyright 2008-2011 Wim Van Sebroeck <wim@iguana.be>.
+ *
+ * This source code is part of the generic code that can be used
+ * by all the watchdog timer drivers.
+ *
+ * Based on source code of the following authors:
+ * Matt Domsch <Matt_Domsch@dell.com>,
+ * Rob Radez <rob@osinvestor.com>,
+ * Rusty Lynch <rusty@linux.co.intel.com>
+ * Satyam Sharma <satyam@infradead.org>
+ * Randy Dunlap <randy.dunlap@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
+ * admit liability nor provide warranty for any of this software.
+ * This material is provided "AS-IS" and at no charge.
+ */
+
+/*
+ * Functions/procedures to be called by the core
+ */
+int watchdog_dev_register(struct watchdog_device *);
+int watchdog_dev_unregister(struct watchdog_device *);
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 8c4b2d5bb7d..871caea4e1c 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -320,6 +320,11 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
struct wm831x_watchdog_pdata *pdata;
int reg, ret;
+ if (wm831x) {
+ dev_err(&pdev->dev, "wm831x watchdog already registered\n");
+ return -EBUSY;
+ }
+
wm831x = dev_get_drvdata(pdev->dev.parent);
ret = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index a59638b37c1..5f7ff8e2fc1 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -9,6 +9,53 @@ config XEN_BALLOON
the system to expand the domain's memory allocation, or alternatively
return unneeded memory to the system.
+config XEN_SELFBALLOONING
+ bool "Dynamically self-balloon kernel memory to target"
+ depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP && XEN_TMEM
+ default n
+ help
+ Self-ballooning dynamically balloons available kernel memory driven
+ by the current usage of anonymous memory ("committed AS") and
+ controlled by various sysfs-settable parameters. Configuring
+ FRONTSWAP is highly recommended; if it is not configured, self-
+ ballooning is disabled by default but can be enabled with the
+ 'selfballooning' kernel boot parameter. If FRONTSWAP is configured,
+ frontswap-selfshrinking is enabled by default but can be disabled
+ with the 'noselfshrink' kernel boot parameter; and self-ballooning
+ is enabled by default but can be disabled with the 'noselfballooning'
+ kernel boot parameter. Note that systems without a sufficiently
+ large swap device should not enable self-ballooning.
+
+config XEN_BALLOON_MEMORY_HOTPLUG
+ bool "Memory hotplug support for Xen balloon driver"
+ default n
+ depends on XEN_BALLOON && MEMORY_HOTPLUG
+ help
+ Memory hotplug support for the Xen balloon driver allows expanding the
+ memory available to the system beyond the limit declared at system startup.
+ It is very useful on critical systems which require long uptime
+ without rebooting.
+
+ Memory can be hotplugged in the following steps:
+
+ 1) dom0: xl mem-max <domU> <maxmem>
+ where <maxmem> is >= the requested memory size,
+
+ 2) dom0: xl mem-set <domU> <memory>
+ where <memory> is the requested memory size; alternatively, memory
+ can be added by writing the proper value to
+ /sys/devices/system/xen_memory/xen_memory0/target or
+ /sys/devices/system/xen_memory/xen_memory0/target_kb on domU,
+
+ 3) domU: for i in /sys/devices/system/memory/memory*/state; do \
+ [ "`cat "$i"`" = offline ] && echo online > "$i"; done
+
+ Memory can be onlined automatically on domU by adding the following line to the udev rules:
+
+ SUBSYSTEM=="memory", ACTION=="add", RUN+="/bin/sh -c '[ -f /sys$devpath/state ] && echo online > /sys$devpath/state'"
+
+ In that case step 3 should be omitted.
+
config XEN_SCRUB_PAGES
bool "Scrub pages before returning them to system"
depends on XEN_BALLOON
@@ -105,4 +152,33 @@ config SWIOTLB_XEN
depends on PCI
select SWIOTLB
+config XEN_TMEM
+ bool
+ default y if (CLEANCACHE || FRONTSWAP)
+ help
+ Shim to interface in-kernel Transcendent Memory hooks
+ (e.g. cleancache and frontswap) to Xen tmem hypercalls.
+
+config XEN_PCIDEV_BACKEND
+ tristate "Xen PCI-device backend driver"
+ depends on PCI && X86 && XEN
+ depends on XEN_BACKEND
+ default m
+ help
+ The PCI device backend driver allows the kernel to export arbitrary
+ PCI devices to other guests. If you select this to be a module, you
+ will need to make sure no other driver has bound to the device(s)
+ you want to make visible to other guests.
+
+ The parameter "passthrough" allows you to specify how you want the PCI
+ devices to appear in the guest. You can choose the default (0), where the
+ PCI topology starts at 00.00.0, or (1) for passthrough, if you want the
+ PCI device topology to appear the same as in the host.
+
+ The "hide" parameter (only applicable if the backend driver is compiled
+ into the kernel) allows you to hide PCI devices from their default device
+ drivers and bind them to this module instead. The argument is the list of PCI BDFs:
+ xen-pciback.hide=(03:00.0)(04:00.0)
+
+ If in doubt, say m.
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index bbc18258ecc..72bbb27d7a6 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,6 +1,5 @@
obj-y += grant-table.o features.o events.o manage.o balloon.o
obj-y += xenbus/
-obj-y += tmem.o
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_features.o := $(nostackp)
@@ -9,14 +8,17 @@ obj-$(CONFIG_BLOCK) += biomerge.o
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
+obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o
obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o
+obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_DOM0) += pci.o
+obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index f54290baa3d..5dfd8f8ff07 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -4,6 +4,12 @@
* Copyright (c) 2003, B Dragovic
* Copyright (c) 2003-2004, M Williamson, K Fraser
* Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ * Copyright (c) 2010 Daniel Kiper
+ *
+ * Memory hotplug support was written by Daniel Kiper. Work on
+ * it was sponsored by Google under Google Summer of Code 2010
+ * program. Jeremy Fitzhardinge from Citrix was the mentor for
+ * this project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
@@ -40,6 +46,9 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
+#include <linux/notifier.h>
+#include <linux/memory.h>
+#include <linux/memory_hotplug.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
@@ -194,6 +203,87 @@ static enum bp_state update_schedule(enum bp_state state)
return BP_EAGAIN;
}
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+static long current_credit(void)
+{
+ return balloon_stats.target_pages - balloon_stats.current_pages -
+ balloon_stats.hotplug_pages;
+}
+
+static bool balloon_is_inflated(void)
+{
+ if (balloon_stats.balloon_low || balloon_stats.balloon_high ||
+ balloon_stats.balloon_hotplug)
+ return true;
+ else
+ return false;
+}
+
+/*
+ * reserve_additional_memory() adds a memory region of size >= credit above
+ * max_pfn. The new region is section aligned and its size is rounded up to a
+ * multiple of the section size. This allows optimal use of the address space
+ * and establishes proper alignment when this function is called for the first
+ * time after boot (the last section, which is not fully populated at boot,
+ * contains unused memory pages with the PG_reserved bit not set, and
+ * online_pages_range() does not allow onlining a whole range if the first
+ * onlined page does not have the PG_reserved bit set). The real size of the
+ * added memory is established at the page onlining stage.
+ */
+
+static enum bp_state reserve_additional_memory(long credit)
+{
+ int nid, rc;
+ u64 hotplug_start_paddr;
+ unsigned long balloon_hotplug = credit;
+
+ hotplug_start_paddr = PFN_PHYS(SECTION_ALIGN_UP(max_pfn));
+ balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
+ nid = memory_add_physaddr_to_nid(hotplug_start_paddr);
+
+ rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);
+
+ if (rc) {
+ pr_info("xen_balloon: %s: add_memory() failed: %i\n", __func__, rc);
+ return BP_EAGAIN;
+ }
+
+ balloon_hotplug -= credit;
+
+ balloon_stats.hotplug_pages += credit;
+ balloon_stats.balloon_hotplug = balloon_hotplug;
+
+ return BP_DONE;
+}
+
+static void xen_online_page(struct page *page)
+{
+ __online_page_set_limits(page);
+
+ mutex_lock(&balloon_mutex);
+
+ __balloon_append(page);
+
+ if (balloon_stats.hotplug_pages)
+ --balloon_stats.hotplug_pages;
+ else
+ --balloon_stats.balloon_hotplug;
+
+ mutex_unlock(&balloon_mutex);
+}
+
+static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
+{
+ if (val == MEM_ONLINE)
+ schedule_delayed_work(&balloon_worker, 0);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block xen_memory_nb = {
+ .notifier_call = xen_memory_notifier,
+ .priority = 0
+};
+#else
static long current_credit(void)
{
unsigned long target = balloon_stats.target_pages;
@@ -206,6 +296,21 @@ static long current_credit(void)
return target - balloon_stats.current_pages;
}
+static bool balloon_is_inflated(void)
+{
+ if (balloon_stats.balloon_low || balloon_stats.balloon_high)
+ return true;
+ else
+ return false;
+}
+
+static enum bp_state reserve_additional_memory(long credit)
+{
+ balloon_stats.target_pages = balloon_stats.current_pages;
+ return BP_DONE;
+}
+#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
+
static enum bp_state increase_reservation(unsigned long nr_pages)
{
int rc;
@@ -217,6 +322,15 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
.domid = DOMID_SELF
};
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+ if (!balloon_stats.balloon_low && !balloon_stats.balloon_high) {
+ nr_pages = min(nr_pages, balloon_stats.balloon_hotplug);
+ balloon_stats.hotplug_pages += nr_pages;
+ balloon_stats.balloon_hotplug -= nr_pages;
+ return BP_DONE;
+ }
+#endif
+
if (nr_pages > ARRAY_SIZE(frame_list))
nr_pages = ARRAY_SIZE(frame_list);
@@ -279,6 +393,15 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
.domid = DOMID_SELF
};
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+ if (balloon_stats.hotplug_pages) {
+ nr_pages = min(nr_pages, balloon_stats.hotplug_pages);
+ balloon_stats.hotplug_pages -= nr_pages;
+ balloon_stats.balloon_hotplug += nr_pages;
+ return BP_DONE;
+ }
+#endif
+
if (nr_pages > ARRAY_SIZE(frame_list))
nr_pages = ARRAY_SIZE(frame_list);
@@ -340,8 +463,12 @@ static void balloon_process(struct work_struct *work)
do {
credit = current_credit();
- if (credit > 0)
- state = increase_reservation(credit);
+ if (credit > 0) {
+ if (balloon_is_inflated())
+ state = increase_reservation(credit);
+ else
+ state = reserve_additional_memory(credit);
+ }
if (credit < 0)
state = decrease_reservation(-credit, GFP_BALLOON);
@@ -448,6 +575,14 @@ static int __init balloon_init(void)
balloon_stats.retry_count = 1;
balloon_stats.max_retry_count = RETRY_UNLIMITED;
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+ balloon_stats.hotplug_pages = 0;
+ balloon_stats.balloon_hotplug = 0;
+
+ set_online_page_callback(&xen_online_page);
+ register_memory_notifier(&xen_memory_nb);
+#endif
+
/*
* Initialise the balloon with excess memory space. We need
* to make sure we don't add memory which doesn't exist or
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 30df85d8fca..da70f5c32eb 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -615,11 +615,6 @@ static int find_irq_by_gsi(unsigned gsi)
return -1;
}
-int xen_allocate_pirq_gsi(unsigned gsi)
-{
- return gsi;
-}
-
/*
* Do not make any assumptions regarding the relationship between the
* IRQ number returned here and the Xen pirq argument.
@@ -1693,6 +1688,6 @@ void __init xen_init_IRQ(void)
} else {
irq_ctx_init(smp_processor_id());
if (xen_initial_domain())
- xen_setup_pirqs();
+ pci_xen_initial_domain();
}
}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index fd725cde6ad..4f44b347b24 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -82,7 +82,7 @@ static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
static int get_free_entries(unsigned count)
{
unsigned long flags;
- int ref, rc;
+ int ref, rc = 0;
grant_ref_t head;
spin_lock_irqsave(&gnttab_list_lock, flags);
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 816a44959ef..d369965e8f8 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -1,7 +1,7 @@
/*
* Xen implementation for transcendent memory (tmem)
*
- * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
+ * Copyright (C) 2009-2011 Oracle Corp. All rights reserved.
* Author: Dan Magenheimer
*/
@@ -9,8 +9,14 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
+#include <linux/module.h>
#include <linux/cleancache.h>
+/* temporary ifdef until include/linux/frontswap.h is upstream */
+#ifdef CONFIG_FRONTSWAP
+#include <linux/frontswap.h>
+#endif
+
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
@@ -122,14 +128,8 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}
-static int xen_tmem_destroy_pool(u32 pool_id)
-{
- struct tmem_oid oid = { { 0 } };
-
- return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
-}
-
-int tmem_enabled;
+int tmem_enabled __read_mostly;
+EXPORT_SYMBOL(tmem_enabled);
static int __init enable_tmem(char *s)
{
@@ -139,6 +139,14 @@ static int __init enable_tmem(char *s)
__setup("tmem", enable_tmem);
+#ifdef CONFIG_CLEANCACHE
+static int xen_tmem_destroy_pool(u32 pool_id)
+{
+ struct tmem_oid oid = { { 0 } };
+
+ return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
+}
+
/* cleancache ops */
static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
@@ -240,18 +248,156 @@ static struct cleancache_ops tmem_cleancache_ops = {
.init_shared_fs = tmem_cleancache_init_shared_fs,
.init_fs = tmem_cleancache_init_fs
};
+#endif
-static int __init xen_tmem_init(void)
+#ifdef CONFIG_FRONTSWAP
+/* frontswap tmem operations */
+
+/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
+static int tmem_frontswap_poolid;
+
+/*
+ * Swizzling increases objects per swaptype, increasing tmem concurrency
+ * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
+ */
+#define SWIZ_BITS 4
+#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
+#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
+#define iswiz(_ind) (_ind >> SWIZ_BITS)
+
+static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
- struct cleancache_ops old_ops;
+ struct tmem_oid oid = { .oid = { 0 } };
+ oid.oid[0] = _oswiz(type, ind);
+ return oid;
+}
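A tiny stand-alone illustration of this swizzling (the macros are duplicated below purely for the example): the low SWIZ_BITS of the swap offset select the tmem object, and the remaining bits become the index within that object.

/* Stand-alone illustration of the swizzling above (macros duplicated). */
#include <stdio.h>

#define SWIZ_BITS 4
#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind) (_ind >> SWIZ_BITS)

int main(void)
{
	unsigned int type = 1, ind = 0x25;

	/* swap type 1, offset 0x25 -> object id 0x15, index within object 0x2 */
	printf("oid=0x%x index=0x%x\n", _oswiz(type, ind), iswiz(ind));
	return 0;
}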
+/* returns 0 if the page was successfully put into frontswap, -1 if not */
+static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
+ struct page *page)
+{
+ u64 ind64 = (u64)offset;
+ u32 ind = (u32)offset;
+ unsigned long pfn = page_to_pfn(page);
+ int pool = tmem_frontswap_poolid;
+ int ret;
+
+ if (pool < 0)
+ return -1;
+ if (ind64 != ind)
+ return -1;
+ mb(); /* ensure page is quiescent; tmem may address it with an alias */
+ ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+ /* translate Xen tmem return values to linux semantics */
+ if (ret == 1)
+ return 0;
+ else
+ return -1;
+}
+
+/*
+ * returns 0 if the page was successfully retrieved from frontswap, -1 if
+ * it was not present (should never happen!)
+ */
+static int tmem_frontswap_get_page(unsigned type, pgoff_t offset,
+ struct page *page)
+{
+ u64 ind64 = (u64)offset;
+ u32 ind = (u32)offset;
+ unsigned long pfn = page_to_pfn(page);
+ int pool = tmem_frontswap_poolid;
+ int ret;
+
+ if (pool < 0)
+ return -1;
+ if (ind64 != ind)
+ return -1;
+ ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+ /* translate Xen tmem return values to linux semantics */
+ if (ret == 1)
+ return 0;
+ else
+ return -1;
+}
+
+/* flush a single page from frontswap */
+static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
+{
+ u64 ind64 = (u64)offset;
+ u32 ind = (u32)offset;
+ int pool = tmem_frontswap_poolid;
+
+ if (pool < 0)
+ return;
+ if (ind64 != ind)
+ return;
+ (void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
+}
+
+/* flush all pages from the passed swaptype */
+static void tmem_frontswap_flush_area(unsigned type)
+{
+ int pool = tmem_frontswap_poolid;
+ int ind;
+
+ if (pool < 0)
+ return;
+ for (ind = SWIZ_MASK; ind >= 0; ind--)
+ (void)xen_tmem_flush_object(pool, oswiz(type, ind));
+}
+
+static void tmem_frontswap_init(unsigned ignored)
+{
+ struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;
+
+ /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
+ if (tmem_frontswap_poolid < 0)
+ tmem_frontswap_poolid =
+ xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
+}
+
+static int __initdata use_frontswap = 1;
+
+static int __init no_frontswap(char *s)
+{
+ use_frontswap = 0;
+ return 1;
+}
+
+__setup("nofrontswap", no_frontswap);
+
+static struct frontswap_ops tmem_frontswap_ops = {
+ .put_page = tmem_frontswap_put_page,
+ .get_page = tmem_frontswap_get_page,
+ .flush_page = tmem_frontswap_flush_page,
+ .flush_area = tmem_frontswap_flush_area,
+ .init = tmem_frontswap_init
+};
+#endif
+
+static int __init xen_tmem_init(void)
+{
if (!xen_domain())
return 0;
+#ifdef CONFIG_FRONTSWAP
+ if (tmem_enabled && use_frontswap) {
+ char *s = "";
+ struct frontswap_ops old_ops =
+ frontswap_register_ops(&tmem_frontswap_ops);
+
+ tmem_frontswap_poolid = -1;
+ if (old_ops.init != NULL)
+ s = " (WARNING: frontswap_ops overridden)";
+ printk(KERN_INFO "frontswap enabled, RAM provided by "
+ "Xen Transcendent Memory\n");
+ }
+#endif
#ifdef CONFIG_CLEANCACHE
BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
if (tmem_enabled && use_cleancache) {
char *s = "";
- old_ops = cleancache_register_ops(&tmem_cleancache_ops);
+ struct cleancache_ops old_ops =
+ cleancache_register_ops(&tmem_cleancache_ops);
if (old_ops.init_fs != NULL)
s = " (WARNING: cleancache_ops overridden)";
printk(KERN_INFO "cleancache enabled, RAM provided by "
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index a4ff225ee86..5c9dc43c1e9 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -98,6 +98,8 @@ static int __init balloon_init(void)
register_balloon(&balloon_sysdev);
+ register_xen_selfballooning(&balloon_sysdev);
+
target_watch.callback = watch_target;
xenstore_notifier.notifier_call = balloon_init_watcher;
diff --git a/drivers/xen/xen-pciback/Makefile b/drivers/xen/xen-pciback/Makefile
new file mode 100644
index 00000000000..ffe0ad3438b
--- /dev/null
+++ b/drivers/xen/xen-pciback/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback.o
+
+xen-pciback-y := pci_stub.o pciback_ops.o xenbus.o
+xen-pciback-y += conf_space.o conf_space_header.o \
+ conf_space_capability.o \
+ conf_space_quirks.o vpci.o \
+ passthrough.o
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
new file mode 100644
index 00000000000..a8031445d94
--- /dev/null
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -0,0 +1,438 @@
+/*
+ * PCI Backend - Functions for creating a virtual configuration space for
+ * exported PCI Devices.
+ * It's dangerous to allow PCI Driver Domains to change their
+ * device's resources (memory, i/o ports, interrupts). We need to
+ * restrict changes to certain PCI Configuration registers:
+ * BARs, INTERRUPT_PIN, most registers in the header...
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include "pciback.h"
+#include "conf_space.h"
+#include "conf_space_quirks.h"
+
+#define DRV_NAME "xen-pciback"
+static int permissive;
+module_param(permissive, bool, 0644);
+
+/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
+ * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
+#define DEFINE_PCI_CONFIG(op, size, type) \
+int xen_pcibk_##op##_config_##size \
+(struct pci_dev *dev, int offset, type value, void *data) \
+{ \
+ return pci_##op##_config_##size(dev, offset, value); \
+}
+
+DEFINE_PCI_CONFIG(read, byte, u8 *)
+DEFINE_PCI_CONFIG(read, word, u16 *)
+DEFINE_PCI_CONFIG(read, dword, u32 *)
+
+DEFINE_PCI_CONFIG(write, byte, u8)
+DEFINE_PCI_CONFIG(write, word, u16)
+DEFINE_PCI_CONFIG(write, dword, u32)
+
+static int conf_space_read(struct pci_dev *dev,
+ const struct config_field_entry *entry,
+ int offset, u32 *value)
+{
+ int ret = 0;
+ const struct config_field *field = entry->field;
+
+ *value = 0;
+
+ switch (field->size) {
+ case 1:
+ if (field->u.b.read)
+ ret = field->u.b.read(dev, offset, (u8 *) value,
+ entry->data);
+ break;
+ case 2:
+ if (field->u.w.read)
+ ret = field->u.w.read(dev, offset, (u16 *) value,
+ entry->data);
+ break;
+ case 4:
+ if (field->u.dw.read)
+ ret = field->u.dw.read(dev, offset, value, entry->data);
+ break;
+ }
+ return ret;
+}
+
+static int conf_space_write(struct pci_dev *dev,
+ const struct config_field_entry *entry,
+ int offset, u32 value)
+{
+ int ret = 0;
+ const struct config_field *field = entry->field;
+
+ switch (field->size) {
+ case 1:
+ if (field->u.b.write)
+ ret = field->u.b.write(dev, offset, (u8) value,
+ entry->data);
+ break;
+ case 2:
+ if (field->u.w.write)
+ ret = field->u.w.write(dev, offset, (u16) value,
+ entry->data);
+ break;
+ case 4:
+ if (field->u.dw.write)
+ ret = field->u.dw.write(dev, offset, value,
+ entry->data);
+ break;
+ }
+ return ret;
+}
+
+static inline u32 get_mask(int size)
+{
+ if (size == 1)
+ return 0xff;
+ else if (size == 2)
+ return 0xffff;
+ else
+ return 0xffffffff;
+}
+
+static inline int valid_request(int offset, int size)
+{
+ /* Validate request (no un-aligned requests) */
+ if ((size == 1 || size == 2 || size == 4) && (offset % size) == 0)
+ return 1;
+ return 0;
+}
+
+static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
+ int offset)
+{
+ if (offset >= 0) {
+ new_val_mask <<= (offset * 8);
+ new_val <<= (offset * 8);
+ } else {
+ new_val_mask >>= (offset * -8);
+ new_val >>= (offset * -8);
+ }
+ val = (val & ~new_val_mask) | (new_val & new_val_mask);
+
+ return val;
+}
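To make the shifting concrete, a small stand-alone sketch (the helper is duplicated here only for illustration): merging a one-byte write of 0x11 into the dword 0xaabbccdd at byte offset 1 yields 0xaabb11dd.

/* Stand-alone illustration of merge_value() (helper duplicated). */
#include <stdio.h>

static unsigned int merge_value(unsigned int val, unsigned int new_val,
				unsigned int new_val_mask, int offset)
{
	if (offset >= 0) {
		new_val_mask <<= (offset * 8);
		new_val <<= (offset * 8);
	} else {
		new_val_mask >>= (offset * -8);
		new_val >>= (offset * -8);
	}
	return (val & ~new_val_mask) | (new_val & new_val_mask);
}

int main(void)
{
	/* one-byte write (mask 0xff) of 0x11 at byte offset 1 of 0xaabbccdd */
	printf("0x%08x\n", merge_value(0xaabbccdd, 0x11, 0xff, 1)); /* 0xaabb11dd */
	return 0;
}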
+
+static int pcibios_err_to_errno(int err)
+{
+ switch (err) {
+ case PCIBIOS_SUCCESSFUL:
+ return XEN_PCI_ERR_success;
+ case PCIBIOS_DEVICE_NOT_FOUND:
+ return XEN_PCI_ERR_dev_not_found;
+ case PCIBIOS_BAD_REGISTER_NUMBER:
+ return XEN_PCI_ERR_invalid_offset;
+ case PCIBIOS_FUNC_NOT_SUPPORTED:
+ return XEN_PCI_ERR_not_implemented;
+ case PCIBIOS_SET_FAILED:
+ return XEN_PCI_ERR_access_denied;
+ }
+ return err;
+}
+
+int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
+ u32 *ret_val)
+{
+ int err = 0;
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+ const struct config_field_entry *cfg_entry;
+ const struct config_field *field;
+ int req_start, req_end, field_start, field_end;
+ /* if read fails for any reason, return 0
+ * (as if device didn't respond) */
+ u32 value = 0, tmp_val;
+
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: read %d bytes at 0x%x\n",
+ pci_name(dev), size, offset);
+
+ if (!valid_request(offset, size)) {
+ err = XEN_PCI_ERR_invalid_offset;
+ goto out;
+ }
+
+ /* Get the real value first, then modify as appropriate */
+ switch (size) {
+ case 1:
+ err = pci_read_config_byte(dev, offset, (u8 *) &value);
+ break;
+ case 2:
+ err = pci_read_config_word(dev, offset, (u16 *) &value);
+ break;
+ case 4:
+ err = pci_read_config_dword(dev, offset, &value);
+ break;
+ }
+
+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
+ field = cfg_entry->field;
+
+ req_start = offset;
+ req_end = offset + size;
+ field_start = OFFSET(cfg_entry);
+ field_end = OFFSET(cfg_entry) + field->size;
+
+ if ((req_start >= field_start && req_start < field_end)
+ || (req_end > field_start && req_end <= field_end)) {
+ err = conf_space_read(dev, cfg_entry, field_start,
+ &tmp_val);
+ if (err)
+ goto out;
+
+ value = merge_value(value, tmp_val,
+ get_mask(field->size),
+ field_start - req_start);
+ }
+ }
+
+out:
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: read %d bytes at 0x%x = %x\n",
+ pci_name(dev), size, offset, value);
+
+ *ret_val = value;
+ return pcibios_err_to_errno(err);
+}
+
+int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
+{
+ int err = 0, handled = 0;
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+ const struct config_field_entry *cfg_entry;
+ const struct config_field *field;
+ u32 tmp_val;
+ int req_start, req_end, field_start, field_end;
+
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG
+ DRV_NAME ": %s: write request %d bytes at 0x%x = %x\n",
+ pci_name(dev), size, offset, value);
+
+ if (!valid_request(offset, size))
+ return XEN_PCI_ERR_invalid_offset;
+
+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
+ field = cfg_entry->field;
+
+ req_start = offset;
+ req_end = offset + size;
+ field_start = OFFSET(cfg_entry);
+ field_end = OFFSET(cfg_entry) + field->size;
+
+ if ((req_start >= field_start && req_start < field_end)
+ || (req_end > field_start && req_end <= field_end)) {
+ tmp_val = 0;
+
+ err = xen_pcibk_config_read(dev, field_start,
+ field->size, &tmp_val);
+ if (err)
+ break;
+
+ tmp_val = merge_value(tmp_val, value, get_mask(size),
+ req_start - field_start);
+
+ err = conf_space_write(dev, cfg_entry, field_start,
+ tmp_val);
+
+ /* handled is set true here, but not every byte
+ * may have been written! Properly detecting if
+ * every byte is handled is unnecessary as the
+ * flag is used to detect devices that need
+ * special helpers to work correctly.
+ */
+ handled = 1;
+ }
+ }
+
+ if (!handled && !err) {
+ /* By default, anything not specifically handled above is
+ * read-only. The permissive flag changes this behavior so
+ * that anything not specifically handled above is writable.
+ * This means that some fields may still be read-only because
+ * they have entries in the config_field list that intercept
+ * the write and do nothing. */
+ if (dev_data->permissive || permissive) {
+ switch (size) {
+ case 1:
+ err = pci_write_config_byte(dev, offset,
+ (u8) value);
+ break;
+ case 2:
+ err = pci_write_config_word(dev, offset,
+ (u16) value);
+ break;
+ case 4:
+ err = pci_write_config_dword(dev, offset,
+ (u32) value);
+ break;
+ }
+ } else if (!dev_data->warned_on_write) {
+ dev_data->warned_on_write = 1;
+ dev_warn(&dev->dev, "Driver tried to write to a "
+ "read-only configuration space field at offset"
+ " 0x%x, size %d. This may be harmless, but if "
+ "you have problems with your device:\n"
+ "1) see permissive attribute in sysfs\n"
+ "2) report problems to the xen-devel "
+ "mailing list along with details of your "
+ "device obtained from lspci.\n", offset, size);
+ }
+ }
+
+ return pcibios_err_to_errno(err);
+}
+
+void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev)
+{
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+ struct config_field_entry *cfg_entry, *t;
+ const struct config_field *field;
+
+ dev_dbg(&dev->dev, "free-ing dynamically allocated virtual "
+ "configuration space fields\n");
+ if (!dev_data)
+ return;
+
+ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
+ field = cfg_entry->field;
+
+ if (field->clean) {
+ field->clean((struct config_field *)field);
+
+ kfree(cfg_entry->data);
+
+ list_del(&cfg_entry->list);
+ kfree(cfg_entry);
+ }
+
+ }
+}
+
+void xen_pcibk_config_reset_dev(struct pci_dev *dev)
+{
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+ const struct config_field_entry *cfg_entry;
+ const struct config_field *field;
+
+ dev_dbg(&dev->dev, "resetting virtual configuration space\n");
+ if (!dev_data)
+ return;
+
+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
+ field = cfg_entry->field;
+
+ if (field->reset)
+ field->reset(dev, OFFSET(cfg_entry), cfg_entry->data);
+ }
+}
+
+void xen_pcibk_config_free_dev(struct pci_dev *dev)
+{
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+ struct config_field_entry *cfg_entry, *t;
+ const struct config_field *field;
+
+ dev_dbg(&dev->dev, "free-ing virtual configuration space fields\n");
+ if (!dev_data)
+ return;
+
+ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
+ list_del(&cfg_entry->list);
+
+ field = cfg_entry->field;
+
+ if (field->release)
+ field->release(dev, OFFSET(cfg_entry), cfg_entry->data);
+
+ kfree(cfg_entry);
+ }
+}
+
+int xen_pcibk_config_add_field_offset(struct pci_dev *dev,
+ const struct config_field *field,
+ unsigned int base_offset)
+{
+ int err = 0;
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+ struct config_field_entry *cfg_entry;
+ void *tmp;
+
+ cfg_entry = kmalloc(sizeof(*cfg_entry), GFP_KERNEL);
+ if (!cfg_entry) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ cfg_entry->data = NULL;
+ cfg_entry->field = field;
+ cfg_entry->base_offset = base_offset;
+
+ /* silently ignore duplicate fields */
+ err = xen_pcibk_field_is_dup(dev, OFFSET(cfg_entry));
+ if (err)
+ goto out;
+
+ if (field->init) {
+ tmp = field->init(dev, OFFSET(cfg_entry));
+
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto out;
+ }
+
+ cfg_entry->data = tmp;
+ }
+
+ dev_dbg(&dev->dev, "added config field at offset 0x%02x\n",
+ OFFSET(cfg_entry));
+ list_add_tail(&cfg_entry->list, &dev_data->config_fields);
+
+out:
+ if (err)
+ kfree(cfg_entry);
+
+ return err;
+}
+
+/* This sets up the device's virtual configuration space to keep track of
+ * certain registers (like the base address registers (BARs)) so that we can
+ * keep the client from manipulating them directly.
+ */
+int xen_pcibk_config_init_dev(struct pci_dev *dev)
+{
+ int err = 0;
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+
+ dev_dbg(&dev->dev, "initializing virtual configuration space\n");
+
+ INIT_LIST_HEAD(&dev_data->config_fields);
+
+ err = xen_pcibk_config_header_add_fields(dev);
+ if (err)
+ goto out;
+
+ err = xen_pcibk_config_capability_add_fields(dev);
+ if (err)
+ goto out;
+
+ err = xen_pcibk_config_quirks_init(dev);
+
+out:
+ return err;
+}
+
+int xen_pcibk_config_init(void)
+{
+ return xen_pcibk_config_capability_init();
+}
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
new file mode 100644
index 00000000000..e56c934ad13
--- /dev/null
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -0,0 +1,126 @@
+/*
+ * PCI Backend - Common data structures for overriding the configuration space
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+
+#ifndef __XEN_PCIBACK_CONF_SPACE_H__
+#define __XEN_PCIBACK_CONF_SPACE_H__
+
+#include <linux/list.h>
+#include <linux/err.h>
+
+/* conf_field_init can return an errno in a ptr with ERR_PTR() */
+typedef void *(*conf_field_init) (struct pci_dev *dev, int offset);
+typedef void (*conf_field_reset) (struct pci_dev *dev, int offset, void *data);
+typedef void (*conf_field_free) (struct pci_dev *dev, int offset, void *data);
+
+typedef int (*conf_dword_write) (struct pci_dev *dev, int offset, u32 value,
+ void *data);
+typedef int (*conf_word_write) (struct pci_dev *dev, int offset, u16 value,
+ void *data);
+typedef int (*conf_byte_write) (struct pci_dev *dev, int offset, u8 value,
+ void *data);
+typedef int (*conf_dword_read) (struct pci_dev *dev, int offset, u32 *value,
+ void *data);
+typedef int (*conf_word_read) (struct pci_dev *dev, int offset, u16 *value,
+ void *data);
+typedef int (*conf_byte_read) (struct pci_dev *dev, int offset, u8 *value,
+ void *data);
+
+/* These are the fields within the configuration space which we
+ * are interested in intercepting reads/writes to and changing their
+ * values.
+ */
+struct config_field {
+ unsigned int offset;
+ unsigned int size;
+ unsigned int mask;
+ conf_field_init init;
+ conf_field_reset reset;
+ conf_field_free release;
+ void (*clean) (struct config_field *field);
+ union {
+ struct {
+ conf_dword_write write;
+ conf_dword_read read;
+ } dw;
+ struct {
+ conf_word_write write;
+ conf_word_read read;
+ } w;
+ struct {
+ conf_byte_write write;
+ conf_byte_read read;
+ } b;
+ } u;
+ struct list_head list;
+};
+
+struct config_field_entry {
+ struct list_head list;
+ const struct config_field *field;
+ unsigned int base_offset;
+ void *data;
+};
+
+#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
+
+/* Add fields to a device - the add_fields macro expects to get a pointer to
+ * the first entry in an array (whose end is marked by an entry with size==0)
+ */
+int xen_pcibk_config_add_field_offset(struct pci_dev *dev,
+ const struct config_field *field,
+ unsigned int offset);
+
+static inline int xen_pcibk_config_add_field(struct pci_dev *dev,
+ const struct config_field *field)
+{
+ return xen_pcibk_config_add_field_offset(dev, field, 0);
+}
+
+static inline int xen_pcibk_config_add_fields(struct pci_dev *dev,
+ const struct config_field *field)
+{
+ int i, err = 0;
+ for (i = 0; field[i].size != 0; i++) {
+ err = xen_pcibk_config_add_field(dev, &field[i]);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+static inline int xen_pcibk_config_add_fields_offset(struct pci_dev *dev,
+ const struct config_field *field,
+ unsigned int offset)
+{
+ int i, err = 0;
+ for (i = 0; field[i].size != 0; i++) {
+ err = xen_pcibk_config_add_field_offset(dev, &field[i], offset);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+/* Read/Write the real configuration space */
+int xen_pcibk_read_config_byte(struct pci_dev *dev, int offset, u8 *value,
+ void *data);
+int xen_pcibk_read_config_word(struct pci_dev *dev, int offset, u16 *value,
+ void *data);
+int xen_pcibk_read_config_dword(struct pci_dev *dev, int offset, u32 *value,
+ void *data);
+int xen_pcibk_write_config_byte(struct pci_dev *dev, int offset, u8 value,
+ void *data);
+int xen_pcibk_write_config_word(struct pci_dev *dev, int offset, u16 value,
+ void *data);
+int xen_pcibk_write_config_dword(struct pci_dev *dev, int offset, u32 value,
+ void *data);
+
+int xen_pcibk_config_capability_init(void);
+
+int xen_pcibk_config_header_add_fields(struct pci_dev *dev);
+int xen_pcibk_config_capability_add_fields(struct pci_dev *dev);
+
+#endif /* __XEN_PCIBACK_CONF_SPACE_H__ */
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
new file mode 100644
index 00000000000..7f83e9083e9
--- /dev/null
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -0,0 +1,207 @@
+/*
+ * PCI Backend - Handles the virtual fields found on the capability lists
+ * in the configuration space.
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include "pciback.h"
+#include "conf_space.h"
+
+static LIST_HEAD(capabilities);
+struct xen_pcibk_config_capability {
+ struct list_head cap_list;
+
+ int capability;
+
+ /* If the device has the capability found above, add these fields */
+ const struct config_field *fields;
+};
+
+static const struct config_field caplist_header[] = {
+ {
+ .offset = PCI_CAP_LIST_ID,
+ .size = 2, /* encompass PCI_CAP_LIST_ID & PCI_CAP_LIST_NEXT */
+ .u.w.read = xen_pcibk_read_config_word,
+ .u.w.write = NULL,
+ },
+ {}
+};
+
+static inline void register_capability(struct xen_pcibk_config_capability *cap)
+{
+ list_add_tail(&cap->cap_list, &capabilities);
+}
+
+int xen_pcibk_config_capability_add_fields(struct pci_dev *dev)
+{
+ int err = 0;
+ struct xen_pcibk_config_capability *cap;
+ int cap_offset;
+
+ list_for_each_entry(cap, &capabilities, cap_list) {
+ cap_offset = pci_find_capability(dev, cap->capability);
+ if (cap_offset) {
+ dev_dbg(&dev->dev, "Found capability 0x%x at 0x%x\n",
+ cap->capability, cap_offset);
+
+ err = xen_pcibk_config_add_fields_offset(dev,
+ caplist_header,
+ cap_offset);
+ if (err)
+ goto out;
+ err = xen_pcibk_config_add_fields_offset(dev,
+ cap->fields,
+ cap_offset);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ return err;
+}
+
+static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
+ void *data)
+{
+ /* Disallow writes to the vital product data */
+ if (value & PCI_VPD_ADDR_F)
+ return PCIBIOS_SET_FAILED;
+ else
+ return pci_write_config_word(dev, offset, value);
+}
+
+static const struct config_field caplist_vpd[] = {
+ {
+ .offset = PCI_VPD_ADDR,
+ .size = 2,
+ .u.w.read = xen_pcibk_read_config_word,
+ .u.w.write = vpd_address_write,
+ },
+ {
+ .offset = PCI_VPD_DATA,
+ .size = 4,
+ .u.dw.read = xen_pcibk_read_config_dword,
+ .u.dw.write = NULL,
+ },
+ {}
+};
+
+static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
+ void *data)
+{
+ int err;
+ u16 real_value;
+
+ err = pci_read_config_word(dev, offset, &real_value);
+ if (err)
+ goto out;
+
+ *value = real_value & ~PCI_PM_CAP_PME_MASK;
+
+out:
+ return err;
+}
+
+/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
+ * Can't allow driver domain to enable PMEs - they're shared */
+#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
+
+static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
+ void *data)
+{
+ int err;
+ u16 old_value;
+ pci_power_t new_state, old_state;
+
+ err = pci_read_config_word(dev, offset, &old_value);
+ if (err)
+ goto out;
+
+ old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
+ new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
+
+ new_value &= PM_OK_BITS;
+ if ((old_value & PM_OK_BITS) != new_value) {
+ new_value = (old_value & ~PM_OK_BITS) | new_value;
+ err = pci_write_config_word(dev, offset, new_value);
+ if (err)
+ goto out;
+ }
+
+ /* Let pci core handle the power management change */
+ dev_dbg(&dev->dev, "set power state to %x\n", new_state);
+ err = pci_set_power_state(dev, new_state);
+ if (err) {
+ err = PCIBIOS_SET_FAILED;
+ goto out;
+ }
+
+ out:
+ return err;
+}
+
+/* Ensure PMEs are disabled */
+static void *pm_ctrl_init(struct pci_dev *dev, int offset)
+{
+ int err;
+ u16 value;
+
+ err = pci_read_config_word(dev, offset, &value);
+ if (err)
+ goto out;
+
+ if (value & PCI_PM_CTRL_PME_ENABLE) {
+ value &= ~PCI_PM_CTRL_PME_ENABLE;
+ err = pci_write_config_word(dev, offset, value);
+ }
+
+out:
+ return ERR_PTR(err);
+}
+
+static const struct config_field caplist_pm[] = {
+ {
+ .offset = PCI_PM_PMC,
+ .size = 2,
+ .u.w.read = pm_caps_read,
+ },
+ {
+ .offset = PCI_PM_CTRL,
+ .size = 2,
+ .init = pm_ctrl_init,
+ .u.w.read = xen_pcibk_read_config_word,
+ .u.w.write = pm_ctrl_write,
+ },
+ {
+ .offset = PCI_PM_PPB_EXTENSIONS,
+ .size = 1,
+ .u.b.read = xen_pcibk_read_config_byte,
+ },
+ {
+ .offset = PCI_PM_DATA_REGISTER,
+ .size = 1,
+ .u.b.read = xen_pcibk_read_config_byte,
+ },
+ {}
+};
+
+static struct xen_pcibk_config_capability xen_pcibk_config_capability_pm = {
+ .capability = PCI_CAP_ID_PM,
+ .fields = caplist_pm,
+};
+static struct xen_pcibk_config_capability xen_pcibk_config_capability_vpd = {
+ .capability = PCI_CAP_ID_VPD,
+ .fields = caplist_vpd,
+};
+
+int xen_pcibk_config_capability_init(void)
+{
+ register_capability(&xen_pcibk_config_capability_vpd);
+ register_capability(&xen_pcibk_config_capability_pm);
+
+ return 0;
+}
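The two registrations above show the pattern for handling further capability lists: declare a sentinel-terminated field table, wrap it in a struct xen_pcibk_config_capability, and register it once at init time. A hedged sketch for a hypothetical extra capability follows; the example_* names and the choice of PCI_CAP_ID_MSI are illustrative only and do not reflect how the driver actually handles MSI.

	/* Illustrative sketch, not part of the patch. */
	static const struct config_field caplist_example[] = {
		{
			.offset   = PCI_MSI_FLAGS,
			.size     = 2,
			.u.w.read = xen_pcibk_read_config_word,
			/* write handler intentionally left NULL in this sketch */
		},
		{}
	};

	static struct xen_pcibk_config_capability example_capability = {
		.capability = PCI_CAP_ID_MSI,
		.fields     = caplist_example,
	};

	/* Registered once, alongside the PM and VPD capabilities above:
	 *	register_capability(&example_capability);
	 */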
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
new file mode 100644
index 00000000000..da3cbdfcb5d
--- /dev/null
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -0,0 +1,386 @@
+/*
+ * PCI Backend - Handles the virtual fields in the configuration space headers.
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include "pciback.h"
+#include "conf_space.h"
+
+struct pci_bar_info {
+ u32 val;
+ u32 len_val;
+ int which;
+};
+
+#define DRV_NAME "xen-pciback"
+#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
+#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
+
+static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
+{
+ int i;
+ int ret;
+
+ ret = xen_pcibk_read_config_word(dev, offset, value, data);
+ if (!atomic_read(&dev->enable_cnt))
+ return ret;
+
+ for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ if (dev->resource[i].flags & IORESOURCE_IO)
+ *value |= PCI_COMMAND_IO;
+ if (dev->resource[i].flags & IORESOURCE_MEM)
+ *value |= PCI_COMMAND_MEMORY;
+ }
+
+ return ret;
+}
+
+static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+{
+ struct xen_pcibk_dev_data *dev_data;
+ int err;
+
+ dev_data = pci_get_drvdata(dev);
+ if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: enable\n",
+ pci_name(dev));
+ err = pci_enable_device(dev);
+ if (err)
+ return err;
+ if (dev_data)
+ dev_data->enable_intx = 1;
+ } else if (pci_is_enabled(dev) && !is_enable_cmd(value)) {
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: disable\n",
+ pci_name(dev));
+ pci_disable_device(dev);
+ if (dev_data)
+ dev_data->enable_intx = 0;
+ }
+
+ if (!dev->is_busmaster && is_master_cmd(value)) {
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n",
+ pci_name(dev));
+ pci_set_master(dev);
+ }
+
+ if (value & PCI_COMMAND_INVALIDATE) {
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG
+ DRV_NAME ": %s: enable memory-write-invalidate\n",
+ pci_name(dev));
+ err = pci_set_mwi(dev);
+ if (err) {
+ printk(KERN_WARNING
+ DRV_NAME ": %s: cannot enable "
+ "memory-write-invalidate (%d)\n",
+ pci_name(dev), err);
+ value &= ~PCI_COMMAND_INVALIDATE;
+ }
+ }
+
+ return pci_write_config_word(dev, offset, value);
+}
+
+static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
+{
+ struct pci_bar_info *bar = data;
+
+ if (unlikely(!bar)) {
+ printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
+ pci_name(dev));
+ return XEN_PCI_ERR_op_failed;
+ }
+
+ /* A write to obtain the length must happen as a 32-bit write.
+ * This does not (yet) support writing individual bytes
+ */
+ if (value == ~PCI_ROM_ADDRESS_ENABLE)
+ bar->which = 1;
+ else {
+ u32 tmpval;
+ pci_read_config_dword(dev, offset, &tmpval);
+ if (tmpval != bar->val && value == bar->val) {
+ /* Allow restoration of bar value. */
+ pci_write_config_dword(dev, offset, bar->val);
+ }
+ bar->which = 0;
+ }
+
+ /* Do we need to support enabling/disabling the rom address here? */
+
+ return 0;
+}
+
+/* For the BARs, only allow writes which write ~0 or
+ * the correct resource information
+ * (Needed for when the driver probes the resource usage)
+ */
+static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
+{
+ struct pci_bar_info *bar = data;
+
+ if (unlikely(!bar)) {
+ printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
+ pci_name(dev));
+ return XEN_PCI_ERR_op_failed;
+ }
+
+ /* A write to obtain the length must happen as a 32-bit write.
+ * This does not (yet) support writing individual bytes
+ */
+ if (value == ~0)
+ bar->which = 1;
+ else {
+ u32 tmpval;
+ pci_read_config_dword(dev, offset, &tmpval);
+ if (tmpval != bar->val && value == bar->val) {
+ /* Allow restoration of bar value. */
+ pci_write_config_dword(dev, offset, bar->val);
+ }
+ bar->which = 0;
+ }
+
+ return 0;
+}
+
+static int bar_read(struct pci_dev *dev, int offset, u32 *value, void *data)
+{
+ struct pci_bar_info *bar = data;
+
+ if (unlikely(!bar)) {
+ printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
+ pci_name(dev));
+ return XEN_PCI_ERR_op_failed;
+ }
+
+ *value = bar->which ? bar->len_val : bar->val;
+
+ return 0;
+}
+
+static inline void read_dev_bar(struct pci_dev *dev,
+ struct pci_bar_info *bar_info, int offset,
+ u32 len_mask)
+{
+ int pos;
+ struct resource *res = dev->resource;
+
+ if (offset == PCI_ROM_ADDRESS || offset == PCI_ROM_ADDRESS1)
+ pos = PCI_ROM_RESOURCE;
+ else {
+ pos = (offset - PCI_BASE_ADDRESS_0) / 4;
+ if (pos && ((res[pos - 1].flags & (PCI_BASE_ADDRESS_SPACE |
+ PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
+ (PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_TYPE_64))) {
+ bar_info->val = res[pos - 1].start >> 32;
+ bar_info->len_val = res[pos - 1].end >> 32;
+ return;
+ }
+ }
+
+ bar_info->val = res[pos].start |
+ (res[pos].flags & PCI_REGION_FLAG_MASK);
+ bar_info->len_val = res[pos].end - res[pos].start + 1;
+}
+
+static void *bar_init(struct pci_dev *dev, int offset)
+{
+ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+
+ if (!bar)
+ return ERR_PTR(-ENOMEM);
+
+ read_dev_bar(dev, bar, offset, ~0);
+ bar->which = 0;
+
+ return bar;
+}
+
+static void *rom_init(struct pci_dev *dev, int offset)
+{
+ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+
+ if (!bar)
+ return ERR_PTR(-ENOMEM);
+
+ read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
+ bar->which = 0;
+
+ return bar;
+}
+
+static void bar_reset(struct pci_dev *dev, int offset, void *data)
+{
+ struct pci_bar_info *bar = data;
+
+ bar->which = 0;
+}
+
+static void bar_release(struct pci_dev *dev, int offset, void *data)
+{
+ kfree(data);
+}
+
+static int xen_pcibk_read_vendor(struct pci_dev *dev, int offset,
+ u16 *value, void *data)
+{
+ *value = dev->vendor;
+
+ return 0;
+}
+
+static int xen_pcibk_read_device(struct pci_dev *dev, int offset,
+ u16 *value, void *data)
+{
+ *value = dev->device;
+
+ return 0;
+}
+
+static int interrupt_read(struct pci_dev *dev, int offset, u8 *value,
+ void *data)
+{
+ *value = (u8) dev->irq;
+
+ return 0;
+}
+
+static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data)
+{
+ u8 cur_value;
+ int err;
+
+ err = pci_read_config_byte(dev, offset, &cur_value);
+ if (err)
+ goto out;
+
+ if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START)
+ || value == PCI_BIST_START)
+ err = pci_write_config_byte(dev, offset, value);
+
+out:
+ return err;
+}
+
+static const struct config_field header_common[] = {
+ {
+ .offset = PCI_VENDOR_ID,
+ .size = 2,
+ .u.w.read = xen_pcibk_read_vendor,
+ },
+ {
+ .offset = PCI_DEVICE_ID,
+ .size = 2,
+ .u.w.read = xen_pcibk_read_device,
+ },
+ {
+ .offset = PCI_COMMAND,
+ .size = 2,
+ .u.w.read = command_read,
+ .u.w.write = command_write,
+ },
+ {
+ .offset = PCI_INTERRUPT_LINE,
+ .size = 1,
+ .u.b.read = interrupt_read,
+ },
+ {
+ .offset = PCI_INTERRUPT_PIN,
+ .size = 1,
+ .u.b.read = xen_pcibk_read_config_byte,
+ },
+ {
+ /* Any side effects of letting driver domain control cache line? */
+ .offset = PCI_CACHE_LINE_SIZE,
+ .size = 1,
+ .u.b.read = xen_pcibk_read_config_byte,
+ .u.b.write = xen_pcibk_write_config_byte,
+ },
+ {
+ .offset = PCI_LATENCY_TIMER,
+ .size = 1,
+ .u.b.read = xen_pcibk_read_config_byte,
+ },
+ {
+ .offset = PCI_BIST,
+ .size = 1,
+ .u.b.read = xen_pcibk_read_config_byte,
+ .u.b.write = bist_write,
+ },
+ {}
+};
+
+#define CFG_FIELD_BAR(reg_offset) \
+ { \
+ .offset = reg_offset, \
+ .size = 4, \
+ .init = bar_init, \
+ .reset = bar_reset, \
+ .release = bar_release, \
+ .u.dw.read = bar_read, \
+ .u.dw.write = bar_write, \
+ }
+
+#define CFG_FIELD_ROM(reg_offset) \
+ { \
+ .offset = reg_offset, \
+ .size = 4, \
+ .init = rom_init, \
+ .reset = bar_reset, \
+ .release = bar_release, \
+ .u.dw.read = bar_read, \
+ .u.dw.write = rom_write, \
+ }
+
+static const struct config_field header_0[] = {
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_2),
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_3),
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_4),
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_5),
+ CFG_FIELD_ROM(PCI_ROM_ADDRESS),
+ {}
+};
+
+static const struct config_field header_1[] = {
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
+ CFG_FIELD_ROM(PCI_ROM_ADDRESS1),
+ {}
+};
+
+int xen_pcibk_config_header_add_fields(struct pci_dev *dev)
+{
+ int err;
+
+ err = xen_pcibk_config_add_fields(dev, header_common);
+ if (err)
+ goto out;
+
+ switch (dev->hdr_type) {
+ case PCI_HEADER_TYPE_NORMAL:
+ err = xen_pcibk_config_add_fields(dev, header_0);
+ break;
+
+ case PCI_HEADER_TYPE_BRIDGE:
+ err = xen_pcibk_config_add_fields(dev, header_1);
+ break;
+
+ default:
+ err = -EINVAL;
+ printk(KERN_ERR DRV_NAME ": %s: Unsupported header type %d!\n",
+ pci_name(dev), dev->hdr_type);
+ break;
+ }
+
+out:
+ return err;
+}
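bar_write() and bar_read() above exist to service the standard BAR sizing handshake a guest driver performs through its virtual configuration space: write all-ones, read back the length, then restore the original value. A hedged sketch of that sequence follows; the helper name is an assumption, and the pci_*_config_dword calls stand in for the guest-side accessors that eventually reach the handlers above.

	/* Illustrative sketch, not part of the patch. */
	static void example_bar_size_probe(struct pci_dev *dev, int bar_reg)
	{
		u32 orig, length;

		pci_read_config_dword(dev, bar_reg, &orig);	/* bar_read(): bar->val */
		pci_write_config_dword(dev, bar_reg, ~0U);	/* bar_write(): which = 1 */
		pci_read_config_dword(dev, bar_reg, &length);	/* bar_read(): len_val */
		pci_write_config_dword(dev, bar_reg, orig);	/* restore: which = 0 */
	}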
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c
new file mode 100644
index 00000000000..921a889e65e
--- /dev/null
+++ b/drivers/xen/xen-pciback/conf_space_quirks.c
@@ -0,0 +1,140 @@
+/*
+ * PCI Backend - Handle special overlays for broken devices.
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ * Author: Chris Bookholt <hap10@epoch.ncsc.mil>
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include "pciback.h"
+#include "conf_space.h"
+#include "conf_space_quirks.h"
+
+LIST_HEAD(xen_pcibk_quirks);
+#define DRV_NAME "xen-pciback"
+static inline const struct pci_device_id *
+match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
+{
+ if ((id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) &&
+ (id->device == PCI_ANY_ID || id->device == dev->device) &&
+ (id->subvendor == PCI_ANY_ID ||
+ id->subvendor == dev->subsystem_vendor) &&
+ (id->subdevice == PCI_ANY_ID ||
+ id->subdevice == dev->subsystem_device) &&
+ !((id->class ^ dev->class) & id->class_mask))
+ return id;
+ return NULL;
+}
+
+static struct xen_pcibk_config_quirk *xen_pcibk_find_quirk(struct pci_dev *dev)
+{
+ struct xen_pcibk_config_quirk *tmp_quirk;
+
+ list_for_each_entry(tmp_quirk, &xen_pcibk_quirks, quirks_list)
+ if (match_one_device(&tmp_quirk->devid, dev) != NULL)
+ goto out;
+ tmp_quirk = NULL;
+ printk(KERN_DEBUG DRV_NAME
+ ":quirk didn't match any device xen_pciback knows about\n");
+out:
+ return tmp_quirk;
+}
+
+static inline void register_quirk(struct xen_pcibk_config_quirk *quirk)
+{
+ list_add_tail(&quirk->quirks_list, &xen_pcibk_quirks);
+}
+
+int xen_pcibk_field_is_dup(struct pci_dev *dev, unsigned int reg)
+{
+ int ret = 0;
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+ struct config_field_entry *cfg_entry;
+
+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
+ if (OFFSET(cfg_entry) == reg) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
+
+int xen_pcibk_config_quirks_add_field(struct pci_dev *dev, struct config_field
+ *field)
+{
+ int err = 0;
+
+ switch (field->size) {
+ case 1:
+ field->u.b.read = xen_pcibk_read_config_byte;
+ field->u.b.write = xen_pcibk_write_config_byte;
+ break;
+ case 2:
+ field->u.w.read = xen_pcibk_read_config_word;
+ field->u.w.write = xen_pcibk_write_config_word;
+ break;
+ case 4:
+ field->u.dw.read = xen_pcibk_read_config_dword;
+ field->u.dw.write = xen_pcibk_write_config_dword;
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ xen_pcibk_config_add_field(dev, field);
+
+out:
+ return err;
+}
+
+int xen_pcibk_config_quirks_init(struct pci_dev *dev)
+{
+ struct xen_pcibk_config_quirk *quirk;
+ int ret = 0;
+
+ quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
+ if (!quirk) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ quirk->devid.vendor = dev->vendor;
+ quirk->devid.device = dev->device;
+ quirk->devid.subvendor = dev->subsystem_vendor;
+ quirk->devid.subdevice = dev->subsystem_device;
+ quirk->devid.class = 0;
+ quirk->devid.class_mask = 0;
+ quirk->devid.driver_data = 0UL;
+
+ quirk->pdev = dev;
+
+ register_quirk(quirk);
+out:
+ return ret;
+}
+
+void xen_pcibk_config_field_free(struct config_field *field)
+{
+ kfree(field);
+}
+
+int xen_pcibk_config_quirk_release(struct pci_dev *dev)
+{
+ struct xen_pcibk_config_quirk *quirk;
+ int ret = 0;
+
+ quirk = xen_pcibk_find_quirk(dev);
+ if (!quirk) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ list_del(&quirk->quirks_list);
+ kfree(quirk);
+
+out:
+ return ret;
+}
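As a worked example of the path above: pcistub's quirks sysfs attribute allocates a struct config_field, fills in offset/size/mask and hands it to xen_pcibk_config_quirks_add_field(), which selects byte/word/dword pass-through accessors by size. A hedged in-kernel sketch of the same flow; the 0x40 offset and helper name are placeholders, not part of the patch.

	/* Illustrative sketch, not part of the patch. */
	static int example_add_quirk_byte(struct pci_dev *dev)
	{
		struct config_field *field;
		int err;

		field = kzalloc(sizeof(*field), GFP_KERNEL);
		if (!field)
			return -ENOMEM;

		field->offset = 0x40;	/* hypothetical device-specific register */
		field->size   = 1;	/* selects the byte accessors */
		field->mask   = 0;
		field->clean  = xen_pcibk_config_field_free;

		err = xen_pcibk_config_quirks_add_field(dev, field);
		if (err)
			kfree(field);
		return err;
	}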
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.h b/drivers/xen/xen-pciback/conf_space_quirks.h
new file mode 100644
index 00000000000..cfcc517e457
--- /dev/null
+++ b/drivers/xen/xen-pciback/conf_space_quirks.h
@@ -0,0 +1,33 @@
+/*
+ * PCI Backend - Data structures for special overlays for broken devices.
+ *
+ * Ryan Wilson <hap9@epoch.ncsc.mil>
+ * Chris Bookholt <hap10@epoch.ncsc.mil>
+ */
+
+#ifndef __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
+#define __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
+
+#include <linux/pci.h>
+#include <linux/list.h>
+
+struct xen_pcibk_config_quirk {
+ struct list_head quirks_list;
+ struct pci_device_id devid;
+ struct pci_dev *pdev;
+};
+
+int xen_pcibk_config_quirks_add_field(struct pci_dev *dev, struct config_field
+ *field);
+
+int xen_pcibk_config_quirks_remove_field(struct pci_dev *dev, int reg);
+
+int xen_pcibk_config_quirks_init(struct pci_dev *dev);
+
+void xen_pcibk_config_field_free(struct config_field *field);
+
+int xen_pcibk_config_quirk_release(struct pci_dev *dev);
+
+int xen_pcibk_field_is_dup(struct pci_dev *dev, unsigned int reg);
+
+#endif
diff --git a/drivers/xen/xen-pciback/passthrough.c b/drivers/xen/xen-pciback/passthrough.c
new file mode 100644
index 00000000000..1d32a9a42c0
--- /dev/null
+++ b/drivers/xen/xen-pciback/passthrough.c
@@ -0,0 +1,194 @@
+/*
+ * PCI Backend - Provides restricted access to the real PCI bus topology
+ * to the frontend
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include "pciback.h"
+
+struct passthrough_dev_data {
+ /* Access to dev_list must be protected by lock */
+ struct list_head dev_list;
+ spinlock_t lock;
+};
+
+static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
+ unsigned int domain,
+ unsigned int bus,
+ unsigned int devfn)
+{
+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
+ struct pci_dev_entry *dev_entry;
+ struct pci_dev *dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_data->lock, flags);
+
+ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
+ if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
+ && bus == (unsigned int)dev_entry->dev->bus->number
+ && devfn == dev_entry->dev->devfn) {
+ dev = dev_entry->dev;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&dev_data->lock, flags);
+
+ return dev;
+}
+
+static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev,
+ int devid, publish_pci_dev_cb publish_cb)
+{
+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
+ struct pci_dev_entry *dev_entry;
+ unsigned long flags;
+ unsigned int domain, bus, devfn;
+ int err;
+
+ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
+ if (!dev_entry)
+ return -ENOMEM;
+ dev_entry->dev = dev;
+
+ spin_lock_irqsave(&dev_data->lock, flags);
+ list_add_tail(&dev_entry->list, &dev_data->dev_list);
+ spin_unlock_irqrestore(&dev_data->lock, flags);
+
+ /* Publish this device. */
+ domain = (unsigned int)pci_domain_nr(dev->bus);
+ bus = (unsigned int)dev->bus->number;
+ devfn = dev->devfn;
+ err = publish_cb(pdev, domain, bus, devfn, devid);
+
+ return err;
+}
+
+static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev)
+{
+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
+ struct pci_dev_entry *dev_entry, *t;
+ struct pci_dev *found_dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_data->lock, flags);
+
+ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
+ if (dev_entry->dev == dev) {
+ list_del(&dev_entry->list);
+ found_dev = dev_entry->dev;
+ kfree(dev_entry);
+ }
+ }
+
+ spin_unlock_irqrestore(&dev_data->lock, flags);
+
+ if (found_dev)
+ pcistub_put_pci_dev(found_dev);
+}
+
+static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
+{
+ struct passthrough_dev_data *dev_data;
+
+ dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+
+ spin_lock_init(&dev_data->lock);
+
+ INIT_LIST_HEAD(&dev_data->dev_list);
+
+ pdev->pci_dev_data = dev_data;
+
+ return 0;
+}
+
+static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
+ publish_pci_root_cb publish_root_cb)
+{
+ int err = 0;
+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
+ struct pci_dev_entry *dev_entry, *e, *tmp;
+ struct pci_dev *dev;
+ int found;
+ unsigned int domain, bus;
+
+ spin_lock(&dev_data->lock);
+
+ list_for_each_entry_safe(dev_entry, tmp, &dev_data->dev_list, list) {
+ /* Only publish this device as a root if none of its
+ * parent bridges are exported
+ */
+ found = 0;
+ dev = dev_entry->dev->bus->self;
+ for (; !found && dev != NULL; dev = dev->bus->self) {
+ list_for_each_entry(e, &dev_data->dev_list, list) {
+ if (dev == e->dev) {
+ found = 1;
+ break;
+ }
+ }
+ }
+
+ domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
+ bus = (unsigned int)dev_entry->dev->bus->number;
+
+ if (!found) {
+ spin_unlock(&dev_data->lock);
+ err = publish_root_cb(pdev, domain, bus);
+ if (err)
+ break;
+ spin_lock(&dev_data->lock);
+ }
+ }
+
+ if (!err)
+ spin_unlock(&dev_data->lock);
+
+ return err;
+}
+
+static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
+{
+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
+ struct pci_dev_entry *dev_entry, *t;
+
+ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
+ list_del(&dev_entry->list);
+ pcistub_put_pci_dev(dev_entry->dev);
+ kfree(dev_entry);
+ }
+
+ kfree(dev_data);
+ pdev->pci_dev_data = NULL;
+}
+
+static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
+ struct xen_pcibk_device *pdev,
+ unsigned int *domain, unsigned int *bus,
+ unsigned int *devfn)
+{
+ *domain = pci_domain_nr(pcidev->bus);
+ *bus = pcidev->bus->number;
+ *devfn = pcidev->devfn;
+ return 1;
+}
+
+struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
+ .name = "passthrough",
+ .init = __xen_pcibk_init_devices,
+ .free = __xen_pcibk_release_devices,
+ .find = __xen_pcibk_get_pcifront_dev,
+ .publish = __xen_pcibk_publish_pci_roots,
+ .release = __xen_pcibk_release_pci_dev,
+ .add = __xen_pcibk_add_pci_dev,
+ .get = __xen_pcibk_get_pci_dev,
+};
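This ops table is one of the interchangeable struct xen_pcibk_backend implementations; callers are expected to go through the function pointers rather than the static functions directly. A minimal hedged sketch of such a dispatch follows; the active_backend variable and wrapper name are assumptions for the sketch, not identifiers from this patch.

	/* Illustrative sketch, not part of the patch. */
	static const struct xen_pcibk_backend *active_backend =
		&xen_pcibk_passthrough_backend;

	static struct pci_dev *example_get_pci_dev(struct xen_pcibk_device *pdev,
						   unsigned int domain,
						   unsigned int bus,
						   unsigned int devfn)
	{
		if (active_backend && active_backend->get)
			return active_backend->get(pdev, domain, bus, devfn);
		return NULL;
	}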
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
new file mode 100644
index 00000000000..aec214ac0a1
--- /dev/null
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -0,0 +1,1376 @@
+/*
+ * PCI Stub Driver - Grabs devices in backend to be exported later
+ *
+ * Ryan Wilson <hap9@epoch.ncsc.mil>
+ * Chris Bookholt <hap10@epoch.ncsc.mil>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rwsem.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/kref.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <xen/events.h>
+#include <asm/xen/pci.h>
+#include <asm/xen/hypervisor.h>
+#include "pciback.h"
+#include "conf_space.h"
+#include "conf_space_quirks.h"
+
+#define DRV_NAME "xen-pciback"
+
+static char *pci_devs_to_hide;
+wait_queue_head_t xen_pcibk_aer_wait_queue;
+/* Semaphore to synchronize AER handling with xen_pcibk remove/reconfigure
+ * ops; we want to avoid a xen_pcibk device being removed in the middle of
+ * an AER operation. */
+static DECLARE_RWSEM(pcistub_sem);
+module_param_named(hide, pci_devs_to_hide, charp, 0444);
+
+struct pcistub_device_id {
+ struct list_head slot_list;
+ int domain;
+ unsigned char bus;
+ unsigned int devfn;
+};
+static LIST_HEAD(pcistub_device_ids);
+static DEFINE_SPINLOCK(device_ids_lock);
+
+struct pcistub_device {
+ struct kref kref;
+ struct list_head dev_list;
+ spinlock_t lock;
+
+ struct pci_dev *dev;
+ struct xen_pcibk_device *pdev;/* non-NULL if struct pci_dev is in use */
+};
+
+/* Access to pcistub_devices & seized_devices lists and the initialize_devices
+ * flag must be locked with pcistub_devices_lock
+ */
+static DEFINE_SPINLOCK(pcistub_devices_lock);
+static LIST_HEAD(pcistub_devices);
+
+/* wait for device_initcall before initializing our devices
+ * (see pcistub_init_devices_late)
+ */
+static int initialize_devices;
+static LIST_HEAD(seized_devices);
+
+static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+
+ dev_dbg(&dev->dev, "pcistub_device_alloc\n");
+
+ psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
+ if (!psdev)
+ return NULL;
+
+ psdev->dev = pci_dev_get(dev);
+ if (!psdev->dev) {
+ kfree(psdev);
+ return NULL;
+ }
+
+ kref_init(&psdev->kref);
+ spin_lock_init(&psdev->lock);
+
+ return psdev;
+}
+
+/* Don't call this directly as it's called by pcistub_device_put */
+static void pcistub_device_release(struct kref *kref)
+{
+ struct pcistub_device *psdev;
+
+ psdev = container_of(kref, struct pcistub_device, kref);
+
+ dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
+
+ xen_unregister_device_domain_owner(psdev->dev);
+
+ /* Clean-up the device */
+ xen_pcibk_reset_device(psdev->dev);
+ xen_pcibk_config_free_dyn_fields(psdev->dev);
+ xen_pcibk_config_free_dev(psdev->dev);
+ kfree(pci_get_drvdata(psdev->dev));
+ pci_set_drvdata(psdev->dev, NULL);
+
+ pci_dev_put(psdev->dev);
+
+ kfree(psdev);
+}
+
+static inline void pcistub_device_get(struct pcistub_device *psdev)
+{
+ kref_get(&psdev->kref);
+}
+
+static inline void pcistub_device_put(struct pcistub_device *psdev)
+{
+ kref_put(&psdev->kref, pcistub_device_release);
+}
+
+static struct pcistub_device *pcistub_device_find(int domain, int bus,
+ int slot, int func)
+{
+ struct pcistub_device *psdev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (psdev->dev != NULL
+ && domain == pci_domain_nr(psdev->dev->bus)
+ && bus == psdev->dev->bus->number
+ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
+ pcistub_device_get(psdev);
+ goto out;
+ }
+ }
+
+ /* didn't find it */
+ psdev = NULL;
+
+out:
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ return psdev;
+}
+
+static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
+ struct pcistub_device *psdev)
+{
+ struct pci_dev *pci_dev = NULL;
+ unsigned long flags;
+
+ pcistub_device_get(psdev);
+
+ spin_lock_irqsave(&psdev->lock, flags);
+ if (!psdev->pdev) {
+ psdev->pdev = pdev;
+ pci_dev = psdev->dev;
+ }
+ spin_unlock_irqrestore(&psdev->lock, flags);
+
+ if (!pci_dev)
+ pcistub_device_put(psdev);
+
+ return pci_dev;
+}
+
+struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
+ int domain, int bus,
+ int slot, int func)
+{
+ struct pcistub_device *psdev;
+ struct pci_dev *found_dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (psdev->dev != NULL
+ && domain == pci_domain_nr(psdev->dev->bus)
+ && bus == psdev->dev->bus->number
+ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
+ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ return found_dev;
+}
+
+struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+ struct pci_dev *found_dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (psdev->dev == dev) {
+ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ return found_dev;
+}
+
+void pcistub_put_pci_dev(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev, *found_psdev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (psdev->dev == dev) {
+ found_psdev = psdev;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+
+ /* Hold this lock to avoid breaking the link between pcistub and
+ * xen_pcibk while AER handling is in progress.
+ */
+ down_write(&pcistub_sem);
+ /* Cleanup our device
+ * (so it's ready for the next domain)
+ */
+ xen_pcibk_reset_device(found_psdev->dev);
+ xen_pcibk_config_free_dyn_fields(found_psdev->dev);
+ xen_pcibk_config_reset_dev(found_psdev->dev);
+
+ spin_lock_irqsave(&found_psdev->lock, flags);
+ found_psdev->pdev = NULL;
+ spin_unlock_irqrestore(&found_psdev->lock, flags);
+
+ pcistub_device_put(found_psdev);
+ up_write(&pcistub_sem);
+}
+
+static int __devinit pcistub_match_one(struct pci_dev *dev,
+ struct pcistub_device_id *pdev_id)
+{
+ /* Match the specified device by domain, bus, slot, func and also if
+ * any of the device's parent bridges match.
+ */
+ for (; dev != NULL; dev = dev->bus->self) {
+ if (pci_domain_nr(dev->bus) == pdev_id->domain
+ && dev->bus->number == pdev_id->bus
+ && dev->devfn == pdev_id->devfn)
+ return 1;
+
+ /* Sometimes topmost bridge links to itself. */
+ if (dev == dev->bus->self)
+ break;
+ }
+
+ return 0;
+}
+
+static int __devinit pcistub_match(struct pci_dev *dev)
+{
+ struct pcistub_device_id *pdev_id;
+ unsigned long flags;
+ int found = 0;
+
+ spin_lock_irqsave(&device_ids_lock, flags);
+ list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
+ if (pcistub_match_one(dev, pdev_id)) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&device_ids_lock, flags);
+
+ return found;
+}
+
+static int __devinit pcistub_init_device(struct pci_dev *dev)
+{
+ struct xen_pcibk_dev_data *dev_data;
+ int err = 0;
+
+ dev_dbg(&dev->dev, "initializing...\n");
+
+ /* The PCI backend is not intended to be a module (or to work with
+ * removable PCI devices) yet. If it were, xen_pcibk_config_free()
+ * would need to be called somewhere to free the memory allocated
+ * here and then to call kfree(pci_get_drvdata(psdev->dev)).
+ */
+ dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
+ + strlen(pci_name(dev)) + 1, GFP_ATOMIC);
+ if (!dev_data) {
+ err = -ENOMEM;
+ goto out;
+ }
+ pci_set_drvdata(dev, dev_data);
+
+ /*
+ * Setup name for fake IRQ handler. It will only be enabled
+ * once the device is turned on by the guest.
+ */
+ sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev));
+
+ dev_dbg(&dev->dev, "initializing config\n");
+
+ init_waitqueue_head(&xen_pcibk_aer_wait_queue);
+ err = xen_pcibk_config_init_dev(dev);
+ if (err)
+ goto out;
+
+ /* HACK: Force device (& ACPI) to determine what IRQ it's on - we
+ * must do this here because pcibios_enable_device may specify
+ * the pci device's true irq (and possibly its other resources)
+ * if they differ from what's in the configuration space.
+ * This makes the assumption that the device's resources won't
+ * change after this point (otherwise this code may break!)
+ */
+ dev_dbg(&dev->dev, "enabling device\n");
+ err = pci_enable_device(dev);
+ if (err)
+ goto config_release;
+
+ /* Now disable the device (this also ensures some private device
+ * data is set up before we export)
+ */
+ dev_dbg(&dev->dev, "reset device\n");
+ xen_pcibk_reset_device(dev);
+
+ return 0;
+
+config_release:
+ xen_pcibk_config_free_dev(dev);
+
+out:
+ pci_set_drvdata(dev, NULL);
+ kfree(dev_data);
+ return err;
+}
+
+/*
+ * Because some initialization still happens on
+ * devices during fs_initcall, we need to defer
+ * full initialization of our devices until
+ * device_initcall.
+ */
+static int __init pcistub_init_devices_late(void)
+{
+ struct pcistub_device *psdev;
+ unsigned long flags;
+ int err = 0;
+
+ pr_debug(DRV_NAME ": pcistub_init_devices_late\n");
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ while (!list_empty(&seized_devices)) {
+ psdev = container_of(seized_devices.next,
+ struct pcistub_device, dev_list);
+ list_del(&psdev->dev_list);
+
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+
+ err = pcistub_init_device(psdev->dev);
+ if (err) {
+ dev_err(&psdev->dev->dev,
+ "error %d initializing device\n", err);
+ kfree(psdev);
+ psdev = NULL;
+ }
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ if (psdev)
+ list_add_tail(&psdev->dev_list, &pcistub_devices);
+ }
+
+ initialize_devices = 1;
+
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+
+ return 0;
+}
+
+static int __devinit pcistub_seize(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+ unsigned long flags;
+ int err = 0;
+
+ psdev = pcistub_device_alloc(dev);
+ if (!psdev)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ if (initialize_devices) {
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+
+ /* don't want irqs disabled when calling pcistub_init_device */
+ err = pcistub_init_device(psdev->dev);
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ if (!err)
+ list_add(&psdev->dev_list, &pcistub_devices);
+ } else {
+ dev_dbg(&dev->dev, "deferring initialization\n");
+ list_add(&psdev->dev_list, &seized_devices);
+ }
+
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+
+ if (err)
+ pcistub_device_put(psdev);
+
+ return err;
+}
+
+static int __devinit pcistub_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int err = 0;
+
+ dev_dbg(&dev->dev, "probing...\n");
+
+ if (pcistub_match(dev)) {
+
+ if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
+ && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
+ dev_err(&dev->dev, "can't export pci devices that "
+ "don't have a normal (0) or bridge (1) "
+ "header type!\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ dev_info(&dev->dev, "seizing device\n");
+ err = pcistub_seize(dev);
+ } else
+ /* Didn't find the device */
+ err = -ENODEV;
+
+out:
+ return err;
+}
+
+static void pcistub_remove(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev, *found_psdev = NULL;
+ unsigned long flags;
+
+ dev_dbg(&dev->dev, "removing\n");
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ xen_pcibk_config_quirk_release(dev);
+
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (psdev->dev == dev) {
+ found_psdev = psdev;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+
+ if (found_psdev) {
+ dev_dbg(&dev->dev, "found device to remove - in use? %p\n",
+ found_psdev->pdev);
+
+ if (found_psdev->pdev) {
+ printk(KERN_WARNING DRV_NAME ": ****** removing device "
+ "%s while still in-use! ******\n",
+ pci_name(found_psdev->dev));
+ printk(KERN_WARNING DRV_NAME ": ****** driver domain may"
+ " still access this device's i/o resources!\n");
+ printk(KERN_WARNING DRV_NAME ": ****** shutdown driver "
+ "domain before binding device\n");
+ printk(KERN_WARNING DRV_NAME ": ****** to other drivers "
+ "or domains\n");
+
+ xen_pcibk_release_pci_dev(found_psdev->pdev,
+ found_psdev->dev);
+ }
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+ list_del(&found_psdev->dev_list);
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+
+ /* the final put for releasing from the list */
+ pcistub_device_put(found_psdev);
+ }
+}
+
+static DEFINE_PCI_DEVICE_TABLE(pcistub_ids) = {
+ {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {0,},
+};
+
+#define PCI_NODENAME_MAX 40
+static void kill_domain_by_device(struct pcistub_device *psdev)
+{
+ struct xenbus_transaction xbt;
+ int err;
+ char nodename[PCI_NODENAME_MAX];
+
+ if (!psdev)
+ dev_err(&psdev->dev->dev,
+ "device is NULL when do AER recovery/kill_domain\n");
+ snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
+ psdev->pdev->xdev->otherend_id);
+ nodename[strlen(nodename)] = '\0';
+
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ dev_err(&psdev->dev->dev,
+ "error %d when start xenbus transaction\n", err);
+ return;
+ }
+ /*PV AER handlers will set this flag*/
+ xenbus_printf(xbt, nodename, "aerState" , "aerfail");
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ dev_err(&psdev->dev->dev,
+ "error %d when end xenbus transaction\n", err);
+ return;
+ }
+}
+
+/* For each AER recovery step (error_detected, mmio_enabled, etc.) the
+ * frontend and the backend need to cooperate. In xen_pcibk, each step does a
+ * similar job: send a service request and wait for the frontend's response.
+ */
+static pci_ers_result_t common_process(struct pcistub_device *psdev,
+ pci_channel_state_t state, int aer_cmd,
+ pci_ers_result_t result)
+{
+ pci_ers_result_t res = result;
+ struct xen_pcie_aer_op *aer_op;
+ int ret;
+
+ /* With PV AER drivers */
+ aer_op = &(psdev->pdev->sh_info->aer_op);
+ aer_op->cmd = aer_cmd;
+ /* Useful for the error_detected callback */
+ aer_op->err = state;
+ /* pcifront BDF */
+ ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev,
+ &aer_op->domain, &aer_op->bus, &aer_op->devfn);
+ if (!ret) {
+ dev_err(&psdev->dev->dev,
+ DRV_NAME ": failed to get pcifront device\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+ wmb();
+
+ dev_dbg(&psdev->dev->dev,
+ DRV_NAME ": aer_op %x dom %x bus %x devfn %x\n",
+ aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);
+ /* Local flag to mark that an AER request is pending; the xen_pcibk
+ * callback uses it to decide whether to check for pcifront's AER
+ * service ack signal.
+ */
+ set_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
+
+ /* It is possible that a pcifront conf_read_write op invokes the
+ * callback and causes a spurious wake_up. That is harmless and
+ * better than taking a spinlock here.
+ */
+ set_bit(_XEN_PCIB_active,
+ (unsigned long *)&psdev->pdev->sh_info->flags);
+ wmb();
+ notify_remote_via_irq(psdev->pdev->evtchn_irq);
+
+ ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
+ !(test_bit(_XEN_PCIB_active, (unsigned long *)
+ &psdev->pdev->sh_info->flags)), 300*HZ);
+
+ if (!ret) {
+ if (test_bit(_XEN_PCIB_active,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_err(&psdev->dev->dev,
+ "pcifront aer process not responding!\n");
+ clear_bit(_XEN_PCIB_active,
+ (unsigned long *)&psdev->pdev->sh_info->flags);
+ aer_op->err = PCI_ERS_RESULT_NONE;
+ return res;
+ }
+ }
+ clear_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
+
+ if (test_bit(_XEN_PCIF_active,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_dbg(&psdev->dev->dev,
+ "schedule pci_conf service in xen_pcibk\n");
+ xen_pcibk_test_and_schedule_op(psdev->pdev);
+ }
+
+ res = (pci_ers_result_t)aer_op->err;
+ return res;
+}
+
+/*
+ * xen_pcibk_slot_reset: send the slot_reset request to pcifront, in case the
+ * device driver can provide this service, and then wait for pcifront's ack.
+ * @dev: pointer to the PCI device
+ *
+ * The return value is used by the AER core's do_recovery policy.
+ */
+static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+ pci_ers_result_t result;
+
+ result = PCI_ERS_RESULT_RECOVERED;
+ dev_dbg(&dev->dev, "xen_pcibk_slot_reset(bus:%x,devfn:%x)\n",
+ dev->bus->number, dev->devfn);
+
+ down_write(&pcistub_sem);
+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
+ dev->bus->number,
+ PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+
+ if (!psdev || !psdev->pdev) {
+ dev_err(&dev->dev,
+ DRV_NAME " device is not found/assigned\n");
+ goto end;
+ }
+
+ if (!psdev->pdev->sh_info) {
+ dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
+ " by HVM, kill it\n");
+ kill_domain_by_device(psdev);
+ goto release;
+ }
+
+ if (!test_bit(_XEN_PCIB_AERHANDLER,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_err(&dev->dev,
+ "guest with no AER driver should have been killed\n");
+ goto release;
+ }
+ result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);
+
+ if (result == PCI_ERS_RESULT_NONE ||
+ result == PCI_ERS_RESULT_DISCONNECT) {
+ dev_dbg(&dev->dev,
+ "No AER slot_reset service or disconnected!\n");
+ kill_domain_by_device(psdev);
+ }
+release:
+ pcistub_device_put(psdev);
+end:
+ up_write(&pcistub_sem);
+ return result;
+
+}
+
+
+/* xen_pcibk_mmio_enabled: send the mmio_enabled request to pcifront, in case
+ * the device driver can provide this service, and then wait for pcifront's
+ * ack.
+ * @dev: pointer to the PCI device
+ * The return value is used by the AER core's do_recovery policy.
+ */
+
+static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+ pci_ers_result_t result;
+
+ result = PCI_ERS_RESULT_RECOVERED;
+ dev_dbg(&dev->dev, "xen_pcibk_mmio_enabled(bus:%x,devfn:%x)\n",
+ dev->bus->number, dev->devfn);
+
+ down_write(&pcistub_sem);
+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
+ dev->bus->number,
+ PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+
+ if (!psdev || !psdev->pdev) {
+ dev_err(&dev->dev,
+ DRV_NAME " device is not found/assigned\n");
+ goto end;
+ }
+
+ if (!psdev->pdev->sh_info) {
+ dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
+ " by HVM, kill it\n");
+ kill_domain_by_device(psdev);
+ goto release;
+ }
+
+ if (!test_bit(_XEN_PCIB_AERHANDLER,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_err(&dev->dev,
+ "guest with no AER driver should have been killed\n");
+ goto release;
+ }
+ result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);
+
+ if (result == PCI_ERS_RESULT_NONE ||
+ result == PCI_ERS_RESULT_DISCONNECT) {
+ dev_dbg(&dev->dev,
+ "No AER mmio_enabled service or disconnected!\n");
+ kill_domain_by_device(psdev);
+ }
+release:
+ pcistub_device_put(psdev);
+end:
+ up_write(&pcistub_sem);
+ return result;
+}
+
+/* xen_pcibk_error_detected: send the error_detected request to pcifront, in
+ * case the device driver can provide this service, and then wait for
+ * pcifront's ack.
+ * @dev: pointer to the PCI device
+ * @error: the current PCI connection state
+ * The return value is used by the AER core's do_recovery policy.
+ */
+
+static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
+ pci_channel_state_t error)
+{
+ struct pcistub_device *psdev;
+ pci_ers_result_t result;
+
+ result = PCI_ERS_RESULT_CAN_RECOVER;
+ dev_dbg(&dev->dev, "xen_pcibk_error_detected(bus:%x,devfn:%x)\n",
+ dev->bus->number, dev->devfn);
+
+ down_write(&pcistub_sem);
+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
+ dev->bus->number,
+ PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+
+ if (!psdev || !psdev->pdev) {
+ dev_err(&dev->dev,
+ DRV_NAME " device is not found/assigned\n");
+ goto end;
+ }
+
+ if (!psdev->pdev->sh_info) {
+ dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
+ " by HVM, kill it\n");
+ kill_domain_by_device(psdev);
+ goto release;
+ }
+
+ /* The guest owns the device but has registered no AER handler; kill it */
+ if (!test_bit(_XEN_PCIB_AERHANDLER,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
+ kill_domain_by_device(psdev);
+ goto release;
+ }
+ result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);
+
+ if (result == PCI_ERS_RESULT_NONE ||
+ result == PCI_ERS_RESULT_DISCONNECT) {
+ dev_dbg(&dev->dev,
+ "No AER error_detected service or disconnected!\n");
+ kill_domain_by_device(psdev);
+ }
+release:
+ pcistub_device_put(psdev);
+end:
+ up_write(&pcistub_sem);
+ return result;
+}
+
+/* xen_pcibk_error_resume: send the error_resume request to pcifront, in case
+ * the device driver can provide this service, and then wait for pcifront's
+ * ack.
+ * @dev: pointer to the PCI device
+ */
+
+static void xen_pcibk_error_resume(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+
+ dev_dbg(&dev->dev, "xen_pcibk_error_resume(bus:%x,devfn:%x)\n",
+ dev->bus->number, dev->devfn);
+
+ down_write(&pcistub_sem);
+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
+ dev->bus->number,
+ PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+
+ if (!psdev || !psdev->pdev) {
+ dev_err(&dev->dev,
+ DRV_NAME " device is not found/assigned\n");
+ goto end;
+ }
+
+ if (!psdev->pdev->sh_info) {
+ dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
+ " by HVM, kill it\n");
+ kill_domain_by_device(psdev);
+ goto release;
+ }
+
+ if (!test_bit(_XEN_PCIB_AERHANDLER,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_err(&dev->dev,
+ "guest with no AER driver should have been killed\n");
+ kill_domain_by_device(psdev);
+ goto release;
+ }
+ common_process(psdev, 1, XEN_PCI_OP_aer_resume,
+ PCI_ERS_RESULT_RECOVERED);
+release:
+ pcistub_device_put(psdev);
+end:
+ up_write(&pcistub_sem);
+ return;
+}
+
+/*add xen_pcibk AER handling*/
+static struct pci_error_handlers xen_pcibk_error_handler = {
+ .error_detected = xen_pcibk_error_detected,
+ .mmio_enabled = xen_pcibk_mmio_enabled,
+ .slot_reset = xen_pcibk_slot_reset,
+ .resume = xen_pcibk_error_resume,
+};
+
+/*
+ * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
+ * for a normal device. I don't want it to be loaded automatically.
+ */
+
+static struct pci_driver xen_pcibk_pci_driver = {
+ /* The name should be xen_pciback, but until the tools are updated
+ * we will keep it as pciback. */
+ .name = "pciback",
+ .id_table = pcistub_ids,
+ .probe = pcistub_probe,
+ .remove = pcistub_remove,
+ .err_handler = &xen_pcibk_error_handler,
+};
+
+static inline int str_to_slot(const char *buf, int *domain, int *bus,
+ int *slot, int *func)
+{
+ int err;
+
+ err = sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func);
+ if (err == 4)
+ return 0;
+ else if (err < 0)
+ return -EINVAL;
+
+ /* try again without domain */
+ *domain = 0;
+ err = sscanf(buf, " %x:%x.%x", bus, slot, func);
+ if (err == 3)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
+ *slot, int *func, int *reg, int *size, int *mask)
+{
+ int err;
+
+ err =
+ sscanf(buf, " %04x:%02x:%02x.%1x-%08x:%1x:%08x", domain, bus, slot,
+ func, reg, size, mask);
+ if (err == 7)
+ return 0;
+ return -EINVAL;
+}
+
+static int pcistub_device_id_add(int domain, int bus, int slot, int func)
+{
+ struct pcistub_device_id *pci_dev_id;
+ unsigned long flags;
+
+ pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
+ if (!pci_dev_id)
+ return -ENOMEM;
+
+ pci_dev_id->domain = domain;
+ pci_dev_id->bus = bus;
+ pci_dev_id->devfn = PCI_DEVFN(slot, func);
+
+ pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%01x\n",
+ domain, bus, slot, func);
+
+ spin_lock_irqsave(&device_ids_lock, flags);
+ list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids);
+ spin_unlock_irqrestore(&device_ids_lock, flags);
+
+ return 0;
+}
+
+static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
+{
+ struct pcistub_device_id *pci_dev_id, *t;
+ int devfn = PCI_DEVFN(slot, func);
+ int err = -ENOENT;
+ unsigned long flags;
+
+ spin_lock_irqsave(&device_ids_lock, flags);
+ list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
+ slot_list) {
+ if (pci_dev_id->domain == domain
+ && pci_dev_id->bus == bus && pci_dev_id->devfn == devfn) {
+ /* Don't break; here because it's possible the same
+ * slot could be in the list more than once
+ */
+ list_del(&pci_dev_id->slot_list);
+ kfree(pci_dev_id);
+
+ err = 0;
+
+ pr_debug(DRV_NAME ": removed %04x:%02x:%02x.%01x from "
+ "seize list\n", domain, bus, slot, func);
+ }
+ }
+ spin_unlock_irqrestore(&device_ids_lock, flags);
+
+ return err;
+}
+
+static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
+ int size, int mask)
+{
+ int err = 0;
+ struct pcistub_device *psdev;
+ struct pci_dev *dev;
+ struct config_field *field;
+
+ psdev = pcistub_device_find(domain, bus, slot, func);
+ if (!psdev || !psdev->dev) {
+ err = -ENODEV;
+ goto out;
+ }
+ dev = psdev->dev;
+
+ field = kzalloc(sizeof(*field), GFP_ATOMIC);
+ if (!field) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ field->offset = reg;
+ field->size = size;
+ field->mask = mask;
+ field->init = NULL;
+ field->reset = NULL;
+ field->release = NULL;
+ field->clean = xen_pcibk_config_field_free;
+
+ err = xen_pcibk_config_quirks_add_field(dev, field);
+ if (err)
+ kfree(field);
+out:
+ return err;
+}
+
+static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ int domain, bus, slot, func;
+ int err;
+
+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
+ if (err)
+ goto out;
+
+ err = pcistub_device_id_add(domain, bus, slot, func);
+
+out:
+ if (!err)
+ err = count;
+ return err;
+}
+
+DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
+
+static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ int domain, bus, slot, func;
+ int err;
+
+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
+ if (err)
+ goto out;
+
+ err = pcistub_device_id_remove(domain, bus, slot, func);
+
+out:
+ if (!err)
+ err = count;
+ return err;
+}
+
+DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
+
+static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
+{
+ struct pcistub_device_id *pci_dev_id;
+ size_t count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&device_ids_lock, flags);
+ list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
+ if (count >= PAGE_SIZE)
+ break;
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "%04x:%02x:%02x.%01x\n",
+ pci_dev_id->domain, pci_dev_id->bus,
+ PCI_SLOT(pci_dev_id->devfn),
+ PCI_FUNC(pci_dev_id->devfn));
+ }
+ spin_unlock_irqrestore(&device_ids_lock, flags);
+
+ return count;
+}
+
+DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
+
+static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
+{
+ struct pcistub_device *psdev;
+ struct xen_pcibk_dev_data *dev_data;
+ size_t count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (count >= PAGE_SIZE)
+ break;
+ if (!psdev->dev)
+ continue;
+ dev_data = pci_get_drvdata(psdev->dev);
+ if (!dev_data)
+ continue;
+ count +=
+ scnprintf(buf + count, PAGE_SIZE - count,
+ "%s:%s:%sing:%ld\n",
+ pci_name(psdev->dev),
+ dev_data->isr_on ? "on" : "off",
+ dev_data->ack_intr ? "ack" : "not ack",
+ dev_data->handled);
+ }
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ return count;
+}
+
+DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);
+
+static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
+ const char *buf,
+ size_t count)
+{
+ struct pcistub_device *psdev;
+ struct xen_pcibk_dev_data *dev_data;
+ int domain, bus, slot, func;
+ int err = -ENOENT;
+
+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
+ if (err)
+ goto out;
+
+ psdev = pcistub_device_find(domain, bus, slot, func);
+
+ if (!psdev)
+ goto out;
+
+ dev_data = pci_get_drvdata(psdev->dev);
+ if (!dev_data)
+ goto out;
+
+ dev_dbg(&psdev->dev->dev, "%s fake irq handler: %d->%d\n",
+ dev_data->irq_name, dev_data->isr_on,
+ !dev_data->isr_on);
+
+ dev_data->isr_on = !(dev_data->isr_on);
+ if (dev_data->isr_on)
+ dev_data->ack_intr = 1;
+out:
+ if (!err)
+ err = count;
+ return err;
+}
+DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL, pcistub_irq_handler_switch);
+
+static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ int domain, bus, slot, func, reg, size, mask;
+ int err;
+
+ err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
+ &mask);
+ if (err)
+ goto out;
+
+ err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
+
+out:
+ if (!err)
+ err = count;
+ return err;
+}
+
+static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
+{
+ int count = 0;
+ unsigned long flags;
+ struct xen_pcibk_config_quirk *quirk;
+ struct xen_pcibk_dev_data *dev_data;
+ const struct config_field *field;
+ const struct config_field_entry *cfg_entry;
+
+ spin_lock_irqsave(&device_ids_lock, flags);
+ list_for_each_entry(quirk, &xen_pcibk_quirks, quirks_list) {
+ if (count >= PAGE_SIZE)
+ goto out;
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
+ quirk->pdev->bus->number,
+ PCI_SLOT(quirk->pdev->devfn),
+ PCI_FUNC(quirk->pdev->devfn),
+ quirk->devid.vendor, quirk->devid.device,
+ quirk->devid.subvendor,
+ quirk->devid.subdevice);
+
+ dev_data = pci_get_drvdata(quirk->pdev);
+
+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
+ field = cfg_entry->field;
+ if (count >= PAGE_SIZE)
+ goto out;
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "\t\t%08x:%01x:%08x\n",
+ cfg_entry->base_offset +
+ field->offset, field->size,
+ field->mask);
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&device_ids_lock, flags);
+
+ return count;
+}
+
+DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add);
+
+static ssize_t permissive_add(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ int domain, bus, slot, func;
+ int err;
+ struct pcistub_device *psdev;
+ struct xen_pcibk_dev_data *dev_data;
+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
+ if (err)
+ goto out;
+ psdev = pcistub_device_find(domain, bus, slot, func);
+ if (!psdev) {
+ err = -ENODEV;
+ goto out;
+ }
+ if (!psdev->dev) {
+ err = -ENODEV;
+ goto release;
+ }
+ dev_data = pci_get_drvdata(psdev->dev);
+ /* the driver data for a device should never be null at this point */
+ if (!dev_data) {
+ err = -ENXIO;
+ goto release;
+ }
+ if (!dev_data->permissive) {
+ dev_data->permissive = 1;
+ /* Let user know that what they're doing could be unsafe */
+ dev_warn(&psdev->dev->dev, "enabling permissive mode "
+ "configuration space accesses!\n");
+ dev_warn(&psdev->dev->dev,
+ "permissive mode is potentially unsafe!\n");
+ }
+release:
+ pcistub_device_put(psdev);
+out:
+ if (!err)
+ err = count;
+ return err;
+}
+
+static ssize_t permissive_show(struct device_driver *drv, char *buf)
+{
+ struct pcistub_device *psdev;
+ struct xen_pcibk_dev_data *dev_data;
+ size_t count = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (count >= PAGE_SIZE)
+ break;
+ if (!psdev->dev)
+ continue;
+ dev_data = pci_get_drvdata(psdev->dev);
+ if (!dev_data || !dev_data->permissive)
+ continue;
+ count +=
+ scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
+ pci_name(psdev->dev));
+ }
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ return count;
+}
+
+DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add);
+
+static void pcistub_exit(void)
+{
+ driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
+ driver_remove_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_remove_slot);
+ driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_slots);
+ driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks);
+ driver_remove_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_permissive);
+ driver_remove_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_irq_handlers);
+ driver_remove_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_irq_handler_state);
+ pci_unregister_driver(&xen_pcibk_pci_driver);
+}
+
+static int __init pcistub_init(void)
+{
+ int pos = 0;
+ int err = 0;
+ int domain, bus, slot, func;
+ int parsed;
+
+ if (pci_devs_to_hide && *pci_devs_to_hide) {
+ do {
+ parsed = 0;
+
+ err = sscanf(pci_devs_to_hide + pos,
+ " (%x:%x:%x.%x) %n",
+ &domain, &bus, &slot, &func, &parsed);
+ if (err != 4) {
+ domain = 0;
+ err = sscanf(pci_devs_to_hide + pos,
+ " (%x:%x.%x) %n",
+ &bus, &slot, &func, &parsed);
+ if (err != 3)
+ goto parse_error;
+ }
+
+ err = pcistub_device_id_add(domain, bus, slot, func);
+ if (err)
+ goto out;
+
+ /* if parsed<=0, we've reached the end of the string */
+ pos += parsed;
+ } while (parsed > 0 && pci_devs_to_hide[pos]);
+ }
+
+ /* If we're the first PCI Device Driver to register, we're the
+ * first one to get offered PCI devices as they become
+ * available (and thus we can be the first to grab them)
+ */
+ err = pci_register_driver(&xen_pcibk_pci_driver);
+ if (err < 0)
+ goto out;
+
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_new_slot);
+ if (!err)
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_remove_slot);
+ if (!err)
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_slots);
+ if (!err)
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_quirks);
+ if (!err)
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_permissive);
+
+ if (!err)
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_irq_handlers);
+ if (!err)
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_irq_handler_state);
+ if (err)
+ pcistub_exit();
+
+out:
+ return err;
+
+parse_error:
+ printk(KERN_ERR DRV_NAME ": Error parsing pci_devs_to_hide at \"%s\"\n",
+ pci_devs_to_hide + pos);
+ return -EINVAL;
+}
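+/* Illustrative note (not part of this patch): pci_devs_to_hide is the
+ * driver's "hide" parameter, parsed by the sscanf() calls above. Each entry
+ * is a parenthesised BDF with an optional domain, e.g. on the kernel
+ * command line:
+ *
+ *   xen-pciback.hide=(03:00.0)(0000:04:1f.2)
+ */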
+
+#ifndef MODULE
+/*
+ * fs_initcall happens before device_initcall
+ * so xen_pcibk *should* get called first (b/c we
+ * want to suck up any device before other drivers
+ * get a chance by being the first pci device
+ * driver to register)
+ */
+fs_initcall(pcistub_init);
+#endif
+
+static int __init xen_pcibk_init(void)
+{
+ int err;
+
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+ err = xen_pcibk_config_init();
+ if (err)
+ return err;
+
+#ifdef MODULE
+ err = pcistub_init();
+ if (err < 0)
+ return err;
+#endif
+
+ pcistub_init_devices_late();
+ err = xen_pcibk_xenbus_register();
+ if (err)
+ pcistub_exit();
+
+ return err;
+}
+
+static void __exit xen_pcibk_cleanup(void)
+{
+ xen_pcibk_xenbus_unregister();
+ pcistub_exit();
+}
+
+module_init(xen_pcibk_init);
+module_exit(xen_pcibk_cleanup);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
new file mode 100644
index 00000000000..a0e131a8150
--- /dev/null
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -0,0 +1,183 @@
+/*
+ * PCI Backend Common Data Structures & Function Declarations
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+#ifndef __XEN_PCIBACK_H__
+#define __XEN_PCIBACK_H__
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <xen/xenbus.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <xen/interface/io/pciif.h>
+
+struct pci_dev_entry {
+ struct list_head list;
+ struct pci_dev *dev;
+};
+
+#define _PDEVF_op_active (0)
+#define PDEVF_op_active (1<<(_PDEVF_op_active))
+#define _PCIB_op_pending (1)
+#define PCIB_op_pending (1<<(_PCIB_op_pending))
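+/* The leading-underscore macros above are bit numbers intended for the
+ * test_bit()/set_bit() family operating on pdev->flags; the corresponding
+ * macros without the underscore are the same bits expressed as masks. */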
+
+struct xen_pcibk_device {
+ void *pci_dev_data;
+ spinlock_t dev_lock;
+ struct xenbus_device *xdev;
+ struct xenbus_watch be_watch;
+ u8 be_watching;
+ int evtchn_irq;
+ struct xen_pci_sharedinfo *sh_info;
+ unsigned long flags;
+ struct work_struct op_work;
+};
+
+struct xen_pcibk_dev_data {
+ struct list_head config_fields;
+ unsigned int permissive:1;
+ unsigned int warned_on_write:1;
+ unsigned int enable_intx:1;
+ unsigned int isr_on:1; /* Whether the IRQ handler is installed. */
+ unsigned int ack_intr:1; /* .. and whether we are ACK-ing the IRQ. */
+ unsigned long handled;
+ unsigned int irq; /* Saved in case device transitions to MSI/MSI-X */
+ char irq_name[0]; /* xen-pcibk[0000:04:00.0] */
+};
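+/* Sketch of how irq_name is sized (illustrative; the real allocation lives
+ * in pcistub.c):
+ *
+ *   dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
+ *                      + strlen(pci_name(dev)) + 1, ...);
+ *   sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev));
+ */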
+
+/* Used by XenBus and xen_pcibk_ops.c */
+extern wait_queue_head_t xen_pcibk_aer_wait_queue;
+extern struct workqueue_struct *xen_pcibk_wq;
+/* Used by pcistub.c and conf_space_quirks.c */
+extern struct list_head xen_pcibk_quirks;
+
+/* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
+struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
+ int domain, int bus,
+ int slot, int func);
+struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev);
+void pcistub_put_pci_dev(struct pci_dev *dev);
+
+/* Ensure a device is turned off or reset */
+void xen_pcibk_reset_device(struct pci_dev *pdev);
+
+/* Access a virtual configuration space for a PCI device */
+int xen_pcibk_config_init(void);
+int xen_pcibk_config_init_dev(struct pci_dev *dev);
+void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev);
+void xen_pcibk_config_reset_dev(struct pci_dev *dev);
+void xen_pcibk_config_free_dev(struct pci_dev *dev);
+int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
+ u32 *ret_val);
+int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size,
+ u32 value);
+
+/* Handle requests for specific devices from the frontend */
+typedef int (*publish_pci_dev_cb) (struct xen_pcibk_device *pdev,
+ unsigned int domain, unsigned int bus,
+ unsigned int devfn, unsigned int devid);
+typedef int (*publish_pci_root_cb) (struct xen_pcibk_device *pdev,
+ unsigned int domain, unsigned int bus);
+
+/* Backend registration for the two types of BDF representation:
+ * vpci - BDFs start at 00
+ * passthrough - BDFs are exactly like in the host.
+ */
+struct xen_pcibk_backend {
+ char *name;
+ int (*init)(struct xen_pcibk_device *pdev);
+ void (*free)(struct xen_pcibk_device *pdev);
+ int (*find)(struct pci_dev *pcidev, struct xen_pcibk_device *pdev,
+ unsigned int *domain, unsigned int *bus,
+ unsigned int *devfn);
+ int (*publish)(struct xen_pcibk_device *pdev, publish_pci_root_cb cb);
+ void (*release)(struct xen_pcibk_device *pdev, struct pci_dev *dev);
+ int (*add)(struct xen_pcibk_device *pdev, struct pci_dev *dev,
+ int devid, publish_pci_dev_cb publish_cb);
+ struct pci_dev *(*get)(struct xen_pcibk_device *pdev,
+ unsigned int domain, unsigned int bus,
+ unsigned int devfn);
+};
+
+extern struct xen_pcibk_backend xen_pcibk_vpci_backend;
+extern struct xen_pcibk_backend xen_pcibk_passthrough_backend;
+extern struct xen_pcibk_backend *xen_pcibk_backend;
+
+static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev,
+ int devid,
+ publish_pci_dev_cb publish_cb)
+{
+ if (xen_pcibk_backend && xen_pcibk_backend->add)
+ return xen_pcibk_backend->add(pdev, dev, devid, publish_cb);
+ return -1;
+};
+static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev)
+{
+ if (xen_pcibk_backend && xen_pcibk_backend->release)
+ return xen_pcibk_backend->release(pdev, dev);
+};
+
+static inline struct pci_dev *
+xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain,
+ unsigned int bus, unsigned int devfn)
+{
+ if (xen_pcibk_backend && xen_pcibk_backend->get)
+ return xen_pcibk_backend->get(pdev, domain, bus, devfn);
+ return NULL;
+};
+/**
+* Added for domain0 PCIe AER handling. Get the guest domain/bus/devfn in
+* xen_pcibk before sending an AER request to pcifront, so that the guest can
+* identify the device and cooperate with xen_pcibk to finish the AER recovery
+* job, provided the device driver has the capability.
+*/
+static inline int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
+ struct xen_pcibk_device *pdev,
+ unsigned int *domain,
+ unsigned int *bus,
+ unsigned int *devfn)
+{
+ if (xen_pcibk_backend && xen_pcibk_backend->find)
+ return xen_pcibk_backend->find(pcidev, pdev, domain, bus,
+ devfn);
+ return -1;
+};
+static inline int xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
+{
+ if (xen_pcibk_backend && xen_pcibk_backend->init)
+ return xen_pcibk_backend->init(pdev);
+ return -1;
+};
+static inline int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
+ publish_pci_root_cb cb)
+{
+ if (xen_pcibk_backend && xen_pcibk_backend->publish)
+ return xen_pcibk_backend->publish(pdev, cb);
+ return -1;
+};
+static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
+{
+ if (xen_pcibk_backend && xen_pcibk_backend->free)
+ return xen_pcibk_backend->free(pdev);
+};
+/* Handles events from front-end */
+irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);
+void xen_pcibk_do_op(struct work_struct *data);
+
+int xen_pcibk_xenbus_register(void);
+void xen_pcibk_xenbus_unregister(void);
+
+extern int verbose_request;
+
+void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev);
+#endif
+
+/* Handles shared IRQs that can go to the device domain and the control domain. */
+void xen_pcibk_irq_handler(struct pci_dev *dev, int reset);
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
new file mode 100644
index 00000000000..8c95c3415b7
--- /dev/null
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -0,0 +1,384 @@
+/*
+ * PCI Backend Operations - respond to PCI requests from Frontend
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <xen/events.h>
+#include <linux/sched.h>
+#include "pciback.h"
+
+#define DRV_NAME "xen-pciback"
+int verbose_request;
+module_param(verbose_request, int, 0644);
+
+static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id);
+
+/* Ensure a device has the fake IRQ handler "turned on/off" and is
+ * ready to be exported. This MUST be run after xen_pcibk_reset_device
+ * which does the actual PCI device enable/disable.
+ */
+static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
+{
+ struct xen_pcibk_dev_data *dev_data;
+ int rc;
+ int enable = 0;
+
+ dev_data = pci_get_drvdata(dev);
+ if (!dev_data)
+ return;
+
+ /* We don't deal with bridges */
+ if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
+ return;
+
+ if (reset) {
+ dev_data->enable_intx = 0;
+ dev_data->ack_intr = 0;
+ }
+ enable = dev_data->enable_intx;
+
+ /* Asked to disable, but the ISR isn't running */
+ if (!enable && !dev_data->isr_on)
+ return;
+
+ /* Squirrel away the IRQ in the dev_data. We need this
+ * b/c when the device transitions to MSI, the dev->irq is
+ * overwritten with the MSI vector.
+ */
+ if (enable)
+ dev_data->irq = dev->irq;
+
+ /*
+ * SR-IOV devices generally use MSI-X and have no legacy
+ * interrupts, so inhibit creating a fake IRQ handler for them.
+ */
+ if (dev_data->irq == 0)
+ goto out;
+
+ dev_dbg(&dev->dev, "%s: #%d %s %s%s %s-> %s\n",
+ dev_data->irq_name,
+ dev_data->irq,
+ pci_is_enabled(dev) ? "on" : "off",
+ dev->msi_enabled ? "MSI" : "",
+ dev->msix_enabled ? "MSI/X" : "",
+ dev_data->isr_on ? "enable" : "disable",
+ enable ? "enable" : "disable");
+
+ if (enable) {
+ rc = request_irq(dev_data->irq,
+ xen_pcibk_guest_interrupt, IRQF_SHARED,
+ dev_data->irq_name, dev);
+ if (rc) {
+ dev_err(&dev->dev, "%s: failed to install fake IRQ " \
+ "handler for IRQ %d! (rc:%d)\n",
+ dev_data->irq_name, dev_data->irq, rc);
+ goto out;
+ }
+ } else {
+ free_irq(dev_data->irq, dev);
+ dev_data->irq = 0;
+ }
+ dev_data->isr_on = enable;
+ dev_data->ack_intr = enable;
+out:
+ dev_dbg(&dev->dev, "%s: #%d %s %s%s %s\n",
+ dev_data->irq_name,
+ dev_data->irq,
+ pci_is_enabled(dev) ? "on" : "off",
+ dev->msi_enabled ? "MSI" : "",
+ dev->msix_enabled ? "MSI/X" : "",
+ enable ? (dev_data->isr_on ? "enabled" : "failed to enable") :
+ (dev_data->isr_on ? "failed to disable" : "disabled"));
+}
+
+/* Ensure a device is "turned off" and ready to be exported.
+ * (Also see xen_pcibk_config_reset to ensure virtual configuration space is
+ * ready to be re-exported)
+ */
+void xen_pcibk_reset_device(struct pci_dev *dev)
+{
+ u16 cmd;
+
+ xen_pcibk_control_isr(dev, 1 /* reset device */);
+
+ /* Disable devices (but not bridges) */
+ if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
+#ifdef CONFIG_PCI_MSI
+ /* The guest could have been abruptly killed without
+ * disabling MSI/MSI-X interrupts. */
+ if (dev->msix_enabled)
+ pci_disable_msix(dev);
+ if (dev->msi_enabled)
+ pci_disable_msi(dev);
+#endif
+ pci_disable_device(dev);
+
+ pci_write_config_word(dev, PCI_COMMAND, 0);
+
+ dev->is_busmaster = 0;
+ } else {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & (PCI_COMMAND_INVALIDATE)) {
+ cmd &= ~(PCI_COMMAND_INVALIDATE);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+ dev->is_busmaster = 0;
+ }
+ }
+}
+
+#ifdef CONFIG_PCI_MSI
+static
+int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev, struct xen_pci_op *op)
+{
+ struct xen_pcibk_dev_data *dev_data;
+ int otherend = pdev->xdev->otherend_id;
+ int status;
+
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));
+
+ status = pci_enable_msi(dev);
+
+ if (status) {
+ printk(KERN_ERR "error enable msi for guest %x status %x\n",
+ otherend, status);
+ op->value = 0;
+ return XEN_PCI_ERR_op_failed;
+ }
+
+ /* The value the guest needs is actually the IDT vector, not
+ * the local domain's IRQ number. */
+
+ op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
+ op->value);
+
+ dev_data = pci_get_drvdata(dev);
+ if (dev_data)
+ dev_data->ack_intr = 0;
+
+ return 0;
+}
+
+static
+int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev, struct xen_pci_op *op)
+{
+ struct xen_pcibk_dev_data *dev_data;
+
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
+ pci_name(dev));
+ pci_disable_msi(dev);
+
+ op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
+ op->value);
+ dev_data = pci_get_drvdata(dev);
+ if (dev_data)
+ dev_data->ack_intr = 1;
+ return 0;
+}
+
+static
+int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev, struct xen_pci_op *op)
+{
+ struct xen_pcibk_dev_data *dev_data;
+ int i, result;
+ struct msix_entry *entries;
+
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
+ pci_name(dev));
+ if (op->value > SH_INFO_MAX_VEC)
+ return -EINVAL;
+
+ entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
+ if (entries == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < op->value; i++) {
+ entries[i].entry = op->msix_entries[i].entry;
+ entries[i].vector = op->msix_entries[i].vector;
+ }
+
+ result = pci_enable_msix(dev, entries, op->value);
+
+ if (result == 0) {
+ for (i = 0; i < op->value; i++) {
+ op->msix_entries[i].entry = entries[i].entry;
+ if (entries[i].vector)
+ op->msix_entries[i].vector =
+ xen_pirq_from_irq(entries[i].vector);
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: " \
+ "MSI-X[%d]: %d\n",
+ pci_name(dev), i,
+ op->msix_entries[i].vector);
+ }
+ } else {
+ printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n",
+ pci_name(dev), result);
+ }
+ kfree(entries);
+
+ op->value = result;
+ dev_data = pci_get_drvdata(dev);
+ if (dev_data)
+ dev_data->ack_intr = 0;
+
+ return result;
+}
+
+static
+int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev, struct xen_pci_op *op)
+{
+ struct xen_pcibk_dev_data *dev_data;
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
+ pci_name(dev));
+ pci_disable_msix(dev);
+
+ /*
+ * SR-IOV devices (which don't have any legacy IRQ) report
+ * an IRQ value of zero.
+ */
+ op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev),
+ op->value);
+ dev_data = pci_get_drvdata(dev);
+ if (dev_data)
+ dev_data->ack_intr = 1;
+ return 0;
+}
+#endif
+/*
+* Now the same evtchn is used for both pcifront conf_read_write requests
+* and PCIe AER front-end acks. We use a dedicated workqueue to schedule the
+* xen_pcibk conf_read_write service, to avoid conflicts with the aer_core
+* do_recovery job, which also uses the system default workqueue.
+*/
+void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
+{
+ /* Check that frontend is requesting an operation and that we are not
+ * already processing a request */
+ if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
+ && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
+ queue_work(xen_pcibk_wq, &pdev->op_work);
+ }
+ /* _XEN_PCIB_active should have been cleared by pcifront. Also make
+ sure xen_pcibk is waiting for an ack by checking _PCIB_op_pending. */
+ if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
+ && test_bit(_PCIB_op_pending, &pdev->flags)) {
+ wake_up(&xen_pcibk_aer_wait_queue);
+ }
+}
+
+/* Performing the configuration space reads/writes must not be done in atomic
+ * context because some of the pci_* functions can sleep (mostly due to ACPI
+ * use of semaphores). This function is intended to be called from a work
+ * queue in process context taking a struct xen_pcibk_device as a parameter */
+
+void xen_pcibk_do_op(struct work_struct *data)
+{
+ struct xen_pcibk_device *pdev =
+ container_of(data, struct xen_pcibk_device, op_work);
+ struct pci_dev *dev;
+ struct xen_pcibk_dev_data *dev_data = NULL;
+ struct xen_pci_op *op = &pdev->sh_info->op;
+ int test_intx = 0;
+
+ dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
+
+ if (dev == NULL)
+ op->err = XEN_PCI_ERR_dev_not_found;
+ else {
+ dev_data = pci_get_drvdata(dev);
+ if (dev_data)
+ test_intx = dev_data->enable_intx;
+ switch (op->cmd) {
+ case XEN_PCI_OP_conf_read:
+ op->err = xen_pcibk_config_read(dev,
+ op->offset, op->size, &op->value);
+ break;
+ case XEN_PCI_OP_conf_write:
+ op->err = xen_pcibk_config_write(dev,
+ op->offset, op->size, op->value);
+ break;
+#ifdef CONFIG_PCI_MSI
+ case XEN_PCI_OP_enable_msi:
+ op->err = xen_pcibk_enable_msi(pdev, dev, op);
+ break;
+ case XEN_PCI_OP_disable_msi:
+ op->err = xen_pcibk_disable_msi(pdev, dev, op);
+ break;
+ case XEN_PCI_OP_enable_msix:
+ op->err = xen_pcibk_enable_msix(pdev, dev, op);
+ break;
+ case XEN_PCI_OP_disable_msix:
+ op->err = xen_pcibk_disable_msix(pdev, dev, op);
+ break;
+#endif
+ default:
+ op->err = XEN_PCI_ERR_not_implemented;
+ break;
+ }
+ }
+ if (!op->err && dev && dev_data) {
+ /* Transition detected */
+ if ((dev_data->enable_intx != test_intx))
+ xen_pcibk_control_isr(dev, 0 /* no reset */);
+ }
+ /* Tell the driver domain that we're done. */
+ wmb();
+ clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
+ notify_remote_via_irq(pdev->evtchn_irq);
+
+ /* Mark that we're done. */
+ smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
+ clear_bit(_PDEVF_op_active, &pdev->flags);
+ smp_mb__after_clear_bit(); /* /before/ final check for work */
+
+ /* Check to see if the driver domain tried to start another request in
+ * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
+ */
+ xen_pcibk_test_and_schedule_op(pdev);
+}
+
+irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id)
+{
+ struct xen_pcibk_device *pdev = dev_id;
+
+ xen_pcibk_test_and_schedule_op(pdev);
+
+ return IRQ_HANDLED;
+}
+static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id)
+{
+ struct pci_dev *dev = (struct pci_dev *)dev_id;
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+
+ if (dev_data->isr_on && dev_data->ack_intr) {
+ dev_data->handled++;
+ if ((dev_data->handled % 1000) == 0) {
+ if (xen_test_irq_shared(irq)) {
+ printk(KERN_INFO "%s IRQ line is not shared "
+ "with other domains. Turning ISR off\n",
+ dev_data->irq_name);
+ dev_data->ack_intr = 0;
+ }
+ }
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
new file mode 100644
index 00000000000..4a42cfb0959
--- /dev/null
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -0,0 +1,259 @@
+/*
+ * PCI Backend - Provides a Virtual PCI bus (with real devices)
+ * to the frontend
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include "pciback.h"
+
+#define PCI_SLOT_MAX 32
+#define DRV_NAME "xen-pciback"
+
+struct vpci_dev_data {
+ /* Access to dev_list must be protected by lock */
+ struct list_head dev_list[PCI_SLOT_MAX];
+ spinlock_t lock;
+};
+
+static inline struct list_head *list_first(struct list_head *head)
+{
+ return head->next;
+}
+
+static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
+ unsigned int domain,
+ unsigned int bus,
+ unsigned int devfn)
+{
+ struct pci_dev_entry *entry;
+ struct pci_dev *dev = NULL;
+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
+ unsigned long flags;
+
+ if (domain != 0 || bus != 0)
+ return NULL;
+
+ if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
+ spin_lock_irqsave(&vpci_dev->lock, flags);
+
+ list_for_each_entry(entry,
+ &vpci_dev->dev_list[PCI_SLOT(devfn)],
+ list) {
+ if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
+ dev = entry->dev;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ }
+ return dev;
+}
+
+static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
+{
+ if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
+ && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
+ return 1;
+
+ return 0;
+}
+
+static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev, int devid,
+ publish_pci_dev_cb publish_cb)
+{
+ int err = 0, slot, func = -1;
+ struct pci_dev_entry *t, *dev_entry;
+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
+ unsigned long flags;
+
+ if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
+ err = -EFAULT;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Can't export bridges on the virtual PCI bus");
+ goto out;
+ }
+
+ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
+ if (!dev_entry) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error adding entry to virtual PCI bus");
+ goto out;
+ }
+
+ dev_entry->dev = dev;
+
+ spin_lock_irqsave(&vpci_dev->lock, flags);
+
+ /* Keep multi-function devices together on the virtual PCI bus */
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ if (!list_empty(&vpci_dev->dev_list[slot])) {
+ t = list_entry(list_first(&vpci_dev->dev_list[slot]),
+ struct pci_dev_entry, list);
+
+ if (match_slot(dev, t->dev)) {
+ pr_info(DRV_NAME ": vpci: %s: "
+ "assign to virtual slot %d func %d\n",
+ pci_name(dev), slot,
+ PCI_FUNC(dev->devfn));
+ list_add_tail(&dev_entry->list,
+ &vpci_dev->dev_list[slot]);
+ func = PCI_FUNC(dev->devfn);
+ goto unlock;
+ }
+ }
+ }
+
+ /* Assign to a new slot on the virtual PCI bus */
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ if (list_empty(&vpci_dev->dev_list[slot])) {
+ printk(KERN_INFO DRV_NAME
+ ": vpci: %s: assign to virtual slot %d\n",
+ pci_name(dev), slot);
+ list_add_tail(&dev_entry->list,
+ &vpci_dev->dev_list[slot]);
+ func = PCI_FUNC(dev->devfn);
+ goto unlock;
+ }
+ }
+
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "No more space on root virtual PCI bus");
+
+unlock:
+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
+
+ /* Publish this device. */
+ if (!err)
+ err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
+
+out:
+ return err;
+}
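+/* Example of the placement policy above (illustrative): if 0000:03:05.0 and
+ * 0000:03:05.1 are both exported, the second matches match_slot() against
+ * the first and lands in the same virtual slot, so the guest sees 00:00.0
+ * and 00:00.1; an unrelated 0000:04:00.0 then takes the next free virtual
+ * slot as 00:01.0.
+ */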
+
+static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev)
+{
+ int slot;
+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
+ struct pci_dev *found_dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpci_dev->lock, flags);
+
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ struct pci_dev_entry *e, *tmp;
+ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
+ list) {
+ if (e->dev == dev) {
+ list_del(&e->list);
+ found_dev = e->dev;
+ kfree(e);
+ goto out;
+ }
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
+
+ if (found_dev)
+ pcistub_put_pci_dev(found_dev);
+}
+
+static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
+{
+ int slot;
+ struct vpci_dev_data *vpci_dev;
+
+ vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
+ if (!vpci_dev)
+ return -ENOMEM;
+
+ spin_lock_init(&vpci_dev->lock);
+
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++)
+ INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
+
+ pdev->pci_dev_data = vpci_dev;
+
+ return 0;
+}
+
+static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
+ publish_pci_root_cb publish_cb)
+{
+ /* The Virtual PCI bus has only one root */
+ return publish_cb(pdev, 0, 0);
+}
+
+static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
+{
+ int slot;
+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
+
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ struct pci_dev_entry *e, *tmp;
+ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
+ list) {
+ list_del(&e->list);
+ pcistub_put_pci_dev(e->dev);
+ kfree(e);
+ }
+ }
+
+ kfree(vpci_dev);
+ pdev->pci_dev_data = NULL;
+}
+
+static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
+ struct xen_pcibk_device *pdev,
+ unsigned int *domain, unsigned int *bus,
+ unsigned int *devfn)
+{
+ struct pci_dev_entry *entry;
+ struct pci_dev *dev = NULL;
+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
+ unsigned long flags;
+ int found = 0, slot;
+
+ spin_lock_irqsave(&vpci_dev->lock, flags);
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ list_for_each_entry(entry,
+ &vpci_dev->dev_list[slot],
+ list) {
+ dev = entry->dev;
+ if (dev && dev->bus->number == pcidev->bus->number
+ && pci_domain_nr(dev->bus) ==
+ pci_domain_nr(pcidev->bus)
+ && dev->devfn == pcidev->devfn) {
+ found = 1;
+ *domain = 0;
+ *bus = 0;
+ *devfn = PCI_DEVFN(slot,
+ PCI_FUNC(pcidev->devfn));
+ }
+ }
+ }
+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ return found;
+}
+
+struct xen_pcibk_backend xen_pcibk_vpci_backend = {
+ .name = "vpci",
+ .init = __xen_pcibk_init_devices,
+ .free = __xen_pcibk_release_devices,
+ .find = __xen_pcibk_get_pcifront_dev,
+ .publish = __xen_pcibk_publish_pci_roots,
+ .release = __xen_pcibk_release_pci_dev,
+ .add = __xen_pcibk_add_pci_dev,
+ .get = __xen_pcibk_get_pci_dev,
+};
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
new file mode 100644
index 00000000000..978d2c6f5dc
--- /dev/null
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -0,0 +1,748 @@
+/*
+ * PCI Backend Xenbus Setup - handles setup with frontend and xend
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <asm/xen/pci.h>
+#include "pciback.h"
+
+#define DRV_NAME "xen-pciback"
+#define INVALID_EVTCHN_IRQ (-1)
+struct workqueue_struct *xen_pcibk_wq;
+
+static int __read_mostly passthrough;
+module_param(passthrough, bool, S_IRUGO);
+MODULE_PARM_DESC(passthrough,
+ "Option to specify how to export PCI topology to guest:\n"\
+ " 0 - (default) Hide the true PCI topology and makes the frontend\n"\
+ " there is a single PCI bus with only the exported devices on it.\n"\
+ " For example, a device at 03:05.0 will be re-assigned to 00:00.0\n"\
+ " while second device at 02:1a.1 will be re-assigned to 00:01.1.\n"\
+ " 1 - Passthrough provides a real view of the PCI topology to the\n"\
+ " frontend (for example, a device at 06:01.b will still appear at\n"\
+ " 06:01.b to the frontend). This is similar to how Xen 2.0.x\n"\
+ " exposed PCI devices to its driver domains. This may be required\n"\
+ " for drivers which depend on finding their hardward in certain\n"\
+ " bus/slot locations.");
+
+static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
+{
+ struct xen_pcibk_device *pdev;
+
+ pdev = kzalloc(sizeof(struct xen_pcibk_device), GFP_KERNEL);
+ if (pdev == NULL)
+ goto out;
+ dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
+
+ pdev->xdev = xdev;
+ dev_set_drvdata(&xdev->dev, pdev);
+
+ spin_lock_init(&pdev->dev_lock);
+
+ pdev->sh_info = NULL;
+ pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
+ pdev->be_watching = 0;
+
+ INIT_WORK(&pdev->op_work, xen_pcibk_do_op);
+
+ if (xen_pcibk_init_devices(pdev)) {
+ kfree(pdev);
+ pdev = NULL;
+ }
+out:
+ return pdev;
+}
+
+static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)
+{
+ spin_lock(&pdev->dev_lock);
+
+ /* Ensure the guest can't trigger our handler before removing devices */
+ if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ) {
+ unbind_from_irqhandler(pdev->evtchn_irq, pdev);
+ pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
+ }
+ spin_unlock(&pdev->dev_lock);
+
+ /* If the driver domain started an op, make sure we complete it
+ * before releasing the shared memory */
+
+ /* Note, the workqueue does not use spinlocks at all.*/
+ flush_workqueue(xen_pcibk_wq);
+
+ spin_lock(&pdev->dev_lock);
+ if (pdev->sh_info != NULL) {
+ xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_info);
+ pdev->sh_info = NULL;
+ }
+ spin_unlock(&pdev->dev_lock);
+
+}
+
+static void free_pdev(struct xen_pcibk_device *pdev)
+{
+ if (pdev->be_watching) {
+ unregister_xenbus_watch(&pdev->be_watch);
+ pdev->be_watching = 0;
+ }
+
+ xen_pcibk_disconnect(pdev);
+
+ xen_pcibk_release_devices(pdev);
+
+ dev_set_drvdata(&pdev->xdev->dev, NULL);
+ pdev->xdev = NULL;
+
+ kfree(pdev);
+}
+
+static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
+ int remote_evtchn)
+{
+ int err = 0;
+ void *vaddr;
+
+ dev_dbg(&pdev->xdev->dev,
+ "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
+ gnt_ref, remote_evtchn);
+
+ err = xenbus_map_ring_valloc(pdev->xdev, gnt_ref, &vaddr);
+ if (err < 0) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error mapping other domain page in ours.");
+ goto out;
+ }
+
+ spin_lock(&pdev->dev_lock);
+ pdev->sh_info = vaddr;
+ spin_unlock(&pdev->dev_lock);
+
+ err = bind_interdomain_evtchn_to_irqhandler(
+ pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event,
+ 0, DRV_NAME, pdev);
+ if (err < 0) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error binding event channel to IRQ");
+ goto out;
+ }
+
+ spin_lock(&pdev->dev_lock);
+ pdev->evtchn_irq = err;
+ spin_unlock(&pdev->dev_lock);
+ err = 0;
+
+ dev_dbg(&pdev->xdev->dev, "Attached!\n");
+out:
+ return err;
+}
+
+static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
+{
+ int err = 0;
+ int gnt_ref, remote_evtchn;
+ char *magic = NULL;
+
+
+ /* Make sure we only do this setup once */
+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ XenbusStateInitialised)
+ goto out;
+
+ /* Wait for frontend to state that it has published the configuration */
+ if (xenbus_read_driver_state(pdev->xdev->otherend) !=
+ XenbusStateInitialised)
+ goto out;
+
+ dev_dbg(&pdev->xdev->dev, "Reading frontend config\n");
+
+ err = xenbus_gather(XBT_NIL, pdev->xdev->otherend,
+ "pci-op-ref", "%u", &gnt_ref,
+ "event-channel", "%u", &remote_evtchn,
+ "magic", NULL, &magic, NULL);
+ if (err) {
+ /* If configuration didn't get read correctly, wait longer */
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading configuration from frontend");
+ goto out;
+ }
+
+ if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
+ xenbus_dev_fatal(pdev->xdev, -EFAULT,
+ "version mismatch (%s/%s) with pcifront - "
+ "halting xen_pcibk",
+ magic, XEN_PCI_MAGIC);
+ goto out;
+ }
+
+ err = xen_pcibk_do_attach(pdev, gnt_ref, remote_evtchn);
+ if (err)
+ goto out;
+
+ dev_dbg(&pdev->xdev->dev, "Connecting...\n");
+
+ err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
+ if (err)
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error switching to connected state!");
+
+ dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
+out:
+
+ kfree(magic);
+
+ return err;
+}
+
+static int xen_pcibk_publish_pci_dev(struct xen_pcibk_device *pdev,
+ unsigned int domain, unsigned int bus,
+ unsigned int devfn, unsigned int devid)
+{
+ int err;
+ int len;
+ char str[64];
+
+ len = snprintf(str, sizeof(str), "vdev-%d", devid);
+ if (unlikely(len >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
+ "%04x:%02x:%02x.%02x", domain, bus,
+ PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+out:
+ return err;
+}
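+/* Illustrative xenstore layout resulting from the call above: for devid 0
+ * the backend's node gains something like
+ *   <nodename>/vdev-0 = "0000:00:00.00"
+ * i.e. the (virtual, when the vpci backend is used) BDF formatted with
+ * "%04x:%02x:%02x.%02x".
+ */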
+
+static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
+ int domain, int bus, int slot, int func,
+ int devid)
+{
+ struct pci_dev *dev;
+ int err = 0;
+
+ dev_dbg(&pdev->xdev->dev, "exporting dom %x bus %x slot %x func %x\n",
+ domain, bus, slot, func);
+
+ dev = pcistub_get_pci_dev_by_slot(pdev, domain, bus, slot, func);
+ if (!dev) {
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Couldn't locate PCI device "
+ "(%04x:%02x:%02x.%01x)! "
+ "perhaps already in-use?",
+ domain, bus, slot, func);
+ goto out;
+ }
+
+ err = xen_pcibk_add_pci_dev(pdev, dev, devid,
+ xen_pcibk_publish_pci_dev);
+ if (err)
+ goto out;
+
+ dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
+ if (xen_register_device_domain_owner(dev,
+ pdev->xdev->otherend_id) != 0) {
+ dev_err(&dev->dev, "device has been assigned to another " \
+ "domain! Over-writting the ownership, but beware.\n");
+ xen_unregister_device_domain_owner(dev);
+ xen_register_device_domain_owner(dev, pdev->xdev->otherend_id);
+ }
+
+ /* TODO: It'd be nice to export a bridge and have all of its children
+ * get exported with it. This may be best done in xend (which will
+ * have to calculate resource usage anyway) but we probably want to
+ * put something in here to ensure that if a bridge gets given to a
+ * driver domain, that all devices under that bridge are not given
+ * to other driver domains (as he who controls the bridge can disable
+ * it and stop the other devices from working).
+ */
+out:
+ return err;
+}
+
+static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
+ int domain, int bus, int slot, int func)
+{
+ int err = 0;
+ struct pci_dev *dev;
+
+ dev_dbg(&pdev->xdev->dev, "removing dom %x bus %x slot %x func %x\n",
+ domain, bus, slot, func);
+
+ dev = xen_pcibk_get_pci_dev(pdev, domain, bus, PCI_DEVFN(slot, func));
+ if (!dev) {
+ err = -EINVAL;
+ dev_dbg(&pdev->xdev->dev, "Couldn't locate PCI device "
+ "(%04x:%02x:%02x.%01x)! not owned by this domain\n",
+ domain, bus, slot, func);
+ goto out;
+ }
+
+ dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id);
+ xen_unregister_device_domain_owner(dev);
+
+ xen_pcibk_release_pci_dev(pdev, dev);
+
+out:
+ return err;
+}
+
+static int xen_pcibk_publish_pci_root(struct xen_pcibk_device *pdev,
+ unsigned int domain, unsigned int bus)
+{
+ unsigned int d, b;
+ int i, root_num, len, err;
+ char str[64];
+
+ dev_dbg(&pdev->xdev->dev, "Publishing pci roots\n");
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
+ "root_num", "%d", &root_num);
+ if (err == 0 || err == -ENOENT)
+ root_num = 0;
+ else if (err < 0)
+ goto out;
+
+ /* Verify that we haven't already published this pci root */
+ for (i = 0; i < root_num; i++) {
+ len = snprintf(str, sizeof(str), "root-%d", i);
+ if (unlikely(len >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
+ str, "%x:%x", &d, &b);
+ if (err < 0)
+ goto out;
+ if (err != 2) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (d == domain && b == bus) {
+ err = 0;
+ goto out;
+ }
+ }
+
+ len = snprintf(str, sizeof(str), "root-%d", root_num);
+ if (unlikely(len >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dev_dbg(&pdev->xdev->dev, "writing root %d at %04x:%02x\n",
+ root_num, domain, bus);
+
+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
+ "%04x:%02x", domain, bus);
+ if (err)
+ goto out;
+
+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
+ "root_num", "%d", (root_num + 1));
+
+out:
+ return err;
+}
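+/* Illustrative xenstore layout resulting from the code above: publishing
+ * the first root writes
+ *   <nodename>/root-0   = "0000:00"
+ *   <nodename>/root_num = "1"
+ * and each further root appends root-1, root-2, ... while bumping root_num.
+ */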
+
+static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
+{
+ int err = 0;
+ int num_devs;
+ int domain, bus, slot, func;
+ int substate;
+ int i, len;
+ char state_str[64];
+ char dev_str[64];
+
+
+ dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
+
+ /* Make sure we only reconfigure once */
+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ XenbusStateReconfiguring)
+ goto out;
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
+ &num_devs);
+ if (err != 1) {
+ if (err >= 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading number of devices");
+ goto out;
+ }
+
+ for (i = 0; i < num_devs; i++) {
+ len = snprintf(state_str, sizeof(state_str), "state-%d", i);
+ if (unlikely(len >= (sizeof(state_str) - 1))) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "String overflow while reading "
+ "configuration");
+ goto out;
+ }
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, state_str,
+ "%d", &substate);
+ if (err != 1)
+ substate = XenbusStateUnknown;
+
+ switch (substate) {
+ case XenbusStateInitialising:
+ dev_dbg(&pdev->xdev->dev, "Attaching dev-%d ...\n", i);
+
+ len = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
+ if (unlikely(len >= (sizeof(dev_str) - 1))) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "String overflow while "
+ "reading configuration");
+ goto out;
+ }
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
+ dev_str, "%x:%x:%x.%x",
+ &domain, &bus, &slot, &func);
+ if (err < 0) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading device "
+ "configuration");
+ goto out;
+ }
+ if (err != 4) {
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error parsing pci device "
+ "configuration");
+ goto out;
+ }
+
+ err = xen_pcibk_export_device(pdev, domain, bus, slot,
+ func, i);
+ if (err)
+ goto out;
+
+ /* Publish pci roots. */
+ err = xen_pcibk_publish_pci_roots(pdev,
+ xen_pcibk_publish_pci_root);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error while publish PCI root"
+ "buses for frontend");
+ goto out;
+ }
+
+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
+ state_str, "%d",
+ XenbusStateInitialised);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error switching substate of "
+ "dev-%d\n", i);
+ goto out;
+ }
+ break;
+
+ case XenbusStateClosing:
+ dev_dbg(&pdev->xdev->dev, "Detaching dev-%d ...\n", i);
+
+ len = snprintf(dev_str, sizeof(dev_str), "vdev-%d", i);
+ if (unlikely(len >= (sizeof(dev_str) - 1))) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "String overflow while "
+ "reading configuration");
+ goto out;
+ }
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
+ dev_str, "%x:%x:%x.%x",
+ &domain, &bus, &slot, &func);
+ if (err < 0) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading device "
+ "configuration");
+ goto out;
+ }
+ if (err != 4) {
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error parsing pci device "
+ "configuration");
+ goto out;
+ }
+
+ err = xen_pcibk_remove_device(pdev, domain, bus, slot,
+ func);
+ if (err)
+ goto out;
+
+ /* TODO: If at some point we implement support for pci
+ * root hot-remove on pcifront side, we'll need to
+ * remove unnecessary xenstore nodes of pci roots here.
+ */
+
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error switching to reconfigured state!");
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
+ enum xenbus_state fe_state)
+{
+ struct xen_pcibk_device *pdev = dev_get_drvdata(&xdev->dev);
+
+ dev_dbg(&xdev->dev, "fe state changed %d\n", fe_state);
+
+ switch (fe_state) {
+ case XenbusStateInitialised:
+ xen_pcibk_attach(pdev);
+ break;
+
+ case XenbusStateReconfiguring:
+ xen_pcibk_reconfigure(pdev);
+ break;
+
+ case XenbusStateConnected:
+ /* pcifront switched its state from reconfiguring to connected.
+ * Mirror that by switching to the connected state here as well.
+ */
+ xenbus_switch_state(xdev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosing:
+ xen_pcibk_disconnect(pdev);
+ xenbus_switch_state(xdev, XenbusStateClosing);
+ break;
+
+ case XenbusStateClosed:
+ xen_pcibk_disconnect(pdev);
+ xenbus_switch_state(xdev, XenbusStateClosed);
+ if (xenbus_dev_is_online(xdev))
+ break;
+ /* fall through if not online */
+ case XenbusStateUnknown:
+ dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
+ device_unregister(&xdev->dev);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int xen_pcibk_setup_backend(struct xen_pcibk_device *pdev)
+{
+ /* Get configuration from xend (if available now) */
+ int domain, bus, slot, func;
+ int err = 0;
+ int i, num_devs;
+ char dev_str[64];
+ char state_str[64];
+
+ /* It's possible we could get the call to setup twice, so make sure
+ * we're not already connected.
+ */
+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ XenbusStateInitWait)
+ goto out;
+
+ dev_dbg(&pdev->xdev->dev, "getting be setup\n");
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
+ &num_devs);
+ if (err != 1) {
+ if (err >= 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading number of devices");
+ goto out;
+ }
+
+ for (i = 0; i < num_devs; i++) {
+ int l = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
+ if (unlikely(l >= (sizeof(dev_str) - 1))) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "String overflow while reading "
+ "configuration");
+ goto out;
+ }
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, dev_str,
+ "%x:%x:%x.%x", &domain, &bus, &slot, &func);
+ if (err < 0) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading device configuration");
+ goto out;
+ }
+ if (err != 4) {
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error parsing pci device "
+ "configuration");
+ goto out;
+ }
+
+ err = xen_pcibk_export_device(pdev, domain, bus, slot, func, i);
+ if (err)
+ goto out;
+
+ /* Switch substate of this device. */
+ l = snprintf(state_str, sizeof(state_str), "state-%d", i);
+ if (unlikely(l >= (sizeof(state_str) - 1))) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "String overflow while reading "
+ "configuration");
+ goto out;
+ }
+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, state_str,
+ "%d", XenbusStateInitialised);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err, "Error switching "
+ "substate of dev-%d\n", i);
+ goto out;
+ }
+ }
+
+ err = xen_pcibk_publish_pci_roots(pdev, xen_pcibk_publish_pci_root);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error while publish PCI root buses "
+ "for frontend");
+ goto out;
+ }
+
+ err = xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
+ if (err)
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error switching to initialised state!");
+
+out:
+ if (!err)
+ /* see if pcifront is already configured (if not, we'll wait) */
+ xen_pcibk_attach(pdev);
+
+ return err;
+}
+
+static void xen_pcibk_be_watch(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ struct xen_pcibk_device *pdev =
+ container_of(watch, struct xen_pcibk_device, be_watch);
+
+ switch (xenbus_read_driver_state(pdev->xdev->nodename)) {
+ case XenbusStateInitWait:
+ xen_pcibk_setup_backend(pdev);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int xen_pcibk_xenbus_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ int err = 0;
+ struct xen_pcibk_device *pdev = alloc_pdev(dev);
+
+ if (pdev == NULL) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(dev, err,
+ "Error allocating xen_pcibk_device struct");
+ goto out;
+ }
+
+ /* wait for xend to configure us */
+ err = xenbus_switch_state(dev, XenbusStateInitWait);
+ if (err)
+ goto out;
+
+ /* watch the backend node for backend configuration information */
+ err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
+ xen_pcibk_be_watch);
+ if (err)
+ goto out;
+
+ pdev->be_watching = 1;
+
+ /* We need to force a call to our callback here in case
+ * xend already configured us!
+ */
+ xen_pcibk_be_watch(&pdev->be_watch, NULL, 0);
+
+out:
+ return err;
+}
+
+static int xen_pcibk_xenbus_remove(struct xenbus_device *dev)
+{
+ struct xen_pcibk_device *pdev = dev_get_drvdata(&dev->dev);
+
+ if (pdev != NULL)
+ free_pdev(pdev);
+
+ return 0;
+}
+
+static const struct xenbus_device_id xenpci_ids[] = {
+ {"pci"},
+ {""},
+};
+
+static struct xenbus_driver xenbus_xen_pcibk_driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .ids = xenpci_ids,
+ .probe = xen_pcibk_xenbus_probe,
+ .remove = xen_pcibk_xenbus_remove,
+ .otherend_changed = xen_pcibk_frontend_changed,
+};
+
+struct xen_pcibk_backend *xen_pcibk_backend;
+
+int __init xen_pcibk_xenbus_register(void)
+{
+ xen_pcibk_wq = create_workqueue("xen_pciback_workqueue");
+ if (!xen_pcibk_wq) {
+ printk(KERN_ERR "%s: create"
+ "xen_pciback_workqueue failed\n", __func__);
+ return -EFAULT;
+ }
+ xen_pcibk_backend = &xen_pcibk_vpci_backend;
+ if (passthrough)
+ xen_pcibk_backend = &xen_pcibk_passthrough_backend;
+ pr_info(DRV_NAME ": backend is %s\n", xen_pcibk_backend->name);
+ return xenbus_register_backend(&xenbus_xen_pcibk_driver);
+}
+
+void __exit xen_pcibk_xenbus_unregister(void)
+{
+ destroy_workqueue(xen_pcibk_wq);
+ xenbus_unregister_driver(&xenbus_xen_pcibk_driver);
+}
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
new file mode 100644
index 00000000000..6ea852e2516
--- /dev/null
+++ b/drivers/xen/xen-selfballoon.c
@@ -0,0 +1,486 @@
+/******************************************************************************
+ * Xen selfballoon driver (and optional frontswap self-shrinking driver)
+ *
+ * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
+ *
+ * This code complements the cleancache and frontswap patchsets to optimize
+ * support for Xen Transcendent Memory ("tmem"). The policy it implements
+ * is rudimentary and will likely improve over time, but it does work well
+ * enough today.
+ *
+ * Two functionalities are implemented here which both use "control theory"
+ * (feedback) to optimize memory utilization. In a virtualized environment
+ * such as Xen, RAM is often a scarce resource and we would like to ensure
+ * that each of a possibly large number of virtual machines is using RAM
+ * efficiently, i.e. using as little as possible when under light load
+ * and obtaining as much as possible when memory demands are high.
+ * Since RAM needs vary highly dynamically and sometimes dramatically,
+ * "hysteresis" is used, that is, memory target is determined not just
+ * on current data but also on past data stored in the system.
+ *
+ * "Selfballooning" creates memory pressure by managing the Xen balloon
+ * driver to decrease and increase available kernel memory, driven
+ * largely by the target value of "Committed_AS" (see /proc/meminfo).
+ * Since Committed_AS does not account for clean mapped pages (i.e. pages
+ * in RAM that are identical to pages on disk), selfballooning has the
+ * effect of pushing less frequently used clean pagecache pages out of
+ * kernel RAM and, presumably using cleancache, into Xen tmem where
+ * Xen can more efficiently optimize RAM utilization for such pages.
+ *
+ * When kernel memory demand unexpectedly increases faster than Xen, via
+ * the selfballoon driver, is able to (or chooses to) provide usable RAM,
+ * the kernel may invoke swapping. In most cases, frontswap is able
+ * to absorb this swapping into Xen tmem. However, due to the fact
+ * that the kernel swap subsystem assumes swapping occurs to a disk,
+ * swapped pages may sit on the disk for a very long time; even if
+ * the kernel knows the page will never be used again. This is because
+ * the disk space costs very little and can be overwritten when
+ * necessary. When such stale pages are in frontswap, however, they
+ * are taking up valuable real estate. "Frontswap selfshrinking" works
+ * to resolve this: When frontswap activity is otherwise stable
+ * and the guest kernel is not under memory pressure, "frontswap
+ * selfshrinking" gradually applies pressure to remove some
+ * pages from frontswap and return them to kernel memory.
+ *
+ * For both "selfballooning" and "frontswap-selfshrinking", a worker
+ * thread is used and sysfs tunables are provided to adjust the frequency
+ * and rate of adjustments to achieve the goal, as well as to disable one
+ * or both functions independently.
+ *
+ * While some argue that this functionality can and should be implemented
+ * in userspace, it has been observed that bad things happen (e.g. OOMs).
+ *
+ * System configuration note: Selfballooning should not be enabled on
+ * systems without a sufficiently large swap device configured; for best
+ * results, it is recommended that total swap be increased by the size
+ * of the guest memory. Also, while technically not required to be
+ * configured, it is highly recommended that frontswap also be configured
+ * and enabled when selfballooning is running. So, selfballooning
+ * is disabled by default if frontswap is not configured and can only
+ * be enabled with the "selfballooning" kernel boot option; similarly
+ * selfballooning is enabled by default if frontswap is configured and
+ * can be disabled with the "noselfballooning" kernel boot option. Finally,
+ * when frontswap is configured, frontswap-selfshrinking can be disabled
+ * with the "noselfshrink" kernel boot option.
+ *
+ * Selfballooning is disallowed in domain0 and force-disabled.
+ *
+ */
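+/* Illustrative boot-line usage (summarising the policy above): with
+ * frontswap configured, "noselfballooning" and "noselfshrink" turn the two
+ * mechanisms off; without frontswap, selfballooning must be requested
+ * explicitly, e.g.
+ *   ... console=hvc0 selfballooning
+ */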
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <xen/balloon.h>
+#include <xen/tmem.h>
+#include <xen/xen.h>
+
+/* Enable/disable with sysfs. */
+static int xen_selfballooning_enabled __read_mostly;
+
+/*
+ * Controls rate at which memory target (this iteration) approaches
+ * ultimate goal when memory need is increasing (up-hysteresis) or
+ * decreasing (down-hysteresis). Higher values of hysteresis cause
+ * slower increases/decreases. The default values for the various
+ * parameters were deemed reasonable by experimentation, may be
+ * workload-dependent, and can all be adjusted via sysfs.
+ */
+static unsigned int selfballoon_downhysteresis __read_mostly = 8;
+static unsigned int selfballoon_uphysteresis __read_mostly = 1;
+
+/* In HZ, controls frequency of worker invocation. */
+static unsigned int selfballoon_interval __read_mostly = 5;
+
+static void selfballoon_process(struct work_struct *work);
+static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
+
+#ifdef CONFIG_FRONTSWAP
+#include <linux/frontswap.h>
+
+/* Enable/disable with sysfs. */
+static bool frontswap_selfshrinking __read_mostly;
+
+/* Enable/disable with kernel boot option. */
+static bool use_frontswap_selfshrink __initdata = true;
+
+/*
+ * The default values for the following parameters were deemed reasonable
+ * by experimentation, may be workload-dependent, and can all be
+ * adjusted via sysfs.
+ */
+
+/* Control rate for frontswap shrinking. Higher hysteresis is slower. */
+static unsigned int frontswap_hysteresis __read_mostly = 20;
+
+/*
+ * Number of selfballoon worker invocations to wait before observing that
+ * frontswap selfshrinking should commence. Note that selfshrinking does
+ * not use a separate worker thread.
+ */
+static unsigned int frontswap_inertia __read_mostly = 3;
+
+/* Countdown to next invocation of frontswap_shrink() */
+static unsigned long frontswap_inertia_counter;
+
+/*
+ * Invoked by the selfballoon worker thread, uses current number of pages
+ * in frontswap (frontswap_curr_pages()), previous status, and control
+ * values (hysteresis and inertia) to determine if frontswap should be
+ * shrunk and what the new frontswap size should be. Note that
+ * frontswap_shrink is essentially a partial swapoff that immediately
+ * transfers pages from the "swap device" (frontswap) back into kernel
+ * RAM; despite the name, frontswap "shrinking" is very different from
+ * the "shrinker" interface used by the kernel MM subsystem to reclaim
+ * memory.
+ */
+static void frontswap_selfshrink(void)
+{
+ static unsigned long cur_frontswap_pages;
+ static unsigned long last_frontswap_pages;
+ static unsigned long tgt_frontswap_pages;
+
+ last_frontswap_pages = cur_frontswap_pages;
+ cur_frontswap_pages = frontswap_curr_pages();
+ if (!cur_frontswap_pages ||
+ (cur_frontswap_pages > last_frontswap_pages)) {
+ frontswap_inertia_counter = frontswap_inertia;
+ return;
+ }
+ if (frontswap_inertia_counter && --frontswap_inertia_counter)
+ return;
+ if (cur_frontswap_pages <= frontswap_hysteresis)
+ tgt_frontswap_pages = 0;
+ else
+ tgt_frontswap_pages = cur_frontswap_pages -
+ (cur_frontswap_pages / frontswap_hysteresis);
+ frontswap_shrink(tgt_frontswap_pages);
+}
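+/* Worked example of the policy above (illustrative): with
+ * frontswap_hysteresis = 20 and a stable cur_frontswap_pages of 1000 (and
+ * the inertia countdown expired), the target becomes 1000 - 1000/20 = 950,
+ * i.e. 50 pages are pulled back from frontswap on this invocation.
+ */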
+
+static int __init xen_nofrontswap_selfshrink_setup(char *s)
+{
+ use_frontswap_selfshrink = false;
+ return 1;
+}
+
+__setup("noselfshrink", xen_nofrontswap_selfshrink_setup);
+
+/* Disable with kernel boot option. */
+static bool use_selfballooning __initdata = true;
+
+static int __init xen_noselfballooning_setup(char *s)
+{
+ use_selfballooning = false;
+ return 1;
+}
+
+__setup("noselfballooning", xen_noselfballooning_setup);
+#else /* !CONFIG_FRONTSWAP */
+/* Enable with kernel boot option. */
+static bool use_selfballooning __initdata = false;
+
+static int __init xen_selfballooning_setup(char *s)
+{
+ use_selfballooning = true;
+ return 1;
+}
+
+__setup("selfballooning", xen_selfballooning_setup);
+#endif /* CONFIG_FRONTSWAP */
+
+/*
+ * Use current balloon size, the goal (vm_committed_as), and hysteresis
+ * parameters to set a new target balloon size
+ */
+static void selfballoon_process(struct work_struct *work)
+{
+ unsigned long cur_pages, goal_pages, tgt_pages;
+ bool reset_timer = false;
+
+ if (xen_selfballooning_enabled) {
+ cur_pages = balloon_stats.current_pages;
+ tgt_pages = cur_pages; /* default is no change */
+ goal_pages = percpu_counter_read_positive(&vm_committed_as) +
+ balloon_stats.current_pages - totalram_pages;
+#ifdef CONFIG_FRONTSWAP
+ /* allow space for frontswap pages to be repatriated */
+ if (frontswap_selfshrinking && frontswap_enabled)
+ goal_pages += frontswap_curr_pages();
+#endif
+ if (cur_pages > goal_pages)
+ tgt_pages = cur_pages -
+ ((cur_pages - goal_pages) /
+ selfballoon_downhysteresis);
+ else if (cur_pages < goal_pages)
+ tgt_pages = cur_pages +
+ ((goal_pages - cur_pages) /
+ selfballoon_uphysteresis);
+ /* else if cur_pages == goal_pages, no change */
+ balloon_set_new_target(tgt_pages);
+ reset_timer = true;
+ }
+#ifdef CONFIG_FRONTSWAP
+ if (frontswap_selfshrinking && frontswap_enabled) {
+ frontswap_selfshrink();
+ reset_timer = true;
+ }
+#endif
+ if (reset_timer)
+ schedule_delayed_work(&selfballoon_worker,
+ selfballoon_interval * HZ);
+}
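+/* Worked example (illustrative): if current_pages is 100000 and goal_pages
+ * works out to 80000, then with selfballoon_downhysteresis = 8 the new
+ * target is 100000 - (20000 / 8) = 97500; moving up toward a larger goal
+ * with selfballoon_uphysteresis = 1 jumps straight to the goal.
+ */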
+
+#ifdef CONFIG_SYSFS
+
+#include <linux/sysdev.h>
+#include <linux/capability.h>
+
+#define SELFBALLOON_SHOW(name, format, args...) \
+ static ssize_t show_##name(struct sys_device *dev, \
+ struct sysdev_attribute *attr, \
+ char *buf) \
+ { \
+ return sprintf(buf, format, ##args); \
+ }
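+/* For reference, SELFBALLOON_SHOW(selfballooning, "%d\n",
+ * xen_selfballooning_enabled) expands to a show_selfballooning() handler
+ * that sprintf()s the current value of xen_selfballooning_enabled into buf.
+ */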
+
+SELFBALLOON_SHOW(selfballooning, "%d\n", xen_selfballooning_enabled);
+
+static ssize_t store_selfballooning(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ bool was_enabled = xen_selfballooning_enabled;
+ unsigned long tmp;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = strict_strtoul(buf, 10, &tmp);
+ if (err || ((tmp != 0) && (tmp != 1)))
+ return -EINVAL;
+
+ xen_selfballooning_enabled = !!tmp;
+ if (!was_enabled && xen_selfballooning_enabled)
+ schedule_delayed_work(&selfballoon_worker,
+ selfballoon_interval * HZ);
+
+ return count;
+}
+
+static SYSDEV_ATTR(selfballooning, S_IRUGO | S_IWUSR,
+ show_selfballooning, store_selfballooning);
+
+SELFBALLOON_SHOW(selfballoon_interval, "%d\n", selfballoon_interval);
+
+static ssize_t store_selfballoon_interval(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ selfballoon_interval = val;
+ return count;
+}
+
+static SYSDEV_ATTR(selfballoon_interval, S_IRUGO | S_IWUSR,
+ show_selfballoon_interval, store_selfballoon_interval);
+
+SELFBALLOON_SHOW(selfballoon_downhys, "%d\n", selfballoon_downhysteresis);
+
+static ssize_t store_selfballoon_downhys(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ selfballoon_downhysteresis = val;
+ return count;
+}
+
+static SYSDEV_ATTR(selfballoon_downhysteresis, S_IRUGO | S_IWUSR,
+ show_selfballoon_downhys, store_selfballoon_downhys);
+
+
+SELFBALLOON_SHOW(selfballoon_uphys, "%d\n", selfballoon_uphysteresis);
+
+static ssize_t store_selfballoon_uphys(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ selfballoon_uphysteresis = val;
+ return count;
+}
+
+static SYSDEV_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR,
+ show_selfballoon_uphys, store_selfballoon_uphys);
+
+#ifdef CONFIG_FRONTSWAP
+SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
+
+static ssize_t store_frontswap_selfshrinking(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ bool was_enabled = frontswap_selfshrinking;
+ unsigned long tmp;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &tmp);
+ if (err || ((tmp != 0) && (tmp != 1)))
+ return -EINVAL;
+ frontswap_selfshrinking = !!tmp;
+ if (!was_enabled && !xen_selfballooning_enabled &&
+ frontswap_selfshrinking)
+ schedule_delayed_work(&selfballoon_worker,
+ selfballoon_interval * HZ);
+
+ return count;
+}
+
+static SYSDEV_ATTR(frontswap_selfshrinking, S_IRUGO | S_IWUSR,
+ show_frontswap_selfshrinking, store_frontswap_selfshrinking);
+
+SELFBALLOON_SHOW(frontswap_inertia, "%d\n", frontswap_inertia);
+
+static ssize_t store_frontswap_inertia(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ frontswap_inertia = val;
+ frontswap_inertia_counter = val;
+ return count;
+}
+
+static SYSDEV_ATTR(frontswap_inertia, S_IRUGO | S_IWUSR,
+ show_frontswap_inertia, store_frontswap_inertia);
+
+SELFBALLOON_SHOW(frontswap_hysteresis, "%d\n", frontswap_hysteresis);
+
+static ssize_t store_frontswap_hysteresis(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ frontswap_hysteresis = val;
+ return count;
+}
+
+static SYSDEV_ATTR(frontswap_hysteresis, S_IRUGO | S_IWUSR,
+ show_frontswap_hysteresis, store_frontswap_hysteresis);
+
+#endif /* CONFIG_FRONTSWAP */
+
+static struct attribute *selfballoon_attrs[] = {
+ &attr_selfballooning.attr,
+ &attr_selfballoon_interval.attr,
+ &attr_selfballoon_downhysteresis.attr,
+ &attr_selfballoon_uphysteresis.attr,
+#ifdef CONFIG_FRONTSWAP
+ &attr_frontswap_selfshrinking.attr,
+ &attr_frontswap_hysteresis.attr,
+ &attr_frontswap_inertia.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group selfballoon_group = {
+ .name = "selfballoon",
+ .attrs = selfballoon_attrs
+};
+#endif
+
+int register_xen_selfballooning(struct sys_device *sysdev)
+{
+ /* default to failure when sysfs support is not compiled in */
+ int error = -1;
+
+#ifdef CONFIG_SYSFS
+ error = sysfs_create_group(&sysdev->kobj, &selfballoon_group);
+#endif
+ return error;
+}
+EXPORT_SYMBOL(register_xen_selfballooning);
+
+static int __init xen_selfballoon_init(void)
+{
+ bool enable = false;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ if (xen_initial_domain()) {
+ pr_info("xen/balloon: Xen selfballooning driver "
+ "disabled for domain0.\n");
+ return -ENODEV;
+ }
+
+ xen_selfballooning_enabled = tmem_enabled && use_selfballooning;
+ if (xen_selfballooning_enabled) {
+ pr_info("xen/balloon: Initializing Xen "
+ "selfballooning driver.\n");
+ enable = true;
+ }
+#ifdef CONFIG_FRONTSWAP
+ frontswap_selfshrinking = tmem_enabled && use_frontswap_selfshrink;
+ if (frontswap_selfshrinking) {
+ pr_info("xen/balloon: Initializing frontswap "
+ "selfshrinking driver.\n");
+ enable = true;
+ }
+#endif
+ if (!enable)
+ return -ENODEV;
+
+ schedule_delayed_work(&selfballoon_worker, selfballoon_interval * HZ);
+
+ return 0;
+}
+
+subsys_initcall(xen_selfballoon_init);
+
+MODULE_LICENSE("GPL");
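
register_xen_selfballooning() takes a struct sys_device so the selfballoon/ attribute group can hang off an existing sysdev node owned by the balloon code. A minimal sketch of such a caller follows, assuming a balloon sysdev registered elsewhere; the function and variable names here are illustrative and not part of this patch.

/* Sketch only: attach the selfballoon group to an already-registered sysdev. */
static int __init selfballoon_sysfs_hookup(struct sys_device *balloon_sysdev)
{
	int err = register_xen_selfballooning(balloon_sysdev);

	if (err)
		pr_warn("xen/balloon: selfballoon sysfs group not created (%d)\n",
			err);
	return err;
}
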
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 739769551e3..bd2f90c9ac8 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -378,26 +378,32 @@ static void xenbus_dev_release(struct device *dev)
kfree(to_xenbus_device(dev));
}
-static ssize_t xendev_show_nodename(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t nodename_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
}
-static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
-static ssize_t xendev_show_devtype(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t devtype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
}
-static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
-static ssize_t xendev_show_modalias(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype);
+ return sprintf(buf, "%s:%s\n", dev->bus->name,
+ to_xenbus_device(dev)->devicetype);
}
-static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL);
+
+struct device_attribute xenbus_dev_attrs[] = {
+ __ATTR_RO(nodename),
+ __ATTR_RO(devtype),
+ __ATTR_RO(modalias),
+ __ATTR_NULL
+};
+EXPORT_SYMBOL_GPL(xenbus_dev_attrs);
int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
@@ -449,25 +455,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
if (err)
goto fail;
- err = device_create_file(&xendev->dev, &dev_attr_nodename);
- if (err)
- goto fail_unregister;
-
- err = device_create_file(&xendev->dev, &dev_attr_devtype);
- if (err)
- goto fail_remove_nodename;
-
- err = device_create_file(&xendev->dev, &dev_attr_modalias);
- if (err)
- goto fail_remove_devtype;
-
return 0;
-fail_remove_devtype:
- device_remove_file(&xendev->dev, &dev_attr_devtype);
-fail_remove_nodename:
- device_remove_file(&xendev->dev, &dev_attr_nodename);
-fail_unregister:
- device_unregister(&xendev->dev);
fail:
kfree(xendev);
return err;
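
The per-device device_create_file() calls and their unwind labels can be dropped because the driver core walks bus_type.dev_attrs itself when a device is added, stopping at the __ATTR_NULL terminator and removing already-created files on failure. The loop below is only a rough, simplified illustration of that behaviour, not the literal driver-core implementation.

/* Simplified idea of how the driver core consumes a dev_attrs array. */
static int create_bus_dev_attrs(struct device *dev,
				const struct device_attribute *attrs)
{
	int i, err;

	for (i = 0; attrs && attrs[i].attr.name; i++) {	/* stops at __ATTR_NULL */
		err = device_create_file(dev, &attrs[i]);
		if (err) {
			while (--i >= 0)
				device_remove_file(dev, &attrs[i]);
			return err;
		}
	}
	return 0;
}
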
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 888b9900ca0..b814935378c 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -48,6 +48,8 @@ struct xen_bus_type
struct bus_type bus;
};
+extern struct device_attribute xenbus_dev_attrs[];
+
extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
extern int xenbus_dev_probe(struct device *_dev);
extern int xenbus_dev_remove(struct device *_dev);
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 6cf467bf63e..60adf919d78 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -107,6 +107,9 @@ static int xenbus_uevent_backend(struct device *dev,
if (xdev == NULL)
return -ENODEV;
+ if (add_uevent_var(env, "MODALIAS=xen-backend:%s", xdev->devicetype))
+ return -ENOMEM;
+
/* stuff we want to pass to /sbin/hotplug */
if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype))
return -ENOMEM;
@@ -183,10 +186,6 @@ static void frontend_changed(struct xenbus_watch *watch,
xenbus_otherend_changed(watch, vec, len, 0);
}
-static struct device_attribute xenbus_backend_dev_attrs[] = {
- __ATTR_NULL
-};
-
static struct xen_bus_type xenbus_backend = {
.root = "backend",
.levels = 3, /* backend/type/<frontend>/<id> */
@@ -200,7 +199,7 @@ static struct xen_bus_type xenbus_backend = {
.probe = xenbus_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
- .dev_attrs = xenbus_backend_dev_attrs,
+ .dev_attrs = xenbus_dev_attrs,
},
};
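
With the uevent change above, backend devices now report MODALIAS=xen-backend:<devicetype>, so udev/modprobe can autoload the matching module. A backend driver would opt in with a module alias along these lines; the "vbd" device type is only an example, not taken from this patch.

/*
 * Example only: a backend module advertising that it handles "vbd" devices,
 * matching the MODALIAS string emitted by xenbus_uevent_backend() above.
 */
MODULE_ALIAS("xen-backend:vbd");
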
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index b6a2690c9d4..ed2ba474a56 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -81,10 +81,6 @@ static void backend_changed(struct xenbus_watch *watch,
xenbus_otherend_changed(watch, vec, len, 1);
}
-static struct device_attribute xenbus_frontend_dev_attrs[] = {
- __ATTR_NULL
-};
-
static const struct dev_pm_ops xenbus_pm_ops = {
.suspend = xenbus_dev_suspend,
.resume = xenbus_dev_resume,
@@ -106,7 +102,7 @@ static struct xen_bus_type xenbus_frontend = {
.probe = xenbus_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
- .dev_attrs = xenbus_frontend_dev_attrs,
+ .dev_attrs = xenbus_dev_attrs,
.pm = &xenbus_pm_ops,
},